commit 10c9f34f67f487bdfbd7579290f4b2d6438f18c6 Author: Greg Burd Date: Sun Jun 6 13:46:45 2021 -0400 Initial import. diff --git a/FindBugsExclude.xml b/FindBugsExclude.xml new file mode 100644 index 0000000..01186ff --- /dev/null +++ b/FindBugsExclude.xml @@ -0,0 +1,458 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..a4e050c --- /dev/null +++ b/LICENSE @@ -0,0 +1,76 @@ +Copyright (C) 2002, 2016, Oracle and/or its affiliates. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License") reproduced below or available at: + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions. +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: +1. You must give any other recipients of the Work or Derivative Works a copy of this License; and +2. You must cause any modified files to carry prominent notices stating that You changed the files; and +3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +4. 
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +9. Accepting Warranty or Additional Liability. 
While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +END OF TERMS AND CONDITIONS + + +ADDITIONAL THIRD PARTY NOTICES: + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2005 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/README b/README new file mode 100644 index 0000000..8631d26 --- /dev/null +++ b/README @@ -0,0 +1,5 @@ +Oracle: Berkeley DB, Java Edition 7.5.11: 2017-10-31 09:36:36 UTC + +This is Berkeley DB, Java Edition, version 7.5.11 from +Oracle. To view the release and installation documentation, load +the distribution file docs/index.html into your web browser. \ No newline at end of file diff --git a/ant/PrintBootClassPath.java b/ant/PrintBootClassPath.java new file mode 100644 index 0000000..5b34400 --- /dev/null +++ b/ant/PrintBootClassPath.java @@ -0,0 +1,16 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +/** + * Print the value of the "sun.boot.class.path" system property, which is the + * boot classpath for the Oracle Java virtual machine. 
+ */ +public class PrintBootClassPath { + public static void main(String[] args) { + System.out.println(System.getProperty("sun.boot.class.path")); + } +} diff --git a/ant/compile.xml b/ant/compile.xml new file mode 100644 index 0000000..e475d1f --- /dev/null +++ b/ant/compile.xml @@ -0,0 +1,121 @@ + + + + Shared compile target. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ant/internal.xml b/ant/internal.xml new file mode 100644 index 0000000..d93d8f4 --- /dev/null +++ b/ant/internal.xml @@ -0,0 +1,1187 @@ + + + + + + + + + +Internal targets for JE + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/build-common.xml b/build-common.xml new file mode 100644 index 0000000..99217e2 --- /dev/null +++ b/build-common.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + diff --git a/build.xml b/build.xml new file mode 100644 index 0000000..fa3a28b --- /dev/null +++ b/build.xml @@ -0,0 +1,2394 @@ + + + + + + + + + Compile and test JE + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + debug the build file itself + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Builds the distribution 
package. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Using secure property configuration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Running je.SimpleExample to insert some data + + + + + + Running DbVerifyLog + + + + + + Running DbDump + + + + + + + + Running DbLoad + + + + + + Running DbDump + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + MeasureInsertSize ran OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Javadoc public api + + + + + + + + + + + + + + + + + + + + + + + + +
Berkeley DB Java Edition
version ${release.version}]]> +
+ Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.]]> + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + Javadoc examples + + +
Berkeley DB Java Edition Examples
version ${release.version}]]> +
+ Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.]]> + + + + + + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + + + + + +
Berkeley DB Java Edition
version ${release.version}]]> +
+ Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.]]> + + + +
+ + + +
+ + + + + + + + + + + + + + Need to set the docsbooksdir property to the directory containing + the docs_books Mercurial repository cloned from + ssh://soc//a/hgroot/docs_books + + + + Need to set the fopdir property to the directory containing the + installation of Apache FOP (Formatting Objects Processor) + installed from soc:/b/htdocs/documentation/sleepycat-fop095.zip. + Make sure to update the font-base entry in the + conf/sleepycat-fop.conf file in that directory to match its + current location. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Install JE into destdir + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
diff --git a/dist/build.properties b/dist/build.properties new file mode 100644 index 0000000..fdab116 --- /dev/null +++ b/dist/build.properties @@ -0,0 +1,4 @@ +release.version=7.5.11 +release.numeric.version=7.5.11 +release.major=7 +release.minor=5 diff --git a/docs/GettingStartedGuide/BerkeleyDB-JE-GSG.pdf b/docs/GettingStartedGuide/BerkeleyDB-JE-GSG.pdf new file mode 100644 index 0000000..21d2e85 Binary files /dev/null and b/docs/GettingStartedGuide/BerkeleyDB-JE-GSG.pdf differ diff --git a/docs/GettingStartedGuide/Cursors.html b/docs/GettingStartedGuide/Cursors.html new file mode 100644 index 0000000..44a3a52 --- /dev/null +++ b/docs/GettingStartedGuide/Cursors.html @@ -0,0 +1,214 @@ + + + + + + Chapter 9. Using Cursors + + + + + + + + + +
+
+
+
+

Chapter 9. Using Cursors

+
+
+
+ +

+ Cursors provide a mechanism by which you can iterate over the
+ records in a database. Using cursors, you can get, put, and delete
+ database records. If a database allows duplicate records, then
+ cursors are the only mechanism by which you can access anything
+ other than the first duplicate for a given key.

+

+ This chapter introduces cursors. It explains how to open and close them, how + to use them to modify databases, and how to use them with duplicate records. +

+
+
+
+
+

Opening and Closing Cursors

+
+
+
+

+ To use a cursor, you must open it using the Database.openCursor()
+ method. When you open a cursor, you can optionally pass it a
+ CursorConfig object to set cursor properties. The cursor properties
+ that you can set allow you to determine whether the cursor will
+ perform committed or uncommitted reads. See the Berkeley DB, Java
+ Edition Getting Started with Transaction Processing guide for more
+ information.

+

For example:

+ +
package je.gettingStarted;
+    
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+
+import java.io.File;
+
+...
+Environment myDbEnvironment = null;
+Database myDatabase = null;
+Cursor myCursor = null;
+
+try {
+    myDbEnvironment = new Environment(new File("/export/dbEnv"), null);
+    myDatabase = myDbEnvironment.openDatabase(null, "myDB", null);
+
+    myCursor = myDatabase.openCursor(null, null);
+} catch (DatabaseException dbe) {
+    // Exception handling goes here ...
+}
+
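+
+ If you want the cursor to perform uncommitted (dirty) reads, you
+ can request that through the CursorConfig object mentioned above.
+ A minimal sketch, reusing the myDatabase and myCursor handles from
+ the example:
+
+import com.sleepycat.je.CursorConfig;
+
+CursorConfig cursorConfig = new CursorConfig();
+cursorConfig.setReadUncommitted(true); // permit dirty reads
+myCursor = myDatabase.openCursor(null, cursorConfig);
+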

+ To close the cursor, call the Cursor.close() method. Note that if
+ you close a database that still has open cursors, it throws an
+ exception and closes those cursors for you. For best results, close
+ your cursors from within a finally block. Close each cursor handle
+ as soon as you are done with it, both to improve concurrency and to
+ release resources such as page locks.

+ +
package je.gettingStarted;
+    
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+
+...
+try {
+    ...
+} catch ... {
+} finally {
+    try {
+        if (myCursor != null) {
+            myCursor.close();
+        }
+
+        if (myDatabase != null) {
+            myDatabase.close();
+        }
+
+        if (myDbEnvironment != null) {
+            myDbEnvironment.close();
+        }
+    } catch(DatabaseException dbe) {
+        System.err.println("Error in close: " + dbe.toString());
+    }
+} 
+
+
+ + + diff --git a/docs/GettingStartedGuide/DBAdmin.html b/docs/GettingStartedGuide/DBAdmin.html new file mode 100644 index 0000000..5aeaa3b --- /dev/null +++ b/docs/GettingStartedGuide/DBAdmin.html @@ -0,0 +1,157 @@ + + + + + + Administrative Methods + + + + + + + + + +
+
+
+
+

Administrative Methods

+
+
+
+

+ Both the Environment and + Database classes provide methods that are useful + for manipulating databases. These methods are: +

+
+
    +
  • +

    + Database.getDatabaseName() +

    +

    Returns the database's name.

    + +
    String dbName = myDatabase.getDatabaseName();
    + + +
  • +
  • +

    + Database.getEnvironment() +

    +

    Returns the Environment that contains this database.

    + +
    Environment theEnv = myDatabase.getEnvironment();
    +
  • +
  • +

    + Database.preload() +

    +

    Preloads the database into the in-memory cache. Optionally takes + a long that identifies the maximum number of bytes to load into the + cache. If this parameter is not supplied, the maximum memory usage + allowed by the evictor thread is used. +

    + +
myDatabase.preload(1048576L); // 1024*1024
    +
  • +
  • +

    + Environment.getDatabaseNames() +

    +

    Returns a list of Strings of all the databases contained by the + environment.

    + +
    import java.util.List;
    +...
    +List myDbNames = myDbEnv.getDatabaseNames();
    +for(int i=0; i < myDbNames.size(); i++) {
    +    System.out.println("Database Name: " + (String)myDbNames.get(i));
    +}
    +
  • +
  • +

    + Environment.removeDatabase() +

    +

    Deletes the database. The database must be closed when you + perform this action on it.

    + +
    String dbName = myDatabase.getDatabaseName();
    +myDatabase.close();
    +myDbEnv.removeDatabase(null, dbName);
    +
  • +
  • +

    + Environment.renameDatabase() +

    +

    Renames the database. The database must be closed when you + perform this action on it.

    + +
    String oldName = myDatabase.getDatabaseName();   
+String newName = oldName + ".new";
    +myDatabase.close();
    +myDbEnv.renameDatabase(null, oldName, newName);
    +
  • +
  • +

    + Environment.truncateDatabase() +

    +

    + Deletes every record in the database and optionally returns the + number of records that were deleted. Note that it is much less + expensive to truncate a database without counting the number of + records deleted than it is to truncate and count. +

    + +
long numDiscarded = 
+    myEnv.truncateDatabase(null,                  // txn handle
+                   myDatabase.getDatabaseName(),  // database name
+                   true);                         // If true, then the
+                                                  // number of records
+                                                  // deleted is counted.
    +System.out.println("Discarded " + numDiscarded +
    +                   " records from database " + 
    +                   myDatabase.getDatabaseName()); 
    +
  • +
+
+
+ + + diff --git a/docs/GettingStartedGuide/DBEntry.html b/docs/GettingStartedGuide/DBEntry.html new file mode 100644 index 0000000..01992c9 --- /dev/null +++ b/docs/GettingStartedGuide/DBEntry.html @@ -0,0 +1,303 @@ + + + + + + Chapter 8. Database Records + + + + + + + + + +
+
+
+
+

Chapter 8. Database Records

+
+
+
+ +

+ JE records contain two parts — a key and some data. Both the key
+ and its corresponding data are encapsulated in DatabaseEntry class
+ objects. Therefore, to access a JE record, you need two such
+ objects, one for the key and one for the data.

+

+ DatabaseEntry can hold any kind of data from simple + Java primitive types to complex Java objects so long as that data can be + represented as a Java byte array. Note that due to + performance considerations, you should not use Java serialization to convert + a Java object to a byte array. Instead, use the Bind APIs + to perform this conversion (see + Using the BIND APIs for more + information). +

+
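+
+ As a quick illustration of the bind APIs mentioned above, the tuple
+ bindings convert a Java primitive to and from a byte array without
+ serialization. A minimal sketch (the value 1234 is just an
+ illustration):
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+DatabaseEntry entry = new DatabaseEntry();
+IntegerBinding.intToEntry(1234, entry);       // int -> byte array
+int value = IntegerBinding.entryToInt(entry); // byte array -> int
+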

+ This chapter describes how you can convert both Java primitives and Java + class objects into and out of byte arrays. It also + introduces storing and retrieving key/value pairs from a database. In + addition, this chapter describes how you can use comparators to influence + how JE sorts its database records. +

+
+
+
+
+

Using Database Records

+
+
+
+

+ Each database record consists of two DatabaseEntry objects — one
+ for the key and another for the data. Key and data information are
+ passed to and returned from JE using DatabaseEntry objects as byte
+ arrays. Using DatabaseEntry allows JE to change the underlying byte
+ array as well as to return multiple values (that is, key and data).
+ Therefore, using DatabaseEntry instances is mostly an exercise in
+ efficiently moving your keys and your data in and out of byte
+ arrays.

+

+ For example, to store a database record where both the key and the + data are Java String objects, you instantiate a + pair of DatabaseEntry objects: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseEntry;
+
+...
+
+String aKey = "key";
+String aData = "data";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+    // Storing the record is described later in this chapter 
+
+

Note

+

+ Notice that we specify UTF-8 when we retrieve the + byte array from our String + object. Without parameters, String.getBytes() uses the + Java system's default encoding. You should never use a system's default + encoding when storing data in a database because the encoding can change. +

+
+

+ When the record is retrieved from the database, the method that you + use to perform this operation populates two DatabaseEntry + instances for you, one for the key and another for the data. Assuming Java + String objects, you retrieve your data from the + DatabaseEntry as follows: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseEntry;
+
+...
+
+// theKey and theData are DatabaseEntry objects. Database
+// retrieval is described later in this chapter. For now, 
+// we assume some database get method has populated these
+// objects for us.
+
+// Use DatabaseEntry.getData() to retrieve the encapsulated Java
+// byte array.
+
+byte[] myKey = theKey.getData();
+byte[] myData = theData.getData();
+
+String key = new String(myKey, "UTF-8");
+String data = new String(myData, "UTF-8"); 
+

+ There are many ways to move data in and out of byte arrays. To help
+ you with this task, JE provides the bind APIs. These APIs allow you
+ to efficiently store both primitive data types and complex objects
+ in byte arrays.

+

+ The next section describes basic database put and get operations. A + basic understanding of database access is useful when describing database + storage of more complex data such as is supported by the bind APIs. Basic + bind API usage is then described in Using the BIND APIs. +

+ + + +
+
+ + + diff --git a/docs/GettingStartedGuide/DeleteEntryWCursor.html b/docs/GettingStartedGuide/DeleteEntryWCursor.html new file mode 100644 index 0000000..7fe87f2 --- /dev/null +++ b/docs/GettingStartedGuide/DeleteEntryWCursor.html @@ -0,0 +1,122 @@ + + + + + + Deleting Records Using Cursors + + + + + + + + + +
+
+
+
+

Deleting Records Using Cursors

+
+
+
+

+ To delete a record using a cursor, position the cursor to the
+ record that you want to delete and then call Cursor.delete(). Note
+ that after deleting a record, the value of Cursor.getCurrent() is
+ unchanged until the cursor is moved again. Also, if you call
+ Cursor.delete() two or more times in a row without repositioning
+ the cursor, then all subsequent deletes return
+ OperationStatus.KEYEMPTY.

+

For example:

+ +
package je.gettingStarted;
+    
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus; 
+
+...
+
+Cursor cursor = null;
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+    // Create DatabaseEntry objects
+    // searchKey is some String.
+    DatabaseEntry theKey = new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Position the cursor. Ignoring the return value for clarity
+    OperationStatus retVal = cursor.getSearchKey(theKey, theData, 
+                                                 LockMode.DEFAULT);
+    
+    // Count the number of records using the given key. If there is only
+    // one, delete that record.
+    if (cursor.count() == 1) {
+            System.out.println("Deleting " + 
+                               new String(theKey.getData(), "UTF-8") +
+                               "|" + 
+                               new String(theData.getData(), "UTF-8"));
+            cursor.delete();
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   if (cursor != null) {
+       cursor.close();
+   }
+}
+
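+
+ To observe the KEYEMPTY behavior described above, you can check the
+ status returned by each delete. A minimal sketch, assuming a cursor
+ positioned on an existing record:
+
+OperationStatus status = cursor.delete(); // deletes current record
+status = cursor.delete();                 // cursor not repositioned
+if (status == OperationStatus.KEYEMPTY) {
+    System.out.println("Record was already deleted.");
+}
+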
+ + + diff --git a/docs/GettingStartedGuide/EnvProps.html b/docs/GettingStartedGuide/EnvProps.html new file mode 100644 index 0000000..c9cbc09 --- /dev/null +++ b/docs/GettingStartedGuide/EnvProps.html @@ -0,0 +1,263 @@ + + + + + + Environment Properties + + + + + + + + + +
+
+
+
+

Environment Properties

+
+
+
+ +

+ You set properties for the Environment using the + EnvironmentConfig class. You can also set properties for a + specific Environment instance using + EnvironmentMutableConfig. +

+
+
+
+
+

The EnvironmentConfig Class

+
+
+
+

+ The EnvironmentConfig class makes a large number of fields and + methods available to you. Describing all of these tuning parameters is beyond the scope of + this manual. However, there are a few properties that you are likely to want to set. They + are described here. +

+

+ Note that for each of the properties that you can commonly set, there is a corresponding + getter method. Also, you can always retrieve the + EnvironmentConfig object used by your environment using the + Environment.getConfig() method. +

+

+ You set environment configuration parameters using the following methods on the + EnvironmentConfig class: +

+
+
    +
  • +

    + EnvironmentConfig.setAllowCreate() +

    +

    + If true, the database environment is created when it is opened. + If false, environment open fails if the environment does not + exist. This property has no meaning if the database environment already exists. + Default is false. +

    +
  • +
  • +

    + EnvironmentConfig.setReadOnly() +

    +

    If true, then all databases opened in this + environment must be opened as read-only. If you are writing a + multi-process application, then all but one of your processes must set + this value to true. Default is false.

    +

    + You can also set this property using the je.env.isReadOnly + parameter in your env_home/je.properties file. +

    +
  • +
  • +

    + EnvironmentConfig.setTransactional() +

    +

    If true, configures the database environment + to support transactions. Default is false.

    +

    + You can also set this property using the je.env.isTransactional + parameter in your env_home/je.properties file. +

    +
  • +
+
+

For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+     
+...
+
+Environment myDatabaseEnvironment = null;
+try {
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    envConfig.setTransactional(true);
+    myDatabaseEnvironment = 
+        new Environment(new File("/export/dbEnv"), envConfig);
+} catch (DatabaseException dbe) {
+   System.err.println(dbe.toString());
+   System.exit(1);
+} 
+
+
+
+
+
+

EnvironmentMutableConfig

+
+
+
+

+ EnvironmentMutableConfig manages properties that can be reset after the + Environment object has been constructed. In addition, EnvironmentConfig + extends EnvironmentMutableConfig, so you can set these mutable properties at + Environment construction time if necessary. +

+

+ The EnvironmentMutableConfig class allows you to set the following + properties: +

+
+
    +
  • +

    + setCachePercent() +

    +

    + Determines the percentage of JVM memory available to the JE cache. + See + + Sizing the Cache + for more information. +

    +
  • +
  • +

    + setCacheSize() +

    +

    + Determines the total amount of memory available to the database cache. + See Sizing the Cache + for more information. +

    +
  • +
  • +

    + setTxnNoSync() +

    +

    + Determines whether change records created due to a transaction commit are written to the backing + log files on disk. A value of true causes + the data to not be flushed to + disk. See the Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

    +
  • +
  • +

    + setTxnWriteNoSync() +

    +

    + Determines whether logs are flushed on transaction commit (the logs are still written, however). + By setting this value to true, you potentially gain better performance than if + you flush the logs on commit, but you do so by losing some of your transaction durability guarantees. +

    +
  • +
+
+

+ Each of these properties has a corresponding getter method (for
+ example, getTxnNoSync()). Moreover, you can always retrieve your
+ environment's EnvironmentMutableConfig object by using the
+ Environment.getMutableConfig() method.

+

+ For example: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentMutableConfig;
+
+import java.io.File;
+
+
+...
+
+try {
+    Environment myEnv = new Environment(new File("/export/dbEnv"), null);
+    EnvironmentMutableConfig envMutableConfig = 
+        new EnvironmentMutableConfig();
+    envMutableConfig.setTxnNoSync(true);
+    myEnv.setMutableConfig(envMutableConfig); 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} 
+
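+
+ When changing settings on an environment that is already open, it
+ is often safer to start from the environment's current mutable
+ configuration so that other settings are preserved. A minimal
+ sketch, assuming myEnv is an open Environment:
+
+EnvironmentMutableConfig mutableConfig = myEnv.getMutableConfig();
+mutableConfig.setCachePercent(75); // for example, grow the cache
+myEnv.setMutableConfig(mutableConfig);
+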
+
+ + + diff --git a/docs/GettingStartedGuide/Positioning.html b/docs/GettingStartedGuide/Positioning.html new file mode 100644 index 0000000..1741ed9 --- /dev/null +++ b/docs/GettingStartedGuide/Positioning.html @@ -0,0 +1,691 @@ + + + + + + Getting Records Using the Cursor + + + + + + + + + +
+
+
+
+

Getting Records Using the Cursor

+
+
+
+ +

+ To iterate over database records, from the first record to + the last, simply open the cursor and then use the + Cursor.getNext() + + + method. + + For example: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;  
+import com.sleepycat.je.OperationStatus; 
+
+...
+
+Cursor cursor = null;
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+
+    // Open the cursor. 
+    cursor = myDatabase.openCursor(null, null);
+
+    // Cursors need a pair of DatabaseEntry objects to operate. These hold
+    // the key and data found at any given position in the database.
+    DatabaseEntry foundKey = new DatabaseEntry();
+    DatabaseEntry foundData = new DatabaseEntry();
+
+    // To iterate, just call getNext() until the last database record has 
+    // been read. All cursor operations return an OperationStatus, so just
+    // read until we no longer see OperationStatus.SUCCESS
+    while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+        OperationStatus.SUCCESS) {
+        // getData() on the DatabaseEntry objects returns the byte array
+        // held by that object. We use this to get a String value. If the
+        // DatabaseEntry held a byte array representation of some other 
+        // data type (such as a complex object) then this operation would
+        // look considerably different.
+        String keyString = new String(foundKey.getData(), "UTF-8");
+        String dataString = new String(foundData.getData(), "UTF-8");
+        System.out.println("Key | Data : " + keyString + " | " + 
+                       dataString + "");
+    }
+} catch (DatabaseException de) {
+    System.err.println("Error accessing database: " + de);
+} finally {
+    // Cursors must be closed.
+    if (cursor != null) {
+        cursor.close();
+    }
+}
+

+ To iterate over the database from the last record to the first, + instantiate the cursor, and then + use Cursor.getPrev() until you read the first record in + the database. For example: +

+ +
package je.gettingStarted;
+    
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;  
+import com.sleepycat.je.OperationStatus; 
+
+...
+
+Cursor cursor = null;
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+
+    // Open the cursor. 
+    cursor = myDatabase.openCursor(null, null);
+
+    // Get the DatabaseEntry objects that the cursor will use.
+    DatabaseEntry foundKey = new DatabaseEntry();
+    DatabaseEntry foundData = new DatabaseEntry();
+
+    // Iterate from the last record to the first in the database
+    while (cursor.getPrev(foundKey, foundData, LockMode.DEFAULT) == 
+        OperationStatus.SUCCESS) {
+
+        String theKey = new String(foundKey.getData(), "UTF-8");
+        String theData = new String(foundData.getData(), "UTF-8");
+        System.out.println("Key | Data : " +  theKey + " | " + 
+                           theData + "");
+    }
+} catch (DatabaseException de) {
+    System.err.println("Error accessing database: " + de);
+} finally {
+    // Cursors must be closed.
+    if (cursor != null) {
+        cursor.close();
+    }
+}
+
+
+
+
+

Disk Ordered Cursors

+
+
+
+

+ The previous example shows how to scan through the records in + your database sequentially; that is, in the record's sort order. + This is mostly determined by the value contained in the records' keys + (additional sorting is required in the case of duplicate + records). However, you can use cursors to retrieve records based + on how they are stored on disk. This can improve retrieval times, + and is useful if your application needs to scan all the records + in the database quickly, without concern for key sort order. + You do this using the DiskOrderedCursor + class. +

+

+ DiskOrderedCursor works in the same way as a regular cursor: you
+ simply open the cursor, then retrieve records one after another
+ using the DiskOrderedCursor.getNext() method.

+

+ You open a DiskOrderedCursor + using the Database.openCursor() + method. This version of the method takes a single parameter: an + instance of the + DiskOrderedCursorConfig + class, which can be used to configure various aspects of the + DiskOrderedCursor + class. +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DiskOrderedCursor;
+import com.sleepycat.je.DiskOrderedCursorConfig;
+import com.sleepycat.je.LockMode;  
+import com.sleepycat.je.OperationStatus; 
+
+...
+
+DiskOrderedCursor dcursor = null;
+DiskOrderedCursorConfig docc = new DiskOrderedCursorConfig();
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+
+    // Open the cursor. 
+    dcursor = myDatabase.openCursor(docc);
+
+    // Cursors need a pair of DatabaseEntry objects to operate. These hold
+    // the key and data found at any given position in the database.
+    DatabaseEntry foundKey = new DatabaseEntry();
+    DatabaseEntry foundData = new DatabaseEntry();
+
+    // To iterate, just call getNext() until the last database record has 
+    // been read. All cursor operations return an OperationStatus, so just
+    // read until we no longer see OperationStatus.SUCCESS
+    while (dcursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+        OperationStatus.SUCCESS) {
+        // getData() on the DatabaseEntry objects returns the byte array
+        // held by that object. We use this to get a String value. If the
+        // DatabaseEntry held a byte array representation of some other 
+        // data type (such as a complex object) then this operation would
+        // look considerably different.
+        String keyString = new String(foundKey.getData(), "UTF-8");
+        String dataString = new String(foundData.getData(), "UTF-8");
+        System.out.println("Key | Data : " + keyString + " | " + 
+                       dataString + "");
+    }
+} catch (DatabaseException de) {
+    System.err.println("Error accessing database: " + de);
+} finally {
+    // Cursors must be closed.
+    if (dcursor != null) {
+        dcursor.close();
+    }
+}
+
+
+
+
+
+

Searching for Records

+
+
+
+

+ You can use cursors to search for database records. You can search based + on just a key, or you can search based on both the key and the data. + You can also perform partial matches if your database supports sorted + duplicate sets. In all cases, the key and data parameters of these + methods are filled with the key and data values of the database record + to which the cursor is positioned as a result of the search. +

+

+ Also, if the search fails, then the cursor's state is left
+ unchanged and OperationStatus.NOTFOUND is returned.

+

+ The following Cursor methods allow you to + perform database searches: +

+
+
    +
  • +

    + Cursor.getSearchKey() + +

    +

    + Moves the cursor to the first record in the database with + the specified key. +

    +
  • +
  • +

    + Cursor.getSearchKeyRange() + +

    +

+ Moves the cursor to the first record in the database whose key is
+ greater than or equal to the specified key. This comparison is
+ determined by the comparator that you provide for the database. If
+ no comparator is provided, then the default unsigned byte-by-byte
+ lexicographical sorting is used.

    +

    + For example, suppose you have database records that use the + following + Strings + + as keys: +

    +
    Alabama
    +Alaska
    +Arizona
    +

    + Then providing a search key of Alaska moves the + cursor to the second key noted above. Providing a key of + Al moves the cursor to the first key (Alabama), providing + a search key of Alas moves the cursor to the second key + (Alaska), and providing a key of Ar moves the + cursor to the last key (Arizona). +

    +
  • +
  • +

    + Cursor.getSearchBoth() + +

    +

    + Moves the cursor to the first record in the database that uses + the specified key and data. +

    +
  • +
  • +

    + Cursor.getSearchBothRange() + +

    +

    + Moves the cursor to the first record in the database whose key matches the specified + key and whose data is + greater than or equal to the specified data. If the database supports + duplicate records, then on matching the key, the cursor is moved to + the duplicate record with the smallest data that is greater than or + equal to the specified data. +

    +

    + For example, + suppose you have + + database records that use the following key/data pairs: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence 
    +

    then providing:

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
a search key of...     and a search data of...     moves the cursor to...
Alaska                 Fa                          Alaska/Fairbanks
Arizona                Fl                          Arizona/Florence
Alaska                 An                          Alaska/Anchorage
    +
    +
  • +
+
+

+ For example, assuming a database containing sorted duplicate
+ records of U.S. States/U.S. Cities key/data pairs (both as
+ Strings), the following code fragment can be used to position the
+ cursor to any record in the database and print its key/data values:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus; 
+
+...
+  
+// For this example, hard code the search key and data
+String searchKey = "Alaska";
+String searchData = "Fa";
+
+Cursor cursor = null;
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+
+    DatabaseEntry theKey = 
+         new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = 
+         new DatabaseEntry(searchData.getBytes("UTF-8"));
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Perform the search
+    OperationStatus retVal = cursor.getSearchBothRange(theKey, theData, 
+                                                       LockMode.DEFAULT);
+    // NOTFOUND is returned if a record cannot be found whose key 
+    // matches the search key AND whose data begins with the search data.
+    if (retVal == OperationStatus.NOTFOUND) {
+        System.out.println(searchKey + "/" + searchData + 
+                           " not matched in database " + 
+                           myDatabase.getDatabaseName());
+    } else {
+        // Upon completing a search, the key and data DatabaseEntry 
+        // parameters for getSearchBothRange() are populated with the 
+        // key/data values of the found record.
+        String foundKey = new String(theKey.getData(), "UTF-8");
+        String foundData = new String(theData.getData(), "UTF-8");
+        System.out.println("Found record " + foundKey + "/" + foundData + 
+                           " for search key/data: " + searchKey + 
+                           "/" + searchData);
+    }
+
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   if (cursor != null) {
+       cursor.close();
+   }
+}
+
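+
+ The same pattern works for key-only range searches with
+ Cursor.getSearchKeyRange(). A minimal sketch, reusing the cursor
+ from the example above ("Al" is just an illustrative prefix):
+
+DatabaseEntry rangeKey = new DatabaseEntry("Al".getBytes("UTF-8"));
+DatabaseEntry rangeData = new DatabaseEntry();
+OperationStatus status =
+    cursor.getSearchKeyRange(rangeKey, rangeData, LockMode.DEFAULT);
+if (status == OperationStatus.SUCCESS) {
+    // Positioned at the first key >= "Al" (Alabama in this data set)
+    System.out.println(new String(rangeKey.getData(), "UTF-8"));
+}
+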
+
+
+
+
+

Working with Duplicate Records

+
+
+
+

+ If your database supports duplicate records, then it can
+ potentially contain multiple records that share the same key. Using
+ normal database get operations, you can only ever obtain the first
+ such record in a set of duplicate records. To access subsequent
+ duplicates, use a cursor. The following Cursor methods are useful
+ when working with databases that support duplicate records:

+
+
    +
  • +

    + + Cursor.getNext(), + Cursor.getPrev() + + +

    +

+ Gets the next/previous record in the database, regardless of
+ whether it is a duplicate of the current record. For an example of
+ using these methods, see Getting Records Using the Cursor.

    +
  • +
  • +

    + Cursor.getSearchBothRange() + +

    +

    + Useful for seeking the cursor to a specific record, regardless of + whether it is a duplicate record. See Searching for Records for more + information. +

    +
  • +
  • +

    + + Cursor.getNextNoDup(), + Cursor.getPrevNoDup() + + +

    +

    + Gets the next/previous non-duplicate record in the database. This + allows you to skip over all the duplicates in a set of duplicate + records. If you call + Cursor.getPrevNoDup(), + + then the cursor is positioned to the last record for the previous + key in the database. For example, if you have the following records + in your database: +

    +
    Alabama/Athens
    +Alabama/Florence
    +Alaska/Anchorage
    +Alaska/Fairbanks
    +Arizona/Avondale
    +Arizona/Florence
    +

    + and your cursor is positioned to Alaska/Fairbanks, + and you then call + Cursor.getPrevNoDup(), + + then the cursor is positioned to Alabama/Florence. Similarly, if + you call + Cursor.getNextNoDup(), + + + then the cursor is positioned to the first record corresponding to + the next key in the database. +

    +

    + If there is no next/previous key in the database, then + OperationStatus.NOTFOUND + + is returned, and the cursor is left unchanged. +

    +
  • +
  • +

    + + Cursor.getNextDup(), + Cursor.getPrevDup() + + +

    +

    + + Gets the + next/previous + + record that shares the current key. If the + cursor is positioned at the last record in the duplicate set and + you call + Cursor.getNextDup(), + + + then + OperationStatus.NOTFOUND + + is returned and the cursor is left unchanged. + + Likewise, if you call + getPrevDup() and the + cursor is positioned at the first record in the duplicate set, then + OperationStatus.NOTFOUND is returned and the + cursor is left unchanged. + +

    +
  • +
  • +

    + Cursor.count() +

    +

    Returns the total number of records that share the current key.

    +
  • +
+
+

+ For example, the following code fragment positions a cursor to a
+ key and, if the key contains duplicate records, displays all the
+ duplicates. Note that the code fragment assumes that the database
+ contains only String objects for the keys and data.

+ +
package je.gettingStarted;
+      
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus; 
+
+...
+  
+Cursor cursor = null;
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+
+    // Create DatabaseEntry objects
+    // searchKey is some String.
+    DatabaseEntry theKey = new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Position the cursor
+    // Ignoring the return value for clarity
+    OperationStatus retVal = cursor.getSearchKey(theKey, theData, 
+                                                 LockMode.DEFAULT);
+    
+    // Count the number of duplicates. If the count is greater than 1, 
+    // print the duplicates.
+    if (cursor.count() > 1) {
+        while (retVal == OperationStatus.SUCCESS) {
+            String keyString = new String(theKey.getData(), "UTF-8");
+            String dataString = new String(theData.getData(), "UTF-8");
+            System.out.println("Key | Data : " +  keyString + " | " + 
+                               dataString + "");
+   
+            retVal = cursor.getNextDup(theKey, theData, LockMode.DEFAULT);
+        }
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+   // Make sure to close the cursor
+   if (cursor != null) {
+       cursor.close();
+   }
+}
+
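+
+ Conversely, to visit just one record per key, skipping over each
+ duplicate set, you can drive the same kind of loop with
+ Cursor.getNextNoDup(). A minimal sketch, assuming an open cursor:
+
+DatabaseEntry key = new DatabaseEntry();
+DatabaseEntry data = new DatabaseEntry();
+while (cursor.getNextNoDup(key, data, LockMode.DEFAULT) ==
+    OperationStatus.SUCCESS) {
+    // Prints each distinct key once, with its first data item
+    System.out.println(new String(key.getData(), "UTF-8"));
+}
+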
+
+ + + diff --git a/docs/GettingStartedGuide/PutEntryWCursor.html b/docs/GettingStartedGuide/PutEntryWCursor.html new file mode 100644 index 0000000..d7af94b --- /dev/null +++ b/docs/GettingStartedGuide/PutEntryWCursor.html @@ -0,0 +1,186 @@ + + + + + + Putting Records Using Cursors + + + + + + + + + +
+
+
+
+

Putting Records Using Cursors

+
+
+
+

+ You can use cursors to put records into the database. JE's behavior + when putting records into the database differs depending on whether the + database supports duplicate records. If duplicates are allowed, its + behavior also differs depending on whether a comparator is provided for + the database. (Comparators are described in + Using Comparators). +

+

+ Note that when putting records to the database using a cursor, the + cursor is positioned at the record you inserted. +

+

+ You can use the following methods to put records to the database: +

+
+
    +
  • +

    + Cursor.put() +

    +

    + If the provided key does not exist in the database, + then the order that the record is put into the database + is determined by the BTree (key) comparator in use by the database. +

    +

    + If the provided key already exists in the database, and the database + does not support sorted duplicates, then the existing record data is + replaced with the data provided on this method. +

    +

    + If the provided key already exists in the database, and the database + does support sorted duplicates, then the order that the record is + inserted into the database is determined by the duplicate comparator + in use by the database. +

    +

    + Note that a version of this method exists which allows you to + specify a Time to Live value for the record that you are + inserting. See Using Time to Live + for more information. +

    +
  • +
  • +

    + Cursor.putNoDupData() + +

    +

    + If the provided key and data already exist in the database, then this method returns OperationStatus.KEYEXIST. +

    +

    + If the key does not exist, then the order that the record is put into the database is determined by the BTree (key) comparator in use by the database. +

    + +
  • +
  • +

    + Cursor.putNoOverwrite() +

    +

    + If the provided key already exists + in the database, then this method returns + OperationStatus.KEYEXIST. +

    +

    + If the key does not exist, then the order that the record is put into the database + is determined by the BTree (key) comparator in use by the database. +

    +
  • +
+
+

For example:

+ +
package je.gettingStarted;
+    
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.OperationStatus; 
+
+...
+  
+// Create the data to put into the database
+String key1str = "My first string";
+String data1str = "My first data";
+String key2str = "My second string";
+String data2str = "My second data";
+String data3str = "My third data";
+  
+Cursor cursor = null;
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+
+    DatabaseEntry key1 = new DatabaseEntry(key1str.getBytes("UTF-8"));
+    DatabaseEntry data1 = new DatabaseEntry(data1str.getBytes("UTF-8"));
+    DatabaseEntry key2 = new DatabaseEntry(key2str.getBytes("UTF-8"));
+    DatabaseEntry data2 = new DatabaseEntry(data2str.getBytes("UTF-8"));
+    DatabaseEntry data3 = new DatabaseEntry(data3str.getBytes("UTF-8"));
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Assuming an empty database.
+
+    OperationStatus retVal = cursor.put(key1, data1); // SUCCESS
+    retVal = cursor.put(key2, data2); // SUCCESS
+    retVal = cursor.put(key2, data3); // SUCCESS if dups allowed, 
+                                      // KEYEXIST if not.    
+                                              
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+    // Make sure to close the cursor; guard against the case where
+    // the cursor was never successfully opened.
+    if (cursor != null) {
+        cursor.close();
+    }
+}
+
+ + + diff --git a/docs/GettingStartedGuide/ReplacingEntryWCursor.html b/docs/GettingStartedGuide/ReplacingEntryWCursor.html new file mode 100644 index 0000000..b55061d --- /dev/null +++ b/docs/GettingStartedGuide/ReplacingEntryWCursor.html @@ -0,0 +1,123 @@ + + + + + + Replacing Records Using Cursors + + + + + + + + + +
+
+
+
+

Replacing Records Using Cursors

+
+
+
+

+ You replace the data for a database record by using Cursor.putCurrent(). This method takes just one argument — the data that you want to write to the current location in the database. +

+ +
import com.sleepycat.je.Cursor;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus; 
+
+...
+Cursor cursor = null;
+try {
+    ...
+    // Database and environment open omitted for brevity
+    ...
+    // Create DatabaseEntry objects
+    // searchKey is some String.
+    DatabaseEntry theKey = new DatabaseEntry(searchKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Open a cursor using a database handle
+    cursor = myDatabase.openCursor(null, null);
+
+    // Position the cursor. Ignoring the return value for clarity
+    OperationStatus retVal = cursor.getSearchKey(theKey, theData, 
+                                                LockMode.DEFAULT);
+    
+    // Replacement data
+    String replaceStr = "My replacement string";
+    DatabaseEntry replacementData = 
+        new DatabaseEntry(replaceStr.getBytes("UTF-8"));
+    cursor.putCurrent(replacementData);
+} catch (Exception e) {
+    // Exception handling goes here
+} finally {
+    // Make sure to close the cursor; guard against the case where
+    // the cursor was never successfully opened.
+    if (cursor != null) {
+        cursor.close();
+    }
+}
+

+ Note that this method cannot be used if the record that you are trying to replace is a member of a duplicate set. This is because duplicate records are sorted by their data, and replacing the data would violate that sort order. +

+

+ If you want to replace the data contained by a duplicate record, delete the record and then create a new record with the desired key and data. +

+
+ + + diff --git a/docs/GettingStartedGuide/admin.html b/docs/GettingStartedGuide/admin.html new file mode 100644 index 0000000..60c0ad0 --- /dev/null +++ b/docs/GettingStartedGuide/admin.html @@ -0,0 +1,237 @@ + + + + + + Part III. Administering JE Applications + + + + + + + + + +
+
+
+
+

Part III. Administering JE Applications

+
+
+
+
+
+

+ This section discusses concepts and mechanisms + useful for the administration of any JE + application, regardless of the API used to build + that application. +

+ +
+
+ + + diff --git a/docs/GettingStartedGuide/administration.html b/docs/GettingStartedGuide/administration.html new file mode 100644 index 0000000..63aff79 --- /dev/null +++ b/docs/GettingStartedGuide/administration.html @@ -0,0 +1,181 @@ + + + + + + Chapter 12. Administering Berkeley DB Java Edition Applications + + + + + + + + + +
+
+
+
+

Chapter 12. Administering Berkeley DB Java Edition Applications

+
+
+
+
+

+ Table of Contents +

+
+
+ + The JE Properties File + +
+
+ + Managing the Background Threads + +
+
+
+
+ + The Cleaner Thread + +
+
+ + The Checkpointer Thread + +
+
+
+
+ + Sizing the Cache + +
+
+ + Setting Disk Thresholds + +
+
+ + The Command Line Tools + +
+
+
+
+ + DbDump + +
+
+ + DbLoad + +
+
+ + DbVerify + +
+
+
+
+ + Logging + +
+
+
+
+ + Managing Logging Levels + +
+
+ + Managing Handler Levels + +
+
+
+
+
+

+ A number of tools and parameters are of interest to the administrator of a Berkeley DB Java Edition database. These tools and parameters are useful for tuning your JE database's behavior once it is in a production setting, and they are described here. This chapter does not, however, describe backing up and restoring your JE databases. See Backing up and Restoring Berkeley DB Java Edition Applications for information on how to perform those procedures. +

+
+
+
+
+

The JE Properties File

+
+
+
+

+ JE applications can be controlled through a + Java properties file. This file must be placed in your environment home directory and it must be named + je.properties. +

+

+ The parameters set in this file take precedence over the configuration behavior coded into the + JE application by your application developers. +

+

+ Usually you will use this file to control the behavior of JE's background threads and the size of your in-memory cache. These topics, and the property parameters related to them, are described in this chapter. Beyond the properties described here, there are other properties identified throughout this manual that may be of interest to you. However, the definitive list of all the property parameters available to you is in the javadoc for the EnvironmentConfig class. Each property has a String constant in EnvironmentConfig that describes its meaning, default value, and so forth. +
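+
+ For example, a minimal je.properties sketch might look like the following. The property names are real JE parameters, but the values are illustrative only, not recommendations:
+
+# je.properties, placed in the environment home directory.
+# Cap the in-memory cache at 64 MB (the value is in bytes).
+je.maxMemory=67108864
+# Start a new log file once the current file reaches 5,000,000 bytes.
+je.log.fileMax=5000000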

+
+
+ + + diff --git a/docs/GettingStartedGuide/applicationoverview.html b/docs/GettingStartedGuide/applicationoverview.html new file mode 100644 index 0000000..f5347bb --- /dev/null +++ b/docs/GettingStartedGuide/applicationoverview.html @@ -0,0 +1,628 @@ + + + + + + The JE Application + + + + + + + + + +
+
+
+
+

The JE Application

+
+
+
+ +

This section provides a brief overview of the major concepts and operations that comprise a JE application. It concludes with a summary of the decisions that you need to make when working with JE.

+

+ Note that the core JE classes are all contained in the com.sleepycat.je package. + In addition, this book describes some classes that are found in com.sleepycat.je.bind. + The bind APIs are used for converting Java objects in and out of byte arrays. +

+
+
+
+
+

Database Environments

+
+
+
+

+ Regardless of the JE API that you use, your data is + stored in databases. If you use the DPL, you do not manage + these databases directly; rather, they are managed for you by + the API. On the other hand, if you use the lower-level JE + APIs, then you must manage databases directly. This is not + difficult to do as it mostly involves opening and closing the + databases, giving them names, and so forth. See + Databases + for more + information. +

+

+ That said, JE always requires you to use a database environment. Database environments provide a unit of encapsulation for one or more databases. Environments correspond to a directory location on disk, and in them you will find all the files in use by JE. Environments are also used to manage JE resources such as transactions. +

+

+ To use a database environment, it must first be created and then + opened. In order to create a database environment, the + directory location in which it resides must already exist. +

+

+ You open a database environment by instantiating an + Environment object. Your + Environment instance is called + an environment handle. +
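+
+ For example, a minimal sketch of obtaining an environment handle might look like the following. The directory name is an assumption for illustration, and it must already exist:
+
+import java.io.File;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+...
+
+EnvironmentConfig envConfig = new EnvironmentConfig();
+// Create the environment if it does not already exist.
+envConfig.setAllowCreate(true);
+Environment myEnv = new Environment(new File("/export/dbEnv"), envConfig);
+
+// Work with databases here, and close the handle when you are done.
+myEnv.close();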

+

+ + Once you have opened an environment, what you do with it + depends on the nature of your application; that is, the + JE API you are using and whether you are using advanced + features such as transactions. (See + Berkeley DB, Java Edition Getting Started with Transaction Processing + for details on using + transactions). However, at a minimum you will always have to + open your environment before you can access your data stored + in JE. Also, before you end your application you should + always close your environment. +

+

+ Environments are described in greater detail in Database Environments. +

+
+
+
+
+
+

Key-Data Pairs

+
+
+
+

+ JE stores and retrieves data using + key-data pairs. The + data portion of this is the data + that you have decided to store in JE for future + retrieval. The key is the + information that you want to use to look up your + stored data once it has been placed inside a JE + database. +

+

+ For example, if you were building a database that + contained employee information, then the + data portion is all of the + information that you want to store about the employees: + name, address, phone numbers, physical location, their + manager, and so forth. +

+

+ The key, however, is the way that + you look up any given employee. You can have more than + one key if you wish, but every record in your database must have a + primary key. If you are using the DPL, then this key must be unique; that is, + it must not be used multiple times in the database. However, if you are using + the base API, then this requirement is relaxed. See + Duplicate Data for more + information. +

+

+ For example, in the case of an employee database, you would probably use + something like the employee identification number as the primary key as this + uniquely identifies a given employee. +

+

+ You can optionally also have secondary keys that represent indexes into your database. These keys do not have to be unique to a given record; in fact, they often are not. For example, you might set up the employee's manager's name as a secondary key so that it is easy to locate all the employees that work for a given manager. +

+
+
+
+
+
+

Storing Data

+
+
+
+

+ How you manage your stored information differs + significantly, depending on which API you are using. + Both APIs ultimately are doing the same thing, but the + DPL hides a lot of the details from you. +

+
+
+
+
+

Storing Data in the DPL

+
+
+
+

+ The DPL is used to store Java objects in an + underlying series of databases. These databases are + accessed using an EntityStore + class object. +

+

+ To use the DPL, you must decorate the classes you + want to store with Java annotations that identify them + as either an entity class or a + persistent class. +

+

+ Entity classes are classes that have a primary key, and + optionally one or more secondary keys. That is, these + are the classes that you will save and retrieve directly + using the DPL. You identify an entity class using the + @Entity java annotation. +

+

+ Persistent classes are classes used by entity classes. They do not have primary or secondary indices used for object retrieval. Rather, they are stored or retrieved when an entity class makes direct use of them. You identify a persistent class using the @Persistent java annotation. +

+

+ The primary key for an object is obtained from one of the class' data members. + You identify which data member to use as the primary key using the + @PrimaryKey java annotation. +
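+
+ For example, a minimal sketch of an entity class might look like the following. The class and field names are invented for illustration:
+
+package je.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+
+@Entity
+public class Employee {
+
+    // The primary key; it must be unique within the store.
+    @PrimaryKey
+    private long employeeId;
+
+    // An ordinary persistent field.
+    private String name;
+}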

+

+ Note that all non-transient instance fields of a + persistent class, as well as its superclasses and + subclasses, are persistent. Static and transient fields + are not persistent. The persistent fields of a class + may be private, package-private (default access), + protected or public. +

+

+ + Also, simple Java types, such as + java.lang.String and + java.util.Date, are automatically handled as a + persistent class when you use them in an entity class; + you do not have to do anything special to cause these + simple Java objects to be stored in the + EntityStore. + +

+
+
+
+
+
+

Storing Data using the Base API

+
+
+
+

+ When you are not using the DPL, both record keys and record data must be byte arrays and are passed to and returned from JE using DatabaseEntry instances. DatabaseEntry only supports storage of Java byte arrays. Complex objects must be marshaled using either Java serialization or, more efficiently, the bind APIs provided with JE.

+

Database + records and byte array conversion are described in Database Records. +

+

+ You store records in a Database by calling one of the put methods on a Database handle. JE automatically determines the record's proper placement in the database's internal B-Tree using whatever key and data comparison functions are available to it. +

+

+ You can also retrieve, or get, records using the + Database handle. Gets are performed by providing the + key (and sometimes also the data) of the record that you want to retrieve. +
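+
+ For example, a minimal sketch of a put followed by a get might look like this, assuming myDatabase is an open Database handle as in the other fragments in this book:
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry("aKey".getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry("aData".getBytes("UTF-8"));
+
+    // Write the record. JE determines its placement in the BTree.
+    myDatabase.put(null, theKey, theData);
+
+    // Read it back by key.
+    DatabaseEntry foundData = new DatabaseEntry();
+    OperationStatus retVal = myDatabase.get(null, theKey, foundData,
+                                            LockMode.DEFAULT);
+    if (retVal == OperationStatus.SUCCESS) {
+        System.out.println(new String(foundData.getData(), "UTF-8"));
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+}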

+

+ You can also use cursors for database puts and gets. Cursors are essentially + a mechanism by which you can iterate over the records in the database. Like + databases and database environments, cursors must be opened and closed. + Cursors are managed using the Cursor class. +

+

+ Databases are described in Databases. Cursors + are described in Using Cursors. +

+
+
+
+
+
+
+

Duplicate Data

+
+
+
+

+ If you are using the base API, then at creation time databases can be configured to + allow duplicate data. Remember that JE database records consist of a key/data + pair. Duplicate data, then, occurs when two or more records have + identical keys, but different data. By default, a Database does + not allow duplicate data. +

+

+ If your Database contains duplicate data, then a simple + database get based only on a key returns just the first record that uses that key. To + access all duplicate records for that key, you must use a cursor. +

+

+ If you are using the DPL, then you can have duplicate data through the use of secondary keys, but not through the primary key. For more information, see Retrieving Multiple Objects. +

+
+
+
+
+
+

Replacing and Deleting Entries

+
+
+
+

+ If you are using the DPL, then replacing a stored entity object simply consists of + retrieving it, updating it, then storing it again. To delete the object, use the + delete() method that is available on either its primary or + secondary keys. If you use the delete() method available on + the secondary key, then all objects referenced by that key are also deleted. + See Deleting Entity Objects + for more information. +

+

+ If you are using the base API, then how you replace database records depends on whether + duplicate data is allowed in the database. +

+

+ If duplicate data is not allowed in the database, then simply calling + Database.put() with the appropriate key will cause any + existing record to be updated with the new data. Similarly, you can delete a record by + providing the appropriate key to the Database.delete() + method. +

+

+ If duplicate data is allowed in the database, then you must position a cursor to the + record that you want to update, and then perform the put operation using the cursor. +

+

+ To delete records using the base API, you can use either Database.delete() or Cursor.delete(). If duplicate data is not allowed in your database, then these two methods behave identically. However, if duplicates are allowed in the database, then Database.delete() deletes every record that uses the provided key, while Cursor.delete() deletes just the record at which the cursor is currently positioned. +
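+
+ For example, a minimal sketch of deleting by key might look like this, assuming myDatabase is an open Database handle and theKey is a DatabaseEntry built as in the other fragments in this book:
+
+try {
+    // Deletes every record that uses theKey if duplicates are allowed;
+    // otherwise it deletes the single matching record.
+    OperationStatus retVal = myDatabase.delete(null, theKey);
+} catch (Exception e) {
+    // Exception handling goes here
+}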

+
+
+
+
+
+

Secondary Keys

+
+
+
+

+ Secondary keys provide an alternative way to locate information stored in JE, beyond that which is provided by the primary key. Frequently secondary keys refer to more than one record in the database. In this way, you can find all the cars that are green (if you are maintaining an automotive database) or all the people with brown eyes (if you are maintaining a database about people). In other words, secondary keys represent an index into your data. +

+

+ How you create and maintain secondary keys differs significantly, depending on + whether you are using the DPL or the base API. +

+
+
+
+
+

Using Secondaries with the DPL

+
+
+
+

+ Under the DPL, you declare a particular field to be a secondary key by + using the @SecondaryKey annotation. When you do this, + you must declare what kind of an index you are creating. For example, + you can declare a secondary key to be part of a + ONE_TO_ONE index, in which case the key is unique to + the object. Or you could declare the key to be + MANY_TO_ONE, in which case the key can be used for + multiple objects in the data store. +

+

+ Once you have identified secondary keys for a class, you can access + those keys by using the EntityStore.getSecondaryIndex() + method. +
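+
+ For example, a minimal sketch of declaring a secondary key and retrieving its index might look like this. The Employee class and field names are invented for illustration:
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+...
+
+    // Inside the entity class: many employees may share one manager.
+    @SecondaryKey(relate = MANY_TO_ONE)
+    private String managerName;
+
+...
+
+// Given an open EntityStore, obtain the primary index and then the
+// secondary index by its key class and key (field) name.
+PrimaryIndex<Long, Employee> primaryIndex =
+    store.getPrimaryIndex(Long.class, Employee.class);
+SecondaryIndex<String, Long, Employee> byManager =
+    store.getSecondaryIndex(primaryIndex, String.class, "managerName");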

+

+ For more information, see Declaring Secondary Indexes. +

+
+
+
+
+
+

Using Secondaries with the Base API

+
+
+
+

+ When you are using the base API, you create and maintain secondary keys using a + special type of a database, called a secondary database. + When you are using secondary databases, the database that holds the data you are + indexing is called the primary database. +

+

+ You create a secondary database by opening it and associating it with an + existing primary database. You must also provide a class that generates the + secondary's keys (that is, the index) from primary records. Whenever a + record in the primary database is added or changed, JE uses this class + to determine what the secondary key should be. +

+

+ When a primary record is created, modified, or deleted, JE automatically + updates the secondary database(s) for you as is appropriate for the + operation performed on the primary. +

+

+ You manage secondary databases using the + SecondaryDatabase class. You identify how to create keys + for your secondary databases by supplying an instance of a class that implements + the SecondaryKeyCreator interface. +
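+
+ For example, a minimal sketch of a key creator might look like the following; the way the secondary key is derived from the primary record is invented for illustration:
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+
+public class MyKeyCreator implements SecondaryKeyCreator {
+    public boolean createSecondaryKey(SecondaryDatabase secDb,
+                                      DatabaseEntry key,
+                                      DatabaseEntry data,
+                                      DatabaseEntry result) {
+        // For illustration, index on the primary record's entire data.
+        result.setData(data.getData());
+        // Returning false would mean this record has no secondary key.
+        return true;
+    }
+}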

+

+ Secondary databases are described in Secondary Databases. +

+
+
+
+
+
+
+

Transactions

+
+
+
+

+ Transactions provide a high level of safety for your JE operations by allowing + you to manage one or more operations as if they were a single unit of work. + Transactions provide your JE operations with recoverability, atomicity, and + isolation. +

+

+ Transactions provide recoverability by allowing JE to undo any + transactional-protected operations that may have been in progress at the time of an + application or system failure. +

+

+ Transactions provide atomicity by allowing you to group many operations into + a single unit of work. Either all operations succeed or none of them do. This means + that if one write operation fails for any reason, then all other writes contained + within that transaction also fail. This ensures that the database is never partially + updated as the result of an only partially successful chain of read/write operations. +

+

+ Transactions provide isolation by ensuring that the transaction will never write to a record that is currently in use (for either read or write) by another transaction. Similarly, any record to which the transaction has written cannot be read outside of the transaction until the transaction ends. Note that this is only the default behavior; you can configure your Database, Cursor, or Transaction handle to relax its isolation guarantees. +

+

+ Essentially, transactional isolation provides a transaction with the same unmodified view of the + database that it would have received had the operations been performed in a single-threaded application. +

+

+ Transactions may be long or short lived, they can encompass as many + operations as you want, and (if using the base API) they can span databases + so long as all participating databases reside in the same environment. +

+

+ Transaction usage results in a performance penalty for the application because transactions generally require more disk I/O than non-transactional operations do. Therefore, while most applications will use transactions for JE writes, their usage is optional. In particular, processes that are performing read-only operations might not use transactions. Also, applications that use JE for an easily recreated cache might also choose to avoid transactions. +

+

+ Using transactions with your JE applications is described in detail in the + Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

+
+
+
+
+
+

JE Resources

+
+
+
+

+ JE has some internal resources that you may want to manage. Most important of these is the in-memory cache. + You should carefully consider how large the JE cache needs to be. If you set this number too low, JE will + perform potentially unnecessary disk I/O which will result in a performance hit. If you set it too high, then + you are potentially wasting RAM that could be put to better purposes. +

+

+ Note that the size that you configure for the in-memory cache is a maximum size. At application startup, the + cache starts out fairly small (only about 7% of the maximum allowed size for the cache). It then grows as is + required by your application's database operations. Also, the cache is not pinned in memory – it can be + paged out by your operating system's virtual memory system. +

+

+ Beyond the cache, JE uses several background threads to clean + the JE log files, to compress the database by removing unneeded + subtrees, and to flush database changes seen in the cache to the backing data files. For the + majority of JE applications, the default behavior for the background threads should be acceptable and you + will not need to manage their behavior. Note that background threads are started no more than once per + process upon environment open. +

+

+ For more information on sizing the cache and on the background threads, see Administering Berkeley DB Java Edition Applications. +

+
+
+
+
+
+

+ Application Considerations +

+
+
+
+

When building your JE application, be sure to think about the following things:

+
+
    +
  • +

    What data do you want to store? What is best used for the + primary key? What is the best representation for primary record data? + If you are using the base API, think about the most efficient way to move your keys and data in and + out of byte arrays. See Database Records for more information.

    +
  • +
  • +

    Does the nature of your data require duplicate record support? + Remember that duplicate support can be configured only if you are using the base + API, and then only at database + creation time. See Opening Databases for more information.

    +

    If you are supporting duplicate records, you may also need to think + about duplicate comparators (not just key comparators). See + Using Comparators for more information.

    +
  • +
  • +

    What secondary indexes do you need? How can you compute your secondary indexes based on the data and + keys stored in your primary database? + Indexes are described in Secondary Databases.

    +
  • +
  • +

    What cache size do you need? See + Sizing the Cache for information on how to size your cache.

    +
  • +
  • +

    Does your application require transactions? (Most applications will.) Transactions are described in the Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

    +
  • +
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/backgroundthreads.html b/docs/GettingStartedGuide/backgroundthreads.html new file mode 100644 index 0000000..8759981 --- /dev/null +++ b/docs/GettingStartedGuide/backgroundthreads.html @@ -0,0 +1,187 @@ + + + + + + Managing the Background Threads + + + + + + + + + +
+
+
+
+

Managing the Background Threads

+
+
+
+
+
+
+ + The Cleaner Thread + +
+
+ + The Checkpointer Thread + +
+
+
+

+ JE uses some background threads to keep your database resources within pre-configured limits. If they are + going to run, the background threads are started once per application per process. That is, if your application + opens the same environment multiple times, the background threads will be started just once for that process. + See the following list for the default conditions that gate whether an individual thread is run. Note that you + can prevent a background thread from running by using the appropriate je.properties + parameter, but this is not recommended for production use and those parameters are not described here. +

+

+ The background threads are: +

+
+
    +
  • +

    + Cleaner thread. +

    +

    + Responsible for cleaning and deleting unused log files. See The Cleaner Thread for more information. +

    +

    + This thread is run only if the environment is opened for write access. +

    +
  • +
  • +

    + Compressor thread. +

    +

    + Responsible for cleaning up the internal BTree as database records are deleted. The compressor thread + ensures that the BTree does not contain unused nodes. There is no need for you to manage the + compressor and so it is not described further in this manual. +

    +

    + This thread is run only if the environment is opened for write access. +

    +
  • +
  • +

    + Checkpointer thread. +

    +

    + Responsible for running checkpoints on your environment. See + The Checkpointer Thread for more information. +

    +

    + This thread always runs. +

    +
  • +
+
+
+
+
+
+

The Cleaner Thread

+
+
+
+

+ The cleaner thread is responsible for cleaning, or compacting, your log files for you. + Log file cleaning is described in Cleaning the Log Files. +

+

+ The following two properties may be of interest to you when managing the cleaner thread; a short je.properties sketch using both follows this list: +

+
+
    +
  • +

    + je.cleaner.minUtilization +

    +

    + Identifies the percentage of the log file space that must be used for utilized records. If the + percentage of log file space used by utilized records is too low, then the cleaner removes + obsolete records until this threshold is reached. Default is 50%. +

    +
  • +
  • +

    + je.cleaner.expunge +

    +

    + Identifies the cleaner's behavior in the event that it is able to remove a log file. If + true, the log files that have been cleaned are deleted from the file system. If + false, the log files that have been cleaned are renamed from + NNNNNNNN.jdb to NNNNNNNN.del. You are then responsible for + deleting the renamed files. + +

    +
  • +
+
+
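+
+ Here is the je.properties sketch; the values are illustrative only, not recommendations:
+
+# Clean until at least 60% of log file space holds utilized records.
+je.cleaner.minUtilization=60
+# Rename cleaned files to NNNNNNNN.del instead of deleting them.
+je.cleaner.expunge=false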

+ Note that the cleaner thread runs only if the environment is opened for write access. Also, be aware that + the cleaner is not guaranteed to finish running before the environment is closed, which can result in + unexpectedly large log files. See + Closing Database Environments for more information. +

+
+
+
+
+
+

The Checkpointer Thread

+
+
+
+

+ This thread automatically runs checkpoints. Checkpoints and the administration of this thread are described in the Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

+
+
+ + + diff --git a/docs/GettingStartedGuide/backup.html b/docs/GettingStartedGuide/backup.html new file mode 100644 index 0000000..fe21ef7 --- /dev/null +++ b/docs/GettingStartedGuide/backup.html @@ -0,0 +1,291 @@ + + + + + + Performing Backups + + + + + + + + + +
+
+
+
+

Performing Backups

+
+
+
+ +

+ This section describes how to back up your JE database(s) such that catastrophic recovery is possible for non-transactional applications. Note that this same material is repeated in the Berkeley DB, Java Edition Getting Started with Transaction Processing guide, but for transactional applications. If you are writing transactional applications, you may want to skip the rest of this chapter and go straight to that book. +

+

+ To back up your database, you can take either a hot backup or an offline backup. A hot backup is performed while database write operations are in progress. +

+

+ Do not confuse hot and offline backups with the concept of a full and incremental backup. Both a hot and an offline backup are full backups – you back up the entire database. The only difference between them is how much of the in-memory cache's contents they capture. An incremental backup, on the other hand, is a backup of just those log files modified or created since the time of the last backup. Most backup software is capable of performing both full and incremental backups for you. +

+
+
+
+
+

Performing a Hot Backup

+
+
+
+

+ To perform a hot backup of your JE databases, copy all log files (*.jdb files) from your environment directory to your archival location or backup media. The files must be copied in alphabetical order (numerical order, in effect). You do not have to stop any database operations in order to do this. +

+
+

Note

+

+ If you are using subdirectories to store your log files, then you must back up the subdirectories, making sure to keep log files in the subdirectory in which JE placed them. For information on using subdirectories to store your log files, see Multiple Environment Subdirectories. +

+
+

+ To make this process a bit easier, you may want to make use of the + DbBackup helper class. See + Using the DbBackup Helper Class + for details. +

+

+ Note that any modifications made to the database since the time of the last + environment sync are not guaranteed to be contained in these log files. In this + case, you may want to consider running an offline backup in order to guarantee the + availability of all modifications made to your database. +

+
+
+
+
+
+

Performing an Offline Backup

+
+
+
+

+ An offline backup guarantees that you have captured the database in its entirety, including all contents + of your in-memory cache, at the moment that the + backup was taken. To do this, you must make sure that no write operations are in progress and all + database modifications have been written to your log files + on disk. To obtain an offline backup: +

+
+
    +
  1. +

    + Stop writing your databases. +

    +
  2. +
  3. +

    + Run Environment.sync() to ensure that all database modifications are written to disk. Note that cleanly closing your environment will also ensure that all database modifications are written to disk. +

    +
  4. +
  5. +

    + Copy all log files (*.jdb) from your environment + directory to your archival location or backup media. To make this process a + bit easier, you may want to make use of the DbBackup + helper class. See the next section for details. +

    +
    +

    Note

    +

    + If you are using subdirectories to store your log files, then you must back up the subdirectories, making sure to keep log files in the subdirectory in which JE placed them. For information on using subdirectories to store your log files, see Multiple Environment Subdirectories. +

    +
    +
  6. +
+
+

+ You can now resume normal database operations. +

+
+
+
+
+
+

Using the DbBackup Helper Class

+
+
+
+

+ In order to simplify backup operations, JE + provides the DbBackup helper + class. This class stops and restarts JE background activity + in an open environment. It also lets the application create a + backup which can support restoring the environment to + a specific point in time. +

+

+ Because you do not have to stop JE write activity + in order to take a backup, it is usually necessary to + examine your log files twice before you decide that + your backup is complete. This is because JE may + create a new log file while you are running your + backup. A second pass over your log files allows you to + ensure that no new files have been created and so you + can declare your backup complete. +

+

+ For example: +

+
 time    files in                    activity
+         environment
+
+  t0     000000001.jdb     Backup starts copying file 1
+         000000003.jdb
+         000000004.jdb
+
+  t1     000000001.jdb     JE log cleaner migrates portion of file 3 to
+         000000004.jdb     newly created file 5 and deletes file 3. 
+         000000005.jdb     Backup finishes file 1, starts copying file 4.
+                           Backup MUST include file 5 for a consistent 
+                           backup!
+
+  t2     000000001.jdb     Backup finishes copying file 4, starts and 
+         000000004.jdb     finishes file 5, has caught up. Backup ends.
+         000000005.jdb
+
+

+ DbBackup works around this problem by defining the set of files that must be copied for each backup operation, and by freezing all changes to those files. The application can copy that defined set of files and finish its operation without checking for the ongoing creation of new files. Also, there will be no need to check for a newer version of the last file on the next backup. +

+

+ In the example above, if DbBackup was used at t0, + the application would only have to copy files 1, 3 and + 4 to back up. On a subsequent backup, the application + could start its copying at file 5. There would be no + need to check for a newer version of file 4. +

+

+ The following code fragment illustrates this class' usage. + See the DbBackup javadoc for additional + examples and more information on incremental backups. +

+
package je.gettingStarted;
+
+...
+import com.sleepycat.je.util.DbBackup;
+...
+
+    // Find the file number of the last file in the previous backup
+    // persistently, by either checking the backup archive, or saving
+    // state in a persistent file.
+    long lastFileCopiedInPrevBackup =  ...
+
+    Environment env = new Environment(...);
+    DbBackup backupHelper = new DbBackup(env, lastFileCopiedInPrevBackup);
+
+    // Start backup, find out what needs to be copied.
+    // If multiple environment subdirectories are in use,
+    // the getLogFilesInBackupSet returns the log file
+    // name prefixed with the dataNNN/ directory in which
+    // it resides.
+    backupHelper.startBackup();
+    try {
+        String[] filesForBackup = backupHelper.getLogFilesInBackupSet();
+
+        // Copy the files to archival storage.
+        myApplicationCopyMethod(filesForBackup);
+        // Update our knowledge of the last file saved in the backup set,
+        // so we can copy less on the next backup
+        lastFileCopiedInPrevBackup = backupHelper.getLastFileInBackupSet();
+        myApplicationSaveLastFile(lastFileCopiedInPrevBackup);
+    }
+    finally {
+        // Remember to exit backup mode, or all log files won't be cleaned
+        // and disk usage will bloat.
+       backupHelper.endBackup();
+   } 
+
+
+ + + diff --git a/docs/GettingStartedGuide/backuprestore.html b/docs/GettingStartedGuide/backuprestore.html new file mode 100644 index 0000000..7140ec1 --- /dev/null +++ b/docs/GettingStartedGuide/backuprestore.html @@ -0,0 +1,355 @@ + + + + + + Chapter 11. Backing up and Restoring Berkeley DB Java Edition Applications + + + + + + + + + +
+
+
+
+

Chapter 11. Backing up and Restoring Berkeley DB Java Edition Applications

+
+
+
+
+

+ Table of Contents +

+
+
+ + Databases and Log Files + +
+
+
+
+ + Log File Overview + +
+
+ + Cleaning the Log Files + +
+
+ + The BTree + +
+
+ + Database Modifications and Syncs + +
+
+ + Normal Recovery + +
+
+
+
+ + Performing Backups + +
+
+
+
+ + Performing a Hot Backup + +
+
+ + Performing an Offline Backup + +
+
+ + Using the DbBackup Helper Class + +
+
+
+
+ + Performing Catastrophic Recovery + +
+
+ + Hot Standby + +
+
+
+

+ Fundamentally, you back up your databases by copying JE log files off to a safe storage location. To restore your database from a backup, you copy those files to an appropriate directory on disk and reopen your JE application. +

+

+ Beyond these simple activities, there are some differing backup strategies that you may want to consider. These + topics are described in this chapter. +

+
+
+
+
+

Databases and Log Files

+
+
+
+ +

+ Before describing JE backup and restore, it is necessary to describe some of JE's internal workings. In + particular, a high-level understanding of JE log files and the in-memory cache is required. You also need + to understand a little about how JE is using its internal data structures in order to understand why + checkpoints and/or syncs are required. +

+

+ You can skip this section so long as you understand that: +

+
+
    +
  • +

    + JE databases are stored in log files contained in your environment directory. +

    +
  • +
  • +

    + Every time a JE environment is opened, normal recovery is run. +

    +
  • +
  • +

    + For transactional applications, checkpoints should be run in order to bound normal recovery time. + Checkpoints are normally run by the checkpointer thread. Transactional applications and the + checkpointer thread are described in the + Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

    +
  • +
  • +

    + For non-transactional applications, environment syncs must be performed if you want to guarantee the + persistence of your database modifications. Environment syncs are manually performed by the + application developer. See Data Persistence for details. +

    +
  • +
+
+
+
+
+
+

Log File Overview

+
+
+
+

+ Your JE database is stored on-disk in a series of log files. + JE uses no-overwrite log files, which is to say that JE only ever appends data to the end of a log + file. It will never delete or modify an existing log file record. +

+

+ JE log files are named + NNNNNNNN.jdb where NNNNNNNN is an 8-digit hexadecimal number that + increases by 1 (starting from 00000000) for each log file written to disk. +

+

+ JE creates a new log file whenever the current log file has reached a pre-configured size (10000000 + bytes by default). This size is controlled by the je.log.fileMax properties + parameter. See The JE Properties File for information on setting + JE properties. +

+

+ By default, log files are placed in the environment home directory. However, you can + cause JE to place log files in subdirectories within the environment home + directory. For more information, see + Multiple Environment Subdirectories. +

+
+
+
+
+
+

Cleaning the Log Files

+
+
+
+

+ Because JE uses no-overwrite log files, the logs must be compacted or cleaned so as to conserve disk space. +

+

+ JE uses the cleaner background thread to perform this task. When it runs, the cleaner thread picks + the log file with the smallest number of active records and scans each log record in it. + If the record is no longer + active in the database tree, the cleaner does nothing. If the record is still active in the tree, then + the cleaner copies the record forward to a newer log file. +

+

+ Once a log file is no longer needed (that is, it no longer contains active records), then the cleaner + thread deletes the log file for you. Or, optionally, the cleaner thread can simply rename the discarded + log file with a del suffix. +

+

+ JE uses a minimum log utilization property to determine how much cleaning to perform. The log files + contain both obsolete and utilized records. Obsolete records are records that are no longer in use, either + because they have been modified or because they have been deleted. Utilized records are those records + that are currently in use. The je.cleaner.minUtilization property identifies the + minimum percentage of log space that must be used by utilized records. If this minimum percentage is not + met, then log files are cleaned until the minimum percentage is met. +

+

+ For information on managing the cleaner thread, see The Cleaner Thread. +

+
+
+
+
+
+

The BTree

+
+
+
+

+ JE databases are internally organized as a BTree. In order to operate, JE requires that the complete BTree be available to it. +

+

+ When database records are created, modified, or deleted, the modifications are represented in the BTree's + leaf nodes. Beyond leaf + node changes, database record modifications can also cause changes to other BTree nodes and structures. +

+
+
+
+
+
+

Database Modifications and Syncs

+
+
+
+

+ When a write operation is performed in JE, the modified data + is written to a leaf node contained in the in-memory cache. If + your JE writes are performed without transactions, then the + in-memory cache is the only location guaranteed to receive a + database modification without further intervention on the part + of the application developer. +

+

+ For some classes of applications, this lack of a guaranteed write to disk is ideal. By not writing these modifications to the on-disk logs, the application can avoid most of the overhead caused by disk I/O. +

+

+ However, if the application requires its data to persist at a specific point in time, then the developer must manually sync database modifications to the on-disk log files (again, this is only necessary for non-transactional applications). This is done using Environment.sync(). +
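+
+ For example, a minimal sketch for a non-transactional application might look like this, assuming myEnv is the open Environment and myDatabase, theKey, and theData are as in the other fragments in this book:
+
+// The write initially reaches only the in-memory cache.
+myDatabase.put(null, theKey, theData);
+
+// Flush all modified objects in the cache to the on-disk log files.
+myEnv.sync();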

+

+ Note that syncing the cache causes JE to write all modified objects in the cache + to disk. This is probably the most expensive operation that you can perform in JE. +

+
+
+
+
+
+

Normal Recovery

+
+
+
+

+ Every time a JE environment is opened, normal recovery is run. + Because of the way that JE organizes and manages its BTrees, all it needs is leaf nodes in order to + recreate the rest of the BTree. Essentially, this is what normal recovery is + doing – recreating any missing parts of the internal BTree from leaf node information stored in the + log files. +

+

+ Unlike a traditional database system, JE performs recovery for both transactional + and non-transactional operations. The integrity of the Btree is + guaranteed by JE in the face of both application and OS crashes. +

+
+
+
+ + + diff --git a/docs/GettingStartedGuide/baseapi.html b/docs/GettingStartedGuide/baseapi.html new file mode 100644 index 0000000..ad504a1 --- /dev/null +++ b/docs/GettingStartedGuide/baseapi.html @@ -0,0 +1,382 @@ + + + + + + Part II. Programming with the Base API + + + + + + + + + +
+
+
+
+

Part II. Programming with the Base API

+
+
+
+
+
+

+ This section discusses applications that are built using the JE base API. Note that most JE applications can probably be written using the DPL (see Programming with the Direct Persistence Layer for more information). However, if you are porting an application from the Berkeley DB API, then the base API is right for you. +

+
+

+ Table of Contents +

+
+
+ + 7. Databases + +
+
+
+
+ + Opening Databases + +
+
+
+
+ + Deferred Write Databases + +
+
+ + Temporary Databases + +
+
+ + Closing Databases + +
+
+
+
+ + Database Properties + +
+
+ + Administrative Methods + +
+
+ + Database Example + +
+
+
+
+ + 8. Database Records + +
+
+
+
+ + Using Database Records + +
+
+ + Reading and Writing Database Records + +
+
+
+
+ + Writing Records to the Database + +
+
+ + Getting Records from the Database + +
+
+ + Deleting Records + +
+
+ + Data Persistence + +
+
+
+
+ + Using Time to Live + +
+
+
+
+ + Specifying a TTL Value + +
+
+ + Updating a TTL Value + +
+
+ + Deleting TTL Expiration + +
+
+
+
+ + Using the BIND APIs + +
+
+
+
+ + Numerical and String Objects + +
+
+ + Serializable Complex Objects + +
+
+ + Custom Tuple Bindings + +
+
+
+
+ + Using Comparators + +
+
+
+
+ + Writing Comparators + +
+
+ + Setting Comparators + +
+
+
+
+ + Database Record Example + +
+
+
+
+ + 9. Using Cursors + +
+
+
+
+ + Opening and Closing Cursors + +
+
+ + Getting Records Using the Cursor + +
+
+
+
+ + Disk Ordered Cursors + +
+
+ + Searching for Records + +
+
+ + Working with Duplicate Records + +
+
+
+
+ + Putting Records Using Cursors + +
+
+ + Deleting Records Using Cursors + +
+
+ + Replacing Records Using Cursors + +
+
+ + Cursor Example + +
+
+
+
+ + 10. Secondary Databases + +
+
+
+
+ + Opening and Closing Secondary Databases + +
+
+ + Implementing Key + Creators + + + +
+
+ + Secondary Database Properties + +
+
+ + Reading Secondary Databases + +
+
+ + Deleting Secondary Database Records + +
+
+ + + Using Secondary Cursors + + + +
+
+ + Database Joins + +
+
+
+
+ + Using Join Cursors + +
+
+ + JoinCursor Properties + +
+
+
+
+ + Secondary Database Example + +
+
+
+
+ + Opening Secondary Databases with + MyDbEnv + +
+
+ + Using Secondary Databases with ExampleInventoryRead + +
+
+
+
+
+
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/bindAPI.html b/docs/GettingStartedGuide/bindAPI.html new file mode 100644 index 0000000..12601ad --- /dev/null +++ b/docs/GettingStartedGuide/bindAPI.html @@ -0,0 +1,792 @@ + + + + + + Using the BIND APIs + + + + + + + + + +
+
+
+
+

Using the BIND APIs

+
+
+
+ +

Except for Java String and boolean types, efficiently moving data in and out of Java byte arrays for storage in a database can be a nontrivial operation. To help you with this problem, JE provides the Bind APIs. While these APIs are described in detail in the Berkeley DB, Java Edition Collections Tutorial, this section provides a brief introduction to using the Bind APIs with:

+
+
    +
  • +

    Single field numerical and string objects

    +

    Use this if you want to store a single numerical or string object, + such as Long, Double, or + String.

    +
  • +
  • +

    Complex objects that implement Java serialization.

    +

    Use this if you are storing objects that implement + Serializable and if you do not need to sort them. +

    +
  • +
  • +

    Non-serialized complex objects.

    +

    If you are storing objects that do not implement serialization, you can create your own custom tuple bindings. Note that you should use custom tuple bindings even for serializable objects if you want to sort on that data.

    +
  • +
+
+
+
+
+
+

Numerical and String Objects

+
+
+
+

You can use the Bind APIs to store primitive data in a DatabaseEntry + object. That is, you can store a single field containing one of the following types:

+
+
    +
  • +

    + String +

    +
  • +
  • +

    + Character +

    +
  • +
  • +

    + Boolean +

    +
  • +
  • +

    + Byte +

    +
  • +
  • +

    + Short +

    +
  • +
  • +

    + Integer +

    +
  • +
  • +

    + Long +

    +
  • +
  • +

    + Float +

    +
  • +
  • +

    + Double +

    +
  • +
+
+

+ To store primitive data using the Bind APIs: +

+
+
    +
  1. +

    Create an EntryBinding object.

    +

    When you do this, you use TupleBinding.getPrimitiveBinding() + to return an appropriate binding for the conversion.

    +
  2. +
  3. +

    Use the EntryBinding object to place + the numerical object on the DatabaseEntry.

    +
  4. +
+
+

Once the data is stored in the DatabaseEntry, you can put it to + the database in whatever manner you wish. For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+...
+
+// Need a key for the put.
+try {
+    String aKey = "myLong";
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));    
+
+    // Now build the DatabaseEntry using a TupleBinding
+    Long myLong = new Long(123456789L);
+    DatabaseEntry theData = new DatabaseEntry();
+    EntryBinding myBinding = TupleBinding.getPrimitiveBinding(Long.class);
+    myBinding.objectToEntry(myLong, theData);
+
+    // Now store it
+    myDatabase.put(null, theKey, theData);
+} catch (Exception e) {
+    // Exception handling goes here
+}
+

Retrieval from the DatabaseEntry object is + performed in much the same way:

+ +
package je.gettingStarted;
+      
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+
+...
+
+Database myDatabase = null;
+// Database open omitted for clarity
+
+try {
+    // Need a key for the get
+    String aKey = "myLong";
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    
+    // Need a DatabaseEntry to hold the associated data.
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Bindings need only be created once for a given scope
+    EntryBinding myBinding = TupleBinding.getPrimitiveBinding(Long.class);
+
+    // Get it
+    OperationStatus retVal = myDatabase.get(null, theKey, theData, 
+                                            LockMode.DEFAULT);
+    String retKey = null;
+    if (retVal == OperationStatus.SUCCESS) {
+        // Recreate the data.
+        // Use the binding to convert the byte array contained in theData
+        // to a Long type.
+        Long theLong = (Long) myBinding.entryToObject(theData);
+        retKey = new String(theKey.getData(), "UTF-8");
+        System.out.println("For key: '" + retKey + "' found Long: '" + 
+                            theLong + "'.");
+    } else {
+        System.out.println("No record found for key '" + retKey + "'.");
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+
+
+
+

Serializable Complex Objects

+
+
+
+

Frequently your application requires you to store and manage + objects for your record data and/or keys. You may need to do this if you + are caching objects created by another process. You may also want to do + this if you want to store multiple data values on a record. When used + with just primitive data, or with objects containing a single data member, + JE database records effectively represent a single row in a two-column table. + By storing a complex object in the record, you can turn each record into + a single row in an n-column table, where + n is the number of data members contained by the + stored object(s).

+

In order to store objects in a JE database, you must convert them to and from a byte array. The first instinct for many Java programmers is to do this using Java serialization. While this is functionally a correct solution, the result is poor space performance because the class information is stored on every such database record. This information can be quite large, and it is redundant — the class information does not vary for serialized objects of the same type.

+

+ In other words, directly using serialization to place your objects into byte + arrays means that you will be storing a great deal of unnecessary information in + your database, which ultimately leads to larger databases and more expensive disk + I/O. +

+

The easiest way for you to solve this problem is to use the Bind APIs to perform the serialization for you. Doing so causes the extra object information to be saved off to a unique Database dedicated to that purpose. This means that you do not have to duplicate that information on each record in the Database that your application is using to store its information.

+

+ Note that when you use the Bind APIs to perform serialization, you still + receive all the benefits of serialization. You can still use arbitrarily + complex object graphs, and you still receive built-in class evolution + through the serialVersionUID (SUID) scheme. All of the Java + serialization rules apply without modification. For example, you can + implement Externalizable instead of Serializable. +

+
+
+
+
+

Usage Caveats

+
+
+
+

Before using the Bind APIs to perform serialization, you may + want to consider writing your own custom tuple bindings. Specifically, + avoid serialization if: +

+
+
    +
  • +

    You need to sort based on the objects you are storing. The sort order is meaningless for the byte arrays that you obtain through serialization. Consequently, you should not use serialization for keys if you care about their sort order. You should also not use serialization for record data if your Database supports duplicate records and you care about sort order. +

    +
  • +
  • +

    You want to minimize the size of your byte arrays. Even when using the Bind APIs to perform the serialization, the resulting byte array may be larger than necessary. You can achieve more compact results by building your own custom tuple binding. +

    +
  • +
  • +

    + You want to optimize for speed. In general, custom tuple bindings are faster than serialization at + moving data in and out of byte arrays. +

    +
  • +
  • +

    You are using custom comparators. In JE, comparators are instantiated and called internally whenever databases are not accessible. Because serial bindings depend on the class catalog, a serial binding cannot be used during these times. As a result, attempting to use a serial binding with a custom comparator will result in a NullPointerException during environment open or close. +

    +
  • +
+
+

+ For information on building your own custom tuple binding, see Custom Tuple Bindings. +

+
+
+
+
+
+

Serializing Objects

+
+
+
+

To store a serializable complex object using the + Bind APIs:

+
+
    +
  1. +

    + Implement java.io.Serializable in the class whose instances you want to store. +

    +
  2. +
  3. +

    Open (create) your databases. You need two. The first is the + database that you use to store your data. The second is used to + store the class information.

    +
  4. +
  5. +

    Instantiate a class catalog. You do this with + com.sleepycat.bind.serial.StoredClassCatalog, + and at that time you must provide a handle to an open database + that is used to store the class information.

    +
  6. +
  7. +

    Create an entry binding that uses com.sleepycat.bind.serial.SerialBinding.

    +
  8. +
  9. +

    Instantiate an instance of the object that you want to + store, and place it in a DatabaseEntry + using the entry binding that you created in the previous step.

    +
  10. +
+
+

+ For example, suppose you want to store a long, double, and a + String as a record's data. Then you might create a class that + looks something like this: +

+ +
package je.gettingStarted;    
+
+import java.io.Serializable;
+
+public class MyData implements Serializable {
+    private long longData;
+    private double doubleData;
+    private String description;
+
+    MyData() {
+        longData = 0;
+        doubleData = 0.0;
+        description = null;
+    }
+
+    public void setLong(long data) {
+        longData = data;
+    }
+
+    public void setDouble(double data) {
+        doubleData = data;
+    }
+
+    public void setDescription(String data) {
+        description = data;
+    }
+
+    public long getLong() {
+        return longData;
+    }
+
+    public double getDouble() {
+        return doubleData;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+}
+

You can then store instances of this class as follows:

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+
+...
+
+// The key data.
+String aKey = "myData";
+
+// The data data
+MyData data2Store = new MyData();
+data2Store.setLong(123456789l);
+data2Store.setDouble(1234.9876543);
+data2Store.setDescription("A test instance of this class");
+
+try {
+    // Environment open omitted for brevity
+
+    // Open the database that you will use to store your data
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(true);
+    myDbConfig.setSortedDuplicates(true);
+    Database myDatabase = myDbEnv.openDatabase(null, "myDb", myDbConfig);
+
+    // Open the database that you use to store your class information.
+    // The db used to store class information does not require duplicates
+    // support.
+    myDbConfig.setSortedDuplicates(false);
+    Database myClassDb = myDbEnv.openDatabase(null, "classDb", 
+                                              myDbConfig); 
+
+    // Instantiate the class catalog
+    StoredClassCatalog classCatalog = new StoredClassCatalog(myClassDb);
+
+    // Create the binding
+    EntryBinding dataBinding = new SerialBinding(classCatalog, 
+                                                 MyData.class);
+
+    // Create the DatabaseEntry for the key
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+
+    // Create the DatabaseEntry for the data. Use the EntryBinding object
+    // that was just created to populate the DatabaseEntry
+    DatabaseEntry theData = new DatabaseEntry();
+    dataBinding.objectToEntry(data2Store, theData);
+
+    // Put it as normal
+    myDatabase.put(null, theKey, theData);
+    
+    // Database and environment close omitted for brevity 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
+
+
+
+

Deserializing Objects

+
+
+
+

Once an object is stored in the database, you can recreate the MyData object from the
retrieved DatabaseEntry using the Bind APIs in much the same way as described above. For
example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.LockMode;
+
+...
+
+// The key data.
+String aKey = "myData";
+
+try {
+    // Environment open omitted for brevity.
+
+    // Open the database that stores your data
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(false);
+    Database myDatabase = myDbEnv.openDatabase(null, "myDb", myDbConfig);
+
+    // Open the database that stores your class information.
+    Database myClassDb = myDbEnv.openDatabase(null, "classDb", 
+                                              myDbConfig); 
+
+    // Instantiate the class catalog
+    StoredClassCatalog classCatalog = new StoredClassCatalog(myClassDb);
+
+    // Create the binding
+    EntryBinding dataBinding = new SerialBinding(classCatalog, 
+                                                 MyData.class);
+
+    // Create DatabaseEntry objects for the key and data
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    // Do the get as normal
+    myDatabase.get(null, theKey, theData, LockMode.DEFAULT);
+
+    // Recreate the MyData object from the retrieved DatabaseEntry using
+    // the EntryBinding created above
+    MyData retrievedData = (MyData) dataBinding.entryToObject(theData);
+ 
+    // Database and environment close omitted for brevity
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
+
+
+
+
+

Custom Tuple Bindings

+
+
+
+

+ If you want to store complex objects in your database, then you can use + tuple bindings to do this. While they are more work to write and + maintain than if you were to use serialization, the + byte array conversion is faster. In addition, custom + tuple bindings should allow you to create byte arrays + that are smaller than those created by serialization. Custom tuple + bindings also allow you to optimize your BTree comparisons, whereas + serialization does not. +

+

+ For information on using serialization to store complex objects, see + Serializable Complex Objects. +

+

To store complex objects using a custom tuple binding:

+
+
  1. Implement the class whose instances you want to store. Note that you do not have to
     implement the Serializable interface.

  2. Write a tuple binding using the com.sleepycat.bind.tuple.TupleBinding class.

  3. Open (create) your database. Unlike serialization, you only need one.

  4. Create an entry binding that uses the tuple binding that you implemented in step 2.

  5. Instantiate an instance of the object that you want to store, and place it in a
     DatabaseEntry using the entry binding that you created in the previous step.
+
+

For example, suppose you want your keys to be instances of the following class:

+ +
package je.gettingStarted;
+
+public class MyData2 {
+    private long longData;
+    private Double doubleData;
+    private String description;
+
+    public MyData2() {
+        longData = 0;
+        doubleData = new Double(0.0);
+        description = "";
+    }
+
+    public void setLong(long data) {
+        longData = data;
+    }
+
+    public void setDouble(Double data) {
+        doubleData = data;
+    }
+
+    public void setString(String data) {
+        description = data;
+    }
+
+    public long getLong() {
+        return longData;
+    }
+
+    public Double getDouble() {
+        return doubleData;
+    }
+
+    public String getString() {
+        return description;
+    }
+} 
+

In this case, you need to write a tuple binding for the + MyData2 class. When you do this, you must + implement the TupleBinding.objectToEntry() + and TupleBinding.entryToObject() abstract methods. + Remember the following as you implement these methods:

+
+
  • You use TupleBinding.objectToEntry() to convert objects to byte arrays. You use
    com.sleepycat.bind.tuple.TupleOutput to write primitive data types to the byte array.
    Note that TupleOutput provides methods that allow you to work with numerical types
    (long, double, int, and so forth) and not the corresponding java.lang numerical
    classes.

  • The order in which you write data to the byte array in TupleBinding.objectToEntry() is
    the order in which it appears in the array. So given the MyData2 class as an example,
    if you write description, doubleData, and then longData, then the resulting byte array
    will contain these data elements in that order. This means that your records will sort
    based on the value of the description data member, then the doubleData member, and so
    forth. If you prefer to sort based on, say, the longData data member, write it to the
    byte array first.

  • You use TupleBinding.entryToObject() to convert the byte array back into an instance
    of your original class. You use com.sleepycat.bind.tuple.TupleInput to get data from
    the byte array.

  • The order in which you read data from the byte array must be exactly the same as the
    order in which it was written.
+
+

For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+public class MyTupleBinding extends TupleBinding {
+
+    // Write a MyData2 object to a TupleOutput
+    public void objectToEntry(Object object, TupleOutput to) {
+
+        MyData2 myData = (MyData2)object;
+
+        // Write the data to the TupleOutput (a DatabaseEntry).
+        // Order is important. The first data written will be
+        // the first bytes used by the default comparison routines.
+        to.writeDouble(myData.getDouble().doubleValue());
+        to.writeLong(myData.getLong());
+        to.writeString(myData.getString());
+    }
+
+    // Convert a TupleInput to a MyData2 object
+    public Object entryToObject(TupleInput ti) {
+
+        // Data must be read in the same order that it was
+        // originally written.
+        Double theDouble = new Double(ti.readDouble());
+        long theLong = ti.readLong();
+        String theString = ti.readString();
+
+        MyData2 myData = new MyData2();
+        myData.setDouble(theDouble);
+        myData.setLong(theLong);
+        myData.setString(theString);
+
+        return myData;
+    }
+} 
+

In order to use the tuple binding, instantiate the binding and + then use:

+
+
  • MyTupleBinding.objectToEntry() to convert a MyData2 object to a DatabaseEntry.

  • MyTupleBinding.entryToObject() to convert a DatabaseEntry to a MyData2 object.
+
+

For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.DatabaseEntry;
+ 
+...
+
+TupleBinding keyBinding = new MyTupleBinding();
+
+MyData2 theKeyData = new MyData2();
+theKeyData.setLong(123456789L);
+theKeyData.setDouble(new Double(12345.6789));
+theKeyData.setString("My key data");
+
+DatabaseEntry myKey = new DatabaseEntry();
+
+try {
+    // Store theKeyData in the DatabaseEntry
+    keyBinding.objectToEntry(theKeyData, myKey);
+
+    ...
+    // Database put and get activity omitted for clarity
+    ...
+
+    // Retrieve the key data
+    theKeyData = (MyData2) keyBinding.entryToObject(myKey);
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
+ + + diff --git a/docs/GettingStartedGuide/cachesize.html b/docs/GettingStartedGuide/cachesize.html new file mode 100644 index 0000000..05b21c4 --- /dev/null +++ b/docs/GettingStartedGuide/cachesize.html @@ -0,0 +1,131 @@ + + + + + + Sizing the Cache + + + + + + + + + +
+
+
+
+

Sizing the Cache

+
+
+
+

+ By default, your cache is limited to a percentage of the JVM maximum + memory as specified by the -Xmx parameter. You can + change this percentage by using the je.maxMemoryPercent property + or through EnvironmentMutableConfig.setCachePercent(). + That is, the maximum amount of memory available to your cache is + normally calculated as: +

+
je.maxMemoryPercent * JVM_maximum_memory
+

+ You can find out what the value for this property is by using + EnvironmentConfig.getCachePercent(). +

+

+ Note that you can cause JE to use a fixed maximum cache size by + using je.maxMemory or by using + EnvironmentConfig.setCacheSize(). +

+
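For example, with -Xmx512m and a cache percentage of 60, the cache would be capped at
roughly 307MB. The following is a minimal sketch of setting either bound before opening
the environment; the environment home path and the size values shown are illustrative
only:

package je.gettingStarted;

import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;

import java.io.File;
...

EnvironmentConfig envConfig = new EnvironmentConfig();
envConfig.setAllowCreate(true);

// Bound the cache to a percentage of the JVM maximum memory...
envConfig.setCachePercent(60);

// ...or, alternatively, give the cache a fixed maximum size in bytes.
// envConfig.setCacheSize(50 * 1024 * 1024);

Environment myDbEnv = new Environment(new File("/export/dbEnv"), envConfig);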

+ Also, not every JVM is capable of identifying the amount of memory requested via + the -Xmx parameter. For those JVMs you must use + je.maxMemory to change your maximum cache size. The + default maximum memory available to your cache in this case is 38M. +

+

+ Of the amount of memory allowed for your cache, 93% is used for the internal BTree and the other 7% is + used for internal buffers. When your application first starts up, the 7% for buffers is immediately allocated. + The remainder of the cache grows lazily as your application reads and writes data. +

+

+ In order for your application to start up successfully, the Java virtual machine must have enough memory + available to it (as identified by the -Xmx command line switch) for both your application and + 7% of your maximum cache value. In order for your application to run continuously (all the while loading data + into the cache), you must make sure your JVM has enough memory for your application plus the maximum cache size. +

+

+ The best way to determine how large your cache needs to be is to put your application into a production + environment and watch to see how much disk I/O is occurring. If the application is going to disk quite a lot to + retrieve database records, then you should increase the size of your cache (provided that you have enough memory + to do so). +

+

+ You can also use the + com.sleepycat.je.util.DbCacheSize utility + to obtain a rough estimate of how large your cache needs to be for + a given number of records and record characteristics. The utility + returns an estimate of the cache size to hold + the specified number of records in memory. See the + DbCacheSize javadoc + for information on the utility's usage. +

+
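For example, a run along the following lines prints sizing estimates to the console; the
record count and key/data sizes shown are hypothetical:

> java com.sleepycat.je.util.DbCacheSize -records 1000000 -key 16 -data 100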

+ In order to determine how frequently your application is going to disk for database records not found in the + cache, you can examine the value returned by EnvironmentStats.getNCacheMiss(). +

+

+ EnvironmentStats.getNCacheMiss() identifies the total number of requests for + database objects that were + not serviceable from the cache. This value is cumulative since the application started. The faster this number grows, + the more your application is going to disk to service database operations. Upon application startup you can + expect this value to grow quite rapidly. However, as time passes and your cache is seeded with your most + frequently accessed database records, what you want is for this number's growth to be zero or at least very + small. +

+
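The following is a minimal sketch of sampling this statistic from within the application;
it assumes an already-open Environment handle named myDbEnv:

import com.sleepycat.je.EnvironmentStats;
import com.sleepycat.je.StatsConfig;
...

// Collect the current statistics without resetting the counters.
EnvironmentStats stats = myDbEnv.getStats(new StatsConfig());
System.out.println("Cache misses so far: " + stats.getNCacheMiss());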

+ Note that this statistic can only be collected from within the application itself or using the JMX extension + (see JConsole and JMX Support). +

+

+ For more information on collecting this statistic, see Environment Statistics. +

+
+ + + diff --git a/docs/GettingStartedGuide/catastrophicrecovery.html b/docs/GettingStartedGuide/catastrophicrecovery.html new file mode 100644 index 0000000..28fa7b4 --- /dev/null +++ b/docs/GettingStartedGuide/catastrophicrecovery.html @@ -0,0 +1,117 @@ + + + + + + Performing Catastrophic Recovery + + + + + + + + + +
+
+
+
+

Performing Catastrophic Recovery

+
+
+
+

+ Catastrophic recovery is necessary whenever your environment and/or database have been lost or corrupted + due to a media failure (disk failure, for example). Catastrophic recovery is also required if normal + recovery fails for any reason. +

+

In order to perform catastrophic recovery, you must have a full backup of your databases.
You will use this backup to restore your database. See Performing Backups for information
on running backups.

+

+ To perform catastrophic recovery: +

+
+
  1. Shut down your application.

  2. Delete the contents of your environment home directory (the one that experienced a
     catastrophic failure), if there is anything there.

  3. Copy your most recent full backup into your environment home directory (see the
     sketch following this list). If you are using subdirectories to store your log files,
     be sure to place the recovered log files back into the subdirectory from which they
     were originally backed up.

  4. If you are using a backup utility that runs incremental backups of your environment
     directory, copy any log files generated since the time of your last full backup. Be
     sure to restore all log files in the order that they were written. The order is
     important because it is possible the same log file appears in multiple archives, and
     you want to run recovery using the most recent version of each log file. Also, if you
     are using subdirectories to store your log files, be sure to maintain the
     relationship between your log files and the subdirectory in which JE originally
     placed them.

  5. Open the environment as normal. JE's normal recovery will run, which will bring your
     database to a consistent state relative to the changed data found in your log files.
+
+
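As an illustration of steps 2 and 3 only (the paths are hypothetical, and the commands
assume a Unix-style shell and default, non-subdirectory log file placement):

> cd /export/dbEnv
> rm -f *.jdb
> cp /backups/dbEnv-full/*.jdb .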

+ You are now done restoring your database. +

+
+ + + diff --git a/docs/GettingStartedGuide/commandlinetools.html b/docs/GettingStartedGuide/commandlinetools.html new file mode 100644 index 0000000..2ef2ef3 --- /dev/null +++ b/docs/GettingStartedGuide/commandlinetools.html @@ -0,0 +1,563 @@ + + + + + + The Command Line Tools + + + + + + + + + +
+
+
+
+

The Command Line Tools

+
+
+
+
+
+
+ + DbDump + +
+
+ + DbLoad + +
+
+ + DbVerify + +
+
+
+

+ JE ships with several command line tools that + you can use to help you manage your databases. They are: +

+
+
  • DbDump

    Dumps a database to a user-readable format.

  • DbLoad

    Loads a database from the output produced by DbDump.

  • DbVerify

    Verifies the structure of a database.
+
+
+
+
+
+

DbDump

+
+
+
+

+ Dumps a database to a flat-text representation. Options are: +

+
+
+
+ + + -f + + +
+
+

+ Identifies the file to which the output from this command is written. + The console (standard out) is used by default. +

+
+
+ + + -h + + +
+
+

+ Identifies the environment's directory. This parameter is required. +

+
+
+ + + -l + + +
+
+

Lists the databases contained in the environment. If the -s option is not provided, then
this argument is required.

+
+
+ + + -p + + +
+
+

+ Prints database records in human-readable format. +

+
+
+ + + -r + + +
+
+

Salvages data from a possibly corrupt file. When used on an uncorrupted database, this
option should return data equivalent to a normal dump, but most likely in a different
order.

+

+ This option causes the ensuing output to go to a file named + dbname.dump where + dbname is the name of the database you are dumping. The + file is placed in the current working directory. +

+
+
+ + + -R + + +
+
+

Aggressively salvages data from a possibly corrupt file. This option differs from the -r
option in that it will return all possible data from the file at the risk of also
returning already deleted or otherwise nonsensical items. Data dumped in this fashion
will almost certainly have to be edited by hand or other means before the data is ready
for reload into another database.

+

+ This option causes the ensuing output to go to a file named + dbname.dump where + dbname is the name of the database you are dumping. The + file is placed in the current working directory. +

+
+
+ + + -s + + +
+
+

Identifies the database to be dumped. If this option is not specified, then the -l option
is required.

+
+
+ + + -v + + +
+
+

+ Prints progress information to the console for + -r or -R mode. +

+
+
+ + + -V + + +
+
+

+ Prints the database version number and then quits. All other command line options are ignored. +

+
+
+
+

+ For example: +

+
> java com.sleepycat.je.util.DbDump -h . -p -s VendorDB  
+VERSION=3
+format=print
+type=btree
+database=VendorDB
+dupsort=false
+HEADER=END
+ Mom's Kitchen
+ sr\01\01xpt\00\0d53 Yerman Ct.t\00\0c763 554 9200t\00\0bMiddle Townt\00
+ \0eMaggie Kultgent\00\10763 554 9200 x12t\00\02MNt\00\0dMom's Kitchent\00
+ \0555432
+ Off the Vine
+ sr\01\01xpt\00\10133 American Ct.t\00\0c563 121 3800t\00\0aCentennialt\00
+ \08Bob Kingt\00\10563 121 3800 x54t\00\02IAt\00\0cOff the Vinet\00\0552002
+ Simply Fresh
+ sr\01\01xpt\00\1115612 Bogart Lanet\00\0c420 333 3912t\00\08Harrigant\00
+ \0fCheryl Swedbergt\00\0c420 333 3952t\00\02WIt\00\0cSimply Fresht\00\0
+ 553704
+ The Baking Pan
+ sr\01\01xpt\00\0e1415 53rd Ave.t\00\0c320 442 2277t\00\07Dutchint\00\09
+ Mike Roant\00\0c320 442 6879t\00\02MNt\00\0eThe Baking Pant\00\0556304
+ The Pantry
+ sr\01\01xpt\00\111206 N. Creek Wayt\00\0c763 555 3391t\00\0bMiddle Town
+ t\00\0fSully Beckstromt\00\0c763 555 3391t\00\02MNt\00\0aThe Pantryt\00
+ \0555432
+ TriCounty Produce
+ sr\01\01xpt\00\12309 S. Main Streett\00\0c763 555 5761t\00\0bMiddle Townt
+ \00\0dMort Dufresnet\00\0c763 555 5765t\00\02MNt\00\11TriCounty Producet
+ \00\0555432
+DATA=END
+> 
+
+
+
+
+
+

DbLoad

+
+
+
+

+ Loads a database from the output produced by DbDump. + Options are: +

+
+
+
+ + + -c + + +
+
+

Specifies configuration options. The options supplied here override the corresponding options + that appear in the data that is being loaded. This option takes values of the form + name=value, where name + is the configuration option that you are overriding and value + is the new value for the option. +

+

+ The following options can be specified: +

+
+
  • database

    The name of the database to be loaded. This option duplicates the functionality of
    this command's -s command line option.

  • dupsort

    Indicates whether duplicates are allowed in the database. A value of true allows
    duplicates in the database.
+
+
+
+ + + -f + + +
+
+

+ Identifies the file from which the database is to be loaded. +

+
+
+ + + -n + + +
+
+

Do not overwrite existing keys in the database when loading into an already existing
database. If a key/data pair cannot be loaded into the database for this reason, a
warning message is displayed on the standard error output, and the key/data pair is
skipped.

+
+
+ + + -h + + +
+
+

+ Identifies the environment's directory. This parameter is required. +

+
+
+ + + -l + + +
+
+

+ Allows loading databases that were dumped with the Berkeley DB C + product, when the dump file contains parameters not known to JE. +

+
+
+ + + -s + + +
+
+

+ Overrides the database name, causing the data to be loaded into a database that uses the name + supplied to this parameter. +

+
+
+ + + -T + + +
+
+

+ Causes a flat text file to be loaded into the database. +

+

+ The input must be paired lines of text, where the first line of the pair is the key item, + and the second line of the pair is its corresponding data item. +

+

+ A simple escape mechanism, where newline and backslash (\) characters are special, is + applied to the text input. Newline characters are interpreted as record separators. + Backslash characters in the text will be interpreted in one of two ways: If the backslash + character precedes another backslash character, the pair will be interpreted as a literal + backslash. If the backslash character precedes any other character, the two characters + following the backslash will be interpreted as a hexadecimal specification of a single + character; for example, \0a is a newline character in the ASCII character set. +

+

For this reason, any backslash or newline characters that naturally occur in the text
input must be escaped to avoid misinterpretation by DbLoad (see the example at the end of
this section).

+
+
+ + + -v + + +
+
+

+ Report periodic load status to the console. +

+
+
+ + + -V + + +
+
+

+ Prints the database version number and then quits. All other command line options are ignored. +

+
+
+
+

+ For example: +

+
> java com.sleepycat.je.util.DbDump -h . -s VendorDB -f vendordb.txt
+> java com.sleepycat.je.util.DbLoad -h . -f vendordb.txt
+> 
+
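When using -T, the input file consists of paired lines of text, as described above. For
instance, given a hypothetical flat text file vendors.txt (its name and contents are
illustrative only), where each key line is followed by its data line:

> cat vendors.txt
TriCounty Produce
309 S. Main Street
Simply Fresh
15612 Bogart Lane
> java com.sleepycat.je.util.DbLoad -h . -s VendorDB -T -f vendors.txt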
+
+
+
+
+

DbVerify

+
+
+
+

+ Examines the identified database for errors. Options are: +

+
+
+
+ + + -h + + +
+
+

+ Identifies the environment's directory. This parameter is required. +

+
+
+ + + -q + + +
+
+

Suppresses the printing of any error descriptions; the command simply exits with success
or failure.

+
+
+ + + -s + + +
+
+

+ Identifies the database to be verified. This parameter is required. +

+
+
+ + + -V + + +
+
+

+ Prints the database version number and then quits. All other command line options are ignored. +

+
+
+ + + -v + + +
+
+

+ Report intermediate statistics every N leaf nodes, where + N is the value that you provide this parameter. +

+
+
+
+

+ For example: +

+
> java com.sleepycat.je.util.DbVerify -h . -s VendorDB
+
+<BtreeStats>
+<BottomInternalNodesByLevel total="1">
+  <Item level="1" count="1"/>
+</BottomInternalNodesByLevel>
+<InternalNodesByLevel total="1">
+  <Item level="2" count="1"/>
+</InternalNodesByLevel>
+<LeafNodes count="6"/>
+<DeletedLeafNodes count="0"/>
+<DuplicateCountLeafNodes count="0"/>
+<MainTreeMaxDepth depth="2"/>
+<DuplicateTreeMaxDepth depth="0"/>
+</BtreeStats>
+
+
+ + + diff --git a/docs/GettingStartedGuide/comparator.html b/docs/GettingStartedGuide/comparator.html new file mode 100644 index 0000000..7f89338 --- /dev/null +++ b/docs/GettingStartedGuide/comparator.html @@ -0,0 +1,260 @@ + + + + + + Using Comparators + + + + + + + + + +
+
+
+
+

Using Comparators

+
+
+
+
+
+
+ + Writing Comparators + +
+
+ + Setting Comparators + +
+
+
+

Internally, JE databases are organized as BTrees. + This means that most database operations + (inserts, deletes, reads, and so forth) involve BTree node + comparisons. This comparison most frequently occurs based on database + keys, but if your database supports duplicate records then + comparisons can also occur based on the database data. +

+

+ By default, JE performs all such comparisons using a byte-by-byte + lexicographic comparison. This mechanism works well for most data. + However, in some cases you may need to specify your own comparison + routine. One frequent reason for this is to perform a language sensitive + lexical ordering of string keys. +

+
+
+
+
+

Writing Comparators

+
+
+
+

+ You override the default comparison function by providing a Java + Comparator class to the database. + The Java Comparator interface requires you to implement the + Comparator.compare() method + (see http://java.sun.com/j2se/1.4.2/docs/api/java/util/Comparator.html for details). +

+

+ JE passes your Comparator.compare() method + the byte arrays that you stored in the database. If + you know how your data is organized in the byte + array, then you can write a comparison routine that directly examines + the contents of the arrays. Otherwise, you have to reconstruct your + original objects, and then perform the comparison. +

+
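For instance, a comparator that directly examines the arrays might be sketched as
follows. This class is illustrative only; it simply reproduces an unsigned byte-by-byte
ordering:

package je.gettingStarted;

import java.util.Comparator;

public class ByteArrayComparator implements Comparator {

    public int compare(Object d1, Object d2) {

        byte[] b1 = (byte[])d1;
        byte[] b2 = (byte[])d2;

        int len = Math.min(b1.length, b2.length);
        for (int i = 0; i < len; i++) {
            // Treat each byte as an unsigned value.
            int v1 = b1[i] & 0xff;
            int v2 = b2[i] & 0xff;
            if (v1 != v2) {
                return v1 - v2;
            }
        }
        // The shorter array sorts first.
        return b1.length - b2.length;
    }
}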

For example, suppose you want to perform Unicode lexical comparisons instead of UTF-8
byte-by-byte comparisons. Then you could provide a comparator that uses
String.compareTo(), which performs a Unicode comparison of two strings (note that for
single-byte Roman characters, Unicode comparison and UTF-8 byte-by-byte comparisons are
identical – this is something you would only want to do if you were using multibyte
Unicode characters with JE). In this case, your comparator would look like the following:

+ +
package je.gettingStarted;
+
+import java.util.Comparator;
+
+public class MyDataComparator implements Comparator {
+
+    public MyDataComparator() {}
+
+    public int compare(Object d1, Object d2) {
+
+        byte[] b1 = (byte[])d1;
+        byte[] b2 = (byte[])d2;
+
+        try {
+            String s1 = new String(b1, "UTF-8");
+            String s2 = new String(b2, "UTF-8");
+            return s1.compareTo(s2);
+        } catch (java.io.UnsupportedEncodingException e) {
+            // UTF-8 is always supported, so this cannot happen.
+            throw new RuntimeException(e);
+        }
+    }
+} 
+
+
+
+
+
+

Setting Comparators

+
+
+
+

You specify a Comparator using the following methods. Note that by default these methods
can only be used at database creation time, and they are ignored for normal database
opens. Also, note that JE uses the no-argument constructor for these comparators.
Further, these comparators must not contain mutable state; otherwise, unpredictable
results will occur.

+
+
  • DatabaseConfig.setBtreeComparator()

    Sets the Java Comparator class used to compare two keys in the database.

  • DatabaseConfig.setDuplicateComparator()

    Sets the Java Comparator class used to compare the data on two duplicate records in
    the database. This comparator is used only if the database supports duplicate records.
+
+

+ You can use the above methods to set a database's comparator after + database creation time if you explicitly indicate that the comparator + is to be overridden. You do this by using the following methods: +

+
+

Note

+

+ If you override your comparator, the new comparator must preserve the + sort order implemented by your original comparator. That is, the new + comparator and the old comparator must return the same value for the + comparison of any two valid objects. Failure to observe this constraint + will cause unpredictable results for your application. +

+

+ If you want to change the fundamental sort order for your database, back + up the contents of the database, delete the database, recreate it, and + then reload its data. +

+
+
+
  • DatabaseConfig.setOverrideBtreeComparator()

    If set to true, causes the database's Btree comparator to be overridden with the
    Comparator specified on DatabaseConfig.setBtreeComparator(). This method can be used
    to change the comparator post-environment creation.

  • DatabaseConfig.setOverrideDuplicateComparator()

    If set to true, causes the database's duplicates comparator to be overridden with the
    Comparator specified on DatabaseConfig.setDuplicateComparator().
+
+

For example, to use the Comparator + described in the previous section:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+
+import java.util.Comparator;    
+
+...
+
+
+// Environment open omitted for brevity
+
+try {
+    // Get the database configuration object
+    DatabaseConfig myDbConfig = new DatabaseConfig();
+    myDbConfig.setAllowCreate(true);
+
+    // Set the duplicate comparator class
+    myDbConfig.setDuplicateComparator(MyDataComparator.class);
+
+    // Open the database that you will use to store your data
+    myDbConfig.setSortedDuplicates(true);
+    Database myDatabase = myDbEnv.openDatabase(null, "myDb", myDbConfig); 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+}
+
+
+ + + diff --git a/docs/GettingStartedGuide/concurrentProcessing.html b/docs/GettingStartedGuide/concurrentProcessing.html new file mode 100644 index 0000000..0cbd67b --- /dev/null +++ b/docs/GettingStartedGuide/concurrentProcessing.html @@ -0,0 +1,171 @@ + + + + + + Appendix A. Concurrent Processing in Berkeley DB Java Edition + + + + + + + + + +
+
+
+
+

Appendix A. Concurrent Processing in Berkeley DB Java Edition

+
+
+
+

+ An in-depth description of concurrent processing in JE is beyond the scope of this manual. However, there are + a few things that you should be aware of as you explore JE. Note that many of these topics are described in + greater detail in other parts of this book. This section is intended only to summarize JE concurrent + processing. +

+

+ Also, this appendix touches on a topic not + discussed in any detail in this manual: transactions. Transactional usage is + optional but nevertheless very commonly used for JE + applications, especially when writing multi-threaded or + multi-process applications. However, transactions also + represent a topic that is too large for this book. To read a + thorough description of JE and transactional processing, + see the Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

+

+ This appendix first describes concurrency with multithreaded applications. It then goes on to describe + Multiprocess Applications. +

+
+
+
+
+

Multithreaded Applications

+
+
+
+

+ Note the following if you are writing an application that will use multiple threads for reading and writing + JE databases: +

+
+
    +
  • +

    + JE database and environment handles are free-threaded (that is, are thread safe), so from a mechanical perspective you + do not have to synchronize access to them when they are used by multiple threads of control. +

    +
  • +
  • +

    + It is dangerous to close environments and databases when other database operations are in + progress. So if you are going to share handles for these objects across threads, you should + architect your application such that there is no possibility of a thread closing a handle when + another thread is using that handle. +

    +
  • +
  • +

    + If a transaction is shared across threads, it is safe to call transaction.abort() from + any thread. However, be aware that any thread that attempts a database operation using an aborted + transaction will throw a DatabaseException. You should architect your + application such that your threads are able to gracefully deal with some other thread aborting the + current transaction. +

    +
  • +
  • +

    + If a transaction is shared across threads, make sure that + transaction.commit() can never be called until all threads participating in + the transaction have completed their database operations. +

    +
  • +
  • +

Locking is performed at the database record level. JE always checks for lock conflicts,
which can be caused either by operations that run for too long a period of time, or by
deadlocks. JE decides that a lock conflict has occurred when the lock cannot be obtained
within a set timeout period. If it cannot, regardless of why the lock could not be
obtained, then LockConflictException is thrown.

    +
  • +
  • +

A non-transactional operation that reads a record locks it for the duration of the read.
While locked for read, a write lock cannot be obtained on that record. However, another
read lock can be obtained for that record. This means that for threaded applications,
multiple threads can simultaneously read a record, but no thread can write to the record
while a read is in progress.

    +

    + Note that if you are performing uncommitted reads, then no locking is performed for that read. Instead, + JE uses internal mechanisms to ensure that the data you are reading is consistent (that is, it + will not change mid-read). +

    +

Finally, it is possible to specify that you want a write lock for your read operation.
You do this using LockMode.RMW. Use RMW when you know that your read will subsequently
be followed up with a write operation. Doing so can help to avoid lock conflicts (see
the sketch following this list).

    +
  • +
  • +

    + An operation that writes to a record obtains a write lock on that record. While the write lock is in + progress, no other locks can be obtained for that record (either read or write). +

    +
  • +
  • +

    + All locks, read or write, obtained from within a transaction are held until the transaction is either + committed or aborted. + This means that the longer a transaction lives, the more likely other threads in your application + are to run into lock conflicts. That is, write operations + performed outside of the scope of the transaction will not be able to obtain a lock on those records + while the transaction is in progress. Also, by default, reads performed outside the scope of the + transaction will not be able to lock records written by the transaction. However, this behavior can be + overridden by configuring your reader to perform uncommitted reads. +

    +
  • +
+
+
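The following is a minimal sketch of the LockMode.RMW pattern described above; the
database handle (myDatabase), transaction (txn), and key (theKey) are assumed to already
exist:

import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.LockMode;
import com.sleepycat.je.OperationStatus;
...

// Read the record with a write lock because we intend to update it.
DatabaseEntry theData = new DatabaseEntry();
if (myDatabase.get(txn, theKey, theData, LockMode.RMW) ==
        OperationStatus.SUCCESS) {

    // ... modify the record's data here ...

    // Write the updated record using the same transaction.
    myDatabase.put(txn, theKey, theData);
}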
+
+ + + diff --git a/docs/GettingStartedGuide/cursorUsage.html b/docs/GettingStartedGuide/cursorUsage.html new file mode 100644 index 0000000..8a9f8e4 --- /dev/null +++ b/docs/GettingStartedGuide/cursorUsage.html @@ -0,0 +1,284 @@ + + + + + + Cursor Example + + + + + + + + + +
+
+
+
+

Cursor Example

+
+
+
+

In Database Example we wrote an + application that loaded two Database objects with vendor + and inventory information. In this example, we will use those databases to + display all of the items in the inventory database. As a part of showing + any given inventory item, we will look up the vendor who can provide the + item and show the vendor's contact information.

+

To do this, we create the ExampleInventoryRead + application. This application reads and displays all inventory records by:

+
+
  1. Opening the environment and then the inventory, vendor, and class catalog Database
     objects. We do this using the MyDbEnv class. See Stored Class Catalog Management
     with MyDbEnv for a description of this class.

  2. Obtaining a cursor from the inventory Database.

  3. Stepping through the Database, displaying each record as it goes.

  4. Displaying the inventory record using the custom tuple binding that we created in
     InventoryBinding.java.

  5. Using Database.get() to obtain the vendor that corresponds to the inventory item.

  6. Using a serial binding to convert the DatabaseEntry returned by the get() to a
     Vendor object.

  7. Displaying the contents of the Vendor object.
+
+

We implemented the Vendor class in Vendor.java. We implemented the + Inventory class in Inventory.java.

+

The full implementation of ExampleInventoryRead + can be found in: +

+
JE_HOME/examples/je/gettingStarted/ExampleInventoryRead.java
+

+ where JE_HOME is the location where you + placed your JE + distribution. +

+
+ +

+ Example 9.1 ExampleInventoryRead.java +

+
+

To begin, we import the necessary classes:

+ +
// file ExampleInventoryRead.java
+package je.gettingStarted;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding; 
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+
+import java.io.File;
+import java.io.IOException; 
+

Next we declare our class and set up some global variables. Note that a MyDbEnv object is
instantiated here. We can do this because its constructor never throws an exception. See
Database Example for its implementation details.

+ +
public class ExampleInventoryRead {
+
+        private static File myDbEnvPath =
+            new File("/tmp/JEDB");
+
+        // Encapsulates the database environment and databases.
+        private static MyDbEnv myDbEnv = new MyDbEnv();
+
+        private static TupleBinding inventoryBinding;
+        private static EntryBinding vendorBinding;
+

Next we create the ExampleInventoryRead.usage() and ExampleInventoryRead.main() methods.
We perform almost all of our exception handling from ExampleInventoryRead.main(), and so
we must catch DatabaseException because the com.sleepycat.je.* APIs throw it.

+ +
   private static void usage() {
+        System.out.println("ExampleInventoryRead [-h <env directory>]");
+        System.exit(0);
+    }
+
+    public static void main(String args[]) {
+        ExampleInventoryRead eir = new ExampleInventoryRead();
+        try {
+            eir.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleInventoryRead: " + dbe.toString());
+            dbe.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+

In ExampleInventoryRead.run(), we call MyDbEnv.setup() to + open our environment and databases. Then we create the bindings that we need for using our data objects with + DatabaseEntry objects. +

+ +
    private void run(String args[]) throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+  
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      true);       // is this environment read-only?
+
+        // Setup our bindings.
+        inventoryBinding = new InventoryBinding();
+        vendorBinding =
+             new SerialBinding(myDbEnv.getClassCatalog(),
+                               Vendor.class);
+        showAllInventory();
+    }
+

Now we write the loop that displays the Inventory + records. We do this by opening a cursor on the inventory database and + iterating over all its contents, displaying each as we go.

+ +
    private void showAllInventory() 
+        throws DatabaseException {
+        // Get a cursor
+        Cursor cursor = myDbEnv.getInventoryDB().openCursor(null, null);
+
+        // DatabaseEntry objects used for reading records
+        DatabaseEntry foundKey = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+
+        try { // always want to make sure the cursor gets closed.
+            while (cursor.getNext(foundKey, foundData,
+                        LockMode.DEFAULT) == OperationStatus.SUCCESS) {
+                Inventory theInventory =
+                    (Inventory)inventoryBinding.entryToObject(foundData);
+                displayInventoryRecord(foundKey, theInventory);
+            }
+        } catch (Exception e) {
+            System.err.println("Error on inventory cursor:");
+            System.err.println(e.toString());
+            e.printStackTrace();
+        } finally {
+            cursor.close();
+        }
+
+    } 
+

We use ExampleInventoryRead.displayInventoryRecord() to actually show the record. This + method first displays all the relevant information from the retrieved + Inventory object. It then uses the vendor database to retrieve and + display the vendor. Because the vendor database is keyed by vendor name, + and because each inventory object contains this key, it is trivial to + retrieve the appropriate vendor record.

+ +
   private void displayInventoryRecord(DatabaseEntry theKey,
+                                        Inventory theInventory)
+        throws DatabaseException {
+
+        DatabaseEntry searchKey = null;
+        try {
+            String theSKU = new String(theKey.getData(), "UTF-8");
+            System.out.println(theSKU + ":");
+            System.out.println("\t " + theInventory.getItemName());
+            System.out.println("\t " + theInventory.getCategory());
+            System.out.println("\t " + theInventory.getVendor());
+            System.out.println("\t\tNumber in stock: " +
+            theInventory.getVendorInventory());
+            System.out.println("\t\tPrice per unit:  " +
+                theInventory.getVendorPrice());
+            System.out.println("\t\tContact: ");
+
+            searchKey =
+             new DatabaseEntry(theInventory.getVendor().getBytes("UTF-8"));
+        } catch (IOException willNeverOccur) {}
+        DatabaseEntry foundVendor = new DatabaseEntry();
+
+        if (myDbEnv.getVendorDB().get(null, searchKey, foundVendor,
+                LockMode.DEFAULT) != OperationStatus.SUCCESS) {
+            System.out.println("Could not find vendor: " +
+                theInventory.getVendor() + ".");
+            System.exit(-1);
+        } else {
+            Vendor theVendor =
+                (Vendor)vendorBinding.entryToObject(foundVendor);
+            System.out.println("\t\t " + theVendor.getAddress());
+            System.out.println("\t\t " + theVendor.getCity() + ", " +
+                theVendor.getState() + " " + theVendor.getZipcode());
+            System.out.println("\t\t Business Phone: " +
+                theVendor.getBusinessPhoneNumber());
+            System.out.println("\t\t Sales Rep: " +
+                                theVendor.getRepName());
+            System.out.println("\t\t            " +
+                theVendor.getRepPhoneNumber());
+       }
+    }
+

The remainder of this application provides a utility method used + to parse the command line options. From the perspective of this + document, this is relatively uninteresting. You can see how this is + implemented by looking at: +

+
JE_HOME/examples/je/gettingStarted/ExampleInventoryRead.java
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/dataaccessorclass.html b/docs/GettingStartedGuide/dataaccessorclass.html new file mode 100644 index 0000000..2f5583c --- /dev/null +++ b/docs/GettingStartedGuide/dataaccessorclass.html @@ -0,0 +1,119 @@ + + + + + + DataAccessor.java + + + + + + + + + +
+
+
+
+

DataAccessor.java

+
+
+
+

+ Now that we have implemented our data classes, + we can write a class that will provide + convenient access to our primary and + secondary indexes. + Note that like our data classes, this class is shared by both our + example programs. +

+

+ If you compare this class against our + Vendor and + Inventory + class implementations, you will see that the + primary and secondary indices declared there are + referenced by this class. +

+

+ See Vendor.java + and + Inventory.java + for those implementations. +

+
package persist.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex; 
+import com.sleepycat.persist.SecondaryIndex;
+                            
+public class DataAccessor {
+    // Open the indices
+    public DataAccessor(EntityStore store)
+        throws DatabaseException {
+
+        // Primary key for Inventory classes
+        inventoryBySku = store.getPrimaryIndex(
+            String.class, Inventory.class);
+
+        // Secondary key for Inventory classes
+        // Last field in the getSecondaryIndex() method must be
+        // the name of a class member; in this case, an Inventory.class
+        // data member.
+        inventoryByName = store.getSecondaryIndex(
+            inventoryBySku, String.class, "itemName");
+
+        // Primary key for Vendor class
+        vendorByName = store.getPrimaryIndex(
+            String.class, Vendor.class);
+    }
+
+    // Inventory Accessors
+    PrimaryIndex<String,Inventory> inventoryBySku;
+    SecondaryIndex<String,String,Inventory> inventoryByName;
+
+    // Vendor Accessors
+    PrimaryIndex<String,Vendor> vendorByName;
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/databases.html b/docs/GettingStartedGuide/databases.html new file mode 100644 index 0000000..72927c8 --- /dev/null +++ b/docs/GettingStartedGuide/databases.html @@ -0,0 +1,546 @@ + + + + + + Chapter 7. Databases + + + + + + + + + +
+
+
+
+

Chapter 7. Databases

+
+
+
+
+

+ Table of Contents +

+
+
+ + Opening Databases + +
+
+
+
+ + Deferred Write Databases + +
+
+ + Temporary Databases + +
+
+ + Closing Databases + +
+
+
+
+ + Database Properties + +
+
+ + Administrative Methods + +
+
+ + Database Example + +
+
+
+

In Berkeley DB Java Edition, a database is a collection of records. Records, + in turn, consist of key/data pairings. +

+

Conceptually, you can think of a Database as containing a two-column table where column 1
contains a key and column 2 contains data. Both the key and the data are managed using
DatabaseEntry class instances (see Database Records for details on this class). So,
fundamentally, using a JE Database involves putting, getting, and deleting database
records, which in turn involves efficiently managing information encapsulated by
DatabaseEntry objects. The next several chapters of this book are dedicated to those
activities.

+

Note that on disk, databases are stored in sequentially numbered log files in the
directory where the opening environment is located. JE log files are described in
Databases and Log Files.

+

Also, note that in the previous section of this book, Programming with the Direct
Persistence Layer, we described the DPL. The DPL handles all database management for you,
including creating all primary and secondary databases as required by your application.
That said, if you are using the DPL you can access the underlying database for a given
index, if necessary. See the Javadoc for the DPL for more information.

+
+
+
+
+

Opening Databases

+
+
+
+
+
+
+ + Deferred Write Databases + +
+
+ + Temporary Databases + +
+
+ + Closing Databases + +
+
+
+

+ You open a database by using the + Environment.openDatabase() + method (environments are described in Database Environments). This + method creates and returns a Database + object handle. + You must provide Environment.openDatabase() + with a database name. +

+

+ You can optionally provide Environment.openDatabase() + with a DatabaseConfig() object. + DatabaseConfig() allows you to set properties for + the database, such as whether it can be created if it does not currently + exist, whether you are opening it read-only, and whether the database is to support transactions. +

+

+ Note that by default, JE does not create databases if they do not already exist. + To override this behavior, set the creation property to true. +

+

+ Finally, if you configured your environment and database to support transactions, + you can optionally provide a transaction object to the + Environment.openDatabase(). + Transactions are described in the + Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

+

+ The following code fragment illustrates a database open: + +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+...
+
+Environment myDbEnvironment = null;
+Database myDatabase = null;
+
+...
+
+try {
+    // Open the environment. Create it if it does not already exist.
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    myDbEnvironment = new Environment(new File("/export/dbEnv"), 
+                                      envConfig);
+
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    myDatabase = myDbEnvironment.openDatabase(null, 
+                                              "sampleDatabase", 
+                                              dbConfig); 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+}
+
+
+
+
+

Deferred Write Databases

+
+
+
+

+ By default, JE database operations that modify the + database are written (logged) at the time of the operation. For transactional + databases, changes become durable when the transaction is committed. +

+

However, deferred write database operations are not written at the time of the operation.
Writing is deferred for as long as possible. The changes are only guaranteed to be
durable after the Database.sync() method is called or the database is properly closed.

+

+ Deferring writes in this manner has two performance advantages when performing + database modifications: +

+
+
  1. When multiple threads are performing writes, concurrency is increased because the
     bottleneck of writing to the log is avoided.

  2. Less total writing takes place. If a single record is modified more than once, or
     modified and deleted, then only the final result must be written. If a record is
     inserted and deleted before a database sync or close occurs, nothing at all is
     written to disk. The same advantage holds for writing internal index information.
+
+

+ Deferred write databases are useful for applications that perform a + great deal of database modifications, record additions, deletions, and + so forth. By delaying the data write, you delay the disk I/O. Depending + on your workload, this can improve your data throughput by quite a lot. +

+

+ While the durability of a deferred write database is only + guaranteed when + Database.sync() + is called or the database is properly closed, writing may also occur at other times. + For example, a JE checkpoint will effectively perform a + Database.sync() on all deferred + write databases that are open at the time of the checkpoint. If you are + using deferred write to load a large data set, and you want to reduce + writing as much as possible during the load, consider disabling the JE checkpointer. +

+
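As a minimal sketch, one way to disable the checkpointer is through the
je.env.runCheckpointer property before opening the environment; the environment home path
is illustrative only:

import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;

import java.io.File;
...

EnvironmentConfig envConfig = new EnvironmentConfig();
envConfig.setAllowCreate(true);
// Run the bulk load without the JE checkpointer.
envConfig.setConfigParam("je.env.runCheckpointer", "false");
Environment myDbEnv = new Environment(new File("/export/dbEnv"), envConfig);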

+ Also, if the JE cache overflows as database modifications occur, information discarded + from the cache is written to disk in order to avoid losing the changes. If you wish to reduce this + writing to a minimum, configure your cache to be large enough to hold the entire + data set being modified, or as large as possible. +

+
+

Note

+

+ Despite the examples noted in the previous paragraphs, there is no guarantee that changes + to a deferred write database are durable unless Database.sync() + is called or the database is closed. If you need guaranteed + durability for an operation, consider using transactions instead of deferred write. +

+
+

+ You should also be aware that Database.sync() is a + relatively expensive operation because all outstanding changes to the + database are written, including internal index information. If you find + that you are calling Database.sync() + frequently, consider using transactions. +

+

+ All other rules of behavior pertain to deferred write databases + as they do to normal databases. Deferred write databases must be + named and created just as you would a normal database. If you want to + delete the deferred write database, you must remove it just as + you would a normal database. This is true even if the deferred + write database is empty because its name persists in the + environment's namespace until such a time as the database is + removed. +

+

Note that determining whether a database is deferred write is a configuration option. It
is therefore possible to switch a database between "normal" mode and deferred write mode.
You might want to do this if, for example, you want to load a lot of data to the
database. In this case, loading data to the database while it is in deferred write state
is faster than in "normal" state, because you can avoid a lot of the normal disk I/O
overhead during the load process. Once the load is complete, sync the database, close it,
and then reopen it as a normal database. You can then continue operations as if the
database had been created as a "normal" database.

+

+ To configure a database as deferred write, set + DatabaseConfig.setDeferredWrite() + to true and then open the database with + that DatabaseConfig option. +

+
+

Note

+

+ If you are using the DPL, then you configure your entire + store to be deferred write using + StoreConfig.setDeferredWrite(). + You can also sync every database in your store using + EntityStore.sync(). +

+
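A minimal sketch of the DPL variant follows; the store name and the environment handle
(myDbEnv) are illustrative only:

import com.sleepycat.persist.EntityStore;
import com.sleepycat.persist.StoreConfig;
...

StoreConfig storeConfig = new StoreConfig();
storeConfig.setAllowCreate(true);
// Make the entire store deferred write.
storeConfig.setDeferredWrite(true);
EntityStore store = new EntityStore(myDbEnv, "MyStore", storeConfig);

// ... do work ...

// Flush all of the store's databases to disk.
store.sync();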
+

+ For example, the following code fragment opens and closes a + deferred write database: +

+
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+...
+
+Environment myDbEnvironment = null;
+Database myDatabase = null;
+
+...
+
+try {
+    // Open the environment. Create it if it does not already exist.
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    myDbEnvironment = new Environment(new File("/export/dbEnv"), 
+                                      envConfig);
+
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    // Make it deferred write
+    dbConfig.setDeferredWrite(true);
+    myDatabase = myDbEnvironment.openDatabase(null, 
+                                              "sampleDatabase", 
+                                              dbConfig); 
+
+    ...
+    // do work
+    ...
+    // Do this when you want the work to be persistent at a
+    // specific point, prior to closing the database.
+    myDatabase.sync();
+
+    // then close the database and environment here
+    // (described later in this chapter).
+
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+}
+
+
+
+
+
+

Temporary Databases

+
+
+
+

+ By default, all JE databases are durable; that is, the data that you put in them + will remain in them across program runs, unless you explicitly delete the data. + However, it is possible to configure a + temporary database that is not durable. A temporary database is + automatically deleted when it is closed or after a crash occurs. +

+

+ Temporary databases are essentially in-memory only databases. Therefore, + they are particularly useful for applications that want databases which + are truly temporary. +

+

+ Note that temporary databases do not always avoid disk I/O. It is particularly + important to realize that temporary databases can page to disk if the cache is not + large enough to hold the database's entire contents. Therefore, temporary database + performance is best when your in-memory cache is large enough to hold the database's + entire data-set. +

+

+ A temporary database operates internally in deferred write mode and has + the same performance advantages as described above for deferred write + databases (see Deferred Write Databases). + However, unlike deferred write databases, a temporary database is not written + during checkpoints and this provides an additional performance advantage. +

+

Temporary databases must be named and created just as you would a normal database. To
configure a database as temporary, set DatabaseConfig.setTemporary() to true and then
open the database with that DatabaseConfig instance.

+

+ For example: +

+
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+...
+
+Environment myDbEnvironment = null;
+Database myDatabase = null;
+
+...
+
+try {
+    // Open the environment. Create it if it does not already exist.
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    myDbEnvironment = new Environment(new File("/export/dbEnv"), 
+                                      envConfig);
+
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    // Make it a temporary database
+    dbConfig.setTemporary(true);
+    myDatabase = myDbEnvironment.openDatabase(null, 
+                                              "sampleDatabase", 
+                                              dbConfig); 
+
+    ...
+    // do work
+    ...
+
+    // then close the database and environment here
+    // (see the next section)
+
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+}
+
+
+
+
+
+

Closing Databases

+
+
+
+

Once you are done using the database, you must close it. You use the + Database.close() method to do this.

+

Closing a database causes it to become unusable until it is opened + again. If any cursors are open on the database, + JE warns you about them and then closes them for you. Active cursors during a database + close can cause unexpected results, especially if any of those cursors are + writing to the database in another thread. You should always make sure that all of your + database accesses have completed before closing your database.

+
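+ A minimal sketch of the safe ordering follows; the cursor handle myCursor is an assumption for illustration:
+
+// Close cursors first...
+if (myCursor != null) {
+    myCursor.close();
+}
+// ...and only then close the database.
+if (myDatabase != null) {
+    myDatabase.close();
+}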

It is recommended that you close all your + databases before closing the environment to which they belong.

+

Cursors are described in Using Cursors later in this manual.

+

+ The following illustrates database and environment close: +

+ +
import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.Environment;
+
+...
+
+try {
+        if (myDatabase != null) {
+            myDatabase.close();
+        }
+
+        if (myDbEnvironment != null) {
+            myDbEnvironment.close();
+        }
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} 
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/dbUsage.html b/docs/GettingStartedGuide/dbUsage.html new file mode 100644 index 0000000..077194f --- /dev/null +++ b/docs/GettingStartedGuide/dbUsage.html @@ -0,0 +1,203 @@ + + + + + + Database Example + + + + + + + + + +
+
+
+
+

Database Example

+
+
+
+

In Database Environment Management Example we created a class that manages an + Environment. We now extend that class to allow it + to open and manage multiple databases. Again, remember that you can find + this class in: +

+
JE_HOME/examples/je/gettingStarted/MyDbEnv.java
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+
+ +

+ Example 7.1 Database Management with MyDbEnv +

+
+

First, we need to import a few additional classes and set up some global variables to support databases. + The databases that we are configuring and creating here are used by applications developed in examples later in this guide.

+ +
// File MyDbEnv.java
+
+package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Database; 
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Environment;
+
+import java.io.File;
+
+public class MyDbEnv {
+    
+    private Environment myEnv;
+    private Database vendorDb;
+    private Database inventoryDb;
+
+    public MyDbEnv() {} 
+

+ + Next we need to update the MyDbEnv.setup() method to instantiate a DatabaseConfig object and to set some properties on it. These property values are determined by the value of the readOnly parameter. We want our databases to be read-only if the environment is also read-only, and we want to allow the databases to be created if they are not read-only. +

+ +
    public void setup(File envHome, boolean readOnly)
+            throws DatabaseException {
+
+        // Instantiate an environment and database configuration object
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+        // Configure the environment and databases for the read-only
+        // state as identified by the readOnly parameter on this 
+        // method call.
+        myEnvConfig.setReadOnly(readOnly);
+        myDbConfig.setReadOnly(readOnly);
+        // If the environment is opened for write, then we want to be
+        // able to create the environment and databases if 
+        // they do not exist.
+        myEnvConfig.setAllowCreate(!readOnly);
+        myDbConfig.setAllowCreate(!readOnly);
+
+        // Instantiate the Environment. This opens it and also possibly
+        // creates it.
+        myEnv = new Environment(envHome, myEnvConfig);
+
+        // Now create and open our databases.
+        vendorDb = myEnv.openDatabase(null,
+                                       "VendorDB",
+                                       myDbConfig); 
+
+        inventoryDb = myEnv.openDatabase(null,
+                                         "InventoryDB",
+                                         myDbConfig);
+    } 
+

+ Next we add getter methods that return our database handles. +

+ +
     // Getter methods
+    public Environment getEnvironment() {
+        return myEnv;
+    }
+
+    public Database getVendorDB() {
+        return vendorDb;
+    }
+
+    public Database getInventoryDB() {
+        return inventoryDb;
+    } 
+

+ Finally, we need to update the MyDbEnv.close() method to close our databases. +

+ +
    // Close the environment
+    public void close() {
+        if (myEnv != null) {
+            try {
+                vendorDb.close();
+                inventoryDb.close();
+                myEnv.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing MyDbEnv: " + 
+                                    dbe.toString());
+                System.exit(-1);
+            }
+        }
+    }
+}
+

We can now use MyDbEnv to open and close + both database environments and databases from the appropriate place in + our application. For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Database;
+
+import java.io.File;
+
+...
+
+MyDbEnv exampleDbEnv = new MyDbEnv();
+
+try {
+    exampleDbEnv.setup(new File("/directory/currently/exists"), true);
+    Database vendorDb = exampleDbEnv.getVendorDB();
+    Database inventoryDB = exampleDbEnv.getInventoryDB();
+
+    ...
+
+} catch(DatabaseException dbe) {
+    // Error code goes here
+} finally {
+    exampleDbEnv.close();
+} 
+
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/dbenvUsageExample.html b/docs/GettingStartedGuide/dbenvUsageExample.html new file mode 100644 index 0000000..57af62c --- /dev/null +++ b/docs/GettingStartedGuide/dbenvUsageExample.html @@ -0,0 +1,180 @@ + + + + + + Database Environment Management Example + + + + + + + + + +
+
+
+
+

Database Environment Management Example

+
+
+
+

+ This example provides a complete class that can open and close an environment. It is + both extended and used in subsequent examples in this book to open and close both + environments and databases. We do this so as to make the example code + shorter and easier to manage. You can find this class in: +

+
JE_HOME/examples/je/gettingStarted/MyDbEnv.java
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+
+ +

+ Example 2.1 Database Environment Management Class +

+
+

First we write the normal class declarations. We also set up some + private data members that are used to manage environment creation. We + use the class constructor to instantiate the EnvironmentConfig + object that is used to configure our environment when we open it.

+ +
// File MyDbEnv.java
+package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+
+
+public class MyDbEnv {
+
+    private Environment myEnv;
+
+    public MyDbEnv() {} 
+

Next we need a method to open the environment. This is responsible + for instantiating our Environment object. + Remember that instantiation is what opens the environment (or creates it + if the creation property is set to true and the + environment does not currently exist). +

+ +
    public void setup(File envHome, boolean readOnly) 
+            throws DatabaseException {
+
+        // Instantiate an environment configuration object
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        // Configure the environment for the read-only state as identified
+        // by the readOnly parameter on this method call.
+        myEnvConfig.setReadOnly(readOnly);
+        // If the environment is opened for write, then we want to be 
+        // able to create the environment if it does not exist.
+        myEnvConfig.setAllowCreate(!readOnly);
+
+        // Instantiate the Environment. This opens it and also possibly
+        // creates it.
+        myEnv = new Environment(envHome, myEnvConfig);
+    } 
+

+ Next we provide a getter method that allows us to retrieve the + Environment directly. This is needed for later + examples in this guide. +

+ +
    // Getter methods
+    public Environment getEnv() {
+        return myEnv;
+    } 
+

Finally, we need a method to close our Environment. + We wrap this operation in a try block so that it can + be used gracefully in a finally statement.

+ +
    // Close the environment
+    public void close() {
+        if (myEnv != null) {
+            try {
+                myEnv.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing environment: " + 
+                     dbe.toString());
+            }
+        }
+    }
+} 
+

+ This completes the MyDbEnv class. While the class is not particularly useful as it currently exists, we will build upon it throughout this book so that it eventually opens and closes all of the entity stores or databases required by our applications.

+

+ We can now use MyDbEnv to open and close a database environment + from the appropriate place in our application. For example: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+
+import java.io.File;
+
+...
+
+MyDbEnv exampleDbEnv = new MyDbEnv();
+
+try {    
+    exampleDbEnv.setup(new File("/directory/currently/exists"), true);
+    ...
+
+} catch(DatabaseException dbe) {
+    // Error code goes here
+} finally {
+    exampleDbEnv.close();
+} 
+
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/dbprops.html b/docs/GettingStartedGuide/dbprops.html new file mode 100644 index 0000000..70cf284 --- /dev/null +++ b/docs/GettingStartedGuide/dbprops.html @@ -0,0 +1,164 @@ + + + + + + Database Properties + + + + + + + + + +
+
+
+
+

Database Properties

+
+
+
+

You can set database properties using the DatabaseConfig + class. For each of the properties that you can set, there is a + corresponding getter method. Also, you can always retrieve the + DatabaseConfig object used by your database using + the Database.getConfig() method.

+
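+ For example, the following sketch reads a property back from an open database handle (myDatabase is assumed to be open):
+
+DatabaseConfig cfg = myDatabase.getConfig();
+// Check whether the database was opened with sorted duplicates.
+boolean dupsAllowed = cfg.getSortedDuplicates();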

The database properties that you can set are:

+
+
    +
  • +

    + DatabaseConfig.setAllowCreate() +

    +

    If true, the database is created when it is + opened. If false, the database open fails if the database does not + exist. This property has no meaning if the database currently exists. + Default is false.

    +
  • +

    + DatabaseConfig.setBtreeComparator() +

    +

+ Sets the class that is used to compare the keys found on two database records. This class determines the sort order for records in the database. By default, byte-for-byte comparison is used. For more information, see Using Comparators. A short comparator sketch also appears after the example at the end of this section.

    +
  • +

    + DatabaseConfig.setDuplicateComparator() +

    +

    + Sets the class that is used to compare two duplicate records in + the database. For more information, see + Using Comparators. + +

    +
  • +

    + DatabaseConfig.setSortedDuplicates() +

    +

    If true, duplicate records are allowed in the + database. If this value is false, then putting a duplicate record into the database + results in an error return from the put call. + Note that this property can be set only at database creation time. Default is false. +

    +

    + Note that your database must not support duplicates if it is to be associated with one or more + secondary indices. Secondaries are described in Secondary Databases. +

    +
  • +

    + DatabaseConfig.setExclusiveCreate() +

    +

    If true, the database open fails if the + database currently exists. That is, the open must result in the + creation of a new database. Default is false.

    +
  • +

    + DatabaseConfig.setReadOnly() +

    +

    If true, the database is opened for read activities only. + Default is false.

    +
  • +

    + DatabaseConfig.setTransactional() +

    +

    If true, the database supports transactions. + Default is false. Note that a database cannot support + transactions if the environment is non-transactional.

    +
+
+

For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+
+...
+// Environment open omitted for brevity
+...
+
+Database myDatabase = null;
+try {
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    dbConfig.setSortedDuplicates(true);
+    myDatabase = 
+        myDbEnv.openDatabase(null, 
+                             "sampleDatabase", 
+                             dbConfig); 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here.
+}
+
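+ To illustrate the comparator properties, the following is a sketch of a custom Btree comparator that reverses the default sort order. The class name is invented for illustration, and it assumes keys decode sensibly as strings in the platform default charset:
+
+package je.gettingStarted;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+public class ReverseKeyComparator
+    implements Comparator<byte[]>, Serializable {
+
+    public int compare(byte[] key1, byte[] key2) {
+        // Reverse the default lexicographic ordering.
+        return new String(key2).compareTo(new String(key1));
+    }
+}
+
+ The comparator class is then supplied before the database is created:
+
+dbConfig.setBtreeComparator(ReverseKeyComparator.class);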
+ + + diff --git a/docs/GettingStartedGuide/dbtUsage.html b/docs/GettingStartedGuide/dbtUsage.html new file mode 100644 index 0000000..1d78112 --- /dev/null +++ b/docs/GettingStartedGuide/dbtUsage.html @@ -0,0 +1,693 @@ + + + + + + Database Record Example + + + + + + + + + +
+
+
+
+

Database Record Example

+
+
+
+

In Database Example, we created + MyDbEnv, a class that manages + Environment and Database + opens and closes. We will now write an application that takes advantage of + this class to open databases, put a series of records in them, and then + close the databases and environment.

+

Remember that all of the classes and programs presented here can be + found in the following directory: +

+
JE_HOME/examples/je/gettingStarted
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+

Note that in this example, we are going to save two types of + information. First there are a series of inventory records that identify + information about some food items (fruits, vegetables, and desserts). + These records identify particulars about each item such as the vendor that + the item can be obtained from, how much the vendor has in stock, the price + per unit, and so forth.

+

+ We also want to manage vendor contact information, such as the + vendor's address and phone number, the sales representative's name + and his phone number, and so forth. +

+
+ +

+ Example 8.1 Inventory.java +

+
+

+ All Inventory data is encapsulated in an instance of the following class. Note that because this class is not serializable, we need a custom tuple binding in order to place it on a DatabaseEntry object. Because the TupleInput and TupleOutput classes used by custom tuple bindings support Java primitive numeric types rather than the corresponding wrapper classes, we use int and float here instead of Integer and Float.

+ +
// File Inventory.java
+package je.gettingStarted;
+
+public class Inventory {
+
+    private String sku;
+    private String itemName;
+    private String category;
+    private String vendor;
+    private int vendorInventory;
+    private float vendorPrice;
+
+    public void setSku(String data) {
+            sku = data;
+    }
+
+    public void setItemName(String data) {
+            itemName = data;
+    }
+
+    public void setCategory(String data) {
+            category = data;
+    }
+
+    public void setVendorInventory(int data) {
+            vendorInventory = data;
+    }
+
+    public void setVendor(String data) {
+            vendor = data;
+    }
+
+    public void setVendorPrice(float data) {
+            vendorPrice = data;
+    }
+
+    public String getSku() { return sku; }
+    public String getItemName() { return itemName; }
+    public String getCategory() { return category; }
+    public int getVendorInventory() { return vendorInventory; }
+    public String getVendor() { return vendor; }
+    public float getVendorPrice() { return vendorPrice; }
+
+} 
+
+
+
+
+ +

+ Example 8.2 Vendor.java +

+
+

+ The data for vendor records are stored in instances of the following + class. Notice that we are using serialization with this class simply + to demonstrate serializing a class instance. +

+ +
// File Vendor.java
+package je.gettingStarted;
+
+import java.io.Serializable;
+
+public class Vendor implements Serializable {
+
+    private String repName;
+    private String address;
+    private String city;
+    private String state;
+    private String zipcode;
+    private String bizPhoneNumber;
+    private String repPhoneNumber;
+    private String vendor;
+
+    public void setRepName(String data) {
+        repName = data;
+    }
+
+    public void setAddress(String data) {
+        address = data;
+    }
+
+    public void setCity(String data) {
+        city = data;
+    }
+
+    public void setState(String data) {
+        state = data;
+    }
+
+    public void setZipcode(String data) {
+        zipcode = data;
+    }
+
+    public void setBusinessPhoneNumber(String data) {
+        bizPhoneNumber = data;
+    }
+
+    public void setRepPhoneNumber(String data) {
+        repPhoneNumber = data;
+    }
+
+    public void setVendorName(String data) {
+        vendor = data;
+    }
+
+    ...
+    // Corresponding getter methods omitted for brevity.
+    // See examples/je/gettingStarted/Vendor.java
+    // for a complete implementation of this class.
+
+} 
+
+
+
+

+ Because we will not be using serialization to convert our + Inventory objects to a DatabaseEntry + object, we need a custom tuple binding: +

+
+ +

+ Example 8.3 InventoryBinding.java +

+
+ +
// File InventoryBinding.java
+package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+public class InventoryBinding extends TupleBinding {
+
+    // Implement this abstract method. Used to convert
+    // a DatabaseEntry to an Inventory object.
+    public Object entryToObject(TupleInput ti) {
+
+        String sku = ti.readString();
+        String itemName = ti.readString();
+        String category = ti.readString();
+        String vendor = ti.readString();
+        int vendorInventory = ti.readInt();
+        float vendorPrice = ti.readFloat();
+
+        Inventory inventory = new Inventory();
+        inventory.setSku(sku);
+        inventory.setItemName(itemName);
+        inventory.setCategory(category);
+        inventory.setVendor(vendor);
+        inventory.setVendorInventory(vendorInventory);
+        inventory.setVendorPrice(vendorPrice);
+
+        return inventory;
+    }
+
+    // Implement this abstract method. Used to convert an
+    // Inventory object to a DatabaseEntry object.
+    public void objectToEntry(Object object, TupleOutput to) {
+
+        Inventory inventory = (Inventory)object;
+
+        to.writeString(inventory.getSku());
+        to.writeString(inventory.getItemName());
+        to.writeString(inventory.getCategory());
+        to.writeString(inventory.getVendor());
+        to.writeInt(inventory.getVendorInventory());
+        to.writeFloat(inventory.getVendorPrice());
+    }
+} 
+
+
+
+
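+ As a quick sanity check, the binding can be exercised on its own. The following sketch round-trips an Inventory object through a DatabaseEntry; the field values are invented for illustration:
+
+TupleBinding inventoryBinding = new InventoryBinding();
+
+Inventory theInventory = new Inventory();
+theInventory.setSku("WAFRU123");          // hypothetical values
+theInventory.setItemName("Oranges");
+theInventory.setCategory("fruits");
+theInventory.setVendor("Some Vendor");
+theInventory.setVendorInventory(100);
+theInventory.setVendorPrice(0.71f);
+
+DatabaseEntry theData = new DatabaseEntry();
+// Object to bytes...
+inventoryBinding.objectToEntry(theInventory, theData);
+// ...and bytes back to an object.
+Inventory copy = (Inventory) inventoryBinding.entryToObject(theData);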

In order to store the data identified above, we write the + ExampleDatabasePut application. This application + loads the inventory and vendor databases for you.

+

Inventory information is stored in a Database + dedicated for that purpose. The key for each such record is a product SKU. + The inventory data stored in this database are objects of the + Inventory class (see Inventory.java for more information). + ExampleDatabasePut loads the inventory database + as follows:

+
+
    +
  1. +

    Reads the inventory data from a flat text file prepared in + advance for this purpose.

    +
  2. +

    Uses java.lang.String to create a key + based on the item's SKU.

    +
  3. +

    Uses an Inventory class instance for the + record data. This object is stored on a DatabaseEntry + object using InventoryBinding, a custom tuple + binding that we implemented above.

    +
  4. +

    Saves each record to the inventory database.

    +
+
+

Vendor information is also stored in a Database + dedicated for that purpose. The vendor data stored in this database are objects of the + Vendor class (see Vendor.java for more information). To load this + Database, ExampleDatabasePut + does the following:

+
+
    +
  1. +

    Reads the vendor data from a flat text file prepared in advance + for this purpose.

    +
  2. +

    Uses the vendor's name as the record's key.

    +
  3. +

    Uses a Vendor class instance for the + record data. This object is stored on a DatabaseEntry + object using com.sleepycat.bind.serial.SerialBinding.

    +
+
+
+ +

+ Example 8.4 Stored Class Catalog Management with MyDbEnv +

+
+

+ Before we can write ExampleDatabasePut, we need to update + MyDbEnv.java to support the class catalogs that we need for this application. +

+

+ To do this, we start by importing an additional class to support stored class catalogs: +

+ +
// File MyDbEnv.java
+package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Environment;
+
+import java.io.File;
+
+import com.sleepycat.bind.serial.StoredClassCatalog; 
+

+ We also need to add two additional private data members to this class. One supports the database used for the class + catalog, and the other is used as a handle for the class catalog itself. +

+ +
public class MyDbEnv {
+
+    private Environment myEnv;
+    private Database vendorDb;
+    private Database inventoryDb;
+    private Database classCatalogDb;
+
+    // Needed for object serialization
+    private StoredClassCatalog classCatalog;
+
+    public MyDbEnv() {} 
+

+ Next we need to update the MyDbEnv.setup() method to open the class catalog database and + create the class catalog. +

+ +
    public void setup(File envHome, boolean readOnly)
+            throws DatabaseException {
+
+        ...
+        // Database and environment configuration omitted for brevity
+        ...
+
+        // Instantiate the Environment. This opens it and also possibly
+        // creates it.
+        myEnv = new Environment(envHome, myEnvConfig);
+
+        // Now create and open our databases.
+        vendorDb = myEnv.openDatabase(null, "VendorDB", myDbConfig);
+
+        inventoryDb = myEnv.openDatabase(null, "InventoryDB", myDbConfig);
+
+        // Open the class catalog db. This is used to
+        // optimize class serialization.
+        classCatalogDb =
+            myEnv.openDatabase(null,
+                               "ClassCatalogDB",
+                               myDbConfig);
+
+        // Create our class catalog
+        classCatalog = new StoredClassCatalog(classCatalogDb);
+    } 
+

+ Next we need a getter method to return the class catalog. Note that we do not provide a getter for + the catalog database itself – our application has no need for that. +

+ +
// Getter methods
+    public Environment getEnvironment() {
+        return myEnv;
+    }
+
+    public Database getVendorDB() {
+        return vendorDb;
+    }
+
+    public Database getInventoryDB() {
+        return inventoryDb;
+    }
+
+    public StoredClassCatalog getClassCatalog() {
+        return classCatalog;
+    } 
+

+ Finally, we need to update the MyDbEnv.close() method to close the + class catalog database. +

+ +
    // Close the environment
+    public void close() {
+        if (myEnv != null) {
+            try {
+                vendorDb.close();
+                inventoryDb.close();
+                classCatalogDb.close();
+                myEnv.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing MyDbEnv: " +
+                                    dbe.toString());
+                System.exit(-1);
+            }
+        }
+    }
+}
+
+
+
+

+ So far we have identified the data that we want to store in our + databases and how we will convert that data in and out of + DatabaseEntry objects for database storage. We + have also updated MyDbEnv to manage our databases + for us. Now we write ExampleDatabasePut to + actually put the inventory and vendor data into their respective + databases. Because of the work that we have done so far, this + application is actually fairly simple to write. +

+
+ +

+ Example 8.5 ExampleDatabasePut.java +

+
+

First we need the usual series of import statements:

+ +
//File ExampleDatabasePut.java
+package je.gettingStarted;
+
+// Bind classes used to move class objects in and out of byte arrays.
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+
+// Standard JE database imports
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+
+// Most of this is used for loading data from a text file for storage
+// in the databases.
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+

Next comes the class declaration and the private data members that + we need for this class. Most of these are setting up default values for + the program.

+

Note that two DatabaseEntry objects are + instantiated here. We will reuse these for every database operation that + this program performs. Also a MyDbEnv object is + instantiated here. We can do this because its constructor never throws + an exception. See Stored Class Catalog Management with MyDbEnv for + its implementation details.

+

Finally, the inventory.txt and + vendors.txt files can be found in the GettingStarted + examples directory along with the classes described in this extended + example.

+ +
public class ExampleDatabasePut {
+
+    private static File myDbEnvPath = new File("/tmp/JEDB");
+    private static File inventoryFile = new File("./inventory.txt");
+    private static File vendorsFile = new File("./vendors.txt");
+
+    // DatabaseEntries used for loading records
+    private static DatabaseEntry theKey = new DatabaseEntry();
+    private static DatabaseEntry theData = new DatabaseEntry();
+
+    // Encapsulates the environment and databases.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+

+ Next come the usage() and main() methods. Notice the exception handling in the main() method. This is the only place in the application where we catch exceptions. For this reason, we must catch DatabaseException, which is thrown by the com.sleepycat.je.* classes.

+

Also notice the call to MyDbEnv.close() + in the finally block. This is the only place in the + application where MyDbEnv.close() is called. + MyDbEnv.close() is responsible for closing the + Environment and all open Database + handles for you.

+ +
    private static void usage() {
+        System.out.println("ExampleDatabasePut [-h <env directory>]");
+        System.out.println("    [-i <inventory file>]");
+        System.out.println("    [-v <vendors file>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleDatabasePut edp = new ExampleDatabasePut();
+        try {
+            edp.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleDatabasePut: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.err.println("Exception: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    } 
+

Next we write the ExampleDatabasePut.run() + method. Because our environment and databases are all opened by the + MyDbEnv.setup() method, the run() method is only responsible for calling + MyDbEnv.setup() and then calling the ExampleDatabasePut methods that + actually load the databases.

+ +
    private void run(String args[]) throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      false);      // is this environment read-only?
+
+        System.out.println("loading vendors db.");
+        loadVendorsDb();
+        System.out.println("loading inventory db.");
+        loadInventoryDb();
+    } 
+

This next method loads the vendor database. This method + uses serialization to convert the Vendor object + to a DatabaseEntry object.

+ +
   private void loadVendorsDb() 
+            throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List<String[]> vendors = loadFile(vendorsFile, 8);
+
+        // Now load the data into the database. The vendor's name is the
+        // key, and the data is a Vendor class object.
+
+        // Need a serial binding for the data
+        EntryBinding dataBinding =
+            new SerialBinding(myDbEnv.getClassCatalog(), Vendor.class);
+
+        for (int i = 0; i < vendors.size(); i++) {
+            String[] sArray = vendors.get(i);
+            Vendor theVendor = new Vendor();
+            theVendor.setVendorName(sArray[0]);
+            theVendor.setAddress(sArray[1]);
+            theVendor.setCity(sArray[2]);
+            theVendor.setState(sArray[3]);
+            theVendor.setZipcode(sArray[4]);
+            theVendor.setBusinessPhoneNumber(sArray[5]);
+            theVendor.setRepName(sArray[6]);
+            theVendor.setRepPhoneNumber(sArray[7]);
+
+            // The key is the vendor's name.
+            // ASSUMES THE VENDOR'S NAME IS UNIQUE!
+            String vendorName = theVendor.getVendorName();
+            try {
+                theKey = new DatabaseEntry(vendorName.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            // Convert the Vendor object to a DatabaseEntry object
+            // using our SerialBinding
+            dataBinding.objectToEntry(theVendor, theData);
+
+            // Put it in the database. These puts are transactionally
+            // protected (we're using autocommit).
+            myDbEnv.getVendorDB().put(null, theKey, theData);
+        }
+    } 
+

Now load the inventory database. This method uses our + custom tuple binding (see InventoryBinding.java) to convert the Inventory + object to a DatabaseEntry object.

+ +
    private void loadInventoryDb() 
+        throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List<String[]> inventoryArray = loadFile(inventoryFile, 6);
+
+        // Now load the data into the database. The item's sku is the
+        // key, and the data is an Inventory class object.
+
+        // Need a tuple binding for the Inventory class.
+        TupleBinding inventoryBinding = new InventoryBinding();
+
+        for (int i = 0; i < inventoryArray.size(); i++) {
+            String[] sArray = inventoryArray.get(i);
+            String sku = sArray[1];
+            try {
+                theKey = new DatabaseEntry(sku.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            Inventory theInventory = new Inventory();
+            theInventory.setItemName(sArray[0]);
+            theInventory.setSku(sArray[1]);
+            theInventory.setVendorPrice(
+                            (new Float(sArray[2])).floatValue());
+            theInventory.setVendorInventory(
+                            (new Integer(sArray[3])).intValue());
+            theInventory.setCategory(sArray[4]);
+            theInventory.setVendor(sArray[5]);
+
+            // Place the Inventory object on the DatabaseEntry object
+            // using the tuple binding we implemented in
+            // InventoryBinding.java
+            inventoryBinding.objectToEntry(theInventory, theData);
+
+            // Put it in the database.
+            myDbEnv.getInventoryDB().put(null, theKey, theData);
+
+        }
+    }
+

The remainder of this application provides utility methods to + read a flat text file into an array of strings and parse the + command line options. From the perspective of this document, these + things are relatively uninteresting. You can see how they are + implemented by looking at: +

+
JE_HOME/examples/je/gettingStarted/ExampleDatabasePut.java 
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+ +
    private static void parseArgs(String args[]) {
+        // Implementation omitted for brevity.
+    }
+
+    private List<String[]> loadFile(File theFile, int numFields) {
+        List<String[]> records = new ArrayList<String[]>();
+        // Implementation omitted for brevity.
+        return records;
+    }
+
+    protected ExampleDatabasePut() {}
+} 
+
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/diskthreshold.html b/docs/GettingStartedGuide/diskthreshold.html new file mode 100644 index 0000000..14fef3d --- /dev/null +++ b/docs/GettingStartedGuide/diskthreshold.html @@ -0,0 +1,124 @@ + + + + + + Setting Disk Thresholds + + + + + + + + + +
+
+
+
+

Setting Disk Thresholds

+
+
+
+

+ You can control the maximum amount of disk space that JE can use by setting two different + threshold values. If JE exceeds either of these threshold values, writes will no longer + be allowed to the database. Instead, when a write is attempted, DiskLimitException is thrown. + The thresholds you can set are: +

+
+
    +
  • +

    + EnvironmentConfig.MAX_DISK +

    +

+ Specifies an upper limit on the total number of bytes that can be used for data storage. By default, this property is set to 0, which means no upper limit is enforced. In that case, the value set for EnvironmentConfig.FREE_DISK controls how much disk space your database is allowed to consume.

    +

    + If multiple JE environments share the same storage volume, Oracle recommends that you + set EnvironmentConfig.MAX_DISK to a non-zero value, especially if an external application or service is + also consuming space on the disk volume. +

    +

+ This value can be managed using the EnvironmentMutableConfig.setMaxDisk() method. A short configuration sketch appears after this list.

    +
  • +

    + EnvironmentConfig.FREE_DISK +

    +

    + Specifies the minimum amount of free space to maintain on the disk volume. The + default value is 5 GB, which is large enough to allow manual recovery if the + free space threshold is exceeded. +

    +

    + If EnvironmentConfig.MAX_DISK is set to 0, then the total amount of + space your JE database can consume is: +

    +
    <disk_size> - <FREE_DISK>
    +

    + So for a 300 GB volume and a free disk size of 5 GB, your database can grow + to consume 295 GB. +

    +

    + If EnvironmentConfig.MAX_DISK is set to a non-zero value, then the total amount of space + your JE database can consume is: +

    +
    <MAX_DISK> - <FREE_DISK>
    +

    + So for the same 300 GB volume, if max disk is 100 GB and free disk is 5 GB, then + your database can consume at most 95 GB. +

    +

+ Be aware that the subtraction shown above is performed only if EnvironmentConfig.FREE_DISK is explicitly set or EnvironmentConfig.MAX_DISK is greater than 10 GB. See the EnvironmentConfig.FREE_DISK Javadoc for more information.

    +
+
+
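+ The following is a minimal sketch of setting both thresholds when an environment is opened; the 100 GB and 5 GB figures are arbitrary illustrations:
+
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+// Cap total storage (EnvironmentConfig.MAX_DISK) at 100 GB.
+envConfig.setMaxDisk(100L * 1024 * 1024 * 1024);
+// Reserve at least 5 GB of free space (EnvironmentConfig.FREE_DISK).
+envConfig.setConfigParam(EnvironmentConfig.FREE_DISK,
+                         String.valueOf(5L * 1024 * 1024 * 1024));
+Environment myDbEnvironment =
+    new Environment(new File("/export/dbEnv"), envConfig);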

+ For usage scenarios, see the EnvironmentConfig.MAX_DISK Javadoc. +

+
+ + + diff --git a/docs/GettingStartedGuide/dpl.html b/docs/GettingStartedGuide/dpl.html new file mode 100644 index 0000000..8784031 --- /dev/null +++ b/docs/GettingStartedGuide/dpl.html @@ -0,0 +1,279 @@ + + + + + + Part I. Programming with the Direct Persistence Layer + + + + + + + + + +
+
+
+
+

Part I. Programming with the Direct Persistence Layer

+
+
+
+
+
+

+ This section discusses how to build an + application using the DPL. The DPL is ideally + suited for those applications that want a + mechanism for storing and managing Java class + objects in a JE database. Note that the DPL + is best suited for applications that work with + classes with a relatively static schema. +

+

+ The DPL requires Java 1.5. +

+

+ If you are porting an application + from the Berkeley DB API, then you probably want + to use the base API instead of the DPL. For + information on using the base API, see + Programming with the Base API. +

+ +
+
+ + + diff --git a/docs/GettingStartedGuide/dpl_delete.html b/docs/GettingStartedGuide/dpl_delete.html new file mode 100644 index 0000000..775be78 --- /dev/null +++ b/docs/GettingStartedGuide/dpl_delete.html @@ -0,0 +1,114 @@ + + + + + + Deleting Entity Objects + + + + + + + + + +
+
+
+
+

Deleting Entity Objects

+
+
+
+

+ The simplest way to remove an object from your entity store + is to delete it by its primary index. For example, + using the SimpleDA class that we + created earlier in this document + (see SimpleDA.class), + you can delete the SimpleEntityClass + object with a primary key of keyone as + follows: +

+
sda.pIdx.delete("keyone");
+

+ You can also delete objects by their secondary keys. When + you do this, all objects related to the secondary key are + deleted, unless the key is a foreign object. +

+

+ For example, the following deletes all + SimpleEntityClass with a secondary + key of skeyone: +

+
sda.sIdx.delete("skeyone");
+

+ You can delete any single object by positioning a cursor to + that object and then calling the cursor's + delete() method. +

+
PrimaryIndex<String,SimpleEntityClass> pi =
+    store.getPrimaryIndex(String.class, SimpleEntityClass.class);
+
+SecondaryIndex<String,String,SimpleEntityClass> si = 
+    store.getSecondaryIndex(pi, String.class, "sKey");
+
+EntityCursor<SimpleEntityClass> sec_cursor = 
+    si.subIndex("skeyone").entities(); 
+
+try {
+    SimpleEntityClass sec;
+    // Iterate over the duplicates for this secondary key.
+    while ((sec = sec_cursor.next()) != null) {
+        if ("some value".equals(sec.getSKey())) {
+            // Deletes the entity at the cursor's current position.
+            sec_cursor.delete();
+        }
+    }
+// Always make sure the cursor is closed when we are done with it.
+} finally {
+    sec_cursor.close();
+}
+

+ Finally, if you are indexing by foreign key, then the result of deleting the key is determined by the foreign key constraint that you have set for the index. See Foreign Key Constraints for more information.

+
+ + + diff --git a/docs/GettingStartedGuide/dpl_entityjoin.html b/docs/GettingStartedGuide/dpl_entityjoin.html new file mode 100644 index 0000000..f91091c --- /dev/null +++ b/docs/GettingStartedGuide/dpl_entityjoin.html @@ -0,0 +1,193 @@ + + + + + + Join Cursors + + + + + + + + + +
+
+
+
+

Join Cursors

+
+
+
+

+ If you have two or more secondary indexes set for + an entity object, then you can retrieve sets of + objects based on the intersection of multiple + secondary index values. You do this using an + EntityJoin + class. +

+

+ For example, suppose you had an entity class that + represented automobiles. In that case, you might + be storing information about automobiles such as + color, number of doors, fuel mileage, + automobile type, number of passengers, make, model, and year, + to name just a few. +

+

+ If you created secondary indexes based on this information, then you could use an EntityJoin to return all those objects representing cars that have, say, two doors, that were built in 2002, and that are green in color.

+

+ To create a join cursor, you: +

+
+
    +
  1. +

    + Open the primary index for the + entity class on which you want to + perform the join. +

    +
  2. +

    + Open the secondary indexes that you + want to use for the join. +

    +
  3. +

    + Instantiate an + EntityJoin + object (you use the primary index + to do this). +

    +
  4. +

    + Use two or more calls to + EntityJoin.addCondition() + to identify the secondary indexes + and their values that you want to use + for the equality match. +

    +
  5. +

    + Call + EntityJoin.entities() + to obtain a cursor that you can use + to iterate over the join results. +

    +
+
+

+ For example, suppose we had an entity class + that included the following features: +

+
package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+import com.sleepycat.persist.model.SecondaryKey;
+
+@Entity
+public class Automobiles {
+
+    // Primary key is the vehicle identification number
+    @PrimaryKey
+    private String vin;
+
+    // Secondary key is the vehicle's make
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String make;
+
+    // Secondary key is the vehicle's color
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String color;
+
+    ...
+
+    public String getVIN() {
+        return vin;
+    }
+
+    public String getMake() {
+        return make;
+    }
+
+    public String getColor() {
+        return color;
+    }
+    
+    ... 
+

+ Then we could perform an entity join that searches for all the + red automobiles made by Toyota as follows: +

+
+PrimaryIndex<String,Automobiles> vin_pidx;
+SecondaryIndex<String,String,Automobiles> make_sidx;
+SecondaryIndex<String,String,Automobiles> color_sidx;
+
+EntityJoin<String,Automobiles> join = new EntityJoin(vin_pidx);
+join.addCondition(make_sidx,"Toyota");
+join.addCondition(color_sidx,"Red");
+
+// Now iterate over the results of the join operation
+ForwardCursor<Automobiles> join_cursor = join.entities();
+try {
+    for (Automobiles autoi : join_cursor) {
+        // do something with each object "autoi"
+    }
+// Always make sure the cursor is closed when we are done with it.
+} finally {
+    join_cursor.close();
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/dpl_example.html b/docs/GettingStartedGuide/dpl_example.html new file mode 100644 index 0000000..12a4613 --- /dev/null +++ b/docs/GettingStartedGuide/dpl_example.html @@ -0,0 +1,275 @@ + + + + + + Chapter 6. A DPL Example + + + + + + + + + +
+
+
+
+

Chapter 6. A DPL Example

+
+
+
+
+

+ Table of Contents +

+
+
+ + Vendor.java + +
+
+ + Inventory.java + +
+
+ + MyDbEnv + +
+
+ + DataAccessor.java + +
+
+ + ExampleDatabasePut.java + +
+
+ + ExampleInventoryRead.java + +
+
+
+

+ In order to illustrate DPL usage, we provide a + complete working example in this chapter. This example + reads and writes inventory and vendor information for a + mythical business. The application consists of the + following classes: +

+
+ +
+

+ Be aware that this example can be found in your JE distribution in + the following location: +

+

+ JE_HOME/examples/persist/gettingStarted +

+

+ where JE_HOME is the location where you + placed your JE distribution. +

+
+
+
+
+

Vendor.java

+
+
+
+

+ The simplest class that our example wants to store contains + vendor contact information. This class contains no + secondary indices so all we have to do is identify it + as an entity class and identify the field in the + class used for the primary key. +

+

+ In the following example, we identify the + vendor data member as containing the + primary key. This data member is meant to contain a + vendor's name. Because of the way we will use our + EntityStore, the value + provided for this data member must be unique within + the store or runtime errors will result. +

+

+ When used with the DPL, our + Vendor class appears as + follows. Notice that the @Entity + annotation appears immediately before the class + declaration, and the @PrimaryKey + annotation appears immediately before the + vendor data member declaration. +

+
package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+
+@Entity
+public class Vendor {
+
+    private String address;
+    private String bizPhoneNumber;
+    private String city;
+    private String repName;
+    private String repPhoneNumber;
+    private String state;
+
+    // Primary key is the vendor's name
+    // This assumes that the vendor's name is
+    // unique in the database.
+    @PrimaryKey
+    private String vendor;
+
+    private String zipcode;
+
+    public void setRepName(String data) {
+        repName = data;
+    }
+
+    public void setAddress(String data) {
+        address = data;
+    }
+
+    public void setCity(String data) {
+        city = data;
+    }
+
+    public void setState(String data) {
+        state = data;
+    }
+
+    public void setZipcode(String data) {
+        zipcode = data;
+    }
+
+    public void setBusinessPhoneNumber(String data) {
+        bizPhoneNumber = data;
+    }
+
+    public void setRepPhoneNumber(String data) {
+        repPhoneNumber = data;
+    }
+
+    public void setVendorName(String data) {
+        vendor = data;
+    }
+
+    public String getRepName() {
+        return repName;
+    }
+
+    public String getAddress() {
+        return address;
+    }
+
+    public String getCity() {
+        return city;
+    }
+
+    public String getState() {
+        return state;
+    }
+
+    public String getZipcode() {
+        return zipcode;
+    }
+
+    public String getBusinessPhoneNumber() {
+        return bizPhoneNumber;
+    }
+
+    public String getRepPhoneNumber() {
+        return repPhoneNumber;
+    }
+} 
+

+ For this class, the vendor value is set for an individual + Vendor class object by + the setVendorName() + method. If our example code fails to set this + value before storing the object, the data + member used to store the primary key is set to a + null value. This would result in a runtime + error. +

+
+
+ + + diff --git a/docs/GettingStartedGuide/dpl_exampledatabaseput.html b/docs/GettingStartedGuide/dpl_exampledatabaseput.html new file mode 100644 index 0000000..5650a1c --- /dev/null +++ b/docs/GettingStartedGuide/dpl_exampledatabaseput.html @@ -0,0 +1,337 @@ + + + + + + ExampleDatabasePut.java + + + + + + + + + +
+
+
+
+

ExampleDatabasePut.java

+
+
+
+

+ Our example reads inventory and vendor information from + flat text files, encapsulates this data in objects of + the appropriate type, and then writes each object to an + EntityStore. +

+

+ To begin, we import the Java classes that our example needs. Most of the imports are related to reading the raw data from flat text files and breaking it apart for use with our data classes. We also import classes from the JE package, but we do not import any classes from the DPL. This is because we have placed almost all of our DPL work in other classes, so there is no need to use those APIs directly here.

+
package persist.gettingStarted;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.je.DatabaseException; 
+

+ Now we can begin the class itself. Here we set default paths + for the on-disk resources that we require (the environment + home, and the location of the text files containing our sample + data). We also declare DataAccessor + and MyDbEnv members. We describe these + classes and show their implementation in + DataAccessor.java + and + MyDbEnv. +

+
public class ExampleDatabasePut {
+
+    private static File myDbEnvPath = new File("/tmp/JEDB");
+    private static File inventoryFile = new File("./inventory.txt");
+    private static File vendorsFile = new File("./vendors.txt");
+
+    private DataAccessor da;
+
+    // Encapsulates the environment and data store.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+

+ Next, we provide our usage() method. The command line options there are necessary only if the default values for the on-disk resources are not sufficient.

+
    private static void usage() {
+        System.out.println("ExampleDatabasePut [-h <env directory>]");
+        System.out.println("      [-i <inventory file>]");
+        System.out.println("      [-v <vendors file>]");
+        System.exit(-1);
+    } 
+

+ Our main() method is also reasonably + self-explanatory. We simply instantiate an + ExampleDatabasePut object there and then + call its run() method. We also provide a + top-level try block there for any exceptions that might be thrown + during runtime. +

+

+ Notice that the finally statement in the + top-level try block calls + MyDbEnv.close(). This method closes our + EntityStore and Environment + objects. By placing it here in the finally + statement, we can make sure that our store and environment are + always cleanly closed. +

+
    public static void main(String args[]) {
+        ExampleDatabasePut edp = new ExampleDatabasePut();
+        try {
+            edp.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleDatabasePut: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    } 
+

+ Our run() method does four + things. It calls MyDbEnv.setup(), + which opens our Environment and + EntityStore. It then instantiates a + DataAccessor object, which we will use + to write data to the store. It calls + loadVendorsDb() which loads all of the + vendor information. And then it calls + loadInventoryDb() which loads all of + the inventory information. +

+

+ Notice that the MyDbEnv object is being set up as read-write. This results in the EntityStore being opened for transactional support. (See MyDbEnv for implementation details.)

+
    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath,  // Path to the environment home 
+                      false);       // Environment read-only?
+
+        // Open the data accessor. This is used to store
+        // persistent objects.
+        da = new DataAccessor(myDbEnv.getEntityStore());
+
+        System.out.println("loading vendors db....");
+        loadVendorsDb();
+
+        System.out.println("loading inventory db....");
+        loadInventoryDb();
+    } 
+

+ We can now implement the loadVendorsDb() + method. This method is responsible for reading the vendor + contact information from the appropriate flat-text file, + populating Vendor class objects with the + data and then writing it to the EntityStore. + As explained above, each individual object is written with + transactional support. However, because a transaction handle is + not explicitly used, the write is performed using auto-commit. + This happens because the EntityStore + was opened to support transactions. +

+

+ To actually write each class to the + EntityStore, we simply call the + PrimaryIndex.put() method for the + Vendor entity instance. We obtain this + method from our DataAccessor + class. +

+
    private void loadVendorsDb()
+            throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List vendors = loadFile(vendorsFile, 8);
+
+        // Now load the data into the store.
+        for (int i = 0; i < vendors.size(); i++) {
+            String[] sArray = (String[])vendors.get(i);
+            Vendor theVendor = new Vendor();
+            theVendor.setVendorName(sArray[0]);
+            theVendor.setAddress(sArray[1]);
+            theVendor.setCity(sArray[2]);
+            theVendor.setState(sArray[3]);
+            theVendor.setZipcode(sArray[4]);
+            theVendor.setBusinessPhoneNumber(sArray[5]);
+            theVendor.setRepName(sArray[6]);
+            theVendor.setRepPhoneNumber(sArray[7]);
+
+            // Put it in the store.
+            da.vendorByName.put(theVendor);
+        }
+    } 
+
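+ If you wanted to group several writes into a single transaction instead of relying on auto-commit, the general shape is sketched below. This assumes the store was opened transactionally and that the Environment handle is available (here via a hypothetical myDbEnv.getEnv() accessor):
+
+Transaction txn = myDbEnv.getEnv().beginTransaction(null, null);
+try {
+    // All writes in this block commit or abort together.
+    da.vendorByName.put(txn, theVendor);
+    // ... more writes in the same transaction ...
+    txn.commit();
+} catch (DatabaseException dbe) {
+    txn.abort();
+    throw dbe;
+}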

+ Now we can implement our loadInventoryDb() + method. This does exactly the same thing as the + loadVendorsDb() + method. +

+
    private void loadInventoryDb()
+        throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List inventoryArray = loadFile(inventoryFile, 6);
+
+        // Now load the data into the store. The item's sku is the
+        // key, and the data is an Inventory class object.
+
+        for (int i = 0; i < inventoryArray.size(); i++) {
+            String[] sArray = (String[])inventoryArray.get(i);
+            String sku = sArray[1];
+
+            Inventory theInventory = new Inventory();
+            theInventory.setItemName(sArray[0]);
+            theInventory.setSku(sArray[1]);
+            theInventory.setVendorPrice(
+                (new Float(sArray[2])).floatValue());
+            theInventory.setVendorInventory(
+                (new Integer(sArray[3])).intValue());
+            theInventory.setCategory(sArray[4]);
+            theInventory.setVendor(sArray[5]);
+
+            // Put it in the store. Note that this causes our secondary key
+            // to be automatically updated for us.
+            da.inventoryBySku.put(theInventory);
+        }
+    } 
+

+ The remainder of this example simply parses the command line and loads data from a flat-text file. There is nothing here that is of specific interest to the DPL, but we show this part of the example anyway in the interest of completeness.

+
    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                  case 'h':
+                    myDbEnvPath = new File(args[++i]);
+                    break;
+                  case 'i':
+                    inventoryFile = new File(args[++i]);
+                    break;
+                  case 'v':
+                    vendorsFile = new File(args[++i]);
+                    break;
+                  default:
+                    usage();
+                }
+            }
+        }
+    }
+
+    private List loadFile(File theFile, int numFields) {
+        List<String[]> records = new ArrayList<String[]>();
+        try {
+            String theLine = null;
+            FileInputStream fis = new FileInputStream(theFile);
+            BufferedReader br = 
+                new BufferedReader(new InputStreamReader(fis));
+            while((theLine=br.readLine()) != null) {
+                String[] theLineArray = theLine.split("#");
+                if (theLineArray.length != numFields) {
+                    System.out.println("Malformed line found in " + 
+                        theFile.getPath());
+                    System.out.println("Line was: '" + theLine + "'");
+                    System.out.println("length found was: " + 
+                        theLineArray.length);
+                    System.exit(-1);
+                }
+                records.add(theLineArray);
+            }
+            // Close the input stream handle
+            fis.close();
+        } catch (FileNotFoundException e) {
+            System.err.println(theFile.getPath() + " does not exist.");
+            e.printStackTrace();
+            usage();
+        } catch (IOException e)  {
+            System.err.println("IO Exception: " + e.toString());
+            e.printStackTrace();
+            System.exit(-1);
+        }
+        return records;
+    }
+
+    protected ExampleDatabasePut() {}
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/dpl_exampleinventoryread.html b/docs/GettingStartedGuide/dpl_exampleinventoryread.html new file mode 100644 index 0000000..0a1c377 --- /dev/null +++ b/docs/GettingStartedGuide/dpl_exampleinventoryread.html @@ -0,0 +1,271 @@ + + + + + + ExampleInventoryRead.java + + + + + + + + + +
+
+
+
+

ExampleInventoryRead.java

+
+
+
+

+ ExampleInventoryRead + retrieves + inventory information from our entity store and + displays it. When it displays each inventory item, it + also displays the related vendor contact information. +

+

+ ExampleInventoryRead + can do one of two things. If you provide no search + criteria, it displays all of the inventory items in the + store. If you provide an item name (using the + -s command line switch), then only + those inventory items with that name are displayed. +

+

+ The beginning of our example is almost identical to our + ExampleDatabasePut + example program. We + repeat that example code here for the sake of + completeness. For a complete walk-through of it, see + the previous section (ExampleDatabasePut.java). +

+
package persist.gettingStarted;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityCursor;
+
+public class ExampleInventoryRead {
+
+    private static File myDbEnvPath =
+        new File("/tmp/JEDB");
+
+    private DataAccessor da;
+
+    // Encapsulates the database environment.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+
+    // The item to locate if the -s switch is used
+    private static String locateItem;
+
+    private static void usage() {
+        System.out.println("ExampleInventoryRead [-h <env directory>]" +
+                           "[-s <item to locate>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleInventoryRead eir = new ExampleInventoryRead();
+        try {
+            eir.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleInventoryRead: " + dbe.toString());
+            dbe.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+
+    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      true);       // is this environment read-only?
+
+        // Open the data accessor. This is used to retrieve
+        // persistent objects.
+        da = new DataAccessor(myDbEnv.getEntityStore());
+
+        // If a item to locate is provided on the command line,
+        // show just the inventory items using the provided name.
+        // Otherwise, show everything in the inventory.
+        if (locateItem != null) {
+            showItem();
+        } else {
+            showAllInventory();
+        }
+    } 
+

+ The first method that we provide is used to show inventory + items related to a given inventory name. This method is called + only if an inventory name is passed to + ExampleInventoryRead + via the -s option. Given the sample data + that we provide with this example, each matching inventory name + will result in the display of three inventory objects. +

+

+ To display these objects we use the + Inventory class' + inventoryByName secondary index to retrieve + an EntityCursor, and then we iterate + over the resulting objects using the cursor. +

+

+ Notice that this method calls + displayInventoryRecord() + to display each individual object. We show this + method a little later in the example. +

+
    // Shows all the inventory items that exist for a given
+    // inventory name.
+    private void showItem() throws DatabaseException {
+
+        // Use the inventory name secondary key to retrieve
+        // these objects.
+        EntityCursor<Inventory> items =
+            da.inventoryByName.subIndex(locateItem).entities();
+        try {
+            for (Inventory item : items) {
+                displayInventoryRecord(item);
+            }
+        } finally {
+            items.close();
+        }
+    } 
+

+ Next we implement showAllInventory(), + which shows all of the Inventory + objects in the store. To do this, we + obtain an EntityCursor + from the Inventory class' + primary index and, again, we iterate using that cursor. +

+
    // Displays all the inventory items in the store
+    private void showAllInventory()
+        throws DatabaseException {
+
+        // Get a cursor that will walk every
+        // inventory object in the store.
+        EntityCursor<Inventory> items =
+            da.inventoryBySku.entities();
+
+        try {
+            for (Inventory item : items) {
+                displayInventoryRecord(item);
+            }
+        } finally {
+            items.close();
+        }
+    } 
+

+ Now we implement + displayInventoryRecord(). This + uses the getter methods on the Inventory + class to obtain the information that we want to display. + The only thing interesting about this method is that we + also retrieve Vendor objects within it. + These objects are retrieved using the Vendor + class's primary index, and the key + for the retrieval comes from the Inventory + object that we are displaying at the time. +

+
    private void displayInventoryRecord(Inventory theInventory)
+            throws DatabaseException {
+
+            System.out.println(theInventory.getSku() + ":");
+            System.out.println("\t " + theInventory.getItemName());
+            System.out.println("\t " + theInventory.getCategory());
+            System.out.println("\t " + theInventory.getVendor());
+            System.out.println("\t\tNumber in stock: " +
+                theInventory.getVendorInventory());
+            System.out.println("\t\tPrice per unit:  " +
+                theInventory.getVendorPrice());
+            System.out.println("\t\tContact: ");
+
+            Vendor theVendor =
+                    da.vendorByName.get(theInventory.getVendor());
+            assert theVendor != null;
+
+            System.out.println("\t\t " + theVendor.getAddress());
+            System.out.println("\t\t " + theVendor.getCity() + ", " +
+                theVendor.getState() + " " + theVendor.getZipcode());
+            System.out.println("\t\t Business Phone: " +
+                theVendor.getBusinessPhoneNumber());
+            System.out.println("\t\t Sales Rep: " +
+                                theVendor.getRepName());
+            System.out.println("\t\t            " +
+                theVendor.getRepPhoneNumber());
+    } 
+

+ The last remaining parts of the example are used to parse + the command line. This is not very + interesting for our purposes here, but we show it anyway + for the sake of completeness. +

+
    protected ExampleInventoryRead() {}
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        myDbEnvPath = new File(args[++i]);
+                    break;
+                    case 's':
+                        locateItem = args[++i];
+                    break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/dpl_replace.html b/docs/GettingStartedGuide/dpl_replace.html new file mode 100644 index 0000000..1df533c --- /dev/null +++ b/docs/GettingStartedGuide/dpl_replace.html @@ -0,0 +1,109 @@ + + + + + + Replacing Entity Objects + + + + + + + + + +
+
+
+
+

Replacing Entity Objects

+
+
+
+

+ To modify a stored entity object, retrieve it, update + it, then put it back to the entity store: +

+
+SimpleEntityClass sec = sda.pIdx.get("keyone");
+sec.setSKey("skeyoneupdated");
+sda.pIdx.put(sec);
+
+

+ Note that because we updated a field on the object that is + a secondary key, this object will now be accessible by the + secondary key of skeyoneupdated instead + of the previous value, which was skeyone. +
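+ For example, a minimal sketch of retrieving the object by its updated secondary key (this assumes sda also exposes a secondary index handle, called sIdx here for illustration): +
+
+SimpleEntityClass sec2 = sda.sIdx.get("skeyoneupdated");
+// A lookup on the old value, sda.sIdx.get("skeyone"),
+// would now return null.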

+

+ Be aware that if you modify the object's primary key, the behavior is + somewhat different. In this case, you cause a new instance + of the object to be created in the store, instead of + replacing an existing instance: +

+
// Results in two objects in the store.  One with a
+// primary index of "keyfive" and the other with primary index of 
+//'keyfivenew'.
+SimpleEntityClass sec = sda.pIdx.get("keyfive");
+sec.setPKey("keyfivenew");
+sda.pIdx.put(sec); 
+

+ Finally, if you are iterating over a collection of objects + using an EntityCursor, you can + update each object in turn using + EntityCursor.update(). Note, + however, that you must be iterating using a + PrimaryIndex; this operation is not + allowed if you are using a + SecondaryIndex. +

+

+ For example, the following iterates over every + SimpleEntityClass object in the entity + store, and it changes them all so that they have a + secondary index of updatedskey: +

+
EntityCursor<SimpleEntityClass> sec_pcursor = sda.pIdx.entities();
+for (SimpleEntityClass sec : sec_pcursor) {
+    sec.setSKey("updatedskey");
+    sec_pcursor.update(sec);
+}
+sec_pcursor.close(); 
+
+ + + diff --git a/docs/GettingStartedGuide/dplindexcreate.html b/docs/GettingStartedGuide/dplindexcreate.html new file mode 100644 index 0000000..ce9708c --- /dev/null +++ b/docs/GettingStartedGuide/dplindexcreate.html @@ -0,0 +1,419 @@ + + + + + + Creating Indexes + + + + + + + + + +
+
+
+
+

Creating Indexes

+
+
+
+ +

+ To create an index using the DPL, you use Java + annotations to declare which feature on the class is used + for the primary index, and which features (if any) are to + be used as secondary indexes. +

+

+ All entity classes stored in the DPL must have a + primary index declared for them. +

+

+ Entity classes can have zero or more secondary + indexes declared for them. There is no limit on the + number of secondary indexes that you can declare. +

+
+
+
+
+

Declaring Primary Indexes

+
+
+
+

+ You declare a primary key for an entity class by + using the @PrimaryKey + annotation. This annotation must appear + immediately before the data member which + represents the class's primary key. For example: +

+
package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+
+@Entity
+public class Vendor {
+
+    private String address;
+    private String bizPhoneNumber;
+    private String city;
+    private String repName;
+    private String repPhoneNumber;
+    private String state;
+
+    // Primary key is the vendor's name
+    // This assumes that the vendor's name is
+    // unique in the database.
+    @PrimaryKey
+    private String vendor;
+
+    ... 
+

+ For this class, the vendor value is set for an individual + Vendor class object by + the setVendorName() + method. If our example code fails to set this + value before storing the object, the data + member used to store the primary key will be + null, which results in a runtime + error when the object is stored. +

+

+ You can avoid the need to explicitly set a + value for a class's primary index by specifying + a sequence to be used for the primary key. This + results in a unique integer value being used + as the primary key for each stored object. +

+

+ You declare that a sequence is to be used by specifying + the sequence keyword to the + @PrimaryKey annotation. You must + also provide a name for the sequence. For example: +

+
@PrimaryKey(sequence="Sequence_Namespace")
+long myPrimaryKey; 
+
+
+
+
+
+

Declaring Secondary Indexes

+
+
+
+

+ To declare a secondary index, we use the + @SecondaryKey annotation. Note + that when we do this, we must declare what sort of + an index it is; that is, what is its relationship to + other data in the data store. +

+

+ The kinds of indexes that we + can declare are: +

+
+
    +
  • +

    + ONE_TO_ONE +

    +

    + This relationship indicates that + the secondary key is unique to the + object. If an object is stored with a + secondary key that already + exists in the data store, a run + time error is raised. +

    +

    + For example, a person object might + be stored with a primary key of a + social security number (in the US), + with a secondary key of the + person's employee number. Both + values are expected to be unique in + the data store. +

    +
  • +
  • +

    + MANY_TO_ONE +

    +

    + Indicates that the secondary key + may be used for multiple + objects in the data store. That is, + the key appears more than + once, but for each stored object it + can be used only once. +

    +

    + Consider a data store that relates + managers to employees. A given + manager will have multiple + employees, but each employee is + assumed to have just one manager. + In this case, the manager's + employee number might be a + secondary key, so that you can + quickly locate all the objects + related to that manager's + employees. +

    +
  • +
  • +

    + ONE_TO_MANY +

    +

+ Indicates that the secondary key + might be used more than once for a + given object. Index keys + themselves are assumed to be + unique, but a given object can + have multiple such keys. +

    +

+ For example, employees might have + multiple unique email addresses. In + this case, any given object can be + accessed by one or more email + addresses. Each such address is + unique in the data store, but each + such address will relate to a + single employee object. +

    +
  • +
  • +

    + MANY_TO_MANY +

    +

    + There can be multiple keys for + any given object, and for any given + key there can be many related + objects. +

    +

    + For example, suppose your + organization has a shared + resource, such as printers. You + might want to track which + printers a given employee can + use (there might be more than + one). You might also want to + track which employees can use a + specific printer. This + represents a many-to-many + relationship. +

    +
  • +
+
+

+ Note that for ONE_TO_ONE and + MANY_TO_ONE relationships, you + need a simple data member (not an array or + collection) to hold the key. For + ONE_TO_MANY and + MANY_TO_MANY relationships, you + need an array or collection to hold the keys: +

+
@SecondaryKey(relate=ONE_TO_ONE)
+private String primaryEmailAddress = new String();
+
+@SecondaryKey(relate=ONE_TO_MANY)
+private Set<String> emailAddresses = new HashSet<String>(); 
+
+
+
+
+
+

Foreign Key Constraints

+
+
+
+

+ Sometimes a secondary index is related in some + way to another entity class that is also + contained in the data store. That is, the + secondary key might be the primary key for + another entity class. If this is the case, you + can declare the foreign key constraint to make + data integrity easier to accomplish. +

+

+ For example, you might have one class that is + used to represent employees. + You might have another that is used to + represent corporate divisions. When you add or + modify an employee record, you might want to + ensure that the division to which the employee + belongs is known to the data store. You do this + by specifying a foreign key constraint. +

+

+ When a foreign key constraint is declared: +

+
+
    +
  • +

    + When a new secondary key + for the object is stored, + it is checked to make sure + it exists as a primary + key for the related + entity object. If it does + not, a runtime error + occurs. +

    +
  • +
  • +

    + When a related entity is + deleted (that is, a + corporate division is + removed from the data + store), some action is + automatically taken for + the entities that refer to + this object (that is, the + employee objects). Exactly + what that action is, is + definable by you. See + below. +

    +
  • +
+
+

+ When a related entity is deleted from the data + store, one of the following actions is taken: +

+
+
    +
  • +

    + ABORT +

    +

    + The delete operation is not + allowed. A runtime error is + raised as a result of the + operation. This is the + default behavior. +

    +
  • +
  • +

    + CASCADE +

    +

    + All entities related to this + one are deleted as well. For + example, if you deleted a + Division + object, then all + Employee + objects that belonged to the + division are also deleted. +

    +
  • +
  • +

    + NULLIFY +

    +

    + All entities related to the + deleted entity are updated so + that the pertinent data member + is nullified. That is, if you + deleted a division, then all + employee objects related to + that division would have their + division key + automatically set to null. +

    +
  • +
+
+

+ You declare a foreign key constraint by using + the relatedEntity keyword. You + declare the foreign key constraint deletion policy using the + onRelatedEntityDelete keyword. For + example, the following declares a foreign key + constraint to Division + class objects, and it causes related objects to + be deleted if the Division + class is deleted: +

+
@SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Division.class, 
+    onRelatedEntityDelete=CASCADE)
+private String division = new String(); 
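+
+ For comparison, here is a sketch of the NULLIFY policy (using MANY_TO_ONE in this sketch, since many employees typically share one division): +
+
+// When a Division is deleted, the division key on each
+// related employee object is set to null instead.
+@SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Division.class,
+    onRelatedEntityDelete=NULLIFY)
+private String division; 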
+
+
+ + + diff --git a/docs/GettingStartedGuide/env.html b/docs/GettingStartedGuide/env.html new file mode 100644 index 0000000..25ce22a --- /dev/null +++ b/docs/GettingStartedGuide/env.html @@ -0,0 +1,412 @@ + + + + + + Chapter 2. Database Environments + + + + + + + + + +
+
+
+
+

Chapter 2. Database Environments

+
+
+
+ +

+ Regardless of whether you are using the DPL or the base API, you must use a database + environment. Database environments encapsulate one or more databases. This encapsulation + provides your threads with efficient access to your databases by allowing a single in-memory + cache to be used for each of the databases contained in the environment. This encapsulation + also allows you to group operations performed against multiple databases inside a single + transaction (see the Berkeley DB, Java Edition Getting Started with Transaction Processing guide for more information). +

+

+ If you are using the base API, most commonly you use database environments to create and + open databases (you close individual databases using the individual database handles). You + can also use environments to delete and rename databases. For transactional applications, + you use the environment to start transactions. For non-transactional applications, you use + the environment to sync your in-memory cache to disk. +

+

+ If you are using the DPL, all of these things are still being done, but the DPL takes + care of it for you. Under the DPL, the most common thing you will explicitly use an + environment for is to obtain transaction handles. +
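+ For example, here is a minimal sketch of obtaining a transaction handle from the environment (this assumes myEnv is an open, transactional Environment; exception handling is abbreviated): +
+
+import com.sleepycat.je.Transaction;
+
+...
+
+Transaction txn = myEnv.beginTransaction(null, null);
+try {
+    // ... perform store or database operations using txn ...
+    txn.commit();
+} catch (DatabaseException dbe) {
+    txn.abort();
+} 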

+

+ + Regardless of the API that you use, you also use the database environment for + administrative and configuration activities related to your database log files and the + in-memory cache. + + See Administering Berkeley DB Java Edition Applications for + more information. +

+

+ To find out how to use environments with a transaction-protected application, see the + Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

+
+
+
+
+

Opening Database Environments

+
+
+
+ +

+ You open a database environment by instantiating an Environment + object. You must provide to the constructor the name of the on-disk directory where the + environment is to reside. This directory location must exist or the open will fail. +

+

+ By default, the environment is not created for you if it does not exist. Set the creation property to true if you + want the environment to be created. For example: +

+
package je.gettingStarted;
+    
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not 
+// already exist.
+Environment myDbEnvironment = null;
+
+try {
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    myDbEnvironment = new Environment(new File("/export/dbEnv"), 
+                                      envConfig);
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} 
+

+ Opening an environment usually causes some background threads to be started. JE uses + these threads for log file cleaning and some administrative tasks. However, these + threads are started only once per process, so if you open the same environment more + than once from within the same process, there is no additional performance impact on your + application. Also, if you open the environment as read-only, then the background + threads (with the exception of the evictor thread) are not started. +
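+ For example, here is a minimal sketch of a read-only open (this assumes the environment already exists at the given location): +
+
+EnvironmentConfig roConfig = new EnvironmentConfig();
+roConfig.setReadOnly(true);
+Environment roEnv =
+    new Environment(new File("/export/dbEnv"), roConfig); 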

+

+ Note that opening your environment causes normal recovery to be run. This + causes your databases to be brought into a consistent state relative to the + changed data found in your log files. + See Databases and Log Files + for more information. +

+
+
+
+
+

Multiple Environments

+
+
+
+

+ Most JE applications only need a single database environment because any + number of databases can be created in a single environment, and the + total size of the data in an environment is not limited. That said, + your application can open and use as many environments as you have disk and + memory to manage. Also, you can instantiate + multiple Environment objects + for the same physical environment. +

+

+ The main reason for multiple environments is that an application must manage + multiple unique data sets. By placing each data set in a separate environment, + the application can gain real advantages in data manageability and in + application performance. By placing each data set in a unique environment, + a separate set of log files is created and maintained in a separate directory, + and so you can manipulate the log files for each data set separately. + That is, you can: +

+
+
    +
  • +

    + Backup, restore or delete a single data set + separately by copying or removing the files for its environment. +

    +
  • +
  • +

    + Balance the load between machines by moving the files for a + single data set from one machine to another. +

    +
  • +
  • +

    + Improve I/O performance by placing each data set on a separate + physical disk. +

    +
  • +
  • +

+ Delete individual data sets very efficiently by removing the + environment's log files. This is much more efficient than + deleting individual database records and is also more + efficient than removing databases, and so can be a real benefit + if you are managing large temporary data sets that must be + frequently deleted. +

    +
  • +
+
+

+ Be aware that there is a downside to using multiple environments. In particular, + understand that a single transaction cannot include changes + made in more than one environment. If you need to perform a set of + operations in more than one data set atomically (with a single + transaction), use a single environment and distinguish the data sets + using some other method. +

+

+ For example, an application running a hosted service for multiple clients may + wish to keep each client's data set separate. You can do this with multiple + environments, but then you cannot operate on all data sets atomically. If you need + to wrap operations for multiple data sets in a single transaction, consider some + other approach to keeping the data sets separate. +

+

+ You can, for example, distinguish each data set using a unique key range within + a single database. Or you can create a secondary key that identifies the data + set. Or you could use separate databases for each data set. All of these + approaches allow you to maintain multiple distinct data sets within a single + environment, but each obviously adds a level of complexity to your code over + what is required to simply use a unique environment for each data set. +
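+ As an illustration of the key range approach using the base API (the client prefix and key layout here are assumptions made for the sketch, not part of the JE API): +
+
+import com.sleepycat.je.DatabaseEntry;
+import java.nio.charset.StandardCharsets;
+
+...
+
+// Prefix every record key with a client identifier so that each
+// client's records occupy a distinct key range in one database.
+String clientId = "clientA";       // hypothetical client name
+String recordId = "order-1001";    // hypothetical record name
+DatabaseEntry key = new DatabaseEntry(
+    (clientId + "#" + recordId).getBytes(StandardCharsets.UTF_8)); 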

+
+
+
+
+
+

Multiple Environment Subdirectories

+
+
+
+

+ You can spread your JE environment across multiple subdirectories. This allows you + to improve data throughput by spreading disk I/O across multiple disks or filesystems. + Environment subdirectories reside in the environment home directory and are named + data001/ through dataNNN/, consecutively, where + NNN is the number of subdirectories that you want to use. Typically, + each of the dataNNN/ names is a symbolic link to an actual directory + which resides on a separate file system or disk. Alternatively, each subdirectory can + be a mount point for a filesystem which resides on a different disk drive. +

+

+ You control the number of subdirectories you want to use with the + je.log.nDataDirectories property in the + je.properties file. This value must be set prior to opening the + environment, and the subdirectories must already exist at that time. The value set for + this property cannot change over the course of the environment's lifetime, or an + exception is thrown when you attempt to open the environment. +

+

+ The default value for je.log.nDataDirectories is 0, and this means no + subdirectories are in use for the environment. A value greater than 0 indicates the + number of subdirectories to use, and that number of subdirectories must exist prior to + opening the environment. +

+

+ For example, if you set je.log.nDataDirectories to 3, then the first + time you open the environment (and for every environment open after that) your + environment home directory must contain three subdirectories named + data001, data002 and data003. + This causes your JE log files (the *.jdb files) to be spread + evenly across those three subdirectories. Finally, if you change the value of + je.log.nDataDirectories without first completely deleting your + environment, then your application will throw exceptions when you open your environment. +
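+ As a sketch, the je.properties entry for that three-subdirectory layout would look like this (the layout itself is an assumption for illustration): +
+
+# je.properties, in the environment home directory.
+# data001/, data002/ and data003/ must already exist there.
+je.log.nDataDirectories=3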

+
+
+
+
+
+

Configuring a Shared Cache for Multiple Environments

+
+
+
+

+ By default, each distinct JE environment has a separate, private + in-memory cache. If a single JVM process will keep open multiple + environments at the same time, it is strongly recommended that all such + environments are configured to use a shared cache. A shared cache makes + much more efficient use of memory than separate private caches. +

+

+ For example, imagine that you open 5 environments in a single process + and a total of 500 MB of memory is available for caching. Using private + caches, you could configure each cache to be 100 MB. If one of the + environments has a larger active data set than the others, it will + not be able to take advantage of unused memory in the other environment + caches. By using a shared cache, multiple open environments will make + better use of memory because the cache LRU algorithm is applied across + all information in all environments sharing the cache. +

+

+ In order to configure an environment to use a shared cache, call + EnvironmentConfig.setSharedCache(true). + This must be set for every environment in the + process that you want to use the shared cache. For example: +

+
package je.gettingStarted;
+    
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not 
+// already exist.
+Environment myEnv1 = null;
+Environment myEnv2 = null;
+
+try {
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    envConfig.setSharedCache(true);
+
+    myEnv1 = new Environment(new File("/export/dbEnv1"), envConfig);
+    myEnv2 = new Environment(new File("/export/dbEnv2"), envConfig);
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} 
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/envStats.html b/docs/GettingStartedGuide/envStats.html new file mode 100644 index 0000000..bee8b4e --- /dev/null +++ b/docs/GettingStartedGuide/envStats.html @@ -0,0 +1,102 @@ + + + + + + Environment Statistics + + + + + + + + + +
+
+
+
+

Environment Statistics

+
+
+
+

+ JE offers a wealth of information that you can examine regarding your environment's operations. The majority + of this information involves numbers relevant only to the JE developer and as such a description of those + statistics is beyond the scope of this manual. +

+

+ However, one statistic that is very important (especially for + long-running applications) is + EnvironmentStats.getNCacheMiss(). + This statistic returns the total number of + requests for database objects that were not serviceable from the cache. + This number is important to the + application administrator who is attempting to determine the proper size for the in-memory cache. + + See Sizing the Cache for details. +

+

+ To obtain this statistic from your environment, call Environment.getStats() to return + an EnvironmentStats object. You can then call the + EnvironmentStats.getNCacheMiss() method. For example: +

+ +
import com.sleepycat.je.Environment;
+
+...
+
+long cacheMisses = myEnv.getStats(null).getNCacheMiss();
+
+...  
+
+

+ Note that Environment.getStats() can only obtain statistics from your application's + process. In order for the application administrator to obtain this statistic, you must either + use JMX to retrieve the statistic + (see JConsole and JMX Support) + or you must print it for examination (for example, log the value once a minute). +

+

+ Remember that what is really important for cache sizing is the change in this value over time, and not the actual value + itself. So you might consider offering a delta from one examination of this statistic to the next (a delta of 0 is + desired while large deltas are an indication that the cache is too small). +
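+ For example, here is a minimal sketch of such a periodic delta report (myEnv is assumed to be an open Environment; interrupt handling is omitted for brevity): +
+
+long previousMisses = 0;
+while (true) {
+    long currentMisses = myEnv.getStats(null).getNCacheMiss();
+    // A delta near 0 is the goal; consistently large deltas
+    // suggest that the cache is too small.
+    System.out.println("Cache misses this interval: " +
+        (currentMisses - previousMisses));
+    previousMisses = currentMisses;
+    Thread.sleep(60 * 1000);   // sample once a minute
+} 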

+
+ + + diff --git a/docs/GettingStartedGuide/envclose.html b/docs/GettingStartedGuide/envclose.html new file mode 100644 index 0000000..88a57bf --- /dev/null +++ b/docs/GettingStartedGuide/envclose.html @@ -0,0 +1,127 @@ + + + + + + Closing Database Environments + + + + + + + + + +
+
+
+
+

Closing Database Environments

+
+
+
+

+ You close your environment by calling the Environment.close() + method. This method performs a checkpoint, so it is not necessary to perform a sync or a + checkpoint explicitly before calling it. For information on checkpoints, see the + Berkeley DB, Java Edition Getting Started with Transaction Processing guide. For information on syncs, see Database Modifications and Syncs. +

+ +
import com.sleepycat.je.DatabaseException;
+
+import com.sleepycat.je.Environment;
+
+...
+
+try {
+    if (myDbEnvironment != null) {
+        myDbEnvironment.close();
+    } 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} 
+

+ If you are using the DPL, then close your environment(s) only after all other store + activities have completed and you have closed any stores currently opened in the + environment. If you are using the base API, then close your environment(s) only after + all other database activities have completed and you have closed any databases currently + opened in the environment. +

+
+

Note

+

+ It is possible for the environment to close before JE's + cleaner thread + has finished its work. This happens if you perform a large number of deletes immediately + before shutting down your environment. The result is that your log files may be quite a lot larger than you + expect them to be because the cleaner thread has not had a chance to finish its work. +

+

+ See The Cleaner Thread for + details on the cleaner thread. +

+

+ If you want to make sure that the cleaner has finished running before the environment is closed, + call Environment.cleanLog() before calling + Environment.close(): +

+ +
import com.sleepycat.je.DatabaseException;
+
+import com.sleepycat.je.Environment;
+
+...
+
+try {
+    if (myDbEnvironment != null) {
+        myDbEnvironment.cleanLog(); // Clean the log before closing
+        myDbEnvironment.close();
+    } 
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} 
+
+

+ Closing the last environment handle in your application causes all internal data structures + to be released and the background threads to be stopped. If any databases are still open, + then JE will complain before closing them as well. Any on-going + transactions are aborted at this time, and any open cursors are closed. However, it is recommended that you always close all cursor handles immediately after their use to ensure concurrency and to release resources such as page locks. +

+
+ + + diff --git a/docs/GettingStartedGuide/getmultiple.html b/docs/GettingStartedGuide/getmultiple.html new file mode 100644 index 0000000..cfc7f50 --- /dev/null +++ b/docs/GettingStartedGuide/getmultiple.html @@ -0,0 +1,336 @@ + + + + + + Retrieving Multiple Objects + + + + + + + + + +
+
+
+
+

Retrieving Multiple Objects

+
+
+
+
+
+
+ + Cursor Initialization + +
+
+ + Working with Duplicate Keys + +
+
+ + Key Ranges + +
+
+
+

+ It is possible to iterate over every object referenced + by a specific index. You may want to do this if, for + example, you want to examine or modify every object + accessible by a specific primary index. +

+

+ In addition, some indexes result in the retrieval of multiple + objects. For example, MANY_TO_ONE + secondary indexes can result in more than one object for any given + key (also known as duplicate keys). + When this is the case, you must iterate + over the resulting set of objects in order to examine + each object in turn. +

+

+ There are two ways to iterate over a collection of + objects as returned by an index. One is to use a + standard Java Iterator, which you + obtain using an EntityCursor, + which in turn you can obtain from a PrimaryIndex: +

+
PrimaryIndex<String,SimpleEntityClass> pi =
+    store.getPrimaryIndex(String.class, SimpleEntityClass.class);
+EntityCursor<SimpleEntityClass> pi_cursor = pi.entities();
+try {
+    Iterator<SimpleEntityClass> i = pi_cursor.iterator();
+    while (i.hasNext()) {
+        // Do something here
+    }
+} finally {
+    // Always close the cursor
+    pi_cursor.close();
+} 
+

+ Alternatively, you can use a Java "foreach" statement + to iterate over the object set: +

+
PrimaryIndex<String,SimpleEntityClass> pi =
+    store.getPrimaryIndex(String.class, SimpleEntityClass.class);
+EntityCursor<SimpleEntityClass> pi_cursor = pi.entities();
+try {
+    for (SimpleEntityClass seci : pi_cursor) {
+        // do something with each object "seci"
+    }
+// Always make sure the cursor is closed when we are done with it.
+} finally {
+    pi_cursor.close();
+} 
+
+
+
+
+

Cursor Initialization

+
+
+
+

+ When a cursor is first opened, it is not + positioned to any value; that is, + it is not initialized. + Most of the EntityCursor + methods that move a cursor will initialize it + to either the first or last object, depending + on whether the operation is moving the cursor + forward (all next... + methods) or backwards (all + prev... methods). +

+

+ You can also force a cursor, whether it is + initialized or not, to return the first object + by calling + EntityCursor.first(). + Similarly, you can force a return of the last + object using + EntityCursor.last(). +

+

+ Operations that do not move the cursor (such as + EntityCursor.current() + or EntityCursor.delete()) + will throw an + IllegalStateException + when used on an uninitialized cursor. +
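+ For example, a minimal sketch using the pi index from the examples above: +
+
+EntityCursor<SimpleEntityClass> pi_cursor = pi.entities();
+try {
+    // first() initializes the cursor, so current() is now legal.
+    SimpleEntityClass first = pi_cursor.first();
+    if (first != null) {
+        SimpleEntityClass same = pi_cursor.current();
+    }
+} finally {
+    pi_cursor.close();
+} 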

+
+
+
+
+
+

Working with Duplicate Keys

+
+
+
+

+ If you have duplicate secondary keys, you can return an + EntityIndex class object for them + using SecondaryIndex.subIndex(). + Then, use that object's + entities() + method to obtain an EntityCursor + instance. +

+

+ For example: +

+
PrimaryIndex<String,SimpleEntityClass> pi =
+    store.getPrimaryIndex(String.class, SimpleEntityClass.class);
+
+SecondaryIndex<String,String,SimpleEntityClass> si = 
+    store.getSecondaryIndex(pi, String.class, "sKey");
+
+EntityCursor<SimpleEntityClass> sec_cursor = 
+    si.subIndex("skeyone").entities(); 
+
+try {
+    for (SimpleEntityClass seci : sec_cursor) {
+        // do something with each object "seci"
+    }
+// Always make sure the cursor is closed when we are done with it.
+} finally {
+    sec_cursor.close();
+} 
+

+ Note that if you are working with duplicate keys, you can + control how cursor iteration works by using the following + EntityCursor methods: +

+
+
    +
  • +

    + nextDup() +

    +

    + Moves the cursor to the next object with the + same key as the cursor is currently + referencing. That is, this method returns the + next duplicate object. If no such object + exists, this method returns + null. +

    +
  • +
  • +

    + prevDup() +

    +

    + Moves the cursor to the previous object with the + same key as the cursor is currently + referencing. That is, this method returns the + previous duplicate object in the cursor's set + of objects. If no such object exists, this method returns + null. +

    +
  • +
  • +

    + nextNoDup() +

    +

    + Moves the cursor to the next object in the + cursor's set that has a key which is different + than the key that the cursor is currently + referencing. That is, this method skips all + duplicate objects and returns the + next non-duplicate object in the cursor's set + of objects. If no such object exists, this method returns + null. +

    +
  • +
  • +

    + prevNoDup() +

    +

    + Moves the cursor to the previous object in the + cursor's set that has a key which is different + than the key that the cursor is currently + referencing. That is, this method skips all + duplicate objects and returns the + previous non-duplicate object in the cursor's set + of objects. If no such object exists, this method returns + null. +

    +
  • +
+
+

+ For example: +

+
PrimaryIndex<String,SimpleEntityClass> pi =
+    store.getPrimaryIndex(String.class, SimpleEntityClass.class);
+
+SecondaryIndex<String,String,SimpleEntityClass> si = 
+    store.getSecondaryIndex(pi, String.class, "sKey");
+
+EntityCursor<SimpleEntityClass> sec_cursor = 
+    si.subIndex("skeyone").entities(); 
+
+try {
+    SimpleEntityClass sec;
+    while ((sec = sec_cursor.nextNoDup()) != null) {
+        // Do something here
+    }
+// Always make sure the cursor is closed when we are done with it.
+} finally {
+    sec_cursor.close();
+} 
+
+
+
+
+
+

Key Ranges

+
+
+
+

+ You can restrict the scope of a cursor's movement + by specifying a range when you + create the cursor. The cursor can then never be + positioned outside of the specified range. +

+

+ When specifying a range, you indicate whether a + range bound is inclusive or + exclusive by providing a + boolean value for each range. + true indicates that the provided + bound is inclusive, while false + indicates that it is exclusive. +

+

+ You provide this information when you call + PrimaryIndex.entities() + or + SecondaryIndex.entities(). + For example, suppose you had a class indexed by + numerical information. Suppose further that you + wanted to examine only those objects with indexed + values of 100 - 199. Then (assuming the numerical + information is the primary index), you can bound + your cursor as follows: +

+
+EntityCursor<SomeEntityClass> cursor = 
+    primaryIndex.entities(100, true, 200, false);
+
+try {
+    for (SomeEntityClass sec : cursor) {
+        // Do something here to objects ranged from 100 to 199
+    }
+// Always make sure the cursor is closed when we are done with it.
+} finally {
+    cursor.close();
+} 
+
+
+ + + diff --git a/docs/GettingStartedGuide/gettingStarted.css b/docs/GettingStartedGuide/gettingStarted.css new file mode 100644 index 0000000..6a2b24b --- /dev/null +++ b/docs/GettingStartedGuide/gettingStarted.css @@ -0,0 +1,50 @@ +body { width: 45em; + margin-left: 3em; + font-family: Arial, Helvetica, sans-serif; + font-size: 11pt; + } + +h2.title { margin-left: -1em; + font-family: Verdana, serif; + font-size: 16pt; + } + +h3.title { font-family: Verdana, serif; + font-size: 14pt; + } + +pre.programlisting { + font-family: monospace; + background-color: #eae8e9; +} + +div.navheader { font-size: 10pt; + width: 60em; + margin-left: -2em; + } + +div.navheader table tr td { font-size: 10pt; } + +div.navfooter { font-size: 10pt; + width: 60em; + margin-left: -2em; + } +div.navfooter table tr td { font-size: 10pt; } + +span.emphasis { font-style: italic;} + +div.appendix div.informaltable { font-size: 9pt; } +div.appendix div.informaltable td { vertical-align: top; } +div.appendix div.informaltable p { margin-top: .25em; } +div.appendix div.informaltable p { margin-bottom: .25em; } + +div.variablelist dl dt {margin-top: 1em; } + +div.libver p { + font-size: 8pt; + width: 30%; + margin-left: 2px; + margin-right: 2px; + padding-top: 3px; + padding-bottom: 3px; + } diff --git a/docs/GettingStartedGuide/gettingit.html b/docs/GettingStartedGuide/gettingit.html new file mode 100644 index 0000000..19700c5 --- /dev/null +++ b/docs/GettingStartedGuide/gettingit.html @@ -0,0 +1,103 @@ + + + + + + Getting and Using JE + + + + + + + + + +
+
+
+
+

Getting and Using JE

+
+
+
+

+ You can obtain JE by visiting the JE download page: http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html. +

+

+ To install JE, untar or unzip the distribution to the + directory of your choice. If you use unzip, make sure to specify + the -U option in order to preserve case. +

+

+ For more information on installing JE, see + JE_HOME/docs/relnotes.html, + where JE_HOME is the directory where you + unpacked JE. +

+
+

Note

+

+ JE is compatible with Java SE 8 (64-bit), and has been + tested and certified against Oracle Java SE 8 and IBM + J9. It is recommended that you upgrade to the latest Java + releases to take advantage of the latest bug fixes and + performance improvements. The release notes included in the + JE download specify the exact Java versions that have been + used for certification. +

+

+ Linux, Oracle Solaris, and AIX are officially supported platforms for + JE. Both Oracle Solaris x86 and Oracle Solaris SPARC are + supported. A 64-bit JVM is required in order to run JE. +

+
+

+ You can use JE with your application by adding + JE_HOME/lib/je-<version>.jar + to your application's classpath. +

+

+ Beyond this manual, you can find documentation for JE at + JE_HOME/docs/index.html. + In particular, complete Javadoc for the JE API set + is available at + JE_HOME/docs/java/index.html. +

+
+ + + diff --git a/docs/GettingStartedGuide/hotfailover.html b/docs/GettingStartedGuide/hotfailover.html new file mode 100644 index 0000000..6ebb2d1 --- /dev/null +++ b/docs/GettingStartedGuide/hotfailover.html @@ -0,0 +1,128 @@ + + + + + + Hot Standby + + + + + + + + + +
+
+
+
+

Hot Standby

+
+
+
+

+ As a final backup/recovery strategy, you can create a hot standby. Note that using hot standbys requires + your application to be able to specify its environment home directory at application startup time. Most + application developers allow the environment home directory to be identified using a command line option or a + configuration or properties file. If your application has its + environment home hard-coded into it, you cannot use hot standbys. +

+

+ You create a hot standby by periodically backing up your database to an alternative + location on disk. Usually this alternative location is on a separate physical drive from + where you normally keep your database, but if multiple drives are not available then you + should at least put the hot standby on a separate disk partition. +

+

+ You failover to your hot standby by causing your application to reopen its environment using the hot standby + location. +

+

+ Note that a hot standby should not be used as a substitute for backing up and archiving your data to a safe + location away from your operating environment. Even if your data is spread across multiple physical disks, a + truly serious catastrophe (fires, malevolent software viruses, + faulty disk controllers, and so forth) + can still cause you to lose your data. +

+

+ To create and maintain a hot standby: +

+
+
    +
  1. +

    + Copy all log files (*.jdb) from your environment + directory to the location where you want to keep your standby. Either a hot + or an offline backup can be used for this purpose, but typically a hot + standby is initially created by taking an offline backup of your database. + This ensures that you have captured the contents of your in-memory cache. +

    +
    +

    Note

    +

    + If you are using subdirectories to store your log files, then you must backup + the subdirectories, making sure to keep log files in the subdirectory in which + JE placed them. For information on using subdirectories to store your log + files, see Multiple Environment Subdirectories. +

    +
    +
  2. +
  3. +

    + Periodically copy to your standby directory any log files that were changed or created since the + time of your last copy. Most backup software is capable of performing this kind of an incremental + backup for you. +

    +

+ Note that the frequency of your incremental copies determines the amount of data that is at risk due to catastrophic + failures. For example, if you perform the incremental copy once an hour then at most your hot standby is an hour behind + your production database, and so you are risking at most an hour's worth of database changes. +

    +
  4. +
  5. +

    + Remove any *.jdb files from the hot standby directory that have been removed or + renamed to .del files in the primary directory. This is not necessary for + consistency, but will help to reduce disk space consumed by the hot standby. +

    +
  6. +
+
+
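+ The copy steps above can also be driven from within your application using the DbBackup helper class (see Using the DbBackup Helper Class). Here is a minimal sketch, assuming env is an open Environment and that the actual file copy is performed elsewhere: +
+
+import com.sleepycat.je.util.DbBackup;
+
+...
+
+DbBackup backup = new DbBackup(env);
+backup.startBackup();   // holds log file cleaning while the backup runs
+try {
+    String[] filesToCopy = backup.getLogFilesInBackupSet();
+    for (String fileName : filesToCopy) {
+        // Copy fileName from the environment directory to the
+        // hot standby directory (copy mechanism not shown).
+    }
+} finally {
+    backup.endBackup();
+} 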
+ + + diff --git a/docs/GettingStartedGuide/index.html b/docs/GettingStartedGuide/index.html new file mode 100644 index 0000000..c212418 --- /dev/null +++ b/docs/GettingStartedGuide/index.html @@ -0,0 +1,1101 @@ + + + + + + Getting Started with Berkeley DB Java Edition + + + + + + + +
+
+
+
+

Getting Started with Berkeley DB Java Edition

+
+
+
+ +

+ Legal Notice +

+

+ Copyright © 2002 - 2017 Oracle and/or its affiliates. All rights + reserved. +

+

+ This software and related documentation are provided under a + license agreement containing restrictions on use and disclosure + and are protected by intellectual property laws. Except as + expressly permitted in your license agreement or allowed by + law, you may not use, copy, reproduce, translate, broadcast, + modify, license, transmit, distribute, exhibit, perform, + publish, or display any part, in any form, or by any means. + Reverse engineering, disassembly, or decompilation of this + software, unless required by law for interoperability, is + prohibited. +

+

+ The information contained herein is subject to change without + notice and is not warranted to be error-free. If you find any + errors, please report them to us in writing. +

+

+ Berkeley DB, + + Berkeley DB Java Edition + and + Sleepycat are trademarks or registered trademarks of + Oracle. All rights to these marks are reserved. + No third-party use is permitted without the + express prior written consent of Oracle. +

+

+ Other names may be trademarks of their respective owners. +

+

+ If this is software or related documentation that is delivered + to the U.S. Government or anyone licensing it on behalf of the + U.S. Government, the following notice is applicable: +

+

+ U.S. GOVERNMENT END USERS: Oracle programs, including any + operating system, integrated software, any programs installed + on the hardware, and/or documentation, delivered to U.S. + Government end users are "commercial computer software" + pursuant to the applicable Federal Acquisition Regulation and + agency-specific supplemental regulations. As such, use, + duplication, disclosure, modification, and adaptation of the + programs, including any operating system, integrated software, + any programs installed on the hardware, and/or documentation, + shall be subject to license terms and license restrictions + applicable to the programs. No other rights are granted to the + U.S. Government. +

+

+ This software or hardware is developed for general use in a + variety of information management applications. It is not + developed or intended for use in any inherently dangerous + applications, including applications that may create a risk of + personal injury. If you use this software or hardware in + dangerous applications, then you shall be responsible to take + all appropriate fail-safe, backup, redundancy, and other + measures to ensure its safe use. Oracle Corporation and its + affiliates disclaim any liability for any damages caused by use + of this software or hardware in dangerous applications. +

+

+ Oracle and Java are registered trademarks of Oracle and/or its + affiliates. Other names may be trademarks of their respective + owners. +

+

+ Intel and Intel Xeon are trademarks or registered trademarks of + Intel Corporation. All SPARC trademarks are used under license + and are trademarks or registered trademarks of SPARC + International, Inc. AMD, Opteron, the AMD logo, and the AMD + Opteron logo are trademarks or registered trademarks of + Advanced Micro Devices. UNIX is a registered trademark of The + Open Group. +

+

+ This software or hardware and documentation may provide access + to or information on content, products, and services from third + parties. Oracle Corporation and its affiliates are not + responsible for and expressly disclaim all warranties of any + kind with respect to third-party content, products, and + services. Oracle Corporation and its affiliates will not be + responsible for any loss, costs, or damages incurred due to + your access to or use of third-party content, products, or + services. +

+
+
+
+

31-Oct-2017

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+ + For More Information + +
+
+
+
+ + Contact Us + +
+
+
+
+
+
+ + 1. Introduction to Berkeley DB Java Edition + +
+
+
+
+ + Features + +
+
+
+
+ + DPL Features + +
+
+ + Base API Features + +
+
+ + Which API Should You Use? + +
+
+
+
+ + The JE Application + +
+
+
+
+ + Database Environments + +
+
+ + Key-Data Pairs + +
+
+ + Storing Data + +
+
+ + Duplicate Data + +
+
+ + Replacing and Deleting Entries + +
+
+ + Secondary Keys + +
+
+ + Transactions + +
+
+ + JE Resources + +
+
+ + + Application Considerations + + +
+
+
+
+ + JE Backup and Restore + +
+
+ + JCA Support + +
+
+ + JConsole and JMX Support + +
+
+ + Getting and Using JE + +
+
+ + JE Exceptions + +
+
+ + Six Things Everyone Should Know about JE Log Files + +
+
+
+
+ + 2. Database Environments + +
+
+
+
+ + Opening Database Environments + +
+
+
+
+ + Multiple Environments + +
+
+ + Multiple Environment Subdirectories + +
+
+ + Configuring a Shared Cache for Multiple Environments + +
+
+
+
+ + Closing Database Environments + +
+
+ + Environment Properties + +
+
+
+
+ + The EnvironmentConfig Class + +
+
+ + EnvironmentMutableConfig + +
+
+
+
+ + Environment Statistics + +
+
+ + Database Environment Management Example + +
+
+
+
+ + I. Programming with the Direct Persistence Layer + +
+
+
+
+ + 3. Direct Persistence Layer First Steps + +
+
+
+
+ + Entity Stores + +
+
+
+
+ + Opening and Closing Environments and Stores + +
+
+
+
+ + Persistent Objects + +
+
+ + Saving and Retrieving Data + +
+
+
+
+ + 4. Working with Indices + +
+
+
+
+ + Accessing Indexes + +
+
+
+
+ + Accessing Primary Indices + +
+
+ + Accessing Secondary Indices + +
+
+
+
+ + Creating Indexes + +
+
+
+
+ + Declaring Primary Indexes + +
+
+ + Declaring Secondary Indexes + +
+
+ + Foreign Key Constraints + +
+
+
+
+
+
+ + 5. Saving and Retrieving Objects + +
+
+
+
+ + A Simple Entity Class + +
+
+ + SimpleDA.class + +
+
+ + Placing Objects in an Entity Store + +
+
+ + Retrieving Objects from an Entity Store + +
+
+ + Retrieving Multiple Objects + +
+
+
+
+ + Cursor Initialization + +
+
+ + Working with Duplicate Keys + +
+
+ + Key Ranges + +
+
+
+
+ + Join Cursors + +
+
+ + Deleting Entity Objects + +
+
+ + Replacing Entity Objects + +
+
+
+
+ + 6. A DPL Example + +
+
+
+
+ + Vendor.java + +
+
+ + Inventory.java + +
+
+ + MyDbEnv + +
+
+ + DataAccessor.java + +
+
+ + ExampleDatabasePut.java + +
+
+ + ExampleInventoryRead.java + +
+
+
+
+
+
+ + II. Programming with the Base API + +
+
+
+
+ + 7. Databases + +
+
+
+
+ + Opening Databases + +
+
+
+
+ + Deferred Write Databases + +
+
+ + Temporary Databases + +
+
+ + Closing Databases + +
+
+
+
+ + Database Properties + +
+
+ + Administrative Methods + +
+
+ + Database Example + +
+
+
+
+ + 8. Database Records + +
+
+
+
+ + Using Database Records + +
+
+ + Reading and Writing Database Records + +
+
+
+
+ + Writing Records to the Database + +
+
+ + Getting Records from the Database + +
+
+ + Deleting Records + +
+
+ + Data Persistence + +
+
+
+
+ + Using Time to Live + +
+
+
+
+ + Specifying a TTL Value + +
+
+ + Updating a TTL Value + +
+
+ + Deleting TTL Expiration + +
+
+
+
+ + Using the BIND APIs + +
+
+
+
+ + Numerical and String Objects + +
+
+ + Serializable Complex Objects + +
+
+ + Custom Tuple Bindings + +
+
+
+
+ + Using Comparators + +
+
+
+
+ + Writing Comparators + +
+
+ + Setting Comparators + +
+
+
+
+ + Database Record Example + +
+
+
+
+ + 9. Using Cursors + +
+
+
+
+ + Opening and Closing Cursors + +
+
+ + Getting Records Using the Cursor + +
+
+
+
+ + Disk Ordered Cursors + +
+
+ + Searching for Records + +
+
+ + Working with Duplicate Records + +
+
+
+
+ + Putting Records Using Cursors + +
+
+ + Deleting Records Using Cursors + +
+
+ + Replacing Records Using Cursors + +
+
+ + Cursor Example + +
+
+
+
+ + 10. Secondary Databases + +
+
+
+
+ + Opening and Closing Secondary Databases + +
+
+ + Implementing Key + Creators + + + +
+
+ + Secondary Database Properties + +
+
+ + Reading Secondary Databases + +
+
+ + Deleting Secondary Database Records + +
+
+ + + Using Secondary Cursors + + + +
+
+ + Database Joins + +
+
+
+
+ + Using Join Cursors + +
+
+ + JoinCursor Properties + +
+
+
+
+ + Secondary Database Example + +
+
+
+
+ + Opening Secondary Databases with + MyDbEnv + +
+
+ + Using Secondary Databases with ExampleInventoryRead + +
+
+
+
+
+
+
+
+ + III. Administering JE Applications + +
+
+
+
+ + 11. Backing up and Restoring Berkeley DB Java Edition Applications + +
+
+
+
+ + Databases and Log Files + +
+
+
+
+ + Log File Overview + +
+
+ + Cleaning the Log Files + +
+
+ + The BTree + +
+
+ + Database Modifications and Syncs + +
+
+ + Normal Recovery + +
+
+
+
+ + Performing Backups + +
+
+
+
+ + Performing a Hot Backup + +
+
+ + Performing an Offline Backup + +
+
+ + Using the DbBackup Helper Class + +
+
+
+
+ + Performing Catastrophic Recovery + +
+
+ + Hot Standby + +
+
+
+
+ + 12. Administering Berkeley DB Java Edition Applications + +
+
+
+
+ + The JE Properties File + +
+
+ + Managing the Background Threads + +
+
+
+
+ + The Cleaner Thread + +
+
+ + The Checkpointer Thread + +
+
+
+
+ + Sizing the Cache + +
+
+ + Setting Disk Thresholds + +
+
+ + The Command Line Tools + +
+
+
+
+ + DbDump + +
+
+ + DbLoad + +
+
+ + DbVerify + +
+
+
+
+ + Logging + +
+
+
+
+ + Managing Logging Levels + +
+
+ + Managing Handler Levels + +
+
+
+
+
+
+
+
+ + A. Concurrent Processing in Berkeley DB Java Edition + +
+
+
+
+ + Multithreaded Applications + +
+
+ + Multiprocess Applications + +
+
+
+
+
+ +
+ + + diff --git a/docs/GettingStartedGuide/indexes.html b/docs/GettingStartedGuide/indexes.html new file mode 100644 index 0000000..98b6693 --- /dev/null +++ b/docs/GettingStartedGuide/indexes.html @@ -0,0 +1,398 @@ + + + + + + Chapter 10. Secondary Databases + + + + + + + + + +
+
+
+
+

Chapter 10. Secondary Databases

+
+
+
+ +

+ Usually you find database records by means of the record's key. However, + the key that you use for your record will not always contain the + information required to provide you with rapid access to the data that you + want to retrieve. For example, suppose your + Database + + contains records related to users. The key might be a string that is some + unique identifier for the person, such as a user ID. Each record's data, + however, would likely contain a complex object containing details about + people such as names, addresses, phone numbers, and so forth. + While your application may frequently want to query a person by user + ID (that is, by the information stored in the key), it may also on occasion + want to locate people by, say, their name. +

+

+ Rather than iterate through all of the records in your database, examining each in turn for a given person's name, you can create an index based on names and then just search that index for the name that you want. You can do this using secondary databases. In JE, the Database that contains your data is called a primary database. A database that provides an alternative set of keys to access that data is called a secondary database, and these are managed using SecondaryDatabase class objects. In a secondary database, the keys are your alternative (or secondary) index, and the data corresponds to a primary record's key.

+

+ You create a secondary database by using a SecondaryConfig object to identify an implementation of the SecondaryKeyCreator interface, which is used to create keys based on data found in the primary database. You then pass this SecondaryConfig object to Environment.openSecondaryDatabase().

+

+ Once opened, JE manages secondary databases for you. Adding or deleting + records in your primary database causes JE to update the secondary as + necessary. Further, changing a record's data in the primary database may cause + JE to modify a record in the secondary, depending on whether the change + forces a modification of a key in the secondary database. +

+

+ Note that you cannot write directly to a secondary database. While methods exist on SecondaryDatabase and SecondaryCursor that appear to allow this, they in fact always throw UnsupportedOperationException. To change the data referenced by a SecondaryDatabase record, modify the primary database instead. The exception to this rule is that delete operations are allowed on the SecondaryDatabase object. See Deleting Secondary Database Records for more information.

+
+

Note

+

+ + Secondary database records are updated/created by JE + only if the + SecondaryKeyCreator.createSecondaryKey() method + + returns + true. + + If + false + + is returned, then JE will not add the key to the secondary database, and + in the event of a record update it will remove any existing key. + + + +

+

+ See Implementing Key + Creators + + for more + information on this interface and method. + + +

+
+

+ When you read a record from a secondary database, JE automatically + returns + the key and data + + from the corresponding record in the primary database. + + + +

+
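+ For instance, the following minimal sketch reads a record through a
+ secondary database; the names mySecDb and searchKey are illustrative
+ only, and exception handling is omitted. Note that the data returned
+ comes from the corresponding primary record.
+
// A minimal sketch, assuming mySecDb is an open SecondaryDatabase
+// whose keys are item names. The data portion returned by get()
+// comes from the corresponding primary record.
+DatabaseEntry searchKey =
+    new DatabaseEntry("someSecondaryKey".getBytes("UTF-8"));
+DatabaseEntry primaryData = new DatabaseEntry();
+
+OperationStatus status =
+    mySecDb.get(null, searchKey, primaryData, LockMode.DEFAULT);
+if (status == OperationStatus.SUCCESS) {
+    // primaryData now holds the data from the primary record.
+}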
+
+
+
+

Opening and Closing Secondary Databases

+
+
+
+

+ You open secondary databases using the Environment.openSecondaryDatabase() method, and close them using SecondaryDatabase.close(). Just as is the case with primary databases, you must provide Environment.openSecondaryDatabase() with the database's name and, optionally, other properties such as whether duplicate records are allowed, or whether the secondary database can be created on open. In addition, you must also provide:

+
+
  • A handle to the primary database that this secondary database is indexing. Note that this means that secondary databases are maintained only for the specified Database handle. If you open the same Database multiple times for write (such as might occur when opening a database for read-only and read-write in the same application), then you should open the SecondaryDatabase for each such Database handle.

  • A SecondaryConfig object that provides properties specific to a secondary database. The most important of these is used to identify the key creator for the database. The key creator is responsible for generating keys for the secondary database. See Secondary Database Properties for details.
+
+
+

Note

+

+ Primary databases must not support duplicate records. + Secondary records point to primary records using the primary key, so that key must be unique. +

+
+

So to open (create) a secondary database, you:

+
+
  1. Open your primary database.

  2. Instantiate your key creator.

  3. Instantiate your SecondaryConfig object.

  4. Set your key creator object on your SecondaryConfig object.

  5. Open your secondary database, specifying your primary database and your SecondaryConfig at that time.
+
+

For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryConfig;
+
+import java.io.File;
+
+...
+
+DatabaseConfig myDbConfig = new DatabaseConfig();
+SecondaryConfig mySecConfig = new SecondaryConfig();
+myDbConfig.setAllowCreate(true);
+mySecConfig.setAllowCreate(true);
+// Duplicates are frequently required for secondary databases.
+mySecConfig.setSortedDuplicates(true);
+
+// Open the primary
+Environment myEnv = null;
+Database myDb = null;
+SecondaryDatabase mySecDb = null;
+try {
+    String dbName = "myPrimaryDatabase";
+
+    // Open the environment, allowing it to be created if needed.
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setAllowCreate(true);
+    myEnv = new Environment(new File("/tmp/JEENV"), myEnvConfig);
+    myDb = myEnv.openDatabase(null, dbName, myDbConfig);
+
+    // A fake tuple binding that is not actually implemented anywhere
+    // in this manual. The tuple binding is dependent on the data in use.
+    // Tuple bindings are described earlier in this manual.
+    TupleBinding myTupleBinding = new MyTupleBinding();
+
+    // Open the secondary.
+    // Key creators are described in the next section.
+    FullNameKeyCreator keyCreator = 
+        new FullNameKeyCreator(myTupleBinding);
+
+    // Get a secondary object and set the key creator on it.
+    mySecConfig.setKeyCreator(keyCreator);
+
+    // Perform the actual open
+    String secDbName = "mySecondaryDatabase";
+    mySecDb = myEnv.openSecondaryDatabase(null, secDbName, myDb, 
+                                          mySecConfig); 
+} catch (DatabaseException de) {
+    // Exception handling goes here ...
+}
+

To close a secondary database, call its close() method. Note that + for best results, you should close all the secondary databases associated + with a primary database before closing the primary.

+

For example:

+ +
try {
+    if (mySecDb != null) {
+        mySecDb.close();
+    }
+
+    if (myDb != null) {
+        myDb.close(); 
+    }
+
+    if (myEnv != null) {
+        myEnv.close();
+    }
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+}
+
+
+ + + diff --git a/docs/GettingStartedGuide/indexusage.html b/docs/GettingStartedGuide/indexusage.html new file mode 100644 index 0000000..4530dfc --- /dev/null +++ b/docs/GettingStartedGuide/indexusage.html @@ -0,0 +1,483 @@ + + + + + + Secondary Database Example + + + + + + + + + +
+
+
+
+

Secondary Database Example

+
+
+
+ +

In previous chapters in this book, we built applications that load + and display several JE databases. In this example, we will extend those + examples to use secondary databases. Specifically:

+
+ +
+

+ Before we can use a secondary database, we must implement a class to extract secondary keys for us. + We use ItemNameKeyCreator for this purpose. +

+
+ +

+ Example 10.1 ItemNameKeyCreator.java +

+
+

+ This class assumes the primary database + uses Inventory objects for the record data. The + Inventory class is described in Inventory.java.

+

In our key creator class, we make use of a custom tuple binding + called InventoryBinding. This class is described in InventoryBinding.java.

+

You can find the following class in:

+
JE_HOME/examples/je/gettingStarted/ItemNameKeyCreator.java
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.SecondaryDatabase;
+
+public class ItemNameKeyCreator implements SecondaryKeyCreator {
+
+    private TupleBinding theBinding;
+
+    // Use the constructor to set the tuple binding
+    public ItemNameKeyCreator(TupleBinding binding) {
+        theBinding = binding;
+    }
+
+    // Abstract method that we must implement
+    public boolean createSecondaryKey(SecondaryDatabase secDb,
+        DatabaseEntry keyEntry,    // From the primary
+        DatabaseEntry dataEntry,   // From the primary
+        DatabaseEntry resultEntry) // set the key data on this.
+        throws DatabaseException {
+
+        if (dataEntry != null) {
+            // Convert dataEntry to an Inventory object
+            Inventory inventoryItem =
+                  (Inventory)theBinding.entryToObject(dataEntry);
+            // Get the item name and use that as the key
+            String theItem = inventoryItem.getItemName();
+            resultEntry.setData(theItem.getBytes());
+        }
+        return true;
+    }
+} 
+
+
+
+

Now that we have a key creator, we can use it to generate keys for a secondary database. Next, we extend MyDbEnv to manage a secondary database, and to use ItemNameKeyCreator to generate keys for that secondary database.

+
+
+
+
+

Opening Secondary Databases with + MyDbEnv

+
+
+
+

In Stored Class Catalog Management with MyDbEnv we built + MyDbEnv as an example of a class that + encapsulates Environment and + Database opens and closes. We will now extend + that class to manage a SecondaryDatabase.

+
+ +

+ Example 10.2 SecondaryDatabase Management with MyDbEnv +

+
+

We start by importing two additional classes needed to support secondary databases. + We also add a global variable to use as a handle for our secondary database. +

+ +
// File MyDbEnv.java
+
+package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+
+import java.io.File;
+
+public class MyDbEnv {
+
+    private Environment myEnv;
+
+    // The databases that our application uses
+    private Database vendorDb;
+    private Database inventoryDb;
+    private Database classCatalogDb;
+    private SecondaryDatabase itemNameIndexDb;
+
+    // Needed for object serialization
+    private StoredClassCatalog classCatalog;
+
+    // Our constructor does nothing
+    public MyDbEnv() {}
+

+ Next we update the MyDbEnv.setup() method to open the + secondary database. As a part of this, we have to pass an + ItemNameKeyCreator object on the call to open the secondary + database. Also, in order to instantiate ItemNameKeyCreator, we need an + InventoryBinding object (we described this class in InventoryBinding.java). We do all this work together inside of + MyDbEnv.setup(). +

+ +
    public void setup(File envHome, boolean readOnly)
+        throws DatabaseException {
+
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+        SecondaryConfig mySecConfig = new SecondaryConfig();
+
+        // If the environment is read-only, then
+        // make the databases read-only too.
+        myEnvConfig.setReadOnly(readOnly);
+        myDbConfig.setReadOnly(readOnly);
+        mySecConfig.setReadOnly(readOnly);
+
+        // If the environment is opened for write, then we want to be
+        // able to create the environment and databases if
+        // they do not exist.
+        myEnvConfig.setAllowCreate(!readOnly);
+        myDbConfig.setAllowCreate(!readOnly);
+        mySecConfig.setAllowCreate(!readOnly);
+
+        ...
+        // Environment and database opens omitted for brevity
+        ...
+
+        // Open the secondary database. We use this to create a
+        // secondary index for the inventory database
+
+        // We want to maintain an index for the inventory entries based
+        // on the item name. So, instantiate the appropriate key creator
+        // and open a secondary database.
+        ItemNameKeyCreator keyCreator =
+            new ItemNameKeyCreator(new InventoryBinding());
+
+        // Set up the secondary properties
+        mySecConfig.setAllowPopulate(true); // Allow autopopulate
+        mySecConfig.setKeyCreator(keyCreator);
+        // Need to allow duplicates for our secondary database
+        mySecConfig.setSortedDuplicates(true);
+
+        // Now open it
+        itemNameIndexDb =
+            myEnv.openSecondaryDatabase(
+                    null,     
+                    "itemNameIndex", // Index name
+                    inventoryDb,     // Primary database handle. This is
+                                     // the db that we're indexing. 
+                    mySecConfig);    // The secondary config
+    } 
+

+ Next we need an additional getter method for returning the secondary database. +

+ +
    public SecondaryDatabase getNameIndexDB() {
+        return itemNameIndexDb;
+    } 
+

Finally, we need to update the MyDbEnv.close() + method to close the new secondary database. We want to make sure that + the secondary is closed before the primaries. While + this is not necessary for this example because our + closes are single-threaded, it is still a good habit to adopt.

+ +
    public void close() {
+        if (myEnv != null) {
+            try {
+                //Close the secondary before closing the primaries
+                itemNameIndexDb.close();
+                vendorDb.close();
+                inventoryDb.close();
+                classCatalogDb.close();
+
+                // Finally, close the environment.
+                myEnv.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing MyDbEnv: " +
+                                    dbe.toString());
+                System.exit(-1);
+            }
+        }
+    }
+} 
+

That completes our update to MyDbEnv. You + can find the complete class implementation in: +

+
JE_HOME/examples/je/gettingStarted/MyDbEnv.java 
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+
+
+
+
+

Because we performed all our secondary database configuration management in + MyDbEnv, we do not need to modify ExampleDatabasePut at all in + order to create our secondary indices. When ExampleDatabasePut calls + MyDbEnv.setup(), all of the necessary work is performed for us. +

+

+ However, we still need to take advantage of the new secondary indices. We do this by updating + ExampleInventoryRead to allow us to query for an inventory record based on its name. + Remember that the primary key for an inventory record is the item's SKU. The item's name is contained in the + Inventory object that is stored as each record's data in the inventory database. But + our new secondary index now allows us to easily query based on the item's name. +

+
+
+
+
+

Using Secondary Databases with ExampleInventoryRead

+
+
+
+

In the previous section we changed MyDbEnv + to cause a secondary database to be built using inventory item names as + the secondary keys. In this section, we will update + ExampleInventoryRead to allow us to query our + inventory records based on the item name. To do this, we will modify + ExampleInventoryRead to accept a new command line switch, + -s, whose argument is the name of an inventory item. + If the switch is present on the command line call to + ExampleInventoryRead, then the application will + use the secondary database to look up and display all the inventory + records with that item name. Note that we use a SecondaryCursor + to seek to the item name key and then display all matching records.

+

Remember that you can find the following class in:

+
JE_HOME/examples/je/gettingStarted/ExampleInventoryRead.java
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+
+ +

+ Example 10.3 SecondaryDatabase usage with ExampleInventoryRead +

+
+

First we need to import a few additional classes in order to use + secondary databases and cursors, and then we add a single global variable:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryCursor;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import java.io.File;
+import java.io.IOException; 
+
+public class ExampleInventoryRead {
+
+    private static File myDbEnvPath =
+        new File("/tmp/JEDB");
+
+    // Encapsulates the database environment and databases.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+
+    private static TupleBinding inventoryBinding;
+    private static EntryBinding vendorBinding;
+
+    // The item to locate if the -s switch is used
+    private static String locateItem; 
+

Next we update ExampleInventoryRead.run() to check whether the locateItem global variable has a value. If it does, then we show just those records related to the item name passed on the -s switch.

+ +
    private void run(String args[]) 
+        throws DatabaseException {
+            // Parse the arguments list
+            parseArgs(args);
+            myDbEnv.setup(myDbEnvPath, // path to the environment home
+                          true);      // is this environment read-only?
+
+            // Setup our bindings.
+            inventoryBinding = new InventoryBinding();
+            vendorBinding =
+                 new SerialBinding(myDbEnv.getClassCatalog(),
+                                   Vendor.class);
+
+            if (locateItem != null) {
+                showItem();
+            } else {
+                showAllInventory();
+            }
+    } 
+

+ Finally, we need to implement ExampleInventoryRead.showItem(). + This is a fairly simple method that opens a secondary cursor, + and then displays every primary record that is related to the secondary + key identified by the locateItem global variable. +

+ +
    private void showItem() throws DatabaseException {
+            SecondaryCursor secCursor = null;
+            try {
+                // searchKey is the key that we want to find in the 
+                // secondary db.
+                DatabaseEntry searchKey = 
+                    new DatabaseEntry(locateItem.getBytes("UTF-8"));
+
+                // foundKey and foundData are populated from the primary
+                // entry that is associated with the secondary db key.
+                DatabaseEntry foundKey = new DatabaseEntry();
+                DatabaseEntry foundData = new DatabaseEntry();
+
+                // open a secondary cursor
+                secCursor =
+                  myDbEnv.getNameIndexDB().openSecondaryCursor(null, null);
+
+                // Search for the secondary database entry.
+                OperationStatus retVal =
+                    secCursor.getSearchKey(searchKey, foundKey,
+                        foundData, LockMode.DEFAULT);
+
+                // Display the entry, if one is found. Repeat until no more
+                // secondary duplicate entries are found
+                while(retVal == OperationStatus.SUCCESS) {
+                    Inventory theInventory =
+                      (Inventory)inventoryBinding.entryToObject(foundData);
+                    displayInventoryRecord(foundKey, theInventory);
+                    retVal = secCursor.getNextDup(searchKey, foundKey,
+                        foundData, LockMode.DEFAULT);
+                }
+            } catch (Exception e) {
+                System.err.println("Error on inventory secondary cursor:");
+                System.err.println(e.toString());
+                e.printStackTrace();
+            } finally {
+                if (secCursor != null) {
+                    secCursor.close();
+                }
+            }
+        }
+

The only other thing left to do is to update + ExampleInventoryRead.parseArgs() to support the -s command + line switch. To see how this is done, see: +

+
JE_HOME/examples/je/gettingStarted/ExampleInventoryRead.java
+

+ where JE_HOME is the location where you + placed your JE distribution. +

+
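+ For reference, here is a minimal sketch of what that switch handling
+ might look like; the usage() helper is assumed to print help and exit,
+ and the shipped example may differ in its details.
+
    // A minimal sketch of -s handling; the shipped example may differ.
+    private static void parseArgs(String args[]) {
+        for (int i = 0; i < args.length; i++) {
+            if (args[i].startsWith("-")) {
+                switch (args[i].charAt(1)) {
+                    case 's':
+                        // The next argument is the item name to find.
+                        locateItem = args[++i];
+                        break;
+                    default:
+                        usage(); // Assumed helper: print help and exit.
+                }
+            }
+        }
+    }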
+
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/introduction.html b/docs/GettingStartedGuide/introduction.html new file mode 100644 index 0000000..68dbde7 --- /dev/null +++ b/docs/GettingStartedGuide/introduction.html @@ -0,0 +1,616 @@ + + + + + + Chapter 1. Introduction to Berkeley DB Java Edition + + + + + + + + + +
+
+
+
+

Chapter 1. Introduction to Berkeley DB Java Edition

+
+
+
+
+

+ Table of Contents +

+
+
+ + Features + +
+
+
+
+ + DPL Features + +
+
+ + Base API Features + +
+
+ + Which API Should You Use? + +
+
+
+
+ + The JE Application + +
+
+
+
+ + Database Environments + +
+
+ + Key-Data Pairs + +
+
+ + Storing Data + +
+
+ + Duplicate Data + +
+
+ + Replacing and Deleting Entries + +
+
+ + Secondary Keys + +
+
+ + Transactions + +
+
+ + JE Resources + +
+
+ + + Application Considerations + + +
+
+
+
+ + JE Backup and Restore + +
+
+ + JCA Support + +
+
+ + JConsole and JMX Support + +
+
+ + Getting and Using JE + +
+
+ + JE Exceptions + +
+
+ + Six Things Everyone Should Know about JE Log Files + +
+
+
+

+ Welcome to Berkeley DB Java Edition (JE). JE is a general-purpose, + transaction-protected, embedded database written in 100% Java (JE + makes no JNI calls). As such, it offers the Java developer safe and + efficient in-process storage and management of arbitrary data. +

+

+ You use JE through a series of Java APIs which give you the + ability to read and write your data, manage your database(s), and + perform other more advanced activities such as managing + transactions. The Java APIs that you use to interact with JE + come in two basic flavors. The first is a high-level API that + allows you to make Java classes persistent. The second is a + lower-level API which provides additional flexibility when interacting + with JE databases. +

+
+

Note

+

+ For long-time users of JE, the lower-level API is the + traditional API that you are probably accustomed to using. +

+
+

+ Regardless of the API set that you choose, there are a series of concepts and APIs that are common across the product. This manual starts by providing a high-level examination of JE. It then describes the concepts and APIs that apply no matter which API set you choose. It next provides information on using the Direct Persistence Layer (DPL) API, followed by information on using the more extensive "base" API. Finally, we provide some database administration information.

+

+ Note that the information provided here is intended to focus on + only introductory API usage. Other books describe more advanced + topics, such as transactional usage. See the For More Information section for + a listing of other titles in the JE documentation set. +

+
+
+
+
+

Features

+
+
+
+
+
+
+ + DPL Features + +
+
+ + Base API Features + +
+
+ + Which API Should You Use? + +
+
+
+

+ JE provides an enterprise-class Java-based data management + solution. All you need to get started is to add a single jar file to your + application's classpath. See Getting and Using JE for more information. +

+

+ JE offers the following major features: +

+
+
    +
  • +

    + Large database support. JE databases efficiently scale from one to millions of records. The size of your JE databases is likely to be limited more by hardware resources than by any limits imposed upon you by JE.

    +

    + Databases are described in + Databases. +

    +
  • +
  • +

    + Database environments. Database environments provide + a unit of encapsulation and management for one or + more databases. Environments are also the + unit of management for internal resources such as the + in-memory cache and the background threads. + Finally, you use environments to manage concurrency and + transactions. Note that all applications using JE + are required to use database environments. +

    +

    + Database environments are described in + Database Environments. +

    +
  • +
  • +

    + Multiple thread and process support. JE is designed for + multiple threads of control. Both read and write operations + can be performed by multiple threads. JE uses record-level + locking for high concurrency in threaded applications. + Further, JE uses timeouts for deadlock detection to help + you ensure that two threads of control do not deadlock + indefinitely. +

    +

    + Moreover, JE allows multiple processes to access the same databases. However, in this configuration JE requires that no more than one process write to the database; all other processes must access it read-only. Read-only processes are guaranteed a consistent, although potentially out-of-date, view of the stored data as of the time that the environment is opened.

    +
  • +
  • +

    Transactions. Transactions allow you to treat one or more + operations on one or more databases as a single unit of work. + JE transactions offer the application developer recoverability, atomicity, and + isolation for your database operations. +

    +

    Note that transaction protection is optional. Transactions are + described in the Berkeley DB, Java Edition Getting Started with Transaction Processing guide.

    +
  • +
  • +

    + In-memory cache. The cache allows for high speed + database access for both read and write operations by + avoiding unnecessary disk I/O. The cache will grow + on demand up to a pre-configured maximum size. To + improve your application's performance immediately + after startup time, you can preload your cache in + order to avoid disk I/O for production requests of + your data. +

    +

    + Cache management is described in + Sizing the Cache. +

    +
  • +
  • +

    + + Indexes. JE allows you to easily create and maintain + secondary indices for your primary data. In this way, you can obtain rapid + access to your data through the use of an alternative, or + secondary, key. +

    +

    + How indices work is dependent upon the API you are + using. If you are using the DPL, see + Working with Indices. + Otherwise, see + Secondary Databases. +

    +
  • +
  • +

    + Log files. JE databases are stored in one + or more numerically-named log files in the environment + directory. The log files are write-once and are + portable across platforms with different endian-ness. +

    +

    + Unlike other database implementations, there is no + distinction between database files (that is, the "material + database") and log files. Instead JE employs a log-based storage + system to protect database modifications. Before any + change is made to a database, JE writes information about the + change to the log file. +

    +

    + Note that JE's log files are not binary compatible with Berkeley + DB's database files. However, both products provide dump and load + utilities, and the files that these operate on are compatible across + product lines. +

    +

    + JE's log files are described in more detail in + Backing up and Restoring Berkeley DB Java Edition Applications. + For information on using JE's dump and load utilities, + see The Command Line Tools. + Finally, for a short list of things to know about log files while you are learning JE, + see Six Things Everyone Should Know about JE Log Files. +

    +
  • +
  • +

    + Background threads. JE provides several threads + that manage internal resources for you. The + checkpointer is responsible for flushing database + data to disk that was written to cache as the result + of a transaction commit (this is done in order to + shorten recovery time). The compressor thread + removes subtrees from the database that are empty + because of deletion activity. Finally, the cleaner + thread is responsible for cleaning and removing + unneeded log files, thereby helping you to save on + disk space. +

    +

    + Background thread management is described in + Managing the Background Threads. +

    +
  • +
  • +

    + + Backup and restore. JE's backup procedure + consists of simply copying JE's log files to a + safe location for storage. To recover from a + catastrophic failure, you copy your archived log + files back to your production location on disk and + reopen the JE environment. +

    +

    + + Note that JE always performs normal + recovery when it opens a database + environment. Normal recovery brings the database to a + consistent state based on change information found in + the database log files. + +

    +

    + JE's backup and recovery mechanisms are described in + Backing up and Restoring Berkeley DB Java Edition Applications. +

    +
  • +
+
+
+
+
+
+

DPL Features

+
+
+
+

+ The DPL is one of two APIs that JE provides for + interaction with JE databases. The DPL provides + the ability to cause any Java type to be persistent + without implementing special interfaces. The + only real requirement is that each persistent + class have a default constructor. +

+
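+ For illustration only, here is a minimal sketch of such a persistent
+ class. The Account class is hypothetical and not part of the JE
+ examples; it simply shows the annotation-based metadata and the
+ default-constructor requirement.
+
import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+
+@Entity
+public class Account {
+
+    @PrimaryKey
+    private long id;
+
+    private String owner;
+
+    // The DPL requires a default constructor; it may be private.
+    private Account() {}
+
+    public Account(long id, String owner) {
+        this.id = id;
+        this.owner = owner;
+    }
+}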

+ The DPL provides all of the features previously identified + in this chapter. In addition, the DPL offers you: +

+
+
    +
  • +

    + A type safe, convenient way to access + persistent objects. +

    +
  • +
  • +

    + No hand-coding of bindings is required. + A binding is a way of transforming data + types into a format which can be stored + in a JE database. If you do not use + the DPL, you may be required to + create custom bindings for your data + types. +

    +

    + See Using the BIND APIs + for more information on creating data bindings. +

    +

    + Note that Java byte code enhancement is + used by the DPL API to provide fully + optimized bindings that do not use Java + reflection. +

    +
  • +
  • +

    + No external schema is required to define + primary and secondary index keys. Java + annotations are used to define all + metadata. +

    +
  • +
  • +

    + Interoperability with external components is + supported using the Java collections framework. + Any index can be accessed using a standard + java.util collection. +

    +
  • +
  • +

    + Class evolution is explicitly supported. This + means you can add fields or widen types + automatically and transparently. +

    +

    + You can also perform many incompatible class + changes, such as renaming fields or refactoring a + single class. This is done using a built-in + DPL mechanism called + mutations. + Mutations are automatically applied as data is + accessed so as to avoid downtime to convert large + databases during a software upgrade. +

    +
  • +
  • +

    + Persistent class fields can be private, package-private, protected, or public. The DPL can access persistent fields either by bytecode enhancement or by reflection.

    +
  • +
  • +

    + The performance of the underlying JE + engine is safe-guarded. All + DPL operations are mapped directly to + the underlying APIs, object bindings are + lightweight, and all engine tuning parameters are + available. +

    +
  • +
  • +

    + Java 1.5 generic types and annotations are + supported. +

    +
  • +
+
+
+
+
+
+
+

Base API Features

+
+
+
+

+ If you are not using the DPL, then the following concepts and + features are likely to be of interest to you: +

+
+
    +
  • +

    + Database records. All database records are organized + as simple key/data pairs. Both keys and data can be + anything from primitive Java types to the most + complex of Java objects. +

    +

    + Database records are described in + Database Records. +

    +
  • +
  • +

    + Direct database read and write. You can use methods + of a Database object to read + and write database records. Reading and writing using + Database objects are described + in + Database Records. +

    +
  • +
  • +

    + Cursors. Cursors give you the ability to sequentially + move through a database. Using cursors, you can seek + to a specific point in the database (using search + criteria applied to the key and/or the data portion + of a database record) and then either step forward or + step backwards through the database. +

    +

    + Cursors are described in detail in + Using Cursors. +

    +
  • +
  • +

    + JCA. JE provides support for the Java Connector Architecture. See + JCA Support for more information. +

    +
  • +
  • +

    + JMX. JE provides support for Java Management Extensions. + See JConsole and JMX Support for more information. +

    +
  • +
+
+
+
+
+
+
+

Which API Should You Use?

+
+
+
+

+ Of the two APIs that JE makes available to you, we recommend that you use the DPL if all you want to do is make classes with a relatively static schema persistent.

+

+ Further, if you are porting an application between Berkeley DB and Berkeley DB Java Edition, then you should not use the DPL, because the base API is a much closer match to the Berkeley DB Java API.

+

+ Additionally, if your application uses a highly dynamic + schema, then the DPL is probably a poor choice for + your application, although the use of Java annotations + can make the DPL work a little better for you in this + situation. +

+
+
+
+ + + diff --git a/docs/GettingStartedGuide/inventoryclass.html b/docs/GettingStartedGuide/inventoryclass.html new file mode 100644 index 0000000..62ab7ec --- /dev/null +++ b/docs/GettingStartedGuide/inventoryclass.html @@ -0,0 +1,148 @@ + + + + + + Inventory.java + + + + + + + + + +
+
+
+
+

Inventory.java

+
+
+
+

+ Our example's Inventory + class is much like our Vendor + class in that it is simply used to encapsulate + data. However, in this case we want to be able + to access objects two different ways: by + product SKU and by product name. +

+

+ In our data set, the product SKU is required to be + unique, so we use that as the primary key. The + product name, however, is not a unique value so we + set this up as a secondary key. +

+

+ The class appears as follows in our example: +

+
package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+import com.sleepycat.persist.model.SecondaryKey;
+
+@Entity
+public class Inventory {
+
+    // Primary key is sku
+    @PrimaryKey
+    private String sku;
+
+    // Secondary key is the itemName
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String itemName;
+
+    private String category;
+    private String vendor;
+    private int vendorInventory;
+    private float vendorPrice;
+
+    public void setSku(String data) {
+        sku = data;
+    }
+
+    public void setItemName(String data) {
+        itemName = data;
+    }
+
+    public void setCategory(String data) {
+        category = data;
+    }
+
+    public void setVendorInventory(int data) {
+        vendorInventory = data;
+    }
+
+    public void setVendor(String data) {
+        vendor = data;
+    }
+
+    public void setVendorPrice(float data) {
+        vendorPrice = data;
+    }
+
+    public String getSku() {
+        return sku;
+    }
+
+    public String getItemName() {
+        return itemName;
+    }
+
+    public String getCategory() {
+        return category;
+    }
+
+    public int getVendorInventory() {
+        return vendorInventory;
+    }
+
+    public String getVendor() {
+        return vendor;
+    }
+
+    public float getVendorPrice() {
+        return vendorPrice;
+    }
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/jca.html b/docs/GettingStartedGuide/jca.html new file mode 100644 index 0000000..5231582 --- /dev/null +++ b/docs/GettingStartedGuide/jca.html @@ -0,0 +1,106 @@ + + + + + + JCA Support + + + + + + + + + +
+
+
+
+

JCA Support

+
+
+
+

+ JCA is the Java Connector Architecture. This architecture provides a standard for connecting + the J2EE platform to legacy enterprise information systems (EIS), such as ERP systems, database systems, and + legacy applications not written in Java. JE supports this architecture. +

+

+ Users who want to run JE within a J2EE Application Server can use the JCA Resource Adapter to connect to + JE through a standard API. Note that the base API is required if you want to do this. + The JE Resource Adapter supports all three J2EE application server transaction + types: +

+
+
  • No transaction.

  • Local transactions.

  • XA transactions.
+
+

+ JCA also includes the Java Transaction API (JTA), which means that JE supports two-phase commit (XA). Therefore, JE can participate in distributed transactions managed either by a J2EE server or by the application's direct use of the JTA API.

+

+ The JE distribution includes an example showing JCA usage in a simple EJB. The Resource Adapter has been tested using JBoss 3.2.6, and the Sun Java System Application Server, version 8.1. Instructions for how to build the Resource Adapter and run a simple "smoke test" example for each of the application servers can be found here:

+
JE_HOME/examples/jca/HOWTO-jboss.txt
+

+ and +

+
JE_HOME/examples/jca/HOWTO-sjsas.txt
+
+ + + diff --git a/docs/GettingStartedGuide/jeexceptions.html b/docs/GettingStartedGuide/jeexceptions.html new file mode 100644 index 0000000..3d82254 --- /dev/null +++ b/docs/GettingStartedGuide/jeexceptions.html @@ -0,0 +1,154 @@ + + + + + + JE Exceptions + + + + + + + + + +
+
+
+
+

JE Exceptions

+
+
+
+

+ Before describing the Java API usage, it is first useful to examine the + exceptions thrown by those APIs. So, briefly, this section describes the + exceptions that you can generally expect to encounter when writing JE + applications. This list is not definitive. Exceptions beyond these can be + expected, depending on the specific database activity you are performing. + See the Javadoc for more information. +

+

+ All of the JE APIs throw + DatabaseException. + DatabaseException extends + java.lang.Exception. Also, the following + classes are subclasses of DatabaseException: +

+
+
    +
  • +

    + DatabaseNotFoundException +

    +

    + Thrown whenever an operation requires a database, and that + database cannot be found. +

    +
  • +
  • +

    + DiskLimitException +

    +

    + Indicates that you have reached your disk usage thresholds. Writes are + no longer allowed when these thresholds are exceeded. The thresholds + are set using the EnvironmentConfig.MAX_DISK and EnvironmentConfig.FREE_DISK properties. This exception + can be thrown as the result of any write operation, including database + record writes, checkpoints, and database and environment syncs. +

    +

    + When closing the environment, this exception can be seen. + However, the environment will still be properly closed. +

    +

    + For information on setting these properties, see + Setting Disk Thresholds. +

    +
  • +
  • +

    + EnvironmentFailureException +

    +

    + Indicates that a failure has occurred that could impact the Environment as a whole. Depending on the nature of the failure, this exception might indicate that Environment.close() should be called. Use Environment.isValid() to determine whether the environment can still be used: if it returns true, the environment can continue operating without being reopened; if it returns false, all Environment handles must be closed and reopened.

    +
  • +
  • +

    + LockConflictException +

    +

    + The common base class for all exceptions that result from record lock conflicts. Upon receiving this exception, any open cursors must be closed and the enclosing transaction aborted; optionally, the transaction can then be retried. (A minimal retry sketch appears after this list.) Transactions are described in the Berkeley DB, Java Edition Getting Started with Transaction Processing guide.

    +
  • +
  • +

    + LogWriteException +

    +

    + Thrown when an IOException or other failure occurs + when writing to the JE log. This exception might be indicative of a + full disk, although an IOException does not contain + enough information to determine this definitively. +

    +

    + This exception can be thrown as the result of any write + operation, including database record writes, checkpoints, and + database and environment syncs. +

    +
  • +
+
+

+ Note that DatabaseException and its subclasses belong to the + com.sleepycat.je package. +

+
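+ As noted for LockConflictException above, the usual response is to
+ abort and retry the transaction. The following minimal sketch assumes
+ an open transactional Environment (env) and Database (db); both names
+ are illustrative. Transactional usage is covered in the Berkeley DB,
+ Java Edition Getting Started with Transaction Processing guide.
+
// A minimal retry sketch; env and db are assumed to be open and
+// configured for transactions.
+DatabaseEntry key = new DatabaseEntry("someKey".getBytes());
+DatabaseEntry data = new DatabaseEntry("someData".getBytes());
+
+int maxRetries = 5;
+for (int attempt = 0; attempt < maxRetries; attempt++) {
+    Transaction txn = env.beginTransaction(null, null);
+    try {
+        db.put(txn, key, data);
+        txn.commit();
+        break;       // Success: stop retrying.
+    } catch (LockConflictException lce) {
+        txn.abort(); // Abort, then fall through and retry.
+    }
+}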
+ + + diff --git a/docs/GettingStartedGuide/jmx.html b/docs/GettingStartedGuide/jmx.html new file mode 100644 index 0000000..4f850ce --- /dev/null +++ b/docs/GettingStartedGuide/jmx.html @@ -0,0 +1,74 @@ + + + + + + JConsole and JMX Support + + + + + + + + + +
+
+
+
+

JConsole and JMX Support

+
+
+
+

+ JMX is the Java Management Extensions. This extension provides + tools for managing and monitoring devices, applications, and service-driven networks. JE supports this extension. +

+

+ The JE distribution supplies MBeans that can be deployed for monitoring a JE environment in any JMX server (such as a J2EE application server). These MBeans expose JE statistics and allow key operations to be invoked. In addition, JE provides a plugin for the jconsole utility which lets the user graphically access this information.

+

+ For information on how to use the jconsole plugin see: +

+
JE_HOME/docs/jconsole/JConsole-plugin.html
+
+ + + diff --git a/docs/GettingStartedGuide/joins.html b/docs/GettingStartedGuide/joins.html new file mode 100644 index 0000000..600b1f4 --- /dev/null +++ b/docs/GettingStartedGuide/joins.html @@ -0,0 +1,377 @@ + + + + + + Database Joins + + + + + + + + + +
+
+
+
+

Database Joins

+
+
+
+
+
+
+ + Using Join Cursors + +
+
+ + JoinCursor Properties + +
+
+
+

+ If you have two or more secondary databases associated with a primary + database, then you can retrieve primary records based on the intersection of + multiple secondary entries. You do this using a + JoinCursor. + +

+

+ Throughout this document we have presented a + class + + that stores + inventory + information on grocery + items. + + That + class + + is fairly simple with a limited + number of data members, few of which would be interesting from a query + perspective. But suppose, instead, that we were storing + information on something with many more characteristics that can be queried, such + as an automobile. In that case, you may be storing information such as + color, number of doors, fuel mileage, automobile type, number of + passengers, make, model, and year, to name just a few. +

+

+ In this case, you would still likely be using some unique value to key your + primary entries (in the United States, the automobile's VIN would be + ideal for this purpose). You would then create a + class + + that identifies + all the characteristics of the automobiles in your inventory. + + + You would + also have to create some mechanism by which you would move instances of + this class in and out of Java byte arrays. We + described the concepts and mechanisms by which you can perform these + activities in Database Records. + +

+

+ To query this data, you might then create multiple secondary databases, + one for each of the characteristics that you want to query. For + example, you might create a secondary for color, another for number of + doors, another for number of passengers, and so forth. Of course, you + will need a unique + key creator + + for each such secondary database. You do + all of this using the concepts and techniques described throughout this + chapter. +

+

+ Once you have created this primary database and all interesting + secondaries, what you have is the ability to retrieve automobile records + based on a single characteristic. You can, for example, find all the + automobiles that are red. Or you can find all the automobiles that have + four doors. Or all the automobiles that are minivans. +

+

+ The next most natural step, then, is to form compound queries, or joins. + For example, you might want to find all the automobiles that are red, + and that were built by Toyota, and that are minivans. You can do this + using a + JoinCursor class instance. + +

+
+
+
+
+

Using Join Cursors

+
+
+
+

+ To use a join cursor: +

+
+
  • Open two or more secondary cursors. These cursors must be obtained from secondary databases that are associated with the same primary database.

  • Position each such cursor to the secondary key value in which you are interested. For example, to build on the previous description, the cursor for the color database is positioned to the red records while the cursor for the model database is positioned to the minivan records, and the cursor for the make database is positioned to Toyota.

  • Create an array of secondary cursors, and place in it each of the cursors that are participating in your join query.

  • Obtain a join cursor. You do this using the Database.join() method. You must pass this method the array of secondary cursors that you opened and positioned in the previous steps.

  • Iterate over the set of matching records using JoinCursor.getNext() until OperationStatus is not SUCCESS.

  • Close your join cursor.

  • If you are done with them, close all your secondary cursors.
+
+

+ For example: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.JoinCursor;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+
+...
+
+// Database and secondary database opens omitted for brevity.
+// Assume a primary database handle:
+//   automotiveDB
+// Assume 3 secondary database handles:
+//   automotiveColorDB  -- index based on automobile color
+//   automotiveTypeDB  -- index based on automobile type
+//   automotiveMakeDB   -- index based on the manufacturer
+
+// Query strings:
+String theColor = "red";
+String theType = "minivan";
+String theMake = "Toyota";
+
+// Secondary cursors used for the query:
+SecondaryCursor colorSecCursor = null;
+SecondaryCursor typeSecCursor = null;
+SecondaryCursor makeSecCursor = null;
+
+// The join cursor
+JoinCursor joinCursor = null;
+
+// These are needed for our queries
+DatabaseEntry foundKey = new DatabaseEntry();
+DatabaseEntry foundData = new DatabaseEntry();
+
+// All cursor operations are enclosed in a try block to ensure that they
+// get closed in the event of an exception.
+
+try {
+    // Database entries used for the query:
+    DatabaseEntry color = new DatabaseEntry(theColor.getBytes("UTF-8"));
+    DatabaseEntry type = new DatabaseEntry(theType.getBytes("UTF-8"));
+    DatabaseEntry make = new DatabaseEntry(theMake.getBytes("UTF-8"));
+
+    colorSecCursor = automotiveColorDB.openSecondaryCursor(null, null); 
+    typeSecCursor = automotiveTypeDB.openSecondaryCursor(null, null); 
+    makeSecCursor = automotiveMakeDB.openSecondaryCursor(null, null); 
+
+    // Position all our secondary cursors to our query values.
+    OperationStatus colorRet = 
+        colorSecCursor.getSearchKey(color, foundData, LockMode.DEFAULT);
+    OperationStatus typeRet = 
+        typeSecCursor.getSearchKey(type, foundData, LockMode.DEFAULT);
+    OperationStatus makeRet = 
+        makeSecCursor.getSearchKey(make, foundData, LockMode.DEFAULT);
+
+    // If all our searches returned successfully, we can proceed
+    if (colorRet == OperationStatus.SUCCESS &&
+        typeRet == OperationStatus.SUCCESS &&
+        makeRet == OperationStatus.SUCCESS) {
+
+        // Get a secondary cursor array and populate it with our
+        // positioned cursors
+        SecondaryCursor[] cursorArray = {colorSecCursor,
+                                         typeSecCursor, 
+                                         makeSecCursor};
+
+        // Create the join cursor
+        joinCursor = automotiveDB.join(cursorArray, null);
+
+        // Now iterate over the results, handling each in turn
+        while (joinCursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS) {
+
+            // Do something with the key and data retrieved in
+            // foundKey and foundData
+        }
+    }
+} catch (DatabaseException dbe) {
+    // Error reporting goes here
+} catch (Exception e) {
+    // Error reporting goes here
+} finally {
+    try {
+        // Make sure to close out all our cursors
+        if (colorSecCursor != null) {
+            colorSecCursor.close();
+        }
+        if (typeSecCursor != null) {
+            typeSecCursor.close();
+        }
+        if (makeSecCursor != null) {
+            makeSecCursor.close();
+        }
+        if (joinCursor != null) {
+            joinCursor.close();
+        }
+    } catch (DatabaseException dbe) {
+        // Error reporting goes here
+    }
+} 
+
+
+
+
+
+

JoinCursor Properties

+
+
+
+

+ You can set JoinCursor properties using the + JoinConfig class. Currently there is just one property that you can + set: +

+
+
    +
  • +

    + JoinConfig.setNoSort() +

    +

    + Specifies whether automatic sorting of input cursors is disabled. The cursors are sorted from the + one that refers to the least number of data items to the one that refers to the most. +

    +

    + If the data is structured so that cursors with many data items also share many common elements, + higher performance will result from listing those cursors before cursors with fewer data + items. Turning off sorting permits applications to specify cursors in the proper order given this + scenario. +

    +

    + The default value is false (automatic cursor sorting is performed). +

    +

    + For example: +

    + +
    // All database and environments omitted
    +JoinConfig config = new JoinConfig();
    +config.setNoSort(true);
    +JoinCursor joinCursor = myDb.join(cursorArray, config); 
    +
  • +
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/keyCreator.html b/docs/GettingStartedGuide/keyCreator.html new file mode 100644 index 0000000..8bf2e3f --- /dev/null +++ b/docs/GettingStartedGuide/keyCreator.html @@ -0,0 +1,241 @@ + + + + + + Implementing Key Creators + + + + + + + + + +
+
+
+
+

Implementing Key + Creators + +

+
+
+
+

+ You must provide every secondary database with a + class + + that creates keys from primary records. You identify this + class + + + + using the SecondaryConfig.setKeyCreator() + method. + + +

+

+ You can create keys using whatever data you want. Typically you will + base your key on some information found in a record's data, but you + can also use information found in the primary record's key. How you build + your keys is entirely dependent upon the nature of the index that you + want to maintain. +

+

+ You implement a key creator by writing a class that implements the + SecondaryKeyCreator interface. This interface + requires you to implement the SecondaryKeyCreator.createSecondaryKey() + method. +

+

+ One thing to remember when implementing this method is that you will + need a way to extract the necessary information from the data's + DatabaseEntry and/or the key's + DatabaseEntry that are provided on calls to this + method. If you are using complex objects, then you are probably using the + Bind APIs to perform this conversion. The easiest thing to do is to + instantiate the EntryBinding or + TupleBinding that you need to perform the + conversion, and then provide this to your key creator's constructor. + The Bind APIs are introduced in Using the BIND APIs. +

+

+ SecondaryKeyCreator.createSecondaryKey() returns a + boolean. A return value of false indicates that + no secondary key exists, and therefore no record should be added to the secondary database for that primary record. + If a record already exists in the secondary database, it is deleted. +

+
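+ For instance, here is a minimal sketch of a key creator that declines
+ to index some records. The MyRecord class, its getIndexedField()
+ method, and the theBinding field are all assumed for illustration.
+
// A minimal sketch: return false when the record has nothing to
+// index, so no secondary record is created (and any existing
+// secondary record for this primary record is deleted).
+public boolean createSecondaryKey(SecondaryDatabase secDb,
+                                  DatabaseEntry keyEntry,
+                                  DatabaseEntry dataEntry,
+                                  DatabaseEntry resultEntry) {
+    MyRecord record = (MyRecord) theBinding.entryToObject(dataEntry);
+    String indexedField = record.getIndexedField();
+    if (indexedField == null || indexedField.length() == 0) {
+        return false; // No secondary key for this primary record.
+    }
+    resultEntry.setData(indexedField.getBytes());
+    return true;
+}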

+ For example, suppose your primary database uses the following class + for its record data: +

+ +
package je.gettingStarted;
+
+public class PersonData {
+    private String userID;
+    private String surname;
+    private String familiarName;
+
+    public PersonData(String userID, String surname, 
+                      String familiarName) {
+        this.userID = userID;
+        this.surname = surname;
+        this.familiarName = familiarName;
+    }
+
+    public String getUserID() {
+        return userID;
+    }
+
+    public String getSurname() {
+        return surname;
+    }
+
+    public String getFamiliarName() {
+        return familiarName;
+    }
+} 
+

+ Also, suppose that you have created a custom tuple binding, + PersonDataBinding, that you use to convert + PersonData objects to and from + DatabaseEntry objects. (Custom tuple bindings are + described in Custom Tuple Bindings.) +

+

+ Finally, suppose you want a secondary database that is keyed based + on the person's full name. +

+

+ Then in this case you might create a key creator as follows: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.SecondaryDatabase;
+
+import java.io.IOException;
+
+public class FullNameKeyCreator implements SecondaryKeyCreator {
+ 
+    private TupleBinding theBinding;
+
+    public FullNameKeyCreator(TupleBinding theBinding1) {
+            theBinding = theBinding1;
+    }
+
+    public boolean createSecondaryKey(SecondaryDatabase secDb,
+                                      DatabaseEntry keyEntry, 
+                                      DatabaseEntry dataEntry,
+                                      DatabaseEntry resultEntry) {
+
+        try {
+            PersonData pd = 
+                (PersonData) theBinding.entryToObject(dataEntry);
+                String fullName = pd.getFamiliarName() + " " + 
+                    pd.getSurname();
+                resultEntry.setData(fullName.getBytes("UTF-8"));
+        } catch (IOException willNeverOccur) {}
+        return true;
+    }
+} 
+

Finally, you use this key creator as follows:

+ +
package je.gettingStarted;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryConfig;
+
+...
+
+Environment myEnv = null;
+Database myDb = null;
+SecondaryDatabase mySecDb = null;
+try {
+    // Environment and primary database open omitted for brevity
+...
+
+    TupleBinding myDataBinding = new MyTupleBinding();
+    FullNameKeyCreator fnkc = new FullNameKeyCreator(myDataBinding);
+
+    SecondaryConfig mySecConfig = new SecondaryConfig();
+    mySecConfig.setKeyCreator(fnkc);
+
+    //Perform the actual open
+    String secDbName = "mySecondaryDatabase";
+    mySecDb = myEnv.openSecondaryDatabase(null, secDbName, myDb, 
+                                          mySecConfig);
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} finally {
+    try {
+        if (mySecDb != null) {
+            mySecDb.close();
+        }
+
+        if (myDb != null) {
+            myDb.close(); 
+        }
+
+        if (myEnv != null) {
+            myEnv.close();
+        }
+    } catch (DatabaseException dbe) {
+        // Exception handling goes here
+    }
+}
+
+ + + diff --git a/docs/GettingStartedGuide/logfilesrevealed.html b/docs/GettingStartedGuide/logfilesrevealed.html new file mode 100644 index 0000000..d429273 --- /dev/null +++ b/docs/GettingStartedGuide/logfilesrevealed.html @@ -0,0 +1,133 @@ + + + + + + Six Things Everyone Should Know about JE Log Files + + + + + + + + + +
+
+
+
+

Six Things Everyone Should Know about JE Log Files

+
+
+
+

+ JE log files are not like the log files of other database systems. Nor are they like the log files or database + files created by Berkeley DB C Edition. In this guide you will learn more about log files as you go along, but it is good + to keep the following points in mind as you begin using JE. +

+
+
    +
  1. +

    + JE log files are "append only". Record insertions, deletions, and updates are always added at the end + of the current file. The first file is named 00000000.jdb. When that file grows to a certain size + (10 MB by default) a new file named 00000001.jdb becomes the current file, and so on. +

    +
  2. +

    + There are no separate database files. Unlike Berkeley DB C Edition, databases are not stored in files + that are separate from the transaction log. The transaction log and the database records are stored + together in a single sequential log consisting of multiple log files. +

    +
  3. +

+ The JE cleaner is responsible for reclaiming unused disk space. When the records in a log file are superseded by deletions or updates recorded in a later log file, that older file is no longer fully utilized. The cleaner, which runs by default as a separate thread, finds the least utilized log files, copies any records still in use from those files to the end of the current log file, and finally deletes the now completely obsolete log file.

    +

    + See The Cleaner Thread for more information on the cleaner. +

    +
  4. +

    + Cleaning does not start immediately and never produces 100% utilization. Until you have written enough + data to create several log files, and some of that data is obsoleted through deletions and updates, you + will not notice any log files being deleted by the cleaner. By default cleaning occurs in the background and + maintains the log files at 50% utilization. You can configure a higher utilization value, but + configuring too high a utilization value will reduce overall performance. +

    +
  5. +

+ Cleaning is not automatically performed when closing the environment. If you wish to reduce unused disk space to a minimum at a particular point in time, you must explicitly call a method to perform log cleaning (see the sketch following this list). See Closing Database Environments for more information.

    +
  6. +

    + Log file deletion only occurs after a checkpoint. The cleaner prepares log files to be deleted, but + file deletion must be performed after a checkpoint to ensure that the files are no longer referenced. + Checkpoints occur on their own schedule, which is every 20 MB of log written, by default. This is part + of the reason that you will not see log files being deleted until after several files have been created. +

    +

+ When using JE's replication (high availability) feature, a checkpoint does not delete the cleaned log file but instead changes the status of the file to reserved. Reserved files are only deleted when disk utilization exceeds the value set by either EnvironmentConfig.MAX_DISK or EnvironmentConfig.FREE_DISK.

    +
  7. +

    + Log files can be spread across multiple directories, and therefore across multiple + disks through the use of links or mount points. See + Multiple Environment Subdirectories + for more information. +

    +
+
+
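+
+ As a sketch of points 5 and 6 above: the method you call to perform
+ log cleaning is Environment.cleanLog(), and a forced checkpoint
+ allows the cleaned files to actually be deleted. The environment
+ handle name (myEnv) is assumed for illustration:
+
+import com.sleepycat.je.CheckpointConfig;
+
+...
+
+// Clean the log until no more files can be cleaned, then force a
+// checkpoint so that the cleaned files can be deleted (or, with
+// replication, marked reserved).
+boolean anyCleaned = false;
+while (myEnv.cleanLog() > 0) {
+    anyCleaned = true;
+}
+if (anyCleaned) {
+    CheckpointConfig force = new CheckpointConfig();
+    force.setForce(true);
+    myEnv.checkpoint(force);
+}
+myEnv.close();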
+ + + diff --git a/docs/GettingStartedGuide/managelogging.html b/docs/GettingStartedGuide/managelogging.html new file mode 100644 index 0000000..acafd8e --- /dev/null +++ b/docs/GettingStartedGuide/managelogging.html @@ -0,0 +1,260 @@ + + + + + + Logging + + + + + + + + + +
+
+
+
+

Logging

+
+
+
+ +

+ JE uses the java.util.logging package to log operations and trace messages. A distinct logger is defined for each significant component of the system. The use of distinct loggers, along with controllable logging levels, lets you tune the logging output to show exactly what you need to know, without extraneous detail, when monitoring your application's activities or debugging runtime problems.

+

+ Logging output can be displayed to the console and the je.info file in your application's environment directory. Setting the logger level controls the types of messages that are published to the handlers. Setting the handler level determines whether and where the published messages are displayed.

+
+
+
+
+

Managing Logging Levels

+
+
+
+

+ The default logging level for JE loggers is INFO. At that level, a non-replicated environment issues messages only when critical exceptions are encountered. A replicated environment issues node transition messages, which should be comprehensible to users familiar with the replication group life cycle and can be extremely useful when monitoring your application's activities. The output at the INFO level is not verbose; it simply details the node startup and shutdown operations. Initial configuration problems, if any, should show up during the startup operation. You are strongly advised to run your production application with this level of logging.

+

+ Finer levels of logging are available for debugging purposes. + These will generate verbose output that is rich in implementation + detail. The output at these levels is only likely to be helpful + to people familiar with JE's implementation and the + application's use of JE, so you should only configure your logging + for these more verbose levels if you are involved in a detailed + debugging effort. +

+

+ To set or change the logger level before the environment is + opened, do one of the following: +

+
+
    +
  1. +

    + Set logging levels using the standard Java LogManager + properties file. For example, you can set: +

    +
    com.sleepycat.je.level=INFO
    +

+ in the LogManager properties file to set the logger level for all JE loggers. (A fuller sample properties file appears after this list.)

    +
  2. +

    + Set logging levels programmatically using the + java.util.logging API. For example: +

    +
    ...
    +// All other imports are omitted for brevity
    +import java.util.logging.Level;
    +import java.util.logging.Logger;
    +...
    +
    +Logger parent = Logger.getLogger("com.sleepycat.je");
    +parent.setLevel(Level.FINE);  // Loggers will now publish more 
    +                              // detailed messages.   
    +
+
+
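+
+ For example, a minimal LogManager properties file (the file name
+ logging.properties is assumed here) might look as follows; pass it to
+ the JVM with -Djava.util.logging.config.file=logging.properties:
+
+# Hypothetical logging.properties file.
+# Publish records through the console handler.
+handlers=java.util.logging.ConsoleHandler
+
+# Set the logger level for all JE loggers.
+com.sleepycat.je.level=INFO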

+ To set or change the logger level after the environment is + opened, do one of the following: +

+
+
    +
  1. +

    + Use the standard java.util.logging MBean to set a + concrete JE logger. +

    +
  2. +

    + Use the JEDiagnostic MBean to set the parent + com.sleepycat.je logger. + See the JConsole Plugin page for information on + this MBean. +

    +
  3. +

    + Use the programmatic java.util.logging API described + above to change the logger. +

    +
+
+
+
+
+
+
+

Managing Handler Levels

+
+
+
+

+ Output to the je.info file is managed by + the JE FileHandler, while output to the console is managed + by the JE ConsoleHandler. By default, no output is shown on + the console, and only INFO level messages are + sent to je.info. +

+

+ To set or change the handler level before the environment is + opened, do one of the following: +

+
+
    +
  1. +

    + Set logging levels using the standard Java + LogManager properties file. For example, you can + set: +

    +
    com.sleepycat.je.util.FileHandler.level=ALL
    +com.sleepycat.je.util.ConsoleHandler.level=ALL
    +

+ in the LogManager properties file to display all logging output to the console and to the je.info file.

    +
  2. +

    + The java.util.logging package does not supply an + API for setting handler levels. Instead, use the + following JE environment parameter: +

    +
    ...
    +EnvironmentConfig envConfig = new EnvironmentConfig();
    +envConfig.setAllowCreate(true);
    +envConfig.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL, "ALL");
    +envConfig.setConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL, 
    +                         "ALL");
    +
    +...
    +// Open your environment as normal here
    +...   
    +
+
+

+ To set or change the handler level after the environment is + opened, do one of the following: +

+
+
    +
  1. +

+ Use EnvironmentMutableConfig.setConfigParam() to change the handler levels using the JE properties described above, as shown in the sketch following this list.

    +
  2. +

    + Use the JEDiagnostic MBean to change handler levels. + See the JConsole Plugin page for information on + this MBean. +

    +
+
+
+
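+
+ The following is a minimal sketch of the first option; it assumes an
+ open Environment handle named myEnv:
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+
+...
+
+// Fetch the current mutable configuration, change the handler
+// levels, and apply the new configuration to the open environment.
+EnvironmentMutableConfig mutableConfig = myEnv.getMutableConfig();
+mutableConfig.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL,
+                             "FINE");
+mutableConfig.setConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL,
+                             "FINE");
+myEnv.setMutableConfig(mutableConfig);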
+ + + diff --git a/docs/GettingStartedGuide/moreinfo.html b/docs/GettingStartedGuide/moreinfo.html new file mode 100644 index 0000000..2f2fd2c --- /dev/null +++ b/docs/GettingStartedGuide/moreinfo.html @@ -0,0 +1,165 @@ + + + + + + For More Information + + + + + + + + + +
+
+
+
+

For More Information

+
+
+
+
+
+
+ + Contact Us + +
+
+
+

+ Beyond this manual, you may also find the following sources of + information useful when building a JE application: +

+ + +

+ To download the latest + + Berkeley DB Java Edition + + documentation along with white papers and other collateral, + visit http://www.oracle.com/technetwork/indexes/documentation/index.html. +

+

+ For the latest version of the Oracle + + Berkeley DB Java Edition + + downloads, visit + http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html. +

+
+
+
+
+
+

Contact Us

+
+
+
+

+ You can post your comments and questions at the Oracle Technology Network (OTN) forum for Oracle Berkeley DB Java Edition at: https://forums.oracle.com/forums/forum.jspa?forumID=273.

+

+ For sales or support information, email: berkeleydb-info_us@oracle.com. You can subscribe to a low-volume email announcement list for the Berkeley DB product family by sending email to: bdb-join@oss.oracle.com.

+
+
+ + + diff --git a/docs/GettingStartedGuide/multiprocess.html b/docs/GettingStartedGuide/multiprocess.html new file mode 100644 index 0000000..99537ca --- /dev/null +++ b/docs/GettingStartedGuide/multiprocess.html @@ -0,0 +1,89 @@ + + + + + + Multiprocess Applications + + + + + + + + +
+
+
+
+

Multiprocess Applications

+
+
+
+

+ Note the following if you are writing an application that wants to access JE databases from multiple + processes: +

+
+
    +
  • +

    + In JE, you must use environments. Further, a database can be opened for write access only if the + environment is opened for write access. Finally, only one process may have an environment opened for + write access at a time. +

    +
  • +

+ If your process attempts to open an environment for write, and another process has already opened that environment for write, then the open will fail. In this event, the process must either exit or open the environment as read-only (see the sketch following this list).

    +
  • +

    + A process that opens an environment for read-only receives a snapshot of the data in that environment. + If another process modifies the environment's databases in any way, the read-only version of the data + will not be updated until the read-only process closes and reopens the environment (and by extension all + databases in that environment). +

    +
+
+
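+
+ The following sketch illustrates the fallback described above. It
+ assumes that the failed write open surfaces as
+ com.sleepycat.je.EnvironmentLockedException, which JE throws when
+ another process already holds the environment for write:
+
+import java.io.File;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentLockedException;
+
+...
+
+File envHome = new File("/export/dbEnv");
+Environment myEnv = null;
+try {
+    // First, try to open the environment for write access.
+    EnvironmentConfig writeConfig = new EnvironmentConfig();
+    writeConfig.setAllowCreate(true);
+    myEnv = new Environment(envHome, writeConfig);
+} catch (EnvironmentLockedException ele) {
+    // Another process holds the environment for write; fall back
+    // to a read-only open.
+    EnvironmentConfig readConfig = new EnvironmentConfig();
+    readConfig.setReadOnly(true);
+    myEnv = new Environment(envHome, readConfig);
+}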
+ + + diff --git a/docs/GettingStartedGuide/mydbenv-persist.html b/docs/GettingStartedGuide/mydbenv-persist.html new file mode 100644 index 0000000..d73ab2d --- /dev/null +++ b/docs/GettingStartedGuide/mydbenv-persist.html @@ -0,0 +1,156 @@ + + + + + + MyDbEnv + + + + + + + + + +
+
+
+
+

MyDbEnv

+
+
+
+

+ Both of the applications that we are building for our example must open and close environments and entity stores. One of our applications writes to the entity store, so it needs to open the store as read-write. It also wants to be able to create the store if it does not exist.

+

+ Our second application only reads from the store. In this + case, the store should be opened as read-only. +

+

+ We perform these activities by creating a single class that is responsible for opening and closing our store and environment. This class is shared by both our applications. To use it, callers need only provide the path to the environment home directory and indicate whether the object is meant to be read-only. The class implementation is as follows:

+
package persist.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+public class MyDbEnv {
+
+    private Environment myEnv;
+    private EntityStore store;
+
+    // Our constructor does nothing
+    public MyDbEnv() {}
+
+    // The setup() method opens the environment and store
+    // for us.
+    public void setup(File envHome, boolean readOnly)
+        throws DatabaseException {
+
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        StoreConfig storeConfig = new StoreConfig();
+
+        myEnvConfig.setReadOnly(readOnly);
+        storeConfig.setReadOnly(readOnly);
+
+        // If the environment is opened for write, then we want to be 
+        // able to create the environment and entity store if 
+        // they do not exist.
+        myEnvConfig.setAllowCreate(!readOnly);
+        storeConfig.setAllowCreate(!readOnly);
+
+        // Open the environment and entity store
+        myEnv = new Environment(envHome, myEnvConfig);
+        store = new EntityStore(myEnv, "EntityStore", storeConfig);
+
+    }
+
+    // Return a handle to the entity store
+    public EntityStore getEntityStore() {
+        return store;
+    }
+
+    // Return a handle to the environment
+    public Environment getEnv() {
+        return myEnv;
+    }
+
+    // Close the store and environment.
+    public void close() {
+        if (store != null) {
+            try {
+                store.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing store: " +
+                                    dbe.toString());
+                System.exit(-1);
+            }
+        }
+
+        if (myEnv != null) {
+            try {
+                // Finally, close the environment.
+                myEnv.close();
+            } catch(DatabaseException dbe) {
+                System.err.println("Error closing MyDbEnv: " +
+                                    dbe.toString());
+                System.exit(-1);
+            }
+        }
+    }
+} 
+
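+
+ For example, a reader application might use this class along the
+ following lines (the environment home path is illustrative, and the
+ imports shown above are assumed):
+
+MyDbEnv myDbEnv = new MyDbEnv();
+try {
+    // Open the environment and store as read-only.
+    myDbEnv.setup(new File("/export/dbEnv"), true);
+    EntityStore store = myDbEnv.getEntityStore();
+    // ... read from the store here ...
+} catch (DatabaseException dbe) {
+    // Exception handling goes here
+} finally {
+    myDbEnv.close();
+}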
+ + + diff --git a/docs/GettingStartedGuide/persist_access.html b/docs/GettingStartedGuide/persist_access.html new file mode 100644 index 0000000..43e22e8 --- /dev/null +++ b/docs/GettingStartedGuide/persist_access.html @@ -0,0 +1,243 @@ + + + + + + Chapter 5. Saving and Retrieving Objects + + + + + + + + + +
+
+
+
+

Chapter 5. Saving and Retrieving Objects

+
+
+
+ +

+ To store an object in an EntityStore you + must annotate the class appropriately and then store it using + PrimaryIndex.put(). +

+

+ To retrieve an object from an EntityStore you use the get() method from either the PrimaryIndex or SecondaryIndex, whichever is most appropriate for your application.

+

+ In both cases, it simplifies things greatly if you create a data + accessor class to organize your indexes. +

+
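+
+ In outline, the whole round trip reduces to a few index calls. The
+ following sketch assumes an open EntityStore named store and the
+ SimpleEntityClass entity shown in the next section:
+
+PrimaryIndex<String,SimpleEntityClass> pIdx =
+    store.getPrimaryIndex(String.class, SimpleEntityClass.class);
+
+SimpleEntityClass sec = new SimpleEntityClass();
+sec.setPKey("keyone");
+sec.setSKey("skeyone");
+
+pIdx.put(sec);                                // save the object
+SimpleEntityClass found = pIdx.get("keyone"); // retrieve it again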

+ In the next few sections we: +

+
+
    +
  1. +

+ Create an entity class that is ready to be stored in an entity store. This class will have a primary index (required) declared for it, as well as a secondary index (optional).

    +

    + See the next section for this implementation. +

    +
  2. +

    + Create a data accessor class which is used to + organize our data. +

    +

    + See SimpleDA.class + for this implementation. +

    +
  3. +

    + Create a simple class that is used to put objects + to our entity store. +

    +

    + See Placing Objects in an Entity Store + for this implementation. +

    +
  4. +

    + Create another class that retrieves objects from + our entity store. +

    +

    + See Retrieving Objects from an Entity Store + for this implementation. +

    +
+
+
+
+
+
+

A Simple Entity Class

+
+
+
+

+ For clarity's sake, this entity class is as simple a class as we can write. + It contains only two data members, both of which are set + and retrieved by simple setter and getter methods. Beyond + that, by design this class does not do anything of particular + interest. +

+

+ Its implementation is as follows: +

+
package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+import com.sleepycat.persist.model.SecondaryKey;
+
+@Entity
+public class SimpleEntityClass {
+
+    // Primary key is pKey
+    @PrimaryKey
+    private String pKey;
+
+    // Secondary key is the sKey
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String sKey;
+
+    public void setPKey(String data) {
+        pKey = data;
+    }
+
+    public void setSKey(String data) {
+        sKey = data;
+    }
+
+    public String getPKey() {
+        return pKey;
+    }
+
+    public String getSKey() {
+        return sKey;
+    }
+} 
+
+
+ + + diff --git a/docs/GettingStartedGuide/persist_first.html b/docs/GettingStartedGuide/persist_first.html new file mode 100644 index 0000000..6d23063 --- /dev/null +++ b/docs/GettingStartedGuide/persist_first.html @@ -0,0 +1,316 @@ + + + + + + Chapter 3. Direct Persistence Layer First Steps + + + + + + + + + +
+
+
+
+

Chapter 3. Direct Persistence Layer First Steps

+
+
+
+
+

+ Table of Contents +

+
+
+ + Entity Stores + +
+
+
+
+ + Opening and Closing Environments and Stores + +
+
+
+
+ + Persistent Objects + +
+
+ + Saving and Retrieving Data + +
+
+
+

+ This chapter guides you through the first few steps required to + use the DPL with your application. These steps include: +

+
+
    +
  1. +

    + Opening your environment as was described in + + Opening Database Environments. + + +

    +
  2. +

    + Opening your entity store. +

    +
  3. +

    + Identifying the classes that you want to store in + JE as either a persistent + class or an entity. +

    +
+
+

+ Once you have done these things, you can write your classes to + the JE databases, read them back from the databases, delete + them from the databases, and so forth. These activities are + described in the chapters that follow in this part of this manual. +

+
+
+
+
+

Entity Stores

+
+
+
+ +

+ Entity stores are the basic unit of storage that you use with the DPL. That is, an entity store is a unit of encapsulation for the classes that you want to store in JE. Under the hood it actually interacts with JE databases, but the DPL provides a layer of abstraction over the underlying JE APIs. The store, therefore, provides a simplified mechanism by which you read and write your stored classes. Using a store gives you simpler access to your classes than interacting with databases directly, but this simplified access comes at the cost of reduced flexibility.

+

+ Entity stores have configurations in the same way that environments have + configurations. You can use a StoreConfig object + to identify store properties. Among these are methods that allow you to declare + whether: +

+
+
    +
  • +

    + the store can be created if it does not exist at the time + it is opened. Use the + StoreConfig.setAllowCreate() + method to set this. +

    +
  • +

    + deferred writes are allowed for the store. Use the + StoreConfig.setDeferredWrite() + method to set this. See + Deferred Write Databases + for general information on deferred write + databases. +

    +
  • +

    + the store is read-only. Use the + StoreConfig.setReadOnly() + method to set this. +

    +
  • +

    + the store supports transactions. Use the + StoreConfig.setTransactional() + method to set this. +

    +

    + Writing JE transactional applications is described in the + Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

    +
+
+

+ EntityStore objects also provide methods for retrieving + information about the store, such as: +

+
+
    +
  • +

    + the store's name. Use the + EntityStore.getStoreName() + method to retrieve this. +

    +
  • +

+ a handle to the environment in which the store is opened. Use the EntityStore.getEnvironment() method to retrieve this handle.

    +
+
+

+ You can also use the EntityStore to + retrieve all the primary and secondary indexes related to a given type of entity + object contained in the store. See Working with Indices for + more information. +

+
+
+
+
+

Opening and Closing Environments and Stores

+
+
+
+

+ As described in + + Database Environments, + + + + an + environment is a unit of + encapsulation for JE databases. It also provides a + handle by which activities common across the databases + can be managed. +

+

+ To use an entity store, you must first open an environment and then provide that + environment handle to the EntityStore constructor. +

+

+ For example, the following code fragment configures both + the environment and the entity store such that they can + be created if they do not exist. Both the environment and + the entity store are then opened. +

+
package persist.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+...
+
+private Environment myEnv;
+private EntityStore store;
+
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    StoreConfig storeConfig = new StoreConfig();
+
+    myEnvConfig.setAllowCreate(!readOnly);
+    storeConfig.setAllowCreate(!readOnly);
+
+    // Open the environment and entity store
+    myEnv = new Environment(envHome, myEnvConfig);
+    store = new EntityStore(myEnv, "EntityStore", storeConfig);
+} catch(DatabaseException dbe) {
+    System.err.println("Error opening environment and store: " +
+                        dbe.toString());
+    System.exit(-1);
+} 
+

+ As always, before you exit your program you should close both + your store and your environment. It is recommended that you close your store before you close your + environment. +

+
if (store != null) {
+    try {
+        store.close();
+    } catch(DatabaseException dbe) {
+        System.err.println("Error closing store: " +
+                            dbe.toString());
+        System.exit(-1);
+    }
+}
+
+if (myEnv != null) {
+    try {
+        // Finally, close environment.
+        myEnv.close();
+    } catch(DatabaseException dbe) {
+        System.err.println("Error closing MyDbEnv: " +
+                            dbe.toString());
+        System.exit(-1);
+    }
+} 
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/persist_index.html b/docs/GettingStartedGuide/persist_index.html new file mode 100644 index 0000000..414db0f --- /dev/null +++ b/docs/GettingStartedGuide/persist_index.html @@ -0,0 +1,245 @@ + + + + + + Chapter 4. Working with Indices + + + + + + + + + +
+
+
+
+

Chapter 4. Working with Indices

+
+
+
+
+

+ Table of Contents +

+
+
+ + Accessing Indexes + +
+
+
+
+ + Accessing Primary Indices + +
+
+ + Accessing Secondary Indices + +
+
+
+
+ + Creating Indexes + +
+
+
+
+ + Declaring Primary Indexes + +
+
+ + Declaring Secondary Indexes + +
+
+ + Foreign Key Constraints + +
+
+
+
+
+

+ All entity classes stored in JE using the DPL must have a + primary index, or key, identified for them. All such classes may + also have one or more secondary keys declared for them. This + chapter describes primary and secondary indexes in detail, and + shows how to access the indexes created for a given entity class. +

+

+ One way to organize access to your primary and secondary + indexes is to create a data accessor + class. We show an implementation of a data accessor class in + SimpleDA.class. +

+
+
+
+
+

Accessing Indexes

+
+
+
+ +

+ In order to retrieve any object from an entity store, you must access at least the primary index for that object. Different entity classes stored in an entity store can have different primary indexes, but every entity class must have a primary index declared for it. The primary index is just the default index used for the class. (That is, it is the data's primary key for the underlying database.)

+

+ Entity classes can optionally have secondary indexes + declared for them. In order to access these secondary + indexes, you must first access the primary index. +

+
+
+
+
+

Accessing Primary Indices

+
+
+
+

+ You retrieve a primary index using the + EntityStore.getPrimaryIndex() + method. To do this, you indicate the index key type + (that is, whether it is a String, Integer, and + so forth) and the class of the entities stored + in the index. +

+

+ For example, the following retrieves the + primary index for an Inventory + class (we provide an implementation of this class in + Inventory.java). + These index keys are of type String. +

+
PrimaryIndex<String,Inventory> inventoryBySku = 
+    store.getPrimaryIndex(String.class, Inventory.class); 
+
+
+
+
+
+

Accessing Secondary Indices

+
+
+
+

+ You retrieve a secondary index using the + EntityStore.getSecondaryIndex() + method. Because secondary indices actually + refer to a primary index somewhere in your data + store, to access a secondary index you: +

+
+
    +
  1. +

    + Provide the primary index as + returned by + EntityStore.getPrimaryIndex(). +

    +
  2. +

    + Identify the key data type used by + the secondary index + (String, + Long, + and so forth). +

    +
  3. +

    + Identify the name of the + secondary key field. + When you declare the + SecondaryIndex + object, you identify the entity class + to which the secondary index + must refer. +

    +
+
+

+ For example, the following first retrieves the + primary index, and then uses that to retrieve a secondary + index. The secondary key is held by the + itemName field of the + Inventory class. +

+
PrimaryIndex<String,Inventory> inventoryBySku = 
+    store.getPrimaryIndex(String.class, Inventory.class); 
+
+SecondaryIndex<String,String,Inventory> inventoryByName = 
+    store.getSecondaryIndex(inventoryBySku, String.class, "itemName"); 
+
+
+
+ + + diff --git a/docs/GettingStartedGuide/persistobject.html b/docs/GettingStartedGuide/persistobject.html new file mode 100644 index 0000000..3aecfd8 --- /dev/null +++ b/docs/GettingStartedGuide/persistobject.html @@ -0,0 +1,147 @@ + + + + + + Persistent Objects + + + + + + + + + +
+
+
+
+

Persistent Objects

+
+
+
+

+ When using the DPL, you store data in the underlying JE databases by making objects persistent. You do this using Java annotations that identify both the type of persistent object you are declaring and its primary and secondary indices.

+

+ The following are the annotations you will use with your + DPL persistent classes: +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Annotation    Description
@Entity + Declares an entity class; that is, a class with a primary index and optionally one or more secondary indices. +
@Persistent + Declares a persistent class; that is, a class used by an entity class. Persistent classes do not have indices, but instead are stored or retrieved when an entity class makes direct use of them. +
@PrimaryKey + Declares a specific data member in an entity class to be the primary key for that object. This annotation must be used exactly once in every entity class. +
@SecondaryKey + Declares a specific data member in an entity class to be a + secondary key for that object. This annotation is optional, and + can be used multiple times for an entity class. +
+
+

+ For example, the following is declared to be an entity class: +

+
package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+import com.sleepycat.persist.model.SecondaryKey;
+
+@Entity
+public class ExampleEntity {
+
+    // The primary key must be unique in the database.
+    @PrimaryKey
+    private String aPrimaryKey;
+
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String aSecondaryKey;
+
+    ...
+
+    // The remainder of the class' implementation is purposefully
+    // omitted in the interest of brevity.
+
+    ...
+} 
+
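+
+ By contrast, the @Persistent annotation marks a helper class that is
+ stored only as part of an entity. A minimal, hypothetical example
+ (note that the DPL requires a default constructor, which may be
+ private):
+
+package persist.gettingStarted;
+
+import com.sleepycat.persist.model.Persistent;
+
+@Persistent
+public class ExampleAddress {
+
+    private String street;
+    private String city;
+
+    // The DPL requires a default constructor; it may be private.
+    private ExampleAddress() {}
+
+    public ExampleAddress(String street, String city) {
+        this.street = street;
+        this.city = city;
+    }
+} 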

+ We discuss primary and secondary keys in more detail in Working with Indices. +

+
+ + + diff --git a/docs/GettingStartedGuide/preface.html b/docs/GettingStartedGuide/preface.html new file mode 100644 index 0000000..e571bbb --- /dev/null +++ b/docs/GettingStartedGuide/preface.html @@ -0,0 +1,174 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+ + For More Information + +
+
+
+
+ + Contact Us + +
+
+
+
+
+

+ Welcome to Berkeley DB Java Edition (JE). + + + This document introduces JE, version 12c Release 2. + + +

+

+ This document is intended to provide a rapid introduction + to the JE API set and related concepts. The goal of this + document is to provide you with an efficient mechanism with which + you can evaluate JE against your project's technical + requirements. As such, this document is intended for + Java + + + developers and senior software architects who are looking for an + + in-process data management solution. + + + No prior experience with Berkeley DB Java Edition is expected or required. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+

+ The following typographical conventions are used in this manual:

+

+ Class names are represented in monospaced font, as are method + names. For example: + + "The Environment.openDatabase() method + returns a Database class object." + + + + + + +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + JE_HOME + + + directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. + For example: +

+
import com.sleepycat.je.Environment;
+
+...
+
+// Open the environment. Allow it to be created if it does not 
+// already exist.
+Environment myDbEnv;
+

+ In some situations, programming examples are updated from one chapter to the next. When + this occurs, the new code is presented in monospaced bold font. For example: +

+
import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not 
+// already exist.
+Environment myDbEnv;
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+myDbEnv = new Environment(new File("/export/dbEnv"), envConfig); 
+
+

Note

+

+ Finally, notes of interest are represented using a note block such + as this. +

+
+
+
+ + + diff --git a/docs/GettingStartedGuide/readSecondary.html b/docs/GettingStartedGuide/readSecondary.html new file mode 100644 index 0000000..3549787 --- /dev/null +++ b/docs/GettingStartedGuide/readSecondary.html @@ -0,0 +1,125 @@ + + + + + + Reading Secondary Databases + + + + + + + + + +
+
+
+
+

Reading Secondary Databases

+
+
+
+

+ Like a primary database, you can read records from your secondary + database either by using the + + SecondaryDatabase.get() method, + + + or by using + a SecondaryCursor. + + + The main difference between reading secondary and primary databases is that when + you read a secondary database record, the secondary record's data is not + returned to you. Instead, the primary key and data corresponding to the + secondary key are returned to you. +

+

+ For example, assuming your secondary database contains keys related + to a person's full name: +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryDatabase;
+
+...
+try {
+    // Omitting all database and environment opens
+    ...
+
+    String searchName = "John Doe";
+    DatabaseEntry searchKey = 
+        new DatabaseEntry(searchName.getBytes("UTF-8"));
+    DatabaseEntry primaryKey = new DatabaseEntry();
+    DatabaseEntry primaryData = new DatabaseEntry();
+
+    // Get the primary key and data for the user 'John Doe'.
+    OperationStatus retVal = mySecondaryDatabase.get(null, searchKey, 
+                                                     primaryKey, 
+                                                     primaryData, 
+                                                     LockMode.DEFAULT); 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+

+ Note that, just like Database.get(), if your secondary database supports duplicate records, SecondaryDatabase.get() returns only the first record found in a matching duplicates set. If you want to see all the records related to a specific secondary key, use a SecondaryCursor (described in Using Secondary Cursors).

+
+ + + diff --git a/docs/GettingStartedGuide/restore.html b/docs/GettingStartedGuide/restore.html new file mode 100644 index 0000000..29f9156 --- /dev/null +++ b/docs/GettingStartedGuide/restore.html @@ -0,0 +1,103 @@ + + + + + + JE Backup and Restore + + + + + + + + + +
+
+
+
+

JE Backup and Restore

+
+
+
+

+ To back up your database, copy the log files (the .jdb files), starting from the lowest numbered log file to the highest numbered log file, to your backup media. Be sure to copy the bytes of the individual log files in order from the lowest to the highest. You do not have to close your database or otherwise cease database operations when you do this.

+
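+
+ JE also provides a helper class, com.sleepycat.je.util.DbBackup,
+ that holds the log file set stable while you copy it. A minimal
+ sketch, assuming an open Environment named env:
+
+import com.sleepycat.je.util.DbBackup;
+
+...
+
+DbBackup backupHelper = new DbBackup(env);
+
+// Prevent log file deletion while the backup is in progress.
+backupHelper.startBackup();
+try {
+    String[] filesForBackup = backupHelper.getLogFilesInBackupSet();
+    // Copy each named file, lowest numbered first, to the backup
+    // media here.
+} finally {
+    backupHelper.endBackup();
+}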
+

Note

+

+ Note that if you are using subdirectories to store your log + files, then you should copy those subdirectories and their + contents instead of simply copying individual files. + Multiple subdirectories can be used to improve JE + throughput, but this feature is not turned on by default. + See Multiple Environment Subdirectories + for information on how to configure this feature. +

+
+

+ Restoring a JE database from a backup consists of closing your JE environment, copying archived log files back into your environment directory, and then opening your JE environment again. If you are using subdirectories to store your log files, then make sure to copy those subdirectories back into the environment home directory, and make sure the same log files are in each subdirectory as were there when you took the backup.

+

+ Note that whenever a JE environment is opened, JE runs + normal recovery. This involves bringing your + database into a consistent state given the changed data found in the + database. If you are using transactions during normal operations, then + JE automatically runs checkpoints for you so as to limit the time + required to run this recovery. In any case, running normal recovery is a + routine operation, while performing database restores is not. +

+

+ For more information on JE backup and restores, and on checkpoints, see + Backing up and Restoring Berkeley DB Java Edition Applications. +

+
+ + + diff --git a/docs/GettingStartedGuide/saveret.html b/docs/GettingStartedGuide/saveret.html new file mode 100644 index 0000000..4f385db --- /dev/null +++ b/docs/GettingStartedGuide/saveret.html @@ -0,0 +1,123 @@ + + + + + + Saving and Retrieving Data + + + + + + + + + +
+
+
+
+

Saving and Retrieving Data

+
+
+
+

+ All data stored using the DPL has one primary index and + zero or more secondary indices associated with it. + (Sometimes these are referred to as the primary and + secondary keys.) So to store data under the DPL, you must: +

+
+
    +
  1. +

    + Declare a class to be an entity class. +

    +
  2. +

    + Identify the features on the class which + represent indexed material. +

    +
  3. +

    + Retrieve the store's primary index for a + given class using the + EntityStore.getPrimaryIndex() + method. +

    +
  4. +

    + Put class objects to the store using the + PrimaryIndex.put() + method. +

    +
+
+

+ In order to retrieve an object from the store, you use + the index that is most convenient for your purpose. This + may be the primary index, or it may be some other + secondary index that you declared on your entity class. +

+

+ You obtain a primary index in the same way as when you put the object to the store: using EntityStore.getPrimaryIndex(). You can get a secondary index for the store using the EntityStore.getSecondaryIndex() method. Note that getSecondaryIndex() requires you to provide a PrimaryIndex class instance when you call it, so a class's primary index is always required when retrieving objects from an entity store.

+
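+
+ A condensed sketch of the whole sequence, assuming an open
+ EntityStore named store and the SimpleEntityClass entity used
+ throughout this guide:
+
+PrimaryIndex<String,SimpleEntityClass> pIdx =
+    store.getPrimaryIndex(String.class, SimpleEntityClass.class);
+SecondaryIndex<String,String,SimpleEntityClass> sIdx =
+    store.getSecondaryIndex(pIdx, String.class, "sKey");
+
+SimpleEntityClass sec = new SimpleEntityClass();
+sec.setPKey("keyone");
+sec.setSKey("skeyone");
+pIdx.put(sec);
+
+// Retrieve by whichever index is most convenient.
+SimpleEntityClass byPrimary = pIdx.get("keyone");
+SimpleEntityClass bySecondary = sIdx.get("skeyone");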

+ Usually all of the activity surrounding saving and + retrieving data is organized within a class or classes + specialized to that purpose. We describe the construction + of these data accessor classes in SimpleDA.class. But before you perform + any entity store activity, you need to understand + indexes. We therefore describe them in the next chapter. +

+
+ + + diff --git a/docs/GettingStartedGuide/secondaryCursor.html b/docs/GettingStartedGuide/secondaryCursor.html new file mode 100644 index 0000000..0982ec8 --- /dev/null +++ b/docs/GettingStartedGuide/secondaryCursor.html @@ -0,0 +1,155 @@ + + + + + + Using Secondary Cursors + + + + + + + + + +
+
+
+
+

+ Using Secondary Cursors + +

+
+
+
+

+ Just like cursors on a primary database, you can use + secondary cursors + + to iterate over the records in a secondary database. Like + + normal cursors, + + + you can also use + secondary cursors + + to search for specific records in a database, to seek to the first + or last record in the database, to get the next duplicate record, + to get the next non-duplicate record, + and so forth. For a complete description on cursors and their capabilities, see + Using Cursors. +

+

+ However, when you use + secondary cursors: + +

+
+
    +
  • +

    + Any data returned is the data contained on the primary database + record referenced by the secondary record. +

    +
  • +

    + SecondaryCursor.getSearchBoth() and + related methods do not search based on a key/data pair. Instead, you + search based on a secondary key and a primary key. The data returned + is the primary data that most closely matches the two keys provided + for the search. +

    +
+
+

+ For example, suppose you are using the databases, classes, and key + creators + + described in Implementing Key + Creators + + . + Then the following searches for a person's + name in the secondary database, and deletes all secondary and primary + records that use that name. +

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryCursor;
+  
+...
+try {
+    // Database and environment opens omitted for brevity
+    ...
+
+    String secondaryName = "John Doe";
+    DatabaseEntry secondaryKey = 
+        new DatabaseEntry(secondaryName.getBytes("UTF-8"));
+
+    DatabaseEntry foundData = new DatabaseEntry();
+
+    SecondaryCursor mySecCursor = 
+        mySecondaryDatabase.openSecondaryCursor(null, null);
+
+    OperationStatus retVal = mySecCursor.getSearchKey(secondaryKey, 
+                                                      foundData, 
+                                                      LockMode.DEFAULT);
+    while (retVal == OperationStatus.SUCCESS) {
+        mySecCursor.delete();
+        retVal = mySecCursor.getNextDup(secondaryKey, 
+                                        foundData, 
+                                        LockMode.DEFAULT);
+    } 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+ + + diff --git a/docs/GettingStartedGuide/secondaryDelete.html b/docs/GettingStartedGuide/secondaryDelete.html new file mode 100644 index 0000000..5861dc9 --- /dev/null +++ b/docs/GettingStartedGuide/secondaryDelete.html @@ -0,0 +1,128 @@ + + + + + + Deleting Secondary Database Records + + + + + + + + + +
+
+
+
+

Deleting Secondary Database Records

+
+
+
+

+ In general, you cannot modify a secondary database directly. In order to modify a secondary database, you should modify the primary database and simply allow JE to manage the secondary modifications for you.

+

+ However, as a convenience, you can delete + SecondaryDatabase + + records directly. Doing so causes the associated primary key/data pair to be deleted. + This in turn causes JE to delete all + SecondaryDatabase + + records that reference the primary record. +

+

+ You can use the + SecondaryDatabase.delete() + + + method to delete a secondary database record. Note that if your + + SecondaryDatabase + contains duplicate records, then deleting a record from the set of + duplicates causes all of the duplicates to be deleted as well. + +

+
+

Note

+

+ SecondaryDatabase.delete() causes the + previously described delete operations to occur + + + only if the primary database is opened for write access. +

+
+

For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryDatabase;
+
+...
+try {
+    // Omitting all database and environment opens
+    ...
+
+    String searchName = "John Doe";
+    DatabaseEntry searchKey = 
+        new DatabaseEntry(searchName.getBytes("UTF-8"));
+
+    // Delete the first secondary record that uses "John Doe" as
+    // a key. This causes the primary record referenced by this secondary
+    // record to be deleted.
+    OperationStatus retVal = mySecondaryDatabase.delete(null, searchKey);
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+ + + diff --git a/docs/GettingStartedGuide/secondaryProps.html b/docs/GettingStartedGuide/secondaryProps.html new file mode 100644 index 0000000..4e66f87 --- /dev/null +++ b/docs/GettingStartedGuide/secondaryProps.html @@ -0,0 +1,94 @@ + + + + + + Secondary Database Properties + + + + + + + + + +
+
+
+
+

Secondary Database Properties

+
+
+
+

Secondary databases accept SecondaryConfig + objects. SecondaryConfig is a subclass of DatabaseConfig, + so it can manage all of the same properties as does DatabaseConfig. + See Database Properties for more information.

+

In addition to the DatabaseConfig properties, SecondaryConfig also allows you to manage the following properties (a configuration sketch follows this list):

+
+
    +
  • +

    + SecondaryConfig.setAllowPopulate() +

    +

    If true, the secondary database can be auto-populated. This means + that on open, if the secondary database is empty then the primary + database is read in its entirety and additions/modifications to the + secondary's records occur automatically.

    +
  • +

    + SecondaryConfig.setKeyCreator() +

    +

    Identifies the key creator object to be used for secondary key + creation. See Implementing Key + Creators + + + for more information.

    +
+
+
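+
+ For example, the following sketch combines both properties, reusing
+ the FullNameKeyCreator shown earlier in this guide (the variable
+ names are illustrative):
+
+SecondaryConfig mySecConfig = new SecondaryConfig();
+
+// Auto-populate the secondary from the primary if the secondary is
+// empty when it is opened.
+mySecConfig.setAllowPopulate(true);
+
+// Identify the key creator used to derive secondary keys.
+mySecConfig.setKeyCreator(new FullNameKeyCreator(myDataBinding));
+
+SecondaryDatabase mySecDb =
+    myEnv.openSecondaryDatabase(null, "mySecondaryDatabase",
+                                myDb, mySecConfig);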
+ + + diff --git a/docs/GettingStartedGuide/simpleda.html b/docs/GettingStartedGuide/simpleda.html new file mode 100644 index 0000000..2526697 --- /dev/null +++ b/docs/GettingStartedGuide/simpleda.html @@ -0,0 +1,104 @@ + + + + + + SimpleDA.class + + + + + + + + + +
+
+
+
+

SimpleDA.class

+
+
+
+

+ As mentioned above, we organize our primary and secondary indexes using a specialized data accessor class. The main reason this class exists is to provide convenient access to all the indexes in use for our entity class (see the previous section, A Simple Entity Class, for that implementation).

+

+ For a description of retrieving primary and secondary indexes under the DPL, see Working with Indices.

+
package persist.gettingStarted;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+
+public class SimpleDA {
+    // Open the indices
+    public SimpleDA(EntityStore store)
+        throws DatabaseException {
+
+        // Primary key for SimpleEntityClass classes
+        pIdx = store.getPrimaryIndex(
+            String.class, SimpleEntityClass.class);
+
+        // Secondary key for SimpleEntityClass classes
+        // Last field in the getSecondaryIndex() method must be
+        // the name of a class member; in this case, a
+        // SimpleEntityClass data member.
+        sIdx = store.getSecondaryIndex(
+            pIdx, String.class, "sKey");
+    }
+
+    // Index Accessors
+    PrimaryIndex<String,SimpleEntityClass> pIdx;
+    SecondaryIndex<String,String,SimpleEntityClass> sIdx;
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/simpleget.html b/docs/GettingStartedGuide/simpleget.html new file mode 100644 index 0000000..d4abdbb --- /dev/null +++ b/docs/GettingStartedGuide/simpleget.html @@ -0,0 +1,170 @@ + + + + + + Retrieving Objects from an Entity Store + + + + + + + + + +
+
+
+
+

Retrieving Objects from an Entity Store

+
+
+
+

+ You retrieve objects placed in an entity store by using + either the object's primary index, or the appropriate + secondary index if it exists. The following application + illustrates this by retrieving some of the objects that + we placed in an entity store in the previous section. +

+

+ To begin, we import the Java classes that our example + needs. We also instantiate the private data members that we + require. +

+
package persist.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+public class SimpleStoreGet {
+
+    private static File envHome = new File("./JEDB");
+
+    private Environment envmnt;
+    private EntityStore store;
+    private SimpleDA sda; 
+

+ Next we create a method that simply opens our database + environment and entity store for us. +

+
   // The setup() method opens the environment and store
+    // for us.
+    public void setup()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        StoreConfig storeConfig = new StoreConfig();
+
+        envConfig.setAllowCreate(true);
+        storeConfig.setAllowCreate(true);
+
+        // Open the environment and entity store
+        envmnt = new Environment(envHome, envConfig);
+        store = new EntityStore(envmnt, "EntityStore", storeConfig);
+    } 
+

+ We also need a method to close our environment and store. +

+
    // Close our environment and store.
+    public void shutdown()
+        throws DatabaseException {
+
+        store.close();
+        envmnt.close();
+    } 
+

+ Now we retrieve a few objects. To do this, we instantiate a + SimpleDA (see SimpleDA.class) class that we use to access + our primary and secondary indexes. Then we retrieve objects + based on a primary or secondary index value. And finally, we + display the retrieved objects. +

+
    // Retrieve some SimpleEntityClass objects from the store.
+    private void run()
+        throws DatabaseException {
+
+        setup();
+
+        // Open the data accessor. This is used to store
+        // persistent objects.
+        sda = new SimpleDA(store);
+
+        // Instantiate and store some entity classes
+        SimpleEntityClass sec1 = sda.pIdx.get("keyone");
+        SimpleEntityClass sec2 = sda.pIdx.get("keytwo");
+
+        SimpleEntityClass sec4 = sda.sIdx.get("skeythree");
+
+        System.out.println("sec1: " + sec1.getPKey());
+        System.out.println("sec2: " + sec2.getPKey());
+        System.out.println("sec4: " + sec4.getPKey());
+
+
+        shutdown();
+    } 
+

+ Finally, to complete our class, we need a + main() method, which simply calls our + run() method. +

+
    // main
+    public static void main(String args[]) {
+        SimpleStoreGet ssg = new SimpleStoreGet();
+        try {
+            ssg.run();
+        } catch (DatabaseException dbe) {
+            System.err.println("SimpleStoreGet: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e.toString());
+            e.printStackTrace();
+        }
+        System.out.println("All done.");
+    }
+
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/simpleput.html b/docs/GettingStartedGuide/simpleput.html new file mode 100644 index 0000000..9c88143 --- /dev/null +++ b/docs/GettingStartedGuide/simpleput.html @@ -0,0 +1,229 @@ + + + + + + Placing Objects in an Entity Store + + + + + + + + + +
+
+
+
+

Placing Objects in an Entity Store

+
+
+
+

+ In order to place an object in a DPL entity store, + you must: +

+
+
    +
  1. +

    + Open the environment and store. +

    +
  2. +

    + Instantiate the object. +

    +
  3. +

    + Put the object to the store using the + put() method + for the object's primary index. +

    +
    +

    Note

    +

    + A version of this method exists which allows you to + specify a Time to Live value for the record that you are + inserting. See Using Time to Live + for more information. +

    +
    +
+
+

+ The following example uses the SimpleDA + class that we show in SimpleDA.class to put a + SimpleEntityClass object (see + A Simple Entity Class) to the + entity store. +

+

+ To begin, we import the Java classes that our example + needs. We also instantiate the private data members that we + require. +

+
package persist.gettingStarted;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig; 
+
+public class SimpleStorePut {
+
+    private static File envHome = new File("./JEDB");
+
+    private Environment envmnt;
+    private EntityStore store;
+    private SimpleDA sda; 
+

+ Next we create a method that simply opens our database + environment and entity store for us. +

+
   // The setup() method opens the environment and store
+    // for us.
+    public void setup()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        StoreConfig storeConfig = new StoreConfig();
+
+        envConfig.setAllowCreate(true);
+        storeConfig.setAllowCreate(true);
+
+        // Open the environment and entity store
+        envmnt = new Environment(envHome, envConfig);
+        store = new EntityStore(envmnt, "EntityStore", storeConfig);
+    } 
+

+ We also need a method to close our environment and store. +

+
    // Close our environment and store.
+    public void shutdown()
+        throws DatabaseException {
+
+        store.close();
+        envmnt.close();
+    } 
+

+ Now we need to create a method to actually write objects to our + store. This method creates a SimpleDA + object (see SimpleDA.class) that we + will use to access our indexes. Then we instantiate a series + of SimpleEntityClass (see A Simple Entity Class) + objects that we + will place in our store. Finally, we use our primary index + (obtained from the SimpleDA class + instance) to actually place these objects in our store. +

+

+ In Retrieving Objects from an Entity Store + we show a class that is used to retrieve these objects. +

+
    // Populate the entity store
+    private void run()
+        throws DatabaseException {
+
+        setup();
+
+        // Open the data accessor. This is used to store
+        // persistent objects.
+        sda = new SimpleDA(store);
+
+        // Instantiate and store some entity classes
+        SimpleEntityClass sec1 = new SimpleEntityClass();
+        SimpleEntityClass sec2 = new SimpleEntityClass();
+        SimpleEntityClass sec3 = new SimpleEntityClass();
+        SimpleEntityClass sec4 = new SimpleEntityClass();
+        SimpleEntityClass sec5 = new SimpleEntityClass();
+
+        sec1.setPKey("keyone");
+        sec1.setSKey("skeyone");
+
+        sec2.setPKey("keytwo");
+        sec2.setSKey("skeyone");
+
+        sec3.setPKey("keythree");
+        sec3.setSKey("skeytwo");
+
+        sec4.setPKey("keyfour");
+        sec4.setSKey("skeythree");
+
+        sec5.setPKey("keyfive");
+        sec5.setSKey("skeyfour");
+
+        sda.pIdx.put(sec1);
+        sda.pIdx.put(sec2);
+        sda.pIdx.put(sec3);
+        sda.pIdx.put(sec4);
+        sda.pIdx.put(sec5);
+
+        shutdown();
+    } 
+

+ Finally, to complete our class, we need a + main() method, which simply calls our + run() method. +

+
    // main
+    public static void main(String args[]) {
+        SimpleStorePut ssp = new SimpleStorePut();
+        try {
+            ssp.run();
+        } catch (DatabaseException dbe) {
+            System.err.println("SimpleStorePut: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e.toString());
+            e.printStackTrace();
+        }
+        System.out.println("All done.");
+    }
+
+} 
+
+ + + diff --git a/docs/GettingStartedGuide/timetolive.html b/docs/GettingStartedGuide/timetolive.html new file mode 100644 index 0000000..1fd0464 --- /dev/null +++ b/docs/GettingStartedGuide/timetolive.html @@ -0,0 +1,318 @@ + + + + + + Using Time to Live + + + + + + + + + +
+
+
+
+

Using Time to Live

+
+
+
+ + +

+ Time to Live (TTL) is a mechanism that allows you to automatically + expire + + database records. + TTL is expressed as the amount of time data is allowed to live in + the + + database. + Data which has reached its expiration timeout value can no longer + be retrieved, and will not appear in any + + database + statistics. Whether the data is physically removed from the + + database + is determined by an internal mechanism that is not + user-controllable. +

+

+ TTL represents a minimum guaranteed time to live. Data expires on hour or day boundaries, which means that with a one hour TTL there can be as much as two hours' worth of unexpired data. For example (using a time format of hour:minute:second), given a one hour TTL, data written between 00:00:00.000 and 00:59:59.999 will expire at 02:00:00.000, because the data is guaranteed to expire no less than one hour from when it is written.

+

+ Expired data is invisible to queries and + + database + statistics, but even so it is using disk space until it has been + purged. The expired data is purged from disk at some point in + time after its expiration date. The exact time when the data is + purged is driven by internal mechanisms and the workload on your + + database. +

+

+ The TTL value for a + + database record + can be updated at any time before the expiration value has been + reached. Data that has expired can no longer be modified, and this + includes its TTL value. +

+

+ TTL is more efficient than manual user-deletion of the + + record + because it avoids the overhead of writing a database log entry for + the data deletion. The deletion also does not appear in the + replication stream. +

+

+ The following provides a brief introduction to using Time to Live. + For a more complete description of this mechanism, see the + + com.sleepycat.je.WriteOptions javadoc. + +

+
+
+
+
+
+

Specifying a TTL Value

+
+
+
+

+ TTL values are specified on a record by record basis using the WriteOptions class, which can be used by various put() methods when writing to the database. For example, variations of Database.put(), PrimaryIndex.put(), and Cursor.put() exist that accept a WriteOptions class instance.

+

+ WriteOptions allows + you to identify the number of days or hours the record will exist + in the database before expiring. A duration interval specified in + days is recommended because this results in the least amount of + storage consumed in the store. However, if you want a TTL value + that is not an even multiple of days, then specify the TTL + value in hours. +

+

+ The code example from + Writing Records to the Database + can be extended to specify a TTL value of 5 days like this: +

+
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Put;
+import com.sleepycat.je.WriteOptions;
+
+
+...
+
+// Environment and database opens omitted for clarity.
+// Environment and database must NOT be opened read-only.
+
+String aKey = "myFirstKey";
+String aData = "myFirstData";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+
+    WriteOptions wo = new WriteOptions();
+    // This sets the TTL using day units. Another variation
+    // of setTTL() exists that accepts a TimeUnit class instance.
+    wo.setTTL(5);
+    myDatabase.put(null,             // Transaction handle.
+                   theKey,           // Record's key.
+                   theData,          // Record's data.
+                   Put.NO_OVERWRITE, // If the record exists,
+                                     // do not overwrite it.
+                   wo);              // WriteOptions instance.
+
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+
+
+
+

Updating a TTL Value

+
+
+
+

+      To update the expiration time for a record, you update the record as normal, and at the same time specify the new expiration time. However, you must also indicate that the expiration time is to be updated. By default, modifying a record does not modify its expiration time, even if you specify a new TTL value for the record.

+

+      To indicate that the expiration time is to be updated, specify true to the WriteOptions.setUpdateTTL() method. For example, using the previous example, to change the TTL value to 10 days, do the following:

+
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Put;
+import com.sleepycat.je.WriteOptions;
+
+
+...
+
+// Environment and database opens omitted for clarity.
+// Environment and database must NOT be opened read-only.
+
+String aKey = "myFirstKey";
+String aData = "myFirstData";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+
+    WriteOptions wo = new WriteOptions();
+    // This sets the TTL to 10 days using the default day units. Another
+    // variation of setTTL() exists that accepts a TimeUnit class instance.
+    wo.setTTL(10);
+    // If the record currently exists, update the TTL value
+    wo.setUpdateTTL(true);
+    myDatabase.put(null,             // Transaction handle.
+                   theKey,           // Record's key.
+                   theData,          // Record's data.
+                   Put.OVERWRITE,    // If the record exists,
+                                     // overwrite it.
+                   wo);              // WriteOptions instance.
+
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+
+
+
+

Deleting TTL Expiration

+
+
+
+

+ If you have set a TTL value for a record and you later decide you + do not want it to ever automatically expire, you can turn off + TTL by setting a TTL value of 0: +

+
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Put;
+import com.sleepycat.je.WriteOptions;
+
+
+...
+
+// Environment and database opens omitted for clarity.
+// Environment and database must NOT be opened read-only.
+
+String aKey = "myFirstKey";
+String aData = "myFirstData";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+
+    WriteOptions wo = new WriteOptions();
+    // Turn off automatic expiration of this record.
+    wo.setTTL(0);
+    wo.setUpdateTTL(true);
+    myDatabase.put(null,             // Transaction handle.
+                   theKey,           // Record's key.
+                   theData,          // Record's data.
+                   Put.OVERWRITE,    // If the record exists,
+                                     // overwrite it.
+                   wo);              // WriteOptions instance.
+
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+ + + diff --git a/docs/GettingStartedGuide/usingDbt.html b/docs/GettingStartedGuide/usingDbt.html new file mode 100644 index 0000000..5dfcb64 --- /dev/null +++ b/docs/GettingStartedGuide/usingDbt.html @@ -0,0 +1,424 @@ + + + + + + Reading and Writing Database Records + + + + + + + + + +
+
+
+
+

Reading and Writing Database Records

+
+
+
+ +

+      When reading and writing database records, be aware that there are some slight differences in behavior depending on whether your database supports duplicate records. Two or more database records are considered to be duplicates of one another if they share the same key. The collection of records sharing the same key is called a duplicates set.

+

+ By default, JE databases do + not support duplicate records. Where duplicate records are supported, + cursors (see below) are used + to access all of the records in the duplicates set. +

+

+ JE provides two basic mechanisms for the storage and retrieval of database + key/data pairs: +

+
+
    +
  • +

+      The Database.put() and Database.get() methods provide the easiest access for all non-duplicate records in the database. These methods are described in this section.

    +
  • +
  • +

    Cursors provide several methods for putting and getting database + records. Cursors and their database access methods are described in + Using Cursors.

    +
  • +
+
+
+
+
+
+

Writing Records to the Database

+
+
+
+

+ Database records are stored in the internal BTree based on + whatever sorting routine is available to the database. Records are + sorted first by their key. If the database supports duplicate records, + then the records for a specific key are sorted by their data. +

+

+      By default, JE sorts both keys and the data portion of duplicate records using unsigned byte-by-byte lexicographic comparisons. This default comparison works well for the majority of cases. However, in some cases performance benefits can be realized by overriding the default comparison routine. See Using Comparators for more information.

+

You can use the following methods to put database records:

+
+
    +
  • +

    + Database.put() +

    +

    + Puts a database record into the database. If your database does not + support duplicate records, and if the provided key already exists in + the database, then the currently existing record is replaced with + the new data. +

    +

+      Be aware that a version of this method exists which accepts a Put enum. If Put.OVERWRITE is provided, then existing database records are overwritten. If Put.NO_OVERWRITE is provided, then existing records will not be overwritten.

    +
  • +
  • +

    + Database.putNoOverwrite() +

    +

    + Disallows overwriting (replacing) an existing record in the + database. If the provided key already exists in the database, + then this method returns + OperationStatus.KEYEXIST even if + the database supports duplicates. +

    + + + +
  • +
  • +

    + Database.putNoDupData() +

    +

+      Puts a database record into the database. If the provided key and data already exist in the database (that is, if you are attempting to put a record that compares equally to an existing record), then this returns OperationStatus.KEYEXIST.

    +
  • +
+
+

+      When you put database records, you provide both the key and the data as DatabaseEntry objects. This means you must convert your key and data into Java byte arrays. For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+
+...
+
+// Environment and database opens omitted for clarity.
+// Environment and database must NOT be opened read-only.
+
+String aKey = "myFirstKey";
+String aData = "myFirstData";
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry(aData.getBytes("UTF-8"));
+    myDatabase.put(null, theKey, theData);
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+
+
+
+

Getting Records from the Database

+
+
+
+

+ The Database class provides several + methods that you can use to retrieve database records. Note that if your + database supports duplicate records, then these methods will only ever + return the first record in a duplicate set. For this reason, if your + database supports duplicates, you should use a cursor to retrieve + records from it. Cursors are described in Using Cursors. +

+

+ You can use either of the following methods to retrieve records from the database: +

+
+
    +
  • +

    + Database.get() +

    +

Retrieves the record whose key matches the key provided to the method. If no record exists that uses the provided key, then OperationStatus.NOTFOUND is returned.

    + + + +
  • +
  • +

    + Database.getSearchBoth() +

    +

Retrieves the record whose key and data match the key and data provided to the method. If no record exists that uses the provided key and data, then OperationStatus.NOTFOUND is returned. (A sketch of this method follows the Database.get() example below.)

    +
  • +
+
+

Both the key and data for a database record are returned as + byte arrays in DatabaseEntry objects. These objects are + passed as parameter values to the Database.get() method. +

+

In order to retrieve your data once Database.get() + has completed, you must retrieve the byte array stored + in the DatabaseEntry and then convert that + byte array back to the + appropriate datatype. For example:

+ +
package je.gettingStarted;
+      
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+
+...
+
+// Environment and database opens omitted for clarity.
+// Environment and database may be opened read-only.  
+  
+String aKey = "myFirstKey";
+
+try {
+    // Create a pair of DatabaseEntry objects. theKey
+    // is used to perform the search. theData is used
+    // to store the data returned by the get() operation.
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+    
+    // Perform the get.
+    if (myDatabase.get(null, theKey, theData, LockMode.DEFAULT) ==
+        OperationStatus.SUCCESS) {
+
+        // Recreate the data String.
+        byte[] retData = theData.getData();
+        String foundData = new String(retData, "UTF-8");
+        System.out.println("For key: '" + aKey + "' found data: '" + 
+                            foundData + "'.");
+    } else {
+        System.out.println("No record found for key '" + aKey + "'.");
+    } 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
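+
+      Database.getSearchBoth() works similarly, except that you populate both DatabaseEntry objects before the call. The following is a minimal sketch, reusing the myDatabase handle and the key/data values from the earlier put example:
+
+try {
+    DatabaseEntry theKey = new DatabaseEntry("myFirstKey".getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry("myFirstData".getBytes("UTF-8"));
+
+    // Succeeds only if a record exists whose key AND data both
+    // match the values provided.
+    if (myDatabase.getSearchBoth(null, theKey, theData, LockMode.DEFAULT)
+        == OperationStatus.SUCCESS) {
+        System.out.println("Record 'myFirstKey' with that data exists.");
+    } else {
+        System.out.println("No matching record found.");
+    }
+} catch (Exception e) {
+    // Exception handling goes here
+}
+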
+
+
+
+
+

Deleting Records

+
+
+
+

+      You can use the Database.delete() method to delete a record from the database. If your database supports duplicate records, then all records associated with the provided key are deleted. To delete just one record from a list of duplicates, use a cursor. Cursors are described in Using Cursors.

+

+      You can also delete every record in the database by using Environment.truncateDatabase(); a brief sketch of this method follows the example below.

+

For example:

+ +
package je.gettingStarted;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+
+...
+
+// Environment and database opens omitted for clarity.
+// Environment and database must NOT be opened read-only.  
+  
+try {
+    String aKey = "myFirstKey";
+    DatabaseEntry theKey = new DatabaseEntry(aKey.getBytes("UTF-8"));
+    
+    // Perform the deletion. All records that use this key are
+    // deleted.
+    myDatabase.delete(null, theKey); 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
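+
+      As a minimal, hedged sketch of Environment.truncateDatabase(): the method takes the database name rather than a Database handle, and all open handles for that database must be closed before it is called. The database name below ("sampleDatabase") and the myDbEnv handle are placeholders:
+
+// Close all handles for the database before truncating it.
+myDatabase.close();
+
+long discarded =
+    myDbEnv.truncateDatabase(null,             // Txn handle.
+                             "sampleDatabase", // Database name.
+                             true);            // Return record count.
+System.out.println("Discarded " + discarded + " records.");
+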
+
+
+
+
+

Data Persistence

+
+
+
+

+ When you perform a database modification, your modification is made + in the in-memory cache. This means that your data modifications + are not necessarily flushed to disk, and so your data may not appear + in the database after an application restart. +

+

+      Therefore, if you care whether your data is durable across system failures, and to guard against the rare possibility of database corruption, you should use transactions to protect your database modifications. Every time you commit a transaction, JE ensures that the data will not be lost due to application or system failure. Transaction usage is described in the Berkeley DB, Java Edition Getting Started with Transaction Processing guide.

+
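+
+      As a brief, hedged sketch of what a transaction-protected write might look like (this assumes the environment and database were opened with their transactional properties set; see the guide referenced above for details):
+
+import com.sleepycat.je.Transaction;
+
+...
+
+// myDbEnv and myDatabase were opened with setTransactional(true)
+// on their respective configuration objects.
+Transaction txn = myDbEnv.beginTransaction(null, null);
+try {
+    myDatabase.put(txn, theKey, theData);
+    // The commit makes the modification durable according to the
+    // durability settings in effect for the environment.
+    txn.commit();
+} catch (Exception e) {
+    txn.abort();
+}
+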

+      If you do not want to use transactions, then the assumption is that your data does not need to exist the next time your application starts. You may want this if, for example, you are using JE to cache data relevant only to the current application runtime.

+

+      If, however, you are not using transactions for some reason and you still want some guarantee that your database modifications are persistent, then you should periodically run environment syncs. Syncs cause any dirty entries in the in-memory cache and the operating system's file cache to be written to disk. As such, they are quite expensive and you should use them sparingly.

+

+      Note that by default, a sync is run every time you close an environment. You can also run a sync by calling the Environment.sync() method.

+
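+
+      For example, assuming myDbEnv is an open Environment handle:
+
+// Flush dirty entries in the in-memory cache and the operating
+// system's file cache to disk.
+myDbEnv.sync();
+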

+ For a brief description of how JE manages its data in the cache + and in the log files, and how sync works, see Databases and Log Files. +

+
+
+ + + diff --git a/docs/LICENSE.txt b/docs/LICENSE.txt new file mode 100644 index 0000000..a4e050c --- /dev/null +++ b/docs/LICENSE.txt @@ -0,0 +1,76 @@ +Copyright (C) 2002, 2016, Oracle and/or its affiliates. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License") reproduced below or available at: + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions. +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: +1. You must give any other recipients of the Work or Derivative Works a copy of this License; and +2. You must cause any modified files to carry prominent notices stating that You changed the files; and +3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +END OF TERMS AND CONDITIONS + + +ADDITIONAL THIRD PARTY NOTICES: + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2005 INRIA, France Telecom + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/docs/ReplicationGuide/BerkeleyDB-JE-Replication.pdf b/docs/ReplicationGuide/BerkeleyDB-JE-Replication.pdf new file mode 100644 index 0000000..96654fa Binary files /dev/null and b/docs/ReplicationGuide/BerkeleyDB-JE-Replication.pdf differ diff --git a/docs/ReplicationGuide/addremovenodes.html b/docs/ReplicationGuide/addremovenodes.html new file mode 100644 index 0000000..d4b4a4f --- /dev/null +++ b/docs/ReplicationGuide/addremovenodes.html @@ -0,0 +1,147 @@ + + + + + + Adding and Removing Nodes + + + + + + + + + +
+
+
+
+

Adding and Removing Nodes

+
+
+
+

+ As described in Adding and Removing Nodes from the Group, + a node is added to the replication group simply by starting it up + and allowing it to perform its start-up handshake with the + Master. Once an electable node has been added to the replication + group, it belongs to the replication group forever, or until you + explicitly remove it. Also, the node is uniquely identified within + the replication group by a name that you must give it when you start + up the process. +

+

+      This is worth remembering, because if you have electable nodes that have been added to the replication group, but which you then shut down for a long period of time, your replication group might be unable to perform a number of important tasks, such as:

+
+
    +
  1. +

    + Elect a Master. +

    +
  2. +
  3. +

    + Add a new node to the replicated group. +

    +
  4. +
  5. +

    + Delete a node from the replication group. +

    +
  6. +
  7. +

    + Successfully commit a transaction (this depends on the + durability guarantees in place for your application). +

    +
  8. +
+
+

+      All of these actions might be adversely affected by a series of unavailable electable nodes because, in order to do these things, the Master must be in contact with a majority of the electable nodes belonging to the replication group (Monitor and Secondary nodes do not count). So if too many electable nodes are either shut down or unavailable due to a network partition event, then these functions can become delayed or even completely unavailable.

+

+      For this reason, if you have electable nodes that you want to shut down for a long time, then you should remove those nodes from the replication group. JE provides a utility class that allows for node removal, so your application developer should have provided you with a tool of some kind that allows you to do this as a normal administrative function.

+
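+
+      The utility class in question is ReplicationGroupAdmin. The following is a minimal sketch of how such a tool might use it; the group name, helper address, and node name are placeholders reusing the names from configuration examples elsewhere in this guide:
+
+import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.je.rep.util.ReplicationGroupAdmin;
+
+...
+
+// Contact the group through one of its current members.
+Set<InetSocketAddress> helpers = new HashSet<InetSocketAddress>();
+helpers.add(new InetSocketAddress("mercury.example.com", 5001));
+
+ReplicationGroupAdmin admin =
+    new ReplicationGroupAdmin("PlanetaryRepGroup", helpers);
+
+// Remove the (ideally already shut down) electable node by its
+// unique node name. Remember that the name cannot be reused when
+// the node later rejoins the group.
+admin.removeMember("Jupiter");
+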

+ When removing an electable node from the replication group, remember + that: +

+
+
    +
  • +

    + for best results, shut down the node first. +

    +
  • +
  • +

    + a majority of the nodes must currently be in contact with + the Master in order to acknowledge the node removal. +

    +
  • +
+
+

+ If at some later time you want to restart the node and have it + join the replication group, you can do this using the normal + procedure that your application uses when starting a node for + the first time. Be aware, however, that you cannot reuse the + unique name that the node was using when you removed it from + the group as an electable node. Instead, give the node a + completely new unique name before having it rejoin the + replication group. +

+
+ + + diff --git a/docs/ReplicationGuide/administration.html b/docs/ReplicationGuide/administration.html new file mode 100644 index 0000000..6fefbda --- /dev/null +++ b/docs/ReplicationGuide/administration.html @@ -0,0 +1,229 @@ + + + + + + Chapter 7. Administration + + + + + + + + + +
+
+
+
+

Chapter 7. Administration

+
+
+
+ +

+ This chapter describes issues pertaining to running a JE + replication application. The topics discussed here have to do with + hardware configuration, backups, node configuration, and other + management issues that exist once the application has been + placed into production. +

+
+
+
+
+

Hardware

+
+
+
+

+ A JE replicated application should run well on typical + commodity multi-core hardware, although greater hardware + requirements than this may be driven by the architecture of your + particular application. Check with the software developers who + wrote your JE replicated application for any additional + requirements they may have over and above typical + multi-core hardware. +

+

+ That said, keep the following in mind when putting a JE + replication application into production: +

+
+
    +
  • +

    + Examine the hardware you intend to use, and review it for + common points of failure between nodes in the replication + groups, such as shared power supplies, routers and so + forth. +

    +
  • +
  • +

+      The hardware that you use does not have to be identical across the entire production deployment. However, it is important to ensure that the least capable electable node has the resources to function as the Master.

    +

    + The Master is typically the node where demand for machine + resources is the greatest. It needs to supply the + replication streams for each active Replica, in addition + to servicing the transaction load. +

    +

+      Note that Monitor nodes require only minimal resources as far as JE is concerned (although, again, your application developers may have written your Monitor nodes such that they need resources over and above what JE requires), because Monitor nodes only listen for changes in the replication group.

    +
  • +
  • +

    + Finally, your network is a critical part of your hardware + requirements. It is critical that your network be capable + of delivering adequate throughput under peak expected + production work loads. +

    +

    + Remember that your replicated application can consume + quite a lot of network resources when a Replica starts up + for the first time, or starts up after being shutdown for + a long time. This is because the Replica must obtain all + the data that it needs to operate. Essentially, this is a + duplicate of the data contained by the Master node. So + however much data the Master node holds, that much data + will be transmitted across your network per + node every time you start a new node. +

    +

    + For restarting nodes, the amount of data that will cross + your network is equal to the delta between the time the + Replica last shutdown and the state of your Master node + at the time that the Replica is starting up again. If the + Replica has been down for a long time (days or weeks), + this can be quite a lot of data, depending on your Master + node's workload. +

    +

    + Be aware, however, that restarting nodes do not have to + get their data from the Master node. It is possible for + them to catch up, or nearly catch up, using data obtained + from some other currently running Replica. See + Restoring Log Files + for more information. +

    +

    + Good application performance also depends on the + latency of network connections used by electable and + monitor nodes to perform elections, report election + results, and obtain acknowledgments. Consider + deploying secondary nodes on machines with higher + latency connections to the other members of the + replication group, keeping in mind that these nodes + still have the same throughput requirements as + electable nodes. +

    +
  • +
+
+
+
+ + + diff --git a/docs/ReplicationGuide/admintimesync.html b/docs/ReplicationGuide/admintimesync.html new file mode 100644 index 0000000..852b59e --- /dev/null +++ b/docs/ReplicationGuide/admintimesync.html @@ -0,0 +1,88 @@ + + + + + + Time Synchronization + + + + + + + + + +
+
+
+
+

Time Synchronization

+
+
+
+

+      For best results, it is strongly recommended that you synchronize the clocks on all the machines hosting your production replication group. Running a time synchronization daemon like NTPD is a simple way to keep time synchronized across your replication machines. Once the clocks are set, they are maintained by ntpd so that they rarely stray more than 128ms away from one another.

+

+      Be aware that JE checks for clock skew between the Master and a starting Replica node when the Replica node performs its startup handshake with the Master. (See Replica Startup for information on the startup handshake.) If the clock skew between the two nodes is too large, the handshake is aborted and JE throws an EnvironmentFailureException.

+

+      Also, well-synchronized clocks are required for a proper implementation of a time consistency policy (see Time Consistency Policies), as well as for correct internal bookkeeping by JE.

+

+ Finally, synchronized system clocks make it easier to correlate + events in the logging output from different nodes in the group. +

+
+ + + diff --git a/docs/ReplicationGuide/availability.html b/docs/ReplicationGuide/availability.html new file mode 100644 index 0000000..ca98947 --- /dev/null +++ b/docs/ReplicationGuide/availability.html @@ -0,0 +1,220 @@ + + + + + + Availability + + + + + + + + + +
+
+
+
+

Availability

+
+
+
+
+
+
+ + Write Availability + +
+
+ + Read Availability + +
+
+
+

+      A key difference between standalone JE and JE HA is that for standalone JE the environment is available for both reads and writes as long as the application (including the underlying hardware) is functioning correctly. That is, the availability of a standalone JE application is independent of the local durability policy set for the transaction. However, the distributed nature of JE HA means that availability can be dependent upon the state of other nodes in the replication group. It can also be dependent upon the policies you set for your HA application.

+
+
+
+
+

Write Availability

+
+
+
+

+ JE HA requires that a simple majority of electable nodes be + available to elect a Master. If a simple majority of those + nodes is not available, the group is not available for + writes because the group is unable to elect a Master. +

+

+ In the presence of a Master, the availability of a + replicated environment (at the Master) for write + operations is determined by the durability + requirements associated with the transaction: +

+
+
    +
  • +

    + If the transaction's durability requirements + specify an acknowledgement policy of NONE, the + Master is always available for write operations, + just as is the case for standalone JE + applications. +

    +
  • +
  • +

    + If the durability requirements are made more + stringent and specify a simple majority for + acknowledgements, or if all + the electable group members must acknowledge transaction + commits, the environment might not be available for + writes when one or more of the Electable Replicas is unable + to provide an acknowledgment. This loss of write + availability can occur even in the absence of + hardware failures. +

    +

+      Replicas might be unable to provide acknowledgements because a node is down. It could also occur if the Replica is simply lagging too far behind in the replication stream and so needs to commit earlier transactions before it can commit the current transaction. Note that in the absence of system-level failures, the Replica will eventually commit the transaction; it just cannot do so in the window of time required to indicate a successful commit of the transaction to the Master.

    +
  • +
+
+

+ In other words, a durability policy that calls for commit + acknowledgments can result in decreased availability of + the system for write operations. It is important for + you to keep this tradeoff in mind when choosing a + durability policy. +

+
+
+
+
+
+

Read Availability

+
+
+
+

+ A Master is always available for read operations because + the data on it is always absolutely consistent. However, + Replica read availability can be affected by the + consistency policy that you are using: +

+
+
    +
  • +

    + A Replica is always available for read operations + that do not have any read consistency requirements. + That is, when the Replica is allowed to lag + arbitrarily far behind the Master, then the Replica + will always be available to service read requests. +

    +
  • +
  • +

+      If you are using higher levels of read consistency, then Replicas might not be available for read operations. This occurs when the Replica is forced to wait until it has caught up far enough in the replication stream before it can service a read operation. For example, if you choose a time consistency policy, and the Replica cannot meet that consistency policy for a specific read operation, then the operation might be delayed or even abandoned entirely until the consistency policy can be met. This represents a loss of read availability.

    +

    + There are many reasons why a Replica might not be + able to meet a consistency policy. For example, + the Master might be very busy and so is unable to + supply the Replica with the replication stream + fast enough. Or, it could be because the Replica + is experiencing very heavy read loads and so + the replication stream might not be fast enough + to keep up. It is also possible that the Replica + has been down and is trying to catch up, which + means that it might not be able to meet a + consistency policy. +

    +

    + All of these scenarios represent a loss of read + availability, albeit a temporary one. +

    +
  • +
+
+

+ In other words, a consistency policy that requires the + Replica to match the state of the Master to one degree or + another can affect the Replica's read availability. It + is important for you to keep this tradeoff in mind when + choosing a consistency policy. +

+
+
+ + + diff --git a/docs/ReplicationGuide/backups.html b/docs/ReplicationGuide/backups.html new file mode 100644 index 0000000..9f0a448 --- /dev/null +++ b/docs/ReplicationGuide/backups.html @@ -0,0 +1,92 @@ + + + + + + Running Backups + + + + + + + + + +
+
+
+
+

Running Backups

+
+
+
+

+      Because JE replication causes a current copy of your environment to be available at every data node in the group, the need for frequent backups is greatly reduced. Basically, every time a change is made on the Master, that change is backed up to every Replica node currently running. The result is that for each write operation you get a real-time incremental backup to n-1 nodes, where n is the total number of data nodes (including the Master) currently running in your replication group.

+

+ For this reason, JE does not currently support formal + incremental backups of replicated environments. An application + based upon the DbBackup utility class can be written to allow + administrators to create full backups. This is useful for + creating a backup to be stored on offline media, if your data + strategy calls for that level of protection. +

+
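+
+      A minimal sketch of how such a tool might use DbBackup follows; env is assumed to be an open Environment handle on the node being backed up:
+
+import com.sleepycat.je.util.DbBackup;
+
+...
+
+DbBackup backupHelper = new DbBackup(env);
+
+// Prevent log file deletion while the backup is in progress.
+backupHelper.startBackup();
+try {
+    String[] filesToCopy = backupHelper.getLogFilesInBackupSet();
+    // Copy each of these files to the backup medium here.
+} finally {
+    // Release the log files back to the cleaner.
+    backupHelper.endBackup();
+}
+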

+ Remember that when performing a full backup, you should obtain + the backup from a node that is current. Either use the Master + node itself, or use a Replica node that must acknowledge a + transaction commit before the commit operation can complete on + the Master. +

+

+ Note that DbBackup has some functionality that + is specifically useful for replicated environments. See + Backing up a Replicated Application + for details. +

+
+ + + diff --git a/docs/ReplicationGuide/cons_and_dur.html b/docs/ReplicationGuide/cons_and_dur.html new file mode 100644 index 0000000..6f2e951 --- /dev/null +++ b/docs/ReplicationGuide/cons_and_dur.html @@ -0,0 +1,390 @@ + + + + + + Consistency and Durability Use Cases + + + + + + + + + +
+
+
+
+

Consistency and Durability Use Cases

+
+
+
+
+
+
+ + Out on the Town + +
+
+ + Bio Labs, Inc + +
+
+
+

+ As discussed throughout this chapter, there is an interaction + between consistency and durability. This interaction results in + design decisions that you will have to make when designing your + HA application. To further illustrate this interaction, this + section provides several use cases as examples of how + durability and consistency policies are used to reach + application design goals. +

+
+
+
+
+

Out on the Town

+
+
+
+

+ Out on the Town is a social networking + site about restaurants and artistic events. Restaurant + locations and an event calendar are available on the site. + Members can submit reviews about restaurants and events, + and other members can comment on the reviews. Further, + members maintain accounts and profiles. +

+

+ The site experiences most of its traffic as read-only + requests. There is heavy read traffic from users who are + browsing the site. In addition, periodic write traffic + occurs as reviews and comments are submitted to the site. +

+
+
+
+
+

Reading Reviews

+
+
+
+

+      Based on the site's usage characteristics, the web developers know that it is critical that the site perform well for read traffic. Listings must be readily available, and the site must be able to adapt to changing read loads. However, the site needs only a weak consistency guarantee for most reads.

+

+ While users should not experience a delay when they + access the site, it is okay if read requests do not see + the very latest reviews. For this reason, when starting + read-only transactions for the purpose of viewing + reviews, the application specifies a consistency policy + of NoConsistencyRequiredPolicy. This provides the highest + possible availability for read requests for the Replica + nodes, which is the critical thing for this particular + site. (Any other consistency policy might cause the + node to delay reads while waiting for the node to meet + its consistency policy, which would represent an + unacceptable loss of availability as it could cost the + site lost readership.) +

+
+
+
+
+
+

Writing Reviews

+
+
+
+

+      Most write operations are for new user reviews, and for comments on those reviews. For these writes, the application needs only a very lenient durability policy. It is not critical that a new review be immediately available to other users, nor is it critical that reviews be saved in the event of a catastrophic failure.

+

+      Therefore, the application uses the convenience constant Durability.COMMIT_WRITE_NO_SYNC as the system default durability policy. (This is done by specifying the durability policy using EnvironmentMutableConfig.setDurability(); a brief sketch follows this list.) This means:

+
+
    +
  • +

    + Write operations on the Master use + Durability.SyncPolicy.WRITE_NO_SYNC. +

    +
  • +
  • +

    + When the write operation is forwarded by the Master to the Replicas, those Replicas use + Durability.SyncPolicy.NO_SYNC when they internally update their own + databases. +

    +
  • +
  • +

    + Only a simple majority of the Electable nodes need to acknowledge the + update. +

    +
  • +
+
+
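+
+      A minimal sketch of setting this site-wide default (envConfig is the EnvironmentConfig used to open the environment):
+
+// COMMIT_WRITE_NO_SYNC is the convenience constant equivalent to
+// the three bullet points above: WRITE_NO_SYNC on the Master,
+// NO_SYNC on the Replicas, and a simple majority of
+// acknowledgements.
+envConfig.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
+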
+
+
+
+
+

Updating Events and Restaurant Listings

+
+
+
+

+      Periodically, the calendar of events and restaurant locations are updated. These write operations happen fairly infrequently relative to reviews and comments, but the site's operators deem this information to be of more importance (or value) than the reviews and comments. Therefore, they want a stronger guarantee that the information is backed up to all nodes, which is the same thing as saying they want a stronger durability guarantee. Nevertheless, they also want this class of writes to consume few resources.

+

+ To achieve this, for transactions performing these kind of writes, the web + engineers choose to override the site's default durability guarantee. Instead, + they use a durability guarantee that: +

+
+
    +
  • +

    + Uses Durability.SyncPolicy.SYNC for the local synchronization policy. + This ensures that the write is fully backed up to the Master's local + disk before the transaction commit operation returns. +

    +
  • +
  • +

    + Uses Durability.SyncPolicy.WRITE_NO_SYNC for the synchronization + policy on the Replica nodes. This causes the updates to be written to + the disk controller's buffers, but they are not flushed to disk before + the Electable Replicas acknowledge the commit operation. +

    +
  • +
  • +

+      Stays with a simple majority for acknowledgements, which is the same as is used for the default durability policy.

    +
  • +
+
+

+ That is, for updating events and restaurant locations, the application uses this + durability policy: +

+
    useForUpdates = 
+         new Durability(Durability.SyncPolicy.SYNC,
+                        Durability.SyncPolicy.WRITE_NO_SYNC,
+                        Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); 
+
+
+
+
+
+

Updating Account Profiles

+
+
+
+

+ If a user makes an account profile change as part of a + web session, she will naturally expect to see her + changes when she next looks at the profile during the + same session. From the user's perspective, this is all + one operation: she causes her profile to change and + then the profile page is refreshed with her new + information. +

+

+ However, from the application's perspective, there are + several things going on: +

+
+
    +
  • +

    + A write transaction is performed on the Master. +

    +
  • +
  • +

    + One or more read transactions are performed on the + Replica node in use by the user as she updates + her profile and then reads back the changes she + just made. +

    +
  • +
+
+

+ To ensure that the session interaction looks + intuitively consistent to the user, the application: +

+
+
    +
  • +

    + Performs the write transaction on the Master. +

    +
  • +
  • +

    + Saves the CommitToken for the account profile + update within the web session. +

    +
  • +
  • +

+      The Replica node uses a CommitPointConsistencyPolicy policy for the follow-on account profile read(s). To do this, the application uses the CommitToken stored in the previous step when beginning the read transactions. In this way, the Replica will not serve up the new profile page until it has received the profile updates from the Master. From the user's perspective, there may be a delay in her page refresh when she submits her updates. How long a delay the user experiences is a function of how busy the site is with write updates, as well as the performance characteristics of the hardware and networks in use by the site. (A sketch of this pattern follows this list.)

    +
  • +
+
+
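+
+      The following is a minimal sketch of this pattern; it assumes masterEnv and replicaEnv are ReplicatedEnvironment handles on the respective nodes, and elides the profile write itself:
+
+// On the Master: perform the profile update and save the
+// resulting commit token in the user's web session.
+Transaction txn = masterEnv.beginTransaction(null, null);
+// ... write the profile changes here ...
+txn.commit();
+CommitToken profileToken = txn.getCommitToken();
+
+// On the Replica: begin the follow-on read only once the profile
+// update has been replayed locally, waiting up to 10 seconds.
+TransactionConfig tc = new TransactionConfig();
+tc.setConsistencyPolicy(
+    new CommitPointConsistencyPolicy(profileToken,
+                                     10, TimeUnit.SECONDS));
+Transaction readTxn = replicaEnv.beginTransaction(null, tc);
+// ... read the profile page data here ...
+readTxn.commit();
+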
+
+
+
+
+
+

Bio Labs, Inc

+
+
+
+

+      Bio Labs, Inc is a biotech company that does pharmaceutical production, which must be audited by government agencies. Production sampling results are logged frequently. All such updates must be guaranteed to be backed up. (In other words, this application requires a very high durability guarantee.)

+

+ In addition, there are frequent application defined sample points that represent + phases in the production cycle. The application performs monitoring of the + production stream. These reads are time critical, so the data must be no older + than a specific point in time. +

+
+
+
+
+

Logging Sampling Results

+
+
+
+

+ Due to the auditing requirement for the sampling results, the application + developers want an extremely high data durability guarantee. Therefore, they + require the synchronization policy on both the + Master and all Electable Replica nodes to + be Durability.SyncPolicy.SYNC, which means that the logging data is guaranteed to + be written to stable storage before the host returns from its transaction + commit. +

+

+ For an acknowledgement policy, the engineers + considered requiring all Electable nodes to + acknowledge the commit. This would provide them with the strongest possible + durability guarantee. However, they decided against this because it represents a + possible loss of write availability for the + application; if even one Electable node is + shutdown or hidden by a network outage, then the Master would not be able to + perform any write operations at all. So instead, the engineers stick with the + default acknowledgement policy, which is to require a simple majority of the + Electable nodes to acknowledge the commit. +

+

+ The durability policy, then, looks like this: +

+
    resultsDurability = 
+         new Durability(Durability.SyncPolicy.SYNC,
+                        Durability.SyncPolicy.SYNC,
+                        Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); 
+
+
+
+
+
+

Monitoring the Production Stream

+
+
+
+

+ The BioLabs application is required to monitor the production stream. All such + monitoring must be of data that is no older than a defined age. +

+

+      This represents a read activity that has a time-critical consistency requirement. Therefore, whenever the application performs a write (that is, logs sampling results), the application creates a CommitToken. Each of the nodes then uses this commit token to specify a CommitPointConsistencyPolicy policy when the Environment.beginTransaction() method is called. This guarantees that the application's data monitoring activities will be performed on data that is not out of date or stale.

+
+
+
+ + + diff --git a/docs/ReplicationGuide/consistency.html b/docs/ReplicationGuide/consistency.html new file mode 100644 index 0000000..4191a9f --- /dev/null +++ b/docs/ReplicationGuide/consistency.html @@ -0,0 +1,508 @@ + + + + + + Managing Consistency + + + + + + + + + +
+
+
+
+

Managing Consistency

+
+
+
+ +

+ In a traditional stand-alone transactional application, consistency + means that a transaction takes the database from one consistent state to another. What + defines a consistent state is application-specific. This transition is made atomically, + that is, either all the operations that constitute the transaction are performed, or + none of them are. JE HA supports this type of transactional consistency both on the + Master, as well as on the Replicas as the replication stream is replayed. That is, in + the absence of failures, the Replicas will see exactly the same sequence of transitions, + from one consistent state to another, as the Master. + +

+

+ A JE HA application must additionally concern itself with the data consistency of + the Replica with respect to the Master. In a distributed system like JE HA, the + changes made at the Master are not always instantaneously available at every Replica, + although they eventually will be. For example, consider a + three node group, containing only Electable nodes, where a + change is made on the Master and the transaction is committed with a durability policy + requiring acknowledgments from a simple majority of nodes. After a successful commit of + this transaction, the changes will be available at the Master and at one other Replica, + thus satisfying the requirement for a simple majority of acknowledgments. The state of + the Master and the acknowledging Replica will be consistent with each other after the + transaction has been committed, but the transaction commit makes no guarantees about the + state of the third Replica after the commit. +

+

+ In general, Replicas not directly involved in contributing to the acknowledgment of a + transaction commit will lag in the replay of the replication stream because they do not + synchronize their commits with the Master. As a consequence, their state, on an + instantaneous basis, may not be current with respect to the Master. However, in the + absence of further updates, all Replicas will eventually catch up and reflect the + instantaneous state of the Master. This means that a Replica which is not + consistent with the Master simply reflects an earlier locally consistent state at the + Master because transaction updates on the Replica are always applied, atomically and in + order. From the application's perspective, the environment on the Replica goes through + exactly the same sequence of changes to its persistent state as the Master. +

+

+      A Replica may similarly lag behind the Master if it has been down for some period of time and was unable to communicate with the Master. Such a Replica will catch up when it is brought back up, and will eventually become consistent with the Master.

+

+      Given the distributed nature of a JE HA application, and the fact that some nodes might lag behind the Master, the question you have to ask yourself is how long it will take for a lagging node to become consistent relative to the Master. More to the point: how far behind the Master are you willing to allow the node to lag?

+

+ This should be one of your biggest concerns when it comes to + architecting a JE HA application. +

+

+ You define how current the nodes in your replication group must + be by defining a consistency policy. + You define your consistency policy using an implementation of the + ReplicaConsistencyPolicy interface. This interface allows you + to define how current the Replica must be before a transaction + can be started on the Replica. (Remember that all read + operations are performed within a transaction.) If the Replica + is not current enough, then the start of that transaction is + delayed until that level of consistency has been reached. This + means that Replicas that are not current enough will block read + operations until they are brought up to date. +

+

+      Obviously your consistency policy can have an effect on your Replica's read performance by increasing the latency experienced by read transactions. This is because transactions may have to wait to either begin or commit until the consistency policy can be satisfied. If the consistency policy is so stringent that it cannot be satisfied using the available resources, the Replica's availability for reads may deteriorate as transactions time out. A Durability.SyncPolicy.SYNC policy on the Replica can slow down write operations on the Replica, making it harder for the Replica to meet its consistency guarantee. Conversely, a Durability.SyncPolicy.NO_SYNC policy on the Replica makes it easier for the Replica to keep up, which means you can have a stronger consistency guarantee.

+

+      Three interface implementations are available for you to use when defining your consistency policy:

+
+
    +
  • +

    + NoConsistencyRequiredPolicy +

    +

    + No consistency policy is enforced. This policy allows + a transaction on a Replica to proceed regardless of the + state of the Replica relative to the Master. This + policy can also be used to access a database when the + replication node is in a DETACHED state. +

    +
  • +
  • +

    + TimeConsistencyPolicy +

    +

    + Defines how far back in time the Replica is permitted + to lag the Master. +

    +
  • +
  • +

    + CommitPointConsistencyPolicy +

    +

    + Defines consistency in terms of a specified commit + token. That is, the Replica must be at least as current + as the CommitToken provided to this class. +

    +
  • +
+
+
+
+
+
+

Setting Consistency Policies

+
+
+
+

+ You set a consistency policy by using + ReplicationConfig.setConsistencyPolicy(). + For example: +

+
   EnvironmentConfig envConfig = new EnvironmentConfig();
+   envConfig.setAllowCreate(true);
+   envConfig.setTransactional(true);
+
+   // Require no synchronization for transactional commit on the 
+   // Master, but full synchronization on the Replicas. Also,
+   // wait for acknowledgements from a simple majority of Replicas.
+   Durability durability =
+          new Durability(Durability.SyncPolicy.NO_SYNC,
+                         Durability.SyncPolicy.SYNC,
+                         Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+
+   envConfig.setDurability(durability);
+
+   // Identify the node
+   ReplicationConfig repConfig = 
+        new ReplicationConfig("PlanetaryRepGroup",
+                              "Jupiter",
+                              "jupiter.example.com:5002");
+ 
+   // Use the node at mercury.example.com:5001 as a helper to find the rest
+   // of the group.
+   repConfig.setHelperHosts("mercury.example.com:5001");
+
+   // Turn off consistency policies. Transactions can occur
+   // regardless of how consistent the Replica is relative
+   // to the Master.
+   NoConsistencyRequiredPolicy ncrp =
+        new NoConsistencyRequiredPolicy();
+   repConfig.setConsistencyPolicy(ncrp);
+
+   ReplicatedEnvironment repEnv =
+      new ReplicatedEnvironment(home, repConfig, envConfig); 
+

+ Note that the consistency policy is set on a node-by-node + basis. There is no requirement that you set the same policy for + every node in your replication group. +

+

+ You can also set consistency policies on a + transaction-by-transaction basis when you begin the + transaction: +

+
   // Turn off consistency policies. The transactions can
+   // be performed regardless of how consistent the Replica is 
+   // relative to the Master.
+   NoConsistencyRequiredPolicy ncrp =
+        new NoConsistencyRequiredPolicy();
+
+   TransactionConfig tc = new TransactionConfig();
+   tc.setConsistencyPolicy(ncrp);
+   // env is a ReplicatedEnvironment handle
+   env.beginTransaction(null, tc); 
+
+
+
+
+
+

Time Consistency Policies

+
+
+
+

+ A time consistency policy is a time-oriented policy that defines how far back in time the Replica is permitted to lag the Master. It does so by comparing the time associated with the latest transaction committed on the Master with the current time. If the Replica lags by an amount greater than the permissible lag, the start of the transaction is held back until the Replica has replayed enough of the replication stream to narrow the lag to within the permissible amount. +

+

+ Use of a time based consistency policy requires that nodes + in a replication group have their clocks reasonably + synchronized. This can be easily achieved using a daemon + like NTPD. +

+

+ You implement a time-based consistency policy by using the + TimeConsistencyPolicy class. To instantiate this class, + you provide it with the following: +

+
+
    +
  • +

    + A number representing the permissible lag. +

    +
  • +
  • +

    + A TimeUnit constant indicating the units of time + that the permissible lag represents. +

    +
  • +
  • +

    + A number representing the timeout period during + which a transaction will wait for the Replica to + catch up so that the consistency policy can be met. + If the transaction waits more than the timeout + period, a ReplicaConsistencyException is thrown. +

    +
  • +
  • +

    + A TimeUnit constant indicating the units of time + in use for the timeout value. +

    +
  • +
+
+

+ For example: +

+
   EnvironmentConfig envConfig = new EnvironmentConfig();
+   envConfig.setAllowCreate(true);
+   envConfig.setTransactional(true);
+
+   // Require no synchronization for transactional commit on the 
+   // Master, but full synchronization on the Replicas. Also,
+   // wait for acknowledgements from a simple majority of Replicas.
+   Durability durability =
+          new Durability(Durability.SyncPolicy.NO_SYNC,
+                         Durability.SyncPolicy.SYNC,
+                         Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+
+   envConfig.setDurability(durability);
+
+   // Identify the node
+   ReplicationConfig repConfig = 
+        new ReplicationConfig("PlanetaryRepGroup",
+                              "Jupiter",
+                              "jupiter.example.com:5002");
+ 
+   // Use the node at mercury.example.com:5001 as a helper to find the rest
+   // of the group.
+   repConfig.setHelperHosts("mercury.example.com:5001");
+
+   // Set consistency policy for replica.
+   TimeConsistencyPolicy consistencyPolicy = new TimeConsistencyPolicy
+       (1, TimeUnit.SECONDS, /* 1 sec of lag */
+       10, TimeUnit.SECONDS /* Wait up to 10 sec */);
+   repConfig.setConsistencyPolicy(consistencyPolicy);
+
+   ReplicatedEnvironment repEnv =
+      new ReplicatedEnvironment(home, repConfig, envConfig); 
+
+
+
+
+
+

Commit Point Consistency Policies

+
+
+
+

+ A commit point consistency policy defines consistency in + terms of the commit of a specific transaction. This policy + can be used to ensure that a Replica is at least current + enough to have the changes made by a specific transaction. + Because transactions are applied serially, by ensuring a + Replica has a specific commit applied to it, you know that + all transaction commits occurring prior to the specified + transaction have also been applied to the Replica. +

+

+ As is the case with a time consistency policy, if the + Replica is not current enough relative to the Master, all + attempts to begin a transaction will be delayed until the + Replica has caught up. If the Replica does not catch up + within a specified timeout period, the transaction will + throw a ReplicaConsistencyException. +

+

+ In order to specify a commit point consistency policy, you + must provide a CommitToken that is used to identify the + transaction that the Replica must have in order to be + current enough. Because the commit point that you care + about will change from transaction to transaction, you do + not specify commit point consistency policies on an + environment-wide basis. Instead, you specify them when you + begin a transaction. +

+

+ For example, suppose the application is a web application where a replicated group is implemented within a load balanced web server group. Each request to the web server consists of an update operation followed by read operations (say, from the same client). The read operations naturally expect to see the data from the updates executed by the same request. However, the read operations might have been routed to a node that did not execute the update. +

+

+ In such a case, the update request would generate a CommitToken, which would be resubmitted by the browser along with subsequent read requests. The read request could be directed at any one of the available web servers by a load balancer. The node which executes the read request would create a CommitPointConsistencyPolicy with that CommitToken and use it when beginning the transaction. If the environment at the web server was already current enough, it could immediately execute the transaction and satisfy the request. If not, the transaction begin would stall until the Replica replay had caught up and the change was available at that web server. +

+

+ You obtain a commit token using the + Transaction.getCommitToken() method. Use this method after + you have successfully committed the transaction that you + want to base a CommitPointConsistencyPolicy upon. +

+

+ For example: +

+
Database myDatabase = null;
+Environment myEnv = null;
+CommitToken ct = null;
+try {
+    ...
+    // Environment and database setup removed for brevity
+    ...
+
+    Transaction txn = myEnv.beginTransaction(null, null);
+
+    try {
+        myDatabase.put(txn, key, data);
+        txn.commit();
+        ct = txn.getCommitToken();
+        if (ct != null) {
+            // Do something with the commit token to
+            // forward it to the Replica where you
+            // want to use it.
+        }
+    } catch (Exception e) {
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    }
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} 
+

+ To create your commit point consistency policy, transfer the commit token to the Replica performing the read using whatever mechanism makes sense for your HA application. Note that CommitToken implements Serializable, so you can use the standard Java serialization mechanisms when passing the commit token between processes. Then create the policy for that specific transaction handle: +

+
Database myDatabase = null;
+Environment myEnv = null;
+CommitToken ct = null;
+try {
+    ...
+    // Environment and database setup removed for brevity
+    ...
+
+    CommitPointConsistencyPolicy cpcp = 
+        new CommitPointConsistencyPolicy(ct,      // The commit token
+                           10, TimeUnit.SECONDS); // Timeout value
+
+    TransactionConfig txnConfig = new TransactionConfig();
+    txnConfig.setConsistencyPolicy(cpcp);
+
+
+    Transaction txn = myEnv.beginTransaction(null, txnConfig);
+
+    try {
+        // Perform your database read here using the transaction
+        // handle, txn.
+        txn.commit();
+    } catch (Exception e) {
+        // There are quite a lot of different exceptions that can be
+        // seen at this level, including the LockConflictException.
+        // We just catch Exception for this example for simplicity's 
+        // sake.
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    }
+
+} catch (ReplicaConsistencyException rce) {
+        // Deal with this timeout error here. It is thrown by the
+        // beginTransaction operation if the consistency policy 
+        // cannot be met within the timeout time.
+} catch (DatabaseException de) {
+    // Database exception handling goes here.
+} catch (Exception ee) {
+    // General exception handling goes here.
+}
+
+
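+ For illustration, the following is a minimal sketch of moving a CommitToken between processes with standard Java serialization. The socket handling shown here is illustrative only and is not part of the JE API: +

+
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.net.Socket;
+
+import com.sleepycat.je.CommitToken;
+
+// On the node that performed the update: send the token.
+void sendToken(Socket sock, CommitToken ct) throws Exception {
+    ObjectOutputStream out =
+        new ObjectOutputStream(sock.getOutputStream());
+    out.writeObject(ct);
+    out.flush();
+}
+
+// On the Replica that will perform the read: receive the token,
+// ready to be handed to a CommitPointConsistencyPolicy.
+CommitToken receiveToken(Socket sock) throws Exception {
+    ObjectInputStream in =
+        new ObjectInputStream(sock.getInputStream());
+    return (CommitToken) in.readObject();
+}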
+ + + diff --git a/docs/ReplicationGuide/datamanagement.html b/docs/ReplicationGuide/datamanagement.html new file mode 100644 index 0000000..77034fa --- /dev/null +++ b/docs/ReplicationGuide/datamanagement.html @@ -0,0 +1,278 @@ + + + + + + Managing Data Guarantees + + + + + + + + + +
+
+
+
+

Managing Data Guarantees

+
+
+
+
+
+
+ + Durability + +
+
+ + Managing Data Consistency + +
+
+
+

+ All replicated applications are first and foremost transactional applications. This means that you have the standard data guarantee issues to consider, all of which have to do with how durable and consistent you want your data to be. Of course, considerations of this nature also play a role in your application's performance. These issues are even more important for replicated applications because replication adds additional dimensions to them. +

+

+ Notably, in a replicated application you must decide how durable your data will be by deciding how careful the Master should be to make sure a data write has been written to disk on its various Replica nodes before completing the transaction. +

+

+ Consistency also adds an additional dimension in a replicated + application, because now you must decide how consistent the + various nodes in the replication group will be relative to + the Master at any given time. If no writes are being + performed on the Master, all Replicas will eventually catch + up to the Master and so be completely consistent with it. + But for most HA applications, writes are occurring on the + Master, and so it is possible for some number of your + Replicas to lag behind the Master. What you have to decide, + then, is how sensitive your application is to this kind of + temporary inconsistency. +

+

+ Note that your consistency requirements can be gated by your durability requirements. Durability, in turn, can be gated by any concerns you might have about write throughput. At the same time, your consistency requirement can have an effect on the read performance of your Replicas. It is therefore a mistake to think about any one of these requirements in the absence of the others. +

+
+
+
+
+

Durability

+
+
+
+

+ One of the reasons you might be writing a replicated + application is to achieve a higher durability guarantee + than you can get with a traditional transactional + application. In a traditional application, your data's + durability is a function of how you perform your + transactional commits, and how frequently you perform + your backups. For this class of application, the + strongest durability guarantee you can have is to use + synchronous commits (the commit does not + complete until the data is written to disk), coupled with + very frequent backups of your environment. +

+

+ The problem with a stand-alone application in which you + are seeking a very high durability guarantee is that your + write throughput will suffer. Synchronous commits + require disk writes, and disk I/O is one of the most + expensive operations you can ask a database to perform. +

+

+ In order to increase write throughput in your + transactional application, you may decide to use + asynchronous commits that do not require the disk I/O to + complete before the transaction commit completes. + The problem with this is that your application can + potentially crash before a transaction has been + completely written to disk. This represents a loss of + data, which is to say the data is not durable. +

+

+ Replication can help with your data durability in a + couple of ways. Most importantly, replication allows you to + commit to the network. This means + that when your Master commits a transaction, the results + of that commit are sent to one or more nodes available + over the network. Consequently, multiple disks, disk + controllers, power supplies, and CPUs are used to ensure + the data modification makes it to stable storage. +

+

+ Usually JE makes the commit operation on the Master wait until it receives acknowledgements from some number of electable nodes before returning from the operation. However, if you want to increase write throughput, you can configure your Master to proceed without acknowledgements, and so return immediately from the commit operation (once the commit operation has met the local durability requirement). The price that you pay for this is a reduced durability guarantee. How much the guarantee is reduced is a function of the number of electable nodes in your replication group (the more you have, the higher your durability guarantee is) and the quality and stability of your network. +

+

+ Alternatively, you can obtain an + extremely high durability guarantee by configuring the Master + to wait for all electable nodes to acknowledge a commit + operation before returning from the operation. The price + you pay for this very high guarantee is greatly reduced + write throughput. +

+
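+ For illustration, these two extremes correspond to the ReplicaAckPolicy that you supply when constructing a Durability object. The local and Replica sync policies chosen below are just examples: +

+
+import com.sleepycat.je.Durability;
+
+// Favor write throughput: do not wait for any acknowledgements.
+Durability fastest =
+    new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, // Master
+                   Durability.SyncPolicy.NO_SYNC,       // Replicas
+                   Durability.ReplicaAckPolicy.NONE);
+
+// Favor durability: wait for every electable node to acknowledge.
+Durability safest =
+    new Durability(Durability.SyncPolicy.SYNC,          // Master
+                   Durability.SyncPolicy.SYNC,          // Replicas
+                   Durability.ReplicaAckPolicy.ALL);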

+ For information on configuring and managing durability + guarantees for your replicated application, see + Managing Durability. +

+
+
+
+
+
+

Managing Data Consistency

+
+
+
+

+ Data consistency means that the data you thought you + wrote to your environment is in fact written to your + environment. It also means that you will never find + partial records written to your environment. +

+

+ In a replicated application, consistency also means that + data which is available on the Master is also available + on the Replicas. +

+

+ A simple transactional application offers consistency + guarantees that are enforced when you commit a + transaction. Your replicated application also offers this + consistency guarantee (because it is also a transactional + application). For this reason, the environment on the + Master is always absolutely consistent. But beyond that, you need to manage + consistency for data across all the nodes in your + replication group. +

+

+ When you commit a transaction on the Master, your Replica nodes may or may not have the data changes performed by that transaction at the end of the commit. Whether they do depends on how high a durability guarantee you implemented for your Master (see the previous section). If, for example, you configured your Master to require acknowledgements from all electable nodes before returning from the commit, then the data will be consistently available across all of those nodes in the replication group, although not necessarily on secondary nodes. However, if you configured the Master such that no acknowledgements are necessary, then your data is probably not consistent across the replication group. +

+

+ To ensure that read transactions on the Replicas see a + sufficiently consistent view of the environment, you can + set a consistency policy for each transaction. This + policy describes how current the Replica must be before a + transaction can be initiated on it. If the Replica is not + current enough, the start of the transaction is delayed + until the Replica has caught up. +

+

+ There are two possible consistency policies. First, there is a time-based policy that describes how far back in time the Replica is allowed to lag behind the Master. Second, you can use a commit-based consistency policy that is based on the commit of a specified transaction. This policy is used to ensure the Replica is at least current enough to have the changes made by a specific transaction, and by all transactions committed prior to the specified transaction. The start of a transaction on a Replica can be delayed until the Replica can meet the consistency policy defined for that transaction. +

+

+ This means that a stringent consistency policy can affect + your Replica's read throughput. Transactions, even + read-only transactions, cannot begin until the Replica is + consistent enough. So if you have a + Replica that has lagged far behind the Master, and which + is having trouble catching up due to network latency or + other issues, then read requests may stall, and perhaps + even time out, which will affect the latency of your + Replica's read requests, and perhaps even its + overall availability for read requests. For this reason, + give careful consideration to how well you want your + Replica to perform on reads, versus how consistent you + want the Replica to be with other nodes in the + replication group. +

+

+ For more information on managing consistency in your + replicated application, see + Managing Consistency. +

+
+
+ + + diff --git a/docs/ReplicationGuide/dbbackup.html b/docs/ReplicationGuide/dbbackup.html new file mode 100644 index 0000000..85b8d54 --- /dev/null +++ b/docs/ReplicationGuide/dbbackup.html @@ -0,0 +1,111 @@ + + + + + + Backing up a Replicated Application + + + + + + + + + +
+
+
+
+

Backing up a Replicated Application

+
+
+
+

+ In a stand-alone, non-replicated JE application, the log is strictly append only. You use the DbBackup class to help applications coordinate backups while database operations are continuing to add to the log. This helper class does this by defining the log files needed for a consistent backup, and then freezing all changes to those files, including any changes that might be made by JE background operations. The application can copy that defined set of files and finish the backup without checking for the ongoing creation of new files. Also, there is no need to check for a newer version of the last file on the next backup. +

+

+ When you are using JE HA, however, log files other than the + last log file might be modified as part of the HA sync-up + operation. Though a rare occurrence, such + modifications would invalidate the backup because there is the + chance that files are modified after being copied. +

+

+ If this happens, DbBackup.endBackup() throws a + LogOverwriteException. Upon encountering this exception, the + backup files should be discarded and a new set of backup files + created. +

+

+ For example: +

+
        for (int i=0; i < BACKUP_RETRIES; i++) {
+            final ReplicatedEnvironment repEnv = ...;
+            final DbBackup backupHelper = new DbBackup(repEnv);
+            
+            backupHelper.startBackup();
+            String[] filesForBackup = 
+                backupHelper.getLogFilesInBackupSet();
+
+            /* Copy the files to archival storage. */
+            myApplicationCopyMethod(filesForBackup);
+            
+            try {
+                backupHelper.endBackup();
+                break;
+            } catch (LogOverwriteException e) {
+                /* Remove backed up files. */ 
+                myApplicationCleanupMethod();
+                continue;
+            } finally {
+                repEnv.close();
+            }
+        } 
+
+ + + diff --git a/docs/ReplicationGuide/election-override.html b/docs/ReplicationGuide/election-override.html new file mode 100644 index 0000000..d0f8446 --- /dev/null +++ b/docs/ReplicationGuide/election-override.html @@ -0,0 +1,331 @@ + + + + + + Appendix A. Managing a Failure of the Majority + + + + + + + + +
+
+
+
+

Appendix A. Managing a Failure of the Majority

+
+
+
+

+ Normal operation of JE HA requires that at least a simple majority + of electable nodes be available to form a quorum for election of a + new Master, or when committing a transaction with default + durability requirements. The number of electable nodes (the + Electable Group Size) is obtained from persistent internal metadata + that is stored in the environment and replicated across all + members. See Replication Group Life Cycle for details. +

+

+ Under exceptional circumstances, a simple majority of electable nodes may + become unavailable for some period of time. With only a minority + of electable nodes available, the overall availability of the group can be + adversely affected. For example, the group may be unavailable for + writes because a master cannot be elected. Also, the Master may be + unable to satisfy the durability requirements for a transaction + commit. The group may also be unavailable for reads, because the + absence of a Master might cause a Replica to be unable to meet + consistency requirements. +

+

+ To deal with this exceptional circumstance + — especially if the situation is likely to persist for an + unacceptably long period of time — JE HA provides a + mechanism by which you can modify the way in which the number of + electable nodes, and consequently the quorum requirements for + elections and commit acknowledgments, is calculated. The escape + mechanism provides a way to override the normal computation of the + Electable Group Size. The override is accomplished by specifying + the size using the mutable replication configuration parameter + ELECTABLE_GROUP_SIZE_OVERRIDE. +

+
+

Note

+

+ You should use this parameter sparingly, if at all. Overriding + your Electable Group Size can have the consequence of allowing + your replication group's election participants to elect two Masters + simultaneously. This is especially likely to occur if a + majority of the nodes are unavailable due to a network + partition event, and so all nodes are running but are simply + not communicating with one another. +

+

+ Be very cautious when using this configuration + option. +

+
+
+
+
+
+

Overriding the Electable Group Size

+
+
+
+ +

+ When you set ELECTABLE_GROUP_SIZE_OVERRIDE to a non-zero value, the + number that you provide identifies the number of electable nodes that + are required to meet quorum requirements. This means that the + internally stored Electable Group Size value is ignored (but + not changed) when this option is non-zero. By setting + ELECTABLE_GROUP_SIZE_OVERRIDE to the number of electable nodes known to be + available, the remaining replication group participants can + make forward progress, both in terms of electing a new + Master (if this is required) and in terms of meeting durability + and consistency requirements. +

+

+ When this option is zero (0), then the node will behave + normally, and the internal Electable Group Size is honored by + the node. This is the default value and behavior. +

+
+
+
+
+

Setting the Override

+
+
+
+

+ To override the internal Electable Group Size value: +

+
+
    +
  1. +

    + Verify that the simple majority of electable nodes are in fact + down and cannot elect their own independent Master. +

    +
  2. +

    + Set ELECTABLE_GROUP_SIZE_OVERRIDE to the number of + electable nodes known to be available. For best + results, set this override on all available + electable nodes. +

    +

    + It might be sufficient to set ELECTABLE_GROUP_SIZE_OVERRIDE on just one electable node in order to hold an election, because the proposer at that one node can conclude the election. However, if the election results in a Master that is not configured with this override, it might result in InsufficientAcksExceptions at the Master. So, again, set the override on all available electable nodes. +

    +
+
+

+ Having set the override, the available electable members of the + replication group can now meet quorum requirements. +

+
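+ Because ELECTABLE_GROUP_SIZE_OVERRIDE is a mutable parameter, it can also be applied to a running node programmatically. The following is a minimal sketch, assuming a live ReplicatedEnvironment handle named repEnv: +

+
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicationMutableConfig;
+
+// Tell this node that only two electable nodes are available.
+ReplicationMutableConfig mutableConfig = repEnv.getRepMutableConfig();
+mutableConfig.setConfigParam(
+    ReplicationMutableConfig.ELECTABLE_GROUP_SIZE_OVERRIDE, "2");
+repEnv.setRepMutableConfig(mutableConfig);
+
+// Setting the parameter back to "0" later restores normal behavior.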
+
+
+
+
+

Restoring the Default State

+
+
+
+

+ Having restored the group to a functioning state by use of + the ELECTABLE_GROUP_SIZE_OVERRIDE override, it is desirable + to return the group to its normal state as soon as possible. The + normal operating state is one where the Electable Group + Size is maintained by JE HA, and the override is no longer + used. +

+

+ To restore the group to its normal operational state, do + one of the following: +

+
+
    +
  • +

    + Remove from the group any electable nodes that you know will be down for an extended period of time. Remove the nodes using the ReplicationGroupAdmin.removeMember() API (a sketch of this call follows this list). +

    +
  • +
  • +

    + Bring up electable nodes as they once again come on line, so that they can join the functioning group. This must be done carefully, one node at a time, in order to avoid the small possibility that a majority of the downed nodes hold an election amongst themselves and elect a second Master. +

    +
  • +
  • +

    + Perform some combination of node removal and + bringing up nodes which were previously down. +

    +
  • +
+
+
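+ For illustration, the following is a minimal sketch of the removeMember() call mentioned in the first item above. The group name, helper address, and node name are illustrative: +

+
+import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.je.rep.util.ReplicationGroupAdmin;
+
+// Remove a node that will be down for an extended period of time.
+Set<InetSocketAddress> helpers = new HashSet<InetSocketAddress>();
+helpers.add(new InetSocketAddress("mercury.example.com", 5001));
+
+ReplicationGroupAdmin admin =
+    new ReplicationGroupAdmin("PlanetaryRepGroup", helpers);
+admin.removeMember("Saturn");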

+ As soon as there is a sufficient number of electable nodes + up and running that election quorum requirements can be met in the + absence of the override, the override can be removed, and + normal HA operations resumed. +

+
+
+
+
+
+

Override Example

+
+
+
+

+ Consider a group consisting of 5 electable nodes: + n1-n5. Suppose a + simple majority of the nodes + (n3-n5) have become + unavailable. +

+

+ If one of the nodes in n3-n5 was the Master, then nodes n1 and n2 will try to hold an election, and fail due to the lack of a quorum. We now carry out the steps described above: +

+
+
    +
  1. +

    + Verify that n3-n5 are down. +

    +
  2. +

    + Set ELECTABLE_GROUP_SIZE_OVERRIDE to 2. Do this + at both n1 and n2. + You can do this dynamically using JConsole, or by + setting the property in the je.properties file and + restarting the node. +

    +
  3. +

    + n1 and n2 + will choose a new Master, say, n1. + n1 can now process write + operations, and n2 can + acknowledge transaction commits. +

    +
  4. +

    + Suppose that n3 is now repaired. + You can bring it back online and it will + automatically locate the new Master and join the + group. As is normal, it will catch up to + n1 and n2 in + the replication stream, and then begin + acknowledging commits as requested by + n1. +

    +
  5. +

    + We now have three electable nodes that are operational. Because + we have a true simple majority of electable nodes available, we + can now reset ELECTABLE_GROUP_SIZE_OVERRIDE to 0 + (do this on n1 and n2), + which causes the replication group to resume normal + operations. Note that n1 remains + the Master. +

    +
+
+

+ If n2 was the Master at the time of the + failure, then the situation is similar, except that an + election is not held. In this case, n2 will continue to + remain the Master throughout the entire process described + above. However, n2 might not be able to meet quorum + requirements for transaction commits until step 2 (above) is + performed. +

+
+
+
+ + + diff --git a/docs/ReplicationGuide/enablerep.html b/docs/ReplicationGuide/enablerep.html new file mode 100644 index 0000000..369c87b --- /dev/null +++ b/docs/ReplicationGuide/enablerep.html @@ -0,0 +1,140 @@ + + + + + + Converting Existing Environments for Replication + + + + + + + + + +
+
+
+
+

Converting Existing Environments for Replication

+
+
+
+

+ The log files of JE HA environments contain information and data used only by replication. Non-replicated environments lack this information, so in order to use a previously existing non-replicated environment in an HA application, it must undergo a one-time conversion. +

+
+

Note

+

+ If you try to open a non-replicated environment as a + replicated environment, the operation will throw an + UnsupportedOperationException. This is the only way your + code can tell if an environment needs to be converted. +

+
+

+ You use the DbEnableReplication class to perform this + one-time conversion. This class is particularly useful if you + want to prototype a standalone transactional application, and + then add in replication after the transactional application is + working as desired. +

+

+ The conversion process is one-way; once an environment + directory is converted, the rules that govern + ReplicatedEnvironment apply. This means the environment can + no longer be opened for writes by a standalone Environment handle + (however, it still can be opened by a standalone + Environment handle in read-only mode). +

+

+ Note that DbEnableReplication adds only a minimal amount of replication metadata. The conversion process is not in any way dependent on the size of the environment you are converting. +

+

+ The converted environment can be used to start a new + replication group. After conversion, the environment can be + opened as a ReplicatedEnvironment. Additional nodes that join + the group are then populated with data from the converted + environment. +

+

+ For example: +

+
// Create the first node using an existing environment 
+DbEnableReplication converter = 
+    new DbEnableReplication(envDirMars,          // env home dir
+                            "UniversalRepGroup", // group name
+                            "nodeMars",          // node name
+                            "mars:5001");        // node host,port
+converter.convert();
+
+ReplicatedEnvironment nodeMars =
+           new ReplicatedEnvironment(envDirMars, ...);
+
+// Bring up additional nodes, which will be initialized from 
+// nodeMars.
+ReplicationConfig repConfig = new ReplicationConfig();
+try {
+    repConfig.setGroupName("UniversalRepGroup");
+    repConfig.setNodeName("nodeVenus");
+    repConfig.setNodeHostPort("venus:5008");
+    repConfig.setHelperHosts("mars:5001");
+
+    nodeVenus = new ReplicatedEnvironment(envDirVenus, 
+                                          repConfig, 
+                                          envConfig);
+} catch (InsufficientLogException insufficientLogEx) {
+
+    // log files will be copied from another node in the group
+    NetworkRestore restore = new NetworkRestore();
+    restore.execute(insufficientLogEx, new NetworkRestoreConfig());
+
+    // try opening the node now
+    nodeVenus = new ReplicatedEnvironment(envDirVenus, 
+                                          repConfig,
+                                          envConfig);
+} 
+
+ + + diff --git a/docs/ReplicationGuide/events.html b/docs/ReplicationGuide/events.html new file mode 100644 index 0000000..222edfa --- /dev/null +++ b/docs/ReplicationGuide/events.html @@ -0,0 +1,179 @@ + + + + + + Listening for Events + + + + + + + + + +
+
+
+
+

Listening for Events

+
+
+
+

+ One of the things the Monitor class allows you to do is to listen for certain events that occur in the composition of the replication group. Your Monitor can be notified of these events by running an event listener using Monitor.startListener(). +

+

+ Monitor.startListener() takes a single + argument, and that is an instance of MonitorChangeListener. + MonitorChangeListener is an interface + that you implement for the purpose of handling replication + group events. +

+

+ There are four events that the change listener can be notified of. Each of these is represented by a unique class: +

+
+
    +
  1. +

    + GroupChangeEvent +

    +

    + A new instance of this event is generated each time + an electable or monitor node, but not a secondary + node, is added or removed from the replication group. +

    +
  2. +

    + NewMasterEvent +

    +

    + A new instance of this event is generated each time a + new Master is elected. +

    +
  3. +

    + JoinGroupEvent +

    +

    + A new instance of this event is generated each time an + electable or secondary node, but not a monitor node, + joins a group. The event is generated on a "best + effort" basis. It may not be generated, for example, + if the joining node was unable to communicate with the + monitor due to a network problem. The application must + be resilient in the face of such missing events. +

    +
  4. +

    + LeaveGroupEvent +

    +

    + A new instance of this event is generated each time an electable or secondary node, but not a monitor node, leaves the group. The event is generated on a "best effort" basis. It may not be generated if the node leaving the group dies (for example, it was killed) before it has a chance to generate the event, or if the node was unable to communicate with the monitor due to a network problem. The application must be resilient in the face of such missing events. +

    +
+
+

+ For example, an implementation of the MonitorChangeListener + interface might be: +

+
class MyChangeListener implements MonitorChangeListener {
+
+   public void notify(NewMasterEvent newMasterEvent) {
+
+    String newNodeName = newMasterEvent.getNodeName();
+
+    InetSocketAddress newMasterAddr = 
+           newMasterEvent.getSocketAddress();
+    String newMasterHostName = newMasterAddr.getHostName();
+    int newMasterPort = newMasterAddr.getPort();
+
+    // Do something with this information here.
+   }
+
+   public void notify(GroupChangeEvent groupChangeEvent) {
+    ReplicationGroup repGroup = groupChangeEvent.getRepGroup();
+
+    // Do something with the new ReplicationGroup composition here.
+   }
+
+   ...
+
+} 
+

+ You can then start the Monitor listener as follows: +

+
 // Initialize the monitor node config
+ReplicationConfig config = 
+       new ReplicationConfig("MyRepGroupName",
+                             "mon1",
+                             "monhost1.acme.com:7000");
+config.setNodeType(NodeType.MONITOR);
+config.setHelperHosts("node1.acme.com:5000,node2.acme.com:5000");
+
+Monitor monitor = new Monitor(config);
+
+// If the monitor has not been registered as a member of the 
+// group, register it now. register() returns the current node 
+// that is the master.
+ReplicationNode currentMaster = monitor.register();
+
+// Start up the listener, so that it can be used to track changes 
+// in the master node, or group composition.
+monitor.startListener(new MyChangeListener()); 
+
+ + + diff --git a/docs/ReplicationGuide/exceptions.html b/docs/ReplicationGuide/exceptions.html new file mode 100644 index 0000000..7024c2c --- /dev/null +++ b/docs/ReplicationGuide/exceptions.html @@ -0,0 +1,318 @@ + + + + + + HA Exceptions + + + + + + + + + +
+
+
+
+

HA Exceptions

+
+
+
+ +

+ JE HA requires you to manage more error situations than you would have to if you were writing a non-replicated application. These error situations translate to additional exceptions that you must contend with in your code. Before continuing with our description of how to write a replicated application, it is useful to review the HA-specific exceptions that your application must manage. +

+
+
+
+
+

Master-Specific HA Exceptions

+
+
+
+

+ There are two exceptions that you can see on a Master node, + and which you will not see anywhere else. They are: +

+
+
    +
  • +

    + InsufficientReplicasException +

    +

    + This exception can be raised on a transaction begin or commit. It means that the + Master cannot successfully commit a transaction, or begin one, because it is not in contact + with enough Electable Replicas. The number of Electable Replicas required to successfully commit + the transaction is a function of the durability policy that + you have set for the transaction. See + Managing Durability + for more information. +

    +

    + If raised on a transaction commit operation, this exception means that the transaction has not been committed. Instead, it has been marked as invalid. In response to this exception, your application must at a minimum abort the transaction. It is up to you whether you want to retry the transaction at some later time when more Replicas are in contact with the Master (a sketch of this abort-and-retry pattern follows this list). +

    +

    + If raised on a transaction begin operation, this + exception means that the transaction has not begun. + If the application intended to initiate a read-only + transaction on a Master, it can avoid this + exception by ensuring that the transaction is + configured to not require any acknowledgments. For + information on configuring acknowledgments, see + Managing Acknowledgements. +

    +
  • +
  • +

    + InsufficientAcksException +

    +

    + This exception can be raised on a transaction commit. It means that the + Master has successfully committed the transaction locally, but it has not + received enough acknowledgements from its Electable Replicas in the timeframe + allocated for acknowledgements to be received. +

    +

    + The application should respond to this exception in such a way as to alert + the administrator that there might be a problem with the health of the + network or the nodes participating in the replication group. +

    +

    + For information on how to manage acknowledgement policies, see + Managing Acknowledgements. +

    +
  • +
+
+
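+ For illustration, the following is a minimal sketch of the abort-and-retry pattern described above. The retry count and back-off interval are illustrative; the Example Run Transaction Class shows a more complete treatment: +

+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.rep.InsufficientAcksException;
+import com.sleepycat.je.rep.InsufficientReplicasException;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+
+void writeWithRetry(ReplicatedEnvironment repEnv, Database db,
+                    DatabaseEntry key, DatabaseEntry data)
+    throws InterruptedException {
+
+    final int maxRetries = 3;        // illustrative
+    final long retrySleepMs = 1000;  // illustrative
+
+    for (int i = 0; i < maxRetries; i++) {
+        Transaction txn = null;
+        try {
+            txn = repEnv.beginTransaction(null, null);
+            db.put(txn, key, data);
+            txn.commit();
+            return;                  // success
+        } catch (InsufficientReplicasException e) {
+            // The transaction did not begin, or was invalidated at
+            // commit time: abort it, then retry once more Electable
+            // Replicas are back in contact with the Master.
+            if (txn != null) {
+                txn.abort();
+            }
+            Thread.sleep(retrySleepMs);
+        } catch (InsufficientAcksException e) {
+            // The commit succeeded locally, so do NOT abort.
+            // Alert the administrator instead.
+            return;
+        }
+    }
+}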
+
+
+
+
+

Replica-Specific HA Exceptions

+
+
+
+

+ The exceptions that you can see on a Replica, and nowhere else, are: +

+
+
    +
  • +

    + ReplicaConsistencyException +

    +

    + Indicates that the Replica was unable to meet the defined consistency + requirements in the allocated period of time. +

    +

    + If this exception is encountered frequently, it indicates that the + consistency policy requirements are too strict and cannot be met routinely + given the load being placed on the system and the hardware resources that + are available to service the load. The exception may also indicate that + there is a network related issue that is preventing the Replica from + communicating with the Master and keeping up with the replication stream. +

    +

    + In response to this exception, your application can either attempt to retry + the transaction, or you can relax your application's consistency + requirements until the transaction can successfully complete. +

    +

    + For information on managing consistency policies, see + Managing Consistency. +

    +
  • +
  • +

    + ReplicaWriteException +

    +

    + An attempt was made to perform a write operation on a Replica. The exception + typically indicates an error in the application logic. In some extremely + rare cases it could be the result of a transition of the node from Master to + Replica, while a transaction was in progress. +

    +

    + The application must abort the current transaction and redirect all + subsequent update operations to the Master. For + example code that performs this action, see + Example Run Transaction Class. +

    +
  • +
  • +

    + LockPreemptedException +

    +

    + A read lock currently held by a Replica has been preempted by an HA write + operation. The Replica should abort and retry the read operation in response + to this exception. +

    +

    + Note that your application should attempt to catch the + LockConflictException base class rather than this class because all of the + locking exceptions are managed in the same way (abort and retry the + transaction). +

    +
  • +
  • +

    + DatabasePreemptedException +

    +

    + The database handle on a Replica was forcibly closed due to the replay of an + Environment.truncateDatabase(), Environment.removeDatabase() or + Environment.renameDatabase() operation in the + replication stream. +

    +

    + When this exception occurs, the application must close any open Cursors and abort any open Transactions that are using the database, and then close the Database handle. If the application wishes, it may reopen the database if it still exists (a sketch of this recovery sequence follows this list). +

    +
  • +
  • +

    + RollbackException +

    +

    + A new Master has been selected, and this Replica's log is ahead of the current Master, but the Replica was unable to roll back without a recovery. As a consequence, one or more of the most recently committed transactions may need to be rolled back before the Replica can synchronize its state with that of the current Master. This exception can happen if the electable Replica with the most recent log files was unable to participate in the election of the Master, perhaps because the node had been shut down. +

    +

    + For details on how to handle this exception, see + Managing Transaction Rollbacks. +

    +
  • +
  • +

    + InsufficientLogException +

    +

    + Indicates that the log files constituting the Environment are insufficient + and cannot be used as the basis for continuing with the replication stream + provided by the current master. +

    +

    + This exception generally means that the node has been down for a long enough time that it cannot be brought up-to-date by the Master. For information on how to respond to this condition, see Restoring Log Files. +

    +
  • +
+
+
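+ For illustration, the following is a minimal sketch of the DatabasePreemptedException recovery sequence described in that list. The cursor, txn, db, repEnv, and dbConfig handles are assumed to exist and are illustrative: +

+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.rep.DatabasePreemptedException;
+
+DatabaseEntry key = new DatabaseEntry();
+DatabaseEntry data = new DatabaseEntry();
+
+try {
+    cursor.getNext(key, data, null);
+} catch (DatabasePreemptedException dpe) {
+    // Release everything that depends on the preempted handle...
+    cursor.close();
+    txn.abort();
+    db.close();
+    // ...then reopen the database (recreating any transactions and
+    // cursors that used it), if the database still exists.
+    db = repEnv.openDatabase(null, "myDatabase", dbConfig);
+}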
+
+
+
+
+

Replicated Environment Handle-Specific Exceptions

+
+
+
+

+ In addition to Master- and Replica-specific exceptions, it is possible for a + ReplicatedEnvironment handle to throw an UnknownMasterException. This exception + indicates that the operation being tried requires communication with a Master, but + the Master is not available. +

+

+ This exception typically indicates that there is a problem with your physical + infrastructure. It might mean that an insufficient number of electable nodes are available to + elect a Master, or that the current node is unable to communicate with other nodes + due to, for example, network problems. +

+

+ In response to this exception, your application can try any number of corrective + actions, from immediately retrying the operation, to logging the problem and then + abandoning the operation, to waiting some predetermined period of time before + attempting the operation again. Your application can also + use the Monitor or the StateChangeListener to be + notified when a Master becomes available. For more + information see Writing Monitor Nodes + or Using the StateChangeListener. +

+
+
+ + + diff --git a/docs/ReplicationGuide/gettingStarted.css b/docs/ReplicationGuide/gettingStarted.css new file mode 100644 index 0000000..6a2b24b --- /dev/null +++ b/docs/ReplicationGuide/gettingStarted.css @@ -0,0 +1,50 @@ +body { width: 45em; + margin-left: 3em; + font-family: Arial, Helvetica, sans-serif; + font-size: 11pt; + } + +h2.title { margin-left: -1em; + font-family: Verdana, serif; + font-size: 16pt; + } + +h3.title { font-family: Verdana, serif; + font-size: 14pt; + } + +pre.programlisting { + font-family: monospace; + background-color: #eae8e9; +} + +div.navheader { font-size: 10pt; + width: 60em; + margin-left: -2em; + } + +div.navheader table tr td { font-size: 10pt; } + +div.navfooter { font-size: 10pt; + width: 60em; + margin-left: -2em; + } +div.navfooter table tr td { font-size: 10pt; } + +span.emphasis { font-style: italic;} + +div.appendix div.informaltable { font-size: 9pt; } +div.appendix div.informaltable td { vertical-align: top; } +div.appendix div.informaltable p { margin-top: .25em; } +div.appendix div.informaltable p { margin-bottom: .25em; } + +div.variablelist dl dt {margin-top: 1em; } + +div.libver p { + font-size: 8pt; + width: 30%; + margin-left: 2px; + margin-right: 2px; + padding-top: 3px; + padding-bottom: 3px; + } diff --git a/docs/ReplicationGuide/groupreset.html b/docs/ReplicationGuide/groupreset.html new file mode 100644 index 0000000..813946a --- /dev/null +++ b/docs/ReplicationGuide/groupreset.html @@ -0,0 +1,75 @@ + + + + + + Resetting a Replication Group + + + + + + + + + +
+
+
+
+

Resetting a Replication Group

+
+
+
+

+ Under some circumstances it is useful to reset a group. Resetting + a group means taking a node from an existing group and using it to + form a brand new group of size 1. You can then grow the new group + as normal by adding additional nodes to it. In this way, you can + create an additional group that has the exact same data as the + original group. +

+

+ This functionality is useful when a copy of an existing group needs + to be made for use at some other site. +

+

+ To reset a group, use the DbResetRepGroup utility. +

+
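+ For illustration, the following is a minimal sketch of using this utility. The directory, group, node, and host values are illustrative: +

+
+import java.io.File;
+
+import com.sleepycat.je.rep.util.DbResetRepGroup;
+
+// Turn one node's environment into a brand new group of size 1.
+DbResetRepGroup reset =
+    new DbResetRepGroup(new File("/export/envHome"), // env home dir
+                        "NewPlanetaryRepGroup",      // group name
+                        "Mars",                      // node name
+                        "mars.example.com:5001");    // host:port
+reset.reset();
+
+// The environment can now be opened as a ReplicatedEnvironment,
+// and additional nodes can be added to grow the new group.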
+ + + diff --git a/docs/ReplicationGuide/hotupgrade.html b/docs/ReplicationGuide/hotupgrade.html new file mode 100644 index 0000000..d778d0a --- /dev/null +++ b/docs/ReplicationGuide/hotupgrade.html @@ -0,0 +1,288 @@ + + + + + + Upgrading a JE Replication Group + + + + + + + + + +
+
+
+
+

Upgrading a JE Replication Group

+
+
+
+ +

+ After deploying a BDB JE HA application, you may later want to upgrade to a new version. Berkeley DB JE supports hot upgrade of a replication group by allowing mixed-version operation. That is, replication nodes running the newer software version can inter-operate with older version nodes, and both can be available for user operations. However, in some cases, there are certain constraints on performing such a hot upgrade. +

+
+
+
+
+

Upgrade Process

+
+
+
+

+ Each release of Berkeley DB JE is tied to a given on-disk log file format. Log file formats do not necessarily change every release. The Change Log for each release specifies whether the log file format has changed or not. There are no restrictions on upgrades across releases that use the same log file format. For best performance and to take advantage of bug fixes, ensure all nodes in a replication group run with the same BDB JE version during normal operations. Occasionally, a new release of Berkeley DB JE includes a log file format change. The constraints that apply to upgrades which introduce new log file formats are explained in this section. +

+

+ In a replication group, the Master transmits log records that + must be read by the replicas. If a group is operating with + mixed version nodes, the Master must be running a version of + JE that is older than, equal to, or (by default) no more + than one version greater than the replicas. This is required + so that the Master can supply the replicas with a version of + the replication stream that they can understand. +

+

+ Note that some releases may support online upgrades with + replicas running versions that are more than one log file + format version different from each other. But any such + additional flexibility will be called out in the release + notes. +

+

+ To support the versioning requirement, make sure to upgrade all nodes to the version with the next log file format before upgrading any nodes to a still later log file format. If you cannot meet this restriction, then a hot upgrade should not be performed. Instead, all nodes in the replication group should be taken offline and upgraded before any are restarted. +

+

+ The recommended steps for upgrading Berkeley DB JE HA when + log file formats have changed are as follows: +

+
+
    +
  1. +

    + Determine if the upgrade skips any log file format versions. If it does, and it is not possible to upgrade to the intermediate versions, then plan to perform an offline upgrade. +

    +
  2. +

    + Bring all Replicas up to date with the Master. That + is, all Replicas must have the same environment + content as the Master. You can ascertain this by + using the DbGroupAdmin.dumpGroup() utility, or + programmatically using the + ReplicaConsistencyPolicy. +

    +
  3. +

    + Perform the following upgrade procedures on each of + the environments that are part of the replication + group. +

    +
    +
      +
    • +

      + Shut down the old version of the application. +

      +
    • +
    • +

      + Install the new BDB JE jar file. +

      +
    • +
    • +

      + Restart the application. +

      +
    • +
    +
    +

    + Upgrade each individual data node, both Replicas and + the Master, and restart them to join the replication + group. +

    +
+
+
+
+
+
+
+

Things To Remember While Upgrading

+
+
+
+

+ During a hot replication upgrade: +

+
+
    +
  • +

    + Upgrading BDB JE 4.0 directly to BDB JE 5 and higher + versions is prohibited. Upgrade BDB JE 4.0 to BDB JE + 4.1 first, and then upgrade BDB JE 4.1 to higher + versions. There is no constraint if you upgrade from + BDB JE 4.1 or later versions to a higher BDB JE + version. +

    +
  • +
+
+
+
+
+
+
+

Handling Problems While Upgrading

+
+
+
+

+ There are exceptions that you may run into during the upgrade + process. The following exceptions may be thrown when a + replication group node is restarted during the upgrade, and a + ReplicatedEnvironment object is instantiated: +

+
+
    +
  • +

    + RollbackException +

    +

    + This exception can be thrown by a Replica when its log is ahead of the current Master and the Replica is unable to roll back without a recovery. As a consequence, one or more of the most recently committed transactions may need to be rolled back before the Replica can synchronize its state with that of the current Master. This exception can also be thrown if the current Master crashes. To resolve this exception, restart the ReplicatedEnvironment with the new JE version. +

    +
  • +
  • +

    + RollbackProhibitedException +

    +

    + During synchronization, a Replica that has a newer log may have to roll back a number of committed transactions. If the number of rolled back transactions exceeds the limit defined by TXN_ROLLBACK_LIMIT, the Replica throws a RollbackProhibitedException. To resolve this exception, you may have to truncate the logs manually using DbTruncateLog, and then restart the ReplicatedEnvironment with the new JE version. +

    +
  • +
  • +

    + EnvironmentFailureException +

    +

    + EnvironmentFailureException is thrown due to log + version incompatibility between the Master and the + Replica. This exception is thrown with the message: +

    +

    + "Incompatible log versions. Feeder log version: xxxx, + Feeder JE version: xxxx, Replica log version: xxxx, + Replica JE version: xxxx". +

    +

    + To resolve this exception, restart the ReplicatedEnvironment with the new JE version. +

    +
  • +
+
+
+
+ + + diff --git a/docs/ReplicationGuide/index.html b/docs/ReplicationGuide/index.html new file mode 100644 index 0000000..4d36054 --- /dev/null +++ b/docs/ReplicationGuide/index.html @@ -0,0 +1,679 @@ + + + + + + Getting Started with Berkeley DB, Java Edition High Availability Applications + + + + + + + +
+
+
+
+

Getting Started with Berkeley DB, Java Edition High Availability Applications

+
+
+
+ +

+ Legal Notice +

+

+ Copyright © 2002 - 2017 Oracle and/or its affiliates. All rights + reserved. +

+

+ This software and related documentation are provided under a + license agreement containing restrictions on use and disclosure + and are protected by intellectual property laws. Except as + expressly permitted in your license agreement or allowed by + law, you may not use, copy, reproduce, translate, broadcast, + modify, license, transmit, distribute, exhibit, perform, + publish, or display any part, in any form, or by any means. + Reverse engineering, disassembly, or decompilation of this + software, unless required by law for interoperability, is + prohibited. +

+

+ The information contained herein is subject to change without + notice and is not warranted to be error-free. If you find any + errors, please report them to us in writing. +

+

+ Berkeley DB, + + Berkeley DB Java Edition + and + Sleepycat are trademarks or registered trademarks of + Oracle. All rights to these marks are reserved. + No third-party use is permitted without the + express prior written consent of Oracle. +

+

+ Other names may be trademarks of their respective owners. +

+

+ If this is software or related documentation that is delivered + to the U.S. Government or anyone licensing it on behalf of the + U.S. Government, the following notice is applicable: +

+

+ U.S. GOVERNMENT END USERS: Oracle programs, including any + operating system, integrated software, any programs installed + on the hardware, and/or documentation, delivered to U.S. + Government end users are "commercial computer software" + pursuant to the applicable Federal Acquisition Regulation and + agency-specific supplemental regulations. As such, use, + duplication, disclosure, modification, and adaptation of the + programs, including any operating system, integrated software, + any programs installed on the hardware, and/or documentation, + shall be subject to license terms and license restrictions + applicable to the programs. No other rights are granted to the + U.S. Government. +

+

+ This software or hardware is developed for general use in a + variety of information management applications. It is not + developed or intended for use in any inherently dangerous + applications, including applications that may create a risk of + personal injury. If you use this software or hardware in + dangerous applications, then you shall be responsible to take + all appropriate fail-safe, backup, redundancy, and other + measures to ensure its safe use. Oracle Corporation and its + affiliates disclaim any liability for any damages caused by use + of this software or hardware in dangerous applications. +

+

+ Oracle and Java are registered trademarks of Oracle and/or its + affiliates. Other names may be trademarks of their respective + owners. +

+

+ Intel and Intel Xeon are trademarks or registered trademarks of + Intel Corporation. All SPARC trademarks are used under license + and are trademarks or registered trademarks of SPARC + International, Inc. AMD, Opteron, the AMD logo, and the AMD + Opteron logo are trademarks or registered trademarks of + Advanced Micro Devices. UNIX is a registered trademark of The + Open Group. +

+

+ This software or hardware and documentation may provide access + to or information on content, products, and services from third + parties. Oracle Corporation and its affiliates are not + responsible for and expressly disclaim all warranties of any + kind with respect to third-party content, products, and + services. Oracle Corporation and its affiliates will not be + responsible for any loss, costs, or damages incurred due to + your access to or use of third-party content, products, or + services. +

+
+
+
+

31-Oct-2017

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+
+
+ + For More Information + +
+
+ + Contact Us + +
+
+
+
+
+
+ + 1. Introduction + +
+
+
+
+ + Overview + +
+
+
+
+ + Replication Group Members + +
+
+ + Replicated Environments + +
+
+ + Selecting a Master + +
+
+ + Replication Streams + +
+
+
+
+ + Managing Data Guarantees + +
+
+
+
+ + Durability + +
+
+ + Managing Data Consistency + +
+
+
+
+ + Replication Group Life Cycle + +
+
+
+
+ + Terminology + +
+
+ + Node States + +
+
+ + New Replication Group Startup + +
+
+ + Subsequent Startups + +
+
+ + Replica Startup + +
+
+ + Master Failover + +
+
+ + Two Node Groups + +
+
+
+
+
+
+ + 2. Replication API First Steps + +
+
+
+
+ + Using Replicated Environments + +
+
+
+
+ + Configuring Replicated Environments + +
+
+
+
+ + HA Exceptions + +
+
+
+
+ + Master-Specific HA Exceptions + +
+
+ + Replica-Specific HA Exceptions + +
+
+ + Replicated Environment Handle-Specific Exceptions + +
+
+
+
+ + Opening a Replicated Environment + +
+
+ + Managing Write Requests at a Replica + +
+
+
+
+ + Using the StateChangeListener + +
+
+ + Catching ReplicaWriteException + +
+
+
+
+ + Secondary Nodes + +
+
+ + Time Synchronization + +
+
+ + Configuring Two-Node Groups + +
+
+
+
+ + 3. Transaction Management + +
+
+
+
+ + Managing Durability + +
+
+
+
+ + Durability Controls + +
+
+ + Commit File Synchronization + +
+
+ + Managing Acknowledgements + +
+
+
+
+ + Managing Consistency + +
+
+
+
+ + Setting Consistency Policies + +
+
+ + Time Consistency Policies + +
+
+ + Commit Point Consistency Policies + +
+
+
+
+ + Availability + +
+
+
+
+ + Write Availability + +
+
+ + Read Availability + +
+
+
+
+ + Consistency and Durability Use Cases + +
+
+
+
+ + Out on the Town + +
+
+ + Bio Labs, Inc + +
+
+
+
+ + Managing Transaction Rollbacks + +
+
+ + Example Run Transaction Class + +
+
+
+
+ + RunTransaction Class + +
+
+ + Using RunTransaction + +
+
+
+
+
+
+ + 4. Utilities + +
+
+
+
+ + Administering the Replication Group + +
+
+
+
+ + Listing Group Members + +
+
+ + Locating the Current Master + +
+
+ + Adding and Removing Nodes from the Group + +
+
+
+
+ + Restoring Log Files + +
+
+
+
+ + Reclaiming Log Files + +
+
+ + Suspending Writes Due to Disk Thresholds + +
+
+
+
+ + Backing up a Replicated Application + +
+
+ + Converting Existing Environments for Replication + +
+
+
+
+ + 5. Writing Monitor Nodes + +
+
+
+
+ + Monitor Class + +
+
+ + Listening for Events + +
+
+
+
+ + 6. Replication Examples + +
+
+ + 7. Administration + +
+
+
+
+ + Hardware + +
+
+ + Time Synchronization + +
+
+ + Node Configuration + +
+
+ + Running Backups + +
+
+ + Adding and Removing Nodes + +
+
+ + Upgrading a JE Replication Group + +
+
+
+
+ + Upgrade Process + +
+
+ + Things To Remember While Upgrading + +
+
+ + Handling Problems While Upgrading + +
+
+
+
+ + Resetting a Replication Group + +
+
+
+
+ + A. Managing a Failure of the Majority + +
+
+
+
+ + Overriding the Electable Group Size + +
+
+
+
+ + Setting the Override + +
+
+ + Restoring the Default State + +
+
+ + Override Example + +
+
+
+
+
+
+
+
+ + + diff --git a/docs/ReplicationGuide/introduction.html b/docs/ReplicationGuide/introduction.html new file mode 100644 index 0000000..6beb90b --- /dev/null +++ b/docs/ReplicationGuide/introduction.html @@ -0,0 +1,587 @@ + + + + + + Chapter 1. Introduction + + + + + + + + + +
+
+
+
+

Chapter 1. Introduction

+
+
+
+
+

+ Table of Contents +

+
+
+ + Overview + +
+
+
+
+ + Replication Group Members + +
+
+ + Replicated Environments + +
+
+ + Selecting a Master + +
+
+ + Replication Streams + +
+
+
+
+ + Managing Data Guarantees + +
+
+
+
+ + Durability + +
+
+ + Managing Data Consistency + +
+
+
+
+ + Replication Group Life Cycle + +
+
+
+
+ + Terminology + +
+
+ + Node States + +
+
+ + New Replication Group Startup + +
+
+ + Subsequent Startups + +
+
+ + Replica Startup + +
+
+ + Master Failover + +
+
+ + Two Node Groups + +
+
+
+
+
+

+ This book provides a thorough introduction to replication as used with Berkeley DB, Java Edition (JE). It begins with a general overview of replication and the benefits it provides. It also describes the APIs that you use to implement replication, and the architectural changes you need to make to your application code in order to use those APIs.

+

+ You should understand the concepts from the Berkeley DB, Java Edition Getting Started with Transaction Processing + guide before reading this book. +

+
+
+
+
+

Overview

+
+
+
+ +

+ Welcome to the JE High Availability (HA) product. JE HA + is a replicated, single-master, embedded database engine based + on Berkeley DB, Java Edition. JE HA offers important improvements in + application availability, as well as offering improved read + scalability and performance. JE HA does this by extending + the data guarantees offered by a traditional transactional + system to processes running on multiple physical hosts. +

+

+ The JE replication APIs allow you to distribute your database contents (written on a single read-write Master) to one or more read-only Replicas. For this reason, JE's replication implementation is said to use a single-master, multiple-replica strategy.

+

+ Replication offers your application a number of significant benefits. Primarily these revolve around performance, but replication also strengthens your data durability guarantees.

+

+ Briefly, some of the reasons why you might choose to implement + replication in your JE application are: +

+
+
    +
  • +

    + Improved application availability. +

    +

    + By spreading your data across multiple + machines, you can ensure that your + application's data continues to be + available even in the event of a + hardware failure on any given machine in + the replication group. +

    +
  • +
  • +

    + Improved read performance.

    +

    + By using replication you can spread data reads across + multiple machines on your network. Doing so allows you + to vastly improve your application's read performance. + This strategy might be particularly interesting for + applications that have readers on remote network nodes; + you can push your data to the network's edges thereby + improving application data read responsiveness. +

    +
  • +
  • +

    + Improved transactional commit performance.

    +

    + In order to commit a transaction and achieve a + transactional durability guarantee, the commit must be + made durable. That is, the commit + must be written to disk (usually, but not always, + synchronously) before the application's thread of + control can continue operations. +

    +

    + Replication allows you to batch disk I/O so that it is performed as efficiently as possible while still maintaining a degree of durability by committing to the network. In other words, you relax the transactional durability guarantees on the machine where you perform the database write, but by replicating the data across the network you gain additional durability guarantees beyond what is provided locally (see the sketch following this list).

    +
  • +
  • +

    + Improved data durability guarantees.

    +

    + In a traditional transactional application, you commit your + transactions such that data modifications are saved to + disk. Beyond this, the durability of your data is + dependent upon the backup strategy that you choose to + implement for your site. +

    +

    + Replication allows you to increase this durability + guarantee by ensuring that data modifications are + written to multiple machines. This means that multiple + disks, disk controllers, power supplies, and CPUs are + used to ensure that your data modification makes it to + stable storage. In other words, replication allows you + to minimize the problem of a single point of failure + by using more hardware to guarantee your data writes. +

    +

    + If you are using replication for this reason, then you + probably will want to configure your application such + that it waits to hear about a successful commit from + one or more replicas before continuing with the next + operation. This will obviously impact your + application's write performance to some degree + — with the performance penalty being largely dependent + upon the speed and stability of the network connecting + your replication group. +

    +
  • +
+
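+ The durability tradeoff described above maps onto JE's Durability class. The following is a minimal sketch, not a definitive recipe: it relaxes local disk syncs while requiring a simple majority of electable Replicas to acknowledge each commit. The policy choices shown are illustrative assumptions.

import com.sleepycat.je.Durability;
+import com.sleepycat.je.Durability.ReplicaAckPolicy;
+import com.sleepycat.je.Durability.SyncPolicy;
+import com.sleepycat.je.EnvironmentConfig;
+
+// Sketch: do not sync the local commit to disk, but wait for a
+// simple majority of electable Replicas to acknowledge it.
+Durability durability =
+    new Durability(SyncPolicy.WRITE_NO_SYNC,  // sync policy at the Master
+                   SyncPolicy.NO_SYNC,        // sync policy at the Replicas
+                   ReplicaAckPolicy.SIMPLE_MAJORITY);
+
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setTransactional(true);
+envConfig.setDurability(durability);

+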
+
+
+
+
+

Replication Group Members

+
+
+
+

+ Processes that take part in a JE HA application are generically called nodes. Most nodes serve as read-only Replicas. One node in the HA application can perform database writes: the Master node.

+

+ The complete set of nodes taking part in the replicated application is called the replication group. While it is only a logical entity (there is no object that you instantiate and destroy to represent the replication group), the replication group is the first-order element of management for a replicated HA application. It is very important to remember that the replication group is persistent: it exists regardless of whether its member nodes are currently running. In fact, nodes that have been added to a replication group (with the exception of Secondary nodes) remain in the group until they are manually removed by you or your application's administrator.

+

+ Replication groups consist of electable nodes and, + optionally, Monitor and Secondary nodes. +

+

+ Electable nodes are replication group + members that can be elected to become the group's Master node + through a replication election. Electable + nodes are also the group members that vote in these elections. + If an electable node is not a Master, then it serves in the + replication group as a read-only Replica. Electable nodes have + access to a JE environment, and are persistent members of + the replication group. Electable nodes that are Replicas also + participate in transaction durability decisions by providing the + master with acknowledgments of transaction commits. +

+
+

Note

+

+ Beyond Master and Replica, a node can also be in + several other states. See + Replication Group Life Cycle + for more information. +

+
+

+ Most of the nodes in a replication group are electable + nodes, but it is possible to have nodes of the other types + as well. +

+

+ Secondary nodes also have access to a + JE environment, but can only serve as read-only replicas, + not masters, and do not participate in elections. Secondary + nodes can be used to provide read-only data access from + locations with higher latency network connections to the rest + of the replication group without introducing communication + delays into elections. Secondary nodes are not persistent + members of the replication group; they are only considered + members when they are connected to the current master. + Secondary nodes do not participate in transaction durability + decisions. +

+

+ Monitor nodes do not have access to a JE environment and do not participate in elections. For this reason, they cannot serve as either a Master or a Replica. Instead, they merely monitor the composition of the replication group: as electable nodes are added and removed, as electable and secondary nodes join and leave, and as elections are held to select a new Master. Monitor nodes are therefore used by applications external to the JE replicated application to route data requests to the various members of the replication group. Monitor nodes are persistent members of the replication group, and they do not participate in transaction durability decisions.

+

+ Note that all nodes in a replication group have a unique group-wide name. Further, every replication group is itself assigned a unique name. This is necessary because a single process can have access to multiple replication groups, and any given collection of hardware can run multiple replication groups (a production and a test group, for example). By uniquely identifying each replication group by name, JE HA can internally check that nodes have not been misconfigured, and so make sure that messages are routed to the correct location.

+
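+ A node's type is declared as part of its replication configuration. The following is a minimal sketch using hypothetical node and group names, assuming the ReplicationConfig.setNodeType() method and NodeType enum available in your JE version:

import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.ReplicationConfig;
+
+// Sketch: configure this node as a Secondary. It can serve reads,
+// but never votes in elections or acknowledges commits.
+ReplicationConfig repConfig = new ReplicationConfig();
+repConfig.setGroupName("PlanetaryRepGroup");          // hypothetical names
+repConfig.setNodeName("Pluto");
+repConfig.setNodeHostPort("pluto.example.com:5004");
+repConfig.setNodeType(NodeType.SECONDARY);            // default is ELECTABLE

+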
+
+
+
+
+

Replicated Environments

+
+
+
+

+ All electable and secondary nodes must have access to a + database environment. Further, no node can share a database + environment with another node. +

+

+ More to the point, in order to create an electable or + secondary node in a replication group, you use a + specialized form of the environment handle: + ReplicatedEnvironment. +

+

+ There is no JE-specified limit to the number of + environments which can join a replication group. + The only limitation here is one of resources — + network bandwidth, for example. +

+

+ We discuss ReplicatedEnvironment handle usage in + Using Replicated Environments. + For an introduction to database environments, see the + Getting Started with Berkeley DB, Java Edition guide. +

+
+
+
+
+
+

Selecting a Master

+
+
+
+

+ Every replication group is allowed one and only one + Master. Masters are selected by + holding an election. All such + elections are performed by the underlying Berkeley DB, Java Edition + replication code. +

+

+ When a node joins a replication group, it attempts to locate the Master. If it is the first electable node added to the replication group, then it automatically becomes the Master. If it is an electable node, but is not the first to start up in the replication group and it cannot locate the Master, it calls for an election. Further, if at any time the Master becomes unavailable to the replication group, the electable replicas call for an election.

+

+ When holding an election, election participants vote on + who should be the Master. Among the electable nodes + participating in the election, the node with the most + up-to-date set of logs will win the election. In order + to win an election, a node must win a simple majority + of the votes. +

+

+ Usually JE requires a majority of electable nodes to be + available to hold an election. If a simple majority is + not available, then the replication group will no + longer be able to accept write requests as there will + be no Master. +

+

+ Note that an electable node is part of the replication + group even if it is currently not running or is + otherwise unreachable by the rest of the replication + group. Membership of electable nodes in the replication + group is persistent; once an electable node joins the + group, it remains in the group regardless of its current + state. The only way an electable node leaves a + replication group is if you manually remove it from the + group (see + Adding and Removing Nodes from the Group + for details). This is a very important point to remember + when considering elections. An election cannot be held + if the majority of electable nodes in the group are not + running or are otherwise unreachable. +

+
+

Note

+

+ There are two circumstances under which a majority + of electable nodes need not be available in order + to hold an election. The first is for the special + circumstance of the two-node group. See Configuring Two-Node Groups for + details. +

+

+ The second circumstance is if you explicitly relax + the requirement for a majority of electable nodes to + be available in order to hold an election. This is a + dangerous thing to do, and your replication group + should rarely (if ever) be configured this way. See + Managing a Failure of the Majority + for more information. +

+
+

+ Once a node has been elected Master, it remains in that + role until the replication group has a reason to hold + another election. Currently, the only reason why the group + will try to elect a new Master is if the current Master + becomes unavailable to the group. This can happen + because you shutdown the current Master, the current Master + crashes due to bugs in your application code, or a network + outage causes the current Master to be unreachable by a + majority of the electable nodes in your replication group. +

+

+ In the event of a tie in the number of votes, JE's underlying implementation of the election code will pick the Master. Moreover, the election code always makes a consistent choice when settling a tie; that is, all things being equal, the same node will always be picked to win a tied election.

+
+
+
+
+
+

Replication Streams

+
+
+
+

+ Write transactions can only be performed at the Master. + The results of these transactions are replicated to + Replicas using a logical replication stream. +

+

+ Logical replication streams are carried over a TCP/IP connection. The stream contains a description of the logical change operations (for example, insert, update, or delete) that were performed on the database as a result of the transaction commit. Each such replicated change is assigned a group-wide unique identifier called a Virtual Log Sequence Number (VLSN). The VLSN can be used to locate the replicated change in the log files associated with any member of the group. Through the use of the VLSN, each operation described by the replication stream can be replayed at each Replica using an efficient internal replay mechanism.

+

+ A consequence of this logical replaying of a transaction is + that physical characteristics of the log files contained at + the Replicas can be different across the replication group. + The data contents of the environments found across the replication + group, however, should be identical. +

+

+ Note that there is a process by which a non-replicated + environment can be converted such that it has the log + structure and metadata required for replication. See + Converting Existing Environments for Replication + for more information. +

+
+
+
+ + + diff --git a/docs/ReplicationGuide/lifecycle.html b/docs/ReplicationGuide/lifecycle.html new file mode 100644 index 0000000..381bbac --- /dev/null +++ b/docs/ReplicationGuide/lifecycle.html @@ -0,0 +1,622 @@ + + + + + + Replication Group Life Cycle + + + + + + + + + +
+
+
+
+

Replication Group Life Cycle

+
+
+
+
+
+
+ + Terminology + +
+
+ + Node States + +
+
+ + New Replication Group Startup + +
+
+ + Subsequent Startups + +
+
+ + Replica Startup + +
+
+ + Master Failover + +
+
+ + Two Node Groups + +
+
+
+

+ This section describes how your replication group behaves + over the course of the application's lifetime. Startup is + described, both for new nodes as well as for existing nodes + that are restarting. This section also describes Master + failover. +

+
+
+
+
+

Terminology

+
+
+
+

+ Before continuing, it is necessary to define some terms + used in this document as they relate to + node membership in a replication group. +

+
+
    +
  • +

    + Add/Remove +

    +

    + When we say that a node has been persistently + added to a replication group, + this means that it has become a persistent member of + the group. Regardless of whether the node is running + or otherwise reachable by the group, once it has been + added to the group it remains a member of the group. + If the added node is an electable node, the group size + used during elections, or transaction commit + acknowledgements, is increased by one. Note that + secondary nodes are not persistent members of the + replication group, so they are not considered to be + persistently added or removed. +

    +

    + A node that has been persistently added to a + replication group remains a member of that group + until it is explicitly removed + from the group. Once a node has been removed from + the group, it is no longer a member of the group. If + the node that was removed was an electable node, the + group size used during elections, or transaction + commit acknowledgements, is decreased by one. +

    +
  • +
  • +

    + Join/Leave +

    +

    + We say that a member has joined the + replication group when it starts up and begins + operating in the group as an active node. + Electable and secondary nodes join a replication + group by successfully opening a + ReplicatedEnvironment handle. Monitor nodes are + not considered to join a replication group because + they do not actively participate in replication or + elections. +

    +

    + A member, then, leaves a + replication group by shutting down, or losing the + network contact that allows it to operate as an + active member of the group. When operating + normally, member nodes leave a replication group by + closing their last ReplicatedEnvironment handle. +

    +

    + Joining or leaving a group does not change the + electable group size, and so the number of nodes + required to hold an election, as well as the + number of nodes required to acknowledge + transaction commits, does not change. +

    +
  • +
+
+
+
+
+
+
+

Node States

+
+
+
+

+ Member nodes can be in the following states: +

+
+
    +
  • +

    + Master +

    +

    + When in the Master state, a member node can service read and + write requests. At any given time, there can be only one node in the + Master state in the replication group. +

    +
  • +
  • +

    + Replica +

    +

    + Member nodes in the Replica state can only service + read requests. All of the electable nodes other + than the Master, and all of the secondary nodes, + should be in the Replica state. +

    +
  • +
  • +

    + Unknown +

    +

    + The member node is not aware of a Master and is actively + trying to discover or elect a Master. A node in this + state is constantly striving to transition to the + more productive Master or Replica state. +

    +

    + A node in the Unknown state can still process read + transactions if the node can satisfy its transaction + consistency requirements. +

    +
  • +
  • +

    + Detached +

    +

    + The member node has been shutdown (that is, it has + left the group, but it has not been removed from the + group — see the previous section). It is still + a member of the replication group, but is not active + in elections or replicating data. Note that + secondary nodes do not remain members when they are + in the detached state; when they lose contact with + the Master, they are no longer considered members of + the group. +

    +
  • +
+
+

+ Note that from time to time this documentation uses the + term active node. An active node is a + member node that is in the Master, Replica or Unknown + state. More to the point, an active node is a node that is + available to participate in elections — if it is an + electable node — and in data replication. Monitor + nodes are not considered active and do not report their + state. +

+
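+ A node's current state can be inspected programmatically through its ReplicatedEnvironment handle. The following is a minimal sketch, assuming an open handle named repEnv:

import com.sleepycat.je.rep.ReplicatedEnvironment;
+
+// Sketch: query the node's state -- MASTER, REPLICA, UNKNOWN or DETACHED.
+ReplicatedEnvironment.State state = repEnv.getState();
+if (state.isMaster()) {
+    // Write requests can be serviced here.
+} else if (state.isReplica()) {
+    // Only read requests can be serviced here.
+}

+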
+
+
+
+
+

New Replication Group Startup

+
+
+
+

+ The first time you start up a replication group using an electable node, the group exists (for at least a short time) as a group of size one. At this time, the single node belonging to the group becomes the Master. So long as there is only one electable node in the replication group, that node behaves as if it were a non-replicated application. There are some differences in the format of the log file that the application maintains, but it otherwise behaves identically to a non-replicated transactional application.

+

+ Subsequently, upon startup a new node must be given the contact information for at least one currently active node in the replication group in order to be added to the group. The new node contacts this active node, which identifies the Master for it.

+
+

Note

+

+ As is the case with elections, an electable node cannot + be added to the replication group unless a simple + majority of electable nodes are active at the time that + it starts up. If too many nodes are down or otherwise + unavailable, you cannot add a new electable node to the + group. +

+
+

+ The new node then contacts the Master, and provides all + necessary identification information about itself to the + Master. This includes host and port information, the + node's unique name, and the replication group name. For + electable nodes, the Master stores this identifying + information about the node persistently, meaning the + effective number of electable members of the replication + group has just grown by one. For secondary nodes, the + information about the node is only maintained while the + secondary node is active; the number of electable + members does not change. +

+
+

Note

+

+ Note that the new electable node is now a permanent member + of the replication group until you manually remove + it. This is true even if you shutdown the node for a long + time. See Adding and Removing Nodes from the Group for details. +

+
+

+ Once the new node is an established member of the group, the + Master provides the Replica with the logical logs needed to + replicate the environment. The sequence of logical log + records sent from the Master to the Replica constitutes the + Replication Stream. At this time, the + node is said to have joined the group. + Once a replication stream is established, it is maintained until either the + Replica or the Master goes down. +

+
+
+
+
+
+

Subsequent Startups

+
+
+
+

+ Each node stores information about + other persistent replication group members in its replicated + environment so that this information is available to it + upon restart. +

+

+ When a node that is already an established member of a replication group is restarted, the node uses its knowledge of other members of the replication group to locate the Master. It does this by querying the members of the group to locate the current Master. If it finds a Master, the node joins the group and proceeds to operate in the group as a Replica.

+

+ If a Master is not available and the restarting node is an + electable node, the node initiates an election so as to + establish a Master. If a simple majority of electable + nodes are available for the election, a Master is + elected. If the restarting node is elected Master, it then + waits for Replicas to connect to it so that it can supply + them a replication stream. If the restarting node is a + secondary node, then it continues to try to find the + Master, waiting for the electable nodes to elect a Master + as needed. +

+

+ Under ordinary circumstances, if a Master cannot be determined for some reason, the restarting node will fail to open. However, you can permit the node to instead open in the UNKNOWN state. While in this state, the node is persistently attempting to find a Master, but it is also available for read-only requests.

+

+ To configure a node in this way, use the + ReplicationConfig.setConfigParam() method to set the + ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT parameter. + This parameter requires you to define a Master election + timeout period. If this election timeout expires while + the node is attempting to restart, then the node opens in + the UNKNOWN state instead of failing its open operation + entirely. +

+
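+ For example, the following sketch configures a node to fall back to the UNKNOWN state if no Master is found within five seconds. The duration string format shown is an assumption; confirm it against your version's javadoc:

import com.sleepycat.je.rep.ReplicationConfig;
+
+ReplicationConfig repConfig = new ReplicationConfig();
+
+// Sketch: if no Master is found within five seconds of startup,
+// open in the UNKNOWN state instead of failing the open.
+repConfig.setConfigParam(
+    ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "5 s");

+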
+
+
+
+
+

Replica Startup

+
+
+
+

+ Regardless of how it happens, when a node joins + a replication group, it contacts the + Master and then goes through the following three steps: +

+
+
    +
  1. +

    + Handshake +

    +

    + The Replica sends the Master its configuration information, along with the unique name associated with the Replica's environment. This name is a pseudo-randomly generated Universally Unique Identifier (UUID).

    +

    + This handshake establishes the node as a valid + member of the group. It is used both by new nodes + joining the group for the first time, and by + existing nodes that are simply restarting. +

    +

    + In addition, during this handshake process, the + Master and Replica nodes will compare their + clocks. If the clocks are too far off from one + another, the handshake will fail and the Replica + node will fail to start up. See + Time Synchronization + for more information. +

    +
  2. +
  3. +

    + Replication Stream Sync-Up +

    +

    + The Replica sends the Master its current position + in the replication stream sequence. The Master + and Replica then negotiate a point in the + replication stream that the Master can use as a + starting point to resume the flow of logical + records to the Replica. +

    +

    + Note that normally this sync-up process will be + transparent to your application. However, in rare + cases the sync-up may require that committed + transactions be undone. +

    +

    + Also, if the Replica has been offline for a long + time, it is possible that the Master can no + longer supply the Replica with the required contiguous + interval of the replication stream. (This can + happen due to log cleaning on the Master.) In + this case, the log files must be copied to the + restarting node from some other up-to-date node + in the replication group. See + Restoring Log Files + for details. +

    +
  4. +
  5. +

    + Steady state replication stream flow +

    +

    + Once the Replica has successfully started up and + joined the group, the + Master maintains a flow of log records to the + Replica. Beyond that, the Master will request + acknowledgements from electable Replicas whenever the + Master needs to meet transaction commit + durability requirements. +

    +
  6. +
+
+
+
+
+
+
+

Master Failover

+
+
+
+

+ A Master failing or shutting down causes all of the replication streams + between the Master and its various Replicas to terminate. + In reaction, the Replicas transition to the Unknown state + and the electable nodes initiate an election. +

+

+ An election can be held if at least a simple majority of + the replication group's electable nodes are active. The + electable node + that wins the election transitions to the Master state, + and all other active nodes transition to the Replica + state. +

+

+ Upon transitioning to the Replica state, nodes connect to the new Master and proceed through the handshake, sync-up, and replication replay process described in the previous section.

+

+ If no Master can be elected (because a majority of electable nodes + are not available to participate in the election), then + the nodes remain in the Unknown state until such a time + as a Master can be elected. In this state, the nodes + might be able to service read-only requests, but the + replication group is incapable of servicing write + requests. Read requests can be serviced so long as the + transaction's consistency requirements can be met (see + Managing Consistency). +

+

+ Note that the JE Replication application needs to make + provisions for the following state transitions after + failover: +

+
+
    +
  • +

    + A node that transitions from the Replica state to the Master state as a result of a failover needs to start accepting update requests. There are several ways to determine whether a node can handle update requests. See Managing Write Requests at a Replica for more information, and the sketch following this list.

    +
  • +
  • +

    + If a node remains in the Replica state after a + failover, the failover should be transparent to + the application. However, an application may need + to take corrective action in the rare situation + where the sync-up process has to roll back + committed transactions. +

    +

    + See Managing Transaction Rollbacks for an example of how to handle a transaction commit rollback.

    +
  • +
+
+
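+ A common way to detect these transitions is with the StateChangeListener discussed in Using the StateChangeListener. The following is a minimal sketch, assuming an open ReplicatedEnvironment handle named repEnv:

import com.sleepycat.je.rep.StateChangeEvent;
+import com.sleepycat.je.rep.StateChangeListener;
+
+// Sketch: react to failover by tracking this node's new state.
+repEnv.setStateChangeListener(new StateChangeListener() {
+    public void stateChange(StateChangeEvent event) {
+        if (event.getState().isMaster()) {
+            // This node won the election: begin accepting writes.
+        } else if (event.getState().isReplica()) {
+            // Keep serving reads; route writes to the new Master.
+        }
+    }
+});

+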
+
+
+
+
+

Two Node Groups

+
+
+
+

+ Replication groups consisting of just two electable nodes represent a unique corner case for JE replication. Usually, in order to elect a Master, a simple majority of electable nodes must be available to participate in an election. For a replication group of size two, however, if even one electable node is unavailable for the election then by default it is impossible to hold one.

+

+ However, for some classes of application, it is desirable for the application to proceed with operations using just one electable node. That is, the application trades off the durability guarantees offered by using two electable nodes for the higher availability made possible by allowing the application to run with just one of the nodes.

+

+ JE allows you to do this by designating one of the nodes in a two-electable-node group as a primary node. When the non-primary node of the pair is not available, the primary node reduces the number of nodes required for a simple majority from two to one. Consequently, the primary node is able to elect itself as the Master, and it can then commit transactions whose durability requirements call for a simple majority of acknowledgements. When the non-primary node becomes available again, the number of nodes required for a simple majority at the primary reverts to two.

+
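+ The primary designation is part of the node's mutable replication configuration. The following is a minimal sketch, assuming an open ReplicatedEnvironment handle named repEnv and the ReplicationMutableConfig.setDesignatedPrimary() method:

import com.sleepycat.je.rep.ReplicationMutableConfig;
+
+// Sketch: designate this node as the primary of a two-node group.
+// Only one of the two electable nodes may ever carry this flag.
+ReplicationMutableConfig mutableConfig = repEnv.getRepMutableConfig();
+mutableConfig.setDesignatedPrimary(true);
+repEnv.setRepMutableConfig(mutableConfig);

+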

+ At any given time, either zero or one electable node may be designated as the primary, and it is up to your application to ensure that two nodes are never designated as the primary at once. If that happened, and the two nodes could not communicate with one another (due to a network malfunction of some kind, for example), they could both consider themselves to be Masters and start accepting write requests. This violates a fundamental requirement: at any given instant in time, there should be exactly one node that is permitted to perform writes on the replicated environment.

+

+ Note that the non-primary electable node always needs two + electable nodes for a simple majority, so it can never + become the Master in the absence of the primary node. If the + primary node fails, you can make provisions to swap the + primary and non-primary designations so that the surviving + node is now the primary. This swap must be performed + carefully so as to ensure that both nodes are not + concurrently designated the primary. The most important + thing is that the failed node comes up as the non-primary + after it has been repaired. +

+

+ For more information on using two-node groups, see + Configuring Two-Node Groups. +

+
+
+ + + diff --git a/docs/ReplicationGuide/logfile-restore.html b/docs/ReplicationGuide/logfile-restore.html new file mode 100644 index 0000000..e582fe3 --- /dev/null +++ b/docs/ReplicationGuide/logfile-restore.html @@ -0,0 +1,210 @@ + + + + + + Restoring Log Files + + + + + + + + + +
+
+
+
+

Restoring Log Files

+
+
+
+ +

+ During normal operations, the nodes in a replication group + communicate with one another to ensure that the JE cleaner + does not reclaim log files still needed by the group. The tail + end of the replication stream may still be needed by a lagging + Replica in order to make it current with the Master, and so the + replication group tries to make sure the trailing log files needed to + bring lagging Replicas up-to-date are not reclaimed. +

+

+ However, if a node is unavailable for a long enough period + of time, then log files needed to bring it up to date might + have been reclaimed by the cleaner. + For information on how and when log files are reclaimed in + a replicated environment, see + Reclaiming Log Files. +

+

+ Once log files have been reclaimed by the cleaner, the Replica can no longer be brought up to date using the normal replication stream. Your application code will know this has happened when the ReplicatedEnvironment constructor throws an InsufficientLogException.

+

+ When your code catches an InsufficientLogException, you must bring the Replica up to date using a mechanism other than the normal replication stream. You do this using the NetworkRestore class. A call to NetworkRestore.execute() causes the Replica to copy the missing log files from a member of the replication group that has the files and appears to be the least busy. Once the Replica has obtained the log files that it requires, it automatically re-establishes its replication stream with the Master so that the Master can finish bringing the Replica up to date.

+

+ For example: +

+
 ...
+  try {
+     node = new ReplicatedEnvironment(envDir, repConfig, envConfig);
+ } catch (InsufficientLogException insufficientLogEx) {
+
+     NetworkRestore restore = new NetworkRestore();
+     NetworkRestoreConfig config = new NetworkRestoreConfig();
+     config.setRetainLogFiles(false); // delete obsolete log files.
+
+     // Use the members returned by insufficientLogEx.getLogProviders() 
+     // to select the desired subset of members and pass the resulting 
+     // list as the argument to config.setLogProviders(), if the 
+     // default selection of providers is not suitable.
+
+     restore.execute(insufficientLogEx, config);
+
+     // retry
+     node = new ReplicatedEnvironment(envDir, repConfig, envConfig);
+ } ...  
+

+ Note that the replication group does not maintain information about the log files needed by secondary nodes. Instead, the system retains a set of log files beyond those required for a network restore, based on the NETWORK_RESTORE_OVERHEAD property, which you can manage using ReplicationConfig.setConfigParam(). The default value is 10, meaning the system estimates that a network restore must send 10 percent more data over the network than replication would using the same log files; the system therefore saves files containing an additional 10 percent of log data beyond the amount needed for a network restore.

+
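+ For example, raising the estimate to 15 percent might look like the following sketch, assuming a ReplicationConfig named repConfig; the exact location of the NETWORK_RESTORE_OVERHEAD constant is an assumption, so confirm it against your version's javadoc:

// Sketch: retain an extra 15 percent of log data for network restores.
+repConfig.setConfigParam(
+    ReplicationConfig.NETWORK_RESTORE_OVERHEAD, "15");

+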
+
+
+
+

Reclaiming Log Files

+
+
+
+

+ Ordinarily JE's cleaner thread reclaims log files as soon as possible so as to minimize the amount of disk space used by the database. As records are deleted or updated, log files become progressively obsolete; the cleaner migrates any remaining live records forward and then reclaims those files.

+

+ However, various database activities might cause log files to be temporarily reserved or protected. A reserved file is a file that JE can delete but has not yet done so. A protected file is a file that would otherwise be deleted, but cannot be because of some database activity, such as a backup.

+

+ For replicated environments, JE holds on to log files as long as possible in case they are needed to bring a replica up to date. Log files that have been cleaned but are retained for replication are in a reserved state. All such files are kept until the disk usage thresholds defined by EnvironmentConfig.MAX_DISK and EnvironmentConfig.FREE_DISK are exceeded. At that point, JE deletes reserved log files.

+
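+ Both thresholds are ordinary environment configuration parameters whose values are byte counts. The following is a minimal sketch, assuming a 100 GB disk budget and a 5 GB free-space floor (the specific values are illustrative):

import com.sleepycat.je.EnvironmentConfig;
+
+EnvironmentConfig envConfig = new EnvironmentConfig();
+
+// Sketch: cap JE's total on-disk usage at 100 GB...
+envConfig.setConfigParam(EnvironmentConfig.MAX_DISK,
+                         String.valueOf(100L * 1024 * 1024 * 1024));
+// ...and always leave at least 5 GB of the volume free.
+envConfig.setConfigParam(EnvironmentConfig.FREE_DISK,
+                         String.valueOf(5L * 1024 * 1024 * 1024));

+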
+
+
+
+
+

Suspending Writes Due to Disk Thresholds

+
+
+
+

+ In the previous section, we mentioned that JE reserves + cleaned log files until disk threshold limits are encountered, + at which time log files are reclaimed (deleted). +

+

+ Be aware that if reclaiming log files does not allow JE + to meet its disk usage threshold limits, then writes are + disabled for one or more nodes in the replication group. +

+

+ If the threshold limits cannot be met on the Master, then + write operations will throw DiskLimitException just as + they would for a non-replicated environment. +

+

+ If the threshold limit cannot be met on a replica, then + writes are disabled only on that replica. In this case, the + Master might see InsufficientAcksException thrown in + response to a write — if your application's + durability guarantee cannot be met due to the replica being + unable to perform writes. +

+
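+ A sketch of how a write on the Master might handle both conditions follows, assuming an open transaction named txn; the recovery actions in the comments are suggestions, and any retry policy is left to the application:

import com.sleepycat.je.DiskLimitException;
+import com.sleepycat.je.rep.InsufficientAcksException;
+
+try {
+    // ... perform writes under txn ...
+    txn.commit();
+} catch (DiskLimitException dle) {
+    // This node has hit its disk limits: abort the transaction,
+    // then free disk space or raise the thresholds and retry.
+    txn.abort();
+} catch (InsufficientAcksException iae) {
+    // The commit is already durable on this node, but too few
+    // Replicas acknowledged it -- possibly because a Replica has
+    // suspended writes after hitting its own disk limits.
+}

+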
+
+ + + diff --git a/docs/ReplicationGuide/monitors.html b/docs/ReplicationGuide/monitors.html new file mode 100644 index 0000000..4b57467 --- /dev/null +++ b/docs/ReplicationGuide/monitors.html @@ -0,0 +1,164 @@ + + + + + + Chapter 5. Writing Monitor Nodes + + + + + + + + + +
+
+
+
+

Chapter 5. Writing Monitor Nodes

+
+
+
+
+

+ Table of Contents +

+
+
+ + Monitor Class + +
+
+ + Listening for Events + +
+
+
+

+ So far in this book we have mostly discussed electable and + secondary nodes, which are by definition nodes that have access + to a JE ReplicatedEnvironment. However, replication groups + can include any number of nodes that have no access to the JE + replicated environment in use by the replication group. +

+

+ These nodes without environments are called monitor nodes. The point of a monitor node is to give a process some understanding of the replication group's structure, such as which node is the Master and which nodes belong to the group as Replicas. Monitor nodes can also learn when certain events have happened in the replication group, such as when a new Master is elected or when nodes are added to, or removed from, the group.

+

+ There are many uses for Monitor nodes, starting with the ability to write processes that monitor the current status of your HA application. But another, arguably more interesting, use for Monitor nodes is request routing. As we have explained earlier in this book, Replicas can only service read-only requests; all write requests must occur on the Master. However, a Replica can do no more than notice that it has been asked to process a write request; out of the box, all it can do is reject the request by throwing a ReplicaWriteException.

+

+ One way to handle this problem is to write a request router that sits on your network between the data nodes and your clients. This router can send write requests to the Master, and read requests to the Replicas. A robust version of such a router could also perform load balancing across the various Replicas, so that no one Replica becomes swamped by too many read requests.

+
+
+
+
+

Monitor Class

+
+
+
+

+ You implement Monitor nodes using the Monitor class. The + Monitor class allows you to obtain + information about the replication group, such as its name, + where the Master is, and other such information. The + Monitor class also allows you to run an + event listener that can alert you to changes in the composition + of the replication group. +

+

+ You instantiate a Monitor class object + in much the same way as you instantiate a + ReplicatedEnvironment class object. It is necessary to give + the node a name, to indicate that it is a Monitor node, to + identify the node's host and port information, and to identify + helper hosts. You use a MonitorConfig object to do these + things. +

+

+ Once the Monitor object has been + instantiated, it must be registered at least once with the + Master so that the replication group will know to keep the node + informed about changes in the group composition. (Subsequent + attempts to register the node are simply ignored by the + Master.) You use the Monitor.register() method to register a + Monitor node with a Master. +

+

+ For example: +

+
 // Initialize the monitor node config
+MonitorConfig config = new MonitorConfig();
+config.setGroupName("MyRepGroupName");
+config.setNodeName("mon1");
+config.setNodeHostPort("monhost1.acme.com:7000");
+config.setHelperHosts("node1.acme.com:5000,node2.acme.com:5000");
+
+Monitor monitor = new Monitor(config);
+
+// If the monitor has not been registered as a member of the 
+// group, register it now. register() returns the current node 
+// that is the master.
+ReplicationNode currentMaster = monitor.register(); 
+
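+ A registered monitor can then track group changes through an event listener, as described in Listening for Events. The following is a minimal sketch, assuming the MonitorChangeListener interface and event classes in com.sleepycat.je.rep.monitor; confirm the exact method signatures against your version's javadoc:

import com.sleepycat.je.rep.monitor.GroupChangeEvent;
+import com.sleepycat.je.rep.monitor.JoinGroupEvent;
+import com.sleepycat.je.rep.monitor.LeaveGroupEvent;
+import com.sleepycat.je.rep.monitor.MonitorChangeListener;
+import com.sleepycat.je.rep.monitor.NewMasterEvent;
+
+// Sketch: print group changes as the monitor learns about them.
+monitor.startListener(new MonitorChangeListener() {
+    public void notify(NewMasterEvent event) {
+        System.out.println("New master: " + event.getNodeName());
+    }
+    public void notify(GroupChangeEvent event) {
+        System.out.println("Group changed: " + event.getNodeName());
+    }
+    public void notify(JoinGroupEvent event) {
+        System.out.println("Node joined: " + event.getNodeName());
+    }
+    public void notify(LeaveGroupEvent event) {
+        System.out.println("Node left: " + event.getNodeName());
+    }
+});

+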
+
+ + + diff --git a/docs/ReplicationGuide/nodeconfig.html b/docs/ReplicationGuide/nodeconfig.html new file mode 100644 index 0000000..1fbf15a --- /dev/null +++ b/docs/ReplicationGuide/nodeconfig.html @@ -0,0 +1,227 @@ + + + + + + Node Configuration + + + + + + + + + +
+
+
+
+

Node Configuration

+
+
+
+

+ When you place a node into service, there is a set of information that you must provide which is unique to each node. The application development team may or may not have provided defaults for some or all of these values, so you should check with them to see exactly what you need to override.

+

+ This information can be provided to the application in two + different ways. One is by using JE API calls. Typically you + will pass the information to those calls using command line + parameters. Again, how you do this is specific to your + application. +

+

+ In addition, you can provide this information to the application + using the je.properties file. Note that the + information provided in this file is handled as if it is a + default setting. Therefore, if you also + provide conflicting information using the JE APIs (again, + usually passed to a production application using command line + parameters), then the information provided directly to the APIs + takes priority over whatever might be found in the + je.properties file. +

+

+ No matter how it is done, there are three pieces of information + that you must provide every JE replicated application: +

+
+
    +
  • +

    + Group Name +

    +

    + This is the replication group's name. This value must be the same for every node in a given replication group. The name must be made up of alphanumeric characters and must not be zero length.

    +

    + JE developers can provide this information to the + application using the + ReplicationConfig.GROUP_NAME + field. In the je.properties file, + it is defined using the + je.rep.group.name parameter. +

    +
  • +
  • +

    + Node Name +

    +

    + This is the name of the node. This name must be unique + within the group. This name combined with the group name + uniquely identifies the node. +

    +

    + JE developers can provide this information to the + application using the + ReplicationConfig.NODE_NAME + field. In the je.properties file, + it is defined using the + je.rep.node.name parameter. +

    +
  • +
  • +

    + Node Host +

    +

    + This is the hostname and port pair that is used by other + nodes in the replication group to communicate with this + node. The node uses this property to establish a TCP/IP + socket for communication with other members of the group. +

    +

    + The string that you provide to this property takes the + form: +

    +
    hostname[:port]
    +

    + The hostname provided to this property must be reachable + by the other nodes in the replication group. +

    +

    + The port number is optional for this property because a + default port can be defined using the + je.properties file (you use the + je.rep.defaultPort property to do + this). However, if a port is provided explicitly to this + property, then je.rep.defaultPort is + ignored. +

    +

    + Be careful to ensure that the port you identify for the + node does not conflict with ports used by other applications + (including other nodes, if any) currently running on the local machine. +

    +

    + Note that monitor nodes will use the socket identified by + this property so that they can be kept informed of the + results of elections, and so they can keep track of + changes in group composition. +

    +

    + Electable nodes use this socket to: +

    +
    +
      +
    • +

      + Hold elections +

      +
    • +
    • +

      + Supply commit acknowledgements +

      +
    • +
    +
    +

    + Both electable and secondary nodes use this socket to: +

    +
    +
      +
    • +

      + Establish replication streams between the Master + and its Replicas +

      +
    • +
    • +

      + Support network-based JE HA utility services, + such as JE's network restore utility. (See + Restoring Log Files + for details on this utility.) +

      +
    • +
    +
    +
    +

    Note

    +

    + You can change the hostname and/or port number for + an existing electable or monitor node using the + DbGroupAdmin.updateAddress() or + ReplicationGroupAdmin.updateAddress() methods. + Hostnames and port numbers for secondary nodes can + be changed by restarting the nodes with the + desired values. +

    +
    +
  • +
+
+
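+ For example, a je.properties file in the node's environment directory might supply defaults such as the following sketch. It uses only the parameter names mentioned above with illustrative values; confirm the exact parameter names against your version's ReplicationConfig javadoc:

# Sketch of a je.properties file supplying per-node defaults.
+je.rep.group.name=PlanetaryRepGroup
+je.rep.node.name=Jupiter
+je.rep.defaultPort=5002

+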

+ The properties discussed here are simply the bare-bones minimum + properties required to configure a JE node. For a complete + description of all the replication properties available to a + JE application, see the ReplicationConfig and + ReplicationMutableConfig class descriptions. +

+
+ + + diff --git a/docs/ReplicationGuide/preface.html b/docs/ReplicationGuide/preface.html new file mode 100644 index 0000000..bc55971 --- /dev/null +++ b/docs/ReplicationGuide/preface.html @@ -0,0 +1,260 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+
+
+ + For More Information + +
+
+ + Contact Us + +
+
+
+
+
+

+ This document describes how to write replicated Berkeley DB, Java Edition applications. The APIs used to implement replication in your application are described here. This book describes the concepts surrounding replication, the scenarios under which you might choose to use it, and the architectural requirements that a replicated application has beyond those of a transactional application.

+

+ This book is aimed at the software engineer responsible for writing a + replicated JE application. +

+

+ This book assumes that you have already read and understood the + concepts contained in the Berkeley DB, Java Edition Getting Started with Transaction Processing guide. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+
+
+
+ + For More Information + +
+
+ + Contact Us + +
+
+
+

+ The following typographical conventions are used within this manual:

+

+ Class names are represented in monospaced font, as are method + names. For example: + + "The Environment() + constructor returns an Environment class object." +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + JE_HOME directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. + For example: +

+
import com.sleepycat.je.Environment;
+
+...
+
+// Open the environment. Allow it to be created if it does not already 
+// exist.
+Environment myDbEnv;
+

+ In some situations, programming examples are updated from one chapter to the next. When + this occurs, the new code is presented in monospaced bold font. For example: +

+
import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not already 
+// exist.
+Environment myDbEnv;
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+myDbEnv = new Environment(new File("/export/dbEnv"), envConfig); 
+
+

Note

+

+ Finally, notes of special interest are represented using a note block such + as this. +

+
+
+
+
+
+

For More Information

+
+
+
+

+ Beyond this manual, you may also find the following sources of + information useful when building a replicated JE + application: +

+ + +

+ To download the latest + + Berkeley DB Java Edition + + documentation along with white papers and other collateral, + visit http://www.oracle.com/technetwork/indexes/documentation/index.html. +

+

+ For the latest version of the Oracle + + Berkeley DB Java Edition + + downloads, visit + http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html. +

+
+
+
+
+
+
+

Contact Us

+
+
+
+

+ You can post your comments and questions at the Oracle Technology Network (OTN) forum for Oracle Berkeley DB Java Edition at: https://forums.oracle.com/forums/forum.jspa?forumID=273.

+

+ For sales or support information, email to: + berkeleydb-info_us@oracle.com + You can subscribe to a low-volume email announcement list for + the Berkeley DB product family by sending email to: + bdb-join@oss.oracle.com +

+
+
+
+ + + diff --git a/docs/ReplicationGuide/progoverview.html b/docs/ReplicationGuide/progoverview.html new file mode 100644 index 0000000..5a6a053 --- /dev/null +++ b/docs/ReplicationGuide/progoverview.html @@ -0,0 +1,498 @@ + + + + + + Chapter 2. Replication API First Steps + + + + + + + + + +
+
+
+
+

Chapter 2. Replication API First Steps

+
+
+
+ +

+ From an API point of view, there are two basic requirements that + every replicated application must meet: +

+
+
    +
  1. +

    + It must be a transactional application. +

    +
  2. +
  3. +

    + It must use a specific form of the Environment handle, + which you get by using the ReplicatedEnvironment class. +

    +
  4. +
+
+

+ Beyond that, there are some additional requirements in terms of + exception handling that your application should perform. +

+

+ The transactional nature of your replicated application is described in Transaction Management. This chapter discusses replicated environments and the exceptions unique to JE HA in detail.

+
+
+
+
+

Using Replicated Environments

+
+
+
+ +

+ Every electable or secondary node manages a single replicated + JE environment directory. The environment follows the usual + regulations governing a JE environment; namely, only a single + read/write process can access the environment at a single point in + time. +

+

+ Usually this requirement is met naturally, because each node in a replicated application usually runs on a machine that is independent of all the other nodes. However, in some test and development scenarios this one-node-to-one-machine rule might not be met, so the bottom line is that you must make sure that no two processes ever attempt to manage the same environment.

+
+

Note

+

+ An application can access a replicated JE environment + directory using a read only Environment handle. The usual + semantics of read only non-replicated Environment handles + apply in this case. That is, the application can view a + snapshot of the replicated environment as of the time the + Environment handle was opened, through the Environment + handle. An application can therefore open a + ReplicatedEnvironment handle in one process, and + concurrently open read only Environment handles in other + processes. Any changes subsequently made to the replicated + environment, either by virtue of the node being a Master, + or due to a replay of the replication stream (if the node is a + Replica), are not accessible through the read only Environment + handles until they are closed and reopened. +

+
+
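+ A sketch of opening such a read-only snapshot from a separate process follows, assuming the replicated environment's directory is a java.io.File named envDir:

import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+// Sketch: open a read-only snapshot view of a replicated environment.
+EnvironmentConfig readConfig = new EnvironmentConfig();
+readConfig.setReadOnly(true);
+readConfig.setTransactional(true);
+Environment readEnv = new Environment(envDir, readConfig);
+// ... service reads; close and reopen the handle to see later changes.

+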

+ Normally you manage your JE environments using the Environment class. However, to provide for the underlying infrastructure needed to implement replication, your JE HA application must instead use the ReplicatedEnvironment class, which is a subclass of Environment. Its constructor accepts the normal environment configuration properties using the EnvironmentConfig class, just as you would normally configure an Environment object. However, the ReplicatedEnvironment class also accepts a ReplicationConfig class object, which allows you to manage the properties specific to replication.

+

+ The following is an example of how you instantiate a + ReplicatedEnvironment object. Note that there are some + differences in how this is used, depending on whether you are + starting a brand-new node or you are restarting an existing + node. We discuss these differences in the next section. +

+

+ For a general description of environments and environment + configuration, see the Getting Started with Berkeley + DB Java Edition guide. +

+
EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+envConfig.setTransactional(true);
+
+// Identify the node
+ReplicationConfig repConfig = new ReplicationConfig();
+repConfig.setGroupName("PlanetaryRepGroup");
+repConfig.setNodeName("Mercury");
+repConfig.setNodeHostPort("mercury.example.com:5001");
+
+// This is the first node, so its helper is itself
+repConfig.setHelperHosts("mercury.example.com:5001");
+ 
+ReplicatedEnvironment repEnv =
+     new ReplicatedEnvironment(envHome, repConfig, envConfig);  
+
+
+
+
+

Configuring Replicated Environments

+
+
+
+

+ You configure a JE ReplicatedEnvironment handle using + two different configuration classes: EnvironmentConfig + and ReplicationConfig. Your usage of EnvironmentConfig + is no different than if you were writing a non-replicated + application, so we will not describe its usage here. + For an introduction to basic environment configuration, see + the Getting Started with Berkeley DB, Java Edition guide. +

+

+ The ReplicationConfig class allows you to configure + properties that are specific to + replicated applications. Some of these properties are + important in terms of how your application will behave + and how well it will perform. These properties are + discussed in detail later in this book. +

+

+ To an extent, you can get away with ignoring most of the + configuration properties until you are ready to tune your + application's performance and behavior. However, no matter + what, there are four properties you must always configure + for a ReplicatedEnvironment before opening it. They are: +

+
+
    +
  1. +

    + Group Name +

    +

    + The group name is a string that uniquely identifies the group to which the node belongs. It is possible to operate multiple replication groups on the same network. In fact, a single process can even interact with multiple replication groups, so long as it maintains separate replicated environments for each group in which it is participating.

    +

    + By using unique group names, the JE replication + code can make sure that messages arriving at a + given client are actually meant for that client. +

    +

    + You set the group name by using the + ReplicationConfig.setGroupName() method. + Note that if you do not set a group name, then the + default GROUP_NAME value is used. +

    +
  2. +
  3. +

    + Node Name +

    +

    + This name must be unique to the replication group. + This name plus the replication group name uniquely + identifies a node in your enterprise. +

    +

    + You set the node name by using the + ReplicationConfig.setNodeName() method. +

    +
  4. +
  5. +

    + Host +

    +

    + The host property identifies the network name and + port where this node can be reached. Other nodes in + the replication group will use this host/port pair + to establish a TCP/IP connection to this node. This + connection is used to transfer data between + machines, hold elections, and monitor the status of + the replication group. +

    +

    + You provide the host and port information using a string of the + form: +

    +
    hostname[:port]
    +

    + The port that you provide must be higher than 1023. +

    +

    + You set the host information by using the + ReplicationConfig.setNodeHostPort() method. + Note that if you do not set a node host, then the + default NODE_HOST_PORT value is used. +

    +
  6. +
  7. +

    + Helper Host +

    +

    + The helper host or hosts are used by a node the very first time it starts up to find the Master. Basically, this string should provide one or more host/port pairs for nodes that should know where the Master is.

    +

    + One of the nodes that you provide on this string + can be the current Master, but that is not + required. All that matters is that the hosts + identified here can tell a new node where the + current Master is. +

    +

    + If the brand new node is an electable node and cannot find a Master, it will initiate an election. If no other electable nodes are available to the new node, and the current node is specified as the only helper host, then it will elect itself as Master. If the current node is truly the very first electable node starting up in the replication group, then electing itself as Master is probably what you want it to do. +

    +

    + However, if the current node + is not the very first node starting up + in the replication group, then a misconfiguration of + this property can cause you to end up with multiple + replication groups, each with the same group name. + This represents an error situation, one that can be + very difficult to diagnose by people who are + inexperienced with managing replication groups. + For this reason, it is very important to make sure + the hosts identified on this string do NOT identify + only the local host except when creating the first + node. +

    +

    + On subsequent startups, the node should be able to locate other participants in the replication group using information located in its own database. In that case, the information provided on this string is largely ignored unless the current node has been down or otherwise out of communication with the rest of the group for so long that its locally cached information has grown stale. In that event, the node will attempt to use the information provided here to locate the current Master. +

    +

    + You set the helper host information by using the + ReplicationConfig.setHelperHosts() method. +

    +
  8. +
+
+

+ When configuring and instantiating a + ReplicatedEnvironment object, you should usually + configure the environment so that a helper host other than + the local machine is used: +

+
EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+envConfig.setTransactional(true);
+ 
+// Identify the node
+ReplicationConfig repConfig = new ReplicationConfig();
+repConfig.setGroupName("PlanetaryRepGroup");
+repConfig.setNodeName("Jupiter");
+repConfig.setNodeHostPort("jupiter.example.com:5002");
+ 
+// Use the node at mercury.example.com:5001 as a helper to find the rest
+// of the group.
+repConfig.setHelperHosts("mercury.example.com:5001");
+ 
+ReplicatedEnvironment repEnv =
+   new ReplicatedEnvironment(envHome, repConfig, envConfig);  
+

+ Note that if you are restarting a node that has already been added to + the replication group, then you do not have to supply a helper host at + all. This is because the node will already have locally stored host and port + information about the other nodes in the group. +

+
EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+envConfig.setTransactional(true);
+ 
+// Identify the node
+ReplicationConfig repConfig = 
+    new ReplicationConfig("PlanetaryRepGroup", 
+                          "Jupiter", 
+                          "jupiter.example.com:5002");
+ 
+ReplicatedEnvironment repEnv =
+   new ReplicatedEnvironment(envHome, repConfig, envConfig);  
+

+ However, if you are starting the very first node in the replication group for the very first time, then there is no other helper host that the node can use to locate a Master. In this case, identify the current node as the helper host, and it will become a replication group of size 1 with itself as the Master. +

+
+

Note

+

+ Do this ONLY if you are truly starting the very first electable + node in a replication group for the very first time. +

+
+
EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+envConfig.setTransactional(true);
+ 
+// Identify the node
+ReplicationConfig repConfig = 
+    new ReplicationConfig("PlanetaryRepGroup", 
+                          "Jupiter", 
+                          "jupiter.example.com:5002");
+ 
+// This is the first node, so the helper is itself.
+repConfig.setHelperHosts("jupiter.example.com:5002");
+ 
+ReplicatedEnvironment repEnv =
+   new ReplicatedEnvironment(envHome, repConfig, envConfig);  
+
+
+
+ + + diff --git a/docs/ReplicationGuide/repenvironmentopen.html b/docs/ReplicationGuide/repenvironmentopen.html new file mode 100644 index 0000000..fb43752 --- /dev/null +++ b/docs/ReplicationGuide/repenvironmentopen.html @@ -0,0 +1,157 @@ + + + + + + Opening a Replicated Environment + + + + + + + + + +
+
+
+
+

Opening a Replicated Environment

+
+
+
+

+ In the previous two sections we looked at the basics of how to create a + replicated environment, and what exceptions you can expect to see in a JE HA + application. Now we need to combine these two topics in order to examine how you should + open a ReplicatedEnvironment handle to an existing replicated + environment. +

+

+ When you open the handle, the underlying HA code will attempt to open a TCP/IP + connection to other nodes in the replication group, based on the node's stored + replication group metadata or the helper host information that you provide. In doing so, + the node will attempt to locate a Master or, failing that, will hold an election in order + to select a new Master, if it is an electable node. +

+

+ Due to issues of timing and network performance, the node may or may not be able to: +

+
+
    +
  1. +

    + locate the master; and +

    +
  2. +
  3. +

    + hold an election. +

    +
  4. +
+
+

+ This can happen if there simply are not enough electable nodes available for the current node to start up, find the current Master, or hold an election. Remember that a simple majority of the electable nodes registered in the replication group must be available in order to hold an election. +

+

+ If this situation occurs, the ReplicatedEnvironment constructor will throw an + UnknownMasterException. Therefore, typically, it is best that you prepare for this + situation by performing the handle creation in a retry loop, as shown in the following + code snippet. +

+

+ In addition, if the Replica has been down for a long enough + period of time, it might be so far out of date that it cannot + be brought up to date using the normal replication stream. In + this case, the ReplicatedEnvironment constructor will throw + an InsufficientLogException. See Restoring Log Files for information on how to handle this + exception. +

+
private static final int REP_HANDLE_RETRY_MAX = 100;
+
+   ...
+
+ReplicatedEnvironment getEnvironment(File envHome, String groupName, 
+                                     String nodeName, String nodeHost,
+                                     String helperHosts) 
+     throws IllegalStateException, InterruptedException {
+
+   EnvironmentConfig envConfig = new EnvironmentConfig();
+   envConfig.setAllowCreate(true);
+   envConfig.setTransactional(true);
+
+   // Identify the node
+   ReplicationConfig repConfig = 
+        new ReplicationConfig();
+   repConfig.setGroupName(groupName);
+   repConfig.setNodeName(nodeName);
+   repConfig.setNodeHostPort(nodeHost);
+   repConfig.setHelperHosts(helperHosts);
+
+   for (int i = 0; i < REP_HANDLE_RETRY_MAX; i++) {
+        try {
+            return new 
+                ReplicatedEnvironment(envHome, repConfig, envConfig);
+        } catch (UnknownMasterException ume) {
+            /*
+             * Insert application specific code here to indicate that
+             * this problem was encountered, such as writing the 
+             * condition to a log file.
+             */
+
+            Thread.sleep(5 * 1000);
+            continue;
+        } catch (InsufficientLogException ile) {
+            /* A network restore is required, make the necessary calls */
+        }
+
+   }
+   throw new 
+        IllegalStateException("getEnvironment: reached max retries");
+} 
+
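+ For instance, the InsufficientLogException catch block in the loop above could be filled in using the NetworkRestore utility (in the com.sleepycat.je.rep package), which copies the missing log files from another member of the group before the open is retried. The following is only a sketch; the setRetainLogFiles(false) choice is illustrative: +
+
} catch (InsufficientLogException ile) {
+    /*
+     * This node is too far out of date to catch up via the
+     * normal replication stream. Restore the log files over
+     * the network from another group member, then retry.
+     */
+    NetworkRestore restore = new NetworkRestore();
+    NetworkRestoreConfig config = new NetworkRestoreConfig();
+    config.setRetainLogFiles(false); // discard obsolete local files
+    restore.execute(ile, config);
+    continue;
+} 
+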

+ Note that for production code, you may want to retry the handle open without any maximum + retry limit. +
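+ A sketch of such an unbounded variant (with an arbitrary five-second wait between attempts) might look like this: +
+
ReplicatedEnvironment repEnv = null;
+while (repEnv == null) {
+    try {
+        repEnv = new ReplicatedEnvironment(envHome, repConfig, envConfig);
+    } catch (UnknownMasterException ume) {
+        // Log the condition, then wait before retrying indefinitely.
+        Thread.sleep(5 * 1000);
+    }
+} 
+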

+
+ + + diff --git a/docs/ReplicationGuide/repexample.html b/docs/ReplicationGuide/repexample.html new file mode 100644 index 0000000..1fe8393 --- /dev/null +++ b/docs/ReplicationGuide/repexample.html @@ -0,0 +1,127 @@ + + + + + + Chapter 6. Replication Examples + + + + + + + + + +
+
+
+
+

Chapter 6. Replication Examples

+
+
+
+

+ JE HA provides three different example programs that illustrate the concepts discussed in this manual. You can find them in the <JE HOME>/examples/je/rep/quote directory, where <JE HOME> is the directory where you installed your JE distribution. +

+

+ The examples provided for you are each based on a mock stock ticker + application which stores stock values in a replicated JE + environment. The differences in the three examples have to do with + how each example handles requests for database access; in + particular, database write requests. +

+

+ Briefly, the three examples are: +

+
+
    +
  • +

    + StockQuotes: Provides the most basic demonstration of a replicated application. It is intended to help you gain an understanding of basic HA concepts, and it demonstrates use of the HA APIs to create a replicated environment and issue read and write transactions. +

    +

    + For this example, no attempt is made to route or forward write requests. Instead, the application blindly attempts any write request made at the node. If the node is in the Replica state, the underlying HA code raises a ReplicaWriteException, and the example rejects the operation and reports the problem to you. +

    +
  • +
  • +

    + RouterDrivenStockQuotes and HARouter: Illustrates + how a software load balancer might be integrated with + JE HA, where HARouter plays the role of the load + balancer for purposes of the example. It does this by + using the Monitor class to direct application + requests to the appropriate node. Read-only requests + are sent to Replicas, while read-write requests are sent + to the replication group's Master. +

    +
  • +
  • +

    + UpdateForwardingStockQuotes and SimpleRouter: + Illustrates the use of an HA unaware router that load + balances read and write requests across the nodes in a + replication group. The router is implemented in + SimpleRouter, and is meant to illustrate how a load + balancer appliance might fit into the JE HA + architecture. +

    +

    + This example is based on RouterDrivenStockQuotes. +

    +
  • +
+
+

+ Usage of each of these examples is described in the Javadoc page + for each example. +

+
+ + + diff --git a/docs/ReplicationGuide/replicawrites.html b/docs/ReplicationGuide/replicawrites.html new file mode 100644 index 0000000..bbfff24 --- /dev/null +++ b/docs/ReplicationGuide/replicawrites.html @@ -0,0 +1,276 @@ + + + + + + Managing Write Requests at a Replica + + + + + + + + + +
+
+
+
+

Managing Write Requests at a Replica

+
+
+
+ +

+ For a replicated JE application, read requests can be + serviced by any electable or secondary node in the replication group, but + write requests can only be serviced by the Master node. For + this reason, your application must be prepared to deal with the + difference in operating behavior between read-only Replicas and + read-write Masters. +

+

+ It is possible to be quite sophisticated in terms of tracking + which node is the Master and so which node can service write + requests. You can even route write requests to the Master node + by writing a special router process. For an example of an + application that does this, see RouterDrivenStockQuotes and + HARouter, both of which are available in your JE + distribution in the + <JE HOME>/examples/je/rep/quote + directory. +

+

+ However, for our purposes here, we simply want to make sure our Replica nodes can gracefully handle a situation where they receive a write request. The node should reject the request and return a notification to the requester explaining that the write was not performed. While not the most robust solution, this is the simplest thing your JE replicated application can do if it receives a write request at a Replica node. +

+

+ There are two ways to determine whether a write request can be + handled at the local node: +

+
+
    +
  • +

    + Use a monitor node to implement request routing. + Monitor nodes are described in Writing Monitor Nodes. +

    +
  • +
  • +

    + Use the StateChangeListener to detect when the local + node becomes a Master. Otherwise, forward the write + request to the Master node instead of attempting to + service it locally. +

    +
  • +
+
+

+ Either way, any code that attempts database writes for an HA + application should always be prepared to handle a + ReplicaWriteException. +

+
+
+
+
+

Using the StateChangeListener

+
+
+
+

+ You use the StateChangeListener interface to implement a class that is notified whenever your node changes state. In this way, you can track whether a node is in the Master, Replica, or Unknown state, and so know whether the node is capable of handling write requests. +

+

+ To do this, you must implement StateChangeListener.stateChange(), + which receives a StateChangeEvent object whenever it is + called. +

+

+ If the node is not in the Master state, then the node + can either reject write requests outright or, more + usefully, forward write requests to the Master. For an + example of an HA application that forwards write requests + and uses the StateChangeListener, see the + UpdateForwardingStockQuotes example. +

+

+ Alternatively, you can write a router based on an HA + Monitor. See Writing Monitor Nodes + for more information. +

+

+ Briefly, you can implement + StateChangeListener as follows. Notice that this partial + implementation relies on StateChangeEvent.getState() to + determine the state that the node has just transitioned to. + It then uses StateChangeEvent.getMasterNodeName() to + determine where write requests should be forwarded to in + the event that the new state is not + MASTER. +

+
private class Listener implements StateChangeListener {
+
+    private String currentMaster = null;
+
+    public void stateChange(StateChangeEvent se)
+        throws RuntimeException {
+
+        switch (se.getState()) {
+
+            case MASTER:
+                // Do whatever your code needs you to do when the 
+                // current node is the MASTER.  For example,
+                // set a flag to indicate that the local node
+                // is in the MASTER state. Here, we just fall
+                // through and do the same thing as if we
+                // transitioned to the REPLICA state.
+            case REPLICA:
+                // Again, do whatever your code needs done when
+                // a node is in the REPLICA state. At a minimum,
+                // you should probably capture which node is the
+                // current Master.
+                currentMaster = se.getMasterNodeName();
+                break;
+
+            // We get here if we have transitioned to the UNKNOWN
+            // state.
+            default:
+                currentMaster = null;
+                break;
+        }
+    }
+
+    public String getCurrentMasterName() {
+        return currentMaster;
+    }
+} 
+

+ In order to make use of the new listener, the application + must call ReplicatedEnvironment.setStateChangeListener(). + Note that this method can be called at any time after the + ReplicatedEnvironment handle has been created. Also, the + listener is set per environment, not per handle. So if you + set different listeners for different + ReplicatedEnvironment handles, the last listener + configured is used environment-wide. +

+
   EnvironmentConfig envConfig = new EnvironmentConfig();
+   envConfig.setAllowCreate(true);
+   envConfig.setTransactional(true);
+
+   // Identify the node   
+   ReplicationConfig repConfig = new ReplicationConfig();
+   repConfig.setGroupName("PlanetaryRepGroup");
+   repConfig.setNodeName("Saturn");
+   repConfig.setNodeHostPort("saturn.example.com:5001");
+
+   // Use the node at mars.example.com:5002 as a helper to find
+   // the rest of the group.
+   repConfig.setHelperHosts("mars.example.com:5002");
+
+   ReplicatedEnvironment repEnv =
+        new ReplicatedEnvironment(home, repConfig, envConfig); 
+   StateChangeListener listener = new Listener();
+   repEnv.setStateChangeListener(listener);  
+
+
+
+
+
+

Catching ReplicaWriteException

+
+
+
+

+ If you perform a Database write operation on a node that is not in the + Master state, a ReplicaWriteException is thrown when you attempt to commit the + transaction. Therefore, whenever performing database write + operations in an HA application, you should catch and + handle ReplicaWriteException. +

+

+ For example: +

+
Transaction txn = null;
+try {
+    txn = env.beginTransaction(null, null);
+    /* 
+     * Perform your write operations under the protection 
+     * of the transaction handle here.
+     */
+    txn.commit();
+} catch (ReplicaWriteException replicaWrite) { 
+    /* 
+     * Perform whatever reporting (logging) activities you want
+     * to do in order to acknowledge that the write operation(s)
+     * failed. Then abort the transaction.
+     */
+
+     if (txn != null) {
+        txn.abort();
+     }
+} 
+
+
+ + + diff --git a/docs/ReplicationGuide/runtransaction.html b/docs/ReplicationGuide/runtransaction.html new file mode 100644 index 0000000..f4f6c65 --- /dev/null +++ b/docs/ReplicationGuide/runtransaction.html @@ -0,0 +1,572 @@ + + + + + + Example Run Transaction Class + + + + + + + + + +
+
+
+
+

Example Run Transaction Class

+
+
+
+
+
+
+ + RunTransaction Class + +
+
+ + Using RunTransaction + +
+
+
+

+ Usage of JE HA requires you to handle many different HA-specific exceptions. While some of these are Master-specific and others are Replica-specific, your code may still need to handle both, because it is not uncommon for HA applications to use the same database access classes regardless of whether a node is in the Master state or the Replica state. +

+

+ The following class is an example class that can be used to + perform transactional reads and writes in an HA application. + This class is used by the on-disk HA examples that you can find + in your JE distribution (see Replication Examples for more information). However, we + think this particular example class is important enough that we + also describe it here. +

+
+
+
+
+

RunTransaction Class

+
+
+
+

+ The RunTransaction abstract class is + used to implement a utility class that performs database access + for HA applications. It provides all the + transaction error handling and retry framework that + is required for database access in an HA environment. +

+

+ Because RunTransaction is a class that + is meant to be used by different example HA applications, it + does not actually implement the database operations. Instead, + it provides an abstract method that must be implemented by the + HA application that uses RunTransaction. +

+

+ We begin by importing the classes that + RunTransaction uses. +

+
package je.rep.quote;
+
+import java.io.PrintStream;
+
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.OperationFailureException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.rep.InsufficientAcksException;
+import com.sleepycat.je.rep.InsufficientReplicasException;
+import com.sleepycat.je.rep.ReplicaConsistencyException;
+import com.sleepycat.je.rep.ReplicaWriteException;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+

+ Then we define a series of private data members that identify how + our HA transactions are going to behave in the event of an error + condition. +

+
abstract class RunTransaction {
+
+    /* The maximum number of times to retry the transaction. */
+    private static final int TRANSACTION_RETRY_MAX = 10;
+
+    /*
+     * The number of seconds to wait between retries when a sufficient
+     * number of replicas are not available for a transaction.
+     */
+    private static final int INSUFFICIENT_REPLICA_RETRY_SEC = 1;
+
+    /*
+     * Amount of time to wait to let a replica catch up before
+     * retrying.
+     */
+    private static final int CONSISTENCY_RETRY_SEC = 1;
+
+    /* Amount of time to wait after a lock conflict. */
+    private static final int LOCK_CONFLICT_RETRY_SEC = 1;
+
+    private final ReplicatedEnvironment env;
+    private final PrintStream out; 
+

+ Then we implement our class constructor, which is very simple + because all the heavy lifting is done by whatever application calls + this utility class. +

+
    RunTransaction(ReplicatedEnvironment repEnv, 
+                   PrintStream printStream) {
+        env = repEnv;
+        out = printStream;
+    } 
+

+ Now we implement our run() + method. This is what actually performs all the error checking and + retry work for the class. +

+

+ The run() method catches the exceptions + most likely to occur as we are reading and writing the database, + and then handles them, but it will also throw + InterruptedException and EnvironmentFailureException. +

+

+ InterruptedException can be thrown if the thread calling this + method is sleeping and some other thread interrupts it. The + exception is possible because this method calls Thread.sleep in + the retry cycle. +

+

+ EnvironmentFailureException can occur both when beginning a + transaction and also when committing a transaction. It means that + there is something significantly wrong with the node's environment. +

+

+ The readOnly parameter for this method is + used to indicate that the transaction will only perform + database reads. When that happens, the durability guarantee for + the transaction is changed to Durability.READ_ONLY_TXN + because that policy does not call for any acknowledgements. + This eliminates the possibility of an + InsufficientReplicasException being thrown from the + Environment.beginTransaction() operation. +

+
+    public void run(boolean readOnly)
+        throws InterruptedException, EnvironmentFailureException { 
+

+ Now we begin our retry loop and define our sleep cycle between retries. Initially, we do not sleep before retrying the transaction; however, some of the error conditions caught by this method will cause the thread to sleep before the operation is retried. After every sleep operation, the sleep time is reset to 0 because most retries do not benefit from a delay. +

+
        OperationFailureException exception = null;
+        boolean success = false;
+        long sleepMillis = 0;
+        final TransactionConfig txnConfig = readOnly ?
+         new TransactionConfig().setDurability(Durability.READ_ONLY_TXN) :
+         null;
+
+        for (int i = 0; i < TRANSACTION_RETRY_MAX; i++) {
+            /* Sleep before retrying. */
+            if (sleepMillis != 0) {
+                Thread.sleep(sleepMillis);
+                sleepMillis = 0;
+             } 
+

+ Now we create our transaction and perform the database work. The doTransactionWork() method is an abstract method that must be implemented by the application using this class. Otherwise, this is standard transaction begin/commit code that should hold no surprises for you. +

+
            Transaction txn = null;
+            try {
+                txn = env.beginTransaction(null, txnConfig);
+                doTransactionWork(txn); /* CALL APP-SPECIFIC CODE */
+                txn.commit();
+                success = true;
+                return; 
+

+ The first error case that we check for is + InsufficientReplicasException. This exception means that the + Master is not in contact with enough Electable Replicas to successfully + commit the transaction. It is possible that Replicas are still + starting up after an application restart, so we put the thread to + sleep before attempting the transaction again. +

+

+ InsufficientReplicasException is thrown by Transaction.commit(), + so we do have to perform the transaction all over again. +

+
            } catch (InsufficientReplicasException insufficientReplicas) {
+
+                /*
+                 * Retry the transaction.  Give replicas a chance to 
+                 * contact this master, in case they have not had a 
+                 * chance to do so following an election.
+                 */
+                exception = insufficientReplicas;
+                out.println(insufficientReplicas.toString());
+                sleepMillis = INSUFFICIENT_REPLICA_RETRY_SEC * 1000;
+                continue; 
+

+ Next we check for InsufficientAcksException. This exception + means that the transaction has successfully committed on the + Master, but not enough Electable Replicas have acknowledged the commit + within the allowed period of time. Whether you consider this to + be a successful commit depends on your durability policy. +

+

+ As provided here, the code considers this situation to be an unsuccessful commit. But if you have a lot of Electable Replicas and a strong durability guarantee on the Master, then you might still be able to consider this a successful commit. If so, you should set success = true; before returning from the method. +

+

+ For more information on this error case, see + Managing Acknowledgement Timeouts. +

+
            } catch (InsufficientAcksException insufficientAcks) {
+
+                /*
+                 * Transaction has been committed at this node. The 
+                 * other acknowledgments may be late in arriving, 
+                 * or may never arrive because the replica just 
+                 * went down.
+                 */
+
+                /*
+                 * INSERT APP-SPECIFIC CODE HERE: For example, repeat
+                 * idempotent changes to ensure they went through.
+                 *
+                 * Note that 'success' is false at this point, although
+                 * some applications may consider the transaction to be 
+                 * complete.
+                 */
+                out.println(insufficientAcks.toString());
+                txn = null;
+                return; 
+

+ Next we check for ReplicaWriteException. This happens when a write operation is attempted on a Replica. In response, any number of things can be done, ranging from reporting the problem to the application attempting the write operation and then aborting, to forwarding the write request to the Master. This particular method responds to the condition in whatever way the onReplicaWrite() method is implemented. +

+

+ For more information on how to handle this exception, see + Managing Write Requests at a Replica. +

+
            } catch (ReplicaWriteException replicaWrite) {
+
+                /*
+                 * Attempted a modification while in the Replica 
+                 * state.
+                 *
+                 * CALL APP-SPECIFIC CODE HERE: Cannot accomplish 
+                 * the changes on this node, redirect the write to 
+                 * the new master and retry the transaction there.  
+                 * This could be done by forwarding the request to 
+                 * the master here, or by returning an error to the
+                 * requester and retrying the request at a higher 
+                 * level.
+                 */
+                onReplicaWrite(replicaWrite);
+                return; 
+

+ Now we check for LockConflictException, which is thrown whenever + a transaction experiences a lock conflict with another thread. Note + that by catching this exception, we are also catching the + LockPreemptedException, which happens whenever the underlying HA + code "steals" a lock from an application transaction. The most + common cause of this is when the HA replication stream is updating + a Replica, and the Replica is holding a read lock that the + replication stream requires. +

+

+ Here, it is useful to sleep for a period of time before retrying + the transaction. +

+
            } catch (LockConflictException lockConflict) {
+
+                /*
+                 * Retry the transaction.  Note that LockConflictException
+                 * covers the HA LockPreemptedException.
+                 */
+                exception = lockConflict;
+                out.println(lockConflict.toString());
+                sleepMillis = LOCK_CONFLICT_RETRY_SEC * 1000;
+                continue; 
+

+ The last error we check for is ReplicaConsistencyException. This + exception can be thrown when the transaction begins. It means that + the beginTransaction() method has waited + too long for the Replica to catch up relative to the Master. This + situation does not really represent a failed transaction because + the transaction never had a chance to proceed in the first place. +

+

+ In any case, the proper thing to do is to put the thread to sleep + for a period of time so that the Replica has the chance to meet its + consistency requirements. Then we retry the transaction. +

+

+ Note that at this point in time, the transaction handle is in + whatever state it was in when beginTransaction() + was called. If the handle was in the null state before + attempting the operation, then it will still be in the null + state. The important thing to realize here is that the + transaction does not have to be aborted, because the + transaction never began in the first place. +

+

+ For more information on consistency policies, see + Managing Consistency. +

+
            } catch (ReplicaConsistencyException replicaConsistency) {
+
+                /*
+                 * Retry the transaction. The timeout associated with 
+                 * the ReplicaConsistencyPolicy may need to be 
+                 * relaxed if it's too stringent.
+                 */
+                exception = replicaConsistency;
+                out.println(replicaConsistency.toString());
+                sleepMillis = CONSISTENCY_RETRY_SEC * 1000;
+                continue; 
+

+ Finally, we abort our transaction and loop again as needed. onRetryFailure() is called if the transaction has been retried too many times (as defined by TRANSACTION_RETRY_MAX). It provides the option to log the situation. +

+
            } finally {
+
+                if (!success) {
+                    if (txn != null) {
+                        txn.abort();
+                    }
+
+                    /*
+                     * INSERT APP-SPECIFIC CODE HERE: Perform any 
+                     * app-specific cleanup.
+                     */
+                }
+            }
+        }
+
+        /*
+         * CALL APP-SPECIFIC CODE HERE: 
+         * Transaction failed, despite retries.
+         */
+        onRetryFailure(exception);
+    } 
+

+ Having done that, the class is almost complete. All that remains is to define a couple of methods, one of which is an abstract method that must be implemented by the application that uses this class. +

+

+ doTransactionWork() is an abstract method + where the actual database operations are performed. +

+

+ onReplicaWrite() is a method that should be + implemented by the HA application that uses this class. It is used to + define whatever action the Replica should + take if a write is attempted on it. For examples of how this is + used, see the next section. +

+

+ For this implementation of the class, we simply throw + the ReplicaWriteException that got us here in the first place. +

+
    abstract void doTransactionWork(Transaction txn);
+
+    void onReplicaWrite(ReplicaWriteException replicaWrite) {
+        throw replicaWrite;
+    } 
+

+ Finally, we implement onRetryFailure(), + which is what this class does if the transaction retry loop + goes through too many iterations. Here, we simply print the error + to the console. A more robust application should probably write the + error to the application logs. +

+
    void onRetryFailure(OperationFailureException lastException) {
+        out.println("Failed despite retries." +
+                    ((lastException == null) ?
+                     "" :
+                     " Encountered exception:" + lastException));
+    }
+} 
+
+
+
+
+
+

Using RunTransaction

+
+
+
+

+ Once you have implemented the RunTransaction class, it is fairly easy to use. Essentially, you only have to implement the RunTransaction.doTransactionWork() method so that it performs whatever database access you want. +

+

+ For example, the following method performs a read on an EntityStore used by the StockQuotes example HA application. Notice that the class is instantiated, doTransactionWork() is implemented, and the RunTransaction.run() method is called all in one place. This makes for fairly easy maintenance of the code. +

+
    private void printStocks(final PrintStream out)
+        throws InterruptedException {
+
+        new RunTransaction(repEnv, out) {
+
+            @Override
+            void doTransactionWork(Transaction txn) {
+
+                // dao is a DataAccessor class used to access
+                // an entity store.
+                final EntityCursor<Quote> quotes =
+                    dao.quoteById.entities(txn, null);
+                try {
+                    out.println("\tSymbol\tPrice");
+                    out.println("\t======\t=====");
+
+                    int count = 0;
+                    for (Quote quote : quotes) {
+                        out.println("\t" +  quote.stockSymbol +
+                                    "\t" + quote.lastTrade);
+                        count++;
+                    }
+                    out.println("\n\t" + count + " stock"
+                                + ((count == 1) ? "" : "s") +
+                                " listed.\n");
+                } finally {
+                    quotes.close();
+                }
+            }
+            }.run(true /*readOnly*/);
+
+        /* Output local indication of processing. */
+        System.out.println("Processed print request");
+    } 
+

+ In the previous example, we do not bother to override the RunTransaction.onReplicaWrite() method because this transaction is performing read-only access to the database. Regardless of whether the transaction is run on a Master or a Replica, ReplicaWriteException cannot be raised here, so we can safely use the default implementation. +

+

+ However, if we were running a transaction that performs a + database write, then we should probably do something with + onReplicaWrite() other than merely + re-throwing the exception. +

+

+ The following is an example usage of + RunTransaction that is also used in + the StockQuotes example. +

+
    void updateStock(final String line, final PrintStream printStream)
+        throws InterruptedException {
+
+        // Quote is a utility class used to parse a line of input
+        // obtained from the console.
+        final Quote quote = QuoteUtil.parseQuote(line);
+        if (quote == null) {
+            return;
+        }
+
+        new RunTransaction(repEnv, printStream) {
+
+            @Override
+            void doTransactionWork(Transaction txn) {
+                // dao is a Data Accessor class used to perform access
+                // to the entity store.
+                dao.quoteById.put(txn, quote);
+                /* Output local indication of processing. */
+                System.out.println("Processed update request: " + line);
+            }
+
+            // For this example, we simply log the error condition.
+            // For a more robust example, some other action might be
+            // taken; for example, log the situation and then route
+            // the write request to the Master.
+            @Override
+            void onReplicaWrite(ReplicaWriteException replicaWrite) {
+                /* Attempted a modification while in the replica state. */
+                printStream.println
+                    (repEnv.getNodeName() +
+                     " is not currently the master.  Perform the update" +
+                     " at the node that's currently the master.");
+            }
+            }.run(false /*not readOnly */);
+    } 
+
+
+ + + diff --git a/docs/ReplicationGuide/secondary.html b/docs/ReplicationGuide/secondary.html new file mode 100644 index 0000000..bea57d2 --- /dev/null +++ b/docs/ReplicationGuide/secondary.html @@ -0,0 +1,106 @@ + + + + + + Secondary Nodes + + + + + + + + + +
+
+
+
+

Secondary Nodes

+
+
+
+

+ If you are creating a replication group where some nodes will be connected to the rest of the group over higher latency network connections, typically because they are located in distant geographical regions, then it may be useful to create those distant nodes as Secondary nodes. Secondary nodes only serve as read-only Replicas. They cannot become Masters, participate in elections, or provide acknowledgements for commit operations. Secondary nodes can provide a distant location with quick access to read-only data, although the data may be somewhat out of date due to replication delays over the high latency link. By using Secondary nodes for the nodes at a distance, the Electable nodes can perform elections and provide acknowledgments without experiencing the network delays associated with the higher latency connections to the Secondary nodes. +

+

+ Here is an example of how to create a Secondary node that can join + an existing group of Electable nodes: +

+
EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+envConfig.setTransactional(true);
+
+// Identify the secondary node
+ReplicationConfig electableRepConfig =
+    new ReplicationConfig("PlanetaryRepGroup",
+                          "Mars",
+                          "mars.example.com:500");
+
+// Configure the node to be a secondary node
+repConfig.setNodeType(NodeType.SECONDARY);
+
+// Specify one of the electable nodes as a helper
+repConfig.setHelperHosts("jupiter.example.com:5002");
+
+ReplicatedEnvironment repEnv =
+   new ReplicatedEnvironment(envHome, repConfig, envConfig);
+

+ Note that, if you have created an environment with NodeType.SECONDARY, it is possible to convert the node from a Secondary node to an Electable node by restarting the environment with NodeType.ELECTABLE. Once an environment has been used as an Electable node, though, it is not possible to convert it back to a Secondary node. +
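+ For example, a conversion restart might look like the following sketch, which reuses the repConfig and envConfig objects from the example above: +
+
// Close the environment, then reopen it as an ELECTABLE node.
+repEnv.close();
+repConfig.setNodeType(NodeType.ELECTABLE);
+repEnv = new ReplicatedEnvironment(envHome, repConfig, envConfig); 
+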

+
+ + + diff --git a/docs/ReplicationGuide/timesync.html b/docs/ReplicationGuide/timesync.html new file mode 100644 index 0000000..4c79f65 --- /dev/null +++ b/docs/ReplicationGuide/timesync.html @@ -0,0 +1,84 @@ + + + + + + Time Synchronization + + + + + + + + + +
+
+
+
+

Time Synchronization

+
+
+
+

+ For best results, you should synchronize the clocks used by all + machines in a replication group. If you are using a time-based + consistency policy, this is an absolute requirement + (see Time Consistency Policies for more information). + Time synchronization is easily achieved using a mechanism like NTPD. +

+

+ In addition, time synchronization is a requirement for internal JE HA bookkeeping. For example, JE checks for clock skew between the Master and a Replica when the Replica performs its startup handshake with the Master. The handshake will abort and throw EnvironmentFailureException if the clock skew between the two machines is greater than the value set for the MAX_CLOCK_DELTA property. This property can be set using the ReplicationConfig.setMaxClockDelta() method, or in the JE configuration file using the je.rep.maxClockDelta property. +
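+ For example, the following sketch allows up to two seconds of clock skew between a Replica and the Master; the value is purely illustrative. The method takes a java.util.concurrent.TimeUnit argument: +
+
// Abort the startup handshake if the clocks differ by more
+// than two seconds.
+repConfig.setMaxClockDelta(2, TimeUnit.SECONDS); 
+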

+

+ Finally, well synchronized clocks make it easier to correlate + events in the logging output from different nodes in the group. +

+
+ + + diff --git a/docs/ReplicationGuide/two-node.html b/docs/ReplicationGuide/two-node.html new file mode 100644 index 0000000..7a8282f --- /dev/null +++ b/docs/ReplicationGuide/two-node.html @@ -0,0 +1,203 @@ + + + + + + Configuring Two-Node Groups + + + + + + + + + +
+
+
+
+

Configuring Two-Node Groups

+
+
+
+

+ A group needs at least a simple majority of active nodes in + order to elect a Master. This means that for a replication group of size + two, the failure of a single node means that the group as a + whole is no longer available. In some cases, it may be + desirable for the application to proceed anyway. If you are + using a two-node group, and you decide you want your application + to continue even if one of the nodes is unavailable, then you + can trade off some of your durability guarantees, as well as + potentially some of your performance, in exchange for a higher + availability guarantee. +

+

+ JE HA can explicitly relax the requirement for a simple majority of + nodes. This is only possible when the replication group size is + two. The application does this by designating one of the two + electable nodes as a Primary node. The other node in the group is + implicitly the Non-Primary node. +

+

+ At any given instant in time, exactly one of the two nodes can + be designated as the Primary. The application is responsible + for ensuring that this is the case. +

+

+ When the Non-Primary node is not available, the number of nodes + required for a simple majority is reduced to one. As a + consequence, the Primary is able to elect itself as the Master + and then commit transactions that require a simple majority to + commit. The Primary is said to be active + when it is operating in this state. The transition from a + designated Primary to an active Primary happens when the + Primary needs to contact the Non-Primary node, but fails to do so + for one of the following reasons: +

+
+
    +
  • +

    + An election is initiated by the Primary to determine a + new Master. This might happen because the Primary is + just starting up, or because the Primary has lost + contact with the Non-Primary. In either case, if the + election fails to establish a Master, the Primary is + activated and it becomes the Master. +

    +

    + Note that the Primary will attempt to locate a Master + until it has hit the retry limit as defined by the + ELECTIONS_PRIMARY_RETRIES configuration property. But + until the Primary has reached that limit, it will not + transition to the active state. +

    +
  • +
  • +

    + An Environment.beginTransaction() operation + is invoked on the Primary while it is in the Master + state, and it cannot establish contact with the + Non-Primary in the time period specified by the + INSUFFICIENT_REPLICAS_TIMEOUT configuration property. +

    +
  • +
  • +

    + A Transaction.commit() needing a commit acknowledgement + is invoked on the Primary while it is in the Master + state, and the Primary does not receive the commit + acknowledgement within the time period specified by the + REPLICA_ACK_TIMEOUT configuration property. +

    +
  • +
+
+

+ Both the INSUFFICIENT_REPLICAS_TIMEOUT and + REPLICA_ACK_TIMEOUT error cases are driven by the durability + policy that you are using for your transactions. See + Managing Durability + for more information. +

+

+ The three properties described above (ELECTIONS_PRIMARY_RETRIES, INSUFFICIENT_REPLICAS_TIMEOUT, and REPLICA_ACK_TIMEOUT) determine the time taken by the Primary to become active in the absence of the Non-Primary. Choosing smaller values for the timeouts and election retries will generally result in smaller service disruptions by activating the Primary more rapidly. The downside is that transient network glitches may result in unnecessary transitions to the active state, where the Primary operates with reduced durability. It is up to the application to make these tradeoffs appropriately based on its operating environment. +

+

+ When the Non-Primary becomes available again, the Primary becomes + aware of it as part of the Master/Replica handshake (see + Replica Startup). + At that time, the number of nodes required for a simple majority + reverts to two. That is, the Primary is no longer in the active + state. +

+

+ Your application must be very careful not to designate two nodes as Primaries. If both nodes are designated as Primaries, and the two nodes cannot communicate with one another for some reason, they could both consider themselves to be Masters and start accepting write transactions. This would violate a fundamental requirement of JE HA: at any given instant in time, only one node is permitted to write to the replicated environment. +

+

+ The Non-Primary always needs two nodes for a simple majority, and + as a result can never become a Master in the absence of the + Primary. If the Primary node fails, you can make provisions to + swap the Primary and Non-Primary designations, so that the + surviving node is now the Primary. The swap must be done + carefully to ensure that both nodes are not concurrently + designated Primaries. In particular, the failed node must come + up as a Non-Primary after it has been repaired. +

+

+ You designate a node as Primary using the mutable config + property DESIGNATED_PRIMARY. You set this property using + ReplicationMutableConfig.setDesignatedPrimary(). This property + is ignored for groups of size greater than two. +
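+ For example, assuming repEnv is the node's open ReplicatedEnvironment handle, the following sketch designates that node as the Primary: +
+
ReplicationMutableConfig mutableConfig = repEnv.getRepMutableConfig();
+mutableConfig.setDesignatedPrimary(true);
+repEnv.setRepMutableConfig(mutableConfig); 
+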

+

+ As stated above, this configuration can only be set for one node at a time. This condition is checked during the Master/Replica startup handshake, and if both are designated as Primary then an EnvironmentFailureException is thrown. However, you should not rely on this handshake process to guard against dual Primaries. Again, if both nodes are designated Primary at some point after the handshake occurs, and your application experiences a network partition event such that the two nodes can no longer communicate, then both nodes will become Masters. This is an error condition that will require you to lose data on at least one of the nodes if writes occurred on both nodes while the network partition was in progress. +

+
+ + + diff --git a/docs/ReplicationGuide/txn-management.html b/docs/ReplicationGuide/txn-management.html new file mode 100644 index 0000000..f66ea7d --- /dev/null +++ b/docs/ReplicationGuide/txn-management.html @@ -0,0 +1,799 @@ + + + + + + Chapter 3. Transaction Management + + + + + + + + + +
+
+
+
+

Chapter 3. Transaction Management

+
+
+
+ +

+ A JE HA application is essentially a transactional application + that distributes its data across multiple environments for you. The + assumption is that these environments are on separate physical + hosts, so the distribution of your data is performed over TCP/IP + connections. +

+

+ Because of this distribution activity, several new dimensions are + added to your transactional management. In particular, there is + more to consider in the areas of durability, consistency and + performance than you have to think about for single-environment + applications. +

+

+ Before continuing, some definitions are in order: +

+
+
    +
  1. +

    + Durability is defined by how likely it is that your data will continue to exist in the presence of hardware breakage or a software crash. The first goal of any durability scheme is to get your data stored onto physical media. After that, to make your data even more durable, you will usually start to consider your backup schemes. +

    +

    + By its very nature, a JE HA application is offering you + more data durability than does a traditional transactional + application. This is because your HA application is + distributing your data across multiple environments (which + we assume are on multiple physical machines), which means + that data backups are built into the application. The more + backups, the more durable your application is. +

    +
  2. +
  3. +

    + Consistency is defined by how + current your data is. In a traditional + transactional application, consistency is guaranteed by + allowing you to group multiple read and write operations in + a single atomic unit, which is defined by the transactional + handle. This level of consistency continues to exist for + your HA application, but in addition you must concern + yourself with how consistent (or correct) the data is + across the various nodes in the replication group. +

    +

    + Because the replication group is a collection of different machines connected by a network, some delay in data updates across the Replicas is naturally to be expected. The amount of delay you will see is determined by the number and size of the data updates, the performance of your network, the performance of the hardware on which your nodes are running, and whether your nodes are persistently available on the network (as opposed to being down, offline, or otherwise off the network for some period of time). Because they are not included in acknowledgments, Secondary nodes may tend to show greater delay than Electable nodes. +

    +

    + A highly consistent HA application, then, is an application + where the data across all nodes in the replication group is + identical or very nearly identical all the time. A not very + consistent HA application is one where data across the + replication group is frequently stale or out of date + relative to the data contained on the Master node. +

    +
  4. +
  5. +

    + Performance is simply how fast your HA + application is at performing read and write requests. By + its very nature, an HA application tends to perform much + better than a traditional transactional application at + read-only requests. This is because you have multiple + machines that are available to service read-only requests. + The only tricky thing here is to make sure you load balance + your read requests appropriately across all your nodes so + that you do not have some nodes that are swamped with + requests while others are mostly idle. +

    +

    + Write performance for an HA application is a mixed bag. + Depending on your goals, you can make the HA application + perform better than a traditional transactional + application that is committing writes to the disk + synchronously. However, in doing so + you will compromise your data's durability and consistency + guarantees. This is no different than configuring a + traditional transactional application to commit + transactions asynchronously to disk, and so lose the + guarantee that the write is stored on physical media before + the transaction completes. However, the good news is that + because of the distributed nature of the HA application, + you have a better durability guarantee than the + asynchronously committing single-environment transactional + application. That is, by "committing to the network" you + have a fairly good chance of a write making it to disk + somewhere on some node. +

    +

    + Mostly, though, HA applications commit a transaction and + then wait for an acknowledgement from some number of nodes + before the transaction is complete. An HA application + running with quorum acknowledgements and write no sync + durability can exhibit equal or better write performance + than a single node standalone application, but your write + performance will ultimately depend on your application's + configuration. +

    +
  6. +
+
+

+ As you design your HA application, remember that each of these + characteristics are interdependent. You cannot, for example, + configure your application to have extremely high durability + without sacrificing some amount of performance. A highly + consistent application may have to make sacrifices in durability. A + high performance HA application may require you to make trade-offs + in both durability and consistency. +

+
+
+
+
+

Managing Durability

+
+
+
+ +

+ A highly durable application is one where you attempt to make + sure you do not lose data, ever. This is frequently (but not + always) one of the most pressing design considerations for any + application that manages data. After all, data often equals + money because the data you are managing could involve billing + or inventory information. But even if your application is not + managing information that directly relates to money, a loss of + data may very well cost your enterprise money in terms of the + time and resources necessary to reacquire the information. +

+

+ HA applications attempt to increase their data durability guarantees by distributing data writes across multiple physical machines on the network. By spreading the data in this way, you are placing it on stable storage behind multiple physical hard drives, CPUs, and power supplies. Obviously, the more physical resources available to contain your data, the more durable it is. +

+

+ However, as you increase your data durability, you will probably lower your consistency guarantees and your write performance. Read performance may also take a hit, depending on how many physical machines you include in the mix and how high a durability guarantee you want. In order to understand why, you have to understand how JE HA applications handle transactional commits. +

+
+
+
+
+

Durability Controls

+
+
+
+

+ By default, JE HA makes transactional commit operations + on the Master wait to return from the operation until they receive + acknowledgements from some number of Replicas. Each + Replica, in turn, will only return an acknowledgement once + the write operation has met whatever durability requirement + exists for the Replica. (For example, you can require the + Replicas to successfully flush the write operation to disk + before returning an acknowledgement to the Master.) +

+
+

Note

+

+ Be aware that write operations received on the Replica + from the Master have lock priority. This means that if + the Replica is currently servicing a read request, it + might have to retry the read operation should a write + from the Master preempt the read lock. For this reason, + you can see read performance degradation if you have + Replicas that are heavily loaded with read requests at + a time when the Master is performing a lot of write + activity. The solution to this is to add + additional nodes to your replication group and/or better + load-balance your read requests across the Replicas. +

+
+

+ There are three things to control when you design your + durability guarantee: +

+
+
    +
  • +

    + Whether the Master synchronously writes the + transaction to disk. This is no different from the + durability consideration that you have for a + stand-alone transactional application. +

    +
  • +
  • +

    + Whether the Replica synchronously writes the + transaction to disk before returning an + acknowledgement to the Master, if any. +

    +
  • +
  • +

    + How many, if any, Replicas must acknowledge the + transaction commit before the commit operation on + the Master can complete. +

    +
  • +
+
+

+ You can configure your durability policy on a + transaction-by-transaction basis using + TransactionConfig.setDurability(), or on an + environment-wide basis using + EnvironmentMutableConfig.setDurability(). +
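+ For example, the following sketch applies a durability policy both ways; it assumes env is an open ReplicatedEnvironment handle, and the particular policies chosen are purely illustrative: +
+
Durability durability =
+    new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,  // Master sync
+                   Durability.SyncPolicy.NO_SYNC,        // Replica sync
+                   Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+
+// On a per-transaction basis:
+TransactionConfig txnConfig = new TransactionConfig();
+txnConfig.setDurability(durability);
+Transaction txn = env.beginTransaction(null, txnConfig);
+
+// Environment-wide:
+EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+mutableConfig.setDurability(durability);
+env.setMutableConfig(mutableConfig); 
+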

+
+
+
+
+
+

Commit File Synchronization

+
+
+
+

+ Synchronization policies are described in the + Berkeley DB, Java Edition Getting Started with Transaction Processing guide. However, for the + sake of completeness, we briefly cover this topic here + again. +

+

+ You define your commit synchronization policy by using + a Durability class object. For HA applications, the + Durability class constructor must define the + synchronization policy for both the Master and the Master's + replicas. The synchronization policy does not have to be + the same for both Master and Replica. +

+

+ You can use the following constants to define a + synchronization policy: +

+
+
    +
  • +

    + Durability.SyncPolicy.SYNC +

    +

    + Write and synchronously flush the log to disk upon transaction commit. This offers the most durable transaction configuration because the commit operation will not return until all of the disk I/O is complete. But, conversely, this offers the worst possible write performance because disk I/O is an expensive and time-consuming operation. +

    +
  • +
  • +

    + Durability.SyncPolicy.NO_SYNC +

    +

    + Do not synchronously flush the log on transaction + commit. All of the transaction's write activity is + held entirely in memory when the transaction + completes. The log will eventually make it to disk + (barring an application or hardware crash of some kind). + However, the application's thread of control is + free to continue operations without waiting for + expensive disk I/O to complete. +

    +

    + This represents the least durable configuration + that you can provide for your transactions. But it + also offers much better write performance than the + other options. +

    +
  • +
  • +

    + Durability.SyncPolicy.WRITE_NO_SYNC +

    +

    + Log data is synchronously written to the OS's file + system buffers upon transaction commit, but the + data is not actually forced to disk. This protects + your write activities from an application crash, + but not from a hardware failure. +

    +

    + This policy represents an intermediate durability + guarantee. It is not as strong as SYNC, but is + also not as weak as NO_SYNC. Conversely, it + performs better than SYNC (because your + application does not have to wait for actual disk + I/O), but it does not perform quite as well as NO_SYNC + (because data still must be written to the file + system buffers). +

    +
  • +
+
+
+
+
+
+
+

Managing Acknowledgements

+
+
+
+

+ Whenever a Master commits a transaction, by default it + waits for acknowledgements from a majority of its Electable Replicas + before the commit operation on the Master completes. + By default, Electable Replicas respond with an acknowledgement once they have + successfully written the transaction to their local + disk. Note that Secondary Replicas do not ever provide + acknowledgements. +

+

+ Acknowledgements are expensive operations. They involve both + network traffic and disk I/O at multiple physical + machines. So on the one hand, acknowledgements help to + increase your durability guarantees. On the other, they + hurt your application's performance, and may have a + negative impact on your application's consistency + guarantee. +

+

+ For this reason, JE allows you to manage + acknowledgements for your HA application. As is the case + with synchronization policies, you do this using the + Durability class. As a part of this class' constructor, + you can provide it with one of the following constants: +

+
+
    +
  • +

    + Durability.ReplicaAckPolicy.ALL +

    +

    + All of the Electable Replicas must acknowledge the + transactional commit. This represents the highest + possible durability guarantee for your HA application, + but it also represents the poorest performance. For + best results, do not use this policy unless your + replication group contains a very small number of + electable replicas, and those replicas are all on + extremely reliable networks and servers. +

    +
  • +
  • +

    + Durability.ReplicaAckPolicy.NONE +

    +

    + The Master will not wait for any acknowledgements + from its Replicas. In this case, your durability + guarantee is determined entirely by the + synchronization policy your Master is using for its + transactional commits. This policy also represents + the best possible choice for write + performance. +

    +
  • +
  • +

    + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY +

    +

    + A simple majority of the Electable Replicas must return + acknowledgements before the commit operation + returns on the Master. This is the default policy. + It should work well for most applications unless + you need an extremely high durability + guarantee, have a very large number of Electable Replicas, or + you otherwise have performance concerns that cause + you to want to avoid acknowledgements altogether. +

    +
  • +
+
+

+ You can configure your durability policy, including its + acknowledgement policy, on a + transaction-by-transaction basis using + TransactionConfig.setDurability(), or on an + environment-wide basis using + EnvironmentMutableConfig.setDurability(). + For example: +

+
   EnvironmentConfig envConfig = new EnvironmentConfig();
+   envConfig.setAllowCreate(true);
+   envConfig.setTransactional(true);
+
+   // Use write-no-sync durability for the transactional commit on the 
+   // Master, and no synchronization on the Replicas. Also,
+   // wait for acknowledgements from a simple majority of Replicas.
+   Durability durability =
+          new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+                         Durability.SyncPolicy.NO_SYNC,
+                         Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+
+   envConfig.setDurability(durability);
+
+   // Identify the node
+   ReplicationConfig repConfig = 
+        new ReplicationConfig("PlanetaryRepGroup",
+                              "Jupiter",
+                              "jupiter.example.com:5002");
+
+   // Use the node at mercury.example.com:5001 as a helper to find
+   // the rest of the group.
+   repConfig.setHelperHosts("mercury.example.com:5001");
+
+   ReplicatedEnvironment repEnv =
+      new ReplicatedEnvironment(home, repConfig, envConfig); 
+

+ Note that at the time of a transaction commit, if the + Master is not in contact with enough Electable Replicas to meet the + transaction's durability policy, the transaction commit + operation will throw an InsufficientReplicasException. + The proper action to take upon encountering this exception + is to abort the transaction, wait a small period of time in + the hopes that more Electable Replicas will become available, then + retry the transaction. See + Example Run Transaction Class + for example code that implements this retry loop. +
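
+

+ In outline, such a retry loop might look like the following + sketch, where performWrites() is a hypothetical stand-in for + your application's write logic and repEnv is an open + ReplicatedEnvironment handle: +

+
   int maxRetries = 3;
+   for (int i = 0; i < maxRetries; i++) {
+       Transaction txn = repEnv.beginTransaction(null, null);
+       try {
+           performWrites(txn);  // hypothetical application writes
+           txn.commit();
+           break;
+       } catch (InsufficientReplicasException ire) {
+           // Abort, wait briefly in the hopes that more Electable
+           // Replicas become available, then retry the transaction.
+           txn.abort();
+           try {
+               Thread.sleep(1000);
+           } catch (InterruptedException ie) {
+               Thread.currentThread().interrupt();
+               break;
+           }
+       }
+   }
+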

+

+ You can also see an InsufficientReplicasException when + you begin a transaction if the Master is not in + contact with enough Electable Replicas to meet the acknowledgement + policy. To manage this, you can configure how long the + transaction begin operation will wait for enough Electable + Replicas before throwing this exception. You use the + INSUFFICIENT_REPLICAS_TIMEOUT configuration option, which + you can set using the ReplicationConfig.setConfigParam() + method. +
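
+

+ For example, the following sketch allows the transaction begin + operation to wait up to ten seconds for enough Electable + Replicas. The value shown is illustrative only, and repConfig is + assumed to be a ReplicationConfig object like the one in the + examples above: +

+
   repConfig.setConfigParam(
+       ReplicationConfig.INSUFFICIENT_REPLICAS_TIMEOUT, "10 s");
+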

+
+
+
+
+

Managing Acknowledgement Timeouts

+
+
+
+

+ In addition to the acknowledgement policies, you have + to also consider your replication acknowledgement + timeout value. This value specifies the maximum amount + of time that the Master will wait for acknowledgements + from its Electable Replicas. +

+

+ If the + Master commits a transaction and the timeout value is + exceeded while waiting for enough acknowledgements, the + Transaction.commit() method will throw an + InsufficientAcksException exception. In this event, + the transaction has been committed on the Master, so at + least locally the transaction's durability policy has + been met. However, the transaction might not have been + committed on enough Electable Replicas to guarantee your HA + application's overall durability policy. +
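
+

+ For example, here is a minimal sketch of catching this condition + at commit time, assuming a transaction handle named txn. How + your application should actually respond is discussed below: +

+
   try {
+       txn.commit();
+   } catch (InsufficientAcksException iae) {
+       // The commit is durable on the Master, but may not have
+       // reached enough Electable Replicas. Choose one of the
+       // responses discussed below; here we simply log it.
+       System.err.println("Commit lacked acknowledgements: " + iae);
+   }
+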

+

+ There can be a lot of reasons why the Master did not + receive enough acknowledgements within the timeout period, + such as a slow network, a network failure before or + after a transaction was transmitted to a replica, or a + failure of a replica. These failures have different + consequences for whether a transaction will become + durable or will be subject to rollback. As a result, an + application may respond in various ways, and for + example choose to: +

+
+
    +
  • +

    + Do nothing, assuming that the transaction will + eventually propagate to enough replicas to + become durable. +

    +
  • +
  • +

    + Retry the operation in a new transaction, which + may succeed or fail depending on whether the + underlying problems have been resolved. +

    +
  • +
  • +

    + Retry using a larger timeout interval and + return to the original timeout interval at a + later time. +

    +
  • +
  • +

    + Fall back temporarily to a read-only mode. +

    +
  • +
  • +

    + Increase the durability of the transaction on + the Master by ensuring that the changes are + flushed to the operating system's buffers or to + the disk. +

    +
  • +
  • +

    + Give up and report an error at a higher level, + perhaps to allow an administrator to check the + underlying cause of the failure. +

    +
  • +
+
+

+ The default value for this timeout is 5 seconds, which + should work for most cases where an acknowledgement + policy is in use. However, if you have a very large + number of Electable Replicas, or if you have a very unreliable + network, then you might see a lot of + InsufficientAcksException exceptions. In this case, + you should either increase this timeout value, relax + your acknowledgement policy, or find out why your + hardware and/or network is performing so poorly. +

+
+

Note

+

+ You can also see InsufficientAcksException + or InsufficientReplicasException + exceptions if one or more replicas have exceeded + their disk usage thresholds. See + Suspending Writes Due to Disk Thresholds + for more information. +

+
+

+ You can configure the acknowledgement timeout using the + ReplicationConfig.setReplicaAckTimeout() method. +

+
   EnvironmentConfig envConfig = new EnvironmentConfig();
+   envConfig.setAllowCreate(true);
+   envConfig.setTransactional(true);
+
+   // Use write-no-sync durability for the transactional commit on the 
+   // Master, and no synchronization on the Replicas. Also,
+   // wait for acknowledgements from a simple majority of Replicas.
+   Durability durability =
+          new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+                         Durability.SyncPolicy.NO_SYNC,
+                         Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+
+   envConfig.setDurability(durability);
+
+   // Identify the node
+   ReplicationConfig repConfig = 
+        new ReplicationConfig("PlanetaryRepGroup",
+                              "Jupiter",
+                              "jupiter.example.com:5002");
+ 
+   // Use the node at mercury.example.com:5001 as a helper to find the rest
+   // of the group.
+   repConfig.setHelperHosts("mercury.example.com:5001");
+
+   // Set an acknowledgement timeout that is slightly longer
+   // than the default 5 seconds.
+   repConfig.setReplicaAckTimeout(7, TimeUnit.SECONDS);
+
+   ReplicatedEnvironment repEnv =
+      new ReplicatedEnvironment(home, repConfig, envConfig); 
+
+
+
+
+ + + diff --git a/docs/ReplicationGuide/txnrollback.html b/docs/ReplicationGuide/txnrollback.html new file mode 100644 index 0000000..f87e179 --- /dev/null +++ b/docs/ReplicationGuide/txnrollback.html @@ -0,0 +1,119 @@ + + + + + + Managing Transaction Rollbacks + + + + + + + + + +
+
+
+
+

Managing Transaction Rollbacks

+
+
+
+

+ In the event that a new Master is elected, it is possible for a + Replica to find that some of its logs are ahead of the logs + held by the Master. While this is unlikely to occur, your code + must still be ready to deal with the situation. When it + happens, you must roll back the transactions represented by the + logs that are ahead of the Master. +

+

+ You do this by simply closing all your ReplicatedEnvironment + handles, and then reopening. During the handshaking process + that occurs when the Replica joins the replication group, the + discrepancy in log files is resolved for you. +

+

+ Note that the problem of logs on replicas being ahead of the + log on the master is unlikely to occur because the election + mechanism favors nodes with the most recent logs. When + selecting a master, a simple majority of nodes are required to + vote on the choice of master, and they will vote for the node + with the most recent log files. When the problem does occur, + though, it results in the updates reflected in the Replica's + log being discarded when the log is rolled back. +

+

+ Logs on a Replica can be ahead of the logs on the Master if + network or node failures result in transactions becoming + durable on fewer than a majority of the nodes in the + replication group. This reduced durability is more likely in + cases where one or more Replicas show large replication lags + relative to the Master. Administrators should monitor + replication lags and evaluate whether they are caused by issues + with network or host performance. Applications can reduce the + chance of transaction rollbacks by avoiding the use of weak + durability requirements like + ReplicaAckPolicy.NONE or a + ReplicationMutableConfig.NODE_PRIORITY of + zero. +

+

+ JE HA lets your application know that a transaction must be + rolled back by throwing RollbackException. This exception can + be thrown by any operation that is performing routine database + access. +

+
    ReplicatedEnvironment repEnv = new ReplicatedEnvironment(...);
+    boolean doWork = true;
+
+    while (doWork) {
+        try {
+            // performSomeDBWork is the method that
+            // performs your database access.
+            doWork = performSomeDBWork();
+        } catch (RollbackException rb) {
+            if (repEnv != null) {
+                repEnv.close();
+                repEnv = new ReplicatedEnvironment(...);
+            }
+        }
+    } 
+
+ + + diff --git a/docs/ReplicationGuide/utilities.html b/docs/ReplicationGuide/utilities.html new file mode 100644 index 0000000..86f1d66 --- /dev/null +++ b/docs/ReplicationGuide/utilities.html @@ -0,0 +1,472 @@ + + + + + + Chapter 4. Utilities + + + + + + + + + +
+
+
+
+

Chapter 4. Utilities

+
+
+
+ +

+ This chapter discusses the APIs that you use to administer and + manage your replication group. +

+
+
+
+
+

Administering the Replication Group

+
+
+
+ +

+ There are several administrative activities that an + application might want to perform relative to a replication group. + These activities can be performed by electable or secondary nodes in the + replication group, or by applications that do not have access to + a replicated environment (in other words, utilities designed to + help administer and monitor the group). All of these functions + can be accessed using the ReplicationGroupAdmin class. +

+

+ You can use the ReplicationGroupAdmin class to: +

+
+
    +
  1. +

    + List replication group members. +

    +
  2. +
  3. +

    + Locate the current Master. +

    +
  4. +
  5. +

    + Remove electable nodes from the replication group. +

    +
  6. +
+
+

+ You instantiate an instance of the ReplicationGroupAdmin + class by providing it with the name of the replication group + that you want to administer, as well as a Set of + InetSocketAddress objects. The InetSocketAddress objects + are used as a list of helper hosts that the application can use + to perform administrative functions. For example: +

+
...
+
+    Set<InetSocketAddress> helpers =
+        new HashSet<InetSocketAddress>();
+    InetSocketAddress helper1 =
+        new InetSocketAddress("node1.example.com", 1550);
+    InetSocketAddress helper2 =
+        new InetSocketAddress("node2.example.com", 1550);
+
+    helpers.add(helper1);
+    helpers.add(helper2);
+
+    ReplicationGroupAdmin rga =
+        new ReplicationGroupAdmin("test_rep_group", helpers);   
+
+
+
+
+

Listing Group Members

+
+
+
+

+ To list all the members of a replication group, use the + ReplicationGroupAdmin.getGroup() method. This returns an + instance of ReplicationGroup. You can then: +

+
+
    +
  1. +

    + use the ReplicationGroup.getNodes() method to locate + all the nodes in the replication group. +

    +
  2. +
  3. +

    + use the ReplicationGroup.getElectableNodes() method to locate all + the electable nodes in the replication group. +

    +
  4. +
  5. +

    + use the ReplicationGroup.getSecondaryNodes() + method to locate all the secondary nodes in the + replication group. +

    +
  6. +
  7. +

    + use ReplicationGroup.getMonitorNodes() to locate all the + monitor nodes that currently belong to the replication + group. +

    +
  8. +
+
+
+

Note

+

+ In order to obtain a ReplicationGroup object, + the process must be able to discover the current + Master. This means that the helper nodes you provide + when you instantiate the ReplicationGroupAdmin class + must be reachable and able to identify the current + Master. If they cannot, then these methods throw an + UnknownMasterException. +

+
+

+ All of these methods return a set of ReplicationNode objects, which + you can then use to query for node information, such as its + name, the InetSocketAddress where the node is located, + and the node's type. +

+

+ For example: +

+
...
+
+    Set<InetSocketAddress> helpers =
+        new HashSet<InetSocketAddress>();
+    InetSocketAddress helper1 =
+        new InetSocketAddress("node1.example.com", 1550);
+    InetSocketAddress helper2 =
+        new InetSocketAddress("node2.example.com", 1550);
+
+    helpers.add(helper1);
+    helpers.add(helper2);
+
+    ReplicationGroupAdmin rga =
+        new ReplicationGroupAdmin("test_rep_group", helpers); 
+
+    try {
+        ReplicationGroup rg = rga.getGroup();
+        for (ReplicationNode rn : rg.getElectableNodes()) {
+            // Do something with the replication node.
+        }
+    } catch (UnknownMasterException ume) {
+        // Can't find a master
+    }   
+
+
+
+
+
+

Locating the Current Master

+
+
+
+

+ You can use the ReplicationGroupAdmin class to locate the + current Master in the replication group. This information + is available using the + ReplicationGroupAdmin.getMasterNodeName() and + ReplicationGroupAdmin.getMasterSocket() methods. +

+

+ ReplicationGroupAdmin.getMasterNodeName() returns a string + that holds the node name associated with the Master. +

+

+ ReplicationGroupAdmin.getMasterSocket() returns an + InetSocketAddress class object that represents the host + and port where the Master can currently be found. +

+

+ Both methods will throw an UnknownMasterException if the + helper nodes are not able to identify the current Master. +

+

+ For example: +

+
import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.sleepycat.je.rep.UnknownMasterException;
+import com.sleepycat.je.rep.util.ReplicationGroupAdmin;
+
+...
+
+    Set<InetSocketAddress> helpers =
+        new HashSet<InetSocketAddress>();
+    InetSocketAddress helper1 =
+        new InetSocketAddress("node1.example.com", 1550);
+    InetSocketAddress helper2 =
+        new InetSocketAddress("node2.example.com", 1550);
+
+    helpers.add(helper1);
+    helpers.add(helper2);
+
+    ReplicationGroupAdmin rga =
+        new ReplicationGroupAdmin("test_rep_group", helpers); 
+
+    try {
+        InetSocketAddress master = rga.getMasterSocket();
+        System.out.println("Master is on host " + 
+                    master.getHostName() + " at port " + 
+                    master.getPort()); 
+    } catch (UnknownMasterException ume) {
+        // Can't find a master
+    }   
+
+
+
+
+
+

Adding and Removing Nodes from the Group

+
+
+
+

+ In order to add nodes to a replication group, you simply + start up a node and identify at least one helper node that + can identify the current Master to the new node. After the + new node has been populated with a sufficiently current copy of + the data contained on the Master, the new node is + automatically a member of the replication group. +

+

+ An electable node's status as a member of the group is persistent. + That is, it is a member of the group regardless of whether + it is running, and whether other nodes in the group can + reach it over the network. This means that for the purposes + of elections and message acknowledgements, the node counts + toward the total number of nodes that must respond and/or + participate in an event. +

+

+ If, for example, you are using a durability guarantee that + requires all electable nodes in the replication group to acknowledge + a transaction commit on the Master, and if a node is down + or otherwise unavailable for some reason, then the commit + cannot complete on the Master because it will not receive + acknowledgements from all the electable nodes in the replication + group. +

+

+ Similarly, elections for Masters require a bare majority of + electable nodes to participate in the election. If so many nodes are + shut down or unavailable due to a network partition event + that a bare majority of electable nodes cannot be found to hold the + election, then your replication group can perform no write + activities. This situation persists until at least enough + nodes come back online to represent a bare majority of the + electable nodes belonging to the replication group. +

+

+ For this reason, if you have an electable node that you intend to + shut down for a long time, then you should remove that node + from the replication group. You do this using the + ReplicationGroupAdmin.removeMember() method. Note the + following rules when using this method: +

+
+
    +
  • +

    + For best results, shut down the node before removing + it. +

    +
  • +
  • +

    + You use the node's name (not the host/port pair) to identify the node + you want to remove from the group. If the node name + that you specify is unknown to the replication + group, a MemberNotFoundException is thrown. If + it names a secondary node, an + IllegalArgumentException is thrown. +

    +
  • +
  • +

    + Once removed, the electable node can no longer connect to the + Master, nor can it participate in elections. If + you want to reconnect the node to the Master (that + is, you want to add it back to the replication group), you + will have to do so using a different node name than + the node was using when it was removed from the + group. +

    +
  • +
  • +

    + An active Master cannot be removed from the group. To + remove the active Master, either shut it down or wait + until it transitions to the Replica state. + If you attempt to remove an active Master, a + MasterStateException is thrown. +

    +
  • +
+
+

+ For example: +

+
...
+
+    Set<InetSocketAddress> helpers =
+        new HashSet<InetSocketAddress>();
+    InetSocketAddress helper1 =
+        new InetSocketAddress("node1.example.com", 1550);
+    InetSocketAddress helper2 =
+        new InetSocketAddress("node2.example.com", 1550);
+
+    helpers.add(helper1);
+    helpers.add(helper2);
+
+    ReplicationGroupAdmin rga =
+        new ReplicationGroupAdmin("test_rep_group", helpers); 
+
+    try {
+        rga.removeMember("NODE3");
+    } catch (MemberNotFoundException mnfe) {
+        // Specified a node name that is not known to the
+        // replication group.
+    } catch (MasterStateException mse) {
+        // Tried to remove an active Master
+    }   
+
+
+
+ + + diff --git a/docs/TransactionGettingStarted/BerkeleyDB-JE-Txn.pdf b/docs/TransactionGettingStarted/BerkeleyDB-JE-Txn.pdf new file mode 100644 index 0000000..5927070 Binary files /dev/null and b/docs/TransactionGettingStarted/BerkeleyDB-JE-Txn.pdf differ diff --git a/docs/TransactionGettingStarted/abortresults.html b/docs/TransactionGettingStarted/abortresults.html new file mode 100644 index 0000000..2ee7c59 --- /dev/null +++ b/docs/TransactionGettingStarted/abortresults.html @@ -0,0 +1,86 @@ + + + + + + Aborting a Transaction + + + + + + + + + +
+
+
+
+

Aborting a Transaction

+
+
+
+

+ When you abort a transaction, all database + + or store + + modifications performed + under the protection of the transaction are discarded, and all + locks currently held by the transaction are released. In this event, + your data is simply left in the + state that it was in before the transaction began performing data + modifications. +

+

+ Once you have aborted a transaction, the transaction + handle that you used for the transaction is no longer valid. To + perform database activities under the control of a new + transaction, you must obtain a fresh transactional handle. +

+

+ To abort a transaction, call + + + Transaction.abort(). + + +
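
+

+ For example, the following minimal sketch aborts the transaction + if any write fails. It assumes an open, transaction-enabled + environment handle named myEnv: +

+
   Transaction txn = myEnv.beginTransaction(null, null);
+   try {
+       // Perform your database or store writes here.
+       txn.commit();
+   } catch (DatabaseException de) {
+       // Discard the transaction's work and release its locks.
+       txn.abort();
+       txn = null;  // The handle is no longer valid.
+   }
+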

+
+ + + diff --git a/docs/TransactionGettingStarted/apireq.html b/docs/TransactionGettingStarted/apireq.html new file mode 100644 index 0000000..8414095 --- /dev/null +++ b/docs/TransactionGettingStarted/apireq.html @@ -0,0 +1,176 @@ + + + + + + Application Requirements + + + + + + + + + +
+
+
+
+

Application Requirements

+
+
+
+

+ In order to use transactions, your application has certain + requirements beyond those of non-transactional + applications. They are: +

+
+
    +
  • +

    + Transaction subsystem. +

    +

    + In order to use transactions, you must explicitly + enable the transactional subsystem for your + application, and this must be done at the time that + your environment is first created. +

    +
  • +
  • +

    + + + Transaction + + handles. +

    +

    + In order to obtain the atomicity guarantee offered by + the transactional subsystem (that is, combine multiple + operations in a single unit of work), your application must use + transaction handles. These handles are obtained from your + + + + Environment + objects. They should normally be short-lived, and their usage is + reasonably simple. To complete a transaction and save + the work it performed, you + call its commit() method. To + complete a transaction and discard its work, you call its + abort() method. +

    +

    + In addition, it is possible to use auto commit if you want + to transaction protect a single write operation. Auto + commit allows a transaction to be used without + obtaining an explicit transaction handle. See + Auto Commit + for information on how to use auto commit. +

    +
  • +
  • +

    + Entity Store +

    +

    + If you are using the DPL, then you must + configure your entity stores for transactional + support before opening them (that is, before + obtaining a primary index from them for the first + time). +

    +
  • +
  • +

    + Database open requirements. +

    +

    + + + Your + + application must transaction protect the database + + opens, + and any secondary index associations, + + if subsequent operations on the databases are to be transaction + protected. The database open and secondary index + association are commonly transaction protected using + auto commit. +

    +
  • +
  • +

    + Deadlock detection. +

    +

    + Typically transactional applications use multiple + threads of control when accessing the database. + Any time multiple threads are used on a single resource, + the potential for lock contention arises. In turn, lock + contention can lead to deadlocks. See + Locks, Blocks, and Deadlocks + for more information. +

    +

    + Therefore, transactional applications must frequently + include code for detecting and responding to deadlocks. + Note that this requirement is not + specific to transactions + – you can certainly write concurrent + non-transactional JE applications. Further, not + every transactional application uses concurrency and + so not every transactional application must + manage deadlocks. Still, deadlock management is so + frequently a characteristic of transactional + applications that we discuss it in this + book. See Concurrency + for more information. +

    +
  • +
+
+
+ + + diff --git a/docs/TransactionGettingStarted/autocommit.html b/docs/TransactionGettingStarted/autocommit.html new file mode 100644 index 0000000..bf6f71f --- /dev/null +++ b/docs/TransactionGettingStarted/autocommit.html @@ -0,0 +1,166 @@ + + + + + + Auto Commit + + + + + + + + + +
+
+
+
+

Auto Commit

+
+
+
+

+ While transactions are frequently used to provide atomicity to + multiple database + + or store + + operations, it is sometimes necessary to perform + a single database + + or store + + operation under the control of a transaction. + Rather than force you to obtain a transaction, perform the single + write operation, and then either commit or abort the transaction, + you can automatically group this sequence of events using + auto commit. +

+

+ To use auto commit: +

+
+
    +
  1. +

    + Open your environment and your databases + + or store + + so that they support + transactions. See Enabling Transactions + for details. +

    +
  2. +
  3. +

    + Do not provide a transactional handle to the method that is + performing the database + + or store + + write operation. +

    +
  4. +
+
+

+ Note that auto commit is not available for cursors. You must always + open your cursor using a transaction if you want the cursor's + operations to be transaction protected. See + Transactional Cursors for details on using + transactional cursors. +

+
+

Note

+

+ Never have more than one active transaction in your thread + at a time. This is especially a problem if you mix an + explicit transaction with another operation that uses auto + commit. Doing so can result in undetectable deadlocks. +

+
+

+ For example, the following uses auto commit to perform the database write operation: +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    myDatabase = myEnv.openDatabase(null,
+                                    "sampleDatabase",
+                                    dbConfig);
+
+    String keyString = "thekey";
+    String dataString = "thedata";
+    DatabaseEntry key = 
+        new DatabaseEntry(keyString.getBytes("UTF-8"));
+    DatabaseEntry data = 
+        new DatabaseEntry(dataString.getBytes("UTF-8"));
+
+    // Perform the write. Because the database was opened to 
+    // support transactions, this write is performed using auto commit.
+    myDatabase.put(null, key, data);
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} 
+
+ + + diff --git a/docs/TransactionGettingStarted/backup.html b/docs/TransactionGettingStarted/backup.html new file mode 100644 index 0000000..5e44c57 --- /dev/null +++ b/docs/TransactionGettingStarted/backup.html @@ -0,0 +1,317 @@ + + + + + + Performing Backups + + + + + + + + + +
+
+
+
+

Performing Backups

+
+
+
+ +

+ This section describes how to back up your JE database(s) such that catastrophic recovery is possible. +

+

+ To back up your database, you can either take a hot backup + or an offline backup. A hot backup is performed while + database write operations are in progress. +

+

+ Do not confuse offline and hot backups with the concept of a + full and incremental backup. Both an offline and a hot backup + are full backups – you back up the entire database. The + only difference between them is how much of the contents of the + in-memory cache are contained in them. On the other hand, an + incremental backup is a backup of just those log files modified + or created since the time of the last backup. Most backup + software is capable of performing both full and incremental + backups for you. +

+
+
+
+
+

Performing a Hot Backup

+
+
+
+

+ To perform a hot backup of your JE databases, copy all log files + (*.jdb files) from your environment directory to + your archival location or backup media. The files must be copied + in alphabetical order (numerical in effect). You do not have to + stop any database operations in order to do this. +
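
+

+ For illustration, here is a simple sketch of such a copy, where + copyToArchive() is a hypothetical application method. Note that + this sketch does not handle log files stored in subdirectories + (see the note below): +

+
   File envHome = new File("/my/env/home");
+   File[] logFiles = envHome.listFiles(
+       (dir, name) -> name.endsWith(".jdb"));
+   Arrays.sort(logFiles);  // Copy in alphabetical order.
+   for (File f : logFiles) {
+       copyToArchive(f);
+   }
+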

+
+

Note

+

+ If you are using subdirectories to store your log files, then you must back up + the subdirectories, making sure to keep log files in the subdirectory in which + JE placed them. For information on using subdirectories to store your log + files, see the Getting Started with Berkeley DB, Java Edition guide. +

+
+

+ To make this process a bit easier, you may want to make use of the + DbBackup helper class. See + Using the DbBackup Helper Class + for details. +

+
+
+
+
+
+

Performing an Offline Backup

+
+
+
+

+ An offline backup guarantees that you have captured the database in its entirety, including all contents + of your in-memory cache, at the moment that the + backup was taken. To do this, you must make sure that no write operations are in progress and all + database modifications have been written to your log files + on disk. To obtain an offline backup: +

+
+
    +
  1. +

    + Stop writing your databases. +

    +
  2. +
  3. +

    + Make sure all your in-memory changes have been flushed to disk. How you do this depends on the + type of transactions that you are using: +

    +
    +
      +
    • +

      + If you are using transactions that write all dirty data to disk on commit (this is the default + behavior), you simply need to make sure all on-going transactions are committed or + aborted. +

      +
    • +
    • +

      + If you are using transactions that do not synchronously write on + commit, you must run a checkpoint. Remember that closing your + environment causes a checkpoint to be run, so if your application is + shutting down completely before taking the backup, you have met this + requirement. +

      +

      + For information on changing the transactional sync behavior, see + Non-Durable Transactions. + For information on running a checkpoint, see + Checkpoints. +

      +
    • +
    +
    +
  4. +
  5. +

    + If you are using durable transactions, then optionally + run a checkpoint. Doing this can shorten the time + required to restore your database from this backup. +

    +
  6. +
  7. +

    + Copy all log files (*.jdb) from your environment + directory to your archival location or backup media. To make this process a + bit easier, you may want to make use of the DbBackup + helper class. See the next section for details. +

    +
    +

    Note

    +

    + If you are using subdirectories to store your log files, then you must back up + the subdirectories, making sure to keep log files in the subdirectory in which + JE placed them. For information on using subdirectories to store your log + files, see the Getting Started with Berkeley DB, Java Edition guide. +

    +
    +
  8. +
+
+

+ You can now resume normal database operations. +

+
+
+
+
+
+

Using the DbBackup Helper Class

+
+
+
+

+ In order to simplify backup operations, JE + provides the DbBackup helper + class. This class stops and restarts JE background activity + in an open environment. It also lets the application create a + backup which can support restoring the environment to + a specific point in time. +

+

+ Because you do not have to stop JE write activity + in order to take a backup, it is usually necessary to + examine your log files twice before you decide that + your backup is complete. This is because JE may + create a new log file while you are running your + backup. A second pass over your log files allows you to + ensure that no new files have been created and so you + can declare your backup complete. +

+

+ For example: +

+
 time    files in                    activity
+         environment
+
+  t0     000000001.jdb     Backup starts copying file 1
+         000000003.jdb
+         000000004.jdb
+
+  t1     000000001.jdb     JE log cleaner migrates portion of file 3 to
+         000000004.jdb     newly created file 5 and deletes file 3. 
+         000000005.jdb     Backup finishes file 1, starts copying file 4.
+                           Backup MUST include file 5 for a consistent 
+                           backup!
+
+  t2     000000001.jdb     Backup finishes copying file 4, starts and 
+         000000004.jdb     finishes file 5, has caught up. Backup ends.
+         000000005.jdb
+
+

+ DbBackup works around this + problem by defining the set of files that must be + copied for each backup operation, and freezes all + changes to those files. The application can copy that + defined set of files and finish the operation without + checking for the ongoing creation of new files. Also, + there will be no need to check for a newer version of + the last file on the next backup. +

+

+ In the example above, if DbBackup was used at t0, + the application would only have to copy files 1, 3 and + 4 to back up. On a subsequent backup, the application + could start its copying at file 5. There would be no + need to check for a newer version of file 4. +

+

+ The following code fragment illustrates this class' usage. + See the DbBackup javadoc for additional + examples and more information on incremental backups. +

+
package je.gettingStarted;
+
+...
+import com.sleepycat.je.util.DbBackup;
+...
+
+    // Find the file number of the last file in the previous backup
+    // persistently, by either checking the backup archive, or saving
+    // state in a persistent file.
+    long lastFileCopiedInPrevBackup =  ...
+
+    Environment env = new Environment(...);
+    DbBackup backupHelper = new DbBackup(env, lastFileCopiedInPrevBackup);
+
+    // Start backup, find out what needs to be copied.
+    // If multiple environment subdirectories are in use,
+    // the getLogFilesInBackupSet returns the log file
+    // name prefixed with the dataNNN/ directory in which
+    // it resides.
+    backupHelper.startBackup();
+    try {
+        String[] filesForBackup = backupHelper.getLogFilesInBackupSet();
+
+        // Copy the files to archival storage.
+        myApplicationCopyMethod(filesForBackup);
+        // Update our knowledge of the last file saved in the backup set,
+        // so we can copy less on the next backup.
+        lastFileCopiedInPrevBackup = backupHelper.getLastFileInBackupSet();
+        myApplicationSaveLastFile(lastFileCopiedInPrevBackup);
+    }
+    finally {
+        // Remember to exit backup mode, or all log files won't be cleaned
+        // and disk usage will bloat.
+       backupHelper.endBackup();
+   } 
+
+
+ + + diff --git a/docs/TransactionGettingStarted/blocking_deadlocks.html b/docs/TransactionGettingStarted/blocking_deadlocks.html new file mode 100644 index 0000000..3a59493 --- /dev/null +++ b/docs/TransactionGettingStarted/blocking_deadlocks.html @@ -0,0 +1,610 @@ + + + + + + Locks, Blocks, and Deadlocks + + + + + + + + + +
+
+
+
+

Locks, Blocks, and Deadlocks

+
+
+
+
+
+
+ + Locks + +
+
+ + Blocks + +
+
+ + Deadlocks + +
+
+
+

+ It is important to understand how locking works in a + concurrent application before continuing with a description of + the concurrency mechanisms JE makes available to you. + Blocking and deadlocking have important performance implications + for your application. Consequently, this section provides a + fundamental description of these concepts, and how they affect + JE operations. +

+
+
+
+
+

Locks

+
+
+
+

+ When one thread of control wants to obtain access to an + object, it requests a lock for that + object. This lock is what allows JE to provide your + application with its transactional isolation guarantees by + ensuring that: +

+
+
    +
  • +

    + no other thread of control can read that object (in + the case of an exclusive lock), and +

    +
  • +
  • +

    + no other thread of control can modify that object + (in the case of an exclusive or non-exclusive lock). +

    +
  • +
+
+
+
+
+
+

Lock Resources

+
+
+
+

+ When locking occurs, there are conceptually three resources + in use: +

+
+
    +
  1. +

    + The locker. +

    +

    + This is the thing that holds the lock. In a + transactional application, the locker is a + transaction handle. + + + For non-transactional operations, the locker is the current thread. + +

    +
  2. +
  3. +

    + The lock. +

    +

    + This is the actual data structure that locks + the object. In JE, a locked + object structure in the lock manager + is representative of the object that + is locked. +

    +
  4. +
  5. +

    + The locked object. +

    +

    + The thing that your application + actually wants to lock. + In a JE + application, the locked object is usually a + + + database record. + +

    +
  6. +
+
+

+ JE has not set a limit for the maximum number of + these resources you can use. Instead, you are only + limited by the amount of memory available to your + application. +

+

+ The following figure shows a transaction handle, + Txn A, that is holding a lock on + database + + record + 002. In this graphic, Txn + A is the locker, and the locked object is + + record + 002. Only a single lock is in use + in this operation. +

+
+ +
+
+
+
+
+
+

Types of Locks

+
+
+
+

+ JE applications support both exclusive and + non-exclusive locks. Exclusive + locks are granted when a + locker wants to write to an object. For this reason, + exclusive locks are also sometimes called + write locks. +

+

+ An exclusive lock prevents any other locker from + obtaining any sort of a lock on the object. This + provides isolation by ensuring that no other locker can + observe or modify an exclusively locked object until the locker is done + writing to that object. +

+

+ Non-exclusive locks are granted + for read-only access. For this reason, non-exclusive + locks are also sometimes called read + locks. Since multiple lockers can + simultaneously hold read locks on the same + object, read locks are also + sometimes called shared locks. +

+

+ A non-exclusive lock prevents any other locker from + modifying the locked object while the locker is still + reading the object. This is how transactional cursors are able to + achieve repeatable reads; by default, the + cursor's transaction holds + a read lock on any object that the cursor has examined until + such a time as the transaction is committed + or aborted. + +

+

+ In the following figure, Txn A and + Txn B are both holding read locks on + + record + 002, while Txn C + is holding a write lock on + + record + 003: +

+
+ +
+
+
+
+
+
+

Lock Lifetime

+
+
+
+

+ A locker holds its locks until such a time as it does + not need the lock any more. What this means is: +

+
+
    +
  1. +

    + A transaction holds any locks that it obtains + until the transaction is committed or aborted. +

    +
  2. +
  3. +

    + All non-transaction operations hold locks + until such a time as the operation is completed. + For cursor operations, the lock is held until the cursor is moved to a new position or + closed. +

    +
  4. +
+
+
+
+
+
+
+
+

Blocks

+
+
+
+

+ Simply put, a thread of control is blocked when it attempts + to obtain a lock, but that attempt is denied because some + other thread of control holds a conflicting lock. + Once blocked, the thread of control is temporarily unable + to make any forward progress until the requested lock is + obtained or the operation requesting the lock is + abandoned. +

+

+ Be aware that when we talk about blocking, strictly + speaking the thread is not what is attempting to obtain the + lock. Rather, some object within the thread (such as a + cursor) is attempting to obtain the + lock. However, once a locker attempts to + obtain a lock, the entire thread of control must pause until the lock + request is in some way resolved. +

+

+ For example, if Txn A holds a write lock (an exclusive + lock) on + + record + 002, then if Txn B tries to obtain a read or write lock on + that + + record, + the thread of control in which Txn + B is running + is blocked: +

+
+ +
+

+ However, if Txn A only holds a read + lock (a shared lock) on + + record + 002, then only those handles that attempt to obtain a + write lock on that + + record + will block. +

+
+ +
+
+
+
+
+

Blocking and Application Performance

+
+
+
+

+ Multi-threaded + + applications typically perform better than simple + single-threaded applications because the + application can perform one part of its workload + (updating + a database record, + + for example) while it is waiting for some other + lengthy operation to complete (performing disk or + network I/O, for example). This performance + improvement is particularly noticeable if you use + hardware that offers multiple CPUs, because the threads + + can run simultaneously. +

+

+ That said, concurrent applications can see reduced + workload throughput if their threads of control are + seeing a large amount of lock contention. That is, + if threads are blocking on lock requests, then that + represents a performance penalty for your + application. +

+

+ Consider once again the previous diagram of a blocked write lock request. + In that diagram, Txn C cannot + obtain its requested write lock because + Txn A and Txn + B are both already holding read locks on + the requested + + record. + In this case, the thread in which + Txn C is running will pause until + such a time as Txn C either + obtains its write lock, or the operation + that is requesting the lock is abandoned. + The fact that Txn + C's thread has temporarily halted all + forward progress represents a performance penalty + for your application. +

+

+ Moreover, any read locks that are requested while + Txn C is waiting for its write + lock will also block until such a time as + Txn C has obtained and + subsequently released its write lock. +

+
+
+
+
+
+

Avoiding Blocks

+
+
+
+

+ Reducing lock contention is an important part of + performance tuning your concurrent JE + application. Applications that have multiple + threads of control obtaining exclusive (write) + locks are prone to contention issues. Moreover, as + you increase the numbers of lockers and as you + increase the time that a lock is held, you increase + the chances of your application seeing lock contention. +

+

+ As you are designing your application, try to do + the following in order to reduce lock contention: +

+
+
    +
  • +

    + Reduce the length of time your application + holds locks. +

    +

    + Shorter lived transactions will result in + shorter lock lifetimes, which will in turn + help to reduce lock contention. +

    +

    + In addition, by default transactional cursors hold read + locks until such a time as the transaction is completed. + For this reason, try to minimize the time you keep + transactional cursors opened, or reduce your isolation + levels – see below. +

    +
  • +
  • +

    + If possible, access heavily accessed (read + or write) items toward the end of the + transaction. This reduces the amount of + time that a heavily used + + + record + + is locked by the transaction. +

    +
  • +
  • +

    + Reduce your application's isolation guarantees. +

    +

    + By reducing your isolation guarantees, you + reduce the situations in which a lock can + block another lock. Try using uncommitted reads + for your read operations in order to + prevent a read lock being blocked by a + write lock. +

    +

    + In addition, for cursors you can use degree + 2 (read committed) isolation, which causes + the cursor to release its read locks as + soon as it is done reading the record (as + opposed to holding its read locks until the + transaction ends). +

    +

    + Be aware that reducing your + isolation guarantees can have + adverse consequences for your + application. Before deciding + to reduce your isolation, take + care to examine your + application's isolation + requirements. + For information on isolation + levels, see + Isolation. +

    +
  • +
  • +

    + Consider your data access patterns. +

    +

      + Depending on the nature of your application, + this may be something that you cannot + do anything about. However, if it is + possible to create your threads such that + they operate only on non-overlapping + portions of your database, then you can + reduce lock contention because your + threads will rarely (if ever) block on one another's + locks. +

    +
  • +
+
+
+
+
+
+
+
+

Deadlocks

+
+
+
+

+ A deadlock occurs when two or more threads of control are + blocked, each waiting on a resource held by the other + thread. When this happens, there is no + possibility of the threads ever making forward progress + unless some outside agent takes action to break the + deadlock. +

+

+ For example, if + Txn A is + blocked by Txn B at the same time + Txn B is blocked by Txn + A then the threads of control containing + Txn A and Txn B are + deadlocked; neither thread can make + any forward progress because neither thread will ever release the lock + that is blocking the other thread. +

+
+ +
+

+ When two threads of control deadlock, the only + solution is to have a mechanism external to the two threads + capable of recognizing the deadlock and notifying at least + one thread that it is in a deadlock situation. + Once notified, a thread of + control must abandon the attempted operation in order to + resolve the deadlock. + + + + + JE is capable of notifying your application when it detects a deadlock. (For + JE, this is handled in the same way as any lock + conflict that a JE application might encounter.) See + Managing Deadlocks and other Lock Conflicts + for more information. + +

+

+ Note that when one locker in a thread of control is blocked + waiting on a lock held by another locker in that same + thread of control, the thread is said to be + self-deadlocked. +

+

+ Note that in JE, a self-deadlock can occur only if + two or more transactions (lockers) are used in the same + thread. A self-deadlock cannot occur for + non-transactional usage, because the thread is the + locker. However, even if you have only one locker per + thread, there is still the possibility of a deadlock + occurring with another thread of control (it just will + not be a self-deadlock), so you still must write code + that defends against deadlocks. +

+
+
+
+
+

Deadlock Avoidance

+
+
+
+

+ The things that you do to avoid lock contention also + help to reduce deadlocks (see Avoiding Blocks). + + + + + Beyond that, you should also make sure all threads access data in the same order as all other + threads. So long as threads lock records in the same basic order, there is no possibility of a + deadlock (threads can still block, however). + +

+

+ Be aware that if you are using secondary + databases (indexes), then locking order is + different for reading and writing. For this + reason, if you are writing a concurrent + application and you are using secondary + databases, you should expect deadlocks. +

+
+
+
+ + + diff --git a/docs/TransactionGettingStarted/chkpoint.html b/docs/TransactionGettingStarted/chkpoint.html new file mode 100644 index 0000000..f0477e4 --- /dev/null +++ b/docs/TransactionGettingStarted/chkpoint.html @@ -0,0 +1,88 @@ + + + + + + Checkpoints + + + + + + + + + +
+
+
+
+

Checkpoints

+
+
+
+

+ Running normal recovery can become expensive if over time all that is ever + written to disk is BTree leaf nodes. So in order to limit the time required for normal recovery, JE runs + checkpoints. Checkpoints write to your log files all the internal BTree nodes and structures modified as a part of + write operations. This means that your log files contain a complete BTree up to + the moment in time when the checkpoint was run. The result is that normal recovery only needs to recreate the + portion of the BTree that has been modified since the time of the last checkpoint. +

+

+ Checkpoints typically write more information to disk than do transaction commits, and so they are more + expensive from a disk I/O perspective. You will therefore need to consider how frequently to run checkpoints + as a part of your performance tuning activities. When you do this, balance the cost of the checkpoints + against the time it will take your application to restart due to the cost of running normal recovery. +

+

+ Checkpoints are normally performed by the checkpointer background thread, which is always running. Like + all background threads, it is managed using the je.properties file. Currently, the + only checkpointer property that you may want to manage is + je.checkpointer.bytesInterval. This property identifies how much JE's log files + can grow before a checkpoint is run. Its value is specified in bytes. Decreasing this value causes the + checkpointer thread to run checkpoints more frequently. This will improve the time that it takes to run + recovery, but it also increases the system resources (notably, I/O) required by JE. +
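
+

+ For example, in addition to setting it in je.properties, the + same parameter can be set programmatically before opening the + environment, as in the following sketch; the value shown is + illustrative only: +

+
   EnvironmentConfig envConfig = new EnvironmentConfig();
+   // Run a checkpoint after roughly every 20 MB of new log data.
+   envConfig.setConfigParam("je.checkpointer.bytesInterval",
+                            "20000000");
+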

+

+ Note that checkpoints are also always performed when the environment is closed normally. Therefore, + normal recovery only has work to do if the application crashes or otherwise ends abnormally without + calling Environment.close(). +

+
+ + + diff --git a/docs/TransactionGettingStarted/deadlock.jpg b/docs/TransactionGettingStarted/deadlock.jpg new file mode 100644 index 0000000..0995a84 Binary files /dev/null and b/docs/TransactionGettingStarted/deadlock.jpg differ diff --git a/docs/TransactionGettingStarted/enabletxn.html b/docs/TransactionGettingStarted/enabletxn.html new file mode 100644 index 0000000..f7f5f15 --- /dev/null +++ b/docs/TransactionGettingStarted/enabletxn.html @@ -0,0 +1,260 @@ + + + + + + Chapter 2. Enabling Transactions + + + + + + + + + +
+
+
+
+

Chapter 2. Enabling Transactions

+
+
+
+ +

+ In order to use transactions with your application, you must turn them + on. To do this you must: +

+
+
    +
  • +

    + Turn on transactions for your environment. + + + You do this by using the + EnvironmentConfig.setTransactional() + method, or by using the + je.env.isTransactional + je.properties parameter. + + + + + +

    +
  • +
  • +

    + If you are using the DPL, transaction-enable your stores. + You do this by using the + StoreConfig.setTransactional() method. +

    +
  • +
  • +

    + + Transaction-enable your databases. + + + If you are using the base API, transaction-enable your databases. + + You do this by + + using the + DatabaseConfig.setTransactional() + method, and then opening the database from within a transaction. + + + + + + + Note that the common practice is for auto commit to be used to + transaction-protect the database open. To use auto-commit, you + must still enable transactions as described here, but you do + not have to explicitly use a transaction when you open your + database. An example of this is given in the next section. + +

    +
  • +
+
+
+
+
+
+

Opening a Transactional Environment and + + Store or Database + +

+
+
+
+

+ To enable transactions for your environment, you must initialize the + transactional subsystem. For example, do this with the DPL: +

+
package persist.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+import java.io.File;
+
+...
+
+Environment myEnv = null;
+EntityStore myStore = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    StoreConfig storeConfig = new StoreConfig();
+
+    myEnvConfig.setTransactional(true);
+    storeConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+    myStore = new EntityStore(myEnv, "EntityStore", storeConfig);
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+}
+

+ And when using the base API: +

+
package je.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+
+...
+
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+}
+

+ You then create and open your database(s) as you would for a non-transactional system. + + + + The only difference is that you must set + DatabaseConfig.setTransactional() + to true. Note that your database open must be + transaction protected. However, if you do not give the + openDatabase() method a transaction handle, + then the open is automatically protected using auto commit. + Typically auto commit is used for this purpose. + For example: + + +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    myDatabase = myEnv.openDatabase(null,
+                                    "sampleDatabase",
+                                    dbConfig);
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+}
+
+
+
+

Note

+

+ Never close a database or + store that has active transactions. Make sure + all transactions are resolved (either committed or aborted) + before closing the database. +
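    + For illustration, here is a minimal sketch of that rule, reusing the
      myEnv and myDatabase handles opened above (and assuming an import of
      com.sleepycat.je.Transaction):

Transaction txn = null;
try {
    txn = myEnv.beginTransaction(null, null);
    // ... reads and writes protected by txn go here ...
    txn.commit();                // resolve the transaction...
} catch (DatabaseException de) {
    if (txn != null) {
        txn.abort();             // ...or abort it on failure
    }
}
// Only close handles once no transactions remain active.
myDatabase.close();
myEnv.close();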

+
+
+
+ + + diff --git a/docs/TransactionGettingStarted/gettingStarted.css b/docs/TransactionGettingStarted/gettingStarted.css new file mode 100644 index 0000000..6a2b24b --- /dev/null +++ b/docs/TransactionGettingStarted/gettingStarted.css @@ -0,0 +1,50 @@ +body { width: 45em; + margin-left: 3em; + font-family: Arial, Helvetica, sans-serif; + font-size: 11pt; + } + +h2.title { margin-left: -1em; + font-family: Verdana, serif; + font-size: 16pt; + } + +h3.title { font-family: Verdana, serif; + font-size: 14pt; + } + +pre.programlisting { + font-family: monospace; + background-color: #eae8e9; +} + +div.navheader { font-size: 10pt; + width: 60em; + margin-left: -2em; + } + +div.navheader table tr td { font-size: 10pt; } + +div.navfooter { font-size: 10pt; + width: 60em; + margin-left: -2em; + } +div.navfooter table tr td { font-size: 10pt; } + +span.emphasis { font-style: italic;} + +div.appendix div.informaltable { font-size: 9pt; } +div.appendix div.informaltable td { vertical-align: top; } +div.appendix div.informaltable p { margin-top: .25em; } +div.appendix div.informaltable p { margin-bottom: .25em; } + +div.variablelist dl dt {margin-top: 1em; } + +div.libver p { + font-size: 8pt; + width: 30%; + margin-left: 2px; + margin-right: 2px; + padding-top: 3px; + padding-bottom: 3px; + } diff --git a/docs/TransactionGettingStarted/index.html b/docs/TransactionGettingStarted/index.html new file mode 100644 index 0000000..9b737af --- /dev/null +++ b/docs/TransactionGettingStarted/index.html @@ -0,0 +1,523 @@ + + + + + + Getting Started with Berkeley DB, Java Edition Transaction Processing + + + + + + + +
+
+
+
+

Getting Started with Berkeley DB, Java Edition Transaction Processing

+
+
+
+ +

+ Legal Notice +

+

+ Copyright © 2002 - 2017 Oracle and/or its affiliates. All rights + reserved. +

+

+ This software and related documentation are provided under a + license agreement containing restrictions on use and disclosure + and are protected by intellectual property laws. Except as + expressly permitted in your license agreement or allowed by + law, you may not use, copy, reproduce, translate, broadcast, + modify, license, transmit, distribute, exhibit, perform, + publish, or display any part, in any form, or by any means. + Reverse engineering, disassembly, or decompilation of this + software, unless required by law for interoperability, is + prohibited. +

+

+ The information contained herein is subject to change without + notice and is not warranted to be error-free. If you find any + errors, please report them to us in writing. +

+

+ Berkeley DB, + + Berkeley DB Java Edition + and + Sleepycat are trademarks or registered trademarks of + Oracle. All rights to these marks are reserved. + No third-party use is permitted without the + express prior written consent of Oracle. +

+

+ Other names may be trademarks of their respective owners. +

+

+ If this is software or related documentation that is delivered + to the U.S. Government or anyone licensing it on behalf of the + U.S. Government, the following notice is applicable: +

+

+ U.S. GOVERNMENT END USERS: Oracle programs, including any + operating system, integrated software, any programs installed + on the hardware, and/or documentation, delivered to U.S. + Government end users are "commercial computer software" + pursuant to the applicable Federal Acquisition Regulation and + agency-specific supplemental regulations. As such, use, + duplication, disclosure, modification, and adaptation of the + programs, including any operating system, integrated software, + any programs installed on the hardware, and/or documentation, + shall be subject to license terms and license restrictions + applicable to the programs. No other rights are granted to the + U.S. Government. +

+

+ This software or hardware is developed for general use in a + variety of information management applications. It is not + developed or intended for use in any inherently dangerous + applications, including applications that may create a risk of + personal injury. If you use this software or hardware in + dangerous applications, then you shall be responsible to take + all appropriate fail-safe, backup, redundancy, and other + measures to ensure its safe use. Oracle Corporation and its + affiliates disclaim any liability for any damages caused by use + of this software or hardware in dangerous applications. +

+

+ Oracle and Java are registered trademarks of Oracle and/or its + affiliates. Other names may be trademarks of their respective + owners. +

+

+ Intel and Intel Xeon are trademarks or registered trademarks of + Intel Corporation. All SPARC trademarks are used under license + and are trademarks or registered trademarks of SPARC + International, Inc. AMD, Opteron, the AMD logo, and the AMD + Opteron logo are trademarks or registered trademarks of + Advanced Micro Devices. UNIX is a registered trademark of The + Open Group. +

+

+ This software or hardware and documentation may provide access + to or information on content, products, and services from third + parties. Oracle Corporation and its affiliates are not + responsible for and expressly disclaim all warranties of any + kind with respect to third-party content, products, and + services. Oracle Corporation and its affiliates will not be + responsible for any loss, costs, or damages incurred due to + your access to or use of third-party content, products, or + services. +

+
+
+
+

31-Oct-2017

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+ + For More Information + +
+
+
+
+ + Contact Us + +
+
+
+
+
+
+ + 1. Introduction + +
+
+
+
+ + Transaction Benefits + +
+
+ + A Note on System Failure + +
+
+ + Application Requirements + +
+
+ + Multi-threaded + + Applications + +
+
+ + Recoverability + +
+
+ + Performance Tuning + +
+
+
+
+ + 2. Enabling Transactions + +
+
+
+
+ + Opening a Transactional Environment and + + Store or Database + + + +
+
+
+
+ + 3. Transaction Basics + +
+
+
+
+ + Committing a Transaction + +
+
+ + Non-Durable Transactions + +
+
+ + Aborting a Transaction + +
+
+ + Auto Commit + +
+
+ + Transactional Cursors + +
+
+
+
+ + Using Transactional DPL Cursors + +
+
+
+
+ + Secondary Indices with Transaction Applications + +
+
+ + Configuring the Transaction Subsystem + +
+
+
+
+ + 4. Concurrency + +
+
+
+
+ + Which JE Handles are Free-Threaded + +
+
+ + Locks, Blocks, and Deadlocks + +
+
+
+
+ + Locks + +
+
+ + Blocks + +
+
+ + Deadlocks + +
+
+
+
+ + JE Lock Management + +
+
+
+
+ + Managing JE Lock Timeouts + +
+
+ + Managing Deadlocks and other Lock Conflicts + +
+
+
+
+ + Isolation + +
+
+
+
+ + Supported Degrees of Isolation + +
+
+ + Reading Uncommitted Data + +
+
+ + Committed Reads + +
+
+ + Configuring Serializable Isolation + +
+
+
+
+ + Transactional Cursors and Concurrent Applications + +
+
+
+
+ + Using Cursors with Uncommitted Data + +
+
+
+
+ + Read/Modify/Write + +
+
+
+
+ + 5. Backing up and Restoring Berkeley DB, Java Edition Applications + +
+
+
+
+ + Normal Recovery + +
+
+ + Checkpoints + +
+
+ + Performing Backups + +
+
+
+
+ + Performing a Hot Backup + +
+
+ + Performing an Offline Backup + +
+
+ + Using the DbBackup Helper Class + +
+
+
+
+ + Performing Catastrophic Recovery + +
+
+ + Hot Failover + +
+
+
+
+ + 6. Summary and Examples + +
+
+
+
+ + Anatomy of a Transactional Application + +
+
+ + Base API Transaction Example + +
+
+
+
+ + TxnGuide.java + +
+
+ + PayloadData.java + +
+
+ + DBWriter.java + +
+
+
+
+ + DPL Transaction Example + +
+
+
+
+ + TxnGuide.java + +
+
+ + PayloadDataEntity.java + +
+
+ + StoreWriter.java + +
+
+
+
+
+
+
+
+ + + diff --git a/docs/TransactionGettingStarted/introduction.html b/docs/TransactionGettingStarted/introduction.html new file mode 100644 index 0000000..da79703 --- /dev/null +++ b/docs/TransactionGettingStarted/introduction.html @@ -0,0 +1,215 @@ + + + + + + Chapter 1. Introduction + + + + + + + + + +
+
+
+
+

Chapter 1. Introduction

+
+
+
+
+

+ Table of Contents +

+
+
+ + Transaction Benefits + +
+
+ + A Note on System Failure + +
+
+ + Application Requirements + +
+
+ + Multi-threaded + + Applications + +
+
+ + Recoverability + +
+
+ + Performance Tuning + +
+
+
+

    + This book provides a thorough introduction and discussion on transactions as
      used with Berkeley DB, Java Edition (JE). Both the base API and the
      Direct Persistence Layer (DPL) API are used in this manual.
      It begins by offering a general overview of
      transactions, the guarantees they provide, and the general application
      infrastructure required to obtain full transactional protection for your
      data.

+

    + This book also provides detailed examples on how to write a
      transactional application. Both single-threaded and multi-threaded
      applications are discussed. A detailed description of various
      backup and recovery strategies is included in this manual, as is a
      discussion on performance considerations for your transactional application.

+

+ You should understand the concepts from the + + + + Getting Started with Berkeley DB, Java Edition + + guide before reading this book. +

+
+
+
+
+

Transaction Benefits

+
+
+
+

    + Transactions protect your application's data from
      application or system failures. That is, JE transactions offer
      your application full ACID support:

+
+
    +
  • +

    + Atomicity +

    +

    + Multiple database operations are treated as a single unit of + work. Once committed, all write operations performed under + the protection of the transaction are saved to your databases. + Further, in the event that you abort a transaction, all write + operations performed during the transaction are discarded. + In this event, your database is left in the state it was in + before the transaction began, regardless of the number or + type of write operations you may have performed during the + course of the transaction. +

    +

    + Note that JE transactions can span one or more
      database handles (see the sketch following this list).

    +
  • +
  • +

    + Consistency +

    +

    + Your databases will never see a partially completed + transaction. This is true even if your application fails while there are + in-progress transactions. If the application or system fails, + then either all of the database changes appear when the + application next runs, or none of them appear. +

    +

    + In other words, whatever consistency requirements your application has will never be violated by JE. + If, for example, your application requires every record to include an employee ID, and your + code faithfully adds that ID to its database records, then JE will never + violate that consistency requirement. The ID will remain in the database records until such a time as your + application chooses to delete it. +

    +
  • +
  • +

    + Isolation +

    +

    + While a transaction is in progress, your databases will appear
      to the transaction as if there are no other operations
      occurring outside of the transaction. That is, operations
      wrapped inside a transaction will always have a clean and
      consistent view of your databases. They never have to see
      updates currently in progress under the protection of another transaction.
      Note, however, that isolation guarantees can be
      increased or relaxed from the default setting. See
      Isolation
      for more information.

    +
  • +
  • +

    + Durability +

    +

    + Once committed to your databases, your modifications will + persist even in the event of an application or system failure. + Note that like isolation, your durability guarantee can be + relaxed. See Non-Durable Transactions + for more information. +

    +
  • +
+
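    + As a minimal sketch of the atomicity point above, a single
      transaction can protect writes to two different Database handles;
      the handle and entry names here are illustrative:

Transaction txn = myEnv.beginTransaction(null, null);
try {
    invoiceDb.put(txn, invoiceKey, invoiceData);  // write to one database
    auditDb.put(txn, auditKey, auditData);        // and to another
    txn.commit();   // both writes are saved together...
} catch (DatabaseException de) {
    txn.abort();    // ...or both are discarded
}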
+
+
+ + + diff --git a/docs/TransactionGettingStarted/isolation.html b/docs/TransactionGettingStarted/isolation.html new file mode 100644 index 0000000..131d3e0 --- /dev/null +++ b/docs/TransactionGettingStarted/isolation.html @@ -0,0 +1,759 @@ + + + + + + Isolation + + + + + + + + + +
+
+
+
+

Isolation

+
+
+
+ +

+ Isolation guarantees are an important aspect of transactional + protection. Transactions + ensure the data your transaction is working with will not be changed by some other transaction. + Moreover, the modifications made by a transaction will never be viewable outside of that transaction until + the changes have been committed. +

+

+ That said, there are different degrees of isolation, and you can choose to relax your isolation + guarantees to one degree or another depending on your application's requirements. The primary reason why + you might want to do this is because of performance; the more isolation you ask your transactions to + provide, the more locking that your application must do. With more locking comes a greater chance of + blocking, which in turn causes your threads to pause while waiting for a lock. Therefore, by relaxing + your isolation guarantees, you can potentially improve your application's throughput. + Whether you actually see any improvement depends, of course, on + the nature of your application's data and transactions. +

+
+
+
+
+

Supported Degrees of Isolation

+
+
+
+

+ JE supports the following levels of isolation: +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Degree | ANSI Term | Definition
1 | READ UNCOMMITTED | Uncommitted reads means that one transaction will never
      overwrite another transaction's dirty data. Dirty data is
      data that a transaction has modified but not yet committed
      to the underlying data store. However, uncommitted reads allows a
      transaction to see data dirtied by another
      transaction. In addition, a transaction may read data
      dirtied by another transaction, but which subsequently
      is aborted by that other transaction. In this latter
      case, the reading transaction may be reading data that
      never really existed in the database.
2 | READ COMMITTED |

+ Committed read isolation means that degree 1 is observed, except that dirty data is never read. +

+

+ In addition, this isolation level guarantees that data will never change so long as + it is addressed by the cursor, but the data may change before the reading cursor is closed. + In the case of a transaction, data at the current + cursor position will not change, but once the cursor + moves, the previous referenced data can change. This + means that readers release read locks before the cursor + is closed, and therefore, before the transaction + completes. Note that this level of isolation causes the + cursor to operate in exactly the same way as it does in + the absence of a transaction. +

+
(undefined) | REPEATABLE READ |

    + Committed read is observed, plus the data read by a transaction, T, will never be dirtied by another
      transaction before T completes. This means that neither read nor
      write locks are released until the transaction completes.

+

+ This is JE's default isolation level. +

+
3 | SERIALIZABLE |

    + Repeatable read is observed, plus
      no transactions will see phantoms. Phantoms are records
      returned as a result of a search, but which were not seen by
      the same transaction when the identical
      search criteria were previously used.

+ +
+
+

+ + By default, JE transactions and transactional cursors offer + + + repeatable read isolation. + + + You can optionally reduce your isolation level by configuring JE to use + uncommitted read isolation. See + Reading Uncommitted Data + for more information. + + You can also configure JE to use committed read isolation. See + Committed Reads + for more information. + + Finally, you can configure your transactions and transactional cursors to use + serializable isolation. See Configuring Serializable Isolation + for more information. + +

+
+
+
+
+
+

Reading Uncommitted Data

+
+
+
+

+ Berkeley DB allows you to configure your application to read data that has been modified but not yet + committed by another transaction; that is, dirty data. When you do this, you + may see a performance benefit by allowing your + application to not have to block waiting for write locks. On the other hand, the data that your + application is reading may change before the transaction has completed. +

+

+ When used with transactions, uncommitted reads means that one transaction can see data + modified but not yet committed by another transaction. When + used with transactional cursors, uncommitted reads means + that any database reader can see data modified by the + cursor before the cursor's transaction has committed. +

+

+ Because of this, uncommitted reads allow a transaction to read data + that may subsequently be aborted by another transaction. In + this case, the reading transaction will have read data that + never really existed in the database. +

+

+ To configure your application to read uncommitted data, specify that you want to use + uncommitted reads when you create a transaction or open the cursor. + To do this, you use the setReadUncommitted() + method on the relevant configuration object + (TransactionConfig or CursorConfig). +

+
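    + The transaction case is shown in the examples below. For the cursor
      case, a minimal sketch (assuming an open transactional Database
      handle named myDatabase, a transaction txn, and imports of
      com.sleepycat.je.Cursor and com.sleepycat.je.CursorConfig) might
      look like this:

CursorConfig cursorConfig = new CursorConfig();
cursorConfig.setReadUncommitted(true);  // this cursor may read dirty data

Cursor cursor = myDatabase.openCursor(txn, cursorConfig);
try {
    // ... reads here can see data modified but not yet
    // committed by other transactions ...
} finally {
    cursor.close();
}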

+ For example: +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+import java.io.File;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the database.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    myDatabase = myEnv.openDatabase(null, "sampleDatabase", dbConfig);
+
+    TransactionConfig txnConfig = new TransactionConfig();
+    txnConfig.setReadUncommitted(true);          // Use uncommitted reads 
+                                                 // for this transaction.
+    Transaction txn = myEnv.beginTransaction(null, txnConfig);
+
+    // From here, you perform your database reads and writes as normal,
+    // committing and aborting the transactions as is necessary, and
+    // testing for deadlock exceptions as normal (omitted for brevity). 
+        
+    ...
+

+ If you are using the DPL: +

+
package persist.txn;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+import java.io.File;
+
+...
+
+EntityStore myStore = null;
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the store.
+    StoreConfig myStoreConfig = new StoreConfig();
+    myStoreConfig.setAllowCreate(true);
+    myStoreConfig.setTransactional(true);
+
+    myStore = new EntityStore(myEnv, "store_name", myStoreConfig);
+
+    TransactionConfig txnConfig = new TransactionConfig();
+    txnConfig.setReadUncommitted(true);          // Use uncommitted reads 
+                                                 // for this transaction.
+    Transaction txn = myEnv.beginTransaction(null, txnConfig);
+
+    // From here, you perform your store reads and writes as normal,
+    // committing and aborting the transactions as is necessary, and
+    // testing for deadlock exceptions as normal (omitted for brevity). 
+        
+    ...
+

+ You can also configure uncommitted read isolation on a read-by-read basis + by specifying LockMode.READ_UNCOMMITTED: +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.Transaction;
+
+...
+
+Database myDb = null;
+Environment myEnv = null;
+Transaction txn = null;
+
+try {
+
+    // Environment and database open omitted
+
+    ...
+
+    txn = myEnv.beginTransaction(null, null);
+
+    DatabaseEntry theKey =
+        new DatabaseEntry((new String("theKey")).getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    myDb.get(txn, theKey, theData, LockMode.READ_UNCOMMITTED);
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+

+ Using the DPL: +

+
package persist.txn;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.Transaction;
+
+import com.sleepycat.persist.PrimaryIndex;
+
+...
+
+Environment myEnv = null;
+Transaction txn = null;
+
+try {
+
+    // Environment and store open omitted
+
+    ...
+
+    txn = myEnv.beginTransaction(null, null);
+
+    AnEntityClass aec = aPrimaryIndex.get(txn, "pKeya", 
+                            LockMode.READ_UNCOMMITTED);
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+
+
+
+

Committed Reads

+
+
+
+

    + You can configure your transaction so that the data being
      read by a transactional cursor is consistent so long as it
      is being addressed by the cursor. However, once the cursor is done
      reading the record or object, the cursor releases its lock on it.
      This means that the data the cursor has read and released
      may change before the cursor's transaction has completed.

+

    + For example,
      suppose you have two transactions, Ta and Tb. Suppose further that
      Ta has a cursor that reads record R, but does not modify it. Normally,
      Tb would then be unable to write record R because
      Ta would be holding a read lock on it. But when you configure your transaction for
      committed reads, Tb can modify record
      R before Ta completes, so long as the reading cursor is no longer
      addressing the record or object.

+

+ When you configure your application for this level of isolation, you may see better performance + throughput because there are fewer read locks being held by your transactions. + Read committed isolation is most useful when you have a cursor that is reading and/or writing records in + a single direction, and that does not ever have to go back to re-read those same records. In this case, + you can allow JE to release read locks as it goes, rather than hold them for the life of the + transaction. +

+

+ To configure your application to use committed reads, do one of the following: +

+
+
    +
  • +

    + Create your transaction such that it allows committed reads. You do this by + + + specifying true to + TransactionConfig.setReadCommitted(). + +

    +
  • +
  • +

    + Specify true to
      CursorConfig.setReadCommitted()
      (a minimal sketch of this follows the list).

    +
  • +
+
+
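    + The first option is shown in the examples below. For the second,
      cursor-based option, a minimal sketch (assuming an open
      transactional Database handle named myDatabase, a transaction txn,
      and imports of com.sleepycat.je.Cursor and
      com.sleepycat.je.CursorConfig) might look like this:

CursorConfig cursorConfig = new CursorConfig();
cursorConfig.setReadCommitted(true);  // this cursor releases its read
                                      // lock as it moves off each record

Cursor cursor = myDatabase.openCursor(txn, cursorConfig);
try {
    // ... single-direction reads here hold fewer read locks ...
} finally {
    cursor.close();
}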

+ For example, the following creates a transaction that allows committed reads: +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+import java.io.File;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the database.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    myDatabase = myEnv.openDatabase(null, "sampleDatabase", dbConfig);
+
+    // Open the transaction and enable committed reads. All cursors open
+    // with this transaction handle will use read committed isolation.
+    TransactionConfig txnConfig = new TransactionConfig();
+    txnConfig.setReadCommitted(true);          // Use committed reads 
+                                               // for this transaction.
+    Transaction txn = myEnv.beginTransaction(null, txnConfig);
+
+    // From here, you perform your database reads and writes as normal,
+    // committing and aborting the transactions as is necessary, and
+    // testing for deadlock exceptions as normal (omitted for brevity). 
+        
+    ...
+

+ Using the DPL: +

+
package persist.txn;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+import java.io.File;
+
+...
+
+EntityStore myStore = null;
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+
+    // Instantiate the store.
+    StoreConfig myStoreConfig = new StoreConfig();
+    myStoreConfig.setAllowCreate(true);
+    myStoreConfig.setTransactional(true);
+
+    // Open the transaction and enable committed reads. All cursors open
+    // with this transaction handle will use read committed isolation.
+    TransactionConfig txnConfig = new TransactionConfig();
+    txnConfig.setReadCommitted(true);          // Use committed reads 
+                                               // for this transaction.
+    Transaction txn = myEnv.beginTransaction(null, txnConfig);
+
+    // From here, you perform your store reads and writes as normal,
+    // committing and aborting the transactions as is necessary, and
+    // testing for deadlock exceptions as normal (omitted for brevity). 
+        
+    ...
+

+ You can also configure read committed isolation on a read-by-read basis + by specifying LockMode.READ_COMMITTED: +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.Transaction;
+
+...
+
+Database myDb = null;
+Environment myEnv = null;
+Transaction txn = null;
+
+try {
+
+    // Environment and database open omitted
+
+    ...
+
+    txn = myEnv.beginTransaction(null, null);
+
+    DatabaseEntry theKey =
+        new DatabaseEntry((new String("theKey")).getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    myDb.get(txn, theKey, theData, LockMode.READ_COMMITTED);
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+

+ Using the DPL: +

+
package persist.txn;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.Transaction;
+
+import com.sleepycat.persist.PrimaryIndex;
+
+...
+
+Environment myEnv = null;
+Transaction txn = null;
+
+try {
+
+    // Environment and store open omitted
+
+    ...
+
+    txn = myEnv.beginTransaction(null, null);
+
+    // Primary index creation omitted
+    ...
+
+    AnEntityClass aec = aPrimaryIndex.get(txn, "pKeya", 
+                            LockMode.READ_COMMITTED);
+} catch (Exception e) {
+    // Exception handling goes here
+} 
+
+
+
+
+
+

Configuring Serializable Isolation

+
+
+
+

+ You can configure JE to use serializable isolation. + Serializable isolation prevents transactions from seeing + phantoms. Phantoms occur when a transaction obtains + inconsistent results when performing a given query. +

+

    + Suppose a transaction performs a search, S, and as a result of
      that search NOTFOUND is returned. If you are using only repeatable read
      isolation (the default isolation level), it is possible for the same
      transaction to perform S at a later point in time and
      return SUCCESS instead of NOTFOUND. This can occur if another thread of
      control modified the database in such a way as to cause S to
      successfully locate data, where before no data was found.
      When this situation occurs, the results
      returned by S are said to be phantoms.

+

+ To prevent phantoms, you can use serializable isolation. Note that this + causes JE to perform additional locking in order to prevent keys + from being inserted until the transaction ends. However, this additional + locking can also result in reduced concurrency for your application, + which means that your database access can be slowed. +

+

+ You configure serializable isolation for all transactions in your + environment by using + EnvironmentConfig.setTxnSerializableIsolation(): +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.LockMode;
+
+...
+
+Database myDb = null;
+Environment myEnv = null;
+Transaction txn = null;
+
+try {
+
+    // Open an environment
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    envConfig.setTransactional(true);
+
+    // Use serializable isolation
+    envConfig.setTxnSerializableIsolation(true);     
+
+    myEnv = new Environment(myHomeDirectory, envConfig);
+
+    // Database open omitted
+
+    ...
+
+    txn = myEnv.beginTransaction(null, null);
+
+    DatabaseEntry theKey = 
+        new DatabaseEntry((new String("theKey")).getBytes("UTF-8"));
+    DatabaseEntry theData = new DatabaseEntry();
+
+    myDb.get(txn, theKey, theData, LockMode.DEFAULT); 
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+

+ If you do not configure serializable isolation for all transactions, you + can configure serializable isolation for a specific transaction using + TransactionConfig.setSerializableIsolation(): +

+
package persist.txn;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+import com.sleepycat.persist.PrimaryIndex;
+
+...
+
+Environment myEnv = null;
+Transaction txn = null;
+
+try {
+
+    // Environment and store open omitted
+
+    ...
+
+    TransactionConfig tc = new TransactionConfig();
+    tc.setSerializableIsolation(true); // Use serializable isolation
+    txn = myEnv.beginTransaction(null, tc);
+
+    // Primary index creation omitted
+    ...
+
+    AnEntityClass aec = aPrimaryIndex.get(txn, "pKeya", 
+                            LockMode.DEFAULT);
+} catch (Exception e) {
+    // Exception handling goes here
+}
+
+
+ + + diff --git a/docs/TransactionGettingStarted/jebackuprestore.html b/docs/TransactionGettingStarted/jebackuprestore.html new file mode 100644 index 0000000..ac7220b --- /dev/null +++ b/docs/TransactionGettingStarted/jebackuprestore.html @@ -0,0 +1,171 @@ + + + + + + Chapter 5. Backing up and Restoring Berkeley DB, Java Edition Applications + + + + + + + + + +
+
+
+
+

Chapter 5. Backing up and Restoring Berkeley DB, Java Edition Applications

+
+
+
+
+

+ Table of Contents +

+
+
+ + Normal Recovery + +
+
+ + Checkpoints + +
+
+ + Performing Backups + +
+
+
+
+ + Performing a Hot Backup + +
+
+ + Performing an Offline Backup + +
+
+ + Using the DbBackup Helper Class + +
+
+
+
+ + Performing Catastrophic Recovery + +
+
+ + Hot Failover + +
+
+
+

    + Fundamentally, you back up your databases by copying JE log
      files off to a safe storage location. To restore your database from
      a backup, you copy those files to an appropriate directory on disk
      and restart your JE application.

+

    + Note that if you are using subdirectories to store your JE log
      files, then your backup and restore process must maintain the
      relationship between each log file and the subdirectory in which
      JE initially placed it. That is, if JE placed log file
      number 17 in the subdirectory named data003,
      then when you perform a recovery log file number 17 must be placed
      inside subdirectory data003.

+

+ Beyond these simple activities, there are some differing backup strategies that you may want to consider. These + topics are described in this chapter. +

+

    + Before continuing, you should review the information on log files
      and background threads in the
      Getting Started with Berkeley DB, Java Edition guide. Those topics contain important
      information that is basic to the following discussion on backups and restores.

+
+
+
+
+

Normal Recovery

+
+
+
+

+ Remember that internally JE databases are organized in a BTree, and that in order to operate JE + requires the complete BTree be available to it. +

+

+ When database records are created, modified, or deleted, the modifications are represented in the BTree's + leaf nodes. Beyond leaf node changes, database record modifications can also cause changes to other BTree + nodes and structures. +

+

+ Now, if your writes are transaction-protected, then every time a transaction is committed the leaf nodes + (and only the leaf nodes) modified by that transaction are written to the JE log + files on disk. Also, remember that the durability of the write (whether a flush or fsync is + performed) depends on the type of commit that is requested. See Non-Durable Transactions for more information. +

+

+ Normal recovery, then, is the process of recreating the entire BTree from the information available in the + leaf nodes. You do not have to do anything special to cause normal recovery to be run; this occurs every + time a JE environment is opened. +

+

    + You can determine whether normal recovery must be run by catching
      EnvironmentFailureException.
      This exception indicates that a failure has occurred that impacts the
      Environment as a whole. Upon seeing this exception, you should call
      Environment.isValid(). If it returns true,
      then you can continue operating without any further action. However, if it
      returns false, then you must close and reopen all
      Environment handles so that normal recovery can be run.

+
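    + A minimal sketch of this pattern follows; the envHome and
      myEnvConfig names are illustrative, and an import of
      com.sleepycat.je.EnvironmentFailureException is assumed:

try {
    // ... normal database operations using myEnv ...
} catch (EnvironmentFailureException efe) {
    if (!myEnv.isValid()) {
        // The environment is no longer usable: close and reopen all
        // Environment handles so that normal recovery can be run.
        myEnv.close();
        myEnv = new Environment(envHome, myEnvConfig);
    }
    // If isValid() returned true, the handle can continue to be used.
}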
+
+ + + diff --git a/docs/TransactionGettingStarted/jecatastrophicrecovery.html b/docs/TransactionGettingStarted/jecatastrophicrecovery.html new file mode 100644 index 0000000..5339c86 --- /dev/null +++ b/docs/TransactionGettingStarted/jecatastrophicrecovery.html @@ -0,0 +1,114 @@ + + + + + + Performing Catastrophic Recovery + + + + + + + + + +
+
+
+
+

Performing Catastrophic Recovery

+
+
+
+

+ Catastrophic recovery is necessary whenever your environment and/or database have been lost or corrupted + due to a media failure (disk failure, for example). Catastrophic recovery is also required if normal + recovery fails for any reason. +

+

    + In order to perform catastrophic recovery, you must have a full backup of your databases. You will use
      this backup to restore your database. See the previous section for
      information on running backups.

+

+ To perform catastrophic recovery: +

+
+
    +
  1. +

    + Shut down your application. +

    +
  2. +
  3. +

    + Delete the contents of your environment home directory (the one that experienced a catastrophic + failure), if there is anything there. +

    +
  4. +
  5. +

    + Copy your most recent full backup into your environment home directory. + If you are using subdirectories to store your log files, be sure to place + the recovered log files back into the subdirectory from which they were + originally backed up. +

    +
  6. +
  7. +

    + If you are using a backup utility that runs incremental backups of your environment directory, + copy any log files generated since the time of your last full backup. + Be sure to restore all log files in the order that they were written. The order is important because + it is possible the same log file appears in multiple archives, and you want to run recovery + using the most recent version of each log file. +

    +
  8. +
  9. +

    + Open the environment as normal. JE's normal recovery will run, which will bring your database + to a consistent state relative to the changed data found in your log files. +

    +
  10. +
+
+

+ You are now done restoring your database. +

+
+ + + diff --git a/docs/TransactionGettingStarted/jehotfailover.html b/docs/TransactionGettingStarted/jehotfailover.html new file mode 100644 index 0000000..d28efce --- /dev/null +++ b/docs/TransactionGettingStarted/jehotfailover.html @@ -0,0 +1,131 @@ + + + + + + Hot Failover + + + + + + + + + +
+
+
+
+

Hot Failover

+
+
+
+

+ As a final backup/recovery strategy, you can create a hot failover. Note that using hot failovers requires + your application to be able to specify its environment home directory at application startup time. Most + application developers allow the environment home directory to be identified using a command line option or a + configuration or properties file. If your application has its + environment home hard-coded into it, you cannot use hot failovers. +

+

+ You create a hot failover by periodically + backing up your database to an alternative location on disk. Usually this + alternative location is on a separate physical drive from where you normally keep your database, but if + multiple drives are not available then you should at least put the hot failover on a separate disk + partition. +

+

    + You fail over by causing your application to reopen its environment using the failover
      location.

+

+ Note that a hot failover should not be used as a substitute for backing up and archiving + your data to a safe location physically remote from your computing environment. Even if + your data is spread across multiple physical disks, a truly serious catastrophe (fires, + malevolent software viruses, faulty disk controllers, and so forth) can still cause you + to lose your data. +

+

+ To create and maintain a hot failover: +

+
+
    +
  1. +

    + Copy all log files (*.jdb) from your environment
      directory to the location where you want to keep your failover. Either an
      offline or a hot backup can be used for this purpose, but typically a hot
      failover is initially created by taking an offline backup of your database.
      This ensures that you have captured the contents of your in-memory cache.
      (A sketch of the copy step follows this list.)

    +
    +

    Note

    +

    + If you are using subdirectories to store your log files, then you must backup + the subdirectories, making sure to keep log files in the subdirectory in which + JE placed them. For information on using subdirectories to store your log + files, see the Getting Started with Berkeley DB, Java Edition guide. +

    +
    +
  2. +
  3. +

    + Periodically copy to your failover directory any log files that were changed or created since the + time of your last copy. Most backup software is capable of performing this kind of an incremental + backup for you. +

    +

    + Note that the frequency of your incremental copies determines the amount of data
      that is at risk due to catastrophic failures. For example, if you perform the
      incremental copy once an hour then at most your hot failover is an hour behind
      your production database, and so you are risking at most an hour's worth of
      database changes.

    +
  4. +
  5. +

    + Remove any *.jdb files from the hot failover directory that have been removed or + renamed to .del files in the primary directory. This is not necessary for + consistency, but will help to reduce disk space consumed by the hot failover. +

    +
  6. +
+
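    + As a minimal sketch of the copy steps above (all paths are
      illustrative; a consistent hot backup should be obtained using the
      DbBackup helper class described earlier in this chapter):

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class HotFailoverCopy {
    public static void main(String[] args) throws IOException {
        Path envHome = Paths.get("/my/env/home");
        Path failoverHome = Paths.get("/failover/env/home");
        Files.createDirectories(failoverHome);

        // Copy every JE log file, overwriting any older failover copy.
        try (DirectoryStream<Path> logs =
                 Files.newDirectoryStream(envHome, "*.jdb")) {
            for (Path log : logs) {
                Files.copy(log, failoverHome.resolve(log.getFileName()),
                           StandardCopyOption.REPLACE_EXISTING);
            }
        }
    }
}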
+
+ + + diff --git a/docs/TransactionGettingStarted/jelock.html b/docs/TransactionGettingStarted/jelock.html new file mode 100644 index 0000000..2847b28 --- /dev/null +++ b/docs/TransactionGettingStarted/jelock.html @@ -0,0 +1,260 @@ + + + + + + JE Lock Management + + + + + + + + + +
+
+
+
+

JE Lock Management

+
+
+
+ +

+ To manage locks in JE, you must do two things: +

+
+
    +
  1. +

    + Manage lock timeouts. +

    +
  2. +
  3. +

    + Detect and respond to lock conflicts. Conceptually, + these are deadlocks. But from a coding point of view + there is no difference between what you do if a lock + times out, and what you do if you encounter a deadlock. + In fact, in JE, you cannot tell the difference + based on the exceptions that are thrown. +

    +
  4. +
+
+
+
+
+
+

Managing JE Lock Timeouts

+
+
+
+

    + Like transaction timeouts (see Configuring the Transaction Subsystem),
      JE allows you to identify the longest period of time that a lock is allowed to be held.
      This value plays an important part in performing deadlock detection, because the only way JE can
      identify a deadlock is if a lock is held past its timeout value.

+

    + However, unlike transaction timeouts, lock timeouts are on a true timer. Transaction
      timeouts are only identified when JE has a reason to examine its lock table; that is,
      when it is attempting to acquire a lock. If no such activity is
      occurring in your application, a transaction can exist for a long time past its expiration timeout.
      Conversely, lock timeouts are managed by a timer maintained by the JVM. Once this timer has expired,
      your application will be notified of the event (see the next section on deadlock detection for more
      information).

+

+ You can set the lock timeout on a transaction by transaction basis, or for the entire environment. To + set it on a transaction basis, use Transaction.setLockTimeout(). + To set it for your entire environment, use EnvironmentConfig.setLockTimeout() + or use the je.lock.timeout parameter in the je.properties file. +

+

+ The value that you specify for the lock timeout is in microseconds. 500000 is used by + default. +

+
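    + As a minimal sketch of both approaches, assuming the
      microsecond-based setLockTimeout() signatures described here:

EnvironmentConfig envConfig = new EnvironmentConfig();
envConfig.setTransactional(true);
envConfig.setLockTimeout(1000000);   // environment-wide: 1 second

Environment env = new Environment(new File("/my/env/home"), envConfig);

Transaction txn = env.beginTransaction(null, null);
txn.setLockTimeout(250000);          // this transaction only: 250 ms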

    + Note that changing this value can have an effect on your application's performance. If you set it too
      low, locks may expire and be considered deadlocked even though the thread is in fact making
      forward progress. This will cause your application to abort and retry transactions unnecessarily, which
      can ultimately harm application throughput.
      If you set it too high, threads may deadlock for too long before your application receives notification
      and is able to take corrective action. Again, this can harm application throughput.

+

+ Note that for applications in which you will have extremely long-lived locks, you + may want to set this value to 0. Doing so disables lock timeouts + entirely. Be aware that disabling lock timeouts can be dangerous because then your + application will never be notified of deadlocks. So, alternatively, you might want + to set this value to a very large timeout (such as ten minutes) if your application + is using extremely long-lived locks. +

+
+
+
+
+
+

Managing Deadlocks and other Lock Conflicts

+
+
+
+

+ A deadlock is the result of a lock conflict that cannot be + resolved by the underlying JE code before the lock + times out. Generically, we consider this situation a + lock conflict because there is no way + to tell if the lock timed out because of a true deadlock, + or if it timed out because a long-running operation simply + held the lock for too long a period of time. +

+

    + When a lock conflict occurs in JE, the thread of control holding
      that lock is notified of the event using a
      LockConflictException exception. Note
      that this exception is actually a common base class for several
      exception classes that might be able to give you more of a
      hint as to what the actual problem is. However, the
      response that you make for any of these exceptions is
      probably going to be the same, so the best thing to do is
      simply catch and manage LockConflictException.

+

+ When a LockConflictException is + thrown, the thread must: +

+
+
    +
  1. +

    + Cease all read and write operations. +

    +
  2. +
  3. +

    + Close all open cursors. +

    +
  4. +
  5. +

    + Abort the transaction. +

    +
  6. +
  7. +

    + Optionally retry the operation. If your application + retries operations that are aborted due to a lock + conflict, the new attempt must be made using a new transaction. +

    +
  8. +
+
+
+

Note

+

+ If a thread has encountered a lock conflict, it may not make any + additional database calls using the transaction handle that has + experienced the lock conflict. +

+
+

+ For example: +

+
// retry_count is a counter used to identify how many times
+// we've retried this operation. To avoid the potential for 
+// endless looping, we won't retry more than MAX_DEADLOCK_RETRIES 
+// times.
+
+// txn is a transaction handle.
+// key and data are DatabaseEntry handles. Their usage is not shown here.
+while (retry_count < MAX_DEADLOCK_RETRIES) {
+    try {
+        txn = myEnv.beginTransaction(null, null);
+        myDatabase.put(txn, key, data);
+        txn.commit();
+        return 0;
+    } catch (LockConflictException le) {
+        try {
+            // Abort the transaction and increment the
+            // retry counter
+            if (txn != null) {
+                txn.abort();
+            }
+            retry_count++;
+            if (retry_count >= MAX_DEADLOCK_RETRIES) {
+                System.err.println("Exceeded retry limit. Giving up.");
+                return -1;
+            }
+        } catch (DatabaseException ae) {
+            System.err.println("txn abort failed: " + ae.toString());
+            return -1;    
+        }
+    } catch (DatabaseException e) {
+        // If we catch a generic DatabaseException instead of
+        // a LockConflictException, we simply abort and give
+        // up -- we don't retry the operation.
+        try {
+            // Abort the transaction.
+            if (txn != null) {
+                txn.abort();
+            }
+        } catch (DatabaseException ae) {
+            System.err.println("txn abort failed: " + ae.toString());
+        }
+        return -1;    
+    }
+} 
+
+
+ + + diff --git a/docs/TransactionGettingStarted/maxtxns.html b/docs/TransactionGettingStarted/maxtxns.html new file mode 100644 index 0000000..78a191a --- /dev/null +++ b/docs/TransactionGettingStarted/maxtxns.html @@ -0,0 +1,145 @@ + + + + + + Configuring the Transaction Subsystem + + + + + + + + + +
+
+
+
+

Configuring the Transaction Subsystem

+
+
+
+ + + + + +

+ + + + When you configure your transaction subsystem, you need to consider your transaction timeout + value. + + This value represents the longest period of time a + transaction can be active. Note, however, that + transaction timeouts are checked only when JE + examines its lock tables for blocked locks + (see Locks, Blocks, and Deadlocks + for more information). Therefore, a transaction's timeout can + have expired, but the application will not be notified until JE + has a reason to examine its lock tables. +

+

+ Be aware that some transactions may be + inappropriately timed out before the transaction has a + chance to complete. You should therefore use this + mechanism only if you know your application + might have unacceptably long transactions and + you want to make sure your application will + not stall during their execution. + (This might happen if, for example, your + transaction blocks or requests too much + data.) +

+

+ Note that by default transaction timeouts are set to 0 seconds, which means that they never time + out. +

+

    + To set the maximum timeout value for your transactions,
      use the
      EnvironmentConfig.setTxnTimeout()
      method. This method configures the entire
      environment, not just the handle used to set the
      configuration. Further, this value may
      be set at any time during the application's
      lifetime.

+ +

+ This value can also be set using the je.txn.timeout property in your JE + properties file. +

+
+ +
+

+ For example: +

+
package je.txn;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+
+...
+
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    // Configure a maximum transaction timeout of 1 second.
+    myEnvConfig.setTxnTimeout(1000000);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // From here, you open your databases (or store), proceed with your 
+    // database or store operations, and respond to deadlocks as is 
+    // normal (omitted for brevity).
+
+    ...
+
+ + + diff --git a/docs/TransactionGettingStarted/moreinfo.html b/docs/TransactionGettingStarted/moreinfo.html new file mode 100644 index 0000000..ba3d47a --- /dev/null +++ b/docs/TransactionGettingStarted/moreinfo.html @@ -0,0 +1,166 @@ + + + + + + For More Information + + + + + + + + + +
+
+
+
+

For More Information

+
+
+
+
+
+
+ + Contact Us + +
+
+
+

+ Beyond this manual, you may also find the following sources of + information useful when building a transactional JE + application: +

+ + +

+ To download the latest + + Berkeley DB Java Edition + + documentation along with white papers and other collateral, + visit http://www.oracle.com/technetwork/indexes/documentation/index.html. +

+

+ For the latest version of the Oracle + + Berkeley DB Java Edition + + downloads, visit + http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html. +

+
+
+
+
+
+

Contact Us

+
+
+
+

    + You can post your comments and questions at the Oracle
      Technology Network (OTN) forum for
      Oracle Berkeley DB Java Edition at: https://forums.oracle.com/forums/forum.jspa?forumID=273.

+

    + For sales or support information, email to:
      berkeleydb-info_us@oracle.com.
      You can subscribe to a low-volume email announcement list for
      the Berkeley DB product family by sending email to:
      bdb-join@oss.oracle.com.

+
+
+ + + diff --git a/docs/TransactionGettingStarted/multithread-intro.html b/docs/TransactionGettingStarted/multithread-intro.html new file mode 100644 index 0000000..816341f --- /dev/null +++ b/docs/TransactionGettingStarted/multithread-intro.html @@ -0,0 +1,96 @@ + + + + + + Multi-threaded Applications + + + + + + + + + +
+
+
+
+

Multi-threaded + + Applications

+
+
+
+

    + JE is designed to support multi-threaded applications, but using
      threads means you must pay careful attention to issues of concurrency.
      Transactions help your application's concurrency by providing various levels of
      isolation for your threads of control. In addition, JE
      provides mechanisms that allow you to detect and respond to
      deadlocks.

+

+ Isolation means that database modifications made by + one transaction will not normally be seen by readers from another + transaction until the first commits its changes. Different threads + use different transaction handles, so + this mechanism is normally used to provide isolation between + database operations performed by different threads. +

+

+ Note that JE supports different isolation levels. For example, + you can configure your application to see uncommitted reads, which means + that one transaction can see data that has been modified but not yet + committed by another transaction. Doing this might mean your + transaction reads data "dirtied" by another transaction, + but which subsequently might change before that + other transaction commits its changes. + On the other hand, lowering your isolation + requirements means that your application can experience + improved throughput due to reduced lock contention. +

+

+ For more information on concurrency, on managing isolation + levels, and on deadlock detection, see Concurrency. +

+
+ + + diff --git a/docs/TransactionGettingStarted/nodurabletxn.html b/docs/TransactionGettingStarted/nodurabletxn.html new file mode 100644 index 0000000..36eff4c --- /dev/null +++ b/docs/TransactionGettingStarted/nodurabletxn.html @@ -0,0 +1,270 @@ + + + + + + Non-Durable Transactions + + + + + + + + + +
+
+
+
+

Non-Durable Transactions

+
+
+
+

+ As previously noted, by default transaction commits are + durable because they cause the modifications performed + under the transaction to be synchronously recorded in + your on-disk log files. However, it is possible to use + non-durable transactions. +

+

+ You may want non-durable transactions for performance + reasons. For example, you might be using transactions + simply for the isolation guarantee. + + + + In this case, you might want to relax the synchronized write to disk that JE normally performs + as part of a transaction commit. Doing so means that your data will still make it to disk; however, + your application will not necessarily have to wait for the disk I/O to complete before it can + perform another database operation. This can greatly improve throughput for some workloads. + +

+

+ To relax the durability guarantee for your transactions, + you use the Durability class to + define the durability policy that you want to use. The + Durability class constructor takes + three arguments, only one of which is interesting for a + standalone transactional application: +

+
+
    +
  • +

    + The synchronization policy for the local machine. +

    +
  • +
  • +

    + The synchronization policy for Replicas. Used only for + JE HA applications. +

    +
  • +
  • +

    + The acknowledgement policy. Again, this is required + only for JE HA applications. +

    +
  • +
+
+

+ We describe JE High Availability Applications in the + Berkeley DB, Java Edition Getting Started with High Availability Applications guide. +

+

+ The synchronization policy that you give the + Durability class constructor can be + one of the following: +

+
+
    +
  • +

    + Durability.SyncPolicy.SYNC +

    +

    + Write and synchronously flush the log to disk upon
      transaction commit. This offers the most durable
      transaction configuration because the commit
      operation will not return until all of the disk I/O
      is complete. But, conversely, this offers the worst
      possible write performance because disk I/O is an
      expensive and time-consuming operation.

    +

    + This is the default synchronization policy. A + transaction that uses this policy is considered to + be durable. +

    +
  • +
  • +

    + Durability.SyncPolicy.NO_SYNC +

    +

    + This causes JE to not synchronously force any data + to disk upon transaction commit. That is, the + modifications are held entirely inside the JVM and + the modifications are not forced to the file system + for long-term storage. Note, however, that the + data will eventually make it to the filesystem + (assuming no application or OS crashes) as a part + of JE's management of its logging buffers and/or + cache. +

    +

    + This form of a commit provides a weak durability + guarantee because data loss can occur due to an + application, JVM, or OS crash. In fact, this + represents the least durable configuration that you + can provide for your transactions. But it also + offers much better write performance than the other + options. +

    +
  • +
  • +

    + Durability.SyncPolicy.WRITE_NO_SYNC +

    +

    + This causes data to be synchronously written to the + OS's file system buffers upon transaction commit. + The data will eventually be written to disk, but + this occurs when the operating system chooses to + schedule the activity; the transaction commit can + complete successfully before this disk I/O is + performed by the OS. +

    +

    + This form of commit protects you against + application and JVM crashes, but not against OS + crashes. This method offers less room for the + possibility of data loss than does NO_SYNC. +

    +
  • +
+
+

+ You can specify your durability policy on an + environment-wide basis by creating a + Durability class and then giving it + to EnvironmentConfig.setDurability(). + You can also override the environment default durability + policy on a transaction-by-transaction basis by providing a + Durability class to the + TransactionConfig object you use to + configure your transaction using the + TransactionConfig.setDurability() + method. +

+

+ For example: +

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+import java.io.File;
+import java.io.UnsupportedEncodingException;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+    Durability defaultDurability = 
+        new Durability(Durability.SyncPolicy.NO_SYNC, 
+                       null,    // unused by non-HA applications. 
+                       null);   // unused by non-HA applications.  
+
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+    myEnvConfig.setDurability(defaultDurability);
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    myDatabase = myEnv.openDatabase(null,
+                                    "sampleDatabase",
+                                    dbConfig);
+
+    String keyString = "thekey";
+    String dataString = "thedata";
+    DatabaseEntry key = 
+        new DatabaseEntry(keyString.getBytes("UTF-8"));
+    DatabaseEntry data = 
+        new DatabaseEntry(dataString.getBytes("UTF-8"));
+
+    Durability newDurability = 
+        new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, 
+                       null,    // unused by non-HA applications. 
+                       null);   // unused by non-HA applications.
+
+    TransactionConfig tc = new TransactionConfig();
+    tc.setDurability(newDurability);
+    Transaction txn = myEnv.beginTransaction(null, tc);
+        
+    try {
+        myDatabase.put(txn, key, data);
+        txn.commit();
+    } catch (Exception e) {
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    }
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} catch (UnsupportedEncodingException uee) {
+    // Thrown by String.getBytes("UTF-8"). Exception handling
+    // goes here
+} 
+
+ + + diff --git a/docs/TransactionGettingStarted/perftune-intro.html b/docs/TransactionGettingStarted/perftune-intro.html new file mode 100644 index 0000000..d9b6f44 --- /dev/null +++ b/docs/TransactionGettingStarted/perftune-intro.html @@ -0,0 +1,79 @@ + + + + + + Performance Tuning + + + + + + + + + +
+
+
+
+

Performance Tuning

+
+
+
+

+ From a performance perspective, the use of transactions is not free. + Depending on how you configure them, transaction commits + usually require your application to perform disk I/O that a non-transactional + application does not perform. Also, for multi-threaded + applications, the use of transactions can + result in increased lock contention due to extra locking + requirements driven by transactional isolation guarantees. +

+

+ There is therefore a performance tuning component to transactional applications + that is not applicable for non-transactional applications (although + some tuning considerations do exist whether or not your application uses + transactions). Where appropriate, these tuning considerations are + introduced in the following chapters. + + + +

+
+ + + diff --git a/docs/TransactionGettingStarted/preface.html b/docs/TransactionGettingStarted/preface.html new file mode 100644 index 0000000..4f00821 --- /dev/null +++ b/docs/TransactionGettingStarted/preface.html @@ -0,0 +1,168 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+ + For More Information + +
+
+
+
+ + Contact Us + +
+
+
+
+
+

+ This document describes how to use transactions with your Berkeley DB, Java Edition
+ applications. It is intended to describe how to
+ transaction-protect your application's data. The APIs used to perform this task
+ are described here, as are the environment infrastructure and administrative tasks
+ required by a transactional application. This book also
+ describes multi-threaded JE applications and the requirements they
+ have for deadlock detection. 

+

+ This book describes Berkeley DB, Java Edition version 12c Release 2. 

+

+ This book is aimed at the software engineer responsible for writing a + transactional JE application. +

+

+ This book assumes that you have already read and understood the + concepts contained in the + + + Getting Started with Berkeley DB, Java Edition guide. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+

+ The following typographical conventions are used within this manual: 

+

+ Class names are represented in monospaced font, as are method + names. For example: + + "The Environment.openDatabase() method + returns a Database class object." + + + + +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + JE_HOME directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. + For example: +

+
import com.sleepycat.je.Environment;
+
+...
+
+// Open the environment. Allow it to be created if it does not already 
+// exist.
+Environment myDbEnv;
+

+ In some situations, programming examples are updated from one chapter to the next. When + this occurs, the new code is presented in monospaced bold font. For example: +

+
import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not already 
+// exist.
+Environment myDbEnv;
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+myDbEnv = new Environment(new File("/export/dbEnv"), envConfig); 
+
+

Note

+

+ Finally, notes of special interest are represented using a note block such + as this. +

+
+
+
+ + + diff --git a/docs/TransactionGettingStarted/readblock.jpg b/docs/TransactionGettingStarted/readblock.jpg new file mode 100644 index 0000000..16a511f Binary files /dev/null and b/docs/TransactionGettingStarted/readblock.jpg differ diff --git a/docs/TransactionGettingStarted/readmodifywrite.html b/docs/TransactionGettingStarted/readmodifywrite.html new file mode 100644 index 0000000..fb92ff7 --- /dev/null +++ b/docs/TransactionGettingStarted/readmodifywrite.html @@ -0,0 +1,159 @@ + + + + + + Read/Modify/Write + + + + + + + + + +
+
+
+
+

Read/Modify/Write

+
+
+
+

+ If you are retrieving
+ a record from the database or a class from the store
+ for the purpose of modifying or deleting it, you should declare
+ a read-modify-write cycle at the time that you read the
+ record.
+ Doing so causes JE to obtain write locks (instead of read
+ locks) at the time of the read. This helps to prevent deadlocks by
+ preventing another transaction from acquiring a read lock on the same
+ record while the read-modify-write cycle is in progress. 

+

+ Note that declaring a read-modify-write cycle may actually increase the amount of blocking that your + application sees, because readers immediately obtain write locks and write locks cannot be shared. For this + reason, you should use read-modify-write cycles only if you are seeing a large amount of deadlocking + occurring in your application. +

+

+ In order to declare a read/modify/write cycle when you perform a + read operation, + + + + + + + specify + + com.sleepycat.je.LockMode.RMW + to the database, cursor, + PrimaryIndex, or + SecondaryIndex get method. + +

+

+ For example: +

+
// Begin the deadlock retry loop as is normal.
+while (retry_count < MAX_DEADLOCK_RETRIES) {
+    try {
+        txn = myEnv.beginTransaction(null, null);
+
+        ...
+        // key and data are DatabaseEntry objects.
+        // Their usage is omitted for brevity.
+        ...
+
+        // Read the data. Declare the read/modify/write cycle here
+        myDatabase.get(txn, key, data, LockMode.RMW);
+
+
+        // Put the data. Note that you do not have to provide any 
+        // additional flags here due to the read/modify/write 
+        // cycle. Simply put the data and perform your deadlock 
+        // detection as normal.
+        myDatabase.put(txn, key, data);
+        txn.commit();
+        return 0;
+    } catch (DeadlockException de) {
+        // Deadlock detection and exception handling omitted
+        // for brevity
+        ... 
+

+ Or, with the DPL: +

+
// Begin the deadlock retry loop as is normal
+while (retry_count < MAX_DEADLOCK_RETRIES) {
+    try {
+        txn = myEnv.beginTransaction(null, null);
+
+        ...
+        // 'store' is an EntityStore and 'Inventory' is an entity class
+        // Their usage and implementation is omitted for brevity.
+        ...
+
+        // Read the data, using the PrimaryIndex for the entity object
+        PrimaryIndex<String,Inventory> pi = 
+                store.getPrimaryIndex(String.class, Inventory.class);
+        Inventory iv = pi.get(txn, "somekey", LockMode.RMW);
+
+        // Do something to the retrieved object
+
+
+        // Put the object. Note that you do not have to provide any 
+        // additional flags here due to the read/modify/write 
+        // cycle. Simply put the data and perform your deadlock 
+        // detection as normal.
+
+        pi.put(txn, iv);
+        txn.commit();
+        return 0;
+
+    } catch (DeadlockException de) {
+        // Deadlock detection and exception handling omitted
+        // for brevity
+        ... 
+
+ + + diff --git a/docs/TransactionGettingStarted/recovery-intro.html b/docs/TransactionGettingStarted/recovery-intro.html new file mode 100644 index 0000000..f75dc60 --- /dev/null +++ b/docs/TransactionGettingStarted/recovery-intro.html @@ -0,0 +1,98 @@ + + + + + + Recoverability + + + + + + + + + +
+
+
+
+

Recoverability

+
+
+
+

+ An important part of JE's transactional guarantees is durability. + Durability means that once a + transaction has been committed, the database modifications performed + under its protection will not be lost due to system failure. +

+

+ + + JE supports a normal recovery that runs against a subset of + your log files. This is a routine procedure used whenever your + environment is first opened upon application startup, and it is intended to + ensure that your database is in a consistent state. JE also + supports archival backup and recovery in the case of + catastrophic failure, such as the loss of a physical disk + drive. +

+
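+ Note that you do not invoke normal recovery explicitly: simply
+ opening a transactional environment runs it. The following is a
+ minimal sketch, assuming the usual com.sleepycat.je imports and a
+ hypothetical environment home directory:
+
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setTransactional(true);
+
+// Normal recovery runs here, as part of the environment open,
+// bringing the environment's databases to a consistent state
+// before the constructor returns.
+Environment myEnv = new Environment(new File("/my/env/home"),
+                                    envConfig);
+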

+ This book describes several different backup procedures + you can use to protect your on-disk data. These procedures + range from simple offline backup strategies to hot failovers. Hot failovers + provide not only a backup mechanism, but + also a way to recover from a fatal hardware failure. +

+

+ This book also describes the recovery procedures you should use + for each of the backup strategies that you might employ. +

+

+ For a detailed description of backup and restore procedures, see + + + the Getting Started with Berkeley DB, Java Edition guide. + + +

+
+ + + diff --git a/docs/TransactionGettingStarted/rwlocks1.jpg b/docs/TransactionGettingStarted/rwlocks1.jpg new file mode 100644 index 0000000..0fc88fd Binary files /dev/null and b/docs/TransactionGettingStarted/rwlocks1.jpg differ diff --git a/docs/TransactionGettingStarted/simplelock.jpg b/docs/TransactionGettingStarted/simplelock.jpg new file mode 100644 index 0000000..8dca4ad Binary files /dev/null and b/docs/TransactionGettingStarted/simplelock.jpg differ diff --git a/docs/TransactionGettingStarted/sysfailure.html b/docs/TransactionGettingStarted/sysfailure.html new file mode 100644 index 0000000..c92976d --- /dev/null +++ b/docs/TransactionGettingStarted/sysfailure.html @@ -0,0 +1,112 @@ + + + + + + A Note on System Failure + + + + + + + + + +
+
+
+
+

A Note on System Failure

+
+
+
+

+ From time to time this manual mentions that transactions + protect your data against 'system or application failure.' This + is true up to a certain extent. However, not all failures are + created equal and no data protection mechanism can protect you + against every conceivable way a computing system can find to + die. +

+

+ Generally, when this book talks about protection against + failures, it means that transactions offer protection against + the likeliest culprits for system and application crashes. So + long as your data modifications have been committed to disk, + those modifications should persist even if your application or + OS subsequently fails. And, even if the application or OS + fails in the middle of a transaction commit (or abort), the + data on disk should be either in a consistent state, or there + should be enough data available to bring your databases into a + consistent state (via a recovery procedure, for example). You + may, however, lose whatever data you were committing at the + time of the failure, but your databases will be otherwise + unaffected. +

+
+

Note

+

+ Be aware that many disks have a disk write cache and on + some systems it is enabled by default. This means that + a transaction can have committed, and to your + application the data may appear to reside on disk, but + the data may in fact reside only in the write cache at + that time. This means that if the disk write cache is + enabled and there is no battery backup for it, data can + be lost after an OS crash even when maximum durability + mode is in use. For maximum durability, disable the + disk write cache or use a disk write cache with a + battery backup. +

+
+

+ Of course, if your disk fails, then the transactional benefits described in this book + are only as good as the backups you have taken. + + + +

+

+ Finally, by following the programming examples shown in this book, you can write your code so as to protect + your data in the event that your code crashes. However, no programming API can protect you against logic + failures in your own code; transactions cannot protect you from simply writing the wrong thing to your + databases. +

+
+ + + diff --git a/docs/TransactionGettingStarted/txn_ccursor.html b/docs/TransactionGettingStarted/txn_ccursor.html new file mode 100644 index 0000000..4e7959a --- /dev/null +++ b/docs/TransactionGettingStarted/txn_ccursor.html @@ -0,0 +1,226 @@ + + + + + + Transactional Cursors and Concurrent Applications + + + + + + + + + +
+
+
+
+

Transactional Cursors and Concurrent Applications

+
+
+
+ +

+ When you use transactional cursors with a concurrent application, remember that + in the event of a deadlock you must make sure that you close your cursor before you abort and retry your + transaction. This is true of both + base API and DPL cursors. +

+

+ Also, remember that when you are using the default isolation level, + every time your cursor reads a record it locks + that record until the encompassing transaction is resolved. This + means that walking your database with a transactional cursor + increases the chance of lock contention. +

+

+ For this reason, if you must routinely walk your database with a + transactional cursor, consider using a reduced isolation level + such as read committed. This is + true of both base API and DPL cursors. +

+
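+ As a minimal sketch of this reduced isolation level, a base API
+ cursor can be configured for read committed isolation when it is
+ opened. The myDatabase and txn handles here are assumed to
+ already exist:
+
+CursorConfig cconfig = new CursorConfig();
+// With read committed isolation, the cursor does not hold its
+// read locks for the life of the transaction, which reduces
+// lock contention when walking the database.
+cconfig.setReadCommitted(true);
+Cursor cursor = myDatabase.openCursor(txn, cconfig);
+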
+
+
+
+

Using Cursors with Uncommitted Data

+
+
+
+

+ As described in Reading Uncommitted Data
+ above, it is possible to relax your transaction's isolation
+ level such that it can read data modified but not yet committed
+ by another transaction. You can configure this when you create
+ your transaction handle, and when you do so, all cursors opened
+ inside that transaction will automatically use uncommitted reads. 

+
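+ As a minimal sketch, the transaction-level configuration looks
+ like this, assuming an open transactional environment, myEnv:
+
+TransactionConfig txnConfig = new TransactionConfig();
+// All cursors opened inside this transaction will automatically
+// perform uncommitted reads.
+txnConfig.setReadUncommitted(true);
+Transaction txn = myEnv.beginTransaction(null, txnConfig);
+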

+ You can also do this when you create a cursor handle from within
+ a serializable transaction. When you do this, only those
+ cursors configured for uncommitted reads perform uncommitted reads. 

+

+ The following example shows how to configure an individual cursor handle + to read uncommitted data from within a serializable (full isolation) transaction. + For an example of + configuring a transaction to perform uncommitted reads in + general, see Reading Uncommitted Data. +

+
package je.txn;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+
+import java.io.File;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the database.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    myDatabase = myEnv.openDatabase(null,              // txn handle
+                                    "sampleDatabase",  // db file name
+                                    dbConfig);
+
+    // Open the transaction. Note that this is a repeatable
+    // read transaction.
+    Transaction txn = myEnv.beginTransaction(null, null);
+    Cursor cursor = null;
+    try {
+        // Use the transaction handle here
+        // Get our cursor. Note that we pass the transaction 
+        // handle here. Note also that we cause the cursor 
+        // to perform uncommitted reads.
+        CursorConfig cconfig = new CursorConfig();
+        cconfig.setReadUncommitted(true);
+        cursor = myDatabase.openCursor(txn, cconfig);
+
+        // From here, you perform your cursor reads and writes 
+        // as normal, committing and aborting the transactions as 
+        // is necessary, and testing for deadlock exceptions as 
+        // normal (omitted for brevity). 
+        
+        ... 
+

+ If you are using the DPL: +

+
package persist.txn;
+
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+
+import java.util.Iterator;
+
+import java.io.File;
+
+...
+
+EntityStore myStore = null;
+Environment myEnv = null;
+PrimaryIndex<String,AnEntityClass> pKey;
+try {
+
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Set up the entity store
+    StoreConfig myStoreConfig = new StoreConfig();
+    myStoreConfig.setAllowCreate(true);
+    myStoreConfig.setTransactional(true);
+
+    // Instantiate the store
+    myStore = new EntityStore(myEnv, storeName, myStoreConfig);
+
+    // Get the primary index for the entity class. The cursor is
+    // opened from this index below.
+    pKey = myStore.getPrimaryIndex(String.class, AnEntityClass.class);
+
+    // Open the transaction. Note that this is a repeatable
+    // read transaction.
+    Transaction txn = myEnv.beginTransaction(null, null);
+
+    //Configure our cursor for uncommitted reads.
+    CursorConfig cconfig = new CursorConfig();
+    cconfig.setReadUncommitted(true);
+
+    // Get our cursor. Note that we pass the transaction 
+    // handle here. Note also that we cause the cursor 
+    // to perform uncommitted reads.
+    EntityCursor<AnEntityClass> cursor = pKey.entities(txn, cconfig);
+
+    try {
+        // From here, you perform your cursor reads and writes 
+        // as normal, committing and aborting the transactions as 
+        // is necessary, and testing for deadlock exceptions as 
+        // normal (omitted for brevity). 
+        
+        ... 
+
+
+ + + diff --git a/docs/TransactionGettingStarted/txnconcurrency.html b/docs/TransactionGettingStarted/txnconcurrency.html new file mode 100644 index 0000000..ac45a7c --- /dev/null +++ b/docs/TransactionGettingStarted/txnconcurrency.html @@ -0,0 +1,373 @@ + + + + + + Chapter 4. Concurrency + + + + + + + + + +
+
+
+
+

Chapter 4. Concurrency

+
+
+
+ +

+ + JE offers a great deal of support for multi-threaded + + applications even when transactions are not in use. Many of JE's + handles are + thread-safe + and JE provides a + flexible locking subsystem for managing databases in a concurrent + application. Further, JE provides a robust mechanism for + detecting and responding to + + lock conflicts. + All of these concepts are + explored in this chapter. +

+

+ Before continuing, it is useful to define a few terms that will appear + throughout this chapter: +

+
+
    +
  • +

    + Thread of control +

    +

    + Refers to a thread that is performing work in your application. + Typically, in this book that thread will be performing JE + operations. +

    +
  • +
  • +

    + Locking +

    +

    + When a thread of control obtains + access to a shared resource, it is said to be + locking that resource. Note that + JE supports both exclusive and non-exclusive locks. See + Locks for more information. +

    +
  • +
  • +

    + Free-threaded +

    +

    + Data structures and objects are free-threaded if they can be + shared across threads of control without any explicit locking on + the part of the application. Some books, libraries, and + programming languages may use the term + thread-safe for data structures or objects + that have this characteristic. The two terms mean the + same thing. +

    +

    + For a description of free-threaded JE objects, see + Which JE Handles are Free-Threaded. +

    +
  • +
  • +

    + Blocked +

    +

    + When a thread cannot obtain a lock because some other + thread already holds a lock on that object, the lock + attempt is said to be blocked. See + Blocks for more information. +

    +
  • +
  • +

    + Deadlock +

    +

    + Occurs when two or more threads of control attempt to access conflicting resources in such a way that none
+ of the threads can make any further progress. 

    +

    + For example, if Thread A is blocked waiting for a resource held by Thread + B, while at the same time Thread B is blocked waiting for a + resource held by Thread A, then neither thread can make any + forward progress. In this situation, Thread A and Thread B + are said to be deadlocked. +

    +

    + For more information, see Deadlocks. +

    +
  • +
  • +

    + Lock Conflict +

    +

    + In JE, a lock conflict simply means that a thread of
+ control attempted to obtain a lock, but was unable to get
+ it before the lock timeout period expired. This may have
+ happened because a deadlock has occurred, or it might have
+ happened because another thread is taking too long to
+ complete a long-running database operation. Either way,
+ you do the same things in response to a lock conflict
+ as you do for a true deadlock. See JE Lock Management for more information. A sketch of
+ the usual retry pattern follows this list. 

    +
  • +
+
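+ In practice, responding to a lock conflict usually means aborting
+ the transaction and retrying it. The following is a minimal sketch
+ of that pattern; the myEnv handle and the doWork() method are
+ hypothetical stand-ins for your own environment handle and
+ database operations:
+
+Transaction txn = null;
+boolean retry = true;
+int retryCount = 0;
+final int MAX_RETRY = 20;   // an arbitrary limit for this sketch
+
+while (retry && retryCount < MAX_RETRY) {
+    try {
+        txn = myEnv.beginTransaction(null, null);
+        doWork(txn);        // hypothetical database operations
+        txn.commit();
+        txn = null;
+        retry = false;
+    } catch (LockConflictException lce) {
+        // The transaction hit a deadlock or a lock timeout.
+        // The finally block aborts it, and the loop retries.
+        retryCount++;
+    } finally {
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    }
+}
+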
+
+
+
+
+

Which JE Handles are Free-Threaded

+
+
+
+

+ The following describes to what extent and under what conditions + individual handles are free-threaded. +

+
+
    +
  • +

    + + + Environment + + and the DPL + EntityStore + +

    +

    + These classes are free-threaded. 

    +
  • +
  • +

    + + + Database + + and the DPL + PrimaryIndex + +

    +

    + These classes are free-threaded. +

    +
  • +
  • +

    + SecondaryDatabase + and DPL SecondaryIndex +

    +

    + These classes are free-threaded. +

    +
  • +
  • +

    + + + Cursor + + and the DPL + EntityCursor + +

    +

    + If the cursor is a transactional cursor, it can be used by
+ multiple threads of control so long as the application
+ serializes access to the handle.
+ If the cursor is not a transactional cursor, it cannot be
+ shared across multiple threads of control at all. 

    +
  • +
  • +

    + SecondaryCursor +

    +

    + Same conditions apply as for Cursor + handles. +

    +
  • +
  • +

    + + + Transaction + +

    +

    + This class is free-threaded. +

    +
  • +
+
+
+

Note

+

+ All other classes found in the DPL + (com.sleepycat.persist.*) and not + mentioned above are free-threaded. +

+

+ All classes found in the bind APIs (com.sleepycat.bind.*) are free-threaded. +

+
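+ Because Environment and Database handles are free-threaded, a
+ typical concurrent application opens them once and shares them
+ across all of its threads. A minimal sketch, in which envConfig
+ and dbConfig are assumed to be transactional configurations and
+ WorkerThread is a hypothetical thread class:
+
+// One environment handle and one database handle are shared by
+// every thread; no application-level locking is needed for this.
+Environment sharedEnv = new Environment(new File("/my/env/home"),
+                                        envConfig);
+Database sharedDb = sharedEnv.openDatabase(null, "sampleDatabase",
+                                           dbConfig);
+
+for (int i = 0; i < 5; i++) {
+    new WorkerThread(sharedEnv, sharedDb).start();
+}
+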
+
+
+ + + diff --git a/docs/TransactionGettingStarted/txncursor.html b/docs/TransactionGettingStarted/txncursor.html new file mode 100644 index 0000000..99d6704 --- /dev/null +++ b/docs/TransactionGettingStarted/txncursor.html @@ -0,0 +1,217 @@ + + + + + + Transactional Cursors + + + + + + + + + +
+
+
+
+

Transactional Cursors

+
+
+
+ +

+ You can transaction-protect your cursor operations by + specifying a transaction handle at the time that you create + your cursor. Beyond that, you do not ever + provide a transaction handle directly to a cursor method. +

+

+ Note that if you transaction-protect a cursor, then you must + make sure that the cursor is closed before you either commit or + abort the transaction. For example: +

+
package je.txn;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+import java.io.File;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+
+    // Database and environment opens omitted
+
+    String replacementData = "new data";
+
+    Transaction txn = myEnv.beginTransaction(null, null);
+    Cursor cursor = null;
+    try {
+        // Use the transaction handle here
+        cursor = myDatabase.openCursor(txn, null);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        while(cursor.getNext(key, data, LockMode.DEFAULT) ==
+           OperationStatus.SUCCESS) {
+            
+            data.setData(replacementData.getBytes("UTF-8"));
+            // No transaction handle is used on the cursor read or write
+            // methods.
+            cursor.putCurrent(data);
+        }
+        
+        cursor.close();
+        cursor = null;
+        txn.commit();
+    } catch (Exception e) {
+        if (cursor != null) {
+            cursor.close();
+        }
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    }
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} 
+
+
+
+
+

Using Transactional DPL Cursors

+
+
+
+

+ When using the DPL, you create the cursor using the entity + class's primary or secondary index (see the + Getting Started with Berkeley DB, Java Edition + + guide for details). At the time that you create the cursor, you + pass a transaction handle to the entities() + method, and this causes all subsequent operations performed + using that cursor to be performed within the scope of the + transaction. +

+

+ Note that if you are using a transaction-enabled store, + then you must provide a transaction handle when you open + your cursor. +

+

+ For example: +

+
package persist.txn;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+
+import java.io.File;
+
+...
+
+Environment myEnv = null;
+EntityStore store = null;
+
+...
+
+
+    // Store and environment open omitted, as is the DataAccessor
+    // instantiation.
+
+...
+
+    Transaction txn = myEnv.beginTransaction(null, null);
+    PrimaryIndex<String,Inventory> pi =
+        store.getPrimaryIndex(String.class, Inventory.class);
+    EntityCursor<Inventory> pi_cursor = pi.entities(txn, null);
+
+    try {
+        for (Inventory ii : pi_cursor) {
+            // do something with each object "ii"
+            // A transactional handle is not required for any write
+            // operations. All operations performed using this cursor
+            // will be done within the scope of the transaction, txn.
+        }
+        pi_cursor.close();
+        pi_cursor = null;
+        txn.commit();
+        txn = null;
+    } catch (Exception e) {
+        // Always make sure the cursor is closed when we are done
+        // with it.
+        if (pi_cursor != null) {
+            pi_cursor.close();
+        }
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    } 
+
+
+ + + diff --git a/docs/TransactionGettingStarted/txnexample_dpl.html b/docs/TransactionGettingStarted/txnexample_dpl.html new file mode 100644 index 0000000..4544f84 --- /dev/null +++ b/docs/TransactionGettingStarted/txnexample_dpl.html @@ -0,0 +1,705 @@ + + + + + + DPL Transaction Example + + + + + + + + +
+
+
+
+

DPL Transaction Example

+
+
+
+
+
+
+ + TxnGuide.java + +
+
+ + PayloadDataEntity.java + +
+
+ + StoreWriter.java + +
+
+
+

+ The following Java code provides a fully functional example of a + multi-threaded transactional JE application using the DPL. + This example is nearly identical to the example provided in the + previous section, except that it uses an entity class and entity + store to manage its data. +

+

+ As is the case with the previous examples, this example opens
+ an environment and then an entity store. It then creates
+ 5 threads, each of which writes 500 records to the database.
+ The primary keys for these writes are based on pre-determined
+ integers, while the data is randomly generated.
+ This means that the actual data is arbitrary and therefore uninteresting;
+ we picked it only because it requires minimum code to implement and therefore will
+ stay out of the way of the main points of this example. 

+

+ Each thread writes 10 records under a single transaction + before committing and writing another 10 (this is repeated 50 + times). At the end of each transaction, but before committing, each + thread calls a function that uses a cursor to read every record in + the database. We do this in order to make some points about + database reads in a transactional environment. +

+

+ Of course, each writer thread performs deadlock detection as + described in this manual. In addition, normal recovery is performed + when the environment is opened. +

+

+ To implement this example, we need three classes: +

+
+
    +
  • +

    + TxnGuide.java +

    +

    + This is the main class for the application. It performs + environment and store management, spawns threads, and + creates the data that is placed in the database. See TxnGuide.java for implementation details. +

    +
  • +
  • +

    + StoreWriter.java +

    +

    + This class extends java.lang.Thread, and
+ as such it is our thread implementation. It is responsible
+ for actually reading and writing the store. It also
+ performs all of our transaction management. See StoreWriter.java for
+ implementation details. 

    +
  • +
  • +

    + PayloadDataEntity.java +

    +

    + This is an entity class used to encapsulate several data + fields. See PayloadDataEntity.java for + implementation details. +

    +
  • +
+
+
+
+
+
+

TxnGuide.java

+
+
+
+

+ The main class in our example application is used to open and + close our environment and store. It also spawns all the + threads that we need. We start with the normal series + of Java package and import statements, followed by our class + declaration: +

+
// File TxnGuideDPL.java
+
+package persist.txn;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+import java.io.File;
+
+public class TxnGuideDPL { 
+

+ Next we declare our class' private data members. Mostly these are used + for constants such as the name of the database that we are opening and + the number of threads that we are spawning. However, we also declare + our environment and database handles here. +

+
    private static String myEnvPath = "./";
+    private static String storeName = "exampleStore";
+
+    // Handles
+    private static EntityStore myStore = null;
+    private static Environment myEnv = null;
+    private static final int NUMTHREADS = 5; 
+

+ Next, we implement our usage() method. This + application optionally accepts a single command line argument which is + used to identify the environment home directory. +

+
    private static void usage() {
+        System.out.println("TxnGuideDPL [-h <env directory>]");
+        System.exit(-1);
+    } 
+

+ Now we implement our main() method. This method + simply calls the methods to parse the command line arguments and open + the environment and store. It also creates and then joins the store writer + threads. +

+
    public static void main(String args[]) {
+        try {
+            // Parse the arguments list
+            parseArgs(args);
+            // Open the environment and store
+            openEnv();
+
+            // Start the threads
+            StoreWriter[] threadArray;
+            threadArray = new StoreWriter[NUMTHREADS];
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i] = new StoreWriter(myEnv, myStore);
+                threadArray[i].start();
+            }
+
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i].join();
+            }
+        } catch (Exception e) {
+            System.err.println("TxnGuideDPL: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            closeEnv();
+        }
+        System.out.println("All done.");
+    } 
+

+ Next we implement openEnv(). This method is used + to open the environment and then an entity store in that environment. Along + the way, we make sure that the transactional subsystem is correctly + initialized. +

+
    private static void openEnv() throws DatabaseException {
+        System.out.println("opening env and store");
+
+        // Set up the environment.
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        myEnvConfig.setAllowCreate(true);
+        myEnvConfig.setTransactional(true);
+        //  Environment handles are free-threaded by default in JE,
+        // so we do not have to do anything to cause the
+        // environment handle to be free-threaded.
+
+        // Set up the entity store
+        StoreConfig myStoreConfig = new StoreConfig();
+        myStoreConfig.setAllowCreate(true);
+        myStoreConfig.setTransactional(true);
+
+        // Open the environment
+        myEnv = new Environment(new File(myEnvPath),    // Env home
+                                    myEnvConfig);
+
+        // Open the store
+        myStore = new EntityStore(myEnv, storeName, myStoreConfig);
+    } 
+

+ Finally, we implement the methods used to close our environment and + databases, parse the command line arguments, and provide our class + constructor. This is fairly standard code and it is mostly + uninteresting from the perspective of this manual. We include it here + purely for the purpose of completeness. +

+
    private static void closeEnv() {
+        System.out.println("Closing env and store");
+        if (myStore != null ) {
+            try {
+                myStore.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: myStore: " +
+                    e.toString());
+                e.printStackTrace();
+            }
+        }
+
+        if (myEnv != null ) {
+            try {
+                myEnv.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: " + e.toString());
+                e.printStackTrace();
+            }
+        }
+    }
+
+    private TxnGuideDPL() {}
+
+    private static void parseArgs(String args[]) {
+        int nArgs = args.length;
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        if (i < nArgs - 1) {
+                            myEnvPath = new String(args[++i]);
+                        }
+                    break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+} 
+
+
+
+
+
+

PayloadDataEntity.java

+
+
+
+

+ Before we show the implementation of the store writer thread, we + need to show the class that we will be placing into the store. This + class is fairly minimal. It simply allows you to store and retrieve an + int, a String, and a + double. The int is our primary key. +

+
package persist.txn;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+
+@Entity
+public class PayloadDataEntity {
+    @PrimaryKey
+    private int oID;
+
+    private String threadName;
+
+    private double doubleData;
+
+    PayloadDataEntity() {}
+
+    public double getDoubleData() { return doubleData; }
+    public int getID() { return oID; }
+    public String getThreadName() { return threadName; }
+
+    public void setDoubleData(double dd) { doubleData = dd; }
+    public void setID(int id) { oID = id; }
+    public void setThreadName(String tn) { threadName = tn; }
+
+} 
+
+
+
+
+
+

StoreWriter.java

+
+
+
+

+ StoreWriter.java provides the implementation + for our entity store writer thread. It is responsible for: +

+
+
    +
  • +

    + All transaction management. +

    +
  • +
  • +

    + Responding to deadlock exceptions. +

    +
  • +
  • +

    + Providing data to be stored in the entity store. +

    +
  • +
  • +

    + Writing the data to the store. +

    +
  • +
+
+

+ In order to show off some of the ACID properties provided + by JE's transactional support, + StoreWriter.java does some things in a less + efficient way than you would probably decide to use in a + true production application. First, it groups 10 database + writes together in a single transaction when you could just + as easily perform one write for each transaction. If you + did this, you could use auto commit for the individual + database writes, which means your code would be slightly + simpler and you would run a much + smaller chance of encountering blocked and deadlocked + operations. However, by doing things this way, we are able + to show transactional atomicity, as well as deadlock + handling. +

+
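+ For comparison, an auto commit write with the DPL simply passes
+ null in place of the transaction handle. A minimal sketch,
+ assuming a transactional store and an already-opened primary
+ index, pi:
+
+// Because the store is transactional and no transaction handle
+// is supplied, JE wraps this single put in its own transaction
+// and commits it automatically.
+pi.put(null, pd);
+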

+ To begin, we provide the usual package and import statements, and we declare our class: +

+
package persist.txn;
+
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.Transaction;
+
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+
+import java.util.Iterator;
+import java.util.Random;
+import java.io.UnsupportedEncodingException;
+
+public class StoreWriter extends Thread
+{ 
+

+ Next we declare our private data members. Notice that we get handles + for the environment and the entity store. The random number generator + that we instantiate is used + to generate unique data for storage in the database. Finally, the + MAX_RETRY variable is used to define how many times + we will retry a transaction in the face of a deadlock. +

+
    private EntityStore myStore = null;
+    private Environment myEnv = null;
+    private PrimaryIndex<Integer,PayloadDataEntity> pdKey;
+    private Random generator = new Random();
+    private boolean passTxn = false;
+
+    private static final int MAX_RETRY = 20; 
+

+ Next we implement our class constructor. The most interesting thing about our constructor is + that we use it to obtain our entity class's primary index. +

+
    // Constructor. Get our handles from here
+    StoreWriter(Environment env, EntityStore store)
+
+        throws DatabaseException {
+        myStore = store;
+        myEnv = env;
+
+        // Open the data accessor. This is used to store persistent
+        // objects.
+        pdKey = myStore.getPrimaryIndex(Integer.class,
+                    PayloadDataEntity.class);
+    } 
+

+ Now we implement our thread's run() method. + This is the method that is run when StoreWriter + threads are started in the main program (see TxnGuide.java). +

+
    // Thread method that writes a series of records
+    // to the database using transaction protection.
+    // Deadlock handling is demonstrated here.
+    public void run () { 
+

+ The first thing we do is get a null transaction + handle before going into our main loop. We also begin the top transaction loop here that causes our application to + perform 50 transactions. +

+
        Transaction txn = null;
+
+        // Perform 50 transactions
+        for (int i=0; i<50; i++) { 
+

+ Next we declare a retry variable. This is used to
+ determine whether a deadlock should result in our retrying the
+ operation. We also declare a retry_count variable
+ that is used to make sure we do not retry a transaction forever in the
+ unlikely event that the thread is never able to get a necessary lock.
+ (The only thing that might cause this is if some other thread dies
+ while holding an important lock. This is the only situation we have to
+ guard against here, because the simplicity of this application makes it
+ highly unlikely that it will ever occur.) 

+
           boolean retry = true;
+           int retry_count = 0;
+           // while loop is used for deadlock retries
+           while (retry) { 
+

+ Now we go into the try block that we use for + deadlock detection. We also begin our transaction here. +

+
                // try block used for deadlock detection and
+                // general exception handling
+                try {
+
+                    // Get a transaction
+                    txn = myEnv.beginTransaction(null, null); 
+

+ Now we write 10 objects under the transaction that we have just begun. + By combining multiple writes together under a single transaction, + we increase the likelihood that a deadlock will occur. Normally, + you want to reduce the potential for a deadlock and in this case + the way to do that is to perform a single write per transaction. In + other words, we should be using auto commit to + write to our database for this workload. +

+

+ However, we want to show deadlock handling, and by performing
+ multiple writes per transaction we can actually observe deadlocks
+ occurring. We also want to underscore the idea that you can
+ combine multiple database operations in a single atomic
+ unit of work. So for our example, we do the (slightly) wrong thing. 

+
+
+                    // Write 10 PayloadDataEntity objects to the 
+                    // store for each transaction
+                    for (int j = 0; j < 10; j++) {
+                        // Instantiate an object
+                        PayloadDataEntity pd = new PayloadDataEntity();
+
+                        // Set the Object ID. This is used as the 
+                        // primary key.
+                        pd.setID(i + j);
+
+                        // The thread name is used as a secondary key, and
+                        // it is retrieved by this class's getName() 
+                        // method.
+                        pd.setThreadName(getName());
+
+                        // The last bit of data that we use is a double
+                        // that we generate randomly. This data is not
+                        // indexed.
+                        pd.setDoubleData(generator.nextDouble());
+
+                        // Do the put
+                        pdKey.put(txn, pd);
+                    } 
+

+ Having completed the inner database write loop, we could simply + commit the transaction and continue on to the next block of 10 + writes. However, we want to first illustrate a few points about + transactional processing so instead we call our + countObjects() method before calling the transaction + commit. countObjects() uses a cursor to read every + object in the entity store and return a count of the number of objects + that it found. +

+

+ Because
+ countObjects()
+ reads every object in the store, if used incorrectly the thread
+ will self-deadlock. The writer thread has just written 10 objects
+ to the store, but because the transaction used for those writes
+ has not yet been committed, each of those 10 objects is still
+ locked by the thread's transaction. If we then simply run a
+ non-transactional cursor over the store from within the same
+ thread that has locked those 10 objects, the cursor will
+ block when it tries to read one of those transactionally
+ protected records. The thread immediately stops operation at that
+ point while the cursor waits for the read lock it has
+ requested. Because that read lock will never be released (the thread
+ can never make any forward progress), this represents a
+ self-deadlock for the thread. 

+

+ There are three ways to prevent this self-deadlock: +

+
+
    +
  1. +

    + We can move the call to + countObjects() to a point after the + thread's transaction has committed. +

    +
  2. +

    + We can allow countObjects() to + operate under the same transaction as all of the writes + were performed. +

    +
  3. +

    + We can reduce our isolation guarantee for the application + by allowing uncommitted reads. +

    +
  3. +
+
+

+ For this example, we choose to use option 3 (uncommitted reads) to avoid + the deadlock. This means that we have to open our cursor handle + so that it knows to perform uncommitted reads. +

+
                    // commit
+                    System.out.println(getName() + " : committing txn : "
+                                       + i);
+                    System.out.println(getName() + " : Found " +
+                        countObjects(txn) + " objects in the store.");
+

+ Having performed this somewhat inelegant counting of the objects in the + database, we can now commit the transaction. +

+
                    try {
+                        txn.commit();
+                        txn = null;
+                    } catch (DatabaseException e) {
+                        System.err.println("Error on txn commit: " +
+                            e.toString());
+                    }
+                    retry = false; 
+

+ If all goes well with the commit, we are done and we can move on to the + next batch of 10 objects to add to the store. However, in the event + of an error, we must handle our exceptions correctly. The first of + these is a deadlock exception. In the event of a deadlock, we want to + abort and retry the transaction, provided that we have not already + exceeded our retry limit for this transaction. +

+
                } catch (LockConflictException lce) {
+                    System.out.println("################# " + getName() +
+                        " : caught deadlock");
+                    // retry if necessary
+                    if (retry_count < MAX_RETRY) {
+                        System.err.println(getName() +
+                            " : Retrying operation.");
+                        retry = true;
+                        retry_count++;
+                    } else {
+                        System.err.println(getName() +
+                            " : out of retries. Giving up.");
+                        retry = false;
+                    } 
+

+ In the event of a standard, non-specific database exception, we simply + log the exception and then give up (the transaction is not retried). +

+
                } catch (DatabaseException e) {
+                    // abort and don't retry
+                    retry = false;
+                    System.err.println(getName() +
+                        " : caught exception: " + e.toString());
+                    e.printStackTrace();  
+

+ And, finally, we always abort the transaction if the transaction handle + is not null. Note that immediately after committing our transaction, we + set the transaction handle to null to guard against aborting a + transaction that has already been committed. +

+
                } finally {
+                    if (txn != null) {
+                        try {
+                            txn.abort();
+                        } catch (Exception e) {
+                            System.err.println("Error aborting txn: " +
+                                e.toString());
+                            e.printStackTrace();
+                        }
+                    }
+                }
+            }
+        }
+    } 
+

+ The final piece of our StoreWriter class is the + countObjects() implementation. Notice how in + this example we open the cursor such that it performs uncommitted + reads: +

+
    // A method that counts every object in the store.
+
+    private int countObjects(Transaction txn) throws DatabaseException {
+        int count = 0;
+
+        CursorConfig cc = new CursorConfig();
+        // This is ignored if the store is not opened with uncommitted read
+        // support.
+        cc.setReadUncommitted(true);
+        EntityCursor<PayloadDataEntity> cursor = pdKey.entities(txn, cc);
+
+        try {
+            for (PayloadDataEntity pdi : cursor) {
+                    count++;
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+
+        return count;
+
+    }
+} 
+
+

+ This completes our transactional example. If you would like to + experiment with this code, you can find the example in the following + location in your JE distribution: +

+
JE_HOME/examples/persist/txn
+
+ + + diff --git a/docs/TransactionGettingStarted/txnexample_java.html b/docs/TransactionGettingStarted/txnexample_java.html new file mode 100644 index 0000000..a7fba10 --- /dev/null +++ b/docs/TransactionGettingStarted/txnexample_java.html @@ -0,0 +1,767 @@ + + + + + + Base API Transaction Example + + + + + + + + + +
+
+
+
+

Base API Transaction Example

+
+
+
+
+
+
+ + TxnGuide.java + +
+
+ + PayloadData.java + +
+
+ + DBWriter.java + +
+
+
+

+ The following Java code provides a fully functional example of a + multi-threaded transactional JE application. + The example opens an environment and database, and then creates 5 + threads, each of which writes 500 records to the database. The keys + used for these writes are pre-determined strings, while the data is + a class that contains randomly generated data. This means that the actual + data is arbitrary and therefore uninteresting; we picked it only + because it requires minimum code to implement and therefore will + stay out of the way of the main points of this example. +

+

+ Each thread writes 10 records under a single transaction + before committing and writing another 10 (this is repeated 50 + times). At the end of each transaction, but before committing, each + thread calls a function that uses a cursor to read every record in + the database. We do this in order to make some points about + database reads in a transactional environment. +

+

+ Of course, each writer thread performs deadlock detection as + described in this manual. In addition, normal recovery is performed + when the environment is opened. +

+

+ To implement this example, we need three classes: +

+
+
    +
  • +

    + TxnGuide.java +

    +

    + This is the main class for the application. It performs + environment and database management, spawns threads, and + creates the data that is placed in the database. See TxnGuide.java for implementation details. +

    +
  • +
  • +

    + DBWriter.java +

    +

    + This class extends java.lang.Thread, and + as such it is our thread implementation. It is responsible + for actually reading and writing to the database. It also + performs all of our transaction management. See DBWriter.java for + implementation details. +

    +
  • +
  • +

    + PayloadData.java +

    +

    + This is a data class used to encapsulate several data + fields. It is fairly uninteresting, except that the usage + of a class means that we have to use the bind APIs to + serialize it for storage in the database. See PayloadData.java for + implementation details. +

    +
  • +
+
+
+
+
+
+

TxnGuide.java

+
+
+
+

+ The main class in our example application is used to open and + close our environment and database. It also spawns all the + threads that we need. We start with the normal series + of Java package and import statements, followed by our class + declaration: +

+
// File TxnGuide.java
+
+package je.txn;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
+public class TxnGuide { 
+

+ Next we declare our class' private data members. Mostly these are used + for constants such as the name of the database that we are opening and + the number of threads that we are spawning. However, we also declare + our environment and database handles here. +

+
    private static String myEnvPath = "./";
+    private static String dbName = "mydb.db";
+    private static String cdbName = "myclassdb.db";
+
+    // DB handles
+    private static Database myDb = null;
+    private static Database myClassDb = null;
+    private static Environment myEnv = null;
+
+    private static final int NUMTHREADS = 5; 
+

+ Next, we implement our usage() method. This + application optionally accepts a single command line argument which is + used to identify the environment home directory. +

+
    private static void usage() {
+        System.out.println("TxnGuide [-h <env directory>]");
+        System.exit(-1);
+    } 
+

+ Now we implement our main() method. This method + simply calls the methods to parse the command line arguments and open + the environment and database. It also creates the stored class catalog + that we use for serializing the data that we want to store in our + database. Finally, it creates and then joins the database writer + threads. +

+
    public static void main(String args[]) {
+        try {
+            // Parse the arguments list
+            parseArgs(args);
+            // Open the environment and databases
+            openEnv();
+            // Get our class catalog (used to serialize objects)
+            StoredClassCatalog classCatalog =
+                new StoredClassCatalog(myClassDb);
+
+            // Start the threads
+            DBWriter[] threadArray;
+            threadArray = new DBWriter[NUMTHREADS];
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i] = new DBWriter(myEnv, myDb, classCatalog);
+                threadArray[i].start();
+            }
+
+            // Join the threads. That is, wait for each thread to 
+            // complete before exiting the application.
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i].join();
+            }
+        } catch (Exception e) {
+            System.err.println("TxnGuide: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            closeEnv();
+        }
+        System.out.println("All done.");
+    } 
+

+ Next we implement openEnv(). This method is used + to open the environment and then a database in that environment. Along + the way, we make sure that the transactional subsystem is correctly + initialized. +

+

+ For the database open, notice that we open the database such that it + supports duplicate records. This is required purely by the data that + we are writing to the database, and it is only necessary if you run the + application more than once without first deleting the environment. +

+
    private static void openEnv() throws DatabaseException {
+        System.out.println("opening env");
+
+        // Set up the environment.
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        myEnvConfig.setAllowCreate(true);
+        myEnvConfig.setTransactional(true);
+        // Environment handles are free-threaded by default in JE,
+        // so we do not have to do anything to cause the
+        // environment handle to be free-threaded.
+
+        // Set up the database
+        DatabaseConfig myDbConfig = new DatabaseConfig();
+        myDbConfig.setAllowCreate(true);
+        myDbConfig.setTransactional(true);
+        myDbConfig.setSortedDuplicates(true);
+
+        // Open the environment
+        myEnv = new Environment(new File(myEnvPath),    // Env home
+                                myEnvConfig);
+
+        // Open the database. Do not provide a txn handle. This open
+        // is auto committed because DatabaseConfig.setTransactional()
+        // is true.
+        myDb = myEnv.openDatabase(null,     // txn handle
+                                  dbName,   // Database file name
+                                  myDbConfig);
+
+        // Used by the bind API for serializing objects 
+        // Class database must not support duplicates
+        myDbConfig.setSortedDuplicates(false);
+        myClassDb = myEnv.openDatabase(null,     // txn handle
+                                       cdbName,  // Database file name
+                                       myDbConfig);
+    } 
+

+ Finally, we implement the methods used to close our environment and databases, parse the command line arguments, and provide our class constructor. This is fairly standard code and it is mostly uninteresting from the perspective of this manual. We include it here purely for the purpose of completeness.

+
    private static void closeEnv() {
+        System.out.println("Closing env and databases");
+        if (myDb != null ) {
+            try {
+                myDb.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: myDb: " +
+                    e.toString());
+                e.printStackTrace();
+            }
+        }
+
+        if (myClassDb != null ) {
+            try {
+                myClassDb.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: myClassDb: " +
+                    e.toString());
+                e.printStackTrace();
+            }
+        }
+
+        if (myEnv != null ) {
+            try {
+                myEnv.close();
+            } catch (DatabaseException e) {
+                System.err.println("closeEnv: " + e.toString());
+                e.printStackTrace();
+            }
+        }
+    }
+
+    private TxnGuide() {}
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        myEnvPath = args[++i];
+                        break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+} 
+
+
+
+
+
+

PayloadData.java

+
+
+
+

+ Before we show the implementation of the database writer thread, we need to show the class that we will be placing into the database. This class is fairly minimal. It simply allows you to store and retrieve an int, a String, and a double. We will be using the JE bind API from within the writer thread to serialize instances of this class and place them into our database.

+
package je.txn;
+
+import java.io.Serializable;
+
+public class PayloadData implements Serializable {
+    // Fixed serial version ID so previously stored objects remain
+    // readable if the class is recompiled without structural changes.
+    private static final long serialVersionUID = 1L;
+
+    private int oID;
+    private String threadName;
+    private double doubleData;
+
+    PayloadData(int id, String name, double data) {
+        oID = id;
+        threadName = name;
+        doubleData = data;
+    }
+
+    public double getDoubleData() { return doubleData; }
+    public int getID() { return oID; }
+    public String getThreadName() { return threadName; }
+} 
+
+
+
+
+
+

DBWriter.java

+
+
+
+

+ DBWriter.java provides the implementation for our database writer thread. It is responsible for:

+
+
+  • All transaction management.
+  • Responding to deadlock exceptions.
+  • Providing data to be stored into the database.
+  • Serializing and then writing the data to the database.
+
+

+ In order to show off some of the ACID properties provided by JE's transactional support, DBWriter.java does some things in a less efficient way than you would probably choose in a true production application. First, it groups 10 database writes together in a single transaction when you could just as easily perform one write per transaction. If you did this, you could use auto commit for the individual database writes, which means your code would be slightly simpler and you would run a much smaller chance of encountering blocked and deadlocked operations. However, by doing things this way, we are able to show transactional atomicity, as well as deadlock handling. A sketch of the auto-commit alternative appears below.

+
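+ The sketch below illustrates that auto-commit alternative. It is not part of DBWriter.java; it assumes the myDb handle opened in TxnGuide.java with DatabaseConfig.setTransactional(true), and the StringBinding class used elsewhere in this example.
+
+    // Illustrative only: one auto-committed write per operation.
+    DatabaseEntry key = new DatabaseEntry();
+    DatabaseEntry data = new DatabaseEntry();
+    StringBinding.stringToEntry("key 1", key);
+    StringBinding.stringToEntry("some data", data);
+
+    // No explicit Transaction object; because the database is
+    // transactional, this single write is wrapped in its own
+    // transaction and committed automatically.
+    myDb.put(null, key, data);
+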

+ At the end of each transaction, DBWriter.java runs a cursor over the entire database in order to count the number of records it currently contains. There are better ways to discover this information, but in this case we want to make some points regarding cursors, transactional applications, and deadlocking (we get into this in more detail later in this section).

+

+ To begin, we provide the usual package and import statements, and we declare our class: +

+
package je.txn;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+import java.util.Random;
+
+public class DBWriter extends Thread
+{ 
+

+ Next we declare our private data members. Notice that we get handles for the environment and the database. We also obtain a handle for an EntryBinding. We will use this to serialize PayloadData class instances (see PayloadData.java) for storage in the database. The random number generator that we instantiate is used to generate unique data for storage in the database. The MAX_RETRY variable defines how many times we will retry a transaction in the face of a deadlock. Finally, keys is a String array that holds the keys used for our database entries.

+
    private Database myDb = null;
+    private Environment myEnv = null;
+    private EntryBinding dataBinding = null;
+    private Random generator = new Random();
+
+
+    private static final int MAX_RETRY = 20;
+
+    private static String[] keys = {"key 1", "key 2", "key 3",
+                                    "key 4", "key 5", "key 6",
+                                    "key 7", "key 8", "key 9",
+                                    "key 10"}; 
+

+ Next we implement our class constructor. The most interesting thing we do here is instantiate a serial binding for serializing PayloadData instances.

+
    // Constructor. Get our DB handles from here
+    DBWriter(Environment env, Database db, StoredClassCatalog scc)
+        throws DatabaseException {
+        myDb = db;
+        myEnv = env;
+        dataBinding = new SerialBinding(scc, PayloadData.class);
+    } 
+

+ Now we implement our thread's run() method. This is the method that is run when DBWriter threads are started in the main program (see TxnGuide.java).

+
    // Thread method that writes a series of records
+    // to the database using transaction protection.
+    // Deadlock handling is demonstrated here.
+    public void run () { 
+

+ The first thing we do is declare a transaction handle, initialized to null, before going into our main loop. We also begin the top transaction loop here, which causes each thread to perform 50 transactions.

+
        Transaction txn = null;
+
+        // Perform 50 transactions
+        for (int i=0; i<50; i++) { 
+

+ Next we declare a retry variable. This is used to determine whether a deadlock should result in our retrying the operation. We also declare a retry_count variable that is used to make sure we do not retry a transaction forever in the unlikely event that the thread is never able to obtain a necessary lock. (The only thing that might cause this is some other thread dying while holding an important lock. The simplicity of this application makes that highly unlikely, but we guard against it anyway.)

+
           boolean retry = true;
+           int retry_count = 0;
+           // while loop is used for deadlock retries
+           while (retry) { 
+

+ Now we go into the try block that we use for deadlock detection. We also begin our transaction here.

+
                // try block used for deadlock detection and
+                // general db exception handling
+                try {
+
+                    // Get a transaction
+                    txn = myEnv.beginTransaction(null, null); 
+

+ Now we write 10 records under the transaction that we have just begun. By combining multiple writes together under a single transaction, we increase the likelihood that a deadlock will occur. Normally, you want to reduce the potential for a deadlock, and in this case the way to do that is to perform a single write per transaction. In other words, we should be using auto commit to write to our database for this workload.

+

+ However, we want to show deadlock handling, and by performing multiple writes per transaction we can actually observe deadlocks occurring. We also want to underscore the idea that you can combine multiple database operations together in a single atomic unit of work. So for our example, we do the (slightly) wrong thing.

+

+ Further, notice that we store our key into a DatabaseEntry using com.sleepycat.bind.tuple.StringBinding to perform the serialization. Also, when we instantiate the PayloadData object, we call getName(), which gives us the string representation of this thread's name, as well as Random.nextDouble(), which gives us a random double value. This latter value helps us avoid creating duplicate records in the database.

+
+                    // Write 10 records to the db
+                    // for each transaction
+                    for (int j = 0; j < 10; j++) {
+                        // Get the key
+                        DatabaseEntry key = new DatabaseEntry();
+                        StringBinding.stringToEntry(keys[j], key);
+
+                        // Get the data
+                        PayloadData pd = new PayloadData(i+j, getName(),
+                            generator.nextDouble());
+                        DatabaseEntry data = new DatabaseEntry();
+                        dataBinding.objectToEntry(pd, data);
+
+                        // Do the put
+                        myDb.put(txn, key, data);
+                    } 
+

+ Having completed the inner database write loop, we could simply commit the transaction and continue on to the next block of 10 writes. However, we want to first illustrate a few points about transactional processing, so instead we call our countRecords() method before calling the transaction commit. countRecords() uses a cursor to read every record in the database and return a count of the number of records that it found.

+

+ Because countRecords() reads every record in the database, if used incorrectly the thread will self-deadlock. The writer thread has just written 10 records to the database, but because the transaction used for that write has not yet been committed, each of those 10 records is still locked by the thread's transaction. If we then simply run a non-transactional cursor over the database from within the same thread that has locked those 10 records, the cursor will block when it tries to read one of those transactionally protected records. The thread immediately stops operation at that point while the cursor waits for the read lock it has requested. Because that read lock will never be released (the thread can never make any forward progress), this represents a self-deadlock for the thread.

+

+ There are three ways to prevent this self-deadlock: +

+
+
+  1. We can move the call to countRecords() to a point after the thread's transaction has committed.
+  2. We can allow countRecords() to operate under the same transaction that performed all of the writes.
+  3. We can reduce our isolation guarantee for the application by allowing uncommitted reads.
+

+ For this example, we choose to use option 3 (uncommitted reads) to avoid the deadlock. This means that we have to open our cursor handle so that it knows to perform uncommitted reads.

+
                    // commit
+                    System.out.println(getName() + " : committing txn : " 
+                        + i);
+
+                    // Using uncommitted reads to avoid the deadlock, so
+                    // null is passed for the transaction here.
+                    System.out.println(getName() + " : Found " +
+                        countRecords(null) + " records in the database.");
+

+ Having performed this somewhat inelegant counting of the records in the database, we can now commit the transaction.

+
                    try {
+                        txn.commit();
+                        txn = null;
+                    } catch (DatabaseException e) {
+                        System.err.println("Error on txn commit: " +
+                            e.toString());
+                    }
+                    retry = false; 
+

+ If all goes well with the commit, we are done and we can move on to the next batch of 10 records to add to the database. However, in the event of an error, we must handle our exceptions correctly. The first of these is a deadlock exception. In the event of a deadlock, we want to abort and retry the transaction, provided that we have not already exceeded our retry limit for this transaction.

+
                } catch (LockConflictException le) {
+                    System.out.println("################# " + getName() +
+                        " : caught deadlock");
+                    // retry if necessary
+                    if (retry_count < MAX_RETRY) {
+                        System.err.println(getName() +
+                            " : Retrying operation.");
+                        retry = true;
+                        retry_count++;
+                    } else {
+                        System.err.println(getName() +
+                            " : out of retries. Giving up.");
+                        retry = false;
+                    } 
+

+ In the event of a standard, non-specific database exception, we simply log the exception and then give up (the transaction is not retried).

+
                } catch (DatabaseException e) {
+                    // abort and don't retry
+                    retry = false;
+                    System.err.println(getName() +
+                        " : caught exception: " + e.toString());
+                    e.printStackTrace();  
+

+ And, finally, we always abort the transaction if the transaction handle is not null. Note that immediately after committing our transaction, we set the transaction handle to null to guard against aborting a transaction that has already been committed.

+
                } finally {
+                    if (txn != null) {
+                        try {
+                            txn.abort();
+                        } catch (Exception e) {
+                            System.err.println("Error aborting txn: " +
+                                e.toString());
+                            e.printStackTrace();
+                        }
+                    }
+                }
+            }
+        }
+    } 
+

+ The final piece of our DBWriter class is the countRecords() implementation. Notice how in this example we open the cursor such that it performs uncommitted reads:

+
    // A method that counts every record in the database.
+
+    // Note that this method exists only for illustrative purposes.
+    // A more straightforward way to count the number of records in
+    // a database is to use the Database.getStats() method.
+    private int countRecords(Transaction txn)  throws DatabaseException {
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        int count = 0;
+        Cursor cursor = null;
+
+        try {
+            // Get the cursor
+            CursorConfig cc = new CursorConfig();
+            cc.setReadUncommitted(true);
+            cursor = myDb.openCursor(txn, cc);
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                    OperationStatus.SUCCESS) {
+                count++;
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+
+        return count;
+
+    }
+} 
+
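+ As the comment in countRecords() notes, walking every record with a cursor exists here only for illustration. A sketch of the more efficient alternative, assuming the same myDb handle, might look like the following (BtreeStats is the DatabaseStats subclass that JE returns for a Database):
+
+    // Obtain the record count from the Btree statistics instead of
+    // reading every record. A null StatsConfig uses the defaults.
+    BtreeStats stats = (BtreeStats) myDb.getStats(null);
+    long recordCount = stats.getLeafNodeCount();
+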
+

+ This completes our transactional example. If you would like to experiment with this code, you can find the example in the following location in your JE distribution:

+
JE_HOME/examples/je/txn
+
+ + + diff --git a/docs/TransactionGettingStarted/txnindices.html b/docs/TransactionGettingStarted/txnindices.html new file mode 100644 index 0000000..cf04481 --- /dev/null +++ b/docs/TransactionGettingStarted/txnindices.html @@ -0,0 +1,161 @@ + + + + + + Secondary Indices with Transaction Applications + + + + + + + + + +
+
+
+
+

Secondary Indices with Transaction Applications

+
+
+
+

+ You can use transactions with your secondary indices so long as you open the secondary index so that it is transactional.

+

+ All other aspects of using secondary indices with transactions are identical to using secondary indices without transactions. In addition, you transaction-protect secondary cursors just as you protect normal cursors: you simply have to make sure the cursor is opened using a transaction handle, and that the cursor is closed before the handle is either committed or aborted. See Transactional Cursors for details.

+

+ Note that when you use transactions to protect your database writes, your secondary indices are protected from corruption because updates to the primary and the secondaries are performed in a single atomic transaction.

+
+

Note

+

+ If you are using the DPL, then be aware that you never have to provide a transactional handle when opening an index, be it a primary or a secondary. However, if transactions are enabled for your store, then all of the indexes that you open will be enabled for transactional usage. Moreover, any write operation performed using that index will be done using a transaction, regardless of whether you explicitly provide a transactional handle to the write operation.

+

+ If you do not explicitly provide a transaction handle to DPL write operations performed on a transactional store, then auto commit is silently used for that operation.

+
+

+ For example: +

+
package je.txn;
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryConfig;
+
+import java.io.FileNotFoundException;
+
+...
+
+// Environment and primary database opens omitted.
+
+SecondaryConfig mySecConfig = new SecondaryConfig();
+mySecConfig.setAllowCreate(true);
+mySecConfig.setTransactional(true);
+
+SecondaryDatabase mySecDb = null;
+try {
+    // A fake tuple binding that is not actually implemented anywhere.
+    // The tuple binding is dependent on the data in use.
+    // See the Getting Started Guide for details
+    TupleBinding myTupleBinding = new MyTupleBinding();
+
+    // Open the secondary. FullNameKeyCreator is not actually implemented
+    // anywhere. See the Getting Started Guide for details.
+    FullNameKeyCreator keyCreator = 
+        new FullNameKeyCreator(myTupleBinding);
+
+    // Set the key creator on the secondary config object.
+    mySecConfig.setKeyCreator(keyCreator);
+
+    // Perform the actual open. Because this database is configured to be
+    // transactional, the open is automatically wrapped in a transaction.
+    //      - myEnv is the environment handle.
+    //      - myDb is the primary database handle.
+    String secDbName = "mySecondaryDatabase";
+    mySecDb = myEnv.openSecondary(null, secDbName, myDb,
+                                  mySecConfig);
+} catch (DatabaseException de) {
+    // Exception handling goes here ...
+} 
+
+ + + diff --git a/docs/TransactionGettingStarted/usingtxns.html b/docs/TransactionGettingStarted/usingtxns.html new file mode 100644 index 0000000..ef3f47c --- /dev/null +++ b/docs/TransactionGettingStarted/usingtxns.html @@ -0,0 +1,390 @@ + + + + + + Chapter 3. Transaction Basics + + + + + + + + + +
+
+
+
+

Chapter 3. Transaction Basics

+
+
+
+ +

+ Once you have enabled transactions for your environment and your databases, you can use them to protect your database operations. You do this by acquiring a transaction handle and then using that handle for any database operation that you want to participate in that transaction.

+

+ You obtain a transaction handle using the Environment.beginTransaction() method.

+

+ Once you have completed all of the operations that you want to include in the transaction, you must commit the transaction using the Transaction.commit() method.

+

+ If, for any reason, you want to abandon the transaction, you abort it using Transaction.abort().

+

+ Any transaction handle that has been committed or aborted can no longer be used by your application.

+

+ Finally, you must make sure that all transaction handles are either committed or aborted before closing your databases and environment.

+
+

Note

+

+ If you only want to transaction protect a single database write operation, you can use auto commit to perform the transaction administration. When you use auto commit, you do not need an explicit transaction handle. See Auto Commit for more information.

+
+
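+ As a minimal sketch of what auto commit looks like in code (assuming a Database handle, myDatabase, opened with DatabaseConfig.setTransactional(true) as in the second example below, and the StringBinding class used elsewhere in this book):
+
+DatabaseEntry key = new DatabaseEntry();
+DatabaseEntry data = new DatabaseEntry();
+StringBinding.stringToEntry("akey", key);
+StringBinding.stringToEntry("adata", data);
+
+// Passing null for the transaction causes JE to wrap this single
+// write in its own transaction and commit it automatically.
+myDatabase.put(null, key, data);
+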

+ For example, the following code opens a transaction-enabled environment and store, obtains a transaction handle, and then performs a write operation under its protection. In the event of any failure in the write operation, the transaction is aborted and the store is left in a state as if no operations had ever been attempted in the first place.

+
package persist.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+import java.io.File;
+
+...
+
+Environment myEnv = null;
+EntityStore store = null;
+
+// Our convenience data accessor class, used for easy access to 
+// EntityClass indexes.
+DataAccessor da;
+
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    StoreConfig storeConfig = new StoreConfig();
+    storeConfig.setTransactional(true);
+
+    store = new EntityStore(myEnv,
+                            "EntityStore", storeConfig);
+
+    da = new DataAccessor(store);
+
+    // Assume that Inventory is an entity class.
+    Inventory theInventory = new Inventory();
+    theInventory.setItemName("Waffles");
+    theInventory.setItemSku("waf23rbni");
+
+    Transaction txn = myEnv.beginTransaction(null, null);
+
+    try {
+        // Put the object to the store using the transaction handle.
+        da.inventoryBySku.put(txn, theInventory);
+
+        // Commit the transaction. The data is now safely written to the
+        // store.
+        txn.commit();
+    // If there is a problem, abort the transaction
+    } catch (Exception e) {
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    }
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} 
+

+ The same thing can be done with the base API; the database in use is left unchanged if the write operation fails:

+
package je.txn;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Transaction;
+
+import java.io.File;
+import java.nio.charset.StandardCharsets;
+
+...
+
+Database myDatabase = null;
+Environment myEnv = null;
+try {
+    EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+    myEnvConfig.setTransactional(true);
+    myEnv = new Environment(new File("/my/env/home"),
+                              myEnvConfig);
+
+    // Open the database. Create it if it does not already exist.
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setTransactional(true);
+    myDatabase = myEnv.openDatabase(null,
+                                    "sampleDatabase",
+                                    dbConfig);
+
+    String keyString = "thekey";
+    String dataString = "thedata";
+    DatabaseEntry key =
+        new DatabaseEntry(keyString.getBytes(StandardCharsets.UTF_8));
+    DatabaseEntry data =
+        new DatabaseEntry(dataString.getBytes(StandardCharsets.UTF_8));
+
+    Transaction txn = myEnv.beginTransaction(null, null);
+        
+    try {
+        myDatabase.put(txn, key, data);
+        txn.commit();
+    } catch (Exception e) {
+        if (txn != null) {
+            txn.abort();
+            txn = null;
+        }
+    }
+
+} catch (DatabaseException de) {
+    // Exception handling goes here
+} 
+
+
+
+
+

Committing a Transaction

+
+
+
+

+ In order to fully understand what is happening when you commit a transaction, you must first understand a little about what JE is doing with its log files. Logging causes all database or store write operations to be identified in log files (remember that in JE, your log files are your database files; there is no difference between the two). Enough information is written to restore your entire BTree in the event of a system or application failure, so by performing logging, JE ensures the integrity of your data.

+

+ Remember that all write activity made to your database or store is identified in JE's logs as the writes are performed by your application. However, JE initially buffers this log information in memory. Eventually the information is written to disk, but especially in the case of a transactional application it may be held in memory until the transaction is committed, or until JE runs out of buffer space for the logging information.

+

+ When you commit a transaction, the following occurs: +

+
+
+  • A commit record is written to the log. This indicates that the modifications made by the transaction are now permanent. By default, this write is performed synchronously to disk, so the commit record arrives in the log files before any other actions are taken.
+
+  • Any log information held in memory is (by default) synchronously written to disk. Note that this requirement can be relaxed, depending on the type of commit you perform. See Non-Durable Transactions for more information.
+
+    Note that a transaction commit only writes the BTree's leaf nodes to JE's log files. All other internal BTree structures are left unwritten.
+
+  • All locks held by the transaction are released. This means that read operations performed by other transactions or threads of control can now see the modifications without resorting to uncommitted reads (see Reading Uncommitted Data for more information).
+
+

+ To commit a transaction, you simply call Transaction.commit().

+
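+ If the default synchronous behavior described above is stronger than your application needs, one way to relax it is to configure the transaction's durability up front. The following is a minimal sketch, not part of the examples in this book; see Non-Durable Transactions for the full discussion.
+
+TransactionConfig txnConfig = new TransactionConfig();
+
+// COMMIT_NO_SYNC asks JE not to synchronously flush the commit
+// record to disk at commit time, trading durability for speed.
+txnConfig.setDurability(Durability.COMMIT_NO_SYNC);
+
+Transaction txn = myEnv.beginTransaction(null, txnConfig);
+// ... perform writes ...
+txn.commit();
+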

+ Remember that transaction commit causes only the BTree leaf nodes to be written to JE's log files. Any other modifications made to the BTree as a result of the transaction's activities are not written to the log file. This means that over time JE's normal recovery time can greatly increase (remember that JE always runs normal recovery when it opens an environment).

+

+ For this reason, JE by default runs the checkpointer thread. This background thread runs a checkpoint on a periodic interval so as to ensure that the amount of data that needs to be recovered upon environment open is minimized. You can also run a checkpoint manually, as sketched below. For more information, see Checkpoints.

+
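+ A manual checkpoint is a one-line affair. The following sketch assumes an open Environment handle (myEnv); setForce(true) asks JE to checkpoint even if little has been written since the last one.
+
+CheckpointConfig checkpointConfig = new CheckpointConfig();
+checkpointConfig.setForce(true);
+
+// Run a checkpoint now, rather than waiting for the
+// checkpointer thread's next periodic run.
+myEnv.checkpoint(checkpointConfig);
+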

+ Note that once you have committed a transaction, the transaction handle that you used for the transaction is no longer valid. To perform database activities under the control of a new transaction, you must obtain a fresh transaction handle.

+
+
+ + + diff --git a/docs/TransactionGettingStarted/wrapup.html b/docs/TransactionGettingStarted/wrapup.html new file mode 100644 index 0000000..d12fcef --- /dev/null +++ b/docs/TransactionGettingStarted/wrapup.html @@ -0,0 +1,254 @@ + + + + + + Chapter 6. Summary and Examples + + + + + + + + + +
+
+
+
+

Chapter 6. Summary and Examples

+
+
+
+
+

+ Table of Contents +

+
+
+ + Anatomy of a Transactional Application + +
+
+ + Base API Transaction Example + +
+
+
+
+ + TxnGuide.java + +
+
+ + PayloadData.java + +
+
+ + DBWriter.java + +
+
+
+
+ + DPL Transaction Example + +
+
+
+
+ + TxnGuide.java + +
+
+ + PayloadDataEntity.java + +
+
+ + StoreWriter.java + +
+
+
+
+
+

+ Throughout this manual we have presented the concepts and mechanisms that you need to provide transactional protection for your application. In this chapter, we summarize these mechanisms, and we provide a complete example of a multi-threaded transactional JE application.

+
+
+
+
+

Anatomy of a Transactional Application

+
+
+
+

+ Transactional applications are characterized by performing the following activities (a condensed code sketch follows the list):

+
+
+  1. Create your environment handle.
+
+  2. Open your environment, specifying that the transactional subsystem is to be used.
+
+  3. If you are using the base API, open your database handles, indicating that they are to support transactions. Otherwise, open your store such that it is configured for transactions.
+
+  4. Spawn off worker threads. How many of these you need and how they split their JE workload is entirely up to your application's requirements. However, any worker threads that perform write operations will do the following:
+
+     a. Begin a transaction.
+     b. Perform one or more read and write operations.
+     c. Commit the transaction if all goes well.
+     d. Abort and retry the operation if a deadlock is detected.
+     e. Abort the transaction for most other errors.
+
+  5. On application shutdown:
+
+     a. Make sure there are no opened cursors.
+     b. Make sure there are no active transactions. Either abort or commit all transactions before shutting down.
+     c. Close your databases.
+     d. Close your environment.
+
+
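+ The following condensed sketch maps the steps above onto the base API. The names used here are placeholders; the complete, runnable version of this pattern is the example program presented later in this chapter.
+
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+envConfig.setTransactional(true);                      // step 2
+Environment env = new Environment(new File("/my/env/home"), envConfig);
+
+DatabaseConfig dbConfig = new DatabaseConfig();
+dbConfig.setAllowCreate(true);
+dbConfig.setTransactional(true);                       // step 3
+Database db = env.openDatabase(null, "mydb", dbConfig);
+
+// Step 4: worker threads each run a begin/write/commit-or-abort
+// loop with deadlock retries (see DBWriter.java below).
+
+// Step 5: shutdown -- close cursors, resolve transactions, then:
+db.close();
+env.close();
+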
+

Note

+

+ Robust JE applications should monitor their worker threads to make sure they have not died unexpectedly. If a thread does terminate abnormally, you must shut down all your worker threads and then run normal recovery (you will have to reopen your environment to do this). This is the only way to clear any resources (such as a lock or a mutex) that the abnormally exiting worker thread might have been holding at the time that it died.

+

+ Failure to perform this recovery can cause your still-functioning worker threads to eventually block forever while waiting for a lock that will never be released.

+
+

+ In addition to these activities, which are entirely handled by code within your application, you also need to periodically back up your log files. This is required in order to obtain the durability guarantee made by JE's transaction ACID support. See Backing up and Restoring Berkeley DB, Java Edition Applications for more information.

+
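+ JE provides a helper class, com.sleepycat.je.util.DbBackup, that pins the set of log files belonging to a backup while you copy them. A minimal sketch follows; the copy routine is a placeholder for whatever copy mechanism your application uses.
+
+DbBackup backup = new DbBackup(myEnv);
+backup.startBackup();
+try {
+    // The files that must be copied to complete this backup.
+    String[] filesForBackup = backup.getLogFilesInBackupSet();
+    for (String fileName : filesForBackup) {
+        copyFile(fileName);   // placeholder for your copy routine
+    }
+} finally {
+    // Always release the backup so JE can resume deleting files.
+    backup.endBackup();
+}
+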
+
+ + + diff --git a/docs/TransactionGettingStarted/writeblock.jpg b/docs/TransactionGettingStarted/writeblock.jpg new file mode 100644 index 0000000..4b382b8 Binary files /dev/null and b/docs/TransactionGettingStarted/writeblock.jpg differ diff --git a/docs/changelog.html b/docs/changelog.html new file mode 100644 index 0000000..11d71b1 --- /dev/null +++ b/docs/changelog.html @@ -0,0 +1,3587 @@ + + + + + + The Berkeley DB Java Edition Package: BDB JE Library Version + 12.2.7.5 (Release 7.5.11) Change Log + + + + + +

Oracle Berkeley DB Java Edition 12c R2 Change Log

+

Release 7.5.11

+ + + +

Upgrading from JE 7.4 or earlier

+ +In JE 7.5 the on-disk file format moved to 15. The file format change is +forward compatible in that JE files created with earlier releases can be read +when opened with JE 7.5 or later. The change is not backward compatible in that +files created with JE 7.5 or later cannot be read by earlier releases. After +an existing environment is opened read/write using JE 7.5, the environment can +no longer be read by earlier releases. + +

Upgrading from JE 7.3 or earlier

+ +No file format changes were included in JE 7.4 and there are no file format +compatibility issues when upgrading from JE 7.3. + +

Upgrading from JE 7.2 or earlier

+ +In JE 7.3 the on-disk file format moved to 14. The file format change is +forward compatible in that JE files created with earlier releases can be read +when opened with JE 7.3 or later. The change is not backward compatible in that +files created with JE 7.3 or later cannot be read by earlier releases. After +an existing environment is opened read/write using JE 7.3, the environment can +no longer be read by earlier releases. + +

Upgrading from JE 7.1 or earlier

+ +No file format changes were included in JE 7.2 and there are no file format +compatibility issues when upgrading from JE 7.1. + +

Upgrading from JE 7.0 or earlier

+ +In JE 7.1 the on-disk file format moved to 13. The file format change is +forward compatible in that JE files created with earlier releases can be read +when opened with JE 7.1 or later. The change is not backward compatible in that +files created with JE 7.1 or later cannot be read by earlier releases. After +an existing environment is opened read/write using JE 7.1, the environment can +no longer be read by earlier releases. +

+In JE 7.1 the HA wire format also changed in order to support the durable +transaction commits feature (see [#25057]). Until all nodes in a replication +group have been upgraded to JE 7.1, this optimization is not fully applied. + +

Upgrading from JE 6.4 or earlier

+ +In JE 7.0 the on-disk file format moved to 12. The file format change is +forward compatible in that JE files created with earlier releases can be read +when opened with JE 7.0 or later. The change is not backward compatible in that +files created with JE 7.0 or later cannot be read by earlier releases. After +an existing environment is opened read/write using JE 7.0, the environment can +no longer be read by earlier releases. +

+In JE 7.0 the HA wire format also changed in order to support the TTL feature. +Until all nodes in a replication group have been upgraded to JE 7.0, the TTL +feature cannot be used. An exception will be thrown if a write with a non-zero +TTL is attempted, and not all nodes have been upgraded. See further below for +a description of the TTL feature. + +

Upgrading from JE 6.3 or earlier

+ +No file format changes were included in JE 6.4 and there are no file format +compatibility issues when upgrading from JE 6.3. +

+A behavior change was made to DiskOrderedCursor that may require some +applications to increase the JE cache size. To prevent applications from +having to reserve memory in the Java heap for the DiskOrderedCursor, memory +used by the DiskOrderedCursor is now subtracted from the JE cache budget. The +maximum amount of such memory is specified, as before, using +DiskOrderedCursorConfig.setInternalMemoryLimit. [#24291] + +

Upgrading from JE 6.2 or earlier

+ +In JE 6.3 the on-disk file format moved to 11. The file format change is +forward compatible in that JE files created with earlier releases can be read +when opened with JE 6.3 or later. The change is not backward compatible in that +files created with JE 6.3 or later cannot be read by earlier releases. After +an existing environment is opened read/write using JE 6.3, the environment can +no longer be read by earlier releases. + +

Upgrading from JE 6.1 or earlier

+ +In JE 6.2 the on-disk file format moved to 10. The file format change is +forward compatible but not backward compatible, as usual. + +

Upgrading from JE 6.0 or earlier

+ +There was no file format change in JE 6.1. + +An API change in JE 6.1.3 [#23330] requires application changes if write +operations are performed on a non-replicated database in a replicated +environment. A code change is necessary for applications with the following +characteristics: +

+

+

+In order to perform write operations in such cases, the application must now +call TransactionConfig.setLocalWrite(true) and use this configuration to create +a Transaction for performing writes to the non-replicated database. +
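+
+A sketch of the required change (handle names here are illustrative):
+
+TransactionConfig txnConfig = new TransactionConfig();
+txnConfig.setLocalWrite(true);   // permit non-replicated writes
+
+Transaction txn = repEnv.beginTransaction(null, txnConfig);
+nonReplicatedDb.put(txn, key, data);
+txn.commit();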

+In addition, it is no longer possible to use a single transaction to write to both replicated and non-replicated databases. IllegalOperationException will be thrown if this is attempted.

+These changes were necessary to prevent corruption when a transaction contains +write operations for both replicated and non-replicated databases, and a +failover occurs that causes a rollback of this transaction. The probability of +corruption is low, but it can occur under the right conditions. +

+For more information see the javadoc for TransactionConfig.setLocalWrite(true), +and the "Non-replicated Databases in a Replicated Environment" section of the +ReplicatedEnvironment class javadoc. + +

Upgrading from JE 5.0 or earlier

+ +In addition to the file format changes, a change was made involving partial +Btree and duplicate comparators. Partial comparators are an advanced feature +that few applications use. As of JE 6.0, using partial comparators is not +recommended. Applications that do use partial comparators must change their +comparator classes to implement the new PartialComparator tag interface, before +running the application with JE 6. Failure to do so may cause incorrect +behavior during transaction aborts. See the PartialComparator javadoc for more +information. + +

Upgrading from JE 4.1 or earlier

+ +There are two important notes about the file format change in JE 5.0. +
+
  1. The file format change enabled significant improvements in operation performance, memory and disk footprint, and concurrency of databases with duplicate keys. Due to these changes, an upgrade utility must be run before opening an environment with this release, if the environment was created using JE 4.1 or earlier. See the Upgrade Procedure below for more information.
  2. An application which uses JE replication may not upgrade directly from JE 4.0 to JE 5.0 or later. Instead, the upgrade must be done from JE 4.0 to JE 4.1 and then to JE 5.0 or later. Applications already at JE 4.1 are not affected. Upgrade guidance can be found in the new chapter, "Upgrading a JE Replication Group", in the "Getting Started with BDB JE High Availability" guide.
+ +Due to the format changes in JE 5, a special utility program must be run for an +environment created with JE 4.1 or earlier, prior to opening the environment +with JE 5.0 or later. The utility program is part of JE 4.1. JE 4.1.20, or a +later version of JE 4.1, must be used. +

+One of two utility programs must be used, which are available in the release +package for JE 4.1.20, or a later release of JE 4.1. If you are currently +running a release earlier than JE 4.1.20, then you must download the latest JE +4.1 release package in order to run these utilities. +

+The steps for upgrading are as follows. +

+
  1. Stop the application using BDB JE.
  2. Run the DbPreUpgrade_4_1 or DbRepPreUpgrade_4_1 utility.
     If you are using a regular non-replicated Environment:
        java -jar je-4.1.20.jar DbPreUpgrade_4_1 -h <dir>
     If you are using a JE ReplicatedEnvironment:
        java -jar je-4.1.20.jar DbRepPreUpgrade_4_1
                -h <dir>
                -groupName <group name>
                -nodeName <node name>
                -nodeHostPort <host:port>
  3. Finally, start the application using the current JE 5.0 (or later) release of BDB JE.
+

+The second step -- running the utility program -- does not perform data +conversion. This step simply performs a special checkpoint to prepare the +environment for upgrade. It should take no longer than an ordinary startup and +shutdown. +

+During the last step -- when the application opens the JE environment using the +current release (JE 5 or later) -- all databases configured for duplicates will +automatically be converted before the Environment or +ReplicatedEnvironment constructor returns. Note that a database +might be explicitly configured for duplicates using +DatabaseConfig.setSortedDuplicates(true), or implicitly configured +for duplicates by using a DPL MANY_TO_XXX relationship +(Relationship.MANY_TO_ONE or +Relationship.MANY_TO_MANY). +

+The duplicate database conversion only rewrites internal nodes in the Btree, +not leaf nodes. In a test with a 500 MB cache, conversion of a 10 million +record data set (8 byte key and data) took between 1.5 and 6.5 minutes, +depending on number of duplicates per key. The high end of this range is when +10 duplicates per key were used; the low end is with 1 million duplicates per +key. +

+To make the duplicate database conversion predictable during deployment, users +should measure the conversion time on a non-production system before upgrading +a deployed system. When duplicates are converted, the Btree internal nodes are +preloaded into the JE cache. A new configuration option, +EnvironmentConfig.ENV_DUP_CONVERT_PRELOAD_ALL, can be set to false +to optimize this process if the cache is not large enough to hold the internal +nodes for all databases. For more information, see the javadoc for this +property. +

+If an application has no databases configured for duplicates, then the last +step simply opens the JE environment normally, and no data conversion is +performed. +

+If the user fails to run the DbPreUpgrade_4_1 or DbRepPreUpgrade_4_1 utility +program before opening an environment with JE 5 or later for the first time, an +exception such as the following will normally be thrown by the +Environment or ReplicatedEnvironment constructor: +

+  com.sleepycat.je.EnvironmentFailureException: (JE 6.0.1) JE 4.1 duplicate DB
+  entries were found in the recovery interval. Before upgrading to JE 5.0, the
+  following utility must be run using JE 4.1 (4.1.20 or later):
+  DbPreUpgrade_4_1.  See the change log.
+  UNEXPECTED_STATE: Unexpected internal state, may have side effects.
+    at com.sleepycat.je.EnvironmentFailureException.unexpectedState(EnvironmentFailureException.java:376)
+    at com.sleepycat.je.recovery.RecoveryManager.checkLogVersion8UpgradeViolations(RecoveryManager.java:2694)
+    at com.sleepycat.je.recovery.RecoveryManager.buildTree(RecoveryManager.java:549)
+    at com.sleepycat.je.recovery.RecoveryManager.recover(RecoveryManager.java:198)
+    at com.sleepycat.je.dbi.EnvironmentImpl.finishInit(EnvironmentImpl.java:610)
+    ...  
+

+If the user fails to run the DbPreUpgrade_4_1 or DbRepPreUpgrade_4_1 utility +program, but no exception is thrown when the environment is opened with JE 5 +or later, this is probably because the application performed an +Environment.sync before last closing the environment with JE 4.1 +or earlier, and nothing else happened to be written (by the application or JE +background threads) after the sync operation. In this case, running the +upgrade utility is not necessary. + + +


+

Changes in 7.5.11

+ +
+
  1. Removed the following incorrect javadoc for cursor read operations:

    In a replicated environment, an explicit +transaction must have been specified when opening the cursor, unless +read-uncommitted isolation is specified via the CursorConfig or LockMode +parameter.

    +

    When a null Transaction parameter is specified for a read operation in a +replicated environment, the default consistency +(ReplicationConfig.CONSISTENCY_POLICY) is used.

    +[#26037] (7.5.0) +

  2. The data verifier has been enhanced to perform Btree verification. Btree verification is performed by the background data verifier, the DbVerify utility, the DbVerify.verify method, the Database.verify method and the Environment.verify method.

    +Previously, the DbVerify utility and the DbVerify/Database/Environment.verify +methods performed a very rudimentary and inefficient form of verification. +Btree verification now includes several different types of integrity checks +and is performed more efficiently than before. +

+Background verification (see EnvironmentConfig.ENV_RUN_VERIFIER and VERIFY_SCHEDULE) now includes basic Btree verification and secondary index verification by default. There are two other types of verification that can be enabled, as described below. Previously, background verification only included log checksum verification (see EnvironmentConfig.VERIFY_LOG).

    +The javadoc for these parameters contains a complete description of the types +of verification. Other changes to be aware of are: +

    +
    • Only one instance of log corruption or basic Btree corruption will now be detected by data verification. Previously, the verifier would attempt to skip over such a detected corruption and continue, although this approach was unreliable. Now the Environment is always invalidated when such corruption is detected, and it isn't possible to continue.
    • When index corruption is detected, the environment is not invalidated. Instead, the corrupt index (secondary database) is marked as corrupt in memory. All subsequent access to a corrupt index will now throw SecondaryIntegrityException. To correct the problem, the application may perform a full restore or rebuild the corrupt index. This new behavior applies whether the index corruption was detected during Btree verification or during normal access to the index.
    • When basic Btree verification or log checksum verification fails, the Environment is invalidated (must be closed) and an EnvironmentFailureException is thrown. If the corruption is known to be persistent, the EnvironmentFailureException.isCorrupt method will return true. Additionally, when a persistent corruption is detected and the Environment is open for read-write access, a marker file named 7fffffff.jdb is created in the Environment directory that will prevent re-opening the environment. If an attempt is made to re-open the Environment, the original EnvironmentFailureException will be thrown. This is meant to safeguard against using a corrupt environment when the original exception is accidentally overlooked. While the marker file can be deleted to allow re-opening the environment, this is normally unsafe and is not recommended.
    • The different types of verification can be enabled or disabled in the background data verifier using EnvironmentConfig.VERIFY_BTREE, VERIFY_SECONDARIES and VERIFY_DATA_RECORDS. Additional params control the Btree verification batch size and delay between batches: VERIFY_BTREE_BATCH_SIZE and VERIFY_BTREE_BATCH_DELAY.
    • When using the DbVerify/Database/Environment.verify methods, the different types of verification can be enabled or disabled using new methods in the VerifyConfig class: setVerifySecondaries and setVerifyDataRecords. New methods also control the verification batch size and delay between batches: setBatchSize and setBatchDelay.
    • When using the DbVerify command line, data record verification can be enabled using -vdr, and batch size/delay can be specified using -bs and -d. Note that secondary integrity verification is not possible using the command line because this feature requires the secondary databases to have been opened by the application.
    • The Database.verify and Environment.verify methods now throw an EnvironmentFailureException (as described above) if verification fails. Previously, these methods did not give any indication of failure. This is a change in behavior.
    • Updated existing javadoc in several cases where the javadoc was incorrect. Existing behavior was not changed in these cases.
        • Updated Environment.verify javadoc to indicate that the 'out' parameter is unused and VerifyConfig.setShowProgressStream should be used instead.
        • Updated VerifyConfig.getPrintInfo javadoc to indicate that the information is printed to System.err by default (not System.out) and the default is to use the stream specified by VerifyConfig.getShowProgressStream.
        • Updated the javadoc for VerifyConfig.setPropagateExceptions and VerifyConfig.setAggressive to note that these settings currently have no effect.
    • Log verification (checksum validation) was previously supported. However, performance testing determined that log verification had a negative impact on throughput and latency for some workloads. To avoid this, a delay between reads has been added. This delay can be configured using EnvironmentConfig.VERIFY_LOG_READ_DELAY, VerifyLog.setReadDelay and the -d command line arg.
    +[#25960] (7.5.1) +

  3. Configuration parameters for limiting disk usage have been added: EnvironmentConfig.MAX_DISK and FREE_DISK. MAX_DISK should be specified for JE HA applications when upgrading to this release, since data files will be reserved for potential replication to nodes that are out of contact. More reserved files are retained for potential replication in this release, as described further below. If MAX_DISK is not specified, all the free space on the volume (minus 5GB of free space, with the default setting of FREE_DISK) will eventually be used. The EnvironmentMutableConfig.setMaxDisk method is provided as a convenience for setting MAX_DISK.

    +Disk usage is now monitored and a new exception, DiskLimitException, is thrown +when attempting a write operation when the threshold is in danger of being +exceeded. In this situation, read operations are still allowed. Previously, the +Environment was invalidated and closed when the volume was filled. Allowing +read operations now provides partial availability in this situation. The +FREE_DISK parameter also now prevents filling the disk completely, which eases +manual recovery. +

    +Although behavior is now improved when available space has been used, the +application-level goal must be to prevent the situation entirely by monitoring +disk usage and taking recourse before the situation occurs. To support this, +new JE statistics have been added: +

    +
    • activeLogSize: EnvironmentStats.getActiveLogSize()
    • reservedLogSize: EnvironmentStats.getReservedLogSize()
    • protectedLogSize: EnvironmentStats.getProtectedLogSize()
    • protectedLogSizeMap: EnvironmentStats.getProtectedLogSizeMap()
    • availableLogSize: EnvironmentStats.getAvailableLogSize()
    +
+We strongly recommend using availableLogSize to monitor disk usage and take corrective action well before this value reaches zero. Monitoring the file system size of the JE data files is not a good substitute for this, since the data files include reserved files which will be deleted by JE automatically.
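+
+A sketch of such monitoring (the threshold here is application-defined, not part of the JE API):
+
+EnvironmentStats stats = env.getStats(null);
+long available = stats.getAvailableLogSize();
+if (available < LOW_WATER_MARK) {   // LOW_WATER_MARK is app-defined
+    // Alert operators, add storage, or throttle the write load.
+}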

    +Additional details are listed below. +

    +
    • DiskLimitException may be thrown by all record write operations, Environment.checkpoint, Environment.sync, and Environment.close (when the final checkpoint cannot be performed).
    • The following HA config params are deprecated and no longer needed: ReplicationConfig.REP_STREAM_TIMEOUT, REPLAY_COST_PERCENT and REPLAY_FREE_DISK_PERCENT. Reserved files are now retained based on available disk space. EnvironmentConfig.MAX_DISK and FREE_DISK should be used instead.

      REPLAY_COST_PERCENT is no longer used. However, REP_STREAM_TIMEOUT is still used when some, but not all, nodes in a group have been upgraded to 7.5 or later. REPLAY_FREE_DISK_PERCENT is still used when it has been specified and is non-zero, and FREE_DISK has not been specified. In this case, REPLAY_FREE_DISK_PERCENT overrides the FREE_DISK default value. If both REPLAY_FREE_DISK_PERCENT and FREE_DISK are specified, an IllegalArgumentException is thrown.
    • EnvironmentStats.getFileDeletionBacklog has been deprecated and always returns zero. Use EnvironmentStats.getProtectedLogSize() and getProtectedLogSizeMap() to monitor protected files.
    • If EnvironmentConfig.CLEANER_BYTES_INTERVAL is zero or unspecified, it is now set to the minimum of EnvironmentConfig.LOG_FILE_MAX divided by four (this was the previous default) and 100 MB. The new 100 MB maximum is to ensure that the cleaner is woken frequently enough, so that reserved files are deleted quickly enough to avoid violating a disk limit. Use caution when overriding the default value.
    • Previously, reserved files (files cleaned but not deleted) were not persistently marked as being reserved. So when the Environment was closed and re-opened, these files would be cleaned again. This re-cleaning was fairly quick because they were 0% utilized, but was a waste of resources nonetheless. Now, reserved files are marked as such in the cleaner's persistent metadata and this avoids re-cleaning.
    • Previously, reserved files were included in the DbSpace output and shown as 0% utilized. They were also reflected in the total utilization, which was therefore inaccurate, since utilization applies to activeLogSize. Now, reserved files are omitted from the list of files and the total utilization. The amount of space used by reserved files is printed at the end of the summary. If the -q option is not specified, the reserved file numbers are also printed.
    • Database.count and DiskOrderedCursor (which both internally use a disk-ordered scanner) now only protect active files from deletion. Previously they unnecessarily also protected reserved files.
    • DbBackup now only protects active files from deletion. Previously it unnecessarily also protected reserved files. In addition, the DbBackup.removeFileProtection method has been added to allow removing protection from a file that has been copied, before calling DbBackup.endBackup.
    • NetworkRestore now only protects active files, and the two most recent reserved files, from deletion. Previously it unnecessarily protected all reserved files. In addition, the protection is removed for files that have been transferred, prior to the completion of the entire restore.
    • The totalLogSize and endOfLog stats (EnvironmentStats.getTotalLogSize and getEndOfLog) are no longer "slow" stats. They are returned by Environment.getStats regardless of the StatsConfig.getFast setting.
    • The je.stat.csv file now contains all stats, not just "fast" stats. Previously, "slow" stats were omitted. Since the stat retrieval frequency is one minute and this is done by a background thread, there is no reason not to include all stats.
    • Fixed a bug where per-Database cleaner metadata could accumulate under certain conditions.
    +[#25220] (7.5.3) +

  7. +Fixed a compatibility problem with the Azul Zulu JVM. Previously the following +exception would occur when using JE with Zulu: +
    +The database environment could not be opened: java.lang.IllegalStateException:
    +Could not access Zing management bean. Make sure -XX:+UseZingMXBeans was
    +specified.
    +
    +[#26163] (7.5.3) +

  9. +Added ReplicationConfig.TXN_ROLLBACK_DISABLED to allow manual control over +rollback, including rollback of transactions considered to be non-durable. +See the javadoc for more information. +

    +[#26220] (7.5.7) +
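    +A minimal sketch of enabling the new parameter, assuming the string-valued setConfigParam style used by other ReplicationConfig parameters: +

    +  import com.sleepycat.je.rep.ReplicationConfig;
    +
    +  ReplicationConfig repConfig = new ReplicationConfig();
    +  // Disable automatic rollback; the application takes responsibility
    +  // for handling rollback of non-durable transactions (see the javadoc).
    +  repConfig.setConfigParam(ReplicationConfig.TXN_ROLLBACK_DISABLED, "true");
    +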


  11. +Fixed a bug that could cause OutOfMemoryError when performing a network +restore (NetworkRestore.execute) after an InsufficientLogException (ILE) is +thrown. The ILE holds a reference to the internals (e.g., data cache) of the +old environment handle. Previously, this reference was not cleared by the +network restore. If the application then re-opened the environment, without +discarding all references to the ILE, OutOfMemoryError could occur due to the +presence of two data caches in the heap at the same time. Now the network +restore clears the internal references when the restore is complete. +

    +[#26305] (7.5.8) +
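    +For reference, a sketch of the standard network restore pattern that this fix affects; it follows the public NetworkRestore API: +

    +  import java.io.File;
    +  import com.sleepycat.je.EnvironmentConfig;
    +  import com.sleepycat.je.rep.InsufficientLogException;
    +  import com.sleepycat.je.rep.NetworkRestore;
    +  import com.sleepycat.je.rep.NetworkRestoreConfig;
    +  import com.sleepycat.je.rep.ReplicatedEnvironment;
    +  import com.sleepycat.je.rep.ReplicationConfig;
    +
    +  static ReplicatedEnvironment openWithRestore(File home,
    +                                               ReplicationConfig repConfig,
    +                                               EnvironmentConfig envConfig) {
    +      try {
    +          return new ReplicatedEnvironment(home, repConfig, envConfig);
    +      } catch (InsufficientLogException ile) {
    +          // Copy the needed log files from another group member.
    +          NetworkRestore restore = new NetworkRestore();
    +          NetworkRestoreConfig config = new NetworkRestoreConfig();
    +          config.setRetainLogFiles(false);
    +          restore.execute(ile, config);
    +          // With this fix, the ILE's internal references are cleared by
    +          // the restore, so re-opening does not keep two caches alive.
    +          return new ReplicatedEnvironment(home, repConfig, envConfig);
    +      }
    +  }
    +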


  13. +Fixed a bug that could prevent performing a network restore +(NetworkRestore.execute), after a prior network restore was aborted or +incomplete for any reason. For example, this could occur if the process is +killed during the first network restore, and then another network restore is +attempted. The problem could occur only in an environment with a relatively +large data set, specifically where at least one billion write transactions had +been performed. An example stack trace is below. +
    +java.lang.NumberFormatException: For input string: "7473413648"
    +    at java.lang.NumberFormatException.forInputString(
    +        NumberFormatException.java:65)
    +    at java.lang.Integer.parseInt(Integer.java:583)
    +    at java.lang.Integer.parseInt(Integer.java:615)
    +    at com.sleepycat.je.rep.InsufficientLogException.init(
    +        InsufficientLogException.java:218)
    +    at com.sleepycat.je.rep.impl.RepImpl.handleRestoreRequired(
    +        RepImpl.java:2296)
    +    at com.sleepycat.je.recovery.RecoveryManager.findEndOfLog(
    +        RecoveryManager.java:543)
    +    at com.sleepycat.je.recovery.RecoveryManager.recover(
    +        RecoveryManager.java:339)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.finishInit(
    +        EnvironmentImpl.java:841)
    +    at com.sleepycat.je.dbi.DbEnvPool.getEnvironment(DbEnvPool.java:222)
    +    at com.sleepycat.je.Environment.makeEnvironmentImpl(Environment.java:267)
    +    at com.sleepycat.je.Environment.init(Environment.java:252)
    +    at com.sleepycat.je.rep.ReplicatedEnvironment.init(
    +        ReplicatedEnvironment.java:607)
    +    at com.sleepycat.je.rep.ReplicatedEnvironment.init(
    +        ReplicatedEnvironment.java:466)
    +...
    +
    +This has been fixed. Without the fix, a workaround for the problem is to +remove all the .jdb files from the destination node, before performing the +network restore. +

    +[#26311] (7.5.8) +


  15. +Fixed an incorrect assertion that fired when CLEANER_FORCE_CLEAN_FILES is +specified and a specified file is already being cleaned. An example stack +trace is below: +
    +java.lang.AssertionError
    +at com.sleepycat.je.cleaner.FileSelector.selectFileForCleaning(FileSelector.java:193)
    +at com.sleepycat.je.cleaner.FileProcessor.doClean(FileProcessor.java:395)
    +at com.sleepycat.je.cleaner.Cleaner.doClean(Cleaner.java:670)
    +...
    +
    +[#26326] (7.5.9) +


Changes in 7.4.5

  1. +Fixed a deadlock between the following internal classes: ExpirationProfile, +IN and FileSelector. This deadlock occurred very rarely (only once in our +testing). It did not cause a persistent problem -- restarting the process was +a safe workaround. +[#25613] (7.4.1) +

  3. +EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES has been made mutable. +[#25821] (7.4.2) +
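    +A sketch of changing the now-mutable parameter on a live environment; the file-number list shown is hypothetical, see the parameter javadoc for the accepted value format: +

    +  import com.sleepycat.je.Environment;
    +  import com.sleepycat.je.EnvironmentConfig;
    +  import com.sleepycat.je.EnvironmentMutableConfig;
    +
    +  static void forceClean(Environment env) {
    +      EnvironmentMutableConfig config = env.getMutableConfig();
    +      config.setConfigParam(
    +          EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "9,c"); // hypothetical
    +      env.setMutableConfig(config);
    +  }
    +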

  5. +The OperationResult.isUpdate() method has been added for distinguishing inserts +and updates performed by a Put.OVERWRITE operation. +[#25882] (7.4.2) +
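    +For example, a sketch using the JE 7 style 'put' signature (described under the 7.0.5 changes below): +

    +  import com.sleepycat.je.Database;
    +  import com.sleepycat.je.DatabaseEntry;
    +  import com.sleepycat.je.OperationResult;
    +  import com.sleepycat.je.Put;
    +
    +  static boolean overwrite(Database db, byte[] key, byte[] data) {
    +      OperationResult result = db.put(
    +          null, new DatabaseEntry(key), new DatabaseEntry(data),
    +          Put.OVERWRITE, null);
    +      // true if an existing record was updated, false if one was inserted.
    +      return result.isUpdate();
    +  }
    +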

  7. +Fixed an unsafe file deletion issue in network restore. If the network restore +was performed on a node without first closing the environment, the deletion of +obsolete log files was considered an unsafe operation and caused the restore +to fail. + +The error message was: +
    +  com.sleepycat.je.EnvironmentFailureException:../env Log file 00000000.jdb
    +  was deleted unexpectedly. LOG_UNEXPECTED_FILE_DELETION: A log file was
    +  unexpectedly deleted, log is likely invalid. Environment is invalid and must
    +  be closed. Originally thrown by HA thread: REPLICA 3(-1) 
    +
    +[#25834] (7.4.3) +

  9. +Logging of internal nodes (INs) has been reduced when deleting many records in +a contiguous key range. +[#25939] (7.4.3) +

  11. +Fixed a bug that could have caused duplicate records to be returned via the +iterator() method of the DPL and Collections API. The iterator reads records +in batches, and if a record at the end of the last batch was deleted by another +thread, fetching the next batch could have read (and later returned via the +iterator) duplicate records, depending on thread timing. +[#25976] (7.4.3) +

  13. +Fixed a bug that prevented LogOverwriteException from being thrown when +assertions were disabled. LogOverwriteException is thrown to prevent creation +of an invalid backup, although this can only happen in rare cases and only on +an HA replica node. See the LogOverwriteException javadoc for more information. +[#25989] (7.4.4) +

  15. +Fixed a bug that caused the following assertion to fire when using a Database +in deferred-write mode (DatabaseConfig.setDeferredWrite(true)) for which data +was previously written in normal (non-deferred-write) mode. +
    +java.lang.AssertionError
    +  com.sleepycat.je.tree.BIN.shouldLogDelta(BIN.java:1927)
    +  ...
    +
    +[#25999] (7.4.4) +


Changes in 7.3.7

  1. +EnvironmentConfig.LOG_N_DATA_DIRECTORIES has been deprecated. This +feature is not known to provide benefits beyond that of a simple RAID +configuration and will be removed in the next release, which is slated +for mid-April, 2017. +

  3. +Added Arbiter functionality that adds additional write availability +for replication groups that have two Electable members. For details see the +javadoc for com.sleepycat.je.rep.arbiter.Arbiter. +[#25567] (7.3.0) +
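    +A sketch of standing up an Arbiter; the setter names and values here are assumptions modeled on ReplicationConfig, see the Arbiter and ArbiterConfig javadoc for the authoritative API: +

    +  import com.sleepycat.je.rep.arbiter.Arbiter;
    +  import com.sleepycat.je.rep.arbiter.ArbiterConfig;
    +
    +  ArbiterConfig config = new ArbiterConfig();
    +  config.setArbiterHome("/arb/home");              // hypothetical path
    +  config.setGroupName("myGroup");                  // hypothetical group
    +  config.setNodeName("arbiter1");
    +  config.setNodeHostPort("arbHost:5004");
    +  config.setHelperHosts("node1:5001,node2:5002");
    +  // Joins the group and acknowledges writes, but stores no data.
    +  Arbiter arbiter = new Arbiter(config);
    +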

  7. +Operation throughput statistics have been simplified and improved. Previously, +these statistics had several shortcomings: they represented API calls rather +than CRUD operations, which caused confusion when a single API call performed +multiple CRUD operations; some CRUD operations (key search operations on any +node, and all operations on a replica node) were missing; the operation +statistics were not included in the EnvironmentStats.toString result; and none +of the operation statistics were available via EnvironmentStats getter +methods. Previously the throughput stats, listed below, were only visible via +the je.stat.csv file. +
    +    Name
    +    ----
    +    dbDelete
    +    dbGet
    +    dbGetSearchBoth
    +    dbPut
    +    dbPutNoDupData
    +    dbPutNoOverWrite
    +    cursorDelete
    +    cursorGetCurrent
    +    cursorGetFirst
    +    cursorGetLast
    +    cursorGetNext
    +    cursorGetNextDup
    +    cursorGetNextNoDup
    +    cursorGetPrev
    +    cursorGetPrevDup
    +    cursorGetPrevNoDup
    +    cursorPut
    +    cursorPutCurrent
    +    cursorPutNoDupData
    +    cursorPutNoOverwrite
    +    secondaryCursorDelete
    +    secondaryCursorGetCurrent
    +    secondaryCursorGetFirst
    +    secondaryCursorGetLast
    +    secondaryCursorGetNext
    +    secondaryCursorGetNextDup
    +    secondaryCursorGetNextNoDup
    +    secondaryCursorGetPrev
    +    secondaryCursorGetPrevDup
    +    secondaryCursorGetPrevNoDup
    +    secondaryDbDelete
    +    secondaryDbGet
    +    secondaryDbGetSearchBoth
    +
    +Now, the following statistics representing CRUD operations are output in the +je.stat.csv file and the EnvironmentStats.toString method, are included for +all nodes including replicas, and are available via new EnvironmentStats getter +methods. These replace the statistics listed above. +
    +    Name                   EnvironmentStats method
    +    ----                   -----------------------
    +    priSearch              getPriSearchOps()
    +    priSearchFail          getPriSearchFailOps()
    +    secSearch              getSecSearchOps()
    +    secSearchFail          getSecSearchFailOps()
    +    priPosition            getPriPositionOps()
    +    secPosition            getSecPositionOps()
    +    priInsert              getPriInsertOps()
    +    priInsertFail          getPriInsertFailOps()
    +    secInsert              getSecInsertOps()
    +    priUpdate              getPriUpdateOps()
    +    secUpdate              getSecUpdateOps()
    +    priDelete              getPriDeleteOps()
    +    priDeleteFail          getPriDeleteFailOps()
    +    secDelete              getSecDeleteOps()
    +
    +The new statistics should be considered internal operations or units of work +rather than API calls. This approach allows correlating operations with +performance measurements. It also reduces the number of statistics by more than +half. The javadoc of the new EnvironmentStats getter methods describes the +mapping from API calls to operation statistics. +

    +[#23792] (7.3.0) +
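    +A short sketch of reading a few of the new counters, assuming "env" is an open Environment: +

    +  import com.sleepycat.je.Environment;
    +  import com.sleepycat.je.EnvironmentStats;
    +  import com.sleepycat.je.StatsConfig;
    +
    +  StatsConfig config = new StatsConfig();
    +  config.setFast(true);
    +  EnvironmentStats stats = env.getStats(config);
    +  long searches = stats.getPriSearchOps();   // primary-DB exact searches
    +  long inserts  = stats.getPriInsertOps();   // primary-DB insertions
    +  long updates  = stats.getPriUpdateOps();   // primary-DB updates
    +  long deletes  = stats.getPriDeleteOps();   // primary-DB deletions
    +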


  7. +Data corruption is now detected as soon as possible by using an internal JE +background task. This detects data corruption caused by media/disk failure by +reading the log sequentially and verifying checksums. This is the equivalent of +running the current DbVerifyLog utility, but it is performed automatically and +periodically. The schedule for performing verification can be controlled by the +new EnvironmentConfig.ENV_RUN_VERIFIER, VERIFY_SCHEDULE and VERIFY_LOG +parameters. By default, verification is on and occurs once a day at midnight, +local time. +

    +When corruption is detected, the Environment will be invalidated and an +EnvironmentFailureException will be thrown. Applications catching this +exception can call the new EnvironmentFailureException.isCorrupted method to +determine whether corruption was detected. +

    +If isCorrupted returns true, a network restore (or restore from backup) should +be performed to avoid further problems. The advantage of performing +verification frequently is that a problem may be detected sooner than it would +be otherwise. For HA applications, this means that the network restore can be +done while the other nodes in the group are up, minimizing exposure to +additional failures. +

    +[#25221] (7.3.0) +
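    +A configuration and handling sketch; the crontab-style schedule value is an assumption based on the VERIFY_SCHEDULE javadoc: +

    +  import com.sleepycat.je.EnvironmentConfig;
    +  import com.sleepycat.je.EnvironmentFailureException;
    +
    +  EnvironmentConfig envConfig = new EnvironmentConfig();
    +  envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_VERIFIER, "true");
    +  // Run at 1:00 AM local time instead of the default midnight.
    +  envConfig.setConfigParam(EnvironmentConfig.VERIFY_SCHEDULE, "0 1 * * *");
    +
    +  try {
    +      // ... normal JE operations ...
    +  } catch (EnvironmentFailureException efe) {
    +      if (efe.isCorrupted()) {
    +          // Media corruption: restore from backup or do a network restore.
    +      }
    +      throw efe;
    +  }
    +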


  9. +Repeat-fault reads have been eliminated, for the most part, for LNs (Btree leaf +nodes, which represent record data on disk.) Previously, if an LN's on-disk +size was greater than EnvironmentConfig.LOG_FAULT_READ_SIZE (2kB by default), +two reads would be required to fetch the LN from disk. The first read would +always include the log entry header, which contains the exact entry size, and +the second read (repeat-read) was needed to read the entire entry. The second +read includes the entire entry, although normally it will be cached by the file +system. +

    +Now, only a single read is needed because the last logged size for LNs is now +stored in the Btree, for all LNs written with JE 6.0 and later, and this can be +used to determine the exact size needed for the read buffer. The benefits of +this change are 1) the amount of IO is reduced (although the repeat-read +normally reads data that is cached by the file system), and 2) the statistics +describing IO activity are simpler to analyze without the repeat-reads in the +picture. +

    +Note that INs (Btree internal nodes) can still cause repeat-reads when they are +fetched, because the last logged size for INs is not stored in the Btree. +However, in many applications all INs are cached and therefore INs are rarely +read from disk (except during a cache warm-up period). The nRepeatFaultReads +statistic (EnvironmentStats.getNRepeatFaultReads) indicates the number of +repeat-reads. +

    +[#25387] (7.3.0) +


  11. +Several bugs were fixed related to performing a preload (Database.preload or +Environment.preload) when an off-heap cache is configured (via +EnvironmentConfig.setOffHeapCacheSize). These bugs sometimes caused an +incomplete preload as well as producing an incorrect (corrupt) data set. +In releases prior to 7.3, preload should not be used with an off-heap cache. +[#25594] (7.3.1) +

  13. +Network restores are instigated by a JE HA application when an +environment open results in an InsufficientLogException. If a network +restore is interrupted, the application should retry until it +succeeds. Failing to do so might result in an environment log that is +corrupted or inconsistent. This JE release adds a new mechanism to +persistently mark that a network restore has started, and to prevent +inadvertent use of the environment before the restore has +completed. The marker file is named 7fffffff.jdb, and is recognized +and managed by JE. The required steps for handling an +InsufficientLogException are unchanged; the marker file is an internal +mechanism. +

    +[#25369] (7.3.1) +


  15. +Fixed a bug that prevented the transaction timeout for a write operation from +being honored in JE HA applications. When a transaction's ReplicaAckPolicy +required waiting for a replica, the timeout was not always honored and the +transaction sometimes took longer than the specified timeout. +[#25692] (7.3.4) +

  17. +Preload (Database.preload and Environment.preload) has been changed so that it +does not populate the off-heap cache. Only the main cache (in the Java heap) is +now populated. This was done to avoid a corruption problem linked to preload +and the off-heap cache. Population of the off-heap cache will be added in a +future release. Note that when an off-heap cache is configured, preload will +not populate it, but other operations will populate it as data is evicted from +the main cache. +[#25594] (7.3.7) +


Changes in 7.2.8

  1. +Fixed a problem where Environment.removeDatabase or truncateDatabase may have +taken a long time to complete, due to internal retries. +[#25361] (7.2.0) +

  3. +Reduced GC overhead by avoiding the re-creation of internal lock objects, in +cases where a record is locked by only one thread/transaction at a time. This +overhead was introduced when deadlock detection was added in JE 7.1 [#16260]. +The overhead is small, but could have impacted certain critical code paths, +such as transaction replay on an HA replica node. +

    +[#25355] (7.2.0) +


  5. +Improved support for JDK 9. (Note that JDK 9 is not officially supported until +it becomes generally available.) Previously, using JE with JDK 9 would cause +the following exception: +
    +  java.lang.IllegalStateException: java.lang.IllegalAccessException: class
    +  com.sleepycat.je.utilint.JVMSystemUtils cannot access class
    +  sun.management.BaseOperatingSystemImpl (in module java.management) because
    +  module java.management does not export sun.management to unnamed module
    +  @73846619
    +
    +A workaround for this problem was to specify the following JVM option: +
    +  -XaddExports:java.management/sun.management=ALL-UNNAMED
    +
    +Specifying this option is no longer necessary. +[#25383] (7.2.0) +

  7. +Made several changes to make NullPointerExceptions less likely when closing an +Environment. NullPointerException sometimes occurs when one thread is calling +Environment.close and other threads (either application threads or internal +JE threads) are concurrently accessing the Environment. It is still possible +for NullPointerException and other unexpected exceptions to occur, but they +should now happen less frequently, and IllegalStateException should normally be +thrown instead. +

    +Several additional fixes were made as a result of these changes: +

    • When a database is closed, Database.getDatabaseName now throws + IllegalStateException. Previously, it returned null and this was not + documented. This could have caused NullPointerException in the + application.
    • When a database is closed, Database.getConfig now throws + IllegalStateException. Previously, it returned a non-null, but + sometimes incorrect, configuration.
    • When an environment is closed, Environment.printStartupInfo() now + throws IllegalStateException; previously NullPointerException was + thrown. As before, this method may be called when the environment is + invalid but not yet closed, but now this behavior is documented.
    • As before, the Environment.getConfig and getMutableConfig methods may + be called when the environment is invalid but not yet closed, but now + this behavior is documented.
    • When an environment is invalid but not yet closed, and the + Environment.setMutableConfig method is called, an + EnvironmentFailureException is now thrown. Previously, the method's + behavior in this case was undefined.
    • When an environment is closed or invalid, + ReplicatedEnvironment.transferMaster now throws IllegalStateException + or EnvironmentFailureException. Previously a NullPointerException was + thrown.

    +[#21590] (7.2.1) +


  9. +Fixed a problem where checkpointing sometimes did not occur after log cleaning +when application write operations stopped, preventing the reclaiming of disk +space. This was a common problem with tests that expect disk space to be +reclaimed. In production systems it could also be a problem during repair of an +out-of-disk situation. See the javadoc for the configuration property, +EnvironmentConfig.CLEANER_WAKEUP_INTERVAL, for details. +

    +Note that an earlier fix [#23180] in JE 7.1 caused cleaning to occur in this +situation, but a checkpoint is also needed to reclaim disk space after +cleaning. In addition, the earlier fix was not reliable in certain cases where +the cleaner thread awoke concurrently with the last write operation. +

    +[#25364] (7.2.1) +
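    +A tuning sketch; the duration string is an assumption, see the CLEANER_WAKEUP_INTERVAL javadoc for accepted values and the default: +

    +  import com.sleepycat.je.EnvironmentConfig;
    +
    +  EnvironmentConfig envConfig = new EnvironmentConfig();
    +  // Wake the cleaner periodically even when writes have stopped, so
    +  // cleaning and the follow-on checkpoint can reclaim disk space.
    +  envConfig.setConfigParam(EnvironmentConfig.CLEANER_WAKEUP_INTERVAL, "10 s");
    +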


  11. +Improved behavior and error handling support for an invalidated Environment. +When an Environment is invalidated due to an EnvironmentFailureException, the +user must call Environment.close(). Calls to any other JE methods will re-throw +the invalidating EnvironmentFailureException. In addition, this exception may +need special handling by the application, for example, an +InsufficientLogException (which extends EnvironmentFailureException) must be +handled by performing a network restore. +

    +Several changes have been made to make this process simpler and more reliable. +

    • The first invalidating EnvironmentFailureException is now saved + internally, and this exception is re-thrown when making a JE API call + (other than Environment.close). Previously, when multiple + EnvironmentFailureExceptions occurred, the last one thrown was saved and + re-thrown. +

      + (After the environment is invalidated by an + EnvironmentFailureException, other EnvironmentFailureExceptions may be + thrown later as side effects of the original problem, or possibly as + separate problems. It is normally the first invalidating exception that + is most relevant.)

    • The Environment.getInvalidatingException method has been added. This + returns the invalidating exception described above.
    • The Environment.isClosed method has been added. The existing + Environment.isValid returns false in two cases: when an environment is + closed, and when it is invalid but not yet closed. This new isClosed + method can be used to distinguish between these two cases. The javadoc + for isValid was clarified accordingly.

    +[#25248] (7.2.1) +
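    +The methods above can be combined as in this sketch, assuming "env" is an Environment handle: +

    +  import com.sleepycat.je.Environment;
    +  import com.sleepycat.je.EnvironmentFailureException;
    +
    +  static void checkHandle(Environment env) {
    +      if (env.isValid() || env.isClosed()) {
    +          return; // healthy, or already closed and ready to discard
    +      }
    +      // Invalid but not yet closed: inspect the first invalidating
    +      // exception, then close the handle.
    +      EnvironmentFailureException cause = env.getInvalidatingException();
    +      System.err.println("Environment invalidated by: " + cause);
    +      env.close();
    +  }
    +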


  13. +Detect unexpected JE log file deletions. Normally all JE log file deletions +should be performed as a result of JE log cleaning. If an external file +deletion is detected, JE assumes this was accidental. This will cause the +environment to be invalidated and all methods will throw +EnvironmentFailureException. +[#25201] (7.2.2) +

  15. +Enhanced the background log flushing capability in JE HA, and made this feature +available with or without HA. +

    +Previously, the ReplicationConfig.RUN_LOG_FLUSH_TASK and +LOG_FLUSH_TASK_INTERVAL parameters specified whether and how often JE HA would +periodically perform a flush and fsync, to force NO_SYNC or WRITE_NO_SYNC +transactions to the file system and to the storage device. The default interval +was 5 minutes. These parameters are now deprecated. For backward compatibility +information, see the javadoc for these parameters. +

    +In place of the deprecated HA parameters, the +EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL and LOG_FLUSH_SYNC_INTERVAL +parameters have been added. These specify two separate intervals for flushing +to the file system and the storage device, with default values of 5 seconds and +20 seconds, respectively. Frequent periodic flushing to the file system +provides improved durability for NO_SYNC transactions. Without this flushing, +if application write operations stop, then some number of NO_SYNC transactions +would be left in JE memory buffers and would be lost in the event of a crash. +For HA applications, this flushing reduces the possibility of +RollbackProhibitedException. +

    +[#25417] (7.2.2) +
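    +A configuration sketch; the duration strings are assumptions, see the parameter javadoc for accepted formats: +

    +  import com.sleepycat.je.EnvironmentConfig;
    +
    +  EnvironmentConfig envConfig = new EnvironmentConfig();
    +  // Flush NO_SYNC/WRITE_NO_SYNC transactions to the file system often...
    +  envConfig.setConfigParam(
    +      EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL, "1 s");
    +  // ...and fsync to the storage device less often.
    +  envConfig.setConfigParam(
    +      EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL, "10 s");
    +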


  17. +DbCacheSize has been improved for applications using CacheMode.EVICT_LN and an +off-heap cache. This change applies when the -offheap argument is specified. +The -maincache argument may now be omitted, and the size of the main cache is +assumed to be the amount needed to hold all internal nodes (INs). Previously, +it was difficult to use DbCacheSize to determine the main and off-heap cache +sizes when using EVICT_LN, because DbCacheSize required specifying the main +cache size and assumed that LNs would be stored in the main cache (when there +was room). +

    +[#25380] (7.2.6) +



Changes in 7.1.9

  1. +Fixed a bug that might have caused data corruption. Multi-threaded writes were +incorrectly allowed during recovery, due to eviction. The smaller the cache +relative to the recovery interval and data set size, the more likely this was +to occur. This could have caused corruption, but this was never confirmed. +

    +Note that the corruption problem that motivated this fix occurred with an ext3 +file system with a default configuration (write barrier not enabled). This is +not recommended for JE, because JE relies on ordered writes. However, we don't +have any proof that the problem was ext3 specific, because it was not +reproducible. +

    +During testing of this fix, a separate problem was fixed in the exception +listener mechanism (EnvironmentConfig.setExceptionListener). Previously, when a +JE background thread threw an Error (due to an assertion or out-of-memory +condition, for example), this was not reported to the listener. Now, the +EnvironmentFailureException, which is created as a result of the Error, is +reported to the listener. +

    +In addition, when an unhandled exception occurred in the background eviction +threads, an EnvironmentFailureException was not created, and so the Environment +was not invalidated. This was another reason for the lack of notifications to +the exception listener. This has been corrected. +

    +Note that when using a shared cache, unhandled exceptions during eviction do +not always invalidate the Environment or cause exception listener events. +This issue is not addressed by the fixes mentioned. +

    +[#25084] (7.1.0) +


  3. +Changes to track durable transaction commits (transaction durability requiring +acknowledgements from at least a simple majority of nodes) explicitly in the JE +HA log. Only durable transaction commits now count towards the rollback limit +specified in com.sleepycat.je.rep.ReplicationConfig.TXN_ROLLBACK_LIMIT, thus +allowing for automatic rollback and recovery in more cases. +

    +[#25057] (7.1.0) +
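    +For reference, the limit itself is an integer-valued parameter (sketch): +

    +  import com.sleepycat.je.rep.ReplicationConfig;
    +
    +  ReplicationConfig repConfig = new ReplicationConfig();
    +  // With this change, only durably committed transactions count
    +  // toward the rollback limit.
    +  repConfig.setConfigParam(ReplicationConfig.TXN_ROLLBACK_LIMIT, "20");
    +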


  5. +Fixed a bug that could prevent an HA internal thread from exiting on the master +node, preventing internal state from being updated, and potentially causing +disk usage to grow on all nodes. The internal thread also could not be +interrupted, causing a hang. An example thread dump for JE 5.0.98 is below. +
    +"Feeder Input for rg2-rn1" #93465 daemon prio=5 os_prio=0 tid=0x00007fb40c028800 nid=0x109c runnable [0x00007fb3757d6000]
    +    java.lang.Thread.State: RUNNABLE
    +         at java.lang.Throwable.fillInStackTrace(Native Method)
    +         at java.lang.Throwable.fillInStackTrace(Throwable.java:783)
    +         - locked <0x00000003d0ae00e0> (a java.lang.IllegalArgumentException)
    +         at java.lang.Throwable.(Throwable.java:250)
    +         at java.lang.Exception.(Exception.java:54)
    +         at java.lang.RuntimeException.(RuntimeException.java:51)
    +         at java.lang.IllegalArgumentException.(
    +            IllegalArgumentException.java:42)
    +         at java.nio.Buffer.position(Buffer.java:244)
    +         at com.sleepycat.je.log.FileReader.threadSafeBufferPosition(
    +            FileReader.java:920)
    +         at com.sleepycat.je.log.FileReader$ReadWindow.fillFromFile(
    +            FileReader.java:1185)
    +         at com.sleepycat.je.log.FileReader$ReadWindow.slideAndFill(
    +            FileReader.java:1063)
    +         at com.sleepycat.je.log.FileReader.setBackwardPosition(
    +            FileReader.java:587)
    +         at com.sleepycat.je.log.FileReader.getLogEntryInReadBuffer(
    +            FileReader.java:429)
    +         at com.sleepycat.je.log.FileReader.readNextEntryAllowExceptions(
    +            FileReader.java:256)
    +         at com.sleepycat.je.log.FileReader.readNextEntry(FileReader.java:229)
    +         at com.sleepycat.je.rep.stream.FeederSyncupReader.scanBackwards(
    +            FeederSyncupReader.java:123)
    +         at com.sleepycat.je.rep.stream.FeederReplicaSyncup.
    +            makeResponseToEntryRequest(FeederReplicaSyncup.java:283)
    +         at com.sleepycat.je.rep.stream.FeederReplicaSyncup.execute(
    +            FeederReplicaSyncup.java:100)
    +         at com.sleepycat.je.rep.impl.node.Feeder$InputThread.run(
    +            Feeder.java:413)
    +
    +[#25088] (7.1.0) +

  7. +Deadlock detection has been implemented to improve performance and behavior +when lock conflicts occur due to a deadlock. Performance is improved because a +deadlock can often be detected without blocking, or after blocking for a +shorter period; the deadlock can then be broken sooner, which increases +concurrency. Behavior is improved because DeadlockException is now thrown when +a deadlock is detected, more debugging information is included in the +exception, and deadlock detection is reliable. +

    +In earlier releases, a LockTimeoutException was eventually thrown when a +deadlock occurred, but only after the lock timeout expired. This exception +sometimes contained information about a potential deadlock, but that +information was not always correct. +

    +Specific changes include the following (a retry-loop sketch appears at the end +of this item): +

    • + DeadlockException is now thrown when a deadlock is detected. Note that + LockTimeoutException is still thrown when the lock timeout expires and a + deadlock is not detected. TransactionTimeoutException is thrown when the + transaction timeout expires and a deadlock is not detected. +
    • + Deadlock detection is performed when a lock conflict is detected. A new + configuration parameter, EnvironmentConfig.LOCK_DEADLOCK_DETECT, can be + used to disable deadlock detection. By default, deadlock detection is + enabled. See EnvironmentConfig.LOCK_DEADLOCK_DETECT for more details about + the deadlock detection procedure. +
    • + When deadlock detection is enabled, another new parameter, + EnvironmentConfig.LOCK_DEADLOCK_DETECT_DELAY, may be used to improve + performance under certain circumstances. By default this is set to zero, + meaning no special delay. +
    • + EnvironmentConfig.LOCK_OLD_LOCK_EXCEPTIONS is now deprecated and has + no effect, as if it were set to false. Also, LockNotGrantedException has + been removed; it was replaced by LockNotAvailableException in JE 3.3. In + addition, TransactionTimeoutException is always thrown when a transaction + times out, not DeadlockException. +

      Historical Note:
      + In JE releases 3.3 and earlier, {@link DeadlockException} or a + subclass of it was always thrown when a lock conflict occurred. + Applications typically caught {@link DeadlockException} in order to + detect lock conflicts and determine whether to retry a transaction. + {@link DeadlockException} itself was thrown when a lock or transaction + timeout occurred and {@link LockNotGrantedException} (a subclass of + {@link DeadlockException}) was thrown when a lock conflict occurred + for a no-wait transaction (see {@link TransactionConfig#setNoWait}). +

      + In all releases after JE 3.3, new exceptions and the new base class + {@link LockConflictException} are available. {@link + LockConflictException} should be caught to handle lock conflicts in a + general manner, instead of catching {@link DeadlockException}. +

      + In all releases after JE 3.3, LockNotGrantedException was replaced by + LockNotAvailableException. LockNotGrantedException was deprecated + because it misleadingly extended DeadlockException. Now in JE 6.5, + LockNotGrantedException has been removed. +

    +[#16260] (7.1.1) +
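    +The retry-loop sketch referenced above; it assumes "env" is an open Environment and that the transactional work can safely be retried: +

    +  import com.sleepycat.je.Environment;
    +  import com.sleepycat.je.LockConflictException;
    +  import com.sleepycat.je.Transaction;
    +
    +  static void runWithRetries(Environment env) {
    +      final int maxRetries = 5; // arbitrary illustrative limit
    +      for (int i = 0; i < maxRetries; i++) {
    +          Transaction txn = env.beginTransaction(null, null);
    +          try {
    +              // ... read/write operations using txn ...
    +              txn.commit();
    +              return;
    +          } catch (LockConflictException e) {
    +              // Catches DeadlockException, LockTimeoutException and
    +              // TransactionTimeoutException alike; abort and retry.
    +              txn.abort();
    +          }
    +      }
    +      throw new RuntimeException("Gave up after retries");
    +  }
    +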

  9. +Fixed a bug that impacts the use of the Serializable isolation mode. When +multiple threads were performing read and write operations, phantom prevention +did not work in certain cases. +[#25149] (7.1.1) +

  11. +Fixed a problem where cleaning sometimes did not occur after application write +operations stopped. This was a common problem with tests that expect disk space +to be reclaimed. In production systems it could also be a problem during repair +of an out-of-disk situation. See the javadoc for the new configuration +property, EnvironmentConfig.CLEANER_WAKEUP_INTERVAL, for details. +

    +(Note that this fix does not cause checkpointing to occur, and a checkpoint is +sometimes needed to reclaim disk space after cleaning. A later fix in JE 7.2 +[#25364] corrects this problem as well.) +

    +[#23180] (7.1.1) +


  13. +Fixed two bugs that could cause mutex deadlocks during Environment.close() and +ReplicatedEnvironment.shutdownGroup(). An example deadlock is shown below. +
    +"ReplayThread" #37 daemon prio=5 os_prio=0 tid=0x00007fe11001f800 nid=0xfad
    +waiting for monitor entry [0x00007fe0fa1e6000]
    +   java.lang.Thread.State: BLOCKED (on object monitor)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.removeConfigObserver(
    +        EnvironmentImpl.java:2675)
    +    - waiting to lock <0x00000000f131de08> (a com.sleepycat.je.rep.impl.RepImpl)
    +    at com.sleepycat.je.statcap.StatCapture.clearEnv(StatCapture.java:176)
    +    - locked <0x00000000f131f078> (a com.sleepycat.je.statcap.StatCapture)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.shutdownStatCapture(
    +        EnvironmentImpl.java:2454)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.shutdownDaemons(
    +        EnvironmentImpl.java:2345)
    +    at com.sleepycat.je.rep.impl.node.Replica.processShutdown(Replica.java:694)
    +    at com.sleepycat.je.rep.impl.node.Replica.access$1100(Replica.java:153)
    +    at com.sleepycat.je.rep.impl.node.Replica$ReplayThread.run(Replica.java:1229)
    +
    +"UNKNOWN Node6(-1)" #1 prio=5 os_prio=0 tid=0x00007fe14400c000 nid=0xf81
    +waiting for monitor entry [0x00007fe14b9e4000]
    +   java.lang.Thread.State: BLOCKED (on object monitor)
    +    at com.sleepycat.je.statcap.StatCapture.clearEnv(StatCapture.java:170)
    +    - waiting to lock <0x00000000f131f078> (a
    +        com.sleepycat.je.statcap.StatCapture)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.shutdownStatCapture(
    +        EnvironmentImpl.java:2454)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.shutdownDaemons(
    +        EnvironmentImpl.java:2345)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.doClose(EnvironmentImpl.java:1884)
    +    - locked <0x00000000f131de08> (a com.sleepycat.je.rep.impl.RepImpl)
    +    at com.sleepycat.je.dbi.DbEnvPool.closeEnvironment(DbEnvPool.java:374)
    +    - locked <0x00000000f131de08> (a com.sleepycat.je.rep.impl.RepImpl)
    +    - locked <0x00000000f1015b30> (a com.sleepycat.je.dbi.DbEnvPool)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.close(EnvironmentImpl.java:1742)
    +    at com.sleepycat.je.Environment.close(Environment.java:445)
    +    - locked <0x00000000f2102ce8> (a com.sleepycat.je.rep.ReplicatedEnvironment)
    +    at com.sleepycat.je.rep.ReplicatedEnvironment.close(
    +        ReplicatedEnvironment.java:830)
    +    - locked <0x00000000f2102ce8> (a com.sleepycat.je.rep.ReplicatedEnvironment)
    +    ...
    +
    +[#25195] (7.1.2) +

  15. +A new exception, EnvironmentWedgedException, is now thrown by Environment.close +when a badly behaved internal thread cannot be shut down, and the current +process must be shut down and restarted before re-opening the Environment. +Prior to this change, when a thread could not be shut down, the application was +not informed about the problem via an exception, and the badly behaved thread +sometimes caused unpredictable behavior in the Environment, even after it was +closed and re-opened. See EnvironmentWedgedException for more details. +

    +[#25222] (7.1.3) +
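    +A handling sketch; whether and how to exit the process is the application's choice: +

    +  import com.sleepycat.je.Environment;
    +  import com.sleepycat.je.EnvironmentWedgedException;
    +
    +  static void shutdown(Environment env) {
    +      try {
    +          env.close();
    +      } catch (EnvironmentWedgedException e) {
    +          // A stuck internal thread remains; restart the process
    +          // before the Environment is opened again.
    +          e.printStackTrace();
    +          System.exit(1);
    +      }
    +  }
    +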


  17. +The default value for ReplicationConfig.REP_STREAM_TIMEOUT was changed from +24 hours to 30 minutes. The default value was changed to 30 minutes in the +documentation in JE 6.0.5 [#22575], but the code change was omitted, +accidentally leaving the default at 24 hours. A 30 minute value is much more +reasonable than 24 hours: during this period, files are retained for feeding a +dead or lagging replica, which can cause an out-of-disk condition if enough +data is written. In the earlier JE 6.0.5 change, the REPLAY_COST_PERCENT and +REPLAY_FREE_DISK_PERCENT parameters were added; these also allow retention of +files for replicas, but without the risk of creating an out-of-disk condition. +

    +[#25254] (7.1.4) +


  19. +Improved Environment.close for an invalid Environment, to reduce the +probability of an OOME (OutOfMemoryError) when re-opening the Environment. +

    +For an invalid Environment, previously JE did not attempt to close Databases +during Environment.close. Also with an invalid Environment, Database.close +simply re-threw the invalidating exception, and the Database was not closed. +

    +The impact was that Environment and Database handles for a closed, invalid +Environment would continue to refer to internal data structures and +consequently to the cached data set. If another Environment was then opened, +while referencing the previous Environment or Database handles, this could +have caused OOME if the resident objects for both Environments did not fit in +the heap. This was especially likely if recovery for the new Environment +caused loading of a large data set. +

    +The javadoc indicates that applications should discard all handles after +closing an Environment. However, this is impractical at least in one use case: +when asynchronously closing an Environment due to an exception and then +re-opening it. When this is done asynchronously, it may be impractical to set +all old handle references to null before opening the new handle. So in this +case there will be a time interval where both Environments are referenced. +

    +Now, Environment.close clears references to internal data structures in the +Environment handle and all Database handles that have been opened via that +Environment. +

    +[#25238] (7.1.7) +


  21. +Fixed an HA bug that manifested as a RollbackProhibitedException when +replication nodes were running different JE versions and a JE 6.4.15 Replica +contacted a JE 7 Master running a version earlier than 7.1.8. +

    +[#25362] (7.1.8) +



Changes in 7.0.5

  1. +A Time-To-Live (TTL) feature has been added to allow efficient purging of +records whose lifetime can be set in advance. Records can be assigned a TTL +using WriteOptions.setTTL. The javadoc for the WriteOptions class contains a +Time-To-Live section with more information about the TTL feature. +

    +New 'get', 'put' and 'delete' API methods have been added to support the TTL +feature and expansion of the API in the future. Each 'get' method has a +ReadOptions parameter, and each 'put' and 'delete' method has a WriteOptions +parameter. WriteOptions includes TTL parameters so that a TTL can be assigned +to a record. The return value for the new methods is an OperationResult, or +null if the operation fails. OperationResult includes the record's expiration +time, for records that have been assigned a TTL. The new methods are as +follows. +

    +Note that the Collections API does not have new method signatures, since it +conforms to the standard Java collections interfaces. Therefore, it is not +currently possible to specify a TTL using the Collection API. However, it is +possible to use the DPL API for writing data with a TTL, and then use +EntityIndex.map or sortedMap to additionally use the Collections API. +

    +com.sleepycat.je.Database
    +
    +  OperationResult get(Transaction txn, DatabaseEntry key, DatabaseEntry data,
    +                      Get getType, ReadOptions options)
    +
    +  OperationResult put(Transaction txn, DatabaseEntry key, DatabaseEntry data,
    +                      Put putType, WriteOptions options)
    +
    +  OperationResult delete(Transaction txn, DatabaseEntry key,
    +                         WriteOptions options)
    +
    +com.sleepycat.je.Cursor
    +
    +  OperationResult get(DatabaseEntry key, DatabaseEntry data,
    +                      Get getType, ReadOptions options)
    +
    +  OperationResult put(DatabaseEntry key, DatabaseEntry data,
    +                      Put putType, WriteOptions options)
    +
    +  OperationResult delete(WriteOptions options)
    +
    +com.sleepycat.je.SecondaryDatabase
    +
    +  OperationResult get(Transaction txn, DatabaseEntry key, DatabaseEntry pKey,
    +                      DatabaseEntry data, Get getType, ReadOptions options)
    +
    +  OperationResult delete(Transaction txn, DatabaseEntry key,
    +                         WriteOptions options)
    +
    +com.sleepycat.je.SecondaryCursor
    +
    +  OperationResult get(DatabaseEntry key, DatabaseEntry pKey,
    +                      DatabaseEntry data, Get getType, ReadOptions options)
    +
    +  OperationResult delete(WriteOptions options)
    +
    +com.sleepycat.je.ForwardCursor
    +com.sleepycat.je.JoinCursor
    +com.sleepycat.je.DiskOrderedCursor
    +
    +  OperationResult get(DatabaseEntry key, DatabaseEntry data,
    +                      Get getType, ReadOptions options)
    +  // Get.NEXT and CURRENT only
    +
    +com.sleepycat.persist.PrimaryIndex
    +
    +  OperationResult put(Transaction txn, E entity,
    +                      Put putType, WriteOptions writeOptions)
    +  // Put.OVERWRITE and NO_OVERWRITE only
    +
    +com.sleepycat.persist.EntityIndex
    +
    +  EntityResult get(Transaction txn, K key,
    +                   Get getType, ReadOptions readOptions)
    +  // Get.SEARCH only, more types may be supported later
    +
    +  OperationResult delete(Transaction txn, K key, WriteOptions writeOptions)
    +
    +com.sleepycat.persist.EntityCursor
    +
    +  EntityResult get(Get getType, ReadOptions readOptions)
    +  // All Get types except SEARCH_*, which may be supported later
    +
    +  OperationResult update(V entity, WriteOptions writeOptions)
    +
    +  OperationResult delete(WriteOptions writeOptions)
    +
    +The 'put' methods are passed a Put enum value and the 'get' methods are passed +a Get enum value. The enum values correspond to the methods of the older API. +For example, Get.SEARCH corresponds to the older Cursor.getSearchKey method and +Put.NO_OVERWRITE corresponds to the older Database.putNoOverwrite method. +Future enhancements, like TTL, may be supported via the newer 'get' and 'put' +methods, so we recommend that these methods are used instead of the older API +methods. However, there are no plans to deprecate or remove the older methods +at this time. In fact, the older methods still appear in most of the JE example +programs and documentation. +

    +ReadOptions and WriteOptions contain a CacheMode parameter for specifying the +cache mode on a per-operation basis. ReadOptions also contains a LockMode +property, which corresponds to the LockMode parameter of the older 'get' and +'put' methods. To ease the translation of existing code, a +LockMode.toReadOptions method is provided. +

    +Another API change has to do with key-only 'get' operations, where returning +the record data is not needed. Previously, returning the data and its +associated overhead could be avoided only by calling DatabaseEntry.setPartial. +Now, null may be passed for the data parameter instead. In fact, null may now +be passed for all "output parameters", in both the new and old versions of the +'get' and 'put' methods. For more information, see the "Input and Output +Parameters" section of the DatabaseEntry class javadoc. +
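    +A sketch tying these pieces together: a 'put' with a TTL and a key-only 'get' that passes null for the data parameter. It assumes "db" is an open, non-transactional Database (hence the null txn): +

    +  import java.util.concurrent.TimeUnit;
    +  import com.sleepycat.je.Database;
    +  import com.sleepycat.je.DatabaseEntry;
    +  import com.sleepycat.je.Get;
    +  import com.sleepycat.je.OperationResult;
    +  import com.sleepycat.je.Put;
    +  import com.sleepycat.je.WriteOptions;
    +
    +  DatabaseEntry key = new DatabaseEntry("k1".getBytes());
    +  DatabaseEntry data = new DatabaseEntry("v1".getBytes());
    +
    +  // Write a record that expires roughly 90 days from now.
    +  WriteOptions writeOptions = new WriteOptions();
    +  writeOptions.setTTL(90, TimeUnit.DAYS);
    +  OperationResult putResult =
    +      db.put(null, key, data, Put.OVERWRITE, writeOptions);
    +  long expirationTime = putResult.getExpirationTime();
    +
    +  // Key-only read: passing null avoids returning the record data.
    +  OperationResult getResult = db.get(null, key, null, Get.SEARCH, null);
    +  boolean found = (getResult != null);
    +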

    +The JE cleaner has also been enhanced to perform purging of expired data. For +each data file, a histogram of expired data sizes is stored and used by the +cleaner. Along with the obsolete size information that the cleaner already +maintains, the histogram allows the cleaner to determine when a file is ready +for cleaning. New related cleaner statistics are as follows: +

    • EnvironmentStats.getNLNsExpired - the number of expired LNs processed + by the cleaner.
    • EnvironmentStats.getCurrentMinUtilization - replacement for + getLastKnownUtilization, which is now deprecated.
    • EnvironmentStats.getCurrentMaxUtilization - the maximum utilization + is often different from the minimum when TTL is used.
    • EnvironmentStats.getNCleanerTwoPassRuns - two-pass cleaning is + used when the maximum and minimum diverge.
    • EnvironmentStats.getNCleanerRevisalRuns - two-pass cleaning can result + in revised expiration data.

    +Another indication of expired data is shown by the DbSpace utility. This now +outputs minimum and maximum utilization and the total expired bytes. A new +option for this utility, -t DATE-TIME, shows the utilization and +expired bytes for a specified time. +

    +The DbCacheSize utility now has a -ttl option. Specifying this +option causes the estimated cache size to include space for an expiration time +for each record. +

    +The RecoveryProgress.POPULATE_EXPIRATION_PROFILE phase was added to indicate +that the cleaner is reading the stored histograms into cache. +

    +EnvironmentConfig.ENV_EXPIRATION_ENABLED is a new config param that is true by +default, meaning that expired data is filtered from queries and purged by the +cleaner. It might be set to false to recover data after an extended down time. +

    +In addition, the cleaner "backlog" mechanism has been removed, meaning that +EnvironmentStats.getCleanerBacklog and +EnvironmentConfig.CLEANER_MAX_BATCH_FILES are now deprecated. The backlog +mechanism has not been beneficial for some time and was due for removal. When +using TTL, because two-pass cleaning can occur even when true utilization is +below EnvironmentConfig.CLEANER_MIN_UTILIZATION, the cleaner backlog statistic +would have been misleading. +

    +[#16845] (7.0.0) +


  3. +Fixed a bug causing the following exception. In JE versions from 6.2 to 6.4, +this could occur when EnvironmentConfig.NODE_MAX_ENTRIES or +DatabaseConfig.setNodeMaxEntries is more than 128, which is the default value. +
    +Caused by: java.lang.ArrayIndexOutOfBoundsException: -96
    +    at com.sleepycat.je.tree.BINDeltaBloomFilter.setBit(
    +        BINDeltaBloomFilter.java:257)
    +    at com.sleepycat.je.tree.BINDeltaBloomFilter.add(
    +        BINDeltaBloomFilter.java:113)
    +    at com.sleepycat.je.tree.BIN.createBloomFilter(BIN.java:1863)
    +    at com.sleepycat.je.tree.IN.serialize(IN.java:6037)
    +    at com.sleepycat.je.tree.IN.writeToLog(IN.java:6021)
    +    at com.sleepycat.je.log.entry.INLogEntry.writeEntry(INLogEntry.java:349)
    +    at com.sleepycat.je.log.LogManager.marshallIntoBuffer(LogManager.java:731)
    +    at com.sleepycat.je.log.LogManager.log(LogManager.java:346)
    +    ...
    +
    +[#24896] (7.0.0) +


Changes in 6.4.15

  1. +Made several minor improvements to off-heap cache behavior. +
    • + The OffHeap:offHeapCriticalNodesTargeted statistic was added for + monitoring off-heap critical eviction, which increases operation latency + in application threads. See + EnvironmentStats.getOffHeapCriticalNodesTargeted. +
    • + To reduce off-heap critical eviction, the default for + EnvironmentConfig.OFFHEAP_EVICT_BYTES was changed from 1MB to 50MB. +
    • + To reduce off-heap evictor thread contention, the default for + EnvironmentConfig.OFFHEAP_MAX_THREADS was changed from 10 to 3, and an + internal check was added to reduce contention when all threads are busy. +
    +[#23889] (6.4.10) +

  3. +Fixed a bug that caused internal Btree corruption when using the off-heap +cache and performing insertions. The bug was observed when using +CacheMode.EVICT_BIN, but could also occur if BIN eviction is frequent for +other reasons. The bug causes persistent corruption that would require +reverting to a backup (or HA network restore) to correct. The bug was observed +to cause one of the two following exceptions at the time the corruption was +created. +

    +The following assertion would occur rarely, and of course only if assertions +were enabled. +

    +com.sleepycat.je.EnvironmentFailureException: (JE 6.4.10)
    +UNEXPECTED_STATE: Unexpected internal state, may have side effects.
    +    at com.sleepycat.je.EnvironmentFailureException.unexpectedState(
    +    EnvironmentFailureException.java:397)
    +    at com.sleepycat.je.tree.IN.getKnownChildIndex(IN.java:782)
    +    at com.sleepycat.je.evictor.OffHeapCache.freeRedundantBIN(
    +    OffHeapCache.java:1974)
    +    at com.sleepycat.je.tree.IN.updateLRU(IN.java:695)
    +    at com.sleepycat.je.tree.IN.latchShared(IN.java:600)
    +    at com.sleepycat.je.recovery.DirtyINMap.selectDirtyINsForCheckpoint(
    +    DirtyINMap.java:277)
    +    at com.sleepycat.je.recovery.Checkpointer.doCheckpoint(
    +    Checkpointer.java:816)
    +    at com.sleepycat.je.recovery.Checkpointer.onWakeup(Checkpointer.java:593)
    +    at com.sleepycat.je.utilint.DaemonThread.run(DaemonThread.java:184)
    +    at java.lang.Thread.run(Thread.java:745)
    +
    +

    +The following exception would occur more often, whether or not assertions were +enabled. +

    +com.sleepycat.je.EnvironmentFailureException: (JE 6.4.10)
    +UNEXPECTED_STATE_FATAL: Failed adding new IN ...
    +    at com.sleepycat.je.EnvironmentFailureException.unexpectedState(
    +    EnvironmentFailureException.java:441)
    +    at com.sleepycat.je.dbi.INList.add(INList.java:204)
    +    at com.sleepycat.je.tree.IN.addToMainCache(IN.java:2966)
    +    at com.sleepycat.je.tree.IN.postLoadInit(IN.java:2939)
    +    at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:2513)
    +    at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:2279)
    +    at com.sleepycat.je.tree.Tree.searchSplitsAllowed(Tree.java:1919)
    +    at com.sleepycat.je.tree.Tree.searchSplitsAllowed(Tree.java:1857)
    +    at com.sleepycat.je.tree.Tree.searchSplitsAllowed(Tree.java:1775)
    +    at com.sleepycat.je.tree.Tree.findBinForInsert(Tree.java:1746)
    +    at com.sleepycat.je.dbi.CursorImpl.insertRecordInternal(
    +    CursorImpl.java:1381)
    +    at com.sleepycat.je.dbi.CursorImpl.insertOrUpdateRecord(
    +    CursorImpl.java:1280)
    +    at com.sleepycat.je.Cursor.putNoNotify(Cursor.java:2504)
    +    at com.sleepycat.je.Cursor.putNotify(Cursor.java:2365)
    +    at com.sleepycat.je.Cursor.putNoDups(Cursor.java:2223)
    +    at com.sleepycat.je.Cursor.putInternal(Cursor.java:2060)
    +    at com.sleepycat.je.Cursor.put(Cursor.java:730)
    +
    +[#24564] (6.4.11) +

  5. +Fixed a bug that could cause queries to return the wrong result, and also +could cause persistent Btree corruption. The bug is present in releases 6.3.0 +to 6.4.11. The conditions for the bug are as follows. +
    • A custom key comparator must not be configured.
    • The DB must not be a duplicates DB (because an internal key comparator + is used).
    • Key prefixing must be configured for the DB (or at least, it must have + been configured when data was written).
    • The bug will occur when the search key of an operation (either a read + or a write operation, including internal operations such as cleaning + and checkpointing) is a prefix of the common prefix for all the keys + in an IN. In this case, if a custom comparator is not used, the + default internal comparator will return a wrong result when comparing + the search key with another key in the IN. This will in general result + in wrong results and/or data corruption.
    • For a query to return the wrong result, the specified search key must + be a prefix of other keys in the DB. For example, key A is a prefix of + key A1 and A2.
    • For corruption to occur, some keys in the DB must be a prefix of other + keys. For example, keys A, A1 and A2 are stored.
    • In both cases above (a query with the wrong result and corruption), + the smaller key which is a prefix of other keys must also be smaller + than or equal to JE's internal key prefix for the Btree internal node + (IN) that is accessed. This means that all keys in the IN, or roughly + 100 adjacent keys, must have this prefix.
    +[#24583] (6.4.12) +

  7. +Fixed a bug in preload (Database.preload and Environment.preload) that +prevented all data from being preloaded. It did not cause corruption of any +kind, and the data that was not preloaded was still accessible, i.e., it +would be loaded when accessed through normal API operations. +

    +Data was missed by preload when BIN-deltas were present in cache. If the +preload was performed immediately after opening the Environment, this would +normally happen only after a crash-recovery (a normal shutdown did not occur). +If the preload was performed later on, BIN-deltas might also be in cache due +to eviction. +

    +[#24565] (6.4.12) +


  9. +Fixed a bug in preload (Database.preload and Environment.preload) that caused +preloaded data to be evicted from cache by a subsequent operation using +CacheMode.UNCHANGED. +

    +[#24629] (6.4.14) +


  11. +Fixed a bug where the information about lock owners and waiters in +LockConflictException was sometimes incorrect due to a time window between +detecting the lock conflict and constructing the exception. The fix applies to +the LockConflictException.getOwnerTxnIds and getWaiterTxnIds methods, and to +the two lines in the first part of the exception message starting with +"Owners:" and "Waiters:". +

    +In addition, the list of waiters will now contain the locker or Transaction +requesting the lock, for which the LockConflictException is thrown. +

    +The fix does NOT apply to the information output when +EnvironmentConfig.TXN_DUMP_LOCKS is set to true. This information is by nature +somewhat inaccurate, because normal locking operations are not frozen when this +dump is occurring, so changes to the state of the lock table are occurring +concurrently. +

    +The fix also does NOT apply to the deadlock information that is sometimes +included in the exception message. This information can also be inaccurate due +to concurrent locking operations. This is a larger problem that will be fixed +in a future release. +

    +[#24623] (6.4.14) +
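    +Accessing the corrected information (sketch): +

    +  import java.util.Arrays;
    +  import com.sleepycat.je.LockConflictException;
    +
    +  try {
    +      // ... read/write operations ...
    +  } catch (LockConflictException e) {
    +      // Captured consistently at conflict-detection time by this fix;
    +      // the waiter list now includes the requesting locker/transaction.
    +      System.err.println("owners=" + Arrays.toString(e.getOwnerTxnIds()));
    +      System.err.println("waiters=" + Arrays.toString(e.getWaiterTxnIds()));
    +      throw e;
    +  }
    +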


  13. +Fixed a bug that could cause a "log file not found" exception during recovery, +i.e., when opening the Environment. The circumstances that provoked this bug +are: +
    • The bug could only occur in Btrees with 4 or more levels, which + typically means a single Database must have roughly one million records + or more. +
    • The bug is more likely to occur with insertion heavy workloads. +
    • The bug has existed in all earlier versions of JE, but is more likely + to occur in JE 6.2 and later. +
    +The bug does not cause permanent data corruption if upgrading to JE 6.4.14 is +possible. In other words, if the problem occurs in an earlier version, +upgrading to 6.4.14 or later will allow the Environment to be opened. +
    +Example exception:
    +
    +Exception in thread "main" com.sleepycat.je.EnvironmentFailureException:
    +(JE 6.4.9) ... last LSN=0x20c7b6/0xa986dc LOG_INTEGRITY: Log information is
    +incorrect, problem is likely persistent. Environment is invalid and must be
    +closed.
    +        at com.sleepycat.je.recovery.RecoveryManager.traceAndThrowException(
    +            RecoveryManager.java:3176)
    +        at com.sleepycat.je.recovery.RecoveryManager.readINs(
    +            RecoveryManager.java:1039)
    +        at com.sleepycat.je.recovery.RecoveryManager.buildINs(
    +            RecoveryManager.java:842)
    +        at com.sleepycat.je.recovery.RecoveryManager.buildTree(
    +            RecoveryManager.java:757)
    +        at com.sleepycat.je.recovery.RecoveryManager.recover(
    +            RecoveryManager.java:387)
    +        at com.sleepycat.je.dbi.EnvironmentImpl.finishInit(
    +            EnvironmentImpl.java:717)
    +        at com.sleepycat.je.dbi.DbEnvPool.getEnvironment(
    +            DbEnvPool.java:254)
    +        at com.sleepycat.je.Environment.makeEnvironmentImpl(
    +            Environment.java:287)
    +        at com.sleepycat.je.Environment.(Environment.java:268)
    +        at com.sleepycat.je.Environment.(Environment.java:212)
    +        at com.sleepycat.je.util.DbDump.openEnv(DbDump.java:422)
    +        at com.sleepycat.je.util.DbDump.listDbs(DbDump.java:316)
    +        at com.sleepycat.je.util.DbDump.main(DbDump.java:296)
    +Caused by: com.sleepycat.je.EnvironmentFailureException:
    +(JE 6.4.9) ... fetchIN of 0x20c756/0x4e81bd parent IN=2785507 IN
    +class=com.sleepycat.je.tree.IN lastFullLsn=0x20c7af/0xc81b2d
    +lastLoggedLsn=0x20c7af/0xc81b2d parent.getDirty()=true state=0
    +LOG_FILE_NOT_FOUND: Log file missing, log is likely invalid. Environment is
    +invalid and must be closed.
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:2523)
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:2293)
    +        at com.sleepycat.je.tree.Tree.getParentINForChildIN(Tree.java:1418)
    +        at com.sleepycat.je.recovery.RecoveryManager.recoverChildIN(
    +            RecoveryManager.java:1338)
    +        at com.sleepycat.je.recovery.RecoveryManager.recoverIN(
    +            RecoveryManager.java:1166)
    +        at com.sleepycat.je.recovery.RecoveryManager.replayOneIN(
    +            RecoveryManager.java:1130)
    +        at com.sleepycat.je.recovery.RecoveryManager.readINs(
    +            RecoveryManager.java:1021)
    +        ... 11 more
    +Caused by: java.io.FileNotFoundException: .../0020c756.jdb (No such file or directory)
    +        at java.io.RandomAccessFile.open(Native Method)
    +        at java.io.RandomAccessFile.(RandomAccessFile.java:241)
    +        at java.io.RandomAccessFile.(RandomAccessFile.java:122)
    +        at com.sleepycat.je.log.FileManager$DefaultRandomAccessFile.(
    +            FileManager.java:3226)
    +        at com.sleepycat.je.log.FileManager$6.createFile(
    +            FileManager.java:3254)
    +        at com.sleepycat.je.log.FileManager.openFileHandle(
    +            FileManager.java:1333)
    +        at com.sleepycat.je.log.FileManager.getFileHandle(
    +            FileManager.java:1204)
    +        at com.sleepycat.je.log.LogManager.getLogSource(LogManager.java:1136)
    +        at com.sleepycat.je.log.LogManager.getLogEntry(
    +            LogManager.java:823)
    +        at com.sleepycat.je.log.LogManager.getLogEntryAllowInvisibleAtRecovery(
    +            LogManager.java:788)
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:2345)
    +        ... 17 more
    +
    +Thanks to Alexander Kharichev for reproducing this bug and capturing the data +files that allowed us to find the problem. This took many months of +persistence, and special instrumentation for use with the CLEANER_EXPUNGE +option in a production environment. +

    +[#24663] (6.4.14) +


  14. + +
  15. +Fixed a bug that caused PreloadStats.getNEmbeddedLNs to return zero when using +PreloadConfig.setLoadLNs(false). getNEmbeddedLNs now returns the number of +embedded LNs loaded into cache, irrespective of the setLoadLNs setting. +

    +[#24688] (6.4.15) +
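    +For illustration, the statistic might be gathered as follows (a minimal +sketch, not from the original notes; 'db' is assumed to be an open Database): +
    +    import com.sleepycat.je.*;
    +
    +    static int countEmbeddedLNs(Database db) {
    +        PreloadConfig preloadConfig = new PreloadConfig();
    +        preloadConfig.setLoadLNs(false);   // load BINs only, not LNs
    +        PreloadStats stats = db.preload(preloadConfig);
    +        // With the fix, this reports embedded LNs loaded into cache,
    +        // irrespective of the setLoadLNs setting.
    +        return stats.getNEmbeddedLNs();
    +    }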


  16. + +
  17. +Fixed a performance problem related to the off-heap cache. Previously, when the +off-heap cache overflowed, BINs (bottom internal nodes) were evicted before +evicting LNs (records or leaf nodes), when the LNs were in dirty BINs. The +effect was that more read I/O was required to fetch the INs when they were +needed. In general, disregarding the LRU, BINs should be kept in cache in +preference to LNs, and the fix corrects the implementation of that policy. +

    +In addition, a change was made to allow off-heap LNs to be evicted sooner, in +order to delay eviction of off-heap BINs (or their mutation to BIN-deltas). +Previously, when a BIN was evicted from main cache and moved off-heap, its +off-heap LNs were made "hot" in the off-heap cache. This no longer occurs. +

    +[#24717] (6.4.25) +


  18. + +
+ + +
+

Changes in 6.4.9

+ +
    + +
  1. +Fixed a bug that (rarely) caused an exception such as the following, during +shutdown of a ReplicatedEnvironment. This caused no persistent damage, but the +unexpected runtime exception could cause exception handling problems or at +least confusion. +
    +com.sleepycat.je.EnvironmentFailureException.unexpectedException(
    +    EnvironmentFailureException.java:351)
    +    at com.sleepycat.je.log.LogManager.serialLog(LogManager.java:496)
    +    at com.sleepycat.je.log.LogManager.logItem(LogManager.java:438)
    +    at com.sleepycat.je.log.LogManager.log(LogManager.java:350)
    +    at com.sleepycat.je.tree.LN.logInternal(LN.java:752)
    +    at com.sleepycat.je.tree.LN.optionalLog(LN.java:473)
    +    at com.sleepycat.je.dbi.CursorImpl.updateRecordInternal(
    +        CursorImpl.java:1689)
    +    at com.sleepycat.je.dbi.CursorImpl.insertOrUpdateRecord(
    +        CursorImpl.java:1321)
    +    at com.sleepycat.je.Cursor.putNoNotify(Cursor.java:2509)
    +    at com.sleepycat.je.Cursor.putNotify(Cursor.java:2370)
    +    at com.sleepycat.je.Cursor.putForReplay(Cursor.java:2038)
    +    at com.sleepycat.je.DbInternal.putForReplay(DbInternal.java:186)
    +    at com.sleepycat.je.rep.impl.node.Replay.applyLN(Replay.java:1012)
    +    ... 2 more
    +Caused by: java.lang.NullPointerException
    +    at com.sleepycat.je.rep.vlsn.VLSNIndex.decrement(VLSNIndex.java:526)
    +    at com.sleepycat.je.rep.impl.RepImpl.decrementVLSN(RepImpl.java:840)
    +    at com.sleepycat.je.log.LogManager.serialLogWork(LogManager.java:710)
    +    at com.sleepycat.je.log.LogManager.serialLog(LogManager.java:481)
    +    ... 13 more
    +
    +[#24281] (6.4.0) +

  2. + +
  3. +Fixed a recovery (startup) performance problem that occurred when extremely +large numbers of .jdb files were present. For large data sets, the default file +size (10 MB) results in large numbers of files. A directory listing of these +files was performed by JE when reading the log sequentially during recovery, +and this noticeably slowed down recovery. With this fix, recovery no longer +performs a directory listing. +

    +However, other utilities that read the entire log (e.g., DbPrintLog) must +perform a directory listing to skip over gaps in the sequence of file numbers +caused by log file deletion (cleaning). Therefore, when a large data set is +expected or possible, the file size (EnvironmentConfig.LOG_FILE_MAX) should be +configured to a larger size. A file size of one GB is recommended for large +data sets. +

    +[#24332] (6.4.0) +
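    +For example, a larger file size can be configured before opening the +Environment (a sketch; 'envHome' is assumed to be the environment directory): +
    +    import java.io.File;
    +    import com.sleepycat.je.*;
    +
    +    static Environment openWithLargeFiles(File envHome) {
    +        EnvironmentConfig envConfig = new EnvironmentConfig();
    +        envConfig.setAllowCreate(true);
    +        // 1 GB files keep the file count manageable for large data sets.
    +        envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX,
    +                                 String.valueOf(1024L * 1024L * 1024L));
    +        return new Environment(envHome, envConfig);
    +    }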


  4. + +
  5. +Fixed a transient problem for HA applications that resulted in an exception +such as the following. This occurred when quorum was temporarily lost. The fix +will prevent this exception from occurring. Note that even when the problem +occurred, the node automatically recovered quorum, so the problem was not +persistent. +
    +com.sleepycat.je.EnvironmentFailureException: (JE 6.3.7) Problem in
    +ReadWindow.fill, reading from = 0 UNEXPECTED_EXCEPTION: Unexpected internal
    +Exception, may have side effects. MasterFeederSource fetching vlsn=5,096,275
    +waitTime=1000 Uncaught exception in feeder thread:Thread[Feeder Output for
    +rg1-rn5,5,main] Originally thrown by HA thread: MASTER rg1-rn1(1)
    +    at com.sleepycat.je.EnvironmentFailureException.unexpectedException(
    +        EnvironmentFailureException.java:366)
    +    at com.sleepycat.je.rep.stream.FeederReader$SwitchWindow.fillNext(
    +        FeederReader.java:572)
    +    at com.sleepycat.je.log.FileReader.readData(FileReader.java:822)
    +    at com.sleepycat.je.log.FileReader.readNextEntryAllowExceptions(
    +        FileReader.java:379)
    +    at com.sleepycat.je.log.FileReader.readNextEntry(FileReader.java:276)
    +    at com.sleepycat.je.rep.stream.FeederReader.scanForwards(
    +        FeederReader.java:308)
    +    at com.sleepycat.je.rep.stream.MasterFeederSource.getWireRecord(
    +        MasterFeederSource.java:100)
    +    at com.sleepycat.je.rep.impl.node.Feeder$OutputThread.writeAvailableEntries(
    +        Feeder.java:1219)
    +    at com.sleepycat.je.rep.impl.node.Feeder$OutputThread.run(Feeder.java:1109)
    +Caused by: java.io.FileNotFoundException:
    +/scratch/suitao/dctesting/kvroot/mystore/sn3/rg1-rn1/env/00000000.jdb (No such
    +file or directory)
    +    at java.io.RandomAccessFile.open(Native Method)
    +    at java.io.RandomAccessFile.<init>(RandomAccessFile.java:241)
    +    at java.io.RandomAccessFile.<init>(RandomAccessFile.java:122)
    +    at com.sleepycat.je.log.FileManager$DefaultRandomAccessFile.<init>(
    +        FileManager.java:3201)
    +    at com.sleepycat.je.log.FileManager$6.createFile(FileManager.java:3229)
    +    at com.sleepycat.je.log.FileManager.openFileHandle(FileManager.java:1308)
    +    at com.sleepycat.je.log.FileManager.getFileHandle(FileManager.java:1179)
    +    at com.sleepycat.je.rep.stream.FeederReader$SwitchWindow.fillNext(
    +        FeederReader.java:511)
    +    ... 7 more
    +
    +[#24299] (6.4.0) +

  6. + +
  7. +Added an off-heap cache capability. An off-heap cache can be used to utilize +large memories more efficiently than when using the same memory for the file +system cache, while avoiding the Java GC overhead associated with large Java +heaps. See the EnvironmentMutableConfig.setOffHeapCacheSize javadoc for +information on how to enable the cache and its impact on performance. +

    +Please be aware of the following limitations in the initial release of this +feature: +

      +
    • + The off-heap cache is not currently used for deferred-write and + temporary databases, i.e., databases created using + DatabaseConfig.setTemporary(true) or setDeferredWrite(true). For such + databases, only the main (in-heap) cache is used. +
    • +
    • + As described in the EnvironmentMutableConfig.setOffHeapCacheSize + javadoc, the off-heap cache only works when Unsafe.allocateMemory is + available in the JDK used to run the JE application. The Oracle JDK is + compatible. +
    • +
    • + When testing the off-heap cache on the IBM JDK, using Linux, we + noticed that the per-memory-block overhead is much higher than when + using the Oracle JDK. We observed an extra 70-byte overhead per block + that is allocated by Unsafe.allocateMemory. This overhead is not + currently accounted for in our initial version of the off-heap + allocator, so users of the IBM JDK should expect that more off-heap + memory will be used than what DbCacheSize calculates and more than + what the EnvironmentStats.getOffHeapTotalBytes method reports. We + would like to solicit input on this issue from our users who are + familiar with the internals of the IBM JDK. +
    • +
    • + The Getting Started Guide does not yet contain information about + the off-heap cache. Please refer to the javadoc. +
    • +
    +

    +The following additional API additions are associated with the off-heap cache. +

      +
    • + EnvironmentMutableConfig.setOffHeapCacheSize + (EnvironmentConfig.MAX_OFF_HEAP_MEMORY). This is the only configuration + parameter that must be set to use the off-heap cache. See the + setOffHeapCacheSize javadoc for details on the purpose and function of the + off-heap cache. +
    • +
    • + EnvironmentConfig.OFFHEAP_N_LRU_LISTS. Allows reducing contention among + threads performing eviction, at the cost of reduced LRU accuracy. +
    • +
    • + EnvironmentConfig.OFFHEAP_CORE_THREADS, OFFHEAP_MAX_THREADS, + OFFHEAP_KEEP_ALIVE. Used to configure the thread pool for the off-heap + evictor. +
    • +
    • + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR. Used to disable the off-heap + evictor thread for applications calling Environment.evictMemory + explicitly. +
    • +
    • + EnvironmentConfig.OFFHEAP_EVICT_BYTES. Determines the size of an + eviction batch. +
    • +
    • + EnvironmentConfig.OFFHEAP_CHECKSUM. Can be used for debugging. +
    • +
    • + EnvironmentStats.getOffHeap*. These 20 new getter methods allow getting + off-heap cache statistics. +
    • +
    +

    +[#23889] (6.4.1) +
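    +For illustration, enabling the cache and observing its size might look as +follows (a minimal sketch; 'env' is assumed to be an open Environment): +
    +    import com.sleepycat.je.*;
    +
    +    static void enableOffHeapCache(Environment env) {
    +        EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
    +        // Setting a non-zero size is all that is required to enable it.
    +        mutableConfig.setOffHeapCacheSize(2L * 1024 * 1024 * 1024);
    +        env.setMutableConfig(mutableConfig);
    +
    +        EnvironmentStats stats = env.getStats(null);
    +        long usedBytes = stats.getOffHeapTotalBytes();
    +    }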


  8. + +
  9. +Several improvements were made to DiskOrderedCursor performance and behavior. +These improvements also apply to Database.count, which uses the same internal +scanning mechanism as DiskOrderedCursor. +
      +
    • + DiskOrderedCursor no longer accumulates LSNs for data that is resident + in the JE cache. Previously, data resident in cache would sometimes be + fetched from disk to avoid filling the output queue for the scan. This is + no longer the case, which has two important benefits: +
        +
      1. The semantics of a DiskOrderedCursor scan are now roughly the + same as when using LockMode.READ_UNCOMMITTED. There is no longer a + potential lag back to the last checkpoint. See the updated + Consistency Guarantees section in the DiskOrderedCursor javadoc + for details.
      2. +
      3. Less read IO is performed in some cases.
      4. +
      + [#24226] +
    • +
    • + To prevent applications from having to reserve memory in the Java heap for + the DiskOrderedCursor, memory used by the DiskOrderedCursor is now + subtracted from the JE cache budget. The maximum amount of such memory is + specified, as before, using + DiskOrderedCursorConfig.setInternalMemoryLimit. This is a behavior change + and may require some applications to increase the JE cache size. + [#24291] +
    • +
    • + DiskOrderedCursor can now scan multiple databases using the new + Environment.openDiskOrderedCursor method. When scanning multiple databases, + this method provides better performance than scanning each database + separately. (A usage sketch follows this list.) + [#24171] +
    • +
    • + DiskOrderedCursor scans now use shared latches on upper INs, instead of + exclusive latches. This reduces contention between the DiskOrderedCursor + scan and other Btree operations, such as CRUD operations. + [#24192] +
    • +
    • + Whenever possible, DiskOrderedCursor no longer makes copies of BIN-deltas + found in the cache. This results in less memory usage (and consequently + less read IO). + [#24270] +
    • +
    +(6.4.2) +
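    +The multi-database scan mentioned above might be used as follows (a sketch; +'db1' and 'db2' are assumed to be open Databases in 'env', and the method is +assumed to accept an array of Databases plus a DiskOrderedCursorConfig, per +the description above): +
    +    import com.sleepycat.je.*;
    +
    +    static void scanBoth(Environment env, Database db1, Database db2) {
    +        DiskOrderedCursorConfig docConfig = new DiskOrderedCursorConfig();
    +        DiskOrderedCursor doc = env.openDiskOrderedCursor(
    +            new Database[] { db1, db2 }, docConfig);
    +        try {
    +            DatabaseEntry key = new DatabaseEntry();
    +            DatabaseEntry data = new DatabaseEntry();
    +            // Records are returned in disk order, not key order.
    +            while (doc.getNext(key, data, LockMode.READ_UNCOMMITTED) ==
    +                   OperationStatus.SUCCESS) {
    +                // process the record
    +            }
    +        } finally {
    +            doc.close();
    +        }
    +    }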

  10. + +
  11. +Made improvements to the debug logging entries created to provide +information about log files that were protected from deletion. +

    +

      +
    • Modified entries created by the cleaner to identify which log + files were protected from deletion +
    • Modified entries created for replicated environments to provide + information about the reason files were protected from deletion +
    • Changed the logging level for these entries to INFO + to emphasize that the protection of files from deletion is expected + behavior +
    +

    +[#24241] (6.4.2) +


  12. + +
  13. +Fixed a bug where a Btree latch was not released when an Error was thrown by a +file read during a secondary DB lookup. This could cause an +EnvironmentFailureException with the error message "Latch already held" at a +later time in the same thread, or a latch deadlock in another thread. +[#24375] (6.4.3) +

  14. + +
  15. +Fixed a bug in Database.count that caused it to loop "forever" with a large +out-of-cache data set. This also impacted Environment.truncateDatabase when +'true' was passed for the 'returnCount' param, since this causes +Database.count to be called. +[#24448] (6.4.7) +

  16. + +
  17. +Fixed a bug that could cause incomplete results to be returned from a query +using secondary indexes, when this query is performed on a replica and record +deletions are being performed on the master (and being replayed on the +replica). It could also cause LockConflictException to be thrown by the query +on the replica in this situation, even when the application's operation order +(locking order) should not cause a deadlock. +[#24507] (6.4.8) +

  18. + +
+ + +
+

Changes in 6.3.8

+ +
    + +
  1. +Added EnvironmentStats.getNDirtyNodesEvicted and the corresponding statistic +in the jestat.csv file. This can be used to determine how much logging and its +associated costs (cleaning, etc) are being caused by eviction when the cache +overflows. +[#24086] (6.3.0) +
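    +For example (a sketch; 'env' is assumed to be an open Environment): +
    +    import com.sleepycat.je.*;
    +
    +    EnvironmentStats stats = env.getStats(new StatsConfig());
    +    long nDirtyEvicted = stats.getNDirtyNodesEvicted();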

  2. + +
  3. +Fixed a bug that resulted in an EnvironmentFailureException being +thrown from the method Environment.beginTransaction(), when a +replicated environment was closed at a master while new transactions were being +concurrently initiated. The following representative stack trace is symptomatic +of this problem (the specifics of the stack trace may vary depending on the JE +release): +
    +        ...
    +	at com.sleepycat.je.EnvironmentFailureException.unexpectedException(EnvironmentFailureException.java:351)
    +	at com.sleepycat.je.rep.utilint.RepUtils$ExceptionAwareCountDownLatch.awaitOrException(RepUtils.java:268)
    +	at com.sleepycat.je.rep.utilint.SizeAwaitMap.sizeAwait(SizeAwaitMap.java:106)
    +	at com.sleepycat.je.rep.impl.node.FeederManager.awaitFeederReplicaConnections(FeederManager.java:528)
    +	at com.sleepycat.je.rep.impl.node.DurabilityQuorum.ensureReplicasForCommit(DurabilityQuorum.java:74)
    +	at com.sleepycat.je.rep.impl.RepImpl.txnBeginHook(RepImpl.java:944)
    +	at com.sleepycat.je.rep.txn.MasterTxn.txnBeginHook(MasterTxn.java:158)
    +	at com.sleepycat.je.txn.Txn.initTxn(Txn.java:365)
    +	at com.sleepycat.je.txn.Txn.<init>(Txn.java:275)
    +	at com.sleepycat.je.txn.Txn.<init>(Txn.java:254)
    +	at com.sleepycat.je.rep.txn.MasterTxn.<init>(MasterTxn.java:114)
    +	at com.sleepycat.je.rep.txn.MasterTxn$1.create(MasterTxn.java:102)
    +	at com.sleepycat.je.rep.txn.MasterTxn.create(MasterTxn.java:380)
    +	at com.sleepycat.je.rep.impl.RepImpl.createRepUserTxn(RepImpl.java:924)
    +	at com.sleepycat.je.txn.Txn.createUserTxn(Txn.java:301)
    +	at com.sleepycat.je.txn.TxnManager.txnBegin(TxnManager.java:182)
    +	at com.sleepycat.je.dbi.EnvironmentImpl.txnBegin(EnvironmentImpl.java:2366)
    +	at com.sleepycat.je.Environment.beginTransactionInternal(Environment.java:1437)
    +	at com.sleepycat.je.Environment.beginTransaction(Environment.java:1319)
    +        ...
    +Caused by: java.lang.IllegalStateException: FeederManager shutdown
    +	at com.sleepycat.je.rep.impl.node.FeederManager.shutdownFeeders(FeederManager.java:498)
    +	at com.sleepycat.je.rep.impl.node.FeederManager.runFeeders(FeederManager.java:462)
    +	at com.sleepycat.je.rep.impl.node.RepNode.run(RepNode.java:1479)
    +
    +[#23970] (6.3.0) +

  4. + +
  5. +Fixed a bug that could cause a LOG_FILE_NOT_FOUND (log corruption) for +workloads where eviction is heavy and databases are often opened and closed. +[#24111] (6.3.0) +

  6. + +
  7. +Improved performance for "small" data records by embedding "small" LNs in BINs. +

    +Normally, records (key-value pairs) are stored on disk as individual byte +sequences called LNs (leaf nodes) and they are accessed via a Btree. +Specifically, the bottom layer nodes of the Btree (called BINs) contain +an array of slots, where each slot represents an associated data record. +Among other things, it stores the key of the record and the most recent +disk address of that record. Records and BTree nodes share the disk space +(are stored in the same kind of files), but LNs are stored separately from +BINs, i.e., there is no clustering or co-location of a BIN and its child LNs. +

    +With embedded LNs, a whole record may be stored inside a BIN (i.e., a BIN +slot may contain both the key and the data portion of a record). A record +will be "embedded" if the size (in bytes) of its data portion is less than +or equal to the value of the new EnvironmentConfig.TREE_MAX_EMBEDDED_LN +configuration parameter. The decision to embed a record or not is taken on a +record-by-record basis. As a result, a BIN may contain both embedded and +non-embedded records. The "embeddedness" of a record is a dynamic property: a +size-changing update may turn a non-embedded record to an embedded one or +vice-versa. +

    +The performance trade-offs of embedding or not embedding records are +described in the javadoc for the TREE_MAX_EMBEDDED_LN configuration parameter. +

    +To exploit embedded LNs during disk ordered scans, a new "binsOnly" mode +has been added in DiskOrderedCursorConfig. In this mode, only the BINs of +a database will be accessed (not the LNs). As a result, the scan will be +faster, but the data portion of a record will be returned only if the +record is embedded. This is most useful when we expect that all the records +in a database will be embedded. +

    +Finally, a new statistic has been added to the PreloadStats class. It is +the number of embedded LNs encountered during the preload() operation, +and is accessible via the getNEmbeddedLNs() method. +

    +[#21488] (6.3.0) +
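    +A sketch combining these settings (the value 32 is an arbitrary example; +'envHome' is assumed to be the environment directory): +
    +    import java.io.File;
    +    import com.sleepycat.je.*;
    +
    +    static void example(File envHome) {
    +        EnvironmentConfig envConfig = new EnvironmentConfig();
    +        envConfig.setAllowCreate(true);
    +        // Embed records whose data portion is at most 32 bytes.
    +        envConfig.setConfigParam(
    +            EnvironmentConfig.TREE_MAX_EMBEDDED_LN, "32");
    +        Environment env = new Environment(envHome, envConfig);
    +
    +        // "binsOnly" scan: only BINs are read, so data is returned
    +        // only for embedded records.
    +        DiskOrderedCursorConfig docConfig = new DiskOrderedCursorConfig();
    +        docConfig.setBINsOnly(true);
    +    }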


  8. + +
  9. +Two more changes were made as side effects of the embedded LNs work described +above. +

    +First, we clarified the documented definition of partial comparators, although +the actual behavior of partial comparators did not change. The documentation +change is subtle and will only be interesting to those currently using the +PartialComparator interface. See the PartialComparator javadoc for details. +

    +The second change is a fix for a bug that could occur only if a +PartialComparator was used (and as a result record keys were updatable). In +this case and under some rare situations, updates done on keys could be lost. +

    +[#21488] (6.3.0) +


  10. + +
  11. +Cleaner utilization adjustments are no longer needed, and the following related +APIs have been deprecated and will be removed completely in a future release. +In addition, cleaner probes are no longer performed, since they were used only +for utilization adjustments. +
      +
    • EnvironmentConfig.CLEANER_ADJUST_UTILIZATION
    • +
    • EnvironmentStats.getLNSizeCorrectionFactor
    • +
    • EnvironmentStats.getNCleanerProbeRuns
    • +
    +In JE 6.0 the default value for CLEANER_ADJUST_UTILIZATION was changed to +false, because the LN sizes it was adjusting were stored in the Btree in that +release. Now in JE 6.3, setting CLEANER_ADJUST_UTILIZATION has no effect and +the two stat getter methods always return zero. +

    +[#24090] (6.3.0) +


  12. + +
  13. +Added statistics that provide information about replication. +
      +
    • ReplicatedEnvironmentStats.getLastCommitTimestamp
    • +
    • ReplicatedEnvironmentStats.getLastCommitVLSN
    • +
    • ReplicatedEnvironmentStats.getReplicaDelayMap
    • +
    • ReplicatedEnvironmentStats.getReplicaLastCommitTimestampMap
    • +
    • ReplicatedEnvironmentStats.getReplicaLastCommitVLSNMap
    • +
    • ReplicatedEnvironmentStats.getReplicaVLSNLagMap
    • +
    • ReplicatedEnvironmentStats.getReplicaVLSNRateMap
    • +
    • ReplicatedEnvironmentStats.getVLSNRate
    • +
    +[#23896] (6.3.0) +
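    +For illustration, the per-replica getters might be used as follows (a +sketch; 'repEnv' is assumed to be an open ReplicatedEnvironment, and the maps +are assumed to be keyed by node name): +
    +    import java.util.Map;
    +    import com.sleepycat.je.StatsConfig;
    +    import com.sleepycat.je.rep.*;
    +
    +    static void printReplicaDelays(ReplicatedEnvironment repEnv) {
    +        ReplicatedEnvironmentStats repStats =
    +            repEnv.getRepStats(new StatsConfig());
    +        for (Map.Entry<String, Long> e :
    +             repStats.getReplicaDelayMap().entrySet()) {
    +            System.out.println(e.getKey() + " delay ms: " + e.getValue());
    +        }
    +    }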

  14. + +
  15. +Made several improvements to CacheMode behavior and CacheMode javadoc, and +deprecated two CacheModes. +

    +

      +
    • + The behavior of CacheMode.EVICT_BIN has changed. Previously, the BIN + was evicted even when it was dirty. This means the BIN was logged if it + was evicted by a write operation using this mode, or if it was dirty + due to a previous write operation using any mode. Now, a dirty BIN will + not be evicted by this mode, but in this case all LNs in the BIN will + be evicted. This mode was changed in order to prevent BINs from being + logged repeatedly due to the use of this mode. Logging should be + deferred for as long as possible (ideally until the next checkpoint) in + order to reduce writing costs and associated log cleaning costs. +

      +

    • +
    • + The behavior of CacheMode.UNCHANGED has also changed. We expect the + UNCHANGED mode to be important for many applications, since it allows + performing a full Database scan without displacing hot data in the + cache. Previously, when a Btree node (LN or BIN) was loaded into cache + by an operation with this cache mode, it was left in cache. This means + that the cache was perturbed by operations using this mode, which is + contrary to the intent of the mode. Even worse, such nodes were made + "hot" by the operation, meaning that they would not be evicted soon. + Now, when the node is loaded into cache by an operation with this cache + mode, it is evicted from cache after the operation. An exception to + this rule is that a dirty BIN will not be evicted and logged, for the + same reasons stated above. +

      +

    • +
    • + Non-sticky cursors (see CursorConfig.setNonSticky) now work with all + cache modes. Previously, CacheMode.EVICT_BIN and MAKE_COLD were + incompatible with non-sticky cursors, because the implementation of BIN + eviction was problematic with non-sticky cursors. This problem has been + solved and these incompatibilities were removed, primarily so that + CacheMode.UNCHANGED (which may also evict BINs) will work with + non-sticky cursors. +

      +

    • +
    • + CacheMode.KEEP_HOT has been deprecated. In this release, its behavior + is unchanged. In the next release it will behave as if + CacheMode.DEFAULT were specified. The reasons for deprecating this mode + are: +

      + 1. The only potential benefit of KEEP_HOT, as compared to DEFAULT, is + that KEEP_HOT attempts to keep the record's leaf-node (LN) and its + containing bottom internal node (BIN) in cache even if it is not + accessed frequently. We don't know of a use case for this behavior. +

      + 2. There are currently implementation problems with KEEP_HOT. The + current implementation of the cache evictor is based on an LRU list, + and there is no practical way to keep all BINs accessed with KEEP_HOT + at the hot end of the LRU list. The current implementation moves it to + the hot end when it reaches the cold end (as other BINs are accessed + and moved to the hot end), if the BIN has not been accessed since it + was made "keep hot". But if the BIN again moves to the cold end, it is + evicted to try to prevent the cache from overflowing when KEEP_HOT is + used for many operations. This approach does not really guarantee that + the cache won't overflow, and also does not really force the node to + stay hot. +

      +

    • +
    • + CacheMode.MAKE_COLD has been deprecated. In this release, its behavior + is unchanged. In the next release it will behave as if + CacheMode.UNCHANGED were specified. The reasons for deprecating this + mode are: +

      + 1. MAKE_COLD was originally added in an attempt to avoid perturbing the + cache for full Database scans, etc. The UNCHANGED mode should really be + used for this purpose, especially given the improvements made to this + mode (discussed above). +

      + 2. The main difference between MAKE_COLD and the new behavior of + UNCHANGED is that MAKE_COLD always evicts the LN and BIN, regardless of + whether they have been made "hot" by other operations. Again, we don't + know of a use case for this behavior. +

      +

    • +
    • + The javadoc for the CacheMode enumeration has been reworked to reflect + the behavior changes described above. More information has also been + added about the eviction process and the behavior and intended use of + each cache mode. +

      +

    • +
    +[#24154] (6.3.2) +
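    +For illustration, a full scan that avoids displacing hot data might look as +follows (a sketch; 'db' is assumed to be an open Database): +
    +    import com.sleepycat.je.*;
    +
    +    static void fullScan(Database db) {
    +        Cursor cursor = db.openCursor(null, null);
    +        try {
    +            cursor.setCacheMode(CacheMode.UNCHANGED);
    +            DatabaseEntry key = new DatabaseEntry();
    +            DatabaseEntry data = new DatabaseEntry();
    +            while (cursor.getNext(key, data, LockMode.READ_UNCOMMITTED) ==
    +                   OperationStatus.SUCCESS) {
    +                // nodes loaded by the scan are evicted afterwards
    +            }
    +        } finally {
    +            cursor.close();
    +        }
    +    }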

  16. + +
  17. +DbBackup.startBackup has been enhanced to make the use of the +EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE unnecessary, except in special +cases. See the "Restoring from a backup" section in the DbBackup javadoc for +more information. [#22865] (6.3.4) +
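    +The basic pattern is sketched below (copying of the named files is +application-specific): +
    +    import com.sleepycat.je.Environment;
    +    import com.sleepycat.je.util.DbBackup;
    +
    +    static void backup(Environment env) {
    +        DbBackup backup = new DbBackup(env);
    +        backup.startBackup();
    +        try {
    +            for (String name : backup.getLogFilesInBackupSet()) {
    +                // copy each named .jdb file to the backup location
    +            }
    +        } finally {
    +            backup.endBackup();
    +        }
    +    }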

  18. + +
  19. +The Environment.cleanLogFile method has been added to allow cleaning a single +file at a time. This is in contrast to Environment.cleanLog, which may clean a +large number of files over a long time period. See the javadoc for cleanLog and +cleanLogFile for details on the intended use cases and other information. +

    +Also, the javadoc for Environment.close now discusses performing an extra +checkpoint prior to calling close and disabling the cleaner threads. This is +related to the "batch cleaning" process described in the cleanLogFile javadoc. +

    +[#24181] (6.3.4) +
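    +A sketch of that batch-cleaning pattern (assuming 'env' is an open +Environment and the cleaner threads were disabled via +EnvironmentConfig.ENV_RUN_CLEANER): +
    +    import com.sleepycat.je.*;
    +
    +    static void batchClean(Environment env) {
    +        boolean anyCleaned = false;
    +        while (env.cleanLogFile()) {    // clean one file per call
    +            anyCleaned = true;
    +        }
    +        if (anyCleaned) {
    +            // A checkpoint makes the cleaned files eligible for deletion.
    +            CheckpointConfig force = new CheckpointConfig();
    +            force.setForce(true);
    +            env.checkpoint(force);
    +        }
    +    }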


  20. + +
  21. +Fixed a bug that can cause data log corruption, resulting in a failure +in DbVerifyLog or during recovery under certain circumstances. The bug could +occur when multiple threads are performing write operations concurrently. The +corruption could go unnoticed unless DbVerifyLog is run, or the corrupt portion +of the log happens to be processed by recovery. The latter is unlikely but +possible. An example of the DbVerifyLog failure is below. +
    +Caused by: com.sleepycat.je.util.LogVerificationException: Log is invalid,
    +fileName: 00038369.jdb fileNumber: 0x38369 logEntryOffset: 0x84
    +verifyState: INVALID reason: Header prevOffset=0x26 but prevEntryStart=0x45
    +
    +[#24211] (6.3.4) +

  22. + +
  23. +Fixed a bug that caused the following exception when setting the replication +helper host/port parameter to an empty string. +
    +Caused by: java.lang.IllegalArgumentException: Host and port pair was missing
    +    at com.sleepycat.je.rep.utilint.HostPortPair.getSocket(HostPortPair.java:29)
    +    at com.sleepycat.je.rep.utilint.HostPortPair.getSockets(HostPortPair.java:56)
    +    at com.sleepycat.je.rep.impl.RepImpl.getHelperSockets(RepImpl.java:1499)
    +    at com.sleepycat.je.rep.impl.node.RepNode.findMaster(RepNode.java:1214)
    +    at com.sleepycat.je.rep.impl.node.RepNode.startup(RepNode.java:787)
    +    at com.sleepycat.je.rep.impl.node.RepNode.joinGroup(RepNode.java:1988)
    +    at com.sleepycat.je.rep.impl.RepImpl.joinGroup(RepImpl.java:523)
    +    at com.sleepycat.je.rep.ReplicatedEnvironment.joinGroup(ReplicatedEnvironment.java:525)
    +    at com.sleepycat.je.rep.ReplicatedEnvironment.<init>(ReplicatedEnvironment.java:587)
    +... 
    +
    +When an empty string is specified for the helper host/port, the parameter is +not used by JE. +[#24234] (6.3.6) +

  24. + +
  25. +Fixed DPL bytecode enhancer so it works with Java 8-compiled classes. The DPL +was working earlier with Java 8 in the sense that our Java 7-compiled libraries +could be used from a Java 8 app. But the bytecode enhancer was failing when +used to enhance a Java 8-compiled class. This was fixed by upgrading to ASM +5.0.3, which supports Java 8 bytecode. +[#24225] (6.3.6) +

  26. + +
+ + +
+

Changes in 6.2.7

+ +
    + +
  1. +A cursor may now be optionally configured to be "non-sticky". This has certain +performance advantages: +
      +
    • + Some processing is avoided because the prior position is not maintained. +
    • +
    • + The lock on the record at the prior position is released before acquiring + the lock on the record at the new position. This can help to prevent + deadlocks in certain situations. +
    • +
    +For more information, see the javadoc for CursorConfig.setNonSticky. +

    +[#23775] (6.2.0) +
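    +For example (a sketch; 'db' is assumed to be an open Database): +
    +    import com.sleepycat.je.*;
    +
    +    CursorConfig cursorConfig = new CursorConfig();
    +    cursorConfig.setNonSticky(true);
    +    Cursor cursor = db.openCursor(null, cursorConfig);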


  2. + +
  3. +Further exploitation of BIN-deltas for CRUD operations. +

    +For background and previous work in this area, see the changelog for the 6.1 +release. In this release we have extended the set of CRUD operations that +are performed on BIN-deltas, without the need to mutate them to full BINs +(and thus saving the disk reads that would be required to fetch the full +BINs into memory). Specifically, the following additional operations can now +exploit BIN-deltas: +

    +Insertions and updates, when no tree node splits are required and the +key of the record to be inserted/updated is found in a BIN-delta. +

    +Blind operations: we say that a record operation (insertion, update, or +deletion) is performed "blindly" in a BIN-delta, when the delta does not +contain a slot with the operation's key and we don't need to access the +full BIN to check whether such a slot exists there or to extract any +information from the full-BIN slot, if it exists. The condition that no +tree node splits are required applies to blind operations as well. The +following operations can be performed blindly: +- Replay of insertions at replica nodes. +- Insertions during recovery redo. +- Updates and deletes during recovery redo, for databases with duplicates. +

    +A new statistic has been added to count the number of blind operations +performed, including the blind put operations described below. This count can +be obtained via the EnvironmentStats.getNBINDeltaBlindOps() method. +

    +[#23680] (6.2.0) +


  4. + +
  5. +Blind put operations in BIN-deltas. +

    +Normally, blind puts are not possible: we need to know whether the put is +actually an update or an insertion, i.e., whether the key exists in the full +BIN or not. Furthermore, in the case of an update, we also need to know the +location of the previous record version to make the current update abortable. +However, it is possible to answer at least the key existence question by +adding a small amount of extra information to the deltas. If we do so, puts +that are actual insertions can be done blindly. +

    +To answer whether a key exists in a full BIN or not, each BIN-delta stores +a bloom filter, which is a very compact, approximate representation of the +set of keys in the full BIN. Bloom filters can answer set membership questions +with no false negatives and very low probability of false positives. As a +result, put operations that are actual insertions can almost always be +performed blindly. +

    +To make the blind puts optimization possible in JE databases that use custom +Btree and/or duplicates comparators, these comparators must perform "binary +equality", that is, they must consider two keys (byte arrays) to be equal if +and only if they have the same length and they are equal byte-per-byte. To +communicate to the JE engine that a comparator does binary equality, the +comparator must implement the new BinaryEqualityComparator tag +interface. +

    +[#23768] (6.2.1) +
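    +For illustration, a comparator that satisfies binary equality and declares +it via the tag interface (a sketch; the byte-wise ordering shown is one +possible choice): +
    +    import java.io.Serializable;
    +    import java.util.Comparator;
    +    import com.sleepycat.je.BinaryEqualityComparator;
    +
    +    public class UnsignedByteComparator
    +        implements Comparator<byte[]>, BinaryEqualityComparator,
    +                   Serializable {
    +
    +        public int compare(byte[] a, byte[] b) {
    +            int n = Math.min(a.length, b.length);
    +            for (int i = 0; i < n; i += 1) {
    +                int diff = (a[i] & 0xFF) - (b[i] & 0xFF);
    +                if (diff != 0) {
    +                    return diff;
    +                }
    +            }
    +            // Equal only when the same length and byte-per-byte equal.
    +            return a.length - b.length;
    +        }
    +    }
    +Such a comparator is installed as usual, e.g., via +DatabaseConfig.setBtreeComparator or setDuplicateComparator. +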


  6. + +
  7. +Added LockMode.READ_UNCOMMITTED_ALL. When using this mode, unlike +READ_UNCOMMITTED, deleted records will not be skipped by read operations when +the deleting transaction is still open (and may later abort, in which case the +record will no longer be deleted). See the LockMode javadoc for further +details. +

    +[#23660] (6.2.1) +
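    +For example (a sketch; 'db' is assumed to be an open Database and 'keyBytes' +an existing key): +
    +    import com.sleepycat.je.*;
    +
    +    DatabaseEntry key = new DatabaseEntry(keyBytes);
    +    DatabaseEntry data = new DatabaseEntry();
    +    // Unlike READ_UNCOMMITTED, records deleted by a still-open
    +    // transaction are not skipped.
    +    OperationStatus status =
    +        db.get(null, key, data, LockMode.READ_UNCOMMITTED_ALL);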


  8. + +
  9. +Added two optimizations for secondary DB read operations. +
      +
    • + For secondary DB read operations where the primary record data is not + requested (because DatabaseEntry.setPartial is called on the 'data' + parameter), a Btree lookup and record lock of the primary record are no + longer performed. This change does not impact the meaning of the isolation + mode used for such secondary reads, i.e., the semantics are correct without + acquiring a lock on the primary record. (A sketch follows this list.) +
    • +
    • + For secondary DB read operations where the primary record data is + requested, one less record lock is now acquired. Previously, both the + primary and secondary records were locked. Now, only the primary record is + locked. This optimization does not apply to the serializable isolation + mode. The optimization applies only to the read-committed and + repeatable-read isolation modes, and does not impact the meaning of these + modes, i.e., the semantics are correct without acquiring a lock on the + secondary record. +
    • +
    +

    +[#23326] (6.2.2) +
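    +A sketch of the first optimization (assuming 'secDb' is an open +SecondaryDatabase and 'secKeyBytes' an existing secondary key): +
    +    import com.sleepycat.je.*;
    +
    +    DatabaseEntry secKey = new DatabaseEntry(secKeyBytes);
    +    DatabaseEntry primaryKey = new DatabaseEntry();
    +    DatabaseEntry data = new DatabaseEntry();
    +    // Request no data: the primary Btree lookup and the lock on the
    +    // primary record are skipped entirely.
    +    data.setPartial(0, 0, true);
    +    OperationStatus status =
    +        secDb.get(null, secKey, primaryKey, data, LockMode.DEFAULT);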


  10. + +
  11. +Fixed a bug that could cause the following Collections API and DPL methods to +incorrectly return an empty result (no records). +
      +
    • + When using the DPL (com.sleepycat.persist) and calling EntityCursor.last() + when the cursor was created with an end point (toKey parameter). Also when + EntityCursor.prev() or prevNoDup() is called and the cursor is not + initialized, since this is equivalent to calling last(). +
    • +
    • + When using the Collections API (com.sleepycat.collections) and calling + SortedSet.last() or SortedMap.lastKey(). +
    • +
    +The problem occurs when searching for the last record in a key range, while +another thread is concurrently inserting records near the end of the key range. +An empty result is returned, regardless of the number of records in the key +range. +

    +[#23687] (6.2.2) +


  12. + +
  13. +Fixed bugs in the computation of the nINCompactKey and nINNoTarget stats +(EnvironmentStats.getNINCompactKeyIN and getNINNoTarget). Prior to the fixes, +these stats would sometimes have negative values. +[#23718] (6.2.3) +

  14. + +
  15. +Fixed a bug that caused a DB to become unusable when it is removed or truncated +(by calling Environment.removeDatabase or truncateDatabase) using a +read-committed transaction, and the transaction aborts (explicitly, or due to a +crash before commit). In this case the DB will not be accessible -- it cannot +be opened, truncated or removed. When attempting to open the DB, an exception +such as the following is thrown: +
    +Exception in thread "main" com.sleepycat.je.DatabaseNotFoundException: (JE 6.1.5) Attempted to remove non-existent database ...
    +at com.sleepycat.je.dbi.DbTree.lockNameLN(DbTree.java:869)
    +at com.sleepycat.je.dbi.DbTree.doRemoveDb(DbTree.java:1130)
    +at com.sleepycat.je.dbi.DbTree.dbRemove(DbTree.java:1183)
    +at com.sleepycat.je.Environment$1.runWork(Environment.java:947)
    +at com.sleepycat.je.Environment$DbNameOperation.runOnce(Environment.java:1172)
    +at com.sleepycat.je.Environment$DbNameOperation.run(Environment.java:1155)
    +at com.sleepycat.je.Environment.removeDatabase(Environment.java:941)
    +...
    +
    +A workaround for the problem in earlier releases is to avoid using +read-committed for a transaction used to perform a DB remove or truncate +operation. +

    +[#23821] (6.2.3) +


  16. + +
  17. +Fixed a bug that caused an exception during log cleaning, although the +exception has been observed only very rarely. It could also potentially cause +data corruption, but this has never been reported or observed in tests. +Examples of the exceptions that have been observed are below. +
    +com.sleepycat.je.EnvironmentFailureException: Environment invalid because of
    +previous exception: (JE 6.1.0) ...
    +    at com.sleepycat.je.EnvironmentFailureException.unexpectedException(EnvironmentFailureException.java:315)
    +    at com.sleepycat.je.log.LogManager.serialLog(LogManager.java:477)
    +    at com.sleepycat.je.log.LogManager.logItems(LogManager.java:419)
    +    at com.sleepycat.je.log.LogManager.multiLog(LogManager.java:324)
    +    at com.sleepycat.je.log.LogManager.log(LogManager.java:272)
    +    at com.sleepycat.je.log.LogManager.log(LogManager.java:261)
    +    at com.sleepycat.je.log.LogManager.log(LogManager.java:223)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.rewriteMapTreeRoot(EnvironmentImpl.java:1285)
    +    at com.sleepycat.je.cleaner.FileProcessor.processFile(FileProcessor.java:701)
    +    at com.sleepycat.je.cleaner.FileProcessor.doClean(FileProcessor.java:274)
    +    at com.sleepycat.je.cleaner.FileProcessor.onWakeup(FileProcessor.java:137)
    +    at com.sleepycat.je.utilint.DaemonThread.run(DaemonThread.java:148)
    +    at java.lang.Thread.run(Thread.java:744)
    +Caused by: java.lang.ArrayIndexOutOfBoundsException: 111
    +    at com.sleepycat.util.PackedInteger.writeInt(PackedInteger.java:188)
    +    at com.sleepycat.je.log.LogUtils.writePackedInt(LogUtils.java:155)
    +    at com.sleepycat.je.cleaner.DbFileSummary.writeToLog(DbFileSummary.java:79)
    +    at com.sleepycat.je.dbi.DatabaseImpl.writeToLog(DatabaseImpl.java:2410)
    +    at com.sleepycat.je.dbi.DbTree.writeToLog(DbTree.java:2050)
    +    at com.sleepycat.je.log.entry.SingleItemEntry.writeEntry(SingleItemEntry.java:114)
    +    at com.sleepycat.je.log.LogManager.marshallIntoBuffer(LogManager.java:745)
    +    at com.sleepycat.je.log.LogManager.serialLogWork(LogManager.java:611)
    +    at com.sleepycat.je.log.LogManager.serialLog(LogManager.java:461)
    +    ... 11 more
    +
    +Another instance of the same problem with a slightly different stack trace is +below: +
    +java.nio.BufferOverflowException UNEXPECTED_EXCEPTION_FATAL: Unexpected
    +internal Exception, unable to continue. Environment is invalid and must be
    +closed.
    +    at com.sleepycat.je.EnvironmentFailureException.unexpectedException(EnvironmentFailureException.java:315)
    +    at com.sleepycat.je.log.LogManager.serialLog(LogManager.java:481)
    +    at com.sleepycat.je.log.LogManager.logItems(LogManager.java:423)
    +    at com.sleepycat.je.log.LogManager.multiLog(LogManager.java:325)
    +    at com.sleepycat.je.log.LogManager.log(LogManager.java:273)
    +    at com.sleepycat.je.tree.LN.logInternal(LN.java:600)
    +    at com.sleepycat.je.tree.LN.log(LN.java:411)
    +    at com.sleepycat.je.cleaner.FileProcessor.processFoundLN(FileProcessor.java:1070)
    +    at com.sleepycat.je.cleaner.FileProcessor.processLN(FileProcessor.java:884)
    +    at com.sleepycat.je.cleaner.FileProcessor.processFile(FileProcessor.java:673)
    +    at com.sleepycat.je.cleaner.FileProcessor.doClean(FileProcessor.java:278)
    +    at com.sleepycat.je.cleaner.FileProcessor.onWakeup(FileProcessor.java:137)
    +    at com.sleepycat.je.utilint.DaemonThread.run(DaemonThread.java:148)
    +Caused by: java.nio.BufferOverflowException
    +    at java.nio.HeapByteBuffer.put(HeapByteBuffer.java:189)
    +    at java.nio.ByteBuffer.put(ByteBuffer.java:859)
    +    at com.sleepycat.je.log.LogUtils.writeBytesNoLength(LogUtils.java:350)
    +    at com.sleepycat.je.log.entry.LNLogEntry.writeBaseLNEntry(LNLogEntry.java:371)
    +    at com.sleepycat.je.log.entry.LNLogEntry.writeEntry(LNLogEntry.java:333)
    +    at com.sleepycat.je.log.entry.BaseReplicableEntry.writeEntry(BaseReplicableEntry.java:48)
    +    at com.sleepycat.je.log.entry.LNLogEntry.writeEntry(LNLogEntry.java:52)
    +    at com.sleepycat.je.log.LogManager.marshallIntoBuffer(LogManager.java:751)
    +    at com.sleepycat.je.log.LogManager.serialLogWork(LogManager.java:617)
    +    at com.sleepycat.je.log.LogManager.serialLog(LogManager.java:465)
    +
    +

    +[#23492] (6.2.3) +


  18. + +
  19. +Fixed a locking bug that caused a deadlock when no real deadlock existed. The +bug shows up with cursors using read-committed isolation. +

    +Here is the specific scenario: +

      +
    1. Cursor C1 in thread T1 reads a record R using Transaction X1. C1 + creates a ReadCommittedLocker L1, with X1 as its buddy. L1 locks R. +
    2. Cursor C2 in thread T2 tries to write-lock R, using another + Transaction X2. X2 waits for L1 (T2 waits for T1). +
    3. Cursor C3 in thread T1 tries to read R using X1. C3 creates a + ReadCommittedLocker L3, with X1 as its buddy. L3 tries to lock R. L1 and L3 + are not recognized as buddies, so L3 waits for X2 (T1 waits for T2). +
    +

    +[#23821] (6.2.4) +


  20. + +
  21. +The ant build (build.xml) has been updated so that the JUnit jar file is now +downloaded from Maven Central when needed for running tests with the 'test' +target. This jar is no longer needed for building a JE jar file with the 'jar' +target. See installation.html for an updated description of how to build JE and +run the unit tests. +[#23669] (6.2.7) +

  22. + +
  23. +Added EnvironmentConfig.CLEANER_USE_DELETED_DIR. This can be set to true when +trying to reproduce and analyze LOG_FILE_NOT_FOUND problems. See the javadoc +for details. More information was also added to the +EnvironmentConfig.CLEANER_EXPUNGE javadoc on the same topic. +[#23830] (6.2.8) +

  24. + +
  25. +Added debugging information when an internal latch deadlock occurs due to a bug +where a latch is not released. Note that latches are not user-visible entities +and are unrelated to record locking. Latches are used internally for thread +safety and only held for short durations. A latch deadlock is detected via a +timeout mechanism. An EnvironmentFailureException is thrown in the thread that +times out. In addition, a full thread dump is now written to the je.info log +at logging level SEVERE. The thread dump can be used to find the deadlock. +

    +In addition, the EnvironmentConfig.ENV_LATCH_TIMEOUT parameter has been exposed +to provide control over the timeout interval for atypical applications. This +parameter has been present internally since latch timeouts were added in JE +6.0.3; however, the parameter was previously undocumented. +

    +[#23897] (6.2.9) +


  26. + +
  27. +Fixed two bugs having to do with lock conflicts. The two problems are distinct, +but both occurred while creating a LockConflictException due to a lock timeout. +
      +
    • + Fixed a bug that caused a ConcurrentModificationException when multiple + lock tables are configured (EnvironmentConfig.LOCK_N_LOCK_TABLES). The + exception was thrown when a lock conflict occurred along with particular + concurrent activity in another thread that holds a lock. The methods in + the stack trace when this problem occurs are: +
      +      ...
      +      LockManager.findDeadlock1
      +      LockManager.findDeadlock
      +      LockManager.makeTimeoutMsgInternal
      +      ...
      +      
      +
    • +
    • + Fixed a bug that caused a thread deadlock, eventually stopping all + threads accessing JE. This could happen when a lock conflict exception + occurred while attempting to lock a record with read-committed isolation, + and another thread (internal or external) also tried to lock the same + record. An example of the two threads involved in the deadlock is below. + Additional threads accessing JE methods are also likely to be blocked. +
      +      "THREAD-USING-READ-COMMITTED":
      +        at com.sleepycat.je.txn.Txn.setState(Txn.java:2039)
      +        - waiting to lock <0x000000078953b720> (a com.sleepycat.je.txn.Txn)
      +        at com.sleepycat.je.txn.Txn.setOnlyAbortable(Txn.java:1887)
      +        at com.sleepycat.je.txn.BuddyLocker.setOnlyAbortable(BuddyLocker.java:158)
      +        at com.sleepycat.je.OperationFailureException.<init>(OperationFailureException.java:200)
      +        at com.sleepycat.je.LockConflictException.<init>(LockConflictException.java:135)
      +        at com.sleepycat.je.LockTimeoutException.<init>(LockTimeoutException.java:48)
      +        at com.sleepycat.je.txn.LockManager.newLockTimeoutException(LockManager.java:665)
      +        at com.sleepycat.je.txn.LockManager.makeTimeoutMsgInternal(LockManager.java:623)
      +        at com.sleepycat.je.txn.SyncedLockManager.makeTimeoutMsg(SyncedLockManager.java:97)
      +        - locked <0x000000079068eaa8> (a com.sleepycat.je.latch.Latch)
      +        at com.sleepycat.je.txn.LockManager.lockInternal(LockManager.java:390)
      +        at com.sleepycat.je.txn.LockManager.lock(LockManager.java:276)
      +        ...
      +      "ANOTHER-THREAD-LOCKING-THE-SAME-RECORD":
      +        at com.sleepycat.je.txn.SyncedLockManager.attemptLock(SyncedLockManager.java:73)
      +        - waiting to lock <0x000000079068eaa8> (a com.sleepycat.je.latch.Latch)
      +        at com.sleepycat.je.txn.LockManager.lockInternal(LockManager.java:292)
      +        at com.sleepycat.je.txn.LockManager.lock(LockManager.java:276)
      +        - locked <0x000000078953b720> (a com.sleepycat.je.txn.Txn)
      +        ...
      +      
      +
    • +
    +[#23894] (6.2.10) +

  28. + +
  29. +Fixed a bug that could cause the following exception when calling Cursor.count, +skipNext or skipPrev. The bug is likely to occur only when BINs (bottom +internal nodes of the Btree) are frequently being evicted. Although the +Environment is invalidated by this exception and must be closed, the problem is +transient -- the Environment can be re-opened and no data loss or corruption +will have occurred. +
    +(JE 6.2.6) ... Latch not held: BIN17923 currentThread: ...  currentTime: ...
    +exclusiveOwner: -none- UNEXPECTED_STATE_FATAL: Unexpected internal state,
    +unable to continue. Environment is invalid and must be closed.
    +at com.sleepycat.je.EnvironmentFailureException.unexpectedState(EnvironmentFailureException.java:405)
    +at com.sleepycat.je.latch.LatchImpl.release(LatchImpl.java:109)
    +at com.sleepycat.je.tree.IN.releaseLatch(IN.java:519)
    +at com.sleepycat.je.dbi.CursorImpl.skipInternal(CursorImpl.java:2737)
    +at com.sleepycat.je.dbi.CursorImpl.skip(CursorImpl.java:2612)
    +at com.sleepycat.je.Cursor.countHandleDups(Cursor.java:4055)
    +at com.sleepycat.je.Cursor.countInternal(Cursor.java:4028)
    +at com.sleepycat.je.Cursor.count(Cursor.java:1804) at
    +...
    +
    +The last line above is a call to Cursor.count. The same problem could happen if +Cursor.skipNext or skipPrev is called, and only the last few lines of the stack +trace above would be different. +

    +[#23872] (6.2.25) +


  30. + +
  31. +The HA Feeder output threads now batch network writes whenever possible to +reduce the resource overheads associated with transmitting small network +packets. These changes enhance replication performance; improvements in the +range of 5% have been observed for write intensive workloads. +

    +[#23274] (6.2.25) +


  32. + +
  33. +Added new statistics to count the number of user (non-internal) CRUD operations +that are performed entirely on BIN deltas. +

    +[#23883] (6.2.25) +


  34. + +
  35. +Fixed a bug where no exception was thrown when using ReplicaAckPolicy.ALL and +performing a write transaction in a two node replication group, and the replica +node was down/unresponsive. InsufficientAcksException is now thrown in this +situation, as specified in the documentation. +[#23934] (6.2.26) +

  36. + +
  37. +Fixed a bug in the internal SortedLSNTreeWalker class, which is used to +implement the Database.preload() and Environment.preload() methods. When these +methods are called, the bug can lead to the creation of a corrupted BTree, and +as a result, subsequent loss of data. The bug was introduced in JE 6.0. +

    +[#23952] (6.2.27) +


  38. + +
  39. +Added EntityIndex.getDatabase. +[#23971] (6.2.27) +

  40. + +
  41. +Fixed a bug where an assertion incorrectly fired during CRUD operations. This +happened when there was concurrent activity in other threads that changed the +number of records in the same portion of the Btree. An example stack trace is +below. +
    +java.lang.AssertionError
    + at com.sleepycat.je.dbi.CursorImpl.getCurrentKey(CursorImpl.java:500)
    + at com.sleepycat.je.dbi.CursorImpl.getCurrentKey(CursorImpl.java:483)
    + at com.sleepycat.je.Cursor.dupsGetNextOrPrevDup(Cursor.java:2882)
    + at com.sleepycat.je.Cursor.retrieveNextHandleDups(Cursor.java:2836)
    + at com.sleepycat.je.Cursor.retrieveNext(Cursor.java:2816)
    + at com.sleepycat.je.Cursor.getNextDup(Cursor.java:1150)
    + [ app specific portion ... ]
    +
    +In the stack trace above the Cursor.getNextDup method is being called. There +are other operations where the same thing could happen. The common factor is +the call to the internal CursorImpl.getCurrentKey method, which fires the +assertion. +

    +[#23971] (6.2.29) +


  42. + +
  43. +Fixed a bug that prevents recovery, i.e., prevents the Environment from being +opened. The bug has always been present in JE but has appeared in tests only +recently, and has not been reported in the field. Deleting records in a large +range of keys might make the bug more likely to occur. An example of the stack +trace when the failure occurs is below: +
    +com.sleepycat.je.EnvironmentFailureException: (JE 6.2.29) ... last
    +LSN=0x533/0x41f59 LOG_INTEGRITY: Log information is incorrect, problem is
    +likely persistent. Environment is invalid and must be closed.
    +        at com.sleepycat.je.recovery.RecoveryManager.traceAndThrowException(RecoveryManager.java:3031)
    +        at com.sleepycat.je.recovery.RecoveryManager.readINs(RecoveryManager.java:1010)
    +        at com.sleepycat.je.recovery.RecoveryManager.buildINs(RecoveryManager.java:804)
    +        at com.sleepycat.je.recovery.RecoveryManager.buildTree(RecoveryManager.java:717)
    +        at com.sleepycat.je.recovery.RecoveryManager.recover(RecoveryManager.java:352)
    +        at com.sleepycat.je.dbi.EnvironmentImpl.finishInit(EnvironmentImpl.java:670)
    +        at com.sleepycat.je.dbi.DbEnvPool.getEnvironment(DbEnvPool.java:208)
    +        at com.sleepycat.je.Environment.makeEnvironmentImpl(Environment.java:251)
    +        at com.sleepycat.je.Environment.<init>(Environment.java:232)
    +        at com.sleepycat.je.Environment.<init>(Environment.java:188)
    +        at com.sleepycat.je.rep.ReplicatedEnvironment.<init>(ReplicatedEnvironment.java:573)
    +        at com.sleepycat.je.rep.ReplicatedEnvironment.<init>(ReplicatedEnvironment.java:443)
    +        [ app specific portion ... ]
    +Caused by: com.sleepycat.je.EnvironmentFailureException: (JE 6.2.29) ...
    +fetchIN of 0x35c/0x3f7f9 parent IN=11688 IN class=com.sleepycat.je.tree.IN
    +lastFullVersion=0x533/0x5d47d lastLoggedVersion=0x533/0x5d47d
    +parent.getDirty()=false state=0 LOG_FILE_NOT_FOUND: Log file missing, log is
    +likely invalid. Environment is invalid and must be closed.
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:1866)
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:1764)
    +        at com.sleepycat.je.tree.Tree.getParentINForChildIN(Tree.java:1346)
    +        at com.sleepycat.je.recovery.RecoveryManager.recoverChildIN(RecoveryManager.java:2025)
    +        at com.sleepycat.je.recovery.RecoveryManager.recoverIN(RecoveryManager.java:1834)
    +        at com.sleepycat.je.recovery.RecoveryManager.replayOneIN(RecoveryManager.java:1099)
    +        at com.sleepycat.je.recovery.RecoveryManager.readINs(RecoveryManager.java:988)
    +        ... 16 more
    +Caused by: java.io.FileNotFoundException: .../0000035c.jdb (No such file or directory)
    +        at java.io.RandomAccessFile.open(Native Method)
    +        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:241)
    +        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:122)
    +        at com.sleepycat.je.log.FileManager$DefaultRandomAccessFile.<init>(FileManager.java:3260)
    +        at com.sleepycat.je.log.FileManager$6.createFile(FileManager.java:3288)
    +        at com.sleepycat.je.log.FileManager.openFileHandle(FileManager.java:1311)
    +        at com.sleepycat.je.log.FileManager.getFileHandle(FileManager.java:1183)
    +        at com.sleepycat.je.log.LogManager.getLogSource(LogManager.java:1135)
    +        at com.sleepycat.je.log.LogManager.getLogEntry(LogManager.java:822)
    +        at com.sleepycat.je.log.LogManager.getLogEntryAllowInvisibleAtRecovery(LogManager.java:787)
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:1801)
    +        ... 22 more
    +
    +[#23990] (6.2.31) +

  44. + +
  45. +Fixed a bug that can cause data log corruption. This has been reported only as +a rare occurrence, but could impact any application where not all Btree +internal nodes fit in cache. An example stack trace is below, although other +stack traces could also apply where an IN (internal node) is being fetched. +
    +com.sleepycat.je.EnvironmentFailureException: (JE 6.2.9) ...
    +fetchIN of 0x10cbc/0x696373 parent IN=84363 IN
    +class=com.sleepycat.je.tree.IN lastFullVersion=0x10e00/0x82006e
    +lastLoggedVersion=0x10e00/0x82006e parent.getDirty()=false state=0
    +LOG_FILE_NOT_FOUND: Log file missing, log is likely invalid. Environment is
    +invalid and must be closed.
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:1866)
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:1752)
    +        at com.sleepycat.je.tree.Tree.search(Tree.java:2293)
    +        at com.sleepycat.je.tree.Tree.search(Tree.java:2193)
    +        at com.sleepycat.je.tree.Tree.getParentBINForChildLN(Tree.java:1481)
    +        at com.sleepycat.je.cleaner.FileProcessor.processLN(FileProcessor.java:836)
    +        ... 5 more
    +Caused by: java.io.FileNotFoundException: /local/pyrox/DS2/asinst_1/OUD/db/Europe/00010cbc.jdb (No such file or directory)
    +        at java.io.RandomAccessFile.open(Native Method)
    +        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:241)
    +        at java.io.RandomAccessFile.<init>(RandomAccessFile.java:122)
    +        at com.sleepycat.je.log.FileManager$DefaultRandomAccessFile.<init>(FileManager.java:3208)
    +        at com.sleepycat.je.log.FileManager$6.createFile(FileManager.java:3236)
    +        at com.sleepycat.je.log.FileManager.openFileHandle(FileManager.java:1305)
    +        at com.sleepycat.je.log.FileManager.getFileHandle(FileManager.java:1177)
    +        at com.sleepycat.je.log.LogManager.getLogSource(LogManager.java:1151)
    +        at com.sleepycat.je.log.LogManager.getLogEntry(LogManager.java:843)
    +        at com.sleepycat.je.log.LogManager.getLogEntryAllowInvisibleAtRecovery(LogManager.java:808)
    +        at com.sleepycat.je.tree.IN.fetchINWithNoLatch(IN.java:1801)
    +        ... 10 more
    +
    +[#24046] (6.2.31) +

  46. + +
  47. +Fixed a bug that can cause data log corruption when using a deferred-write +database. In the one reported instance of the problem, missing records were +reported. A corruption (e.g., LOG_FILE_NOT_FOUND) is also possible. +[#24066] (6.2.31) +

  48. + +
+ + +
+

Changes in 6.1.5

+ +
    + +
  1. +Made an improvement to eviction for Oracle NoSQL DB users, and several +improvements to the DbCacheSize utility. +

    +For Oracle NoSQL DB users only, record versions are now discarded using a +separate eviction step. This means that the record versions can be discarded +to free cache memory without discarding the entire BIN (bottom internal node). +In general, this makes better use of memory and reduces IO for some workloads. +

    +The improvements to DbCacheSize are as follows. +

      +
    • + When -je.rep.preserveRecordVersion true is passed on the + command line, more information is output by the utility. See the new + Record Versions and Oracle NoSQL Database section of the DbCacheSize + javadoc for more information. +
    • +
    • + The minimum and maximum cache sizes are no longer output. Previously, + the difference between these values was only due to an optimization that + applied to small log files. This optimization is now accounted for only + when the file size is small enough to allow for it. Be sure to pass + -je.log.fileMax LENGTH on the command line as described in + the javadoc. +
    • +
    • + The -outputproperties switch now outputs internalNodes, + internalNodesAndVersions, and allNodes, corresponding to the changes above. + The older minInternalNodes/maxInternalNodes and minAllNodes/maxAllNodes + values are still output but deprecated, and the min and max values in each + pair are equal. +
    • +
    • + The output has been simplified by removing the internal Btree information. + Btree information can optionally be output using the new + -btreeinfo switch. +
    • +
    +

    +[#23550] (6.1.0) +


  2. + +
  3. +Fixed a bug that prevented serialization +of ReplicaWriteException. Previously, an attempt to serialize this +exception could fail with the following characteristic stack trace when +the StateChangeEvent object was encountered during serialization: + +
    + Caused by: java.io.NotSerializableException: com.sleepycat.je.rep.StateChangeEvent
    +
    +    at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1181)
    +    at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1541)
    +    at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1506)
    +    at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1429)
    +    at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1175)
    +    at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1541)
    +    at java.io.ObjectOutputStream.defaultWriteObject(ObjectOutputStream.java:439)
    +    at java.util.logging.LogRecord.writeObject(LogRecord.java:470)
    +    ...
    +
    +[#23578] (6.1.1) +

  4. + +
  5. +The JE HA replica replay mechanism now uses a separate thread to write replica +acknowledgements and heartbeat responses to the network. This change results in +two improvements: +
      +
    1. + The replay of changes sent by the master can make progress even in the + presence of brief network stalls, thus increasing replica replay + throughput; improvements in the range of 5 to 10% have been observed in + internal test scenarios. +
    2. +
    3. + This new thread is also used to send spontaneous heartbeat response + messages, making the heartbeat mechanism, used to detect node failures, + more robust. +
    4. +
    +[#23195] (6.1.1) +

  6. + +
  7. +Performance enhancement: executing a subset of CRUD and internal operations +on memory-resident BIN-deltas. +

    +Before JE 6.0, BIN-deltas were used as a disk optimization only: to reduce +the number of bytes written to disk every time a new BIN version had to be +logged. BIN-deltas would never appear in the in-memory BTrees, and if the +most recently logged version of a BIN was a delta, fetching that BIN into +the in-memory tree required two disk reads: one for the delta and one for the +most recent full-BIN version. +

    +Starting with JE 6.0, BIN-deltas can appear in the in-memory BTree. +Specifically, if a full dirty BIN is selected for eviction, rather than +evicting the whole BIN (and incurring a disk write), the BIN is converted +to a delta that stays in the cache. If a subsequent operation needs the full +BIN and the delta is still in the cache, only one disk read will be done. +

    +Further disk-read savings can be realized, because many operations can (under +certain conditions) be performed directly on the BIN-delta, without the need +for the full BIN. However, in 6.0, only a small subset of background +operations were modified to exploit BIN-deltas. In JE 6.1, the set of +operations that can be performed on BIN-deltas has been extended. Examples +include key searches in BTrees, when the search key is found on a BIN-delta, +and deletion or update of the record a cursor is positioned on, when the +cursor is positioned on a BIN-delta. These changes affect both internal +operations and the search, delete, and putCurrent methods of the Database and +Cursor API classes. +

    +[#23428] (6.1.1) +


  8. + +
  9. +Performance enhancement: Reduced latch contention during BTree searches. +

    +Typically, thread synchronization during BTree searches is done via latch +coupling: at most 2 tree nodes (a parent and a child) are latched at a time. +Furthermore, a node is latched in shared (SH) mode, unless it is expected that +it will be updated, in which case it is latched in exclusive (EX) mode. Finally, +SH latches are not upgradeable to EX latches (to avoid deadlocks and reduce +latching overhead). +

    +JE follows this general latch-coupling technique. However, it also has to deal +with the JE-specific fact that fetching a missing child node into the cache +requires that its memory-resident parent be updated (because the parent points +to its children via direct Java object references). As a result, during a JE +BTree search every node is potentially updated, which precludes the use of SH +latches. To cope with this complication, JE has been using one of the following +approaches during its various kinds of BTree searches: (a) use SH latches, but +if a missing child needs to be fetched, release the SH latch on the parent and +restart the search from the beginning, using EX latches on all nodes this time; +(b) do grandparent latching: use SH latches but keep a latch on the grandparent +node so that if we need to fetch a missing child of the parent node, the SH +latch on the parent can be released, and then the parent can be relatched in +EX mode; or (c) do latch-coupling with EX latches only. Obviously, (c) is the +worst choice, but all three approaches result in more and longer-held EX +latches than necessary. As a result, some JE applications have experienced +performance problems due to excessive latch contention during BTree searches. +

    +In JE 6.1, a new latching algorithm has been implemented to replace all of +(a), (b), and (c) above. The new algorithm uses SH latches, but if a missing +child needs to be fetched, it first "pins" the parent (to prevent its eviction), +then releases the SH latch on the parent, and finally reads the child node +from the log (without any latches held). After the child is fetched, it +latches the remembered parent in EX mode, unpins it, and checks whether it +is still the correct parent for the search and for the child node that was +fetched. If so, the search continues down the tree. If not, it restarts the +search from the beginning. Compared to approach (a) above, this new algorithm +may restart a search multiple times; however, the probability of even a single +restart is lower than with (a), and each restart uses SH latches. Furthermore, +no latches are held during the long random disk read done to fetch a missing +child. +

    +[#18617] (6.1.1) +


  10. + +
  11. +Fixed a bug that could result in the following exception in a JE HA + application: + +
    +com.sleepycat.je.EnvironmentFailureException: 
    + Node5(5):... VLSN 3,182,883 should be held within this tracker.
    +
    +or +
    +com.sleepycat.je.EnvironmentFailureException: 
    + Node5(5):...end of last bucket should match end of range ...
    +
    +[#23491] +

  12. + +
  13. +Improved the Monitor's ability to discover group status changes, which +should improve the robustness of notifications after the monitor is down +or when it has lost network connectivity. +

    +[#23631] (6.1.2) +


  14. + +
  15. +Added a new implementation of Database.count(), along with a new variant of +Database.count() that takes a memoryLimit as input. +

    +Counting the number of records in a database is now implemented using a +disk-ordered-scan (DOS), similar to the one used by DiskOrderedCursor. DOS may +consume a large amount of memory, and to avoid OutOfMemoryErrors, it requires +that a limit on its memory consumption be provided. As a result, a new method, +Database.count(long memoryLimit), has been implemented that takes this memory +limit as a parameter. The existing Database.count() method is still available +and uses an internally established limit. +

    +This change fixes two problems in the previous implementation (based on the +SortedLSNTreeWalker class): (1) there was no upper bound on its memory +consumption, and (2) it was buggy when concurrent thread activity could cause +full BINs to be mutated to deltas or vice versa. +
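    +A minimal usage sketch of the new variant (the variable name and memory +limit value are illustrative only): +
    +    // Count with a 10 MB cap on the memory used by the disk-ordered scan.
    +    long count = db.count(10L * 1024 * 1024);
    +
    +    // The no-argument form uses an internally established limit.
    +    long countDefault = db.count();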

    +[#23646] (6.1.2) +


  16. + +
  17. +Fixed bug in DiskOrderedCursor. +

    +Iterating over the records of a database via a DiskOrderedCursor would cause +a crash if a BIN delta was encountered in the in-memory BTree (because in this +case a copy of the BIN delta was created and cached for later use, but the copy +did not contain all the needed information from the original). This bug was +introduced in JE 6.0.11. +
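    +For reference, a typical disk-ordered scan (the usage pattern affected by +this bug) looks like the following sketch, where 'db' is an open Database: +
    +    DiskOrderedCursor cursor = db.openCursor(new DiskOrderedCursorConfig());
    +    try {
    +        DatabaseEntry key = new DatabaseEntry();
    +        DatabaseEntry data = new DatabaseEntry();
    +        while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) {
    +            // Process the record.
    +        }
    +    } finally {
    +        cursor.close();
    +    }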

    +[#23646] (6.1.2) +


  18. + +
  19. +Fixed a bug in DiskOrderedCursor for DeferredWrite databases. An example of +the stack trace when the bug occurs is below. Note that although the exception +message indicates that a file is missing, the problem was actually transient +and no file was missing. Upgrading to the current JE release will fix the +problem without requiring data conversion or restoring from a backup. +
    +com.sleepycat.je.EnvironmentFailureException:
    +(JE 5.0.97) Environment must be closed, caused by:
    +com.sleepycat.je.EnvironmentFailureException:
    +Environment invalid because of previous exception:
    +(JE 5.0.97) ... java.io.FileNotFoundException: ...\ffffffff.jdb
    +(The system cannot find the file specified) LOG_FILE_NOT_FOUND:
    +Log file missing, log is likely invalid.
    +Environment is invalid and must be closed.
    +    at com.sleepycat.je.EnvironmentFailureException.wrapSelf(EnvironmentFailureException.java:210)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.checkIfInvalid(EnvironmentImpl.java:1594)
    +    at com.sleepycat.je.dbi.DiskOrderedCursorImpl.checkEnv(DiskOrderedCursorImpl.java:234)
    +    at com.sleepycat.je.DiskOrderedCursor.checkState(DiskOrderedCursor.java:367)
    +    at com.sleepycat.je.DiskOrderedCursor.getNext(DiskOrderedCursor.java:324)
    +    ...
    +
    +[#23676] (6.1.3) +

  20. + +
  21. +An API change requires application changes if +write operations are performed on a non-replicated database in a replicated +environment. A code change is necessary for applications with the following +characteristics: +

    +

      +
    • A ReplicatedEnvironment is used. +
    • A non-replicated, transactional Database is accessed + (DatabaseConfig.setReplicated(false) and setTransactional(true) are called) + in this environment. +
    • When writing to this database, an explicit (non-null) Transaction is + specified. +
    +

    +In order to perform write operations in such cases, the application must now +call TransactionConfig.setLocalWrite(true). +

    +In addition, it is no longer possible to use a single transaction to write to +both a replicated and a non-replicated database. IllegalOperationException +will be thrown if this is attempted. +

    +These changes were necessary to prevent corruption when a transaction contains +write operations for both replicated and non-replicated databases, and a +failover occurs that causes a rollback of this transaction. The probability of +corruption is low, but it can occur under the right conditions. +

    +For more information see the javadoc for TransactionConfig.setLocalWrite(true), +and the "Non-replicated Databases in a Replicated Environment" section of the +ReplicatedEnvironment class javadoc. +
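    +A sketch of the required change, assuming 'repEnv' is a ReplicatedEnvironment, +'localDb' is a non-replicated, transactional Database opened in it, and 'key' +and 'data' are DatabaseEntry values: +
    +    TransactionConfig txnConfig = new TransactionConfig();
    +    txnConfig.setLocalWrite(true); // allow writes to the non-replicated DB
    +    Transaction txn = repEnv.beginTransaction(null, txnConfig);
    +    try {
    +        localDb.put(txn, key, data);
    +        txn.commit();
    +    } catch (DatabaseException e) {
    +        txn.abort();
    +        throw e;
    +    }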

    +[#23330] (6.1.3) +


  22. + +
  23. +Read-only transactions are now supported. A read-only transaction prohibits +write operations, and more importantly in a replicated environment it +automatically uses Durability.ReplicaAckPolicy.NONE. A read-only transaction +on a Master will thus not be held up, or throw InsufficientReplicasException, +if the Master is not in contact with a sufficient number of Replicas at the +time the transaction is initiated. To configure a read-only transaction, call +TransactionConfig.setReadOnly(true). See this method's javadoc for more +information. +

    +Durability.READ_ONLY_TXN has been deprecated and TransactionConfig.setReadOnly +should be used instead. +
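    +For example (a sketch; 'env' is an open environment): +
    +    TransactionConfig roConfig = new TransactionConfig();
    +    roConfig.setReadOnly(true); // uses ReplicaAckPolicy.NONE when replicated
    +    Transaction txn = env.beginTransaction(null, roConfig);
    +    // ... perform read operations only; writes are prohibited ...
    +    txn.commit();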

    +[#23330] (6.1.3) +


  24. + +
  25. +Fixed a bug that could cause a NullPointerException, such as the one below, +when a ReplicatedEnvironment is opened on an HA replica node. The exception +prevents the environment from being opened. +

    +The conditions that cause the bug are: +

      +
    1. a replica has been restarted after an abnormal shutdown + (ReplicatedEnvironment.close was not called), +
    2. a transaction writing records in multiple databases was in progress at + the time of the abnormal shutdown, +
    3. one of the databases, but not all of them, is then removed or truncated, + and finally +
    4. another abnormal shutdown occurs. +
    +

    +If this bug is encountered, it can be corrected by upgrading to the JE release +containing this fix, and no data loss will occur. +

    +This bug is similar to another bug that was fixed in JE 5.0.70 [#22052]. +The present bug differs in that the transaction must write records in multiple +databases, and at least one but not all of the databases must be removed or +truncated between the two abnormal shutdowns. +

    +com.sleepycat.je.EnvironmentFailureException: (JE 6.1.3) Node1(-1):...
    +last LSN=0x3/0x4427 LOG_INTEGRITY: Log information is incorrect, problem is
    +likely persistent. Environment is invalid and must be closed.
    +    at com.sleepycat.je.recovery.RecoveryManager.traceAndThrowException(RecoveryManager.java:3012)
    +    at com.sleepycat.je.recovery.RecoveryManager.undoLNs(RecoveryManager.java:1253)
    +    at com.sleepycat.je.recovery.RecoveryManager.buildTree(RecoveryManager.java:741)
    +    at com.sleepycat.je.recovery.RecoveryManager.recover(RecoveryManager.java:352)
    +    at com.sleepycat.je.dbi.EnvironmentImpl.finishInit(EnvironmentImpl.java:654)
    +    at com.sleepycat.je.dbi.DbEnvPool.getEnvironment(DbEnvPool.java:208)
    +    at com.sleepycat.je.Environment.makeEnvironmentImpl(Environment.java:252)
    +    at com.sleepycat.je.Environment.(Environment.java:232)
    +    at com.sleepycat.je.Environment.(Environment.java:188)
    +    at com.sleepycat.je.rep.ReplicatedEnvironment.(ReplicatedEnvironment.java:573)
    +    at com.sleepycat.je.rep.ReplicatedEnvironment.(ReplicatedEnvironment.java:443)
    +    ... [app creates a new ReplicatedEnvironment here] ...
    +Caused by: java.lang.NullPointerException
    +    at com.sleepycat.je.log.entry.LNLogEntry.postFetchInit(LNLogEntry.java:412)
    +    at com.sleepycat.je.txn.TxnChain.(TxnChain.java:133)
    +    at com.sleepycat.je.txn.TxnChain.(TxnChain.java:84)
    +    at com.sleepycat.je.recovery.RollbackTracker$RollbackPeriod.getChain(RollbackTracker.java:1009)
    +    at com.sleepycat.je.recovery.RollbackTracker$Scanner.rollback(RollbackTracker.java:483)
    +    at com.sleepycat.je.recovery.RecoveryManager.undoLNs(RecoveryManager.java:1182)
    +    ... 11 more
    +
    +[#22071] (6.1.3) +

  26. + +
  27. +Fixed a bug where a transaction configured for no-wait (using +TransactionConfig.setNoWait(true)) behaved as a normal (wait) transaction when +the ReadCommitted isolation mode was also used. Due to this bug, a +LockTimeoutException was thrown when a LockNotAvailableException should have +been thrown instead, and the transaction was invalidated when it should not +have been. +[#23653] (6.1.4) +
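    +For reference, the configuration combination affected by this bug (a sketch): +
    +    TransactionConfig config = new TransactionConfig();
    +    config.setNoWait(true);        // do not wait for conflicting locks
    +    config.setReadCommitted(true); // ReadCommitted isolation
    +    // With the fix, a busy lock throws LockNotAvailableException and the
    +    // transaction is no longer invalidated.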

  28. + +
  29. +Fixed eviction bug for shared-cache environments. The bug caused LRU corruption +and potential memory leaks in certain cases. The bug was introduced in JE 6.0. +Note that the bug has no impact for environments that are not using a shared +cache (EnvironmentConfig.setSharedCache(true)). +[#23696] (6.1.4) +

  30. + +
+ + +
+

Changes in 6.0.11

+ +
    + +
  1. +Added support in JE HA for the new SECONDARY node type. SECONDARY nodes +can only be replicas, not masters, and do not participate in either +elections or durability decisions. SECONDARY nodes can be used to +increase the available number of read replicas without changing the +election or durability quorum of the group, and without requiring +communication with the secondaries during master elections or +transaction commits. +

    +Changes include adding the NodeType.SECONDARY +enumeration constant, and the +ReplicationGroup.getSecondaryNodes and +ReplicationGroup.getDataNodes methods. [#22482] (6.0.1) +
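    +A configuration sketch (group, node, host, and port values are illustrative; +envHome and envConfig are set up as for any environment): +
    +    ReplicationConfig repConfig =
    +        new ReplicationConfig("myGroup", "node3", "host3:5003");
    +    repConfig.setNodeType(NodeType.SECONDARY); // replica-only member
    +    repConfig.setHelperHosts("host1:5001");
    +    ReplicatedEnvironment repEnv =
    +        new ReplicatedEnvironment(envHome, repConfig, envConfig);
    +    for (ReplicationNode node : repEnv.getGroup().getSecondaryNodes()) {
    +        System.out.println("secondary: " + node.getName());
    +    }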


  2. + +
  3. +Made improvements to internal latching to allow interrupting threads that are +waiting on latches, to cause a timeout when a latch deadlock occurs, and to +enable latch instrumentation via system properties. Note that latching is not +related to transactional locking, and latches are intended to be held for very +short periods. +
      +
    • When a JE thread is waiting on an internal latch, for example, when + accessing Btree internal nodes, log buffers, etc., interrupting the thread + is now possible and will result in a ThreadInterruptedException. In + earlier versions, latching calls were not interruptible and a latch + deadlock would require a process restart.
    • +
    • When a JE thread is waiting on an internal latch, a timeout will occur + if the latch cannot be acquired after 5 minutes and a fatal + EnvironmentFailureException will be thrown. The timeout is intended to + detect latch deadlocks earlier. +
    • A system property, JE_TEST, may be set to true (-DJE_TEST=true) to + enable JE debug/test instrumentation. Currently, this only adds latch + tracking so that an internal latching error will contain more information + about the problem. Over time, more JE instrumentation will be enabled via + this switch. The JE_TEST property is set to true automatically when + running the JE unit test suite via ant. This instrumentation is not + intended for production use. Note, however, that in earlier versions this + instrumentation was enabled when Java assertions (-ea) were enabled.
    • +
    • An additional system property, JE_CAPTURE_LATCH_OWNER, may be set to + true to capture the stack trace at the point that each latch is acquired + exclusively. This additional information will appear in latching error + messages and may help in debugging an internal latching problem. It is + fairly expensive to capture the stack trace, and this switch should not be + set in production.
    • +
    • An undocumented EnvironmentConfig parameter, je.env.sharedLatches, is + no longer used and silently ignored. Latches are now shared (read-write), + rather than exclusive, whenever possible.
    • +
    +[#22993] (6.0.3) +

  4. + +
  5. +The following log cleaner configuration parameters in the EnvironmentConfig +class have been deprecated and are no longer used. If configured, they will +be silently ignored. Lazy and proactive migration are no longer supported due +to negative impacts on eviction, checkpointing and Btree splits. If a +persistent log cleaner backlog occurs, the recommended solution is to configure +additional cleaner threads. +
      +
    • CLEANER_LAZY_MIGRATION
    • +
    • CLEANER_BACKGROUND_PROACTIVE_MIGRATION
    • +
    • CLEANER_FOREGROUND_PROACTIVE_MIGRATION
    • +
    +[#23070] (6.0.3) +

  6. + +
  7. +When using secondary databases and DPL secondary indexes, the locking order for +reads via a secondary has been changed to reduce the possibility of deadlocks. +This optimization does not apply when the serializable isolation mode is used, +and does not apply to the JoinCursor. +[#22368] (6.0.4) +

  8. + +
  9. +Improved Btree cache usage by caching a BIN-delta -- the partial form of a BIN +containing only the dirty entries -- in preference to logging it and then +evicting the entire BIN. This reduces disk reads if CRUD operations are +performed on the BIN before the entire BIN is evicted, because only one BIN +fetch rather than two is needed. Disk writes are also reduced to some degree. +The performance improvement applies only when BINs are being evicted from +cache. The improvement is significant when CRUD operations address a non-random +subset of the keys in the data set. +

    +As part of the performance improvement work, the following statistics were +added. +

      +
    • nCachedBINDeltas: EnvironmentStats.getNCachedBINDeltas + -- Number of BIN-deltas (partial BINs) in cache. +
    • +
    • nBINDeltasFetchMiss: + EnvironmentStats.getNBINDeltasFetchMiss -- Number of BIN-deltas + fetched to satisfy btree operations. +
    • +
    • nBINsMutated: EnvironmentStats.getNBINsMutated -- The + number of BINs mutated to BIN-deltas by eviction. +
    • +
    • lastCheckpointInterval: + EnvironmentStats.getLastCheckpointInterval -- Byte length from + last checkpoint start to the previous checkpoint start. +
    • +
    +

    +In addition, the EnvironmentConfig.TREE_MAX_DELTA param has been deprecated. +As of JE 5.0, the benefit from logging BIN-deltas is unrelated to the number of +deltas that have been logged since the last full BIN. To configure BIN-delta +logging, use EnvironmentConfig.TREE_BIN_DELTA. +

    +[#22662] (6.0.5) +
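    +The new statistics are read in the usual way (a sketch; 'env' is an open +Environment): +
    +    EnvironmentStats stats = env.getStats(null);
    +    System.out.println("Cached BIN-deltas:      " + stats.getNCachedBINDeltas());
    +    System.out.println("BIN-delta fetch misses: " + stats.getNBINDeltasFetchMiss());
    +    System.out.println("BINs mutated to deltas: " + stats.getNBINsMutated());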


  10. + +
  11. +An optimization for Databases with sorted duplicates configured has been made +to improve log cleaning performance. Records in duplicates databases need no +longer be tracked or processed by the log cleaner, which reduces cleaning costs +significantly when duplicates databases are used for a significant portion of a +data set, for example, as secondary index databases. +

    +As described under 'Upgrading from JE 5.0 or earlier' at the top of this +document, to support this cleaner optimization a change was made involving +partial Btree and duplicate comparators. Partial comparators are an advanced +feature that few applications use. As of JE 6.0, using partial comparators is +not recommended. Applications that do use partial comparators must now change +their comparator classes to implement the new PartialComparator tag interface, +before running the application with JE 6. Failure to do so may cause incorrect +behavior during transaction aborts. See the PartialComparator javadoc for more +information. +
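    +The required change is only the addition of the tag interface; the comparison +logic itself is unchanged. A sketch follows (the prefix comparison shown is an +arbitrary example of partial-comparator logic): +
    +    public class MyPartialComparator
    +            implements Comparator<byte[]>, PartialComparator, Serializable {
    +
    +        public int compare(byte[] key1, byte[] key2) {
    +            // Example: compare only a leading 4-byte key prefix.
    +            int n = Math.min(4, Math.min(key1.length, key2.length));
    +            for (int i = 0; i < n; i += 1) {
    +                int diff = (key1[i] & 0xff) - (key2[i] & 0xff);
    +                if (diff != 0) {
    +                    return diff;
    +                }
    +            }
    +            return 0;
    +        }
    +    }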

    +[#22864] (6.0.5) +


  12. + +
  13. +Fixed a bug that sometimes caused an uncommitted record deletion performed +in one transaction to be visible (as a NOTFOUND result) to an operation +performed in another transaction. This bug applies to the use of +Database.delete and PrimaryIndex.delete. It does not apply to the use of +SecondaryDatabase.delete, SecondaryIndex.delete, or the use of a cursor to +perform a deletion. Note that this problem is distinct from a similar bug that +was fixed in JE 5.0.98 ([#22892]). +

    +[#23132] (6.0.5) +


  14. + +
  15. +Modified the algorithm that protects cleaned log files from deletion to +consider the relative cost of replication replay versus network restore, +as well as available disk space. When JE HA decides whether to delete +cleaned log files, it uses information it stores about the progress of +replication replay for each electable replica to retain useful log files +even if the replicas are offline, subject to +the ReplicationConfig.REP_STREAM_TIMEOUT parameter. The +system does not store information about replication progress for +secondary replicas, though, so a different approach has been added. +

    +The modified algorithm estimates the costs of replication replay and +network restore, and protects log files from deletion that could be used +for replay if there is sufficient disk space and replay would be less +expensive than network restore. These computations apply to all +replicas, but are particularly useful for secondary replicas, for which +log files will not otherwise be retained if the replicas become +temporarily unreachable. Note that disk space calculations are only +performed when running with Java 7 or later. +

    +Two new ReplicationConfig parameters were added: +

      +
    • REPLAY_COST_PERCENT - The cost of replaying the + replication stream as compared to the cost of performing a network + restore. +
    • REPLAY_FREE_DISK_PERCENT - The target amount of free + disk space to maintain when selecting log files to retain for use in + replay. +
    +

    +[#22575] (6.0.5) +
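    +Both parameters are set like any other HA configuration parameter; the values +below are illustrative only: +
    +    repConfig.setConfigParam(ReplicationConfig.REPLAY_COST_PERCENT, "150");
    +    repConfig.setConfigParam(ReplicationConfig.REPLAY_FREE_DISK_PERCENT, "10");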


  16. + +
  17. +An improvement was made to the calculation of log utilization to avoid +under-cleaning or over-cleaning. For example, when log utilization was +estimated to be lower than actual utilization, unnecessary over-cleaning would +occur, which could reduce performance. Or when log utilization was estimated +to be higher than actual utilization, under-cleaning would prevent reclaiming +unused disk space. +

    +To prevent these problems, the size of each logged record is now stored in the +Btree BINs (bottom internal nodes), so that utilization can be calculated +correctly during record updates and deletions, while still avoiding a fetch of +the old version of the record. With this change, the utilization adjustment +facility in the log cleaner, which attempted to compensate for this problem by +estimating utilization, is no longer needed by most applications. +

    +Therefore the EnvironmentConfig.CLEANER_ADJUST_UTILIZATION parameter is now +false by default rather than true, and will be disabled completely in a future +version of JE. For more information, see the javadoc for this parameter. +

    +[#22275] (6.0.7) +


  18. + +
  19. +The helper hosts parameter used in JE HA replication is now +mutable. Accordingly, the set/getHelperHosts() methods and the +HELPER_HOST definition in com.sleepycat.je.rep.ReplicationConfig have +been moved to their parent class, ReplicationMutableConfig. The change +is fully link and source compatible. +[#22753] (6.0.7) +
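    +Because the parameter is now mutable, it can be changed on an open environment +(a sketch; the host list is illustrative): +
    +    ReplicationMutableConfig mutableConfig = repEnv.getRepMutableConfig();
    +    mutableConfig.setHelperHosts("host1:5001,host2:5002");
    +    repEnv.setRepMutableConfig(mutableConfig);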

  20. + +
  21. +Improved the performance of eviction by removing a bottleneck that was causing +thread contention. Previously, for workloads with heavy eviction, threads were +often waiting on a mutex in the TargetSelector.selectIN method. This impacted +not only JE's dedicated background threads, but also application threads that +were participating in critical eviction. A new approach is used that +dramatically reduces thread contention and increases performance (compared to +JE 5 and earlier) for such workloads. +

    +In addition, the new eviction approach implements a more accurate LRU which +ensures that dirty nodes are evicted last and thereby reduces unnecessary +logging. +

    +As part of this change, the following configuration parameters were deprecated +and are ignored by JE: +

    +    EnvironmentConfig.EVICTOR_NODES_PER_SCAN
    +    EnvironmentConfig.EVICTOR_LRU_ONLY
    +
    +And the following configuration parameter was added: +
    +    EnvironmentConfig.EVICTOR_N_LRU_LISTS
    +
    +[#23063] (6.0.7) +
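    +The new parameter is set in the usual way; the value below is illustrative +only (see the parameter's javadoc for guidance): +
    +    envConfig.setConfigParam(EnvironmentConfig.EVICTOR_N_LRU_LISTS, "8");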

  22. + +
  23. +A change was made involving the charset for internal text (messages) that +appear in the JE log (.jdb files). Previously, the default JVM charset was +used. When dumping the log with DbPrintLog (e.g., for debugging purposes), +if the default JVM charset was different than the one at the time the log was +written, the text messages would be garbled. For example, this occurred when +the log was written with an EBCDIC charset and then dumped with a UTF8 charset. +This has been fixed by always writing and reading text in the UTF8 charset. +[#15296] (6.0.8) +

  24. + +
  25. +A new HA configuration parameter: +com.sleepycat.je.rep.ReplicationConfig.BIND_INADDR_ANY was added. This +parameter permits binding of the port used by HA to all the local interfaces on +the host. The javadoc associated with this configuration parameter provides +further details. +[#23437] (6.0.9) +
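    +For example (a sketch): +
    +    repConfig.setConfigParam(ReplicationConfig.BIND_INADDR_ANY, "true");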

  26. + +
  27. +Fixed a bug that could, under rare conditions (primarily frequent failovers), +cause the following exception in an HA environment. +
    +Caused by: com.sleepycat.je.EnvironmentFailureException: (JE 5.0.97)
    +node2(2):foo\node2 Read invisible log entry at 0x0/0xcb776
    +hdr type="INS_LN_TX/8" vlsn v="19,373" isReplicated="1" isInvisible="1"
    +prev="0xcb74c" size="17" cksum="2626620732"
    +LOG_INTEGRITY: Log information is incorrect, problem is likely persistent.
    +fetchTarget of 0x0/0xcb776 parent IN=29 IN class=com.sleepycat.je.tree.BIN
    +lastFullVersion=0x0/0xf154c lastLoggedVersion=0x0/0xf588e
    +parent.getDirty()=true state=3
    +at com.sleepycat.je.log.LogManager.getLogEntryFromLogSource(LogManager.java:1054)
    +at com.sleepycat.je.log.LogManager.getLogEntry(LogManager.java:906)
    +at com.sleepycat.je.log.LogManager.getLogEntryAllowInvisibleAtRecovery(LogManager.java:867)
    +at com.sleepycat.je.tree.IN.fetchTarget(IN.java:1427)
    +at com.sleepycat.je.tree.BIN.fetchTarget(BIN.java:1250)
    +at com.sleepycat.je.recovery.RecoveryManager.undo(RecoveryManager.java:2415)
    +at com.sleepycat.je.recovery.RecoveryManager.rollbackUndo(RecoveryManager.java:2268)
    +...
    +
    +[#22848] (6.0.10) +

  28. + +
  29. +EntityStore.close has been changed to fix a bug that caused a memory leak when +the Database could not be closed, for example, if it had open cursors. The +javadoc for this method was also updated to warn that it must be called to +avoid memory leaks, even when the Environment is invalid. +[#23462] (6.0.10) +
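    +The safe pattern, per the updated javadoc, is to close the store +unconditionally (a sketch; the store name and config are illustrative): +
    +    EntityStore store = new EntityStore(env, "myStore", storeConfig);
    +    try {
    +        // ... use the store ...
    +    } finally {
    +        store.close(); // required even when the Environment is invalid
    +    }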

  30. + +
+ + + diff --git a/docs/collections/tutorial/BasicProgram.html b/docs/collections/tutorial/BasicProgram.html new file mode 100644 index 0000000..19b84e9 --- /dev/null +++ b/docs/collections/tutorial/BasicProgram.html @@ -0,0 +1,454 @@ + + + + + + Chapter 2.  The Basic Program + + + + + + + + + +
+
+
+
+

Chapter 2.  + The Basic Program +

+
+
+
+ +

+ The Basic example is a minimal implementation of the shipment + program. It writes and reads the part, supplier and shipment + databases. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Defining Serialized Key and Value Classes +

+
+
+
+

+ The key and value classes for each type of shipment record — + Parts, Suppliers and Shipments — are defined as ordinary Java + classes. In this example the serialized form of the key and value + objects is stored directly in the database. Therefore these classes + must implement the standard Java java.io.Serializable interface. A + compact form of Java serialization is used that does not duplicate + the class description in each record. Instead the class + descriptions are stored in the class catalog store, which is + described in the next section. But in all other respects, standard + Java serialization is used. +

+

+ An important point is that instances of these classes are passed + and returned by value, not by reference, when they are stored and + retrieved from the database. This means that changing a key or + value object does not automatically change the database. The object + must be explicitly stored in the database after changing it. To + emphasize this point the key and value classes defined here have no + field setter methods. Setter methods can be defined, but it is + important to remember that calling a setter method will not cause + the change to be stored in the database. How to store and retrieve + objects in the database will be described later. +

+

+ Each key and value class contains a toString method that is used + to output the contents of the object in the example program. This + is meant for illustration only and is not required for database + objects in general. +

+

+ Notice that the key and value classes defined below do not + contain any references to com.sleepycat packages. An + important characteristic of these classes is that they are + independent of the database. Therefore, they may be easily used in + other contexts and may be defined in a way that is compatible with + other tools and libraries. +

+

+ The PartKey class contains only the Part's Number field. +

+

+ Note that PartKey (as well as SupplierKey below) + contains only a single String field. Instead of defining a specific + class for each type of key, the String class by itself could have + been used. Specific key classes were used to illustrate strong + typing and for consistency in the example. The use of a plain + String as an index key is illustrated in the next example program. + It is up to the developer to use either plain Java classes such + as String and Integer, or strongly typed classes. When + there is the possibility that fields will be added later to a key + or value, a specific class should be used. + +

+ +
import java.io.Serializable;
+
+public class PartKey implements Serializable
+{
+    private String number;
+
+    public PartKey(String number) {
+        this.number = number;
+    }
+
+    public final String getNumber() {
+        return number;
+    }
+
+    public String toString() {
+        return "[PartKey: number=" + number + ']';
+    }
+} 
+

+ The PartData class contains the Part's Name, Color, + Weight and City fields. +

+ +
import java.io.Serializable;
+
+public class PartData implements Serializable
+{
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public PartData(String name, String color, Weight weight, String city)
+    {
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final String getColor()
+    {
+        return color;
+    }
+
+    public final Weight getWeight()
+    {
+        return weight;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "[PartData: name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + ']';
+    }
+} 
+

+ The Weight class is also defined here, and is used as the + type of the Part's Weight field. Just as in standard Java + serialization, nothing special is needed to store nested objects as + long as they are all Serializable. +

+ +
import java.io.Serializable;
+
+public class Weight implements Serializable
+{
+    public final static String GRAMS = "grams";
+    public final static String OUNCES = "ounces";
+
+    private double amount;
+    private String units;
+
+    public Weight(double amount, String units)
+    {
+        this.amount = amount;
+        this.units = units;
+    }
+
+    public final double getAmount()
+    {
+        return amount;
+    }
+
+    public final String getUnits()
+    {
+        return units;
+    }
+
+    public String toString()
+    {
+        return "[" + amount + ' ' + units + ']';
+    }
+} 
+

+ The SupplierKey class contains the Supplier's Number + field. +

+ +
import java.io.Serializable;
+
+public class SupplierKey implements Serializable
+{
+    private String number;
+
+    public SupplierKey(String number)
+    {
+        this.number = number;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public String toString()
+    {
+        return "[SupplierKey: number=" + number + ']';
+    }
+} 
+

+ The SupplierData class contains the Supplier's Name, + Status and City fields. +

+ +
import java.io.Serializable;
+
+public class SupplierData implements Serializable
+{
+    private String name;
+    private int status;
+    private String city;
+
+    public SupplierData(String name, int status, String city)
+    {
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final int getStatus()
+    {
+        return status;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "[SupplierData: name=" + name +
+               " status=" + status +
+               " city=" + city + ']';
+    }
+}
+	
+

+ The ShipmentKey class contains the keys of both the Part + and Supplier. +

+ +
import java.io.Serializable;
+
+public class ShipmentKey implements Serializable
+{
+    private String partNumber;
+    private String supplierNumber;
+
+    public ShipmentKey(String partNumber, String supplierNumber)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    }
+
+    public final String getPartNumber()
+    {
+        return partNumber;
+    }
+
+    public final String getSupplierNumber()
+    {
+        return supplierNumber;
+    }
+
+    public String toString()
+    {
+        return "[ShipmentKey: supplier=" + supplierNumber +
+                " part=" + partNumber + ']';
+    }
+} 
+

+ The ShipmentData class contains only the Shipment's + Quantity field. Like PartKey and SupplierKey, + ShipmentData contains only a single primitive field. + Therefore the Integer class could have been used instead of + defining a specific value class. +

+ +
import java.io.Serializable;
+
+public class ShipmentData implements Serializable
+{
+    private int quantity;
+
+    public ShipmentData(int quantity)
+    {
+        this.quantity = quantity;
+    }
+
+    public final int getQuantity()
+    {
+        return quantity;
+    }
+
+    public String toString()
+    {
+        return "[ShipmentData: quantity=" + quantity + ']';
+    }
+} 
+
+
+ + + diff --git a/docs/collections/tutorial/BerkeleyDB-JE-Collections.pdf b/docs/collections/tutorial/BerkeleyDB-JE-Collections.pdf new file mode 100644 index 0000000..63e93df Binary files /dev/null and b/docs/collections/tutorial/BerkeleyDB-JE-Collections.pdf differ diff --git a/docs/collections/tutorial/Entity.html b/docs/collections/tutorial/Entity.html new file mode 100644 index 0000000..bd449e8 --- /dev/null +++ b/docs/collections/tutorial/Entity.html @@ -0,0 +1,356 @@ + + + + + + Chapter 4.  Using Entity Classes + + + + + + + + + +
+
+
+
+

Chapter 4.  + Using Entity Classes +

+
+
+
+ +

+ In the prior examples, the keys and values of each store were + represented using separate classes. For example, a PartKey + and a PartData class were used. Many times it is desirable + to have a single class representing both the key and the value, for + example, a Part class. +

+

+ Such a combined key and value class is called an entity + class and is used along with an entity binding. Entity + bindings combine a key and a value into an entity when reading a + record from a collection, and split an entity into a key and a + value when writing a record to a collection. Entity bindings are + used in place of value bindings, and entity objects are used with + collections in place of value objects. +

+

+ Some reasons for using entities are: +

+
+
    +
  • +

    + When the key is a property of an entity object representing the + record as a whole, the object's identity and concept are often + clearer than with key and value objects that are disjoint. +

    +
  • +
  • +

    + A single entity object per record is often more convenient to + use than two objects. +

    +
  • +
+
+

+ Of course, instead of using an entity binding, you could simply + create the entity yourself after reading the key and value from a + collection, and split the entity into a key and value yourself + before writing it to a collection. But this would detract from the + convenience of using the Java collections API. It is convenient + to obtain a Part object directly from + Map.get + + and to add a Part object using + Set.add. + Collections having entity bindings can be used naturally without + combining and splitting objects each time a collection method is + called; however, an entity binding class must be defined by the + application. +
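+
+ To make this concrete, a minimal sketch of such an entity binding for + Part is shown below, assuming the serialized PartKey and PartData + classes from Chapter 2 (the example program defines its actual bindings + in the SampleViews class). +
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialSerialBinding;
+
+public class PartBinding extends SerialSerialBinding<PartKey, PartData, Part>
+{
+    public PartBinding(ClassCatalog catalog)
+    {
+        super(catalog, PartKey.class, PartData.class);
+    }
+
+    // Combine a key and a value into an entity when reading a record.
+    public Part entryToObject(PartKey key, PartData data)
+    {
+        return new Part(key.getNumber(), data.getName(), data.getColor(),
+                        data.getWeight(), data.getCity());
+    }
+
+    // Split an entity back into its key when writing a record...
+    public PartKey objectToKey(Part part)
+    {
+        return new PartKey(part.getNumber());
+    }
+
+    // ...and into its value.
+    public PartData objectToData(Part part)
+    {
+        return new PartData(part.getName(), part.getColor(),
+                            part.getWeight(), part.getCity());
+    }
+} 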

+

+ In addition to showing how to use entity bindings, this example + illustrates a key feature of all bindings: Bindings are independent + of database storage parameters and formats. Compare this example to + the prior Index example and you'll see that the Sample and + SampleViews classes have been changed to use entity + bindings, but the SampleDatabase class was not changed at + all. In fact, the Entity program and the Index program can be used + interchangeably to access the same physical database files. This + demonstrates that bindings are only a "view" onto the physical + stored data. +

+

+ Warning: When using multiple bindings for the same + database, it is the application's responsibility to ensure that the + same format is used for all bindings. For example, a serial binding + and a tuple binding cannot be used to access the same records. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Defining Entity Classes +

+
+
+
+

+ As described in the prior section, entity classes are + combined key/value classes that are managed by entity bindings. In + this example the Part, Supplier and Shipment + classes are entity classes. These classes contain fields that are a + union of the fields of the key and value classes that were defined + earlier for each store. +

+

+ In general, entity classes may be defined in any way desired by + the application. The entity binding, which is also defined by the + application, is responsible for mapping between key/value objects + and entity objects. +

+

+ The Part, Supplier and Shipment + entity classes are + defined below. +

+

+ An important difference between the entity classes defined here + and the key and value classes defined earlier is that the entity + classes are not serializable (do not implement the + Serializable + + interface). This is because the entity classes are not directly + stored. The entity binding decomposes an entity object into key and + value objects, and only the key and value objects are serialized + for storage. +

+

+ One advantage of using entities can already be seen in the + toString() method of the classes below. These return debugging + output for the combined key and value, and will be used later to + create a listing of the database that is more readable than in the + prior examples. +

+ +
public class Part
+{
+    private String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final String getColor()
+    {
+        return color;
+    }
+
+    public final Weight getWeight()
+    {
+        return weight;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + '.';
+    }
+} 
+ +
public class Supplier
+{
+    private String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final int getStatus()
+    {
+        return status;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + '.';
+    }
+}  
+ +
public class Shipment
+{
+    private String partNumber;
+    private String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    public final String getPartNumber()
+    {
+        return partNumber;
+    }
+
+    public final String getSupplierNumber()
+    {
+        return supplierNumber;
+    }
+
+    public final int getQuantity()
+    {
+        return quantity;
+    }
+
+    public String toString()
+    {
+        return "Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + '.';
+    }
+}  
+
+
+ + + diff --git a/docs/collections/tutorial/SerializableEntity.html b/docs/collections/tutorial/SerializableEntity.html new file mode 100644 index 0000000..c333cda --- /dev/null +++ b/docs/collections/tutorial/SerializableEntity.html @@ -0,0 +1,345 @@ + + + + + + Chapter 6.  Using Serializable Entities + + + + + + + + + +
+
+
+
+

Chapter 6.  + Using Serializable Entities +

+
+
+
+ +

+ In the prior examples that used entities (the Entity and Tuple examples) you + may have noticed the redundancy between the serializable value + classes and the entity classes. An entity class by definition + contains all properties of the value class as well as all + properties of the key class. +

+

+ When using serializable values it is possible to remove this + redundancy by changing the entity class in two ways: +

+
+
    +
  • +

    + Make the entity class serializable, so it can be used in place + of the value class. +

    +
  • +
  • +

    + Make the key fields transient, so they are not redundantly + stored in the record. +

    +
  • +
+
+

+ The modified entity class can then serve double-duty: It can be + serialized and stored as the record value, and it can be used as + the entity class as usual along with the Java collections API. The + PartData, SupplierData and ShipmentData + classes can then be removed. +

+

+ Transient fields are defined in Java as fields that are not + stored in the serialized form of an object. Therefore, when an + object is deserialized the transient fields must be explicitly + initialized. Since the entity binding is responsible for creating + entity objects, it is the natural place to initialize the transient + key fields. +

+

+ Note that it is not strictly necessary to make the key fields of + a serializable entity class transient. If this is not done, the key + will simply be stored redundantly in the record's value. This extra + storage may or may not be acceptable to an application. But since + we are using tuple keys and an entity binding class must be + implemented anyway to extract the key from the entity, it is + sensible to use transient key fields to reduce the record size. Of + course there may be a reason that transient fields are not desired; + for example, if an application wants to serialize the entity + objects for other purposes, then using transient fields should be + avoided. +
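+
+ For illustration, the relevant fragment of such an entity binding is + sketched below (assuming the tuple-key/serial-value style of binding used + in the prior chapter): the binding restores the transient key field by + calling setKey() after the value has been deserialized. +
+
+// Inside a TupleSerialBinding<Part, Part> subclass (a sketch):
+public Part entryToObject(TupleInput keyInput, Part dataInput)
+{
+    // The deserialized Part is missing its transient key field;
+    // restore it from the tuple key before returning the entity.
+    dataInput.setKey(keyInput.readString());
+    return dataInput;
+}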

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Using Transient Fields in an Entity Class +

+
+
+
+

+ The entity classes in this example are redefined such that they + can be used both as serializable value classes and as entity + classes. Compared to the prior example there are three changes to + the Part, Supplier and Shipment entity + classes: +

+
+
    +
  • +

    + Each class now implements the Serializable + interface. +

    +
  • +
  • +

    + The key fields in each class are declared as transient. +

    +
  • +
  • +

    + A package-private setKey() method is added to each class + for initializing the transient key fields. This method will be + called from the entity bindings. +

    +
  • +
+
+ +
import java.io.Serializable;
+...
+public class Part implements Serializable
+{
+    private transient String number;
+    private String name;
+    private String color;
+    private Weight weight;
+    private String city;
+
+    public Part(String number, String name, String color, Weight weight,
+                String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.color = color;
+        this.weight = weight;
+        this.city = city;
+    }
+
+    final void setKey(String number)
+    {
+        this.number = number;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final String getColor()
+    {
+        return color;
+    }
+
+    public final Weight getWeight()
+    {
+        return weight;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Part: number=" + number +
+               " name=" + name +
+               " color=" + color +
+               " weight=" + weight +
+               " city=" + city + '.';
+    }
+}
+...
+public class Supplier implements Serializable
+{
+    private transient String number;
+    private String name;
+    private int status;
+    private String city;
+
+    public Supplier(String number, String name, int status, String city)
+    {
+        this.number = number;
+        this.name = name;
+        this.status = status;
+        this.city = city;
+    }
+
+    void setKey(String number)
+    {
+        this.number = number;
+    }
+
+    public final String getNumber()
+    {
+        return number;
+    }
+
+    public final String getName()
+    {
+        return name;
+    }
+
+    public final int getStatus()
+    {
+        return status;
+    }
+
+    public final String getCity()
+    {
+        return city;
+    }
+
+    public String toString()
+    {
+        return "Supplier: number=" + number +
+               " name=" + name +
+               " status=" + status +
+               " city=" + city + '.';
+    }
+}
+...
+public class Shipment implements Serializable
+{
+    private transient String partNumber;
+    private transient String supplierNumber;
+    private int quantity;
+
+    public Shipment(String partNumber, String supplierNumber, int quantity)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+        this.quantity = quantity;
+    }
+
+    void setKey(String partNumber, String supplierNumber)
+    {
+        this.partNumber = partNumber;
+        this.supplierNumber = supplierNumber;
+    } 
+
+    public final String getPartNumber()
+    {
+        return partNumber;
+    }
+
+    public final String getSupplierNumber()
+    {
+        return supplierNumber;
+    }
+
+    public final int getQuantity()
+    {
+        return quantity;
+    }
+
+    public String toString()
+    {
+        return "Shipment: part=" + partNumber +
+                " supplier=" + supplierNumber +
+                " quantity=" + quantity + '.';
+    }
+}
+	
+
+
+ + + diff --git a/docs/collections/tutorial/SerializedObjectStorage.html b/docs/collections/tutorial/SerializedObjectStorage.html new file mode 100644 index 0000000..423706f --- /dev/null +++ b/docs/collections/tutorial/SerializedObjectStorage.html @@ -0,0 +1,85 @@ + + + + + + Serialized Object Storage + + + + + + + + +
+
+
+
+

+ Serialized Object Storage +

+
+
+
+

+ Serialization of an object graph includes class information as + well as instance information. If more than one instance of the same + class is serialized in separate serialization operations, then the + class information exists more than once. To eliminate this + inefficiency the + StoredClassCatalog + + class will store the class format for all database records stored + using a + SerialBinding. + Refer to the + ship sample code for examples (the class + SampleDatabase in + + <INSTALL_DIR>/examples/collections/ship/basic/SampleDatabase.java + is a good place to start). +

+
+ + + diff --git a/docs/collections/tutorial/Summary.html b/docs/collections/tutorial/Summary.html new file mode 100644 index 0000000..4378b31 --- /dev/null +++ b/docs/collections/tutorial/Summary.html @@ -0,0 +1,191 @@ + + + + + + Chapter 7.  Summary + + + + + + + + + +
+
+
+
+

Chapter 7.  + Summary +

+
+
+
+

+ In summary, the JE Collections API tutorial has + demonstrated how to create different types of bindings, as well as + how to use the basic facilities of the JE Collections API: + the environment, databases, secondary indices, collections, and + transactions. The final approach illustrated by the last example + program, Serializable Entity, uses tuple keys and serial entity + values. Hopefully it is clear that any type of object-to-data + binding may be implemented by an application and used along with + standard Java collections. +

+

+ The following table summarizes the differences between the + examples in the tutorial. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ExampleKeyValueEntityComments
+ + The Basic Program + + SerialSerialNoThe shipment program
+ + Using Secondary Indices and Foreign keys + + SerialSerialNoSecondary indices and foreign keys
+ + Using Entity Classes + + SerialSerialYesCombining the key and value in a single object
+ + Using Tuples + + TupleSerialYesCompact ordered keys
+ + Using Serializable Entities + + TupleSerialYesOne serializable class for entities and values
+
+

+ Having completed this tutorial, you may want to explore how other types of + bindings can be implemented. The bindings shown in this tutorial + are all external bindings, meaning that the data classes + themselves contain none of the binding implementation. It is also + possible to implement internal bindings, where the data + classes implement the binding. +

+

+ Internal bindings are called marshalled bindings in the + JE Collections API, and in this model each data class + implements a marshalling interface. A single external binding class + that understands the marshalling interface is used to call the + internal bindings of each data object, and therefore the overall + model and API is unchanged. To learn about marshalled bindings, see + the marshal and factory examples that + came with your JE distribution (you can find them in + <INSTALL_DIR>/examples/collections/ship + where <INSTALL_DIR> is the location where you + unpacked your JE distribution). These examples continue building on + the example programs used in the tutorial. The Marshal program is + the next program following the Serializable Entity program, and the + Factory program follows the Marshal program. The source code + comments in these examples explain their differences. +

+
+ + + diff --git a/docs/collections/tutorial/Tuple.html b/docs/collections/tutorial/Tuple.html new file mode 100644 index 0000000..57b81d2 --- /dev/null +++ b/docs/collections/tutorial/Tuple.html @@ -0,0 +1,209 @@ + + + + + + Chapter 5.  Using Tuples + + + + + + + + + +
+
+
+
+

Chapter 5.  + Using Tuples +

+
+
+
+ +

+ JE Collections API tuples are sequences of + primitive Java data types, for example, integers and strings. The + tuple format is a binary format that can be used + to store keys and/or values. +

+

+ Tuples are useful as keys because they have a meaningful sort + order, while serialized objects do not. This is because the binary + data for a tuple is written in such a way that its raw byte + ordering provides a useful sort order. For example, strings in + tuples are written with a null terminator rather than with a + leading length. +

+

+ Tuples are useful as keys or values when reducing the + record size to a minimum is important. A tuple is significantly + smaller than an equivalent serialized object. However, unlike + serialized objects, tuples cannot contain complex data types and + are not easily extended except by adding fields at the end of the + tuple. +

+

+ Whenever a tuple format is used, except when the key or value + class is a Java primitive wrapper class, a tuple binding class must + be implemented to map between the Java object and the tuple fields. + Because of this extra requirement, and because tuples are not + easily extended, a useful technique shown in this example is to use + tuples for keys and serialized objects for values. This provides + compact ordered keys but still allows arbitrary Java objects as + values, and avoids implementing a tuple binding for each value + class. +
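+
+ For example, a minimal sketch of a tuple binding for a key class with + two string fields, like ShipmentKey, could look as follows. +
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+public class ShipmentKeyBinding extends TupleBinding<ShipmentKey>
+{
+    // Read the tuple fields, in order, to construct the key object.
+    public ShipmentKey entryToObject(TupleInput input)
+    {
+        String partNumber = input.readString();
+        String supplierNumber = input.readString();
+        return new ShipmentKey(partNumber, supplierNumber);
+    }
+
+    // Write the key object's fields in the same order.
+    public void objectToEntry(ShipmentKey object, TupleOutput output)
+    {
+        output.writeString(object.getPartNumber());
+        output.writeString(object.getSupplierNumber());
+    }
+} 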

+

+ Compare this example to the prior Entity example and you'll see + that the Sample class has not changed. When changing a + database format, while new bindings are needed to map key and value + objects to the new format, the application using the objects often + does not need to be modified. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Using the Tuple Format +

+
+
+
+

+ Tuples are sequences of primitive Java values that can be + written to, and read from, the raw data bytes of a stored record. + The primitive values are written or read one at a time in sequence, + using the JE Collections API + TupleInput + + and + TupleOutput + + classes. These classes are very similar to the standard Java + DataInput + + and + DataOutput + + interfaces. The primary difference is the binary format of the + data, which is designed for sorting in the case of tuples. +

+

+ For example, to read and write a tuple containing two string + values, the following code snippets could be used. +

+ +
import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+TupleInput input;
+TupleOutput output;
+...
+String partNumber = input.readString();
+String supplierNumber = input.readString();
+...
+output.writeString(partNumber);
+output.writeString(supplierNumber);  
+

+ Since a tuple is defined as an ordered sequence, reading and writing order must match. If the wrong data type is read (an integer instead of a string, for example), an exception may be thrown or, at a minimum, invalid data will be read.

+

+ When the tuple format is used, bindings and key creators must + read and write tuples using the tuple API as shown above. This will + be illustrated in the next two sections. +

+
+
+ + + diff --git a/docs/collections/tutorial/UsingCollectionsAPI.html b/docs/collections/tutorial/UsingCollectionsAPI.html new file mode 100644 index 0000000..20ea3b6 --- /dev/null +++ b/docs/collections/tutorial/UsingCollectionsAPI.html @@ -0,0 +1,358 @@ + + + + + + Using the JE JE Collections API + + + + + + + + + +
+
+
+
+

+ Using the JE Collections API

+
+
+
+ +

+ An Environment manages the resources for one or more data stores. A Database object represents a single database and is created via a method on the environment object. SecondaryDatabase objects represent indices associated with a primary database. Primary and secondary databases are then used to create stored collection objects, as described in Using Stored Collections.

+
+
+
+
+

+ Using Transactions +

+
+
+
+

+ Once you have an environment, one or more databases, and one or more stored collections, you are ready to access (read and write) stored data. For a transactional environment, a transaction must be started before accessing data, and must be committed or aborted after access is complete. The JE Collections API provides several ways of managing transactions.

+

+ The recommended technique is to use the TransactionRunner class along with your own implementation of the TransactionWorker interface. TransactionRunner will call your TransactionWorker implementation class to perform the data access or work of the transaction; a minimal sketch follows the list below. This technique has the following benefits:

+
+
    +
  • +

    + Transaction exceptions will be handled transparently and + retries will be performed when deadlocks are detected. +

    +
  • +
  • +

    + The transaction will automatically be committed if your + TransactionWorker.doWork() + + method returns normally, or will be + aborted if doWork() throws an exception. +

    +
  • +
  • +

    + TransactionRunner can be used for non-transactional + environments as well, allowing you to write your application + independently of the environment. +

    +
  • +
+
+
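+ For example, a minimal sketch of this technique might look as follows. It assumes that env is an open transactional Environment, as created earlier in the tutorial; the work itself is only hinted at in a comment.
+
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+...
+TransactionRunner runner = new TransactionRunner(env);
+runner.run(new TransactionWorker()
+{
+    public void doWork() throws Exception
+    {
+        // read and write stored collections here; the runner commits
+        // on normal return and aborts if an exception is thrown
+    }
+});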

+ If you don't want to use TransactionRunner, the alternative is to use the CurrentTransaction class directly, as sketched after the following steps.

+
+
    +
  1. +

    + Obtain a CurrentTransaction instance by calling the + CurrentTransaction.getInstance + + method. The instance returned + can be used by all threads in a program. +

    +
  2. +
  3. +

    + Use + CurrentTransaction.beginTransaction(), + CurrentTransaction.commitTransaction() + + and + CurrentTransaction.abortTransaction() + + to directly begin, commit and abort transactions. +

    +
  4. +
+
+
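+ A minimal sketch of the CurrentTransaction alternative, again assuming an open transactional Environment named env:
+
+import com.sleepycat.collections.CurrentTransaction;
+...
+CurrentTransaction currentTxn = CurrentTransaction.getInstance(env);
+currentTxn.beginTransaction(null);   // null selects a default configuration
+try
+{
+    // access stored collections here
+    currentTxn.commitTransaction();
+}
+catch (Exception e)
+{
+    currentTxn.abortTransaction();
+    // retry here if a lock conflict was detected (see below)
+}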

+ If you choose to use CurrentTransaction directly you must handle LockConflictException and perform retries yourself. Also note that CurrentTransaction may only be used in a transactional environment.

+

+ The JE Collections API supports transaction auto-commit. If no transaction is active and a write operation is requested for a transactional database, auto-commit is used automatically.

+
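+ For example, assuming suppliers is the transactional stored map used elsewhere in this tutorial, the single put() below runs in its own auto-committed transaction when no transaction is active (the supplier values are made up for illustration):
+
+suppliers.put(new SupplierKey("S6"),
+              new SupplierData("Doe", 10, "Oslo"));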

+ The JE Collections API also supports transaction dirty-read via the StoredCollections class. When dirty-read is enabled for a collection, data will be read that has been modified by another transaction but not yet committed. Using dirty-read can improve concurrency since reading will not wait for other transactions to complete. For a non-transactional container, dirty-read has no effect. See StoredCollections for how to create a dirty-read collection.

+
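+ As a sketch, a dirty-read view can be derived from an existing stored map without changing the original; this assumes map is one of the stored maps created earlier and that the configuredMap method and READ_UNCOMMITTED constant are available in your JE release:
+
+import com.sleepycat.collections.StoredCollections;
+import com.sleepycat.je.CursorConfig;
+import java.util.Map;
+...
+Map dirtyReadMap =
+    StoredCollections.configuredMap(map, CursorConfig.READ_UNCOMMITTED);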
+
+
+
+
+

+ Transaction Rollback +

+
+
+
+

+ When a transaction is aborted (or rolled back) the application is responsible for discarding references to any data objects that were modified during the transaction. Since the JE Collections API treats data by value, not by reference, neither the data objects nor the JE Collections API objects contain status information indicating whether the data objects are (1) in sync with the database, (2) dirty (contain changes that have not been written to the database), (3) stale (were read previously but have become out of sync with changes made to the database), or (4) contain changes that cannot be committed because of an aborted transaction.

+

+ For example, a given data object will reflect the current state of the database after reading it within a transaction. If the object is then modified it will be out of sync with the database. When the modified object is written to the database it will then be in sync again. But if the transaction is aborted the object will again be out of sync with the database. References to objects modified in an aborted transaction should no longer be used; when these objects are needed later they should be read fresh from the database.

+

+ When an existing stored object is to be updated, special care + should be taken to read the data, then modify it, and then write it + to the database, all within a single transaction. If a stale data + object (an object that was read previously but has since been + changed in the database) is modified and then written to the + database, database changes may be overwritten unintentionally. +

+
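+ A sketch of this pattern, reusing the TransactionRunner technique shown earlier (partMap, partKey and the PartData constructor are from this tutorial; the new color value is made up):
+
+runner.run(new TransactionWorker()
+{
+    public void doWork() throws Exception
+    {
+        // read the current value inside the transaction
+        PartData old = (PartData) partMap.get(partKey);
+        // write a modified copy back under the same key
+        partMap.put(partKey, new PartData(old.getName(), "Yellow",
+                                          old.getWeight(), old.getCity()));
+    }
+});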

+ When an application enforces rules about concurrent access to + specific data objects or all data objects, the rules described here + can be relaxed. For example, if the application knows that a + certain object is only modified in one place, it may be able to + reliably keep a current copy of that object. In that case, it is + not necessary to reread the object before updating it. That said, + if arbitrary concurrent access is to be supported, the safest + approach is to always read data before modifying it within a single + transaction. +

+

+ Similar concerns apply to using data that may have become stale. + If the application depends on current data, it should be read fresh + from the database just before it is used. +

+
+
+
+
+
+

+ Access Method Restrictions +

+
+
+
+

+ The BTREE access method is always used for JE databases. Sorted duplicates — more than one record for a single key — are optional.

+

+ The restrictions imposed by the access method on the database + model are: +

+
+
    +
  • +

    + If duplicates are allowed then more than one value may be + associated with the same key. This means that the data store cannot + be strictly considered a map — it is really a multi-map. See + + Using Stored Collections + + for implications on the use of the collection interfaces. +

    +
  • +
  • +

    + If duplicate keys are allowed for a data store then the data + store may not have secondary indices. +

    +
  • +
  • +

    + With sorted duplicates, all values for the same key must be + distinct. +

    +
  • +
+
+

+ See + + Using Stored Collections + + for more information on how access methods impact the use of stored + collections. +

+
+
+ + + diff --git a/docs/collections/tutorial/UsingSecondaries.html b/docs/collections/tutorial/UsingSecondaries.html new file mode 100644 index 0000000..55a9e3b --- /dev/null +++ b/docs/collections/tutorial/UsingSecondaries.html @@ -0,0 +1,447 @@ + + + + + + Chapter 3.  Using Secondary Indices and Foreign keys + + + + + + + + + +
+
+
+
+

Chapter 3.  + Using Secondary Indices and Foreign Keys

+
+
+
+ +

+ In the Basic example, each store has a single primary + key. The Index example extends the Basic example to add the use of + secondary keys and foreign keys. +

+

+ The complete source of the final version of the example program + is included in the Berkeley DB distribution. +

+
+
+
+
+

+ Opening Secondary Key Indices +

+
+
+
+

+ Secondary indices or secondary databases are used + to access a primary database by a key other than the primary key. + Recall that the Supplier Number field is the primary key of the + Supplier database. In this section, the Supplier City field will be + used as a secondary lookup key. Given a city value, we would like + to be able to find the Suppliers in that city. Note that more than + one Supplier may be in the same city. +

+

+ Both primary and secondary databases contain key-value records. + The key of an index record is the secondary key, and its value is + the key of the associated record in the primary database. When lookups by + secondary key are performed, the associated record in the primary + database is transparently retrieved by its primary key and returned + to the caller. +

+

+ Secondary indices are maintained automatically when index key + fields (the City field in this case) are added, modified or removed + in the records of the primary database. However, the application + must implement a + SecondaryKeyCreator + + + that extracts the index key from the database record. +

+

+ It is useful to contrast opening a secondary index with opening a primary database (as described earlier in Opening and Closing Databases).

+
+
    +
  • +

    + A primary database may be associated with one or more secondary + indices. A secondary index is always associated with exactly one + primary database. +

    +
  • +
  • +

    + For a secondary index, a + SecondaryKeyCreator + + + must be implemented by the application to extract the index key + from the record of its associated primary database. +

    +
  • +
  • +

    + A primary database is represented by a + Database + + + object and a secondary index is represented by a + SecondaryDatabase + + + object. The + SecondaryDatabase + + + class extends the + Database + + + class. +

    +
  • +
  • +

    + When a + SecondaryDatabase + + + is created it is associated with a primary + Database + + + object and a + + SecondaryKeyCreator. + + +

    +
  • +
+
+

+ The SampleDatabase class is extended to open the + Supplier-by-City secondary key index. +

+ +
import com.sleepycat.bind.serial.SerialSerialKeyCreator;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+...
+public class SampleDatabase
+{
+    ...
+    private static final String SUPPLIER_CITY_INDEX = 
+        "supplier_city_index";
+    ...
+    private SecondaryDatabase supplierByCityDb;
+    ...
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        secConfig.setKeyCreator(
+            new SupplierByCityKeyCreator(javaCatalog,
+                                         SupplierKey.class,
+                                         SupplierData.class,
+                                         String.class));
+
+        supplierByCityDb = env.openSecondaryDatabase(null, 
+                                                     SUPPLIER_CITY_INDEX,
+                                                     supplierDb,
+                                                     secConfig);
+    ...
+    }
+} 
+

+ A + SecondaryConfig + + + object is used to configure the secondary database. The + SecondaryConfig + + + class extends the + DatabaseConfig + + + class, and most steps for configuring a secondary database are the + same as for configuring a primary database. The main difference in + the example above is that the + SecondaryConfig.setSortedDuplicates() method is called to + allow duplicate index keys. This is how more than one Supplier may + be in the same City. If this property is not specified, the default is + that the index keys of all records must be unique. +

+

+ For a primary database, duplicate keys are not normally used + since a primary database with duplicate keys may not have any + associated secondary indices. If primary database keys are not + unique, there is no way for a secondary key to reference a specific + record in the primary database. +

+

+ Opening a secondary key index requires creating a + + SecondaryKeyCreator. + + + The SupplierByCityKeyCreator class implements the + SecondaryKeyCreator + + + interface and will be defined below. +

+

+ The + SecondaryDatabase + + + object is opened last. If you compare the + openSecondaryDatabase() and openDatabase() methods you'll + notice only two differences: +

+
+
    +
  • +

    + openSecondaryDatabase() has an extra parameter for + specifying the associated primary database. The primary database is + supplierDb in this case. +

    +
  • +
  • +

    + The last parameter of openSecondaryDatabase() is a + SecondaryConfig instead of a DatabaseConfig. +

    +
  • +
+
+

+ How to use the secondary index to access records will be shown + in a later section. +

+

+ The application-defined SupplierByCityKeyCreator class is + shown below. It was used above to configure the secondary + database. +

+ +
public class SampleDatabase
+{
+...
+    private static class SupplierByCityKeyCreator
+        extends SerialSerialKeyCreator
+    {
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                      Class primaryKeyClass,
+                                      Class valueClass,
+                                      Class indexKeyClass)
+        {
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput)
+        {
+            SupplierData supplierData = (SupplierData) valueInput;
+            return supplierData.getCity();
+        }
+    }
+...
+} 
+

+ In general, a key creator class must implement the + SecondaryKeyCreator + + + interface. This interface has methods that operate on the record + data as raw bytes. In practice, it is easiest to use an abstract + base class that performs the conversion of record data to and from + the format defined for the database's key and value. The base class + implements the + SecondaryKeyCreator + + + interface and has abstract methods that must be implemented in turn + by the application. +

+

+ In this example the + SerialSerialKeyCreator + + base class is used because the database record uses the serial + format for both its key and its value. The abstract methods of this + class have key and value parameters of type + Object + + which are automatically converted to and from the raw record data + by the base class. +

+

+ To perform the conversions properly, the key creator must be + aware of all three formats involved: the key format of the primary + database record, the value format of the primary database record, + and the key format of the index record. The + SerialSerialKeyCreator + + constructor is given the base classes for these three formats as + parameters. +

+

+ The SerialSerialKeyCreator.createSecondaryKey method is + given the key and value of the primary database record as + parameters, and it returns the key of the index record. In this + example, the index key is a field in the primary database record + value. Since the record value is known to be a SupplierData + object, it is cast to that class and the city field is + returned. +

+

+ Note that the primaryKeyInput parameter is not used in + the example. This parameter is needed only when an index key is + derived from the key of the primary database record. Normally an + index key is derived only from the primary database record value, + but it may be derived from the key, value or both. +

+

+ The following getter methods return the secondary database + object for use by other classes in the example program. The + secondary database object is used to create Java collections for + accessing records via their secondary keys. +

+ +
public class SampleDatabase
+{
+    ...
+    public final SecondaryDatabase getSupplierByCityDatabase()
+    {
+        return supplierByCityDb;
+    }
+    ...
+} 
+

+ The following statement closes the secondary database. +

+ +
public class SampleDatabase
+{
+    ...
+    public void close()
+        throws DatabaseException {
+
+        supplierByCityDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        javaCatalog.close();
+        env.close();
+    }
+    ...
+} 
+

+ Secondary databases must be closed before closing their + associated primary database. +

+
+
+ + + diff --git a/docs/collections/tutorial/UsingStoredCollections.html b/docs/collections/tutorial/UsingStoredCollections.html new file mode 100644 index 0000000..8d20fa1 --- /dev/null +++ b/docs/collections/tutorial/UsingStoredCollections.html @@ -0,0 +1,661 @@ + + + + + + Using Stored Collections + + + + + + + + + +
+
+
+
+

+ Using Stored Collections +

+
+
+
+ +

+ When a stored collection is created it is based on either a + Database + + + or a + + SecondaryDatabase. + + + When a database is used, the primary key of the database is used as + the collection key. When a secondary database is used, the index + key is used as the collection key. Indexed collections can be used + for reading elements and removing elements but not for adding or + updating elements. +

+
+
+
+
+

+ Stored Collection and Access Methods +

+
+
+
+

+ The use of stored collections is constrained in certain respects as + described below. + +

+
+ +
+
+
+
+
+
+

+ Stored Collections Versus Standard Java Collections +

+
+
+
+

+ Stored collections have the following differences from the standard Java collection interfaces. Some of these are interface contract violations.

+

+ The Java collections interface does not support duplicate keys (multi-maps or multi-sets). When the access method allows duplicate keys, the collection interfaces are defined as follows (a short sketch follows this list).

+
+
    +
  • +

    + Map.entrySet() + + may contain multiple + Map.Entry + + objects with the same key. +

    +
  • +
  • +

+ Map.keySet() always contains unique keys; it does not contain duplicates.

    +
  • +
  • +

    + Map.values() + + contains all values including the values + associated with duplicate keys. +

    +
  • +
  • +

    + Map.put() + + appends a duplicate if the key already exists rather than replacing + the existing value, and always returns null. +

    +
  • +
  • +

    + Map.remove() + + removes all duplicates for the specified key. +

    +
  • +
  • +

    + Map.get() + + returns the first duplicate for the specified key. +

    +
  • +
  • +

    + StoredSortedMap.duplicates() + + is an additional method for returning the values for a given key as a + Collection. +

    +
  • +
+
+
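+ The sketch below illustrates the duplicate-key behavior; dupMap is assumed to be a StoredSortedMap over a database opened with sorted duplicates allowed, and key, valueOne and valueTwo are placeholder objects:
+
+import java.util.Collection;
+...
+dupMap.put(key, valueOne);
+dupMap.put(key, valueTwo);              // appends a duplicate, returns null
+Collection values = dupMap.duplicates(key);   // contains both values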

+ Other differences are: +

+
+
    +
  • +

+ Collection.size() and Map.size() always throw UnsupportedOperationException. This is because the number of records in a database cannot be determined reliably or cheaply.

    +
  • +
  • +

+ Because the size() method cannot be used, the bulk operation methods of standard Java collections cannot be passed stored collections as parameters, since the implementations rely on size(). However, the bulk operation methods of stored collections can be passed standard Java collections as parameters. For example, storedCollection.addAll(standardCollection) is allowed while standardCollection.addAll(storedCollection) is not allowed. This restriction applies to the standard collection constructors that take a Collection parameter (copy constructors), the Map.putAll() method, and the following Collection methods: addAll(), containsAll(), removeAll() and retainAll().

    +
  • +
  • +

    + Comparator + + objects cannot be used and the + SortedMap.comparator() + + and + SortedSet.comparator() + + methods always return null. The + Comparable + + interface is not supported. However, Comparators that operate on + byte arrays may be specified using + + DatabaseConfig.setBtreeComparator. + + +

    +
  • +
  • +

+ The Object.equals() method is not used to determine whether a key or value is contained in a collection, to locate a value by key, etc. Instead, the byte array representations of the keys and values are used. However, the equals() method is called for each key and value when comparing two collections for equality. It is the responsibility of the application to make sure that the equals() method returns true if and only if the byte array representations of the two objects are equal. Normally this occurs naturally since the byte array representation is derived from the object's fields.

    +
  • +
+
+
+
+
+
+
+

+ Other Stored Collection Characteristics +

+
+
+
+

+ The following characteristics of stored collections are + extensions of the definitions in the + java.util + + package. These differences do not violate the Java + collections interface contract. +

+
+
    +
  • +

    + All stored collections are thread safe (can be used by multiple + threads concurrently). + + Locking is handled by the Berkeley DB Java Edition + environment. To access a collection from multiple threads, creation + of synchronized collections using the + Collections + + class is not necessary. + + Iterators, however, should always be used only by a single thread. +

    +
  • +
  • +

    + All stored collections may be read-only if desired by passing + false for the writeAllowed parameter of their constructor. Creation + of immutable collections using the + Collections + + class is not necessary. +

    +
  • +
  • +

    + A stored collection is partially read-only if a secondary + index is used. Specifically, values may be removed but may not be + added or updated. The following methods will throw + UnsupportedOperationException + + when an index is used: + Collection.add(), + + ListIterator.set() + + and + Map.Entry.setValue(). +

    +
  • +
  • +

+ SortedMap.entrySet() and SortedMap.keySet() return a SortedSet, not just a Set as specified in the Java collections interface. This allows using the SortedSet methods on the returned collection.

    +
  • +
  • +

    + SortedMap.values() + + returns a + SortedSet, + not just a + Collection, + whenever the keys of the map can be derived from the values using + an entity binding. Note that the sorted set returned is not really + a set if duplicates are allowed, since it is technically a + collection; however, the + SortedSet + + methods (for example, subSet()), can still be used. +

    +
  • +
  • +

    + For + SortedSet + + and + SortedMap + + views, additional subSet() and subMap() methods are provided that + allow control over whether keys are treated as inclusive or + exclusive values in the key range. +

    +
  • +
  • +

    + Keys and values are stored by value, not by reference. This is + because objects that are added to collections are converted to byte + arrays (by bindings) and stored in the database. When they are + retrieved from the collection they are read from the database and + converted from byte arrays to objects. Therefore, the object + reference added to a collection will not be the same as the + reference later retrieved from the collection. +

    +
  • +
  • +

    + A runtime exception, + RuntimeExceptionWrapper, + is thrown whenever database exceptions occur which are not runtime + exceptions. The + RuntimeExceptionWrapper.getCause() + + method can be called to get the underlying exception. +

    +
  • +
  • +

    + All iterators for stored collections implement the + ListIterator + + interface as well as the + Iterator + + interface. This is to allow use of the + ListIterator.hasPrevious() + + and + ListIterator.previous() + + methods, which work for all collections + since Berkeley DB provides bidirectional cursors. +

    +
  • +
  • +

    + All stored collections have a + StoredCollection.iterator(boolean) + + method that allows creating + a read-only iterator for a writable collection. For the standard + Collection.iterator() + + method, the iterator is read-only only + when the collection is read-only. + + +

    +
  • +
  • +

    + Iterator stability for stored collections is greater than the + iterator stability defined by the Java collections interfaces. + Stored iterator stability is the same as the cursor stability + defined by Berkeley DB. +

    +
  • +
  • +

    + When an entity binding is used, updating (setting) a value is + not allowed if the key in the entity is not equal to the original + key. For example, calling + Map.put() + + is not allowed when the key parameter is not equal to the key of + the entity parameter. + Map.put(), + + ListIterator.set(), + and + Map.Entry.setValue() + + will throw + IllegalArgumentException + + in this situation. +

    +
  • +
  • +

    + The + + + + StoredSortedMap.append(java.lang.Object) + + extension method allows + adding a new record with an automatically assigned key. + + An application-defined + PrimaryKeyAssigner + + is used to assign the key value. +

    +
  • +
+
+
+
+
+
+
+

+ Why Java Collections for Berkeley DB Java Edition +

+
+
+
+

+ The Java collections interface was chosen as the best Java API + for JE given these requirements: +

+
+
    +
  1. +

    + Provide the Java developer with an API that is as familiar and + easy to use as possible. +

    +
  2. +
  3. +

    + Provide access to all, or a large majority, of the features of + the underlying Berkeley DB Java Edition storage system. +

    +
  4. +
  5. +

    + Compared to the JE API, provide a higher-level API + that is oriented toward Java developers. +

    +
  6. +
  7. +

    + For ease of use, support object-to-data bindings, per-thread + transactions, and some traditional database features such as + foreign keys. +

    +
  8. +
  9. +

    + Provide a thin layer that can be thoroughly tested and which + does not significantly impact the reliability and performance of + JE. +

    +
  10. +
+
+

+ Admittedly there are several things about the Java Collections + API that don't quite fit with JE or with any transactional + database, and therefore there are some new rules for applying the + Java Collections API. However, these disadvantages are considered + to be smaller than the disadvantages of the alternatives: +

+
+
    +
  • +

    + A new API not based on the Java Collections API could have been + designed that maps well to JE but is higher-level. + However, this would require designing an entirely new model. The + exceptions for using the Java Collections API are considered easier + to learn than a whole new model. A new model would also require a + long design stabilization period before being as complete and + understandable as either the Java Collections API or the JE + API. +

    +
  • +
  • +

    + The ODMG API or another object persistence API could have been + implemented on top of JE. However, an object persistence + implementation would add much code and require a long stabilization + period. And while it may work well for applications that require + object persistence, it would probably never perform well enough for + many other applications. +

    +
  • +
+
+
+
+ + + diff --git a/docs/collections/tutorial/addingdatabaseitems.html b/docs/collections/tutorial/addingdatabaseitems.html new file mode 100644 index 0000000..3b474a4 --- /dev/null +++ b/docs/collections/tutorial/addingdatabaseitems.html @@ -0,0 +1,229 @@ + + + + + + Adding Database Items + + + + + + + + + +
+
+
+
+

+ Adding Database Items +

+
+
+
+

+ Adding (as well as updating and removing) information in the database is accomplished via the standard Java collections API. In the example, the Map.put method is used to add objects. All standard Java methods for modifying a collection may be used with the JE Collections API.

+

+ The PopulateDatabase.doWork() method calls private methods + for adding objects to each of the three database stores. It is + called via the + TransactionRunner + + class and was outlined in the previous section. +

+ +
import java.util.Map;
+import com.sleepycat.collections.TransactionWorker;
+...
+public class Sample
+{
+    ...
+    private SampleViews views;
+    ...
+    private class PopulateDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            addSuppliers();
+            addParts();
+            addShipments();
+        }
+    }
+    ...
+
+    private void addSuppliers()
+    {
+    }
+
+    private void addParts()
+    {
+    }
+
+    private void addShipments()
+    {
+    }
+} 
+

+ The addSuppliers(), addParts() and addShipments() + methods add objects to the Suppliers, Parts and Shipments stores. + The + Map + + for each store is obtained from the SampleViews object. +

+ +
    private void addSuppliers()
+    {
+        Map suppliers = views.getSupplierMap();
+        if (suppliers.isEmpty())
+        {
+            System.out.println("Adding Suppliers");
+            suppliers.put(new SupplierKey("S1"),
+                          new SupplierData("Smith", 20, "London"));
+            suppliers.put(new SupplierKey("S2"),
+                          new SupplierData("Jones", 10, "Paris"));
+            suppliers.put(new SupplierKey("S3"),
+                          new SupplierData("Blake", 30, "Paris"));
+            suppliers.put(new SupplierKey("S4"),
+                          new SupplierData("Clark", 20, "London"));
+            suppliers.put(new SupplierKey("S5"),
+                          new SupplierData("Adams", 30, "Athens"));
+        }
+    }
+
+    private void addParts()
+    {
+        Map parts = views.getPartMap();
+        if (parts.isEmpty())
+        {
+            System.out.println("Adding Parts");
+            parts.put(new PartKey("P1"),
+                      new PartData("Nut", "Red",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P2"),
+                      new PartData("Bolt", "Green",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P3"),
+                      new PartData("Screw", "Blue",
+                                    new Weight(17.0, Weight.GRAMS),
+                                    "Rome"));
+            parts.put(new PartKey("P4"),
+                      new PartData("Screw", "Red",
+                                    new Weight(14.0, Weight.GRAMS),
+                                    "London"));
+            parts.put(new PartKey("P5"),
+                      new PartData("Cam", "Blue",
+                                    new Weight(12.0, Weight.GRAMS),
+                                    "Paris"));
+            parts.put(new PartKey("P6"),
+                      new PartData("Cog", "Red",
+                                    new Weight(19.0, Weight.GRAMS),
+                                    "London"));
+        }
+    }
+
+    private void addShipments()
+    {
+        Map shipments = views.getShipmentMap();
+        if (shipments.isEmpty())
+        {
+            System.out.println("Adding Shipments");
+            shipments.put(new ShipmentKey("P1", "S1"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P3", "S1"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P4", "S1"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P5", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P6", "S1"),
+                          new ShipmentData(100));
+            shipments.put(new ShipmentKey("P1", "S2"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P2", "S2"),
+                          new ShipmentData(400));
+            shipments.put(new ShipmentKey("P2", "S3"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P2", "S4"),
+                          new ShipmentData(200));
+            shipments.put(new ShipmentKey("P4", "S4"),
+                          new ShipmentData(300));
+            shipments.put(new ShipmentKey("P5", "S4"),
+                          new ShipmentData(400));
+        }
+    } 
+}
+

+ The key and value classes used above were defined in Defining Serialized Key and Value Classes.

+

+ In each method above, objects are added only if the map is empty. This is a simple way of allowing the example program to be run repeatedly. In real-life applications another technique — checking for a specific key with Map.containsKey, for example — might be used.

+
+ + + diff --git a/docs/collections/tutorial/collectionOverview.html b/docs/collections/tutorial/collectionOverview.html new file mode 100644 index 0000000..73026e6 --- /dev/null +++ b/docs/collections/tutorial/collectionOverview.html @@ -0,0 +1,451 @@ + + + + + + Appendix A.  API Notes and Details + + + + + + + + + +
+
+
+
+

Appendix A.  + API Notes and Details +

+
+
+
+

+ This appendix contains information useful to the collections programmer + that is too detailed to easily fit into the format of a tutorial. + Specifically, this appendix contains the following information: +

+ +
+
+
+
+

+ Using Data Bindings +

+
+
+
+ +

+ Data bindings determine how keys and values are represented as + stored data (byte arrays) in the database, and how stored data is + converted to and from Java objects. +

+

+ The selection of data bindings is, in general, independent of + the selection of + + collection views. In other + words, any binding can be used with any + + collection. + +

+
+

Note

+

+ In this document, bindings are described in the + context of their use for stored data in a database. However, + bindings may also be used independently of a database to operate on + an arbitrary byte array. This allows using bindings when data is to + be written to a file or sent over a network, for example. +

+
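+ For example, the following sketch converts an Integer to raw bytes and back without any database, using the built-in primitive Integer binding:
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.DatabaseEntry;
+...
+EntryBinding binding = TupleBinding.getPrimitiveBinding(Integer.class);
+DatabaseEntry entry = new DatabaseEntry();
+binding.objectToEntry(new Integer(42), entry);
+byte[] rawBytes = entry.getData();    // bytes for a file or the network
+Integer copy = (Integer) binding.entryToObject(entry);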
+
+
+
+
+

+ Selecting Binding Formats +

+
+
+
+

+ For the key and value of each stored collection, you may select + one of the following types of bindings. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Binding Format | Ordered | Description
+ SerialBinding + + No + The data is stored using a compact form of Java serialization, + where the class descriptions are stored separately in a catalog + database. Arbitrary Java objects are supported. +
+ TupleBinding + + Yes + The data is stored using a series of fixed length primitive + values or zero terminated character arrays (strings). Class/type + evolution is not supported. +
Custom binding formatUser-defined + The data storage format and ordering is determined by the + custom binding implementation. +
+
+

+ As shown in the table above, the tuple format supports built-in ordering + (without specifying a custom comparator), while the serial format does + not. This means that when a specific key order is needed, tuples should + be used instead of serial data. Alternatively, a custom Btree comparator should be + specified using + DatabaseConfig.setBtreeComparator(). Note that + a custom Btree comparator will usually execute more slowly than the + default byte-by-byte comparison. This makes using tuples an attractive + option, since they provide ordering along with optimal performance. +

+

+ The tuple binding uses less space and executes faster than the + serial binding. But once a tuple is written to a database, the + order of fields in the tuple may not be changed and fields may not + be deleted. The only type evolution allowed is the addition of + fields at the end of the tuple, and this must be explicitly + supported by the custom binding implementation. +

+

+ The serial binding supports the full generality of Java + serialization including type evolution. But serialized data can + only be accessed by Java applications, its size is larger, and its + bindings are slower to execute. +

+
+
+
+
+
+

+ Selecting Data Bindings +

+
+
+
+

+ There are two types of binding interfaces. Simple entry bindings + implement the + EntryBinding + + interface and can be used for key or value objects. Entity bindings + implement the + EntityBinding + + interface and are used for combined key and value objects called + entities. +

+

+ Simple entry bindings map between the key or value data stored + by Berkeley DB and a key or value object. This is a simple + one-to-one mapping. +

+

+ Simple entry bindings are easy to implement and in some cases + require no coding. For example, a + SerialBinding + + can be used for keys or values without writing any additional + code. A tuple binding for a single-item tuple can also be used without + writing any code; see the + TupleBinding.getPrimitiveBinding + + method. +

+

+ Entity bindings must divide an entity object into its key and + value data, and then combine the key and value data to re-create + the entity object. This is a two-to-one mapping. +

+

+ Entity bindings are useful when a stored application object + naturally has its primary key as a property, which is very common. + For example, an Employee object would naturally have an + EmployeeNumber property (its primary key) and an entity binding + would then be needed. Of course, entity bindings are more complex + to implement, especially if their key and data formats are + different. +

+

+ Note that even when an entity binding is used a key binding is + also usually needed. For example, a key binding is used to create + key objects that are passed to the + Map.get() + + method. A key object is passed to this method even though it may + return an entity that also contains the key. +

+
+
+
+
+
+

+ Implementing Bindings +

+
+
+
+

+ There are two ways to implement bindings. The first way is to + create a binding class that implements one of the two binding + interfaces, + EntryBinding + + or + EntityBinding. + For tuple bindings and serial bindings there are a number of + abstract classes that make this easier. For example, you can extend + TupleBinding + + to implement a simple binding for a tuple key or value. Abstract + classes are also provided for entity bindings and are named after + the format names of the key and value. For example, you can extend + TupleSerialBinding + + to implement an entity binding with a tuple key and serial + value. +

+
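+ A minimal sketch of the first approach follows, assuming the tutorial's PartKey class with its single String field:
+
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public static class PartKeyBinding extends TupleBinding
+{
+    // deserialize: read the fields in the order they were written
+    public Object entryToObject(TupleInput input)
+    {
+        return new PartKey(input.readString());
+    }
+
+    // serialize: write the fields in a fixed order
+    public void objectToEntry(Object object, TupleOutput output)
+    {
+        output.writeString(((PartKey) object).getNumber());
+    }
+}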

+ Another way to implement bindings is with marshalling + interfaces. These are interfaces which perform the binding + operations and are implemented by the key, value or entity classes + themselves. With marshalling you use a binding which calls the + marshalling interface and you implement the marshalling interface + for each key, value or entity class. For example, you can use + TupleMarshalledBinding + + along with key or value classes that implement the + MarshalledTupleEntry + + interface. +

+
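+ A sketch of the marshalling approach, again using a PartKey-like class; the no-argument constructor is required so the binding can instantiate the class:
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleMarshalledBinding;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class PartKey implements MarshalledTupleEntry
+{
+    private String number;
+
+    public PartKey() {}                 // required by the binding
+
+    public PartKey(String number) { this.number = number; }
+
+    public void marshalEntry(TupleOutput out)
+    {
+        out.writeString(number);
+    }
+
+    public void unmarshalEntry(TupleInput in)
+    {
+        number = in.readString();
+    }
+}
+...
+// the generic binding calls the marshalling methods:
+EntryBinding keyBinding = new TupleMarshalledBinding(PartKey.class);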
+
+
+
+
+

+ Using Bindings +

+
+
+
+

+ Bindings are specified whenever a stored collection is created. + A key binding must be specified for map, key set and entry set + views. A value binding or entity binding must be specified for map, + value set and entry set views. +

+

+ Any number of bindings may be created for the same stored data. + This allows multiple views over the same data. For example, a tuple + might be bound to an array of values or to a class with properties + for each object. +

+

+ It is important to be careful of bindings that only use a subset + of the stored data. This can be useful to simplify a view or to + hide information that should not be accessible. However, if you + write records using these bindings you may create stored data that + is invalid from the application's point of view. It is up to the + application to guard against this by creating a read-only + collection when such bindings are used. +

+
+
+
+
+
+

+ Secondary Key Creators +

+
+
+
+

+ Secondary key creators are needed whenever database indices are used. For each secondary index (SecondaryDatabase) a key creator is used to derive index key data from key/value data. Key creators are objects whose classes implement the SecondaryKeyCreator interface.

+

+ Like bindings, key creators may be implemented using a separate + key creator class or using a marshalling interface. Abstract key + creator classes and marshalling interfaces are provided in the + com.sleepycat.bind.tuple and com.sleepycat.bind.serial + packages. +

+

+ Unlike bindings, key creators fundamentally operate on key and + value data, not necessarily on the objects derived from the data by + bindings. In this sense key creators are a part of a database + definition, and may be independent of the various bindings that may + be used to view data in a database. However, key creators are not + prohibited from using higher level objects produced by bindings, + and doing so may be convenient for some applications. For example, + marshalling interfaces, which are defined for objects produced by + bindings, are a convenient way to define key creators. +

+
+
+
+ + + diff --git a/docs/collections/tutorial/collectionswithentities.html b/docs/collections/tutorial/collectionswithentities.html new file mode 100644 index 0000000..31c4238 --- /dev/null +++ b/docs/collections/tutorial/collectionswithentities.html @@ -0,0 +1,164 @@ + + + + + + Creating Collections with Entity Bindings + + + + + + + + + +
+
+
+
+

+ Creating Collections with Entity Bindings +

+
+
+
+

+ Stored map objects are created in this example in the same way as in prior examples, but using entity bindings in place of value bindings. All value objects passed to and returned from the Java collections API are then actually entity objects (Part, Supplier and Shipment). The application no longer deals directly with plain value objects (PartData, SupplierData and ShipmentData).

+

+ Since the partDataBinding, supplierDataBinding + and shipmentDataBinding were defined as entity bindings in + the prior section, there are no source code changes necessary for + creating the stored map objects. +

+ +
public class SampleViews
+{
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+        ...
+        partMap =
+            new StoredSortedMap(db.getPartDatabase(),
+                          partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredSortedMap(db.getSupplierDatabase(),
+                          supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredSortedMap(db.getShipmentDatabase(),
+                          shipmentKeyBinding, shipmentDataBinding, true);
+      ...
+    } 
+

+ Specifying an + EntityBinding + + will select a different + StoredSortedMap + + constructor, but the syntax is the same. In general, an entity + binding may be used anywhere that a value binding is used. +

+

+ The following getter methods are defined for use by other + classes in the example program. Instead of returning the map's + entry set + (Map.entrySet), + the map's value set + (Map.values) + is returned. The entry set was convenient in prior examples because + it allowed enumerating all key/value pairs in the collection. Since + an entity contains the key and the value, enumerating the value set + can now be used more conveniently for the same purpose. +

+ +
import com.sleepycat.collections.StoredValueSet;
+...
+public class SampleViews
+{
+    ...
+    public StoredValueSet getPartSet()
+    {
+        return (StoredValueSet) partMap.values();
+    }
+
+    public StoredValueSet getSupplierSet()
+    {
+        return (StoredValueSet) supplierMap.values();
+    }
+
+    public StoredValueSet getShipmentSet()
+    {
+        return (StoredValueSet) shipmentMap.values();
+    }
+    ...
+} 
+

+ Notice that the collection returned by the + StoredSortedMap.values + + method is actually a + StoredValueSet + + and not just a + Collection + + as defined by the + Map.values + + interface. As long as duplicate keys are not allowed, this + collection will behave as a true set and will disallow the addition + of duplicates, etc. +

+
+ + + diff --git a/docs/collections/tutorial/createbindingscollections.html b/docs/collections/tutorial/createbindingscollections.html new file mode 100644 index 0000000..5224e83 --- /dev/null +++ b/docs/collections/tutorial/createbindingscollections.html @@ -0,0 +1,283 @@ + + + + + + Creating Bindings and Collections + + + + + + + + + +
+
+
+
+

+ Creating Bindings and Collections +

+
+
+
+

+ Bindings translate between stored records and Java objects. + In this example, Java serialization bindings are used. Serial + bindings are the simplest type of bindings because no mapping of + fields or type conversion is needed. Tuple bindings — which are + more difficult to create than serial bindings but have some + advantages — will be introduced later in the Tuple example + program. +

+

+ Standard Java collections are used to access records in a + database. Stored collections use bindings transparently to convert + the records to objects when they are retrieved from the collection, + and to convert the objects to records when they are stored in the + collection. +

+

+ An important characteristic of stored collections is that they + do not perform object caching. Every time an object is + accessed via a collection it will be added to or retrieved from the + database, and the bindings will be invoked to convert the data. + Objects are therefore always passed and returned by value, not by + reference. Because Berkeley DB is an embedded database, efficient + caching of stored raw record data is performed by the database library. +

+

+ The SampleViews class is used to create the bindings and + collections. This class is separate from the SampleDatabase + class to illustrate the idea that a single set of stored data can + be accessed via multiple bindings and collections, or views. + The skeleton for the SampleViews class follows. +

+ +
import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredSortedMap;
+...
+
+public class SampleViews
+{
+    private StoredSortedMap partMap;
+    private StoredSortedMap supplierMap;
+    private StoredSortedMap shipmentMap;
+
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+    }
+} 
+

+ A + StoredSortedMap + + field is used for each database. The StoredSortedMap class implements the + standard Java + Map + + interface, which has methods for obtaining a + Set + + of keys, a + Collection + + of values, or a + Set + + of + Map.Entry + + key/value pairs. Because databases contain key/value pairs, any + Berkeley DB database may be represented as a Java map. +

+

+ The following statements create the key and data bindings using + the + SerialBinding + + class. +

+ +
    public SampleViews(SampleDatabase db)
+    {
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new SerialBinding(catalog, PartKey.class);
+        EntryBinding partDataBinding =
+            new SerialBinding(catalog, PartData.class);
+        EntryBinding supplierKeyBinding =
+            new SerialBinding(catalog, SupplierKey.class);
+        EntryBinding supplierDataBinding =
+            new SerialBinding(catalog, SupplierData.class);
+        EntryBinding shipmentKeyBinding =
+            new SerialBinding(catalog, ShipmentKey.class);
+        EntryBinding shipmentDataBinding =
+            new SerialBinding(catalog, ShipmentData.class);
+        ...
+    } 
+

+ The first parameter of the + SerialBinding + + constructor is the class catalog, and is used to store the class + descriptions of the serialized objects. +

+

+ The second parameter is the base class for the serialized + objects and is used for type checking of the stored objects. If + null or Object.class is specified, then any Java + class is allowed. Otherwise, all objects stored in that format must + be instances of the specified class or derived from the specified + class. In the example, specific classes are used to enable strong + type checking. +

+

+ The following statements create standard Java maps using the + StoredSortedMap + + class. +

+ +
    public SampleViews(SampleDatabase db)
+    {
+        ...
+        partMap =
+            new StoredSortedMap(db.getPartDatabase(),
+                          partKeyBinding, partDataBinding, true);
+        supplierMap =
+            new StoredSortedMap(db.getSupplierDatabase(),
+                          supplierKeyBinding, supplierDataBinding, true);
+        shipmentMap =
+            new StoredSortedMap(db.getShipmentDatabase(),
+                          shipmentKeyBinding, shipmentDataBinding, true);
+    ...
+    } 
+

+ The first parameter of the + StoredSortedMap + + constructor is the database. In a StoredSortedMap, the database keys (the primary + keys) are used as the map keys. The Index + example shows how to use secondary index keys as map keys. +

+

+ The second and third parameters are the key and value bindings + to use when storing and retrieving objects via the map. +

+

+ The fourth and last parameter specifies whether changes will be + allowed via the collection. If false is passed, the collection will + be read-only. +

+

+ The following getter methods return the stored maps for use by + other classes in the example program. Convenience methods for + returning entry sets are also included. +

+ +
public class SampleViews
+{
+    ...
+    public final StoredSortedMap getPartMap()
+    {
+        return partMap;
+    }
+
+    public final StoredSortedMap getSupplierMap()
+    {
+        return supplierMap;
+    }
+
+    public final StoredSortedMap getShipmentMap()
+    {
+        return shipmentMap;
+    }
+
+    public final StoredEntrySet getPartEntrySet()
+    {
+        return (StoredEntrySet) partMap.entrySet();
+    }
+
+    public final StoredEntrySet getSupplierEntrySet()
+    {
+        return (StoredEntrySet) supplierMap.entrySet();
+    }
+
+    public final StoredEntrySet getShipmentEntrySet()
+    {
+        return (StoredEntrySet) shipmentMap.entrySet();
+    }
+    ...
+} 
+

+ Note that StoredSortedMap and StoredEntrySet are returned rather than just Map and Set. Since StoredSortedMap implements the Map interface and StoredEntrySet implements the Set interface, you may ask why Map and Set were not returned directly.

+

+ StoredSortedMap, StoredEntrySet, + and other stored collection classes + have a small number of extra methods beyond those in the Java + collection interfaces. The stored collection types are therefore + returned to avoid casting when using the extended methods. + Normally, however, only a Map or Set is needed, and may be used as + follows. +

+ +
    SampleDatabase sd = new SampleDatabase("/home");
+    SampleViews views = new SampleViews(sd);
+    Map partMap = views.getPartMap();
+    Set supplierEntries = views.getSupplierEntrySet(); 
+
+ + + diff --git a/docs/collections/tutorial/creatingentitybindings.html b/docs/collections/tutorial/creatingentitybindings.html new file mode 100644 index 0000000..77da81a --- /dev/null +++ b/docs/collections/tutorial/creatingentitybindings.html @@ -0,0 +1,271 @@ + + + + + + Creating Entity Bindings + + + + + + + + + +
+
+
+
+

+ Creating Entity Bindings +

+
+
+
+

+ Entity bindings are similar to ordinary bindings in that + they convert between Java objects and the stored data format of + keys and values. In addition, entity bindings map between key/value + pairs and entity objects. An ordinary binding is a one-to-one + mapping, while an entity binding is a two-to-one mapping. +

+

+ The partDataBinding, supplierDataBinding and + shipmentDataBinding bindings are created below as entity + bindings rather than (in the prior examples) serial bindings. +

+ +
import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.EntityBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.SerialSerialBinding;
+...
+
+public class SampleViews
+{
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+        ClassCatalog catalog = db.getClassCatalog();
+        SerialBinding partKeyBinding =
+            new SerialBinding(catalog, PartKey.class);
+        EntityBinding partDataBinding =
+            new PartBinding(catalog, PartKey.class, PartData.class);
+        SerialBinding supplierKeyBinding =
+            new SerialBinding(catalog, SupplierKey.class);
+        EntityBinding supplierDataBinding =
+            new SupplierBinding(catalog, SupplierKey.class,
+                                SupplierData.class);
+        SerialBinding shipmentKeyBinding =
+            new SerialBinding(catalog, ShipmentKey.class);
+        EntityBinding shipmentDataBinding =
+            new ShipmentBinding(catalog, ShipmentKey.class,
+                                ShipmentData.class);
+        SerialBinding cityKeyBinding =
+            new SerialBinding(catalog, String.class);
+        ...
+    }
+} 
+

+ The entity bindings will be used in the next section to + construct stored map objects. +

+

+ The PartBinding class is defined below. +

+ +
public class SampleViews
+{
+    ...
+    private static class PartBinding extends SerialSerialBinding {
+        private PartBinding(ClassCatalog classCatalog,
+                            Class keyClass,
+                            Class dataClass)
+        {
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        public Object entryToObject(Object keyInput, Object dataInput)
+        {
+            PartKey key = (PartKey) keyInput;
+            PartData data = (PartData) dataInput;
+            return new Part(key.getNumber(), data.getName(),
+                            data.getColor(), data.getWeight(), 
+                            data.getCity());
+        }
+
+        public Object objectToKey(Object object)
+        {
+            Part part = (Part) object;
+            return new PartKey(part.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            Part part = (Part) object;
+            return new PartData(part.getName(), part.getColor(),
+                                part.getWeight(), part.getCity());
+        }
+    }
+    ...
+} 
+

+ In general, an entity binding is any class that implements the + EntityBinding + + interface, just as an ordinary binding is any class that implements + the + EntryBinding + + interface. In the prior examples the built-in + SerialBinding + + class (which implements + EntryBinding) + was used and no application-defined binding classes were needed. +

+

+ In this example, application-defined binding classes are used + that extend the + SerialSerialBinding + + abstract base class. This base class implements + EntityBinding + + and provides the conversions between key/value bytes and key/value + objects, just as the + SerialBinding + + class does. The application-defined entity class implements the + abstract methods defined in the base class that map between + key/value objects and entity objects. +

+

+ Three abstract methods are implemented for each entity binding. + The entryToObject() method takes as input the key and data + objects, which have been deserialized automatically by the base + class. As output, it returns the combined Part entity. +

+

+ The objectToKey() and objectToData() methods take an + entity object as input. As output they return the part key or data + object that is extracted from the entity object. The key or data + will then be serialized automatically by the base class. +

+

+ The SupplierBinding and ShipmentBinding classes + are very similar to the PartBinding class. +

+ +
public class SampleViews
+{
+    ...
+    private static class SupplierBinding extends SerialSerialBinding {
+        private SupplierBinding(ClassCatalog classCatalog,
+                                Class keyClass,
+                                Class dataClass)
+        {
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        public Object entryToObject(Object keyInput, Object dataInput)
+        {
+            SupplierKey key = (SupplierKey) keyInput;
+            SupplierData data = (SupplierData) dataInput;
+            return new Supplier(key.getNumber(), data.getName(),
+                                data.getStatus(), data.getCity());
+        }
+
+        public Object objectToKey(Object object)
+        {
+            Supplier supplier = (Supplier) object;
+            return new SupplierKey(supplier.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            Supplier supplier = (Supplier) object;
+            return new SupplierData(supplier.getName(),
+                                    supplier.getStatus(), 
+                                    supplier.getCity());
+        }
+    }
+
+    private static class ShipmentBinding extends SerialSerialBinding {
+        private ShipmentBinding(ClassCatalog classCatalog,
+                                Class keyClass,
+                                Class dataClass)
+        {
+            super(classCatalog, keyClass, dataClass);
+        }
+
+        public Object entryToObject(Object keyInput, Object dataInput)
+        {
+            ShipmentKey key = (ShipmentKey) keyInput;
+            ShipmentData data = (ShipmentData) dataInput;
+            return new Shipment(key.getPartNumber(), 
+                                key.getSupplierNumber(),
+                                data.getQuantity());
+        }
+
+        public Object objectToKey(Object object)
+        {
+            Shipment shipment = (Shipment) object;
+            return new ShipmentKey(shipment.getPartNumber(),
+                                   shipment.getSupplierNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            Shipment shipment = (Shipment) object;
+            return new ShipmentData(shipment.getQuantity());
+        }
+    }
+    ...
+} 
+
+ + + diff --git a/docs/collections/tutorial/developing.html b/docs/collections/tutorial/developing.html new file mode 100644 index 0000000..28d9ddd --- /dev/null +++ b/docs/collections/tutorial/developing.html @@ -0,0 +1,186 @@ + + + + + + Developing a JE Collections Application + + + + + + + + + +
+
+
+
+

Developing a JE Collections Application

+
+
+
+

+ There are several important choices to make when developing an application using the JE Collections API.

+
+
    +
  1. +

    + Choose the Format for Keys and Values +

    +

    + For each database you may choose a binding format for the keys + and values. For example, the tuple format is useful for keys + because it has a deterministic sort order. The serial format is + useful for values if you want to store arbitrary Java objects. In + some cases a custom format may be appropriate. For details on + choosing a binding format see + + Using Data Bindings + . +

    +
  2. +
  3. +

    + Choose the Binding for Keys and Values +

    +

+ With the serial data format you do not have to create a binding for each Java class that is stored, since Java serialization is used. But for other formats a binding must be defined that translates between stored byte arrays and Java objects (see the sketch after this list). For details see Using Data Bindings.

    +
  4. +
  5. +

    + Choose Secondary Indices and Foreign Key Indices +

    +

+ Any database that has unique keys may have any number of secondary indices. A secondary index has keys that are derived from data values in the primary database. This allows lookup and iteration of objects in the database by their index keys. A foreign key index is a special type of secondary index where the index keys are also the primary keys of another primary database. For each index you must define how the index keys are derived from the data values using a SecondaryKeyCreator. For details see the SecondaryDatabase, SecondaryConfig and SecondaryKeyCreator classes.

    +
  6. +
  7. +

    + Choose the Collection Interface for each Database +

    +

    + The standard Java Collection interfaces are used for accessing + databases and secondary indices. The Map and Set interfaces may be + used for any type of database. The Iterator interface is used + through the Set interfaces. For more information on the collection + interfaces see + + Using Stored Collections + . +

    +
  8. +
+
+
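+ As an illustration of these choices, here is a minimal sketch pairing the tuple format for keys with the serial format for values. The open ClassCatalog named catalog and the PartData value class are assumptions borrowed from the tutorial example:
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+...
+// Tuple format for the key: compact, with a deterministic sort order.
+EntryBinding keyBinding = TupleBinding.getPrimitiveBinding(String.class);
+// Serial format for the value: stores an arbitrary serializable class.
+EntryBinding valueBinding = new SerialBinding(catalog, PartData.class);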

+ Any number of bindings and collections may be created for the + same database. This allows multiple views of the same stored data. + For example, a data store may be viewed as a Map of keys to values, + a Set of keys, or a Collection of values. String values, for + example, may be used with the built-in binding to the String class, + or with a custom binding to another class that represents the + string values differently. +

+
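+ For instance, a minimal sketch of viewing one database several ways, assuming an open Database named db and previously created keyBinding and valueBinding objects:
+
+import com.sleepycat.collections.StoredMap;
+import java.util.Collection;
+import java.util.Set;
+...
+StoredMap map = new StoredMap(db, keyBinding, valueBinding, true);
+Set keys = map.keySet();           // the store viewed as a Set of keys
+Collection values = map.values();  // the same store as a Collection of values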

+ It is sometimes desirable to use a Java class that encapsulates both a data key and a data value. For example, a Part object might contain both the part number (key) and the part name (value). Using the JE Collections API, this type of object is called an "entity". An entity binding is used to translate between the Java object and the stored data key and value. Entity bindings may be used with all Collection types.

+

+ Please be aware that the provided JE Collections API collection classes do not conform completely to the interface contracts defined in the java.util package. For example, all iterators must be explicitly closed and the size() method is not available. The differences between the JE Collections API collections and the standard Java collections are documented in Stored Collections Versus Standard Java Collections.
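+ For example, a minimal sketch of explicitly closing a stored iterator, reusing the getPartSet() method from this tutorial's SampleViews class:
+
+import com.sleepycat.collections.StoredIterator;
+import java.util.Iterator;
+...
+Iterator i = views.getPartSet().iterator();
+try {
+    while (i.hasNext()) {
+        System.out.println(i.next());
+    }
+} finally {
+    // stored iterators must be closed explicitly
+    StoredIterator.close(i);
+}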

+
+ + + diff --git a/docs/collections/tutorial/entitieswithcollections.html b/docs/collections/tutorial/entitieswithcollections.html new file mode 100644 index 0000000..b806b21 --- /dev/null +++ b/docs/collections/tutorial/entitieswithcollections.html @@ -0,0 +1,251 @@ + + + + + + Using Entities with Collections + + + + + + + + + +
+
+
+
+

+ Using Entities with Collections +

+
+
+
+

+ In this example entity objects, rather than key and value objects, are used for adding and enumerating the records in a collection. Because fewer classes and objects are involved, adding and enumerating are done more conveniently and more simply than in the prior examples.

+

+ For adding and iterating entities, the collection of entities returned by Map.values is used. In general, when using an entity binding, any Java collection method that would otherwise be passed or return a value object is passed or returns an entity object instead.

+

+ The Sample class has been changed in this example to add + objects using the + Set.add + + method rather than the + Map.put + + method that was used in the prior examples. Entity objects are + constructed and passed to + Set.add. +

+ +
import java.util.Set;
+...
+public class Sample
+{
+    ...
+    private void addSuppliers()
+    {
+        Set suppliers = views.getSupplierSet();
+        if (suppliers.isEmpty())
+        {
+            System.out.println("Adding Suppliers");
+            suppliers.add(new Supplier("S1", "Smith", 20, "London"));
+            suppliers.add(new Supplier("S2", "Jones", 10, "Paris"));
+            suppliers.add(new Supplier("S3", "Blake", 30, "Paris"));
+            suppliers.add(new Supplier("S4", "Clark", 20, "London"));
+            suppliers.add(new Supplier("S5", "Adams", 30, "Athens"));
+        }
+    }
+
+    private void addParts()
+    {
+        Set parts = views.getPartSet();
+        if (parts.isEmpty())
+        {
+            System.out.println("Adding Parts");
+            parts.add(new Part("P1", "Nut", "Red",
+                      new Weight(12.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P2", "Bolt", "Green",
+                      new Weight(17.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P3", "Screw", "Blue",
+                      new Weight(17.0, Weight.GRAMS), "Rome"));
+            parts.add(new Part("P4", "Screw", "Red",
+                      new Weight(14.0, Weight.GRAMS), "London"));
+            parts.add(new Part("P5", "Cam", "Blue",
+                      new Weight(12.0, Weight.GRAMS), "Paris"));
+            parts.add(new Part("P6", "Cog", "Red",
+                      new Weight(19.0, Weight.GRAMS), "London"));
+        }
+    }
+
+    private void addShipments()
+    {
+        Set shipments = views.getShipmentSet();
+        if (shipments.isEmpty())
+        {
+            System.out.println("Adding Shipments");
+            shipments.add(new Shipment("P1", "S1", 300));
+            shipments.add(new Shipment("P2", "S1", 200));
+            shipments.add(new Shipment("P3", "S1", 400));
+            shipments.add(new Shipment("P4", "S1", 200));
+            shipments.add(new Shipment("P5", "S1", 100));
+            shipments.add(new Shipment("P6", "S1", 100));
+            shipments.add(new Shipment("P1", "S2", 300));
+            shipments.add(new Shipment("P2", "S2", 400));
+            shipments.add(new Shipment("P2", "S3", 200));
+            shipments.add(new Shipment("P2", "S4", 200));
+            shipments.add(new Shipment("P4", "S4", 300));
+            shipments.add(new Shipment("P5", "S4", 400));
+        }
+    } 
+

+ Instead of printing the key/value pairs by iterating over the + Map.entrySet + + as done in the prior example, this example + iterates over the entities in the + Map.values + + collection. +

+ +
import java.util.Iterator;
+import java.util.Set;
+...
+public class Sample
+{
+    ...
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            printValues("Parts",
+                         views.getPartSet().iterator());
+            printValues("Suppliers",
+                         views.getSupplierSet().iterator());
+            printValues("Suppliers for City Paris",
+                         views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printValues("Shipments",
+                         views.getShipmentSet().iterator());
+            printValues("Shipments for Part P1",
+                         views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                         views.getShipmentBySupplierMap().duplicates(
+                                      new SupplierKey("S1")).iterator());
+        }
+    }
+    ...
+} 
+
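+ The printValues() method called above is not shown in this section. A minimal sketch consistent with the output below, and with the requirement that stored iterators be explicitly closed, might be:
+
+import com.sleepycat.collections.StoredIterator;
+...
+    private void printValues(String label, Iterator iterator)
+    {
+        System.out.println("\n--- " + label + " ---");
+        try {
+            while (iterator.hasNext()) {
+                System.out.println(iterator.next().toString());
+            }
+        } finally {
+            // stored iterators must always be closed
+            StoredIterator.close(iterator);
+        }
+    }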

+ The output of the example program is shown below. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+Part: number=P1 name=Nut color=Red weight=[12.0 grams] city=London
+Part: number=P2 name=Bolt color=Green weight=[17.0 grams] city=Paris
+Part: number=P3 name=Screw color=Blue weight=[17.0 grams] city=Rome
+Part: number=P4 name=Screw color=Red weight=[14.0 grams] city=London
+Part: number=P5 name=Cam color=Blue weight=[12.0 grams] city=Paris
+Part: number=P6 name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+Supplier: number=S1 name=Smith status=20 city=London
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+Supplier: number=S4 name=Clark status=20 city=London
+Supplier: number=S5 name=Adams status=30 city=Athens
+
+--- Suppliers for City Paris ---
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+
+--- Shipments ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P2 supplier=S2 quantity=400
+Shipment: part=P2 supplier=S3 quantity=200
+Shipment: part=P2 supplier=S4 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P4 supplier=S4 quantity=300
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P5 supplier=S4 quantity=400
+Shipment: part=P6 supplier=S1 quantity=100
+
+--- Shipments for Part P1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+
+--- Shipments for Supplier S1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P6 supplier=S1 quantity=100 
+
+ + + diff --git a/docs/collections/tutorial/gettingStarted.css b/docs/collections/tutorial/gettingStarted.css new file mode 100644 index 0000000..6a2b24b --- /dev/null +++ b/docs/collections/tutorial/gettingStarted.css @@ -0,0 +1,50 @@ +body { width: 45em; + margin-left: 3em; + font-family: Arial, Helvetica, sans-serif; + font-size: 11pt; + } + +h2.title { margin-left: -1em; + font-family: Verdana, serif; + font-size: 16pt; + } + +h3.title { font-family: Verdana, serif; + font-size: 14pt; + } + +pre.programlisting { + font-family: monospace; + background-color: #eae8e9; +} + +div.navheader { font-size: 10pt; + width: 60em; + margin-left: -2em; + } + +div.navheader table tr td { font-size: 10pt; } + +div.navfooter { font-size: 10pt; + width: 60em; + margin-left: -2em; + } +div.navfooter table tr td { font-size: 10pt; } + +span.emphasis { font-style: italic;} + +div.appendix div.informaltable { font-size: 9pt; } +div.appendix div.informaltable td { vertical-align: top; } +div.appendix div.informaltable p { margin-top: .25em; } +div.appendix div.informaltable p { margin-bottom: .25em; } + +div.variablelist dl dt {margin-top: 1em; } + +div.libver p { + font-size: 8pt; + width: 30%; + margin-left: 2px; + margin-right: 2px; + padding-top: 3px; + padding-bottom: 3px; + } diff --git a/docs/collections/tutorial/handlingexceptions.html b/docs/collections/tutorial/handlingexceptions.html new file mode 100644 index 0000000..02e7dec --- /dev/null +++ b/docs/collections/tutorial/handlingexceptions.html @@ -0,0 +1,217 @@ + + + + + + Handling Exceptions + + + + + + + + + +
+
+
+
+

+ Handling Exceptions +

+
+
+
+

+ Exception handling was illustrated previously in Implementing the Main Program and Using Transactions. This section describes exception handling in a JE Collections API application in more detail.

+

+ There are two exceptions that must be treated specially: + RunRecoveryException + + + and + + LockConflictException. + + +

+

+ RunRecoveryException + + + is thrown when the only solution is to shut down the application + and run recovery. All applications must catch this exception and + follow the recovery procedure. +

+

+ When LockConflictException is thrown, the application should normally retry the operation. If a deadlock continues to occur for some maximum number of retries, the application should give up and try again later or take other corrective actions. The JE Collections API provides two APIs for transaction execution.

+
+ +
+

+ When using the + TransactionRunner + + class there are two other considerations. +

+
+
    +
  • +

+ First, if the application-defined TransactionWorker.doWork method throws an exception, the transaction will automatically be aborted; otherwise, the transaction will automatically be committed. Applications should design their transaction processing with this in mind (see the sketch after this list).

    +
  • +
  • +

    + Second, please be aware that + TransactionRunner.run + + unwraps exceptions in order to discover whether a nested exception is a + + LockConflictException. + + + This is particularly important since all Berkeley DB exceptions + that occur while calling a stored collection method are wrapped + with a + RuntimeExceptionWrapper. + This wrapping is necessary because Berkeley DB exceptions are + checked exceptions, and the Java collections API does not allow + such exceptions to be thrown. +

    +
  • +
+
+
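+ As a sketch of typical usage consistent with this tutorial's Sample class (the surrounding try/catch structure is an assumption for illustration):
+
+import com.sleepycat.collections.TransactionRunner;
+...
+TransactionRunner runner = new TransactionRunner(db.getEnvironment());
+try {
+    // run() begins a transaction, calls doWork(), and commits on success;
+    // if doWork() throws, the transaction is aborted, and lock conflicts
+    // are retried up to the runner's configured maximum.
+    runner.run(new PrintDatabase());
+} catch (Exception e) {
+    // run() has already unwrapped any nested exception
+    e.printStackTrace();
+}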

+ When calling TransactionRunner.run, the nested exception will be unwrapped and thrown automatically. If you are not using TransactionRunner, or if you are handling exceptions directly for some other reason, use the ExceptionUnwrapper.unwrap method to get the nested exception. For example, this can be used to discover that an exception is a RunRecoveryException, as shown below.

+ +
import com.sleepycat.je.RunRecoveryException;
+import com.sleepycat.util.ExceptionUnwrapper;
+...
+    catch (Exception e)
+    {
+        e = ExceptionUnwrapper.unwrap(e);
+        if (e instanceof RunRecoveryException)
+        {
+            // follow recovery procedure
+        }
+    } 
+
+ + + diff --git a/docs/collections/tutorial/implementingmain.html b/docs/collections/tutorial/implementingmain.html new file mode 100644 index 0000000..8f33cfc --- /dev/null +++ b/docs/collections/tutorial/implementingmain.html @@ -0,0 +1,253 @@ + + + + + + Implementing the Main Program + + + + + + + + + +
+
+
+
+

+ Implementing the Main Program +

+
+
+
+

+ The main program opens the environment and databases, stores and retrieves objects within a transaction, and finally closes the environment and databases. This section describes the main program shell, and the next section describes how to run transactions for storing and retrieving objects.

+

+ The Sample class contains the main program. The skeleton + for the Sample class follows. +

+ +
import com.sleepycat.je.DatabaseException;
+import java.io.FileNotFoundException;
+
+public class Sample
+{
+    private SampleDatabase db;
+    private SampleViews views;
+
+    public static void main(String[] args)
+    {
+    }
+
+    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException
+    {
+    }
+
+    private void close()
+        throws DatabaseException
+    {
+    }
+
+    private void run()
+        throws Exception
+    {
+    }
+} 
+

+ The main program uses the SampleDatabase and + SampleViews classes that were described in the preceding + sections. The main method will create an instance of the + Sample class, and call its run() and close() + methods. + +

+

+ The following statements parse the program's command line + arguments. +

+ +
    public static void main(String[] args)
+    {
+        System.out.println("\nRunning sample: " + Sample.class);
+        String homeDir = "./tmp";
+        for (int i = 0; i < args.length; i += 1)
+        {
+            String arg = args[i];
+            if (args[i].equals("-h") && i < args.length - 1)
+            {
+                i += 1;
+                homeDir = args[i];
+            }
+            else
+            {
+                System.err.println("Usage:\n java " + 
+                                   Sample.class.getName() +
+                                  "\n  [-h <home-directory>]");
+                System.exit(2);
+            }
+        }
+        ...
+    } 
+

+ The usage command is: +

+
java com.sleepycat.examples.bdb.shipment.basic.Sample
+     [-h <home-directory> ] 
+

+ The -h option is used to set the homeDir variable, which will later be passed to the SampleDatabase() constructor. Normally all Berkeley DB programs should provide a way to configure their database environment home directory.

+

+ The default for the home directory is ./tmp — the tmp + subdirectory of the current directory where the sample is run. The + home directory must exist before running the sample. To re-create + the sample database from scratch, delete all files in the home + directory before running the sample. +

+

+ The home directory was described previously in + + Opening and Closing the Database Environment + . +

+

+ Of course, the command line arguments shown are only examples + and a real-life application may use different techniques for + configuring these options. + +

+

+ The following statements create an instance of the Sample + class and call its run() and close() methods. +

+ +
    public static void main(String[] args)
+    {
+        ...
+        Sample sample = null;
+        try
+        {
+            sample = new Sample(homeDir);
+            sample.run();
+        }
+        catch (Exception e)
+        {
+            e.printStackTrace();
+        }
+        finally
+        {
+            if (sample != null)
+            {
+                try
+                {
+                    sample.close();
+                }
+                catch (Exception e)
+                {
+                    System.err.println("Exception during database close:");
+                    e.printStackTrace();
+                }
+            }
+        }
+    } 
+

+ The Sample() constructor will open the environment and + databases, and the run() method will run transactions for + storing and retrieving objects. If either of these throws an + exception, then the program was unable to run and should normally + terminate. (Transaction retries are handled at a lower level and + will be described later.) The first catch statement handles + such exceptions. +

+

+ The finally statement is used to call the close() + method since an attempt should always be made to close the environment and + databases + cleanly. If an exception is thrown during close and a prior + exception occurred above, then the exception during close is likely + a side effect of the prior exception. +

+

+ The Sample() constructor creates the SampleDatabase + and SampleViews objects. +

+ +
    private Sample(String homeDir)
+        throws DatabaseException, FileNotFoundException
+    {
+        db = new SampleDatabase(homeDir);
+        views = new SampleViews(db);
+    } 
+

+ Recall that creating the SampleDatabase object will open + the environment and all databases. +

+

+ To close the database the Sample.close() method simply + calls SampleDatabase.close(). +

+ +
     private void close()
+        throws DatabaseException
+    {
+        db.close();
+    } 
+

+ The run() method is described in the next section. +

+
+ + + diff --git a/docs/collections/tutorial/index.html b/docs/collections/tutorial/index.html new file mode 100644 index 0000000..3d2f912 --- /dev/null +++ b/docs/collections/tutorial/index.html @@ -0,0 +1,597 @@ + + + + + + Berkeley DB Java Edition Collections Tutorial + + + + + + + +
+
+
+
+

Berkeley DB Java Edition Collections Tutorial

+
+
+
+ +

+ Legal Notice +

+

+ Copyright © 2002 - 2017 Oracle and/or its affiliates. All rights + reserved. +

+

+ This software and related documentation are provided under a + license agreement containing restrictions on use and disclosure + and are protected by intellectual property laws. Except as + expressly permitted in your license agreement or allowed by + law, you may not use, copy, reproduce, translate, broadcast, + modify, license, transmit, distribute, exhibit, perform, + publish, or display any part, in any form, or by any means. + Reverse engineering, disassembly, or decompilation of this + software, unless required by law for interoperability, is + prohibited. +

+

+ The information contained herein is subject to change without + notice and is not warranted to be error-free. If you find any + errors, please report them to us in writing. +

+

+ Berkeley DB, + + Berkeley DB Java Edition + and + Sleepycat are trademarks or registered trademarks of + Oracle. All rights to these marks are reserved. + No third-party use is permitted without the + express prior written consent of Oracle. +

+

+ Other names may be trademarks of their respective owners. +

+

+ If this is software or related documentation that is delivered + to the U.S. Government or anyone licensing it on behalf of the + U.S. Government, the following notice is applicable: +

+

+ U.S. GOVERNMENT END USERS: Oracle programs, including any + operating system, integrated software, any programs installed + on the hardware, and/or documentation, delivered to U.S. + Government end users are "commercial computer software" + pursuant to the applicable Federal Acquisition Regulation and + agency-specific supplemental regulations. As such, use, + duplication, disclosure, modification, and adaptation of the + programs, including any operating system, integrated software, + any programs installed on the hardware, and/or documentation, + shall be subject to license terms and license restrictions + applicable to the programs. No other rights are granted to the + U.S. Government. +

+

+ This software or hardware is developed for general use in a + variety of information management applications. It is not + developed or intended for use in any inherently dangerous + applications, including applications that may create a risk of + personal injury. If you use this software or hardware in + dangerous applications, then you shall be responsible to take + all appropriate fail-safe, backup, redundancy, and other + measures to ensure its safe use. Oracle Corporation and its + affiliates disclaim any liability for any damages caused by use + of this software or hardware in dangerous applications. +

+

+ Oracle and Java are registered trademarks of Oracle and/or its + affiliates. Other names may be trademarks of their respective + owners. +

+

+ Intel and Intel Xeon are trademarks or registered trademarks of + Intel Corporation. All SPARC trademarks are used under license + and are trademarks or registered trademarks of SPARC + International, Inc. AMD, Opteron, the AMD logo, and the AMD + Opteron logo are trademarks or registered trademarks of + Advanced Micro Devices. UNIX is a registered trademark of The + Open Group. +

+

+ This software or hardware and documentation may provide access + to or information on content, products, and services from third + parties. Oracle Corporation and its affiliates are not + responsible for and expressly disclaim all warranties of any + kind with respect to third-party content, products, and + services. Oracle Corporation and its affiliates will not be + responsible for any loss, costs, or damages incurred due to + your access to or use of third-party content, products, or + services. +

+
+
+
+

31-Oct-2017

+
+
+
+
+
+

+ Table of Contents +

+
+
+ + Preface + +
+
+
+
+ + Conventions Used in this Book + +
+
+ + For More Information + +
+
+
+
+ + Contact Us + +
+
+
+
+
+
+ + 1. + Introduction + + +
+
+
+
+ + Features + +
+
+ + Developing a JE Collections Application + +
+
+ + Tutorial Introduction + +
+
+
+
+ + 2. + The Basic Program + + +
+
+
+
+ + + Defining Serialized Key and Value Classes + + +
+
+ + + Opening and Closing the Database Environment + + +
+
+ + + Opening and Closing the Class Catalog + + +
+
+ + + Opening and Closing Databases + + +
+
+ + + Creating Bindings and Collections + + +
+
+ + + Implementing the Main Program + + +
+
+ + + Using Transactions + + +
+
+ + + Adding Database Items + + +
+
+ + + Retrieving Database Items + + +
+
+ + + Handling Exceptions + + +
+
+
+
+ + 3. + Using Secondary Indices and Foreign keys + + +
+
+
+
+ + + Opening Secondary Key Indices + + +
+
+ + + Opening Foreign Key Indices + + + +
+
+ + + Creating Indexed Collections + + +
+
+ + + Retrieving Items by Index Key + + +
+
+
+
+ + 4. + Using Entity Classes + + +
+
+
+
+ + + Defining Entity Classes + + +
+
+ + + Creating Entity Bindings + + +
+
+ + + Creating Collections with Entity Bindings + + +
+
+ + + Using Entities with Collections + + +
+
+
+
+ + 5. + Using Tuples + + +
+
+
+
+ + + Using the Tuple Format + + +
+
+ + + Using Tuples with Key Creators + + +
+
+ + + Creating Tuple Key Bindings + + +
+
+ + +Creating Tuple-Serial Entity Bindings + + +
+
+ + + Using Sorted Collections + + +
+
+
+
+ + 6. + Using Serializable Entities + + +
+
+
+
+ + + Using Transient Fields in an Entity Class + + +
+
+ + + Using Transient Fields in an Entity Binding + + +
+
+ + + Removing the Redundant Value Classes + + +
+
+
+
+ + 7. + Summary + + +
+
+ + A. + API Notes and Details + + +
+
+
+
+ + + Using Data Bindings + + +
+
+
+
+ + + Selecting Binding Formats + + +
+
+ + + Selecting Data Bindings + + +
+
+ + + Implementing Bindings + + +
+
+ + + Using Bindings + + +
+
+ + + Secondary Key Creators + + +
+
+
+
+ + + Using the JE Collections API + + +
+
+
+
+ + + Using Transactions + + +
+
+ + + Transaction Rollback + + +
+
+ + + Access Method Restrictions + + +
+
+
+
+ + + Using Stored Collections + + +
+
+
+
+ + + Stored Collection and Access Methods + + +
+
+ + + Stored Collections Versus Standard Java Collections + + +
+
+ + + Other Stored Collection Characteristics + + +
+
+ + + Why Java Collections for Berkeley DB Java Edition + + +
+
+
+
+ + + Serialized Object Storage + + +
+
+
+
+
+
+ + + diff --git a/docs/collections/tutorial/indexedcollections.html b/docs/collections/tutorial/indexedcollections.html new file mode 100644 index 0000000..2641737 --- /dev/null +++ b/docs/collections/tutorial/indexedcollections.html @@ -0,0 +1,248 @@ + + + + + + Creating Indexed Collections + + + + + + + + + +
+
+
+
+

+ Creating Indexed Collections +

+
+
+
+

+ In the prior Basic example, bindings and Java collections were + created for accessing databases via their primary keys. In this + example, bindings and collections are added for accessing the same + databases via their index keys. As in the prior example, serial + bindings and the Java + Map + + class are used. +

+

+ When a map is created from a + + SecondaryDatabase, + + + the keys of the map will be the index keys. However, the values of + the map will be the values of the primary database associated with + the index. This is how index keys can be used to access the values + in a primary database. +

+

+ For example, the Supplier's City field is an index key that can + be used to access the Supplier database. When a map is created + using the supplierByCityDb() method, the key to the map will be the + City field, a + String + + object. When + Map.get + + is called passing the City as the key parameter, a + SupplierData + object will be returned. +

+
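+ A minimal usage sketch, relying on the getSupplierByCityMap() method defined later in this section:
+
+StoredSortedMap byCity = views.getSupplierByCityMap();
+// get() returns one matching value; with sorted duplicates this is the
+// first supplier stored for the given city
+SupplierData data = (SupplierData) byCity.get("London");
+// duplicates() returns the collection of all values for an index key
+java.util.Collection inParis = byCity.duplicates("Paris");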

+ The SampleViews class is extended to create an index key + binding for the Supplier's City field and three Java maps based on + the three indices created in the prior section. +

+ +
import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.collections.StoredEntrySet;
+import com.sleepycat.collections.StoredSortedMap;
+...
+
+public class SampleViews
+{
+    ...
+    private StoredSortedMap supplierByCityMap;
+    private StoredSortedMap shipmentByPartMap;
+    private StoredSortedMap shipmentBySupplierMap;
+    ...
+
+    public SampleViews(SampleDatabase db)
+    {
+        ClassCatalog catalog = db.getClassCatalog();
+        ...
+        EntryBinding cityKeyBinding =
+            new SerialBinding(catalog, String.class);
+        ...
+        supplierByCityMap =
+            new StoredSortedMap(db.getSupplierByCityDatabase(),
+                          cityKeyBinding, supplierDataBinding, true);
+        shipmentByPartMap =
+            new StoredSortedMap(db.getShipmentByPartDatabase(),
+                          partKeyBinding, shipmentDataBinding, true);
+        shipmentBySupplierMap =
+            new StoredSortedMap(db.getShipmentBySupplierDatabase(),
+                          supplierKeyBinding, shipmentDataBinding, true); 
+    ...
+    }
+} 
+

+ In general, the indexed maps are created here in the same way as + the unindexed maps were created in the Basic example. The + differences are: +

+
+
    +
  • +

    + The first parameter of the + StoredSortedMap + + constructor is a + SecondaryDatabase + + + rather than a + + Database. + + +

    +
  • +
  • +

    + The second parameter is the index key binding rather than the + primary key binding. +

    +
  • +
+
+

+ For the supplierByCityMap, the cityKeyBinding must + first be created. This binding was not created in the Basic example + because the City field is not a primary key. +

+

+ Like the bindings created earlier for keys and values, the + cityKeyBinding is a + SerialBinding. + Unlike the bindings created earlier, it is an example of creating a + binding for a built-in Java class, + String, + instead of an application-defined class. Any serializable class may + be used. +

+

+ For the shipmentByPartMap and + shipmentBySupplierMap, the partKeyBinding and + supplierKeyBinding are used. These were created in the Basic + example and used as the primary key bindings for the partMap + and supplierMap. +

+

+ The value bindings — supplierDataBinding and + shipmentDataBinding — were also created in the Basic + example. +

+

+ This illustrates that bindings and formats may and should be + reused where appropriate for creating maps and other + collections. +

+

+ The following getter methods return the stored maps for use by + other classes in the example program. Convenience methods for + returning entry sets are also included. +

+ +
public class SampleViews
+{
+    ...
+    public final StoredSortedMap getShipmentByPartMap()
+    {
+        return shipmentByPartMap;
+    }
+
+    public final StoredSortedMap getShipmentBySupplierMap()
+    {
+        return shipmentBySupplierMap;
+    }
+
+    public final StoredSortedMap getSupplierByCityMap()
+    {
+        return supplierByCityMap;
+    }
+
+    public final StoredEntrySet getShipmentByPartEntrySet()
+    {
+        return (StoredEntrySet) shipmentByPartMap.entrySet();
+    }
+
+    public final StoredEntrySet getShipmentBySupplierEntrySet()
+    {
+        return (StoredEntrySet) shipmentBySupplierMap.entrySet();
+    }
+
+    public final StoredEntrySet getSupplierByCityEntrySet()
+    {
+        return (StoredEntrySet) supplierByCityMap.entrySet();
+    }
+    ...
+} 
+
+ + + diff --git a/docs/collections/tutorial/intro.html b/docs/collections/tutorial/intro.html new file mode 100644 index 0000000..a04b369 --- /dev/null +++ b/docs/collections/tutorial/intro.html @@ -0,0 +1,205 @@ + + + + + + Chapter 1.  Introduction + + + + + + + + + +
+
+
+
+

Chapter 1.  + Introduction +

+
+
+
+
+

+ Table of Contents +

+
+
+ + Features + +
+
+ + Developing a JE Collections Application + +
+
+ + Tutorial Introduction + +
+
+
+

+ The JE Collections API is a Java framework that extends the well known Java Collections design pattern such that collections can now be stored, updated and queried in a transactional manner. The JE Collections API is a layer on top of JE.

+

+ Together the JE Collections API and Berkeley DB Java Edition provide an embedded data management solution with all the benefits of full transactional storage and the simplicity of a well known Java API. Java programmers who need fast, scalable, transactional data management for their projects can quickly adopt and deploy the JE Collections API with confidence.

+

+ This framework was first known as Greybird DB, written by Mark Hayes. Mark collaborated with us to permanently incorporate his excellent work into our distribution and to support it as an ongoing part of Berkeley DB and Berkeley DB Java Edition. The repository of source code that remains at SourceForge at version 0.9.0 is considered the last version before incorporation and will remain intact, but will not be updated to reflect changes made as part of Berkeley DB or Berkeley DB Java Edition.

+
+
+
+
+

Features

+
+
+
+

+ JE provides a Java API that can be roughly described as a map and cursor interface, where the keys and values are represented as byte arrays. The JE Collections API is a layer on top of JE. It adds significant new functionality in several ways.

+
+
    +
  • +

    + An implementation of the Java Collections interfaces (Map, + SortedMap, Set, SortedSet, + + and Iterator) is provided. +

    +
  • +
  • +

    + Transactions are supported using the conventional Java + transaction-per-thread model, where the current transaction is + implicitly associated with the current thread. +

    +
  • +
  • +

    + Transaction runner utilities are provided that automatically + perform transaction retry and exception handling. +

    +
  • +
  • +

    + Keys and values are represented as Java objects rather than + byte arrays. Bindings are used to map between Java objects and the + stored byte arrays. +

    +
  • +
  • +

    + The tuple data format is provided as the simplest data + representation, and is useful for keys as well as simple compact + values. +

    +
  • +
  • +

    + The serial data format is provided for storing arbitrary Java + objects without writing custom binding code. Java serialization is + extended to store the class descriptions separately, making the + data records much more compact than with standard Java + serialization. +

    +
  • +
  • +

    + Custom data formats and bindings can be easily added. XML data + format and XML bindings could easily be created using this feature, + for example. +

    +
  • +
+
+

+ Note that the JE Collections API does not support caching of programming language objects, nor does it keep track of their stored status. This is in contrast to "persistent object" approaches such as those defined by ODMG and JDO (JSR 12). Such approaches have benefits but also require sophisticated object caching. For simplicity the JE Collections API treats data objects by value, not by reference, and does not perform object caching of any kind. Since the JE Collections API is a thin layer, its reliability and performance characteristics are roughly equivalent to those of Berkeley DB, and database tuning is accomplished in the same way as for any Berkeley DB database.

+
+
+ + + diff --git a/docs/collections/tutorial/moreinfo.html b/docs/collections/tutorial/moreinfo.html new file mode 100644 index 0000000..580410a --- /dev/null +++ b/docs/collections/tutorial/moreinfo.html @@ -0,0 +1,155 @@ + + + + + + For More Information + + + + + + + + + +
+
+
+
+

For More Information

+
+
+
+
+
+
+ + Contact Us + +
+
+
+

+ Beyond this manual, you may also find the following sources of information useful when building a + JE application: +

+ + +

+ To download the latest + + Berkeley DB Java Edition + + documentation along with white papers and other collateral, + visit http://www.oracle.com/technetwork/indexes/documentation/index.html. +

+

+ For the latest version of the Oracle + + Berkeley DB Java Edition + + downloads, visit + http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html. +

+
+
+
+
+
+

Contact Us

+
+
+
+

+ You can post your comments and questions at the Oracle Technology Network (OTN) forum for Oracle Berkeley DB Java Edition at: https://forums.oracle.com/forums/forum.jspa?forumID=273.

+

+ For sales or support information, email: berkeleydb-info_us@oracle.com. You can subscribe to a low-volume email announcement list for the Berkeley DB product family by sending email to: bdb-join@oss.oracle.com.

+
+
+ + + diff --git a/docs/collections/tutorial/openclasscatalog.html b/docs/collections/tutorial/openclasscatalog.html new file mode 100644 index 0000000..e5c7273 --- /dev/null +++ b/docs/collections/tutorial/openclasscatalog.html @@ -0,0 +1,198 @@ + + + + + + Opening and Closing the Class Catalog + + + + + + + + + +
+
+
+
+

+ Opening and Closing the Class Catalog +

+
+
+
+

+ This section describes how to open and close the Java class + catalog. The class catalog is a specialized database store that + contains the Java class descriptions of the serialized objects that + are stored in the database. The class descriptions are stored in + the catalog rather than storing them redundantly in each database + record. A single class catalog per environment must be opened + whenever serialized objects will be stored in the database. +

+

+ The SampleDatabase class is extended to open and close + the class catalog. The following additional imports and class + members are needed. +

+ +
import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import java.io.File;
+import java.io.FileNotFoundException;
+
+...
+
+public class SampleDatabase
+{
+    private Environment env;
+    private static final String CLASS_CATALOG = "java_class_catalog";
+    ...
+    private StoredClassCatalog javaCatalog;
+    ...
+} 
+

+ While the class catalog is itself a database, it contains metadata for other databases and is therefore treated specially by the JE Collections API. The StoredClassCatalog class encapsulates the catalog store and implements this special behavior.

+

+ The following statements open the class catalog by creating a + Database and a StoredClassCatalog object. The catalog + database is created if it does not already exist. +

+ +
    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+
+        Database catalogDb = env.openDatabase(null, CLASS_CATALOG, 
+                                              dbConfig);
+
+        javaCatalog = new StoredClassCatalog(catalogDb);
+        ...
+    }
+    ...
+    public final StoredClassCatalog getClassCatalog() {
+        return javaCatalog;
+    } 
+

+ The + DatabaseConfig + + + class is used to specify configuration parameters when opening a + database. The first configuration option specified — + setTransactional() — is set to true to create a transactional + database. While non-transactional databases can also be created, + the examples in this tutorial use transactional databases. +

+

+ setAllowCreate() is set to true to specify + that the database will be created if it does not already exist. If + this parameter is not specified, an exception will be thrown if the + database does not already exist. +

+

+ The first parameter of the openDatabase() method is an + optional transaction that is used for creating a new database. If + null is passed, auto-commit is used when creating a database. +

+

+ The second parameter of openDatabase() specifies the database name and must not be null.

+

+ The last parameter of openDatabase() specifies the database + configuration object. +

+

+ Lastly, the StoredClassCatalog object is created to manage the + information in the class catalog database. The + StoredClassCatalog object will be used in the sections + following for creating serial bindings. +

+

+ The getClassCatalog method returns the catalog object for + use by other classes in the example program. +

+

+ When the environment is closed, the class catalog is closed + also. +

+ +
    public void close()
+        throws DatabaseException
+    {
+        javaCatalog.close();
+        env.close();
+    } 
+

+ The StoredClassCatalog.close() method simply closes the underlying class catalog database; in fact, the Database.close() method may be called instead, if desired. It is recommended that you close the catalog database and all other databases before closing the environment.

+
+ + + diff --git a/docs/collections/tutorial/opendatabases.html b/docs/collections/tutorial/opendatabases.html new file mode 100644 index 0000000..643f2a6 --- /dev/null +++ b/docs/collections/tutorial/opendatabases.html @@ -0,0 +1,167 @@ + + + + + + Opening and Closing Databases + + + + + + + + + +
+
+
+
+

+ Opening and Closing Databases +

+
+
+
+

+ This section describes how to open and close the Part, Supplier + and Shipment databases. A database is a collection of + records, each of which has a key and a value. The keys and values + are stored in a selected format, which defines the syntax of the + stored data. Two examples of formats are Java serialization format + and tuple format. In a given database, all keys have the same + format and all values have the same format. +

+

+ The SampleDatabase class is extended to open and close + the three databases. The following additional class members are + needed. +

+ +
public class SampleDatabase
+{
+    ...
+    private static final String SUPPLIER_STORE = "supplier_store";
+    private static final String PART_STORE = "part_store";
+    private static final String SHIPMENT_STORE = "shipment_store";
+    ...
+    private Database supplierDb;
+    private Database partDb;
+    private Database shipmentDb;
+    ...
+} 
+

+ For each database there is a database name constant and a + Database object. +

+

+ The following statements open the three databases by + constructing a Database object. +

+ +
    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        ...
+        partDb = env.openDatabase(null, PART_STORE, dbConfig);
+        supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig);
+        shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig);
+        ...
+    } 
+

+ The database configuration object that was used previously for + opening the catalog database is reused for opening the three + databases above. The databases are created if they don't already + exist. The parameters of the openDatabase() method were + described earlier when the class catalog database was opened. +

+

+ The following statements close the three databases. +

+ +
    public void close()
+        throws DatabaseException
+    {
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        javaCatalog.close();
+        env.close();
+    } 
+

+ It is recommended that all databases, including the catalog database, are closed before closing the environment.

+

+ The following getter methods return the databases for use by + other classes in the example program. +

+ +
public class SampleDatabase
+{
+    ...
+    public final Database getPartDatabase()
+    {
+        return partDb;
+    }
+
+    public final Database getSupplierDatabase()
+    {
+        return supplierDb;
+    }
+
+    public final Database getShipmentDatabase()
+    {
+        return shipmentDb;
+    }
+    ...
+}
+
+ + + diff --git a/docs/collections/tutorial/opendbenvironment.html b/docs/collections/tutorial/opendbenvironment.html new file mode 100644 index 0000000..290e723 --- /dev/null +++ b/docs/collections/tutorial/opendbenvironment.html @@ -0,0 +1,197 @@ + + + + + + Opening and Closing the Database Environment + + + + + + + + + +
+
+
+
+

+ Opening and Closing the Database Environment +

+
+
+
+

+ This section of the tutorial describes how to open and close the + database environment. The database environment manages resources + (for example, memory, locks and transactions) for any number of + databases. A single environment instance is normally used for all + databases. +

+

+ The SampleDatabase class is used to open and close the + environment. It will also be used in following sections to open and + close the class catalog and other databases. Its constructor is + used to open the environment and its close() method is used + to close the environment. The skeleton for the + SampleDatabase class follows. +

+ +
import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import java.io.File;
+import java.io.FileNotFoundException;
+
+public class SampleDatabase
+{
+    private Environment env;
+
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+    }
+
+    public void close()
+        throws DatabaseException
+    {
+    }
+} 
+

+ The first thing to notice is that the Environment class is in the com.sleepycat.je package, not the com.sleepycat.collections package. The com.sleepycat.je package contains all core Berkeley DB functionality. The com.sleepycat.collections package contains extended functionality that is based on the Java Collections API. The collections package is layered on top of the com.sleepycat.je package. Both packages are needed to create a complete application based on the JE Collections API.

+

+ The following statements create an + Environment + + + object. +

+ +
public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        System.out.println("Opening environment in: " + homeDirectory);
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+
+        env = new Environment(new File(homeDirectory), envConfig);
+    } 
+

+ The + EnvironmentConfig + + + class is used to specify environment configuration parameters. The + first configuration option specified — setTransactional() — + is set to true to create an environment where transactional (and + non-transactional) databases may be opened. While non-transactional + environments can also be created, the examples in this tutorial use + a transactional environment. +

+

+ setAllowCreate() is set to true to specify + that the environment's files will be created if they don't already + exist. If this parameter is not specified, an exception will be + thrown if the environment does not already exist. A similar + parameter will be used later to cause databases to be created if + they don't exist. +

+

+ When an Environment object is constructed, a home + directory and the environment configuration object are specified. + The home directory is the location of the environment's log files + that store all database information. +

+

+ The following statement closes the environment. The environment + must be closed when database work is completed to free + allocated resources and to avoid having to run recovery later. + It is recommended that databases are closed before closing the + environment. +

+ +
    public void close()
+        throws DatabaseException
+    {
+        env.close();
+    } 
+

+ The following getter method returns the environment for use by + other classes in the example program. The environment is used for + opening databases and running transactions. +

+ +
public class SampleDatabase
+{
+    ...
+    public final Environment getEnvironment()
+    {
+        return env;
+    }
+    ...
+} 
+
+ + + diff --git a/docs/collections/tutorial/openingforeignkeys.html b/docs/collections/tutorial/openingforeignkeys.html new file mode 100644 index 0000000..4629998 --- /dev/null +++ b/docs/collections/tutorial/openingforeignkeys.html @@ -0,0 +1,339 @@ + + + + + + Opening Foreign Key Indices + + + + + + + + + +
+
+
+
+

+ Opening Foreign Key Indices + +

+
+
+
+

+ This section builds on the prior section describing secondary key indices to show how to open foreign key indices. A foreign key index is a secondary key index that also provides integrity constraints. When the primary key of a record in one database is embedded in the value of a record in another database, integrity constraints ensure that the record in the first database exists, i.e., that there are no "dangling pointers". In this example the Shipment's PartNumber and SupplierNumber fields will be used as foreign keys.

+

+ When a foreign key index is defined, a "delete action" parameter is specified. This parameter determines what action is taken by the Berkeley DB Java Edition API when a record to which a foreign key refers is deleted. For example, consider what happens to a Shipment record when a Part or Supplier record that is referred to by that Shipment is deleted. There are three possibilities.

+
+
    +
  • +

+ ForeignKeyDeleteAction.ABORT specifies that the transaction should be aborted by throwing an exception. The effect is that deleting a Part or Supplier that is referred to by one or more Shipments will not be possible. The Berkeley DB Java Edition API will automatically throw a DatabaseException, which should normally cause the transaction to be aborted during exception processing. This is the default delete action if none is specified.

    +
  • +
  • +

+ ForeignKeyDeleteAction.NULLIFY specifies that the Part or Supplier Number field in the Shipment record should be cleared, or set to a null or empty value. The effect is that the deleted Part or Supplier will no longer be referenced by any Shipment record. This option applies when the foreign key field is optional, i.e., when the application allows it to be set to a null or empty value. When using this option, the application must implement the nullifyForeignKey() method of the ForeignKeyNullifier interface.

    +
  • +
  • +

    + ForeignKeyDeleteAction.CASCADE + + specifies that the Shipment record should be deleted also. The + effect is that deleting a Part or Supplier will delete all + Shipments for that Part or Supplier. This option applies when the + deleted record is considered the "parent" or "owner" of the record + containing the foreign key, and is used in this example. Since + deleting the Shipment record could cause other deletions if other + records contain the foreign key of the Shipment, and so on, the + term "cascade" is used to describe the effect. +

    +
  • +
+
+

+ The SampleDatabase class is extended to open the + Shipment-by-Part and Shipment-by-Supplier secondary key + indices. +

+ +
import com.sleepycat.bind.serial.SerialSerialKeyCreator;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+...
+public class SampleDatabase
+{
+    ...
+    private static final String SHIPMENT_PART_INDEX = 
+        "shipment_part_index";
+    private static final String SHIPMENT_SUPPLIER_INDEX = 
+        "shipment_supplier_index";
+    ...
+    private SecondaryDatabase shipmentByPartDb;
+    private SecondaryDatabase shipmentBySupplierDb;
+    ...
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+        ...
+        secConfig.setForeignKeyDatabase(partDb);
+        secConfig.setForeignKeyDeleteAction(
+                                         ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(
+            new ShipmentByPartKeyCreator(javaCatalog,
+                                         ShipmentKey.class,
+                                         ShipmentData.class,
+                                         PartKey.class));
+        shipmentByPartDb = env.openSecondaryDatabase(null, 
+                                                     SHIPMENT_PART_INDEX,
+                                                     shipmentDb,
+                                                     secConfig);
+
+        secConfig.setForeignKeyDatabase(supplierDb);
+        secConfig.setForeignKeyDeleteAction(
+                                         ForeignKeyDeleteAction.CASCADE);
+        secConfig.setKeyCreator(
+            new ShipmentBySupplierKeyCreator(javaCatalog,
+                                             ShipmentKey.class,
+                                             ShipmentData.class,
+                                             SupplierKey.class));
+        shipmentBySupplierDb = env.openSecondaryDatabase(null,
+                                                SHIPMENT_SUPPLIER_INDEX,
+                                                shipmentDb,
+                                                secConfig);
+    ...
+    }
+} 
+

+ If you compare these statements for opening foreign key indices + to the statements used in the previous section for opening a + secondary index, you'll notice that the only significant difference + is that the setForeignKeyDatabase() + and + setForeignKeyDeleteAction() methods are called. + setForeignKeyDatabase() specifies the foreign database that + contains the records to which the foreign keys refer; this + configures the secondary database as a foreign key index. + setForeignKeyDeleteAction() specifies the delete action. +

+

+ The application-defined ShipmentByPartKeyCreator + and ShipmentBySupplierKeyCreator classes are shown below. They + were used above to configure the secondary database objects. +

+ +
public class SampleDatabase
+{
+...
+    private static class ShipmentByPartKeyCreator
+        extends SerialSerialKeyCreator
+    {
+        private ShipmentByPartKeyCreator(ClassCatalog catalog,
+                                         Class primaryKeyClass,
+                                         Class valueClass,
+                                         Class indexKeyClass)
+        {
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput)
+        {
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new PartKey(shipmentKey.getPartNumber());
+        }
+    }
+
+    private static class ShipmentBySupplierKeyCreator
+        extends SerialSerialKeyCreator
+    {
+        private ShipmentBySupplierKeyCreator(ClassCatalog catalog,
+                                             Class primaryKeyClass,
+                                             Class valueClass,
+                                             Class indexKeyClass)
+        {
+            super(catalog, primaryKeyClass, valueClass, indexKeyClass);
+        }
+
+        public Object createSecondaryKey(Object primaryKeyInput,
+                                         Object valueInput)
+        {
+            ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput;
+            return new SupplierKey(shipmentKey.getSupplierNumber());
+        }
+    }
+    ...
+} 
+

+ The key creator classes above are almost identical to the one + defined in the previous section for use with a secondary index. The + index key fields are different, of course, but the interesting + difference is that the index keys are extracted from the key, not + the value, of the Shipment record. This illustrates that an index + key may be derived from the primary database record key, value, or + both. +

+

+ Note that the SerialSerialKeyCreator.nullifyForeignKey method is not overridden above. This is because ForeignKeyDeleteAction.NULLIFY was not used when creating the SecondaryDatabase objects. If it were used, the nullifyForeignKey() methods would have to be implemented to set the part number and supplier number to null in the Shipment key. But record keys cannot be changed, and in fact only the primary value, not the primary key, is passed to the SerialSerialKeyCreator.nullifyForeignKey() method. Therefore, if a foreign index key is derived from the primary key, ForeignKeyDeleteAction.NULLIFY may not be used. +
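+ For illustration only, here is a minimal sketch of what a nullifyForeignKey() override could look like if the foreign key were carried in the value instead of the key. The ShipmentData2 class and its setPartNumber() method are hypothetical, invented for this sketch; they are not part of the example program.

    public Object nullifyForeignKey(Object valueInput)
    {
        // Hypothetical: assumes the part number is stored in the value.
        ShipmentData2 data = (ShipmentData2) valueInput;
        data.setPartNumber(null);  // clear the foreign key field
        return data;               // return the modified value to store
    }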

+

+ The following getter methods return the secondary database + objects for use by other classes in the example program. +

+ +
public class SampleDatabase
+{
+    ...
+    public final SecondaryDatabase getShipmentByPartDatabase()
+    {
+        return shipmentByPartDb;
+    }
+
+    public final SecondaryDatabase getShipmentBySupplierDatabase()
+    {
+        return shipmentBySupplierDb;
+    }
+    ...
+} 
+

+ The following statements close the secondary databases. +

+ +
public class SampleDatabase
+{
+    ...
+    public void close()
+        throws DatabaseException {
+
+        supplierByCityDb.close();
+        shipmentByPartDb.close();
+        shipmentBySupplierDb.close();
+        partDb.close();
+        supplierDb.close();
+        shipmentDb.close();
+        javaCatalog.close();
+        env.close();
+    }
+    ...
+} 
+

+ Secondary databases must be closed before closing their + associated primary database. +

+
+ + + diff --git a/docs/collections/tutorial/preface.html b/docs/collections/tutorial/preface.html new file mode 100644 index 0000000..dbcdf6d --- /dev/null +++ b/docs/collections/tutorial/preface.html @@ -0,0 +1,142 @@ + + + + + + Preface + + + + + + + + + +
+
+
+
+

Preface

+
+
+
+
+

+ Table of Contents +

+
+
+ + Conventions Used in this Book + +
+
+ + For More Information + +
+
+
+
+ + Contact Us + +
+
+
+
+
+

+ Welcome to the Berkeley DB Java Edition (JE) Collections API. This document provides a tutorial that introduces the Collections API. Its goal is to help you quickly become proficient with this API. As such, this document is intended for Java developers and senior software architects who are looking for transactionally protected backing of their Java collections. No prior experience with JE technologies is expected or required. +

+
+
+
+
+

Conventions Used in this Book

+
+
+
+

+ The following typographical conventions are used within this manual: +

+

+ Class names are represented in monospaced font, as are method + names. For example: "The Environment.openDatabase() method + returns a Database class object." +

+

+ Variable or non-literal text is presented in italics. For example: "Go to your + JE_HOME directory." +

+

+ Program examples are displayed in a monospaced font on a shaded background. For example: +

+
import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not already 
+// exist.
+Environment myDbEnvironment;
+

+ In some situations, programming examples are updated from one chapter to the next. When this occurs, the new code is presented in monospaced bold font. For example: +

+
import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import java.io.File;
+
+...
+
+// Open the environment. Allow it to be created if it does not already 
+// exist.
+Environment myDbEnv;
+EnvironmentConfig envConfig = new EnvironmentConfig();
+envConfig.setAllowCreate(true);
+myDbEnv = new Environment(new File("/export/dbEnv"), envConfig); 
+
+
+ + + diff --git a/docs/collections/tutorial/removingredundantvalueclasses.html b/docs/collections/tutorial/removingredundantvalueclasses.html new file mode 100644 index 0000000..83922e2 --- /dev/null +++ b/docs/collections/tutorial/removingredundantvalueclasses.html @@ -0,0 +1,131 @@ + + + + + + Removing the Redundant Value Classes + + + + + + + + + +
+
+
+
+

+ Removing the Redundant Value Classes +

+
+
+
+

+ The PartData, SupplierData and ShipmentData + classes have been removed in this example, and the Part, + Supplier and Shipment entity classes are used in + their place. +

+

+ The serial formats are created with the entity classes. +

+ +
public class SampleDatabase
+{
+    ...
+    public SampleDatabase(String homeDirectory)
+        throws DatabaseException, FileNotFoundException
+    {
+        ...
+        secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog,
+                                                     Supplier.class));
+        ...
+        secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog,
+                                                     Shipment.class));
+        ...
+        secConfig.setKeyCreator(new 
+                                ShipmentBySupplierKeyCreator(javaCatalog,
+                                                     Shipment.class));
+        ...
+    }
+} 
+

+ The index key creator uses the entity class as well. +

+ +
public class SampleDatabase
+{
+    ...
+
+    private static class SupplierByCityKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                         Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            Supplier supplier = (Supplier) valueInput;
+            String city = supplier.getCity();
+            if (city != null) {
+                indexKeyOutput.writeString(supplier.getCity());
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+} 
+
+ + + diff --git a/docs/collections/tutorial/retrievingbyindexkey.html b/docs/collections/tutorial/retrievingbyindexkey.html new file mode 100644 index 0000000..fabd19b --- /dev/null +++ b/docs/collections/tutorial/retrievingbyindexkey.html @@ -0,0 +1,279 @@ + + + + + + Retrieving Items by Index Key + + + + + + + + + +
+
+
+
+

+ Retrieving Items by Index Key +

+
+
+
+

+ Retrieving information via database index keys can be + accomplished using the standard Java collections API, using a + collection created from a + SecondaryDatabase + + + rather than a + + Database. + + + However, the standard Java API does not support duplicate keys: more + than one element in a collection having the same key. All three + indices created in the prior section have duplicate keys because of + the nature of the city, part number and supplier number index keys. + More than one supplier may be in the same city, and more than one + shipment may have the same part number or supplier number. This + section describes how to use extended methods for stored + collections to return all values for a given key. +

+

+ Using the standard Java collections API, the + Map.get + + method for a stored collection with duplicate keys will return only + the first value for a given key. To obtain all values for a given + key, the + StoredSortedMap.duplicates + + method may be called. This returns a + Collection + + of values for the given key. If duplicate keys are not allowed, the + returned collection will have at most one value. If the key is not + present in the map, an empty collection is returned. +

+

+ The Sample class is extended to retrieve duplicates for + specific index keys that are present in the database. +

+ +
import java.util.Iterator;
+...
+public class Sample
+{
+    ...
+    private SampleViews views;
+    ...
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            printEntries("Parts",
+                          views.getPartEntrySet().iterator());
+            printEntries("Suppliers",
+                          views.getSupplierEntrySet().iterator());
+            printValues("Suppliers for City Paris",
+                         views.getSupplierByCityMap().duplicates(
+                                            "Paris").iterator());
+            printEntries("Shipments",
+                          views.getShipmentEntrySet().iterator());
+            printValues("Shipments for Part P1",
+                         views.getShipmentByPartMap().duplicates(
+                                            new PartKey("P1")).iterator());
+            printValues("Shipments for Supplier S1",
+                         views.getShipmentBySupplierMap().duplicates(
+                                            new
+                                            SupplierKey("S1")).iterator());
+        }
+    }
+
+    private void printValues(String label, Iterator iterator)
+    {
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext())
+        {
+                System.out.println(iterator.next().toString());
+        }
+     } 
+    ...
+} 
+

+ The + StoredSortedMap.duplicates + + method is called passing the desired key. The returned value is a + standard Java + Collection + + containing the values for the specified key. A standard Java + Iterator + + is then obtained for this collection and all values returned by + that iterator are printed. +

+

+ Another technique for retrieving duplicates is to use the + collection returned by + Map.entrySet. + When duplicate keys are present, a + Map.Entry + + object will be present in this collection for each duplicate. This + collection can then be iterated or a subset can be created from it, + all using the standard Java collection API. +
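+ As a brief sketch of this technique, using the Shipment-by-Part map from the example above (raw types are used to match the example's pre-generics style, and the imports shown earlier are assumed):

    Iterator entries =
        views.getShipmentByPartMap().entrySet().iterator();
    while (entries.hasNext()) {
        // One Map.Entry is present for each duplicate of a key.
        Map.Entry entry = (Map.Entry) entries.next();
        System.out.println(entry.getKey());
        System.out.println(entry.getValue());
    }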

+

+ Note that we did not discuss how duplicate keys can be explicitly added or removed in a collection. For index keys, the addition and deletion of duplicate keys happens automatically when records containing the index key are added, updated, or removed. +

+

+ While not shown in the example program, it is also possible to create a store with duplicate keys in the same way as an index with duplicate keys — by calling the DatabaseConfig.setSortedDuplicates() method. In that case, calling Map.put will add duplicate keys. To remove all duplicate keys for a given key, call Map.remove. To remove a specific duplicate key, call StoredSortedMap.duplicates and then call Collection.remove using the returned collection. Duplicate values may also be added to this collection using Collection.add. +
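+ A minimal sketch of these operations follows, assuming a StoredSortedMap backed by a store opened with sorted duplicates; the map, key and value parameters are placeholders rather than classes from the example program:

    import com.sleepycat.collections.StoredSortedMap;
    import java.util.Collection;

    class DuplicateStoreSketch
    {
        static void update(StoredSortedMap map,
                           Object key, Object value1, Object value2)
        {
            map.put(key, value1);
            map.put(key, value2);      // adds a duplicate for the key

            Collection dups = map.duplicates(key);
            dups.remove(value1);       // remove one specific duplicate
            dups.add(value1);          // add a duplicate value

            map.remove(key);           // removes all values for the key
        }
    }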

+

+ The output of the example program is shown below. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+PartKey: number=P1
+PartData: name=Nut color=Red weight=[12.0 grams] city=London
+PartKey: number=P2
+PartData: name=Bolt color=Green weight=[17.0 grams] city=Paris
+PartKey: number=P3
+PartData: name=Screw color=Blue weight=[17.0 grams] city=Rome
+PartKey: number=P4
+PartData: name=Screw color=Red weight=[14.0 grams] city=London
+PartKey: number=P5
+PartData: name=Cam color=Blue weight=[12.0 grams] city=Paris
+PartKey: number=P6
+PartData: name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+SupplierKey: number=S1
+SupplierData: name=Smith status=20 city=London
+SupplierKey: number=S2
+SupplierData: name=Jones status=10 city=Paris
+SupplierKey: number=S3
+SupplierData: name=Blake status=30 city=Paris
+SupplierKey: number=S4
+SupplierData: name=Clark status=20 city=London
+SupplierKey: number=S5
+SupplierData: name=Adams status=30 city=Athens
+
+--- Suppliers for City Paris ---
+SupplierData: name=Jones status=10 city=Paris
+SupplierData: name=Blake status=30 city=Paris
+
+--- Shipments ---
+ShipmentKey: supplier=S1 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S2 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S2 part=P2
+ShipmentData: quantity=400
+ShipmentKey: supplier=S3 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S1 part=P3
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P4
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P4
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P5
+ShipmentData: quantity=100
+ShipmentKey: supplier=S4 part=P5
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P6
+ShipmentData: quantity=100 
+
+--- Shipments for Part P1 ---
+ShipmentData: quantity=300
+ShipmentData: quantity=300
+
+--- Shipments for Supplier S1 ---
+ShipmentData: quantity=300
+ShipmentData: quantity=200
+ShipmentData: quantity=400
+ShipmentData: quantity=200
+ShipmentData: quantity=100
+ShipmentData: quantity=100 
+
+ + + diff --git a/docs/collections/tutorial/retrievingdatabaseitems.html b/docs/collections/tutorial/retrievingdatabaseitems.html new file mode 100644 index 0000000..4044d99 --- /dev/null +++ b/docs/collections/tutorial/retrievingdatabaseitems.html @@ -0,0 +1,216 @@ + + + + + + Retrieving Database Items + + + + + + + + + +
+
+
+
+

+ Retrieving Database Items +

+
+
+
+

+ Retrieving information from the database is accomplished via the standard Java collections API. In the example, the Set.iterator method is used to iterate all Map.Entry objects for each store. All standard Java methods for retrieving objects from a collection may be used with the JE Collections API. +

+

+ The PrintDatabase.doWork() method calls + printEntries() + to print the map entries for each database store. It is called via + the + TransactionRunner + + class and was outlined in the previous section. +

+ +
import java.util.Iterator;
+...
+public class Sample
+{
+    ...
+    private SampleViews views;
+    ...
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+            printEntries("Parts",
+                          views.getPartEntrySet().iterator());
+            printEntries("Suppliers",
+                          views.getSupplierEntrySet().iterator());
+            printEntries("Shipments",
+                          views.getShipmentEntrySet().iterator());
+        }
+    }
+    ...
+
+    private void printEntries(String label, Iterator iterator)
+    {
+    }
+    ...
+} 
+

+ The + Set + + of + Map.Entry + + objects for each store is obtained from the SampleViews + object. This set can also be obtained by calling the + Map.entrySet + + method of a stored map. +

+

+ The printEntries() method prints the map entries for any stored map. The Object.toString method of each key and value is called to obtain a printable representation of each object. +

+ +
    private void printEntries(String label, Iterator iterator)
+    {
+        System.out.println("\n--- " + label + " ---");
+        while (iterator.hasNext())
+        {
+            Map.Entry entry = (Map.Entry) iterator.next();
+            System.out.println(entry.getKey().toString());
+            System.out.println(entry.getValue().toString());
+        }
+    } 
+

+ This is one of a small number of behavioral differences between + standard Java collections and stored collections. For a complete + list see + + Using Stored Collections + . +

+

+ The output of the example program is shown below. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+PartKey: number=P1
+PartData: name=Nut color=Red weight=[12.0 grams] city=London
+PartKey: number=P2
+PartData: name=Bolt color=Green weight=[17.0 grams] city=Paris
+PartKey: number=P3
+PartData: name=Screw color=Blue weight=[17.0 grams] city=Rome
+PartKey: number=P4
+PartData: name=Screw color=Red weight=[14.0 grams] city=London
+PartKey: number=P5
+PartData: name=Cam color=Blue weight=[12.0 grams] city=Paris
+PartKey: number=P6
+PartData: name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+SupplierKey: number=S1
+SupplierData: name=Smith status=20 city=London
+SupplierKey: number=S2
+SupplierData: name=Jones status=10 city=Paris
+SupplierKey: number=S3
+SupplierData: name=Blake status=30 city=Paris
+SupplierKey: number=S4
+SupplierData: name=Clark status=20 city=London
+SupplierKey: number=S5
+SupplierData: name=Adams status=30 city=Athens
+
+--- Shipments ---
+ShipmentKey: supplier=S1 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S2 part=P1
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S2 part=P2
+ShipmentData: quantity=400
+ShipmentKey: supplier=S3 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P2
+ShipmentData: quantity=200
+ShipmentKey: supplier=S1 part=P3
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P4
+ShipmentData: quantity=200
+ShipmentKey: supplier=S4 part=P4
+ShipmentData: quantity=300
+ShipmentKey: supplier=S1 part=P5
+ShipmentData: quantity=100
+ShipmentKey: supplier=S4 part=P5
+ShipmentData: quantity=400
+ShipmentKey: supplier=S1 part=P6
+ShipmentData: quantity=100 
+
+ + + diff --git a/docs/collections/tutorial/sortedcollections.html b/docs/collections/tutorial/sortedcollections.html new file mode 100644 index 0000000..90c17ff --- /dev/null +++ b/docs/collections/tutorial/sortedcollections.html @@ -0,0 +1,137 @@ + + + + + + Using Sorted Collections + + + + + + + + + +
+
+
+
+

+ Using Sorted Collections +

+
+
+
+

+ In general, no changes to the prior example are necessary to use + collections having tuple keys. Iteration of elements in a stored + collection will be ordered by the sort order of the tuples. +

+

+ Although not shown in the example, all methods of the + SortedMap + + and + SortedSet + + interfaces may be used with sorted collections. For example, + submaps and subsets may be created. +
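+ For instance, a range view can be created from one of the example's sorted maps. The sketch below assumes the part map and the PartKey class from the example; the key values are illustrative only:

    import com.sleepycat.collections.StoredSortedMap;
    import java.util.SortedMap;

    class SubMapSketch
    {
        static void printRanges(StoredSortedMap partMap)
        {
            // Parts with "P2" <= key < "P5"
            SortedMap middle =
                partMap.subMap(new PartKey("P2"), new PartKey("P5"));
            // Parts with key >= "P4"
            SortedMap tail = partMap.tailMap(new PartKey("P4"));
            System.out.println(middle.keySet());
            System.out.println(tail.keySet());
        }
    }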

+

+ The output of the example program shows that records are sorted + by key value. +

+
Adding Suppliers
+Adding Parts
+Adding Shipments
+
+--- Parts ---
+Part: number=P1 name=Nut color=Red weight=[12.0 grams] city=London
+Part: number=P2 name=Bolt color=Green weight=[17.0 grams] city=Paris
+Part: number=P3 name=Screw color=Blue weight=[17.0 grams] city=Rome
+Part: number=P4 name=Screw color=Red weight=[14.0 grams] city=London
+Part: number=P5 name=Cam color=Blue weight=[12.0 grams] city=Paris
+Part: number=P6 name=Cog color=Red weight=[19.0 grams] city=London
+
+--- Suppliers ---
+Supplier: number=S1 name=Smith status=20 city=London
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+Supplier: number=S4 name=Clark status=20 city=London
+Supplier: number=S5 name=Adams status=30 city=Athens
+
+--- Suppliers for City Paris ---
+Supplier: number=S2 name=Jones status=10 city=Paris
+Supplier: number=S3 name=Blake status=30 city=Paris
+
+--- Shipments ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P2 supplier=S2 quantity=400
+Shipment: part=P2 supplier=S3 quantity=200
+Shipment: part=P2 supplier=S4 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P4 supplier=S4 quantity=300
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P5 supplier=S4 quantity=400
+Shipment: part=P6 supplier=S1 quantity=100
+
+--- Shipments for Part P1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P1 supplier=S2 quantity=300
+
+--- Shipments for Supplier S1 ---
+Shipment: part=P1 supplier=S1 quantity=300
+Shipment: part=P2 supplier=S1 quantity=200
+Shipment: part=P3 supplier=S1 quantity=400
+Shipment: part=P4 supplier=S1 quantity=200
+Shipment: part=P5 supplier=S1 quantity=100
+Shipment: part=P6 supplier=S1 quantity=100 
+
+ + + diff --git a/docs/collections/tutorial/transientfieldsinbinding.html b/docs/collections/tutorial/transientfieldsinbinding.html new file mode 100644 index 0000000..0f93c94 --- /dev/null +++ b/docs/collections/tutorial/transientfieldsinbinding.html @@ -0,0 +1,177 @@ + + + + + + Using Transient Fields in an Entity Binding + + + + + + + + + +
+
+
+
+

+ Using Transient Fields in an Entity Binding +

+
+
+
+

+ The entity bindings from the prior example have been changed in + this example to use the entity object both as a value object and an + entity object. +

+

+ Before, the entryToObject() method combined the + deserialized value object with the key fields to create a new + entity object. Now, this method uses the deserialized object + directly as an entity, and initializes its key using the fields + read from the key tuple. +

+

+ Before, the objectToData() method constructed a new value + object using information in the entity. Now it simply returns the + entity. Nothing needs to be changed in the entity, since the + transient key fields won't be serialized. +

+ +
import com.sleepycat.bind.serial.ClassCatalog;
+...
+public class SampleViews
+{
+    ...
+    private static class PartBinding extends TupleSerialBinding
+    {
+        private PartBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            Part part = (Part) dataInput;
+            part.setKey(number);
+            return part;
+        }
+
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Part part = (Part) object;
+            output.writeString(part.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            return object;
+        }
+    }
+
+    private static class SupplierBinding extends TupleSerialBinding
+    {
+        private SupplierBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            Supplier supplier = (Supplier) dataInput;
+            supplier.setKey(number);
+            return supplier;
+        }
+
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Supplier supplier = (Supplier) object;
+            output.writeString(supplier.getNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            return object;
+        }
+    }
+
+    private static class ShipmentBinding extends TupleSerialBinding
+    {
+        private ShipmentBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String partNumber = keyInput.readString();
+            String supplierNumber = keyInput.readString();
+            Shipment shipment = (Shipment) dataInput;
+            shipment.setKey(partNumber, supplierNumber);
+            return shipment;
+        }
+
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Shipment shipment = (Shipment) object;
+            output.writeString(shipment.getPartNumber());
+            output.writeString(shipment.getSupplierNumber());
+        }
+
+        public Object objectToData(Object object)
+        {
+            return object;
+        }
+    }
+} 
+
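+ For context, the following is a sketch (not the complete class from the example program) of how an entity class such as Part can declare its key fields transient, so that the bindings above can return the entity directly from objectToData():

    import java.io.Serializable;

    public class Part implements Serializable
    {
        private transient String number;  // key field, not serialized
        private String name;
        // ... remaining value fields and constructor omitted ...

        final void setKey(String number)  // called by PartBinding above
        {
            this.number = number;
        }

        public final String getNumber()
        {
            return number;
        }
    }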
+ + + diff --git a/docs/collections/tutorial/tuple-serialentitybindings.html b/docs/collections/tutorial/tuple-serialentitybindings.html new file mode 100644 index 0000000..ad19835 --- /dev/null +++ b/docs/collections/tutorial/tuple-serialentitybindings.html @@ -0,0 +1,198 @@ + + + + + + Creating Tuple-Serial Entity Bindings + + + + + + + + + +
+
+
+
+

+Creating Tuple-Serial Entity Bindings +

+
+
+
+

+In the prior example serial keys and serial values were used, +and the +SerialSerialBinding + +base class was used for entity bindings. In this example, tuple +keys and serial values are used and therefore the +TupleSerialBinding + +base class is used for entity bindings. +

+

+As with any entity binding, a key and value is converted to an +entity in the +TupleSerialBinding.entryToObject + +method, and from an entity to +a key and value in the +TupleSerialBinding.objectToKey + +and +TupleSerialBinding.objectToData + +methods. But since keys are +stored as tuples, not as serialized objects, key fields are read +and written using the +TupleInput + +and +TupleOutput + +parameters. +

+

+The SampleViews class contains the modified entity +binding classes that were defined in the prior example: +PartBinding, SupplierBinding and +ShipmentBinding. +

+ +
import com.sleepycat.bind.serial.TupleSerialBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class SampleViews
+{
+    ...
+    private static class PartBinding extends TupleSerialBinding 
+    {
+        private PartBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            PartData data = (PartData) dataInput;
+            return new Part(number, data.getName(), data.getColor(),
+                            data.getWeight(), data.getCity());
+        }
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Part part = (Part) object;
+            output.writeString(part.getNumber());
+        }
+        public Object objectToData(Object object)
+        {
+            Part part = (Part) object;
+            return new PartData(part.getName(), part.getColor(),
+                                 part.getWeight(), part.getCity());
+        }
+    }
+    ...
+    private static class SupplierBinding extends TupleSerialBinding
+    {
+        private SupplierBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String number = keyInput.readString();
+            SupplierData data = (SupplierData) dataInput;
+            return new Supplier(number, data.getName(),
+                                data.getStatus(), data.getCity());
+        }
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Supplier supplier = (Supplier) object;
+            output.writeString(supplier.getNumber());
+        }
+        public Object objectToData(Object object)
+        {
+            Supplier supplier = (Supplier) object;
+            return new SupplierData(supplier.getName(), 
+                                    supplier.getStatus(),
+                                    supplier.getCity());
+        }
+    }
+    ...
+    private static class ShipmentBinding extends TupleSerialBinding
+    {
+        private ShipmentBinding(ClassCatalog classCatalog, Class dataClass)
+        {
+            super(classCatalog, dataClass);
+        }
+        public Object entryToObject(TupleInput keyInput, Object dataInput)
+        {
+            String partNumber = keyInput.readString();
+            String supplierNumber = keyInput.readString();
+            ShipmentData data = (ShipmentData) dataInput;
+            return new Shipment(partNumber, supplierNumber,
+                                data.getQuantity());
+        }
+        public void objectToKey(Object object, TupleOutput output)
+        {
+            Shipment shipment = (Shipment) object;
+            output.writeString(shipment.getPartNumber());
+            output.writeString(shipment.getSupplierNumber());
+        }
+        public Object objectToData(Object object)
+        {
+            Shipment shipment = (Shipment) object;
+            return new ShipmentData(shipment.getQuantity());
+        }
+    }
+    ...
+} 
+
+ + + diff --git a/docs/collections/tutorial/tuplekeybindings.html b/docs/collections/tutorial/tuplekeybindings.html new file mode 100644 index 0000000..363e81a --- /dev/null +++ b/docs/collections/tutorial/tuplekeybindings.html @@ -0,0 +1,219 @@ + + + + + + Creating Tuple Key Bindings + + + + + + + + + +
+
+
+
+

+ Creating Tuple Key Bindings +

+
+
+
+

+ Serial bindings were used in prior examples as key bindings, and + keys were stored as serialized objects. In this example, a tuple + binding is used for each key since keys will be stored as tuples. + Because keys are no longer stored as serialized objects, the + PartKey, SupplierKey and ShipmentKey classes + no longer implement the + Serializable + + interface (this is the only change to these classes and is not + shown below). +

+

+ For the Part key, Supplier key, + and Shipment key, the + SampleViews class was changed in this example to create a + custom + TupleBinding + + instead of a + SerialBinding. + The custom tuple key binding classes are defined further below. +

+ +
import com.sleepycat.bind.tuple.TupleBinding;
+...
+public class SampleViews
+{
+    ...
+    public SampleViews(SampleDatabase db)
+    {
+        ...
+        ClassCatalog catalog = db.getClassCatalog();
+        EntryBinding partKeyBinding =
+            new PartKeyBinding();
+        EntityBinding partDataBinding =
+            new PartBinding(catalog, PartData.class);
+        EntryBinding supplierKeyBinding =
+            new SupplierKeyBinding();
+        EntityBinding supplierDataBinding =
+            new SupplierBinding(catalog, SupplierData.class);
+        EntryBinding shipmentKeyBinding =
+            new ShipmentKeyBinding();
+        EntityBinding shipmentDataBinding =
+            new ShipmentBinding(catalog, ShipmentData.class);
+        EntryBinding cityKeyBinding =
+            TupleBinding.getPrimitiveBinding(String.class);
+        ...
+    }
+} 
+

+ For the City key, however, a custom binding class is not needed + because the key class is a primitive Java type, + String. + For any primitive Java type, a tuple binding may be created using the + TupleBinding.getPrimitiveBinding + + static method. +

+

+ The custom key binding classes, PartKeyBinding, + SupplierKeyBinding and ShipmentKeyBinding, are + defined by extending the + TupleBinding + + class. The + TupleBinding + + abstract class implements the + EntryBinding + + interface, and is used for one-to-one bindings between tuples and + objects. Each binding class implements two methods for converting + between tuples and objects. Tuple fields are read using the + TupleInput + + parameter and written using the + TupleOutput + + parameter. +

+ +
import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class SampleViews
+{
+...
+
+    private static class PartKeyBinding extends TupleBinding
+    {
+        private PartKeyBinding()
+        {
+        }
+
+        public Object entryToObject(TupleInput input)
+        {
+            String number = input.readString();
+            return new PartKey(number);
+        }
+
+        public void objectToEntry(Object object, TupleOutput output)
+        {
+            PartKey key = (PartKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+    ...
+    private static class SupplierKeyBinding extends TupleBinding
+    {
+        private SupplierKeyBinding()
+        {
+        }
+
+        public Object entryToObject(TupleInput input)
+        {
+            String number = input.readString();
+            return new SupplierKey(number);
+        }
+
+        public void objectToEntry(Object object, TupleOutput output)
+        {
+            SupplierKey key = (SupplierKey) object;
+            output.writeString(key.getNumber());
+        }
+    }
+    ...
+    private static class ShipmentKeyBinding extends TupleBinding
+    {
+        private ShipmentKeyBinding()
+        {
+        }
+
+        public Object entryToObject(TupleInput input) 
+        {
+             String partNumber = input.readString();
+             String supplierNumber = input.readString();
+             return new ShipmentKey(partNumber, supplierNumber);
+        }
+        public void objectToEntry(Object object, TupleOutput output)
+        {
+            ShipmentKey key = (ShipmentKey) object;
+            output.writeString(key.getPartNumber());
+            output.writeString(key.getSupplierNumber());
+        }
+    }
+    ...
+} 
+
+ + + diff --git a/docs/collections/tutorial/tupleswithkeycreators.html b/docs/collections/tutorial/tupleswithkeycreators.html new file mode 100644 index 0000000..f24c8e6 --- /dev/null +++ b/docs/collections/tutorial/tupleswithkeycreators.html @@ -0,0 +1,206 @@ + + + + + + Using Tuples with Key Creators + + + + + + + + + +
+
+
+
+

+ Using Tuples with Key Creators +

+
+
+
+

+ Key creators were used in prior examples to extract index keys + from value objects. The keys were returned as deserialized key + objects, since the serial format was used for keys. In this + example, the tuple format is used for keys and the key creators + return keys by writing information to a tuple. The differences + between this example and the prior example are: +

+
  • The key creator classes extend TupleSerialKeyCreator instead of
    SerialSerialKeyCreator.
  • Instead of returning the index key as a deserialized key object,
    the key creator writes the index key to a TupleOutput parameter.
  • Key fields of the primary key are read from a TupleInput
    parameter rather than from a deserialized key object.
+

+ In addition to writing key tuples, the + ShipmentByPartKeyCreator and + ShipmentBySupplierKeyCreator classes also read the key tuple + of the primary key. This is because they extract the index key from + fields in the Shipment's primary key. Instead of calling getter + methods on the ShipmentKey object, as in prior examples, + these methods call + TupleInput.readString. + The ShipmentKey consists of two string fields that are read + in sequence. +

+

+ The modified key creators are shown below: + SupplierByCityKeyCreator, + ShipmentByPartKeyCreator + and ShipmentBySupplierKeyCreator. +

+ +
import com.sleepycat.bind.serial.TupleSerialKeyCreator;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+...
+public class SampleDatabase
+{
+    ...
+    private static class SupplierByCityKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private SupplierByCityKeyCreator(ClassCatalog catalog,
+                                         Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            SupplierData supplierData = (SupplierData) valueInput;
+            String city = supplierData.getCity();
+            if (city != null) {
+                indexKeyOutput.writeString(supplierData.getCity());
+                return true;
+            } else {
+                return false;
+            }
+        }
+    }
+
+    private static class ShipmentByPartKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private ShipmentByPartKeyCreator(ClassCatalog catalog,
+                                         Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            String partNumber = primaryKeyInput.readString();
+            // don't bother reading the supplierNumber
+            indexKeyOutput.writeString(partNumber);
+            return true;
+        }
+    }
+
+    private static class ShipmentBySupplierKeyCreator
+        extends TupleSerialKeyCreator
+    {
+        private ShipmentBySupplierKeyCreator(ClassCatalog catalog,
+                                             Class valueClass)
+        {
+            super(catalog, valueClass);
+        }
+
+        public boolean createSecondaryKey(TupleInput primaryKeyInput,
+                                          Object valueInput,
+                                          TupleOutput indexKeyOutput)
+        {
+            primaryKeyInput.readString(); // skip the partNumber
+            String supplierNumber = primaryKeyInput.readString();
+            indexKeyOutput.writeString(supplierNumber);
+            return true;
+        }
+    }
+    ...
+}
+	
+
+ + + diff --git a/docs/collections/tutorial/tutorialintroduction.html b/docs/collections/tutorial/tutorialintroduction.html new file mode 100644 index 0000000..4c68371 --- /dev/null +++ b/docs/collections/tutorial/tutorialintroduction.html @@ -0,0 +1,411 @@ + + + + + + Tutorial Introduction + + + + + + + + + +
+
+
+
+

Tutorial Introduction

+
+
+
+

+ Most of the remainder of this document presents a tutorial that illustrates the use of the JE Collections API. The tutorial builds a shipment database, a familiar example from classic database texts. +

+

+ The examples illustrate the following concepts of the JE Collections API: +

+
+
    +
  • +

    + Object-to-data bindings +

    +
  • +
  • +

    + The database environment +

    +
  • +
  • +

    + Databases that contain key/value records +

    +
  • +
  • +

    + Secondary index databases that contain index keys +

    +
  • +
  • +

    + Java collections for accessing databases and + indices +

    +
  • +
  • +

    + Transactions used to commit or undo database + changes +

    +
  • +
+
+

+ The examples build on each other, but at the same time the + source code for each example stands alone. +

+ +

+ The shipment database consists of three database stores: the + part store, the supplier store, and the shipment store. Each store + contains a number of records, and each record consists of a key and + a value. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Store      Key                             Value
Part       Part Number                     Name, Color, Weight, City
Supplier   Supplier Number                 Name, Status, City
Shipment   Part Number, Supplier Number    Quantity
+
+

+ In the example programs, Java classes containing the fields + above are defined for the key and value of each store: + PartKey, + PartData, + SupplierKey, + SupplierData, + ShipmentKey and ShipmentData. In + addition, because the Part's Weight field is itself composed of two + fields — the weight value and the unit of measure — it is + represented by a separate Weight class. These classes will + be defined in the first example program. +
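+ As a preview, a minimal sketch of the Weight class might look like the following; the actual definition appears in the first example program and may differ in detail:

    import java.io.Serializable;

    public class Weight implements Serializable
    {
        private double amount;   // the weight value, e.g. 12.0
        private String units;    // the unit of measure, e.g. "grams"

        public Weight(double amount, String units)
        {
            this.amount = amount;
            this.units = units;
        }

        public double getAmount() { return amount; }
        public String getUnits() { return units; }
    }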

+

+ In general, the JE Collections API uses bindings to describe how Java objects are stored. A binding defines the stored data syntax and the mapping between a Java object and the stored data. The example programs show how to create different types of bindings, and explain the characteristics of each type. +

+

+ The following tables show the record values that are used in + all the example programs in the tutorial. + +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Number  Name   Color  Weight       City
P1      Nut    Red    12.0 grams   London
P2      Bolt   Green  17.0 grams   Paris
P3      Screw  Blue   17.0 grams   Rome
P4      Screw  Red    14.0 grams   London
P5      Cam    Blue   12.0 grams   Paris
P6      Cog    Red    19.0 grams   London
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Number  Name   Status  City
S1      Smith  20      London
S2      Jones  10      Paris
S3      Blake  30      Paris
S4      Clark  20      London
S5      Adams  30      Athens
+
+

+ +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Part Number  Supplier Number  Quantity
P1           S1               300
P1           S2               300
P2           S1               200
P2           S2               400
P2           S3               200
P2           S4               200
P3           S1               400
P4           S1               200
P4           S4               300
P5           S1               100
P5           S4               400
P6           S1               100
+
+
+ + + diff --git a/docs/collections/tutorial/usingtransactions.html b/docs/collections/tutorial/usingtransactions.html new file mode 100644 index 0000000..f443615 --- /dev/null +++ b/docs/collections/tutorial/usingtransactions.html @@ -0,0 +1,222 @@ + + + + + + Using Transactions + + + + + + + + + +
+
+
+
+

+ Using Transactions +

+
+
+
+

+ JE transactional applications have standard transactional characteristics: recoverability, atomicity and integrity (these are sometimes referred to generically as ACID properties). The JE Collections API provides these transactional capabilities using a transaction-per-thread model. Once a transaction is begun, it is implicitly associated with the current thread until it is committed or aborted. This model is used for the following reasons. +

+
+
    +
  • +

    + The transaction-per-thread model is commonly used in other Java + APIs such as J2EE. +

    +
  • +
  • +

    + Since the Java collections API is used for data access, there + is no way to pass a transaction object to methods such + as + Map.put. +

    +
  • +
+
+

+ The JE Collections API provides two transaction APIs. The lower-level API is the CurrentTransaction class. It provides a way to get the transaction for the current thread, and to begin, commit and abort transactions. It also provides access to the Berkeley DB core API Transaction object. With CurrentTransaction, just as in the com.sleepycat.je API, the application is responsible for beginning, committing and aborting transactions, and for handling deadlock exceptions and retrying operations. This API may be needed for some applications, but it is not used in the example. +
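+ Although the example does not use it, a minimal sketch of the CurrentTransaction API follows. Deadlock detection and retry, which this lower-level API leaves to the application, are omitted here:

    import com.sleepycat.collections.CurrentTransaction;
    import com.sleepycat.je.Environment;

    class CurrentTransactionSketch
    {
        static void runOnce(Environment env) throws Exception
        {
            CurrentTransaction ct = CurrentTransaction.getInstance(env);
            ct.beginTransaction(null); // associate a txn with this thread
            try {
                // ... perform stored collection operations here ...
                ct.commitTransaction();
            } catch (Exception e) {
                ct.abortTransaction();
                throw e;
            }
        }
    }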

+

+ The example uses the higher-level TransactionRunner and TransactionWorker APIs, which are built on top of CurrentTransaction. TransactionRunner.run() automatically begins a transaction and then calls the TransactionWorker.doWork() method, which is implemented by the application. +

+

+ The TransactionRunner.run() method automatically detects + deadlock exceptions and performs retries by repeatedly calling the + TransactionWorker.doWork() method until the operation succeeds + or the maximum retry count is reached. If the maximum retry count + is reached or if another exception (other than + + LockConflictException) + + + is thrown by TransactionWorker.doWork(), then the transaction + will be automatically aborted. Otherwise, the transaction will be + automatically committed. +

+

+ Using this high-level API, if TransactionRunner.run() + throws an exception, the application can assume that the operation + failed and the transaction was aborted; otherwise, when an + exception is not thrown, the application can assume the operation + succeeded and the transaction was committed. +
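+ In code, this contract reduces to the following sketch (PopulateDatabase is the worker class defined below):

    try {
        runner.run(new PopulateDatabase());
        // No exception: the operation succeeded and the
        // transaction was committed.
    } catch (Exception e) {
        // The operation failed and the transaction was aborted.
    }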

+

+ The Sample.run() method creates a TransactionRunner + object and calls its run() method. +

+ +
import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+...
+public class Sample
+{
+    private SampleDatabase db;
+    ...
+    private void run()
+        throws Exception
+    {
+        TransactionRunner runner = 
+            new TransactionRunner(db.getEnvironment());
+        runner.run(new PopulateDatabase());
+        runner.run(new PrintDatabase());
+    }
+    ...
+    private class PopulateDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+        }
+    }
+
+    private class PrintDatabase implements TransactionWorker
+    {
+        public void doWork()
+            throws Exception
+        {
+        }
+    }
+} 
+

+ The run() method is called by main() and was outlined + in the previous section. It first creates a + TransactionRunner, passing the database environment to its + constructor. +

+

+ It then calls TransactionRunner.run() to execute two + transactions, passing instances of the application-defined + PopulateDatabase and + PrintDatabase nested classes. + These classes implement the TransactionWorker.doWork() method + and will be fully described in the next two sections. +

+

+ For each call to TransactionRunner.run(), a separate + transaction will be performed. The use of two transactions in the + example — one for populating the database and another for printing + its contents — is arbitrary. A real-life application should be + designed to create transactions for each group of operations that + should have ACID properties, while also + taking into account the impact of transactions on performance. +

+

+ The advantage of using TransactionRunner is that deadlock + retries and transaction begin, commit and abort are handled + automatically. However, a TransactionWorker class must be + implemented for each type of transaction. If desired, anonymous + inner classes can be used to implement the TransactionWorker + interface. +
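+ For example, a transaction can be run with an anonymous worker; this sketch assumes a TransactionRunner named runner, as in the example:

    runner.run(new TransactionWorker()
    {
        public void doWork()
            throws Exception
        {
            // operations here execute within a single transaction
        }
    });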

+
+ + + diff --git a/docs/doclet/Debug.java b/docs/doclet/Debug.java new file mode 100644 index 0000000..97620be --- /dev/null +++ b/docs/doclet/Debug.java @@ -0,0 +1,58 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.io.PrintStream; + +final class Debug { + /** + * Set this value to true to enable debugging. + */ + public static final boolean DEBUG = true; + + /** + * Set this value to control where debug messages appear. + */ + public static PrintStream pstrmError = System.out; + + public static void println(Object objMessage) { + if (DEBUG) { + pstrmError.println(objMessage); + } + } + + private static void _printAssert() { + println("Assertion failed at :"); + + (new Throwable()).printStackTrace(); + } + + public static void _assert(boolean fExp) { + if (DEBUG) { + if (!fExp) { + _printAssert(); + } + } + } + + public static void _assert(boolean fExp, String szMessage) { + if (DEBUG) { + if (!fExp) { + println(szMessage); + _printAssert(); + } + } + } + + public static void printStackTrace(Throwable t) { + t.printStackTrace(pstrmError); + } + + public static void printException(Exception e) { + pstrmError.println(e); + printStackTrace(e); + } +} diff --git a/docs/doclet/HidingAnnotatedTypeWrapper.java b/docs/doclet/HidingAnnotatedTypeWrapper.java new file mode 100644 index 0000000..27bdee7 --- /dev/null +++ b/docs/doclet/HidingAnnotatedTypeWrapper.java @@ -0,0 +1,35 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotatedType; +import com.sun.javadoc.AnnotationDesc; +import com.sun.javadoc.Type; + +class HidingAnnotatedTypeWrapper extends HidingTypeWrapper + implements AnnotatedType { + + public HidingAnnotatedTypeWrapper(AnnotatedType type, Map mapWrappers) { + super(type, mapWrappers); + } + + private AnnotatedType _getAnnotatedType() { + return (AnnotatedType) getWrappedObject(); + } + + @Override + public AnnotationDesc[] annotations() { + return (AnnotationDesc[]) wrapOrHide( + _getAnnotatedType().annotations()); + } + + @Override + public Type underlyingType() { + return (Type) wrapOrHide(_getAnnotatedType().underlyingType()); + } +} diff --git a/docs/doclet/HidingAnnotationDescWrapper.java b/docs/doclet/HidingAnnotationDescWrapper.java new file mode 100644 index 0000000..4c65305 --- /dev/null +++ b/docs/doclet/HidingAnnotationDescWrapper.java @@ -0,0 +1,40 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationDesc; +import com.sun.javadoc.AnnotationTypeDoc; + +class HidingAnnotationDescWrapper extends HidingWrapper + implements AnnotationDesc { + + public HidingAnnotationDescWrapper(AnnotationDesc type, + Map mapWrappers) { + super(type, mapWrappers); + } + + private AnnotationDesc _getAnnotationDesc() { + return (AnnotationDesc)getWrappedObject(); + } + + @Override + public AnnotationTypeDoc annotationType() { + return (AnnotationTypeDoc) + wrapOrHide(_getAnnotationDesc().annotationType()); + } + + @Override + public AnnotationDesc.ElementValuePair[] elementValues() { + return _getAnnotationDesc().elementValues(); + } + + @Override + public boolean isSynthesized() { + return _getAnnotationDesc().isSynthesized(); + } +} diff --git a/docs/doclet/HidingAnnotationTypeDocWrapper.java b/docs/doclet/HidingAnnotationTypeDocWrapper.java new file mode 100644 index 0000000..a0ac9ee --- /dev/null +++ b/docs/doclet/HidingAnnotationTypeDocWrapper.java @@ -0,0 +1,29 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationTypeDoc; +import com.sun.javadoc.AnnotationTypeElementDoc; + +class HidingAnnotationTypeDocWrapper extends HidingClassDocWrapper + implements AnnotationTypeDoc { + public HidingAnnotationTypeDocWrapper(AnnotationTypeDoc type, + Map mapWrappers) { + super(type, mapWrappers); + } + + private AnnotationTypeDoc _getAnnotationTypeDoc() { + return (AnnotationTypeDoc)getWrappedObject(); + } + + @Override + public AnnotationTypeElementDoc[] elements() { + return (AnnotationTypeElementDoc[]) + wrapOrHide(_getAnnotationTypeDoc().elements()); + } +} diff --git a/docs/doclet/HidingAnnotationTypeElementDocWrapper.java b/docs/doclet/HidingAnnotationTypeElementDocWrapper.java new file mode 100644 index 0000000..647719a --- /dev/null +++ b/docs/doclet/HidingAnnotationTypeElementDocWrapper.java @@ -0,0 +1,30 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationTypeElementDoc; +import com.sun.javadoc.AnnotationValue; + +class HidingAnnotationTypeElementDocWrapper extends HidingMethodDocWrapper + implements AnnotationTypeElementDoc { + + public HidingAnnotationTypeElementDocWrapper( + AnnotationTypeElementDoc memdoc, Map mapWrappers) { + super(memdoc, mapWrappers); + } + + private AnnotationTypeElementDoc _getAnnotationTypeElementDoc() { + return (AnnotationTypeElementDoc)getWrappedObject(); + } + + @Override + public AnnotationValue defaultValue() { + return (AnnotationValue) + wrapOrHide(_getAnnotationTypeElementDoc().defaultValue()); + } +} diff --git a/docs/doclet/HidingAnnotationValueWrapper.java b/docs/doclet/HidingAnnotationValueWrapper.java new file mode 100644 index 0000000..12df9f8 --- /dev/null +++ b/docs/doclet/HidingAnnotationValueWrapper.java @@ -0,0 +1,32 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationValue; + +class HidingAnnotationValueWrapper extends HidingWrapper + implements AnnotationValue { + public HidingAnnotationValueWrapper(AnnotationValue value, + Map mapWrappers) { + super(value, mapWrappers); + } + + private AnnotationValue _getAnnotationValue() { + return (AnnotationValue)getWrappedObject(); + } + + @Override + public Object value() { + return _getAnnotationValue().value(); + } + + @Override + public String toString() { + return _getAnnotationValue().toString(); + } +} diff --git a/docs/doclet/HidingClassDocWrapper.java b/docs/doclet/HidingClassDocWrapper.java new file mode 100644 index 0000000..3956206 --- /dev/null +++ b/docs/doclet/HidingClassDocWrapper.java @@ -0,0 +1,241 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotatedType; +import com.sun.javadoc.AnnotationTypeDoc; +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.ConstructorDoc; +import com.sun.javadoc.FieldDoc; +import com.sun.javadoc.MethodDoc; +import com.sun.javadoc.PackageDoc; +import com.sun.javadoc.ParamTag; +import com.sun.javadoc.ParameterizedType; +import com.sun.javadoc.Type; +import com.sun.javadoc.TypeVariable; +import com.sun.javadoc.WildcardType; + +class HidingClassDocWrapper extends HidingProgramElementDocWrapper + implements ClassDoc { + + public HidingClassDocWrapper(ClassDoc classdoc, Map mapWrappers) { + super(classdoc, mapWrappers); + } + + private ClassDoc _getClassDoc() { + return (ClassDoc)getWrappedObject(); + } + + /* ClassDoc */ + + @Override + public boolean isAbstract() { + return _getClassDoc().isAbstract(); + } + + @Override + public boolean isSerializable() { + return _getClassDoc().isSerializable(); + } + + @Override + public boolean isExternalizable() { + return _getClassDoc().isExternalizable(); + } + + @Override + public MethodDoc[] serializationMethods() { + return (MethodDoc[])wrapOrHide(_getClassDoc().serializationMethods()); + } + + @Override + public FieldDoc[] serializableFields() { + return (FieldDoc[])wrapOrHide(_getClassDoc().serializableFields()); + } + + @Override + public boolean definesSerializableFields() { + return _getClassDoc().definesSerializableFields(); + } + + @Override + public ClassDoc superclass() { + return (ClassDoc)wrapOrHide(_getClassDoc().superclass()); + } + + @Override + public Type superclassType() { + return (Type) wrapOrHide(_getClassDoc().superclassType()); + } + + @Override + public boolean subclassOf(ClassDoc classdoc) { + if (classdoc instanceof HidingClassDocWrapper) { + classdoc = (ClassDoc) + ((HidingClassDocWrapper)classdoc).getWrappedObject(); + } + + return _getClassDoc().subclassOf(classdoc); + } + + @Override + public ClassDoc[] interfaces() { + return (ClassDoc[])wrapOrHide(_getClassDoc().interfaces()); + } + + @Override + public Type[] interfaceTypes() { + return (Type[]) wrapOrHide(_getClassDoc().interfaceTypes()); + } + + @Override + public TypeVariable[] typeParameters() { + return (TypeVariable[]) wrapOrHide(_getClassDoc().typeParameters()); + } + + @Override + public ParamTag[] typeParamTags() { + return (ParamTag[]) wrapOrHide(_getClassDoc().typeParamTags()); + } + + @Override + public FieldDoc[] fields() { + return (FieldDoc[])wrapOrHide(_getClassDoc().fields()); + } + + @Override + public FieldDoc[] fields(boolean filter) { + return 
(FieldDoc[])wrapOrHide(_getClassDoc().fields(filter)); + } + + @Override + public FieldDoc[] enumConstants() { + return (FieldDoc[])wrapOrHide(_getClassDoc().enumConstants()); + } + + @Override + public MethodDoc[] methods() { + return (MethodDoc[])wrapOrHide(_getClassDoc().methods()); + } + + @Override + public MethodDoc[] methods(boolean filter) { + return (MethodDoc[])wrapOrHide(_getClassDoc().methods(filter)); + } + + @Override + public ConstructorDoc[] constructors() { + return (ConstructorDoc[])wrapOrHide(_getClassDoc().constructors()); + } + + @Override + public ConstructorDoc[] constructors(boolean filter) { + return (ConstructorDoc[]) + wrapOrHide(_getClassDoc().constructors(filter)); + } + + @Override + public ClassDoc[] innerClasses() { + return (ClassDoc[])wrapOrHide(_getClassDoc().innerClasses()); + } + + @Override + public ClassDoc[] innerClasses(boolean filter) { + return (ClassDoc[])wrapOrHide(_getClassDoc().innerClasses(filter)); + } + + @Override + public ClassDoc findClass(String szClassName) { + return (ClassDoc)wrapOrHide(_getClassDoc().findClass(szClassName)); + } + + /** + * @deprecated as of 11.0 + */ + @Override + public ClassDoc[] importedClasses() { + return (ClassDoc[])wrapOrHide(_getClassDoc().importedClasses()); + } + + /** + * @deprecated as of 11.0 + */ + @Override + public PackageDoc[] importedPackages() { + return (PackageDoc[])wrapOrHide(_getClassDoc().importedPackages()); + } + + /* Type */ + + @Override + public String typeName() { + return _getClassDoc().typeName(); + } + + @Override + public String qualifiedTypeName() { + return _getClassDoc().qualifiedTypeName(); + } + + @Override + public String simpleTypeName() { + return _getClassDoc().simpleTypeName(); + } + + @Override + public String dimension() { + return _getClassDoc().dimension(); + } + + @Override + public String toString() { + return _getClassDoc().toString(); + } + + @Override + public boolean isPrimitive() { + return _getClassDoc().isPrimitive(); + } + + @Override + public ClassDoc asClassDoc() { + return this; + } + + @Override + public ParameterizedType asParameterizedType() { + return (ParameterizedType)wrapOrHide( + _getClassDoc().asParameterizedType()); + } + + @Override + public TypeVariable asTypeVariable() { + return (TypeVariable)wrapOrHide(_getClassDoc().asTypeVariable()); + } + + @Override + public WildcardType asWildcardType() { + return (WildcardType)wrapOrHide(_getClassDoc().asWildcardType()); + } + + @Override + public AnnotatedType asAnnotatedType() { + return (AnnotatedType)wrapOrHide(_getClassDoc().asAnnotatedType()); + } + + @Override + public AnnotationTypeDoc asAnnotationTypeDoc() { + return (AnnotationTypeDoc)wrapOrHide( + _getClassDoc().asAnnotationTypeDoc()); + } + + @Override + public Type getElementType() { + return (Type)wrapOrHide(_getClassDoc().getElementType()); + } +} diff --git a/docs/doclet/HidingConstructorDocWrapper.java b/docs/doclet/HidingConstructorDocWrapper.java new file mode 100644 index 0000000..8eb3bd0 --- /dev/null +++ b/docs/doclet/HidingConstructorDocWrapper.java @@ -0,0 +1,22 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.ConstructorDoc; + +class HidingConstructorDocWrapper extends HidingExecutableMemberDocWrapper + implements ConstructorDoc { + public HidingConstructorDocWrapper(ConstructorDoc constrdoc, + Map mapWrappers) { + super(constrdoc, mapWrappers); + } + + private ConstructorDoc _getConstructorDoc() { + return (ConstructorDoc)getWrappedObject(); + } +} diff --git a/docs/doclet/HidingDocWrapper.java b/docs/doclet/HidingDocWrapper.java new file mode 100644 index 0000000..8bd70e7 --- /dev/null +++ b/docs/doclet/HidingDocWrapper.java @@ -0,0 +1,149 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.Doc; +import com.sun.javadoc.SeeTag; +import com.sun.javadoc.SourcePosition; +import com.sun.javadoc.Tag; + +class HidingDocWrapper extends HidingWrapper implements Doc { + + public HidingDocWrapper(Doc doc, Map mapWrappers) { + super(doc, mapWrappers); + } + + private Doc _getDoc() { + return (Doc)getWrappedObject(); + } + + @Override + public String commentText() { + return _getDoc().commentText(); + } + + @Override + public Tag[] tags() { + return (Tag[])wrapOrHide(_getDoc().tags()); + } + + @Override + public Tag[] tags(String szTagName) { + return (Tag[])wrapOrHide(_getDoc().tags(szTagName)); + } + + @Override + public SeeTag[] seeTags() { + return (SeeTag[])wrapOrHide(_getDoc().seeTags()); + } + + @Override + public Tag[] inlineTags() { + return (Tag[])wrapOrHide(_getDoc().inlineTags()); + } + + @Override + public Tag[] firstSentenceTags() { + return (Tag[])wrapOrHide(_getDoc().firstSentenceTags()); + } + + @Override + public String getRawCommentText() { + return _getDoc().getRawCommentText(); + } + + @Override + public void setRawCommentText(String szText) { + _getDoc().setRawCommentText(szText); + } + + @Override + public String name() { + return _getDoc().name(); + } + + @Override + public int compareTo(Object obj) { + if (obj instanceof HidingWrapper) { + return _getDoc(). 
+ compareTo(((HidingWrapper)obj).getWrappedObject()); + } else { + return _getDoc().compareTo(obj); + } + } + + @Override + public boolean isField() { + return _getDoc().isField(); + } + + @Override + public boolean isEnumConstant() { + return _getDoc().isEnumConstant(); + } + + @Override + public boolean isConstructor() { + return _getDoc().isConstructor(); + } + + @Override + public boolean isMethod() { + return _getDoc().isMethod(); + } + + @Override + public boolean isAnnotationTypeElement() { + return _getDoc().isAnnotationTypeElement(); + } + + @Override + public boolean isInterface() { + return _getDoc().isInterface(); + } + + @Override + public boolean isException() { + return _getDoc().isException(); + } + + @Override + public boolean isError() { + return _getDoc().isError(); + } + + @Override + public boolean isEnum() { + return _getDoc().isEnum(); + } + + @Override + public boolean isAnnotationType() { + return _getDoc().isAnnotationType(); + } + + @Override + public boolean isOrdinaryClass() { + return _getDoc().isOrdinaryClass(); + } + + @Override + public boolean isClass() { + return _getDoc().isClass(); + } + + @Override + public boolean isIncluded() { + return _getDoc().isIncluded(); + } + + @Override + public SourcePosition position() { + return _getDoc().position(); + } +} diff --git a/docs/doclet/HidingDoclet.java b/docs/doclet/HidingDoclet.java new file mode 100644 index 0000000..891f00b --- /dev/null +++ b/docs/doclet/HidingDoclet.java @@ -0,0 +1,64 @@ +/*- + * See the files LICENSE for redistribution information. + * + * Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.HashMap; + +import com.sun.javadoc.DocErrorReporter; +import com.sun.javadoc.LanguageVersion; +import com.sun.javadoc.RootDoc; +import com.sun.tools.doclets.standard.Standard; + +/** + * A doclet that allows developers to hide documentation for java elements. + * + * @see oracle.olapi.hidingDoclet + */ +public class HidingDoclet extends Standard { + + private static Class s_classBaseDoclet; + + /** + * javadoc calls this method to generate documentation + */ + public static boolean start(RootDoc root) { + return Standard.start(new HidingRootDocWrapper(root, new HashMap())); + } + + /** + * javadoc calls this method to check the validity of doclet-specific + * command-line arguments. + *

+ * Any arguments accepted by the standard doclet will be accepted by + * HidingDoclet. + */ + public static boolean validOptions(String options[][], + DocErrorReporter reporter) { + return Standard.validOptions(options, reporter); + } + + /** + * javadoc calls this method to check the number of non-flag command-line + * arguments that should follow the given command-line flag. + *

+ * Any arguments accepted by the standard doclet will be accepted by + * HidingDoclet. + */ + public static int optionLength(String option) { + return Standard.optionLength(option); + } + + /** + * javadoc calls this method to check whether the doclet supports the + * Java 5 extensions (generic types, annotations, enums, and varArgs) + *

+ * Any arguments accepted by the standard doclet will be accepted by + * HidingDoclet. + */ + public static LanguageVersion languageVersion() { + return LanguageVersion.JAVA_1_5; //dgm code change + } +} diff --git a/docs/doclet/HidingExecutableMemberDocWrapper.java b/docs/doclet/HidingExecutableMemberDocWrapper.java new file mode 100644 index 0000000..dc4da8b --- /dev/null +++ b/docs/doclet/HidingExecutableMemberDocWrapper.java @@ -0,0 +1,100 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.ExecutableMemberDoc; +import com.sun.javadoc.ParamTag; +import com.sun.javadoc.Parameter; +import com.sun.javadoc.ThrowsTag; +import com.sun.javadoc.Type; +import com.sun.javadoc.TypeVariable; + +class HidingExecutableMemberDocWrapper extends HidingMemberDocWrapper + implements ExecutableMemberDoc { + + public HidingExecutableMemberDocWrapper(ExecutableMemberDoc execmemdoc, + Map mapWrappers) { + super(execmemdoc, mapWrappers); + } + + private ExecutableMemberDoc _getExecutableMemberDoc() { + return (ExecutableMemberDoc)getWrappedObject(); + } + + @Override + public ClassDoc[] thrownExceptions() { + return (ClassDoc[]) + wrapOrHide(_getExecutableMemberDoc().thrownExceptions()); + } + + @Override + public Type[] thrownExceptionTypes() { + return (Type[]) + wrapOrHide(_getExecutableMemberDoc().thrownExceptionTypes()); + } + + @Override + public boolean isNative() { + return _getExecutableMemberDoc().isNative(); + } + + @Override + public boolean isSynchronized() { + return _getExecutableMemberDoc().isSynchronized(); + } + + @Override + public boolean isVarArgs() { + return _getExecutableMemberDoc().isVarArgs(); + } + + @Override + public Parameter[] parameters() { + return (Parameter[])wrapOrHide(_getExecutableMemberDoc().parameters()); + } + + @Override + public Type receiverType() { + return (Type)wrapOrHide(_getExecutableMemberDoc().receiverType()); + } + + @Override + public ThrowsTag[] throwsTags() { + return (ThrowsTag[]) + wrapOrHide(_getExecutableMemberDoc().throwsTags()); + } + + @Override + public ParamTag[] paramTags() { + return (ParamTag[]) + wrapOrHide(_getExecutableMemberDoc().paramTags()); + } + + @Override + public ParamTag[] typeParamTags() { + return (ParamTag[]) + wrapOrHide(_getExecutableMemberDoc().typeParamTags()); + } + + @Override + public String signature() { + return _getExecutableMemberDoc().signature(); + } + + @Override + public String flatSignature() { + return _getExecutableMemberDoc().flatSignature(); + } + + @Override + public TypeVariable[] typeParameters() { + return (TypeVariable[]) + wrapOrHide(_getExecutableMemberDoc().typeParameters()); + } +} diff --git a/docs/doclet/HidingFieldDocWrapper.java b/docs/doclet/HidingFieldDocWrapper.java new file mode 100644 index 0000000..5667834 --- /dev/null +++ b/docs/doclet/HidingFieldDocWrapper.java @@ -0,0 +1,54 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.FieldDoc; +import com.sun.javadoc.SerialFieldTag; +import com.sun.javadoc.Type; + +class HidingFieldDocWrapper extends HidingMemberDocWrapper + implements FieldDoc { + + public HidingFieldDocWrapper(FieldDoc fielddoc, Map mapWrappers) { + super(fielddoc, mapWrappers); + } + + private FieldDoc _getFieldDoc() { + return (FieldDoc)getWrappedObject(); + } + + @Override + public Type type() { + return (Type)wrapOrHide(_getFieldDoc().type()); + } + + @Override + public boolean isTransient() { + return _getFieldDoc().isTransient(); + } + + @Override + public boolean isVolatile() { + return _getFieldDoc().isVolatile(); + } + + @Override + public SerialFieldTag[] serialFieldTags() { + return (SerialFieldTag[])wrapOrHide(_getFieldDoc().serialFieldTags()); + } + + @Override + public Object constantValue() { + return _getFieldDoc().constantValue(); + } + + @Override + public String constantValueExpression() { + return _getFieldDoc().constantValueExpression(); + } +} diff --git a/docs/doclet/HidingMemberDocWrapper.java b/docs/doclet/HidingMemberDocWrapper.java new file mode 100644 index 0000000..7919178 --- /dev/null +++ b/docs/doclet/HidingMemberDocWrapper.java @@ -0,0 +1,27 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.MemberDoc; + +class HidingMemberDocWrapper extends HidingProgramElementDocWrapper + implements MemberDoc { + + public HidingMemberDocWrapper(MemberDoc memdoc, Map mapWrappers) { + super(memdoc, mapWrappers); + } + + private MemberDoc _getMemberDoc() { + return (MemberDoc)getWrappedObject(); + } + + @Override + public boolean isSynthetic() { + return _getMemberDoc().isSynthetic(); + } +} diff --git a/docs/doclet/HidingMethodDocWrapper.java b/docs/doclet/HidingMethodDocWrapper.java new file mode 100644 index 0000000..5039f21 --- /dev/null +++ b/docs/doclet/HidingMethodDocWrapper.java @@ -0,0 +1,68 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.MethodDoc; +import com.sun.javadoc.Type; +import com.sun.javadoc.AnnotationTypeElementDoc; + +class HidingMethodDocWrapper extends HidingExecutableMemberDocWrapper + implements MethodDoc { + public HidingMethodDocWrapper(MethodDoc methdoc, Map mapWrappers) { + super(methdoc, mapWrappers); + } + + private MethodDoc _getMethodDoc() { + return (MethodDoc)getWrappedObject(); + } + + @Override + public boolean isAbstract() { + return _getMethodDoc().isAbstract(); + } + + @Override + public boolean isDefault() { + return _getMethodDoc().isDefault(); + } + + @Override + public Type returnType() { + return (Type)wrapOrHide(_getMethodDoc().returnType()); + } + + @Override + public ClassDoc overriddenClass() { + return (ClassDoc)wrapOrHide(_getMethodDoc().overriddenClass()); + } + + @Override + public Type overriddenType() { + return (Type)wrapOrHide(_getMethodDoc().overriddenType()); + } + + @Override + public MethodDoc overriddenMethod() { + return (MethodDoc)wrapOrHide(_getMethodDoc().overriddenMethod()); + } + + @Override + public boolean overrides(MethodDoc meth) { + if (meth instanceof HidingAnnotationTypeElementDocWrapper) { + meth = (AnnotationTypeElementDoc) + ((HidingAnnotationTypeElementDocWrapper)meth). 
+ getWrappedObject(); + } else if (meth instanceof HidingMethodDocWrapper) { + meth = (MethodDoc) + ((HidingMethodDocWrapper)meth).getWrappedObject(); + } + + return _getMethodDoc().overrides((MethodDoc) meth); + } +} diff --git a/docs/doclet/HidingPackageDocWrapper.java b/docs/doclet/HidingPackageDocWrapper.java new file mode 100644 index 0000000..44f8175 --- /dev/null +++ b/docs/doclet/HidingPackageDocWrapper.java @@ -0,0 +1,73 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationDesc; +import com.sun.javadoc.AnnotationTypeDoc; +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.PackageDoc; + +class HidingPackageDocWrapper extends HidingDocWrapper implements PackageDoc { + public HidingPackageDocWrapper(PackageDoc packdoc, Map mapWrappers) { + super(packdoc, mapWrappers); + } + + private PackageDoc _getPackageDoc() { + return (PackageDoc)getWrappedObject(); + } + + @Override + public ClassDoc[] allClasses(boolean filter) { + return (ClassDoc[])wrapOrHide(_getPackageDoc().allClasses(filter)); + } + + @Override + public ClassDoc[] allClasses() { + return (ClassDoc[])wrapOrHide(_getPackageDoc().allClasses()); + } + + @Override + public ClassDoc[] ordinaryClasses() { + return (ClassDoc[])wrapOrHide(_getPackageDoc().ordinaryClasses()); + } + + @Override + public ClassDoc[] exceptions() { + return (ClassDoc[])wrapOrHide(_getPackageDoc().exceptions()); + } + + @Override + public ClassDoc[] errors() { + return (ClassDoc[])wrapOrHide(_getPackageDoc().errors()); + } + + @Override + public ClassDoc[] enums() { + return (ClassDoc[])wrapOrHide(_getPackageDoc().enums()); + } + + @Override + public ClassDoc[] interfaces() { + return (ClassDoc[])wrapOrHide(_getPackageDoc().interfaces()); + } + + @Override + public AnnotationTypeDoc[] annotationTypes() { + return _getPackageDoc().annotationTypes(); + } + + @Override + public AnnotationDesc[] annotations() { + return _getPackageDoc().annotations(); + } + + @Override + public ClassDoc findClass(String szClassName) { + return (ClassDoc)wrapOrHide(_getPackageDoc().findClass(szClassName)); + } +} diff --git a/docs/doclet/HidingParamTagWrapper.java b/docs/doclet/HidingParamTagWrapper.java new file mode 100644 index 0000000..55a1b6a --- /dev/null +++ b/docs/doclet/HidingParamTagWrapper.java @@ -0,0 +1,35 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.ParamTag; + +class HidingParamTagWrapper extends HidingTagWrapper implements ParamTag { + public HidingParamTagWrapper(ParamTag paramtag, Map mapWrappers) { + super(paramtag, mapWrappers); + } + + private ParamTag _getParamTag() { + return (ParamTag)getWrappedObject(); + } + + @Override + public String parameterName() { + return _getParamTag().parameterName(); + } + + @Override + public String parameterComment() { + return _getParamTag().parameterComment(); + } + + @Override + public boolean isTypeParameter() { + return _getParamTag().isTypeParameter(); + } +} diff --git a/docs/doclet/HidingParameterWrapper.java b/docs/doclet/HidingParameterWrapper.java new file mode 100644 index 0000000..d45cc7e --- /dev/null +++ b/docs/doclet/HidingParameterWrapper.java @@ -0,0 +1,47 @@ +/*- + * See the file LICENSE for redistribution information. 
+ * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationDesc; +import com.sun.javadoc.Parameter; +import com.sun.javadoc.Type; + +class HidingParameterWrapper extends HidingWrapper implements Parameter { + public HidingParameterWrapper(Parameter param, Map mapWrappers) { + super(param, mapWrappers); + } + + private Parameter _getParameter() { + return (Parameter)getWrappedObject(); + } + + @Override + public Type type() { + return (Type)wrapOrHide(_getParameter().type()); + } + + @Override + public String name() { + return _getParameter().name(); + } + + @Override + public String typeName() { + return _getParameter().typeName(); + } + + @Override + public String toString() { + return _getParameter().toString(); + } + + @Override + public AnnotationDesc[] annotations() { + return (AnnotationDesc[])wrapOrHide(_getParameter().annotations()); + } +} diff --git a/docs/doclet/HidingParameterizedTypeWrapper.java b/docs/doclet/HidingParameterizedTypeWrapper.java new file mode 100644 index 0000000..aa42f9c --- /dev/null +++ b/docs/doclet/HidingParameterizedTypeWrapper.java @@ -0,0 +1,49 @@ +/*- + * See the file LICENSE for redistributiion information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.ParameterizedType; +import com.sun.javadoc.Type; + +class HidingParameterizedTypeWrapper extends HidingTypeWrapper + implements ParameterizedType { + public HidingParameterizedTypeWrapper(ParameterizedType type, + Map mapWrappers) { + super(type, mapWrappers); + } + + private ParameterizedType _getParameterizedType() { + return (ParameterizedType)getWrappedObject(); + } + + @Override + public ClassDoc asClassDoc() { + return (ClassDoc) wrapOrHide(_getParameterizedType().asClassDoc()); + } + + @Override + public Type[] typeArguments() { + return (Type[])wrapOrHide(_getParameterizedType().typeArguments()); + } + + @Override + public Type superclassType() { + return (Type)wrapOrHide(_getParameterizedType().superclassType()); + } + + @Override + public Type[] interfaceTypes() { + return (Type[])wrapOrHide(_getParameterizedType().interfaceTypes()); + } + + @Override + public Type containingType() { + return (Type)wrapOrHide(_getParameterizedType().containingType()); + } +} diff --git a/docs/doclet/HidingProgramElementDocWrapper.java b/docs/doclet/HidingProgramElementDocWrapper.java new file mode 100644 index 0000000..cb3a896 --- /dev/null +++ b/docs/doclet/HidingProgramElementDocWrapper.java @@ -0,0 +1,87 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationDesc; +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.PackageDoc; +import com.sun.javadoc.ProgramElementDoc; + +class HidingProgramElementDocWrapper extends HidingDocWrapper + implements ProgramElementDoc { + public HidingProgramElementDocWrapper(ProgramElementDoc progelemdoc, + Map mapWrappers) { + super(progelemdoc, mapWrappers); + } + + private ProgramElementDoc _getProgramElementDoc() { + return (ProgramElementDoc)getWrappedObject(); + } + + @Override + public ClassDoc containingClass() { + return (ClassDoc)wrapOrHide(_getProgramElementDoc().containingClass()); + } + + @Override + public PackageDoc containingPackage() { + return (PackageDoc) + wrapOrHide(_getProgramElementDoc().containingPackage()); + } + + @Override + public String qualifiedName() { + return _getProgramElementDoc().qualifiedName(); + } + + @Override + public int modifierSpecifier() { + return _getProgramElementDoc().modifierSpecifier(); + } + + @Override + public String modifiers() { + return _getProgramElementDoc().modifiers(); + } + + @Override + public AnnotationDesc[] annotations() { + return (AnnotationDesc[]) + wrapOrHide(_getProgramElementDoc().annotations()); + } + + @Override + public boolean isPublic() { + return _getProgramElementDoc().isPublic(); + } + + @Override + public boolean isProtected() { + return _getProgramElementDoc().isProtected(); + } + + @Override + public boolean isPrivate() { + return _getProgramElementDoc().isPrivate(); + } + + @Override + public boolean isPackagePrivate() { + return _getProgramElementDoc().isPackagePrivate(); + } + + @Override + public boolean isStatic() { + return _getProgramElementDoc().isStatic(); + } + + @Override + public boolean isFinal() { + return _getProgramElementDoc().isFinal(); + } +} diff --git a/docs/doclet/HidingRootDocWrapper.java b/docs/doclet/HidingRootDocWrapper.java new file mode 100644 index 0000000..bce0b90 --- /dev/null +++ b/docs/doclet/HidingRootDocWrapper.java @@ -0,0 +1,87 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.PackageDoc; +import com.sun.javadoc.RootDoc; +import com.sun.javadoc.SourcePosition; + +class HidingRootDocWrapper extends HidingDocWrapper implements RootDoc { + public HidingRootDocWrapper(RootDoc rootdoc, Map mapWrappers) { + super(rootdoc, mapWrappers); + } + + private RootDoc _getRootDoc() { + return (RootDoc)getWrappedObject(); + } + + /* RootDoc */ + + @Override + public String[][] options() { + return _getRootDoc().options(); + } + + @Override + public PackageDoc[] specifiedPackages() { + return (PackageDoc[])wrapOrHide(_getRootDoc().specifiedPackages()); + } + + @Override + public ClassDoc[] specifiedClasses() { + return (ClassDoc[])wrapOrHide(_getRootDoc().specifiedClasses()); + } + + @Override + public ClassDoc[] classes() { + return (ClassDoc[])wrapOrHide(_getRootDoc().classes()); + } + + @Override + public PackageDoc packageNamed(String szName) { + return (PackageDoc)wrapOrHide(_getRootDoc().packageNamed(szName)); + } + + @Override + public ClassDoc classNamed(String szName) { + return (ClassDoc)wrapOrHide(_getRootDoc().classNamed(szName)); + } + + /* DocErrorReporter */ + + @Override + public void printError(String szError) { + _getRootDoc().printError(szError); + } + + @Override + public void printError(SourcePosition pos, String szError) { + _getRootDoc().printError(pos, szError); + } + + @Override + public void printWarning(String szWarning) { + _getRootDoc().printWarning(szWarning); + } + + @Override + public void printWarning(SourcePosition pos, String szWarning) { + _getRootDoc().printWarning(pos, szWarning); + } + + @Override + public void printNotice(String szNotice) { + _getRootDoc().printNotice(szNotice); + } + + @Override + public void printNotice(SourcePosition pos, String szNotice) { + _getRootDoc().printNotice(pos, szNotice); + } +} diff --git a/docs/doclet/HidingSeeTagWrapper.java b/docs/doclet/HidingSeeTagWrapper.java new file mode 100644 index 0000000..a0b1b2b --- /dev/null +++ b/docs/doclet/HidingSeeTagWrapper.java @@ -0,0 +1,53 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.MemberDoc; +import com.sun.javadoc.PackageDoc; +import com.sun.javadoc.SeeTag; + +class HidingSeeTagWrapper extends HidingTagWrapper implements SeeTag { + public HidingSeeTagWrapper(SeeTag seetag, Map mapWrappers) { + super(seetag, mapWrappers); + } + + private SeeTag _getSeeTag() { + return (SeeTag)getWrappedObject(); + } + + @Override + public String label() { + return _getSeeTag().label(); + } + + @Override + public PackageDoc referencedPackage() { + return (PackageDoc)wrapOrHide(_getSeeTag().referencedPackage()); + } + + @Override + public String referencedClassName() { + return _getSeeTag().referencedClassName(); + } + + @Override + public ClassDoc referencedClass() { + return (ClassDoc)wrapOrHide(_getSeeTag().referencedClass()); + } + + @Override + public String referencedMemberName() { + return _getSeeTag().referencedMemberName(); + } + + @Override + public MemberDoc referencedMember() { + return (MemberDoc)wrapOrHide(_getSeeTag().referencedMember()); + } +} diff --git a/docs/doclet/HidingSerialFieldTagWrapper.java b/docs/doclet/HidingSerialFieldTagWrapper.java new file mode 100644 index 0000000..0db3727 --- /dev/null +++ b/docs/doclet/HidingSerialFieldTagWrapper.java @@ -0,0 +1,59 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.SerialFieldTag; + +class HidingSerialFieldTagWrapper extends HidingTagWrapper + implements SerialFieldTag { + public HidingSerialFieldTagWrapper(SerialFieldTag serfldtag, + Map mapWrappers) { + super(serfldtag, mapWrappers); + } + + private SerialFieldTag _getSerialFieldTag() { + return (SerialFieldTag)getWrappedObject(); + } + + /* SerialFieldTag */ + + @Override + public String fieldName() { + return _getSerialFieldTag().fieldName(); + } + + @Override + public String fieldType() { + return _getSerialFieldTag().fieldType(); + } + + @Override + public ClassDoc fieldTypeDoc() { + return (ClassDoc)wrapOrHide(_getSerialFieldTag().fieldTypeDoc()); + } + + @Override + public String description() { + return _getSerialFieldTag().description(); + } + + /* Comparable */ + + @Override + public int compareTo(Object obj) { + if (obj instanceof HidingWrapper) { + return _getSerialFieldTag(). + compareTo(((HidingWrapper)obj).getWrappedObject()); + } else { + return _getSerialFieldTag().compareTo(obj); + } + } + + +} diff --git a/docs/doclet/HidingSourcePositionWrapper.java b/docs/doclet/HidingSourcePositionWrapper.java new file mode 100644 index 0000000..cf3a147 --- /dev/null +++ b/docs/doclet/HidingSourcePositionWrapper.java @@ -0,0 +1,42 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.io.File; +import java.util.Map; + +import com.sun.javadoc.SourcePosition; + +class HidingSourcePositionWrapper extends HidingWrapper + implements SourcePosition { + public HidingSourcePositionWrapper(SourcePosition type, Map mapWrappers) { + super(type, mapWrappers); + } + + private SourcePosition _getSourcePosition() { + return (SourcePosition)getWrappedObject(); + } + + @Override + public File file() { + return _getSourcePosition().file(); + } + + @Override + public int line() { + return _getSourcePosition().line(); + } + + @Override + public int column() { + return _getSourcePosition().column(); + } + + @Override + public String toString() { + return _getSourcePosition().toString(); + } +} diff --git a/docs/doclet/HidingTagWrapper.java b/docs/doclet/HidingTagWrapper.java new file mode 100644 index 0000000..6fcc466 --- /dev/null +++ b/docs/doclet/HidingTagWrapper.java @@ -0,0 +1,62 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.Doc; +import com.sun.javadoc.SourcePosition; +import com.sun.javadoc.Tag; + +class HidingTagWrapper extends HidingWrapper implements Tag { + public HidingTagWrapper(Tag tag, Map mapWrappers) { + super(tag, mapWrappers); + } + + private Tag _getTag() { + return (Tag)getWrappedObject(); + } + + @Override + public String name() { + return _getTag().name(); + } + + @Override + public Doc holder() { + return (Doc) wrapOrHide(_getTag().holder()); + } + + @Override + public String kind() { + return _getTag().kind(); + } + + @Override + public String text() { + return _getTag().text(); + } + + @Override + public String toString() { + return _getTag().toString(); + } + + @Override + public Tag[] inlineTags() { + return (Tag[])wrapOrHide(_getTag().inlineTags()); + } + + @Override + public Tag[] firstSentenceTags() { + return (Tag[])wrapOrHide(_getTag().firstSentenceTags()); + } + + @Override + public SourcePosition position() { + return (SourcePosition) wrapOrHide(_getTag().position()); + } +} diff --git a/docs/doclet/HidingThrowsTagWrapper.java b/docs/doclet/HidingThrowsTagWrapper.java new file mode 100644 index 0000000..3145c93 --- /dev/null +++ b/docs/doclet/HidingThrowsTagWrapper.java @@ -0,0 +1,42 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.ThrowsTag; +import com.sun.javadoc.Type; + +class HidingThrowsTagWrapper extends HidingTagWrapper implements ThrowsTag { + public HidingThrowsTagWrapper(ThrowsTag thrtag, Map mapWrappers) { + super(thrtag, mapWrappers); + } + + private ThrowsTag _getThrowsTag() { + return (ThrowsTag)getWrappedObject(); + } + + @Override + public String exceptionName() { + return _getThrowsTag().exceptionName(); + } + + @Override + public String exceptionComment() { + return _getThrowsTag().exceptionComment(); + } + + @Override + public ClassDoc exception() { + return (ClassDoc)wrapOrHide(_getThrowsTag().exception()); + } + + @Override + public Type exceptionType() { + return (Type)wrapOrHide(_getThrowsTag().exceptionType()); + } +} diff --git a/docs/doclet/HidingTypeVariableWrapper.java b/docs/doclet/HidingTypeVariableWrapper.java new file mode 100644 index 0000000..3879de6 --- /dev/null +++ b/docs/doclet/HidingTypeVariableWrapper.java @@ -0,0 +1,39 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotationDesc; +import com.sun.javadoc.ProgramElementDoc; +import com.sun.javadoc.Type; +import com.sun.javadoc.TypeVariable; + +class HidingTypeVariableWrapper extends HidingTypeWrapper + implements TypeVariable { + public HidingTypeVariableWrapper(TypeVariable type, Map mapWrappers) { + super(type, mapWrappers); + } + + private TypeVariable _getTypeVariable() { + return (TypeVariable)getWrappedObject(); + } + + @Override + public Type[] bounds() { + return (Type[]) wrapOrHide(_getTypeVariable().bounds()); + } + + @Override + public ProgramElementDoc owner() { + return (ProgramElementDoc) wrapOrHide(_getTypeVariable().owner()); + } + + @Override + public AnnotationDesc[] annotations() { + return (AnnotationDesc[]) wrapOrHide(_getTypeVariable().annotations()); + } +} diff --git a/docs/doclet/HidingTypeWrapper.java b/docs/doclet/HidingTypeWrapper.java new file mode 100644 index 0000000..e3354d0 --- /dev/null +++ b/docs/doclet/HidingTypeWrapper.java @@ -0,0 +1,91 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotatedType; +import com.sun.javadoc.AnnotationTypeDoc; +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.ParameterizedType; +import com.sun.javadoc.Type; +import com.sun.javadoc.TypeVariable; +import com.sun.javadoc.WildcardType; + +class HidingTypeWrapper extends HidingWrapper implements Type { + public HidingTypeWrapper(Type type, Map mapWrappers) { + super(type, mapWrappers); + } + + private Type _getType() { + return (Type)getWrappedObject(); + } + + @Override + public String typeName() { + return _getType().typeName(); + } + + @Override + public String qualifiedTypeName() { + return _getType().qualifiedTypeName(); + } + + @Override + public String simpleTypeName() { + return _getType().simpleTypeName(); + } + + @Override + public String dimension() { + return _getType().dimension(); + } + + @Override + public String toString() { + return _getType().toString(); + } + + @Override + public boolean isPrimitive() { + return _getType().isPrimitive(); + } + + @Override + public ClassDoc asClassDoc() { + return (ClassDoc)wrapOrHide(_getType().asClassDoc()); + } + + @Override + public ParameterizedType asParameterizedType() { + return (ParameterizedType)wrapOrHide(_getType().asParameterizedType()); + } + + @Override + public TypeVariable asTypeVariable() { + return (TypeVariable)wrapOrHide(_getType().asTypeVariable()); + } + + @Override + public WildcardType asWildcardType() { + return (WildcardType)wrapOrHide(_getType().asWildcardType()); + } + + @Override + public AnnotatedType asAnnotatedType() { + return (AnnotatedType)wrapOrHide(_getType().asAnnotatedType()); + } + + @Override + public AnnotationTypeDoc asAnnotationTypeDoc() { + return (AnnotationTypeDoc)wrapOrHide(_getType().asAnnotationTypeDoc()); + } + + @Override + public Type getElementType() { + return (Type)wrapOrHide(_getType().getElementType()); + } +} diff --git a/docs/doclet/HidingWildcardTypeWrapper.java b/docs/doclet/HidingWildcardTypeWrapper.java new file mode 100644 index 0000000..669d068 --- /dev/null +++ b/docs/doclet/HidingWildcardTypeWrapper.java @@ -0,0 +1,32 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.util.Map; + +import com.sun.javadoc.Type; +import com.sun.javadoc.WildcardType; + +class HidingWildcardTypeWrapper extends HidingTypeWrapper + implements WildcardType { + public HidingWildcardTypeWrapper(WildcardType type, Map mapWrappers) { + super(type, mapWrappers); + } + + private WildcardType _getWildcardType() { + return (WildcardType)getWrappedObject(); + } + + @Override + public Type[] extendsBounds() { + return (Type[])wrapOrHide(_getWildcardType().extendsBounds()); + } + + @Override + public Type[] superBounds() { + return (Type[])wrapOrHide(_getWildcardType().superBounds()); + } +} diff --git a/docs/doclet/HidingWrapper.java b/docs/doclet/HidingWrapper.java new file mode 100644 index 0000000..c05ec51 --- /dev/null +++ b/docs/doclet/HidingWrapper.java @@ -0,0 +1,309 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +import java.util.Map; + +import com.sun.javadoc.AnnotatedType; +import com.sun.javadoc.AnnotationDesc; +import com.sun.javadoc.AnnotationTypeDoc; +import com.sun.javadoc.AnnotationTypeElementDoc; +import com.sun.javadoc.AnnotationValue; +import com.sun.javadoc.ClassDoc; +import com.sun.javadoc.ConstructorDoc; +import com.sun.javadoc.Doc; +import com.sun.javadoc.ExecutableMemberDoc; +import com.sun.javadoc.FieldDoc; +import com.sun.javadoc.MemberDoc; +import com.sun.javadoc.MethodDoc; +import com.sun.javadoc.PackageDoc; +import com.sun.javadoc.ParamTag; +import com.sun.javadoc.Parameter; +import com.sun.javadoc.ParameterizedType; +import com.sun.javadoc.ProgramElementDoc; +import com.sun.javadoc.RootDoc; +import com.sun.javadoc.SeeTag; +import com.sun.javadoc.SerialFieldTag; +import com.sun.javadoc.SourcePosition; +import com.sun.javadoc.Tag; +import com.sun.javadoc.ThrowsTag; +import com.sun.javadoc.Type; +import com.sun.javadoc.TypeVariable; +import com.sun.javadoc.WildcardType; + +class HidingWrapper { + private Object _objWrapped; + private Map _mapWrappers; + + public HidingWrapper(Object objWrapped, Map mapWrappers) { + _objWrapped = objWrapped; + _mapWrappers = mapWrappers; + } + + public Object getWrappedObject() { + return _objWrapped; + } + + public Map getWrapperMap() { + return _mapWrappers; + } + + public String toString() { + return getWrappedObject().toString(); + } + + public HidingWrapper wrapOrHide(Object object) { + if ((object == null) || (object instanceof HidingWrapper)) { + return (HidingWrapper)object; + } else if (getWrapperMap().containsKey(object)) { + return (HidingWrapper)getWrapperMap().get(object); + } else { + HidingWrapper wrapper = _wrapOrHide(object); + getWrapperMap().put(object, wrapper); + return wrapper; + } + } + + public Object[] wrapOrHide(Object[] objects) { + HidingWrapper[] wrappers = new HidingWrapper[objects.length]; + int iFilteredCount = 0; + + for (int i = 0; i < objects.length; i++) { + HidingWrapper wrapper = wrapOrHide(objects[i]); + + if (wrapper != null) { + wrappers[iFilteredCount] = wrapper; + iFilteredCount++; + } + } + + Object[] wrappersTrimmedAndTyped = + _createHidingWrapperArray(objects, iFilteredCount); + System.arraycopy(wrappers, 0, + wrappersTrimmedAndTyped, 0, iFilteredCount); + + return wrappersTrimmedAndTyped; + } + + private boolean _isHidden(Doc doc) { + if (doc == null) { + return false; + } else { + return (doc.tags("hidden").length > 0); + } + } + + /** + * This is the method that actually instantiates objects. Update it if + * the Doclet API changes. One hack here: ClassDoc must be handled before + * Type because ClassDocs are also Type. If we instantiate a + * HidingTypeWrapper to hold a ClassDoc, + * we'll have problems. We should only + * instantiate HidingTypeWrapper for otherwise unknown Types. + */ + private HidingWrapper _wrapOrHide(Object object) { + if (object == null) { + return null; + } else if (object instanceof Doc) { + if (_isHidden((Doc)object)) { + return null; + } else if (object instanceof PackageDoc) { + return new HidingPackageDocWrapper((PackageDoc)object, + getWrapperMap()); + } else if (object instanceof ProgramElementDoc) { + if ((_isHidden(((ProgramElementDoc)object). 
+ containingClass())) || (_isHidden(((ProgramElementDoc) + object).containingPackage()))) { + return null; + } + + if (object instanceof ClassDoc) { + if (object instanceof AnnotationTypeDoc) { + return new HidingAnnotationTypeDocWrapper( + (AnnotationTypeDoc)object, getWrapperMap()); + } else { + return new HidingClassDocWrapper((ClassDoc)object, + getWrapperMap()); + } + } else if (object instanceof MemberDoc) { + if (object instanceof ExecutableMemberDoc) + { + if (object instanceof ConstructorDoc) { + return new HidingConstructorDocWrapper( + (ConstructorDoc)object, + getWrapperMap()); + } else if (object instanceof MethodDoc) { + // Added new classes for 1.5. + if (object instanceof AnnotationTypeElementDoc) { + return + new HidingAnnotationTypeElementDocWrapper( + (AnnotationTypeElementDoc)object, + getWrapperMap()); + } else { + return new HidingMethodDocWrapper( + (MethodDoc)object, getWrapperMap()); + } + } else if (object instanceof + AnnotationTypeElementDoc) { + return new HidingAnnotationTypeElementDocWrapper( + (AnnotationTypeElementDoc)object, + getWrapperMap()); + } else { + return new HidingExecutableMemberDocWrapper( + (ExecutableMemberDoc)object, + getWrapperMap()); + } + } else if (object instanceof FieldDoc) { + return new HidingFieldDocWrapper((FieldDoc)object, + getWrapperMap()); + } else { + return new HidingMemberDocWrapper((MemberDoc)object, + getWrapperMap()); + } + } else { + return new HidingProgramElementDocWrapper( + (ProgramElementDoc)object, getWrapperMap()); + } + } else if (object instanceof RootDoc) { + return new HidingRootDocWrapper((RootDoc)object, + getWrapperMap()); + } else { + return new HidingDocWrapper((Doc)object, getWrapperMap()); + } + } else if (object instanceof Parameter) { + return new HidingParameterWrapper((Parameter)object, + getWrapperMap()); + } else if (object instanceof Tag) { + if (object instanceof ParamTag) { + return new HidingParamTagWrapper((ParamTag)object, + getWrapperMap()); + } else if (object instanceof SeeTag) { + return new HidingSeeTagWrapper((SeeTag)object, getWrapperMap()); + } else if (object instanceof SerialFieldTag) { + return new HidingSerialFieldTagWrapper((SerialFieldTag)object, + getWrapperMap()); + } else if (object instanceof ThrowsTag) { + return new HidingThrowsTagWrapper((ThrowsTag)object, + getWrapperMap()); + } else { + return new HidingTagWrapper((Tag)object, getWrapperMap()); + } + } else if (object instanceof Type) { + if (object instanceof AnnotatedType) { + return new HidingAnnotatedTypeWrapper( + (AnnotatedType) object, getWrapperMap()); + } else if (object instanceof AnnotationTypeDoc) { + return new HidingAnnotationTypeDocWrapper( + (AnnotationTypeDoc)object, getWrapperMap()); + } else if (object instanceof ParameterizedType) { + return new HidingParameterizedTypeWrapper( + (ParameterizedType)object, getWrapperMap()); + } else if (object instanceof TypeVariable) { + return new HidingTypeVariableWrapper((TypeVariable)object, + getWrapperMap()); + } else if (object instanceof WildcardType) { + return new HidingWildcardTypeWrapper((WildcardType)object, + getWrapperMap()); + } else { + return new HidingTypeWrapper((Type)object, getWrapperMap()); + } + } else if (object instanceof AnnotationDesc) { + return new HidingAnnotationDescWrapper((AnnotationDesc)object, + getWrapperMap()); + } else if (object instanceof AnnotationValue) { + return new HidingAnnotationValueWrapper((AnnotationValue)object, + getWrapperMap()); + } else if (object instanceof SourcePosition) { + return new 
HidingSourcePositionWrapper((SourcePosition)object, + getWrapperMap()); + } else { + return new HidingWrapper(object, getWrapperMap()); + } + } + + /** + * This is the method that instantiates types arrays. + * @see _wrapOrHide(Object) + */ + private static Object[] _createHidingWrapperArray(Object[] objects, + int size) { + if (objects instanceof Doc[]) { + if (objects instanceof PackageDoc[]) { + return new PackageDoc[size]; + } else if (objects instanceof ProgramElementDoc[]) { + if (objects instanceof ClassDoc[]) { + if (objects instanceof AnnotationTypeDoc[]) { + return new AnnotationTypeDoc[size]; + } else { + return new ClassDoc[size]; + } + } else if (objects instanceof MemberDoc[]) { + if (objects instanceof ExecutableMemberDoc[]) { + if (objects instanceof ConstructorDoc[]) { + return new ConstructorDoc[size]; + } else if (objects instanceof MethodDoc[]) { + if (objects instanceof + AnnotationTypeElementDoc[]) { + return new AnnotationTypeElementDoc[size]; + } else { + return new MethodDoc[size]; + } + } else if (objects instanceof + AnnotationTypeElementDoc[]) { + return new AnnotationTypeElementDoc[size]; + } else { + return new ExecutableMemberDoc[size]; + } + } else if (objects instanceof FieldDoc[]) { + return new FieldDoc[size]; + } else { + return new MemberDoc[size]; + } + } else { + return new ProgramElementDoc[size]; + } + } else if (objects instanceof RootDoc[]) { + return new RootDoc[size]; + } else { + return new Doc[size]; + } + } else if (objects instanceof Parameter[]) { + return new Parameter[size]; + } else if (objects instanceof Tag[]) { + if (objects instanceof ParamTag[]) { + return new ParamTag[size]; + } else if (objects instanceof SeeTag[]) { + return new SeeTag[size]; + } else if (objects instanceof SerialFieldTag[]) { + return new SerialFieldTag[size]; + } else if (objects instanceof ThrowsTag[]) { + return new ThrowsTag[size]; + } else { + return new Tag[size]; + } + } else if (objects instanceof Type[]) { + if (objects instanceof AnnotationTypeDoc[]) { + return new AnnotationTypeDoc[size]; + } else if (objects instanceof ParameterizedType[]) { + return new ParameterizedType[size]; + } else if (objects instanceof TypeVariable[]) { + return new TypeVariable[size]; + } else if (objects instanceof WildcardType[]) { + return new WildcardType[size]; + } else { + return new Type[size]; + } + } else if (objects instanceof AnnotationDesc[]) { + return new AnnotationDesc[size]; + } else if (objects instanceof AnnotationValue[]) { + return new AnnotationValue[size]; + } else if (objects instanceof SourcePosition[]) { + return new SourcePosition[size]; + } else { + return new Object[size]; + } + } +} diff --git a/docs/examples.html b/docs/examples.html new file mode 100644 index 0000000..214e67d --- /dev/null +++ b/docs/examples.html @@ -0,0 +1,353 @@ + + + + + Berkeley DB Java Edition Examples + + + +

+Oracle +

+ +

Berkeley DB Java Edition
Examples

+ +

The JE distribution comes with examples that illustrate:

+ +
+Building and running JE examples
+Basic Example
+Getting Started Examples
+Writing Transactional Applications Examples
+Translating SQL Queries
+Examples List
+ +

Building and Running a Basic Example

+ +

Compiling and running a simple example can serve as a sanity check of the +installation. Follow the instructions below to compile and run the +PersonExample.

+ +

You can find the source for this example at:

+ +
+
JE_HOME/examples/persist/PersonExample.java
+
+ +

Assuming you have installed the Java SE JDK and have verified that you have a working Java compiler, you can build PersonExample as follows.

+ +
  1. Change to the JE_HOME/examples directory.

  2. Set your CLASSPATH to include both JE_HOME/lib/je-M.N.P.jar and the
     JE_HOME/examples directory.

  3. Compile PersonExample.java with the following command:

         javac persist/PersonExample.java

     or on Windows:

         javac persist\PersonExample.java
+ +

To run PersonExample, use the following command, specifying an environment directory for the data generated by the example:

+ +
+
java persist.PersonExample -h <environment directory>
+
+ +

For example, using "." for the environment directory will write the database files into the current directory. You'll notice that a 00000000.jdb file and a je.lck file are created. These are the first log file in the environment and a lock file. If you need to delete the environment before running a different example, simply delete these two files.

+ +

When you run the program you'll see the following output. While this is not +a very entertaining program, it is enough to test that you have installed JE +correctly.

+ +
222-22-2222 Jack Smith
+333-33-3333 Mary Smith
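
For a sense of what produced that output, here is a minimal sketch of the DPL pattern that PersonExample demonstrates. It is not the shipped source (the Person fields and the store name are illustrative assumptions), but the com.sleepycat.persist calls are the API the example is built on:

    import java.io.File;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.persist.EntityCursor;
    import com.sleepycat.persist.EntityStore;
    import com.sleepycat.persist.PrimaryIndex;
    import com.sleepycat.persist.StoreConfig;
    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;

    @Entity
    class Person {
        @PrimaryKey
        String ssn;
        String name;

        Person(String ssn, String name) { this.ssn = ssn; this.name = name; }
        private Person() {} // the DPL requires a default constructor
    }

    public class MiniPersonExample {
        public static void main(String[] args) throws Exception {
            // args[0] is the environment directory, as given to -h above.
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            Environment env = new Environment(new File(args[0]), envConfig);

            StoreConfig storeConfig = new StoreConfig();
            storeConfig.setAllowCreate(true);
            EntityStore store = new EntityStore(env, "PersonStore", storeConfig);

            PrimaryIndex<String, Person> bySsn =
                store.getPrimaryIndex(String.class, Person.class);
            bySsn.put(new Person("222-22-2222", "Jack Smith"));
            bySsn.put(new Person("333-33-3333", "Mary Smith"));

            // Iterate in key order; the cursor must be closed.
            EntityCursor<Person> people = bySsn.entities();
            try {
                for (Person p : people) {
                    System.out.println(p.ssn + " " + p.name);
                }
            } finally {
                people.close();
            }

            store.close();
            env.close();
        }
    }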
+ +

The other JE examples are compiled and run similarly. The sections below describe how to run the examples from the Getting Started Guide and Writing Transactional Applications, as well as the Translating SQL Queries examples. Instructions for running the remaining examples are contained in each example's source file.

+ +

Running the Getting Started Examples

+ +

As described in the Berkeley DB Java Edition Getting Started +Guide, the final examples in every chapter exist in the JE +package. You can build and run these examples as follows:

+ + + +

Running the Writing Transactional Applications +Examples

+ +

The examples in Writing Transactional Applications with +Berkeley DB, Java Edition guide exist in the JE package. You +can build and run these examples as follows:
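
These examples are compiled and run like the other examples above. At the heart of each of them is the same write-transaction pattern, sketched minimally below. This is not code from the distribution; env and db stand for an already-open transactional Environment and Database:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.Transaction;

    void transactionalPut(Environment env, Database db) throws Exception {
        Transaction txn = env.beginTransaction(null, null);
        boolean committed = false;
        try {
            DatabaseEntry key = new DatabaseEntry("aKey".getBytes("UTF-8"));
            DatabaseEntry data = new DatabaseEntry("aValue".getBytes("UTF-8"));
            db.put(txn, key, data); // write under the transaction
            txn.commit();
            committed = true;
        } finally {
            if (!committed) {
                txn.abort(); // undo any partial work on failure
            }
        }
    }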

+ + +

Running the Translating SQL Query Examples

+ +

This example shows how some common SQL queries can be implemented +using the Direct Persistence Layer. It's meant to help users who are +more familiar with SQL translate those approaches to the DPL. These +queries include:

+
+
Basic data retrieval:

+
SELECT * FROM tab ORDER BY col ASC;

+ +
A prefix query:

+
SELECT * FROM tab WHERE col LIKE 'prefix%';

+ +
A range query, where the data type of A (as well as B) may be an +int, a float, a String, etc:

+
SELECT * FROM tab WHERE col >= A AND col <= B;

+ +
An equi-join on a single primary database:

+
SELECT * FROM tab WHERE col1 = A AND col2 = B;

+ +
An equi-join on two primary databases combined with filtering on "t2.col2". Note that if "t2.col2" is a secondary key, the filtering does an index lookup. Otherwise the filtering is done by scanning the database:

+
SELECT t1.* FROM table1 t1, table2 t2 WHERE t1.col1 = t2.col1 +AND t2.col2 = A;
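
To make the mapping concrete, the sketch below shows how the range and prefix queries above might look in the DPL. The Tab entity is a hypothetical stand-in for "tab", and the snippet assumes an already-open EntityStore named store; the index calls (getSecondaryIndex and the key-range form of entities) are the com.sleepycat.persist API:

    import com.sleepycat.persist.EntityCursor;
    import com.sleepycat.persist.PrimaryIndex;
    import com.sleepycat.persist.SecondaryIndex;
    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;
    import com.sleepycat.persist.model.Relationship;
    import com.sleepycat.persist.model.SecondaryKey;

    @Entity
    class Tab {
        @PrimaryKey
        long id;
        @SecondaryKey(relate = Relationship.MANY_TO_ONE)
        String col;
        private Tab() {} // the DPL requires a default constructor
    }

    PrimaryIndex<Long, Tab> primary = store.getPrimaryIndex(Long.class, Tab.class);
    SecondaryIndex<String, Long, Tab> byCol =
        store.getSecondaryIndex(primary, String.class, "col");

    // SELECT * FROM tab WHERE col >= A AND col <= B;  (String keys here)
    EntityCursor<Tab> rows = byCol.entities("A", true, "B", true);
    try {
        for (Tab t : rows) { /* each row in the range, in key order */ }
    } finally {
        rows.close();
    }

    // SELECT * FROM tab WHERE col LIKE 'prefix%';
    // A prefix match is a range scan bounded by the next possible value.
    EntityCursor<Tab> matches =
        byCol.entities("prefix", true, "prefix" + Character.MAX_VALUE, true);
    try {
        for (Tab t : matches) { /* each matching row */ }
    } finally {
        matches.close();
    }

The equi-join on a single primary database maps to com.sleepycat.persist.EntityJoin, which intersects multiple secondary-key conditions over one primary index.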
+
+

You can build and run these examples as follows:

+ +

List of Examples

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Example | Location | API | Description
Getting Started Guide | examples/persist/gettingStarted | DPL | scenarios using the Direct Persistence Layer from the Getting Started Guide
Writing Transactional Applications | examples/persist/txn | DPL | scenarios using the Direct Persistence Layer from Writing Transactional Applications
Writing Transactional Applications | examples/je/txn | Base | scenarios using the Base API from Writing Transactional Applications
Translating SQL Queries | examples/persist/sqlApp | DPL | shows how some common SQL queries can be implemented using the Direct Persistence Layer
PersonExample | examples/persist | DPL | demonstrates basic use of the Direct Persistence Layer
ScalaPersonExample | examples/persist | DPL | demonstrates using JE with the Scala programming language
EventExample, EventExampleDPL | examples/persist | DPL | contrasts the Base API and the Direct Persistence Layer with an example of storing event objects
CustomKeyOrderExample | examples/persist | DPL | shows how to use a Comparable to specify key order
DplDump | examples/persist | DPL | dumps objects stored using the Direct Persistence Layer in XML format
HelloDatabaseWorld | examples/collections/hello | Collections | trivial example using the Collections API
AccessExample | examples/collections/access | Collections | reimplementation of the Base API AccessExample using the Collections API
Shipments | examples/collections/ship | Collections | series of examples based on a shipment database
SimpleExample | examples/je | Base | does basic data insertion and retrieval
BindingExample | examples/je | Base | shows how to use com.sleepycat.bind to convert between Java objects and JE data records
SecondaryExample | examples/je | Base | illustrates the use of secondary indices
SequenceExample | examples/je | Base | demonstrates the use of Sequence objects
ToManyExample | examples/je | Base | shows how to use multi-key secondary indices to support many-many and one-many primary/secondary key relationships
MeasureInsertSize | examples/je | Base | inserts a given set of key/value pairs in order to measure the disk space consumed by a given data set
JCA | examples/jca | Base | shows how to use the J2EE Connector Architecture with JE
StockQuotes | examples/je/rep/quote | High Availability/Replication | shows how to use BDB JE High Availability
+ +
+

Copyright (c) 2002, 2017 Oracle and/or its affiliates. +All rights reserved.

+ + + diff --git a/docs/examples/allclasses-frame.html b/docs/examples/allclasses-frame.html new file mode 100644 index 0000000..204ca76 --- /dev/null +++ b/docs/examples/allclasses-frame.html @@ -0,0 +1,26 @@ + + + + + +All Classes (Oracle - Berkeley DB Java Edition Examples) + + + + + +

All Classes

+
+ +
+ + diff --git a/docs/examples/allclasses-noframe.html b/docs/examples/allclasses-noframe.html new file mode 100644 index 0000000..59b7583 --- /dev/null +++ b/docs/examples/allclasses-noframe.html @@ -0,0 +1,26 @@ + + + + + +All Classes (Oracle - Berkeley DB Java Edition Examples) + + + + + +

All Classes

+
+ +
+ + diff --git a/docs/examples/constant-values.html b/docs/examples/constant-values.html new file mode 100644 index 0000000..6390929 --- /dev/null +++ b/docs/examples/constant-values.html @@ -0,0 +1,154 @@ + + + + + +Constant Field Values (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +
+

Constant Field Values

+

Contents

+ +
+
+ + +

je.rep.*

+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/deprecated-list.html b/docs/examples/deprecated-list.html new file mode 100644 index 0000000..e6add35 --- /dev/null +++ b/docs/examples/deprecated-list.html @@ -0,0 +1,125 @@ + + + + + +Deprecated List (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +
+

Deprecated API

+

Contents

+
+ +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/help-doc.html b/docs/examples/help-doc.html new file mode 100644 index 0000000..6578c67 --- /dev/null +++ b/docs/examples/help-doc.html @@ -0,0 +1,222 @@ + + + + + +API Help (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +
+

How This API Document Is Organized

+
This API (Application Programming Interface) document has pages corresponding to the items in the navigation bar, described as follows.
+
+
+ +This help file applies to API documentation generated using the standard doclet.
+ +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/index-all.html b/docs/examples/index-all.html new file mode 100644 index 0000000..de847d4 --- /dev/null +++ b/docs/examples/index-all.html @@ -0,0 +1,283 @@ + + + + + +Index (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

D

+
+
doTransactionWork(Transaction) - Method in class je.rep.quote.RunTransaction
+
+
Must be implemented to perform operations using the given Transaction.
+
+
+ + + +

H

+
+
HARouter - Class in je.rep.quote
+
+
This example illustrates use of an HA-aware router used to forward high-level requests to replication nodes implemented by RouterDrivenStockQuotes.
+
+
+ + + +

J

+
+
je.rep.quote - package je.rep.quote
+
+
JE Replication Stock Quote example.
+
+
+ + + +

M

+
+
main(String[]) - Static method in class je.rep.quote.HARouter
+
 
+
main(String[]) - Static method in class je.rep.quote.RouterDrivenStockQuotes
+
 
+
main(String[]) - Static method in class je.rep.quote.SimpleRouter
+
 
+
main(String[]) - Static method in class je.rep.quote.StockQuotes
+
 
+
main(String[]) - Static method in class je.rep.quote.StockQuotesRMIForwarding
+
 
+
main(String[]) - Static method in class je.rep.quote.UpdateForwardingStockQuotes
+
 
+
+ + + +

O

+
+
onReplicaWrite(ReplicaWriteException) - Method in class je.rep.quote.RunTransaction
+
+
May be optionally overridden to handle a ReplicaWriteException.
+
+
onRetryFailure(OperationFailureException) - Method in class je.rep.quote.RunTransaction
+
+
May be optionally overridden to handle a failure after the + TRANSACTION_RETRY_MAX has been exceeded.
+
+
+ + + +

Q

+
+
quit(PrintStream) - Method in class je.rep.quote.StockQuotes
+
+
Implements the "quit" command.
+
+
quit(PrintStream) - Method in class je.rep.quote.StockQuotesRMIForwarding
+
+
Performs the RMI-associated cleanup so that the RMI server can be shut down cleanly.
+
+
+ + + +

R

+
+
RMI_NAME - Static variable in class je.rep.quote.StockQuotesRMIForwarding
+
 
+
RouterDrivenStockQuotes - Class in je.rep.quote
+
+
This class is based on StockQuotes and illustrates use of an + HA-aware router (implemented by HARouter), in conjunction with the + Monitor class, to direct + application requests, based upon the type of request (read or write) and the + state (Master or Replica) of a node in the replication group.
+
+
run(boolean) - Method in class je.rep.quote.RunTransaction
+
+
Runs a transaction, calls the doTransactionWork method, and retries as + needed.
+
+
RunTransaction - Class in je.rep.quote
+
+
Utility class to begin and commit/abort a transaction and handle exceptions + according to this application's policies.
+
+
+ + + +

S

+
+
SimpleRouter - Class in je.rep.quote
+
+
This example illustrates the use of a simple HA-unaware router that is used + in conjunction with UpdateForwardingStockQuotes.
+
+
StockQuotes - Class in je.rep.quote
+
+
The most basic demonstration of a replicated application.
+
+
StockQuotesRMIForwarding - Class in je.rep.quote
+
+
This example is a small variation on the basic StockQuotes example.
+
+
StockQuotesRMIForwarding.WriteServices - Interface in je.rep.quote
+
 
+
StockQuotesRMIForwarding.WriteServicesImpl - Class in je.rep.quote
+
+
The class supplies the RMI implementation of the write methods.
+
+
+ + + +

U

+
+
update(Quote) - Method in interface je.rep.quote.StockQuotesRMIForwarding.WriteServices
+
+
The "write" operation which will update the price associated with + the Stock.
+
+
update(Quote) - Method in class je.rep.quote.StockQuotesRMIForwarding.WriteServicesImpl
+
+
The update operation invoked by a Replica on this Master.
+
+
UpdateForwardingStockQuotes - Class in je.rep.quote
+
+
This class is based on RouterDrivenStockQuotes and illustrates use of an HA-unaware router (implemented by SimpleRouter) that load-balances requests (both read and write) across all the nodes in a replication group.
+
+
+ + + +

W

+
+
WriteServicesImpl(PrintStream) - Constructor for class je.rep.quote.StockQuotesRMIForwarding.WriteServicesImpl
+
 
+
+ +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/index.html b/docs/examples/index.html new file mode 100644 index 0000000..bc8d3a3 --- /dev/null +++ b/docs/examples/index.html @@ -0,0 +1,72 @@ + + + + + +Oracle - Berkeley DB Java Edition Examples + + + + + + +<noscript> +<div>JavaScript is disabled on your browser.</div> +</noscript> +<h2>Frame Alert</h2> +<p>This document is designed to be viewed using the frames feature. If you see this message, you are using a non-frame-capable web client. Link to <a href="je/rep/quote/package-summary.html">Non-frame version</a>.</p> + + + diff --git a/docs/examples/je/rep/quote/HARouter.html b/docs/examples/je/rep/quote/HARouter.html new file mode 100644 index 0000000..7205e4b --- /dev/null +++ b/docs/examples/je/rep/quote/HARouter.html @@ -0,0 +1,301 @@ + + + + + +HARouter (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class HARouter

+
+
+ +
+
    +
  • +
    +
    +
    public class HARouter
    +extends java.lang.Object
    +
This example illustrates use of an HA-aware Router used to forward high + level requests to replication nodes implemented by + RouterDrivenStockQuotes. The router is built using the APIs provided + by the Monitor; it's a + standalone application and does not itself access a JE Environment. The + router forwards logical requests that represent some service provided by + the application. It only has knowledge of whether a request will potentially + require a write to the database, but does not have any other + application-level logic, nor does it access a JE environment. The HARouter accepts a + request from the console and dispatches it to the application running on the + master, if it's a write request, or to one of the replicas if it's a read + request. The HARouter keeps track of the current Master via the events that + are delivered to the Monitor. +

    + It's the HARouter instead of each individual node (as in the + UpdateForwardingStockQuotes example) that tracks the current Master + via the Monitor. Since the + router ensures that writes are directed to the master node, the logic in + the node itself is simpler: the node simply services the requests forwarded + to it by the router on a port dedicated for this purpose. +

    + The protocol used to communicate between the router and the nodes has been + deliberately kept very simple. In particular, it makes limited provisions + for error reporting back to the router. +

    + The router requires the following arguments: + +

    + java je.rep.quote.HARouter -nodeName <nodeName> \
    +                            -nodeHost <host:port> \
    +                            -helperHost <host:port>"
    +  The arguments are described below:
    +   -nodeName identifies the monitor name associated with this Router
    +   -nodeHost the hostname:port combination used by the Monitor to listen for
    +             election results and group level changes.
    +   -helperHost one or more nodes that may be used by the Monitor to locate the
    +               Master and register the Monitor with the Master.
    + 
+ + Note that the arguments are similar to the ones used to start a replication + node. A key difference is that the -env option is absent, since the router + is standalone and is not associated with a JE environment.

    + The router can be started as follows: + +

    + java je.rep.quote.HARouter -nodeName n1 \
    +                            -nodeHost node.acme.com:6000 \
    +                            -helperHost node.acme.com:5001
    + 
    + + The replication nodes involved in the routing can be started as described in + RouterDrivenStockQuotes. The Router and the nodes can be started in + any convenient order.
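As a minimal sketch of the Monitor-based master tracking described above, the com.sleepycat.je.rep.monitor API can be used along the following lines. The group name, listener bodies, and omitted exception handling are illustrative assumptions, not the router's actual code:

    import com.sleepycat.je.rep.ReplicationNode;
    import com.sleepycat.je.rep.monitor.*;

    MonitorConfig monConfig = new MonitorConfig();
    monConfig.setGroupName("StockQuotesGroup");       // assumed group name
    monConfig.setNodeName("n1");                      // the -nodeName argument
    monConfig.setNodeHostPort("node.acme.com:6000");  // the -nodeHost argument
    monConfig.setHelperHosts("node.acme.com:5001");   // the -helperHost argument

    Monitor monitor = new Monitor(monConfig);
    ReplicationNode master = monitor.register();      // current master, if known
    monitor.startListener(new MonitorChangeListener() {
        public void notify(NewMasterEvent event) {
            // Direct subsequent write requests to the newly elected master.
        }
        public void notify(GroupChangeEvent event) { /* track group membership */ }
        public void notify(JoinGroupEvent event)   { }
        public void notify(LeaveGroupEvent event)  { }
    });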
    +
    +
    See Also:
    +
    RouterDrivenStockQuotes
    +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + +
      All Methods Static Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static voidmain(java.lang.String[] argv) 
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        main

        +
        public static void main(java.lang.String[] argv)
        +                 throws java.lang.Exception
        +
        +
        Throws:
        +
        java.lang.Exception
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/RouterDrivenStockQuotes.html b/docs/examples/je/rep/quote/RouterDrivenStockQuotes.html new file mode 100644 index 0000000..fe5da56 --- /dev/null +++ b/docs/examples/je/rep/quote/RouterDrivenStockQuotes.html @@ -0,0 +1,335 @@ + + + + + +RouterDrivenStockQuotes (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class RouterDrivenStockQuotes

+
+
+ +
+
    +
  • +
    +
    Direct Known Subclasses:
    +
    UpdateForwardingStockQuotes
    +
    +
    +
    +
    public class RouterDrivenStockQuotes
    +extends StockQuotes
    +
    This class is based on StockQuotes and illustrates use of an + HA-aware router (implemented by HARouter), in conjunction with the + Monitor class, to direct + application requests, based upon the type of request (read or write) and the + state (Master or Replica) of a node in the replication group. This example + is meant to illustrate how a software load balancer might be integrated with + JE HA, where HARouter plays the role of the load balancer for + purposes of the example. +

    + Be sure to read the Example Overview first to put this + example into context. +

    + In this example, unlike StockQuotes, only the HARouter has a + console associated with it. It accepts commands typed into its console and + forwards them as appropriate to the Master and Replicas in the group. The + logic for tracking the Master resides in HARouter, and + information about the state of the replication group is supplied by the + Monitor. While this example + uses just one HARouter instance for the entire group, production + applications could use multiple router instances to avoid single points of + failure. +

    + Each node, which in this example is an instance of + RouterDrivenStockQuotes, establishes a server socket on which + it can listen for requests from HARouter. The node that is currently the + Master will expect both write and read requests from HARouter, while nodes + that are Replicas will only expect read requests from the router. +

    + The request flow between nodes in this example is shown below. +

    + ------------               Read requests
    + | HARouter |------------------------------------||
    + | Instance |---------------------||             ||
    + ------------                     ||             ||
    +  ||                              ||             ||
    +  || Write requests               ||             ||
    +  \/                              ||             ||
    + ---------------------------      ||             ||
    + | RouterDrivenStockQuotes |      ||             ||
    + | Instance 1: Master      |      ||             ||
    + ---------------------------      \/             ||
    +                ---------------------------      ||
    +                | RouterDrivenStockQuotes |      ||
    +                | Instance 2: Replica     |      ||
    +                ---------------------------      \/
    +                               ---------------------------
    +                               | RouterDrivenStockQuotes |
    +                               | Instance 3: Replica     |
    +                               ---------------------------
    +
    +                                       ...more Replica instances...
    + 
    +

+ This example is intended to be illustrative. It forwards requests as text + and receives responses in text form. Actual applications may, for example, + forward HTTP requests, or use some other application-level network protocol + to forward such requests. +

    + Please review the javadoc in StockQuotes for a detailed description + of the arguments that must be supplied at startup. The only difference is + that you must use the name of this class when invoking the JVM. For example, + the first node can be started as follows: + +

    + java je.rep.quote.RouterDrivenStockQuotes -env /tmp/stockQuotes1 \
    +                                           -nodeName n1 \
    +                                           -nodeHost node.acme.com:5001 \
    +                                           -helperHost node.acme.com:5001
    + 
    + + In addition to starting the nodes, you will also need to start the + HARouter as described in its javadoc.
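In shape, the per-node request loop described above is a line-oriented socket server. The sketch below is an illustration under stated assumptions (appPort and handleRequest are hypothetical names), not the example's actual code:

    import java.io.*;
    import java.net.*;

    try (ServerSocket server = new ServerSocket(appPort)) {  // appPort: HA port + displacement
        while (true) {
            try (Socket s = server.accept();
                 BufferedReader in = new BufferedReader(
                     new InputStreamReader(s.getInputStream()));
                 PrintWriter out = new PrintWriter(s.getOutputStream(), true)) {
                String request = in.readLine();       // e.g. "ORCL 18.25" or "print"
                out.println(handleRequest(request));  // hypothetical command handler
            }
        }
    }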
    +
    +
    See Also:
    +
    HARouter
    +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + +
      All Methods Static Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static voidmain(java.lang.String[] argv) 
      + +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        main

        +
        public static void main(java.lang.String[] argv)
        +                 throws java.lang.Exception
        +
        +
        Throws:
        +
        java.lang.Exception
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/RunTransaction.html b/docs/examples/je/rep/quote/RunTransaction.html new file mode 100644 index 0000000..c752f0f --- /dev/null +++ b/docs/examples/je/rep/quote/RunTransaction.html @@ -0,0 +1,318 @@ + + + + + +RunTransaction (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class RunTransaction

+
+
+ +
+
    +
  • +
    +
    +
    public abstract class RunTransaction
    +extends java.lang.Object
    +
    Utility class to begin and commit/abort a transaction and handle exceptions + according to this application's policies. The doTransactionWork method is + abstract and must be implemented by callers. The transaction is run and + doTransactionWork is called by the run() method of this class. The + onReplicaWrite and onRetryFailure methods may optionally be overridden.
    +
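As a hedged usage sketch, a caller might subclass and run the utility as below; the constructor arguments (an open replicated environment and an output stream) are assumptions based on this page rather than verified signatures:

    new RunTransaction(repEnv, System.out) {
        @Override
        public void doTransactionWork(Transaction txn) {
            // Perform reads and writes with txn; run() commits on success and
            // retries or aborts according to this application's policies.
        }
    }.run(false /* readOnly */);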
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Abstract Methods Concrete Methods 
      Modifier and TypeMethod and Description
      abstract voiddoTransactionWork(Transaction txn) +
      Must be implemented to perform operations using the given Transaction.
      +
      voidonReplicaWrite(ReplicaWriteException replicaWrite) +
      May be optionally overridden to handle a ReplicaWriteException.
      +
      voidonRetryFailure(OperationFailureException lastException) +
      May be optionally overridden to handle a failure after the + TRANSACTION_RETRY_MAX has been exceeded.
      +
      voidrun(boolean readOnly) +
      Runs a transaction, calls the doTransactionWork method, and retries as + needed.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + + + + + +
        +
      • +

        doTransactionWork

        +
        public abstract void doTransactionWork(Transaction txn)
        +
        Must be implemented to perform operations using the given Transaction.
        +
      • +
      + + + +
        +
      • +

        onReplicaWrite

        +
        public void onReplicaWrite(ReplicaWriteException replicaWrite)
        +
May be optionally overridden to handle a ReplicaWriteException. After + this method is called, the run method will return. By + default, this method throws the ReplicaWriteException.
        +
      • +
      + + + +
        +
      • +

        onRetryFailure

        +
        public void onRetryFailure(OperationFailureException lastException)
        +
May be optionally overridden to handle a failure after the + TRANSACTION_RETRY_MAX has been exceeded. After this method is called, + the run method will return. By default, this method + prints the last exception.
        +
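For instance, a subclass wanting non-default behavior might override the two hooks as sketched below; the bodies are illustrative assumptions only:

    @Override
    public void onReplicaWrite(ReplicaWriteException replicaWrite) {
        // The default rethrows; an application could instead forward the
        // write operation to the master here.
    }

    @Override
    public void onRetryFailure(OperationFailureException lastException) {
        // The default prints the exception; an application could instead
        // report the failure to its caller.
    }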
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/SimpleRouter.html b/docs/examples/je/rep/quote/SimpleRouter.html new file mode 100644 index 0000000..8b47b30 --- /dev/null +++ b/docs/examples/je/rep/quote/SimpleRouter.html @@ -0,0 +1,285 @@ + + + + + +SimpleRouter (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class SimpleRouter

+
+
+ +
+
    +
  • +
    +
    +
    public class SimpleRouter
    +extends java.lang.Object
    +
This example illustrates the use of a simple HA-unaware router that is used + in conjunction with UpdateForwardingStockQuotes. The router is + unaware of the state (Master or Replica) of each + node and simply forwards requests entered at the router's console to each + node in the group in round-robin fashion. +

    + The UpdateForwardingStockQuotes instance will in turn, if + necessary, forward any write requests to the current master and return the + results back to SimpleRouter. UpdateForwardingStockQuotes + instances do not have their own consoles, they only service requests + delivered over the network by this router. +

+ SimpleRouter takes host:port pairs as arguments, one pair for + each instance of the UpdateForwardingStockQuotes application. + The port numbers in this case are application, not HA, port numbers on which + the UpdateForwardingStockQuotes application listens for + application messages forwarded by SimpleRouter. They must + therefore be different from the ports used internally by HA, that is, from + the HA port numbers specified as arguments to + UpdateForwardingStockQuotes. The application port number is + computed in this example by adding + HARouter.APP_PORT_DISPLACEMENT (default value 100) to the HA + port number associated with the node. So, if node "n1" uses port 5001 for + HA, it must (based upon the conventions used in these examples) use port + 5101 for application-level communication. +

    + SimpleRouter can thus be invoked as follows: + +

    + java je.rep.quote.SimpleRouter node.acme.com:5101 node.acme.com:5102 node.acme.com:5103
    + 
+ + for a three-node group. In this case, the applications will use ports 5101 + through 5103 for application messages, while HA will use ports 5001 through + 5003. + +

    + SimpleRouter and UpdateForwardingStockQuotes can be started in any order.
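A minimal sketch of the round-robin forwarding described above, with one request line and one response line per connection per these examples' conventions; nodes, next, and requestLine are hypothetical locals:

    import java.io.*;
    import java.net.*;

    String target = nodes[next];                 // nodes: the host:port arguments
    next = (next + 1) % nodes.length;            // advance the round-robin cursor
    String[] hp = target.split(":");
    try (Socket s = new Socket(hp[0], Integer.parseInt(hp[1]));
         PrintWriter out = new PrintWriter(s.getOutputStream(), true);
         BufferedReader in = new BufferedReader(
             new InputStreamReader(s.getInputStream()))) {
        out.println(requestLine);                // forward the console command
        System.out.println(in.readLine());       // relay the node's response
    }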

    +
    +
    See Also:
    +
    UpdateForwardingStockQuotes
    +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + +
      All Methods Static Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static voidmain(java.lang.String[] argv) 
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        main

        +
        public static void main(java.lang.String[] argv)
        +                 throws java.lang.Exception
        +
        +
        Throws:
        +
        java.lang.Exception
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/StockQuotes.html b/docs/examples/je/rep/quote/StockQuotes.html new file mode 100644 index 0000000..3e79894 --- /dev/null +++ b/docs/examples/je/rep/quote/StockQuotes.html @@ -0,0 +1,410 @@ + + + + + +StockQuotes (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class StockQuotes

+
+
+ +
+
    +
  • +
    +
    Direct Known Subclasses:
    +
    RouterDrivenStockQuotes, StockQuotesRMIForwarding
    +
    +
    +
    +
    public class StockQuotes
    +extends java.lang.Object
    +
    The most basic demonstration of a replicated application. It's intended to + help gain an understanding of basic HA concepts and demonstrate use of the + HA APIs to create a replicated environment and issue read and write + transactions. +

    + Be sure to read the Example Overview first to put this + example into context. +

    + The program can be used to start up multiple stock quote servers supplying + the following arguments: + +

    + java je.rep.quote.StockQuotes -env <environment home> \
    +                               -nodeName <nodeName> \
    +                               -nodeHost <hostname:port> \
    +                               -helperHost <hostname:port>
    + 
    + + The argument names resemble the ReplicationConfig names to draw + attention to the connection between the program argument names and + ReplicationConfig APIs. + +
    +  -env        a pre-existing directory for the replicated JE environment
+  -nodeName   the name used to uniquely identify this node in the replication group
    +  -nodeHost   the unique hostname, port pair for this node
    +  -helperHost the hostname, port pair combination for the helper node. It's
    +              the same as the nodeHost only if this node is intended to
    +              become the initial Master, during the formation of the
    +              replication group.
    + 
    + + A typical demo session begins with a set of commands such as the following + to start each node. The first node can be started as below: + +
    + java je.rep.quote.StockQuotes -env dir1 -nodeName n1 \
    +                               -nodeHost node.acme.com:5001 \
    +                               -helperHost node.acme.com:5001
    + 
    + + Note that the helperHost and the nodeHost are the + same, since it's the first node in the group. HA uses this fact to start a + brand new replication group of size one, with this node as the master if + there is no existing environment in the environment directory + dir1. +

    + Nodes can be added to the group by using a variation of the above. The + second and third node can be started as follows: + +

    + java je.rep.quote.StockQuotes -env dir2 -nodeName n2 \
    +                               -nodeHost node.acme.com:5002 \
    +                               -helperHost node.acme.com:5001
    +
    + java je.rep.quote.StockQuotes -env dir3 -nodeName n3 \
    +                               -nodeHost node.acme.com:5003 \
    +                               -helperHost node.acme.com:5002
    + 
+ + Note that each node has its own unique node name and a distinct directory + for its replicated environment. This node and any subsequent nodes can use the + first node as a helper to get themselves going. In fact, you can pick any node + already in the group to serve as a helper. So, for example, when adding the + third node, node 2 or node 1 could serve as the helper node. The helper node + simply provides a mechanism to help a new node get itself admitted into the + group. The helper node is not needed once a node becomes part of the group.

    + When initially running the example, please use a group of at least three + nodes. A two node group is a special case, and it is best to learn how to + run larger groups first. For more information, see + + Two-Node Replication Groups. When initially creating the nodes, it is + also important to start the master first. +

+ But once the nodes have been created, the order in which the nodes are + started up does not matter. Starting the master (the one where the + helperHost and the nodeHost are the same) first minimizes the + initial overall group startup time, since the master + initializes the replicated environment and is ready to start accepting and + processing commands even as the other nodes concurrently join the group. +

+ + The above commands start up a group with three nodes all running locally on + the same machine. You can start up nodes on different machines connected by + a TCP/IP network by executing the above commands on the respective machines. + It's important in this case that the clocks on these machines be reasonably + synchronized; that is, they should be within a couple of seconds of each + other. You can do this manually, but it's best to use a protocol like NTP for this purpose. +

    + Upon subsequent restarts the nodes will automatically hold an election and + select one of the nodes in the group to be the master. The choice of master + is made visible by the master/replica prompt that the application uses to + make the distinction clear. Note that at least a simple majority of nodes + must be started before the application will respond with a prompt because + it's only after a simple majority of nodes is available that an election can + be held and a master elected. For a two node group, both nodes must be + started before an election can be held. +

    + Commands are submitted directly at the command prompt in the console + established by the application at each node. Update commands are only + accepted at the console associated with the current master, identified by + the master prompt as below: + +

    StockQuotes-2 (master)>
    + + After issuing a few commands, you may want to experiment with shutting down + or killing some number of the replicated environments and bringing them back + up to see how the application behaves. +

    + If you type stock updates at an application that is currently running as a + replica node, the update is refused and you must manually re-enter the + updates on the console associated with the master. This is of course quite + cumbersome and serves as motivation for the subsequent examples. +

    + As shown below, there is no routing of requests between nodes in this + example, which is why write requests fail when they are issued on a Replica + node. +

    + -----------------------
    + | StockQuotes         | Read and Write requests both succeed,
    + | Instance 1: Master  | because this is the Master.
    + -----------------------
    +
    +      -----------------------
    +      | StockQuotes         | Read requests succeed,
    +      | Instance 2: Replica | but Write requests fail on a Replica.
    +      -----------------------
    +
    +           -----------------------
    +           | StockQuotes         | Read requests succeed,
    +           | Instance 3: Replica | but Write requests fail on a Replica.
    +           -----------------------
    +
    +               ...more Replica instances...
    + 
    +

    + See UpdateForwardingStockQuotes for an example that uses + SimpleRouter, along with application supplied inter-node request + routing to direct write requests to the master. +

+ See RouterDrivenStockQuotes along with HARouter for an + example that uses an external router built using the + Monitor to route write + requests externally to the master and provide primitive load balancing + across the nodes in the replication group.
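The HA API usage that this example demonstrates, creating a replicated environment from the arguments above, follows the standard JE HA pattern sketched below; the group name is an assumed placeholder:

    import java.io.File;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.ReplicationConfig;

    ReplicationConfig repConfig = new ReplicationConfig();
    repConfig.setGroupName("StockQuotesGroup");        // assumed placeholder
    repConfig.setNodeName("n1");                       // -nodeName
    repConfig.setNodeHostPort("node.acme.com:5001");   // -nodeHost
    repConfig.setHelperHosts("node.acme.com:5001");    // -helperHost

    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    envConfig.setTransactional(true);

    ReplicatedEnvironment repEnv =
        new ReplicatedEnvironment(new File("dir1"), repConfig, envConfig);  // -env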

    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + +
      All Methods Static Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static voidmain(java.lang.String[] argv) 
      voidquit(java.io.PrintStream out) +
      Implements the "quit" command.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        quit

        +
        public void quit(java.io.PrintStream out)
        +
        Implements the "quit" command. Subclasses can override to take + additional cleanup measures.
        +
      • +
      + + + +
        +
      • +

        main

        +
        public static void main(java.lang.String[] argv)
        +                 throws java.lang.Exception
        +
        +
        Throws:
        +
        java.lang.Exception
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/StockQuotesRMIForwarding.WriteServices.html b/docs/examples/je/rep/quote/StockQuotesRMIForwarding.WriteServices.html new file mode 100644 index 0000000..fbc33d3 --- /dev/null +++ b/docs/examples/je/rep/quote/StockQuotesRMIForwarding.WriteServices.html @@ -0,0 +1,245 @@ + + + + + +StockQuotesRMIForwarding.WriteServices (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Interface StockQuotesRMIForwarding.WriteServices

+
+
+
+ +
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + +
      All Methods Instance Methods Abstract Methods 
      Modifier and TypeMethod and Description
      voidupdate(je.rep.quote.Quote quote) +
      The "write" operation which will update the price associated with + the Stock.
      +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        update

        +
        void update(je.rep.quote.Quote quote)
        +     throws java.rmi.RemoteException
        +
        The "write" operation which will update the price associated with + the Stock.
        +
        +
        Throws:
        +
        java.rmi.RemoteException
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/StockQuotesRMIForwarding.WriteServicesImpl.html b/docs/examples/je/rep/quote/StockQuotesRMIForwarding.WriteServicesImpl.html new file mode 100644 index 0000000..9ebcdc2 --- /dev/null +++ b/docs/examples/je/rep/quote/StockQuotesRMIForwarding.WriteServicesImpl.html @@ -0,0 +1,301 @@ + + + + + +StockQuotesRMIForwarding.WriteServicesImpl (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class StockQuotesRMIForwarding.WriteServicesImpl

+
+
+ +
+ +
+
+
    +
  • + +
      +
    • + + +

      Constructor Summary

      + + + + + + + + +
      Constructors 
      Constructor and Description
      WriteServicesImpl(java.io.PrintStream printStream) 
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      voidupdate(je.rep.quote.Quote quote) +
      The update operation invoked by a Replica on this Master.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        WriteServicesImpl

        +
        public WriteServicesImpl(java.io.PrintStream printStream)
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        update

        +
        public void update(je.rep.quote.Quote quote)
        +            throws java.rmi.RemoteException
        +
        The update operation invoked by a Replica on this Master. + +

Note that this method is executed in an RMI thread and does not + handle the environment failure level exceptions: + InsufficientLogException and + RollbackException, in order to keep the + example simple. Production code would handle these exceptions here and + coordinate with the main thread of control and other RMI threads to + take corrective actions and re-establish the environment and + database handles.
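A hedged sketch of the production handling this note alludes to, using JE's NetworkRestore for InsufficientLogException and a close-and-reopen for RollbackException; the transaction body and handle management are elided:

    try {
        // ... run the update transaction ...
    } catch (InsufficientLogException ile) {
        // This node is too far behind the group: restore log files over the
        // network, then re-open the environment and database handles.
        NetworkRestore restore = new NetworkRestore();
        restore.execute(ile, new NetworkRestoreConfig());
        // re-open handles here (application-specific)
    } catch (RollbackException re) {
        // A transaction was rolled back so the node could rejoin the group:
        // close and re-open the environment and database handles.
    }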

        +
        +
        Specified by:
        +
        update in interface StockQuotesRMIForwarding.WriteServices
        +
        Throws:
        +
        java.rmi.RemoteException
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/StockQuotesRMIForwarding.html b/docs/examples/je/rep/quote/StockQuotesRMIForwarding.html new file mode 100644 index 0000000..8b01136 --- /dev/null +++ b/docs/examples/je/rep/quote/StockQuotesRMIForwarding.html @@ -0,0 +1,375 @@ + + + + + +StockQuotesRMIForwarding (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class StockQuotesRMIForwarding

+
+
+ +
+
    +
  • +
    +
    +
    public class StockQuotesRMIForwarding
    +extends StockQuotes
    +
This example is a small variation on the basic StockQuotes example. + Instead of rejecting update requests made at a Replica's console, it + illustrates how RMI could be used to forward write requests to a Master. The + example is otherwise identical to StockQuotes and you should + read the javadoc associated with it before proceeding with this example. The + discussion that follows thus focuses entirely on the RMI-based + write-forwarding aspects of this example. +

+ + Each node in this example is an RMI server and hosts an RMI registry. The + registry contains exactly one binding associated with the name: + RMI_NAME. The object associated + with the RMI binding (an instance of StockQuotesRMIForwarding.WriteServicesImpl) makes + available all the high-level database write operations that are part of the + application. When this node is the Master, + Replicas will use the remote methods to invoke write operations + on it. All nodes are RMI servers, but only the current Master + is actually used to serve write requests while it is in the + Master state. The Replicas play the role of RMI clients making + remote method calls to the Master to forward their write requests. + +

    + Please review the javadoc in StockQuotes for a detailed description + of the arguments that must be supplied at startup. The only difference is + that you must use the name of this class when invoking the Java VM. +

    + For example, the first node can be started as follows: + +

+ java je.rep.quote.StockQuotesRMIForwarding -env /tmp/stockQuotes1 \
+                                            -nodeName n1 \
+                                            -nodeHost node.acme.com:5001 \
+                                            -helperHost node.acme.com:5001
    + 
    +

    + This instance of the application will therefore use port 5001 for HA, and, + by convention, port 5101 (5001 + RMI_PORT_DISPLACEMENT) for + the RMI registry. If you are running on multiple machines you may (depending + upon your DNS setup) need to specify the + java.rmi.server.hostname property to ensure that RMI does not + associate loopback addresses with entries in its registry.
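The registry mechanics above follow the standard java.rmi pattern; a sketch under assumptions (the registry port follows the 5101 convention, and writeServicesImpl and quote are hypothetical locals):

    import java.rmi.registry.*;
    import java.rmi.server.UnicastRemoteObject;

    // On the Master: export the implementation and bind it under RMI_NAME.
    Registry registry = LocateRegistry.createRegistry(5101);
    WriteServices stub =
        (WriteServices) UnicastRemoteObject.exportObject(writeServicesImpl, 0);
    registry.rebind(RMI_NAME, stub);

    // On a Replica: look up the Master's stub and forward the write.
    Registry masterRegistry = LocateRegistry.getRegistry("node.acme.com", 5101);
    WriteServices writer = (WriteServices) masterRegistry.lookup(RMI_NAME);
    writer.update(quote);   // the update entered at this Replica's console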

    +
  • +
+
+
+
    +
  • + + + +
      +
    • + + +

      Field Summary

      + + + + + + + + + + +
      Fields 
      Modifier and TypeField and Description
      static java.lang.StringRMI_NAME 
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + +
      All Methods Static Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static voidmain(java.lang.String[] argv) 
      voidquit(java.io.PrintStream out) +
Performs the RMI-associated cleanup so that the RMI server can be + shut down cleanly.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + + + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        quit

        +
        public void quit(java.io.PrintStream out)
        +
Performs the RMI-associated cleanup so that the RMI server can be + shut down cleanly.
        +
        +
        Overrides:
        +
        quit in class StockQuotes
        +
        +
      • +
      + + + +
        +
      • +

        main

        +
        public static void main(java.lang.String[] argv)
        +                 throws java.lang.Exception
        +
        +
        Throws:
        +
        java.lang.Exception
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/UpdateForwardingStockQuotes.html b/docs/examples/je/rep/quote/UpdateForwardingStockQuotes.html new file mode 100644 index 0000000..7840203 --- /dev/null +++ b/docs/examples/je/rep/quote/UpdateForwardingStockQuotes.html @@ -0,0 +1,341 @@ + + + + + +UpdateForwardingStockQuotes (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + + +
+
je.rep.quote
+

Class UpdateForwardingStockQuotes

+
+
+ +
+
    +
  • +
    +
    +
    public class UpdateForwardingStockQuotes
    +extends RouterDrivenStockQuotes
    +
This class is based on RouterDrivenStockQuotes and illustrates use + of an HA-unaware router (implemented by SimpleRouter) that load + balances requests (both read and write) across all the nodes in a + replication group. This example is meant to illustrate how a load balancer + appliance might fit into the JE HA architecture, where SimpleRouter + plays the role of the load balancer appliance for purposes of the example. +

    + Be sure to read the Example Overview first to put this + example into context. +

    + The router is unaware of the state (Master or Replica) of each node, or the + type (read or write) of the request. Nodes use the StateChangeListener to track the + node that is currently the master and redirect write requests to it. That + is, unlike the RouterDrivenStockQuotes example, it's the nodes and + not the router that keeps track of the current master. +

    + In this example, unlike StockQuotes, only the + SimpleRouter has a console associated with it. It accepts commands + typed into its console and forwards them as appropriate to the nodes in the + group. The logic for tracking the Master resides in each node, and is + supplied by the StateChangeListener. +

    + Each node, which in this example is an instance of + UpdateForwardingStockQuotes, establishes a server socket on + which it can listen for requests from SimpleRouter. Read + requests are processed directly by the node. Write requests are redirected + to the current master and the result is communicated back to + SimpleRouter. +

    + The request flow between nodes in this example is shown below. +

    + ----------------       Read and Write requests
    + | SimpleRouter |------------------------------------||
    + | Instance     |---------------------||             ||
    + ----------------      ||             ||             ||
    +                       ||             ||             ||
    +                       \/             ||             ||
    + -------------------------------      ||             ||
    + | UpdateForwardingStockQuotes |      ||             ||
    + | Instance 1: Master          |      ||             ||
    + -------------------------------      \/             ||
    +   /\           -------------------------------      ||
    +   ||           | UpdateForwardingStockQuotes |      ||
    +   ||---------- | Instance 2: Replica         |      ||
    +   || Write     -------------------------------      \/
    +   || requests                 -------------------------------
    +   ||                          | UpdateForwardingStockQuotes |
    +   ||--------------------------| Instance 3: Replica         |
    +                               -------------------------------
    +
    +                                       ...more Replica instances...
    + 
    +

+ + This example is intended to be illustrative. It forwards requests as text + and receives responses in text form. Actual applications may, for example, + forward HTTP requests, or use some other application-level network protocol + to forward such requests. +

+ + Please review the javadoc in StockQuotes for a detailed description + of the arguments that must be supplied at startup. The only difference is + that you must use the name of this class when invoking the Java VM. +

    + For example, the first node can be started as follows: + +

    + java je.rep.quote.UpdateForwardingStockQuotes -env /tmp/stockQuotes1 \
    +                                               -nodeName n1 \
    +                                               -nodeHost node.acme.com:5001 \
    +                                               -helperHost node.acme.com:5001
    + 
    +

    + This instance of the application will therefore use port 5001 for HA, and, + by convention, port 5101 (5001 + HARouter.APP_PORT_DISPLACEMENT) + for application messages sent to it. +

    + In addition to starting the nodes, you will also need to start the + SimpleRouter as described in its javadoc.
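A minimal sketch of the StateChangeListener-based master tracking described above; the listener body is illustrative, not the example's code:

    import com.sleepycat.je.rep.*;

    repEnv.setStateChangeListener(new StateChangeListener() {
        public void stateChange(StateChangeEvent event) {
            if (event.getState().isMaster()) {
                // Service write requests directly on this node.
            } else {
                // Forward write requests to event.getMasterNodeName().
            }
        }
    });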

    +
    +
    See Also:
    +
    SimpleRouter
    +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + +
      All Methods Static Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static voidmain(java.lang.String[] argv) 
      + +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        main

        +
        public static void main(java.lang.String[] argv)
        +                 throws java.lang.Exception
        +
        +
        Throws:
        +
        java.lang.Exception
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/package-frame.html b/docs/examples/je/rep/quote/package-frame.html new file mode 100644 index 0000000..9ffca9c --- /dev/null +++ b/docs/examples/je/rep/quote/package-frame.html @@ -0,0 +1,30 @@ + + + + + +je.rep.quote (Oracle - Berkeley DB Java Edition Examples) + + + + + +

je.rep.quote

+
+

Interfaces

+ +

Classes

+ +
+ + diff --git a/docs/examples/je/rep/quote/package-summary.html b/docs/examples/je/rep/quote/package-summary.html new file mode 100644 index 0000000..c7e0b96 --- /dev/null +++ b/docs/examples/je/rep/quote/package-summary.html @@ -0,0 +1,269 @@ + + + + + +je.rep.quote (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +
+

Package je.rep.quote

+
+
JE Replication Stock Quote example.
+
+

See: Description

+
+
+ + + + +

Package je.rep.quote Description

+
JE Replication Stock Quote example. + +

Example Overview

+This example is a simple but complete demonstration of a replicated +application. The application is a mock stock ticker which stores stock values +in a replicated JE environment. The following commands are accepted: +
    +
  • <stock> <number> : enter this stock price into the +database
  • +
  • print : print all the stocks and current prices held in the database
  • +
  • quit : shut down
  • +
+
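For example, an illustrative session at a master's console (the values are made up):

    StockQuotes-1 (master)> ORCL 18.25
    StockQuotes-1 (master)> print
    ORCL: 18.25
    StockQuotes-1 (master)> quit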

+There are three versions of the example which illustrate different application +designs and aspects of JE functionality. Please be sure to walk through the +three examples in the order listed below, since the information in one example +builds on the one before it. The javadoc description for each class describes +the example and explains how to run it. More detailed information is found in +the example source code. +

    +
  1. StockQuotes: This example is the most +basic demonstration of a replicated application. It's intended to help gain an +understanding of basic HA concepts and demonstrate use of the HA APIs to create +a replicated environment and issue read and write transactions. +

  2. +
  3. RouterDrivenStockQuotes and +HARouter: This example is based on StockQuotes and illustrates use of an HA-aware router (implemented by HARouter), in conjunction with the Monitor class, to direct application requests, based upon the type of request +(read or write) and the state (Master or Replica) of a node in the replication +group. This example is meant to illustrate how a software load balancer might +be integrated with JE HA, where HARouter plays the role of the load +balancer for purposes of the example. +

  4. +
5. UpdateForwardingStockQuotes and SimpleRouter: This example is based on RouterDrivenStockQuotes and +illustrates use of an HA-unaware router (implemented by SimpleRouter) +that load balances requests (both read and write) across all the nodes in a +replication group. This example is meant to illustrate how a load balancer +appliance might fit into the JE HA architecture, where SimpleRouter +plays the role of the load balancer appliance for purposes of the example. +
  6. +
+Disclaimer: This example is intended to be illustrative. The example +is single-threaded, while actual applications may be multithreaded. The +example forwards requests as text and receives responses in text form, while +actual applications may, for example, forward HTTP requests, or use some other +application-level network protocol to forward such requests. The example opens +and closes a socket to send each request, while actual applications will +typically use a connection management facility. +

+The example +StockQuotesRMIForwarding, +a minor variation on the basic StockQuotes +example, is also included in this package. It's intended to help illustrate how +RMI could be used to forward write requests from a Replica to the Master. +

+
+ +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/je/rep/quote/package-tree.html b/docs/examples/je/rep/quote/package-tree.html new file mode 100644 index 0000000..fee4bee --- /dev/null +++ b/docs/examples/je/rep/quote/package-tree.html @@ -0,0 +1,155 @@ + + + + + +je.rep.quote Class Hierarchy (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +
+

Hierarchy For Package je.rep.quote

+
+
+

Class Hierarchy

+ +

Interface Hierarchy

+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/overview-tree.html b/docs/examples/overview-tree.html new file mode 100644 index 0000000..a54451a --- /dev/null +++ b/docs/examples/overview-tree.html @@ -0,0 +1,159 @@ + + + + + +Class Hierarchy (Oracle - Berkeley DB Java Edition Examples) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +
+

Hierarchy For All Packages

+Package Hierarchies: + +
+
+

Class Hierarchy

+ +

Interface Hierarchy

+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition Examples
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/examples/package-list b/docs/examples/package-list new file mode 100644 index 0000000..c9e2485 --- /dev/null +++ b/docs/examples/package-list @@ -0,0 +1 @@ +je.rep.quote diff --git a/docs/examples/script.js b/docs/examples/script.js new file mode 100644 index 0000000..b346356 --- /dev/null +++ b/docs/examples/script.js @@ -0,0 +1,30 @@ +function show(type) +{ + count = 0; + for (var key in methods) { + var row = document.getElementById(key); + if ((methods[key] & type) != 0) { + row.style.display = ''; + row.className = (count++ % 2) ? rowColor : altColor; + } + else + row.style.display = 'none'; + } + updateTabs(type); +} + +function updateTabs(type) +{ + for (var value in tabs) { + var sNode = document.getElementById(tabs[value][0]); + var spanNode = sNode.firstChild; + if (value == type) { + sNode.className = activeTableTab; + spanNode.innerHTML = tabs[value][1]; + } + else { + sNode.className = tableTab; + spanNode.innerHTML = "" + tabs[value][1] + ""; + } + } +} diff --git a/docs/examples/standard-stylesheet.css b/docs/examples/standard-stylesheet.css new file mode 100644 index 0000000..98055b2 --- /dev/null +++ b/docs/examples/standard-stylesheet.css @@ -0,0 +1,574 @@ +/* Javadoc style sheet */ +/* +Overall document style +*/ + +@import url('resources/fonts/dejavu.css'); + +body { + background-color:#ffffff; + color:#353833; + font-family:'DejaVu Sans', Arial, Helvetica, sans-serif; + font-size:14px; + margin:0; +} +a:link, a:visited { + text-decoration:none; + color:#4A6782; +} +a:hover, a:focus { + text-decoration:none; + color:#bb7a2a; +} +a:active { + text-decoration:none; + color:#4A6782; +} +a[name] { + color:#353833; +} +a[name]:hover { + text-decoration:none; + color:#353833; +} +pre { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; +} +h1 { + font-size:20px; +} +h2 { + font-size:18px; +} +h3 { + font-size:16px; + font-style:italic; +} +h4 { + font-size:13px; +} +h5 { + font-size:12px; +} +h6 { + font-size:11px; +} +ul { + list-style-type:disc; +} +code, tt { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + padding-top:4px; + margin-top:8px; + line-height:1.4em; +} +dt code { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + padding-top:4px; +} +table tr td dt code { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + vertical-align:top; + padding-top:4px; +} +sup { + font-size:8px; +} +/* +Document title and Copyright styles +*/ +.clear { + clear:both; + height:0px; + overflow:hidden; +} +.aboutLanguage { + float:right; + padding:0px 21px; + font-size:11px; + z-index:200; + margin-top:-9px; +} +.legalCopy { + margin-left:.5em; +} +.bar a, .bar a:link, .bar a:visited, .bar a:active { + color:#FFFFFF; + text-decoration:none; +} +.bar a:hover, .bar a:focus { + color:#bb7a2a; +} +.tab { + background-color:#0066FF; + color:#ffffff; + padding:8px; + width:5em; + font-weight:bold; +} +/* +Navigation bar styles +*/ +.bar { + background-color:#4D7A97; + color:#FFFFFF; + padding:.8em .5em .4em .8em; + height:auto;/*height:1.8em;*/ + font-size:11px; + margin:0; +} +.topNav { + background-color:#4D7A97; + color:#FFFFFF; + float:left; + padding:0; + width:100%; + clear:right; + height:2.8em; + padding-top:10px; + overflow:hidden; + font-size:12px; +} +.bottomNav { + margin-top:10px; + background-color:#4D7A97; + color:#FFFFFF; + float:left; + padding:0; + width:100%; + clear:right; + height:2.8em; + padding-top:10px; + overflow:hidden; + font-size:12px; +} +.subNav { + background-color:#dee3e9; + float:left; 
+ width:100%; + overflow:hidden; + font-size:12px; +} +.subNav div { + clear:left; + float:left; + padding:0 0 5px 6px; + text-transform:uppercase; +} +ul.navList, ul.subNavList { + float:left; + margin:0 25px 0 0; + padding:0; +} +ul.navList li{ + list-style:none; + float:left; + padding: 5px 6px; + text-transform:uppercase; +} +ul.subNavList li{ + list-style:none; + float:left; +} +.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited { + color:#FFFFFF; + text-decoration:none; + text-transform:uppercase; +} +.topNav a:hover, .bottomNav a:hover { + text-decoration:none; + color:#bb7a2a; + text-transform:uppercase; +} +.navBarCell1Rev { + background-color:#F8981D; + color:#253441; + margin: auto 5px; +} +.skipNav { + position:absolute; + top:auto; + left:-9999px; + overflow:hidden; +} +/* +Page header and footer styles +*/ +.header, .footer { + clear:both; + margin:0 20px; + padding:5px 0 0 0; +} +.indexHeader { + margin:10px; + position:relative; +} +.indexHeader span{ + margin-right:15px; +} +.indexHeader h1 { + font-size:13px; +} +.title { + color:#2c4557; + margin:10px 0; +} +.subTitle { + margin:5px 0 0 0; +} +.header ul { + margin:0 0 15px 0; + padding:0; +} +.footer ul { + margin:20px 0 5px 0; +} +.header ul li, .footer ul li { + list-style:none; + font-size:13px; +} +/* +Heading styles +*/ +div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 { + background-color:#dee3e9; + border:1px solid #d0d9e0; + margin:0 0 6px -8px; + padding:7px 5px; +} +ul.blockList ul.blockList ul.blockList li.blockList h3 { + background-color:#dee3e9; + border:1px solid #d0d9e0; + margin:0 0 6px -8px; + padding:7px 5px; +} +ul.blockList ul.blockList li.blockList h3 { + padding:0; + margin:15px 0; +} +ul.blockList li.blockList h2 { + padding:0px 0 20px 0; +} +/* +Page layout container styles +*/ +.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer { + clear:both; + padding:10px 20px; + position:relative; +} +.indexContainer { + margin:10px; + position:relative; + font-size:12px; +} +.indexContainer h2 { + font-size:13px; + padding:0 0 3px 0; +} +.indexContainer ul { + margin:0; + padding:0; +} +.indexContainer ul li { + list-style:none; + padding-top:2px; +} +.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt { + font-size:12px; + font-weight:bold; + margin:10px 0 0 0; + color:#4E4E4E; +} +.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd { + margin:5px 0 10px 0px; + font-size:14px; + font-family:'DejaVu Sans Mono',monospace; +} +.serializedFormContainer dl.nameValue dt { + margin-left:1px; + font-size:1.1em; + display:inline; + font-weight:bold; +} +.serializedFormContainer dl.nameValue dd { + margin:0 0 0 1px; + font-size:1.1em; + display:inline; +} +/* +List styles +*/ +ul.horizontal li { + display:inline; + font-size:0.9em; +} +ul.inheritance { + margin:0; + padding:0; +} +ul.inheritance li { + display:inline; + list-style:none; +} +ul.inheritance li ul.inheritance { + margin-left:15px; + padding-left:15px; + padding-top:1px; +} +ul.blockList, ul.blockListLast { + margin:10px 0 10px 0; + padding:0; +} +ul.blockList li.blockList, ul.blockListLast li.blockList { + list-style:none; + margin-bottom:15px; + line-height:1.4; +} +ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast 
li.blockList { + padding:0px 20px 5px 10px; + border:1px solid #ededed; + background-color:#f8f8f8; +} +ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList { + padding:0 0 5px 8px; + background-color:#ffffff; + border:none; +} +ul.blockList ul.blockList ul.blockList ul.blockList li.blockList { + margin-left:0; + padding-left:0; + padding-bottom:15px; + border:none; +} +ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast { + list-style:none; + border-bottom:none; + padding-bottom:0; +} +table tr td dl, table tr td dl dt, table tr td dl dd { + margin-top:0; + margin-bottom:1px; +} +/* +Table styles +*/ +.overviewSummary, .memberSummary, .typeSummary, .useSummary, .constantsSummary, .deprecatedSummary { + width:100%; + border-left:1px solid #EEE; + border-right:1px solid #EEE; + border-bottom:1px solid #EEE; +} +.overviewSummary, .memberSummary { + padding:0px; +} +.overviewSummary caption, .memberSummary caption, .typeSummary caption, +.useSummary caption, .constantsSummary caption, .deprecatedSummary caption { + position:relative; + text-align:left; + background-repeat:no-repeat; + color:#253441; + font-weight:bold; + clear:none; + overflow:hidden; + padding:0px; + padding-top:10px; + padding-left:1px; + margin:0px; + white-space:pre; +} +.overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link, +.useSummary caption a:link, .constantsSummary caption a:link, .deprecatedSummary caption a:link, +.overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover, +.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover, +.overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active, +.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active, +.overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited, +.useSummary caption a:visited, .constantsSummary caption a:visited, .deprecatedSummary caption a:visited { + color:#FFFFFF; +} +.overviewSummary caption span, .memberSummary caption span, .typeSummary caption span, +.useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + padding-bottom:7px; + display:inline-block; + float:left; + background-color:#F8981D; + border: none; + height:16px; +} +.memberSummary caption span.activeTableTab span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + margin-right:3px; + display:inline-block; + float:left; + background-color:#F8981D; + height:16px; +} +.memberSummary caption span.tableTab span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + margin-right:3px; + display:inline-block; + float:left; + background-color:#4D7A97; + height:16px; +} +.memberSummary caption span.tableTab, .memberSummary caption span.activeTableTab { + padding-top:0px; + padding-left:0px; + padding-right:0px; + background-image:none; + float:none; + display:inline; +} +.overviewSummary .tabEnd, .memberSummary .tabEnd, .typeSummary .tabEnd, +.useSummary .tabEnd, .constantsSummary .tabEnd, .deprecatedSummary .tabEnd { + display:none; + width:5px; + position:relative; + float:left; + background-color:#F8981D; +} +.memberSummary .activeTableTab .tabEnd { + display:none; + width:5px; + 
margin-right:3px; + position:relative; + float:left; + background-color:#F8981D; +} +.memberSummary .tableTab .tabEnd { + display:none; + width:5px; + margin-right:3px; + position:relative; + background-color:#4D7A97; + float:left; + +} +.overviewSummary td, .memberSummary td, .typeSummary td, +.useSummary td, .constantsSummary td, .deprecatedSummary td { + text-align:left; + padding:0px 0px 12px 10px; +} +th.colOne, th.colFirst, th.colLast, .useSummary th, .constantsSummary th, +td.colOne, td.colFirst, td.colLast, .useSummary td, .constantsSummary td{ + vertical-align:top; + padding-right:0px; + padding-top:8px; + padding-bottom:3px; +} +th.colFirst, th.colLast, th.colOne, .constantsSummary th { + background:#dee3e9; + text-align:left; + padding:8px 3px 3px 7px; +} +td.colFirst, th.colFirst { + white-space:nowrap; + font-size:13px; +} +td.colLast, th.colLast { + font-size:13px; +} +td.colOne, th.colOne { + font-size:13px; +} +.overviewSummary td.colFirst, .overviewSummary th.colFirst, +.useSummary td.colFirst, .useSummary th.colFirst, +.overviewSummary td.colOne, .overviewSummary th.colOne, +.memberSummary td.colFirst, .memberSummary th.colFirst, +.memberSummary td.colOne, .memberSummary th.colOne, +.typeSummary td.colFirst{ + width:25%; + vertical-align:top; +} +td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover { + font-weight:bold; +} +.tableSubHeadingColor { + background-color:#EEEEFF; +} +.altColor { + background-color:#FFFFFF; +} +.rowColor { + background-color:#EEEEEF; +} +/* +Content styles +*/ +.description pre { + margin-top:0; +} +.deprecatedContent { + margin:0; + padding:10px 0; +} +.docSummary { + padding:0; +} + +ul.blockList ul.blockList ul.blockList li.blockList h3 { + font-style:normal; +} + +div.block { + font-size:14px; + font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; +} + +td.colLast div { + padding-top:0px; +} + + +td.colLast a { + padding-bottom:3px; +} +/* +Formatting effect styles +*/ +.sourceLineNo { + color:green; + padding:0 30px 0 0; +} +h1.hidden { + visibility:hidden; + overflow:hidden; + font-size:10px; +} +.block { + display:block; + margin:3px 10px 2px 0px; + color:#474747; +} +.deprecatedLabel, .descfrmTypeLabel, .memberNameLabel, .memberNameLink, +.overrideSpecifyLabel, .packageHierarchyLabel, .paramLabel, .returnLabel, +.seeLabel, .simpleTagLabel, .throwsLabel, .typeNameLabel, .typeNameLink { + font-weight:bold; +} +.deprecationComment, .emphasizedPhrase, .interfaceName { + font-style:italic; +} + +div.block div.block span.deprecationComment, div.block div.block span.emphasizedPhrase, +div.block div.block span.interfaceName { + font-style:normal; +} + +div.contentContainer ul.blockList li.blockList h2{ + padding-bottom:0px; +} diff --git a/docs/examples/style.css b/docs/examples/style.css new file mode 100644 index 0000000..9dc28c5 --- /dev/null +++ b/docs/examples/style.css @@ -0,0 +1,15 @@ +/* Javadoc style sheet for Java 7 and later versions */ + +/* Import standard style sheet for defaults */ +@import url('standard-stylesheet.css'); + +/* + * Modify the style for code samples to use a pale blue background with + * a thin, medium blue border that matches the style of various + * headings. 
+ */ +pre.code { + border: 1px solid #9eadc0; /* Medium blue */ + padding: 2px; + background-color: #dee3e9; /* Pale blue */ +} diff --git a/docs/images/Oracle_BerkeleyDB_small.png b/docs/images/Oracle_BerkeleyDB_small.png new file mode 100644 index 0000000..dd30e5f Binary files /dev/null and b/docs/images/Oracle_BerkeleyDB_small.png differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..ecf0e9c --- /dev/null +++ b/docs/index.html @@ -0,0 +1,118 @@ + + + + + + Berkeley DB Java Edition (Version: 7.5.11, 2017-10-31 09:36:36 UTC + + + + + + +
+

Oracle Berkeley DB Java Edition, 12c Release 2

+ +Library 12.2.7.5, Version 7.5.11, 2017-10-31 09:36:36 UTC
+ +
+Getting Started Guides
+ +

If you are new to Berkeley DB Java Edition, the following guides +will help you learn about important concepts and get started. +

+ +

+ + + + + + + + + + + + + + + + + + + +
Getting Started Guide + HTML | + PDF
Writing Transactional Applications + HTML | + PDF
Java Collections Tutorial + HTML | + PDF
Getting Started with BDB JE High Availability + HTML | + PDF
+

+ +Programmatic APIs + + +

+Javadoc: +the Javadoc covers the full set of public JE APIs. Starting points for the different +APIs are listed below. +

+Base API
+Direct +Persistence Layer (DPL)
+High Availability/Replication
+Collections
+

+

+Examples +

+General Examples
+Translating SQL into BDB Example
+BDB JE HA Examples
+

+ +Build and Installation Notes
+

+ Installation Notes
+ Release Notes
+ Change Log
+ Using JE trace logging
+ Monitoring JE with JMX and + JConsole
+

+ +Additional Information
+ + + + + +

+ + +
+ +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved. +

+ + + diff --git a/docs/installation.html b/docs/installation.html new file mode 100644 index 0000000..814ac46 --- /dev/null +++ b/docs/installation.html @@ -0,0 +1,151 @@ + + + + + Berkeley DB Java Edition Installation Notes + + + +

+Oracle +

+ +

Berkeley DB Java Edition
Installation Notes

+ + +

7.5.11, 2017-10-31 09:36:36 UTC

+

These installation notes describe:

+ + +

Installing JE

+ +

The release notes document the Java versions +that JE is compatible with and that have been used for certification.

+ +

To install JE, use unzip or tar to unpack the JE distribution. If you are +using unzip, be sure to use the -U option to preserve case in file names. For +example:

+ +
+
unzip -U je-M.N.P.zip
+
+

or

+
+
gunzip je-M.N.P.tar.gz
tar xvf je-M.N.P.tar
+
+ +

Unpacking the distribution creates a je-M.N.P directory on disk, +where M.N.P is the release's version number. This directory contains +the following contents:

+ +
+
docs/
examples/
lib/
src/
test/
+
+ +

The remainder of this document refers to the je-M.N.P/ +directory created in this step as JE_HOME.

+ +

Using JE

+ +

To compile and run a program using JE, you only need the +je-M.N.P.jar file in your class path. This file can be found +at:

+ +
+
JE_HOME/lib/je-M.N.P.jar
+
+ +

When using JE within a web container, note that it is important that the JE +jar file and your application jar files -- in particular the classes that are +being serialized by SerialBinding -- are loaded under the same class loader. +For running in a servlet, this typically means that you would place the JE jar +file and your application jars in the same directory.

+ +

Additionally, it is important not to place the JE jar file in the extensions +directory for your JVM. Instead, place the JE jar file in the same location as +your application jars. The extensions directory is reserved for privileged +library code.

+ +

Compiling and running a simple example can serve as a sanity check of the +installation. Follow these instructions to compile +and run the first basic example, called PersonExample.

+ +

Uninstalling JE

+ +

To uninstall, just remove the directory that you unzipped into.

+ +

Building JE

+ +

+You need Apache Ant version 1.8.0 or later to build JE. You +can download Ant from: +

+http://ant.apache.org/ +
+

+You also need Apache Ivy for loading dependencies. You can download Ivy +from: +

+http://ant.apache.org/ivy/ +
+

+Make sure to add the ivy JAR file to your ~/.ant/lib directory +or specify the ivy directory using the ant -lib option. +

+JE must be built with the version of Java specified as the compatibility +requirement in the release notes. +

+Once Ant and Ivy are installed, you can build JE using the following +command:

+
+
+cd JE_HOME
+ant clean jar
+
+
+

+The JE jar file will appear in JE_HOME/build/lib/je.jar. + +

Building and Running the Unit Tests

+ +

+You can run the unit tests using the command: "ant test". +

+The ant 'test' target will automatically download a junit jar into the extlib +directory, and this jar is needed for compiling the tests. If this command +hangs when attempting to download the jar, due to a VPN for example, you may +need to additionally specify an HTTP proxy host and/or port. This can be +specified using the proxy.host and proxy.port properties, for example: +"ant test -Dproxy.host=my.proxy". By default, proxy.port is 80. +

+On Windows you may see OutOfMemoryErrors while running the unit tests. To avoid +this, increase the JVM maximum memory size by setting the ANT_OPTS environment +variable so that it includes -Xmx256M. + +

Using JE with J2EE/JCA

+ +

JE can be used as a J2EE/JCA Resource Adapter. It has been tested +with Oracle Application Server (OC4J) 10.1.3.2.0, JBoss 3.2.6 and Sun +Java System Application Server 8.1. For cookbook-style "HOWTOs" +using the JE J2EE/JCA Resource Adapter, see +JE_HOME/examples/jca/HOWTO-oc4j.txt, +JE_HOME/examples/jca/HOWTO-jboss.txt and +JE_HOME/examples/jca/HOWTO-sjsas.txt.

+ +

Using JE with JMX

+ +

JE supplies a ready-to-install JMX (Java Management Extensions) MBean +as well as a JConsole plugin for viewing the MBean. See the JConsole README. + +Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved. + + + diff --git a/docs/java/allclasses-frame.html b/docs/java/allclasses-frame.html new file mode 100644 index 0000000..b45d446 --- /dev/null +++ b/docs/java/allclasses-frame.html @@ -0,0 +1,323 @@ + + + + + +All Classes (Oracle - Berkeley DB Java Edition API) + + + + + +

All Classes

+
+ +
+ + diff --git a/docs/java/allclasses-noframe.html b/docs/java/allclasses-noframe.html new file mode 100644 index 0000000..76ddf3c --- /dev/null +++ b/docs/java/allclasses-noframe.html @@ -0,0 +1,323 @@ + + + + + +All Classes (Oracle - Berkeley DB Java Edition API) + + + + + +

All Classes

+
+ +
+ + diff --git a/docs/java/com/sleepycat/bind/ByteArrayBinding.html b/docs/java/com/sleepycat/bind/ByteArrayBinding.html new file mode 100644 index 0000000..3491814 --- /dev/null +++ b/docs/java/com/sleepycat/bind/ByteArrayBinding.html @@ -0,0 +1,328 @@ + + + + + +ByteArrayBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind
+

Class ByteArrayBinding

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    EntryBinding<byte[]>
    +
    +
    +
    +
    public class ByteArrayBinding
    +extends java.lang.Object
    +implements EntryBinding<byte[]>
    +
    A pass-through EntryBinding that uses the entry's byte array as + the key or data object.
    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
+
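As a usage sketch (the variable names here are illustrative, not part of the API), a single shared instance converts in both directions between a byte array and a DatabaseEntry:

    import com.sleepycat.bind.ByteArrayBinding;
    import com.sleepycat.je.DatabaseEntry;

    ByteArrayBinding binding = new ByteArrayBinding();
    DatabaseEntry entry = new DatabaseEntry();

    // Store direction: wrap an existing byte array in an entry.
    binding.objectToEntry(new byte[] { 1, 2, 3 }, entry);

    // Retrieve direction: recover the byte array from the entry.
    byte[] bytes = binding.entryToObject(entry);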
+
    +
  • + +
      +
    • + + +

      Constructor Summary

      + + + + + + + + +
      Constructors 
      Constructor and Description
      ByteArrayBinding() +
      Creates a byte array binding.
      +
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      byte[] entryToObject(DatabaseEntry entry) +
      Converts an entry buffer into an Object.
      +
      void objectToEntry(byte[] object, + DatabaseEntry entry) +
      Converts an Object into an entry buffer.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        ByteArrayBinding

        +
        public ByteArrayBinding()
        +
        Creates a byte array binding.
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        entryToObject

        +
        public byte[] entryToObject(DatabaseEntry entry)
        +
        Description copied from interface: EntryBinding
        +
        Converts an entry buffer into an Object.
        +
        +
        Specified by:
        +
        entryToObject in interface EntryBinding<byte[]>
        +
        Parameters:
        +
        entry - is the source entry buffer.
        +
        Returns:
        +
        the resulting Object.
        +
        +
      • +
      + + + +
        +
      • +

        objectToEntry

        +
        public void objectToEntry(byte[] object,
        +                          DatabaseEntry entry)
        +
        Description copied from interface: EntryBinding
        +
        Converts an Object into an entry buffer.
        +
        +
        Specified by:
        +
        objectToEntry in interface EntryBinding<byte[]>
        +
        Parameters:
        +
        object - is the source Object.
        +
        entry - is the destination entry buffer.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/EntityBinding.html b/docs/java/com/sleepycat/bind/EntityBinding.html new file mode 100644 index 0000000..03183de --- /dev/null +++ b/docs/java/com/sleepycat/bind/EntityBinding.html @@ -0,0 +1,303 @@ + + + + + +EntityBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind
+

Interface EntityBinding<E>

+
+
+
+
    +
  • +
    +
    All Known Implementing Classes:
    +
    SerialSerialBinding, TupleSerialBinding, TupleSerialMarshalledBinding, TupleTupleBinding, TupleTupleMarshalledBinding
    +
    +
    +
    +
    public interface EntityBinding<E>
    +
    A binding between a key-value entry pair and an entity object. + +

    WARNING: Binding instances are typically shared by multiple + threads and binding methods are called without any special synchronization. + Therefore, bindings must be thread safe. In general no shared state should + be used and any caching of computed values must be done with proper + synchronization.

    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
+
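A minimal implementing sketch, assuming a simple entity whose key and data are both strings and reusing the StringBinding helpers from the tuple package (the Record class here is hypothetical):

    import com.sleepycat.bind.EntityBinding;
    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.je.DatabaseEntry;

    // Hypothetical entity combining a String key and a String value.
    class Record {
        final String key;
        final String value;
        Record(String key, String value) { this.key = key; this.value = value; }
    }

    // Stateless, and therefore safe to share across threads as the
    // WARNING above requires.
    class RecordBinding implements EntityBinding<Record> {
        public Record entryToObject(DatabaseEntry key, DatabaseEntry data) {
            return new Record(StringBinding.entryToString(key),
                              StringBinding.entryToString(data));
        }
        public void objectToKey(Record object, DatabaseEntry key) {
            StringBinding.stringToEntry(object.key, key);
        }
        public void objectToData(Record object, DatabaseEntry data) {
            StringBinding.stringToEntry(object.value, data);
        }
    }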
+ +
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        entryToObject

        +
        E entryToObject(DatabaseEntry key,
        +                DatabaseEntry data)
        +
        Converts key and data entry buffers into an entity Object.
        +
        +
        Parameters:
        +
        key - is the source key entry.
        +
        data - is the source data entry.
        +
        Returns:
        +
        the resulting Object.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToKey

        +
        void objectToKey(E object,
        +                 DatabaseEntry key)
        +
        Extracts the key entry from an entity Object.
        +
        +
        Parameters:
        +
        object - is the source Object.
        +
        key - is the destination entry buffer.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToData

        +
        void objectToData(E object,
        +                  DatabaseEntry data)
        +
        Extracts the data entry from an entity Object.
        +
        +
        Parameters:
        +
        object - is the source Object.
        +
        data - is the destination entry buffer.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/EntryBinding.html b/docs/java/com/sleepycat/bind/EntryBinding.html new file mode 100644 index 0000000..024ec0d --- /dev/null +++ b/docs/java/com/sleepycat/bind/EntryBinding.html @@ -0,0 +1,275 @@ + + + + + +EntryBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind
+

Interface EntryBinding<E>

+
+
+
+ +
+
+ +
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        entryToObject

        +
        E entryToObject(DatabaseEntry entry)
        +
        Converts an entry buffer into an Object.
        +
        +
        Parameters:
        +
        entry - is the source entry buffer.
        +
        Returns:
        +
        the resulting Object.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToEntry

        +
        void objectToEntry(E object,
        +                   DatabaseEntry entry)
        +
        Converts an Object into an entry buffer.
        +
        +
        Parameters:
        +
        object - is the source Object.
        +
        entry - is the destination entry buffer.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
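A minimal implementing sketch for Integer entries, reusing the IntegerBinding helpers from the tuple package (in practice TupleBinding.getPrimitiveBinding(Integer.class) already provides such a binding):

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.je.DatabaseEntry;

    class IntEntryBinding implements EntryBinding<Integer> {
        public Integer entryToObject(DatabaseEntry entry) {
            return IntegerBinding.entryToInt(entry);
        }
        public void objectToEntry(Integer object, DatabaseEntry entry) {
            IntegerBinding.intToEntry(object, entry);
        }
    }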
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/class-use/ByteArrayBinding.html b/docs/java/com/sleepycat/bind/class-use/ByteArrayBinding.html new file mode 100644 index 0000000..3b2029a --- /dev/null +++ b/docs/java/com/sleepycat/bind/class-use/ByteArrayBinding.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.bind.ByteArrayBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Uses of Class
com.sleepycat.bind.ByteArrayBinding

+
+
No usage of com.sleepycat.bind.ByteArrayBinding
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/class-use/EntityBinding.html b/docs/java/com/sleepycat/bind/class-use/EntityBinding.html new file mode 100644 index 0000000..3535dd4 --- /dev/null +++ b/docs/java/com/sleepycat/bind/class-use/EntityBinding.html @@ -0,0 +1,332 @@ + + + + + +Uses of Interface com.sleepycat.bind.EntityBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Uses of Interface
com.sleepycat.bind.EntityBinding

+
+
+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/class-use/EntryBinding.html b/docs/java/com/sleepycat/bind/class-use/EntryBinding.html new file mode 100644 index 0000000..712f2c3 --- /dev/null +++ b/docs/java/com/sleepycat/bind/class-use/EntryBinding.html @@ -0,0 +1,582 @@ + + + + + +Uses of Interface com.sleepycat.bind.EntryBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Uses of Interface
com.sleepycat.bind.EntryBinding

+
+
+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/package-frame.html b/docs/java/com/sleepycat/bind/package-frame.html new file mode 100644 index 0000000..786af17 --- /dev/null +++ b/docs/java/com/sleepycat/bind/package-frame.html @@ -0,0 +1,25 @@ + + + + + +com.sleepycat.bind (Oracle - Berkeley DB Java Edition API) + + + + + +

com.sleepycat.bind

+
+

Interfaces

+ +

Classes

+ +
+ + diff --git a/docs/java/com/sleepycat/bind/package-summary.html b/docs/java/com/sleepycat/bind/package-summary.html new file mode 100644 index 0000000..8bc25f9 --- /dev/null +++ b/docs/java/com/sleepycat/bind/package-summary.html @@ -0,0 +1,189 @@ + + + + + +com.sleepycat.bind (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Package com.sleepycat.bind

+
+
Bindings between database entries and Java objects.
+
+

See: Description

+
+
+ + + + +

Package com.sleepycat.bind Description

+
Bindings between database entries and Java objects. +
+
+
See Also:
+
[Getting Started Guide] +
+
+
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/package-tree.html b/docs/java/com/sleepycat/bind/package-tree.html new file mode 100644 index 0000000..e107b34 --- /dev/null +++ b/docs/java/com/sleepycat/bind/package-tree.html @@ -0,0 +1,147 @@ + + + + + +com.sleepycat.bind Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Hierarchy For Package com.sleepycat.bind

+Package Hierarchies: + +
+
+

Class Hierarchy

+ +

Interface Hierarchy

+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/package-use.html b/docs/java/com/sleepycat/bind/package-use.html new file mode 100644 index 0000000..53d9403 --- /dev/null +++ b/docs/java/com/sleepycat/bind/package-use.html @@ -0,0 +1,279 @@ + + + + + +Uses of Package com.sleepycat.bind (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Uses of Package
com.sleepycat.bind

+
+
+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/serial/ClassCatalog.html b/docs/java/com/sleepycat/bind/serial/ClassCatalog.html new file mode 100644 index 0000000..b4fe862 --- /dev/null +++ b/docs/java/com/sleepycat/bind/serial/ClassCatalog.html @@ -0,0 +1,363 @@ + + + + + +ClassCatalog (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind.serial
+

Interface ClassCatalog

+
+
+
+
    +
  • +
    +
    All Superinterfaces:
    +
    java.lang.AutoCloseable, java.io.Closeable
    +
    +
    +
    All Known Implementing Classes:
    +
    StoredClassCatalog
    +
    +
    +
    +
    public interface ClassCatalog
    +extends java.io.Closeable
    +
    A catalog of class description information for use during object + serialization. + +

    A catalog is used to store class descriptions separately from serialized + objects, to avoid redundantly storing this information with each object. + When serialized objects are stored in a database, a StoredClassCatalog should be used.

    + +

    This information is used for serialization of class descriptors or + java.io.ObjectStreamClass objects, each of which represents a unique class + format. For each unique format, a unique class ID is assigned by the + catalog. The class ID can then be used in the serialization stream in place + of the full class information. When used with SerialInput and + SerialOutput or any of the serial bindings, the use of the catalog + is transparent to the application.

    +
    +
    Author:
    +
    Mark Hayes
    +
    See Also:
    +
    Class Evolution
    +
    +
  • +
+
+
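A typical setup sketch, assuming an already-open Environment named env (the database name "classCatalog" is only a convention):

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.StoredClassCatalog;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);

    // The catalog persists class descriptions in an ordinary database.
    Database catalogDb = env.openDatabase(null, "classCatalog", dbConfig);
    ClassCatalog catalog = new StoredClassCatalog(catalogDb);

    // ... create serial bindings that share this catalog ...

    catalog.close(); // closes the underlying catalog database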
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Abstract Methods 
      Modifier and TypeMethod and Description
      void close() +
      Close a catalog database and release any cached resources.
      +
      java.io.ObjectStreamClass getClassFormat(byte[] classID) +
      Return the ObjectStreamClass for the given class ID.
      +
      byte[] getClassID(java.io.ObjectStreamClass classDesc) +
      Return the class ID for the current version of the given class + description.
      +
      java.lang.ClassLoader getClassLoader() +
      Returns the ClassLoader to be used by bindings that use this catalog, or + null if a default class loader should be used.
      +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        close

        +
        void close()
        +    throws DatabaseException
        +
        Close a catalog database and release any cached resources.
        +
        +
        Specified by:
        +
        close in interface java.lang.AutoCloseable
        +
        Specified by:
        +
        close in interface java.io.Closeable
        +
        Throws:
        +
        DatabaseException - if an error occurs closing the catalog + database.
        +
        +
      • +
      + + + +
        +
      • +

        getClassID

        +
        byte[] getClassID(java.io.ObjectStreamClass classDesc)
        +           throws DatabaseException,
        +                  java.lang.ClassNotFoundException
        +
        Return the class ID for the current version of the given class + description. + This is used for storing in serialization streams in place of a full + class descriptor, since it is much more compact. To get back the + ObjectStreamClass for a class ID, call getClassFormat(byte[]). + This function causes a new class ID to be assigned if the class + description has changed.
        +
        +
        Parameters:
        +
        classDesc - The class description for which to return the + class ID.
        +
        Returns:
        +
        The class ID for the current version of the class.
        +
        Throws:
        +
        DatabaseException - if an error occurs accessing the catalog + database.
        +
        java.lang.ClassNotFoundException - if the class does not exist.
        +
        +
      • +
      + + + +
        +
      • +

        getClassFormat

        +
        java.io.ObjectStreamClass getClassFormat(byte[] classID)
        +                                  throws DatabaseException,
        +                                         java.lang.ClassNotFoundException
        +
        Return the ObjectStreamClass for the given class ID. This may or may + not be the current class format, depending on whether the class has + changed since the class ID was generated.
        +
        +
        Parameters:
        +
        classID - The class ID for which to return the class format.
        +
        Returns:
        +
        The class format for the given class ID, which may or may not + represent the current version of the class.
        +
        Throws:
        +
        DatabaseException - if an error occurs accessing the catalog + database.
        +
        java.lang.ClassNotFoundException - if the class does not exist.
        +
        +
      • +
      + + + +
        +
      • +

        getClassLoader

        +
        java.lang.ClassLoader getClassLoader()
        +
        Returns the ClassLoader to be used by bindings that use this catalog, or + null if a default class loader should be used. The ClassLoader is used + by SerialBinding to load classes whose description is stored in + the catalog. + +

        In BDB JE, the implementation of this method in StoredClassCatalog returns the ClassLoader property of the catalog + database Environment. This ensures that the Environment's ClassLoader + property is used for loading all user-supplied classes.

        +
        +
        Returns:
        +
        the ClassLoader or null.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/serial/SerialBase.html b/docs/java/com/sleepycat/bind/serial/SerialBase.html new file mode 100644 index 0000000..045eb0a --- /dev/null +++ b/docs/java/com/sleepycat/bind/serial/SerialBase.html @@ -0,0 +1,369 @@ + + + + + +SerialBase (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind.serial
+

Class SerialBase

+
+
+ +
+
    +
  • +
    +
    Direct Known Subclasses:
    +
    SerialBinding
    +
    +
    +
    +
    public class SerialBase
    +extends java.lang.Object
    +
    A base class for serial bindings and key creators that provides control over the + allocation of the output buffer. + +

    Serial bindings append data to a FastOutputStream instance. This + object has a byte array buffer that is resized when it is full. The + reallocation of this buffer can be a performance factor for some + applications using large objects. To manage this issue, the setSerialBufferSize(int) method may be used to control the initial size of the + buffer, and the getSerialOutput(java.lang.Object) method may be overridden by + subclasses to take over creation of the FastOutputStream object.

    +
    +
    Author:
    +
    Mark Hayes
    +
    See Also:
    +
    Class Evolution
    +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Summary

      + + + + + + + + +
      Constructors 
      Constructor and Description
      SerialBase() +
      Initializes the initial output buffer size to zero.
      +
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      int getSerialBufferSize() +
      Returns the initial byte size of the output buffer.
      +
      protected FastOutputStream getSerialOutput(java.lang.Object object) +
      Returns an empty SerialOutput instance that will be used by the serial + binding or key creator.
      +
      void setSerialBufferSize(int byteSize) +
      Sets the initial byte size of the output buffer that is allocated by the + default implementation of getSerialOutput(java.lang.Object).
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + + + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        setSerialBufferSize

        +
        public void setSerialBufferSize(int byteSize)
        +
        Sets the initial byte size of the output buffer that is allocated by the + default implementation of getSerialOutput(java.lang.Object). + +

        If this property is zero (the default), the default FastOutputStream.DEFAULT_INIT_SIZE size is used.

        +
        +
        Parameters:
        +
        byteSize - the initial byte size of the output buffer, or zero to + use the default size.
        +
        +
      • +
      + + + +
        +
      • +

        getSerialBufferSize

        +
        public int getSerialBufferSize()
        +
        Returns the initial byte size of the output buffer.
        +
        +
        Returns:
        +
        the initial byte size of the output buffer.
        +
        See Also:
        +
        setSerialBufferSize(int)
        +
        +
      • +
      + + + +
        +
      • +

        getSerialOutput

        +
        protected FastOutputStream getSerialOutput(java.lang.Object object)
        +
        Returns an empty SerialOutput instance that will be used by the serial + binding or key creator. + +

        The default implementation of this method creates a new SerialOutput + with an initial buffer size that can be changed using the setSerialBufferSize(int) method.

        + +

        This method may be overridden to return a FastOutputStream instance. + For example, an instance per thread could be created and returned by + this method. If a FastOutputStream instance is reused, be sure to call + its FastOutputStream.reset() method before each use.

        +
        +
        Parameters:
        +
        object - is the object to be written to the serial output, and may + be used by subclasses to determine the size of the output buffer.
        +
        Returns:
        +
        an empty FastOutputStream instance.
        +
        See Also:
        +
        setSerialBufferSize(int)
        +
        +
      • +
      +
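A per-thread reuse sketch along those lines, applied to a SerialBinding subclass (class and field names are illustrative):

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.util.FastOutputStream;

    class PooledSerialBinding<E> extends SerialBinding<E> {
        private static final ThreadLocal<FastOutputStream> BUFFERS =
            ThreadLocal.withInitial(FastOutputStream::new);

        PooledSerialBinding(ClassCatalog catalog, Class<E> baseClass) {
            super(catalog, baseClass);
        }

        @Override
        protected FastOutputStream getSerialOutput(Object object) {
            FastOutputStream out = BUFFERS.get();
            out.reset(); // required before each reuse, per the note above
            return out;
        }
    }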
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/serial/SerialBinding.html b/docs/java/com/sleepycat/bind/serial/SerialBinding.html new file mode 100644 index 0000000..d558be2 --- /dev/null +++ b/docs/java/com/sleepycat/bind/serial/SerialBinding.html @@ -0,0 +1,450 @@ + + + + + +SerialBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind.serial
+

Class SerialBinding<E>

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    EntryBinding<E>
    +
    +
    +
    +
    public class SerialBinding<E>
    +extends SerialBase
    +implements EntryBinding<E>
    +
    A concrete EntryBinding that treats a key or data entry as + a serialized object. + +

    This binding stores objects in serialized object format. The + deserialized objects are returned by the binding, and their + Class must implement the Serializable + interface.

    + +

    For key bindings, a tuple binding is usually a better choice than a + serial binding. A tuple binding gives a reasonable sort order, and works + with comparators in all cases -- see below.

    + +

    WARNING: SerialBinding should not be used with Berkeley DB Java + Edition for key bindings, when a custom comparator is used. In JE, + comparators are instantiated and called internally at times when databases + are not accessible. Because serial bindings depend on the class catalog + database, a serial binding cannot be used during these times. An attempt + to use a serial binding with a custom comparator will result in a + NullPointerException during environment open or close.

    + +

    Class Evolution

    + +

    SerialBinding and other classes in this package use standard Java + serialization and all rules of Java serialization apply. This includes the + rules for class evolution. Once an instance of a class is stored, the class + must maintain its serialVersionUID and follow the rules defined in + the Java specification. To use a new incompatible version of a class, a + different ClassCatalog must be used or the class catalog database + must be truncated.

    + +

    If more advanced class evolution features are required, consider using + the Direct Persistence Layer.

    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
+
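A usage sketch, assuming a ClassCatalog named catalog (see StoredClassCatalog) and a hypothetical Serializable class Person:

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.je.DatabaseEntry;

    EntryBinding<Person> binding = new SerialBinding<Person>(catalog, Person.class);

    DatabaseEntry dataEntry = new DatabaseEntry();
    binding.objectToEntry(new Person("Bob"), dataEntry); // serialize
    Person copy = binding.entryToObject(dataEntry);      // deserialize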
+ +
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        SerialBinding

        +
        public SerialBinding(ClassCatalog classCatalog,
        +                     java.lang.Class<E> baseClass)
        +
        Creates a serial binding.
        +
        +
        Parameters:
        +
        classCatalog - is the catalog to hold shared class information and + for a database should be a StoredClassCatalog.
        +
        baseClass - is the base class for serialized objects stored using + this binding -- all objects using this binding must be an instance of + this class. Note that if this parameter is non-null, then this binding + will not support serialization of null values.
        +
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        getBaseClass

        +
        public final java.lang.Class<E> getBaseClass()
        +
        Returns the base class for this binding.
        +
        +
        Returns:
        +
        the base class for this binding.
        +
        +
      • +
      + + + +
        +
      • +

        getClassLoader

        +
        public java.lang.ClassLoader getClassLoader()
        +
        Returns the class loader to be used during deserialization, or null if a + default class loader should be used. The default implementation of this + method returns ClassCatalog.getClassLoader(), if it returns a + non-null value. If ClassCatalog.getClassLoader() returns null, + then Thread.currentThread().getContextClassLoader() is + returned. + +

        This method may be overridden to return a dynamically determined + class loader. For example, getBaseClass().getClassLoader() + could be called to use the class loader for the base class, assuming + that a base class has been specified.

        + +

        If this method returns null, a default class loader will be used as + determined by the java.io.ObjectInputStream.resolveClass + method.

        +
        +
        Returns:
        +
        the ClassLoader or null.
        +
        +
      • +
      + + + +
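Following the suggestion above, a subclass sketch that resolves classes through the base class's loader (constructor arguments as for SerialBinding itself):

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.SerialBinding;

    class BaseClassLoaderBinding<E> extends SerialBinding<E> {
        BaseClassLoaderBinding(ClassCatalog catalog, Class<E> baseClass) {
            super(catalog, baseClass);
        }
        @Override
        public ClassLoader getClassLoader() {
            // Use the loader of the configured base class.
            return getBaseClass().getClassLoader();
        }
    }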
        +
      • +

        entryToObject

        +
        public E entryToObject(DatabaseEntry entry)
        +
        Deserialize an object from an entry buffer. May only be called for data + that was serialized using objectToEntry(E, com.sleepycat.je.DatabaseEntry), since the fixed + serialization header is assumed to not be included in the input data. + SerialInput is used to deserialize the object.
        +
        +
        Specified by:
        +
        entryToObject in interface EntryBinding<E>
        +
        Parameters:
        +
        entry - is the input serialized entry.
        +
        Returns:
        +
        the output deserialized object.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToEntry

        +
        public void objectToEntry(E object,
        +                          DatabaseEntry entry)
        +
        Serialize an object into an entry buffer. The fixed serialization + header is not included in the output data to save space, and therefore + to deserialize the data the complementary entryToObject(com.sleepycat.je.DatabaseEntry) method + must be used. SerialOutput is used to serialize the object. + +

        Note that this method sets the DatabaseEntry offset property to a + non-zero value and the size property to a value less than the length of + the byte array.

        +
        +
        Specified by:
        +
        objectToEntry in interface EntryBinding<E>
        +
        Parameters:
        +
        object - is the input deserialized object.
        +
        entry - is the output serialized entry.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if the object is not an instance of the + base class for this binding, including if the object is null and a + non-null base class was specified.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/serial/SerialInput.html b/docs/java/com/sleepycat/bind/serial/SerialInput.html new file mode 100644 index 0000000..1371f02 --- /dev/null +++ b/docs/java/com/sleepycat/bind/serial/SerialInput.html @@ -0,0 +1,423 @@ + + + + + +SerialInput (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind.serial
+

Class SerialInput

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    java.io.Closeable, java.io.DataInput, java.io.ObjectInput, java.io.ObjectStreamConstants, java.lang.AutoCloseable
    +
    +
    +
    +
    public class SerialInput
    +extends ClassResolver.Stream
    +
    A specialized ObjectInputStream that gets class description + information from a ClassCatalog. It is used by + SerialBinding. + +

    This class is used instead of an ObjectInputStream, which it + extends, to read an object stream written by the SerialOutput class. + For reading objects from a database, normally one of the serial binding + classes is used. SerialInput is used when an ObjectInputStream is needed along with compact storage. A ClassCatalog must be supplied, however, to store shared class + descriptions.

    +
    +
    Author:
    +
    Mark Hayes
    +
    See Also:
    +
    Class Evolution
    +
    +
  • +
+
+
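A round-trip sketch, assuming a ClassCatalog named catalog and omitting exception handling; SerialOutput writes the compact stream and SerialInput reads it back:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import com.sleepycat.bind.serial.SerialInput;
    import com.sleepycat.bind.serial.SerialOutput;

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    SerialOutput out = new SerialOutput(bytes, catalog);
    out.writeObject("hello"); // the class description goes to the catalog
    out.close();

    SerialInput in = new SerialInput(
        new ByteArrayInputStream(bytes.toByteArray()), catalog);
    Object copy = in.readObject();
    in.close();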
+
    +
  • + +
      +
    • + + +

      Nested Class Summary

      +
        +
      • + + +

        Nested classes/interfaces inherited from class java.io.ObjectInputStream

        +java.io.ObjectInputStream.GetField
      • +
      +
    • +
    + +
      +
    • + + +

      Field Summary

      +
        +
      • + + +

        Fields inherited from interface java.io.ObjectStreamConstants

        +baseWireHandle, PROTOCOL_VERSION_1, PROTOCOL_VERSION_2, SC_BLOCK_DATA, SC_ENUM, SC_EXTERNALIZABLE, SC_SERIALIZABLE, SC_WRITE_METHOD, STREAM_MAGIC, STREAM_VERSION, SUBCLASS_IMPLEMENTATION_PERMISSION, SUBSTITUTION_PERMISSION, TC_ARRAY, TC_BASE, TC_BLOCKDATA, TC_BLOCKDATALONG, TC_CLASS, TC_CLASSDESC, TC_ENDBLOCKDATA, TC_ENUM, TC_EXCEPTION, TC_LONGSTRING, TC_MAX, TC_NULL, TC_OBJECT, TC_PROXYCLASSDESC, TC_REFERENCE, TC_RESET, TC_STRING
      • +
      +
    • +
    + +
      +
    • + + +

      Constructor Summary

      + + + + + + + + + + + +
      Constructors 
      Constructor and Description
      SerialInput(java.io.InputStream in, + ClassCatalog classCatalog) +
      Creates a serial input stream.
      +
      SerialInput(java.io.InputStream in, + ClassCatalog classCatalog, + java.lang.ClassLoader classLoader) +
      Creates a serial input stream.
      +
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      protected java.io.ObjectStreamClass readClassDescriptor() 
      + +
        +
      • + + +

        Methods inherited from class java.io.ObjectInputStream

        +available, close, defaultReadObject, enableResolveObject, read, read, readBoolean, readByte, readChar, readDouble, readFields, readFloat, readFully, readFully, readInt, readLine, readLong, readObject, readObjectOverride, readShort, readStreamHeader, readUnshared, readUnsignedByte, readUnsignedShort, readUTF, registerValidation, resolveObject, resolveProxyClass, skipBytes
      • +
      +
        +
      • + + +

        Methods inherited from class java.io.InputStream

        +mark, markSupported, read, reset, skip
      • +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
        +
      • + + +

        Methods inherited from interface java.io.ObjectInput

        +read, skip
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        SerialInput

        +
        public SerialInput(java.io.InputStream in,
        +                   ClassCatalog classCatalog)
        +            throws java.io.IOException
        +
        Creates a serial input stream.
        +
        +
        Parameters:
        +
        in - is the input stream from which compact serialized objects will + be read.
        +
        classCatalog - is the catalog containing the class descriptions + for the serialized objects.
        +
        Throws:
        +
        java.io.IOException - if an I/O error occurs while reading stream header.
        +
        +
      • +
      + + + +
        +
      • +

        SerialInput

        +
        public SerialInput(java.io.InputStream in,
        +                   ClassCatalog classCatalog,
        +                   java.lang.ClassLoader classLoader)
        +            throws java.io.IOException
        +
        Creates a serial input stream.
        +
        +
        Parameters:
        +
        in - is the input stream from which compact serialized objects will + be read.
        +
        classCatalog - is the catalog containing the class descriptions + for the serialized objects.
        +
        classLoader - is the class loader to use, or null if a default + class loader should be used.
        +
        Throws:
        +
        java.io.IOException - if an I/O error occurs while reading stream header.
        +
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        readClassDescriptor

        +
        protected java.io.ObjectStreamClass readClassDescriptor()
        +                                                 throws java.io.IOException,
        +                                                        java.lang.ClassNotFoundException
        +
        +
        Overrides:
        +
        readClassDescriptor in class java.io.ObjectInputStream
        +
        Throws:
        +
        java.io.IOException
        +
        java.lang.ClassNotFoundException
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/serial/SerialOutput.html b/docs/java/com/sleepycat/bind/serial/SerialOutput.html new file mode 100644 index 0000000..20b61d1 --- /dev/null +++ b/docs/java/com/sleepycat/bind/serial/SerialOutput.html @@ -0,0 +1,394 @@ + + + + + +SerialOutput (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind.serial
+

Class SerialOutput

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    java.io.Closeable, java.io.DataOutput, java.io.Flushable, java.io.ObjectOutput, java.io.ObjectStreamConstants, java.lang.AutoCloseable
    +
    +
    +
    +
    public class SerialOutput
    +extends java.io.ObjectOutputStream
    +
    A specialized ObjectOutputStream that stores class description + information in a ClassCatalog. It is used by + SerialBinding. + +

    This class is used instead of an ObjectOutputStream, which it + extends, to write a compact object stream. For writing objects to a + database, normally one of the serial binding classes is used. SerialOutput is used when an ObjectOutputStream is needed along + with compact storage. A ClassCatalog must be supplied, however, to + store shared class descriptions.

    + +

    The ClassCatalog is used to store class definitions rather than + embedding these into the stream. Instead, a class format identifier is + embedded into the stream. This identifier is then used by SerialInput to load the class format to deserialize the object.

    +
    +
    Author:
    +
    Mark Hayes
    +
    See Also:
    +
    Class Evolution
    +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Nested Class Summary

      +
        +
      • + + +

        Nested classes/interfaces inherited from class java.io.ObjectOutputStream

        +java.io.ObjectOutputStream.PutField
      • +
      +
    • +
    + +
      +
    • + + +

      Field Summary

      +
        +
      • + + +

        Fields inherited from interface java.io.ObjectStreamConstants

        +baseWireHandle, PROTOCOL_VERSION_1, PROTOCOL_VERSION_2, SC_BLOCK_DATA, SC_ENUM, SC_EXTERNALIZABLE, SC_SERIALIZABLE, SC_WRITE_METHOD, STREAM_MAGIC, STREAM_VERSION, SUBCLASS_IMPLEMENTATION_PERMISSION, SUBSTITUTION_PERMISSION, TC_ARRAY, TC_BASE, TC_BLOCKDATA, TC_BLOCKDATALONG, TC_CLASS, TC_CLASSDESC, TC_ENDBLOCKDATA, TC_ENUM, TC_EXCEPTION, TC_LONGSTRING, TC_MAX, TC_NULL, TC_OBJECT, TC_PROXYCLASSDESC, TC_REFERENCE, TC_RESET, TC_STRING
      • +
      +
    • +
    + +
      +
    • + + +

      Constructor Summary

      + + + + + + + + +
      Constructors 
      Constructor and Description
      SerialOutput(java.io.OutputStream out, + ClassCatalog classCatalog) +
      Creates a serial output stream.
      +
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + +
      All Methods Static Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static byte[] getStreamHeader() +
      Returns the fixed stream header used for all serialized streams in + PROTOCOL_VERSION_2 format.
      +
      protected void writeClassDescriptor(java.io.ObjectStreamClass classdesc) 
      +
        +
      • + + +

        Methods inherited from class java.io.ObjectOutputStream

        +annotateClass, annotateProxyClass, close, defaultWriteObject, drain, enableReplaceObject, flush, putFields, replaceObject, reset, useProtocolVersion, write, write, write, writeBoolean, writeByte, writeBytes, writeChar, writeChars, writeDouble, writeFields, writeFloat, writeInt, writeLong, writeObject, writeObjectOverride, writeShort, writeStreamHeader, writeUnshared, writeUTF
      • +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        SerialOutput

        +
        public SerialOutput(java.io.OutputStream out,
        +                    ClassCatalog classCatalog)
        +             throws java.io.IOException
        +
        Creates a serial output stream.
        +
        +
        Parameters:
        +
        out - is the output stream to which the compact serialized objects + will be written.
        +
        classCatalog - is the catalog to which the class descriptions for + the serialized objects will be written.
        +
        Throws:
        +
        java.io.IOException - if an I/O error occurs while writing stream header.
        +
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        writeClassDescriptor

        +
        protected void writeClassDescriptor(java.io.ObjectStreamClass classdesc)
        +                             throws java.io.IOException
        +
        +
        Overrides:
        +
        writeClassDescriptor in class java.io.ObjectOutputStream
        +
        Throws:
        +
        java.io.IOException
        +
        +
      • +
      + + + +
        +
      • +

        getStreamHeader

        +
        public static byte[] getStreamHeader()
        +
        Returns the fixed stream header used for all serialized streams in + PROTOCOL_VERSION_2 format. To save space this header can be removed from + serialized streams before storage and inserted again before deserializing. + SerialOutput always uses PROTOCOL_VERSION_2 serialization format + to guarantee that this header is fixed. SerialBinding removes + this header from serialized streams automatically.
        +
        +
        Returns:
        +
        the fixed stream header.
        +
        +
      • +
      +
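A sketch of stripping and restoring the header by hand (SerialBinding does this automatically); the full array is assumed to hold a stream written by SerialOutput:

    import java.util.Arrays;
    import com.sleepycat.bind.serial.SerialOutput;

    byte[] header = SerialOutput.getStreamHeader();

    // Before storage: drop the constant header.
    byte[] compact = Arrays.copyOfRange(full, header.length, full.length);

    // Before deserializing: put the header back.
    byte[] restored = new byte[header.length + compact.length];
    System.arraycopy(header, 0, restored, 0, header.length);
    System.arraycopy(compact, 0, restored, header.length, compact.length);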
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/serial/SerialSerialBinding.html b/docs/java/com/sleepycat/bind/serial/SerialSerialBinding.html new file mode 100644 index 0000000..c5794a2 --- /dev/null +++ b/docs/java/com/sleepycat/bind/serial/SerialSerialBinding.html @@ -0,0 +1,481 @@ + + + + + +SerialSerialBinding (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind.serial
+

Class SerialSerialBinding<K,D,E>

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    EntityBinding<E>
    +
    +
    +
    +
    public abstract class SerialSerialBinding<K,D,E>
    +extends java.lang.Object
    +implements EntityBinding<E>
    +
    An abstract EntityBinding that treats an entity's key entry and + data entry as serialized objects. + +

    This class takes care of serializing and deserializing the key and + data entry automatically. Its three abstract methods must be implemented by + a concrete subclass to convert the deserialized objects to/from an entity + object.

    +
    +
    +
    Author:
    +
    Mark Hayes
    +
    See Also:
    +
    Class Evolution
    +
    +
  • +
+
+
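A concrete subclass sketch, assuming hypothetical Serializable classes Person (the entity, with getName and getAddress accessors) and Address (the data):

    import com.sleepycat.bind.serial.ClassCatalog;
    import com.sleepycat.bind.serial.SerialSerialBinding;

    class PersonBinding extends SerialSerialBinding<String, Address, Person> {
        PersonBinding(ClassCatalog catalog) {
            super(catalog, String.class, Address.class);
        }
        public Person entryToObject(String name, Address address) {
            return new Person(name, address);
        }
        public String objectToKey(Person person) {
            return person.getName();
        }
        public Address objectToData(Person person) {
            return person.getAddress();
        }
    }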
+ +
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        SerialSerialBinding

        +
        public SerialSerialBinding(ClassCatalog classCatalog,
        +                           java.lang.Class<K> keyClass,
        +                           java.lang.Class<D> dataClass)
        +
        Creates a serial-serial entity binding.
        +
        +
        Parameters:
        +
        classCatalog - is the catalog to hold shared class information and + for a database should be a StoredClassCatalog.
        +
        keyClass - is the key base class.
        +
        dataClass - is the data base class.
        +
        +
      • +
      + + + +
        +
      • +

        SerialSerialBinding

        +
        public SerialSerialBinding(SerialBinding<K> keyBinding,
        +                           SerialBinding<D> dataBinding)
        +
        Creates a serial-serial entity binding.
        +
        +
        Parameters:
        +
        keyBinding - is the key binding.
        +
        dataBinding - is the data binding.
        +
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        entryToObject

        +
        public E entryToObject(DatabaseEntry key,
        +                       DatabaseEntry data)
        +
        Description copied from interface: EntityBinding
        +
        Converts key and data entry buffers into an entity Object.
        +
        +
        Specified by:
        +
        entryToObject in interface EntityBinding<E>
        +
        Parameters:
        +
        key - is the source key entry.
        +
        data - is the source data entry.
        +
        Returns:
        +
        the resulting Object.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToKey

        +
        public void objectToKey(E object,
        +                        DatabaseEntry key)
        +
        Description copied from interface: EntityBinding
        +
        Extracts the key entry from an entity Object.
        +
        +
        Specified by:
        +
        objectToKey in interface EntityBinding<E>
        +
        Parameters:
        +
        object - is the source Object.
        +
        key - is the destination entry buffer.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToData

        +
        public void objectToData(E object,
        +                         DatabaseEntry data)
        +
        Description copied from interface: EntityBinding
        +
        Extracts the data entry from an entity Object.
        +
        +
        Specified by:
        +
        objectToData in interface EntityBinding<E>
        +
        Parameters:
        +
        object - is the source Object.
        +
        data - is the destination entry buffer.
        +
        +
      • +
      + + + + + +
        +
      • +

        entryToObject

        +
        public abstract E entryToObject(K keyInput,
        +                                D dataInput)
        +
        Constructs an entity object from deserialized key and data objects.
        +
        +
        Parameters:
        +
        keyInput - is the deserialized key object.
        +
        dataInput - is the deserialized data object.
        +
        Returns:
        +
        the entity object constructed from the key and data.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToKey

        +
        public abstract K objectToKey(E object)
        +
        Extracts a key object from an entity object.
        +
        +
        Parameters:
        +
        object - is the entity object.
        +
        Returns:
        +
        the deserialized key object.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToData

        +
        public abstract D objectToData(E object)
        +
        Extracts a data object from an entity object.
        +
        +
        Parameters:
        +
        object - is the entity object.
        +
        Returns:
        +
        the deserialized data object.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/serial/SerialSerialKeyCreator.html b/docs/java/com/sleepycat/bind/serial/SerialSerialKeyCreator.html new file mode 100644 index 0000000..53be845 --- /dev/null +++ b/docs/java/com/sleepycat/bind/serial/SerialSerialKeyCreator.html @@ -0,0 +1,548 @@ + + + + + +SerialSerialKeyCreator (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.bind.serial
+

Class SerialSerialKeyCreator<PK,D,SK>

+
+
+ +
+ +
+
+ +
+
+
    +
  • + + + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        SerialSerialKeyCreator

        +
        public SerialSerialKeyCreator(ClassCatalog classCatalog,
        +                              java.lang.Class<PK> primaryKeyClass,
        +                              java.lang.Class<D> dataClass,
        +                              java.lang.Class<SK> indexKeyClass)
        +
        Creates a serial-serial key creator.
        +
        +
        Parameters:
        +
        classCatalog - is the catalog to hold shared class information and + for a database should be a StoredClassCatalog.
        +
        primaryKeyClass - is the primary key base class.
        +
        dataClass - is the data base class.
        +
        indexKeyClass - is the index key base class.
        +
        +
      • +
      + + + +
        +
      • +

        SerialSerialKeyCreator

        +
        public SerialSerialKeyCreator(SerialBinding<PK> primaryKeyBinding,
        +                              SerialBinding<D> dataBinding,
        +                              SerialBinding<SK> indexKeyBinding)
        +
        Creates a serial-serial entity binding.
        +
        +
        Parameters:
        +
        primaryKeyBinding - is the primary key binding.
        +
        dataBinding - is the data binding.
        +
        indexKeyBinding - is the index key binding.
        +
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

createSecondaryKey

public boolean createSecondaryKey(SecondaryDatabase db,
                                  DatabaseEntry primaryKeyEntry,
                                  DatabaseEntry dataEntry,
                                  DatabaseEntry indexKeyEntry)

Description copied from interface: SecondaryKeyCreator

Creates a secondary key entry, given a primary key and data entry.

A secondary key may be derived from the primary key, primary data, or a combination of the primary key and data. For secondary keys that are optional, the key creator method may return false, in which case the key/data pair will not be indexed. To ensure the integrity of a secondary database, the key creator method must always return the same result for a given set of input parameters.

A RuntimeException may be thrown by this method if an error occurs attempting to create the secondary key. This exception will be thrown by the API method currently in progress, for example, a put method, and will cause the write operation to be incomplete. When databases are not configured to be transactional, caution should be used to avoid integrity problems. See Special considerations for using Secondary Databases with and without Transactions.

Specified by:
createSecondaryKey in interface SecondaryKeyCreator

Parameters:
db - the database to which the secondary key will be added. This parameter is passed for informational purposes but is not commonly used. This parameter is always non-null.
primaryKeyEntry - the primary key entry. This parameter must not be modified by this method. This parameter is always non-null.
dataEntry - the primary data entry. This parameter must not be modified by this method. If SecondaryConfig#setExtractFromPrimaryKeyOnly is configured as true, the data parameter may be either null or non-null, and the implementation is expected to ignore it; otherwise, this parameter is always non-null.
indexKeyEntry - the secondary key created by this method. This parameter is always non-null.

Returns:
true if a key was created, or false to indicate that the key is not present.
nullifyForeignKey

public boolean nullifyForeignKey(SecondaryDatabase db,
                                 DatabaseEntry dataEntry)

Description copied from interface: ForeignKeyNullifier

Sets the foreign key reference to null in the datum of the primary database.

Specified by:
nullifyForeignKey in interface ForeignKeyNullifier

Parameters:
db - the database in which the foreign key integrity constraint is defined. This parameter is passed for informational purposes but is not commonly used.
dataEntry - the existing primary datum in which the foreign key reference should be set to null. This parameter should be updated by this method if it returns true.

Returns:
true if the datum was modified, or false to indicate that the key is not present.
createSecondaryKey

public abstract SK createSecondaryKey(PK primaryKey,
                                      D data)

Creates the index key object from primary key and data objects.

Parameters:
primaryKey - is the deserialized source primary key entry, or null if no primary key entry is used to construct the index key.
data - is the deserialized source data entry, or null if no data entry is used to construct the index key.

Returns:
the destination index key object, or null to indicate that the key is not present.
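To make the division of labor concrete: a subclass implements only this object-level method, while the base class deserializes the input entries and serializes the returned index key. A minimal sketch, where PartKey, PartData and SupplierKey are hypothetical application classes, not part of this API:

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialSerialKeyCreator;

public class SupplierKeyCreator
        extends SerialSerialKeyCreator<PartKey, PartData, SupplierKey> {

    public SupplierKeyCreator(ClassCatalog catalog) {
        super(catalog, PartKey.class, PartData.class, SupplierKey.class);
    }

    @Override
    public SupplierKey createSecondaryKey(PartKey primaryKey, PartData data) {
        // Derive the index key from the data object; returning null means
        // this record gets no entry in the secondary index.
        String supplier = data.getSupplierName();
        return (supplier == null) ? null : new SupplierKey(supplier);
    }
}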
diff --git a/docs/java/com/sleepycat/bind/serial/StoredClassCatalog.html b/docs/java/com/sleepycat/bind/serial/StoredClassCatalog.html
new file mode 100644
index 0000000..7f8feb7
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/StoredClassCatalog.html
@@ -0,0 +1,424 @@

StoredClassCatalog (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.serial

Class StoredClassCatalog
All Implemented Interfaces:
ClassCatalog, java.io.Closeable, java.lang.AutoCloseable

public class StoredClassCatalog
extends java.lang.Object
implements ClassCatalog

A ClassCatalog that is stored in a Database.

A single StoredClassCatalog object is normally used along with a set of databases that store serialized objects.

Author:
Mark Hayes

See Also:
Class Evolution
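Because the catalog itself lives in a database, setting it up is ordinary database plumbing. A minimal sketch, assuming env is an already-open com.sleepycat.je.Environment and Part is a hypothetical java.io.Serializable class:

import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;

DatabaseConfig config = new DatabaseConfig();
config.setAllowCreate(true);   // JE databases are BTREEs without duplicates by default

// One catalog database, and one StoredClassCatalog per unique catalog database.
Database catalogDb = env.openDatabase(null, "java_class_catalog", config);
StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);

// Any number of serial bindings may share the catalog.
SerialBinding<Part> partBinding = new SerialBinding<>(catalog, Part.class);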
Constructor Summary

StoredClassCatalog(Database database)
    Creates a catalog based on a given database.

Method Summary

void close()
    Close a catalog database and release any cached resources.

java.io.ObjectStreamClass getClassFormat(byte[] classID)
    Return the ObjectStreamClass for the given class ID.

byte[] getClassID(java.io.ObjectStreamClass classFormat)
    Return the class ID for the current version of the given class description.

java.lang.ClassLoader getClassLoader()
    For BDB JE, returns the ClassLoader property of the catalog database environment.

Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      Constructor Detail

StoredClassCatalog

public StoredClassCatalog(Database database)
                   throws DatabaseException,
                          java.lang.IllegalArgumentException

Creates a catalog based on a given database. To save resources, only a single catalog object should be used for each unique catalog database.

Parameters:
database - an open database to use as the class catalog. It must be a BTREE database and must not allow duplicates.

Throws:
DatabaseException - if an error occurs accessing the database.
java.lang.IllegalArgumentException - if the database is not a BTREE database or if it is configured to allow duplicates.

      Method Detail

close

public void close()
           throws DatabaseException

Description copied from interface: ClassCatalog

Close a catalog database and release any cached resources.

Specified by:
close in interface ClassCatalog
close in interface java.io.Closeable
close in interface java.lang.AutoCloseable

Throws:
DatabaseException - if an error occurs closing the catalog database.
getClassID

public byte[] getClassID(java.io.ObjectStreamClass classFormat)
                  throws DatabaseException,
                         java.lang.ClassNotFoundException

Description copied from interface: ClassCatalog

Return the class ID for the current version of the given class description. This is used for storing in serialization streams in place of a full class descriptor, since it is much more compact. To get back the ObjectStreamClass for a class ID, call ClassCatalog.getClassFormat(byte[]). This function causes a new class ID to be assigned if the class description has changed.

Specified by:
getClassID in interface ClassCatalog

Parameters:
classFormat - The class description for which to return the class ID.

Returns:
The class ID for the current version of the class.

Throws:
DatabaseException - if an error occurs accessing the catalog database.
java.lang.ClassNotFoundException - if the class does not exist.
getClassFormat

public java.io.ObjectStreamClass getClassFormat(byte[] classID)
                                         throws DatabaseException,
                                                java.lang.ClassNotFoundException

Description copied from interface: ClassCatalog

Return the ObjectStreamClass for the given class ID. This may or may not be the current class format, depending on whether the class has changed since the class ID was generated.

Specified by:
getClassFormat in interface ClassCatalog

Parameters:
classID - The class ID for which to return the class format.

Returns:
The class format for the given class ID, which may or may not represent the current version of the class.

Throws:
DatabaseException - if an error occurs accessing the catalog database.
java.lang.ClassNotFoundException - if the class does not exist.
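The two lookups are inverses of each other. A small sketch, assuming catalog is an open ClassCatalog and Part a hypothetical serializable class (exception handling omitted):

java.io.ObjectStreamClass desc = java.io.ObjectStreamClass.lookup(Part.class);

byte[] classID = catalog.getClassID(desc);   // compact ID stored in streams
java.io.ObjectStreamClass format = catalog.getClassFormat(classID);

assert format.getName().equals(desc.getName());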
getClassLoader

public java.lang.ClassLoader getClassLoader()

For BDB JE, returns the ClassLoader property of the catalog database environment. This ensures that the Environment's ClassLoader property is used for loading all user-supplied classes.

For BDB, this method returns null because no Environment ClassLoader property is available. This method may be overridden to return a ClassLoader.

Specified by:
getClassLoader in interface ClassCatalog

Returns:
the ClassLoader or null.
diff --git a/docs/java/com/sleepycat/bind/serial/TupleSerialBinding.html b/docs/java/com/sleepycat/bind/serial/TupleSerialBinding.html
new file mode 100644
index 0000000..84e44c9
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/TupleSerialBinding.html
@@ -0,0 +1,530 @@

TupleSerialBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.serial

Class TupleSerialBinding<D,E>
      Constructor Detail

TupleSerialBinding

public TupleSerialBinding(ClassCatalog classCatalog,
                          java.lang.Class<D> baseClass)

Creates a tuple-serial entity binding.

Parameters:
classCatalog - is the catalog to hold shared class information and for a database should be a StoredClassCatalog.
baseClass - is the base class.

TupleSerialBinding

public TupleSerialBinding(SerialBinding<D> dataBinding)

Creates a tuple-serial entity binding.

Parameters:
dataBinding - is the data binding.

      Method Detail

entryToObject

public E entryToObject(DatabaseEntry key,
                       DatabaseEntry data)

Description copied from interface: EntityBinding

Converts key and data entry buffers into an entity Object.

Specified by:
entryToObject in interface EntityBinding<E>

Parameters:
key - is the source key entry.
data - is the source data entry.

Returns:
the resulting Object.
objectToKey

public void objectToKey(E object,
                        DatabaseEntry key)

Description copied from interface: EntityBinding

Extracts the key entry from an entity Object.

Specified by:
objectToKey in interface EntityBinding<E>

Parameters:
object - is the source Object.
key - is the destination entry buffer.
objectToData

public void objectToData(E object,
                         DatabaseEntry data)

Description copied from interface: EntityBinding

Extracts the data entry from an entity Object.

Specified by:
objectToData in interface EntityBinding<E>

Parameters:
object - is the source Object.
data - is the destination entry buffer.
entryToObject

public abstract E entryToObject(TupleInput keyInput,
                                D dataInput)

Constructs an entity object from TupleInput key entry and deserialized data entry objects.

Parameters:
keyInput - is the TupleInput key entry object.
dataInput - is the deserialized data entry object.

Returns:
the entity object constructed from the key and data.
objectToKey

public abstract void objectToKey(E object,
                                 TupleOutput keyOutput)

Extracts a key tuple from an entity object.

Parameters:
object - is the entity object.
keyOutput - is the TupleOutput to which the key should be written.
objectToData

public abstract D objectToData(E object)

Extracts a data object from an entity object.

Parameters:
object - is the entity object.

Returns:
the deserialized data object.
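The three abstract methods together define the whole binding: the key travels as a tuple, the data as a serialized object. A minimal sketch, where Part and PartData are hypothetical classes (PartData must be serializable):

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.TupleSerialBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PartBinding extends TupleSerialBinding<PartData, Part> {

    public PartBinding(ClassCatalog catalog) {
        super(catalog, PartData.class);
    }

    @Override
    public Part entryToObject(TupleInput keyInput, PartData data) {
        // Read key fields in the same order that objectToKey writes them.
        long partNumber = keyInput.readLong();
        return new Part(partNumber, data);
    }

    @Override
    public void objectToKey(Part entity, TupleOutput keyOutput) {
        keyOutput.writeLong(entity.getNumber());
    }

    @Override
    public PartData objectToData(Part entity) {
        return entity.getData();
    }
}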
diff --git a/docs/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.html b/docs/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.html
new file mode 100644
index 0000000..a5f3af2
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.html
@@ -0,0 +1,536 @@

TupleSerialKeyCreator (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.serial

Class TupleSerialKeyCreator<D>
      Constructor Detail

TupleSerialKeyCreator

public TupleSerialKeyCreator(ClassCatalog classCatalog,
                             java.lang.Class<D> dataClass)

Creates a tuple-serial key creator.

Parameters:
classCatalog - is the catalog to hold shared class information and for a database should be a StoredClassCatalog.
dataClass - is the data base class.

TupleSerialKeyCreator

public TupleSerialKeyCreator(SerialBinding<D> dataBinding)

Creates a tuple-serial key creator.

Parameters:
dataBinding - is the data binding.

      Method Detail

createSecondaryKey

public boolean createSecondaryKey(SecondaryDatabase db,
                                  DatabaseEntry primaryKeyEntry,
                                  DatabaseEntry dataEntry,
                                  DatabaseEntry indexKeyEntry)

Description copied from interface: SecondaryKeyCreator

Creates a secondary key entry, given a primary key and data entry.

A secondary key may be derived from the primary key, primary data, or a combination of the primary key and data. For secondary keys that are optional, the key creator method may return false, in which case the key/data pair will not be indexed. To ensure the integrity of a secondary database, the key creator method must always return the same result for a given set of input parameters.

A RuntimeException may be thrown by this method if an error occurs attempting to create the secondary key. This exception will be thrown by the API method currently in progress, for example, a put method, and will cause the write operation to be incomplete. When databases are not configured to be transactional, caution should be used to avoid integrity problems. See Special considerations for using Secondary Databases with and without Transactions.

Specified by:
createSecondaryKey in interface SecondaryKeyCreator

Parameters:
db - the database to which the secondary key will be added. This parameter is passed for informational purposes but is not commonly used. This parameter is always non-null.
primaryKeyEntry - the primary key entry. This parameter must not be modified by this method. This parameter is always non-null.
dataEntry - the primary data entry. This parameter must not be modified by this method. If SecondaryConfig#setExtractFromPrimaryKeyOnly is configured as true, the data parameter may be either null or non-null, and the implementation is expected to ignore it; otherwise, this parameter is always non-null.
indexKeyEntry - the secondary key created by this method. This parameter is always non-null.

Returns:
true if a key was created, or false to indicate that the key is not present.
nullifyForeignKey

public boolean nullifyForeignKey(SecondaryDatabase db,
                                 DatabaseEntry dataEntry)

Description copied from interface: ForeignKeyNullifier

Sets the foreign key reference to null in the datum of the primary database.

Specified by:
nullifyForeignKey in interface ForeignKeyNullifier

Parameters:
db - the database in which the foreign key integrity constraint is defined. This parameter is passed for informational purposes but is not commonly used.
dataEntry - the existing primary datum in which the foreign key reference should be set to null. This parameter should be updated by this method if it returns true.

Returns:
true if the datum was modified, or false to indicate that the key is not present.
createSecondaryKey

public abstract boolean createSecondaryKey(TupleInput primaryKeyInput,
                                           D dataInput,
                                           TupleOutput indexKeyOutput)

Creates the index key entry from a primary key tuple entry and a deserialized data entry.

Parameters:
primaryKeyInput - is the TupleInput for the primary key entry, or null if no primary key entry is used to construct the index key.
dataInput - is the deserialized data entry, or null if no data entry is used to construct the index key.
indexKeyOutput - is the destination index key tuple. For index keys which are optionally present, no tuple entry should be output to indicate that the key is not present or null.

Returns:
true if a key was created, or false to indicate that the key is not present.
nullifyForeignKey

public D nullifyForeignKey(D data)

Clears the index key in the deserialized data entry.

On entry the data parameter contains the index key to be cleared. It should be changed by this method such that createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) will return false. Other fields in the data object should remain unchanged.

Parameters:
data - is the source and destination deserialized data entry.

Returns:
the destination data object, or null to indicate that the key is not present and no change is necessary. The data returned may be the same object passed as the data parameter or a newly created object.
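A concrete key creator works only with tuples and deserialized objects; the base class manages the entry buffers. A minimal sketch covering both methods, where PersonData is a hypothetical class whose optional email field is the index key:

import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.TupleSerialKeyCreator;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class EmailKeyCreator extends TupleSerialKeyCreator<PersonData> {

    public EmailKeyCreator(ClassCatalog catalog) {
        super(catalog, PersonData.class);
    }

    @Override
    public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                      PersonData data,
                                      TupleOutput indexKeyOutput) {
        if (data.getEmail() == null) {
            return false;                 // optional key absent: not indexed
        }
        indexKeyOutput.writeString(data.getEmail());
        return true;
    }

    @Override
    public PersonData nullifyForeignKey(PersonData data) {
        if (data.getEmail() == null) {
            return null;                  // key not present, no change needed
        }
        data.setEmail(null);              // clear only the key field
        return data;
    }
}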
diff --git a/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.html b/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.html
new file mode 100644
index 0000000..de51b5d
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.html
@@ -0,0 +1,445 @@

TupleSerialMarshalledBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.serial

Class TupleSerialMarshalledBinding<E extends MarshalledTupleKeyEntity>
All Implemented Interfaces:
EntityBinding<E>

public class TupleSerialMarshalledBinding<E extends MarshalledTupleKeyEntity>
extends TupleSerialBinding<E,E>

A concrete TupleSerialBinding that delegates to the MarshalledTupleKeyEntity interface of the entity class.

The MarshalledTupleKeyEntity interface must be implemented by the entity class to convert between the key/data entry and entity object.

The binding is "tricky" in that it uses the entity class for both the stored data entry and the combined entity object. To do this, the entity's key field(s) are transient and are set by the binding after the data object has been deserialized. This avoids the use of a "data" class completely. A sketch of this pattern follows below.

Author:
Mark Hayes

See Also:
MarshalledTupleKeyEntity, Class Evolution
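The transient-key trick looks like this in practice. A sketch of a hypothetical Person entity with no secondary or foreign keys; the MarshalledTupleKeyEntity method set shown here is an assumption about the com.sleepycat.bind.tuple interface, so treat it as illustrative:

import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class Person implements MarshalledTupleKeyEntity, java.io.Serializable {

    private transient long id;   // key field: excluded from the serialized data
    private String name;         // data field: serialized normally

    public Person(long id, String name) {
        this.id = id;
        this.name = name;
    }

    public void marshalPrimaryKey(TupleOutput keyOutput) {
        keyOutput.writeLong(id);
    }

    public void unmarshalPrimaryKey(TupleInput keyInput) {
        // Called by the binding after deserialization to restore the key field.
        this.id = keyInput.readLong();
    }

    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
        return false;   // no secondary keys in this sketch
    }

    public boolean nullifyForeignKey(String keyName) {
        return false;   // no foreign keys in this sketch
    }
}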
      Constructor Detail

TupleSerialMarshalledBinding

public TupleSerialMarshalledBinding(ClassCatalog classCatalog,
                                    java.lang.Class<E> baseClass)

Creates a tuple-serial marshalled binding object.

Parameters:
classCatalog - is the catalog to hold shared class information and for a database should be a StoredClassCatalog.
baseClass - is the base class for serialized objects stored using this binding; all objects using this binding must be an instance of this class.

TupleSerialMarshalledBinding

public TupleSerialMarshalledBinding(SerialBinding<E> dataBinding)

Creates a tuple-serial marshalled binding object.

Parameters:
dataBinding - is the binding used for serializing and deserializing the entity object.
diff --git a/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.html b/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.html
new file mode 100644
index 0000000..c9be8ad
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.html
@@ -0,0 +1,403 @@

TupleSerialMarshalledKeyCreator (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.serial

Class TupleSerialMarshalledKeyCreator<D extends MarshalledTupleKeyEntity>
diff --git a/docs/java/com/sleepycat/bind/serial/class-use/ClassCatalog.html b/docs/java/com/sleepycat/bind/serial/class-use/ClassCatalog.html
new file mode 100644
index 0000000..c6ef1d2
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/ClassCatalog.html
@@ -0,0 +1,278 @@

Uses of Interface
com.sleepycat.bind.serial.ClassCatalog

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/SerialBase.html b/docs/java/com/sleepycat/bind/serial/class-use/SerialBase.html
new file mode 100644
index 0000000..13c4906
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/SerialBase.html
@@ -0,0 +1,174 @@

Uses of Class
com.sleepycat.bind.serial.SerialBase

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/SerialBinding.html b/docs/java/com/sleepycat/bind/serial/class-use/SerialBinding.html
new file mode 100644
index 0000000..80df09e
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/SerialBinding.html
@@ -0,0 +1,243 @@

Uses of Class
com.sleepycat.bind.serial.SerialBinding

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/SerialInput.html b/docs/java/com/sleepycat/bind/serial/class-use/SerialInput.html
new file mode 100644
index 0000000..f70f393
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/SerialInput.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.bind.serial.SerialInput

No usage of com.sleepycat.bind.serial.SerialInput

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/SerialOutput.html b/docs/java/com/sleepycat/bind/serial/class-use/SerialOutput.html
new file mode 100644
index 0000000..74e2eea
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/SerialOutput.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.bind.serial.SerialOutput

No usage of com.sleepycat.bind.serial.SerialOutput

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialBinding.html b/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialBinding.html
new file mode 100644
index 0000000..d7b77d3
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialBinding.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.bind.serial.SerialSerialBinding

No usage of com.sleepycat.bind.serial.SerialSerialBinding

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialKeyCreator.html b/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialKeyCreator.html
new file mode 100644
index 0000000..a7bc7b6
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/SerialSerialKeyCreator.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.bind.serial.SerialSerialKeyCreator

No usage of com.sleepycat.bind.serial.SerialSerialKeyCreator

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/StoredClassCatalog.html b/docs/java/com/sleepycat/bind/serial/class-use/StoredClassCatalog.html
new file mode 100644
index 0000000..60577ed
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/StoredClassCatalog.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.bind.serial.StoredClassCatalog

No usage of com.sleepycat.bind.serial.StoredClassCatalog

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialBinding.html b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialBinding.html
new file mode 100644
index 0000000..e02fb53
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialBinding.html
@@ -0,0 +1,174 @@

Uses of Class
com.sleepycat.bind.serial.TupleSerialBinding

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialKeyCreator.html b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialKeyCreator.html
new file mode 100644
index 0000000..9ac34e6
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialKeyCreator.html
@@ -0,0 +1,173 @@

Uses of Class
com.sleepycat.bind.serial.TupleSerialKeyCreator

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledBinding.html b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledBinding.html
new file mode 100644
index 0000000..4b78934
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledBinding.html
@@ -0,0 +1,196 @@

Uses of Class
com.sleepycat.bind.serial.TupleSerialMarshalledBinding

diff --git a/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledKeyCreator.html b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledKeyCreator.html
new file mode 100644
index 0000000..c7c61c8
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/class-use/TupleSerialMarshalledKeyCreator.html
@@ -0,0 +1,175 @@

Uses of Class
com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator

diff --git a/docs/java/com/sleepycat/bind/serial/package-frame.html b/docs/java/com/sleepycat/bind/serial/package-frame.html
new file mode 100644
index 0000000..a256087
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/package-frame.html
@@ -0,0 +1,34 @@

com.sleepycat.bind.serial

Interfaces

Classes
diff --git a/docs/java/com/sleepycat/bind/serial/package-summary.html b/docs/java/com/sleepycat/bind/serial/package-summary.html
new file mode 100644
index 0000000..f584d62
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/package-summary.html
@@ -0,0 +1,250 @@

Package com.sleepycat.bind.serial

Bindings that use Java serialization.

Package com.sleepycat.bind.serial Description

Bindings that use Java serialization.

See Also:
[Getting Started Guide]
diff --git a/docs/java/com/sleepycat/bind/serial/package-tree.html b/docs/java/com/sleepycat/bind/serial/package-tree.html
new file mode 100644
index 0000000..1826a0c
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/package-tree.html
@@ -0,0 +1,197 @@

Hierarchy For Package com.sleepycat.bind.serial

Package Hierarchies:

Class Hierarchy

Interface Hierarchy
diff --git a/docs/java/com/sleepycat/bind/serial/package-use.html b/docs/java/com/sleepycat/bind/serial/package-use.html
new file mode 100644
index 0000000..636df17
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/serial/package-use.html
@@ -0,0 +1,231 @@

Uses of Package
com.sleepycat.bind.serial
diff --git a/docs/java/com/sleepycat/bind/tuple/BigDecimalBinding.html b/docs/java/com/sleepycat/bind/tuple/BigDecimalBinding.html
new file mode 100644
index 0000000..bcaf188
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/BigDecimalBinding.html
@@ -0,0 +1,442 @@

BigDecimalBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple

Class BigDecimalBinding
All Implemented Interfaces:
EntryBinding<java.math.BigDecimal>

public class BigDecimalBinding
extends TupleBinding<java.math.BigDecimal>

A concrete TupleBinding for an unsorted BigDecimal value.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

A sketch of the first style appears below.

See Also:
BigDecimal Formats
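A short sketch of the first style, converting a value to and from a DatabaseEntry with the static methods (the value shown is arbitrary):

import java.math.BigDecimal;
import com.sleepycat.bind.tuple.BigDecimalBinding;
import com.sleepycat.je.DatabaseEntry;

public class BigDecimalEntryExample {
    public static void main(String[] args) {
        DatabaseEntry entry = new DatabaseEntry();
        BigDecimalBinding.bigDecimalToEntry(new BigDecimal("19.99"), entry);

        BigDecimal roundTripped = BigDecimalBinding.entryToBigDecimal(entry);
        System.out.println(roundTripped);   // 19.99
    }
}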
Constructor Detail

BigDecimalBinding

public BigDecimalBinding()

      Method Detail

entryToObject

public java.math.BigDecimal entryToObject(TupleInput input)

Description copied from class: TupleBinding

Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.math.BigDecimal>

Parameters:
input - is the tuple key or data entry.

Returns:
the key or data object constructed from the entry.
objectToEntry

public void objectToEntry(java.math.BigDecimal object,
                          TupleOutput output)

Description copied from class: TupleBinding

Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.math.BigDecimal>

Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.
getTupleOutput

protected TupleOutput getTupleOutput(java.math.BigDecimal object)

Description copied from class: TupleBase

Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.math.BigDecimal>

Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.

Returns:
an empty TupleOutput instance.

See Also:
TupleBase.setTupleBufferSize(int)
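As a sketch of the per-thread reuse pattern described above (not the library's default behavior), a binding subclass could hand out one TupleOutput per thread:

import java.math.BigDecimal;
import com.sleepycat.bind.tuple.BigDecimalBinding;
import com.sleepycat.bind.tuple.TupleOutput;

public class ReusingBigDecimalBinding extends BigDecimalBinding {

    private final ThreadLocal<TupleOutput> perThread =
            ThreadLocal.withInitial(TupleOutput::new);

    @Override
    protected TupleOutput getTupleOutput(BigDecimal object) {
        TupleOutput out = perThread.get();
        out.reset();   // required before each reuse, per FastOutputStream.reset()
        return out;
    }
}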
entryToBigDecimal

public static java.math.BigDecimal entryToBigDecimal(DatabaseEntry entry)

Converts an entry buffer into a BigDecimal value.

Parameters:
entry - is the source entry buffer.

Returns:
the resulting value.
bigDecimalToEntry

public static void bigDecimalToEntry(java.math.BigDecimal val,
                                     DatabaseEntry entry)

Converts a BigDecimal value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
diff --git a/docs/java/com/sleepycat/bind/tuple/BigIntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/BigIntegerBinding.html
new file mode 100644
index 0000000..dc2929c
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/BigIntegerBinding.html
@@ -0,0 +1,435 @@

BigIntegerBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple

Class BigIntegerBinding
All Implemented Interfaces:
EntryBinding<java.math.BigInteger>

public class BigIntegerBinding
extends TupleBinding<java.math.BigInteger>

A concrete TupleBinding for a BigInteger value.

This class produces byte array values that by default (without a custom comparator) sort correctly.

See Also:
Integer Formats
Constructor Detail

BigIntegerBinding

public BigIntegerBinding()

      Method Detail

entryToObject

public java.math.BigInteger entryToObject(TupleInput input)

Description copied from class: TupleBinding

Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.math.BigInteger>

Parameters:
input - is the tuple key or data entry.

Returns:
the key or data object constructed from the entry.
objectToEntry

public void objectToEntry(java.math.BigInteger object,
                          TupleOutput output)

Description copied from class: TupleBinding

Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.math.BigInteger>

Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.
getTupleOutput

protected TupleOutput getTupleOutput(java.math.BigInteger object)

Description copied from class: TupleBase

Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.math.BigInteger>

Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.

Returns:
an empty TupleOutput instance.

See Also:
TupleBase.setTupleBufferSize(int)
entryToBigInteger

public static java.math.BigInteger entryToBigInteger(DatabaseEntry entry)

Converts an entry buffer into a BigInteger value.

Parameters:
entry - is the source entry buffer.

Returns:
the resulting value.
bigIntegerToEntry

public static void bigIntegerToEntry(java.math.BigInteger val,
                                     DatabaseEntry entry)

Converts a BigInteger value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
diff --git a/docs/java/com/sleepycat/bind/tuple/BooleanBinding.html b/docs/java/com/sleepycat/bind/tuple/BooleanBinding.html
new file mode 100644
index 0000000..f5161c5
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/BooleanBinding.html
@@ -0,0 +1,443 @@

BooleanBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple

Class BooleanBinding
      Constructor Detail

BooleanBinding

public BooleanBinding()

      Method Detail

entryToObject

public java.lang.Boolean entryToObject(TupleInput input)

Description copied from class: TupleBinding

Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Boolean>

Parameters:
input - is the tuple key or data entry.

Returns:
the key or data object constructed from the entry.
objectToEntry

public void objectToEntry(java.lang.Boolean object,
                          TupleOutput output)

Description copied from class: TupleBinding

Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Boolean>

Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.
getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Boolean object)

Description copied from class: TupleBase

Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Boolean>

Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.

Returns:
an empty TupleOutput instance.

See Also:
TupleBase.setTupleBufferSize(int)
entryToBoolean

public static boolean entryToBoolean(DatabaseEntry entry)

Converts an entry buffer into a simple boolean value.

Parameters:
entry - is the source entry buffer.

Returns:
the resulting value.
booleanToEntry

public static void booleanToEntry(boolean val,
                                  DatabaseEntry entry)

Converts a simple boolean value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
diff --git a/docs/java/com/sleepycat/bind/tuple/ByteBinding.html b/docs/java/com/sleepycat/bind/tuple/ByteBinding.html
new file mode 100644
index 0000000..c374605
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/ByteBinding.html
@@ -0,0 +1,443 @@

ByteBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple

Class ByteBinding
      Constructor Detail

ByteBinding

public ByteBinding()

      Method Detail

entryToObject

public java.lang.Byte entryToObject(TupleInput input)

Description copied from class: TupleBinding

Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Byte>

Parameters:
input - is the tuple key or data entry.

Returns:
the key or data object constructed from the entry.
objectToEntry

public void objectToEntry(java.lang.Byte object,
                          TupleOutput output)

Description copied from class: TupleBinding

Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Byte>

Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.
getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Byte object)

Description copied from class: TupleBase

Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Byte>

Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.

Returns:
an empty TupleOutput instance.

See Also:
TupleBase.setTupleBufferSize(int)
entryToByte

public static byte entryToByte(DatabaseEntry entry)

Converts an entry buffer into a simple byte value.

Parameters:
entry - is the source entry buffer.

Returns:
the resulting value.
byteToEntry

public static void byteToEntry(byte val,
                               DatabaseEntry entry)

Converts a simple byte value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
diff --git a/docs/java/com/sleepycat/bind/tuple/CharacterBinding.html b/docs/java/com/sleepycat/bind/tuple/CharacterBinding.html
new file mode 100644
index 0000000..3a86206
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/CharacterBinding.html
@@ -0,0 +1,443 @@

CharacterBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple

Class CharacterBinding
      Constructor Detail

CharacterBinding

public CharacterBinding()
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        entryToObject

        +
        public java.lang.Character entryToObject(TupleInput input)
        +
        Description copied from class: TupleBinding
        +
        Constructs a key or data object from a TupleInput entry.
        +
        +
        Specified by:
        +
        entryToObject in class TupleBinding<java.lang.Character>
        +
        Parameters:
        +
        input - is the tuple key or data entry.
        +
        Returns:
        +
        the key or data object constructed from the entry.
        +
        +
      • +
      + + + +
        +
      • +

        objectToEntry

        +
        public void objectToEntry(java.lang.Character object,
        +                          TupleOutput output)
        +
        Description copied from class: TupleBinding
        +
        Converts a key or data object to a tuple entry.
        +
        +
        Specified by:
        +
        objectToEntry in class TupleBinding<java.lang.Character>
        +
        Parameters:
        +
        object - is the key or data object.
        +
        output - is the tuple entry to which the key or data should be + written.
        +
        +
      • +
      + + + +
        +
      • +

        getTupleOutput

        +
        protected TupleOutput getTupleOutput(java.lang.Character object)
        +
        Description copied from class: TupleBase
        +
        Returns an empty TupleOutput instance that will be used by the tuple + binding or key creator. + +

        The default implementation of this method creates a new TupleOutput + with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

        + +

        This method may be overridden to return a TupleOutput instance. For + example, an instance per thread could be created and returned by this + method. If a TupleOutput instance is reused, be sure to call its + FastOutputStream.reset() method before each + use.

        +
        +
        Overrides:
        +
        getTupleOutput in class TupleBase<java.lang.Character>
        +
        Parameters:
        +
        object - is the object to be written to the tuple output, and may + be used by subclasses to determine the size of the output buffer.
        +
        Returns:
        +
        an empty TupleOutput instance.
        +
        See Also:
        +
        TupleBase.setTupleBufferSize(int)
        +
        +
      • +
      + + + +
        +
      • +

        entryToChar

        +
        public static char entryToChar(DatabaseEntry entry)
        +
        Converts an entry buffer into a simple char value.
        +
        +
        Parameters:
        +
        entry - is the source entry buffer.
        +
        Returns:
        +
        the resulting value.
        +
        +
      • +
      + + + +
        +
      • +

        charToEntry

        +
        public static void charToEntry(char val,
        +                               DatabaseEntry entry)
        +
        Converts a simple char value into an entry buffer.
        +
        +
        Parameters:
        +
        val - is the source value.
        +
        entry - is the destination entry buffer.
        +
        +
      • +
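Since the class implements EntryBinding, an instance can also convert directly to and from DatabaseEntry objects, hiding the TupleInput/TupleOutput plumbing shown in the signatures above. A brief sketch (variable names are illustrative):

```java
import com.sleepycat.bind.EntryBinding;
import com.sleepycat.bind.tuple.CharacterBinding;
import com.sleepycat.je.DatabaseEntry;

public class CharBindingExample {
    public static void main(String[] args) {
        EntryBinding<Character> binding = new CharacterBinding();
        DatabaseEntry entry = new DatabaseEntry();

        // The binding serializes through an internal TupleOutput
        // and deserializes through an internal TupleInput.
        binding.objectToEntry('x', entry);
        Character c = binding.entryToObject(entry);
        System.out.println(c); // x
    }
}
```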
diff --git a/docs/java/com/sleepycat/bind/tuple/DoubleBinding.html b/docs/java/com/sleepycat/bind/tuple/DoubleBinding.html
new file mode 100644
index 0000000..62d24d7
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/DoubleBinding.html
@@ -0,0 +1,443 @@

DoubleBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class DoubleBinding

Constructor Detail
DoubleBinding

public DoubleBinding()

Method Detail

entryToObject

public java.lang.Double entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Double>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Double object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Double>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Double object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Double>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToDouble

public static double entryToDouble(DatabaseEntry entry)

Converts an entry buffer into a simple double value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

doubleToEntry

public static void doubleToEntry(double val,
                                 DatabaseEntry entry)

Converts a simple double value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
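The same static round trip applies here; note that this binding stores the unsorted double representation, so for ordered keys the SortedDoubleBinding documented later is the usual choice. A minimal sketch:

```java
import com.sleepycat.bind.tuple.DoubleBinding;
import com.sleepycat.je.DatabaseEntry;

public class DoubleRoundTrip {
    public static void main(String[] args) {
        DatabaseEntry entry = new DatabaseEntry();

        // Serialize and deserialize a double through the entry buffer.
        DoubleBinding.doubleToEntry(3.14159, entry);
        double d = DoubleBinding.entryToDouble(entry);
        System.out.println(d); // 3.14159
    }
}
```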
diff --git a/docs/java/com/sleepycat/bind/tuple/FloatBinding.html b/docs/java/com/sleepycat/bind/tuple/FloatBinding.html
new file mode 100644
index 0000000..1ae0aef
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/FloatBinding.html
@@ -0,0 +1,443 @@

FloatBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class FloatBinding

Constructor Detail
FloatBinding

public FloatBinding()

Method Detail

entryToObject

public java.lang.Float entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Float>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Float object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Float>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Float object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Float>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToFloat

public static float entryToFloat(DatabaseEntry entry)

Converts an entry buffer into a simple float value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

floatToEntry

public static void floatToEntry(float val,
                                DatabaseEntry entry)

Converts a simple float value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
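Rather than constructing a binding directly, callers often obtain it through TupleBinding.getPrimitiveBinding, which returns the built-in binding for a primitive wrapper class. A short sketch:

```java
import com.sleepycat.bind.EntryBinding;
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.je.DatabaseEntry;

public class PrimitiveBindingLookup {
    public static void main(String[] args) {
        // Looks up the built-in binding for Float.
        EntryBinding<Float> binding =
            TupleBinding.getPrimitiveBinding(Float.class);

        DatabaseEntry entry = new DatabaseEntry();
        binding.objectToEntry(1.5f, entry);
        System.out.println(binding.entryToObject(entry)); // 1.5
    }
}
```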
diff --git a/docs/java/com/sleepycat/bind/tuple/IntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/IntegerBinding.html
new file mode 100644
index 0000000..2f75ba1
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/IntegerBinding.html
@@ -0,0 +1,443 @@

IntegerBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class IntegerBinding

Constructor Detail
IntegerBinding

public IntegerBinding()

Method Detail

entryToObject

public java.lang.Integer entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Integer>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Integer object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Integer>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Integer object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Integer>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToInt

public static int entryToInt(DatabaseEntry entry)

Converts an entry buffer into a simple int value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

intToEntry

public static void intToEntry(int val,
                              DatabaseEntry entry)

Converts a simple int value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
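The static helpers are convenient when using the com.sleepycat.je API directly, for example to build the key entry for a put. A hedged sketch, assuming a Database named db has been opened elsewhere (the helper method is invented for illustration):

```java
import com.sleepycat.bind.tuple.IntegerBinding;
import com.sleepycat.bind.tuple.StringBinding;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseEntry;

public class IntKeyPut {
    // 'db' is assumed to be opened elsewhere; this only illustrates
    // how the static binding helpers fill the key and data entries.
    static void putRecord(Database db, int id, String name) {
        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        IntegerBinding.intToEntry(id, key);       // sorted int key
        StringBinding.stringToEntry(name, data);  // tuple-encoded string data
        db.put(null, key, data);                  // non-transactional put
    }
}
```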
diff --git a/docs/java/com/sleepycat/bind/tuple/LongBinding.html b/docs/java/com/sleepycat/bind/tuple/LongBinding.html
new file mode 100644
index 0000000..5c4fa8a
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/LongBinding.html
@@ -0,0 +1,443 @@

LongBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class LongBinding

Constructor Detail
LongBinding

public LongBinding()

Method Detail

entryToObject

public java.lang.Long entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Long>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Long object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Long>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Long object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Long>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToLong

public static long entryToLong(DatabaseEntry entry)

Converts an entry buffer into a simple long value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

longToEntry

public static void longToEntry(long val,
                               DatabaseEntry entry)

Converts a simple long value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
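The getTupleOutput description above suggests a per-thread TupleOutput as one way to avoid allocating a fresh buffer on every conversion. A minimal sketch of that idea (the subclass name is invented for illustration, and assumes the binding class is not final):

```java
import com.sleepycat.bind.tuple.LongBinding;
import com.sleepycat.bind.tuple.TupleOutput;

public class ReusingLongBinding extends LongBinding {
    // One TupleOutput per thread, reused across conversions.
    private static final ThreadLocal<TupleOutput> OUT =
        ThreadLocal.withInitial(TupleOutput::new);

    @Override
    protected TupleOutput getTupleOutput(Long object) {
        TupleOutput out = OUT.get();
        out.reset(); // required before each reuse (FastOutputStream.reset)
        return out;
    }
}
```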
diff --git a/docs/java/com/sleepycat/bind/tuple/MarshalledTupleEntry.html b/docs/java/com/sleepycat/bind/tuple/MarshalledTupleEntry.html
new file mode 100644
index 0000000..5c2b558
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/MarshalledTupleEntry.html
@@ -0,0 +1,271 @@

MarshalledTupleEntry (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Interface MarshalledTupleEntry
public interface MarshalledTupleEntry

A marshalling interface implemented by key, data or entity classes that are represented as tuples.

Key classes implement this interface to marshal their key entry. Data or entity classes implement this interface to marshal their data entry. Implementations of this interface must have a public no-arguments constructor so that they can be instantiated by a binding, prior to calling the unmarshalEntry(com.sleepycat.bind.tuple.TupleInput) method.

Note that implementing this interface is not necessary when the object is a Java simple type, for example: String, Integer, etc. These types can be used with built-in bindings returned by TupleBinding.getPrimitiveBinding(java.lang.Class<T>).

Author:
Mark Hayes
See Also:
TupleTupleMarshalledBinding

Method Detail

marshalEntry

void marshalEntry(TupleOutput dataOutput)

Constructs the key or data tuple entry from the key or data object.

Parameters:
dataOutput - is the output tuple.

unmarshalEntry

void unmarshalEntry(TupleInput dataInput)

Constructs the key or data object from the key or data tuple entry.

Parameters:
dataInput - is the input tuple.
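An implementing class simply mirrors its fields onto the tuple in marshalEntry and reads them back, in the same order, in unmarshalEntry. A hypothetical sketch (the Part class and its fields are invented for illustration):

```java
import com.sleepycat.bind.tuple.MarshalledTupleEntry;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class Part implements MarshalledTupleEntry {
    private String name;
    private int quantity;

    public Part() {} // public no-arguments constructor required by the binding

    public Part(String name, int quantity) {
        this.name = name;
        this.quantity = quantity;
    }

    public void marshalEntry(TupleOutput dataOutput) {
        dataOutput.writeString(name);
        dataOutput.writeInt(quantity);
    }

    public void unmarshalEntry(TupleInput dataInput) {
        name = dataInput.readString();   // fields must be read in write order
        quantity = dataInput.readInt();
    }
}
```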
diff --git a/docs/java/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.html b/docs/java/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.html
new file mode 100644
index 0000000..65c9362
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.html
@@ -0,0 +1,331 @@

MarshalledTupleKeyEntity (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Interface MarshalledTupleKeyEntity
public interface MarshalledTupleKeyEntity

A marshalling interface implemented by entity classes that represent keys as tuples. Since MarshalledTupleKeyEntity objects are instantiated using Java deserialization, no particular constructor is required by classes that implement this interface.

Note that a marshalled tuple key extractor is somewhat less efficient than a non-marshalled key tuple extractor because more conversions are needed. A marshalled key extractor must convert the entry to an object in order to extract the key fields, while an unmarshalled key extractor does not.

Author:
Mark Hayes
See Also:
TupleTupleMarshalledBinding, TupleSerialMarshalledBinding

Method Detail

marshalPrimaryKey

void marshalPrimaryKey(TupleOutput keyOutput)

Extracts the entity's primary key and writes it to the key output.

Parameters:
keyOutput - is the output tuple.

unmarshalPrimaryKey

void unmarshalPrimaryKey(TupleInput keyInput)

Completes construction of the entity by setting its primary key from the stored primary key.

Parameters:
keyInput - is the input tuple.

marshalSecondaryKey

boolean marshalSecondaryKey(java.lang.String keyName,
                            TupleOutput keyOutput)

Extracts the entity's secondary key and writes it to the key output.

Parameters:
keyName - identifies the secondary key.
keyOutput - is the output tuple.
Returns:
true if a key was created, or false to indicate that the key is not present.

nullifyForeignKey

boolean nullifyForeignKey(java.lang.String keyName)

Clears the entity's secondary key fields for the given key name.

The specified index key should be changed by this method such that marshalSecondaryKey(java.lang.String, com.sleepycat.bind.tuple.TupleOutput) for the same key name will return false. Other fields in the data object should remain unchanged.

If ForeignKeyDeleteAction.NULLIFY was specified when opening the secondary database, this method is called when the entity for this foreign key is deleted. If NULLIFY was not specified, this method will not be called and may always return false.

Parameters:
keyName - identifies the secondary key.
Returns:
true if the key was cleared, or false to indicate that the key is not present and no change is necessary.
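An entity that stores its keys as tuples implements the four methods above. A hypothetical sketch (the Supplier class and the "city" key name are invented for illustration):

```java
import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class Supplier implements MarshalledTupleKeyEntity {
    private int number;   // primary key
    private String city;  // secondary key, may be absent (null)

    public void marshalPrimaryKey(TupleOutput keyOutput) {
        keyOutput.writeInt(number);
    }

    public void unmarshalPrimaryKey(TupleInput keyInput) {
        number = keyInput.readInt();
    }

    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
        if ("city".equals(keyName) && city != null) {
            keyOutput.writeString(city);
            return true;            // key was created
        }
        return false;               // key not present
    }

    public boolean nullifyForeignKey(String keyName) {
        if ("city".equals(keyName) && city != null) {
            city = null;            // marshalSecondaryKey now returns false
            return true;
        }
        return false;
    }
}
```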
diff --git a/docs/java/com/sleepycat/bind/tuple/PackedIntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/PackedIntegerBinding.html
new file mode 100644
index 0000000..3a3f36a
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/PackedIntegerBinding.html
@@ -0,0 +1,445 @@

PackedIntegerBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class PackedIntegerBinding
All Implemented Interfaces:
EntryBinding<java.lang.Integer>

public class PackedIntegerBinding
extends TupleBinding<java.lang.Integer>

A concrete TupleBinding for an unsorted Integer primitive wrapper or an unsorted int primitive, that stores the value in the smallest number of bytes possible.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

See Also:
Integer Formats

Constructor Detail

PackedIntegerBinding

public PackedIntegerBinding()

Method Detail

entryToObject

public java.lang.Integer entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Integer>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Integer object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Integer>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Integer object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Integer>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToInt

public static int entryToInt(DatabaseEntry entry)

Converts an entry buffer into a simple int value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

intToEntry

public static void intToEntry(int val,
                              DatabaseEntry entry)

Converts a simple int value into an entry buffer, using PackedInteger format.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
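The "smallest number of bytes possible" claim can be observed through DatabaseEntry.getSize(). A hedged sketch comparing the packed format with the fixed-width IntegerBinding (exact sizes depend on the value; a small value like 7 should pack into a single byte):

```java
import com.sleepycat.bind.tuple.IntegerBinding;
import com.sleepycat.bind.tuple.PackedIntegerBinding;
import com.sleepycat.je.DatabaseEntry;

public class PackedSizeDemo {
    public static void main(String[] args) {
        DatabaseEntry fixed = new DatabaseEntry();
        DatabaseEntry packed = new DatabaseEntry();

        IntegerBinding.intToEntry(7, fixed);        // fixed 4-byte encoding
        PackedIntegerBinding.intToEntry(7, packed); // variable-length encoding

        System.out.println(fixed.getSize());  // 4
        System.out.println(packed.getSize()); // smaller for small values
    }
}
```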
diff --git a/docs/java/com/sleepycat/bind/tuple/PackedLongBinding.html b/docs/java/com/sleepycat/bind/tuple/PackedLongBinding.html
new file mode 100644
index 0000000..aac572a
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/PackedLongBinding.html
@@ -0,0 +1,445 @@

PackedLongBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class PackedLongBinding
All Implemented Interfaces:
EntryBinding<java.lang.Long>

public class PackedLongBinding
extends TupleBinding<java.lang.Long>

A concrete TupleBinding for an unsorted Long primitive wrapper or an unsorted long primitive, that stores the value in the smallest number of bytes possible.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

See Also:
Integer Formats

Constructor Detail

PackedLongBinding

public PackedLongBinding()

Method Detail

entryToObject

public java.lang.Long entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Long>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Long object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Long>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Long object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Long>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToLong

public static java.lang.Long entryToLong(DatabaseEntry entry)

Converts an entry buffer into a simple Long value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

longToEntry

public static void longToEntry(long val,
                               DatabaseEntry entry)

Converts a simple Long value into an entry buffer, using PackedLong format.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
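Usage mirrors PackedIntegerBinding, with long values; note that entryToLong here returns the Long wrapper rather than a primitive. A minimal sketch:

```java
import com.sleepycat.bind.tuple.PackedLongBinding;
import com.sleepycat.je.DatabaseEntry;

public class PackedLongRoundTrip {
    public static void main(String[] args) {
        DatabaseEntry entry = new DatabaseEntry();
        PackedLongBinding.longToEntry(1_000_000L, entry);
        Long value = PackedLongBinding.entryToLong(entry); // returns Long
        System.out.println(value); // 1000000
    }
}
```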
diff --git a/docs/java/com/sleepycat/bind/tuple/ShortBinding.html b/docs/java/com/sleepycat/bind/tuple/ShortBinding.html
new file mode 100644
index 0000000..d89b517
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/ShortBinding.html
@@ -0,0 +1,443 @@

ShortBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class ShortBinding

Constructor Detail
ShortBinding

public ShortBinding()

Method Detail

entryToObject

public java.lang.Short entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Short>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Short object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Short>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Short object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Short>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToShort

public static short entryToShort(DatabaseEntry entry)

Converts an entry buffer into a simple short value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

shortToEntry

public static void shortToEntry(short val,
                                DatabaseEntry entry)

Converts a simple short value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
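When used with the com.sleepycat.collections package, an instance of a binding like this serves as the key or value binding of a stored collection. A hedged sketch, assuming a suitably configured Database named db has been opened elsewhere:

```java
import com.sleepycat.bind.tuple.ShortBinding;
import com.sleepycat.bind.tuple.StringBinding;
import com.sleepycat.collections.StoredSortedMap;
import com.sleepycat.je.Database;

public class ShortKeyedMap {
    // 'db' is assumed to be opened elsewhere; shown only to illustrate
    // the binding's role as the key/value serializer of the map.
    static StoredSortedMap<Short, String> wrap(Database db) {
        return new StoredSortedMap<>(
            db, new ShortBinding(), new StringBinding(), /*writeAllowed*/ true);
    }
}
```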
diff --git a/docs/java/com/sleepycat/bind/tuple/SortedBigDecimalBinding.html b/docs/java/com/sleepycat/bind/tuple/SortedBigDecimalBinding.html
new file mode 100644
index 0000000..56763b0
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/SortedBigDecimalBinding.html
@@ -0,0 +1,442 @@

SortedBigDecimalBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class SortedBigDecimalBinding
All Implemented Interfaces:
EntryBinding<java.math.BigDecimal>

public class SortedBigDecimalBinding
extends TupleBinding<java.math.BigDecimal>

A concrete TupleBinding for a sorted BigDecimal value.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

See Also:
BigDecimal Formats

Constructor Detail

SortedBigDecimalBinding

public SortedBigDecimalBinding()

Method Detail

entryToObject

public java.math.BigDecimal entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.math.BigDecimal>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.math.BigDecimal object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.math.BigDecimal>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.math.BigDecimal object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.math.BigDecimal>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToBigDecimal

public static java.math.BigDecimal entryToBigDecimal(DatabaseEntry entry)

Converts an entry buffer into a BigDecimal value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

bigDecimalToEntry

public static void bigDecimalToEntry(java.math.BigDecimal val,
                                     DatabaseEntry entry)

Converts a BigDecimal value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
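The static helpers follow the same pattern as the primitive bindings; the sorted format is what makes BigDecimal entries usable as ordered keys. A minimal round-trip sketch:

```java
import java.math.BigDecimal;
import com.sleepycat.bind.tuple.SortedBigDecimalBinding;
import com.sleepycat.je.DatabaseEntry;

public class BigDecimalRoundTrip {
    public static void main(String[] args) {
        DatabaseEntry entry = new DatabaseEntry();
        SortedBigDecimalBinding.bigDecimalToEntry(new BigDecimal("12.5"), entry);
        BigDecimal value = SortedBigDecimalBinding.entryToBigDecimal(entry);
        System.out.println(value); // 12.5
    }
}
```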
diff --git a/docs/java/com/sleepycat/bind/tuple/SortedDoubleBinding.html b/docs/java/com/sleepycat/bind/tuple/SortedDoubleBinding.html
new file mode 100644
index 0000000..a3e10c3
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/SortedDoubleBinding.html
@@ -0,0 +1,442 @@

SortedDoubleBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class SortedDoubleBinding
All Implemented Interfaces:
EntryBinding<java.lang.Double>

public class SortedDoubleBinding
extends TupleBinding<java.lang.Double>

A concrete TupleBinding for a sorted Double primitive wrapper or a sorted double primitive.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

See Also:
Floating Point Formats

Constructor Detail

SortedDoubleBinding

public SortedDoubleBinding()

Method Detail

entryToObject

public java.lang.Double entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Double>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Double object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Double>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Double object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Double>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToDouble

public static double entryToDouble(DatabaseEntry entry)

Converts an entry buffer into a simple double value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

doubleToEntry

public static void doubleToEntry(double val,
                                 DatabaseEntry entry)

Converts a simple double value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
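The point of the sorted variant is that the serialized bytes compare, as unsigned bytes (JE's default key ordering), the same way the numbers do. A hedged sketch checking that property (the comparison helper is written here for illustration, not a JE API):

```java
import com.sleepycat.bind.tuple.SortedDoubleBinding;
import com.sleepycat.je.DatabaseEntry;

public class SortedDoubleOrder {
    // Unsigned lexicographic comparison over the entry's valid byte range.
    static int compareUnsigned(DatabaseEntry a, DatabaseEntry b) {
        int n = Math.min(a.getSize(), b.getSize());
        for (int i = 0; i < n; i++) {
            int x = a.getData()[a.getOffset() + i] & 0xff;
            int y = b.getData()[b.getOffset() + i] & 0xff;
            if (x != y) return x - y;
        }
        return a.getSize() - b.getSize();
    }

    public static void main(String[] args) {
        DatabaseEntry neg = new DatabaseEntry(), pos = new DatabaseEntry();
        SortedDoubleBinding.doubleToEntry(-1.5, neg);
        SortedDoubleBinding.doubleToEntry(2.5, pos);
        System.out.println(compareUnsigned(neg, pos) < 0); // true: -1.5 < 2.5
    }
}
```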
diff --git a/docs/java/com/sleepycat/bind/tuple/SortedFloatBinding.html b/docs/java/com/sleepycat/bind/tuple/SortedFloatBinding.html
new file mode 100644
index 0000000..2006437
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/SortedFloatBinding.html
@@ -0,0 +1,442 @@

SortedFloatBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class SortedFloatBinding
All Implemented Interfaces:
EntryBinding<java.lang.Float>

public class SortedFloatBinding
extends TupleBinding<java.lang.Float>

A concrete TupleBinding for a sorted Float primitive wrapper or a sorted float primitive.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

See Also:
Floating Point Formats

Constructor Detail

SortedFloatBinding

public SortedFloatBinding()

Method Detail

entryToObject

public java.lang.Float entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Float>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Float object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Float>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Float object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Float>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToFloat

public static float entryToFloat(DatabaseEntry entry)

Converts an entry buffer into a simple float value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

floatToEntry

public static void floatToEntry(float val,
                                DatabaseEntry entry)

Converts a simple float value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.
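The sorted and unsorted float formats encode the same value with differently arranged bytes; negative values make the difference visible. A hedged sketch contrasting the two bindings (both round-trip to the same float):

```java
import java.util.Arrays;
import com.sleepycat.bind.tuple.FloatBinding;
import com.sleepycat.bind.tuple.SortedFloatBinding;
import com.sleepycat.je.DatabaseEntry;

public class FloatFormatsDiffer {
    public static void main(String[] args) {
        DatabaseEntry unsorted = new DatabaseEntry();
        DatabaseEntry sorted = new DatabaseEntry();

        FloatBinding.floatToEntry(-1f, unsorted);     // unsorted float format
        SortedFloatBinding.floatToEntry(-1f, sorted); // sorted float format

        // Same value, different serialized bytes for negative numbers.
        System.out.println(
            Arrays.equals(unsorted.getData(), sorted.getData())); // false
    }
}
```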
diff --git a/docs/java/com/sleepycat/bind/tuple/SortedPackedIntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/SortedPackedIntegerBinding.html
new file mode 100644
index 0000000..926e76e
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/SortedPackedIntegerBinding.html
@@ -0,0 +1,445 @@

SortedPackedIntegerBinding (Oracle - Berkeley DB Java Edition API)

com.sleepycat.bind.tuple
Class SortedPackedIntegerBinding
    +
  • +
    +
    All Implemented Interfaces:
    +
    EntryBinding<java.lang.Integer>
    +
    +
    +
    +
    public class SortedPackedIntegerBinding
    +extends TupleBinding<java.lang.Integer>
A concrete TupleBinding for a sorted Integer primitive wrapper or a sorted int primitive, that stores the value in the smallest number of bytes possible.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

See Also:
Integer Formats
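For the first, direct-API style of use, a sketch along these lines converts between int values and entry buffers (the getSize() call is only there to show that small values pack into few bytes):

    import com.sleepycat.bind.tuple.SortedPackedIntegerBinding;
    import com.sleepycat.je.DatabaseEntry;

    public class PackedIntEntryExample {
        public static void main(String[] args) {
            DatabaseEntry entry = new DatabaseEntry();

            // Small values occupy fewer bytes in sorted-packed format.
            SortedPackedIntegerBinding.intToEntry(42, entry);
            System.out.println("packed size: " + entry.getSize());

            int value = SortedPackedIntegerBinding.entryToInt(entry);
            assert value == 42;
        }
    }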

      Constructor Detail

SortedPackedIntegerBinding

public SortedPackedIntegerBinding()
Method Detail

entryToObject

public java.lang.Integer entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Integer>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Integer object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Integer>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Integer object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Integer>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToInt

public static int entryToInt(DatabaseEntry entry)

Converts an entry buffer into a simple int value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

intToEntry

public static void intToEntry(int val,
                              DatabaseEntry entry)

Converts a simple int value into an entry buffer, using SortedPackedInteger format.

Parameters:
val - is the source value.
entry - is the destination entry buffer.

diff --git a/docs/java/com/sleepycat/bind/tuple/SortedPackedLongBinding.html b/docs/java/com/sleepycat/bind/tuple/SortedPackedLongBinding.html
new file mode 100644
index 0000000..801ca72
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/SortedPackedLongBinding.html
@@ -0,0 +1,445 @@

SortedPackedLongBinding (Oracle - Berkeley DB Java Edition API)
com.sleepycat.bind.tuple
Class SortedPackedLongBinding

All Implemented Interfaces:
EntryBinding<java.lang.Long>

public class SortedPackedLongBinding
extends TupleBinding<java.lang.Long>
A concrete TupleBinding for a sorted Long primitive wrapper or a sorted long primitive, that stores the value in the smallest number of bytes possible.

There are two ways to use this class:

1. When using the com.sleepycat.je package directly, the static methods in this class can be used to convert between primitive values and DatabaseEntry objects.
2. When using the com.sleepycat.collections package, an instance of this class can be used with any stored collection.

See Also:
Integer Formats

      Constructor Detail

SortedPackedLongBinding

public SortedPackedLongBinding()
Method Detail

entryToObject

public java.lang.Long entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.Long>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.Long object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.Long>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.Long object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.Long>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToLong

public static java.lang.Long entryToLong(DatabaseEntry entry)

Converts an entry buffer into a simple Long value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

longToEntry

public static void longToEntry(long val,
                               DatabaseEntry entry)

Converts a simple Long value into an entry buffer, using SortedPackedLong format.

Parameters:
val - is the source value.
entry - is the destination entry buffer.

diff --git a/docs/java/com/sleepycat/bind/tuple/StringBinding.html b/docs/java/com/sleepycat/bind/tuple/StringBinding.html
new file mode 100644
index 0000000..eb7031d
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/StringBinding.html
@@ -0,0 +1,442 @@

StringBinding (Oracle - Berkeley DB Java Edition API)
com.sleepycat.bind.tuple
Class StringBinding

      Constructor Detail

StringBinding

public StringBinding()
Method Detail

entryToObject

public java.lang.String entryToObject(TupleInput input)

Description copied from class: TupleBinding
Constructs a key or data object from a TupleInput entry.

Specified by:
entryToObject in class TupleBinding<java.lang.String>
Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public void objectToEntry(java.lang.String object,
                          TupleOutput output)

Description copied from class: TupleBinding
Converts a key or data object to a tuple entry.

Specified by:
objectToEntry in class TupleBinding<java.lang.String>
Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getTupleOutput

protected TupleOutput getTupleOutput(java.lang.String object)

Description copied from class: TupleBase
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the TupleBase.setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Overrides:
getTupleOutput in class TupleBase<java.lang.String>
Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
TupleBase.setTupleBufferSize(int)

entryToString

public static java.lang.String entryToString(DatabaseEntry entry)

Converts an entry buffer into a simple String value.

Parameters:
entry - is the source entry buffer.
Returns:
the resulting value.

stringToEntry

public static void stringToEntry(java.lang.String val,
                                 DatabaseEntry entry)

Converts a simple String value into an entry buffer.

Parameters:
val - is the source value.
entry - is the destination entry buffer.

diff --git a/docs/java/com/sleepycat/bind/tuple/TupleBase.html b/docs/java/com/sleepycat/bind/tuple/TupleBase.html
new file mode 100644
index 0000000..64d16b0
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleBase.html
@@ -0,0 +1,492 @@

TupleBase (Oracle - Berkeley DB Java Edition API)
com.sleepycat.bind.tuple
Class TupleBase<E>

Direct Known Subclasses:
TupleBinding, TupleSerialBinding, TupleSerialKeyCreator, TupleTupleBinding, TupleTupleKeyCreator

public class TupleBase<E>
extends java.lang.Object
A base class for tuple bindings and tuple key creators that provides control over the allocation of the output buffer.

Tuple bindings and key creators append data to a TupleOutput instance, which is also a FastOutputStream instance. This object has a byte array buffer that is resized when it is full. The reallocation of this buffer can be a performance factor for some applications using large objects. To manage this issue, the setTupleBufferSize(int) method may be used to control the initial size of the buffer, and the getTupleOutput(E) method may be overridden by subclasses to take over creation of the TupleOutput object.
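As a rough sketch of the first option, a binding that routinely writes large values might raise the initial buffer size once in its constructor (the String payload and the 4 KB figure are illustrative assumptions, not recommendations):

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    // Hypothetical binding; TupleBinding inherits this control from TupleBase.
    public class LargeValueBinding extends TupleBinding<String> {

        public LargeValueBinding() {
            // Start with a 4 KB buffer so typical values avoid reallocation.
            setTupleBufferSize(4096);
        }

        public String entryToObject(TupleInput input) {
            return input.readString();
        }

        public void objectToEntry(String object, TupleOutput output) {
            output.writeString(object);
        }
    }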

Method Detail

setTupleBufferSize

public void setTupleBufferSize(int byteSize)

Sets the initial byte size of the output buffer that is allocated by the default implementation of getTupleOutput(E).

If this property is zero (the default), the default FastOutputStream.DEFAULT_INIT_SIZE size is used.

Parameters:
byteSize - the initial byte size of the output buffer, or zero to use the default size.

getTupleBufferSize

public int getTupleBufferSize()

Returns the initial byte size of the output buffer.

Returns:
the initial byte size of the output buffer.
See Also:
setTupleBufferSize(int)

getTupleOutput

protected TupleOutput getTupleOutput(E object)

Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.

The default implementation of this method creates a new TupleOutput with an initial buffer size that can be changed using the setTupleBufferSize(int) method.

This method may be overridden to return a TupleOutput instance. For example, an instance per thread could be created and returned by this method. If a TupleOutput instance is reused, be sure to call its FastOutputStream.reset() method before each use.

Parameters:
object - is the object to be written to the tuple output, and may be used by subclasses to determine the size of the output buffer.
Returns:
an empty TupleOutput instance.
See Also:
setTupleBufferSize(int)
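One plausible shape for the per-thread reuse described above, sketched as a hypothetical subclass rather than anything shipped with the library:

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    // Hypothetical binding that hands out one reusable TupleOutput per thread.
    public class ReusingLongBinding extends TupleBinding<Long> {

        private final ThreadLocal<TupleOutput> perThread =
            ThreadLocal.withInitial(TupleOutput::new);

        protected TupleOutput getTupleOutput(Long object) {
            TupleOutput output = perThread.get();
            output.reset();  // required before each reuse, per the note above
            return output;
        }

        public Long entryToObject(TupleInput input) {
            return input.readLong();
        }

        public void objectToEntry(Long object, TupleOutput output) {
            output.writeLong(object);
        }
    }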

outputToEntry

public static void outputToEntry(TupleOutput output,
                                 DatabaseEntry entry)

Utility method to set the data in an entry buffer to the data in a tuple output object.

Parameters:
output - is the source tuple output object.
entry - is the destination entry buffer.

inputToEntry

public static void inputToEntry(TupleInput input,
                                DatabaseEntry entry)

Utility method to set the data in an entry buffer to the data in a tuple input object.

Parameters:
input - is the source tuple input object.
entry - is the destination entry buffer.

entryToInput

public static TupleInput entryToInput(DatabaseEntry entry)

Utility method to create a new tuple input object for reading the data from a given buffer. If an existing input is reused, it is reset before returning it.

Parameters:
entry - is the source entry buffer.
Returns:
the new tuple input object.

newOutput

public static TupleOutput newOutput()

Deprecated. replaced by getTupleOutput(E)
Utility method for use by bindings to create a tuple output object.

Returns:
a new tuple output object.

newOutput

public static TupleOutput newOutput(byte[] buffer)

Deprecated. replaced by getTupleOutput(E)
Utility method for use by bindings to create a tuple output object with a specific starting size.

Parameters:
buffer - is the byte array to use as the buffer.
Returns:
a new tuple output object.

diff --git a/docs/java/com/sleepycat/bind/tuple/TupleBinding.html b/docs/java/com/sleepycat/bind/tuple/TupleBinding.html
new file mode 100644
index 0000000..fca5a86
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleBinding.html
@@ -0,0 +1,450 @@

TupleBinding (Oracle - Berkeley DB Java Edition API)
com.sleepycat.bind.tuple
Class TupleBinding<E>

      Constructor Detail

TupleBinding

public TupleBinding()

Creates a tuple binding.
Method Detail

entryToObject

public E entryToObject(DatabaseEntry entry)

Description copied from interface: EntryBinding
Converts an entry buffer into an Object.

Specified by:
entryToObject in interface EntryBinding<E>
Parameters:
entry - is the source entry buffer.
Returns:
the resulting Object.

objectToEntry

public void objectToEntry(E object,
                          DatabaseEntry entry)

Description copied from interface: EntryBinding
Converts an Object into an entry buffer.

Specified by:
objectToEntry in interface EntryBinding<E>
Parameters:
object - is the source Object.
entry - is the destination entry buffer.

entryToObject

public abstract E entryToObject(TupleInput input)

Constructs a key or data object from a TupleInput entry.

Parameters:
input - is the tuple key or data entry.
Returns:
the key or data object constructed from the entry.

objectToEntry

public abstract void objectToEntry(E object,
                                   TupleOutput output)

Converts a key or data object to a tuple entry.

Parameters:
object - is the key or data object.
output - is the tuple entry to which the key or data should be written.

getPrimitiveBinding

public static <T> TupleBinding<T> getPrimitiveBinding(java.lang.Class<T> cls)

Creates a tuple binding for a primitive Java class. The following Java classes are supported.

• String
• Character
• Boolean
• Byte
• Short
• Integer
• Long
• Float
• Double

Note: getPrimitiveBinding(java.lang.Class<T>) returns bindings that do not sort negative floating point numbers correctly by default. See SortedFloatBinding and SortedDoubleBinding for details.

Type Parameters:
T - the primitive Java class.
Parameters:
cls - the primitive Java class.
Returns:
a new binding for the primitive class or null if the cls parameter is not one of the supported classes.
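A small sketch of the lookup-and-use pattern (the choice of Integer is arbitrary):

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.je.DatabaseEntry;

    public class PrimitiveBindingExample {
        public static void main(String[] args) {
            // Look up the built-in binding for Integer.
            TupleBinding<Integer> binding =
                TupleBinding.getPrimitiveBinding(Integer.class);

            // Convert an object to an entry buffer and back again.
            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(1234, entry);
            Integer roundTripped = binding.entryToObject(entry);
            assert roundTripped == 1234;
        }
    }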
diff --git a/docs/java/com/sleepycat/bind/tuple/TupleInput.html b/docs/java/com/sleepycat/bind/tuple/TupleInput.html
new file mode 100644
index 0000000..4ee5d0f
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleInput.html
@@ -0,0 +1,1303 @@

TupleInput (Oracle - Berkeley DB Java Edition API)
com.sleepycat.bind.tuple
Class TupleInput

All Implemented Interfaces:
java.io.Closeable, java.lang.AutoCloseable

public class TupleInput
extends FastInputStream
An InputStream with DataInput-like methods for reading tuple fields. It is used by TupleBinding.

This class has many methods that have the same signatures as methods in the DataInput interface. The reason this class does not implement DataInput is because it would break the interface contract for those methods because of data format differences.

Author:
Mark Hayes
See Also:
Tuple Formats
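A short sketch of the usual write-then-read cycle, pairing this class with TupleOutput (fields must be read back in the order they were written):

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class TupleRoundTripExample {
        public static void main(String[] args) {
            // Write a few tuple fields.
            TupleOutput output = new TupleOutput();
            output.writeString("alice");
            output.writeInt(30);
            output.writeBoolean(true);

            // Read them back in the same order.
            TupleInput input = new TupleInput(output);
            String name = input.readString();
            int age = input.readInt();
            boolean active = input.readBoolean();
            assert name.equals("alice") && age == 30 && active;
        }
    }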

      Constructor Summary

Constructors
Constructor and Description
TupleInput(byte[] buffer)
    Creates a tuple input object for reading a byte array of tuple data.
TupleInput(byte[] buffer, int offset, int length)
    Creates a tuple input object for reading a byte array of tuple data at a given offset for a given length.
TupleInput(TupleOutput output)
    Creates a tuple input object from the data contained in a tuple output object.

      Method Summary

Modifier and Type          Method and Description
int                        getBigDecimalByteLength()
    Returns the byte length of an unsorted BigDecimal.
int                        getBigIntegerByteLength()
    Returns the byte length of a BigInteger.
int                        getPackedIntByteLength()
    Returns the byte length of a packed integer.
int                        getPackedLongByteLength()
    Returns the byte length of a packed long integer.
int                        getSortedBigDecimalByteLength()
    Returns the byte length of a sorted BigDecimal.
int                        getSortedPackedIntByteLength()
    Returns the byte length of a sorted packed integer.
int                        getSortedPackedLongByteLength()
    Returns the byte length of a sorted packed long integer.
int                        getStringByteLength()
    Returns the byte length of a null-terminated UTF string in the data buffer, including the terminator.
java.math.BigDecimal       readBigDecimal()
    Reads an unsorted BigDecimal.
java.math.BigInteger       readBigInteger()
    Reads a BigInteger.
boolean                    readBoolean()
    Reads a boolean (one byte) unsigned value from the buffer and returns true if it is non-zero and false if it is zero.
byte                       readByte()
    Reads a signed byte (one byte) value from the buffer.
void                       readBytes(char[] chars)
    Reads the specified number of bytes from the buffer, converting each unsigned byte value to a character of the resulting array.
java.lang.String           readBytes(int length)
    Reads the specified number of bytes from the buffer, converting each unsigned byte value to a character of the resulting string.
char                       readChar()
    Reads a char (two byte) unsigned value from the buffer.
void                       readChars(char[] chars)
    Reads the specified number of characters from the buffer, converting each two byte unsigned value to a character of the resulting array.
java.lang.String           readChars(int length)
    Reads the specified number of characters from the buffer, converting each two byte unsigned value to a character of the resulting string.
double                     readDouble()
    Reads an unsorted double (eight byte) value from the buffer.
float                      readFloat()
    Reads an unsorted float (four byte) value from the buffer.
int                        readInt()
    Reads a signed int (four byte) value from the buffer.
long                       readLong()
    Reads a signed long (eight byte) value from the buffer.
int                        readPackedInt()
    Reads an unsorted packed integer.
long                       readPackedLong()
    Reads an unsorted packed long integer.
short                      readShort()
    Reads a signed short (two byte) value from the buffer.
java.math.BigDecimal       readSortedBigDecimal()
    Reads a sorted BigDecimal, with support for correct default sorting.
double                     readSortedDouble()
    Reads a sorted double (eight byte) value from the buffer.
float                      readSortedFloat()
    Reads a sorted float (four byte) value from the buffer.
int                        readSortedPackedInt()
    Reads a sorted packed integer.
long                       readSortedPackedLong()
    Reads a sorted packed long integer.
java.lang.String           readString()
    Reads a null-terminated UTF string from the data buffer and converts the data from UTF to Unicode.
void                       readString(char[] chars)
    Reads the specified number of UTF characters from the data buffer and converts the data from UTF to Unicode.
java.lang.String           readString(int length)
    Reads the specified number of UTF characters from the data buffer and converts the data from UTF to Unicode.
int                        readUnsignedByte()
    Reads an unsigned byte (one byte) value from the buffer.
long                       readUnsignedInt()
    Reads an unsigned int (four byte) value from the buffer.
int                        readUnsignedShort()
    Reads an unsigned short (two byte) value from the buffer.
Methods inherited from class java.io.InputStream
close

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

      Constructor Detail

TupleInput

public TupleInput(byte[] buffer)

Creates a tuple input object for reading a byte array of tuple data. A reference to the byte array will be kept by this object (it will not be copied) and therefore the byte array should not be modified while this object is in use.

Parameters:
buffer - is the byte array to be read and should contain data in tuple format.

TupleInput

public TupleInput(byte[] buffer,
                  int offset,
                  int length)

Creates a tuple input object for reading a byte array of tuple data at a given offset for a given length. A reference to the byte array will be kept by this object (it will not be copied) and therefore the byte array should not be modified while this object is in use.

Parameters:
buffer - is the byte array to be read and should contain data in tuple format.
offset - is the byte offset at which to begin reading.
length - is the number of bytes to be read.

TupleInput

public TupleInput(TupleOutput output)

Creates a tuple input object from the data contained in a tuple output object. A reference to the tuple output's byte array will be kept by this object (it will not be copied) and therefore the tuple output object should not be modified while this object is in use.

Parameters:
output - is the tuple output object containing the data to be read.
Method Detail

readString

public final java.lang.String readString()
    throws java.lang.IndexOutOfBoundsException, java.lang.IllegalArgumentException

Reads a null-terminated UTF string from the data buffer and converts the data from UTF to Unicode. Reads values that were written using TupleOutput.writeString(String).

Returns:
the converted string.
Throws:
java.lang.IndexOutOfBoundsException - if no null terminating byte is found in the buffer.
java.lang.IllegalArgumentException - if malformed UTF data is encountered.
See Also:
String Formats

readChar

public final char readChar()
    throws java.lang.IndexOutOfBoundsException

Reads a char (two byte) unsigned value from the buffer. Reads values that were written using TupleOutput.writeChar(int).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readBoolean

public final boolean readBoolean()
    throws java.lang.IndexOutOfBoundsException

Reads a boolean (one byte) unsigned value from the buffer and returns true if it is non-zero and false if it is zero. Reads values that were written using TupleOutput.writeBoolean(boolean).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readByte

public final byte readByte()
    throws java.lang.IndexOutOfBoundsException

Reads a signed byte (one byte) value from the buffer. Reads values that were written using TupleOutput.writeByte(int).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readShort

public final short readShort()
    throws java.lang.IndexOutOfBoundsException

Reads a signed short (two byte) value from the buffer. Reads values that were written using TupleOutput.writeShort(int).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readInt

public final int readInt()
    throws java.lang.IndexOutOfBoundsException

Reads a signed int (four byte) value from the buffer. Reads values that were written using TupleOutput.writeInt(int).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readLong

public final long readLong()
    throws java.lang.IndexOutOfBoundsException

Reads a signed long (eight byte) value from the buffer. Reads values that were written using TupleOutput.writeLong(long).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readFloat

public final float readFloat()
    throws java.lang.IndexOutOfBoundsException

Reads an unsorted float (four byte) value from the buffer. Reads values that were written using TupleOutput.writeFloat(float).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Floating Point Formats

readDouble

public final double readDouble()
    throws java.lang.IndexOutOfBoundsException

Reads an unsorted double (eight byte) value from the buffer. Reads values that were written using TupleOutput.writeDouble(double).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Floating Point Formats

readSortedFloat

public final float readSortedFloat()
    throws java.lang.IndexOutOfBoundsException

Reads a sorted float (four byte) value from the buffer. Reads values that were written using TupleOutput.writeSortedFloat(float).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Floating Point Formats

readSortedDouble

public final double readSortedDouble()
    throws java.lang.IndexOutOfBoundsException

Reads a sorted double (eight byte) value from the buffer. Reads values that were written using TupleOutput.writeSortedDouble(double).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Floating Point Formats

readUnsignedByte

public final int readUnsignedByte()
    throws java.lang.IndexOutOfBoundsException

Reads an unsigned byte (one byte) value from the buffer. Reads values that were written using TupleOutput.writeUnsignedByte(int).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readUnsignedShort

public final int readUnsignedShort()
    throws java.lang.IndexOutOfBoundsException

Reads an unsigned short (two byte) value from the buffer. Reads values that were written using TupleOutput.writeUnsignedShort(int).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readUnsignedInt

public final long readUnsignedInt()
    throws java.lang.IndexOutOfBoundsException

Reads an unsigned int (four byte) value from the buffer. Reads values that were written using TupleOutput.writeUnsignedInt(long).

Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readBytes

public final java.lang.String readBytes(int length)
    throws java.lang.IndexOutOfBoundsException

Reads the specified number of bytes from the buffer, converting each unsigned byte value to a character of the resulting string. Reads values that were written using TupleOutput.writeBytes(java.lang.String).

Parameters:
length - is the number of bytes to be read.
Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readChars

public final java.lang.String readChars(int length)
    throws java.lang.IndexOutOfBoundsException

Reads the specified number of characters from the buffer, converting each two byte unsigned value to a character of the resulting string. Reads values that were written using TupleOutput.writeChars(java.lang.String).

Parameters:
length - is the number of characters to be read.
Returns:
the value read from the buffer.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readBytes

public final void readBytes(char[] chars)
    throws java.lang.IndexOutOfBoundsException

Reads the specified number of bytes from the buffer, converting each unsigned byte value to a character of the resulting array. Reads values that were written using TupleOutput.writeBytes(java.lang.String).

Parameters:
chars - is the array to receive the data and whose length is used to determine the number of bytes to be read.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readChars

public final void readChars(char[] chars)
    throws java.lang.IndexOutOfBoundsException

Reads the specified number of characters from the buffer, converting each two byte unsigned value to a character of the resulting array. Reads values that were written using TupleOutput.writeChars(java.lang.String).

Parameters:
chars - is the array to receive the data and whose length is used to determine the number of characters to be read.
Throws:
java.lang.IndexOutOfBoundsException - if not enough bytes are available in the buffer.
See Also:
Integer Formats

readString

public final java.lang.String readString(int length)
    throws java.lang.IndexOutOfBoundsException, java.lang.IllegalArgumentException

Reads the specified number of UTF characters from the data buffer and converts the data from UTF to Unicode. Reads values that were written using TupleOutput.writeString(char[]).

Parameters:
length - is the number of characters to be read.
Returns:
the converted string.
Throws:
java.lang.IndexOutOfBoundsException - if no null terminating byte is found in the buffer.
java.lang.IllegalArgumentException - if malformed UTF data is encountered.
See Also:
String Formats

readString

public final void readString(char[] chars)
    throws java.lang.IndexOutOfBoundsException, java.lang.IllegalArgumentException

Reads the specified number of UTF characters from the data buffer and converts the data from UTF to Unicode. Reads values that were written using TupleOutput.writeString(char[]).

Parameters:
chars - is the array to receive the data and whose length is used to determine the number of characters to be read.
Throws:
java.lang.IndexOutOfBoundsException - if no null terminating byte is found in the buffer.
java.lang.IllegalArgumentException - if malformed UTF data is encountered.
See Also:
String Formats

getStringByteLength

public final int getStringByteLength()
    throws java.lang.IndexOutOfBoundsException, java.lang.IllegalArgumentException

Returns the byte length of a null-terminated UTF string in the data buffer, including the terminator. Used with string values that were written using TupleOutput.writeString(String).

Returns:
the byte length.
Throws:
java.lang.IndexOutOfBoundsException - if no null terminating byte is found in the buffer.
java.lang.IllegalArgumentException - if malformed UTF data is encountered.
See Also:
String Formats

readPackedInt

public final int readPackedInt()

Reads an unsorted packed integer.

Returns:
the int value.
See Also:
Integer Formats

getPackedIntByteLength

public final int getPackedIntByteLength()

Returns the byte length of a packed integer.

Returns:
the byte length.
See Also:
Integer Formats

readPackedLong

public final long readPackedLong()

Reads an unsorted packed long integer.

Returns:
the long value.
See Also:
Integer Formats

getPackedLongByteLength

public final int getPackedLongByteLength()

Returns the byte length of a packed long integer.

Returns:
the byte length.
See Also:
Integer Formats

readSortedPackedInt

public final int readSortedPackedInt()

Reads a sorted packed integer.

Returns:
the int value.
See Also:
Integer Formats

getSortedPackedIntByteLength

public final int getSortedPackedIntByteLength()

Returns the byte length of a sorted packed integer.

Returns:
the byte length.
See Also:
Integer Formats

readSortedPackedLong

public final long readSortedPackedLong()

Reads a sorted packed long integer.

Returns:
the long value.
See Also:
Integer Formats

getSortedPackedLongByteLength

public final int getSortedPackedLongByteLength()

Returns the byte length of a sorted packed long integer.

Returns:
the byte length.
See Also:
Integer Formats

readBigInteger

public final java.math.BigInteger readBigInteger()

Reads a BigInteger.

Returns:
the non-null BigInteger value.
See Also:
Integer Formats

getBigIntegerByteLength

public final int getBigIntegerByteLength()

Returns the byte length of a BigInteger.

Returns:
the byte length.
See Also:
Integer Formats

readBigDecimal

public final java.math.BigDecimal readBigDecimal()

Reads an unsorted BigDecimal.

Returns:
the non-null BigDecimal value.
See Also:
BigDecimal Formats

getBigDecimalByteLength

public final int getBigDecimalByteLength()

Returns the byte length of an unsorted BigDecimal.

Returns:
the byte length.
See Also:
BigDecimal Formats

readSortedBigDecimal

public final java.math.BigDecimal readSortedBigDecimal()

Reads a sorted BigDecimal, with support for correct default sorting.

Returns:
the non-null BigDecimal value.
See Also:
BigDecimal Formats

getSortedBigDecimalByteLength

public final int getSortedBigDecimalByteLength()

Returns the byte length of a sorted BigDecimal.

Returns:
the byte length.
See Also:
BigDecimal Formats
diff --git a/docs/java/com/sleepycat/bind/tuple/TupleInputBinding.html b/docs/java/com/sleepycat/bind/tuple/TupleInputBinding.html
new file mode 100644
index 0000000..2d0b91b
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleInputBinding.html
@@ -0,0 +1,332 @@

TupleInputBinding (Oracle - Berkeley DB Java Edition API)
com.sleepycat.bind.tuple
Class TupleInputBinding

All Implemented Interfaces:
EntryBinding<TupleInput>

public class TupleInputBinding
extends java.lang.Object
implements EntryBinding<TupleInput>
A concrete EntryBinding that uses the TupleInput object itself as the key or data object.

This binding is used when tuples themselves are the objects, rather than application-defined objects. A TupleInput must always be used. To convert a TupleOutput to a TupleInput, use the TupleInput.TupleInput(TupleOutput) constructor.

Author:
Mark Hayes
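A sketch of the conversion path the paragraph above describes (the stored-collection wiring that would normally surround this is omitted):

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleInputBinding;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.je.DatabaseEntry;

    public class TupleInputBindingExample {
        public static void main(String[] args) {
            // Build a tuple directly; the tuple itself is the data object.
            TupleOutput output = new TupleOutput();
            output.writeInt(7);

            // A TupleInput must always be used with this binding.
            TupleInput tuple = new TupleInput(output);

            TupleInputBinding binding = new TupleInputBinding();
            DatabaseEntry entry = new DatabaseEntry();
            binding.objectToEntry(tuple, entry);

            TupleInput readBack = binding.entryToObject(entry);
            assert readBack.readInt() == 7;
        }
    }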

      Constructor Detail

TupleInputBinding

public TupleInputBinding()

Creates a tuple input binding.

diff --git a/docs/java/com/sleepycat/bind/tuple/TupleMarshalledBinding.html b/docs/java/com/sleepycat/bind/tuple/TupleMarshalledBinding.html
new file mode 100644
index 0000000..a3c4817
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleMarshalledBinding.html
@@ -0,0 +1,365 @@

TupleMarshalledBinding (Oracle - Berkeley DB Java Edition API)
com.sleepycat.bind.tuple
Class TupleMarshalledBinding<E extends MarshalledTupleEntry>

All Implemented Interfaces:
EntryBinding<E>

public class TupleMarshalledBinding<E extends MarshalledTupleEntry>
extends TupleBinding<E>
A concrete TupleBinding that delegates to the MarshalledTupleEntry interface of the data or key object.

This class works by calling the methods of the MarshalledTupleEntry interface, which must be implemented by the key or data class, to convert between the key or data entry and the object.

Author:
Mark Hayes

      Constructor Detail

TupleMarshalledBinding

public TupleMarshalledBinding(java.lang.Class<E> cls)

Creates a tuple marshalled binding object.

The given class is used to instantiate key or data objects using Class.newInstance(), and therefore must be a public class and have a public no-arguments constructor. It must also implement the MarshalledTupleEntry interface.

Parameters:
cls - is the class of the key or data objects.
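Assuming the MarshalledTupleEntry interface exposes the marshalEntry(TupleOutput)/unmarshalEntry(TupleInput) pair used elsewhere in this package, a conforming class might look roughly like this (Part is a made-up example type):

    import com.sleepycat.bind.tuple.MarshalledTupleEntry;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    // Public class with a public no-arguments constructor, as required above.
    public class Part implements MarshalledTupleEntry {

        private String number;

        public Part() {}  // needed by Class.newInstance()

        public void marshalEntry(TupleOutput dataOutput) {
            dataOutput.writeString(number);
        }

        public void unmarshalEntry(TupleInput dataInput) {
            number = dataInput.readString();
        }
    }

The binding itself would then be constructed as new TupleMarshalledBinding<Part>(Part.class).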

diff --git a/docs/java/com/sleepycat/bind/tuple/TupleOutput.html b/docs/java/com/sleepycat/bind/tuple/TupleOutput.html
new file mode 100644
index 0000000..6c69171
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleOutput.html
@@ -0,0 +1,1110 @@

TupleOutput (Oracle - Berkeley DB Java Edition API)
+
com.sleepycat.bind.tuple
+

Class TupleOutput

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    java.io.Closeable, java.io.Flushable, java.lang.AutoCloseable
    +
    +
    +
    +
    public class TupleOutput
    +extends FastOutputStream
    +
    An OutputStream with DataOutput-like methods for writing tuple fields. It is used by TupleBinding.

    This class has many methods that have the same signatures as methods in the DataOutput interface. The reason this class does not implement DataOutput is that doing so would break the interface contract for those methods because of data format differences.
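    A minimal write-side sketch; toByteArray() is assumed to be inherited from FastOutputStream:

        TupleOutput out = new TupleOutput();
        out.writeString("widget")     // the write methods return this, so calls chain
           .writeInt(12)
           .writeSortedDouble(3.5);
        byte[] tupleBytes = out.toByteArray();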

    +
    +
    Author:
    +
    Mark Hayes
    +
    See Also:
    +
    Tuple Formats
    +
    +
  • +
+
+
+ +
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        TupleOutput

        +
        public TupleOutput()
        +
        Creates a tuple output object for writing a byte array of tuple data.
        +
      • +
      + + + +
        +
      • +

        TupleOutput

        +
        public TupleOutput(byte[] buffer)
        +
        Creates a tuple output object for writing a byte array of tuple data, + using a given buffer. A new buffer will be allocated only if the number + of bytes needed is greater than the length of this buffer. A reference + to the byte array will be kept by this object and therefore the byte + array should not be modified while this object is in use.
        +
        +
        Parameters:
        +
        buffer - is the byte array to use as the buffer.
        +
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        writeBytes

        +
        public final TupleOutput writeBytes(java.lang.String val)
        +
        Writes the specified bytes to the buffer, converting each character to an unsigned byte value. Writes values that can be read using TupleInput.readBytes(int).
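        For example (a sketch; the values are illustrative), each character is narrowed to one byte and TupleInput.readBytes(int) restores it:

            TupleOutput out = new TupleOutput();
            out.writeBytes("abc");              // stored as 0x61 0x62 0x63
            TupleInput in = new TupleInput(out);
            String round = in.readBytes(3);     // "abc"; chars >= 0x100 would be lost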
        +
        +
        Parameters:
        +
        val - is the string containing the values to be written. + Only characters with values below 0x100 may be written using this + method, since the high-order 8 bits of all characters are discarded.
        +
        Returns:
        +
        this tuple output object.
        +
        Throws:
        +
        java.lang.NullPointerException - if the val parameter is null.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeChars

        +
        public final TupleOutput writeChars(java.lang.String val)
        +
        Writes the specified characters to the buffer, converting each character + to a two byte unsigned value. + Writes values that can be read using TupleInput.readChars(int).
        +
        +
        Parameters:
        +
        val - is the string containing the characters to be written.
        +
        Returns:
        +
        this tuple output object.
        +
        Throws:
        +
        java.lang.NullPointerException - if the val parameter is null.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeString

        +
        public final TupleOutput writeString(java.lang.String val)
        +
        Writes the specified characters to the buffer, converting each character to UTF format, and adding a null terminator byte. Writes values that can be read using TupleInput.readString().
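        A small sketch: because of the terminator byte, no length needs to be supplied when reading:

            TupleOutput out = new TupleOutput();
            out.writeString("a").writeString("b");
            TupleInput in = new TupleInput(out);
            String first = in.readString();    // "a"
            String second = in.readString();   // "b"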
        +
        +
        Parameters:
        +
        val - is the string containing the characters to be written.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        String Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeChar

        +
        public final TupleOutput writeChar(int val)
        +
        Writes a char (two byte) unsigned value to the buffer. + Writes values that can be read using TupleInput.readChar().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeBoolean

        +
        public final TupleOutput writeBoolean(boolean val)
        +
        Writes a boolean (one byte) unsigned value to the buffer, writing one + if the value is true and zero if it is false. + Writes values that can be read using TupleInput.readBoolean().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeByte

        +
        public final TupleOutput writeByte(int val)
        +
        Writes a signed byte (one byte) value to the buffer. Writes values that can be read using TupleInput.readByte().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeShort

        +
        public final TupleOutput writeShort(int val)
        +
        Writes a signed short (two byte) value to the buffer. Writes values that can be read using TupleInput.readShort().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeInt

        +
        public final TupleOutput writeInt(int val)
        +
        Writes a signed int (four byte) value to the buffer. Writes values that can be read using TupleInput.readInt().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeLong

        +
        public final TupleOutput writeLong(long val)
        +
        Writes a signed long (eight byte) value to the buffer. Writes values that can be read using TupleInput.readLong().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeFloat

        +
        public final TupleOutput writeFloat(float val)
        +
        Writes an unsorted float (four byte) value to the buffer. + Writes values that can be read using TupleInput.readFloat().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Floating Point + Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeDouble

        +
        public final TupleOutput writeDouble(double val)
        +
        Writes an unsorted double (eight byte) value to the buffer. + Writes values that can be read using TupleInput.readDouble().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Floating Point + Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeSortedFloat

        +
        public final TupleOutput writeSortedFloat(float val)
        +
        Writes a sorted float (four byte) value to the buffer. + Writes values that can be read using TupleInput.readSortedFloat().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Floating Point + Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeSortedDouble

        +
        public final TupleOutput writeSortedDouble(double val)
        +
        Writes a sorted double (eight byte) value to the buffer. + Writes values that can be read using TupleInput.readSortedDouble().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Floating Point + Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeBytes

        +
        public final TupleOutput writeBytes(char[] chars)
        +
        Writes the specified bytes to the buffer, converting each character to + an unsigned byte value. + Writes values that can be read using TupleInput.readBytes(int).
        +
        +
        Parameters:
        +
        chars - is the array of values to be written. + Only characters with values below 0x100 may be written using this + method, since the high-order 8 bits of all characters are discarded.
        +
        Returns:
        +
        this tuple output object.
        +
        Throws:
        +
        java.lang.NullPointerException - if the chars parameter is null.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeChars

        +
        public final TupleOutput writeChars(char[] chars)
        +
        Writes the specified characters to the buffer, converting each character + to a two byte unsigned value. + Writes values that can be read using TupleInput.readChars(int).
        +
        +
        Parameters:
        +
        chars - is the array of characters to be written.
        +
        Returns:
        +
        this tuple output object.
        +
        Throws:
        +
        java.lang.NullPointerException - if the chars parameter is null.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeString

        +
        public final TupleOutput writeString(char[] chars)
        +
        Writes the specified characters to the buffer, converting each character + to UTF format. + Writes values that can be read using TupleInput.readString(int) + or TupleInput.readString(char[]).
        +
        +
        Parameters:
        +
        chars - is the array of characters to be written.
        +
        Returns:
        +
        this tuple output object.
        +
        Throws:
        +
        java.lang.NullPointerException - if the chars parameter is null.
        +
        See Also:
        +
        String Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeUnsignedByte

        +
        public final TupleOutput writeUnsignedByte(int val)
        +
        Writes an unsigned byte (one byte) value to the buffer. + Writes values that can be read using TupleInput.readUnsignedByte().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeUnsignedShort

        +
        public final TupleOutput writeUnsignedShort(int val)
        +
        Writes an unsigned short (two byte) value to the buffer. + Writes values that can be read using TupleInput.readUnsignedShort().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeUnsignedInt

        +
        public final TupleOutput writeUnsignedInt(long val)
        +
        Writes an unsigned int (four byte) value to the buffer. + Writes values that can be read using TupleInput.readUnsignedInt().
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writePackedInt

        +
        public final TupleOutput writePackedInt(int val)
        +
        Writes an unsorted packed integer.
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writePackedLong

        +
        public final TupleOutput writePackedLong(long val)
        +
        Writes an unsorted packed long integer.
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeSortedPackedInt

        +
        public final TupleOutput writeSortedPackedInt(int val)
        +
        Writes a sorted packed integer.
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeSortedPackedLong

        +
        public final TupleOutput writeSortedPackedLong(long val)
        +
        Writes a sorted packed long integer.
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeBigInteger

        +
        public final TupleOutput writeBigInteger(java.math.BigInteger val)
        +
        Writes a BigInteger.
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        Throws:
        +
        java.lang.NullPointerException - if val is null.
        +
        java.lang.IllegalArgumentException - if the byte array representation of val + is larger than 0x7fff bytes.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        getBigIntegerByteLength

        +
        public static int getBigIntegerByteLength(java.math.BigInteger val)
        +
        Returns the exact byte length that would be output for a given BigInteger value if writeBigInteger(java.math.BigInteger) were called.
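        A sketch of one use: sizing the output buffer exactly before writing:

            java.math.BigInteger big = java.math.BigInteger.valueOf(123456789L);
            int len = TupleOutput.getBigIntegerByteLength(big);
            TupleOutput out = new TupleOutput(new byte[len]);  // no reallocation needed
            out.writeBigInteger(big);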
        +
        +
        Parameters:
        +
        val - the BigInteger
        +
        Returns:
        +
        the byte length.
        +
        See Also:
        +
        Integer Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeBigDecimal

        +
        public final TupleOutput writeBigDecimal(java.math.BigDecimal val)
        +
        Writes an unsorted BigDecimal.
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        Throws:
        +
        java.lang.NullPointerException - if val is null.
        +
        See Also:
        +
        BigDecimal + Formats
        +
        +
      • +
      + + + +
        +
      • +

        getBigDecimalMaxByteLength

        +
        public static int getBigDecimalMaxByteLength(java.math.BigDecimal val)
        +
        Returns the maximum byte length that would be output for a given BigDecimal value if writeBigDecimal(java.math.BigDecimal) were called.
        +
        +
        Parameters:
        +
        val - the BigDecimal.
        +
        Returns:
        +
        the byte length.
        +
        See Also:
        +
        BigDecimal + Formats
        +
        +
      • +
      + + + +
        +
      • +

        writeSortedBigDecimal

        +
        public final TupleOutput writeSortedBigDecimal(java.math.BigDecimal val)
        +
        Writes a sorted BigDecimal.
        +
        +
        Parameters:
        +
        val - is the value to write to the buffer.
        +
        Returns:
        +
        this tuple output object.
        +
        See Also:
        +
        BigDecimal + Formats
        +
        +
      • +
      + + + +
        +
      • +

        getSortedBigDecimalMaxByteLength

        +
        public static int getSortedBigDecimalMaxByteLength(java.math.BigDecimal val)
        +
        Returns the maximum byte length that would be output for a given BigDecimal value if writeSortedBigDecimal(java.math.BigDecimal) were + called.
        +
        +
        Parameters:
        +
        val - the BigDecimal.
        +
        Returns:
        +
        the byte length.
        +
        See Also:
        +
        BigDecimal + Formats
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
diff --git a/docs/java/com/sleepycat/bind/tuple/TupleTupleBinding.html b/docs/java/com/sleepycat/bind/tuple/TupleTupleBinding.html
new file mode 100644
index 0000000..27300a5
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleTupleBinding.html
@@ -0,0 +1,465 @@
+
com.sleepycat.bind.tuple
+

Class TupleTupleBinding<E>

+
+
+ +
+ +
+
+ +
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        TupleTupleBinding

        +
        public TupleTupleBinding()
        +
        Creates a tuple-tuple entity binding.
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        entryToObject

        +
        public E entryToObject(DatabaseEntry key,
        +                       DatabaseEntry data)
        +
        Description copied from interface: EntityBinding
        +
        Converts key and data entry buffers into an entity Object.
        +
        +
        Specified by:
        +
        entryToObject in interface EntityBinding<E>
        +
        Parameters:
        +
        key - is the source key entry.
        +
        data - is the source data entry.
        +
        Returns:
        +
        the resulting Object.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToKey

        +
        public void objectToKey(E object,
        +                        DatabaseEntry key)
        +
        Description copied from interface: EntityBinding
        +
        Extracts the key entry from an entity Object.
        +
        +
        Specified by:
        +
        objectToKey in interface EntityBinding<E>
        +
        Parameters:
        +
        object - is the source Object.
        +
        key - is the destination entry buffer.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToData

        +
        public void objectToData(E object,
        +                         DatabaseEntry data)
        +
        Description copied from interface: EntityBinding
        +
        Extracts the data entry from an entity Object.
        +
        +
        Specified by:
        +
        objectToData in interface EntityBinding<E>
        +
        Parameters:
        +
        object - is the source Object.
        +
        data - is the destination entry buffer.
        +
        +
      • +
      + + + +
        +
      • +

        entryToObject

        +
        public abstract E entryToObject(TupleInput keyInput,
        +                                TupleInput dataInput)
        +
        Constructs an entity object from TupleInput key and data + entries.
        +
        +
        Parameters:
        +
        keyInput - is the TupleInput key entry object.
        +
        dataInput - is the TupleInput data entry object.
        +
        Returns:
        +
        the entity object constructed from the key and data.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToKey

        +
        public abstract void objectToKey(E object,
        +                                 TupleOutput output)
        +
        Extracts a key tuple from an entity object.
        +
        +
        Parameters:
        +
        object - is the entity object.
        +
        output - is the TupleOutput to which the key should be + written.
        +
        +
      • +
      + + + + + +
        +
      • +

        objectToData

        +
        public abstract void objectToData(E object,
        +                                  TupleOutput output)
        +
        Extracts a data tuple from an entity object.
        +
        +
        Parameters:
        +
        object - is the entity object.
        +
        output - is the TupleOutput to which the data should be + written.
        +
        +
      • +
      +
    • +
    +
  • +
+
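A hedged sketch of a concrete subclass tying the three abstract methods together; the Part class, its constructor, and its accessors are illustrative assumptions, not part of this API:

    public class PartBinding extends TupleTupleBinding<Part> {

        public Part entryToObject(TupleInput keyInput, TupleInput dataInput) {
            String number = keyInput.readString();
            int quantity = dataInput.readInt();
            return new Part(number, quantity);
        }

        public void objectToKey(Part object, TupleOutput output) {
            output.writeString(object.getNumber());
        }

        public void objectToData(Part object, TupleOutput output) {
            output.writeInt(object.getQuantity());
        }
    }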
+
+ + +
diff --git a/docs/java/com/sleepycat/bind/tuple/TupleTupleKeyCreator.html b/docs/java/com/sleepycat/bind/tuple/TupleTupleKeyCreator.html
new file mode 100644
index 0000000..62034da
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleTupleKeyCreator.html
@@ -0,0 +1,464 @@
+
com.sleepycat.bind.tuple
+

Class TupleTupleKeyCreator<E>

+
+
+ +
+ +
+
+ +
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        TupleTupleKeyCreator

        +
        public TupleTupleKeyCreator()
        +
        Creates a tuple-tuple key creator.
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        createSecondaryKey

        +
        public boolean createSecondaryKey(SecondaryDatabase db,
        +                                  DatabaseEntry primaryKeyEntry,
        +                                  DatabaseEntry dataEntry,
        +                                  DatabaseEntry indexKeyEntry)
        +
        Description copied from interface: SecondaryKeyCreator
        +
        Creates a secondary key entry, given a primary key and data entry. + +

        A secondary key may be derived from the primary key, primary data, or + a combination of the primary key and data. For secondary keys that are + optional, the key creator method may return false and the key/data pair + will not be indexed. To ensure the integrity of a secondary database + the key creator method must always return the same result for a given + set of input parameters.

        + +

        A RuntimeException may be thrown by this method if an error + occurs attempting to create the secondary key. This exception will be + thrown by the API method currently in progress, for example, a put method. However, this will cause the write operation + to be incomplete. When databases are not configured to be + transactional, caution should be used to avoid integrity problems. See + Special considerations for + using Secondary Databases with and without Transactions.

        +
        +
        Specified by:
        +
        createSecondaryKey in interface SecondaryKeyCreator
        +
        Parameters:
        +
        db - the database to which the secondary key will be + added. This parameter is passed for informational purposes but is not + commonly used. This parameter is always non-null.
        +
        primaryKeyEntry - the primary key entry. This parameter must not be modified + by this method. This parameter is always non-null.
        +
        dataEntry - the primary data entry. This parameter must not be modified + by this method. If SecondaryConfig#setExtractFromPrimaryKeyOnly + is configured as true, the data param may be either null + or non-null, and the implementation is expected to ignore it; otherwise, + this parameter is always non-null.
        +
        indexKeyEntry - the secondary key created by this method. This parameter + is always non-null.
        +
        Returns:
        +
        true if a key was created, or false to indicate that the key is + not present.
        +
        +
      • +
      + + + +
        +
      • +

        nullifyForeignKey

        +
        public boolean nullifyForeignKey(SecondaryDatabase db,
        +                                 DatabaseEntry dataEntry)
        +
        Description copied from interface: ForeignKeyNullifier
        +
        Sets the foreign key reference to null in the datum of the primary + database.
        +
        +
        Specified by:
        +
        nullifyForeignKey in interface ForeignKeyNullifier
        +
        Parameters:
        +
        db - the database in which the foreign key integrity + constraint is defined. This parameter is passed for informational + purposes but is not commonly used.
        +
        dataEntry - the existing primary datum in which the foreign key + reference should be set to null. This parameter should be updated by + this method if it returns true.
        +
        Returns:
        +
        true if the datum was modified, or false to indicate that the + key is not present.
        +
        +
      • +
      + + + +
        +
      • +

        createSecondaryKey

        +
        public abstract boolean createSecondaryKey(TupleInput primaryKeyInput,
        +                                           TupleInput dataInput,
        +                                           TupleOutput indexKeyOutput)
        +
        Creates the index key from primary key tuple and data tuple.
        +
        +
        Parameters:
        +
        primaryKeyInput - is the TupleInput for the primary key + entry.
        +
        dataInput - is the TupleInput for the data entry.
        +
        indexKeyOutput - is the destination index key tuple.
        +
        Returns:
        +
        true if a key was created, or false to indicate that the key is + not present.
        +
        +
      • +
      + + + + +
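A hedged sketch of a concrete key creator; the data-tuple layout (quantity, then supplier) and the Part entity are illustrative assumptions:

    public class BySupplierKeyCreator extends TupleTupleKeyCreator<Part> {

        public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                          TupleInput dataInput,
                                          TupleOutput indexKeyOutput) {
            dataInput.readInt();                      // skip the quantity field
            String supplier = dataInput.readString(); // assumed second field
            if (supplier == null) {
                return false;                         // record is not indexed
            }
            indexKeyOutput.writeString(supplier);
            return true;
        }
    }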
    • +
    +
  • +
+
+
+ + +
diff --git a/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.html b/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.html
new file mode 100644
index 0000000..9516d60
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.html
@@ -0,0 +1,403 @@
+
com.sleepycat.bind.tuple
+

Class TupleTupleMarshalledBinding<E extends MarshalledTupleEntry & MarshalledTupleKeyEntity>

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    EntityBinding<E>
    +
    +
    +
    +
    public class TupleTupleMarshalledBinding<E extends MarshalledTupleEntry & MarshalledTupleKeyEntity>
    +extends TupleTupleBinding<E>
    +
    A concrete TupleTupleBinding that delegates to the MarshalledTupleEntry and MarshalledTupleKeyEntity interfaces of the entity class.

    This class calls the methods of the MarshalledTupleEntry interface to convert between the data entry and entity object. It calls the methods of the MarshalledTupleKeyEntity interface to convert between the key entry and the entity object. These two interfaces must both be implemented by the entity class.
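    A hedged sketch of an entity class implementing both interfaces; the method names follow the MarshalledTupleEntry and MarshalledTupleKeyEntity Javadoc as the author understands it, and the PartEntity fields are illustrative:

        public class PartEntity
                implements MarshalledTupleEntry, MarshalledTupleKeyEntity {

            private String number;   // primary key
            private int quantity;    // data

            public PartEntity() {}

            // MarshalledTupleEntry: data entry <-> object
            public void marshalEntry(TupleOutput out) { out.writeInt(quantity); }
            public void unmarshalEntry(TupleInput in) { quantity = in.readInt(); }

            // MarshalledTupleKeyEntity: key entry <-> object
            public void marshalPrimaryKey(TupleOutput out) {
                out.writeString(number);
            }
            public void unmarshalPrimaryKey(TupleInput in) {
                number = in.readString();
            }
            public boolean marshalSecondaryKey(String keyName, TupleOutput out) {
                return false;   // no secondary keys in this sketch
            }
            public boolean nullifyForeignKey(String keyName) {
                return false;   // no foreign keys in this sketch
            }
        }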

    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
+
+ +
+
+ +
+
+ + +
diff --git a/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.html b/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.html
new file mode 100644
index 0000000..5c2b7e7
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.html
@@ -0,0 +1,382 @@
+
com.sleepycat.bind.tuple
+

Class TupleTupleMarshalledKeyCreator<E extends MarshalledTupleEntry & MarshalledTupleKeyEntity>

+
+
+ +
+ +
+
+ +
+
+ +
+
+ + +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/BigDecimalBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/BigDecimalBinding.html
new file mode 100644
index 0000000..5dc45f6
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/BigDecimalBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.BigDecimalBinding

+
+
No usage of com.sleepycat.bind.tuple.BigDecimalBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/BigIntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/BigIntegerBinding.html
new file mode 100644
index 0000000..c15225f
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/BigIntegerBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.BigIntegerBinding

+
+
No usage of com.sleepycat.bind.tuple.BigIntegerBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/BooleanBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/BooleanBinding.html
new file mode 100644
index 0000000..d5a701b
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/BooleanBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.BooleanBinding

+
+
No usage of com.sleepycat.bind.tuple.BooleanBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/ByteBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/ByteBinding.html
new file mode 100644
index 0000000..0078a05
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/ByteBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.ByteBinding

+
+
No usage of com.sleepycat.bind.tuple.ByteBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/CharacterBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/CharacterBinding.html
new file mode 100644
index 0000000..0d53a14
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/CharacterBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.CharacterBinding

+
+
No usage of com.sleepycat.bind.tuple.CharacterBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/DoubleBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/DoubleBinding.html
new file mode 100644
index 0000000..03d9e6b
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/DoubleBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.DoubleBinding

+
+
No usage of com.sleepycat.bind.tuple.DoubleBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/FloatBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/FloatBinding.html
new file mode 100644
index 0000000..fbf5a18
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/FloatBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.FloatBinding

+
+
No usage of com.sleepycat.bind.tuple.FloatBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/IntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/IntegerBinding.html
new file mode 100644
index 0000000..6753863
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/IntegerBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.IntegerBinding

+
+
No usage of com.sleepycat.bind.tuple.IntegerBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/LongBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/LongBinding.html
new file mode 100644
index 0000000..6d79c41
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/LongBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.LongBinding

+
+
No usage of com.sleepycat.bind.tuple.LongBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleEntry.html b/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleEntry.html
new file mode 100644
index 0000000..4107cbd
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleEntry.html
@@ -0,0 +1,188 @@
+

Uses of Interface
com.sleepycat.bind.tuple.MarshalledTupleEntry

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleKeyEntity.html b/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleKeyEntity.html
new file mode 100644
index 0000000..b393786
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/MarshalledTupleKeyEntity.html
@@ -0,0 +1,264 @@
+

Uses of Interface
com.sleepycat.bind.tuple.MarshalledTupleKeyEntity

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/PackedIntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/PackedIntegerBinding.html
new file mode 100644
index 0000000..6e502ba
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/PackedIntegerBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.PackedIntegerBinding

+
+
No usage of com.sleepycat.bind.tuple.PackedIntegerBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/PackedLongBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/PackedLongBinding.html
new file mode 100644
index 0000000..b9510ca
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/PackedLongBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.PackedLongBinding

+
+
No usage of com.sleepycat.bind.tuple.PackedLongBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/ShortBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/ShortBinding.html
new file mode 100644
index 0000000..68c45f8
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/ShortBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.ShortBinding

+
+
No usage of com.sleepycat.bind.tuple.ShortBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/SortedBigDecimalBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/SortedBigDecimalBinding.html
new file mode 100644
index 0000000..dcd8d4e
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/SortedBigDecimalBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.SortedBigDecimalBinding

+
+
No usage of com.sleepycat.bind.tuple.SortedBigDecimalBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/SortedDoubleBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/SortedDoubleBinding.html
new file mode 100644
index 0000000..5ed34ae
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/SortedDoubleBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.SortedDoubleBinding

+
+
No usage of com.sleepycat.bind.tuple.SortedDoubleBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/SortedFloatBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/SortedFloatBinding.html
new file mode 100644
index 0000000..772fca1
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/SortedFloatBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.SortedFloatBinding

+
+
No usage of com.sleepycat.bind.tuple.SortedFloatBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/SortedPackedIntegerBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/SortedPackedIntegerBinding.html
new file mode 100644
index 0000000..18d241f
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/SortedPackedIntegerBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.SortedPackedIntegerBinding

+
+
No usage of com.sleepycat.bind.tuple.SortedPackedIntegerBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/SortedPackedLongBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/SortedPackedLongBinding.html
new file mode 100644
index 0000000..0977488
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/SortedPackedLongBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.SortedPackedLongBinding

+
+
No usage of com.sleepycat.bind.tuple.SortedPackedLongBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/StringBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/StringBinding.html
new file mode 100644
index 0000000..d4a8c0d
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/StringBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.StringBinding

+
+
No usage of com.sleepycat.bind.tuple.StringBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleBase.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleBase.html
new file mode 100644
index 0000000..39b172e
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleBase.html
@@ -0,0 +1,382 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleBase

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleBinding.html
new file mode 100644
index 0000000..ae8410e
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleBinding.html
@@ -0,0 +1,317 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleBinding

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleInput.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleInput.html
new file mode 100644
index 0000000..ea68d10
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleInput.html
@@ -0,0 +1,382 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleInput

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleInputBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleInputBinding.html
new file mode 100644
index 0000000..4e12b9f
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleInputBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleInputBinding

+
+
No usage of com.sleepycat.bind.tuple.TupleInputBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleMarshalledBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleMarshalledBinding.html
new file mode 100644
index 0000000..9553992
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleMarshalledBinding.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleMarshalledBinding

+
+
No usage of com.sleepycat.bind.tuple.TupleMarshalledBinding
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleOutput.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleOutput.html
new file mode 100644
index 0000000..7452522
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleOutput.html
@@ -0,0 +1,674 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleOutput

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleBinding.html
new file mode 100644
index 0000000..7ec5455
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleBinding.html
@@ -0,0 +1,175 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleTupleBinding

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleKeyCreator.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleKeyCreator.html
new file mode 100644
index 0000000..8def823
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleKeyCreator.html
@@ -0,0 +1,173 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleTupleKeyCreator

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledBinding.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledBinding.html
new file mode 100644
index 0000000..e3ad62f
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledBinding.html
@@ -0,0 +1,172 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleTupleMarshalledBinding

+
+
+ +
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledKeyCreator.html b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledKeyCreator.html
new file mode 100644
index 0000000..4538261
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/class-use/TupleTupleMarshalledKeyCreator.html
@@ -0,0 +1,129 @@
+

Uses of Class
com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator

+
+
No usage of com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator
+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/package-frame.html b/docs/java/com/sleepycat/bind/tuple/package-frame.html
new file mode 100644
index 0000000..4dc9b75
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/package-frame.html
@@ -0,0 +1,52 @@

com.sleepycat.bind.tuple

+
+

Interfaces

+ +

Classes

+ +
diff --git a/docs/java/com/sleepycat/bind/tuple/package-summary.html b/docs/java/com/sleepycat/bind/tuple/package-summary.html
new file mode 100644
index 0000000..e4329a1
--- /dev/null
+++ b/docs/java/com/sleepycat/bind/tuple/package-summary.html
@@ -0,0 +1,754 @@
+

Package com.sleepycat.bind.tuple

+
+
Bindings that use sequences of primitive fields, or tuples.
+
+

See: Description

+
+
+ + + + +

Package com.sleepycat.bind.tuple Description

+
Bindings that use sequences of primitive fields, or tuples.

For a general discussion of bindings, see the Getting Started Guide.

Tuple Formats

+ +

The serialization formats for tuple bindings are designed for compactness, serialization speed, and proper default sorting.

+ +

When a format is used for database keys, it is important to use default sorting for best performance. Although a custom comparator may be specified for a database or entity index, custom comparators often reduce performance because comparators are called very frequently during Btree operations.

+ +

For proper default sorting, the byte array of the stored format must be designed so that a byte-by-byte unsigned comparison results in the natural sort order, as defined by the Comparable.compareTo(T) method of the data type. For example, the natural sort order for integers is the standard mathematical definition, and is implemented by Integer.compareTo, Long.compareTo, etc. This is called default natural sorting.

+ +

Although most tuple formats provide default natural sorting, not all of them do. Certain formats do not provide default natural sorting for historical reasons (see the discussion of packed integer and float formats below). Other formats sacrifice default natural sorting for other performance factors (see the discussion of BigDecimal formats below).

+ + +

Another performance factor has to do with the amount of memory used by keys in the Btree. Keys are stored in their serialized form in the Btree. If keys are small (currently 16 bytes or less), Btree memory can be optimized. Optimized memory storage is based on the maximum size of all keys in a single Btree node. A single Btree node holds N adjacent key values, where N is 128 by default and can be configured for each database or index.

+ + +

String Formats

+ +

All String formats support default natural sorting.

+ +

Strings are stored as a byte array of UTF encoded characters, either where the length must be known by the application, or the byte array is zero-terminated. The UTF encoding is described below.

+
    +
  • Null strings are UTF encoded as { 0xFF }, which is not allowed in a standard UTF encoding. This allows null strings, as distinct from empty or zero length strings, to be represented. Using default sorting, null strings will be ordered last.
  • Zero (0x0000) character values are UTF encoded as non-zero values, and therefore embedded zeros in the string are supported. The sequence { 0xC0, 0x80 } is used to encode a zero character. This UTF encoding is the same one used by the native Java UTF libraries and is called Modified UTF-8. However, this encoding of zero does impact the lexicographical ordering, and zeros will not be sorted first (the natural order) or last.
  • For all character values other than zero, the standard UTF encoding is used, and the default sorting is the same as the Unicode lexicographical character ordering.
+ +

Binding classes and methods are provided for zero-terminated and +known-length String values.

+ + +
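As a brief, hedged example of the zero-terminated variant (StringBinding's static methods are from this package; the setup is assumed):

    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.je.DatabaseEntry;

    DatabaseEntry entry = new DatabaseEntry();
    StringBinding.stringToEntry("hello", entry);  // zero-terminated modified UTF-8
    String value = StringBinding.entryToString(entry);
    StringBinding.stringToEntry(null, entry);     // a null string encodes as { 0xFF }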

Integer Formats

+ +

Fixed Size Integer Formats

+ +

All fixed size integer formats support default natural sorting.

+ +

The size of the stored value depends on the type, and ranges (as one would +expect) from 1 byte for type byte and class Byte, to 8 bytes for +type long and class Long.

+ +

Signed numbers are stored in the buffer in MSB (most significant byte first) +order with their sign bit (high-order bit) inverted to cause negative numbers +to be sorted first when comparing values as unsigned byte arrays, as done in a +database.

+ + + +
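A minimal sketch of this transformation for an int (not the library's internal code), showing why unsigned byte comparison then yields the signed ordering:

    // Serialize an int MSB-first with the sign bit inverted. After the
    // inversion, Integer.MIN_VALUE maps to 0x00000000 and Integer.MAX_VALUE
    // to 0xffffffff, so unsigned byte order equals signed numeric order.
    static byte[] intKeyBytes(int val) {
        int v = val ^ 0x80000000;
        return new byte[] {
            (byte) (v >>> 24), (byte) (v >>> 16),
            (byte) (v >>> 8), (byte) v
        };
    }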

Unsigned numbers, including characters, are stored in MSB order with no +change to their sign bit. Arrays of characters and unsigned bytes may also be +stored and may be treated as String values. For booleans, true +is stored as the unsigned byte value one and false as the unsigned byte +value zero.

+ + + +

Packed Integer Formats

+ +

The packed integer format stores integers with small absolute values in a +single byte. The size increases as the absolute value increases, up to a +maximum of 5 bytes for int values and 9 bytes for long +values.

+ +

The packed integer format can be used for integer values between Long.MIN_VALUE and Long.MAX_VALUE. However, +different bindings and methods are provided for type int and long, to avoid unsafe casting from long to int when int values are used.

+ +

Because the same packed format is used for int and long +values, stored int values may be expanded to long values +without introducing a format incompatibility. In other words, you can treat +previously stored packed int values as packed long values.

+ +

Packed integer formats come in two varieties: those that support default +natural sorting and those that don't. The formats of the two varieties are +incompatible. For new applications, the format that supports default natural +sorting should normally be used. There is no performance advantage to using +the unsorted format.

+ +

The format with support for default natural sorting stores values in the +inclusive range [-119,120] in a single byte.

+ + +
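A hedged sketch of writing and reading the sorted packed format directly (assuming the writeSortedPackedInt/readSortedPackedLong methods of TupleOutput/TupleInput):

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    TupleOutput out = new TupleOutput();
    out.writeSortedPackedInt(42);   // small values occupy a single byte
    TupleInput in = new TupleInput(out.toByteArray());
    long widened = in.readSortedPackedLong(); // per the compatibility note above,
                                              // a stored int may be read as a long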

The unsorted packed integer format is an older, legacy format that is used +internally and supported for compatibility. It stores values in the inclusive +range [-119,119] in a single byte. Because default natural sorting is not +supported, this format should not be used for keys. However, it so happens +that packed integers in the inclusive range [0,630] are sorted correctly by +default, and this may be useful for some applications.

+ + +

BigInteger Formats

+ +

All BigInteger formats support default natural sorting.

+ +

BigInteger values are variable length and are stored as signed +values with a preceding byte length. The length has the same sign as the +value, in order to support default natural sorting.

+ +

The length is stored as a 2-byte (short), fixed size, signed integer. Supported values are therefore limited to those with a byte array (BigInteger.toByteArray()) representation of 0x7fff bytes or less. The maximum BigInteger value is (2^0x3fff7 - 1) and the minimum value is (-2^0x3fff7).

+ + + +
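For example, a hedged round-trip through the BigInteger format (assuming the writeBigInteger/readBigInteger methods of TupleOutput/TupleInput):

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import java.math.BigInteger;

    TupleOutput out = new TupleOutput();
    out.writeBigInteger(new BigInteger("123456789012345678901234567890"));
    BigInteger value = new TupleInput(out.toByteArray()).readBigInteger();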

Floating Point Formats

+ +

Floats and doubles are stored in a fixed size, 4 and 8 byte format, +respectively. Floats and doubles are stored using two different +representations: a representation with default natural sorting, and an +unsorted, integer-bit (IEEE 754) representation. For new applications, the +format that supports default natural sorting should normally be used. There is +no performance advantage to using the unsorted format.

+ +

For float values, Float.floatToIntBits and the following +bit manipulations are used to convert the signed float value to a +representation that is sorted correctly by default.

+
+ int intVal = Float.floatToIntBits(val);
+ intVal ^= (intVal < 0) ? 0xffffffff : 0x80000000;
+
+ +

For double values, Double.doubleToLongBits and the following bit manipulations are used to convert the signed double value to a representation that is sorted correctly by default.

+
+ long longVal = Double.doubleToLongBits(val);
+ longVal ^= (longVal < 0) ? 0xffffffffffffffffL : 0x8000000000000000L;
+
+ +

In both cases, the resulting int or long value is stored as +an unsigned value.

+ + + +
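The transformation is reversible; a minimal sketch of the decoding step for float (not the library's code):

    // Undo the sorted-float encoding: a stored value with the high bit set
    // was originally non-negative (XOR 0x80000000); otherwise it was
    // negative (XOR 0xffffffff).
    static float decodeSortedFloat(int stored) {
        stored ^= (stored < 0) ? 0x80000000 : 0xffffffff;
        return Float.intBitsToFloat(stored);
    }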

The unsorted floating point format is an older, legacy format that is +supported for compatibility. With this format, only zero and positive values +have default natural sorting; negative values do not.

+ + + +

BigDecimal Formats

+ +

BigDecimal values are stored using two different, variable length +representations: a representation that supports default natural sorting, and an +unsorted representation. Differences between the two formats are: +

  • The BigDecimal format with default natural sorting should normally be used for database keys.
      • Default natural sorting is supported.
      • The stored value is roughly 3 bytes larger than the unsorted format, with a minimum of 8 bytes.
      • More computation is required for serialization than with the unsorted format.
      • Trailing zeros after the decimal place are stripped, meaning that precision is not preserved.
  • The unsorted BigDecimal format should normally be used for non-key values.
      • Default natural sorting is not supported.
      • The stored value is roughly 3 bytes smaller than the sorted format, with a minimum of 3 bytes.
      • Less computation is required for serialization than with the sorted format.
      • Trailing zeros after the decimal place are preserved, meaning that precision is preserved.
+ +

Both formats store the scale or exponent separately from the unscaled value, +and the stored size does not increase proportionally as the absolute value of +the scale or exponent increases.

+ +
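A hedged round-trip through the sorted BigDecimal format (assuming the writeSortedBigDecimal/readSortedBigDecimal methods of TupleOutput/TupleInput):

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import java.math.BigDecimal;

    TupleOutput out = new TupleOutput();
    out.writeSortedBigDecimal(new BigDecimal("3.1400")); // trailing zeros stripped
    BigDecimal value = new TupleInput(out.toByteArray()).readSortedBigDecimal();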
+
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/tuple/package-tree.html b/docs/java/com/sleepycat/bind/tuple/package-tree.html new file mode 100644 index 0000000..e191ef8 --- /dev/null +++ b/docs/java/com/sleepycat/bind/tuple/package-tree.html @@ -0,0 +1,202 @@ + + + + + +com.sleepycat.bind.tuple Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Hierarchy For Package com.sleepycat.bind.tuple

+Package Hierarchies: + +
+
+

Class Hierarchy

+ +

Interface Hierarchy

+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/bind/tuple/package-use.html b/docs/java/com/sleepycat/bind/tuple/package-use.html new file mode 100644 index 0000000..eb1561f --- /dev/null +++ b/docs/java/com/sleepycat/bind/tuple/package-use.html @@ -0,0 +1,281 @@ + + + + + +Uses of Package com.sleepycat.bind.tuple (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +
+

Uses of Package
com.sleepycat.bind.tuple

+
+
+ +
+ +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/CurrentTransaction.html b/docs/java/com/sleepycat/collections/CurrentTransaction.html new file mode 100644 index 0000000..8b17796 --- /dev/null +++ b/docs/java/com/sleepycat/collections/CurrentTransaction.html @@ -0,0 +1,437 @@ + + + + + +CurrentTransaction (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class CurrentTransaction

+
+
+ +
+
    +
  • +
    +
    +
    public class CurrentTransaction
    +extends java.lang.Object
    +
    Provides access to the current transaction for the current thread within the + context of a Berkeley DB environment. This class provides explicit + transaction control beyond that provided by the TransactionRunner + class. However, both methods of transaction control manage per-thread + transactions.
    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
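A minimal usage sketch based on the methods documented below (env is assumed to be an open, transactional Environment; passing null to beginTransaction for a default configuration is an assumption):

    CurrentTransaction ct = CurrentTransaction.getInstance(env);
    Transaction txn = ct.beginTransaction(null);
    boolean committed = false;
    try {
        // ... perform reads and writes via stored collections ...
        ct.commitTransaction();
        committed = true;
    } finally {
        if (!committed) {
            ct.abortTransaction(); // roll back on any failure path
        }
    }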
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Static Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      TransactionabortTransaction() +
      Aborts the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction.
      +
      TransactionbeginTransaction(TransactionConfig config) +
      Begins a new transaction for this environment and associates it with + the current thread.
      +
      TransactioncommitTransaction() +
      Commits the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction.
      +
      EnvironmentgetEnvironment() +
      Returns the underlying Berkeley DB environment.
      +
      static CurrentTransactiongetInstance(Environment env) +
      Gets the CurrentTransaction accessor for a specified Berkeley DB + environment.
      +
      TransactiongetTransaction() +
      Returns the transaction associated with the current thread for this + environment, or null if no transaction is active.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        getInstance

        +
        public static CurrentTransaction getInstance(Environment env)
        +
        Gets the CurrentTransaction accessor for a specified Berkeley DB + environment. This method always returns the same reference when called + more than once with the same environment parameter.
        +
        +
        Parameters:
        +
        env - is an open Berkeley DB environment.
        +
        Returns:
        +
        the CurrentTransaction accessor for the given environment, or + null if the environment is not transactional.
        +
        +
      • +
      + + + +
        +
      • +

        getEnvironment

        +
        public final Environment getEnvironment()
        +
        Returns the underlying Berkeley DB environment.
        +
        +
        Returns:
        +
        the Environment.
        +
        +
      • +
      + + + +
        +
      • +

        getTransaction

        +
        public final Transaction getTransaction()
        +
        Returns the transaction associated with the current thread for this + environment, or null if no transaction is active.
        +
        +
        Returns:
        +
        the Transaction.
        +
        +
      • +
      + + + + + + + +
        +
      • +

        commitTransaction

        +
        public final Transaction commitTransaction()
        +                                    throws DatabaseException,
        +                                           java.lang.IllegalStateException
        +
        Commits the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction.
        +
        +
        Returns:
        +
        the parent transaction or null if the committed transaction was + not nested. + +
        +
        Throws:
        +
        InsufficientReplicasException - if the master + in a replicated environment could not contact a quorum of replicas as + determined by the Durability.ReplicaAckPolicy. + The application must abort the transaction and can choose to retry it.
        +
        InsufficientAcksException - if the master in + a replicated environment did not receive enough replica acknowledgments, + although the commit succeeded locally.
        +
        ReplicaWriteException - if a write operation + was performed with this transaction, but this node is now a Replica.
        +
        OperationFailureException - if this exception occurred earlier and + caused the transaction to be invalidated.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        DatabaseException - if an error occurs committing the transaction. + The transaction will still be closed and the parent transaction will + become the current transaction.
        +
        java.lang.IllegalStateException - if no transaction is active for the + current thread for this environment.
        +
        +
      • +
      + + + +
        +
      • +

        abortTransaction

        +
        public final Transaction abortTransaction()
        +                                   throws DatabaseException,
        +                                          java.lang.IllegalStateException
        +
        Aborts the transaction that is active for the current thread for this + environment and makes the parent transaction (if any) the current + transaction.
        +
        +
        Returns:
        +
        the parent transaction or null if the aborted transaction was + not nested. + +
        +
        Throws:
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        DatabaseException - if an error occurs aborting the transaction. + The transaction will still be closed and the parent transaction will + become the current transaction.
        +
        java.lang.IllegalStateException - if no transaction is active for the + current thread for this environment.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/MapEntryParameter.html b/docs/java/com/sleepycat/collections/MapEntryParameter.html new file mode 100644 index 0000000..faeb4c4 --- /dev/null +++ b/docs/java/com/sleepycat/collections/MapEntryParameter.html @@ -0,0 +1,443 @@ + + + + + +MapEntryParameter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class MapEntryParameter<K,V>

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    java.util.Map.Entry<K,V>
    +
    +
    +
    +
    public class MapEntryParameter<K,V>
    +extends java.lang.Object
    +implements java.util.Map.Entry<K,V>
    +
A simple Map.Entry implementation that can be used as an input parameter. Since a MapEntryParameter is not obtained from a map, it is not attached to any map in particular. To emphasize that changing this object does not change the map, the setValue(V) method always throws UnsupportedOperationException.

Warning: Use of this class violates the Java Collections interface contract, which states that Map.Entry objects should be obtained only from Map.entrySet() sets, while this class allows constructing them directly. However, it is useful for performing operations on an entry set such as add(), contains(), etc. For restrictions see getValue() and setValue(V).

    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
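For example, a hedged sketch of using an entry as an input parameter (map is assumed to be an existing Map<String, Integer>):

    java.util.Map.Entry<String, Integer> entry =
        new MapEntryParameter<>("one", 1);
    // Query membership without first obtaining an entry from the map.
    boolean present = map.entrySet().contains(entry);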
+
+
    +
  • + +
      +
    • + + +

      Constructor Summary

      + + + + + + + + +
      Constructors 
      Constructor and Description
      MapEntryParameter(K key, + V value) +
      Creates a map entry with a given key and value.
      +
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      booleanequals(java.lang.Object other) +
      Compares this entry to a given entry as specified by Map.Entry.equals(java.lang.Object).
      +
      KgetKey() +
      Returns the key of this entry.
      +
      VgetValue() +
      Returns the value of this entry.
      +
      inthashCode() +
      Computes a hash code as specified by Map.Entry.hashCode().
      +
      VsetValue(V newValue) +
      Always throws UnsupportedOperationException since this + object is not attached to a map.
      +
      java.lang.StringtoString() +
      Converts the entry to a string representation for debugging.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Map.Entry

        +comparingByKey, comparingByKey, comparingByValue, comparingByValue
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + + + +
        +
      • +

        MapEntryParameter

        +
        public MapEntryParameter(K key,
        +                         V value)
        +
        Creates a map entry with a given key and value.
        +
        +
        Parameters:
        +
        key - is the key to use.
        +
        value - is the value to use.
        +
        +
      • +
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        hashCode

        +
        public int hashCode()
        +
        Computes a hash code as specified by Map.Entry.hashCode().
        +
        +
        Specified by:
        +
        hashCode in interface java.util.Map.Entry<K,V>
        +
        Overrides:
        +
        hashCode in class java.lang.Object
        +
        Returns:
        +
        the computed hash code.
        +
        +
      • +
      + + + +
        +
      • +

        equals

        +
        public boolean equals(java.lang.Object other)
        +
        Compares this entry to a given entry as specified by Map.Entry.equals(java.lang.Object).
        +
        +
        Specified by:
        +
        equals in interface java.util.Map.Entry<K,V>
        +
        Overrides:
        +
        equals in class java.lang.Object
        +
        Returns:
        +
true if this entry is equal to the given object, false otherwise.
        +
        +
      • +
      + + + +
        +
      • +

        getKey

        +
        public final K getKey()
        +
        Returns the key of this entry.
        +
        +
        Specified by:
        +
        getKey in interface java.util.Map.Entry<K,V>
        +
        Returns:
        +
        the key of this entry.
        +
        +
      • +
      + + + +
        +
      • +

        getValue

        +
        public final V getValue()
        +
        Returns the value of this entry. Note that this will be the value + passed to the constructor or the last value passed to setValue(V). + It will not reflect changes made to a Map.
        +
        +
        Specified by:
        +
        getValue in interface java.util.Map.Entry<K,V>
        +
        Returns:
        +
        the value of this entry.
        +
        +
      • +
      + + + + + +
        +
      • +

        setValue

        +
        public V setValue(V newValue)
        +
        Always throws UnsupportedOperationException since this + object is not attached to a map.
        +
        +
        Specified by:
        +
        setValue in interface java.util.Map.Entry<K,V>
        +
        +
      • +
      + + + +
        +
      • +

        toString

        +
        public java.lang.String toString()
        +
        Converts the entry to a string representation for debugging.
        +
        +
        Overrides:
        +
        toString in class java.lang.Object
        +
        Returns:
        +
        the string representation.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/PrimaryKeyAssigner.html b/docs/java/com/sleepycat/collections/PrimaryKeyAssigner.html new file mode 100644 index 0000000..403a99b --- /dev/null +++ b/docs/java/com/sleepycat/collections/PrimaryKeyAssigner.html @@ -0,0 +1,245 @@ + + + + + +PrimaryKeyAssigner (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Interface PrimaryKeyAssigner

+
+
+
+
    +
  • +
    +
    +
    public interface PrimaryKeyAssigner
    +
    An interface implemented to assign new primary key values. + An implementation of this interface is passed to the StoredMap + or StoredSortedMap constructor to assign primary keys for that + store. Key assignment occurs when StoredMap.append() is called.
    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
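A minimal sketch of an implementation (the in-memory counter is illustrative only; a real application would persist its key source):

    // Assigns ascending integer keys using the tuple integer format.
    class CounterKeyAssigner implements PrimaryKeyAssigner {
        private final java.util.concurrent.atomic.AtomicInteger next =
            new java.util.concurrent.atomic.AtomicInteger();
        public void assignKey(DatabaseEntry keyData) throws DatabaseException {
            com.sleepycat.bind.tuple.IntegerBinding.intToEntry(
                next.incrementAndGet(), keyData);
        }
    }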
+
+ +
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        assignKey

        +
        void assignKey(DatabaseEntry keyData)
        +        throws DatabaseException
        +
        Assigns a new primary key value into the given buffer.
        +
        +
        Parameters:
        +
        keyData - the buffer.
        +
        Throws:
        +
        DatabaseException - to stop the operation and cause this exception + to be propagated to the caller of StoredMap.append().
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/StoredCollection.html b/docs/java/com/sleepycat/collections/StoredCollection.html new file mode 100644 index 0000000..d9c5463 --- /dev/null +++ b/docs/java/com/sleepycat/collections/StoredCollection.html @@ -0,0 +1,948 @@ + + + + + +StoredCollection (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class StoredCollection<E>

+
+
+ +
+ +
+
+
    +
  • + +
      +
    • + + +

      Field Summary

      + + + + + + + + + + +
      Fields 
      Modifier and TypeField and Description
      static intDEFAULT_ITERATOR_BLOCK_SIZE +
      The default number of records read at one time by iterators.
      +
      +
    • +
    + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods Deprecated Methods 
      Modifier and TypeMethod and Description
      booleanaddAll(java.util.Collection<? extends E> coll) +
      Adds all of the elements in the specified collection to this collection + (optional operation).
      +
      booleancontainsAll(java.util.Collection<?> coll) +
      Returns true if this collection contains all of the elements in the + specified collection.
      +
      booleanequals(java.lang.Object other) +
      Compares the specified object with this collection for equality.
      +
      intgetIteratorBlockSize() +
      Returns the number of records read at one time by iterators returned by + the iterator() method.
      +
      inthashCode() 
      java.util.Iterator<E>iterator() +
      Returns an iterator over the elements in this collection.
      +
      StoredIterator<E>iterator(boolean writeAllowed) +
      Deprecated.  +
      Please use storedIterator() or storedIterator(boolean) instead. Because the iterator returned must + be closed, the method name iterator is confusing since standard + Java iterators do not need to be closed.
      +
      +
      StoredIterator<E>join(StoredContainer[] indices, + java.lang.Object[] indexKeys, + JoinConfig joinConfig) +
      Returns an iterator representing an equality join of the indices and + index key values specified.
      +
      booleanremoveAll(java.util.Collection<?> coll) +
      Removes all this collection's elements that are also contained in the + specified collection (optional operation).
      +
      booleanretainAll(java.util.Collection<?> coll) +
      Retains only the elements in this collection that are contained in the + specified collection (optional operation).
      +
      voidsetIteratorBlockSize(int blockSize) +
      Changes the number of records read at one time by iterators returned by + the iterator() method.
      +
      intsize() +
      Returns a non-transactional count of the records in the collection or + map.
      +
      StoredIterator<E>storedIterator() +
      Returns an iterator over the elements in this collection.
      +
      StoredIterator<E>storedIterator(boolean writeAllowed) +
      Returns a read or read-write iterator over the elements in this + collection.
      +
      java.lang.Object[]toArray() +
      Returns an array of all the elements in this collection.
      +
      <T> T[]toArray(T[] a) +
      Returns an array of all the elements in this collection whose runtime + type is that of the specified array.
      +
      java.util.List<E>toList() +
      Returns a copy of this collection as an ArrayList.
      +
      java.lang.StringtoString() +
      Converts the collection to a string representation for debugging.
      +
      + +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Collection

        +add, clear, contains, isEmpty, parallelStream, remove, removeIf, spliterator, stream
      • +
      +
        +
      • + + +

        Methods inherited from interface java.lang.Iterable

        +forEach
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + + + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        getIteratorBlockSize

        +
        public int getIteratorBlockSize()
        +
        Returns the number of records read at one time by iterators returned by + the iterator() method. By default this value is DEFAULT_ITERATOR_BLOCK_SIZE.
        +
        +
        Returns:
        +
        the number of records.
        +
        +
      • +
      + + + +
        +
      • +

        setIteratorBlockSize

        +
        public void setIteratorBlockSize(int blockSize)
        +
        Changes the number of records read at one time by iterators returned by + the iterator() method. By default this value is DEFAULT_ITERATOR_BLOCK_SIZE.
        +
        +
        Parameters:
        +
        blockSize - the number of records.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if the blockSize is less than two.
        +
        +
      • +
      + + + +
        +
      • +

        iterator

        +
        public java.util.Iterator<E> iterator()
        +
        Returns an iterator over the elements in this collection. + The iterator will be read-only if the collection is read-only. + This method conforms to the Collection.iterator() interface. + +

        The iterator returned by this method does not keep a database cursor + open and therefore it does not need to be closed. It reads blocks of + records as needed, opening and closing a cursor to read each block of + records. The number of records per block is 10 by default and can be + changed with setIteratorBlockSize(int).

        + +

        Because this iterator does not keep a cursor open, if it is used + without transactions, the iterator does not have cursor + stability characteristics. In other words, the record at the + current iterator position can be changed or deleted by another thread. + To prevent this from happening, call this method within a transaction or + use the storedIterator() method instead.

        +
        +
        Specified by:
        +
        iterator in interface java.lang.Iterable<E>
        +
        Specified by:
        +
        iterator in interface java.util.Collection<E>
        +
        Returns:
        +
        a standard Iterator for this collection.
        +
        See Also:
        +
        StoredContainer.isWriteAllowed()
        +
        +
      • +
      + + + +
        +
      • +

        storedIterator

        +
        public StoredIterator<E> storedIterator()
        +
        Returns an iterator over the elements in this collection. + The iterator will be read-only if the collection is read-only. + This method does not exist in the standard Collection interface. + +

        If Iterator.set or Iterator.remove will be called + and the underlying Database is transactional, then a transaction must be + active when calling this method and must remain active while using the + iterator.

        + +

        Warning: The iterator returned must be explicitly + closed using StoredIterator.close() or StoredIterator.close(java.util.Iterator) to release the underlying + database cursor resources.

        +
        +
        Returns:
        +
        a StoredIterator for this collection.
        +
        See Also:
        +
        StoredContainer.isWriteAllowed()
        +
        +
      • +
      + + + +
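A hedged usage sketch of the explicit-close requirement (coll is assumed to be a StoredCollection<String>):

    StoredIterator<String> i = coll.storedIterator();
    try {
        while (i.hasNext()) {
            String element = i.next();
            // ... process element ...
        }
    } finally {
        i.close(); // releases the underlying database cursor
    }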
        +
      • +

        storedIterator

        +
        public StoredIterator<E> storedIterator(boolean writeAllowed)
        +
        Returns a read or read-write iterator over the elements in this + collection. + This method does not exist in the standard Collection interface. + +

        If Iterator.set or Iterator.remove will be called + and the underlying Database is transactional, then a transaction must be + active when calling this method and must remain active while using the + iterator.

        + +

        Warning: The iterator returned must be explicitly + closed using StoredIterator.close() or StoredIterator.close(java.util.Iterator) to release the underlying + database cursor resources.

        +
        +
        Parameters:
        +
        writeAllowed - is true to open a read-write iterator or false to + open a read-only iterator. If the collection is read-only the iterator + will always be read-only.
        +
        Returns:
        +
        a StoredIterator for this collection.
        +
        Throws:
        +
        java.lang.IllegalStateException - if writeAllowed is true but the collection + is read-only.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        See Also:
        +
        StoredContainer.isWriteAllowed()
        +
        +
      • +
      + + + +
        +
      • +

        iterator

        +
        public StoredIterator<E> iterator(boolean writeAllowed)
        +
        Deprecated. Please use storedIterator() or storedIterator(boolean) instead. Because the iterator returned must + be closed, the method name iterator is confusing since standard + Java iterators do not need to be closed.
        +
        +
        Parameters:
        +
        writeAllowed - is true to open a read-write iterator or false to + open a read-only iterator. If the collection is read-only the iterator + will always be read-only.
        +
        Returns:
        +
        a StoredIterator for this collection.
        +
        +
      • +
      + + + +
        +
      • +

        toArray

        +
        public java.lang.Object[] toArray()
        +
        Returns an array of all the elements in this collection. + This method conforms to the Collection.toArray() interface. + +
        +
        +
        Specified by:
        +
        toArray in interface java.util.Collection<E>
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        toArray

        +
        public <T> T[] toArray(T[] a)
        +
        Returns an array of all the elements in this collection whose runtime + type is that of the specified array. + This method conforms to the Collection.toArray(Object[]) + interface. + +
        +
        +
        Specified by:
        +
        toArray in interface java.util.Collection<E>
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        containsAll

        +
        public boolean containsAll(java.util.Collection<?> coll)
        +
        Returns true if this collection contains all of the elements in the + specified collection. + This method conforms to the Collection.containsAll(java.util.Collection<?>) interface. + +
        +
        +
        Specified by:
        +
        containsAll in interface java.util.Collection<E>
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        addAll

        +
        public boolean addAll(java.util.Collection<? extends E> coll)
        +
        Adds all of the elements in the specified collection to this collection + (optional operation). + This method calls the Collection.add(Object) method of the concrete + collection class, which may or may not be supported. + This method conforms to the Collection.addAll(java.util.Collection<? extends E>) interface. + +
        +
        +
        Specified by:
        +
        addAll in interface java.util.Collection<E>
        +
        Throws:
        +
        OperationFailureException - if one of the Write + Operation Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        java.lang.UnsupportedOperationException - if the collection is read-only, or + if the collection is indexed, or if the add method is not supported by + the concrete collection.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        removeAll

        +
        public boolean removeAll(java.util.Collection<?> coll)
        +
        Removes all this collection's elements that are also contained in the + specified collection (optional operation). + This method conforms to the Collection.removeAll(java.util.Collection<?>) interface. + +
        +
        +
        Specified by:
        +
        removeAll in interface java.util.Collection<E>
        +
        Throws:
        +
        OperationFailureException - if one of the Write + Operation Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        java.lang.UnsupportedOperationException - if the collection is read-only.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        retainAll

        +
        public boolean retainAll(java.util.Collection<?> coll)
        +
Retains only the elements in this collection that are contained in the specified collection (optional operation). This method conforms to the Collection.retainAll(java.util.Collection<?>) interface.
        +
        +
        Specified by:
        +
        retainAll in interface java.util.Collection<E>
        +
        Throws:
        +
        OperationFailureException - if one of the Write + Operation Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        java.lang.UnsupportedOperationException - if the collection is read-only.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        equals

        +
        public boolean equals(java.lang.Object other)
        +
Compares the specified object with this collection for equality. This method performs a value comparison: the stored values are compared directly, rather than by calling the equals() method of each element. This method conforms to the Collection.equals(java.lang.Object) interface.
        +
        +
        Specified by:
        +
        equals in interface java.util.Collection<E>
        +
        Overrides:
        +
        equals in class java.lang.Object
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        hashCode

        +
        public int hashCode()
        +
        +
        Specified by:
        +
        hashCode in interface java.util.Collection<E>
        +
        Overrides:
        +
        hashCode in class java.lang.Object
        +
        +
      • +
      + + + +
        +
      • +

        toList

        +
        public java.util.List<E> toList()
        +
        Returns a copy of this collection as an ArrayList. This is the same as + toArray() but returns a collection instead of an array.
        +
        +
        Returns:
        +
        an ArrayList containing a copy of all elements in this + collection. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        toString

        +
        public java.lang.String toString()
        +
        Converts the collection to a string representation for debugging. + WARNING: The returned string may be very large.
        +
        +
        Overrides:
        +
        toString in class java.lang.Object
        +
        Returns:
        +
        the string representation. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      + + + +
        +
      • +

        size

        +
        public int size()
        +
        Description copied from class: StoredContainer
        +
        Returns a non-transactional count of the records in the collection or + map. This method conforms to the Collection.size() and + Map.size() interfaces. + + +

        This operation is faster than obtaining a count by scanning the + collection manually, and will not perturb the current contents of the + cache. However, the count is not guaranteed to be accurate if there are + concurrent updates.

        +
        +
        +
        Specified by:
        +
        size in interface java.util.Collection<E>
        +
        Specified by:
        +
        size in class StoredContainer
        +
        Returns:
        +
        the number of records. + +
        +
        +
      • +
      + + + +
        +
      • +

        join

        +
        public StoredIterator<E> join(StoredContainer[] indices,
        +                              java.lang.Object[] indexKeys,
        +                              JoinConfig joinConfig)
        +
        Returns an iterator representing an equality join of the indices and + index key values specified. + This method does not exist in the standard Collection interface. + +

        Warning: The iterator returned must be explicitly + closed using StoredIterator.close() or StoredIterator.close(java.util.Iterator) to release the underlying + database cursor resources.

        + +

        The returned iterator supports only the two methods: hasNext() and + next(). All other methods will throw UnsupportedOperationException.

        +
        +
        Parameters:
        +
        indices - is an array of indices with elements corresponding to + those in the indexKeys array.
        +
        indexKeys - is an array of index key values identifying the + elements to be selected.
        +
        joinConfig - is the join configuration, or null to use the + default configuration.
        +
        Returns:
        +
        an iterator over the elements in this collection that match + all specified index key values. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        java.lang.IllegalArgumentException - if this collection is indexed or if a + given index does not have the same store as this collection.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C Edition).
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/StoredCollections.html b/docs/java/com/sleepycat/collections/StoredCollections.html new file mode 100644 index 0000000..a40d9b8 --- /dev/null +++ b/docs/java/com/sleepycat/collections/StoredCollections.html @@ -0,0 +1,476 @@ + + + + + +StoredCollections (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class StoredCollections

+
+
+ +
+
    +
  • +
    +
    +
    public class StoredCollections
    +extends java.lang.Object
    +
    Static methods operating on collections and maps. + +

    This class consists exclusively of static methods that operate on or + return stored collections and maps, jointly called containers. It contains + methods for changing certain properties of a container. Because container + properties are immutable, these methods always return a new container + instance. This allows stored container instances to be used safely by + multiple threads. Creating the new container instance is not expensive and + creates only two new objects.

    + +

    When a container is created with a particular property, all containers + and iterators derived from that container will inherit the property. For + example, if a read-uncommitted Map is created then calls to its subMap(), + values(), entrySet(), and keySet() methods will create read-uncommitted + containers also.

    + +

    Method names beginning with "configured" create a new container with a + specified CursorConfig from a given stored container. This allows + configuring a container for read-committed isolation, read-uncommitted + isolation, or any other property supported by CursorConfig. + All operations performed with the resulting container will be performed with + the specified cursor configuration.

    +
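For example, a hedged sketch deriving a read-committed view of a stored map (map is assumed to be a stored map instance):

    java.util.Map<String, String> readCommitted =
        StoredCollections.configuredMap(map, CursorConfig.READ_COMMITTED);
    // The original map is unchanged; all operations performed through the
    // new instance use read-committed isolation.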
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Static Methods Concrete Methods 
      Modifier and TypeMethod and Description
      static <E> java.util.Collection<E>configuredCollection(java.util.Collection<E> storedCollection, + CursorConfig config) +
      Creates a configured collection from a given stored collection.
      +
      static <E> java.util.List<E>configuredList(java.util.List<E> storedList, + CursorConfig config) +
      Creates a configured list from a given stored list.
      +
      static <K,V> java.util.Map<K,V>configuredMap(java.util.Map<K,V> storedMap, + CursorConfig config) +
      Creates a configured map from a given stored map.
      +
      static <E> java.util.Set<E>configuredSet(java.util.Set<E> storedSet, + CursorConfig config) +
      Creates a configured set from a given stored set.
      +
      static <K,V> java.util.SortedMap<K,V>configuredSortedMap(java.util.SortedMap<K,V> storedSortedMap, + CursorConfig config) +
      Creates a configured sorted map from a given stored sorted map.
      +
      static <E> java.util.SortedSet<E>configuredSortedSet(java.util.SortedSet<E> storedSortedSet, + CursorConfig config) +
      Creates a configured sorted set from a given stored sorted set.
      +
      static <E> java.util.Iterator<E>iterator(java.util.Iterator<E> iter) +
      Clones an iterator preserving its current position.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        configuredCollection

        +
        public static <E> java.util.Collection<E> configuredCollection(java.util.Collection<E> storedCollection,
        +                                                               CursorConfig config)
        +
        Creates a configured collection from a given stored collection.
        +
        +
        Type Parameters:
        +
        E - the element class.
        +
        Parameters:
        +
        storedCollection - the base collection.
        +
        config - is the cursor configuration to be used for all operations + performed via the new collection instance; null may be specified to use + the default configuration.
        +
        Returns:
        +
        the configured collection.
        +
        Throws:
        +
        java.lang.ClassCastException - if the given container is not a + StoredContainer.
        +
        +
      • +
      + + + +
        +
      • +

        configuredList

        +
        public static <E> java.util.List<E> configuredList(java.util.List<E> storedList,
        +                                                   CursorConfig config)
        +
        Creates a configured list from a given stored list. + +

        Note that this method may not be called in the JE product, since the + StoredList class is not supported.

        +
        +
        Type Parameters:
        +
        E - the element class.
        +
        Parameters:
        +
        storedList - the base list.
        +
        config - is the cursor configuration to be used for all operations + performed via the new list instance; null may be specified to use the + default configuration.
        +
        Returns:
        +
        the configured list.
        +
        Throws:
        +
        java.lang.ClassCastException - if the given container is not a + StoredContainer.
        +
        +
      • +
      + + + +
        +
      • +

        configuredMap

        +
        public static <K,V> java.util.Map<K,V> configuredMap(java.util.Map<K,V> storedMap,
        +                                                     CursorConfig config)
        +
        Creates a configured map from a given stored map.
        +
        +
        Type Parameters:
        +
        K - the key class.
        +
        V - the value class.
        +
        Parameters:
        +
        storedMap - the base map.
        +
        config - is the cursor configuration to be used for all operations + performed via the new map instance; null may be specified to use the + default configuration.
        +
        Returns:
        +
        the configured map.
        +
        Throws:
        +
        java.lang.ClassCastException - if the given container is not a + StoredContainer.
        +
        +
      • +
      + + + +
        +
      • +

        configuredSet

        +
        public static <E> java.util.Set<E> configuredSet(java.util.Set<E> storedSet,
        +                                                 CursorConfig config)
        +
        Creates a configured set from a given stored set.
        +
        +
        Type Parameters:
        +
        E - the element class.
        +
        Parameters:
        +
        storedSet - the base set.
        +
        config - is the cursor configuration to be used for all operations + performed via the new set instance; null may be specified to use the + default configuration.
        +
        Returns:
        +
        the configured set.
        +
        Throws:
        +
        java.lang.ClassCastException - if the given container is not a + StoredContainer.
        +
        +
      • +
      + + + +
        +
      • +

        configuredSortedMap

        +
        public static <K,V> java.util.SortedMap<K,V> configuredSortedMap(java.util.SortedMap<K,V> storedSortedMap,
        +                                                                 CursorConfig config)
        +
        Creates a configured sorted map from a given stored sorted map.
        +
        +
        Type Parameters:
        +
        K - the key class.
        +
        V - the value class.
        +
        Parameters:
        +
        storedSortedMap - the base map.
        +
        config - is the cursor configuration to be used for all operations + performed via the new map instance; null may be specified to use the + default configuration.
        +
        Returns:
        +
        the configured map.
        +
        Throws:
        +
        java.lang.ClassCastException - if the given container is not a + StoredContainer.
        +
        +
      • +
      + + + +
        +
      • +

        configuredSortedSet

        +
        public static <E> java.util.SortedSet<E> configuredSortedSet(java.util.SortedSet<E> storedSortedSet,
        +                                                             CursorConfig config)
        +
        Creates a configured sorted set from a given stored sorted set.
        +
        +
        Type Parameters:
        +
        E - the element class.
        +
        Parameters:
        +
        storedSortedSet - the base set.
        +
        config - is the cursor configuration to be used for all operations + performed via the new set instance; null may be specified to use the + default configuration.
        +
        Returns:
        +
        the configured set.
        +
        Throws:
        +
        java.lang.ClassCastException - if the given container is not a + StoredContainer.
        +
        +
      • +
      + + + +
        +
      • +

        iterator

        +
        public static <E> java.util.Iterator<E> iterator(java.util.Iterator<E> iter)
        +
        Clones an iterator preserving its current position.
        +
        +
        Type Parameters:
        +
        E - the element class.
        +
        Parameters:
        +
        iter - an iterator to clone.
        +
        Returns:
        +
        a new Iterator having the same position as the given + iterator.
        +
        Throws:
        +
        java.lang.ClassCastException - if the given iterator was not obtained via a + StoredCollection method.
        +
        +
      • +
      +
    • +
    +
  • +
+
+
+ + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/StoredContainer.html b/docs/java/com/sleepycat/collections/StoredContainer.html new file mode 100644 index 0000000..5b8fe1b --- /dev/null +++ b/docs/java/com/sleepycat/collections/StoredContainer.html @@ -0,0 +1,599 @@ + + + + + +StoredContainer (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
+ + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class StoredContainer

+
+
+ +
+ +
+
+
    +
  • + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Abstract Methods Concrete Methods 
      Modifier and TypeMethod and Description
      booleanareDuplicatesAllowed() +
      Returns whether duplicate keys are allowed in this container.
      +
      booleanareDuplicatesOrdered() +
      Returns whether duplicate keys are allowed and sorted by element value.
      +
      booleanareKeyRangesAllowed() +
      Returns whether key ranges are allowed in this container.
      +
      booleanareKeysRenumbered() +
      Returns whether keys are renumbered when insertions and deletions occur.
      +
      voidclear() +
      Removes all mappings or elements from this map or collection (optional + operation).
      +
      CursorConfiggetCursorConfig() +
      Returns the cursor configuration that is used for all operations + performed via this container.
      +
      booleanisEmpty() +
      Returns true if this map or collection contains no mappings or elements.
      +
      booleanisOrdered() +
      Returns whether keys are ordered in this container.
      +
      booleanisSecondary() +
      Returns whether this container is a view on a secondary database rather + than directly on a primary database.
      +
      booleanisTransactional() +
      Returns whether the databases underlying this container are + transactional.
      +
      booleanisWriteAllowed() +
      Returns true if this is a read-write container or false if this is a + read-only container.
      +
      abstract intsize() +
      Returns a non-transactional count of the records in the collection or + map.
      +
      +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
      • +
      +
    • +
    +
  • +
+
Method Detail

isWriteAllowed

public final boolean isWriteAllowed()

Returns true if this is a read-write container or false if this is a read-only container. This method does not exist in the standard Map or Collection interfaces.

Returns:
    whether write is allowed.

getCursorConfig

public final CursorConfig getCursorConfig()

Returns the cursor configuration that is used for all operations performed via this container. For example, if CursorConfig.getReadUncommitted returns true, data will be read that is modified but not committed. This method does not exist in the standard Map or Collection interfaces.

Returns:
    the cursor configuration, or null if no configuration has been specified.

isTransactional

public final boolean isTransactional()

Returns whether the databases underlying this container are transactional. Even in a transactional environment, a database will be transactional only if it was opened within a transaction or if the auto-commit option was specified when it was opened. This method does not exist in the standard Map or Collection interfaces.

Returns:
    whether the database is transactional.

areDuplicatesAllowed

public final boolean areDuplicatesAllowed()

Returns whether duplicate keys are allowed in this container. Duplicates are optionally allowed for HASH and BTREE databases. This method does not exist in the standard Map or Collection interfaces.

Note that the JE product only supports BTREE databases.

Returns:
    whether duplicates are allowed.

areDuplicatesOrdered

public final boolean areDuplicatesOrdered()

Returns whether duplicate keys are allowed and sorted by element value. Duplicates are optionally sorted for HASH and BTREE databases. This method does not exist in the standard Map or Collection interfaces.

Note that the JE product only supports BTREE databases, and duplicates are always sorted.

Returns:
    whether duplicates are ordered.

areKeysRenumbered

public final boolean areKeysRenumbered()

Returns whether keys are renumbered when insertions and deletions occur. Keys are optionally renumbered for RECNO databases. This method does not exist in the standard Map or Collection interfaces.

Note that the JE product does not support RECNO databases, and therefore keys are never renumbered.

Returns:
    whether keys are renumbered.

isOrdered

public final boolean isOrdered()

Returns whether keys are ordered in this container. Keys are ordered for BTREE, RECNO and QUEUE databases. This method does not exist in the standard Map or Collection interfaces.

Note that the JE product only supports BTREE databases, and therefore keys are always ordered.

Returns:
    whether keys are ordered.

areKeyRangesAllowed

public final boolean areKeyRangesAllowed()

Returns whether key ranges are allowed in this container. Key ranges are allowed only for BTREE databases. This method does not exist in the standard Map or Collection interfaces.

Note that the JE product only supports BTREE databases, and therefore key ranges are always allowed.

Returns:
    whether key ranges are allowed.

isSecondary

public final boolean isSecondary()

Returns whether this container is a view on a secondary database rather than directly on a primary database. This method does not exist in the standard Map or Collection interfaces.

Returns:
    whether the view is for a secondary database.

size

public abstract int size()

Returns a non-transactional count of the records in the collection or map. This method conforms to the Collection.size() and Map.size() interfaces.

This operation is faster than obtaining a count by scanning the collection manually, and will not perturb the current contents of the cache. However, the count is not guaranteed to be accurate if there are concurrent updates.

Returns:
    the number of records.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

isEmpty

public boolean isEmpty()

Returns true if this map or collection contains no mappings or elements. This method conforms to the Collection.isEmpty() and Map.isEmpty() interfaces.

Returns:
    whether the container is empty.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

clear

public void clear()

Removes all mappings or elements from this map or collection (optional operation). This method conforms to the Collection.clear() and Map.clear() interfaces.

Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the container is read-only.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
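A minimal usage sketch for these property accessors, assuming a StoredMap named map created over an open Database elsewhere (StoredMap extends StoredContainer, so the accessors apply to it directly):

    import com.sleepycat.collections.StoredContainer;

    // map is a StoredMap created elsewhere; every stored view is a StoredContainer.
    StoredContainer c = map;
    System.out.println("transactional:      " + c.isTransactional());
    System.out.println("write allowed:      " + c.isWriteAllowed());
    System.out.println("duplicates allowed: " + c.areDuplicatesAllowed());
    System.out.println("keys ordered:       " + c.isOrdered());
    System.out.println("key ranges allowed: " + c.areKeyRangesAllowed());
    System.out.println("record count:       " + c.size()); // non-transactional count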
diff --git a/docs/java/com/sleepycat/collections/StoredEntrySet.html b/docs/java/com/sleepycat/collections/StoredEntrySet.html
new file mode 100644
index 0000000..374b038
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/StoredEntrySet.html
@@ -0,0 +1,460 @@

StoredEntrySet (Oracle - Berkeley DB Java Edition API)

com.sleepycat.collections
Class StoredEntrySet<K,V>
All Implemented Interfaces:
    java.lang.Cloneable, java.lang.Iterable<java.util.Map.Entry<K,V>>, java.util.Collection<java.util.Map.Entry<K,V>>, java.util.Set<java.util.Map.Entry<K,V>>

Direct Known Subclasses:
    StoredSortedEntrySet

public class StoredEntrySet<K,V>
extends StoredCollection<java.util.Map.Entry<K,V>>
implements java.util.Set<java.util.Map.Entry<K,V>>

The Set returned by Map.entrySet(). This class may not be instantiated directly. Contrary to what is stated by Map.entrySet(), this class does support the add(java.util.Map.Entry<K,V>) and StoredCollection.addAll(java.util.Collection<? extends E>) methods.

The Map.Entry.setValue(V) method of the Map.Entry objects that are returned by this class and its iterators behaves just as the StoredIterator.set(E) method does.

Author:
    Mark Hayes
Method Detail

add

public boolean add(java.util.Map.Entry<K,V> mapEntry)

Adds the specified element to this set if it is not already present (optional operation). This method conforms to the Set.add(E) interface.

Specified by:
    add in interface java.util.Collection<java.util.Map.Entry<K,V>>
    add in interface java.util.Set<java.util.Map.Entry<K,V>>
Parameters:
    mapEntry - must be a Map.Entry instance.
Returns:
    true if the key-value pair was added to the set (and was not previously present).
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is read-only.
    java.lang.ClassCastException - if the mapEntry is not a Map.Entry instance.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

remove

public boolean remove(java.lang.Object mapEntry)

Removes the specified element from this set if it is present (optional operation). This method conforms to the Set.remove(java.lang.Object) interface.

Specified by:
    remove in interface java.util.Collection<java.util.Map.Entry<K,V>>
    remove in interface java.util.Set<java.util.Map.Entry<K,V>>
Parameters:
    mapEntry - is a Map.Entry instance to be removed.
Returns:
    true if the key-value pair was removed from the set, or false if the mapEntry is not a Map.Entry instance or is not present in the set.
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is read-only.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

contains

public boolean contains(java.lang.Object mapEntry)

Returns true if this set contains the specified element. This method conforms to the Set.contains(java.lang.Object) interface.

Specified by:
    contains in interface java.util.Collection<java.util.Map.Entry<K,V>>
    contains in interface java.util.Set<java.util.Map.Entry<K,V>>
Parameters:
    mapEntry - is a Map.Entry instance to be checked.
Returns:
    true if the key-value pair is present in the set, or false if the mapEntry is not a Map.Entry instance or is not present in the set.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

toString

public java.lang.String toString()

Description copied from class: StoredCollection
Converts the collection to a string representation for debugging. WARNING: The returned string may be very large.

Overrides:
    toString in class StoredCollection<java.util.Map.Entry<K,V>>
Returns:
    the string representation.
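A sketch of updating values through the entry set, assuming a writable StoredMap<String,String> named map over a Database that does not allow duplicates (setValue, like StoredIterator.set, is unsupported when duplicates are sorted); the "(none)" placeholder value is illustrative:

    java.util.Set<java.util.Map.Entry<String, String>> entries = map.entrySet();
    java.util.Iterator<java.util.Map.Entry<String, String>> i = entries.iterator();
    try {
        while (i.hasNext()) {
            java.util.Map.Entry<String, String> e = i.next();
            if (e.getValue().isEmpty()) {
                e.setValue("(none)"); // writes through, like StoredIterator.set
            }
        }
    } finally {
        StoredIterator.close(i); // stored iterators must be closed explicitly
    }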
diff --git a/docs/java/com/sleepycat/collections/StoredIterator.html b/docs/java/com/sleepycat/collections/StoredIterator.html
new file mode 100644
index 0000000..886a2c9
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/StoredIterator.html
@@ -0,0 +1,807 @@

StoredIterator (Oracle - Berkeley DB Java Edition API)

com.sleepycat.collections
Class StoredIterator<E>
All Implemented Interfaces:
    java.io.Closeable, java.lang.AutoCloseable, java.lang.Cloneable, java.util.Iterator<E>, java.util.ListIterator<E>

public class StoredIterator<E>
extends java.lang.Object
implements java.util.ListIterator<E>, java.lang.Cloneable, java.io.Closeable

The Iterator returned by all stored collections.

While in general this class conforms to the Iterator interface, it is important to note that all iterators for stored collections must be explicitly closed with close(). The static method close(java.util.Iterator) allows calling close for all iterators without harm to iterators that are not from stored collections, and also avoids casting. If a stored iterator is not closed, unpredictable behavior including process death may result.

This class implements the Iterator interface for all stored iterators. It also implements ListIterator because some list iterator methods apply to all stored iterators, for example, previous() and hasPrevious(). Other list iterator methods are always supported for lists, but for other types of collections are only supported under certain conditions. See nextIndex(), previousIndex(), add(E) and set(E) for details.

In addition, this class provides the following methods for stored collection iterators only. Note that the use of these methods is not compatible with the standard Java collections interface.

Author:
    Mark Hayes
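Given the warning above, the usual pattern is to close every iterator in a finally block. A minimal sketch, assuming `collection` is any stored collection view of element type String:

    java.util.Iterator<String> i = collection.iterator();
    try {
        while (i.hasNext()) {
            String element = i.next();
            // ... use element ...
        }
    } finally {
        StoredIterator.close(i); // a no-op if i is not a StoredIterator
    }

Because StoredIterator implements java.io.Closeable, try-with-resources also works when the concrete StoredIterator type is in hand.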
Method Summary

void add(E value)
    Inserts the specified element into the list or inserts a duplicate into other types of collections (optional operation).
void close()
    Closes this iterator.
static void close(java.util.Iterator<?> i)
    Closes the given iterator using close() if it is a StoredIterator.
int count()
    Returns the number of elements having the same key value as the key value of the element last returned by next() or previous().
StoredCollection<E> getCollection()
    Returns the collection associated with this iterator.
boolean hasNext()
    Returns true if this iterator has more elements when traversing in the forward direction.
boolean hasPrevious()
    Returns true if this iterator has more elements when traversing in the reverse direction.
boolean isReadModifyWrite()
    Returns whether write-locks will be obtained when reading with this cursor.
E next()
    Returns the next element in the iteration.
int nextIndex()
    Returns the index of the element that would be returned by a subsequent call to next.
E previous()
    Returns the previous element in the iteration.
int previousIndex()
    Returns the index of the element that would be returned by a subsequent call to previous.
void remove()
    Removes the last element that was returned by next or previous (optional operation).
void set(E value)
    Replaces the last element returned by next or previous with the specified element (optional operation).
void setReadModifyWrite(boolean lockForWrite)
    Changes whether write-locks will be obtained when reading with this cursor.

Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Methods inherited from interface java.util.Iterator:
forEachRemaining
Method Detail

close

public static void close(java.util.Iterator<?> i)

Closes the given iterator using close() if it is a StoredIterator. If the given iterator is not a StoredIterator, this method does nothing.

Parameters:
    i - is the iterator to close.
Throws:
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

isReadModifyWrite

public final boolean isReadModifyWrite()

Returns whether write-locks will be obtained when reading with this cursor. Obtaining write-locks can prevent deadlocks when reading and then modifying data.

Returns:
    the write-lock setting.

setReadModifyWrite

public void setReadModifyWrite(boolean lockForWrite)

Changes whether write-locks will be obtained when reading with this cursor. Obtaining write-locks can prevent deadlocks when reading and then modifying data.

Parameters:
    lockForWrite - the write-lock setting.

hasNext

public boolean hasNext()

Returns true if this iterator has more elements when traversing in the forward direction. False is returned if the iterator has been closed. This method conforms to the Iterator.hasNext() interface.

Specified by:
    hasNext in interface java.util.Iterator<E>
    hasNext in interface java.util.ListIterator<E>
Returns:
    whether next() will succeed.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

hasPrevious

public boolean hasPrevious()

Returns true if this iterator has more elements when traversing in the reverse direction. It returns false if the iterator has been closed. This method conforms to the ListIterator.hasPrevious() interface.

Specified by:
    hasPrevious in interface java.util.ListIterator<E>
Returns:
    whether previous() will succeed.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

next

public E next()

Returns the next element in the iteration. This method conforms to the Iterator.next() interface.

Specified by:
    next in interface java.util.Iterator<E>
    next in interface java.util.ListIterator<E>
Returns:
    the next element.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

previous

public E previous()

Returns the previous element in the iteration. This method conforms to the ListIterator.previous() interface.

Specified by:
    previous in interface java.util.ListIterator<E>
Returns:
    the previous element.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
nextIndex

public int nextIndex()

Returns the index of the element that would be returned by a subsequent call to next. This method conforms to the ListIterator.nextIndex() interface except that it returns Integer.MAX_VALUE for stored lists when positioned at the end of the list, rather than returning the list size as specified by the ListIterator interface. This is because the database size is not available.

Specified by:
    nextIndex in interface java.util.ListIterator<E>
Returns:
    the next index.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if this iterator's collection does not use record number keys.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

previousIndex

public int previousIndex()

Returns the index of the element that would be returned by a subsequent call to previous. This method conforms to the ListIterator.previousIndex() interface.

Specified by:
    previousIndex in interface java.util.ListIterator<E>
Returns:
    the previous index.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if this iterator's collection does not use record number keys.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

set

public void set(E value)

Replaces the last element returned by next or previous with the specified element (optional operation). This method conforms to the ListIterator.set(E) interface.

In order to call this method, if the underlying Database is transactional then a transaction must be active when creating the iterator.

Specified by:
    set in interface java.util.ListIterator<E>
Parameters:
    value - the new value.
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is a StoredKeySet (the set returned by Map.keySet()), or if duplicates are sorted since this would change the iterator position, or if the collection is indexed, or if the collection is read-only.
    java.lang.IllegalArgumentException - if an entity value binding is used and the primary key of the value given is different than the existing stored primary key.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
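A sketch combining setReadModifyWrite(true) with set(E), assuming `values` is a writable StoredCollection<String> over a non-duplicate, transactional Database with a transaction active when the iterator is created; the storedIterator(boolean) factory is assumed from the wider StoredCollection API, which is not shown on this page:

    StoredIterator<String> i = values.storedIterator(true); // write-allowed iterator (assumed API)
    try {
        i.setReadModifyWrite(true); // take write locks while reading to avoid deadlocks
        while (i.hasNext()) {
            String v = i.next();
            i.set(v.trim()); // replace the element last returned by next()
        }
    } finally {
        i.close();
    }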
remove

public void remove()

Removes the last element that was returned by next or previous (optional operation). This method conforms to the ListIterator.remove() interface except that when the collection is a list and the RECNO-RENUMBER access method is not used, list indices will not be renumbered.

In order to call this method, if the underlying Database is transactional then a transaction must be active when creating the iterator.

Note that for the JE product, RECNO-RENUMBER databases are not supported, and therefore list indices are never renumbered by this method.

Specified by:
    remove in interface java.util.Iterator<E>
    remove in interface java.util.ListIterator<E>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is a sublist, or if the collection is read-only.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

add

public void add(E value)

Inserts the specified element into the list or inserts a duplicate into other types of collections (optional operation). This method conforms to the ListIterator.add(E) interface when the collection is a list and the RECNO-RENUMBER access method is used. Otherwise, this method may only be called when duplicates are allowed. If duplicates are unsorted, the new value will be inserted in the same manner as list elements. If duplicates are sorted, the new value will be inserted in sort order.

Note that for the JE product, RECNO-RENUMBER databases are not supported, and therefore this method may only be used to add duplicates.

Specified by:
    add in interface java.util.ListIterator<E>
Parameters:
    value - the new value.
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is a sublist, or if the collection is indexed, or if the collection is read-only, or if the collection is a list and the RECNO-RENUMBER access method was not used, or if the collection is not a list and duplicates are not allowed.
    java.lang.IllegalStateException - if the collection is empty and is not a list with RECNO-RENUMBER access.
    java.lang.IllegalArgumentException - if a duplicate value is being added that already exists and duplicates are sorted.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

count

public int count()

Returns the number of elements having the same key value as the key value of the element last returned by next() or previous(). If no duplicates are allowed, 1 is always returned. This method does not exist in the standard Iterator or ListIterator interfaces.

Returns:
    the number of duplicates.
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if next() or previous() has not been called for this iterator, or if remove() or add() were called after the last call to next() or previous().

close

public void close()

Closes this iterator. This method does not exist in the standard Iterator or ListIterator interfaces.

After being closed, only the hasNext() and hasPrevious() methods may be called and these will return false. close() may also be called again and will do nothing. If other methods are called a NullPointerException will generally be thrown.

Specified by:
    close in interface java.io.Closeable
    close in interface java.lang.AutoCloseable
Throws:
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

getCollection

public final StoredCollection<E> getCollection()

Returns the collection associated with this iterator. This method does not exist in the standard Iterator or ListIterator interfaces.

Returns:
    the collection associated with this iterator.
diff --git a/docs/java/com/sleepycat/collections/StoredKeySet.html b/docs/java/com/sleepycat/collections/StoredKeySet.html
new file mode 100644
index 0000000..eab8422
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/StoredKeySet.html
@@ -0,0 +1,480 @@

StoredKeySet (Oracle - Berkeley DB Java Edition API)

com.sleepycat.collections
Class StoredKeySet<K>
All Implemented Interfaces:
    java.lang.Cloneable, java.lang.Iterable<K>, java.util.Collection<K>, java.util.Set<K>

Direct Known Subclasses:
    StoredSortedKeySet

public class StoredKeySet<K>
extends StoredCollection<K>
implements java.util.Set<K>

The Set returned by Map.keySet() and which can also be constructed directly if a Map is not needed. Since this collection is a set it only contains one element for each key, even when duplicates are allowed. Key set iterators are therefore particularly useful for enumerating the unique keys of a store or index that allows duplicates.

Author:
    Mark Hayes
Constructor Detail

StoredKeySet

public StoredKeySet(Database database,
                    EntryBinding<K> keyBinding,
                    boolean writeAllowed)

Creates a key set view of a Database.

Parameters:
    database - is the Database underlying the new collection.
    keyBinding - is the binding used to translate between key buffers and key objects.
    writeAllowed - is true to create a read-write collection or false to create a read-only collection.
Throws:
    java.lang.IllegalArgumentException - if formats are not consistently defined or a parameter is invalid.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
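A construction sketch, assuming an open Database named db; using TupleBinding.getPrimitiveBinding for the key binding is one common choice, not the only one:

    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.StoredIterator;
    import com.sleepycat.collections.StoredKeySet;

    EntryBinding<String> keyBinding = TupleBinding.getPrimitiveBinding(String.class);
    StoredKeySet<String> keys = new StoredKeySet<>(db, keyBinding, /*writeAllowed=*/ false);

    java.util.Iterator<String> i = keys.iterator();
    try {
        while (i.hasNext()) {
            System.out.println(i.next()); // each unique key once, even with duplicates
        }
    } finally {
        StoredIterator.close(i);
    }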

Method Detail

add

public boolean add(K key)

Adds the specified key to this set if it is not already present (optional operation). This method conforms to the Set.add(E) interface.

WARNING: When a key is added the value in the underlying data store will be empty, i.e., the byte array will be zero length. Such a record cannot be accessed using the Map interface unless the value binding supports zero length byte arrays.

Specified by:
    add in interface java.util.Collection<K>
    add in interface java.util.Set<K>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is indexed, or if the collection is read-only.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

remove

public boolean remove(java.lang.Object key)

Removes the specified key from this set if it is present (optional operation). If duplicates are allowed, this method removes all duplicates for the given key. This method conforms to the Set.remove(java.lang.Object) interface.

Specified by:
    remove in interface java.util.Collection<K>
    remove in interface java.util.Set<K>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is read-only.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

contains

public boolean contains(java.lang.Object key)

Returns true if this set contains the specified key. This method conforms to the Set.contains(java.lang.Object) interface.

Specified by:
    contains in interface java.util.Collection<K>
    contains in interface java.util.Set<K>
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
diff --git a/docs/java/com/sleepycat/collections/StoredMap.html b/docs/java/com/sleepycat/collections/StoredMap.html
new file mode 100644
index 0000000..afe5322
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/StoredMap.html
@@ -0,0 +1,1190 @@

StoredMap (Oracle - Berkeley DB Java Edition API)

com.sleepycat.collections
Class StoredMap<K,V>
All Implemented Interfaces:
    java.lang.Cloneable, java.util.concurrent.ConcurrentMap<K,V>, java.util.Map<K,V>

Direct Known Subclasses:
    StoredSortedMap

public class StoredMap<K,V>
extends StoredContainer
implements java.util.concurrent.ConcurrentMap<K,V>

A Map view of a Database.

In addition to the standard Map methods, this class provides the following methods for stored maps only. Note that the use of these methods is not compatible with the standard Java collections interface.

Author:
    Mark Hayes
Nested Class Summary

Nested classes/interfaces inherited from interface java.util.Map:
java.util.Map.Entry<K,V>
Method Summary

K append(V value)
    Appends a given value returning the newly assigned key.
boolean containsKey(java.lang.Object key)
    Returns true if this map contains the specified key.
boolean containsValue(java.lang.Object value)
    Returns true if this map contains the specified value.
java.util.Collection<V> duplicates(K key)
    Returns a new collection containing the values mapped to the given key in this map.
<PK> java.util.Map<PK,V> duplicatesMap(K secondaryKey, EntryBinding primaryKeyBinding)
    Returns a new map from primary key to value for the subset of records having a given secondary key (duplicates).
java.util.Set<java.util.Map.Entry<K,V>> entrySet()
    Returns a set view of the mappings contained in this map.
boolean equals(java.lang.Object other)
    Compares the specified object with this map for equality.
V get(java.lang.Object key)
    Returns the value to which this map maps the specified key.
int hashCode()
java.util.Set<K> keySet()
    Returns a set view of the keys contained in this map.
V put(K key, V value)
    Associates the specified value with the specified key in this map (optional operation).
void putAll(java.util.Map<? extends K,? extends V> map)
    Copies all of the mappings from the specified map to this map (optional operation).
V putIfAbsent(K key, V value)
    If the specified key is not already associated with a value, associate it with the given value.
V remove(java.lang.Object key)
    Removes the mapping for this key from this map if present (optional operation).
boolean remove(java.lang.Object key, java.lang.Object value)
    Remove entry for key only if currently mapped to given value.
V replace(K key, V value)
    Replace entry for key only if currently mapped to some value.
boolean replace(K key, V oldValue, V newValue)
    Replace entry for key only if currently mapped to given value.
int size()
    Returns a non-transactional count of the records in the collection or map.
java.lang.String toString()
    Converts the map to a string representation for debugging.
java.util.Collection<V> values()
    Returns a collection view of the values contained in this map.

Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Methods inherited from interface java.util.concurrent.ConcurrentMap:
compute, computeIfAbsent, computeIfPresent, forEach, getOrDefault, merge, replaceAll

Methods inherited from interface java.util.Map:
clear, isEmpty
Constructor Detail

StoredMap

public StoredMap(Database database,
                 EntryBinding<K> keyBinding,
                 EntryBinding<V> valueBinding,
                 boolean writeAllowed)

Creates a map view of a Database.

Parameters:
    database - is the Database underlying the new collection.
    keyBinding - is the binding used to translate between key buffers and key objects.
    valueBinding - is the binding used to translate between value buffers and value objects.
    writeAllowed - is true to create a read-write collection or false to create a read-only collection.
Throws:
    java.lang.IllegalArgumentException - if formats are not consistently defined or a parameter is invalid.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

StoredMap

public StoredMap(Database database,
                 EntryBinding<K> keyBinding,
                 EntryBinding<V> valueBinding,
                 PrimaryKeyAssigner keyAssigner)

Creates a map view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map.

Parameters:
    database - is the Database underlying the new collection.
    keyBinding - is the binding used to translate between key buffers and key objects.
    valueBinding - is the binding used to translate between value buffers and value objects.
    keyAssigner - is used by the append(V) method to assign primary keys.
Throws:
    java.lang.IllegalArgumentException - if formats are not consistently defined or a parameter is invalid.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

StoredMap

public StoredMap(Database database,
                 EntryBinding<K> keyBinding,
                 EntityBinding<V> valueEntityBinding,
                 boolean writeAllowed)

Creates a map entity view of a Database.

Parameters:
    database - is the Database underlying the new collection.
    keyBinding - is the binding used to translate between key buffers and key objects.
    valueEntityBinding - is the binding used to translate between key/value buffers and entity value objects.
    writeAllowed - is true to create a read-write collection or false to create a read-only collection.
Throws:
    java.lang.IllegalArgumentException - if formats are not consistently defined or a parameter is invalid.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

StoredMap

public StoredMap(Database database,
                 EntryBinding<K> keyBinding,
                 EntityBinding<V> valueEntityBinding,
                 PrimaryKeyAssigner keyAssigner)

Creates a map entity view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map.

Parameters:
    database - is the Database underlying the new collection.
    keyBinding - is the binding used to translate between key buffers and key objects.
    valueEntityBinding - is the binding used to translate between key/value buffers and entity value objects.
    keyAssigner - is used by the append(V) method to assign primary keys.
Throws:
    java.lang.IllegalArgumentException - if formats are not consistently defined or a parameter is invalid.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
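An end-to-end construction sketch for the first constructor, assuming a JE environment; the environment home path and database name below are illustrative only:

    import java.io.File;
    import com.sleepycat.bind.EntryBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.StoredMap;
    import com.sleepycat.je.*;

    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    Environment env = new Environment(new File("./je-home"), envConfig); // illustrative path

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    Database db = env.openDatabase(null, "demoDb", dbConfig); // illustrative name

    EntryBinding<String> keyBinding = TupleBinding.getPrimitiveBinding(String.class);
    EntryBinding<String> valueBinding = TupleBinding.getPrimitiveBinding(String.class);

    StoredMap<String, String> map = new StoredMap<>(db, keyBinding, valueBinding, /*writeAllowed=*/ true);
    map.put("greeting", "hello");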

Method Detail

get

public V get(java.lang.Object key)

Returns the value to which this map maps the specified key. If duplicates are allowed, this method returns the first duplicate, in the order in which duplicates are configured, that maps to the specified key. This method conforms to the Map.get(java.lang.Object) interface.

Specified by:
    get in interface java.util.Map<K,V>
Throws:
    OperationFailureException - if one of the Read Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

put

public V put(K key,
             V value)

Associates the specified value with the specified key in this map (optional operation). If duplicates are allowed and the specified key is already mapped to a value, this method appends the new duplicate after the existing duplicates. This method conforms to the Map.put(K, V) interface.

The key parameter may be null if an entity binding is used and the key will be derived from the value (entity) parameter. If an entity binding is used and the key parameter is non-null, then the key parameter must be equal to the key derived from the value parameter.

Specified by:
    put in interface java.util.Map<K,V>
Returns:
    the previous value associated with specified key, or null if there was no mapping for the key or if duplicates are allowed.
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is indexed, or if the collection is read-only.
    java.lang.IllegalArgumentException - if an entity value binding is used and the primary key of the value given is different than the existing stored primary key.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
append

public K append(V value)

Appends a given value returning the newly assigned key. If a PrimaryKeyAssigner is associated with the Store for this map, it will be used to assign the returned key. Otherwise the Store must be a QUEUE or RECNO database and the next available record number is assigned as the key. This method does not exist in the standard Map interface.

Note that for the JE product, QUEUE and RECNO databases are not supported, and therefore a PrimaryKeyAssigner must be associated with the map in order to call this method.

Parameters:
    value - the value to be appended.
Returns:
    the assigned key.
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is indexed, or if the collection is read-only, or if the Store has no PrimaryKeyAssigner and is not a QUEUE or RECNO database.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
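A sketch of calling append on JE, where an assigner is required. The sequential-integer assigner below is hypothetical and not safe across restarts, and the single assignKey(DatabaseEntry) method is assumed from the PrimaryKeyAssigner interface, which is not shown on this page:

    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.PrimaryKeyAssigner;
    import com.sleepycat.collections.StoredMap;
    import com.sleepycat.je.DatabaseEntry;

    // Hypothetical assigner handing out sequential integer keys.
    PrimaryKeyAssigner assigner = new PrimaryKeyAssigner() {
        private int next = 1;
        public void assignKey(DatabaseEntry keyData) {
            IntegerBinding.intToEntry(next++, keyData);
        }
    };

    StoredMap<Integer, String> records = new StoredMap<>(
        db,
        TupleBinding.getPrimitiveBinding(Integer.class),
        TupleBinding.getPrimitiveBinding(String.class),
        assigner);

    Integer key = records.append("first record"); // key chosen by the assigner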
remove

public V remove(java.lang.Object key)

Removes the mapping for this key from this map if present (optional operation). If duplicates are allowed, this method removes all duplicates for the given key. This method conforms to the Map.remove(java.lang.Object) interface.

Specified by:
    remove in interface java.util.Map<K,V>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.UnsupportedOperationException - if the collection is read-only.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

putIfAbsent

public V putIfAbsent(K key,
                     V value)

If the specified key is not already associated with a value, associate it with the given value. This method conforms to the ConcurrentMap.putIfAbsent(K, V) interface.

Specified by:
    putIfAbsent in interface java.util.concurrent.ConcurrentMap<K,V>
    putIfAbsent in interface java.util.Map<K,V>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

remove

public boolean remove(java.lang.Object key,
                      java.lang.Object value)

Remove entry for key only if currently mapped to given value. This method conforms to the ConcurrentMap.remove(Object,Object) interface.

Specified by:
    remove in interface java.util.concurrent.ConcurrentMap<K,V>
    remove in interface java.util.Map<K,V>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

replace

public V replace(K key,
                 V value)

Replace entry for key only if currently mapped to some value. This method conforms to the ConcurrentMap.replace(Object,Object) interface.

Specified by:
    replace in interface java.util.concurrent.ConcurrentMap<K,V>
    replace in interface java.util.Map<K,V>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

replace

public boolean replace(K key,
                       V oldValue,
                       V newValue)

Replace entry for key only if currently mapped to given value. This method conforms to the ConcurrentMap.replace(Object,Object,Object) interface.

Specified by:
    replace in interface java.util.concurrent.ConcurrentMap<K,V>
    replace in interface java.util.Map<K,V>
Throws:
    OperationFailureException - if one of the Write Operation Failures occurs.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
      + + + +
        +
      • +

        containsKey

        +
        public boolean containsKey(java.lang.Object key)
        +
        Returns true if this map contains the specified key. This method + conforms to the Map.containsKey(java.lang.Object) interface. + +
        +
        +
        Specified by:
        +
        containsKey in interface java.util.Map<K,V>
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        containsValue

        +
        public boolean containsValue(java.lang.Object value)
        +
        Returns true if this map contains the specified value. When an entity + binding is used, this method returns whether the map contains the + primary key and value mapping of the entity. This method conforms to + the Map.containsValue(java.lang.Object) interface. + +
        +
        +
        Specified by:
        +
        containsValue in interface java.util.Map<K,V>
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        putAll

        +
        public void putAll(java.util.Map<? extends K,? extends V> map)
        +
        Copies all of the mappings from the specified map to this map (optional operation). When duplicates are allowed, the mappings in the specified map are effectively appended to the existing mappings in this map; that is, no previously existing mappings in this map are replaced. This method conforms to the Map.putAll(java.util.Map<? extends K, ? extends V>) interface.
        +
        +
        Specified by:
        +
        putAll in interface java.util.Map<K,V>
        +
        Throws:
        +
        OperationFailureException - if one of the Write + Operation Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        java.lang.UnsupportedOperationException - if the collection is read-only, or + if the collection is indexed.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
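        A short sketch of the append behavior described above (not from the original
        Javadoc): it assumes a writable StoredMap whose underlying Database was
        opened with setSortedDuplicates(true); all names and values are illustrative.

            import java.util.HashMap;
            import java.util.Map;
            import com.sleepycat.collections.StoredMap;

            public class PutAllExample {
                // Sketch: map is assumed writable and to allow duplicates,
                // so putAll appends rather than replaces existing mappings.
                static void demo(StoredMap<String, String> map) {
                    map.put("fruit", "apple");
                    Map<String, String> batch = new HashMap<>();
                    batch.put("fruit", "pear");   // appended as a duplicate
                    batch.put("grain", "rice");
                    map.putAll(batch);
                    // Under the duplicates assumption, "fruit" now has 2 values.
                    System.out.println(map.duplicates("fruit").size());
                }
            }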
      + + + +
        +
      • +

        keySet

        +
        public java.util.Set<K> keySet()
        +
        Returns a set view of the keys contained in this map. A SortedSet is returned if the map supports key ranges. The + returned collection will be read-only if the map is read-only. This + method conforms to the Map.keySet() interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        keySet in interface java.util.Map<K,V>
        +
        Returns:
        +
        a StoredKeySet or a StoredSortedKeySet for this + map.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        See Also:
        +
        StoredContainer.areKeyRangesAllowed(), +StoredContainer.isWriteAllowed()
        +
        +
      • +
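        Because the returned set is a StoredCollection, each of its iterators holds
        an open cursor. A sketch of the explicit-close pattern using the
        StoredIterator.close helper; the map contents are assumed, the close pattern
        is the point.

            import java.util.Iterator;
            import com.sleepycat.collections.StoredIterator;
            import com.sleepycat.collections.StoredMap;

            public class KeySetExample {
                static void printKeys(StoredMap<String, String> map) {
                    Iterator<String> i = map.keySet().iterator();
                    try {
                        while (i.hasNext()) {
                            System.out.println(i.next());
                        }
                    } finally {
                        StoredIterator.close(i); // releases the underlying cursor
                    }
                }
            }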
      + + + +
        +
      • +

        entrySet

        +
        public java.util.Set<java.util.Map.Entry<K,V>> entrySet()
        +
        Returns a set view of the mappings contained in this map. A SortedSet is returned if the map supports key ranges. The + returned collection will be read-only if the map is read-only. This + method conforms to the Map.entrySet() interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        entrySet in interface java.util.Map<K,V>
        +
        Returns:
        +
        a StoredEntrySet or a StoredSortedEntrySet for + this map.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        See Also:
        +
        StoredContainer.areKeyRangesAllowed(), +StoredContainer.isWriteAllowed()
        +
        +
      • +
      + + + +
        +
      • +

        values

        +
        public java.util.Collection<V> values()
        +
        Returns a collection view of the values contained in this map. A SortedSet is returned if the map supports key ranges and the + value/entity binding can be used to derive the map's key from its + value/entity object. The returned collection will be read-only if the + map is read-only. This method conforms to the Map.values() + interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        values in interface java.util.Map<K,V>
        +
        Returns:
        +
        a StoredValueSet or a StoredSortedValueSet for + this map.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        See Also:
        +
        StoredContainer.areKeyRangesAllowed(), +StoredContainer.isWriteAllowed()
        +
        +
      • +
      + + + + + +
        +
      • +

        duplicates

        +
        public java.util.Collection<V> duplicates(K key)
        +
        Returns a new collection containing the values mapped to the given key + in this map. This collection's iterator() method is particularly useful + for iterating over the duplicates for a given key, since this is not + supported by the standard Map interface. This method does not exist in + the standard Map interface. + +

        If no mapping for the given key is present, an empty collection is + returned. If duplicates are not allowed, at most a single value will be + in the collection returned. If duplicates are allowed, the returned + collection's add() method may be used to add values for the given + key.

        +
        +
        Parameters:
        +
        key - is the key for which values are to be returned.
        +
        Returns:
        +
        the new collection.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
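        A sketch of iterating and extending the duplicates for one key (not from the
        original Javadoc): it assumes a writable map whose database allows
        duplicates; the key and values are placeholders.

            import java.util.Collection;
            import java.util.Iterator;
            import com.sleepycat.collections.StoredIterator;
            import com.sleepycat.collections.StoredMap;

            public class DuplicatesExample {
                // Sketch: map is assumed writable with duplicates allowed.
                static void demo(StoredMap<String, String> map) {
                    Collection<String> dups = map.duplicates("fruit");
                    dups.add("plum"); // adds another value for key "fruit"
                    Iterator<String> i = dups.iterator();
                    try {
                        while (i.hasNext()) {
                            System.out.println("fruit -> " + i.next());
                        }
                    } finally {
                        StoredIterator.close(i); // stored iterators hold a cursor
                    }
                }
            }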
      + + + + + +
        +
      • +

        duplicatesMap

        +
        public <PK> java.util.Map<PK,V> duplicatesMap(K secondaryKey,
        +                                              EntryBinding primaryKeyBinding)
        +
        Returns a new map from primary key to value for the subset of records + having a given secondary key (duplicates). This method does not exist + in the standard Map interface. + +

        If no mapping for the given secondary key is present, an empty map is returned. If duplicates are not allowed, the returned map will contain at most a single mapping.

        +
        +
        Type Parameters:
        +
        PK - the primary key class.
        +
        Parameters:
        +
        secondaryKey - is the secondary key for which duplicates values + will be represented by the returned map.
        +
        primaryKeyBinding - is the binding used for keys in the returned + map.
        +
        Returns:
        +
        the new map.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
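        A sketch of duplicatesMap (not from the original Javadoc): it assumes this
        map is a StoredMap view over a secondary index, so several primary records
        can share one secondary key; the secondary key "Smith" and String primary
        keys are placeholders.

            import java.util.Iterator;
            import java.util.Map;
            import com.sleepycat.bind.EntryBinding;
            import com.sleepycat.bind.tuple.TupleBinding;
            import com.sleepycat.collections.StoredIterator;
            import com.sleepycat.collections.StoredMap;

            public class DuplicatesMapExample {
                // Sketch: secondaryView is assumed to be backed by a secondary index.
                static void demo(StoredMap<String, String> secondaryView) {
                    EntryBinding<String> pkBinding =
                        TupleBinding.getPrimitiveBinding(String.class);
                    Map<String, String> byPrimaryKey =
                        secondaryView.duplicatesMap("Smith", pkBinding);
                    Iterator<Map.Entry<String, String>> i =
                        byPrimaryKey.entrySet().iterator();
                    try {
                        while (i.hasNext()) {
                            Map.Entry<String, String> e = i.next();
                            System.out.println(e.getKey() + " -> " + e.getValue());
                        }
                    } finally {
                        StoredIterator.close(i); // stored iterators hold a cursor
                    }
                }
            }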
      + + + +
        +
      • +

        equals

        +
        public boolean equals(java.lang.Object other)
        +
        Compares the specified object with this map for equality. A value + comparison is performed by this method and the stored values are + compared rather than calling the equals() method of each element. This + method conforms to the Map.equals(java.lang.Object) interface. + +
        +
        +
        Specified by:
        +
        equals in interface java.util.Map<K,V>
        +
        Overrides:
        +
        equals in class java.lang.Object
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        hashCode

        +
        public int hashCode()
        +
        +
        Specified by:
        +
        hashCode in interface java.util.Map<K,V>
        +
        Overrides:
        +
        hashCode in class java.lang.Object
        +
        +
      • +
      + + + +
        +
      • +

        size

        +
        public int size()
        +
        Description copied from class: StoredContainer
        +
        Returns a non-transactional count of the records in the collection or + map. This method conforms to the Collection.size() and + Map.size() interfaces. + + +

        This operation is faster than obtaining a count by scanning the + collection manually, and will not perturb the current contents of the + cache. However, the count is not guaranteed to be accurate if there are + concurrent updates.

        +
        +
        +
        Specified by:
        +
        size in interface java.util.Map<K,V>
        +
        Specified by:
        +
        size in class StoredContainer
        +
        Returns:
        +
        the number of records. + +
        +
        +
      • +
      + + + +
        +
      • +

        toString

        +
        public java.lang.String toString()
        +
        Converts the map to a string representation for debugging. WARNING: All mappings will be converted to strings, so the returned string may be very large.
        +
        +
        Overrides:
        +
        toString in class java.lang.Object
        +
        Returns:
        +
        the string representation.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      +
    • +
    +
  • +
+
+
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/StoredSortedEntrySet.html b/docs/java/com/sleepycat/collections/StoredSortedEntrySet.html new file mode 100644 index 0000000..59b2aac --- /dev/null +++ b/docs/java/com/sleepycat/collections/StoredSortedEntrySet.html @@ -0,0 +1,638 @@ + + + + + +StoredSortedEntrySet (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class StoredSortedEntrySet<K,V>

+
+
+ +
+ +
+
+
    +
  • + + + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      java.util.Comparator<? super java.util.Map.Entry<K,V>>comparator() +
      Returns null since comparators are not supported.
      +
      java.util.Map.Entry<K,V>first() +
      Returns the first (lowest) element currently in this sorted set.
      +
      java.util.SortedSet<java.util.Map.Entry<K,V>>headSet(java.util.Map.Entry<K,V> toMapEntry) +
      Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry.
      +
      java.util.SortedSet<java.util.Map.Entry<K,V>>headSet(java.util.Map.Entry<K,V> toMapEntry, + boolean toInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry, optionally including toMapEntry.
      +
      java.util.Map.Entry<K,V>last() +
      Returns the last (highest) element currently in this sorted set.
      +
      java.util.SortedSet<java.util.Map.Entry<K,V>>subSet(java.util.Map.Entry<K,V> fromMapEntry, + boolean fromInclusive, + java.util.Map.Entry<K,V> toMapEntry, + boolean toInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry and strictly less than toMapEntry, + optionally including fromMapEntry and toMapEntry.
      +
      java.util.SortedSet<java.util.Map.Entry<K,V>>subSet(java.util.Map.Entry<K,V> fromMapEntry, + java.util.Map.Entry<K,V> toMapEntry) +
      Returns a view of the portion of this sorted set whose elements range + from fromMapEntry, inclusive, to toMapEntry, exclusive.
      +
      java.util.SortedSet<java.util.Map.Entry<K,V>>tailSet(java.util.Map.Entry<K,V> fromMapEntry) +
      Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromMapEntry.
      +
      java.util.SortedSet<java.util.Map.Entry<K,V>>tailSet(java.util.Map.Entry<K,V> fromMapEntry, + boolean fromInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry, optionally including fromMapEntry.
      +
      + + + +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.SortedSet

        +spliterator
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Set

        +add, addAll, clear, contains, containsAll, equals, hashCode, isEmpty, iterator, remove, removeAll, retainAll, size, toArray, toArray
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Collection

        +parallelStream, removeIf, stream
      • +
      +
        +
      • + + +

        Methods inherited from interface java.lang.Iterable

        +forEach
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        comparator

        +
        public java.util.Comparator<? super java.util.Map.Entry<K,V>> comparator()
        +
        Returns null since comparators are not supported. The natural ordering + of a stored collection is data byte order, whether the data classes + implement the Comparable interface or not. + This method does not conform to the SortedSet.comparator() + interface.
        +
        +
        Specified by:
        +
        comparator in interface java.util.SortedSet<java.util.Map.Entry<K,V>>
        +
        Returns:
        +
        null.
        +
        +
      • +
      + + + +
        +
      • +

        first

        +
        public java.util.Map.Entry<K,V> first()
        +
        Returns the first (lowest) element currently in this sorted set. + This method conforms to the SortedSet.first() interface.
        +
        +
        Specified by:
        +
        first in interface java.util.SortedSet<java.util.Map.Entry<K,V>>
        +
        Returns:
        +
        the first element. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        last

        +
        public java.util.Map.Entry<K,V> last()
        +
        Returns the last (highest) element currently in this sorted set. + This method conforms to the SortedSet.last() interface.
        +
        +
        Specified by:
        +
        last in interface java.util.SortedSet<java.util.Map.Entry<K,V>>
        +
        Returns:
        +
        the last element. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        headSet

        +
        public java.util.SortedSet<java.util.Map.Entry<K,V>> headSet(java.util.Map.Entry<K,V> toMapEntry)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry. + This method conforms to the SortedSet.headSet(E) interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        headSet in interface java.util.SortedSet<java.util.Map.Entry<K,V>>
        +
        Parameters:
        +
        toMapEntry - the upper bound.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        headSet

        +
        public java.util.SortedSet<java.util.Map.Entry<K,V>> headSet(java.util.Map.Entry<K,V> toMapEntry,
        +                                                             boolean toInclusive)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly less than toMapEntry, optionally including toMapEntry. + This method does not exist in the standard SortedSet interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        toMapEntry - is the upper bound.
        +
        toInclusive - is true to include toMapEntry.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        tailSet

        +
        public java.util.SortedSet<java.util.Map.Entry<K,V>> tailSet(java.util.Map.Entry<K,V> fromMapEntry)
        +
        Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromMapEntry. + This method conforms to the SortedSet.tailSet(E) interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        tailSet in interface java.util.SortedSet<java.util.Map.Entry<K,V>>
        +
        Parameters:
        +
        fromMapEntry - is the lower bound.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        tailSet

        +
        public java.util.SortedSet<java.util.Map.Entry<K,V>> tailSet(java.util.Map.Entry<K,V> fromMapEntry,
        +                                                             boolean fromInclusive)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry, optionally including fromMapEntry. + This method does not exist in the standard SortedSet interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        fromMapEntry - is the lower bound.
        +
        fromInclusive - is true to include fromMapEntry.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        subSet

        +
        public java.util.SortedSet<java.util.Map.Entry<K,V>> subSet(java.util.Map.Entry<K,V> fromMapEntry,
        +                                                            java.util.Map.Entry<K,V> toMapEntry)
        +
        Returns a view of the portion of this sorted set whose elements range + from fromMapEntry, inclusive, to toMapEntry, exclusive. + This method conforms to the SortedSet.subSet(E, E) interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        subSet in interface java.util.SortedSet<java.util.Map.Entry<K,V>>
        +
        Parameters:
        +
        fromMapEntry - is the lower bound.
        +
        toMapEntry - is the upper bound.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        subSet

        +
        public java.util.SortedSet<java.util.Map.Entry<K,V>> subSet(java.util.Map.Entry<K,V> fromMapEntry,
        +                                                            boolean fromInclusive,
        +                                                            java.util.Map.Entry<K,V> toMapEntry,
        +                                                            boolean toInclusive)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly greater than fromMapEntry and strictly less than toMapEntry, + optionally including fromMapEntry and toMapEntry. + This method does not exist in the standard SortedSet interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        fromMapEntry - is the lower bound.
        +
        fromInclusive - is true to include fromMapEntry.
        +
        toMapEntry - is the upper bound.
        +
        toInclusive - is true to include toMapEntry.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
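        A sketch of first(), last(), and a range view on a stored sorted entry set
        (not from the original Javadoc): it assumes a StoredSortedMap<String,String>
        and, as a labeled assumption, that only the key portion of the bound entry
        matters when defining a range, so a plain SimpleImmutableEntry can serve as
        the bound.

            import java.util.AbstractMap;
            import java.util.Map;
            import java.util.SortedSet;
            import com.sleepycat.collections.StoredSortedMap;

            public class EntrySetRangeExample {
                // Sketch: sortedMap is assumed to be a StoredSortedMap<String,String>,
                // whose entrySet() is documented to return a StoredSortedEntrySet.
                static void demo(StoredSortedMap<String, String> sortedMap) {
                    SortedSet<Map.Entry<String, String>> entries =
                        (SortedSet<Map.Entry<String, String>>) sortedMap.entrySet();

                    System.out.println("lowest:  " + entries.first());
                    System.out.println("highest: " + entries.last());

                    // Assumption: only the key of the bound entry defines the range.
                    Map.Entry<String, String> bound =
                        new AbstractMap.SimpleImmutableEntry<>("m", null);
                    SortedSet<Map.Entry<String, String>> head = entries.headSet(bound);
                    System.out.println(head.size() + " entries below \"m\"");
                }
            }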
      +
    • +
    +
  • +
+
+
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/StoredSortedKeySet.html b/docs/java/com/sleepycat/collections/StoredSortedKeySet.html new file mode 100644 index 0000000..e545349 --- /dev/null +++ b/docs/java/com/sleepycat/collections/StoredSortedKeySet.html @@ -0,0 +1,704 @@ + + + + + +StoredSortedKeySet (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class StoredSortedKeySet<K>

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    java.lang.Cloneable, java.lang.Iterable<K>, java.util.Collection<K>, java.util.Set<K>, java.util.SortedSet<K>
    +
    +
    +
    +
    public class StoredSortedKeySet<K>
    +extends StoredKeySet<K>
    +implements java.util.SortedSet<K>
    +
    The SortedSet returned by Map.keySet(), which can also be constructed directly if a Map is not needed. Since this collection is a set, it contains only one element for each key, even when duplicates are allowed. Key set iterators are therefore particularly useful for enumerating the unique keys of a store or index that allows duplicates.

    In addition to the standard SortedSet methods, this class provides the + following methods for stored sorted sets only. Note that the use of these + methods is not compatible with the standard Java collections interface.

    +
    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
+
+
    +
  • + + + + + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      java.util.Comparator<? super K>comparator() +
      Returns null since comparators are not supported.
      +
      Kfirst() +
      Returns the first (lowest) element currently in this sorted set.
      +
      java.util.SortedSet<K>headSet(K toKey) +
      Returns a view of the portion of this sorted set whose elements are + strictly less than toKey.
      +
      java.util.SortedSet<K>headSet(K toKey, + boolean toInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly less than toKey, optionally including toKey.
      +
      Klast() +
      Returns the last (highest) element currently in this sorted set.
      +
      java.util.SortedSet<K>subSet(K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey.
      +
      java.util.SortedSet<K>subSet(K fromKey, + K toKey) +
      Returns a view of the portion of this sorted set whose elements range + from fromKey, inclusive, to toKey, exclusive.
      +
      java.util.SortedSet<K>tailSet(K fromKey) +
      Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromKey.
      +
      java.util.SortedSet<K>tailSet(K fromKey, + boolean fromInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey, optionally including fromKey.
      +
      + + + +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.SortedSet

        +spliterator
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Set

        +add, addAll, clear, contains, containsAll, equals, hashCode, isEmpty, iterator, remove, removeAll, retainAll, size, toArray, toArray
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Collection

        +parallelStream, removeIf, stream
      • +
      +
        +
      • + + +

        Methods inherited from interface java.lang.Iterable

        +forEach
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        StoredSortedKeySet

        +
        public StoredSortedKeySet(Database database,
        +                          EntryBinding<K> keyBinding,
        +                          boolean writeAllowed)
        +
        Creates a sorted key set view of a Database.
        +
        +
        Parameters:
        +
        database - is the Database underlying the new collection.
        +
        keyBinding - is the binding used to translate between key buffers + and key objects.
        +
        writeAllowed - is true to create a read-write collection or false + to create a read-only collection.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
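        A self-contained sketch of constructing this class directly (not from the
        original Javadoc): the environment path, database name, and binding are
        placeholders chosen for illustration.

            import java.io.File;
            import com.sleepycat.bind.EntryBinding;
            import com.sleepycat.bind.tuple.TupleBinding;
            import com.sleepycat.collections.StoredSortedKeySet;
            import com.sleepycat.je.Database;
            import com.sleepycat.je.DatabaseConfig;
            import com.sleepycat.je.Environment;
            import com.sleepycat.je.EnvironmentConfig;

            public class KeySetSetup {
                // Sketch: opens a JE environment and builds a sorted key set view.
                public static void main(String[] args) {
                    EnvironmentConfig envConfig = new EnvironmentConfig();
                    envConfig.setAllowCreate(true);
                    Environment env =
                        new Environment(new File("/tmp/je-env"), envConfig);

                    DatabaseConfig dbConfig = new DatabaseConfig();
                    dbConfig.setAllowCreate(true);
                    Database db = env.openDatabase(null, "keys", dbConfig);

                    EntryBinding<String> keyBinding =
                        TupleBinding.getPrimitiveBinding(String.class);
                    StoredSortedKeySet<String> keys =
                        new StoredSortedKeySet<>(db, keyBinding, /*writeAllowed=*/ true);
                    System.out.println("empty? " + keys.isEmpty());

                    db.close();
                    env.close();
                }
            }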
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        comparator

        +
        public java.util.Comparator<? super K> comparator()
        +
        Returns null since comparators are not supported. The natural ordering + of a stored collection is data byte order, whether the data classes + implement the Comparable interface or not. + This method does not conform to the SortedSet.comparator() + interface.
        +
        +
        Specified by:
        +
        comparator in interface java.util.SortedSet<K>
        +
        Returns:
        +
        null.
        +
        +
      • +
      + + + +
        +
      • +

        first

        +
        public K first()
        +
        Returns the first (lowest) element currently in this sorted set. + This method conforms to the SortedSet.first() interface.
        +
        +
        Specified by:
        +
        first in interface java.util.SortedSet<K>
        +
        Returns:
        +
        the first element. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        last

        +
        public K last()
        +
        Returns the last (highest) element currently in this sorted set. + This method conforms to the SortedSet.last() interface.
        +
        +
        Specified by:
        +
        last in interface java.util.SortedSet<K>
        +
        Returns:
        +
        the last element. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        headSet

        +
        public java.util.SortedSet<K> headSet(K toKey)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly less than toKey. + This method conforms to the SortedSet.headSet(E) interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        headSet in interface java.util.SortedSet<K>
        +
        Parameters:
        +
        toKey - is the upper bound.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        headSet

        +
        public java.util.SortedSet<K> headSet(K toKey,
        +                                      boolean toInclusive)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly less than toKey, optionally including toKey. + This method does not exist in the standard SortedSet interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        toKey - is the upper bound.
        +
        toInclusive - is true to include toKey.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        tailSet

        +
        public java.util.SortedSet<K> tailSet(K fromKey)
        +
        Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromKey. + This method conforms to the SortedSet.tailSet(E) interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        tailSet in interface java.util.SortedSet<K>
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        tailSet

        +
        public java.util.SortedSet<K> tailSet(K fromKey,
        +                                      boolean fromInclusive)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey, optionally including fromKey. + This method does not exist in the standard SortedSet interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        fromInclusive - is true to include fromKey.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        subSet

        +
        public java.util.SortedSet<K> subSet(K fromKey,
        +                                     K toKey)
        +
        Returns a view of the portion of this sorted set whose elements range + from fromKey, inclusive, to toKey, exclusive. + This method conforms to the SortedSet.subSet(E, E) interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        subSet in interface java.util.SortedSet<K>
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        toKey - is the upper bound.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        subSet

        +
        public java.util.SortedSet<K> subSet(K fromKey,
        +                                     boolean fromInclusive,
        +                                     K toKey,
        +                                     boolean toInclusive)
        +
        Returns a view of the portion of this sorted set whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey. + This method does not exist in the standard SortedSet interface. + +

        Note that the return value is a StoredCollection and must be treated + as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        fromInclusive - is true to include fromKey.
        +
        toKey - is the upper bound.
        +
        toInclusive - is true to include toKey.
        +
        Returns:
        +
        the subset.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
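        A sketch contrasting the standard half-open ranges with the stored-only
        inclusive-bound variants documented above; the key bounds are placeholders
        and the set is assumed non-empty.

            import java.util.SortedSet;
            import com.sleepycat.collections.StoredSortedKeySet;

            public class KeyRangeExample {
                // Sketch: demonstrates the inclusive-bound variants that are not
                // part of java.util.SortedSet.
                static void demo(StoredSortedKeySet<String> keys) {
                    // Standard SortedSet semantics: half-open range [a, n).
                    SortedSet<String> standard = keys.subSet("a", "n");

                    // Stored-only variants: either bound may be made inclusive.
                    SortedSet<String> closed = keys.subSet("a", true, "n", true);
                    SortedSet<String> upTo = keys.headSet("n", /*toInclusive=*/ true);

                    System.out.println(standard.size() + " vs " + closed.size()
                        + ", head=" + upTo.size());
                }
            }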
      +
    • +
    +
  • +
+
+
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/StoredSortedMap.html b/docs/java/com/sleepycat/collections/StoredSortedMap.html new file mode 100644 index 0000000..b0bfe53 --- /dev/null +++ b/docs/java/com/sleepycat/collections/StoredSortedMap.html @@ -0,0 +1,792 @@ + + + + + +StoredSortedMap (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class StoredSortedMap<K,V>

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    java.lang.Cloneable, java.util.concurrent.ConcurrentMap<K,V>, java.util.Map<K,V>, java.util.SortedMap<K,V>
    +
    +
    +
    +
    public class StoredSortedMap<K,V>
    +extends StoredMap<K,V>
    +implements java.util.SortedMap<K,V>
    +
    A SortedMap view of a Database. + +

    In addition to the standard SortedMap methods, this class provides the + following methods for stored sorted maps only. Note that the use of these + methods is not compatible with the standard Java collections interface.

    +
    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
+
+ +
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        StoredSortedMap

        +
        public StoredSortedMap(Database database,
        +                       EntryBinding<K> keyBinding,
        +                       EntryBinding<V> valueBinding,
        +                       boolean writeAllowed)
        +
        Creates a sorted map view of a Database.
        +
        +
        Parameters:
        +
        database - is the Database underlying the new collection.
        +
        keyBinding - is the binding used to translate between key buffers + and key objects.
        +
        valueBinding - is the binding used to translate between value + buffers and value objects.
        +
        writeAllowed - is true to create a read-write collection or false + to create a read-only collection.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        StoredSortedMap

        +
        public StoredSortedMap(Database database,
        +                       EntryBinding<K> keyBinding,
        +                       EntryBinding<V> valueBinding,
        +                       PrimaryKeyAssigner keyAssigner)
        +
        Creates a sorted map view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map.
        +
        +
        Parameters:
        +
        database - is the Database underlying the new collection.
        +
        keyBinding - is the binding used to translate between key buffers + and key objects.
        +
        valueBinding - is the binding used to translate between value + buffers and value objects.
        +
        keyAssigner - is used by the StoredMap.append(V) method to assign + primary keys.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        StoredSortedMap

        +
        public StoredSortedMap(Database database,
        +                       EntryBinding<K> keyBinding,
        +                       EntityBinding<V> valueEntityBinding,
        +                       boolean writeAllowed)
        +
        Creates a sorted map entity view of a Database.
        +
        +
        Parameters:
        +
        database - is the Database underlying the new collection.
        +
        keyBinding - is the binding used to translate between key buffers + and key objects.
        +
        valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
        +
        writeAllowed - is true to create a read-write collection or false + to create a read-only collection.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        StoredSortedMap

        +
        public StoredSortedMap(Database database,
        +                       EntryBinding<K> keyBinding,
        +                       EntityBinding<V> valueEntityBinding,
        +                       PrimaryKeyAssigner keyAssigner)
        +
        Creates a sorted map entity view of a Database with a PrimaryKeyAssigner. Writing is allowed for the created map.
        +
        +
        Parameters:
        +
        database - is the Database underlying the new collection.
        +
        keyBinding - is the binding used to translate between key buffers + and key objects.
        +
        valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
        +
        keyAssigner - is used by the StoredMap.append(V) method to assign + primary keys.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
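        A minimal construction sketch for the first constructor above (not from the
        original Javadoc): db is assumed to be an open JE Database whose keys sort
        usefully in byte order (for example, tuple-encoded strings); the binding is
        a placeholder.

            import com.sleepycat.bind.EntryBinding;
            import com.sleepycat.bind.tuple.TupleBinding;
            import com.sleepycat.collections.StoredSortedMap;
            import com.sleepycat.je.Database;

            public class SortedMapSetup {
                // Sketch: builds a read-write SortedMap view of an open Database.
                static StoredSortedMap<String, String> open(Database db) {
                    EntryBinding<String> binding =
                        TupleBinding.getPrimitiveBinding(String.class);
                    return new StoredSortedMap<>(db, binding, binding, true);
                }
            }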
      +
    • +
    + +
      +
    • + + +

      Method Detail

      + + + +
        +
      • +

        comparator

        +
        public java.util.Comparator<? super K> comparator()
        +
        Returns null since comparators are not supported. The natural ordering + of a stored collection is data byte order, whether the data classes + implement the Comparable interface or not. + This method does not conform to the SortedMap.comparator() + interface.
        +
        +
        Specified by:
        +
        comparator in interface java.util.SortedMap<K,V>
        +
        Returns:
        +
        null.
        +
        +
      • +
      + + + +
        +
      • +

        firstKey

        +
        public K firstKey()
        +
        Returns the first (lowest) key currently in this sorted map. + This method conforms to the SortedMap.firstKey() interface.
        +
        +
        Specified by:
        +
        firstKey in interface java.util.SortedMap<K,V>
        +
        Returns:
        +
        the first key. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + +
        +
      • +

        lastKey

        +
        public K lastKey()
        +
        Returns the last (highest) key currently in this sorted map. This method conforms to the SortedMap.lastKey() interface.
        +
        +
        Specified by:
        +
        lastKey in interface java.util.SortedMap<K,V>
        +
        Returns:
        +
        the last key. + +
        +
        Throws:
        +
        OperationFailureException - if one of the Read Operation + Failures occurs.
        +
        EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        headMap

        +
        public java.util.SortedMap<K,V> headMap(K toKey)
        +
        Returns a view of the portion of this sorted map whose keys are strictly less than toKey. This method conforms to the SortedMap.headMap(K) interface.

        Note that the return value is a StoredSortedMap and must be treated as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        headMap in interface java.util.SortedMap<K,V>
        +
        Parameters:
        +
        toKey - is the upper bound.
        +
        Returns:
        +
        the submap.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        headMap

        +
        public java.util.SortedMap<K,V> headMap(K toKey,
        +                                        boolean toInclusive)
        +
        Returns a view of the portion of this sorted map whose elements are + strictly less than toKey, optionally including toKey. + This method does not exist in the standard SortedMap interface. + +

        Note that the return value is a StoredSortedMap and must be treated as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        toKey - is the upper bound.
        +
        toInclusive - is true to include toKey.
        +
        Returns:
        +
        the submap.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        tailMap

        +
        public java.util.SortedMap<K,V> tailMap(K fromKey)
        +
        Returns a view of the portion of this sorted map whose elements are + greater than or equal to fromKey. + This method conforms to the SortedMap.tailMap(K) interface. + +

        Note that the return value is a StoredSortedMap and must be treated as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        tailMap in interface java.util.SortedMap<K,V>
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        Returns:
        +
        the submap.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        tailMap

        +
        public java.util.SortedMap<K,V> tailMap(K fromKey,
        +                                        boolean fromInclusive)
        +
        Returns a view of the portion of this sorted map whose elements are + strictly greater than fromKey, optionally including fromKey. + This method does not exist in the standard SortedMap interface. + +

        Note that the return value is a StoredSortedMap and must be treated as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        fromInclusive - is true to include fromKey.
        +
        Returns:
        +
        the submap.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        subMap

        +
        public java.util.SortedMap<K,V> subMap(K fromKey,
        +                                       K toKey)
        +
        Returns a view of the portion of this sorted map whose elements range + from fromKey, inclusive, to toKey, exclusive. + This method conforms to the SortedMap.subMap(K, K) interface. + +

        Note that the return value is a StoredSortedMap and must be treated as such; for example, its iterators must be explicitly closed.

        +
        +
        Specified by:
        +
        subMap in interface java.util.SortedMap<K,V>
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        toKey - is the upper bound.
        +
        Returns:
        +
        the submap.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
      + + + + + +
        +
      • +

        subMap

        +
        public java.util.SortedMap<K,V> subMap(K fromKey,
        +                                       boolean fromInclusive,
        +                                       K toKey,
        +                                       boolean toInclusive)
        +
        Returns a view of the portion of this sorted map whose elements are + strictly greater than fromKey and strictly less than toKey, + optionally including fromKey and toKey. + This method does not exist in the standard SortedMap interface. + +

        Note that the return value is a StoredSortedMap and must be treated as such; for example, its iterators must be explicitly closed.

        +
        +
        Parameters:
        +
        fromKey - is the lower bound.
        +
        fromInclusive - is true to include fromKey.
        +
        toKey - is the upper bound.
        +
        toInclusive - is true to include toKey.
        +
        Returns:
        +
        the submap.
        +
        Throws:
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
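        A sketch of the map range views documented above, including the stored-only
        inclusive-bound variants; the key bounds are placeholders and the map is
        assumed to hold tuple-encoded String keys.

            import java.util.SortedMap;
            import com.sleepycat.collections.StoredSortedMap;

            public class SubMapExample {
                static void demo(StoredSortedMap<String, String> map) {
                    SortedMap<String, String> half = map.subMap("a", "n"); // [a, n)
                    SortedMap<String, String> closed =
                        map.subMap("a", true, "n", true);                  // [a, n]
                    SortedMap<String, String> tail =
                        map.tailMap("n", /*fromInclusive=*/ false);        // (n, ...)
                    System.out.println(half.size() + " " + closed.size() + " "
                        + tail.size());
                }
            }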
      +
    • +
    +
  • +
+
+
Berkeley DB Java Edition
version 7.5.11 +
+
+ + +

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

+ + diff --git a/docs/java/com/sleepycat/collections/StoredSortedValueSet.html b/docs/java/com/sleepycat/collections/StoredSortedValueSet.html new file mode 100644 index 0000000..50c7f82 --- /dev/null +++ b/docs/java/com/sleepycat/collections/StoredSortedValueSet.html @@ -0,0 +1,703 @@ + + + + + +StoredSortedValueSet (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
Berkeley DB Java Edition
version 7.5.11 +
+
+ + + +
+
com.sleepycat.collections
+

Class StoredSortedValueSet<E>

+
+
+ +
+
    +
  • +
    +
    All Implemented Interfaces:
    +
    java.lang.Cloneable, java.lang.Iterable<E>, java.util.Collection<E>, java.util.Set<E>, java.util.SortedSet<E>
    +
    +
    +
    +
    public class StoredSortedValueSet<E>
    +extends StoredValueSet<E>
    +implements java.util.SortedSet<E>
    +
    The SortedSet returned by Map.values(), which can also be constructed directly if a Map is not needed. Although this collection is a set, it may contain duplicate values. All elements are guaranteed to be unique only if an entity value binding is used.

    In addition to the standard SortedSet methods, this class provides the + following methods for stored sorted value sets only. Note that the use of + these methods is not compatible with the standard Java collections + interface.

    +
    +
    +
    Author:
    +
    Mark Hayes
    +
    +
  • +
+
+
+
    +
  • + + + + + +
      +
    • + + +

      Method Summary

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      All Methods Instance Methods Concrete Methods 
      Modifier and TypeMethod and Description
      java.util.Comparator<? super E>comparator() +
      Returns null since comparators are not supported.
      +
      Efirst() +
      Returns the first (lowest) element currently in this sorted set.
      +
      java.util.SortedSet<E>headSet(E toValue) +
      Returns a view of the portion of this sorted set whose elements are + strictly less than toValue.
      +
      java.util.SortedSet<E>headSet(E toValue, + boolean toInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly less than toValue, optionally including toValue.
      +
      Elast() +
      Returns the last (highest) element currently in this sorted set.
      +
      java.util.SortedSet<E>subSet(E fromValue, + boolean fromInclusive, + E toValue, + boolean toInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue and strictly less than toValue, + optionally including fromValue and toValue.
      +
      java.util.SortedSet<E>subSet(E fromValue, + E toValue) +
      Returns a view of the portion of this sorted set whose elements range + from fromValue, inclusive, to toValue, exclusive.
      +
      java.util.SortedSet<E>tailSet(E fromValue) +
      Returns a view of the portion of this sorted set whose elements are + greater than or equal to fromValue.
      +
      java.util.SortedSet<E>tailSet(E fromValue, + boolean fromInclusive) +
      Returns a view of the portion of this sorted set whose elements are + strictly greater than fromValue, optionally including fromValue.
      +
      + + + +
        +
      • + + +

        Methods inherited from class java.lang.Object

        +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.SortedSet

        +spliterator
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Set

        +add, addAll, clear, contains, containsAll, equals, hashCode, isEmpty, iterator, remove, removeAll, retainAll, size, toArray, toArray
      • +
      +
        +
      • + + +

        Methods inherited from interface java.util.Collection

        +parallelStream, removeIf, stream
      • +
      +
        +
      • + + +

        Methods inherited from interface java.lang.Iterable

        +forEach
      • +
      +
    • +
    +
  • +
+
+
+
    +
  • + +
      +
    • + + +

      Constructor Detail

      + + + +
        +
      • +

        StoredSortedValueSet

        +
        public StoredSortedValueSet(Database database,
        +                            EntityBinding<E> valueEntityBinding,
        +                            boolean writeAllowed)
        +
        Creates a sorted value set entity view of a Database.
        +
        +
        Parameters:
        +
        database - is the Database underlying the new collection.
        +
        valueEntityBinding - is the binding used to translate between + key/value buffers and entity value objects.
        +
        writeAllowed - is true to create a read-write collection or false + to create a read-only collection.
        +
        Throws:
        +
        java.lang.IllegalArgumentException - if formats are not consistently + defined or a parameter is invalid.
        +
        RuntimeExceptionWrapper - if a checked exception is thrown, + including a DatabaseException on BDB (C edition).
        +
        +
      • +
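        A construction sketch for the constructor above (not from the original
        Javadoc): the EntityBinding here is a deliberately trivial placeholder that
        derives the key from the value itself, which is what lets a value set
        guarantee unique elements; real bindings would encode a richer entity.

            import com.sleepycat.bind.EntityBinding;
            import com.sleepycat.bind.tuple.StringBinding;
            import com.sleepycat.collections.StoredSortedValueSet;
            import com.sleepycat.je.Database;
            import com.sleepycat.je.DatabaseEntry;

            public class ValueSetSetup {
                // Sketch entity binding: the entity is a String that is both
                // the key and the data of each record.
                static final EntityBinding<String> BINDING =
                        new EntityBinding<String>() {
                    public String entryToObject(DatabaseEntry key,
                                                DatabaseEntry data) {
                        return StringBinding.entryToString(key);
                    }
                    public void objectToKey(String entity, DatabaseEntry key) {
                        StringBinding.stringToEntry(entity, key);
                    }
                    public void objectToData(String entity, DatabaseEntry data) {
                        StringBinding.stringToEntry(entity, data);
                    }
                };

                // Sketch: db is assumed to be an open JE Database.
                static StoredSortedValueSet<String> open(Database db) {
                    return new StoredSortedValueSet<>(db, BINDING, true);
                }
            }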
      +
    • +
    + +
      +
    • + + +

      Method Detail

comparator

public java.util.Comparator<? super E> comparator()

Returns null since comparators are not supported. The natural ordering of a stored collection is data byte order, whether or not the data classes implement the Comparable interface. This method does not conform to the SortedSet.comparator() interface.

Specified by:
comparator in interface java.util.SortedSet<E>

Returns:
null.

first

public E first()

Returns the first (lowest) element currently in this sorted set. This method conforms to the SortedSet.first() interface.

Specified by:
first in interface java.util.SortedSet<E>

Returns:
the first element.

Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

last

public E last()

Returns the last (highest) element currently in this sorted set. This method conforms to the SortedSet.last() interface.

Specified by:
last in interface java.util.SortedSet<E>

Returns:
the last element.

Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

headSet

public java.util.SortedSet<E> headSet(E toValue)

Returns a view of the portion of this sorted set whose elements are strictly less than toValue. This method conforms to the SortedSet.headSet(E) interface.

Note that the return value is a StoredCollection and must be treated as such; for example, its iterators must be explicitly closed.

Specified by:
headSet in interface java.util.SortedSet<E>

Parameters:
toValue - the upper bound.

Returns:
the subset.

Throws:
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

headSet

public java.util.SortedSet<E> headSet(E toValue,
                                      boolean toInclusive)

Returns a view of the portion of this sorted set whose elements are strictly less than toValue, optionally including toValue. This method does not exist in the standard SortedSet interface.

Note that the return value is a StoredCollection and must be treated as such; for example, its iterators must be explicitly closed.

Parameters:
toValue - is the upper bound.
toInclusive - is true to include toValue.

Returns:
the subset.

Throws:
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

tailSet

public java.util.SortedSet<E> tailSet(E fromValue)

Returns a view of the portion of this sorted set whose elements are greater than or equal to fromValue. This method conforms to the SortedSet.tailSet(E) interface.

Note that the return value is a StoredCollection and must be treated as such; for example, its iterators must be explicitly closed.

Specified by:
tailSet in interface java.util.SortedSet<E>

Parameters:
fromValue - is the lower bound.

Returns:
the subset.

Throws:
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

tailSet

public java.util.SortedSet<E> tailSet(E fromValue,
                                      boolean fromInclusive)

Returns a view of the portion of this sorted set whose elements are strictly greater than fromValue, optionally including fromValue. This method does not exist in the standard SortedSet interface.

Note that the return value is a StoredCollection and must be treated as such; for example, its iterators must be explicitly closed.

Parameters:
fromValue - is the lower bound.
fromInclusive - is true to include fromValue.

Returns:
the subset.

Throws:
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

subSet

public java.util.SortedSet<E> subSet(E fromValue,
                                     E toValue)

Returns a view of the portion of this sorted set whose elements range from fromValue, inclusive, to toValue, exclusive. This method conforms to the SortedSet.subSet(E, E) interface.

Note that the return value is a StoredCollection and must be treated as such; for example, its iterators must be explicitly closed.

Specified by:
subSet in interface java.util.SortedSet<E>

Parameters:
fromValue - is the lower bound.
toValue - is the upper bound.

Returns:
the subset.

Throws:
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).

subSet

public java.util.SortedSet<E> subSet(E fromValue,
                                     boolean fromInclusive,
                                     E toValue,
                                     boolean toInclusive)

Returns a view of the portion of this sorted set whose elements are strictly greater than fromValue and strictly less than toValue, optionally including fromValue and toValue. This method does not exist in the standard SortedSet interface.

Note that the return value is a StoredCollection and must be treated as such; for example, its iterators must be explicitly closed.

Parameters:
fromValue - is the lower bound.
fromInclusive - is true to include fromValue.
toValue - is the upper bound.
toInclusive - is true to include toValue.

Returns:
the subset.

Throws:
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C edition).
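Because every range view returned above is a StoredCollection whose iterators must be explicitly closed, a sketch of scanning a head set follows; StoredSortedValueSet and StoredIterator.close are from this package, while Part, parts, and bound are hypothetical.

import java.util.Iterator;
import java.util.SortedSet;
import com.sleepycat.collections.StoredIterator;
import com.sleepycat.collections.StoredSortedValueSet;

public class RangeScanExample {
    // Prints every element strictly less than bound.
    static void printHead(StoredSortedValueSet<Part> parts, Part bound) {
        SortedSet<Part> head = parts.headSet(bound);
        Iterator<Part> i = head.iterator();
        try {
            while (i.hasNext()) {
                System.out.println(i.next());
            }
        } finally {
            StoredIterator.close(i); // stored iterators must be closed
        }
    }
}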
Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/collections/StoredValueSet.html b/docs/java/com/sleepycat/collections/StoredValueSet.html
new file mode 100644
index 0000000..628ebda
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/StoredValueSet.html
@@ -0,0 +1,516 @@

StoredValueSet (Oracle - Berkeley DB Java Edition API)
com.sleepycat.collections
Class StoredValueSet<E>

All Implemented Interfaces:
java.lang.Cloneable, java.lang.Iterable<E>, java.util.Collection<E>, java.util.Set<E>

Direct Known Subclasses:
StoredSortedValueSet

public class StoredValueSet<E>
extends StoredCollection<E>
implements java.util.Set<E>

The Set returned by Map.values() and Map.duplicates(), and which can also be constructed directly if a Map is not needed. Although this collection is a set, it may contain duplicate values. Only if an entity value binding is used are all elements guaranteed to be unique.

Author:
Mark Hayes

Constructor Detail
StoredValueSet

public StoredValueSet(Database database,
                      EntryBinding<E> valueBinding,
                      boolean writeAllowed)

Creates a value set view of a Database.

Parameters:
database - is the Database underlying the new collection.
valueBinding - is the binding used to translate between value buffers and value objects.
writeAllowed - is true to create a read-write collection or false to create a read-only collection.

Throws:
java.lang.IllegalArgumentException - if formats are not consistently defined or a parameter is invalid.
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C Edition).

StoredValueSet

public StoredValueSet(Database database,
                      EntityBinding<E> valueEntityBinding,
                      boolean writeAllowed)

Creates a value set entity view of a Database.

Parameters:
database - is the Database underlying the new collection.
valueEntityBinding - is the binding used to translate between key/value buffers and entity value objects.
writeAllowed - is true to create a read-write collection or false to create a read-only collection.

Throws:
java.lang.IllegalArgumentException - if formats are not consistently defined or a parameter is invalid.
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C Edition).
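As a small illustration of the first constructor, the sketch below views the String values of a database directly, without going through a StoredMap; the database contents are an assumption, and TupleBinding.getPrimitiveBinding supplies the EntryBinding.

import com.sleepycat.bind.EntryBinding;
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.collections.StoredValueSet;
import com.sleepycat.je.Database;

public class ValueSetExample {
    // db is an open Database whose values are tuple-encoded Strings.
    static StoredValueSet<String> open(Database db) {
        EntryBinding<String> valueBinding =
            TupleBinding.getPrimitiveBinding(String.class);
        return new StoredValueSet<>(db, valueBinding, true);
    }
}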
Method Detail

add

public boolean add(E entity)

Adds the specified entity to this set if it is not already present (optional operation). This method conforms to the Set.add(E) interface.

Specified by:
add in interface java.util.Collection<E>
add in interface java.util.Set<E>

Parameters:
entity - is the entity to be added.

Returns:
true if the entity was added, that is the key-value pair represented by the entity was not previously present in the collection.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.UnsupportedOperationException - if the collection is read-only, if the collection is indexed, or if an entity binding is not used.
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C Edition).

contains

public boolean contains(java.lang.Object value)

Returns true if this set contains the specified element. This method conforms to the Set.contains(java.lang.Object) interface.

Specified by:
contains in interface java.util.Collection<E>
contains in interface java.util.Set<E>

Parameters:
value - the value to check.

Returns:
whether the set contains the given value.

Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C Edition).

remove

public boolean remove(java.lang.Object value)

Removes the specified value from this set if it is present (optional operation). If an entity binding is used, the key-value pair represented by the given entity is removed. If an entity binding is not used, the first occurrence of a key-value pair with the given value is removed. This method conforms to the Set.remove(java.lang.Object) interface.

Specified by:
remove in interface java.util.Collection<E>
remove in interface java.util.Set<E>

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.UnsupportedOperationException - if the collection is read-only.
RuntimeExceptionWrapper - if a checked exception is thrown, including a DatabaseException on BDB (C Edition).
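To make the binding-dependent semantics above concrete, here is a sketch; partSet (entity binding), nameSet (plain value binding over a database with duplicates), and the Part class are all hypothetical.

import com.sleepycat.collections.StoredValueSet;

public class DuplicateSemanticsExample {
    static void demo(StoredValueSet<Part> partSet,
                     StoredValueSet<String> nameSet) {
        // With an entity binding, add() and remove() act on the exact
        // key-value pair represented by the entity.
        partSet.add(new Part("p1", "widget"));
        partSet.remove(new Part("p1", "widget"));

        // With a plain value binding, add() throws
        // UnsupportedOperationException, and remove() deletes only the
        // first occurrence of a duplicated value.
        nameSet.remove("widget");
    }
}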
Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/collections/TransactionRunner.html b/docs/java/com/sleepycat/collections/TransactionRunner.html
new file mode 100644
index 0000000..543519c
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/TransactionRunner.html
@@ -0,0 +1,649 @@

TransactionRunner (Oracle - Berkeley DB Java Edition API)
com.sleepycat.collections
Class TransactionRunner

public class TransactionRunner
extends java.lang.Object

Starts a transaction, calls TransactionWorker.doWork(), and handles transaction retry and exceptions. To perform a transaction, the user implements the TransactionWorker interface and passes an instance of that class to the run method. A usage sketch follows this class description.

A single TransactionRunner instance may be used by any number of threads for any number of transactions.

The behavior of the run() method depends on whether the environment is transactional, whether nested transactions are enabled, and whether a transaction is already active.

• When the run() method is called in a transactional environment and no transaction is active for the current thread, a new transaction is started before calling doWork(). If LockConflictException is thrown by doWork(), the transaction will be aborted and the process will be repeated up to the maximum number of retries. If another exception is thrown by doWork() or the maximum number of retries has occurred, the transaction will be aborted and the exception will be rethrown by the run() method. If no exception is thrown by doWork(), the transaction will be committed. The run() method will not attempt to commit or abort a transaction if it has already been committed or aborted by doWork().

• When the run() method is called and a transaction is active for the current thread, and nested transactions are enabled, a nested transaction is started before calling doWork(). The transaction that is active when calling the run() method will become the parent of the nested transaction. The nested transaction will be committed or aborted by the run() method following the same rules described above. Note that nested transactions may not be enabled for the JE product, since JE does not support nested transactions.

• When the run() method is called in a non-transactional environment, the doWork() method is called without starting a transaction. The run() method will return without committing or aborting a transaction, and any exceptions thrown by the doWork() method will be thrown by the run() method.

• When the run() method is called and a transaction is active for the current thread and nested transactions are not enabled (the default), the same rules as above apply. All the operations performed by the doWork() method will be part of the currently active transaction.

In a transactional environment, the rules described above support nested calls to the run() method and guarantee that the outermost call will cause the transaction to be committed or aborted. This is true whether or not nested transactions are supported or enabled. Note that nested transactions are provided as an optimization for improving concurrency but do not change the meaning of the outermost transaction. Nested transactions are not currently supported by the JE product.

Author:
Mark Hayes
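A minimal usage sketch, assuming env is an open transactional Environment and accounts is a stored map view of a transactional Database (both hypothetical); the work runs in one transaction and is retried as a unit on lock conflicts.

import java.util.Map;
import com.sleepycat.collections.TransactionRunner;
import com.sleepycat.collections.TransactionWorker;
import com.sleepycat.je.Environment;

public class TransferExample {
    static void transfer(Environment env, final Map<String, Long> accounts)
            throws Exception {
        TransactionRunner runner = new TransactionRunner(env);
        runner.run(new TransactionWorker() {
            public void doWork() throws Exception {
                // Both puts commit or abort together.
                accounts.put("alice", accounts.get("alice") - 10);
                accounts.put("bob", accounts.get("bob") + 10);
            }
        });
    }
}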
Field Detail

DEFAULT_MAX_RETRIES

public static final int DEFAULT_MAX_RETRIES

The default maximum number of retries.

See Also:
Constant Field Values

Constructor Detail

TransactionRunner

public TransactionRunner(Environment env)

Creates a transaction runner for a given Berkeley DB environment. The default maximum number of retries (DEFAULT_MAX_RETRIES) and a null (default) TransactionConfig will be used.

Parameters:
env - is the environment for running transactions.

TransactionRunner

public TransactionRunner(Environment env,
                         int maxRetries,
                         TransactionConfig config)

Creates a transaction runner for a given Berkeley DB environment and with a given number of maximum retries.

Parameters:
env - is the environment for running transactions.
maxRetries - is the maximum number of retries that will be performed when deadlocks are detected.
config - the transaction configuration used for calling Environment.beginTransaction(com.sleepycat.je.Transaction, com.sleepycat.je.TransactionConfig), or null to use the default configuration. The configuration object is not cloned, and any modifications to it will impact subsequent transactions.
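A sketch of the three-argument constructor; the retry count of 20 and the read-committed isolation setting are arbitrary illustrations, not recommendations.

import com.sleepycat.collections.TransactionRunner;
import com.sleepycat.je.Environment;
import com.sleepycat.je.TransactionConfig;

public class RunnerConfigExample {
    static TransactionRunner create(Environment env) {
        TransactionConfig config = new TransactionConfig();
        config.setReadCommitted(true); // passed to beginTransaction
        return new TransactionRunner(env, 20, config);
    }
}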
Method Detail

getMaxRetries

public int getMaxRetries()

Returns the maximum number of retries that will be performed when deadlocks are detected.

Returns:
the maximum number of retries.

setMaxRetries

public void setMaxRetries(int maxRetries)

Changes the maximum number of retries that will be performed when deadlocks are detected. Calling this method does not impact transactions already running.

Parameters:
maxRetries - the maximum number of retries.

getAllowNestedTransactions

public boolean getAllowNestedTransactions()

Returns whether nested transactions will be created if run() is called when a transaction is already active for the current thread. By default this property is false.

Returns:
whether nested transactions will be created. Note that this method always returns false in the JE product, since nested transactions are not supported by JE.

setAllowNestedTransactions

public void setAllowNestedTransactions(boolean allowNestedTxn)

Changes whether nested transactions will be created if run() is called when a transaction is already active for the current thread. Calling this method does not impact transactions already running.

Parameters:
allowNestedTxn - whether nested transactions will be created. Note that true may not be passed to this method in the JE product, since nested transactions are not supported by JE.
run

public void run(TransactionWorker worker)
         throws DatabaseException,
                java.lang.Exception

Calls the TransactionWorker.doWork() method and, for transactional environments, may begin and end a transaction. If the environment given is non-transactional, a transaction will not be used but the doWork() method will still be called. See the class description for more information.

Parameters:
worker - the TransactionWorker.

Throws:
LockConflictException - when it is thrown by doWork() and the maximum number of retries has occurred. The transaction will have been aborted by this method.
java.lang.Exception - when any other exception is thrown by doWork(). The exception will first be unwrapped by calling ExceptionUnwrapper.unwrap(java.lang.Exception). The transaction will have been aborted by this method.
DatabaseException
handleException

public int handleException(java.lang.Exception exception,
                           int retries,
                           int maxRetries)
                    throws java.lang.Exception

Handles exceptions that occur during a transaction, and may implement transaction retry policy. The transaction is aborted by the run method before calling this method.

The default implementation of this method throws the exception parameter if it is not an instance of LockConflictException and otherwise returns the maxRetries parameter value. This method can be overridden to throw a different exception or return a different number of retries. For example (a sketch of such an override follows this method description):

• This method could call Thread.sleep for a short interval to allow other transactions to finish.

• This method could return a different maxRetries value depending on the exception that occurred.

• This method could throw an application-defined exception when the retries value is greater or equal to the maxRetries and a LockConflictException occurs, to override the default behavior which is to throw the LockConflictException.

Parameters:
exception - an exception that was thrown by the TransactionWorker.doWork() method or thrown when beginning or committing the transaction. If the retries value is greater or equal to maxRetries when this method returns normally, this exception will be thrown by the run method.
retries - the current value of a counter that starts out at zero and is incremented when each retry is performed.
maxRetries - the maximum retries to be performed. By default, this value is set to getMaxRetries(). This method may return a different maximum retries value to override that default.

Returns:
the maximum number of retries to perform. The default policy is to return the maxRetries parameter value if the exception parameter value is an instance of LockConflictException.

Throws:
java.lang.Exception - to cause the exception to be thrown by the run method. The default policy is to throw the exception parameter value if it is not an instance of LockConflictException.

Since:
3.4
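A sketch of such an override, corresponding to the first bullet above: back off briefly before each retry. The 100 ms delay is an arbitrary illustration.

import com.sleepycat.collections.TransactionRunner;
import com.sleepycat.je.Environment;
import com.sleepycat.je.LockConflictException;

public class BackoffRunner extends TransactionRunner {
    public BackoffRunner(Environment env) {
        super(env);
    }

    @Override
    public int handleException(Exception exception, int retries, int maxRetries)
            throws Exception {
        if (exception instanceof LockConflictException) {
            Thread.sleep(100); // let competing transactions finish
            return maxRetries; // keep the default retry limit
        }
        throw exception; // other exceptions are rethrown, as by default
    }
}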
Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/collections/TransactionWorker.html b/docs/java/com/sleepycat/collections/TransactionWorker.html
new file mode 100644
index 0000000..dbe776f
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/TransactionWorker.html
@@ -0,0 +1,246 @@

TransactionWorker (Oracle - Berkeley DB Java Edition API)

com.sleepycat.collections
Interface TransactionWorker

Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/collections/TupleSerialFactory.html b/docs/java/com/sleepycat/collections/TupleSerialFactory.html
new file mode 100644
index 0000000..a442acb
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/TupleSerialFactory.html
@@ -0,0 +1,423 @@

TupleSerialFactory (Oracle - Berkeley DB Java Edition API)
com.sleepycat.collections
Class TupleSerialFactory

public class TupleSerialFactory
extends java.lang.Object

Creates stored collections having tuple keys and serialized entity values. The entity classes must be Serializable and must implement the MarshalledTupleKeyEntity interface. The key classes must either implement the MarshalledTupleEntry interface or be one of the Java primitive type classes. Underlying binding objects are created automatically.

Author:
Mark Hayes

Constructor Detail

TupleSerialFactory

public TupleSerialFactory(ClassCatalog catalog)

Creates a tuple-serial factory for a given environment and class catalog.

Parameters:
catalog - the ClassCatalog.

      Method Detail

getCatalog

public final ClassCatalog getCatalog()

Returns the class catalog associated with this factory.

Returns:
the catalog.

newMap

public <K,V extends MarshalledTupleKeyEntity> StoredMap<K,V> newMap(Database db,
                                                                    java.lang.Class<K> keyClass,
                                                                    java.lang.Class<V> valueBaseClass,
                                                                    boolean writeAllowed)

Creates a map from a previously opened Database object.

Type Parameters:
K - the key class.
V - the value base class.

Parameters:
db - the previously opened Database object.
keyClass - is the class used for map keys. It must implement the MarshalledTupleEntry interface or be one of the Java primitive type classes.
valueBaseClass - the base class of the entity values for this store. It must implement the MarshalledTupleKeyEntity interface.
writeAllowed - is true to create a read-write collection or false to create a read-only collection.

Returns:
the map.

newSortedMap

public <K,V extends MarshalledTupleKeyEntity> StoredSortedMap<K,V> newSortedMap(Database db,
                                                                                java.lang.Class<K> keyClass,
                                                                                java.lang.Class<V> valueBaseClass,
                                                                                boolean writeAllowed)

Creates a sorted map from a previously opened Database object.

Type Parameters:
K - the key class.
V - the value base class.

Parameters:
db - the previously opened Database object.
keyClass - is the class used for map keys. It must implement the MarshalledTupleEntry interface or be one of the Java primitive type classes.
valueBaseClass - the base class of the entity values for this store. It must implement the MarshalledTupleKeyEntity interface.
writeAllowed - is true to create a read-write collection or false to create a read-only collection.

Returns:
the sorted map.
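Putting the factory together end to end, a sketch follows; catalogDb and partDb are assumed open Databases, and Part is a hypothetical Serializable class implementing MarshalledTupleKeyEntity.

import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.collections.StoredSortedMap;
import com.sleepycat.collections.TupleSerialFactory;
import com.sleepycat.je.Database;

public class FactoryExample {
    static StoredSortedMap<String, Part> open(Database catalogDb,
                                              Database partDb) {
        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
        TupleSerialFactory factory = new TupleSerialFactory(catalog);
        return factory.newSortedMap(partDb, String.class, Part.class, true);
    }
}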
Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/collections/class-use/CurrentTransaction.html b/docs/java/com/sleepycat/collections/class-use/CurrentTransaction.html
new file mode 100644
index 0000000..4e54cfc
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/CurrentTransaction.html
@@ -0,0 +1,174 @@

Uses of Class com.sleepycat.collections.CurrentTransaction

diff --git a/docs/java/com/sleepycat/collections/class-use/MapEntryParameter.html b/docs/java/com/sleepycat/collections/class-use/MapEntryParameter.html
new file mode 100644
index 0000000..2859174
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/MapEntryParameter.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.collections.MapEntryParameter
No usage of com.sleepycat.collections.MapEntryParameter

diff --git a/docs/java/com/sleepycat/collections/class-use/PrimaryKeyAssigner.html b/docs/java/com/sleepycat/collections/class-use/PrimaryKeyAssigner.html
new file mode 100644
index 0000000..46e09e3
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/PrimaryKeyAssigner.html
@@ -0,0 +1,198 @@

Uses of Interface com.sleepycat.collections.PrimaryKeyAssigner

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredCollection.html b/docs/java/com/sleepycat/collections/class-use/StoredCollection.html
new file mode 100644
index 0000000..db322a6
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredCollection.html
@@ -0,0 +1,222 @@

Uses of Class com.sleepycat.collections.StoredCollection

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredCollections.html b/docs/java/com/sleepycat/collections/class-use/StoredCollections.html
new file mode 100644
index 0000000..ef77770
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredCollections.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.collections.StoredCollections
No usage of com.sleepycat.collections.StoredCollections

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredContainer.html b/docs/java/com/sleepycat/collections/class-use/StoredContainer.html
new file mode 100644
index 0000000..82c2eaa
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredContainer.html
@@ -0,0 +1,243 @@

Uses of Class com.sleepycat.collections.StoredContainer

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredEntrySet.html b/docs/java/com/sleepycat/collections/class-use/StoredEntrySet.html
new file mode 100644
index 0000000..706f19d
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredEntrySet.html
@@ -0,0 +1,173 @@

Uses of Class com.sleepycat.collections.StoredEntrySet

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredIterator.html b/docs/java/com/sleepycat/collections/class-use/StoredIterator.html
new file mode 100644
index 0000000..a245bb3
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredIterator.html
@@ -0,0 +1,199 @@

Uses of Class com.sleepycat.collections.StoredIterator

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredKeySet.html b/docs/java/com/sleepycat/collections/class-use/StoredKeySet.html
new file mode 100644
index 0000000..770fa52
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredKeySet.html
@@ -0,0 +1,174 @@

Uses of Class com.sleepycat.collections.StoredKeySet

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredMap.html b/docs/java/com/sleepycat/collections/class-use/StoredMap.html
new file mode 100644
index 0000000..2ab212e
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredMap.html
@@ -0,0 +1,191 @@

Uses of Class com.sleepycat.collections.StoredMap

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredSortedEntrySet.html b/docs/java/com/sleepycat/collections/class-use/StoredSortedEntrySet.html
new file mode 100644
index 0000000..3a47532
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredSortedEntrySet.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.collections.StoredSortedEntrySet
No usage of com.sleepycat.collections.StoredSortedEntrySet

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredSortedKeySet.html b/docs/java/com/sleepycat/collections/class-use/StoredSortedKeySet.html
new file mode 100644
index 0000000..f83d228
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredSortedKeySet.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.collections.StoredSortedKeySet
No usage of com.sleepycat.collections.StoredSortedKeySet

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredSortedMap.html b/docs/java/com/sleepycat/collections/class-use/StoredSortedMap.html
new file mode 100644
index 0000000..befcf9e
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredSortedMap.html
@@ -0,0 +1,176 @@

Uses of Class com.sleepycat.collections.StoredSortedMap

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredSortedValueSet.html b/docs/java/com/sleepycat/collections/class-use/StoredSortedValueSet.html
new file mode 100644
index 0000000..8cfb4bf
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredSortedValueSet.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.collections.StoredSortedValueSet
No usage of com.sleepycat.collections.StoredSortedValueSet

diff --git a/docs/java/com/sleepycat/collections/class-use/StoredValueSet.html b/docs/java/com/sleepycat/collections/class-use/StoredValueSet.html
new file mode 100644
index 0000000..b804221
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/StoredValueSet.html
@@ -0,0 +1,174 @@

Uses of Class com.sleepycat.collections.StoredValueSet

diff --git a/docs/java/com/sleepycat/collections/class-use/TransactionRunner.html b/docs/java/com/sleepycat/collections/class-use/TransactionRunner.html
new file mode 100644
index 0000000..8b2afe8
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/TransactionRunner.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.collections.TransactionRunner
No usage of com.sleepycat.collections.TransactionRunner

diff --git a/docs/java/com/sleepycat/collections/class-use/TransactionWorker.html b/docs/java/com/sleepycat/collections/class-use/TransactionWorker.html
new file mode 100644
index 0000000..0316984
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/TransactionWorker.html
@@ -0,0 +1,174 @@

Uses of Interface com.sleepycat.collections.TransactionWorker

diff --git a/docs/java/com/sleepycat/collections/class-use/TupleSerialFactory.html b/docs/java/com/sleepycat/collections/class-use/TupleSerialFactory.html
new file mode 100644
index 0000000..92f290a
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/class-use/TupleSerialFactory.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.collections.TupleSerialFactory
No usage of com.sleepycat.collections.TupleSerialFactory

diff --git a/docs/java/com/sleepycat/collections/package-frame.html b/docs/java/com/sleepycat/collections/package-frame.html
new file mode 100644
index 0000000..4c49cfc
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/package-frame.html
@@ -0,0 +1,40 @@

com.sleepycat.collections (Oracle - Berkeley DB Java Edition API)
Interfaces / Classes

diff --git a/docs/java/com/sleepycat/collections/package-summary.html b/docs/java/com/sleepycat/collections/package-summary.html
new file mode 100644
index 0000000..8f8c3e8
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/package-summary.html
@@ -0,0 +1,282 @@

Package com.sleepycat.collections
Data access based on the standard Java collections API. Examples can be found in je/examples/collections. Build and run directions are in the installation notes.

diff --git a/docs/java/com/sleepycat/collections/package-tree.html b/docs/java/com/sleepycat/collections/package-tree.html
new file mode 100644
index 0000000..d4d899d
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/package-tree.html
@@ -0,0 +1,180 @@

Hierarchy For Package com.sleepycat.collections
Class Hierarchy / Interface Hierarchy

diff --git a/docs/java/com/sleepycat/collections/package-use.html b/docs/java/com/sleepycat/collections/package-use.html
new file mode 100644
index 0000000..912ae60
--- /dev/null
+++ b/docs/java/com/sleepycat/collections/package-use.html
@@ -0,0 +1,219 @@

Uses of Package com.sleepycat.collections

diff --git a/docs/java/com/sleepycat/je/BinaryEqualityComparator.html b/docs/java/com/sleepycat/je/BinaryEqualityComparator.html
new file mode 100644
index 0000000..06a096a
--- /dev/null
+++ b/docs/java/com/sleepycat/je/BinaryEqualityComparator.html
@@ -0,0 +1,225 @@

BinaryEqualityComparator (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je
Interface BinaryEqualityComparator

public interface BinaryEqualityComparator

A tag interface used to mark a BTree or duplicate comparator class as a binary equality comparator, that is, a comparator that considers two keys (byte arrays) to be equal if and only if they have the same length and they are equal byte-per-byte.

If both the BTree and duplicate comparators used by a database are binary-equality comparators, then certain internal optimizations can be enabled. Specifically, the "BIN-delta blind-puts" optimization described below is made possible.

We say that a record operation (insertion, update, or deletion) is performed blindly in a BIN-delta when the delta does not contain a slot with the operation's key and we don't need to access the full BIN to check whether such a slot exists there or to extract any information from the full-BIN slot, if it exists. Performing a blind operation involves inserting the record in the BIN-delta, and in case of deletion, marking the BIN slot as deleted. When the delta and the full BIN are merged at a later time, the blind operation will be translated to an insertion, update, or delete depending on whether the full BIN contained the record or not.

Normally, blind puts are not possible: we need to know whether the put is actually an update or an insertion, i.e., whether the key exists in the full BIN or not. Furthermore, in case of update we also need to know the location of the previous record version to make the current update abortable. However, it is possible to answer at least the key existence question by adding a small amount of extra information in the deltas. If we do so, puts that are actual insertions can be done blindly.

To answer whether a key exists in a full BIN or not, each BIN-delta stores a bloom filter, which is a very compact, approximate representation of the set of keys in the full BIN. Bloom filters can answer set membership questions with no false negatives and a very low probability of false positives. As a result, put operations that are actual insertions can almost always be performed blindly.

Because bloom filters work by applying hash functions on keys (where each key byte participates in the hash computation), an additional requirement for blind puts is that a database uses "binary equality" comparators, that is, a comparator that considers two keys to be equal if and only if they have the same length and they are equal byte-per-byte. Inheriting from the BinaryEqualityComparator interface marks an actual comparator as having the "binary equality" property.

Comparators are configured using DatabaseConfig.setBtreeComparator(java.util.Comparator) or DatabaseConfig.setBtreeComparator(Class), and DatabaseConfig.setDuplicateComparator(java.util.Comparator) or DatabaseConfig.setDuplicateComparator(Class).

As described in the javadoc for these methods, comparators must be used with great caution, since a badly behaved comparator can cause B-tree corruption.
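For illustration, a sketch of a comparator that can legitimately carry this tag follows; the length-first ordering rule is an arbitrary choice, but it returns zero exactly when two keys have the same length and are equal byte-per-byte, which is the property the tag asserts.

import java.io.Serializable;
import java.util.Comparator;
import com.sleepycat.je.BinaryEqualityComparator;

public class LengthFirstComparator
        implements Comparator<byte[]>, BinaryEqualityComparator, Serializable {

    private static final long serialVersionUID = 1L;

    public int compare(byte[] a, byte[] b) {
        if (a.length != b.length) {
            return a.length - b.length; // shorter keys sort first
        }
        for (int i = 0; i < a.length; i++) {
            int diff = (a[i] & 0xff) - (b[i] & 0xff);
            if (diff != 0) {
                return diff; // unsigned byte-wise comparison
            }
        }
        return 0; // equal only when same length and byte-per-byte equal
    }
}

Such a class would then be installed with DatabaseConfig.setBtreeComparator(Class) as described above.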
Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/je/BtreeStats.html b/docs/java/com/sleepycat/je/BtreeStats.html
new file mode 100644
index 0000000..85671e4
--- /dev/null
+++ b/docs/java/com/sleepycat/je/BtreeStats.html
@@ -0,0 +1,668 @@

BtreeStats (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je
Class BtreeStats

All Implemented Interfaces:
java.io.Serializable

public class BtreeStats
extends DatabaseStats

The BtreeStats object is used to return Btree database statistics.

See Also:
Serialized Form
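A sketch of obtaining these statistics; note that, as the method descriptions below state, most tree counts are populated only when fast stats are disabled.

import com.sleepycat.je.BtreeStats;
import com.sleepycat.je.Database;
import com.sleepycat.je.StatsConfig;

public class StatsExample {
    static void printStats(Database db) {
        StatsConfig config = new StatsConfig();
        config.setFast(false); // walk the tree so node counts are filled in
        BtreeStats stats = (BtreeStats) db.getStats(config);
        System.out.println("leaf nodes: " + stats.getLeafNodeCount());
        System.out.println("max depth:  " + stats.getMainTreeMaxDepth());
    }
}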
Constructor Summary

Constructor and Description
BtreeStats()

Method Summary

Modifier and Type    Method and Description
long[]    getBINEntriesHistogram()
          Returns an array representing a histogram of the number of Bottom Internal Nodes with various percentages of non-deleted entry counts.
long[]    getBINsByLevel()
          Returns the count of Bottom Internal Nodes per level, indexed by level.
long      getBottomInternalNodeCount()
          Returns the number of Bottom Internal Nodes in the database tree.
long[]    getDBINsByLevel()
          Deprecated. as of 5.0, returns an empty array.
long      getDeletedLeafNodeCount()
          Returns the number of deleted data records in the database tree that are pending removal by the compressor.
long[]    getDINsByLevel()
          Deprecated. as of 5.0, returns an empty array.
long      getDupCountLeafNodeCount()
          Deprecated. as of 5.0, returns zero.
long      getDuplicateBottomInternalNodeCount()
          Deprecated. as of 5.0, returns zero.
long      getDuplicateInternalNodeCount()
          Deprecated. as of 5.0, returns zero.
int       getDuplicateTreeMaxDepth()
          Deprecated. as of 5.0, returns zero.
long[]    getINsByLevel()
          Returns the count of Internal Nodes per level, indexed by level.
long      getInternalNodeCount()
          Returns the number of Internal Nodes in the database tree.
long      getLeafNodeCount()
          Returns the number of leaf nodes in the database tree, which can equal the number of records.
int       getMainTreeMaxDepth()
          Returns the maximum depth of the main database tree.
long      getRelatches()
          Returns the number of latch upgrades (relatches) required while operating on this database's BTree.
int       getRootSplits()
          The number of times the root of the BTree was split.
java.lang.String  toString()
          For convenience, the BtreeStats class has a toString method that lists all the data fields.
java.lang.String  toStringVerbose()

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

      Constructor Detail

BtreeStats

public BtreeStats()

      Method Detail

      + + + +
        +
      • +

        getBottomInternalNodeCount

        +
        public long getBottomInternalNodeCount()
        +
        Returns the number of Bottom Internal Nodes in the database tree. + +

        The information is included only if the Database.getStats call was not + configured by the StatsConfig.setFast method.

        +
        +
        Returns:
        +
        number of Bottom Internal Nodes in the database tree.
        +
        +
      • +
      + + + +
        +
      • +

        getDuplicateBottomInternalNodeCount

        +
        public long getDuplicateBottomInternalNodeCount()
        +
        Deprecated. as of 5.0, returns zero.
        +
      • +
      + + + +
        +
      • +

        getDeletedLeafNodeCount

        +
        public long getDeletedLeafNodeCount()
        +
        Returns the number of deleted data records in the database tree that + are pending removal by the compressor. + +

        The information is included only if the Database.getStats call was not + configured by the StatsConfig.setFast method.

        +
        +
        Returns:
        +
        number of deleted data records in the database tree that are + pending removal by the compressor.
        +
        +
      • +
      + + + +
        +
      • +

        getDupCountLeafNodeCount

        +
        public long getDupCountLeafNodeCount()
        +
        Deprecated. as of 5.0, returns zero.
        +
      • +
      + + + +
        +
      • +

        getInternalNodeCount

        +
        public long getInternalNodeCount()
        +
        Returns the number of Internal Nodes in the database tree. + +

        The information is included only if the Database.getStats call was not + configured by the StatsConfig.setFast method.

        +
        +
        Returns:
        +
        number of Internal Nodes in the database tree.
        +
        +
      • +
      + + + +
        +
      • +

        getDuplicateInternalNodeCount

        +
        public long getDuplicateInternalNodeCount()
        +
        Deprecated. as of 5.0, returns zero.
        +
      • +
      + + + +
        +
      • +

        getLeafNodeCount

        +
        public long getLeafNodeCount()
        +
        Returns the number of leaf nodes in the database tree, which can equal + the number of records. This is calculated without locks or transactions, + and therefore is only an accurate count of the current number of records + when the database is quiescent. + +

        The information is included only if the Database.getStats call was not + configured by the StatsConfig.setFast method.

        +
        +
        Returns:
        +
        number of leaf nodes in the database tree, which can equal the + number of records. This is calculated without locks or transactions, and + therefore is only an accurate count of the current number of records + when the database is quiescent.
        +
        +
      • +

getMainTreeMaxDepth

public int getMainTreeMaxDepth()

Returns the maximum depth of the main database tree.

The information is included only if the Database.getStats call was not configured by the StatsConfig.setFast method.

Returns:
maximum depth of the main database tree.

getDuplicateTreeMaxDepth

public int getDuplicateTreeMaxDepth()

Deprecated. As of 5.0, returns zero.

getINsByLevel

public long[] getINsByLevel()

Returns the count of Internal Nodes per level, indexed by level.

The information is included only if the Database.getStats call was not configured by the StatsConfig.setFast method.

Returns:
count of Internal Nodes per level, indexed by level.

getBINsByLevel

public long[] getBINsByLevel()

Returns the count of Bottom Internal Nodes per level, indexed by level.

The information is included only if the Database.getStats call was not configured by the StatsConfig.setFast method.

Returns:
count of Bottom Internal Nodes per level, indexed by level.

getBINEntriesHistogram

public long[] getBINEntriesHistogram()

Returns an array representing a histogram of the number of Bottom Internal Nodes with various percentages of non-deleted entry counts. The array has 10 elements, and each element represents a range of 10%.

element [0]: # BINs with 0% to 9% entries used by non-deleted values
element [1]: # BINs with 10% to 19% entries used by non-deleted values
element [2]: # BINs with 20% to 29% entries used by non-deleted values
...
element [9]: # BINs with 90% to 100% entries used by non-deleted values

Returns:
an array representing a histogram of the number of BINs with various percentages of non-deleted entries.
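
As an illustration (a hypothetical sketch; stats is a BtreeStats obtained as in the earlier example), the histogram can be printed bucket by bucket:

    long[] histogram = stats.getBINEntriesHistogram();
    for (int i = 0; i < histogram.length; i++) {
        int lo = i * 10;                  // lower bound of this bucket
        int hi = (i == 9) ? 100 : lo + 9; // last bucket covers 90% to 100%
        System.out.println(lo + "%-" + hi + "%: " + histogram[i] + " BINs");
    }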

getDINsByLevel

public long[] getDINsByLevel()

Deprecated. As of 5.0, returns an empty array.

getDBINsByLevel

public long[] getDBINsByLevel()

Deprecated. As of 5.0, returns an empty array.

getRelatches

public long getRelatches()

Returns the number of latch upgrades (relatches) required while operating on this database's BTree. Latch upgrades are required when an operation assumes that a shared (read) latch will be sufficient but later determines that an exclusive (write) latch will actually be required.

Returns:
number of latch upgrades (relatches) required.

getRootSplits

public int getRootSplits()

The number of times the root of the BTree was split.

Returns:
number of times the root was split.

toString

public java.lang.String toString()

For convenience, the BtreeStats class has a toString method that lists all the data fields.

Overrides:
toString in class java.lang.Object

toStringVerbose

public java.lang.String toStringVerbose()
diff --git a/docs/java/com/sleepycat/je/CacheMode.html b/docs/java/com/sleepycat/je/CacheMode.html
new file mode 100644
index 0000000..60bc902
--- /dev/null
+++ b/docs/java/com/sleepycat/je/CacheMode.html
@@ -0,0 +1,723 @@
CacheMode (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

Enum CacheMode

All Implemented Interfaces:
java.io.Serializable, java.lang.Comparable<CacheMode>

public enum CacheMode
extends java.lang.Enum<CacheMode>
Modes that can be specified for control over caching of records in the JE in-memory cache. When a record is stored or retrieved, the cache mode determines how long the record is subsequently retained in the JE in-memory cache, relative to other records in the cache.

When the cache overflows, JE must evict some records from the cache. By default, JE uses a Least Recently Used (LRU) algorithm for determining which records to evict. With the LRU algorithm, JE makes a best effort to evict the "coldest" (least recently used or accessed) records and to retain the "hottest" records in the cache for as long as possible.

When an off-heap cache is configured, records evicted from the main cache are placed in the off-heap cache, and a separate LRU is used to determine when to evict a record from the off-heap cache.

JE uses an approximate LRU approach with some exceptions and special cases.
• Individual records (LNs or Leaf Nodes) do not appear on the LRU list, i.e., their "hotness" is not explicitly tracked. Instead, their containing Btree node (BIN or bottom internal node) appears on the LRU list. Each BIN contains roughly 100 LNs (see EnvironmentConfig.NODE_MAX_ENTRIES). When an LN is accessed, its BIN is moved to the hot end of the LRU list, implying that all other LNs in the same BIN are also treated as if they are hot. The same applies if the BIN is moved to the cold end of the LRU list. The above statement also applies to the off-heap cache, when one is configured.

• When a BIN contains LNs and the BIN reaches the cold end of the LRU list, memory can be reclaimed by evicting the LNs, and eviction of the BIN is deferred. The empty BIN is moved to the hot end of the LRU list. When an off-heap cache is configured, the eviction of LNs in this manner occurs independently in both caches.

• When a BIN contains no LNs, it may be evicted entirely. When the BIN's parent node becomes empty, it may also be evicted, and so on. The BINs and INs are evicted on the basis of an LRU, but with two exceptions:

  1) Dirty BINs and INs are evicted only after eviction of all non-dirty BINs and INs. This is important to reduce logging and associated cleaning costs. When an off-heap cache is configured, BINs and INs are evicted from the main cache without regard to whether they are dirty. Dirty BINs and INs are evicted last, as just described, only from the off-heap cache.

  2) A BIN may be mutated to a BIN-delta to reclaim memory, rather than being evicted entirely. A BIN-delta contains only the dirty entries (for LNs recently logged). A BIN-delta is used when its size relative to the full BIN will be small enough that it will be more efficient, both on disk and in memory, to store the delta rather than the full BIN (see EnvironmentConfig.TREE_BIN_DELTA). The advantage of keeping a BIN-delta in cache is that some operations, particularly record insertions, can be performed using the delta without having the complete BIN in cache. When a BIN is mutated to a BIN-delta to reclaim memory, it is placed at the hot end of the LRU list. When an off-heap cache is configured, BINs are not mutated to BIN-deltas in the main cache; this is done only in the off-heap cache.

• To reduce contention among threads on the LRU list, multiple LRU lists may be configured. See EnvironmentConfig.EVICTOR_N_LRU_LISTS. As described in the javadoc for this parameter, there is a trade-off between thread contention and the accuracy of the LRU. This parameter determines the number of main cache LRU lists as well as the number of off-heap cache LRU lists, when an off-heap cache is configured.

• A non-default cache mode may be explicitly specified to override the normal LRU behavior described above. See the CacheMode enumeration values for details. The behavior of each CacheMode when an off-heap cache is configured is also described.

When no cache mode is explicitly specified, the default cache mode is DEFAULT. The default mode causes the normal LRU algorithm to be used.

An explicit cache mode may be specified as an Environment property, a Database property, a Cursor property, or on a per-operation basis using ReadOptions.setCacheMode(CacheMode) or WriteOptions.setCacheMode(CacheMode). If none are specified, DEFAULT is used. If more than one non-null property is specified, the Cursor property overrides the Database and Environment properties, and the Database property overrides the Environment property.

When all records in a given Database, or all Databases, should be treated the same with respect to caching, using the Database and/or Environment cache mode properties is sufficient. For applications that need finer-grained control, the Cursor cache mode property can be used to provide a specific cache mode for individual records or operations. The Cursor cache mode property can be changed at any time, and the cache mode specified will apply to subsequent operations performed with that Cursor.

In a Replicated Environment where a non-default cache mode is desired, the cache mode can be configured on the Master node as described above. However, it is important to configure the cache mode on the Replica nodes using an Environment property. That way, the cache mode will apply to write operations that are replayed on the Replica for all Databases, even if the Databases are not open by the application on the Replica. Since all nodes may be Replicas at some point in their life cycle, it is recommended to configure the desired cache mode as an Environment property on all nodes in a Replicated Environment.

On a Replica, per-Database control over the cache mode for write operations is possible by opening the Database on the Replica and configuring the cache mode. Per-Cursor (meaning per-record or per-operation) control of the cache mode is not possible on a Replica for write operations. For read operations, both per-Database and per-Cursor control is possible on the Replica, as described above.

The cache-related stats in EnvironmentStats can provide some measure of the effectiveness of the cache mode choice.
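
To make the precedence rules above concrete, here is a brief hedged sketch (env and the database name "myDb" are hypothetical) of the three configuration levels; for operations performed through the cursor, the Cursor setting wins:

    // Environment-wide default cache mode.
    EnvironmentMutableConfig envConfig = env.getMutableConfig();
    envConfig.setCacheMode(CacheMode.EVICT_LN);
    env.setMutableConfig(envConfig);

    // Per-Database override, supplied when the database is opened.
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setCacheMode(CacheMode.DEFAULT);
    Database db = env.openDatabase(null, "myDb", dbConfig);

    // Per-Cursor override, applied to subsequent operations on this cursor.
    Cursor cursor = db.openCursor(null, null);
    cursor.setCacheMode(CacheMode.UNCHANGED);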

See Also:
Cache Statistics: Sizing

Enum Constant Summary

Enum Constant and Description

DEFAULT
The record's hotness is changed to "most recently used" by the operation.

EVICT_BIN
The record's BIN (and its LNs) are evicted after the operation.

EVICT_LN
The record's LN is evicted after the operation, and the containing BIN is moved to the hot end of the LRU list.

KEEP_HOT
Deprecated. Please use DEFAULT instead. As of JE 4.0, this mode functions exactly as if DEFAULT were specified.

MAKE_COLD
Deprecated. Please use UNCHANGED instead. As of JE 4.0, this mode functions exactly as if UNCHANGED were specified.

UNCHANGED
The record's hotness or coldness is unchanged by the operation where this cache mode is specified.

Method Summary

Modifier and Type    Method and Description

static CacheMode     valueOf(java.lang.String name)
Returns the enum constant of this type with the specified name.

static CacheMode[]   values()
Returns an array containing the constants of this enum type, in the order they are declared.

Methods inherited from class java.lang.Enum
clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf

Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait

Enum Constant Detail

        DEFAULT

public static final CacheMode DEFAULT

The record's hotness is changed to "most recently used" by the operation.

This cache mode is used when the application does not need explicit control over the cache and a standard LRU approach is sufficient.

Note that null may be specified to use the DEFAULT mode.

Specifically:

• The BIN containing the record's LN will remain in the main cache, and it is moved to the hot end of its LRU list.

• When an off-heap cache is configured, the record's LN and BIN will be loaded into the main cache only. They will be removed from the off-heap cache, if they were present there. However, if other LNs belonging to this BIN were present in the off-heap cache, they will remain there.

          KEEP_HOT

public static final CacheMode KEEP_HOT

Deprecated. Please use DEFAULT instead. As of JE 4.0, this mode functions exactly as if DEFAULT were specified.

          UNCHANGED

public static final CacheMode UNCHANGED

The record's hotness or coldness is unchanged by the operation where this cache mode is specified.

This cache mode is normally used when the application prefers that the operation should not perturb the cache, for example, when scanning over all records in a database.

Specifically:

• A record's LN and BIN must be loaded into the main cache in order to perform the operation. However, they may be removed from the main cache after the operation, to avoid a net change to the cache, according to the rules below.

• If the record's LN was not present in the main cache prior to the operation, then the LN will be evicted from the main cache after the operation. The LN will not be added to, or removed from, the off-heap cache.

• When the LN is to be evicted from the main cache (according to the above rules) and the operation is not performed via a cursor, the LN is evicted when the operation is complete. When a cursor is used, the LN is evicted when the cursor is moved to a different record or closed.

• If the record's BIN was not present in the main cache prior to the operation, the action taken depends on whether the BIN is dirty and whether an off-heap cache is configured.

  • When the BIN is not dirty, the BIN (and LN) will be evicted from the main cache after the operation. The BIN (and LN) will not be added to, or removed from, the off-heap cache.

  • When the BIN is dirty and an off-heap cache is not configured, the BIN will not be evicted from the main cache and will be moved to the hot end of its main cache LRU list. This is done to reduce logging.

  • When the BIN is dirty and an off-heap cache is configured, the BIN is evicted from the main cache even when it is dirty, because the BIN (and LN) will be stored in the off-heap cache and the BIN will not be logged. The BIN will be placed at the hot end of its off-heap LRU list.

  • Note that when this operation loaded the BIN and the BIN becomes dirty, it is normally because this operation is a write operation. However, other concurrent threads can also dirty the BIN.

• When the BIN is to be evicted from the main cache (according to the above rules) and the operation is not performed via a cursor, the BIN is evicted when the operation is complete. When a cursor is used, the BIN is evicted only when the cursor moves to a different BIN or is closed. Because of the way BINs are evicted, when multiple operations are performed using a single cursor and not perturbing the cache is desired, it is important to use this cache mode for all of the operations.

• When the BIN was present in the main cache prior to the operation, its position in the LRU list will not be changed. Its position in the off-heap LRU list, if it is present in the off-heap cache, will also not be changed.
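
For illustration, a minimal hedged sketch (db is a hypothetical Database handle) of a full scan that uses UNCHANGED so the scan does not displace hot records:

    try (Cursor cursor = db.openCursor(null, null)) {
        cursor.setCacheMode(CacheMode.UNCHANGED);
        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        while (cursor.getNext(key, data, LockMode.READ_UNCOMMITTED)
                == OperationStatus.SUCCESS) {
            // process the record; the scan leaves the cache as it found it
        }
    }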

MAKE_COLD

public static final CacheMode MAKE_COLD

Deprecated. Please use UNCHANGED instead. As of JE 4.0, this mode functions exactly as if UNCHANGED were specified.

          EVICT_LN

public static final CacheMode EVICT_LN

The record's LN is evicted after the operation, and the containing BIN is moved to the hot end of the LRU list.

This cache mode is normally used when not all LNs will fit into the main cache, and the application prefers to read the LN from the log file or load it from the off-heap cache when the record is accessed again, rather than have it take up space in the main cache and potentially cause expensive Java GC. By using this mode, the file system cache or off-heap cache can be relied on for holding LNs, which complements the use of the JE cache to hold BINs and INs.

Note that using this mode for all operations will prevent the cache from filling, if all internal nodes fit in cache.

Specifically:

• The record's LN will be evicted from the main cache after the operation. The LN will be added to the off-heap cache, if it is not already present and an off-heap cache is configured.

• When the operation is not performed via a cursor, the LN is evicted when the operation is complete. When a cursor is used, the LN is evicted when the cursor is moved to a different record or closed.

Since:
3.3.98
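
As a hedged illustration (cursor and keyBytes are hypothetical), EVICT_LN can also be requested for a single operation through ReadOptions:

    ReadOptions options = new ReadOptions().setCacheMode(CacheMode.EVICT_LN);
    DatabaseEntry key = new DatabaseEntry(keyBytes);
    DatabaseEntry data = new DatabaseEntry();
    OperationResult result = cursor.get(key, data, Get.SEARCH, options);
    if (result != null) {
        // the LN will be evicted once the cursor moves or is closed
    }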

EVICT_BIN

public static final CacheMode EVICT_BIN

The record's BIN (and its LNs) are evicted after the operation.

This cache mode is normally used when not all BINs will fit into the main cache, and the application prefers to read the LN and BIN from the log file or load them from the off-heap cache when the record is accessed again, rather than have them take up space in the JE cache and potentially cause expensive Java GC.

Because this mode evicts all LNs in the BIN, even if they are "hot" from the perspective of a different accessor, this mode should be used with caution. One valid use case is where all accessors use this mode; in this case the cache mode might be set on a per-Database or per-Environment basis.

Note that using this mode for all operations will prevent the cache from filling, if all upper internal nodes fit in cache.

Specifically:

• The record's LN will be evicted from the main cache after the operation. The LN will be added to the off-heap cache, if it is not already present and an off-heap cache is configured.

• When the operation is not performed via a cursor, the LN is evicted when the operation is complete. When a cursor is used, the LN is evicted when the cursor is moved to a different record or closed.

• Whether the BIN is evicted depends on whether the BIN is dirty and whether an off-heap cache is configured.

  • When the BIN is not dirty, the BIN (and LN) will be evicted from the main cache after the operation. The BIN (and LN) will be added to the off-heap cache, if they are not already present and an off-heap cache is configured. The BIN will be placed at the hot end of its off-heap LRU list.

  • When the BIN is dirty and an off-heap cache is not configured, the BIN will not be evicted from the main cache and will be moved to the hot end of its main cache LRU list. This is done to reduce logging.

  • When the BIN is dirty and an off-heap cache is configured, the BIN is evicted from the main cache even when it is dirty, because the BIN (and LN) will be stored in the off-heap cache and the BIN will not be logged. The BIN will be placed at the hot end of its off-heap LRU list.

  • Note that the BIN may have been dirtied by this operation, if it is a write operation, or by earlier write operations.

• When the BIN is to be evicted from the main cache (according to the above rules) and the operation is not performed via a cursor, the BIN is evicted when the operation is complete. When a cursor is used, the BIN is evicted only when the cursor moves to a different BIN or is closed. Because of the way BINs are evicted, when multiple operations are performed using a single cursor and not perturbing the cache is desired, it is important to use this cache mode for all of the operations.

Since:
4.0.97

Method Detail

values

public static CacheMode[] values()

Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

    for (CacheMode c : CacheMode.values())
        System.out.println(c);

Returns:
an array containing the constants of this enum type, in the order they are declared

valueOf

public static CacheMode valueOf(java.lang.String name)

Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)

Parameters:
name - the name of the enum constant to be returned.
Returns:
the enum constant with the specified name
Throws:
java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
java.lang.NullPointerException - if the argument is null
diff --git a/docs/java/com/sleepycat/je/CheckpointConfig.html b/docs/java/com/sleepycat/je/CheckpointConfig.html
new file mode 100644
index 0000000..2ea633e
--- /dev/null
+++ b/docs/java/com/sleepycat/je/CheckpointConfig.html
@@ -0,0 +1,541 @@
CheckpointConfig (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class CheckpointConfig

All Implemented Interfaces:
java.lang.Cloneable

public class CheckpointConfig
extends java.lang.Object
implements java.lang.Cloneable

Specifies the attributes of a checkpoint operation invoked from Environment.checkpoint.

Constructor Summary

CheckpointConfig()
An instance created using the default constructor is initialized with the system's default settings.

Method Summary

Modifier and Type      Method and Description

CheckpointConfig       clone()
Returns a copy of this configuration object.

boolean                getForce()
Returns the configuration of the checkpoint force option.

int                    getKBytes()
Returns the checkpoint log data threshold, in kilobytes.

boolean                getMinimizeRecoveryTime()
Returns the configuration of the minimize recovery time option.

int                    getMinutes()
Returns the checkpoint time threshold, in minutes.

CheckpointConfig       setForce(boolean force)
Configures the checkpoint force option.

CheckpointConfig       setKBytes(int kBytes)
Configures the checkpoint log data threshold, in kilobytes.

CheckpointConfig       setMinimizeRecoveryTime(boolean minimizeRecoveryTime)
Configures the minimize recovery time option.

CheckpointConfig       setMinutes(int minutes)
Configures the checkpoint time threshold, in minutes.

java.lang.String       toString()
Returns the values for each configuration attribute.

Methods inherited from class java.lang.Object
equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

Constructor Detail

CheckpointConfig

public CheckpointConfig()

An instance created using the default constructor is initialized with the system's default settings.

Method Detail

setKBytes

public CheckpointConfig setKBytes(int kBytes)

Configures the checkpoint log data threshold, in kilobytes.

The default is 0 for this class and the database environment.

Parameters:
kBytes - If the kBytes parameter is non-zero, a checkpoint will be performed if more than kBytes of log data have been written since the last checkpoint.
Returns:
this

getKBytes

public int getKBytes()

Returns the checkpoint log data threshold, in kilobytes.

This method may be called at any time during the life of the application.

Returns:
The checkpoint log data threshold, in kilobytes.

setMinutes

public CheckpointConfig setMinutes(int minutes)

Configures the checkpoint time threshold, in minutes.

The default is 0 for this class and the database environment.

Parameters:
minutes - If the minutes parameter is non-zero, a checkpoint is performed if more than minutes minutes have passed since the last checkpoint.
Returns:
this

getMinutes

public int getMinutes()

Returns the checkpoint time threshold, in minutes.

Returns:
The checkpoint time threshold, in minutes.

setForce

public CheckpointConfig setForce(boolean force)

Configures the checkpoint force option.

The default is false for this class and the BDB JE environment.

Parameters:
force - If set to true, force a checkpoint, even if there has been no activity since the last checkpoint.
Returns:
this
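
Putting the setters together, a small sketch (hedged; env is a hypothetical open Environment) that forces a checkpoint with explicit thresholds:

    CheckpointConfig checkpointConfig = new CheckpointConfig()
        .setKBytes(1024)  // checkpoint if >= 1024 KB of log data was written
        .setMinutes(5)    // or if more than 5 minutes have passed
        .setForce(true);  // run even if there has been no activity
    env.checkpoint(checkpointConfig);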

getForce

public boolean getForce()

Returns the configuration of the checkpoint force option.

Returns:
The configuration of the checkpoint force option.

setMinimizeRecoveryTime

public CheckpointConfig setMinimizeRecoveryTime(boolean minimizeRecoveryTime)

Configures the minimize recovery time option.

The default is false for this class and the BDB JE environment.

Parameters:
minimizeRecoveryTime - If set to true, the checkpoint will itself take longer but will cause a subsequent recovery (Environment.open) to finish more quickly.
Returns:
this

getMinimizeRecoveryTime

public boolean getMinimizeRecoveryTime()

Returns the configuration of the minimize recovery time option.

Returns:
The configuration of the minimize recovery time option.

clone

public CheckpointConfig clone()

Returns a copy of this configuration object.

Overrides:
clone in class java.lang.Object

toString

public java.lang.String toString()

Returns the values for each configuration attribute.

Overrides:
toString in class java.lang.Object
Returns:
the values for each configuration attribute.
diff --git a/docs/java/com/sleepycat/je/CommitToken.html b/docs/java/com/sleepycat/je/CommitToken.html
new file mode 100644
index 0000000..9ce8716
--- /dev/null
+++ b/docs/java/com/sleepycat/je/CommitToken.html
@@ -0,0 +1,352 @@
CommitToken (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class CommitToken

All Implemented Interfaces:
java.io.Serializable, java.lang.Comparable<CommitToken>

public class CommitToken
extends java.lang.Object
implements java.io.Serializable, java.lang.Comparable<CommitToken>

Defines an opaque token that can be used to identify a specific transaction commit in a replicated environment. It's unique relative to its environment.

Since CommitTokens identify a point in the serialized transaction schedule created on the master, it's meaningful to compare commit tokens, as described in the compareTo(CommitToken) method below. CommitTokens are obtained from Transaction.getCommitToken().

See Also:
CommitPointConsistencyPolicy, Serialized Form

Method Detail

getRepenvUUID

public java.util.UUID getRepenvUUID()

getVLSN

public long getVLSN()

hashCode

public int hashCode()

Overrides:
hashCode in class java.lang.Object

equals

public boolean equals(java.lang.Object obj)

Overrides:
equals in class java.lang.Object

compareTo

public int compareTo(CommitToken other)

Implements the Comparable interface. Note that it's not meaningful to compare commit tokens across environments, since they represent states in unrelated serialized transaction streams.

CommitToken(1) < CommitToken(2) implies that CommitToken(1) represents a state of the database that preceded the state defined by CommitToken(2).

Specified by:
compareTo in interface java.lang.Comparable<CommitToken>
Throws:
java.lang.IllegalArgumentException - if two tokens from different environments are compared.
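
A small hedged sketch (txn1 and txn2 are hypothetical transactions committed in the same replicated environment) of how tokens might be compared:

    CommitToken t1 = txn1.getCommitToken(); // valid after commit
    CommitToken t2 = txn2.getCommitToken();
    if (t1.compareTo(t2) < 0) {
        // t1's commit precedes t2's in the master's serialized schedule
    }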

toString

public java.lang.String toString()

Overrides:
toString in class java.lang.Object
diff --git a/docs/java/com/sleepycat/je/Cursor.html b/docs/java/com/sleepycat/je/Cursor.html
new file mode 100644
index 0000000..6d1e399
--- /dev/null
+++ b/docs/java/com/sleepycat/je/Cursor.html
@@ -0,0 +1,1975 @@
Cursor (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class Cursor

All Implemented Interfaces:
ForwardCursor, java.io.Closeable, java.lang.AutoCloseable

Direct Known Subclasses:
SecondaryCursor

public class Cursor
extends java.lang.Object
implements ForwardCursor
A database cursor. Cursors are used for operating on collections of records, for iterating over a database, and for saving handles to individual records, so that they can be modified after they have been read.

Cursors which are opened with a transaction instance are transactional cursors and may be used by multiple threads, but only serially. That is, the application must serialize access to the handle. Non-transactional cursors, opened with a null transaction instance, may not be used by multiple threads.

If the cursor is to be used to perform operations on behalf of a transaction, the cursor must be opened and closed within the context of that single transaction.

Once the cursor close() method has been called, the handle may not be accessed again, regardless of the close method's success or failure, with one exception: the close method itself may be called any number of times to simplify error handling.

To obtain a cursor with default attributes:

    Cursor cursor = myDatabase.openCursor(txn, null);

To customize the attributes of a cursor, use a CursorConfig object.

    CursorConfig config = new CursorConfig();
    config.setReadUncommitted(true);
    Cursor cursor = myDatabase.openCursor(txn, config);

Modifications to the database during a sequential scan will be reflected in the scan; that is, records inserted behind a cursor will not be returned while records inserted in front of a cursor will be returned.

By default, a cursor is "sticky", meaning that the prior position is maintained by cursor movement operations, and the cursor stays at the prior position when the operation does not succeed. However, it is possible to configure a cursor as non-sticky to enable certain performance benefits. See CursorConfig.setNonSticky(boolean) for details.

Using Null and Partial DatabaseEntry Parameters

Null can be passed for DatabaseEntry output parameters if the value is not needed. The DatabaseEntry Partial property can also be used to optimize in certain cases. These provide varying degrees of performance benefits that depend on the specific operation, as described below.

When retrieving a record with a Database or Cursor method, if only the key is needed by the application, then the retrieval of the data item can be suppressed by passing null. If null is passed as the data parameter, the data item will not be returned by the Database or Cursor method.

Suppressing the return of the data item potentially has a large performance benefit. In this case, if the record data is not already in the JE cache, it will not be read from disk. The performance benefit is potentially large because random access disk reads may be reduced. Example use cases are:

• Scanning all records in key order, when the data is not needed.

• Skipping over records quickly with READ_UNCOMMITTED isolation to select records for further processing by examining the key value.

Note that by "record data" we mean both the data parameter for a regular or primary DB, and the pKey parameter for a secondary DB. However, the performance advantage of a key-only operation does not apply to databases configured for duplicates. For a duplicates DB, the data is always available along with the key and does not have to be fetched separately.

The Partial property may also be used to retrieve or update only a portion of a data item. This avoids copying the entire record between the JE cache and the application data parameter. However, this feature has less of a performance benefit than one might assume, since the entire record is always read or written to the database, and the entire record is cached. A partial update may be performed only with Cursor.putCurrent.

A null or partial DatabaseEntry output parameter may also be used in other cases, for example, to retrieve a partial key item. However, in practice this has limited value since the entire key is usually needed by the application, and the benefit of copying a portion of the key is generally very small.

Historical note: Prior to JE 7.0, null could not be passed for output parameters. Instead, DatabaseEntry.setPartial(0, 0, true) was called for a data parameter to avoid reading the record's data. Now, null can be passed instead.

Method Detail

getDatabase

public Database getDatabase()

Returns the Database handle associated with this Cursor.

Specified by:
getDatabase in interface ForwardCursor
Returns:
The Database handle associated with this Cursor.

getConfig

public CursorConfig getConfig()

Returns this cursor's configuration.

This may differ from the configuration used to open this object if the cursor existed previously.

Returns:
This cursor's configuration.

getCacheMode

public CacheMode getCacheMode()

Returns the default CacheMode used for subsequent operations performed using this cursor. If setCacheMode(com.sleepycat.je.CacheMode) has not been called with a non-null value, the configured Database or Environment default is returned.

Returns:
the CacheMode default used for subsequent operations using this cursor.

close

public void close()

Discards the cursor.

The cursor handle may not be used again after this method has been called, regardless of the method's success or failure, with one exception: the close method itself may be called any number of times.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Specified by:
close in interface ForwardCursor
Specified by:
close in interface java.io.Closeable
Specified by:
close in interface java.lang.AutoCloseable
Throws:
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.

dup

public Cursor dup(boolean samePosition)

Returns a new cursor with the same transaction and locker ID as the original cursor.

This is useful when an application is using locking and requires two or more cursors in the same thread of control.

Parameters:
samePosition - If true, the newly created cursor is initialized to refer to the same position in the database as the original cursor (if any) and hold the same locks (if any). If false, or the original cursor does not hold a database position and locks, the returned cursor is uninitialized and will behave like a newly created cursor.
Returns:
A new cursor with the same transaction and locker ID as the original cursor.
Throws:
DatabasePreemptedException - in a replicated environment if the master has truncated, removed or renamed the database.
OperationFailureException - if this exception occurred earlier and caused the transaction to be invalidated.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed.

delete

public OperationResult delete(WriteOptions options)

Deletes the record to which the cursor refers. When the database has associated secondary databases, this method also deletes the associated index records.

The cursor position is unchanged after a delete, and subsequent calls to cursor functions expecting the cursor to refer to an existing record will fail.

Parameters:
options - the WriteOptions, or null to use default options.
Returns:
the OperationResult if the record is deleted, else null if the record at the cursor position has already been deleted.
Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.UnsupportedOperationException - if the database is transactional but this cursor was not opened with a non-null transaction parameter, or the database is read-only.
java.lang.IllegalStateException - if the cursor or database has been closed, or the cursor is uninitialized (not positioned on a record), or the non-transactional cursor was created in a different thread.
Since:
7.0

delete

public OperationStatus delete()

Deletes the record to which the cursor refers. When the database has associated secondary databases, this method also deletes the associated index records.

The cursor position is unchanged after a delete, and subsequent calls to cursor functions expecting the cursor to refer to an existing record will fail.

Calling this method is equivalent to calling delete(WriteOptions).

Returns:
OperationStatus.KEYEMPTY if the record at the cursor position has already been deleted; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.UnsupportedOperationException - if the database is transactional but this cursor was not opened with a non-null transaction parameter, or the database is read-only.
java.lang.IllegalStateException - if the cursor or database has been closed, or the cursor is uninitialized (not positioned on a record), or the non-transactional cursor was created in a different thread.

put

public OperationResult put(DatabaseEntry key,
                           DatabaseEntry data,
                           Put putType,
                           WriteOptions options)

Inserts or updates a record according to the specified Put type.

If the operation succeeds, the record will be locked according to the lock mode specified, the cursor will be positioned on the record, and a non-null OperationResult will be returned. If the operation fails because the record already exists (or does not exist, depending on the putType), null is returned.

When the database has associated secondary databases, this method also inserts or deletes associated index records as necessary.

The following table lists each allowed operation. See the individual Put operations for more information.

Put operation: Put.OVERWRITE
Description: Inserts or updates a record depending on whether a matching record is already present.
Returns null when: Never returns null.
Other special rules: Without duplicates, a matching record is one with the same key; with duplicates, it is one with the same key and data.

Put operation: Put.NO_OVERWRITE
Description: Inserts a record if a record with a matching key is not already present.
Returns null when: When an existing record matches.
Other special rules: If the database has duplicate keys, a record is inserted only if there are no records with a matching key.

Put operation: Put.NO_DUP_DATA
Description: Inserts a record in a database with duplicate keys if a record with a matching key and data is not already present.
Returns null when: When an existing record matches.
Other special rules: Without duplicates, this operation is not allowed.

Put operation: Put.CURRENT
Description: Updates the data of the record at the cursor position.
Returns null when: When the record at the cursor position has been deleted.
Other special rules: With duplicates, the data must be considered equal by the duplicate comparator, meaning that changing the data is only possible if a custom duplicate comparator is configured. Cannot be used to update the key of an existing record; in fact, the key parameter must be null. A partial data item may be specified to optimize for partial data update.

Parameters:
key - the key used as input. Must be null when putType is Put.CURRENT.
data - the data used as input. May be partial only when putType is Put.CURRENT.
putType - the Put operation type. May not be null.
options - the WriteOptions, or null to use default options.
Returns:
the OperationResult if the record is written, else null.
Throws:
DuplicateDataException - if putType is Put.CURRENT and the old and new data are not equal according to the configured duplicate comparator or default comparator.
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.UnsupportedOperationException - if the database is transactional but this cursor was not opened with a non-null transaction parameter, or the database is read-only, or putType is Put.NO_DUP_DATA and the database is not configured for duplicates.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified. This includes passing a null putType, a null input key/data parameter, an input key/data parameter with a null data array, or a partial key/data input parameter.
Since:
7.0
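
For example (a hedged sketch; keyBytes and dataBytes are placeholders), an insert-if-absent can be expressed with Put.NO_OVERWRITE by checking for a null result:

    DatabaseEntry key = new DatabaseEntry(keyBytes);
    DatabaseEntry data = new DatabaseEntry(dataBytes);
    OperationResult result = cursor.put(key, data, Put.NO_OVERWRITE, null);
    if (result == null) {
        // a record with this key already exists; nothing was written
    }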

put

public OperationStatus put(DatabaseEntry key,
                           DatabaseEntry data)

Stores a key/data pair into the database.

Calling this method is equivalent to calling put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) with Put.OVERWRITE.

If the put method succeeds, the cursor is positioned to refer to the newly inserted item.

If the key already appears in the database and duplicates are supported, the new data value is inserted at the correct sorted location, unless the new data value also appears in the database already. In the latter case, although the given key/data pair compares equal to an existing key/data pair, the two records may not be identical if custom comparators are used, in which case the existing record will be replaced with the new record. If the key already appears in the database and duplicates are not supported, the data associated with the key will be replaced.

Parameters:
key - the key used as input.
data - the data used as input.
Returns:
OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.UnsupportedOperationException - if the database is transactional but this cursor was not opened with a non-null transaction parameter, or the database is read-only.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified.

          putNoDupData

          +
          public OperationStatus putNoDupData(DatabaseEntry key,
          +                                    DatabaseEntry data)
          +
          Stores a key/data pair into the database. The database must be + configured for duplicates. + +

          Calling this method is equivalent to calling put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) with + Put.NO_DUP_DATA.

          + +

          If the putNoDupData method succeeds, the cursor is positioned to + refer to the newly inserted item.

          + +

          Insert the specified key/data pair into the database, unless a + key/data pair comparing equally to it already exists in the database. + If a matching key/data pair already exists in the database, OperationStatus.KEYEXIST is + returned.

          +
          +
          Parameters:
          +
          key - the key used as + input..
          +
          data - the data used as + input.
          +
          Returns:
          +
          OperationStatus.KEYEXIST if the key/data pair already appears in the + database, else OperationStatus.SUCCESS
          +
          Throws:
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if the database is transactional + but this cursor was not opened with a non-null transaction parameter, or + the database is read-only, or the database is not configured for + duplicates.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
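A brief usage sketch, assuming an open cursor on a duplicates-configured database and hypothetical key/data entries:

     // Add a duplicate only if this exact key/data pair is not already present.
     OperationStatus status = cursor.putNoDupData(key, data);
     if (status == OperationStatus.KEYEXIST) {
         // The identical pair already existed; nothing was written.
     }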
        • +
        + + + +
          +
        • +

          putCurrent

          +
          public OperationStatus putCurrent(DatabaseEntry data)
          +
          Replaces the data in the key/data pair at the current cursor position. + +

          Calling this method is equivalent to calling put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) with + Put.CURRENT.

          + +

          Overwrite the data of the key/data pair to which the cursor refers + with the specified data item. This method will return + OperationStatus.NOTFOUND if the cursor currently refers to an + already-deleted key/data pair.

          + +

          For a database that does not support duplicates, the data may be + changed by this method. If duplicates are supported, the data may be + changed only if a custom partial comparator is configured and the + comparator considers the old and new data to be equal (that is, the + comparator returns zero). For more information on partial comparators + see DatabaseConfig.setDuplicateComparator(java.util.Comparator<byte[]>).

          + +

          If the old and new data are unequal according to the comparator, a + DuplicateDataException is thrown. Changing the data in this + case would change the sort order of the record, which would change the + cursor position, and this is not allowed. To change the sort order of a + record, delete it and then re-insert it.

          +
          +
          Parameters:
          +
          data - the data used as + input. + A partial data item may be + specified to optimize for partial data update.
          +
          Returns:
          +
OperationStatus.KEYEMPTY if the key/data pair at the cursor position has been deleted; otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          DuplicateDataException - if the old and new data are not equal + according to the configured duplicate comparator or default comparator.
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if the database is transactional + but this cursor was not opened with a non-null transaction parameter, + or the database is read-only.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the cursor is uninitialized (not positioned on a record), or the + non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
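A usage sketch, assuming an open cursor handle; the key and replacement value are hypothetical:

     // Position the cursor on a record, then replace its data in place.
     DatabaseEntry key = new DatabaseEntry("alpha".getBytes(StandardCharsets.UTF_8));
     DatabaseEntry found = new DatabaseEntry();
     if (cursor.getSearchKey(key, found, null) == OperationStatus.SUCCESS) {
         DatabaseEntry newData = new DatabaseEntry("two".getBytes(StandardCharsets.UTF_8));
         OperationStatus status = cursor.putCurrent(newData);
         // KEYEMPTY here would mean the record was deleted after positioning.
     }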
        • +
        + + + +
          +
        • +

          get

          +
          public OperationResult get(DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           Get getType,
          +                           ReadOptions options)
          +
          Moves the cursor to a record according to the specified Get + type. + +

          If the operation succeeds, the record at the resulting cursor + position will be locked according to the lock mode specified, the key and/or data will + be returned via the (non-null) DatabaseEntry parameters, and a non-null + OperationResult will be returned. If the operation fails because the + record requested is not found, null is returned.

          + +

          The following table lists each allowed operation and whether the key + and data parameters are input or + output parameters. Also specified is whether the cursor must be + initialized (positioned on a record) before calling this method. See the + individual Get operations for more information.

          + +
Get operation | Description | 'key' parameter | 'data' parameter | Cursor position must be initialized?
Get.SEARCH | Searches using an exact match by key. | input | output | no
Get.SEARCH_BOTH | Searches using an exact match by key and data. | input | input | no
Get.SEARCH_GTE | Searches using a GTE match by key. | input/output | output | no
Get.SEARCH_BOTH_GTE | Searches using an exact match by key and a GTE match by data. | input | input/output | no
Get.CURRENT | Accesses the current record. | output | output | yes
Get.FIRST | Finds the first record in the database. | output | output | no
Get.LAST | Finds the last record in the database. | output | output | no
Get.NEXT | Moves to the next record. | output | output | no**
Get.NEXT_DUP | Moves to the next record with the same key. | output | output | yes
Get.NEXT_NO_DUP | Moves to the next record with a different key. | output | output | no**
Get.PREV | Moves to the previous record. | output | output | no**
Get.PREV_DUP | Moves to the previous record with the same key. | output | output | yes
Get.PREV_NO_DUP | Moves to the previous record with a different key. | output | output | no**
          + +

          ** - For these 'next' and 'previous' operations the cursor may be + uninitialized, in which case the cursor will be moved to the first or + last record, respectively.

          +
          +
          Specified by:
          +
          get in interface ForwardCursor
          +
          Parameters:
          +
          key - the key input or output parameter, depending on getType.
          +
          data - the data input or output parameter, depending on getType.
          +
          getType - the Get operation type. May not be null.
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record requested is found, else null.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + the cursor is uninitialized (not positioned on a record) and this is not + permitted (see above), or the non-transactional cursor was created in a + different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified. + This includes passing a null getType, a null input key/data parameter, + an input key/data parameter with a null data array, a partial key/data + input parameter, and specifying a lock mode of READ_COMMITTED.
          +
          Since:
          +
          7.0
          +
          +
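An informal sketch of a full forward scan using this method; the process() callback is hypothetical:

     // Full forward scan with the Get-based API; both entries must be non-null.
     DatabaseEntry key = new DatabaseEntry();
     DatabaseEntry data = new DatabaseEntry();
     while (cursor.get(key, data, Get.NEXT, null) != null) {
         // Get.NEXT moves an uninitialized cursor to the first record.
         process(key.getData(), data.getData()); // process() is hypothetical
     }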
        • +
        + + + + + + + + + + + + + + + + + + + + + + + +
          +
        • +

          getNextNoDup

          +
          public OperationStatus getNextNoDup(DatabaseEntry key,
          +                                    DatabaseEntry data,
          +                                    LockMode lockMode)
          +
          Moves the cursor to the next non-duplicate key/data pair and returns + that pair. If the matching key has duplicate values, the first data + item in the set of duplicates is returned. + +

          Calling this method is equivalent to calling get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with + Get.NEXT_NO_DUP.

          + +

          If the cursor is not yet initialized, move the cursor to the first + key/data pair of the database, and return that pair. Otherwise, the + cursor is moved to the next non-duplicate key of the database, and that + key/data pair is returned.

          +
          +
          Parameters:
          +
          key - the key returned as + output.
          +
          data - the data returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
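A sketch of iterating one record per distinct key with this method; the cursor handle is assumed open and uninitialized:

     // Visit one record per distinct key, skipping over duplicates.
     DatabaseEntry key = new DatabaseEntry();
     DatabaseEntry data = new DatabaseEntry();
     while (cursor.getNextNoDup(key, data, LockMode.DEFAULT)
            == OperationStatus.SUCCESS) {
         // 'data' holds the first duplicate for each distinct key.
     }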
        • +
        + + + +
          +
        • +

          getPrev

          +
          public OperationStatus getPrev(DatabaseEntry key,
          +                               DatabaseEntry data,
          +                               LockMode lockMode)
          +
          Moves the cursor to the previous key/data pair and returns that pair. + +

          Calling this method is equivalent to calling get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with + Get.PREV.

          + +

          If the cursor is not yet initialized, move the cursor to the last + key/data pair of the database, and return that pair. Otherwise, the + cursor is moved to the previous key/data pair of the database, and that + pair is returned. In the presence of duplicate key values, the value of + the key may not change.

          +
          +
          Parameters:
          +
          key - the key returned as + output.
          +
          data - the data returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
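A sketch of a reverse scan using this method; the cursor handle is assumed open and uninitialized:

     // Reverse scan; an uninitialized cursor starts at the last record.
     DatabaseEntry key = new DatabaseEntry();
     DatabaseEntry data = new DatabaseEntry();
     while (cursor.getPrev(key, data, LockMode.DEFAULT)
            == OperationStatus.SUCCESS) {
         // Records arrive in descending key order.
     }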
        • +
        + + + + + + + +
          +
        • +

          getPrevNoDup

          +
          public OperationStatus getPrevNoDup(DatabaseEntry key,
          +                                    DatabaseEntry data,
          +                                    LockMode lockMode)
          +
          Moves the cursor to the previous non-duplicate key/data pair and returns + that pair. If the matching key has duplicate values, the last data item + in the set of duplicates is returned. + +

          Calling this method is equivalent to calling get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with + Get.PREV_NO_DUP.

          + +

          If the cursor is not yet initialized, move the cursor to the last + key/data pair of the database, and return that pair. Otherwise, the + cursor is moved to the previous non-duplicate key of the database, and + that key/data pair is returned.

          +
          +
          Parameters:
          +
          key - the key returned as + output.
          +
          data - the data returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
        • +
        + + + +
          +
        • +

          skipNext

          +
          public long skipNext(long maxCount,
          +                     DatabaseEntry key,
          +                     DatabaseEntry data,
          +                     LockMode lockMode)
          +
          Skips forward a given number of key/data pairs and returns the number by + which the cursor is moved. + +

          Without regard to performance, calling this method is equivalent to + repeatedly calling getNext with LockMode.READ_UNCOMMITTED to skip over the desired number of key/data + pairs, and then calling getCurrent with the lockMode parameter to return the final key/data pair.

          + +

          With regard to performance, this method is optimized to skip over + key/value pairs using a smaller number of Btree operations. When there + is no contention on the bottom internal nodes (BINs) and all BINs are in + cache, the number of Btree operations is reduced by roughly two orders + of magnitude, where the exact number depends on the EnvironmentConfig.NODE_MAX_ENTRIES setting. When there is contention + on BINs or fetching BINs is required, the scan is broken up into smaller + operations to avoid blocking other threads for long time periods.

          + +

          If the returned count is greater than zero, then the key/data pair at + the new cursor position is also returned. If zero is returned, then + there are no key/value pairs that follow the cursor position and a + key/data pair is not returned.

          +
          +
          Parameters:
          +
          maxCount - the maximum number of key/data pairs to skip, i.e., the + maximum number by which the cursor should be moved; must be greater + than zero.
          +
          key - the key returned as + output.
          +
          data - the data returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          the number of key/data pairs skipped, i.e., the number by which + the cursor has moved; if zero is returned, the cursor position is + unchanged and the key/data pair is not returned.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the cursor is uninitialized (not positioned on a record), or the + non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
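An informal sketch that samples every nth record using the optimized skip; n and the cursor handle are hypothetical:

     // Sample every nth record using the optimized skip.
     final long n = 100; // hypothetical sampling interval
     DatabaseEntry key = new DatabaseEntry();
     DatabaseEntry data = new DatabaseEntry();
     if (cursor.getFirst(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
         do {
             // 'key'/'data' hold the current sampled record here.
         } while (cursor.skipNext(n, key, data, LockMode.DEFAULT) == n);
     }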
        • +
        + + + +
          +
        • +

          skipPrev

          +
          public long skipPrev(long maxCount,
          +                     DatabaseEntry key,
          +                     DatabaseEntry data,
          +                     LockMode lockMode)
          +
          Skips backward a given number of key/data pairs and returns the number + by which the cursor is moved. + +

          Without regard to performance, calling this method is equivalent to + repeatedly calling getPrev with LockMode.READ_UNCOMMITTED to skip over the desired number of key/data + pairs, and then calling getCurrent with the lockMode parameter to return the final key/data pair.

          + +

          With regard to performance, this method is optimized to skip over + key/value pairs using a smaller number of Btree operations. When there + is no contention on the bottom internal nodes (BINs) and all BINs are in + cache, the number of Btree operations is reduced by roughly two orders + of magnitude, where the exact number depends on the EnvironmentConfig.NODE_MAX_ENTRIES setting. When there is contention + on BINs or fetching BINs is required, the scan is broken up into smaller + operations to avoid blocking other threads for long time periods.

          + +

If the returned count is greater than zero, then the key/data pair at the new cursor position is also returned. If zero is returned, then there are no key/value pairs that precede the cursor position and a key/data pair is not returned.

          + +

          In a replicated environment, an explicit transaction must have been + specified when opening the cursor, unless read-uncommitted isolation is + specified via the CursorConfig or LockMode + parameter.

          +
          +
          Parameters:
          +
          maxCount - the maximum number of key/data pairs to skip, i.e., the + maximum number by which the cursor should be moved; must be greater + than zero.
          +
          key - the key returned as + output.
          +
          data - the data returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          the number of key/data pairs skipped, i.e., the number by which + the cursor has moved; if zero is returned, the cursor position is + unchanged and the key/data pair is not returned.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the cursor is uninitialized (not positioned on a record), or the + non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getSearchKeyRange

          +
          public OperationStatus getSearchKeyRange(DatabaseEntry key,
          +                                         DatabaseEntry data,
          +                                         LockMode lockMode)
          +
          Moves the cursor to the closest matching key of the database, and + returns the data item associated with the matching key. If the matching + key has duplicate values, the first data item in the set of duplicates + is returned. + +

          Calling this method is equivalent to calling get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with + Get.SEARCH_GTE.

          + +

          The returned key/data pair is for the smallest key greater than or + equal to the specified key (as determined by the key comparison + function), permitting partial key matches and range searches.

          +
          +
          Parameters:
          +
          key - the key used as + input and returned as output.
          +
          data - the data returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes + are used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
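A sketch of a prefix/range scan built on this method; the prefix and the startsWith() helper are hypothetical:

     // Scan all keys greater than or equal to a prefix, stopping when it no longer matches.
     byte[] prefix = "user:".getBytes(StandardCharsets.UTF_8); // hypothetical
     DatabaseEntry key = new DatabaseEntry(prefix);
     DatabaseEntry data = new DatabaseEntry();
     OperationStatus status = cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
     while (status == OperationStatus.SUCCESS && startsWith(key.getData(), prefix)) {
         // startsWith() is a hypothetical byte-prefix comparison helper.
         status = cursor.getNext(key, data, LockMode.DEFAULT);
     }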
        • +
        + + + + + + + +
          +
        • +

          getSearchBothRange

          +
          public OperationStatus getSearchBothRange(DatabaseEntry key,
          +                                          DatabaseEntry data,
          +                                          LockMode lockMode)
          +
          Moves the cursor to the specified key and closest matching data item of + the database. + +

          Calling this method is equivalent to calling get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with + Get.SEARCH_BOTH_GTE.

          + +

          In the case of any database supporting sorted duplicate sets, the + returned key/data pair is for the smallest data item greater than or + equal to the specified data item (as determined by the duplicate + comparison function), permitting partial matches and range searches in + duplicate data sets.

          + +

          In the case of databases that do not support sorted duplicate sets, + this method is equivalent to getSearchBoth.

          +
          +
          Parameters:
          +
          key - the key used as + input.
          +
          data - the data used as + input and returned as output.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
        • +
        + + + +
          +
        • +

          count

          +
          public int count()
          +
          Returns a count of the number of data items for the key to which the + cursor refers. + +

          If the database is configured for duplicates, the database is scanned + internally, without taking any record locks, to count the number of + non-deleted entries. Although the internal scan is more efficient under + some conditions, the result is the same as if a cursor were used to + iterate over the entries using LockMode.READ_UNCOMMITTED.

          + +

If the database is not configured for duplicates, the count returned is always zero or one, depending on whether the record at the cursor position is deleted.

          + +

          The cost of this method is directly proportional to the number of + records scanned.

          +
          +
          Returns:
          +
          A count of the number of data items for the key to which the + cursor refers.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the cursor is uninitialized (not positioned on a record), or the + non-transactional cursor was created in a different thread.
          +
          +
        • +
        + + + +
          +
        • +

          countEstimate

          +
          public long countEstimate()
          +
          Returns a rough estimate of the count of the number of data items for + the key to which the cursor refers. + +

          If the database is configured for duplicates, a quick estimate of the + number of records is computed using information in the Btree. Because + the Btree is unbalanced, in some cases the estimate may be off by a + factor of two or more. The estimate is accurate when the number of + records is less than the configured NodeMaxEntries.

          + +

If the database is not configured for duplicates, the count returned is always zero or one, depending on whether the record at the cursor position is deleted.

          + +

The cost of this method is fixed, rather than being proportional to the number of records scanned. Because its accuracy is variable, this method should normally be used only when accuracy is not required and a fixed-cost operation is needed, such as for query optimization. For example, this method is used internally for determining the index processing order in a JoinCursor.

          +
          +
          Returns:
          +
          an estimate of the count of the number of data items for the key + to which the cursor refers.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the cursor is uninitialized (not positioned on a record), or the + non-transactional cursor was created in a different thread.
          +
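A brief sketch contrasting count() with countEstimate(); the cursor positioning and entries are hypothetical:

     // Exact count vs. fixed-cost estimate for the duplicates of one key.
     if (cursor.getSearchKey(key, data, null) == OperationStatus.SUCCESS) {
         int exact = cursor.count();          // cost proportional to duplicate count
         long rough = cursor.countEstimate(); // fixed cost, may be off by 2x or more
     }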
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/CursorConfig.html b/docs/java/com/sleepycat/je/CursorConfig.html new file mode 100644 index 0000000..d083ead --- /dev/null +++ b/docs/java/com/sleepycat/je/CursorConfig.html @@ -0,0 +1,608 @@ + + + + + +CursorConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class CursorConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class CursorConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
Specifies the attributes of a database cursor. An instance created with the default constructor is initialized with the system's default settings.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + + + + + + + + + +
        Fields 
Modifier and Type | Field and Description
static CursorConfig | DEFAULT
Default configuration used if null is passed to methods that create a cursor.
static CursorConfig | READ_COMMITTED
A convenience instance to configure a cursor for read committed isolation.
static CursorConfig | READ_UNCOMMITTED
A convenience instance to configure read operations performed by the cursor to return modified but not yet committed data.
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        CursorConfig() +
        An instance created using the default constructor is initialized with + the system's default settings.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Modifier and Type | Method and Description
CursorConfig | clone()
Returns a copy of this configuration object.
boolean | getNonSticky()
Returns the non-sticky setting.
boolean | getReadCommitted()
Returns true if read operations performed by the cursor are configured to obey read committed isolation.
boolean | getReadUncommitted()
Returns true if read operations performed by the cursor are configured to return modified but not yet committed data.
CursorConfig | setNonSticky(boolean nonSticky)
Configures the behavior of the cursor when a cursor movement operation returns OperationStatus.NOTFOUND.
CursorConfig | setReadCommitted(boolean readCommitted)
Configures read operations performed by the cursor to obey read committed isolation.
CursorConfig | setReadUncommitted(boolean readUncommitted)
Configures read operations performed by the cursor to return modified but not yet committed data.
java.lang.String | toString()
Returns the values for each configuration attribute.
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          DEFAULT

          +
          public static final CursorConfig DEFAULT
          +
          Default configuration used if null is passed to methods that create a + cursor.
          +
        • +
        + + + +
          +
        • +

          READ_UNCOMMITTED

          +
          public static final CursorConfig READ_UNCOMMITTED
          +
          A convenience instance to configure read operations performed by the + cursor to return modified but not yet committed data.
          +
        • +
        + + + +
          +
        • +

          READ_COMMITTED

          +
          public static final CursorConfig READ_COMMITTED
          +
          A convenience instance to configure a cursor for read committed + isolation. + + This ensures the stability of the current data item read by the cursor + but permits data read by this cursor to be modified or deleted prior to + the commit of the transaction.
          +
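An informal sketch of using this instance when opening a cursor; the db and txn handles are hypothetical:

     // Open a cursor with read committed isolation; db and txn are hypothetical.
     Cursor cursor = db.openCursor(txn, CursorConfig.READ_COMMITTED);
     try {
         // Read locks are released as the cursor moves off each record.
     } finally {
         cursor.close();
     }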
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          CursorConfig

          +
          public CursorConfig()
          +
          An instance created using the default constructor is initialized with + the system's default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setReadUncommitted

          +
          public CursorConfig setReadUncommitted(boolean readUncommitted)
          +
          Configures read operations performed by the cursor to return modified + but not yet committed data.
          +
          +
          Parameters:
          +
          readUncommitted - If true, configure read operations performed by + the cursor to return modified but not yet committed data.
          +
          Returns:
          +
          this
          +
          See Also:
          +
          LockMode.READ_UNCOMMITTED
          +
          +
        • +
        + + + +
          +
        • +

          getReadUncommitted

          +
          public boolean getReadUncommitted()
          +
          Returns true if read operations performed by the cursor are configured + to return modified but not yet committed data.
          +
          +
          Returns:
          +
          true if read operations performed by the cursor are configured + to return modified but not yet committed data.
          +
          See Also:
          +
          LockMode.READ_UNCOMMITTED
          +
          +
        • +
        + + + +
          +
        • +

          setReadCommitted

          +
          public CursorConfig setReadCommitted(boolean readCommitted)
          +
          Configures read operations performed by the cursor to obey read + committed isolation. Read committed isolation provides for cursor + stability but not repeatable reads. Data items which have been + previously read by this transaction may be deleted or modified by other + transactions before the cursor is closed or the transaction completes.
          +
          +
          Parameters:
          +
          readCommitted - If true, configure read operations performed by + the cursor to obey read committed isolation.
          +
          Returns:
          +
          this
          +
          See Also:
          +
          LockMode.READ_COMMITTED
          +
          +
        • +
        + + + +
          +
        • +

          getReadCommitted

          +
          public boolean getReadCommitted()
          +
          Returns true if read operations performed by the cursor are configured + to obey read committed isolation.
          +
          +
          Returns:
          +
          true if read operations performed by the cursor are configured + to obey read committed isolation.
          +
          See Also:
          +
          LockMode.READ_COMMITTED
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getNonSticky

          +
          public boolean getNonSticky()
          +
          Returns the non-sticky setting.
          +
          +
          See Also:
          +
          setNonSticky(boolean)
          +
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public CursorConfig clone()
          +
          Returns a copy of this configuration object.
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns the values for each configuration attribute.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          Returns:
          +
          the values for each configuration attribute.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/CustomStats.html b/docs/java/com/sleepycat/je/CustomStats.html new file mode 100644 index 0000000..06e1f73 --- /dev/null +++ b/docs/java/com/sleepycat/je/CustomStats.html @@ -0,0 +1,264 @@ + + + + + +CustomStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Interface CustomStats

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface CustomStats
      +
A custom statistics object. Custom statistics allow for customization of statistics that are written at periodic intervals to the je.stats.csv file. The field names returned from the getFieldNames() method are used as column headers in the je.stats.csv file. The getFieldNames() method is only called once, when the environment is opened. The field values are associated with the field names in the order of the returned array. The getFieldValues() method is called when a row is written to the statistics file. The semantics of the values are implementation specific: the values may represent totals, incremental values (since the last getFieldValues() call), or stateless values (computed at the time the statistic is requested).
      +
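As an informal illustration (not part of the original Javadoc), a minimal sketch of an implementation that reports a single application-level counter; the class name is hypothetical, and registration via EnvironmentConfig.setCustomStats is assumed:

     // Hypothetical CustomStats implementation reporting one application counter.
     // Assumes: import com.sleepycat.je.CustomStats;
     //          import java.util.concurrent.atomic.AtomicLong;
     public class AppStats implements CustomStats {
         private final AtomicLong requests = new AtomicLong();

         public String[] getFieldNames() {
             // Called once at environment open; becomes a je.stats.csv column header.
             return new String[] { "appRequests" };
         }

         public String[] getFieldValues() {
             // Called each time a statistics row is written; here the value is a total.
             return new String[] { String.valueOf(requests.get()) };
         }

         public void recordRequest() { requests.incrementAndGet(); }
     }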
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
Modifier and Type | Method and Description
java.lang.String[] | getFieldNames()
The field names that are output to the je.stats.csv file.
java.lang.String[] | getFieldValues()
The field values that are output to the je.stats.csv file.
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getFieldNames

          +
          java.lang.String[] getFieldNames()
          +
          The field names that are output to the je.stats.csv file.
          +
          +
          Returns:
          +
Array of strings that represent the field names.
          +
          +
        • +
        + + + +
          +
        • +

          getFieldValues

          +
          java.lang.String[] getFieldValues()
          +
          The field values that are output to the je.stats.csv file.
          +
          +
          Returns:
          +
          Array of strings that represent a value for the + associated field name.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/Database.html b/docs/java/com/sleepycat/je/Database.html new file mode 100644 index 0000000..697221d --- /dev/null +++ b/docs/java/com/sleepycat/je/Database.html @@ -0,0 +1,1550 @@ + + + + + +Database (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class Database

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      Direct Known Subclasses:
      +
      SecondaryDatabase
      +
      +
      +
      +
      public class Database
      +extends java.lang.Object
      +implements java.io.Closeable
      +
      A database handle. + +

      Database attributes are specified in the DatabaseConfig class. Database handles are + free-threaded and may be used concurrently by multiple threads.

      + +

      To open an existing database with default attributes:

      + +
      +     Environment env = new Environment(home, null);
      +     Database myDatabase = env.openDatabase(null, "mydatabase", null);
      + 
      + +

      To create a transactional database that supports duplicates:

      + +
      +     DatabaseConfig dbConfig = new DatabaseConfig();
      +     dbConfig.setTransactional(true);
      +     dbConfig.setAllowCreate(true);
      +     dbConfig.setSortedDuplicates(true);
      +     Database db = env.openDatabase(txn, "mydatabase", dbConfig);
      + 
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          close

          +
          public void close()
          +
          Discards the database handle. +

          + When closing the last open handle for a deferred-write database, any + cached database information is flushed to disk as if sync() were + called. +

          + The database handle should not be closed while any other handle that + refers to it is not yet closed; for example, database handles should not + be closed while cursor handles into the database remain open, or + transactions that include operations on the database have not yet been + committed or aborted. Specifically, this includes Cursor and Transaction handles. +

          + When multiple threads are using the Database handle concurrently, only a single thread may call this + method. +

          + When called on a database that is the primary database for a secondary + index, the primary database should be closed only after all secondary + indices which reference it have been closed. +

          + The database handle may not be accessed again after this method is + called, regardless of the method's success or failure, with one + exception: the close method itself may be called any number of + times.

          + +

          WARNING: To guard against memory leaks, the application should + discard all references to the closed handle. While BDB makes an effort + to discard references from closed objects to the allocated memory for an + environment, this behavior is not guaranteed. The safe course of action + for an application is to discard all references to closed BDB + objects.

          +
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if cursors associated with this database + are still open.
          +
          See Also:
          +
          DatabaseConfig.setDeferredWrite
          +
          +
        • +
        + + + +
          +
        • +

          sync

          +
          public void sync()
          +
          Flushes any cached information for this database to disk; only + applicable for deferred-write databases. +

          Note that deferred-write databases are automatically flushed to disk + when the close() method is called.

          +
          +
          Throws:
          +
          DatabasePreemptedException - in a replicated + environment if the master has truncated, removed or renamed the + database.
          +
          OperationFailureException - if this exception occurred earlier and + caused the transaction to be invalidated.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this is not a deferred-write + database, or this database is read-only.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          See Also:
          +
          DatabaseConfig.setDeferredWrite
          +
          +
        • +
        + + + +
          +
        • +

          openSequence

          +
          public Sequence openSequence(Transaction txn,
          +                             DatabaseEntry key,
          +                             SequenceConfig config)
          +
          Opens a sequence in the database.
          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may + be specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          key - The key DatabaseEntry of the sequence.
          +
          config - The sequence attributes. If null, default attributes are + used.
          +
          Returns:
          +
          a new Sequence handle.
          +
          Throws:
          +
          SequenceExistsException - if the sequence record already exists + and the SequenceConfig ExclusiveCreate parameter is true.
          +
          SequenceNotFoundException - if the sequence record does not exist + and the SequenceConfig AllowCreate parameter is false.
          +
          OperationFailureException - if one of the Read Operation + Failures occurs. If the sequence does not exist and the AllowCreate parameter is true, then one + of the Write + Operation Failures may also occur.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this database is read-only, or + this database is configured for duplicates.
          +
          java.lang.IllegalStateException - if the Sequence record is deleted by + another thread during this method invocation, or the database has been + closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, an invalid SequenceConfig parameter.
          +
          +
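An informal usage sketch; the sequence key name is hypothetical and error handling is omitted:

     // Create (if absent) and use a sequence; the key name is hypothetical.
     SequenceConfig seqConfig = new SequenceConfig();
     seqConfig.setAllowCreate(true);
     DatabaseEntry seqKey =
         new DatabaseEntry("order-ids".getBytes(StandardCharsets.UTF_8));
     Sequence seq = db.openSequence(null, seqKey, seqConfig);
     try {
         long id = seq.get(null, 1); // atomically reserves one value
     } finally {
         seq.close();
     }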
        • +
        + + + +
          +
        • +

          removeSequence

          +
          public void removeSequence(Transaction txn,
          +                           DatabaseEntry key)
          +
          Removes the sequence from the database. This method should not be + called if there are open handles on this sequence.
          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          key - The key DatabaseEntry of the sequence.
          +
          Throws:
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this database is read-only.
          +
          +
        • +
        + + + +
          +
        • +

          openCursor

          +
          public Cursor openCursor(Transaction txn,
          +                         CursorConfig cursorConfig)
          +
          Returns a cursor into the database.
          +
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the database is non-transactional, null must be + specified. For a transactional database, the transaction is optional + for read-only access and required for read-write access.
          +
          cursorConfig - The cursor attributes. If null, default attributes + are used.
          +
          Returns:
          +
          A database cursor.
          +
          Throws:
          +
          DatabasePreemptedException - in a replicated + environment if the master has truncated, removed or renamed the + database.
          +
          OperationFailureException - if this exception occurred earlier and + caused the transaction to be invalidated.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, an invalid CursorConfig parameter.
          +
          +
        • +
        + + + +
          +
        • +

          openCursor

          +
          public DiskOrderedCursor openCursor(DiskOrderedCursorConfig cursorConfig)
          +
Creates a DiskOrderedCursor to iterate over the records in 'this' Database. Because the retrieval is based on Log Sequence Number (LSN) order rather than key order, records are returned in unsorted order in exchange for generally faster retrieval. LSN order approximates disk sector order.

          + See DiskOrderedCursor for more details and a description of the + consistency guarantees provided by the scan. +

          + WARNING: After calling this method, deletion of log files by + the JE log cleaner will be disabled until DiskOrderedCursor.close() is called. To prevent unbounded growth of + disk usage, be sure to call DiskOrderedCursor.close() to + re-enable log file deletion.

          +
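A usage sketch, assuming an open db handle; note the close() call that re-enables log file deletion:

     // Fast, unsorted full scan; close promptly to re-enable log file deletion.
     DiskOrderedCursor doc = db.openCursor(new DiskOrderedCursorConfig());
     try {
         DatabaseEntry key = new DatabaseEntry();
         DatabaseEntry data = new DatabaseEntry();
         while (doc.getNext(key, data, null) == OperationStatus.SUCCESS) {
             // Records arrive in approximate disk (LSN) order, not key order.
         }
     } finally {
         doc.close();
     }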
        • +
        + + + +
          +
        • +

          delete

          +
          public OperationResult delete(Transaction txn,
          +                              DatabaseEntry key,
          +                              WriteOptions options)
          +
          Removes records with a given key from the database. In the presence of + duplicate keys, all records associated with the given key will be + removed. When the database has associated secondary databases, this + method also deletes the associated index records.
          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may + be specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          key - the key used as + input.
          +
          options - the WriteOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record is deleted, else null if the + given key was not found in the database.
          +
          Throws:
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this database is read-only.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified. + This includes passing a null input key parameter, an input key parameter + with a null data array, or a partial key input parameter.
          +
          Since:
          +
          7.0
          +
          +
        • +
        + + + +
          +
        • +

          delete

          +
          public OperationStatus delete(Transaction txn,
          +                              DatabaseEntry key)
          +
          Removes records with a given key from the database. In the presence of + duplicate keys, all records associated with the given key will be + removed. When the database has associated secondary databases, this + method also deletes the associated index records. + +

          Calling this method is equivalent to calling delete(Transaction, DatabaseEntry, WriteOptions).

          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may + be specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          key - the key used as + input.
          +
          Returns:
          +
          The method will return OperationStatus.NOTFOUND if + the given key is not found in the database; otherwise OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this database is read-only.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified. + This includes passing a null input key parameter, an input key parameter + with a null data array, or a partial key input parameter.
          +
          +
        • +
        + + + +
          +
        • +

          get

          +
          public OperationResult get(Transaction txn,
          +                           DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           Get getType,
          +                           ReadOptions options)
          +
          Retrieves a record according to the specified Get type. + +

          If the operation succeeds, the record will be locked according to the + lock mode specified, the key and/or + data will be returned via the (non-null) DatabaseEntry parameters, and a + non-null OperationResult will be returned. If the operation fails + because the record requested is not found, null is returned.

          + +

          The following table lists each allowed operation and whether the key + and data parameters are input or + output parameters. See the individual Get operations for + more information.

          + +
Get operation | Description | 'key' parameter | 'data' parameter
Get.SEARCH | Searches using an exact match by key. | input | output
Get.SEARCH_BOTH | Searches using an exact match by key and data. | input | input
          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified to transaction-protect the operation, or null may be specified + to perform the operation without transaction protection. For a + non-transactional database, null must be specified.
          +
          key - the key input parameter.
          +
          data - the data input or output parameter, depending on getType.
          +
          getType - the Get operation type. May not be null.
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record requested is found, else null.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified. + This includes passing a null getType, a null input key/data parameter, + an input key/data parameter with a null data array, and a partial + key/data input parameter.
          +
          Since:
          +
          7.0
          +
          +
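An informal sketch of an exact-match read; the db handle and key value are hypothetical:

     // Exact-match read; a null txn performs the read without transaction protection.
     DatabaseEntry key = new DatabaseEntry("alpha".getBytes(StandardCharsets.UTF_8));
     DatabaseEntry data = new DatabaseEntry();
     OperationResult result = db.get(null, key, data, Get.SEARCH, null);
     if (result != null) {
         // Found: 'data' now holds the record's value.
     }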
        • +
        + + + + + + + + + + + +
          +
        • +

          put

          +
          public OperationResult put(Transaction txn,
          +                           DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           Put putType,
          +                           WriteOptions options)
          +
          Inserts or updates a record according to the specified Put + type. + +

          If the operation succeeds, the record will be locked according to the + lock mode specified, the cursor will + be positioned on the record, and a non-null OperationResult will be + returned. If the operation fails because the record already exists (or + does not exist, depending on the putType), null is returned.

          + +

          When the database has associated secondary databases, this method + also inserts or deletes associated index records as necessary.

          + +

          The following table lists each allowed operation. See the individual + Put operations for more information.

          + +
Put operation | Description | Returns null when? | Other special rules
Put.OVERWRITE | Inserts or updates a record depending on whether a matching record is already present. | Never returns null. | Without duplicates, a matching record is one with the same key; with duplicates, it is one with the same key and data.
Put.NO_OVERWRITE | Inserts a record if a record with a matching key is not already present. | When an existing record matches. | If the database has duplicate keys, a record is inserted only if there are no records with a matching key.
Put.NO_DUP_DATA | Inserts a record in a database with duplicate keys if a record with a matching key and data is not already present. | When an existing record matches. | Without duplicates, this operation is not allowed.
          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          key - the key used as + input.
          +
          data - the data used as + input.
          +
          putType - the Put operation type. May not be null.
          +
          options - the WriteOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record is written, else null.
          +
          Throws:
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if the database is read-only, or + putType is Put.NO_DUP_DATA and the database is not configured for + duplicates.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified. + This includes passing a null putType, a null input key/data parameter, + an input key/data parameter with a null data array, a partial key/data + input parameter, or when putType is Put.CURRENT.
          +
          Since:
          +
          7.0
          +
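          For illustration, a minimal sketch of an insert-if-absent using this method and the table above, assuming an open transactional Database handle named db (names and values are illustrative):

              import com.sleepycat.je.*;
              import java.nio.charset.StandardCharsets;

              DatabaseEntry key = new DatabaseEntry("user-42".getBytes(StandardCharsets.UTF_8));
              DatabaseEntry data = new DatabaseEntry("alice".getBytes(StandardCharsets.UTF_8));

              // Put.NO_OVERWRITE returns null when a record with this key
              // exists; a null txn uses auto-commit on a transactional database.
              OperationResult r = db.put(null, key, data, Put.NO_OVERWRITE, null);
              if (r == null) {
                  // Key already present; fall back to an unconditional overwrite.
                  db.put(null, key, data, Put.OVERWRITE, null);
              }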
          +
        • +
        + + + + + + + + + + + + + + + +
          +
        • +

          join

          +
          public JoinCursor join(Cursor[] cursors,
          +                       JoinConfig config)
          +
          Creates a specialized join cursor for use in performing equality or + natural joins on secondary indices. + +

          Each cursor in the cursors array must have been + initialized to refer to the key on which the underlying database should + be joined. Typically, this initialization is done by calling Cursor.getSearchKey.

          + +

          Once the cursors have been passed to this method, they should not be + accessed or modified until the newly created join cursor has been + closed, or else inconsistent results may be returned. However, the + position of the cursors will not be changed by this method or by the + methods of the join cursor.

          +
          +
          Parameters:
          +
          cursors - an array of cursors associated with this primary + database.
          +
          config - The join attributes. If null, default attributes are + used.
          +
          Returns:
          +
          a specialized cursor that returns the results of the equality + join operation.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, an invalid JoinConfig parameter.
          +
          See Also:
          +
          JoinCursor
          +
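          For illustration, a hedged sketch of an equality join over two secondary indices, assuming a primary Database named personDb with SecondaryDatabase handles byAge and byCity (all names and key values are illustrative):

              import com.sleepycat.je.*;
              import java.nio.charset.StandardCharsets;

              SecondaryCursor ageCursor = byAge.openCursor(null, null);
              SecondaryCursor cityCursor = byCity.openCursor(null, null);
              JoinCursor joinCursor = null;
              try {
                  DatabaseEntry found = new DatabaseEntry();
                  // Position each secondary cursor on the key to join on
                  // (status checks omitted for brevity).
                  ageCursor.getSearchKey(new DatabaseEntry(new byte[] { 42 }), found, LockMode.DEFAULT);
                  cityCursor.getSearchKey(new DatabaseEntry("Boston".getBytes(StandardCharsets.UTF_8)), found, LockMode.DEFAULT);

                  joinCursor = personDb.join(new Cursor[] { ageCursor, cityCursor }, null);
                  DatabaseEntry key = new DatabaseEntry();
                  DatabaseEntry data = new DatabaseEntry();
                  while (joinCursor.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                      // key/data hold a primary record matching both secondary keys.
                  }
              } finally {
                  if (joinCursor != null) { joinCursor.close(); }
                  cityCursor.close();
                  ageCursor.close();
              }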
          +
        • +
        + + + +
          +
        • +

          preload

          +
          public void preload(long maxBytes)
          +
          Deprecated. As of JE 2.0.83, replaced by preload(PreloadConfig).

          +
          Preloads the cache. This method should only be called when there are no + operations being performed on the database in other threads. Executing + preload during concurrent updates may result in some or all of the tree + not being loaded into the JE cache. Executing preload during any other + types of operations may result in JE exceeding its allocated cache + size. preload() effectively locks the entire database and therefore will + lock out the checkpointer, cleaner, and compressor, and will not allow + eviction to occur.
          +
          +
          Parameters:
          +
          maxBytes - The maximum number of bytes to load. If maxBytes is 0, + je.evictor.maxMemory is used.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          +
        • +
        + + + +
          +
        • +

          preload

          +
          public void preload(long maxBytes,
          +                    long maxMillisecs)
          +
          Deprecated. As of JE 2.0.101, replaced by preload(PreloadConfig).

          +
          Preloads the cache. This method should only be called when there are no + operations being performed on the database in other threads. Executing + preload during concurrent updates may result in some or all of the tree + not being loaded into the JE cache. Executing preload during any other + types of operations may result in JE exceeding its allocated cache + size. preload() effectively locks the entire database and therefore will + lock out the checkpointer, cleaner, and compressor, and will not allow + eviction to occur.
          +
          +
          Parameters:
          +
          maxBytes - The maximum number of bytes to load. If maxBytes is 0, + je.evictor.maxMemory is used.
          +
          maxMillisecs - The maximum time in milliseconds to use when + preloading. Preloading stops once this limit has been reached. If + maxMillisecs is 0, preloading can go on indefinitely or until maxBytes + (if non-0) is reached.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          +
        • +
        + + + +
          +
        • +

          preload

          +
          public PreloadStats preload(PreloadConfig config)
          +
          Preloads the cache. This method should only be called when there are no + operations being performed on the database in other threads. Executing + preload during concurrent updates may result in some or all of the tree + not being loaded into the JE cache. Executing preload during any other + types of operations may result in JE exceeding its allocated cache + size. preload() effectively locks the entire database and therefore will + lock out the checkpointer, cleaner, and compressor, and will not allow + eviction to occur. If the database is replicated and the environment is + in the replica state, then the replica may become temporarily + disconnected from the master if the replica needs to replay changes + against the database and is locked out because the time taken by the + preload operation exceeds ReplicationConfig.FEEDER_TIMEOUT. +

          + While this method preloads a single database, Environment.preload(com.sleepycat.je.Database[], com.sleepycat.je.PreloadConfig) lets you preload multiple databases.

          +
          +
          Parameters:
          +
          config - The PreloadConfig object that specifies the parameters + of the preload.
          +
          Returns:
          +
          A PreloadStats object with various statistics about the + preload() operation.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if PreloadConfig.getMaxBytes is + greater than size of the JE cache.
          +
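          For illustration, a minimal sketch of a bounded cache warm-up using this method, assuming an open Database handle named db; the limits shown are illustrative:

              import com.sleepycat.je.*;

              PreloadConfig preloadConfig = new PreloadConfig();
              preloadConfig.setMaxBytes(64L * 1024 * 1024); // stop after ~64 MB is cached
              preloadConfig.setMaxMillisecs(5000);          // or after 5 seconds elapse

              PreloadStats stats = db.preload(preloadConfig);
              System.out.println("INs loaded: " + stats.getNINsLoaded()
                                 + ", LNs loaded: " + stats.getNLNsLoaded());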
          +
        • +
        + + + +
          +
        • +

          count

          +
          public long count()
          +
          Counts the key/data pairs in the database. This operation is faster than + obtaining a count from a cursor based scan of the database, and will not + perturb the current contents of the cache. However, the count is not + guaranteed to be accurate if there are concurrent updates. Note that + this method does scan a significant portion of the database and should + be considered a fairly expensive operation. +

          + This operation uses an internal infrastructure and algorithm that is + similar to the one used for the DiskOrderedCursor. Specifically, + it will disable deletion of log files by the JE log cleaner during its + execution and will consume a certain amount of memory (but without + affecting the memory that is available for the JE cache). To avoid + excessive memory consumption (and a potential OutOfMemoryError) + this method places an internal limit on its memory consumption. If this + limit is reached, the method will still work properly, but its + performance will degrade. To specify a different memory limit than the + one used by this method, use the + count(long memoryLimit) method. +

          + Currently, the internal memory limit is calculated as 10% of the + difference between the max JVM memory (the value returned by + Runtime.getRuntime().maxMemory()) and the configured JE cache size.

          +
          +
          Returns:
          +
          The count of key/data pairs in the database.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          See Also:
          +
          Cache + Statistics: Unexpected Sizes
          +
          +
        • +
        + + + +
          +
        • +

          count

          +
          public long count(long memoryLimit)
          +
          Counts the key/data pairs in the database. This operation is faster than + obtaining a count from a cursor based scan of the database, and will not + perturb the current contents of the cache. However, the count is not + guaranteed to be accurate if there are concurrent updates. Note that + this method does scan a significant portion of the database and should + be considered a fairly expensive operation. +

          + This operation uses an internal infrastructure and algorithm that is + similar to the one used for the DiskOrderedCursor. Specifically, + it will disable deletion of log files by the JE log cleaner during its + execution and will consume a certain amount of memory (but without + affecting the memory that is available for the JE cache). To avoid + excessive memory consumption (and a potential OutOfMemoryError) + this method takes as input an upper bound on the memory it may consume. + If this limit is reached, the method will still work properly, but its + performance will degrade.

          +
          +
          Parameters:
          +
          memoryLimit - The maximum memory (in bytes) that may be consumed + by this method.
          +
          Returns:
          +
          The count of key/data pairs in the database.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          See Also:
          +
          Cache + Statistics: Unexpected Sizes
          +
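          For illustration, a minimal sketch contrasting the two count variants, assuming an open Database handle named db; the explicit 32 MB limit is illustrative:

              // Uses the internal memory limit described above.
              long total = db.count();

              // Supplies an explicit upper bound on memory consumed by the scan.
              long totalWithLimit = db.count(32L * 1024 * 1024);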
          +
        • +
        + + + +
          +
        • +

          getStats

          +
          public DatabaseStats getStats(StatsConfig config)
          +
          Returns database statistics. + +

          If this method has not been configured to avoid expensive operations + (using the StatsConfig.setFast method), it will access some or all of the pages in + the database, incurring a severe performance penalty as well as possibly + flushing the underlying cache.

          + +

          In the presence of multiple threads or processes accessing an active + database, the information returned by this method may be + out-of-date.

          +
          +
          Parameters:
          +
          config - Configures the statistics that are returned; if null, default statistics are + returned.
          +
          Returns:
          +
          Database statistics.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
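          For illustration, a minimal sketch of a low-cost statistics call, assuming an open Database handle named db:

              import com.sleepycat.je.*;

              StatsConfig statsConfig = new StatsConfig();
              statsConfig.setFast(true); // avoid walking pages; cheap statistics only

              DatabaseStats stats = db.getStats(statsConfig);
              System.out.println(stats);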
          +
        • +
        + + + +
          +
        • +

          verify

          +
          public DatabaseStats verify(VerifyConfig config)
          +
          Verifies the integrity of the database. + +

          Verification is an expensive operation that should normally only be + used for troubleshooting and debugging.

          +
          +
          Parameters:
          +
          config - Configures the verify operation; if null, the default + operation is performed.
          +
          Returns:
          +
          Database statistics.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if a corruption is detected, or if + an unexpected, internal or environment-wide failure occurs. If a + persistent corruption is detected, + EnvironmentFailureException.isCorrupted() will return true.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
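          For illustration, a minimal sketch of an integrity check, assuming an open Database handle named db and quiescent traffic; the setPrintInfo option is an assumption about VerifyConfig:

              import com.sleepycat.je.*;

              VerifyConfig verifyConfig = new VerifyConfig();
              verifyConfig.setPrintInfo(false); // set true for verbose per-node output

              // Throws EnvironmentFailureException if corruption is detected.
              DatabaseStats stats = db.verify(verifyConfig);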
          +
        • +
        + + + +
          +
        • +

          getDatabaseName

          +
          public java.lang.String getDatabaseName()
          +
          Returns the database name. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          The database name.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          +
        • +
        + + + +
          +
        • +

          getConfig

          +
          public DatabaseConfig getConfig()
          +
          Returns this Database object's configuration. + +

          This may differ from the configuration used to open this object if + the database existed previously.

          + +

          Unlike most Database methods, this method may be called after the + database is closed.

          +
          +
          Returns:
          +
          This Database object's configuration.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          +
        • +
        + + + +
          +
        • +

          getEnvironment

          +
          public Environment getEnvironment()
          +
          Returns the Environment handle for + the database environment underlying the Database. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          The Environment handle + for the database environment underlying the Database.
          +
          +
        • +
        + + + +
          +
        • +

          getSecondaryDatabases

          +
          public java.util.List<SecondaryDatabase> getSecondaryDatabases()
          +
          Returns a list of all SecondaryDatabase objects associated with a primary database. + +

          If no secondaries are associated with this database, an empty list is + returned.

          +
        • +
        + + + +
          +
        • +

          compareKeys

          +
          public int compareKeys(DatabaseEntry entry1,
          +                       DatabaseEntry entry2)
          +
          Compares two keys using either the default comparator if no BTree + comparator has been set or the BTree comparator if one has been set.
          +
          +
          Returns:
          +
          -1 if entry1 compares less than entry2, + 0 if entry1 compares equal to entry2, + 1 if entry1 compares greater than entry2
          +
          Throws:
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if either entry is a partial + DatabaseEntry, or is null.
          +
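          For illustration, a minimal sketch, assuming an open Database handle named db; it orders two candidate keys exactly as this database's btree would:

              import com.sleepycat.je.DatabaseEntry;
              import java.nio.charset.StandardCharsets;

              DatabaseEntry a = new DatabaseEntry("apple".getBytes(StandardCharsets.UTF_8));
              DatabaseEntry b = new DatabaseEntry("banana".getBytes(StandardCharsets.UTF_8));

              int cmp = db.compareKeys(a, b); // -1 here under the default lexicographic order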
          +
        • +
        + + + +
          +
        • +

          compareDuplicates

          +
          public int compareDuplicates(DatabaseEntry entry1,
          +                             DatabaseEntry entry2)
          +
          Compares two data elements using either the default comparator if no + duplicate comparator has been set or the duplicate comparator if one has + been set.
          +
          +
          Returns:
          +
          -1 if entry1 compares less than entry2, + 0 if entry1 compares equal to entry2, + 1 if entry1 compares greater than entry2
          +
          Throws:
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if either entry is a partial + DatabaseEntry, or is null.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DatabaseComparator.html b/docs/java/com/sleepycat/je/DatabaseComparator.html new file mode 100644 index 0000000..b69cce5 --- /dev/null +++ b/docs/java/com/sleepycat/je/DatabaseComparator.html @@ -0,0 +1,254 @@ + + + + + +DatabaseComparator (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je
    +

    Interface DatabaseComparator

    +
    +
    +
    +
      +
    • +
      +
      All Superinterfaces:
      +
      java.util.Comparator<byte[]>, java.io.Serializable
      +
      +
      +
      +
      public interface DatabaseComparator
      +extends java.util.Comparator<byte[]>, java.io.Serializable
      +
      Implemented by btree and duplicate comparators that need to be initialized + before they are used or need access to the environment's ClassLoader + property.
      +
      +
      Since:
      +
      5.0
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        Modifier and Type | Method and Description
        void | initialize(java.lang.ClassLoader loader) +
        Called to initialize a comparator object after it is instantiated or + deserialized, and before it is used.
        +
        +
          +
        • + + +

          Methods inherited from interface java.util.Comparator

          +compare, comparing, comparing, comparingDouble, comparingInt, comparingLong, equals, naturalOrder, nullsFirst, nullsLast, reversed, reverseOrder, thenComparing, thenComparing, thenComparing, thenComparingDouble, thenComparingInt, thenComparingLong
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          initialize

          +
          void initialize(java.lang.ClassLoader loader)
          +
          Called to initialize a comparator object after it is instantiated or + deserialized, and before it is used.
          +
          +
          Parameters:
          +
          loader - is the environment's ClassLoader property.
          +
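          For illustration, a hedged sketch of a comparator that uses initialize(); the class name is illustrative, and the loader could be used, for example, to resolve application classes during comparison:

              import com.sleepycat.je.DatabaseComparator;

              public class InitializedComparator implements DatabaseComparator {
                  private static final long serialVersionUID = 1L;
                  private transient ClassLoader loader;

                  @Override
                  public void initialize(ClassLoader loader) {
                      // Called after instantiation/deserialization, before first use.
                      this.loader = loader;
                  }

                  @Override
                  public int compare(byte[] k1, byte[] k2) {
                      // Default-style unsigned byte-by-byte comparison.
                      int min = Math.min(k1.length, k2.length);
                      for (int i = 0; i < min; i++) {
                          int d = (k1[i] & 0xff) - (k2[i] & 0xff);
                          if (d != 0) {
                              return d;
                          }
                      }
                      return k1.length - k2.length;
                  }
              }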
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DatabaseConfig.html b/docs/java/com/sleepycat/je/DatabaseConfig.html new file mode 100644 index 0000000..7caa9b2 --- /dev/null +++ b/docs/java/com/sleepycat/je/DatabaseConfig.html @@ -0,0 +1,1737 @@ + + + + + +DatabaseConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je
    +

    Class DatabaseConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      Direct Known Subclasses:
      +
      SecondaryConfig
      +
      +
      +
      +
      public class DatabaseConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +

      Specifies the attributes of a database.

      + +

      There are two groups of database attributes: per-database handle + attributes, and database-wide attributes. An attribute may be + persistent/transient or mutable/immutable:

      Scope                          | Mutable | Persistent | Attribute
      Database-wide attribute        | True    | True       | btree comparator, duplicate comparator, key prefixing, nodeMaxEntries
      Database-wide attribute        | True    | False      | deferred write, transactional
      Database-wide attribute        | False   | True       | sorted duplicates
      Database-wide attribute        | False   | False      | temporary
      Per-database handle attributes | False   | False      | allow create, exclusive create, read only, cache mode, use existing config
      +
      +

      + +

      Persistent attributes will be saved in the log and remain in effect + every time the environment is reopened. Transient attributes only remain + in effect until:

      + +
        +
      • the database configuration is updated
      • +
      the database handle (per-database handle attributes) is closed, or all + handles for this database (database-wide attributes) are closed.
      • +
      +
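      For illustration, a minimal sketch of configuring and opening a database with this class, assuming an open Environment handle named env (names are illustrative):

          import com.sleepycat.je.*;

          DatabaseConfig dbConfig = new DatabaseConfig();
          dbConfig.setAllowCreate(true);   // per-handle attribute
          dbConfig.setTransactional(true); // database-wide, transient attribute

          Database db = env.openDatabase(null, "myDatabase", dbConfig);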
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          DEFAULT

          +
          public static final DatabaseConfig DEFAULT
          +
          An instance created using the default constructor is initialized with + the system's default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DatabaseConfig

          +
          public DatabaseConfig()
          +
          An instance created using the default constructor is initialized with + the system's default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setAllowCreate

          +
          public DatabaseConfig setAllowCreate(boolean allowCreate)
          +
          Configures the Environment.openDatabase method to create the database if it does not + already exist.
          +
          +
          Parameters:
          +
          allowCreate - If true, configure the Environment.openDatabase + method to create the database if it does not already exist.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getAllowCreate

          +
          public boolean getAllowCreate()
          +
          Returns true if the Environment.openDatabase method is configured to create the database + if it does not already exist. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the Environment.openDatabase method is configured to create the database + if it does not already exist.
          +
          +
        • +
        + + + +
          +
        • +

          setExclusiveCreate

          +
          public DatabaseConfig setExclusiveCreate(boolean exclusiveCreate)
          +
          Configure the Environment.openDatabase method to fail if the database already exists. + +

          The exclusiveCreate mode is only meaningful if specified with the + allowCreate mode.

          +
          +
          Parameters:
          +
          exclusiveCreate - If true, configure the Environment.openDatabase + method to fail if the database already exists.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getExclusiveCreate

          +
          public boolean getExclusiveCreate()
          +
          Returns true if the Environment.openDatabase method is configured to fail if the database + already exists. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the Environment.openDatabase method is configured to fail if the database + already exists.
          +
          +
        • +
        + + + +
          +
        • +

          setSortedDuplicates

          +
          public DatabaseConfig setSortedDuplicates(boolean sortedDuplicates)
          +
          Configures the database to support records with duplicate keys. + +

          When duplicate keys are configured for a database, key prefixing is + also implicitly configured. Without key prefixing, databases with + duplicates would store keys inefficiently. Key prefixing is therefore + mandatory for databases with duplicates.

          + +

          Although two records may have the same key, they may not also have + the same data item. Two identical records that have the same key and + data may not be stored in a database.

          + +

          The ordering of duplicates in the database is determined by the + duplicate comparison function. See setDuplicateComparator(java.util.Comparator<byte[]>). If + the application does not specify a duplicate comparison function, a + default lexical comparison will be used.

          + +

          If a primary database is to be associated with one or more secondary + databases, it may not be configured for duplicates.

          + +

          Calling this method affects the database, including all threads of + control accessing the database.

          + +

          If the database already exists when the database is opened, any + database configuration specified by this method must be the same as the + existing database or an error will be returned.

          +
          +
          Parameters:
          +
          sortedDuplicates - If true, configure the database to support + duplicate data items. A value of false is illegal to this method, that + is, once set, the configuration cannot be cleared.
          +
          Returns:
          +
          this
          +
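          For illustration, a minimal sketch of creating a database that supports duplicate keys, assuming an open Environment handle named env (names are illustrative):

              import com.sleepycat.je.*;

              DatabaseConfig dupConfig = new DatabaseConfig();
              dupConfig.setAllowCreate(true);
              dupConfig.setSortedDuplicates(true); // implies key prefixing; cannot be cleared

              Database dupDb = env.openDatabase(null, "tagsByUser", dupConfig);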
          +
        • +
        + + + +
          +
        • +

          getSortedDuplicates

          +
          public boolean getSortedDuplicates()
          +
          Returns true if the database is configured to support records with + duplicate keys. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the database is configured to support records with + duplicate keys.
          +
          +
        • +
        + + + +
          +
        • +

          getKeyPrefixing

          +
          public boolean getKeyPrefixing()
          +
          Returns the key prefixing configuration. Note that key prefixing is + always enabled for a database with duplicates configured.
          +
          +
          Returns:
          +
          true if key prefixing has been enabled in this database.
          +
          +
        • +
        + + + +
          +
        • +

          setKeyPrefixing

          +
          public DatabaseConfig setKeyPrefixing(boolean keyPrefixing)
          +
          Configure the database to support key prefixing. + +

          Key prefixing causes the representation of keys in the b-tree + internal nodes to be split in each BIN (bottom internal node) between + the common prefix of all keys and the suffixes. Using this often + results in a more space-efficient representation in both the + in-memory and on-disk formats. In general the cost of maintaining + the prefix separately is low compared to the benefit, and therefore + enabling key prefixing is strongly recommended.

          + +

          When duplicate keys are configured for a database, key prefixing is + also implicitly configured. Without key prefixing, databases with + duplicates would store keys inefficiently. Key prefixing is therefore + mandatory for databases with duplicates.

          +
          +
          Parameters:
          +
          keyPrefixing - If true, enables keyPrefixing for the database.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalStateException - if the keyPrefixing argument is false and + setSortedDuplicates(boolean) has been called to configure duplicates. + Key prefixing is therefore mandatory for databases with duplicates.
          +
          See Also:
          +
          Cache + Statistics: Size Optimizations
          +
          +
        • +
        + + + +
          +
        • +

          setTransactional

          +
          public DatabaseConfig setTransactional(boolean transactional)
          +
          Encloses the database open within a transaction. + +

          If the call succeeds, the open operation will be recoverable. If the + call fails, no database will have been created.

          + +

          All future operations on this database, which are not explicitly + enclosed in a transaction by the application, will be enclosed in a + transaction within the library.

          +
          +
          Parameters:
          +
          transactional - If true, enclose the database open within a + transaction.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getTransactional

          +
          public boolean getTransactional()
          +
          Returns true if the database open is enclosed within a transaction. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the database open is enclosed within a transaction.
          +
          +
        • +
        + + + +
          +
        • +

          setReadOnly

          +
          public DatabaseConfig setReadOnly(boolean readOnly)
          +
          Configures the database in read-only mode. + +

          Any attempt to modify items in the database will fail, regardless of + the actual permissions of any underlying files.

          +
          +
          Parameters:
          +
          readOnly - If true, configure the database in read-only mode.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getReadOnly

          +
          public boolean getReadOnly()
          +
          Returns true if the database is configured in read-only mode. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the database is configured in read-only mode.
          +
          +
        • +
        + + + +
          +
        • +

          setNodeMaxEntries

          +
          public DatabaseConfig setNodeMaxEntries(int nodeMaxEntries)
          +
          Configures the Environment.openDatabase method to have a B+Tree fanout of + nodeMaxEntries. + +

          The nodeMaxEntries parameter is only meaningful if specified with the + allowCreate mode. See EnvironmentConfig.NODE_MAX_ENTRIES for the + valid value range, and the default value.

          +
          +
          Parameters:
          +
          nodeMaxEntries - The maximum children per B+Tree node.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          setNodeMaxDupTreeEntries

          +
          public DatabaseConfig setNodeMaxDupTreeEntries(int nodeMaxDupTreeEntries)
          +
          Deprecated. this property no longer has any effect; setNodeMaxEntries(int) should be used instead.
          +
        • +
        + + + +
          +
        • +

          getNodeMaxEntries

          +
          public int getNodeMaxEntries()
          +
          Returns the maximum number of children a B+Tree node can have. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          The maximum number of children a B+Tree node can have.
          +
          +
        • +
        + + + +
          +
        • +

          getNodeMaxDupTreeEntries

          +
          public int getNodeMaxDupTreeEntries()
          +
          Deprecated. this property no longer has any effect and zero is always + returned; getNodeMaxEntries() should be used instead.
          +
        • +
        + + + +
          +
        • +

          setBtreeComparator

          +
          public DatabaseConfig setBtreeComparator(java.util.Comparator<byte[]> btreeComparator)
          +
          By default, a byte by byte lexicographic comparison is used for btree + keys. To customize the comparison, supply a different Comparator. + +

          Note that there are two ways to set the comparator: by specifying the + class or by specifying a serializable object. This method is used to + specify a serializable object. The comparator class must implement + java.util.Comparator and must be serializable. JE will serialize the + Comparator and deserialize it when subsequently opening the + database.

          + +

          If a comparator needs to be initialized before it is used or needs + access to the environment's ClassLoader property, it may implement the + DatabaseComparator interface.

          + +

          The Comparator.compare() method is passed the byte arrays that are + stored in the database. If you know how your data is organized in the + byte array, then you can write a comparison routine that directly + examines the contents of the arrays. Otherwise, you have to reconstruct + your original objects, and then perform the comparison. See the Getting Started Guide for examples.

          + +

          WARNING: There are several special considerations that must + be taken into account when implementing a comparator.

          +

            +
          • Comparator instances are shared by multiple threads and comparator + methods are called without any special synchronization. Therefore, + comparators must be thread safe. In general no shared state should be + used and any caching of computed values must be done with proper + synchronization.
          • + +
          • Because records are stored in the order determined by the + Comparator, the Comparator's behavior must not change over time and + therefore should not be dependent on any state that may change over + time. In addition, although it is possible to change the comparator + for an existing database, care must be taken that the new comparator + provides compatible results with the previous comparator, or database + corruption will occur.
          • + +
          • JE uses comparators internally in a wide variety of circumstances, + so custom comparators must be sure to return valid values for any two + arbitrary keys. The user must not make any assumptions about the + range of key values that might be compared. For example, it's possible + for the comparator to be used against previously deleted values.
          • +
          + +

          A special type of comparator is a partial comparator, which + allows for the keys of a database to be updated, but only if the updates + do not change the relative order of the keys. For example, if a database + uses strings as keys and a case-insensitive comparator, it is possible + to change the case of characters in the keys, as this will not change + the ordering of the keys. Another example is when the keys contain + multiple fields but uniquely identify each record with a single field. + The partial comparator could then compare only the single identifying + field, allowing the rest of the fields to be updated. A query + (Cursor.getSearchKey, for example) could + then be performed by passing a partial key that contains only the + identifying field. + +

          WARNING: To allow for key updates in situations + like those described above, all partial comparators must implement the + PartialComparator tag interface. Otherwise, BDB JE will raise + an exception if an attempt is made to update a key in a database whose + comparators do not implement PartialComparator. See "Upgrading from JE + 5.0 or earlier" in the change log and the PartialComparator + javadoc for more information.

          +

          + Another special type of comparator is a binary equality + comparator, which considers two keys to be equal if and only if they + have the same length and they are equal byte-per-byte. All binary + equality comparators must implement the BinaryEqualityComparator + interface. The significance of binary equality comparators is that they + make possible certain internal optimizations, like the "blind puts" + optimization, described in + BinaryEqualityComparator +

          + The comparator for an existing database will not be overridden unless + setOverrideBtreeComparator() is set to true.

          +
          +
          Returns:
          +
          this
          +
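          For illustration, a hedged sketch of a serializable comparator suitable for this method; the ordering (length first, then bytewise) and class name are illustrative. Note the thread-safety and stability requirements above:

              import java.io.Serializable;
              import java.util.Comparator;

              public class LengthFirstComparator
                      implements Comparator<byte[]>, Serializable {

                  private static final long serialVersionUID = 1L;

                  @Override
                  public int compare(byte[] a, byte[] b) {
                      // Shorter keys sort first; ties broken by unsigned byte order.
                      if (a.length != b.length) {
                          return a.length - b.length;
                      }
                      for (int i = 0; i < a.length; i++) {
                          int d = (a[i] & 0xff) - (b[i] & 0xff);
                          if (d != 0) {
                              return d;
                          }
                      }
                      return 0;
                  }
              }

              // Usage: dbConfig.setBtreeComparator(new LengthFirstComparator());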
          +
        • +
        + + + +
          +
        • +

          setBtreeComparator

          +
          public DatabaseConfig setBtreeComparator(java.lang.Class<? extends java.util.Comparator<byte[]>> btreeComparatorClass)
          +
          By default, a byte by byte lexicographic comparison is used for btree + keys. To customize the comparison, supply a different Comparator. + +

          Note that there are two ways to set the comparator: by specifying the + class or by specifying a serializable object. This method is used to + specify a Comparator class. The comparator class must implement + java.util.Comparator and must have a public zero-parameter constructor. + JE will store the class name and instantiate the Comparator by class + name (using Class.forName and newInstance) + when subsequently opening the database. Because the Comparator is + instantiated using its default constructor, it should not be dependent + on other constructor parameters.

          + +

          The Comparator.compare() method is passed the byte arrays that are + stored in the database. If you know how your data is organized in the + byte array, then you can write a comparison routine that directly + examines the contents of the arrays. Otherwise, you have to reconstruct + your original objects, and then perform the comparison. See the Getting Started Guide for examples.

          + +

          If a comparator needs to be initialized before it is used or needs + access to the environment's ClassLoader property, it may implement the + DatabaseComparator interface.

          + +

          WARNING: There are several special considerations that must + be taken into account when implementing a comparator.

          +

            +
          • Comparator instances are shared by multiple threads and comparator + methods are called without any special synchronization. Therefore, + comparators must be thread safe. In general no shared state should be + used and any caching of computed values must be done with proper + synchronization.
          • + +
          • Because records are stored in the order determined by the + Comparator, the Comparator's behavior must not change over time and + therefore should not be dependent on any state that may change over + time. In addition, although it is possible to change the comparator + for an existing database, care must be taken that the new comparator + provides compatible results with the previous comparator, or database + corruption will occur.
          • + +
          • JE uses comparators internally in a wide variety of circumstances, + so custom comparators must be sure to return valid values for any two + arbitrary keys. The user must not make any assumptions about the + range of key values that might be compared. For example, it's possible + for the comparator to be used against previously deleted values.
          • +
          + +

          A special type of comparator is a partial comparator, which + allows for the keys of a database to be updated, but only if the updates + do not change the relative order of the keys. For example, if a database + uses strings as keys and a case-insensitive comparator, it is possible + to change the case of characters in the keys, as this will not change the + ordering of the keys. Another example is when the keys contain multiple + fields but uniquely identify each record with a single field. The + partial comparator could then compare only the single identifying field, + allowing the rest of the fields to be updated. A query + (Cursor.getSearchKey, for example) could + then be performed by passing a partial key that contains only the + identifying field. + +

          WARNING: To allow for key updates in situations + like those described above, all partial comparators must implement the + PartialComparator tag interface. See "Upgrading from JE 5.0 + or earlier" in the change log and the PartialComparator javadoc + for more information.

          + + Another special type of comparator is a binary equality + comparator, which considers two keys to be equal if and only if they + have the same length and they are equal byte-per-byte. All binary + equality comparators must implement the BinaryEqualityComparator + interface. The significance of binary equality comparators is that they + make possible certain internal optimizations, like the "blind puts" + optimization, described in + BinaryEqualityComparator +

          + The comparator for an existing database will not be overridden unless + setOverrideBtreeComparator() is set to true.

          +
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getBtreeComparator

          +
          public java.util.Comparator<byte[]> getBtreeComparator()
          +
          Returns the Comparator used for key comparison on this database.
          +
        • +
        + + + +
          +
        • +

          getBtreeComparatorByClassName

          +
          public boolean getBtreeComparatorByClassName()
          +
          Returns true if the btree comparator is set by class name, not by + serializable Comparator object
          +
          +
          Returns:
          +
          true if the comparator is set by class name, not by serializable + Comparator object.
          +
          +
        • +
        + + + +
          +
        • +

          setOverrideBtreeComparator

          +
          public DatabaseConfig setOverrideBtreeComparator(boolean override)
          +
          Sets to true if the database exists and the btree comparator specified + in this configuration object should override the current comparator.
          +
          +
          Parameters:
          +
          override - Set to true to override the existing comparator.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getOverrideBtreeComparator

          +
          public boolean getOverrideBtreeComparator()
          +
          Returns the override setting for the btree comparator.
          +
        • +
        + + + +
          +
        • +

          setDuplicateComparator

          +
          public DatabaseConfig setDuplicateComparator(java.util.Comparator<byte[]> duplicateComparator)
          +
          By default, a byte by byte lexicographic comparison is used for + duplicate data items in a duplicate set. To customize the comparison, + supply a different Comparator. + +

          Note that there are two ways to set the comparator: by specifying the + class or by specifying a serializable object. This method is used to + specify a serializable object. The comparator class must implement + java.util.Comparator and must be serializable. JE will serialize the + Comparator and deserialize it when subsequently opening the + database.

          + +

          The Comparator.compare() method is passed the byte arrays that are + stored in the database. If you know how your data is organized in the + byte array, then you can write a comparison routine that directly + examines the contents of the arrays. Otherwise, you have to reconstruct + your original objects, and then perform the comparison. See the Getting Started Guide for examples.

          + +

          If a comparator needs to be initialized before it is used or needs + access to the environment's ClassLoader property, it may implement the + DatabaseComparator interface.

          + +

          WARNING: There are several special considerations that must + be taken into account when implementing a comparator.

          +

            +
          • Comparator instances are shared by multiple threads and comparator + methods are called without any special synchronization. Therefore, + comparators must be thread safe. In general no shared state should be + used and any caching of computed values must be done with proper + synchronization.
          • + +
          • Because records are stored in the order determined by the + Comparator, the Comparator's behavior must not change over time and + therefore should not be dependent on any state that may change over + time. In addition, although it is possible to change the comparator + for an existing database, care must be taken that the new comparator + provides compatible results with the previous comparator, or database + corruption will occur.
          • + +
          • JE uses comparators internally in a wide variety of circumstances, + so custom comparators must be sure to return valid values for any two + arbitrary keys. The user must not make any assumptions about the + range of key values that might be compared. For example, it's possible + for the comparator to be used against previously deleted values.
          • +
          + +

          A special type of comparator is a partial comparator, which + allows for the keys of a database to be updated, but only if the updates + do not change the relative order of the keys. For example, if a database + uses strings as keys and a case-insensitive comparator, it is possible to + change the case of characters in the keys, as this will not change the + ordering of the keys. Another example is when the keys contain multiple + fields but uniquely identify each record with a single field. The + partial comparator could then compare only the single identifying field, + allowing the rest of the fields to be updated. A query + (Cursor.getSearchKey, for example) could + then be performed by passing a partial key that contains only the + identifying field. + +

          When using a partial duplicates comparator, it is possible to update + the data for a duplicate record, as long as only the non-identifying + fields in the data are changed. See + Cursor.putCurrent for more information.

          + +

          WARNING: To allow for key updates in situations + like those described above, all partial comparators must implement the + PartialComparator tag interface. See "Upgrading from JE 5.0 + or earlier" in the change log and the PartialComparator javadoc + for more information.

          + +

          + Another special type of comparator is a binary equality + comparator, which considers two keys to be equal if and only if they + have the same length and they are equal byte-per-byte. All binary + equality comparators must implement the BinaryEqualityComparator + interface. The significance of binary equality comparators is that they + make possible certain internal optimizations, like the "blind puts" + optimization, described in + BinaryEqualityComparator +

          + The comparator for an existing database will not be overridden unless + setOverrideDuplicateComparator() is set to true.

          +
        • +
        + + + +
          +
        • +

          setDuplicateComparator

          +
          public DatabaseConfig setDuplicateComparator(java.lang.Class<? extends java.util.Comparator<byte[]>> duplicateComparatorClass)
          +
          By default, a byte by byte lexicographic comparison is used for + duplicate data items in a duplicate set. To customize the comparison, + supply a different Comparator. + +

          Note that there are two ways to set the comparator: by specifying the + class or by specifying a serializable object. This method is used to + specify a Comparator class. The comparator class must implement + java.util.Comparator and must have a public zero-parameter constructor. + JE will store the class name and instantiate the Comparator by class + name (using Class.forName and newInstance) + when subsequently opening the database. Because the Comparator is + instantiated using its default constructor, it should not be dependent + on other constructor parameters.

          + +

          The Comparator.compare() method is passed the byte arrays that are + stored in the database. If you know how your data is organized in the + byte array, then you can write a comparison routine that directly + examines the contents of the arrays. Otherwise, you have to reconstruct + your original objects, and then perform the comparison. See the Getting Started Guide for examples.

          + +

          If a comparator needs to be initialized before it is used or needs + access to the environment's ClassLoader property, it may implement the + DatabaseComparator interface.

          + +

          WARNING: There are several special considerations that must + be taken into account when implementing a comparator.

          +

            +
          • Comparator instances are shared by multiple threads and comparator + methods are called without any special synchronization. Therefore, + comparators must be thread safe. In general no shared state should be + used and any caching of computed values must be done with proper + synchronization.
          • + +
          • Because records are stored in the order determined by the + Comparator, the Comparator's behavior must not change over time and + therefore should not be dependent on any state that may change over + time. In addition, although it is possible to change the comparator + for an existing database, care must be taken that the new comparator + provides compatible results with the previous comparator, or database + corruption will occur.
          • + +
          • JE uses comparators internally in a wide variety of circumstances, + so custom comparators must be sure to return valid values for any two + arbitrary keys. The user must not make any assumptions about the + range of key values that might be compared. For example, it's possible + for the comparator to be used against previously deleted values.
          • +
          + +

          A special type of comparator is a partial comparator, which + allows for the keys of a database to be updated, but only if the updates + do not change the relative order of the keys. For example, if a database + uses strings as keys and a case-insensitive comparator, it is possible to + change the case of characters in the keys, as this will not change the + ordering of the keys. Another example is when the keys contain multiple + fields but uniquely identify each record with a single field. The + partial comparator could then compare only the single identifying field, + allowing the rest of the fields to be updated. A query + (Cursor.getSearchKey, for example) could + then be performed by passing a partial key that contains only the + identifying field. + +

          When using a partial duplicates comparator, it is possible to update + the data for a duplicate record, as long as only the non-identifying + fields in the data are changed. See + Cursor.putCurrent for more information.

          + +

          WARNING: To allow for key updates in situations + like those described above, all partial comparators must implement the + PartialComparator tag interface. See "Upgrading from JE 5.0 + or earlier" in the change log and the PartialComparator javadoc + for more information.

          +

          + Another special type of comparator is a binary equality + comparator, which considers two keys to be equal if and only if they + have the same length and they are equal byte-per-byte. All binary + equality comparators must implement the BinaryEqualityComparator + interface. The significance of binary equality comparators is that they + make possible certain internal optimizations, like the "blind puts" + optimization, described in + BinaryEqualityComparator +

          + The comparator for an existing database will not be overridden unless + setOverrideDuplicateComparator() is set to true.

          +
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getDuplicateComparator

          +
          public java.util.Comparator<byte[]> getDuplicateComparator()
          +
          Returns the Comparator used for duplicate record comparison on this + database.
          +
        • +
        + + + +
          +
        • +

          getDuplicateComparatorByClassName

          +
          public boolean getDuplicateComparatorByClassName()
          +
          Returns true if the duplicate comparator is set by class name, not by + serializable Comparator object.
          +
          +
          Returns:
          +
          true if the duplicate comparator is set by class name, not by + serializable Comparator object.
          +
          +
        • +
        + + + +
          +
        • +

          setOverrideDuplicateComparator

          +
          public DatabaseConfig setOverrideDuplicateComparator(boolean override)
          +
          Sets to true if the database exists and the duplicate comparator + specified in this configuration object should override the current + comparator.
          +
          +
          Parameters:
          +
          override - Set to true to override the existing comparator.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getOverrideDuplicateComparator

          +
          public boolean getOverrideDuplicateComparator()
          +
          Returns the override setting for the duplicate comparator.
          +
        • +
        + + + +
          +
        • +

          setTemporary

          +
          public DatabaseConfig setTemporary(boolean temporary)
          +
          Sets the temporary database option. + +

          Temporary databases operate internally in deferred-write mode to + provide reduced disk I/O and increased concurrency. But unlike an + ordinary deferred-write database, the information in a temporary + database is not durable or persistent. + +

          A temporary database is not flushed to disk when the database is + closed or when a checkpoint is performed, and the Database.sync method + may not be called. When all handles for a temporary database are + closed, the database is automatically removed. If a crash occurs before + closing a temporary database, the database will be automatically removed + when the environment is re-opened. + +

          Note that although temporary databases can page to disk if the cache + is not large enough to hold the databases, they are much more efficient + if the database remains in memory. See the JE FAQ on the Oracle + Technology Network site for information on how to estimate the cache + size needed by a given database. + +

          + See the Getting + Started Guide, Database chapter for a full description of temporary + databases. +

          +
          +
          Parameters:
          +
          temporary - if true, the database will be opened as a temporary + database.
          +
          Returns:
          +
          this
          +
          +
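A minimal sketch of opening and using a temporary database (the environment path, database name, and class name are hypothetical):

import java.io.File;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;

public class TemporaryDbExample {
    public static void main(String[] args) {
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(true);
        Environment env = new Environment(new File("/tmp/je-env"), envConfig);

        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setTemporary(true); // scratch data: never flushed to disk

        Database scratch = env.openDatabase(null, "scratch", dbConfig);
        scratch.put(null,
                    new DatabaseEntry("key".getBytes()),
                    new DatabaseEntry("value".getBytes()));

        scratch.close(); // last handle closed: the database is removed
        env.close();
    }
}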
        • +
        + + + +
          +
        • +

          getTemporary

          +
          public boolean getTemporary()
          +
          Returns the temporary database option.
          +
          +
          Returns:
          +
          boolean if true, the database is temporary.
          +
          +
        • +
        + + + +
          +
        • +

          setDeferredWrite

          +
          public DatabaseConfig setDeferredWrite(boolean deferredWrite)
          +
          Sets the deferred-write option. + +

Deferred-write databases have reduced disk I/O and improved concurrency. Disk I/O is reduced when data records are frequently modified or deleted. The information in a deferred-write database is not guaranteed to be durable or persistent until Database.close() or Database.sync() is called, or a checkpoint is performed. Since the usual write-ahead logging system is relaxed in order to improve performance, if the environment crashes before a Database.sync() or Database.close(), none, all, or an unpredictable subset of the operations previously done may be persistent.

          After a deferred-write database is closed it may be re-opened as an + ordinary transactional or non-transactional database. For example, this + can be used to initially load a large data set in deferred-write mode + and then switch to transactional mode for subsequent operations. + +

          Note that although deferred-write databases can page to disk if the + cache is not large enough to hold the databases, they are much more + efficient if the database remains in memory. See the JE FAQ on the + Oracle Technology Network site for information on how to estimate the + cache size needed by a given database. + +

          + See the Getting + Started Guide, Database chapter for a full description + of deferred-write databases. + +

          +
          +
          Parameters:
          +
          deferredWrite - if true, the database will be opened as a + deferred-write database.
          +
          Returns:
          +
          this
          +
          +
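A sketch of the bulk-load pattern described above, assuming an already-open Environment named env (the database name and record values are hypothetical):

import com.sleepycat.bind.tuple.IntegerBinding;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;
import com.sleepycat.je.DatabaseEntry;

// Assumes an open Environment named env.
DatabaseConfig dbConfig = new DatabaseConfig();
dbConfig.setAllowCreate(true);
dbConfig.setDeferredWrite(true);
Database db = env.openDatabase(null, "bulk", dbConfig);

DatabaseEntry key = new DatabaseEntry();
DatabaseEntry data = new DatabaseEntry();
for (int i = 0; i < 1000000; i++) {
    IntegerBinding.intToEntry(i, key);
    IntegerBinding.intToEntry(i * 2, data);
    db.put(null, key, data); // buffered; not yet durable
}
db.sync();  // flush the loaded records durably to disk
db.close(); // may later be re-opened as an ordinary database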
        • +
        + + + +
          +
        • +

          getDeferredWrite

          +
          public boolean getDeferredWrite()
          +
          Returns the deferred-write option.
          +
          +
          Returns:
          +
          boolean if true, deferred-write is enabled.
          +
          +
        • +
        + + + +
          +
        • +

          setUseExistingConfig

          +
          public DatabaseConfig setUseExistingConfig(boolean useExistingConfig)
          +
Setting useExistingConfig to true allows a program to open a database without knowing, a priori, what its configuration is. For example, if you want to open a database without knowing whether it contains sorted duplicates or not, you can set this property to true. In general, this is used by the JE utilities to avoid having to know the configuration of a database. The database should be opened read-only when this property is set to true.
          +
          +
          Parameters:
          +
          useExistingConfig - true if this Database should be opened using + the existing configuration.
          +
          Returns:
          +
          this
          +
          +
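A brief sketch, assuming an already-open Environment named env and an existing database whose name ("someDb") is hypothetical:

import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;

// Assumes an open Environment named env.
DatabaseConfig cfg = new DatabaseConfig();
cfg.setUseExistingConfig(true);
cfg.setReadOnly(true); // recommended when using the existing config
Database db = env.openDatabase(null, "someDb", cfg);

// The actual configuration can then be inspected:
boolean hasDuplicates = db.getConfig().getSortedDuplicates();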
        • +
        + + + +
          +
        • +

          getUseExistingConfig

          +
          public boolean getUseExistingConfig()
          +
          Return the value of the useExistingConfig property.
          +
          +
          Returns:
          +
          the value of the useExistingConfig property.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getCacheMode

          +
          public CacheMode getCacheMode()
          +
          Returns the default CacheMode used for operations performed on + this database, or null if the environment default is used.
          +
          +
          Returns:
          +
          the default CacheMode used for operations performed on + this database, or null if the environment default is used.
          +
          Since:
          +
          4.0.97
          +
          See Also:
          +
          setCacheMode(com.sleepycat.je.CacheMode)
          +
          +
        • +
        + + + +
          +
        • +

          setReplicated

          +
          public DatabaseConfig setReplicated(boolean replicated)
          +
Configures a database to be replicated or non-replicated in a replicated Environment. This property is true by default, meaning that a database in a replicated Environment is replicated unless configured otherwise.

          + In a non-replicated Environment, this property is ignored. All + databases are non-replicated in a non-replicated Environment.

          +
          +
          See Also:
          +
          Non-replicated + Databases in a Replicated Environment
          +
          +
        • +
        + + + +
          +
        • +

          getReplicated

          +
          public boolean getReplicated()
          +
          Returns the replicated property for the database. +

          + This method returns true by default. However, in a non-replicated + Environment, this property is ignored. All databases are non-replicated + in a non-replicated Environment.

          +
          +
          See Also:
          +
          setReplicated(boolean)
          +
          +
        • +
        + + + +
          +
        • +

          cloneConfig

          +
          public DatabaseConfig cloneConfig()
          +
          Deprecated. As of JE 4.0.13, replaced by clone().

          +
          Returns a copy of this configuration object.
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public DatabaseConfig clone()
          +
          Returns a copy of this configuration object.
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns the values for each configuration attribute.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          Returns:
          +
          the values for each configuration attribute.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DatabaseEntry.html b/docs/java/com/sleepycat/je/DatabaseEntry.html new file mode 100644 index 0000000..b89d90b --- /dev/null +++ b/docs/java/com/sleepycat/je/DatabaseEntry.html @@ -0,0 +1,908 @@ + + + + + +DatabaseEntry (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DatabaseEntry

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class DatabaseEntry
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      Encodes database key and data items as a byte array. + +

      Storage and retrieval for the Database + and Cursor methods are based on key/data + pairs. Both key and data items are represented by DatabaseEntry objects. + Key and data byte arrays may refer to arrays of zero length up to arrays of + essentially unlimited length.

      + +

      The DatabaseEntry class provides simple access to an underlying object + whose elements can be examined or changed. DatabaseEntry objects can be + subclassed, providing a way to associate with it additional data or + references to other structures.

      + +

      Access to DatabaseEntry objects is not re-entrant. In particular, if + multiple threads simultaneously access the same DatabaseEntry object using + Database or Cursor methods, the results are undefined.

      + +

      DatabaseEntry objects may be used in conjunction with the object mapping + support provided in the com.sleepycat.bind package.

      + +

      Input and Output Parameters

      + +

      DatabaseEntry objects are used for both input values (for example, when + writing to a database or specifying a search parameter) and output values + (for example, when reading from a database). For every CRUD method + (get, put, etc), each of the method's DatabaseEntry + parameters (key, data, etc) may be input or output + parameters, and this is specified by the method's documentation.

      + +

      Input Parameters

      + +

      An input parameter is required by the JE method. The parameter may not be + null, and the caller is also responsible for initializing the data of the + DatabaseEntry to a non-null byte array.

      + +

      Input parameters normally may not be partial. However, this is allowed under certain circumstances, namely + the Cursor.putCurrent(com.sleepycat.je.DatabaseEntry) method allows specifying a partial data + parameter in order to update only part of the record's data value. Input + parameters are NOT allowed to be partial unless this is explicitly stated in + the method documentation.

      + +

      Although an input parameter is always used for input, in some cases it + may be also used for output. For example, the Cursor.getSearchKeyRange(com.sleepycat.je.DatabaseEntry, com.sleepycat.je.DatabaseEntry, com.sleepycat.je.LockMode) method is passed a key parameter that is used as + input, but since a record with a different key (greater or equal to the key + given) may be found, the key parameter is also used to return the key + that was found. Such parameters are documented as "input/output" + parameters.

      + +

Another example is when a custom key comparator is used and a key parameter is passed to a search method. The input parameter may match a record's key even if the bytes are not equal, and the key of the record found will be returned via the parameter. The same is true of data (or primary key) parameters when a custom duplicate comparator is used. Because of this, all input parameters of "get" methods can potentially be used for output; however, they are not explicitly documented to be input/output parameters.

      + +

      Output Parameters

      + +

      An output parameter is not required by the JE method. It is used to + optionally return a value to the caller. Null may be passed for the + parameter if no returned value is needed. Passing null is a common way to + optimize read operations when only the record's key, and not the record's + data, is required. By passing null for the data parameter, a read from + disk can be avoided when the data is not already cached. In addition, all + output parameters may be partial to + allow only returning a part of the data byte array. See Using Null and Partial DatabaseEntry + Parameters for more information.

      + +

For output parameters, the byte array specified by the caller will not be used and may be null. The JE method will always allocate a new byte array. Therefore, after calling a method that returns output parameters, the application can safely keep a reference to the byte array returned by getData() without danger that the array will be overwritten in a subsequent call.

      + +

      Historical note: Prior to JE 7.0, null could not be passed for output + parameters. Instead, DatabaseEntry.setPartial(0, 0, true) was called + for a data parameter to avoid reading the record's data. Now, null can be + passed instead.

      + +

      Offset and Size Properties

      + +

      By default the Offset property is zero and the Size property is the + length of the byte array. However, to allow for optimizations involving the + partial use of a byte array, the Offset and Size may be set to non-default + values.

      + +

      For output parameters, the Size will always be set to the length of the + byte array and the Offset will always be set to zero.

      + +

      However, for input parameters the Offset and Size are set to non-default + values by the built-in tuple and serial bindings. For example, with a tuple + or serial binding the byte array is grown dynamically as data is output, and + the Size is set to the number of bytes actually used. For a serial binding, + the Offset is set to a non-zero value in order to implement an optimization + having to do with the serialization stream header.

      + +

      WARNING: In callbacks that are passed DatabaseEntry parameters, the + application should always honor the Size and Offset properties, rather than + assuming they have default values.
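A short sketch of the input/output parameter conventions described above, assuming an already-open Database named db and a hypothetical key "k1":

import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.LockMode;
import com.sleepycat.je.OperationStatus;

// Assumes an open Database named db.
DatabaseEntry key = new DatabaseEntry("k1".getBytes()); // input parameter

// Key-only existence check: pass null for the data output parameter to
// avoid fetching the record's data from disk when it is not cached.
OperationStatus found = db.get(null, key, null, LockMode.DEFAULT);

// Partial output: return at most the first 16 bytes of the data.
DatabaseEntry prefix = new DatabaseEntry();
prefix.setPartial(0, 16, true);
db.get(null, key, prefix, LockMode.DEFAULT);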

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + + + + + +
        Constructors 
        Constructor and Description
        DatabaseEntry() +
        Constructs a DatabaseEntry with null data.
        +
        DatabaseEntry(byte[] data) +
        Constructs a DatabaseEntry with a given byte array.
        +
        DatabaseEntry(byte[] data, + int offset, + int size) +
        Constructs a DatabaseEntry with a given byte array, offset and size.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        booleanequals(java.lang.Object o) +
        Compares the data of two entries for byte-by-byte equality.
        +
        byte[]getData() +
        Returns the byte array.
        +
        intgetOffset() +
        Returns the byte offset into the data array.
        +
        booleangetPartial() +
        Returns whether this DatabaseEntry is configured to read or write + partial records.
        +
        intgetPartialLength() +
        Returns the byte length of the partial record being read or written by + the application, in bytes.
        +
        intgetPartialOffset() +
        Returns the offset of the partial record being read or written by the + application, in bytes.
        +
        intgetSize() +
        Returns the byte size of the data array.
        +
        inthashCode() +
        Returns a hash code based on the data value.
        +
        voidsetData(byte[] data) +
        Sets the byte array.
        +
        voidsetData(byte[] data, + int offset, + int size) +
        Sets the byte array, offset and size.
        +
        voidsetOffset(int offset) +
        Sets the byte offset into the data array.
        +
        voidsetPartial(boolean partial) +
        Configures this DatabaseEntry to read or write partial records.
        +
        voidsetPartial(int doff, + int dlen, + boolean partial) +
        Configures this DatabaseEntry to read or write partial records.
        +
        voidsetPartialLength(int dlen) +
        Sets the byte length of the partial record being read or written by the + application, in bytes.
        +
        voidsetPartialOffset(int doff) +
        Sets the offset of the partial record being read or written by the + application, in bytes.
        +
        voidsetSize(int size) +
        Sets the byte size of the data array.
        +
        java.lang.StringtoString() +
        Returns all the attributes of the database entry in text form, including + the underlying data.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DatabaseEntry

          +
          public DatabaseEntry()
          +
          Constructs a DatabaseEntry with null data. The offset and size are set + to zero.
          +
        • +
        + + + +
          +
        • +

          DatabaseEntry

          +
          public DatabaseEntry(byte[] data)
          +
          Constructs a DatabaseEntry with a given byte array. The offset is set + to zero; the size is set to the length of the array, or to zero if null + is passed.
          +
          +
          Parameters:
          +
          data - Byte array wrapped by the DatabaseEntry.
          +
          +
        • +
        + + + +
          +
        • +

          DatabaseEntry

          +
          public DatabaseEntry(byte[] data,
          +                     int offset,
          +                     int size)
          +
          Constructs a DatabaseEntry with a given byte array, offset and size.
          +
          +
          Parameters:
          +
          data - Byte array wrapped by the DatabaseEntry.
          +
          offset - Offset in the first byte in the byte array to be included.
          +
          size - Number of bytes in the byte array to be included.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns all the attributes of the database entry in text form, including + the underlying data. The maximum number of bytes that will be formatted + is taken from the static variable DatabaseEntry.MAX_DUMP_BYTES, which + defaults to 100. MAX_DUMP_BYTES may be changed by an application if it + wishes to cause more bytes to be formatted.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          getData

          +
          public byte[] getData()
          +
          Returns the byte array. + +

          For a DatabaseEntry that is used as an output parameter, the byte + array will always be a newly allocated array. The byte array specified + by the caller will not be used and may be null.

          +
          +
          Returns:
          +
          The byte array.
          +
          +
        • +
        + + + +
          +
        • +

          setData

          +
          public void setData(byte[] data)
          +
          Sets the byte array. The offset is set to zero; the size is set to the + length of the array, or to zero if null is passed.
          +
          +
          Parameters:
          +
          data - Byte array wrapped by the DatabaseEntry.
          +
          +
        • +
        + + + +
          +
        • +

          setData

          +
          public void setData(byte[] data,
          +                    int offset,
          +                    int size)
          +
          Sets the byte array, offset and size.
          +
          +
          Parameters:
          +
          data - Byte array wrapped by the DatabaseEntry.
          +
          offset - Offset in the first byte in the byte array to be included.
          +
          size - Number of bytes in the byte array to be included.
          +
          +
        • +
        + + + +
          +
        • +

          setPartial

          +
          public void setPartial(int doff,
          +                       int dlen,
          +                       boolean partial)
          +
          Configures this DatabaseEntry to read or write partial records. + +

          By default the specified data (byte array, offset and size) + corresponds to the full stored key or data item. Optionally, the + Partial property can be set to true, and the PartialOffset and + PartialLength properties are used to specify the portion of the key or + data item to be read or written.

          + +

Note that the Partial properties are set only by the caller. They will never be set by a Database or Cursor method, nor will they ever be set by bindings. Therefore, the application can assume that the Partial properties are not set, unless the application itself sets them explicitly.

          + +

Input parameters normally may not be partial. However, this is allowed under certain circumstances, namely the Cursor.putCurrent(com.sleepycat.je.DatabaseEntry) method allows specifying a partial data parameter in order to update only part of the record's data value. Input parameters are NOT allowed to be partial unless this is explicitly stated in the method documentation.

          + +

For storing an item using a partial parameter, the dlen bytes starting doff bytes from the beginning of the stored data item are replaced by the data specified by the DatabaseEntry. If the partial length is smaller than the data, the record will grow; if the partial length is larger than the data, the record will shrink. If the partial offset is greater than the length of the data, the record will be extended using zero bytes as necessary, and the store will succeed.

          +
          +
          Parameters:
          +
          doff - The offset of the partial record being read or written by + the application, in bytes.
          +
          dlen - The byte length of the partial record being read or written + by the application, in bytes.
          +
          partial - Whether this DatabaseEntry is configured to read or write + partial records.
          +
          +
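A sketch of the partial-store case described above, assuming an already-open Database named db (the key and patch bytes are hypothetical):

import com.sleepycat.je.Cursor;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.LockMode;
import com.sleepycat.je.OperationStatus;

// Assumes an open Database named db.
Cursor cursor = db.openCursor(null, null);
try {
    DatabaseEntry key = new DatabaseEntry("k1".getBytes());
    DatabaseEntry data = new DatabaseEntry();
    if (cursor.getSearchKey(key, data, LockMode.RMW) ==
            OperationStatus.SUCCESS) {
        // Overwrite two bytes at offset 2 of the stored data item.
        DatabaseEntry patch = new DatabaseEntry("XY".getBytes());
        patch.setPartial(2, 2, true);
        cursor.putCurrent(patch);
    }
} finally {
    cursor.close();
}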
        • +
        + + + +
          +
        • +

          getPartialLength

          +
          public int getPartialLength()
          +
          Returns the byte length of the partial record being read or written by + the application, in bytes. + +

          Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method.

          +
          +
          Returns:
          +
          The byte length of the partial record being read or written by + the application, in bytes.
          +
          See Also:
          +
          setPartial(int,int,boolean)
          +
          +
        • +
        + + + +
          +
        • +

          setPartialLength

          +
          public void setPartialLength(int dlen)
          +
          Sets the byte length of the partial record being read or written by the + application, in bytes. + +

          Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method.

          +
          +
          Parameters:
          +
dlen - The byte length of the partial record being read or written by the application, in bytes.
          +
          See Also:
          +
          setPartial(int,int,boolean)
          +
          +
        • +
        + + + +
          +
        • +

          getPartialOffset

          +
          public int getPartialOffset()
          +
          Returns the offset of the partial record being read or written by the + application, in bytes. + +

          Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method.

          +
          +
          Returns:
          +
          The offset of the partial record being read or written by the + application, in bytes.
          +
          See Also:
          +
          setPartial(int,int,boolean)
          +
          +
        • +
        + + + +
          +
        • +

          setPartialOffset

          +
          public void setPartialOffset(int doff)
          +
          Sets the offset of the partial record being read or written by the + application, in bytes. + +

          Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method.

          +
          +
          Parameters:
          +
          doff - The offset of the partial record being read or written by + the application, in bytes.
          +
          See Also:
          +
          setPartial(int,int,boolean)
          +
          +
        • +
        + + + +
          +
        • +

          getPartial

          +
          public boolean getPartial()
          +
          Returns whether this DatabaseEntry is configured to read or write + partial records. + +

          Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method.

          +
          +
          Returns:
          +
          Whether this DatabaseEntry is configured to read or write + partial records.
          +
          See Also:
          +
          setPartial(int,int,boolean)
          +
          +
        • +
        + + + +
          +
        • +

          setPartial

          +
          public void setPartial(boolean partial)
          +
          Configures this DatabaseEntry to read or write partial records. + +

          Note that the Partial properties are set only by the caller. They + will never be set by a Database or Cursor method.

          +
          +
          Parameters:
          +
          partial - Whether this DatabaseEntry is configured to read or write + partial records.
          +
          See Also:
          +
          setPartial(int,int,boolean)
          +
          +
        • +
        + + + +
          +
        • +

          getOffset

          +
          public int getOffset()
          +
          Returns the byte offset into the data array. + +

          For a DatabaseEntry that is used as an output parameter, the offset + will always be zero.

          +
          +
          Returns:
          +
          Offset in the first byte in the byte array to be included.
          +
          +
        • +
        + + + +
          +
        • +

          setOffset

          +
          public void setOffset(int offset)
          +
Sets the byte offset into the data array. An ArrayIndexOutOfBoundsException will be thrown if the data, offset, and size parameters refer to elements of the data array which do not exist. Note that this exception will not be thrown by setSize() or setOffset(), but will be thrown by various JE methods if "this" is inconsistent and is used as an input parameter to those methods. It is the caller's responsibility to ensure that size, offset, and data.length are consistent.
          +
          +
          Parameters:
          +
          offset - Offset in the first byte in the byte array to be included.
          +
          +
        • +
        + + + +
          +
        • +

          getSize

          +
          public int getSize()
          +
          Returns the byte size of the data array. + +

          For a DatabaseEntry that is used as an output parameter, the size + will always be the length of the data array.

          +
          +
          Returns:
          +
          Number of bytes in the byte array to be included.
          +
          +
        • +
        + + + +
          +
        • +

          setSize

          +
          public void setSize(int size)
          +
Sets the byte size of the data array. An ArrayIndexOutOfBoundsException will be thrown if the data, offset, and size parameters refer to elements of the data array which do not exist. Note that this exception will not be thrown by setSize() or setOffset(), but will be thrown by various JE methods if "this" is inconsistent and is used as an input parameter to those methods. It is the caller's responsibility to ensure that size, offset, and data.length are consistent.
          +
          +
          Parameters:
          +
          size - Number of bytes in the byte array to be included.
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object o)
          +
          Compares the data of two entries for byte-by-byte equality. + +

          In either entry, if the offset is non-zero or the size is not equal + to the data array length, then only the data bounded by these values is + compared. The data array length and offset need not be the same in both + entries for them to be considered equal.

          + +

          If the data array is null in one entry, then to be considered equal + both entries must have a null data array.

          + +

          If the partial property is set in either entry, then to be considered + equal both entries must have the same partial properties: partial, + partialOffset and partialLength.

          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          Returns a hash code based on the data value.
          +
          +
          Overrides:
          +
          hashCode in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DatabaseException.html b/docs/java/com/sleepycat/je/DatabaseException.html new file mode 100644 index 0000000..16f559f --- /dev/null +++ b/docs/java/com/sleepycat/je/DatabaseException.html @@ -0,0 +1,309 @@ + + + + + +DatabaseException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DatabaseException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Direct Known Subclasses:
      +
      OperationFailureException, RunRecoveryException
      +
      +
      +
      +
      public abstract class DatabaseException
      +extends java.lang.RuntimeException
      +
      The root of all BDB JE-defined exceptions. + +

      Exceptions thrown by BDB JE fall into three categories.

      +
        +
1. When a method is used incorrectly as the result of an application programming error, a standard Java runtime exception is thrown: IllegalArgumentException, IllegalStateException or UnsupportedOperationException. These exceptions have the standard meaning defined by their javadoc. Note that JE throws IllegalArgumentException rather than NullPointerException when a required parameter is null.
2. When an operation failure occurs, OperationFailureException or one of its subclasses is thrown. See OperationFailureException for details.
3. When an Environment failure occurs, EnvironmentFailureException or one of its subclasses is thrown. See EnvironmentFailureException for details.
      + +

      OperationFailureException and EnvironmentFailureException + are the only two direct subclasses of DatabaseException.

      + +

      (Actually the above statement is not strictly correct. EnvironmentFailureException extends RunRecoveryException which + extends DatabaseException. RunRecoveryException exists for + backward compatibility and has been deprecated. EnvironmentFailureException should be used instead.)

      + +

      Note that in some cases, certain methods return status values without + issuing an exception. This occurs in situations that are not normally + considered an error, but when some informational status is returned. For + example, Database.get returns OperationStatus.NOTFOUND when a + requested key does not appear in the database.
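A sketch of handling the two exception categories, assuming an open Database db, a live Transaction txn, and key/data entries (all hypothetical names):

import com.sleepycat.je.EnvironmentFailureException;
import com.sleepycat.je.OperationFailureException;

// Assumes db, txn, key and data already exist.
try {
    db.put(txn, key, data);
} catch (OperationFailureException e) {
    txn.abort(); // operation-level failure: abort and possibly retry
} catch (EnvironmentFailureException e) {
    // environment-wide failure: the Environment must be closed and re-opened
}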

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.StringgetMessage() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getMessage

          +
          public java.lang.String getMessage()
          +
          +
          Overrides:
          +
          getMessage in class java.lang.Throwable
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DatabaseExistsException.html b/docs/java/com/sleepycat/je/DatabaseExistsException.html new file mode 100644 index 0000000..f5f8438 --- /dev/null +++ b/docs/java/com/sleepycat/je/DatabaseExistsException.html @@ -0,0 +1,258 @@ + + + + + +DatabaseExistsException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DatabaseExistsException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DatabaseNotFoundException.html b/docs/java/com/sleepycat/je/DatabaseNotFoundException.html new file mode 100644 index 0000000..2dcc409 --- /dev/null +++ b/docs/java/com/sleepycat/je/DatabaseNotFoundException.html @@ -0,0 +1,254 @@ + + + + + +DatabaseNotFoundException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DatabaseNotFoundException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class DatabaseNotFoundException
      +extends OperationFailureException
      +
      Thrown when an operation requires a database and that database does not + exist. + +

      The Transaction handle is not invalidated as a result of + this exception.

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DatabaseStats.html b/docs/java/com/sleepycat/je/DatabaseStats.html new file mode 100644 index 0000000..daf3185 --- /dev/null +++ b/docs/java/com/sleepycat/je/DatabaseStats.html @@ -0,0 +1,258 @@ + + + + + +DatabaseStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DatabaseStats

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Direct Known Subclasses:
      +
      BtreeStats
      +
      +
      +
      +
      public abstract class DatabaseStats
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      Statistics for a single database.
      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + +
        Constructors 
        ModifierConstructor and Description
        protected DatabaseStats() 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DatabaseStats

          +
          protected DatabaseStats()
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DeadlockException.html b/docs/java/com/sleepycat/je/DeadlockException.html new file mode 100644 index 0000000..b429965 --- /dev/null +++ b/docs/java/com/sleepycat/je/DeadlockException.html @@ -0,0 +1,291 @@ + + + + + +DeadlockException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DeadlockException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Direct Known Subclasses:
      +
      LockNotGrantedException
      +
      +
      +
      +
      public class DeadlockException
      +extends LockConflictException
      +
Thrown when a deadlock is detected. When this exception is thrown, JE detected a deadlock and chose one transaction (or non-transactional operation), the "victim", to invalidate in order to break the deadlock. Note that this is different from a lock timeout or TransactionTimeoutException, which occur for other reasons.

      For more information on deadlock detection, see + EnvironmentConfig.LOCK_DEADLOCK_DETECT. As described there, a + DeadlockException is normally thrown when a random victim is + selected; in this case the exception message will contain the string: + This locker was chosen randomly as the victim. If the deadlock + exception is thrown in a non-victim thread, due to live lock or an + unresponsive thread, the message will contain the string: + Unable to break deadlock using random victim selection within the + timeout interval.

      + +

      TODO: describe how to debug using info included with the exception.

      + +

      Normally, applications should catch the base class LockConflictException rather than catching one of its subclasses. All lock + conflicts are typically handled in the same way, which is normally to abort + and retry the transaction. See LockConflictException for more + information.

      + +

      The Transaction handle is invalidated as a result of this + exception.
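A sketch of the abort-and-retry pattern recommended above, assuming an open Environment env, Database db, and key/data entries (all hypothetical names):

import com.sleepycat.je.LockConflictException;
import com.sleepycat.je.Transaction;

// Assumes env, db, key and data already exist.
final int maxRetries = 3;
for (int attempt = 0; attempt < maxRetries; attempt++) {
    Transaction txn = env.beginTransaction(null, null);
    try {
        db.put(txn, key, data);
        txn.commit();
        break; // success
    } catch (LockConflictException e) {
        txn.abort(); // covers DeadlockException; loop and retry
    }
}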

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DeleteConstraintException.html b/docs/java/com/sleepycat/je/DeleteConstraintException.html new file mode 100644 index 0000000..ba659d4 --- /dev/null +++ b/docs/java/com/sleepycat/je/DeleteConstraintException.html @@ -0,0 +1,286 @@ + + + + + +DeleteConstraintException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DeleteConstraintException

    +
    +
    + +
    + +
    +
    + +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DiskLimitException.html b/docs/java/com/sleepycat/je/DiskLimitException.html new file mode 100644 index 0000000..2467b1e --- /dev/null +++ b/docs/java/com/sleepycat/je/DiskLimitException.html @@ -0,0 +1,257 @@ + + + + + +DiskLimitException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DiskLimitException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DiskOrderedCursor.html b/docs/java/com/sleepycat/je/DiskOrderedCursor.html new file mode 100644 index 0000000..ea0d49f --- /dev/null +++ b/docs/java/com/sleepycat/je/DiskOrderedCursor.html @@ -0,0 +1,552 @@ + + + + + +DiskOrderedCursor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DiskOrderedCursor

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      ForwardCursor, java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class DiskOrderedCursor
      +extends java.lang.Object
      +implements ForwardCursor
      +
      DiskOrderedCursor returns records in unsorted order in exchange for + generally faster retrieval times. Instead of key order, an approximation of + disk order is used, which results in less I/O. This can be useful when the + application needs to scan all records in one or more databases, and will be + applying filtering logic which does not need key ordered retrieval. + A DiskOrderedCursor is created using the Database.openCursor(DiskOrderedCursorConfig) method or the Environment.openDiskOrderedCursor(Database[], DiskOrderedCursorConfig) + method. +

      + WARNING: After opening a DiskOrderedCursor, deletion of log files + by the JE log cleaner will be disabled until close() is called. To + prevent unbounded growth of disk usage, be sure to call close() to + re-enable log file deletion. +

      + Optional configurations: the following options are available to + tune the DiskOrderedCursor. +

      + The DiskOrderedCursor creates a background producer thread which prefetches + some target records and inserts them in a queue for use by the cursor. The + parameter EnvironmentConfig.DOS_PRODUCER_QUEUE_TIMEOUT applies to + this background thread, and controls the timeout which governs the blocking + queue. +

      + See DiskOrderedCursorConfig for additional options. +

      +

      Consistency Guarantees

      +

      + The consistency guarantees provided by a DiskOrderedCursor are, at best, the + same as those provided by READ_UNCOMMITTED (see LockMode). With + READ_UNCOMMITTED, changes made by all transactions, including uncommitted + transactions, may be returned by the scan. Also, a record returned by the + scan is not locked, and may be modified or deleted by the application after + it is returned, including modification or deletion of the record at the + cursor position. +

In other words, the records returned by the scan correspond to the state of the database (as if READ_UNCOMMITTED were used) at the beginning of the scan, plus some, but not all, changes made by the application after the start of the scan. The user should not rely on the scan returning any changes made after the start of the scan. For example, if the record referred to by the DiskOrderedCursor is deleted after the DiskOrderedCursor is positioned at that record, getCurrent() will still return the key and value of that record and OperationStatus.SUCCESS. If a transactionally correct data set is required (as defined by READ_COMMITTED), the application must ensure that all transactions that write to the database are committed before the beginning of the scan, and that during the scan no records in the database being scanned are inserted, deleted, or modified. While this is possible, it is not the expected use case for a DiskOrderedCursor.

      +

      Performance Considerations

      +

      + The internal algorithm used to approximate disk ordered reads is as follows. + For simplicity, the algorithm description assumes that a single database is + being scanned, but the algorithm is almost the same when multiple databases + are involved. + An internal producer thread is used to scan the database. This thread is + created and started when the DiskOrderedCursor is created, and is + destroyed by close(). Scanning consists of two + phases. In phase I the in-cache Btree of the scanned database is traversed + in key order. The LSNs (physical record addresses) of the data to be + fetched are accumulated in a memory buffer. Btree latches are held during + the traversal, but only for short durations. In phase II the accumulated + LSNs are sorted into disk order, fetched one at a time in that order, and + the fetched data is added to a blocking queue. The getNext method + in this class removes the next entry from the queue. This approach allows + concurrent access to the Database during both phases of the scan, including + access by the application's consumer thread (the thread calling getNext). +

      + Phase I does not always process the entire Btree. During phase I if the + accumulation of LSNs causes the internal memory limit or + LSN batch size to be + exceeded, phase I is ended and phase II begins. In this case, after phase + II finishes, phase I resumes where it left off in the Btree traversal. + Phase I and II are repeated until the entire database is scanned. +

By default, the internal memory limit and LSN batch size are unbounded (see DiskOrderedCursorConfig). For a database with a large number of records, this could cause an OutOfMemoryError. Therefore, it is strongly recommended that either the internal memory limit or the LSN batch size is configured to limit the use of memory during the scan. On the other hand, the efficiency of the scan is proportional to the amount of memory used. If enough memory is available, the ideal case would be that the database is scanned in a single iteration of phases I and II. The more iterations, the more random I/O will occur.

      + Another factor is the queue + size. During the phase I Btree traversal, data that is resident in the JE + cache will be added to the queue immediately, rather than waiting until + phase II and fetching it, but only if the queue is not full. Therefore, + increasing the size of the queue can avoid fetching data that is resident in + the JE cache. Also, increasing the queue size can improve parallelism of + the work done by the producer and consumer threads. +

      + Also note that a keys-only scan + is much more efficient than the default keys-and-data scan. With a + keys-only scan, only the BINs (bottom internal nodes) of the Btree need to + be fetched; the LNs (leaf nodes) do not. This is also true of databases + configured for duplicates, even + for a keys-and-data scan, since internally the key and data are both + contained in the BIN.
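A sketch of a full scan, assuming an already-open Database named db; the 64 MB memory limit is an arbitrary illustrative value:

import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.DiskOrderedCursor;
import com.sleepycat.je.DiskOrderedCursorConfig;
import com.sleepycat.je.LockMode;
import com.sleepycat.je.OperationStatus;

// Assumes an open Database named db.
DiskOrderedCursorConfig docConfig = new DiskOrderedCursorConfig();
docConfig.setInternalMemoryLimit(64L * 1024 * 1024); // bound memory use

DiskOrderedCursor cursor = db.openCursor(docConfig);
try {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    while (cursor.getNext(key, data, LockMode.READ_UNCOMMITTED) ==
            OperationStatus.SUCCESS) {
        // filtering logic that does not require key-ordered retrieval
    }
} finally {
    cursor.close(); // re-enables log file deletion by the cleaner
}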

      +
      +
      Since:
      +
      5.0
      +
      +
    • +
    +
    +
    + +
    +
    + +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/DiskOrderedCursorConfig.html b/docs/java/com/sleepycat/je/DiskOrderedCursorConfig.html new file mode 100644 index 0000000..d84661b --- /dev/null +++ b/docs/java/com/sleepycat/je/DiskOrderedCursorConfig.html @@ -0,0 +1,698 @@ + + + + + +DiskOrderedCursorConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class DiskOrderedCursorConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class DiskOrderedCursorConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      Specifies the attributes of a DiskOrderedCursor.
      +
      +
      Since:
      +
      5.0
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        static DiskOrderedCursorConfigDEFAULT +
        Default configuration used if null is passed to methods that create a + cursor.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DiskOrderedCursorConfig() +
        An instance created using the default constructor is initialized with + the system's default settings.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods Deprecated Methods 
        Modifier and TypeMethod and Description
        DiskOrderedCursorConfigclone() +
        Returns a copy of this configuration object.
        +
        booleangetBINsOnly() +
        Returns true if the DiskOrderedCursor is configured to scan BINs only, + returning all record keys and only those record data that are embedded + in the BINs.
        +
        longgetInternalMemoryLimit() +
        Returns the maximum amount of JE Cache Memory that the + DiskOrderedScan can use at one time.
        +
        booleangetKeysOnly() +
        Returns true if the DiskOrderedCursor is configured to return only + keys.
        +
        longgetLSNBatchSize() +
        Returns the maximum number of LSNs to be sorted that this + DiskOrderedCursor is configured for.
        +
        longgetMaxSeedMillisecs() +
        Deprecated.  +
        this method returns zero and will be removed in a future + release.
        +
        +
        longgetMaxSeedNodes() +
        Deprecated.  +
        this method returns zero and will be removed in a future + release.
        +
        +
        intgetQueueSize() +
        Returns the maximum number of entries in the queue before the + DiskOrderedCursor producer thread blocks.
        +
        DiskOrderedCursorConfigsetBINsOnly(boolean binsOnly) +
        Specify whether the DiskOrderedCursor should scan the BINs only.
        +
        DiskOrderedCursorConfigsetInternalMemoryLimit(long internalMemoryLimit) +
        Set the maximum amount of JE Cache Memory that the DiskOrderedScan + can use at one time.
        +
        DiskOrderedCursorConfigsetKeysOnly(boolean keysOnly) +
        Specify whether the DiskOrderedCursor should return only the key or key + + data.
        +
        DiskOrderedCursorConfigsetLSNBatchSize(long lsnBatchSize) +
        Set the maximum number of LSNs to gather and sort at any one time.
        +
        DiskOrderedCursorConfigsetMaxSeedMillisecs(long maxSeedMillisecs) +
        Deprecated.  +
        this method has no effect and will be removed in a future + release.
        +
        +
        DiskOrderedCursorConfigsetMaxSeedNodes(long maxSeedNodes) +
        Deprecated.  +
        this method has no effect and will be removed in a future + release.
        +
        +
        DiskOrderedCursorConfigsetQueueSize(int queueSize) +
        Set the queue size for entries being passed between the + DiskOrderedCursor producer thread and the application's consumer + thread.
        +
        java.lang.StringtoString() +
        Returns the values for each configuration attribute.
        +
        +
        Methods inherited from class java.lang.Object
            equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

        Field Detail

        DEFAULT

        public static final DiskOrderedCursorConfig DEFAULT

            Default configuration used if null is passed to methods that create a cursor.

        Constructor Detail

        DiskOrderedCursorConfig

        public DiskOrderedCursorConfig()

            An instance created using the default constructor is initialized with the system's default settings.

        Method Detail

        setKeysOnly

        public DiskOrderedCursorConfig setKeysOnly(boolean keysOnly)

            Specify whether the DiskOrderedCursor should return only the key or key + data. The default value is false (key + data). If keysOnly is true, the performance of the disk ordered scan will be better, because the cursor only descends to the BIN level.

            Parameters:
                keysOnly - If true, return only keys from this cursor.
            Returns:
                this
        getKeysOnly

        public boolean getKeysOnly()

            Returns true if the DiskOrderedCursor is configured to return only keys. Returns false if it is configured to return keys + data.

            Returns:
                true if the DiskOrderedCursor is configured to return keys only.
        setBINsOnly

        public DiskOrderedCursorConfig setBINsOnly(boolean binsOnly)

            Specify whether the DiskOrderedCursor should scan the BINs only. If true, the performance of the disk ordered scan will be better, because LNs are not read from disk. However, in this case, the data portion of a record will be returned only if it is embedded in the BIN; otherwise only the key will be returned.

            Parameters:
                binsOnly - If true, return keys and, if available, the associated embedded data.
            Returns:
                this
        getBINsOnly

        public boolean getBINsOnly()

            Returns true if the DiskOrderedCursor is configured to scan BINs only, returning all record keys and only those record data that are embedded in the BINs.

            Returns:
                true if the DiskOrderedCursor is configured to scan BINs only.
        setLSNBatchSize

        public DiskOrderedCursorConfig setLSNBatchSize(long lsnBatchSize)

            Set the maximum number of LSNs to gather and sort at any one time. The default is an unlimited number of LSNs. Setting this lower causes the DiskOrderedScan to use less memory, but it sorts and processes LSNs more frequently, thereby causing slower performance. Setting this higher will in general improve performance at the expense of memory. Each LSN uses 16 bytes of memory.

            Parameters:
                lsnBatchSize - the maximum number of LSNs to accumulate and sort per batch.
            Returns:
                this
        getLSNBatchSize

        public long getLSNBatchSize()

            Returns the maximum number of LSNs to be sorted that this DiskOrderedCursor is configured for.

            Returns:
                the maximum number of LSNs to be sorted that this DiskOrderedCursor is configured for.
        setInternalMemoryLimit

        public DiskOrderedCursorConfig setInternalMemoryLimit(long internalMemoryLimit)

            Set the maximum amount of JE Cache Memory that the DiskOrderedScan can use at one time. The default is an unlimited amount of memory. Setting this lower causes the DiskOrderedScan to use less memory, but it sorts and processes LSNs more frequently, thereby generally causing slower performance. Setting this higher will in general improve performance at the expense of JE cache memory.

            Parameters:
                internalMemoryLimit - the maximum number of non JE Cache bytes to use.
            Returns:
                this
            See Also:
                Cache Statistics: Unexpected Sizes
        getInternalMemoryLimit

        public long getInternalMemoryLimit()

            Returns the maximum amount of JE Cache Memory that the DiskOrderedScan can use at one time.

            Returns:
                the maximum amount of non JE Cache Memory that preload can use at one time.
        setQueueSize

        public DiskOrderedCursorConfig setQueueSize(int queueSize)

            Set the queue size for entries being passed between the DiskOrderedCursor producer thread and the application's consumer thread. If the queue size reaches this number of entries, the producer thread will block until the application thread removes one or more entries (by calling ForwardCursor.getNext()). The default is 1000.

            Parameters:
                queueSize - the maximum number of entries the queue can hold before the producer thread blocks.
            Returns:
                this
        getQueueSize

        public int getQueueSize()

            Returns the maximum number of entries in the queue before the DiskOrderedCursor producer thread blocks.

            Returns:
                the maximum number of entries in the queue before the DiskOrderedCursor producer thread blocks.
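
        As an illustration of how these settings fit together, here is a minimal sketch (not part of the original Javadoc) of configuring and draining a disk ordered scan; the Database handle passed in is assumed to be open already:

            import com.sleepycat.je.Database;
            import com.sleepycat.je.DatabaseEntry;
            import com.sleepycat.je.DiskOrderedCursor;
            import com.sleepycat.je.DiskOrderedCursorConfig;
            import com.sleepycat.je.OperationStatus;

            public class DiskOrderedScanSketch {
                static void scan(Database db) {
                    DiskOrderedCursorConfig config = new DiskOrderedCursorConfig()
                        .setKeysOnly(true)       // descend only to the BIN level
                        .setLSNBatchSize(100000) // bound the LSNs sorted per batch
                        .setQueueSize(1000);     // producer blocks when queue is full
                    DiskOrderedCursor cursor = db.openCursor(config);
                    try {
                        DatabaseEntry key = new DatabaseEntry();
                        DatabaseEntry data = new DatabaseEntry();
                        // Records arrive in disk order, not key order.
                        while (cursor.getNext(key, data, null) ==
                               OperationStatus.SUCCESS) {
                            // process each key here
                        }
                    } finally {
                        cursor.close();
                    }
                }
            }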
        setMaxSeedMillisecs

        public DiskOrderedCursorConfig setMaxSeedMillisecs(long maxSeedMillisecs)

            Deprecated. This method has no effect and will be removed in a future release.
        getMaxSeedMillisecs

        public long getMaxSeedMillisecs()

            Deprecated. This method returns zero and will be removed in a future release.
        setMaxSeedNodes

        public DiskOrderedCursorConfig setMaxSeedNodes(long maxSeedNodes)

            Deprecated. This method has no effect and will be removed in a future release.
        getMaxSeedNodes

        public long getMaxSeedNodes()

            Deprecated. This method returns zero and will be removed in a future release.
        clone

        public DiskOrderedCursorConfig clone()

            Returns a copy of this configuration object.

            Overrides:
                clone in class java.lang.Object
        toString

        public java.lang.String toString()

            Returns the values for each configuration attribute.

            Overrides:
                toString in class java.lang.Object
            Returns:
                the values for each configuration attribute.
    Berkeley DB Java Edition version 7.5.11
    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/DiskOrderedCursorProducerException.html b/docs/java/com/sleepycat/je/DiskOrderedCursorProducerException.html new file mode 100644 index 0000000..fe6fa20 --- /dev/null +++ b/docs/java/com/sleepycat/je/DiskOrderedCursorProducerException.html @@ -0,0 +1,254 @@ DiskOrderedCursorProducerException (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition version 7.5.11
    com.sleepycat.je

    Class DiskOrderedCursorProducerException

        Method Summary

        Methods inherited from class java.lang.Throwable
            addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

        Methods inherited from class java.lang.Object
            clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
    Berkeley DB Java Edition version 7.5.11
    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/DuplicateDataException.html b/docs/java/com/sleepycat/je/DuplicateDataException.html new file mode 100644 index 0000000..20fb3e4 --- /dev/null +++ b/docs/java/com/sleepycat/je/DuplicateDataException.html @@ -0,0 +1,262 @@ DuplicateDataException (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition version 7.5.11
    com.sleepycat.je

    Class DuplicateDataException

      All Implemented Interfaces:
          java.io.Serializable

      public class DuplicateDataException
      extends OperationFailureException
      Thrown by Cursor.putCurrent if the old and new data are not equal according to the configured duplicate comparator or default comparator.

      If the old and new data are unequal according to the comparator, this would change the sort order of the record, which would change the cursor position, and this is not allowed. To change the sort order of a record, delete it and then re-insert it.

      The Transaction handle is not invalidated as a result of this exception.

      Since:
          4.0
      See Also:
          Serialized Form
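
      A small sketch (illustrative, not from the Javadoc) of the delete-and-re-insert pattern described above; the cursor, database, transaction, key and new-data handles are assumed to exist:

          import com.sleepycat.je.Cursor;
          import com.sleepycat.je.Database;
          import com.sleepycat.je.DatabaseEntry;
          import com.sleepycat.je.Transaction;

          class ReplaceDuplicateSketch {
              static void replaceDuplicate(Cursor cursor, Database db, Transaction txn,
                                           DatabaseEntry key, DatabaseEntry newData) {
                  // cursor.putCurrent(newData) would throw DuplicateDataException if
                  // the comparator says newData sorts differently from the current
                  // data, so delete the record at the cursor and re-insert it.
                  cursor.delete();
                  db.put(txn, key, newData);
              }
          }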

        Method Summary

        Methods inherited from class java.lang.Throwable
            addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

        Methods inherited from class java.lang.Object
            clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
    Berkeley DB Java Edition version 7.5.11
    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/Durability.ReplicaAckPolicy.html b/docs/java/com/sleepycat/je/Durability.ReplicaAckPolicy.html new file mode 100644 index 0000000..f44f4eb --- /dev/null +++ b/docs/java/com/sleepycat/je/Durability.ReplicaAckPolicy.html @@ -0,0 +1,422 @@ Durability.ReplicaAckPolicy (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition version 7.5.11
    com.sleepycat.je

    Enum Durability.ReplicaAckPolicy

      All Implemented Interfaces:
          java.io.Serializable, java.lang.Comparable<Durability.ReplicaAckPolicy>
      Enclosing class:
          Durability

      public static enum Durability.ReplicaAckPolicy
      extends java.lang.Enum<Durability.ReplicaAckPolicy>
      A replicated environment makes it possible to increase an application's transaction commit guarantees by committing changes to its replicas on the network. ReplicaAckPolicy defines the policy for how such network commits are handled.

      The choice of a ReplicaAckPolicy must be consistent across all the replicas in a replication group, to ensure that the policy is consistently enforced in the event of an election.

      Note that SECONDARY nodes are not included in the set of replicas that must acknowledge transaction commits.

        Enum Constant Summary

        Enum Constants

        ALL
            All ELECTABLE replicas must acknowledge that they have committed the transaction.
        NONE
            No transaction commit acknowledgments are required and the master will never wait for replica acknowledgments.
        SIMPLE_MAJORITY
            A simple majority of ELECTABLE replicas must acknowledge that they have committed the transaction.

        Method Summary

        int  minAckNodes(int groupSize)
            Returns the minimum number of ELECTABLE replicas required to implement the ReplicaAckPolicy for a given replication group size.
        static Durability.ReplicaAckPolicy  valueOf(java.lang.String name)
            Returns the enum constant of this type with the specified name.
        static Durability.ReplicaAckPolicy[]  values()
            Returns an array containing the constants of this enum type, in the order they are declared.

        Methods inherited from class java.lang.Enum
            clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf

        Methods inherited from class java.lang.Object
            getClass, notify, notifyAll, wait, wait, wait

        Enum Constant Detail

        ALL

        public static final Durability.ReplicaAckPolicy ALL

            All ELECTABLE replicas must acknowledge that they have committed the transaction. This policy should be selected only if your replication group has a small number of ELECTABLE replicas, and those replicas are on extremely reliable networks and servers.
        NONE

        public static final Durability.ReplicaAckPolicy NONE

            No transaction commit acknowledgments are required and the master will never wait for replica acknowledgments. In this case, transaction durability is determined entirely by the type of commit that is being performed on the master.
        SIMPLE_MAJORITY

        public static final Durability.ReplicaAckPolicy SIMPLE_MAJORITY

            A simple majority of ELECTABLE replicas must acknowledge that they have committed the transaction. This acknowledgment policy, in conjunction with an election policy which requires at least a simple majority, ensures that the changes made by the transaction remain durable if a new election is held.

            This is the default.

        Method Detail

        values

        public static Durability.ReplicaAckPolicy[] values()

            Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

                for (Durability.ReplicaAckPolicy c : Durability.ReplicaAckPolicy.values())
                    System.out.println(c);

            Returns:
                an array containing the constants of this enum type, in the order they are declared
        valueOf

        public static Durability.ReplicaAckPolicy valueOf(java.lang.String name)

            Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)

            Parameters:
                name - the name of the enum constant to be returned.
            Returns:
                the enum constant with the specified name
            Throws:
                java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
                java.lang.NullPointerException - if the argument is null
        minAckNodes

        public int minAckNodes(int groupSize)

            Returns the minimum number of ELECTABLE replicas required to implement the ReplicaAckPolicy for a given replication group size.

            Parameters:
                groupSize - the number of ELECTABLE replicas in the replication group
            Returns:
                the number of ELECTABLE replicas needed
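
        For a concrete feel, a short sketch (not part of the Javadoc) that prints the acknowledgment requirement of each policy for a hypothetical five-node electable group:

            import com.sleepycat.je.Durability;

            public class AckPolicySketch {
                public static void main(String[] args) {
                    // Per the policy definitions above, for a group of five
                    // ELECTABLE nodes ALL requires all five and SIMPLE_MAJORITY
                    // requires a simple majority (three); NONE waits for no
                    // replica acknowledgments at all.
                    for (Durability.ReplicaAckPolicy p :
                         Durability.ReplicaAckPolicy.values()) {
                        System.out.println(p + " -> " + p.minAckNodes(5));
                    }
                }
            }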
    Berkeley DB Java Edition version 7.5.11
    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/Durability.SyncPolicy.html b/docs/java/com/sleepycat/je/Durability.SyncPolicy.html new file mode 100644 index 0000000..e8575f6 --- /dev/null +++ b/docs/java/com/sleepycat/je/Durability.SyncPolicy.html @@ -0,0 +1,394 @@ Durability.SyncPolicy (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition version 7.5.11
    com.sleepycat.je

    Enum Durability.SyncPolicy

      All Implemented Interfaces:
          java.io.Serializable, java.lang.Comparable<Durability.SyncPolicy>
      Enclosing class:
          Durability

      public static enum Durability.SyncPolicy
      extends java.lang.Enum<Durability.SyncPolicy>

      Defines the synchronization policy to be used when committing a transaction. High levels of synchronization offer a greater guarantee that the transaction is persistent to disk, but trade that off for lower performance.

        Enum Constant Summary

        Enum Constants

        NO_SYNC
            Do not write or synchronously flush the log on transaction commit.
        SYNC
            Write and synchronously flush the log on transaction commit.
        WRITE_NO_SYNC
            Write but do not synchronously flush the log on transaction commit.

        Method Summary

        static Durability.SyncPolicy  valueOf(java.lang.String name)
            Returns the enum constant of this type with the specified name.
        static Durability.SyncPolicy[]  values()
            Returns an array containing the constants of this enum type, in the order they are declared.

        Methods inherited from class java.lang.Enum
            clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf

        Methods inherited from class java.lang.Object
            getClass, notify, notifyAll, wait, wait, wait

        Enum Constant Detail

        SYNC

        public static final Durability.SyncPolicy SYNC

            Write and synchronously flush the log on transaction commit. Transactions exhibit all the ACID (atomicity, consistency, isolation, and durability) properties.

            This is the default.
        NO_SYNC

        public static final Durability.SyncPolicy NO_SYNC

            Do not write or synchronously flush the log on transaction commit. Transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but if the application or system fails, it is possible some number of the most recently committed transactions may be undone during recovery. The number of transactions at risk is governed by how many log updates can fit into the log buffer, how often the operating system flushes dirty buffers to disk, and how often the log is checkpointed.
        WRITE_NO_SYNC

        public static final Durability.SyncPolicy WRITE_NO_SYNC

            Write but do not synchronously flush the log on transaction commit. Transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but if the operating system fails, it is possible some number of the most recently committed transactions may be undone during recovery. The number of transactions at risk is governed by how often the operating system flushes dirty buffers to disk, and how often the log is checkpointed.

        Method Detail

        values

        public static Durability.SyncPolicy[] values()

            Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

                for (Durability.SyncPolicy c : Durability.SyncPolicy.values())
                    System.out.println(c);

            Returns:
                an array containing the constants of this enum type, in the order they are declared
        valueOf

        public static Durability.SyncPolicy valueOf(java.lang.String name)

            Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)

            Parameters:
                name - the name of the enum constant to be returned.
            Returns:
                the enum constant with the specified name
            Throws:
                java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
                java.lang.NullPointerException - if the argument is null
    Berkeley DB Java Edition version 7.5.11
    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/Durability.html b/docs/java/com/sleepycat/je/Durability.html new file mode 100644 index 0000000..cea2be6 --- /dev/null +++ b/docs/java/com/sleepycat/je/Durability.html @@ -0,0 +1,590 @@ Durability (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition version 7.5.11
    com.sleepycat.je

    Class Durability

      public class Durability
      extends java.lang.Object

      Durability defines the overall durability characteristics associated with a transaction. When operating on a local environment the durability of a transaction is completely determined by the local Durability.SyncPolicy that is in effect. When using replication, the overall durability is a function of the local Durability.SyncPolicy plus the Durability.ReplicaAckPolicy used by the master and the Durability.SyncPolicy in effect at each Replica.

        Nested Class Summary

        Nested Classes

        static class  Durability.ReplicaAckPolicy
            A replicated environment makes it possible to increase an application's transaction commit guarantees by committing changes to its replicas on the network.
        static class  Durability.SyncPolicy
            Defines the synchronization policy to be used when committing a transaction.

        Method Summary

        boolean  equals(java.lang.Object obj)
        Durability.SyncPolicy  getLocalSync()
            Returns the transaction synchronization policy to be used locally when committing a transaction.
        Durability.ReplicaAckPolicy  getReplicaAck()
            Returns the replica acknowledgment policy used by the master when committing changes to a replicated environment.
        Durability.SyncPolicy  getReplicaSync()
            Returns the transaction synchronization policy to be used by the replica as it replays a transaction that needs an acknowledgment.
        int  hashCode()
        static Durability  parse(java.lang.String durabilityString)
            Parses the string and returns the durability it represents.
        java.lang.String  toString()
            Returns the string representation of durability in the format defined by the string form of the Durability constructor.

        Methods inherited from class java.lang.Object
            clone, finalize, getClass, notify, notifyAll, wait, wait, wait

        Field Detail

        COMMIT_SYNC

        public static final Durability COMMIT_SYNC

            A convenience constant that defines a durability policy with COMMIT_SYNC for local commit synchronization.

            The replicated environment policies default to COMMIT_NO_SYNC for commits of replicated transactions that need acknowledgment and SIMPLE_MAJORITY for the acknowledgment policy.
        COMMIT_NO_SYNC

        public static final Durability COMMIT_NO_SYNC

            A convenience constant that defines a durability policy with COMMIT_NO_SYNC for local commit synchronization.

            The replicated environment policies default to COMMIT_NO_SYNC for commits of replicated transactions that need acknowledgment and SIMPLE_MAJORITY for the acknowledgment policy.
        COMMIT_WRITE_NO_SYNC

        public static final Durability COMMIT_WRITE_NO_SYNC

            A convenience constant that defines a durability policy with COMMIT_WRITE_NO_SYNC for local commit synchronization.

            The replicated environment policies default to COMMIT_NO_SYNC for commits of replicated transactions that need acknowledgment and SIMPLE_MAJORITY for the acknowledgment policy.
        READ_ONLY_TXN

        public static final Durability READ_ONLY_TXN

            Deprecated. Use TransactionConfig.setReadOnly(boolean) instead.

            A convenience constant that defines a durability policy, with ReplicaAckPolicy.NONE, for use with a read-only transaction. A read-only transaction on a Master, using this Durability, will thus not be held up, or throw InsufficientReplicasException, if the Master is not in contact with a sufficient number of Replicas at the time the transaction is initiated.

            It's worth noting that since the transaction is read-only, the sync policies, although specified as NO_SYNC, do not really matter.

        Constructor Detail

        Durability

        public Durability(Durability.SyncPolicy localSync,
                          Durability.SyncPolicy replicaSync,
                          Durability.ReplicaAckPolicy replicaAck)

            Creates an instance of a Durability specification.

            Parameters:
                localSync - the SyncPolicy to be used when committing the transaction locally.
                replicaSync - the SyncPolicy to be used remotely, as part of a transaction acknowledgment, at a Replica node.
                replicaAck - the acknowledgment policy used when obtaining transaction acknowledgments from Replicas.
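
        A minimal sketch (not from the Javadoc) of constructing a Durability and applying it to a transaction configuration; TransactionConfig.setDurability is the usual consumer of such an instance:

            import com.sleepycat.je.Durability;
            import com.sleepycat.je.TransactionConfig;

            public class DurabilitySketch {
                public static void main(String[] args) {
                    Durability durability = new Durability(
                        Durability.SyncPolicy.SYNC,    // local: write and flush
                        Durability.SyncPolicy.NO_SYNC, // replicas: no flush on replay
                        Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); // majority acks
                    TransactionConfig txnConfig = new TransactionConfig();
                    txnConfig.setDurability(durability);
                    System.out.println(durability); // printable form, see parse below
                }
            }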

        Method Detail

        parse

        public static Durability parse(java.lang.String durabilityString)

            Parses the string and returns the durability it represents. The string must have the following format:

                SyncPolicy[,SyncPolicy[,ReplicaAckPolicy]]

            The first SyncPolicy in the above format applies to the Master, and the optional second SyncPolicy to the replica. Specific SyncPolicy or ReplicaAckPolicy values are denoted by the name of the enumeration value.

            For example, the string sync,sync,simple_majority describes a durability policy where the master and replica both use Durability.SyncPolicy.SYNC to commit transactions and Durability.ReplicaAckPolicy.SIMPLE_MAJORITY to acknowledge a transaction commit.

            Durability.SyncPolicy.NO_SYNC is the default value for a node's SyncPolicy.

            Durability.ReplicaAckPolicy.SIMPLE_MAJORITY is the default for the ReplicaAckPolicy.

            Parameters:
                durabilityString - the durability string in the above format
            Returns:
                the Durability resulting from the parse, or null if the durabilityString argument was itself null.
            Throws:
                java.lang.IllegalArgumentException - if the durabilityString is invalid.
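
        A short round-trip sketch (illustrative) using the format above; the enum names are written out in full here, matching the declared enumeration values exactly:

            import com.sleepycat.je.Durability;

            public class ParseSketch {
                public static void main(String[] args) {
                    Durability d = Durability.parse("SYNC,NO_SYNC,SIMPLE_MAJORITY");
                    assert d.getLocalSync() == Durability.SyncPolicy.SYNC;
                    assert d.getReplicaSync() == Durability.SyncPolicy.NO_SYNC;
                    assert d.getReplicaAck() ==
                           Durability.ReplicaAckPolicy.SIMPLE_MAJORITY;
                    // toString prints the policy in the constructor string form,
                    // which parse accepts back.
                    System.out.println(d);
                }
            }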
        toString

        public java.lang.String toString()

            Returns the string representation of durability in the format defined by the string form of the Durability constructor.

            Overrides:
                toString in class java.lang.Object
            See Also:
                parse(String)
        getLocalSync

        public Durability.SyncPolicy getLocalSync()

            Returns the transaction synchronization policy to be used locally when committing a transaction.
        getReplicaSync

        public Durability.SyncPolicy getReplicaSync()

            Returns the transaction synchronization policy to be used by the replica as it replays a transaction that needs an acknowledgment.
        getReplicaAck

        public Durability.ReplicaAckPolicy getReplicaAck()

            Returns the replica acknowledgment policy used by the master when committing changes to a replicated environment.
        hashCode

        public int hashCode()

            Overrides:
                hashCode in class java.lang.Object
        equals

        public boolean equals(java.lang.Object obj)

            Overrides:
                equals in class java.lang.Object
    Berkeley DB Java Edition version 7.5.11
    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/Environment.html b/docs/java/com/sleepycat/je/Environment.html new file mode 100644 index 0000000..0fd56eb --- /dev/null +++ b/docs/java/com/sleepycat/je/Environment.html @@ -0,0 +1,1749 @@ Environment (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition version 7.5.11
    com.sleepycat.je

    Class Environment

      All Implemented Interfaces:
          java.io.Closeable, java.lang.AutoCloseable
      Direct Known Subclasses:
          ReplicatedEnvironment, XAEnvironment

      public class Environment
      extends java.lang.Object
      implements java.io.Closeable
      A database environment. Environments include support for some or all of caching, locking, logging and transactions.

      To open an existing environment with default attributes the application may use a default environment configuration object or null:

          // Open an environment handle with default attributes.
          Environment env = new Environment(home, new EnvironmentConfig());

      or

          Environment env = new Environment(home, null);

      Note that many Environment objects may access a single environment.

      To create an environment or customize attributes, the application should customize the configuration class. For example:

          EnvironmentConfig envConfig = new EnvironmentConfig();
          envConfig.setTransactional(true);
          envConfig.setAllowCreate(true);
          envConfig.setCacheSize(1000000);
          Environment newlyCreatedEnv = new Environment(home, envConfig);

      Note that environment configuration parameters can also be set through the <environment home>/je.properties file. This file takes precedence over any programmatically specified configuration parameters so that configuration changes can be made without recompiling. Environment configuration follows this order of precedence:

          1. Configuration parameters specified in <environment home>/je.properties take first precedence.
          2. Configuration parameters set in the EnvironmentConfig object used at Environment construction take second precedence.
          3. Any configuration parameters not set by the application are set to system defaults, described along with the parameter name String constants in the EnvironmentConfig class.

      An environment handle is an Environment instance. More than one Environment instance may be created for the same physical directory, which is the same as saying that more than one Environment handle may be open at one time for a given environment.

      The Environment handle should not be closed while any other handle remains open that is using it as a reference (for example, Database or Transaction). Once Environment.close is called, this object may not be accessed again.

        Constructor Detail

        Environment

        public Environment(java.io.File envHome,
                           EnvironmentConfig configuration)
                    throws EnvironmentNotFoundException,
                           EnvironmentLockedException,
                           VersionMismatchException,
                           DatabaseException,
                           java.lang.IllegalArgumentException

            Creates a database environment handle.

            Parameters:
                envHome - The database environment's home directory.
                configuration - The database environment attributes. If null, default attributes are used.
            Throws:
                EnvironmentNotFoundException - if the environment does not exist (does not contain at least one log file) and the EnvironmentConfig AllowCreate parameter is false.
                EnvironmentLockedException - when an environment cannot be opened for write access because another process has the same environment open for write access. Warning: This exception should be handled when an environment is opened by more than one process.
                VersionMismatchException - when the existing log is not compatible with the version of JE that is running. This occurs when a later version of JE was used to create the log. Warning: This exception should be handled when more than one version of JE may be used to access an environment.
                EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
                java.lang.UnsupportedOperationException - if this environment was previously opened for replication and is not being opened read-only.
                java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, an invalid EnvironmentConfig parameter.
                DatabaseException

        Method Detail

        close

        public void close()
                   throws DatabaseException

            The Environment.close method closes the Berkeley DB environment.

            When the last environment handle is closed, allocated resources are freed, and daemon threads are stopped, even if they are performing work. For example, if the cleaner is still cleaning the log, it will be stopped at the next reasonable opportunity and perform no more cleaning operations. After stopping background threads, a final checkpoint is performed by this method, in order to reduce the time to recover the next time the environment is opened.

            When minimizing recovery time is desired, it is often useful to stop all application activity and perform an additional checkpoint prior to calling close. This additional checkpoint will write most of the dirty Btree information, so that the final checkpoint is very small (and recovery is fast). To ensure that recovery time is minimized, the log cleaner threads should also be stopped prior to the extra checkpoint. This prevents log cleaning from dirtying the Btree, which can make the final checkpoint larger (and recovery time longer). The recommended procedure for minimizing recovery time is:

                // Stop/finish all application operations that are using JE.
                ...

                // Stop the cleaner daemon threads.
                EnvironmentMutableConfig config = env.getMutableConfig();
                config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
                env.setMutableConfig(config);

                // Perform an extra checkpoint
                env.checkpoint(new CheckpointConfig().setForce(true));

                // Finally, close the environment.
                env.close();

            The Environment handle should not be closed while any other handle that refers to it is not yet closed; for example, database environment handles must not be closed while database handles remain open, or transactions in the environment have not yet committed or aborted. Specifically, this includes Database and Transaction handles.

            If this handle has already been closed, this method does nothing and returns without throwing an exception.

            In multithreaded applications, only a single thread should call Environment.close.

            The environment handle may not be used again after this method has been called, regardless of the method's success or failure, with one exception: the close method itself may be called any number of times.

            WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

            Specified by:
                close in interface java.io.Closeable
            Specified by:
                close in interface java.lang.AutoCloseable
            Throws:
                EnvironmentWedgedException - when the current process must be shut down and restarted before re-opening the Environment.
                EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
                DiskLimitException - if the final checkpoint cannot be performed because a disk limit has been violated. The Environment will be closed, but this exception will be thrown so that the application is aware that a checkpoint was not performed.
                java.lang.IllegalStateException - if any open databases or transactions refer to this handle. The Environment will be closed, but this exception will be thrown so that the application is aware that not all databases and transactions were closed.
                DatabaseException
        openDatabase

        public Database openDatabase(Transaction txn,
                                     java.lang.String databaseName,
                                     DatabaseConfig dbConfig)
                              throws DatabaseNotFoundException,
                                     DatabaseExistsException,
                                     java.lang.IllegalArgumentException,
                                     java.lang.IllegalStateException

            Opens, and optionally creates, a Database.

            Parameters:
                txn - For a transactional database, an explicit transaction may be specified, or null may be specified to use auto-commit. For a non-transactional database, null must be specified.
                databaseName - The name of the database.
                dbConfig - The database attributes. If null, default attributes are used.
            Returns:
                Database handle.
            Throws:
                DatabaseExistsException - if the database already exists and the DatabaseConfig ExclusiveCreate parameter is true.
                DatabaseNotFoundException - if the database does not exist and the DatabaseConfig AllowCreate parameter is false.
                OperationFailureException - if one of the Read Operation Failures occurs. If the database does not exist and the AllowCreate parameter is true, then one of the Write Operation Failures may also occur.
                EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
                java.lang.IllegalStateException - if this handle or the underlying environment has been closed.
                java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, an invalid DatabaseConfig property.
                java.lang.IllegalStateException - if DatabaseConfig properties are changed and there are other open handles for this database.
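
        A minimal sketch (the database name is a hypothetical assumption, not from the Javadoc) of opening, and creating if necessary, a transactional database with auto-commit:

            import com.sleepycat.je.Database;
            import com.sleepycat.je.DatabaseConfig;
            import com.sleepycat.je.Environment;

            public class OpenDatabaseSketch {
                static Database open(Environment env) {
                    DatabaseConfig dbConfig = new DatabaseConfig();
                    dbConfig.setTransactional(true);
                    dbConfig.setAllowCreate(true); // create if it does not exist
                    // A null txn uses auto-commit; "myDatabase" is a hypothetical name.
                    return env.openDatabase(null, "myDatabase", dbConfig);
                }
            }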
        openSecondaryDatabase

        public SecondaryDatabase openSecondaryDatabase(Transaction txn,
                                                       java.lang.String databaseName,
                                                       Database primaryDatabase,
                                                       SecondaryConfig dbConfig)
                                                throws DatabaseNotFoundException,
                                                       DatabaseExistsException,
                                                       DatabaseException,
                                                       java.lang.IllegalArgumentException,
                                                       java.lang.IllegalStateException

            Opens and optionally creates a SecondaryDatabase.

            Note that the associations between primary and secondary databases are not stored persistently. Whenever a primary database is opened for write access by the application, the appropriate associated secondary databases should also be opened by the application. This is necessary to ensure data integrity when changes are made to the primary database.

            Parameters:
                txn - For a transactional database, an explicit transaction may be specified, or null may be specified to use auto-commit. For a non-transactional database, null must be specified.
                databaseName - The name of the database.
                primaryDatabase - the primary database with which the secondary database will be associated. The primary database must not be configured for duplicates.
                dbConfig - The secondary database attributes. If null, default attributes are used.
            Returns:
                Database handle.
            Throws:
                DatabaseExistsException - if the database already exists and the DatabaseConfig ExclusiveCreate parameter is true.
                DatabaseNotFoundException - if the database does not exist and the DatabaseConfig AllowCreate parameter is false.
                OperationFailureException - if one of the Read Operation Failures occurs. If the database does not exist and the AllowCreate parameter is true, then one of the Write Operation Failures may also occur.
                EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
                java.lang.IllegalStateException - if this handle or the underlying environment has been closed.
                java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, an invalid SecondaryConfig property.
                java.lang.IllegalStateException - if DatabaseConfig properties are changed and there are other open handles for this database.
                DatabaseException
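
        A sketch (the key creator and names are illustrative assumptions) of opening a secondary index over an already-open primary database:

            import com.sleepycat.je.Database;
            import com.sleepycat.je.DatabaseEntry;
            import com.sleepycat.je.Environment;
            import com.sleepycat.je.SecondaryConfig;
            import com.sleepycat.je.SecondaryDatabase;
            import com.sleepycat.je.SecondaryKeyCreator;

            public class OpenSecondarySketch {
                static SecondaryDatabase openIndex(Environment env, Database primary) {
                    SecondaryConfig secConfig = new SecondaryConfig();
                    secConfig.setAllowCreate(true);
                    secConfig.setTransactional(true);
                    secConfig.setKeyCreator(new SecondaryKeyCreator() {
                        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                                          DatabaseEntry key,
                                                          DatabaseEntry data,
                                                          DatabaseEntry result) {
                            result.setData(data.getData()); // index the whole value
                            return true;
                        }
                    });
                    return env.openSecondaryDatabase(null, "myIndex", primary,
                                                     secConfig);
                }
            }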
        removeDatabase

        public void removeDatabase(Transaction txn,
                                   java.lang.String databaseName)
                            throws DatabaseNotFoundException

            Removes a database from the environment, discarding all records in the database and removing the database name itself.

            Compared to deleting all the records in a database individually, removeDatabase is a very efficient operation. Some internal housekeeping information is updated, but the database records are not read or written, and very little I/O is needed.

            When called on a database configured with secondary indices, the application is responsible for also removing all associated secondary indices. To guarantee integrity, a primary database and all of its secondary databases should be removed atomically using a single transaction.

            Applications should not remove a database with open Database handles. If the database is open with the same transaction as passed in the txn parameter, IllegalStateException is thrown by this method. If the database is open using a different transaction, this method will block until all database handles are closed, or until the conflict is resolved by throwing LockConflictException.

            Parameters:
                txn - For a transactional environment, an explicit transaction may be specified or null may be specified to use auto-commit. For a non-transactional environment, null must be specified.
                databaseName - The database to be removed.
            Throws:
                DatabaseNotFoundException - if the database does not exist.
                OperationFailureException - if one of the Write Operation Failures occurs.
                EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
                java.lang.UnsupportedOperationException - if this is a read-only environment.
                java.lang.IllegalStateException - if the database is currently open using the transaction passed in the txn parameter, or if this handle or the underlying environment has been closed.
                java.lang.IllegalArgumentException - if an invalid parameter is specified.
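
        A sketch (database names assumed) of the recommendation above: removing a primary database and its secondary index atomically in one transaction:

            import com.sleepycat.je.Environment;
            import com.sleepycat.je.Transaction;

            public class RemoveDatabaseSketch {
                static void removePrimaryAndIndex(Environment env) {
                    Transaction txn = env.beginTransaction(null, null);
                    try {
                        env.removeDatabase(txn, "myPrimaryDb");   // hypothetical names
                        env.removeDatabase(txn, "mySecondaryDb");
                        txn.commit(); // both removals become durable together
                    } catch (RuntimeException e) {
                        txn.abort();  // neither database is removed on failure
                        throw e;
                    }
                }
            }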
        renameDatabase

        public void renameDatabase(Transaction txn,
                                   java.lang.String databaseName,
                                   java.lang.String newName)
                            throws DatabaseNotFoundException

            Renames a database, without removing the records it contains.

            Applications should not rename a database with open Database handles. If the database is open with the same transaction as passed in the txn parameter, IllegalStateException is thrown by this method. If the database is open using a different transaction, this method will block until all database handles are closed, or until the conflict is resolved by throwing LockConflictException.

            Parameters:
                txn - For a transactional environment, an explicit transaction may be specified or null may be specified to use auto-commit. For a non-transactional environment, null must be specified.
                databaseName - The database to be renamed.
                newName - The new name of the database.
            Throws:
                DatabaseNotFoundException - if the database does not exist.
                OperationFailureException - if one of the Write Operation Failures occurs.
                EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
                java.lang.UnsupportedOperationException - if this is a read-only environment.
                java.lang.IllegalStateException - if the database is currently open using the transaction passed in the txn parameter, or if this handle or the underlying environment has been closed.
                java.lang.IllegalArgumentException - if an invalid parameter is specified.
        truncateDatabase

        public long truncateDatabase(Transaction txn,
                                     java.lang.String databaseName,
                                     boolean returnCount)
                              throws DatabaseNotFoundException

            Empties the database, discarding all the records it contains, without removing the database name.

            Compared to deleting all the records in a database individually, truncateDatabase is a very efficient operation. Some internal housekeeping information is updated, but the database records are not read or written, and very little I/O is needed.

            When called on a database configured with secondary indices, the application is responsible for also truncating all associated secondary indices. To guarantee integrity, a primary database and all of its secondary databases should be truncated atomically using a single transaction.

            Applications should not truncate a database with open Database handles. If the database is open with the same transaction as passed in the txn parameter, IllegalStateException is thrown by this method. If the database is open using a different transaction, this method will block until all database handles are closed, or until the conflict is resolved by throwing LockConflictException.

            Parameters:
                txn - For a transactional environment, an explicit transaction may be specified or null may be specified to use auto-commit. For a non-transactional environment, null must be specified.
                databaseName - The database to be truncated.
                returnCount - If true, count and return the number of records discarded.
            Returns:
                The number of records discarded, or -1 if returnCount is false.
            Throws:
                DatabaseNotFoundException - if the database does not exist.
                OperationFailureException - if one of the Write Operation Failures occurs.
                EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
                java.lang.UnsupportedOperationException - if this is a read-only environment.
                java.lang.IllegalStateException - if the database is currently open using the transaction passed in the txn parameter, or if this handle or the underlying environment has been closed.
                java.lang.IllegalArgumentException - if an invalid parameter is specified.
        + + + +
          +
        • +

          getHome

          +
          public java.io.File getHome()
          +                     throws DatabaseException
          +
          Returns the database environment's home directory. + + This method may be called when the environment has been invalidated, but + not yet closed. In other words, EnvironmentFailureException is + never thrown by this method.
          +
          +
          Returns:
          +
The database environment's home directory.
          +
          Throws:
          +
          java.lang.IllegalStateException - if this handle has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          beginTransaction

          +
          public Transaction beginTransaction(Transaction parent,
          +                                    TransactionConfig txnConfig)
          +                             throws DatabaseException,
          +                                    java.lang.IllegalArgumentException
          +
          Creates a new transaction in the database environment. + +

Transaction handles are free-threaded; transaction handles may be used concurrently by multiple threads.

          + +

          Cursors may not span transactions; that is, each cursor must be + opened and closed within a single transaction. The parent parameter is a + placeholder for nested transactions, and must currently be null.

          +
          +
          Parameters:
          +
parent - The parent transaction; a placeholder for nested transactions that must currently be null.
+
txnConfig - The transaction attributes. If null, default attributes are used.
          +
          Returns:
          +
          The newly created transaction's handle.
          +
          Throws:
          +
          InsufficientReplicasException - if the Master + in a replicated environment could not contact a quorum of replicas as + determined by the Durability.ReplicaAckPolicy.
          +
          ReplicaConsistencyException - if a replica + in a replicated environment cannot become consistent within the timeout + period.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this is not a transactional + environment.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, an invalid TransactionConfig parameter.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          checkpoint

          +
          public void checkpoint(CheckpointConfig ckptConfig)
          +                throws DatabaseException
          +
          Synchronously checkpoint the database environment. +

          + This is an optional action for the application since this activity + is, by default, handled by a database environment owned background + thread. +

          + A checkpoint has the side effect of flushing all preceding + non-transactional write operations, as well as any preceding + transactions that were committed with no-sync durability. However, for best + performance, checkpoints should be used only to bound recovery time. + flushLog(boolean) can be used to write buffered data for durability + purposes.

          +
          +
          Parameters:
          +
          ckptConfig - The checkpoint attributes. If null, default + attributes are used.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DiskLimitException - if the checkpoint cannot be performed + because a disk limit has been violated.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          sync

          +
          public void sync()
          +          throws DatabaseException
          +
          Synchronously flushes database environment databases to stable storage. + Calling this method is equivalent to forcing a checkpoint and setting + CheckpointConfig.setMinimizeRecoveryTime(boolean) to true. +

          + A checkpoint has the side effect of flushing all preceding + non-transactional write operations, as well as any preceding + transactions that were committed with no-sync durability. However, for best + performance, checkpoints should be used only to bound recovery time. + flushLog(boolean) can be used to write buffered data for durability + purposes.

          +
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DiskLimitException - if the sync cannot be performed + because a disk limit has been violated.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          flushLog

          +
          public void flushLog(boolean fsync)
          +
          Writes buffered data to the log, and optionally performs an fsync to + guarantee that data is written to the physical device. +

          + This method is used to make durable, by writing to the log, all + preceding non-transactional write operations, as well as any preceding + transactions that were committed with no-sync durability. If the fsync + parameter is true, it can also be used to flush all logged data to the + physical storage device, by performing an fsync. +

          + Note that this method does not flush previously unwritten data + in deferred-write databases; that is done by calling Database.sync() or performing a checkpoint.

          +
          +
          Parameters:
          +
fsync - If true, perform an fsync as well as a file write; if false, perform only a file write.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          +
        • +
        + + + +
          +
        • +

          cleanLog

          +
          public int cleanLog()
          +             throws DatabaseException
          +
          Synchronously invokes log file (data file) cleaning until the target + disk space utilization has been reached; this method is called + periodically by the cleaner background threads. + +

          Zero or more log files will be cleaned as necessary to bring the + current disk space + utilization of the environment above the configured utilization threshold. + +

          Note that this method does not perform the complete task of cleaning + a log file. Eviction and checkpointing log Btree information that is + marked dirty by the cleaner, and a full checkpoint is necessary, + following cleaning, before cleaned files will be deleted (or renamed). + Checkpoints occur periodically and when the environment is closed.

          + +

          This is an optional action for the application since this activity + is, by default, handled by one or more Environment-owned background + threads.

          + +

          The intended use case for the cleanLog method is when the + application wishes to disable the built-in cleaner threads using the + EnvironmentConfig.ENV_RUN_CLEANER property. To replace the + functionality of the cleaner threads, the application should call + cleanLog periodically.

          + +

          Note that because this method cleans multiple files before returning, + in an attempt to reach the target utilization, it may not return for a + long time when there is a large backlog of files to be cleaned. This + method cannot be aborted except by closing the environment. If the + application needs the ability to abort the cleaning process, the + cleanLogFile() method should be used instead.

          + +

          Note that in certain unusual situations the cleaner may not be able + to make forward progress and the target utilization will never be + reached. For example, this can occur if the target utilization is set + too high or checkpoints are performed too often. To guard against + cleaning "forever", this method will return when all files have been + cleaned, even when the target utilization has not been reached.

          +
          +
          Returns:
          +
          The number of log files that were cleaned, and that will be + deleted (or renamed) when a qualifying checkpoint occurs.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this is a read-only or + memory-only environment.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          cleanLogFile

          +
          public boolean cleanLogFile()
          +                     throws DatabaseException
          +
          Synchronously invokes cleaning of a single log file (data file), if + the target disk space utilization has not been reached. + +

          One log file will be cleaned if the current disk space utilization of the + environment is below the configured utilization threshold. No + files will be cleaned if disk space utilization is currently above the + threshold. The lowest utilized file is selected for cleaning, since it + has the lowest cleaning cost.

          + +

          Note that this method does not perform the complete task of cleaning + a log file. Eviction and checkpointing log Btree information that is + marked dirty by the cleaner, and a full checkpoint is necessary, + following cleaning, before cleaned files will be deleted (or renamed). + Checkpoints occur periodically and when the environment is closed.

          + +

The intended use case for the cleanLogFile method is "batch cleaning". This is when the application disables the cleaner threads (using the EnvironmentConfig.ENV_RUN_CLEANER property) for maximum performance during active periods, and calls cleanLogFile during periods when the application is quiescent or less active than usual. Similarly, there may be times when an application wishes to perform cleaning explicitly until the target utilization is reached, rather than relying on the cleaner's background threads. For example, some applications may wish to perform batch cleaning prior to closing the environment, to reclaim as much disk space as possible at that time.

          + +

          To clean until the target utilization threshold is reached, cleanLogFile can be called in a loop until it returns false. + When there is a large backlog of files to be cleaned, the application may wish to limit the + amount of cleaning. Batch cleaning can be aborted simply by breaking out + of the loop. The cleaning of a single file is not a long operation; it + should take several minutes at most. For example:

          + +
+     // Initialized to false; the application sets it to true to abort batch cleaning.
+     boolean cleaningAborted = false;
          +     boolean anyCleaned = false;
          +
          +     while (!cleaningAborted && env.cleanLogFile()) {
          +         anyCleaned = true;
          +     }
          + 
          + +

          Note that in certain unusual situations the cleaner may not be able + to make forward progress and the target utilization will never be + reached. For example, this can occur if the target utilization is set + too high or checkpoints are performed too often. To guard against + cleaning "forever", the application may wish to cancel the batch + cleaning (break out of the loop) when the cleaning time or number of + files cleaned exceeds some reasonable limit.

          + +

          As mentioned above, the cleaned log files will not be deleted until + the next full checkpoint. If the application wishes to reclaim this disk + space as soon as possible, an explicit checkpoint may be performed after + the batch cleaning operation. For example:

          + +
          +     if (anyCleaned) {
          +         env.checkpoint(new CheckpointConfig().setForce(true));
          +     }
          + 
          + +

          However, even an explicit checkpoint is not guaranteed to delete the + cleaned log files if, at the time the file was cleaned, records in the + file were locked or were part of a database that was being removed, due + to concurrent application activity that was accessing records or + removing databases. In this case the files will be deleted only after + these operations are complete and a subsequent checkpoint is performed. + To guarantee that the cleaned files will be deleted, an application may + stop all concurrent activity (ensure all operations and transactions + have ended) and then perform a checkpoint.

          + +

          When closing the environment and minimizing recovery time is desired + (see close()), as well as reclaiming disk space, the recommended + procedure is as follows:

          + +
          +     // Stop/finish all application operations that are using JE.
          +     ...
          +
          +     // Stop the cleaner daemon threads.
          +     EnvironmentMutableConfig config = env.getMutableConfig();
          +     config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
          +     env.setMutableConfig(config);
          +
+     // Perform batch cleaning; cleaningAborted is an abort flag as in the
+     // example above, initially false.
+     boolean cleaningAborted = false;
          +     while (!cleaningAborted && env.cleanLogFile()) {
          +     }
          +
          +     // Perform an extra checkpoint
          +     env.checkpoint(new CheckpointConfig().setForce(true));
          +
          +     // Finally, close the environment.
          +     env.close();
          + 
          +
          +
          Returns:
          +
true if one log file was cleaned, or false if none were cleaned.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.UnsupportedOperationException - if this is a read-only or + memory-only environment.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          evictMemory

          +
          public void evictMemory()
          +                 throws DatabaseException
          +
          Synchronously invokes the mechanism for keeping memory usage within the + cache size boundaries. + +

          This is an optional action for the application since this activity + is, by default, handled by a database environment owned background + thread.

          +
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          compress

          +
          public void compress()
          +              throws DatabaseException
          +
          Synchronously invokes the compressor mechanism which compacts in memory + data structures after delete operations. + +

          This is an optional action for the application since this activity + is, by default, handled by a database environment owned background + thread.

          +
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          preload

          +
          public PreloadStats preload(Database[] databases,
          +                            PreloadConfig config)
          +                     throws DatabaseException
          +
Preloads the cache with multiple databases. This method should only be called when there are no operations being performed on the specified databases in other threads. Executing preload during concurrent updates of the specified databases may result in some or all of the tree being loaded into the JE cache. Executing preload during any other types of operations may result in JE exceeding its allocated cache size. preload() effectively locks all of the specified databases and therefore will lock out the checkpointer, cleaner, and compressor, as well as not allow eviction to occur. If databases are replicated and the environment is in the replica state, then the replica may become temporarily disconnected from the master if the replica needs to replay changes against the database and is locked out because the time taken by the preload operation exceeds ReplicationConfig.FEEDER_TIMEOUT.
          +
          +
          Parameters:
          +
databases - An array containing the handles of the open databases to be preloaded.
+
config - The PreloadConfig object that specifies the parameters of the preload.
          +
          Returns:
          +
          A PreloadStats object with the result of the preload operation + and various statistics about the preload() operation.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if any of the databases has been closed.
          +
          DatabaseException
          +
          See Also:
          +
          Database.preload(PreloadConfig)
          +
          +
        • +
        + + + +
          +
        • +

          openDiskOrderedCursor

          +
          public DiskOrderedCursor openDiskOrderedCursor(Database[] databases,
          +                                               DiskOrderedCursorConfig config)
          +                                        throws DatabaseException
          +
          Create a DiskOrderedCursor to iterate over the records of a given set + of databases. Because the retrieval is based on Log Sequence Number + (LSN) order rather than key order, records are returned in unsorted + order in exchange for generally faster retrieval. LSN order + approximates disk sector order. +

          + See DiskOrderedCursor for more details and a description of the + consistency guarantees provided by the scan. +

          + WARNING: After calling this method, deletion of log files by + the JE log cleaner will be disabled until DiskOrderedCursor.close() is called. To prevent unbounded growth of + disk usage, be sure to call DiskOrderedCursor.close() to + re-enable log file deletion.

          +
          +
          Parameters:
          +
databases - An array containing the handles of the databases that are to be scanned. All these handles must be currently open. Furthermore, all the databases must belong to this environment, and either all of them should support duplicates or none of them should. Note: this method does not make a copy of this array, and as a result, the contents of the array should not be modified while the returned DiskOrderedCursor is still in use.
          +
          config - The DiskOrderedCursorConfig object that specifies the + parameters of the disk ordered scan.
          +
          Returns:
          +
          the new DiskOrderedCursor object.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if (a) the databases parameter is + null or an empty array, or (b) any of the handles in the databases + parameter is null, or (c) the databases do not all belong to this + environment, or (d) some databases support duplicates and some don't.
          +
          java.lang.IllegalStateException - if any of the databases has been + closed or invalidated.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getConfig

          +
          public EnvironmentConfig getConfig()
          +                            throws DatabaseException
          +
Returns this object's configuration. Unlike most Environment methods, this method may be called if the environment is invalid, but not yet closed.
          +
          +
          Returns:
          +
This object's configuration.

          +
          Throws:
          +
          java.lang.IllegalStateException - if this handle has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          setMutableConfig

          +
          public void setMutableConfig(EnvironmentMutableConfig mutableConfig)
          +                      throws DatabaseException
          +
          Sets database environment attributes. + +

          Attributes only apply to a specific Environment object and are not + necessarily shared by other Environment objects accessing this + database environment.

          + +

          Unlike most Environment methods, this method may be called if the + environment is invalid, but not yet closed.

          +
          +
          Parameters:
          +
          mutableConfig - The database environment attributes. If null, + default attributes are used.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getMutableConfig

          +
          public EnvironmentMutableConfig getMutableConfig()
          +                                          throws DatabaseException
          +
          Returns database environment attributes. + +

          Unlike most Environment methods, this method may be called if the + environment is invalid, but not yet closed.

          +
          +
          Returns:
          +
          Environment attributes.
          +
          Throws:
          +
          java.lang.IllegalStateException - if this handle has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getStats

          +
          public EnvironmentStats getStats(StatsConfig config)
          +                          throws DatabaseException
          +
          Returns the general database environment statistics.
          +
          +
          Parameters:
          +
          config - The general statistics attributes. If null, default + attributes are used.
          +
          Returns:
          +
          The general database environment statistics.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getLockStats

          +
          public LockStats getLockStats(StatsConfig config)
          +                       throws DatabaseException
          +
          Deprecated. as of 4.0.10, replaced by getStats(StatsConfig).

          +
          Returns the database environment's locking statistics.
          +
          +
          Parameters:
          +
          config - The locking statistics attributes. If null, default + attributes are used.
          +
          Returns:
          +
          The database environment's locking statistics.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getTransactionStats

          +
          public TransactionStats getTransactionStats(StatsConfig config)
          +                                     throws DatabaseException
          +
          Returns the database environment's transactional statistics.
          +
          +
          Parameters:
          +
          config - The transactional statistics attributes. If null, + default attributes are used.
          +
          Returns:
          +
          The database environment's transactional statistics.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getDatabaseNames

          +
          public java.util.List<java.lang.String> getDatabaseNames()
          +                                                  throws DatabaseException
          +
          Returns a List of database names for the database environment. + +

          Each element in the list is a String.

          +
          +
          Returns:
          +
          A List of database names for the database environment.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          verify

          +
          public boolean verify(VerifyConfig config,
          +                      java.io.PrintStream out)
          +               throws DatabaseException
          +
Returns whether the database environment is consistent and correct.

          Verification is an expensive operation that should normally only be + used for troubleshooting and debugging.

          +
          +
          Parameters:
          +
          config - The verification attributes. If null, default + attributes are used.
          +
out - Unused. To specify the output stream for verification information, use VerifyConfig.setShowProgressStream(java.io.PrintStream).
          +
          Returns:
          +
          true if the database environment is consistent and correct. + Currently true is always returned when this method returns normally, + i.e., when no exception is thrown.
          +
          Throws:
          +
          EnvironmentFailureException - if a corruption is detected, or if + an unexpected, internal or environment-wide failure occurs. If a + persistent corruption is detected, + EnvironmentFailureException.isCorrupted() will return true.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getThreadTransaction

          +
          public Transaction getThreadTransaction()
          +                                 throws DatabaseException
          +
Returns the transaction associated with this thread if implied transactions are being used. Implied transactions are used in an XA or JCA "Local Transaction" environment. In an XA environment the XAEnvironment.start() entrypoint causes a transaction to be created and become associated with the calling thread. Subsequent API calls implicitly use that transaction. XAEnvironment.end() causes the transaction to be disassociated from the thread. In a JCA Local Transaction environment, the call to JEConnectionFactory.getConnection() causes a new transaction to be created and associated with the calling thread.
          +
          +
          Throws:
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          setThreadTransaction

          +
          public void setThreadTransaction(Transaction txn)
          +
Sets the transaction associated with this thread if implied transactions are being used. Implied transactions are used in an XA or JCA "Local Transaction" environment. In an XA environment the XAEnvironment.start() entrypoint causes a transaction to be created and become associated with the calling thread. Subsequent API calls implicitly use that transaction. XAEnvironment.end() causes the transaction to be disassociated from the thread. In a JCA Local Transaction environment, the call to JEConnectionFactory.getConnection() causes a new transaction to be created and associated with the calling thread.
          +
          +
          Throws:
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          +
        • +
        + + + +
          +
        • +

          isValid

          +
          public boolean isValid()
          +
Returns whether this Environment is open and valid, and can therefore be used.

          When an EnvironmentFailureException, or one of its + subclasses, is caught, the isValid method can be called to + determine whether the Environment can continue to be used, or + should be closed. Some EnvironmentFailureExceptions invalidate the + environment and others do not.

          + +

          If this method returns false, the environment may have been closed by + the application, or may have been invalidated by an exception and not + yet closed. The isClosed() method may be used to distinguish + between these two cases, and getInvalidatingException() can be + used to return the exception. Note that it is safe to call close() redundantly, so it is safe to always call close() when + this method returns false.

          +
        • +
        + + + +
          +
        • +

          isClosed

          +
          public boolean isClosed()
          +
          Returns whether the environment has been closed by the application. + +

If this method returns true, close() has been called. If the environment was previously invalidated by an exception, that exception will be returned by getInvalidatingException().

          + +

          If this method returns false, the environment may or may not be + usable, since it may have been invalidated by an exception but not yet + closed. To determine whether it was invalidated, call isValid() + or getInvalidatingException().

          +
          +
          Returns:
          +
          whether the environment has been closed by the application.
          +
          Since:
          +
          7.2
          +
          +
        • +
        + + + +
          +
        • +

          getInvalidatingException

          +
          public EnvironmentFailureException getInvalidatingException()
          +
          Returns the exception that caused the environment to be invalidated, or + null if the environment was not invalidated by an exception. + +

          This method may be used to determine whether the environment was + invalidated by an exception, by checking for a non-null return value. + This method will return the invalidating exception, regardless of + whether the environment is closed. Note that isValid() will + return false when the environment is closed, even when it was not + invalidated by an exception.

          + +

          This method may also be used to identify and handle the original + invalidating exception, when more than one exception is thrown. When an + environment is first invalidated by an EnvironmentFailureException, the + exception is saved so that it can be returned by this method. Other + EnvironmentFailureExceptions may be thrown later as side effects of the + original problem, or possibly as separate problems. It is normally the + first invalidating exception that is most relevant.

          +
          +
          Returns:
          +
          the invalidating exception or null.
          +
          Since:
          +
          7.2
          +
          +
        • +
        + + + +
          +
        • +

          printStartupInfo

          +
          public void printStartupInfo(java.io.PrintStream out)
          +
          Print a detailed report about the costs of different phases of + environment startup. This report is by default logged to the je.info + file if startup takes longer than je.env.startupThreshold. + +

          Unlike most Environment methods, this method may be called if the + environment is invalid, but not yet closed.

          +
          +
          Throws:
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has been closed.
          +
          +
        • +
        + + + +
          +
        • +

          isInternalHandle

          +
          protected boolean isInternalHandle()
          +
        • +
        +
      • +
      +
    • +
    +
    +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/je/EnvironmentConfig.html b/docs/java/com/sleepycat/je/EnvironmentConfig.html new file mode 100644 index 0000000..bb18cdf --- /dev/null +++ b/docs/java/com/sleepycat/je/EnvironmentConfig.html @@ -0,0 +1,7254 @@
EnvironmentConfig (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    +
    com.sleepycat.je
    +

    Class EnvironmentConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Cloneable
      +
      +
      +
      +
      public class EnvironmentConfig
      +extends EnvironmentMutableConfig
      +
      Specifies the attributes of an environment. + +

      To change the default settings for a database environment, an application + creates a configuration object, customizes settings and uses it for + environment construction. The set methods of this class validate the + configuration values when the method is invoked. An + IllegalArgumentException is thrown if the value is not valid for that + attribute.

      + +

Most parameters are described by the parameter name String constants in this class. These parameters can be specified individually by calling setConfigParam(java.lang.String, java.lang.String), through a Properties object passed to EnvironmentConfig(Properties), or via properties in the je.properties files located in the environment home directory.

      + +

For example, an application can change the default lock timeout with:

      + +
      +     envConfig.setConfigParam(EnvironmentConfig.LOCK_TIMEOUT, "250 ms");
      + 
      + +

      Some commonly used environment attributes have convenience setter/getter + methods defined in this class. For example, to change the default + lock timeout setting for an environment, the application can instead do + the following:

      +
      +     // customize an environment configuration
      +     EnvironmentConfig envConfig = new EnvironmentConfig();
      +     // will throw if timeout value is invalid
      +     envConfig.setLockTimeout(250, TimeUnit.MILLISECONDS);
      +     // Open the environment using this configuration.
      +     Environment myEnvironment = new Environment(home, envConfig);
      + 
      + +

      Parameter values are applied using this order of precedence:

      +
        +
1. Configuration parameters specified in je.properties take first precedence.
2. Configuration parameters set in the EnvironmentConfig object used at Environment construction are next.
3. Any configuration parameters not set by the application are set to system defaults, described along with the parameter name String constants in this class.
      + +

      However, a small number of parameters do not have string constants in + this class, and cannot be set using setConfigParam(java.lang.String, java.lang.String), a Properties + object, or the je.properties file. These parameters can only be changed + via the following setter methods:

      + + +

      An EnvironmentConfig can be used to specify both mutable and immutable + environment properties. Immutable properties may be specified when the + first Environment handle (instance) is opened for a given physical + environment. When more handles are opened for the same environment, the + following rules apply:

      +
        +
1. Immutable properties must equal the original values specified when constructing an Environment handle for an already open environment. When a mismatch occurs, an exception is thrown.
2. Mutable properties are ignored when constructing an Environment handle for an already open environment.
      + +

      After an Environment has been constructed, its mutable properties may be + changed using Environment.setMutableConfig(com.sleepycat.je.EnvironmentMutableConfig). See EnvironmentMutableConfig for a list of mutable properties; all other + properties are immutable. Whether a property is mutable or immutable is + also described along with the parameter name String constants in this + class.

      + +

      Getting the Current Environment Properties

      + + To get the current "live" properties of an environment after constructing it + or changing its properties, you must call Environment.getConfig() or + Environment.getMutableConfig(). The original EnvironmentConfig or + EnvironmentMutableConfig object used to set the properties is not kept up to + date as properties are changed, and does not reflect property validation or + properties that are computed. + +

      Time Duration Properties

      + +

      Several environment and transaction configuration properties are time + durations. For these properties, a time unit is specified along with an + integer duration value.

      + +

      When specific setter and getter methods exist for a time duration + property, these methods have a TimeUnit argument. Examples are + setLockTimeout(long,TimeUnit) and getLockTimeout(TimeUnit). Note that the TimeUnit argument may + be null only when the duration value is zero; there is no default unit that + is used when null is specified.

      + +

      When a time duration is specified as a string value, the following format + is used.

      + +
          <value> [ <whitespace> <unit> ]
      + +

      The <value> is an integer. The <unit> name, if present, + must be preceded by one or more spaces or tabs.

      + +

      The following <unit> names are allowed. Both TimeUnit + names and IEEE standard abbreviations are allowed. Unit names are case + insensitive.

      IEEE abbreviation    TimeUnit name    Definition
      ns                   NANOSECONDS      one billionth (10^-9) of a second
      us                   MICROSECONDS     one millionth (10^-6) of a second
      ms                   MILLISECONDS     one thousandth (10^-3) of a second
      s                    SECONDS          1 second
      min                                   60 seconds
      h                                     3600 seconds

      Examples are:

      +
      + 3 seconds
      + 3 s
      + 500 ms
      + 1000000 (microseconds is implied)
      + 
      + +

      The maximum duration value is currently Integer.MAX_VALUE milliseconds. + This translates to almost 25 days (2147483647999999 ns, 2147483647999 us, + 2147483647 ms, 2147483 s, 35791 min, 596 h).

      + +

      Note that when the <unit> is omitted, microseconds is implied. + This default is supported for compatibility with JE 3.3 and earlier. In JE + 3.3 and earlier, explicit time units were not used and durations were always + implicitly specified in microseconds. The older methods that do not have a + TimeUnit argument, such as setLockTimeout(long) and getLockTimeout(), use microsecond durations and have been deprecated.

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

Fields

All of the fields below are static java.lang.String parameter-name constants.

ADLER32_CHUNK_SIZE - By default, JE passes an entire log record to the Adler32 class for checksumming.
CHECKPOINTER_BYTES_INTERVAL - Ask the checkpointer to run every time we write this many bytes to the log.
CHECKPOINTER_DEADLOCK_RETRY - The number of times to retry a checkpoint if it runs into a deadlock.
CHECKPOINTER_HIGH_PRIORITY - If true, the checkpointer uses more resources in order to complete the checkpoint in a shorter time interval.
CHECKPOINTER_WAKEUP_INTERVAL - The checkpointer wakeup interval in microseconds.
CLEANER_ADJUST_UTILIZATION - Deprecated in JE 6.3. Adjustments are no longer needed because LN log sizes have been stored in the Btree since JE 6.0.
CLEANER_BACKGROUND_PROACTIVE_MIGRATION - Deprecated. This parameter is ignored and proactive migration is no longer supported due to its negative impact on eviction and checkpointing. To reduce a cleaner backlog, configure more cleaner threads.
CLEANER_BYTES_INTERVAL - The cleaner checks disk utilization every time we write this many bytes to the log.
CLEANER_DEADLOCK_RETRY - The number of times to retry cleaning if a deadlock occurs.
CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE - Tracking of detailed cleaning information will use no more than this percentage of the cache.
CLEANER_EXPUNGE - If true (the default setting), the cleaner deletes log files after successful cleaning.
CLEANER_FETCH_OBSOLETE_SIZE - If true, the cleaner will fetch records to determine their size and more accurately calculate log utilization.
CLEANER_FORCE_CLEAN_FILES - Specifies a list of files or file ranges to be cleaned at a time when no other log cleaning is necessary.
CLEANER_FOREGROUND_PROACTIVE_MIGRATION - Deprecated. This parameter is ignored and proactive migration is no longer supported due to its negative impact on eviction and Btree splits. To reduce a cleaner backlog, configure more cleaner threads.
CLEANER_LAZY_MIGRATION - Deprecated. This parameter is ignored and lazy migration is no longer supported due to its negative impact on eviction and checkpointing. To reduce a cleaner backlog, configure more cleaner threads.
CLEANER_LOCK_TIMEOUT - The lock timeout for cleaner transactions in microseconds.
CLEANER_LOOK_AHEAD_CACHE_SIZE - The look ahead cache size for cleaning in bytes.
CLEANER_MAX_BATCH_FILES - Deprecated in 7.0. No longer used because the cleaner no longer has a backlog.
CLEANER_MIN_AGE - The minimum age of a file (number of files between it and the active file) to qualify it for cleaning under any conditions.
CLEANER_MIN_FILE_UTILIZATION - A log file will be cleaned if its utilization percentage is below this value, irrespective of total utilization.
CLEANER_MIN_UTILIZATION - The cleaner will keep the total disk space utilization percentage above this value.
CLEANER_READ_SIZE - The read buffer size for cleaning.
CLEANER_THREADS - The number of threads allocated by the cleaner for log file processing.
CLEANER_UPGRADE_TO_LOG_VERSION - All log files having a log version prior to the specified version will be cleaned at a time when no other log cleaning is necessary.
CLEANER_USE_DELETED_DIR - When CLEANER_EXPUNGE is false, the CLEANER_USE_DELETED_DIR parameter determines whether successfully cleaned files are moved to the "deleted" sub-directory.
CLEANER_WAKEUP_INTERVAL - The cleaner checks whether cleaning is needed if this interval elapses without any writing, to handle the case where cleaning or checkpointing is necessary to reclaim disk space, but writing has stopped.
COMPRESSOR_DEADLOCK_RETRY - The number of times to retry a compression run if a deadlock occurs.
COMPRESSOR_LOCK_TIMEOUT - The lock timeout for compressor transactions in microseconds.
COMPRESSOR_PURGE_ROOT - Deprecated as of 3.3.87. Compression of the root node no longer has any benefit and this feature has been removed. This parameter has no effect.
COMPRESSOR_WAKEUP_INTERVAL - The compressor thread wakeup interval in microseconds.
CONSOLE_LOGGING_LEVEL - Trace messages equal and above this level will be logged to the console.
DOS_PRODUCER_QUEUE_TIMEOUT - The timeout for Disk Ordered Scan producer thread queue offers in milliseconds.
ENV_BACKGROUND_READ_LIMIT - The maximum number of read operations performed by JE background activities (e.g., cleaning) before sleeping to ensure that application threads can perform I/O.
ENV_BACKGROUND_SLEEP_INTERVAL - The duration that JE background activities will sleep when the ENV_BACKGROUND_WRITE_LIMIT or ENV_BACKGROUND_READ_LIMIT is reached.
ENV_BACKGROUND_WRITE_LIMIT - The maximum number of write operations performed by JE background activities (e.g., checkpointing and eviction) before sleeping to ensure that application threads can perform I/O.
ENV_CHECK_LEAKS - Debugging support: check leaked locks and txns at env close.
ENV_DB_EVICTION - If true, enable eviction of metadata for closed databases.
ENV_DUP_CONVERT_PRELOAD_ALL - If true (the default) preload all duplicates databases at once when upgrading from JE 4.1 and earlier.
ENV_EXPIRATION_ENABLED - If true (the default), expired data is filtered from queries and purged by the cleaner.
ENV_FAIR_LATCHES - If true, use latches instead of synchronized blocks to implement the lock table and log write mutexes.
ENV_FORCED_YIELD - Debugging support: call Thread.yield() at strategic points.
ENV_IS_LOCKING - Configures the database environment for no locking.
ENV_IS_TRANSACTIONAL - Configures the use of transactions.
ENV_LATCH_TIMEOUT - The timeout for detecting internal latch timeouts, so that deadlocks can be detected.
ENV_READ_ONLY - Configures the database environment to be read-only, and any attempt to modify a database will fail.
ENV_RECOVERY_FORCE_CHECKPOINT - If true, a checkpoint is forced following recovery, even if the log ends with a checkpoint.
ENV_RECOVERY_FORCE_NEW_FILE - Used after performing a restore from backup to force creation of a new log file prior to recovery.
ENV_RUN_CHECKPOINTER - If true, starts up the checkpointer thread.
ENV_RUN_CLEANER - If true, starts up the cleaner thread.
ENV_RUN_EVICTOR - If true, eviction is done by a pool of evictor threads, as well as being done inline by application threads.
ENV_RUN_IN_COMPRESSOR - If true, starts up the INCompressor thread.
ENV_RUN_OFFHEAP_EVICTOR - If true, off-heap eviction is done by a pool of evictor threads, as well as being done inline by application threads.
ENV_RUN_VERIFIER - Whether to run the background verifier.
ENV_TTL_CLOCK_TOLERANCE - The interval added to the system clock time for determining that a record may have expired.
EVICTOR_ALLOW_BIN_DELTAS - Allow Bottom Internal Nodes (BINs) to be written in a delta format during eviction.
EVICTOR_CORE_THREADS - The minimum number of threads in the eviction thread pool.
EVICTOR_CRITICAL_PERCENTAGE - At this percentage over the allotted cache, critical eviction will start.
EVICTOR_DEADLOCK_RETRY - Deprecated as of JE 4.1, since the single evictor thread has been replaced by a more robust thread pool.
EVICTOR_EVICT_BYTES - When eviction occurs, the evictor will push memory usage to this number of bytes below MAX_MEMORY.
EVICTOR_FORCED_YIELD - Call Thread.yield() at each check for cache overflow.
EVICTOR_KEEP_ALIVE - The duration that excess threads in the eviction thread pool will stay idle; after this period, idle threads will terminate.
EVICTOR_LRU_ONLY - Deprecated as of JE 6.0. This parameter is ignored by the new, more efficient and more accurate evictor.
EVICTOR_MAX_THREADS - The maximum number of threads in the eviction thread pool.
EVICTOR_N_LRU_LISTS - The number of LRU lists in the main JE cache.
EVICTOR_NODES_PER_SCAN - Deprecated as of JE 6.0. This parameter is ignored by the new, more efficient and more accurate evictor.
FILE_LOGGING_LEVEL - Trace messages equal and above this level will be logged to the je.info file, which is in the Environment home directory.
FREE_DISK - A lower limit on the number of bytes of free space to maintain on a volume and per JE Environment.
HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION - By default, if a checksum exception is found at the end of the log during Environment startup, JE will assume the checksum is due to previously interrupted I/O and will quietly truncate the log and restart.
LOCK_DEADLOCK_DETECT - Whether to perform deadlock detection when a lock conflict occurs.
LOCK_DEADLOCK_DETECT_DELAY - The delay after a lock conflict, before performing deadlock detection.
LOCK_N_LOCK_TABLES - Number of Lock Tables.
LOCK_OLD_LOCK_EXCEPTIONS - Deprecated since JE 6.5; has no effect, as if it were set to false.
LOCK_TIMEOUT - Configures the default lock timeout.
LOG_BUFFER_SIZE - The maximum starting size of a JE log buffer.
LOG_CHECKSUM_READ - If true, perform a checksum check when reading entries from log.
LOG_CHUNKED_NIO - Deprecated. NIO is no longer used by JE and this parameter has no effect.
LOG_DETECT_FILE_DELETE - If true, periodically detect unexpected file deletions.
LOG_DETECT_FILE_DELETE_INTERVAL - The interval used to check for unexpected file deletions.
LOG_DIRECT_NIO - Deprecated. NIO is no longer used by JE and this parameter has no effect.
LOG_FAULT_READ_SIZE - The buffer size for faulting in objects from disk, in bytes.
LOG_FILE_CACHE_SIZE - The size of the file handle cache.
LOG_FILE_MAX - The maximum size of each individual JE log file, in bytes.
LOG_FLUSH_NO_SYNC_INTERVAL - The maximum time interval between committing a transaction with NO_SYNC durability, and making the transaction durable with respect to the file system.
LOG_FLUSH_SYNC_INTERVAL - The maximum time interval between committing a transaction with NO_SYNC or WRITE_NO_SYNC durability, and making the transaction durable with respect to the storage device.
LOG_FSYNC_TIME_LIMIT - If the time taken by an fsync exceeds this limit, a WARNING level message is logged.
LOG_FSYNC_TIMEOUT - The timeout limit for group file sync, in microseconds.
LOG_GROUP_COMMIT_INTERVAL - The time interval in nanoseconds during which transactions may be grouped to amortize the cost of write and/or fsync when a transaction commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC on the local machine.
LOG_GROUP_COMMIT_THRESHOLD - The threshold value impacts the number of transactions that may be grouped to amortize the cost of write and/or fsync when a transaction commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC on the local machine.
LOG_ITERATOR_MAX_SIZE - The maximum read buffer size for log iterators, which are used when scanning the log during activities like log cleaning and environment open, in bytes.
LOG_ITERATOR_READ_SIZE - The read buffer size for log iterators, which are used when scanning the log during activities like log cleaning and environment open, in bytes.
LOG_MEM_ONLY - If true, operates in an in-memory test mode without flushing the log to disk.
LOG_N_DATA_DIRECTORIES - Deprecated as of 7.3. This feature is not known to provide benefits beyond that of a simple RAID configuration, and will be removed in the next release, which is slated for mid-April, 2017.
LOG_NUM_BUFFERS - The number of JE log buffers.
        +
        static java.lang.StringLOG_TOTAL_BUFFER_BYTES +
        The total memory taken by log buffers, in bytes.
        +
        static java.lang.StringLOG_USE_NIO +
        Deprecated.  +
        NIO is no longer used by JE and this parameter has no + effect.
        +
        +
        static java.lang.StringLOG_USE_ODSYNC +
        If true (default is false) O_DSYNC is used to open JE log files.
        +
        static java.lang.StringLOG_USE_WRITE_QUEUE +
        If true (default is true) the Write Queue is used for file I/O + operations which are blocked by concurrent I/O operations.
        +
        static java.lang.StringLOG_VERIFY_CHECKSUMS +
        If true, perform a checksum verification just before and after writing + to the log.
        +
        static java.lang.StringLOG_WRITE_QUEUE_SIZE +
        The size of the Write Queue.
        +
        static java.lang.StringMAX_DISK +
        An upper limit on the number of bytes used for data storage.
        +
        static java.lang.StringMAX_MEMORY +
        Configures the JE main cache size in bytes.
        +
        static java.lang.StringMAX_MEMORY_PERCENT +
        Configures the JE main cache size as a percentage of the JVM maximum + memory.
        +
        static java.lang.StringMAX_OFF_HEAP_MEMORY +
        Configures the number of bytes to be used as a secondary, off-heap cache.
        +
        static java.lang.StringNODE_DUP_TREE_MAX_ENTRIES +
        Deprecated.  +
        this property no longer has any effect; DatabaseConfig.setNodeMaxEntries(int) should be used instead.
        +
        +
        static java.lang.StringNODE_MAX_ENTRIES +
        The maximum number of entries in an internal btree node.
        +
        static java.lang.StringOFFHEAP_CHECKSUM +
        Can be used to add a checksum to each off-heap block when the block is + written, and validate the checksum when the block is read, for debugging + purposes.
        +
        static java.lang.StringOFFHEAP_CORE_THREADS +
        The minimum number of threads in the off-heap eviction thread pool.
        +
        static java.lang.StringOFFHEAP_EVICT_BYTES +
        The off-heap evictor will attempt to keep memory usage this number of + bytes below MAX_OFF_HEAP_MEMORY.
        +
        static java.lang.StringOFFHEAP_KEEP_ALIVE +
        The duration that excess threads in the off-heap eviction thread pool + will stay idle; after this period, idle threads will terminate.
        +
        static java.lang.StringOFFHEAP_MAX_THREADS +
        The maximum number of threads in the off-heap eviction thread pool.
        +
        static java.lang.StringOFFHEAP_N_LRU_LISTS +
        The number of LRU lists in the off-heap JE cache.
        +
        static java.lang.StringSHARED_CACHE +
        If true, the shared cache is used by this environment.
        +
        static java.lang.StringSTARTUP_DUMP_THRESHOLD +
        If environment startup exceeds this duration, startup statistics are + logged and can be found in the je.info file.
        +
        static java.lang.StringSTATS_COLLECT +
        If true collect and log statistics.
        +
        static java.lang.StringSTATS_COLLECT_INTERVAL +
        The duration of the statistics capture interval.
        +
        static java.lang.StringSTATS_FILE_DIRECTORY +
        The directory to save the statistics log file.
        +
        static java.lang.StringSTATS_FILE_ROW_COUNT +
        Log file maximum row count for Stat collection.
        +
        static java.lang.StringSTATS_MAX_FILES +
        Maximum number of statistics log files to retain.
        +
        static java.lang.StringTRACE_CONSOLE +
        Deprecated.  +
        in favor of CONSOLE_LOGGING_LEVEL As of JE + 4.0, use the standard java.util.logging configuration + methodologies. To enable console output, set + com.sleepycat.je.util.ConsoleHandler.level = <LEVEL> through + the java.util.logging configuration file, or through the + java.util.logging.LogManager. To set the handler level programmatically, + set "com.sleepycat.je.util.ConsoleHandler.level" in the + EnvironmentConfig object.
        +
        +
        static java.lang.StringTRACE_DB +
        Deprecated.  +
        As of JE 4.0, event tracing to the .jdb files has been + separated from the java.util.logging mechanism. This parameter has + no effect.
        +
        +
        static java.lang.StringTRACE_FILE +
        Deprecated.  +
        in favor of FILE_LOGGING_LEVEL As of JE 4.0, + use the standard java.util.logging configuration methodologies. To + enable logging output to the je.info files, set + com.sleepycat.je.util.FileHandler.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager. To set the handler level programmatically, + set "com.sleepycat.je.util.FileHandler.level" in the EnvironmentConfig + object.
        +
        +
        static java.lang.StringTRACE_FILE_COUNT +
        Deprecated.  +
        As of JE 4.0, use the standard java.util.logging + configuration methodologies. To set the FileHandler output file count, + set com.sleepycat.je.util.FileHandler.count = <NUMBER> + through the java.util.logging configuration file, or through the + java.util.logging.LogManager.
        +
        +
        static java.lang.StringTRACE_FILE_LIMIT +
        Deprecated.  +
        As of JE 4.0, use the standard java.util.logging + configuration methodologies. To set the FileHandler output file size, + set com.sleepycat.je.util.FileHandler.limit = <NUMBER> + through the java.util.logging configuration file, or through the + java.util.logging.LogManager.
        +
        +
        static java.lang.StringTRACE_LEVEL +
        Deprecated.  +
        As of JE 4.0, use the standard java.util.logging + configuration methodologies. Set logging levels using class names + through the java.util.logging configuration file, or through the + java.util.logging.LogManager.
        +
        +
        static java.lang.StringTRACE_LEVEL_CLEANER +
        Deprecated.  +
        As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see cleaner logging, set + com.sleepycat.je.cleaner.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
        +
        +
        static java.lang.StringTRACE_LEVEL_EVICTOR +
        Deprecated.  +
        As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see evictor logging, set + com.sleepycat.je.evictor.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
        +
        +
        static java.lang.StringTRACE_LEVEL_LOCK_MANAGER +
        Deprecated.  +
        As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see locking logging, set + com.sleepycat.je.txn.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
        +
        +
        static java.lang.StringTRACE_LEVEL_RECOVERY +
        Deprecated.  +
        As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see recovery logging, set + com.sleepycat.je.recovery.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
        +
        +
        static java.lang.StringTREE_BIN_DELTA +
        If more than this percentage of entries are changed on a BIN, log a a + full version instead of a delta.
        +
        static java.lang.StringTREE_COMPACT_MAX_KEY_LENGTH +
        Specifies the maximum unprefixed key length for use in the compact + in-memory key representation.
        +
        static java.lang.StringTREE_MAX_DELTA +
        Deprecated.  +
        as of JE 6.0. The TREE_BIN_DELTA param alone now + determines whether a delta is logged.
        +
        +
        static java.lang.StringTREE_MAX_EMBEDDED_LN +
        The maximum size (in bytes) of a record's data portion that will cause + the record to be embedded in its parent LN.
        +
        static java.lang.StringTREE_MIN_MEMORY +
        The minimum bytes allocated out of the memory cache to hold Btree data + including internal nodes and record keys and data.
        +
        static java.lang.StringTXN_DEADLOCK_STACK_TRACE +
        Set this parameter to true to add stacktrace information to deadlock + (lock timeout) exception messages.
        +
        static java.lang.StringTXN_DUMP_LOCKS +
        Dump the lock table when a lock timeout is encountered, for debugging + assistance.
        +
        static java.lang.StringTXN_DURABILITY +
        Configures the default durability associated with transactions.
        +
        static java.lang.StringTXN_SERIALIZABLE_ISOLATION +
        Configures all transactions for this environment to have Serializable + (Degree 3) isolation.
        +
        static java.lang.StringTXN_TIMEOUT +
        Configures the transaction timeout.
        +
        static java.lang.StringVERIFY_BTREE +
        Whether the background verifier should perform Btree verification, + as if the DbVerify utility were run.
        +
        static java.lang.StringVERIFY_BTREE_BATCH_DELAY +
        The delay between batches during Btree + verification.
        +
        static java.lang.StringVERIFY_BTREE_BATCH_SIZE +
        The number of records verified per batch during Btree verification.
        +
        static java.lang.StringVERIFY_DATA_RECORDS +
        Whether to verify data records (leaf nodes, or LNs) during Btree + verification.
        +
        static java.lang.StringVERIFY_LOG +
        Whether the background verifier should verify checksums in the log, + as if the DbVerifyLog utility were run.
        +
        static java.lang.StringVERIFY_LOG_READ_DELAY +
        The delay between reads during log verification.
        +
        static java.lang.StringVERIFY_SCHEDULE +
        A crontab-format string indicating when to start the background + verifier.
        +
        static java.lang.StringVERIFY_SECONDARIES +
        Whether to verify secondary index references during Btree verification.
        +

        Constructor Summary

        Constructors

        EnvironmentConfig()
            Creates an EnvironmentConfig initialized with the system default settings.

        EnvironmentConfig(java.util.Properties properties)
            Creates an EnvironmentConfig which includes the properties specified in the properties parameter.
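
        As a brief illustration of the two constructors, here is a minimal sketch (the property value below is an arbitrary example, not part of this Javadoc):

            import java.util.Properties;
            import com.sleepycat.je.EnvironmentConfig;

            public class ConfigConstruction {
                public static void main(String[] args) {
                    // System default settings.
                    EnvironmentConfig defaults = new EnvironmentConfig();

                    // Settings seeded from a Properties object, using the "je.*"
                    // parameter names documented in the Field Summary above.
                    Properties props = new Properties();
                    props.setProperty("je.maxMemoryPercent", "75");
                    EnvironmentConfig fromProps = new EnvironmentConfig(props);
                }
            }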

        Field Detail


          MAX_MEMORY

          public static final java.lang.String MAX_MEMORY

          Configures the JE main cache size in bytes.

          Either MAX_MEMORY or MAX_MEMORY_PERCENT may be used to configure the cache size. When MAX_MEMORY is zero (its default value), MAX_MEMORY_PERCENT determines the cache size. See MAX_MEMORY_PERCENT for more information.

          When using MAX_MEMORY, take care to ensure that the overhead of the JVM does not leave less free space in the heap than intended. Some JVMs have more overhead than others, and some JVMs allocate their overhead within the specified heap size (the -Xmx value). To be sure that enough free space is available, use MAX_MEMORY_PERCENT rather than MAX_MEMORY.

          When using the Oracle NoSQL DB product

          Name            Type  Mutable  Default  Minimum  Maximum
          "je.maxMemory"  Long  Yes      0        -none-   -none-

          See Also:
              EnvironmentMutableConfig.setCacheSize(long), MAX_MEMORY_PERCENT, Cache Statistics: Sizing, Constant Field Values

          MAX_MEMORY_PERCENT

          public static final java.lang.String MAX_MEMORY_PERCENT

          Configures the JE main cache size as a percentage of the JVM maximum memory.

          The system will evict database objects when it comes within a prescribed margin of the limit.

          By default, JE sets the cache size to:

              (MAX_MEMORY_PERCENT * JVM maximum memory) / 100

          where JVM maximum memory is specified by the JVM -Xmx flag. Note that the actual heap size may be somewhat less, depending on JVM overheads. The value used in the calculation above is the actual heap size as returned by Runtime.maxMemory().

          The above calculation applies when MAX_MEMORY is zero, which is its default value. Setting MAX_MEMORY to a non-zero value overrides the percentage-based calculation and sets the cache size explicitly.

          The following details apply to setting the cache size to a percentage of the JVM heap size (this parameter) as well as to a byte size (MAX_MEMORY).

          If SHARED_CACHE is set to true, MAX_MEMORY and MAX_MEMORY_PERCENT specify the total size of the shared cache, and changing these parameters will change the size of the shared cache. New environments that join the cache may alter the cache size if their configuration uses a different cache size parameter.

          The size of the cache is often directly proportional to operation performance. See Cache Statistics for information on understanding and monitoring the cache. It is strongly recommended that the cache is large enough to hold all INs. See DbCacheSize for information on sizing the cache.

          To take full advantage of JE cache memory, it is strongly recommended that compressed oops (-XX:+UseCompressedOops) is specified when a 64-bit JVM is used and the maximum heap size is less than 32 GB. As described in the referenced documentation, compressed oops is sometimes the default JVM mode even when it is not explicitly specified in the Java command. However, if compressed oops is desired then it must be explicitly specified in the Java command when running DbCacheSize or a JE application. If it is not explicitly specified then JE will not be aware of it, even if it is the JVM default setting, and will not take it into account when calculating cache memory sizes.

          Note that log write buffers may be flushed to disk if the cache size is changed after the environment has been opened.

          Name                   Type     Mutable  Default  Minimum  Maximum
          "je.maxMemoryPercent"  Integer  Yes      60       1        90

          See Also:
              EnvironmentMutableConfig.setCachePercent(int), MAX_MEMORY, Cache Statistics: Sizing, Constant Field Values
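
          To make the relationship between MAX_MEMORY and MAX_MEMORY_PERCENT concrete, a minimal sketch follows; the specific sizes are arbitrary examples:

              import com.sleepycat.je.EnvironmentConfig;

              public class CacheSizing {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();

                      // Percentage-based sizing (je.maxMemoryPercent); applies while
                      // je.maxMemory remains at its default of zero.
                      cfg.setCachePercent(75);

                      // Explicit byte sizing (je.maxMemory); a non-zero value
                      // overrides the percentage-based calculation.
                      cfg.setCacheSize(512L * 1024 * 1024);

                      // Any documented parameter may also be set by name.
                      cfg.setConfigParam(EnvironmentConfig.MAX_MEMORY_PERCENT, "75");
                  }
              }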

          MAX_OFF_HEAP_MEMORY

          public static final java.lang.String MAX_OFF_HEAP_MEMORY

          Configures the number of bytes to be used as a secondary, off-heap cache.

          The off-heap cache is used to hold record data and Btree nodes when these are evicted from the "main cache" because it overflows. Eviction occurs according to an LRU algorithm and takes into account the user-specified CacheMode. When the off-heap cache overflows, eviction occurs there also according to the same algorithm.

          The main cache is in the Java heap and consists primarily of the Java objects making up the in-memory Btree data structure. Btree objects are not serialized in the main cache, so no object materialization is needed to access the Btree there. Access to records in the main cache is therefore very fast, but the main cache has drawbacks as well: 1) The larger the main cache, the more likely it is to have Java GC performance problems. 2) When the Java heap exceeds 32GB, the "compressed OOPs" setting no longer applies and less data will fit in the same amount of memory. For these reasons, JE applications often configure a heap of 32GB or less, and a main cache that is significantly less than 32GB, leaving any additional machine memory for use by the file system cache.

          The use of the file system cache has performance benefits, but also has its own drawbacks: 1) There is a significant redundancy between the main cache and the file system cache because all data and Btree information that is logged (written) by JE appears in the file system and may also appear in the main cache. 2) It is not possible for dirty Btree information to be placed in the file system cache without logging it; this logging may be otherwise unnecessary, and it creates additional work for the JE cleaner. In other words, the size of the main cache alone determines the maximum size of the in-memory "dirty set".

          The off-heap cache is stored outside the Java heap using a native platform memory allocator. The current implementation relies on internals that are specific to the Oracle and IBM JDKs; however, a memory allocator interface that can be implemented for other situations is being considered for a future release. Records and Btree objects are serialized when they are placed in the off-heap cache, and they must be materialized when they are moved back to the main cache in order to access them. This serialization and materialization adds some CPU overhead and thread contention, as compared to accessing data directly in the main cache. The off-heap cache can contain dirty Btree information, so it can be used to increase the maximum size of the in-memory "dirty set".

          NOTE: If an off-heap cache is configured but cannot be used because the native allocator is not available in the JDK that is used, an IllegalStateException will be thrown by the Environment or ReplicatedEnvironment constructor. In the current release, this means that the sun.misc.Unsafe class must contain the allocateMemory method and related methods, as defined in the Oracle JDK.

          When configuring an off-heap cache you can think of the performance trade-offs in two ways. First, if the off-heap cache is considered to be a replacement for the file system cache, the serialization and materialization overhead is not increased. In this case, the use of the off-heap cache is clearly beneficial, and using the off-heap cache "instead of" the file system cache is normally recommended. Second, the off-heap cache can be used along with a main cache that is reduced in size in order to compensate for Java GC problems. In this case, the trade-off is between the additional serialization, materialization and contention overheads of the off-heap cache, as compared to the Java GC overhead.

          When dividing up available memory for the JVM heap, the off-heap cache, and other uses, please be aware that the file system cache and the off-heap cache are different in one important respect. The file system cache automatically shrinks when memory is needed by the OS or other processes, while the off-heap cache does not. Therefore, it is best to be conservative about leaving memory free for other uses, and it is not a good idea to size the off-heap cache such that all machine memory will be allocated. If off-heap allocations or other allocations fail because there is no available memory, the process is likely to die without any exception being thrown. In one test on Linux, for example, the process was killed abruptly by the OS and the only indication of the problem was the following shown by dmesg.

              Out of memory: Kill process 28768 (java) score 974 or sacrifice child
              Killed process 28768 (java)
                 total-vm:278255336kB, anon-rss:257274420kB, file-rss:0kB

          WARNING: Although this configuration property is mutable, it cannot be changed from zero to non-zero, or non-zero to zero. In other words, the size of the off-heap cache can be changed after initially configuring a non-zero size, but the off-heap cache cannot be turned on and off dynamically. An attempt to do so will cause an IllegalArgumentException to be thrown by the Environment or ReplicatedEnvironment constructor.

          Name                   Type  Mutable  Default  Minimum  Maximum
          "je.maxOffHeapMemory"  Long  Yes      0        0        -none-

          See Also:
              EnvironmentMutableConfig.setOffHeapCacheSize(long), Cache Statistics: Sizing, Constant Field Values
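
          A minimal sketch of configuring the off-heap cache alongside a smaller main cache (sizes are illustrative only; this assumes a JDK whose native allocator JE supports, per the NOTE above):

              import com.sleepycat.je.EnvironmentConfig;

              public class OffHeapSizing {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();
                      cfg.setCachePercent(40);  // a modest main cache in the heap
                      // 8GB secondary, off-heap cache (je.maxOffHeapMemory). Per the
                      // WARNING above, this cannot later be changed between zero and
                      // non-zero while the environment is open.
                      cfg.setOffHeapCacheSize(8L * 1024 * 1024 * 1024);
                  }
              }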

          SHARED_CACHE

          public static final java.lang.String SHARED_CACHE

          If true, the shared cache is used by this environment.

          By default this parameter is false and this environment uses a private cache. If this parameter is set to true, this environment will use a cache that is shared with all other open environments in this process that also set this parameter to true. There is a single shared cache per process.

          By using the shared cache, multiple open environments will make better use of memory because the cache LRU algorithm is applied across all information in all environments sharing the cache. For example, if one environment is open but not recently used, then it will only use a small portion of the cache, leaving the rest of the cache for environments that have been recently used.

          Name              Type     Mutable  Default
          "je.sharedCache"  Boolean  No       false

          See Also:
              setSharedCache(boolean), Cache Statistics: Sizing, Constant Field Values
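
          For illustration, a sketch of two environments sharing the process-wide cache (directory names are placeholders):

              import java.io.File;
              import com.sleepycat.je.Environment;
              import com.sleepycat.je.EnvironmentConfig;

              public class SharedCacheDemo {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();
                      cfg.setAllowCreate(true);
                      cfg.setSharedCache(true);  // je.sharedCache=true

                      // Both environments draw from the single shared cache, with the
                      // LRU algorithm applied across all of their data.
                      Environment envA = new Environment(new File("./env-a"), cfg);
                      Environment envB = new Environment(new File("./env-b"), cfg);

                      envB.close();
                      envA.close();
                  }
              }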

          MAX_DISK

          public static final java.lang.String MAX_DISK

          An upper limit on the number of bytes used for data storage. Works with FREE_DISK to define the storage limit. If the limit is exceeded, write operations will be prohibited.

          If set to zero (the default), no usage limit is enforced, meaning that all space on the storage volume, minus FREE_DISK, may be used. If MAX_DISK is non-zero, FREE_DISK is subtracted from MAX_DISK to determine the usage threshold for prohibiting write operations. If multiple JE environments share the same storage volume, setting MAX_DISK to a non-zero value is strongly recommended.

          Note: An exception to the rule above is when MAX_DISK is less than or equal to 10GB and FREE_DISK is not explicitly specified. See FREE_DISK for more information.

          Both the FREE_DISK and MAX_DISK thresholds (if configured) are checked during a write operation. If either threshold is crossed, the behavior of the JE environment is as follows:

          • Application write operations will throw DiskLimitException. DiskLimitException extends OperationFailureException and will invalidate the transaction, but will not invalidate the environment. Read operations may continue even when write operations are prohibited.

          • When using NoSQL DB, the above item applies to client CRUD operations as well as operations performed on internal metadata. When a disk limit is violated, NoSQL DB will throw exceptions for client write operations and for operations that update internal metadata. Related exceptions may be logged for other internal write operations. Such exceptions will be derived from the JE DiskLimitException.

          • Environment.checkpoint(com.sleepycat.je.CheckpointConfig), Environment.sync() and Database.sync() will throw DiskLimitException.

          • Environment.close() may throw DiskLimitException when a final checkpoint is performed. However, the environment will be properly closed in other respects.

          • The JE evictor will not log dirty nodes when the cache overflows and therefore dirty nodes cannot be evicted from cache. So although read operations are allowed, cache thrashing may occur if all INs do not fit in cache as recommended.

          • In an HA environment a disk limit may be violated on a replica node but not the master node. In this case, a DiskLimitException will not be thrown by a write operation on the master node. Instead, InsufficientAcksException or InsufficientReplicasException will be thrown if the ack requirements are not met.

          JE uses a log structured storage system where data files often become gradually obsolete over time (see CLEANER_MIN_UTILIZATION). The JE cleaner is responsible for reclaiming obsolete space by cleaning and deleting data files. In a standalone (non-HA) environment, data files are normally deleted quickly after being cleaned, but may be reserved and protected temporarily by a DbBackup or DiskOrderedCursor. These reserved files will be deleted as soon as they are no longer protected.

          In an HA environment, JE will retain as many reserved files as possible to support replication to nodes that are out of contact. All cleaned files are reserved (not deleted) until approaching a disk limit, at which time they are deleted, as long as they are not protected. Reserved files are protected when they are needed for replication to active nodes or for feeding an active network restore.

          For more information on reserved and protected data files, see EnvironmentStats.getActiveLogSize(), EnvironmentStats.getReservedLogSize(), EnvironmentStats.getProtectedLogSize(), EnvironmentStats.getProtectedLogSizeMap(), EnvironmentStats.getAvailableLogSize() and EnvironmentStats.getTotalLogSize().

          When multiple JE environments share the same storage volume, the FREE_DISK amount will be maintained for each environment. The following scenario illustrates use of a single shared volume with capacity 300GB:

          • JE-1 and JE-2 each have MAX_DISK=100GB and FREE_DISK=5GB, and

          • 100GB is used for fixed miscellaneous storage.

          Each JE environment will use no more than 95GB each, so at least 10GB will remain free overall. In other words, if both JE environments reach their threshold and write operations are prohibited, each JE environment will have 5GB of free space for recovery (10GB total).

          On the other hand, when an external service is also consuming disk space and its usage of disk space is variable over time, the situation is more complex and JE cannot always guarantee that FREE_DISK is honored. The following scenario includes multiple JE environments as well as an external service, all sharing a 300GB volume.

          • JE-1 and JE-2 each have MAX_DISK=100GB and FREE_DISK=5GB,

          • an external service is expected to use up to 50GB, and

          • 50GB is used for fixed miscellaneous storage.

          Assuming that the external service stays within its 50GB limit then, as in the previous example, each JE environment will normally use no more than 95GB each, and at least 10GB will remain free overall. However, if the external service exceeds its threshold, JE will make a best effort to prohibit write operations in order to honor the FREE_DISK limit, but this is not always possible, as illustrated by the following sequence of events:

          • If the external service uses all its allocated space, 50GB, and the JE environments are each using 75GB, then there will be 50GB free overall (25GB for each JE environment). Write operations are allowed in both JE environments.

          • If the external service then exceeds its limit by 25GB and uses 75GB, there will be only 25GB free overall. But each JE environment is still under its 95GB limit and there is still more than 5GB free overall, so write operations are still allowed.

          • If each JE environment uses an additional 10GB of space, there will be only 5GB free overall. Each JE environment is using only 85GB, which is under its 95GB limit. But the 5GB FREE_DISK limit for the volume overall has been reached and therefore JE write operations will be prohibited.

          Leaving only 5GB of free space in the prior scenario is not ideal, but it is at least enough for one JE environment at a time to be recovered. The reality is that when an external entity exceeds its expected disk usage, JE cannot always compensate. For example, if the external service continues to use more space in the scenario above, the volume will eventually be filled completely.

          Name          Type  Mutable  Default  Minimum  Maximum
          "je.maxDisk"  Long  Yes      0        0        -none-

          Since:
              7.5
          See Also:
              FREE_DISK, EnvironmentMutableConfig.setMaxDisk(long), EnvironmentMutableConfig.getMaxDisk(), Constant Field Values
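
          A minimal sketch of configuring a disk limit and reacting to it (the limit and directory are placeholders, and the write operations are elided):

              import java.io.File;
              import com.sleepycat.je.DiskLimitException;
              import com.sleepycat.je.Environment;
              import com.sleepycat.je.EnvironmentConfig;

              public class DiskLimitDemo {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();
                      cfg.setAllowCreate(true);
                      cfg.setMaxDisk(100L * 1024 * 1024 * 1024);  // je.maxDisk = 100GB

                      Environment env = new Environment(new File("./je-home"), cfg);
                      try {
                          // ... application write operations ...
                      } catch (DiskLimitException e) {
                          // The transaction is invalidated but the environment is not;
                          // reads may continue while writes are prohibited.
                      } finally {
                          env.close();
                      }
                  }
              }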

          FREE_DISK

          public static final java.lang.String FREE_DISK

          A lower limit on the number of bytes of free space to maintain on a volume and per JE Environment. Works with MAX_DISK to define the storage limit. If the limit is exceeded, write operations will be prohibited.

          The default FREE_DISK value is 5GB. This value is designed to be large enough to allow manual recovery after exceeding a disk threshold.

          If FREE_DISK is set to zero, no free space limit is enforced. This is not recommended, since manual recovery may be very difficult or impossible when the volume is completely full.

          If non-zero, this parameter is used in two ways.

          • FREE_DISK determines the minimum amount of free space left on the storage volume. If less than this amount is free, write operations are prohibited.

          • If MAX_DISK is configured, FREE_DISK is subtracted from MAX_DISK to determine the usage threshold for prohibiting write operations. See MAX_DISK for more information.

            Note that this subtraction could make testing inconvenient when a small value is specified for MAX_DISK and FREE_DISK is not also specified. For example, if MAX_DISK is 1GB and FREE_DISK is 5GB (its default value), then no writing would be allowed (MAX_DISK minus FREE_DISK is negative 4GB). To address this, the subtraction is performed only if one of two conditions is met:

            1. FREE_DISK is explicitly specified, or
            2. MAX_DISK is greater than 10GB.

          Name           Type  Mutable  Default              Minimum  Maximum
          "je.freeDisk"  Long  Yes      5,368,709,120 (5GB)  -none-   -none-

          Since:
              7.5
          See Also:
              MAX_DISK, Constant Field Values
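
          To illustrate the subtraction rule above: with MAX_DISK=100GB and FREE_DISK=5GB, writes are prohibited once usage reaches 95GB. A sketch using the string-valued parameters (the values are examples):

              import com.sleepycat.je.EnvironmentConfig;

              public class FreeDiskDemo {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();
                      // Usage threshold = MAX_DISK - FREE_DISK = 95GB.
                      cfg.setConfigParam(EnvironmentConfig.MAX_DISK,
                                         String.valueOf(100L * 1024 * 1024 * 1024));
                      cfg.setConfigParam(EnvironmentConfig.FREE_DISK,
                                         String.valueOf(5L * 1024 * 1024 * 1024));
                  }
              }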

          ENV_RECOVERY_FORCE_CHECKPOINT

          public static final java.lang.String ENV_RECOVERY_FORCE_CHECKPOINT

          If true, a checkpoint is forced following recovery, even if the log ends with a checkpoint.

          Name                              Type     Mutable  Default
          "je.env.recoveryForceCheckpoint"  Boolean  No       false

          See Also:
              Constant Field Values

          ENV_RECOVERY_FORCE_NEW_FILE

          public static final java.lang.String ENV_RECOVERY_FORCE_NEW_FILE

          Used after performing a restore from backup to force creation of a new log file prior to recovery.

          As of JE 6.3, the use of this parameter is unnecessary except in special cases. See the "Restoring from a backup" section in the DbBackup javadoc for more information.

          Name                           Type     Mutable  Default
          "je.env.recoveryForceNewFile"  Boolean  No       false

          See Also:
              Restoring from a backup, Constant Field Values

          HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION

          public static final java.lang.String HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION

          By default, if a checksum exception is found at the end of the log during Environment startup, JE will assume the checksum error is due to previously interrupted I/O and will quietly truncate the log and restart. If this property is set to true, when a ChecksumException occurs in the last log file during recovery, JE will instead attempt to continue reading past the corrupted record with the checksum error, to see if there are commit records following the corruption. If there are, JE throws an EnvironmentFailureException to indicate the presence of committed transactions. The user may then need to run DbTruncateLog to truncate the log for further recovery after doing manual analysis of the log. Setting this property is suitable when the application wants to guard against unusual cases.

          Name                                     Type     Mutable  Default
          "je.haltOnCommitAfterChecksumException"  Boolean  No       false

          See Also:
              Constant Field Values

          ENV_RUN_IN_COMPRESSOR

          public static final java.lang.String ENV_RUN_IN_COMPRESSOR

          If true, starts up the INCompressor thread.

          Name                      Type     Mutable  Default
          "je.env.runINCompressor"  Boolean  Yes      true

          See Also:
              Constant Field Values

          ENV_RUN_CHECKPOINTER

          public static final java.lang.String ENV_RUN_CHECKPOINTER

          If true, starts up the checkpointer thread.

          Name                      Type     Mutable  Default
          "je.env.runCheckpointer"  Boolean  Yes      true

          See Also:
              Constant Field Values

          ENV_RUN_CLEANER

          public static final java.lang.String ENV_RUN_CLEANER

          If true, starts up the cleaner thread.

          Name                Type     Mutable  Default
          "je.env.runCleaner"  Boolean  Yes      true

          See Also:
              Constant Field Values

          ENV_RUN_EVICTOR

          public static final java.lang.String ENV_RUN_EVICTOR

          If true, eviction is done by a pool of evictor threads, as well as being done inline by application threads. If false, the evictor pool is not used, regardless of the values of EVICTOR_CORE_THREADS and EVICTOR_MAX_THREADS.

          Name                 Type     Mutable  Default
          "je.env.runEvictor"  Boolean  Yes      true

          See Also:
              Constant Field Values

          ENV_RUN_OFFHEAP_EVICTOR

          public static final java.lang.String ENV_RUN_OFFHEAP_EVICTOR

          If true, off-heap eviction is done by a pool of evictor threads, as well as being done inline by application threads. If false, the evictor pool is not used, regardless of the values of OFFHEAP_CORE_THREADS and OFFHEAP_MAX_THREADS.

          Name                        Type     Mutable  Default
          "je.env.runOffHeapEvictor"  Boolean  Yes      true

          See Also:
              Constant Field Values
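
          The daemon-related booleans above are all set the same way. A sketch disabling the two evictor pools, so that eviction is done only inline by application threads:

              import com.sleepycat.je.EnvironmentConfig;

              public class DaemonToggles {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();
                      // With the pools disabled, the core/max thread settings for the
                      // evictor and off-heap evictor are ignored.
                      cfg.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false");
                      cfg.setConfigParam(EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false");
                  }
              }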

          ENV_BACKGROUND_READ_LIMIT

          public static final java.lang.String ENV_BACKGROUND_READ_LIMIT

          The maximum number of read operations performed by JE background activities (e.g., cleaning) before sleeping to ensure that application threads can perform I/O. If zero (the default) then no limitation on I/O is enforced.

          Name                          Type     Mutable  Default  Minimum  Maximum
          "je.env.backgroundReadLimit"  Integer  Yes      0        0        -none-

          See Also:
              ENV_BACKGROUND_SLEEP_INTERVAL, Constant Field Values

          ENV_BACKGROUND_WRITE_LIMIT

          public static final java.lang.String ENV_BACKGROUND_WRITE_LIMIT

          The maximum number of write operations performed by JE background activities (e.g., checkpointing and eviction) before sleeping to ensure that application threads can perform I/O. If zero (the default) then no limitation on I/O is enforced.

          Name                           Type     Mutable  Default  Minimum  Maximum
          "je.env.backgroundWriteLimit"  Integer  Yes      0        0        -none-

          See Also:
              ENV_BACKGROUND_SLEEP_INTERVAL, Constant Field Values

          ENV_CHECK_LEAKS

          public static final java.lang.String ENV_CHECK_LEAKS

          Debugging support: check for leaked locks and txns at env close.

          Name                 Type     Mutable  Default
          "je.env.checkLeaks"  Boolean  No       true

          See Also:
              Constant Field Values

          ENV_FORCED_YIELD

          public static final java.lang.String ENV_FORCED_YIELD

          Debugging support: call Thread.yield() at strategic points.

          Name                  Type     Mutable  Default
          "je.env.forcedYield"  Boolean  No       false

          See Also:
              Constant Field Values

          ENV_IS_TRANSACTIONAL

          public static final java.lang.String ENV_IS_TRANSACTIONAL

          Configures the use of transactions.

          This should be set to true when transactional guarantees such as atomicity of multiple operations and durability are important.

          If true, create an environment that is capable of performing transactions. If true is not passed, transactions may not be used. For licensing purposes, the use of this method distinguishes the use of the Transactional product. Note that if transactions are not used, specifying true does not create additional overhead in the environment.

          Name                      Type     Mutable  Default
          "je.env.isTransactional"  Boolean  No       false

          See Also:
              setTransactional(boolean), Constant Field Values
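
          A minimal sketch of a transactional environment (the directory is a placeholder and the operations inside the transaction are elided):

              import java.io.File;
              import com.sleepycat.je.Environment;
              import com.sleepycat.je.EnvironmentConfig;
              import com.sleepycat.je.Transaction;

              public class TransactionalEnvDemo {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();
                      cfg.setAllowCreate(true);
                      cfg.setTransactional(true);  // je.env.isTransactional=true

                      Environment env = new Environment(new File("./je-home"), cfg);
                      Transaction txn = env.beginTransaction(null, null);
                      try {
                          // ... multiple operations performed atomically ...
                          txn.commit();
                          txn = null;
                      } finally {
                          if (txn != null) {
                              txn.abort();  // roll back if commit was not reached
                          }
                          env.close();
                      }
                  }
              }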

          ENV_IS_LOCKING

          public static final java.lang.String ENV_IS_LOCKING

          Configures the database environment for no locking.

          If true, create the environment with record locking. This property should be set to false only in special circumstances when it is safe to run without record locking.

          This configuration option should be used when locking guarantees such as consistency and isolation are not important. If locking mode is disabled (it is enabled by default), the cleaner is automatically disabled. The user is responsible for invoking the cleaner and ensuring that there are no concurrent operations while the cleaner is running.

          Name                Type     Mutable  Default
          "je.env.isLocking"  Boolean  No       true

          See Also:
              setLocking(boolean), Constant Field Values

          ENV_READ_ONLY

          public static final java.lang.String ENV_READ_ONLY

          Configures the database environment to be read-only, and any attempt to modify a database will fail.

          A read-only environment has several limitations and is recommended only in special circumstances. Note that there is no performance advantage to opening an environment read-only.

          The primary reason for opening an environment read-only is to open a single environment in multiple JVM processes. Only one JVM process at a time may open the environment read-write. See EnvironmentLockedException.

          When the environment is open read-only, the following limitations apply.

          • In the read-only environment no writes may be performed, as expected, and databases must be opened read-only using DatabaseConfig.setReadOnly(boolean).

          • The read-only environment receives a snapshot of the data that is effectively frozen at the time the environment is opened. If the application has the environment open read-write in another JVM process and modifies the environment's databases in any way, the read-only version of the data will not be updated until the read-only JVM process closes and reopens the environment (and by extension all databases in that environment).

          • If the read-only environment is opened while the environment is in use by another JVM process in read-write mode, opening the environment read-only (recovery) is likely to take longer than it does after a clean shutdown. This is due to the fact that the read-write JVM process is writing and checkpoints are occurring that are not coordinated with the read-only JVM process. The effect is similar to opening an environment after a crash.

          • In a read-only environment, the JE cache will contain information that cannot be evicted because it was reconstructed by recovery and cannot be flushed to disk. This means that the read-only environment may not be suitable for operations that use large amounts of memory, and poor performance may result if this is attempted.

          • In a read-write environment, the log cleaner will be prohibited from deleting log files for as long as the environment is open read-only in another JVM process. This may cause disk usage to rise, and for this reason it is not recommended that an environment is kept open read-only in this manner for long periods.

          For these reasons, it is recommended that a read-only environment be used only for short periods and for operations that are not performance critical or memory intensive. With few exceptions, all application functions that require access to a JE environment should be built into a single application so that they can be performed in the JVM process where the environment is open read-write.

          In most applications, opening an environment read-only can and should be avoided.

          Name                 Type     Mutable  Default
          "je.env.isReadOnly"  Boolean  No       false

          See Also:
              setReadOnly(boolean), Constant Field Values
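
          A minimal sketch of the read-only open described above (the directory and database name are placeholders):

              import java.io.File;
              import com.sleepycat.je.Database;
              import com.sleepycat.je.DatabaseConfig;
              import com.sleepycat.je.Environment;
              import com.sleepycat.je.EnvironmentConfig;

              public class ReadOnlyOpenDemo {
                  public static void main(String[] args) {
                      EnvironmentConfig envCfg = new EnvironmentConfig();
                      envCfg.setReadOnly(true);  // je.env.isReadOnly=true

                      DatabaseConfig dbCfg = new DatabaseConfig();
                      dbCfg.setReadOnly(true);   // databases must also be read-only

                      Environment env = new Environment(new File("./je-home"), envCfg);
                      Database db = env.openDatabase(null, "myDb", dbCfg);
                      // ... read operations against a snapshot of the data ...
                      db.close();
                      env.close();
                  }
              }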

          ENV_FAIR_LATCHES

          public static final java.lang.String ENV_FAIR_LATCHES

          If true, use latches instead of synchronized blocks to implement the lock table and log write mutexes. Latches require that threads queue to obtain the mutex in question, and therefore guarantee that there will be no mutex starvation, but they do incur a performance penalty. Latches should not be necessary in most cases, so synchronized blocks are the default. An application that puts heavy load on JE with threads of different priorities might find it useful to use latches. In a Java 5 JVM, where java.util.concurrent.locks.ReentrantLock is used for the latch implementation, this parameter will determine whether the latches are 'fair' or not. This parameter is 'static' across all environments.

          Name                  Type     Mutable  Default
          "je.env.fairLatches"  Boolean  No       false

          See Also:
              Constant Field Values

          ENV_LATCH_TIMEOUT

          public static final java.lang.String ENV_LATCH_TIMEOUT

          The timeout for detecting internal latch timeouts, so that deadlocks can be detected. Latches are held internally for very short durations. If, due to unforeseen problems, a deadlock occurs, a timeout will occur after the duration specified by this parameter. When a latch timeout occurs:

          • The Environment is invalidated and must be closed.

          • An EnvironmentFailureException is thrown.

          • A full thread dump is logged at level SEVERE.

          If this happens, the thread dump in the je.info file should be preserved so it can be used to analyze the problem.

          Most applications should not change this parameter. The default value, 5 minutes, should be much longer than a latch is ever held.

          Name                   Type      Mutable  Default  Minimum  Maximum
          "je.env.latchTimeout"  Duration  No       5 min    1 ms     -none-

          Since:
              6.2
          See Also:
              Time Duration Properties, Constant Field Values

          ENV_TTL_CLOCK_TOLERANCE

          public static final java.lang.String ENV_TTL_CLOCK_TOLERANCE

          The interval added to the system clock time for determining that a record may have expired. Used when an internal integrity error may be present, but may also be due to a record that expired and the system clock was moved back.

          For example, say a record expires and then the clock is moved back by one hour to correct a daylight saving time error. Because the LN and BIN slot for an expired record are purged separately (see Time-To-Live), in this case the LN was purged but the BIN slot was not purged. When accessing the record's key via the BIN slot, it will appear that it is not expired. But then when accessing the data, the LN will not be accessible. Normally this would be considered a fatal integrity error, but since the record will expire within the 2 hour limit, it is simply treated as an expired record.

          Most applications should not change this parameter. The default value, two hours, is enough to account for minor clock adjustments or accidentally setting the clock one hour off.

          Name                        Type      Mutable  Default  Minimum  Maximum
          "je.env.ttlClockTolerance"  Duration  No       2 h      1 ms     -none-

          Since:
              7.0
          See Also:
              Time Duration Properties, Constant Field Values
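
          Duration parameters such as this one take a value-and-unit string, as described under Time Duration Properties. A sketch (the four-hour value is an arbitrary example; most applications should leave the default):

              import com.sleepycat.je.EnvironmentConfig;

              public class TtlToleranceDemo {
                  public static void main(String[] args) {
                      EnvironmentConfig cfg = new EnvironmentConfig();
                      // Widen the tolerance from the default "2 h" to "4 h".
                      cfg.setConfigParam(EnvironmentConfig.ENV_TTL_CLOCK_TOLERANCE, "4 h");
                  }
              }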

          ENV_EXPIRATION_ENABLED

          public static final java.lang.String ENV_EXPIRATION_ENABLED

          If true (the default), expired data is filtered from queries and purged by the cleaner. This might be set to false to recover data after an extended down time.

          WARNING: Disabling expiration is intended for special-purpose access for data recovery only. When this parameter is set to false, records that have expired may or may not have been purged, so they may or may not be accessible. In addition, it is possible for the key and data of a record to expire independently, so the key may be accessible (if the data is not requested by the read operation), while the record will appear to be deleted when the data is requested. The same thing is true of primary and secondary records, which are also purged independently. A record may be accessible by primary key but not secondary key, and vice-versa.

          Name                        Type     Mutable  Default
          "je.env.expirationEnabled"  Boolean  Yes      true

          See Also:
              Constant Field Values

          ENV_DB_EVICTION

          public static final java.lang.String ENV_DB_EVICTION

          If true, enable eviction of metadata for closed databases. There is no known benefit to setting this parameter to false.

          This param is unlikely to be needed for tuning, but is sometimes useful for debugging and testing.

          Name                 Type     Mutable  Default
          "je.env.dbEviction"  Boolean  No       true

          See Also:
              Cache Statistics: Debugging, Constant Field Values

          ENV_DUP_CONVERT_PRELOAD_ALL

          public static final java.lang.String ENV_DUP_CONVERT_PRELOAD_ALL

          If true (the default) preload all duplicates databases at once when upgrading from JE 4.1 and earlier. If false, preload each duplicates database individually instead. Preloading all databases at once gives a performance advantage if the JE cache is roughly large enough to contain the internal nodes for all duplicates databases. Preloading each database individually gives a performance advantage if the JE cache is roughly large enough to contain the internal nodes for a single duplicates database.

          Name                           Type     Mutable  Default
          "je.env.dupConvertPreloadAll"  Boolean  No       true

          See Also:
              Constant Field Values

          ADLER32_CHUNK_SIZE

          public static final java.lang.String ADLER32_CHUNK_SIZE

          By default, JE passes an entire log record to the Adler32 class for checksumming. This can cause problems with the GC in some cases if the records are large and there is concurrency. Setting this parameter will cause JE to pass chunks of the log record to the checksumming class so that the GC does not block. 0 means do not chunk.

          Name                    Type     Mutable  Default  Minimum  Maximum
          "je.adler32.chunkSize"  Integer  Yes      0        0        1048576 (1M)

          See Also:
              Constant Field Values

          LOG_TOTAL_BUFFER_BYTES

          public static final java.lang.String LOG_TOTAL_BUFFER_BYTES

          The total memory taken by log buffers, in bytes. If 0, use 7% of je.maxMemory. If 0 and je.sharedCache=true, use 7% divided by N where N is the number of environments sharing the global cache.

          Name                       Type  Mutable  Default  Minimum  Maximum
          "je.log.totalBufferBytes"  Long  No       0        6144L    -none-

          See Also:
              Constant Field Values

          LOG_NUM_BUFFERS

          +
          public static final java.lang.String LOG_NUM_BUFFERS
          +
          The number of JE log buffers. + +

          + + + + + + + + + + +
          NameTypeMutableDefaultMinimumMaximum
          "je.log.numBuffers"IntegerNo32-none-

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          LOG_BUFFER_SIZE

          +
          public static final java.lang.String LOG_BUFFER_SIZE
          +
          The maximum starting size of a JE log buffer. JE silently restricts + this value to be no more than the configured maximum log file size + (je.log.fileMax). + +

          + + + + + + + + + + +
          NameTypeMutableDefaultMinimumMaximum
          "je.log.bufferSize"IntegerNo1048576 (1M)1024 (1K)-none-

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
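A sketch of setting the three log buffer parameters above together;
the 16MB/4-buffer split is illustrative, not a recommendation:

    import com.sleepycat.je.EnvironmentConfig;

    public class LogBufferTuning {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            // Cap total log buffer memory at 16MB rather than 7% of
            // je.maxMemory, split across 4 buffers of up to 4MB each.
            // None of these parameters are mutable, so they must be set
            // before the environment is opened.
            config.setConfigParam(
                EnvironmentConfig.LOG_TOTAL_BUFFER_BYTES, "16777216");
            config.setConfigParam(EnvironmentConfig.LOG_NUM_BUFFERS, "4");
            config.setConfigParam(EnvironmentConfig.LOG_BUFFER_SIZE, "4194304");
        }
    }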
LOG_FAULT_READ_SIZE

public static final java.lang.String LOG_FAULT_READ_SIZE

The buffer size for faulting in objects from disk, in bytes.

Name                    Type     Mutable  Default    Minimum  Maximum
"je.log.faultReadSize"  Integer  No       2048 (2K)  32       -none-

See Also: Constant Field Values


LOG_ITERATOR_READ_SIZE

public static final java.lang.String LOG_ITERATOR_READ_SIZE

The read buffer size for log iterators, which are used when scanning the
log during activities like log cleaning and environment open, in bytes.
This may grow as the system encounters larger log entries.

Name                       Type     Mutable  Default    Minimum  Maximum
"je.log.iteratorReadSize"  Integer  No       8192 (8K)  128      -none-

See Also: Constant Field Values


LOG_ITERATOR_MAX_SIZE

public static final java.lang.String LOG_ITERATOR_MAX_SIZE

The maximum read buffer size for log iterators, which are used when
scanning the log during activities like log cleaning and environment
open, in bytes.

Name                      Type     Mutable  Default         Minimum  Maximum
"je.log.iteratorMaxSize"  Integer  No       16777216 (16M)  128      -none-

See Also: Constant Field Values


LOG_FILE_MAX

public static final java.lang.String LOG_FILE_MAX

The maximum size of each individual JE log file, in bytes.

Name              Type  Mutable  Default         Minimum       Maximum
"je.log.fileMax"  Long  No       10000000 (10M)  1000000 (1M)  1073741824 (1G)

See Also: Constant Field Values
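Because LOG_FILE_MAX is not mutable, it must be chosen before the
environment is first opened. A minimal sketch with an illustrative
100MB value and a placeholder path:

    import java.io.File;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class LogFileMaxExample {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            // Grow each .jdb file to at most ~100MB instead of the 10MB
            // default; the value is illustrative.
            config.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "100000000");
            Environment env = new Environment(new File("/path/to/env"), config);
            env.close();
        }
    }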
LOG_N_DATA_DIRECTORIES

public static final java.lang.String LOG_N_DATA_DIRECTORIES

Deprecated. As of 7.3. This feature is not known to provide benefits
beyond that of a simple RAID configuration, and will be removed in the
next release, which is slated for mid-April, 2017.

The JE environment can be spread across multiple subdirectories.
Environment subdirectories may be used to spread an environment's .jdb
files over multiple directories, and therefore over multiple disks or
file systems. Environment subdirectories reside in the environment home
directory and are named data001/ through dataNNN/, consecutively, where
NNN is the value of je.log.nDataDirectories. A typical configuration
would be to have each of the dataNNN/ names be symbolic links to actual
directories which each reside on separate file systems or disks.

If 0, all log files (*.jdb) will reside in the environment home
directory passed to the Environment constructor. A non-zero value
indicates the number of environment subdirectories to use for holding
the environment log files.

If data subdirectories are used (i.e. je.log.nDataDirectories > 0), this
parameter must be set when the environment is initially created.
Like the environment home directory, each and every one of the dataNNN/
subdirectories must also be present and writable. This parameter must
be set to the same value for all subsequent openings of the environment
or an exception will be thrown.

If the set of existing dataNNN/ subdirectories is not equivalent to the
set { 1 ... je.log.nDataDirectories } when the environment is opened, an
EnvironmentFailureException will be thrown, and the Environment will
fail to be opened.

This parameter should be set using the je.properties file rather than
the EnvironmentConfig. If not, JE command line utilities that open the
Environment will throw an exception because they will not know of the
non-zero value of this parameter.

Name                       Type     Mutable  Default  Minimum  Maximum
"je.log.nDataDirectories"  Integer  No       0        0        256

See Also: Constant Field Values


LOG_CHECKSUM_READ

public static final java.lang.String LOG_CHECKSUM_READ

If true, perform a checksum check when reading entries from the log.

Name                   Type     Mutable  Default
"je.log.checksumRead"  Boolean  No       true

See Also: Constant Field Values
LOG_VERIFY_CHECKSUMS

public static final java.lang.String LOG_VERIFY_CHECKSUMS

If true, perform a checksum verification just before and after writing
to the log. This is primarily used for debugging.

Name                      Type     Mutable  Default
"je.log.verifyChecksums"  Boolean  No       false

See Also: Constant Field Values


LOG_MEM_ONLY

public static final java.lang.String LOG_MEM_ONLY

If true, operates in an in-memory test mode without flushing the log to
disk. An environment directory must be specified, but it need not exist
and no files are written. The system operates until it runs out of
memory, at which time an OutOfMemoryError is thrown. Because the entire
log is kept in memory, this mode is normally useful only for testing.

Name              Type     Mutable  Default
"je.log.memOnly"  Boolean  No       false

See Also: Constant Field Values


LOG_FILE_CACHE_SIZE

public static final java.lang.String LOG_FILE_CACHE_SIZE

The size of the file handle cache.

Name                    Type     Mutable  Default  Minimum  Maximum
"je.log.fileCacheSize"  Integer  No       100      3        -none-

See Also: Constant Field Values


LOG_DETECT_FILE_DELETE

public static final java.lang.String LOG_DETECT_FILE_DELETE

If true, periodically detect unexpected file deletions. Normally all
file deletions should be performed as a result of JE log cleaning.
If an external file deletion is detected, JE assumes this was
accidental. This will cause the environment to be invalidated and
all methods will throw EnvironmentFailureException.

Name                       Type     Mutable  Default
"je.log.detectFileDelete"  Boolean  No       true

Since: 7.2
See Also: Constant Field Values
LOG_DETECT_FILE_DELETE_INTERVAL

public static final java.lang.String LOG_DETECT_FILE_DELETE_INTERVAL

The interval used to check for unexpected file deletions.

Name                               Type      Mutable  Default  Minimum  Maximum
"je.log.detectFileDeleteInterval"  Duration  No       1000 ms  1 ms     -none-

See Also: Time Duration Properties, Constant Field Values


LOG_FSYNC_TIMEOUT

public static final java.lang.String LOG_FSYNC_TIMEOUT

The timeout limit for group file sync, in microseconds.

Name                   Type      Mutable  Default  Minimum  Maximum
"je.log.fsyncTimeout"  Duration  No       500 ms   10 ms    24 d

See Also: Time Duration Properties, Constant Field Values


LOG_FSYNC_TIME_LIMIT

public static final java.lang.String LOG_FSYNC_TIME_LIMIT

If the time taken by an fsync exceeds this limit, a WARNING level
message is logged. If this parameter is set to zero, a message will not
be logged. By default, this parameter is 5 seconds.

Name                     Type      Mutable  Default  Minimum  Maximum
"je.log.fsyncTimeLimit"  Duration  No       5 s      0        30 s

Since: 7.0
See Also: EnvironmentStats.getFSyncMaxTime(), Time Duration Properties,
Constant Field Values
LOG_GROUP_COMMIT_INTERVAL

public static final java.lang.String LOG_GROUP_COMMIT_INTERVAL

The time interval in nanoseconds during which transactions may be
grouped to amortize the cost of write and/or fsync when a transaction
commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC on the local
machine.

Name                          Type      Mutable  Default  Minimum  Maximum
"je.log.groupCommitInterval"  Duration  No       0        0        -none-

Since: 5.0.76
See Also: Time Duration Properties, LOG_GROUP_COMMIT_THRESHOLD,
Constant Field Values


LOG_GROUP_COMMIT_THRESHOLD

public static final java.lang.String LOG_GROUP_COMMIT_THRESHOLD

The threshold value impacts the number of transactions that may be
grouped to amortize the cost of write and/or fsync when a
transaction commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC
on the local machine.

Specifying larger values can result in more transactions being grouped
together, decreasing average commit times.

Name                           Type     Mutable  Default  Minimum  Maximum
"je.log.groupCommitThreshold"  Integer  No       0        0        -none-

Since: 5.0.76
See Also: LOG_GROUP_COMMIT_INTERVAL, Constant Field Values
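A sketch of enabling group commit with the two parameters above; the
2 ms window and 8-transaction threshold are illustrative values to be
validated against the workload:

    import com.sleepycat.je.EnvironmentConfig;

    public class GroupCommitTuning {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            // Wait up to 2 ms, or until 8 commits are pending, before the
            // shared write/fsync is performed.
            config.setConfigParam(
                EnvironmentConfig.LOG_GROUP_COMMIT_INTERVAL, "2 ms");
            config.setConfigParam(
                EnvironmentConfig.LOG_GROUP_COMMIT_THRESHOLD, "8");
        }
    }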
LOG_FLUSH_SYNC_INTERVAL

public static final java.lang.String LOG_FLUSH_SYNC_INTERVAL

The maximum time interval between committing a transaction with
NO_SYNC or WRITE_NO_SYNC durability, and making the transaction durable
with respect to the storage device. To provide this guarantee, a JE
background thread is used to flush any data buffered by JE to the file
system, and also perform an fsync to force any data buffered by the
file system to the storage device. If this parameter is set to zero,
this JE background task is disabled and no such guarantee is provided.

Separately, the LOG_FLUSH_NO_SYNC_INTERVAL flushing provides a
guarantee that data is periodically flushed to the file system. To guard
against data loss due to an OS crash (and to improve performance) we
recommend that the file system is configured to periodically flush dirty
pages to the storage device. This parameter, LOG_FLUSH_SYNC_INTERVAL,
provides a fallback for flushing to the storage device, in case the
file system is not adequately configured.

Name                        Type      Mutable  Default  Minimum  Maximum
"je.log.flushSyncInterval"  Duration  Yes      20 s     0        -none-

Since: 7.2
See Also: Time Duration Properties, Constant Field Values


LOG_FLUSH_NO_SYNC_INTERVAL

public static final java.lang.String LOG_FLUSH_NO_SYNC_INTERVAL

The maximum time interval between committing a transaction with
NO_SYNC durability, and making the transaction durable with respect to
the file system. To provide this guarantee, a JE background thread is
used to flush any data buffered by JE to the file system. If this
parameter is set to zero, this JE background task is disabled and no
such guarantee is provided.

Frequent periodic flushing to the file system provides improved
durability for NO_SYNC transactions. Without this flushing, if
application write operations stop, then some number of NO_SYNC
transactions would be left in JE memory buffers and would be lost in
the event of a crash. For HA applications, this flushing reduces the
possibility of RollbackProhibitedException.
Note that periodic flushing reduces the time window where a crash can
cause transaction loss and RollbackProhibitedException, but the
window cannot be closed completely when using NO_SYNC durability.

Name                          Type      Mutable  Default  Minimum  Maximum
"je.log.flushNoSyncInterval"  Duration  Yes      5 s      0        -none-

Since: 7.2
See Also: Time Duration Properties, Constant Field Values
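Since both flush intervals are mutable, they can be tightened on a live
Environment; a sketch with illustrative 10 s / 1 s values:

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.EnvironmentMutableConfig;

    public class FlushIntervalTuning {
        // Narrow the window in which buffered NO_SYNC/WRITE_NO_SYNC
        // transactions could be lost; env is an open Environment.
        static void tightenFlushGuarantees(Environment env) {
            EnvironmentMutableConfig mutable = env.getMutableConfig();
            mutable.setConfigParam(
                EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL, "10 s");
            mutable.setConfigParam(
                EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL, "1 s");
            env.setMutableConfig(mutable);
        }
    }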
LOG_USE_ODSYNC

public static final java.lang.String LOG_USE_ODSYNC

If true (default is false) O_DSYNC is used to open JE log files.

Name                Type     Mutable  Default
"je.log.useODSYNC"  Boolean  No       false

See Also: Constant Field Values


LOG_USE_NIO

public static final java.lang.String LOG_USE_NIO

Deprecated. NIO is no longer used by JE and this parameter has no
effect.

See Also: Constant Field Values


LOG_USE_WRITE_QUEUE

public static final java.lang.String LOG_USE_WRITE_QUEUE

If true (default is true) the Write Queue is used for file I/O
operations which are blocked by concurrent I/O operations.

Name                    Type     Mutable  Default
"je.log.useWriteQueue"  Boolean  No       true

See Also: Constant Field Values


LOG_WRITE_QUEUE_SIZE

public static final java.lang.String LOG_WRITE_QUEUE_SIZE

The size of the Write Queue.

Name                     Type     Mutable  Default  Minimum  Maximum
"je.log.writeQueueSize"  Integer  No       1MB      4KB      32MB

See Also: Constant Field Values


LOG_DIRECT_NIO

public static final java.lang.String LOG_DIRECT_NIO

Deprecated. NIO is no longer used by JE and this parameter has no
effect.

See Also: Constant Field Values


LOG_CHUNKED_NIO

public static final java.lang.String LOG_CHUNKED_NIO

Deprecated. NIO is no longer used by JE and this parameter has no
effect.

See Also: Constant Field Values

ENV_RUN_VERIFIER

public static final java.lang.String ENV_RUN_VERIFIER

Whether to run the background verifier.

If true (the default), the verifier runs according to the schedule
given by VERIFY_SCHEDULE. Each time the verifier runs, it performs
checksum verification if the VERIFY_LOG setting is true and performs
Btree verification if the VERIFY_BTREE setting is true.

When corruption is detected, the Environment will be invalidated and an
EnvironmentFailureException will be thrown. Applications catching this
exception can call the new EnvironmentFailureException.isCorrupted()
method to determine whether corruption was detected.

If isCorrupted returns true, a full restore (an HA NetworkRestore or
restore from backup) should be performed to avoid further problems. The
advantage of performing verification frequently is that a problem may
be detected sooner than it would be otherwise. For HA applications,
this means that the network restore can be done while the other nodes
in the group are up, minimizing exposure to additional failures.

When index corruption is detected, the environment is not invalidated.
Instead, the corrupt index (secondary database) is marked as corrupt
in memory and a warning message is logged. All subsequent access to the
index will throw SecondaryIntegrityException. To correct the
problem, the application may perform a full restore or rebuild the
corrupt index.

Name                  Type     Mutable  Default
"je.env.runVerifier"  Boolean  Yes      true

Since: 7.3
See Also: Constant Field Values
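A sketch of distinguishing verifier-detected persistent corruption from
other environment failures during a read; the hypothetical read helper
and its arguments are supplied by the caller:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.EnvironmentFailureException;
    import com.sleepycat.je.LockMode;
    import com.sleepycat.je.OperationStatus;

    public class VerifierAwareRead {
        static OperationStatus read(
                Database db, DatabaseEntry key, DatabaseEntry data) {
            try {
                return db.get(null, key, data, LockMode.DEFAULT);
            } catch (EnvironmentFailureException e) {
                if (e.isCorrupted()) {
                    // Persistent corruption: plan a NetworkRestore or a
                    // restore from backup rather than simply reopening.
                }
                throw e;
            }
        }
    }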
VERIFY_SCHEDULE

public static final java.lang.String VERIFY_SCHEDULE

A crontab-format string indicating when to start the background
verifier.

See https://en.wikipedia.org/wiki/Cron#Configuration_file
Note that times and dates are specified in local time, not UTC time.

The data verifier will run at most once per scheduled interval. If the
complete verification (log verification followed by Btree verification)
takes longer than the scheduled interval, then the next verification
will start at the next increment of the interval. For example, if the
default schedule is used (once per day at midnight), and verification
takes 25 hours, then verification will occur once every two days
(48 hours), starting at midnight.

Name                     Type    Mutable  Default
"je.env.verifySchedule"  String  Yes      "0 0 * * *" (run once a day at midnight, local time)

Since: 7.3
See Also: Constant Field Values
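A sketch of changing the schedule; the crontab string below (run at
01:00 local time on Sundays) is an illustrative choice:

    import com.sleepycat.je.EnvironmentConfig;

    public class VerifyScheduleExample {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            // minute hour day-of-month month day-of-week
            config.setConfigParam(
                EnvironmentConfig.VERIFY_SCHEDULE, "0 1 * * 0");
        }
    }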
VERIFY_LOG

public static final java.lang.String VERIFY_LOG

Whether the background verifier should verify checksums in the log,
as if the DbVerifyLog utility were run.

If true, the entire log is read sequentially and verified. The size
of the read buffer is determined by LOG_ITERATOR_READ_SIZE.

Name                Type     Mutable  Default
"je.env.verifyLog"  Boolean  Yes      true

Since: 7.3
See Also: Constant Field Values


VERIFY_LOG_READ_DELAY

public static final java.lang.String VERIFY_LOG_READ_DELAY

The delay between reads during log verification.
A delay between reads is needed to allow other JE components, such as
HA, to make timely progress.

With the default 100 ms delay and a 131072-byte (128K) read buffer,
verifying a 1GB file adds a total delay of about 13 minutes.

This parameter applies only to the background verifier. It does not
apply to use of DbVerifyLog.

Name                         Type      Mutable  Default  Minimum  Maximum
"je.env.verifyLogReadDelay"  Duration  Yes      100 ms   0 ms     10 s

Since: 7.5
See Also: Constant Field Values
VERIFY_BTREE

public static final java.lang.String VERIFY_BTREE

Whether the background verifier should perform Btree verification,
as if the DbVerify utility were run.

If true, the Btree of all databases, external and internal, is
verified. The in-memory cache is used for verification and internal
data structures are checked. References to data records (log sequence
numbers, or LSNs) are checked to ensure they do not refer to deleted
files -- this is the most common type of corruption. Additional
checks are performed, depending on the settings for VERIFY_SECONDARIES
and VERIFY_DATA_RECORDS.

Name                  Type     Mutable  Default
"je.env.verifyBtree"  Boolean  Yes      true

Since: 7.5
See Also: Constant Field Values


VERIFY_SECONDARIES

public static final java.lang.String VERIFY_SECONDARIES

Whether to verify secondary index references during Btree verification.

An index record contains a reference to a primary key, and the
verification involves checking that a record for the primary key exists.

Note that secondary index references are verified only for each
SecondaryDatabase (and SecondaryIndex) that is currently open. The
relationship between a secondary and primary database is not stored
persistently, so JE is not aware of the relationship unless the
secondary database has been opened by the application.

Name                        Type     Mutable  Default
"je.env.verifySecondaries"  Boolean  Yes      true

Since: 7.5
See Also: Constant Field Values


VERIFY_DATA_RECORDS

public static final java.lang.String VERIFY_DATA_RECORDS

Whether to verify data records (leaf nodes, or LNs) during Btree
verification.

Regardless of this parameter's value, the Btree reference to the data
record (the log sequence number, or LSN) is checked to ensure that
it doesn't refer to a file that has been deleted by the JE cleaner --
this sort of "dangling reference" is the most common type of
corruption. If this parameter value is true, the LN is additionally
fetched from disk (if not in cache) to verify that the LSN refers to
a valid log entry. Because LNs are often not cached, this can cause
expensive random IO, and the default value for this parameter is false
for this reason. Some applications may choose to set this parameter to
true, for example, when using a storage device with fast random
IO (an SSD).

Note that Btree internal nodes (INs) are always fetched from disk
during verification, if they are not in cache, and this can result
in random IO. Verification was implemented with the assumption that
most INs will be in cache.

Name                        Type     Mutable  Default
"je.env.verifyDataRecords"  Boolean  Yes      false

Since: 7.5
See Also: Constant Field Values
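A sketch of the SSD scenario mentioned above; enabling LN fetches is an
illustrative choice, not a general recommendation:

    import com.sleepycat.je.EnvironmentConfig;

    public class SsdVerifierConfig {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            // On a device with fast random IO, fetching LNs during Btree
            // verification may be affordable.
            config.setConfigParam(
                EnvironmentConfig.VERIFY_DATA_RECORDS, "true");
        }
    }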
VERIFY_BTREE_BATCH_SIZE

public static final java.lang.String VERIFY_BTREE_BATCH_SIZE

The number of records verified per batch during Btree verification.
In order to give database remove/truncate the opportunity to execute,
records are verified in batches and there is a delay between batches.

This parameter applies only to the background verifier. It does not
apply to use of DbVerify.

Name                           Type     Mutable  Default  Minimum  Maximum
"je.env.verifyBtreeBatchSize"  Integer  Yes      1000     1        10000

See Also: Constant Field Values


VERIFY_BTREE_BATCH_DELAY

public static final java.lang.String VERIFY_BTREE_BATCH_DELAY

The delay between batches during Btree verification. In order to give
database remove/truncate the opportunity to execute, records are
verified in batches and there is a delay between batches.

A 10ms delay, the default value, should be enough to allow other
threads to run. A large value, for example 1s, would result in a total
delay of 28 hours when verifying 100m records or 100k batches.

This parameter applies only to the background verifier. It does not
apply to use of DbVerify.

Name                            Type      Mutable  Default  Minimum  Maximum
"je.env.verifyBtreeBatchDelay"  Duration  Yes      10 ms    0 ms     10 s

See Also: Constant Field Values

NODE_MAX_ENTRIES

public static final java.lang.String NODE_MAX_ENTRIES

The maximum number of entries in an internal btree node. This can be
set per-database using the DatabaseConfig object.

Name                 Type     Mutable  Default  Minimum  Maximum
"je.nodeMaxEntries"  Integer  No       128      4        32767 (32K)

See Also: Constant Field Values

TREE_MAX_EMBEDDED_LN

public static final java.lang.String TREE_MAX_EMBEDDED_LN

The maximum size (in bytes) of a record's data portion that will cause
the record to be embedded in its parent BIN.

Normally, records (key-value pairs) are stored on disk as individual
byte sequences called LNs (leaf nodes) and they are accessed via a
Btree. The nodes of the Btree are called INs (Internal Nodes) and the
INs at the bottom layer of the Btree are called BINs (Bottom Internal
Nodes). Conceptually, each BIN contains an array of slots. A slot
represents an associated data record. Among other things, it stores
the key of the record and the most recent disk address of that record.
Records and INs share the disk space (are stored in the same kind of
files), but LNs are stored separately from BINs, i.e., there is no
clustering or co-location of a BIN and its child LNs.

With embedded LNs, a whole record may be stored inside a BIN (i.e.,
a BIN slot may contain both the key and the data portion of a record).
Specifically, a record will be "embedded" if the size (in bytes) of its
data portion is less than or equal to the value of the
TREE_MAX_EMBEDDED_LN configuration parameter. The decision to embed a
record or not is taken on a record-by-record basis. As a result, a BIN
may contain both embedded and non-embedded records. The "embeddedness"
of a record is a dynamic property: a size-changing update may turn a
non-embedded record to an embedded one or vice-versa.

Notice that even though a record may be embedded, when the record is
inserted, updated, or deleted an LN for that record is still generated
and written to disk. This is because LNs also act as log records,
which are needed during recovery and/or transaction abort to undo/redo
operations that are/are-not currently reflected in the BINs. However,
during normal processing, these LNs will never be fetched from disk.

Obviously, embedding records has the performance advantage that no
extra disk read is needed to fetch the record data (i.e., the LN)
during read operations. This is especially true for operations like
cursor scans and for random searches within key ranges whose
containing BINs can fit in the JE cache (in other words when there
is locality of reference). Furthermore, embedded records do not need
to be migrated during cleaning; they are considered obsolete by default,
because they will never be needed again after their containing log file
is deleted. This makes cleaning faster, and more importantly, avoids
the dirtying of the parent BINs, which would otherwise cause even more
cleaning later.

On the other hand, embedded LNs make the BINs larger, which can lead to
more cache eviction of BINs and the associated performance problems.
When eviction does occur, performance can deteriorate as the size of
the data portion of the records grows. This is especially true for
insertion-only workloads. Therefore, increasing the value of
TREE_MAX_EMBEDDED_LN beyond the default value of 16 bytes should be
done "carefully": by considering the kind of workloads that will be run
against BDB-JE and their relative importance and expected response
times, and by running performance tests with both embedded and
non-embedded LNs.

Name                     Type     Mutable  Default  Minimum  Maximum
"je.tree.maxEmbeddedLN"  Integer  No       16       0        Integer.MAX_VALUE

See Also: Cache Statistics: Size Optimizations, Constant Field Values
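A sketch of raising the embedding threshold; the 32-byte value is
illustrative and, per the caveats above, should be validated with
performance tests:

    import com.sleepycat.je.EnvironmentConfig;

    public class EmbeddedLnTuning {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            // Embed records whose data portion is at most 32 bytes
            // (default 16). Not mutable, so it must be set before the
            // environment is opened.
            config.setConfigParam(
                EnvironmentConfig.TREE_MAX_EMBEDDED_LN, "32");
        }
    }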
TREE_MAX_DELTA

public static final java.lang.String TREE_MAX_DELTA

Deprecated. As of JE 6.0. The TREE_BIN_DELTA param alone now
determines whether a delta is logged.

See Also: Constant Field Values


TREE_BIN_DELTA

public static final java.lang.String TREE_BIN_DELTA

If more than this percentage of entries are changed on a BIN, log a
full version instead of a delta.

Name                Type     Mutable  Default  Minimum  Maximum
"je.tree.binDelta"  Integer  No       25       0        75

See Also: Constant Field Values
TREE_MIN_MEMORY

public static final java.lang.String TREE_MIN_MEMORY

The minimum bytes allocated out of the memory cache to hold Btree data
including internal nodes and record keys and data. If the specified
value is larger than the size initially available in the cache, it will
be truncated to the amount available.

TREE_MIN_MEMORY is the minimum for a single environment. By default,
500 KB or the size initially available in the cache is used, whichever
is smaller.

This param is only likely to be needed for tuning of Environments
with extremely small cache sizes. It is sometimes also useful for
debugging and testing.

Name                 Type  Mutable  Default        Minimum      Maximum
"je.tree.minMemory"  Long  Yes      512000 (500K)  51200 (50K)  -none-

See Also: Cache Statistics: Debugging, Constant Field Values
TREE_COMPACT_MAX_KEY_LENGTH

public static final java.lang.String TREE_COMPACT_MAX_KEY_LENGTH

Specifies the maximum unprefixed key length for use in the compact
in-memory key representation.

In the Btree, the JE in-memory cache, the default representation for
keys uses a byte array object per key. The per-key object overhead of
this approach ranges from 20 to 32 bytes, depending on the JVM
platform.

To reduce memory overhead, a compact representation can instead be
used where keys will be represented inside a single byte array instead
of having one byte array per key. Within the single array, all keys are
assigned a storage size equal to that taken up by the largest key, plus
one byte to hold the actual key length. The use of the fixed size array
reduces Java GC activity as well as memory overhead.

In order for the compact representation to reduce memory usage, all
keys in a database, or in a Btree internal node, must be roughly the
same size. The more fully populated the internal node, the more the
savings with this representation since the single byte array is sized to
hold the maximum number of keys in the internal node, regardless of the
actual number of keys that are present.

It's worth noting that the storage savings of the compact
representation are realized in addition to the storage benefits of key
prefixing (if it is configured), since the keys stored in the key array
are the smaller key values after the prefix has been stripped, reducing
the length of the key and making it more likely that it's small enough
for this specialized representation. This configuration parameter
(TREE_COMPACT_MAX_KEY_LENGTH) is the maximum key length, not
including the common prefix, for the keys in a Btree internal node
stored using the compact representation. See
DatabaseConfig.setKeyPrefixing(boolean).

The compact representation is used automatically when both of the
following conditions hold.

- All keys in a Btree internal node must have an unprefixed length
  that is less than or equal to the length specified by this parameter
  (TREE_COMPACT_MAX_KEY_LENGTH).
- If key lengths vary by large amounts within an internal node, the
  wasted space of the fixed length storage may negate the benefits of
  the compact representation and cause more memory to be used than with
  the default representation. In that case, the default representation
  will be used.

If this configuration parameter is set to zero, the compact
representation will not be used.

The default value of this configuration parameter is 16 bytes. The
potential drawbacks of specifying a larger length are:

- Insertion and deletion for larger keys move bytes proportional to
  the storage length of the keys.
- With the compact representation, all operations create temporary
  byte arrays for each key involved in the operation. Larger byte
  arrays mean more work for the Java GC, even though these objects are
  short lived.

Mutation of the key representation between the default and compact
approaches is automatic on a per-Btree internal node basis. For
example, if a key that exceeds the configured length is added to a node
that uses the compact representation, the node is automatically
mutated to the default representation. A best effort is made to
prevent frequent mutations that could increase Java GC activity.

To determine how often the compact representation is used in a
running application, see EnvironmentStats.getNINCompactKeyIN().

Name                           Type     Mutable  Default  Minimum  Maximum
"je.tree.compactMaxKeyLength"  Integer  No       16       0        256

Since: 5.0
See Also: DatabaseConfig.setKeyPrefixing(boolean),
EnvironmentStats.getNINCompactKeyIN(), Cache Statistics: Size
Optimizations, Constant Field Values
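A sketch of checking how often the compact representation is in use,
via the statistic mentioned above; env is assumed to be an open
Environment:

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;

    public class CompactKeyStatCheck {
        // Number of INs currently using the compact key representation.
        static long compactKeyINs(Environment env) {
            EnvironmentStats stats = env.getStats(null);
            return stats.getNINCompactKeyIN();
        }
    }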
COMPRESSOR_WAKEUP_INTERVAL

public static final java.lang.String COMPRESSOR_WAKEUP_INTERVAL

The compressor thread wakeup interval in microseconds.

Name                            Type      Mutable  Default  Minimum  Maximum
"je.compressor.wakeupInterval"  Duration  No       5 s      1 s      75 min

See Also: Time Duration Properties, Constant Field Values
COMPRESSOR_DEADLOCK_RETRY

public static final java.lang.String COMPRESSOR_DEADLOCK_RETRY

The number of times to retry a compression run if a deadlock occurs.

Name                           Type     Mutable  Default  Minimum  Maximum
"je.compressor.deadlockRetry"  Integer  No       3        0        -none-

See Also: Constant Field Values


COMPRESSOR_LOCK_TIMEOUT

public static final java.lang.String COMPRESSOR_LOCK_TIMEOUT

The lock timeout for compressor transactions in microseconds.

Name                         Type      Mutable  Default  Minimum  Maximum
"je.compressor.lockTimeout"  Duration  No       500 ms   0        75 min

See Also: Time Duration Properties, Constant Field Values


COMPRESSOR_PURGE_ROOT

public static final java.lang.String COMPRESSOR_PURGE_ROOT

Deprecated. As of 3.3.87. Compression of the root node no longer has
any benefit and this feature has been removed. This parameter has no
effect.

See Also: Constant Field Values
EVICTOR_EVICT_BYTES

public static final java.lang.String EVICTOR_EVICT_BYTES

When eviction occurs, the evictor will push memory usage to this number
of bytes below MAX_MEMORY. No more than 50% of je.maxMemory will be
evicted per eviction cycle, regardless of this setting.

When using the shared cache feature, the value of this property is
applied the first time the cache is set up. New environments that
join the cache do not alter the cache setting.

This parameter impacts how often background evictor threads are awoken
as well as the size of latency spikes caused by critical eviction.

Name                     Type  Mutable  Default        Minimum    Maximum
"je.evictor.evictBytes"  Long  No       524288 (512K)  1024 (1K)  -none-

See Also: Cache Statistics: Eviction, Cache Statistics: Critical
Eviction, Constant Field Values


EVICTOR_NODES_PER_SCAN

public static final java.lang.String EVICTOR_NODES_PER_SCAN

Deprecated. As of JE 6.0. This parameter is ignored by the new, more
efficient and more accurate evictor.

See Also: Constant Field Values


EVICTOR_CRITICAL_PERCENTAGE

public static final java.lang.String EVICTOR_CRITICAL_PERCENTAGE

At this percentage over the allotted cache, critical eviction will
start. For example, if this parameter is 5, then when the cache size is
5% over its maximum or 105% full, critical eviction will start.

Critical eviction is eviction performed in application threads as part
of normal database access operations. Background eviction, on the other
hand, is performed in JE evictor threads as well as during log cleaning
and checkpointing. Background eviction is unconditionally started when
the cache size exceeds its maximum. When critical eviction is also
performed (concurrently with background eviction), it helps to ensure
that the cache size does not continue to grow, but can have a negative
impact on operation latency.

By default this parameter is zero, which means that critical eviction
will start as soon as the cache size exceeds its maximum. Some
applications may wish to set this parameter to a non-zero value to
improve operation latency, when eviction is a significant performance
factor and latency requirements are not being satisfied.

When setting this parameter to a non-zero value, for example 5, be sure
to reserve enough heap memory for the cache size to be over its
configured maximum, for example 105% full.

Name                             Type     Mutable  Default  Minimum  Maximum
"je.evictor.criticalPercentage"  Integer  No       0        0        1000

See Also: Cache Statistics: Critical Eviction, Constant Field Values
EVICTOR_DEADLOCK_RETRY

public static final java.lang.String EVICTOR_DEADLOCK_RETRY

Deprecated. As of JE 4.1, since the single evictor thread has been
replaced by a more robust thread pool.

See Also: Constant Field Values


EVICTOR_LRU_ONLY

public static final java.lang.String EVICTOR_LRU_ONLY

Deprecated. As of JE 6.0. This parameter is ignored by the new,
more efficient and more accurate evictor.

See Also: Constant Field Values


EVICTOR_N_LRU_LISTS

public static final java.lang.String EVICTOR_N_LRU_LISTS

The number of LRU lists in the main JE cache.

Ideally, all nodes managed by an LRU eviction policy should appear in
a single LRU list, ordered by the "hotness" of each node. However,
such a list is accessed very frequently by multiple threads, and can
become a synchronization bottleneck. To avoid this problem, the
evictor can employ multiple LRU lists. The nLRULists parameter
specifies the number of LRU lists to be used. Increasing the number
of LRU lists alleviates any potential synchronization bottleneck, but
it also decreases the quality of the LRU approximation.

Name                    Type     Mutable  Default  Minimum  Maximum
"je.evictor.nLRULists"  Integer  No       4        1        32

See Also: Cache Statistics: LRU List Contention, Constant Field Values


EVICTOR_FORCED_YIELD

public static final java.lang.String EVICTOR_FORCED_YIELD

Call Thread.yield() at each check for cache overflow. This potentially
improves GC performance, but little testing has been done and the
actual benefit is unknown.

When using the shared cache feature, the value of this property is
applied the first time the cache is set up. New environments that
join the cache do not alter the cache setting.

This param is unlikely to be needed for tuning, but is sometimes
useful for debugging and testing.

Name                      Type     Mutable  Default
"je.evictor.forcedYield"  Boolean  No       false

See Also: Cache Statistics: Debugging, Constant Field Values
EVICTOR_CORE_THREADS

public static final java.lang.String EVICTOR_CORE_THREADS

The minimum number of threads in the eviction thread pool.

These threads help keep memory usage within cache bounds, offloading
work from application threads.

EVICTOR_CORE_THREADS, EVICTOR_MAX_THREADS and EVICTOR_KEEP_ALIVE are
used to configure the core, max and keepalive attributes for the
ThreadPoolExecutor which implements the eviction thread pool.

Name                      Type     Mutable  Default  Minimum  Maximum
"je.evictor.coreThreads"  Integer  Yes      1        0        Integer.MAX_VALUE

See Also: Constant Field Values


EVICTOR_MAX_THREADS

public static final java.lang.String EVICTOR_MAX_THREADS

The maximum number of threads in the eviction thread pool.

These threads help keep memory usage within cache bounds, offloading
work from application threads. If the eviction thread pool receives
more work, it will allocate up to this number of threads. These threads
will terminate if they are idle for more than the time indicated by
EVICTOR_KEEP_ALIVE.

EVICTOR_CORE_THREADS, EVICTOR_MAX_THREADS and EVICTOR_KEEP_ALIVE are
used to configure the core, max and keepalive attributes for the
ThreadPoolExecutor which implements the eviction thread pool.

Name                     Type     Mutable  Default  Minimum  Maximum
"je.evictor.maxThreads"  Integer  Yes      10       1        Integer.MAX_VALUE

See Also: Constant Field Values


EVICTOR_KEEP_ALIVE

public static final java.lang.String EVICTOR_KEEP_ALIVE

The duration that excess threads in the eviction thread pool will stay
idle; after this period, idle threads will terminate.

EVICTOR_CORE_THREADS, EVICTOR_MAX_THREADS and EVICTOR_KEEP_ALIVE are
used to configure the core, max and keepalive attributes for the
ThreadPoolExecutor which implements the eviction thread pool.

Name                    Type      Mutable  Default  Minimum  Maximum
"je.evictor.keepAlive"  Duration  Yes      10 min   1 s      1 d

See Also: Time Duration Properties, Constant Field Values
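All three pool parameters above are mutable, so the pool can be resized
on a live Environment; the 2/8/"5 min" values below are illustrative:

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.EnvironmentMutableConfig;

    public class EvictorPoolTuning {
        static void resizeEvictorPool(Environment env) {
            EnvironmentMutableConfig mutable = env.getMutableConfig();
            mutable.setConfigParam(EnvironmentConfig.EVICTOR_CORE_THREADS, "2");
            mutable.setConfigParam(EnvironmentConfig.EVICTOR_MAX_THREADS, "8");
            mutable.setConfigParam(EnvironmentConfig.EVICTOR_KEEP_ALIVE, "5 min");
            env.setMutableConfig(mutable);
        }
    }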
EVICTOR_ALLOW_BIN_DELTAS

public static final java.lang.String EVICTOR_ALLOW_BIN_DELTAS

Allow Bottom Internal Nodes (BINs) to be written in a delta format
during eviction. Using a delta format will improve write and log
cleaning performance. There is no known performance benefit to setting
this parameter to false.

This param is unlikely to be needed for tuning, but is sometimes
useful for debugging and testing.

Name                         Type     Mutable  Default
"je.evictor.allowBinDeltas"  Boolean  No       true

See Also: Cache Statistics: Debugging, Constant Field Values


OFFHEAP_EVICT_BYTES

public static final java.lang.String OFFHEAP_EVICT_BYTES

The off-heap evictor will attempt to keep memory usage this number of
bytes below MAX_OFF_HEAP_MEMORY.

If this value is too small, memory usage may exceed the maximum and then
"critical eviction" is needed, which will increase operation latency in
the application threads.

Name                     Type  Mutable  Default          Minimum    Maximum
"je.offHeap.evictBytes"  Long  No       52428800 (50MB)  1024 (1K)  -none-

See Also: Cache Statistics: Critical Eviction, Constant Field Values
OFFHEAP_N_LRU_LISTS

public static final java.lang.String OFFHEAP_N_LRU_LISTS

The number of LRU lists in the off-heap JE cache.

Ideally, all nodes managed by an LRU eviction policy should appear in
a single LRU list, ordered by the "hotness" of each node. However,
such a list is accessed very frequently by multiple threads, and can
become a synchronization bottleneck. To avoid this problem, the
evictor can employ multiple LRU lists. The nLRULists parameter
specifies the number of LRU lists to be used. Increasing the number
of LRU lists alleviates any potential synchronization bottleneck, but
it also decreases the quality of the LRU approximation.

Name                    Type     Mutable  Default  Minimum  Maximum
"je.offHeap.nLRULists"  Integer  No       4        1        32

See Also: Cache Statistics: LRU List Contention, Constant Field Values


OFFHEAP_CHECKSUM

public static final java.lang.String OFFHEAP_CHECKSUM

Can be used to add a checksum to each off-heap block when the block is
written, and validate the checksum when the block is read, for debugging
purposes. Setting this param to true adds memory and CPU overhead, and
it should normally be set to false in a production environment.

Name                   Type     Mutable  Default
"je.offHeap.checksum"  Boolean  No       false

See Also: Cache Statistics: Debugging, Constant Field Values


OFFHEAP_CORE_THREADS

public static final java.lang.String OFFHEAP_CORE_THREADS

The minimum number of threads in the off-heap eviction thread pool.

These threads help keep memory usage within cache bounds, offloading
work from application threads.

OFFHEAP_CORE_THREADS, OFFHEAP_MAX_THREADS and OFFHEAP_KEEP_ALIVE are
used to configure the core, max and keepalive attributes for the
ThreadPoolExecutor which implements the eviction thread pool.

Name                      Type     Mutable  Default  Minimum  Maximum
"je.offHeap.coreThreads"  Integer  Yes      1        0        Integer.MAX_VALUE

See Also: Constant Field Values


OFFHEAP_MAX_THREADS

public static final java.lang.String OFFHEAP_MAX_THREADS

The maximum number of threads in the off-heap eviction thread pool.

These threads help keep memory usage within cache bounds, offloading
work from application threads. If the eviction thread pool receives
more work, it will allocate up to this number of threads. These
threads will terminate if they are idle for more than the time
indicated by OFFHEAP_KEEP_ALIVE.

If the number of threads is too small, memory usage may exceed the
maximum and then "critical eviction" is needed, which will increase
operation latency in the application threads.

OFFHEAP_CORE_THREADS, OFFHEAP_MAX_THREADS and OFFHEAP_KEEP_ALIVE are
used to configure the core, max and keepalive attributes for the
ThreadPoolExecutor which implements the eviction thread pool.

Name                     Type     Mutable  Default  Minimum  Maximum
"je.offHeap.maxThreads"  Integer  Yes      3        1        Integer.MAX_VALUE

See Also: Constant Field Values


OFFHEAP_KEEP_ALIVE

public static final java.lang.String OFFHEAP_KEEP_ALIVE

The duration that excess threads in the off-heap eviction thread pool
will stay idle; after this period, idle threads will terminate.

OFFHEAP_CORE_THREADS, OFFHEAP_MAX_THREADS and OFFHEAP_KEEP_ALIVE are
used to configure the core, max and keepalive attributes for the
ThreadPoolExecutor which implements the eviction thread pool.

Name                    Type      Mutable  Default  Minimum  Maximum
"je.offHeap.keepAlive"  Duration  Yes      10 min   1 s      1 d

See Also: Time Duration Properties, Constant Field Values
CHECKPOINTER_BYTES_INTERVAL

public static final java.lang.String CHECKPOINTER_BYTES_INTERVAL

Ask the checkpointer to run every time we write this many bytes to the
log. If set, supersedes CHECKPOINTER_WAKEUP_INTERVAL. To use
time-based checkpointing, set this to 0.

Name                             Type  Mutable  Default         Minimum  Maximum
"je.checkpointer.bytesInterval"  Long  No       20000000 (20M)  0        -none-

See Also: Constant Field Values


CHECKPOINTER_WAKEUP_INTERVAL

public static final java.lang.String CHECKPOINTER_WAKEUP_INTERVAL

The checkpointer wakeup interval in microseconds. By default, this
is inactive and we wake up the checkpointer as a function of the
number of bytes written to the log (CHECKPOINTER_BYTES_INTERVAL).

Name                              Type      Mutable  Default  Minimum  Maximum
"je.checkpointer.wakeupInterval"  Duration  No       0        1 s      75 min

See Also: Time Duration Properties, Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +
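As a sketch of how the two checkpointer triggers interact: setting the bytes interval to 0 enables the wakeup interval. Neither parameter is mutable, so both must be set before the Environment is opened; the 5-minute value is illustrative.

    import com.sleepycat.je.EnvironmentConfig;

    public class TimeBasedCheckpointing {
        public static void main(String[] args) {
            EnvironmentConfig cfg = new EnvironmentConfig();
            // Disable byte-based triggering so the wakeup interval takes effect.
            cfg.setConfigParam(EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, "0");
            // Illustrative interval; must fall within the documented 1 s..75 min range.
            cfg.setConfigParam(EnvironmentConfig.CHECKPOINTER_WAKEUP_INTERVAL, "5 min");
        }
    }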

          CHECKPOINTER_DEADLOCK_RETRY

          +
          public static final java.lang.String CHECKPOINTER_DEADLOCK_RETRY
          +
The number of times to retry a checkpoint if it runs into a deadlock.

Name: "je.checkpointer.deadlockRetry" | Type: Integer | Mutable: no | Default: 3 | Minimum: 0 | Maximum: -none-

See Also: Constant Field Values

          CHECKPOINTER_HIGH_PRIORITY

          +
          public static final java.lang.String CHECKPOINTER_HIGH_PRIORITY
          +
If true, the checkpointer uses more resources in order to complete the checkpoint in a shorter time interval. Btree latches are held and other threads are blocked for a longer period. When set to true, application response time may be longer during a checkpoint.

Name: "je.checkpointer.highPriority" | Type: Boolean | Mutable: yes | Default: false

See Also: Constant Field Values

          CLEANER_MIN_UTILIZATION

          +
          public static final java.lang.String CLEANER_MIN_UTILIZATION
          +
The cleaner will keep the total disk space utilization percentage above this value.

Name: "je.cleaner.minUtilization" | Type: Integer | Mutable: yes | Default: 50 | Minimum: 0 | Maximum: 90

See Also: Constant Field Values

          CLEANER_MIN_FILE_UTILIZATION

          +
          public static final java.lang.String CLEANER_MIN_FILE_UTILIZATION
          +
A log file will be cleaned if its utilization percentage is below this value, irrespective of total utilization.

Name: "je.cleaner.minFileUtilization" | Type: Integer | Mutable: yes | Default: 5 | Minimum: 0 | Maximum: 50

See Also: Constant Field Values
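A sketch of adjusting the two utilization thresholds above. Both parameters are mutable; the values are illustrative, and raising them trades disk space for additional cleaner work:

    import com.sleepycat.je.EnvironmentConfig;

    public class CleanerUtilizationTuning {
        public static void main(String[] args) {
            EnvironmentConfig cfg = new EnvironmentConfig();
            // Keep overall log utilization above 60% instead of the default 50%.
            cfg.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, "60");
            // Clean any single file whose utilization falls below 10%.
            cfg.setConfigParam(EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, "10");
        }
    }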

          CLEANER_BYTES_INTERVAL

          +
          public static final java.lang.String CLEANER_BYTES_INTERVAL
          +
The cleaner checks disk utilization every time we write this many bytes to the log. If zero (the default), it is set to either the LOG_FILE_MAX value divided by four, or to 100 MB, whichever is smaller.

When overriding the default value, use caution to ensure that the cleaner is woken frequently enough that reserved files are deleted quickly, to avoid violating a disk limit.

Name: "je.cleaner.bytesInterval" | Type: Long | Mutable: yes | Default: 0 | Minimum: 0 | Maximum: -none-

See Also: CLEANER_WAKEUP_INTERVAL, Constant Field Values

          CLEANER_WAKEUP_INTERVAL

          +
          public static final java.lang.String CLEANER_WAKEUP_INTERVAL
          +
The cleaner checks whether cleaning is needed if this interval elapses without any writing, to handle the case where cleaning or checkpointing is necessary to reclaim disk space, but writing has stopped. This addresses the problem that CLEANER_BYTES_INTERVAL may not cause cleaning, and CHECKPOINTER_BYTES_INTERVAL may not cause checkpointing, when enough writing has not occurred to exceed these intervals.

If this parameter is set to zero, the cleaner wakeup interval is disabled, and cleaning and checkpointing will occur only via CLEANER_BYTES_INTERVAL, CHECKPOINTER_BYTES_INTERVAL, and CHECKPOINTER_WAKEUP_INTERVAL.

For example, if a database were removed or truncated, or large records were deleted, the amount written to the log may not exceed CLEANER_BYTES_INTERVAL. If writing were to stop at that point, no cleaning would occur, if it were not for the wakeup interval.

In addition, even when cleaning is performed, a checkpoint is additionally needed to reclaim disk space. This may not occur if CHECKPOINTER_BYTES_INTERVAL or CHECKPOINTER_WAKEUP_INTERVAL does not happen to cause a checkpoint after write operations have stopped. If files have been cleaned and a checkpoint is needed to reclaim space, and write operations have stopped, a checkpoint will be scheduled when the CLEANER_WAKEUP_INTERVAL elapses. The checkpoint will be performed in the JE checkpointer thread if it is not disabled, or when Environment.checkpoint(com.sleepycat.je.CheckpointConfig) is called.

In test environments it is fairly common for application writing to stop, and then to expect cleaning to occur as a result of the last set of operations. This situation may also arise in production environments, for example, during repair of an out-of-disk situation.

Name: "je.cleaner.wakeupInterval" | Type: Duration | Mutable: yes | Default: 10 s | Minimum: 0 | Maximum: 10 h

Since: 7.1

See Also: Time Duration Properties, CLEANER_BYTES_INTERVAL, Constant Field Values

          CLEANER_FETCH_OBSOLETE_SIZE

          +
          public static final java.lang.String CLEANER_FETCH_OBSOLETE_SIZE
          +
If true, the cleaner will fetch records to determine their size and more accurately calculate log utilization. Normally when a record is updated or deleted without first being read (sometimes called a blind delete/update), the size of the previous version of the record is unknown and therefore the cleaner's utilization calculations may be incorrect. Setting this parameter to true will cause a record to be read during a blind delete/update, in order to determine its size. This will ensure that the cleaner's utilization calculations are correct, but will cause more (potentially random) IO.

Name: "je.cleaner.fetchObsoleteSize" | Type: Boolean | Mutable: yes | Default: false

See Also: CLEANER_ADJUST_UTILIZATION, Constant Field Values

          CLEANER_ADJUST_UTILIZATION

          +
          public static final java.lang.String CLEANER_ADJUST_UTILIZATION
          +
Deprecated in JE 6.3. Adjustments are no longer needed because LN log sizes have been stored in the Btree since JE 6.0.

See Also: Constant Field Values

          CLEANER_DEADLOCK_RETRY

          +
          public static final java.lang.String CLEANER_DEADLOCK_RETRY
          +
The number of times to retry cleaning if a deadlock occurs.

Name: "je.cleaner.deadlockRetry" | Type: Integer | Mutable: yes | Default: 3 | Minimum: 0 | Maximum: -none-

See Also: Constant Field Values

          CLEANER_LOCK_TIMEOUT

          +
          public static final java.lang.String CLEANER_LOCK_TIMEOUT
          +
The lock timeout for cleaner transactions in microseconds.

Name: "je.cleaner.lockTimeout" | Type: Duration | Mutable: yes | Default: 500 ms | Minimum: 0 | Maximum: 75 min

See Also: Time Duration Properties, Constant Field Values

          CLEANER_EXPUNGE

          +
          public static final java.lang.String CLEANER_EXPUNGE
          +
If true (the default setting), the cleaner deletes log files after successful cleaning.

This parameter may be set to false for diagnosing log cleaning problems. For example, if a bug causes a LOG_FILE_NOT_FOUND exception, when reproducing the problem it is often necessary to avoid deleting files so they can be used for diagnosis. When this parameter is false:

  • Rather than delete files that are successfully cleaned, the cleaner renames them.
  • When renaming a file, its extension is changed from ".jdb" to ".del" and its last modification date is set to the current time.
  • Depending on the setting of the CLEANER_USE_DELETED_DIR parameter, the file is either renamed in its current data directory (the default), or moved into the "deleted" sub-directory.

When this parameter is set to false, disk usage may grow without bounds and the application is responsible for removing the cleaned files. It may be necessary to write a script for deleting the least recently cleaned files when free disk space is low. The ".del" extension and the last modification time can be leveraged to write such a script. The "deleted" sub-directory can be used to avoid granting write or delete permissions for the main data directory to the script.

Name: "je.cleaner.expunge" | Type: Boolean | Mutable: yes | Default: true

See Also: Constant Field Values

          CLEANER_USE_DELETED_DIR

          +
          public static final java.lang.String CLEANER_USE_DELETED_DIR
          +
When CLEANER_EXPUNGE is false, the CLEANER_USE_DELETED_DIR parameter determines whether successfully cleaned files are moved to the "deleted" sub-directory.

CLEANER_USE_DELETED_DIR applies only when CLEANER_EXPUNGE is false. When CLEANER_EXPUNGE is true, successfully cleaned files are deleted and the CLEANER_USE_DELETED_DIR parameter setting is ignored.

When CLEANER_USE_DELETED_DIR is true (and CLEANER_EXPUNGE is false), the cleaner will move successfully cleaned data files (".jdb" files) to the "deleted" sub-directory of the Environment directory, in addition to changing the file extension to ".del". In this case, the "deleted" sub-directory must have been created by the application before opening the Environment. This allows the application to control permissions on this sub-directory. When multiple data directories are used (LOG_N_DATA_DIRECTORIES), a "deleted" sub-directory must be created under each data directory. Note that File.renameTo(File) is used to move the file, and this method may or may not support moving the file to a different volume (when the "deleted" directory is a file system link) on a particular platform.

When CLEANER_USE_DELETED_DIR is false (and CLEANER_EXPUNGE is false), the cleaner will change the file extension of successfully cleaned data files from ".jdb" to ".del", but will not move the files to a different directory.

Name: "je.cleaner.useDeletedDir" | Type: Boolean | Mutable: yes | Default: false

See Also: Constant Field Values
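A sketch of the diagnostic setup described above: retain cleaned files as ".del" files in a pre-created "deleted" sub-directory. The environment path is hypothetical:

    import java.io.File;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class RetainCleanedFiles {
        public static void main(String[] args) {
            File home = new File("/tmp/je-env");      // hypothetical path
            new File(home, "deleted").mkdirs();       // must exist before open
            EnvironmentConfig cfg = new EnvironmentConfig();
            cfg.setAllowCreate(true);
            cfg.setConfigParam(EnvironmentConfig.CLEANER_EXPUNGE, "false");
            cfg.setConfigParam(EnvironmentConfig.CLEANER_USE_DELETED_DIR, "true");
            Environment env = new Environment(home, cfg);
            env.close();
        }
    }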

          CLEANER_MIN_AGE

          +
          public static final java.lang.String CLEANER_MIN_AGE
          +
The minimum age of a file (number of files between it and the active file) to qualify it for cleaning under any conditions.

Name: "je.cleaner.minAge" | Type: Integer | Mutable: yes | Default: 2 | Minimum: 1 | Maximum: 1000

See Also: Constant Field Values

          CLEANER_MAX_BATCH_FILES

          +
          public static final java.lang.String CLEANER_MAX_BATCH_FILES
          +
Deprecated in 7.0. No longer used because the cleaner no longer has a backlog.

See Also: Constant Field Values

          CLEANER_READ_SIZE

          +
          public static final java.lang.String CLEANER_READ_SIZE
          +
The read buffer size for cleaning. If zero (the default), the LOG_ITERATOR_READ_SIZE value is used.

Name: "je.cleaner.readSize" | Type: Integer | Mutable: yes | Default: 0 | Minimum: 128 | Maximum: -none-

See Also: Constant Field Values

          CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE

          +
          public static final java.lang.String CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE
          +
Tracking of detailed cleaning information will use no more than this percentage of the cache. The default value is 2% of MAX_MEMORY. If 0 and SHARED_CACHE is true, use 2% divided by N where N is the number of environments sharing the global cache.

Name: "je.cleaner.detailMaxMemoryPercentage" | Type: Integer | Mutable: yes | Default: 2 | Minimum: 1 | Maximum: 90

See Also: Constant Field Values

          CLEANER_FORCE_CLEAN_FILES

          +
          public static final java.lang.String CLEANER_FORCE_CLEAN_FILES
          +
Specifies a list of files or file ranges to be cleaned at a time when no other log cleaning is necessary. This parameter is intended for use in forcing the cleaning of a large number of log files. File numbers are in hex and are comma separated or hyphen separated to specify ranges, e.g.: '9,a,b-d' will clean 5 files.

Name: "je.cleaner.forceCleanFiles" | Type: String | Mutable: no | Default: ""

See Also: Constant Field Values
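A sketch of forcing a range of old files through the cleaner; the file numbers are hex, as noted above, and the range is illustrative:

    import com.sleepycat.je.EnvironmentConfig;

    public class ForceCleanExample {
        public static void main(String[] args) {
            EnvironmentConfig cfg = new EnvironmentConfig();
            // Clean files 0x9, 0xa and 0xb..0xd (five files) when the cleaner is idle.
            // This parameter is not mutable, so it must be set before opening.
            cfg.setConfigParam(EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "9,a,b-d");
        }
    }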

          CLEANER_UPGRADE_TO_LOG_VERSION

          +
          public static final java.lang.String CLEANER_UPGRADE_TO_LOG_VERSION
          +
All log files having a log version prior to the specified version will be cleaned at a time when no other log cleaning is necessary. Intended for use in upgrading old-format log files forward to the current log format version, e.g., to take advantage of format improvements; note that log upgrading is optional. The default value zero (0) specifies that no upgrading will occur. The value negative one (-1) specifies upgrading to the current log version.

Name: "je.cleaner.upgradeToLogVersion" | Type: Integer | Mutable: no | Default: 0 | Minimum: -1 | Maximum: -none-

See Also: Constant Field Values

          CLEANER_THREADS

          +
          public static final java.lang.String CLEANER_THREADS
          +
The number of threads allocated by the cleaner for log file processing. If the cleaner backlog becomes large, try increasing this value.

Name: "je.cleaner.threads" | Type: Integer | Mutable: yes | Default: 1 | Minimum: 1 | Maximum: -none-

See Also: Constant Field Values

          CLEANER_LOOK_AHEAD_CACHE_SIZE

          +
          public static final java.lang.String CLEANER_LOOK_AHEAD_CACHE_SIZE
          +
The look ahead cache size for cleaning in bytes. Increasing this value can reduce the number of Btree lookups.

Name: "je.cleaner.lookAheadCacheSize" | Type: Integer | Mutable: yes | Default: 8192 (8K) | Minimum: 0 | Maximum: -none-

See Also: Constant Field Values

          CLEANER_FOREGROUND_PROACTIVE_MIGRATION

          +
          public static final java.lang.String CLEANER_FOREGROUND_PROACTIVE_MIGRATION
          +
          Deprecated. This parameter is ignored and proactive migration is no + longer supported due to its negative impact on eviction and Btree + splits. To reduce a cleaner backlog, configure more cleaner threads.
See Also: Constant Field Values

          CLEANER_BACKGROUND_PROACTIVE_MIGRATION

          +
          public static final java.lang.String CLEANER_BACKGROUND_PROACTIVE_MIGRATION
          +
          Deprecated. This parameter is ignored and proactive migration is no + longer supported due to its negative impact on eviction and + checkpointing. To reduce a cleaner backlog, configure more cleaner + threads.
See Also: Constant Field Values

          CLEANER_LAZY_MIGRATION

          +
          public static final java.lang.String CLEANER_LAZY_MIGRATION
          +
          Deprecated. This parameter is ignored and lazy migration is no longer + supported due to its negative impact on eviction and checkpointing. + To reduce a cleaner backlog, configure more cleaner threads.
See Also: Constant Field Values

          DOS_PRODUCER_QUEUE_TIMEOUT

          +
          public static final java.lang.String DOS_PRODUCER_QUEUE_TIMEOUT
          +
The timeout for Disk Ordered Scan producer thread queue offers, in milliseconds.

Name: "je.env.diskOrderedScanLockTimeout" | Type: Duration | Mutable: yes | Default: 10 secs | Minimum: 0 | Maximum: 75 min

See Also: Time Duration Properties, Constant Field Values

          LOCK_N_LOCK_TABLES

          +
          public static final java.lang.String LOCK_N_LOCK_TABLES
          +
Number of Lock Tables. Set this to a value other than 1 when an application has multiple threads performing concurrent JE operations. It should be set to a prime number, and in general not higher than the number of application threads performing JE operations.

Name: "je.lock.nLockTables" | Type: Integer | Mutable: no | Default: 1 | Minimum: 1 | Maximum: 32767 (32K)

See Also: Constant Field Values

          LOCK_TIMEOUT

          +
          public static final java.lang.String LOCK_TIMEOUT
          +
Configures the default lock timeout. It may be overridden on a per-transaction basis by calling Transaction.setLockTimeout(long, TimeUnit).

A value of zero disables lock timeouts. This is not recommended, even when the application expects that deadlocks will not occur or will be easily resolved. A lock timeout is a fall-back that guards against unexpected "live lock", unresponsive threads, or application failure to close a cursor or to commit or abort a transaction.

Name: "je.lock.timeout" | Type: Duration | Mutable: no | Default: 500 ms | Minimum: 0 | Maximum: 75 min

See Also: setLockTimeout(long, TimeUnit), Time Duration Properties, Constant Field Values
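A sketch combining the two locking parameters above: a prime-sized lock table array for a multi-threaded application, plus a longer default lock timeout. Both values are illustrative, and neither parameter is mutable, so they must be set before opening:

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.EnvironmentConfig;

    public class LockingConfigExample {
        public static void main(String[] args) {
            EnvironmentConfig cfg = new EnvironmentConfig();
            // A prime number, no larger than the number of JE worker threads.
            cfg.setConfigParam(EnvironmentConfig.LOCK_N_LOCK_TABLES, "7");
            // Equivalent to setConfigParam(EnvironmentConfig.LOCK_TIMEOUT, "1 s").
            cfg.setLockTimeout(1, TimeUnit.SECONDS);
        }
    }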

          LOCK_DEADLOCK_DETECT

          +
          public static final java.lang.String LOCK_DEADLOCK_DETECT
          +
Whether to perform deadlock detection when a lock conflict occurs. By default, deadlock detection is enabled (this parameter is true) in order to reduce thread wait times when there are deadlocks.

Deadlock detection is performed as follows.

  1. When a lock is requested by a record read or write operation, JE checks for lock conflicts with another transaction or another thread performing a non-transactional operation. If there is no conflict, the lock is acquired and the operation returns normally.
  2. When there is a conflict, JE performs deadlock detection. However, before performing deadlock detection, JE waits for the LOCK_DEADLOCK_DETECT_DELAY interval, if it is non-zero. This delay is useful for avoiding the overhead of deadlock detection when normal, short-lived contention (not a deadlock) is the reason for the conflict. If the lock is acquired during the delay, the thread wakes up and the operation returns normally.
  3. If a deadlock is detected, DeadlockException is thrown in one of the threads participating in the deadlock, called the "victim". The victim is chosen at random to prevent a repeated pattern of deadlocks, called "live lock". A non-victim thread that detects a deadlock will notify the victim and perform short delays, waiting for the deadlock to be broken; if the lock is acquired, the operation returns normally.
  4. It is possible for live lock to occur in spite of using random victim selection. It is also possible that a deadlock is not broken because the victim thread is unresponsive or the application fails to close a cursor or to commit or abort a transaction. In these cases, if the lock or transaction timeout expires without acquiring the lock, a DeadlockException is thrown for the last deadlock detected, in the thread that detected the deadlock. In this case, DeadlockException may be thrown by more than one thread participating in the deadlock.
  5. When no deadlock is detected, JE waits for the lock or transaction timeout to expire. If the lock is acquired during this delay, the thread wakes up and the operation returns normally.
  6. When the lock or transaction timeout expires without acquiring the lock, JE checks for deadlocks one final time. If a deadlock is detected, DeadlockException is thrown; otherwise, LockTimeoutException or TransactionTimeoutException is thrown.

Deadlock detection may be disabled (by setting this parameter to false) in applications that are known to be free of deadlocks, and this may provide a slight performance improvement in certain scenarios. However, this is not recommended because deadlock-free operation is difficult to guarantee. If deadlock detection is disabled, JE skips steps 2, 3 and 4 above. However, deadlock detection is always performed in the last step, and DeadlockException may be thrown.

Name: "je.lock.deadlockDetect" | Type: Boolean | Mutable: no | Default: true

Since: 7.1

See Also: Constant Field Values
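The victim-selection behavior above implies that applications should treat DeadlockException as retryable: abort the transaction to release its locks, then try again. A minimal retry sketch; the doWork method and retry count are hypothetical:

    import com.sleepycat.je.DeadlockException;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.Transaction;

    public class DeadlockRetry {
        static final int MAX_RETRIES = 3; // hypothetical policy

        static void runWithRetries(Environment env) {
            for (int i = 0; i < MAX_RETRIES; i++) {
                Transaction txn = env.beginTransaction(null, null);
                try {
                    doWork(txn);      // hypothetical application logic
                    txn.commit();
                    return;
                } catch (DeadlockException e) {
                    txn.abort();      // release locks, then retry
                }
            }
            throw new RuntimeException("gave up after " + MAX_RETRIES + " deadlocks");
        }

        static void doWork(Transaction txn) { /* reads and writes go here */ }
    }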

          LOCK_DEADLOCK_DETECT_DELAY

          +
          public static final java.lang.String LOCK_DEADLOCK_DETECT_DELAY
          +
The delay after a lock conflict, before performing deadlock detection.

This delay is used to avoid the overhead of deadlock detection when normal contention (not a deadlock) is the reason for the conflict. See LOCK_DEADLOCK_DETECT for more information.

Name: "je.lock.deadlockDetectDelay" | Type: Duration | Mutable: no | Default: 0 | Minimum: 0 | Maximum: 75 min

Since: 7.1

See Also: Time Duration Properties, Constant Field Values

          LOCK_OLD_LOCK_EXCEPTIONS

          +
          public static final java.lang.String LOCK_OLD_LOCK_EXCEPTIONS
          +
Deprecated since JE 6.5; has no effect, as if it were set to false.

Used in JE releases 3.4 through 6.4 to throw old-style lock exceptions for compatibility with JE release 3.3 and earlier.

See Also: Constant Field Values

          TXN_SERIALIZABLE_ISOLATION

          +
          public static final java.lang.String TXN_SERIALIZABLE_ISOLATION
          +
Configures all transactions for this environment to have Serializable (Degree 3) isolation. By setting Serializable isolation, phantoms will be prevented. By default, transactions provide Repeatable Read isolation.

The default is false for the database environment.

Name: "je.txn.serializableIsolation" | Type: Boolean | Mutable: no | Default: false

See Also: setTxnSerializableIsolation(boolean), Constant Field Values
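A sketch of enabling Serializable isolation environment-wide via the convenience setter documented later in this class; this is equivalent to setting "je.txn.serializableIsolation" to true, and must happen before the Environment is opened since the parameter is not mutable:

    import com.sleepycat.je.EnvironmentConfig;

    public class SerializableIsolationExample {
        public static void main(String[] args) {
            EnvironmentConfig cfg = new EnvironmentConfig();
            cfg.setTransactional(true);
            cfg.setTxnSerializableIsolation(true);
        }
    }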

          TXN_DEADLOCK_STACK_TRACE

          +
          public static final java.lang.String TXN_DEADLOCK_STACK_TRACE
          +
Set this parameter to true to add stacktrace information to deadlock (lock timeout) exception messages. The stack trace will show where each lock was taken. The default is false, and true should only be used during debugging because of the added memory/processing cost. This parameter is 'static' across all environments.

Name: "je.txn.deadlockStackTrace" | Type: Boolean | Mutable: yes | Default: false

See Also: Constant Field Values

          TXN_DUMP_LOCKS

          +
          public static final java.lang.String TXN_DUMP_LOCKS
          +
Dump the lock table when a lock timeout is encountered, for debugging assistance.

Name: "je.txn.dumpLocks" | Type: Boolean | Mutable: yes | Default: false

See Also: Constant Field Values

          TRACE_FILE

          +
          public static final java.lang.String TRACE_FILE
          +
Deprecated in favor of FILE_LOGGING_LEVEL. As of JE 4.0, use the standard java.util.logging configuration methodologies. To enable logging output to the je.info files, set com.sleepycat.je.util.FileHandler.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager. To set the handler level programmatically, set "com.sleepycat.je.util.FileHandler.level" in the EnvironmentConfig object.

See Also: Constant Field Values

          TRACE_CONSOLE

          +
          public static final java.lang.String TRACE_CONSOLE
          +
Deprecated in favor of CONSOLE_LOGGING_LEVEL. As of JE 4.0, use the standard java.util.logging configuration methodologies. To enable console output, set com.sleepycat.je.util.ConsoleHandler.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager. To set the handler level programmatically, set "com.sleepycat.je.util.ConsoleHandler.level" in the EnvironmentConfig object.

See Also: Constant Field Values

          TRACE_DB

          +
          public static final java.lang.String TRACE_DB
          +
          Deprecated. As of JE 4.0, event tracing to the .jdb files has been + separated from the java.util.logging mechanism. This parameter has + no effect.
See Also: Constant Field Values

          TRACE_FILE_LIMIT

          +
          public static final java.lang.String TRACE_FILE_LIMIT
          +
          Deprecated. As of JE 4.0, use the standard java.util.logging + configuration methodologies. To set the FileHandler output file size, + set com.sleepycat.je.util.FileHandler.limit = <NUMBER> + through the java.util.logging configuration file, or through the + java.util.logging.LogManager.
See Also: Constant Field Values

          TRACE_FILE_COUNT

          +
          public static final java.lang.String TRACE_FILE_COUNT
          +
          Deprecated. As of JE 4.0, use the standard java.util.logging + configuration methodologies. To set the FileHandler output file count, + set com.sleepycat.je.util.FileHandler.count = <NUMBER> + through the java.util.logging configuration file, or through the + java.util.logging.LogManager.
See Also: Constant Field Values

          TRACE_LEVEL

          +
          public static final java.lang.String TRACE_LEVEL
          +
          Deprecated. As of JE 4.0, use the standard java.util.logging + configuration methodologies. Set logging levels using class names + through the java.util.logging configuration file, or through the + java.util.logging.LogManager.
See Also: Constant Field Values

          CONSOLE_LOGGING_LEVEL

          +
          public static final java.lang.String CONSOLE_LOGGING_LEVEL
          +
Trace messages equal to and above this level will be logged to the console. The value should be one of the predefined java.util.logging.Level values.

Setting this parameter in the je.properties file or through setConfigParam(java.lang.String, java.lang.String) is analogous to setting the property in the java.util.logging properties file or MBean. It is preferred to use the standard java.util.logging mechanisms for configuring java.util.logging.Handler, but this JE parameter is provided because the java.util.logging API doesn't provide a method to set handler levels programmatically.

Name: "com.sleepycat.je.util.ConsoleHandler.level" | Type: String | Mutable: no | Default: "OFF"

See Also: Chapter 12. Logging, Constant Field Values

          FILE_LOGGING_LEVEL

          +
          public static final java.lang.String FILE_LOGGING_LEVEL
          +
Trace messages equal to and above this level will be logged to the je.info file, which is in the Environment home directory. The value should be one of the predefined java.util.logging.Level values.

Setting this parameter in the je.properties file or through setConfigParam(java.lang.String, java.lang.String) is analogous to setting the property in the java.util.logging properties file or MBean. It is preferred to use the standard java.util.logging mechanisms for configuring java.util.logging.Handler, but this JE parameter is provided because the java.util.logging API doesn't provide a method to set handler levels programmatically.

Name: "com.sleepycat.je.util.FileHandler.level" | Type: String | Mutable: no | Default: "INFO"

See Also: Chapter 12. Logging, Constant Field Values
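A sketch of the programmatic workaround described above, setting both handler levels through setConfigParam before the Environment is opened; the WARNING and FINE levels are illustrative:

    import com.sleepycat.je.EnvironmentConfig;

    public class LoggingLevelExample {
        public static void main(String[] args) {
            EnvironmentConfig cfg = new EnvironmentConfig();
            cfg.setConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL, "WARNING");
            cfg.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL, "FINE");
        }
    }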

          TRACE_LEVEL_LOCK_MANAGER

          +
          public static final java.lang.String TRACE_LEVEL_LOCK_MANAGER
          +
          Deprecated. As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see locking logging, set + com.sleepycat.je.txn.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
See Also: Constant Field Values

          TRACE_LEVEL_RECOVERY

          +
          public static final java.lang.String TRACE_LEVEL_RECOVERY
          +
          Deprecated. As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see recovery logging, set + com.sleepycat.je.recovery.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
See Also: Constant Field Values

          TRACE_LEVEL_EVICTOR

          +
          public static final java.lang.String TRACE_LEVEL_EVICTOR
          +
          Deprecated. As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see evictor logging, set + com.sleepycat.je.evictor.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
See Also: Constant Field Values

          TRACE_LEVEL_CLEANER

          +
          public static final java.lang.String TRACE_LEVEL_CLEANER
          +
          Deprecated. As of JE 4.0, use the standard java.util.logging + configuration methodologies. To see cleaner logging, set + com.sleepycat.je.cleaner.level = <LEVEL> through the + java.util.logging configuration file, or through the + java.util.logging.LogManager.
See Also: Constant Field Values

          STARTUP_DUMP_THRESHOLD

          +
          public static final java.lang.String STARTUP_DUMP_THRESHOLD
          +
If environment startup exceeds this duration, startup statistics are logged and can be found in the je.info file.

Name: "je.env.startupThreshold" | Type: Duration | Mutable: no | Default: 5 min | Minimum: 0 | Maximum: none

See Also: Time Duration Properties, Constant Field Values

          STATS_COLLECT

          +
          public static final java.lang.String STATS_COLLECT
          +
If true, collect and log statistics. The statistics are logged in CSV format and written to the log file at a user-specified interval. The logging occurs per-Environment when the Environment is opened in read/write mode. Statistics are written to a file named je.stat.csv. Successively older files are named by adding "0", "1", "2", etc. into the file name. The file name format is je.stat.[version number].csv.

Name: "je.stats.collect" | Type: Boolean | Mutable: yes | Default: true

See Also: Constant Field Values

          STATS_MAX_FILES

          +
          public static final java.lang.String STATS_MAX_FILES
          +
Maximum number of statistics log files to retain. The rotating set of files, as each file reaches a given size limit, is closed, rotated out, and a new file opened. The name of the log file is je.stat.csv. Successively older files are named by adding "0", "1", "2", etc. into the file name. The file name format is je.stat.[version number].csv.

Name: "je.stats.max.files" | Type: Integer | Mutable: yes | Default: 10 | Minimum: 1 | Maximum: -none-

See Also: Constant Field Values

          STATS_FILE_ROW_COUNT

          +
          public static final java.lang.String STATS_FILE_ROW_COUNT
          +
Log file maximum row count for statistics collection. When the number of rows in the statistics file reaches the maximum row count, the file is closed, rotated out, and a new file opened. The name of the log file is je.stat.csv. Successively older files are named by adding "0", "1", "2", etc. into the file name. The file name format is je.stat.[version number].csv.

Name: "je.stats.file.row.count" | Type: Integer | Mutable: yes | Default: 1440 | Minimum: 1 | Maximum: -none-

See Also: Constant Field Values

          STATS_COLLECT_INTERVAL

          +
          public static final java.lang.String STATS_COLLECT_INTERVAL
          +
The duration of the statistics capture interval. Statistics are captured and written to the log file at this interval.

Name: "je.stats.collect.interval" | Type: Duration | Mutable: yes | Default: 1 min | Minimum: 1 s | Maximum: 24 d

See Also: Time Duration Properties, Constant Field Values

          STATS_FILE_DIRECTORY

          +
          public static final java.lang.String STATS_FILE_DIRECTORY
          +
The directory in which to save the statistics log file.

Name: "je.stats.file.directory" | Type: String | Mutable: no | Default: "NULL -> Environment home directory"

See Also: Constant Field Values
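A sketch tying the statistics parameters together: hourly CSV capture, a larger retained-file budget, and a hypothetical output directory (when unset, files go to the Environment home):

    import com.sleepycat.je.EnvironmentConfig;

    public class StatsConfigExample {
        public static void main(String[] args) {
            EnvironmentConfig cfg = new EnvironmentConfig();
            cfg.setConfigParam(EnvironmentConfig.STATS_COLLECT, "true");
            cfg.setConfigParam(EnvironmentConfig.STATS_COLLECT_INTERVAL, "1 h");
            cfg.setConfigParam(EnvironmentConfig.STATS_MAX_FILES, "20");
            // Hypothetical directory; not mutable, so set before opening.
            cfg.setConfigParam(EnvironmentConfig.STATS_FILE_DIRECTORY, "/var/log/je-stats");
        }
    }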

        Constructor Detail


          EnvironmentConfig

          +
          public EnvironmentConfig()
          +
Creates an EnvironmentConfig initialized with the system default settings.

          EnvironmentConfig

          +
          public EnvironmentConfig(java.util.Properties properties)
          +                  throws java.lang.IllegalArgumentException
          +
Creates an EnvironmentConfig which includes the properties specified in the properties parameter.

Parameters: properties - Supported properties are described in this class

Throws: java.lang.IllegalArgumentException - If any properties read from the properties parameter are invalid.
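A sketch of the Properties-based constructor, useful when configuration is loaded from a file; the property values are illustrative:

    import java.util.Properties;
    import com.sleepycat.je.EnvironmentConfig;

    public class PropertiesConstructorExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("je.cleaner.minUtilization", "60");
            props.setProperty("je.txn.deadlockStackTrace", "true");
            // Throws IllegalArgumentException if any property is invalid.
            EnvironmentConfig cfg = new EnvironmentConfig(props);
        }
    }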

        Method Detail

        + + + +
          +
        • +

          setAllowCreate

          +
          public EnvironmentConfig setAllowCreate(boolean allowCreate)
          +
          If true, creates the database environment if it doesn't already exist.
          +
          +
          Parameters:
          +
          allowCreate - If true, the database environment is created if it + doesn't already exist.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getAllowCreate

          +
          public boolean getAllowCreate()
          +
          Returns a flag that specifies if we may create this environment.
          +
          +
          Returns:
          +
          true if we may create this environment.
          +
          +
        • +
        + + + +
          +
        • +

          setLockTimeout

          +
          public EnvironmentConfig setLockTimeout(long timeout,
          +                                        java.util.concurrent.TimeUnit unit)
          +                                 throws java.lang.IllegalArgumentException
          +
          Convenience method for setting LOCK_TIMEOUT.
          +
          +
          Parameters:
          +
          timeout - The lock timeout for all transactional and + non-transactional operations, or zero to disable lock timeouts.
          +
          unit - the TimeUnit of the timeout value. May be null only + if timeout is zero.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the value of timeout is invalid
          +
          See Also:
          +
          LOCK_TIMEOUT, +Transaction.setLockTimeout(long,TimeUnit)
          +
          +
        • +
        + + + +
          +
        • +

          setLockTimeout

          +
          public EnvironmentConfig setLockTimeout(long timeout)
          +                                 throws java.lang.IllegalArgumentException
          +
          Deprecated. as of 4.0, replaced by setLockTimeout(long, + TimeUnit).
          +
          Configures the lock timeout, in microseconds. This method is equivalent + to: + +
setLockTimeout(timeout, TimeUnit.MICROSECONDS);
          +
          +
          Throws:
          +
          java.lang.IllegalArgumentException
          +
          +
        • +
        + + + +
          +
        • +

          getLockTimeout

          +
          public long getLockTimeout(java.util.concurrent.TimeUnit unit)
          +
          Returns the lock timeout setting.
          +
          +
          Parameters:
          +
unit - the TimeUnit of the returned value. May not be null.

A value of 0 means no timeout is set.
          +
          +
        • +
        + + + +
          +
        • +

          getLockTimeout

          +
          public long getLockTimeout()
          +
          Deprecated. as of 4.0, replaced by getLockTimeout(TimeUnit).
          +
          Returns the lock timeout setting, in microseconds. This method is + equivalent to: + +
          getLockTimeout(TimeUnit.MICROSECONDS);
          +
        • +
        + + + +
          +
        • +

          setReadOnly

          +
          public EnvironmentConfig setReadOnly(boolean readOnly)
          +
          Convenience method for setting ENV_READ_ONLY.
          +
          +
          Parameters:
          +
          readOnly - If true, configure the database environment to be read + only, and any attempt to modify a database will fail.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getReadOnly

          +
          public boolean getReadOnly()
          +
          Returns true if the database environment is configured to be read only. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the database environment is configured to be read only.
          +
          +
        • +
        + + + +
          +
        • +

          setTransactional

          +
          public EnvironmentConfig setTransactional(boolean transactional)
          +
          Convenience method for setting + ENV_IS_TRANSACTIONAL.
          +
          +
          Parameters:
          +
          transactional - If true, configure the database environment for + transactions.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getTransactional

          +
          public boolean getTransactional()
          +
          Returns true if the database environment is configured for transactions. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the database environment is configured for transactions.
          +
          +
        • +
        + + + +
          +
        • +

          setLocking

          +
          public EnvironmentConfig setLocking(boolean locking)
          +
          Convenience method for setting + ENV_IS_LOCKING.
          +
          +
          Parameters:
          +
          locking - If false, configure the database environment for no + locking. The default is true.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getLocking

          +
          public boolean getLocking()
          +
          Returns true if the database environment is configured for locking. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the database environment is configured for locking.
          +
          +
        • +
        + + + +
          +
        • +

          setTxnTimeout

          +
          public EnvironmentConfig setTxnTimeout(long timeout,
          +                                       java.util.concurrent.TimeUnit unit)
          +                                throws java.lang.IllegalArgumentException
          +
          A convenience method for setting TXN_TIMEOUT.
          +
          +
          Parameters:
          +
          timeout - The transaction timeout. A value of 0 turns off + transaction timeouts.
          +
          unit - the TimeUnit of the timeout value. May be null only + if timeout is zero.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - If the value of timeout is negative
          +
          See Also:
          +
          TXN_TIMEOUT, +Transaction.setTxnTimeout(long, java.util.concurrent.TimeUnit)
          +
          +
        • +
        + + + +
          +
        • +

          setTxnTimeout

          +
          public EnvironmentConfig setTxnTimeout(long timeout)
          +                                throws java.lang.IllegalArgumentException
          +
          Deprecated. as of 4.0, replaced by setTxnTimeout(long, + TimeUnit).
          +
          Configures the transaction timeout, in microseconds. This method is + equivalent to: + +
setTxnTimeout(timeout, TimeUnit.MICROSECONDS);
          +
          +
          Throws:
          +
          java.lang.IllegalArgumentException
          +
          +
        • +
        + + + +
          +
        • +

          getTxnTimeout

          +
          public long getTxnTimeout(java.util.concurrent.TimeUnit unit)
          +
          A convenience method for getting TXN_TIMEOUT. + +

          A value of 0 means transaction timeouts are not configured.

          +
          +
          Parameters:
          +
          unit - the TimeUnit of the returned value. May not be null.
          +
          Returns:
          +
          The transaction timeout.
          +
          +
        • +
        + + + +
          +
        • +

          getTxnTimeout

          +
          public long getTxnTimeout()
          +
          Deprecated. as of 4.0, replaced by getTxnTimeout(TimeUnit).
          +
          Returns the transaction timeout, in microseconds. This method is + equivalent to: + +
          getTxnTimeout(TimeUnit.MICROSECONDS);
          +
        • +
        + + + + + + + +
          +
        • +

          getTxnSerializableIsolation

          +
          public boolean getTxnSerializableIsolation()
          +
          A convenience method for getting + TXN_SERIALIZABLE_ISOLATION.
          +
          +
          Returns:
          +
true if the environment has been configured to have serializable isolation.
          +
          See Also:
          +
          LockMode
          +
          +
        • +
        + + + +
          +
        • +

          setSharedCache

          +
          public EnvironmentConfig setSharedCache(boolean sharedCache)
          +
          A convenience method for setting the + SHARED_CACHE parameter.
          +
          +
          Parameters:
          +
          sharedCache - If true, the shared cache is used by this + environment.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getSharedCache

          +
          public boolean getSharedCache()
          +
          A convenience method for getting the + SHARED_CACHE parameter.
          +
          +
          Returns:
          +
true if the shared cache is used by this environment.

See Also: setSharedCache(boolean)
          +
          +
        • +
        + + + +
          +
        • +

          setNodeName

          +
          public EnvironmentConfig setNodeName(java.lang.String nodeName)
          +
          Sets the user defined nodeName for the Environment. If set, exception + messages, logging messages, and thread names will have this nodeName + included in them. If a user has multiple Environments in a single JVM, + setting this to a string unique to each Environment may make it easier + to diagnose certain exception conditions as well as thread dumps.
          +
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getNodeName

          +
          public java.lang.String getNodeName()
          +
          Returns the user defined nodeName for the Environment.
          +
        • +
        + + + +
          +
        • +

          setCustomStats

          +
          public EnvironmentConfig setCustomStats(CustomStats customStats)
          +
          Sets the custom statistics object.
          +
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getCustomStats

          +
          public CustomStats getCustomStats()
          +
Gets the custom statistics object.
          +
          +
          Returns:
          +
          customStats
          +
          +
        • +
        + + + +
          +
        • +

          setLoggingHandler

          +
          public EnvironmentConfig setLoggingHandler(java.util.logging.Handler handler)
          +
Set a java.util.logging.Handler which will be used by all java.util.logging.Loggers instantiated by this Environment. This lets the application specify a handler which:

  • requires a constructor with arguments
  • is specific to this environment, which is important if the application is using multiple environments within the same process.

Note that Handler is not serializable, and the logging handler should be set within the same process.
          +
        • +
        + + + +
          +
        • +

          getLoggingHandler

          +
          public java.util.logging.Handler getLoggingHandler()
          +
          Returns the custom java.util.logging.Handler specified by the + application.
          +
        • +
        + + + +
          +
        • +

          setConfigParam

          +
          public EnvironmentConfig setConfigParam(java.lang.String paramName,
          +                                        java.lang.String value)
          +                                 throws java.lang.IllegalArgumentException
          +
          Description copied from class: EnvironmentMutableConfig
          +
          Set this configuration parameter. First validate the value specified for + the configuration parameter; if it is valid, the value is set in the + configuration.
          +
          +
          Overrides:
          +
          setConfigParam in class EnvironmentMutableConfig
          +
          Parameters:
          +
          paramName - the configuration parameter name, one of the String + constants in this class
          +
          value - The configuration value
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the paramName or value is invalid.
          +
          +
        • +
        + + + +
          +
        • +

          setRecoveryProgressListener

          +
          public EnvironmentConfig setRecoveryProgressListener(ProgressListener<RecoveryProgress> progressListener)
          +
          Configure the environment to make periodic calls to a ProgressListener to + provide feedback on environment startup (recovery). The + ProgressListener.progress() method is called at different stages of + the recovery process. See RecoveryProgress for information about + those stages. +

          + When using progress listeners, review the information at ProgressListener.progress(T, long, long) to avoid any unintended disruption to + environment startup.

          +
          +
          Parameters:
          +
          progressListener - The ProgressListener to callback during + environment startup (recovery).
          +
          +
        • +
        + + + +
          +
        • +

          getRecoveryProgressListener

          +
          public ProgressListener<RecoveryProgress> getRecoveryProgressListener()
          +
          Return the ProgressListener to be used at this environment startup.
          +
        • +
        + + + +
          +
        • +

          setClassLoader

          +
          public EnvironmentConfig setClassLoader(java.lang.ClassLoader classLoader)
          +
          Configure the environment to use a specified ClassLoader for loading + user-supplied classes by name.
          +
        • +
        + + + +
          +
        • +

          getClassLoader

          +
          public java.lang.ClassLoader getClassLoader()
          +
          Returns the ClassLoader for loading user-supplied classes by name, or + null if no specified ClassLoader is configured.
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public EnvironmentConfig clone()
          +
          Returns a copy of this configuration object.
          +
        • +
    Berkeley DB Java Edition
    version 7.5.11 +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/je/EnvironmentFailureException.html b/docs/java/com/sleepycat/je/EnvironmentFailureException.html
new file mode 100644
index 0000000..f4fc4cb
--- /dev/null
+++ b/docs/java/com/sleepycat/je/EnvironmentFailureException.html

EnvironmentFailureException (Oracle - Berkeley DB Java Edition API)
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class EnvironmentFailureException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Direct Known Subclasses:
      +
      EnvironmentLockedException, EnvironmentNotFoundException, EnvironmentWedgedException, GroupShutdownException, LogWriteException, RestartRequiredException, ThreadInterruptedException, VersionMismatchException
      +
      +
      +
      +
      public class EnvironmentFailureException
      +extends RunRecoveryException
      +
Indicates that a failure has occurred that could impact the Environment as a whole. For failures that impact only the current operation and/or transaction, see OperationFailureException. For an overview of all exceptions thrown by JE, see DatabaseException.

      Depending on the nature of the failure, this exception may indicate that + Environment.close() must be called. The application should catch + EnvironmentFailureException and then call Environment.isValid(). If false is returned, all Environment + handles (instances) must be closed and re-opened in order to run recovery + and continue operating. If true is returned, the Environment can continue operating without being closed and re-opened. + Also note that Environment.isValid() may be called at any time, not + just during exception handling.

      + +

      The use of the Environment.isValid() method allows JE to determine + dynamically whether the failure requires recovery or not, and allows for + this determination to change in future releases. Over time, internal + improvements to error handling may allow more error conditions to be handled + without invalidating the Environment.

      + +

      (Although this exception class extends RunRecoveryException, it + does not always indicate that recovery is necessary, as described above. + RunRecoveryException has been deprecated and EnvironmentFailureException should be used instead.)

      + +

      If an EnvironmentFailureException consistently occurs soon after + opening the Environment, this may indicate a persistent problem. It may + indicate a system problem or a persistent storage problem. In this case, + human intervention is normally required and restoring from a backup may be + necessary.

      + +

      Note that subclasses of EnvironmentFailureException indicate how + to handle the exception in more specific ways.

      +
        +
      • If Thread.interrupt is called for a thread performing JE + operations, a ThreadInterruptedException is thrown. Since + interrupting a thread is intentional, it does not indicate a persistent + problem and human intervention is not normally required. +
      • +
      • If an IOException occurs while writing to the JE log, a + LogWriteException is thrown. Although an IOException can + occur for different reasons, it is a hint that the disk may be full and + applications may wish to attempt recovery after making more disk space + available. +
      • +
      • For replicated environments, see the subclasses of EnvironmentFailureException in the com.sleepycat.je.rep package for + more information. Such exceptions may require special handling. +
      • +
      + +

      If Environment.close() is not called after an EnvironmentFailureException invalidates the Environment, all + subsequent method calls for the Environment will throw the same + exception. This provides more than one opportunity to catch and handle the + specific exception subclass that caused the failure.
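A hedged sketch of the handling pattern described above (env, envHome and envConfig are assumed to be the application's environment handle, directory and configuration):

     try {
         // ... perform JE operations ...
     } catch (EnvironmentFailureException efe) {
         if (!env.isValid()) {
             // Invalid: all handles must be closed, then re-opened to
             // run recovery.
             env.close();
             env = new Environment(envHome, envConfig);
         }
         // If still valid, the environment can continue operating.
     }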

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.StringgetMessage() 
        booleanisCorrupted() +
        Whether the EnvironmentFailureException indicates that the log is + corrupt, meaning that a network restore (or restore from backup) should + be performed.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          isCorrupted

          +
          public boolean isCorrupted()
          +
          Whether the EnvironmentFailureException indicates that the log is + corrupt, meaning that a network restore (or restore from backup) should + be performed. +

+ This method currently returns true only when corruption has been detected and is persistent. This may have been detected by verifying checksums in the disk data log, and in this case the corruption indicates a media/disk failure. The checksum error may have been detected when accessing data normally via the JE API, or by the background data verifier (see EnvironmentConfig.VERIFY_LOG). Or a persistent Btree corruption may have been detected by the data verifier (see EnvironmentConfig.VERIFY_BTREE) or by the Environment.verify(VerifyConfig, PrintStream) or Database.verify(VerifyConfig) methods. This method returns true in all such cases.

          + Additionally, when a persistent corruption is detected and the + Environment is open for read-write access, a marker file named + 7fffffff.jdb is created in the Environment directory that will + prevent re-opening the environment. If an attempt is made to + re-open the Environment, the original EnvironmentFailureException + will be thrown. This is meant to safeguard against using a corrupt + environment when the original exception is accidentally overlooked. + While the marker file can be deleted to allow re-opening the + environment, this is normally unsafe and is not recommended.

          +
          +
          Returns:
          +
          true if the environment is corrupt.
          +
          Since:
          +
          7.3
          +
          +
        • +
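For illustration, a sketch that distinguishes persistent corruption from other environment failures:

     try {
         // ... perform JE operations ...
     } catch (EnvironmentFailureException efe) {
         if (efe.isCorrupted()) {
             // Persistent corruption: restore from backup (or perform a
             // network restore) rather than re-opening in place.
         }
     }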
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/EnvironmentLockedException.html b/docs/java/com/sleepycat/je/EnvironmentLockedException.html new file mode 100644 index 0000000..cf393ea --- /dev/null +++ b/docs/java/com/sleepycat/je/EnvironmentLockedException.html @@ -0,0 +1,260 @@ + + + + + +EnvironmentLockedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class EnvironmentLockedException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class EnvironmentLockedException
      +extends EnvironmentFailureException
      +
      Thrown by the Environment constructor when an environment cannot be + opened for write access because another process has the same environment + open for write access. + +

      Warning: This exception should be handled when an + environment is opened by more than one process.
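A minimal sketch of handling this case (envHome and envConfig are assumed to be the application's environment directory and configuration):

     Environment env = null;
     try {
         env = new Environment(envHome, envConfig);
     } catch (EnvironmentLockedException e) {
         // Another process holds write access; retry later or open
         // read-only, as appropriate for the application.
     }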

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/EnvironmentMutableConfig.html b/docs/java/com/sleepycat/je/EnvironmentMutableConfig.html new file mode 100644 index 0000000..d51fe76 --- /dev/null +++ b/docs/java/com/sleepycat/je/EnvironmentMutableConfig.html @@ -0,0 +1,875 @@ + + + + + +EnvironmentMutableConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class EnvironmentMutableConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Cloneable
      +
      +
      +
      Direct Known Subclasses:
      +
      EnvironmentConfig
      +
      +
      +
      +
      public class EnvironmentMutableConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable, java.io.Serializable
      +
      Specifies the environment attributes that may be changed after the + environment has been opened. EnvironmentMutableConfig is a parameter to + Environment.setMutableConfig(com.sleepycat.je.EnvironmentMutableConfig) and is returned by Environment.getMutableConfig(). + +

      There are two types of mutable environment properties: per-environment + handle properties, and environment wide properties.

      + +

      Per-Environment Handle Properties

      + +

      Per-environment handle properties apply only to a single Environment + instance. For example, to change the default transaction commit behavior + for a single environment handle, do this:

      + +
      +     // Specify no-sync behavior for a given handle.
      +     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
      +     mutableConfig.setDurability(Durability.COMMIT_NO_SYNC);
      +     env.setMutableConfig(mutableConfig);
      + 
      + +

      The per-environment handle properties are listed below. These properties + are accessed using the setter and getter methods listed, as shown in the + example above.

      + + + +

      Environment-Wide Mutable Properties

      + +

      Environment-wide mutable properties are those that can be changed for an + environment as a whole, irrespective of which environment instance (for the + same physical environment) is used. For example, to stop the cleaner daemon + thread, do this:

      + +
      +     // Stop the cleaner daemon threads for the environment.
      +     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
      +     mutableConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
      +     env.setMutableConfig(mutableConfig);
      + 
      + +

      The environment-wide mutable properties are documented as such for each + EnvironmentConfig String constant.

      + +

      Getting the Current Environment Properties

      + + To get the current "live" properties of an environment after constructing it + or changing its properties, you must call Environment.getConfig() or + Environment.getMutableConfig(). The original EnvironmentConfig or + EnvironmentMutableConfig object used to set the properties is not kept up to + date as properties are changed, and does not reflect property validation or + properties that are computed.
      +
      +
      See Also:
      +
      EnvironmentConfig, +Serialized Form
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          EnvironmentMutableConfig

          +
          public EnvironmentMutableConfig()
          +
          An instance created using the default constructor is initialized with + the system's default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setTxnNoSync

          +
          public EnvironmentMutableConfig setTxnNoSync(boolean noSync)
          + +
          Configures the database environment for asynchronous transactions.
          +
          +
          Parameters:
          +
          noSync - If true, do not write or synchronously flush the log on + transaction commit. This means that transactions exhibit the ACI + (Atomicity, Consistency, and Isolation) properties, but not D + (Durability); that is, database integrity is maintained, but if the JVM + or operating system fails, it is possible some number of the most + recently committed transactions may be undone during recovery. The + number of transactions at risk is governed by how many updates fit into + a log buffer, how often the operating system flushes dirty buffers to + disk, and how often the database environment is checkpointed. + +

          This attribute is false by default for this class and for the + database environment.

          +
          +
        • +
        + + + +
          +
        • +

          getTxnNoSync

          +
          public boolean getTxnNoSync()
          +
          Deprecated. replaced by getDurability()
          +
          Returns true if the database environment is configured for asynchronous + transactions.
          +
          +
          Returns:
          +
          true if the database environment is configured for asynchronous + transactions.
          +
          +
        • +
        + + + +
          +
        • +

          setTxnWriteNoSync

          +
          public EnvironmentMutableConfig setTxnWriteNoSync(boolean writeNoSync)
          + +
          Configures the database environment for transactions which write but do + not flush the log.
          +
          +
          Parameters:
          +
          writeNoSync - If true, write but do not synchronously flush the log + on transaction commit. This means that transactions exhibit the ACI + (Atomicity, Consistency, and Isolation) properties, but not D + (Durability); that is, database integrity is maintained, but if the + operating system fails, it is possible some number of the most recently + committed transactions may be undone during recovery. The number of + transactions at risk is governed by how often the operating system + flushes dirty buffers to disk, and how often the database environment is + checkpointed. + +

          The motivation for this attribute is to provide a transaction that + has more durability than asynchronous (nosync) transactions, but has + higher performance than synchronous transactions.

          + +

          This attribute is false by default for this class and for the + database environment.

          +
          +
        • +
        + + + +
          +
        • +

          getTxnWriteNoSync

          +
          public boolean getTxnWriteNoSync()
          +
          Deprecated. replaced by getDurability()
          +
          Returns true if the database environment is configured for transactions + which write but do not flush the log.
          +
          +
          Returns:
          +
          true if the database environment is configured for transactions + which write but do not flush the log.
          +
          +
        • +
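Per the deprecation notes above, the Durability API replaces these flags; a sketch of the write-no-sync equivalent:

     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
     // Write the log on commit but do not synchronously flush, matching
     // the write-no-sync behavior described above.
     mutableConfig.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
     env.setMutableConfig(mutableConfig);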
        + + + + + + + + + + + +
          +
        • +

          setCacheSize

          +
          public EnvironmentMutableConfig setCacheSize(long totalBytes)
          +                                      throws java.lang.IllegalArgumentException
          +
          A convenience method for setting EnvironmentConfig.MAX_MEMORY.
          +
          +
          Parameters:
          +
          totalBytes - The memory available to the database system, in bytes.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          See Also:
          +
          EnvironmentConfig.MAX_MEMORY
          +
          +
        • +
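For example (the 512 MB figure is arbitrary, for illustration only, not a sizing recommendation):

     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
     mutableConfig.setCacheSize(512L * 1024 * 1024); // 512 MB
     env.setMutableConfig(mutableConfig);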
        + + + +
          +
        • +

          getCacheSize

          +
          public long getCacheSize()
          +
          Returns the memory available to the database system, in bytes. A valid + value is only available if this EnvironmentConfig object has been + returned from Environment.getConfig().
          +
          +
          Returns:
          +
          The memory available to the database system, in bytes.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getCachePercent

          +
          public int getCachePercent()
          +
          A convenience method for getting EnvironmentConfig.MAX_MEMORY_PERCENT.
          +
          +
          Returns:
          +
          the percentage value used in the JE cache size calculation.
          +
          +
        • +
        + + + + + + + + + + + +
          +
        • +

          setMaxDisk

          +
          public EnvironmentMutableConfig setMaxDisk(long totalBytes)
          +                                    throws java.lang.IllegalArgumentException
          +
          A convenience method for setting EnvironmentConfig.MAX_DISK.
          +
          +
          Parameters:
          +
          totalBytes - is an upper limit on the number of bytes used for + data storage, or zero if no limit is desired.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          See Also:
          +
          EnvironmentConfig.MAX_DISK
          +
          +
        • +
        + + + + + + + +
          +
        • +

          setExceptionListener

          +
          public EnvironmentMutableConfig setExceptionListener(ExceptionListener exceptionListener)
          +
          Sets the exception listener for an Environment. The listener is called + when a daemon thread throws an exception, in order to provide a + notification mechanism for these otherwise asynchronous exceptions. + Daemon thread exceptions are also printed through stderr. +

          + Not all daemon exceptions are fatal, and the application bears + responsibility for choosing how to respond to the notification. Since + exceptions may repeat, the application should also choose how to handle + a spate of exceptions. For example, the application may choose to act + upon each notification, or it may choose to batch up its responses + by implementing the listener so it stores exceptions, and only acts + when a certain number have been received.

          +
          +
          Parameters:
          +
          exceptionListener - the callback to be executed when an exception + occurs.
          +
          Returns:
          +
          this
          +
          +
        • +
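As a sketch, a listener that simply reports each daemon exception as it is received:

     envConfig.setExceptionListener(new ExceptionListener() {
         public void exceptionThrown(ExceptionEvent event) {
             // Daemon exceptions arrive asynchronously; the application
             // decides whether to act on each one or batch responses.
             System.err.println("JE daemon thread " + event.getThreadName()
                 + " threw " + event.getException());
         }
     });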
        + + + +
          +
        • +

          getExceptionListener

          +
          public ExceptionListener getExceptionListener()
          +
          Returns the exception listener, if set.
          +
        • +
        + + + + + + + + + + + +
          +
        • +

          setConfigParam

          +
          public EnvironmentMutableConfig setConfigParam(java.lang.String paramName,
          +                                               java.lang.String value)
          +                                        throws java.lang.IllegalArgumentException
          +
Set this configuration parameter. The specified value is first validated; if it is valid, the value is set in the configuration.
          +
          +
          Parameters:
          +
          paramName - the configuration parameter name, one of the String + constants in this class
          +
          value - The configuration value
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the paramName or value is invalid.
          +
          +
        • +
        + + + +
          +
        • +

          getConfigParam

          +
          public java.lang.String getConfigParam(java.lang.String paramName)
          +                                throws java.lang.IllegalArgumentException
          +
          Returns the value for this configuration parameter.
          +
          +
          Parameters:
          +
          paramName - a valid configuration parameter, one of the String + constants in this class.
          +
          Returns:
          +
          the configuration value.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the paramName is invalid.
          +
          +
        • +
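For illustration, setting a value by name and then reading the validated value back from the live configuration:

     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
     mutableConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
     env.setMutableConfig(mutableConfig);
     // getConfigParam returns the value for the named parameter.
     String runCleaner =
         env.getMutableConfig().getConfigParam(EnvironmentConfig.ENV_RUN_CLEANER);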
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Display configuration values.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/EnvironmentNotFoundException.html b/docs/java/com/sleepycat/je/EnvironmentNotFoundException.html new file mode 100644 index 0000000..2bc9f8c --- /dev/null +++ b/docs/java/com/sleepycat/je/EnvironmentNotFoundException.html @@ -0,0 +1,259 @@ + + + + + +EnvironmentNotFoundException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class EnvironmentNotFoundException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class EnvironmentNotFoundException
      +extends EnvironmentFailureException
      +
Thrown by the Environment constructor when the EnvironmentConfig AllowCreate property is false (environment creation is not permitted), but there are no log files in the environment directory.
      +
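A hedged sketch (envHome is assumed to be the application's environment directory):

     EnvironmentConfig config = new EnvironmentConfig();
     config.setAllowCreate(false); // environment creation not permitted
     try {
         Environment env = new Environment(envHome, config);
     } catch (EnvironmentNotFoundException e) {
         // No log files exist in envHome and creation was not allowed.
     }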
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/EnvironmentStats.html b/docs/java/com/sleepycat/je/EnvironmentStats.html new file mode 100644 index 0000000..de8e28a --- /dev/null +++ b/docs/java/com/sleepycat/je/EnvironmentStats.html @@ -0,0 +1,5941 @@ + + + + + +EnvironmentStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class EnvironmentStats

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class EnvironmentStats
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      Statistics for a single environment. Statistics provide indicators for + system monitoring and performance tuning. + +

      Each statistic has a name and a getter method in this class. For example, + the cacheTotalBytes stat is returned by the getCacheTotalBytes() method. Statistics are categorized into several + groups, for example, cacheTotalBytes is in the Cache + group. Each stat and group has a name and a description.

      + +

      Viewing the statistics through toString() shows the stat names + and values organized by group. Viewing the stats with toStringVerbose() additionally shows the description of each stat and + group.

      + +

Statistics are periodically output in CSV format to the je.stat.csv file (see EnvironmentConfig.STATS_COLLECT). The column header in the .csv file has group:stat format, where 'group' is the group name and 'stat' is the stat name. In Oracle NoSQL DB, in addition to the .csv file, JE stats are output in the .stat files.

      + +

      Stat values may also be obtained via JMX using the JEMonitor mbean. + In Oracle NoSQL DB, JE stats are obtained via a different JMX interface in + JSON format. The JSON format uses property names of the form group_stat where 'group' is the group name and 'stat' is the stat name.

      + +

      The stat groups are listed below. Each group name links to a summary of + the statistics in the group.

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Group NameDescription
      "Cache" + "The main cache resides in the Java heap and holds data, keys, Btree internal nodes, locks and JE metadata." +
      "OffHeap" + "The optional off-heap cache resides outside the Java heap and serves as an overflow area for the main cache." +
      "I/O" + "The I/O portion of the append-only storage system includes access to data files and caching of file handles." +
      "Cleaning" + "Log cleaning involves garbage collection of data files in the append-only storage system." +
      "Node Compression" + "Deleted records are removed from Btree internal nodes asynchronously and nodes are deleted when they become empty." +
      "Checkpoints" + "Dirty Btree internal nodes are written to the data log periodically to bound recovery time." +
      "Locks" + "Record locking is used to provide transactional capabilities." +
      "Environment" + "Miscellaneous environment wide statistics." +
      + +

      The following sections describe each group of stats along with some + common strategies for using them for monitoring and performance tuning.

      + +

      Cache Statistics

      + +

      Group Name: "Cache" +
      Description: "The main cache resides in the Java heap and holds data, keys, Btree internal nodes, locks and JE metadata."

      + +

      Group Name: "OffHeap" +
      Description: "The optional off-heap cache resides outside the Java heap and serves as an overflow area for the main cache."

      + +

The JE cache consists of the main (in-heap) cache and an optional off-heap cache. The vast majority of the cache is occupied by Btree nodes, including internal nodes (INs) and leaf nodes (LNs). INs contain record keys while LNs contain record data.

      + +

      Each IN refers to a configured maximum number of child nodes (EnvironmentConfig.NODE_MAX_ENTRIES). The INs form a Btree of at least 2 + levels. With a large data set the Btree will normally have 4 or 5 levels. + The top level is a single node, the root IN. Levels are numbered from the + bottom up, starting with level 1 for bottom level INs (BINs). Levels are + added at the top when the root IN splits.

      + +

      When an off-heap cache is configured, it serves as an overflow for the + main cache. See EnvironmentConfig.MAX_OFF_HEAP_MEMORY.

      + +

      Cache Statistics: Sizing

      + +

Operation performance is often directly proportional to how much of the active data set is cached. BINs and LNs form the vast majority of the cache. Caching of BINs and LNs has different performance impacts, and behavior varies depending on whether an off-heap cache is configured and which CacheMode is used.

      + +

      Main cache current usage is indicated by the following stats. Note that + there is currently no stat for the number of LNs in the main cache.

      + + +

      Off-heap cache current usage is indicated by:

      + +

      + +

      A cache miss is considered a miss only when the object is not found in + either cache. Misses often result in file I/O and are a primary indicator + of cache performance. Fetches (access requests) and misses are indicated + by:

      + + +

When the number of LN misses (nLNsFetchMiss) or the number of BIN misses (nBINsFetchMiss + nFullBINsMiss) is significant, the JE cache may be undersized, as discussed below. But note that it is not practical to correlate the number of fetches and misses directly to application operations, because LNs are sometimes embedded, BINs are sometimes accessed multiple times per operation, and internal Btree accesses are included in the stat values.

      + +

      Ideally, all BINs and LNs for the active data set should fit in cache so + that operations do not result in fetch misses, which often perform random + read I/O. When this is not practical, which is often the case for large + data sets, the next best thing is to ensure that all BINs fit in cache, + so that an operation will perform at most one random read I/O to fetch + the LN. The DbCacheSize javadoc describes how to size the cache + to ensure that all BINs and/or LNs fit in cache.
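For illustration, a sketch of reading the fetch/miss stats discussed above (env is an open Environment):

     EnvironmentStats stats = env.getStats(new StatsConfig());
     long binMisses = stats.getNBINsFetchMiss() + stats.getNFullBINsMiss();
     long lnMisses = stats.getNLNsFetchMiss();
     // Significant miss counts relative to getNBINsFetch() and
     // getNLNsFetch() suggest the cache may be undersized.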

      + +

      Normally EnvironmentConfig.MAX_MEMORY_PERCENT determines the JE + cache size as a value relative to the JVM heap size, i.e., the heap size + determines the cache size.

      + +

      For configuring cache size and behavior, see:

      + + +

      When using Oracle NoSQL DB, a sizing exercise and DbCacheSize are + used to determine the cache size needed to hold all BINs in memory. The + memory available to each node is divided between a 32 GB heap for the JVM + process (so that CompressedOops may be used) and the off-heap cache (when + more than 32 GB of memory is available).

      + +

It is also important not to configure the cache size too large, relative to the JVM heap size. If there is not enough free space in the heap, Java GC pauses may become a problem. Increasing the default value for MAX_MEMORY_PERCENT, or setting MAX_MEMORY (which overrides MAX_MEMORY_PERCENT), should be done carefully.

      + +

      Java GC performance may also be improved by using CacheMode.EVICT_LN. Record data sizes should also be kept below 1 MB to + avoid "humongous objects" (see Java GC documentation).

      + +

      When using Oracle NoSQL DB, by default, MAX_MEMORY_PERCENT is + set to 70% and CacheMode.EVICT_LN is used. The LOB (large object) + API is implemented using multiple JE records per LOB where the data size of + each record is 1 MB or less.

      + +

      When a shared cache is configured, the main and off-heap cache may be + shared by multiple JE Environments in a single JVM process. See:

      + + +

      When using Oracle NoSQL DB, the JE shared cache feature is not used + because each node only uses a single JE Environment.

      + +

      Cache Statistics: Size + Optimizations

      + +

      Since a large portion of an IN consists of record keys, JE uses + key prefix compression. + Ideally, key suffixes are small enough to be stored using the compact key format. The + following stat indicates the number of INs using this compact format:

      + + +

      Configuration params impacting key prefixing and the compact key format + are:

      + + +

      Enabling key prefixing for all databases is strongly recommended. When + using Oracle NoSQL DB, key prefixing is always enabled.

      + +

      Another configuration param impacting BIN cache size is TREE_MAX_EMBEDDED_LN. There is currently no stat indicating the number of + embedded LNs. See:

      + + +

      Cache Statistics: Unexpected + Sizes

      + +

      Although the Btree normally occupies the vast majority of the cache, it + is possible that record locks occupy unexpected amounts of cache when + large transactions are used, or when cursors or transactions are left open + due to application bugs. The following stat indicates the amount of cache + used by record locks:

      + + +

      To reduce the amount of memory used for record locks:

      +
        +
      • Use a small number of write operations per transaction. Write + locks are held until the end of a transaction.
      • +
      • For transactions using Serializable isolation or RepeatableRead + isolation (the default), use a small number of read operations per + transaction.
      • +
• To read large numbers of records, use LockMode.READ_COMMITTED isolation or use a null Transaction (which implies ReadCommitted); see the sketch after this list. With ReadCommitted isolation, locks are released after each read operation. Using LockMode.READ_UNCOMMITTED will also avoid record locks, but does not provide any transactional guarantees.
      • +
      • Ensure that all cursors and transactions are closed + promptly.
      • +
      + +
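As a sketch of the ReadCommitted guideline above (db and keyBytes are assumed to be the application's Database and key):

     DatabaseEntry key = new DatabaseEntry(keyBytes);
     DatabaseEntry data = new DatabaseEntry();
     // With READ_COMMITTED (or a null Transaction), the record lock is
     // released after the read rather than held to end of transaction.
     OperationStatus status = db.get(null, key, data, LockMode.READ_COMMITTED);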

      Note that the above guidelines are also important for reducing contention + when records are accessed concurrently from multiple threads and + transactions. When using Oracle NoSQL DB, the application should avoid + performing a large number of write operations in a single request. For read + operations, NoSQL DB uses ReadCommitted isolation to avoid accumulation of + locks.

      + +

      Another unexpected use of cache is possible when using a DiskOrderedCursor or when calling Database.count(). The amount of + cache used by these operations is indicated by:

      + + +

      DiskOrderedCursor and Database.count should normally be + explicitly constrained to use a maximum amount of cache memory. See:

      + + +

      Oracle NoSQL DB does not currently use DiskOrderedCursor or + Database.count.

      + +

      Cache Statistics: Eviction

      + +

Eviction is the removal of Btree nodes from the cache in order to make room for newly added nodes. See CacheMode for a description of eviction.

      + +

      Normally eviction is performed via background threads in the eviction + thread pools. Disabling the eviction pool threads is not recommended.

      + + +

Eviction stats are an important indicator of cache efficiency and provide a deeper understanding of cache behavior. Main cache eviction is indicated by:

      + + +

      Note that objects evicted from the main cache are moved to the off-heap + cache whenever possible.

      + +

      Off-heap cache eviction is indicated by:

      + + +

      When analyzing Java GC performance, the most relevant stats are NLNsEvicted, NNodesMutated and NNodesEvicted, which all + indicate eviction from the main cache based on LRU. Large values for these + stats indicate that many old generation Java objects are being GC'd, which + is often a cause of GC pauses.

      + +

Note that when CacheMode.EVICT_LN is used or when LNs are embedded, NLNsEvicted will be close to zero because LNs are not evicted based on LRU. And if an off-heap cache is configured, NNodesMutated will be close to zero because BIN mutation takes place in the off-heap cache. If any of the three values are large, this points to a potential GC performance problem. The GC logs should be consulted to confirm this.

      + +

Large values for NDirtyNodesEvicted or OffHeapDirtyNodesEvicted indicate that the cache is severely undersized, creating a risk of exhausting available disk space along with severe performance problems. Dirty nodes are evicted last (after evicting all non-dirty nodes) because they must be written to disk. This causes excessive writing, and JE log cleaning may be unproductive.

      + +

      Note that when an off-heap cache is configured, NDirtyNodesEvicted will be zero because dirty nodes in the main cache are + moved to the off-heap cache if they don't fit in the main cache, and are + evicted completely and written to disk only when they don't fit in the + off-heap cache.

      + +

      Another type of eviction tuning for the main cache involves changing the + number of bytes evicted each time an evictor thread is awoken:

      + + +

If the number of bytes is too large, it may cause a noticeable spike in eviction activity, reducing resources available to other threads. If the number of bytes is too small, the overhead of waking the evictor threads more often may be noticeable. The default value for this parameter is generally a good compromise. This parameter also impacts critical eviction, which is described next.
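For example (the 1 MB value is arbitrary, for illustration only):

     EnvironmentConfig envConfig = new EnvironmentConfig();
     envConfig.setConfigParam(EnvironmentConfig.EVICTOR_EVICT_BYTES, "1048576");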

      + +

      Note that the corresponding parameter for the off-heap cache, EnvironmentConfig.OFFHEAP_EVICT_BYTES, works differently and is described + in the next section.

      + +

      Cache Statistics: Critical + Eviction

      + +

      The following stats indicate that critical eviction is occurring:

      + + +

      Eviction is performed by eviction pool threads, calls to Environment.evictMemory() in application background threads, or via CacheMode.EVICT_LN or CacheMode.EVICT_BIN. If these mechanisms are + not sufficient to evict memory from cache as quickly as CRUD operations are + adding memory to cache, then critical eviction comes into play. Critical + eviction is performed in-line in the thread performing the CRUD operation, + which is very undesirable since it increases operation latency.

      + +

      Critical eviction in the main cache is indicated by large values for + NBytesEvictedCritical, as compared to the other NBytesEvictedXXX stats. Critical eviction in the off-heap cache is + indicated by large values for OffHeapCriticalNodesTargeted compared + to OffHeapNodesTargeted.

      + +

      Additional stats indicating that background eviction threads may be + insufficient are:

      + + +

      Critical eviction can sometimes be reduced by changing EnvironmentConfig.EVICTOR_CRITICAL_PERCENTAGE or modifying the eviction + thread pool parameters.

      + + +

      When using Oracle NoSQL DB, EVICTOR_CRITICAL_PERCENTAGE is set to + 20% rather than using the JE default of 0%.

      + +

      In the main cache, critical eviction uses the same parameter as + background eviction for determining how many bytes to evict at one + time:

      + + +

      Be careful when increasing this value, since this will cause longer + operation latencies when critical eviction is occurring in the main + cache.

      + +

      The corresponding parameter for the off-heap cache, OFFHEAP_EVICT_BYTES, works differently:

      + + +

      Unlike in the main cache, OFFHEAP_EVICT_BYTES defines the goal + for background eviction to be below MAX_OFF_HEAP_MEMORY. The + background evictor threads for the off-heap cache attempt to maintain the + size of the off-heap cache at MAX_OFF_HEAP_MEMORY - + OFFHEAP_EVICT_BYTES. If the off-heap cache size grows larger than MAX_OFF_HEAP_MEMORY, critical off-heap eviction will occur. The default + value for OFFHEAP_EVICT_BYTES is fairly large to ensure that + critical eviction does not occur. Be careful when lowering this value.

      + +

      This approach is intended to prevent the off-heap cache from exceeding + its maximum size. If the maximum is exceeded, there is a danger that the + JVM process will be killed by the OS. See getOffHeapAllocFailures().

      + +

      Cache Statistics: LRU List + Contention

      + +

      Another common tuning issue involves thread contention on the cache LRU + lists, although there is no stat to indicate such contention. Since each + time a node is accessed it must be moved to the end of the LRU list, a + single LRU list would cause contention among threads performing CRUD + operations. By default there are 4 LRU lists for each cache. If contention + is noticeable on internal Evictor.LRUList or OffHeapCache.LRUList methods, + consider increasing the number of LRU lists:

      + + +

      However, note that increasing the number of LRU lists will decrease the + accuracy of the LRU.

      + +

      Cache Statistics: Debugging

      + +

      The following cache stats are unlikely to be needed for monitoring or + tuning, but are sometimes useful for debugging and testing.

      + + +

      Likewise, the following cache configuration params are unlikely to be + needed for tuning, but are sometimes useful for debugging and testing.

      + + +
      +
      +
      See Also:
      +
      Viewing + Statistics with JConsole, +Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods Deprecated Methods 
        Modifier and TypeMethod and Description
        longgetActiveLogSize() +
        "Bytes used by all active data files: files required for basic JE operation."
        +
        longgetAdminBytes() +
        "Number of bytes of JE main cache used for cleaner and checkpointer metadata, in bytes."
        +
        longgetAvailableLogSize() +
        "Bytes available for write operations when unprotected reserved files are deleted: free space + reservedLogSize - protectedLogSize."
        +
        longgetAvgBatchCacheMode() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetAvgBatchCritical() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetAvgBatchDaemon() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetAvgBatchEvictorThread() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetAvgBatchManual() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetBufferBytes() +
        The total memory currently consumed by log buffers, in bytes.
        +
        longgetCacheDataBytes() +
        Deprecated.  +
        Please use getDataBytes() to get the amount of cache + used for data and use getAdminBytes(), getLockBytes() and + getBufferBytes() to get other components of the total cache usage + (getCacheTotalBytes()).
        +
        +
        longgetCacheTotalBytes() +
        "Total amount of JE main cache in use, in bytes."
        +
        intgetCleanerBacklog() +
        Deprecated.  +
        in 7.0, always returns zero. Use getCurrentMinUtilization() and getCurrentMaxUtilization() to + monitor cleaner behavior.
        +
        +
        floatgetCorrectedAvgLNSize() +
        Deprecated.  +
        in JE 5.0.56, use getCorrectedAvgLNSize() instead.
        +
        +
        intgetCurrentMaxUtilization() +
        "The current maximum (upper bound) log utilization as a percentage."
        +
        intgetCurrentMinUtilization() +
        "The current minimum (lower bound) log utilization as a percentage."
        +
        longgetCursorsBins() +
        The number of BINs encountered by the INCompressor that had cursors + referring to them when the compressor ran.
        +
        longgetDataAdminBytes() +
        "Amount of JE main cache used for holding per-database cleaner utilization metadata, in bytes."
        +
        longgetDataBytes() +
        "Amount of JE main cache used for holding data, keys and internal Btree nodes, in bytes."
        +
        longgetDbClosedBins() +
        The number of BINs encountered by the INCompressor that had their + database closed between the time they were put on the compressor queue + and when the compressor ran.
        +
        longgetDirtyLRUSize() +
        "Number of INs in the dirty/priority-2 LRU "
        +
        longgetDOSBytes() +
        "Amount of JE main cache consumed by disk-ordered cursor and Database.count operations, in bytes."
        +
        longgetEndOfLog() +
        The location of the next entry to be written to the log.
        +
        longgetEnvironmentCreationTime() +
        The time the Environment was created.
        +
        floatgetEstimatedAvgLNSize() +
        Deprecated.  +
        in JE 5.0.56, use getCorrectedAvgLNSize() instead.
        +
        +
        intgetFileDeletionBacklog() +
        Deprecated.  +
in 7.5, always returns zero. Use getProtectedLogSize() and getProtectedLogSizeMap() to monitor file protection.
        +
        +
        longgetFSyncMaxTime() +
        The maximum number of milliseconds used to perform a single fsync.
        +
        longgetFSyncTime() +
        The total number of milliseconds used to perform fsyncs.
        +
        longgetInCompQueueSize() +
        The number of entries in the INCompressor queue when the getStats() + call was made.
        +
        longgetLastCheckpointEnd() +
        The location in the log of the last checkpoint end.
        +
        longgetLastCheckpointId() +
        The Id of the last checkpoint.
        +
        longgetLastCheckpointInterval() +
        Byte length from last checkpoint start to the previous checkpoint start.
        +
        longgetLastCheckpointStart() +
        The location in the log of the last checkpoint start.
        +
        intgetLastKnownUtilization() +
        Deprecated.  + +
        +
        floatgetLNSizeCorrectionFactor() +
        Deprecated.  +
        in JE 6.3. Adjustments are no longer needed because LN log + sizes have been stored in the Btree since JE 6.0.
        +
        +
        longgetLockBytes() +
        "Number of bytes of JE cache used for holding locks and transactions, in bytes."
        +
        longgetMixedLRUSize() +
        "Number of INs in the mixed/priority-1 LRU "
        +
        intgetNAcquiresNoWaiters() +
        Number of acquires of lock table latch with no contention.
        +
        intgetNAcquiresNoWaitSuccessful() +
        Number of successful no-wait acquires of the lock table latch.
        +
        intgetNAcquiresNoWaitUnSuccessful() +
        Number of unsuccessful no-wait acquires of the lock table latch.
        +
        intgetNAcquiresSelfOwned() +
        Number of acquires of lock table latch when it was already owned + by the caller.
        +
        intgetNAcquiresWithContention() +
        Number of acquires of lock table latch when it was already owned by + another thread.
        +
        longgetNBatchesCacheMode() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBatchesCritical() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBatchesDaemon() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBatchesEvictorThread() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBatchesManual() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBINDeltaBlindOps() +
        "The number of operations performed blindly in BIN deltas"
        +
        longgetNBinDeltaDeleteOps() +
        The number of user (non-internal) Cursor and Database delete operations + performed in BIN deltas.
        +
        longgetNBinDeltaGetOps() +
        The number of user (non-internal) Cursor and Database get operations + performed in BIN deltas.
        +
        longgetNBinDeltaInsertOps() +
        The number of user (non-internal) Cursor and Database insert operations + performed in BIN deltas (these are insertions performed via the various + put methods).
        +
        longgetNBINDeltasCleaned() +
        "Accumulated number of BIN-deltas cleaned."
        +
        longgetNBINDeltasDead() +
        "Accumulated number of BIN-deltas that were not found in the tree anymore (deleted)."
        +
        longgetNBINDeltasFetchMiss() +
        "Number of BIN-deltas (partial BINs) fetched to satisfy btree operations that were not in main cache."
        +
        longgetNBINDeltasMigrated() +
        "Accumulated number of BIN-deltas migrated."
        +
        longgetNBINDeltasObsolete() +
        "Accumulated number of BIN-deltas obsolete."
        +
        longgetNBinDeltaUpdateOps() +
        The number of user (non-internal) Cursor and Database update operations + performed in BIN deltas (these are updates performed via the various + put methods).
        +
        longgetNBINsEvictedCacheMode() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBINsEvictedCritical() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBINsEvictedDaemon() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBINsEvictedEvictorThread() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBINsEvictedManual() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNBINsFetch() +
        "Number of BINs (bottom internal nodes) and BIN-deltas requested by btree operations."
        +
        longgetNBINsFetchMiss() +
        "Number of full BINs (bottom internal nodes) and BIN-deltas fetched to satisfy btree operations that were not in main cache."
        +
        floatgetNBINsFetchMissRatio() +
        "The BIN fetch miss ratio (nBINsFetchMiss / nBINsFetch)"
        +
        longgetNBINsMutated() +
        Deprecated.  +
        Use getNNodesMutated() instead.
        +
        +
        longgetNBINsStripped() +
        Deprecated.  +
        Use getNNodesStripped() instead.
        +
        +
        longgetNBytesEvictedCacheMode() +
        "Number of bytes evicted by operations for which CacheMode.EVICT_BIN is specified."
        +
        longgetNBytesEvictedCritical() +
        "Number of bytes evicted in the application thread because the cache is over budget."
        +
        longgetNBytesEvictedDeamon() +
        "Number of bytes evicted by JE deamon threads."
        +
        longgetNBytesEvictedEvictorThread() +
        "Number of bytes evicted by evictor pool threads."
        +
        longgetNBytesEvictedManual() +
        "Number of bytes evicted by the Environment.evictMemory or during Environment startup."
        +
        longgetNBytesReadFromWriteQueue() +
        The number of bytes read to fulfill file read operations by reading out + of the pending write queue.
        +
        longgetNBytesWrittenFromWriteQueue() +
        The number of bytes written from the pending write queue.
        +
        longgetNCachedBINDeltas() +
        "Number of BIN-deltas (partial BINs) in main cache. This is a subset of the nCachedBINs value."
        +
        longgetNCachedBINs() +
        "Number of BINs (bottom internal nodes) and BIN-deltas in main cache."
        +
        longgetNCachedUpperINs() +
        "Number of upper INs (non-bottom internal nodes) in main cache."
        +
        longgetNCacheMiss() +
        The total number of requests for database objects which were not in + memory.
        +
        longgetNCheckpoints() +
        The total number of checkpoints run so far.
        +
        longgetNCleanerDeletions() +
        "Number of cleaner file deletions."
        +
        longgetNCleanerDiskRead() +
        "Number of disk reads by the cleaner."
        +
        longgetNCleanerEntriesRead() +
        "Accumulated number of log entries read by the cleaner."
        +
        longgetNCleanerProbeRuns() +
        Deprecated.  +
        in JE 6.3, always returns zero.
        +
        +
        longgetNCleanerRevisalRuns() +
        "Number of cleaner runs that ended in revising expiration info, but not in any cleaning."
        +
        longgetNCleanerRuns() +
        "Number of cleaner runs, including two-pass runs."
        +
        longgetNCleanerTwoPassRuns() +
        "Number of cleaner two-pass runs."
        +
        longgetNClusterLNsProcessed() +
        "Accumulated number of LNs processed because they qualify for clustering."
        +
        longgetNDeltaINFlush() +
        The accumulated number of Delta INs flushed to the log.
        +
        longgetNDirtyNodesEvicted() +
        "Number of dirty target nodes logged and evicted."
        +
        longgetNEvictionRuns() +
        "Number of times the background eviction thread is awoken."
        +
        longgetNEvictPasses() +
        Deprecated.  +
        Use getNEvictionRuns() instead.
        +
        +
        intgetNFileOpens() +
        The number of times a log file has been opened.
        +
        longgetNFSyncRequests() +
        The number of fsyncs requested through the group commit manager.
        +
        longgetNFSyncs() +
        The number of fsyncs issued through the group commit manager.
        +
        longgetNFSyncTimeouts() +
        The number of fsync requests submitted to the group commit manager which + timed out.
        +
        longgetNFullBINFlush() +
        The accumulated number of full BINs flushed to the log.
        +
        longgetNFullBINsMiss() +
        "Number of times a BIN-delta had to be mutated to a full BIN (and as a result a full BIN had to be read in from the log)."
        +
        longgetNFullINFlush() +
        The accumulated number of full INs flushed to the log.
        +
        longgetNINCompactKeyIN() +
        "Number of INs that use a compact key representation to minimize the key object representation overhead."
        +
        longgetNINNoTarget() +
        "Number of INs that use a compact representation when none of its child nodes are in the main cache."
        +
        longgetNINsCleaned() +
        "Accumulated number of INs cleaned."
        +
        longgetNINsDead() +
        "Accumulated number of INs that were not found in the tree anymore (deleted)."
        +
        longgetNINsMigrated() +
        "Accumulated number of INs migrated."
        +
        longgetNINsObsolete() +
        "Accumulated number of INs obsolete."
        +
        longgetNINSparseTarget() +
        "Number of INs that use a compact sparse array representation to point to child nodes in the main cache."
        +
        longgetNLNQueueHits() +
        "Accumulated number of LNs processed without a tree lookup."
        +
        longgetNLNsCleaned() +
        "Accumulated number of LNs cleaned."
        +
        longgetNLNsDead() +
        "Accumulated number of LNs that were not found in the tree anymore (deleted)."
        +
        longgetNLNsEvicted() +
        "Number of LNs evicted as a result of LRU-based eviction (but not CacheMode.EVICT_LN)."
        +
        longgetNLNsExpired() +
        "Accumulated number of obsolete LNs that were expired."
        +
        longgetNLNsFetch() +
        "Number of LNs (data records) requested by btree operations."
        +
        longgetNLNsFetchMiss() +
        "Number of LNs (data records) requested by btree operations that were not in main cache."
        +
        longgetNLNsLocked() +
        "Accumulated number of LNs encountered that were locked."
        +
        longgetNLNsMarked() +
        "Accumulated number of LNs in temporary DBs that were dirtied by the cleaner and subsequently logging during checkpoint/eviction."
        +
        longgetNLNsMigrated() +
        "Accumulated number of LNs that were migrated forward in the log by the cleaner."
        +
        longgetNLNsObsolete() +
        "Accumulated number of LNs obsolete."
        +
        intgetNLogBuffers() +
        The number of log buffers currently instantiated.
        +
        longgetNLogFSyncs() +
        The total number of fsyncs of the JE log.
        +
        longgetNMarkedLNsProcessed() +
        "Accumulated number of LNs processed because they were previously marked for migration."
        +
        longgetNNodesEvicted() +
        "Number of target nodes (INs) evicted from the main cache."
        +
        longgetNNodesExplicitlyEvicted() +
        Deprecated.  +
        Use getNNodesEvicted() instead.
        +
        +
        longgetNNodesMovedToDirtyLRU() +
        "Number of nodes (INs) moved from the mixed/priority-1 to the dirty/priority-2 LRU list."
        +
        longgetNNodesMutated() +
        "Number of target BINs mutated to BIN-deltas."
        +
        longgetNNodesPutBack() +
        "Number of target nodes (INs) moved to the cold end of the LRU list without any action taken on them."
        +
        longgetNNodesScanned() +
        Deprecated.  +
        This statistic has no meaning after the implementation + of the new evictor in JE 6.0. The method returns 0 always.
        +
        +
        longgetNNodesSelected() +
        Deprecated.  +
        use getNNodesTargeted() instead.
        +
        +
        longgetNNodesSkipped() +
        "Number of nodes (INs) that did not require any action."
        +
        longgetNNodesStripped() +
        "Number of target BINs whose child LNs were evicted (stripped)."
        +
        longgetNNodesTargeted() +
        "Number of nodes (INs) selected as eviction targets."
        +
        longgetNNotResident() +
        The number of requests for database objects not contained within the + in memory data structures.
        +
        longgetNonEmptyBins() +
        The number of BINs encountered by the INCompressor that were not + actually empty when the compressor ran.
        +
        intgetNOpenFiles() +
        The number of files currently open in the file cache.
        +
        intgetNOwners() +
        Total lock owners in lock table.
        +
        longgetNPendingLNsLocked() +
        "Accumulated number of pending LNs that could not be locked for migration because of a long duration application lock."
        +
        longgetNPendingLNsProcessed() +
        "Accumulated number of LNs processed because they were previously locked."
        +
        longgetNRandomReadBytes() +
        The number of bytes read which required repositioning the disk head + more than 1MB from the previous file position.
        +
        longgetNRandomReads() +
        The number of disk reads which required repositioning the disk head + more than 1MB from the previous file position.
        +
        longgetNRandomWriteBytes() +
        The number of bytes written which required repositioning the disk head + more than 1MB from the previous file position.
        +
        longgetNRandomWrites() +
        The number of disk writes which required repositioning the disk head by + more than 1MB from the previous file position.
        +
        intgetNReadLocks() +
        Total read locks currently held.
        +
        longgetNReadsFromWriteQueue() +
        The number of file read operations which were fulfilled by reading out + of the pending write queue.
        +
        intgetNReleases() +
        Number of releases of the lock table latch.
        +
        longgetNRepeatFaultReads() +
        The number of reads which had to be repeated when faulting in an object + from disk because the read chunk size controlled by je.log.faultReadSize + is too small.
        +
        longgetNRepeatIteratorReads() +
        "Number of attempts to read a log entry larger than the read buffer size during which the log buffer couldn\'t be grown enough to accommodate the object."
        +
        longgetNRequests() +
        Total number of lock requests to date.
        +
        longgetNRootNodesEvicted() +
        "Number of database root nodes (INs) evicted."
        +
        longgetNSequentialReadBytes() +
        The number of bytes read which did not require repositioning the disk + head more than 1MB from the previous file position.
        +
        longgetNSequentialReads() +
        The number of disk reads which did not require repositioning the disk + head more than 1MB from the previous file position.
        +
        longgetNSequentialWriteBytes() +
        The number of bytes written which did not require repositioning the + disk head more than 1MB from the previous file position.
        +
        longgetNSequentialWrites() +
        The number of disk writes which did not require repositioning the disk + head by more than 1MB from the previous file position.
        +
        intgetNSharedCacheEnvironments() +
        "Number of Environments sharing the main cache."
        +
        longgetNTempBufferWrites() +
        The number of writes which had to be completed using the temporary + marshalling buffer because the fixed size log buffers specified by + je.log.totalBufferBytes and je.log.numBuffers were not large enough.
        +
        longgetNThreadUnavailable() +
        "Number of eviction tasks that were submitted to the background evictor pool, but were refused because all eviction threads were busy."
        +
        longgetNToBeCleanedLNsProcessed() +
        "Accumulated number of LNs processed because they are soon to be cleaned."
        +
        intgetNTotalLocks() +
        Total locks currently in lock table.
        +
        longgetNUpperINsEvictedCacheMode() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNUpperINsEvictedCritical() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNUpperINsEvictedDaemon() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNUpperINsEvictedEvictorThread() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNUpperINsEvictedManual() +
        Deprecated.  +
        This statistic has been removed. The method returns 0 + always.
        +
        +
        longgetNUpperINsFetch() +
        "Number of Upper INs (non-bottom internal nodes) requested by btree operations."
        +
        longgetNUpperINsFetchMiss() +
        "Number of Upper INs (non-bottom internal nodes) requested by btree operations that were not in main cache."
        +
        intgetNWaiters() +
        Total transactions waiting for locks.
        +
        longgetNWaits() +
        Total number of lock waits to date.
        +
        intgetNWriteLocks() +
        Total write locks currently held.
        +
        longgetNWriteQueueOverflow() +
        The number of writes operations which would overflow the Write Queue.
        +
        longgetNWriteQueueOverflowFailures() +
        The number of writes operations which would overflow the Write Queue + and could not be queued.
        +
        longgetNWritesFromWriteQueue() +
        The number of file writes operations executed from the pending write + queue.
        +
        longgetOffHeapAllocFailures() +
        "Number of off-heap allocation failures due to lack of system memory."
        +
        longgetOffHeapAllocOverflows() +
        "Number of off-heap allocation attempts that exceeded the cache size."
        +
        longgetOffHeapBINsLoaded() +
        "Number of BINs loaded from the off-heap cache."
        +
        longgetOffHeapBINsStored() +
        "Number of BINs stored into the off-heap cache."
        +
        intgetOffHeapCachedBINDeltas() +
        "Number of BIN-deltas residing in the off-heap cache."
        +
        intgetOffHeapCachedBINs() +
        "Number of BINs (full BINs and BIN-deltas) residing in the off-heap cache."
        +
        intgetOffHeapCachedLNs() +
        "Number of LNs residing in the off-heap cache."
        +
        longgetOffHeapCriticalNodesTargeted() +
        "Number of nodes targeted in \'critical eviction\' mode."
        +
        longgetOffHeapDirtyNodesEvicted() +
        "Number of target BINs evicted from the off-heap cache that were dirty and therefore were logged."
        +
        longgetOffHeapLNsEvicted() +
        "Number of LNs evicted from the off-heap cache as a result of BIN stripping."
        +
        longgetOffHeapLNsLoaded() +
        "Number of LNs loaded from the off-heap cache."
        +
        longgetOffHeapLNsStored() +
        "Number of LNs stored into the off-heap cache."
        +
        longgetOffHeapLRUSize() +
        "Number of LRU entries used for the off-heap cache."
        +
        longgetOffHeapNodesEvicted() +
        "Number of target BINs (including BIN-deltas) evicted from the off-heap cache."
        +
        longgetOffHeapNodesMutated() +
        "Number of off-heap target BINs mutated to BIN-deltas."
        +
        longgetOffHeapNodesSkipped() +
        "Number of off-heap target BINs on which no action was taken."
        +
        longgetOffHeapNodesStripped() +
        "Number of target BINs whose off-heap child LNs were evicted (stripped)."
        +
        longgetOffHeapNodesTargeted() +
        "Number of BINs selected as off-heap eviction targets."
        +
        longgetOffHeapThreadUnavailable() +
        "Number of eviction tasks that were submitted to the background off-heap evictor pool, but were refused because all eviction threads were busy."
        +
        longgetOffHeapTotalBlocks() +
        "Total number of memory blocks in off-heap cache."
        +
        longgetOffHeapTotalBytes() +
        "Total number of estimated bytes in off-heap cache."
        +
        intgetPendingLNQueueSize() +
        "Number of LNs pending because they were locked and could not be migrated."
        +
        longgetPriDeleteFailOps() +
        Number of failed primary DB deletion operations.
        +
        longgetPriDeleteOps() +
        Number of successful primary DB deletion operations.
        +
        longgetPriInsertFailOps() +
        Number of failed primary DB insertion operations.
        +
        longgetPriInsertOps() +
        Number of successful primary DB insertion operations.
        +
        longgetPriPositionOps() +
        Number of successful primary DB position operations.
        +
        longgetPriSearchFailOps() +
        Number of failed primary DB key search operations.
        +
        longgetPriSearchOps() +
        Number of successful primary DB key search operations.
        +
        longgetPriUpdateOps() +
        Number of successful primary DB update operations.
        +
        longgetProcessedBins() +
        The number of BINs that were successfully processed by the IN + Compressor.
        +
        longgetProtectedLogSize() +
        "Bytes used by all protected data files: the subset of reserved files that are temporarily protected and cannot be deleted."
        +
        java.util.SortedMap<java.lang.String,java.lang.Long>getProtectedLogSizeMap() +
        "A breakdown of protectedLogSize as a map of protecting entity name to protected size in bytes."
        +
        longgetRelatchesRequired() +
        Returns the number of latch upgrades (relatches) required while + operating on this Environment.
        +
        longgetRequiredEvictBytes() +
        Deprecated.  +
        The method returns 0 always.
        +
        +
        longgetReservedLogSize() +
        "Bytes used by all reserved data files: files that have beencleaned and can be deleted if they are not protected."
        +
        longgetSecDeleteOps() +
        Number of successful secondary DB deletion operations.
        +
        longgetSecInsertOps() +
        Number of successful secondary DB insertion operations.
        +
        longgetSecPositionOps() +
        Number of successful secondary DB position operations.
        +
        longgetSecSearchFailOps() +
        Number of failed secondary DB key search operations.
        +
        longgetSecSearchOps() +
        Number of successful secondary DB key search operations.
        +
        longgetSecUpdateOps() +
        Number of successful secondary DB update operations.
        +
        longgetSharedCacheTotalBytes() +
        "Total amount of the shared JE main cache in use, in bytes."
        +
        longgetSplitBins() +
        The number of BINs encountered by the INCompressor that were split + between the time they were put on the compressor queue and when the + compressor ran.
        +
        longgetTotalLogSize() +
        "Total bytes used by data files on disk: activeLogSize + reservedLogSize."
        +
        java.lang.StringtoString() +
        Returns a String representation of the stats in the form of + <stat>=<value>
        +
        java.lang.StringtoStringVerbose() +
        Returns a String representation of the stats which includes stats + descriptions in addition to <stat>=<value>
        +
        +
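All of these getters read from a single EnvironmentStats snapshot obtained via Environment.getStats(). A minimal sketch of fetching and printing one snapshot follows; the StatsDump class name and the ./db-env directory are illustrative only, while Environment, EnvironmentConfig, StatsConfig, and EnvironmentStats are the JE classes documented here.

    import java.io.File;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.EnvironmentStats;
    import com.sleepycat.je.StatsConfig;

    public class StatsDump {
        public static void main(String[] args) {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);

            // "./db-env" is a placeholder environment directory for this sketch.
            Environment env = new Environment(new File("./db-env"), envConfig);
            try {
                StatsConfig statsConfig = new StatsConfig();
                statsConfig.setFast(true);   // skip stats that are expensive to compute
                statsConfig.setClear(false); // do not reset counters after reading

                EnvironmentStats stats = env.getStats(statsConfig);

                // toString() prints <stat>=<value> pairs; toStringVerbose() adds
                // the per-stat descriptions as well.
                System.out.println(stats.toStringVerbose());
            } finally {
                env.close();
            }
        }
    }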
Methods inherited from class java.lang.Object

    clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

Method Detail
getCursorsBins

public long getCursorsBins()

    The number of BINs encountered by the INCompressor that had cursors referring to them when the compressor ran.

getEnvironmentCreationTime

public long getEnvironmentCreationTime()

    The time the Environment was created.

getDbClosedBins

public long getDbClosedBins()

    The number of BINs encountered by the INCompressor that had their database closed between the time they were put on the compressor queue and when the compressor ran.

getInCompQueueSize

public long getInCompQueueSize()

    The number of entries in the INCompressor queue when the getStats() call was made.

getNonEmptyBins

public long getNonEmptyBins()

    The number of BINs encountered by the INCompressor that were not actually empty when the compressor ran.

getProcessedBins

public long getProcessedBins()

    The number of BINs that were successfully processed by the INCompressor.

getSplitBins

public long getSplitBins()

    The number of BINs encountered by the INCompressor that were split between the time they were put on the compressor queue and when the compressor ran.

getLastCheckpointId

public long getLastCheckpointId()

    The Id of the last checkpoint.

getNCheckpoints

public long getNCheckpoints()

    The total number of checkpoints run so far.

getNFullINFlush

public long getNFullINFlush()

    The accumulated number of full INs flushed to the log.

getNFullBINFlush

public long getNFullBINFlush()

    The accumulated number of full BINs flushed to the log.

getNDeltaINFlush

public long getNDeltaINFlush()

    The accumulated number of Delta INs flushed to the log.

getLastCheckpointInterval

public long getLastCheckpointInterval()

    Byte length from the last checkpoint start to the previous checkpoint start.

getLastCheckpointStart

public long getLastCheckpointStart()

    The location in the log of the last checkpoint start.

getLastCheckpointEnd

public long getLastCheckpointEnd()

    The location in the log of the last checkpoint end.
getCurrentMinUtilization

public int getCurrentMinUtilization()

    "The current minimum (lower bound) log utilization as a percentage."

    Group: "Cleaning"
    Name: "minUtilization"

    The last known minimum log utilization as a percentage. This statistic provides a cheap way of checking the log utilization without having to run the DbSpace utility.

    The log utilization is the percentage of the total log size (all .jdb files) that is utilized or active. The remaining portion of the log is obsolete. The log cleaner is responsible for keeping the log utilization below the configured threshold, EnvironmentConfig.CLEANER_MIN_UTILIZATION.

    This statistic is computed every time the log cleaner examines the utilization of the log, in order to determine whether cleaning is needed. The frequency can be configured using EnvironmentConfig.CLEANER_BYTES_INTERVAL.

    The obsolete portion of the log includes data that had expired at the time the statistic was last computed. An expiration histogram is stored for each file and used to compute the expired size. The minimum and maximum utilization are the lower and upper bounds of computed utilization, which may be different when some data has expired. See getNCleanerTwoPassRuns() for more information.

    Note that the size of the utilized data in the log is always greater than the amount of user data (total size of keys and data). The active Btree internal nodes and other metadata are also included.

    Returns: the current minimum utilization, or -1 if the utilization has not been calculated for this environment since it was last opened.
    Since: 6.5
    See Also: Cleaner Statistics
getCurrentMaxUtilization

public int getCurrentMaxUtilization()

    "The current maximum (upper bound) log utilization as a percentage."

    Group: "Cleaning"
    Name: "maxUtilization"

    The last known maximum log utilization as a percentage. This statistic provides a cheap way of checking the log utilization without having to run the DbSpace utility.

    The log utilization is the percentage of the total log size (all .jdb files) that is utilized or active. The remaining portion of the log is obsolete. The log cleaner is responsible for keeping the log utilization below the configured threshold, EnvironmentConfig.CLEANER_MIN_UTILIZATION.

    This statistic is computed every time the log cleaner examines the utilization of the log, in order to determine whether cleaning is needed. The frequency can be configured using EnvironmentConfig.CLEANER_BYTES_INTERVAL.

    The obsolete portion of the log includes data that had expired at the time the statistic was last computed. An expiration histogram is stored for each file and used to compute the expired size. The minimum and maximum utilization are the lower and upper bounds of computed utilization, which may be different when some data has expired. See getNCleanerTwoPassRuns() for more information.

    Note that the size of the utilized data in the log is always greater than the amount of user data (total size of keys and data). The active Btree internal nodes and other metadata are also included.

    Returns: the current maximum utilization, or -1 if the utilization has not been calculated for this environment since it was last opened.
    Since: 6.5
    See Also: Cleaner Statistics
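A short monitoring sketch using the two bounds above; the UtilizationCheck class name is illustrative, and "env" is assumed to be an already-open Environment.

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;

    final class UtilizationCheck {
        static void report(Environment env) {
            EnvironmentStats stats = env.getStats(null); // null selects default StatsConfig
            int minUtil = stats.getCurrentMinUtilization();
            int maxUtil = stats.getCurrentMaxUtilization();
            if (minUtil < 0) {
                // -1 means utilization has not been calculated since the
                // environment was last opened.
                System.out.println("Log utilization not yet computed");
            } else {
                // The true utilization lies between the two bounds; they differ
                // only when some data has expired (see getNCleanerTwoPassRuns()).
                System.out.printf("Log utilization between %d%% and %d%%%n",
                                  minUtil, maxUtil);
            }
        }
    }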
getLNSizeCorrectionFactor

public float getLNSizeCorrectionFactor()

    Deprecated. In JE 6.3. Adjustments are no longer needed because LN log sizes have been stored in the Btree since JE 6.0.

getCorrectedAvgLNSize

public float getCorrectedAvgLNSize()

    Deprecated. In JE 5.0.56, use getCorrectedAvgLNSize() instead.

getEstimatedAvgLNSize

public float getEstimatedAvgLNSize()

    Deprecated. In JE 5.0.56, use getCorrectedAvgLNSize() instead.
getNCleanerRuns

public long getNCleanerRuns()

    "Number of cleaner runs, including two-pass runs."

    Group: "Cleaning"
    Name: "nCleanerRuns"

    Total number of cleaner runs, including two-pass runs but not including revisal runs. The minimum and maximum utilization values are used to drive cleaning.

    See Also: Cleaner Statistics
getNCleanerTwoPassRuns

public long getNCleanerTwoPassRuns()

    "Number of cleaner two-pass runs."

    Group: "Cleaning"
    Name: "nTwoPassRuns"

    Number of cleaner two-pass runs, which are a subset of the total cleaner runs. The minimum and maximum utilization values are used to drive cleaning.

    The obsolete portion of the log includes data that has expired. An expiration histogram is stored for each file and used to compute the expired size. The minimum and maximum utilization are the lower and upper bounds of computed utilization. They are different only when the TTL feature is used, and some data in the file has expired while other data has become obsolete for other reasons, such as record updates, record deletions or checkpoints. In this case the strictly obsolete size and the expired size may overlap because they are maintained separately.

    If they overlap completely then the minimum utilization is correct, while if there is no overlap then the maximum utilization is correct. Both utilization values trigger cleaning, but when there is significant overlap, the cleaner will perform two-pass cleaning.

    In the first pass of two-pass cleaning, the file is read to recompute obsolete and expired sizes, but the file is not cleaned. As a result of recomputing the expired sizes, the strictly obsolete and expired sizes will no longer overlap, and the minimum and maximum utilization will be equal. If the file should still be cleaned, based on the recomputed utilization, it is cleaned as usual, and in this case the number of two-pass runs (this statistic) is incremented.

    If the file should not be cleaned because its recomputed utilization is higher than expected, the file will not be cleaned. Instead, its recomputed expiration histogram, which has size information that now does not overlap with the strictly obsolete data, is stored for future use. By storing the revised histogram, the cleaner can select the most appropriate files for cleaning in the future. In this case the number of revisal runs is incremented, and the number of total runs is not incremented.

    Since: 6.5.0
    See Also: Cleaner Statistics
getNCleanerRevisalRuns

public long getNCleanerRevisalRuns()

    "Number of cleaner runs that ended in revising expiration info, but not in any cleaning."

    Group: "Cleaning"
    Name: "nRevisalRuns"

    Since: 6.5.0
    See Also: getNCleanerTwoPassRuns(), Cleaner Statistics
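Because total runs include two-pass runs but not revisal runs, the three counters can be broken down with simple arithmetic, as in this sketch; the CleanerRunBreakdown class name is illustrative, and "env" is assumed to be an open Environment.

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;

    final class CleanerRunBreakdown {
        static void print(Environment env) {
            EnvironmentStats stats = env.getStats(null);
            long totalRuns   = stats.getNCleanerRuns();        // includes two-pass runs
            long twoPassRuns = stats.getNCleanerTwoPassRuns(); // subset of totalRuns
            long revisalRuns = stats.getNCleanerRevisalRuns(); // not counted in totalRuns
            long singlePassRuns = totalRuns - twoPassRuns;
            System.out.printf("cleaner runs: %d single-pass, %d two-pass, %d revisal%n",
                              singlePassRuns, twoPassRuns, revisalRuns);
        }
    }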
getNCleanerProbeRuns

public long getNCleanerProbeRuns()

    Deprecated. In JE 6.3, always returns zero.
getNCleanerDeletions

public long getNCleanerDeletions()

    "Number of cleaner file deletions."

    Group: "Cleaning"
    Name: "nCleanerDeletions"

    The number of cleaner file deletions this session.

    See Also: Cleaner Statistics

getPendingLNQueueSize

public int getPendingLNQueueSize()

    "Number of LNs pending because they were locked and could not be migrated."

    Group: "Cleaning"
    Name: "pendingLNQueueSize"

    See Also: Cleaner Statistics

getNCleanerDiskRead

public long getNCleanerDiskRead()

    "Number of disk reads by the cleaner."

    Group: "Cleaning"
    Name: "nCleanerDisksReads"

    See Also: Cleaner Statistics

getNCleanerEntriesRead

public long getNCleanerEntriesRead()

    "Accumulated number of log entries read by the cleaner."

    Group: "Cleaning"
    Name: "nCleanerEntriesRead"

    See Also: Cleaner Statistics
getNINsObsolete

public long getNINsObsolete()

    "Accumulated number of INs obsolete."

    Group: "Cleaning"
    Name: "nINsObsolete"

    See Also: Cleaner Statistics

getNINsCleaned

public long getNINsCleaned()

    "Accumulated number of INs cleaned."

    Group: "Cleaning"
    Name: "nINsCleaned"

    See Also: Cleaner Statistics

getNINsDead

public long getNINsDead()

    "Accumulated number of INs that were not found in the tree anymore (deleted)."

    Group: "Cleaning"
    Name: "nINsDead"

    See Also: Cleaner Statistics

getNINsMigrated

public long getNINsMigrated()

    "Accumulated number of INs migrated."

    Group: "Cleaning"
    Name: "nINsMigrated"

    See Also: Cleaner Statistics

getNBINDeltasObsolete

public long getNBINDeltasObsolete()

    "Accumulated number of BIN-deltas obsolete."

    Group: "Cleaning"
    Name: "nBINDeltasObsolete"

    See Also: Cleaner Statistics

getNBINDeltasCleaned

public long getNBINDeltasCleaned()

    "Accumulated number of BIN-deltas cleaned."

    Group: "Cleaning"
    Name: "nBINDeltasCleaned"

    See Also: Cleaner Statistics

getNBINDeltasDead

public long getNBINDeltasDead()

    "Accumulated number of BIN-deltas that were not found in the tree anymore (deleted)."

    Group: "Cleaning"
    Name: "nBINDeltasDead"

    See Also: Cleaner Statistics

getNBINDeltasMigrated

public long getNBINDeltasMigrated()

    "Accumulated number of BIN-deltas migrated."

    Group: "Cleaning"
    Name: "nBINDeltasMigrated"

    See Also: Cleaner Statistics
getNLNsObsolete

public long getNLNsObsolete()

    "Accumulated number of LNs obsolete."

    Group: "Cleaning"
    Name: "nLNsObsolete"

    See Also: Cleaner Statistics

getNLNsExpired

public long getNLNsExpired()

    "Accumulated number of obsolete LNs that were expired."

    Group: "Cleaning"
    Name: "nLNsExpired"

    Note that this does not include embedded LNs (those having a data size less than EnvironmentConfig.TREE_MAX_EMBEDDED_LN), because embedded LNs are always considered obsolete.

    See Also: Cleaner Statistics

getNLNsCleaned

public long getNLNsCleaned()

    "Accumulated number of LNs cleaned."

    Group: "Cleaning"
    Name: "nLNsCleaned"

    See Also: Cleaner Statistics

getNLNsDead

public long getNLNsDead()

    "Accumulated number of LNs that were not found in the tree anymore (deleted)."

    Group: "Cleaning"
    Name: "nLNsDead"

    See Also: Cleaner Statistics

getNLNsLocked

public long getNLNsLocked()

    "Accumulated number of LNs encountered that were locked."

    Group: "Cleaning"
    Name: "nLNsLocked"

    See Also: Cleaner Statistics
getNLNsMigrated

public long getNLNsMigrated()

    "Accumulated number of LNs that were migrated forward in the log by the cleaner."

    Group: "Cleaning"
    Name: "nLNsMigrated"

    See Also: Cleaner Statistics

getNLNsMarked

public long getNLNsMarked()

    "Accumulated number of LNs in temporary DBs that were dirtied by the cleaner and subsequently logged during checkpoint/eviction."

    Group: "Cleaning"
    Name: "nLNsMarked"

    See Also: Cleaner Statistics

getNLNQueueHits

public long getNLNQueueHits()

    "Accumulated number of LNs processed without a tree lookup."

    Group: "Cleaning"
    Name: "nLNQueueHits"

    See Also: Cleaner Statistics

getNPendingLNsProcessed

public long getNPendingLNsProcessed()

    "Accumulated number of LNs processed because they were previously locked."

    Group: "Cleaning"
    Name: "nPendingLNsProcessed"

    See Also: Cleaner Statistics

getNMarkedLNsProcessed

public long getNMarkedLNsProcessed()

    "Accumulated number of LNs processed because they were previously marked for migration."

    Group: "Cleaning"
    Name: "nMarkLNsProcessed"

    See Also: Cleaner Statistics

getNToBeCleanedLNsProcessed

public long getNToBeCleanedLNsProcessed()

    "Accumulated number of LNs processed because they are soon to be cleaned."

    Group: "Cleaning"
    Name: "nToBeCleanedLNsProcessed"

    See Also: Cleaner Statistics

getNClusterLNsProcessed

public long getNClusterLNsProcessed()

    "Accumulated number of LNs processed because they qualify for clustering."

    Group: "Cleaning"
    Name: "nClusterLNsProcessed"

    See Also: Cleaner Statistics

getNPendingLNsLocked

public long getNPendingLNsLocked()

    "Accumulated number of pending LNs that could not be locked for migration because of a long duration application lock."

    Group: "Cleaning"
    Name: "nPendingLNsLocked"

    See Also: Cleaner Statistics

getNRepeatIteratorReads

public long getNRepeatIteratorReads()

    "Number of attempts to read a log entry larger than the read buffer size during which the log buffer couldn't be grown enough to accommodate the object."

    Group: "Cleaning"
    Name: "nRepeatIteratorReads"

    The number of times we tried to read a log entry larger than the read buffer size and couldn't grow the log buffer to accommodate the large object. This happens during scans of the log during activities like environment open or log cleaning. It implies that the read chunk size controlled by je.log.iteratorReadSize is too small.

    See Also: Cleaner Statistics
getActiveLogSize

public long getActiveLogSize()

    "Bytes used by all active data files: files required for basic JE operation."

    Group: "Cleaning"
    Name: "activeLogSize"

    The log utilization is the percentage of activeLogSize that is currently referenced or active.

    Since: 7.5
    See Also: Cleaner Statistics
getReservedLogSize

public long getReservedLogSize()

    "Bytes used by all reserved data files: files that have been cleaned and can be deleted if they are not protected."

    Group: "Cleaning"
    Name: "reservedLogSize"

    Deletion of reserved files may be postponed for several reasons. This occurs if an active file is protected (by a backup, for example), and then the file is cleaned and becomes a reserved file. See getProtectedLogSizeMap() for more information. In a standalone JE environment, reserved files are normally deleted very soon after being cleaned.

    In an HA environment, reserved files are retained because they might be used for replication to electable nodes that have been offline for the ReplicationConfig.FEEDER_TIMEOUT interval or longer, or to offline secondary nodes. The replication stream position of these nodes is unknown, so whether these files could be used to avoid a network restore, when bringing these nodes online, is also unknown. The files are retained just in case they can be used for such replication. Files are reserved for replication on both masters and replicas, since a replica may become a master at a future time. Such files will be deleted (oldest file first) to make room for a write operation, if the write operation would have caused a disk limit to be violated.

    In NoSQL DB, this retention of reserved files has the additional benefit of supplying the replication stream to subscribers of the Stream API, when such subscribers need to replay the stream from an earlier point in time.

    Since: 7.5
    See Also: Cleaner Statistics
getProtectedLogSize

public long getProtectedLogSize()

    "Bytes used by all protected data files: the subset of reserved files that are temporarily protected and cannot be deleted."

    Group: "Cleaning"
    Name: "protectedLogSize"

    Reserved files are protected for reasons described by getProtectedLogSizeMap().

    Since: 7.5
    See Also: Cleaner Statistics
getProtectedLogSizeMap

public java.util.SortedMap<java.lang.String,java.lang.Long> getProtectedLogSizeMap()

    "A breakdown of protectedLogSize as a map of protecting entity name to protected size in bytes."

    Group: "Cleaning"
    Name: "protectedLogSizeMap"

    Reserved data files are temporarily protected for a number of reasons. The keys in the protected log size map are the names of the protecting entities, and the values are the number of bytes protected by each entity. The type and format of the entity names are as follows:

        Backup-N
        DatabaseCount-N
        DiskOrderedCursor-N
        Syncup-N
        Feeder-N
        NetworkRestore-N

    Where:

    - Backup-N represents a DbBackup in progress, i.e., for which DbBackup.startBackup() has been called and DbBackup.endBackup() has not yet been called. All active files are initially protected by the backup, but these are not reserved files and only appear in the map if they are cleaned and become reserved after the backup starts. Files are not protected if they have been copied and DbBackup.removeFileProtection(String) has been called. N is a sequentially assigned integer.

    - DatabaseCount-N represents an outstanding call to Database.count(). All active files are initially protected by this method, but these are not reserved files and only appear in the map if they are cleaned and become reserved during the execution of Database.count. N is a sequentially assigned integer.

    - DiskOrderedCursor-N represents a DiskOrderedCursor that has not yet been closed by DiskOrderedCursor.close(). All active files are initially protected when the cursor is opened, but these are not reserved files and only appear in the map if they are cleaned and become reserved while the cursor is open. N is a sequentially assigned integer.

    - Syncup-N represents an in-progress negotiation between a master and replica node in an HA replication group to establish a replication stream. This is normally a very short negotiation and occurs when a replica joins the group or after an election is held. During syncup, all reserved files are protected. N is the node name of the other node involved in the syncup, i.e., if this node is a master then it is the name of the replica, and vice versa.

    - Feeder-N represents an HA master node that is supplying the replication stream to a replica. Normally data in active files is being supplied and this data is not in the reserved or protected categories. But if the replica is lagging, data from reserved files may be supplied, and in that case will be protected and appear in the map. N is the node name of the replica receiving the replication stream.

    - NetworkRestore-N represents an HA replica or master node that is supplying files to a node that is performing a NetworkRestore. The files supplied are all active files plus the two most recently written reserved files. The two reserved files will appear in the map, as well as any of the active files that were cleaned and became reserved during the network restore. Files that have already been copied by the network restore are not protected. N is the name of the node performing the NetworkRestore.

    When more than one entity is included in the map, in general the largest value points to the entity primarily responsible for preventing reclamation of disk space. Note that the values normally sum to more than getProtectedLogSize(), since protection often overlaps.

    The string format of this stat consists of name=size pairs separated by semicolons, where name is the entity name described above and size is the number of protected bytes.

    Since: 7.5
    See Also: Cleaner Statistics
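A sketch of walking this map to report which entities are pinning reserved files; the ProtectedLogReport class name is illustrative, and "env" is assumed to be an open Environment.

    import java.util.Map;
    import java.util.SortedMap;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;

    final class ProtectedLogReport {
        static void print(Environment env) {
            EnvironmentStats stats = env.getStats(null);
            SortedMap<String, Long> protectedBy = stats.getProtectedLogSizeMap();
            for (Map.Entry<String, Long> e : protectedBy.entrySet()) {
                // Keys look like "Backup-1" or "Feeder-<nodeName>";
                // values are the bytes protected by that entity.
                System.out.printf("%s protects %,d bytes%n", e.getKey(), e.getValue());
            }
            // Note: the values may sum to more than getProtectedLogSize(),
            // since protection often overlaps.
        }
    }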
getAvailableLogSize

public long getAvailableLogSize()

    "Bytes available for write operations when unprotected reserved files are deleted: free space + reservedLogSize - protectedLogSize."

    Group: "Cleaning"
    Name: "availableLogSize"

    This is the amount that can be logged by write operations, and other JE activity such as checkpointing, without violating a disk limit. The files making up reservedLogSize can be deleted to make room for these write operations, so availableLogSize is the sum of the current disk free space and the reserved size that is not protected (reservedLogSize - protectedLogSize). The current disk free space is calculated using the disk volume's free space, EnvironmentConfig.MAX_DISK and EnvironmentConfig.FREE_DISK.

    Note that when a record is written, the number of bytes includes JE overheads for the record. In addition, writing causes Btree metadata to be logged during checkpoints, and other metadata is also written by JE. So the space occupied on disk by a given set of records cannot be calculated by simply summing the key/data sizes.

    Also note that availableLogSize will be negative when a disk limit has been violated, representing the amount that needs to be freed before write operations are allowed.

    Since: 7.5
    See Also: Cleaner Statistics, EnvironmentConfig.MAX_DISK, EnvironmentConfig.FREE_DISK
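A sketch of a headroom check built on this stat; the DiskHeadroomCheck class name and the 1 GB threshold are illustrative assumptions, and "env" is assumed to be an open Environment.

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;

    final class DiskHeadroomCheck {
        // Hypothetical alarm threshold: 1 GB of remaining writable space.
        static final long MIN_HEADROOM_BYTES = 1L << 30;

        static void check(Environment env) {
            EnvironmentStats stats = env.getStats(null);
            long available = stats.getAvailableLogSize();
            if (available < 0) {
                // Negative means a disk limit has been violated; this is the
                // amount that must be freed before writes are allowed again.
                System.err.printf("Disk limit violated; free %,d bytes%n", -available);
            } else if (available < MIN_HEADROOM_BYTES) {
                System.err.printf("Low disk headroom: %,d bytes available%n", available);
            }
        }
    }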
getTotalLogSize

public long getTotalLogSize()

    "Total bytes used by data files on disk: activeLogSize + reservedLogSize."

    Group: "Cleaning"
    Name: "totalLogSize"

    See Also: Cleaner Statistics
getNCacheMiss

public long getNCacheMiss()

    The total number of requests for database objects which were not in memory.

getEndOfLog

public long getEndOfLog()

    The location of the next entry to be written to the log.

    Note that the log entries prior to this position may not yet have been flushed to disk. Flushing can be forced using a Sync or WriteNoSync commit, or a checkpoint.
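One way to force the flush mentioned in the note above is a forced checkpoint, sketched here; the ForceFlush class name is illustrative, while CheckpointConfig and Environment.checkpoint() are the JE API. A Sync commit on a transaction would achieve the same durability for that transaction's writes.

    import com.sleepycat.je.CheckpointConfig;
    import com.sleepycat.je.Environment;

    final class ForceFlush {
        static void flushViaCheckpoint(Environment env) {
            CheckpointConfig config = new CheckpointConfig();
            config.setForce(true); // run even if JE's heuristics say no checkpoint is needed
            env.checkpoint(config);
        }
    }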
getNFSyncs

public long getNFSyncs()

    The number of fsyncs issued through the group commit manager. A subset of nLogFsyncs.

getNFSyncRequests

public long getNFSyncRequests()

    The number of fsyncs requested through the group commit manager.

getNFSyncTimeouts

public long getNFSyncTimeouts()

    The number of fsync requests submitted to the group commit manager which timed out.

getFSyncTime

public long getFSyncTime()

    The total number of milliseconds used to perform fsyncs.

    Since: 7.0, although the stat was output by toString() and appeared in the je.stat.csv file in earlier versions.

getFSyncMaxTime

public long getFSyncMaxTime()

    The maximum number of milliseconds used to perform a single fsync.

    Since: 7.0

getNLogFSyncs

public long getNLogFSyncs()

    The total number of fsyncs of the JE log. This includes those fsyncs issued on behalf of transaction commits.
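The time and count stats above can be combined into rough latency figures, as in this sketch; the FsyncLatency class name is illustrative, and the average (total fsync time divided by total log fsyncs) is an assumption of this sketch, not a stat JE reports directly.

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;

    final class FsyncLatency {
        static void print(Environment env) {
            EnvironmentStats stats = env.getStats(null);
            long fsyncs = stats.getNLogFSyncs();
            if (fsyncs > 0) {
                // Approximate average: total milliseconds / total log fsyncs.
                double avgMs = (double) stats.getFSyncTime() / fsyncs;
                System.out.printf("fsyncs=%d avg=%.2fms max=%dms%n",
                                  fsyncs, avgMs, stats.getFSyncMaxTime());
            }
        }
    }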
getNLogBuffers

public int getNLogBuffers()

    The number of log buffers currently instantiated.
        + + + +
          +
        • +

          getNRandomReads

          +
          public long getNRandomReads()
          +
          The number of disk reads which required repositioning the disk head + more than 1MB from the previous file position. Reads in a different + *.jdb log file then the last IO constitute a random read. +

          + This number is approximate and may differ from the actual number of + random disk reads depending on the type of disks and file system, disk + geometry, and file system cache size.

          +
        • +
        + + + +
          +
        • +

          getNRandomReadBytes

          +
          public long getNRandomReadBytes()
          +
          The number of bytes read which required repositioning the disk head + more than 1MB from the previous file position. Reads in a different + *.jdb log file then the last IO constitute a random read. +

          + This number is approximate vary depending on the type of disks and file + system, disk geometry, and file system cache size.

          +
        • +
        + + + +
          +
        • +

          getNRandomWrites

          +
          public long getNRandomWrites()
          +
          The number of disk writes which required repositioning the disk head by + more than 1MB from the previous file position. Writes to a different + *.jdb log file (i.e. a file "flip") then the last IO constitute a random + write. +

          + This number is approximate and may differ from the actual number of + random disk writes depending on the type of disks and file system, disk + geometry, and file system cache size.

          +
        • +
        + + + +
          +
        • +

          getNRandomWriteBytes

          +
          public long getNRandomWriteBytes()
          +
          The number of bytes written which required repositioning the disk head + more than 1MB from the previous file position. Writes in a different + *.jdb log file then the last IO constitute a random write. +

          + This number is approximate vary depending on the type of disks and file + system, disk geometry, and file system cache size.

          +
        • +
        + + + +
          +
        • +

          getNSequentialReads

          +
          public long getNSequentialReads()
          +
          The number of disk reads which did not require repositioning the disk + head more than 1MB from the previous file position. Reads in a + different *.jdb log file then the last IO constitute a random read. +

          + This number is approximate and may differ from the actual number of + sequential disk reads depending on the type of disks and file system, + disk geometry, and file system cache size.

          +
        • +
        + + + +
          +
        • +

          getNSequentialReadBytes

          +
          public long getNSequentialReadBytes()
          +
          The number of bytes read which did not require repositioning the disk + head more than 1MB from the previous file position. Reads in a + different *.jdb log file then the last IO constitute a random read. +

          + This number is approximate vary depending on the type of disks and file + system, disk geometry, and file system cache size.

          +
        • +
        + + + +
          +
        • +

          getNSequentialWrites

          +
          public long getNSequentialWrites()
          +
The number of disk writes which did not require repositioning the disk + head by more than 1MB from the previous file position. Writes to a + different *.jdb log file (i.e. a file "flip") than the last IO + constitute a random write. +

          + This number is approximate and may differ from the actual number of + sequential disk writes depending on the type of disks and file system, + disk geometry, and file system cache size.

          +
        • +
        + + + +
          +
        • +

          getNSequentialWriteBytes

          +
          public long getNSequentialWriteBytes()
          +
The number of bytes written which did not require repositioning the + disk head more than 1MB from the previous file position. Writes in a + different *.jdb log file than the last IO constitute a random write. +

+ This number is approximate and may vary depending on the type of disks and file + system, disk geometry, and file system cache size.

          +
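          The eight counters above can be combined into a rough picture of the
          workload's I/O access pattern. A minimal sketch in Java, assuming an
          open Environment named env (and keeping in mind that all of these
          counters are documented above as approximate):

              import com.sleepycat.je.Environment;
              import com.sleepycat.je.EnvironmentStats;
              import com.sleepycat.je.StatsConfig;

              public class IoProfile {
                  // Rough random-vs-sequential breakdown from the stats above.
                  static void printIoProfile(Environment env) {
                      EnvironmentStats stats = env.getStats(new StatsConfig());
                      long randReads = stats.getNRandomReads();
                      long seqReads = stats.getNSequentialReads();
                      if (randReads + seqReads > 0) {
                          System.out.printf("random read fraction: %.2f%n",
                              (double) randReads / (randReads + seqReads));
                      }
                      long randWrites = stats.getNRandomWrites();
                      long seqWrites = stats.getNSequentialWrites();
                      if (randWrites + seqWrites > 0) {
                          System.out.printf("random write fraction: %.2f%n",
                              (double) randWrites / (randWrites + seqWrites));
                      }
                  }
              }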
        • +
        + + + +
          +
        • +

          getNBytesReadFromWriteQueue

          +
          public long getNBytesReadFromWriteQueue()
          +
          The number of bytes read to fulfill file read operations by reading out + of the pending write queue.
          +
        • +
        + + + +
          +
        • +

          getNBytesWrittenFromWriteQueue

          +
          public long getNBytesWrittenFromWriteQueue()
          +
          The number of bytes written from the pending write queue.
          +
        • +
        + + + +
          +
        • +

          getNReadsFromWriteQueue

          +
          public long getNReadsFromWriteQueue()
          +
          The number of file read operations which were fulfilled by reading out + of the pending write queue.
          +
        • +
        + + + +
          +
        • +

          getNWritesFromWriteQueue

          +
          public long getNWritesFromWriteQueue()
          +
The number of file write operations executed from the pending write + queue.
          +
        • +
        + + + +
          +
        • +

          getNWriteQueueOverflow

          +
          public long getNWriteQueueOverflow()
          +
The number of write operations which would overflow the Write Queue.
          +
        • +
        + + + +
          +
        • +

          getNWriteQueueOverflowFailures

          +
          public long getNWriteQueueOverflowFailures()
          +
The number of write operations which would overflow the Write Queue + and could not be queued.
          +
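          A small sketch tying the write-queue counters above together,
          assuming stats is an EnvironmentStats snapshot obtained from
          Environment.getStats (the je.log.writeQueueSize parameter named in
          the comment is, as an assumption here, the knob that sizes this
          queue):

              // Reads served from the pending write queue, and overflow health.
              long served   = stats.getNReadsFromWriteQueue();
              long overflow = stats.getNWriteQueueOverflow();
              long failures = stats.getNWriteQueueOverflowFailures();
              System.out.println("reads served from write queue: " + served);
              if (failures > 0) {
                  // Persistent failures suggest je.log.writeQueueSize may be
                  // too small for this workload.
                  System.out.println(failures + " of " + overflow
                      + " overflows could not be queued");
              }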
        • +
        + + + +
          +
        • +

          getBufferBytes

          +
          public long getBufferBytes()
          +
          The total memory currently consumed by log buffers, in bytes. If this + environment uses the shared cache, this method returns only the amount + used by this environment.
          +
        • +
        + + + +
          +
        • +

          getNNotResident

          +
          public long getNNotResident()
          +
The number of requests for database objects not contained within the + in-memory data structures.
          +
        • +
        + + + +
          +
        • +

          getNRepeatFaultReads

          +
          public long getNRepeatFaultReads()
          +
          The number of reads which had to be repeated when faulting in an object + from disk because the read chunk size controlled by je.log.faultReadSize + is too small.
          +
        • +
        + + + +
          +
        • +

          getNTempBufferWrites

          +
          public long getNTempBufferWrites()
          +
          The number of writes which had to be completed using the temporary + marshalling buffer because the fixed size log buffers specified by + je.log.totalBufferBytes and je.log.numBuffers were not large enough.
          +
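          If nTempBufferWrites stays non-zero under a steady workload, the log
          buffers named above can be enlarged. A hedged sketch, assuming the
          EnvironmentConfig constants correspond to je.log.totalBufferBytes and
          je.log.numBuffers; the values are illustrative only:

              import com.sleepycat.je.EnvironmentConfig;

              EnvironmentConfig config = new EnvironmentConfig();
              // je.log.totalBufferBytes: total memory for all log buffers.
              config.setConfigParam(EnvironmentConfig.LOG_TOTAL_BUFFER_BYTES,
                                    String.valueOf(16 * 1024 * 1024));
              // je.log.numBuffers: how many buffers that total is split into.
              config.setConfigParam(EnvironmentConfig.LOG_NUM_BUFFERS, "5");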
        • +
        + + + +
          +
        • +

          getNFileOpens

          +
          public int getNFileOpens()
          +
          The number of times a log file has been opened.
          +
        • +
        + + + +
          +
        • +

          getNOpenFiles

          +
          public int getNOpenFiles()
          +
          The number of files currently open in the file cache.
          +
        • +
        + + + +
          +
        • +

          getRequiredEvictBytes

          +
          public long getRequiredEvictBytes()
          +
          Deprecated. The method returns 0 always.
          +
        • +
        + + + +
          +
        • +

          getNNodesScanned

          +
          public long getNNodesScanned()
          +
          Deprecated. This statistic has no meaning after the implementation + of the new evictor in JE 6.0. The method returns 0 always.
          +
        • +
        + + + +
          +
        • +

          getNEvictPasses

          +
          public long getNEvictPasses()
          +
          Deprecated. Use getNEvictionRuns() instead.
          +
        • +
        + + + +
          +
        • +

          getNNodesSelected

          +
          public long getNNodesSelected()
          +
Deprecated. Use getNNodesTargeted() instead.
          +
        • +
        + + + +
          +
        • +

          getNNodesExplicitlyEvicted

          +
          public long getNNodesExplicitlyEvicted()
          +
          Deprecated. Use getNNodesEvicted() instead.
          +
        • +
        + + + +
          +
        • +

          getNBINsStripped

          +
          public long getNBINsStripped()
          +
          Deprecated. Use getNNodesStripped() instead.
          +
        • +
        + + + +
          +
        • +

          getNBINsMutated

          +
          public long getNBINsMutated()
          +
          Deprecated. Use getNNodesMutated() instead.
          +
        • +
        + + + + + + + +
          +
        • +

          getNNodesTargeted

          +
          public long getNNodesTargeted()
          +

          "Number of nodes (INs) selected as eviction targets."

          + +

          Group: "Cache" +
          Name: "nNodesTargeted"

          + +

          An eviction target may actually be evicted, or skipped, or put back + to the LRU, potentially after partial eviction (stripping) or + BIN-delta mutation is done on it. + See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getNRootNodesEvicted

          +
          public long getNRootNodesEvicted()
          +

          "Number of database root nodes (INs) evicted."

          + +

          Group: "Cache" +
          Name: "nRootNodesEvicted"

          + +

          The root node of a Database is only evicted after all other nodes in + the Database, so this implies that the entire Database has fallen out of + cache and is probably closed. + See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getNDirtyNodesEvicted

          +
          public long getNDirtyNodesEvicted()
          +

          "Number of dirty target nodes logged and evicted."

          + +

          Group: "Cache" +
          Name: "nDirtyNodesEvicted"

          + +

          When a dirty IN is evicted from main cache and no off-heap cache is + configured, the IN must be logged. When an off-heap cache is configured, + dirty INs can be moved from main cache to off-heap cache based on LRU, + but INs are only logged when they are evicted from off-heap cache. + Therefore, this stat is always zero when an off-heap cache is configured. + See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNLNsEvicted

          +
          public long getNLNsEvicted()
          +

          "Number of LNs evicted as a result of LRU-based eviction (but not CacheMode.EVICT_LN)."

          + +

          Group: "Cache" +
          Name: "nLNsEvicted"

          + +

          When a BIN is considered for eviction based on LRU, if the BIN + contains resident LNs in main cache, it is stripped of the LNs rather + than being evicted. This stat reflects LNs evicted in this manner, but + not LNs evicted as a result of using CacheMode.EVICT_LN. Also + note that embedded LNs + are evicted immediately and are not reflected in this stat value. + See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNNodesStripped

          +
          public long getNNodesStripped()
          +

          "Number of target BINs whose child LNs were evicted (stripped)."

          + +

          Group: "Cache" +
          Name: "nNodesStripped"

          + +

          BINs are stripped in order to evict LNs. + See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getNNodesMutated

          +
          public long getNNodesMutated()
          +

          "Number of target BINs mutated to BIN-deltas."

          + +

          Group: "Cache" +
          Name: "nNodesMutated"

          + +

          When a BIN is considered for eviction based on LRU, if the BIN + can be mutated to a BIN-delta, it is mutated rather than being evicted. + Note that when an off-heap cache is configured, this stat value will be + zero because BIN mutation will take place only in the off-heap cache; + see getOffHeapNodesMutated(). + See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNNodesPutBack

          +
          public long getNNodesPutBack()
          +

          "Number of target nodes (INs) moved to the cold end of the LRU list without any action taken on them."

          + +

          Group: "Cache" +
          Name: "nNodesPutBack"

          + +

          Reasons for putting back a target IN are:

          +
            +
          • The IN was accessed by an operation while the evictor was + processing it.
          • +
          • To prevent the cache usage for Btree objects from falling below + EnvironmentConfig.TREE_MIN_MEMORY.
          • +
          + +

          See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getNNodesMovedToDirtyLRU

          +
          public long getNNodesMovedToDirtyLRU()
          +

          "Number of nodes (INs) moved from the mixed/priority-1 to the dirty/priority-2 LRU list."

          + +

          Group: "Cache" +
          Name: "nNodesMovedToDirtyLRU"

          + +

When an off-heap cache is not configured, dirty nodes are evicted last + from the main cache by moving them to a 2nd priority LRU list. When an + off-heap cache is configured, level-2 INs that reference off-heap BINs are + evicted last from the main cache, using the same approach. + See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getNNodesSkipped

          +
          public long getNNodesSkipped()
          +

          "Number of nodes (INs) that did not require any action."

          + +

          Group: "Cache" +
          Name: "nNodesSkipped"

          + +

          Reasons for skipping a target IN are:

          +
            +
          • It has already been evicted by another thread.
          • +
          • It cannot be evicted because concurrent activity added resident + child nodes.
          • +
          • It cannot be evicted because it is dirty and the environment is + read-only.
          • +
          +

          See CacheMode for a description of eviction.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getNThreadUnavailable

          +
          public long getNThreadUnavailable()
          +

          "Number of eviction tasks that were submitted to the background evictor pool, but were refused because all eviction threads were busy."

          + +

          Group: "Cache" +
          Name: "nThreadUnavailable"

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNSharedCacheEnvironments

          +
          public int getNSharedCacheEnvironments()
          +

          "Number of Environments sharing the main cache."

          + +

          Group: "Cache" +
          Name: "nSharedCacheEnvironments"

          + +

          This method says nothing about whether this environment is using + the shared cache or not.

          +
        • +
        + + + +
          +
        • +

          getNLNsFetch

          +
          public long getNLNsFetch()
          +

          "Number of LNs (data records) requested by btree operations."

          + +

          Group: "Cache" +
          Name: "nLNsFetch"

          + +

          Note that the number of LN fetches does not necessarily correspond + to the number of records accessed, since some LNs may be + embedded.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNBINsFetch

          +
          public long getNBINsFetch()
          +

          "Number of BINs (bottom internal nodes) and BIN-deltas requested by btree operations."

          + +

          Group: "Cache" +
          Name: "nBINsFetch"

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNUpperINsFetch

          +
          public long getNUpperINsFetch()
          +

          "Number of Upper INs (non-bottom internal nodes) requested by btree operations."

          + +

          Group: "Cache" +
          Name: "nUpperINsFetch"

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNLNsFetchMiss

          +
          public long getNLNsFetchMiss()
          +

          "Number of LNs (data records) requested by btree operations that were not in main cache."

          + +

          Group: "Cache" +
          Name: "nLNsFetchMiss"

          + +

          Note that the number of LN fetches does not necessarily correspond + to the number of records accessed, since some LNs may be + embedded.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNBINsFetchMiss

          +
          public long getNBINsFetchMiss()
          +

          "Number of full BINs (bottom internal nodes) and BIN-deltas fetched to satisfy btree operations that were not in main cache."

          + +

          Group: "Cache" +
          Name: "nBINsFetchMiss"

          + +

          This is the portion of getNBINsFetch() that resulted in a + fetch miss. The fetch may be for a full BIN or BIN-delta + (getNBINDeltasFetchMiss()), depending on whether a BIN-delta + currently exists (see EnvironmentConfig.TREE_BIN_DELTA). + However, additional full BIN fetches occur when mutating a BIN-delta to + a full BIN (getNFullBINsMiss()) whenever this is necessary for + completing an operation.

          + +

          Therefore, the total number of BIN fetch misses + (including BIN-deltas) is:

          + +

          nFullBINsMiss + nBINsFetchMiss

          + +

          And the total number of full BIN (vs BIN-delta) fetch misses is:

          + +

nFullBINsMiss + nBINsFetchMiss - nBINDeltasFetchMiss

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
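          The two formulas above translate directly into code. A sketch,
          assuming stats is a current EnvironmentStats snapshot:

              long binFetchMiss   = stats.getNBINsFetchMiss();
              long fullBinMiss    = stats.getNFullBINsMiss();
              long deltaFetchMiss = stats.getNBINDeltasFetchMiss();

              // Total BIN fetch misses, including BIN-deltas:
              long totalMisses = fullBinMiss + binFetchMiss;

              // Total full BIN (vs BIN-delta) fetch misses:
              long fullMisses = fullBinMiss + binFetchMiss - deltaFetchMiss;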
        • +
        + + + +
          +
        • +

          getNBINDeltasFetchMiss

          +
          public long getNBINDeltasFetchMiss()
          +

          "Number of BIN-deltas (partial BINs) fetched to satisfy btree operations that were not in main cache."

          + +

          Group: "Cache" +
          Name: "nBINDeltasFetchMiss"

          + +

This represents the portion of getNBINsFetchMiss() that fetched + BIN-deltas rather than full BINs. See getNBINsFetchMiss().

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNFullBINsMiss

          +
          public long getNFullBINsMiss()
          +

          "Number of times a BIN-delta had to be mutated to a full BIN (and as a result a full BIN had to be read in from the log)."

          + +

          Group: "Cache" +
          Name: "nFullBINsMiss"

          + +

Note that this stat does not include full BIN misses that are + not due to BIN-delta mutations. See + getNBINsFetchMiss().

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNUpperINsFetchMiss

          +
          public long getNUpperINsFetchMiss()
          +

          "Number of Upper INs (non-bottom internal nodes) requested by btree operations that were not in main cache."

          + +

          Group: "Cache" +
          Name: "nUpperINsFetchMiss"

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNBINsFetchMissRatio

          +
          public float getNBINsFetchMissRatio()
          +

          "The BIN fetch miss ratio (nBINsFetchMiss / nBINsFetch)"

          + +

          Group: "Cache" +
          Name: "nBINsFetchMissRatio"

          + +

This stat can be misleading because it does not include the number + of full BIN fetch misses resulting from BIN-delta mutations (getNFullBINsMiss()). It may be improved, or perhaps deprecated, in a + future release.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getNBINDeltaBlindOps

          +
          public long getNBINDeltaBlindOps()
          +

          "The number of operations performed blindly in BIN deltas"

          + +

          Group: "Cache" +
          Name: "nBinDeltaBlindOps"

          + +

          Note that this stat is misplaced. It should be in the + "Environment" group + and will probably be moved there in a future release.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging, +EnvironmentConfig.TREE_BIN_DELTA
          +
          +
        • +
        + + + +
          +
        • +

          getNCachedUpperINs

          +
          public long getNCachedUpperINs()
          +

          "Number of upper INs (non-bottom internal nodes) in main cache."

          + +

          Group: "Cache" +
          Name: "nCachedUpperINs"

          + +

          When used on shared environment caches, zero is returned when fast stats are requested.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNCachedBINs

          +
          public long getNCachedBINs()
          +

          "Number of BINs (bottom internal nodes) and BIN-deltas in main cache."

          + +

          Group: "Cache" +
          Name: "nCachedBINs"

          + +

          When used on shared environment caches, zero is returned when fast stats are requested.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getNCachedBINDeltas

          +
          public long getNCachedBINDeltas()
          +

          "Number of BIN-deltas (partial BINs) in main cache. This is a subset of the nCachedBINs value."

          + +

          Group: "Cache" +
          Name: "nCachedBINDeltas"

          + +

          When used on shared environment caches, zero is returned when fast stats are requested.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
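          Because the three nCached* counts above report zero for shared
          environment caches when fast stats are requested, full statistics
          must be gathered to read them in that configuration. A sketch,
          assuming an open Environment named env:

              import com.sleepycat.je.StatsConfig;

              StatsConfig sc = new StatsConfig();
              sc.setFast(false); // request full (slower) statistics
              EnvironmentStats stats = env.getStats(sc);
              long upperINs  = stats.getNCachedUpperINs();
              long bins      = stats.getNCachedBINs();
              long binDeltas = stats.getNCachedBINDeltas(); // subset of bins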
        • +
        + + + +
          +
        • +

          getNINSparseTarget

          +
          public long getNINSparseTarget()
          +

          "Number of INs that use a compact sparse array representation to point to child nodes in the main cache."

          + +

          Group: "Cache" +
          Name: "nINSparseTarget"

          + +

          Each IN contains an array of references to child INs or LNs. When + there are between one and four children resident, the size of the array + is reduced to four. This saves a significant amount of cache memory for + BINs when CacheMode.EVICT_LN is used, because there are + typically only a small number of LNs resident in main cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getNINNoTarget

          +
          public long getNINNoTarget()
          +

          "Number of INs that use a compact representation when none of its child nodes are in the main cache."

          + +

          Group: "Cache" +
          Name: "nINNoTarget"

          + +

          Each IN contains an array of references to child INs or LNs. When + there are no children resident, no array is allocated. This saves a + significant amount of cache memory for BINs when CacheMode.EVICT_LN is used, because there are typically only a small + number of LNs resident in main cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + + + + + + + + + + + + + +
          +
        • +

          getNBINsEvictedEvictorThread

          +
          public long getNBINsEvictedEvictorThread()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBINsEvictedManual

          +
          public long getNBINsEvictedManual()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBINsEvictedCritical

          +
          public long getNBINsEvictedCritical()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBINsEvictedCacheMode

          +
          public long getNBINsEvictedCacheMode()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBINsEvictedDaemon

          +
          public long getNBINsEvictedDaemon()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNUpperINsEvictedEvictorThread

          +
          public long getNUpperINsEvictedEvictorThread()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNUpperINsEvictedManual

          +
          public long getNUpperINsEvictedManual()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNUpperINsEvictedCritical

          +
          public long getNUpperINsEvictedCritical()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNUpperINsEvictedCacheMode

          +
          public long getNUpperINsEvictedCacheMode()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNUpperINsEvictedDaemon

          +
          public long getNUpperINsEvictedDaemon()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBatchesEvictorThread

          +
          public long getNBatchesEvictorThread()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBatchesManual

          +
          public long getNBatchesManual()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBatchesCacheMode

          +
          public long getNBatchesCacheMode()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBatchesCritical

          +
          public long getNBatchesCritical()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBatchesDaemon

          +
          public long getNBatchesDaemon()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getNBytesEvictedEvictorThread

          +
          public long getNBytesEvictedEvictorThread()
          +

          "Number of bytes evicted by evictor pool threads."

          + +

          Group: "Cache" +
          Name: "nBytesEvictedEVICTORTHREAD"

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNBytesEvictedManual

          +
          public long getNBytesEvictedManual()
          +

          "Number of bytes evicted by the Environment.evictMemory or during Environment startup."

          + +

          Group: "Cache" +
          Name: "nBytesEvictedMANUAL"

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNBytesEvictedCacheMode

          +
          public long getNBytesEvictedCacheMode()
          +

          "Number of bytes evicted by operations for which CacheMode.EVICT_BIN is specified."

          + +

          Group: "Cache" +
          Name: "nBytesEvictedCACHEMODE"

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNBytesEvictedCritical

          +
          public long getNBytesEvictedCritical()
          +

          "Number of bytes evicted in the application thread because the cache is over budget."

          + +

          Group: "Cache" +
          Name: "nBytesEvictedCRITICAL"

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getNBytesEvictedDeamon

          +
          public long getNBytesEvictedDeamon()
          +

          "Number of bytes evicted by JE deamon threads."

          + +

          Group: "Cache" +
          Name: "nBytesEvictedDAEMON"

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getAvgBatchEvictorThread

          +
          public long getAvgBatchEvictorThread()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getAvgBatchManual

          +
          public long getAvgBatchManual()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getAvgBatchCacheMode

          +
          public long getAvgBatchCacheMode()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getAvgBatchCritical

          +
          public long getAvgBatchCritical()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getAvgBatchDaemon

          +
          public long getAvgBatchDaemon()
          +
          Deprecated. This statistic has been removed. The method returns 0 + always.
          +
        • +
        + + + +
          +
        • +

          getSharedCacheTotalBytes

          +
          public long getSharedCacheTotalBytes()
          +

          "Total amount of the shared JE main cache in use, in bytes."

          + +

          Group: "Cache" +
          Name: "sharedCacheTotalBytes"

          + +

          If this + environment uses the shared cache, this method returns the total size of + the shared cache, i.e., the sum of the getCacheTotalBytes() for + all environments that are sharing the cache. If this environment does + not use the shared cache, this method returns zero.

          + +

          To get the configured maximum cache size, see EnvironmentMutableConfig.getCacheSize().

          +
        • +
        + + + + + + + +
          +
        • +

          getDataBytes

          +
          public long getDataBytes()
          +

          "Amount of JE main cache used for holding data, keys and internal Btree nodes, in bytes."

          + +

          Group: "Cache" +
          Name: "dataBytes"

          + +

          The value returned by this method includes the amount returned by + getDataAdminBytes().

          + +

          If this environment uses the shared cache, this method returns only + the amount used by this environment.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getDataAdminBytes

          +
          public long getDataAdminBytes()
          +

          "Amount of JE main cache used for holding per-database cleaner utilization metadata, in bytes."

          + +

          Group: "Cache" +
          Name: "dataAdminBytes"

          + +

          If this environment uses the shared cache, this method returns only + the amount used by this environment.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getDOSBytes

          +
          public long getDOSBytes()
          +

          "Amount of JE main cache consumed by disk-ordered cursor and Database.count operations, in bytes."

          + +

          Group: "Cache" +
          Name: "DOSBytes"

          + +

          If this environment uses the shared cache, this method returns only + the amount used by this environment.

          +
          +
          See Also:
          +
          Cache Statistics: Unexpected + Sizes
          +
          +
        • +
        + + + +
          +
        • +

          getAdminBytes

          +
          public long getAdminBytes()
          +

          "Number of bytes of JE main cache used for cleaner and checkpointer metadata, in bytes."

          + +

          Group: "Cache" +
          Name: "adminBytes"

          + +

          If this environment uses the shared cache, this method returns only + the amount used by this environment.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getLockBytes

          +
          public long getLockBytes()
          +

          "Number of bytes of JE cache used for holding locks and transactions, in bytes."

          + +

          Group: "Cache" +
          Name: "lockBytes"

          + +

          If this environment uses the shared cache, this method returns only + the amount used by this environment.

          +
          +
          See Also:
          +
          Cache Statistics: Unexpected + Sizes
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getOffHeapAllocFailures

          +
          public long getOffHeapAllocFailures()
          +

          "Number of off-heap allocation failures due to lack of system memory."

          + +

          Group: "OffHeap" +
          Name: "offHeapAllocFailure"

          + +

          Currently, with the default off-heap allocator, an allocation + failure occurs only when OutOfMemoryError is thrown by Unsafe.allocateMemory. This might be considered a fatal error, since it + means that no memory is available on the machine or VM. In practice, + we have not seen this occur because Linux will automatically kill + processes that are rapidly allocating memory when available memory is + very low.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapAllocOverflows

          +
          public long getOffHeapAllocOverflows()
          +

          "Number of off-heap allocation attempts that exceeded the cache size."

          + +

          Group: "OffHeap" +
          Name: "offHeapAllocOverflow"

          + +

          Currently, with the default off-heap allocator, this never happens + because the allocator will perform the allocation as long as any memory + is available. Even so, the off-heap evictor normally prevents + overflowing of the off-heap cache by freeing memory before it is + needed.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapThreadUnavailable

          +
          public long getOffHeapThreadUnavailable()
          +

          "Number of eviction tasks that were submitted to the background off-heap evictor pool, but were refused because all eviction threads were busy."

          + +

          Group: "OffHeap" +
          Name: "offHeapThreadUnavailable"

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapNodesTargeted

          +
          public long getOffHeapNodesTargeted()
          +

          "Number of BINs selected as off-heap eviction targets."

          + +

          Group: "OffHeap" +
          Name: "offHeapNodesTargeted"

          + +

Nodes are selected as targets by the evictor based on LRU, always + selecting from the cold end of the LRU list. First, non-dirty nodes and + nodes referring to off-heap LNs are selected based on LRU. When there + are no more such nodes, dirty nodes with no off-heap LNs are + selected, based on LRU.

          + +

          An eviction target may actually be evicted, or skipped, or put + back to the LRU, potentially after stripping child LNs or mutation to + a BIN-delta.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapCriticalNodesTargeted

          +
          public long getOffHeapCriticalNodesTargeted()
          +

          "Number of nodes targeted in \'critical eviction\' mode."

          + +

          Group: "OffHeap" +
          Name: "offHeapCriticalNodesTargeted"

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Critical + Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapNodesEvicted

          +
          public long getOffHeapNodesEvicted()
          +

          "Number of target BINs (including BIN-deltas) evicted from the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapNodesEvicted"

          + +

          An evicted BIN is completely removed from the off-heap cache and LRU + list. If it is dirty, it must be logged. A BIN is evicted only if it has + no off-heap child LNs and it cannot be mutated to a BIN-delta.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapDirtyNodesEvicted

          +
          public long getOffHeapDirtyNodesEvicted()
          +

          "Number of target BINs evicted from the off-heap cache that were dirty and therefore were logged."

          + +

          Group: "OffHeap" +
          Name: "offHeapDirtyNodesEvicted"

          + +

          This stat value is a subset of getOffHeapNodesEvicted().

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapNodesStripped

          +
          public long getOffHeapNodesStripped()
          +

          "Number of target BINs whose off-heap child LNs were evicted (stripped)."

          + +

          Group: "OffHeap" +
          Name: "offHeapNodesStripped"

          + +

          When a BIN is stripped, all off-heap LNs that the BIN refers to are + evicted. The getOffHeapLNsEvicted() stat is incremented + accordingly.

          + +

          A stripped BIN could be a BIN in main cache that is stripped of + off-heap LNs, or a BIN that is off-heap and also refers to off-heap + LNs. When a main cache BIN is stripped, it is removed from the + off-heap LRU. When an off-heap BIN is stripped, it is either modified + in place to remove the LN references (this is done when a small + number of LNs are referenced and the wasted space is small), or is + copied to a new, smaller off-heap block with no LN references.

          + +

          After stripping an off-heap BIN, it is moved to the hot end of the + LRU list. Off-heap BINs are only mutated to BIN-deltas or evicted + completely when they do not refer to any off-heap LNs. This gives + BINs precedence over LNs in the cache. + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapNodesMutated

          +
          public long getOffHeapNodesMutated()
          +

          "Number of off-heap target BINs mutated to BIN-deltas."

          + +

          Group: "OffHeap" +
          Name: "offHeapNodesMutated"

          + +

Mutation to a BIN-delta is performed for full BINs that do not + refer to any off-heap LNs and can be represented as BIN-deltas in + cache and on disk (see EnvironmentConfig.TREE_BIN_DELTA). + When a BIN is mutated, it is copied to a new, smaller off-heap + block. After mutating an off-heap BIN, it is moved to the hot end of + the LRU list.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapNodesSkipped

          +
          public long getOffHeapNodesSkipped()
          +

          "Number of off-heap target BINs on which no action was taken."

          + +

          Group: "OffHeap" +
          Name: "offHeapNodesSkipped"

          + +

          For example, a node will be skipped if it has been moved to the + hot end of the LRU list by another thread, or more rarely, already + processed by another evictor thread. This can occur because there is + a short period of time where a targeted node has been removed from + the LRU by the evictor thread, but not yet latched.

          + +

          The number of skipped nodes is normally very small, compared to the + number of targeted nodes.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapLNsEvicted

          +
          public long getOffHeapLNsEvicted()
          +

          "Number of LNs evicted from the off-heap cache as a result of BIN stripping."

          + +

          Group: "OffHeap" +
          Name: "offHeapLNsEvicted"

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Eviction
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapLNsLoaded

          +
          public long getOffHeapLNsLoaded()
          +

          "Number of LNs loaded from the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapLNsLoaded"

          + +

          LNs are loaded when requested by CRUD operations or other internal + btree operations.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapLNsStored

          +
          public long getOffHeapLNsStored()
          +

          "Number of LNs stored into the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapLNsStored"

          + +

          LNs are stored off-heap when they are evicted from the main cache. + Note that when CacheMode.EVICT_LN is used, the LN resides in + the main cache for a very short period since it is evicted after the + CRUD operation is complete.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapBINsLoaded

          +
          public long getOffHeapBINsLoaded()
          +

          "Number of BINs loaded from the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapBINsLoaded"

          + +

          BINs are loaded when needed by CRUD operations or other internal + btree operations.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapBINsStored

          +
          public long getOffHeapBINsStored()
          +

          "Number of BINs stored into the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapBINsStored"

          + +

          BINs are stored off-heap when they are evicted from the main cache. + Note that when CacheMode.EVICT_BIN is used, the BIN resides + in the main cache for a very short period since it is evicted after + the CRUD operation is complete.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapCachedLNs

          +
          public int getOffHeapCachedLNs()
          +

          "Number of LNs residing in the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapCachedLNs"

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapCachedBINs

          +
          public int getOffHeapCachedBINs()
          +

          "Number of BINs (full BINs and BIN-deltas) residing in the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapCachedBINs"

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapCachedBINDeltas

          +
          public int getOffHeapCachedBINDeltas()
          +

          "Number of BIN-deltas residing in the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapCachedBINDeltas"

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapTotalBytes

          +
          public long getOffHeapTotalBytes()
          +

          "Total number of estimated bytes in off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapTotalBytes"

          + +

          This includes the estimated overhead for off-heap memory blocks, as + well as their contents.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          + +

          To get the configured maximum off-heap cache size, see EnvironmentMutableConfig.getOffHeapCacheSize().

          +
          +
          See Also:
          +
          Cache Statistics: Sizing
          +
          +
        • +
        + + + +
          +
        • +

          getOffHeapTotalBlocks

          +
          public long getOffHeapTotalBlocks()
          +

          "Total number of memory blocks in off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapTotalBlocks"

          + +

          There is one block for each off-heap BIN and one for each off-heap + LN. So the total number of blocks is the sum of + getOffHeapCachedLNs() and getOffHeapCachedBINs().

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
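          Since the block count above is documented as the sum of the off-heap
          LN and BIN counts, a snapshot can be sanity-checked as follows
          (a sketch; the counters may drift transiently between updates):

              long blocks = stats.getOffHeapTotalBlocks();
              long lns    = stats.getOffHeapCachedLNs();
              long bins   = stats.getOffHeapCachedBINs();
              // Documented identity; may differ briefly while stats update.
              assert blocks == lns + bins;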
          +
        • +
        + + + +
          +
        • +

          getOffHeapLRUSize

          +
          public long getOffHeapLRUSize()
          +

          "Number of LRU entries used for the off-heap cache."

          + +

          Group: "OffHeap" +
          Name: "offHeapLruSize"

          + +

          The off-heap LRU list is stored in the Java heap. Each entry occupies + 20 bytes of memory when compressed oops are used, or 24 bytes otherwise. + This memory is not considered part of the JE main cache, and is not + included in main cache statistics.

          + +

          There is one LRU entry for each off-heap BIN, and one for each BIN in + main cache that refers to one or more off-heap LNs. The latter approach + avoids an LRU entry per off-heap LN, which would use excessive amounts + of space in the Java heap. Similarly, when an off-heap BIN refers to + off-heap LNs, only one LRU entry (for the BIN) is used.

          + +

          If this environment uses the shared cache, the return value is the + total for all environments that are sharing the cache.

          +
          +
          See Also:
          +
          Cache Statistics: Debugging
          +
          +
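          The per-entry figures above allow a rough estimate of the Java-heap
          cost of the off-heap LRU list. A sketch; whether compressed oops are
          in effect depends on the JVM settings, so the flag below is an
          assumption:

              boolean compressedOops = true; // typical for heaps under ~32GB
              long entryBytes  = compressedOops ? 20 : 24;
              long lruHeapUsed = stats.getOffHeapLRUSize() * entryBytes;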
        • +
        + + + +
          +
        • +

          getRelatchesRequired

          +
          public long getRelatchesRequired()
          +
          Returns the number of latch upgrades (relatches) required while + operating on this Environment. Latch upgrades are required when an + operation assumes that a shared (read) latch will be sufficient but + later determines that an exclusive (write) latch will actually be + required.
          +
          +
          Returns:
          +
          number of latch upgrades (relatches) required.
          +
          +
        • +
        + + + +
          +
        • +

          getNOwners

          +
          public int getNOwners()
          +
          Total lock owners in lock table. Only provided when Environment.getStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNReadLocks

          +
          public int getNReadLocks()
          +
          Total read locks currently held. Only provided when Environment.getStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNTotalLocks

          +
          public int getNTotalLocks()
          +
          Total locks currently in lock table. Only provided when Environment.getStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNWaiters

          +
          public int getNWaiters()
          +
          Total transactions waiting for locks. Only provided when Environment.getStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNWriteLocks

          +
          public int getNWriteLocks()
          +
          Total write locks currently held. Only provided when Environment.getStats is + called in "slow" mode.
          +
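          The lock-table counts above are only populated when stats are
          gathered in "slow" mode. A sketch of doing so, assuming an open
          Environment named env:

              import com.sleepycat.je.StatsConfig;

              StatsConfig sc = new StatsConfig();
              sc.setFast(false); // "slow" mode: include lock-table stats
              EnvironmentStats stats = env.getStats(sc);
              System.out.println("owners: "       + stats.getNOwners()
                  + ", read locks: "  + stats.getNReadLocks()
                  + ", write locks: " + stats.getNWriteLocks()
                  + ", waiters: "     + stats.getNWaiters());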
        • +
        + + + +
          +
        • +

          getNRequests

          +
          public long getNRequests()
          +
          Total number of lock requests to date.
          +
        • +
        + + + +
          +
        • +

          getNWaits

          +
          public long getNWaits()
          +
          Total number of lock waits to date.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresNoWaiters

          +
          public int getNAcquiresNoWaiters()
          +
          Number of acquires of lock table latch with no contention.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresSelfOwned

          +
          public int getNAcquiresSelfOwned()
          +
          Number of acquires of lock table latch when it was already owned + by the caller.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresWithContention

          +
          public int getNAcquiresWithContention()
          +
          Number of acquires of lock table latch when it was already owned by + another thread.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresNoWaitSuccessful

          +
          public int getNAcquiresNoWaitSuccessful()
          +
          Number of successful no-wait acquires of the lock table latch.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresNoWaitUnSuccessful

          +
          public int getNAcquiresNoWaitUnSuccessful()
          +
          Number of unsuccessful no-wait acquires of the lock table latch.
          +
        • +
        + + + +
          +
        • +

          getNReleases

          +
          public int getNReleases()
          +
          Number of releases of the lock table latch.
          +
        • +
        + + + +
          +
        • +

          getNBinDeltaGetOps

          +
          public long getNBinDeltaGetOps()
          +
          The number of user (non-internal) Cursor and Database get operations + performed in BIN deltas.
          +
        • +
        + + + +
          +
        • +

          getNBinDeltaInsertOps

          +
          public long getNBinDeltaInsertOps()
          +
          The number of user (non-internal) Cursor and Database insert operations + performed in BIN deltas (these are insertions performed via the various + put methods).
          +
        • +
        + + + +
          +
        • +

          getNBinDeltaUpdateOps

          +
          public long getNBinDeltaUpdateOps()
          +
          The number of user (non-internal) Cursor and Database update operations + performed in BIN deltas (these are updates performed via the various + put methods).
          +
        • +
        + + + +
          +
        • +

          getNBinDeltaDeleteOps

          +
          public long getNBinDeltaDeleteOps()
          +
          The number of user (non-internal) Cursor and Database delete operations + performed in BIN deltas.
          +
        • +
          +
        • +

          getSecSearchOps

          +
          public long getSecSearchOps()
          +
          Number of successful secondary DB key search operations. +

          + This operation corresponds to a successful call to SecondaryCursor.get or SecondaryDatabase.get with + Get.SEARCH, Get.SEARCH_GTE, Get.SEARCH_BOTH, or + Get.SEARCH_BOTH_GTE. +

          + Note: Operations are currently counted as secondary DB (rather than + primary DB) operations only if the DB has been opened by the application + as a secondary DB. In particular the stats may be confusing on an HA + replica node if a secondary DB has not been opened by the application on + the replica.

          +
        • +
        + + + +
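
          To illustrate the kind of call counted here, a sketch (not from the
          original javadoc), assuming an open SecondaryDatabase handle secDb
          and a byte[] keyBytes holding the secondary key to look up:

          +
          +     DatabaseEntry secKey = new DatabaseEntry(keyBytes);
          +     DatabaseEntry priKey = new DatabaseEntry();
          +     DatabaseEntry data = new DatabaseEntry();
          +     // A non-null result is counted by getSecSearchOps(); a null
          +     // result is counted by getSecSearchFailOps() instead.
          +     OperationResult result =
          +         secDb.get(null, secKey, priKey, data, Get.SEARCH, null);
          + 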
          +
        • +

          getSecSearchFailOps

          +
          public long getSecSearchFailOps()
          +
          Number of failed secondary DB key search operations. +

          + This operation corresponds to a call to SecondaryCursor.get or SecondaryDatabase.get with Get.SEARCH, Get.SEARCH_GTE, Get.SEARCH_BOTH, or Get.SEARCH_BOTH_GTE, when the specified key is not found in the DB. +

          + Note: Operations are currently counted as secondary DB (rather than + primary DB) operations only if the DB has been opened by the application + as a secondary DB. In particular the stats may be confusing on an HA + replica node if a secondary DB has not been opened by the application on + the replica.

          +
        • +
          +
        • +

          getSecPositionOps

          +
          public long getSecPositionOps()
          +
          Number of successful secondary DB position operations. +

          + This operation corresponds to a successful call to SecondaryCursor.get or SecondaryDatabase.get with + Get.FIRST, Get.LAST, + Get.NEXT, Get.NEXT_DUP, Get.NEXT_NO_DUP, + Get.PREV, Get.PREV_DUP or Get.PREV_NO_DUP. +

          + Note: Operations are currently counted as secondary DB (rather than + primary DB) operations only if the DB has been opened by the application + as a secondary DB. In particular the stats may be confusing on an HA + replica node if a secondary DB has not been opened by the application on + the replica.

          +
        • +
        + + + +
          +
        • +

          getPriInsertOps

          +
          public long getPriInsertOps()
          +
          Number of successful primary DB insertion operations. +

          + This operation corresponds to a successful call to Cursor.put + or Database.put in one of the following cases: +

          +
        • +
        + + + +
          +
        • +

          getPriInsertFailOps

          +
          public long getPriInsertFailOps()
          +
          Number of failed primary DB insertion operations. +

          + This operation corresponds to a call to Cursor.put or Database.put with Put.NO_OVERWRITE or Put.NO_DUP_DATA, when the key could not be inserted because it + previously existed in the DB.

          +
        • +
        + + + +
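
          For example, a sketch (not from the original javadoc), assuming an
          open Database handle db and prepared key and data entries:

          +
          +     OperationResult r = db.put(null, key, data, Put.NO_OVERWRITE, null);
          +     if (r != null) {
          +         // insertion succeeded: counted by getPriInsertOps()
          +     } else {
          +         // key already existed: counted by getPriInsertFailOps()
          +     }
          + 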
          +
        • +

          getSecInsertOps

          +
          public long getSecInsertOps()
          +
          Number of successful secondary DB insertion operations. +

          + This operation corresponds to a successful call to Cursor.put + or Database.put, for a primary DB with an associated + secondary DB. A secondary record is inserted when inserting a primary + record with a non-null secondary key, or when updating a primary record + and the secondary key is changed to a non-null value that is + different from the previously existing value. +

          + Note: Operations are currently counted as secondary DB (rather than + primary DB) operations only if the DB has been opened by the application + as a secondary DB. In particular the stats may be confusing on an HA + replica node if a secondary DB has not been opened by the application on + the replica.

          +
        • +
        + + + +
          +
        • +

          getPriUpdateOps

          +
          public long getPriUpdateOps()
          +
          Number of successful primary DB update operations. +

          + This operation corresponds to a successful call to Cursor.put + or Database.put in one of the following cases: +

            +
          • + When Put.OVERWRITE is specified and the key previously + existed in the DB. +
          • +
          • + When calling Cursor.put with Put.CURRENT (see the sketch after + this list). +
          • +
          +
        • +
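
          A sketch of the Put.CURRENT case noted above (not from the original
          javadoc), assuming a Cursor named cursor that is positioned on a
          record and a DatabaseEntry named newData:

          +
          +     // With Put.CURRENT the key parameter must be null; a successful
          +     // call is counted by getPriUpdateOps().
          +     OperationResult r = cursor.put(null, newData, Put.CURRENT, null);
          + 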
        + + + +
          +
        • +

          getSecUpdateOps

          +
          public long getSecUpdateOps()
          +
          Number of successful secondary DB update operations. +

          + This operation corresponds to a successful call to Cursor.put + or Database.put, when a primary record is updated and its + TTL is changed. The associated secondary records must also be updated to + reflect the change in the TTL. +

          + Note: Operations are currently counted as secondary DB (rather than + primary DB) operations only if the DB has been opened by the application + as a secondary DB. In particular the stats may be confusing on an HA + replica node if a secondary DB has not been opened by the application on + the replica.

          +
        • +
          +
        • +

          getPriDeleteFailOps

          +
          public long getPriDeleteFailOps()
          +
          Number of failed primary DB deletion operations. +

          + This operation corresponds to a call to Database.delete or SecondaryDatabase.delete, when the key could not be deleted because it + did not previously exist in the DB.

          +
        • +
        + + + +
          +
        • +

          getSecDeleteOps

          +
          public long getSecDeleteOps()
          +
          Number of successful secondary DB deletion operations. +

          + This operation corresponds to one of the following API calls: +

          +

          + Note: Operations are currently counted as secondary DB (rather than + primary DB) operations only if the DB has been opened by the application + as a secondary DB. In particular the stats may be confusing on an HA + replica node if a secondary DB has not been opened by the application on + the replica.

          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns a String representation of the stats in the form of + <stat>=<value>
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toStringVerbose

          +
          public java.lang.String toStringVerbose()
          +
          Returns a String representation of the stats which includes stats + descriptions in addition to <stat>=<value>
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/EnvironmentWedgedException.html b/docs/java/com/sleepycat/je/EnvironmentWedgedException.html new file mode 100644 index 0000000..ffe6f46 --- /dev/null +++ b/docs/java/com/sleepycat/je/EnvironmentWedgedException.html @@ -0,0 +1,278 @@ + + + + + +EnvironmentWedgedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class EnvironmentWedgedException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class EnvironmentWedgedException
      +extends EnvironmentFailureException
      +
      Thrown by Environment.close() when the current process must be + shut down and restarted before re-opening the Environment. +

      + If during close(), a badly behaved internal thread cannot be stopped, + then the JVM process must be stopped and restarted. The close() method first + attempts a soft shutdown of each thread. If that fails to stop the thread, + it is interrupted. If that fails to stop the thread, because it never + becomes interruptible, then EnvironmentWedgedException is thrown by + close(), after performing as much of the normal shutdown process as + possible. Before this exception is thrown, a full thread dump is logged, to + aid in debugging. +

      + Note that prior to calling close(), if JE attempts to shut down an internal + thread and it cannot be shut down, the Environment will be invalidated, also causing an EnvironmentWedgedException to be thrown. In this case (as in all other + cases where an EnvironmentFailureException is thrown and the + Environment is invalidated), the application should call Environment.close(). The close() method will throw EnvironmentWedgedException in this case, as described above. +

      + If the application fails to restart the process when this exception is + thrown, it is likely that re-opening the Environment will not be possible, + or will result in unpredictable behavior. This is because the thread that + stopped may be holding a resource that is needed by the newly opened + Environment.
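
      A sketch of the handling described above (not from the original
      javadoc), assuming the process exits so that an external supervisor can
      restart it:

      +
      +     try {
      +         env.close();
      +     } catch (EnvironmentWedgedException e) {
      +         // A stuck internal thread could not be stopped; the process
      +         // must be restarted before the Environment is re-opened.
      +         System.err.println("Environment wedged: " + e);
      +         System.exit(1);
      +     }
      + 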

      +
      +
      Since:
      +
      7.1
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ExceptionEvent.html b/docs/java/com/sleepycat/je/ExceptionEvent.html new file mode 100644 index 0000000..c1e0f4e --- /dev/null +++ b/docs/java/com/sleepycat/je/ExceptionEvent.html @@ -0,0 +1,328 @@ + + + + + +ExceptionEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class ExceptionEvent

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class ExceptionEvent
      +extends java.lang.Object
      +
      A class representing an exception event. Contains an exception and the name + of the daemon thread that it was thrown from.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + + +
        Constructors 
        Constructor and Description
        ExceptionEvent(java.lang.Exception exception) 
        ExceptionEvent(java.lang.Exception exception, + java.lang.String threadName) 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.ExceptiongetException() +
        Returns the exception in the event.
        +
        java.lang.StringgetThreadName() +
        Returns the name of the daemon thread that threw the exception.
        +
        java.lang.StringtoString() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ExceptionEvent

          +
          public ExceptionEvent(java.lang.Exception exception,
          +                      java.lang.String threadName)
          +
        • +
        + + + +
          +
        • +

          ExceptionEvent

          +
          public ExceptionEvent(java.lang.Exception exception)
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getException

          +
          public java.lang.Exception getException()
          +
          Returns the exception in the event.
          +
        • +
        + + + +
          +
        • +

          getThreadName

          +
          public java.lang.String getThreadName()
          +
          Returns the name of the daemon thread that threw the exception.
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ExceptionListener.html b/docs/java/com/sleepycat/je/ExceptionListener.html new file mode 100644 index 0000000..f0ddb4d --- /dev/null +++ b/docs/java/com/sleepycat/je/ExceptionListener.html @@ -0,0 +1,236 @@ + + + + + +ExceptionListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Interface ExceptionListener

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface ExceptionListener
      +
      A callback to notify the application program when an exception occurs in a + JE Daemon thread.
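
      A listener is installed through the environment configuration. A
      minimal sketch (not from the original javadoc):

      +
      +     EnvironmentConfig envConfig = new EnvironmentConfig();
      +     envConfig.setExceptionListener(new ExceptionListener() {
      +         public void exceptionThrown(ExceptionEvent event) {
      +             // Log which daemon thread failed and why.
      +             System.err.println("JE daemon thread " + event.getThreadName()
      +                 + " threw " + event.getException());
      +         }
      +     });
      + 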
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          exceptionThrown

          +
          void exceptionThrown(ExceptionEvent event)
          +
          This method is called if an exception is seen in a JE Daemon thread.
          +
          +
          Parameters:
          +
          event - the ExceptionEvent representing the exception that was + thrown.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ForeignConstraintException.html b/docs/java/com/sleepycat/je/ForeignConstraintException.html new file mode 100644 index 0000000..d605e1b --- /dev/null +++ b/docs/java/com/sleepycat/je/ForeignConstraintException.html @@ -0,0 +1,283 @@ + + + + + +ForeignConstraintException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class ForeignConstraintException

    +
    +
    + +
    + +
    +
    + +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ForeignKeyDeleteAction.html b/docs/java/com/sleepycat/je/ForeignKeyDeleteAction.html new file mode 100644 index 0000000..8faaabc --- /dev/null +++ b/docs/java/com/sleepycat/je/ForeignKeyDeleteAction.html @@ -0,0 +1,408 @@ + + + + + +ForeignKeyDeleteAction (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum ForeignKeyDeleteAction

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<ForeignKeyDeleteAction>
      +
      +
      +
      +
      public enum ForeignKeyDeleteAction
      +extends java.lang.Enum<ForeignKeyDeleteAction>
      +
      The action taken when a referenced record in the foreign key database is + deleted. + +

      The delete action applies to a secondary database that is configured to + have a foreign key integrity constraint. The delete action is specified by + calling SecondaryConfig.setForeignKeyDeleteAction(com.sleepycat.je.ForeignKeyDeleteAction).

      + +

      When a record in the foreign key database is deleted, it is checked to + see if it is referenced by any record in the associated secondary database. + If the key is referenced, the delete action is applied. By default, the + delete action is ABORT.
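
      For example, a CASCADE constraint might be configured as follows (a
      sketch, not from the original javadoc; myKeyCreator and the foreignDb
      handle are assumptions for illustration):

      +
      +     SecondaryConfig secConfig = new SecondaryConfig();
      +     secConfig.setAllowCreate(true);
      +     secConfig.setKeyCreator(myKeyCreator);
      +     secConfig.setForeignKeyDatabase(foreignDb);
      +     secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
      + 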

      +
      +
      See Also:
      +
      SecondaryConfig
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        ABORT +
        When a referenced record in the foreign key database is deleted, abort + the transaction by throwing a DeleteConstraintException.
        +
        CASCADE +
        When a referenced record in the foreign key database is deleted, delete + the primary database record that references it.
        +
        NULLIFY +
        When a referenced record in the foreign key database is deleted, set the + reference to null in the primary database record that references it, + thereby deleting the secondary key.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.StringtoString() 
        static ForeignKeyDeleteActionvalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static ForeignKeyDeleteAction[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + + + + + +
          +
        • +

          CASCADE

          +
          public static final ForeignKeyDeleteAction CASCADE
          +
          When a referenced record in the foreign key database is deleted, delete + the primary database record that references it.
          +
        • +
        + + + +
          +
        • +

          NULLIFY

          +
          public static final ForeignKeyDeleteAction NULLIFY
          +
          When a referenced record in the foreign key database is deleted, set the + reference to null in the primary database record that references it, + thereby deleting the secondary key. See also ForeignKeyNullifier and + ForeignMultiKeyNullifier.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static ForeignKeyDeleteAction[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (ForeignKeyDeleteAction c : ForeignKeyDeleteAction.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static ForeignKeyDeleteAction valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Enum<ForeignKeyDeleteAction>
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ForeignKeyNullifier.html b/docs/java/com/sleepycat/je/ForeignKeyNullifier.html new file mode 100644 index 0000000..eaedbb4 --- /dev/null +++ b/docs/java/com/sleepycat/je/ForeignKeyNullifier.html @@ -0,0 +1,271 @@ + + + + + +ForeignKeyNullifier (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Interface ForeignKeyNullifier

    +
    +
    +
    + +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          nullifyForeignKey

          +
          boolean nullifyForeignKey(SecondaryDatabase secondary,
          +                          DatabaseEntry data)
          +                   throws DatabaseException
          +
          Sets the foreign key reference to null in the datum of the primary + database.
          +
          +
          Parameters:
          +
          secondary - the database in which the foreign key integrity + constraint is defined. This parameter is passed for informational + purposes but is not commonly used.
          +
          data - the existing primary datum in which the foreign key + reference should be set to null. This parameter should be updated by + this method if it returns true.
          +
          Returns:
          +
          true if the datum was modified, or false to indicate that the + key is not present.
          +
          Throws:
          +
          DatabaseException - if an error occurs attempting to clear the key + reference.
          +
          +
        • +
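
          A sketch of an implementation (not from the original javadoc),
          assuming a hypothetical record layout in which the foreign key
          occupies the first four bytes of the datum:

          +
          +     class MyNullifier implements ForeignKeyNullifier {
          +         public boolean nullifyForeignKey(SecondaryDatabase secondary,
          +                                          DatabaseEntry data) {
          +             byte[] bytes = data.getData();
          +             if (bytes == null || bytes.length < 4) {
          +                 return false;              // no key reference present
          +             }
          +             java.util.Arrays.fill(bytes, 0, 4, (byte) 0); // clear it
          +             data.setData(bytes);
          +             return true;                   // datum was modified
          +         }
          +     }
          + 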
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ForeignMultiKeyNullifier.html b/docs/java/com/sleepycat/je/ForeignMultiKeyNullifier.html new file mode 100644 index 0000000..ed1806a --- /dev/null +++ b/docs/java/com/sleepycat/je/ForeignMultiKeyNullifier.html @@ -0,0 +1,272 @@ + + + + + +ForeignMultiKeyNullifier (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Interface ForeignMultiKeyNullifier

    +
    +
    +
    + +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          nullifyForeignKey

          +
          boolean nullifyForeignKey(SecondaryDatabase secondary,
          +                          DatabaseEntry key,
          +                          DatabaseEntry data,
          +                          DatabaseEntry secKey)
          +                   throws DatabaseException
          +
          Sets the foreign key reference to null in the datum of the primary + database.
          +
          +
          Parameters:
          +
          secondary - the database in which the foreign key integrity + constraint is defined. This parameter is passed for informational + purposes but is not commonly used.
          +
          key - the existing primary key. This parameter is passed for + informational purposes but is not commonly used.
          +
          data - the existing primary datum in which the foreign key + reference should be set to null. This parameter should be updated by + this method if it returns true.
          +
          secKey - the secondary key to be nullified. This parameter is + needed for knowing which key to nullify when multiple keys are present, + as when SecondaryMultiKeyCreator is used.
          +
          Returns:
          +
          true if the datum was modified, or false to indicate that the + key is not present.
          +
          Throws:
          +
          DatabaseException - if an error occurs attempting to clear the key + reference.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ForwardCursor.html b/docs/java/com/sleepycat/je/ForwardCursor.html new file mode 100644 index 0000000..3865271 --- /dev/null +++ b/docs/java/com/sleepycat/je/ForwardCursor.html @@ -0,0 +1,423 @@ + + + + + +ForwardCursor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Interface ForwardCursor

    +
    +
    +
    +
      +
    • +
      +
      All Superinterfaces:
      +
      java.lang.AutoCloseable, java.io.Closeable
      +
      +
      +
      All Known Implementing Classes:
      +
      Cursor, DiskOrderedCursor, JoinCursor, SecondaryCursor
      +
      +
      +
      +
      public interface ForwardCursor
      +extends java.io.Closeable
      +
      The interface for forward-moving Cursor operations. Specific implementations + may modify the documented behavior on each of these methods.
      +
      +
      Since:
      +
      5.0
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getDatabase

          +
          Database getDatabase()
          +
          Returns the Database handle associated with this ForwardCursor.
          +
          +
          Returns:
          +
          The Database handle associated with this ForwardCursor.
          +
          +
        • +
        + + + +
          +
        • +

          close

          +
          void close()
          +
          Discards the cursor. + +

          The cursor handle may not be used again after this method has been + called, regardless of the method's success or failure.

          + +

          WARNING: To guard against memory leaks, the application should + discard all references to the closed handle. While BDB makes an effort + to discard references from closed objects to the allocated memory for an + environment, this behavior is not guaranteed. The safe course of action + for an application is to discard all references to closed BDB + objects.

          +
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          +
        • +
        + + + +
          +
        • +

          get

          +
          OperationResult get(DatabaseEntry key,
          +                    DatabaseEntry data,
          +                    Get getType,
          +                    ReadOptions options)
          +
          Moves the cursor to a record according to the specified Get + type.
          +
          +
          Parameters:
          +
          key - the key returned as + output.
          +
          data - the data returned as + output.
          +
          getType - is Get.NEXT or Get.CURRENT, i.e., one of the types defined by this + interface. Get.CURRENT is permitted only if the cursor is + initialized (positioned on a record).
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record requested is found, else null.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + the cursor is uninitialized (not positioned on a record) and this is not + permitted (see above), or the non-transactional cursor was created in a + different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified. + This includes passing a null getType, a null input key/data parameter, + an input key/data parameter with a null data array, a partial key/data + input parameter, and specifying a lock mode of READ_COMMITTED.
          +
          Since:
          +
          7.0
          +
          +
        • +
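
          A typical full scan using this method, as a sketch (not from the
          original javadoc), assuming an open ForwardCursor named cursor:

          +
          +     DatabaseEntry key = new DatabaseEntry();
          +     DatabaseEntry data = new DatabaseEntry();
          +     while (cursor.get(key, data, Get.NEXT, null) != null) {
          +         // process key and data
          +     }
          + 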
        + + + + + + + + +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/Get.html b/docs/java/com/sleepycat/je/Get.html new file mode 100644 index 0000000..dbfb186 --- /dev/null +++ b/docs/java/com/sleepycat/je/Get.html @@ -0,0 +1,689 @@ + + + + + +Get (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum Get

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<Get>
      +
      +
      +
      +
      public enum Get
      +extends java.lang.Enum<Get>
      +
      The operation type passed to "get" methods on databases and cursors.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        CURRENT +
        Accesses the current record.
        +
        FIRST +
        Finds the first record in the database.
        +
        LAST +
        Finds the last record in the database.
        +
        NEXT +
        Moves to the next record.
        +
        NEXT_DUP +
        Moves to the next record with the same key.
        +
        NEXT_NO_DUP +
        Moves to the next record with a different key.
        +
        PREV +
        Moves to the previous record.
        +
        PREV_DUP +
        Moves to the previous record with the same key.
        +
        PREV_NO_DUP +
        Moves to the previous record with a different key.
        +
        SEARCH +
        Searches using an exact match by key.
        +
        SEARCH_BOTH +
        Searches using an exact match by key and data (or pKey).
        +
        SEARCH_BOTH_GTE +
        Searches using an exact match by key and a GTE match by data (or pKey).
        +
        SEARCH_GTE +
        Searches using a GTE match by key.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static GetvalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static Get[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          SEARCH

          +
          public static final Get SEARCH
          +
          Searches using an exact match by key. + +

          Returns, or moves the cursor to, the record having a key exactly + matching the given key parameter.

          + +

          If the database has duplicate keys, the record with the matching key + and lowest data value (or the lowest primary key, for secondary + databases) is selected.

          + +

          The operation does not succeed if no record matches.

          +
        • +
        + + + +
          +
        • +

          SEARCH_BOTH

          +
          public static final Get SEARCH_BOTH
          +
          Searches using an exact match by key and data (or pKey). + +

          Returns, or moves the cursor to, the record having a key exactly + matching the given key parameter, and having a data value (or primary + key) exactly matching the given data (or pKey) parameter. The data is + matched for Database and Cursor operations, while the primary key is + matched for SecondaryDatabase and SecondaryCursor operations.

          + +

          If the database has duplicate keys, the search is performed by key + and data (or pKey) using the database Btree. If the database does + not have duplicate keys, the search is performed by key alone using the + Btree, and then the data (or primary key) of the matching record is + simply compared to the data (pKey) parameter. In other words, using + this operation has no performance advantage over SEARCH when + the database does not have duplicates.

          + +

          The operation does not succeed (null is returned) if no record + matches.

          +
        • +
        + + + +
          +
        • +

          SEARCH_GTE

          +
          public static final Get SEARCH_GTE
          +
          Searches using a GTE match by key. + +

          Returns, or moves the cursor to, the record with a key that is + greater than or equal to (GTE) the given key parameter.

          + +

          If the database has duplicate keys, the record with the lowest data + value (or the lowest primary key, for a secondary database) is selected + among the duplicates with the matching key.

          + +

          The operation does not succeed (null is returned) if no record + matches.

          +
        • +
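
          For example, to position on the first record whose key is greater
          than or equal to a given value, a sketch (not from the original
          javadoc), assuming an open Cursor named cursor and a byte[]
          keyBytes for the search key:

          +
          +     DatabaseEntry key = new DatabaseEntry(keyBytes);
          +     DatabaseEntry data = new DatabaseEntry();
          +     OperationResult result = cursor.get(key, data, Get.SEARCH_GTE, null);
          +     // On success, key holds the actual matching key, which may be
          +     // greater than the key passed in.
          + 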
        + + + +
          +
        • +

          SEARCH_BOTH_GTE

          +
          public static final Get SEARCH_BOTH_GTE
          +
          Searches using an exact match by key and a GTE match by data (or pKey). + +

          Returns, or moves the cursor to, the record with a key exactly + matching the given key parameter, and having a data value (or primary + key) that is greater than or equal to (GTE) the given data (or pKey) + parameter. The data is matched for Database and Cursor operations, while + the primary key is matched for SecondaryDatabase and SecondaryCursor + operations.

          + +

          If the database does not have duplicate keys, the data (or pKey) is + matched exactly and this operation is equivalent to SEARCH_BOTH.

          + +

          The operation does not succeed (null is returned) if no record + matches.

          +
        • +
        + + + +
          +
        • +

          CURRENT

          +
          public static final Get CURRENT
          +
          Accesses the current record. + +

          Accesses the record at the current cursor position. If the cursor is + uninitialized (not positioned on a record), IllegalStateException is thrown.

          + +

          The operation does not succeed (null is returned) if the record at + the current position has been deleted. This can occur in two cases: 1. + If the record was deleted using this cursor and then accessed. 2. If the + record was not locked by this cursor or transaction, and was deleted by + another thread or transaction after this cursor was positioned on + it.

          +
        • +
        + + + +
          +
        • +

          FIRST

          +
          public static final Get FIRST
          +
          Finds the first record in the database. + +

          Moves the cursor to the record in the database with the lowest valued + key.

          + +

          If the database has duplicate keys, the record with the lowest data + value (or the lowest primary key, for a secondary database) is selected + among the duplicates for the lowest key.

          + +

          The operation does not succeed (null is returned) if the database is + empty.

          +
        • +
        + + + +
          +
        • +

          LAST

          +
          public static final Get LAST
          +
          Finds the last record in the database. + +

          Moves the cursor to the record in the database with the highest + valued key.

          + +

          If the database has duplicate keys, the record with the highest data + value (or the highest primary key, for a secondary database) is selected + among the duplicates for the highest key.

          + +

          The operation does not succeed (null is returned) if the database is + empty.

          +
        • +
        + + + +
          +
        • +

          NEXT

          +
          public static final Get NEXT
          +
          Moves to the next record. + +

          Moves the cursor to the record following the record at the current + cursor position. If the cursor is uninitialized (not positioned on a + record), moves to the first record and this operation is equivalent to + FIRST.

          + +

          If the database does not have duplicate keys, the following record is + defined as the record with the next highest key. If the database does + have duplicate keys, the following record is defined as the record with + the same key and the next highest data value (or the next highest + primary key, for a secondary database) among the duplicates for that + key; or if there are no more records with the same key, the following + record is the record with the next highest key and the lowest data value + (or the lowest primary key, for a secondary database) among the + duplicates for that key.

          + +

          The operation does not succeed (null is returned) if the record at + the cursor position is the last record in the database.

          +
        • +
        + + + +
          +
        • +

          NEXT_DUP

          +
          public static final Get NEXT_DUP
          +
          Moves to the next record with the same key. + +

          Moves the cursor to the record following the record at the current + cursor position and having the same key. If the cursor is uninitialized + (not positioned on a record), IllegalStateException is + thrown.

          + +

          If the database has duplicate keys, moves to the record with the same + key and the next highest data value (or the next highest primary key, + for a secondary database) among the duplicates for that key.

          + +

          The operation does not succeed (null is returned) if there are no + following records with the same key. This is always the case when the + database does not have duplicate keys.

          +
        • +
        + + + +
          +
        • +

          NEXT_NO_DUP

          +
          public static final Get NEXT_NO_DUP
          +
          Moves to the next record with a different key. + +

          Moves the cursor to the record following the record at the current + cursor position and having the next highest key. If the cursor is + uninitialized (not positioned on a record), moves to the first record + and this operation is equivalent to FIRST.

          + +

          If the database has duplicate keys, moves to the record with the next + highest key and the lowest data value (or the lowest primary key, for a + secondary database) among the duplicates for that key; this effectively + skips over records having the same key and a higher data value (or a + higher primary key, for a secondary database). If the database does not + have duplicate keys, this operation is equivalent to NEXT.

          + +

          The operation does not succeed (null is returned) if there are no + following records with a different key.

          +
        • +
        + + + +
          +
        • +

          PREV

          +
          public static final Get PREV
          +
          Moves to the previous record. + +

          Moves the cursor to the record preceding the record at the current + cursor position. If the cursor is uninitialized (not positioned on a + record), moves to the last record and this operation is equivalent to + LAST.

          + +

          If the database does not have duplicate keys, the preceding record is + defined as the record with the next lowest key. If the database does + have duplicate keys, the preceding record is defined as the record with + the same key and the next lowest data value (or the next lowest primary + key, for a secondary database) among the duplicates for that key; or if + there are no preceding records with the same key, the preceding record + is the record with the next lowest key and the highest data value (or + the highest primary key, for a secondary database) among the duplicates + for that key.

          + +

          The operation does not succeed (null is returned) if the record at + the cursor position is the first record in the database.

          +
        • +
        + + + +
          +
        • +

          PREV_DUP

          +
          public static final Get PREV_DUP
          +
          Moves to the previous record with the same key. + +

          Moves the cursor to the record preceding the record at the current + cursor position and having the same key. If the cursor is uninitialized + (not positioned on a record), IllegalStateException is + thrown.

          + +

          If the database has duplicate keys, moves to the record with the same + key and the next lowest data value (or the next lowest primary key, for + a secondary database) among the duplicates for that key.

          + +

          The operation does not succeed (null is returned) if there are no + preceding records with the same key. This is always the case when the + database does not have duplicate keys.

          +
        • +
        + + + +
          +
        • +

          PREV_NO_DUP

          +
          public static final Get PREV_NO_DUP
          +
          Moves to the previous record with a different key. + +

          Moves the cursor to the record preceding the record at the current + cursor position and having the next lowest key. If the cursor is + uninitialized (not positioned on a record), moves to the last record + and this operation is equivalent to LAST.

          + +

          If the database has duplicate keys, moves to the record with the next + lowest key and the highest data value (or the highest primary key, for a + secondary database) among the duplicates for that key; this effectively + skips over records having the same key and a lower data value (or a + lower primary key, for a secondary database). If the database does not + have duplicate keys, this operation is equivalent to PREV.

          + +

          The operation does not succeed (null is returned) if there are no + preceding records with a different key.

          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static Get[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (Get c : Get.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static Get valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/JEVersion.html b/docs/java/com/sleepycat/je/JEVersion.html new file mode 100644 index 0000000..d03fe9d --- /dev/null +++ b/docs/java/com/sleepycat/je/JEVersion.html @@ -0,0 +1,484 @@ + + + + + +JEVersion (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class JEVersion

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<JEVersion>
      +
      +
      +
      +
      public class JEVersion
      +extends java.lang.Object
      +implements java.lang.Comparable<JEVersion>, java.io.Serializable
      +
      Berkeley DB Java Edition version information. Versions consist of major, + minor and patch numbers. +

      + There is one JEVersion object per running JVM and it may be accessed using + the static field JEVersion.CURRENT_VERSION.
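
      For example (a sketch, not from the original javadoc):

      +
      +     JEVersion v = JEVersion.CURRENT_VERSION;
      +     System.out.println(v.getVersionString());  // e.g. "7.5.11"
      +     System.out.println(v.getMajor() + "." + v.getMinor()
      +                        + "." + v.getPatch());
      + 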

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        static JEVersionCURRENT_VERSION +
        Release version.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        JEVersion(java.lang.String version) 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        intcompareTo(JEVersion comparedVersion) 
        booleanequals(java.lang.Object o) 
        intgetMajor() +
        Major number of the release version.
        +
        intgetMinor() +
        Minor number of the release version.
        +
        java.lang.StringgetNumericVersionString() +
        The numeric version string, without the patch tag.
        +
        intgetPatch() +
        Patch number of the release version.
        +
        java.lang.StringgetVersionString() +
        Release version, suitable for display.
        +
        inthashCode() 
        java.lang.StringtoString() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          CURRENT_VERSION

          +
          public static final JEVersion CURRENT_VERSION
          +
          Release version.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          JEVersion

          +
          public JEVersion(java.lang.String version)
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          getMajor

          +
          public int getMajor()
          +
          Major number of the release version.
          +
          +
          Returns:
          +
          The major number of the release version.
          +
          +
        • +
        + + + +
          +
        • +

          getMinor

          +
          public int getMinor()
          +
          Minor number of the release version.
          +
          +
          Returns:
          +
          The minor number of the release version.
          +
          +
        • +
        + + + +
          +
        • +

          getPatch

          +
          public int getPatch()
          +
          Patch number of the release version.
          +
          +
          Returns:
          +
          The patch number of the release version.
          +
          +
        • +
        + + + +
          +
        • +

          getNumericVersionString

          +
          public java.lang.String getNumericVersionString()
          +
          The numeric version string, without the patch tag.
          +
          +
          Returns:
          +
          The release version
          +
          +
        • +
        + + + +
          +
        • +

          getVersionString

          +
          public java.lang.String getVersionString()
          +
          Release version, suitable for display.
          +
          +
          Returns:
          +
          The release version, suitable for display.
          +
          +
        • +
        + + + +
          +
        • +

          compareTo

          +
          public int compareTo(JEVersion comparedVersion)
          +
          +
          Specified by:
          +
          compareTo in interface java.lang.Comparable<JEVersion>
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object o)
          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          +
          Overrides:
          +
          hashCode in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/JoinConfig.html b/docs/java/com/sleepycat/je/JoinConfig.html new file mode 100644 index 0000000..306d718 --- /dev/null +++ b/docs/java/com/sleepycat/je/JoinConfig.html @@ -0,0 +1,436 @@ + + + + + +JoinConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class JoinConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class JoinConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      The configuration properties of a JoinCursor. The join cursor + configuration is specified when calling Database.join. + +

      To create a configuration object with default attributes:

      + +
      +     JoinConfig config = new JoinConfig();
      + 
      + +

      To set custom attributes:

      + +
      +     JoinConfig config = new JoinConfig();
      +     config.setNoSort(true);
      + 
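
      The configured object is then passed to Database.join. A sketch (not
      from the original javadoc), assuming a primary database primaryDb with
      secondary databases secDb1 and secDb2 whose cursors are positioned on
      the secondary key values to be joined:

      +
      +     SecondaryCursor c1 = secDb1.openSecondaryCursor(null, null);
      +     SecondaryCursor c2 = secDb2.openSecondaryCursor(null, null);
      +     // ... position c1 and c2 (e.g. with getSearchKey) on the values ...
      +     JoinCursor join = primaryDb.join(new Cursor[] { c1, c2 }, config);
      +     DatabaseEntry key = new DatabaseEntry();
      +     DatabaseEntry data = new DatabaseEntry();
      +     while (join.getNext(key, data, LockMode.DEFAULT) ==
      +            OperationStatus.SUCCESS) {
      +         // each iteration yields a primary record matching both indices
      +     }
      +     join.close();
      +     c1.close();
      +     c2.close();
      + 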
      +
      +
      See Also:
      +
      Database.join, +JoinCursor
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        static JoinConfigDEFAULT +
        Default configuration used if null is passed to Database.join.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        JoinConfig() +
        Creates an instance with the system's default settings.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        JoinConfigclone() +
        Returns a copy of this configuration object.
        +
        booleangetNoSort() +
        Returns whether automatic sorting of the input cursors is disabled.
        +
        JoinConfigsetNoSort(boolean noSort) +
        Specifies whether automatic sorting of the input cursors is disabled.
        +
        java.lang.StringtoString() +
        Returns the values for each configuration attribute.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          JoinConfig

          +
          public JoinConfig()
          +
          Creates an instance with the system's default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setNoSort

          +
          public JoinConfig setNoSort(boolean noSort)
          +
          Specifies whether automatic sorting of the input cursors is disabled. + +

          Joined values are retrieved by doing a sequential iteration over the + first cursor in the cursor array, and a nested iteration over each + following cursor in the order they are specified in the array. This + requires database traversals to search for the current datum in all the + cursors after the first. For this reason, the best join performance + normally results from sorting the cursors from the one that refers to + the least number of data items to the one that refers to the + most. Unless this method is called with true, Database.join + does this sort on behalf of its caller using the Cursor.countEstimate() method.

          + +

          If the data are structured so that cursors with many data items also + share many common elements, higher performance will result from listing + those cursors before cursors with fewer data items; that is, a sort + order other than the default. Calling this method permits applications + to perform join optimization prior to calling + Database.join.

          +
          +
          Parameters:
          +
          noSort - whether automatic sorting of the input cursors is + disabled.
          +
          Returns:
          +
          this
          +
          See Also:
          +
          Database.join
          +
          +
        • +
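          As a minimal sketch (hypothetical handles: cursor1, cursor2 and
          primaryDb are assumed to be open, and cursor2 is assumed to reference
          far fewer data items than cursor1), an application might disable the
          automatic sort and supply its own cursor order to Database.join:

          +     JoinConfig config = new JoinConfig();
          +     config.setNoSort(true);
          +     // The application knows cursor2 references far fewer
          +     // duplicates than cursor1, so it lists cursor2 first itself.
          +     SecondaryCursor[] ordered = {cursor2, cursor1};
          +     JoinCursor joinCursor = primaryDb.join(ordered, config);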
        + + + +
          +
        • +

          getNoSort

          +
          public boolean getNoSort()
          +
          Returns whether automatic sorting of the input cursors is disabled.
          +
          +
          Returns:
          +
          whether automatic sorting of the input cursors is disabled.
          +
          See Also:
          +
          setNoSort(boolean)
          +
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public JoinConfig clone()
          +
          Returns a copy of this configuration object.
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns the values for each configuration attribute.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          Returns:
          +
          the values for each configuration attribute.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/JoinCursor.html b/docs/java/com/sleepycat/je/JoinCursor.html new file mode 100644 index 0000000..a6f9008 --- /dev/null +++ b/docs/java/com/sleepycat/je/JoinCursor.html @@ -0,0 +1,558 @@ + + + + + +JoinCursor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class JoinCursor

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      ForwardCursor, java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class JoinCursor
      +extends java.lang.Object
      +implements ForwardCursor, java.io.Closeable
      +
      A specialized join cursor for use in performing equality or natural joins on + secondary indices. + +

      A join cursor is returned when calling Database.join.

      + +

      To open a join cursor using two secondary cursors:

      + +
      +     Transaction txn = ...
      +     Database primaryDb = ...
      +     SecondaryDatabase secondaryDb1 = ...
      +     SecondaryDatabase secondaryDb2 = ...
      +     

      +     SecondaryCursor cursor1 = null;
      +     SecondaryCursor cursor2 = null;
      +     JoinCursor joinCursor = null;
      +     try {
      +         DatabaseEntry key = new DatabaseEntry();
      +         DatabaseEntry data = new DatabaseEntry();
      +
      +         cursor1 = secondaryDb1.openSecondaryCursor(txn, null);
      +         cursor2 = secondaryDb2.openSecondaryCursor(txn, null);
      +
      +         key.setData(...); // initialize key for secondary index 1
      +         OperationStatus status1 =
      +             cursor1.getSearchKey(key, data, LockMode.DEFAULT);
      +         key.setData(...); // initialize key for secondary index 2
      +         OperationStatus status2 =
      +             cursor2.getSearchKey(key, data, LockMode.DEFAULT);
      +
      +         if (status1 == OperationStatus.SUCCESS &&
      +             status2 == OperationStatus.SUCCESS) {
      +
      +             SecondaryCursor[] cursors = {cursor1, cursor2};
      +             joinCursor = primaryDb.join(cursors, null);
      +
      +             while (true) {
      +                 OperationStatus joinStatus =
      +                     joinCursor.getNext(key, data, LockMode.DEFAULT);
      +                 if (joinStatus == OperationStatus.SUCCESS) {
      +                     // Do something with the key and data.
      +                 } else {
      +                     break;
      +                 }
      +             }
      +         }
      +     } finally {
      +         if (cursor1 != null) {
      +             cursor1.close();
      +         }
      +         if (cursor2 != null) {
      +             cursor2.close();
      +         }
      +         if (joinCursor != null) {
      +             joinCursor.close();
      +         }
      +     }

      + +

      The join algorithm is described here so that its cost can be estimated and + compared to other approaches for performing a query. Say that N cursors are + provided for the join operation. According to the order they appear in the + array the cursors are labeled C(1) through C(n), and the keys at each cursor + position are labeled K(1) through K(n).

      + +
        + +
      1. Using C(1), the join algorithm iterates sequentially through all records
      having K(1). This iteration is equivalent to a Cursor.getNextDup operation
      on the secondary index. The primary key of a candidate record is determined
      in this manner. The primary record itself is not retrieved and the primary
      database is not accessed.

      2. For each candidate primary key found in step 1, a Btree lookup is
      performed using C(2) through C(n), in that order. The Btree lookups are
      exact searches to determine whether the candidate record also contains
      secondary keys K(2) through K(n). The lookups are equivalent to a
      Cursor.getSearchBoth operation on the secondary index. The primary record
      itself is not retrieved and the primary database is not accessed.

      3. If any lookup in step 2 fails, the algorithm advances to the next
      candidate record using C(1). Lookups are performed in the order of the
      cursor array, and the algorithm proceeds to the next C(1) candidate key as
      soon as a single lookup fails.

      4. If all lookups in step 2 succeed, then the matching key and/or data is
      returned by the getNext method. If the getNext(DatabaseEntry,DatabaseEntry,LockMode)
      method signature is used, then the primary database is read to obtain the
      record data, as if Cursor.getSearchKey were called for the primary
      database. If the getNext(DatabaseEntry,LockMode) method signature is used,
      then only the primary key is returned and the primary database is not
      accessed (see the sketch after this list).

      5. The algorithm ends when C(1) has no more candidate records with K(1),
      and the getNext method will then return OperationStatus.NOTFOUND.
      +
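      To make the cost difference in step 4 concrete, here is a short sketch
      (assuming joinCursor, key and data are initialized as in the example
      above) contrasting the two getNext signatures:

      +     // Key-only retrieval: the primary database is never read.
      +     OperationStatus keyOnly = joinCursor.getNext(key, LockMode.DEFAULT);
      +
      +     // Key-and-data retrieval: the primary database is read for the data.
      +     OperationStatus keyData = joinCursor.getNext(key, data, LockMode.DEFAULT);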
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          close

          +
          public void close()
          +           throws DatabaseException
          +
          Closes the cursors that have been opened by this join cursor. + +

          The cursors passed to Database.join are not + closed by this method, and should be closed by the caller.

          + +

          WARNING: To guard against memory leaks, the application should + discard all references to the closed handle. While BDB makes an effort + to discard references from closed objects to the allocated memory for an + environment, this behavior is not guaranteed. The safe course of action + for an application is to discard all references to closed BDB + objects.

          +
          +
          Specified by:
          +
          close in interface ForwardCursor
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getDatabase

          +
          public Database getDatabase()
          +
          Returns the primary database handle associated with this cursor.
          +
          +
          Specified by:
          +
          getDatabase in interface ForwardCursor
          +
          Returns:
          +
          the primary database handle associated with this cursor.
          +
          +
        • +
        + + + +
          +
        • +

          getConfig

          +
          public JoinConfig getConfig()
          +
          Returns this object's configuration.
          +
          +
          Returns:
          +
          this object's configuration.
          +
          +
        • +
        + + + +
          +
        • +

          get

          +
          public OperationResult get(DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           Get getType,
          +                           ReadOptions options)
          +
          Returns the next primary key and data resulting from the join operation.
          +
          +
          Specified by:
          +
          get in interface ForwardCursor
          +
          Parameters:
          +
          getType - the get operation type; must be Get.NEXT.
          +
          key - the key returned as + output.
          +
          data - the data returned as + output.
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record requested is found, else null.
          +
          +
        • +
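          As a sketch (assuming joinCursor, key and data are open and
          initialized as in the class example), this call is the
          ReadOptions-based equivalent of getNext(key, data, LockMode.DEFAULT):

          +     OperationResult result = joinCursor.get(key, data, Get.NEXT, null);
          +     if (result == null) {
          +         // No more records match the join.
          +     }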
        + + + + + + + +
          +
        • +

          getNext

          +
          public OperationStatus getNext(DatabaseEntry key,
          +                               LockMode lockMode)
          +
          Returns the next primary key resulting from the join operation. + +

          An entry is returned by the join cursor for each primary key/data + pair having all secondary key values that were specified using the array + of secondary cursors passed to Database.join.

          +
          +
          Parameters:
          +
          key - the key returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
        • +
        + + + + +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/LockConflictException.html b/docs/java/com/sleepycat/je/LockConflictException.html new file mode 100644 index 0000000..81f99d8 --- /dev/null +++ b/docs/java/com/sleepycat/je/LockConflictException.html @@ -0,0 +1,420 @@ + + + + + +LockConflictException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class LockConflictException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Direct Known Subclasses:
      +
      DeadlockException, LockNotAvailableException, LockPreemptedException, LockTimeoutException, TransactionTimeoutException
      +
      +
      +
      +
      public abstract class LockConflictException
      +extends OperationFailureException
      +
      The common base class for all exceptions that result from record lock + conflicts during read and write operations. + +

      This exception normally indicates that a transaction may be retried. + Catching this exception, rather than its subclasses, is convenient and + recommended for handling lock conflicts and performing transaction retries + in a general purpose manner. See below for information on performing + transaction retries.

      + +

      The exception carries two arrays of transaction ids, one of the owners and + the other of the waiters, at the time of the lock conflict. This + information may be used along with the Transaction + ID for diagnosing locking problems. See getOwnerTxnIds() and getWaiterTxnIds().
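      A sketch of using these arrays for diagnosis (the catch block is assumed
      to surround a read or write operation):

      +     } catch (LockConflictException e) {
      +         // Log the transactions involved in the conflict.
      +         long[] owners = e.getOwnerTxnIds();
      +         long[] waiters = e.getWaiterTxnIds();
      +         System.err.println("owners=" + java.util.Arrays.toString(owners) +
      +                            " waiters=" + java.util.Arrays.toString(waiters));
      +     }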

      + +

      The Transaction handle is invalidated as a result of this + exception.

      + +

      Performing Transaction Retries

      + +

      If a lock conflict occurs during a transaction, the transaction may be + retried by performing the following steps. Some applications may also wish + to sleep for a short interval before retrying, to give other concurrent + transactions a chance to finish and release their locks.

      +
        +
      1. Close all cursors opened under the transaction.

      2. Abort the transaction.

      3. Begin a new transaction and repeat the operations.
      + +

      To handle LockConflictException reliably for all types of JE + applications including JE-HA applications, it is important to handle it when + it is thrown by all Database and Cursor read and write + operations.

      + +

      The following example code illustrates the recommended approach. Note + that the Environment.beginTransaction and Transaction.commit + calls are intentionally inside the try block. When using JE-HA, this + will make it easy to add a catch for other exceptions that can be + resolved by retrying the transaction, such as consistency exceptions.

      + +
      +  void doTransaction(final Environment env,
      +                     final Database db1,
      +                     final Database db2,
      +                     final int maxTries)
      +      throws DatabaseException, InterruptedException {
      +
      +      boolean success = false;
      +      long sleepMillis = 0;
      +      for (int i = 0; i < maxTries; i++) {
      +          // Sleep before retrying.
      +          if (sleepMillis != 0) {
      +              Thread.sleep(sleepMillis);
      +              sleepMillis = 0;
      +          }
      +          Transaction txn = null;
      +          try {
      +              txn = env.beginTransaction(null, null);
      +              final Cursor cursor1 = db1.openCursor(txn, null);
      +              try {
      +                  final Cursor cursor2 = db2.openCursor(txn, null);
      +                  try {
      +                      // INSERT APP-SPECIFIC CODE HERE:
      +                      // Perform read and write operations.
      +                  } finally {
      +                      cursor2.close();
      +                  }
      +              } finally {
      +                  cursor1.close();
      +              }
      +              txn.commit();
      +              success = true;
      +              return;
      +          } catch (LockConflictException e) {
      +              sleepMillis = LOCK_CONFLICT_RETRY_SEC * 1000;
      +              continue;
      +          } finally {
      +              if (!success) {
      +                  if (txn != null) {
      +                      txn.abort();
      +                  }
      +              }
      +          }
      +      }
      +      // INSERT APP-SPECIFIC CODE HERE:
      +      // Transaction failed, despite retries.
      +      // Take some app-specific course of action.
      +  }
      + +

      For more information on transactions and lock conflicts, see Writing Transactional Applications.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        long[]getOwnerTxnIds() +
        Returns an array of longs containing transaction ids of owners at the + time of the timeout.
        +
        long[]getWaiterTxnIds() +
        Returns an array of longs containing transaction ids of waiters at the + time of the timeout.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getOwnerTxnIds

          +
          public long[] getOwnerTxnIds()
          +
          Returns an array of longs containing transaction ids of owners at the + time of the timeout.
          +
          +
          Returns:
          +
          an array of longs containing transaction ids of owners at the + time of the timeout.
          +
          +
        • +
        + + + +
          +
        • +

          getWaiterTxnIds

          +
          public long[] getWaiterTxnIds()
          +
          Returns an array of longs containing transaction ids of waiters at the + time of the timeout.
          +
          +
          Returns:
          +
          an array of longs containing transaction ids of waiters at the + time of the timeout.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/LockMode.html b/docs/java/com/sleepycat/je/LockMode.html new file mode 100644 index 0000000..1f7d4dc --- /dev/null +++ b/docs/java/com/sleepycat/je/LockMode.html @@ -0,0 +1,704 @@ + + + + + +LockMode (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum LockMode

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<LockMode>
      +
      +
      +
      +
      public enum LockMode
      +extends java.lang.Enum<LockMode>
      +
      Record lock modes for read operations. Lock mode parameters may be specified + for all operations that retrieve data. + +

      Locking Rules

      + +

      Together with CursorConfig, TransactionConfig and EnvironmentConfig settings, lock mode parameters determine how records are + locked during read operations. Record locking is used to enforce the + isolation modes that are configured. Record locking is summarized below for + read and write operations. For more information on isolation levels and + transactions, see Writing Transactional Applications.

      + +

      With one exception, a record lock is always acquired when a record is + read or written, and a cursor will always hold the lock as long as it is + positioned on the record. The exception is when READ_UNCOMMITTED + is specified, which allows a record to be read without any locking.

      + +

      Both read (shared) and write (exclusive) locks are used. Read locks are + normally acquired on read (get method) operations and write locks on + write (put method) operations. The only exception is that a write + lock will be acquired on a read operation if RMW is specified.

      + +

      Because read locks are shared, multiple accessors may read the same + record. Because write locks are exclusive, if a record is written by one + accessor it may not be read or written by another accessor. An accessor is + either a transaction or a thread (for non-transactional operations).

      + +

      Whether additional locking is performed and how locks are released depend + on whether the operation is transactional and other configuration + settings.

      + +

      Transactional Locking

      + +

      Transactional operations include all write operations for a transactional + database, and read operations when a non-null Transaction parameter + is passed. When a null transaction parameter is passed for a write + operation for a transactional database, an auto-commit transaction is + automatically used.

      + +

      With transactions, read and write locks are normally held until the end + of the transaction (commit or abort). Write locks are always held until the + end of the transaction. However, if READ_COMMITTED is configured, + then read locks for cursor operations are only held during the operation and + while the cursor is positioned on the record. The read lock is released + when the cursor is moved to a different record or closed. When READ_COMMITTED is used for a database (non-cursor) operation, the read + lock is released before the method returns.

      + +

      When neither READ_UNCOMMITTED nor READ_COMMITTED is + specified, read and write locking as described above provide Repeatable Read + isolation, which is the default transactional isolation level. If + Serializable isolation is configured, additional "next key" locking is + performed to prevent "phantoms" -- records that are not visible at one point + in a transaction but that become visible at a later point after being + inserted by another transaction. Serializable isolation is configured via + TransactionConfig.setSerializableIsolation(boolean) or EnvironmentConfig.setTxnSerializableIsolation(boolean).
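      A minimal sketch (env assumed open) of enabling Serializable isolation for
      a single transaction via the configuration method named above:

      +     TransactionConfig txnConfig = new TransactionConfig();
      +     txnConfig.setSerializableIsolation(true);
      +     Transaction txn = env.beginTransaction(null, txnConfig);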

      + +

      Non-Transactional Locking

      + +

      Non-transactional operations include all operations for a + non-transactional database (including a Deferred Write database), and read + operations for a transactional database when a null Transaction + parameter is passed.

      + +

      For non-transactional operations, both read and write locks are only held + while a cursor is positioned on the record, and are released when the cursor + is moved to a different record or closed. For database (non-cursor) + operations, the read or write lock is released before the method + returns.

      + +

      This behavior is similar to READ_COMMITTED, except that both + read and write locks are released. Configuring READ_COMMITTED for + a non-transactional database cursor has no effect.

      + +

      Because the current thread is the accessor (locker) for non-transactional + operations, a single thread may have multiple cursors open without locking + conflicts. Two non-transactional cursors in the same thread may access the + same record via write or read operations without conflicts, and the changes + made by one cursor will be visible to the other cursor.

      + +

      However, a non-transactional operation will conflict with a transactional + operation for the same record even when performed in the same thread. When + using a transaction in a particular thread for a particular database, to + avoid conflicts you should use that transaction for all access to that + database in that thread. In other words, to avoid conflicts always pass the + transaction parameter, not null, for all operations. If you don't wish to + hold the read lock for the duration of the transaction, specify READ_COMMITTED.
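      As a minimal sketch (db, txn, key and data are assumed to exist), a thread
      with txn open passes it on every access, using READ_COMMITTED when the
      read lock should not be held until commit:

      +     OperationStatus status =
      +         db.get(txn, key, data, LockMode.READ_COMMITTED);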

      + +

      Read Uncommitted (Dirty-Read)

      + +

      When READ_UNCOMMITTED is configured, no locking is performed + by a read operation. READ_UNCOMMITTED does not apply to write + operations.

      + +

      READ_UNCOMMITTED is sometimes called dirty-read because records + are visible to the caller in their current state in the Btree at the time of + the read, even when that state is due to operations performed using a + transaction that has not yet committed. In addition, because no lock is + acquired by the dirty read operation, the record's state may change at any + time, even while a cursor used to do the dirty-read is still positioned on + the record.

      + +

      To illustrate this, let's say a record is read with dirty-read + (READ_UNCOMMITTED) by calling Cursor.getNext + with a cursor C, and changes to the record are also being made in another + thread using transaction T. When a locking (non-dirty-read) call to Cursor.getCurrent is subsequently made to read the same + record again with C at the current position, a result may be returned that + is different than the result returned by the earlier call to getNext. For example: +

        +
      • If the record is updated by T after the dirty-read getNext call, and T
      is committed, a subsequent call to getCurrent will return the data updated
      by T.

      • If the record is updated by T before the dirty-read getNext call, the
      getNext will return the data updated by T. But if T is then aborted, a
      subsequent call to getCurrent will return the version of the data before
      it was updated by T.

      • If the record was inserted by T before the dirty-read getNext call, the
      getNext call will return the inserted record. But if T is aborted, a
      subsequent call to getCurrent will return OperationStatus.KEYEMPTY.

      • If the record is deleted by T after the dirty-read getNext call, and T
      is committed, a subsequent call to getCurrent will return
      OperationStatus.KEYEMPTY.
      • +
      +
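      A sketch of the scenarios above (cursor c is assumed open on the database
      being modified by transaction T in another thread):

      +     // No lock is taken by the dirty read.
      +     OperationStatus dirty =
      +         c.getNext(key, data, LockMode.READ_UNCOMMITTED);
      +     // The locking re-read of the same position may observe a
      +     // different, committed state, or OperationStatus.KEYEMPTY.
      +     OperationStatus locked =
      +         c.getCurrent(key, data, LockMode.DEFAULT);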

      + +

      Note that deleted records are handled specially in JE. Deleted records + remain in the Btree until after the deleting transaction is committed, and + they are removed from the Btree asynchronously (not immediately at commit + time). When using READ_UNCOMMITTED, any record encountered in the + Btree that was previously deleted, whether or not the deleting transaction + has been committed, will be ignored (skipped over) by the read operation. + Of course, if the deleting transaction is aborted, the record will no longer + be deleted. If the application is scanning records, for example, this means + that such records may be skipped by the scan. If this behavior is not + desirable, READ_UNCOMMITTED_ALL may be used instead. This mode + ensures that records deleted by a transaction that is later aborted will not + be skipped by a read operation. This is accomplished in two different ways + depending on the type of database and whether the record's data is requested + by the operation. +

        +
      1. If the DB is configured for duplicates or the record's data is not
      requested, then a record that has been deleted by an open transaction is
      returned by the read operation.

      2. If the DB is not configured for duplicates and the record's data is
      requested, then the read operation must wait for the deleting transaction
      to close (commit or abort). After the transaction is closed, the record
      will be returned if it is actually not deleted and otherwise will be
      skipped.
      + +

      By "record data" we mean both the data parameter for a regular or + primary DB, and the pKey parameter for a secondary DB. By "record + data requested" we mean that all or part of the DatabaseEntry will + be returned by the read operation. Unless explicitly not + requested, the complete DatabaseEntry is returned. See + Using Partial DatabaseEntry + Parameters for more information.

      + +

      Because of this difference in behavior, although READ_UNCOMMITTED is fully non-blocking, READ_UNCOMMITTED_ALL is + not (under the conditions described). As a result, when using READ_UNCOMMITTED_ALL under these conditions, a LockConflictException will be thrown when blocking results in a deadlock or + lock timeout.

      + +

      To summarize, callers that use READ_UNCOMMITTED or READ_UNCOMMITTED_ALL should be prepared for the following behaviors. +

        +
      • After a successful dirty-read operation, because no lock is acquired the
      record can be changed by another transaction, even when the cursor used to
      perform the dirty-read operation is still positioned on the record.

      • After a successful dirty-read operation using a cursor C, say that
      another transaction T deletes the record, and T is committed. In this
      case, OperationStatus.KEYEMPTY will be returned by the following methods
      if they are called while C is still positioned on the deleted record:
      Cursor.getCurrent, Cursor.putCurrent and Cursor.delete.

      • When using READ_UNCOMMITTED, deleted records will be skipped even when
      the deleting transaction is still open. No blocking will occur and
      LockConflictException is never thrown when using this mode.

      • When using READ_UNCOMMITTED_ALL, deleted records will not be skipped
      even when the deleting transaction is open. If the DB is a duplicates DB
      or the record's data is not requested, the deleted record will be
      returned. If the DB is not a duplicates DB and the record's data is
      requested, blocking will occur until the deleting transaction is closed.
      In the latter case, LockConflictException will be thrown when this
      blocking results in a deadlock or a lock timeout.
      • +
      +

      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + + + + + + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        DEFAULT +
        Uses the default lock mode and is equivalent to passing null for + the lock mode parameter.
        +
        READ_COMMITTED +
        Read committed isolation provides for cursor stability but not + repeatable reads.
        +
        READ_UNCOMMITTED +
        Reads modified but not yet committed data.
        +
        READ_UNCOMMITTED_ALL +
        Reads modified but not yet committed data, ensuring that records are not + skipped due to transaction aborts.
        +
        RMW +
        Acquire write locks instead of read locks when doing the retrieval.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        ReadOptionstoReadOptions() +
        Returns a ReadOptions with this LockMode property, and default values + for all other properties.
        +
        java.lang.StringtoString() 
        static LockModevalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static LockMode[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + + + + + +
          +
        • +

          READ_UNCOMMITTED

          +
          public static final LockMode READ_UNCOMMITTED
          +
          Reads modified but not yet committed data. + +

          The Read Uncommitted mode is used if this lock mode is explicitly + passed for the lock mode parameter, or if null or DEFAULT is + passed and Read Uncommitted is the default -- see DEFAULT for + details.

          + +

          Unlike READ_UNCOMMITTED_ALL, deleted records will be skipped + even when the deleting transaction is still open. No blocking will occur + and LockConflictException is never thrown when using this + mode.

          + +

          See the locking rules for information on how Read + Uncommitted impacts transactional and non-transactional locking.

          +
        • +
        + + + +
          +
        • +

          READ_UNCOMMITTED_ALL

          +
          public static final LockMode READ_UNCOMMITTED_ALL
          +
          Reads modified but not yet committed data, ensuring that records are not + skipped due to transaction aborts. + +

          The Read Uncommitted mode is used only when this lock mode is + explicitly passed for the lock mode parameter.

          + +

          Unlike READ_UNCOMMITTED, deleted records will not be skipped + even when the deleting transaction is open. If the DB is a duplicates DB + or the record's data is not requested, the deleted record will be + returned. If the DB is not a duplicates DB and the record's data is + requested, blocking will occur until the deleting transaction is closed. + In the latter case, LockConflictException will be thrown when + this blocking results in a deadlock or a lock timeout.

          + +

          See the locking rules for information on how Read + Uncommitted impacts transactional and non-transactional locking.

          +
        • +
        + + + +
          +
        • +

          READ_COMMITTED

          +
          public static final LockMode READ_COMMITTED
          +
          Read committed isolation provides for cursor stability but not + repeatable reads. Data items which have been previously read by this + transaction may be deleted or modified by other transactions before the + cursor is closed or the transaction completes. + +

          Note that this LockMode may only be passed to Database get + methods, not to Cursor methods. To configure a cursor for Read + Committed isolation, use CursorConfig.setReadCommitted(boolean).

          + +

          See the locking rules for information on how Read + Committed impacts transactional and non-transactional locking.

          +
          +
          See Also:
          +
          Cache + Statistics: Unexpected Sizes
          +
          +
        • +
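          Since this LockMode cannot be passed to Cursor methods, a cursor is
          configured for Read Committed as in this sketch (db and txn assumed
          open):

          +     CursorConfig cursorConfig = new CursorConfig();
          +     cursorConfig.setReadCommitted(true);
          +     Cursor cursor = db.openCursor(txn, cursorConfig);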
        + + + +
          +
        • +

          RMW

          +
          public static final LockMode RMW
          +
          Acquire write locks instead of read locks when doing the retrieval. + +

          Because it causes a write lock to be acquired, specifying this lock + mode as a Cursor or Database get (read) method + parameter will override the Read Committed or Read Uncommitted isolation + mode that is configured using CursorConfig or TransactionConfig. The write lock will be acquired and held until the end + of the transaction. For non-transactional use, the write lock will be + released when the cursor is moved to a new position or closed.

          + +

          Setting this flag can eliminate deadlock during a read-modify-write + cycle by acquiring the write lock during the read part of the cycle so + that another thread of control acquiring a read lock for the same item, + in its own read-modify-write cycle, will not result in deadlock.

          +
        • +
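          A sketch of a read-modify-write cycle using RMW (db, txn, key and
          data are assumed to exist); taking the write lock at read time
          prevents the deadlock described above:

          +     OperationStatus status = db.get(txn, key, data, LockMode.RMW);
          +     if (status == OperationStatus.SUCCESS) {
          +         // ... modify the data entry ...
          +         db.put(txn, key, data);
          +     }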
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static LockMode[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (LockMode c : LockMode.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static LockMode valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        + + + +
          +
        • +

          toReadOptions

          +
          public ReadOptions toReadOptions()
          +
          Returns a ReadOptions with this LockMode property, and default values + for all other properties. + +

          WARNING: Do not modify the returned object, since it is a singleton.

          +
          +
          Since:
          +
          7.0
          +
          +
        • +
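          A sketch of safe usage: treat the returned object as read-only, and
          build a fresh ReadOptions when other properties must be customized
          (the chained setter is assumed to return the ReadOptions instance):

          +     ReadOptions shared = LockMode.RMW.toReadOptions(); // singleton; do not modify
          +     ReadOptions custom = new ReadOptions().setLockMode(LockMode.RMW); // safe to modify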
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Enum<LockMode>
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/LockNotAvailableException.html b/docs/java/com/sleepycat/je/LockNotAvailableException.html new file mode 100644 index 0000000..faddcd4 --- /dev/null +++ b/docs/java/com/sleepycat/je/LockNotAvailableException.html @@ -0,0 +1,273 @@ + + + + + +LockNotAvailableException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class LockNotAvailableException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class LockNotAvailableException
      +extends LockConflictException
      +
      Thrown when a non-blocking operation fails to get a lock. Non-blocking + transactions are configured using TransactionConfig.setNoWait(boolean). + +

      The Transaction handle is not invalidated as a result of + this exception.

      + +

      Normally, applications should catch the base class LockConflictException rather than catching one of its subclasses. All lock + conflicts are typically handled in the same way, which is normally to abort + and retry the transaction. See LockConflictException for more + information.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
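      A minimal sketch (env assumed open) of configuring the no-wait
      transactions that produce this exception:

      +     TransactionConfig txnConfig = new TransactionConfig();
      +     txnConfig.setNoWait(true);
      +     Transaction txn = env.beginTransaction(null, txnConfig);
      +     // Lock requests that would otherwise block now throw
      +     // LockNotAvailableException immediately.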
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/LockNotGrantedException.html b/docs/java/com/sleepycat/je/LockNotGrantedException.html new file mode 100644 index 0000000..c316ecf --- /dev/null +++ b/docs/java/com/sleepycat/je/LockNotGrantedException.html @@ -0,0 +1,284 @@ + + + + + +LockNotGrantedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class LockNotGrantedException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/LockStats.html b/docs/java/com/sleepycat/je/LockStats.html new file mode 100644 index 0000000..dc33c8e --- /dev/null +++ b/docs/java/com/sleepycat/je/LockStats.html @@ -0,0 +1,534 @@ + + + + + +LockStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class LockStats

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Deprecated.  +
      as of 4.0.10, replaced by Environment.getStats(StatsConfig).

      +
      +
      +
      public class LockStats
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      Lock statistics for a database environment. + +

      Note that some of the lock statistics may be expensive to obtain because + the lock table is unavailable to other operations while the statistics are + gathered. These expensive statistics are only provided if Environment.getLockStats is + called with a StatsConfig parameter that has been configured for "slow" + stats.

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods Deprecated Methods 
        Modifier and TypeMethod and Description
        intgetNAcquiresNoWaiters() +
        Deprecated. 
        +
        Number of acquires of lock table latch with no contention.
        +
        intgetNAcquiresNoWaitSuccessful() +
        Deprecated. 
        +
        Number of successful no-wait acquires of the lock table latch.
        +
        intgetNAcquiresNoWaitUnSuccessful() +
        Deprecated. 
        +
        Number of unsuccessful no-wait acquires of the lock table latch.
        +
        intgetNAcquiresSelfOwned() +
        Deprecated. 
        +
        Number of acquires of lock table latch when it was already owned + by the caller.
        +
        intgetNAcquiresWithContention() +
        Deprecated. 
        +
        Number of acquires of lock table latch when it was already owned by + another thread.
        +
        intgetNOwners() +
        Deprecated. 
        +
        Total lock owners in lock table.
        +
        intgetNReadLocks() +
        Deprecated. 
        +
        Total read locks currently held.
        +
        intgetNReleases() +
        Deprecated. 
        +
        Number of releases of the lock table latch.
        +
        longgetNRequests() +
        Deprecated. 
        +
        Total number of lock requests to date.
        +
        intgetNTotalLocks() +
        Deprecated. 
        +
        Total locks currently in lock table.
        +
        intgetNWaiters() +
        Deprecated. 
        +
        Total transactions waiting for locks.
        +
        longgetNWaits() +
        Deprecated. 
        +
        Total number of lock waits to date.
        +
        intgetNWriteLocks() +
        Deprecated. 
        +
        Total write locks currently held.
        +
        java.lang.StringtoString() +
        Deprecated. 
        +
        For convenience, LockTable.toString will display all stats in + an easily readable format.
        +
        java.lang.StringtoStringVerbose() +
        Deprecated. 
        +
        Like toString(), displays all stats.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getNOwners

          +
          public int getNOwners()
          +
          Deprecated. 
          +
          Total lock owners in lock table. Only provided when Environment.getLockStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNReadLocks

          +
          public int getNReadLocks()
          +
          Deprecated. 
          +
          Total read locks currently held. Only provided when Environment.getLockStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNTotalLocks

          +
          public int getNTotalLocks()
          +
          Deprecated. 
          +
          Total locks currently in lock table. Only provided when Environment.getLockStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNWaiters

          +
          public int getNWaiters()
          +
          Deprecated. 
          +
          Total transactions waiting for locks. Only provided when Environment.getLockStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNWriteLocks

          +
          public int getNWriteLocks()
          +
          Deprecated. 
          +
          Total write locks currently held. Only provided when Environment.getLockStats is + called in "slow" mode.
          +
        • +
        + + + +
          +
        • +

          getNRequests

          +
          public long getNRequests()
          +
          Deprecated. 
          +
          Total number of lock requests to date.
          +
        • +
        + + + +
          +
        • +

          getNWaits

          +
          public long getNWaits()
          +
          Deprecated. 
          +
          Total number of lock waits to date.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresNoWaiters

          +
          public int getNAcquiresNoWaiters()
          +
          Deprecated. 
          +
          Number of acquires of lock table latch with no contention.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresSelfOwned

          +
          public int getNAcquiresSelfOwned()
          +
          Deprecated. 
          +
          Number of acquires of lock table latch when it was already owned + by the caller.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresWithContention

          +
          public int getNAcquiresWithContention()
          +
          Deprecated. 
          +
          Number of acquires of lock table latch when it was already owned by + another thread.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresNoWaitSuccessful

          +
          public int getNAcquiresNoWaitSuccessful()
          +
          Deprecated. 
          +
          Number of successful no-wait acquires of the lock table latch.
          +
        • +
        + + + +
          +
        • +

          getNAcquiresNoWaitUnSuccessful

          +
          public int getNAcquiresNoWaitUnSuccessful()
          +
          Deprecated. 
          +
          Number of unsuccessful no-wait acquires of the lock table latch.
          +
        • +
        + + + +
          +
        • +

          getNReleases

          +
          public int getNReleases()
          +
          Deprecated. 
          +
          Number of releases of the lock table latch.
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Deprecated. 
          +
          For convenience, LockTable.toString will display all stats in + an easily readable format.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toStringVerbose

          +
          public java.lang.String toStringVerbose()
          +
          Deprecated. 
          +
          Like toString(), displays all stats, including a description of each + stat.
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/LockTimeoutException.html b/docs/java/com/sleepycat/je/LockTimeoutException.html new file mode 100644 index 0000000..348229a --- /dev/null +++ b/docs/java/com/sleepycat/je/LockTimeoutException.html @@ -0,0 +1,285 @@ + + + + + +LockTimeoutException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class LockTimeoutException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class LockTimeoutException
      +extends LockConflictException
      +
      Thrown when multiple threads are competing for a lock and the lock timeout + interval is exceeded for the current operation. This is normally because + another transaction or cursor holds a lock for longer than the timeout + interval. It may also occur if the application fails to close a cursor, or + fails to commit or abort a transaction, since any locks held by the cursor + or transaction will be held indefinitely. + +

      This exception is not thrown if a deadlock is detected, even if the + timeout elapses before the deadlock is broken. If a deadlock is detected, + DeadlockException is always thrown instead.

      + +

      The lock timeout interval may be set using + EnvironmentConfig.setLockTimeout(long, java.util.concurrent.TimeUnit) or + Transaction.setLockTimeout(long, java.util.concurrent.TimeUnit).
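      For example, a minimal sketch (txn assumed open; the interval shown is
      arbitrary):

      +     txn.setLockTimeout(500, java.util.concurrent.TimeUnit.MILLISECONDS);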

      + +

      The Transaction handle is invalidated as a result of this + exception.

      + +

      Normally, applications should catch the base class LockConflictException rather than catching one of its subclasses. All lock + conflicts are typically handled in the same way, which is normally to abort + and retry the transaction. See LockConflictException for more + information.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/LogWriteException.html b/docs/java/com/sleepycat/je/LogWriteException.html new file mode 100644 index 0000000..a265a81 --- /dev/null +++ b/docs/java/com/sleepycat/je/LogWriteException.html @@ -0,0 +1,266 @@ + + + + + +LogWriteException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class LogWriteException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class LogWriteException
      +extends EnvironmentFailureException
      +
      Thrown when an IOException or other failure occurs when writing to + the JE log. This exception may be indicative of a full disk, although an + IOException does not contain enough information to determine this + definitively. + +

      This exception may be thrown as the result of any write operation, + including record writes, checkpoints, etc.

      + +

      Existing Environment handles are invalidated as a result of this + exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/OperationFailureException.html b/docs/java/com/sleepycat/je/OperationFailureException.html new file mode 100644 index 0000000..016ce48 --- /dev/null +++ b/docs/java/com/sleepycat/je/OperationFailureException.html @@ -0,0 +1,404 @@ + + + + + +OperationFailureException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class OperationFailureException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/OperationResult.html b/docs/java/com/sleepycat/je/OperationResult.html new file mode 100644 index 0000000..fa7ac12 --- /dev/null +++ b/docs/java/com/sleepycat/je/OperationResult.html @@ -0,0 +1,299 @@ + + + + + +OperationResult (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class OperationResult

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class OperationResult
      +extends java.lang.Object
      +
      The result of an operation that successfully reads or writes a record. +

      + An OperationResult does not contain any failure information. Methods that + perform unsuccessful reads or writes return null or throw an exception. Null + is returned if the operation failed for commonly expected reasons, such as a + read that fails because the key does not exist, or an insertion that fails + because the key does exist. +

      + Methods that return OperationResult can be compared to methods that return + OperationStatus as follows: If OperationStatus.SUCCESS is + returned by the latter methods, this is equivalent to returning a non-null + OperationResult by the former methods.

      +
      +
      Since:
      +
      7.0
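      For illustration only (a sketch, not part of the original Javadoc): the
      null-versus-exception contract described above, assuming com.sleepycat.je.*
      is imported and db is an open Database handle; the key string is hypothetical.

          DatabaseEntry key = new DatabaseEntry("someKey".getBytes(StandardCharsets.UTF_8));
          DatabaseEntry data = new DatabaseEntry();
          // A plain "not found" is a commonly expected outcome, so get() returns
          // null rather than throwing; exceptions signal unexpected failures.
          OperationResult result = db.get(null, key, data, Get.SEARCH, null);
          if (result == null) {
              System.out.println("record not found");
          } else {
              System.out.println("found: " + new String(data.getData(), StandardCharsets.UTF_8));
          }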
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        longgetExpirationTime() +
        Returns the expiration time of the record, in milliseconds, or zero + if the record has no TTL and does not expire.
        +
        booleanisUpdate() +
        Returns whether the operation was an update, for distinguishing inserts + and updates performed by a Put.OVERWRITE operation.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isUpdate

          +
          public boolean isUpdate()
          +
          Returns whether the operation was an update, for distinguishing inserts + and updates performed by a Put.OVERWRITE operation.
          +
          +
          Returns:
          +
          whether an existing record was updated by this operation.
          +
          +
        • +
        + + + +
          +
        • +

          getExpirationTime

          +
          public long getExpirationTime()
          +
          Returns the expiration time of the record, in milliseconds, or zero + if the record has no TTL and does not expire. +

          + For 'get' operations, this is the expiration time of the current record. + For 'put' operations, this is the expiration time of the newly written + record. For 'delete' operations, this is the expiration time of the + record that was deleted. +

          + The return value will always be evenly divisible by the number of + milliseconds in one hour. If TimeUnit.Days was specified + when the record was written, the return value will also be evenly + divisible by the number of milliseconds in one day.

          +
          +
          Returns:
          +
          the expiration time in milliseconds, or zero.
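          A small sketch of interpreting this value (not from the original docs),
          assuming result is a non-null OperationResult from a read:

              long expirationMs = result.getExpirationTime();
              if (expirationMs == 0) {
                  System.out.println("record has no TTL and does not expire");
              } else {
                  // The value is a wall-clock time, always on an hour boundary.
                  long remainingMs = expirationMs - System.currentTimeMillis();
                  System.out.println("expires in ~" + (remainingMs / 3_600_000L) + " hour(s)");
              }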
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/OperationStatus.html b/docs/java/com/sleepycat/je/OperationStatus.html new file mode 100644 index 0000000..323e0ad --- /dev/null +++ b/docs/java/com/sleepycat/je/OperationStatus.html @@ -0,0 +1,406 @@ + + + + + +OperationStatus (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum OperationStatus

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<OperationStatus>
      +
      +
      +
      +
      public enum OperationStatus
      +extends java.lang.Enum<OperationStatus>
      +
      Status values from database operations.
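      For illustration (a sketch, not from the original Javadoc): the
      status-returning methods such as Database.putNoOverwrite report these values
      rather than throwing, assuming an open Database db and hypothetical
      key/value bytes:

          DatabaseEntry key = new DatabaseEntry("k".getBytes(StandardCharsets.UTF_8));
          DatabaseEntry data = new DatabaseEntry("v".getBytes(StandardCharsets.UTF_8));
          OperationStatus status = db.putNoOverwrite(null, key, data);
          if (status == OperationStatus.SUCCESS) {
              // the record was inserted
          } else if (status == OperationStatus.KEYEXIST) {
              // insert refused: a record with this key already exists
          }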
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        Enum Constants 
        Enum Constant and Description
        KEYEMPTY +
        The cursor operation was unsuccessful because the current record was + deleted.
        +
        KEYEXIST +
        The operation to insert data was configured to not allow overwrite and + the key already exists in the database.
        +
        NOTFOUND +
        The requested key/data pair was not found.
        +
        SUCCESS +
        The operation was successful.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.StringtoString()
        static OperationStatusvalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static OperationStatus[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          SUCCESS

          +
          public static final OperationStatus SUCCESS
          +
          The operation was successful.
          +
        • +
        + + + +
          +
        • +

          KEYEXIST

          +
          public static final OperationStatus KEYEXIST
          +
          The operation to insert data was configured to not allow overwrite and + the key already exists in the database.
          +
        • +
        + + + +
          +
        • +

          KEYEMPTY

          +
          public static final OperationStatus KEYEMPTY
          +
          The cursor operation was unsuccessful because the current record was + deleted. This can only occur if a Cursor is positioned to an existing + record, then the record is deleted, and then the getCurrent, putCurrent, + or delete method is called.
          +
        • +
        + + + +
          +
        • +

          NOTFOUND

          +
          public static final OperationStatus NOTFOUND
          +
          The requested key/data pair was not found.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static OperationStatus[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (OperationStatus c : OperationStatus.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static OperationStatus valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Enum<OperationStatus>
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/PartialComparator.html b/docs/java/com/sleepycat/je/PartialComparator.html new file mode 100644 index 0000000..70af63a --- /dev/null +++ b/docs/java/com/sleepycat/je/PartialComparator.html @@ -0,0 +1,200 @@ + + + + + +PartialComparator (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Interface PartialComparator

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface PartialComparator
      +
      A tag interface used to mark a B-tree or duplicate comparator class as a + partial comparator. + + Comparators are configured using + DatabaseConfig.setBtreeComparator(java.util.Comparator) or + DatabaseConfig.setBtreeComparator(Class), and + DatabaseConfig.setDuplicateComparator(java.util.Comparator) or + DatabaseConfig.setDuplicateComparator(Class). +

      + As described in the javadoc for these methods, a partial comparator is a + comparator that allows for the keys of a database to be updated, but only + if the updates are not significant with respect to uniqueness and ordering. + Also described is the fact that comparators must be used with great caution, + since a badly behaved comparator can cause B-tree corruption. +

      + Even greater caution is needed when using partial comparators, for several + reasons. Partial comparators are normally used for performance reasons in + certain situations, but the performance trade-offs are very subtle and + difficult to understand. In addition, as of JE 6, this tag interface must + be added to all partial comparator classes so that JE can correctly perform + transaction aborts, while maintaining the last committed key or duplicate + data values properly. In addition, for a database with duplicates + configured, a partial comparator (implementing this tag interface) will + disable optimizations in JE 6 that drastically reduce cleaner costs. +

      + For these reasons, we do not recommend using partial comparators, although + they are supported in order to avoid breaking applications that used them + prior to JE 6. Whenever possible, please avoid using partial comparators.
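      Purely as a hypothetical sketch of the shape such a class takes (the class
      name and the four-byte prefix rule are invented for illustration, and the
      caveats above still apply):

          import com.sleepycat.je.PartialComparator;
          import java.io.Serializable;
          import java.util.Comparator;

          // Hypothetical: orders keys by their first four bytes only, so trailing
          // bytes can change without affecting uniqueness or ordering. Assumes all
          // keys are at least four bytes long.
          public class PrefixComparator
                  implements Comparator<byte[]>, PartialComparator, Serializable {
              @Override
              public int compare(byte[] a, byte[] b) {
                  for (int i = 0; i < 4; i++) {
                      int cmp = (a[i] & 0xff) - (b[i] & 0xff); // unsigned byte order
                      if (cmp != 0) {
                          return cmp;
                      }
                  }
                  return 0; // keys differing only after byte four compare as equal
              }
          }

      Such a class would then be installed via DatabaseConfig.setBtreeComparator(PrefixComparator.class),
      as described above.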

      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/PreloadConfig.Phases.html b/docs/java/com/sleepycat/je/PreloadConfig.Phases.html new file mode 100644 index 0000000..74ac66d --- /dev/null +++ b/docs/java/com/sleepycat/je/PreloadConfig.Phases.html @@ -0,0 +1,343 @@ + + + + + +PreloadConfig.Phases (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum PreloadConfig.Phases

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<PreloadConfig.Phases>
      +
      +
      +
      Enclosing class:
      +
      PreloadConfig
      +
      +
      +
      +
      public static enum PreloadConfig.Phases
      +extends java.lang.Enum<PreloadConfig.Phases>
      +
      Preload progress listeners report this phase value, along with a + count of the number of times that the preload has fetched from disk.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        PRELOAD +
        Preload is in progress and resulted in a fetch from disk.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static PreloadConfig.PhasesvalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static PreloadConfig.Phases[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          PRELOAD

          +
          public static final PreloadConfig.Phases PRELOAD
          +
          Preload is in progress and resulted in a fetch from disk.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static PreloadConfig.Phases[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (PreloadConfig.Phases c : PreloadConfig.Phases.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static PreloadConfig.Phases valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/PreloadConfig.html b/docs/java/com/sleepycat/je/PreloadConfig.html new file mode 100644 index 0000000..5c125c8 --- /dev/null +++ b/docs/java/com/sleepycat/je/PreloadConfig.html @@ -0,0 +1,631 @@ + + + + + +PreloadConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class PreloadConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class PreloadConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      Specifies the attributes of an application-invoked preload operation.
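      Because each mutator below returns this, a configuration can be built
      fluently. A sketch (the limits are hypothetical), assuming an open
      Database db:

          PreloadConfig config = new PreloadConfig()
              .setMaxBytes(64L * 1024 * 1024)  // stop once 64 MB has been loaded into cache
              .setMaxMillisecs(30_000L)        // or after 30 seconds, whichever comes first
              .setLoadLNs(true);               // also load the Leaf Nodes (data values)
          PreloadStats stats = db.preload(config);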
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + +
        Nested Classes 
        Modifier and TypeClass and Description
        static class PreloadConfig.Phases +
        Preload progress listeners report this phase value, along with a + count of the number of times that the preload has fetched from disk.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        PreloadConfig() +
        Default configuration used if null is passed to Database.preload.
        +
        +
      • +
      + + +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          PreloadConfig

          +
          public PreloadConfig()
          +
          Default configuration used if null is passed to Database.preload.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setMaxBytes

          +
          public PreloadConfig setMaxBytes(long maxBytes)
          +
          Configure the maximum number of bytes to preload. + +

          The default is 0 for this class.

          +
          +
          Parameters:
          +
          maxBytes - If the maxBytes parameter is non-zero, a preload will + stop when the cache contains this number of bytes.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getMaxBytes

          +
          public long getMaxBytes()
          +
          Return the number of bytes in the cache to stop the preload at. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          The number of bytes in the cache to stop the preload at.
          +
          +
        • +
        + + + +
          +
        • +

          setMaxMillisecs

          +
          public PreloadConfig setMaxMillisecs(long maxMillisecs)
          +
          Configure the maximum number of milliseconds to execute preload. + +

          The default is 0 for this class.

          +
          +
          Parameters:
          +
          maxMillisecs - If the maxMillisecs parameter is non-zero, a preload + will stop when this amount of time has passed.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getMaxMillisecs

          +
          public long getMaxMillisecs()
          +
          Return the number of millisecs to stop the preload after. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          The number of millisecs to stop the preload after.
          +
          +
        • +
        + + + +
          +
        • +

          setLoadLNs

          +
          public PreloadConfig setLoadLNs(boolean loadLNs)
          +
          Configure the preload load LNs option. + +

          The default is false for this class.

          +
          +
          Parameters:
          +
          loadLNs - If set to true, the preload will load Leaf Nodes (LNs) + containing the data values.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getLoadLNs

          +
          public boolean getLoadLNs()
          +
          Return the configuration of the preload load LNs option.
          +
          +
          Returns:
          +
          The configuration of the preload load LNs option.
          +
          +
        • +
        + + + +
          +
        • +

          setProgressListener

          +
          public PreloadConfig setProgressListener(ProgressListener<PreloadConfig.Phases> progressListener)
          +
          Configure the preload operation to make periodic calls to a ProgressListener to provide feedback on preload progress. + The ProgressListener.progress() method is called each time the preload + must fetch a btree node or data record from disk. +

          + When using progress listeners, review the information at ProgressListener.progress(T, long, long) to avoid any unintended disruption to + replication stream syncup.

          +
          +
          Parameters:
          +
          progressListener - The ProgressListener to callback during + preload.
          +
          +
        • +
        + + + +
          +
        • +

          getProgressListener

          +
          public ProgressListener<PreloadConfig.Phases> getProgressListener()
          +
          Return the ProgressListener for this PreloadConfig.
          +
          +
          Returns:
          +
          the ProgressListener for this PreloadConfig.
          +
          +
        • +
        + + + +
          +
        • +

          setLSNBatchSize

          +
          public PreloadConfig setLSNBatchSize(long lsnBatchSize)
          +
          Set the maximum number of LSNs to gather and sort at any one time. The + default is an unlimited number of LSNs. Setting this lower causes the + preload to use less memory, but it sorts and processes LSNs more + frequently, thereby causing slower performance. Setting this higher will + in general improve performance at the expense of memory. Each LSN uses + 16 bytes of memory.
          +
          +
          Parameters:
          +
          lsnBatchSize - the maximum number of LSNs to accumulate and sort + per batch.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getLSNBatchSize

          +
          public long getLSNBatchSize()
          +
          Preload is implemented to optimize I/O cost by fetching the records of + a Database in disk order, so that disk accesses are sequential rather + than random. LSNs (log sequence numbers) are the disk addresses of + database records. Setting this value causes the preload to process + batches of LSNs rather than all in-memory LSNs at one time, + which bounds the memory usage of + the preload processing, at the expense of preload performance.
          +
          +
          Returns:
          +
          the maximum number of LSNs to be sorted that this + preload is configured for.
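          To make the memory arithmetic above concrete, a hypothetical bounded
          configuration (the values are invented for illustration):

              // 100,000 LSNs x 16 bytes = ~1.6 MB of sort space per batch.
              PreloadConfig bounded = new PreloadConfig()
                  .setLSNBatchSize(100_000L)
                  .setInternalMemoryLimit(32L * 1024 * 1024); // cap non-cache working memory at 32 MB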
          +
          +
        • +
        + + + +
          +
        • +

          setInternalMemoryLimit

          +
          public PreloadConfig setInternalMemoryLimit(long internalMemoryLimit)
          +
          Set the maximum amount of non-JE cache memory that preload can use at + one time. The default is an unlimited amount of memory. Setting this + lower causes the preload to use less memory, but generally results in + slower performance. Setting this higher will often improve performance + at the expense of higher memory utilization.
          +
          +
          Parameters:
          +
          internalMemoryLimit - the maximum number of non-JE cache bytes to + use.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getInternalMemoryLimit

          +
          public long getInternalMemoryLimit()
          +
          Returns the maximum amount of non-JE cache memory that preload can use at + one time.
          +
          +
          Returns:
          +
          the maximum amount of non-JE cache memory that preload can use at + one time.
          +
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public PreloadConfig clone()
          +
          Returns a copy of this configuration object.
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns the values for each configuration attribute.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          Returns:
          +
          the values for each configuration attribute.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/PreloadStats.html b/docs/java/com/sleepycat/je/PreloadStats.html new file mode 100644 index 0000000..1923390 --- /dev/null +++ b/docs/java/com/sleepycat/je/PreloadStats.html @@ -0,0 +1,427 @@ + + + + + +PreloadStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class PreloadStats

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        All Methods Instance Methods Concrete Methods Deprecated Methods 
        Modifier and TypeMethod and Description
        intgetNBINsLoaded() +
        Returns the number of BINs that were loaded into the cache during the + preload() operation.
        +
        intgetNCountMemoryExceeded() +
        Returns the number of times that the internal memory budget + specified by PreloadConfig.setInternalMemoryLimit() was exceeded.
        +
        intgetNDBINsLoaded() +
        Deprecated.  +
        returns zero for data written using JE 5.0 and later, but + may return non-zero values when reading older data.
        +
        +
        intgetNDINsLoaded() +
        Deprecated.  +
        returns zero for data written using JE 5.0 and later, but + may return non-zero values when reading older data.
        +
        +
        intgetNDupCountLNsLoaded() +
        Deprecated.  +
        returns zero for data written using JE 5.0 and later, but + may return non-zero values when reading older data.
        +
        +
        intgetNEmbeddedLNs() +
        Returns the number of embedded LNs encountered during the preload() + operation.
        +
        intgetNINsLoaded() +
        Returns the number of INs that were loaded into the cache during the + preload() operation.
        +
        intgetNLNsLoaded() +
        Returns the number of LNs that were loaded into the cache during the + preload() operation.
        +
        PreloadStatusgetStatus() +
        Returns the PreloadStatus value for the preload() operation.
        +
        java.lang.StringtoString() +
        Returns a String representation of the stats in the form of + <stat>=<value>
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getNINsLoaded

          +
          public int getNINsLoaded()
          +
          Returns the number of INs that were loaded into the cache during the + preload() operation.
          +
        • +
        + + + +
          +
        • +

          getNBINsLoaded

          +
          public int getNBINsLoaded()
          +
          Returns the number of BINs that were loaded into the cache during the + preload() operation.
          +
        • +
        + + + +
          +
        • +

          getNLNsLoaded

          +
          public int getNLNsLoaded()
          +
          Returns the number of LNs that were loaded into the cache during the + preload() operation.
          +
        • +
        + + + +
          +
        • +

          getNEmbeddedLNs

          +
          public int getNEmbeddedLNs()
          +
          Returns the number of embedded LNs encountered during the preload() + operation.
          +
        • +
        + + + +
          +
        • +

          getNDINsLoaded

          +
          public int getNDINsLoaded()
          +
          Deprecated. returns zero for data written using JE 5.0 and later, but + may return non-zero values when reading older data.
          +
        • +
        + + + +
          +
        • +

          getNDBINsLoaded

          +
          public int getNDBINsLoaded()
          +
          Deprecated. returns zero for data written using JE 5.0 and later, but + may return non-zero values when reading older data.
          +
        • +
        + + + +
          +
        • +

          getNDupCountLNsLoaded

          +
          public int getNDupCountLNsLoaded()
          +
          Deprecated. returns zero for data written using JE 5.0 and later, but + may return non-zero values when reading older data.
          +
        • +
        + + + +
          +
        • +

          getNCountMemoryExceeded

          +
          public int getNCountMemoryExceeded()
          +
          Returns the number of times that the internal memory budget + specified by PreloadConfig.setInternalMemoryLimit() was exceeded.
          +
        • +
        + + + +
          +
        • +

          getStatus

          +
          public PreloadStatus getStatus()
          +
          Returns the PreloadStatus value for the preload() operation.
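          A sketch of reading these statistics after a preload (assuming an open
          Database db; the comparison against PreloadStatus.SUCCESS assumes that
          constant from this API):

              PreloadStats stats = db.preload(new PreloadConfig().setLoadLNs(true));
              System.out.println("INs loaded:  " + stats.getNINsLoaded());
              System.out.println("BINs loaded: " + stats.getNBINsLoaded());
              System.out.println("LNs loaded:  " + stats.getNLNsLoaded());
              if (stats.getStatus() != PreloadStatus.SUCCESS) {
                  System.out.println("preload stopped early: " + stats.getStatus());
              }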
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns a String representation of the stats in the form of + <stat>=<value>
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/PreloadStatus.html b/docs/java/com/sleepycat/je/PreloadStatus.html new file mode 100644 index 0000000..952039e --- /dev/null +++ b/docs/java/com/sleepycat/je/PreloadStatus.html @@ -0,0 +1,385 @@ + + + + + +PreloadStatus (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class PreloadStatus

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class PreloadStatus
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      Describes the result of the Database.preload operation.
      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + + + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          PreloadStatus

          +
          public PreloadStatus(java.lang.String statusName)
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ProgressListener.html b/docs/java/com/sleepycat/je/ProgressListener.html new file mode 100644 index 0000000..97162ec --- /dev/null +++ b/docs/java/com/sleepycat/je/ProgressListener.html @@ -0,0 +1,288 @@ + + + + + +ProgressListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Interface ProgressListener<T extends java.lang.Enum<T>>

    +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        booleanprogress(T phase, + long n, + long total) +
        Called by BDB JE to indicate to the user that progress has been + made on a potentially long running or asynchronous operation.
        +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + + + +
          +
        • +

          progress

          +
          boolean progress(T phase,
          +                 long n,
          +                 long total)
          +
          Called by BDB JE to indicate to the user that progress has been + made on a potentially long running or asynchronous operation. +

          + This method should do the minimal amount of work, queuing any resource + intensive operations for processing by another thread before returning + to the caller, so that it does not unduly delay the target operation, + which invokes this method. +

          + The application should also be aware that the method has the potential to + disrupt the reported-upon operation. If progress() throws a + RuntimeException, the operation for which the progress is being reported + will be aborted and the exception propagated back to the original + caller. Also, if progress() returns false, the operation will be + halted. For recovery and syncup listeners, a false return value can + invalidate and close the environment.

          +
          +
          Parameters:
          +
          phase - an enum indicating the phase of the operation for + which progress is being reported.
          +
          n - indicates the number of units that have been processed so far. + If this does not apply, -1 is passed.
          +
          total - indicates the total number of units that will be processed + if it is known by JE. If total is < 0, then the total number is + unknown. When total == n, this indicates that processing of this + operation is 100% complete, even if all previous calls to progress + passed a negative value for total.
          +
          Returns:
          +
          true to continue the operation, false to stop it.
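          A minimal listener honoring this contract (do little work, return true to
          continue), attached through PreloadConfig.setProgressListener as
          documented earlier; a sketch assuming an open Database db:

              PreloadConfig config = new PreloadConfig();
              config.setProgressListener(new ProgressListener<PreloadConfig.Phases>() {
                  @Override
                  public boolean progress(PreloadConfig.Phases phase, long n, long total) {
                      // Keep this cheap; hand any heavy reporting off to another thread.
                      System.out.println(phase + ": " + n + (total < 0 ? "" : "/" + total));
                      return true; // returning false would halt the preload
                  }
              });
              db.preload(config);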
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/Put.html b/docs/java/com/sleepycat/je/Put.html new file mode 100644 index 0000000..a55d75d --- /dev/null +++ b/docs/java/com/sleepycat/je/Put.html @@ -0,0 +1,441 @@ + + + + + +Put (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum Put

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<Put>
      +
      +
      +
      +
      public enum Put
      +extends java.lang.Enum<Put>
      +
      The operation type passed to "put" methods on databases and cursors.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        Enum Constants 
        Enum Constant and Description
        CURRENT +
        Updates the data of the record at the cursor position.
        +
        NO_DUP_DATA +
        Inserts a record in a database with duplicate keys if a record with a + matching key and data is not already present.
        +
        NO_OVERWRITE +
        Inserts a record if a record with a matching key is not already present.
        +
        OVERWRITE +
        Inserts or updates a record depending on whether a matching record is + already present.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static PutvalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static Put[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          OVERWRITE

          +
          public static final Put OVERWRITE
          +
          Inserts or updates a record depending on whether a matching record is + already present. + +

          If the database does not have duplicate keys, a matching record is + defined as one with the same key. The existing record's data will be + replaced. In addition, if a custom key comparator is configured, and the + key bytes are different but considered equal by the comparator, the key + is replaced.

          + +

          If the database does have duplicate keys, a matching record is + defined as one with the same key and data. As above, if a custom key + comparator is configured, and the key bytes are different but considered + equal by the comparator, the key is replaced. In addition, if a custom + duplicate comparator is configured, and the data bytes are different but + considered equal by the comparator, the data is replaced.

          + +

          The operation always succeeds (null is never returned).
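          A sketch tying this to OperationResult.isUpdate() (the key/value bytes
          are hypothetical; db is an open Database):

              DatabaseEntry key = new DatabaseEntry("k".getBytes(StandardCharsets.UTF_8));
              DatabaseEntry data = new DatabaseEntry("v2".getBytes(StandardCharsets.UTF_8));
              OperationResult r = db.put(null, key, data, Put.OVERWRITE, null);
              // OVERWRITE always succeeds, so r is never null here.
              System.out.println(r.isUpdate() ? "replaced an existing record"
                                              : "inserted a new record");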

          +
        • +
        + + + +
          +
        • +

          NO_OVERWRITE

          +
          public static final Put NO_OVERWRITE
          +
          Inserts a record if a record with a matching key is not already present. + +

          If the database has duplicate keys, a record is inserted only if + there are no records with a matching key.

          + +

          The operation does not succeed (null is returned) when an existing + record matches.

          +
        • +
        + + + +
          +
        • +

          NO_DUP_DATA

          +
          public static final Put NO_DUP_DATA
          +
          Inserts a record in a database with duplicate keys if a record with a + matching key and data is not already present. + +

          This operation is not allowed for databases that do not have + duplicate keys.

          + +

          The operation does not succeed (null is returned) when an existing + record matches.

          +
        • +
        + + + +
          +
        • +

          CURRENT

          +
          public static final Put CURRENT
          +
          Updates the data of the record at the cursor position. + +

          If the database does not have duplicate keys, the existing record's + data will be replaced.

          + +

          + If the database does have duplicate keys, the existing data is + replaced but it must be considered equal by the duplicate comparator. + If the data is not considered equal, DuplicateDataException is + thrown. Using the default comparator, the data is considered equal only if + its bytes are equal. Therefore, changing the data is only possible if a + custom duplicate comparator is configured.

          + +

          A partial data item may be + specified to optimize for partial data update.

          + +

          This operation cannot be used to update the key of an existing record + and in fact the key parameter must be null when calling generic put + methods such as + Database.put(Transaction, DatabaseEntry, DatabaseEntry, Put, + WriteOptions) and + Cursor.put(DatabaseEntry, DatabaseEntry, Put, WriteOptions).

          + +

          The operation does not succeed (null is returned) if the record at + the current position has been deleted. This can occur in two cases: 1. + If the record was deleted using this cursor and then accessed. 2. If the + record was not locked by this cursor or transaction, and was deleted by + another thread or transaction after this cursor was positioned on + it.
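          A sketch of an update at the cursor position per the rules above (db is
          an open, non-duplicates Database; the value bytes are hypothetical).
          Note the null key parameter required by Put.CURRENT:

              try (Cursor cursor = db.openCursor(null, null)) {
                  DatabaseEntry key = new DatabaseEntry("k".getBytes(StandardCharsets.UTF_8));
                  DatabaseEntry data = new DatabaseEntry();
                  if (cursor.get(key, data, Get.SEARCH, null) != null) {
                      DatabaseEntry newData = new DatabaseEntry("v2".getBytes(StandardCharsets.UTF_8));
                      OperationResult r = cursor.put(null, newData, Put.CURRENT, null);
                      // r is null if the record was deleted out from under the cursor.
                  }
              }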

          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static Put[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (Put c : Put.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static Put valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ReadOptions.html b/docs/java/com/sleepycat/je/ReadOptions.html new file mode 100644 index 0000000..124134f --- /dev/null +++ b/docs/java/com/sleepycat/je/ReadOptions.html @@ -0,0 +1,388 @@ + + + + + +ReadOptions (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class ReadOptions

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class ReadOptions
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      Options for calling methods that read records.
      +
      +
      Since:
      +
      7.0
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        ReadOptions() +
        Constructs a ReadOptions object with default values for all properties.
        +
        +
      • +
      + + +
    • +
    +
    +
    + +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/RecoveryProgress.html b/docs/java/com/sleepycat/je/RecoveryProgress.html new file mode 100644 index 0000000..bf905ed --- /dev/null +++ b/docs/java/com/sleepycat/je/RecoveryProgress.html @@ -0,0 +1,620 @@ + + + + + +RecoveryProgress (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum RecoveryProgress

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        Enum Constants 
        Enum Constant and Description
        BECOME_CONSISTENT +
        For replicated systems only: if a replica, process enough of the + replication stream so that the environment fulfills the required + consistency policy, as defined by parameters passed to the + ReplicatedEnvironment constructor.
        +
        CKPT +
        Perform a checkpoint to make all the work of this environment + startup persistent, so it is not repeated in future startups.
        +
        FIND_END_OF_LOG +
        Find the last valid entry in the database log.
        +
        FIND_LAST_CKPT +
        Find the last complete checkpoint in the database log.
        +
        FIND_MASTER +
        For replicated systems only: locate the master of the + replication group by querying others in the group, and holding an + election if necessary.
        +
        POPULATE_EXPIRATION_PROFILE +
        Populate internal metadata which stores information about the + expiration time/data windows (histogram) of each log file, for + efficient log cleaning.
        +
        POPULATE_UTILIZATION_PROFILE +
        Populate internal metadata which stores information about the + utilization level of each log file, for efficient log cleaning.
        +
        READ_DATA_INFO +
        Read log entries that pertain to the database indices.
        +
        READ_DBMAP_INFO +
        Read log entries that pertain to the database map, which is an + internal index of all databases.
        +
        RECOVERY_FINISHED +
        Basic recovery is completed, and the environment is able to + service operations.
        +
        REDO_DATA_INFO +
        Redo log entries that pertain to the database indices.
        +
        REDO_DATA_RECORDS +
        Repeat committed data operations, such as inserts, updates + and deletes.
        +
        REDO_DBMAP_INFO +
        Redo log entries that pertain to the database map, which is an + internal index of all databases.
        +
        REDO_DBMAP_RECORDS +
        Redo committed database creations, deletions and truncations.
        +
        REMOVE_TEMP_DBS +
        Remove temporary databases created by the application that + are no longer valid.
        +
        UNDO_DATA_RECORDS +
        Rollback uncommitted data operations, such as inserts, updates + and deletes.
        +
        UNDO_DBMAP_RECORDS +
        Rollback uncommitted database creations, deletions and truncations.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static RecoveryProgressvalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static RecoveryProgress[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          FIND_END_OF_LOG

          +
          public static final RecoveryProgress FIND_END_OF_LOG
          +
          Find the last valid entry in the database log.
          +
        • +
        + + + +
          +
        • +

          FIND_LAST_CKPT

          +
          public static final RecoveryProgress FIND_LAST_CKPT
          +
          Find the last complete checkpoint in the database log.
          +
        • +
        + + + +
          +
        • +

          READ_DBMAP_INFO

          +
          public static final RecoveryProgress READ_DBMAP_INFO
          +
          Read log entries that pertain to the database map, which is an + internal index of all databases.
          +
        • +
        + + + +
          +
        • +

          REDO_DBMAP_INFO

          +
          public static final RecoveryProgress REDO_DBMAP_INFO
          +
          Redo log entries that pertain to the database map, which is an + internal index of all databases.
          +
        • +
        + + + +
          +
        • +

          UNDO_DBMAP_RECORDS

          +
          public static final RecoveryProgress UNDO_DBMAP_RECORDS
          +
          Rollback uncommitted database creations, deletions and truncations.
          +
        • +
        + + + +
          +
        • +

          REDO_DBMAP_RECORDS

          +
          public static final RecoveryProgress REDO_DBMAP_RECORDS
          +
          Redo committed database creations, deletions and truncations.
          +
        • +
        + + + +
          +
        • +

          READ_DATA_INFO

          +
          public static final RecoveryProgress READ_DATA_INFO
          +
          Read log entries that pertain to the database indices.
          +
        • +
        + + + +
          +
        • +

          REDO_DATA_INFO

          +
          public static final RecoveryProgress REDO_DATA_INFO
          +
          Redo log entries that pertain to the database indices.
          +
        • +
        + + + +
          +
        • +

          UNDO_DATA_RECORDS

          +
          public static final RecoveryProgress UNDO_DATA_RECORDS
          +
          Rollback uncommitted data operations, such as inserts, updates + and deletes.
          +
        • +
        + + + +
          +
        • +

          REDO_DATA_RECORDS

          +
          public static final RecoveryProgress REDO_DATA_RECORDS
          +
          Repeat committed data operations, such as inserts, updates + and deletes.
          +
        • +
        + + + +
          +
        • +

          POPULATE_UTILIZATION_PROFILE

          +
          public static final RecoveryProgress POPULATE_UTILIZATION_PROFILE
          +
          Populate internal metadata which stores information about the + utilization level of each log file, for efficient log cleaning.
          +
        • +
        + + + +
          +
        • +

          POPULATE_EXPIRATION_PROFILE

          +
          public static final RecoveryProgress POPULATE_EXPIRATION_PROFILE
          +
          Populate internal metadata which stores information about the + expiration time/data windows (histogram) of each log file, for + efficient log cleaning.
          +
          +
          Since:
          +
          6.5
          +
          +
        • +
        + + + +
          +
        • +

          REMOVE_TEMP_DBS

          +
          public static final RecoveryProgress REMOVE_TEMP_DBS
          +
          Remove temporary databases created by the application that + are no longer valid.
          +
        • +
        + + + +
          +
        • +

          CKPT

          +
          public static final RecoveryProgress CKPT
          +
          Perform a checkpoint to make all the work of this environment + startup persistent, so it is not repeated in future startups.
          +
        • +
        + + + +
          +
        • +

          RECOVERY_FINISHED

          +
          public static final RecoveryProgress RECOVERY_FINISHED
          +
          Basic recovery is completed, and the environment is able to + service operations.
          +
        • +
        + + + +
          +
        • +

          FIND_MASTER

          +
          public static final RecoveryProgress FIND_MASTER
          +
          For replicated systems only: locate the master of the + replication group by querying others in the group, and holding an + election if necessary.
          +
        • +
        + + + +
          +
        • +

          BECOME_CONSISTENT

          +
          public static final RecoveryProgress BECOME_CONSISTENT
          +
          For replicated systems only: if a replica, process enough of the + replication stream so that the environment fulfills the required + consistency policy, as defined by parameters passed to the + ReplicatedEnvironment constructor.
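          As a sketch of observing these phases during environment startup: this
          assumes the EnvironmentConfig.setRecoveryProgressListener method present
          in recent JE releases, and the environment directory path is hypothetical.

              EnvironmentConfig envConfig = new EnvironmentConfig();
              envConfig.setAllowCreate(true);
              // Assumption: setRecoveryProgressListener accepts a ProgressListener<RecoveryProgress>.
              envConfig.setRecoveryProgressListener(new ProgressListener<RecoveryProgress>() {
                  @Override
                  public boolean progress(RecoveryProgress phase, long n, long total) {
                      System.out.println("startup phase: " + phase);
                      return true; // a false return can invalidate and close the environment
                  }
              });
              Environment env = new Environment(new java.io.File("/tmp/je-env"), envConfig);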
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static RecoveryProgress[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (RecoveryProgress c : RecoveryProgress.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

public static RecoveryProgress valueOf(java.lang.String name)

Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)

Parameters:
name - the name of the enum constant to be returned.
Returns:
the enum constant with the specified name
Throws:
java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
java.lang.NullPointerException - if the argument is null

diff --git a/docs/java/com/sleepycat/je/ReplicaConsistencyPolicy.html b/docs/java/com/sleepycat/je/ReplicaConsistencyPolicy.html
new file mode 100644
index 0000000..fb9bcdb
--- /dev/null
+++ b/docs/java/com/sleepycat/je/ReplicaConsistencyPolicy.html
@@ -0,0 +1,270 @@

ReplicaConsistencyPolicy (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je

    Interface ReplicaConsistencyPolicy


        Method Summary

Modifier and Type       Method and Description
java.lang.String        getName()
                        Returns the name used to identify the policy.
long                    getTimeout(java.util.concurrent.TimeUnit unit)
                        The timeout associated with the consistency policy.
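As a sketch of how a policy is typically obtained and queried, using the built-in TimeConsistencyPolicy from the com.sleepycat.je.rep package; the lag and timeout values are illustrative:

    import java.util.concurrent.TimeUnit;

    import com.sleepycat.je.ReplicaConsistencyPolicy;
    import com.sleepycat.je.rep.TimeConsistencyPolicy;

    // Reads on a replica must reflect the master's state as of at most
    // 3 seconds ago; wait up to 10 seconds for the replica to catch up.
    ReplicaConsistencyPolicy policy =
        new TimeConsistencyPolicy(3, TimeUnit.SECONDS, 10, TimeUnit.SECONDS);

    System.out.println(policy.getName());                         // policy identifier
    System.out.println(policy.getTimeout(TimeUnit.MILLISECONDS)); // 10000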

diff --git a/docs/java/com/sleepycat/je/RunRecoveryException.html b/docs/java/com/sleepycat/je/RunRecoveryException.html
new file mode 100644
index 0000000..2eaf7bb
--- /dev/null
+++ b/docs/java/com/sleepycat/je/RunRecoveryException.html
@@ -0,0 +1,263 @@

RunRecoveryException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je

    Class RunRecoveryException


        Method Summary


          Methods inherited from class java.lang.Throwable

addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

          Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

diff --git a/docs/java/com/sleepycat/je/SecondaryConfig.html b/docs/java/com/sleepycat/je/SecondaryConfig.html
new file mode 100644
index 0000000..e336ae5
--- /dev/null
+++ b/docs/java/com/sleepycat/je/SecondaryConfig.html
@@ -0,0 +1,864 @@

SecondaryConfig (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je

    Class SecondaryConfig

All Implemented Interfaces:
java.lang.Cloneable

public class SecondaryConfig
extends DatabaseConfig
The configuration properties of a SecondaryDatabase extend those of a primary Database. The secondary database configuration is specified when calling Environment.openSecondaryDatabase.

      To create a configuration object with default attributes:

    SecondaryConfig config = new SecondaryConfig();

      To set custom attributes:

    SecondaryConfig config = new SecondaryConfig();
    config.setAllowCreate(true);
    config.setSortedDuplicates(true);
    config.setKeyCreator(new MyKeyCreator());
See Also:
Environment.openSecondaryDatabase, SecondaryDatabase
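Putting the pieces together, a sketch of opening a secondary index over an existing primary database; env is an already-open transactional Environment, the database names are illustrative, and MyKeyCreator is the hypothetical key creator sketched under setKeyCreator below:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;
    import com.sleepycat.je.SecondaryConfig;
    import com.sleepycat.je.SecondaryDatabase;

    DatabaseConfig primaryConfig = new DatabaseConfig();
    primaryConfig.setAllowCreate(true);
    primaryConfig.setTransactional(true);
    Database primary = env.openDatabase(null, "person", primaryConfig);

    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setAllowCreate(true);
    secConfig.setTransactional(true);
    secConfig.setSortedDuplicates(true); // many records may share a secondary key
    secConfig.setKeyCreator(new MyKeyCreator());
    SecondaryDatabase byLastName = env.openSecondaryDatabase(
        null, "personByLastName", primary, secConfig);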

        Constructor Detail


          SecondaryConfig

public SecondaryConfig()

Creates an instance with the system's default settings.

        Method Detail


          setKeyCreator

public SecondaryConfig setKeyCreator(SecondaryKeyCreator keyCreator)

Specifies the user-supplied object used for creating single-valued secondary keys.

Unless the primary database is read-only, a key creator is required when opening a secondary database. Either a KeyCreator or MultiKeyCreator must be specified, but both may not be specified.

WARNING: Key creator instances are shared by multiple threads and key creator methods are called without any special synchronization. Therefore, key creators must be thread safe. In general no shared state should be used and any caching of computed values must be done with proper synchronization.

Parameters:
keyCreator - the user-supplied object used for creating single-valued secondary keys.
Returns:
this
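As an illustration, a minimal single-valued key creator; the "firstName|lastName" record layout is a made-up assumption (real applications typically use their own bindings):

    import java.nio.charset.StandardCharsets;

    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.SecondaryDatabase;
    import com.sleepycat.je.SecondaryKeyCreator;

    // Hypothetical creator: the primary record's data is assumed to be a
    // UTF-8 string "firstName|lastName"; the secondary key is the lastName
    // portion. It is stateless, and therefore trivially thread safe.
    public class MyKeyCreator implements SecondaryKeyCreator {
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result) {
            String record = new String(data.getData(), data.getOffset(),
                                       data.getSize(), StandardCharsets.UTF_8);
            int sep = record.indexOf('|');
            if (sep < 0) {
                return false; // no last name: omit this record from the index
            }
            result.setData(record.substring(sep + 1)
                                 .getBytes(StandardCharsets.UTF_8));
            return true;
        }
    }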

          setMultiKeyCreator

public SecondaryConfig setMultiKeyCreator(SecondaryMultiKeyCreator multiKeyCreator)

Specifies the user-supplied object used for creating multi-valued secondary keys.

Unless the primary database is read-only, a key creator is required when opening a secondary database. Either a KeyCreator or MultiKeyCreator must be specified, but both may not be specified.

WARNING: Key creator instances are shared by multiple threads and key creator methods are called without any special synchronization. Therefore, key creators must be thread safe. In general no shared state should be used and any caching of computed values must be done with proper synchronization.

Parameters:
multiKeyCreator - the user-supplied object used for creating multi-valued secondary keys.
Returns:
this
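A comparable sketch for the multi-valued case; the comma-separated tag-list record layout is again a made-up assumption:

    import java.nio.charset.StandardCharsets;
    import java.util.Set;

    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.SecondaryDatabase;
    import com.sleepycat.je.SecondaryMultiKeyCreator;

    // Hypothetical multi-key creator: the record data is assumed to be a
    // comma-separated UTF-8 tag list; each tag becomes one secondary key.
    public class MyTagsKeyCreator implements SecondaryMultiKeyCreator {
        public void createSecondaryKeys(SecondaryDatabase secondary,
                                        DatabaseEntry key,
                                        DatabaseEntry data,
                                        Set<DatabaseEntry> results) {
            String tags = new String(data.getData(), data.getOffset(),
                                     data.getSize(), StandardCharsets.UTF_8);
            for (String tag : tags.split(",")) {
                if (!tag.isEmpty()) {
                    results.add(new DatabaseEntry(
                        tag.getBytes(StandardCharsets.UTF_8)));
                }
            }
        }
    }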

          setAllowPopulate

public SecondaryConfig setAllowPopulate(boolean allowPopulate)

Specifies whether automatic population of the secondary is allowed.

If automatic population is allowed, when the secondary database is opened it is checked to see if it is empty. If it is empty, the primary database is read in its entirety and keys are added to the secondary database using the information read from the primary.

If this property is set to true and the database is transactional, the population of the secondary will be done within the explicit or auto-commit transaction that is used to open the database.

Parameters:
allowPopulate - whether automatic population of the secondary is allowed.
Returns:
this

          getAllowPopulate

public boolean getAllowPopulate()

Returns whether automatic population of the secondary is allowed. If setAllowPopulate(boolean) has not been called, this method returns false.

Returns:
whether automatic population of the secondary is allowed.
See Also:
setAllowPopulate(boolean)

          setForeignKeyDatabase

public SecondaryConfig setForeignKeyDatabase(Database foreignKeyDatabase)

Defines a foreign key integrity constraint for a given foreign key database.

If this property is non-null, a record must be present in the specified foreign database for every record in the secondary database, where the secondary key value is equal to the foreign database key value. Whenever a record is to be added to the secondary database, the secondary key is used as a lookup key in the foreign database. If the key is not found in the foreign database, a ForeignConstraintException is thrown.

The foreign database must not have duplicates allowed. If duplicates are allowed, an IllegalArgumentException will be thrown when the secondary database is opened.

Parameters:
foreignKeyDatabase - the database used to check the foreign key integrity constraint, or null if no foreign key constraint should be checked.
Returns:
this

          getForeignKeyDatabase

public Database getForeignKeyDatabase()

Returns the database used to check the foreign key integrity constraint, or null if no foreign key constraint will be checked.

Returns:
the foreign key database, or null.
See Also:
setForeignKeyDatabase(com.sleepycat.je.Database)

          setForeignKeyDeleteAction

public SecondaryConfig setForeignKeyDeleteAction(ForeignKeyDeleteAction foreignKeyDeleteAction)

Specifies the action taken when a referenced record in the foreign key database is deleted.

This property is ignored if the foreign key database property is null.

Parameters:
foreignKeyDeleteAction - the action taken when a referenced record in the foreign key database is deleted.
Returns:
this
See Also:
setForeignKeyDatabase
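A sketch combining the two foreign-key properties above; customerDb and orderDb are assumed to be open Database handles, and OrderCustomerKeyCreator is a hypothetical key creator that extracts the customer id from an order record:

    import com.sleepycat.je.ForeignKeyDeleteAction;
    import com.sleepycat.je.SecondaryConfig;
    import com.sleepycat.je.SecondaryDatabase;

    // Every secondary key of "orderByCustomer" must exist as a key in
    // customerDb; with ABORT, deleting a still-referenced customer fails.
    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setAllowCreate(true);
    secConfig.setKeyCreator(new OrderCustomerKeyCreator());
    secConfig.setForeignKeyDatabase(customerDb);
    secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.ABORT);
    SecondaryDatabase ordersByCustomer = env.openSecondaryDatabase(
        null, "orderByCustomer", orderDb, secConfig);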

          setForeignMultiKeyNullifier

public SecondaryConfig setForeignMultiKeyNullifier(ForeignMultiKeyNullifier foreignMultiKeyNullifier)

Specifies the user-supplied object used for setting multi-valued foreign keys to null.

If the foreign key database property is non-null and the foreign key delete action is NULLIFY, this property is required to be non-null; otherwise, this property is ignored.

WARNING: Key nullifier instances are shared by multiple threads and key nullifier methods are called without any special synchronization. Therefore, key nullifiers must be thread safe. In general no shared state should be used and any caching of computed values must be done with proper synchronization.

Parameters:
foreignMultiKeyNullifier - the user-supplied object used for setting multi-valued foreign keys to null.
Returns:
this
See Also:
ForeignKeyDeleteAction.NULLIFY, setForeignKeyDatabase

          setImmutableSecondaryKey

public SecondaryConfig setImmutableSecondaryKey(boolean immutableSecondaryKey)

Specifies whether the secondary key is immutable.

Specifying that a secondary key is immutable can be used to optimize updates when the secondary key in a primary record will never be changed after that primary record is inserted. For immutable secondary keys, a best effort is made to avoid calling SecondaryKeyCreator.createSecondaryKey when a primary record is updated. This optimization may reduce the overhead of an update operation significantly if the createSecondaryKey operation is expensive.

Be sure to set this property to true only if the secondary key in the primary record is never changed. If this rule is violated, the secondary index will become corrupted, that is, it will become out of sync with the primary.

Parameters:
immutableSecondaryKey - whether the secondary key is immutable.
Returns:
this

          toString

public java.lang.String toString()

Returns the values for each configuration attribute.

Overrides:
toString in class DatabaseConfig
Returns:
the values for each configuration attribute.

diff --git a/docs/java/com/sleepycat/je/SecondaryConstraintException.html b/docs/java/com/sleepycat/je/SecondaryConstraintException.html
new file mode 100644
index 0000000..b46f1d1
--- /dev/null
+++ b/docs/java/com/sleepycat/je/SecondaryConstraintException.html
@@ -0,0 +1,274 @@

SecondaryConstraintException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je

    Class SecondaryConstraintException


diff --git a/docs/java/com/sleepycat/je/SecondaryCursor.html b/docs/java/com/sleepycat/je/SecondaryCursor.html
new file mode 100644
index 0000000..6432a2d
--- /dev/null
+++ b/docs/java/com/sleepycat/je/SecondaryCursor.html
@@ -0,0 +1,2142 @@

SecondaryCursor (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je

    Class SecondaryCursor

All Implemented Interfaces:
ForwardCursor, java.io.Closeable, java.lang.AutoCloseable

public class SecondaryCursor
extends Cursor
A database cursor for a secondary database. Cursors are not thread safe and the application is responsible for coordinating any multithreaded access to a single cursor object.

Secondary cursors are returned by SecondaryDatabase.openCursor and SecondaryDatabase.openSecondaryCursor. The distinguishing characteristics of a secondary cursor are:

• Direct calls to put() methods on a secondary cursor are prohibited.
• The delete(com.sleepycat.je.WriteOptions) method of a secondary cursor will delete the primary record as well as all its associated secondary records.
• Calls to all get methods will return the data from the associated primary database.
• Additional get method signatures are provided to return the primary key in an additional pKey parameter.
• Calls to dup(boolean) will return a SecondaryCursor.

      To obtain a secondary cursor with default attributes:

    SecondaryCursor cursor = myDb.openSecondaryCursor(txn, null);

      To customize the attributes of a cursor, use a CursorConfig object.

    CursorConfig config = new CursorConfig();
    config.setReadUncommitted(true);
    SecondaryCursor cursor = myDb.openSecondaryCursor(txn, config);

        Method Detail


          getPrimaryDatabase

public Database getPrimaryDatabase()

Returns the primary Database associated with this cursor.

Calling this method is the equivalent of the following expression:

    getDatabase().getPrimaryDatabase()

Returns:
The primary Database associated with this cursor.

          dup

public SecondaryCursor dup(boolean samePosition)

Returns a new SecondaryCursor for the same transaction as the original cursor.

Overrides:
dup in class Cursor
Parameters:
samePosition - If true, the newly created cursor is initialized to refer to the same position in the database as the original cursor (if any) and hold the same locks (if any). If false, or the original cursor does not hold a database position and locks, the returned cursor is uninitialized and will behave like a newly created cursor.
Returns:
A new cursor with the same transaction and locker ID as the original cursor.

          delete

public OperationResult delete(WriteOptions options)

Delete the record to which the cursor refers from the primary database and all secondary indices.

This method behaves as if Database.delete(Transaction, DatabaseEntry, WriteOptions) were called for the primary database, using the primary key associated with this cursor position.

The cursor position is unchanged after a delete, and subsequent calls to cursor functions expecting the cursor to refer to an existing record will fail.

WARNING: Unlike read operations using a SecondaryCursor, write operations like this one are deadlock-prone.

Overrides:
delete in class Cursor
Parameters:
options - the WriteOptions, or null to use default options.
Returns:
the OperationResult if the record is deleted, else null if the record at the cursor position has already been deleted.
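A sketch of the delete-through-the-index pattern this method enables; cursor is assumed to be an open SecondaryCursor and the key value is illustrative:

    import java.nio.charset.StandardCharsets;

    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.OperationResult;
    import com.sleepycat.je.OperationStatus;

    // Position on the first record whose secondary key is "Smith", then
    // delete the primary record and all of its index entries.
    DatabaseEntry secKey = new DatabaseEntry(
        "Smith".getBytes(StandardCharsets.UTF_8));
    DatabaseEntry pKey = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    if (cursor.getSearchKey(secKey, pKey, data, null) ==
        OperationStatus.SUCCESS) {
        OperationResult result = cursor.delete(null); // null = default options
        // result is null only if the record was already deleted
    }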

          put

public OperationResult put(DatabaseEntry key,
                           DatabaseEntry data,
                           Put putType,
                           WriteOptions options)

This operation is not allowed on a secondary cursor. UnsupportedOperationException will always be thrown by this method. The corresponding method on the primary cursor should be used instead.

Overrides:
put in class Cursor
Parameters:
key - the key used as input. Must be null when putType is Put.CURRENT.
data - the data used as input. May be partial only when putType is Put.CURRENT.
putType - the Put operation type. May not be null.
options - the WriteOptions, or null to use default options.
Returns:
the OperationResult if the record is written, else null.

          put

public OperationStatus put(DatabaseEntry key,
                           DatabaseEntry data)

This operation is not allowed on a secondary cursor. UnsupportedOperationException will always be thrown by this method. The corresponding method on the primary cursor should be used instead.

Overrides:
put in class Cursor
Parameters:
key - the key used as input.
data - the data used as input.
Returns:
OperationStatus.SUCCESS.

          putCurrent

public OperationStatus putCurrent(DatabaseEntry data)

This operation is not allowed on a secondary cursor. UnsupportedOperationException will always be thrown by this method. The corresponding method on the primary cursor should be used instead.

Overrides:
putCurrent in class Cursor
Parameters:
data - the data used as input. A partial data item may be specified to optimize for partial data update.
Returns:
OperationStatus.KEYEMPTY if the key/pair at the cursor position has been deleted; otherwise, OperationStatus.SUCCESS.

          get

public OperationResult get(DatabaseEntry key,
                           DatabaseEntry data,
                           Get getType,
                           ReadOptions options)

Moves the cursor to a record according to the specified Get type.

The difference between this method and the method it overrides in Cursor is that the key here is defined as the secondary record's key, and the data is defined as the primary record's data. In addition, two operations are not supported by this method: Get.SEARCH_BOTH and Get.SEARCH_BOTH_GTE.

Specified by:
get in interface ForwardCursor
Overrides:
get in class Cursor
Parameters:
key - the key input or output parameter, depending on getType.
data - the data input or output parameter, depending on getType.
getType - the Get operation type. May not be null.
options - the ReadOptions, or null to use default options.
Returns:
the OperationResult if the record requested is found, else null.

          get

public OperationResult get(DatabaseEntry key,
                           DatabaseEntry pKey,
                           DatabaseEntry data,
                           Get getType,
                           ReadOptions options)

Moves the cursor to a record according to the specified Get type.

If the operation succeeds, the record at the resulting cursor position will be locked according to the lock mode specified, the key, primary key, and/or data will be returned via the (non-null) DatabaseEntry parameters, and a non-null OperationResult will be returned. If the operation fails because the record requested is not found, null is returned.

The following table lists each allowed operation and whether the key, pKey and data parameters are input or output parameters. Also specified is whether the cursor must be initialized (positioned on a record) before calling this method. See the individual Get operations for more information.
Get operation        Description                                      'key'         'pKey'        'data'   Cursor position
                                                                                                           must be initialized?
Get.SEARCH           Searches using an exact match by key.            input         output        output   no
Get.SEARCH_BOTH      Searches using an exact match by key and pKey.   input         input         output   no
Get.SEARCH_GTE       Searches using a GTE match by key.               input/output  output        output   no
Get.SEARCH_BOTH_GTE  Searches using an exact match by key and a
                     GTE match by pKey.                               input         input/output  output   no
Get.CURRENT          Accesses the current record.                     output        output        output   yes
Get.FIRST            Finds the first record in the database.          output        output        output   no
Get.LAST             Finds the last record in the database.           output        output        output   no
Get.NEXT             Moves to the next record.                        output        output        output   no**
Get.NEXT_DUP         Moves to the next record with the same key.      output        output        output   yes
Get.NEXT_NO_DUP      Moves to the next record with a different key.   output        output        output   no**
Get.PREV             Moves to the previous record.                    output        output        output   no**
Get.PREV_DUP         Moves to the previous record with the same key.  output        output        output   yes
Get.PREV_NO_DUP      Moves to the previous record with a different
                     key.                                             output        output        output   no**

** - For these 'next' and 'previous' operations the cursor may be uninitialized, in which case the cursor will be moved to the first or last record, respectively.

Parameters:
key - the secondary key input or output parameter, depending on getType.
pKey - the primary key input or output parameter, depending on getType.
data - the primary data output parameter.
getType - the Get operation type. May not be null.
options - the ReadOptions, or null to use default options.
Returns:
the OperationResult if the record requested is found, else null.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, the cursor is uninitialized (not positioned on a record) and this is not permitted (see above), or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified. This includes passing a null getType, a null input key/pKey parameter, an input key/pKey parameter with a null data array, a partial key/pKey input parameter, and specifying a lock mode of READ_COMMITTED.
Since:
7.0
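A sketch of this pKey form in use, looking up one record by secondary key; cursor is assumed to be an open SecondaryCursor and the key value is illustrative:

    import java.nio.charset.StandardCharsets;

    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Get;
    import com.sleepycat.je.OperationResult;

    DatabaseEntry secKey = new DatabaseEntry(
        "Smith".getBytes(StandardCharsets.UTF_8));
    DatabaseEntry pKey = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    OperationResult result = cursor.get(secKey, pKey, data, Get.SEARCH, null);
    if (result != null) {
        // pKey now holds the primary key; data holds the primary record's data.
    }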

          getCurrent

public OperationStatus getCurrent(DatabaseEntry key,
                                  DatabaseEntry pKey,
                                  DatabaseEntry data,
                                  LockMode lockMode)

Returns the key/data pair to which the cursor refers.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.KEYEMPTY if the key/pair at the cursor position has been deleted; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the cursor is uninitialized (not positioned on a record), or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getFirst

public OperationStatus getFirst(DatabaseEntry key,
                                DatabaseEntry pKey,
                                DatabaseEntry data,
                                LockMode lockMode)

Move the cursor to the first key/data pair of the database, and return that pair. If the first key has duplicate values, the first data item in the set of duplicates is returned.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getLast

public OperationStatus getLast(DatabaseEntry key,
                               DatabaseEntry pKey,
                               DatabaseEntry data,
                               LockMode lockMode)

Move the cursor to the last key/data pair of the database, and return that pair. If the last key has duplicate values, the last data item in the set of duplicates is returned.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getNext

public OperationStatus getNext(DatabaseEntry key,
                               DatabaseEntry data,
                               LockMode lockMode)

Moves the cursor to the next key/data pair and returns that pair.

Calling this method is equivalent to calling Cursor.get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with Get.NEXT.

If the cursor is not yet initialized, move the cursor to the first key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the next key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change.

The difference between this method and the method it overrides in Cursor is that the key here is defined as the secondary record's key, and the data is defined as the primary record's data.

Specified by:
getNext in interface ForwardCursor
Overrides:
getNext in class Cursor
Parameters:
key - the key returned as output.
data - the data returned as output.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.

          getNext

public OperationStatus getNext(DatabaseEntry key,
                               DatabaseEntry pKey,
                               DatabaseEntry data,
                               LockMode lockMode)

Move the cursor to the next key/data pair and return that pair. If the matching key has duplicate values, the first data item in the set of duplicates is returned.

If the cursor is not yet initialized, move the cursor to the first key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the next key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.
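A sketch of a full index scan using this method; secDb is assumed to be an open SecondaryDatabase, and try-with-resources relies on the cursor being AutoCloseable:

    import java.nio.charset.StandardCharsets;

    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.LockMode;
    import com.sleepycat.je.OperationStatus;
    import com.sleepycat.je.SecondaryCursor;

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry pKey = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    try (SecondaryCursor cursor = secDb.openSecondaryCursor(null, null)) {
        while (cursor.getNext(key, pKey, data, LockMode.DEFAULT) ==
               OperationStatus.SUCCESS) {
            // key = secondary key, pKey = primary key, data = primary data
            System.out.println(new String(key.getData(),
                                          StandardCharsets.UTF_8));
        }
    }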

          getNextDup

public OperationStatus getNextDup(DatabaseEntry key,
                                  DatabaseEntry pKey,
                                  DatabaseEntry data,
                                  LockMode lockMode)

If the next key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the next key/data pair of the database and return that pair.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the cursor is uninitialized (not positioned on a record), or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getNextNoDup

public OperationStatus getNextNoDup(DatabaseEntry key,
                                    DatabaseEntry data,
                                    LockMode lockMode)

Moves the cursor to the next non-duplicate key/data pair and returns that pair. If the matching key has duplicate values, the first data item in the set of duplicates is returned.

Calling this method is equivalent to calling Cursor.get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with Get.NEXT_NO_DUP.

If the cursor is not yet initialized, move the cursor to the first key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the next non-duplicate key of the database, and that key/data pair is returned.

The difference between this method and the method it overrides in Cursor is that the key here is defined as the secondary record's key, and the data is defined as the primary record's data.

Overrides:
getNextNoDup in class Cursor
Parameters:
key - the key returned as output.
data - the data returned as output.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.

          getNextNoDup

public OperationStatus getNextNoDup(DatabaseEntry key,
                                    DatabaseEntry pKey,
                                    DatabaseEntry data,
                                    LockMode lockMode)

Move the cursor to the next non-duplicate key/data pair and return that pair. If the matching key has duplicate values, the first data item in the set of duplicates is returned.

If the cursor is not yet initialized, move the cursor to the first key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the next non-duplicate key of the database, and that key/data pair is returned.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getPrev

public OperationStatus getPrev(DatabaseEntry key,
                               DatabaseEntry data,
                               LockMode lockMode)

Moves the cursor to the previous key/data pair and returns that pair.

Calling this method is equivalent to calling Cursor.get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with Get.PREV.

If the cursor is not yet initialized, move the cursor to the last key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the previous key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change.

The difference between this method and the method it overrides in Cursor is that the key here is defined as the secondary record's key, and the data is defined as the primary record's data.

Overrides:
getPrev in class Cursor
Parameters:
key - the key returned as output.
data - the data returned as output.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.

          getPrev

public OperationStatus getPrev(DatabaseEntry key,
                               DatabaseEntry pKey,
                               DatabaseEntry data,
                               LockMode lockMode)

Move the cursor to the previous key/data pair and return that pair. If the matching key has duplicate values, the last data item in the set of duplicates is returned.

If the cursor is not yet initialized, move the cursor to the last key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the previous key/data pair of the database, and that pair is returned. In the presence of duplicate key values, the value of the key may not change.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getPrevDup

public OperationStatus getPrevDup(DatabaseEntry key,
                                  DatabaseEntry pKey,
                                  DatabaseEntry data,
                                  LockMode lockMode)

If the previous key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the previous key/data pair of the database and return that pair.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the cursor is uninitialized (not positioned on a record), or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getPrevNoDup

public OperationStatus getPrevNoDup(DatabaseEntry key,
                                    DatabaseEntry data,
                                    LockMode lockMode)

Moves the cursor to the previous non-duplicate key/data pair and returns that pair. If the matching key has duplicate values, the last data item in the set of duplicates is returned.

Calling this method is equivalent to calling Cursor.get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with Get.PREV_NO_DUP.

If the cursor is not yet initialized, move the cursor to the last key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the previous non-duplicate key of the database, and that key/data pair is returned.

The difference between this method and the method it overrides in Cursor is that the key here is defined as the secondary record's key, and the data is defined as the primary record's data.

Overrides:
getPrevNoDup in class Cursor
Parameters:
key - the key returned as output.
data - the data returned as output.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.

          getPrevNoDup

public OperationStatus getPrevNoDup(DatabaseEntry key,
                                    DatabaseEntry pKey,
                                    DatabaseEntry data,
                                    LockMode lockMode)

Move the cursor to the previous non-duplicate key/data pair and return that pair. If the matching key has duplicate values, the last data item in the set of duplicates is returned.

If the cursor is not yet initialized, move the cursor to the last key/data pair of the database, and return that pair. Otherwise, the cursor is moved to the previous non-duplicate key of the database, and that key/data pair is returned.

Parameters:
key - the secondary key returned as output. Its byte array does not need to be initialized by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getSearchKey

public OperationStatus getSearchKey(DatabaseEntry key,
                                    DatabaseEntry pKey,
                                    DatabaseEntry data,
                                    LockMode lockMode)

Move the cursor to the given key of the database, and return the datum associated with the given key. If the matching key has duplicate values, the first data item in the set of duplicates is returned.

Parameters:
key - the secondary key used as input. It must be initialized with a non-null byte array by the caller.
pKey - the primary key returned as output. Its byte array does not need to be initialized by the caller.
data - the primary data returned as output. Its byte array does not need to be initialized by the caller. A partial data item may be specified to optimize for key only or partial data retrieval.
lockMode - the locking attributes; if null, default attributes are used. LockMode.READ_COMMITTED is not allowed.
Returns:
OperationStatus.NOTFOUND if no matching key/data pair is found; otherwise, OperationStatus.SUCCESS.
Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.lang.IllegalStateException - if the cursor or database has been closed, or the non-transactional cursor was created in a different thread.
java.lang.IllegalArgumentException - if an invalid parameter is specified, for example, if a DatabaseEntry parameter is null or does not contain a required non-null byte array.

          getSearchKeyRange

          +
          public OperationStatus getSearchKeyRange(DatabaseEntry key,
          +                                         DatabaseEntry data,
          +                                         LockMode lockMode)
          +
          Moves the cursor to the closest matching key of the database, and + returns the data item associated with the matching key. If the matching + key has duplicate values, the first data item in the set of duplicates + is returned. + +

          Calling this method is equivalent to calling Cursor.get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) with + Get.SEARCH_GTE.

          + +

          The returned key/data pair is for the smallest key greater than or + equal to the specified key (as determined by the key comparison + function), permitting partial key matches and range searches.

The difference between this method and the method it overrides in Cursor is that the key here is defined as the secondary record's key, and the data is defined as the primary record's data.
          +
          +
          Overrides:
          +
          getSearchKeyRange in class Cursor
          +
          Parameters:
          +
          key - the key used as + input and returned as output.
          +
          data - the data returned as + output.
          +
          lockMode - the locking attributes; if null, default attributes + are used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          +
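As a sketch of the range-search behavior described above (handle and key names are illustrative assumptions, not from the Javadoc):

    // Assumes: SecondaryCursor cursor is open. Visit all records whose
    // secondary key is >= "m" in the key comparison order.
    DatabaseEntry key = new DatabaseEntry(
        "m".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    DatabaseEntry data = new DatabaseEntry();
    OperationStatus status = cursor.getSearchKeyRange(key, data, null);
    while (status == OperationStatus.SUCCESS) {
        // key holds the matched secondary key; data holds the primary data.
        status = cursor.getNext(key, data, null);
    }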
        • +
        + + + +
          +
        • +

          getSearchKeyRange

          +
          public OperationStatus getSearchKeyRange(DatabaseEntry key,
          +                                         DatabaseEntry pKey,
          +                                         DatabaseEntry data,
          +                                         LockMode lockMode)
          +
Moves the cursor to the closest matching key of the database and returns the data item associated with the matching key. If the matching key has duplicate values, the first data item in the set of duplicates is returned.

          The returned key/data pair is for the smallest key greater than or + equal to the specified key (as determined by the key comparison + function), permitting partial key matches and range searches.

          +
          +
          Parameters:
          +
          key - the secondary key used as input and returned as output. It + must be initialized with a non-null byte array by the caller.
          +
          pKey - the primary key returned as output. Its byte array does not + need to be initialized by the caller.
          +
          data - the primary data returned as output. Its byte array does + not need to be initialized by the caller. + A partial data item may be + specified to optimize for key only or partial data retrieval.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, if a DatabaseEntry parameter is null or does not contain a + required non-null byte array.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getSearchBoth

          +
          public OperationStatus getSearchBoth(DatabaseEntry key,
          +                                     DatabaseEntry pKey,
          +                                     DatabaseEntry data,
          +                                     LockMode lockMode)
          +
Moves the cursor to the specified secondary and primary key, where both the secondary and primary key items must match.
          +
          +
          Parameters:
          +
          key - the secondary key used as input. It must be initialized with + a non-null byte array by the caller.
          +
          pKey - the primary key used as input. It must be initialized with + a non-null byte array by the caller.
          +
          data - the primary data returned as output. Its byte array does + not need to be initialized by the caller.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, if a DatabaseEntry parameter is null or does not contain a + required non-null byte array.
          +
          +
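A small sketch of the exact secondary-key/primary-key match (sk, pk and the open secCursor are assumed to be initialized by the caller):

    // sk and pk must be initialized with non-null byte arrays.
    DatabaseEntry data = new DatabaseEntry();
    OperationStatus status = secCursor.getSearchBoth(sk, pk, data, null);
    boolean indexed = (status == OperationStatus.SUCCESS);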
        • +
        + + + + + + + +
          +
        • +

          getSearchBothRange

          +
          public OperationStatus getSearchBothRange(DatabaseEntry key,
          +                                          DatabaseEntry pKey,
          +                                          DatabaseEntry data,
          +                                          LockMode lockMode)
          +
Moves the cursor to the specified secondary key and the closest matching primary key of the database.

          In the case of any database supporting sorted duplicate sets, the + returned key/data pair is for the smallest primary key greater than or + equal to the specified primary key (as determined by the key comparison + function), permitting partial matches and range searches in duplicate + data sets.

          +
          +
          Parameters:
          +
          key - the secondary key used as input. It must be initialized with + a non-null byte array by the caller.
          +
          pKey - the primary key used as input and returned as output. It + must be initialized with a non-null byte array by the caller.
          +
          data - the primary data returned as output. Its byte array does + not need to be initialized by the caller.
          +
          lockMode - the locking attributes; if null, default attributes are + used. LockMode.READ_COMMITTED is not allowed.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the cursor or database has been closed, + or the non-transactional cursor was created in a different thread.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, if a DatabaseEntry parameter is null or does not contain a + required non-null byte array.
          +
          +
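A sketch of a range search within one secondary key's duplicate set (illustrative names; assumes sk and pk are initialized entries and secCursor is open):

    // Position on secondary key sk, at the smallest primary key >= pk.
    // pk is input and is overwritten with the matched primary key.
    DatabaseEntry data = new DatabaseEntry();
    OperationStatus status = secCursor.getSearchBothRange(sk, pk, data, null);
    if (status == OperationStatus.SUCCESS) {
        // pk now holds the matched primary key; data holds its data.
    }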
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/SecondaryDatabase.html b/docs/java/com/sleepycat/je/SecondaryDatabase.html new file mode 100644 index 0000000..21bd5a2 --- /dev/null +++ b/docs/java/com/sleepycat/je/SecondaryDatabase.html @@ -0,0 +1,1171 @@
SecondaryDatabase (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je
    +

    Class SecondaryDatabase

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class SecondaryDatabase
      +extends Database
      +
      A secondary database handle. + +

      Secondary databases are opened with Environment.openSecondaryDatabase and are + always associated with a single primary database. The distinguishing + characteristics of a secondary database are:

      + +
        +
Records are automatically added to, updated in, and deleted from a secondary database when records are added, modified, or deleted in the primary database. Direct calls to put() methods on a secondary database are prohibited.
      • +
The delete method of a secondary database will delete the primary record as well as all of its associated secondary records.
      • +
      • Calls to all get() methods will return the data from the + associated primary database.
      • +
      • Additional get() method signatures are provided to return + the primary key in an additional pKey parameter.
      • +
      • Calls to openCursor will return a SecondaryCursor, which itself has get() methods that return + the data of the primary database and additional get() method + signatures for returning the primary key.
      • +
      +

      Before opening or creating a secondary database you must implement + the SecondaryKeyCreator or SecondaryMultiKeyCreator + interface.

      + +

      For example, to create a secondary database that supports duplicates:

      + +
      +     Database primaryDb; // The primary database must already be open.
      +     SecondaryKeyCreator keyCreator; // Your key creator implementation.
      +     SecondaryConfig secConfig = new SecondaryConfig();
      +     secConfig.setAllowCreate(true);
      +     secConfig.setSortedDuplicates(true);
      +     secConfig.setKeyCreator(keyCreator);
      +     SecondaryDatabase newDb = env.openSecondaryDatabase(transaction,
      +                                                         "myDatabaseName",
      +                                                         primaryDb,
+                                                         secConfig);
      + 
      + +

      If a primary database is to be associated with one or more secondary + databases, it may not be configured for duplicates.

      + +

      WARNING: The associations between primary and secondary databases + are not stored persistently. Whenever a primary database is opened for + write access by the application, the appropriate associated secondary + databases should also be opened by the application. This is necessary to + ensure data integrity when changes are made to the primary database. If the + secondary database is not opened, it will not be updated when the primary is + updated, and the references between the databases will become invalid. + (Note that this warning does not apply when using the DPL, which does store secondary relationships + persistently.)

      + +

      Special considerations for using Secondary + Databases with and without Transactions

      + +

      Normally, during a primary database write operation (insert, update or + delete), all associated secondary databases are also updated. However, when + an exception occurs during the write operation, the updates may be + incomplete. If the databases are transactional, this is handled by aborting + the transaction to undo the incomplete operation. If an auto-commit + transaction is used (null is passed for the transaction), the transaction + will be aborted automatically. If an explicit transaction is used, it + must be aborted by the application caller after the exception is caught.

      + +
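A minimal sketch of the abort-on-exception pattern just described (env, primaryDb, key and data are assumed to exist and be transactional):

    // key and data are assumed initialized DatabaseEntry values.
    Transaction txn = env.beginTransaction(null, null);
    boolean committed = false;
    try {
        primaryDb.put(txn, key, data); // secondaries are updated automatically
        txn.commit();
        committed = true;
    } finally {
        if (!committed) {
            txn.abort(); // undo a possibly incomplete write operation
        }
    }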

      However, if the databases are non-transactional, integrity problems can + result when an exception occurs during the write operation. Because the + write operation is not made atomic by a transaction, references between the + databases will become invalid if the operation is incomplete. This results + in a SecondaryIntegrityException when attempting to access the + databases later.

      + +

      A secondary integrity problem is persistent; it cannot be resolved by + reopening the databases or the environment. The only way to resolve the + problem is to restore the environment from a valid backup, or, if the + integrity of the primary database is assumed, to remove and recreate all + secondary databases.

      + +

      Therefore, secondary databases and indexes should always be used in + conjunction with transactional databases and stores. Without transactions, + it is the responsibility of the application to handle the results of the + incomplete write operation or to take steps to prevent this situation from + happening in the first place.

      + +

      The following exceptions may be thrown during a write operation, and may + cause an integrity problem in the absence of transactions.

      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          close

          +
          public void close()
          +
Closes a secondary database and disassociates it from its primary database. A secondary database should be closed before closing its associated primary database. Discards the database handle.

          + When closing the last open handle for a deferred-write database, any + cached database information is flushed to disk as if Database.sync() were + called. +

          + The database handle should not be closed while any other handle that + refers to it is not yet closed; for example, database handles should not + be closed while cursor handles into the database remain open, or + transactions that include operations on the database have not yet been + committed or aborted. Specifically, this includes Cursor and Transaction handles. +

          + When multiple threads are using the Database handle concurrently, only a single thread may call this + method. +

          + When called on a database that is the primary database for a secondary + index, the primary database should be closed only after all secondary + indices which reference it have been closed. +

          + The database handle may not be accessed again after this method is + called, regardless of the method's success or failure, with one + exception: the close method itself may be called any number of + times.

          + +

          WARNING: To guard against memory leaks, the application should + discard all references to the closed handle. While BDB makes an effort + to discard references from closed objects to the allocated memory for an + environment, this behavior is not guaranteed. The safe course of action + for an application is to discard all references to closed BDB + objects.

          + +
          +
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Overrides:
          +
          close in class Database
          +
          See Also:
          +
          DatabaseConfig.setDeferredWrite
          +
          +
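For example (a sketch with assumed handle names), the close order is secondaries first, then the primary:

    secDb.close();     // close each secondary index first
    primaryDb.close(); // then close the primary database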
        • +
        + + + +
          +
        • +

          getPrimaryDatabase

          +
          public Database getPrimaryDatabase()
          +
          Returns the primary database associated with this secondary database.
          +
          +
          Returns:
          +
          the primary database associated with this secondary database.
          +
          +
        • +
        + + + +
          +
        • +

          getSecondaryDatabases

          +
          public java.util.List<SecondaryDatabase> getSecondaryDatabases()
          +
          Returns an empty list, since this database is itself a secondary + database.
          +
          +
          Overrides:
          +
          getSecondaryDatabases in class Database
          +
          +
        • +
        + + + +
          +
        • +

          getSecondaryConfig

          +
          public SecondaryConfig getSecondaryConfig()
          +
          Deprecated. As of JE 4.0.13, replaced by getConfig().
          +
          Returns a copy of the secondary configuration of this database.
          +
          +
          Returns:
          +
          a copy of the secondary configuration of this database.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          +
        • +
        + + + +
          +
        • +

          getConfig

          +
          public SecondaryConfig getConfig()
          +
          Returns a copy of the secondary configuration of this database.
          +
          +
          Overrides:
          +
          getConfig in class Database
          +
          Returns:
          +
          a copy of the secondary configuration of this database.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          openCursor

          +
          public SecondaryCursor openCursor(Transaction txn,
          +                                  CursorConfig cursorConfig)
          +
Obtains a cursor on the database, returning a SecondaryCursor.
          +
          +
          Overrides:
          +
          openCursor in class Database
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the database is non-transactional, null must be + specified. For a transactional database, the transaction is optional + for read-only access and required for read-write access.
          +
          cursorConfig - The cursor attributes. If null, default attributes + are used.
          +
          Returns:
          +
          A database cursor.
          +
          +
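A sketch of a full scan through the returned SecondaryCursor (secDb is an assumed open handle; cursors are Closeable, so try-with-resources applies):

    try (SecondaryCursor c = secDb.openCursor(null, null)) {
        DatabaseEntry sk = new DatabaseEntry();
        DatabaseEntry pk = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        while (c.getNext(sk, pk, data, null) == OperationStatus.SUCCESS) {
            // one (secondary key, primary key, primary data) triple per step
        }
    }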
        • +
        + + + +
          +
        • +

          delete

          +
          public OperationResult delete(Transaction txn,
          +                              DatabaseEntry key,
          +                              WriteOptions options)
          +
          Deletes the record associated with the given secondary key. In the + presence of duplicate keys, all primary records associated with the + given secondary key will be deleted. + +

          When multiple primary records are deleted, the expiration time in the + returned result is that of the last record deleted.

          + +

          When the primary records are deleted, their associated secondary + records are deleted as if Database.delete(com.sleepycat.je.Transaction, com.sleepycat.je.DatabaseEntry, com.sleepycat.je.WriteOptions) were called. This + includes, but is not limited to, the secondary record referenced by the + given key.

          +
          +
          Overrides:
          +
          delete in class Database
          +
          Parameters:
          +
          key - the key used as + input. + +
          +
          txn - For a transactional database, an explicit transaction may + be specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          options - the WriteOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record is deleted, else null if the + given key was not found in the database.
          +
          +
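A small sketch (handle and key names assumed): deleting every primary record indexed under one secondary key, using auto-commit:

    DatabaseEntry sk = new DatabaseEntry(
        "obsolete".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    OperationResult r = secDb.delete(null, sk, null);
    if (r == null) {
        // the secondary key was not present in the database
    }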
        • +
        + + + + + + + +
          +
        • +

          get

          +
          public OperationResult get(Transaction txn,
          +                           DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           Get getType,
          +                           ReadOptions options)
          +
Retrieves a record according to the specified Get type.

The difference between this method and the method it overrides in Database is that the key here is defined as the secondary record's key, and the data is defined as the primary record's data.

          +
          +
          Overrides:
          +
          get in class Database
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified to transaction-protect the operation, or null may be specified + to perform the operation without transaction protection. For a + non-transactional database, null must be specified.
          +
          key - the key input parameter.
          +
          data - the data input or output parameter, depending on getType.
          +
          getType - the Get operation type. May not be null.
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record requested is found, else null.
          +
          +
        • +
        + + + +
          +
        • +

          get

          +
          public OperationResult get(Transaction txn,
          +                           DatabaseEntry key,
          +                           DatabaseEntry pKey,
          +                           DatabaseEntry data,
          +                           Get getType,
          +                           ReadOptions options)
          +
          Retrieves a record according to the specified Get type. + +

          If the operation succeeds, the record will be locked according to the + lock mode specified, the key, primary + key and/or data will be returned via the (non-null) DatabaseEntry + parameters, and a non-null OperationResult will be returned. If the + operation fails because the record requested is not found, null is + returned.

          + +

          The following table lists each allowed operation and whether the key, + pKey and data parameters are input + or output parameters. See the individual Get operations for + more information.

          + +
Get operation     Description                                      'key' parameter   'pKey' parameter   'data' parameter
Get.SEARCH        Searches using an exact match by key.            input             output             output
Get.SEARCH_BOTH   Searches using an exact match by key and data.   input             input              output
          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified to transaction-protect the operation, or null may be specified + to perform the operation without transaction protection. For a + non-transactional database, null must be specified.
          +
          key - the secondary key input parameter.
          +
          pKey - the primary key input or output parameter, depending on + getType.
          +
          data - the primary data output parameter.
          +
          getType - the Get operation type. May not be null.
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record requested is found, else null.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the database has been closed.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified. + This includes passing a null getType, a null input key/pKey parameter, + an input key/pKey parameter with a null data array, a partial key/pKey + input parameter, and specifying a lock mode of READ_COMMITTED.
          +
          Since:
          +
          7.0
          +
          +
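A sketch of the Get.SEARCH row from the table above (secDb and the key value are assumptions):

    DatabaseEntry sk = new DatabaseEntry(
        "smith".getBytes(java.nio.charset.StandardCharsets.UTF_8)); // input
    DatabaseEntry pk = new DatabaseEntry();                         // output
    DatabaseEntry data = new DatabaseEntry();                       // output
    OperationResult result = secDb.get(null, sk, pk, data, Get.SEARCH, null);
    if (result != null) {
        // pk and data hold the primary record for the matched secondary key.
    }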
        • +
        + + + +
          +
        • +

          get

          +
          public OperationStatus get(Transaction txn,
          +                           DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           LockMode lockMode)
          +
          Description copied from class: Database
          +
          Retrieves the key/data pair with the given key. If the matching key has + duplicate values, the first data item in the set of duplicates is + returned. Retrieval of duplicates requires the use of Cursor + operations. + +

          Calling this method is equivalent to calling Database.get(Transaction, DatabaseEntry, DatabaseEntry, Get, ReadOptions) with + Get.SEARCH.

          +
          +
          Overrides:
          +
          get in class Database
          +
          Parameters:
          +
          key - the secondary key used as input. It must be initialized with + a non-null byte array by the caller.
          +
          data - the primary data returned as output. Its byte array does + not need to be initialized by the caller. + +
          +
          txn - For a transactional database, an explicit transaction may be + specified to transaction-protect the operation, or null may be specified + to perform the operation without transaction protection. For a + non-transactional database, null must be specified.
          +
          lockMode - the locking attributes; if null, default attributes are + used.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getSearchBoth

          +
          public OperationStatus getSearchBoth(Transaction txn,
          +                                     DatabaseEntry key,
          +                                     DatabaseEntry data,
          +                                     LockMode lockMode)
          +
          This operation is not allowed with this method signature. UnsupportedOperationException will always be thrown by this method. + The corresponding method with the pKey parameter should be + used instead.
          +
          +
          Overrides:
          +
          getSearchBoth in class Database
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified to transaction-protect the operation, or null may be specified + to perform the operation without transaction protection. For a + non-transactional database, null must be specified.
          +
          key - the key used as + input.
          +
          data - the data used as + input.
          +
          lockMode - the locking attributes; if null, default attributes are + used.
          +
          Returns:
          +
          OperationStatus.NOTFOUND if no matching key/data pair is found; + otherwise, OperationStatus.SUCCESS.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          put

          +
          public OperationResult put(Transaction txn,
          +                           DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           Put putType,
          +                           WriteOptions options)
          +
          This operation is not allowed on a secondary database. UnsupportedOperationException will always be thrown by this method. + The corresponding method on the primary database should be used instead.
          +
          +
          Overrides:
          +
          put in class Database
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          key - the key used as + input.
          +
          data - the data used as + input.
          +
          putType - the Put operation type. May not be null.
          +
          options - the WriteOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if the record is written, else null.
          +
          +
        • +
        + + + +
          +
        • +

          put

          +
          public OperationStatus put(Transaction txn,
          +                           DatabaseEntry key,
          +                           DatabaseEntry data)
          +
          This operation is not allowed on a secondary database. UnsupportedOperationException will always be thrown by this method. + The corresponding method on the primary database should be used instead.
          +
          +
          Overrides:
          +
          put in class Database
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
key - the key used as input.
          +
          data - the data used as + input.
          +
          Returns:
          +
          OperationStatus.SUCCESS.
          +
          +
        • +
        + + + +
          +
        • +

          putNoOverwrite

          +
          public OperationStatus putNoOverwrite(Transaction txn,
          +                                      DatabaseEntry key,
          +                                      DatabaseEntry data)
          +
          This operation is not allowed on a secondary database. UnsupportedOperationException will always be thrown by this method. + The corresponding method on the primary database should be used instead.
          +
          +
          Overrides:
          +
          putNoOverwrite in class Database
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
key - the key used as input.
          +
          data - the data used as + input.
          +
          Returns:
          +
          OperationStatus.KEYEXIST if the key already appears in the database, + else OperationStatus.SUCCESS
          +
          +
        • +
        + + + +
          +
        • +

          putNoDupData

          +
          public OperationStatus putNoDupData(Transaction txn,
          +                                    DatabaseEntry key,
          +                                    DatabaseEntry data)
          +
          This operation is not allowed on a secondary database. UnsupportedOperationException will always be thrown by this method. + The corresponding method on the primary database should be used instead.
          +
          +
          Overrides:
          +
          putNoDupData in class Database
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
key - the key used as input.
          +
          data - the data used as + input.
          +
          Returns:
          +
          OperationStatus.KEYEXIST if the key/data pair already appears in the + database, else OperationStatus.SUCCESS
          +
          +
        • +
        + + + +
          +
        • +

          join

          +
          public JoinCursor join(Cursor[] cursors,
          +                       JoinConfig config)
          +
          This operation is not allowed on a secondary database. UnsupportedOperationException will always be thrown by this method. + The corresponding method on the primary database should be used instead.
          +
          +
          Overrides:
          +
          join in class Database
          +
          Parameters:
          +
          cursors - an array of cursors associated with this primary + database.
          +
          config - The join attributes. If null, default attributes are + used.
          +
          Returns:
          +
          a specialized cursor that returns the results of the equality + join operation.
          +
          See Also:
          +
          JoinCursor
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/SecondaryIntegrityException.html b/docs/java/com/sleepycat/je/SecondaryIntegrityException.html new file mode 100644 index 0000000..c9c4da2 --- /dev/null +++ b/docs/java/com/sleepycat/je/SecondaryIntegrityException.html @@ -0,0 +1,300 @@
SecondaryIntegrityException (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je
    +

    Class SecondaryIntegrityException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class SecondaryIntegrityException
      +extends SecondaryReferenceException
      +
      Thrown when an integrity problem is detected while accessing a secondary + database, including access to secondaries while writing to a primary + database. Secondary integrity problems are normally caused by the use of + secondaries without transactions. + +

      The Transaction handle is invalidated as a result of this + exception. In addition, the corrupt index (secondary database) is marked + as corrupt in memory. All subsequent access to the index will throw + SecondaryIntegrityException. To correct the problem, the + application may perform a full restore (an HA NetworkRestore or restore from backup) or rebuild + the corrupt index.

      + +

      Some possible causes of a secondary integrity exception are listed + below. Note that only the first item -- the use of a non-transactional + store -- is applicable when using the DPL. + All other items below do not apply to the use of the DPL, because the DPL + ensures that secondary databases are configured and managed correctly.

      +
        +
1. The use of non-transactional databases or stores can cause secondary corruption as described in Special considerations for using Secondary Databases with and without Transactions. Secondary databases and indexes should always be used in conjunction with transactional databases and stores.
2. Secondary corruption can be caused by an incorrectly implemented secondary key creator method, for example, one which uses mutable state information or is not properly synchronized. When the DPL is not used, the application is responsible for correctly implementing the key creator.
3. Secondary corruption can be caused by failing to open a secondary database before writing to the primary database, by writing to a secondary database directly using a Database handle, or by truncating or removing a primary database without also truncating or removing all secondary databases. When the DPL is not used, the application is responsible for managing associated databases correctly.

        +
      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    + +
    +
diff --git a/docs/java/com/sleepycat/je/SecondaryKeyCreator.html b/docs/java/com/sleepycat/je/SecondaryKeyCreator.html new file mode 100644 index 0000000..4ab7e9f --- /dev/null +++ b/docs/java/com/sleepycat/je/SecondaryKeyCreator.html @@ -0,0 +1,342 @@
SecondaryKeyCreator (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je
    +

    Interface SecondaryKeyCreator

    +
    +
    +
    +
      +
    • +
      +
      All Known Implementing Classes:
      +
      SerialSerialKeyCreator, TupleSerialKeyCreator, TupleSerialMarshalledKeyCreator, TupleTupleKeyCreator, TupleTupleMarshalledKeyCreator
      +
      +
      +
      +
      public interface SecondaryKeyCreator
      +
      The interface implemented for extracting single-valued secondary keys from + primary records. + +

      The key creator object is specified by calling SecondaryConfig.setKeyCreator. The secondary + database configuration is specified when calling Environment.openSecondaryDatabase.

      + +

      For example:

      + +
      +     class MyKeyCreator implements SecondaryKeyCreator {
      +         public boolean createSecondaryKey(SecondaryDatabase secondary,
      +                                             DatabaseEntry key,
      +                                             DatabaseEntry data,
      +                                             DatabaseEntry result) {
      +             //
      +             // DO HERE: Extract the secondary key from the primary key and
      +             // data, and set the secondary key into the result parameter.
      +             //
      +             return true;
      +         }
      +     }
      +     ...
      +     SecondaryConfig secConfig = new SecondaryConfig();
      +     secConfig.setKeyCreator(new MyKeyCreator());
      +     // Now pass secConfig to Environment.openSecondaryDatabase
      + 
      + +

      Use this interface when zero or one secondary key is present in a single + primary record, in other words, for many-to-one and one-to-one + relationships. When more than one secondary key may be present (for + many-to-many and one-to-many relationships), use the SecondaryMultiKeyCreator interface instead. The table below summarizes how + to create all four variations of relationships.

      +
Relationship   Interface                  Duplicates   Example
One-to-one     SecondaryKeyCreator        No           A person record with a unique social security number key.
Many-to-one    SecondaryKeyCreator        Yes          A person record with a non-unique employer key.
One-to-many    SecondaryMultiKeyCreator   No           A person record with multiple unique email address keys.
Many-to-many   SecondaryMultiKeyCreator   Yes          A person record with multiple non-unique organization keys.
      + +
      + +

To configure a database for duplicates, pass true to DatabaseConfig.setSortedDuplicates(boolean).

      + +

      WARNING: Key creator instances are shared by multiple threads + and key creator methods are called without any special synchronization. + Therefore, key creators must be thread safe. In general no shared state + should be used and any caching of computed values must be done with proper + synchronization.

      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          createSecondaryKey

          +
          boolean createSecondaryKey(SecondaryDatabase secondary,
          +                           DatabaseEntry key,
          +                           DatabaseEntry data,
          +                           DatabaseEntry result)
          +
          Creates a secondary key entry, given a primary key and data entry. + +

          A secondary key may be derived from the primary key, primary data, or + a combination of the primary key and data. For secondary keys that are + optional, the key creator method may return false and the key/data pair + will not be indexed. To ensure the integrity of a secondary database + the key creator method must always return the same result for a given + set of input parameters.

          + +

          A RuntimeException may be thrown by this method if an error + occurs attempting to create the secondary key. This exception will be + thrown by the API method currently in progress, for example, a put method. However, this will cause the write operation + to be incomplete. When databases are not configured to be + transactional, caution should be used to avoid integrity problems. See + Special considerations for + using Secondary Databases with and without Transactions.

          +
          +
          Parameters:
          +
          secondary - the database to which the secondary key will be + added. This parameter is passed for informational purposes but is not + commonly used. This parameter is always non-null.
          +
          key - the primary key entry. This parameter must not be modified + by this method. This parameter is always non-null.
          +
          data - the primary data entry. This parameter must not be modified + by this method. If SecondaryConfig#setExtractFromPrimaryKeyOnly + is configured as true, the data param may be either null + or non-null, and the implementation is expected to ignore it; otherwise, + this parameter is always non-null.
          +
          result - the secondary key created by this method. This parameter + is always non-null.
          +
          Returns:
          +
          true if a key was created, or false to indicate that the key is + not present.
          +
          +
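To make the contract concrete, a hedged example under an assumed record layout (the first four bytes of the primary data serve as the secondary key); it is illustrative, not the library's own code:

    class FieldKeyCreator implements SecondaryKeyCreator {
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result) {
            if (data.getSize() < 4) {
                return false; // optional key: this record is not indexed
            }
            byte[] field = new byte[4];
            System.arraycopy(data.getData(), data.getOffset(), field, 0, 4);
            result.setData(field);
            return true; // same inputs always yield the same key
        }
    }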
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/SecondaryMultiKeyCreator.html b/docs/java/com/sleepycat/je/SecondaryMultiKeyCreator.html new file mode 100644 index 0000000..28a15e6 --- /dev/null +++ b/docs/java/com/sleepycat/je/SecondaryMultiKeyCreator.html @@ -0,0 +1,342 @@
SecondaryMultiKeyCreator (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je
    +

    Interface SecondaryMultiKeyCreator

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface SecondaryMultiKeyCreator
      +
      The interface implemented for extracting multi-valued secondary keys from + primary records. + +

      The key creator object is specified by calling SecondaryConfig.setMultiKeyCreator. The + secondary database configuration is specified when calling Environment.openSecondaryDatabase.

      + +

      For example:

      + +
      +     class MyMultiKeyCreator implements SecondaryMultiKeyCreator {
      +         public void createSecondaryKeys(SecondaryDatabase secondary,
      +                                         DatabaseEntry key,
      +                                         DatabaseEntry data,
      +                                         Set<DatabaseEntry> results) {
      +             //
      +             // DO HERE: Extract the secondary keys from the primary key and
      +             // data.  For each key extracted, create a DatabaseEntry and add
      +             // it to the results set.
      +             //
      +         }
      +     }
      +     ...
      +     SecondaryConfig secConfig = new SecondaryConfig();
      +     secConfig.setMultiKeyCreator(new MyMultiKeyCreator());
      +     // Now pass secConfig to Environment.openSecondaryDatabase
      + 
      + +

      Use this interface when any number of secondary keys may be present in a + single primary record, in other words, for many-to-many and one-to-many + relationships. When only zero or one secondary key is present (for + many-to-one and one-to-one relationships) you may use the SecondaryKeyCreator interface instead. The table below summarizes how to + create all four variations of relationships.

      +
Relationship   Interface                  Duplicates   Example
One-to-one     SecondaryKeyCreator        No           A person record with a unique social security number key.
Many-to-one    SecondaryKeyCreator        Yes          A person record with a non-unique employer key.
One-to-many    SecondaryMultiKeyCreator   No           A person record with multiple unique email address keys.
Many-to-many   SecondaryMultiKeyCreator   Yes          A person record with multiple non-unique organization keys.
      + +
      + +

To configure a database for duplicates, pass true to DatabaseConfig.setSortedDuplicates(boolean).

      + +

Note that SecondaryMultiKeyCreator may also be used for single-key secondaries (many-to-one and one-to-one); in this case, at most a single key is added to the results set. SecondaryMultiKeyCreator is only slightly less efficient than SecondaryKeyCreator in that two or three temporary sets must be created to hold the results. See SecondaryConfig.

      + +

      WARNING: Key creator instances are shared by multiple threads + and key creator methods are called without any special synchronization. + Therefore, key creators must be thread safe. In general no shared state + should be used and any caching of computed values must be done with proper + synchronization.

      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          createSecondaryKeys

          +
          void createSecondaryKeys(SecondaryDatabase secondary,
          +                         DatabaseEntry key,
          +                         DatabaseEntry data,
          +                         java.util.Set<DatabaseEntry> results)
          +
          Creates a secondary key entry, given a primary key and data entry. + +

          A secondary key may be derived from the primary key, primary data, or + a combination of the primary key and data. Zero or more secondary keys + may be derived from the primary record and returned in the results + parameter. To ensure the integrity of a secondary database the key + creator method must always return the same results for a given set of + input parameters.

          + +

          A RuntimeException may be thrown by this method if an error + occurs attempting to create the secondary key. This exception will be + thrown by the API method currently in progress, for example, a put method. However, this will cause the write operation + to be incomplete. When databases are not configured to be + transactional, caution should be used to avoid integrity problems. See + Special considerations for + using Secondary Databases with and without Transactions.

          +
          +
          Parameters:
          +
          secondary - the database to which the secondary key will be + added. This parameter is passed for informational purposes but is not + commonly used. This parameter is always non-null.
          +
          key - the primary key entry. This parameter must not be modified + by this method. This parameter is always non-null.
          +
          data - the primary data entry. This parameter must not be modified + by this method. If SecondaryConfig#setExtractFromPrimaryKeyOnly + is configured as true, the data param may be either null + or non-null, and the implementation is expected to ignore it; otherwise, + this parameter is always non-null.
          +
results - the set to contain the secondary key DatabaseEntry objects created by this method.
          +
          +
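A hedged illustration under an assumed layout (the primary data is a comma-separated UTF-8 tag list; each tag becomes one secondary key):

    class TagKeyCreator implements SecondaryMultiKeyCreator {
        public void createSecondaryKeys(SecondaryDatabase secondary,
                                        DatabaseEntry key,
                                        DatabaseEntry data,
                                        java.util.Set<DatabaseEntry> results) {
            String tags = new String(data.getData(), data.getOffset(),
                data.getSize(), java.nio.charset.StandardCharsets.UTF_8);
            for (String tag : tags.split(",")) {
                if (!tag.isEmpty()) {
                    results.add(new DatabaseEntry(tag.getBytes(
                        java.nio.charset.StandardCharsets.UTF_8)));
                }
            }
        }
    }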
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/SecondaryReferenceException.html b/docs/java/com/sleepycat/je/SecondaryReferenceException.html new file mode 100644 index 0000000..4f17298 --- /dev/null +++ b/docs/java/com/sleepycat/je/SecondaryReferenceException.html @@ -0,0 +1,365 @@
SecondaryReferenceException (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je
    +

    Class SecondaryReferenceException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + +
Modifier and Type    Method and Description
long                 getExpirationTime()
                     Returns the expiration time of the record being accessed during the failure.
DatabaseEntry        getPrimaryKey()
                     Returns the primary key being accessed during the failure.
java.lang.String     getSecondaryDatabaseName()
                     Returns the name of the secondary database being accessed during the failure.
DatabaseEntry        getSecondaryKey()
                     Returns the secondary key being accessed during the failure.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getSecondaryDatabaseName

          +
          public java.lang.String getSecondaryDatabaseName()
          +
          Returns the name of the secondary database being accessed during the + failure.
          +
        • +
        + + + +
          +
        • +

          getSecondaryKey

          +
          public DatabaseEntry getSecondaryKey()
          +
Returns the secondary key being accessed during the failure. Note that in some cases, the returned secondary key can be null.
          +
        • +
        + + + +
          +
        • +

          getPrimaryKey

          +
          public DatabaseEntry getPrimaryKey()
          +
          Returns the primary key being accessed during the failure. Note that + in some cases, the returned primary key can be null.
          +
        • +
        + + + +
          +
        • +

          getExpirationTime

          +
          public long getExpirationTime()
          +
          Returns the expiration time of the record being accessed during the + failure.
          +
          +
          Since:
          +
          7.0
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/Sequence.html b/docs/java/com/sleepycat/je/Sequence.html new file mode 100644 index 0000000..bef3665 --- /dev/null +++ b/docs/java/com/sleepycat/je/Sequence.html @@ -0,0 +1,409 @@
Sequence (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je
    +

    Class Sequence

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class Sequence
      +extends java.lang.Object
      +implements java.io.Closeable
      +
      A Sequence handle is used to manipulate a sequence record in a + database. Sequence handles are opened using the Database.openSequence method.
      +
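A minimal sketch of opening a sequence (db is an assumed open Database handle; the key name is illustrative):

    SequenceConfig seqConfig = new SequenceConfig();
    seqConfig.setAllowCreate(true); // create the sequence record if absent
    seqConfig.setCacheSize(100);    // cache a block of values per handle
    DatabaseEntry seqKey = new DatabaseEntry(
        "mySequence".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    Sequence seq = db.openSequence(null, seqKey, seqConfig);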
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + +
Modifier and Type    Method and Description
void                 close()
                     Closes a sequence.
long                 get(Transaction txn, int delta)
                     Returns the next available element in the sequence and changes the sequence value by delta.
Database             getDatabase()
                     Returns the Database handle associated with this sequence.
DatabaseEntry        getKey()
                     Returns the DatabaseEntry used to open this sequence.
SequenceStats        getStats(StatsConfig config)
                     Returns statistical information about the sequence.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          close

          +
          public void close()
          +           throws DatabaseException
          +
          Closes a sequence. Any unused cached values are lost. + +

          The sequence handle may not be used again after this method has + been called, regardless of the method's success or failure.

          + +

          WARNING: To guard against memory leaks, the application should + discard all references to the closed handle. While BDB makes an effort + to discard references from closed objects to the allocated memory for an + environment, this behavior is not guaranteed. The safe course of action + for an application is to discard all references to closed BDB + objects.

          +
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          get

          +
          public long get(Transaction txn,
          +                int delta)
          +         throws DatabaseException
          +
          Returns the next available element in the sequence and changes the + sequence value by delta. The value of delta + must be greater than zero. If there are enough cached values in the + sequence handle then they will be returned. Otherwise the next value + will be fetched from the database and incremented (decremented) by + enough to cover the delta and the next batch of cached + values. + + This method is synchronized to protect updating of the cached value, + since multiple threads may share a single handle. Multiple handles for + the same database/key may be used to increase concurrency.

          + +

          The txn handle must be null if the sequence handle was + opened with a non-zero cache size.

          + +

          For maximum concurrency, a non-zero cache size should be specified + prior to opening the sequence handle, the txn handle should + be null, and SequenceConfig.setAutoCommitNoSync should be called to disable log + flushes.

          +
          +
          Parameters:
          +
          txn - For a transactional database, an explicit transaction may be + specified, or null may be specified to use auto-commit. For a + non-transactional database, null must be specified.
          +
          delta - the amount by which to increment or decrement the sequence
          +
          Returns:
          +
          the next available element in the sequence
          +
          Throws:
          +
          SequenceOverflowException - if the end of the sequence is reached + and wrapping is not configured.
          +
          SequenceIntegrityException - if the sequence record has been + deleted.
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalArgumentException - if the delta is less than or equal to + zero, or larger than the size of the sequence's range.
          +
          DatabaseException
          +
          +
        • +
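
          To make the pattern above concrete, here is a minimal sketch (not part of the original page) that opens a cached sequence and fetches a value; db is assumed to be an already-open transactional Database, and the key name is illustrative:

              import java.nio.charset.StandardCharsets;
              import com.sleepycat.je.Database;
              import com.sleepycat.je.DatabaseEntry;
              import com.sleepycat.je.Sequence;
              import com.sleepycat.je.SequenceConfig;

              // 'db' is assumed to be an open, transactional Database.
              SequenceConfig seqConfig = new SequenceConfig();
              seqConfig.setAllowCreate(true);       // create the sequence record if absent
              seqConfig.setCacheSize(100);          // cache values in this handle
              seqConfig.setAutoCommitNoSync(true);  // skip log flushes, per the note above

              DatabaseEntry key =
                  new DatabaseEntry("orderIds".getBytes(StandardCharsets.UTF_8));
              Sequence seq = db.openSequence(null, key, seqConfig);
              try {
                  // txn must be null because a non-zero cache size was configured
                  long nextId = seq.get(null, 1);
                  System.out.println("next id = " + nextId);
              } finally {
                  seq.close();  // any unused cached values are lost
              }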
        + + + +
          +
        • +

          getDatabase

          +
          public Database getDatabase()
          +
          Returns the Database handle associated with this sequence.
          +
          +
          Returns:
          +
          The Database handle associated with this sequence.
          +
          +
        • +
        + + + +
          +
        • +

          getKey

          +
          public DatabaseEntry getKey()
          +
          Returns the DatabaseEntry used to open this sequence.
          +
          +
          Returns:
          +
          The DatabaseEntry used to open this sequence.
          +
          +
        • +
        + + + +
          +
        • +

          getStats

          +
          public SequenceStats getStats(StatsConfig config)
          +                       throws DatabaseException
          +
          Returns statistical information about the sequence. + +

          In the presence of multiple threads or processes accessing an active + sequence, the information returned by this method may be + out-of-date.

          + +

          The getStats method cannot be transaction-protected. For this reason, + it should be called in a thread of control that has no open cursors or + active transactions.

          +
          +
          Parameters:
          +
config - The statistics configuration; if null, the default configuration is + used.
          +
          Returns:
          +
          Sequence statistics.
          +
          Throws:
          +
          SequenceIntegrityException - if the sequence record has been + deleted.
          +
          DatabaseException
          +
          +
        • +
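
          As an illustration (assuming seq is the open Sequence from the sketch above), statistics might be read and reset like this:

              // Read, print, and reset the handle's statistics.
              SequenceStats stats = seq.getStats(StatsConfig.CLEAR);
              System.out.println("gets=" + stats.getNGets()
                      + " cachedGets=" + stats.getNCachedGets()
                      + " current=" + stats.getCurrent());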
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/SequenceConfig.html b/docs/java/com/sleepycat/je/SequenceConfig.html new file mode 100644 index 0000000..3ddac96 --- /dev/null +++ b/docs/java/com/sleepycat/je/SequenceConfig.html @@ -0,0 +1,811 @@ + + + + + +SequenceConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class SequenceConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class SequenceConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      Specifies the attributes of a sequence.
      +
    • +
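
      Since every setter documented below returns this, configurations can be built fluently. A hedged sketch of a bounded, wrapping sequence (all values are illustrative, not from the page):

          SequenceConfig config = new SequenceConfig()
              .setAllowCreate(true)       // create the sequence on first open
              .setInitialValue(1000)      // only effective when the sequence is created
              .setRange(0, 1_000_000)     // min must be less than max
              .setWrap(true)              // wrap instead of throwing on overflow
              .setCacheSize(50);          // must not exceed the size of the range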
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
Modifier and Type  Field and Description
        static SequenceConfig DEFAULT +
        Default configuration used if null is passed to methods that open a + sequence.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        SequenceConfig() +
        An instance created using the default constructor is initialized with + the system's default settings.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
Modifier and Type  Method and Description
        SequenceConfig clone() +
        Returns a copy of this configuration object.
        +
        boolean getAllowCreate() +
        Returns true if the Database.openSequence method is configured to create the sequence if it + does not already exist.
        +
        boolean getAutoCommitNoSync() +
        Returns true if auto-commit operations on the sequence are configured + to not flush the transaction log.
        +
        int getCacheSize() +
        Returns the number of elements cached by a sequence handle.
        +
        boolean getDecrement() +
        Returns true if the sequence is configured to decrement.
        +
        boolean getExclusiveCreate() +
        Returns true if the Database.openSequence method is configured to fail if the database + already exists.
        +
        long getInitialValue() +
        Returns the initial value for a sequence.
        +
        long getRangeMax() +
        Returns the maximum value for the sequence.
        +
        long getRangeMin() +
        Returns the minimum value for the sequence.
        +
        boolean getWrap() +
        Returns true if the sequence will wrap around when it is incremented + (decremented) past the specified maximum (minimum) value.
        +
        SequenceConfig setAllowCreate(boolean allowCreate) +
        Configures the Database.openSequence method to create the sequence if it does not + already exist.
        +
        SequenceConfig setAutoCommitNoSync(boolean autoCommitNoSync) +
        Configures auto-commit operations on the sequence to not flush the + transaction log.
        +
        SequenceConfig setCacheSize(int cacheSize) +
        Configures the number of elements cached by a sequence handle.
        +
        SequenceConfig setDecrement(boolean decrement) +
        Specifies that the sequence should be decremented.
        +
        SequenceConfig setExclusiveCreate(boolean exclusiveCreate) +
        Configures the Database.openSequence method to fail if the database already exists.
        +
        SequenceConfig setInitialValue(long initialValue) +
        Sets the initial value for a sequence.
        +
        SequenceConfig setRange(long min, + long max) +
        Configures a sequence range.
        +
        SequenceConfig setWrap(boolean wrap) +
        Specifies that the sequence should wrap around when it is incremented + (decremented) past the specified maximum (minimum) value.
        +
        java.lang.String toString() +
        Returns the values for each configuration attribute.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          DEFAULT

          +
          public static final SequenceConfig DEFAULT
          +
Default configuration used if null is passed to methods that open a + sequence.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          SequenceConfig

          +
          public SequenceConfig()
          +
          An instance created using the default constructor is initialized with + the system's default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setAllowCreate

          +
          public SequenceConfig setAllowCreate(boolean allowCreate)
          +
          Configures the Database.openSequence method to create the sequence if it does not + already exist. + +

          The default value is false.

          + +

          This method may be called at any time during the life of the + application.

          +
          +
          Parameters:
          +
          allowCreate - If true, configure the Database.openSequence method to + create the sequence if it does not already exist.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getAllowCreate

          +
          public boolean getAllowCreate()
          +
          Returns true if the Database.openSequence method is configured to create the sequence if it + does not already exist. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the Database.openSequence method is configured to create the sequence if it + does not already exist.
          +
          +
        • +
        + + + +
          +
        • +

          setCacheSize

          +
          public SequenceConfig setCacheSize(int cacheSize)
          +
Configures the number of elements cached by a sequence handle. + +

          The default value is zero.

          + +

          This method may be called at any time during the life of the + application.

          +
          +
          Parameters:
          +
          cacheSize - The number of elements cached by a sequence handle. + May not be larger than the size of the range defined by setRange(long, long).
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getCacheSize

          +
          public int getCacheSize()
          +
Returns the number of elements cached by a sequence handle. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
The number of elements cached by a sequence handle.
          +
          +
        • +
        + + + +
          +
        • +

          setDecrement

          +
          public SequenceConfig setDecrement(boolean decrement)
          +
          Specifies that the sequence should be decremented. + +

          The default value is false.

          + +

          This method may be called at any time during the life of the + application.

          +
          +
          Parameters:
          +
          decrement - If true, specify that the sequence should be + decremented.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getDecrement

          +
          public boolean getDecrement()
          +
          Returns true if the sequence is configured to decrement. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the sequence is configured to decrement.
          +
          +
        • +
        + + + +
          +
        • +

          setExclusiveCreate

          +
          public SequenceConfig setExclusiveCreate(boolean exclusiveCreate)
          +
          Configures the Database.openSequence method to fail if the database already exists. + +

          The default value is false.

          + +

          This method may be called at any time during the life of the + application.

          +
          +
          Parameters:
          +
          exclusiveCreate - If true, configure the Database.openSequence method to + fail if the database already exists.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getExclusiveCreate

          +
          public boolean getExclusiveCreate()
          +
          Returns true if the Database.openSequence method is configured to fail if the database + already exists. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the Database.openSequence method is configured to fail if the database + already exists.
          +
          +
        • +
        + + + +
          +
        • +

          setInitialValue

          +
          public SequenceConfig setInitialValue(long initialValue)
          +
          Sets the initial value for a sequence. + +

          The default initial value is zero.

          + +

          This call is only effective when the sequence is being created.

          + +

          This method may be called at any time during the life of the + application.

          +
          +
          Parameters:
          +
          initialValue - The initial value for a sequence. Must be within + the range minimum and maximum values, inclusive.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getInitialValue

          +
          public long getInitialValue()
          +
Returns the initial value for a sequence. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
The initial value for a sequence.
          +
          +
        • +
        + + + +
          +
        • +

          setAutoCommitNoSync

          +
          public SequenceConfig setAutoCommitNoSync(boolean autoCommitNoSync)
          +
          Configures auto-commit operations on the sequence to not flush the + transaction log. + +

          The default value is false.

          + +

          This method may be called at any time during the life of the + application.

          +
          +
          Parameters:
          +
          autoCommitNoSync - If true, configure auto-commit operations on + the sequence to not flush the transaction log.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getAutoCommitNoSync

          +
          public boolean getAutoCommitNoSync()
          +
Returns true if auto-commit operations on the sequence are configured + to not flush the transaction log. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
true if auto-commit operations on the sequence are configured + to not flush the transaction log.
          +
          +
        • +
        + + + +
          +
        • +

          setRange

          +
          public SequenceConfig setRange(long min,
          +                               long max)
          +
          Configures a sequence range. This call is only effective when the + sequence is being created. + +

          The default minimum is Long.MIN_VALUE and the default maximum + is Long.MAX_VALUE.

          +
          +
          Parameters:
          +
          min - The minimum value for the sequence. Must be less than max.
          +
          max - The maximum value for the sequence. Must be greater than + min.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getRangeMin

          +
          public long getRangeMin()
          +
          Returns the minimum value for the sequence. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          The minimum value for the sequence.
          +
          +
        • +
        + + + +
          +
        • +

          getRangeMax

          +
          public long getRangeMax()
          +
          Returns the maximum value for the sequence. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          The maximum value for the sequence.
          +
          +
        • +
        + + + +
          +
        • +

          setWrap

          +
          public SequenceConfig setWrap(boolean wrap)
          +
          Specifies that the sequence should wrap around when it is incremented + (decremented) past the specified maximum (minimum) value. + +

          The default value is false.

          + +

          This method may be called at any time during the life of the + application.

          +
          +
          Parameters:
          +
          wrap - If true, specify that the sequence should wrap around when + it is incremented (decremented) past the specified maximum (minimum) + value.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getWrap

          +
          public boolean getWrap()
          +
          Returns true if the sequence will wrap around when it is incremented + (decremented) past the specified maximum (minimum) value. + +

          This method may be called at any time during the life of the + application.

          +
          +
          Returns:
          +
          true if the sequence will wrap around when it is incremented + (decremented) past the specified maximum (minimum) value.
          +
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public SequenceConfig clone()
          +
          Returns a copy of this configuration object.
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns the values for each configuration attribute.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          Returns:
          +
          the values for each configuration attribute.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/SequenceExistsException.html b/docs/java/com/sleepycat/je/SequenceExistsException.html new file mode 100644 index 0000000..8c2b289 --- /dev/null +++ b/docs/java/com/sleepycat/je/SequenceExistsException.html @@ -0,0 +1,257 @@ + + + + + +SequenceExistsException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class SequenceExistsException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class SequenceExistsException
      +extends OperationFailureException
      +
      Thrown by Database.openSequence if the + sequence record already exists and the SequenceConfig + ExclusiveCreate parameter is true. + +

      The Transaction handle is not invalidated as a result of + this exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/SequenceIntegrityException.html b/docs/java/com/sleepycat/je/SequenceIntegrityException.html new file mode 100644 index 0000000..387137b --- /dev/null +++ b/docs/java/com/sleepycat/je/SequenceIntegrityException.html @@ -0,0 +1,256 @@ + + + + + +SequenceIntegrityException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class SequenceIntegrityException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class SequenceIntegrityException
      +extends OperationFailureException
      +
      Thrown by Sequence.get if the sequence record has been + deleted. + +

      The Transaction handle is not invalidated as a result of + this exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/SequenceNotFoundException.html b/docs/java/com/sleepycat/je/SequenceNotFoundException.html new file mode 100644 index 0000000..ec890d7 --- /dev/null +++ b/docs/java/com/sleepycat/je/SequenceNotFoundException.html @@ -0,0 +1,257 @@ + + + + + +SequenceNotFoundException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class SequenceNotFoundException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class SequenceNotFoundException
      +extends OperationFailureException
      +
      Thrown by Database.openSequence if the + sequence record does not exist and the SequenceConfig AllowCreate + parameter is false. + +

      The Transaction handle is not invalidated as a result of + this exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/SequenceOverflowException.html b/docs/java/com/sleepycat/je/SequenceOverflowException.html new file mode 100644 index 0000000..72ecc41 --- /dev/null +++ b/docs/java/com/sleepycat/je/SequenceOverflowException.html @@ -0,0 +1,256 @@ + + + + + +SequenceOverflowException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class SequenceOverflowException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class SequenceOverflowException
      +extends OperationFailureException
      +
      Thrown by Sequence.get if the end of the sequence is + reached and wrapping is not configured. + +

      The Transaction handle is not invalidated as a result of + this exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/SequenceStats.html b/docs/java/com/sleepycat/je/SequenceStats.html new file mode 100644 index 0000000..1397875 --- /dev/null +++ b/docs/java/com/sleepycat/je/SequenceStats.html @@ -0,0 +1,432 @@ + + + + + +SequenceStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class SequenceStats

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class SequenceStats
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      A SequenceStats object is used to return sequence statistics.
      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
Modifier and Type  Method and Description
        int getCacheSize() +
        Returns the number of values that will be cached in this handle.
        +
        long getCurrent() +
        Returns the current value of the sequence in the database.
        +
        long getLastValue() +
        Returns the last cached value of the sequence.
        +
        long getMax() +
        Returns the maximum permitted value of the sequence.
        +
        long getMin() +
        Returns the minimum permitted value of the sequence.
        +
        int getNCachedGets() +
        Returns the number of times that Sequence.get was called and a cached + value was returned.
        +
        int getNGets() +
        Returns the number of times that Sequence.get was called successfully.
        +
        long getValue() +
        Returns the current cached value of the sequence.
        +
        java.lang.String toString() 
        java.lang.String toStringVerbose() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getNGets

          +
          public int getNGets()
          +
          Returns the number of times that Sequence.get was called successfully.
          +
          +
          Returns:
          +
          number of times that Sequence.get was called successfully.
          +
          +
        • +
        + + + +
          +
        • +

          getNCachedGets

          +
          public int getNCachedGets()
          +
          Returns the number of times that Sequence.get was called and a cached + value was returned.
          +
          +
          Returns:
          +
          number of times that Sequence.get was called and a cached + value was returned.
          +
          +
        • +
        + + + +
          +
        • +

          getCurrent

          +
          public long getCurrent()
          +
          Returns the current value of the sequence in the database.
          +
          +
          Returns:
          +
          current value of the sequence in the database.
          +
          +
        • +
        + + + +
          +
        • +

          getValue

          +
          public long getValue()
          +
          Returns the current cached value of the sequence.
          +
          +
          Returns:
          +
          current cached value of the sequence.
          +
          +
        • +
        + + + +
          +
        • +

          getLastValue

          +
          public long getLastValue()
          +
          Returns the last cached value of the sequence.
          +
          +
          Returns:
          +
          last cached value of the sequence.
          +
          +
        • +
        + + + +
          +
        • +

          getMin

          +
          public long getMin()
          +
          Returns the minimum permitted value of the sequence.
          +
          +
          Returns:
          +
          minimum permitted value of the sequence.
          +
          +
        • +
        + + + +
          +
        • +

          getMax

          +
          public long getMax()
          +
          Returns the maximum permitted value of the sequence.
          +
          +
          Returns:
          +
          maximum permitted value of the sequence.
          +
          +
        • +
        + + + +
          +
        • +

          getCacheSize

          +
          public int getCacheSize()
          +
          Returns the number of values that will be cached in this handle.
          +
          +
          Returns:
          +
          number of values that will be cached in this handle.
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toStringVerbose

          +
          public java.lang.String toStringVerbose()
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/StatsConfig.html b/docs/java/com/sleepycat/je/StatsConfig.html new file mode 100644 index 0000000..ea2b002 --- /dev/null +++ b/docs/java/com/sleepycat/je/StatsConfig.html @@ -0,0 +1,554 @@ + + + + + +StatsConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class StatsConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class StatsConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      Specifies the attributes of a statistics retrieval operation.
      +
    • +
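
      For instance (a sketch, not from the original page, with env assumed to be an open Environment), a fast, self-resetting retrieval could be configured as follows; the fluent chaining relies on the setters documented below returning this:

          StatsConfig config = new StatsConfig()
              .setFast(true)    // skip values that require expensive actions
              .setClear(true);  // reset statistics after they are returned
          EnvironmentStats stats = env.getStats(config);
          System.out.println(stats);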
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + + + + + +
        Fields 
Modifier and Type  Field and Description
        static StatsConfig CLEAR +
        A convenience instance for which setClear(true) has been called, and + all other properties have default values.
        +
        static StatsConfig DEFAULT +
        A convenience instance embodying the default configuration.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        StatsConfig() +
        An instance created using the default constructor is initialized with + the system's default settings.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
Modifier and Type  Method and Description
        StatsConfig clone() +
        Returns a copy of this configuration object.
        +
        boolean getClear() +
        Returns true if the statistics operation is configured to reset + statistics after they are returned.
        +
        boolean getFast() +
        Returns true if the statistics operation is configured to return only + the values which do not require expensive actions.
        +
        int getShowProgressInterval() +
        Returns the showProgressInterval value, if set.
        +
        java.io.PrintStream getShowProgressStream() +
        Returns the PrintStream on which the progress messages will be displayed + during long running statistics gathering operations.
        +
        StatsConfig setClear(boolean clear) +
        Configures the statistics operation to reset statistics after they are + returned.
        +
        StatsConfig setFast(boolean fast) +
        Configures the statistics operation to return only the values which do + not incur some performance penalty.
        +
        StatsConfig setShowProgressInterval(int showProgressInterval) +
        When the statistics operation is configured to display progress, the + showProgressInterval is the number of LNs between each progress report.
        +
        StatsConfig setShowProgressStream(java.io.PrintStream showProgressStream) +
        Configures the statistics operation to display progress to the + PrintStream argument.
        +
        java.lang.String toString() +
        Returns the values for each configuration attribute.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          DEFAULT

          +
          public static final StatsConfig DEFAULT
          +
          A convenience instance embodying the default configuration.
          +
        • +
        + + + +
          +
        • +

          CLEAR

          +
          public static final StatsConfig CLEAR
          +
          A convenience instance for which setClear(true) has been called, and + all other properties have default values.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          StatsConfig

          +
          public StatsConfig()
          +
          An instance created using the default constructor is initialized with + the system's default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setFast

          +
          public StatsConfig setFast(boolean fast)
          +
          Configures the statistics operation to return only the values which do + not incur some performance penalty. + +

          The default value is false.

          + +

          For example, skip stats that require a traversal of the database or + in-memory tree, or which lock down the lock table for a period of + time.

          +
          +
          Parameters:
          +
          fast - If set to true, configure the statistics operation to return + only the values which do not incur some performance penalty.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getFast

          +
          public boolean getFast()
          +
          Returns true if the statistics operation is configured to return only + the values which do not require expensive actions.
          +
          +
          Returns:
          +
          true if the statistics operation is configured to return only + the values which do not require expensive actions.
          +
          +
        • +
        + + + +
          +
        • +

          setClear

          +
          public StatsConfig setClear(boolean clear)
          +
          Configures the statistics operation to reset statistics after they are + returned. The default value is false.
          +
          +
          Parameters:
          +
          clear - If set to true, configure the statistics operation to + reset statistics after they are returned.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getClear

          +
          public boolean getClear()
          +
          Returns true if the statistics operation is configured to reset + statistics after they are returned.
          +
          +
          Returns:
          +
          true if the statistics operation is configured to reset + statistics after they are returned.
          +
          +
        • +
        + + + +
          +
        • +

          setShowProgressStream

          +
          public StatsConfig setShowProgressStream(java.io.PrintStream showProgressStream)
          +
          Configures the statistics operation to display progress to the + PrintStream argument. The accumulated statistics will be displayed + every N records, where N is the value of showProgressInterval.
          +
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getShowProgressStream

          +
          public java.io.PrintStream getShowProgressStream()
          +
          Returns the PrintStream on which the progress messages will be displayed + during long running statistics gathering operations.
          +
        • +
        + + + +
          +
        • +

          setShowProgressInterval

          +
          public StatsConfig setShowProgressInterval(int showProgressInterval)
          +
When the statistics operation is configured to display progress, the + showProgressInterval is the number of LNs between each progress report.
          +
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getShowProgressInterval

          +
          public int getShowProgressInterval()
          +
          Returns the showProgressInterval value, if set.
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public StatsConfig clone()
          +
          Returns a copy of this configuration object.
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns the values for each configuration attribute.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          Returns:
          +
          the values for each configuration attribute.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/ThreadInterruptedException.html b/docs/java/com/sleepycat/je/ThreadInterruptedException.html new file mode 100644 index 0000000..74f305a --- /dev/null +++ b/docs/java/com/sleepycat/je/ThreadInterruptedException.html @@ -0,0 +1,292 @@ + + + + + +ThreadInterruptedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class ThreadInterruptedException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class ThreadInterruptedException
      +extends EnvironmentFailureException
      +
      Thrown when java.lang.InterruptedException (a thread interrupt) or + java.nio.channels.ClosedChannelException (which also results from a + thread interrupt) occurs in any JE method. This occurs when the application, + or perhaps a library or container that the application is using, calls + Thread.interrupt(). + +

      Calling Thread.interrupt is not recommended for an active JE + thread if the goal is to stop the thread or do thread coordination. If you + interrupt a thread that is executing a JE operation, the state of the + environment will be undefined. That's because JE might have been in the + middle of I/O activity when the operation was aborted midstream, and it + becomes very difficult to detect and handle all possible outcomes.

      + +

      When JE detects the interrupt, it will mark the environment invalid and + will throw a ThreadInterruptedException. This tells you that you + must close the environment and re-open it before using it again. This is + necessary, because if JE didn't throw ThreadInterruptedException, it + is very likely that you would get some other exception that is less + meaningful, or simply see corrupted data.

      + +

      Instead, applications should use other mechanisms like Object.notify and wait to coordinate threads. For example, use a + keepRunning variable of some kind in each thread. Check this + variable in your threads, and return from the thread when it is false. Set + it to false when you want to stop the thread. If this thread is waiting to + be woken up to do another unit of work, use Object.notify to wake it + up. This is the recommended technique.

      + +

      However, if the use of Thread.interrupt is unavoidable, be sure + to use it only when shutting down the environment. In this situation, + the ThreadInterruptedException should be expected. Note that + by shutting down the environment abnormally, recovery time will be longer + when the environment is subsequently opened, because a final checkpoint was + not performed.

      + +

      Existing Environment handles are invalidated as a result of this + exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
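
      The recommended coordination technique might look like the following sketch (class and method names are illustrative, not from the page):

          // Illustrative worker using a volatile flag plus wait/notify instead
          // of Thread.interrupt(), as recommended above.
          class Worker implements Runnable {
              private volatile boolean keepRunning = true;
              private final Object signal = new Object();

              public void run() {
                  while (keepRunning) {
                      doUnitOfWork();               // JE operations happen here
                      synchronized (signal) {
                          while (keepRunning && !workAvailable()) {
                              try {
                                  signal.wait();    // sleep until notified
                              } catch (InterruptedException e) {
                                  return;           // not expected in this design
                              }
                          }
                      }
                  }
              }

              public void shutdown() {              // called instead of interrupt()
                  keepRunning = false;
                  synchronized (signal) {
                      signal.notifyAll();           // wake the worker so it can exit
                  }
              }

              private void doUnitOfWork() { /* JE reads and writes */ }
              private boolean workAvailable() { return false; /* stub */ }
          }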
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/Transaction.State.html b/docs/java/com/sleepycat/je/Transaction.State.html new file mode 100644 index 0000000..30b7fc6 --- /dev/null +++ b/docs/java/com/sleepycat/je/Transaction.State.html @@ -0,0 +1,444 @@ + + + + + +Transaction.State (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Enum Transaction.State

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<Transaction.State>
      +
      +
      +
      Enclosing class:
      +
      Transaction
      +
      +
      +
      +
      public static enum Transaction.State
      +extends java.lang.Enum<Transaction.State>
      +
      The current state of the transaction.
      +
      +
      Since:
      +
      5.0.48
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + + + + + + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        ABORTED +
        The transaction has been aborted.
        +
        COMMITTED +
        The transaction has been committed and is locally durable according + to the local SyncPolicy requested.
        +
        MUST_ABORT +
        The transaction has been invalidated by an exception and cannot be + committed.
        +
        OPEN +
        The transaction has not been committed or aborted, and can be used + for performing operations.
        +
        POSSIBLY_COMMITTED +
        An exception was thrown by the commit method due to an error + that occurred while attempting to make the transaction durable.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
Modifier and Type  Method and Description
        static Transaction.State valueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static Transaction.State[] values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + + + + + +
          +
        • +

          POSSIBLY_COMMITTED

          +
          public static final Transaction.State POSSIBLY_COMMITTED
          +
          An exception was thrown by the commit method due to an error + that occurred while attempting to make the transaction durable. The + transaction may or may not be locally durable, according to the + local SyncPolicy requested. +

          + This is an unusual situation and is normally due to a system + failure, storage device failure, disk full condition, thread + interrupt, or a bug of some kind. When a transaction is in this + state, the Environment will have been invalidated by the error. +

          + In a replicated environment, a transaction in this state is not + transferred to replicas. If it turns out that the transaction is + indeed durable, it will be transferred to replicas via normal + replication mechanisms when the Environment is re-opened. +

          + When the commit method throws an exception and the + transaction is in the POSSIBLY_COMMITTED state, some + applications may wish to perform a data query to determine whether + the transaction is durable or not. Note that in the event of a + system level failure, the reads themselves may be unreliable, e.g. + the data may be in the file system cache but not on disk. Other + applications may wish to repeat the transaction unconditionally, + after resolving the error condition, particularly when the set of + operations in the transaction is designed to be idempotent.

          +
        • +
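
          One way to act on this state, sketched under the assumption that txn is the Transaction whose commit failed (the recovery actions are placeholders, not prescribed by the page):

              try {
                  txn.commit();
              } catch (DatabaseException e) {
                  if (txn.getState() == Transaction.State.POSSIBLY_COMMITTED) {
                      // Durability is unknown: query the data to see whether the
                      // changes survived, or re-run an idempotent transaction
                      // after resolving the failure, as described above.
                  }
                  throw e;
              }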
        + + + +
          +
        • +

          COMMITTED

          +
          public static final Transaction.State COMMITTED
          +
          The transaction has been committed and is locally durable according + to the local SyncPolicy requested. +

          + Note that a transaction may be in this state even when an exception + is thrown by the commit method. For example, in a + replicated environment, an InsufficientAcksException may be thrown after + the transaction is committed locally.

          +
        • +
        + + + +
          +
        • +

          MUST_ABORT

          +
          public static final Transaction.State MUST_ABORT
          +
          The transaction has been invalidated by an exception and cannot be + committed. See OperationFailureException for a description + of how a transaction can become invalid. The application is + responsible for aborting the transaction.
          +
        • +
        + + + +
          +
        • +

          ABORTED

          +
          public static final Transaction.State ABORTED
          +
          The transaction has been aborted.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static Transaction.State[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (Transaction.State c : Transaction.State.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static Transaction.State valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/Transaction.html b/docs/java/com/sleepycat/je/Transaction.html new file mode 100644 index 0000000..bb0dd7c --- /dev/null +++ b/docs/java/com/sleepycat/je/Transaction.html @@ -0,0 +1,1020 @@ + + + + + +Transaction (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je
    +

    Class Transaction

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class Transaction
      +extends java.lang.Object
      +
The Transaction object is the handle for a transaction. Methods on the + transaction handle are used to configure, abort and commit the transaction. + Transaction handles are provided to other Berkeley DB methods in order to + transactionally protect those operations. + +

      A single Transaction may be used to protect operations for any number of + Databases in a given environment. However, a single Transaction may not be + used for operations in more than one distinct environment.

      + +

Transaction handles are free-threaded; transaction handles may be used + concurrently by multiple threads. Once the Transaction.abort or Transaction.commit method + is called, the handle may not be accessed again, regardless of the success + or failure of the method, with one exception: the abort method may + be called any number of times to simplify error handling.

      + +

      To obtain a transaction with default attributes:

      + +
      +     Transaction txn = myEnvironment.beginTransaction(null, null);
      + 
      + +

      To customize the attributes of a transaction:

      + +
      +     TransactionConfig config = new TransactionConfig();
      +     config.setReadUncommitted(true);
      +     Transaction txn = myEnvironment.beginTransaction(null, config);
      + 
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + +
        Nested Classes 
Modifier and Type  Class and Description
        static class Transaction.State +
        The current state of the transaction.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods Deprecated Methods 
Modifier and Type  Method and Description
        void abort() +
        Cause an abnormal termination of the transaction.
        +
        void commit() +
        End the transaction.
        +
        void commit(Durability durability) +
        End the transaction using the specified durability requirements.
        +
        void commitNoSync() +
        End the transaction, not writing to stable storage and not committing + synchronously.
        +
        void commitSync() +
        End the transaction, writing to stable storage and committing + synchronously.
        +
        void commitWriteNoSync() +
        End the transaction, writing to stable storage but not committing + synchronously.
        +
        CommitToken getCommitToken() +
        This method is intended for use with a replicated environment.
        +
        long getId() +
        Return the transaction's unique ID.
        +
        long getLockTimeout(java.util.concurrent.TimeUnit unit) +
        Returns the lock request timeout value for the transaction.
        +
        java.lang.String getName() +
        Get the user visible name for the transaction.
        +
        Transaction.State getState() +
        Returns the current state of the transaction.
        +
        long getTxnTimeout(java.util.concurrent.TimeUnit unit) +
        Returns the timeout value for the transaction lifetime.
        +
        boolean isValid() +
        Returns whether this Transaction is open, which is equivalent + to when getState() returns Transaction.State.OPEN.
        +
        void setLockTimeout(long timeOut) +
        Deprecated.  +
        as of 4.0, replaced by setLockTimeout(long, + TimeUnit).
        +
        +
        void setLockTimeout(long timeOut, + java.util.concurrent.TimeUnit unit) +
        Configures the lock request timeout value for the transaction.
        +
        void setName(java.lang.String name) +
        Set the user visible name for the transaction.
        +
        void setTxnTimeout(long timeOut) +
        Deprecated.  +
        as of 4.0, replaced by setTxnTimeout(long, + TimeUnit).
        +
        +
        void setTxnTimeout(long timeOut, + java.util.concurrent.TimeUnit unit) +
        Configures the timeout value for the transaction lifetime.
        +
        java.lang.String toString() 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

abort

public void abort()
           throws DatabaseException

Cause an abnormal termination of the transaction.

The log is played backward, and any necessary undo operations are done. Before Transaction.abort returns, any locks held by the transaction will have been released.

In the case of nested transactions, aborting a parent transaction causes all children (unresolved or not) of the parent transaction to be aborted.

All cursors opened within the transaction must be closed before the transaction is aborted.

After this method has been called, regardless of its return, the Transaction handle may not be accessed again, with one exception: the abort method itself may be called any number of times to simplify error handling.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Throws:
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the environment has been closed, or cursors associated with the transaction are still open.
    DatabaseException
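A common usage shape, sketched below under the assumption of an open Environment handle named env (not defined on this page), is to abort in the failure path and lean on the method's idempotence:

    Transaction txn = env.beginTransaction(null, null);
    boolean committed = false;
    try {
        // ... reads and writes with txn; close all cursors here ...
        txn.commit();
        committed = true;
    } finally {
        if (!committed) {
            txn.abort(); // safe even if abort was already called
        }
        // Discard all references to txn, per the WARNING above.
    }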
getId

public long getId()

Return the transaction's unique ID.

Returns:
    The transaction's unique ID.

getCommitToken

public CommitToken getCommitToken()
                           throws java.lang.IllegalStateException

This method is intended for use with a replicated environment.

It returns the commitToken associated with a successful replicated commit. A null value is returned if the txn was not associated with a replicated environment, or the txn did not result in any changes to the environment. This method should only be called after the transaction has finished.

This method is typically used in conjunction with the CommitPointConsistencyPolicy.

Returns:
    the token used to identify the replicated commit. Return null if the transaction has aborted, or has committed without making any updates.
Throws:
    java.lang.IllegalStateException - if the method is called before the transaction has committed or aborted.
See Also:
    CommitPointConsistencyPolicy
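As a rough sketch of the intended flow (the masterEnv and replicaEnv handles are assumptions, and the token would normally be shipped to the replica out of band):

    // On the master: commit and capture the token.
    Transaction txn = masterEnv.beginTransaction(null, null);
    // ... perform writes ...
    txn.commit();
    CommitToken token = txn.getCommitToken(); // null if no changes were made

    // On a replica: wait (here, up to 10 seconds) until that commit point
    // has been replayed locally before reading.
    TransactionConfig config = new TransactionConfig();
    config.setConsistencyPolicy(
        new CommitPointConsistencyPolicy(token, 10, TimeUnit.SECONDS));
    Transaction readTxn = replicaEnv.beginTransaction(null, config);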
commit

public void commit()
            throws DatabaseException

End the transaction. If the environment is configured for synchronous commit, the transaction will be committed synchronously to stable storage before the call returns. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation, and durability) properties.

If the environment is not configured for synchronous commit, the commit will not necessarily have been committed to stable storage before the call returns. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but it is possible this transaction may be undone during recovery.

All cursors opened within the transaction must be closed before the transaction is committed.

If the method encounters an error, the transaction will have been aborted when the call returns.

After this method has been called, regardless of its return, the Transaction handle may not be accessed again, with one exception: the abort method may be called any number of times to simplify error handling.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Throws:
    InsufficientReplicasException - if the master in a replicated environment could not contact a quorum of replicas as determined by the Durability.ReplicaAckPolicy.
    InsufficientAcksException - if the master in a replicated environment did not receive enough replica acknowledgments, although the commit succeeded locally.
    ReplicaWriteException - if a write operation was performed with this transaction, but this node is now a Replica.
    OperationFailureException - if this exception occurred earlier and caused the transaction to be invalidated.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed, or cursors associated with the transaction are still open.
    DatabaseException
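Because lock conflicts are normally resolved by aborting and retrying, commit is typically wrapped in a retry loop. A minimal sketch (env and MAX_RETRIES are assumptions, not part of this API):

    for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
        Transaction txn = env.beginTransaction(null, null);
        try {
            // ... reads and writes; close all cursors before committing ...
            txn.commit();
            break;               // success
        } catch (LockConflictException e) {
            txn.abort();         // the handle is invalidated; retry afresh
        }
    }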
commit

public void commit(Durability durability)
            throws DatabaseException

End the transaction using the specified durability requirements. This requirement overrides any default durability requirements associated with the environment. If the durability requirements cannot be satisfied, an exception is thrown to describe the problem. Please see Durability for specific exceptions that could result when the durability requirements cannot be satisfied.

All cursors opened within the transaction must be closed before the transaction is committed.

If the method encounters an error, the transaction will have been aborted when the call returns.

After this method has been called, regardless of its return, the Transaction handle may not be accessed again, with one exception: the abort method may be called any number of times to simplify error handling.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Parameters:
    durability - the durability requirements for this transaction
Throws:
    InsufficientReplicasException - if the master in a replicated environment could not contact enough replicas to initiate the commit.
    InsufficientAcksException - if the master in a replicated environment did not receive enough replica acknowledgments, although the commit succeeded locally.
    ReplicaWriteException - if a write operation was performed with this transaction, but this node is now a Replica.
    OperationFailureException - if this exception occurred earlier and caused the transaction to be invalidated.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed, or cursors associated with the transaction are still open.
    java.lang.IllegalArgumentException - if an invalid parameter is specified.
    DatabaseException
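For example, a sketch (assuming txn is an open Transaction in a replicated environment) that demands a local fsync but only a simple majority of replica acknowledgments:

    Durability durability = new Durability(
        Durability.SyncPolicy.SYNC,        // this node: write and fsync
        Durability.SyncPolicy.NO_SYNC,     // replicas: no sync required
        Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
    txn.commit(durability);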
commitSync

public void commitSync()
                throws DatabaseException

End the transaction, writing to stable storage and committing synchronously. This means the transaction will exhibit all of the ACID (atomicity, consistency, isolation, and durability) properties.

This behavior is the default for database environments unless otherwise configured using the EnvironmentConfig.setTxnNoSync method. This behavior may also be set for a single transaction using the Environment.beginTransaction method. Any value specified to this method overrides both of those settings.

All cursors opened within the transaction must be closed before the transaction is committed.

If the method encounters an error, the transaction will have been aborted when the call returns.

After this method has been called, regardless of its return, the Transaction handle may not be accessed again, with one exception: the abort method may be called any number of times to simplify error handling.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Throws:
    InsufficientReplicasException - if the master in a replicated environment could not contact enough replicas to initiate the commit.
    InsufficientAcksException - if the master in a replicated environment did not receive enough replica acknowledgments, although the commit succeeded locally.
    ReplicaWriteException - if a write operation was performed with this transaction, but this node is now a Replica.
    OperationFailureException - if this exception occurred earlier and caused the transaction to be invalidated.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed, or cursors associated with the transaction are still open.
    DatabaseException

commitNoSync

public void commitNoSync()
                  throws DatabaseException

End the transaction, not writing to stable storage and not committing synchronously. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but it is possible this transaction may be undone during recovery.

This behavior may be set for a database environment using the EnvironmentConfig.setTxnNoSync method or for a single transaction using the Environment.beginTransaction method. Any value specified to this method overrides both of those settings.

All cursors opened within the transaction must be closed before the transaction is committed.

If the method encounters an error, the transaction will have been aborted when the call returns.

After this method has been called, regardless of its return, the Transaction handle may not be accessed again, with one exception: the abort method may be called any number of times to simplify error handling.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Throws:
    InsufficientReplicasException - if the master in a replicated environment could not contact enough replicas to initiate the commit.
    InsufficientAcksException - if the master in a replicated environment did not receive enough replica acknowledgments, although the commit succeeded locally.
    ReplicaWriteException - if a write operation was performed with this transaction, but this node is now a Replica.
    OperationFailureException - if this exception occurred earlier and caused the transaction to be invalidated.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed, or cursors associated with the transaction are still open.
    DatabaseException
commitWriteNoSync

public void commitWriteNoSync()
                       throws DatabaseException

End the transaction, writing to stable storage but not committing synchronously. This means the transaction will exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but it is possible this transaction may be undone during recovery.

This behavior is the default for database environments unless otherwise configured using the EnvironmentConfig.setTxnNoSync method. This behavior may also be set for a single transaction using the Environment.beginTransaction method. Any value specified to this method overrides both of those settings.

All cursors opened within the transaction must be closed before the transaction is committed.

If the method encounters an error, the transaction will have been aborted when the call returns.

After this method has been called, regardless of its return, the Transaction handle may not be accessed again, with one exception: the abort method may be called any number of times to simplify error handling.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Throws:
    InsufficientReplicasException - if the master in a replicated environment could not contact enough replicas to initiate the commit.
    InsufficientAcksException - if the master in a replicated environment did not receive enough replica acknowledgments, although the commit succeeded locally.
    ReplicaWriteException - if a write operation was performed with this transaction, but this node is now a Replica.
    OperationFailureException - if this exception occurred earlier and caused the transaction to be invalidated.
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed, or cursors associated with the transaction are still open.
    DatabaseException
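The three convenience methods map onto the three sync policies, and a transaction calls exactly one of them. As a side-by-side sketch:

    txn.commitSync();        // write and fsync the log: full ACID
    txn.commitNoSync();      // no write, no fsync: fastest, least durable
    txn.commitWriteNoSync(); // write but no fsync: survives a process
                             // crash, not an operating system crash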
getTxnTimeout

public long getTxnTimeout(java.util.concurrent.TimeUnit unit)
                   throws EnvironmentFailureException,
                          java.lang.IllegalStateException,
                          java.lang.IllegalArgumentException

Returns the timeout value for the transaction lifetime.

If setTxnTimeout(long, TimeUnit) has not been called to configure the timeout, the environment configuration value (EnvironmentConfig.TXN_TIMEOUT) is returned.

Parameters:
    unit - the TimeUnit of the returned value. May not be null.
Throws:
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed.
    java.lang.IllegalArgumentException - if the unit is null.
Since:
    4.0
setTxnTimeout

public void setTxnTimeout(long timeOut,
                          java.util.concurrent.TimeUnit unit)
                   throws java.lang.IllegalArgumentException,
                          DatabaseException

Configures the timeout value for the transaction lifetime.

If the transaction runs longer than this time, an operation using the transaction may throw TransactionTimeoutException. The transaction timeout is checked when locking a record, as part of a read or write operation.

A value of zero (which is the default) disables timeouts for the transaction, meaning that no limit on the duration of the transaction is enforced. Note that the setLockTimeout(long, TimeUnit) lock timeout is independent of the transaction timeout, and the lock timeout should not normally be set to zero.

Parameters:
    timeOut - The timeout value for the transaction lifetime, or zero to disable transaction timeouts.
    unit - the TimeUnit of the timeOut value. May be null only if timeOut is zero.
Throws:
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed.
    java.lang.IllegalArgumentException - if timeOut or unit is invalid.
    DatabaseException
Since:
    4.0
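For example (a fragment, assuming txn is an open Transaction):

    // Give the transaction at most five seconds of cumulative lifetime;
    // the limit is checked each time the transaction requests a lock.
    txn.setTxnTimeout(5, TimeUnit.SECONDS);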
setTxnTimeout

public void setTxnTimeout(long timeOut)
                   throws java.lang.IllegalArgumentException,
                          DatabaseException

Deprecated. As of 4.0, replaced by setTxnTimeout(long, TimeUnit).

Configures the timeout value for the transaction lifetime, with the timeout value specified in microseconds. This method is equivalent to:

    setTxnTimeout(timeOut, TimeUnit.MICROSECONDS);

Throws:
    java.lang.IllegalArgumentException
    DatabaseException
getLockTimeout

public long getLockTimeout(java.util.concurrent.TimeUnit unit)
                    throws EnvironmentFailureException,
                           java.lang.IllegalStateException,
                           java.lang.IllegalArgumentException

Returns the lock request timeout value for the transaction.

If setLockTimeout(long, TimeUnit) has not been called to configure the timeout, the environment configuration value (EnvironmentConfig.LOCK_TIMEOUT) is returned.

Parameters:
    unit - the TimeUnit of the returned value. May not be null.
Throws:
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed.
    java.lang.IllegalArgumentException - if the unit is null.
Since:
    4.0
setLockTimeout

public void setLockTimeout(long timeOut,
                           java.util.concurrent.TimeUnit unit)
                    throws java.lang.IllegalArgumentException,
                           DatabaseException

Configures the lock request timeout value for the transaction. This overrides the default lock timeout.

A value of zero disables lock timeouts. This is not recommended, even when the application expects that deadlocks will not occur or will be easily resolved. A lock timeout is a fall-back that guards against unexpected "live lock", unresponsive threads, or application failure to close a cursor or to commit or abort a transaction.

Parameters:
    timeOut - The lock timeout for all transactional and non-transactional operations, or zero to disable lock timeouts.
    unit - the TimeUnit of the timeOut value. May be null only if timeOut is zero.
Throws:
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
    java.lang.IllegalStateException - if the transaction or environment has been closed.
    java.lang.IllegalArgumentException - if timeOut or unit is invalid.
    DatabaseException
Since:
    4.0
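For example (a fragment, assuming txn is an open Transaction):

    // Fail lock requests that wait longer than 500 ms for this
    // transaction, overriding EnvironmentConfig.LOCK_TIMEOUT.
    txn.setLockTimeout(500, TimeUnit.MILLISECONDS);
    long timeoutMs = txn.getLockTimeout(TimeUnit.MILLISECONDS); // 500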
setLockTimeout

public void setLockTimeout(long timeOut)
                    throws java.lang.IllegalArgumentException,
                           DatabaseException

Deprecated. As of 4.0, replaced by setLockTimeout(long, TimeUnit).

Configures the lock request timeout value for the transaction, with the timeout value specified in microseconds. This method is equivalent to:

    setLockTimeout(timeOut, TimeUnit.MICROSECONDS);

Throws:
    java.lang.IllegalArgumentException
    DatabaseException
setName

public void setName(java.lang.String name)

Set the user visible name for the transaction.

Parameters:
    name - The user visible name for the transaction.

getName

public java.lang.String getName()

Get the user visible name for the transaction.

Returns:
    The user visible name for the transaction.

toString

public java.lang.String toString()

Overrides:
    toString in class java.lang.Object
isValid

public boolean isValid()

Returns whether this Transaction is open, which is equivalent to when getState() returns Transaction.State.OPEN. See Transaction.State.OPEN for more information.

When an OperationFailureException, or one of its subclasses, is caught, the isValid method may be called to determine whether the Transaction can continue to be used, or should be aborted.
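A sketch of that pattern (db, key and data are assumed to be an open Database and two DatabaseEntry values):

    try {
        db.put(txn, key, data);
    } catch (OperationFailureException e) {
        if (!txn.isValid()) {
            txn.abort();   // the handle cannot be used again; start over
        }
    }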
getState

public Transaction.State getState()

Returns the current state of the transaction.

Since:
    5.0.48
diff --git a/docs/java/com/sleepycat/je/TransactionConfig.html b/docs/java/com/sleepycat/je/TransactionConfig.html
new file mode 100644
index 0000000..cb2cc58
--- /dev/null
+++ b/docs/java/com/sleepycat/je/TransactionConfig.html
@@ -0,0 +1,976 @@

TransactionConfig (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class TransactionConfig

All Implemented Interfaces:
    java.lang.Cloneable

public class TransactionConfig
extends java.lang.Object
implements java.lang.Cloneable

Specifies the attributes of a database environment transaction.

        Field Detail

DEFAULT

public static final TransactionConfig DEFAULT

Default configuration used if null is passed to methods that create a transaction.

        Constructor Detail

TransactionConfig

public TransactionConfig()

An instance created using the default constructor is initialized with the system's default settings.

        Method Detail

setSync

public TransactionConfig setSync(boolean sync)

Configures the transaction to write and synchronously flush the log when it commits.

This behavior may be set for a database environment using the Environment.setMutableConfig method. Any value specified to this method overrides that setting.

The default is false for this class and true for the database environment.

If true is passed to both setSync and setNoSync, setSync will take precedence.

Parameters:
    sync - If true, transactions exhibit all the ACID (atomicity, consistency, isolation, and durability) properties.
Returns:
    this

getSync

public boolean getSync()

Returns true if the transaction is configured to write and synchronously flush the log when it commits.

Returns:
    true if the transaction is configured to write and synchronously flush the log when it commits.
setNoSync

public TransactionConfig setNoSync(boolean noSync)

Configures the transaction to not write or synchronously flush the log when it commits.

This behavior may be set for a database environment using the Environment.setMutableConfig method. Any value specified to this method overrides that setting.

The default is false for this class and the database environment.

Parameters:
    noSync - If true, transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but if the application or system fails, it is possible some number of the most recently committed transactions may be undone during recovery. The number of transactions at risk is governed by how many log updates can fit into the log buffer, how often the operating system flushes dirty buffers to disk, and how often the log is checkpointed.
Returns:
    this

getNoSync

public boolean getNoSync()

Deprecated. Replaced by getDurability().

Returns true if the transaction is configured to not write or synchronously flush the log when it commits.

Returns:
    true if the transaction is configured to not write or synchronously flush the log when it commits.
setWriteNoSync

public TransactionConfig setWriteNoSync(boolean writeNoSync)

Configures the transaction to write but not synchronously flush the log when it commits.

This behavior may be set for a database environment using the Environment.setMutableConfig method. Any value specified to this method overrides that setting.

The default is false for this class and the database environment.

Parameters:
    writeNoSync - If true, transactions exhibit the ACI (atomicity, consistency, and isolation) properties, but not D (durability); that is, database integrity will be maintained, but if the operating system fails, it is possible some number of the most recently committed transactions may be undone during recovery. The number of transactions at risk is governed by how often the operating system flushes dirty buffers to disk, and how often the log is checkpointed.
Returns:
    this

getWriteNoSync

public boolean getWriteNoSync()

Deprecated. Replaced by getDurability().

Returns true if the transaction is configured to write but not synchronously flush the log when it commits.

Returns:
    true if the transaction is configured to write but not synchronously flush the log when it commits.
setDurability

public TransactionConfig setDurability(Durability durability)

Configures the durability associated with a transaction when it commits. Changes to durability are not reflected back to the "sync" booleans; there isn't a one-to-one mapping.

Note that you should not use both the durability and the XXXSync() APIs on the same config object.

Parameters:
    durability - the durability definition
Returns:
    this
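For example, a sketch (env is an assumed open Environment) that sets durability on the config instead of the sync booleans:

    TransactionConfig config = new TransactionConfig();
    config.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
    Transaction txn = env.beginTransaction(null, config);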
setConsistencyPolicy

public TransactionConfig setConsistencyPolicy(ReplicaConsistencyPolicy consistencyPolicy)

Associates a consistency policy with this configuration.

Parameters:
    consistencyPolicy - the consistency definition
Returns:
    this

getConsistencyPolicy

public ReplicaConsistencyPolicy getConsistencyPolicy()

Returns the consistency policy associated with the configuration.

Returns:
    the consistency policy currently associated with this config.
setNoWait

public TransactionConfig setNoWait(boolean noWait)

Configures the transaction to not wait if a lock request cannot be immediately granted.

The default is false for this class and the database environment.

Parameters:
    noWait - If true, transactions will not wait if a lock request cannot be immediately granted; instead, LockNotAvailableException will be thrown.
Returns:
    this

getNoWait

public boolean getNoWait()

Returns true if the transaction is configured to not wait if a lock request cannot be immediately granted.

Returns:
    true if the transaction is configured to not wait if a lock request cannot be immediately granted.
setReadUncommitted

public TransactionConfig setReadUncommitted(boolean readUncommitted)

Configures read operations performed by the transaction to return modified but not yet committed data.

Parameters:
    readUncommitted - If true, configure read operations performed by the transaction to return modified but not yet committed data.
Returns:
    this
See Also:
    LockMode.READ_UNCOMMITTED

getReadUncommitted

public boolean getReadUncommitted()

Returns true if read operations performed by the transaction are configured to return modified but not yet committed data.

Returns:
    true if read operations performed by the transaction are configured to return modified but not yet committed data.
See Also:
    LockMode.READ_UNCOMMITTED
setReadCommitted

public TransactionConfig setReadCommitted(boolean readCommitted)

Configures the transaction for read committed isolation.

This ensures the stability of the current data item read by the cursor but permits data read by this transaction to be modified or deleted prior to the commit of the transaction.

Parameters:
    readCommitted - If true, configure the transaction for read committed isolation.
Returns:
    this
See Also:
    LockMode.READ_COMMITTED

getReadCommitted

public boolean getReadCommitted()

Returns true if the transaction is configured for read committed isolation.

Returns:
    true if the transaction is configured for read committed isolation.
See Also:
    LockMode.READ_COMMITTED
setSerializableIsolation

public TransactionConfig setSerializableIsolation(boolean serializableIsolation)

Configures this transaction to have serializable (degree 3) isolation. By setting serializable isolation, phantoms will be prevented.

By default a transaction provides Repeatable Read isolation; EnvironmentConfig.setTxnSerializableIsolation(boolean) may be called to override the default. If the environment is configured for serializable isolation, all transactions will be serializable regardless of whether this method is called; calling setSerializableIsolation(boolean) with a false parameter will not disable serializable isolation.

The default is false for this class and the database environment.

Returns:
    this
See Also:
    LockMode

getSerializableIsolation

public boolean getSerializableIsolation()

Returns true if the transaction has been explicitly configured to have serializable (degree 3) isolation.

Returns:
    true if the transaction has been configured to have serializable isolation.
See Also:
    LockMode
setReadOnly

public TransactionConfig setReadOnly(boolean readOnly)

Configures this transaction to disallow write operations, regardless of whether writes are allowed for the Environment or the Databases that are accessed.

If a write operation is attempted using a read-only transaction, an UnsupportedOperationException will be thrown.

For a read-only transaction, the transaction's Durability is ignored, even when it is explicitly specified using setDurability(Durability).

In a ReplicatedEnvironment, a read-only transaction implicitly uses Durability.ReplicaAckPolicy.NONE. A read-only transaction on a Master will thus not be held up, or throw InsufficientReplicasException, if the Master is not in contact with a sufficient number of Replicas at the time the transaction is initiated.

The default setting is false (writes are allowed).

Returns:
    this

getReadOnly

public boolean getReadOnly()

Returns whether read-only is configured for this transaction.
setLocalWrite

public TransactionConfig setLocalWrite(boolean localWrite)

Configures this transaction to allow writing to non-replicated Databases in a ReplicatedEnvironment.

In a replicated environment, a given transaction may be used to write to either replicated databases or non-replicated databases, but not both. If a write operation to a replicated database is attempted when local-write is true, or to a non-replicated database when local-write is false, an UnsupportedOperationException will be thrown.

Note that for auto-commit transactions (when the Transaction parameter is null), the local-write setting is automatically set to correspond to whether the database is replicated. With auto-commit, local-write is always true for a non-replicated database, and always false for a replicated database.

In a replicated environment, a local-write transaction implicitly uses Durability.ReplicaAckPolicy.NONE. A local-write transaction on a Master will thus not be held up, or throw InsufficientReplicasException, if the Master is not in contact with a sufficient number of Replicas at the time the transaction is initiated.

By default the local-write setting is false, meaning that the transaction may only write to replicated Databases in a replicated environment.

This configuration setting is ignored in a non-replicated Environment since no databases are replicated.

Returns:
    this

getLocalWrite

public boolean getLocalWrite()

Returns whether local-write is configured for this transaction.
clone

public TransactionConfig clone()

Returns a copy of this configuration object.

Overrides:
    clone in class java.lang.Object

toString

public java.lang.String toString()

Returns the values for each configuration attribute.

Overrides:
    toString in class java.lang.Object
Returns:
    the values for each configuration attribute.
diff --git a/docs/java/com/sleepycat/je/TransactionStats.Active.html b/docs/java/com/sleepycat/je/TransactionStats.Active.html
new file mode 100644
index 0000000..2b8da01
--- /dev/null
+++ b/docs/java/com/sleepycat/je/TransactionStats.Active.html
@@ -0,0 +1,308 @@

TransactionStats.Active (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class TransactionStats.Active

All Implemented Interfaces:
    java.io.Serializable
Enclosing class:
    TransactionStats

public static class TransactionStats.Active
extends java.lang.Object
implements java.io.Serializable

The Active class represents an active transaction.

See Also:
    Serialized Form

        Method Summary

long getId()
    The transaction ID of the transaction.
java.lang.String getName()
    The transaction name, including the thread name if available.
long getParentId()
    The transaction ID of the parent transaction (or 0, if no parent).
java.lang.String toString()

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

        Method Detail

getId

public long getId()

The transaction ID of the transaction.

getParentId

public long getParentId()

The transaction ID of the parent transaction (or 0, if no parent).

getName

public java.lang.String getName()

The transaction name, including the thread name if available.

toString

public java.lang.String toString()

Overrides:
    toString in class java.lang.Object
diff --git a/docs/java/com/sleepycat/je/TransactionStats.html b/docs/java/com/sleepycat/je/TransactionStats.html
new file mode 100644
index 0000000..1c162ec
--- /dev/null
+++ b/docs/java/com/sleepycat/je/TransactionStats.html
@@ -0,0 +1,422 @@

TransactionStats (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class TransactionStats

All Implemented Interfaces:
    java.io.Serializable

public class TransactionStats
extends java.lang.Object
implements java.io.Serializable

Transaction statistics for a database environment.

See Also:
    Serialized Form

        Nested Class Summary

static class TransactionStats.Active
    The Active class represents an active transaction.

        Method Summary

TransactionStats.Active[] getActiveTxns()
    Return the array of active transactions.
long getNAborts()
    The number of transactions that have aborted.
int getNActive()
    The number of transactions that are currently active.
long getNBegins()
    The number of transactions that have begun.
long getNCommits()
    The number of transactions that have committed.
long getNXAAborts()
    The number of XA transactions that have aborted.
long getNXACommits()
    The number of XA transactions that have committed.
long getNXAPrepares()
    The number of XA transactions that have been prepared.
java.lang.String toString()
java.lang.String toStringVerbose()

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

        Method Detail

getActiveTxns

public TransactionStats.Active[] getActiveTxns()

Return the array of active transactions.

Returns:
    The array of active transactions.

getNAborts

public long getNAborts()

The number of transactions that have aborted.

getNXAAborts

public long getNXAAborts()

The number of XA transactions that have aborted.

getNXAPrepares

public long getNXAPrepares()

The number of XA transactions that have been prepared.

getNActive

public int getNActive()

The number of transactions that are currently active.

getNBegins

public long getNBegins()

The number of transactions that have begun.

getNCommits

public long getNCommits()

The number of transactions that have committed.

getNXACommits

public long getNXACommits()

The number of XA transactions that have committed.

toString

public java.lang.String toString()

Overrides:
    toString in class java.lang.Object

toStringVerbose

public java.lang.String toStringVerbose()
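These statistics are typically obtained from the environment. A sketch, assuming an open Environment handle named env and the Environment.getTransactionStats(StatsConfig) accessor:

    TransactionStats stats = env.getTransactionStats(new StatsConfig());
    System.out.println("begun=" + stats.getNBegins()
        + " committed=" + stats.getNCommits()
        + " aborted=" + stats.getNAborts());
    for (TransactionStats.Active active : stats.getActiveTxns()) {
        System.out.println("active txn " + active.getId()
            + " name=" + active.getName());
    }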
diff --git a/docs/java/com/sleepycat/je/TransactionTimeoutException.html b/docs/java/com/sleepycat/je/TransactionTimeoutException.html
new file mode 100644
index 0000000..2f140e1
--- /dev/null
+++ b/docs/java/com/sleepycat/je/TransactionTimeoutException.html
@@ -0,0 +1,284 @@

TransactionTimeoutException (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class TransactionTimeoutException

All Implemented Interfaces:
    java.io.Serializable

public class TransactionTimeoutException
extends LockConflictException

Thrown when the transaction timeout interval is exceeded. This is normally because the cumulative operation time for the transaction exceeds the timeout, or another transaction or cursor holds a lock for longer than the timeout interval. It may also occur if the application fails to close a cursor, or fails to commit or abort a transaction, since any locks held by the cursor or transaction will be held indefinitely.

This exception is not thrown if a deadlock is detected, even if the timeout elapses before the deadlock is broken. If a deadlock is detected, DeadlockException is always thrown instead.

The transaction timeout interval may be set using Transaction.setTxnTimeout(long, java.util.concurrent.TimeUnit).

The Transaction handle is invalidated as a result of this exception.

Normally, applications should catch the base class LockConflictException rather than catching one of its subclasses. All lock conflicts are typically handled in the same way, which is normally to abort and retry the transaction. See LockConflictException for more information.

Since:
    4.0
See Also:
    Serialized Form

        Method Summary

Methods inherited from class java.lang.Throwable

addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
diff --git a/docs/java/com/sleepycat/je/UniqueConstraintException.html b/docs/java/com/sleepycat/je/UniqueConstraintException.html
new file mode 100644
index 0000000..d18fcb0
--- /dev/null
+++ b/docs/java/com/sleepycat/je/UniqueConstraintException.html
@@ -0,0 +1,284 @@

UniqueConstraintException (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class UniqueConstraintException

diff --git a/docs/java/com/sleepycat/je/VerifyConfig.html b/docs/java/com/sleepycat/je/VerifyConfig.html
new file mode 100644
index 0000000..2fde442
--- /dev/null
+++ b/docs/java/com/sleepycat/je/VerifyConfig.html
@@ -0,0 +1,776 @@

VerifyConfig (Oracle - Berkeley DB Java Edition API)

com.sleepycat.je

    Class VerifyConfig

All Implemented Interfaces:
    java.lang.Cloneable

public class VerifyConfig
extends java.lang.Object
implements java.lang.Cloneable

Specifies the attributes of a verification operation.
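As a quick orientation before the method details below, here is a minimal usage sketch. The open Environment env and the chosen values are assumptions for illustration; setPrintInfo is the setter corresponding to getPrintInfo below (its own detail block did not survive extraction here).

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.VerifyConfig;

    // Assumes 'env' is an already-open Environment.
    static void verifyEnvironment(Environment env) {
        VerifyConfig config = new VerifyConfig();
        config.setPrintInfo(true);                        // print basic verification info
        config.setBatchSize(500);                         // records verified per batch
        config.setBatchDelay(10, TimeUnit.MILLISECONDS);  // pause between batches
        config.setVerifySecondaries(true);                // check secondary integrity too
        env.verify(config, System.out);
    }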
Constructor Detail
VerifyConfig

public VerifyConfig()

An instance created using the default constructor is initialized with the system's default settings.

        Method Detail

setPropagateExceptions

public VerifyConfig setPropagateExceptions(boolean propagate)

Configures Environment.verify and Database.verify to propagate exceptions found during verification.

By default this is false: exception information is printed to System.out for notification but does not stop the verification activity, which continues for as long as possible.

Note: Currently this method has no effect.

Parameters:
    propagate - If true, configures Environment.verify and Database.verify to propagate exceptions found during verification.
Returns:
    this

getPropagateExceptions

public boolean getPropagateExceptions()

Returns true if Environment.verify and Database.verify are configured to propagate exceptions found during verification.

This method may be called at any time during the life of the application.

Returns:
    true if Environment.verify and Database.verify are configured to propagate exceptions found during verification.

setAggressive

public VerifyConfig setAggressive(boolean aggressive)

Configures Environment.verify and Database.verify to perform fine-granularity consistency checking that includes verifying in-memory constructs.

This level of checking should only be performed while the database environment is quiescent.

By default this is false.

Note: Currently, enabling aggressive verification has no additional effect.

Parameters:
    aggressive - If true, configures Environment.verify and Database.verify to perform fine-granularity consistency checking that includes verifying in-memory constructs.
Returns:
    this

getAggressive

public boolean getAggressive()

Returns true if Environment.verify and Database.verify are configured to perform fine-granularity consistency checking that includes verifying in-memory constructs.

This method may be called at any time during the life of the application.

Returns:
    true if Environment.verify and Database.verify are configured to perform fine-granularity consistency checking that includes verifying in-memory constructs.

getPrintInfo

public boolean getPrintInfo()

Returns true if Environment.verify and Database.verify are configured to print basic verification information.

This method may be called at any time during the life of the application.

Returns:
    true if Environment.verify and Database.verify are configured to print basic verification information.

setShowProgressStream

public VerifyConfig setShowProgressStream(java.io.PrintStream showProgressStream)

Configures the verify operation to display progress to the PrintStream argument. The accumulated statistics will be displayed every N records, where N is the value of showProgressInterval.

Returns:
    this

getShowProgressStream

public java.io.PrintStream getShowProgressStream()

Returns the PrintStream on which the progress messages will be displayed during long-running verify operations.

setShowProgressInterval

public VerifyConfig setShowProgressInterval(int showProgressInterval)

When the verify operation is configured to display progress, showProgressInterval is the number of LNs between each progress report.

Returns:
    this

getShowProgressInterval

public int getShowProgressInterval()

Returns the showProgressInterval value, if set.

setVerifySecondaries

public VerifyConfig setVerifySecondaries(boolean verifySecondaries)

Configures verification to verify secondary database integrity. This is equivalent to verifying secondaries in the background Btree verifier, when EnvironmentConfig.VERIFY_SECONDARIES is set to true.

By default this is true.

Returns:
    this

getVerifySecondaries

public boolean getVerifySecondaries()

Returns the verifySecondaries value.

setVerifyDataRecords

public VerifyConfig setVerifyDataRecords(boolean verifyDataRecords)

Configures verification to read and verify the leaf node (LN) of a primary data record. This is equivalent to verifying data records in the background Btree verifier, when EnvironmentConfig.VERIFY_DATA_RECORDS is set to true.

By default this is false.

Returns:
    this

getVerifyDataRecords

public boolean getVerifyDataRecords()

Returns the verifyDataRecords value.

setBatchSize

public VerifyConfig setBatchSize(int batchSize)

Configures the number of records verified per batch. In order to give database remove/truncate the opportunity to execute, records are verified in batches and there is a delay between batches.

By default the batch size is 1000.

Note that when using the background data verifier, the batch size is EnvironmentConfig.VERIFY_BTREE_BATCH_SIZE.

Returns:
    this

getBatchSize

public int getBatchSize()

Returns the batchSize value.

setBatchDelay

public VerifyConfig setBatchDelay(long delay, java.util.concurrent.TimeUnit unit)

Configures the delay between batches. In order to give database remove/truncate the opportunity to execute, records are verified in batches and there is a delay between batches.

By default the batch delay is 10 ms.

Note that when using the background data verifier, the batch delay is EnvironmentConfig.VERIFY_BTREE_BATCH_DELAY.

Parameters:
    delay - the delay between batches.
    unit - the TimeUnit of the delay value. May be null only if delay is zero.
Returns:
    this

getBatchDelay

public long getBatchDelay(java.util.concurrent.TimeUnit unit)

Returns the batch delay.

Parameters:
    unit - the TimeUnit of the returned value. May not be null.

clone

public VerifyConfig clone()

Returns a copy of this configuration object.

Overrides:
    clone in class java.lang.Object

toString

public java.lang.String toString()

Returns the values for each configuration attribute.

Overrides:
    toString in class java.lang.Object
Returns:
    the values for each configuration attribute.
diff --git a/docs/java/com/sleepycat/je/VersionMismatchException.html b/docs/java/com/sleepycat/je/VersionMismatchException.html
new file mode 100644
index 0000000..37717ad
--- /dev/null
+++ b/docs/java/com/sleepycat/je/VersionMismatchException.html
@@ -0,0 +1,263 @@
VersionMismatchException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je
Class VersionMismatchException

All Implemented Interfaces:
    java.io.Serializable

public class VersionMismatchException
extends EnvironmentFailureException

Thrown by the Environment constructor when an environment cannot be opened because the version of the existing log is not compatible with the version of JE that is running. This occurs when a later version of JE was used to create the log.

Warning: This exception should be handled when more than one version of JE may be used to access an environment.

Since:
    4.0
See Also:
    Serialized Form
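A minimal handling sketch, assuming an application that may run against environments written by newer JE releases; the recovery action (logging and returning null) is an application choice, not something the API prescribes.

    import java.io.File;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.VersionMismatchException;

    static Environment openSafely(File envHome) {
        EnvironmentConfig config = new EnvironmentConfig();
        config.setAllowCreate(true);
        try {
            return new Environment(envHome, config);
        } catch (VersionMismatchException e) {
            // The log was created by a later JE version; this JE cannot read it.
            System.err.println("Upgrade the JE library to open " + envHome + ": " + e);
            return null;
        }
    }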
Method Summary

Methods inherited from class java.lang.Throwable:
addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
diff --git a/docs/java/com/sleepycat/je/WriteOptions.html b/docs/java/com/sleepycat/je/WriteOptions.html
new file mode 100644
index 0000000..b0d6a52
--- /dev/null
+++ b/docs/java/com/sleepycat/je/WriteOptions.html
@@ -0,0 +1,804 @@
WriteOptions (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je
Class WriteOptions

All Implemented Interfaces:
    java.lang.Cloneable

public class WriteOptions
extends java.lang.Object
implements java.lang.Cloneable

Options for calling methods that write (insert, update or delete) records.

Time-To-Live

When performing a 'put' operation, a TTL may be specified using setTTL(int, TimeUnit) or setTTL(int).

By default, the TTL property is zero, meaning there is no automatic expiration. A non-zero TTL may be specified to cause an inserted record to expire. The expiration time may also be changed for an existing record by updating the record and specifying a different TTL, including specifying zero to prevent the record from expiring. However, the TTL of an existing record is updated only if setUpdateTTL(boolean) is explicitly set to true. When deleting a record, the TTL parameter is ignored.
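For orientation, a minimal sketch of a 'put' with a TTL is below, assuming an open Database db and illustrative key and data values; Put.OVERWRITE and OperationResult are part of the JE 7+ write API.

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.OperationResult;
    import com.sleepycat.je.Put;
    import com.sleepycat.je.WriteOptions;

    static void putWithTtl(Database db) {
        DatabaseEntry key = new DatabaseEntry("user:42".getBytes());         // illustrative
        DatabaseEntry data = new DatabaseEntry("session-token".getBytes());  // illustrative

        // The record expires about 30 days from now, rounded up to a day boundary.
        WriteOptions options = new WriteOptions().setTTL(30, TimeUnit.DAYS);
        OperationResult result = db.put(null, key, data, Put.OVERWRITE, options);
        System.out.println("Expires at: " + result.getExpirationTime());
    }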

Records expire on day or hour boundaries, depending on the timeUnit parameter. At the time of the write operation, the TTL parameter is used to compute the record's expiration time by first converting it from days (or hours) to milliseconds, and then adding it to the current system time. If the resulting expiration time is not evenly divisible by the number of milliseconds in one day (or hour), it is rounded up to the nearest day (or hour).
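The rounding just described can be illustrated with the following sketch; this is a simplified restatement of the rule above, not JE's internal code.

    import java.util.concurrent.TimeUnit;

    // Computes the day-boundary expiration time for a TTL given in days.
    static long expirationTimeMillis(int ttlDays) {
        long expiration = System.currentTimeMillis() + TimeUnit.DAYS.toMillis(ttlDays);
        long day = TimeUnit.DAYS.toMillis(1);
        // Round up to the next day boundary unless already on one.
        return (expiration % day == 0) ? expiration : ((expiration / day) + 1) * day;
    }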

Passing TimeUnit.DAYS, rather than TimeUnit.HOURS, for the timeUnit parameter is recommended to minimize storage requirements (memory and disk). Because the expiration time is stored in the JE Btree internally, when using the TTL feature, the additional memory and disk space required for storing Btree internal nodes (INs) is twice as much when using TimeUnit.HOURS as when using TimeUnit.DAYS. Using TimeUnit.DAYS adds about 5% to the space needed for INs, while TimeUnit.HOURS adds about 10%.

Note that JE stores the expiration time of the record and not the original TTL value that was specified. The expiration time of a record is available when reading (or writing) records via OperationResult.getExpirationTime().

A summary of the behavior of expired records is as follows.

• Space for expired records will be purged in the background by the JE cleaner, and expired records will be filtered out of queries even if they have not been purged.
• Expired records are removed individually: there is no guarantee that records with the same expiration time will be removed simultaneously.
• Records with expiration times support repeatable-read semantics in most cases, but with some exceptions (described below).

A more detailed description is below, including some information on how expired records are handled internally.

• Expired records will be purged in order to reclaim disk space. This happens in the background over time, and there is no guarantee that the space for a record will be reclaimed at any particular time. Purging of expired records occurs during the normal JE cleaning process. The goals of the purging process are:
  1. to minimize the cost of purging;
  2. to keep disk utilization below the EnvironmentConfig.CLEANER_MIN_UTILIZATION threshold, as usual, but taking into account expired data; and
  3. to reclaim expired data gradually and avoid spikes in cleaning on day and hour boundaries.

• Expired records that have not been purged will be filtered out of queries and will not be returned to the application. In a replicated environment, purging and filtering occur independently on each node. For queries to return consistent results on all nodes, the system clocks on all nodes must be synchronized.

• Repeatable-read semantics are supported for records that expire after being read. If a lock of any kind is held on a record and the record expires, when accessing it again using the same transaction or cursor, it will be accessed as if it is not expired. In other words, locking a record prevents it from expiring, from the viewpoint of that transaction or cursor. However, there are some caveats and exceptions to this rule:
  • A lock by one transaction or cursor will not prevent a record from being seen as expired when accessing it using a different transaction or cursor.
  • In the unlikely event that the system clock is changed, locking a record may not guarantee that the record's data has not been purged, if the data is not read at the time the record is locked. This is because the record's key and its data are purged independently. It is possible to lock a record without reading its data by passing null for the 'data' parameter. If a record is locked in this manner, and the data was previously purged because the system clock was changed, then one of the following may occur, even when using the same transaction or cursor that was used to lock the record:
    • If the record is read again with a non-null data parameter, the operation may fail (return null) because the data cannot be read.
    • If a partial update is attempted (passing a partial 'data' parameter), the operation may fail (return null) because the pre-existing data cannot be read.

• Even when multiple records have the same expiration time, JE does not provide a way for them to expire atomically, as could be done by explicitly deleting multiple records in a single transaction. This restriction is for performance reasons; if records could expire atomically, they could not be purged efficiently using the JE cleaning process. Instead, each record expires individually, as if each were deleted in a separate transaction. This means that even when a set of records is inserted or updated atomically, a query may return some but not all of the records, when any of the records expire at a time very close to the time of the query. This is because the system clock is checked for each record individually at the time it is read by the query, and because expired records may be purged by other threads.

• There are several special cases of the above rule that involve access to primary and secondary databases. Because a given primary record and its associated secondary records are normal records in most respects, this set of records does not expire atomically. For most read and write operations, JE treats the expiration of any record in this set as if all records have expired, and in these cases there is no special behavior to consider. For example:
  • As long as the primary and secondary databases are transactional, JE ensures that the expiration times of a given primary record and all its associated secondary records are the same.
  • When reading a primary record via a secondary key, JE first reads the secondary record and then the primary. If either record expires during this process, both records are treated as expired.
  • When updating or deleting a primary record, JE first reads the primary record to obtain the secondary keys and then deletes/updates/inserts the secondary records as needed. If a secondary record expires during this process, this will not cause a SecondaryIntegrityException, as would normally happen when an expected associated record is missing.
  • When a primary and/or secondary record expires after being read, with few exceptions, repeatable-read semantics are supported as described above, i.e., locks prevent expiration from the viewpoint of the locking transaction or cursor. Exceptions to this rule are described below.

  However, there are several cases where such treatment by JE is not practical, and the user should be aware of special behavior when primary or secondary records expire. These are not common use cases, but it is important to be aware of them. In the cases described below, let us assume a primary database has two associated secondary databases, and a particular primary record with primary key X has two secondary records with keys A and B, one in each secondary database.

  • After a transaction or cursor reads and locks the primary record via primary key X, reading via primary key X again with the same transaction or cursor will also be successful even if the record has expired, i.e., repeatable-read is supported. However, if the record expires and the same transaction or cursor attempts to read via key A or B, the record will not be found. This is because the secondary records for keys A and B were not locked and they expire independently of the primary record.
  • Similarly, after a transaction or cursor reads and locks the primary record via secondary key A successfully, reading via key A again with the same transaction or cursor will also be successful even if the record has expired. Reading via primary key X will also be successful, even if the record has expired, because the primary record was locked. However, if the record expires and the same transaction or cursor attempts to read via key B, the record will not be found. This is because the secondary record for key B was not locked and it expires independently of the primary record and the secondary record for key A.
  • When reading via a secondary database, it is possible to read only the secondary key and primary key (which are both contained in the secondary record), but not the primary record, by passing null for the 'data' parameter. In this case the primary record is not locked. Therefore, if the record expires and the same transaction or cursor attempts to read the primary record (via any secondary key or the primary key), the record will not be found.
  • When a record expires, if its database serves as a foreign key database, the foreign key delete action will not be enforced. Therefore, setting a TTL for a record in a foreign key database is not recommended. The same is true when using the DPL and a foreign key database is specified using SecondaryKey.relatedEntity().

• When JE detects what may be an internal integrity error, it tries to determine whether an expired record, rather than a true integrity error, is the underlying cause. To prevent internal errors when small changes in the system clock time are made, if a record has expired within EnvironmentConfig.ENV_TTL_CLOCK_TOLERANCE (two hours, by default), JE treats the record as deleted and no exception is thrown.

  When an integrity error does cause an exception to be thrown, the record's expiration time will be included in the exception message and this can help to diagnose the problem. This includes the following exceptions:

  In cases where the clock has been changed by more than one hour and integrity exceptions occur because of this, it may be possible to avoid the exceptions by setting the EnvironmentConfig.ENV_TTL_CLOCK_TOLERANCE configuration parameter to a larger value.

In order to use the TTL feature in a ReplicatedEnvironment, all nodes must be upgraded to JE 7.0 or later. If one or more nodes in a group uses an earlier version, an IllegalStateException will be thrown when attempting a put operation with a non-zero TTL. Also, once records with a non-zero TTL have been written, a node using an earlier version of JE may not join the group; if this is attempted, the node will fail during open with an EnvironmentFailureException.

Since:
    7.0
Constructor Summary

Constructor and Description
WriteOptions()
    Constructs a WriteOptions object with default values for all properties.
Method Summary

Modifier and Type                Method and Description
WriteOptions                     clone()
CacheMode                        getCacheMode()
                                 Returns the CacheMode to be used for the operation, or null if the Cursor, Database or Environment default will be used.
int                              getTTL()
                                 Returns the Time-To-Live property for a 'put' operation.
java.util.concurrent.TimeUnit    getTTLUnit()
                                 Returns the Time-To-Live time unit for a 'put' operation.
boolean                          getUpdateTTL()
                                 Returns the update-TTL property for a 'put' operation.
WriteOptions                     setCacheMode(CacheMode cacheMode)
                                 Sets the CacheMode to be used for the operation.
WriteOptions                     setExpirationTime(long expirationTime, java.util.concurrent.TimeUnit timeUnit)
                                 A convenience method to set the TTL based on a given expiration time and the current system time.
WriteOptions                     setTTL(int ttl)
                                 Sets the Time-To-Live property for a 'put' operation, using TimeUnit.DAYS as the TTL unit.
WriteOptions                     setTTL(int ttl, java.util.concurrent.TimeUnit timeUnit)
                                 Sets the Time-To-Live property for a 'put' operation, using the given TimeUnit.
WriteOptions                     setUpdateTTL(boolean updateTtl)
                                 Sets the update-TTL property for a 'put' operation.

Methods inherited from class java.lang.Object:
equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Constructor Detail

WriteOptions

public WriteOptions()

Constructs a WriteOptions object with default values for all properties.

Method Detail
clone

public WriteOptions clone()

Overrides:
    clone in class java.lang.Object

getCacheMode

public CacheMode getCacheMode()

Returns the CacheMode to be used for the operation, or null if the Cursor, Database or Environment default will be used.

See Also:
    setCacheMode(CacheMode)

setTTL

public WriteOptions setTTL(int ttl)

Sets the Time-To-Live property for a 'put' operation, using TimeUnit.DAYS as the TTL unit.

Parameters:
    ttl - the number of days after the current time on which the record will automatically expire, or zero for no automatic expiration. May not be negative.
Returns:
    'this'.
See Also:
    Time-To-Live

setTTL

public WriteOptions setTTL(int ttl, java.util.concurrent.TimeUnit timeUnit)

Sets the Time-To-Live property for a 'put' operation, using the given TimeUnit.

Parameters:
    ttl - the number of days or hours after the current time on which the record will automatically expire, or zero for no automatic expiration. May not be negative.
    timeUnit - is TimeUnit.DAYS or TimeUnit.HOURS. TimeUnit.DAYS is recommended to minimize storage requirements (memory and disk).
Returns:
    'this'.
See Also:
    Time-To-Live

getTTL

public int getTTL()

Returns the Time-To-Live property for a 'put' operation.

See Also:
    setTTL(int)

getTTLUnit

public java.util.concurrent.TimeUnit getTTLUnit()

Returns the Time-To-Live time unit for a 'put' operation.

See Also:
    setTTL(int, TimeUnit)

setUpdateTTL

public WriteOptions setUpdateTTL(boolean updateTtl)

Sets the update-TTL property for a 'put' operation.

If this property is true and the operation updates a record, the specified TTL will be used to assign a new expiration time for the record, or to clear the record's expiration time if the specified TTL is zero.

If this property is false and the operation updates a record, the record's expiration time will not be changed.

If the operation inserts a record, this property is ignored and the specified TTL is always applied.

By default, this property is false.

Parameters:
    updateTtl - whether to assign (or clear) the expiration time when updating an existing record.
Returns:
    'this'.
See Also:
    Time-To-Live
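A short sketch of refreshing an existing record's TTL on update, under the same assumptions as the earlier put example (open Database db, JE 7+ write API):

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Put;
    import com.sleepycat.je.WriteOptions;

    // Re-put an existing record and extend its expiration. Without
    // setUpdateTTL(true), the update would leave the old expiration in place.
    static void refreshTtl(Database db, DatabaseEntry key, DatabaseEntry data) {
        WriteOptions options = new WriteOptions()
            .setTTL(7, TimeUnit.DAYS)
            .setUpdateTTL(true);
        db.put(null, key, data, Put.OVERWRITE, options);
    }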
getUpdateTTL

public boolean getUpdateTTL()

Returns the update-TTL property for a 'put' operation.

See Also:
    setUpdateTTL(boolean)
setExpirationTime

public WriteOptions setExpirationTime(long expirationTime, java.util.concurrent.TimeUnit timeUnit)

A convenience method to set the TTL based on a given expiration time and the current system time.

Given a desired expiration time and TimeUnit (DAYS or HOURS), sets the TTL to a value that will cause a record to expire at or after the given time, if the record is stored at the current time. The intended use case is to determine the TTL when writing a record and the desired expiration time, rather than the TTL, is known.

This method determines the TTL by taking the difference between the current time and the given time, converting it from milliseconds to days (or hours), and rounding up if it is not evenly divisible by the number of milliseconds in one day (or hour).

A special use case is when the expiration time was previously obtained from OperationResult.getExpirationTime(), for example, when performing an export followed by an import. To support this, null can be passed for the timeUnit parameter and the time unit will be determined as follows.

• This method first converts the expiration time to a TTL in hours, as described above. If the expiration time was obtained by calling OperationResult.getExpirationTime(), then it will be evenly divisible by the number of milliseconds in one hour and no rounding will occur.
• If the resulting TTL in hours is an even multiple of 24, DAYS is used; otherwise, HOURS is used. For example, when performing an import, if the original expiration time was specified in DAYS and obtained by calling OperationResult.getExpirationTime(), the unit derived by this method will also be DAYS.

Note that when a particular time unit is desired, null should not be passed for the timeUnit parameter. Normally TimeUnit.DAYS is recommended instead of TimeUnit.HOURS, to minimize storage requirements (memory and disk). When the desired unit is known, the unit should be passed explicitly.

Parameters:
    expirationTime - the desired expiration time in milliseconds (UTC), or zero for no automatic expiration.
    timeUnit - TimeUnit.DAYS or TimeUnit.HOURS, or null to derive the time unit as described above.
Throws:
    java.lang.IllegalArgumentException - if timeUnit is not DAYS, HOURS or null.
See Also:
    Time-To-Live
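A sketch of the export/import use case described above; the exportResult captured from a prior read on the source environment, and the open target Database, are assumptions for illustration.

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.OperationResult;
    import com.sleepycat.je.Put;
    import com.sleepycat.je.WriteOptions;

    // Imports a record, preserving the expiration time captured at export.
    // Passing null lets JE derive the original DAYS or HOURS unit.
    static void importRecord(Database targetDb, DatabaseEntry key,
                             DatabaseEntry data, OperationResult exportResult) {
        WriteOptions options = new WriteOptions()
            .setExpirationTime(exportResult.getExpirationTime(), null);
        targetDb.put(null, key, data, Put.OVERWRITE, options);
    }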
diff --git a/docs/java/com/sleepycat/je/XAEnvironment.html b/docs/java/com/sleepycat/je/XAEnvironment.html
new file mode 100644
index 0000000..f4bf4fa
--- /dev/null
+++ b/docs/java/com/sleepycat/je/XAEnvironment.html
@@ -0,0 +1,517 @@
XAEnvironment (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je
Class XAEnvironment

All Implemented Interfaces:
    java.io.Closeable, java.lang.AutoCloseable, javax.transaction.xa.XAResource

public class XAEnvironment
extends Environment
implements javax.transaction.xa.XAResource

An Environment that implements XAResource. If JE is used in an XA environment, this class should be used instead of Environment so that appropriate XA functions are available.
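A hedged sketch of the basic XA flow against this class. The trivial Xid implementation, the environment path, and the flags shown are illustrative assumptions; in practice a transaction manager supplies the Xid and drives these XAResource calls.

    import java.io.File;
    import javax.transaction.xa.XAResource;
    import javax.transaction.xa.Xid;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.XAEnvironment;

    public class XaSketch {
        // Minimal Xid for illustration only.
        static final class SimpleXid implements Xid {
            private final int formatId;
            private final byte[] gtrid;
            private final byte[] bqual;
            SimpleXid(int formatId, byte[] gtrid, byte[] bqual) {
                this.formatId = formatId;
                this.gtrid = gtrid;
                this.bqual = bqual;
            }
            public int getFormatId() { return formatId; }
            public byte[] getGlobalTransactionId() { return gtrid; }
            public byte[] getBranchQualifier() { return bqual; }
        }

        public static void main(String[] args) throws Exception {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            config.setTransactional(true);
            XAEnvironment env = new XAEnvironment(new File("/tmp/je-xa"), config);

            Xid xid = new SimpleXid(1, new byte[] {1}, new byte[] {1});
            env.start(xid, XAResource.TMNOFLAGS);  // associate work with the branch
            // ... perform transactional reads/writes here ...
            env.end(xid, XAResource.TMSUCCESS);
            if (env.prepare(xid) == XAResource.XA_OK) {
                env.commit(xid, false);            // second phase of 2PC
            }
            env.close();
        }
    }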
Method Detail
commit

public void commit(javax.transaction.xa.Xid xid, boolean ignore)
    throws javax.transaction.xa.XAException

Specified by:
    commit in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

end

public void end(javax.transaction.xa.Xid xid, int flags)
    throws javax.transaction.xa.XAException

Specified by:
    end in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

forget

public void forget(javax.transaction.xa.Xid xid)
    throws javax.transaction.xa.XAException

Specified by:
    forget in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

isSameRM

public boolean isSameRM(javax.transaction.xa.XAResource rm)
    throws javax.transaction.xa.XAException

Specified by:
    isSameRM in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

prepare

public int prepare(javax.transaction.xa.Xid xid)
    throws javax.transaction.xa.XAException

Specified by:
    prepare in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

recover

public javax.transaction.xa.Xid[] recover(int flags)
    throws javax.transaction.xa.XAException

Specified by:
    recover in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

rollback

public void rollback(javax.transaction.xa.Xid xid)
    throws javax.transaction.xa.XAException

Specified by:
    rollback in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

getTransactionTimeout

public int getTransactionTimeout()
    throws javax.transaction.xa.XAException

Specified by:
    getTransactionTimeout in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException

setTransactionTimeout

public boolean setTransactionTimeout(int timeout)

Specified by:
    setTransactionTimeout in interface javax.transaction.xa.XAResource

start

public void start(javax.transaction.xa.Xid xid, int flags)
    throws javax.transaction.xa.XAException

Specified by:
    start in interface javax.transaction.xa.XAResource
Throws:
    javax.transaction.xa.XAException
diff --git a/docs/java/com/sleepycat/je/XAFailureException.html b/docs/java/com/sleepycat/je/XAFailureException.html
new file mode 100644
index 0000000..93a13ed
--- /dev/null
+++ b/docs/java/com/sleepycat/je/XAFailureException.html
@@ -0,0 +1,257 @@
XAFailureException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je
Class XAFailureException

All Implemented Interfaces:
    java.io.Serializable

public class XAFailureException
extends OperationFailureException

Thrown if an attempt is made to use a Transaction after it has been invalidated as the result of an XA failure. The invalidation occurs when XAResource.end is called by the resource manager with the XAResource.TMFAIL flag.

The Transaction handle is invalidated as a result of this exception.

Since:
    4.0
See Also:
    Serialized Form
Method Summary

Methods inherited from class java.lang.Throwable:
addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
diff --git a/docs/java/com/sleepycat/je/class-use/BinaryEqualityComparator.html b/docs/java/com/sleepycat/je/class-use/BinaryEqualityComparator.html
new file mode 100644
index 0000000..9db518c
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/BinaryEqualityComparator.html
@@ -0,0 +1,129 @@
Uses of Interface com.sleepycat.je.BinaryEqualityComparator (Oracle - Berkeley DB Java Edition API)
Uses of Interface com.sleepycat.je.BinaryEqualityComparator

No usage of com.sleepycat.je.BinaryEqualityComparator

diff --git a/docs/java/com/sleepycat/je/class-use/BtreeStats.html b/docs/java/com/sleepycat/je/class-use/BtreeStats.html
new file mode 100644
index 0000000..c481be2
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/BtreeStats.html
@@ -0,0 +1,129 @@
Uses of Class com.sleepycat.je.BtreeStats (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.BtreeStats

No usage of com.sleepycat.je.BtreeStats

diff --git a/docs/java/com/sleepycat/je/class-use/CacheMode.html b/docs/java/com/sleepycat/je/class-use/CacheMode.html
new file mode 100644
index 0000000..b421a9e
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/CacheMode.html
@@ -0,0 +1,302 @@
Uses of Class com.sleepycat.je.CacheMode (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.CacheMode

diff --git a/docs/java/com/sleepycat/je/class-use/CheckpointConfig.html b/docs/java/com/sleepycat/je/class-use/CheckpointConfig.html
new file mode 100644
index 0000000..cef7d56
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/CheckpointConfig.html
@@ -0,0 +1,228 @@
Uses of Class com.sleepycat.je.CheckpointConfig (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.CheckpointConfig

diff --git a/docs/java/com/sleepycat/je/class-use/CommitToken.html b/docs/java/com/sleepycat/je/class-use/CommitToken.html
new file mode 100644
index 0000000..ad2c2ac
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/CommitToken.html
@@ -0,0 +1,234 @@
Uses of Class com.sleepycat.je.CommitToken (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.CommitToken

diff --git a/docs/java/com/sleepycat/je/class-use/Cursor.html b/docs/java/com/sleepycat/je/class-use/Cursor.html
new file mode 100644
index 0000000..e24a30b
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/Cursor.html
@@ -0,0 +1,221 @@
Uses of Class com.sleepycat.je.Cursor (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.Cursor

diff --git a/docs/java/com/sleepycat/je/class-use/CursorConfig.html b/docs/java/com/sleepycat/je/class-use/CursorConfig.html
new file mode 100644
index 0000000..3509ece
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/CursorConfig.html
@@ -0,0 +1,413 @@
Uses of Class com.sleepycat.je.CursorConfig (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.CursorConfig

diff --git a/docs/java/com/sleepycat/je/class-use/CustomStats.html b/docs/java/com/sleepycat/je/class-use/CustomStats.html
new file mode 100644
index 0000000..566e8c5
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/CustomStats.html
@@ -0,0 +1,189 @@
Uses of Interface com.sleepycat.je.CustomStats (Oracle - Berkeley DB Java Edition API)
Uses of Interface com.sleepycat.je.CustomStats

diff --git a/docs/java/com/sleepycat/je/class-use/Database.html b/docs/java/com/sleepycat/je/class-use/Database.html
new file mode 100644
index 0000000..4ee40ff
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/Database.html
@@ -0,0 +1,582 @@
Uses of Class com.sleepycat.je.Database (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.Database

diff --git a/docs/java/com/sleepycat/je/class-use/DatabaseComparator.html b/docs/java/com/sleepycat/je/class-use/DatabaseComparator.html
new file mode 100644
index 0000000..58cd522
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DatabaseComparator.html
@@ -0,0 +1,129 @@
Uses of Interface com.sleepycat.je.DatabaseComparator (Oracle - Berkeley DB Java Edition API)
Uses of Interface com.sleepycat.je.DatabaseComparator

No usage of com.sleepycat.je.DatabaseComparator

diff --git a/docs/java/com/sleepycat/je/class-use/DatabaseConfig.html b/docs/java/com/sleepycat/je/class-use/DatabaseConfig.html
new file mode 100644
index 0000000..5bf9a5a
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DatabaseConfig.html
@@ -0,0 +1,438 @@
Uses of Class com.sleepycat.je.DatabaseConfig (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DatabaseConfig

diff --git a/docs/java/com/sleepycat/je/class-use/DatabaseEntry.html b/docs/java/com/sleepycat/je/class-use/DatabaseEntry.html
new file mode 100644
index 0000000..463df1f
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DatabaseEntry.html
@@ -0,0 +1,1532 @@
Uses of Class com.sleepycat.je.DatabaseEntry (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DatabaseEntry

diff --git a/docs/java/com/sleepycat/je/class-use/DatabaseException.html b/docs/java/com/sleepycat/je/class-use/DatabaseException.html
new file mode 100644
index 0000000..c17acbb
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DatabaseException.html
@@ -0,0 +1,1964 @@
Uses of Class com.sleepycat.je.DatabaseException (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DatabaseException

diff --git a/docs/java/com/sleepycat/je/class-use/DatabaseExistsException.html b/docs/java/com/sleepycat/je/class-use/DatabaseExistsException.html
new file mode 100644
index 0000000..93334dd
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DatabaseExistsException.html
@@ -0,0 +1,185 @@
Uses of Class com.sleepycat.je.DatabaseExistsException (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DatabaseExistsException

diff --git a/docs/java/com/sleepycat/je/class-use/DatabaseNotFoundException.html b/docs/java/com/sleepycat/je/class-use/DatabaseNotFoundException.html
new file mode 100644
index 0000000..895be42
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DatabaseNotFoundException.html
@@ -0,0 +1,240 @@
Uses of Class com.sleepycat.je.DatabaseNotFoundException (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DatabaseNotFoundException

diff --git a/docs/java/com/sleepycat/je/class-use/DatabaseStats.html b/docs/java/com/sleepycat/je/class-use/DatabaseStats.html
new file mode 100644
index 0000000..f1be047
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DatabaseStats.html
@@ -0,0 +1,195 @@
Uses of Class com.sleepycat.je.DatabaseStats (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DatabaseStats

diff --git a/docs/java/com/sleepycat/je/class-use/DeadlockException.html b/docs/java/com/sleepycat/je/class-use/DeadlockException.html
new file mode 100644
index 0000000..cc2bb57
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DeadlockException.html
@@ -0,0 +1,176 @@
Uses of Class com.sleepycat.je.DeadlockException (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DeadlockException

diff --git a/docs/java/com/sleepycat/je/class-use/DeleteConstraintException.html b/docs/java/com/sleepycat/je/class-use/DeleteConstraintException.html
new file mode 100644
index 0000000..11da38f
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DeleteConstraintException.html
@@ -0,0 +1,129 @@
Uses of Class com.sleepycat.je.DeleteConstraintException (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DeleteConstraintException

No usage of com.sleepycat.je.DeleteConstraintException

diff --git a/docs/java/com/sleepycat/je/class-use/DiskLimitException.html b/docs/java/com/sleepycat/je/class-use/DiskLimitException.html
new file mode 100644
index 0000000..1b3c601
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DiskLimitException.html
@@ -0,0 +1,129 @@
Uses of Class com.sleepycat.je.DiskLimitException (Oracle - Berkeley DB Java Edition API)
Uses of Class com.sleepycat.je.DiskLimitException

No usage of com.sleepycat.je.DiskLimitException

diff --git a/docs/java/com/sleepycat/je/class-use/DiskOrderedCursor.html b/docs/java/com/sleepycat/je/class-use/DiskOrderedCursor.html
new file mode 100644
index 0000000..4f3bbf6
--- /dev/null
+++ b/docs/java/com/sleepycat/je/class-use/DiskOrderedCursor.html
@@ -0,0 +1,183 @@
Uses of Class com.sleepycat.je.DiskOrderedCursor (Oracle - Berkeley DB Java Edition API)
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.DiskOrderedCursor

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/DiskOrderedCursorConfig.html b/docs/java/com/sleepycat/je/class-use/DiskOrderedCursorConfig.html new file mode 100644 index 0000000..016b737 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/DiskOrderedCursorConfig.html @@ -0,0 +1,272 @@ + + + + + +Uses of Class com.sleepycat.je.DiskOrderedCursorConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.DiskOrderedCursorConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/DiskOrderedCursorProducerException.html b/docs/java/com/sleepycat/je/class-use/DiskOrderedCursorProducerException.html new file mode 100644 index 0000000..0e64558 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/DiskOrderedCursorProducerException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.DiskOrderedCursorProducerException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.DiskOrderedCursorProducerException

    +
    +
    No usage of com.sleepycat.je.DiskOrderedCursorProducerException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/DuplicateDataException.html b/docs/java/com/sleepycat/je/class-use/DuplicateDataException.html new file mode 100644 index 0000000..e9c273f --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/DuplicateDataException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.DuplicateDataException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.DuplicateDataException

    +
    +
    No usage of com.sleepycat.je.DuplicateDataException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Durability.ReplicaAckPolicy.html b/docs/java/com/sleepycat/je/class-use/Durability.ReplicaAckPolicy.html new file mode 100644 index 0000000..11ba312 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Durability.ReplicaAckPolicy.html @@ -0,0 +1,247 @@ + + + + + +Uses of Class com.sleepycat.je.Durability.ReplicaAckPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Durability.ReplicaAckPolicy

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Durability.SyncPolicy.html b/docs/java/com/sleepycat/je/class-use/Durability.SyncPolicy.html new file mode 100644 index 0000000..32ce279 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Durability.SyncPolicy.html @@ -0,0 +1,210 @@ + + + + + +Uses of Class com.sleepycat.je.Durability.SyncPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Durability.SyncPolicy

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Durability.html b/docs/java/com/sleepycat/je/class-use/Durability.html new file mode 100644 index 0000000..ec1aeab --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Durability.html @@ -0,0 +1,251 @@ + + + + + +Uses of Class com.sleepycat.je.Durability (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Durability

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Environment.html b/docs/java/com/sleepycat/je/class-use/Environment.html new file mode 100644 index 0000000..4ee390c --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Environment.html @@ -0,0 +1,661 @@ + + + + + +Uses of Class com.sleepycat.je.Environment (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Environment

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/EnvironmentConfig.html b/docs/java/com/sleepycat/je/class-use/EnvironmentConfig.html new file mode 100644 index 0000000..b29dca2 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/EnvironmentConfig.html @@ -0,0 +1,411 @@ + + + + + +Uses of Class com.sleepycat.je.EnvironmentConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.EnvironmentConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/EnvironmentFailureException.html b/docs/java/com/sleepycat/je/class-use/EnvironmentFailureException.html new file mode 100644 index 0000000..c4d54e9 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/EnvironmentFailureException.html @@ -0,0 +1,416 @@ + + + + + +Uses of Class com.sleepycat.je.EnvironmentFailureException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.EnvironmentFailureException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/EnvironmentLockedException.html b/docs/java/com/sleepycat/je/class-use/EnvironmentLockedException.html new file mode 100644 index 0000000..ae09cb5 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/EnvironmentLockedException.html @@ -0,0 +1,296 @@ + + + + + +Uses of Class com.sleepycat.je.EnvironmentLockedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.EnvironmentLockedException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/EnvironmentMutableConfig.html b/docs/java/com/sleepycat/je/class-use/EnvironmentMutableConfig.html new file mode 100644 index 0000000..e6fb6c8 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/EnvironmentMutableConfig.html @@ -0,0 +1,271 @@ + + + + + +Uses of Class com.sleepycat.je.EnvironmentMutableConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.EnvironmentMutableConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/EnvironmentNotFoundException.html b/docs/java/com/sleepycat/je/class-use/EnvironmentNotFoundException.html new file mode 100644 index 0000000..65694da --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/EnvironmentNotFoundException.html @@ -0,0 +1,296 @@ + + + + + +Uses of Class com.sleepycat.je.EnvironmentNotFoundException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.EnvironmentNotFoundException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/EnvironmentStats.html b/docs/java/com/sleepycat/je/class-use/EnvironmentStats.html new file mode 100644 index 0000000..db10df3 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/EnvironmentStats.html @@ -0,0 +1,174 @@ + + + + + +Uses of Class com.sleepycat.je.EnvironmentStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.EnvironmentStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/EnvironmentWedgedException.html b/docs/java/com/sleepycat/je/class-use/EnvironmentWedgedException.html new file mode 100644 index 0000000..883d632 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/EnvironmentWedgedException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.EnvironmentWedgedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.EnvironmentWedgedException

    +
    +
    No usage of com.sleepycat.je.EnvironmentWedgedException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ExceptionEvent.html b/docs/java/com/sleepycat/je/class-use/ExceptionEvent.html new file mode 100644 index 0000000..0ca2210 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ExceptionEvent.html @@ -0,0 +1,174 @@ + + + + + +Uses of Class com.sleepycat.je.ExceptionEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.ExceptionEvent

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ExceptionListener.html b/docs/java/com/sleepycat/je/class-use/ExceptionListener.html new file mode 100644 index 0000000..4d3edbb --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ExceptionListener.html @@ -0,0 +1,189 @@ + + + + + +Uses of Interface com.sleepycat.je.ExceptionListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.ExceptionListener

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ForeignConstraintException.html b/docs/java/com/sleepycat/je/class-use/ForeignConstraintException.html new file mode 100644 index 0000000..c7ff8f0 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ForeignConstraintException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.ForeignConstraintException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.ForeignConstraintException

    +
    +
    No usage of com.sleepycat.je.ForeignConstraintException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ForeignKeyDeleteAction.html b/docs/java/com/sleepycat/je/class-use/ForeignKeyDeleteAction.html new file mode 100644 index 0000000..aa19251 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ForeignKeyDeleteAction.html @@ -0,0 +1,204 @@ + + + + + +Uses of Class com.sleepycat.je.ForeignKeyDeleteAction (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.ForeignKeyDeleteAction

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ForeignKeyNullifier.html b/docs/java/com/sleepycat/je/class-use/ForeignKeyNullifier.html new file mode 100644 index 0000000..8e26973 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ForeignKeyNullifier.html @@ -0,0 +1,261 @@ + + + + + +Uses of Interface com.sleepycat.je.ForeignKeyNullifier (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.ForeignKeyNullifier

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ForeignMultiKeyNullifier.html b/docs/java/com/sleepycat/je/class-use/ForeignMultiKeyNullifier.html new file mode 100644 index 0000000..c8b0be9 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ForeignMultiKeyNullifier.html @@ -0,0 +1,191 @@ + + + + + +Uses of Interface com.sleepycat.je.ForeignMultiKeyNullifier (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.ForeignMultiKeyNullifier

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ForwardCursor.html b/docs/java/com/sleepycat/je/class-use/ForwardCursor.html new file mode 100644 index 0000000..8bb3a72 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ForwardCursor.html @@ -0,0 +1,194 @@ + + + + + +Uses of Interface com.sleepycat.je.ForwardCursor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.ForwardCursor

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Get.html b/docs/java/com/sleepycat/je/class-use/Get.html new file mode 100644 index 0000000..978a713 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Get.html @@ -0,0 +1,332 @@ + + + + + +Uses of Class com.sleepycat.je.Get (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Get

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/JEVersion.html b/docs/java/com/sleepycat/je/class-use/JEVersion.html new file mode 100644 index 0000000..ad64c91 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/JEVersion.html @@ -0,0 +1,215 @@ + + + + + +Uses of Class com.sleepycat.je.JEVersion (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.JEVersion

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/JoinConfig.html b/docs/java/com/sleepycat/je/class-use/JoinConfig.html new file mode 100644 index 0000000..0a6b1b6 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/JoinConfig.html @@ -0,0 +1,254 @@ + + + + + +Uses of Class com.sleepycat.je.JoinConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.JoinConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/JoinCursor.html b/docs/java/com/sleepycat/je/class-use/JoinCursor.html new file mode 100644 index 0000000..500033c --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/JoinCursor.html @@ -0,0 +1,183 @@ + + + + + +Uses of Class com.sleepycat.je.JoinCursor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.JoinCursor

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/LockConflictException.html b/docs/java/com/sleepycat/je/class-use/LockConflictException.html new file mode 100644 index 0000000..3cdb4a4 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/LockConflictException.html @@ -0,0 +1,230 @@ + + + + + +Uses of Class com.sleepycat.je.LockConflictException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.LockConflictException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/LockMode.html b/docs/java/com/sleepycat/je/class-use/LockMode.html new file mode 100644 index 0000000..497ff2d --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/LockMode.html @@ -0,0 +1,826 @@ + + + + + +Uses of Class com.sleepycat.je.LockMode (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.LockMode

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/LockNotAvailableException.html b/docs/java/com/sleepycat/je/class-use/LockNotAvailableException.html new file mode 100644 index 0000000..a722988 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/LockNotAvailableException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.LockNotAvailableException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.LockNotAvailableException

    +
    +
    No usage of com.sleepycat.je.LockNotAvailableException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/LockNotGrantedException.html b/docs/java/com/sleepycat/je/class-use/LockNotGrantedException.html new file mode 100644 index 0000000..32d4489 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/LockNotGrantedException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.LockNotGrantedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.LockNotGrantedException

    +
    +
    No usage of com.sleepycat.je.LockNotGrantedException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/LockStats.html b/docs/java/com/sleepycat/je/class-use/LockStats.html new file mode 100644 index 0000000..03896e4 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/LockStats.html @@ -0,0 +1,176 @@ + + + + + +Uses of Class com.sleepycat.je.LockStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.LockStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/LockTimeoutException.html b/docs/java/com/sleepycat/je/class-use/LockTimeoutException.html new file mode 100644 index 0000000..40d80e9 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/LockTimeoutException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.LockTimeoutException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.LockTimeoutException

    +
    +
    No usage of com.sleepycat.je.LockTimeoutException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/LogWriteException.html b/docs/java/com/sleepycat/je/class-use/LogWriteException.html new file mode 100644 index 0000000..1a9e284 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/LogWriteException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.LogWriteException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.LogWriteException

    +
    +
    No usage of com.sleepycat.je.LogWriteException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/OperationFailureException.html b/docs/java/com/sleepycat/je/class-use/OperationFailureException.html new file mode 100644 index 0000000..23c46ba --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/OperationFailureException.html @@ -0,0 +1,526 @@ + + + + + +Uses of Class com.sleepycat.je.OperationFailureException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.OperationFailureException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/OperationResult.html b/docs/java/com/sleepycat/je/class-use/OperationResult.html new file mode 100644 index 0000000..9145ff9 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/OperationResult.html @@ -0,0 +1,387 @@ + + + + + +Uses of Class com.sleepycat.je.OperationResult (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.OperationResult

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/OperationStatus.html b/docs/java/com/sleepycat/je/class-use/OperationStatus.html new file mode 100644 index 0000000..a883bdc --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/OperationStatus.html @@ -0,0 +1,778 @@ + + + + + +Uses of Class com.sleepycat.je.OperationStatus (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.OperationStatus

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/PartialComparator.html b/docs/java/com/sleepycat/je/class-use/PartialComparator.html new file mode 100644 index 0000000..e0d54f7 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/PartialComparator.html @@ -0,0 +1,129 @@ + + + + + +Uses of Interface com.sleepycat.je.PartialComparator (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.PartialComparator

    +
    +
    No usage of com.sleepycat.je.PartialComparator
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/PreloadConfig.Phases.html b/docs/java/com/sleepycat/je/class-use/PreloadConfig.Phases.html new file mode 100644 index 0000000..8d3cfd9 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/PreloadConfig.Phases.html @@ -0,0 +1,211 @@ + + + + + +Uses of Class com.sleepycat.je.PreloadConfig.Phases (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.PreloadConfig.Phases

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/PreloadConfig.html b/docs/java/com/sleepycat/je/class-use/PreloadConfig.html new file mode 100644 index 0000000..fb735c0 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/PreloadConfig.html @@ -0,0 +1,233 @@ + + + + + +Uses of Class com.sleepycat.je.PreloadConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.PreloadConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/PreloadStats.html b/docs/java/com/sleepycat/je/class-use/PreloadStats.html new file mode 100644 index 0000000..4191d0e --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/PreloadStats.html @@ -0,0 +1,181 @@ + + + + + +Uses of Class com.sleepycat.je.PreloadStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.PreloadStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/PreloadStatus.html b/docs/java/com/sleepycat/je/class-use/PreloadStatus.html new file mode 100644 index 0000000..11b9a98 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/PreloadStatus.html @@ -0,0 +1,211 @@ + + + + + +Uses of Class com.sleepycat.je.PreloadStatus (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.PreloadStatus

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ProgressListener.html b/docs/java/com/sleepycat/je/class-use/ProgressListener.html new file mode 100644 index 0000000..f6ec186 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ProgressListener.html @@ -0,0 +1,245 @@ + + + + + +Uses of Interface com.sleepycat.je.ProgressListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.ProgressListener

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Put.html b/docs/java/com/sleepycat/je/class-use/Put.html new file mode 100644 index 0000000..6c6f8fc --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Put.html @@ -0,0 +1,261 @@ + + + + + +Uses of Class com.sleepycat.je.Put (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Put

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ReadOptions.html b/docs/java/com/sleepycat/je/class-use/ReadOptions.html new file mode 100644 index 0000000..27df027 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ReadOptions.html @@ -0,0 +1,342 @@ + + + + + +Uses of Class com.sleepycat.je.ReadOptions (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.ReadOptions

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/RecoveryProgress.html b/docs/java/com/sleepycat/je/class-use/RecoveryProgress.html new file mode 100644 index 0000000..de652bf --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/RecoveryProgress.html @@ -0,0 +1,212 @@ + + + + + +Uses of Class com.sleepycat.je.RecoveryProgress (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.RecoveryProgress

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ReplicaConsistencyPolicy.html b/docs/java/com/sleepycat/je/class-use/ReplicaConsistencyPolicy.html new file mode 100644 index 0000000..ab883d7 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ReplicaConsistencyPolicy.html @@ -0,0 +1,291 @@ + + + + + +Uses of Interface com.sleepycat.je.ReplicaConsistencyPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.ReplicaConsistencyPolicy

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/RunRecoveryException.html b/docs/java/com/sleepycat/je/class-use/RunRecoveryException.html new file mode 100644 index 0000000..03d3e73 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/RunRecoveryException.html @@ -0,0 +1,292 @@ + + + + + +Uses of Class com.sleepycat.je.RunRecoveryException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.RunRecoveryException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryConfig.html b/docs/java/com/sleepycat/je/class-use/SecondaryConfig.html new file mode 100644 index 0000000..cf83b09 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryConfig.html @@ -0,0 +1,349 @@ + + + + + +Uses of Class com.sleepycat.je.SecondaryConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SecondaryConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryConstraintException.html b/docs/java/com/sleepycat/je/class-use/SecondaryConstraintException.html new file mode 100644 index 0000000..0569189 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryConstraintException.html @@ -0,0 +1,192 @@ + + + + + +Uses of Class com.sleepycat.je.SecondaryConstraintException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SecondaryConstraintException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryCursor.html b/docs/java/com/sleepycat/je/class-use/SecondaryCursor.html new file mode 100644 index 0000000..d6032ad --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryCursor.html @@ -0,0 +1,199 @@ + + + + + +Uses of Class com.sleepycat.je.SecondaryCursor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SecondaryCursor

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryDatabase.html b/docs/java/com/sleepycat/je/class-use/SecondaryDatabase.html new file mode 100644 index 0000000..4600ead --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryDatabase.html @@ -0,0 +1,399 @@ + + + + + +Uses of Class com.sleepycat.je.SecondaryDatabase (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SecondaryDatabase

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryIntegrityException.html b/docs/java/com/sleepycat/je/class-use/SecondaryIntegrityException.html new file mode 100644 index 0000000..3e72dae --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryIntegrityException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.SecondaryIntegrityException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SecondaryIntegrityException

    +
    +
    No usage of com.sleepycat.je.SecondaryIntegrityException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryKeyCreator.html b/docs/java/com/sleepycat/je/class-use/SecondaryKeyCreator.html new file mode 100644 index 0000000..c7e4c34 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryKeyCreator.html @@ -0,0 +1,261 @@ + + + + + +Uses of Interface com.sleepycat.je.SecondaryKeyCreator (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.SecondaryKeyCreator

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryMultiKeyCreator.html b/docs/java/com/sleepycat/je/class-use/SecondaryMultiKeyCreator.html new file mode 100644 index 0000000..ce342c0 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryMultiKeyCreator.html @@ -0,0 +1,191 @@ + + + + + +Uses of Interface com.sleepycat.je.SecondaryMultiKeyCreator (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.SecondaryMultiKeyCreator

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SecondaryReferenceException.html b/docs/java/com/sleepycat/je/class-use/SecondaryReferenceException.html new file mode 100644 index 0000000..e0746b2 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SecondaryReferenceException.html @@ -0,0 +1,207 @@ + + + + + +Uses of Class com.sleepycat.je.SecondaryReferenceException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SecondaryReferenceException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Sequence.html b/docs/java/com/sleepycat/je/class-use/Sequence.html new file mode 100644 index 0000000..35f8283 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Sequence.html @@ -0,0 +1,204 @@ + + + + + +Uses of Class com.sleepycat.je.Sequence (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Sequence

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SequenceConfig.html b/docs/java/com/sleepycat/je/class-use/SequenceConfig.html new file mode 100644 index 0000000..e51e3aa --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SequenceConfig.html @@ -0,0 +1,303 @@ + + + + + +Uses of Class com.sleepycat.je.SequenceConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SequenceConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SequenceExistsException.html b/docs/java/com/sleepycat/je/class-use/SequenceExistsException.html new file mode 100644 index 0000000..666ca62 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SequenceExistsException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.SequenceExistsException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SequenceExistsException

    +
    +
    No usage of com.sleepycat.je.SequenceExistsException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SequenceIntegrityException.html b/docs/java/com/sleepycat/je/class-use/SequenceIntegrityException.html new file mode 100644 index 0000000..16cdc54 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SequenceIntegrityException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.SequenceIntegrityException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SequenceIntegrityException

    +
    +
    No usage of com.sleepycat.je.SequenceIntegrityException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SequenceNotFoundException.html b/docs/java/com/sleepycat/je/class-use/SequenceNotFoundException.html new file mode 100644 index 0000000..2527dec --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SequenceNotFoundException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.SequenceNotFoundException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SequenceNotFoundException

    +
    +
    No usage of com.sleepycat.je.SequenceNotFoundException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SequenceOverflowException.html b/docs/java/com/sleepycat/je/class-use/SequenceOverflowException.html new file mode 100644 index 0000000..5c35a4c --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SequenceOverflowException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.SequenceOverflowException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SequenceOverflowException

    +
    +
    No usage of com.sleepycat.je.SequenceOverflowException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/SequenceStats.html b/docs/java/com/sleepycat/je/class-use/SequenceStats.html new file mode 100644 index 0000000..e8985b6 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/SequenceStats.html @@ -0,0 +1,174 @@ + + + + + +Uses of Class com.sleepycat.je.SequenceStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.SequenceStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/StatsConfig.html b/docs/java/com/sleepycat/je/class-use/StatsConfig.html new file mode 100644 index 0000000..85d9c7e --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/StatsConfig.html @@ -0,0 +1,347 @@ + + + + + +Uses of Class com.sleepycat.je.StatsConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.StatsConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/ThreadInterruptedException.html b/docs/java/com/sleepycat/je/class-use/ThreadInterruptedException.html new file mode 100644 index 0000000..2a3ac83 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/ThreadInterruptedException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.ThreadInterruptedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.ThreadInterruptedException

    +
    +
    No usage of com.sleepycat.je.ThreadInterruptedException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Transaction.State.html b/docs/java/com/sleepycat/je/class-use/Transaction.State.html new file mode 100644 index 0000000..61df8fb --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Transaction.State.html @@ -0,0 +1,187 @@ + + + + + +Uses of Class com.sleepycat.je.Transaction.State (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Transaction.State

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/Transaction.html b/docs/java/com/sleepycat/je/class-use/Transaction.html new file mode 100644 index 0000000..da4711a --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/Transaction.html @@ -0,0 +1,741 @@ + + + + + +Uses of Class com.sleepycat.je.Transaction (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.Transaction

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/TransactionConfig.html b/docs/java/com/sleepycat/je/class-use/TransactionConfig.html new file mode 100644 index 0000000..028568d --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/TransactionConfig.html @@ -0,0 +1,378 @@ + + + + + +Uses of Class com.sleepycat.je.TransactionConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.TransactionConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/TransactionStats.Active.html b/docs/java/com/sleepycat/je/class-use/TransactionStats.Active.html new file mode 100644 index 0000000..523352d --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/TransactionStats.Active.html @@ -0,0 +1,174 @@ + + + + + +Uses of Class com.sleepycat.je.TransactionStats.Active (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.TransactionStats.Active

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/TransactionStats.html b/docs/java/com/sleepycat/je/class-use/TransactionStats.html new file mode 100644 index 0000000..43cc247 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/TransactionStats.html @@ -0,0 +1,174 @@ + + + + + +Uses of Class com.sleepycat.je.TransactionStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.TransactionStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/TransactionTimeoutException.html b/docs/java/com/sleepycat/je/class-use/TransactionTimeoutException.html new file mode 100644 index 0000000..967471c --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/TransactionTimeoutException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.TransactionTimeoutException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.TransactionTimeoutException

    +
    +
    No usage of com.sleepycat.je.TransactionTimeoutException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/UniqueConstraintException.html b/docs/java/com/sleepycat/je/class-use/UniqueConstraintException.html new file mode 100644 index 0000000..0f40410 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/UniqueConstraintException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.UniqueConstraintException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.UniqueConstraintException

    +
    +
    No usage of com.sleepycat.je.UniqueConstraintException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/VerifyConfig.html b/docs/java/com/sleepycat/je/class-use/VerifyConfig.html new file mode 100644 index 0000000..791ca9d --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/VerifyConfig.html @@ -0,0 +1,268 @@ + + + + + +Uses of Class com.sleepycat.je.VerifyConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.VerifyConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/VersionMismatchException.html b/docs/java/com/sleepycat/je/class-use/VersionMismatchException.html new file mode 100644 index 0000000..5e3d80c --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/VersionMismatchException.html @@ -0,0 +1,173 @@ + + + + + +Uses of Class com.sleepycat.je.VersionMismatchException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.VersionMismatchException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/WriteOptions.html b/docs/java/com/sleepycat/je/class-use/WriteOptions.html new file mode 100644 index 0000000..e34d072 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/WriteOptions.html @@ -0,0 +1,340 @@ + + + + + +Uses of Class com.sleepycat.je.WriteOptions (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.WriteOptions

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/XAEnvironment.html b/docs/java/com/sleepycat/je/class-use/XAEnvironment.html new file mode 100644 index 0000000..b94e3e3 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/XAEnvironment.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.XAEnvironment (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.XAEnvironment

    +
    +
    No usage of com.sleepycat.je.XAEnvironment
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/class-use/XAFailureException.html b/docs/java/com/sleepycat/je/class-use/XAFailureException.html new file mode 100644 index 0000000..bcf7ee0 --- /dev/null +++ b/docs/java/com/sleepycat/je/class-use/XAFailureException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.XAFailureException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.XAFailureException

    +
    +
    No usage of com.sleepycat.je.XAFailureException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/JEConnection.html b/docs/java/com/sleepycat/je/jca/ra/JEConnection.html new file mode 100644 index 0000000..d03e9a0 --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/JEConnection.html @@ -0,0 +1,417 @@ + + + + + +JEConnection (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.jca.ra
    +

    Class JEConnection

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class JEConnection
      +extends java.lang.Object
      +implements java.io.Closeable
      +
A JEConnection provides access to JE services. See + <JEHOME>/examples/jca/HOWTO-**.txt and + <JEHOME>/examples/jca/simple/SimpleBean.java for more information on + how to build the resource adapter and use a JEConnection.
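A minimal sketch of a JEConnection's documented methods follows; "dc" is assumed to have been obtained from a JEConnectionFactory as shown in that interface's Javadoc, and the database name is illustrative.

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.Transaction;
    import com.sleepycat.je.jca.ra.JEConnection;

    public class JEConnectionSketch {
        public void useConnection(JEConnection dc) throws Exception {
            try {
                Environment env = dc.getEnvironment();   // underlying JE environment
                Transaction txn = dc.getTransaction();   // current local transaction, per Method Detail
                // Remove all records, returning the count (database name is illustrative).
                long count = dc.truncateDatabase("testDb", true /* returnCount */);
                System.out.println(env.getHome() + ": removed " + count + " records");
            } finally {
                dc.close();   // JEConnection implements Closeable/AutoCloseable
            }
        }
    }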
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          JEConnection

          +
          public JEConnection(com.sleepycat.je.jca.ra.JEManagedConnection mc)
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setManagedConnection

          +
          protected void setManagedConnection(com.sleepycat.je.jca.ra.JEManagedConnection mc,
          +                                    com.sleepycat.je.jca.ra.JELocalTransaction lt)
          +
        • +
        + + + +
          +
        • +

          getEnvironment

          +
          public Environment getEnvironment()
          +
        • +
        + + + + + + + + + + + + + + + +
          +
        • +

          truncateDatabase

          +
          public long truncateDatabase(java.lang.String databaseName,
          +                             boolean returnCount)
          +                      throws DatabaseException
          +
          +
          Throws:
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getTransaction

          +
          public Transaction getTransaction()
          +                           throws javax.resource.ResourceException
          +
          +
          Throws:
          +
          javax.resource.ResourceException
          +
          +
        • +
        + + + +
          +
        • +

          close

          +
          public void close()
          +
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/JEConnectionFactory.html b/docs/java/com/sleepycat/je/jca/ra/JEConnectionFactory.html new file mode 100644 index 0000000..ee6c43e --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/JEConnectionFactory.html @@ -0,0 +1,286 @@ + + + + + +JEConnectionFactory (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.jca.ra
    +

    Interface JEConnectionFactory

    +
    +
    +
    +
      +
    • +
      +
      All Superinterfaces:
      +
      javax.naming.Referenceable, java.io.Serializable
      +
      +
      +
      +
      public interface JEConnectionFactory
      +extends javax.resource.Referenceable, java.io.Serializable
      +
      An application may obtain a JEConnection in this manner: +
      +    InitialContext iniCtx = new InitialContext();
      +    Context enc = (Context) iniCtx.lookup("java:comp/env");
      +    Object ref = enc.lookup("ra/JEConnectionFactory");
      +    JEConnectionFactory dcf = (JEConnectionFactory) ref;
      +    JEConnection dc = dcf.getConnection(envDir, envConfig);
      + 
      + + See <JEHOME>/examples/jca/HOWTO-**.txt and + <JEHOME>/examples/jca/simple/SimpleBean.java for more information + on how to build the resource adapter and use a JEConnection.
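For completeness, a hedged sketch of the two declarations the snippet above assumes; the directory and configuration values are illustrative, not prescribed by JE.

    // Assumed inputs for the lookup snippet above.
    String envDir = "/var/je/env";              // JE environment home (assumption)
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);             // create the environment on first use
    envConfig.setTransactional(true);           // needed for local and XA transactions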
      +
    • +
    +
    +
    + +
    +
    + +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/JEException.html b/docs/java/com/sleepycat/je/jca/ra/JEException.html new file mode 100644 index 0000000..2cda769 --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/JEException.html @@ -0,0 +1,267 @@ + + + + + +JEException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.jca.ra
    +

    Class JEException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class JEException
      +extends java.lang.Exception
      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        JEException(java.lang.String message) 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          JEException

          +
          public JEException(java.lang.String message)
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/class-use/JEConnection.html b/docs/java/com/sleepycat/je/jca/ra/class-use/JEConnection.html new file mode 100644 index 0000000..eca0838 --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/class-use/JEConnection.html @@ -0,0 +1,181 @@ + + + + + +Uses of Class com.sleepycat.je.jca.ra.JEConnection (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.jca.ra.JEConnection

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/class-use/JEConnectionFactory.html b/docs/java/com/sleepycat/je/jca/ra/class-use/JEConnectionFactory.html new file mode 100644 index 0000000..ba8093d --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/class-use/JEConnectionFactory.html @@ -0,0 +1,129 @@ + + + + + +Uses of Interface com.sleepycat.je.jca.ra.JEConnectionFactory (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.jca.ra.JEConnectionFactory

    +
    +
    No usage of com.sleepycat.je.jca.ra.JEConnectionFactory
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/class-use/JEException.html b/docs/java/com/sleepycat/je/jca/ra/class-use/JEException.html new file mode 100644 index 0000000..5332110 --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/class-use/JEException.html @@ -0,0 +1,181 @@ + + + + + +Uses of Class com.sleepycat.je.jca.ra.JEException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.jca.ra.JEException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/package-frame.html b/docs/java/com/sleepycat/je/jca/ra/package-frame.html new file mode 100644 index 0000000..81f635f --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/package-frame.html @@ -0,0 +1,28 @@ + + + + + +com.sleepycat.je.jca.ra (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.je.jca.ra

    +
    +

    Interfaces

    + +

    Classes

    + +

    Exceptions

    + +
    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/package-summary.html b/docs/java/com/sleepycat/je/jca/ra/package-summary.html new file mode 100644 index 0000000..166c982 --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/package-summary.html @@ -0,0 +1,219 @@ + + + + + +com.sleepycat.je.jca.ra (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Package com.sleepycat.je.jca.ra

    +
    +
    Support for the Java Connector Architecture, which provides a standard +for connecting the J2EE platform to legacy enterprise information +systems (EIS), such as ERP systems, database systems, and legacy +applications not written in Java.
    +
    +

    See: Description

    +
    +
    + + + + +

    Package com.sleepycat.je.jca.ra Description

    +
    Support for the Java Connector Architecture, which provides a standard +for connecting the J2EE platform to legacy enterprise information +systems (EIS), such as ERP systems, database systems, and legacy +applications not written in Java. + +

    Package Specification

    + +

    +Users who want to run JE within a J2EE Application Server can use the +JCA Resource Adapter to connect to JE through a standard API. The JE +Resource Adapter supports all three J2EE application server +transaction types: +

    + +
      +
    • No transaction. +
    • Local transactions. +
    • XA transactions. +
    • +
    + +

+JCA also includes the Java Transaction API (JTA), which means that JE +supports two-phase commit (XA). Therefore, JE can participate +in distributed transactions managed by either a J2EE server or by +the application's direct use of the JTA API. +
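A minimal sketch of JE's XA participation outside an application server, driving XAEnvironment (which implements javax.transaction.xa.XAResource) through both commit phases directly; the Xid implementation and environment home are assumed to be supplied by the caller.

    import java.io.File;
    import javax.transaction.xa.XAResource;
    import javax.transaction.xa.Xid;

    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.XAEnvironment;

    public class TwoPhaseSketch {
        public void run(File envHome, Xid xid) throws Exception {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            config.setTransactional(true);

            XAEnvironment env = new XAEnvironment(envHome, config);
            try {
                env.start(xid, XAResource.TMNOFLAGS);   // join the global transaction
                // ... perform transactional reads/writes against env here ...
                env.end(xid, XAResource.TMSUCCESS);

                if (env.prepare(xid) == XAResource.XA_OK) {   // phase one
                    env.commit(xid, false /* not one-phase */);   // phase two
                }
            } finally {
                env.close();
            }
        }
    }

In a J2EE deployment the transaction manager issues these start/end/prepare/commit calls; the sketch only makes the protocol visible.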

    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/package-tree.html b/docs/java/com/sleepycat/je/jca/ra/package-tree.html new file mode 100644 index 0000000..3bb300e --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/package-tree.html @@ -0,0 +1,168 @@ + + + + + +com.sleepycat.je.jca.ra Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Hierarchy For Package com.sleepycat.je.jca.ra

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +

    Interface Hierarchy

    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jca/ra/package-use.html b/docs/java/com/sleepycat/je/jca/ra/package-use.html new file mode 100644 index 0000000..90cce4e --- /dev/null +++ b/docs/java/com/sleepycat/je/jca/ra/package-use.html @@ -0,0 +1,178 @@ + + + + + +Uses of Package com.sleepycat.je.jca.ra (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Package
    com.sleepycat.je.jca.ra

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jmx/JEDiagnostics.html b/docs/java/com/sleepycat/je/jmx/JEDiagnostics.html new file mode 100644 index 0000000..8d359dc --- /dev/null +++ b/docs/java/com/sleepycat/je/jmx/JEDiagnostics.html @@ -0,0 +1,656 @@ + + + + + +JEDiagnostics (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.jmx
    +

    Class JEDiagnostics

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      com.sleepycat.je.dbi.EnvironmentImpl.MBeanRegistrar, javax.management.DynamicMBean
      +
      +
      +
      +
      public class JEDiagnostics
      +extends JEMBean
      +implements javax.management.DynamicMBean
      +

+ JEDiagnostics is a debugging MBean for a non-replicated JE Environment. + It is intended as a home for field-support functionality. While it may be + used by the application developer, the primary use case is a support + situation. Currently much of this functionality is also available through + the standard java.util.logging MBean. +

+ It is a concrete MBean, created by registering a JE Environment as an MBean + via the JEDiagnostics system property. It works only on an + active JE Environment, and an Environment can have only one JEDiagnostics + instance. There are two attributes and one operation: +

      + Attributes: +

        +
• consoleHandlerLevel: sets the console handler level. +
• fileHandlerLevel: sets the file handler level. +
      + Operations: +
        +
• resetLoggingLevel: sets the level for the current loggers in + the LogManager. +
      +

      + We can use these attributes and operations to dynamically change the + logging level for debugging purposes.
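A hedged sketch of changing those two attributes from a JMX client; the ObjectName domain pattern is an assumption, so discover the actual registered name with JConsole or server.queryNames(null, null) first.

    import java.lang.management.ManagementFactory;
    import javax.management.Attribute;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DiagnosticsSketch {
        public void adjustLogging() throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();

            // Pattern is an assumption; assumes one registered JE MBean.
            ObjectName name = server.queryNames(
                    new ObjectName("com.sleepycat.je.jmx:*"), null)
                    .iterator().next();

            // Attribute names are documented above; level values are standard
            // java.util.logging level names.
            server.setAttribute(name, new Attribute("consoleHandlerLevel", "SEVERE"));
            server.setAttribute(name, new Attribute("fileHandlerLevel", "FINE"));
        }
    }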

      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          CONSOLEHANDLER_LEVEL

          +
          protected static final java.lang.String CONSOLEHANDLER_LEVEL
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          FILEHANDLER_LEVEL

          +
          protected static final java.lang.String FILEHANDLER_LEVEL
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_CONSOLEHANDLER_LEVEL

          +
          protected static final javax.management.MBeanAttributeInfo ATT_CONSOLEHANDLER_LEVEL
          +
        • +
        + + + +
          +
        • +

          ATT_FILEHANDLER_LEVEL

          +
          protected static final javax.management.MBeanAttributeInfo ATT_FILEHANDLER_LEVEL
          +
        • +
        + + + +
          +
        • +

          OP_RESET_LOGGING

          +
          protected static final java.lang.String OP_RESET_LOGGING
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          resetLoggingParams

          +
          protected static final javax.management.MBeanParameterInfo[] resetLoggingParams
          +
        • +
        + + + +
          +
        • +

          OP_RESET_LOGGING_LEVEL

          +
          protected static final javax.management.MBeanOperationInfo OP_RESET_LOGGING_LEVEL
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          JEDiagnostics

          +
          protected JEDiagnostics(Environment env)
          +
        • +
        + + + +
          +
        • +

          JEDiagnostics

          +
          public JEDiagnostics()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          initClassFields

          +
          protected void initClassFields()
          +
          +
          Specified by:
          +
          initClassFields in class JEMBean
          +
          +
        • +
        + + + +
          +
        • +

          getAttribute

          +
          public java.lang.Object getAttribute(java.lang.String attributeName)
          +                              throws javax.management.AttributeNotFoundException,
          +                                     javax.management.MBeanException
          +
          +
          Specified by:
          +
          getAttribute in interface javax.management.DynamicMBean
          +
          Throws:
          +
          javax.management.AttributeNotFoundException
          +
          javax.management.MBeanException
          +
          See Also:
          +
          DynamicMBean.getAttribute(java.lang.String)
          +
          +
        • +
        + + + +
          +
        • +

          setAttribute

          +
          public void setAttribute(javax.management.Attribute attribute)
          +                  throws javax.management.AttributeNotFoundException,
          +                         javax.management.InvalidAttributeValueException,
          +                         javax.management.MBeanException
          +
          +
          Specified by:
          +
          setAttribute in interface javax.management.DynamicMBean
          +
          Throws:
          +
          javax.management.AttributeNotFoundException
          +
          javax.management.InvalidAttributeValueException
          +
          javax.management.MBeanException
          +
          See Also:
          +
          DynamicMBean.setAttribute(javax.management.Attribute)
          +
          +
        • +
        + + + +
          +
        • +

          getAttributes

          +
          public javax.management.AttributeList getAttributes(java.lang.String[] attributes)
          +
          +
          Specified by:
          +
          getAttributes in interface javax.management.DynamicMBean
          +
          See Also:
          +
          DynamicMBean.getAttributes(java.lang.String[])
          +
          +
        • +
        + + + +
          +
        • +

          setAttributes

          +
          public javax.management.AttributeList setAttributes(javax.management.AttributeList attributes)
          +
          +
          Specified by:
          +
          setAttributes in interface javax.management.DynamicMBean
          +
          See Also:
          +
          DynamicMBean.setAttributes(javax.management.AttributeList)
          +
          +
        • +
        + + + +
          +
        • +

          invoke

          +
          public java.lang.Object invoke(java.lang.String actionName,
          +                               java.lang.Object[] params,
          +                               java.lang.String[] signature)
          +                        throws javax.management.MBeanException
          +
          +
          Specified by:
          +
          invoke in interface javax.management.DynamicMBean
          +
          Throws:
          +
          javax.management.MBeanException
          +
          See Also:
          +
          DynamicMBean.invoke(java.lang.String, java.lang.Object[], java.lang.String[])
          +
          +
        • +
        + + + +
          +
        • +

          doRegisterMBean

          +
          protected void doRegisterMBean(Environment env)
          +                        throws java.lang.Exception
          +
          +
          Specified by:
          +
          doRegisterMBean in class JEMBean
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          getAttributeList

          +
          protected javax.management.MBeanAttributeInfo[] getAttributeList()
          +
          Description copied from class: JEMBean
          +
          Get attribute metadata for this MBean.
          +
          +
          Specified by:
          +
          getAttributeList in class JEMBean
          +
          Returns:
          +
          array of MBeanAttributeInfo objects describing the available + attributes.
          +
          +
        • +
        + + + +
          +
        • +

          addOperations

          +
          protected void addOperations()
          +
          Description copied from class: JEMBean
          +
          Add MBean operations into the list.
          +
          +
          Specified by:
          +
          addOperations in class JEMBean
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jmx/JEMBean.html b/docs/java/com/sleepycat/je/jmx/JEMBean.html new file mode 100644 index 0000000..4b8df6e --- /dev/null +++ b/docs/java/com/sleepycat/je/jmx/JEMBean.html @@ -0,0 +1,629 @@ + + + + + +JEMBean (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.jmx
    +

    Class JEMBean

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      com.sleepycat.je.dbi.EnvironmentImpl.MBeanRegistrar
      +
      +
      +
      Direct Known Subclasses:
      +
      JEDiagnostics, JEMonitor
      +
      +
      +
      +
      public abstract class JEMBean
      +extends java.lang.Object
      +implements com.sleepycat.je.dbi.EnvironmentImpl.MBeanRegistrar
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        protected java.lang.StringclassName 
        protected java.lang.Class<?>currentClass 
        protected java.lang.StringDESCRIPTION 
        protected Environmentenv 
        protected javax.management.ObjectNamejeName 
        protected java.util.ArrayList<javax.management.MBeanOperationInfo>operationList 
        protected javax.management.MBeanServerserver 
        static javax.management.MBeanParameterInfo[]statParams 
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + + + + + +
        Constructors 
        ModifierConstructor and Description
         JEMBean() 
        protected JEMBean(Environment env) 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods Concrete Methods 
        Modifier and TypeMethod and Description
        protected abstract voidaddOperations() +
        Add MBean operations into the list.
        +
        voiddoRegister(Environment env) +
        For EnvironmentImpl.MBeanRegistrar interface.
        +
        protected abstract voiddoRegisterMBean(Environment env) 
        voiddoUnregister() +
        For EnvironmentImpl.MBeanRegistrar interface.
        +
        protected abstract javax.management.MBeanAttributeInfo[]getAttributeList() +
        Get attribute metadata for this MBean.
        +
        protected javax.management.MBeanConstructorInfo[]getConstructors() +
        Get constructor metadata for this MBean.
        +
        javax.management.MBeanInfogetMBeanInfo() 
        protected javax.management.MBeanNotificationInfo[]getNotificationInfo() +
        Get notification metadata for this MBean.
        +
        protected StatsConfiggetStatsConfig(java.lang.Object[] params) +
        Helper for creating a StatsConfig object to use as an operation + parameter.
        +
        protected abstract voidinitClassFields() 
        protected voidresetMBeanInfo() +
        Create the available management interface for this environment.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          statParams

          +
          public static final javax.management.MBeanParameterInfo[] statParams
          +
        • +
        + + + +
          +
        • +

          server

          +
          protected javax.management.MBeanServer server
          +
        • +
        + + + +
          +
        • +

          jeName

          +
          protected javax.management.ObjectName jeName
          +
        • +
        + + + +
          +
        • +

          className

          +
          protected java.lang.String className
          +
        • +
        + + + +
          +
        • +

          DESCRIPTION

          +
          protected java.lang.String DESCRIPTION
          +
        • +
        + + + +
          +
        • +

          currentClass

          +
          protected java.lang.Class<?> currentClass
          +
        • +
        + + + + + + + +
          +
        • +

          operationList

          +
          protected java.util.ArrayList<javax.management.MBeanOperationInfo> operationList
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + + + + + +
          +
        • +

          JEMBean

          +
          public JEMBean()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          initClassFields

          +
          protected abstract void initClassFields()
          +
        • +
        + + + +
          +
        • +

          resetMBeanInfo

          +
          protected void resetMBeanInfo()
          +
          Create the available management interface for this environment. The + attributes and operations available vary according to environment + configuration.
          +
        • +
        + + + +
          +
        • +

          getAttributeList

          +
          protected abstract javax.management.MBeanAttributeInfo[] getAttributeList()
          +
          Get attribute metadata for this MBean.
          +
          +
          Returns:
          +
          array of MBeanAttributeInfo objects describing the available + attributes.
          +
          +
        • +
        + + + +
          +
        • +

          addOperations

          +
          protected abstract void addOperations()
          +
          Add MBean operations into the list.
          +
        • +
        + + + +
          +
        • +

          getConstructors

          +
          protected javax.management.MBeanConstructorInfo[] getConstructors()
          +
          Get constructor metadata for this MBean. + + Since the process of getting constructors is the same for each concrete + MBean, define it here to reduce coding work.
          +
          +
          Returns:
          +
          array of MBeanConstructorInfo objects describing the constructor + attributes.
          +
          +
        • +
        + + + +
          +
        • +

          getNotificationInfo

          +
          protected javax.management.MBeanNotificationInfo[] getNotificationInfo()
          +
          Get notification metadata for this MBean.
          +
          +
          Returns:
          +
          array of MBeanNotificationInfo describing notifications.
          +
          +
        • +
        + + + +
          +
        • +

          doRegister

          +
          public void doRegister(Environment env)
          +                throws java.lang.Exception
          +
          For EnvironmentImpl.MBeanRegistrar interface. + + Register this MBean with the MBeanServer.
          +
          +
          Specified by:
          +
          doRegister in interface com.sleepycat.je.dbi.EnvironmentImpl.MBeanRegistrar
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          doRegisterMBean

          +
          protected abstract void doRegisterMBean(Environment env)
          +                                 throws java.lang.Exception
          +
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          doUnregister

          +
          public void doUnregister()
          +                  throws java.lang.Exception
          +
          For EnvironmentImpl.MBeanRegistrar interface. + + Remove this MBean from the MBeanServer.
          +
          +
          Specified by:
          +
          doUnregister in interface com.sleepycat.je.dbi.EnvironmentImpl.MBeanRegistrar
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          getMBeanInfo

          +
          public javax.management.MBeanInfo getMBeanInfo()
          +
          +
          See Also:
          +
          Implement the getMBeanInfo method of DynamicMBean.
          +
          +
        • +
        + + + +
          +
        • +

          getStatsConfig

          +
          protected StatsConfig getStatsConfig(java.lang.Object[] params)
          +
          Helper for creating a StatsConfig object to use as an operation + parameter.
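To show how the abstract hooks above fit together, here is a minimal sketch of a JEMBean subclass; the class name, attribute, and registration behavior are illustrative assumptions, not part of JE (the shipped subclasses are JEDiagnostics and JEMonitor, which additionally implement DynamicMBean).

    import javax.management.MBeanAttributeInfo;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.jmx.JEMBean;

    public class SampleMBean extends JEMBean {

        public SampleMBean() {
            super();
        }

        @Override
        protected void initClassFields() {
            // Protected fields inherited from JEMBean.
            currentClass = SampleMBean.class;
            className = "SampleMBean";
            DESCRIPTION = "Illustrative JE MBean (assumption)";
        }

        @Override
        protected MBeanAttributeInfo[] getAttributeList() {
            return new MBeanAttributeInfo[] {
                new MBeanAttributeInfo("environmentHome", "java.lang.String",
                        "Home directory of the managed environment",
                        true /* readable */, false /* writable */, false /* isIs */)
            };
        }

        @Override
        protected void addOperations() {
            // Nothing beyond the inherited operationList in this sketch.
        }

        @Override
        protected void doRegisterMBean(Environment env) throws Exception {
            // A real subclass would build an ObjectName and register with
            // the MBeanServer here; see JEDiagnostics and JEMonitor.
            resetMBeanInfo();
        }
    }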
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jmx/JEMBeanHelper.html b/docs/java/com/sleepycat/je/jmx/JEMBeanHelper.html new file mode 100644 index 0000000..4ed0f61 --- /dev/null +++ b/docs/java/com/sleepycat/je/jmx/JEMBeanHelper.html @@ -0,0 +1,805 @@ + + + + + +JEMBeanHelper (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.jmx
    +

    Class JEMBeanHelper

    +
    +
    + +
    +
      +
    • +
      +
      Deprecated.  +
As of JE 4, JEMBeanHelper is deprecated in favor of the concrete + MBeans available by default with a JE environment. These MBeans can be + registered and enabled by the environment by setting the following JVM + property: + JEMonitor: + this MBean provides general stats monitoring and access to basic + environment-level operations. + + JEMBeanHelper is a utility class for an MBean implementation that wants to + add management of a JE environment to its capabilities. MBean + implementations can contain a JEMBeanHelper instance to get MBean metadata + for JE and to set attributes, get attributes, and invoke operations. +

+ com.sleepycat.je.jmx.JEMonitor and the example program + jmx.JEApplicationMBean are two MBean implementations which support + different application use cases. See those classes for examples of how to + use JEMBeanHelper.
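A hedged sketch of the delegation pattern this (deprecated) helper supports, using only the constructor and methods documented below; the DynamicMBean boilerplate is trimmed and the environment-lifecycle policy is an assumption.

    import java.io.File;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.jmx.JEMBeanHelper;

    public class HelperBackedMBean {

        private final JEMBeanHelper helper;
        private Environment targetEnv;   // may be null while the environment is closed

        public HelperBackedMBean(File envHome) {
            helper = new JEMBeanHelper(envHome, true /* canConfigure */);
        }

        public Object getAttribute(String name) throws Exception {
            Object value = helper.getAttribute(targetEnv, name);
            if (helper.getNeedReset()) {
                // Rebuild MBean metadata from helper.getAttributeList(targetEnv)
                // and helper.getOperationList(targetEnv), as the getAttribute
                // Javadoc below advises.
            }
            return value;
        }
    }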

      +
      +
      +
      public class JEMBeanHelper
      +extends java.lang.Object
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        JEMBeanHelper(java.io.File environmentHome, + boolean canConfigure) +
        Deprecated. 
        +
        Instantiate a helper, specifying environment home and open capabilities.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods Deprecated Methods 
        Modifier and TypeMethod and Description
        java.lang.ObjectgetAttribute(Environment targetEnv, + java.lang.String attributeName) +
        Deprecated. 
        +
        Get an attribute value for the given environment.
        +
        java.util.List<javax.management.MBeanAttributeInfo>getAttributeList(Environment targetEnv) +
        Deprecated. 
        +
        Get MBean attribute metadata for this environment.
        +
        java.io.FilegetEnvironmentHome() +
        Deprecated. 
        +
        Return the target environment directory.
        +
        EnvironmentConfiggetEnvironmentOpenConfig() +
        Deprecated. 
        +
        If the helper was instantiated with canConfigure==true, it shows + environment configuration attributes.
        +
        booleangetNeedReset() +
        Deprecated. 
        +
        Tell the MBean if the available set of functionality has changed.
        +
        javax.management.MBeanNotificationInfo[]getNotificationInfo(Environment targetEnv) +
        Deprecated. 
        +
        No notifications are supported.
        +
        java.util.List<javax.management.MBeanOperationInfo>getOperationList(Environment targetEnv) +
        Deprecated. 
        +
        Get mbean operation metadata for this environment.
        +
        java.lang.Objectinvoke(Environment targetEnv, + java.lang.String actionName, + java.lang.Object[] params, + java.lang.String[] signature) +
        Deprecated. 
        +
        Invoke an operation for the given environment.
        +
        voidsetAttribute(Environment targetEnv, + javax.management.Attribute attribute) +
        Deprecated. 
        +
        Set an attribute value for the given environment.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          ATT_ENV_HOME

          +
          public static final java.lang.String ATT_ENV_HOME
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_OPEN

          +
          public static final java.lang.String ATT_OPEN
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_IS_READ_ONLY

          +
          public static final java.lang.String ATT_IS_READ_ONLY
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_IS_TRANSACTIONAL

          +
          public static final java.lang.String ATT_IS_TRANSACTIONAL
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_CACHE_SIZE

          +
          public static final java.lang.String ATT_CACHE_SIZE
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_CACHE_PERCENT

          +
          public static final java.lang.String ATT_CACHE_PERCENT
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_LOCK_TIMEOUT

          +
          public static final java.lang.String ATT_LOCK_TIMEOUT
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_IS_SERIALIZABLE

          +
          public static final java.lang.String ATT_IS_SERIALIZABLE
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_TXN_TIMEOUT

          +
          public static final java.lang.String ATT_TXN_TIMEOUT
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_SET_READ_ONLY

          +
          public static final java.lang.String ATT_SET_READ_ONLY
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_SET_TRANSACTIONAL

          +
          public static final java.lang.String ATT_SET_TRANSACTIONAL
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_SET_SERIALIZABLE

          +
          public static final java.lang.String ATT_SET_SERIALIZABLE
          +
          Deprecated. 
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          JEMBeanHelper

          +
          public JEMBeanHelper(java.io.File environmentHome,
          +                     boolean canConfigure)
          +
          Deprecated. 
          +
          Instantiate a helper, specifying environment home and open capabilities.
          +
          +
          Parameters:
          +
          environmentHome - home directory of the target JE environment.
          +
          canConfigure - If true, the helper will show environment + configuration attributes.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getEnvironmentHome

          +
          public java.io.File getEnvironmentHome()
          +
          Deprecated. 
          +
          Return the target environment directory.
          +
          +
          Returns:
          +
          the environment directory.
          +
          +
        • +
        + + + +
          +
        • +

          getEnvironmentOpenConfig

          +
          public EnvironmentConfig getEnvironmentOpenConfig()
          +
          Deprecated. 
          +
          If the helper was instantiated with canConfigure==true, it shows + environment configuration attributes. Those attributes are returned + within this EnvironmentConfig object for use in opening environments.
          +
          +
          Returns:
          +
          EnvironmentConfig object which saves configuration attributes + recorded through MBean attributes.
          +
          +
        • +
        + + + +
          +
        • +

          getNeedReset

          +
          public boolean getNeedReset()
          +
          Deprecated. 
          +
          Tell the MBean if the available set of functionality has changed.
          +
          +
          Returns:
          +
          true if the MBean should regenerate its JE metadata.
          +
          +
        • +
        + + + +
          +
        • +

          getAttributeList

          +
          public java.util.List<javax.management.MBeanAttributeInfo> getAttributeList(Environment targetEnv)
          +
          Deprecated. 
          +
          Get MBean attribute metadata for this environment.
          +
          +
          Parameters:
          +
          targetEnv - The target JE environment. May be null if the + environment is not open.
          +
          Returns:
          +
          list of MBeanAttributeInfo objects describing the available + attributes.
          +
          +
        • +
        + + + +
          +
        • +

          getAttribute

          +
          public java.lang.Object getAttribute(Environment targetEnv,
          +                                     java.lang.String attributeName)
          +                              throws javax.management.AttributeNotFoundException,
          +                                     javax.management.MBeanException
          +
          Deprecated. 
          +
          Get an attribute value for the given environment. Check + JEMBeanHelper.getNeedReset() after this call because the helper may + detect that the environment has changed and that the MBean metadata + should be reset.
          +
          +
          Parameters:
          +
          targetEnv - The target JE environment. May be null if the + environment is not open.
          +
          attributeName - attribute name.
          +
          Returns:
          +
          attribute value.
          +
          Throws:
          +
          javax.management.AttributeNotFoundException
          +
          javax.management.MBeanException
          +
          +
        • +
        + + + +
          +
        • +

          setAttribute

          +
          public void setAttribute(Environment targetEnv,
          +                         javax.management.Attribute attribute)
          +                  throws javax.management.AttributeNotFoundException,
          +                         javax.management.InvalidAttributeValueException
          +
          Deprecated. 
          +
          Set an attribute value for the given environment.
          +
          +
          Parameters:
          +
          targetEnv - The target JE environment. May be null if the + environment is not open.
          +
          attribute - name/value pair
          +
          Throws:
          +
          javax.management.AttributeNotFoundException
          +
          javax.management.InvalidAttributeValueException
          +
          +
        • +
        + + + +
          +
        • +

          getOperationList

          +
          public java.util.List<javax.management.MBeanOperationInfo> getOperationList(Environment targetEnv)
          +
          Deprecated. 
          +
Get MBean operation metadata for this environment.
          +
          +
          Parameters:
          +
          targetEnv - The target JE environment. May be null if the + environment is not open.
          +
          Returns:
          +
          List of MBeanOperationInfo describing available operations.
          +
          +
        • +
        + + + +
          +
        • +

          invoke

          +
          public java.lang.Object invoke(Environment targetEnv,
          +                               java.lang.String actionName,
          +                               java.lang.Object[] params,
          +                               java.lang.String[] signature)
          +                        throws javax.management.MBeanException
          +
          Deprecated. 
          +
          Invoke an operation for the given environment.
          +
          +
          Parameters:
          +
          targetEnv - The target JE environment. May be null if the + environment is not open.
          +
          actionName - operation name.
          +
          params - operation parameters. May be null.
          +
          signature - operation signature. May be null.
          +
          Returns:
          +
          the operation result
          +
          Throws:
          +
          javax.management.MBeanException
          +
          +
        • +
        + + + +
          +
        • +

          getNotificationInfo

          +
          public javax.management.MBeanNotificationInfo[] getNotificationInfo(Environment targetEnv)
          +
          Deprecated. 
          +
          No notifications are supported.
          +
          +
          Returns:
          +
          List of MBeanNotificationInfo for available notifications.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/jmx/JEMonitor.html b/docs/java/com/sleepycat/je/jmx/JEMonitor.html new file mode 100644 index 0000000..a138052 --- /dev/null +++ b/docs/java/com/sleepycat/je/jmx/JEMonitor.html @@ -0,0 +1,674 @@ + + + + + +JEMonitor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.jmx
    +

    Class JEMonitor

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      com.sleepycat.je.dbi.EnvironmentImpl.MBeanRegistrar, javax.management.DynamicMBean
      +
      +
      +
      +
      public class JEMonitor
      +extends JEMBean
      +implements javax.management.DynamicMBean
      +

+ JEMonitor is a JMX MBean which makes statistics and basic administrative + operations available. The MBean is registered and enabled when the system + property JEMonitor is set. It works only on an active JE Environment, and + an Environment can register only one instance of JEMonitor.
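A hedged sketch of reading JEMonitor from a JMX client, assuming the hosting JVM was started with -DJEMonitor=true (property value is an assumption) and exactly one JE MBean is registered; the ObjectName pattern is likewise an assumption, so verify the real name with JConsole.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanAttributeInfo;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class MonitorSketch {
        public void dumpAttributes() throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = server.queryNames(
                    new ObjectName("com.sleepycat.je.jmx:*"), null)
                    .iterator().next();

            // Print every exposed attribute (names correspond to the ATT_*
            // constants in the Field Detail below).
            for (MBeanAttributeInfo att : server.getMBeanInfo(name).getAttributes()) {
                System.out.println(att.getName() + " = "
                        + server.getAttribute(name, att.getName()));
            }
        }
    }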

      +
      +
      See Also:
      +
      Monitoring + JE with JConsole and JMX
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          ATT_ENV_HOME

          +
          public static final java.lang.String ATT_ENV_HOME
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_IS_READ_ONLY

          +
          public static final java.lang.String ATT_IS_READ_ONLY
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_IS_TRANSACTIONAL

          +
          public static final java.lang.String ATT_IS_TRANSACTIONAL
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_CACHE_SIZE

          +
          public static final java.lang.String ATT_CACHE_SIZE
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_CACHE_PERCENT

          +
          public static final java.lang.String ATT_CACHE_PERCENT
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_LOCK_TIMEOUT

          +
          public static final java.lang.String ATT_LOCK_TIMEOUT
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_IS_SERIALIZABLE

          +
          public static final java.lang.String ATT_IS_SERIALIZABLE
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ATT_TXN_TIMEOUT

          +
          public static final java.lang.String ATT_TXN_TIMEOUT
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          JEMonitor

          +
          protected JEMonitor(Environment env)
          +
        • +
        + + + +
          +
        • +

          JEMonitor

          +
          public JEMonitor()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          initClassFields

          +
          protected void initClassFields()
          +
          +
          Specified by:
          +
          initClassFields in class JEMBean
          +
          +
        • +
        + + + +
          +
        • +

          getAttribute

          +
          public java.lang.Object getAttribute(java.lang.String attributeName)
          +                              throws javax.management.AttributeNotFoundException,
          +                                     javax.management.MBeanException
          +
          +
          Specified by:
          +
          getAttribute in interface javax.management.DynamicMBean
          +
          Throws:
          +
          javax.management.AttributeNotFoundException
          +
          javax.management.MBeanException
          +
          See Also:
          +
          DynamicMBean.getAttribute(java.lang.String)
          +
          +
        • +
        + + + +
          +
        • +

          setAttribute

          +
          public void setAttribute(javax.management.Attribute attribute)
          +                  throws javax.management.AttributeNotFoundException,
          +                         javax.management.InvalidAttributeValueException,
          +                         javax.management.MBeanException
          +
          +
          Specified by:
          +
          setAttribute in interface javax.management.DynamicMBean
          +
          Throws:
          +
          javax.management.AttributeNotFoundException
          +
          javax.management.InvalidAttributeValueException
          +
          javax.management.MBeanException
          +
          See Also:
          +
          DynamicMBean.setAttribute(javax.management.Attribute)
          +
          +
        • +
        + + + +
          +
        • +

          getAttributes

          +
          public javax.management.AttributeList getAttributes(java.lang.String[] attributes)
          +
          +
          Specified by:
          +
          getAttributes in interface javax.management.DynamicMBean
          +
          See Also:
          +
          DynamicMBean.getAttributes(java.lang.String[])
          +
          +
        • +
        + + + +
          +
        • +

          setAttributes

          +
          public javax.management.AttributeList setAttributes(javax.management.AttributeList attributes)
          +
          +
          Specified by:
          +
          setAttributes in interface javax.management.DynamicMBean
          +
          See Also:
          +
          DynamicMBean.setAttributes(javax.management.AttributeList)
          +
          +
        • +
        + + + +
          +
        • +

          invoke

          +
          public java.lang.Object invoke(java.lang.String actionName,
          +                               java.lang.Object[] params,
          +                               java.lang.String[] signature)
          +                        throws javax.management.MBeanException
          +
          +
          Specified by:
          +
          invoke in interface javax.management.DynamicMBean
          +
          Throws:
          +
          javax.management.MBeanException
          +
          See Also:
          +
          DynamicMBean.invoke(java.lang.String, java.lang.Object[], java.lang.String[])
          +
          +
        • +
        + + + +
          +
        • +

          doRegisterMBean

          +
          protected void doRegisterMBean(Environment env)
          +                        throws java.lang.Exception
          +
          +
          Specified by:
          +
          doRegisterMBean in class JEMBean
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          getAttributeList

          +
          protected javax.management.MBeanAttributeInfo[] getAttributeList()
          +
          Description copied from class: JEMBean
          +
          Get attribute metadata for this MBean.
          +
          +
          Specified by:
          +
          getAttributeList in class JEMBean
          +
          Returns:
          +
          array of MBeanAttributeInfo objects describing the available + attributes.
          +
          +
        • +
        + + + +
          +
        • +

          addOperations

          +
          protected void addOperations()
          +
          Description copied from class: JEMBean
          +
          Add MBean operations into the list.
          +
          +
          Specified by:
          +
          addOperations in class JEMBean
          +
          +
        • +
        +
      • +
      +
    • +
    +
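Because this MBean implements javax.management.DynamicMBean, a management client can read attributes and invoke operations generically, without compile-time knowledge of a typed MBean interface. A minimal sketch of such a client follows; the ObjectName and the attribute and operation names used here are hypothetical placeholders and must match whatever names the MBean actually registers:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("JEMonitor:name=myEnv"); // hypothetical name

    // Generic attribute read via DynamicMBean.getAttribute.
    Object value = server.getAttribute(name, "cachePercent"); // hypothetical attribute

    // Generic operation call via DynamicMBean.invoke: name, arguments, signature.
    Object result = server.invoke(name, "getEnvironmentStats", // hypothetical operation
                                  new Object[] {}, new String[] {});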
diff --git a/docs/java/com/sleepycat/je/jmx/class-use/JEDiagnostics.html b/docs/java/com/sleepycat/je/jmx/class-use/JEDiagnostics.html
new file mode 100644
index 0000000..b91561b
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/class-use/JEDiagnostics.html
@@ -0,0 +1,129 @@

    Uses of Class
    com.sleepycat.je.jmx.JEDiagnostics

    +
    +
    No usage of com.sleepycat.je.jmx.JEDiagnostics
diff --git a/docs/java/com/sleepycat/je/jmx/class-use/JEMBean.html b/docs/java/com/sleepycat/je/jmx/class-use/JEMBean.html
new file mode 100644
index 0000000..bea1e49
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/class-use/JEMBean.html
@@ -0,0 +1,182 @@

    Uses of Class
    com.sleepycat.je.jmx.JEMBean

    +
    +
    + +
    + +
diff --git a/docs/java/com/sleepycat/je/jmx/class-use/JEMBeanHelper.html b/docs/java/com/sleepycat/je/jmx/class-use/JEMBeanHelper.html
new file mode 100644
index 0000000..6b38598
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/class-use/JEMBeanHelper.html
@@ -0,0 +1,129 @@

    Uses of Class
    com.sleepycat.je.jmx.JEMBeanHelper

    +
    +
    No usage of com.sleepycat.je.jmx.JEMBeanHelper
diff --git a/docs/java/com/sleepycat/je/jmx/class-use/JEMonitor.html b/docs/java/com/sleepycat/je/jmx/class-use/JEMonitor.html
new file mode 100644
index 0000000..9f861b8
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/class-use/JEMonitor.html
@@ -0,0 +1,129 @@

    Uses of Class
    com.sleepycat.je.jmx.JEMonitor

    +
    +
    No usage of com.sleepycat.je.jmx.JEMonitor
diff --git a/docs/java/com/sleepycat/je/jmx/package-frame.html b/docs/java/com/sleepycat/je/jmx/package-frame.html
new file mode 100644
index 0000000..2f05250
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/package-frame.html
@@ -0,0 +1,23 @@

    com.sleepycat.je.jmx

    +
    +

    Classes

diff --git a/docs/java/com/sleepycat/je/jmx/package-summary.html b/docs/java/com/sleepycat/je/jmx/package-summary.html
new file mode 100644
index 0000000..accc45a
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/package-summary.html
@@ -0,0 +1,186 @@

    Package com.sleepycat.je.jmx

    +
    +
    Implementations of JMX MBeans for JE.
    +
    +

    See: Description

    +
    +
    + + + + +

    Package com.sleepycat.je.jmx Description

    +
    Implementations of JMX MBeans for JE. + +

    Package Specification

    +This package provides deployable JMX MBeans for JE.
    +
    +
    See Also:
    +
Monitoring JE with JConsole and JMX
    +
    +
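For example, an application that already has an open Environment can deploy one of these MBeans by registering it with the platform MBean server. A minimal sketch, assuming JEMonitor offers a constructor that takes the open environment handle; the ObjectName chosen here is arbitrary:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.jmx.JEMonitor;

    // env is an already-open com.sleepycat.je.Environment
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    JEMonitor mbean = new JEMonitor(env); // assumed constructor taking the handle
    server.registerMBean(mbean, new ObjectName("JEMonitor:name=myEnv"));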
diff --git a/docs/java/com/sleepycat/je/jmx/package-tree.html b/docs/java/com/sleepycat/je/jmx/package-tree.html
new file mode 100644
index 0000000..cd18b86
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/package-tree.html
@@ -0,0 +1,148 @@

    Hierarchy For Package com.sleepycat.je.jmx

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
diff --git a/docs/java/com/sleepycat/je/jmx/package-use.html b/docs/java/com/sleepycat/je/jmx/package-use.html
new file mode 100644
index 0000000..19fdf21
--- /dev/null
+++ b/docs/java/com/sleepycat/je/jmx/package-use.html
@@ -0,0 +1,164 @@

    Uses of Package
    com.sleepycat.je.jmx

    +
    +
    + +
diff --git a/docs/java/com/sleepycat/je/package-frame.html b/docs/java/com/sleepycat/je/package-frame.html
new file mode 100644
index 0000000..9069b0d
--- /dev/null
+++ b/docs/java/com/sleepycat/je/package-frame.html
@@ -0,0 +1,123 @@

    com.sleepycat.je

    +
    +

    Interfaces

    + +

    Classes

    + +

    Enums

    + +

    Exceptions

    + +
diff --git a/docs/java/com/sleepycat/je/package-summary.html b/docs/java/com/sleepycat/je/package-summary.html
new file mode 100644
index 0000000..9073612
--- /dev/null
+++ b/docs/java/com/sleepycat/je/package-summary.html
@@ -0,0 +1,852 @@

    Package com.sleepycat.je

    +
    +
Foundation for creating environments, databases and transactions; provides cursor based data access.
    +
    +

    See: Description

    +
    +
    + + + + +

    Package com.sleepycat.je Description

    +
Foundation for creating environments, databases and transactions; provides cursor based data access.

    Package Specification

This package constitutes the base public API for Berkeley DB, Java Edition. The classes here are used to create database objects, and insert and retrieve data.

This package provides a key/data pair model of a database record. Databases and database cursors are the key objects used to access data. An alternative collections-based API is available through com.sleepycat.collections.

The Environment class embodies the database environment and is the starting point for the application. Databases and transaction objects are created through the Environment class.

Data can be inserted and retrieved directly through the Database object, or through a Cursor obtained from the Database. A database record consists of a key/data pair, where the key and data are each individually represented by a DatabaseEntry object. Classes in com.sleepycat.bind provide optional support for mapping a Java object to a DatabaseEntry.

Configuration classes are used to specify the attributes of particular operations. For example, the attributes of a database environment are specified in the EnvironmentConfig class. An instance of that class is required for Environment construction. Likewise, the attributes of a database are described in DatabaseConfig, which is a parameter to the Environment.openDatabase() method.

    +
    +
    See Also:
    +
    [Getting Started Guide]
    +
    +
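To make the flow described above concrete, here is a minimal sketch that opens an environment, writes one key/data pair, and reads it back. The environment home directory and database name are arbitrary choices, and the directory must already exist:

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import com.sleepycat.je.*;

    public class HelloJE {
        public static void main(String[] args) {
            // Open (and if necessary create) the environment and a database.
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            Environment env = new Environment(new File("/tmp/je-home"), envConfig);

            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(true);
            Database db = env.openDatabase(null, "sampleDb", dbConfig);

            // Keys and data are byte arrays wrapped in DatabaseEntry objects.
            DatabaseEntry key =
                new DatabaseEntry("greeting".getBytes(StandardCharsets.UTF_8));
            DatabaseEntry data =
                new DatabaseEntry("hello".getBytes(StandardCharsets.UTF_8));
            db.put(null, key, data);

            DatabaseEntry found = new DatabaseEntry();
            if (db.get(null, key, found, LockMode.DEFAULT) ==
                OperationStatus.SUCCESS) {
                System.out.println(new String(found.getData(),
                                              StandardCharsets.UTF_8));
            }
            db.close();
            env.close();
        }
    }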
diff --git a/docs/java/com/sleepycat/je/package-tree.html b/docs/java/com/sleepycat/je/package-tree.html
new file mode 100644
index 0000000..522f827
--- /dev/null
+++ b/docs/java/com/sleepycat/je/package-tree.html
@@ -0,0 +1,321 @@

    Hierarchy For Package com.sleepycat.je

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +

    Interface Hierarchy

    + +

    Enum Hierarchy

    + +
diff --git a/docs/java/com/sleepycat/je/package-use.html b/docs/java/com/sleepycat/je/package-use.html
new file mode 100644
index 0000000..bb2c386
--- /dev/null
+++ b/docs/java/com/sleepycat/je/package-use.html
@@ -0,0 +1,1250 @@

    Uses of Package
    com.sleepycat.je

    +
    +
    + +
diff --git a/docs/java/com/sleepycat/je/rep/AppStateMonitor.html b/docs/java/com/sleepycat/je/rep/AppStateMonitor.html
new file mode 100644
index 0000000..d3383eb
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/AppStateMonitor.html
@@ -0,0 +1,274 @@
    +
    com.sleepycat.je.rep
    +

    Interface AppStateMonitor

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface AppStateMonitor
      +
      A mechanism for adding application specific information when asynchronously + tracking the state of a running JE HA application. +

      + NodeState provides information about the current state of a member + of the replication group. The application can obtain NodeState via ReplicationGroupAdmin.getNodeState(com.sleepycat.je.rep.ReplicationNode, int) or DbPing.getNodeState(). A NodeState contains mostly + JE-centric information, such as whether the node is a master or + replica. However, it may be important to add in some application specific + information to enable the best use of the status. +

      + For example, an application may want to direct operations to specific nodes + based on whether the node is available. The fields in NodeState will + tell the application whether the node is up and available in a JE HA sense, + but the application may also need information about an application level + resource, which would affect the load balancing decision. The AppStateMonitor + is a way for the application to inject this kind of application specific + information into the replicated node status. +

      + The AppStateMonitor is registered with the replicated environment using + ReplicatedEnvironment.registerAppStateMonitor(AppStateMonitor). + There is at most one AppStateMonitor associated with the actual environment + (not an Environment handle) at any given time. JE + HA calls getAppState() when it is assembling status + information for a given node. +

      + After registration, the application can obtain this application specific + information along with other JE HA status information when it obtains a + NodeState, through NodeState.getAppState(). +

getAppState() returns a byte array whose length should be larger than 0. An IllegalStateException will be thrown if the returned byte array has zero length. Users are responsible for serializing and deserializing the desired information into this byte array.

      +
      +
      Since:
      +
      5.0
      +
      +
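A minimal sketch of an implementation and its registration follows. The load metric and its string encoding are application choices invented for illustration, and repEnv is assumed to be an open ReplicatedEnvironment:

    import java.nio.charset.StandardCharsets;
    import com.sleepycat.je.rep.AppStateMonitor;

    class LoadAppStateMonitor implements AppStateMonitor {
        volatile int pendingRequests; // hypothetical application-level metric

        @Override
        public byte[] getAppState() {
            // Must return a non-empty array; the encoding is up to the application.
            return Integer.toString(pendingRequests)
                          .getBytes(StandardCharsets.UTF_8);
        }
    }

    // Registration with the replicated environment:
    // repEnv.registerAppStateMonitor(new LoadAppStateMonitor());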
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
byte[] getAppState()
Return a byte array which holds information about the application's state.
        +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getAppState

          +
          byte[] getAppState()
          +
Return a byte array which holds information about the application's state. The application is responsible for serializing and deserializing this information.

Note that the returned byte array's length must be larger than 0.

          +
          +
          Returns:
          +
          the application state
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/CommitPointConsistencyPolicy.html b/docs/java/com/sleepycat/je/rep/CommitPointConsistencyPolicy.html
new file mode 100644
index 0000000..6465665
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/CommitPointConsistencyPolicy.html
@@ -0,0 +1,498 @@
    +
    com.sleepycat.je.rep
    +

    Class CommitPointConsistencyPolicy

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      ReplicaConsistencyPolicy
      +
      +
      +
      +
      public class CommitPointConsistencyPolicy
      +extends java.lang.Object
      +implements ReplicaConsistencyPolicy
      +
      A consistency policy which ensures that the environment on a Replica node is + at least as current as denoted by the specified CommitToken. This + token represents a point in the serialized transaction schedule created by + the master. In other words, this token is like a bookmark, representing a + particular transaction commit in the replication stream. The Replica ensures + that the commit identified by the CommitToken has been executed on + this node before allowing the application's Environment.beginTransaction() + operation on the Replica to proceed. +

For example, suppose the application is a web application where a replicated group is implemented within a load balanced web server group. Each request to the web server consists of an update operation followed by read operations (say, from the same client). The read operations naturally expect to see the data from the updates executed by the same request. However, the read operations might have been routed to a node that did not execute the update.

      + In such a case, the update request would generate a CommitToken, + which would be resubmitted by the browser, along with subsequent read + requests. The read request could be directed at any one of the available web + servers by a load balancer. The node which executes the read request would + create a CommitPointConsistencyPolicy with that CommitToken and use + it at transaction begin. If the environment at the web server was already + current (wrt the commit token), it could immediately execute the transaction + and satisfy the request. If not, the "transaction begin" would stall until + the Replica replay had caught up and the change was available at that web + server. +

Consistency policies are specified either at a per-transaction level through TransactionConfig.setConsistencyPolicy(com.sleepycat.je.ReplicaConsistencyPolicy) or as a replication-node-wide default through ReplicationConfig.setConsistencyPolicy(com.sleepycat.je.ReplicaConsistencyPolicy).

      +
      +
      See Also:
      +
      CommitToken, +Managing Consistency
      +
      +
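A minimal sketch of the round trip described above; masterEnv and replicaEnv are assumed to be open ReplicatedEnvironment handles, and the 10-second timeout is an arbitrary choice:

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.CommitToken;
    import com.sleepycat.je.Transaction;
    import com.sleepycat.je.TransactionConfig;
    import com.sleepycat.je.rep.CommitPointConsistencyPolicy;

    // On the master: perform the update and capture its commit token.
    Transaction writeTxn = masterEnv.beginTransaction(null, null);
    // ... perform the update ...
    writeTxn.commit();
    CommitToken token = writeTxn.getCommitToken(); // returned to the client

    // On a replica: delay transaction start until that commit has been replayed.
    TransactionConfig readConfig = new TransactionConfig();
    readConfig.setConsistencyPolicy(
        new CommitPointConsistencyPolicy(token, 10, TimeUnit.SECONDS));
    Transaction readTxn = replicaEnv.beginTransaction(null, readConfig);
    // ... reads now see at least the state as of the tokened commit ...
    readTxn.commit();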
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
static java.lang.String NAME
The name "CommitPointConsistencyPolicy" associated with this policy.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        CommitPointConsistencyPolicy(CommitToken commitToken, + long timeout, + java.util.concurrent.TimeUnit timeoutUnit) +
        Defines how current a Replica needs to be in terms of a specific + transaction that was committed on the Master.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
boolean equals(java.lang.Object obj)
CommitToken getCommitToken()
Return the CommitToken used to create this consistency policy.
java.lang.String getName()
Returns the name "CommitPointConsistencyPolicy" associated with this policy.
long getTimeout(java.util.concurrent.TimeUnit unit)
Return the timeout specified when creating this consistency policy.
int hashCode()
java.lang.String toString()
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          NAME

          +
          public static final java.lang.String NAME
          +
The name "CommitPointConsistencyPolicy" associated with this policy. The name can be used when constructing policy property values for use in je.properties files.
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          CommitPointConsistencyPolicy

          +
          public CommitPointConsistencyPolicy(CommitToken commitToken,
          +                                    long timeout,
          +                                    java.util.concurrent.TimeUnit timeoutUnit)
          +
Defines how current a Replica needs to be in terms of a specific transaction that was committed on the Master. A transaction on the Replica that uses this consistency policy is allowed to start only after the transaction identified by the commitToken has been committed on the Replica. Environment.beginTransaction() will wait for at most timeout for the Replica to catch up. If the Replica has not caught up in this period, the beginTransaction() method will throw a ReplicaConsistencyException.
          +
          +
          Parameters:
          +
          commitToken - the token identifying the transaction
          +
          timeout - the maximum amount of time that the transaction start + will wait to allow the Replica to catch up.
          +
          timeoutUnit - the TimeUnit for the timeout parameter.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the commitToken or timeoutUnit is + null.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          getCommitToken

          +
          public CommitToken getCommitToken()
          +
          Return the CommitToken used to create this consistency + policy.
          +
          +
          Returns:
          +
          the CommitToken used to create this consistency + policy.
          +
          +
        • +
        + + + +
          +
        • +

          getTimeout

          +
          public long getTimeout(java.util.concurrent.TimeUnit unit)
          +
          Return the timeout specified when creating this consistency policy.
          +
          +
          Specified by:
          +
          getTimeout in interface ReplicaConsistencyPolicy
          +
          Parameters:
          +
          unit - the TimeUnit of the returned value.
          +
          Returns:
          +
          the timeout specified when creating this consistency policy
          +
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          +
          Overrides:
          +
          hashCode in class java.lang.Object
          +
          See Also:
          +
          Object.hashCode()
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object obj)
          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          +
          See Also:
          +
          Object.equals(java.lang.Object)
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/DatabasePreemptedException.html b/docs/java/com/sleepycat/je/rep/DatabasePreemptedException.html
new file mode 100644
index 0000000..36ee0d2
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/DatabasePreemptedException.html
@@ -0,0 +1,349 @@
    +
    com.sleepycat.je.rep
    +

    Class DatabasePreemptedException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class DatabasePreemptedException
      +extends OperationFailureException
      +
      Thrown when attempting to use a Database handle that was forcibly closed by + replication. This exception only occurs in a replicated environment and + normally only occurs on a Replica node. In the case of a DPL schema upgrade + where an entity class or secondary key is renamed, it may also occur on a + Master node, as described below. + +

This exception occurs when accessing a database or store, and one of the following methods was recently executed on the master node and then replayed on a replica node: truncateDatabase, removeDatabase, and renameDatabase.

      + +

      When using the DPL, this occurs only in two + circumstances:

      +
        +
1. This exception is thrown on a Replica node when the truncateClass method has been called on the Master node.
2. This exception is thrown on a Replica or Master node when an entity class or secondary key has been renamed and the application has been upgraded. See Upgrading a Replication Group.
      + +

      When this exception occurs, the application must close any open cursors + and abort any open transactions that are using the database or store, and + then close the database or store handle. If the application wishes, it may + then reopen the database (if it still exists) or store.

      + +

      Some applications may wish to coordinate the Master and Replica sites to + prevent a Replica from accessing a database that is being truncated, removed + or renamed, and thereby prevent this exception. Such coordination is not + directly supported by JE. The DatabasePreemptedException is provided to + allow an application to handle database truncation, removal and renaming + without such coordination between nodes.

      + +

      The Transaction handle is not + invalidated as a result of this exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
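A minimal sketch of the recovery sequence just described; db, cursor, txn, env, and dbConfig are assumed to exist in the application:

    try {
        // ... cursor and transaction operations against db ...
    } catch (DatabasePreemptedException dpe) {
        cursor.close(); // close any open cursors using the database
        txn.abort();    // abort any open transactions using it
        db.close();     // close the stale handle
        // Reopen if the database still exists and is still needed:
        db = env.openDatabase(null, dpe.getDatabaseName(), dbConfig);
    }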
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
Database getDatabase()
Returns the database handle that was forcibly closed.
java.lang.String getDatabaseName()
Returns the name of the database that was forcibly closed.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getDatabase

          +
          public Database getDatabase()
          +
          Returns the database handle that was forcibly closed.
          +
        • +
        + + + +
          +
        • +

          getDatabaseName

          +
          public java.lang.String getDatabaseName()
          +
          Returns the name of the database that was forcibly closed.
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/GroupShutdownException.html b/docs/java/com/sleepycat/je/rep/GroupShutdownException.html
new file mode 100644
index 0000000..380353c
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/GroupShutdownException.html
@@ -0,0 +1,257 @@
    +
    com.sleepycat.je.rep
    +

    Class GroupShutdownException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/InsufficientAcksException.html b/docs/java/com/sleepycat/je/rep/InsufficientAcksException.html
new file mode 100644
index 0000000..4d41685
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/InsufficientAcksException.html
@@ -0,0 +1,372 @@
    +
    com.sleepycat.je.rep
    +

    Class InsufficientAcksException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class InsufficientAcksException
      +extends OperationFailureException
      +

      + This exception is thrown at the time of a commit in a Master, if the Master + could not obtain transaction commit acknowledgments from its Replicas in + accordance with the Durability.ReplicaAckPolicy currently in effect and within + the requested timeout interval. This exception will never be thrown when the + ReplicaAckPolicy of NONE is in effect. +

      + Note that an InsufficientAcksException means the transaction has + already committed at the master. The transaction may also have been + committed at one or more Replicas, but the lack of replica acknowledgments + means that the number of replicas that committed could not be + established. If the transaction was in fact committed by less than a simple + majority of the nodes, it could result in a RollbackException when + the node subsequently attempts to rejoin the group as a Replica. +

      + The application can handle the exception and choose to respond in a number + of ways. For example, it can +

        +
• do nothing, assuming that the transaction will eventually propagate to enough replicas to become durable (see the sketch after this list),
      • retry the operation in a new transaction, which may succeed or fail + depending on whether the underlying problems have been resolved, +
      • retry using a larger timeout interval and return to the original + timeout interval at a later time, +
      • fall back temporarily to a read-only mode, +
      • increase the durability of the transaction on the Master by ensuring + that the changes are flushed to the operating system's buffers or to + the disk, or +
      • give up and report an error at a higher level, perhaps to allow an + administrator to check the underlying cause of the failure. +
      +
      +
      See Also:
      +
      Durability, +Serialized Form
      +
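A minimal sketch of the first option above (accept the commit and record the shortfall); txn and logger are assumed to exist in the application:

    try {
        txn.commit();
    } catch (InsufficientAcksException e) {
        // The commit is already durable on the master; decide whether the
        // missing acknowledgments are acceptable for this application.
        logger.warning("commit missing " + e.acksPending() + " of "
                       + e.acksRequired() + " acks after "
                       + e.ackTimeout() + " ms");
    }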
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
int acksPending()
Returns the number of Replicas that did not respond with an acknowledgment within the Replica commit timeout period.
int acksRequired()
Returns the number of acknowledgments required by the commit policy.
int ackTimeout()
Returns the acknowledgment timeout that was in effect at the time of the exception.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          acksPending

          +
          public int acksPending()
          +
Returns the number of Replicas that did not respond with an acknowledgment within the Replica commit timeout period.
          +
          +
          Returns:
          +
          the number of missing acknowledgments
          +
          +
        • +
        + + + +
          +
        • +

          acksRequired

          +
          public int acksRequired()
          +
Returns the number of acknowledgments required by the commit policy.
          +
          +
          Returns:
          +
          the number of acknowledgments required
          +
          +
        • +
        + + + +
          +
        • +

          ackTimeout

          +
          public int ackTimeout()
          +
          Returns the acknowledgment timeout that was in effect at the time of the + exception.
          +
          +
          Returns:
          +
          the acknowledgment timeout in milliseconds
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/InsufficientLogException.html b/docs/java/com/sleepycat/je/rep/InsufficientLogException.html
new file mode 100644
index 0000000..57474a9
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/InsufficientLogException.html
@@ -0,0 +1,344 @@
    +
    com.sleepycat.je.rep
    +

    Class InsufficientLogException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class InsufficientLogException
      +extends RestartRequiredException
      +
      This exception indicates that the log files constituting the Environment are + insufficient and cannot be used as the basis for continuing with the + replication stream provided by the current master. +

      + This exception is typically thrown by the ReplicatedEnvironment constructor + when a node has been down for a long period of time and is being started up + again. It may also be thrown when a brand new node attempts to become a + member of the group and it does not have a sufficiently current set of log + files. If the group experiences sustained network connectivity problems, + this exception may also be thrown by an active Replica that has been unable + to stay in touch with the members of its group for an extended period of + time. +

In the typical case, the application handles the exception by invoking NetworkRestore.execute(com.sleepycat.je.rep.InsufficientLogException, com.sleepycat.je.rep.NetworkRestoreConfig) to obtain the log files it needs from one of the members of the replication group. After the log files are obtained, the node recreates its environment handle and resumes participation as an active member of the group.

      +
      +
      See Also:
      +
      NetworkRestore, +Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
java.util.Set<ReplicationNode> getLogProviders()
Returns the members of the replication group that can serve as candidate log providers to supply the logs needed by this node.
java.lang.String toString()
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getLogProviders

          +
          public java.util.Set<ReplicationNode> getLogProviders()
          +
          Returns the members of the replication group that can serve as candidate + log providers to supply the logs needed by this node.
          +
          +
          Returns:
          +
          a list of members that can provide logs
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Throwable
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/InsufficientReplicasException.html b/docs/java/com/sleepycat/je/rep/InsufficientReplicasException.html
new file mode 100644
index 0000000..75369b1
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/InsufficientReplicasException.html
@@ -0,0 +1,393 @@
    +
    com.sleepycat.je.rep
    +

    Class InsufficientReplicasException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        InsufficientReplicasException(com.sleepycat.je.txn.Locker locker, + Durability.ReplicaAckPolicy ackPolicy, + int requiredAckCount, + java.util.Set<java.lang.String> availableReplicas) +
        Creates a Commit exception.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
java.util.Set<java.lang.String> getAvailableReplicas()
Returns the set of Replicas that were in contact with the master at the time of the commit operation.
Durability.ReplicaAckPolicy getCommitPolicy()
Returns the Replica ack policy that was in effect for the transaction.
int getRequiredNodeCount()
Returns the number of nodes (including the master) that were required to be active in order to satisfy the Replica ack policy.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          InsufficientReplicasException

          +
          public InsufficientReplicasException(com.sleepycat.je.txn.Locker locker,
          +                                     Durability.ReplicaAckPolicy ackPolicy,
          +                                     int requiredAckCount,
          +                                     java.util.Set<java.lang.String> availableReplicas)
          +
          Creates a Commit exception.
          +
          +
          Parameters:
          +
          ackPolicy - the ack policy that could not be implemented
          +
          requiredAckCount - the replica acks required to satisfy the policy
          +
          availableReplicas - the set of available Replicas
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getCommitPolicy

          +
          public Durability.ReplicaAckPolicy getCommitPolicy()
          +
          Returns the Replica ack policy that was in effect for the transaction.
          +
          +
          Returns:
          +
          the Replica ack policy
          +
          +
        • +
        + + + +
          +
        • +

          getRequiredNodeCount

          +
          public int getRequiredNodeCount()
          +
          Returns the number of nodes (including the master) that were + required to be active in order to satisfy the Replica ack + policy.
          +
          +
          Returns:
          +
          the required number of nodes
          +
          +
        • +
        + + + +
          +
        • +

          getAvailableReplicas

          +
          public java.util.Set<java.lang.String> getAvailableReplicas()
          +
          Returns the set of Replicas that were in contact with the master at the + time of the commit operation.
          +
          +
          Returns:
          +
          a set of Replica node names
          +
          +
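One way an application might respond to this exception, sketched under the assumption that repEnv is an open ReplicatedEnvironment and that temporarily relaxing durability is acceptable for this operation:

    import com.sleepycat.je.Durability;
    import com.sleepycat.je.Transaction;
    import com.sleepycat.je.TransactionConfig;

    TransactionConfig tc = new TransactionConfig();
    try {
        Transaction txn = repEnv.beginTransaction(null, tc);
        // ... writes ...
        txn.commit();
    } catch (InsufficientReplicasException e) {
        // Too few replicas were in contact to satisfy e.getCommitPolicy();
        // e.getAvailableReplicas() names the nodes that were reachable.
        tc.setDurability(new Durability(Durability.SyncPolicy.SYNC,
                                        Durability.SyncPolicy.NO_SYNC,
                                        Durability.ReplicaAckPolicy.NONE));
        // ... retry the transaction with the relaxed policy ...
    }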
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/LockPreemptedException.html b/docs/java/com/sleepycat/je/rep/LockPreemptedException.html
new file mode 100644
index 0000000..65953ed
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/LockPreemptedException.html
@@ -0,0 +1,278 @@
    +
    com.sleepycat.je.rep
    +

    Class LockPreemptedException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class LockPreemptedException
      +extends LockConflictException
      +
      Thrown when a lock has been "stolen", or preempted, from a transaction in a + replicated environment. + +

      The Transaction handle is invalidated as a + result of this exception.

      + +

      Locks may be preempted in a JE HA environment on a Replica system when + the HA write operation needs a lock that an application reader transaction + or cursor holds. This exception is thrown by a reader transaction or cursor + method that is called after a lock has been preempted.

      + +

      Normally, applications should catch the base class LockConflictException rather than catching one of its subclasses. All lock + conflicts are typically handled in the same way, which is normally to abort + and retry the transaction. See LockConflictException for more + information.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
      +
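A minimal sketch of that abort-and-retry pattern; env, the retry bound, and the work inside the transaction are assumptions:

    import com.sleepycat.je.LockConflictException;
    import com.sleepycat.je.Transaction;

    final int maxRetries = 3; // arbitrary bound
    for (int attempt = 0; attempt < maxRetries; attempt++) {
        Transaction txn = env.beginTransaction(null, null);
        try {
            // ... reads and writes ...
            txn.commit();
            break; // success
        } catch (LockConflictException e) {
            // Covers LockPreemptedException and the other subclasses alike.
            txn.abort();
        }
    }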
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/LogOverwriteException.html b/docs/java/com/sleepycat/je/rep/LogOverwriteException.html
new file mode 100644
index 0000000..83c3866
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/LogOverwriteException.html
@@ -0,0 +1,267 @@
    +
    com.sleepycat.je.rep
    +

    Class LogOverwriteException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class LogOverwriteException
      +extends OperationFailureException
      +
      Thrown when one or more log files are modified (overwritten) as the result + of a replication operation. This occurs when a replication operation must + change existing data in a log file in order to synchronize with other nodes + in a replication group. Any previously copied log files may be invalid and + should be discarded. + +

      This exception is thrown by DbBackup. Backups and similar operations that copy + log files should discard any copied files when this exception occurs, and + may retry the operation at a later time. The time interval during which + backups are not possible will be fairly short (less than a minute).

      + +

      Note that this exception is never thrown in a standalone (non-replicated) + environment.

      + +

      The Transaction handle is not + invalidated as a result of this exception.

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Serialized Form
      +
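A minimal sketch of a backup loop that honors this contract; the copy and cleanup steps are hypothetical placeholders, and DbBackup comes from com.sleepycat.je.util:

    import com.sleepycat.je.util.DbBackup;

    DbBackup backup = new DbBackup(env); // env is an open Environment
    backup.startBackup();
    try {
        for (String file : backup.getLogFilesInBackupSet()) {
            copyToBackupStorage(file); // hypothetical copy helper
        }
    } catch (LogOverwriteException e) {
        // Replication rewrote one or more log files; copies made so far may
        // be invalid. Discard them and retry the whole backup later.
        discardPartialBackup(); // hypothetical cleanup helper
    } finally {
        backup.endBackup();
    }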
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/MasterReplicaTransitionException.html b/docs/java/com/sleepycat/je/rep/MasterReplicaTransitionException.html
new file mode 100644
index 0000000..efffcc7
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/MasterReplicaTransitionException.html
@@ -0,0 +1,318 @@
    +
    com.sleepycat.je.rep
    +

    Class MasterReplicaTransitionException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Deprecated.  +
      as of JE 5.0.88 because the environment no longer needs to + restart when transitioning from master to replica.
      +
      +
      +
      @Deprecated
      +public class MasterReplicaTransitionException
      +extends RestartRequiredException
      +
In the past, MasterReplicaTransitionException was sometimes thrown in JE replication systems when an environment that was a master transitioned to the replica state. In some cases, the environment had to reinitialize internal state to become a replica, and the application was required to close and reopen its environment handle, thereby properly reinitializing the node.

      + As of JE 5.0.88, the environment can transition from master to replica + without requiring an environment close and re-open.

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        MasterReplicaTransitionException(com.sleepycat.je.dbi.EnvironmentImpl envImpl, + java.lang.Exception cause) +
        Deprecated. 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          MasterReplicaTransitionException

          +
          public MasterReplicaTransitionException(com.sleepycat.je.dbi.EnvironmentImpl envImpl,
          +                                        java.lang.Exception cause)
          +
          Deprecated. 
          +
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/MasterStateException.html b/docs/java/com/sleepycat/je/rep/MasterStateException.html
new file mode 100644
index 0000000..0f69f5b
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/MasterStateException.html
@@ -0,0 +1,264 @@
    +
    com.sleepycat.je.rep
    +

    Class MasterStateException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/MasterTransferFailureException.html b/docs/java/com/sleepycat/je/rep/MasterTransferFailureException.html
new file mode 100644
index 0000000..b3d4291
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/MasterTransferFailureException.html
@@ -0,0 +1,251 @@
    +
    com.sleepycat.je.rep
    +

    Class MasterTransferFailureException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/MemberNotFoundException.html b/docs/java/com/sleepycat/je/rep/MemberNotFoundException.html
new file mode 100644
index 0000000..f55fb48
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/MemberNotFoundException.html
@@ -0,0 +1,251 @@
    +
    com.sleepycat.je.rep
    +

    Class MemberNotFoundException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class MemberNotFoundException
      +extends OperationFailureException
      +
      Thrown when an operation requires a replication group member and that member + is not present in the replication group.
      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
diff --git a/docs/java/com/sleepycat/je/rep/NetworkRestore.html b/docs/java/com/sleepycat/je/rep/NetworkRestore.html
new file mode 100644
index 0000000..4d66fd2
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/NetworkRestore.html
@@ -0,0 +1,361 @@
    +
    com.sleepycat.je.rep
    +

    Class NetworkRestore

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class NetworkRestore
      +extends java.lang.Object
      +
      Obtains log files for a Replica from other members of the replication + group. A Replica may need to do so if it has been offline for some time, and + has fallen behind in its execution of the replication stream. +

During that time, the connected nodes may have reduced their log files by deleting files after doing log cleaning. When this node rejoins the group, it is possible that the current Master's log files do not go back far enough to adequately sync up this node. In that case, the node can use a NetworkRestore object to copy the log files from one of the nodes in the group. The system tries to avoid deleting log files that either would be needed for replication by current nodes or where replication would be more efficient than network restore.

      + A Replica discovers the need for a NetworkRestore operation when a call to + ReplicatedEnvironment() fails with a InsufficientLogException. +

      + A call to NetworkRestore.execute() will copy the required log + files from a member of the group who owns the files and seems to be the + least busy. For example: +

      +  try {
      +     node = new ReplicatedEnvironment(envDir, envConfig, repConfig);
      + } catch (InsufficientLogException insufficientLogEx) {
      +
      +     NetworkRestore restore = new NetworkRestore();
      +     NetworkRestoreConfig config = new NetworkRestoreConfig();
      +     config.setRetainLogFiles(false); // delete obsolete log files.
      +
      +     // Use the members returned by insufficientLogEx.getLogProviders() to
      +     // select the desired subset of members and pass the resulting list
      +     // as the argument to config.setLogProviders(), if the default selection
      +     // of providers is not suitable.
      +
      +     restore.execute(insufficientLogEx, config);
      +
      +     // retry
      +     node = new ReplicatedEnvironment(envDir, envConfig, repConfig);
      + }
      + 
      +
      +
      See Also:
      +
      + Restoring Log Files
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

Constructors
NetworkRestore() - Creates an instance of NetworkRestore suitable for restoring the logs at this node.
        +
        +
      • +
      + + +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          NetworkRestore

          +
          public NetworkRestore()
          +
Creates an instance of NetworkRestore suitable for restoring the logs at this node. After the logs are restored, the node can create a new ReplicatedEnvironment and join the group.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          execute

          +
          public void execute(InsufficientLogException logException,
          +                    NetworkRestoreConfig config)
          +             throws EnvironmentFailureException,
          +                    java.lang.IllegalArgumentException
          +
          Restores the log files from one of the members of the replication group. +

If config.getLogProviders() returns null or an empty list, it uses the member that is least busy as the provider of the log files. Otherwise it selects a member from the list, choosing the first member that's available, to provide the log files. If the members in this list are not present in logException.getLogProviders(), an IllegalArgumentException will be thrown. Exception handlers for InsufficientLogException will typically use InsufficientLogException.getLogProviders() as the starting point for computing an appropriate list with which to set up the config argument.

Log files that are currently at the node will be retained if they are part of a consistent set of log files. Obsolete log files are either deleted or renamed, based on the configuration of config.getRetainLogFiles().

          +
          +
          Parameters:
          +
          logException - the exception thrown by ReplicatedEnvironment() that necessitated this log refresh operation
          +
          config - configures the execution of the network restore operation
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalArgumentException - if the config is invalid
          +
          See Also:
          +
          NetworkRestoreConfig
          +
          +
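A hedged sketch of the provider selection described above: restrict the restore to a subset of the providers reported by the exception. The node-name filter is purely illustrative; note that only members drawn from logException.getLogProviders() may be supplied:

    java.util.List<ReplicationNode> candidates = new java.util.ArrayList<>();
    for (ReplicationNode member : insufficientLogEx.getLogProviders()) {
        if (!"Pluto".equals(member.getName())) { // skip an undesired node
            candidates.add(member);
        }
    }
    NetworkRestoreConfig config = new NetworkRestoreConfig();
    config.setLogProviders(candidates); // only these nodes will be tried, in order
    new NetworkRestore().execute(insufficientLogEx, config);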
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/NetworkRestoreConfig.html b/docs/java/com/sleepycat/je/rep/NetworkRestoreConfig.html
new file mode 100644
index 0000000..f40886c
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/NetworkRestoreConfig.html
@@ -0,0 +1,427 @@
+NetworkRestoreConfig (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Class NetworkRestoreConfig

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class NetworkRestoreConfig
      +extends java.lang.Object
      +
NetworkRestoreConfig defines the parameters used to configure a NetworkRestore operation.
      +
      +
      See Also:
      +
      NetworkRestore
      +
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Method Summary

All Methods / Instance Methods / Concrete Methods
java.util.List<ReplicationNode> getLogProviders() - Returns the candidate list of data nodes, either ELECTABLE or SECONDARY members, that may be used to obtain log files.
int getReceiveBufferSize() - Returns the size of the receive buffer associated with the socket used to transfer files during the NetworkRestore operation.
boolean getRetainLogFiles() - Returns a boolean indicating whether existing log files should be retained or deleted.
NetworkRestoreConfig setLogProviders(java.util.List<ReplicationNode> providers) - Sets the prioritized list of data nodes, either ELECTABLE or SECONDARY members, used to select a node from which to obtain log files for the NetworkRestore operation.
NetworkRestoreConfig setReceiveBufferSize(int receiveBufferSize) - Sets the size of the receive buffer associated with the socket used to transfer files during the NetworkRestore operation.
NetworkRestoreConfig setRetainLogFiles(boolean retainLogFiles) - If true, retains obsolete log files by renaming them instead of deleting them.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          NetworkRestoreConfig

          +
          public NetworkRestoreConfig()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getRetainLogFiles

          +
          public boolean getRetainLogFiles()
          +
          Returns a boolean indicating whether existing log files should be + retained or deleted.
          +
          +
          Returns:
          +
          true if log files must be retained
          +
          +
        • +
        + + + +
          +
        • +

          setRetainLogFiles

          +
          public NetworkRestoreConfig setRetainLogFiles(boolean retainLogFiles)
          +
If true, retains obsolete log files by renaming them instead of deleting them. The default is "true".

A renamed file has its .jdb suffix replaced by .bup and an additional monotonically increasing numeric suffix. All files that were renamed as part of the same NetworkRestore attempt will have the same numeric suffix.

For example, if files 00000001.jdb and 00000002.jdb were rendered obsolete, and 4 was the highest suffix in use for this environment when the operation was initiated, then the files would be renamed 00000001.bup.5 and 00000002.bup.5.

          +
          +
          Parameters:
          +
          retainLogFiles - if true retains obsolete log files
          +
          Returns:
          +
          this
          +
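Because each setter returns this, configuration calls can be chained; a minimal sketch:

    NetworkRestoreConfig config = new NetworkRestoreConfig()
        .setRetainLogFiles(true)   // rename, rather than delete, obsolete files
        .setReceiveBufferSize(0);  // 0 selects the operating system default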
          +
        • +
        + + + +
          +
        • +

          getReceiveBufferSize

          +
          public int getReceiveBufferSize()
          +
          Returns the size of the receive buffer associated with the socket used + to transfer files during the NetworkRestore operation.
          +
        • +
        + + + +
          +
        • +

          setReceiveBufferSize

          +
          public NetworkRestoreConfig setReceiveBufferSize(int receiveBufferSize)
          +
          Sets the size of the receive buffer associated with the socket used to + transfer files during the NetworkRestore operation. +

Note that if the size specified is larger than the operating system constrained maximum, it will be limited to this maximum value. For example, on Linux you may need to raise the kernel parameter net.core.rmem_max (e.g., via the command sysctl -w net.core.rmem_max=1048576) to increase the operating system imposed limit.

          +
          +
          Parameters:
          +
          receiveBufferSize - the size of the receive buffer. If it's zero, + the operating system default value is used.
          +
          +
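For illustration (not part of the original Javadoc), a request for a 1 MB receive buffer might look like the following; the value is arbitrary and, as noted above, the operating system may cap it:

    NetworkRestoreConfig config = new NetworkRestoreConfig();
    config.setReceiveBufferSize(1048576); // 1 MB, subject to the OS-imposed maximum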
        • +
        + + + +
          +
        • +

          getLogProviders

          +
          public java.util.List<ReplicationNode> getLogProviders()
          +
          Returns the candidate list of data nodes, either ELECTABLE or SECONDARY + members, that may be used to obtain log files.
          +
          +
          Returns:
          +
          the list of data nodes in priority order, or null
          +
          +
        • +
        + + + +
          +
        • +

          setLogProviders

          +
          public NetworkRestoreConfig setLogProviders(java.util.List<ReplicationNode> providers)
          +
Sets the prioritized list of data nodes, either ELECTABLE or SECONDARY members, used to select a node from which to obtain log files for the NetworkRestore operation. If a list is supplied, NetworkRestore will use only nodes from this list, trying each one in order.

The default value is null. If null is configured, NetworkRestore will choose the least busy data node with a current set of logs as the provider of log files.

          +
          +
          Parameters:
          +
          providers - the list of data nodes in priority order, or null
          +
          Returns:
          +
          this
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/NoConsistencyRequiredPolicy.html b/docs/java/com/sleepycat/je/rep/NoConsistencyRequiredPolicy.html
new file mode 100644
index 0000000..0ebb55b
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/NoConsistencyRequiredPolicy.html
@@ -0,0 +1,435 @@
+NoConsistencyRequiredPolicy (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Class NoConsistencyRequiredPolicy

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

Fields
static java.lang.String NAME - The name, "NoConsistencyRequiredPolicy", associated with this policy.
static NoConsistencyRequiredPolicy NO_CONSISTENCY - Convenience instance.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

Constructors
NoConsistencyRequiredPolicy() - Create a NoConsistencyRequiredPolicy.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

All Methods / Instance Methods / Concrete Methods
void ensureConsistency(com.sleepycat.je.dbi.EnvironmentImpl repInstance)
boolean equals(java.lang.Object obj)
java.lang.String getName() - Returns the name, "NoConsistencyRequiredPolicy", associated with this policy.
long getTimeout(java.util.concurrent.TimeUnit unit) - Always returns 0; no timeout is needed for this policy.
int hashCode()
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, finalize, getClass, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          NAME

          +
          public static final java.lang.String NAME
          +
          The name:"NoConsistencyRequiredPolicy" associated with this policy. The name can be used when + constructing policy property values for use in je.properties files.
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
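A minimal sketch of selecting this policy programmatically, assuming the standard TransactionConfig.setConsistencyPolicy setter and an open ReplicatedEnvironment env:

    TransactionConfig txnConfig = new TransactionConfig();
    txnConfig.setConsistencyPolicy(NoConsistencyRequiredPolicy.NO_CONSISTENCY);
    Transaction txn = env.beginTransaction(null, txnConfig);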
        • +
        + + + + +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          NoConsistencyRequiredPolicy

          +
          public NoConsistencyRequiredPolicy()
          +
          Create a NoConsistencyRequiredPolicy.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          ensureConsistency

          +
          public void ensureConsistency(com.sleepycat.je.dbi.EnvironmentImpl repInstance)
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          +
          Overrides:
          +
          hashCode in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object obj)
          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          getTimeout

          +
          public long getTimeout(java.util.concurrent.TimeUnit unit)
          +
Always returns 0; no timeout is needed for this policy.
          +
          +
          Specified by:
          +
          getTimeout in interface ReplicaConsistencyPolicy
          +
          Returns:
          +
          the timeout associated with the policy
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/NodeState.html b/docs/java/com/sleepycat/je/rep/NodeState.html
new file mode 100644
index 0000000..99fdb6f
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/NodeState.html
@@ -0,0 +1,521 @@
+NodeState (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Class NodeState

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class NodeState
      +extends java.lang.Object
      +
      The current state of a replication node and the application this node is + running in. +

      + This includes the following information: +

      +

• the replication state of this node
• the name of the current master, as known by this node
• the time when this node joined the replication group
• the latest transaction end (abort or commit) VLSN on this node
• the transaction end (abort or commit) VLSN on the master known by this node. The difference between transaction end VLSNs on the master versus on this node gives an indication of how current this node's data is. The gap in VLSN values indicates the number of replication records that must be processed by this node to catch up to the master (see the sketch below).
• the number of feeders running on this node
• the system load average for the last minute
• the appState field, a byte array meant to hold information generated by the JE HA application, as provided by a registered AppStateMonitor. Users are responsible for serializing and deserializing information for this field.
      +
      +
      Since:
      +
      5.0
      +
      +
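A hedged sketch of consuming these fields, assuming a NodeState instance has already been obtained (for example, through the group administration utilities); the report method is illustrative only:

    void report(NodeState state) {
        // VLSN gap between the master and this node, per the description above
        long lag = state.getKnownMasterTxnEndVLSN() - state.getCurrentTxnEndVLSN();
        System.out.println(state.getNodeName() + " is " + state.getNodeState()
            + ", about " + lag + " replication records behind master "
            + state.getMasterName());
    }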
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getNodeName

          +
          public java.lang.String getNodeName()
          +
          Returns the name of the node whose state is requested.
          +
          +
          Returns:
          +
          the name of the node.
          +
          +
        • +
        + + + +
          +
        • +

          getGroupName

          +
          public java.lang.String getGroupName()
          +
Returns the name of the group that the node has joined.
          +
          +
          Returns:
          +
the name of the group that the node has joined
          +
          +
        • +
        + + + +
          +
        • +

          getNodeState

          +
          public ReplicatedEnvironment.State getNodeState()
          +
          Returns the replication state of + this node.
          +
          +
          Returns:
          +
          the replication state of this node.
          +
          +
        • +
        + + + +
          +
        • +

          getMasterName

          +
          public java.lang.String getMasterName()
          +
          Returns the name of the current + master known by this node.
          +
          +
          Returns:
          +
          the name of the current master
          +
          +
        • +
        + + + +
          +
        • +

          getJEVersion

          +
          public JEVersion getJEVersion()
          +
          Returns the current JEVersion that this node runs on.
          +
          +
          Returns:
          +
          the current JEVersion used by this node.
          +
          +
        • +
        + + + +
          +
        • +

          getJoinTime

          +
          public long getJoinTime()
          +
Returns the time when this node joined the replication group.
          +
          +
          Returns:
          +
the time when this node joined the group
          +
          +
        • +
        + + + +
          +
        • +

          getCurrentTxnEndVLSN

          +
          public long getCurrentTxnEndVLSN()
          +
          Returns the latest transaction end VLSN on this replication node.
          +
          +
          Returns:
          +
          the commit VLSN on this node
          +
          +
        • +
        + + + +
          +
        • +

          getKnownMasterTxnEndVLSN

          +
          public long getKnownMasterTxnEndVLSN()
          +
          Returns the transaction end VLSN on the master known by this node.
          +
          +
          Returns:
          +
          the known commit VLSN on master
          +
          +
        • +
        + + + +
          +
        • +

          getActiveFeeders

          +
          public int getActiveFeeders()
          +
          Returns the number of current active Feeders running on this node.
          +
          +
          Returns:
          +
          the number of running Feeders on the node
          +
          +
        • +
        + + + +
          +
        • +

          getLogVersion

          +
          public int getLogVersion()
          +
          Returns the log version of this node.
          +
          +
          Returns:
          +
          the log version of this node.
          +
          +
        • +
        + + + +
          +
        • +

          getAppState

          +
          public byte[] getAppState()
          +
          Returns the application state which is obtained via + AppStateMonitor.getAppState().
          +
          +
          Returns:
          +
          the application state
          +
          +
        • +
        + + + +
          +
        • +

          getSystemLoad

          +
          public double getSystemLoad()
          +
          Returns the system load average for the last minute.
          +
          +
          Returns:
          +
the system load average, or -1.0 if the node is running on JDK 5 or if an exception was thrown while obtaining this information.
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/NodeType.html b/docs/java/com/sleepycat/je/rep/NodeType.html
new file mode 100644
index 0000000..2f08344
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/NodeType.html
@@ -0,0 +1,516 @@
+NodeType (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Enum NodeType

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<NodeType>
      +
      +
      +
      +
      public enum NodeType
      +extends java.lang.Enum<NodeType>
      +
      The different types of nodes that can be in a replication group.
      +
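A node's type is normally chosen when the node is configured; a minimal sketch, assuming the ReplicationConfig.setNodeType/getNodeType accessors:

    ReplicationConfig repConfig = new ReplicationConfig();
    repConfig.setNodeType(NodeType.SECONDARY); // a non-voting, read-only member
    if (repConfig.getNodeType().isDataNode()) {
        // ELECTABLE and SECONDARY nodes both hold a copy of the data
    }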
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

Enum Constants
ARBITER
ELECTABLE - A full-fledged member of the replication group with an associated replicated environment that can serve as both a Master and a Replica.
MONITOR - A node that passively listens for the results of elections, but does not participate in them.
SECONDARY - A member of the replication group with an associated replicated environment that serves as a Replica but does not participate in elections or durability decisions.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

All Methods / Static Methods / Instance Methods / Concrete Methods
boolean isArbiter() - Returns whether this is the ARBITER type.
boolean isDataNode() - Returns whether this type represents a data node, either ELECTABLE or SECONDARY.
boolean isElectable() - Returns whether this is the ELECTABLE type.
boolean isMonitor() - Returns whether this is the MONITOR type.
boolean isSecondary() - Returns whether this is the SECONDARY type.
static NodeType valueOf(java.lang.String name) - Returns the enum constant of this type with the specified name.
static NodeType[] values() - Returns an array containing the constants of this enum type, in the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          MONITOR

          +
          public static final NodeType MONITOR
          +
          A node that passively listens for the results of elections, but does not + participate in them. It does not have a replicated environment + associated with it.
          +
          +
          See Also:
          +
          Monitor
          +
          +
        • +
        + + + +
          +
        • +

          ELECTABLE

          +
          public static final NodeType ELECTABLE
          +
A full-fledged member of the replication group with an associated replicated environment that can serve as both a Master and a Replica.
          +
        • +
        + + + +
          +
        • +

          SECONDARY

          +
          public static final NodeType SECONDARY
          +
          A member of the replication group with an associated replicated + environment that serves as a Replica but does not participate in + elections or durability decisions. Secondary nodes are only remembered + by the group while they maintain contact with the Master. + +

          You can use SECONDARY nodes to: +

• Provide a copy of the data available at a distant location
• Maintain an extra copy of the data to increase redundancy
• Change the number of replicas to adjust to dynamically changing read loads
          +
          +
          Since:
          +
          6.0
          +
          +
        • +
        + + + +
          +
        • +

          ARBITER

          +
          public static final NodeType ARBITER
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static NodeType[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (NodeType c : NodeType.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static NodeType valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        + + + +
          +
        • +

          isMonitor

          +
          public boolean isMonitor()
          +
          Returns whether this is the MONITOR type.
          +
          +
          Returns:
          +
          whether this is MONITOR
          +
          Since:
          +
          6.0
          +
          +
        • +
        + + + +
          +
        • +

          isElectable

          +
          public boolean isElectable()
          +
          Returns whether this is the ELECTABLE type.
          +
          +
          Returns:
          +
          whether this is ELECTABLE
          +
          Since:
          +
          6.0
          +
          +
        • +
        + + + +
          +
        • +

          isSecondary

          +
          public boolean isSecondary()
          +
          Returns whether this is the SECONDARY type.
          +
          +
          Returns:
          +
          whether this is SECONDARY
          +
          Since:
          +
          6.0
          +
          +
        • +
        + + + +
          +
        • +

          isDataNode

          +
          public boolean isDataNode()
          +
          Returns whether this type represents a data node, either ELECTABLE or SECONDARY.
          +
          +
          Returns:
          +
          whether this represents a data node
          +
          Since:
          +
          6.0
          +
          +
        • +
        + + + +
          +
        • +

          isArbiter

          +
          public boolean isArbiter()
          +
          Returns whether this is the ARBITER type.
          +
          +
          Returns:
          +
          whether this is ARBITER
          +
          Since:
          +
          6.0
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/QuorumPolicy.html b/docs/java/com/sleepycat/je/rep/QuorumPolicy.html
new file mode 100644
index 0000000..393483f
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/QuorumPolicy.html
@@ -0,0 +1,385 @@
+QuorumPolicy (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Enum QuorumPolicy

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<QuorumPolicy>
      +
      +
      +
      +
      public enum QuorumPolicy
      +extends java.lang.Enum<QuorumPolicy>
      +
The quorum policy determines the number of nodes that must participate to pick the winner of an election, and therefore the master of the group. The default quorum policy during the lifetime of the group is QuorumPolicy.SIMPLE_MAJORITY. The only time the application needs to specify a quorum policy is at node startup, by passing one to the ReplicatedEnvironment constructor.

Note that NodeType.SECONDARY nodes are not counted as part of master election quorums.

      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

Enum Constants
ALL - All participants are required to vote.
SIMPLE_MAJORITY - A simple majority of participants is required to vote.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

All Methods / Static Methods / Instance Methods / Concrete Methods
int quorumSize(int groupSize) - Returns the minimum number of nodes needed to meet the quorum policy.
static QuorumPolicy valueOf(java.lang.String name) - Returns the enum constant of this type with the specified name.
static QuorumPolicy[] values() - Returns an array containing the constants of this enum type, in the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          ALL

          +
          public static final QuorumPolicy ALL
          +
          All participants are required to vote.
          +
        • +
        + + + +
          +
        • +

          SIMPLE_MAJORITY

          +
          public static final QuorumPolicy SIMPLE_MAJORITY
          +
          A simple majority of participants is required to vote.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static QuorumPolicy[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (QuorumPolicy c : QuorumPolicy.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static QuorumPolicy valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        + + + +
          +
        • +

          quorumSize

          +
          public int quorumSize(int groupSize)
          +
Returns the minimum number of nodes needed to meet the quorum policy.
          +
          +
          Parameters:
          +
          groupSize - the number of election participants in the replication + group
          +
          Returns:
          +
          the number of nodes that are needed for a quorum for a group + with groupSize number of election participants
          +
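For example, in a group with five election participants, a simple majority is three nodes, while the ALL policy requires all five (a small illustrative sketch, not from the original Javadoc):

    int simpleMajority = QuorumPolicy.SIMPLE_MAJORITY.quorumSize(5); // 3 nodes
    int all = QuorumPolicy.ALL.quorumSize(5);                        // 5 nodes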
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/ReplicaConsistencyException.html b/docs/java/com/sleepycat/je/rep/ReplicaConsistencyException.html
new file mode 100644
index 0000000..feb5144
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/ReplicaConsistencyException.html
@@ -0,0 +1,364 @@
+ReplicaConsistencyException (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Class ReplicaConsistencyException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class ReplicaConsistencyException
      +extends OperationFailureException
      +
This exception is thrown by a Replica to indicate it could not meet the consistency requirements as defined by the ReplicaConsistencyPolicy in effect for the transaction, within the allowed timeout period.

A Replica will typically keep current with its Master. However, network problems or excessive load on the Master or Replica may prevent the Replica from keeping up, and the Replica may fall further behind than is permitted by its consistency policy. If the Replica cannot catch up in the time defined by its ReplicaConsistencyPolicy, it will throw this exception from the Environment.beginTransaction method, thus preventing the transaction from accessing data that does not meet its consistency requirements.

If this exception is encountered frequently, it indicates that the consistency policy requirements are too strict and cannot be met routinely, given the load being placed on the system and the hardware resources that are available to service the load. The exception may also indicate that there is a network-related issue that is preventing the Replica from communicating with the master and keeping up with the replication stream.

The application can choose to retry the transaction until the underlying system problem has been resolved, or it can relax the consistency constraints, or choose the NoConsistencyRequiredPolicy so that the constraints can be satisfied more easily. For example, in a two-node replication group, if the primary goes down, the application may want the secondary node to continue to service read requests, and will lower the consistency requirement on that node in order to maintain read availability.

      +
      +
      See Also:
      +
      ReplicaConsistencyPolicy, +Managing Consistency, +Serialized Form
      +
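A minimal retry sketch (not from the original Javadoc); the bound of three attempts and the one-second wait are illustrative, and the enclosing method is assumed to declare InterruptedException:

    Transaction txn = null;
    for (int i = 0; txn == null && i < 3; i++) {
        try {
            txn = env.beginTransaction(null, txnConfig);
        } catch (ReplicaConsistencyException rce) {
            Thread.sleep(1000); // give the Replica time to catch up
        }
    }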
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Method Summary

All Methods / Instance Methods / Concrete Methods
ReplicaConsistencyPolicy getConsistencyPolicy() - Returns the Replica consistency policy that could not be satisfied.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ReplicaConsistencyException

          +
          public ReplicaConsistencyException(java.lang.String message,
          +                                   ReplicaConsistencyPolicy consistencyPolicy)
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getConsistencyPolicy

          +
          public ReplicaConsistencyPolicy getConsistencyPolicy()
          +
          Returns the Replica consistency policy that could not be satisfied.
          +
          +
          Returns:
          +
          the Replica consistency policy
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/ReplicaStateException.html b/docs/java/com/sleepycat/je/rep/ReplicaStateException.html
new file mode 100644
index 0000000..7ffd707
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/ReplicaStateException.html
@@ -0,0 +1,264 @@
+ReplicaStateException (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Class ReplicaStateException

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/ReplicaWriteException.html b/docs/java/com/sleepycat/je/rep/ReplicaWriteException.html
new file mode 100644
index 0000000..54cb137
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/ReplicaWriteException.html
@@ -0,0 +1,272 @@
+ReplicaWriteException (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Class ReplicaWriteException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class ReplicaWriteException
      +extends StateChangeException
      +
This exception indicates that an update operation or transaction commit or abort was attempted while in the ReplicatedEnvironment.State.REPLICA state. The transaction is marked as invalid.

The exception results either from an error in the application logic or from a transition of the node from Master to Replica while a transaction was in progress.

The application must abort the current transaction and redirect all subsequent update operations to the Master.

      +
      +
      See Also:
      +
      Serialized Form
      +
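A hedged sketch of the recovery pattern just described; db, txn, key, and data are as in a typical JE write path, and forwardToMaster is a hypothetical application-level routine:

    try {
        db.put(txn, key, data); // fails if this node is now a Replica
        txn.commit();
    } catch (ReplicaWriteException rwe) {
        txn.abort();               // the invalid transaction must be aborted
        forwardToMaster(request);  // hypothetical redirect of the write
    }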
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/ReplicatedEnvironment.State.html b/docs/java/com/sleepycat/je/rep/ReplicatedEnvironment.State.html
new file mode 100644
index 0000000..dffa9a4
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/ReplicatedEnvironment.State.html
@@ -0,0 +1,518 @@
+ReplicatedEnvironment.State (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Enum ReplicatedEnvironment.State

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable, java.lang.Comparable<ReplicatedEnvironment.State>
      +
      +
      +
      Enclosing class:
      +
      ReplicatedEnvironment
      +
      +
      +
      +
      public static enum ReplicatedEnvironment.State
      +extends java.lang.Enum<ReplicatedEnvironment.State>
      +
The replication node state determines the operations that the application can perform against its replicated environment. The method ReplicatedEnvironment.getState() returns the current state.

When the first handle to a ReplicatedEnvironment is instantiated and the node is brought up, the node usually establishes MASTER or REPLICA state before returning from the constructor. However, these states are actually preceded by the UNKNOWN state, which may be visible if the application has configured a suitable ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT.

As the various remote nodes in the group become unavailable and elections are held, the local node may change between MASTER and REPLICA states, always with a (usually brief) transition through UNKNOWN state.

When the last handle to the environment is closed, the node transitions to the DETACHED state.

The state transitions visible to the application can be summarized by the regular expression:

[ MASTER | REPLICA | UNKNOWN ]+ DETACHED

with the caveat that redundant "transitions" (MASTER to MASTER, REPLICA to REPLICA, etc.) never occur.
      +
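A minimal sketch of acting on the current state, using ReplicatedEnvironment.getState() and the predicates documented below (repEnv is assumed to be an open ReplicatedEnvironment handle):

    ReplicatedEnvironment.State state = repEnv.getState();
    if (state.isMaster()) {
        // route application write requests to this node
    } else if (state.isReplica()) {
        // serve reads here; forward write requests to the master
    } else if (state.isUnknown()) {
        // an election may be in progress; only reads are possible
    }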
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

Enum Constants
DETACHED - The node is not associated with the group.
MASTER - The node is the unique master of the group and can both read and write to its environment.
REPLICA - The node is a replica that is being updated by the master.
UNKNOWN - The node is not currently in contact with the master, but is actively trying to establish contact with, or decide upon, a master.
        +
        +
      • +
      + + +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          DETACHED

          +
          public static final ReplicatedEnvironment.State DETACHED
          +
          The node is not associated with the group. Its handle has been + closed. No operations can be performed on the environment when it is + in this state.
          +
        • +
        + + + +
          +
        • +

          UNKNOWN

          +
          public static final ReplicatedEnvironment.State UNKNOWN
          +
          The node is not currently in contact with the master, but is actively + trying to establish contact with, or decide upon, a master. While in + this state the node is restricted to performing just read operations + on its environment. In a functioning group, this state is + transitory.
          +
        • +
        + + + +
          +
        • +

          MASTER

          +
          public static final ReplicatedEnvironment.State MASTER
          +
The node is the unique master of the group and can both read and write to its environment. When the node transitions to this state, the application running on the node must make provisions to start processing application-level write requests in addition to read requests.
          +
        • +
        + + + +
          +
        • +

          REPLICA

          +
          public static final ReplicatedEnvironment.State REPLICA
          +
          The node is a replica that is being updated by the master. It is + restricted to reading its environment. When the node + transitions to this state, the application running on the node must + make provisions to ensure that it does not write to the + environment. It must arrange for all write requests to be routed to + the master.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static ReplicatedEnvironment.State[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (ReplicatedEnvironment.State c : ReplicatedEnvironment.State.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static ReplicatedEnvironment.State valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        + + + +
          +
        • +

          isMaster

          +
          public final boolean isMaster()
          +
          +
          Returns:
          +
          true if the node is a Master when in this state
          +
          +
        • +
        + + + +
          +
        • +

          isReplica

          +
          public final boolean isReplica()
          +
          +
          Returns:
          +
          true if the node is a Replica when in this state
          +
          +
        • +
        + + + +
          +
        • +

          isDetached

          +
          public final boolean isDetached()
          +
          +
          Returns:
          +
          true if the node is disconnected from the replication + group when in this state.
          +
          +
        • +
        + + + +
          +
        • +

          isUnknown

          +
          public final boolean isUnknown()
          +
          +
          Returns:
          +
          true if the node's state is unknown, and it is attempting + to transition to Master or Replica.
          +
          +
        • +
        + + + +
          +
        • +

          isActive

          +
          public final boolean isActive()
          +
          +
          Returns:
          +
          true if the node is currently participating in the group as + a Replica or a Master
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/je/rep/ReplicatedEnvironment.html b/docs/java/com/sleepycat/je/rep/ReplicatedEnvironment.html
new file mode 100644
index 0000000..d74ac7a
--- /dev/null
+++ b/docs/java/com/sleepycat/je/rep/ReplicatedEnvironment.html
@@ -0,0 +1,1293 @@
+ReplicatedEnvironment (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.je.rep
    +

    Class ReplicatedEnvironment

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class ReplicatedEnvironment
      +extends Environment
      +
      A replicated database environment that is a node in a replication + group. Please read the Berkeley DB JE High + Availability Overview for an introduction to basic concepts and key + terminology. +

      + Berkeley DB JE High Availability (JE HA) is a replicated, embedded database + management system which provides fast, reliable, and scalable data + management. JE HA enables replication of an environment across a Replication + Group. A ReplicatedEnvironment is a single node in the replication group. +

+ ReplicatedEnvironment extends Environment. All database operations are executed in the same fashion in both replicated and non-replicated applications, using Environment methods. A ReplicatedEnvironment must be transactional, and all replicated databases created in the replicated environment must be transactional as well. However, non-replicated databases may also be used.

      + ReplicatedEnvironment handles are analogous to Environment + handles. A replicated environment handle is a ReplicatedEnvironment + instance; multiple ReplicatedEnvironment instances may be created for the + same physical directory. In other words, more than one ReplicatedEnvironment + handle may be open at a time for a given environment. +

      +

+ A ReplicatedEnvironment joins its replication group when it is instantiated. When the constructor returns, the node will have established contact with the other members of the group and will be ready to service operations. The life cycle overview is useful for understanding replication group creation.

      + The membership of a replication group is dynamically defined. The group + comes into being when ReplicatedEnvironments that are configured as members + of a group are created and discover each other. ReplicatedEnvironments are + identified by a group name, a node name, and a hostname:port + value. Membership information for electable and monitor nodes is stored in + an internal, replicated database available to electable and secondary nodes. +

      + To start a node and join a group, instantiate a ReplicatedEnvironment. The + very first instantiation of a node differs slightly from all future + instantiations. A brand new, empty node does not yet have access to the + membership database, so it must discover the group with the aid of a + helper node, which is a fellow member. If this is the very first node of the + entire group, there is no available helper. Instead, the helper host address + to use is the node's own address. The example below takes the simple + approach of creating a replication group by starting up a node that will act + as the first master, though it is not necessary to follow this order. + + Configuring Replicated Environments describes group startup in greater + detail. +

      + To create the master node in a brand new group, instantiate a + ReplicatedEnvironment this way: +

      + EnvironmentConfig envConfig = new EnvironmentConfig();
      + envConfig.setAllowCreate(true);
      + envConfig.setTransactional(true);
      +
      + // Identify the node
      + ReplicationConfig repConfig = new ReplicationConfig();
      + repConfig.setGroupName("PlanetaryRepGroup");
      + repConfig.setNodeName("Mercury");
      + repConfig.setNodeHostPort("mercury.acme.com:5001");
      +
      + // This is the first node, so its helper is itself
      + repConfig.setHelperHosts("mercury.acme.com:5001");
      +
      + ReplicatedEnvironment repEnv =
      +     new ReplicatedEnvironment(envHome, repConfig, envConfig);
      + 
      +

      + To create a new node when there are other existing group members, + set a helper address which points to an existing node in the group. A simple + way to bring up a new group is to "chain" the new nodes by having the + helpers reference a previously created node. +

      + EnvironmentConfig envConfig = new EnvironmentConfig();
      + envConfig.setAllowCreate(true);
      + envConfig.setTransactional(true);
      +
      + // Identify the node
      + ReplicationConfig repConfig =
      +     new ReplicationConfig("PlanetaryRepGroup",
      +                           "Jupiter",
      +                           "jupiter.acme.com:5002");
      +
      + // Use the node at mercury.acme.com:5001 as a helper to find the rest
      + // of the group.
      + repConfig.setHelperHosts("mercury.acme.com:5001");
      +
      + ReplicatedEnvironment repEnv =
      +     new ReplicatedEnvironment(envHome, repConfig, envConfig);
      + 
      +

      + In these examples, node Mercury was configured as its own helper and became the first master. The next nodes were configured to use Mercury as their helper and became replicas. It is also possible to start the nodes in reverse order, bringing Mercury up last. In that case, the earlier nodes will block until a helper is awake and can service their requests for group metadata.

      + Creating a ReplicatedEnvironment for an existing environment requires + less configuration. The call + to EnvironmentConfig.setAllowCreate() is eliminated to guard + against the unintentional creation of a new environment. Also, there is no + need to set a helper host address, because the environment exists and has + access to the shared, persistent membership information. +

      + EnvironmentConfig envConfig = new EnvironmentConfig();
      + envConfig.setTransactional(true);
      + ReplicationConfig repConfig =
      +     new ReplicationConfig("PlanetaryRepGroup",
      +                           "Mercury",
      +                           "mercury.acme.com:5001");
      +
      + ReplicatedEnvironment repEnv =
      +     new ReplicatedEnvironment(envHome, repConfig, envConfig);
      + 
      +

      + See ReplicationGroupAdmin for information on how to remove nodes from the + replication group. + +

      + ReplicatedEnvironment properties can be set via the <environmentHome>/je.properties file, just like Environment properties. They follow the same property value precedence rules.
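      + For illustration, a minimal <environmentHome>/je.properties file might carry the same settings used in the constructor examples above. This is a sketch: the property names are the String constants defined in ReplicationConfig and ReplicationMutableConfig, and the values are the hypothetical ones from those examples.
      +
      + je.rep.groupName=PlanetaryRepGroup
      + je.rep.nodeName=Mercury
      + je.rep.nodeHostPort=mercury.acme.com:5001
      + je.rep.helperHosts=mercury.acme.com:5001
      +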

      + A replicated environment directory can only be accessed by a read/write ReplicatedEnvironment handle or a read-only Environment handle. In the current release, there is an additional restriction: a read-only Environment is only permitted when the directory is not also accessed from a different process by a read/write ReplicatedEnvironment. If a read/write ReplicatedEnvironment and a read-only Environment from two different processes concurrently access an environment directory, there is a small possibility that the read-only Environment may see exceptions thrown about an inconsistent log if the ReplicatedEnvironment executes certain kinds of failover. There is no problem if the Environment and ReplicatedEnvironment are in the same process, or are not concurrent.

      + JE HA prohibits opening a replicated environment directory with a read/write Environment handle, because from the group's perspective, unreplicated updates to a single node would cause data inconsistency. To use an existing, non-replicated environment to bootstrap a replication group, use DbEnableReplication to do a one-time conversion of the directory.
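      + A sketch of that conversion, assuming DbEnableReplication is constructed with the environment home plus the group, node, and hostname:port values described above:
      +
      + // One-time conversion of an existing non-replicated environment.
      + DbEnableReplication converter =
      +     new DbEnableReplication(envHome,                  // environment home
      +                             "PlanetaryRepGroup",      // group name
      +                             "Mercury",                // node name
      +                             "mercury.acme.com:5001"); // node host:port
      + converter.convert();
      + // The directory may now be opened with a ReplicatedEnvironment.
      +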

      + All other database objects, such as Database or + Cursor (when using the Base API) or EntityStore or PrimaryIndex (when using the Direct Persistence + Layer) should be created, used and closed before calling close(). + +

      Replicated environments can be created with node type NodeType.ELECTABLE or NodeType.SECONDARY. ELECTABLE nodes can be + masters or replicas, and participate in both master elections and commit + durability decisions. + +

      SECONDARY nodes can only be replicas, not masters, and do not participate + in either elections or durability decisions. SECONDARY nodes can be used to + increase the available number of read replicas without changing the election + or durability quorum of the group, and without requiring communication with + the secondaries during master elections or transaction commits. As a result, + SECONDARY nodes are a good choice for nodes that are connected to the other + nodes in the group by high latency network connections, for example over + long distance networks. SECONDARY nodes maintain replication streams with + the replication group master to update the data contents of their + environment. + +

      You can use SECONDARY nodes to: +

        +
      • Provide a copy of the data available at a distant location +
      • Maintain an extra copy of the data to increase redundancy +
      • Change the number of replicas to adjust to dynamically changing read + loads +
      + +

      Membership information for SECONDARY nodes is not stored persistently, so + their membership is only known to the master, and only while the nodes + remain connected to the master. Because a SECONDARY node cannot become a + master, it will not act as master even if it is the first node created for + the group. + +
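      + A node's type is chosen at configuration time. A minimal sketch, reusing the group and helper settings shown earlier (the node name and address here are hypothetical):
      +
      + ReplicationConfig repConfig =
      +     new ReplicationConfig("PlanetaryRepGroup",
      +                           "Pluto",
      +                           "pluto.acme.com:5005");
      + repConfig.setHelperHosts("mercury.acme.com:5001");
      + // A SECONDARY node serves reads only and never becomes master.
      + repConfig.setNodeType(NodeType.SECONDARY);
      +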

      Non-replicated Databases in a Replicated + Environment

      + + A database or entity store in a replicated environment is replicated by + default, but may be explicitly configured as non-replicated using + DatabaseConfig.setReplicated(boolean) or + StoreConfig.setReplicated(boolean). Such + non-replicated databases may be transactional or non-transactional + (including deferred-write and temporary). The special considerations for + using non-replicated databases in a replicated environment are described + below. +

      + The data in a non-replicated database is not guaranteed to be persistent, + for two reasons. +

        +
      • + When a hard recovery occurs as part of an election, some data at the end of + the transaction log may be lost. For a replicated database this data is + automatically recovered from other members of the group, but for a + non-replicated database it is not. +
      • + When a node's contents are replaced via network restore or by otherwise + copying the transaction log from another node, all previously existing + non-replicated databases on that node are destroyed, and the non-replicated + databases from the source node are copied along with the replicated + data. The non-replicated databases copied from the source node will be in + whatever state they were in at the time of the copy. +
      +

      + Therefore, non-replicated databases are intended to be used primarily for persistent caching and other non-critical local storage. The application is responsible for maintaining the state of the database and handling data loss after one of the events described above.
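      + For example, a node-local cache database might be opened this way (a sketch; the database name is arbitrary and repEnv is an open ReplicatedEnvironment handle):
      +
      + DatabaseConfig dbConfig = new DatabaseConfig();
      + dbConfig.setAllowCreate(true);
      + dbConfig.setTransactional(true);
      + dbConfig.setReplicated(false); // local to this node only
      + Database localDb = repEnv.openDatabase(null, "localCache", dbConfig);
      +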

      + To perform write operations on a non-replicated database, special + considerations are necessary for user-supplied transactions. Namely, the + transaction must be configured for + local-write. A given transaction may be used to write to either replicated + databases or non-replicated databases, but not both. +

      + For auto-commit transactions (when the Transaction parameter is null), the + local-write setting is automatically set to correspond to whether the + database is replicated. With auto-commit, local-write is always true for a + non-replicated database, and always false for a replicated database. +

      + A local-write transaction automatically uses + Durability.ReplicaAckPolicy.NONE. + A local-write transaction on a Master will thus not be held up, or + throw InsufficientReplicasException, if the + Master is not in contact with a sufficient number of Replicas at the + time the transaction is initiated. +

      + For read operations, a single transaction may be used to read any + combination of replicated and non-replicated databases. If only read + operations are performed, it is normally desirable to configure a user + supplied transaction as + read-only. + Like a local-write transaction, a read-only transaction automatically uses + Durability.ReplicaAckPolicy.NONE. +

      + For user-supplied transactions, note that even when accessing only + non-replicated databases, group consistency checks are performed by + default. In this case it is normally desirable to disable consistency + checks by calling + TransactionConfig.setConsistencyPolicy(com.sleepycat.je.ReplicaConsistencyPolicy) with + NoConsistencyRequiredPolicy.NO_CONSISTENCY. This allows the + non-replicated databases to be accessed regardless of the state of the other + members of the group and the network connections to them. When auto-commit + is used (when the Transaction parameter is null) with a non-replicated + database, consistency checks are automatically disabled.
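      + Putting these together, a write to the non-replicated database opened above might use a transaction configured like this (a sketch; key and data are assumed to be populated DatabaseEntry objects):
      +
      + TransactionConfig txnConfig = new TransactionConfig();
      + txnConfig.setLocalWrite(true); // required to write a non-replicated database
      + txnConfig.setConsistencyPolicy(NoConsistencyRequiredPolicy.NO_CONSISTENCY);
      + Transaction txn = repEnv.beginTransaction(null, txnConfig);
      + localDb.put(txn, key, data);
      + txn.commit();
      +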

      +
      +
      Since:
      +
      4.0
      +
      See Also:
      +
      Environment, +Replication First Steps
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ReplicatedEnvironment

          +
          public ReplicatedEnvironment(java.io.File envHome,
          +                             ReplicationConfig repConfig,
          +                             EnvironmentConfig envConfig,
          +                             ReplicaConsistencyPolicy consistencyPolicy,
          +                             QuorumPolicy initialElectionPolicy)
          +                      throws EnvironmentNotFoundException,
          +                             EnvironmentLockedException,
          +                             InsufficientLogException,
          +                             ReplicaConsistencyException,
          +                             java.lang.IllegalArgumentException
          +
          Creates a replicated environment handle and starts participating in the + replication group as either a Master or a Replica. The node's state is + determined when it joins the group, and mastership is not preconfigured. + If the group has no current master and the node has the default node + type of NodeType.ELECTABLE, then creation of a handle will + trigger an election to determine whether this node will participate as a + Master or a Replica. +

          + If the node participates as a Master, the constructor will return after + a sufficient number of Replicas, in accordance with the + initialElectionPolicy argument, have established contact with + the Master. +

          + If the node participates as a Replica, it will become consistent in + accordance with the consistencyPolicy argument before returning + from the constructor. +

          + If an election cannot be concluded in the time period defined by ReplicationConfig.ENV_SETUP_TIMEOUT, the constructor will, by default, throw an UnknownMasterException. This behavior can be overridden via ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT to permit creation of the handle in the ReplicatedEnvironment.State.UNKNOWN state. A handle in the UNKNOWN state can be used to service read operations with an appropriately relaxed consistency policy. Note that these timeouts do not apply when opening an environment for the very first time. In that case, if the node is not the only group member, or if it is a SECONDARY node, the constructor will wait indefinitely until it can contact an existing group member.

          + A brand new node will always join an existing group as a Replica, unless + it is the very first electable node that is creating the group. In that + case it joins as the Master of the newly formed singleton group. A brand + new node must always specify one or more active helper nodes via the + ReplicationMutableConfig.setHelperHosts(String) method, or via the + <environment home>/je.properties file. If this is the + very first member of a nascent group, it must specify just itself as the + helper. +

          + There are special considerations to keep in mind when a replication group is started and elections are first held to determine a master. The default QuorumPolicy.SIMPLE_MAJORITY calls for a simple majority vote. If the group members were previously created and populated, the default election policy may result in the election of a master that does not have the most up to date copy of the environment. This could happen if the best qualified node is slow to start up; it's possible that by the time it's ready to participate in an election, the election has already completed with a simple majority.

          + To avoid this possibility, the method has a parameter, initialElectionPolicy, which can be set to QuorumPolicy.ALL to make the election wait until all electable nodes can vote. By ensuring that all the nodes can vote, the best possible node is chosen to be the master at group startup.

          + Note that it is the application's responsibility to ensure that all + electable nodes coordinate their choice of initialElectionPolicy so that + the very first elections held when a group is brought up use the same + value for this parameter. This parameter is only used for the first + election. After the first election has been held and the group is + functioning, subsequent elections do not require participation of all + the nodes. A simple majority is sufficient to elect the node with the + most up to date environment as the master. +
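          + For example, at group startup an application might open the handle this way (a sketch reusing the envHome, repConfig and envConfig objects from the class overview):
          +
          + ReplicatedEnvironment repEnv =
          +     new ReplicatedEnvironment(envHome, repConfig, envConfig,
          +                               NoConsistencyRequiredPolicy.NO_CONSISTENCY,
          +                               QuorumPolicy.ALL);
          +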

          +
          +
          Parameters:
          +
          envHome - The environment's home directory.
          +
          repConfig - replication configurations. If null, the default + replication configurations are used.
          +
          envConfig - environment configurations for this node. If null, the + default environment configurations are used.
          +
          consistencyPolicy - the consistencyPolicy used by the Replica at startup to make its environment current with respect to the master. This differs from the consistency policy specified by ReplicationConfig.setConsistencyPolicy(com.sleepycat.je.ReplicaConsistencyPolicy) because it is used only at construction, when the node joins the group for the first time. The consistency policy set in ReplicationConfig is used whenever a policy is needed after node startup, for example when a transaction begins.
          +
          initialElectionPolicy - the policy to use when holding the initial + election.
          +
          Throws:
          +
          RestartRequiredException - if some type of corrective action is + required. The subclasses of this exception provide further details.
          +
          ReplicaConsistencyException - if it is a Replica and cannot + satisfy the specified consistency policy within the consistency timeout + period
          +
          UnknownMasterException - if the + ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT has a zero value and + the node cannot join the group in the time period specified by the + ReplicationConfig.ENV_SETUP_TIMEOUT property. The node may be + unable to join the group because the Master could not be determined due + to a lack of sufficient nodes as required by the election policy, or + because a master was present but lacked a + QuorumPolicy.SIMPLE_MAJORITY needed to update the environment + with information about this node, if it's a new node and is joining the + group for the first time.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          EnvironmentLockedException - when an environment cannot be opened + for write access because another process has the same environment open + for write access. Warning: This exception should be + handled when an environment is opened by more than one process.
          +
          VersionMismatchException - when the existing log is not compatible + with the version of JE that is running. This occurs when a later version + of JE was used to create the log. Warning: This + exception should be handled when more than one version of JE may be used + to access an environment.
          +
          java.lang.UnsupportedOperationException - if the environment exists and has + not been enabled for replication.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, an invalid EnvironmentConfig parameter.
          +
          EnvironmentNotFoundException
          +
          InsufficientLogException
          +
          +
        • +
        + + + + +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          getState

          +
          public ReplicatedEnvironment.State getState()
          +                                     throws DatabaseException
          +
          Returns the current state of the node associated with this replication + environment. See ReplicatedEnvironment.State for a description of node states. +

          + If the caller's intent is to track the state of the node, + StateChangeListener may be a more convenient and efficient + approach, rather than using getState() directly.
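          + For example, an application might poll the state before deciding whether a write can be performed locally (a sketch):
          +
          + if (repEnv.getState().isMaster()) {
          +     // This node is currently the master; writes are permitted here.
          + }
          +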

          +
          +
          Returns:
          +
          the current replication state associated with this node
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has already been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getGroup

          +
          public ReplicationGroup getGroup()
          +                          throws DatabaseException
          +
          Returns a description of the replication group as known by this node. + The replicated group metadata is stored in a replicated database and + updates are propagated by the current master node to all replicas. If + this node is not the master, it is possible for its description of the + group to be out of date, and it will not include information about + SECONDARY nodes.
          +
          +
          Returns:
          +
          the group description
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has already been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          close

          +
          public void close()
          +           throws DatabaseException
          +
          Closes this ReplicatedEnvironment and releases any resources used by the handle.

          + When the last handle is closed, allocated resources are freed, and + daemon threads are stopped, even if they are performing work. The node + ceases participation in the replication group. If the node was currently + the master, the rest of the group will hold an election. If a quorum of + nodes can participate in the election, a new master will be chosen. +

          + The ReplicatedEnvironment should not be closed while any other handle that refers to it is still open; for example, while there are open Database instances, or while transactions in the environment have not yet committed or aborted. Specifically, this includes Database, Cursor and Transaction handles.

          + +

          WARNING: To guard against memory leaks, the application should + discard all references to the closed handle. While BDB makes an effort + to discard references from closed objects to the allocated memory for an + environment, this behavior is not guaranteed. The safe course of action + for an application is to discard all references to closed BDB + objects.
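          + Because the handle implements AutoCloseable, try-with-resources is a convenient way to guarantee it is closed (a sketch):
          +
          + try (ReplicatedEnvironment repEnv =
          +          new ReplicatedEnvironment(envHome, repConfig, envConfig)) {
          +     // Open, use, and close databases and transactions here.
          + } // close() runs automatically, even on an exception.
          +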

          +
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Overrides:
          +
          close in class Environment
          +
          Throws:
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          setStateChangeListener

          +
          public void setStateChangeListener(StateChangeListener listener)
          +                            throws DatabaseException
          +
          Sets the listener used to receive asynchronous replication node state change events. Note that there is one listener per replication node, not one per handle. Invoking this method replaces the previous listener. Invoking this method typically results in an immediate callback to the application via the StateChangeListener.stateChange(com.sleepycat.je.rep.StateChangeEvent) method, so that the application is made aware of the existing state of the node at the time the StateChangeListener is first established.
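          + A minimal listener might simply record each state transition (a sketch; the event reports the state that triggered the callback):
          +
          + repEnv.setStateChangeListener(new StateChangeListener() {
          +     public void stateChange(StateChangeEvent event) {
          +         // Called on every node state transition, and once on registration.
          +         System.out.println("Node state is now " + event.getState());
          +     }
          + });
          +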
          +
          +
          Parameters:
          +
          listener - the state change listener.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has already been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getStateChangeListener

          +
          public StateChangeListener getStateChangeListener()
          +                                           throws DatabaseException
          +
          Returns the listener used to receive asynchronous replication node state + change events. A StateChangeListener provides the replication + application with an asynchronous mechanism for tracking the State of the replicated environment. +

          + Note that there is one listener per replication node, not one per + ReplicatedEnvironment handle.

          +
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has already been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + + + + + + + + + +
          +
        • +

          getRepConfig

          +
          public ReplicationConfig getRepConfig()
          +                               throws DatabaseException
          +
          Returns the replication configuration that has been used to create this handle. This is derived from the original configuration argument, after cloning a copy to keep it distinct from the user's instance, applying je.properties settings, and validating against the underlying node.
          +
          +
          Returns:
          +
          this handle's configuration.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has already been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getRepStats

          +
          public ReplicatedEnvironmentStats getRepStats(StatsConfig config)
          +                                       throws DatabaseException
          +
          Returns statistics associated with this environment. See ReplicatedEnvironmentStats for the kind of information available.
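          + For example (a sketch):
          +
          + ReplicatedEnvironmentStats repStats = repEnv.getRepStats(new StatsConfig());
          + System.out.println(repStats);                   // grouped summary
          + System.out.println(repStats.toStringVerbose()); // detailed descriptions
          +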
          +
          +
          Parameters:
          +
          config - is used to specify attributes such as whether the stats + should be cleared, whether the complete set of stats should be obtained, + etc.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has already been closed.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          printStartupInfo

          +
          public void printStartupInfo(java.io.PrintStream out)
          +
          Prints a detailed report about the costs of different phases of environment startup. By default, this report is logged to the je.info file if startup takes longer than je.env.startupThreshold.
          +
          +
          Overrides:
          +
          printStartupInfo in class Environment
          +
          +
        • +
        + + + +
          +
        • +

          shutdownGroup

          +
          public void shutdownGroup(long replicaShutdownTimeout,
          +                          java.util.concurrent.TimeUnit unit)
          +                   throws java.lang.IllegalStateException
          +
          Closes this handle and shuts down the Replication Group by forcing all + active Replicas to exit. +

          + This method must be invoked on the node that's currently the Master + after all other outstanding handles have been closed. +

          + The Master waits for all active Replicas to catch up so that they have a current set of logs, and then shuts them down. The Master will wait at most replicaShutdownTimeout for a Replica to catch up; if a Replica has not caught up within this period, the Master forces it to shut down before it is completely caught up. A negative or zero replicaShutdownTimeout value results in an immediate shutdown without waiting for lagging Replicas to catch up. Nodes that are currently inactive cannot be contacted by the Master; as a consequence, their state is not affected by the shutdown.

          + The shutdown operation will close this handle on the Master node. The + environments on Replica nodes will be invalidated, and attempts to use + those handles will result in a GroupShutdownException being + thrown. The application is responsible for closing the remaining handles + on the Replica.
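          + For example, on the master, once all other handles are closed (a sketch; the timeout value is arbitrary):
          +
          + repEnv.shutdownGroup(60, TimeUnit.SECONDS);
          +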

          +
          +
          Parameters:
          +
          replicaShutdownTimeout - the maximum amount of time the Master waits for a Replica to shut down.
          +
          unit - the time unit associated with the + replicaShutdownTimeout
          +
          Throws:
          +
          java.lang.IllegalStateException - if the method is invoked on a node that's + not currently the Master, or there are other open handles to this + Environment.
          +
          +
        • +
        + + + +
          +
        • +

          registerAppStateMonitor

          +
          public void registerAppStateMonitor(AppStateMonitor appStateMonitor)
          +                             throws java.lang.IllegalStateException
          +
          Registers an AppStateMonitor to report the application state in which this ReplicatedEnvironment is running. Note that there is only one AppStateMonitor per replication node, not one per handle. Invoking this method replaces the previous AppStateMonitor.

          + After registration, the application state can be returned by invoking + ReplicationGroupAdmin.getNodeState(com.sleepycat.je.rep.ReplicationNode, int).

          +
          +
          Parameters:
          +
          appStateMonitor - the user implemented AppStateMonitor
          +
          Throws:
          +
          java.lang.IllegalStateException - if this handle or the underlying + environment has already been closed.
          +
          +
        • +
        + + + +
          +
        • +

          transferMaster

          +
          public java.lang.String transferMaster(java.util.Set<java.lang.String> replicas,
          +                                       int timeout,
          +                                       java.util.concurrent.TimeUnit timeUnit)
          +
          Transfers the current master state from this node to one of the + electable replicas supplied in the argument list. The replica that is + actually chosen to be the new master is the one with which the Master + Transfer can be completed most rapidly. The transfer operation ensures + that all changes at this node are available at the new master upon + conclusion of the operation. +

          + The following sequence of steps is used to accomplish the transfer: +

            +
          1. The master first waits for at least one replica, from + amongst the supplied Set of candidate replicas, to + become reasonably current. It may have to wait for at least + one of the replicas to establish a feeder, if none of them are + currently connected to the master. "Reasonably current" means + that the replica is close enough to the end of the transaction + stream that it has managed to acknowledge a transaction within + the time that the commit thread is still awaiting + acknowledgments. If the candidate replicas are working + through a long backlog after having been disconnected, this can + take some time, so the timeout value should be chosen to allow + for this possibility. + +
          2. The master blocks new transactions from being committed or + aborted. + +
          3. The master now waits for one of the candidate replicas to + become fully current (completely caught up with the end of the + log on the master). The first replica that becomes current is + the one that is chosen to become the new master. This second + wait period is expected to be brief, since it only has to wait + until transactions that were committed in the interval between + step 1) and step 2) have been acknowledged by a replica. + +
          4. The master sends messages to all other nodes announcing the chosen replica as the new master. This node will eventually become a replica, and any subsequent attempt to commit or abort existing transactions, or to perform write operations, will result in a ReplicaWriteException.
          5. The current master releases the transactions that were blocked in + step 2) allowing them to proceed. The released transactions will fail + with ReplicaWriteException since the environment has become a + replica. +
          +
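          + For example (a sketch; "Jupiter" was configured earlier in the class overview, while "Saturn" is a hypothetical electable node):
          +
          + Set<String> candidates = new HashSet<>();
          + candidates.add("Jupiter");
          + candidates.add("Saturn");
          + String newMaster = repEnv.transferMaster(candidates, 30, TimeUnit.SECONDS);
          +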
          +
          Parameters:
          +
          replicas - the set of replicas to be considered when choosing the + new master. The method returns immediately if this node is a member of + the set.
          +
          timeout - the amount of time to allow for the transfer to be + accomplished. A MasterTransferFailureException is thrown if the + transfer is not accomplished within this timeout period.
          +
          timeUnit - the time unit associated with the timeout
          +
          Returns:
          +
          the name of the replica that was chosen to be the new master + from amongst the set of supplied replicas
          +
          Throws:
          +
          MasterTransferFailureException - if the master transfer operation + fails
          +
          java.lang.IllegalArgumentException - if any of the named replicas is not a + member of the replication group or is not of type + NodeType.ELECTABLE
          +
          java.lang.IllegalStateException - if this node is not currently the master, + or this handle or the underlying environment has already been closed.
          +
          +
        • +
        + + + +
          +
        • +

          transferMaster

          +
          public java.lang.String transferMaster(java.util.Set<java.lang.String> replicas,
          +                                       int timeout,
          +                                       java.util.concurrent.TimeUnit timeUnit,
          +                                       boolean force)
          +
          Transfers the current master state from this node to one of the replicas + supplied in the argument list.
          +
          +
          Parameters:
          +
          force - true if this request should supersede and cancel any + currently pending Master Transfer operation
          +
          See Also:
          +
          transferMaster(Set, int, TimeUnit)
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/ReplicatedEnvironmentStats.html b/docs/java/com/sleepycat/je/rep/ReplicatedEnvironmentStats.html new file mode 100644 index 0000000..64dd086 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/ReplicatedEnvironmentStats.html @@ -0,0 +1,1329 @@ + + + + + +ReplicatedEnvironmentStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class ReplicatedEnvironmentStats

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class ReplicatedEnvironmentStats
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      Statistics for a replicated environment. +

      + The statistics are logically grouped into four categories. Viewing the + statistics through toString() displays + the values in these categories, as does viewing the stats through the RepJEMonitor + mbean. Viewing the stats with toStringVerbose() will provide more detailed + descriptions of the stats and stat categories. +

      + The current categories are: +

        +
      • FeederManager: A feeder is the replication stream between a master and replica. The current number of feeders gives a sense of the connectivity of the replication group.
      • BinaryProtocol: These statistics center on the network traffic engendered by the replication stream, and provide a sense of the network bandwidth seen by the replication group.
      • Replay: The act of receiving and applying the replication stream at the Replica node is called Replay. These stats give a sense of how much load the replica node is experiencing when processing the traffic from the replication group.
      • ConsistencyTracker: The tracker is invoked when consistency policies are used at a replica node. This provides a measure of the delays experienced by read requests at a replica, in order to conform with the consistency specified by the application.
      +
      +
      See Also:
      +
      Viewing + Statistics with JConsole, +Serialized Form
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getNFeedersCreated

          +
          public int getNFeedersCreated()
          +
          The number of Feeder threads created since this node was started. A Master supplies the Replication Stream to a Replica via a Feeder thread. The Feeder thread is created when a Replica connects to the node and is shut down when the connection is terminated.
          +
        • +
        + + + +
          +
        • +

          getNFeedersShutdown

          +
          public int getNFeedersShutdown()
          +
          The number of Feeder threads that were shut down, either because this node or the Replica terminated the connection.
          +
          +
          See Also:
          +
          getNFeedersCreated()
          +
          +
        • +
        + + + +
          +
        • +

          getNMaxReplicaLag

          +
          public long getNMaxReplicaLag()
          +
          The lag (in VLSNs) associated with the replica that's farthest behind in + replaying the replication stream.
          +
        • +
        + + + +
          +
        • +

          getNMaxReplicaLagName

          +
          public java.lang.String getNMaxReplicaLagName()
          +
          The name of the replica that's farthest behind in replaying the + replication stream.
          +
        • +
        + + + +
          +
        • +

          getReplicaDelayMap

          +
          public java.util.SortedMap<java.lang.String,java.lang.Long> getReplicaDelayMap()
          +
          Returns a map from replica node name to the delay, in milliseconds, + between when a transaction was committed on the master and when the + master learned that the transaction was processed on the replica, if + known. Returns an empty map if this node is not the master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getReplicaLastCommitTimestampMap

          +
          public java.util.SortedMap<java.lang.String,java.lang.Long> getReplicaLastCommitTimestampMap()
          +
          Returns a map from replica node name to the commit timestamp of the last + committed transaction that was processed on the replica, if known. + Returns an empty map if this node is not the master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getReplicaLastCommitVLSNMap

          +
          public java.util.SortedMap<java.lang.String,java.lang.Long> getReplicaLastCommitVLSNMap()
          +
          Returns a map from replica node name to the VLSN of the last committed + transaction that was processed on the replica, if known. Returns an + empty map if this node is not the master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getReplicaVLSNLagMap

          +
          public java.util.SortedMap<java.lang.String,java.lang.Long> getReplicaVLSNLagMap()
          +
          Returns a map from replica node name to the lag, in VLSNs, between the + replication state of the replica and the master, if known. Returns an + empty map if this node is not the master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getReplicaVLSNRateMap

          +
          public java.util.SortedMap<java.lang.String,java.lang.Long> getReplicaVLSNRateMap()
          +
          Returns a map from replica node name to a moving average of the rate, in + VLSNs per minute, that the replica is processing replication data, if + known. Returns an empty map if this node is not the master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getNTxnsAcked

          +
          public long getNTxnsAcked()
          +
          The number of transactions that were successfully acknowledged based + upon the Durability.ReplicaAckPolicy policy associated with the + transaction commit.
          +
        • +
        + + + + + + + +
          +
        • +

          getTotalTxnMs

          +
          public long getTotalTxnMs()
          +
          The total time in milliseconds spent in replicated transactions. This + represents the time from the start of the transaction until its + successful commit and acknowledgment. It includes the time spent + waiting for transaction commit acknowledgments, as determined by + getAckWaitMs().
          +
        • +
        + + + +
          +
        • +

          getAckWaitMs

          +
          public long getAckWaitMs()
          +
          The total time in milliseconds that the master spent waiting for the + Durability.ReplicaAckPolicy to be satisfied during successful transaction + commits.
          +
          +
          See Also:
          +
          getTotalTxnMs()
          +
          +
        • +
        + + + +
          +
        • +

          getLastCommitVLSN

          +
          public long getLastCommitVLSN()
          +
          The VLSN of the last committed transaction on the master, or 0 if not + known or this node is not the master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getLastCommitTimestamp

          +
          public long getLastCommitTimestamp()
          +
          The commit timestamp of the last committed transaction on the master, or + 0 if not known or this node is not the master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getVLSNRate

          +
          public long getVLSNRate()
          +
          A moving average of the rate replication data is being generated by the + master, in VLSNs per minute, or 0 if not known or this node is not the + master.
          +
          +
          Since:
          +
          6.3.0
          +
          +
        • +
        + + + +
          +
        • +

          getNReplayCommits

          +
          public long getNReplayCommits()
          +
          The number of commit log records that were replayed by this node when it was a Replica. There is one commit record for each actual commit on the Master.
          +
        • +
        + + + +
          +
        • +

          getNReplayCommitAcks

          +
          public long getNReplayCommitAcks()
          +
          The number of commit log records that needed to be acknowledged to the Master by this node when it was a Replica. The rate of change of this statistic will show a strong correlation with that of the NReplayCommits statistic, if the Durability policy used by transactions on the master calls for transaction commit acknowledgments and the Replica is current with respect to the Master.
          +
        • +
        + + + +
          +
        • +

          getNReplayCommitSyncs

          +
          public long getNReplayCommitSyncs()
          +
          The number of commitSync() calls executed when satisfying transaction + commit acknowledgment requests from the Master.
          +
        • +
        + + + +
          +
        • +

          getNReplayCommitNoSyncs

          +
          public long getNReplayCommitNoSyncs()
          +
          The number of commitNoSync() calls executed when satisfying transaction + commit acknowledgment requests from the Master.
          +
        • +
        + + + +
          +
        • +

          getNReplayCommitWriteNoSyncs

          +
          public long getNReplayCommitWriteNoSyncs()
          +
          The number of commitWriteNoSync() calls executed when satisfying transaction commit acknowledgment requests from the Master.
          +
        • +
        + + + +
          +
        • +

          getNReplayAborts

          +
          public long getNReplayAborts()
          +
          The number of abort records which were replayed while the node was in + the Replica state.
          +
        • +
        + + + +
          +
        • +

          getNReplayNameLNs

          +
          public long getNReplayNameLNs()
          +
          The number of NameLN records which were replayed while the node was in + the Replica state.
          +
        • +
        + + + +
          +
        • +

          getNReplayLNs

          +
          public long getNReplayLNs()
          +
          The number of data records (creation, update, deletion) which were + replayed while the node was in the Replica state.
          +
        • +
        + + + +
          +
        • +

          getReplayElapsedTxnTime

          +
          public long getReplayElapsedTxnTime()
          +
          The total elapsed time in milliseconds spent replaying committed and + aborted transactions.
          +
        • +
        + + + +
          +
        • +

          getNReplayGroupCommitTimeouts

          +
          public long getNReplayGroupCommitTimeouts()
          +
          The number of group commits that were initiated due to the + group timeout + interval being exceeded.
          +
          +
          Since:
          +
          5.0.76
          +
          +
        • +
        + + + +
          +
        • +

          getNReplayGroupCommitMaxExceeded

          +
          public long getNReplayGroupCommitMaxExceeded()
          +
          The number of group commits that were initiated due to the max group size being exceeded.
          +
          +
          Since:
          +
          5.0.76
          +
          +
        • +
        + + + +
          +
        • +

          getNReplayGroupCommitTxns

          +
          public long getNReplayGroupCommitTxns()
          +
          The number of replay transaction commits that were part of a group + commit operation.
          +
          +
          Since:
          +
          5.0.76
          +
          +
        • +
        + + + +
          +
        • +

          getNReplayGroupCommits

          +
          public long getNReplayGroupCommits()
          +
          The number of group commit operations.
          +
          +
          Since:
          +
          5.0.76
          +
          +
        • +
        + + + +
          +
        • +

          getReplayMinCommitProcessingNanos

          +
          public long getReplayMinCommitProcessingNanos()
          +
          The minimum time taken to replay a transaction commit operation.
          +
        • +
        + + + +
          +
        • +

          getReplayMaxCommitProcessingNanos

          +
          public long getReplayMaxCommitProcessingNanos()
          +
          The maximum time taken to replay a transaction commit operation.
          +
        • +
        + + + +
          +
        • +

          getReplayTotalCommitProcessingNanos

          +
          public long getReplayTotalCommitProcessingNanos()
          +
          The total time spent to replay all commit operations.
          +
        • +
        + + + +
          +
        • +

          getNProtocolBytesRead

          +
          public long getNProtocolBytesRead()
          +
          The number of bytes of Replication Stream read over the network. It does + not include the TCP/IP overhead. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the sum total of all Feeder related + network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getNProtocolMessagesRead

          +
          public long getNProtocolMessagesRead()
          +
          The number of Replication Stream messages read over the network. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the sum total of all Feeder related + network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getNProtocolBytesWritten

          +
          public long getNProtocolBytesWritten()
          +
          The number of Replication Stream bytes written over the network. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the sum total of all Feeder related + network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getNProtocolMessagesBatched

          +
          public long getNProtocolMessagesBatched()
          +
          The number of Replication Stream messages that were written as part of a message batch instead of being written individually. These represent a subset of the messages returned by getNProtocolMessagesWritten().
          +
          +
          Since:
          +
          6.2.7
          +
          See Also:
          +
          getNProtocolMessageBatches()
          +
          +
        • +
        + + + +
          +
        • +

          getNProtocolMessageBatches

          +
          public long getNProtocolMessageBatches()
          +
          The number of Replication Stream message batches written to the network.
          +
          +
          Since:
          +
          6.2.7
          +
          See Also:
          +
          getNProtocolMessagesBatched()
          +
          +
        • +
        + + + +
          +
        • +

          getNProtocolMessagesWritten

          +
          public long getNProtocolMessagesWritten()
          +
          The total number of Replication Stream messages written over the + network. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the sum total of all Feeder related + network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getProtocolReadNanos

          +
          public long getProtocolReadNanos()
          +
          The number of nanoseconds spent reading from the network channel. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the sum total of all Feeder related + network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getProtocolWriteNanos

          +
          public long getProtocolWriteNanos()
          +
          The number of nanoseconds spent writing to the network channel. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the sum total of all Feeder related + network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getProtocolMessageReadRate

          +
          public long getProtocolMessageReadRate()
          +
          Incoming replication message throughput, in terms of messages received + from the replication network channels per second. +

          If the node has served as both a Replica and Master since + it was first started, the number represents the message reading rate + over all Feeder related network activity, as well as Replica network + activity.

          +
        • +
        + + + +
          +
        • +

          getProtocolMessageWriteRate

          +
          public long getProtocolMessageWriteRate()
          +
          Outgoing message throughput, in terms of messages written to the replication network channels per second.

          + If the node has served as both a Replica and Master since it was first + started, the number represents the message writing rate over all Feeder + related network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getProtocolBytesReadRate

          +
          public long getProtocolBytesReadRate()
          +
          Bytes read throughput, in terms of bytes received from the replication + network channels per second. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the bytes reading rate over all Feeder + related network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getProtocolBytesWriteRate

          +
          public long getProtocolBytesWriteRate()
          +
          Bytes written throughput, in terms of bytes written to the replication + network channels per second. +

          + If the node has served as both a Replica and Master since it was first + started, the number represents the bytes writing rate over all Feeder + related network activity, as well as Replica network activity.

          +
        • +
        + + + +
          +
        • +

          getNProtocolEntriesWrittenOldVersion

          +
          public long getNProtocolEntriesWrittenOldVersion()
          +
          Returns the number of messages containing log entries that were written + to the replication stream using the previous log format to support + replication to a replica running an earlier version during an upgrade.
          +
        • +
        + + + + + + + + + + + + + + + + + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns a string representation of the statistics.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toStringVerbose

          +
          public java.lang.String toStringVerbose()
          +
        • +
        + + + +
          +
        • +

          getTips

          +
          public java.util.Map<java.lang.String,java.lang.String> getTips()
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/ReplicationConfig.html b/docs/java/com/sleepycat/je/rep/ReplicationConfig.html new file mode 100644 index 0000000..5cca53f --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/ReplicationConfig.html @@ -0,0 +1,2412 @@ + + + + + +ReplicationConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class ReplicationConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      com.sleepycat.je.dbi.RepConfigProxy, java.io.Serializable, java.lang.Cloneable
      +
      +
      +
      +
      public class ReplicationConfig
      +extends ReplicationMutableConfig
      +implements com.sleepycat.je.dbi.RepConfigProxy
      +
      Specifies the immutable attributes of a replicated environment. +

      + To change the default settings for a replicated environment, an application + creates a configuration object, customizes settings and uses it for ReplicatedEnvironment construction. The set methods of this class validate + the configuration values when the method is invoked. An + IllegalArgumentException is thrown if the value is not valid for that + attribute. +

      + Note that ReplicationConfig only describes those attributes which must be + set at ReplicatedEnvironment construction time, while its superclass + ReplicationMutableConfig describes attributes that may be modified + during the life of the replication group. +

      + ReplicationConfig follows precedence rules similar to those of + EnvironmentConfig. +

        +
      1. Configuration parameters specified in <environmentHome>/je.properties take first precedence.
      2. Configuration parameters set in the ReplicationConfig object used at ReplicatedEnvironment construction are next.
      3. Any configuration parameters not set by the application are set to system defaults, described along with the parameter name String constants in this class.
      +

      + After a ReplicatedEnvironment has been constructed, its mutable + properties may be changed using ReplicatedEnvironment#setMutableConfig. See ReplicationMutableConfig for a list of mutable properties; all other + properties are immutable. Whether a property is mutable or immutable is + also described along with the parameter name String constants in this class. + +

      Getting the Current ReplicatedEnvironment Properties

      + + To get the current "live" properties of a replicated environment after + constructing it or changing its properties, you must call ReplicatedEnvironment.getRepConfig() or ReplicatedEnvironment.getRepMutableConfig(). The original ReplicationConfig + or ReplicationMutableConfig object used to set the properties is not kept up + to date as properties are changed, and does not reflect property validation + or properties that are computed.
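      + For example (a sketch):
      +
      + ReplicationConfig liveConfig = repEnv.getRepConfig();
      + ReplicationMutableConfig liveMutableConfig = repEnv.getRepMutableConfig();
      +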
      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          GROUP_NAME

          +
          public static final java.lang.String GROUP_NAME
          +
          The name for the replication group. + The name should consist of letters, digits, and/or hyphen ("-"), + underscore ("_"), or period ("."). + +

          Name                Type    Mutable  Default
          "je.rep.groupName"  String  No       "DefaultGroup"

          +
          +
          See Also:
          +
          setGroupName(java.lang.String), +getGroupName(), +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          NODE_NAME

          +
          public static final java.lang.String NODE_NAME
          +
          The node name uniquely identifies this node within the replication + group. + The name should consist of letters, digits, and/or hyphen ("-"), + underscore ("_"), or period ("."). + +

          Note that the node name is immutable. Normally the host name should + not be used as the node name, unless you intend to reuse the host + name when a machine fails and is replaced, or the node is upgraded to + new hardware.

          + +

          Name               Type    Mutable  Default
          "je.rep.nodeName"  String  No       "DefaultRepNodeName"

          +
          +
          See Also:
          +
          setNodeName(java.lang.String), +getNodeName(), +Constant Field Values
          +
          +
        • +
        + + + + + + + + + + + +
          +
        • +

          DEFAULT_PORT

          +
          public static final java.lang.String DEFAULT_PORT
          +
          The default port used for replication. +

          Name                  Type     Mutable  Default  Minimum  Maximum
          "je.rep.defaultPort"  Integer  No       5001     1024     Short.MAX_VALUE

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          NODE_HOST_PORT

          +
          public static final java.lang.String NODE_HOST_PORT
          +
          Names the hostname and port associated with this node in the + replication group, e.g. je.rep.nodeHostPort=foo.com:5001. +

          + The hostname is defaulted to "localhost" to make it easy to prototype + and to execute the examples, but the user should be very sure to set a + specific hostname before starting nodes on multiple machines. The value + of je.rep.nodeHostPort is saved persistently in replication group + metadata and is expected to be a unique address, and a value of + "localhost" in the replication metadata will cause severe communication + confusion. +

          + The port portion of the host value is optional. If it's not specified, + the value of "je.rep.defaultPort" is used. +

          Name                   Type    Mutable  Default
          "je.rep.nodeHostPort"  String  No       "localhost"

          +
          +
          See Also:
          +
          setNodeHostPort(java.lang.String), +getNodeHostPort(), +Constant Field Values
          +
          +
        • +
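        The construction-time attributes above can equally be supplied through setConfigParam (shown in the Method Detail below) using these String constants; a brief sketch with hypothetical values:

            import com.sleepycat.je.rep.ReplicationConfig;

            class ConfigByParamName {
                static ReplicationConfig create() {
                    ReplicationConfig repConfig = new ReplicationConfig();
                    repConfig.setConfigParam(ReplicationConfig.GROUP_NAME, "PlanetaryRepGroup");
                    repConfig.setConfigParam(ReplicationConfig.NODE_NAME, "Jupiter");
                    // The port may be omitted, in which case "je.rep.defaultPort" applies.
                    repConfig.setConfigParam(ReplicationConfig.NODE_HOST_PORT,
                                             "node1.example.com:5001");
                    return repConfig;
                }
            }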
        + + + +
          +
        • +

          BIND_INADDR_ANY

          +
          public static final java.lang.String BIND_INADDR_ANY
          +
          When this configuration parameter is set to true, it binds the HA socket + to INADDR_ANY, so that HA services are available on all network + interfaces. The default value (false) results in the HA socket being + bound to the specific interface specified by the NODE_HOST_PORT + configuration. + +

          Name                    Type     Mutable  Default
          "je.rep.bindInaddrAny"  Boolean  No       false
          +

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + + + + + +
          +
        • +

          REP_STREAM_TIMEOUT

          +
          public static final java.lang.String REP_STREAM_TIMEOUT
          +
          Deprecated. No longer used as of JE 7.5. Reserved files are now retained based on available disk space; see EnvironmentConfig.MAX_DISK and EnvironmentConfig.FREE_DISK instead. However, this param is still used when some, but not all, nodes in a group have been upgraded to 7.5 or later.
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + + + + + +
          +
        • +

          REPLAY_FREE_DISK_PERCENT

          +
          public static final java.lang.String REPLAY_FREE_DISK_PERCENT
          +
          Deprecated. No longer needed as of JE 7.5. Reserved files are now retained based on available disk space; see EnvironmentConfig.MAX_DISK and EnvironmentConfig.FREE_DISK instead. However, this param is still used when it has been specified and is non-zero, and FREE_DISK has not been specified. In this case, REPLAY_FREE_DISK_PERCENT overrides the FREE_DISK default value. If both REPLAY_FREE_DISK_PERCENT and FREE_DISK are specified, an IllegalArgumentException is thrown.
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          REPLAY_TXN_LOCK_TIMEOUT

          +
          public static final java.lang.String REPLAY_TXN_LOCK_TIMEOUT
          +
          The maximum amount of time for a replay transaction to wait for a lock. + +

          Name                           Type      Mutable  Default  Minimum  Maximum
          "je.rep.replayTxnLockTimeout"  Duration  No       500 ms   1 ms     75 min

          +
          +
          See Also:
          +
          Time Duration + Properties, +Constant Field Values
          +
          +
        • +
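        Duration-valued parameters such as this one are specified as strings of the form "<value> <unit>", per the Time Duration Properties reference above; a minimal sketch, with an illustrative value:

            import com.sleepycat.je.rep.ReplicationConfig;

            class ReplayLockTimeoutTuning {
                static void apply(ReplicationConfig repConfig) {
                    // Raise the replay transaction lock timeout from its
                    // 500 ms default to 2 seconds.
                    repConfig.setConfigParam(
                        ReplicationConfig.REPLAY_TXN_LOCK_TIMEOUT, "2 s");
                }
            }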
        + + + +
          +
        • +

          REPLAY_MAX_OPEN_DB_HANDLES

          +
          @Deprecated
          +public static final java.lang.String REPLAY_MAX_OPEN_DB_HANDLES
          Deprecated. Replaced by ReplicationMutableConfig.REPLAY_MAX_OPEN_DB_HANDLES.
          The maximum number of most recently used database handles that + are kept open during the replay of the replication stream. + +

          Name                             Type  Mutable  Default  Minimum  Maximum
          "je.rep.replayMaxOpenDbHandles"  Int   Yes      10       1        -none-

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          REPLAY_DB_HANDLE_TIMEOUT

          +
          @Deprecated
          +public static final java.lang.String REPLAY_DB_HANDLE_TIMEOUT
          Deprecated. Replaced by ReplicationMutableConfig.REPLAY_DB_HANDLE_TIMEOUT.
          The maximum amount of time that an inactive database handle is kept open + during a replay of the replication stream. Handles that are inactive for + more than this time period are automatically closed. Note that this does + not impact any handles that may have been opened by the application. + +

          Name                              Type      Mutable  Default  Minimum  Maximum
          "je.rep.replayOpenHandleTimeout"  Duration  Yes      30 sec   1 sec    -none-

          +
          +
          See Also:
          +
          Time Duration + Properties, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ENV_CONSISTENCY_TIMEOUT

          +
          public static final java.lang.String ENV_CONSISTENCY_TIMEOUT
          +
          The amount of time to wait for a Replica to become consistent with the + Master, when a ReplicatedEnvironment handle is created and + no ConsistencyPolicy is specified. If the Replica does not + become consistent within this period, a + ReplicaConsistencyException is thrown by the + ReplicatedEnvironment constructor. +

          + If an explicit ConsistencyPolicy is specified via a + constructor argument, then the timeout defined by the + ConsistencyPolicy argument is used instead of this default. +

          Name                            Type      Mutable  Default  Minimum  Maximum
          "je.rep.envConsistencyTimeout"  Duration  No       5 min    10 ms    -none-
          +

          +
          +
          See Also:
          +
          Time Duration + Properties, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          REPLICA_ACK_TIMEOUT

          +
          public static final java.lang.String REPLICA_ACK_TIMEOUT
          +
          The amount of time that the + Transaction.commit(com.sleepycat.je.Durability) + on the Master will wait for a sufficient number of acknowledgments from + electable Replicas. If the Master does not receive a sufficient number of + acknowledgments within this timeout period, the commit() + will throw InsufficientAcksException. In the special case of a + two node group, if this node is the designated Primary, + the Primary will be activated, and the + commit() will proceed normally instead of throwing an + exception. +

          Name                        Type      Mutable  Default  Minimum  Maximum
          "je.rep.replicaAckTimeout"  Duration  No       5 s      10 ms    -none-
          +

          +
          +
          See Also:
          +
          Time Duration + Properties, +ReplicationMutableConfig.DESIGNATED_PRIMARY, +Constant Field Values
          +
          +
        • +
        + + + + + + + +
          +
        • +

          MAX_MESSAGE_SIZE

          +
          public static final java.lang.String MAX_MESSAGE_SIZE
          +
          The maximum message size which will be accepted by a node (to prevent DOS attacks). While the default shown here is 0, it is dynamically calculated when the node is created and is set to half of the environment cache size. The cache size is mutable, but changing the cache size at run time (after environment initialization) will not change the value of this parameter. If a value other than cache size / 2 is desired, this non-mutable parameter should be specified at initialization time.

          Name                     Type  Mutable  Default             Minimum  Maximum
          "je.rep.maxMessageSize"  Long  No       half of cache size  256KB    Long.MAX_VALUE

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + + + + + +
          +
        • +

          ELECTIONS_PRIMARY_RETRIES

          +
          public static final java.lang.String ELECTIONS_PRIMARY_RETRIES
          +
          The number of times an unsuccessful election will be retried by a + designated Primary in a two node group before it is + activated and becomes the Master. + +

          Name                              Type     Mutable  Default  Minimum  Maximum
          "je.rep.electionsPrimaryRetries"  Integer  No       2        0        Integer.MAX_VALUE

          +
          +
          See Also:
          +
          ReplicationMutableConfig.DESIGNATED_PRIMARY, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ELECTIONS_REBROADCAST_PERIOD

          +
          public static final java.lang.String ELECTIONS_REBROADCAST_PERIOD
          +
          The time interval between rebroadcasts of election results by the master + node to all nodes not currently connected to it. These rebroadcasts help + ensure that a replication group is fully restored after a network + partition, by permitting nodes on either side of the resolved partition + to catch up with the latest election results. +

          A network partition may, in some circumstances, result in a node continuing to think it is the master, even though it is on the side of the partition containing a minority of electable nodes, and the side with the majority has elected a new master. Rebroadcasting election results on a periodic basis ensures that the obsolete master is brought up to date after the network partition has been resolved. As a result of the update, the environment at the obsolete master will transition into a replica state.

          + Decreasing the period will result in more frequent broadcasts and thus a + faster return to normal operations after a network partition has been + resolved. + +

          Name                                 Type      Mutable  Default  Minimum  Maximum
          "je.rep.electionsRebroadcastPeriod"  Duration  No       1 min    1 s      none
          +

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          TXN_ROLLBACK_LIMIT

          +
          public static final java.lang.String TXN_ROLLBACK_LIMIT
          +
          In rare cases, a node may need to roll back committed transactions in order to rejoin a replication group. This parameter limits the number of durable transactions that may be rolled back. Durable transactions are transactions that were successfully committed with a durability requiring acknowledgments from at least a simple majority of nodes. If the number of durable committed transactions targeted for rollback exceeds this parameter, a RollbackProhibitedException will be thrown.

          Name                       Type     Mutable  Default  Minimum  Maximum
          "je.rep.txnRollbackLimit"  Integer  No       10       0        Integer.MAX_VALUE
          +

          +
          +
          See Also:
          +
          RollbackProhibitedException, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          TXN_ROLLBACK_DISABLED

          +
          public static final java.lang.String TXN_ROLLBACK_DISABLED
          +
          In rare cases, a node may need to roll back committed transactions in order to rejoin a replication group. If this parameter is set to true and a rollback is necessary to rejoin the group, a RollbackProhibitedException will be thrown.

          Unlike setting TXN_ROLLBACK_LIMIT to zero, setting this + parameter to true disables the rollback without regard to whether the + transactions to roll back are considered durable.

          + +

          Setting TXN_ROLLBACK_DISABLED to true should not be + necessary for most applications. Its intended purpose is for the rare + application that needs manual control over rollback of all transactions, + including transactions that are not considered to be durable.

          + +

          Name                          Type     Mutable  Default
          "je.rep.txnRollbackDisabled"  Boolean  No       False

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          FEEDER_TIMEOUT

          +
          public static final java.lang.String FEEDER_TIMEOUT
          +
          A heartbeat is exchanged between the feeder and replica to ensure they + are alive. This is the timeout associated with the heartbeat on the + feeder side of the connection. +

          + Reducing this value enables the master to discover failed Replicas, and + recycle feeder connections, faster. However, it increases the chances of + false timeouts, if the network is experiencing transient problems, or + the Java GC is responsible for long pauses. In the latter case, it's + generally better to tune the GC to avoid such pauses. + +

          Name                    Type      Mutable  Default  Minimum  Maximum
          "je.rep.feederTimeout"  Duration  No       30 s     2 s      -none-
          +

          +
          +
          Since:
          +
          4.0.100
          +
          See Also:
          +
          Time Duration + Properties, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          REPLICA_TIMEOUT

          +
          public static final java.lang.String REPLICA_TIMEOUT
          +
          A heartbeat is exchanged between the feeder and replica to ensure they + are alive. This is the timeout associated with the heartbeat on the + replica side of the connection. +

          + Reducing the value means that a master failure will be discovered more + promptly in some circumstances and the overall time needed to failover + to a new master will be reduced. However, it increases the chances of + false timeouts, if the network is experiencing transient problems, or + the Java GC is responsible for long pauses. In the latter case, it's + generally better to tune the GC to avoid such pauses. +

          Name                     Type      Mutable  Default  Minimum  Maximum
          "je.rep.replicaTimeout"  Duration  No       30 s     2 s      -none-
          +

          +
          +
          Since:
          +
          4.0.100
          +
          See Also:
          +
          Time Duration + Properties, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          REPLICA_RECEIVE_BUFFER_SIZE

          +
          public static final java.lang.String REPLICA_RECEIVE_BUFFER_SIZE
          +
          The size of the TCP receive buffer associated with the socket used by the replica to transfer the replication stream.

          Larger values help handle incoming network traffic even when the replica has been paused for a garbage collection. The parameter's default value of 1 MB should be sufficient in most environments. Consider increasing the value if network monitoring shows packet loss, or if your JE environment contains large data values. Note that if the size specified is larger than the operating system's constrained maximum, it will be limited to that maximum. For example, on Linux you may need to raise the kernel parameter net.core.rmem_max (e.g. via sysctl -w net.core.rmem_max=1048576) to increase the operating system imposed limit.

          A parameter value of zero results in the use of the operating system's default socket buffer size.

          Name                               Type     Mutable  Default  Minimum  Maximum
          "je.rep.replicaReceiveBufferSize"  Integer  No       1048576  0        -none-
          +

          +
          +
          Since:
          +
          5.0.37
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          REPLICA_MAX_GROUP_COMMIT

          +
          public static final java.lang.String REPLICA_MAX_GROUP_COMMIT
          +
          The maximum number of transactions that can be grouped to amortize the + cost of an fsync when a transaction commits with SyncPolicy#SYNC on the + Replica. A value of zero effectively turns off the group commit + optimization. +

          + Specifying larger values can result in more transactions being grouped + together decreasing average commit times. +

          + An fsync is issued if the size of the transaction group reaches the + maximum within the time period specified by + REPLICA_GROUP_COMMIT_INTERVAL. +

          + The ReplicatedEnvironmentStats.getNReplayGroupCommitMaxExceeded() + statistic may be used to tune this parameter. Large values indicate that + commit throughput could be improved by increasing the current value. +

          Name                            Type     Mutable  Default  Minimum  Maximum
          "je.rep.replicaMaxGroupCommit"  Integer  No       200      0        -none-
          +

          +
          +
          Since:
          +
          5.0.76
          +
          See Also:
          +
          REPLICA_GROUP_COMMIT_INTERVAL, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          REPLICA_GROUP_COMMIT_INTERVAL

          +
          public static final java.lang.String REPLICA_GROUP_COMMIT_INTERVAL
          +
          The time interval during which transactions may be grouped to amortize + the cost of fsync when a transaction commits with SyncPolicy#SYNC on the + Replica. This parameter is only meaningful if the + group commit size is greater than one. +

          + The first (as ordered by transaction serialization) transaction in a + transaction group may be delayed by at most this amount. Subsequent + transactions in the group will have smaller delays since they are later + in the serialization order. +

          + The ReplicatedEnvironmentStats.getNReplayGroupCommitTimeouts() + statistic may be used to tune this parameter. Large numbers of timeouts + in conjunction with large numbers of group commits ( + ReplicatedEnvironmentStats.getNReplayGroupCommits()) indicate + that commit throughput could be improved by increasing the time + interval. +

          Name                                 Type      Mutable  Default  Minimum  Maximum
          "je.rep.replicaGroupCommitInterval"  Duration  No       3 ms     0        -none-
          +

          +
          +
          Since:
          +
          5.0.76
          +
          See Also:
          +
          REPLICA_MAX_GROUP_COMMIT, +Constant Field Values
          +
          +
        • +
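        A sketch of how the replay group-commit statistics named above might be consulted when tuning these two parameters (the reporting is illustrative; the stat accessors are those cited in the text):

            import com.sleepycat.je.StatsConfig;
            import com.sleepycat.je.rep.ReplicatedEnvironment;
            import com.sleepycat.je.rep.ReplicatedEnvironmentStats;

            class GroupCommitReport {
                static void print(ReplicatedEnvironment repEnv) {
                    ReplicatedEnvironmentStats stats =
                        repEnv.getRepStats(new StatsConfig());
                    // Large values suggest raising je.rep.replicaMaxGroupCommit.
                    System.out.println("maxExceeded  = "
                        + stats.getNReplayGroupCommitMaxExceeded());
                    // Many timeouts alongside many group commits suggest raising
                    // je.rep.replicaGroupCommitInterval.
                    System.out.println("timeouts     = "
                        + stats.getNReplayGroupCommitTimeouts());
                    System.out.println("groupCommits = "
                        + stats.getNReplayGroupCommits());
                }
            }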
        + + + +
          +
        • +

          ENV_SETUP_TIMEOUT

          +
          public static final java.lang.String ENV_SETUP_TIMEOUT
          +
          The maximum amount of time for internal housekeeping, such as elections and syncup with the master, to be accomplished when opening a new handle to an environment.

          + This timeout does not encompass the time spent making the node + consistent with the master, if it is a Replica. The timeout associated + with making a replica consistent is normally determined by the + ENV_CONSISTENCY_TIMEOUT parameter but can be overridden by the + timeout associated with the ReplicaConsistencyPolicy if a + consistencyPolicy argument was supplied to the handle + constructor. +

          + Note that the default value (10 hours) is a long time to allow for cases + where elections may take a long time when other nodes are not available. +

          Name                      Type      Mutable  Default  Minimum  Maximum
          "je.rep.envSetupTimeout"  Duration  No       10 h     -none-   -none-
          +

          +
          +
          See Also:
          +
          Time Duration + Properties, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ENV_UNKNOWN_STATE_TIMEOUT

          +
          public static final java.lang.String ENV_UNKNOWN_STATE_TIMEOUT
          +
          Permits opening of a ReplicatedEnvironment handle in the + ReplicatedEnvironment.State.UNKNOWN state, if a Master cannot be + determined within this timeout period. For the timeout to be meaningful + it must be less than ENV_SETUP_TIMEOUT. This parameter is + ignored when creating a replicated environment for the first time. +

          A ReplicatedEnvironment handle in the ReplicatedEnvironment.State.UNKNOWN state can only be used to initiate read operations with an appropriately relaxed consistency policy, e.g. NoConsistencyRequiredPolicy; write operations will fail with a ReplicaWriteException. The handle will transition to a Master or Replica state when it can contact a sufficient number of other nodes in the replication group.

          + If the parameter is set to zero, and an election cannot be concluded + within the timeout defined by ENV_SETUP_TIMEOUT, the + ReplicatedEnvironment constructor will throw UnknownMasterException. +

          Name                             Type      Mutable  Default  Minimum  Maximum
          "je.rep.envUnknownStateTimeout"  Duration  No       0        -none-   ENV_SETUP_TIMEOUT
          +
          Since:
          +
          5.0.33
          +
          See Also:
          +
          Constant Field Values
          +
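        A sketch of the pattern this parameter enables: return from the constructor in the UNKNOWN state rather than waiting out ENV_SETUP_TIMEOUT, then read with a relaxed consistency policy (directory and timeout values are illustrative):

            import java.io.File;

            import com.sleepycat.je.EnvironmentConfig;
            import com.sleepycat.je.Transaction;
            import com.sleepycat.je.TransactionConfig;
            import com.sleepycat.je.rep.NoConsistencyRequiredPolicy;
            import com.sleepycat.je.rep.ReplicatedEnvironment;
            import com.sleepycat.je.rep.ReplicationConfig;

            class UnknownStateReader {
                static void readDespiteNoMaster(ReplicationConfig repConfig,
                                                EnvironmentConfig envConfig) {
                    // Give up looking for a Master after 5 s and open UNKNOWN.
                    repConfig.setConfigParam(
                        ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "5 s");
                    ReplicatedEnvironment repEnv =
                        new ReplicatedEnvironment(new File("/var/je/envHome"),
                                                  repConfig, envConfig);

                    // Reads in the UNKNOWN state must not require consistency.
                    TransactionConfig txnConfig = new TransactionConfig();
                    txnConfig.setConsistencyPolicy(
                        NoConsistencyRequiredPolicy.NO_CONSISTENCY);
                    Transaction txn = repEnv.beginTransaction(null, txnConfig);
                    // ... read operations only; writes throw ReplicaWriteException ...
                    txn.commit();
                    repEnv.close();
                }
            }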
          + + + + + +
            +
          • +

            PROTOCOL_OLD_STRING_ENCODING

            +
            public static final java.lang.String PROTOCOL_OLD_STRING_ENCODING
            +
            When set to true, which is currently the default, the + replication network protocol will use the JVM platform default charset + (text encoding) for node names and host names. This is incorrect, in + that it requires that the JVM for all nodes in a replication group have + the same default charset. +

            + When this parameter is set to false, the UTF-8 charset is + always used in the replication protocol. In other words, the JVM + default charset has no impact on the replication protocol. +

            + An application is not impacted by this issue, and does not need + to set this parameter, if it has the following characteristics. +

            • The default charset on all JVMs is UTF-8 or ASCII, or
            • all node names and host names contain only ASCII characters, and the default charset on all JVMs is a superset of ASCII.

            + In JE 5.1, the default value for this parameter will be changed to + false. In preparation for this, impacted applications should explicitly + set the parameter to false at the next available opportunity. For + applications not yet deployed, this should be done now. For deployed + applications, a hot upgrade may not be performed when changing the + parameter. Instead, a cold upgrade must be performed: all nodes must + be stopped and upgraded before bringing them up again. In other words, + for impacted applications the value of this configuration parameter must + be the same for all running nodes in a replication group. +

            + Note that the default charset issue applies only to the replication + network protocol and not to stored data of any kind. +

            +

          Name                                Type     Mutable  Default
          "je.rep.protocolOldStringEncoding"  Boolean  No       True

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
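        Per the upgrade guidance above, an application preparing for the new default would set the parameter explicitly on every node; a one-method sketch:

            import com.sleepycat.je.rep.ReplicationConfig;

            class Utf8ProtocolOptIn {
                static void apply(ReplicationConfig repConfig) {
                    // Opt in to UTF-8 ahead of the default change; the value
                    // must be identical on every running node in the group.
                    repConfig.setConfigParam(
                        ReplicationConfig.PROTOCOL_OLD_STRING_ENCODING, "false");
                }
            }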
        + + + +
          +
        • +

          ARBITER_OUTPUT_QUEUE_SIZE

          +
          public static final java.lang.String ARBITER_OUTPUT_QUEUE_SIZE
          +
          The size of the queue used to hold commit records that the Feeder uses to request acknowledgment from an Arbiter.

          The Feeder attempts to put each entry on the queue; if the entry cannot be queued within a certain amount of time, the transaction will fail due to insufficient acks.

          Name                             Type     Mutable  Default  Minimum  Maximum
          "je.rep.arbiterOutputQueueSize"  Integer  No       4096     0        -none-
          +

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ReplicationConfig

          +
          public ReplicationConfig()
          +
          Creates a ReplicationConfig initialized with the system default + settings. Defaults are documented with the string constants in this + class.
          +
        • +
        + + + +
          +
        • +

          ReplicationConfig

          +
          public ReplicationConfig(java.lang.String groupName,
          +                         java.lang.String nodeName,
          +                         java.lang.String hostPort)
          +
          Creates a ReplicationConfig initialized with the system default + settings and the specified group name, node name, and hostname/port + values. + +

          Note that the node name is immutable. Normally the host name should + not be used as the node name, unless you intend to reuse the host + name when a machine fails and is replaced, or the node is upgraded to + new hardware.

          +
          +
          Parameters:
          +
          groupName - the name for the replication group
          +
          nodeName - the name for this node
          +
          hostPort - the hostname and port for this node
          +
          See Also:
          +
          setGroupName(java.lang.String), +setNodeName(java.lang.String)
          +
          +
        • +
        + + + +
          +
        • +

          ReplicationConfig

          +
          public ReplicationConfig(java.util.Properties properties)
          +                  throws java.lang.IllegalArgumentException
          +
          Creates a ReplicationConfig which includes the properties specified in + the properties parameter.
          +
          +
          Parameters:
          +
          properties - Supported properties are described as the string + constants in this class.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - If any properties read from the + properties parameter are invalid.
          +
          +
        • +
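        A brief sketch of this constructor, using the parameter name strings documented above (the values are hypothetical):

            import java.util.Properties;

            import com.sleepycat.je.rep.ReplicationConfig;

            class ConfigFromProperties {
                static ReplicationConfig create() {
                    Properties props = new Properties();
                    props.setProperty("je.rep.groupName", "PlanetaryRepGroup");
                    props.setProperty("je.rep.nodeName", "Jupiter");
                    props.setProperty("je.rep.nodeHostPort", "node1.example.com:5001");
                    // Invalid names or values raise IllegalArgumentException here.
                    return new ReplicationConfig(props);
                }
            }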
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getGroupName

          +
          public java.lang.String getGroupName()
          +
          Gets the name associated with the replication group.
          +
          +
          Returns:
          +
          the name of this replication group.
          +
          +
        • +
        + + + +
          +
        • +

          setGroupName

          +
          public ReplicationConfig setGroupName(java.lang.String groupName)
          +                               throws java.lang.IllegalArgumentException
          +
          Sets the name for the replication group. +

          + The name should consist of letters, digits, and/or hyphen ("-"), + underscore ("_"), or period (".").

          +
          +
          Parameters:
          +
          groupName - the string representing the name
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - If the string name is not valid
          +
          +
        • +
        + + + +
          +
        • +

          getNodeName

          +
          public java.lang.String getNodeName()
          +
          Returns the unique name associated with this node.
          +
          +
          Returns:
          +
          the node name
          +
          +
        • +
        + + + +
          +
        • +

          setNodeName

          +
          public ReplicationConfig setNodeName(java.lang.String nodeName)
          +                              throws java.lang.IllegalArgumentException
          +
          Sets the name to be associated with this node. It must be unique within + the group. When the node is instantiated and joins the replication + group, a check is done to ensure that the name is unique, and a RestartRequiredException is thrown if it is not. +

          + The name should consist of letters, digits, and/or hyphen ("-"), + underscore ("_"), or period ("."). + +

          Note that the node name is immutable. Normally the host name should + not be used as the node name, unless you intend to reuse the host + name when a machine fails and is replaced, or the node is upgraded to + new hardware.

          +
          +
          Parameters:
          +
          nodeName - the node name for this replicated environment.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - If the name is not valid
          +
          +
        • +
        + + + +
          +
        • +

          getNodeType

          +
          public NodeType getNodeType()
          +
          Returns the NodeType of this node.
          +
          +
          Returns:
          +
          the node type
          +
          +
        • +
        + + + +
          +
        • +

          setNodeType

          +
          public ReplicationConfig setNodeType(NodeType nodeType)
          +
          Sets the type of this node.
          +
          +
          Parameters:
          +
          nodeType - the node type
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getNodeHostPort

          +
          public java.lang.String getNodeHostPort()
          +
          Returns the hostname and port associated with this node. The hostname + and port combination are denoted by a string of the form: +
          +   hostname:port
          + 
          +
          +
          Returns:
          +
          the hostname and port string.
          +
          See Also:
          +
          NODE_HOST_PORT
          +
          +
        • +
        + + + +
          +
        • +

          setNodeHostPort

          +
          public ReplicationConfig setNodeHostPort(java.lang.String hostPort)
          +
          Sets the hostname and port associated with this node. The hostname + and port combination are denoted by a string of the form: +
          +  hostname[:port]
          + 
          + The port must be outside the range of "Well Known Ports" + (zero through 1023).
          +
          +
          Parameters:
          +
          hostPort - the string containing the hostname and port as above.
          +
          Returns:
          +
          this
          +
          See Also:
          +
          NODE_HOST_PORT
          +
          +
        • +
        + + + +
          +
        • +

          getReplicaAckTimeout

          +
          public long getReplicaAckTimeout(java.util.concurrent.TimeUnit unit)
          +
          Returns the configured replica acknowledgment timeout.
          +
          +
          Returns:
          +
          the timeout, converted to the specified unit
          +
          +
        • +
        + + + +
          +
        • +

          setReplicaAckTimeout

          +
          public ReplicationConfig setReplicaAckTimeout(long replicaAckTimeout,
          +                                              java.util.concurrent.TimeUnit unit)
          +
          Sets the replica acknowledgment timeout.
          +
          +
          Parameters:
          +
          replicaAckTimeout - the timeout value, in units of unit
          +
          Returns:
          +
          this
          +
          +
        • +
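        A one-method sketch of this setter (the 2-second value is illustrative; see REPLICA_ACK_TIMEOUT above for the 5 s default):

            import java.util.concurrent.TimeUnit;

            import com.sleepycat.je.rep.ReplicationConfig;

            class AckTimeoutTuning {
                static void apply(ReplicationConfig repConfig) {
                    // Fail commits faster when replicas are slow to acknowledge.
                    repConfig.setReplicaAckTimeout(2, TimeUnit.SECONDS);
                }
            }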
        + + + +
          +
        • +

          getMaxClockDelta

          +
          public long getMaxClockDelta(java.util.concurrent.TimeUnit unit)
          +
          Returns the maximum acceptable clock skew between this Replica and its + Feeder, which is the node that is the source of its replication stream.
          +
          +
          Returns:
          +
          the max permissible clock skew
          +
          +
        • +
        + + + +
          +
        • +

          setMaxClockDelta

          +
          public ReplicationConfig setMaxClockDelta(long maxClockDelta,
          +                                          java.util.concurrent.TimeUnit unit)
          +                                   throws java.lang.IllegalArgumentException
          +
          Sets the maximum acceptable clock skew between this Replica and its + Feeder, which is the node that is the source of its replication + stream. This value is checked whenever a Replica establishes a + connection to its replication stream source. The connection is abandoned + if the clock skew is larger than this value.
          +
          +
          Parameters:
          +
          maxClockDelta - the maximum acceptable clock skew
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the value is not a positive integer
          +
          +
        • +
        + + + +
          +
        • +

          setConsistencyPolicy

          +
          public ReplicationConfig setConsistencyPolicy(ReplicaConsistencyPolicy policy)
          +
          Sets the consistency policy to be associated with the configuration. + This policy acts as the default policy used to govern the consistency + requirements when starting new transactions. See the overview on + consistency in replicated systems for more background. +

          +
          +
          Parameters:
          +
          policy - the consistency policy to be set for this config.
          +
          Returns:
          +
          this
          +
          +
        • +
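        A sketch of installing a default policy; TimeConsistencyPolicy is one concrete ReplicaConsistencyPolicy from this package, and the lag and timeout values are illustrative:

            import java.util.concurrent.TimeUnit;

            import com.sleepycat.je.rep.ReplicationConfig;
            import com.sleepycat.je.rep.TimeConsistencyPolicy;

            class DefaultConsistency {
                static void apply(ReplicationConfig repConfig) {
                    // Transactions started without an explicit policy will
                    // require the replica to lag the master by at most 2 s,
                    // waiting up to 10 s for that to become true.
                    repConfig.setConsistencyPolicy(
                        new TimeConsistencyPolicy(2, TimeUnit.SECONDS,
                                                  10, TimeUnit.SECONDS));
                }
            }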
        + + + +
          +
        • +

          getConsistencyPolicy

          +
          public ReplicaConsistencyPolicy getConsistencyPolicy()
          +
          Returns the default consistency policy associated with the + configuration. +

          + If the user does not set the default consistency policy through setConsistencyPolicy(com.sleepycat.je.ReplicaConsistencyPolicy), the system will use the policy + defined by CONSISTENCY_POLICY.

          +
          +
          Specified by:
          +
          getConsistencyPolicy in interface com.sleepycat.je.dbi.RepConfigProxy
          +
          Returns:
          +
          the consistency policy currently associated with this config.
          +
          +
        • +
        + + + +
          +
        • +

          setConfigParam

          +
          public ReplicationConfig setConfigParam(java.lang.String paramName,
          +                                        java.lang.String value)
          +                                 throws java.lang.IllegalArgumentException
          +
          Description copied from class: ReplicationMutableConfig
          +
          Set this configuration parameter with this value. Values are validated + before setting the parameter.
          +
          +
          Overrides:
          +
          setConfigParam in class ReplicationMutableConfig
          +
          Parameters:
          +
          paramName - the configuration parameter name, one of the String + constants in this class
          +
          value - the configuration value.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the paramName or value is invalid.
          +
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public ReplicationConfig clone()
          +
          Returns a copy of this configuration object.
          +
        • +
        + + + +
          +
        • +

          getNodeHostname

          +
          public java.lang.String getNodeHostname()
          +
          Returns the hostname component of the nodeHost property.
          +
          +
          Returns:
          +
          the hostname string
          +
          +
        • +
        + + + +
          +
        • +

          getNodePort

          +
          public int getNodePort()
          +
          Returns the port component of the nodeHost property.
          +
          +
          Returns:
          +
          the port number
          +
          +
        • +
        + + + +
          +
        • +

          setSyncupProgressListener

          +
          public ReplicationConfig setSyncupProgressListener(ProgressListener<SyncupProgress> progressListener)
          +
          Configure the environment to make periodic calls to a ProgressListener to provide feedback on replication stream sync-up. + The ProgressListener.progress() method is called at different stages of + the syncup process. See SyncupProgress for information about + those stages. +

          + When using progress listeners, review the information at ProgressListener.progress(T, long, long) to avoid any unintended disruption to + replication stream syncup.

          +
          +
          Parameters:
          +
          progressListener - The ProgressListener to callback during + environment instantiation (syncup).
          +
          Since:
          +
          5.0
          +
          See Also:
          +
          Replication Group Life Cycle
          +
          +
        • +
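        A sketch of a syncup listener (the logging is illustrative; per the ProgressListener contract, returning false halts the operation):

            import com.sleepycat.je.ProgressListener;
            import com.sleepycat.je.rep.ReplicationConfig;
            import com.sleepycat.je.rep.SyncupProgress;

            class SyncupLogger {
                static void apply(ReplicationConfig repConfig) {
                    repConfig.setSyncupProgressListener(
                        new ProgressListener<SyncupProgress>() {
                            @Override
                            public boolean progress(SyncupProgress phase,
                                                    long n, long total) {
                                System.out.println("syncup " + phase
                                    + " (" + n + "/" + total + ")");
                                return true; // false would halt the syncup
                            }
                        });
                }
            }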
        + + + +
          +
        • +

          getSyncupProgressListener

          +
          public ProgressListener<SyncupProgress> getSyncupProgressListener()
          +
          Return the ProgressListener to be used at this environment startup.
          +
        • +
        +
      • +
      +
    • +
    +
    +
    Berkeley DB Java Edition version 7.5.11

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/rep/ReplicationGroup.html b/docs/java/com/sleepycat/je/rep/ReplicationGroup.html
    new file mode 100644
    index 0000000..d689824
    --- /dev/null
    +++ b/docs/java/com/sleepycat/je/rep/ReplicationGroup.html
    @@ -0,0 +1,462 @@

    ReplicationGroup (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition
    version 7.5.11
    +
    com.sleepycat.je.rep
    +

    Class ReplicationGroup

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class ReplicationGroup
      +extends java.lang.Object
      +
      An administrative view of the collection of nodes that form the replication + group. Can be obtained from a ReplicatedEnvironment or a ReplicationGroupAdmin.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        All Methods, Instance Methods, Concrete Methods

        Modifier and Type               Method and Description

        java.util.Set<ReplicationNode>  getArbiterNodes()
            Returns the subset of nodes in the group that participate in elections
            but do not have a copy of the data and cannot become a master.

        java.util.Set<ReplicationNode>  getDataNodes()
            Returns the subset of nodes in the group that store replication data.

        java.util.Set<ReplicationNode>  getElectableNodes()
            Returns the subset of nodes in the group with replicated environments
            that participate in elections and can become masters, ignoring node
            priority.

        ReplicationNode                 getMember(java.lang.String nodeName)
            Get administrative information about a node by its node name.

        java.util.Set<ReplicationNode>  getMonitorNodes()
            Returns the subset of nodes in the group that monitor group membership
            but do not maintain replicated environments.

        java.lang.String                getName()
            Returns the name associated with the group.

        java.util.Set<ReplicationNode>  getNodes()
            Returns the set of all nodes in the group.

        java.util.Set<ReplicationNode>  getSecondaryNodes()
            Returns the subset of nodes in the group with replicated environments
            that do not participate in elections and cannot become masters.

        java.lang.String                toString()
            Returns a formatted version of the information held in a
            ReplicationGroup.
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getName

          +
          public java.lang.String getName()
          +
          Returns the name associated with the group.
          +
          +
          Returns:
          +
          the name of the replication group.
          +
          +
        • +
        + + + +
          +
        • +

          getNodes

          +
          public java.util.Set<ReplicationNode> getNodes()
          +
          Returns the set of all nodes in the group. The return value includes + ELECTABLE, MONITOR, and SECONDARY nodes. + +

          Note that SECONDARY nodes will only be included in the result when + this method is called for a replicated environment that is the master.

          +
          +
          Returns:
          +
          the set of all nodes
          +
          See Also:
          +
          NodeType
          +
          +
        • +
        + + + +
          +
        • +

          getElectableNodes

          +
          public java.util.Set<ReplicationNode> getElectableNodes()
          +
          Returns the subset of nodes in the group with replicated environments + that participate in elections and can become masters, ignoring node + priority. The return value includes ELECTABLE nodes, and excludes + MONITOR and SECONDARY nodes.
          +
          +
          Returns:
          +
          the set of electable nodes
          +
          See Also:
          +
          NodeType
          +
          +
        • +
        + + + +
          +
        • +

          getSecondaryNodes

          +
          public java.util.Set<ReplicationNode> getSecondaryNodes()
          +
          Returns the subset of nodes in the group with replicated environments + that do not participate in elections and cannot become masters. The + return value includes SECONDARY nodes, and excludes ELECTABLE and + MONITOR nodes. + +

          Note that SECONDARY nodes will only be returned when this method is + called for a replicated environment that is the master.

          +
          +
          Returns:
          +
          the set of secondary nodes
          +
          Since:
          +
          6.0
          +
          See Also:
          +
          NodeType
          +
          +
        • +
        + + + +
          +
        • +

          getMonitorNodes

          +
          public java.util.Set<ReplicationNode> getMonitorNodes()
          +
          Returns the subset of nodes in the group that monitor group membership + but do not maintain replicated environments. The return value includes + MONITOR nodes, but excludes ELECTABLE and SECONDARY nodes.
          +
          +
          Returns:
          +
          the set of monitor nodes
          +
          See Also:
          +
          NodeType
          +
          +
        • +
        + + + +
          +
        • +

          getDataNodes

          +
          public java.util.Set<ReplicationNode> getDataNodes()
          +
          Returns the subset of nodes in the group that store replication data. + The return value includes all ELECTABLE and SECONDARY nodes, but + excludes MONITOR nodes. + +

          Note that SECONDARY nodes will only be included in the result when + this method is called for a replicated environment that is the master.

          +
          +
          Returns:
          +
          the set of data nodes
          +
          Since:
          +
          6.0
          +
          See Also:
          +
          NodeType
          +
          +
        • +
        + + + +
          +
        • +

          getArbiterNodes

          +
          public java.util.Set<ReplicationNode> getArbiterNodes()
          +
          Returns the subset of nodes in the group that participate in elections but do not have a copy of the data and cannot become a master. The return value includes ARBITER nodes.
          +
          +
          Returns:
          +
          the set of arbiter nodes
          +
          See Also:
          +
          NodeType
          +
          +
        • +
        + + + +
          +
        • +

          getMember

          +
          public ReplicationNode getMember(java.lang.String nodeName)
          +
          Get administrative information about a node by its node name. + +

          Note that SECONDARY nodes will only be returned when this method is + called for a replicated environment that is the master.

          +
          +
          Parameters:
          +
          nodeName - the node name to be used in the lookup
          +
          Returns:
          +
          an administrative view of the node associated with nodeName, or + null if there isn't such a node currently in the group
          +
          +
        • +
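        A sketch tying these accessors together: enumerate the members visible from a live handle (assumes an open ReplicatedEnvironment):

            import com.sleepycat.je.rep.ReplicatedEnvironment;
            import com.sleepycat.je.rep.ReplicationGroup;
            import com.sleepycat.je.rep.ReplicationNode;

            class GroupRoster {
                static void print(ReplicatedEnvironment repEnv) {
                    ReplicationGroup group = repEnv.getGroup();
                    System.out.println("group " + group.getName());
                    // SECONDARY members appear only when called on the master.
                    for (ReplicationNode node : group.getNodes()) {
                        System.out.println("  " + node.getName()
                            + " type=" + node.getType()
                            + " addr=" + node.getSocketAddress());
                    }
                }
            }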
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns a formatted version of the information held in a + ReplicationGroup.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    Berkeley DB Java Edition version 7.5.11

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    diff --git a/docs/java/com/sleepycat/je/rep/ReplicationMutableConfig.html b/docs/java/com/sleepycat/je/rep/ReplicationMutableConfig.html
    new file mode 100644
    index 0000000..241ae9e
    --- /dev/null
    +++ b/docs/java/com/sleepycat/je/rep/ReplicationMutableConfig.html
    @@ -0,0 +1,1005 @@

    ReplicationMutableConfig (Oracle - Berkeley DB Java Edition API)
    Berkeley DB Java Edition
    version 7.5.11
    +
    com.sleepycat.je.rep
    +

    Class ReplicationMutableConfig

    +
    +
    + +
    + +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          ALLOW_ARBITER_ACK

          +
          public static final java.lang.String ALLOW_ARBITER_ACK
          +
          If this boolean flag is set to true, an Arbiter may acknowledge a transaction when a replication node is not available.

          Name                      Type     Mutable  Default
          "je.rep.allowArbiterAck"  Boolean  Yes      True

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          DESIGNATED_PRIMARY

          +
          public static final java.lang.String DESIGNATED_PRIMARY
          +
          Identifies the Primary node in a two node group. See the discussion of issues when configuring two node groups.

          Name                        Type     Mutable  Default
          "je.rep.designatedPrimary"  Boolean  Yes      False

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          ELECTABLE_GROUP_SIZE_OVERRIDE

          +
          public static final java.lang.String ELECTABLE_GROUP_SIZE_OVERRIDE
          +
          An escape mechanism to modify the way in which the number of electable + nodes, and consequently the quorum requirements for elections and commit + acknowledgments, is calculated. The override is accomplished by + specifying the quorum size via this mutable configuration parameter. +

          + When this parameter is set to a non-zero value at a member node, the + member will use this value as the electable group size, instead of using + the metadata stored in the RepGroup database for its quorum + calculations. This parameter's value should be set to the number of + electable nodes known to be available. The default value is zero, which + indicates normal operation with the electable group size being + calculated from the metadata. +

          + Please keep in mind that this is an escape mechanism, only for use in + exceptional circumstances, to be used with care. Since JE HA is no + longer maintaining quorum requirements automatically, there is the + possibility that the simple majority of unavailable nodes could elect + their own Master, which would result in a diverging set of changes to + the same environment being made by multiple Masters. It is essential to + ensure that the problematic nodes are in fact down before making this + temporary configuration change. + + See the discussion in Appendix: + Managing a Failure of the Majority. +

          Name                                 Type     Mutable  Default
          "je.rep.electableGroupSizeOverride"  Integer  Yes      0
          +

          +
          +
          See Also:
          +
          QuorumPolicy, +Durability.ReplicaAckPolicy, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          NODE_PRIORITY

          +
          public static final java.lang.String NODE_PRIORITY
          +
          The election priority associated with this node. The election algorithm + for choosing a new master will pick the participating node that has the + most current set of log files. When there is a tie, the election + priority is used as a tie-breaker to select amongst these nodes. +

          + A priority of zero is used to ensure that this node is never elected + master, even if it has the most up to date log files. Note that the node + still votes for a Master and participates in quorum requirements. Please + use this option with care, since it means that some node with less + current log files could be elected master. As a result, this node would + be forced to rollback committed data and must be prepared to handle any + RollbackException exceptions that might be thrown. + +

          Name                    Type     Mutable  Default  Minimum  Maximum
          "je.rep.node.priority"  Integer  Yes      1        0        Integer.MAX_VALUE
          +

          +
          +
          See Also:
          +
          RollbackException, +Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          RUN_LOG_FLUSH_TASK

          +
          public static final java.lang.String RUN_LOG_FLUSH_TASK
          +
          Deprecated. as of 7.2. Log flushing can be disabled by setting EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL and EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL to zero. For compatibility + with earlier releases, if this parameter is specified as false, no log + flushing will be performed; in this case, EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL and EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL may not also be specified.
          +
          If true, JE HA (replication) will flush all committed transactions to disk at the specified time interval. This is of interest because the default durability for replicated transactions is Durability.COMMIT_NO_SYNC. The default for this behavior is true.

          + When using Durability.COMMIT_NO_SYNC, continued activity will + naturally cause the steady flush of committed transactions, but a pause + in activity may cause the latest commits to stay in memory. In such a + case, it is unlikely but possible that all members of the replication + group have these last transactions in memory and that no members have + persisted them to disk. A catastrophic failure of all nodes in the + replication group would cause a loss of these transactions, in this + unlikely scenario. This background flush task will reduce such a + possibility. +

          + Note that enabling this feature when using Durability.COMMIT_NO_SYNC, does not constitute a guarantee that + updates made by a transaction are persisted. For an explicit guarantee, + transactions should use Durability.COMMIT_SYNC or Durability.COMMIT_WRITE_NO_SYNC. These more stringent, persistent + Durability options can be set at the environment or per-transaction + scope. Using one of these Durability settings for a given transaction + will also flush all commits that occurred earlier in time. +

          Name                      Type     Mutable  Default
          "je.rep.runLogFlushTask"  Boolean  No       true
          +

          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + + + + + +
          +
        • +

          REPLAY_MAX_OPEN_DB_HANDLES

          +
          public static final java.lang.String REPLAY_MAX_OPEN_DB_HANDLES
          +
          The maximum number of most recently used database handles that + are kept open during the replay of the replication stream. + +

          Name                             Type  Mutable  Default  Minimum  Maximum
          "je.rep.replayMaxOpenDbHandles"  Int   Yes      10       1        -none-

          +
          +
          Since:
          +
          5.0.38
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        + + + +
          +
        • +

          HELPER_HOSTS

          +
          public static final java.lang.String HELPER_HOSTS
          +
          The string identifying one or more helper host and port pairs in + this format: +
          + hostname[:port][,hostname[:port]]*
          + 
          +

          Name                  Type    Mutable  Default
          "je.rep.helperHosts"  String  Yes      ""

          +
          +
          See Also:
          +
          setHelperHosts(java.lang.String), +getHelperHosts(), +Constant Field Values
          +
          +
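          A short sketch of supplying helper hosts when configuring a node; the group, node, and host names are placeholders:

              import com.sleepycat.je.rep.ReplicationConfig;

              class HelperHostsExample {
                  static ReplicationConfig configure() {
                      ReplicationConfig repConfig =
                          new ReplicationConfig("myGroup", "node3", "node3.example.com:5001");
                      // Two helpers; a port may be omitted when it is the default.
                      repConfig.setHelperHosts(
                          "node1.example.com:5001,node2.example.com:5001");
                      return repConfig;
                  }
              }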
        • +
        + + + +
          +
        • +

          REPLAY_DB_HANDLE_TIMEOUT

          +
          public static final java.lang.String REPLAY_DB_HANDLE_TIMEOUT
          +
          The maximum amount of time that an inactive database handle is kept open + during a replay of the replication stream. Handles that are inactive for + more than this time period are automatically closed. Note that this does + not impact any handles that may have been opened by the application. + +

          Name                              Type      Mutable  Default  Minimum  Maximum
          "je.rep.replayOpenHandleTimeout"  Duration  No       30 sec   1 sec    -none-

          +
          +
          Since:
          +
          5.0.38
          +
          See Also:
          +
          Time Duration + Properties, +Constant Field Values
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ReplicationMutableConfig

          +
          public ReplicationMutableConfig()
          +
          Create a ReplicationMutableConfig initialized with the system + default settings. Parameter defaults are documented with the string + constants in this class.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setDesignatedPrimary

          +
          public ReplicationMutableConfig setDesignatedPrimary(boolean isPrimary)
          +
          If isPrimary is true, designate this node as a Primary. This + setting only takes effect for electable nodes. The application must + ensure that exactly one electable node is designated to be a Primary at + any given time. Primary node configuration is only a concern when the + group has two electable nodes, and there cannot be a simple + majority. See the overview on configuring two + node groups.
          +
          +
          Parameters:
          +
          isPrimary - true if this node is to be made the Primary
          +
          Returns:
          +
          this
          +
          +
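          A minimal sketch for a two-node group, applying the setting to a live environment; repEnv is assumed to be the open handle on the node being designated:

              import com.sleepycat.je.rep.ReplicatedEnvironment;
              import com.sleepycat.je.rep.ReplicationMutableConfig;

              class DesignatePrimary {
                  /* Exactly one electable node may be Primary at any given time. */
                  static void designate(ReplicatedEnvironment repEnv, boolean isPrimary) {
                      ReplicationMutableConfig mc = repEnv.getRepMutableConfig();
                      mc.setDesignatedPrimary(isPrimary);
                      repEnv.setRepMutableConfig(mc);
                  }
              }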
        • +
        + + + +
          +
        • +

          getDesignatedPrimary

          +
          public boolean getDesignatedPrimary()
          +
          Determines whether this node is the currently designated Primary. See the overview on issues around two node groups.
          +
          +
          Returns:
          +
          true if this node is a Primary, false otherwise.
          +
          +
        • +
        + + + +
          +
        • +

          getElectableGroupSizeOverride

          +
          public int getElectableGroupSizeOverride()
          +
          Returns the value associated with the override. A value of zero means + that the number of electable nodes is determined as usual, that is, from + the contents of the group metadata.
          +
          +
          Returns:
          +
          the number of electable nodes as specified by the override
          +
          See Also:
          +
          ELECTABLE_GROUP_SIZE_OVERRIDE
          +
          +
        • +
        + + + +
          +
        • +

          setElectableGroupSizeOverride

          +
          public ReplicationMutableConfig setElectableGroupSizeOverride(int override)
          +
          Sets the size used to determine the number of electable nodes.
          +
          +
          Parameters:
          +
          override - the number of electable nodes. A value of zero means + that the number of electable nodes is determined as usual, that is, from + the contents of the group metadata.
          +
          Returns:
          +
          this
          +
          See Also:
          +
          ELECTABLE_GROUP_SIZE_OVERRIDE
          +
          +
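          A hedged sketch of applying the override on a surviving node, for example while a majority of the group is unreachable; passing zero restores normal quorum calculation:

              import com.sleepycat.je.rep.ReplicatedEnvironment;
              import com.sleepycat.je.rep.ReplicationMutableConfig;

              class QuorumOverride {
                  /* Compute quorums as if the group had `size` electable members. */
                  static void overrideQuorum(ReplicatedEnvironment repEnv, int size) {
                      ReplicationMutableConfig mc = repEnv.getRepMutableConfig();
                      mc.setElectableGroupSizeOverride(size);
                      repEnv.setRepMutableConfig(mc);
                  }
              }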
        • +
        + + + +
          +
        • +

          getNodePriority

          +
          public int getNodePriority()
          +
          Returns the election priority associated with the node.
          +
          +
          Returns:
          +
          the priority for this node
          +
          See Also:
          +
          NODE_PRIORITY
          +
          +
        • +
        + + + +
          +
        • +

          setNodePriority

          +
          public ReplicationMutableConfig setNodePriority(int priority)
          +
          Sets the election priority for the node. The algorithm for choosing a + new master will pick the participating node that has the most current + set of log files. When there is a tie, the priority is used as a + tie-breaker to select amongst these nodes. +

          + A priority of zero is used to ensure that a node is never elected master, even if it has the most current set of log files. Please use this option with caution, since it means that a node with less current log files could be elected master, potentially forcing this node to roll back data that had been committed.

          +
          +
          Parameters:
          +
          priority - the priority to be associated with the node. It must be + zero, or a positive number.
          +
          See Also:
          +
          NODE_PRIORITY
          +
          +
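          A sketch of favoring a well-provisioned node as the election tie-breaker; the group, node, and host names are placeholders:

              import com.sleepycat.je.rep.ReplicationConfig;

              class FavoredNode {
                  static ReplicationConfig configure() {
                      ReplicationConfig repConfig =
                          new ReplicationConfig("myGroup", "bigNode", "big.example.com:5001");
                      // Higher than the default of 1, so this node wins ties.
                      repConfig.setNodePriority(2);
                      return repConfig;
                  }
              }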
        • +
        + + + +
          +
        • +

          getHelperHosts

          +
          public java.lang.String getHelperHosts()
          +
          Returns the string identifying one or more helper host and port pairs in + this format: +
          + hostname[:port][,hostname[:port]]*
          + 
          + The port may be omitted if it is the default port.
          +
          +
          Returns:
          +
          the string representing the host port pairs
          +
          +
        • +
        + + + +
          +
        • +

          setHelperHosts

          +
          public ReplicationMutableConfig setHelperHosts(java.lang.String hostsAndPorts)
          +
          Identify one or more helper nodes by their host and port pairs in this + format: +
          + hostname[:port][,hostname[:port]]*
          + 
          + If the port is omitted, the default port defined by XXX is used.
          +
          +
          Parameters:
          +
          hostsAndPorts - the string representing the host and port pairs.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          setConfigParam

          +
          public ReplicationMutableConfig setConfigParam(java.lang.String paramName,
          +                                               java.lang.String value)
          +                                        throws java.lang.IllegalArgumentException
          +
          Set this configuration parameter with this value. Values are validated + before setting the parameter.
          +
          +
          Parameters:
          +
          paramName - the configuration parameter name, one of the String + constants in this class
          +
          value - the configuration value.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the paramName or value is invalid.
          +
          +
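          A runnable sketch of the eager validation described above, using parameter constants documented in this class; the rejected value relies on NODE_PRIORITY's documented minimum of zero:

              import com.sleepycat.je.rep.ReplicationMutableConfig;

              public class ConfigParamDemo {
                  public static void main(String[] args) {
                      ReplicationMutableConfig mc = new ReplicationMutableConfig();
                      mc.setConfigParam(ReplicationMutableConfig.REPLAY_MAX_OPEN_DB_HANDLES, "20");
                      try {
                          // Below the documented minimum of 0: rejected at set time.
                          mc.setConfigParam(ReplicationMutableConfig.NODE_PRIORITY, "-1");
                      } catch (IllegalArgumentException expected) {
                          System.err.println("rejected: " + expected.getMessage());
                      }
                      System.out.println(mc.getConfigParam(
                          ReplicationMutableConfig.REPLAY_MAX_OPEN_DB_HANDLES));
                  }
              }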
        • +
        + + + +
          +
        • +

          getConfigParam

          +
          public java.lang.String getConfigParam(java.lang.String paramName)
          +                                throws java.lang.IllegalArgumentException
          +
          Return the value for this parameter.
          +
          +
          Parameters:
          +
          paramName - a valid configuration parameter, one of the String + constants in this class.
          +
          Returns:
          +
          the configuration value.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the paramName is invalid.
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          List the configuration parameters and values that have been set + in this configuration object.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/ReplicationNode.html b/docs/java/com/sleepycat/je/rep/ReplicationNode.html new file mode 100644 index 0000000..3ebdf46 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/ReplicationNode.html @@ -0,0 +1,316 @@ + + + + + +ReplicationNode (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Interface ReplicationNode

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface ReplicationNode
      +
      An administrative view of a node in a replication group.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Abstract Methods 
        Modifier and TypeMethod and Description
        java.lang.String            getHostName()       Returns the host name associated with the node.
        java.lang.String            getName()           Returns the unique name associated with the node.
        int                         getPort()           Returns the port number associated with the node.
        java.net.InetSocketAddress  getSocketAddress()  The socket address used by other nodes in the replication group to communicate with this node.
        NodeType                    getType()           Returns the type associated with the node.
        +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getName

          +
          java.lang.String getName()
          +
          Returns the unique name associated with the node.
          +
          +
          Returns:
          +
          the name of the node
          +
          +
        • +
        + + + +
          +
        • +

          getType

          +
          NodeType getType()
          +
          Returns the type associated with the node.
          +
          +
          Returns:
          +
          the node type
          +
          +
        • +
        + + + +
          +
        • +

          getSocketAddress

          +
          java.net.InetSocketAddress getSocketAddress()
          +
          The socket address used by other nodes in the replication group to + communicate with this node.
          +
          +
          Returns:
          +
          the socket address
          +
          +
        • +
        + + + +
          +
        • +

          getHostName

          +
          java.lang.String getHostName()
          +
          Returns the host name associated with the node.
          +
          +
          Returns:
          +
          the host name of the node
          +
          +
        • +
        + + + +
          +
        • +

          getPort

          +
          int getPort()
          +
          Returns the port number associated with the node.
          +
          +
          Returns:
          +
          the port number of the node
          +
          +
        • +
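          For illustration, a sketch that prints this administrative view for every node; it assumes the group is obtained from an open ReplicatedEnvironment via getGroup() and enumerated with getNodes():

              import com.sleepycat.je.rep.ReplicatedEnvironment;
              import com.sleepycat.je.rep.ReplicationGroup;
              import com.sleepycat.je.rep.ReplicationNode;

              class ListGroupNodes {
                  static void list(ReplicatedEnvironment repEnv) {
                      ReplicationGroup group = repEnv.getGroup();
                      for (ReplicationNode node : group.getNodes()) {
                          System.out.printf("%s [%s] %s:%d%n",
                              node.getName(), node.getType(),
                              node.getHostName(), node.getPort());
                      }
                  }
              }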
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/RestartRequiredException.html b/docs/java/com/sleepycat/je/rep/RestartRequiredException.html new file mode 100644 index 0000000..0375c62 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/RestartRequiredException.html @@ -0,0 +1,360 @@ + + + + + +RestartRequiredException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class RestartRequiredException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Direct Known Subclasses:
      +
      InsufficientLogException, MasterReplicaTransitionException, RollbackException, RollbackProhibitedException
      +
      +
      +
      +
      public abstract class RestartRequiredException
      +extends EnvironmentFailureException
      +
      RestartRequiredException serves as the base class for all exceptions that make it impossible for HA to proceed without some form of corrective action on the part of the user, followed by a restart of the application. The corrective action may involve an increase in resources used by the application, a JE configuration change, discarding cached state, etc. The error message details the nature of the problem.
      +
      +
      See Also:
      +
      Serialized Form
      +
      +
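      A hedged sketch of the close/correct/restart cycle the class calls for; openEnv stands in for whatever application code builds the ReplicatedEnvironment:

          import java.util.function.Supplier;
          import com.sleepycat.je.rep.ReplicatedEnvironment;
          import com.sleepycat.je.rep.RestartRequiredException;

          class RestartLoop {
              static void serve(Supplier<ReplicatedEnvironment> openEnv) {
                  while (true) {
                      ReplicatedEnvironment repEnv = openEnv.get();
                      try {
                          // ... normal use of the environment ...
                          return;
                      } catch (RestartRequiredException rre) {
                          // Handles are invalid: close, apply the corrective
                          // action described in the message, then reopen.
                          repEnv.close();
                      }
                  }
              }
          }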
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + + + + + + + + + + + + + +
        Constructors 
        ModifierConstructor and Description
         RestartRequiredException(com.sleepycat.je.dbi.EnvironmentImpl envImpl, + com.sleepycat.je.dbi.EnvironmentFailureReason reason) 
         RestartRequiredException(com.sleepycat.je.dbi.EnvironmentImpl envImpl, + com.sleepycat.je.dbi.EnvironmentFailureReason reason, + java.lang.Exception cause) 
         RestartRequiredException(com.sleepycat.je.dbi.EnvironmentImpl envImpl, + com.sleepycat.je.dbi.EnvironmentFailureReason reason, + java.lang.String msg) 
        protected RestartRequiredException(java.lang.String message, + RestartRequiredException cause) +
        For internal use only.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          RestartRequiredException

          +
          public RestartRequiredException(com.sleepycat.je.dbi.EnvironmentImpl envImpl,
          +                                com.sleepycat.je.dbi.EnvironmentFailureReason reason)
          +
        • +
        + + + +
          +
        • +

          RestartRequiredException

          +
          public RestartRequiredException(com.sleepycat.je.dbi.EnvironmentImpl envImpl,
          +                                com.sleepycat.je.dbi.EnvironmentFailureReason reason,
          +                                java.lang.Exception cause)
          +
        • +
        + + + +
          +
        • +

          RestartRequiredException

          +
          public RestartRequiredException(com.sleepycat.je.dbi.EnvironmentImpl envImpl,
          +                                com.sleepycat.je.dbi.EnvironmentFailureReason reason,
          +                                java.lang.String msg)
          +
        • +
        + + + +
          +
        • +

          RestartRequiredException

          +
          protected RestartRequiredException(java.lang.String message,
          +                                   RestartRequiredException cause)
          +
          For internal use only.
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/RollbackException.html b/docs/java/com/sleepycat/je/rep/RollbackException.html new file mode 100644 index 0000000..3f62bb2 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/RollbackException.html @@ -0,0 +1,358 @@ + + + + + +RollbackException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class RollbackException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class RollbackException
      +extends RestartRequiredException
      +
      This asynchronous exception indicates that a new master has been selected, that this Replica's log is ahead of the current Master, and that the Replica was unable to roll back without a recovery. As a consequence, it is possible that one or more of the most recently committed transactions may need to be rolled back before the Replica can synchronize its state with that of the current Master. Note that any CommitTokens obtained before restarting this Replica shouldn't be used after RollbackException is thrown, because the token may no longer exist on the current Master node due to failover processing. +

      + Existing ReplicatedEnvironment, and consequently Database + handles, are invalidated as a result of this exception. The application must + close all old handles and create new handles before it can proceed. The + actual rollback of any recently committed transactions is done when the + application re-instantiates and thereby reopens the ReplicatedEnvironment. The application is responsible for discarding and + recreating any transient state that may be associated with the committed + transactions that were rolled back. getEarliestTransactionId() and + getEarliestTransactionCommitTime() provide information to help + determine which transactions might be rolled back. Note that it is possible + that no committed transactions have been rolled back and that the + application need do no adjustments, in which case + getEarliestTransactionCommitTime() will return null. +

      + This exception should be encountered relatively infrequently in practice, + since the election mechanism favors nodes with the most advanced log when + deciding upon a master. The exception, due to its nature, can only be + encountered when the node is in the Replica state, or the node + is trying to transition to the Replica state. +

      + Use of weak durability requirements like + Durability.ReplicaAckPolicy.NONE or a + ReplicationMutableConfig.NODE_PRIORITY of zero + increases the likelihood of this exception.
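      A sketch of the recovery sequence described above; reopen stands in for application code that re-instantiates the ReplicatedEnvironment (which performs the actual rollback):

          import java.util.Date;
          import com.sleepycat.je.rep.ReplicatedEnvironment;
          import com.sleepycat.je.rep.RollbackException;

          class HardRollbackHandler {
              static void handle(ReplicatedEnvironment repEnv,
                                 RollbackException re, Runnable reopen) {
                  Long commitTime = re.getEarliestTransactionCommitTime();
                  if (commitTime != null) {
                      // Discard/recreate transient state tied to commits since:
                      System.err.println("rolling back commits since "
                          + new Date(commitTime) + ", earliest txn "
                          + re.getEarliestTransactionId());
                  }
                  repEnv.close();  // all old handles must be closed
                  reopen.run();    // reopening performs the rollback
              }
          }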

      +
      +
      See Also:
      +
      RollbackProhibitedException, +Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.Long  getEarliestTransactionCommitTime()  Return the time in milliseconds of the earliest transaction commit that has been rolled back.
        long            getEarliestTransactionId()          Return the id of the earliest transaction commit that has been rolled back.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getEarliestTransactionCommitTime

          +
          public java.lang.Long getEarliestTransactionCommitTime()
          +
          Return the time in milliseconds of the earliest transaction commit that + has been rolled back. May return null if no commits have been rolled + back.
          +
        • +
        + + + +
          +
        • +

          getEarliestTransactionId

          +
          public long getEarliestTransactionId()
          +
          Return the id of the earliest transaction commit that has been + rolled back. 0 is returned if no commits have been rolled back.
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/RollbackProhibitedException.html b/docs/java/com/sleepycat/je/rep/RollbackProhibitedException.html new file mode 100644 index 0000000..cd5443b --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/RollbackProhibitedException.html @@ -0,0 +1,397 @@ + + + + + +RollbackProhibitedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class RollbackProhibitedException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class RollbackProhibitedException
      +extends RestartRequiredException
      +
      This exception may be thrown by a Replica during the + replication stream sync-up phase of startup. It indicates that a syncup + cannot proceed without undoing a number of committed transactions that + exceeds the limit defined by ReplicationConfig.TXN_ROLLBACK_LIMIT. +

      + It is rare for committed transactions to be rolled back during a + sync-up. One way this can happen is if a replication group has been + executing with a Durability policy that specifies a + ReplicaAckPolicy of + NONE. +

      + When ReplicaAckPolicy.NONE is specified, transactions can commit on the + master without receiving any acknowledgments from replica nodes. Using that + policy, it is possible that if the master node crashes at a given time, and + the group fails over and continues on with a new master, the old master's + environment will have transactions on disk that were never replicated and + received by other nodes. When this old master comes back up and rejoins the + group as a replica, it will have committed transactions that need to be + rolled back. +

      + If the number of committed transactions to be rolled back is less than or equal to the limit specified by ReplicationConfig.TXN_ROLLBACK_LIMIT, JE will automatically truncate the environment log to remove the unreplicated transactions, and will throw a RollbackException. The application only needs to reinstantiate the ReplicatedEnvironment and proceed. If the limit specified by ReplicationConfig.TXN_ROLLBACK_LIMIT is exceeded, the application will receive a RollbackProhibitedException to indicate that manual intervention is required. +

      + The RollbackProhibitedException lets the user interject application-specific processing before the log is truncated. The exception message and getter methods indicate the number of transactions that must be rolled back, and the time and id of the earliest targeted transaction; the user can use this information to make any desired application adjustments. The application may then manually truncate the log using DbTruncateLog. +

      + Note that any CommitTokens obtained before restarting this + Replica shouldn't be used after + RollbackProhibitedException is thrown because the token may no + longer exist on the current Master node.
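      A sketch of surfacing the manual-intervention details; all getters are documented below, and the follow-up truncation with the DbTruncateLog utility is left to the operator:

          import com.sleepycat.je.rep.RollbackProhibitedException;

          class ManualTruncationReport {
              static void report(RollbackProhibitedException rpe) {
                  System.err.printf(
                      "truncate log file 0x%x at offset %d; earliest txn %d%n",
                      rpe.getTruncationFileNumber(),
                      rpe.getTruncationFileOffset(),
                      rpe.getEarliestTransactionId());
                  // After application-specific adjustments, run DbTruncateLog
                  // with these values and restart the node.
              }
          }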

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getTruncationFileNumber

          +
          public long getTruncationFileNumber()
          +
        • +
        + + + +
          +
        • +

          getTruncationFileOffset

          +
          public long getTruncationFileOffset()
          +
          The JE log must be truncated to this offset in the specified + file in order for this node to rejoin the group.
          +
        • +
        + + + +
          +
        • +

          getEarliestTransactionCommitTime

          +
          public java.lang.Long getEarliestTransactionCommitTime()
          +
          Return the time in milliseconds of the earliest transaction commit that will be rolled back if the log is truncated to the location specified by getTruncationFileNumber() and getTruncationFileOffset().
          +
        • +
        + + + +
          +
        • +

          getEarliestTransactionId

          +
          public long getEarliestTransactionId()
          +
          Return the id of the earliest transaction commit that will be rolled back if the log is truncated to the location specified by getTruncationFileNumber() and getTruncationFileOffset().
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/StateChangeEvent.html b/docs/java/com/sleepycat/je/rep/StateChangeEvent.html new file mode 100644 index 0000000..0a4aa9d --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/StateChangeEvent.html @@ -0,0 +1,313 @@ + + + + + +StateChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class StateChangeEvent

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class StateChangeEvent
      +extends java.lang.Object
      +implements java.io.Serializable
      +
      Communicates the state change at a node + to the StateChangeListener. There is a distinct instance of this event + representing each state change at a node. +

      + Each event instance may have zero or more state change related exceptions + associated with it. The exceptions are of type StateChangeException. + StateChangeException has a method called StateChangeException.getEvent() that can be used to associate an event with + an exception.

      +
      +
      See Also:
      +
      StateChangeListener, +Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        long                         getEventTime()       Returns the time (in nanosecond units) the event occurred, as reported by System.nanoTime().
        java.lang.String             getMasterNodeName()  Returns the node name identifying the master at the time of the event.
        ReplicatedEnvironment.State  getState()           Returns the state that the node has transitioned to.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getState

          +
          public ReplicatedEnvironment.State getState()
          +
          Returns the state that the node has transitioned to.
          +
          +
          Returns:
          +
          the new State resulting from this event
          +
          +
        • +
        + + + +
          +
        • +

          getEventTime

          +
          public long getEventTime()
          +
          Returns the time (in nanosecond units) the event occurred, as reported by System.nanoTime().
          +
          +
          Returns:
          +
          the time the event occurred, in nanoseconds
          +
          +
        • +
        + + + +
          +
        • +

          getMasterNodeName

          +
          public java.lang.String getMasterNodeName()
          +                                   throws java.lang.IllegalStateException
          +
          Returns the node name identifying the master at the time of the event.
          +
          +
          Returns:
          +
          the master node name
          +
          Throws:
          +
          java.lang.IllegalStateException - if the node is in the + DETACHED or UNKNOWN state.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/StateChangeException.html b/docs/java/com/sleepycat/je/rep/StateChangeException.html new file mode 100644 index 0000000..9a7c0f2 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/StateChangeException.html @@ -0,0 +1,349 @@ + + + + + +StateChangeException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class StateChangeException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      Direct Known Subclasses:
      +
      MasterStateException, ReplicaStateException, ReplicaWriteException, UnknownMasterException
      +
      +
      +
      +
      public abstract class StateChangeException
      +extends OperationFailureException
      +
      Provides a synchronous mechanism for informing an application about a change in the state of the replication node. StateChangeException is an abstract class, with subtypes for each type of state transition. +

      + A single state change can result in multiple state change exceptions (one + per thread operating against the environment). Each exception is associated + with the event that provoked the exception. The application can use this + association to ensure that each such event is processed just once.

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + +
        Constructors 
        ModifierConstructor and Description
        protected StateChangeException(java.lang.String message, + java.lang.Exception reason) +
        Used when no state change event is available
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        StateChangeEvent  getEvent()  Returns the event that resulted in this exception.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          StateChangeException

          +
          protected StateChangeException(java.lang.String message,
          +                               java.lang.Exception reason)
          +
          Used when no state change event is available
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getEvent

          +
          public StateChangeEvent getEvent()
          +
          Returns the event that resulted in this exception.
          +
          +
          Returns:
          +
          the state change event
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/StateChangeListener.html b/docs/java/com/sleepycat/je/rep/StateChangeListener.html new file mode 100644 index 0000000..fbd0711 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/StateChangeListener.html @@ -0,0 +1,262 @@ + + + + + +StateChangeListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Interface StateChangeListener

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface StateChangeListener
      +
      An asynchronous mechanism for tracking the State of the replicated environment and + choosing how to route database operations. State determines which + operations are currently permitted on the node. For example, only the MASTER node can execute write + operations. +

      + The Listener is registered with the replicated environment using ReplicatedEnvironment.setStateChangeListener(StateChangeListener). There is at most one Listener associated with the actual environment (not an Environment handle) at any given instant in time. +

      + See the + examples for information on different approaches toward routing + database operations and an example of using the StateChangeListener.

      +
      +
      See Also:
      +
      Managing + Write Requests at a Replica
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          stateChange

          +
          void stateChange(StateChangeEvent stateChangeEvent)
          +          throws java.lang.RuntimeException
          +
          The notification method. It is initially invoked when the StateChangeListener is first associated with the ReplicatedEnvironment via the ReplicatedEnvironment.setStateChangeListener(StateChangeListener) + method and subsequently each time there is a state change. +

          + This method should do the minimal amount of work, queuing any resource-intensive operations for processing by another thread before returning to the caller, so that it does not unduly delay the other housekeeping operations performed by the internal thread that invokes this method. +

          +
          +
          Parameters:
          +
          stateChangeEvent - the new state change event
          +
          Throws:
          +
          java.lang.RuntimeException - Any uncaught exceptions will result in the + shutdown of the ReplicatedEnvironment.
          +
          +
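          Following the guidance above, a minimal listener that only queues the event and lets another thread do the real work; the class name is illustrative:

              import java.util.concurrent.BlockingQueue;
              import java.util.concurrent.LinkedBlockingQueue;
              import com.sleepycat.je.rep.StateChangeEvent;
              import com.sleepycat.je.rep.StateChangeListener;

              class QueuingListener implements StateChangeListener {
                  private final BlockingQueue<StateChangeEvent> events =
                      new LinkedBlockingQueue<>();

                  @Override
                  public void stateChange(StateChangeEvent stateChangeEvent) {
                      events.add(stateChangeEvent);  // cheap; no blocking work here
                  }

                  BlockingQueue<StateChangeEvent> events() {
                      return events;
                  }
              }

          It would be registered with ReplicatedEnvironment.setStateChangeListener(new QueuingListener()), with a separate consumer thread draining events().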
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/SyncupProgress.html b/docs/java/com/sleepycat/je/rep/SyncupProgress.html new file mode 100644 index 0000000..7303143 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/SyncupProgress.html @@ -0,0 +1,402 @@ + + + + + +SyncupProgress (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Enum SyncupProgress

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + + + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        CHECK_FOR_ROLLBACK +
        A matchpoint has been found, and the replica is determining whether it + has to rollback any uncommitted replicated records applied from the + previous master.
        +
        DO_ROLLBACK +
        The replica is rolling back uncommitted replicated records applied from + the previous master.
        +
        END +
        Replication stream syncup has ended.
        +
        FIND_MATCHPOINT +
        Syncup is starting up.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static SyncupProgress    valueOf(java.lang.String name)  Returns the enum constant of this type with the specified name.
        static SyncupProgress[]  values()                        Returns an array containing the constants of this enum type, in the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Detail

        + + + +
          +
        • +

          FIND_MATCHPOINT

          +
          public static final SyncupProgress FIND_MATCHPOINT
          +
          Syncup is starting up. The replica and feeder are searching for the + most recent common shared point in the replication stream.
          +
        • +
        + + + +
          +
        • +

          CHECK_FOR_ROLLBACK

          +
          public static final SyncupProgress CHECK_FOR_ROLLBACK
          +
          A matchpoint has been found, and the replica is determining whether it + has to rollback any uncommitted replicated records applied from the + previous master.
          +
        • +
        + + + +
          +
        • +

          DO_ROLLBACK

          +
          public static final SyncupProgress DO_ROLLBACK
          +
          The replica is rolling back uncommitted replicated records applied from + the previous master.
          +
        • +
        + + + +
          +
        • +

          END

          +
          public static final SyncupProgress END
          +
          Replication stream syncup has ended.
          +
        • +
        +
      • +
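      As a sketch of observing these phases, assuming JE's generic ProgressListener callback and ReplicationConfig.setSyncupProgressListener (both treated here as assumptions rather than verified signatures):

          import com.sleepycat.je.ProgressListener;
          import com.sleepycat.je.rep.ReplicationConfig;
          import com.sleepycat.je.rep.SyncupProgress;

          class SyncupLogger {
              static void install(ReplicationConfig repConfig) {
                  repConfig.setSyncupProgressListener(
                      new ProgressListener<SyncupProgress>() {
                          @Override
                          public boolean progress(SyncupProgress phase,
                                                  long n, long total) {
                              System.out.println("syncup: " + phase);
                              return true;  // false would halt the syncup
                          }
                      });
              }
          }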
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static SyncupProgress[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (SyncupProgress c : SyncupProgress.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static SyncupProgress valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/TimeConsistencyPolicy.html b/docs/java/com/sleepycat/je/rep/TimeConsistencyPolicy.html new file mode 100644 index 0000000..20b862f --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/TimeConsistencyPolicy.html @@ -0,0 +1,478 @@ + + + + + +TimeConsistencyPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class TimeConsistencyPolicy

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        static java.lang.String  NAME  The name:"TimeConsistencyPolicy" associated with this policy.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        TimeConsistencyPolicy(long permissibleLag, + java.util.concurrent.TimeUnit permissibleLagUnit, + long timeout, + java.util.concurrent.TimeUnit timeoutUnit) +
        Specifies the amount of time by which the Replica is allowed to lag the + master when initiating a transaction.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        boolean           equals(java.lang.Object obj)
        java.lang.String  getName()                                               Returns the name:"TimeConsistencyPolicy", associated with this policy.
        long              getPermissibleLag(java.util.concurrent.TimeUnit unit)  Returns the allowed time lag associated with this policy.
        long              getTimeout(java.util.concurrent.TimeUnit unit)         Returns the consistency timeout associated with this policy.
        int               hashCode()
        java.lang.String  toString()
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          NAME

          +
          public static final java.lang.String NAME
          +
          The name:"TimeConsistencyPolicy" associated with this policy. The name can be used when + constructing policy property values for use in je.properties files.
          +
          +
          See Also:
          +
          Constant Field Values
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          TimeConsistencyPolicy

          +
          public TimeConsistencyPolicy(long permissibleLag,
          +                             java.util.concurrent.TimeUnit permissibleLagUnit,
          +                             long timeout,
          +                             java.util.concurrent.TimeUnit timeoutUnit)
          +
          Specifies the amount of time by which the Replica is allowed to lag the + master when initiating a transaction. The Replica ensures that all + transactions that were committed on the Master before this lag interval + are available at the Replica before allowing a transaction to proceed + with Environment.beginTransaction. + + Effective use of this policy requires that the clocks on the Master and + Replica are synchronized by using a protocol like NTP.
          +
          +
          Parameters:
          +
          permissibleLag - the time interval by which the Replica may be out + of date with respect to the Master when a transaction is initiated on + the Replica.
          +
          permissibleLagUnit - the TimeUnit for the permissibleLag + parameter.
          +
          timeout - the amount of time to wait for the consistency to be + reached.
          +
          timeoutUnit - the TimeUnit for the timeout parameter.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the permissibleLagUnit or + timeoutUnit is null.
          +
          +
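          A sketch of using the policy for a lag-bounded read on a Replica; the 2-second lag and 5-second timeout are arbitrary illustrative values:

              import java.util.concurrent.TimeUnit;
              import com.sleepycat.je.Transaction;
              import com.sleepycat.je.TransactionConfig;
              import com.sleepycat.je.rep.ReplicatedEnvironment;
              import com.sleepycat.je.rep.TimeConsistencyPolicy;

              class LagBoundedRead {
                  static Transaction begin(ReplicatedEnvironment repEnv) {
                      TimeConsistencyPolicy policy = new TimeConsistencyPolicy(
                          2, TimeUnit.SECONDS,   // permissible lag behind the master
                          5, TimeUnit.SECONDS);  // how long to wait for consistency
                      TransactionConfig txnConfig = new TransactionConfig();
                      txnConfig.setConsistencyPolicy(policy);
                      return repEnv.beginTransaction(null, txnConfig);
                  }
              }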
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          getPermissibleLag

          +
          public long getPermissibleLag(java.util.concurrent.TimeUnit unit)
          +
          Returns the allowed time lag associated with this policy.
          +
          +
          Parameters:
          +
          unit - the TimeUnit of the returned value.
          +
          Returns:
          +
          the permissible lag time in the specified unit.
          +
          +
        • +
        + + + +
          +
        • +

          getTimeout

          +
          public long getTimeout(java.util.concurrent.TimeUnit unit)
          +
          Returns the consistency timeout associated with this policy.
          +
          +
          Specified by:
          +
          getTimeout in interface ReplicaConsistencyPolicy
          +
          Parameters:
          +
          unit - the TimeUnit of the returned value.
          +
          Returns:
          +
          the consistency timeout in the specified unit.
          +
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          +
          Overrides:
          +
          hashCode in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object obj)
          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/UnknownMasterException.html b/docs/java/com/sleepycat/je/rep/UnknownMasterException.html new file mode 100644 index 0000000..4fc3b1b --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/UnknownMasterException.html @@ -0,0 +1,351 @@ + + + + + +UnknownMasterException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep
    +

    Class UnknownMasterException

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class UnknownMasterException
      +extends StateChangeException
      +
      Indicates that the underlying operation requires communication with a + Master, but that a Master was not available. +

      + This exception typically indicates there is a system-level problem. It could indicate, for example, that a sufficient number of nodes are not available to hold an election and elect a Master, or that this node was having problems with the network and was unable to communicate with other nodes. +

      + The application can choose to retry the operation, potentially logging the + problem, until the underlying system level problem has been addressed.
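      A sketch of the bounded retry the paragraph suggests; the attempt count and back-off are arbitrary:

          import com.sleepycat.je.rep.UnknownMasterException;

          class RetryUntilMaster {
              static void run(Runnable operation) throws InterruptedException {
                  for (int attempt = 0; attempt < 10; attempt++) {
                      try {
                          operation.run();
                          return;
                      } catch (UnknownMasterException ume) {
                          System.err.println("no master yet: " + ume.getMessage());
                          Thread.sleep(1000);  // back off before retrying
                      }
                  }
                  throw new IllegalStateException("master still unknown after retries");
              }
          }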

      +
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + + + + + +
        Constructors 
        Constructor and Description
        UnknownMasterException(com.sleepycat.je.txn.Locker locker, + StateChangeEvent stateChangeEvent) 
        UnknownMasterException(java.lang.String message) +
        Used when the inability to determine a master is not related to a + state change.
        +
        UnknownMasterException(java.lang.String message, + java.lang.Exception reason) +
        Used when the inability to determine a master is not related to a state change but rather to an inability to communicate with a node identified as a master.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + +
          +
        • + + +

          Methods inherited from class java.lang.Throwable

          +addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          UnknownMasterException

          +
          public UnknownMasterException(com.sleepycat.je.txn.Locker locker,
          +                              StateChangeEvent stateChangeEvent)
          +
        • +
        + + + +
          +
        • +

          UnknownMasterException

          +
          public UnknownMasterException(java.lang.String message)
          +
          Used when the inability to determine a master is not related to a + state change.
          +
        • +
        + + + +
          +
        • +

          UnknownMasterException

          +
          public UnknownMasterException(java.lang.String message,
          +                              java.lang.Exception reason)
          +
          Used when the inability to determine a master is not related to a state change but rather to an inability to communicate with a node identified as a master. The reason contains further explanation.
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/Arbiter.html b/docs/java/com/sleepycat/je/rep/arbiter/Arbiter.html new file mode 100644 index 0000000..f17ebc4 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/Arbiter.html @@ -0,0 +1,406 @@ + + + + + +Arbiter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep.arbiter
    +

    Class Arbiter

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class Arbiter
      +extends java.lang.Object
      +
      Provides a mechanism to allow write availability for the replication group even when the number of replication nodes is less than a majority. The main use of an Arbiter is when the replication group consists of two nodes. The addition of an Arbiter to the replication group allows one node to fail while retaining write availability with ACK durability of SIMPLE_MAJORITY. The Arbiter acknowledges the transaction, but does not retain a copy of the data. The Arbiter persists a small amount of state to ensure that only the replication nodes that contain the Arbiter-acknowledged transactions may become a Master. +

      + The Arbiter node participates in elections and may acknowledge transaction + commits. +

      + The Arbiter state is as follows: + UNKNOWN [ UNKNOWN | REPLICA]+ DETACHED

      +
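      A hedged sketch of standing up an Arbiter for a two-node group; the paths, names, and the availability of setHelperHosts on ArbiterConfig are assumptions:

          import com.sleepycat.je.rep.arbiter.Arbiter;
          import com.sleepycat.je.rep.arbiter.ArbiterConfig;

          public class ArbiterMain {
              public static void main(String[] args) {
                  ArbiterConfig config = new ArbiterConfig();
                  config.setArbiterHome("/var/je/arbiter");         // hypothetical path
                  config.setGroupName("myGroup");                   // hypothetical group
                  config.setNodeName("arbiter1");
                  config.setNodeHostPort("arb.example.com:5002");
                  config.setHelperHosts("node1.example.com:5001");  // assumed setter
                  // Blocks until a connection to the current master is made.
                  Arbiter arbiter = new Arbiter(config);
                  Runtime.getRuntime().addShutdownHook(new Thread(arbiter::shutdown));
              }
          }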
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          Arbiter

          +
          public Arbiter(ArbiterConfig arbiterConfig)
          +        throws EnvironmentNotFoundException,
          +               EnvironmentLockedException,
          +               DatabaseException,
          +               java.lang.IllegalArgumentException
          +
          An Arbiter used in elections and transaction acknowledgments. The constructor returns when a connection to the current master replication node is made. The Arbiter.shutdown() method is used to shut down the threads that run as part of the Arbiter.
          +
          +
          Parameters:
          +
          arbiterConfig - Configuration parameters for the Arbiter.
          +
          Throws:
          +
          EnvironmentNotFoundException - if the environment does not exist
          +
          EnvironmentLockedException - when an environment cannot be opened + because another Arbiter has the environment open.
          +
          DatabaseException - problem establishing connection to the master.
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified, + for example, an invalid ArbiterConfig parameter.
          +
          +
        • +
        +
      • +
      + + +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/ArbiterConfig.html b/docs/java/com/sleepycat/je/rep/arbiter/ArbiterConfig.html new file mode 100644 index 0000000..827c502 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/ArbiterConfig.html @@ -0,0 +1,680 @@ + + + + + +ArbiterConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep.arbiter
    +

    Class ArbiterConfig

    +
    +
    + +
    + +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ArbiterConfig

          +
          public ArbiterConfig()
          +
          Arbiter configuration.
          +
        • +
        + + + +
          +
        • +

          ArbiterConfig

          +
          public ArbiterConfig(java.util.Properties props)
          +
          Arbiter configuration.
          +
          +
          Parameters:
          +
          props - to initialize configuration object.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getArbiterHome

          +
          public java.lang.String getArbiterHome()
          +
          Gets the Arbiter home directory.
          +
          +
          Returns:
          +
          Path of the Arbiter home directory.
          +
          +
        • +
        + + + +
          +
        • +

          setArbiterHome

          +
          public void setArbiterHome(java.lang.String arbiterHome)
          +
          Sets the Arbiter Home directory
          +
          +
          Parameters:
          +
          arbiterHome - Path of the Arbiter home directory.
          +
          +
        • +
        + + + +
          +
        • +

          setNodeName

          +
          public ArbiterConfig setNodeName(java.lang.String nodeName)
          +                          throws java.lang.IllegalArgumentException
          +
          Sets the name to be associated with this Arbiter. It must + be unique within the group. When the Arbiter is + instantiated and joins the replication group, a check is done to ensure + that the name is unique, and a + RestartRequiredException is thrown if it is + not.
          +
          +
          Parameters:
          +
          nodeName - the name of this arbiter.
          +
          Throws:
          +
          java.lang.IllegalArgumentException
          +
          +
        • +
        + + + +
          +
        • +

          getNodeName

          +
          public java.lang.String getNodeName()
          +
          Returns the unique name associated with this Arbiter.
          +
          +
          Returns:
          +
          the Arbiter name
          +
          +
        • +
        + + + +
          +
        • +

          setGroupName

          +
          public ArbiterConfig setGroupName(java.lang.String groupName)
          +                           throws java.lang.IllegalArgumentException
          +
Sets the name for the replication group. The name must consist of + alphanumeric characters only and must not be empty.
          +
          +
          Parameters:
          +
groupName - the alphanumeric string representing the name.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the string name is not valid.
          +
          +
        • +
        + + + +
          +
        • +

          getGroupName

          +
          public java.lang.String getGroupName()
          +
          Gets the name associated with the replication group.
          +
          +
          Returns:
          +
          the name of this replication group.
          +
          +
        • +
        + + + +
          +
        • +

          setNodeHostPort

          +
          public ArbiterConfig setNodeHostPort(java.lang.String hostPort)
          +
Sets the hostname and port associated with this Arbiter. The hostname + and port combination is denoted by a string of the form: +
          +  hostname[:port]
          + 
          + The port must be outside the range of "Well Known Ports" + (zero through 1023).
          +
          +
          Parameters:
          +
          hostPort - the string containing the hostname and port as above.
          +
          +
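As an illustration (the host name is hypothetical), a conforming value can be set as follows; per the format above, the port may also be omitted for a bare hostname.

import com.sleepycat.je.rep.arbiter.ArbiterConfig;

class HostPortExample {
    static void configure(ArbiterConfig config) {
        // 5004 is outside the "Well Known Ports" range (0-1023).
        config.setNodeHostPort("arb-host.example.com:5004");
    }
}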
        • +
        + + + +
          +
        • +

          getNodeHostPort

          +
          public java.lang.String getNodeHostPort()
          +
Returns the hostname and port associated with this node. The hostname + and port combination is denoted by a string of the form: +
          +  hostname:port
          + 
          +
          +
          Returns:
          +
          the hostname and port string of this Arbiter.
          +
          +
        • +
        + + + +
          +
        • +

          setUnknownStateTimeout

          +
          public ArbiterConfig setUnknownStateTimeout(long timeout,
          +                                            java.util.concurrent.TimeUnit unit)
          +                                     throws java.lang.IllegalArgumentException
          +
The time to wait for the discovery of the Master during the instantiation + of the Arbiter. If no Master is found within the timeout period, + the Arbiter constructor returns with the Arbiter in the UNKNOWN state.
          +
          +
          Parameters:
          +
timeout - The unknown state timeout. A value of 0 turns off + the unknown state timeout, in which case the creation of the Arbiter waits until + a Master is found.
          +
          unit - the TimeUnit of the timeout value. May be null only + if timeout is zero.
          +
          Returns:
          +
          this
          +
          Throws:
          +
          java.lang.IllegalArgumentException - If the value of timeout is negative
          +
          +
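A brief sketch of the two documented modes, bounded wait and indefinite wait; the 30-second figure is an arbitrary illustration.

import java.util.concurrent.TimeUnit;
import com.sleepycat.je.rep.arbiter.ArbiterConfig;

class UnknownStateTimeoutExample {
    static void configure(ArbiterConfig config) {
        // Bounded wait: if no Master is found within 30 seconds, the
        // Arbiter constructor returns with the Arbiter in the UNKNOWN state.
        config.setUnknownStateTimeout(30, TimeUnit.SECONDS);

        // Indefinite wait: 0 turns the timeout off; null is permitted
        // for the unit only when the timeout is zero.
        // config.setUnknownStateTimeout(0, null);
    }
}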
        • +
        + + + +
          +
        • +

          getUnknownStateTimeout

          +
          public long getUnknownStateTimeout(java.util.concurrent.TimeUnit unit)
          +
          Returns the Unknown state timeout. + +

          A value of 0 means Unknown state timeouts are not configured.

          +
          +
          Parameters:
          +
          unit - the TimeUnit of the returned value. May not be null.
          +
          Returns:
          +
The unknown state timeout.
          +
          +
        • +
        + + + +
          +
        • +

          setHeartbeatInterval

          +
          public ArbiterConfig setHeartbeatInterval(int millis)
          +
          Sets the heartbeat interval.
          +
          +
          Parameters:
          +
          millis - Interval in milliseconds.
          +
          Returns:
          +
          this
          +
          +
        • +
        + + + +
          +
        • +

          getHeartbeatInterval

          +
          public int getHeartbeatInterval()
          +
          Gets the heartbeat interval in milliseconds.
          +
          +
          Returns:
          +
          Heartbeat interval.
          +
          +
        • +
        + + + +
          +
        • +

          setConfigParam

          +
          public ArbiterConfig setConfigParam(java.lang.String paramName,
          +                                    java.lang.String value)
          +                             throws java.lang.IllegalArgumentException
          +
          Documentation inherited from ArbiterMutableConfig.setConfigParam.
          +
          +
          Throws:
          +
          java.lang.IllegalArgumentException
          +
          +
        • +
        + + + + + + + +
          +
        • +

          setLoggingHandler

          +
          public ArbiterConfig setLoggingHandler(java.util.logging.Handler handler)
          +
        • +
        + + + +
          +
        • +

          getLoggingHandler

          +
          public java.util.logging.Handler getLoggingHandler()
          +
          Returns the custom java.util.logging.Handler specified by the + application.
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
Displays the configuration values.
          +
          +
          Overrides:
          +
          toString in class ArbiterMutableConfig
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/ArbiterMutableConfig.html b/docs/java/com/sleepycat/je/rep/arbiter/ArbiterMutableConfig.html new file mode 100644 index 0000000..fd31683 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/ArbiterMutableConfig.html @@ -0,0 +1,473 @@ + + + + + +ArbiterMutableConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep.arbiter
    +

    Class ArbiterMutableConfig

    +
    +
    + +
    + +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setHelperHosts

          +
          public ArbiterMutableConfig setHelperHosts(java.lang.String helperHosts)
          +
Identifies one or more helper nodes by their host and port pairs, in this + format: +
          + hostname[:port][,hostname[:port]]*
          + 
          +
          +
          Parameters:
          +
          helperHosts - the string representing the host and port pairs.
          +
          +
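A short sketch with hypothetical hosts, showing the comma-separated form documented above:

import com.sleepycat.je.rep.arbiter.ArbiterMutableConfig;

class HelperHostsExample {
    static void configure(ArbiterMutableConfig config) {
        // Each entry is hostname[:port]; entries are separated by commas.
        config.setHelperHosts("node1.example.com:5001,node2.example.com:5002");
    }
}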
        • +
        + + + +
          +
        • +

          getHelperHosts

          +
          public java.lang.String getHelperHosts()
          +
          Returns the string identifying one or more helper host and port pairs in + this format: +
          + hostname[:port][,hostname[:port]]*
          + 
          +
          +
          Returns:
          +
          the string representing the host port pairs.
          +
          +
        • +
        + + + +
          +
        • +

          setFileLoggingLevel

          +
          public ArbiterMutableConfig setFileLoggingLevel(java.lang.String val)
          +
Trace messages at or above this level will be logged to the je.info + file, which is in the Arbiter home directory. The value should + be the name of one of the predefined java.util.logging.Level values. +

          + +

          + + + + + + + +
Name | Type | Mutable | Default
com.sleepycat.je.util.FileHandler.level | String | No | "INFO"

          +
          +
          Parameters:
          +
          val - value of the logging level.
          +
          Returns:
          +
this.
          +
          See Also:
          +
          Chapter 12. Logging
          +
          +
        • +
        + + + +
          +
        • +

          getFileLoggingLevel

          +
          public java.lang.String getFileLoggingLevel()
          +
          Gets the file logging level.
          +
          +
          Returns:
          +
          logging level
          +
          +
        • +
        + + + +
          +
        • +

          setConsoleLoggingLevel

          +
          public ArbiterMutableConfig setConsoleLoggingLevel(java.lang.String val)
          +
Trace messages at or above this level will be logged to the + console. The value should be the name of one of the predefined + java.util.logging.Level values. + +

          + + + + + + + +
Name | Type | Mutable | Default
com.sleepycat.je.util.ConsoleHandler.level | String | No | "OFF"

          +
          +
          Parameters:
          +
          val - Logging level.
          +
          Returns:
          +
          this.
          +
          See Also:
          +
          Chapter 12. Logging
          +
          +
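Putting the two logging knobs together, a small sketch; the level choices are arbitrary illustrations, and the strings are names of the predefined java.util.logging.Level constants:

import com.sleepycat.je.rep.arbiter.ArbiterMutableConfig;

class LoggingLevelExample {
    static void configure(ArbiterMutableConfig config) {
        // Send FINE and above to the je.info file in the Arbiter home,
        // while leaving the console at its default of OFF.
        config.setFileLoggingLevel("FINE");
        config.setConsoleLoggingLevel("OFF");
    }
}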
        • +
        + + + +
          +
        • +

          getConsoleLoggingLevel

          +
          public java.lang.String getConsoleLoggingLevel()
          +
          Gets the console logging level.
          +
          +
          Returns:
          +
          logging level
          +
          +
        • +
        + + + + + + + +
          +
        • +

          clone

          +
          public ArbiterMutableConfig clone()
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          getProps

          +
          public java.util.Properties getProps()
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
Displays the configuration values.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/ArbiterStats.html b/docs/java/com/sleepycat/je/rep/arbiter/ArbiterStats.html new file mode 100644 index 0000000..b536c9e --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/ArbiterStats.html @@ -0,0 +1,376 @@ + + + + + +ArbiterStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep.arbiter
    +

    Class ArbiterStats

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Modifier and TypeMethod and Description
        longgetAcks() +
The number of transactions that have been + acknowledged.
        +
        longgetDTVLSN() +
        The highest commit DTVLSN that has been + acknowledged.
        +
        longgetFSyncs() +
        The number of file fsyncs.
        +
        java.lang.StringgetMaster() +
        The current master node.
        +
        longgetReplayQueueOverflow() +
        The number of attempts to queue a response when + the queue was full.
        +
        java.lang.StringgetState() +
        The ReplicatedEnvironment.State of the node.
        +
        longgetVLSN() +
        The highest commit VLSN that has been + acknowledged.
        +
        longgetWrites() +
        The number of file writes.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getReplayQueueOverflow

          +
          public long getReplayQueueOverflow()
          +
          The number of attempts to queue a response when + the queue was full.
          +
        • +
        + + + +
          +
        • +

          getAcks

          +
          public long getAcks()
          +
The number of transactions that have been + acknowledged.
          +
        • +
        + + + +
          +
        • +

          getMaster

          +
          public java.lang.String getMaster()
          +
          The current master node.
          +
        • +
        + + + +
          +
        • +

          getState

          +
          public java.lang.String getState()
          +
          The ReplicatedEnvironment.State of the node.
          +
        • +
        + + + +
          +
        • +

          getVLSN

          +
          public long getVLSN()
          +
          The highest commit VLSN that has been + acknowledged.
          +
        • +
        + + + +
          +
        • +

          getDTVLSN

          +
          public long getDTVLSN()
          +
          The highest commit DTVLSN that has been + acknowledged.
          +
        • +
        + + + +
          +
        • +

          getWrites

          +
          public long getWrites()
          +
          The number of file writes.
          +
        • +
        + + + +
          +
        • +

          getFSyncs

          +
          public long getFSyncs()
          +
          The number of file fsyncs.
          +
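The accessors above can be pulled together into a one-line status report. How an ArbiterStats instance is obtained from a running Arbiter is not shown on this page, so the sketch below takes one as a parameter rather than assuming a particular accessor.

import com.sleepycat.je.rep.arbiter.ArbiterStats;

class ArbiterStatsReport {
    // Formats the statistics documented above into a single line.
    static String describe(ArbiterStats stats) {
        return String.format(
            "master=%s state=%s acks=%d vlsn=%d dtvlsn=%d writes=%d fsyncs=%d replayQueueOverflow=%d",
            stats.getMaster(), stats.getState(), stats.getAcks(),
            stats.getVLSN(), stats.getDTVLSN(), stats.getWrites(),
            stats.getFSyncs(), stats.getReplayQueueOverflow());
    }
}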
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/class-use/Arbiter.html b/docs/java/com/sleepycat/je/rep/arbiter/class-use/Arbiter.html new file mode 100644 index 0000000..9c500e7 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/class-use/Arbiter.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.arbiter.Arbiter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.arbiter.Arbiter

    +
    +
    No usage of com.sleepycat.je.rep.arbiter.Arbiter
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterConfig.html b/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterConfig.html new file mode 100644 index 0000000..a4e8ab1 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterConfig.html @@ -0,0 +1,228 @@ + + + + + +Uses of Class com.sleepycat.je.rep.arbiter.ArbiterConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.arbiter.ArbiterConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterMutableConfig.html b/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterMutableConfig.html new file mode 100644 index 0000000..5eb1851 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterMutableConfig.html @@ -0,0 +1,233 @@ + + + + + +Uses of Class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.arbiter.ArbiterMutableConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterStats.html b/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterStats.html new file mode 100644 index 0000000..2b814dc --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/class-use/ArbiterStats.html @@ -0,0 +1,174 @@ + + + + + +Uses of Class com.sleepycat.je.rep.arbiter.ArbiterStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.arbiter.ArbiterStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/package-frame.html b/docs/java/com/sleepycat/je/rep/arbiter/package-frame.html new file mode 100644 index 0000000..0c09bc9 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/package-frame.html @@ -0,0 +1,23 @@ + + + + + +com.sleepycat.je.rep.arbiter (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.je.rep.arbiter

    +
    +

    Classes

    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/package-summary.html b/docs/java/com/sleepycat/je/rep/arbiter/package-summary.html new file mode 100644 index 0000000..7f7c0b0 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/package-summary.html @@ -0,0 +1,179 @@ + + + + + +com.sleepycat.je.rep.arbiter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Package com.sleepycat.je.rep.arbiter

    +
    +
Provides a mechanism to allow write availability for the Replication +group even when the number of replication nodes is less than a majority.
    +
    +

    See: Description

    +
    +
    + + + + +

    Package com.sleepycat.je.rep.arbiter Description

    +
Provides a mechanism to allow write availability for the Replication +group even when the number of replication nodes is less than a majority.
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/package-tree.html b/docs/java/com/sleepycat/je/rep/arbiter/package-tree.html new file mode 100644 index 0000000..8f25005 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/package-tree.html @@ -0,0 +1,148 @@ + + + + + +com.sleepycat.je.rep.arbiter Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Hierarchy For Package com.sleepycat.je.rep.arbiter

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/arbiter/package-use.html b/docs/java/com/sleepycat/je/rep/arbiter/package-use.html new file mode 100644 index 0000000..8b52bb9 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/arbiter/package-use.html @@ -0,0 +1,177 @@ + + + + + +Uses of Package com.sleepycat.je.rep.arbiter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Package
    com.sleepycat.je.rep.arbiter

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/AppStateMonitor.html b/docs/java/com/sleepycat/je/rep/class-use/AppStateMonitor.html new file mode 100644 index 0000000..4f815e4 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/AppStateMonitor.html @@ -0,0 +1,176 @@ + + + + + +Uses of Interface com.sleepycat.je.rep.AppStateMonitor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.rep.AppStateMonitor

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/CommitPointConsistencyPolicy.html b/docs/java/com/sleepycat/je/rep/class-use/CommitPointConsistencyPolicy.html new file mode 100644 index 0000000..90265df --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/CommitPointConsistencyPolicy.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.CommitPointConsistencyPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.CommitPointConsistencyPolicy

    +
    +
    No usage of com.sleepycat.je.rep.CommitPointConsistencyPolicy
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/DatabasePreemptedException.html b/docs/java/com/sleepycat/je/rep/class-use/DatabasePreemptedException.html new file mode 100644 index 0000000..efe02ed --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/DatabasePreemptedException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.DatabasePreemptedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.DatabasePreemptedException

    +
    +
    No usage of com.sleepycat.je.rep.DatabasePreemptedException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/GroupShutdownException.html b/docs/java/com/sleepycat/je/rep/class-use/GroupShutdownException.html new file mode 100644 index 0000000..4c63e05 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/GroupShutdownException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.GroupShutdownException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.GroupShutdownException

    +
    +
    No usage of com.sleepycat.je.rep.GroupShutdownException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/InsufficientAcksException.html b/docs/java/com/sleepycat/je/rep/class-use/InsufficientAcksException.html new file mode 100644 index 0000000..5ab4d93 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/InsufficientAcksException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.InsufficientAcksException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.InsufficientAcksException

    +
    +
    No usage of com.sleepycat.je.rep.InsufficientAcksException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/InsufficientLogException.html b/docs/java/com/sleepycat/je/rep/class-use/InsufficientLogException.html new file mode 100644 index 0000000..0df603a --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/InsufficientLogException.html @@ -0,0 +1,202 @@ + + + + + +Uses of Class com.sleepycat.je.rep.InsufficientLogException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.InsufficientLogException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/InsufficientReplicasException.html b/docs/java/com/sleepycat/je/rep/class-use/InsufficientReplicasException.html new file mode 100644 index 0000000..116f9e9 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/InsufficientReplicasException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.InsufficientReplicasException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.InsufficientReplicasException

    +
    +
    No usage of com.sleepycat.je.rep.InsufficientReplicasException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/LockPreemptedException.html b/docs/java/com/sleepycat/je/rep/class-use/LockPreemptedException.html new file mode 100644 index 0000000..fed2667 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/LockPreemptedException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.LockPreemptedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.LockPreemptedException

    +
    +
    No usage of com.sleepycat.je.rep.LockPreemptedException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/LogOverwriteException.html b/docs/java/com/sleepycat/je/rep/class-use/LogOverwriteException.html new file mode 100644 index 0000000..f8488b6 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/LogOverwriteException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.LogOverwriteException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.LogOverwriteException

    +
    +
    No usage of com.sleepycat.je.rep.LogOverwriteException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/MasterReplicaTransitionException.html b/docs/java/com/sleepycat/je/rep/class-use/MasterReplicaTransitionException.html new file mode 100644 index 0000000..a366565 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/MasterReplicaTransitionException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.MasterReplicaTransitionException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.MasterReplicaTransitionException

    +
    +
    No usage of com.sleepycat.je.rep.MasterReplicaTransitionException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/MasterStateException.html b/docs/java/com/sleepycat/je/rep/class-use/MasterStateException.html new file mode 100644 index 0000000..2851727 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/MasterStateException.html @@ -0,0 +1,183 @@ + + + + + +Uses of Class com.sleepycat.je.rep.MasterStateException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.MasterStateException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/MasterTransferFailureException.html b/docs/java/com/sleepycat/je/rep/class-use/MasterTransferFailureException.html new file mode 100644 index 0000000..65eb56d --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/MasterTransferFailureException.html @@ -0,0 +1,177 @@ + + + + + +Uses of Class com.sleepycat.je.rep.MasterTransferFailureException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.MasterTransferFailureException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/MemberNotFoundException.html b/docs/java/com/sleepycat/je/rep/class-use/MemberNotFoundException.html new file mode 100644 index 0000000..8c956d5 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/MemberNotFoundException.html @@ -0,0 +1,183 @@ + + + + + +Uses of Class com.sleepycat.je.rep.MemberNotFoundException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.MemberNotFoundException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/NetworkRestore.html b/docs/java/com/sleepycat/je/rep/class-use/NetworkRestore.html new file mode 100644 index 0000000..4caad50 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/NetworkRestore.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.NetworkRestore (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.NetworkRestore

    +
    +
    No usage of com.sleepycat.je.rep.NetworkRestore
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/NetworkRestoreConfig.html b/docs/java/com/sleepycat/je/rep/class-use/NetworkRestoreConfig.html new file mode 100644 index 0000000..c7d82be --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/NetworkRestoreConfig.html @@ -0,0 +1,207 @@ + + + + + +Uses of Class com.sleepycat.je.rep.NetworkRestoreConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.NetworkRestoreConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/NoConsistencyRequiredPolicy.html b/docs/java/com/sleepycat/je/rep/class-use/NoConsistencyRequiredPolicy.html new file mode 100644 index 0000000..d88f9ab --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/NoConsistencyRequiredPolicy.html @@ -0,0 +1,175 @@ + + + + + +Uses of Class com.sleepycat.je.rep.NoConsistencyRequiredPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.NoConsistencyRequiredPolicy

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/NodeState.html b/docs/java/com/sleepycat/je/rep/class-use/NodeState.html new file mode 100644 index 0000000..21830b8 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/NodeState.html @@ -0,0 +1,180 @@ + + + + + +Uses of Class com.sleepycat.je.rep.NodeState (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.NodeState

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/NodeType.html b/docs/java/com/sleepycat/je/rep/class-use/NodeType.html new file mode 100644 index 0000000..c231aa6 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/NodeType.html @@ -0,0 +1,209 @@ + + + + + +Uses of Class com.sleepycat.je.rep.NodeType (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.NodeType

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/QuorumPolicy.html b/docs/java/com/sleepycat/je/rep/class-use/QuorumPolicy.html new file mode 100644 index 0000000..d60c374 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/QuorumPolicy.html @@ -0,0 +1,200 @@ + + + + + +Uses of Class com.sleepycat.je.rep.QuorumPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.QuorumPolicy

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicaConsistencyException.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicaConsistencyException.html new file mode 100644 index 0000000..916128e --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicaConsistencyException.html @@ -0,0 +1,186 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicaConsistencyException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicaConsistencyException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicaStateException.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicaStateException.html new file mode 100644 index 0000000..38d7a40 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicaStateException.html @@ -0,0 +1,176 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicaStateException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicaStateException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicaWriteException.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicaWriteException.html new file mode 100644 index 0000000..8b1a016 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicaWriteException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicaWriteException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicaWriteException

    +
    +
    No usage of com.sleepycat.je.rep.ReplicaWriteException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironment.State.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironment.State.html new file mode 100644 index 0000000..1d27de0 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironment.State.html @@ -0,0 +1,229 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicatedEnvironment.State (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicatedEnvironment.State

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironment.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironment.html new file mode 100644 index 0000000..f8c78d6 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironment.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicatedEnvironment (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicatedEnvironment

    +
    +
    No usage of com.sleepycat.je.rep.ReplicatedEnvironment
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironmentStats.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironmentStats.html new file mode 100644 index 0000000..cbb6306 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicatedEnvironmentStats.html @@ -0,0 +1,175 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicatedEnvironmentStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicatedEnvironmentStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicationConfig.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicationConfig.html new file mode 100644 index 0000000..fcf68bf --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicationConfig.html @@ -0,0 +1,294 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicationConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicationConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicationGroup.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicationGroup.html new file mode 100644 index 0000000..07ae90a --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicationGroup.html @@ -0,0 +1,235 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicationGroup (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicationGroup

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicationMutableConfig.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicationMutableConfig.html new file mode 100644 index 0000000..5fd04d1 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicationMutableConfig.html @@ -0,0 +1,233 @@ + + + + + +Uses of Class com.sleepycat.je.rep.ReplicationMutableConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.ReplicationMutableConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/ReplicationNode.html b/docs/java/com/sleepycat/je/rep/class-use/ReplicationNode.html new file mode 100644 index 0000000..9191c54 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/ReplicationNode.html @@ -0,0 +1,329 @@ + + + + + +Uses of Interface com.sleepycat.je.rep.ReplicationNode (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.rep.ReplicationNode

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/RestartRequiredException.html b/docs/java/com/sleepycat/je/rep/class-use/RestartRequiredException.html new file mode 100644 index 0000000..f3277fa --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/RestartRequiredException.html @@ -0,0 +1,217 @@ + + + + + +Uses of Class com.sleepycat.je.rep.RestartRequiredException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.RestartRequiredException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/RollbackException.html b/docs/java/com/sleepycat/je/rep/class-use/RollbackException.html new file mode 100644 index 0000000..35d958c --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/RollbackException.html @@ -0,0 +1,176 @@ + + + + + +Uses of Class com.sleepycat.je.rep.RollbackException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.RollbackException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/RollbackProhibitedException.html b/docs/java/com/sleepycat/je/rep/class-use/RollbackProhibitedException.html new file mode 100644 index 0000000..17393b8 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/RollbackProhibitedException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.RollbackProhibitedException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.RollbackProhibitedException

    +
    +
    No usage of com.sleepycat.je.rep.RollbackProhibitedException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/StateChangeEvent.html b/docs/java/com/sleepycat/je/rep/class-use/StateChangeEvent.html new file mode 100644 index 0000000..1bdd638 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/StateChangeEvent.html @@ -0,0 +1,202 @@ + + + + + +Uses of Class com.sleepycat.je.rep.StateChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.StateChangeEvent

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/StateChangeException.html b/docs/java/com/sleepycat/je/rep/class-use/StateChangeException.html new file mode 100644 index 0000000..a318924 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/StateChangeException.html @@ -0,0 +1,200 @@ + + + + + +Uses of Class com.sleepycat.je.rep.StateChangeException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.StateChangeException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/StateChangeListener.html b/docs/java/com/sleepycat/je/rep/class-use/StateChangeListener.html new file mode 100644 index 0000000..fa1a88f --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/StateChangeListener.html @@ -0,0 +1,192 @@ + + + + + +Uses of Interface com.sleepycat.je.rep.StateChangeListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.je.rep.StateChangeListener

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/SyncupProgress.html b/docs/java/com/sleepycat/je/rep/class-use/SyncupProgress.html new file mode 100644 index 0000000..87bad2a --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/SyncupProgress.html @@ -0,0 +1,212 @@ + + + + + +Uses of Class com.sleepycat.je.rep.SyncupProgress (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.SyncupProgress

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/TimeConsistencyPolicy.html b/docs/java/com/sleepycat/je/rep/class-use/TimeConsistencyPolicy.html new file mode 100644 index 0000000..1beef49 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/TimeConsistencyPolicy.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.TimeConsistencyPolicy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.TimeConsistencyPolicy

    +
    +
    No usage of com.sleepycat.je.rep.TimeConsistencyPolicy
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/class-use/UnknownMasterException.html b/docs/java/com/sleepycat/je/rep/class-use/UnknownMasterException.html new file mode 100644 index 0000000..13646cb --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/class-use/UnknownMasterException.html @@ -0,0 +1,240 @@ + + + + + +Uses of Class com.sleepycat.je.rep.UnknownMasterException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.rep.UnknownMasterException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/GroupChangeEvent.GroupChangeType.html b/docs/java/com/sleepycat/je/rep/monitor/GroupChangeEvent.GroupChangeType.html new file mode 100644 index 0000000..3fe12eb --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/GroupChangeEvent.GroupChangeType.html @@ -0,0 +1,357 @@ + + + + + +GroupChangeEvent.GroupChangeType (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.rep.monitor
    +

    Enum GroupChangeEvent.GroupChangeType

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Enum Constant Summary

        + + + + + + + + + + + +
        Enum Constants 
        Enum Constant and Description
        ADD +
        A new node was added to the replication group.
        +
        REMOVE +
        A node was removed from the replication group.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        Modifier and TypeMethod and Description
        static GroupChangeEvent.GroupChangeTypevalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static GroupChangeEvent.GroupChangeType[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static GroupChangeEvent.GroupChangeType[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (GroupChangeEvent.GroupChangeType c : GroupChangeEvent.GroupChangeType.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static GroupChangeEvent.GroupChangeType valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/GroupChangeEvent.html b/docs/java/com/sleepycat/je/rep/monitor/GroupChangeEvent.html new file mode 100644 index 0000000..5574e93 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/GroupChangeEvent.html @@ -0,0 +1,322 @@ + + + + + +GroupChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class GroupChangeEvent

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class GroupChangeEvent
      +extends MonitorChangeEvent
      +
      The event generated when the group composition changes. A new instance of + this event is generated each time a node is added or removed from the + group. Note that SECONDARY nodes do not generate these events.
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getRepGroup

          +
          public ReplicationGroup getRepGroup()
          +
          Returns the current description of the replication group.
          +
        • +
        + + + +
          +
        • +

          getChangeType

          +
          public GroupChangeEvent.GroupChangeType getChangeType()
          +
          Returns the type of the change (the addition of a new member or the + removal of an existing member) made to the group. The method + MonitorChangeEvent.getNodeName + can be used to identify the node that triggered the event.
          +
          +
          Returns:
          +
          the group change type.
          +
          +
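As an illustration, a minimal sketch of dispatching on the change type; the handleGroupChange method and its log lines are hypothetical stand-ins for application-specific logic, not part of the API:

    void handleGroupChange(GroupChangeEvent event) {
        // getNodeName() (inherited from MonitorChangeEvent) identifies the
        // node whose addition or removal triggered the event.
        String node = event.getNodeName();
        switch (event.getChangeType()) {
        case ADD:
            System.out.println("Node added to the group: " + node);
            break;
        case REMOVE:
            System.out.println("Node removed from the group: " + node);
            break;
        }
    }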
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/JoinGroupEvent.html b/docs/java/com/sleepycat/je/rep/monitor/JoinGroupEvent.html new file mode 100644 index 0000000..2cd4124 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/JoinGroupEvent.html @@ -0,0 +1,293 @@ + + + + + +JoinGroupEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class JoinGroupEvent

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class JoinGroupEvent
      +extends MemberChangeEvent
      +
      The event generated when a node joins the group. A new instance of this + event is generated each time a node joins the group. + + The event is generated on a "best effort" basis. It may not be generated, + for example, if the joining node was unable to communicate with the monitor + due to a network problem. The application must be resilient in the face of + such missing events.
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getJoinTime

          +
          public java.util.Date getJoinTime()
          +
          Returns the time at which the node joined the group.
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/LeaveGroupEvent.LeaveReason.html b/docs/java/com/sleepycat/je/rep/monitor/LeaveGroupEvent.LeaveReason.html new file mode 100644 index 0000000..4b863cb --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/LeaveGroupEvent.LeaveReason.html @@ -0,0 +1,372 @@ + + + + + +LeaveGroupEvent.LeaveReason (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Enum LeaveGroupEvent.LeaveReason

    +
    +
    + +
    + +
    +
    +
      +
    • + + + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static LeaveGroupEvent.LeaveReasonvalueOf(java.lang.String name) +
        Returns the enum constant of this type with the specified name.
        +
        static LeaveGroupEvent.LeaveReason[]values() +
        Returns an array containing the constants of this enum type, in +the order they are declared.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Enum

          +clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          values

          +
          public static LeaveGroupEvent.LeaveReason[] values()
          +
          Returns an array containing the constants of this enum type, in +the order they are declared. This method may be used to iterate +over the constants as follows: +
          +for (LeaveGroupEvent.LeaveReason c : LeaveGroupEvent.LeaveReason.values())
          +    System.out.println(c);
          +
          +
          +
          Returns:
          +
          an array containing the constants of this enum type, in the order they are declared
          +
          +
        • +
        + + + +
          +
        • +

          valueOf

          +
          public static LeaveGroupEvent.LeaveReason valueOf(java.lang.String name)
          +
          Returns the enum constant of this type with the specified name. +The string must match exactly an identifier used to declare an +enum constant in this type. (Extraneous whitespace characters are +not permitted.)
          +
          +
          Parameters:
          +
          name - the name of the enum constant to be returned.
          +
          Returns:
          +
          the enum constant with the specified name
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
          +
          java.lang.NullPointerException - if the argument is null
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/LeaveGroupEvent.html b/docs/java/com/sleepycat/je/rep/monitor/LeaveGroupEvent.html new file mode 100644 index 0000000..9305ce8 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/LeaveGroupEvent.html @@ -0,0 +1,348 @@ + + + + + +LeaveGroupEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class LeaveGroupEvent

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class LeaveGroupEvent
      +extends MemberChangeEvent
      +
The event generated when a node leaves the group. A new instance of this + event is generated each time a node leaves the group. + + The event is generated on a "best effort" basis. It may not be generated if + the node leaving the group dies before it has a chance to generate the + event, for example, if the process was killed, or if the node was unable to + communicate with the monitor due to a network problem. The application must + be resilient in the face of such missing events.
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getJoinTime

          +
          public java.util.Date getJoinTime()
          +
          +
          Returns:
          +
the time at which this node joined the group.
          +
          +
        • +
        + + + +
          +
        • +

          getLeaveTime

          +
          public java.util.Date getLeaveTime()
          +
          Returns the time at which the node left the group.
          +
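Since the event carries both timestamps, a listener can, for example, compute how long the node was a member; a sketch, where leaveGroupEvent is a hypothetical variable holding a received LeaveGroupEvent:

    long memberMillis = leaveGroupEvent.getLeaveTime().getTime()
                      - leaveGroupEvent.getJoinTime().getTime();
    System.out.println(leaveGroupEvent.getNodeName()
                       + " was a member for " + memberMillis + " ms");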
        • +
        + + + + + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/MemberChangeEvent.html b/docs/java/com/sleepycat/je/rep/monitor/MemberChangeEvent.html new file mode 100644 index 0000000..594e99f --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/MemberChangeEvent.html @@ -0,0 +1,264 @@ + + + + + +MemberChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class MemberChangeEvent

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.StringgetMasterName() +
        Returns the name of the master at the time of this event.
        +
        + +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getMasterName

          +
          public java.lang.String getMasterName()
          +
          Returns the name of the master at the time of this event. The return + value may be null if there is no current master.
          +
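Because the return value may be null, callers should check it; a sketch, where memberChangeEvent is a hypothetical variable holding a received MemberChangeEvent:

    String master = memberChangeEvent.getMasterName();
    if (master == null) {
        System.out.println("No master is currently known");
    } else {
        System.out.println("Master at the time of the event: " + master);
    }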
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/Monitor.html b/docs/java/com/sleepycat/je/rep/monitor/Monitor.html new file mode 100644 index 0000000..426112e --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/Monitor.html @@ -0,0 +1,594 @@ + + + + + +Monitor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class Monitor

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class Monitor
      +extends java.lang.Object
      +
      Provides a lightweight mechanism to track the current master node and the + members of the replication group. The information provided by the monitor + can be used to route update requests to the node that is currently the + master and distribute read requests across the other members of the group. +

+ The Monitor is typically run on a machine that participates in load + balancing or request routing, or that simply serves as a basis for + application-level monitoring, and that does not have a replicated + environment. To avoid creating a single point of failure, an application + may need to create multiple monitor instances, with each monitor running on a distinct machine. +

      + Applications with direct access to a ReplicatedEnvironment can use + its + synchronous and asynchronous mechanisms for determining the master node + and group composition changes. The Monitor class is not needed by such + applications. +

      + The Monitor generally learns about changes to group status through events + issued by replication group members. In addition, the Monitor maintains a + daemon thread which periodically pings members of the group so that the + Monitor can proactively discover group status changes that occur when it is + down or has lost network connectivity. +

      + The following code excerpt illustrates the typical code sequence used to + initiate a Monitor. Exception handling has been omitted to simplify the + example. + +

      + MonitorConfig monConfig = new MonitorConfig();
      + monConfig.setGroupName("PlanetaryRepGroup");
      + monConfig.setNodeName("mon1");
      + monConfig.setNodeHostPort("monhost1.acme.com:7000");
      + monConfig.setHelperHosts("mars.acme.com:5000,jupiter.acme.com:5000");
      +
      + Monitor monitor = new Monitor(monConfig);
      +
      + // If the monitor has not been registered as a member of the group,
      + // register it now. register() returns the current node that is the
      + // master.
      +
      + ReplicationNode currentMaster = monitor.register();
      +
      + // Start up the listener, so that it can be used to track changes
      + // in the master node, or group composition. It can also be used to help
      + // determine the electable nodes that are currently active and participating
      + // in the replication group.
      + monitor.startListener(new MyChangeListener());
      + 
      +
      +
      See Also:
      +
      MonitorChangeListener, +Writing Monitor + Nodes, +je.rep.quote + Examples
      +
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        ReplicationGroupgetGroup() +
        Returns the current composition of the group.
        +
        java.lang.StringgetGroupName() +
        Returns the name of the group associated with the Monitor.
        +
        java.lang.StringgetMasterNodeName() +
        Identifies the master of the replication group, resulting from the last + successful election.
        +
        java.net.InetSocketAddressgetMonitorSocketAddress() +
Returns the socket used by this monitor to listen for group changes.
        +
        java.lang.StringgetNodeName() +
Returns the group-wide unique name associated with the monitor.
        +
        ReplicationNoderegister() +
        Registers the monitor with the group so that it can be kept informed + of the outcome of elections and group membership changes.
        +
        voidshutdown() +
        Release monitor resources and shut down the monitor.
        +
        voidstartListener(MonitorChangeListener newListener) +
        Starts the listener so it's actively listening for election results and + broadcasts of replication group changes.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          Monitor

          +
          @Deprecated
          +public Monitor(ReplicationConfig monitorConfig)
          +
          Deprecated. As of JE 5, replaced by + Monitor(MonitorConfig)
          +
Creates a monitor instance using a ReplicationConfig. Monitor-specific properties that are not available + in ReplicationConfig use default settings.
          +
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
        • +
        + + + +
          +
        • +

          Monitor

          +
          public Monitor(MonitorConfig monitorConfig)
          +
          Creates a monitor instance. +

          +
          +
          Parameters:
          +
          monitorConfig - configuration used by a Monitor
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getGroupName

          +
          public java.lang.String getGroupName()
          +
          Returns the name of the group associated with the Monitor.
          +
          +
          Returns:
          +
          the group name
          +
          +
        • +
        + + + +
          +
        • +

          getNodeName

          +
          public java.lang.String getNodeName()
          +
Returns the group-wide unique name associated with the monitor.
          +
          +
          Returns:
          +
          the monitor name
          +
          +
        • +
        + + + +
          +
        • +

          getMonitorSocketAddress

          +
          public java.net.InetSocketAddress getMonitorSocketAddress()
          +
Returns the socket used by this monitor to listen for group changes.
          +
          +
          Returns:
          +
          the monitor socket address
          +
          +
        • +
        + + + +
          +
        • +

          register

          +
          public ReplicationNode register()
          +                         throws EnvironmentFailureException
          +
          Registers the monitor with the group so that it can be kept informed + of the outcome of elections and group membership changes. The + monitor, just like a replication node, is identified by its nodeName. + The Monitor uses the helper nodes to locate a master with which it can + register itself. If the helper nodes are not available the registration + will fail. +

          + A monitor must be registered at least once in order to be informed of + ongoing election results and group changes. Attempts to re-register the + same monitor are ignored. Registration, once it has been completed + successfully, persists beyond the lifetime of the Monitor instance and + does not need to be repeated. Repeated registrations are benign and + merely confirm that the current monitor configuration is consistent with + earlier registrations of this monitor.

          +
          +
          Returns:
          +
          the node that is the current master
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the monitor has been shutdown, or no + helper sockets were specified at Monitor initialization.
          +
          +
        • +
        + + + +
          +
        • +

          startListener

          +
          public void startListener(MonitorChangeListener newListener)
          +                   throws DatabaseException,
          +                          java.io.IOException
          +
          Starts the listener so it's actively listening for election results and + broadcasts of replication group changes. +

          + register() should be called before starting the listener. + If the monitor has not been registered, it will not be updated, and its + listener will not be invoked. +

          + Once the registration has been completed, the Monitor can start + listening even if none of the other nodes in the group are available. + It will be contacted automatically by the other nodes as they come up. +

          + If the group has a Master, invoking startListener results + in a synchronous callback to the application via the MonitorChangeListener.notify(NewMasterEvent) method. If there is no + Master at this time, the callback takes place asynchronously, after the + method returns, when a Master is eventually elected. +

          + Starting the listener will start the underlying ping thread, which + proactively checks group status for changes that might have been + missed when this Monitor instance has lost network connectivity or + is down.

          +
          +
          Parameters:
          +
          newListener - the listener used to monitor events of interest.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.io.IOException - if the monitor socket could not be set up
          +
          java.lang.IllegalArgumentException - if an invalid parameter is specified.
          +
          java.lang.IllegalStateException - if the monitor has been shutdown, or a + listener has already been established.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          getMasterNodeName

          +
          public java.lang.String getMasterNodeName()
          +                                   throws UnknownMasterException
          +
          Identifies the master of the replication group, resulting from the last + successful election. This method relies on the helper nodes supplied + to the monitor and queries them for the master. + + This method is useful when a Monitor first starts up and the Master + needs to be determined. Once a Monitor is registered and the Listener + has been started, it's kept up to date via events that are delivered + to the Listener.
          +
          +
          Returns:
          +
the name associated with the master replication node.
          +
          Throws:
          +
          UnknownMasterException - if the master could not be determined + from the set of helpers made available to the Monitor.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the monitor has been shutdown.
          +
          +
        • +
        + + + +
          +
        • +

          getGroup

          +
          public ReplicationGroup getGroup()
          +                          throws UnknownMasterException,
          +                                 DatabaseException
          +
          Returns the current composition of the group. It does so by first + querying the helpers to determine the master and then obtaining the + group information from the master.
          +
          +
          Returns:
          +
an instance of ReplicationGroup denoting the current composition of the + group
          +
          Throws:
          +
          UnknownMasterException - if the master could not be determined + from the set of helpers made available to the Monitor.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if the monitor has been shutdown.
          +
          DatabaseException
          +
          +
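A minimal sketch of querying the helpers directly, assuming monitor is a hypothetical variable holding a Monitor built from a MonitorConfig whose helper hosts are reachable (both methods throw UnknownMasterException otherwise):

    String masterName = monitor.getMasterNodeName();
    ReplicationGroup group = monitor.getGroup();
    System.out.println("Current master: " + masterName);
    System.out.println("Current group composition: " + group);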
        • +
        + + + +
          +
        • +

          shutdown

          +
          public void shutdown()
          +              throws java.lang.InterruptedException
          +
          Release monitor resources and shut down the monitor.
          +
          +
          Throws:
          +
          java.lang.InterruptedException
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/MonitorChangeEvent.html b/docs/java/com/sleepycat/je/rep/monitor/MonitorChangeEvent.html new file mode 100644 index 0000000..c9961a8 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/MonitorChangeEvent.html @@ -0,0 +1,254 @@ + + + + + +MonitorChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class MonitorChangeEvent

    +
    +
    + +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        java.lang.StringgetNodeName() +
        Returns the name of the node associated with the event.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getNodeName

          +
          public java.lang.String getNodeName()
          +
          Returns the name of the node associated with the event.
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/MonitorChangeListener.html b/docs/java/com/sleepycat/je/rep/monitor/MonitorChangeListener.html new file mode 100644 index 0000000..8540847 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/MonitorChangeListener.html @@ -0,0 +1,329 @@ + + + + + +MonitorChangeListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Interface MonitorChangeListener

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface MonitorChangeListener
      +
Applications can register for Monitor event notification through + Monitor.startListener(com.sleepycat.je.rep.monitor.MonitorChangeListener). The interface defines an overloaded notify + method for each type of event supported by the Monitor. +

+ Changes in the composition of the replication group, or in the dynamic state + of a member, are communicated to the listener as events that are represented + as subclasses of MonitorChangeEvent. Classes + implementing this interface supply an implementation of the notify + method associated with each type of event, so that they can respond with some + application-specific course of action. +

      + See Replication Guide, Writing Monitor Nodes

      +
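As an illustration, a minimal sketch of an implementing class; the name MyChangeListener matches the hypothetical listener used in the Monitor class example, and the log lines stand in for application-specific routing logic:

    import com.sleepycat.je.rep.monitor.*;

    class MyChangeListener implements MonitorChangeListener {
        public void notify(NewMasterEvent e) {
            // A new master was elected for the group.
            System.out.println("New master elected: " + e.getNodeName());
        }
        public void notify(GroupChangeEvent e) {
            // A node was added to or removed from the group.
            System.out.println("Group change " + e.getChangeType()
                               + ": " + e.getNodeName());
        }
        public void notify(JoinGroupEvent e) {
            System.out.println("Node joined: " + e.getNodeName()
                               + " at " + e.getJoinTime());
        }
        public void notify(LeaveGroupEvent e) {
            System.out.println("Node left: " + e.getNodeName()
                               + " at " + e.getLeaveTime());
        }
    }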
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          notify

          +
          void notify(NewMasterEvent newMasterEvent)
          +
The method is invoked whenever there is a new master associated with the + replication group. + + If the method throws an exception, JE will log the exception as a trace + message, which will be propagated through the usual channels.
          +
          +
          Parameters:
          +
          newMasterEvent - the event that resulted in the notify. It + identifies the new master.
          +
          +
        • +
        + + + +
          +
        • +

          notify

          +
          void notify(GroupChangeEvent groupChangeEvent)
          +
          The method is invoked whenever there is a change in the composition of + the replication group. That is, a new node has been added to the group + or an existing member has been removed from the group. Note that + SECONDARY nodes do not produce these events. + + If the method throws an exception, JE will log the exception as a trace + message, which will be propagated through the usual channels.
          +
          +
          Parameters:
          +
          groupChangeEvent - the event that resulted in the notify. It + describes the new group composition and identifies the node that + provoked the change.
          +
          +
        • +
        + + + +
          +
        • +

          notify

          +
          void notify(JoinGroupEvent joinGroupEvent)
          +
          The method is invoked whenever a node joins the group, by successfully + opening its first + ReplicatedEnvironment handle.
          +
          +
          Parameters:
          +
          joinGroupEvent - the event that resulted in the notify. It + identifies the node that joined the group.
          +
          +
        • +
        + + + +
          +
        • +

          notify

          +
          void notify(LeaveGroupEvent leaveGroupEvent)
          +
          The method is invoked whenever a node leaves the group by closing its + last ReplicatedEnvironment handle.
          +
          +
          Parameters:
          +
          leaveGroupEvent - the event that resulted in the notify. It + identifies the node that left the group.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/MonitorConfig.html b/docs/java/com/sleepycat/je/rep/monitor/MonitorConfig.html new file mode 100644 index 0000000..9f4e9d8 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/MonitorConfig.html @@ -0,0 +1,706 @@ + + + + + +MonitorConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class MonitorConfig

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.Cloneable
      +
      +
      +
      +
      public class MonitorConfig
      +extends java.lang.Object
      +implements java.lang.Cloneable
      +
      Specifies the attributes used by a replication Monitor. +

      + The following properties identify the target group. +

        +
      • groupName: the name of the replication group being monitored.
      • +
      • nodeName: the group-wide unique name associated with this + monitor node.
      • +
      • nodeHost: the hostname and port associated with this Monitor. Used + by group members to contact the Monitor.
      • +
      • helperHosts: the list of replication nodes which the Monitor uses to + register itself so it can receive notifications about group status + changes.
      • +
      + The following properties configure the daemon ping thread implemented + within the Monitor. This daemon thread lets the Monitor proactively find + status changes that occur when the Monitor is down or has lost network + connectivity. +
        +
• numRetries: number of times the ping thread attempts to contact a + node before deeming it unreachable.
      • +
      • retryInterval: number of milliseconds between ping thread retries. +
      • +
      • timeout: socketConnection timeout, in milliseconds, specified + when the ping thread attempts to establish a connection with a replication + node.
      • +
      +
      +
      Since:
      +
      JE 5.0
      +
      +
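Because each setter returns this MonitorConfig, the properties above can be chained; a minimal sketch (host names, ports and the group name are illustrative placeholders):

    MonitorConfig config = new MonitorConfig()
        .setGroupName("PlanetaryRepGroup")
        .setNodeName("mon1")
        .setNodeHostPort("monhost1.acme.com:7000")
        .setHelperHosts("mars.acme.com:5000,jupiter.acme.com:5000")
        .setNumRetries(5)                // ping attempts before a node is deemed unreachable
        .setRetryInterval(1000)          // milliseconds between ping retries
        .setSocketConnectTimeout(10000); // connection timeout, in milliseconds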
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        static MonitorConfigDEFAULT +
        An instance created using the default constructor is initialized with + the default settings.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        MonitorConfig() +
        An instance created using the default constructor is initialized with + the default settings.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        MonitorConfigclone() +
        Returns a copy of this configuration object.
        +
        java.lang.StringgetGroupName() +
        Gets the name associated with the replication group.
        +
        java.lang.StringgetHelperHosts() +
        Returns the string identifying one or more helper host and port pairs in + this format:
        +
        java.lang.StringgetNodeHostname() +
        Returns the hostname component of the nodeHost property.
        +
        java.lang.StringgetNodeHostPort() +
        Returns the hostname and port associated with this node.
        +
        java.lang.StringgetNodeName() +
        Returns the unique name associated with this monitor.
        +
        intgetNodePort() +
        Returns the port component of the nodeHost property.
        +
        intgetNumRetries() +
        Returns the number of times a ping thread attempts to contact a node + before deeming it unreachable.
        +
        longgetRetryInterval() +
        Returns the number of milliseconds between ping thread retries.
        +
        intgetSocketConnectTimeout() +
        Returns the socketConnection timeout, in milliseconds, used + when the ping thread attempts to establish a connection with a + replication node.
        +
        MonitorConfigsetGroupName(java.lang.String groupName) +
        Sets the name for the replication group.
        +
        MonitorConfigsetHelperHosts(java.lang.String helperHosts) +
Identifies one or more helper nodes by their host and port pairs in this +format:
        +
        MonitorConfigsetNodeHostPort(java.lang.String hostPort) +
        Sets the hostname and port associated with this monitor.
        +
        MonitorConfigsetNodeName(java.lang.String nodeName) +
        Sets the name to be associated with this monitor.
        +
        MonitorConfigsetNumRetries(int numRetries) +
        Sets the number of times a ping thread attempts to contact a node + before deeming it unreachable.
        +
        MonitorConfigsetRetryInterval(long retryInterval) +
        Sets the number of milliseconds between ping thread retries.
        +
        MonitorConfigsetSocketConnectTimeout(int socketConnectTimeout) +
        Sets the socketConnection timeout, in milliseconds, used + when the ping thread attempts to establish a connection with a + replication node.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          DEFAULT

          +
          public static final MonitorConfig DEFAULT
          +
          An instance created using the default constructor is initialized with + the default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          MonitorConfig

          +
          public MonitorConfig()
          +
          An instance created using the default constructor is initialized with + the default settings.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          setGroupName

          +
          public MonitorConfig setGroupName(java.lang.String groupName)
          +                           throws java.lang.IllegalArgumentException
          +
Sets the name for the replication group. The name must consist of + alphanumeric characters only and must not be zero length.
          +
          +
          Parameters:
          +
groupName - the alphanumeric string representing the name.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the string name is not valid.
          +
          +
        • +
        + + + +
          +
        • +

          getGroupName

          +
          public java.lang.String getGroupName()
          +
          Gets the name associated with the replication group.
          +
          +
          Returns:
          +
          the name of this replication group.
          +
          +
        • +
        + + + +
          +
        • +

          setNodeName

          +
          public MonitorConfig setNodeName(java.lang.String nodeName)
          +                          throws java.lang.IllegalArgumentException
          +
          Sets the name to be associated with this monitor. It must + be unique within the group. When the monitor is + instantiated and joins the replication group, a check is done to ensure + that the name is unique, and a + RestartRequiredException is thrown if it is + not.
          +
          +
          Parameters:
          +
          nodeName - the name of this monitor.
          +
          Throws:
          +
          java.lang.IllegalArgumentException
          +
          +
        • +
        + + + +
          +
        • +

          getNodeName

          +
          public java.lang.String getNodeName()
          +
          Returns the unique name associated with this monitor.
          +
          +
          Returns:
          +
          the monitor name
          +
          +
        • +
        + + + +
          +
        • +

          setNodeHostPort

          +
          public MonitorConfig setNodeHostPort(java.lang.String hostPort)
          +
Sets the hostname and port associated with this monitor. The hostname + and port combination is denoted by a string of the form: +
          +  hostname[:port]
          + 
          + The port must be outside the range of "Well Known Ports" + (zero through 1023).
          +
          +
          Parameters:
          +
          hostPort - the string containing the hostname and port as above.
          +
          +
        • +
        + + + +
          +
        • +

          getNodeHostPort

          +
          public java.lang.String getNodeHostPort()
          +
Returns the hostname and port associated with this node. The hostname + and port combination is denoted by a string of the form: +
          +  hostname:port
          + 
          +
          +
          Returns:
          +
          the hostname and port string of this monitor.
          +
          +
        • +
        + + + +
          +
        • +

          setHelperHosts

          +
          public MonitorConfig setHelperHosts(java.lang.String helperHosts)
          +
Identifies one or more helper nodes by their host and port pairs in this + format: +
          + hostname[:port][,hostname[:port]]*
          + 
          +
          +
          Parameters:
          +
          helperHosts - the string representing the host and port pairs.
          +
          +
        • +
        + + + +
          +
        • +

          getHelperHosts

          +
          public java.lang.String getHelperHosts()
          +
          Returns the string identifying one or more helper host and port pairs in + this format: +
          + hostname[:port][,hostname[:port]]*
          + 
          +
          +
          Returns:
          +
          the string representing the host port pairs.
          +
          +
        • +
        + + + +
          +
        • +

          getNodeHostname

          +
          public java.lang.String getNodeHostname()
          +
          Returns the hostname component of the nodeHost property.
          +
          +
          Returns:
          +
          the hostname string
          +
          +
        • +
        + + + +
          +
        • +

          getNodePort

          +
          public int getNodePort()
          +
          Returns the port component of the nodeHost property.
          +
          +
          Returns:
          +
          the port number
          +
          +
        • +
        + + + +
          +
        • +

          setNumRetries

          +
          public MonitorConfig setNumRetries(int numRetries)
          +
          Sets the number of times a ping thread attempts to contact a node + before deeming it unreachable. + The default value is 5.
          +
        • +
        + + + +
          +
        • +

          getNumRetries

          +
          public int getNumRetries()
          +
          Returns the number of times a ping thread attempts to contact a node + before deeming it unreachable.
          +
        • +
        + + + +
          +
        • +

          setRetryInterval

          +
          public MonitorConfig setRetryInterval(long retryInterval)
          +
          Sets the number of milliseconds between ping thread retries. The default + value is 1000.
          +
        • +
        + + + +
          +
        • +

          getRetryInterval

          +
          public long getRetryInterval()
          +
          Returns the number of milliseconds between ping thread retries.
          +
        • +
        + + + +
          +
        • +

          setSocketConnectTimeout

          +
          public MonitorConfig setSocketConnectTimeout(int socketConnectTimeout)
          +
          Sets the socketConnection timeout, in milliseconds, used + when the ping thread attempts to establish a connection with a + replication node. The default value is 10,000.
          +
        • +
        + + + +
          +
        • +

          getSocketConnectTimeout

          +
          public int getSocketConnectTimeout()
          +
          Returns the socketConnection timeout, in milliseconds, used + when the ping thread attempts to establish a connection with a + replication node.
          +
        • +
        + + + +
          +
        • +

          clone

          +
          public MonitorConfig clone()
          +
          Returns a copy of this configuration object.
          +
          +
          Overrides:
          +
          clone in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/MonitorConfigBeanInfo.html b/docs/java/com/sleepycat/je/rep/monitor/MonitorConfigBeanInfo.html new file mode 100644 index 0000000..bf55a96 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/MonitorConfigBeanInfo.html @@ -0,0 +1,351 @@ + + + + + +MonitorConfigBeanInfo (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class MonitorConfigBeanInfo

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.beans.BeanInfo
      +
      +
      +
      +
      public class MonitorConfigBeanInfo
      +extends ConfigBeanInfoBase
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          MonitorConfigBeanInfo

          +
          public MonitorConfigBeanInfo()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getBeanDescriptor

          +
          public java.beans.BeanDescriptor getBeanDescriptor()
          +
          +
          Specified by:
          +
          getBeanDescriptor in interface java.beans.BeanInfo
          +
          Overrides:
          +
          getBeanDescriptor in class java.beans.SimpleBeanInfo
          +
          +
        • +
        + + + +
          +
        • +

          getPropertyDescriptors

          +
          public java.beans.PropertyDescriptor[] getPropertyDescriptors()
          +
          +
          Specified by:
          +
          getPropertyDescriptors in interface java.beans.BeanInfo
          +
          Overrides:
          +
          getPropertyDescriptors in class java.beans.SimpleBeanInfo
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/NewMasterEvent.html b/docs/java/com/sleepycat/je/rep/monitor/NewMasterEvent.html new file mode 100644 index 0000000..549470a --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/NewMasterEvent.html @@ -0,0 +1,288 @@ + + + + + +NewMasterEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    com.sleepycat.je.rep.monitor
    +

    Class NewMasterEvent

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class NewMasterEvent
      +extends MemberChangeEvent
      +
      The event generated upon detecting a new Master. A new instance of this + event is generated each time a new master is elected for the group.
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getSocketAddress

          +
          public java.net.InetSocketAddress getSocketAddress()
          +
Returns the socket address associated with the new master.
          +
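A sketch of acting on the address, where newMasterEvent is a hypothetical variable holding a received NewMasterEvent; the routing decision itself is application-specific and omitted:

    java.net.InetSocketAddress masterAddress = newMasterEvent.getSocketAddress();
    System.out.println("Route update requests to "
                       + masterAddress.getHostName() + ":" + masterAddress.getPort());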
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/GroupChangeEvent.GroupChangeType.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/GroupChangeEvent.GroupChangeType.html new file mode 100644 index 0000000..2891d39 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/GroupChangeEvent.GroupChangeType.html @@ -0,0 +1,189 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/GroupChangeEvent.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/GroupChangeEvent.html new file mode 100644 index 0000000..c6cbbe1 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/GroupChangeEvent.html @@ -0,0 +1,176 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.GroupChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.GroupChangeEvent

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/JoinGroupEvent.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/JoinGroupEvent.html new file mode 100644 index 0000000..c382da6 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/JoinGroupEvent.html @@ -0,0 +1,177 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.JoinGroupEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.JoinGroupEvent

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/LeaveGroupEvent.LeaveReason.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/LeaveGroupEvent.LeaveReason.html new file mode 100644 index 0000000..a5ebb34 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/LeaveGroupEvent.LeaveReason.html @@ -0,0 +1,188 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/LeaveGroupEvent.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/LeaveGroupEvent.html new file mode 100644 index 0000000..e4b43bd --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/LeaveGroupEvent.html @@ -0,0 +1,176 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.LeaveGroupEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.LeaveGroupEvent

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/MemberChangeEvent.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/MemberChangeEvent.html new file mode 100644 index 0000000..b157c1e --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/MemberChangeEvent.html @@ -0,0 +1,187 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.MemberChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.MemberChangeEvent

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/Monitor.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/Monitor.html new file mode 100644 index 0000000..aab7923 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/Monitor.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.Monitor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.Monitor

    +
    +
    No usage of com.sleepycat.je.rep.monitor.Monitor
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorChangeEvent.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorChangeEvent.html new file mode 100644 index 0000000..a1be27f --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorChangeEvent.html @@ -0,0 +1,199 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.MonitorChangeEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.MonitorChangeEvent

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorChangeListener.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorChangeListener.html new file mode 100644 index 0000000..c54a68a --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorChangeListener.html @@ -0,0 +1,176 @@ + + + + + +Uses of Interface com.sleepycat.je.rep.monitor.MonitorChangeListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Interface
    com.sleepycat.je.rep.monitor.MonitorChangeListener

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorConfig.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorConfig.html new file mode 100644 index 0000000..e97b257 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorConfig.html @@ -0,0 +1,250 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.MonitorConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.monitor.MonitorConfig

    +
    +
    + +
    + +
    + + + + + + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorConfigBeanInfo.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorConfigBeanInfo.html new file mode 100644 index 0000000..066bea1 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/MonitorConfigBeanInfo.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.MonitorConfigBeanInfo (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Uses of Class
    com.sleepycat.je.rep.monitor.MonitorConfigBeanInfo

    +
    +
    No usage of com.sleepycat.je.rep.monitor.MonitorConfigBeanInfo
    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/class-use/NewMasterEvent.html b/docs/java/com/sleepycat/je/rep/monitor/class-use/NewMasterEvent.html new file mode 100644 index 0000000..9334342 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/class-use/NewMasterEvent.html @@ -0,0 +1,176 @@ + + + + + +Uses of Class com.sleepycat.je.rep.monitor.NewMasterEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Uses of Class
    com.sleepycat.je.rep.monitor.NewMasterEvent

    +
    +
    + +
    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/package-frame.html b/docs/java/com/sleepycat/je/rep/monitor/package-frame.html new file mode 100644 index 0000000..450602c --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/package-frame.html @@ -0,0 +1,37 @@ + + + + + +com.sleepycat.je.rep.monitor (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.je.rep.monitor

    +
    +

    Interfaces

    + +

    Classes

    + +

    Enums

    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/package-summary.html b/docs/java/com/sleepycat/je/rep/monitor/package-summary.html new file mode 100644 index 0000000..169a28d --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/package-summary.html @@ -0,0 +1,261 @@ + + + + + +com.sleepycat.je.rep.monitor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Package com.sleepycat.je.rep.monitor

    +
    +
BDB JE HA support for applications that need to track the composition of a replication group, in order to do tasks such as load balancing and request routing.
    +
    +

    See: Description

    +
    +
    + + + + +

    Package com.sleepycat.je.rep.monitor Description

    +
BDB JE HA support for applications that need to track the composition of a replication group, in order to do tasks such as load balancing and request routing.

    Package Specification

The Monitor is intended for applications that do not directly reference ReplicatedEnvironment, but need to track the composition of a replication group and the current Master. A Monitor tracks changes in replication group membership and roles.
    +
    +
    See Also:
    +
    Replication Guide, Writing Monitor Nodes
    +
    +
    + +
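As a hedged illustration (not part of the original javadoc), a monitor application might be wired up as below; the MonitorConfig setters, Monitor.register() and startListener(), and the listener callbacks are assumptions based on the class names listed in this package, and all group/node names are invented:

import com.sleepycat.je.rep.monitor.*;

public class GroupTracker {
    public static void main(String[] args) throws Exception {
        // Assumed MonitorConfig setters, mirroring ReplicationConfig.
        MonitorConfig config = new MonitorConfig();
        config.setGroupName("UniversalRepGroup");
        config.setNodeName("monitor1");
        config.setNodeHostPort("monhost:6000");
        config.setHelperHosts("mars:5001");

        Monitor monitor = new Monitor(config);
        monitor.register();  // advertise this monitor to the group
        monitor.startListener(new MonitorChangeListener() {
            public void notify(NewMasterEvent e) {
                System.out.println("new master: " + e.getNodeName());
            }
            public void notify(GroupChangeEvent e) {
                System.out.println("membership change: " + e.getNodeName());
            }
            public void notify(JoinGroupEvent e) { /* node joined */ }
            public void notify(LeaveGroupEvent e) { /* node left */ }
        });
    }
}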
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/package-tree.html b/docs/java/com/sleepycat/je/rep/monitor/package-tree.html new file mode 100644 index 0000000..d79e8e2 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/package-tree.html @@ -0,0 +1,181 @@ + + + + + +com.sleepycat.je.rep.monitor Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Hierarchy For Package com.sleepycat.je.rep.monitor

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +

    Interface Hierarchy

    + +

    Enum Hierarchy

    + +
    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/monitor/package-use.html b/docs/java/com/sleepycat/je/rep/monitor/package-use.html new file mode 100644 index 0000000..c393894 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/monitor/package-use.html @@ -0,0 +1,214 @@ + + + + + +Uses of Package com.sleepycat.je.rep.monitor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Uses of Package
    com.sleepycat.je.rep.monitor

    +
    +
    + +
    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/package-frame.html b/docs/java/com/sleepycat/je/rep/package-frame.html new file mode 100644 index 0000000..6099f72 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/package-frame.html @@ -0,0 +1,66 @@ + + + + + +com.sleepycat.je.rep (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.je.rep

    +
    +

    Interfaces

    + +

    Classes

    + +

    Enums

    + +

    Exceptions

    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/package-summary.html b/docs/java/com/sleepycat/je/rep/package-summary.html new file mode 100644 index 0000000..466317a --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/package-summary.html @@ -0,0 +1,553 @@ + + + + + +com.sleepycat.je.rep (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Package com.sleepycat.je.rep

    +
    +
Berkeley DB Java Edition High Availability (JE HA) enables replication of JE environments.
    +
    +

    See: Description

    +
    +
    + + + + +

    Package com.sleepycat.je.rep Description

    +
Berkeley DB Java Edition High Availability (JE HA) enables replication of JE environments. JE HA is an embedded database management system designed to provide fast, reliable, and scalable data management. A JE environment is replicated across the nodes of a Replication Group, which consists of a single read/write Master and multiple read-only Replicas. JE HA is used to improve application availability, provide improved read performance, and increase data durability.

    Getting Started

The Replication Guide is invaluable for understanding the capabilities of JE HA and how best to design your replicated application.

The Introduction covers terminology, the replication group lifecycle, and the concepts of durability and consistency. Much of the javadoc refers to the topics covered there.

Replication API First Steps explains how to configure and start a replication group.

Transaction Management highlights the tradeoffs that must be considered in a replicated application and provides some use cases.

In addition, the je.rep.quote example package provides three example replication applications.

    What the com.sleepycat.je.rep package contains

    +
    Replication control
    +
      +
    • ReplicatedEnvironment is the main access point to + replication.
    • +
    • ReplicationConfig and + ReplicationMutableConfig specify attributes of the + replication system.
    • +
    +
    Administration
    +
      +
    • + ReplicationNode and ReplicationGroup supply + administrative views of the replication system. +
    • +
    +
    Support
    +
      +
• StateChangeListener and StateChangeEvent implement a Listener pattern for tracking changes in the replication system (see the sketch after this list).
    • +
• CommitPointConsistencyPolicy, TimeConsistencyPolicy and NoConsistencyPolicy let the user control the read-only replica's view of the replicated data.
    • +
• ReplicatedEnvironmentStats provides feedback on system execution.
    • +
    + +
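For instance, a minimal StateChangeListener along the lines described above might look like the following sketch; installing it via ReplicatedEnvironment.setStateChangeListener is an assumption, and the routing flag is purely illustrative:

import com.sleepycat.je.rep.ReplicatedEnvironment;
import com.sleepycat.je.rep.StateChangeEvent;
import com.sleepycat.je.rep.StateChangeListener;

public class RoutingListener implements StateChangeListener {

    private volatile boolean master = false;

    public void stateChange(StateChangeEvent event) {
        // Route writes to this node only while it is the MASTER.
        master = (event.getState() == ReplicatedEnvironment.State.MASTER);
        System.out.println("node state: " + event.getState());
    }

    public boolean isMaster() {
        return master;
    }
}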

    Related Packages

    +
      +
• com.sleepycat.je.rep.monitor lets the application track the replication system in order to do tasks such as load balancing and write request routing.
• com.sleepycat.je.rep.util provides command line and programmatic APIs for administering and starting up a replication system.
    +

    Related Documentation

    +
    +
    See Also:
    +
    Replication Guide, + + JE HA Examples
    +
    +
    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/package-tree.html b/docs/java/com/sleepycat/je/rep/package-tree.html new file mode 100644 index 0000000..e68f547 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/package-tree.html @@ -0,0 +1,238 @@ + + + + + +com.sleepycat.je.rep Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Hierarchy For Package com.sleepycat.je.rep

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +

    Interface Hierarchy

    + +

    Enum Hierarchy

    + +
    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/package-use.html b/docs/java/com/sleepycat/je/rep/package-use.html new file mode 100644 index 0000000..0068af0 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/package-use.html @@ -0,0 +1,412 @@ + + + + + +Uses of Package com.sleepycat.je.rep (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +

    Uses of Package
    com.sleepycat.je.rep

    +
    +
    +
      +
    • + + + + + + + + + + + + + + + + + + + + + + + + +
      Packages that use com.sleepycat.je.rep 
      PackageDescription
      com.sleepycat.je.rep +
      +Berkeley DB Java Edition High Availability (JE HA) enables replication of JE +environments.
      +
      com.sleepycat.je.rep.arbiter +
Provides a mechanism to allow write availability for the replication group even when the number of replication nodes is less than a majority.
      +
      com.sleepycat.je.rep.monitor +
BDB JE HA support for applications that need to track the composition of a replication group, in order to do tasks such as load balancing and request routing.
      +
      com.sleepycat.je.rep.util +
      BDB JE High Availability command line utilities and helper classes.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Classes in com.sleepycat.je.rep used by com.sleepycat.je.rep 
      Class and Description
      AppStateMonitor +
      A mechanism for adding application specific information when asynchronously + tracking the state of a running JE HA application.
      +
      InsufficientLogException +
      This exception indicates that the log files constituting the Environment are + insufficient and cannot be used as the basis for continuing with the + replication stream provided by the current master.
      +
      NetworkRestoreConfig +
      NetworkRestoreConfig defines the configuration parameters used to configure + a NetworkRestore operation.
      +
      NoConsistencyRequiredPolicy +
      A consistency policy that lets a transaction on a replica using this policy + proceed regardless of the state of the Replica relative to the Master.
      +
      NodeType +
      The different types of nodes that can be in a replication group.
      +
      QuorumPolicy +
The quorum policy determines the number of nodes that must participate to pick the winner of an election, and therefore the master of the group.
      +
      ReplicaConsistencyException +
      This exception is thrown by a Replica to indicate it could not meet the + consistency requirements as defined by the + ReplicaConsistencyPolicy in effect for the transaction, within + the allowed timeout period.
      +
      ReplicatedEnvironment.State +
      The replication node state determines the operations that the + application can perform against its replicated environment.
      +
      ReplicatedEnvironmentStats +
      Statistics for a replicated environment.
      +
      ReplicationConfig +
      Specifies the immutable attributes of a replicated environment.
      +
      ReplicationGroup +
      An administrative view of the collection of nodes that form the replication + group.
      +
      ReplicationMutableConfig +
      Specifies the attributes that may be changed after a ReplicatedEnvironment has been created.
      +
      ReplicationNode +
      An administrative view of a node in a replication group.
      +
      RestartRequiredException +
RestartRequiredException serves as the base class for all exceptions which make it impossible for HA to proceed without some form of corrective action on the part of the user, followed by a restart of the application.
      +
      RollbackException +
This asynchronous exception indicates that a new master has been selected, that this Replica's log is ahead of the current Master, and that the Replica was unable to roll back without a recovery.
      +
      StateChangeEvent +
      Communicates the state change at a node + to the StateChangeListener.
      +
      StateChangeException +
      Provides a synchronous mechanism for informing an application about a change + in the state of the replication node.
      +
      StateChangeListener +
      An asynchronous mechanism for tracking the State of the replicated environment and + choosing how to route database operations.
      +
      SyncupProgress +
      Describes the different phases of replication stream syncup that are + executed when a replica starts working with a new replication group master.
      +
      +
    • +
    • + + + + + + + + + + + + +
      Classes in com.sleepycat.je.rep used by com.sleepycat.je.rep.arbiter 
      Class and Description
      ReplicatedEnvironment.State +
      The replication node state determines the operations that the + application can perform against its replicated environment.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + + +
      Classes in com.sleepycat.je.rep used by com.sleepycat.je.rep.monitor 
      Class and Description
      ReplicationConfig +
      Specifies the immutable attributes of a replicated environment.
      +
      ReplicationGroup +
      An administrative view of the collection of nodes that form the replication + group.
      +
      ReplicationNode +
      An administrative view of a node in a replication group.
      +
      UnknownMasterException +
      Indicates that the underlying operation requires communication with a + Master, but that a Master was not available.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Classes in com.sleepycat.je.rep used by com.sleepycat.je.rep.util 
      Class and Description
      MasterStateException +
      This exception indicates that the application attempted an operation that is + not permitted when it is in the ReplicatedEnvironment.State.MASTER + state.
      +
      MasterTransferFailureException +
      Thrown by ReplicatedEnvironment.transferMaster(java.util.Set<java.lang.String>, int, java.util.concurrent.TimeUnit) if a Master Transfer + operation cannot be completed within the allotted time.
      +
      MemberNotFoundException +
      Thrown when an operation requires a replication group member and that member + is not present in the replication group.
      +
      NodeState +
The current state of a replication node and of the application in which this node is running.
      +
      ReplicaStateException +
      This exception indicates that the application attempted an operation that is + not permitted when it is in the ReplicatedEnvironment.State.REPLICA + state.
      +
      ReplicationGroup +
      An administrative view of the collection of nodes that form the replication + group.
      +
      ReplicationNode +
      An administrative view of a node in a replication group.
      +
      UnknownMasterException +
      Indicates that the underlying operation requires communication with a + Master, but that a Master was not available.
      +
      +
    • +
    +
    + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/AtomicLongMax.html b/docs/java/com/sleepycat/je/rep/util/AtomicLongMax.html new file mode 100644 index 0000000..22f3eaf --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/AtomicLongMax.html @@ -0,0 +1,316 @@ + + + + + +AtomicLongMax (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je.rep.util
    +

    Class AtomicLongMax

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.rep.util.AtomicLongMax
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class AtomicLongMax
      +extends java.lang.Object
      +
An atomic long that maintains a maximum value.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        AtomicLongMax(long initialValue) 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        longget() +
        Gets the current value.
        +
        longset(long newValue) +
Sets the value to newValue and returns the old value.
        +
        longupdateMax(long newMax) +
        Updates the max value if the argument is greater than the current max.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          AtomicLongMax

          +
          public AtomicLongMax(long initialValue)
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          updateMax

          +
          public long updateMax(long newMax)
          +
          Updates the max value if the argument is greater than the current max.
          +
        • +
        + + + +
          +
        • +

          get

          +
          public long get()
          +
          Gets the current value.
          +
          +
          Returns:
          +
          the current value
          +
          +
        • +
        + + + +
          +
        • +

          set

          +
          public long set(long newValue)
          +
Sets the value to newValue and returns the old value.
          +
        • +
        +
      • +
      +
    • +
    +
    +
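A brief usage sketch of the constructor and methods documented above (the surrounding demo class is hypothetical):

import com.sleepycat.je.rep.util.AtomicLongMax;

public class AtomicLongMaxDemo {
    public static void main(String[] args) {
        AtomicLongMax max = new AtomicLongMax(0);
        max.updateMax(42);        // max becomes 42
        max.updateMax(7);         // 7 < 42, so max stays 42
        long old = max.set(100);  // returns the previous value, 42
        System.out.println(max.get() + " (was " + old + ")");  // 100 (was 42)
    }
}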
    + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/DbEnableReplication.html b/docs/java/com/sleepycat/je/rep/util/DbEnableReplication.html new file mode 100644 index 0000000..ff13850 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/DbEnableReplication.html @@ -0,0 +1,394 @@ + + + + + +DbEnableReplication (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je.rep.util
    +

    Class DbEnableReplication

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.rep.util.DbEnableReplication
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbEnableReplication
      +extends java.lang.Object
      +
A utility to convert an existing, non-replicated JE environment for replication. This is useful when the user wants to initially prototype and develop a standalone transactional application, and then add replication as a second stage.

JE HA environment log files contain types of log records and metadata used only by replication. Non-replicated environments lack that information and must undergo a one-time conversion process to add that metadata and enable replication. The conversion process is one way. Once an environment directory is converted, the rules that govern ReplicatedEnvironment apply; namely, the directory cannot be opened by a read/write standalone Environment. Only a minimal amount of replication metadata is added, and the conversion process does not depend on the size of the existing directory.

      + The conversion process takes these steps: +

        +
1. Use DbEnableReplication to convert an existing environment directory. DbEnableReplication can be used as a command line utility, and must be executed locally on the host which houses the environment directory. Alternatively, DbEnableReplication may be used programmatically through the provided APIs.
2. Once converted, the environment directory may be treated as an existing master node, and can be opened with a ReplicatedEnvironment. No helper host configuration is needed.
3. Additional nodes may be created and can join the group as newly created replicas, as described in ReplicatedEnvironment. Since these new nodes are empty, they should be configured to use the converted master as their helper node, and will go through the replication node lifecycle to populate their environment directories. In this case, there will be data in the converted master that can only be transferred to the replica through a file copy executed with the help of a NetworkRestore.

      + For example: +

      + // Create the first node using an existing environment 
      + DbEnableReplication converter = 
      +     new DbEnableReplication(envDirMars,          // env home dir
      +                             "UniversalRepGroup", // group name
      +                             "nodeMars",          // node name
      +                             "mars:5001");        // node host,port
      + converter.convert();
      +
      + ReplicatedEnvironment nodeMars = new ReplicatedEnvironment(envDirMars, ...);
      + 
      + // Bring up additional nodes, which will be initialized from 
      + // nodeMars.
      + ReplicationConfig repConfig = null;
      + try {
      +     repConfig = new ReplicationConfig("UniversalRepGroup", // groupName
      +                                       "nodeVenus",         // nodeName
      +                                       "venus:5008");       // nodeHostPort
      +     repConfig.setHelperHosts("mars:5001");
      + 
      +     nodeVenus = new ReplicatedEnvironment(envDirB, repConfig, envConfig);
      + } catch (InsufficientLogException insufficientLogEx) {
      + 
      +     // log files will be copied from another node in the group
      +     NetworkRestore restore = new NetworkRestore();
      +     restore.execute(insufficientLogEx, new NetworkRestoreConfig());
      +     
      +     // try opening the node now
      +     nodeVenus = new ReplicatedEnvironment(envDirVenus, 
      +                                           repConfig,
      +                                           envConfig);
      + }
      + ...
      + 
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbEnableReplication(java.io.File envHome, + java.lang.String groupName, + java.lang.String nodeName, + java.lang.String nodeHostPort) +
        Create a DbEnableReplication object for this node.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        voidconvert() +
Modify the log files in the environment directory to add the modicum of metadata required for replication.
        +
        static voidmain(java.lang.String[] args) +
        Usage:
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbEnableReplication

          +
          public DbEnableReplication(java.io.File envHome,
          +                           java.lang.String groupName,
          +                           java.lang.String nodeName,
          +                           java.lang.String nodeHostPort)
          +
          Create a DbEnableReplication object for this node.
          +
          +
          Parameters:
          +
          envHome - The node's environment directory
          +
          groupName - The name of the new replication group
          +
          nodeName - The node's name
          +
          nodeHostPort - The host and port for this node
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] args)
          +
          Usage: +
          + java -cp je.jar com.sleepycat.je.rep.util.DbEnableReplication
          +   -h <dir>                          # environment home directory
          +   -groupName <group name>           # replication group name
          +   -nodeName <node name>             # replicated node name
          +   -nodeHostPort <host name:port number> # host name or IP address
          +                                             and port number to use
          +                                             for this node
          + 
          +
        • +
        + + + +
          +
        • +

          convert

          +
          public void convert()
          +
Modify the log files in the environment directory to add the modicum of metadata required for replication.
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/DbGroupAdmin.html b/docs/java/com/sleepycat/je/rep/util/DbGroupAdmin.html new file mode 100644 index 0000000..4b936e0 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/DbGroupAdmin.html @@ -0,0 +1,429 @@ + + + + + +DbGroupAdmin (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je.rep.util
    +

    Class DbGroupAdmin

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.rep.util.DbGroupAdmin
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbGroupAdmin
      +extends java.lang.Object
      +
DbGroupAdmin supplies the functionality of the administrative class ReplicationGroupAdmin in a convenient command line utility. For example, it can be used to display replication group information, or to remove a node from the replication group.

Note: This utility does not handle security and authorization. It is left to the user to ensure that the utility is invoked with proper authorization.

See main(java.lang.String...) for a full description of the command line arguments.
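As a hedged sketch of programmatic use, based on the constructor and methods summarized below (the group name, helper address, and node name are invented):

import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.Set;
import com.sleepycat.je.rep.util.DbGroupAdmin;

public class GroupAdminDemo {
    public static void main(String[] args) throws Exception {
        Set<InetSocketAddress> helpers = new HashSet<InetSocketAddress>();
        helpers.add(new InetSocketAddress("mars", 5001));

        DbGroupAdmin admin = new DbGroupAdmin("UniversalRepGroup", helpers);
        admin.dumpGroup();                // print members and the master
        admin.removeMember("nodeVenus");  // node should be shut down first
    }
}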

      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbGroupAdmin(java.lang.String groupName, + java.util.Set<java.net.InetSocketAddress> helperSockets) +
        Create a DbGroupAdmin instance for programmatic use.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        voiddumpGroup() +
        Display group information.
        +
        static voidmain(java.lang.String... args) +
        Usage:
        +
        voidremoveMember(java.lang.String name) +
        Remove a node from the replication group.
        +
        voidtransferMaster(java.lang.String nodeList, + java.lang.String timeout) +
        Transfers the master role from the current master to one of the + electable replicas specified in the argument list.
        +
        voidupdateAddress(java.lang.String nodeName, + java.lang.String newHostName, + int newPort) +
        Update the network address for a specified node.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbGroupAdmin

          +
          public DbGroupAdmin(java.lang.String groupName,
          +                    java.util.Set<java.net.InetSocketAddress> helperSockets)
          +
          Create a DbGroupAdmin instance for programmatic use.
          +
          +
          Parameters:
          +
          groupName - replication group name
          +
          helperSockets - set of host and port pairs for group members which + can be queried to obtain group information.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String... args)
          +                 throws java.lang.Exception
          +
          Usage: +
          + java {com.sleepycat.je.rep.util.DbGroupAdmin |
          +       -jar je-<version>.jar DbGroupAdmin}
          +   -groupName <group name>  # name of replication group
          +   -helperHosts <host:port> # identifier for one or more members
          +                            # of the replication group which can be
          +                            # contacted for group information, in
          +                            # this format:
          +                            # hostname[:port][,hostname[:port]]*
          +   -dumpGroup               # dump group information
          +   -removeMember <node name># node to be removed
          +   -updateAddress <node name> <new host:port>
          +                            # update the network address for a specified
          +                            # node. The node should not be alive when
          +                            # updating address
          +   -transferMaster [-force] <node1,node2,...> <timeout>
          +                            # transfer master role to one of the
          +                            # specified nodes.
          + 
          +
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          dumpGroup

          +
          public void dumpGroup()
          +
          Display group information. Lists all members and the group master. Can + be used when reviewing the group configuration.
          +
        • +
        + + + +
          +
        • +

          removeMember

          +
          public void removeMember(java.lang.String name)
          +
          Remove a node from the replication group. Once removed, a + node cannot be added again to the group under the same node name. + +

          Secondary nodes cannot be removed; they + automatically leave the group when they are shut down or become + disconnected from the master.

          +
          +
          Parameters:
          +
          name - name of the node to be removed
          +
          See Also:
          +
          ReplicationGroupAdmin.removeMember(java.lang.String)
          +
          +
        • +
        + + + +
          +
        • +

          updateAddress

          +
          public void updateAddress(java.lang.String nodeName,
          +                          java.lang.String newHostName,
          +                          int newPort)
          +
Update the network address for a specified node. The node must not be alive when its address is updated. See ReplicationGroupAdmin.updateAddress(java.lang.String, java.lang.String, int) for more information.

The address of a NodeType.SECONDARY node cannot be updated with this method, since nodes must be members but not alive to be updated, and secondary nodes are not members when they are not alive. To change the address of a secondary node, restart the node with the updated address.

          +
          +
          Parameters:
          +
          nodeName - the name of the node whose address will be updated
          +
          newHostName - the new host name of the node
          +
          newPort - the new port number of the node
          +
          +
        • +
        + + + + +
      • +
      +
    • +
    +
    +
    + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/DbPing.html b/docs/java/com/sleepycat/je/rep/util/DbPing.html new file mode 100644 index 0000000..9fcd600 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/DbPing.html @@ -0,0 +1,334 @@ + + + + + +DbPing (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je.rep.util
    +

    Class DbPing

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.rep.util.DbPing
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbPing
      +extends java.lang.Object
      +
This class provides a utility to request the current state of a replica in a JE replication group; see NodeState for more details.
      +
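A hedged sketch of programmatic use; obtaining the ReplicationNode through ReplicationGroupAdmin.getGroup().getMember(...) is an assumption based on the classes documented in this package, and all names are invented:

import java.net.InetSocketAddress;
import java.util.Collections;
import com.sleepycat.je.rep.NodeState;
import com.sleepycat.je.rep.ReplicationNode;
import com.sleepycat.je.rep.util.DbPing;
import com.sleepycat.je.rep.util.ReplicationGroupAdmin;

public class PingDemo {
    public static void main(String[] args) throws Exception {
        ReplicationGroupAdmin admin = new ReplicationGroupAdmin(
            "UniversalRepGroup",
            Collections.singleton(new InetSocketAddress("mars", 5001)));

        // Look up the node of interest, then request its current state.
        ReplicationNode node = admin.getGroup().getMember("nodeMars");
        DbPing ping = new DbPing(node, "UniversalRepGroup", 10000 /* ms */);
        NodeState state = ping.getNodeState();
        System.out.println(state);
    }
}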
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbPing(ReplicationNode repNode, + java.lang.String groupName, + int socketTimeout) +
        Create a DbPing instance for programmatic use.
        +
        +
      • +
      + + +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbPing

          +
          public DbPing(ReplicationNode repNode,
          +              java.lang.String groupName,
          +              int socketTimeout)
          +
          Create a DbPing instance for programmatic use.
          +
          +
          Parameters:
          +
          repNode - a class that implements + ReplicationNode
          +
          groupName - name of the group which the node joins
          +
          socketTimeout - timeout value for creating a socket connection + with the node
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] args)
          +                 throws java.lang.Exception
          +
          Usage: +
          + java {com.sleepycat.je.rep.util.DbPing |
          +       -jar je-<version>.jar DbPing}
          +   -nodeName <node name> # name of the node whose state is
          +                               # requested
          +   -groupName <group name> # name of the group which the node joins
          +   -nodeHost <host:port> # the host name and port pair the node
          +                               # used to join the group
          +   -socketTimeout              # the timeout value for creating a
          +                               # socket connection with the node,
          +                               # default is 10 seconds if not set
          + 
          +
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          getNodeState

          +
          public NodeState getNodeState()
          +                       throws java.io.IOException,
          +                              com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException
          +
          +
          Throws:
          +
          java.io.IOException
          +
          com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/DbResetRepGroup.html b/docs/java/com/sleepycat/je/rep/util/DbResetRepGroup.html new file mode 100644 index 0000000..c713a81 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/DbResetRepGroup.html @@ -0,0 +1,416 @@ + + + + + +DbResetRepGroup (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je.rep.util
    +

    Class DbResetRepGroup

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.rep.util.DbResetRepGroup
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbResetRepGroup
      +extends java.lang.Object
      +
A utility to reset the members of a replication group, replacing the group with a new group consisting of a single new member as described by the arguments supplied to the utility.

This utility is useful when a copy of an existing replicated environment needs to be used at a different site, with the same data, but with a different initial node that can be used to grow the replication group as usual. The utility can also be used to change the group name associated with the environment.

The reset environment has a different identity from the environment before the reset operation, although it contains the same application data. To avoid confusion, the reset environment is assigned a new internal unique id. The unique id is checked whenever nodes attempt to communicate with each other and ensures that all nodes in a group are dealing with the same data.

The reset process is typically accomplished using the steps outlined below. It's good practice to back up your environment before running any utilities that modify an environment.

        +
1. Use DbResetRepGroup to reset an existing environment. DbResetRepGroup can be used as a command line utility, and must be executed locally on the host specified in the -nodeHostPort argument. The host must also contain the environment directory. Alternatively, DbResetRepGroup may be used programmatically through the provided APIs.
2. Once reset, the environment can be opened with a ReplicatedEnvironment, using the same node configuration as the one that was passed in to the utility. No helper host configuration is needed. Since the group consists of a single node, it will assume the role of a Master, so long as it is created as an electable node.
3. Additional nodes may now be created and can join the group as newly created replicas, as described in ReplicatedEnvironment. Since these new nodes are empty, they should be configured to use the new master as their helper node, and will go through the replication node lifecycle to populate their environment directories. In this case, there will be data in the converted master that can only be transferred to the replica through a file copy executed with the help of a NetworkRestore.

      + For example: + +

      + // Run the utility on a copy of an existing replicated environment. Usually
      + // this environment will have originated on a different node and its
      + // replication group information will contain meta data referring to its
      + // previous host. The utility will reset this metadata so that it has a
      + // rep group (UniversalRepGroup) with a single node named nodeMars. The node
      + // is associated with the machine mars and will communicate on port 5001.
      +
      + DbResetRepGroup resetUtility =
      +     new DbResetRepGroup(envDirMars,          // env home dir
      +                         "UniversalRepGroup", // group name
      +                         "nodeMars",          // node name
      +                         "mars:5001");        // node host,port
      + resetUtility.reset();
      +
      + // Open the reset environment; it will take on the role of master.
      + ReplicatedEnvironment nodeMars = new ReplicatedEnvironment(envDirMars, ...);
      + ...
      + // Bring up additional nodes, which will be initialized from
      + // nodeMars. For example, from the machine venus you can now add a new
      + // member to the group(UniversalRepGroup) as below.
      +
      + ReplicationConfig repConfig = null;
      + try {
      +     repConfig = new ReplicationConfig("UniversalRepGroup", // groupName
      +                                       "nodeVenus",         // nodeName
      +                                       "venus:5008");       // nodeHostPort
      +     repConfig.setHelperHosts("mars:5001");
      +
      +     nodeVenus = new ReplicatedEnvironment(envDirB, repConfig, envConfig);
      + } catch (InsufficientLogException insufficientLogEx) {
      +
      +     // log files will be copied from another node in the group
      +     NetworkRestore restore = new NetworkRestore();
      +     restore.execute(insufficientLogEx, new NetworkRestoreConfig());
      +
      +     // try opening the node now that the environment files have been
      +     // restored on this machine.
      +     nodeVenus = new ReplicatedEnvironment(envDirVenus,
      +                                           repConfig,
      +                                           envConfig);
      + }
      + ...
      + 
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbResetRepGroup(java.io.File envHome, + java.lang.String groupName, + java.lang.String nodeName, + java.lang.String nodeHostPort) +
        Create a DbResetRepGroup object for this node.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static voidmain(java.lang.String[] args) +
        Usage:
        +
        voidreset() +
        Replaces the existing group with the new group having a single new node + as described by the constructor arguments.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbResetRepGroup

          +
          public DbResetRepGroup(java.io.File envHome,
          +                       java.lang.String groupName,
          +                       java.lang.String nodeName,
          +                       java.lang.String nodeHostPort)
          +
          Create a DbResetRepGroup object for this node.
          +
          +
          Parameters:
          +
          envHome - The node's replicated environment directory. The + directory must be accessible on this host.
          +
          groupName - The name of the new replication group
          +
          nodeName - The node's name
          +
          nodeHostPort - The host and port for this node. The utility + must be executed on this host.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] args)
          +
          Usage: +
          + java -cp je.jar com.sleepycat.je.rep.util.DbResetRepGroup
          +   -h <dir>                          # environment home directory
          +   -groupName <group name>           # replication group name
          +   -nodeName <node name>             # replicated node name
          +   -nodeHostPort <host name:port number> # host name or IP address
          +                                             and port number to use
          +                                             for this node
          + 
          +
        • +
        + + + +
          +
        • +

          reset

          +
          public void reset()
          +
          Replaces the existing group with the new group having a single new node + as described by the constructor arguments.
          +
          +
          See Also:
          +
          DbResetRepGroup
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/ReplicationGroupAdmin.html b/docs/java/com/sleepycat/je/rep/util/ReplicationGroupAdmin.html new file mode 100644 index 0000000..623c974 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/ReplicationGroupAdmin.html @@ -0,0 +1,596 @@ + + + + + +ReplicationGroupAdmin (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.je.rep.util
    +

    Class ReplicationGroupAdmin

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.rep.util.ReplicationGroupAdmin
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class ReplicationGroupAdmin
      +extends java.lang.Object
      +
Administrative APIs for use by applications that do not have direct access to a replicated environment. The class supplies methods that can be used to list group members, remove members, update network addresses, and find the current master.

Information is found and updated by querying nodes in the group. Because of that, ReplicationGroupAdmin can only obtain information when at least one node in the replication group is alive.
      +
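A hedged usage sketch based on the constructor and method summaries below; the group name and helper address are invented, and ReplicationGroup.getNodes() and ReplicationNode.getSocketAddress() are assumptions drawn from the related class descriptions:

import java.net.InetSocketAddress;
import java.util.Collections;
import com.sleepycat.je.rep.ReplicationGroup;
import com.sleepycat.je.rep.ReplicationNode;
import com.sleepycat.je.rep.util.ReplicationGroupAdmin;

public class WhoIsMaster {
    public static void main(String[] args) {
        ReplicationGroupAdmin admin = new ReplicationGroupAdmin(
            "UniversalRepGroup",
            Collections.singleton(new InetSocketAddress("mars", 5001)));

        System.out.println("master: " + admin.getMasterNodeName());

        // Enumerate the group as reported by the queried helper node.
        ReplicationGroup group = admin.getGroup();
        for (ReplicationNode node : group.getNodes()) {
            System.out.println(node.getName() + " @ " + node.getSocketAddress());
        }
    }
}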
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        ReplicationGroupAdmin(java.lang.String groupName, + java.util.Set<java.net.InetSocketAddress> helperSockets) +
        Constructs a group admin object.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        ReplicationGroupgetGroup() +
        Returns the current composition of the group from the Master.
        +
        java.lang.StringgetGroupName() +
        Returns the name of the replication group.
        +
        java.util.Set<java.net.InetSocketAddress>getHelperSockets() +
        Returns the helper sockets being used to contact a replication group + member, in order to query for the information.
        +
        java.lang.StringgetMasterNodeName() +
Returns the node name associated with the master.
        +
        NodeStategetNodeState(ReplicationNode repNode, + int socketConnectTimeout) +
Returns the state of a replicated node and the state of the application in which the node is running.
        +
        voidremoveMember(java.lang.String nodeName) +
        Removes this node from the group, so that it is no longer a member of + the group.
        +
        voidsetHelperSockets(java.util.Set<java.net.InetSocketAddress> helperSockets) +
        Sets the helper sockets being used to contact a replication group + member, in order to query for the information.
        +
        java.lang.StringtransferMaster(java.util.Set<java.lang.String> nodeNames, + int timeout, + java.util.concurrent.TimeUnit timeUnit, + boolean force) +
        Transfers the master state from the current master to one of the + electable replicas supplied in the argument list.
        +
        voidupdateAddress(java.lang.String nodeName, + java.lang.String newHostName, + int newPort) +
        Update the network address for a specified member of the replication + group.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ReplicationGroupAdmin

          +
          public ReplicationGroupAdmin(java.lang.String groupName,
          +                             java.util.Set<java.net.InetSocketAddress> helperSockets)
          +
          Constructs a group admin object.
          +
          +
          Parameters:
          +
          groupName - the name of the group to be administered
          +
          helperSockets - the sockets on which it can contact helper nodes + in the replication group to carry out admin services.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getHelperSockets

          +
          public java.util.Set<java.net.InetSocketAddress> getHelperSockets()
          +
          Returns the helper sockets being used to contact a replication group + member, in order to query for the information.
          +
          +
          Returns:
          +
          the set of helper sockets.
          +
          +
        • +
        + + + +
          +
        • +

          setHelperSockets

          +
          public void setHelperSockets(java.util.Set<java.net.InetSocketAddress> helperSockets)
          +
          Sets the helper sockets being used to contact a replication group + member, in order to query for the information.
          +
          +
          Parameters:
          +
          helperSockets - the sockets on which it can contact helper nodes + in the replication group to carry out admin services.
          +
          +
        • +
        + + + +
          +
        • +

          getGroupName

          +
          public java.lang.String getGroupName()
          +
          Returns the name of the replication group.
          +
          +
          Returns:
          +
          the group name.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          removeMember

          +
          public void removeMember(java.lang.String nodeName)
          +                  throws UnknownMasterException,
          +                         MemberNotFoundException,
          +                         MasterStateException,
          +                         EnvironmentFailureException
          +
Removes the node from the group, so that it is no longer a member of the group. When removed, it will no longer be able to connect to a master, nor can it participate in elections. If the node is a Monitor it will no longer be informed of election results. Once removed, a node cannot be added again to the group under the same node name.

Ideally, the node being removed should be shut down before this call is issued.

If the node is an active Replica, the master will terminate its connection with the node and will not allow the replica to reconnect with the group, since it is no longer a member of the group. If the node wishes to re-join it should do so with a different node name.

An active Master cannot be removed. It must first be shut down, or transition to the Replica state, before it can be removed from the group.

Secondary nodes cannot be removed; they automatically leave the group when they are shut down or become disconnected from the master.

          +
          +
          Parameters:
          +
          nodeName - identifies the node being removed from the group
          +
          Throws:
          +
          UnknownMasterException - if the master was not found
          +
          java.lang.IllegalArgumentException - if the type of the node is SECONDARY
          +
          MemberNotFoundException - if the node denoted by + nodeName is not a member of the replication group
          +
          MasterStateException - if the member being removed is currently + the Master
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          See Also:
          +
          Adding and Removing Nodes From the Group
          +
          +
        • +
        + + + + + + + +
          +
        • +

          getNodeState

          +
          public NodeState getNodeState(ReplicationNode repNode,
          +                              int socketConnectTimeout)
          +                       throws java.io.IOException,
          +                              com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException
          +
Returns the state of a replicated node and the state of the application in which the node is running.
          +
          +
          Parameters:
          +
repNode - a ReplicationNode that includes the information needed to connect to the node
          +
          socketConnectTimeout - the timeout value for creating a socket + connection with the replicated node
          +
          Returns:
          +
          the state of the replicated node
          +
          Throws:
          +
          java.io.IOException - if the machine is down or no response is returned
          +
com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException - if a connection cannot be made to the service running on the replicated node
          +
          +
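A minimal sketch of querying a member's state follows. The group name, helper address, and node name are hypothetical placeholders; it also assumes that admin.getGroup().getMember(...) yields the ReplicationNode to query, and that the socket connect timeout is in milliseconds.

    import java.net.InetSocketAddress;
    import java.util.Collections;

    import com.sleepycat.je.rep.NodeState;
    import com.sleepycat.je.rep.ReplicationNode;
    import com.sleepycat.je.rep.util.ReplicationGroupAdmin;

    public class NodeStateExample {
        public static void main(String[] args) throws Exception {
            ReplicationGroupAdmin admin = new ReplicationGroupAdmin(
                "myGroup",
                Collections.singleton(new InetSocketAddress("host1", 5001)));

            // Look up the member to query, then ask for its current state,
            // allowing up to 10 seconds to establish the socket connection.
            ReplicationNode member = admin.getGroup().getMember("node2");
            NodeState state = admin.getNodeState(member, 10000);
            System.out.println(state);
        }
    }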

          updateAddress

          +
          public void updateAddress(java.lang.String nodeName,
          +                          java.lang.String newHostName,
          +                          int newPort)
          +                   throws EnvironmentFailureException,
          +                          MasterStateException,
          +                          MemberNotFoundException,
          +                          ReplicaStateException,
          +                          UnknownMasterException
          +
Updates the network address for a specified member of the replication group. The target node cannot be alive when its address is updated. One common use case is when a replication member must be moved to a new host, possibly because of machine failure.

To make a network address change, take these steps (a code sketch follows the parameter list below):

1. Shut down the node that is being updated.
2. Use this method to change the hostname and port of the node.
3. Start the node on the new machine, or at its new port, using the new hostname/port. If the log files are available at the node, they will be reused. If no log files are available, or if they have become obsolete, the application may need to initiate a network restore operation to copy over the needed log files.

The address of a NodeType.SECONDARY node cannot be updated with this method, since nodes must be members but not alive to be updated, and secondary nodes are not members when they are not alive. To change the address of a secondary node, restart the node with the updated address.

Parameters:
    nodeName - the name of the node whose address will be updated
    newHostName - the new host name of the node
    newPort - the new port number of the node
Throws:
    EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs
    MasterStateException - if the member being updated is currently the master
    MemberNotFoundException - if the node denoted by nodeName is not a member of the replication group
    ReplicaStateException - if the member being updated is currently alive
    UnknownMasterException - if the master was not found
See Also:
    DbResetRepGroup, which can be used in a related but different use case to copy and move a group
          +
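A minimal sketch of the three-step address change described above follows; the group name, node name, and host/port values are hypothetical placeholders.

    import java.net.InetSocketAddress;
    import java.util.Collections;

    import com.sleepycat.je.rep.util.ReplicationGroupAdmin;

    public class UpdateAddressExample {
        public static void main(String[] args) {
            ReplicationGroupAdmin admin = new ReplicationGroupAdmin(
                "myGroup",
                Collections.singleton(new InetSocketAddress("host1", 5001)));

            // Step 1: shut down "node2" externally before this call.
            // Step 2: record the node's new address in the group metadata.
            admin.updateAddress("node2", "host9", 5011);
            // Step 3: restart "node2" on host9:5011. Existing log files are
            // reused if present; otherwise the application may need to
            // initiate a network restore.
        }
    }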
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/class-use/AtomicLongMax.html b/docs/java/com/sleepycat/je/rep/util/class-use/AtomicLongMax.html new file mode 100644 index 0000000..b770374 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/class-use/AtomicLongMax.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.util.AtomicLongMax (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    Uses of Class
    com.sleepycat.je.rep.util.AtomicLongMax

    +
    +
    No usage of com.sleepycat.je.rep.util.AtomicLongMax
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/class-use/DbEnableReplication.html b/docs/java/com/sleepycat/je/rep/util/class-use/DbEnableReplication.html new file mode 100644 index 0000000..93ce74e --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/class-use/DbEnableReplication.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.util.DbEnableReplication (Oracle - Berkeley DB Java Edition API) + + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.util.DbEnableReplication

    +
    +
    No usage of com.sleepycat.je.rep.util.DbEnableReplication
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/class-use/DbGroupAdmin.html b/docs/java/com/sleepycat/je/rep/util/class-use/DbGroupAdmin.html new file mode 100644 index 0000000..2de0823 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/class-use/DbGroupAdmin.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.util.DbGroupAdmin (Oracle - Berkeley DB Java Edition API) + + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.util.DbGroupAdmin

    +
    +
    No usage of com.sleepycat.je.rep.util.DbGroupAdmin
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/class-use/DbPing.html b/docs/java/com/sleepycat/je/rep/util/class-use/DbPing.html new file mode 100644 index 0000000..e179095 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/class-use/DbPing.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.util.DbPing (Oracle - Berkeley DB Java Edition API) + + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.util.DbPing

    +
    +
    No usage of com.sleepycat.je.rep.util.DbPing
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/class-use/DbResetRepGroup.html b/docs/java/com/sleepycat/je/rep/util/class-use/DbResetRepGroup.html new file mode 100644 index 0000000..b0fbc78 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/class-use/DbResetRepGroup.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.util.DbResetRepGroup (Oracle - Berkeley DB Java Edition API) + + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.util.DbResetRepGroup

    +
    +
    No usage of com.sleepycat.je.rep.util.DbResetRepGroup
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/class-use/ReplicationGroupAdmin.html b/docs/java/com/sleepycat/je/rep/util/class-use/ReplicationGroupAdmin.html new file mode 100644 index 0000000..3440cd1 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/class-use/ReplicationGroupAdmin.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.rep.util.ReplicationGroupAdmin (Oracle - Berkeley DB Java Edition API) + + + + + + + + +

    Uses of Class
    com.sleepycat.je.rep.util.ReplicationGroupAdmin

    +
    +
    No usage of com.sleepycat.je.rep.util.ReplicationGroupAdmin
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/package-frame.html b/docs/java/com/sleepycat/je/rep/util/package-frame.html new file mode 100644 index 0000000..ce5ba50 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/package-frame.html @@ -0,0 +1,25 @@ + + + + + +com.sleepycat.je.rep.util (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.je.rep.util

    + + + diff --git a/docs/java/com/sleepycat/je/rep/util/package-summary.html b/docs/java/com/sleepycat/je/rep/util/package-summary.html new file mode 100644 index 0000000..ae06a19 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/package-summary.html @@ -0,0 +1,203 @@ + + + + + +com.sleepycat.je.rep.util (Oracle - Berkeley DB Java Edition API) + + + + + + + + +

    Package com.sleepycat.je.rep.util

    +
    +
    BDB JE High Availability command line utilities and helper classes.
    +
    +

    See: Description

    +
    +
    +
      +
Class Summary

AtomicLongMax
    An atomic long that maintains a maximum value.
DbEnableReplication
    A utility to convert an existing, non-replicated JE environment for replication.
DbGroupAdmin
    DbGroupAdmin supplies the functionality of the administrative class ReplicationGroupAdmin in a convenient command line utility.
DbPing
    A utility to request the current state of a replica in a JE replication group; see NodeState for details.
DbResetRepGroup
    A utility to reset the members of a replication group, replacing the group with a new group consisting of a single new member as described by the arguments supplied to the utility.
ReplicationGroupAdmin
    Administrative APIs for use by applications which do not have direct access to a replicated environment.

    Package com.sleepycat.je.rep.util Description

BDB JE High Availability command line utilities and helper classes.

Package Specification

This package provides support for activities such as administering and starting up replication groups.

See Also:
    Replication Guide, Chapter 4: Utilities
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/package-tree.html b/docs/java/com/sleepycat/je/rep/util/package-tree.html new file mode 100644 index 0000000..c581d52 --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/package-tree.html @@ -0,0 +1,147 @@ + + + + + +com.sleepycat.je.rep.util Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +

    Hierarchy For Package com.sleepycat.je.rep.util

Package Hierarchies:
    All Packages

Class Hierarchy

java.lang.Object
    com.sleepycat.je.rep.util.AtomicLongMax
    com.sleepycat.je.rep.util.DbEnableReplication
    com.sleepycat.je.rep.util.DbGroupAdmin
    com.sleepycat.je.rep.util.DbPing
    com.sleepycat.je.rep.util.DbResetRepGroup
    com.sleepycat.je.rep.util.ReplicationGroupAdmin
    + + diff --git a/docs/java/com/sleepycat/je/rep/util/package-use.html b/docs/java/com/sleepycat/je/rep/util/package-use.html new file mode 100644 index 0000000..2886f3e --- /dev/null +++ b/docs/java/com/sleepycat/je/rep/util/package-use.html @@ -0,0 +1,129 @@ + + + + + +Uses of Package com.sleepycat.je.rep.util (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Package
    com.sleepycat.je.rep.util

    +
    +
    No usage of com.sleepycat.je.rep.util
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/ConsoleHandler.html b/docs/java/com/sleepycat/je/util/ConsoleHandler.html new file mode 100644 index 0000000..dd958d0 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/ConsoleHandler.html @@ -0,0 +1,298 @@ + + + + + +ConsoleHandler (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    com.sleepycat.je.util
    +

    Class ConsoleHandler

java.lang.Object
    java.util.logging.Handler
        java.util.logging.StreamHandler
            java.util.logging.ConsoleHandler
                com.sleepycat.je.util.ConsoleHandler
public class ConsoleHandler
extends java.util.logging.ConsoleHandler

JE instances of java.util.logging.Logger are configured to use this implementation of java.util.logging.ConsoleHandler. By default, the handler's level is Level.OFF. To enable console output, use the standard java.util.logging.LogManager configuration to set the desired level:

    com.sleepycat.je.util.ConsoleHandler.level=ALL

JE augments the java.util.logging API with a JE environment parameter for setting handler levels. This is described in greater detail in Chapter 12. Administering Berkeley DB Java Edition Applications.

See Also:
    Chapter 12. Logging, Using JE Trace Logging
      +
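As a sketch of the per-environment alternative mentioned above, assuming the handler-level property key shown in the class description can be passed to EnvironmentConfig.setConfigParam (the method and class names are otherwise illustrative):

    import com.sleepycat.je.EnvironmentConfig;

    public class ConsoleLoggingConfig {
        public static EnvironmentConfig withConsoleLogging() {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            // Enable JE console output for this environment only, as an
            // alternative to the global LogManager configuration.
            envConfig.setConfigParam(
                "com.sleepycat.je.util.ConsoleHandler.level", "ALL");
            return envConfig;
        }
    }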
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

Constructors

ConsoleHandler(java.util.logging.Formatter formatter, com.sleepycat.je.dbi.EnvironmentImpl envImpl)
        +
      • +
      + +
        +
      • + + +

        Method Summary

        +
          +
        • + + +

          Methods inherited from class java.util.logging.ConsoleHandler

          +close, publish
        • +
        +
          +
        • + + +

          Methods inherited from class java.util.logging.StreamHandler

          +flush, isLoggable, setEncoding, setOutputStream
        • +
        +
          +
        • + + +

          Methods inherited from class java.util.logging.Handler

          +getEncoding, getErrorManager, getFilter, getFormatter, getLevel, reportError, setErrorManager, setFilter, setFormatter, setLevel
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ConsoleHandler

          +
          public ConsoleHandler(java.util.logging.Formatter formatter,
          +                      com.sleepycat.je.dbi.EnvironmentImpl envImpl)
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + diff --git a/docs/java/com/sleepycat/je/util/DbBackup.html b/docs/java/com/sleepycat/je/util/DbBackup.html new file mode 100644 index 0000000..5f83c93 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbBackup.html @@ -0,0 +1,868 @@ + + + + + +DbBackup (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    com.sleepycat.je.util
    +

    Class DbBackup

    +
java.lang.Object
    com.sleepycat.je.util.DbBackup
    +
      +
    • +
      +
      +
      public class DbBackup
      +extends java.lang.Object
      +
DbBackup is a helper class for stopping and restarting JE background activity in an open environment in order to simplify backup operations. It also lets the application create a backup which can support restoring the environment to a specific point in time.

Backing up without DbBackup

Because JE has an append-only log file architecture, it is always possible to do a hot backup without the use of DbBackup by copying all log files (.jdb files) to your archival location. As long as all log files are copied in alphabetical order (numerical, in effect), the environment can be successfully backed up without any need to stop database operations or background activity. This means that your backup operation must loop to check for the creation of new log files before deciding that the backup is finished. For example:

      + time    files in                    activity
      +         environment
      +
      +  t0     000000001.jdb     Backup starts copying file 1
      +         000000003.jdb
      +         000000004.jdb
      +
      +  t1     000000001.jdb     JE log cleaner migrates portion of file 3 to newly
      +         000000004.jdb     created file 5 and deletes file 3. Backup finishes
      +         000000005.jdb     file 1, starts copying file 4. Backup MUST include
      +                           file 5 for a consistent backup!
      +
      +  t2     000000001.jdb     Backup finishes copying file 4, starts and
      +         000000004.jdb     finishes file 5, has caught up. Backup ends.
      +         000000005.jdb
      +
      +

In the example above, the backup operation must be sure to copy file 5, which came into existence after the backup had started. If the backup stopped operations at file 4, the backup set would include only files 1 and 4, omitting file 5, which would be an inconsistent set.

Also note that log file 5 may not have filled up before it was copied to archival storage. On the next backup, there might be a newer, larger version of file 5, and that newer version should replace the older file 5 in archive storage.

Using the approach above, as opposed to using DbBackup, will copy all files, including reserved files as well as active files. A large number of reserved files may be present in an HA Environment, and they are essentially wasted space in a backup. Using DbBackup is strongly recommended for this reason, as well as to reduce the complexity of file copying.

Backing up with DbBackup

DbBackup helps simplify application backup by defining the set of active files that must be copied for each backup operation. If the environment directory has read/write protection, the application must pass DbBackup an open, read/write environment handle.

When entering backup mode, JE determines the set of active files needed for a consistent backup, and freezes all changes to those files. The application can copy that defined set of files and finish the operation without checking for the ongoing creation of new files. Also, there will be no need to check for a newer version of the last file on the next backup.

In the example above, if DbBackup had been used at t0, the application would only have to copy files 1, 3 and 4 to back up. On a subsequent backup, the application could start its copying at file 5. There would be no need to check for a newer version of file 4.

When it is important to minimize the time that it takes to recover using a backup, a checkpoint should be performed immediately before calling startBackup(). This will reduce recovery time when opening the environment with the restored log files. A checkpoint is performed explicitly by calling Environment.checkpoint(com.sleepycat.je.CheckpointConfig) using a config object for which setForce(true) has been called.

Performing simple/full backups

The following example shows how to perform a full backup. A checkpoint is performed to minimize recovery time.

      + void myBackup(Environment env, File destDir) {
      +     DbBackup backupHelper = new DbBackup(env);
      +
      +     // Optional: Do a checkpoint to reduce recovery time after a restore.
      +     env.checkpoint(new CheckpointConfig().setForce(true));
      +
      +     // Start backup, find out what needs to be copied.
      +     backupHelper.startBackup();
      +     try {
      +         // Copy the necessary files to archival storage.
      +         String[] filesToCopy = backupHelper.getLogFilesInBackupSet();
      +         myCopyFiles(env, backupHelper, filesToCopy, destDir);
      +     } finally {
      +         // Remember to exit backup mode, or the JE cleaner cannot delete
      +         // log files and disk usage will grow without bounds.
      +        backupHelper.endBackup();
      +     }
      + }
      +
      + void myCopyFiles(
      +     Environment env,
      +     DbBackup backupHelper,
      +     String[] filesToCopy,
      +     File destDir) {
      +
      +     for (String fileName : filesToCopy) {
      +         // Copy fileName to destDir.
      +         // See LogVerificationReadableByteChannel and
      +         // LogVerificationInputStream.
      +         ....
      +
      +         // Remove protection to allow file to be deleted in order to reclaim
      +         // disk space.
      +         backupHelper.removeFileProtection(fileName);
      +     }
      + }
      + 
When copying files to the backup directory, it is critical that each file is verified before or during the copy. If a corrupt file is copied (due, for example, to an earlier disk failure that went unnoticed), the backup will be invalid and provide a false sense of security.

The example here shows how to implement the myCopyFiles method using LogVerificationInputStream (a possible reconstruction follows below). A LogVerificationReadableByteChannel could also be used for higher performance copying. A filter input stream is used to verify the file efficiently as it is being read. If you choose to use a script for copying files, the DbVerifyLog command line tool can be used instead.
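A possible reconstruction of that example: a sketch of myCopyFiles using LogVerificationInputStream, assuming its (Environment, InputStream, String) constructor. The buffer size and error handling are illustrative, and multi-data-directory layouts ("dataNNN/" prefixes) are not specially handled.

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.util.DbBackup;
    import com.sleepycat.je.util.LogVerificationInputStream;

    class VerifiedCopy {
        static void myCopyFiles(Environment env, DbBackup backupHelper,
                                String[] filesToCopy, File destDir)
            throws IOException {

            final byte[] buf = new byte[64 * 1024];
            for (String fileName : filesToCopy) {
                File src = new File(env.getHome(), fileName);
                File dest = new File(destDir, fileName);
                // The verification stream checksums each log entry as it is
                // read, failing the copy if the source file is corrupt.
                try (InputStream in = new LogVerificationInputStream(
                         env, new FileInputStream(src), fileName);
                     OutputStream out = new FileOutputStream(dest)) {
                    int len;
                    while ((len = in.read(buf)) > 0) {
                        out.write(buf, 0, len);
                    }
                }
                // Allow the copied file to be deleted to reclaim disk space.
                backupHelper.removeFileProtection(fileName);
            }
        }
    }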

Assuming that the full backup copied files into an empty directory, to restore you can simply copy these files back into another empty directory.

Always start with an empty directory as the destination for a full backup or a restore, to ensure that no unused files are present. Unused files -- perhaps the residual of an earlier environment or an earlier backup -- will take up space, and they will never be deleted by the JE log cleaner. Also note that such files will not be used by JE for calculating utilization and will not appear in the DbSpace output.

Performing incremental backups

Incremental backups are used to reduce the number of files copied during each backup. Compared to a full backup, two additional pieces of information are needed for an incremental backup: the number of the last file in the previous backup, and a list of the active files in the environment directory at the time of the current backup, i.e., the current snapshot. Their purpose is explained below.

The number of the last file in the previous backup is used to avoid copying files that are already present in the backup set. This file number must be obtained before beginning the backup, either by checking the backup archive, or by getting this value from a stored location. For example, the last file number could be written to a special file in the backup set at the time of a backup, and then read from the special file before starting the next backup.

The list of files in the current snapshot, which should be obtained by calling getLogFilesInSnapshot() (after calling startBackup()), is used to avoid unused files after a restore, and may also be used to reduce the size of the backup set. How to use this list is described below.

Some applications need the ability to restore to the point in time of any of the incremental backups made in the past, while others only need to restore to the point in time of the most recent backup. Accordingly, the list of current files (made at the time of the backup) should be used in one of two ways.

1. If you only need to restore to the point in time of the most recent backup, then the list should be used to delete unused files from the backup set. After copying all files during the backup, any file that is not present in the list may then be deleted from the backup set. This both reduces the size of the backup set, and ensures that unused files will not be present in the backup set and therefore will not be restored.

2. If you need to keep all log files from each backup so you can restore to more than one point in time, then the list for each backup should be saved with the backup file set so it can be used during a restore. During the restore, only the files in the list should be copied, starting with an empty destination directory. This ensures that unused files will not be restored.

The following two examples show how to perform an incremental backup. In the first example, the list of current files is used to delete files from the backup set that are no longer needed.

      + void myBackup(Environment env, File destDir) {
      +
      +     // Get the file number of the last file in the previous backup.
      +     long lastFileInPrevBackup =  ...
      +
      +     DbBackup backupHelper = new DbBackup(env, lastFileInPrevBackup);
      +
      +     // Optional: Do a checkpoint to reduce recovery time after a restore.
      +     env.checkpoint(new CheckpointConfig().setForce(true));
      +
      +     // Start backup, find out what needs to be copied.
      +     backupHelper.startBackup();
      +     try {
      +         // Copy the necessary files to archival storage.
      +         String[] filesToCopy = backupHelper.getLogFilesInBackupSet();
      +         myCopyFiles(env, backupHelper, filesToCopy, destDir);
      +
      +         // Delete files that are no longer needed.
      +         // WARNING: This should only be done after copying all new files.
      +         String[] filesInSnapshot = backupHelper.getLogFilesInSnapshot();
      +         myDeleteUnusedFiles(destDir, filesInSnapshot);
      +
      +         // Update knowledge of last file saved in the backup set.
      +         lastFileInPrevBackup = backupHelper.getLastFileInBackupSet();
      +         // Save lastFileInPrevBackup persistently here ...
      +     } finally {
      +         // Remember to exit backup mode, or the JE cleaner cannot delete
      +         // log files and disk usage will grow without bounds.
      +        backupHelper.endBackup();
      +     }
      + }
      +
      + void myDeleteUnusedFiles(File destDir, String[] filesInSnapshot) {
      +     // For each file in destDir that is NOT in filesInSnapshot, it should
      +     // be deleted from destDir to save disk space in the backup set, and to
      +     // ensure that unused files will not be restored.
      + }
      +
      + See myCopyFiles further above.
      + 
      +

When performing backups as shown in the first example above, to restore you can simply copy all files from the backup set into an empty directory.

In the second example below, the list of current files is saved with the backup set so it can be used during a restore. The backup set will effectively hold multiple backups that can be used to restore to different points in time.

      + void myBackup(Environment env, File destDir) {
      +
      +     // Get the file number of the last file in the previous backup.
      +     long lastFileInPrevBackup =  ...
      +
      +     DbBackup backupHelper = new DbBackup(env, lastFileInPrevBackup);
      +
      +     // Optional: Do a checkpoint to reduce recovery time after a restore.
      +     env.checkpoint(new CheckpointConfig().setForce(true));
      +
      +     // Start backup, find out what needs to be copied.
      +     backupHelper.startBackup();
      +     try {
      +         // Copy the necessary files to archival storage.
      +         String[] filesToCopy = backupHelper.getLogFilesInBackupSet();
      +         myCopyFiles(env, backupHelper, filesToCopy, destDir);
      +
      +         // Save current list of files with backup data set.
      +         String[] filesInSnapshot = backupHelper.getLogFilesInSnapshot();
      +         // Save filesInSnapshot persistently here ...
      +
      +         // Update knowledge of last file saved in the backup set.
      +         lastFileInPrevBackup = backupHelper.getLastFileInBackupSet();
      +         // Save lastFileInPrevBackup persistently here ...
      +     } finally {
      +         // Remember to exit backup mode, or the JE cleaner cannot delete
      +         // log files and disk usage will grow without bounds.
      +        backupHelper.endBackup();
      +     }
      + }
      +
      + See myCopyFiles further above.
      + 
      +

When performing backups as shown in the second example above, to restore you must choose one of the saved file lists. You may choose the list written by the most recent backup, or a list written by an earlier backup. To restore, the files in the list should be copied into an empty destination directory.

Restoring from a backup

As described in the sections above, the restore procedure is to copy the files from a backup set into an empty directory. Depending on the type of backup that was performed (see above), either all files from the backup set are copied, or only the files on a list that was created during the backup.

There is one additional consideration when performing a restore, under the following condition:

• Incremental backups are used, AND
    • the backup was created using DbBackup with JE 6.2 or earlier, OR
    • the backup was created in a read-only JE environment.

If the above condition holds, an additional step is needed after copying the files. To enable the creation of future incremental backups using the restored files, the EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE parameter should be set to true when opening the JE Environment for the first time after the restore. When this parameter is set to true, the last .jdb file restored will not be modified when opening the Environment, and the next .jdb file will be created and will become the end-of-log file.

WARNING: When the above special condition is true and this property is not set to true when opening the environment for the first time after a restore, the backup set that was restored may not be used as the basis for future incremental backups. If a future incremental backup were performed based on this backup set, it would be incomplete, and data would be lost if that incremental backup were restored.

When JE 6.3 or later is used to create the backup, and the backup is created in a read-write environment (the usual case), this extra step is unnecessary. In this case, startBackup() will have added an "immutable file" marker to the last file in the backup, which prevents that file from being modified, just as if the ENV_RECOVERY_FORCE_NEW_FILE parameter were set to true.
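A minimal sketch of that first post-restore open follows; the environment home path is a hypothetical placeholder, and the configuration is otherwise illustrative.

    import java.io.File;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class RestoreOpenExample {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setTransactional(true);
            // Only needed under the special condition described above
            // (backup made with JE 6.2 or earlier, or in a read-only
            // environment): keep the last restored .jdb file immutable.
            config.setConfigParam(
                EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE, "true");

            Environment env = new Environment(
                new File("/restore/envHome"), config);
            // ... normal operation; future incremental backups may now be
            // based on the restored backup set.
            env.close();
        }
    }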

      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

Constructors

DbBackup(Environment env)
    Creates a DbBackup helper for a full backup.
DbBackup(Environment env, long lastFileInPrevBackup)
    Creates a DbBackup helper for an incremental backup.
        +
      • +
      + +
        +
      • + + +

        Method Summary

void endBackup()
    End backup mode, thereby re-enabling normal deletion of log files by the JE log cleaner.
long getLastFileInBackupSet()
    Can only be called in backup mode, after startBackup() has been called.
java.lang.String[] getLogFilesInBackupSet()
    Get the minimum list of files that must be copied for this backup.
java.lang.String[] getLogFilesInBackupSet(long lastFileInPrevBackup)
    Deprecated. Replaced by getLogFilesInBackupSet(); pass lastFileInPrevBackup to the DbBackup(Environment,long) constructor.
java.lang.String[] getLogFilesInSnapshot()
    Get the list of all active files that are needed for the environment at the point in time when backup mode started, i.e., the current snapshot.
void removeFileProtection(java.lang.String fileName)
    Removes protection for a file in the backup set.
void startBackup()
    Start backup mode in order to determine the definitive backup set needed at this point in time.
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbBackup

          +
          public DbBackup(Environment env)
          +         throws DatabaseException
          +
          Creates a DbBackup helper for a full backup. + +

          This is equivalent to using DbBackup(Environment,long) and + passing -1 for the lastFileInPrevBackup parameter.

          +
          +
          Parameters:
          +
          env - with an open, valid environment handle. If the environment + directory has read/write permissions, the environment handle must be + configured for read/write.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the environment directory has + read/write permissions, but the environment handle is not configured for + read/write.
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          DbBackup

          +
          public DbBackup(Environment env,
          +                long lastFileInPrevBackup)
          +
          Creates a DbBackup helper for an incremental backup.
          +
          +
          Parameters:
          +
          env - with an open, valid environment handle. If the environment + directory has read/write permissions, the environment handle must be + configured for read/write.
          +
          lastFileInPrevBackup - the last file in the previous backup set + when performing an incremental backup, or -1 to perform a full + backup. The first file in this backup set will be the file following + lastFileInPrevBackup.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalArgumentException - if the environment directory has + read/write permissions, but the environment handle is not configured for + read/write.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          startBackup

          +
          public void startBackup()
          +                 throws DatabaseException
          +
          Start backup mode in order to determine the definitive backup set needed + at this point in time. + +

          This method determines the last file in the backup set, which is the + last log file in the environment at this point in time. Following this + method call, all new data will be written to other, new log files. In + other words, the last file in the backup set will not be modified after + this method returns.

          + +

          WARNING: After calling this method, deletion of log files in + the backup set by the JE log cleaner will be disabled until endBackup() is called. To prevent unbounded growth of disk usage, be + sure to call endBackup() to re-enable log file deletion. + Additionally, the Environment can't be closed until endBackup() is + called. +

          +
          +
          Throws:
          +
          LogOverwriteException - if a replication + operation is overwriting log files. The backup can not proceed because + files may be invalid. The backup may be attempted at a later time.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if a backup is already in progress
          +
          DatabaseException
          +
          +
        • +
        + + + +
          +
        • +

          endBackup

          +
          public void endBackup()
          +
          End backup mode, thereby re-enabling normal deletion of log files by the + JE log cleaner.
          +
          +
          Throws:
          +
          LogOverwriteException - if a replication + operation has overwritten log files. Any copied files should be + considered invalid and discarded. The backup may be attempted at a + later time.
          +
          EnvironmentFailureException - if an unexpected, + internal or environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if a backup has not been started.
          +
          +
        • +
        + + + +
          +
        • +

          getLastFileInBackupSet

          +
          public long getLastFileInBackupSet()
          +
          Can only be called in backup mode, after startBackup() has been called.
          +
          +
          Returns:
          +
          the file number of the last file in the current backup set. + Save this value to reduce the number of files that must be copied at + the next backup session.
          +
          Throws:
          +
          java.lang.IllegalStateException - if a backup has not been started.
          +
          +
        • +
        + + + +
          +
        • +

          getLogFilesInBackupSet

          +
          public java.lang.String[] getLogFilesInBackupSet()
          +
          Get the minimum list of files that must be copied for this backup. When + performing an incremental backup, this consists of the set of active + files that are greater than the last file copied in the previous backup + session. When performing a full backup, this consists of the set of all + active files. Can only be called in backup mode, after startBackup() has + been called. + +

          The file numbers returned are in the range from the constructor + parameter lastFileInPrevBackup + 1 to the last log file at the + time that startBackup() was called.

          +
          +
          Returns:
          +
          the names of all files to be copied, sorted in alphabetical + order. The return values are generally simple file names, not full + paths. However, if multiple data directories are being used (i.e. the + + je.log.nDataDirectories parameter is non-0), then the file names are + prepended with the associated "dataNNN/" prefix, where "dataNNN/" is + the data directory name within the environment home directory and "/" + is the relevant file separator for the platform.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if a backup has not been started.
          +
          +
        • +
        + + + +
          +
        • +

          getLogFilesInBackupSet

          +
          @Deprecated
          +public java.lang.String[] getLogFilesInBackupSet(long lastFileInPrevBackup)
          +
          Deprecated. replaced by getLogFilesInBackupSet(); pass + lastFileInPrevBackup to the DbBackup(Environment,long) + constructor.
          +
          Get the minimum list of files that must be copied for this backup. This + consists of the set of active files that are greater than the last file + copied in the previous backup session. Can only be called in backup + mode, after startBackup() has been called.
          +
          +
          Parameters:
          +
          lastFileInPrevBackup - file number of last file copied in the last + backup session, obtained from getLastFileInBackupSet().
          +
          Returns:
          +
          the names of all the files to be copied that come after + lastFileInPrevBackup.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if a backup has not been started.
          +
          +
        • +
        + + + +
          +
        • +

          getLogFilesInSnapshot

          +
          public java.lang.String[] getLogFilesInSnapshot()
          +
          Get the list of all active files that are needed for the environment at + the point of time when backup mode started, i.e., the current snapshot. + Can only be called in backup mode, after startBackup() has been called. + +

          When performing an incremental backup, this method is called to + determine the files that would needed for a restore. As described in + the examples at the top of this class, this list can be used to avoid + unused files after a restore, and may also be used to reduce the size of + the backup set.

          + +

          When performing a full backup this method is normally not needed, + since in that case it returns the same set of files that is returned by + getLogFilesInBackupSet().

          +
          +
          Returns:
          +
          the names of all files in the snapshot, sorted in alphabetical + order. The return values are generally simple file names, not full + paths. However, if multiple data directories are being used (i.e. the + + je.log.nDataDirectories parameter is non-0), then the file names are + prepended with the associated "dataNNN/" prefix, where "dataNNN/" is + the data directory name within the environment home directory and "/" + is the relevant file separator for the platform.
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.IllegalStateException - if a backup has not been started.
          +
          +
        • +
        + + + +
          +
        • +

          removeFileProtection

          +
          public void removeFileProtection(java.lang.String fileName)
          +
          Removes protection for a file in the backup set. This method should be + called after copying a file, so that it may be deleted to avoid + exceeding disk usage limits.
          +
          +
          Parameters:
          +
          fileName - a file name that has already been copied, in the format + returned by getLogFilesInBackupSet() .
          +
          Since:
          +
          7.5
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + diff --git a/docs/java/com/sleepycat/je/util/DbCacheSize.html b/docs/java/com/sleepycat/je/util/DbCacheSize.html new file mode 100644 index 0000000..6b62375 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbCacheSize.html @@ -0,0 +1,674 @@ + + + + + +DbCacheSize (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    com.sleepycat.je.util
    +

    Class DbCacheSize

    +
java.lang.Object
    com.sleepycat.je.util.DbCacheSize
    +
      +
    • +
      +
      +
      public class DbCacheSize
      +extends java.lang.Object
      +
Estimates the in-memory cache size needed to hold a specified data set. To get an estimate of the in-memory footprint for a given database, specify the number of records and the database characteristics, and DbCacheSize will return an estimate of the cache size required to hold the database in memory. Based on this information a JE main cache size can be chosen and then configured using EnvironmentMutableConfig.setCacheSize(long) or the EnvironmentConfig.MAX_MEMORY property. An off-heap cache may also be optionally configured using EnvironmentMutableConfig.setOffHeapCacheSize(long) or the EnvironmentConfig.MAX_OFF_HEAP_MEMORY property.
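As a sketch of applying the utility's output via the setters named above, assuming sizes produced by a DbCacheSize run (the byte counts below are invented placeholders):

    import com.sleepycat.je.EnvironmentConfig;

    public class CacheSizing {
        public static EnvironmentConfig configure() {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            // Main cache sized to hold all INs, per DbCacheSize output.
            config.setCacheSize(20L * 1024 * 1024 * 1024);
            // Off-heap cache sized to hold the rest of the data set.
            config.setOffHeapCacheSize(40L * 1024 * 1024 * 1024);
            return config;
        }
    }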

      Importance of the JE Cache

The JE cache is not an optional cache. It is used to hold the metadata for accessing JE data. In fact, the JE cache size is probably the most critical factor in JE performance, since Btree nodes must be fetched during a database read or write operation if they are not in cache. During a single read or write operation, at each level of the Btree where a fetch is necessary, an I/O may be required at a different disk location. In addition, if internal nodes (INs) are not in cache, then write operations will cause additional copies of the INs to be written to storage, as modified INs are moved out of the cache to make room for other parts of the Btree during subsequent operations. This additional fetching and writing means that sizing the cache too small to hold the INs will result in lower operation performance.

      + For best performance, all Btree nodes should fit in the JE cache, including + leaf nodes (LNs), which hold the record data, and INs, which hold record + keys and other metadata. However, because system memory is limited, it is + sometimes necessary to size the cache to hold all or at least most INs, but + not the LNs. This utility estimates the size necessary to hold only INs, + and the size to hold INs and LNs. +

      + In addition, a common problem with large caches is that Java GC overhead + can become significant. When a Btree node is evicted from the JE main + cache based on JE's LRU algorithm, typically the node will have been + resident in the JVM heap for an extended period of time, and will be + expensive to GC. Therefore, when most or all LNs do not fit in + the main cache, using CacheMode.EVICT_LN can be beneficial to + reduce the Java GC cost of collecting the LNs as they are moved out of the + main cache. With EVICT_LN, the LNs only reside in the JVM heap for a short + period and are cheap to collect. A recommended approach is to size the JE + main cache to hold only INs, and size the Java heap to hold that amount plus + the amount needed for GC working space and application objects, leaving + any additional memory for use by the file system cache or the off-heap + cache. Tests show this approach results in lower GC overhead and more + predictable latency. +

Another issue is that 64-bit JVMs store object references using less space when the heap size is slightly less than 32GiB. When the heap size is 32GiB or more, object references are larger and less data can be cached per GiB of memory. This JVM feature is enabled with the Compressed Oops (-XX:+UseCompressedOops) option, although in modern JVMs it is on by default. Because of this factor, and because Java GC overhead is usually higher with larger heaps, a maximum heap size slightly less than 32GiB is recommended, along with the Compressed Oops option.

      + Of course, the JE main cache size must be less than the heap size since the + main cache is stored in the heap. In fact, around 30% of free space should + normally be reserved in the heap for use by Java GC, to avoid high GC + overheads. For example, if the application uses roughly 2GiB of the heap, + then with a 32GiB heap the JE main cache should normally be no more than + 20GiB. +

      + As of JE 6.4, an optional off-heap cache may be configured in addition to + the main JE cache. See EnvironmentMutableConfig.setOffHeapCacheSize(long) for + information about the trade-offs in using an off-heap cache. When the + -offheap argument is specified, this utility displays sizing + information for both the main and off-heap caches. The portion of the data + set that fits in the main cache, and the off-heap size needed to hold the + rest of the data set, will be shown. The main cache size can be specified + with the -maincache argument, or is implied to be the amount needed + to hold all internal nodes if this argument is omitted. Omitting this + argument is appropriate when CacheMode.EVICT_LN is used, since only + internal nodes will be stored in the main cache. +

To reduce Java GC overhead, sometimes a small main cache is used along with an off-heap cache. Note that it is important that the main cache is at least large enough to hold all the upper INs (the INs at level 2 and above). This is because the off-heap cache does not contain upper INs; it only contains LNs and bottom internal nodes (BINs). When a level 2 IN is evicted from the main cache, its children (BINs and LNs) in the off-heap cache, if any, must also be evicted, which can be undesirable, especially if the off-heap cache is not full. This utility displays the main cache size needed to hold all upper INs, and displays a warning if this is smaller than the main cache size specified.

      Estimating the JE Cache Size

      + + Estimating JE in-memory sizes is not straightforward for several reasons. + There is some fixed overhead for each Btree internal node, so fanout + (maximum number of child entries per parent node) and degree of node + sparseness impacts memory consumption. In addition, JE uses various compact + in-memory representations that depend on key sizes, data sizes, key + prefixing, how many child nodes are resident, etc. The physical proximity + of node children also allows compaction of child physical address values. +

      + Therefore, when running this utility it is important to specify all EnvironmentConfig and DatabaseConfig settings that will be used in + a production system. The EnvironmentConfig settings are specified + by command line options for each property, using the same names as the + EnvironmentConfig parameter name values. For example, EnvironmentConfig.LOG_FILE_MAX, which influences the amount of memory used + to store physical record addresses, can be specified on the command line as: +

      + -je.log.fileMax LENGTH +

      + To be sure that this utility takes into account all relevant settings, + especially as the utility is enhanced in future versions, it is best to + specify all EnvironmentConfig settings used by the application. +

      + The DatabaseConfig settings are specified using command line options + defined by this utility. +

      +

      + This utility estimates the JE cache size by creating an in-memory + Environment and Database. In addition to the size of the Database, the + minimum overhead for the Environment is output. The Environment overhead + shown is likely to be smaller than actually needed because it doesn't take + into account use of memory by JE daemon threads (cleaner, checkpointer, etc) + the memory used for locks that are held by application operations and + transactions, the memory for HA network connections, etc. An additional + amount should be added to account for these factors. +

      + This utility estimates the cache size for a single JE Database, or a logical + table spread across multiple databases (as in the case of Oracle NoSQL DB, + for example). To estimate the size for multiple databases/tables with + different configuration parameters or different key and data sizes, run + this utility for each database/table and sum the sizes. If you are summing + multiple runs for multiple databases/tables that are opened in a single + Environment, the overhead size for the Environment should only be added once. +

In some applications with databases/tables having variable key and data sizes, it may be difficult to determine the key and data size input parameters for this utility. If a representative data set can be created, one approach is to use the DbPrintLog utility with the -S option to find the average key and data size for all databases/tables, and use these values as input parameters, as if there were only a single database/table. With this approach, it is important that the DatabaseConfig parameters are the same, or at least similar, for all databases/tables.

      Key Prefixing and Compaction

      + + Key prefixing deserves special consideration. It can significantly reduce + the size of the cache and is generally recommended; however, the benefit can + be difficult to predict. Key prefixing, in turn, impacts the benefits of + key compaction, and the use of the EnvironmentConfig.TREE_COMPACT_MAX_KEY_LENGTH parameter. +

      + For a given data set, the impact of key prefixing is determined by how many + leading bytes are in common for the keys in a single bottom internal node + (BIN). For example, if keys are assigned sequentially as long (8 byte) + integers, and the maximum entries + per node is 128 (the default value) then 6 or 7 of the 8 bytes of the key + will have a common prefix in each BIN. Of course, when records are deleted, + the number of prefixed bytes may be reduced because the range of key values + in a BIN will be larger. For this example we will assume that, on average, + 5 bytes in each BIN are a common prefix leaving 3 bytes per key that are + unprefixed. +

      + Key compaction is applied when the number of unprefixed bytes is less than a + configured value; see EnvironmentConfig.TREE_COMPACT_MAX_KEY_LENGTH. + In the example, the 3 unprefixed bytes per key is less than the default used + for key compaction (16 bytes). This means that each key will use 16 bytes + of memory, in addition to the amount used for the prefix for each BIN. The + per-key overhead could be reduced by changing the TREE_COMPACT_MAX_KEY_LENGTH parameter to a smaller value, but care should + be taken to ensure the compaction will be effective as keys are inserted and + deleted over time. +

      + Because key prefixing depends so much on the application key format and the + way keys are assigned, the number of expected prefix bytes must be estimated + by the user and specified to DbCacheSize using the -keyprefix + argument. + +

      Key Prefixing and Duplicates

      + + When duplicates are configured + for a Database (including DPL MANY_TO_ONE and MANY_TO_MANY secondary + indices), key prefixing is always used. This is because the internal key in + a duplicates database BIN is formed by concatenating the user-specified key + and data. In secondary databases with duplicates configured, the data is + the primary key, so the internal key is the concatenation of the secondary + key and the primary key. +

      + Key prefixing is always used for duplicates databases because prefixing is + necessary to store keys efficiently. When the number of duplicates per + unique user-specified key is more than the number of entries per BIN, the + entire user-specified key will be the common prefix. +

For example, a database that stores user information may use email address as the primary key and zip code as a secondary key. The secondary index database will be a duplicates database, and the internal key stored in the BINs will be a two-part key containing zip code followed by email address. If on average there are more users per zip code than the number of entries in a BIN, then the key prefix will normally be at least as long as the zip code key. If there are fewer (more than one zip code appears in each BIN), then the prefix will be shorter than the zip code key.

      + It is also possible for the key prefix to be larger than the secondary key. + If for one secondary key value (one zip code) there are a large number of + primary keys (email addresses), then a single BIN may contain concatenated + keys that all have the same secondary key (same zip code) and have primary + keys (email addresses) that all have some number of prefix bytes in common. + Therefore, when duplicates are specified it is possible to specify a prefix + size that is larger than the key size. + +
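      + As a hedged illustration of the zip code example: with a 5-byte secondary + key (zip code), an average 24-byte primary key (email address), and many + users per zip code, the expected prefix might be estimated at the full + secondary key length; all sizes here are hypothetical: +
      + $ java -jar je-X.Y.Z.jar DbCacheSize -records 500000 -key 5 -data 24 \
      +       -duplicates -keyprefix 5
      +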

      Small Data Sizes and Embedded LNs

      + + Another special data representation involves small data sizes. When the + data size of a record is less than or equal to EnvironmentConfig.TREE_MAX_EMBEDDED_LN (16 bytes, by default), the data + is stored (embedded) in the BIN, and the LN is not stored in cache at all. + This increases the size needed to hold all INs in cache, but it decreases + the size needed to hold the complete data set. If the data size specified + when running this utility is less than or equal to TREE_MAX_EMBEDDED_LN, + the size displayed for holding INs only will be the same as the size + displayed for holding INs and LNs. +

      + See EnvironmentConfig.TREE_MAX_EMBEDDED_LN for information about + the trade-offs in using the embedded LNs feature. + +
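      + A minimal configuration sketch (the value "32" is a hypothetical choice, + not a recommendation): +
      +    EnvironmentConfig envConfig = new EnvironmentConfig();
      +    // Hypothetical: embed record data of up to 32 bytes in the BIN,
      +    // instead of the default 16; see TREE_MAX_EMBEDDED_LN for trade-offs.
      +    envConfig.setConfigParam(
      +        EnvironmentConfig.TREE_MAX_EMBEDDED_LN, "32");
      +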

      Record Versions and Oracle NoSQL Database

      + + This note applies only when JE is used with Oracle NoSQL DB. In Oracle + NoSQL DB, an internal JE environment configuration parameter is always + used: -je.rep.preserveRecordVersion true. This allows using record + versions in operations such as "put if version", "delete if version", etc. + This feature performs best when the cache is sized large enough to hold the + record versions. +

      + When using JE with Oracle NoSQL DB, always add -je.rep.preserveRecordVersion true to the command line. This ensures that + the cache sizes calculated are correct, and also outputs an additional line + showing how much memory is required to hold the internal nodes and record + versions (but not the leaf nodes). This is the minimum recommended size + when the "... if version" operations are used. + +
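      + For example (record count and sizes are hypothetical; -replicated is + included because je.rep.preserveRecordVersion is a replication parameter): +
      + $ java -jar je-X.Y.Z.jar DbCacheSize -records 554719 -key 16 -data 100 \
      +       -replicated -je.rep.preserveRecordVersion true
      +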

      Running the DbCacheSize utility

      + + Usage: +
      + java { com.sleepycat.je.util.DbCacheSize |
      +        -jar je-<version>.jar DbCacheSize }
      +  -records COUNT
      +      # Total records (key/data pairs); required
      +  -key BYTES
      +      # Average key bytes per record; required
      +  [-data BYTES]
      +      # Average data bytes per record; if omitted no leaf
      +      # node sizes are included in the output; required with
      +      # -duplicates, and specifies the primary key length
      +  [-offheap]
      +      # Indicates that an off-heap cache will be used.
      +  [-maincache BYTES]
      +      # The size of the main cache (in the JVM heap).
      +      # The size of the off-heap cache displayed is the
      +      # additional amount needed to hold the data set.
      +      # If omitted, the main cache size is implied to
      +      # be the amount needed to hold all internal nodes.
      +      # Ignored if -offheap is not also specified.
      +  [-keyprefix BYTES]
      +      # Expected size of the prefix for the keys in each
      +      # BIN; default: key prefixing is not configured;
      +      # required with -duplicates
      +  [-nodemax ENTRIES]
      +      # Number of entries per Btree node; default: 128
      +  [-orderedinsertion]
      +      # Assume ordered insertions and no deletions, so BINs
      +      # are 100% full; default: unordered insertions and/or
      +      # deletions, BINs are 70% full
      +  [-duplicates]
      +      # Indicates that sorted duplicates are used, including
      +      # MANY_TO_ONE and MANY_TO_MANY secondary indices;
      +      # default: false
      +  [-ttl]
      +      # Indicates that TTL is used; default: false
      +  [-replicated]
      +      # Use a ReplicatedEnvironment; default: false
      +  [-ENV_PARAM_NAME VALUE]...
      +      # Any number of EnvironmentConfig parameters and
      +      # ReplicationConfig parameters (if -replicated)
      +  [-btreeinfo]
      +      # Outputs additional Btree information
      +  [-outputproperties]
      +      # Writes Java properties file to System.out
      + 
      +

      + You should run DbCacheSize on the same target platform and JVM for which you + are sizing the cache, as cache sizes will vary. You may also need to + specify -d32 or -d64 depending on your target, if the default JVM mode is + not the same as the mode to be used in production. +

      + To take full advantage of JE cache memory, it is strongly recommended that + compressed oops + (-XX:+UseCompressedOops) is specified when a 64-bit JVM is used + and the maximum heap size is less than 32 GB. As described in the + referenced documentation, compressed oops is sometimes the default JVM mode + even when it is not explicitly specified in the Java command. However, if + compressed oops is desired then it must be explicitly specified in + the Java command when running DbCacheSize or a JE application. If it is not + explicitly specified then JE will not be aware of it, even if it is the JVM + default setting, and will not take it into account when calculating cache + memory sizes. +
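      + For instance, a hypothetical invocation making compressed oops explicit: +
      + $ java -XX:+UseCompressedOops -jar je-X.Y.Z.jar DbCacheSize \
      +       -records 554719 -key 16 -data 100
      +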

      + For example: +

      + $ java -jar je-X.Y.Z.jar DbCacheSize -records 554719 -key 16 -data 100
      +
      +  === Environment Cache Overhead ===
      +
      +  3,157,213 minimum bytes
      +
      + To account for JE daemon operation, record locks, HA network connections, etc,
      + a larger amount is needed in practice.
      +
      +  === Database Cache Size ===
      +
      +  Number of Bytes  Description
      +  ---------------  -----------
      +       23,933,736  Internal nodes only
      +      107,206,616  Internal nodes and leaf nodes
      + 
      +

      + This indicates that the minimum memory size to hold only the internal nodes + of the Database Btree is approximately 24MB. The maximum size to hold the + entire database, both internal nodes and data records, is approximately + 107MB. To either of these amounts, at least 3MB (plus more for locks and + daemons) should be added to account for the environment overhead. +

      + The following example adds the use of an off-heap cache, where the main + cache size is specified to be 30MB. +

      + $ java -jar je-X.Y.Z.jar DbCacheSize -records 554719 -key 16 -data 100 \
      +      -offheap -maincache 30000000
      +
      +  === Environment Cache Overhead ===
      +
      +  5,205,309 minimum bytes
      +
      + To account for JE daemon operation, record locks, HA network connections, etc,
      + a larger amount is needed in practice.
      +
      +  === Database Cache Size ===
      +
      +  Number of Bytes  Description
      +  ---------------  -----------
      +       23,933,736  Internal nodes only: MAIN cache
      +                0  Internal nodes only: OFF-HEAP cache
      +       24,794,691  Internal nodes and leaf nodes: MAIN cache
      +       70,463,604  Internal nodes and leaf nodes: OFF-HEAP cache
      + 
      + There are several things of interest in the output. +
        +
      • The environment overhead is larger because of memory used for the + off-heap LRU.
      • +
      • To cache only internal nodes, an off-heap cache is not needed since + the internal nodes take around 24MB, which when added to the 5MB + overhead is less than the 30MB main cache specified. This is why the + number of bytes on the second line is zero.
      • +
      • To cache all nodes, the main cache size specified should be used + (25MB added to the 5MB overhead is 30MB), and an off-heap cache of + around 71MB should be configured; a configuration sketch follows this + list.
      • +
      + +
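      + A minimal sketch of applying these example results when opening the + environment (the environment path is hypothetical; the byte values are + taken from the example output above): +
      +    EnvironmentConfig envConfig = new EnvironmentConfig();
      +    envConfig.setAllowCreate(true);
      +    // 30MB main cache, as specified to DbCacheSize via -maincache.
      +    envConfig.setCacheSize(30_000_000L);
      +    // ~71MB off-heap cache, from the "OFF-HEAP cache" line above.
      +    envConfig.setOffHeapCacheSize(71_000_000L);
      +    Environment env =
      +        new Environment(new java.io.File("/path/to/env"), envConfig);
      +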

      Output Properties

      + +

      + When -outputproperties is specified, a list of properties in Java + properties file format will be written to System.out, instead of the output + shown above. The properties and their meanings are listed below. +
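      + A sketch of consuming this output (the file name and pipeline are + illustrative; the property names are described in the list that follows): +
      + $ java -jar je-X.Y.Z.jar DbCacheSize -records 554719 -key 16 -data 100 \
      +       -outputproperties > cachesize.properties
      +
      +    // Load the generated properties in Java; names as documented below.
      +    java.util.Properties props = new java.util.Properties();
      +    try (java.io.FileReader in =
      +             new java.io.FileReader("cachesize.properties")) {
      +        props.load(in);
      +    }
      +    long internalNodes =
      +        Long.parseLong(props.getProperty("internalNodes"));
      +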

        +
      • The following properties are always output (except allNodes, see + below). They describe the estimated size of the main cache. +
          +
        • overhead: The environment overhead, as shown + under Environment Cache Overhead above.
        • +
        • internalNodes: The Btree size in the main + cache for holding the internal nodes. This is the "Internal nodes + only" line above (followed by "MAIN cache" when -offheap is + specified).
        • +
        • internalNodesAndVersions: The Btree size needed + to hold the internal nodes and record versions in the main cache. + This value is zero when -offheap is specified; currently JE + does not cache record versions off-heap unless their associated LNs + are also cached off-heap, so there is no way to calculate this + property.
        • +
        • allNodes: The Btree size in the main cache + needed to hold all nodes. This is the "Internal nodes and leaf + nodes" line above (followed by "MAIN cache" when -offheap is + specified). This property is not output unless -data is + specified.
        • +
        +
      • The following properties are output only when -offheap is + specified. They describe the estimated size of the off-heap cache. +
          +
        • minMainCache: The minimum size of the main + cache needed to hold all upper INs. When the -maincache + value specified is less than this minimum, not all internal nodes + can be cached. See the discussion further above.
        • +
        • offHeapInternalNodes: The size of the off-heap + cache needed to hold the internal nodes. This is the "Internal nodes + only: OFF-HEAP cache" line above.
        • +
        • offHeapAllNodes: The size of the off-heap cache + needed to hold all nodes. This is the "Internal nodes and leaf + nodes: OFF-HEAP cache" line above. This property is not output + unless -data is specified.
        • +
        +
      • The following properties are deprecated but are output for + compatibility with earlier releases. +
          +
        • minInternalNodes, maxInternalNodes, minAllNodes, and (when + -data is specified) maxAllNodes
        • +
        +
      +
      +
      See Also:
      +
      EnvironmentMutableConfig.setCacheSize(long), +EnvironmentMutableConfig.setOffHeapCacheSize(long), +CacheMode, +Cache Statistics: + Sizing
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static voidmain(java.lang.String[] args) +
        Runs DbCacheSize as a command line utility.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] args)
          +                 throws java.lang.Throwable
          +
          Runs DbCacheSize as a command line utility. + For command usage, see class description.
          +
          +
          Throws:
          +
          java.lang.Throwable
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/DbDeleteReservedFiles.html b/docs/java/com/sleepycat/je/util/DbDeleteReservedFiles.html new file mode 100644 index 0000000..4717d8d --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbDeleteReservedFiles.html @@ -0,0 +1,279 @@ + + + + + +DbDeleteReservedFiles (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.util
    +

    Class DbDeleteReservedFiles

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.util.DbDeleteReservedFiles
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbDeleteReservedFiles
      +extends java.lang.Object
      +
      Command line utility used to delete reserved files explicitly, when + attempting to recover from a disk-full condition. + +

      When using HA (ReplicatedEnvironment), + cleaned files are reserved + and are not deleted until a disk limit is approached. Normally the + EnvironmentConfig.MAX_DISK and + EnvironmentConfig.FREE_DISK limits will + cause the reserved files to be deleted automatically to prevent + filling the disk. However, if these limits are both set to zero, or disk + space is used outside of the JE environment, it is possible for the disk + to become full. Manual recovery from this situation may require deleting + the reserved files without opening the JE Environment via the + application. This situation is not expected, but the DbDeleteReservedFiles utility provides a safeguard.

      + +

      Depending on the arguments given, the utility will either delete or list + the oldest reserved files. The files deleted or listed are those that can + be deleted in order to free the amount specified. Note that the size + deleted may be larger than the specified size, because only whole files can + be deleted.

      + +
      + java { com.sleepycat.je.util.DbDeleteReservedFiles |
      +        -jar je-<version>.jar DbDeleteReservedFiles }
      +   -h <dir>            # environment home directory
      +   -s <size in MB>     # desired size to be freed in MB
      +  [-l]                       # list reserved files/sizes, do not delete
      +  [-V]                       # print JE version number
      +
      + +

      When the application uses custom key comparators, be sure to add the + jars or classes to the classpath that contain the application's comparator + classes.

      + +

      This utility opens the JE Environment in read-only mode in order to + determine which files are reserved. To speed up this process, specify + a large Java heap size when running the utility; 32 GB is recommended.
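      + For example, to list (without deleting) the oldest reserved files that + could free roughly 100 MB (the environment path is hypothetical): +
      + $ java -jar je-X.Y.Z.jar DbDeleteReservedFiles -h /path/to/env -s 100 -l
      +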

      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + +
        All Methods Static Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static voidmain(java.lang.String[] args) 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] args)
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/DbDump.html b/docs/java/com/sleepycat/je/util/DbDump.html new file mode 100644 index 0000000..c4a52d5 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbDump.html @@ -0,0 +1,722 @@ + + + + + +DbDump (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.util
    +

    Class DbDump

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.util.DbDump
      • +
      +
    • +
    +
    +
      +
    • +
      +
      Direct Known Subclasses:
      +
      DbScavenger
      +
      +
      +
      +
      public class DbDump
      +extends java.lang.Object
      +
      Dump the contents of a database. This utility may be used programmatically + or from the command line. + +

      When using this utility as a command line program, and the + application uses custom key comparators, be sure to add the jars or + classes to the classpath that contain the application's comparator + classes.

      + +
      + java { com.sleepycat.je.util.DbDump |
      +        -jar je-<version>.jar DbDump }
      +   -h <dir>           # environment home directory
      +  [-f <fileName>]     # output file, for non -rR dumps
      +  [-l]                # list databases in the environment
      +  [-p]                # output printable characters
      +  [-r]                # salvage mode
      +  [-R]                # aggressive salvage mode
      +  [-d] <directory>    # directory for *.dump files (salvage mode)
      +  [-s <databaseName>] # database to dump
      +  [-v]                # verbose in salvage mode
      +  [-V]                # print JE version number
      +
      + See main(java.lang.String[]) for a full description of the + command line arguments. +

      + To dump a database to a stream from code: +

      +    DbDump dump = new DbDump(env, databaseName, outputStream, formatUsingPrintable);
      +    dump.dump();
      + 
      + +

      + Because a DATA=END marker is used to terminate the dump of + each database, multiple databases can be dumped and loaded using a single + stream. The dump() method leaves the stream positioned after + the last line written and the DbLoad.load() method leaves the stream + positioned after the last line read.
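      + As a minimal sketch, two databases could be dumped to a single stream as + follows (database names and the output path are hypothetical; exception + handling is omitted): +
      +    java.io.PrintStream out =
      +        new java.io.PrintStream(new java.io.FileOutputStream("both.dump"));
      +    // The DATA=END marker written after each database lets both dumps
      +    // share one stream, to be reloaded later by DbLoad.
      +    new DbDump(env, "firstDb", out, true).dump();
      +    new DbDump(env, "secondDb", out, true).dump();
      +    out.close();
      +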

      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + + +
        Constructors 
        Constructor and Description
        DbDump(Environment env, + java.lang.String dbName, + java.io.PrintStream outputFile, + boolean formatUsingPrintable) +
        Create a DbDump object for a specific environment and database.
        +
        DbDump(Environment env, + java.lang.String dbName, + java.io.PrintStream outputFile, + java.lang.String outputDirectory, + boolean formatUsingPrintable) +
        Deprecated.  +
        Please use the 4-arg ctor without outputDirectory instead.
        +
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        voiddump() +
        Perform the dump.
        +
        protected voiddumpOne(java.io.PrintStream o, + byte[] ba, + boolean formatUsingPrintable) 
        static voidmain(java.lang.String[] argv) +
        The main used by the DbDump utility.
        +
        protected voidopenEnv(boolean doRecovery) 
        protected booleanparseArgs(java.lang.String[] argv) 
        protected voidprintHeader(java.io.PrintStream o, + boolean dupSort, + boolean formatUsingPrintable) 
        protected voidprintUsage(java.lang.String msg) 
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Detail

        + + + +
          +
        • +

          envHome

          +
          protected java.io.File envHome
          +
        • +
        + + + + + + + +
          +
        • +

          dbName

          +
          protected java.lang.String dbName
          +
        • +
        + + + +
          +
        • +

          formatUsingPrintable

          +
          protected boolean formatUsingPrintable
          +
        • +
        + + + +
          +
        • +

          outputDirectory

          +
          protected java.lang.String outputDirectory
          +
        • +
        + + + +
          +
        • +

          outputFile

          +
          protected java.io.PrintStream outputFile
          +
        • +
        + + + +
          +
        • +

          doScavengerRun

          +
          protected boolean doScavengerRun
          +
        • +
        + + + +
          +
        • +

          doAggressiveScavengerRun

          +
          protected boolean doAggressiveScavengerRun
          +
        • +
        + + + +
          +
        • +

          verbose

          +
          protected boolean verbose
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbDump

          +
          @Deprecated
          +public DbDump(Environment env,
          +                          java.lang.String dbName,
          +                          java.io.PrintStream outputFile,
          +                          java.lang.String outputDirectory,
          +                          boolean formatUsingPrintable)
          +
          Deprecated. Please use the 4-arg ctor without outputDirectory instead.
          +
        • +
        + + + +
          +
        • +

          DbDump

          +
          public DbDump(Environment env,
          +              java.lang.String dbName,
          +              java.io.PrintStream outputFile,
          +              boolean formatUsingPrintable)
          +
          Create a DbDump object for a specific environment and database.
          +
          +
          Parameters:
          +
          env - The Environment containing the database to dump.
          +
          dbName - The name of the database to dump.
          +
          outputFile - The output stream to dump the database to.
          +
          formatUsingPrintable - true if the dump should use printable + characters.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] argv)
          +                 throws java.lang.Exception
          +
          The main used by the DbDump utility.
          +
          +
          Parameters:
          +
          argv - The arguments accepted by the DbDump utility. + +
          + usage: java { com.sleepycat.je.util.DbDump | -jar
          + je-<version>.jar DbDump }
          +             [-f output-file] [-l] [-p] [-V]
          +             [-s database] -h dbEnvHome [-rR] [-v]
          +             [-d directory]
          + 
          + +
          +
          + -f - the file to dump to. If omitted, output is to System.out. + Does not apply when -r or -R is used. +
          + -l - list the databases in the environment. +
          + -p - output printable characters. +
          If characters in either the key or data items are printing + characters (as defined by isprint(3)), use printing characters in the output + file to represent them. This option permits users to use standard text + editors and tools to modify the contents of databases.
          +
          + -V - display the version of the JE library. +
          + -s database - the database to dump. Does not apply when -r or -R is + used. +
          + -h dbEnvHome - the directory containing the database environment. +
          + -d directory - the output directory for *.dump files. Applies only when + -r or -R is used. +
          + -v - print progress information to stdout for -r or -R mode. +
          + -r - Salvage data from possibly corrupt data files. +
          + The records for all databases are output. The records for each database + are saved into <databaseName>.dump files in the current directory. +

          + This option recreates the Btree structure in memory, so as large a heap + size as possible should be specified. If -r cannot be used due to + insufficient memory, use -R instead. +

          + When used on uncorrupted data files, this option should return + equivalent data to a normal dump, but most likely in a different order; + in other words, it should output a transactionally correct data set. + However, there is one exception where not all committed records will be + output: +

            +
          • When a committed transaction spans more than one .jdb file, and + the last file in this set of files has been deleted by the log + cleaner but earlier files have not, records for that transaction + that appear in the earlier files will not be output. This is because + the Commit entry in the last file is missing, and DbDump believes + that the transaction was not committed. Such missing output should + be relatively rare. Note that records in deleted files will be + output, because they were migrated forward by the log cleaner and + are no longer associated with a transaction.
          • +
          +
          +
          + -R - Aggressively salvage data from a possibly corrupt file. +
          +

          + The records for all databases are output. The records for each database + are saved into <databaseName>.dump files in the current directory. +

          + Unlike -r, the -R option does not recreate the Btree structure in + memory. However, it does use a bit set to track all committed + transactions, so as large a heap size as possible should be specified. +

          + -R also differs from -r in that -R does not return a transactionally + correct data set. This is because the Btree information is not + reconstructed in memory. Therefore, data dumped in this fashion will + almost certainly have to be edited by hand or other means before or + after the data is reloaded. Be aware of the following abnormalities. +

            +
          • Deleted records are often output. An application-specific + technique should normally be used to correct for this.
          • +
          • Multiple versions of the same record are sometimes output. When + this happens, the more recent version of a record is output first. + Therefore, the -n option should normally be used when running + DbLoad.
          • +
          • When a committed transaction spans more than one .jdb file, and + the last file in this set of files has been deleted by the log + cleaner but earlier files have not, records for that transaction + that appear in the earlier files will not be output. This is because + the Commit entry in the last file is missing, and DbDump believes + that the transaction was not committed. Such missing output should + be relatively rare. Note that records in deleted files will be + output, because they were migrated forward by the log cleaner and + are no longer associated with a transaction. (This abnormality also + occurs with -r.)
          • +
          +
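          + For example, a hypothetical salvage run (paths are illustrative): +
          + $ java -jar je-X.Y.Z.jar DbDump -h /path/to/env -r -d /path/to/salvage -v
          +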
          +
          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          printUsage

          +
          protected void printUsage(java.lang.String msg)
          +
        • +
        + + + +
          +
        • +

          parseArgs

          +
          protected boolean parseArgs(java.lang.String[] argv)
          +                     throws java.io.IOException
          +
          +
          Throws:
          +
          java.io.IOException
          +
          +
        • +
        + + + + + + + + + + + +
          +
        • +

          printHeader

          +
          protected void printHeader(java.io.PrintStream o,
          +                           boolean dupSort,
          +                           boolean formatUsingPrintable)
          +
        • +
        + + + +
          +
        • +

          dumpOne

          +
          protected void dumpOne(java.io.PrintStream o,
          +                       byte[] ba,
          +                       boolean formatUsingPrintable)
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/DbFilterStats.html b/docs/java/com/sleepycat/je/util/DbFilterStats.html new file mode 100644 index 0000000..a2856b8 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbFilterStats.html @@ -0,0 +1,328 @@ + + + + + +DbFilterStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.util
    +

    Class DbFilterStats

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.util.DbFilterStats
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbFilterStats
      +extends java.lang.Object
      +
      Transform one or more je.stat.csv statistics files and + write the output to stdout. A set of column names is used to + specify which columns are written to the output, and in what order. + The utility is used to create an output file that is easier to + analyze by projecting and ordering only the data that is required. + Each user-specified column name will either be an exact match of a + column in the file or a prefix match. In order to output the "time" + and all "Op" group statistics, a column list "time,Op" could be used. + Multiple input files are processed in the order specified on the + command line. Duplicate column headers are suppressed in the output + when processing multiple input files.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbFilterStats() 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        booleanexecute(java.lang.String[] argv) +
        Performs the processing of the DbFilterStats command.
        +
        static voidmain(java.lang.String[] argv) +
        The main used by the DbFilterStats utility.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbFilterStats

          +
          public DbFilterStats()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] argv)
          +
          The main used by the DbFilterStats utility.
          +
          +
          Parameters:
          +
          argv - An array of command line arguments to the DbFilterStats + utility. + +
          + usage: java { com.sleepycat.je.util.DbFilterStats | -jar
          + je.jar DbFilterStats }
          +  -f  <projection file>
          +  -p  <column projection list> A comma separated list of column
          +      names to project.
          +  <stat file> [<stat file>]
          + 
          + +

          At least one argument must be specified.
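          + For example, to project the time column and all Op group columns from two + statistics files (the second file name is hypothetical): +
          + $ java -jar je.jar DbFilterStats -p time,Op je.stat.csv je.stat.0.csv
          +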

          +
          +
        • +
        + + + +
          +
        • +

          execute

          +
          public boolean execute(java.lang.String[] argv)
          +
          Performs the processing of the DbFilterStats command.
          +
          +
          Parameters:
          +
          argv - DbFilterStats command arguments
          +
          Returns:
          +
          true if command is successful, otherwise false
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/DbLoad.html b/docs/java/com/sleepycat/je/util/DbLoad.html new file mode 100644 index 0000000..28ed8d1 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbLoad.html @@ -0,0 +1,614 @@ + + + + + +DbLoad (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.util
    +

    Class DbLoad

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.util.DbLoad
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbLoad
      +extends java.lang.Object
      +
      Loads a database from a dump file generated by DbDump. + This utility may be used programmatically or from the command line. + +

      When using this utility as a command line program, and the + application uses custom key comparators, be sure to add the jars or + classes to the classpath that contain the application's comparator + classes.

      + +
      + java { com.sleepycat.je.util.DbLoad |
      +        -jar je-<version>.jar DbLoad }
      +     -h <dir>            # environment home directory
      +    [-f <fileName>]      # input file
      +    [-n]                 # no overwrite mode
      +    [-T]                 # input file is in text mode
      +    [-I]                 # ignore unknown parameters
      +    [-c name=value]      # config values
      +    [-s <databaseName> ] # database to load
      +    [-v]                 # show progress
      +    [-V]                 # print JE version number
      +
      + See main(java.lang.String[]) for a full description of the + command line arguments. +

      + To load a database from a stream in code: +

      +    DbLoad loader = new DbLoad();
      +    loader.setEnv(env);
      +    loader.setDbName(dbName);
      +    loader.setInputReader(reader);
      +    loader.setNoOverwrite(noOvrwr);
      +    loader.setTextFileMode(tfm);
      +    loader.load();
      + 
      + +

      Because a DATA=END marker is used to terminate the dump of + each database, multiple databases can be dumped and loaded using a single + stream. The DbDump.dump() method leaves the stream positioned after + the last line written and the load() method leaves the stream + positioned after the last line read.

      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        protected Environmentenv 
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbLoad() +
        Creates a DbLoad object.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        booleanload() 
        static voidmain(java.lang.String[] argv) +
        The main used by the DbLoad utility.
        +
        voidsetDbName(java.lang.String dbName) +
        Sets the database name to load.
        +
        voidsetEnv(Environment env) +
        Sets the Environment to load from.
        +
        voidsetIgnoreUnknownConfig(boolean ignoreUnknownConfigMode) +
        Sets whether to ignore unknown parameters in the config file.
        +
        voidsetInputReader(java.io.BufferedReader reader) +
        Sets the BufferedReader to load from.
        +
        voidsetNoOverwrite(boolean noOverwrite) +
        Sets whether the load should overwrite existing data or not.
        +
        voidsetProgressInterval(long progressInterval) +
        If progressInterval is set, progress status messages are generated to + stdout at set percentages of the load.
        +
        voidsetTextFileMode(boolean textFileMode) +
        Sets whether the load data is in text file format.
        +
        voidsetTotalLoadBytes(long totalLoadBytes) +
        Used for progress status messages.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbLoad

          +
          public DbLoad()
          +
          Creates a DbLoad object.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] argv)
          +                 throws java.lang.Exception
          +
          The main used by the DbLoad utility.
          +
          +
          Parameters:
          +
          argv - The arguments accepted by the DbLoad utility. + +
          + usage: java { com.sleepycat.je.util.DbLoad | -jar
          + je-<version>.jar DbLoad }
          +             [-f input-file] [-n] [-V] [-v] [-T] [-I]
          +             [-c name=value]
          +             [-s database] -h dbEnvHome
          + 
          + +

          -f - the file to load from (in DbDump format)
          + -n - no overwrite mode. Do not overwrite existing data.
          + -V - display the version of the JE library.
          + -T - input file is in Text mode.
          + -I - ignore unknown parameters in the config file.

          + +

          If -f is not specified, the dump is read from System.in.

          + +

          The -T option allows JE applications to easily load text files into + databases.

          + +

          The -I option allows loading databases that were dumped with the + Berkeley DB C product, when the dump file contains parameters not known + to JE.

          + +

          The input must be paired lines of text, where the first line of the + pair is the key item, and the second line of the pair is its + corresponding data item.

          + +

          A simple escape mechanism, where newline and backslash (\) characters + are special, is applied to the text input. Newline characters are + interpreted as record separators. Backslash characters in the text will + be interpreted in one of two ways: If the backslash character precedes + another backslash character, the pair will be interpreted as a literal + backslash. If the backslash character precedes any other character, the + two characters following the backslash will be interpreted as a + hexadecimal specification of a single character; for example, \0a is a + newline character in the ASCII character set.

          + +

          For this reason, any backslash or newline characters that naturally + occur in the text input must be escaped to avoid misinterpretation by + DbLoad.
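          + For instance, a hypothetical two-record -T input fragment: the first + record's data contains an embedded newline (\0a), and the second record's + data contains a single literal backslash (\\): +
          + alpha
          + first\0asecond
          + beta
          + c:\\temp
          +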

          + +

          -c name=value - Specify configuration options ignoring any value they + may have based on the input. The command-line format is name=value. See + the Supported Keywords section below for a list of keywords supported by + the -c option.

          + +

          -s database - the database to load.
          + -h dbEnvHome - the directory containing the database environment.
          + -v - report progress

          + +

          Supported Keywords
          + version=N - specify the version of the input file. Currently only + version 3 is supported.
          + format - specify the format of the file. Allowable values are "print" + and "bytevalue".
          + dupsort - specify whether the database allows duplicates or not. + Allowable values are "true" and "false".
          + type - specifies the type of database. Only "btree" is allowed.
          + database - specifies the name of the database to be loaded.
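          + For example, a hypothetical load of a text-mode dump into a named + database (paths and names are illustrative): +
          + $ java -jar je-X.Y.Z.jar DbLoad -h /path/to/env -f mydb.txt -T -s mydb -v
          +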

          +
          Throws:
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          setEnv

          +
          public void setEnv(Environment env)
          +
          Sets the Environment to load from.
          +
          +
          Parameters:
          +
          env - The environment.
          +
          +
        • +
        + + + +
          +
        • +

          setDbName

          +
          public void setDbName(java.lang.String dbName)
          +
          Sets the database name to load.
          +
          +
          Parameters:
          +
          dbName - database name
          +
          +
        • +
        + + + +
          +
        • +

          setInputReader

          +
          public void setInputReader(java.io.BufferedReader reader)
          +
          Sets the BufferedReader to load from.
          +
          +
          Parameters:
          +
          reader - The BufferedReader.
          +
          +
        • +
        + + + +
          +
        • +

          setNoOverwrite

          +
          public void setNoOverwrite(boolean noOverwrite)
          +
          Sets whether the load should overwrite existing data or not.
          +
          +
          Parameters:
          +
          noOverwrite - True if existing data should not be overwritten.
          +
          +
        • +
        + + + +
          +
        • +

          setTextFileMode

          +
          public void setTextFileMode(boolean textFileMode)
          +
          Sets whether the load data is in text file format.
          +
          +
          Parameters:
          +
          textFileMode - True if the load data is in text file format.
          +
          +
        • +
        + + + +
          +
        • +

          setIgnoreUnknownConfig

          +
          public void setIgnoreUnknownConfig(boolean ignoreUnknownConfigMode)
          +
          Sets whether to ignore unknown parameters in the config file. This + allows loading databases that were dumped with the Berkeley DB C + product, when the dump file contains parameters not known to JE.
          +
          +
          Parameters:
          +
          ignoreUnknownConfigMode - True to ignore unknown parameters in + the config file.
          +
          +
        • +
        + + + +
          +
        • +

          setProgressInterval

          +
          public void setProgressInterval(long progressInterval)
          +
          If progressInterval is set, progress status messages are generated to + stdout at set percentages of the load.
          +
          +
          Parameters:
          +
          progressInterval - Specifies the percentage intervals for status + messages. If 0, no messages are generated.
          +
          +
        • +
        + + + +
          +
        • +

          setTotalLoadBytes

          +
          public void setTotalLoadBytes(long totalLoadBytes)
          +
          Used for progress status messages. Must be set to greater than + 0 if the progressInterval is greater than 0.
          +
          +
          Parameters:
          +
          totalLoadBytes - number of input bytes to be loaded.
          +
          +
        • +
        + + + + +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/DbPrintLog.html b/docs/java/com/sleepycat/je/util/DbPrintLog.html new file mode 100644 index 0000000..bf3341e --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbPrintLog.html @@ -0,0 +1,361 @@ + + + + + +DbPrintLog (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.util
    +

    Class DbPrintLog

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.util.DbPrintLog
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbPrintLog
      +extends java.lang.Object
      +
      Dumps the contents of the log in XML format to System.out. + +

      To print an environment log:

      + +
      +      DbPrintLog.main(argv);
      + 
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbPrintLog() 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        voiddump(java.io.File envHome, + java.lang.String entryTypes, + java.lang.String txnIds, + long startLsn, + long endLsn, + boolean verbose, + boolean stats, + boolean repEntriesOnly, + boolean csvFormat, + boolean forwards, + boolean vlsnDistribution, + java.lang.String customDumpReaderClass) +
        Dump a JE log into human readable form.
        +
        static voidmain(java.lang.String[] argv) +
        The main used by the DbPrintLog utility.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbPrintLog

          +
          public DbPrintLog()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + + + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] argv)
          +
          The main used by the DbPrintLog utility.
          +
          +
          Parameters:
          +
          argv - An array of command line arguments to the DbPrintLog + utility. + +
          + usage: java { com.sleepycat.je.util.DbPrintLog | -jar
          + je-<version>.jar DbPrintLog }
          +  -h <envHomeDir>
          +  -s  <start file number or LSN, in hex>
          +  -e  <end file number or LSN, in hex>
          +  -k  <binary|hex|text|obfuscate> (format for dumping the key/data)
          +  -db <targeted db ids, comma separated>
          +  -tx <targeted txn ids, comma separated>
          +  -ty <targeted entry types, comma separated>
          +  -S  show summary of log entries
          +  -SC show summary of log entries in CSV format
          +  -r  only print replicated log entries
          +  -b  scan log backwards. The entire log must be scanned, cannot be used
          +      with -s or -e
          +  -q  if specified, concise version is printed,
          +      default is verbose version
          +  -c  <name of custom dump reader class> if specified, DbPrintLog
          +      will attempt to load a class of this name, which will be used to
          +      process log entries. Used to customize formatting and dumping when
          +      debugging files.
          + 
          + +

          All arguments are optional. The current directory is used if -h is not specified.
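          + For example, to print a summary of log entries for an environment (the + path is hypothetical): +
          + $ java -jar je-X.Y.Z.jar DbPrintLog -h /path/to/env -S
          +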

          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/DbScavenger.html b/docs/java/com/sleepycat/je/util/DbScavenger.html new file mode 100644 index 0000000..7396a67 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbScavenger.html @@ -0,0 +1,371 @@ + + + + + +DbScavenger (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.util
    +

    Class DbScavenger

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class DbScavenger
      +extends DbDump
      +
      Used to retrieve as much data as possible from a corrupted environment. + This utility is meant to be used programmatically, and is the programmatic + equivalent of the -r and -R options of DbDump. +

      + To scavenge a database: +

      +  DbScavenger scavenger =
      +      new DbScavenger(env, outputDirectory, formatUsingPrintable,
      +                      doAggressiveScavengerRun, verbose);
      +  scavenger.dump();
      +
      + +

      + The recovered databases will be placed in the outputDirectory with ".dump" + file suffixes. The format of the .dump files will be suitable for use with + DbLoad.

      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbScavenger

          +
          public DbScavenger(Environment env,
          +                   java.lang.String outputDirectory,
          +                   boolean formatUsingPrintable,
          +                   boolean doAggressiveScavengerRun,
          +                   boolean verbose)
          +
          Create a DbScavenger object for a specific environment. +

          +
          +
          Parameters:
          +
          env - The Environment containing the database to dump.
          +
          outputDirectory - The directory to create the .dump files in.
          +
          formatUsingPrintable - true if the dump should use printable + characters.
          +
          doAggressiveScavengerRun - if true, all data records are + dumped, regardless of whether they are the latest version or not.
          +
          verbose - true if status output should be written to System.out + during scavenging.
          +
          +
        • +
        +
      • +
      + + +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/DbSpace.html b/docs/java/com/sleepycat/je/util/DbSpace.html new file mode 100644 index 0000000..2e03f0c --- /dev/null +++ b/docs/java/com/sleepycat/je/util/DbSpace.html @@ -0,0 +1,403 @@ + + + + + +DbSpace (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.je.util
    +

    Class DbSpace

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.je.util.DbSpace
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class DbSpace
      +extends java.lang.Object
      +
      DbSpace displays the disk space utilization for an environment. +
      + usage: java { com.sleepycat.je.util.DbSpace |
      +               -jar je-<version>.jar DbSpace }
      +          -h <dir># environment home directory
      +         [-q]     # quiet, print grand totals only
      +         [-u]     # sort by average utilization
      +         [-d]     # dump file summary details
      +         [-r]     # recalculate utilization (expensive)
      +         [-R]     # recalculate expired data (expensive)
      +         [-s]     # start file number or LSN, in hex
      +         [-e]     # end file number or LSN, in hex
      +         [-t]     # time for calculating expired data
      +                  #   format: yyyy-MM-dd'T'HHZ
      +                  #  example: 2016-03-09T22-0800
      +         [-V]     # print JE version number
      + 
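      + A minimal programmatic sketch using an already-open Environment (variable + names are illustrative; exception handling is omitted): +
      +    // quiet=false, details=false, sorted by average utilization.
      +    DbSpace space = new DbSpace(env, false, false, true);
      +    space.print(System.out);
      +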
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        DbSpace(Environment env, + boolean quiet, + boolean details, + boolean sorted) +
        Creates a DbSpace object for calculating utilization using an open + Environment.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        All Methods Static Methods Instance Methods Concrete Methods 
        Modifier and TypeMethod and Description
        static voidmain(java.lang.String[] argv) 
        voidprint(java.io.PrintStream out) +
        Calculates utilization and prints a report to the given output stream.
        +
        voidsetEndFile(long endFile) +
        Sets the ending file number, which is an upper bound on the range of + files for which utilization is reported and (optionally) recalculated.
        +
        voidsetRecalculate(boolean recalc) +
        Sets the recalculation property, which if true causes a more expensive + recalculation of utilization to be performed for debugging purposes.
        +
        voidsetStartFile(long startFile) +
        Sets the start file number, which is a lower bound on the range of + files for which utilization is reported and (optionally) recalculated.
        +
        voidsetTime(long time) +
        Sets the time for calculating expired data.
        +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          DbSpace

          +
          public DbSpace(Environment env,
          +               boolean quiet,
          +               boolean details,
          +               boolean sorted)
          +
          Creates a DbSpace object for calculating utilization using an open + Environment.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          main

          +
          public static void main(java.lang.String[] argv)
          +                 throws java.lang.Exception
          +
          +
          Throws:
          +
          java.lang.Exception
          +
          +
        • +
        + + + +
          +
        • +

          setRecalculate

          +
          public void setRecalculate(boolean recalc)
          +
          Sets the recalculation property, which if true causes a more expensive + recalculation of utilization to be performed for debugging purposes. + This property is false by default.
          +
        • +
        + + + +
          +
        • +

          setStartFile

          +
          public void setStartFile(long startFile)
          +
          Sets the start file number, which is a lower bound on the range of + files for which utilization is reported and (optionally) recalculated. + By default there is no lower bound.
          +
        • +
        + + + +
          +
        • +

          setEndFile

          +
          public void setEndFile(long endFile)
          +
          Sets the ending file number, which is an upper bound on the range of + files for which utilization is reported and (optionally) recalculated. + By default there is no upper bound.
          +
        • +
        + + + +
          +
        • +

          setTime

          +
          public void setTime(long time)
          +
          Sets the time for calculating expired data.
          +
        • +
        + + + +
          +
        • +

print

public void print(java.io.PrintStream out)
           throws DatabaseException

Calculates utilization and prints a report to the given output stream.

Throws:
DatabaseException
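Pulling the pieces above together, a minimal programmatic sketch; the open Environment handle and the file-number bounds are assumptions for illustration:

  import com.sleepycat.je.Environment;
  import com.sleepycat.je.util.DbSpace;

  public class DbSpaceExample {
      // Prints a utilization report over an assumed range of log files.
      public static void report(Environment env) throws Exception {
          DbSpace space = new DbSpace(env, false /*quiet*/, true /*details*/, true /*sorted*/);
          space.setStartFile(0x10L); // assumed lower bound
          space.setEndFile(0x1fL);   // assumed upper bound
          space.print(System.out);   // throws DatabaseException on failure
      }
  }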
diff --git a/docs/java/com/sleepycat/je/util/DbStat.html b/docs/java/com/sleepycat/je/util/DbStat.html
new file mode 100644
index 0000000..c290668
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/DbStat.html
@@ -0,0 +1,313 @@

DbStat (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class DbStat

public class DbStat
extends DbVerify

        Constructor Summary

Constructors
Constructor and Description
DbStat(Environment env, java.lang.String dbName)

        Method Summary

Modifier and Type    Method and Description
static void          main(java.lang.String[] argv)
boolean              stats(java.io.PrintStream out)

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
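A minimal sketch of programmatic use, assuming an open Environment and a hypothetical database name; the constructor and stats(java.io.PrintStream) are those listed above:

  import com.sleepycat.je.Environment;
  import com.sleepycat.je.util.DbStat;

  public class DbStatExample {
      public static boolean printStats(Environment env) throws Exception {
          DbStat stat = new DbStat(env, "myDb"); // "myDb" is an assumed name
          return stat.stats(System.out);         // boolean result per the summary above
      }
  }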
diff --git a/docs/java/com/sleepycat/je/util/DbTruncateLog.html b/docs/java/com/sleepycat/je/util/DbTruncateLog.html
new file mode 100644
index 0000000..2dab8c6
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/DbTruncateLog.html
@@ -0,0 +1,316 @@

DbTruncateLog (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class DbTruncateLog

java.lang.Object
  com.sleepycat.je.util.DbTruncateLog

public class DbTruncateLog
extends java.lang.Object
DbTruncateLog is a utility that lets the user truncate the JE log from a specified file and offset through the last log file, inclusive. It is generally used in replication systems to handle com.sleepycat.je.rep.RollbackProhibitedException, permitting the application to interject application-specific handling. It should be used with caution.

The parameters for DbTruncateLog are provided through the RollbackProhibitedException instance and the exception message. The goal is to truncate the JE log after a specified file number and file offset. DbTruncateLog automatically deletes all log entries after that specified log entry.

For example, suppose the JE log consists of these files:

    00000002.jdb
    0000000e.jdb
    0000000f.jdb
    00000010.jdb
    00000012.jdb
    0000001d.jdb
    0000001e.jdb
    0000001f.jdb
If the log must be truncated at file 0x1d, offset 0x34567, the user should run the following command:

    DbTruncateLog -h <envDir> -f 0x1d -o 0x34567

        Constructor Summary

Constructors
Constructor and Description
DbTruncateLog()

        Method Summary

Modifier and Type    Method and Description
static void          main(java.lang.String[] argv)
                     Usage:

          Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Constructor Detail

DbTruncateLog

public DbTruncateLog()

        Method Detail

main

public static void main(java.lang.String[] argv)

Usage:
  -h environmentDirectory
  -f file number. If hex, prefix with "0x"
  -o file offset byte. If hex, prefix with "0x"

For example, to truncate a log to file 0xa, offset 0x1223:

    DbTruncateLog -h <environmentDir> -f 0xa -o 0x1223
diff --git a/docs/java/com/sleepycat/je/util/DbVerify.html b/docs/java/com/sleepycat/je/util/DbVerify.html
new file mode 100644
index 0000000..238f24a
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/DbVerify.html
@@ -0,0 +1,389 @@

DbVerify (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class DbVerify

java.lang.Object
  com.sleepycat.je.util.DbVerify
      Direct Known Subclasses:
DbStat

public class DbVerify
extends java.lang.Object
Verifies the internal structures of a database.

When using this utility as a command line program, if the application uses custom key comparators, be sure to add the jars or classes that contain the application's comparator classes to the classpath.

To verify a database and write the errors to a stream:

    DbVerify verifier = new DbVerify(env, dbName, quiet);
    verifier.verify();
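Expanded into a compilable shape (a sketch only; the database name and quiet flag are assumed values, and exception handling is left generic):

  import com.sleepycat.je.Environment;
  import com.sleepycat.je.util.DbVerify;

  public class DbVerifyExample {
      public static void verifyDatabase(Environment env) throws Exception {
          String dbName = "myDb"; // assumed database name
          boolean quiet = false;  // report errors rather than working silently
          DbVerify verifier = new DbVerify(env, dbName, quiet);
          verifier.verify();      // same call as the snippet above
      }
  }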

        Method Detail

main

public static void main(java.lang.String[] argv)
                 throws DatabaseException

The main used by the DbVerify utility.
Parameters:
argv - The arguments accepted by the DbVerify utility.

 usage: java { com.sleepycat.je.util.DbVerify | -jar
               je-<version>.jar DbVerify }
             [-q] [-V] -s database -h dbEnvHome [-v progressInterval]
             [-bs batchSize] [-d delayMs] [-vdr]

 -V   - show the version of the JE library
 -s   - specify the database to verify
 -h   - specify the environment directory
 -q   - work quietly and don't display errors
 -v   - report intermediate statistics every progressInterval Leaf Nodes
 -bs  - specify how many records to check in each batch
 -d   - specify the delay in ms between batches
 -vdr - verify data records (read LNs)

Note that the DbVerify command line cannot be used to verify the integrity of secondary databases, because this feature requires the secondary databases to have been opened by the application. To verify secondary database integrity, use Environment.verify(com.sleepycat.je.VerifyConfig, java.io.PrintStream) or Database.verify(com.sleepycat.je.VerifyConfig) instead, from within the application.

Throws:
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException
diff --git a/docs/java/com/sleepycat/je/util/DbVerifyLog.html b/docs/java/com/sleepycat/je/util/DbVerifyLog.html
new file mode 100644
index 0000000..ea40808
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/DbVerifyLog.html
@@ -0,0 +1,421 @@

DbVerifyLog (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class DbVerifyLog

java.lang.Object
  com.sleepycat.je.util.DbVerifyLog

public class DbVerifyLog
extends java.lang.Object
Verifies the checksums in one or more log files.

This class may be instantiated and used programmatically, or used as a command line utility as described below.

 usage: java { com.sleepycat.je.util.DbVerifyLog |
               -jar je-<version>.jar DbVerifyLog }
  [-h <dir>]      # environment home directory
  [-s <file>]     # starting (minimum) file number
  [-e <file>]     # ending (one past the maximum) file number
  [-d <millis>]   # delay in ms between reads (default is zero)
  [-V]            # print JE version number

All arguments are optional. The current directory is used if -h is not specified. File numbers may be specified in hex (preceded by 0x) or decimal format. For convenience when copy/pasting from other output, LSN format (<file>/<offset>) is also allowed.
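The same verification can be driven programmatically; a minimal sketch, assuming an already-open Environment and an arbitrary 10 ms read delay (the constructors and methods used are documented below):

  import java.io.IOException;
  import java.util.concurrent.TimeUnit;
  import com.sleepycat.je.Environment;
  import com.sleepycat.je.util.DbVerifyLog;
  import com.sleepycat.je.util.LogVerificationException;

  public class DbVerifyLogExample {
      public static boolean logIsClean(Environment env) throws IOException {
          DbVerifyLog verifier = new DbVerifyLog(env);
          verifier.setReadDelay(10, TimeUnit.MILLISECONDS); // assumed delay
          try {
              verifier.verifyAll(); // checks every log file in the environment
              return true;
          } catch (LogVerificationException e) {
              return false; // a checksum failed or an entry was invalid
          }
      }
  }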


        Constructor Summary

Constructors
Constructor and Description
DbVerifyLog(Environment env)
    Creates a utility object for verifying the checksums in log files.
DbVerifyLog(Environment env, int readBufferSize)
    Creates a utility object for verifying log files.

        Method Summary

Modifier and Type    Method and Description
static void          main(java.lang.String[] argv)
void                 setReadDelay(long delay, java.util.concurrent.TimeUnit unit)
                     Configures the delay between file reads during verification.
void                 verify(long startFile, long endFile)
                     Verifies the given range of log files in the environment.
void                 verifyAll()
                     Verifies all log files in the environment.

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Constructor Detail

DbVerifyLog

public DbVerifyLog(Environment env,
                   int readBufferSize)

Creates a utility object for verifying log files.

Parameters:
env - the Environment associated with the log.
readBufferSize - is the buffer size to use. If a value less than or equal to zero is specified, EnvironmentConfig.LOG_ITERATOR_READ_SIZE is used.
Throws:
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.

        Method Detail

verifyAll

public void verifyAll()
               throws LogVerificationException,
                      java.io.IOException

Verifies all log files in the environment.

Throws:
LogVerificationException - if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents.
java.io.IOException - if an IOException occurs while reading a log file.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
verify

public void verify(long startFile,
                   long endFile)
            throws LogVerificationException,
                   java.io.IOException

Verifies the given range of log files in the environment.

Parameters:
startFile - is the lowest numbered log file to be verified.
endFile - is one greater than the highest numbered log file to be verified.
Throws:
LogVerificationException - if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents.
java.io.IOException - if an IOException occurs while reading a log file.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
main

public static void main(java.lang.String[] argv)
setReadDelay

public void setReadDelay(long delay,
                         java.util.concurrent.TimeUnit unit)

Configures the delay between file reads during verification. A delay between reads is needed to allow other JE components, such as HA, to make timely progress.

By default there is no read delay (it is zero).

Note that when using the background data verifier, the delay between reads is EnvironmentConfig.VERIFY_LOG_READ_DELAY.

Parameters:
delay - the delay between reads or zero for no delay.
unit - the TimeUnit of the delay value. May be null only if delay is zero.
diff --git a/docs/java/com/sleepycat/je/util/FileHandler.html b/docs/java/com/sleepycat/je/util/FileHandler.html
new file mode 100644
index 0000000..6452611
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/FileHandler.html
@@ -0,0 +1,351 @@

FileHandler (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class FileHandler

java.lang.Object
  java.util.logging.Handler
    java.util.logging.StreamHandler
      java.util.logging.FileHandler
        com.sleepycat.je.util.FileHandler

public class FileHandler
extends java.util.logging.FileHandler
JE instances of java.util.logging.Logger are configured to use this implementation of java.util.logging.FileHandler. By default, the handler's level is Level.INFO. To enable file output, use the standard java.util.logging.LogManager configuration to set the desired level:

    com.sleepycat.je.util.FileHandler.level=INFO

The default destination for this output is a circular set of files named <environmentHome>/je.info.#. The logging file size can be configured with the standard java.util.logging.FileHandler configuration.

JE augments the java.util.logging API with a JE environment parameter for setting handler levels. This is described in greater detail in Chapter 12, Administering Berkeley DB Java Edition Applications.

See Also:
Chapter 12. Logging, Using JE Trace Logging
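One way to apply such a setting programmatically is to load a properties file through the standard LogManager API; a sketch, assuming a hypothetical logging.properties file containing the line quoted above:

  import java.io.FileInputStream;
  import java.util.logging.LogManager;

  public class JeLoggingSetup {
      // Loads a hypothetical logging.properties that sets, e.g.:
      //   com.sleepycat.je.util.FileHandler.level=INFO
      public static void configure() throws Exception {
          try (FileInputStream in = new FileInputStream("logging.properties")) {
              LogManager.getLogManager().readConfiguration(in);
          }
      }
  }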

        Constructor Summary

Constructors
Constructor and Description
FileHandler(java.lang.String pattern, int limit, int count, java.util.logging.Formatter formatter, com.sleepycat.je.dbi.EnvironmentImpl envImpl)

        Method Summary

Methods inherited from class java.util.logging.FileHandler

close, publish
Methods inherited from class java.util.logging.StreamHandler

flush, isLoggable, setEncoding, setOutputStream
Methods inherited from class java.util.logging.Handler

getEncoding, getErrorManager, getFilter, getFormatter, getLevel, reportError, setErrorManager, setFilter, setFormatter, setLevel
Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Field Detail

STIFLE_DEFAULT_ERROR_MANAGER

public static boolean STIFLE_DEFAULT_ERROR_MANAGER

        Constructor Detail

FileHandler

public FileHandler(java.lang.String pattern,
                   int limit,
                   int count,
                   java.util.logging.Formatter formatter,
                   com.sleepycat.je.dbi.EnvironmentImpl envImpl)
            throws java.lang.SecurityException,
                   java.io.IOException

Throws:
java.lang.SecurityException
java.io.IOException
diff --git a/docs/java/com/sleepycat/je/util/LogVerificationException.html b/docs/java/com/sleepycat/je/util/LogVerificationException.html
new file mode 100644
index 0000000..03a21b7
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/LogVerificationException.html
@@ -0,0 +1,291 @@

LogVerificationException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class LogVerificationException

java.lang.Object
  java.lang.Throwable
    java.lang.Exception
      java.io.IOException
        com.sleepycat.je.util.LogVerificationException

All Implemented Interfaces:
java.io.Serializable
public class LogVerificationException
extends java.io.IOException
Thrown during log verification if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents.

This class extends IOException so that it can be thrown by the InputStream methods of LogVerificationInputStream.

See Also:
Serialized Form

        Method Summary

Methods inherited from class java.lang.Throwable

addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

        Constructor Detail

LogVerificationException

public LogVerificationException(java.lang.String message)
LogVerificationException

public LogVerificationException(java.lang.String message,
                                java.lang.Throwable cause)
diff --git a/docs/java/com/sleepycat/je/util/LogVerificationInputStream.html b/docs/java/com/sleepycat/je/util/LogVerificationInputStream.html
new file mode 100644
index 0000000..2cb6aa1
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/LogVerificationInputStream.html
@@ -0,0 +1,528 @@

LogVerificationInputStream (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class LogVerificationInputStream

java.lang.Object
  java.io.InputStream
    com.sleepycat.je.util.LogVerificationInputStream

All Implemented Interfaces:
java.io.Closeable, java.lang.AutoCloseable
public class LogVerificationInputStream
extends java.io.InputStream
Verifies the checksums in an InputStream for a log file in a JE Environment.

This InputStream reads input from some other given InputStream, and verifies checksums while reading. Its primary intended use is to verify log files that are being copied as part of a programmatic backup. It is critical that invalid files are not added to a backup set, since then both the live environment and the backup will be invalid.


The following example verifies log files as they are being copied. The DbBackup class should normally be used to obtain the array of files to be copied.

  void copyFiles(final Environment env,
                 final String[] fileNames,
                 final File destDir,
                 final int bufSize)
      throws IOException, DatabaseException {

      final File srcDir = env.getHome();

      for (final String fileName : fileNames) {

          final File destFile = new File(destDir, fileName);
          final FileOutputStream fos = new FileOutputStream(destFile);

          final File srcFile = new File(srcDir, fileName);
          final FileInputStream fis = new FileInputStream(srcFile);
          final LogVerificationInputStream vis =
              new LogVerificationInputStream(env, fis, fileName);

          final byte[] buf = new byte[bufSize];

          try {
              while (true) {
                  final int len = vis.read(buf);
                  if (len < 0) {
                      break;
                  }
                  fos.write(buf, 0, len);
              }
          } finally {
              fos.close();
              vis.close();
          }
      }
  }

It is important to read the entire underlying input stream until the end-of-file is reached to detect incomplete entries at the end of the log file.


Note that mark and reset are not supported and markSupported returns false. The default InputStream implementation of these methods is used.

See Also:
DbBackup, DbVerifyLog

        Constructor Summary

Constructors
Constructor and Description
LogVerificationInputStream(Environment env, java.io.InputStream in, java.lang.String fileName)
    Creates a verification input stream.

        Method Summary

Modifier and Type    Method and Description
int                  available()
void                 close()
int                  read()
int                  read(byte[] b)
int                  read(byte[] b, int off, int len)
long                 skip(long bytesToSkip)

Methods inherited from class java.io.InputStream

mark, markSupported, reset

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Constructor Detail

LogVerificationInputStream

public LogVerificationInputStream(Environment env,
                                  java.io.InputStream in,
                                  java.lang.String fileName)

Creates a verification input stream.

Parameters:
env - the Environment associated with the log.
in - the underlying InputStream for the log to be read.
fileName - the file name of the input stream, for reporting in the LogVerificationException. This should be a simple file name of the form NNNNNNNN.jdb, where NNNNNNNN is the file number in hexadecimal format.
Throws:
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.

        Method Detail

read

public int read()
         throws java.io.IOException

This method reads the underlying InputStream and verifies the contents of the stream.

Specified by:
read in class java.io.InputStream
Throws:
LogVerificationException - if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.io.IOException
read

public int read(byte[] b)
         throws java.io.IOException

This method reads the underlying InputStream and verifies the contents of the stream.

Overrides:
read in class java.io.InputStream
Throws:
LogVerificationException - if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.io.IOException
read

public int read(byte[] b,
                int off,
                int len)
         throws java.io.IOException

This method reads the underlying InputStream and verifies the contents of the stream.

Overrides:
read in class java.io.InputStream
Throws:
LogVerificationException - if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.io.IOException
skip

public long skip(long bytesToSkip)
          throws java.io.IOException

This method reads the underlying InputStream in order to skip the required number of bytes and verifies the contents of the stream. A temporary buffer is allocated lazily for reading.

Overrides:
skip in class java.io.InputStream
Throws:
LogVerificationException - if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
java.io.IOException
available

public int available()
              throws java.io.IOException

This method simply performs in.available().

Overrides:
available in class java.io.InputStream
Throws:
java.io.IOException
close

public void close()
           throws java.io.IOException

This method simply performs in.close().

Specified by:
close in interface java.io.Closeable
Specified by:
close in interface java.lang.AutoCloseable
Overrides:
close in class java.io.InputStream
Throws:
java.io.IOException
diff --git a/docs/java/com/sleepycat/je/util/LogVerificationReadableByteChannel.html b/docs/java/com/sleepycat/je/util/LogVerificationReadableByteChannel.html
new file mode 100644
index 0000000..8207cff
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/LogVerificationReadableByteChannel.html
@@ -0,0 +1,430 @@

LogVerificationReadableByteChannel (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class LogVerificationReadableByteChannel

java.lang.Object
  com.sleepycat.je.util.LogVerificationReadableByteChannel

All Implemented Interfaces:
java.io.Closeable, java.lang.AutoCloseable, java.nio.channels.Channel, java.nio.channels.ReadableByteChannel

public class LogVerificationReadableByteChannel
extends java.lang.Object
implements java.nio.channels.ReadableByteChannel
Verifies the checksums in a ReadableByteChannel for a log file in a JE Environment. This class is similar to the LogVerificationInputStream class, but permits using NIO channels and direct buffers to provide better copying performance.

This ReadableByteChannel reads input from some other given ReadableByteChannel, and verifies checksums while reading. Its primary intended use is to verify log files that are being copied as part of a programmatic backup. It is critical that invalid files are not added to a backup set, since then both the live environment and the backup will be invalid.

The following example verifies log files as they are being copied. The DbBackup class should normally be used to obtain the array of files to be copied.

  void copyFilesNIO(final Environment env,
                    final String[] fileNames,
                    final File destDir,
                    final int bufSize)
      throws IOException, DatabaseException {

      final File srcDir = env.getHome();

      for (final String fileName : fileNames) {

          final File destFile = new File(destDir, fileName);
          final FileOutputStream fos = new FileOutputStream(destFile);
          final FileChannel foc = fos.getChannel();

          final File srcFile = new File(srcDir, fileName);
          final FileInputStream fis = new FileInputStream(srcFile);
          final FileChannel fic = fis.getChannel();
          final LogVerificationReadableByteChannel vic =
              new LogVerificationReadableByteChannel(env, fic, fileName);

          final ByteBuffer buf = ByteBuffer.allocateDirect(bufSize);

          try {
              while (true) {
                  final int len = vic.read(buf);
                  if (len < 0) {
                      break;
                  }
                  buf.flip();
                  foc.write(buf);
                  buf.clear();
              }
          } finally {
              fos.close();
              vic.close();
          }
      }
  }

It is important to read the entire underlying input stream until the end-of-file is reached to detect incomplete entries at the end of the log file.

See Also:
DbBackup, DbVerifyLog, LogVerificationInputStream

        Constructor Summary

Constructors
Constructor and Description
LogVerificationReadableByteChannel(Environment env, java.nio.channels.ReadableByteChannel channel, java.lang.String fileName)
    Creates a verification channel.

        Method Summary

Modifier and Type    Method and Description
void                 close()
boolean              isOpen()
int                  read(java.nio.ByteBuffer buffer)

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Constructor Detail

LogVerificationReadableByteChannel

public LogVerificationReadableByteChannel(Environment env,
                                          java.nio.channels.ReadableByteChannel channel,
                                          java.lang.String fileName)

Creates a verification channel.

Parameters:
env - the Environment associated with the log
channel - the underlying ReadableByteChannel for the log to be read
fileName - the file name of the input stream, for reporting in the LogVerificationException. This should be a simple file name of the form NNNNNNNN.jdb, where NNNNNNNN is the file number in hexadecimal format.
Throws:
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs

        Method Detail

read

public int read(java.nio.ByteBuffer buffer)
         throws java.io.IOException

This method reads the underlying ReadableByteChannel and verifies the contents of the stream.

Specified by:
read in interface java.nio.channels.ReadableByteChannel
Throws:
LogVerificationException - if a checksum cannot be verified or a log entry is determined to be invalid by examining its contents
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs
java.io.IOException
close

public void close()
           throws java.io.IOException

This method calls close on the underlying channel.

Specified by:
close in interface java.io.Closeable
Specified by:
close in interface java.lang.AutoCloseable
Specified by:
close in interface java.nio.channels.Channel
Throws:
java.io.IOException
isOpen

public boolean isOpen()

This method calls isOpen on the underlying channel.

Specified by:
isOpen in interface java.nio.channels.Channel
diff --git a/docs/java/com/sleepycat/je/util/Splitter.html b/docs/java/com/sleepycat/je/util/Splitter.html
new file mode 100644
index 0000000..6d1cbe1
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/Splitter.html
@@ -0,0 +1,280 @@

Splitter (Oracle - Berkeley DB Java Edition API)
com.sleepycat.je.util

    Class Splitter

java.lang.Object
  com.sleepycat.je.util.Splitter

public class Splitter
extends java.lang.Object
Splitter is used to split a string based on a delimiter. Support includes double-quoted strings and the escape character. Raw tokens are returned that include the double quotes, white space, and escape characters.
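A minimal sketch of intended use, assuming a comma delimiter and a hypothetical input row; Splitter(char) and tokenize(java.lang.String) are the members summarized below:

  import com.sleepycat.je.util.Splitter;

  public class SplitterExample {
      public static void main(String[] args) {
          Splitter splitter = new Splitter(',');              // split on commas
          String[] tokens = splitter.tokenize("a,\"b,c\",d"); // hypothetical row
          // Tokens are raw, so quotes and whitespace are preserved;
          // here the middle token keeps its double quotes around b,c.
          for (String token : tokens) {
              System.out.println(token);
          }
      }
  }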

        Constructor Summary

Constructors
Constructor and Description
Splitter(char delimiter)

        Method Summary

Modifier and Type    Method and Description
java.lang.String[]   tokenize(java.lang.String inrow)

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Constructor Detail

Splitter

public Splitter(char delimiter)

        Method Detail

tokenize

public java.lang.String[] tokenize(java.lang.String inrow)
diff --git a/docs/java/com/sleepycat/je/util/class-use/ConsoleHandler.html b/docs/java/com/sleepycat/je/util/class-use/ConsoleHandler.html
new file mode 100644
index 0000000..5f2e0f7
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/ConsoleHandler.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.ConsoleHandler (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.ConsoleHandler

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbBackup.html b/docs/java/com/sleepycat/je/util/class-use/DbBackup.html
new file mode 100644
index 0000000..a0dcb33
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbBackup.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbBackup (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbBackup

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbCacheSize.html b/docs/java/com/sleepycat/je/util/class-use/DbCacheSize.html
new file mode 100644
index 0000000..c33f5e8
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbCacheSize.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbCacheSize (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbCacheSize

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbDeleteReservedFiles.html b/docs/java/com/sleepycat/je/util/class-use/DbDeleteReservedFiles.html
new file mode 100644
index 0000000..74c48f8
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbDeleteReservedFiles.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbDeleteReservedFiles (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbDeleteReservedFiles

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbDump.html b/docs/java/com/sleepycat/je/util/class-use/DbDump.html
new file mode 100644
index 0000000..e75f98d
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbDump.html
@@ -0,0 +1,173 @@

Uses of Class com.sleepycat.je.util.DbDump (Oracle - Berkeley DB Java Edition API)
diff --git a/docs/java/com/sleepycat/je/util/class-use/DbFilterStats.html b/docs/java/com/sleepycat/je/util/class-use/DbFilterStats.html
new file mode 100644
index 0000000..f94cb48
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbFilterStats.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbFilterStats (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbFilterStats

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbLoad.html b/docs/java/com/sleepycat/je/util/class-use/DbLoad.html
new file mode 100644
index 0000000..4bde792
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbLoad.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbLoad (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbLoad

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbPrintLog.html b/docs/java/com/sleepycat/je/util/class-use/DbPrintLog.html
new file mode 100644
index 0000000..ceb9345
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbPrintLog.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbPrintLog (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbPrintLog

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbScavenger.html b/docs/java/com/sleepycat/je/util/class-use/DbScavenger.html
new file mode 100644
index 0000000..f6c2716
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbScavenger.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbScavenger (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbScavenger

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbSpace.html b/docs/java/com/sleepycat/je/util/class-use/DbSpace.html
new file mode 100644
index 0000000..4376bae
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbSpace.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbSpace (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbSpace

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbStat.html b/docs/java/com/sleepycat/je/util/class-use/DbStat.html
new file mode 100644
index 0000000..4030684
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbStat.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbStat (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbStat

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbTruncateLog.html b/docs/java/com/sleepycat/je/util/class-use/DbTruncateLog.html
new file mode 100644
index 0000000..95b1018
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbTruncateLog.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbTruncateLog (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbTruncateLog

diff --git a/docs/java/com/sleepycat/je/util/class-use/DbVerify.html b/docs/java/com/sleepycat/je/util/class-use/DbVerify.html
new file mode 100644
index 0000000..f9a1a67
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbVerify.html
@@ -0,0 +1,171 @@

Uses of Class com.sleepycat.je.util.DbVerify (Oracle - Berkeley DB Java Edition API)
diff --git a/docs/java/com/sleepycat/je/util/class-use/DbVerifyLog.html b/docs/java/com/sleepycat/je/util/class-use/DbVerifyLog.html
new file mode 100644
index 0000000..be6a0bf
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/DbVerifyLog.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.DbVerifyLog (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.DbVerifyLog

diff --git a/docs/java/com/sleepycat/je/util/class-use/FileHandler.html b/docs/java/com/sleepycat/je/util/class-use/FileHandler.html
new file mode 100644
index 0000000..d5e05bf
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/FileHandler.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.FileHandler (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.FileHandler

diff --git a/docs/java/com/sleepycat/je/util/class-use/LogVerificationException.html b/docs/java/com/sleepycat/je/util/class-use/LogVerificationException.html
new file mode 100644
index 0000000..14639de
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/LogVerificationException.html
@@ -0,0 +1,180 @@

Uses of Class com.sleepycat.je.util.LogVerificationException (Oracle - Berkeley DB Java Edition API)
diff --git a/docs/java/com/sleepycat/je/util/class-use/LogVerificationInputStream.html b/docs/java/com/sleepycat/je/util/class-use/LogVerificationInputStream.html
new file mode 100644
index 0000000..5def7bc
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/LogVerificationInputStream.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.LogVerificationInputStream (Oracle - Berkeley DB Java Edition API)
No usage of com.sleepycat.je.util.LogVerificationInputStream

diff --git a/docs/java/com/sleepycat/je/util/class-use/LogVerificationReadableByteChannel.html b/docs/java/com/sleepycat/je/util/class-use/LogVerificationReadableByteChannel.html
new file mode 100644
index 0000000..8eb88a2
--- /dev/null
+++ b/docs/java/com/sleepycat/je/util/class-use/LogVerificationReadableByteChannel.html
@@ -0,0 +1,129 @@

Uses of Class com.sleepycat.je.util.LogVerificationReadableByteChannel (Oracle - Berkeley DB Java Edition API)
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.util.LogVerificationReadableByteChannel

    +
    +
    No usage of com.sleepycat.je.util.LogVerificationReadableByteChannel
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/class-use/Splitter.html b/docs/java/com/sleepycat/je/util/class-use/Splitter.html new file mode 100644 index 0000000..a821173 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/class-use/Splitter.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.je.util.Splitter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.je.util.Splitter

    +
    +
    No usage of com.sleepycat.je.util.Splitter
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/package-frame.html b/docs/java/com/sleepycat/je/util/package-frame.html new file mode 100644 index 0000000..6a404f5 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/package-frame.html @@ -0,0 +1,41 @@ + + + + + +com.sleepycat.je.util (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.je.util

    + + + diff --git a/docs/java/com/sleepycat/je/util/package-summary.html b/docs/java/com/sleepycat/je/util/package-summary.html new file mode 100644 index 0000000..c341793 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/package-summary.html @@ -0,0 +1,294 @@ + + + + + +com.sleepycat.je.util (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Package com.sleepycat.je.util

    +
    +
    Supporting utilities.
    +
    +

    See: Description

    +
    +
    +
      +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Class Summary 
Class        Description
      ConsoleHandler +
      JE instances of java.util.logging.Logger are configured to use this + implementation of java.util.logging.ConsoleHandler.
      +
      DbBackup +
      DbBackup is a helper class for stopping and restarting JE background + activity in an open environment in order to simplify backup operations.
      +
      DbCacheSize +
      Estimates the in-memory cache size needed to hold a specified data set.
      +
      DbDeleteReservedFiles +
      Command line utility used to delete reserved files explicitly, when + attempting to recover from a disk-full condition.
      +
      DbDump +
Dumps the contents of a database.
      +
      DbFilterStats +
Transforms one or more je.stat.csv statistics files and writes the output to stdout.
      +
      DbLoad +
      Loads a database from a dump file generated by DbDump.
      +
      DbPrintLog +
      Dumps the contents of the log in XML format to System.out.
      +
      DbScavenger +
      Used to retrieve as much data as possible from a corrupted environment.
      +
      DbSpace +
      DbSpace displays the disk space utilization for an environment.
      +
      DbStat 
      DbTruncateLog +
DbTruncateLog is a utility that truncates the JE log from a specified file and offset through the last log file, inclusive.
      +
      DbVerify +
      Verifies the internal structures of a database.
      +
      DbVerifyLog +
      Verifies the checksums in one or more log files.
      +
      FileHandler +
      JE instances of java.util.logging.Logger are configured to use this + implementation of java.util.logging.FileHandler.
      +
      LogVerificationInputStream +
      Verifies the checksums in an InputStream for a log file in a JE + Environment.
      +
      LogVerificationReadableByteChannel +
      Verifies the checksums in a ReadableByteChannel for a log file in a + JE Environment.
      +
      Splitter +
      Splitter is used to split a string based on a delimiter.
      +
      +
    • +
    • + + + + + + + + + + + + +
      Exception Summary 
Exception        Description
      LogVerificationException +
      Thrown during log verification if a checksum cannot be verified or a log + entry is determined to be invalid by examining its contents.
      +
      +
    • +
    + + + +

    Package com.sleepycat.je.util Description

    +
    Supporting utilities. + +

    Package Specification

+This package provides support for activities like loading and dumping data. Most utilities can be used as command-line tools or called programmatically.
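As a sketch of programmatic use, the DbBackup helper in this package can bracket a file-copy loop; the snippet below assumes an open, non-read-only Environment named env, and uses DbBackup's documented startBackup(), getLogFilesInBackupSet() and endBackup() methods:
+ DbBackup backup = new DbBackup(env);
+ backup.startBackup();
+ try {
+     String[] filesForBackup = backup.getLogFilesInBackupSet();
+     // Copy the files named in filesForBackup to the backup location...
+ } finally {
+     backup.endBackup();
+ }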
    +
    +
    See Also:
    +
    [Getting Started Guide]
    +
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/package-tree.html b/docs/java/com/sleepycat/je/util/package-tree.html new file mode 100644 index 0000000..22526d2 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/package-tree.html @@ -0,0 +1,198 @@ + + + + + +com.sleepycat.je.util Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Hierarchy For Package com.sleepycat.je.util

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/je/util/package-use.html b/docs/java/com/sleepycat/je/util/package-use.html new file mode 100644 index 0000000..dbd0604 --- /dev/null +++ b/docs/java/com/sleepycat/je/util/package-use.html @@ -0,0 +1,177 @@ + + + + + +Uses of Package com.sleepycat.je.util (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Package
    com.sleepycat.je.util

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/EntityCursor.html b/docs/java/com/sleepycat/persist/EntityCursor.html new file mode 100644 index 0000000..87bf578 --- /dev/null +++ b/docs/java/com/sleepycat/persist/EntityCursor.html @@ -0,0 +1,1666 @@ + + + + + +EntityCursor (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.persist
    +

    Interface EntityCursor<V>

    +
    +
    +
    +
      +
    • +
      +
      All Superinterfaces:
      +
      java.lang.AutoCloseable, java.io.Closeable, ForwardCursor<V>, java.lang.Iterable<V>
      +
      +
      +
      +
      public interface EntityCursor<V>
      +extends ForwardCursor<V>
      +
      Traverses entity values or key values and allows deleting or updating the + entity at the current cursor position. The value type (V) is either an + entity class or a key class, depending on how the cursor was opened. + +

      EntityCursor objects are not thread-safe. Cursors + should be opened, used and closed by a single thread.

      + +

      Cursors are opened using the EntityIndex.keys() and EntityIndex.entities() family of methods. These methods are available for + objects of any class that implements EntityIndex: PrimaryIndex, SecondaryIndex, and the indices returned by SecondaryIndex.keysIndex and SecondaryIndex.subIndex(SK). A ForwardCursor, which implements a subset of cursor operations, is also + available via the EntityJoin.keys() and EntityJoin.entities() + methods.

      + +

Values are always returned by a cursor in key order, where the key is + defined by the underlying EntityIndex. For example, a cursor on a + SecondaryIndex returns values ordered by secondary key, while a cursor + on a PrimaryIndex or a SecondaryIndex.subIndex(SK) returns + values ordered by primary key.

      + +

      WARNING: Cursors must always be closed to prevent resource leaks + which could lead to the index becoming unusable or cause an + OutOfMemoryError. To ensure that a cursor is closed in the + face of exceptions, call close() in a finally block. For example, + the following code traverses all Employee entities and closes the cursor + whether or not an exception occurs:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE)
      +     String department;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + EntityStore store = ...
      +
      + PrimaryIndex<Long, Employee> primaryIndex =
      +     store.getPrimaryIndex(Long.class, Employee.class);
      +
      + EntityCursor<Employee> cursor = primaryIndex.entities();
      + try {
      +     for (Employee entity = cursor.first();
      +                   entity != null;
      +                   entity = cursor.next()) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      Initializing the Cursor Position

      + +

      When it is opened, a cursor is not initially positioned on any value; in + other words, it is uninitialized. Most methods in this interface initialize + the cursor position but certain methods, for example, current() and + delete(), throw IllegalStateException when called for an + uninitialized cursor.

      + +

      Note that the next() and prev() methods return the first or + last value respectively for an uninitialized cursor. This allows the loop + in the example above to be rewritten as follows:

      + +
      + EntityCursor<Employee> cursor = primaryIndex.entities();
      + try {
      +     Employee entity;
      +     while ((entity = cursor.next()) != null) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      Cursors and Iterators

      + +

      The iterator() method can be used to return a standard Java Iterator that returns the same values that the cursor returns. For + example:

      + +
      + EntityCursor<Employee> cursor = primaryIndex.entities();
      + try {
      +     Iterator<Employee> i = cursor.iterator();
      +     while (i.hasNext()) {
+         Employee entity = i.next();
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      The Iterable interface is also extended by EntityCursor + to allow using the cursor as the target of a Java "foreach" statement:

      + +
      + EntityCursor<Employee> cursor = primaryIndex.entities();
      + try {
      +     for (Employee entity : cursor) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      The iterator uses the cursor directly, so any changes to the cursor + position impact the iterator and vice versa. The iterator advances the + cursor by calling next() when Iterator.hasNext() or Iterator.next() is called. Because of this interaction, to keep things + simple it is best not to mix the use of an EntityCursor + Iterator with the use of the EntityCursor traversal methods + such as next(), for a single EntityCursor object.

      + +

      Key Ranges

      + +

      A key range may be specified when opening the cursor, to restrict the + key range of the cursor to a subset of the complete range of keys in the + index. A fromKey and/or toKey parameter may be specified + when calling EntityIndex.keys(Object,boolean,Object,boolean) or + EntityIndex.entities(Object,boolean,Object,boolean). The key + arguments may be specified as inclusive or exclusive values.

      + +

      Whenever a cursor with a key range is moved, the key range bounds will be + checked, and the cursor will never be positioned outside the range. The + first() cursor value is the first existing value in the range, and + the last() cursor value is the last existing value in the range. For + example, the following code traverses Employee entities with keys from 100 + (inclusive) to 200 (exclusive):

      + +
      + EntityCursor<Employee> cursor = primaryIndex.entities(100, true, 200, false);
      + try {
      +     for (Employee entity : cursor) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      Duplicate Keys

      + +

      When using a cursor for a SecondaryIndex, the keys in the index + may be non-unique (duplicates) if SecondaryKey.relate() is MANY_TO_ONE or MANY_TO_MANY. For example, a MANY_TO_ONE Employee.department secondary key is non-unique because there are multiple + Employee entities with the same department key value. The nextDup(), + prevDup(), nextNoDup() and prevNoDup() methods may be + used to control how non-unique keys are returned by the cursor.

      + +

      nextDup() and prevDup() return the next or previous value + only if it has the same key as the current value, and null is returned when + a different key is encountered. For example, these methods can be used to + return all employees in a given department.

      + +

      nextNoDup() and prevNoDup() return the next or previous + value with a unique key, skipping over values that have the same key. For + example, these methods can be used to return the first employee in each + department.

      + +

      For example, the following code will find the first employee in each + department with nextNoDup() until it finds a department name that + matches a particular regular expression. For each matching department it + will find all employees in that department using nextDup().

      + +
      + SecondaryIndex<String, Long, Employee> secondaryIndex =
      +     store.getSecondaryIndex(primaryIndex, String.class, "department");
      +
      + String regex = ...;
      + EntityCursor<Employee> cursor = secondaryIndex.entities();
      + try {
      +     for (Employee entity = cursor.first();
      +                   entity != null;
      +                   entity = cursor.nextNoDup()) {
      +         if (entity.department.matches(regex)) {
      +             while (entity != null) {
      +                 // Do something with the matching entities...
      +                 entity = cursor.nextDup();
      +             }
      +         }
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      Updating and Deleting Entities with a Cursor

      + +

      The update(V) and delete() methods operate on the entity at + the current cursor position. Cursors on any type of index may be used to + delete entities. For example, the following code deletes all employees in + departments which have names that match a particular regular expression:

      + +
      + SecondaryIndex<String, Long, Employee> secondaryIndex =
      +     store.getSecondaryIndex(primaryIndex, String.class, "department");
      +
      + String regex = ...;
      + EntityCursor<Employee> cursor = secondaryIndex.entities();
      + try {
      +     for (Employee entity = cursor.first();
      +                   entity != null;
      +                   entity = cursor.nextNoDup()) {
      +         if (entity.department.matches(regex)) {
      +             while (entity != null) {
      +                 cursor.delete();
      +                 entity = cursor.nextDup();
      +             }
      +         }
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      Note that the cursor can be moved to the next (or previous) value after + deleting the entity at the current position. This is an important property + of cursors, since without it you would not be able to easily delete while + processing multiple values with a cursor. A cursor positioned on a deleted + entity is in a special state. In this state, current() will return + null, delete() will return false, and update(V) will return + false.
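A minimal sketch of this special state, assuming the primaryIndex from the example above:
+ EntityCursor<Employee> cursor = primaryIndex.entities();
+ try {
+     Employee entity = cursor.first();
+     if (entity != null) {
+         cursor.delete();
+         Employee atPosition = cursor.current();  // null: the position is deleted
+         boolean deletedAgain = cursor.delete();  // false: already deleted
+         entity = cursor.next();                  // the cursor can still be moved
+     }
+ } finally {
+     cursor.close();
+ }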

      + +

      The update(V) method is supported only if the value type is an + entity class (not a key class) and the underlying index is a PrimaryIndex; in other words, for a cursor returned by one of the BasicIndex.entities() methods. For example, the following code changes all + employee names to uppercase:

      + +
      + EntityCursor<Employee> cursor = primaryIndex.entities();
      + try {
      +     for (Employee entity = cursor.first();
      +                   entity != null;
      +                   entity = cursor.next()) {
      +         entity.name = entity.name.toUpperCase();
      +         cursor.update(entity);
      +     }
      + } finally {
      +     cursor.close();
      + }
      +
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Modifier and Type       Method and Description
void                    close()
                        Closes the cursor.
int                     count()
                        Returns the number of values (duplicates) for the key at the cursor position, or returns zero if all values for the key have been deleted.
long                    countEstimate()
                        Returns a rough estimate of the number of values (duplicates) for the key at the cursor position, or returns zero if all values for the key have been deleted.
V                       current()
                        Returns the value at the cursor position, or null if the value at the cursor position has been deleted.
V                       current(LockMode lockMode)
                        Returns the value at the cursor position, or null if the value at the cursor position has been deleted.
boolean                 delete()
                        Deletes the entity at the cursor position.
OperationResult         delete(WriteOptions options)
                        Deletes the entity at the cursor position, using a WriteOptions parameter and returning an OperationResult.
EntityCursor<V>         dup()
                        Duplicates the cursor at the cursor position.
V                       first()
                        Moves the cursor to the first value and returns it, or returns null if the cursor range is empty.
V                       first(LockMode lockMode)
                        Moves the cursor to the first value and returns it, or returns null if the cursor range is empty.
EntityResult<V>         get(Get getType, ReadOptions options)
                        Moves the cursor according to the specified Get type and returns the value at the updated position.
CacheMode               getCacheMode()
                        Returns the default CacheMode used for subsequent operations performed using this cursor.
java.util.Iterator<V>   iterator()
                        Returns an iterator over the key range, starting with the value following the current position or at the first value if the cursor is uninitialized.
java.util.Iterator<V>   iterator(LockMode lockMode)
                        Returns an iterator over the key range, starting with the value following the current position or at the first value if the cursor is uninitialized.
V                       last()
                        Moves the cursor to the last value and returns it, or returns null if the cursor range is empty.
V                       last(LockMode lockMode)
                        Moves the cursor to the last value and returns it, or returns null if the cursor range is empty.
V                       next()
                        Moves the cursor to the next value and returns it, or returns null if there are no more values in the cursor range.
V                       next(LockMode lockMode)
                        Moves the cursor to the next value and returns it, or returns null if there are no more values in the cursor range.
V                       nextDup()
                        Moves the cursor to the next value with the same key (duplicate) and returns it, or returns null if no more values are present for the key at the current position.
V                       nextDup(LockMode lockMode)
                        Moves the cursor to the next value with the same key (duplicate) and returns it, or returns null if no more values are present for the key at the current position.
V                       nextNoDup()
                        Moves the cursor to the next value with a different key and returns it, or returns null if there are no more unique keys in the cursor range.
V                       nextNoDup(LockMode lockMode)
                        Moves the cursor to the next value with a different key and returns it, or returns null if there are no more unique keys in the cursor range.
V                       prev()
                        Moves the cursor to the previous value and returns it, or returns null if there are no preceding values in the cursor range.
V                       prev(LockMode lockMode)
                        Moves the cursor to the previous value and returns it, or returns null if there are no preceding values in the cursor range.
V                       prevDup()
                        Moves the cursor to the previous value with the same key (duplicate) and returns it, or returns null if no preceding values are present for the key at the current position.
V                       prevDup(LockMode lockMode)
                        Moves the cursor to the previous value with the same key (duplicate) and returns it, or returns null if no preceding values are present for the key at the current position.
V                       prevNoDup()
                        Moves the cursor to the preceding value with a different key and returns it, or returns null if there are no preceding unique keys in the cursor range.
V                       prevNoDup(LockMode lockMode)
                        Moves the cursor to the preceding value with a different key and returns it, or returns null if there are no preceding unique keys in the cursor range.
void                    setCacheMode(CacheMode cacheMode)
                        Changes the CacheMode default used for subsequent operations performed using this cursor.
boolean                 update(V entity)
                        Replaces the entity at the cursor position with the given entity.
OperationResult         update(V entity, WriteOptions options)
                        Replaces the entity at the cursor position with the given entity, using a WriteOptions parameter and returning an OperationResult.
        +
        +
          +
        • + + +

          Methods inherited from interface java.lang.Iterable

          +forEach, spliterator
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          +
        • +

          nextDup

          +
          V nextDup()
          +   throws DatabaseException
          +
          Moves the cursor to the next value with the same key (duplicate) and + returns it, or returns null if no more values are present for the key at + the current position. + +

          LockMode.DEFAULT is used implicitly.

          +
          +
          Returns:
          +
          the next value with the same key, or null if no more values are + present for the key at the current position.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized. + +
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          nextDup

          +
          V nextDup(LockMode lockMode)
          +   throws DatabaseException
          +
          Moves the cursor to the next value with the same key (duplicate) and + returns it, or returns null if no more values are present for the key at + the current position.
          +
          +
          Parameters:
          +
          lockMode - the lock mode to use for this operation, or null to + use LockMode.DEFAULT.
          +
          Returns:
          +
          the next value with the same key, or null if no more values are + present for the key at the current position.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized. + +
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          nextNoDup

          +
          V nextNoDup()
          +     throws DatabaseException
          +
          Moves the cursor to the next value with a different key and returns it, + or returns null if there are no more unique keys in the cursor range. + If the cursor is uninitialized, this method is equivalent to first(). + +

          LockMode.DEFAULT is used implicitly.

          +
          +
          Returns:
          +
          the next value with a different key, or null if there are no + more unique keys in the cursor range. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          nextNoDup

          +
          V nextNoDup(LockMode lockMode)
          +     throws DatabaseException
          +
          Moves the cursor to the next value with a different key and returns it, + or returns null if there are no more unique keys in the cursor range. + If the cursor is uninitialized, this method is equivalent to first().
          +
          +
          Parameters:
          +
          lockMode - the lock mode to use for this operation, or null to + use LockMode.DEFAULT.
          +
          Returns:
          +
          the next value with a different key, or null if there are no + more unique keys in the cursor range. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          prev

          +
          V prev(LockMode lockMode)
+  throws DatabaseException
          +
          Moves the cursor to the previous value and returns it, or returns null + if there are no preceding values in the cursor range. If the cursor is + uninitialized, this method is equivalent to last().
          +
          +
          Parameters:
          +
          lockMode - the lock mode to use for this operation, or null to + use LockMode.DEFAULT.
          +
          Returns:
          +
          the previous value, or null if there are no preceding values in + the cursor range. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          prevDup

          +
          V prevDup()
          +   throws DatabaseException
          +
          Moves the cursor to the previous value with the same key (duplicate) and + returns it, or returns null if no preceding values are present for the + key at the current position. + +

          LockMode.DEFAULT is used implicitly.

          +
          +
          Returns:
          +
          the previous value with the same key, or null if no preceding + values are present for the key at the current position.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized. + +
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          prevDup

          +
          V prevDup(LockMode lockMode)
          +   throws DatabaseException
          +
          Moves the cursor to the previous value with the same key (duplicate) and + returns it, or returns null if no preceding values are present for the + key at the current position.
          +
          +
          Parameters:
          +
          lockMode - the lock mode to use for this operation, or null to + use LockMode.DEFAULT.
          +
          Returns:
          +
          the previous value with the same key, or null if no preceding + values are present for the key at the current position.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized. + +
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          prevNoDup

          +
          V prevNoDup()
          +     throws DatabaseException
          +
          Moves the cursor to the preceding value with a different key and returns + it, or returns null if there are no preceding unique keys in the cursor + range. If the cursor is uninitialized, this method is equivalent to + last(). + +

          LockMode.DEFAULT is used implicitly.

          +
          +
          Returns:
          +
          the previous value with a different key, or null if there are no + preceding unique keys in the cursor range. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          prevNoDup

          +
          V prevNoDup(LockMode lockMode)
          +     throws DatabaseException
          +
          Moves the cursor to the preceding value with a different key and returns + it, or returns null if there are no preceding unique keys in the cursor + range. If the cursor is uninitialized, this method is equivalent to + last().
          +
          +
          Parameters:
          +
          lockMode - the lock mode to use for this operation, or null to + use LockMode.DEFAULT.
          +
          Returns:
          +
          the previous value with a different key, or null if there are no + preceding unique keys in the cursor range. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          current

          +
          V current(LockMode lockMode)
          +   throws DatabaseException
          +
          Returns the value at the cursor position, or null if the value at the + cursor position has been deleted.
          +
          +
          Parameters:
          +
          lockMode - the lock mode to use for this operation, or null to + use LockMode.DEFAULT.
          +
          Returns:
          +
          the value at the cursor position, or null if it has been + deleted.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized. + +
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          get

          +
          EntityResult<V> get(Get getType,
          +                    ReadOptions options)
          +             throws DatabaseException
          +
          Moves the cursor according to the specified Get type and returns + the value at the updated position. + +

          The following table lists each allowed operation. Also specified is + whether the cursor must be initialized (positioned on a value) before + calling this method. See the individual Get operations for more + information.

          + +
          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Get operation     Description                                         Cursor position must be initialized?
Get.CURRENT       Accesses the current value.                         yes
Get.FIRST         Finds the first value in the cursor range.          no
Get.LAST          Finds the last value in the cursor range.           no
Get.NEXT          Moves to the next value.                            no**
Get.NEXT_DUP      Moves to the next value with the same key.          yes
Get.NEXT_NO_DUP   Moves to the next value with a different key.       no**
Get.PREV          Moves to the previous value.                        no**
Get.PREV_DUP      Moves to the previous value with the same key.      yes
Get.PREV_NO_DUP   Moves to the previous value with a different key.   no**
          + +

          ** - For these 'next' and 'previous' operations the cursor may be + uninitialized, in which case the cursor will be moved to the first or + last value in the cursor range, respectively.
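For example, a full traversal can be written with this method alone; the sketch below assumes a cursor opened as in the class description and uses default ReadOptions:
+ EntityResult<Employee> result = cursor.get(Get.FIRST, null);
+ while (result != null) {
+     Employee entity = result.value();
+     // Do something with the entity...
+     result = cursor.get(Get.NEXT, null);
+ }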

          +
          +
          Parameters:
          +
          getType - the Get operation type. Must be one of the values listed + above.
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the EntityResult, including the value at the new cursor + position, or null if the requested value is not present in the cursor + range.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized.
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          Since:
          +
          7.0
          +
          +
        • +
        + + + +
          +
        • +

          count

          +
          int count()
          +   throws DatabaseException
          +
          Returns the number of values (duplicates) for the key at the cursor + position, or returns zero if all values for the key have been deleted. + Returns one or zero if the underlying index has unique keys. + + +

          The cost of this method is directly proportional to the number of + values.

          + + +

          LockMode.DEFAULT is used implicitly.

          +
          +
          Returns:
          +
          the number of duplicates, or zero if all values for the current + key have been deleted.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized. + +
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          countEstimate

          +
          long countEstimate()
          +            throws DatabaseException
          +
          Returns a rough estimate of the number of values (duplicates) for the + key at the cursor position, or returns zero if all values for the key + have been deleted. Returns one or zero if the underlying index has + unique keys. + +

          If the underlying index has non-unique keys, a quick estimate of the + number of values is computed using information in the Btree. Because + the Btree is unbalanced, in some cases the estimate may be off by a + factor of two or more. The estimate is accurate when the number of + records is less than the configured NodeMaxEntries.

          + +

          The cost of this method is fixed, rather than being proportional to + the number of values. Because its accuracy is variable, this method + should normally be used when accuracy is not required, such as for query + optimization, and a fixed cost operation is needed. For example, this + method is used internally for determining the index processing order in + an EntityJoin.
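A brief sketch of the trade-off described above, assuming an initialized cursor:
+ long approximate = cursor.countEstimate(); // fixed cost, may be off by 2x or more
+ int exact = cursor.count();                // exact, cost proportional to the value count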

          +
          +
          Returns:
          +
          an estimate of the count of the number of data items for the key + to which the cursor refers.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized.
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          iterator

          +
          java.util.Iterator<V> iterator()
          +
          Returns an iterator over the key range, starting with the value + following the current position or at the first value if the cursor is + uninitialized. + +

          LockMode.DEFAULT is used implicitly.

          +
          +
          Specified by:
          +
          iterator in interface ForwardCursor<V>
          +
          Specified by:
          +
          iterator in interface java.lang.Iterable<V>
          +
          Returns:
          +
          the iterator.
          +
          +
        • +
        + + + +
          +
        • +

          iterator

          +
          java.util.Iterator<V> iterator(LockMode lockMode)
          +
          Returns an iterator over the key range, starting with the value + following the current position or at the first value if the cursor is + uninitialized.
          +
          +
          Specified by:
          +
          iterator in interface ForwardCursor<V>
          +
          Parameters:
          +
          lockMode - the lock mode to use for all operations performed + using the iterator, or null to use LockMode.DEFAULT.
          +
          Returns:
          +
          the iterator.
          +
          +
        • +
        + + + + + +
          +
        • +

          update

          +
          boolean update(V entity)
          +        throws DatabaseException
          +
          Replaces the entity at the cursor position with the given entity.
          +
          +
          Parameters:
          +
          entity - the entity to replace the entity at the current position.
          +
          Returns:
          +
          true if successful or false if the entity at the current + position was previously deleted.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized.
          +
          java.lang.UnsupportedOperationException - if the index is read only or if + the value type is not an entity type. + +
          +
          DuplicateDataException - if the old and new data are not equal + according to the configured duplicate comparator or default comparator.
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + + + +
          +
        • +

          update

          +
          OperationResult update(V entity,
          +                       WriteOptions options)
          +                throws DatabaseException
          +
          Replaces the entity at the cursor position with the given entity, + using a WriteOptions parameter and returning an OperationResult.
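A minimal sketch, assuming an initialized cursor on a PrimaryIndex and default write options:
+ Employee entity = cursor.first();
+ if (entity != null) {
+     entity.name = entity.name.toUpperCase();
+     OperationResult result = cursor.update(entity, new WriteOptions());
+     if (result == null) {
+         // The entity at the cursor position was previously deleted.
+     }
+ }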
          +
          +
          Parameters:
          +
          entity - the entity to replace the entity at the current position.
          +
          options - the WriteOptions, or null to use default options.
          +
          Returns:
          +
          the OperationResult if successful or null if the entity at the + current position was previously deleted.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized.
          +
          java.lang.UnsupportedOperationException - if the index is read only or if + the value type is not an entity type.
          +
          DuplicateDataException - if the old and new data are not equal + according to the configured duplicate comparator or default comparator.
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          Since:
          +
          7.0
          +
          +
        • +
        + + + +
          +
        • +

          delete

          +
          boolean delete()
          +        throws DatabaseException
          +
          Deletes the entity at the cursor position.
          +
          +
          Returns:
          +
          true if successful or false if the entity at the current + position has been deleted. + +
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized.
          +
          java.lang.UnsupportedOperationException - if the index is read only.
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          delete

          +
          OperationResult delete(WriteOptions options)
          +                throws DatabaseException
          +
          Deletes the entity at the cursor position, using a WriteOptions + parameter and returning an OperationResult.
          +
          +
          Returns:
          +
          the OperationResult if successful or null if the entity at the + current position was previously deleted.
          +
          Throws:
          +
          java.lang.IllegalStateException - if the cursor is uninitialized.
          +
          java.lang.UnsupportedOperationException - if the index is read only.
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          Since:
          +
          7.0
          +
          +
        • +
        + + + +
          +
        • +

          dup

          +
          EntityCursor<V> dup()
          +             throws DatabaseException
          +
Duplicates the cursor at the cursor position. The returned cursor will + be initially positioned at the same position as this cursor, and + will inherit this cursor's Transaction and CursorConfig.
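For example (a sketch; the duplicated cursor must be closed independently):
+ EntityCursor<Employee> dupCursor = cursor.dup();
+ try {
+     // dupCursor starts at the same position as cursor and shares
+     // its Transaction and CursorConfig.
+ } finally {
+     dupCursor.close();
+ }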
          +
          +
          Returns:
          +
          the duplicated cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          close

          +
          void close()
          +    throws DatabaseException
          +
          Closes the cursor.
          +
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface ForwardCursor<V>
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          setCacheMode

          +
          void setCacheMode(CacheMode cacheMode)
          +
          Changes the CacheMode default used for subsequent operations + performed using this cursor. For a newly opened cursor, the default is + CacheMode.DEFAULT. Note that the default is always overridden by + a non-null cache mode that is specified via ReadOptions or + WriteOptions.
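For example, a read-once scan might avoid perturbing the cache (CacheMode.UNCHANGED is one of the documented modes):
+ cursor.setCacheMode(CacheMode.UNCHANGED);
+ // Subsequent reads through this cursor leave the cache state unchanged.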
          +
          +
          Parameters:
          +
          cacheMode - is the default CacheMode used for subsequent + operations using this cursor, or null to configure the Database or + Environment default.
          +
          See Also:
          +
          CacheMode
          +
          +
        • +
        + + + +
          +
        • +

          getCacheMode

          +
          CacheMode getCacheMode()
          +
          Returns the default CacheMode used for subsequent operations + performed using this cursor. If setCacheMode(com.sleepycat.je.CacheMode) has not been + called with a non-null value, the configured Database or Environment + default is returned.
          +
          +
          Returns:
          +
          the CacheMode default used for subsequent operations + using this cursor.
          +
          See Also:
          +
          CacheMode
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/EntityIndex.html b/docs/java/com/sleepycat/persist/EntityIndex.html new file mode 100644 index 0000000..2d0c513 --- /dev/null +++ b/docs/java/com/sleepycat/persist/EntityIndex.html @@ -0,0 +1,1653 @@ + + + + + +EntityIndex (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.persist
    +

    Interface EntityIndex<K,V>

    +
    +
    +
    +
      +
    • +
      +
      All Known Implementing Classes:
      +
      PrimaryIndex, SecondaryIndex
      +
      +
      +
      +
      public interface EntityIndex<K,V>
      +
      The interface for accessing keys and entities via a primary or secondary + index. + +

      EntityIndex objects are thread-safe. Multiple threads may safely + call the methods of a shared EntityIndex object.

      + +

      An index is conceptually a map. {key:value} mappings are + stored in the index and accessed by key. In fact, for interoperability with + other libraries that use the standard Java Map or SortedMap + interfaces, an EntityIndex may be accessed via these standard + interfaces by calling the map() or sortedMap() methods.
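For example, a brief sketch using the primaryIndex defined later in this description:
+ java.util.SortedMap<Long, Employee> map = primaryIndex.sortedMap();
+ Employee employee = map.get(1L);        // same mapping as primaryIndex.get(1L)
+ boolean present = map.containsKey(1L);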

      + +

      EntityIndex is an interface that is implemented by several + classes in this package for different purposes. Depending on the context, + the key type (K) and value type (V) of the index take on different meanings. + The different classes that implement EntityIndex are:

      + + +

      In all cases, the index key type (K) is a primary or secondary key class. + The index value type (V) is an entity class in all cases except for a SecondaryIndex.keysIndex, when it is a primary key class.

      + +

In the following example, an Employee entity with a MANY_TO_ONE secondary key is defined.

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE)
      +     String department;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      + +

      Consider that we have stored the entities below:

      + +
      + + + + + + +
      Entities
ID   Department    Name
1    Engineering   Jane Smith
2    Sales         Joan Smith
3    Engineering   John Smith
4    Sales         Jim Smith
      + +

      PrimaryIndex maps primary keys to entities:

      + +
      + PrimaryIndex<Long, Employee> primaryIndex =
      +     store.getPrimaryIndex(Long.class, Employee.class);
      + +
      + + + + + + +
      primaryIndex
Primary Key   Entity
1             1 Engineering Jane Smith
2             2 Sales Joan Smith
3             3 Engineering John Smith
4             4 Sales Jim Smith
      + +

      SecondaryIndex maps secondary keys to entities:

      + +
      + SecondaryIndex<String, Long, Employee> secondaryIndex =
      +     store.getSecondaryIndex(primaryIndex, String.class, "department");
      + +
      + + + + + + +
      secondaryIndex
Secondary Key   Entity
Engineering     1 Engineering Jane Smith
Engineering     3 Engineering John Smith
Sales           2 Sales Joan Smith
Sales           4 Sales Jim Smith
      + +

      SecondaryIndex.keysIndex maps secondary keys to primary + keys:

      + +
      + EntityIndex<String, Long> keysIndex = secondaryIndex.keysIndex();
      + +
      + + + + + + +
      keysIndex
Secondary Key   Primary Key
Engineering     1
Engineering     3
Sales           2
Sales           4
      + +

      SecondaryIndex.subIndex(SK) maps primary keys to entities, for the + subset of entities having a specified secondary key:

      + +
+ EntityIndex<Long, Employee> subIndex = secondaryIndex.subIndex("Engineering");
      + +
      + + + + +
      subIndex
Primary Key   Entity
1             1 Engineering Jane Smith
3             3 Engineering John Smith
      + +

      Accessing the Index

      + +

      An EntityIndex provides a variety of methods for retrieving + entities from an index. It also provides methods for deleting entities. + However, it does not provide methods for inserting and updating. To insert + and update entities, use the PrimaryIndex.put(E) family of methods in + the PrimaryIndex class.

      + +

      An EntityIndex supports two mechanisms for retrieving + entities:

      +
        +
1. The get(K) method returns a single value for a given key. If there are multiple values with the same secondary key (duplicates), it returns the first entity in the duplicate set.
2. An EntityCursor can be obtained using the keys() and entities() family of methods. A cursor can be used to return all values in the index, including duplicates. A cursor can also be used to return values within a specified range of keys.
      + +

      Using the example entities above, calling get(K) on the primary + index will always return the employee with the given ID, or null if no such + ID exists. But calling get(K) on the secondary index will retrieve + the first employee in the given department, which may not be very + useful:

      + +
      + Employee emp = primaryIndex.get(1);      // Returns by unique ID
      + emp = secondaryIndex.get("Engineering"); // Returns first in department
      + +

      Using a cursor, you can iterate through all duplicates in the secondary + index:

      + +
      + EntityCursor<Employee> cursor = secondaryIndex.entities();
      + try {
      +     for (Employee entity : cursor) {
      +         if (entity.department.equals("Engineering")) {
      +             // Do something with the entity...
      +         }
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      But for a large database it is much more efficient to iterate over only + those entities with the secondary key you're searching for. This could be + done by restricting a cursor to a range of keys:

      + +
      + EntityCursor<Employee> cursor =
      +     secondaryIndex.entities("Engineering", true, "Engineering", true);
      + try {
      +     for (Employee entity : cursor) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      However, when you are interested only in the entities with a particular + secondary key value, it is more convenient to use a sub-index:

      + +
+ EntityIndex<Long, Employee> subIndex = secondaryIndex.subIndex("Engineering");
      + EntityCursor<Employee> cursor = subIndex.entities();
      + try {
      +     for (Employee entity : cursor) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      In addition to being more convenient than a cursor range, a sub-index + allows retrieving by primary key:

      + +
      + Employee emp = subIndex.get(1);
      + +

      When using a sub-index, all operations performed on the sub-index are + restricted to the single key that was specified when the sub-index was + created. For example, the following returns null because employee 2 is not + in the Engineering department and therefore is not part of the + sub-index:

      + +
      + Employee emp = subIndex.get(2);
      + +

      For more information on using cursors and cursor ranges, see EntityCursor.

      + +

      Note that when using an index, keys and values are stored and retrieved + by value not by reference. In other words, if an entity object is stored + and then retrieved, or retrieved twice, each object will be a separate + instance. For example, in the code below the assertion will always + fail.

      +
      + MyKey key = ...;
      + MyEntity entity1 = index.get(key);
      + MyEntity entity2 = index.get(key);
      + assert entity1 == entity2; // always fails!
      + 
      + +

      Deleting from the Index

      + +

      Any type of index may be used to delete entities with a specified key by + calling delete(K). The important thing to keep in mind is that + all entities with the specified key are deleted. In a primary index, + at most a single entity is deleted:

      + +
+ primaryIndex.delete(1L); // Deletes a single employee by unique ID
      + +

      But in a secondary index, multiple entities may be deleted:

      + +
      + secondaryIndex.delete("Engineering"); // Deletes all Engineering employees
      + +

This raises the question: how can a single entity be deleted without knowing its primary key? The answer is to use cursors. After locating an entity using a cursor, the entity can be deleted by calling EntityCursor.delete().

      + +
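For illustration, here is a minimal sketch of that pattern, assuming a transactional store, an open Transaction txn (see the Transactions section below), and the Employee examples above; it deletes one employee found via the secondary index without ever touching the primary key:

+ EntityCursor<Employee> cursor =
+     secondaryIndex.subIndex("Engineering").entities(txn, null);
+ try {
+     if (cursor.next() != null) {
+         cursor.delete(); // deletes the entity at the cursor position
+     }
+ } finally {
+     cursor.close();
+ }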

      Transactions

      + +

Transactions can be used to provide standard ACID (Atomicity, Consistency, Isolation and Durability) guarantees when retrieving, storing and deleting entities. This section provides a brief overview of how to use transactions with the Direct Persistence Layer. For more information on using transactions, see Writing Transactional Applications.

      + +

      Transactions may be used only with a transactional EntityStore, + which is one for which StoreConfig.setTransactional(true) has been called. Likewise, a + transactional store may only be used with a transactional Environment, which is one for which EnvironmentConfig.setTransactional(true) + has been called. For example:

      + +
      + EnvironmentConfig envConfig = new EnvironmentConfig();
      + envConfig.setTransactional(true);
      + envConfig.setAllowCreate(true);
      + Environment env = new Environment(new File("/my/data"), envConfig);
      +
      + StoreConfig storeConfig = new StoreConfig();
      + storeConfig.setTransactional(true);
      + storeConfig.setAllowCreate(true);
      + EntityStore store = new EntityStore(env, "myStore", storeConfig);
      + +

      Transactions are represented by Transaction objects, which are + part of the Base API. Transactions are created + using the Environment.beginTransaction + method.

      + +

      A transaction will include all operations for which the transaction + object is passed as a method argument. All retrieval, storage and deletion + methods have an optional Transaction parameter for this purpose. + When a transaction is passed to a method that opens a cursor, all retrieval, + storage and deletion operations performed using that cursor will be included + in the transaction.

      + +

A transaction may be committed by calling Transaction.commit() or aborted by calling Transaction.abort(). For example, two employees may be deleted atomically with a transaction; in other words, either both are deleted or neither is deleted:

      + +
      + Transaction txn = env.beginTransaction(null, null);
      + try {
+     primaryIndex.delete(txn, 1L);
+     primaryIndex.delete(txn, 2L);
      +     txn.commit();
      +     txn = null;
      + } finally {
      +     if (txn != null) {
      +         txn.abort();
      +     }
      + }
      + +

      WARNING: Transactions must always be committed or aborted to + prevent resource leaks which could lead to the index becoming unusable or + cause an OutOfMemoryError. To ensure that a transaction is + aborted in the face of exceptions, call Transaction.abort() in a + finally block.

      + +

For a transactional store, storage and deletion operations are always transaction protected, whether or not a transaction is explicitly used. A null transaction argument means to perform the operation using auto-commit, or the implied thread transaction if an XAEnvironment is being used. A transaction is automatically started as part of the operation and is automatically committed if the operation completes successfully. The transaction is automatically aborted if an exception occurs during the operation, and the exception is re-thrown to the caller. For example, each employee is deleted using an auto-commit transaction below, but it is possible that employee 1 will be deleted and employee 2 will not be deleted, if an error or crash occurs while deleting employee 2:

      + +
+ primaryIndex.delete(null, 1L);
+ primaryIndex.delete(null, 2L);
      + +

      When retrieving entities, a null transaction argument means to perform + the operation non-transactionally. The operation is performed outside the + scope of any transaction, without providing transactional ACID guarantees. + If an implied thread transaction is present (i.e. if an XAEnvironment is + being used), that transaction is used. When a non-transactional store is + used, transactional ACID guarantees are also not provided.
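For example, a single entity can be read outside of any transaction by passing null to the get(Transaction, K, LockMode) signature shown in the method summary below (a sketch, reusing the primaryIndex from the earlier examples):

+ Employee emp = primaryIndex.get(null, 1L, LockMode.DEFAULT);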

      + +

For non-transactional and auto-commit usage, overloaded signatures for retrieval, storage and deletion methods are provided to avoid having to pass a null transaction argument. For example, delete(K) may be called instead of delete(Transaction, K). Thus, the following code is equivalent to the code above where null was passed for the transaction:

      + +
+ primaryIndex.delete(1L);
+ primaryIndex.delete(2L);
      + +

      For retrieval methods the overloaded signatures also include an optional + LockMode parameter, and overloaded signatures for opening cursors + include an optional CursorConfig parameter. These parameters are + described further below in the Locking and Lock Modes section.

      + +

      Transactions and Cursors

      + +

There are two special considerations when using cursors with transactions. First, for a transactional store, a non-null transaction must be passed to methods that open a cursor if that cursor will be used to delete or update entities. Cursors do not perform auto-commit when a null transaction is explicitly passed or implied by the method signature. For example, the following code will throw DatabaseException when the EntityCursor.delete() method is called:

      + +
      + // Does not work with a transactional store!
      + EntityCursor<Employee> cursor = primaryIndex.entities();
      + try {
      +     for (Employee entity : cursor) {
      +         cursor.delete(); // Will throw DatabaseException.
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      Instead, the entities(Transaction,CursorConfig) signature must + be used and a non-null transaction must be passed:

      + +
      + EntityCursor<Employee> cursor = primaryIndex.entities(txn, null);
      + try {
      +     for (Employee entity : cursor) {
      +         cursor.delete();
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      The second consideration is that error handling is more complex when + using both transactions and cursors, for the following reasons:

      +
        +
1. When an exception occurs, the transaction should be aborted.

2. Cursors must be closed whether or not an exception occurs.

3. Cursors must be closed before committing or aborting the transaction.
      + +

      For example:

      + +
      + Transaction txn = env.beginTransaction(null, null);
      + EntityCursor<Employee> cursor = null;
      + try {
      +     cursor = primaryIndex.entities(txn, null);
      +     for (Employee entity : cursor) {
      +         cursor.delete();
      +     }
      +     cursor.close();
      +     cursor = null;
      +     txn.commit();
      +     txn = null;
      + } finally {
      +     if (cursor != null) {
      +         cursor.close();
      +     }
      +     if (txn != null) {
      +         txn.abort();
      +     }
      + }
      + +

      Locking and Lock Modes

      + +

      This section provides a brief overview of locking and describes how lock + modes are used with the Direct Persistence Layer. For more information on + locking, see Writing + Transactional Applications.

      + +

      When using transactions, locks are normally acquired on each entity that + is retrieved or stored. The locks are used to isolate one transaction from + another. Locks are normally released only when the transaction is committed + or aborted.

      + +

When not using transactions, locks are also normally acquired on each entity that is retrieved or stored. However, these locks are released when the operation is complete. When using cursors, in order to provide cursor stability, locks are held until the cursor is moved to a different entity or closed.

      + +

      This default locking behavior provides full transactional ACID guarantees + and cursor stability. However, application performance can sometimes be + improved by compromising these guarantees. As described in Writing + Transactional Applications, the LockMode and CursorConfig parameters are two of the mechanisms that can be used to make + compromises.

      + +

For example, imagine that you need an approximate count of all entities matching certain criteria, and it is acceptable for entities to be changed by other threads or other transactions while performing this query. LockMode.READ_UNCOMMITTED can be used to perform the retrievals without acquiring any locks. This reduces memory consumption, does less processing, and improves concurrency.

      + +
      + EntityCursor<Employee> cursor = primaryIndex.entities(txn, null);
      + try {
      +     Employee entity;
      +     while ((entity = cursor.next(LockMode.READ_UNCOMMITTED)) != null) {
      +         // Examine the entity and accumulate totals...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      The LockMode parameter specifies locking behavior on a + per-operation basis. If null or LockMode.DEFAULT is specified, the + default lock mode is used.

      + +

      It is also possible to specify the default locking behavior for a cursor + using CursorConfig. The example below is equivalent to the example + above:

      + +
      + CursorConfig config = new CursorConfig();
      + config.setReadUncommitted(true);
      + EntityCursor<Employee> cursor = primaryIndex.entities(txn, config);
      + try {
      +     Employee entity;
      +     while ((entity = cursor.next()) != null) {
      +         // Examine the entity and accumulate totals...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + + +

Note that READ_UNCOMMITTED can be used with a key cursor to reduce I/O, potentially providing significant performance benefits. See Key Cursor Optimization with READ_UNCOMMITTED.
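As a sketch of that optimization, again reusing the primaryIndex from the earlier examples, a key-only scan with read-uncommitted isolation might look like:

+ CursorConfig config = new CursorConfig();
+ config.setReadUncommitted(true);
+ EntityCursor<Long> keyCursor = primaryIndex.keys(null, config);
+ try {
+     long keyCount = 0;
+     for (Long key : keyCursor) {
+         keyCount++; // entity data is not fetched, only keys
+     }
+ } finally {
+     keyCursor.close();
+ }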

      + + +

      The use of other lock modes, cursor configuration, and transaction + configuration are discussed in Writing + Transactional Applications.

      + +

      Performing Transaction Retries

      + +

      Lock conflict handling is another important topic discussed in Writing + Transactional Applications. To go along with that material, here we + show a lock conflict handling loop in the context of the Direct Persistence + Layer. The example below shows deleting all entities in a primary index in + a single transaction. If a lock conflict occurs, the transaction is aborted + and the operation is retried.

      + + +

      This is a DPL version of the equivalent example code + for the base API.

      + +

The following example code illustrates the recommended approach. Note that the Environment.beginTransaction and Transaction.commit calls are intentionally inside the try block. When using JE-HA, this will make it easy to add a catch for other exceptions that can be resolved by retrying the transaction, such as consistency exceptions.

      + + +
      +  void doTransaction(final Environment env,
      +                     final PrimaryIndex<Long, Employee> primaryIndex,
      +                     final int maxTries)
+      throws DatabaseException, InterruptedException {
      +
      +      boolean success = false;
      +      long sleepMillis = 0;
      +      for (int i = 0; i < maxTries; i++) {
      +          // Sleep before retrying.
      +          if (sleepMillis != 0) {
      +              Thread.sleep(sleepMillis);
      +              sleepMillis = 0;
      +          }
      +          Transaction txn = null;
      +          try {
      +              txn = env.beginTransaction(null, null);
      +              final EntityCursor<Employee> cursor =
      +                  primaryIndex.entities(txn, null);
      +              try {
      +                  // INSERT APP-SPECIFIC CODE HERE:
      +                  // Perform read and write operations, for example:
      +                  for (Employee entity : cursor) {
      +                      cursor.delete();
      +                  }
      +              } finally {
      +                  cursor.close();
      +              }
      +              txn.commit();
      +              success = true;
      +              return;
      +          } catch (LockConflictException e) {
+              sleepMillis = LOCK_CONFLICT_RETRY_SEC * 1000; // app-defined retry interval
      +              continue;
      +          } finally {
      +              if (!success) {
      +                  if (txn != null) {
      +                      txn.abort();
      +                  }
      +              }
      +          }
      +      }
      +      // INSERT APP-SPECIFIC CODE HERE:
      +      // Transaction failed, despite retries.
      +      // Take some app-specific course of action.
      +  }
      + +

      Low Level Access

      + +

      Each Direct Persistence Layer index is associated with an underlying + Database or SecondaryDatabase defined in the Base API. At this level, an index is a Btree managed by + the Berkeley DB Java Edition transactional storage engine. Although you may + never need to work at the Base API level, keep in mind that some + types of performance tuning can be done by configuring the underlying + databases. See the EntityStore class for more information on + database and sequence configuration.

      + +

      If you wish to access an index using the Base API, you may call + the BasicIndex.getDatabase() or SecondaryIndex.getDatabase() + method to get the underlying database. To translate between entity or key + objects and DatabaseEntry objects at this level, use the bindings + returned by PrimaryIndex.getEntityBinding(), PrimaryIndex.getKeyBinding(), and SecondaryIndex.getKeyBinding().
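As a sketch of dropping down to the Base API, again assuming the PrimaryIndex<Long, Employee> from the earlier examples (error handling omitted):

+ Database db = primaryIndex.getDatabase();
+ EntryBinding<Long> keyBinding = primaryIndex.getKeyBinding();
+ EntityBinding<Employee> entityBinding = primaryIndex.getEntityBinding();
+
+ DatabaseEntry keyEntry = new DatabaseEntry();
+ DatabaseEntry dataEntry = new DatabaseEntry();
+ keyBinding.objectToEntry(1L, keyEntry);
+ if (db.get(null, keyEntry, dataEntry, LockMode.DEFAULT) ==
+         OperationStatus.SUCCESS) {
+     Employee emp = entityBinding.entryToObject(keyEntry, dataEntry);
+ }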

      +
      +
      Author:
      +
      Mark Hayes
      +

        Method Summary

Modifier and Type            Method and Description
boolean                      contains(K key)
                             Checks for existence of a key in this index.
boolean                      contains(Transaction txn, K key, LockMode lockMode)
                             Checks for existence of a key in this index.
long                         count()
                             Returns a non-transactional count of the entities in this index.
long                         count(long memoryLimit)
                             Returns a non-transactional count of the entities in this index.
boolean                      delete(K key)
                             Deletes all entities with a given index key.
boolean                      delete(Transaction txn, K key)
                             Deletes all entities with a given index key.
OperationResult              delete(Transaction txn, K key, WriteOptions options)
                             Deletes all entities with a given index key, using a WriteOptions parameter and returning an OperationResult.
EntityCursor<V>              entities()
                             Opens a cursor for traversing all entities in this index.
EntityCursor<V>              entities(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive)
                             Opens a cursor for traversing entities in a key range.
EntityCursor<V>              entities(Transaction txn, CursorConfig config)
                             Opens a cursor for traversing all entities in this index.
EntityCursor<V>              entities(Transaction txn, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive, CursorConfig config)
                             Opens a cursor for traversing entities in a key range.
V                            get(K key)
                             Gets an entity via a key of this index.
EntityResult<V>              get(Transaction txn, K key, Get getType, ReadOptions options)
                             Gets an entity via a key of this index, using Get type and ReadOptions parameters, and returning an EntityResult.
V                            get(Transaction txn, K key, LockMode lockMode)
                             Gets an entity via a key of this index.
Database                     getDatabase()
                             Returns the underlying database for this index.
EntityCursor<K>              keys()
                             Opens a cursor for traversing all keys in this index.
EntityCursor<K>              keys(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive)
                             Opens a cursor for traversing keys in a key range.
EntityCursor<K>              keys(Transaction txn, CursorConfig config)
                             Opens a cursor for traversing all keys in this index.
EntityCursor<K>              keys(Transaction txn, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive, CursorConfig config)
                             Opens a cursor for traversing keys in a key range.
java.util.Map<K,V>           map()
                             Returns a standard Java map based on this entity index.
java.util.SortedMap<K,V>     sortedMap()
                             Returns a standard Java sorted map based on this entity index.

        Method Detail

          +
        • +

          getDatabase

          +
          Database getDatabase()
          +
          Returns the underlying database for this index.
          +
          +
          Returns:
          +
          the database.
          +
          +
        • +
          +
        • +

          get

          +
          EntityResult<V> get(Transaction txn,
          +                    K key,
          +                    Get getType,
          +                    ReadOptions options)
          +             throws DatabaseException
          +
          Gets an entity via a key of this index, using Get type and ReadOptions + parameters, and returning an EntityResult.
          +
          +
          Parameters:
          +
          txn - the transaction used to protect this operation, or null + if the operation should not be transaction protected.
          +
          key - the key to search for.
          +
          getType - must be Get.SEARCH.
          +
          options - the ReadOptions, or null to use default options.
          +
          Returns:
          +
          the EntityResult, including the value mapped to the given key, + or null if the key is not present in the index.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          Since:
          +
          7.0
          +
          +
        • +
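For example (a sketch, assuming JE 7.0 or later and the primaryIndex from the class description):

+ EntityResult<Employee> result =
+     primaryIndex.get(null, 1L, Get.SEARCH, null);
+ if (result != null) {
+     Employee emp = result.value();          // non-null when result is non-null
+     OperationResult opResult = result.result();
+ }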
          +
        • +

          count

          +
          long count()
          +    throws DatabaseException
          +
Returns a non-transactional count of the entities in this index.

          This operation is faster than obtaining a count by scanning the index + manually, and will not perturb the current contents of the cache. + However, the count is not guaranteed to be accurate if there are + concurrent updates. Note that this method does scan a significant + portion of the index and should be considered a fairly expensive + operation.

          + +

          This operation will disable deletion of log files by the JE log + cleaner during its execution and will consume a certain amount of + memory (but without affecting the memory that is available for the + JE cache). To avoid excessive memory consumption (and a potential + OutOfMemoryError) this method places an internal limit on + its memory consumption. If this limit is reached, the method will + still work properly, but its performance will degrade. To specify + a different memory limit than the one used by this method, use the + count(long memoryLimit) method.
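For example, a count with an explicit 10 MB limit might look like this sketch (the limit value is purely illustrative):

+ long numEntities = primaryIndex.count(10L * 1024 * 1024);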

          + +
          +
          +
          Returns:
          +
the number of entities in this index.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          count

          +
          long count(long memoryLimit)
          +    throws DatabaseException
          +
Returns a non-transactional count of the entities in this index.

          This operation is faster than obtaining a count by scanning the index + manually, and will not perturb the current contents of the cache. + However, the count is not guaranteed to be accurate if there are + concurrent updates. Note that this method does scan a significant + portion of the index and should be considered a fairly expensive + operation.

          + +

          This operation will disable deletion of log files by the JE log + cleaner during its execution and will consume a certain amount of + memory (but without affecting the memory that is available for the + JE cache). To avoid excessive memory consumption (and a potential + OutOfMemoryError) this method takes as input an upper bound + on the memory it may consume. If this limit is reached, the method + will still work properly, but its performance will degrade.

          +
          +
          Returns:
          +
          the number of entities in this index.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          delete

          +
          OperationResult delete(Transaction txn,
          +                       K key,
          +                       WriteOptions options)
          +                throws DatabaseException
          +
          Deletes all entities with a given index key, using a WriteOptions + parameter and returning an OperationResult.
          +
          +
          Parameters:
          +
          txn - the transaction used to protect this operation, null to use + auto-commit, or null if the store is non-transactional.
          +
          key - the key to search for.
          +
          options - the WriteOptions, or null to use default options.
          +
          Returns:
          +
the OperationResult if any entities were deleted, else null.
          +
          Throws:
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          Since:
          +
          7.0
          +
          +
        • +
          +
        • +

          keys

          +
          EntityCursor<K> keys()
          +              throws DatabaseException
          +
Opens a cursor for traversing all keys in this index.

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          + + +

          Note that READ_UNCOMMITTED can be used with a key cursor to + reduce I/O, potentially providing significant performance benefits. See + Key Cursor Optimization with + READ_UNCOMMITTED

          +
          +
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          keys

          +
          EntityCursor<K> keys(Transaction txn,
          +                     CursorConfig config)
          +              throws DatabaseException
          +
Opens a cursor for traversing all keys in this index.

          Note that READ_UNCOMMITTED can be used with a key cursor to + reduce I/O, potentially providing significant performance benefits. See + Key Cursor Optimization with + READ_UNCOMMITTED

          +
          +
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          entities

          +
          EntityCursor<V> entities()
          +                  throws DatabaseException
          +
Opens a cursor for traversing all entities in this index.

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          +
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          entities

          +
          EntityCursor<V> entities(Transaction txn,
          +                         CursorConfig config)
          +                  throws DatabaseException
          +
          Opens a cursor for traversing all entities in this index.
          +
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          keys

          +
          EntityCursor<K> keys(K fromKey,
          +                     boolean fromInclusive,
          +                     K toKey,
          +                     boolean toInclusive)
          +              throws DatabaseException
          +
Opens a cursor for traversing keys in a key range.

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          + + +

          Note that READ_UNCOMMITTED can be used with a key cursor to + reduce I/O, potentially providing significant performance benefits. See + Key Cursor Optimization with + READ_UNCOMMITTED

          +
          +
          +
          Parameters:
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          keys

          +
          EntityCursor<K> keys(Transaction txn,
          +                     K fromKey,
          +                     boolean fromInclusive,
          +                     K toKey,
          +                     boolean toInclusive,
          +                     CursorConfig config)
          +              throws DatabaseException
          +
Opens a cursor for traversing keys in a key range.

          Key Cursor Optimization with + READ_UNCOMMITTED

          + +

          Using a key cursor potentially has a large performance benefit when + the READ_UNCOMMITTED isolation mode is used. In this case, if + the record data is not in the JE cache, it will not be read from disk. + The performance benefit is potentially large because random access disk + reads may be reduced. Examples are:

          +
            +
• Scanning all records in key order, when the entity is not needed and READ_UNCOMMITTED isolation is acceptable.
• Skipping over records quickly to perform approximate pagination with READ_UNCOMMITTED isolation.
          + +

          For other isolation modes (READ_COMMITTED, REPEATABLE_READ and SERIALIZABLE), the performance benefit of a + key cursor is not as significant. In this case, the data item must be + read into the JE cache if it is not already present, in order to lock + the record. The only performance benefit is that the data will not be + copied from the JE cache to the application's entry parameter, and will + not be unmarshalled into an entity object.

          + +

          For information on specifying isolation modes, see LockMode, + CursorConfig and TransactionConfig.

          +
          +
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          entities

          +
          EntityCursor<V> entities(K fromKey,
          +                         boolean fromInclusive,
          +                         K toKey,
          +                         boolean toInclusive)
          +                  throws DatabaseException
          +
Opens a cursor for traversing entities in a key range.

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          +
          +
          Parameters:
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          entities

          +
          EntityCursor<V> entities(Transaction txn,
          +                         K fromKey,
          +                         boolean fromInclusive,
          +                         K toKey,
          +                         boolean toInclusive,
          +                         CursorConfig config)
          +                  throws DatabaseException
          +
          Opens a cursor for traversing entities in a key range.
          +
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          map

          +
          java.util.Map<K,V> map()
          +
          Returns a standard Java map based on this entity index. The StoredMap returned is defined by the Collections API. Stored collections conform + to the standard Java collections framework interface.
          +
          +
          Returns:
          +
          the map.
          +
          +
        • +
          +
        • +

          sortedMap

          +
          java.util.SortedMap<K,V> sortedMap()
          +
          Returns a standard Java sorted map based on this entity index. The + StoredSortedMap returned is defined by the Collections API. Stored collections conform + to the standard Java collections framework interface.
          +
          +
          Returns:
          +
          the map.
          +
          +
        • +
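For example, a sketch of a range scan through the sorted-map view, assuming the PrimaryIndex<Long, Employee> from the class description:

+ java.util.SortedMap<Long, Employee> map = primaryIndex.sortedMap();
+ for (java.util.Map.Entry<Long, Employee> entry :
+         map.headMap(100L).entrySet()) {
+     // Entries with primary key less than 100, in key order.
+ }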
        +
      • +
      +
    • +
    +
    +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/EntityJoin.html b/docs/java/com/sleepycat/persist/EntityJoin.html new file mode 100644 index 0000000..df8790c --- /dev/null +++ b/docs/java/com/sleepycat/persist/EntityJoin.html @@ -0,0 +1,501 @@ + + + + + +EntityJoin (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.persist
    +

    Class EntityJoin<PK,E>

java.lang.Object
  com.sleepycat.persist.EntityJoin<PK,E>
      public class EntityJoin<PK,E>
      +extends java.lang.Object
      +
Performs an equality join on two or more secondary keys.

      EntityJoin objects are thread-safe. Multiple threads may safely + call the methods of a shared EntityJoin object.

      + +

An equality join is a match on all entities in a given primary index that have two or more specific secondary key values. Note that key ranges may not be matched by an equality join; only exact keys are matched.

      + +

      For example:

      +
      +  // Index declarations -- see package summary example.
      +  //
      +  PrimaryIndex<String, Person> personBySsn;
      +  SecondaryIndex<String, String, Person> personByParentSsn;
      +  SecondaryIndex<Long, String, Person> personByEmployerIds;
      +  Employer employer = ...;
      +
      +  // Match on all Person objects having parentSsn "111-11-1111" and also
      +  // containing an employerId of employer.id.  In other words, match on all
      +  // of Bob's children that work for a given employer.
      +  //
+  EntityJoin<String, Person> join = new EntityJoin<String, Person>(personBySsn);
      +  join.addCondition(personByParentSsn, "111-11-1111");
      +  join.addCondition(personByEmployerIds, employer.id);
      +
      +  // Perform the join operation by traversing the results with a cursor.
      +  //
      +  ForwardCursor<Person> results = join.entities();
      +  try {
      +      for (Person person : results) {
      +          System.out.println(person.ssn + ' ' + person.name);
      +      }
      +  } finally {
      +      results.close();
      +  }
      +
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          EntityJoin

          +
          public EntityJoin(PrimaryIndex<PK,E> index)
          +
          Creates a join object for a given primary index.
          +
          +
          Parameters:
          +
          index - the primary index on which the join will operate.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

          +
        • +

          addCondition

          +
          public <SK> void addCondition(SecondaryIndex<SK,PK,E> index,
          +                              SK key)
          +
          Adds a secondary key condition to the equality join. Only entities + having the given key value in the given secondary index will be returned + by the join operation.
          +
          +
          Type Parameters:
          +
          SK - the secondary key class.
          +
          Parameters:
          +
          index - the secondary index containing the given key value.
          +
          key - the key value to match during the join.
          +
          +
        • +
          +
        • +

          entities

          +
          public ForwardCursor<E> entities(Transaction txn,
          +                                 CursorConfig config)
          +                          throws DatabaseException
          +
          Opens a cursor that returns the entities qualifying for the join. The + join operation is performed as the returned cursor is accessed.
          +
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
java.lang.IllegalStateException - if fewer than two conditions were added.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          keys

          +
          public ForwardCursor<PK> keys(Transaction txn,
          +                              CursorConfig config)
          +                       throws DatabaseException
          +
          Opens a cursor that returns the primary keys of entities qualifying for + the join. The join operation is performed as the returned cursor is + accessed.
          +
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
java.lang.IllegalStateException - if fewer than two conditions were added.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/EntityResult.html b/docs/java/com/sleepycat/persist/EntityResult.html new file mode 100644 index 0000000..09c1879 --- /dev/null +++ b/docs/java/com/sleepycat/persist/EntityResult.html @@ -0,0 +1,273 @@ + + + + + +EntityResult (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.persist
    +

    Class EntityResult<V>

java.lang.Object
  com.sleepycat.persist.EntityResult<V>
      public class EntityResult<V>
      +extends java.lang.Object
      +
      Used to return an entity value from a 'get' operation along with an + OperationResult. If the operation fails, null is returned. If the operation + succeeds and a non-null EntityResult is returned, the contained entity value + and OperationResult are guaranteed to be non-null.
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

Modifier and Type        Method and Description
OperationResult          result()
                         Returns the OperationResult resulting from the operation.
V                        value()
                         Returns the entity value resulting from the operation.
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          value

          +
          public V value()
          +
          Returns the entity value resulting from the operation.
          +
          +
          Returns:
          +
          the non-null entity value.
          +
          +
        • +
        + + + +
          +
        • +

          result

          +
          public OperationResult result()
          +
          Returns the OperationResult resulting from the operation.
          +
          +
          Returns:
          +
          the non-null OperationResult.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/EntityStore.html b/docs/java/com/sleepycat/persist/EntityStore.html new file mode 100644 index 0000000..51c0b2f --- /dev/null +++ b/docs/java/com/sleepycat/persist/EntityStore.html @@ -0,0 +1,1288 @@ + + + + + +EntityStore (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    +
    com.sleepycat.persist
    +

    Class EntityStore

    +
java.lang.Object
  com.sleepycat.persist.EntityStore
      All Implemented Interfaces:
      +
      java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class EntityStore
      +extends java.lang.Object
      +implements java.io.Closeable
      +
A store for managing persistent entity objects.

      EntityStore objects are thread-safe. Multiple threads may safely + call the methods of a shared EntityStore object.

      + +

      See the package + summary example for an example of using an EntityStore.

      + +

      Before creating an EntityStore you must create an Environment object using the Berkeley DB engine API. The environment may + contain any number of entity stores and their associated databases, as well + as other databases not associated with an entity store.

      + +

      An entity store is based on an EntityModel: a data model which + defines persistent classes (entity classes), primary keys, + secondary keys, and relationships between entities. A primary index is + created for each entity class. An associated secondary index is created for + each secondary key. The Entity, PrimaryKey and SecondaryKey annotations may be used to define entities and keys.

      + +

      To use an EntityStore, first obtain PrimaryIndex and + SecondaryIndex objects by calling getPrimaryIndex and getSecondaryIndex. Then use + these indices to store and access entity records by key.

      + +

      Although not normally needed, you can also use the entity store along + with the Base API. Methods in the PrimaryIndex and SecondaryIndex classes may be used to obtain + databases and bindings. The databases may be used directly for accessing + entity records. The bindings should be called explicitly to translate + between DatabaseEntry objects and entity model + objects.

      + +

      Each primary and secondary index is associated internally with a Database. With any of the above mentioned use cases, methods are provided + that may be used for database performance tuning. The setPrimaryConfig and setSecondaryConfig methods may be called anytime before a database is + opened via getPrimaryIndex or getSecondaryIndex. The setSequenceConfig method may be called anytime before getSequence is called or getPrimaryIndex is called + for a primary index associated with that sequence.
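For example, a database for one entity class can be configured before its index is first opened; a sketch, assuming an open EntityStore store and an Employee entity class (the deferred-write setting is just an illustration):

+  DatabaseConfig dbConfig = store.getPrimaryConfig(Employee.class);
+  dbConfig.setDeferredWrite(true);
+  store.setPrimaryConfig(Employee.class, dbConfig);
+  PrimaryIndex<Long, Employee> index =
+      store.getPrimaryIndex(Long.class, Employee.class);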

      + + +

      Database Names

      + +

      The database names of primary and secondary indices are designed to be + unique within the environment and identifiable for debugging and use with + tools such as DbDump and DbLoad.

      + +

      The syntax of a primary index database name is:

      +
         persist#STORE_NAME#ENTITY_CLASS
      +

Where STORE_NAME is the name parameter passed to EntityStore and ENTITY_CLASS is the name of the class passed to getPrimaryIndex.

      + +

      The syntax of a secondary index database name is:

      +
         persist#STORE_NAME#ENTITY_CLASS#KEY_NAME
      +

      Where KEY_NAME is the secondary key name passed to getSecondaryIndex.

      + +

      Although you should never have to construct these names manually, + understanding their syntax is useful for several reasons:

      +
        +
• Exception messages sometimes contain the database name, from which you can identify the entity class and secondary key.
• If you create other databases in the same environment that are not part of an EntityStore, to avoid naming conflicts the other database names should not begin with "persist#".
• If you are using DbDump or DbLoad to perform a backup or copy databases between environments, knowing the database names can be useful. Normally you will dump or load all database names starting with "persist#STORE_NAME#".
      + +

      If you are copying all databases in a store as mentioned in the last + point above, there is one further consideration. There are two internal + databases that must be kept with the other databases in the store in order + for the store to be used. These contain the data formats and sequences for + the store:

      +
         persist#STORE_NAME#com.sleepycat.persist.formats
      +
         persist#STORE_NAME#com.sleepycat.persist.sequences
      +

      These databases must normally be included with copies of other databases + in the store. They should not be modified by the application.

      + +

      For example, the following code snippet removes all databases for a given + store in a single transaction.

      +
      +  Environment env = ...
      +  EntityStore store = ...
      +  Transaction txn = env.beginTransaction(null, null);
      +  String prefix = "persist#" + store.getStoreName() + "#";
      +  for (String dbName : env.getDatabaseNames()) {
      +      if (dbName.startsWith(prefix)) {
      +          env.removeDatabase(txn, dbName);
      +      }
      +  }
      +  txn.commit();
      + +
      +
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

Constructor and Description
EntityStore(Environment env, java.lang.String storeName, StoreConfig config)
    Opens an entity store in a given environment.
        +
        +
      • +
      + +
        +
      • + + +

        Method Summary

Modifier and Type                        Method and Description
void                                     close()
                                         Closes all databases and sequences that were opened via this store.
void                                     closeClass(java.lang.Class entityClass)
                                         Closes the primary and secondary databases for the given entity class that were opened via this store.
EvolveStats                              evolve(EvolveConfig config)
                                         Performs conversion of unevolved objects in order to reduce lazy conversion overhead.
StoreConfig                              getConfig()
                                         Returns a copy of the entity store configuration.
Environment                              getEnvironment()
                                         Returns the environment associated with this store.
EntityModel                              getModel()
                                         Returns the current entity model for this store.
Mutations                                getMutations()
                                         Returns the set of mutations that were configured when the store was opened, or if none were configured, the set of mutations that were configured and stored previously.
DatabaseConfig                           getPrimaryConfig(java.lang.Class entityClass)
                                         Returns the default primary database Berkeley DB engine API configuration for an entity class.
<PK,E> PrimaryIndex<PK,E>                getPrimaryIndex(java.lang.Class<PK> primaryKeyClass, java.lang.Class<E> entityClass)
                                         Returns the primary index for a given entity class, opening it if necessary.
SecondaryConfig                          getSecondaryConfig(java.lang.Class entityClass, java.lang.String keyName)
                                         Returns the default secondary database Berkeley DB engine API configuration for an entity class and key name.
<SK,PK,E> SecondaryIndex<SK,PK,E>        getSecondaryIndex(PrimaryIndex<PK,E> primaryIndex, java.lang.Class<SK> keyClass, java.lang.String keyName)
                                         Returns a secondary index for a given primary index and secondary key, opening it if necessary.
Sequence                                 getSequence(java.lang.String name)
                                         Returns a named sequence for using Berkeley DB engine API directly, opening it if necessary.
SequenceConfig                           getSequenceConfig(java.lang.String name)
                                         Returns the default Berkeley DB engine API configuration for a named key sequence.
java.lang.String                         getStoreName()
                                         Returns the name of this store.
static java.util.Set<java.lang.String>   getStoreNames(Environment env)
                                         Returns the names of all entity stores in the given environment.
<SK,PK,E1,E2 extends E1> SecondaryIndex<SK,PK,E2>
                                         getSubclassIndex(PrimaryIndex<PK,E1> primaryIndex, java.lang.Class<E2> entitySubclass, java.lang.Class<SK> keyClass, java.lang.String keyName)
                                         Returns a secondary index for a secondary key in an entity subclass, opening it if necessary.
void                                     setPrimaryConfig(java.lang.Class entityClass, DatabaseConfig config)
                                         Configures the primary database for an entity class using the Berkeley DB engine API.
void                                     setSecondaryConfig(java.lang.Class entityClass, java.lang.String keyName, SecondaryConfig config)
                                         Configures a secondary database for an entity class and key name using the Berkeley DB engine API.
void                                     setSequenceConfig(java.lang.String name, SequenceConfig config)
                                         Configures a named key sequence using the Berkeley DB engine API.
void                                     sync()
                                         Flushes each modified index to disk that was opened in deferred-write mode.
void                                     truncateClass(java.lang.Class entityClass)
                                         Deletes all instances of this entity class and its (non-entity) subclasses.
void                                     truncateClass(Transaction txn, java.lang.Class entityClass)
                                         Deletes all instances of this entity class and its (non-entity) subclasses.

          Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Method Detail


          getEnvironment

public Environment getEnvironment()

Returns the environment associated with this store.

Returns:
the environment.

          getConfig

public StoreConfig getConfig()

Returns a copy of the entity store configuration.

Returns:
the config.

          getStoreName

public java.lang.String getStoreName()

Returns the name of this store.

Returns:
the name.

          getModel

public EntityModel getModel()

Returns the current entity model for this store. The current model is derived from the configured entity model and the live entity class definitions.

Returns:
the model.

          getMutations

public Mutations getMutations()

Returns the set of mutations that were configured when the store was opened, or, if none were configured, the set of mutations that were configured and stored previously.

Returns:
the mutations.

          getPrimaryIndex

public <PK,E> PrimaryIndex<PK,E> getPrimaryIndex(java.lang.Class<PK> primaryKeyClass,
                                                 java.lang.Class<E> entityClass)
                                          throws DatabaseException

Returns the primary index for a given entity class, opening it if necessary.

If they are not already open, the primary and secondary databases for the entity class are created/opened together in a single internal transaction. When the secondary indices are opened, this can cascade to open other related primary indices.

Type Parameters:
PK - the primary key class.
E - the entity class.

Parameters:
primaryKeyClass - the class of the entity's primary key field, or the corresponding primitive wrapper class if the primary key field type is a primitive.
entityClass - the entity class for which to open the primary index.

Returns:
the primary index.

Throws:
java.lang.IllegalArgumentException - if the entity class or the classes referenced by it are not persistent, if the primary key class does not match the entity's primary key field, or if the metadata for the entity or primary key is invalid.
IndexNotAvailableException - in a replicated environment, if this Replica's persistent classes have been upgraded to define a new index but the Master has not yet been upgraded.
OperationFailureException - if one of the Read Operation Failures occurs. If the index does not exist and the ReadOnly parameter is false, one of the Write Operation Failures may also occur.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          getSecondaryIndex

public <SK,PK,E> SecondaryIndex<SK,PK,E> getSecondaryIndex(PrimaryIndex<PK,E> primaryIndex,
                                                           java.lang.Class<SK> keyClass,
                                                           java.lang.String keyName)
                                                    throws DatabaseException

Returns a secondary index for a given primary index and secondary key, opening it if necessary.

NOTE: If the secondary key field is declared in a subclass of the entity class, use getSubclassIndex(com.sleepycat.persist.PrimaryIndex<PK, E1>, java.lang.Class<E2>, java.lang.Class<SK>, java.lang.String) instead.

If a SecondaryKey.relatedEntity() is used and the primary index for the related entity is not already open, it will be opened by this method. That will, in turn, open its secondary indices, which can cascade to open other primary indices.

Type Parameters:
SK - the secondary key class.
PK - the primary key class.
E - the entity class.

Parameters:
primaryIndex - the primary index associated with the returned secondary index. The entity class of the primary index, or one of its superclasses, must contain a secondary key with the given secondary key class and key name.
keyClass - the class of the secondary key field, or the corresponding primitive wrapper class if the secondary key field type is a primitive.
keyName - the name of the secondary key field, or the SecondaryKey.name() if that annotation property was specified.

Returns:
the secondary index.

Throws:
java.lang.IllegalArgumentException - if the entity class or one of its superclasses does not contain a key field of the given key class and key name, or if the metadata for the secondary key is invalid.
IndexNotAvailableException - in a replicated environment, if this Replica's persistent classes have been upgraded to define a new index but the Master has not yet been upgraded.
OperationFailureException - if one of the Read Operation Failures occurs. If the index does not exist and the ReadOnly parameter is false, one of the Write Operation Failures may also occur.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.
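A hedged sketch, assuming the Employee entity shown on the PrimaryIndex page below also declared a hypothetical secondary key field named department:

    // In Employee: @SecondaryKey(relate=Relationship.MANY_TO_ONE) String department;
    SecondaryIndex<String, Long, Employee> byDepartment =
        store.getSecondaryIndex(primaryIndex, String.class, "department");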

          getSubclassIndex

public <SK,PK,E1,E2 extends E1> SecondaryIndex<SK,PK,E2> getSubclassIndex(PrimaryIndex<PK,E1> primaryIndex,
                                                                          java.lang.Class<E2> entitySubclass,
                                                                          java.lang.Class<SK> keyClass,
                                                                          java.lang.String keyName)
                                                                   throws DatabaseException

Returns a secondary index for a secondary key in an entity subclass, opening it if necessary.

If a SecondaryKey.relatedEntity() is used and the primary index for the related entity is not already open, it will be opened by this method. That will, in turn, open its secondary indices, which can cascade to open other primary indices.

Type Parameters:
SK - the secondary key class.
PK - the primary key class.
E1 - the entity class.
E2 - the entity subclass.

Parameters:
primaryIndex - the primary index associated with the returned secondary index. The entity class of the primary index, or one of its superclasses, must contain a secondary key with the given secondary key class and key name.
entitySubclass - a subclass of the entity class for the primary index. The entity subclass must contain a secondary key with the given secondary key class and key name.
keyClass - the class of the secondary key field, or the corresponding primitive wrapper class if the secondary key field type is a primitive.
keyName - the name of the secondary key field, or the SecondaryKey.name() if that annotation property was specified.

Returns:
the secondary index.

Throws:
java.lang.IllegalArgumentException - if the given entity subclass does not contain a key field of the given key class and key name, or if the metadata for the secondary key is invalid.
IndexNotAvailableException - in a replicated environment, if this Replica's persistent classes have been upgraded to define a new index but the Master has not yet been upgraded.
OperationFailureException - if one of the Read Operation Failures occurs. If the index does not exist and the ReadOnly parameter is false, one of the Write Operation Failures may also occur.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.
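A hedged sketch, assuming a hypothetical persistent subclass Manager of Employee with a hypothetical secondary key field badge:

    // Hypothetical: @Persistent class Manager extends Employee
    // with @SecondaryKey(relate=Relationship.ONE_TO_ONE) String badge;
    SecondaryIndex<String, Long, Manager> byBadge =
        store.getSubclassIndex(primaryIndex, Manager.class,
                               String.class, "badge");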

          evolve

public EvolveStats evolve(EvolveConfig config)
                   throws DatabaseException

Performs conversion of unevolved objects in order to reduce lazy conversion overhead. Evolution may be performed concurrently with normal access to the store.

Conversion is performed one entity class at a time. An entity class is converted only if it has Mutations associated with it via StoreConfig.setMutations.

Conversion of an entity class is performed by reading each entity, converting it if necessary, and updating it if conversion was performed. When all instances of an entity class are converted, references to the appropriate Mutations are deleted. Therefore, if this method is called twice successfully without changing class definitions, the second call will do nothing.

Parameters:
config - the EvolveConfig.

Returns:
the EvolveStats.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

See Also:
Class Evolution
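A minimal sketch of an explicit evolution pass; the EvolveStats accessors getNRead() and getNConverted() are assumptions here:

    EvolveStats stats = store.evolve(new EvolveConfig());
    System.out.println("entities read: " + stats.getNRead()
                       + ", converted: " + stats.getNConverted());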

          close

public void close()
           throws DatabaseException

Closes all databases and sequences that were opened via this store. The caller must ensure that no databases opened via this store are in use.

WARNING: To prevent memory leaks, the application must call this method even when the Environment has become invalid. While this is not necessary for Database objects, it is necessary for EntityStore objects to prevent the accumulation of memory in the global DPL metadata cache.

WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

Specified by:
close in interface java.io.Closeable
close in interface java.lang.AutoCloseable

Throws:
DatabaseException - the base class for all BDB exceptions.
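A minimal sketch of the shutdown order implied by the warnings above (store first, then environment):

    try {
        // ... use indices opened via this store ...
    } finally {
        store.close(); // required even if the Environment has become invalid
        env.close();
    }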

          getSequence

public Sequence getSequence(java.lang.String name)
                     throws DatabaseException

Returns a named sequence for using the Berkeley DB engine API directly, opening it if necessary.

Parameters:
name - the sequence name, which is normally defined using the PrimaryKey.sequence() annotation property.

Returns:
the open sequence for the given sequence name.

Throws:
DatabaseException - the base class for all BDB exceptions.
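A minimal sketch, assuming a sequence named "ID" as in the PrimaryKey.sequence() example on the PrimaryIndex page:

    Sequence seq = store.getSequence("ID"); // name from @PrimaryKey(sequence="ID")
    long nextId = seq.get(null, 1);         // null txn => auto-commit; reserve 1 value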

          getSequenceConfig

public SequenceConfig getSequenceConfig(java.lang.String name)

Returns the default Berkeley DB engine API configuration for a named key sequence.

The returned configuration is as follows. All other properties have default values.

Parameters:
name - the sequence name, which is normally defined using the PrimaryKey.sequence() annotation property.

Returns:
the default configuration for the given sequence name.

          setSequenceConfig

public void setSequenceConfig(java.lang.String name,
                              SequenceConfig config)

Configures a named key sequence using the Berkeley DB engine API.

To be compatible with the entity model and the Direct Persistence Layer, the configuration should be retrieved using getSequenceConfig, modified, and then passed to this method. The following configuration properties may not be changed:

In addition, AllowCreate must be the inverse of ReadOnly.

If the range is changed to include the value zero, see PrimaryKey for restrictions.

Parameters:
name - the sequence name, which is normally defined using the PrimaryKey.sequence() annotation property.
config - the configuration to use for the given sequence name.

Throws:
java.lang.IllegalArgumentException - if the configuration is incompatible with the entity model or the Direct Persistence Layer.
java.lang.IllegalStateException - if the sequence has already been opened.
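A hedged sketch of the retrieve-modify-apply pattern described above; setCacheSize and the value 100 are illustrative assumptions:

    SequenceConfig seqConfig = store.getSequenceConfig("ID");
    seqConfig.setCacheSize(100);              // e.g., cache 100 values per handle
    store.setSequenceConfig("ID", seqConfig); // must happen before the sequence is opened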

          getPrimaryConfig

public DatabaseConfig getPrimaryConfig(java.lang.Class entityClass)

Returns the default primary database Berkeley DB engine API configuration for an entity class.

The returned configuration is as follows. All other properties have default values.

Parameters:
entityClass - the entity class identifying the primary database.

Returns:
the default configuration for the given entity class.

          setPrimaryConfig

public void setPrimaryConfig(java.lang.Class entityClass,
                             DatabaseConfig config)

Configures the primary database for an entity class using the Berkeley DB engine API.

To be compatible with the entity model and the Direct Persistence Layer, the configuration should be retrieved using getPrimaryConfig, modified, and then passed to this method. The following configuration properties may not be changed:

In addition, AllowCreate must be the inverse of ReadOnly.

Parameters:
entityClass - the entity class identifying the primary database.
config - the configuration to use for the given entity class.

Throws:
java.lang.IllegalArgumentException - if the configuration is incompatible with the entity model or the Direct Persistence Layer.
java.lang.IllegalStateException - if the database has already been opened.
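A hedged sketch of the same retrieve-modify-apply pattern for a primary database; enabling deferred-write mode via setDeferredWrite is an illustrative assumption:

    DatabaseConfig dbConfig = store.getPrimaryConfig(Employee.class);
    dbConfig.setDeferredWrite(true);                  // e.g., enable deferred-write mode
    store.setPrimaryConfig(Employee.class, dbConfig); // must happen before the index is opened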

          setSecondaryConfig

public void setSecondaryConfig(java.lang.Class entityClass,
                               java.lang.String keyName,
                               SecondaryConfig config)

Configures a secondary database for an entity class and key name using the Berkeley DB engine API.

To be compatible with the entity model and the Direct Persistence Layer, the configuration should be retrieved using getSecondaryConfig, modified, and then passed to this method. The following configuration properties may not be changed:

In addition, AllowCreate must be the inverse of ReadOnly.

Parameters:
entityClass - the entity class containing the given secondary key name.
keyName - the name of the secondary key field, or the SecondaryKey.name() if that annotation property was specified.
config - the configuration to use for the given secondary key.

Throws:
java.lang.IllegalArgumentException - if the configuration is incompatible with the entity model or the Direct Persistence Layer.
java.lang.IllegalStateException - if the database has already been opened.
diff --git a/docs/java/com/sleepycat/persist/ForwardCursor.html b/docs/java/com/sleepycat/persist/ForwardCursor.html
new file mode 100644
index 0000000..e4d455b
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/ForwardCursor.html
@@ -0,0 +1,398 @@

ForwardCursor (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist

    Interface ForwardCursor<V>

All Superinterfaces:
java.lang.AutoCloseable, java.io.Closeable, java.lang.Iterable<V>

All Known Subinterfaces:
EntityCursor<V>

public interface ForwardCursor<V>
extends java.lang.Iterable<V>, java.io.Closeable
Cursor operations limited to traversing forward. See EntityCursor for general information on cursors.

ForwardCursor objects are not thread-safe. Cursors should be opened, used and closed by a single thread.

WARNING: Cursors must always be closed to prevent resource leaks which could lead to the index becoming unusable or cause an OutOfMemoryError. To ensure that a cursor is closed in the face of exceptions, close it in a finally block.

Author:
Mark Hayes
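A minimal sketch of the finally-block discipline, assuming an EntityCursor (the only known subinterface) obtained from a hypothetical primaryIndex:

    EntityCursor<Employee> cursor = primaryIndex.entities();
    try {
        for (Employee e = cursor.next(); e != null; e = cursor.next()) {
            // process e
        }
    } finally {
        cursor.close(); // always executed, even if processing throws
    }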

        Method Summary

void close()
    Closes the cursor.
java.util.Iterator<V> iterator()
    Returns an iterator over the key range, starting with the value following the current position, or at the first value if the cursor is uninitialized.
java.util.Iterator<V> iterator(LockMode lockMode)
    Returns an iterator over the key range, starting with the value following the current position, or at the first value if the cursor is uninitialized.
V next()
    Moves the cursor to the next value and returns it, or returns null if there are no more values in the cursor range.
V next(LockMode lockMode)
    Moves the cursor to the next value and returns it, or returns null if there are no more values in the cursor range.

          Methods inherited from interface java.lang.Iterable

forEach, spliterator

        Method Detail


          next

V next(LockMode lockMode)
throws DatabaseException

Moves the cursor to the next value and returns it, or returns null if there are no more values in the cursor range. If the cursor is uninitialized, this method returns the first value.

Parameters:
lockMode - the lock mode to use for this operation, or null to use LockMode.DEFAULT.

Returns:
the next value, or null if there are no more values in the cursor range.

Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          iterator

java.util.Iterator<V> iterator()

Returns an iterator over the key range, starting with the value following the current position, or at the first value if the cursor is uninitialized.

LockMode.DEFAULT is used implicitly.

Specified by:
iterator in interface java.lang.Iterable<V>

Returns:
the iterator.

          iterator

java.util.Iterator<V> iterator(LockMode lockMode)

Returns an iterator over the key range, starting with the value following the current position, or at the first value if the cursor is uninitialized.

Parameters:
lockMode - the lock mode to use for all operations performed using the iterator, or null to use LockMode.DEFAULT.

Returns:
the iterator.

          close

void close()
    throws DatabaseException

Closes the cursor.

Specified by:
close in interface java.lang.AutoCloseable
close in interface java.io.Closeable

Throws:
DatabaseException - the base class for all BDB exceptions.
diff --git a/docs/java/com/sleepycat/persist/IndexNotAvailableException.html b/docs/java/com/sleepycat/persist/IndexNotAvailableException.html
new file mode 100644
index 0000000..6615648
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/IndexNotAvailableException.html
@@ -0,0 +1,276 @@

IndexNotAvailableException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist

    Class IndexNotAvailableException

All Implemented Interfaces:
java.io.Serializable

public class IndexNotAvailableException
extends OperationFailureException
Thrown by getPrimaryIndex, getSecondaryIndex and getSubclassIndex when an index has not yet been created. This exception can be thrown in two circumstances.

1. It can be thrown in a replicated environment when the Replica has been upgraded to contain new persistent classes that define a new primary or secondary index, but the Master has not yet been upgraded. The index does not exist because the Master has not yet been upgraded with the new classes. If the application is aware of when the Master is upgraded, it can wait for that to occur and then open the index. Or, the application may repeatedly try to open the index until it becomes available.

2. It can be thrown when opening an environment read-only with new persistent classes that define a new primary or secondary index. The index does not exist because the environment has not yet been opened read-write with the new classes. When the index is created by a read-write application, the read-only application must close and re-open the environment in order to open the new index.

Author:
Mark Hayes

See Also:
Serialized Form
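A hedged sketch of the retry approach described in the first circumstance; the store, entity class and sleep interval are placeholders, and InterruptedException handling is omitted:

    PrimaryIndex<Long, Employee> index = null;
    while (index == null) {
        try {
            index = store.getPrimaryIndex(Long.class, Employee.class);
        } catch (IndexNotAvailableException e) {
            Thread.sleep(1000); // wait for the Master to be upgraded, then retry
        }
    }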

        Method Summary

Methods inherited from class java.lang.Throwable
addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
diff --git a/docs/java/com/sleepycat/persist/PrimaryIndex.html b/docs/java/com/sleepycat/persist/PrimaryIndex.html
new file mode 100644
index 0000000..28b35a4
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/PrimaryIndex.html
@@ -0,0 +1,1785 @@

PrimaryIndex (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist

    Class PrimaryIndex<PK,E>

java.lang.Object
    com.sleepycat.persist.PrimaryIndex<PK,E>

All Implemented Interfaces:
EntityIndex<PK,E>

public class PrimaryIndex<PK,E>
extends java.lang.Object
The primary index for an entity class and its primary key.

PrimaryIndex objects are thread-safe. Multiple threads may safely call the methods of a shared PrimaryIndex object.

PrimaryIndex implements EntityIndex to map the primary key type (PK) to the entity type (E).

The Entity annotation may be used to define an entity class, and the PrimaryKey annotation may be used to define a primary key, as shown in the following example.

    @Entity
    class Employee {

        @PrimaryKey
        long id;

        String name;

        Employee(long id, String name) {
            this.id = id;
            this.name = name;
        }

        private Employee() {} // For bindings
    }

To obtain the PrimaryIndex for a given entity class, call EntityStore.getPrimaryIndex, passing the primary key class and the entity class. For example:
    EntityStore store = new EntityStore(...);

    PrimaryIndex<Long, Employee> primaryIndex =
        store.getPrimaryIndex(Long.class, Employee.class);

Note that Long.class is passed as the primary key class, but the primary key field has the primitive type long. When a primitive primary key field is used, the corresponding primitive wrapper class is used to access the primary index. For more information on key field types, see PrimaryKey.

The PrimaryIndex provides the primary storage and access methods for the instances of a particular entity class. Entities are inserted and updated in the PrimaryIndex by calling a method in the family of put(E) methods. The put(E) method will insert the entity if no entity with the same primary key already exists. If an entity with the same primary key does exist, it will update the entity and return the existing (old) entity. For example:

    Employee oldEntity;
    oldEntity = primaryIndex.put(new Employee(1, "Jane Smith"));    // Inserts an entity
    assert oldEntity == null;
    oldEntity = primaryIndex.put(new Employee(2, "Joan Smith"));    // Inserts an entity
    assert oldEntity == null;
    oldEntity = primaryIndex.put(new Employee(2, "Joan M. Smith")); // Updates an entity
    assert oldEntity != null;

The putNoReturn(E) method can be used to avoid the overhead of returning the existing entity, when the existing entity is not important to the application. The return type of putNoReturn(E) is void. For example:
    primaryIndex.putNoReturn(new Employee(1, "Jane Smith"));    // Inserts an entity
    primaryIndex.putNoReturn(new Employee(2, "Joan Smith"));    // Inserts an entity
    primaryIndex.putNoReturn(new Employee(2, "Joan M. Smith")); // Updates an entity

The putNoOverwrite(E) method can be used to ensure that an existing entity is not overwritten. putNoOverwrite(E) returns true if the entity was inserted, or false if an existing entity exists and no action was taken. For example:
    boolean inserted;
    inserted = primaryIndex.putNoOverwrite(new Employee(1, "Jane Smith"));    // Inserts an entity
    assert inserted;
    inserted = primaryIndex.putNoOverwrite(new Employee(2, "Joan Smith"));    // Inserts an entity
    assert inserted;
    inserted = primaryIndex.putNoOverwrite(new Employee(2, "Joan M. Smith")); // No action was taken!
    assert !inserted;

Primary key values must be unique; in other words, each instance of a given entity class must have a distinct primary key value. Rather than assigning the unique primary key values yourself, a sequence can be used to assign sequential integer values automatically, starting with the value 1 (one). A sequence is defined using the PrimaryKey.sequence() annotation property. For example:
    @Entity
    class Employee {

        @PrimaryKey(sequence="ID")
        long id;

        String name;

        Employee(String name) {
            this.name = name;
        }

        private Employee() {} // For bindings
    }

The name of the sequence used above is "ID". Any name can be used. If the same sequence name is used in more than one entity class, the sequence will be shared by those classes; in other words, a single sequence of integers will be used for all instances of those classes. See PrimaryKey.sequence() for more information.

Any method in the family of put(E) methods may be used to insert entities where the primary key is assigned from a sequence. When the put(E) method returns, the primary key field of the entity object will be set to the assigned key value. For example:
    Employee employee;
    employee = new Employee("Jane Smith");
    primaryIndex.putNoReturn(employee);    // Inserts an entity
    assert employee.id == 1;
    employee = new Employee("Joan Smith");
    primaryIndex.putNoReturn(employee);    // Inserts an entity
    assert employee.id == 2;

This raises the question: how do you update an existing entity without assigning a new primary key? The answer is that the put(E) methods will only assign a new key from the sequence if the primary key field is zero or null (for reference types). If an entity with a non-zero and non-null key field is passed to a put(E) method, any existing entity with that primary key value will be updated. For example:
    Employee employee;
    employee = new Employee("Jane Smith");
    primaryIndex.putNoReturn(employee);    // Inserts an entity
    assert employee.id == 1;
    employee = new Employee("Joan Smith");
    primaryIndex.putNoReturn(employee);    // Inserts an entity
    assert employee.id == 2;
    employee.name = "Joan M. Smith";
    primaryIndex.putNoReturn(employee);    // Updates an existing entity
    assert employee.id == 2;

Since PrimaryIndex implements the EntityIndex interface, it shares the common index methods for retrieving and deleting entities, opening cursors and using transactions. See EntityIndex for more information on these topics.

Note that when using an index, keys and values are stored and retrieved by value, not by reference. In other words, if an entity object is stored and then retrieved, or retrieved twice, each object will be a separate instance. For example, in the code below the assertion will always fail.
    MyKey key = ...;
    MyEntity entity1 = new MyEntity(key, ...);
    index.put(entity1);
    MyEntity entity2 = index.get(key);
    assert entity1 == entity2; // always fails!

Author:
Mark Hayes

        Constructor Summary

Constructor and Description
PrimaryIndex(Database database, java.lang.Class<PK> keyClass, EntryBinding<PK> keyBinding, java.lang.Class<E> entityClass, EntityBinding<E> entityBinding)
    Creates a primary index without using an EntityStore.

        Method Summary

boolean contains(K key)
    Checks for existence of a key in this index.
boolean contains(Transaction txn, K key, LockMode lockMode)
    Checks for existence of a key in this index.
long count()
    Returns a non-transactional count of the entities in this index.
long count(long memoryLimit)
    Returns a non-transactional count of the entities in this index.
boolean delete(K key)
    Deletes all entities with a given index key.
boolean delete(Transaction txn, K key)
    Deletes all entities with a given index key.
OperationResult delete(Transaction txn, K key, WriteOptions options)
    Deletes all entities with a given index key, using a WriteOptions parameter and returning an OperationResult.
EntityCursor<E> entities()
    Opens a cursor for traversing all entities in this index.
EntityCursor<E> entities(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive)
    Opens a cursor for traversing entities in a key range.
EntityCursor<E> entities(Transaction txn, CursorConfig config)
    Opens a cursor for traversing all entities in this index.
EntityCursor<E> entities(Transaction txn, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive, CursorConfig config)
    Opens a cursor for traversing entities in a key range.
E get(PK key)
    Gets an entity via a key of this index.
EntityResult<E> get(Transaction txn, PK key, Get getType, ReadOptions options)
    Gets an entity via a key of this index, using Get type and ReadOptions parameters, and returning an EntityResult.
E get(Transaction txn, PK key, LockMode lockMode)
    Gets an entity via a key of this index.
Database getDatabase()
    Returns the underlying database for this index.
EntityBinding<E> getEntityBinding()
    Returns the entity binding for this index.
java.lang.Class<E> getEntityClass()
    Returns the entity class for this index.
EntryBinding<PK> getKeyBinding()
    Returns the primary key binding for this index.
java.lang.Class<PK> getKeyClass()
    Returns the primary key class for this index.
EntityCursor<K> keys()
    Opens a cursor for traversing all keys in this index.
EntityCursor<K> keys(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive)
    Opens a cursor for traversing keys in a key range.
EntityCursor<K> keys(Transaction txn, CursorConfig config)
    Opens a cursor for traversing all keys in this index.
EntityCursor<K> keys(Transaction txn, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive, CursorConfig config)
    Opens a cursor for traversing keys in a key range.
java.util.Map<PK,E> map()
    Returns a standard Java map based on this entity index.
E put(E entity)
    Inserts an entity and returns null, or updates it if the primary key already exists and returns the existing entity.
E put(Transaction txn, E entity)
    Inserts an entity and returns null, or updates it if the primary key already exists and returns the existing entity.
OperationResult put(Transaction txn, E entity, Put putType, WriteOptions options)
    Inserts or updates an entity, using Put type and WriteOptions parameters, and returning an OperationResult.
boolean putNoOverwrite(E entity)
    Inserts an entity and returns true, or returns false if the primary key already exists.
boolean putNoOverwrite(Transaction txn, E entity)
    Inserts an entity and returns true, or returns false if the primary key already exists.
void putNoReturn(E entity)
    Inserts an entity, or updates it if the primary key already exists (does not return the existing entity).
void putNoReturn(Transaction txn, E entity)
    Inserts an entity, or updates it if the primary key already exists (does not return the existing entity).
java.util.SortedMap<PK,E> sortedMap()
    Returns a standard Java sorted map based on this entity index.

          Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Constructor Detail


          PrimaryIndex

public PrimaryIndex(Database database,
                    java.lang.Class<PK> keyClass,
                    EntryBinding<PK> keyBinding,
                    java.lang.Class<E> entityClass,
                    EntityBinding<E> entityBinding)
             throws DatabaseException

Creates a primary index without using an EntityStore.

This constructor is not normally needed and is provided for applications that wish to use custom bindings along with the Direct Persistence Layer. Normally, getPrimaryIndex is used instead.

Note that when this constructor is used directly, primary keys cannot be automatically assigned from a sequence. The key assignment feature requires knowledge of the primary key field, which is only available if an EntityStore is used. Of course, primary keys may be assigned from a sequence manually before calling the put methods in this class.

Parameters:
database - the primary database.
keyClass - the class of the primary key.
keyBinding - the binding to be used for primary keys.
entityClass - the class of the entities stored in this index.
entityBinding - the binding to be used for entities.

Throws:
DatabaseException - the base class for all BDB exceptions.

        Method Detail


          getKeyClass

public java.lang.Class<PK> getKeyClass()

Returns the primary key class for this index.

Returns:
the key class.

          getKeyBinding

public EntryBinding<PK> getKeyBinding()

Returns the primary key binding for this index.

Returns:
the key binding.

          getEntityClass

public java.lang.Class<E> getEntityClass()

Returns the entity class for this index.

Returns:
the entity class.

          getEntityBinding

public EntityBinding<E> getEntityBinding()

Returns the entity binding for this index.

Returns:
the entity binding.

          put

public E put(E entity)
      throws DatabaseException

Inserts an entity and returns null, or updates it if the primary key already exists and returns the existing entity.

If a PrimaryKey.sequence() is used and the primary key field of the given entity is null or zero, this method will assign the next value from the sequence to the primary key field of the given entity.

Auto-commit is used implicitly if the store is transactional.

Parameters:
entity - the entity to be inserted or updated.

Returns:
the existing entity that was updated, or null if the entity was inserted.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          put

public E put(Transaction txn,
             E entity)
      throws DatabaseException

Inserts an entity and returns null, or updates it if the primary key already exists and returns the existing entity.

If a PrimaryKey.sequence() is used and the primary key field of the given entity is null or zero, this method will assign the next value from the sequence to the primary key field of the given entity.

Parameters:
txn - the transaction used to protect this operation; null to use auto-commit, or null if the store is non-transactional.
entity - the entity to be inserted or updated.

Returns:
the existing entity that was updated, or null if the entity was inserted.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          putNoReturn

public void putNoReturn(E entity)
                 throws DatabaseException

Inserts an entity, or updates it if the primary key already exists (does not return the existing entity). This method may be used instead of put(Object) to save the overhead of returning the existing entity.

If a PrimaryKey.sequence() is used and the primary key field of the given entity is null or zero, this method will assign the next value from the sequence to the primary key field of the given entity.

Auto-commit is used implicitly if the store is transactional.

Parameters:
entity - the entity to be inserted or updated.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          putNoReturn

public void putNoReturn(Transaction txn,
                        E entity)
                 throws DatabaseException

Inserts an entity, or updates it if the primary key already exists (does not return the existing entity). This method may be used instead of put(Transaction,Object) to save the overhead of returning the existing entity.

If a PrimaryKey.sequence() is used and the primary key field of the given entity is null or zero, this method will assign the next value from the sequence to the primary key field of the given entity.

Parameters:
txn - the transaction used to protect this operation; null to use auto-commit, or null if the store is non-transactional.
entity - the entity to be inserted or updated.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          putNoOverwrite

public boolean putNoOverwrite(E entity)
                       throws DatabaseException

Inserts an entity and returns true, or returns false if the primary key already exists.

If a PrimaryKey.sequence() is used and the primary key field of the given entity is null or zero, this method will assign the next value from the sequence to the primary key field of the given entity.

Auto-commit is used implicitly if the store is transactional.

Parameters:
entity - the entity to be inserted.

Returns:
true if the entity was inserted, or false if an entity with the same primary key is already present.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          putNoOverwrite

public boolean putNoOverwrite(Transaction txn,
                              E entity)
                       throws DatabaseException

Inserts an entity and returns true, or returns false if the primary key already exists.

If a PrimaryKey.sequence() is used and the primary key field of the given entity is null or zero, this method will assign the next value from the sequence to the primary key field of the given entity.

Parameters:
txn - the transaction used to protect this operation; null to use auto-commit, or null if the store is non-transactional.
entity - the entity to be inserted.

Returns:
true if the entity was inserted, or false if an entity with the same primary key is already present.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          put

public OperationResult put(Transaction txn,
                           E entity,
                           Put putType,
                           WriteOptions options)

Inserts or updates an entity, using Put type and WriteOptions parameters, and returning an OperationResult.

If a PrimaryKey.sequence() is used and the primary key field of the given entity is null or zero, this method will assign the next value from the sequence to the primary key field of the given entity.

Parameters:
txn - the transaction used to protect this operation; null to use auto-commit, or null if the store is non-transactional.
entity - the entity to be inserted.
putType - Put.OVERWRITE or Put.NO_OVERWRITE.
options - the WriteOptions, or null to use default options.

Returns:
the OperationResult if the record is written, else null. If Put.NO_OVERWRITE is used, null is returned if an entity with the same primary key is already present. If Put.OVERWRITE is used, null is never returned.

Throws:
OperationFailureException - if one of the Write Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

Since:
7.0
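A minimal sketch, passing null for both txn and options to get auto-commit and default WriteOptions as documented above:

    OperationResult result = primaryIndex.put(
        null, new Employee(3, "John Doe"), Put.NO_OVERWRITE, null);
    if (result == null) {
        // an entity with primary key 3 already exists; nothing was written
    }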

          get

public EntityResult<E> get(Transaction txn,
                           PK key,
                           Get getType,
                           ReadOptions options)
                    throws DatabaseException

Description copied from interface: EntityIndex

Gets an entity via a key of this index, using Get type and ReadOptions parameters, and returning an EntityResult.

Parameters:
txn - the transaction used to protect this operation, or null if the operation should not be transaction protected.
key - the key to search for.
getType - must be Get.SEARCH.
options - the ReadOptions, or null to use default options.

Returns:
the EntityResult, including the value mapped to the given key, or null if the key is not present in the index.

Throws:
OperationFailureException - if one of the Read Operation Failures occurs.
EnvironmentFailureException - if an unexpected, internal or environment-wide failure occurs.
DatabaseException - the base class for all BDB exceptions.

          map

public java.util.Map<PK,E> map()

Description copied from interface: EntityIndex

Returns a standard Java map based on this entity index. The StoredMap returned is defined by the Collections API. Stored collections conform to the standard Java collections framework interface.

Returns:
the map.

          sortedMap

          +
          public java.util.SortedMap<PK,E> sortedMap()
          +
          Description copied from interface: EntityIndex
          +
          Returns a standard Java sorted map based on this entity index. The + StoredSortedMap returned is defined by the Collections API. Stored collections conform + to the standard Java collections framework interface.
          +
          +
          Returns:
          +
          the map.
          +
          +
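A brief sketch of both collection views, assuming the same hypothetical primaryIndex; the sorted view additionally supports range operations:

 java.util.Map<Long, Employee> byId = primaryIndex.map();
 for (java.util.Map.Entry<Long, Employee> entry : byId.entrySet()) {
     // entry.getKey() is the primary key, entry.getValue() the entity.
 }

 java.util.SortedMap<Long, Employee> sorted = primaryIndex.sortedMap();
 java.util.SortedMap<Long, Employee> lowIds = sorted.headMap(100L); // keys < 100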
        • +
        + + + +
          +
        • +

          getDatabase

          +
          public Database getDatabase()
          +
          Description copied from interface: EntityIndex
          +
          Returns the underlying database for this index.
          +
          +
          Specified by:
          +
          getDatabase in interface EntityIndex<K,E>
          +
          Returns:
          +
          the database.
          +
          +
        • +
          +
        • +

          count

          +
          public long count()
          +           throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
Returns a non-transactional count of the entities in this index.

          This operation is faster than obtaining a count by scanning the index + manually, and will not perturb the current contents of the cache. + However, the count is not guaranteed to be accurate if there are + concurrent updates. Note that this method does scan a significant + portion of the index and should be considered a fairly expensive + operation.

          + +

          This operation will disable deletion of log files by the JE log + cleaner during its execution and will consume a certain amount of + memory (but without affecting the memory that is available for the + JE cache). To avoid excessive memory consumption (and a potential + OutOfMemoryError) this method places an internal limit on + its memory consumption. If this limit is reached, the method will + still work properly, but its performance will degrade. To specify + a different memory limit than the one used by this method, use the + EntityIndex.count(long memoryLimit) method.

          + +
          +
          +
          Specified by:
          +
          count in interface EntityIndex<K,E>
          +
          Returns:
          +
          the number of entities in this index. + +
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs. +
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          count

          +
          public long count(long memoryLimit)
          +           throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Returns a non-transactional count of the entities in this index. + +

          This operation is faster than obtaining a count by scanning the index + manually, and will not perturb the current contents of the cache. + However, the count is not guaranteed to be accurate if there are + concurrent updates. Note that this method does scan a significant + portion of the index and should be considered a fairly expensive + operation.

          + +

          This operation will disable deletion of log files by the JE log + cleaner during its execution and will consume a certain amount of + memory (but without affecting the memory that is available for the + JE cache). To avoid excessive memory consumption (and a potential + OutOfMemoryError) this method takes as input an upper bound + on the memory it may consume. If this limit is reached, the method + will still work properly, but its performance will degrade.

          +
          +
          Specified by:
          +
          count in interface EntityIndex<K,E>
          +
          Returns:
          +
          the number of entities in this index.
          +
          Throws:
          +
          OperationFailureException - if one of the Read Operation + Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
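A sketch of both count variants, assuming the same hypothetical primaryIndex; the memory limit is an arbitrary illustrative value:

 long total = primaryIndex.count();
 long totalCapped = primaryIndex.count(10L * 1024 * 1024); // ~10 MB internal limit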
          +
        • +
          +
        • +

          delete

          +
          public OperationResult delete(Transaction txn,
          +                              K key,
          +                              WriteOptions options)
          +                       throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Deletes all entities with a given index key, using a WriteOptions + parameter and returning an OperationResult.
          +
          +
          Specified by:
          +
          delete in interface EntityIndex<K,E>
          +
          Parameters:
          +
          txn - the transaction used to protect this operation, null to use + auto-commit, or null if the store is non-transactional.
          +
          key - the key to search for.
          +
          options - the WriteOptions, or null to use default options.
          +
          Returns:
          +
the OperationResult if any entities were deleted, else null.
          +
          Throws:
          +
          OperationFailureException - if one of the Write + Operation Failures occurs.
          +
          EnvironmentFailureException - if an unexpected, internal or + environment-wide failure occurs.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
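A sketch of a delete under auto-commit, assuming the same hypothetical primaryIndex:

 OperationResult result = primaryIndex.delete(null, 1L, new WriteOptions());
 if (result == null) {
     // No entity with primary key 1 existed.
 }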
        • +
        + + + +
          +
        • +

          keys

          +
          public EntityCursor<K> keys()
          +                     throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing all keys in this index. + +

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          + + +

Note that READ_UNCOMMITTED can be used with a key cursor to reduce I/O, potentially providing significant performance benefits. See Key Cursor Optimization with READ_UNCOMMITTED below.

          +
          +
          +
          Specified by:
          +
          keys in interface EntityIndex<K,E>
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
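A sketch of a non-transactional key scan, assuming the same hypothetical primaryIndex:

 EntityCursor<Long> keys = primaryIndex.keys();
 try {
     for (Long id : keys) {
         // Visit each primary key in key order.
     }
 } finally {
     keys.close();
 }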
        • +
        + + + +
          +
        • +

          keys

          +
          public EntityCursor<K> keys(Transaction txn,
          +                            CursorConfig config)
          +                     throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing all keys in this index. + + +

Note that READ_UNCOMMITTED can be used with a key cursor to reduce I/O, potentially providing significant performance benefits. See Key Cursor Optimization with READ_UNCOMMITTED below.

          +
          +
          +
          Specified by:
          +
          keys in interface EntityIndex<K,E>
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
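A sketch of the READ_UNCOMMITTED key-scan optimization described above, assuming the same hypothetical primaryIndex and a null transaction for read-only access:

 CursorConfig config = new CursorConfig();
 config.setReadUncommitted(true);
 EntityCursor<Long> keys = primaryIndex.keys(null, config);
 try {
     for (Long id : keys) {
         // Record data not already in the JE cache is not read from disk.
     }
 } finally {
     keys.close();
 }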
        • +
        + + + +
          +
        • +

          entities

          +
          public EntityCursor<E> entities()
          +                         throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing all entities in this index. + +

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          +
          +
          Specified by:
          +
          entities in interface EntityIndex<K,E>
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          entities

          +
          public EntityCursor<E> entities(Transaction txn,
          +                                CursorConfig config)
          +                         throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing all entities in this index.
          +
          +
          Specified by:
          +
          entities in interface EntityIndex<K,E>
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          keys

          +
          public EntityCursor<K> keys(K fromKey,
          +                            boolean fromInclusive,
          +                            K toKey,
          +                            boolean toInclusive)
          +                     throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing keys in a key range. + +

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          + + +

Note that READ_UNCOMMITTED can be used with a key cursor to reduce I/O, potentially providing significant performance benefits. See Key Cursor Optimization with READ_UNCOMMITTED below.

          +
          +
          +
          Specified by:
          +
          keys in interface EntityIndex<K,E>
          +
          Parameters:
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
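A sketch of a bounded key scan, assuming the same hypothetical primaryIndex; the bounds are arbitrary illustrative values:

 // Half-open range: keys >= 100 and < 200.
 EntityCursor<Long> range = primaryIndex.keys(100L, true, 200L, false);
 try {
     for (Long id : range) {
         // Only keys within the range are visited.
     }
 } finally {
     range.close();
 }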
        • +
          +
        • +

          keys

          +
          public EntityCursor<K> keys(Transaction txn,
          +                            K fromKey,
          +                            boolean fromInclusive,
          +                            K toKey,
          +                            boolean toInclusive,
          +                            CursorConfig config)
          +                     throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing keys in a key range. + + +

Key Cursor Optimization with READ_UNCOMMITTED

          + +

          Using a key cursor potentially has a large performance benefit when + the READ_UNCOMMITTED isolation mode is used. In this case, if + the record data is not in the JE cache, it will not be read from disk. + The performance benefit is potentially large because random access disk + reads may be reduced. Examples are:

          +
            +
          • Scanning all records in key order, when the entity is not needed and + READ_UNCOMMITTED isolation is acceptable.
          • +
          • Skipping over records quickly to perform approximate pagination with + READ_UNCOMMITTED isolation.
          • +
          + +

          For other isolation modes (READ_COMMITTED, REPEATABLE_READ and SERIALIZABLE), the performance benefit of a + key cursor is not as significant. In this case, the data item must be + read into the JE cache if it is not already present, in order to lock + the record. The only performance benefit is that the data will not be + copied from the JE cache to the application's entry parameter, and will + not be unmarshalled into an entity object.

          + +

          For information on specifying isolation modes, see LockMode, + CursorConfig and TransactionConfig.

          +
          +
          +
          Specified by:
          +
          keys in interface EntityIndex<K,E>
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
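A sketch combining a transaction argument (null here), a lower bound, and a READ_UNCOMMITTED cursor configuration, assuming the same hypothetical primaryIndex:

 CursorConfig config = new CursorConfig();
 config.setReadUncommitted(true);
 EntityCursor<Long> keys =
     primaryIndex.keys(null, 100L, true, null, false, config); // no upper bound
 try {
     for (Long id : keys) {
         // Suitable for cheap key-order scans and approximate pagination.
     }
 } finally {
     keys.close();
 }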
        • +
          +
        • +

          entities

          +
          public EntityCursor<E> entities(K fromKey,
          +                                boolean fromInclusive,
          +                                K toKey,
          +                                boolean toInclusive)
          +                         throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing entities in a key range. + +

          The operations performed with the cursor will not be transaction + protected, and CursorConfig.DEFAULT is used implicitly. If the + store is transactional, the cursor may not be used to update or delete + entities.

          +
          +
          Specified by:
          +
          entities in interface EntityIndex<K,E>
          +
          Parameters:
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
          +
        • +

          entities

          +
          public EntityCursor<E> entities(Transaction txn,
          +                                K fromKey,
          +                                boolean fromInclusive,
          +                                K toKey,
          +                                boolean toInclusive,
          +                                CursorConfig config)
          +                         throws DatabaseException
          +
          Description copied from interface: EntityIndex
          +
          Opens a cursor for traversing entities in a key range.
          +
          +
          Specified by:
          +
          entities in interface EntityIndex<K,E>
          +
          Parameters:
          +
          txn - the transaction used to protect all operations performed with + the cursor, or null if the operations should not be transaction + protected. If the store is non-transactional, null must be specified. + For a transactional store the transaction is optional for read-only + access and required for read-write access.
          +
          fromKey - is the lower bound of the key range, or null if the range + has no lower bound.
          +
          fromInclusive - is true if keys greater than or equal to fromKey + should be included in the key range, or false if only keys greater than + fromKey should be included.
          +
          toKey - is the upper bound of the key range, or null if the range + has no upper bound.
          +
          toInclusive - is true if keys less than or equal to toKey should be + included in the key range, or false if only keys less than toKey should + be included.
          +
          config - the cursor configuration that determines the default lock + mode used for all cursor operations, or null to implicitly use CursorConfig.DEFAULT.
          +
          Returns:
          +
          the cursor.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    Berkeley DB Java Edition
    version 7.5.11 +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/SecondaryIndex.html b/docs/java/com/sleepycat/persist/SecondaryIndex.html
new file mode 100644
index 0000000..3eb196f
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/SecondaryIndex.html
@@ -0,0 +1,2097 @@

SecondaryIndex (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.persist
    +

    Class SecondaryIndex<SK,PK,E>

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.persist.SecondaryIndex<SK,PK,E>
      • +
      +
    • +
    +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      EntityIndex<SK,E>
      +
      +
      +
      +
      public class SecondaryIndex<SK,PK,E>
extends java.lang.Object
      +
      The secondary index for an entity class and a secondary key. + +

      SecondaryIndex objects are thread-safe. Multiple threads may + safely call the methods of a shared SecondaryIndex object.

      + +

      SecondaryIndex implements EntityIndex to map the + secondary key type (SK) to the entity type (E). In other words, entities + are accessed by secondary key values.

      + +

      The SecondaryKey annotation may be used to define a secondary key + as shown in the following example.

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE)
      +     String department;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      + +

      Before obtaining a SecondaryIndex, the PrimaryIndex must + be obtained for the entity class. To obtain the SecondaryIndex call + EntityStore.getSecondaryIndex, passing + the primary index, the secondary key class and the secondary key name. For + example:

      + +
      + EntityStore store = new EntityStore(...);
      +
      + PrimaryIndex<Long, Employee> primaryIndex =
      +     store.getPrimaryIndex(Long.class, Employee.class);
      +
      + SecondaryIndex<String, Long, Employee> secondaryIndex =
      +     store.getSecondaryIndex(primaryIndex, String.class, "department");
      + +

      Since SecondaryIndex implements the EntityIndex + interface, it shares the common index methods for retrieving and deleting + entities, opening cursors and using transactions. See EntityIndex + for more information on these topics.

      + +

      SecondaryIndex does not provide methods for inserting + and updating entities. That must be done using the PrimaryIndex.

      + +

      Note that a SecondaryIndex has three type parameters <SK, + PK, E> or in the example <String, Long, Employee> while a PrimaryIndex has only two type parameters <PK, E> or <Long, + Employee>. This is because a SecondaryIndex has an extra level of + mapping: It maps from secondary key to primary key, and then from primary + key to entity. For example, consider this entity:

      + +
      + + +
ID    Department    Name
1     Engineering   Jane Smith
      + +

      The PrimaryIndex maps from id directly to the entity, or from + primary key 1 to the "Jane Smith" entity in the example. The SecondaryIndex maps from department to id, or from secondary key + "Engineering" to primary key 1 in the example, and then uses the PrimaryIndex to map from the primary key to the entity.

      + +

      Because of this extra type parameter and extra level of mapping, a SecondaryIndex can provide more than one mapping, or view, of the entities + in the primary index. The main mapping of a SecondaryIndex is to + map from secondary key (SK) to entity (E), or in the example, from the + String department key to the Employee entity. The SecondaryIndex + itself, by implementing EntityIndex<SK, E>, provides this + mapping.

      + +

      The second mapping provided by SecondaryIndex is from secondary + key (SK) to primary key (PK), or in the example, from the String department + key to the Long id key. The keysIndex method provides this + mapping. When accessing the keys index, the primary key is returned rather + than the entity. When only the primary key is needed and not the entire + entity, using the keys index is less expensive than using the secondary + index because the primary index does not have to be accessed.

      + +

      The third mapping provided by SecondaryIndex is from primary key + (PK) to entity (E), for the subset of entities having a given secondary key + (SK). This mapping is provided by the subIndex(SK) method. A + sub-index is convenient when you are interested in working with the subset + of entities having a particular secondary key value, for example, all + employees in a given department.

      + +

      All three mappings, along with the mapping provided by the PrimaryIndex, are shown using example data in the EntityIndex + interface documentation. See EntityIndex for more information.

      + +

      Note that when using an index, keys and values are stored and retrieved + by value not by reference. In other words, if an entity object is stored + and then retrieved, or retrieved twice, each object will be a separate + instance. For example, in the code below the assertion will always + fail.

      +
      + MyKey key = ...;
      + MyEntity entity1 = index.get(key);
      + MyEntity entity2 = index.get(key);
      + assert entity1 == entity2; // always fails!
      + 
      + +

      One-to-One Relationships

      + +

      A ONE_TO_ONE relationship, although less + common than other types of relationships, is the simplest type of + relationship. A single entity is related to a single secondary key value. + For example:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=ONE_TO_ONE)
      +     String ssn;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + SecondaryIndex<String, Long, Employee> employeeBySsn =
      +     store.getSecondaryIndex(primaryIndex, String.class, "ssn");
      + +

      With a ONE_TO_ONE relationship, the + secondary key must be unique; in other words, no two entities may have the + same secondary key value. If an attempt is made to store an entity having + the same secondary key value as another existing entity, a DatabaseException will be thrown.

      + +

      Because the secondary key is unique, it is useful to lookup entities by + secondary key using EntityIndex.get(K). For example:

      + +
      + Employee employee = employeeBySsn.get(mySsn);
      + +

      Many-to-One Relationships

      + +

      A MANY_TO_ONE relationship is the most + common type of relationship. One or more entities is related to a single + secondary key value. For example:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE)
      +     String department;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + SecondaryIndex<String, Long, Employee> employeeByDepartment =
      +     store.getSecondaryIndex(primaryIndex, String.class, "department");
      + +

      With a MANY_TO_ONE relationship, the + secondary key is not required to be unique; in other words, more than one + entity may have the same secondary key value. In this example, more than + one employee may belong to the same department.

      + +

      The most convenient way to access the employees in a given department is + by using a sub-index. For example:

      + +
+ EntityIndex<Long, Employee> subIndex = employeeByDepartment.subIndex(myDept);
      + EntityCursor<Employee> cursor = subIndex.entities();
      + try {
      +     for (Employee entity : cursor) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      One-to-Many Relationships

      + +

      In a ONE_TO_MANY relationship, a single + entity is related to one or more secondary key values. For example:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=ONE_TO_MANY)
+     Set<String> emailAddresses = new HashSet<String>();
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + SecondaryIndex<String, Long, Employee> employeeByEmail =
      +     store.getSecondaryIndex(primaryIndex, String.class, "emailAddresses");
      + +

      With a ONE_TO_MANY relationship, the + secondary key must be unique; in other words, no two entities may have the + same secondary key value. In this example, no two employees may have the + same email address. If an attempt is made to store an entity having the + same secondary key value as another existing entity, a DatabaseException will be thrown.

      + +

      Because the secondary key is unique, it is useful to lookup entities by + secondary key using EntityIndex.get(K). For example:

      + +
      + Employee employee = employeeByEmail.get(myEmailAddress);
      + +

      The secondary key field for a ONE_TO_MANY relationship must be an array or collection type. To access + the email addresses of an employee, simply access the collection field + directly. For example:

      + +
      + Employee employee = primaryIndex.get(1); // Get the entity by primary key
      + employee.emailAddresses.add(myNewEmail); // Add an email address
+ primaryIndex.putNoReturn(employee);      // Update the entity
      + +

      Many-to-Many Relationships

      + +

      In a MANY_TO_MANY relationship, one + or more entities is related to one or more secondary key values. For + example:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_MANY)
+     Set<String> organizations = new HashSet<String>();
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + SecondaryIndex<String, Long, Employee> employeeByOrganization =
      +     store.getSecondaryIndex(primaryIndex, String.class, "organizations");
      + +

      With a MANY_TO_MANY relationship, the + secondary key is not required to be unique; in other words, more than one + entity may have the same secondary key value. In this example, more than + one employee may belong to the same organization.

      + +

      The most convenient way to access the employees in a given organization + is by using a sub-index. For example:

      + +
+ EntityIndex<Long, Employee> subIndex = employeeByOrganization.subIndex(myOrg);
      + EntityCursor<Employee> cursor = subIndex.entities();
      + try {
      +     for (Employee entity : cursor) {
      +         // Do something with the entity...
      +     }
      + } finally {
      +     cursor.close();
      + }
      + +

      The secondary key field for a MANY_TO_MANY relationship must be an array or collection type. To access + the organizations of an employee, simply access the collection field + directly. For example:

      + +
      + Employee employee = primaryIndex.get(1); // Get the entity by primary key
      + employee.organizations.remove(myOldOrg); // Remove an organization
+ primaryIndex.putNoReturn(employee);      // Update the entity
      + +

      Foreign Key Constraints for Related Entities

      + +

      In all the examples above the secondary key is treated only as a simple + value, such as a String department field. In many cases, that is + sufficient. But in other cases, you may wish to constrain the secondary + keys of one entity class to be valid primary keys of another entity + class. For example, a Department entity may also be defined:

      + +
      + @Entity
      + class Department {
      +
      +     @PrimaryKey
      +     String name;
      +
      +     String missionStatement;
      +
      +     private Department() {}
      + }
      + +

      You may wish to constrain the department field values of the Employee + class in the examples above to be valid primary keys of the Department + entity class. In other words, you may wish to ensure that the department + field of an Employee will always refer to a valid Department entity.

      + +

      You can implement this constraint yourself by validating the department + field before you store an Employee. For example:

      + +
      + PrimaryIndex<String, Department> departmentIndex =
      +     store.getPrimaryIndex(String.class, Department.class);
      +
      + void storeEmployee(Employee employee) throws DatabaseException {
      +     if (departmentIndex.contains(employee.department)) {
      +         primaryIndex.putNoReturn(employee);
      +     } else {
      +         throw new IllegalArgumentException("Department does not exist: " +
      +                                            employee.department);
      +     }
      + }
      + +

      Or, instead you could define the Employee department field as a foreign + key, and this validation will be done for you when you attempt to store the + Employee entity. For example:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class)
      +     String department;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      + +

      The relatedEntity=Department.class above defines the department + field as a foreign key that refers to a Department entity. Whenever a + Employee entity is stored, its department field value will be checked to + ensure that a Department entity exists with that value as its primary key. + If no such Department entity exists, then a DatabaseException is + thrown, causing the transaction to be aborted (assuming that transactions + are used).

      + +

      This begs the question: What happens when a Department entity is deleted + while one or more Employee entities have department fields that refer to + the deleted department's primary key? If the department were allowed to be + deleted, the foreign key constraint for the Employee department field would + be violated, because the Employee department field would refer to a + department that does not exist.

      + +

      By default, when this situation arises the system does not allow the + department to be deleted. Instead, a DatabaseException is thrown, + causing the transaction to be aborted. In this case, in order to delete a + department, the department field of all Employee entities must first be + updated to refer to a different existing department, or set to null. This + is the responsibility of the application.

      + +

      There are two additional ways of handling deletion of a Department + entity. These alternatives are configured using the SecondaryKey.onRelatedEntityDelete() annotation property. Setting this + property to DeleteAction.NULLIFY causes the Employee department + field to be automatically set to null when the department they refer to is + deleted. This may or may not be desirable, depending on application + policies. For example:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class,
      +                                       onRelatedEntityDelete=NULLIFY)
      +     String department;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      + +

      The DeleteAction.CASCADE value, on the other hand, causes the + Employee entities to be automatically deleted when the department they refer + to is deleted. This is probably not desirable in this particular example, + but is useful for parent-child relationships. For example:

      + +
      + @Entity
      + class Order {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     String description;
      +
      +     private Order() {}
      + }
      +
      + @Entity
      + class OrderItem {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Order.class,
      +                                       onRelatedEntityDelete=CASCADE)
      +     long orderId;
      +
      +     String description;
      +
      +     private OrderItem() {}
      + }
      + +

      The OrderItem orderId field refers to its "parent" Order entity. When an + Order entity is deleted, it may be useful to automatically delete its + "child" OrderItem entities.

      + +

      For more information, see SecondaryKey.onRelatedEntityDelete().

      + +

      One-to-Many versus Many-to-One for Related Entities

      + +

      When there is a conceptual Many-to-One relationship such as Employee to + Department as illustrated in the examples above, the relationship may be + implemented either as Many-to-One in the Employee class or as One-to-Many in + the Department class.

      + +

      Here is the Many-to-One approach.

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class)
      +     String department;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + @Entity
      + class Department {
      +
      +     @PrimaryKey
      +     String name;
      +
      +     String missionStatement;
      +
      +     private Department() {}
      + }
      + +

      And here is the One-to-Many approach.

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + @Entity
      + class Department {
      +
      +     @PrimaryKey
      +     String name;
      +
      +     String missionStatement;
      +
      +     @SecondaryKey(relate=ONE_TO_MANY, relatedEntity=Employee.class)
+     Set<Long> employees = new HashSet<Long>();
      +
      +     private Department() {}
      + }
      + +

      Which approach is best? The Many-to-One approach better handles large + number of entities on the to-Many side of the relationship because it + doesn't store a collection of keys as an entity field. With Many-to-One a + Btree is used to store the collection of keys and the Btree can easily + handle very large numbers of keys. With One-to-Many, each time a related + key is added or removed the entity on the One side of the relationship, + along with the complete collection of related keys, must be updated. + Therefore, if large numbers of keys may be stored per relationship, + Many-to-One is recommended.

      + +

      If the number of entities per relationship is not a concern, then you may + wish to choose the approach that is most natural in your application data + model. For example, if you think of a Department as containing employees + and you wish to modify the Department object each time an employee is added + or removed, then you may wish to store a collection of Employee keys in the + Department object (One-to-Many).

      + +

      Note that if you have a One-to-Many relationship and there is no related + entity, then you don't have a choice -- you have to use One-to-Many because + there is no entity on the to-Many side of the relationship where a + Many-to-One key could be defined. An example is the Employee to email + addresses relationship discussed above:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=ONE_TO_MANY)
+     Set<String> emailAddresses = new HashSet<String>();
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      + +

      For sake of argument imagine that each employee has thousands of email + addresses and employees frequently add and remove email addresses. To + avoid the potential performance problems associated with updating the + Employee entity every time an email address is added or removed, you could + create an EmployeeEmailAddress entity and use a Many-to-One relationship as + shown below:

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + @Entity
      + class EmployeeEmailAddress {
      +
      +     @PrimaryKey
      +     String emailAddress;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Employee.class)
      +     long employeeId;
      +
      +     private EmployeeEmailAddress() {}
      + }
      + +

      Key Placement with Many-to-Many for Related Entities

      + +

      As discussed in the section above, one drawback of a to-Many relationship + (One-to-Many was discussed above and Many-to-Many is discussed here) is that + it requires storing a collection of keys in an entity. Each time a key is + added or removed, the containing entity must be updated. This has potential + performance problems when there are large numbers of entities on the to-Many + side of the relationship, in other words, when there are large numbers of + keys in each secondary key field collection.

      + +

      If you have a Many-to-Many relationship with a reasonably small number of + entities on one side of the relationship and a large number of entities on + the other side, you can avoid the potential performance problems by defining + the secondary key field on the side with a small number of entities.

      + +

      For example, in an Employee-to-Organization relationship, the number of + organizations per employee will normally be reasonably small but the number + of employees per organization may be very large. Therefore, to avoid + potential performance problems, the secondary key field should be defined in + the Employee class as shown below.

      + +
      + @Entity
      + class Employee {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Organization.class)
+     Set<String> organizations = new HashSet<String>();
      +
      +     String name;
      +
      +     private Employee() {}
      + }
      +
      + @Entity
      + class Organization {
      +
      +     @PrimaryKey
      +     String name;
      +
      +     String description;
+
+     private Organization() {}
+ }
      + +

      If instead a Set<Long> members key had been defined in the + Organization class, this set could potentially have a large number of + elements and performance problems could result.

      + +

      Many-to-Many Versus a Relationship Entity

      + +

      If you have a Many-to-Many relationship with a large number of entities + on both sides of the relationship, you can avoid the potential + performance problems by using a relationship entity. A + relationship entity defines the relationship between two other entities + using two Many-to-One relationships.

      + +

      Imagine a relationship between cars and trucks indicating whenever a + particular truck was passed on the road by a particular car. A given car + may pass a large number of trucks and a given truck may be passed by a large + number of cars. First look at a Many-to-Many relationship between these two + entities:

      + +
      + @Entity
      + class Car {
      +
      +     @PrimaryKey
      +     String licenseNumber;
      +
      +     @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Truck.class)
+     Set<String> trucksPassed = new HashSet<String>();
      +
      +     String color;
      +
      +     private Car() {}
      + }
      +
      + @Entity
      + class Truck {
      +
      +     @PrimaryKey
      +     String licenseNumber;
      +
      +     int tons;
      +
      +     private Truck() {}
      + }
      + +

      With the Many-to-Many approach above, the trucksPassed set could + potentially have a large number of elements and performance problems could + result.

      + +

      To apply the relationship entity approach we define a new entity class + named CarPassedTruck representing a single truck passed by a single car. We + remove the secondary key from the Car class and use two secondary keys in + the CarPassedTruck class instead.

      + +
      + @Entity
      + class Car {
      +
      +     @PrimaryKey
      +     String licenseNumber;
      +
      +     String color;
      +
      +     private Car() {}
      + }
      +
      + @Entity
      + class Truck {
      +
      +     @PrimaryKey
      +     String licenseNumber;
      +
      +     int tons;
      +
      +     private Truck() {}
      + }
      +
      + @Entity
      + class CarPassedTruck {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Car.class)
      +     String carLicense;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Truck.class)
      +     String truckLicense;
      +
      +     private CarPassedTruck() {}
      + }
      + +

      The CarPassedTruck entity can be used to access the relationship by car + license or by truck license.

      + +

      You may use the relationship entity approach because of the potential + performance problems mentioned above. Or, you may choose to use this + approach in order to store other information about the relationship. For + example, if for each car that passes a truck you wish to record how much + faster the car was going than the truck, then a relationship entity is the + logical place to store that property. In the example below the + speedDifference property is added to the CarPassedTruck class.

      + +
      + @Entity
      + class CarPassedTruck {
      +
      +     @PrimaryKey
      +     long id;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Car.class)
      +     String carLicense;
      +
      +     @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Truck.class)
      +     String truckLicense;
      +
      +     int speedDifference;
      +
      +     private CarPassedTruck() {}
      + }
      + +

      Be aware that the relationship entity approach adds overhead compared to + Many-to-Many. There is one additional entity and one additional secondary + key. These factors should be weighed against its advantages and the + relevant application access patterns should be considered.

      +
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    +
      +
    • + + + +
        +
      • + + +

        Method Summary

Modifier and Type          Method and Description

boolean                    contains(K key)
                           Checks for existence of a key in this index.

boolean                    contains(Transaction txn, K key, LockMode lockMode)
                           Checks for existence of a key in this index.

long                       count()
                           Returns a non-transactional count of the entities in this index.

long                       count(long memoryLimit)
                           Returns a non-transactional count of the entities in this index.

boolean                    delete(K key)
                           Deletes all entities with a given index key.

boolean                    delete(Transaction txn, K key)
                           Deletes all entities with a given index key.

OperationResult            delete(Transaction txn, K key, WriteOptions options)
                           Deletes all entities with a given index key, using a WriteOptions parameter and returning an OperationResult.

EntityCursor<E>            entities()
                           Opens a cursor for traversing all entities in this index.

EntityCursor<E>            entities(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive)
                           Opens a cursor for traversing entities in a key range.

EntityCursor<E>            entities(Transaction txn, CursorConfig config)
                           Opens a cursor for traversing all entities in this index.

EntityCursor<E>            entities(Transaction txn, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive, CursorConfig config)
                           Opens a cursor for traversing entities in a key range.

E                          get(SK key)
                           Gets an entity via a key of this index.

EntityResult<E>            get(Transaction txn, SK key, Get getType, ReadOptions options)
                           Gets an entity via a key of this index, using Get type and ReadOptions parameters, and returning an EntityResult.

E                          get(Transaction txn, SK key, LockMode lockMode)
                           Gets an entity via a key of this index.

SecondaryDatabase          getDatabase()
                           Returns the underlying secondary database for this index.

EntryBinding<SK>           getKeyBinding()
                           Returns the secondary key binding for the index.

java.lang.Class<SK>        getKeyClass()
                           Returns the secondary key class for this index.

Database                   getKeysDatabase()
                           Returns the underlying secondary database that is not associated with the primary database and is used for the keysIndex.

PrimaryIndex<PK,E>         getPrimaryIndex()
                           Returns the primary index associated with this secondary index.

EntityCursor<K>            keys()
                           Opens a cursor for traversing all keys in this index.

EntityCursor<K>            keys(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive)
                           Opens a cursor for traversing keys in a key range.

EntityCursor<K>            keys(Transaction txn, CursorConfig config)
                           Opens a cursor for traversing all keys in this index.

EntityCursor<K>            keys(Transaction txn, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive, CursorConfig config)
                           Opens a cursor for traversing keys in a key range.

EntityIndex<SK,PK>         keysIndex()
                           Returns a read-only keys index that maps secondary key to primary key.

java.util.Map<SK,E>        map()
                           Returns a standard Java map based on this entity index.

java.util.SortedMap<SK,E>  sortedMap()
                           Returns a standard Java sorted map based on this entity index.

EntityIndex<PK,E>          subIndex(SK key)
                           Returns an index that maps primary key to entity for the subset of entities having a given secondary key (duplicates).
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          SecondaryIndex

          +
          public SecondaryIndex(SecondaryDatabase database,
          +                      Database keysDatabase,
          +                      PrimaryIndex<PK,E> primaryIndex,
          +                      java.lang.Class<SK> secondaryKeyClass,
          +                      EntryBinding<SK> secondaryKeyBinding)
          +               throws DatabaseException
          +
          Creates a secondary index without using an EntityStore. + When using an EntityStore, call getSecondaryIndex instead. + +

          This constructor is not normally needed and is provided for + applications that wish to use custom bindings along with the Direct + Persistence Layer. Normally, getSecondaryIndex is used instead.

          +
          +
          Parameters:
          +
          database - the secondary database used for all access other than + via a keysIndex.
          +
          keysDatabase - another handle on the secondary database, opened + without association to the primary, and used only for access via a + keysIndex. If this argument is null and the keysIndex + method is called, then the keys database will be opened automatically; + however, the user is then responsible for closing the keys database. To + get the keys database in order to close it, call getKeysDatabase().
          +
          primaryIndex - the primary index associated with this secondary + index.
          +
          secondaryKeyClass - the class of the secondary key.
          +
          secondaryKeyBinding - the binding to be used for secondary keys.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getDatabase

          +
          public SecondaryDatabase getDatabase()
          +
          Returns the underlying secondary database for this index.
          +
          +
          Specified by:
          +
          getDatabase in interface EntityIndex<SK,E>
          +
          Returns:
          +
          the secondary database.
          +
          +
        • +
        + + + +
          +
        • +

          getKeysDatabase

          +
          public Database getKeysDatabase()
          +
          Returns the underlying secondary database that is not associated with + the primary database and is used for the keysIndex.
          +
          +
          Returns:
          +
          the keys database.
          +
          +
        • +
        + + + +
          +
        • +

          getPrimaryIndex

          +
          public PrimaryIndex<PK,E> getPrimaryIndex()
          +
          Returns the primary index associated with this secondary index.
          +
          +
          Returns:
          +
          the primary index.
          +
          +
        • +
        + + + +
          +
        • +

          getKeyClass

          +
          public java.lang.Class<SK> getKeyClass()
          +
          Returns the secondary key class for this index.
          +
          +
          Returns:
          +
          the class.
          +
          +
        • +
        + + + +
          +
        • +

          getKeyBinding

          +
          public EntryBinding<SK> getKeyBinding()
          +
          Returns the secondary key binding for the index.
          +
          +
          Returns:
          +
          the key binding.
          +
          +
        • +
        + + + +
          +
        • +

          keysIndex

          +
          public EntityIndex<SK,PK> keysIndex()
          +                             throws DatabaseException
          +
          Returns a read-only keys index that maps secondary key to primary key. + When accessing the keys index, the primary key is returned rather than + the entity. When only the primary key is needed and not the entire + entity, using the keys index is less expensive than using the secondary + index because the primary index does not have to be accessed. + +

          Note the following in the unusual case that you are not + using an EntityStore: This method will open the keys + database, a second database handle for the secondary database, if it is + not already open. In this case, if you are not using an + EntityStore, then you are responsible for closing the + database returned by getKeysDatabase() before closing the + environment. If you are using an EntityStore, the + keys database will be closed automatically by EntityStore.close().

          +
          +
          Returns:
          +
          the keys index.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
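A sketch of the keys index in use, assuming the employeeByDepartment index from the examples above; the "entities" of the keys index are the primary keys themselves:

 EntityIndex<String, Long> deptToId = employeeByDepartment.keysIndex();
 EntityCursor<Long> ids =
     deptToId.entities("Engineering", true, "Engineering", true);
 try {
     for (Long id : ids) {
         // Each id is an Employee primary key; the entity is never fetched.
     }
 } finally {
     ids.close();
 }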
        • +
          +
        • +

          subIndex

          +
          public EntityIndex<PK,E> subIndex(SK key)
          +                           throws DatabaseException
          +
          Returns an index that maps primary key to entity for the subset of + entities having a given secondary key (duplicates). A sub-index is + convenient when you are interested in working with the subset of + entities having a particular secondary key value. + +

          When using a MANY_TO_ONE or MANY_TO_MANY secondary key, the sub-index + represents the left (MANY) side of a relationship.

          +
          +
          Parameters:
          +
          key - the secondary key that identifies the entities in the + sub-index.
          +
          Returns:
          +
          the sub-index.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
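          A minimal sketch, reusing the hypothetical Employee entity and
          byDept index from the keysIndex example above: the sub-index behaves
          like a primary index restricted to one secondary key value.

            EntityIndex<Long,Employee> engineering =
                byDept.subIndex("Engineering");
            // Look up by primary key within the "Engineering" subset only.
            boolean present = engineering.contains(42L);
            Employee e = engineering.get(42L);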
        get

          public EntityResult<E> get(Transaction txn,
                                     SK key,
                                     Get getType,
                                     ReadOptions options)
                              throws DatabaseException

          Description copied from interface: EntityIndex

          Gets an entity via a key of this index, using Get type and
          ReadOptions parameters, and returning an EntityResult.

          Parameters:
          txn - the transaction used to protect this operation, or null if
          the operation should not be transaction protected.
          key - the key to search for.
          getType - must be Get.SEARCH.
          options - the ReadOptions, or null to use default options.
          Returns:
          the EntityResult, including the value mapped to the given key, or
          null if the key is not present in the index.
          Throws:
          OperationFailureException - if one of the Read Operation Failures
          occurs.
          EnvironmentFailureException - if an unexpected, internal or
          environment-wide failure occurs.
          DatabaseException - the base class for all BDB exceptions.
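          A minimal sketch using the hypothetical byDept index from above.
          Get.SEARCH is the only Get type accepted by this method; null is
          passed for the transaction and options to use the defaults.

            EntityResult<Employee> result =
                byDept.get(null, "Engineering", Get.SEARCH, null);
            if (result != null) {
                Employee first = result.value();
            }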
        map

          public java.util.Map<SK,E> map()

          Description copied from interface: EntityIndex

          Returns a standard Java map based on this entity index. The
          StoredMap returned is defined by the Collections API. Stored
          collections conform to the standard Java collections framework
          interface.

          Returns:
          the map.

        sortedMap

          public java.util.SortedMap<SK,E> sortedMap()

          Description copied from interface: EntityIndex

          Returns a standard Java sorted map based on this entity index. The
          StoredSortedMap returned is defined by the Collections API. Stored
          collections conform to the standard Java collections framework
          interface.

          Returns:
          the map.
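          A minimal sketch of the collections view, again using the
          hypothetical byDept index. The returned map is backed by the
          database, so iteration reads through the store rather than an
          in-memory copy.

            java.util.SortedMap<String,Employee> view = byDept.sortedMap();
            for (java.util.Map.Entry<String,Employee> entry : view.entrySet()) {
                System.out.println(entry.getKey());
            }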
        count

          public long count()
                     throws DatabaseException

          Description copied from interface: EntityIndex

          Returns a non-transactional count of the entities in this index.

          This operation is faster than obtaining a count by scanning the
          index manually, and will not perturb the current contents of the
          cache. However, the count is not guaranteed to be accurate if there
          are concurrent updates. Note that this method does scan a
          significant portion of the index and should be considered a fairly
          expensive operation.

          This operation will disable deletion of log files by the JE log
          cleaner during its execution and will consume a certain amount of
          memory (but without affecting the memory that is available for the
          JE cache). To avoid excessive memory consumption (and a potential
          OutOfMemoryError) this method places an internal limit on its
          memory consumption. If this limit is reached, the method will still
          work properly, but its performance will degrade. To specify a
          different memory limit than the one used by this method, use the
          EntityIndex.count(long memoryLimit) method.

          Specified by:
          count in interface EntityIndex<K,E>
          Returns:
          the number of entities in this index.
          Throws:
          OperationFailureException - if one of the Read Operation Failures
          occurs.
          EnvironmentFailureException - if an unexpected, internal or
          environment-wide failure occurs.
          DatabaseException - the base class for all BDB exceptions.
        count

          public long count(long memoryLimit)
                     throws DatabaseException

          Description copied from interface: EntityIndex

          Returns a non-transactional count of the entities in this index.

          This operation is faster than obtaining a count by scanning the
          index manually, and will not perturb the current contents of the
          cache. However, the count is not guaranteed to be accurate if there
          are concurrent updates. Note that this method does scan a
          significant portion of the index and should be considered a fairly
          expensive operation.

          This operation will disable deletion of log files by the JE log
          cleaner during its execution and will consume a certain amount of
          memory (but without affecting the memory that is available for the
          JE cache). To avoid excessive memory consumption (and a potential
          OutOfMemoryError) this method takes as input an upper bound on the
          memory it may consume. If this limit is reached, the method will
          still work properly, but its performance will degrade.

          Specified by:
          count in interface EntityIndex<K,E>
          Returns:
          the number of entities in this index.
          Throws:
          OperationFailureException - if one of the Read Operation Failures
          occurs.
          EnvironmentFailureException - if an unexpected, internal or
          environment-wide failure occurs.
          DatabaseException - the base class for all BDB exceptions.
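          A minimal sketch of both overloads on the hypothetical byDept
          index; the 10 MB budget is an arbitrary illustration, not a
          recommendation.

            long total = byDept.count();
            long totalBudgeted = byDept.count(10L * 1024 * 1024);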
        delete

          public OperationResult delete(Transaction txn,
                                        K key,
                                        WriteOptions options)
                                 throws DatabaseException

          Description copied from interface: EntityIndex

          Deletes all entities with a given index key, using a WriteOptions
          parameter and returning an OperationResult.

          Specified by:
          delete in interface EntityIndex<K,E>
          Parameters:
          txn - the transaction used to protect this operation, null to use
          auto-commit, or null if the store is non-transactional.
          key - the key to search for.
          options - the WriteOptions, or null to use default options.
          Returns:
          the OperationResult if any entities were deleted, else null.
          Throws:
          OperationFailureException - if one of the Write Operation Failures
          occurs.
          EnvironmentFailureException - if an unexpected, internal or
          environment-wide failure occurs.
          DatabaseException - the base class for all BDB exceptions.
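          A minimal sketch: deleting every entity with a given secondary key
          on the hypothetical byDept index. Nulls select auto-commit (or a
          non-transactional store) and the default WriteOptions.

            OperationResult deleted = byDept.delete(null, "Engineering", null);
            if (deleted == null) {
                // no entity had the key "Engineering"
            }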
        keys

          public EntityCursor<K> keys()
                              throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing all keys in this index.

          The operations performed with the cursor will not be transaction
          protected, and CursorConfig.DEFAULT is used implicitly. If the
          store is transactional, the cursor may not be used to update or
          delete entities.

          Note that READ_UNCOMMITTED can be used with a key cursor to reduce
          I/O, potentially providing significant performance benefits. See
          Key Cursor Optimization with READ_UNCOMMITTED.

          Specified by:
          keys in interface EntityIndex<K,E>
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
        keys

          public EntityCursor<K> keys(Transaction txn,
                                      CursorConfig config)
                              throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing all keys in this index.

          Note that READ_UNCOMMITTED can be used with a key cursor to reduce
          I/O, potentially providing significant performance benefits. See
          Key Cursor Optimization with READ_UNCOMMITTED.

          Specified by:
          keys in interface EntityIndex<K,E>
          Parameters:
          txn - the transaction used to protect all operations performed with
          the cursor, or null if the operations should not be transaction
          protected. If the store is non-transactional, null must be
          specified. For a transactional store the transaction is optional
          for read-only access and required for read-write access.
          config - the cursor configuration that determines the default lock
          mode used for all cursor operations, or null to implicitly use
          CursorConfig.DEFAULT.
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
        entities

          public EntityCursor<E> entities()
                                  throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing all entities in this index.

          The operations performed with the cursor will not be transaction
          protected, and CursorConfig.DEFAULT is used implicitly. If the
          store is transactional, the cursor may not be used to update or
          delete entities.

          Specified by:
          entities in interface EntityIndex<K,E>
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
        entities

          public EntityCursor<E> entities(Transaction txn,
                                          CursorConfig config)
                                  throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing all entities in this index.

          Specified by:
          entities in interface EntityIndex<K,E>
          Parameters:
          txn - the transaction used to protect all operations performed with
          the cursor, or null if the operations should not be transaction
          protected. If the store is non-transactional, null must be
          specified. For a transactional store the transaction is optional
          for read-only access and required for read-write access.
          config - the cursor configuration that determines the default lock
          mode used for all cursor operations, or null to implicitly use
          CursorConfig.DEFAULT.
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
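          A minimal sketch of cursor traversal on the hypothetical byDept
          index; the cursor is closed in a finally block so the handle is
          always released.

            EntityCursor<Employee> entities = byDept.entities();
            try {
                for (Employee emp : entities) {
                    // process emp
                }
            } finally {
                entities.close();
            }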
        keys

          public EntityCursor<K> keys(K fromKey,
                                      boolean fromInclusive,
                                      K toKey,
                                      boolean toInclusive)
                              throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing keys in a key range.

          The operations performed with the cursor will not be transaction
          protected, and CursorConfig.DEFAULT is used implicitly. If the
          store is transactional, the cursor may not be used to update or
          delete entities.

          Note that READ_UNCOMMITTED can be used with a key cursor to reduce
          I/O, potentially providing significant performance benefits. See
          Key Cursor Optimization with READ_UNCOMMITTED.

          Specified by:
          keys in interface EntityIndex<K,E>
          Parameters:
          fromKey - is the lower bound of the key range, or null if the range
          has no lower bound.
          fromInclusive - is true if keys greater than or equal to fromKey
          should be included in the key range, or false if only keys greater
          than fromKey should be included.
          toKey - is the upper bound of the key range, or null if the range
          has no upper bound.
          toInclusive - is true if keys less than or equal to toKey should be
          included in the key range, or false if only keys less than toKey
          should be included.
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
        keys

          public EntityCursor<K> keys(Transaction txn,
                                      K fromKey,
                                      boolean fromInclusive,
                                      K toKey,
                                      boolean toInclusive,
                                      CursorConfig config)
                              throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing keys in a key range.

          Key Cursor Optimization with READ_UNCOMMITTED

          Using a key cursor potentially has a large performance benefit when
          the READ_UNCOMMITTED isolation mode is used. In this case, if the
          record data is not in the JE cache, it will not be read from disk.
          The performance benefit is potentially large because random access
          disk reads may be reduced. Examples are:

          - Scanning all records in key order, when the entity is not needed
            and READ_UNCOMMITTED isolation is acceptable.
          - Skipping over records quickly to perform approximate pagination
            with READ_UNCOMMITTED isolation.

          For other isolation modes (READ_COMMITTED, REPEATABLE_READ and
          SERIALIZABLE), the performance benefit of a key cursor is not as
          significant. In this case, the data item must be read into the JE
          cache if it is not already present, in order to lock the record.
          The only performance benefit is that the data will not be copied
          from the JE cache to the application's entry parameter, and will
          not be unmarshalled into an entity object.

          For information on specifying isolation modes, see LockMode,
          CursorConfig and TransactionConfig.

          Specified by:
          keys in interface EntityIndex<K,E>
          Parameters:
          txn - the transaction used to protect all operations performed with
          the cursor, or null if the operations should not be transaction
          protected. If the store is non-transactional, null must be
          specified. For a transactional store the transaction is optional
          for read-only access and required for read-write access.
          fromKey - is the lower bound of the key range, or null if the range
          has no lower bound.
          fromInclusive - is true if keys greater than or equal to fromKey
          should be included in the key range, or false if only keys greater
          than fromKey should be included.
          toKey - is the upper bound of the key range, or null if the range
          has no upper bound.
          toInclusive - is true if keys less than or equal to toKey should be
          included in the key range, or false if only keys less than toKey
          should be included.
          config - the cursor configuration that determines the default lock
          mode used for all cursor operations, or null to implicitly use
          CursorConfig.DEFAULT.
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
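          A minimal sketch of the optimization described above, using the
          hypothetical byDept index: a key-only range scan under
          READ_UNCOMMITTED, so entity data that is not already in the JE
          cache is never read from disk.

            EntityCursor<String> range = byDept.keys(
                null, "A", true, "M", false, CursorConfig.READ_UNCOMMITTED);
            try {
                for (String dept : range) {
                    // key-only processing; no entity is materialized
                }
            } finally {
                range.close();
            }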
        entities

          public EntityCursor<E> entities(K fromKey,
                                          boolean fromInclusive,
                                          K toKey,
                                          boolean toInclusive)
                                  throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing entities in a key range.

          The operations performed with the cursor will not be transaction
          protected, and CursorConfig.DEFAULT is used implicitly. If the
          store is transactional, the cursor may not be used to update or
          delete entities.

          Specified by:
          entities in interface EntityIndex<K,E>
          Parameters:
          fromKey - is the lower bound of the key range, or null if the range
          has no lower bound.
          fromInclusive - is true if keys greater than or equal to fromKey
          should be included in the key range, or false if only keys greater
          than fromKey should be included.
          toKey - is the upper bound of the key range, or null if the range
          has no upper bound.
          toInclusive - is true if keys less than or equal to toKey should be
          included in the key range, or false if only keys less than toKey
          should be included.
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
        entities

          public EntityCursor<E> entities(Transaction txn,
                                          K fromKey,
                                          boolean fromInclusive,
                                          K toKey,
                                          boolean toInclusive,
                                          CursorConfig config)
                                  throws DatabaseException

          Description copied from interface: EntityIndex

          Opens a cursor for traversing entities in a key range.

          Specified by:
          entities in interface EntityIndex<K,E>
          Parameters:
          txn - the transaction used to protect all operations performed with
          the cursor, or null if the operations should not be transaction
          protected. If the store is non-transactional, null must be
          specified. For a transactional store the transaction is optional
          for read-only access and required for read-write access.
          fromKey - is the lower bound of the key range, or null if the range
          has no lower bound.
          fromInclusive - is true if keys greater than or equal to fromKey
          should be included in the key range, or false if only keys greater
          than fromKey should be included.
          toKey - is the upper bound of the key range, or null if the range
          has no upper bound.
          toInclusive - is true if keys less than or equal to toKey should be
          included in the key range, or false if only keys less than toKey
          should be included.
          config - the cursor configuration that determines the default lock
          mode used for all cursor operations, or null to implicitly use
          CursorConfig.DEFAULT.
          Returns:
          the cursor.
          Throws:
          DatabaseException - the base class for all BDB exceptions.
    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/StoreConfig.html b/docs/java/com/sleepycat/persist/StoreConfig.html new file mode 100644 index 0000000..fe4dd17 --- /dev/null +++ b/docs/java/com/sleepycat/persist/StoreConfig.html @@ -0,0 +1,889 @@
    com.sleepycat.persist

    Class StoreConfig

    java.lang.Object
      com.sleepycat.persist.StoreConfig

    All Implemented Interfaces:
    java.lang.Cloneable

    public class StoreConfig
    extends java.lang.Object
    implements java.lang.Cloneable

    Configuration properties used with an EntityStore or RawStore.

    StoreConfig objects are thread-safe. Multiple threads may safely call the
    methods of a shared StoreConfig object.

    See the package summary example for an example of using a StoreConfig.

    Author:
    Mark Hayes
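    A minimal sketch of how these properties are typically combined (this
    example is not part of the original page; the Environment handle env and
    the store name "myStore" are assumptions):

      StoreConfig config = new StoreConfig();
      config.setAllowCreate(true);    // create the store on first use
      config.setTransactional(true);  // requires a transactional Environment
      EntityStore store = new EntityStore(env, "myStore", config);
      try {
          // obtain indices and read/write entities here
      } finally {
          store.close();
      }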
    Field Summary

    Fields
    Modifier and Type         Field and Description
    static StoreConfig        DEFAULT
                              The default store configuration containing
                              properties as if the configuration were
                              constructed and not modified.

    Constructor Summary

    Constructors
    Constructor and Description
    StoreConfig()
        Creates an entity store configuration object with default properties.

    Field Detail

    DEFAULT

    public static final StoreConfig DEFAULT

    The default store configuration containing properties as if the
    configuration were constructed and not modified.

    Constructor Detail

    StoreConfig

    public StoreConfig()

    Creates an entity store configuration object with default properties.

    Method Detail

    cloneConfig

    public StoreConfig cloneConfig()

    Deprecated. As of JE 4.0.13, replaced by clone().

    Returns a shallow copy of the configuration.

    Returns:
    the clone.

    clone

    public StoreConfig clone()

    Returns a shallow copy of the configuration.

    Overrides:
    clone in class java.lang.Object
    setAllowCreate

    public StoreConfig setAllowCreate(boolean allowCreate)

    Specifies whether creation of a new store is allowed. By default this
    property is false.

    If this property is false and the internal store metadata database does
    not exist, DatabaseException will be thrown when the store is opened.

    Parameters:
    allowCreate - whether creation of a new store is allowed.
    Returns:
    'this'.

    getAllowCreate

    public boolean getAllowCreate()

    Returns whether creation of a new store is allowed.

    Returns:
    whether creation of a new store is allowed.
    setExclusiveCreate

    public StoreConfig setExclusiveCreate(boolean exclusiveCreate)

    Specifies whether opening an existing store is prohibited. By default
    this property is false.

    If this property is true and the internal store metadata database already
    exists, DatabaseException will be thrown when the store is opened.

    Parameters:
    exclusiveCreate - whether opening an existing store is prohibited.
    Returns:
    'this'.

    getExclusiveCreate

    public boolean getExclusiveCreate()

    Returns whether opening an existing store is prohibited.

    Returns:
    whether opening an existing store is prohibited.
    setTransactional

    public StoreConfig setTransactional(boolean transactional)

    Sets the transactional configuration property. By default this property
    is false.

    This property is true to open all store indices for transactional access.
    True may not be specified if the environment is not also transactional.

    Parameters:
    transactional - whether the store is transactional.
    Returns:
    'this'.

    getTransactional

    public boolean getTransactional()

    Returns the transactional configuration property.

    Returns:
    whether the store is transactional.
    setReadOnly

    public StoreConfig setReadOnly(boolean readOnly)

    Sets the read-only configuration property. By default this property is
    false.

    This property is true to open all store indices for read-only access, or
    false to open them for read-write access. False may not be specified if
    the environment is read-only.

    Parameters:
    readOnly - whether the store is read-only.
    Returns:
    'this'.

    getReadOnly

    public boolean getReadOnly()

    Returns the read-only configuration property.

    Returns:
    whether the store is read-only.
    setReplicated

    public StoreConfig setReplicated(boolean replicated)

    Configures a store to be replicated or non-replicated, in a replicated
    Environment. By default this property is true, meaning that by default a
    store is replicated in a replicated Environment.

    In a non-replicated Environment, this property is ignored. All stores are
    non-replicated in a non-replicated Environment.

    Parameters:
    replicated - whether the store is replicated.
    Returns:
    'this'.
    See Also:
    Non-replicated Databases in a Replicated Environment

    getReplicated

    public boolean getReplicated()

    Returns the replicated property for the store.

    This method returns true by default. However, in a non-replicated
    Environment, this property is ignored. All stores are non-replicated in a
    non-replicated Environment.

    Returns:
    whether the store is replicated.
    See Also:
    setReplicated(boolean)
    setDeferredWrite

    public StoreConfig setDeferredWrite(boolean deferredWrite)

    Sets the deferred-write configuration property. By default this property
    is false.

    This property is true to open all store index databases for
    deferred-write access. True may not be specified if the store is
    transactional.

    Deferred write stores avoid disk I/O and are not guaranteed to be
    persistent until EntityStore.sync() or Environment.sync() is called or
    the store is closed normally. This mode is particularly geared toward
    stores that frequently modify and delete data records. See the Getting
    Started Guide, Database chapter for a full description of the mode.

    Parameters:
    deferredWrite - whether the store is deferred-write.
    Returns:
    'this'.
    See Also:
    setTransactional(boolean)

    getDeferredWrite

    public boolean getDeferredWrite()

    Returns the deferred-write configuration property.

    Returns:
    whether the store is deferred-write.
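    A minimal sketch of a deferred-write store, reusing the assumed env
    handle; "scratchStore" is a hypothetical name. Deferred-write may not be
    combined with a transactional store.

      StoreConfig dwConfig = new StoreConfig();
      dwConfig.setAllowCreate(true);
      dwConfig.setDeferredWrite(true);
      EntityStore dwStore = new EntityStore(env, "scratchStore", dwConfig);
      // ... write entities ...
      dwStore.sync();  // flush explicitly; data is otherwise durable only
                       // when the store is closed normally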
    setTemporary

    public StoreConfig setTemporary(boolean temporary)

    Sets the temporary configuration property. By default this property is
    false.

    This property is true to open all store databases as temporary databases.
    True may not be specified if the store is transactional.

    Temporary stores avoid disk I/O and are not persistent -- they are
    deleted when the store is closed or after a crash. This mode is
    particularly geared toward in-memory stores. See the Getting Started
    Guide, Database chapter for a full description of the mode.

    Parameters:
    temporary - whether the store is temporary.
    Returns:
    'this'.
    See Also:
    setTransactional(boolean)

    getTemporary

    public boolean getTemporary()

    Returns the temporary configuration property.

    Returns:
    whether the store is temporary.
    setSecondaryBulkLoad

    public StoreConfig setSecondaryBulkLoad(boolean secondaryBulkLoad)

    Sets the bulk-load-secondaries configuration property. By default this
    property is false.

    This property is true to cause the initial creation of secondary indices
    to be performed as a bulk load. If this property is true and
    EntityStore.getSecondaryIndex has never been called for a secondary
    index, that secondary index will not be created or written as records are
    written to the primary index. In addition, if that secondary index
    defines a foreign key constraint, the constraint will not be enforced.

    The secondary index will be populated later when the getSecondaryIndex
    method is called for the first time for that index, or when the store is
    closed and re-opened with this property set to false and the primary
    index is obtained. In either case, the secondary index is populated by
    reading through the entire primary index and adding records to the
    secondary index as needed. While populating the secondary, foreign key
    constraints will be enforced and an exception is thrown if a constraint
    is violated.

    When loading a primary index along with secondary indexes from a large
    input data set, configuring a bulk load of the secondary indexes is
    sometimes more performant than updating the secondary indexes each time
    the primary index is updated. The absence of foreign key constraints
    during the load also provides more flexibility.

    Parameters:
    secondaryBulkLoad - whether bulk-load-secondaries is used.
    Returns:
    'this'.

    getSecondaryBulkLoad

    public boolean getSecondaryBulkLoad()

    Returns the bulk-load-secondaries configuration property.

    Returns:
    whether bulk-load-secondaries is used.
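    A minimal sketch of the bulk-load pattern described above, reusing the
    assumed env handle and a hypothetical Employee entity with a "department"
    secondary key:

      StoreConfig blConfig = new StoreConfig();
      blConfig.setAllowCreate(true);
      blConfig.setSecondaryBulkLoad(true);
      EntityStore blStore = new EntityStore(env, "bulkStore", blConfig);

      // Load through the primary index only; the secondary index is not
      // yet created or maintained, so each put is cheaper.
      PrimaryIndex<Long,Employee> byId =
          blStore.getPrimaryIndex(Long.class, Employee.class);
      // ... byId.put(...) for the whole data set ...

      // The first call populates the secondary index by scanning the
      // primary index.
      SecondaryIndex<String,Long,Employee> byDept =
          blStore.getSecondaryIndex(byId, String.class, "department");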
    setModel

    public StoreConfig setModel(EntityModel model)

    Sets the entity model that defines entity classes and index keys.

    If null is specified or this method is not called, an AnnotationModel
    instance is used by default.

    Parameters:
    model - the EntityModel.
    Returns:
    'this'.

    getModel

    public EntityModel getModel()

    Returns the entity model that defines entity classes and index keys.

    Returns:
    the EntityModel.
    setMutations

    public StoreConfig setMutations(Mutations mutations)

    Configures mutations for performing lazy evolution of stored instances.
    Existing mutations for this store are not cleared, so the mutations
    required are only those changes that have been made since the store was
    last opened. Some new mutations may override existing specifications, and
    some may be supplemental.

    If null is specified and the store already exists, the previously
    specified mutations are used. The mutations are stored persistently in
    serialized form.

    Mutations must be available to handle all changes to classes that are
    incompatible with the class definitions known to this store. See
    Mutations and Class Evolution for more information.

    If an incompatible class change has been made and mutations are not
    available for handling the change, IncompatibleClassException will be
    thrown when creating an EntityStore.

    Parameters:
    mutations - the Mutations.
    Returns:
    'this'.

    getMutations

    public Mutations getMutations()

    Returns the configured mutations for performing lazy evolution of stored
    instances.

    Returns:
    the Mutations.
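    A minimal sketch of supplying mutations at open time. It assumes, purely
    for illustration, that a field of a hypothetical com.example.Person class
    (version 0) was renamed from "fullName" to "name" in the current class
    definition.

      Mutations mutations = new Mutations();
      mutations.addRenamer(new Renamer("com.example.Person", 0,
                                       "fullName", "name"));
      StoreConfig evolveConfig = new StoreConfig();
      evolveConfig.setAllowCreate(true);
      evolveConfig.setMutations(mutations);
      EntityStore evolvedStore = new EntityStore(env, "myStore", evolveConfig);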
diff --git a/docs/java/com/sleepycat/persist/StoreConfigBeanInfo.html b/docs/java/com/sleepycat/persist/StoreConfigBeanInfo.html new file mode 100644 index 0000000..5ead0b9 --- /dev/null +++ b/docs/java/com/sleepycat/persist/StoreConfigBeanInfo.html @@ -0,0 +1,351 @@
    com.sleepycat.persist

    Class StoreConfigBeanInfo

    All Implemented Interfaces:
    java.beans.BeanInfo

    public class StoreConfigBeanInfo
    extends ConfigBeanInfoBase

    Constructor Detail

    StoreConfigBeanInfo

    public StoreConfigBeanInfo()

    Method Detail

    getBeanDescriptor

    public java.beans.BeanDescriptor getBeanDescriptor()

    Specified by:
    getBeanDescriptor in interface java.beans.BeanInfo
    Overrides:
    getBeanDescriptor in class java.beans.SimpleBeanInfo

    getPropertyDescriptors

    public java.beans.PropertyDescriptor[] getPropertyDescriptors()

    Specified by:
    getPropertyDescriptors in interface java.beans.BeanInfo
    Overrides:
    getPropertyDescriptors in class java.beans.SimpleBeanInfo
diff --git a/docs/java/com/sleepycat/persist/StoreExistsException.html b/docs/java/com/sleepycat/persist/StoreExistsException.html new file mode 100644 index 0000000..d41eea6 --- /dev/null +++ b/docs/java/com/sleepycat/persist/StoreExistsException.html @@ -0,0 +1,253 @@
    com.sleepycat.persist

    Class StoreExistsException

    All Implemented Interfaces:
    java.io.Serializable

    public class StoreExistsException
    extends OperationFailureException

    Thrown by the EntityStore constructor when the ExclusiveCreate
    configuration parameter is true and the store's internal catalog database
    already exists.

    Author:
    Mark Hayes
    See Also:
    Serialized Form

    Method Summary

    Methods inherited from class java.lang.Throwable
    addSuppressed, fillInStackTrace, getCause, getLocalizedMessage,
    getStackTrace, getSuppressed, initCause, printStackTrace,
    printStackTrace, printStackTrace, setStackTrace, toString

    Methods inherited from class java.lang.Object
    clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait,
    wait, wait
diff --git a/docs/java/com/sleepycat/persist/StoreNotFoundException.html b/docs/java/com/sleepycat/persist/StoreNotFoundException.html new file mode 100644 index 0000000..36196b9 --- /dev/null +++ b/docs/java/com/sleepycat/persist/StoreNotFoundException.html @@ -0,0 +1,253 @@
    com.sleepycat.persist

    Class StoreNotFoundException

    All Implemented Interfaces:
    java.io.Serializable

    public class StoreNotFoundException
    extends OperationFailureException

    Thrown by the EntityStore constructor when the AllowCreate configuration
    parameter is false and the store's internal catalog database does not
    exist.

    Author:
    Mark Hayes
    See Also:
    Serialized Form

    Method Summary

    Methods inherited from class java.lang.Throwable
    addSuppressed, fillInStackTrace, getCause, getLocalizedMessage,
    getStackTrace, getSuppressed, initCause, printStackTrace,
    printStackTrace, printStackTrace, setStackTrace, toString

    Methods inherited from class java.lang.Object
    clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait,
    wait, wait
diff --git a/docs/java/com/sleepycat/persist/class-use/EntityCursor.html b/docs/java/com/sleepycat/persist/class-use/EntityCursor.html new file mode 100644 index 0000000..cc264b3 --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/EntityCursor.html @@ -0,0 +1,240 @@
    Uses of Interface
    com.sleepycat.persist.EntityCursor

diff --git a/docs/java/com/sleepycat/persist/class-use/EntityIndex.html b/docs/java/com/sleepycat/persist/class-use/EntityIndex.html new file mode 100644 index 0000000..0d5cc10 --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/EntityIndex.html @@ -0,0 +1,202 @@

    Uses of Interface
    com.sleepycat.persist.EntityIndex

diff --git a/docs/java/com/sleepycat/persist/class-use/EntityJoin.html b/docs/java/com/sleepycat/persist/class-use/EntityJoin.html new file mode 100644 index 0000000..3767cae --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/EntityJoin.html @@ -0,0 +1,129 @@

    Uses of Class
    com.sleepycat.persist.EntityJoin

    No usage of com.sleepycat.persist.EntityJoin

diff --git a/docs/java/com/sleepycat/persist/class-use/EntityResult.html b/docs/java/com/sleepycat/persist/class-use/EntityResult.html new file mode 100644 index 0000000..2d62f5d --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/EntityResult.html @@ -0,0 +1,200 @@

    Uses of Class
    com.sleepycat.persist.EntityResult

diff --git a/docs/java/com/sleepycat/persist/class-use/EntityStore.html b/docs/java/com/sleepycat/persist/class-use/EntityStore.html new file mode 100644 index 0000000..0402ed3 --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/EntityStore.html @@ -0,0 +1,129 @@

    Uses of Class
    com.sleepycat.persist.EntityStore

    No usage of com.sleepycat.persist.EntityStore

diff --git a/docs/java/com/sleepycat/persist/class-use/ForwardCursor.html b/docs/java/com/sleepycat/persist/class-use/ForwardCursor.html new file mode 100644 index 0000000..35eba5f --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/ForwardCursor.html @@ -0,0 +1,212 @@

    Uses of Interface
    com.sleepycat.persist.ForwardCursor

diff --git a/docs/java/com/sleepycat/persist/class-use/IndexNotAvailableException.html b/docs/java/com/sleepycat/persist/class-use/IndexNotAvailableException.html new file mode 100644 index 0000000..11d595a --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/IndexNotAvailableException.html @@ -0,0 +1,129 @@

    Uses of Class
    com.sleepycat.persist.IndexNotAvailableException

    No usage of com.sleepycat.persist.IndexNotAvailableException

diff --git a/docs/java/com/sleepycat/persist/class-use/PrimaryIndex.html b/docs/java/com/sleepycat/persist/class-use/PrimaryIndex.html new file mode 100644 index 0000000..002756f --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/PrimaryIndex.html @@ -0,0 +1,258 @@

    Uses of Class
    com.sleepycat.persist.PrimaryIndex

diff --git a/docs/java/com/sleepycat/persist/class-use/SecondaryIndex.html b/docs/java/com/sleepycat/persist/class-use/SecondaryIndex.html new file mode 100644 index 0000000..7f30b66 --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/SecondaryIndex.html @@ -0,0 +1,231 @@

    Uses of Class
    com.sleepycat.persist.SecondaryIndex

diff --git a/docs/java/com/sleepycat/persist/class-use/StoreConfig.html b/docs/java/com/sleepycat/persist/class-use/StoreConfig.html new file mode 100644 index 0000000..0306d4c --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/StoreConfig.html @@ -0,0 +1,321 @@

    Uses of Class
    com.sleepycat.persist.StoreConfig

diff --git a/docs/java/com/sleepycat/persist/class-use/StoreConfigBeanInfo.html b/docs/java/com/sleepycat/persist/class-use/StoreConfigBeanInfo.html new file mode 100644 index 0000000..799f85b --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/StoreConfigBeanInfo.html @@ -0,0 +1,129 @@

    Uses of Class
    com.sleepycat.persist.StoreConfigBeanInfo

    No usage of com.sleepycat.persist.StoreConfigBeanInfo

diff --git a/docs/java/com/sleepycat/persist/class-use/StoreExistsException.html b/docs/java/com/sleepycat/persist/class-use/StoreExistsException.html new file mode 100644 index 0000000..95d203a --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/StoreExistsException.html @@ -0,0 +1,174 @@

    Uses of Class
    com.sleepycat.persist.StoreExistsException

diff --git a/docs/java/com/sleepycat/persist/class-use/StoreNotFoundException.html b/docs/java/com/sleepycat/persist/class-use/StoreNotFoundException.html new file mode 100644 index 0000000..85a303e --- /dev/null +++ b/docs/java/com/sleepycat/persist/class-use/StoreNotFoundException.html @@ -0,0 +1,200 @@

    Uses of Class
    com.sleepycat.persist.StoreNotFoundException

diff --git a/docs/java/com/sleepycat/persist/evolve/Conversion.html b/docs/java/com/sleepycat/persist/evolve/Conversion.html new file mode 100644 index 0000000..f9b0ca2 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/Conversion.html @@ -0,0 +1,682 @@
    com.sleepycat.persist.evolve

    Interface Conversion

    All Superinterfaces:
    java.io.Serializable

    public interface Conversion
    extends java.io.Serializable
    Converts an old version of an object value to conform to the current
    class or field definition.

    The Conversion interface is implemented by the user. A Conversion
    instance is passed to the Converter.Converter(java.lang.String, int,
    java.lang.String, com.sleepycat.persist.evolve.Conversion) constructor.

    The Conversion interface extends Serializable and the Conversion instance
    is serialized for storage using standard Java serialization. Normally,
    the Conversion class should only have transient fields that are
    initialized in the initialize(com.sleepycat.persist.model.EntityModel)
    method. While non-transient fields are allowed, care must be taken to
    only include fields that are serializable and will not pull in large
    amounts of data.

    When a class conversion is specified, two special considerations apply:

    1. A class conversion is only applied to instances of that class. The
       conversion will not be applied when the class appears as a superclass
       of the instance's class. In this case, a conversion for the instance's
       class must also be specified.
    2. Although field renaming (as well as all other changes) is handled by
       the conversion method, a field Renamer is still needed when a
       secondary key field is renamed and a field Deleter is still needed
       when a secondary key field is deleted. This is necessary for evolution
       of the metadata; specifically, if the key name changes the database
       must be renamed and if the key field is deleted the secondary database
       must be deleted.

    The Conversion class must implement the standard equals method. See
    equals(java.lang.Object) for more information.

    Conversions of simple types are generally simple. For example, a String
    field that contains only integer values can be easily converted to an int
    field:
  // The old class.  Version 0 is implied.
  //
  @Persistent
  class Address {
      String zipCode;
      ...
  }

  // The new class.  A new version number must be assigned.
  //
  @Persistent(version=1)
  class Address {
      int zipCode;
      ...
  }

  // The conversion class.
  //
  class MyConversion1 implements Conversion {

      public void initialize(EntityModel model) {
          // No initialization needed.
      }

      public Object convert(Object fromValue) {
          return Integer.valueOf((String) fromValue);
      }

      @Override
      public boolean equals(Object o) {
          return o instanceof MyConversion1;
      }
  }

  // Create a field converter mutation.
  //
  Converter converter = new Converter(Address.class.getName(), 0,
                                      "zipCode", new MyConversion1());

  // Configure the converter as described here.

A conversion may perform arbitrary transformations on an object. For example, a conversion may transform a single String address field into an Address object containing four fields for street, city, state and zip code.

  // The old class.  Version 0 is implied.
  //
  @Entity
  class Person {
      String address;
      ...
  }

  // The new class.  A new version number must be assigned.
  //
  @Entity(version=1)
  class Person {
      Address address;
      ...
  }

  // The new address class.
  //
  @Persistent
  class Address {
      String street;
      String city;
      String state;
      int zipCode;
      ...
  }

  class MyConversion2 implements Conversion {
      private transient RawType addressType;

      public void initialize(EntityModel model) {
          addressType = model.getRawType(Address.class.getName());
      }

      public Object convert(Object fromValue) {

          // Parse the old address and populate the new address fields.
          //
          String oldAddress = (String) fromValue;
          Map<String, Object> addressValues = new HashMap<String, Object>();
          addressValues.put("street", parseStreet(oldAddress));
          addressValues.put("city", parseCity(oldAddress));
          addressValues.put("state", parseState(oldAddress));
          addressValues.put("zipCode", parseZipCode(oldAddress));

          // Return the new raw Address object.
          //
          return new RawObject(addressType, addressValues, null);
      }

      @Override
      public boolean equals(Object o) {
          return o instanceof MyConversion2;
      }

      private String parseStreet(String oldAddress) { ... }
      private String parseCity(String oldAddress) { ... }
      private String parseState(String oldAddress) { ... }
      private Integer parseZipCode(String oldAddress) { ... }
  }

  // Create a field converter mutation.
  //
  Converter converter = new Converter(Person.class.getName(), 0,
                                      "address", new MyConversion2());

  // Configure the converter as described here.

Note that when a conversion returns a RawObject, it must return it with a RawType that is current as defined by the current class definitions. The proper types can be obtained from the EntityModel in the conversion's initialize method.


A variation on the example above is where several fields in a class (street, city, state and zipCode) are converted to a single field (address). In this case a class converter rather than a field converter is used.

  // The old class.  Version 0 is implied.
  //
  @Entity
  class Person {
      String street;
      String city;
      String state;
      int zipCode;
      ...
  }

  // The new class.  A new version number must be assigned.
  //
  @Entity(version=1)
  class Person {
      Address address;
      ...
  }

  // The new address class.
  //
  @Persistent
  class Address {
      String street;
      String city;
      String state;
      int zipCode;
      ...
  }

  class MyConversion3 implements Conversion {
      private transient RawType newPersonType;
      private transient RawType addressType;

      public void initialize(EntityModel model) {
          newPersonType = model.getRawType(Person.class.getName());
          addressType = model.getRawType(Address.class.getName());
      }

      public Object convert(Object fromValue) {

          // Get field value maps for old and new objects.
          //
          RawObject person = (RawObject) fromValue;
          Map<String, Object> personValues = person.getValues();
          Map<String, Object> addressValues = new HashMap<String, Object>();
          RawObject address = new RawObject(addressType, addressValues, null);

          // Remove the old address fields and insert the new one.
          //
          addressValues.put("street", personValues.remove("street"));
          addressValues.put("city", personValues.remove("city"));
          addressValues.put("state", personValues.remove("state"));
          addressValues.put("zipCode", personValues.remove("zipCode"));
          personValues.put("address", address);

          return new RawObject(newPersonType, personValues, person.getSuper());
      }

      @Override
      public boolean equals(Object o) {
          return o instanceof MyConversion3;
      }
  }

  // Create a class converter mutation.
  //
  Converter converter = new Converter(Person.class.getName(), 0,
                                      new MyConversion3());

  // Configure the converter as described here.

A conversion can also handle changes to class hierarchies. For example, if a "name" field originally declared in class A is moved to its superclass B, a conversion can move the field value accordingly:

  // The old classes.  Version 0 is implied.
  //
  @Persistent
  class A extends B {
      String name;
      ...
  }
  @Persistent
  abstract class B {
      ...
  }

  // The new classes.  A new version number must be assigned.
  //
  @Persistent(version=1)
  class A extends B {
      ...
  }
  @Persistent(version=1)
  abstract class B {
      String name;
      ...
  }

  class MyConversion4 implements Conversion {
      private transient RawType newAType;
      private transient RawType newBType;

      public void initialize(EntityModel model) {
          newAType = model.getRawType(A.class.getName());
          newBType = model.getRawType(B.class.getName());
      }

      public Object convert(Object fromValue) {
          RawObject oldA = (RawObject) fromValue;
          RawObject oldB = oldA.getSuper();
          Map<String, Object> aValues = oldA.getValues();
          Map<String, Object> bValues = oldB.getValues();
          bValues.put("name", aValues.remove("name"));
          RawObject newB = new RawObject(newBType, bValues, oldB.getSuper());
          RawObject newA = new RawObject(newAType, aValues, newB);
          return newA;
      }

      @Override
      public boolean equals(Object o) {
          return o instanceof MyConversion4;
      }
  }

  // Create a class converter mutation.
  //
  Converter converter = new Converter(A.class.getName(), 0,
                                      new MyConversion4());

  // Configure the converter as described here.

A conversion may return an instance of a different class entirely, as long as it conforms to current class definitions and is the type expected in the given context (a subtype of the old type, or a type compatible with the new field type). For example, a field that is used to discriminate between two types of objects could be removed and replaced by two new subclasses:

  // The old class.  Version 0 is implied.
  //
  @Persistent
  class Pet {
      boolean isCatNotDog;
      ...
  }

  // The new classes.  A new version number must be assigned to the Pet class.
  //
  @Persistent(version=1)
  class Pet {
      ...
  }
  @Persistent
  class Cat extends Pet {
      ...
  }
  @Persistent
  class Dog extends Pet {
      ...
  }

  class MyConversion5 implements Conversion {
      private transient RawType newPetType;
      private transient RawType dogType;
      private transient RawType catType;

      public void initialize(EntityModel model) {
          newPetType = model.getRawType(Pet.class.getName());
          dogType = model.getRawType(Dog.class.getName());
          catType = model.getRawType(Cat.class.getName());
      }

      public Object convert(Object fromValue) {
          RawObject pet = (RawObject) fromValue;
          Map<String, Object> petValues = pet.getValues();
          Boolean isCat = (Boolean) petValues.remove("isCatNotDog");
          RawObject newPet = new RawObject(newPetType, petValues,
                                           pet.getSuper());
          RawType newSubType = isCat ? catType : dogType;
          return new RawObject(newSubType, Collections.emptyMap(), newPet);
      }

      @Override
      public boolean equals(Object o) {
          return o instanceof MyConversion5;
      }
  }

  // Create a class converter mutation.
  //
  Converter converter = new Converter(Pet.class.getName(), 0,
                                      new MyConversion5());

  // Configure the converter as described here.

The primary limitation of a conversion is that it may access at most a single entity instance at one time. Conversions involving multiple entities at once may be made by performing a store conversion.

Author:
Mark Hayes

See Also:
Class Evolution
Method Summary

java.lang.Object  convert(java.lang.Object fromValue)
    Converts an old version of an object value to conform to the current class or field definition.

boolean  equals(java.lang.Object other)
    The standard equals method that must be implemented by the conversion class.

void  initialize(EntityModel model)
    Initializes the conversion, allowing it to obtain raw type information from the entity model.

Method Detail
initialize

void initialize(EntityModel model)

Initializes the conversion, allowing it to obtain raw type information from the entity model.

Parameters:
model - the EntityModel.
convert

java.lang.Object convert(java.lang.Object fromValue)

Converts an old version of an object value to conform to the current class or field definition.

If a RuntimeException is thrown by this method, it will be thrown to the original caller. Similarly, an IllegalArgumentException will be thrown to the original caller if the object returned by this method does not conform to current class definitions.

The class of the input and output object may be one of the simple types or RawObject. For primitive types, the primitive wrapper class is used.

Parameters:
fromValue - the object value being converted. The type of this value is defined by the old class version that is being converted.

Returns:
the converted object. The type of this value must conform to a current class definition. If this is a class conversion, it must be the current version of the class. If this is a field conversion, it must be of a type compatible with the current declared type of the field.
equals

boolean equals(java.lang.Object other)

The standard equals method that must be implemented by the conversion class.

When mutations are specified when opening a store, the specified and previously stored mutations are compared for equality. If they are equal, there is no need to replace the existing mutations in the stored catalog. To accurately determine equality, the conversion class must implement the equals method.

If the equals method is not explicitly implemented by the conversion class or a superclass other than Object, IllegalArgumentException will be thrown when the store is opened.

Normally whenever equals is implemented the hashCode method should also be implemented to support hash sets and maps. However, hash sets and maps containing Conversion objects are not used by the DPL and therefore the DPL does not require hashCode to be implemented.

Overrides:
equals in class java.lang.Object

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/Converter.html b/docs/java/com/sleepycat/persist/evolve/Converter.html
new file mode 100644
index 0000000..d7fdf15
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/Converter.html
@@ -0,0 +1,446 @@

Converter (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class Converter

All Implemented Interfaces:
java.io.Serializable

Direct Known Subclasses:
EntityConverter

public class Converter extends Mutation

A mutation for converting an old version of an object value to conform to the current class or field definition. For example:
  package my.package;

  // The old class.  Version 0 is implied.
  //
  @Entity
  class Person {
      // ...
  }

  // The new class.  A new version number must be assigned.
  //
  @Entity(version=1)
  class Person {
      // Incompatible changes were made here...
  }

  // Add a converter mutation.
  //
  Mutations mutations = new Mutations();

  mutations.addConverter(new Converter(Person.class.getName(), 0,
                                       new MyConversion()));

  // Configure the mutations as described here.

      See Conversion for more information.

Author:
Mark Hayes

See Also:
Class Evolution, Serialized Form

Constructor Summary
Converter(java.lang.String className, int classVersion, Conversion conversion)
    Creates a mutation for converting all instances of the given class version to the current version of the class.

Converter(java.lang.String declaringClassName, int declaringClassVersion, java.lang.String fieldName, Conversion conversion)
    Creates a mutation for converting all values of the given field in the given class version to a type compatible with the current declared type of the field.
Constructor Detail
Converter

public Converter(java.lang.String className,
                 int classVersion,
                 Conversion conversion)

Creates a mutation for converting all instances of the given class version to the current version of the class.

Parameters:
className - the class to which this mutation applies.
classVersion - the class version to which this mutation applies.
conversion - converter instance.
Converter

public Converter(java.lang.String declaringClassName,
                 int declaringClassVersion,
                 java.lang.String fieldName,
                 Conversion conversion)

Creates a mutation for converting all values of the given field in the given class version to a type compatible with the current declared type of the field.

Parameters:
declaringClassName - the class to which this mutation applies.
declaringClassVersion - the class version to which this mutation applies.
fieldName - field name to which this mutation applies.
conversion - converter instance.
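As a complement to the class-converter example above, a hedged sketch of registering a field converter built with this constructor; MyConversion is a hypothetical Conversion implementation and my.package.Address an assumed class name, not taken from these docs:

  // Hypothetical names, for illustration only.
  Mutations mutations = new Mutations();
  mutations.addConverter(new Converter("my.package.Address", 0,
                                       "zipCode", new MyConversion()));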
Method Detail
getConversion

public Conversion getConversion()

Returns the converter instance specified to the constructor.

Returns:
the converter instance.
equals

public boolean equals(java.lang.Object other)

Returns true if the conversion objects are equal in this object and given object, and if the Mutation.equals(java.lang.Object) superclass method returns true.

Overrides:
equals in class Mutation
hashCode

public int hashCode()

Overrides:
hashCode in class Mutation
toString

public java.lang.String toString()

Overrides:
toString in class Mutation

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/DeletedClassException.html b/docs/java/com/sleepycat/persist/evolve/DeletedClassException.html
new file mode 100644
index 0000000..ac1c8b6
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/DeletedClassException.html
@@ -0,0 +1,254 @@

DeletedClassException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class DeletedClassException

All Implemented Interfaces:
java.io.Serializable

public class DeletedClassException extends OperationFailureException

While reading from an index, an instance of a deleted class version was encountered.
Author:
Mark Hayes

See Also:
Class Evolution, Serialized Form

Method Summary

Methods inherited from class java.lang.Throwable:
addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/Deleter.html b/docs/java/com/sleepycat/persist/evolve/Deleter.html
new file mode 100644
index 0000000..32c26ea
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/Deleter.html
@@ -0,0 +1,396 @@

Deleter (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class Deleter

All Implemented Interfaces:
java.io.Serializable

public class Deleter extends Mutation

A mutation for deleting an entity class or field.

WARNING: The data for the deleted class or field will be destroyed and will be recoverable only by restoring from backup. If you wish to convert the instance data to a different type or format, use a Conversion mutation instead.

For example, to delete a field:
  package my.package;

  // The old class.  Version 0 is implied.
  //
  @Entity
  class Person {
      String name;
      String favoriteColors;
  }

  // The new class.  A new version number must be assigned.
  //
  @Entity(version=1)
  class Person {
      String name;
  }

  // Add the mutation for deleting a field.
  //
  Mutations mutations = new Mutations();

  mutations.addDeleter(new Deleter(Person.class.getName(), 0,
                                   "favoriteColors"));

  // Configure the mutations as described here.

      To delete an entity class:

  package my.package;

  // The old class.  Version 0 is implied.
  //
  @Entity
  class Statistics {
      ...
  }

  // Add the mutation for deleting a class.
  //
  Mutations mutations = new Mutations();

  mutations.addDeleter(new Deleter("my.package.Statistics", 0));

  // Configure the mutations as described here.
Author:
Mark Hayes

See Also:
Class Evolution, Serialized Form

Constructor Summary
Deleter(java.lang.String className, int classVersion)
    Creates a mutation for deleting an entity class.

Deleter(java.lang.String declaringClass, int declaringClassVersion, java.lang.String fieldName)
    Creates a mutation for deleting the given field from all instances of the given class version.
Constructor Detail
Deleter

public Deleter(java.lang.String className,
               int classVersion)

Creates a mutation for deleting an entity class.

Parameters:
className - the class to which this mutation applies.
classVersion - the class version to which this mutation applies.
Deleter

public Deleter(java.lang.String declaringClass,
               int declaringClassVersion,
               java.lang.String fieldName)

Creates a mutation for deleting the given field from all instances of the given class version.

Parameters:
declaringClass - the class to which this mutation applies.
declaringClassVersion - the class version to which this mutation applies.
fieldName - field name to which this mutation applies.
Method Detail
toString

public java.lang.String toString()

Overrides:
toString in class Mutation

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/EntityConverter.html b/docs/java/com/sleepycat/persist/evolve/EntityConverter.html
new file mode 100644
index 0000000..8a8fcb4
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/EntityConverter.html
@@ -0,0 +1,405 @@

EntityConverter (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class EntityConverter

All Implemented Interfaces:
java.io.Serializable

public class EntityConverter extends Converter

A subclass of Converter that allows specifying keys to be deleted.

When a Converter is used with an entity class, secondary keys cannot be automatically deleted based on field deletion, because field Deleter objects are not used in conjunction with a Converter mutation. The EntityConverter can be used instead of a plain Converter to specify the key names to be deleted.


It is not currently possible to rename or insert secondary keys when using a Converter mutation with an entity class.
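As an illustrative, hedged sketch of the constructor described below (the entity class my.package.MyEntity, the key name "oldKey", and MyConversion are assumptions, not taken from these docs):

  // Hypothetical names; assumes the secondary key field "oldKey" is
  // removed from version 0 of my.package.MyEntity by the conversion.
  java.util.Set<java.lang.String> deletedKeys =
      new java.util.HashSet<java.lang.String>();
  deletedKeys.add("oldKey");

  Mutations mutations = new Mutations();
  mutations.addConverter(new EntityConverter("my.package.MyEntity", 0,
                                             new MyConversion(),
                                             deletedKeys));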

Author:
Mark Hayes

See Also:
Converter, Class Evolution, Serialized Form

Constructor Summary
EntityConverter(java.lang.String entityClassName, int classVersion, Conversion conversion, java.util.Set<java.lang.String> deletedKeys)
    Creates a mutation for converting all instances of the given entity class version to the current version of the class.
Constructor Detail
EntityConverter

public EntityConverter(java.lang.String entityClassName,
                       int classVersion,
                       Conversion conversion,
                       java.util.Set<java.lang.String> deletedKeys)

Creates a mutation for converting all instances of the given entity class version to the current version of the class.

Parameters:
entityClassName - the entity class to which this mutation applies.
classVersion - the class version to which this mutation applies.
conversion - converter instance.
deletedKeys - the set of key names that are to be deleted.
Method Detail

getDeletedKeys

public java.util.Set<java.lang.String> getDeletedKeys()

Returns the set of key names that are to be deleted.

Returns:
the set of key names that are to be deleted.
equals

public boolean equals(java.lang.Object other)

Returns true if the deleted and renamed keys are equal in this object and given object, and if the Converter.equals(java.lang.Object) superclass method returns true.

Overrides:
equals in class Converter
hashCode

public int hashCode()

Overrides:
hashCode in class Converter
toString

public java.lang.String toString()

Overrides:
toString in class Converter

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/EvolveConfig.html b/docs/java/com/sleepycat/persist/evolve/EvolveConfig.html
new file mode 100644
index 0000000..468aaf0
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/EvolveConfig.html
@@ -0,0 +1,409 @@

EvolveConfig (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class EvolveConfig

java.lang.Object
  com.sleepycat.persist.evolve.EvolveConfig

All Implemented Interfaces:
java.lang.Cloneable

public class EvolveConfig extends java.lang.Object implements java.lang.Cloneable

Configuration properties for eager conversion of unevolved objects. This configuration is used with EntityStore.evolve.
Author:
Mark Hayes

See Also:
Class Evolution
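As a hedged usage sketch drawing only on the methods documented below (the entity class name my.package.Person and the already-opened store variable are assumptions, not part of these docs):

  EvolveConfig config = new EvolveConfig();
  config.addClassToEvolve("my.package.Person");
  config.setEvolveListener(new EvolveListener() {
      public boolean evolveProgress(EvolveEvent event) {
          // Report cumulative progress; returning true continues evolution.
          EvolveStats stats = event.getStats();
          System.out.println(event.getEntityClassName() +
                             ": read=" + stats.getNRead() +
                             ", converted=" + stats.getNConverted());
          return true;
      }
  });
  store.evolve(config);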
Constructor Detail
EvolveConfig

public EvolveConfig()

Creates an evolve configuration with default properties.
Method Detail
cloneConfig

public EvolveConfig cloneConfig()

Deprecated. As of JE 4.0.13, replaced by clone().

Returns a shallow copy of the configuration.

Returns:
a shallow copy of the configuration.
clone

public EvolveConfig clone()

Returns a shallow copy of the configuration.

Overrides:
clone in class java.lang.Object
addClassToEvolve

public EvolveConfig addClassToEvolve(java.lang.String entityClass)

Adds an entity class for a primary index to be converted. If no classes are added, all indexes that require evolution will be converted.

Parameters:
entityClass - the entity class name.

Returns:
'this'.
getClassesToEvolve

public java.util.Set<java.lang.String> getClassesToEvolve()

Returns an unmodifiable set of the entity classes to be evolved.

Returns:
an unmodifiable set of the entity classes to be evolved.
setEvolveListener

public EvolveConfig setEvolveListener(EvolveListener listener)

Sets a progress listener that is notified each time an entity is read.

Parameters:
listener - the EvolveListener.

Returns:
'this'.
getEvolveListener

public EvolveListener getEvolveListener()

Returns the progress listener that is notified each time an entity is read.

Returns:
the EvolveListener.

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/EvolveConfigBeanInfo.html b/docs/java/com/sleepycat/persist/evolve/EvolveConfigBeanInfo.html
new file mode 100644
index 0000000..5109ece
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/EvolveConfigBeanInfo.html
@@ -0,0 +1,351 @@

EvolveConfigBeanInfo (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class EvolveConfigBeanInfo

All Implemented Interfaces:
java.beans.BeanInfo

public class EvolveConfigBeanInfo extends ConfigBeanInfoBase
Constructor Detail
EvolveConfigBeanInfo

public EvolveConfigBeanInfo()
Method Detail
getBeanDescriptor

public java.beans.BeanDescriptor getBeanDescriptor()

Specified by:
getBeanDescriptor in interface java.beans.BeanInfo

Overrides:
getBeanDescriptor in class java.beans.SimpleBeanInfo
getPropertyDescriptors

public java.beans.PropertyDescriptor[] getPropertyDescriptors()

Specified by:
getPropertyDescriptors in interface java.beans.BeanInfo

Overrides:
getPropertyDescriptors in class java.beans.SimpleBeanInfo

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/EvolveEvent.html b/docs/java/com/sleepycat/persist/evolve/EvolveEvent.html
new file mode 100644
index 0000000..a978ec5
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/EvolveEvent.html
@@ -0,0 +1,277 @@

EvolveEvent (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class EvolveEvent

java.lang.Object
  com.sleepycat.persist.evolve.EvolveEvent

public class EvolveEvent extends java.lang.Object

The event passed to the EvolveListener interface during eager entity evolution.
Author:
Mark Hayes

See Also:
Class Evolution

Method Summary
java.lang.String  getEntityClassName()
    The class name of the current entity class being converted.

EvolveStats  getStats()
    The cumulative statistics gathered during eager evolution.
Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail
getStats

public EvolveStats getStats()

The cumulative statistics gathered during eager evolution.

Returns:
the cumulative statistics.
getEntityClassName

public java.lang.String getEntityClassName()

The class name of the current entity class being converted.

Returns:
the class name.

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/EvolveListener.html b/docs/java/com/sleepycat/persist/evolve/EvolveListener.html
new file mode 100644
index 0000000..b6a6f92
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/EvolveListener.html
@@ -0,0 +1,242 @@

EvolveListener (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Interface EvolveListener

public interface EvolveListener

The listener interface called during eager entity evolution.
Author:
Mark Hayes

See Also:
Class Evolution

Method Detail

evolveProgress

boolean evolveProgress(EvolveEvent event)

The listener method called during eager entity evolution.

Parameters:
event - the EvolveEvent.

Returns:
true to continue evolution or false to stop.
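For illustration only, a hedged sketch of a listener that uses this return value to stop eager evolution early; the 10,000-entity limit is an arbitrary assumption:

  EvolveListener listener = new EvolveListener() {
      public boolean evolveProgress(EvolveEvent event) {
          // Arbitrary cutoff: stop once 10,000 entities have been read.
          return event.getStats().getNRead() < 10000;
      }
  };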

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/EvolveStats.html b/docs/java/com/sleepycat/persist/evolve/EvolveStats.html
new file mode 100644
index 0000000..4d29e8e
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/EvolveStats.html
@@ -0,0 +1,276 @@

EvolveStats (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class EvolveStats

java.lang.Object
  com.sleepycat.persist.evolve.EvolveStats

public class EvolveStats extends java.lang.Object

Statistics accumulated during eager entity evolution.
Author:
Mark Hayes

See Also:
Class Evolution

Method Summary
int  getNConverted()
    The total number of entities converted during eager evolution.

int  getNRead()
    The total number of entities read during eager evolution.
Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Method Detail
getNRead

public int getNRead()

The total number of entities read during eager evolution.

Returns:
the number of entities read.
getNConverted

public int getNConverted()

The total number of entities converted during eager evolution.

Returns:
the number of entities converted.

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/IncompatibleClassException.html b/docs/java/com/sleepycat/persist/evolve/IncompatibleClassException.html
new file mode 100644
index 0000000..01a62fa
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/IncompatibleClassException.html
@@ -0,0 +1,305 @@

IncompatibleClassException (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class IncompatibleClassException

All Implemented Interfaces:
java.io.Serializable

public class IncompatibleClassException extends OperationFailureException

A class has been changed incompatibly and no mutation has been configured to handle the change, or a new class version number has not been assigned.

In a replicated environment, this exception is also thrown when upgrading an application (persistent classes have been changed) and an upgraded node is elected Master before all of the Replica nodes have been upgraded. See Upgrading a Replication Group for more information.

Author:
Mark Hayes

See Also:
EntityStore.EntityStore, Entity.version(), Persistent.version(), Class Evolution, Serialized Form

Method Summary

Methods inherited from class java.lang.Throwable:
addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
Constructor Detail
IncompatibleClassException

public IncompatibleClassException(java.lang.String message)

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/Mutation.html b/docs/java/com/sleepycat/persist/evolve/Mutation.html
new file mode 100644
index 0000000..1985cfd
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/Mutation.html
@@ -0,0 +1,364 @@

Mutation (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class Mutation

java.lang.Object
  com.sleepycat.persist.evolve.Mutation

All Implemented Interfaces:
java.io.Serializable

Direct Known Subclasses:
Converter, Deleter, Renamer

public abstract class Mutation extends java.lang.Object implements java.io.Serializable

The base class for all mutations.
Author:
Mark Hayes

See Also:
Class Evolution, Serialized Form

Method Summary
boolean  equals(java.lang.Object other)
    Returns true if the class name, class version and field name are equal in this object and given object.

java.lang.String  getClassName()
    Returns the class to which this mutation applies.

int  getClassVersion()
    Returns the class version to which this mutation applies.

java.lang.String  getFieldName()
    Returns the field name to which this mutation applies, or null if this mutation applies to the class itself.

int  hashCode()

java.lang.String  toString()
Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Method Detail
getClassName

public java.lang.String getClassName()

Returns the class to which this mutation applies.

Returns:
the class to which this mutation applies.
getClassVersion

public int getClassVersion()

Returns the class version to which this mutation applies.

Returns:
the class version to which this mutation applies.
getFieldName

public java.lang.String getFieldName()

Returns the field name to which this mutation applies, or null if this mutation applies to the class itself.

Returns:
the field name to which this mutation applies, or null.
equals

public boolean equals(java.lang.Object other)

Returns true if the class name, class version and field name are equal in this object and given object.

Overrides:
equals in class java.lang.Object
hashCode

public int hashCode()

Overrides:
hashCode in class java.lang.Object
toString

public java.lang.String toString()

Overrides:
toString in class java.lang.Object

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

diff --git a/docs/java/com/sleepycat/persist/evolve/Mutations.html b/docs/java/com/sleepycat/persist/evolve/Mutations.html
new file mode 100644
index 0000000..d177eee
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/evolve/Mutations.html
@@ -0,0 +1,587 @@

Mutations (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.evolve

    Class Mutations

java.lang.Object
  com.sleepycat.persist.evolve.Mutations

All Implemented Interfaces:
java.io.Serializable

public class Mutations extends java.lang.Object implements java.io.Serializable

A collection of mutations for configuring class evolution.

Mutations are configured when a store is opened via StoreConfig.setMutations. For example:

  Mutations mutations = new Mutations();
  // Add mutations...
  StoreConfig config = new StoreConfig();
  config.setMutations(mutations);
  EntityStore store = new EntityStore(env, "myStore", config);

Mutations cause data conversion to occur lazily as instances are read from the store. The EntityStore.evolve method may also be used to perform eager conversion.


Not all incompatible class changes can be handled via mutations. For example, complex refactoring may require a transformation that manipulates multiple entity instances at once. Such changes are not possible with mutations but can be made by performing a store conversion.

Author:
Mark Hayes

See Also:
Class Evolution, Serialized Form

Constructor Summary
Mutations()
    Creates an empty set of mutations.
Method Summary
void  addConverter(Converter converter)
    Adds a converter mutation.

void  addDeleter(Deleter deleter)
    Adds a deleter mutation.

void  addRenamer(Renamer renamer)
    Adds a renamer mutation.

boolean  equals(java.lang.Object other)
    Returns true if this collection has the same set of mutations as the given collection and all mutations are equal.

Converter  getConverter(java.lang.String className, int classVersion, java.lang.String fieldName)
    Returns the converter mutation for the given class, version and field, or null if none exists.

java.util.Collection<Converter>  getConverters()
    Returns an unmodifiable collection of all converter mutations.

Deleter  getDeleter(java.lang.String className, int classVersion, java.lang.String fieldName)
    Returns the deleter mutation for the given class, version and field, or null if none exists.

java.util.Collection<Deleter>  getDeleters()
    Returns an unmodifiable collection of all deleter mutations.

Renamer  getRenamer(java.lang.String className, int classVersion, java.lang.String fieldName)
    Returns the renamer mutation for the given class, version and field, or null if none exists.

java.util.Collection<Renamer>  getRenamers()
    Returns an unmodifiable collection of all renamer mutations.

int  hashCode()

boolean  isEmpty()
    Returns true if no mutations are present.

java.lang.String  toString()
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          Mutations

          +
          public Mutations()
          +
          Creates an empty set of mutations.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          isEmpty

          +
          public boolean isEmpty()
          +
          Returns true if no mutations are present.
          +
          +
          Returns:
          +
          true if no mutations are present.
          +
          +
        • +
        + + + +
          +
        • +

          addRenamer

          +
          public void addRenamer(Renamer renamer)
          +
          Adds a renamer mutation.
          +
          +
          Parameters:
          +
          renamer - the Renamer.
          +
          +
        • +
        + + + +
          +
        • +

          getRenamer

          +
          public Renamer getRenamer(java.lang.String className,
          +                          int classVersion,
          +                          java.lang.String fieldName)
          +
          Returns the renamer mutation for the given class, version and field, or + null if none exists. A null field name should be specified to get a + class renamer.
          +
          +
          Parameters:
          +
          className - the class name.
          +
          classVersion - the class version.
          +
          fieldName - the field name in the given class version.
          +
          Returns:
          +
          the Renamer, or null.
          +
          +
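For example, a minimal sketch of looking up a class renamer; the class name and version here are hypothetical, and mutations is assumed to be a populated Mutations object:

  // A null field name looks up a class renamer rather than a field renamer.
  Renamer classRenamer = mutations.getRenamer("my.package.Person", 0, null);
  if (classRenamer != null) {
      System.out.println("Renamed to: " + classRenamer.getNewName());
  }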
        • +
        + + + +
          +
        • +

          getRenamers

          +
          public java.util.Collection<Renamer> getRenamers()
          +
          Returns an unmodifiable collection of all renamer mutations.
          +
          +
          Returns:
          +
          the renamers.
          +
          +
        • +
        + + + +
          +
        • +

          addDeleter

          +
          public void addDeleter(Deleter deleter)
          +
          Adds a deleter mutation.
          +
          +
          Parameters:
          +
          deleter - the Deleter.
          +
          +
        • +
        + + + +
          +
        • +

          getDeleter

          +
          public Deleter getDeleter(java.lang.String className,
          +                          int classVersion,
          +                          java.lang.String fieldName)
          +
          Returns the deleter mutation for the given class, version and field, or + null if none exists. A null field name should be specified to get a + class deleter.
          +
          +
          Parameters:
          +
          className - the class name.
          +
          classVersion - the class version.
          +
          fieldName - the field name.
          +
          Returns:
          +
          the Deleter, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getDeleters

          +
          public java.util.Collection<Deleter> getDeleters()
          +
          Returns an unmodifiable collection of all deleter mutations.
          +
          +
          Returns:
          +
          the deleters.
          +
          +
        • +
        + + + +
          +
        • +

          addConverter

          +
          public void addConverter(Converter converter)
          +
          Adds a converter mutation.
          +
          +
          Parameters:
          +
          converter - the Converter.
          +
          +
        • +
        + + + +
          +
        • +

          getConverter

          +
          public Converter getConverter(java.lang.String className,
          +                              int classVersion,
          +                              java.lang.String fieldName)
          +
          Returns the converter mutation for the given class, version and field, + or null if none exists. A null field name should be specified to get a + class converter.
          +
          +
          Parameters:
          +
          className - the class name.
          +
          classVersion - the class version.
          +
          fieldName - the field name.
          +
          Returns:
          +
          the Converter, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getConverters

          +
          public java.util.Collection<Converter> getConverters()
          +
          Returns an unmodifiable collection of all converter mutations.
          +
          +
          Returns:
          +
          the converters.
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object other)
          +
          Returns true if this collection has the same set of mutations as the + given collection and all mutations are equal.
          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          +
          Overrides:
          +
          hashCode in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/Renamer.html b/docs/java/com/sleepycat/persist/evolve/Renamer.html new file mode 100644 index 0000000..665624d --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/Renamer.html @@ -0,0 +1,446 @@ + + + + + +Renamer (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.persist.evolve
    +

    Class Renamer

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class Renamer
      +extends Mutation
      +
      A mutation for renaming a class or field without changing the instance or + field value. For example: +
      +  package my.package;
      +
      +  // The old class.  Version 0 is implied.
      +  //
      +  @Entity
      +  class Person {
      +      String name;
      +  }
      +
      +  // The new class.  A new version number must be assigned.
      +  //
      +  @Entity(version=1)
      +  class Human {
      +      String fullName;
      +  }
      +
      +  // Add the mutations.
      +  //
      +  Mutations mutations = new Mutations();
      +
      +  mutations.addRenamer(new Renamer("my.package.Person", 0,
      +                                   Human.class.getName()));
      +
      +  mutations.addRenamer(new Renamer("my.package.Person", 0,
      +                                   "name", "fullName"));
      +
      +  // Configure the mutations as described here.
      + + +

      In a replicated environment, renaming an entity class or secondary key + field may require handling the DatabasePreemptedException during the upgrade process. + See + Upgrading a Replication Group + for more information.

      +
      +
      +
      Author:
      +
      Mark Hayes
      +
      See Also:
      +
      Class Evolution, +Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + + + + +
        Constructors 
        Constructor and Description
        Renamer(java.lang.String fromClass, + int fromVersion, + java.lang.String toClass) +
        Creates a mutation for renaming the class of all instances of the given + class version.
        +
        Renamer(java.lang.String declaringClass, + int declaringClassVersion, + java.lang.String fromField, + java.lang.String toField) +
        Creates a mutation for renaming the given field for all instances of the + given class version.
        +
        +
      • +
      + + +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          Renamer

          +
          public Renamer(java.lang.String fromClass,
          +               int fromVersion,
          +               java.lang.String toClass)
          +
          Creates a mutation for renaming the class of all instances of the given + class version.
          +
          +
          Parameters:
          +
          fromClass - the class to rename.
          +
          fromVersion - the class version to rename.
          +
          toClass - the new class name.
          +
          +
        • +
        + + + +
          +
        • +

          Renamer

          +
          public Renamer(java.lang.String declaringClass,
          +               int declaringClassVersion,
          +               java.lang.String fromField,
          +               java.lang.String toField)
          +
          Creates a mutation for renaming the given field for all instances of the + given class version.
          +
          +
          Parameters:
          +
          declaringClass - the class to which this mutation applies.
          +
          declaringClassVersion - the class version to which this mutation + applies.
          +
          fromField - field name in the given class version.
          +
          toField - the new field name.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getNewName

          +
          public java.lang.String getNewName()
          +
          Returns the new class or field name specified in the constructor.
          +
          +
          Returns:
          +
          the new name.
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object other)
          +
          Returns true if the new class name is equal in this object and given + object, and if the Mutation.equals(java.lang.Object) method returns true.
          +
          +
          Overrides:
          +
          equals in class Mutation
          +
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          +
          Overrides:
          +
          hashCode in class Mutation
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          +
          Overrides:
          +
          toString in class Mutation
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/Conversion.html b/docs/java/com/sleepycat/persist/evolve/class-use/Conversion.html new file mode 100644 index 0000000..e76cb35 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/Conversion.html @@ -0,0 +1,208 @@ + + + + + +Uses of Interface com.sleepycat.persist.evolve.Conversion (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.persist.evolve.Conversion

    +
    +
    +
      +
    • + + + + + + + + + + + + +
      Packages that use Conversion 
      PackageDescription
      com.sleepycat.persist.evolve +
      Utilities for managing class evolution of persistent objects.
      +
      +
    • +
    • +
        +
      • + + +

        Uses of Conversion in com.sleepycat.persist.evolve

        + + + + + + + + + + + + +
        Methods in com.sleepycat.persist.evolve that return Conversion 
        Modifier and TypeMethod and Description
        ConversionConverter.getConversion() +
        Returns the converter instance specified to the constructor.
        +
        + + + + + + + + + + + + + + + + +
        Constructors in com.sleepycat.persist.evolve with parameters of type Conversion 
        Constructor and Description
        Converter(java.lang.String className, + int classVersion, + Conversion conversion) +
        Creates a mutation for converting all instances of the given class + version to the current version of the class.
        +
        Converter(java.lang.String declaringClassName, + int declaringClassVersion, + java.lang.String fieldName, + Conversion conversion) +
        Creates a mutation for converting all values of the given field in the + given class version to a type compatible with the current declared type + of the field.
        +
        EntityConverter(java.lang.String entityClassName, + int classVersion, + Conversion conversion, + java.util.Set<java.lang.String> deletedKeys) +
        Creates a mutation for converting all instances of the given entity + class version to the current version of the class.
        +
        +
      • +
      +
    • +
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/Converter.html b/docs/java/com/sleepycat/persist/evolve/class-use/Converter.html new file mode 100644 index 0000000..52313f2 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/Converter.html @@ -0,0 +1,221 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.Converter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.Converter

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/DeletedClassException.html b/docs/java/com/sleepycat/persist/evolve/class-use/DeletedClassException.html new file mode 100644 index 0000000..13dde5e --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/DeletedClassException.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.DeletedClassException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.DeletedClassException

    +
    +
    No usage of com.sleepycat.persist.evolve.DeletedClassException
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/Deleter.html b/docs/java/com/sleepycat/persist/evolve/class-use/Deleter.html new file mode 100644 index 0000000..287881d --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/Deleter.html @@ -0,0 +1,206 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.Deleter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.Deleter

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/EntityConverter.html b/docs/java/com/sleepycat/persist/evolve/class-use/EntityConverter.html new file mode 100644 index 0000000..b2b3c6b --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/EntityConverter.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.EntityConverter (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.EntityConverter

    +
    +
    No usage of com.sleepycat.persist.evolve.EntityConverter
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/EvolveConfig.html b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveConfig.html new file mode 100644 index 0000000..ee4cf38 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveConfig.html @@ -0,0 +1,221 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.EvolveConfig (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.EvolveConfig

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/EvolveConfigBeanInfo.html b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveConfigBeanInfo.html new file mode 100644 index 0000000..a08ae58 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveConfigBeanInfo.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.EvolveConfigBeanInfo (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.EvolveConfigBeanInfo

    +
    +
    No usage of com.sleepycat.persist.evolve.EvolveConfigBeanInfo
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/EvolveEvent.html b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveEvent.html new file mode 100644 index 0000000..9fbcba1 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveEvent.html @@ -0,0 +1,173 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.EvolveEvent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.EvolveEvent

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/EvolveListener.html b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveListener.html new file mode 100644 index 0000000..acd733e --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveListener.html @@ -0,0 +1,189 @@ + + + + + +Uses of Interface com.sleepycat.persist.evolve.EvolveListener (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.persist.evolve.EvolveListener

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/EvolveStats.html b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveStats.html new file mode 100644 index 0000000..2a8bebe --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/EvolveStats.html @@ -0,0 +1,201 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.EvolveStats (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.EvolveStats

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/IncompatibleClassException.html b/docs/java/com/sleepycat/persist/evolve/class-use/IncompatibleClassException.html new file mode 100644 index 0000000..361c44e --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/IncompatibleClassException.html @@ -0,0 +1,174 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.IncompatibleClassException (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.IncompatibleClassException

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/Mutation.html b/docs/java/com/sleepycat/persist/evolve/class-use/Mutation.html new file mode 100644 index 0000000..0fd2948 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/Mutation.html @@ -0,0 +1,193 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.Mutation (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.Mutation

    +
    +
    +
      +
    • + + + + + + + + + + + + +
      Packages that use Mutation 
      PackageDescription
      com.sleepycat.persist.evolve +
      Utilities for managing class evolution of persistent objects.
      +
      +
    • +
    • +
        +
      • + + +

        Uses of Mutation in com.sleepycat.persist.evolve

        + + + + + + + + + + + + + + + + + + + + + + + + +
        Subclasses of Mutation in com.sleepycat.persist.evolve 
        Modifier and TypeClass and Description
        class Converter +
        A mutation for converting an old version of an object value to conform to + the current class or field definition.
        +
        class Deleter +
        A mutation for deleting an entity class or field.
        +
        class EntityConverter +
        A subclass of Converter that allows specifying keys to be deleted.
        +
        class Renamer +
        A mutation for renaming a class or field without changing the instance or + field value.
        +
        +
      • +
      +
    • +
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/Mutations.html b/docs/java/com/sleepycat/persist/evolve/class-use/Mutations.html new file mode 100644 index 0000000..eb84296 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/Mutations.html @@ -0,0 +1,224 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.Mutations (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.Mutations

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/class-use/Renamer.html b/docs/java/com/sleepycat/persist/evolve/class-use/Renamer.html new file mode 100644 index 0000000..dc8c935 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/class-use/Renamer.html @@ -0,0 +1,206 @@ + + + + + +Uses of Class com.sleepycat.persist.evolve.Renamer (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.evolve.Renamer

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/package-frame.html b/docs/java/com/sleepycat/persist/evolve/package-frame.html new file mode 100644 index 0000000..106d22e --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/package-frame.html @@ -0,0 +1,39 @@ + + + + + +com.sleepycat.persist.evolve (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.persist.evolve

    + + + diff --git a/docs/java/com/sleepycat/persist/evolve/package-summary.html b/docs/java/com/sleepycat/persist/evolve/package-summary.html new file mode 100644 index 0000000..49a0cb6 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/package-summary.html @@ -0,0 +1,624 @@ + + + + + +com.sleepycat.persist.evolve (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Package com.sleepycat.persist.evolve

    +
    +
    Utilities for managing class evolution of persistent objects.
    +
    +

    See: Description

    +
    +
    +
      +
    • + + + + + + + + + + + + + + + + +
      Interface Summary 
      InterfaceDescription
      Conversion +
      Converts an old version of an object value to conform to the current class + or field definition.
      +
      EvolveListener +
      The listener interface called during eager entity evolution.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Class Summary 
      ClassDescription
      Converter +
      A mutation for converting an old version of an object value to conform to + the current class or field definition.
      +
      Deleter +
      A mutation for deleting an entity class or field.
      +
      EntityConverter +
      A subclass of Converter that allows specifying keys to be deleted.
      +
      EvolveConfig +
      Configuration properties for eager conversion of unevolved objects.
      +
      EvolveConfigBeanInfo 
      EvolveEvent +
      The event passed to the EvolveListener interface during eager entity + evolution.
      +
      EvolveStats +
      Statistics accumulated during eager entity evolution.
      +
      Mutation +
      The base class for all mutations.
      +
      Mutations +
      A collection of mutations for configuring class evolution.
      +
      Renamer +
      A mutation for renaming a class or field without changing the instance or + field value.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + +
      Exception Summary 
      ExceptionDescription
      DeletedClassException +
      While reading from an index, an instance of a deleted class version was + encountered.
      +
      IncompatibleClassException +
      A class has been changed incompatibly and no mutation has been configured to + handle the change or a new class version number has not been assigned.
      +
      +
    • +
    + + + +

    Package com.sleepycat.persist.evolve Description

    +
    Utilities for managing class evolution of persistent objects. + +

    Class Evolution

    + +

    For persistent data that is not short lived, changes to persistent classes +are almost inevitable. Some changes are compatible with existing types, and +data conversion for these changes is performed automatically and transparently. +Other changes are not compatible with existing types. Mutations can be used to +explicitly manage many types of incompatible changes.

    + +

    Not all incompatible class changes can be handled via mutations. For +example, complex refactoring may require a transformation that manipulates +multiple entity instances at once. Such changes are not possible with +mutations but can be made by performing a store +conversion.

    + +

    The different categories of type changes are described below.

    + +

    Key Field Changes

    + +

    Unlike entity data, key data is not versioned. Therefore, the physical key +format for an index is fixed once the index has been opened, and the changes +allowed for key fields are very limited. The only changes allowed for key +fields are:

    +
      +
    • The name of a key field may be changed, as long as this change is +accompanied by a Renamer mutation.
    • +
    • A primitive type may be changed to its corresponding primitive wrapper +type. This is a compatible change.
    • +
    • For primary key fields and fields of a composite key class, a primitive +wrapper type may be changed to its corresponding primitive type. This is +allowed because these key fields with reference types may never have null +values. This is a compatible change.
    • +
    + +

    Any other changes to a key field are incompatible and may be made only by +performing a store conversion.
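As a concrete illustration of the rename rule above, a hedged sketch; the class and field names are hypothetical:

  // Hypothetical example: the key field "ssn" in version 0 of
  // my.package.Person was renamed to "taxId" in the new class version.
  Mutations mutations = new Mutations();
  mutations.addRenamer(new Renamer("my.package.Person", 0, "ssn", "taxId"));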

    + +

Key ordering, including the behavior of a custom Comparable, is also fixed, since keys are stored in order in the +index. The specifications for key ordering may not be changed, and the +developer is responsible for not changing the behavior of a Comparable +key class. WARNING: Changing the behavior of a Comparable key class is likely to make the index unusable.

    + +

    Compatible Type Changes

    + +

    Entity data, unlike key data, is versioned. Therefore, some changes can be +made compatibly and other changes can be handled via mutations. Compatible +changes are defined below. To make a compatible class change, a mutation is +not required; however, the class version must be assigned a new (greater) +integer value.

    + +

    Changes to a class hierarchy are compatible in some cases. A new class may +be inserted in the hierarchy. A class may be deleted from the hierarchy as +long as one of the following is true: 1) it contains no persistent fields, 2) +any persistent fields are deleted with field Deleter mutations, or 3) the class +is deleted with a class Deleter mutation. Classes in an existing hierarchy may +not be reordered compatibly, and fields may not moved from one class to another +compatibly; for such changes a class Converter mutation is required.

    + +

    Changes to field types in entity class definitions are compatible when they +conform to the Java Language Specification definitions for Widening +Primitive Conversions and Widening +Reference Conversions. For example, a smaller integer +type may be changed to a larger integer type, and a reference type may be +changed to one of its supertypes. Automatic widening conversions are performed +as described in the Java Language Specification.
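For example, a sketch of a compatible widening change; the class is hypothetical, and note that the class version must still be incremented:

  // Version 0 (implied): the field was declared as int.
  @Entity
  class Account {
      @PrimaryKey
      long id;
      int balance;
  }

  // Version 1: the field is widened to long, a compatible change.
  // No mutation is needed, but a new version number must be assigned.
  @Entity(version=1)
  class Account {
      @PrimaryKey
      long id;
      long balance;
  }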

    + +

    Primitive types may also be compatibly changed to their corresponding +primitive wrapper types, or to the wrapper type for a widened primitive type. +However, changing from a primitive wrapper type to a primitive type is not a +compatible change since existing null values could not be represented.

    + +

    Integer primitive types (byte, short, char, int, long) and their primitive +wrapper types may be compatibly changed to the BigInteger type.

    + +

Enum values may be added compatibly, but may not be deleted or renamed. As +long as new values are declared after existing values, the default sort order +for enum key fields will match the declaration order, i.e., the default sort +order will match the enum ordinal order. If a new value is inserted (declared +before an existing value), it will be sorted after all existing values but +before newly added values. However, these ordering rules are only guaranteed +for enums containing up to 631 values and only if existing values are not +reordered. If more than 631 values are declared or the declarations of +existing values are reordered, then the default sort order will be arbitrary +and will not match the declaration (ordinal) order.

    + +

    In addition, adding fields to a class is a compatible change. When a +persistent instance of a class is read that does not contain the new field, the +new field is initialized by the default constructor.
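For example, a sketch with a hypothetical class; the default constructor supplies the value seen by old records that lack the new field:

  // Version 1 adds a field; instances stored before this version get the
  // value assigned by the default constructor when they are read.
  @Persistent(version=1)
  class Address {
      String street;
      String country;   // new field

      private Address() {
          country = "US";   // default for previously stored instances
      }
  }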

    + +

    All other changes to instance fields are considered incompatible. +Incompatible changes may be handled via mutations, as described next.

    + +

    Note that whenever a class is changed, either compatibly or incompatibly, a +new (higher) class version number must be assigned. See Entity.version() and Persistent.version() for information on assigning +class version numbers.

    + +

    Mutations

    + +

    There are three types of mutations: Renamer, Deleter and Converter.

    + +

    A class or field can be renamed using a Renamer. Renaming is not expensive, since it +does not involve conversion of instance data.

    + +

    A class or field can be deleted using a Deleter.

    +
      +
• Deleting an entity class causes removal of the primary and secondary +indices for the store, in other words, removal of all store entities for that +class and its subclasses. Removal is performed when the store is opened. A +Deleter should be used for an entity class +in all of the following circumstances: +
        +
      • When removing the entity class itself.
      • +
      • When removing Entity from the class + to make it non-persistent.
      • +
      • When removing Entity from the class + and adding Persistent, to use it as an + embedded persistent class but not an entity class. The version of the class + must be incremented in this case.
      • +
      +
    • + +
    • Deleting a non-entity class does not itself cause deletion of instance +data, but is needed to inform DPL that the deleted class will not be used. +Instances of the deleted class must be handled (discarded or converted to +another class) by Deleter or Converter mutations for the field or enclosing +class that contain embedded instances of the deleted class. A Deleter should be used for a non-entity class in +all of the following circumstances: +
        +
      • When removing the persistent class itself.
      • +
      • When removing Persistent from the + class to make it non-persistent.
      • +
      • When removing Persistent from the + class and adding Entity, to use it as an + entity class but not an embedded persistent class. The version of the class + must be incremented in this case.
      • +
      +
    • + +
• Deleting a field causes automatic conversion of the instances containing +that field, in order to discard the field values (a sketch of these deleter +mutations follows this list).
    • +
    + +
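A hedged sketch of the deleter cases above; the class and field names are hypothetical:

  Mutations mutations = new Mutations();

  // Hypothetical: the entity class my.package.Statement (version 0) was
  // removed entirely, along with its primary and secondary indices.
  mutations.addDeleter(new Deleter("my.package.Statement", 0));

  // Hypothetical: the field "fax" was dropped from version 0 of
  // my.package.ContactInfo; instances are converted to discard the value.
  mutations.addDeleter(new Deleter("my.package.ContactInfo", 0, "fax"));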

    Other incompatible changes are handled by creating a Converter mutation and implementing a Conversion.convert method that +manipulates the raw objects and/or simple values directly. The convert +method is passed an object of the old incompatible type and it returns an +object of a current type.
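For example, a minimal sketch of a field conversion; the class and field names are hypothetical, and a production Conversion should also be serializable-safe since the interface extends Serializable:

  // Hypothetical field conversion: version 0 stored "zipCode" as an
  // Integer, while the current class declares it as a String.
  class ZipCodeConversion implements Conversion {
      public void initialize(EntityModel model) {
          // No model metadata is needed for this simple value conversion.
      }
      public Object convert(Object fromValue) {
          return fromValue.toString();
      }
      @Override
      public boolean equals(Object other) {
          return other instanceof ZipCodeConversion;
      }
  }

  Mutations mutations = new Mutations();
  mutations.addConverter(new Converter(
      "my.package.Address", 0, "zipCode", new ZipCodeConversion()));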

    + +

    Conversions can be specified in two ways: for specific fields or for all +instances of a class. A different Converter constructor is used in each case. +Field-specific conversions are used instead of class conversions when both are +applicable.

    + +

Note that a class conversion may not be specified for an enum class. A +field conversion, or a class conversion for the class declaring the field, may +be used.

    + +

    Note that each mutation is applied to a specific class version number. The +class version must be explicitly specified in a mutation for two reasons:

    +
      +
1. This provides safety in the face of multiple unconverted versions of a +given type. Without a version, a single conversion method would have to handle +multiple input types, and would have to distinguish between them by examining +the data or type information.
2. This allows arbitrary changes to be made. For example, a series of name +changes may reuse a given name for more than one version. To identify the +specific type being converted or renamed, a version number is needed.
    +

    See Entity.version() and Persistent.version() for information on assigning +class version numbers.

    + +

    Mutations are therefore responsible for converting each existing +incompatible class version to the current version as defined by a current class +definition. For example, consider that class-version A-1 is initially changed +to A-2 and a mutation is added for converting A-1 to A-2. If later changes in +version A-3 occur before converting all A-1 instances to version A-2, the +converter for A-1 will have to be changed. Instead of converting from A-1 to +A-2 it will need to convert from A-1 to A-3. In addition, a mutation +converting A-2 to A-3 will be needed.

    + +

    When a Converter mutation applies to a +given object, other mutations that may apply to that object are not +automatically performed. It is the responsibility of the Converter to return an object that conforms to +the current class definition, including renaming fields and classes. If the +input object has nested objects or superclasses that also need conversion, the +converter must perform these nested conversions before returning the final +converted object. This rule avoids the complexity and potential errors that +could result if a converter mutation were automatically combined with other +mutations in an arbitrary manner.

    + +

    The EntityStore.evolve +method may optionally be used to ensure that all instances of an old class +version are converted to the current version.

    + +

    Other Metadata Changes

    + +

    When a class that happens to be an entity class is renamed, it remains an +entity class. When a field that happens to be a primary or +secondary key field is renamed, its metadata remains intact as well.

    + +

    When the SecondaryKey annotation is +added to an existing field, a new index is created automatically. The +new index will be populated by reading the entire primary index when the +primary index is opened.

    + +

    When the SecondaryKey annotation is +included with a new field, a new index is created automatically. The +new field is required to be a reference type (not a primitive) and must be +initialized to null (the default behavior) in the default constructor. +Entities will be indexed by the field when they are stored with a non-null key +value.

    + +

    When a field with the SecondaryKey +annotation is deleted, or when the SecondaryKey annotation is removed from a field +without deleting it, the secondary index is removed (dropped). Removal occurs +when the store is opened.

    + +

    The SecondaryKey.relate property may NOT be changed. All other properties of a +SecondaryKey may be changed, although +avoiding changes that cause foreign key integrity errors is the responsibility +of the application developer. For example, if the SecondaryKey.relatedEntity() property is added but +not all existing secondary keys reference existing primary keys for the related +entity, foreign key integrity errors may occur.

    + +

    The PrimaryKey annotation may NOT be +removed from a field in an entity class.

    + +

    The PrimaryKey.sequence() property may be +added, removed, or changed to a different name.

    + +

    The Persistent.proxyFor() property may NOT +be added, removed, or changed to a different class.

    + +

    Warnings on Testing and Backups

    + +

    The application developer is responsible for verifying that class evolution +works properly before deploying with a changed set of persistent classes. The +DPL will report errors when old class definitions cannot be evolved, for +example, when a mutation is missing. To test that no such errors will occur, +application test cases must include instances of all persistent classes.

    + +

    Converter mutations require special testing. Since the application +conversion method is allowed to return instances of any type, the DPL cannot +check that the proper type is returned until the data is accessed. To avoid +data access errors, application test cases must cover converter mutations for +all potential input and output types.

    + +

When secondary keys are dropped or entity classes are deleted, the +underlying databases are deleted and cannot be recovered from the store. This +takes place when the store is opened. It is strongly recommended that a backup +of the entire store be made before opening the store and causing class +evolution to proceed.

    + +

    Store Conversion

    + +

    When mutations are not sufficient for handling class changes, a full store +conversion may be performed. This is necessary for two particular types of +class changes:

    +
      +
    • A change to a physical key format, for example, a change from type +int to type long.
    • +
    • A conversion that involves multiple entities at once, for example, +combining two separate entity classes into a new single entity class.
    • +
    + +

To perform a full store conversion, a program is written that performs the +following steps to copy the data from the old store to a new converted +store (a sketch follows the list):

    +
      +
1. The old store is opened as a RawStore and +the new store is opened as an EntityStore.
2. All entities are read from the old store. Entities are read using a RawStore to allow access to entities for which no +compatible class exists.
3. The RawObject entities are then converted +to the format desired. Raw objects can be arbitrarily manipulated as needed. +The updated raw objects must conform to the new evolved class definitions.
4. The updated raw entities are converted to live objects by calling the +EntityModel.convertRawObject method of the new store. This method converts +raw objects obtained from a different store, as long as they conform to the new +evolved class definitions.
5. The new live objects are written to the new EntityStore using a PrimaryIndex as usual.
    + +
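A hedged sketch of the copy loop above; the store names, environments, and the Person entity class are hypothetical, and error handling and transactions are omitted:

  RawStore rawStore = new RawStore(env, "oldStore", null);
  EntityStore newStore = new EntityStore(newEnv, "newStore", storeConfig);
  EntityModel model = newStore.getModel();

  PrimaryIndex<Object,RawObject> rawIndex =
      rawStore.getPrimaryIndex("my.package.Person");
  PrimaryIndex<Long,Person> newIndex =
      newStore.getPrimaryIndex(Long.class, Person.class);

  EntityCursor<RawObject> entities = rawIndex.entities();
  try {
      for (RawObject raw : entities) {
          // Manipulate the raw object as needed, then convert it to a
          // live object conforming to the new class definitions.
          Person person = (Person) model.convertRawObject(raw);
          newIndex.put(person);
      }
  } finally {
      entities.close();
  }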

    To perform such a conversion, two separate stores must be open at once. +Both stores may be in the same Environment, if +desired, by giving them different store names. But since all data is being +rewritten, there are performance advantages to creating the new store in a new +fresh environment: the data will be compacted as it is written, and the old +store can be removed very quickly by deleting the old environment directory +after the conversion is complete.

    + + + +

    Upgrading a Replication Group

    + +

    When changes to persistent classes are made in a ReplicatedEnvironment, special handling is necessary when +the application is upgraded on the nodes in the replication group. Upgraded +means that the application on a node is stopped, the updated application +classes are installed, and the application is started again.

    + +

    As usual in any sort of replication group upgrade, the Replica nodes must be +upgraded first and the Master node must be upgraded last. If an upgraded node +is elected Master before all of the Replica nodes have been upgraded, either +because of a user error or an unexpected failover, the IncompatibleClassException will be thrown.

    + +

    There are two considerations that must be taken into account during the +upgrade process: new indexes that are temporarily unavailable on a Replica, +and exceptions that result from renamed entity classes and secondary keys.

    + +

    Note that these considerations only apply when a hot upgrade is performed, +i.e., when the replication group will contain a mix of upgraded and +non-upgraded nodes. If all nodes in the group are first taken down and then +the nodes are upgraded and restarted, then no special considerations are +necessary and this documentation is not applicable.

    + +

    Defining New Indexes in a Replication Group

    + +

    When a new entity class is added, which defines a new PrimaryIndex, or a new secondary key is added, which defines a new SecondaryIndex, the indexes will not be immediately available on an upgraded +node. A new index will not be fully available (i.e., on every node) until all +the nodes have been upgraded, the index has been created (and populated, in the +case of a secondary index) on the Master node, and the index has been +replicated to each Replica node via the replication stream.

    + +

    When a node is first upgraded it will start out as a Replica node, and any +newly defined indexes will not be available. The application has two choices +for handling this condition.

    +
      +
1. An application may be able to coordinate among its nodes, by its own means, +to inform all nodes when an index has been created and populated on the Master. +Such an application can choose to access a new index only after it knows the +index is available. Such coordination is not directly supported by JE, +although a transaction with a CommitToken may be used +to simplify the coordination process.
2. An application may call getPrimaryIndex or getSecondaryIndex to +determine whether an index is available. An IndexNotAvailableException is thrown by these methods +when the index has not yet been created or when a secondary index is currently +being populated via the replication stream.
    + +

When an upgraded node is elected Master (this is typically near the end of +the upgrade process), it must call getPrimaryIndex to create +each new primary index, and getSecondaryIndex to +create and populate each new secondary index. A newly elected Master node that +was just upgraded should be prepared for a delay when getSecondaryIndex is +called to create and populate a new secondary index.
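For example, a sketch of probing for a new index on a Replica; the entity class, key name, and surrounding variables are hypothetical:

  // Probe for a secondary index that may not yet have been created on
  // the Master and replicated to this node.
  try {
      SecondaryIndex<String,Long,Person> byName =
          store.getSecondaryIndex(primaryIndex, String.class, "name");
      // The index is available; use it.
  } catch (IndexNotAvailableException e) {
      // Retry later, or fall back to primary-index access.
  }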

    + +

    Renaming Entity Classes and Keys in a Replication Group

    + +

    When a DPL entity class or secondary key field is renamed by an application +using a Renamer mutation, this will result +internally in the underlying database for that entity class or secondary key +being renamed. The actual renaming of the database first occurs on the +upgraded Master node and is then replicated to each Replica node.

    + +

    When the application on a Master or Replica node first accesses the store +after the database has been renamed, a DatabasePreemptedException will be thrown. When this +happens, the application must close any cursors and transactions that are open +for that store, and then close the store and reopen it.
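A minimal sketch of that recovery pattern; the variables are hypothetical, and closing any open cursors and transactions first is assumed to be handled by the caller:

  try {
      Person p = primaryIndex.get(id);
  } catch (DatabasePreemptedException e) {
      // Close any open cursors and transactions for this store here,
      // then close and reopen the store.
      store.close();
      store = new EntityStore(env, "myStore", storeConfig);
      primaryIndex = store.getPrimaryIndex(Long.class, Person.class);
  }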

    + +
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/package-tree.html b/docs/java/com/sleepycat/persist/evolve/package-tree.html new file mode 100644 index 0000000..ad92793 --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/package-tree.html @@ -0,0 +1,196 @@ + + + + + +com.sleepycat.persist.evolve Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Hierarchy For Package com.sleepycat.persist.evolve

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    + +

    Interface Hierarchy

    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/evolve/package-use.html b/docs/java/com/sleepycat/persist/evolve/package-use.html new file mode 100644 index 0000000..1cfc4cd --- /dev/null +++ b/docs/java/com/sleepycat/persist/evolve/package-use.html @@ -0,0 +1,273 @@ + + + + + +Uses of Package com.sleepycat.persist.evolve (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Package
    com.sleepycat.persist.evolve

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/AnnotationModel.html b/docs/java/com/sleepycat/persist/model/AnnotationModel.html new file mode 100644 index 0000000..af6defa --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/AnnotationModel.html @@ -0,0 +1,400 @@ + + + + + +AnnotationModel (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.persist.model
    +

    Class AnnotationModel

    +
    +
    + +
    +
      +
    • +
      +
      +
      public class AnnotationModel
      +extends EntityModel
      +
      The default annotation-based entity model. An AnnotationModel + is based on annotations that are specified for entity classes and their key + fields. + +

      AnnotationModel objects are thread-safe. Multiple threads may + safely call the methods of a shared AnnotationModel object.

      + +

      The set of persistent classes in the annotation model is the set of all + classes with the Persistent or Entity annotation.

      + +

      The annotations used to define persistent classes are: Entity, + Persistent, PrimaryKey, SecondaryKey and KeyField. A good starting point is Entity.

      +
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          AnnotationModel

          +
          public AnnotationModel()
          +
          Constructs a model for annotated entity classes.
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getKnownClasses

          +
          public java.util.Set<java.lang.String> getKnownClasses()
          +
          Description copied from class: EntityModel
          +
          Returns the names of all known persistent classes. A type becomes known + when an instance of the type is stored for the first time or metadata or + type information is queried for a specific class name.
          +
          +
          Specified by:
          +
          getKnownClasses in class EntityModel
          +
          Returns:
          +
          an unmodifiable set of class names.
          +
          +
        • +
        + + + +
          +
        • +

          getKnownSpecialClasses

          +
          public java.util.Set<java.lang.String> getKnownSpecialClasses()
          +
          Description copied from class: EntityModel
          +
          Returns the names of all known persistent enum and array classes that + may be used to store persistent data. This differs from + EntityModel.getKnownClasses(), which does not return enum and array classes + because they have no metadata.
          +
          +
          Overrides:
          +
          getKnownSpecialClasses in class EntityModel
          +
          Returns:
          +
          an unmodifiable set of enum and array class names.
          +
          +
        • +
        + + + +
          +
        • +

          getEntityMetadata

          +
          public EntityMetadata getEntityMetadata(java.lang.String className)
          +
          Description copied from class: EntityModel
          +
          Returns the metadata for a given entity class name.
          +
          +
          Specified by:
          +
          getEntityMetadata in class EntityModel
          +
          Parameters:
          +
          className - the class name.
          +
          Returns:
          +
          the metadata or null if the class is not an entity class or does + not exist.
          +
          +
        • +
        + + + +
          +
        • +

          getClassMetadata

          +
          public ClassMetadata getClassMetadata(java.lang.String className)
          +
          Description copied from class: EntityModel
          +
          Returns the metadata for a given persistent class name, including proxy + classes and entity classes.
          +
          +
          Specified by:
          +
          getClassMetadata in class EntityModel
          +
          Parameters:
          +
          className - the class name.
          +
          Returns:
          +
          the metadata or null if the class is not persistent or does not + exist.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/ClassEnhancer.html b/docs/java/com/sleepycat/persist/model/ClassEnhancer.html new file mode 100644 index 0000000..f02dca8 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/ClassEnhancer.html @@ -0,0 +1,487 @@ + + + + + +ClassEnhancer (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.persist.model
    +

    Class ClassEnhancer

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.persist.model.ClassEnhancer
      • +
      +
    • +
    +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.lang.instrument.ClassFileTransformer
      +
      +
      +
      +
      public class ClassEnhancer
      +extends java.lang.Object
      +implements java.lang.instrument.ClassFileTransformer
      +
Enhances the bytecode of persistent classes to provide efficient access to fields and constructors, and to avoid special security policy settings for accessing non-public members. Classes are enhanced if they are annotated with Entity or Persistent.

ClassEnhancer objects are thread-safe. Multiple threads may safely call the methods of a shared ClassEnhancer object.

As described in the package summary, bytecode enhancement may be used either at runtime or offline (at build time).

To use enhancement offline, this class may be used as a main program. It may also be used via an ant task.

For enhancement at runtime, this class provides the low level support needed to transform class bytes during class loading. To configure runtime enhancement you may use one of the following approaches:

1. The BDB je-<version>.jar or db.jar file may be used as an instrumentation agent as follows:

       java -javaagent:<BDB-JAR-FILE>=enhance:packageNames ...

   packageNames is a comma separated list of packages containing persistent classes. Sub-packages of these packages are also searched. If packageNames is omitted then all packages known to the current classloader are searched.

   The "-v" option may be included in the comma separated list to print the name of each class that is enhanced.

2. The enhance(java.lang.String, byte[]) method may be called to implement a class loader that performs enhancement. Using this approach, it is the developer's responsibility to implement and configure the class loader; a minimal sketch follows this list.

Author:
Mark Hayes
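The following sketch is an editor's illustration of approach 2, not part of the original Javadoc. The package name my.app.entities and the class EnhancingClassLoader are hypothetical assumptions; only the ClassEnhancer constructor and enhance() method documented on this page are relied on.

    import com.sleepycat.persist.model.ClassEnhancer;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Collections;

    public class EnhancingClassLoader extends ClassLoader {

        /* Only enhance classes in the (hypothetical) my.app.entities package. */
        private static final String PACKAGE = "my.app.entities.";

        private final ClassEnhancer enhancer =
            new ClassEnhancer(Collections.singleton("my.app.entities"));

        public EnhancingClassLoader(ClassLoader parent) {
            super(parent);
        }

        @Override
        protected Class<?> loadClass(String name, boolean resolve)
                throws ClassNotFoundException {
            synchronized (getClassLoadingLock(name)) {
                Class<?> c = findLoadedClass(name);
                if (c == null && name.startsWith(PACKAGE)) {
                    c = findClass(name); /* define (possibly enhanced) bytes here */
                }
                if (c == null) {
                    c = super.loadClass(name, false); /* normal delegation */
                }
                if (resolve) {
                    resolveClass(c);
                }
                return c;
            }
        }

        @Override
        protected Class<?> findClass(String name) throws ClassNotFoundException {
            String resource = name.replace('.', '/') + ".class";
            try (InputStream in = getParent().getResourceAsStream(resource)) {
                if (in == null) {
                    throw new ClassNotFoundException(name);
                }
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                byte[] buf = new byte[4096];
                for (int n; (n = in.read(buf)) > 0; ) {
                    out.write(buf, 0, n);
                }
                byte[] bytes = out.toByteArray();
                /* enhance() returns null when no enhancement was performed. */
                byte[] enhanced = enhancer.enhance(name, bytes);
                byte[] defined = (enhanced != null) ? enhanced : bytes;
                return defineClass(name, defined, 0, defined.length);
            } catch (IOException e) {
                throw new ClassNotFoundException(name, e);
            }
        }
    }

Note that the loadClass override bypasses parent-first delegation for the target package; otherwise the parent loader would define the classes before enhancement could occur.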
Constructor Summary

ClassEnhancer()
    Creates a class enhancer that searches all packages.
ClassEnhancer(java.util.Set<java.lang.String> packageNames)
    Creates a class enhancer that searches a given set of packages.

Method Summary

byte[] enhance(java.lang.String className, byte[] classBytes)
    Enhances the given class bytes if the class is annotated with Entity or Persistent.
boolean getVerbose()
    Gets verbose mode.
static void main(java.lang.String[] args)
    Enhances classes in the directories specified.
static void premain(java.lang.String args, java.lang.instrument.Instrumentation inst)
    Enhances classes as specified by a JVM -javaagent argument.
void setVerbose(boolean verbose)
    Sets verbose mode.
byte[] transform(java.lang.ClassLoader loader, java.lang.String className, java.lang.Class<?> classBeingRedefined, java.security.ProtectionDomain protectionDomain, byte[] classfileBuffer)

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Constructor Detail

ClassEnhancer

public ClassEnhancer()

Creates a class enhancer that searches all packages.

ClassEnhancer

public ClassEnhancer(java.util.Set<java.lang.String> packageNames)

Creates a class enhancer that searches a given set of packages.

Parameters:
packageNames - a set of packages to search for persistent classes. Sub-packages of these packages are also searched. If empty or null, all packages known to the current classloader are searched.
Method Detail

main

public static void main(java.lang.String[] args)
                 throws java.lang.Exception

Enhances classes in the directories specified. The class files are replaced when they are enhanced, without changing the file modification date. For example:

    java -cp je-<version>.jar com.sleepycat.persist.model.ClassEnhancer ./classes

The "-v" argument may be specified to print the name of each class file that is enhanced. The total number of class files enhanced will always be printed.

Parameters:
args - one or more directories containing classes to be enhanced. Subdirectories of these directories will also be searched. Optionally, -v may be included to print the name of every class file enhanced.
Throws:
java.lang.Exception - if a problem occurs.
premain

public static void premain(java.lang.String args,
                           java.lang.instrument.Instrumentation inst)

Enhances classes as specified by a JVM -javaagent argument.

Parameters:
args - see java.lang.instrument.Instrumentation.
inst - see java.lang.instrument.Instrumentation.
See Also:
Instrumentation
setVerbose

public void setVerbose(boolean verbose)

Sets verbose mode. True may be specified to print the name of each class file that is enhanced. This property is false by default.

Parameters:
verbose - whether to use verbose mode.

getVerbose

public boolean getVerbose()

Gets verbose mode.

Returns:
whether to use verbose mode.
See Also:
setVerbose(boolean)
transform

public byte[] transform(java.lang.ClassLoader loader,
                        java.lang.String className,
                        java.lang.Class<?> classBeingRedefined,
                        java.security.ProtectionDomain protectionDomain,
                        byte[] classfileBuffer)

Specified by:
transform in interface java.lang.instrument.ClassFileTransformer

enhance

public byte[] enhance(java.lang.String className,
                      byte[] classBytes)

Enhances the given class bytes if the class is annotated with Entity or Persistent.

Parameters:
className - the class name in binary format; for example, "my.package.MyClass$Name", or null if no filtering by class name should be performed.
classBytes - the class file bytes to be enhanced.
Returns:
the enhanced bytes, or null if no enhancement was performed.
diff --git a/docs/java/com/sleepycat/persist/model/ClassEnhancerTask.html b/docs/java/com/sleepycat/persist/model/ClassEnhancerTask.html
new file mode 100644
index 0000000..e1f8d9f
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/ClassEnhancerTask.html
@@ -0,0 +1,404 @@

ClassEnhancerTask (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

Class ClassEnhancerTask

java.lang.Object
  org.apache.tools.ant.ProjectComponent
    org.apache.tools.ant.Task
      com.sleepycat.persist.model.ClassEnhancerTask

All Implemented Interfaces:
java.lang.Cloneable

public class ClassEnhancerTask
extends org.apache.tools.ant.Task
An ant task for running the ClassEnhancer.

ClassEnhancerTask objects are thread-safe. Multiple threads may safely call the methods of a shared ClassEnhancerTask object.

Note that in the BDB Java Edition product, the ClassEnhancerTask class is included in je-<version>.jar. However, in the BDB (C-based) product, it is not included in db.jar because the build is not dependent on the Ant libraries. Therefore, in the BDB product, the application must compile the java/src/com/sleepycat/persist/model/ClassEnhancerTask.java source file and ensure that the compiled class is available to the Ant task. For example, the following Ant task definitions could be used.

For the BDB Java Edition product:

    <taskdef name="enhance-persistent-classes"
             classname="com.sleepycat.persist.model.ClassEnhancerTask"
             classpath="${je.home}/lib/je-<version>.jar"/>

For the BDB (C-based) product:

    <taskdef name="enhance-persistent-classes"
             classname="com.sleepycat.persist.model.ClassEnhancerTask"
             classpath="/path-to-jar/db.jar:/path-to-ClassEnhancerTask-class"/>

The class enhancer task element has no attributes. It may contain one or more nested fileset elements specifying the classes to be enhanced. The class files are replaced when they are enhanced, without changing the file modification date. For example:

    <target name="main">
        <enhance-persistent-classes verbose="no">
            <fileset dir="classes"/>
        </enhance-persistent-classes>
    </target>

The verbose attribute may be specified as "true", "yes" or "on" (like other Ant boolean attributes) to print the name of each class file that is enhanced. The total number of class files enhanced will always be printed.

Author:
Mark Hayes
Field Summary

Fields inherited from class org.apache.tools.ant.Task
target, taskName, taskType, wrapper

Fields inherited from class org.apache.tools.ant.ProjectComponent
description, location, project

Constructor Summary

ClassEnhancerTask()

Method Summary

void addConfiguredFileset(org.apache.tools.ant.types.FileSet files)
void execute()
void setVerbose(boolean verbose)

Methods inherited from class org.apache.tools.ant.Task
bindToOwner, getOwningTarget, getRuntimeConfigurableWrapper, getTaskName, getTaskType, getWrapper, handleErrorFlush, handleErrorOutput, handleFlush, handleInput, handleOutput, init, isInvalid, log, log, log, log, maybeConfigure, perform, reconfigure, setOwningTarget, setRuntimeConfigurableWrapper, setTaskName, setTaskType

Methods inherited from class org.apache.tools.ant.ProjectComponent
clone, getDescription, getLocation, getProject, setDescription, setLocation, setProject

Methods inherited from class java.lang.Object
equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Constructor Detail

ClassEnhancerTask

public ClassEnhancerTask()

Method Detail

execute

public void execute()
             throws org.apache.tools.ant.BuildException

Overrides:
execute in class org.apache.tools.ant.Task
Throws:
org.apache.tools.ant.BuildException

addConfiguredFileset

public void addConfiguredFileset(org.apache.tools.ant.types.FileSet files)

setVerbose

public void setVerbose(boolean verbose)
diff --git a/docs/java/com/sleepycat/persist/model/ClassMetadata.html b/docs/java/com/sleepycat/persist/model/ClassMetadata.html
new file mode 100644
index 0000000..6443d07
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/ClassMetadata.html
@@ -0,0 +1,569 @@

ClassMetadata (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

Class ClassMetadata

java.lang.Object
  com.sleepycat.persist.model.ClassMetadata

All Implemented Interfaces:
java.io.Serializable

public class ClassMetadata
extends java.lang.Object
implements java.io.Serializable
The metadata for a persistent class. A persistent class may be specified with the Entity or Persistent annotation.

ClassMetadata objects are thread-safe. Multiple threads may safely call the methods of a shared ClassMetadata object.

This and other metadata classes are classes rather than interfaces to allow adding properties to the model at a future date without causing incompatibilities. Any such property will be given a default value and its use will be optional.

Author:
Mark Hayes
See Also:
Serialized Form
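As an illustration of how a custom EntityModel might supply this metadata without annotations, here is a minimal sketch (not from the original Javadoc). The my.app.Person class is hypothetical, and the PrimaryKeyMetadata (name, className, declaringClassName, sequenceName) constructor is an assumption.

    import com.sleepycat.persist.model.ClassMetadata;
    import com.sleepycat.persist.model.PrimaryKeyMetadata;

    public class PersonMetadataFactory {

        static ClassMetadata personMetadata() {
            PrimaryKeyMetadata primaryKey = new PrimaryKeyMetadata(
                "id",            // field name
                "long",          // field class name
                "my.app.Person", // declaring class (hypothetical)
                null);           // no sequence
            return new ClassMetadata(
                "my.app.Person", // className
                0,               // version
                null,            // proxiedClassName: not a PersistentProxy
                true,            // entityClass
                primaryKey,
                null,            // secondaryKeys: none declared
                null);           // compositeKeyFields: not a composite key class
        }
    }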
Constructor Summary

ClassMetadata(java.lang.String className, int version, java.lang.String proxiedClassName, boolean entityClass, PrimaryKeyMetadata primaryKey, java.util.Map<java.lang.String,SecondaryKeyMetadata> secondaryKeys, java.util.List<FieldMetadata> compositeKeyFields)
    Used by an EntityModel to construct persistent class metadata.
ClassMetadata(java.lang.String className, int version, java.lang.String proxiedClassName, boolean entityClass, PrimaryKeyMetadata primaryKey, java.util.Map<java.lang.String,SecondaryKeyMetadata> secondaryKeys, java.util.List<FieldMetadata> compositeKeyFields, java.util.Collection<FieldMetadata> persistentFields)
    Used by an EntityModel to construct persistent class metadata.

Method Summary

boolean equals(java.lang.Object other)
java.lang.String getClassName()
    Returns the name of the persistent class.
java.util.List<FieldMetadata> getCompositeKeyFields()
    Returns an unmodifiable list of metadata for the fields making up a composite key, or null if this is not a composite key class.
java.util.Collection<FieldMetadata> getPersistentFields()
    Returns an unmodifiable list of metadata for the persistent fields in this class, or null if the default rules for persistent fields should be used.
PrimaryKeyMetadata getPrimaryKey()
    Returns the primary key metadata for a key declared in this class, or null if none is declared.
java.lang.String getProxiedClassName()
    Returns the class name of the proxied class if this class is a PersistentProxy, or null otherwise.
java.util.Map<java.lang.String,SecondaryKeyMetadata> getSecondaryKeys()
    Returns an unmodifiable map of key name (which may be different from field name) to secondary key metadata for all secondary keys declared in this class, or null if no secondary keys are declared in this class.
int getVersion()
    Returns the version of this persistent class.
int hashCode()
boolean isEntityClass()
    Returns whether this class is an entity class.

Methods inherited from class java.lang.Object
clone, finalize, getClass, notify, notifyAll, toString, wait, wait, wait

Constructor Detail

ClassMetadata

public ClassMetadata(java.lang.String className,
                     int version,
                     java.lang.String proxiedClassName,
                     boolean entityClass,
                     PrimaryKeyMetadata primaryKey,
                     java.util.Map<java.lang.String,SecondaryKeyMetadata> secondaryKeys,
                     java.util.List<FieldMetadata> compositeKeyFields)

Used by an EntityModel to construct persistent class metadata. The optional getPersistentFields() property will be set to null.

Parameters:
className - the class name.
version - the version.
proxiedClassName - the proxied class name.
entityClass - whether the class is an entity class.
primaryKey - the primary key metadata.
secondaryKeys - the secondary key metadata.
compositeKeyFields - the composite key field metadata.

ClassMetadata

public ClassMetadata(java.lang.String className,
                     int version,
                     java.lang.String proxiedClassName,
                     boolean entityClass,
                     PrimaryKeyMetadata primaryKey,
                     java.util.Map<java.lang.String,SecondaryKeyMetadata> secondaryKeys,
                     java.util.List<FieldMetadata> compositeKeyFields,
                     java.util.Collection<FieldMetadata> persistentFields)

Used by an EntityModel to construct persistent class metadata.

Parameters:
className - the class name.
version - the version.
proxiedClassName - the proxied class name.
entityClass - whether the class is an entity class.
primaryKey - the primary key metadata.
secondaryKeys - the secondary key metadata.
compositeKeyFields - the composite key field metadata.
persistentFields - the persistent field metadata.
Method Detail

getClassName

public java.lang.String getClassName()

Returns the name of the persistent class.

Returns:
the name of the persistent class.

getVersion

public int getVersion()

Returns the version of this persistent class. This may be specified using the Entity.version() or Persistent.version() annotation.

Returns:
the version of this persistent class.

getProxiedClassName

public java.lang.String getProxiedClassName()

Returns the class name of the proxied class if this class is a PersistentProxy, or null otherwise.

Returns:
the class name of the proxied class, or null.

isEntityClass

public boolean isEntityClass()

Returns whether this class is an entity class.

Returns:
whether this class is an entity class.

getPrimaryKey

public PrimaryKeyMetadata getPrimaryKey()

Returns the primary key metadata for a key declared in this class, or null if none is declared. This may be specified using the PrimaryKey annotation.

Returns:
the primary key metadata, or null.

getSecondaryKeys

public java.util.Map<java.lang.String,SecondaryKeyMetadata> getSecondaryKeys()

Returns an unmodifiable map of key name (which may be different from field name) to secondary key metadata for all secondary keys declared in this class, or null if no secondary keys are declared in this class. This metadata may be specified using SecondaryKey annotations.

Returns:
the unmodifiable map, or null.

getCompositeKeyFields

public java.util.List<FieldMetadata> getCompositeKeyFields()

Returns an unmodifiable list of metadata for the fields making up a composite key, or null if this is not a composite key class. The order of the fields in the returned list determines their stored order and may be specified using the KeyField annotation. When the composite key class does not implement Comparable, the order of the fields is the relative sort order. A sketch of a composite key class follows.

Returns:
the unmodifiable list, or null.
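For illustration (a sketch with hypothetical field names, not from the original Javadoc), a composite key class declares its field order with KeyField:

    import com.sleepycat.persist.model.KeyField;
    import com.sleepycat.persist.model.Persistent;

    @Persistent
    class FullName {
        @KeyField(1) String lastName;  // sorts first
        @KeyField(2) String firstName; // sorts second

        private FullName() {} // default constructor required by the DPL
    }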
getPersistentFields

public java.util.Collection<FieldMetadata> getPersistentFields()

Returns an unmodifiable list of metadata for the persistent fields in this class, or null if the default rules for persistent fields should be used. All fields returned must be declared in this class and must be non-static.

By default (if null is returned) the persistent fields of a class will be all declared instance fields that are non-transient (are not declared with the transient keyword). The default rules may be overridden by an EntityModel. For example, the AnnotationModel overrides the default rules when the NotPersistent or NotTransient annotation is specified.

Returns:
the unmodifiable list, or null.

equals

public boolean equals(java.lang.Object other)

Overrides:
equals in class java.lang.Object

hashCode

public int hashCode()

Overrides:
hashCode in class java.lang.Object
diff --git a/docs/java/com/sleepycat/persist/model/DeleteAction.html b/docs/java/com/sleepycat/persist/model/DeleteAction.html
new file mode 100644
index 0000000..d99add2
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/DeleteAction.html
@@ -0,0 +1,390 @@

DeleteAction (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

Enum DeleteAction

java.lang.Object
  java.lang.Enum<DeleteAction>
    com.sleepycat.persist.model.DeleteAction

All Implemented Interfaces:
java.io.Serializable, java.lang.Comparable<DeleteAction>

public enum DeleteAction
extends java.lang.Enum<DeleteAction>
Specifies the action to take when a related entity is deleted having a primary key value that exists as a secondary key value for this entity. This can be specified using a SecondaryKey.onRelatedEntityDelete() annotation.

Author:
Mark Hayes
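As a minimal sketch (the Employee and Department entities are hypothetical, not from the original Javadoc), the action is chosen on a secondary key field; note that NULLIFY requires a reference type, per the NULLIFY constant below.

    import com.sleepycat.persist.model.DeleteAction;
    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;
    import com.sleepycat.persist.model.SecondaryKey;
    import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;

    @Entity
    class Employee {
        @PrimaryKey
        long id;

        /* If the related Department is deleted, clear this key instead of
           aborting the transaction (the ABORT default). */
        @SecondaryKey(relate = MANY_TO_ONE,
                      relatedEntity = Department.class,
                      onRelatedEntityDelete = DeleteAction.NULLIFY)
        Long departmentId; // reference type, as NULLIFY requires

        private Employee() {} // default constructor required by the DPL
    }

    @Entity
    class Department {
        @PrimaryKey
        long id;

        private Department() {}
    }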
Enum Constant Summary

ABORT
    The default action, ABORT, means that an exception is thrown in order to abort the current transaction.
CASCADE
    If CASCADE is specified, then this entity will be deleted also, which could in turn trigger further deletions, causing a cascading effect.
NULLIFY
    If NULLIFY is specified, then the secondary key in this entity is set to null and this entity is updated.

Method Summary

static DeleteAction valueOf(java.lang.String name)
    Returns the enum constant of this type with the specified name.
static DeleteAction[] values()
    Returns an array containing the constants of this enum type, in the order they are declared.

Methods inherited from class java.lang.Enum
clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf

Methods inherited from class java.lang.Object
getClass, notify, notifyAll, wait, wait, wait

Enum Constant Detail
ABORT

public static final DeleteAction ABORT

The default action, ABORT, means that an exception is thrown in order to abort the current transaction. On BDB JE, a DeleteConstraintException is thrown.

CASCADE

public static final DeleteAction CASCADE

If CASCADE is specified, then this entity will be deleted also, which could in turn trigger further deletions, causing a cascading effect.

NULLIFY

public static final DeleteAction NULLIFY

If NULLIFY is specified, then the secondary key in this entity is set to null and this entity is updated. For a secondary key field that has an array or collection type, the array or collection element will be removed by this action. The secondary key field must have a reference (not a primitive) type in order to specify this action.
Method Detail

values

public static DeleteAction[] values()

Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

    for (DeleteAction c : DeleteAction.values())
        System.out.println(c);

Returns:
an array containing the constants of this enum type, in the order they are declared

valueOf

public static DeleteAction valueOf(java.lang.String name)

Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)

Parameters:
name - the name of the enum constant to be returned.
Returns:
the enum constant with the specified name
Throws:
java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
java.lang.NullPointerException - if the argument is null
diff --git a/docs/java/com/sleepycat/persist/model/Entity.html b/docs/java/com/sleepycat/persist/model/Entity.html
new file mode 100644
index 0000000..7358a4c
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/Entity.html
@@ -0,0 +1,447 @@

Entity (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

Annotation Type Entity

@Documented
@Retention(value=RUNTIME)
@Target(value=TYPE)
public @interface Entity
Indicates a persistent entity class. For each entity class, a PrimaryIndex can be used to store and access instances of that class. Optionally, one or more SecondaryIndex objects may be used to access entity instances by secondary key.

Entity Subclasses and Superclasses

An entity class may have any number of subclasses and superclasses; however, none of these may themselves be entity classes (annotated with Entity).

Entity superclasses (which must be annotated with Persistent, not Entity) are used to share common definitions among entity classes. Fields in an entity superclass may be defined as primary or secondary keys. For example, the following BaseClass defines the primary key for any number of entity classes, using a single sequence to assign primary key values that will be unique across all entity classes that use it. The entity class Pet extends the base class, implicitly defining a primary index.

    @Persistent
    class BaseClass {
        @PrimaryKey(sequence="ID")
        long id;
    }

    @Entity
    class Pet extends BaseClass {
        @SecondaryKey(relate=ONE_TO_ONE)
        String name;
        float height;
        float weight;
    }

Entity subclasses (which must be annotated with Persistent, not Entity) are used to provide polymorphism within a single PrimaryIndex. Instances of the entity class and its subclasses are stored in the same PrimaryIndex. For example, the entity class Pet defines a primary index that will contain instances of it and its subclasses, including Cat which is defined below.

Fields in an entity subclass may be defined as secondary keys, and such secondary keys can only be used to query instances of the subclass. For example, although the primary key (id) and secondary key (name) can be used to retrieve any Pet instance, the entity subclass Cat defines a secondary key (finickyness) that only applies to Cat instances. Querying by this key will never retrieve a Dog instance, if such a subclass existed, because a Dog instance will never contain a finickyness key.

    @Persistent
    class Cat extends Pet {
        @SecondaryKey(relate=MANY_TO_ONE)
        int finickyness;
    }

WARNING: Entity subclasses that define secondary keys must be registered prior to storing an instance of the class. This can be done in two ways (see the sketch after this list):

1. The registerClass method may be called to register the subclass before opening the entity store.
2. The getSubclassIndex method may be called to implicitly register the subclass after opening the entity store.
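The following sketch is an illustration, not part of the original Javadoc: it assumes the Pet and Cat classes above, an already-open Environment named env, and an arbitrary store name chosen for the example. It shows the second registration path, where getSubclassIndex implicitly registers Cat.

    import com.sleepycat.je.Environment;
    import com.sleepycat.persist.EntityStore;
    import com.sleepycat.persist.PrimaryIndex;
    import com.sleepycat.persist.SecondaryIndex;
    import com.sleepycat.persist.StoreConfig;

    public class PetStoreOpener {

        static void openIndexes(Environment env) throws Exception {
            StoreConfig config = new StoreConfig();
            config.setAllowCreate(true);
            EntityStore store = new EntityStore(env, "petStore", config);

            // Primary index over Pet and all of its subclasses.
            PrimaryIndex<Long, Pet> petById =
                store.getPrimaryIndex(Long.class, Pet.class);

            // Secondary key declared in the entity class itself.
            SecondaryIndex<String, Long, Pet> petByName =
                store.getSecondaryIndex(petById, String.class, "name");

            // Implicitly registers the Cat subclass and opens its key.
            SecondaryIndex<Integer, Long, Cat> catByFinickyness =
                store.getSubclassIndex(petById, Cat.class,
                                       Integer.class, "finickyness");
        }
    }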

Persistent Fields and Types

All non-transient instance fields of an entity class, as well as its superclasses and subclasses, are persistent. static and transient fields are not persistent. The persistent fields of a class may be private, package-private (default access), protected or public.

It is worthwhile to note the reasons that object persistence is defined in terms of fields rather than properties (getters and setters). This allows business methods (getters and setters) to be defined independently of the persistent state of an object; for example, a setter method may perform validation that could not be performed if it were called during object deserialization. Similarly, this allows public methods to evolve somewhat independently of the (typically non-public) persistent fields.

Simple Types

Persistent types are divided into simple types, enum types, complex types, and array types. Simple types and enum types are single valued, while array types may contain multiple elements and complex types may contain one or more named fields.

Simple types include:

• Java primitive types: boolean, char, byte, short, int, long, float, double
• The wrapper classes for Java primitive types
• BigDecimal
• BigInteger
• String
• Date

When null values are required (for optional key fields, for example), primitive wrapper classes must be used instead of primitive types.

Simple types, enum types and array types do not require annotations to make them persistent.

Complex and Proxy Types

Complex persistent classes must be annotated with Entity or Persistent, or must be proxied by a persistent proxy class (described below). This includes entity classes, subclasses and superclasses, and all other complex classes referenced via fields of these classes.

All complex persistent classes must have a default constructor. The default constructor may be private, package-private (default access), protected, or public. Other constructors are allowed but are not used by the persistence mechanism.

It is sometimes desirable to store instances of a type that is externally defined and cannot be annotated or does not have a default constructor; for example, a class defined in the Java standard libraries or a 3rd party library. In this case, a PersistentProxy class may be used to represent the stored values for the externally defined type. The proxy class itself must be annotated with Persistent like other persistent classes, and the Persistent.proxyFor() property must be specified.

For convenience, built-in proxy classes are included for several common classes (listed below) in the Java library. If you wish, you may define your own PersistentProxy to override these built-in proxies.

• HashSet
• TreeSet
• HashMap
• TreeMap
• ArrayList
• LinkedList

Complex persistent types should in general be application-defined classes. This gives the application control over the persistent state and its evolution over time.

Other Type Restrictions

Entity classes and subclasses may not be used in field declarations for persistent types. Fields of entity classes and subclasses must be simple types or non-entity persistent types (annotated with Persistent, not with Entity).

Entity classes, subclasses and superclasses may be abstract and may implement arbitrary interfaces. Interfaces do not need to be annotated with Persistent in order to be used in a persistent class, since interfaces do not contain instance fields.

Persistent instances of static nested classes are allowed, but the nested class must be annotated with Persistent or Entity. Inner classes (non-static nested classes, including anonymous classes) are not currently allowed as persistent types.

Arrays of simple and persistent complex types are allowed as fields of persistent types. Arrays may be multidimensional. However, an array may not be stored as a top level instance in a primary index. Only instances of entity classes and subclasses may be top level instances in a primary index.

Embedded Objects

As stated above, the embedded (or member) non-transient non-static fields of an entity class are themselves persistent and are stored along with their parent entity object. This allows embedded objects to be stored in an entity to an arbitrary depth.

There is no arbitrary limit to the nesting depth of embedded objects within an entity; however, there is a practical limit. When an entity is marshalled, each level of nesting is implemented internally via recursive method calls. If the nesting depth is large enough, a StackOverflowError can occur. In practice, this has been observed with a nesting depth of 12,000, using the default Java stack size.

This restriction on the nesting depth of embedded objects does not apply to cyclic references, since these are handled specially as described below.

Object Graphs

When an entity instance is stored, the graph of objects referenced via its fields is stored and retrieved as a graph. In other words, if a single instance is referenced by two or more fields when the entity is stored, the same will be true when the entity is retrieved.

When a reference to a particular object is stored as a member field inside that object or one of its embedded objects, this is called a cyclic reference. Because multiple references to a single object are stored as such, cycles are also represented correctly and do not cause infinite recursion or infinite processing loops. If an entity containing a cyclic reference is stored, the cyclic reference will be present when the entity is retrieved.

Note that the stored object graph is restricted in scope to a single entity instance. This is because each entity instance is stored separately. If two entities have a reference to the same object when stored, they will refer to two separate instances when the entities are retrieved.

Author:
Mark Hayes
See Also:
Persistent, PrimaryKey, SecondaryKey, KeyField
Optional Element Summary

int version
    Identifies a new version of a class when an incompatible class change has been made.

Element Detail

version

public abstract int version

Identifies a new version of a class when an incompatible class change has been made. Prior versions of a class are referred to by version number to perform class evolution and conversion using Mutations.

The first version of a class is version zero, if version() is not specified. When an incompatible class change is made, a version number must be assigned using version() that is higher than the previous version number for the class. If this is not done, an IncompatibleClassException will be thrown when the store is opened.

Returns:
the version.
Default:
0
diff --git a/docs/java/com/sleepycat/persist/model/EntityMetadata.html b/docs/java/com/sleepycat/persist/model/EntityMetadata.html
new file mode 100644
index 0000000..8bd81fe
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/EntityMetadata.html
@@ -0,0 +1,393 @@

EntityMetadata (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

Class EntityMetadata

java.lang.Object
  com.sleepycat.persist.model.EntityMetadata

All Implemented Interfaces:
java.io.Serializable

public class EntityMetadata
extends java.lang.Object
implements java.io.Serializable
The metadata for a persistent entity class. An entity class may be specified with the Entity annotation.

EntityMetadata objects are thread-safe. Multiple threads may safely call the methods of a shared EntityMetadata object.

Author:
Mark Hayes
See Also:
Serialized Form
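Continuing the hypothetical my.app.Person sketch from the ClassMetadata page (an illustration, not from the original documentation; the PrimaryKeyMetadata constructor signature is assumed), a custom model could construct entity metadata as follows:

    import com.sleepycat.persist.model.EntityMetadata;
    import com.sleepycat.persist.model.PrimaryKeyMetadata;
    import com.sleepycat.persist.model.SecondaryKeyMetadata;
    import java.util.Collections;
    import java.util.Map;

    public class PersonEntityMetadataFactory {

        static EntityMetadata personEntityMetadata() {
            PrimaryKeyMetadata primaryKey = new PrimaryKeyMetadata(
                "id", "long", "my.app.Person", null /* no sequence */);
            Map<String, SecondaryKeyMetadata> secondaryKeys =
                Collections.emptyMap(); // no secondary keys defined
            return new EntityMetadata("my.app.Person", primaryKey, secondaryKeys);
        }
    }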
Constructor Summary

EntityMetadata(java.lang.String className, PrimaryKeyMetadata primaryKey, java.util.Map<java.lang.String,SecondaryKeyMetadata> secondaryKeys)
    Used by an EntityModel to construct entity metadata.

Method Summary

boolean equals(java.lang.Object other)
java.lang.String getClassName()
    Returns the name of the entity class.
PrimaryKeyMetadata getPrimaryKey()
    Returns the primary key metadata for this entity.
java.util.Map<java.lang.String,SecondaryKeyMetadata> getSecondaryKeys()
    Returns an unmodifiable map of key name to secondary key metadata, or an empty map if no secondary keys are defined for this entity.
int hashCode()

Methods inherited from class java.lang.Object
clone, finalize, getClass, notify, notifyAll, toString, wait, wait, wait

Constructor Detail

EntityMetadata

public EntityMetadata(java.lang.String className,
                      PrimaryKeyMetadata primaryKey,
                      java.util.Map<java.lang.String,SecondaryKeyMetadata> secondaryKeys)

Used by an EntityModel to construct entity metadata.

Parameters:
className - the class name.
primaryKey - the primary key metadata.
secondaryKeys - the secondary key metadata.

Method Detail

getClassName

public java.lang.String getClassName()

Returns the name of the entity class.

Returns:
the name of the entity class.

getPrimaryKey

public PrimaryKeyMetadata getPrimaryKey()

Returns the primary key metadata for this entity. Note that the primary key field may be declared in this class or in a subclass. This metadata may be specified using the PrimaryKey annotation.

Returns:
the primary key metadata.

getSecondaryKeys

public java.util.Map<java.lang.String,SecondaryKeyMetadata> getSecondaryKeys()

Returns an unmodifiable map of key name to secondary key metadata, or an empty map if no secondary keys are defined for this entity. The returned map contains a mapping for each secondary key of this entity, including secondary keys declared in subclasses and superclasses. This metadata may be specified using SecondaryKey annotations.

Returns:
the secondary key metadata.

equals

public boolean equals(java.lang.Object other)

Overrides:
equals in class java.lang.Object

hashCode

public int hashCode()

Overrides:
hashCode in class java.lang.Object
diff --git a/docs/java/com/sleepycat/persist/model/EntityModel.html b/docs/java/com/sleepycat/persist/model/EntityModel.html
new file mode 100644
index 0000000..ab2a65c
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/EntityModel.html
@@ -0,0 +1,681 @@

EntityModel (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

Class EntityModel

java.lang.Object
  com.sleepycat.persist.model.EntityModel

Direct Known Subclasses:
AnnotationModel

public abstract class EntityModel
extends java.lang.Object
The base class for classes that provide entity model metadata. An EntityModel defines entity classes, primary keys, secondary keys, and relationships between entities. For each entity class that is part of the model, a single PrimaryIndex object and zero or more SecondaryIndex objects may be accessed via an EntityStore.

The built-in entity model, the AnnotationModel, is based on annotations that are added to entity classes and their key fields. Annotations are used in the examples in this package, and it is expected that annotations will normally be used; most readers should therefore skip to the AnnotationModel class. However, a custom entity model class may define its own metadata. This can be used to define entity classes and keys using mechanisms other than annotations.

A concrete entity model class should extend this class and implement the getClassMetadata(java.lang.String), getEntityMetadata(java.lang.String) and getKnownClasses() methods; a minimal sketch follows.

This is an abstract class rather than an interface to allow adding capabilities to the model at a future date without causing incompatibilities. For example, a method may be added in the future for returning new information about the model and subclasses may override this method to return the new information. Any new methods will have default implementations that return default values, and the use of the new information will be optional.

Author:
Mark Hayes
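Here is a minimal sketch of such a concrete model (the MapBackedModel name and its map-backed storage are illustrative assumptions; the built-in AnnotationModel is the normal choice):

    import com.sleepycat.persist.model.ClassMetadata;
    import com.sleepycat.persist.model.EntityMetadata;
    import com.sleepycat.persist.model.EntityModel;
    import java.util.Map;
    import java.util.Set;

    public class MapBackedModel extends EntityModel {

        /* Assumed to be populated elsewhere, e.g. from a configuration file. */
        private final Map<String, ClassMetadata> classMeta;
        private final Map<String, EntityMetadata> entityMeta;

        public MapBackedModel(Map<String, ClassMetadata> classMeta,
                              Map<String, EntityMetadata> entityMeta) {
            this.classMeta = classMeta;
            this.entityMeta = entityMeta;
        }

        @Override
        public ClassMetadata getClassMetadata(String className) {
            return classMeta.get(className); // null if not persistent
        }

        @Override
        public EntityMetadata getEntityMetadata(String className) {
            return entityMeta.get(className); // null if not an entity class
        }

        @Override
        public Set<String> getKnownClasses() {
            return java.util.Collections.unmodifiableSet(classMeta.keySet());
        }
    }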
Constructor Summary

protected EntityModel()
    The default constructor for use by subclasses.

Method Summary

static java.lang.Class classForName(java.lang.String className)
    Deprecated. Use resolveClass(java.lang.String) instead. This method does not use the environment's ClassLoader property.
java.lang.Object convertRawObject(RawObject raw)
    Converts a given raw object to a live object according to the current class definitions.
java.util.List<RawType> getAllRawTypes()
    Returns all versions of all known types.
java.util.List<RawType> getAllRawTypeVersions(java.lang.String className)
    Returns all known versions of type information for a given class name, or null if no persistent version of the class is known.
abstract ClassMetadata getClassMetadata(java.lang.String className)
    Returns the metadata for a given persistent class name, including proxy classes and entity classes.
abstract EntityMetadata getEntityMetadata(java.lang.String className)
    Returns the metadata for a given entity class name.
abstract java.util.Set<java.lang.String> getKnownClasses()
    Returns the names of all known persistent classes.
java.util.Set<java.lang.String> getKnownSpecialClasses()
    Returns the names of all known persistent enum and array classes that may be used to store persistent data.
RawType getRawType(java.lang.String className)
    Returns the type information for the current version of a given class, or null if the class is not currently persistent.
RawType getRawTypeVersion(java.lang.String className, int version)
    Returns the type information for a given version of a given class, or null if the given version of the class is unknown.
boolean isOpen()
    Returns whether the model is associated with an open store.
void registerClass(java.lang.Class persistentClass)
    Registers a persistent class, most importantly, a PersistentProxy class or entity subclass.
java.lang.Class resolveClass(java.lang.String className)
    Should be called by entity model implementations instead of calling Class.forName whenever loading an application class.

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Constructor Detail

EntityModel

protected EntityModel()

The default constructor for use by subclasses.

Method Detail

registerClass

public final void registerClass(java.lang.Class persistentClass)

Registers a persistent class, most importantly, a PersistentProxy class or entity subclass. Also registers an enum or array class.

Any persistent class, enum class or array may be registered in advance of using it, to avoid the overhead of updating the catalog database when an instance of the class is first stored. This method must be called in three cases:

1. to register all PersistentProxy classes, and
2. to register an entity subclass defining a secondary key, if getSubclassIndex is not called for the subclass, and
3. to register all new enum or array classes, if these enum or array classes are unknown to the DPL but will be used in a Converter mutation.

For example:

    EntityModel model = new AnnotationModel();
    model.registerClass(MyProxy.class);
    model.registerClass(MyEntitySubclass.class);
    model.registerClass(MyEnum.class);
    model.registerClass(MyArray[].class);

    StoreConfig config = new StoreConfig();
    ...
    config.setModel(model);

    EntityStore store = new EntityStore(..., config);

This method must be called before opening a store based on this model.

Parameters:
persistentClass - the class to register.
Throws:
java.lang.IllegalStateException - if this method is called for a model that is associated with an open store.
java.lang.IllegalArgumentException - if the given class is not persistent or has a different class loader than previously registered classes.
getClassMetadata

public abstract ClassMetadata getClassMetadata(java.lang.String className)

Returns the metadata for a given persistent class name, including proxy classes and entity classes.

Parameters:
    className - the class name.
Returns:
    the metadata or null if the class is not persistent or does not exist.
getEntityMetadata

public abstract EntityMetadata getEntityMetadata(java.lang.String className)

Returns the metadata for a given entity class name.

Parameters:
    className - the class name.
Returns:
    the metadata or null if the class is not an entity class or does not exist.
getKnownClasses

public abstract java.util.Set<java.lang.String> getKnownClasses()

Returns the names of all known persistent classes. A type becomes known when an instance of the type is stored for the first time or metadata or type information is queried for a specific class name.

Returns:
    an unmodifiable set of class names.
Throws:
    java.lang.IllegalStateException - if this method is called for a model that is not associated with an open store.
getKnownSpecialClasses

public java.util.Set<java.lang.String> getKnownSpecialClasses()

Returns the names of all known persistent enum and array classes that may be used to store persistent data. This differs from getKnownClasses(), which does not return enum and array classes because they have no metadata.

Returns:
    an unmodifiable set of enum and array class names.
Throws:
    java.lang.IllegalStateException - if this method is called for a model that is not associated with an open store.
getRawType

public final RawType getRawType(java.lang.String className)

Returns the type information for the current version of a given class, or null if the class is not currently persistent.

Parameters:
    className - the name of the current version of the class.
Returns:
    the RawType.
Throws:
    java.lang.IllegalStateException - if this method is called for a model that is not associated with an open store.
getRawTypeVersion

public final RawType getRawTypeVersion(java.lang.String className,
                                       int version)

Returns the type information for a given version of a given class, or null if the given version of the class is unknown.

Parameters:
    className - the name of the latest version of the class.
    version - the desired version of the class.
Returns:
    the RawType.
Throws:
    java.lang.IllegalStateException - if this method is called for a model that is not associated with an open store.
getAllRawTypeVersions

public final java.util.List<RawType> getAllRawTypeVersions(java.lang.String className)

Returns all known versions of type information for a given class name, or null if no persistent version of the class is known.

Parameters:
    className - the name of the latest version of the class.
Returns:
    an unmodifiable list of types for the given class name in order from most recent to least recent.
Throws:
    java.lang.IllegalStateException - if this method is called for a model that is not associated with an open store.
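For instance, a minimal sketch of dumping a class's stored evolution history, assuming an open EntityStore named store and a hypothetical class name com.example.Person:

    import com.sleepycat.persist.EntityStore;
    import com.sleepycat.persist.model.EntityModel;
    import com.sleepycat.persist.raw.RawType;

    // Print every stored version of a hypothetical class, newest first.
    EntityModel model = store.getModel();
    java.util.List<RawType> versions =
        model.getAllRawTypeVersions("com.example.Person");
    if (versions != null) {                 // null if the class is unknown
        for (RawType type : versions) {
            System.out.println(type.getClassName() + " version " + type.getVersion());
        }
    }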
getAllRawTypes

public final java.util.List<RawType> getAllRawTypes()

Returns all versions of all known types.

Returns:
    an unmodifiable list of types.
Throws:
    java.lang.IllegalStateException - if this method is called for a model that is not associated with an open store.
convertRawObject

public final java.lang.Object convertRawObject(RawObject raw)

Converts a given raw object to a live object according to the current class definitions.

The given raw object must conform to the current class definitions. However, the raw type (RawObject.getType()) is allowed to be from a different store, as long as the class names and the value types match. This allows converting raw objects that are read from one store to live objects in another store, for example, in a conversion program.

Parameters:
    raw - the RawObject.
Returns:
    the live object.
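As a hedged illustration of such a conversion program, assuming a RawStore opened on the source environment, a target EntityStore, and a hypothetical Person entity keyed by Long:

    import com.sleepycat.persist.EntityCursor;
    import com.sleepycat.persist.EntityStore;
    import com.sleepycat.persist.PrimaryIndex;
    import com.sleepycat.persist.model.EntityModel;
    import com.sleepycat.persist.raw.RawObject;
    import com.sleepycat.persist.raw.RawStore;

    void copyAll(RawStore sourceRaw, EntityStore targetStore) {
        EntityModel model = targetStore.getModel();
        PrimaryIndex<Long, Person> target =
            targetStore.getPrimaryIndex(Long.class, Person.class);
        // Read raw entities from the source store and convert each one to a
        // live object under the target store's current class definitions.
        EntityCursor<RawObject> cursor =
            sourceRaw.getPrimaryIndex("com.example.Person").entities();
        try {
            for (RawObject raw : cursor) {
                target.put((Person) model.convertRawObject(raw));
            }
        } finally {
            cursor.close();
        }
    }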
resolveClass

public java.lang.Class resolveClass(java.lang.String className)
                             throws java.lang.ClassNotFoundException

Should be called by entity model implementations instead of calling Class.forName whenever loading an application class. This method honors the BDB JE environment's ClassLoader property and uses ClassResolver to implement the class loading policy.

Parameters:
    className - the class name.
Returns:
    the Class.
Throws:
    java.lang.ClassNotFoundException - if the class is not found.
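For example, a minimal sketch of a method inside a hypothetical EntityModel subclass that loads an application class by name:

    // Honor the environment's configured ClassLoader instead of calling
    // Class.forName directly; the method name here is illustrative only.
    Class loadApplicationClass(String className) throws ClassNotFoundException {
        return resolveClass(className);   // not Class.forName(className)
    }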
classForName

public static java.lang.Class classForName(java.lang.String className)
                                    throws java.lang.ClassNotFoundException

Deprecated. Use resolveClass(java.lang.String) instead. This method does not use the environment's ClassLoader property.

Parameters:
    className - the class name.
Returns:
    the Class.
Throws:
    java.lang.ClassNotFoundException - if the class is not found.
diff --git a/docs/java/com/sleepycat/persist/model/FieldMetadata.html b/docs/java/com/sleepycat/persist/model/FieldMetadata.html
new file mode 100644
index 0000000..27063cb
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/FieldMetadata.html
@@ -0,0 +1,408 @@
FieldMetadata (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Class FieldMetadata

java.lang.Object
    com.sleepycat.persist.model.FieldMetadata

All Implemented Interfaces:
    java.io.Serializable
Direct Known Subclasses:
    PrimaryKeyMetadata, SecondaryKeyMetadata

public class FieldMetadata
extends java.lang.Object
implements java.io.Serializable

The metadata for a key field. This class defines common properties for singular and composite key fields.

FieldMetadata objects are thread-safe. Multiple threads may safely call the methods of a shared FieldMetadata object.

Author:
    Mark Hayes
See Also:
    Serialized Form
Constructor Summary

FieldMetadata(java.lang.String name, java.lang.String className, java.lang.String declaringClassName)
    Used by an EntityModel to construct field metadata.
Method Summary

boolean equals(java.lang.Object other)
java.lang.String getClassName()
    Returns the class name of the field type.
java.lang.String getDeclaringClassName()
    Returns the name of the class where the field is declared.
java.lang.String getName()
    Returns the field name.
int hashCode()
java.lang.String toString()

Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait

        Constructor Detail

FieldMetadata

public FieldMetadata(java.lang.String name,
                     java.lang.String className,
                     java.lang.String declaringClassName)

Used by an EntityModel to construct field metadata.

Parameters:
    name - the field name.
    className - the class name.
    declaringClassName - the name of the class where the field is declared.

        Method Detail

getName

public java.lang.String getName()

Returns the field name.

Returns:
    the field name.

getClassName

public java.lang.String getClassName()

Returns the class name of the field type.

Returns:
    the class name.

getDeclaringClassName

public java.lang.String getDeclaringClassName()

Returns the name of the class where the field is declared.

Returns:
    the name of the class where the field is declared.

equals

public boolean equals(java.lang.Object other)

Overrides:
    equals in class java.lang.Object

hashCode

public int hashCode()

Overrides:
    hashCode in class java.lang.Object

toString

public java.lang.String toString()

Overrides:
    toString in class java.lang.Object
diff --git a/docs/java/com/sleepycat/persist/model/KeyField.html b/docs/java/com/sleepycat/persist/model/KeyField.html
new file mode 100644
index 0000000..0b23a92
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/KeyField.html
@@ -0,0 +1,323 @@
KeyField (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Annotation Type KeyField

@Documented
@Retention(value=RUNTIME)
@Target(value=FIELD)
public @interface KeyField
Indicates the sorting position of a key field in a composite key class when the Comparable interface is not implemented. The KeyField integer value specifies the sort order of this field within the set of fields in the composite key.

If the field type of a PrimaryKey or SecondaryKey is a composite key class containing more than one key field, then a KeyField annotation must be present on each non-transient instance field of the composite key class. The KeyField value must be a number between one and the number of non-transient instance fields declared in the composite key class.

Note that a composite key class is a flat container for one or more simple type fields. All non-transient instance fields in the composite key class are key fields, and its superclass must be Object.

For example:

    @Entity
    class Animal {
        @PrimaryKey
        Classification classification;
        ...
    }

    @Persistent
    class Classification {
        @KeyField(1) String kingdom;
        @KeyField(2) String phylum;
        @KeyField(3) String clazz;
        @KeyField(4) String order;
        @KeyField(5) String family;
        @KeyField(6) String genus;
        @KeyField(7) String species;
        @KeyField(8) String subspecies;
        ...
    }

This causes entities to be sorted first by kingdom, then by phylum within kingdom, and so on.

The fields in a composite key class may not be null.

Custom Sort Order

To override the default sort order, a composite key class may implement the Comparable interface. This allows overriding the sort order and is therefore useful even when there is only one key field in the composite key class. For example, the following class sorts Strings using a Canadian collator:

    import java.text.Collator;
    import java.util.Locale;

    @Entity
    class Animal {
        ...
        @SecondaryKey(relate=ONE_TO_ONE)
        CollatedString canadianName;
        ...
    }

    @Persistent
    class CollatedString implements Comparable<CollatedString> {

        static Collator collator = Collator.getInstance(Locale.CANADA);

        @KeyField(1)
        String value;

        CollatedString(String value) { this.value = value; }

        private CollatedString() {}

        public int compareTo(CollatedString o) {
            return collator.compare(value, o.value);
        }
    }

Several important rules should be considered when implementing a custom comparison method. Failure to follow these rules may result in the primary or secondary index becoming unusable; in other words, the store will not be able to function.

1. The comparison method must always return the same result, given the same inputs. The behavior of the comparison method must not change over time.
2. A corollary to the first rule is that the behavior of the comparison method must not be dependent on state which may change over time. For example, if the above collation method used the default Java locale, and the default locale is changed, then the sort order will change.
3. The comparison method must not assume that it is called after the store has been opened. With Berkeley DB Java Edition, the comparison method is called during database recovery, which occurs in the Environment constructor.
4. The comparison method must not assume that it will only be called with keys that are currently present in the database. The comparison method will occasionally be called with deleted keys or with keys for records that were not part of a committed transaction.

Author:
    Mark Hayes

        Required Element Summary

int value

        Element Detail

value

public abstract int value
diff --git a/docs/java/com/sleepycat/persist/model/NotPersistent.html b/docs/java/com/sleepycat/persist/model/NotPersistent.html
new file mode 100644
index 0000000..da3449c
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/NotPersistent.html
@@ -0,0 +1,194 @@
NotPersistent (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Annotation Type NotPersistent

@Documented
@Retention(value=RUNTIME)
@Target(value=FIELD)
public @interface NotPersistent

Overrides the default rules for field persistence and defines a field as being non-persistent even when it is not declared with the transient keyword.

By default, the persistent fields of a class are all declared instance fields that are non-transient (are not declared with the transient keyword). The default rules may be overridden by specifying the NotPersistent or NotTransient annotation.

For example, the following field is non-transient (persistent) with respect to Java serialization but is transient with respect to the DPL:

    @NotPersistent
    int myField;

Author:
    Mark Hayes
See Also:
    NotTransient
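A minimal sketch of the snippet above in context; the enclosing class name and the Serializable pairing are illustrative assumptions, not part of the original example:

    import java.io.Serializable;
    import com.sleepycat.persist.model.NotPersistent;
    import com.sleepycat.persist.model.Persistent;

    @Persistent
    class CachedItem implements Serializable {
        @NotPersistent
        int myField;   // written by Java serialization, skipped by the DPL
    }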
diff --git a/docs/java/com/sleepycat/persist/model/NotTransient.html b/docs/java/com/sleepycat/persist/model/NotTransient.html
new file mode 100644
index 0000000..f1d0b02
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/NotTransient.html
@@ -0,0 +1,194 @@
NotTransient (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Annotation Type NotTransient

@Documented
@Retention(value=RUNTIME)
@Target(value=FIELD)
public @interface NotTransient

Overrides the default rules for field persistence and defines a field as being persistent even when it is declared with the transient keyword.

By default, the persistent fields of a class are all declared instance fields that are non-transient (are not declared with the transient keyword). The default rules may be overridden by specifying the NotPersistent or NotTransient annotation.

For example, the following field is transient with respect to Java serialization but is persistent with respect to the DPL:

    @NotTransient
    transient int myField;

Author:
    Mark Hayes
See Also:
    NotPersistent
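Again a minimal sketch in context, with an assumed enclosing class:

    import java.io.Serializable;
    import com.sleepycat.persist.model.NotTransient;
    import com.sleepycat.persist.model.Persistent;

    @Persistent
    class CachedItem implements Serializable {
        @NotTransient
        transient int myField;   // skipped by Java serialization, stored by the DPL
    }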
diff --git a/docs/java/com/sleepycat/persist/model/Persistent.html b/docs/java/com/sleepycat/persist/model/Persistent.html
new file mode 100644
index 0000000..6af077a
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/Persistent.html
@@ -0,0 +1,271 @@
Persistent (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Annotation Type Persistent

@Documented
@Retention(value=RUNTIME)
@Target(value=TYPE)
public @interface Persistent

Identifies a persistent class that is not an Entity class or a simple type.

Author:
    Mark Hayes
Optional Element Summary

java.lang.Class proxyFor
    Specifies the class that is proxied by this PersistentProxy instance.
int version
    Identifies a new version of a class when an incompatible class change has been made.
Element Detail

version

public abstract int version

Identifies a new version of a class when an incompatible class change has been made.

Returns:
    the version.
See Also:
    Entity.version()
Default:
    0

proxyFor

public abstract java.lang.Class proxyFor

Specifies the class that is proxied by this PersistentProxy instance.

Returns:
    the Class.
See Also:
    PersistentProxy
Default:
    void.class
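As a hedged illustration of the version element, assuming a hypothetical Address class whose field layout changed incompatibly:

    import com.sleepycat.persist.model.Persistent;

    // Bump the version after an incompatible class change; a class
    // evolution mutation would then map old instances to this version.
    @Persistent(version=1)
    class Address {
        String street;
        String city;
    }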
diff --git a/docs/java/com/sleepycat/persist/model/PersistentProxy.html b/docs/java/com/sleepycat/persist/model/PersistentProxy.html
new file mode 100644
index 0000000..1b0702d
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/PersistentProxy.html
@@ -0,0 +1,356 @@
PersistentProxy (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Interface PersistentProxy<T>

public interface PersistentProxy<T>

Implemented by a proxy class to represent the persistent state of a (non-persistent) proxied class. Normally classes that are outside the scope of the developer's control must be proxied since they cannot be annotated, and because it is desirable to insulate the stored format from changes to the instance fields of the proxied class. This is useful for classes in the standard Java libraries, for example.

PersistentProxy objects are not required to be thread-safe. A single thread will create and call the methods of a given PersistentProxy object.

There are three requirements for a proxy class:

1. It must implement the PersistentProxy interface.
2. It must be specified as a persistent proxy class in the entity model. When using the AnnotationModel, a proxy class is indicated by the Persistent annotation with the Persistent.proxyFor() property.
3. It must be explicitly registered by calling EntityModel.registerClass(java.lang.Class) before opening the store.

In order to serialize an instance of the proxied class before it is stored, an instance of the proxy class is created. The proxied instance is then passed to the proxy's initializeProxy method. When this method returns, the proxy instance contains the state of the proxied instance. The proxy instance is then serialized and stored in the same way as for any persistent object.

When an instance of the proxy object is deserialized after it is retrieved from storage, its convertProxy() method is called. The instance of the proxied class returned by this method is then returned as a field in the persistent instance.

For example:

    import java.util.Locale;

    @Persistent(proxyFor=Locale.class)
    class LocaleProxy implements PersistentProxy<Locale> {

        String language;
        String country;
        String variant;

        private LocaleProxy() {}

        public void initializeProxy(Locale object) {
            language = object.getLanguage();
            country = object.getCountry();
            variant = object.getVariant();
        }

        public Locale convertProxy() {
            return new Locale(language, country, variant);
        }
    }

The above definition allows the Locale class to be used in any persistent class, for example:

    @Persistent
    class LocalizedText {
        String text;
        Locale locale;
    }

A proxied class may not be used as a superclass for a persistent class or entity class. For example, the following is not allowed:

    @Persistent
    class LocalizedText extends Locale { // NOT ALLOWED
        String text;
    }

A proxy for proxied class P does not handle instances of subclasses of P. To proxy a subclass of P, a separate proxy class is needed.

Several built-in proxy types are used implicitly. An application-defined proxy will be used instead of a built-in proxy, if both exist for the same proxied class.

With respect to class evolution, a proxy instance is no different than any other persistent instance. When using a RawStore or Converter, only the raw data of the proxy instance will be visible. Raw data for the proxied instance never exists.

Currently a proxied object may not contain a reference to itself. For simple proxied objects such as the Locale class shown above, this naturally won't occur. But for proxied objects that are containers -- the built-in Collection and Map classes for example -- this can occur if the container is added as an element of itself. This should be avoided. If an attempt to store such an object is made, an IllegalArgumentException will be thrown.

Note that a proxy class may not be a subclass of an entity class.

Author:
    Mark Hayes
Method Summary

T convertProxy()
    Returns a new proxied class instance to which the state of this proxy instance has been copied.
void initializeProxy(T object)
    Copies the state of a given proxied class instance to this proxy instance.
Method Detail

initializeProxy

void initializeProxy(T object)

Copies the state of a given proxied class instance to this proxy instance.

Parameters:
    object - the proxied class instance.

convertProxy

T convertProxy()

Returns a new proxied class instance to which the state of this proxy instance has been copied.

Returns:
    the new proxied class instance.
diff --git a/docs/java/com/sleepycat/persist/model/PrimaryKey.html b/docs/java/com/sleepycat/persist/model/PrimaryKey.html
new file mode 100644
index 0000000..717be1a
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/PrimaryKey.html
@@ -0,0 +1,376 @@
PrimaryKey (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Annotation Type PrimaryKey

@Documented
@Retention(value=RUNTIME)
@Target(value=FIELD)
public @interface PrimaryKey

Indicates the primary key field of an entity class. The value of the primary key field is the unique identifier for the entity in a PrimaryIndex.

PrimaryKey may appear on at most one declared field per class.

Primary key values may be automatically assigned as sequential integers using a sequence(). In this case the type of the key field is restricted to a simple integer type.

A primary key field may not be null, unless it is being assigned from a sequence.

Key Field Types

The type of a key field must be one of the following:

• Any of the simple types.
• An enum type.
• A composite key class containing one or more simple type or enum fields.

Array types are not allowed.

When using a composite key class, each field of the composite key class must be annotated with KeyField to identify the storage order and default sort order. See KeyField for an example and more information on composite keys.

Key Sort Order

Key field types, being simple types, have a well defined and reasonable default sort order, described below. This sort order is based on a storage encoding that allows a fast byte-by-byte comparison.

• All simple types except for String are encoded so that they are sorted as expected, that is, as if the Comparable.compareTo(T) method of their class (or, for primitives, their wrapper class) is called.
• Strings are encoded as UTF-8 byte arrays. Zero (0x0000) character values are UTF encoded as non-zero values, and therefore embedded zeros in the string are supported. The sequence {0xC0,0x80} is used to encode a zero character. This UTF encoding is the same one used by native Java UTF libraries. However, this encoding of zero does impact the lexicographical ordering, and zeros will not be sorted first (the natural order) or last. For all character values other than zero, the default UTF byte ordering is the same as the Unicode lexicographical character ordering.

When using a composite key class with more than one field, the sorting order among fields is determined by the KeyField annotations. To override the default sort order, you can use a composite key class that implements Comparable. This allows overriding the sort order and is therefore useful even when there is only one key field in the composite key class. See Custom Sort Order for more information on sorting of composite keys.

Inherited Primary Key

If it does not appear on a declared field in the entity class, PrimaryKey must appear on a field of an entity superclass. In the following example, the primary key on the base class is used:

    @Persistent
    class BaseClass {
        @PrimaryKey
        long id;
        ...
    }
    @Entity
    class Employee extends BaseClass {
        // inherits id primary key
        ...
    }

If more than one class with PrimaryKey is present in a class hierarchy, the key in the most derived class is used. In this case, primary key fields in superclasses are "shadowed" and are not persistent. In the following example, the primary key in the base class is not used and is not persistent:

    @Persistent
    class BaseClass {
        @PrimaryKey
        long id;
        ...
    }
    @Entity
    class Employee extends BaseClass {
        // overrides id primary key
        @PrimaryKey
        String uuid;
        ...
    }

Note that a PrimaryKey is not allowed on entity subclasses. The following is illegal and will cause an IllegalArgumentException when trying to store an Employee instance:

    @Entity
    class Person {
        @PrimaryKey
        long id;
        ...
    }
    @Persistent
    class Employee extends Person {
        @PrimaryKey
        String uuid;
        ...
    }

Author:
    Mark Hayes
Optional Element Summary

java.lang.String sequence
    The name of a sequence from which to assign primary key values automatically.
Element Detail

sequence

public abstract java.lang.String sequence

The name of a sequence from which to assign primary key values automatically. If a non-empty string is specified, sequential integers will be assigned from the named sequence.

A single sequence may be used for more than one entity class by specifying the same sequence name for each PrimaryKey. For each named sequence, a Sequence will be used to assign key values. For more information on configuring sequences, see EntityStore.setSequenceConfig.

To use a sequence, the type of the key field must be a primitive integer type (byte, short, int or long) or the primitive wrapper class for one of these types. A composite key class may also be used to override sort order, but it may contain only a single key field, and this field must have one of the types previously mentioned.

When an entity with a primary key sequence is stored using one of the put methods in the PrimaryIndex, a new key will be assigned if the primary key field in the entity instance is null (for a reference type) or zero (for a primitive integer type). Specifying zero for a primitive integer key field is allowed because the initial value of the sequence is one (not zero) by default. If the sequence configuration is changed such that zero is part of the sequence, then the field type must be a primitive wrapper class and the field value must be null to cause a new key to be assigned.

When one of the put methods in the PrimaryIndex is called and a new key is assigned, the assigned value is returned to the caller via the key field of the entity object that is passed as a parameter.

Returns:
    the sequence name or an empty string.
Default:
    ""
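A hedged sketch of sequence-assigned keys, using a hypothetical Employee entity and the assumed sequence name "EMPLOYEE_ID":

    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;

    @Entity
    class Employee {
        // A null key triggers assignment of the next value from the sequence.
        @PrimaryKey(sequence="EMPLOYEE_ID")
        Long id;

        String name;
    }

    // After primaryIndex.put(employee) returns, employee.id holds the
    // newly assigned key.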
diff --git a/docs/java/com/sleepycat/persist/model/PrimaryKeyMetadata.html b/docs/java/com/sleepycat/persist/model/PrimaryKeyMetadata.html
new file mode 100644
index 0000000..37dad2c
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/PrimaryKeyMetadata.html
@@ -0,0 +1,362 @@
PrimaryKeyMetadata (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Class PrimaryKeyMetadata

java.lang.Object
    com.sleepycat.persist.model.FieldMetadata
        com.sleepycat.persist.model.PrimaryKeyMetadata

All Implemented Interfaces:
    java.io.Serializable

public class PrimaryKeyMetadata
extends FieldMetadata

The metadata for a primary key field. A primary key may be specified with the PrimaryKey annotation.

PrimaryKeyMetadata objects are thread-safe. Multiple threads may safely call the methods of a shared PrimaryKeyMetadata object.

Author:
    Mark Hayes
See Also:
    Serialized Form
Constructor Summary

PrimaryKeyMetadata(java.lang.String name, java.lang.String className, java.lang.String declaringClassName, java.lang.String sequenceName)
    Used by an EntityModel to construct primary key metadata.
Constructor Detail

PrimaryKeyMetadata

public PrimaryKeyMetadata(java.lang.String name,
                          java.lang.String className,
                          java.lang.String declaringClassName,
                          java.lang.String sequenceName)

Used by an EntityModel to construct primary key metadata.

Parameters:
    name - the field name.
    className - the class name.
    declaringClassName - the name of the class where the field is declared.
    sequenceName - the sequence name.
Method Detail

getSequenceName

public java.lang.String getSequenceName()

Returns the name of the sequence for assigning key values. This may be specified using the PrimaryKey.sequence() annotation.

Returns:
    the sequence name.

equals

public boolean equals(java.lang.Object other)

Overrides:
    equals in class FieldMetadata
diff --git a/docs/java/com/sleepycat/persist/model/Relationship.html b/docs/java/com/sleepycat/persist/model/Relationship.html
new file mode 100644
index 0000000..2dd70fb
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/Relationship.html
@@ -0,0 +1,411 @@
Relationship (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Enum Relationship

java.lang.Object
    java.lang.Enum<Relationship>
        com.sleepycat.persist.model.Relationship

All Implemented Interfaces:
    java.io.Serializable, java.lang.Comparable<Relationship>

public enum Relationship
extends java.lang.Enum<Relationship>

Defines the relationship between instances of the entity class and the secondary keys. This can be specified using a SecondaryKey.relate() annotation.

Author:
    Mark Hayes
Enum Constant Summary

MANY_TO_MANY
    Relates many entities to many secondary keys.
MANY_TO_ONE
    Relates many entities to one secondary key.
ONE_TO_MANY
    Relates one entity to many secondary keys.
ONE_TO_ONE
    Relates one entity to one secondary key.
Method Summary

static Relationship valueOf(java.lang.String name)
    Returns the enum constant of this type with the specified name.
static Relationship[] values()
    Returns an array containing the constants of this enum type, in the order they are declared.

Methods inherited from class java.lang.Enum:
clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf

Methods inherited from class java.lang.Object:
getClass, notify, notifyAll, wait, wait, wait
Enum Constant Detail

MANY_TO_ONE

public static final Relationship MANY_TO_ONE

Relates many entities to one secondary key.

The secondary index will have non-unique keys; in other words, duplicates will be allowed.

The secondary key field is singular; in other words, it may not be an array or collection type.

ONE_TO_MANY

public static final Relationship ONE_TO_MANY

Relates one entity to many secondary keys.

The secondary index will have unique keys; in other words, duplicates will not be allowed.

The secondary key field must be an array or collection type.

MANY_TO_MANY

public static final Relationship MANY_TO_MANY

Relates many entities to many secondary keys.

The secondary index will have non-unique keys; in other words, duplicates will be allowed.

The secondary key field must be an array or collection type.

ONE_TO_ONE

public static final Relationship ONE_TO_ONE

Relates one entity to one secondary key.

The secondary index will have unique keys; in other words, duplicates will not be allowed.

The secondary key field is singular; in other words, it may not be an array or collection type.

        Method Detail

values

public static Relationship[] values()

Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows:

    for (Relationship c : Relationship.values())
        System.out.println(c);

Returns:
    an array containing the constants of this enum type, in the order they are declared.

valueOf

public static Relationship valueOf(java.lang.String name)

Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are not permitted.)

Parameters:
    name - the name of the enum constant to be returned.
Returns:
    the enum constant with the specified name.
Throws:
    java.lang.IllegalArgumentException - if this enum type has no constant with the specified name.
    java.lang.NullPointerException - if the argument is null.
diff --git a/docs/java/com/sleepycat/persist/model/SecondaryKey.html b/docs/java/com/sleepycat/persist/model/SecondaryKey.html
new file mode 100644
index 0000000..1fd84e9
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/model/SecondaryKey.html
@@ -0,0 +1,495 @@
SecondaryKey (Oracle - Berkeley DB Java Edition API)
com.sleepycat.persist.model

    Annotation Type SecondaryKey

@Documented
@Retention(value=RUNTIME)
@Target(value=FIELD)
public @interface SecondaryKey

Indicates a secondary key field of an entity class. The value of the secondary key field is a unique or non-unique identifier for the entity and is accessed via a SecondaryIndex.

SecondaryKey may appear on any number of fields in an entity class, subclasses and superclasses. For a secondary key field in the entity class or one of its superclasses, all entity instances will be indexed by that field (if it is non-null). For a secondary key field in an entity subclass, only instances of that subclass will be indexed by that field (if it is non-null).

If a secondary key field is null, the entity will not be indexed by that key. In other words, the entity cannot be queried by that secondary key nor can the entity be found by iterating through the secondary index.

For a given entity class and its superclasses and subclasses, no two secondary keys may have the same name. By default, the field name identifies the secondary key and the secondary index for a given entity class. name() may be specified to override this default.

Using relate(), instances of the entity class are related to secondary keys in a many-to-one, one-to-many, many-to-many, or one-to-one relationship. This required property specifies the cardinality of each side of the relationship.

A secondary key may optionally be used to form a relationship with instances of another entity class using relatedEntity() and onRelatedEntityDelete(). This establishes foreign key constraints for the secondary key.

The secondary key field type must be a Set, Collection or array type when an x-to-many relationship is used, or a singular type when an x-to-one relationship is used; see relate().

The field type (or element type, when a Set, Collection or array type is used) of a secondary key field must follow the same rules as for a primary key type. The key sort order is also the same.

For a secondary key field with a collection type, a type parameter must be used to specify the element type. For example Collection<String> is allowed but Collection is not.

Author:
    Mark Hayes
Required Element Summary

Relationship relate
    Defines the relationship between instances of the entity class and the secondary keys.

Optional Element Summary

java.lang.String name
    Specifies the name of the key in order to use a name that is different than the field name.
DeleteAction onRelatedEntityDelete
    Specifies the action to take when a related entity is deleted having a primary key value that exists as a secondary key value for this entity.
java.lang.Class relatedEntity
    Specifies the entity to which this entity is related, for establishing foreign key constraints.
Element Detail

relate

public abstract Relationship relate

Defines the relationship between instances of the entity class and the secondary keys.

The table below summarizes how to create all four variations of relationships.

    Relationship               Field type            Key type    Example
    Relationship.ONE_TO_ONE    Singular              Unique      A person record with a unique social security number key.
    Relationship.MANY_TO_ONE   Singular              Duplicates  A person record with a non-unique employer key.
    Relationship.ONE_TO_MANY   Set/Collection/array  Unique      A person record with multiple unique email address keys.
    Relationship.MANY_TO_MANY  Set/Collection/array  Duplicates  A person record with multiple non-unique organization keys.

For a many-to-x relationship, the secondary index will have non-unique keys; in other words, duplicates will be allowed. Conversely, for a one-to-x relationship, the secondary index will have unique keys.

For an x-to-one relationship, the secondary key field is singular; in other words, it may not be a Set, Collection or array type. Conversely, for an x-to-many relationship, the secondary key field must be a Set, Collection or array type. A collection type is any implementation of Collection.

For an x-to-many relationship, the field type should normally be Set (or a subtype of this interface). This accurately expresses the fact that an Entity may not have two identical secondary keys. For flexibility, a Collection (or a subtype of this interface) or an array type may also be used. In that case, any duplicate key values in the Collection or array are ignored.

Returns:
    the Relationship.
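For instance, a minimal sketch of a one-to-many key on a Set field, using a hypothetical Person entity:

    import java.util.HashSet;
    import java.util.Set;
    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;
    import com.sleepycat.persist.model.Relationship;
    import com.sleepycat.persist.model.SecondaryKey;

    @Entity
    class Person {
        @PrimaryKey
        long id;

        // Each email address is a unique secondary key for this person.
        @SecondaryKey(relate=Relationship.ONE_TO_MANY)
        Set<String> emailAddresses = new HashSet<String>();
    }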
relatedEntity

public abstract java.lang.Class relatedEntity

Specifies the entity to which this entity is related, for establishing foreign key constraints. Values of this secondary key will be constrained to the set of primary key values for the given entity class.

The given class must be an entity class. This class is called the related entity or foreign entity.

When a related entity class is specified, a check (foreign key constraint) is made every time a new secondary key value is stored for this entity, and every time a related entity is deleted.

Whenever a new secondary key value is stored for this entity, it is checked to ensure it exists as a primary key value of the related entity. If it does not, an exception is thrown by the PrimaryIndex put method. On BDB JE, a ForeignConstraintException will be thrown.

Whenever a related entity is deleted and its primary key value exists as a secondary key value for this entity, the action is taken that is specified using the onRelatedEntityDelete() property.

Together, these two checks guarantee that a secondary key value for this entity will always exist as a primary key value for the related entity. Note, however, that a transactional store must be configured to guarantee this to be true in the face of a crash; see StoreConfig.setTransactional(boolean).

Returns:
    the related entity class, or void.class if none is specified.
Default:
    void.class
      +
        +
      • + + +
          +
        • +

          onRelatedEntityDelete

          +
          public abstract DeleteAction onRelatedEntityDelete
          +
          Specifies the action to take when a related entity is deleted having a primary key value that exists as a secondary key value for this entity.

          Note: This property only applies when relatedEntity() is specified to define the related entity.

          The default action, ABORT, means that an exception is thrown in order to abort the current transaction. On BDB JE, a DeleteConstraintException is thrown.

          If CASCADE is specified, then this entity will be deleted also. This in turn could trigger further deletions, causing a cascading effect.

          If NULLIFY is specified, then the secondary key in this entity is set to null and this entity is updated. If the key field type is singular, the field value is set to null; therefore, to specify NULLIFY for a singular key field type, a primitive wrapper type must be used instead of a primitive type. If the key field type is an array or collection type, the key is deleted from the array (the array is resized) or from the collection (using Collection.remove).
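
          For example, a sketch combining NULLIFY with a singular key (the Employee and Employer classes and static imports are assumed, as in the larger example later in these docs); the Long wrapper type is required so that the field can be set to null:

          @Entity
          class Employee {

              @PrimaryKey
              long id;

              // NULLIFY on a singular key requires a wrapper type (Long, not long)
              // so the field can be nulled when the related Employer is deleted.
              @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Employer.class,
                            onRelatedEntityDelete=NULLIFY)
              Long employerId;

              private Employee() {} // For bindings
          }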

          +
          +
          Returns:
          +
          the DeleteAction, or DeleteAction.ABORT if none is specified.
          +
          +
          +
          Default:
          +
          com.sleepycat.persist.model.DeleteAction.ABORT
          +
          +
        • +
        +
      • +
      +
        +
      • + + +
          +
        • +

          name

          +
          public abstract java.lang.String name
          +
          Specifies the name of the key in order to use a name that is different from the field name.

          This is convenient when prefixes or suffixes are used on field names. For example:

          +
          +  class Person {
          +      @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class, name="parentSsn")
          +      String m_parentSsn;
          +  }
          + +

          It can also be used to uniquely name a key when multiple secondary keys for a single entity class have the same field name. For example, an entity class and its subclass may both have a field named 'date', and both fields are used as secondary keys. The name property can be specified for one or both fields to give each key a unique name.
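
          One way that situation might look (a sketch; the Event and AuditedEvent classes are invented for illustration, and a java.util.Date import is assumed):

          @Entity
          class Event {

              @PrimaryKey
              long id;

              // Key name overrides the field name.
              @SecondaryKey(relate=MANY_TO_ONE, name="eventDate")
              Date date;

              private Event() {} // For bindings
          }

          @Persistent
          class AuditedEvent extends Event {

              // Same field name as Event.date; the name property keeps the
              // two secondary keys distinct.
              @SecondaryKey(relate=MANY_TO_ONE, name="auditDate")
              Date date;

              private AuditedEvent() {} // For bindings
          }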

          +
          +
          Returns:
          +
          the key name that overrides the field name, or empty string if none is specified.
          +
          +
          +
          Default:
          +
          ""
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/SecondaryKeyMetadata.html b/docs/java/com/sleepycat/persist/model/SecondaryKeyMetadata.html new file mode 100644 index 0000000..b47b1a8 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/SecondaryKeyMetadata.html @@ -0,0 +1,463 @@ + + + + + +SecondaryKeyMetadata (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +
    com.sleepycat.persist.model
    +

    Class SecondaryKeyMetadata

    +
    +
    + +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Serializable
      +
      +
      +
      +
      public class SecondaryKeyMetadata
      +extends FieldMetadata
      +
      The metadata for a secondary key field. A secondary key may be specified with the SecondaryKey annotation.

      SecondaryKeyMetadata objects are thread-safe. Multiple threads may safely call the methods of a shared SecondaryKeyMetadata object.

      +
      +
      Author:
      +
      Mark Hayes
      +
      See Also:
      +
      Serialized Form
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        SecondaryKeyMetadata(java.lang.String name, java.lang.String className, java.lang.String declaringClassName, java.lang.String elementClassName, java.lang.String keyName, Relationship relationship, java.lang.String relatedEntity, DeleteAction deleteAction)
        Used by an EntityModel to construct secondary key metadata.
        +
        +
      • +
      + + +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          SecondaryKeyMetadata

          +
          public SecondaryKeyMetadata(java.lang.String name,
          +                            java.lang.String className,
          +                            java.lang.String declaringClassName,
          +                            java.lang.String elementClassName,
          +                            java.lang.String keyName,
          +                            Relationship relationship,
          +                            java.lang.String relatedEntity,
          +                            DeleteAction deleteAction)
          +
          Used by an EntityModel to construct secondary key metadata.
          +
          +
          Parameters:
          +
          name - the field name.
          +
          className - the class name.
          +
          declaringClassName - the name of the class where the field is declared.
          +
          elementClassName - the element class name.
          +
          keyName - the key name.
          +
          relationship - the Relationship.
          +
          relatedEntity - the class name of the related (foreign) entity.
          +
          deleteAction - the DeleteAction.
          +
          +
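
          As an illustration of how these arguments line up, a custom EntityModel might describe the m_parentSsn key from the SecondaryKey.name() example roughly as follows. This is a sketch; the package and argument values are assumed, and class name strings would normally be fully qualified:

          SecondaryKeyMetadata meta = new SecondaryKeyMetadata(
              "m_parentSsn",                // name: the field name
              "java.lang.String",           // className: the field's type
              "com.example.Person",         // declaringClassName (assumed package)
              null,                         // elementClassName: null for a singular key
              "parentSsn",                  // keyName: the overriding key name
              Relationship.MANY_TO_ONE,     // relationship
              "com.example.Person",         // relatedEntity: related entity class name
              DeleteAction.ABORT);          // deleteAction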
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getElementClassName

          +
          public java.lang.String getElementClassName()
          +
          Returns the class name of the array or collection element for a ONE_TO_MANY or MANY_TO_MANY relationship, or null for a ONE_TO_ONE or MANY_TO_ONE relationship.
          +
          +
          Returns:
          +
          the element class name.
          +
          +
        • +
        + + + +
          +
        • +

          getKeyName

          +
          public java.lang.String getKeyName()
          +
          Returns the key name, which may be different from the field name.
          +
          +
          Returns:
          +
          the key name.
          +
          +
        • +
        + + + +
          +
        • +

          getRelationship

          +
          public Relationship getRelationship()
          +
          Returns the relationship between instances of the entity class and the secondary keys. This may be specified using the SecondaryKey.relate() annotation.
          +
          +
          Returns:
          +
          the Relationship.
          +
          +
        • +
        + + + +
          +
        • +

          getRelatedEntity

          +
          public java.lang.String getRelatedEntity()
          +
          Returns the class name of the related (foreign) entity, for which foreign key constraints are specified using the SecondaryKey.relatedEntity() annotation.
          +
          +
          Returns:
          +
          the class name of the related (foreign) entity.
          +
          +
        • +
        + + + +
          +
        • +

          getDeleteAction

          +
          public DeleteAction getDeleteAction()
          +
          Returns the action to take when a related entity is deleted having a primary key value that exists as a secondary key value for this entity. This may be specified using the SecondaryKey.onRelatedEntityDelete() annotation.
          +
          +
          Returns:
          +
          the DeleteAction.
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object other)
          +
          +
          Overrides:
          +
          equals in class FieldMetadata
          +
          +
        • +
        + + + + +
      • +
      +
    • +
    +
    +
    + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/AnnotationModel.html b/docs/java/com/sleepycat/persist/model/class-use/AnnotationModel.html new file mode 100644 index 0000000..c2c03f1 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/AnnotationModel.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.AnnotationModel (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.AnnotationModel

    +
    +
    No usage of com.sleepycat.persist.model.AnnotationModel
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/ClassEnhancer.html b/docs/java/com/sleepycat/persist/model/class-use/ClassEnhancer.html new file mode 100644 index 0000000..310d8d3 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/ClassEnhancer.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.ClassEnhancer (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.ClassEnhancer

    +
    +
    No usage of com.sleepycat.persist.model.ClassEnhancer
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/ClassEnhancerTask.html b/docs/java/com/sleepycat/persist/model/class-use/ClassEnhancerTask.html new file mode 100644 index 0000000..88e1e8f --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/ClassEnhancerTask.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.ClassEnhancerTask (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.ClassEnhancerTask

    +
    +
    No usage of com.sleepycat.persist.model.ClassEnhancerTask
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/ClassMetadata.html b/docs/java/com/sleepycat/persist/model/class-use/ClassMetadata.html new file mode 100644 index 0000000..3cc85e1 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/ClassMetadata.html @@ -0,0 +1,205 @@ + + + + + +Uses of Class com.sleepycat.persist.model.ClassMetadata (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.ClassMetadata

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/DeleteAction.html b/docs/java/com/sleepycat/persist/model/class-use/DeleteAction.html new file mode 100644 index 0000000..8b17909 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/DeleteAction.html @@ -0,0 +1,207 @@ + + + + + +Uses of Class com.sleepycat.persist.model.DeleteAction (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.DeleteAction

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/Entity.html b/docs/java/com/sleepycat/persist/model/class-use/Entity.html new file mode 100644 index 0000000..a2d896b --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/Entity.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.Entity (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.Entity

    +
    +
    No usage of com.sleepycat.persist.model.Entity
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/EntityMetadata.html b/docs/java/com/sleepycat/persist/model/class-use/EntityMetadata.html new file mode 100644 index 0000000..4efa4c2 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/EntityMetadata.html @@ -0,0 +1,204 @@ + + + + + +Uses of Class com.sleepycat.persist.model.EntityMetadata (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.EntityMetadata

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/EntityModel.html b/docs/java/com/sleepycat/persist/model/class-use/EntityModel.html new file mode 100644 index 0000000..f0e213b --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/EntityModel.html @@ -0,0 +1,274 @@ + + + + + +Uses of Class com.sleepycat.persist.model.EntityModel (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.EntityModel

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/FieldMetadata.html b/docs/java/com/sleepycat/persist/model/class-use/FieldMetadata.html new file mode 100644 index 0000000..05758bd --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/FieldMetadata.html @@ -0,0 +1,246 @@ + + + + + +Uses of Class com.sleepycat.persist.model.FieldMetadata (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.FieldMetadata

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/KeyField.html b/docs/java/com/sleepycat/persist/model/class-use/KeyField.html new file mode 100644 index 0000000..b7a70ee --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/KeyField.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.KeyField (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.KeyField

    +
    +
    No usage of com.sleepycat.persist.model.KeyField
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/NotPersistent.html b/docs/java/com/sleepycat/persist/model/class-use/NotPersistent.html new file mode 100644 index 0000000..508dcd0 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/NotPersistent.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.NotPersistent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.NotPersistent

    +
    +
    No usage of com.sleepycat.persist.model.NotPersistent
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/NotTransient.html b/docs/java/com/sleepycat/persist/model/class-use/NotTransient.html new file mode 100644 index 0000000..6686d9e --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/NotTransient.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.NotTransient (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.NotTransient

    +
    +
    No usage of com.sleepycat.persist.model.NotTransient
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/Persistent.html b/docs/java/com/sleepycat/persist/model/class-use/Persistent.html new file mode 100644 index 0000000..df9bf83 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/Persistent.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.Persistent (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.Persistent

    +
    +
    No usage of com.sleepycat.persist.model.Persistent
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/PersistentProxy.html b/docs/java/com/sleepycat/persist/model/class-use/PersistentProxy.html new file mode 100644 index 0000000..bc79781 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/PersistentProxy.html @@ -0,0 +1,129 @@ + + + + + +Uses of Interface com.sleepycat.persist.model.PersistentProxy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Interface
    com.sleepycat.persist.model.PersistentProxy

    +
    +
    No usage of com.sleepycat.persist.model.PersistentProxy
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/PrimaryKey.html b/docs/java/com/sleepycat/persist/model/class-use/PrimaryKey.html new file mode 100644 index 0000000..dad6284 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/PrimaryKey.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.PrimaryKey (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.PrimaryKey

    +
    +
    No usage of com.sleepycat.persist.model.PrimaryKey
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/PrimaryKeyMetadata.html b/docs/java/com/sleepycat/persist/model/class-use/PrimaryKeyMetadata.html new file mode 100644 index 0000000..1d865a7 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/PrimaryKeyMetadata.html @@ -0,0 +1,218 @@ + + + + + +Uses of Class com.sleepycat.persist.model.PrimaryKeyMetadata (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.PrimaryKeyMetadata

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/Relationship.html b/docs/java/com/sleepycat/persist/model/class-use/Relationship.html new file mode 100644 index 0000000..ddfa7d0 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/Relationship.html @@ -0,0 +1,207 @@ + + + + + +Uses of Class com.sleepycat.persist.model.Relationship (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.Relationship

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/SecondaryKey.html b/docs/java/com/sleepycat/persist/model/class-use/SecondaryKey.html new file mode 100644 index 0000000..9175a11 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/SecondaryKey.html @@ -0,0 +1,129 @@ + + + + + +Uses of Class com.sleepycat.persist.model.SecondaryKey (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.SecondaryKey

    +
    +
    No usage of com.sleepycat.persist.model.SecondaryKey
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/class-use/SecondaryKeyMetadata.html b/docs/java/com/sleepycat/persist/model/class-use/SecondaryKeyMetadata.html new file mode 100644 index 0000000..a2c594c --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/class-use/SecondaryKeyMetadata.html @@ -0,0 +1,220 @@ + + + + + +Uses of Class com.sleepycat.persist.model.SecondaryKeyMetadata (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Class
    com.sleepycat.persist.model.SecondaryKeyMetadata

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/package-frame.html b/docs/java/com/sleepycat/persist/model/package-frame.html new file mode 100644 index 0000000..a307d15 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/package-frame.html @@ -0,0 +1,47 @@ + + + + + +com.sleepycat.persist.model (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.persist.model

    + + + diff --git a/docs/java/com/sleepycat/persist/model/package-summary.html b/docs/java/com/sleepycat/persist/model/package-summary.html new file mode 100644 index 0000000..838de7b --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/package-summary.html @@ -0,0 +1,310 @@ + + + + + +com.sleepycat.persist.model (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Package com.sleepycat.persist.model

    +
    +
    Annotations for defining a persistent object model.
    +
    +

    See: Description

    +
    +
    +
      +
    • + + + + + + + + + + + + +
      Interface Summary 
      InterfaceDescription
      PersistentProxy<T> +
      Implemented by a proxy class to represent the persistent state of a (non-persistent) proxied class.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Class Summary 
      ClassDescription
      AnnotationModel +
      The default annotation-based entity model.
      +
      ClassEnhancer +
      Enhances the bytecode of persistent classes to provide efficient access to fields and constructors, and to avoid special security policy settings for accessing non-public members.
      +
      ClassEnhancerTask +
      An ant task for running the ClassEnhancer.
      +
      ClassMetadata +
      The metadata for a persistent class.
      +
      EntityMetadata +
      The metadata for a persistent entity class.
      +
      EntityModel +
      The base class for classes that provide entity model metadata.
      +
      FieldMetadata +
      The metadata for a key field.
      +
      PrimaryKeyMetadata +
      The metadata for a primary key field.
      +
      SecondaryKeyMetadata +
      The metadata for a secondary key field.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + +
      Enum Summary 
      EnumDescription
      DeleteAction +
      Specifies the action to take when a related entity is deleted having a primary key value that exists as a secondary key value for this entity.
      +
      Relationship +
      Defines the relationship between instances of the entity class and the secondary keys.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Annotation Types Summary 
      Annotation TypeDescription
      Entity +
      Indicates a persistent entity class.
      +
      KeyField +
      Indicates the sorting position of a key field in a composite key class when the Comparable interface is not implemented.
      +
      NotPersistent +
      Overrides the default rules for field persistence and defines a field as being non-persistent even when it is not declared with the transient keyword.
      +
      NotTransient +
      Overrides the default rules for field persistence and defines a field as being persistent even when it is declared with the transient keyword.
      +
      Persistent +
      Identifies a persistent class that is not an Entity class or a simple type.
      +
      PrimaryKey +
      Indicates the primary key field of an entity class.
      +
      SecondaryKey +
      Indicates a secondary key field of an entity class.
      +
      +
    • +
    + + + +

    Package com.sleepycat.persist.model Description

    +
    Annotations for defining a persistent object model.
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/package-tree.html b/docs/java/com/sleepycat/persist/model/package-tree.html new file mode 100644 index 0000000..a2fd4a1 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/package-tree.html @@ -0,0 +1,191 @@ + + + + + +com.sleepycat.persist.model Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Hierarchy For Package com.sleepycat.persist.model

    +Package Hierarchies: + +
    +
    +

    Class Hierarchy

    +
      +
    • java.lang.Object +
        +
      • com.sleepycat.persist.model.ClassEnhancer (implements java.lang.instrument.ClassFileTransformer)
      • +
      • com.sleepycat.persist.model.ClassMetadata (implements java.io.Serializable)
      • +
      • com.sleepycat.persist.model.EntityMetadata (implements java.io.Serializable)
      • +
      • com.sleepycat.persist.model.EntityModel + +
      • +
      • com.sleepycat.persist.model.FieldMetadata (implements java.io.Serializable) + +
      • +
      • org.apache.tools.ant.ProjectComponent (implements java.lang.Cloneable) + +
      • +
      +
    • +
    +

    Interface Hierarchy

    + +

    Annotation Type Hierarchy

    +
      +
    • com.sleepycat.persist.model.NotTransient (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.Entity (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.KeyField (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.PrimaryKey (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.SecondaryKey (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.Persistent (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.NotPersistent (implements java.lang.annotation.Annotation)
    • +
    +

    Enum Hierarchy

    +
      +
    • java.lang.Object +
        +
      • java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable) + +
      • +
      +
    • +
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/model/package-use.html b/docs/java/com/sleepycat/persist/model/package-use.html new file mode 100644 index 0000000..44de216 --- /dev/null +++ b/docs/java/com/sleepycat/persist/model/package-use.html @@ -0,0 +1,283 @@ + + + + + +Uses of Package com.sleepycat.persist.model (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Uses of Package
    com.sleepycat.persist.model

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/com/sleepycat/persist/package-frame.html b/docs/java/com/sleepycat/persist/package-frame.html new file mode 100644 index 0000000..39a1787 --- /dev/null +++ b/docs/java/com/sleepycat/persist/package-frame.html @@ -0,0 +1,38 @@ + + + + + +com.sleepycat.persist (Oracle - Berkeley DB Java Edition API) + + + + + +

    com.sleepycat.persist

    + + + diff --git a/docs/java/com/sleepycat/persist/package-summary.html b/docs/java/com/sleepycat/persist/package-summary.html new file mode 100644 index 0000000..d82faad --- /dev/null +++ b/docs/java/com/sleepycat/persist/package-summary.html @@ -0,0 +1,837 @@ + + + + + +com.sleepycat.persist (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Package com.sleepycat.persist

    +
    +
    The Direct Persistence Layer (DPL) adds a persistent object model to the Berkeley DB transactional engine.
    +
    +

    See: Description

    +
    +
    +
      +
    • + + + + + + + + + + + + + + + + + + + + +
      Interface Summary 
      InterfaceDescription
      EntityCursor<V> +
      Traverses entity values or key values and allows deleting or updating the entity at the current cursor position.
      +
      EntityIndex<K,V> +
      The interface for accessing keys and entities via a primary or secondary index.
      +
      ForwardCursor<V> +
      Cursor operations limited to traversing forward.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Class Summary 
      ClassDescription
      EntityJoin<PK,E> +
      Performs an equality join on two or more secondary keys.
      +
      EntityResult<V> +
      Used to return an entity value from a 'get' operation along with an OperationResult.
      +
      EntityStore +
      A store for managing persistent entity objects.
      +
      PrimaryIndex<PK,E> +
      The primary index for an entity class and its primary key.
      +
      SecondaryIndex<SK,PK,E> +
      The secondary index for an entity class and a secondary key.
      +
      StoreConfig +
      Configuration properties used with an EntityStore or RawStore.
      +
      StoreConfigBeanInfo 
      +
    • +
    • + + + + + + + + + + + + + + + + + + + + +
      Exception Summary 
      ExceptionDescription
      IndexNotAvailableException +
      Thrown by the getPrimaryIndex, getSecondaryIndex and getSubclassIndex methods when an index has not yet been created.
      +
      StoreExistsException +
      Thrown by the EntityStore constructor when the ExclusiveCreate configuration parameter is true and the store's internal catalog database already exists.
      +
      StoreNotFoundException +
      Thrown by the EntityStore constructor when the AllowCreate configuration parameter is false and the store's internal catalog database does not exist.
      +
      +
    • +
    + + + +

    Package com.sleepycat.persist Description

    +
    The Direct Persistence Layer (DPL) adds a persistent object model to the Berkeley DB transactional engine.

    Package Specification

    + + + +

    Introduction

    + +

    The Direct Persistence Layer (DPL) was designed to meet the following requirements.

    +
      +
    • A type-safe and convenient API is provided for accessing persistent objects. The use of Java generic types, although optional, is fully exploited to provide type safety. For example:
      +PrimaryIndex<Long,Employer> employerById = ...;
      +long employerId = ...;
      +Employer employer = employerById.get(employerId);
      +
    • +
    • All Java types are allowed to be persistent without requiring that they implement special interfaces. Persistent fields may be private, package-private (default access), protected, or public. No hand-coding of bindings is required. However, each persistent class must have a default constructor. For example:
      +@Persistent
      +class Address {
      +    String street;
      +    String city;
      +    String state;
      +    int zipCode;
      +    private Address() {}
      +}
      +
    • +
    • Bytecode enhancement provides fully optimized bindings that do not use Java reflection.
    • +
    • It is easy to define primary and secondary keys. No external schema is required and Java annotations may be used for defining all metadata. Extensions may derive metadata from other sources. For example, the following Employer class is defined as a persistent entity with a primary key field id and the secondary key field name:
    • +
      +@Entity
      +class Employer {
      +
      +    @PrimaryKey(sequence="ID")
      +    long id;
      +
      +    @SecondaryKey(relate=ONE_TO_ONE)
      +    String name;
      +
      +    Address address;
      +
      +    private Employer() {}
      +}
      +
    • Interoperability with external components is supported via the Java collections framework. Any primary or secondary index can be accessed using a standard java.util collection. For example:
      java.util.SortedMap<String,Employer> map = employerByName.sortedMap();
      +
    • +
    • Class evolution is explicitly supported. Compatible changes (adding fields and type widening) are performed automatically and transparently. For example, without any special configuration a street2 field may be added to the Address class and the type of the zipCode field may be changed from int to long:
      +@Persistent
      +class Address {
      +    String street;
      +    String street2;
      +    String city;
      +    String state;
      +    long zipCode;
      +    private Address() {}
      +}
      Many incompatible class changes, such as renaming fields or refactoring a single class, can be performed using Mutations. Mutations are automatically applied lazily as data is accessed, avoiding downtime to convert large databases during a software upgrade.

      Complex refactoring involving multiple classes may be performed using a store conversion. The DPL always provides access to your data via a RawStore, no matter what changes have been made to persistent classes.

      +
    • +
      +
    • The performance of the Berkeley DB transactional engine is not compromised. Operations are internally mapped directly to the engine API, object bindings are lightweight, and all engine tuning parameters are available. For example, a "dirty read" may be performed using an optional LockMode parameter:
      Employer employer = employerByName.get(null, "Gizmo Inc", LockMode.READ_UNCOMMITTED);
      For high-performance applications, DatabaseConfig parameters may be used to tune the performance of the Berkeley DB engine. For example, the size of an internal Btree node can be specified as follows:
      +DatabaseConfig config = store.getPrimaryConfig(Employer.class);
      +config.setNodeMaxEntries(64);
      +store.setPrimaryConfig(config);
      +
    • +
    + +

    The Entity Model

    + +

    The DPL is intended for applications that represent persistent domain objects using Java classes. An entity class is an ordinary Java class that has a primary key and is stored and accessed using a primary index. It may also have any number of secondary keys, and entities may be accessed by secondary key using a secondary index.

    + +

    An entity class may be defined with the Entity annotation. For each entity class, its primary key may be defined using the PrimaryKey annotation and any number of secondary keys may be defined using the SecondaryKey annotation.

    + +

    In the following example, the Person.ssn (social security number) field is the primary key and the Person.employerIds field is a many-to-many secondary key.

    +
    +@Entity
    +class Person {
    +
    +    @PrimaryKey
    +    String ssn;
    +
    +    String name;
    +    Address address;
    +
    +    @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Employer.class)
    +    Set<Long> employerIds = new HashSet<Long>();
    +
    +    private Person() {} // For bindings
    +}
    + +

    A set of entity classes constitutes an entity model. In addition to isolated entity classes, an entity model may contain relationships between entities. Relationships may be defined using the SecondaryKey annotation. Many-to-one, one-to-many, many-to-many and one-to-one relationships are supported, as well as foreign key constraints.

    + +

    In the example above, a relationship between the Person and Employer entities is defined via the Person.employerIds field. The relatedEntity=Employer.class annotation property establishes foreign key constraints to guarantee that every element of the employerIds set is a valid Employer primary key.

    + +

    For more information on the entity model, see the AnnotationModel and the Entity annotation.

    + +

    The root object in the DPL is the EntityStore. An entity store manages any number of objects for each entity class defined in the model. The store provides access to the primary and secondary indices for each entity class, for example:

    + +
    +EntityStore store = new EntityStore(...);
    +
    +PrimaryIndex<String,Person> personBySsn =
    +    store.getPrimaryIndex(String.class, Person.class);
    + +

    A brief example

    + +

    The following example shows how to define an entity model and how to store and access persistent objects. Exception handling is omitted for brevity.

    + +
    +import java.io.File;
    +import java.util.HashSet;
    +import java.util.Set;
    +
    +import com.sleepycat.je.DatabaseException;
    +import com.sleepycat.je.Environment;
    +import com.sleepycat.je.EnvironmentConfig;
    +import com.sleepycat.persist.EntityCursor;
    +import com.sleepycat.persist.EntityIndex;
    +import com.sleepycat.persist.EntityStore;
    +import com.sleepycat.persist.PrimaryIndex;
    +import com.sleepycat.persist.SecondaryIndex;
    +import com.sleepycat.persist.StoreConfig;
    +import com.sleepycat.persist.model.Entity;
    +import com.sleepycat.persist.model.Persistent;
    +import com.sleepycat.persist.model.PrimaryKey;
    +import com.sleepycat.persist.model.SecondaryKey;
    +import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
    +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
    +import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
    +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
    +import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
    +
    +// An entity class.
    +//
    +@Entity
    +class Person {
    +
    +    @PrimaryKey
    +    String ssn;
    +
    +    String name;
    +    Address address;
    +
    +    @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class)
    +    String parentSsn;
    +
    +    @SecondaryKey(relate=ONE_TO_MANY)
    +    Set<String> emailAddresses = new HashSet<String>();
    +
    +    @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Employer.class,
    +                                       onRelatedEntityDelete=NULLIFY)
    +    Set<Long> employerIds = new HashSet<Long>();
    +
    +    Person(String name, String ssn, String parentSsn) {
    +        this.name = name;
    +        this.ssn = ssn;
    +        this.parentSsn = parentSsn;
    +    }
    +
    +    private Person() {} // For bindings
    +}
    +
    +// Another entity class.
    +//
    +@Entity
    +class Employer {
    +
    +    @PrimaryKey(sequence="ID")
    +    long id;
    +
    +    @SecondaryKey(relate=ONE_TO_ONE)
    +    String name;
    +
    +    Address address;
    +
    +    Employer(String name) {
    +        this.name = name;
    +    }
    +
    +    private Employer() {} // For bindings
    +}
    +
    +// A persistent class used in other classes.
    +//
    +@Persistent
    +class Address {
    +    String street;
    +    String city;
    +    String state;
    +    int zipCode;
    +    private Address() {} // For bindings
    +}
    +
    +// The data accessor class for the entity model.
    +//
    +class PersonAccessor {
    +
    +    // Person accessors
    +    //
    +    PrimaryIndex<String,Person> personBySsn;
    +    SecondaryIndex<String,String,Person> personByParentSsn;
    +    SecondaryIndex<String,String,Person> personByEmailAddresses;
    +    SecondaryIndex<Long,String,Person> personByEmployerIds;
    +
    +    // Employer accessors
    +    //
    +    PrimaryIndex<Long,Employer> employerById;
    +    SecondaryIndex<String,Long,Employer> employerByName;
    +
    +    // Opens all primary and secondary indices.
    +    //
    +    public PersonAccessor(EntityStore store)
    +        throws DatabaseException {
    +
    +        personBySsn = store.getPrimaryIndex(
    +            String.class, Person.class);
    +
    +        personByParentSsn = store.getSecondaryIndex(
    +            personBySsn, String.class, "parentSsn");
    +
    +        personByEmailAddresses = store.getSecondaryIndex(
    +            personBySsn, String.class, "emailAddresses");
    +
    +        personByEmployerIds = store.getSecondaryIndex(
    +            personBySsn, Long.class, "employerIds");
    +
    +        employerById = store.getPrimaryIndex(
    +            Long.class, Employer.class);
    +
    +        employerByName = store.getSecondaryIndex(
    +            employerById, String.class, "name"); 
    +    }
    +}
    +
    +// Open a transactional Berkeley DB engine environment.
    +//
    +EnvironmentConfig envConfig = new EnvironmentConfig();
    +envConfig.setAllowCreate(true);
    +envConfig.setTransactional(true);
    +Environment env = new Environment(new File("/my/data"), envConfig);
    +
    +// Open a transactional entity store.
    +//
    +StoreConfig storeConfig = new StoreConfig();
    +storeConfig.setAllowCreate(true);
    +storeConfig.setTransactional(true);
    +EntityStore store = new EntityStore(env, "PersonStore", storeConfig);
    +
    +// Initialize the data access object.
    +//
    +PersonAccessor dao = new PersonAccessor(store);
    +
    +// Add a parent and two children using the Person primary index.  Specifying a
    +// non-null parentSsn adds the child Person to the sub-index of children for
    +// that parent key.
    +//
    +dao.personBySsn.put(new Person("Bob Smith", "111-11-1111", null));
    +dao.personBySsn.put(new Person("Mary Smith", "333-33-3333", "111-11-1111"));
    +dao.personBySsn.put(new Person("Jack Smith", "222-22-2222", "111-11-1111"));
    +
    +// Print the children of a parent using a sub-index and a cursor.
    +//
    +EntityCursor<Person> children =
    +    dao.personByParentSsn.subIndex("111-11-1111").entities();
    +try {
    +    for (Person child : children) {
    +        System.out.println(child.ssn + ' ' + child.name);
    +    }
    +} finally {
    +    children.close();
    +}
    +
    +// Get Bob by primary key using the primary index.
    +//
    +Person bob = dao.personBySsn.get("111-11-1111");
    +assert bob != null;
    +
    +// Create two employers.  Their primary keys are assigned from a sequence.
    +//
    +Employer gizmoInc = new Employer("Gizmo Inc");
    +Employer gadgetInc = new Employer("Gadget Inc");
    +dao.employerById.put(gizmoInc);
    +dao.employerById.put(gadgetInc);
    +
    +// Bob has two jobs and two email addresses.
    +//
    +bob.employerIds.add(gizmoInc.id);
    +bob.employerIds.add(gadgetInc.id);
    +bob.emailAddresses.add("bob@bob.com");
    +bob.emailAddresses.add("bob@gmail.com");
    +
    +// Update Bob's record.
    +//
    +dao.personBySsn.put(bob);
    +
    +// Bob can now be found by both email addresses.
    +//
    +bob = dao.personByEmailAddresses.get("bob@bob.com");
    +assert bob != null;
    +bob = dao.personByEmailAddresses.get("bob@gmail.com");
    +assert bob != null;
    +
    +// Bob can also be found as an employee of both employers.
    +//
    +EntityIndex<String,Person> employees;
    +employees = dao.personByEmployerIds.subIndex(gizmoInc.id);
    +assert employees.contains("111-11-1111");
    +employees = dao.personByEmployerIds.subIndex(gadgetInc.id);
    +assert employees.contains("111-11-1111");
    +
    +// When an employer is deleted, the onRelatedEntityDelete=NULLIFY for the
    +// employerIds key causes the deleted ID to be removed from Bob's employerIds.
    +//
    +dao.employerById.delete(gizmoInc.id);
    +bob = dao.personBySsn.get("111-11-1111");
    +assert !bob.employerIds.contains(gizmoInc.id);
    +
    +store.close();
    +env.close();
    +
    +

    The example illustrates several characteristics of the DPL:

    +
      +
    • Persistent data and keys are defined in terms of instance fields. For brevity the example does not show getter and setter methods, although these would normally exist to provide encapsulation. The DPL accesses fields during object serialization and deserialization, rather than calling getter/setter methods, leaving business methods free to enforce arbitrary validation rules. For example:
      +@Persistent
      +public class ConstrainedValue {
      +
      +    private int min;
      +    private int max;
      +    private int value;
      +
      +    private ConstrainedValue() {} // For bindings
      +
      +    public ConstrainedValue(int min, int max) {
      +        this.min = min;
      +        this.max = max;
      +        value = min;
      +    }
      +
      +    public void setValue(int value) {
      +        if (value < min || value > max) {
      +            throw new IllegalArgumentException("out of range");
      +        }
      +        this.value = value;
      +    }
      +}
      +
      The above setValue method would not work if it were called during object deserialization, since the order in which fields are set is arbitrary: the min and max fields may not yet be set when value is set.
    • +
      +
    • The example creates a transactional store and therefore all operations are transaction protected. Because no explicit transactions are used, auto-commit is used implicitly.

      Explicit transactions may also be used to group multiple operations in a single transaction, and all access methods have optional transaction parameters. For example, the following two operations are performed atomically in a transaction:

      +Transaction txn = env.beginTransaction(null, null);
      +dao.employerById.put(txn, gizmoInc);
      +dao.employerById.put(txn, gadgetInc);
      +txn.commit();
      +
      +
    • +
    • To provide maximum performance, the DPL operations map directly to the Btree operations of the Berkeley DB engine. Unlike other persistence approaches, keys and indices are exposed for direct access and performance tuning.

      Queries are implemented by calling methods of the primary and secondary indices. An EntityJoin class is also available for performing equality joins. For example, the following code queries all of Bob's children that work for Gizmo Inc:

      +EntityJoin<String,Person> join = new EntityJoin<String,Person>(dao.personBySsn);
      +
      +join.addCondition(dao.personByParentSsn, "111-11-1111");
      +join.addCondition(dao.personByEmployerIds, gizmoInc.id);
      +
      +ForwardCursor<Person> results = join.entities();
      +try {
      +    for (Person person : results) {
      +        System.out.println(person.ssn + ' ' + person.name);
      +    }
      +} finally {
      +    results.close();
      +}
      +
    • +
    • Object relationships are based on keys. When a Person with a given employer ID in its employerIds set is stored, the Person object becomes part of the collection of employees for that employer. This collection of employees is accessed using a SecondaryIndex.subIndex for the employer ID, as shown below:
      +EntityCursor<Person> employees =
      +    dao.personByEmployerIds.subIndex(gizmoInc.id).entities();
      +try {
      +    for (Person employee : employees) {
      +        System.out.println(employee.ssn + ' ' + employee.name);
      +    }
      +} finally {
      +    employees.close();
      +}
      +
    • +
    • Note that when Bob's employer is deleted in the example, the Person object for Bob is refetched to see the change to its employerIds. This is because objects are accessed by value, not by reference. In other words, no object cache or "persistence context" is maintained by the DPL. The low-level caching of the embedded Berkeley DB engine, combined with lightweight object bindings, provides maximum performance.
    • +
    + +

    Which API to use?

    + +

    The Berkeley DB engine has a Base API, a Collections API and a Direct Persistence Layer (DPL). Follow these guidelines if you are not sure which API to use:

    +
      +
    • When Java classes are used to represent domain objects in an application, the DPL is recommended. The more domain classes, the more value there is in using annotations to define your schema.
    • +
      +
    • When porting an application between Berkeley DB and Berkeley DB Java Edition, or when you've chosen not to use Java classes to represent domain objects, then the Base API is recommended. You may also prefer to use this API if you have very few domain classes.
    • +
      +
    • The Collections API is useful for interoperating with external components because it conforms to the standard Java Collections Framework. It is therefore useful in combination with both the Base API and the DPL. You may prefer this API because it provides the familiar Java Collections interface.
    • +
    + +

    Java 1.5 dependencies

    + +

    The DPL uses two features of Java 1.5: generic types and annotations. If you wish to avoid using these two Java 1.5 features, the DPL provides options for doing so.

    + +

    Generic Types

    + +

    Generic types are used to provide type safety, especially for the PrimaryIndex, SecondaryIndex, and EntityCursor classes. If you don't wish to use generic types, you can simply not declare your index and cursor objects using generic type parameters. This is the same as using the Java 1.5 Collections Framework without using generic types.
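
    For instance (a sketch reusing the store and Employer class from the example above), the non-generic form of the earlier declarations would be:

    // Raw (non-generic) declarations; an explicit cast is needed on retrieval.
    PrimaryIndex employerById = store.getPrimaryIndex(Long.class, Employer.class);
    Employer employer = (Employer) employerById.get(Long.valueOf(1));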

    + +

    Annotations

    + +

    If you don't wish to use annotations, you can provide another source of metadata by implementing an EntityModel class. For example, naming conventions, static members, or an XML configuration file might be used as a source of metadata. However, if you don't use annotations then you won't be able to use bytecode enhancement, which is described next.

    + +

    Bytecode Enhancement

    + +

    The persistent fields of a class may be private, package-private, protected or public. The DPL can access persistent fields either by bytecode enhancement or by reflection.

    + +

    Bytecode enhancement may be used to fully optimize binding performance and to avoid the use of Java reflection. In applications that are CPU bound, avoiding Java reflection can have a significant performance impact.

    + +

    Bytecode enhancement may be performed either at runtime or at build time (offline). When enhancement is performed at runtime, persistent classes are enhanced as they are loaded. When enhancement is performed offline, class files are enhanced during a post-compilation step. Both a main program and an Ant task are provided for performing offline enhancement. Enhanced classes are used to efficiently access all fields and default constructors, including non-public members.

    + +

    See ClassEnhancer for bytecode enhancement configuration details.

    + +

    If bytecode enhancement is not used as described above, the DPL will use reflection for accessing persistent fields and the default constructor. The AccessibleObject.setAccessible method is called by the DPL to enable access to non-public fields and constructors. If you are running under a Java security manager you must configure your security policy to allow the following permission:

    + +

    permission java.lang.reflect.ReflectPermission "suppressAccessChecks";

    There are three cases where setting the above permission is not required:

    +
      +
    1. If you are not running under a Java Security Manager, then access to non-public members via reflection is not restricted. This is the default for J2SE.

    2. If all persistent fields and default constructors are public then they can be accessed via reflection without special permissions, even when running under a Java Security Manager. However, declaring public instance fields is not recommended because it discourages encapsulation.

    3. If bytecode enhancement is used as described above, then reflection will not be used.
    + +

    It is well known that executing generated code is faster than reflection. However, this performance difference may or may not impact a given application, since it may be overshadowed by other factors. Performance testing in a realistic usage scenario is the best way to determine the impact. If you are determined to avoid the use of reflection, then option 3 above is recommended.


diff --git a/docs/java/com/sleepycat/persist/package-tree.html b/docs/java/com/sleepycat/persist/package-tree.html
new file mode 100644
index 0000000..ae66e03
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/package-tree.html
@@ -0,0 +1,205 @@

    com.sleepycat.persist Class Hierarchy (Oracle - Berkeley DB Java Edition API)

    Hierarchy For Package com.sleepycat.persist


    Class Hierarchy


    Interface Hierarchy

    • java.lang.AutoCloseable
        • java.io.Closeable
    • com.sleepycat.persist.EntityIndex<K,V>
    • java.lang.Iterable<T>

diff --git a/docs/java/com/sleepycat/persist/package-use.html b/docs/java/com/sleepycat/persist/package-use.html
new file mode 100644
index 0000000..7bdb7b9
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/package-use.html
@@ -0,0 +1,251 @@

    Uses of Package com.sleepycat.persist (Oracle - Berkeley DB Java Edition API)

    Uses of Package
    com.sleepycat.persist


diff --git a/docs/java/com/sleepycat/persist/raw/RawField.html b/docs/java/com/sleepycat/persist/raw/RawField.html
new file mode 100644
index 0000000..dfe216c
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/RawField.html
@@ -0,0 +1,263 @@

    RawField (Oracle - Berkeley DB Java Edition API)
    com.sleepycat.persist.raw
    +

    Interface RawField

    public interface RawField

    The definition of a field in a RawType.

    RawField objects are thread-safe. Multiple threads may safely call the methods of a shared RawField object.

    Author:
        Mark Hayes
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        Modifier and Type    Method and Description
        java.lang.String     getName()
                             Returns the name of the field.
        RawType              getType()
                             Returns the type of the field, without expanding parameterized types, or null if the type is an interface type or the Object class.
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getName

          +
          java.lang.String getName()
          +
          Returns the name of the field.
          +
          +
          Returns:
          +
          the name of the field.
          +
          +
        • +
        + + + +
          +
        • +

          getType

          +
          RawType getType()
          +
          Returns the type of the field, without expanding parameterized types, + or null if the type is an interface type or the Object class.
          +
          +
          Returns:
          +
          the type of the field.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/persist/raw/RawObject.html b/docs/java/com/sleepycat/persist/raw/RawObject.html
new file mode 100644
index 0000000..1073a2a
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/RawObject.html
@@ -0,0 +1,525 @@

    RawObject (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.persist.raw
    +

    Class RawObject

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.persist.raw.RawObject
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class RawObject
      extends java.lang.Object

      A raw instance that can be used with a RawStore or Conversion. A RawObject is used to represent instances of complex types (persistent classes with fields), arrays, and enum values. It is not used to represent non-enum simple types, which are represented as simple objects. This includes primitives, which are represented as instances of their wrapper class.

      RawObject objects are thread-safe. Multiple threads may safely call the methods of a shared RawObject object.
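
      For illustration, a sketch of building a raw instance by hand, as a Conversion might do; the Person type name and its fields are assumptions invented for this example:

          import java.util.HashMap;
          import java.util.Map;
          import com.sleepycat.persist.raw.RawObject;
          import com.sleepycat.persist.raw.RawType;

          class RawObjectExample {
              static RawObject makePerson(RawType personType) {
                  Map<String, Object> values = new HashMap<String, Object>();
                  values.put("id", Long.valueOf(1)); /* simple types are wrapper objects */
                  values.put("name", "Alice");
                  /* Null superObject: the superclass is assumed to be Object. */
                  return new RawObject(personType, values, null);
              }
          }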

      +
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        Constructor and Description
        RawObject(RawType type, java.util.Map<java.lang.String,java.lang.Object> values, RawObject superObject)
            Creates a raw object with a given set of field values for a complex type.
        RawObject(RawType type, java.lang.Object[] elements)
            Creates a raw object with the given array elements for an array type.
        RawObject(RawType type, java.lang.String enumConstant)
            Creates a raw object with the given enum value for an enum type.
      • +
      + +
        +
      • + + +

        Method Summary

        Modifier and Type    Method and Description
        boolean              equals(java.lang.Object other)
        java.lang.Object[]   getElements()
                             Returns the array of elements for an array type, or null for a complex type or an enum type.
        java.lang.String     getEnum()
                             Returns the enum constant String for an enum type, or null for a complex type or an array type.
        RawObject            getSuper()
                             Returns the instance of the superclass, or null if the superclass is Object or Enum.
        RawType              getType()
                             Returns the raw type information for this raw object.
        java.util.Map<java.lang.String,java.lang.Object>
                             getValues()
                             Returns a map of field name to value for a complex type, or null for an array type or an enum type.
        int                  hashCode()
        java.lang.String     toString()
                             Returns an XML representation of the raw object.
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, finalize, getClass, notify, notifyAll, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          RawObject

          +
          public RawObject(RawType type,
          +                 java.util.Map<java.lang.String,java.lang.Object> values,
          +                 RawObject superObject)
          +
          Creates a raw object with a given set of field values for a complex + type.
          +
          +
          Parameters:
          +
          type - the type of this raw object.
          +
          values - a map of field name to value for each declared field in + the class, or null to create an empty map. Each value in the map is a + RawObject, a simple + type instance, or null.
          +
          superObject - the instance of the superclass, or null if the + superclass is Object.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the type argument is an array type.
          +
          +
        • +
        + + + +
          +
        • +

          RawObject

          +
          public RawObject(RawType type,
          +                 java.lang.Object[] elements)
          +
          Creates a raw object with the given array elements for an array type.
          +
          +
          Parameters:
          +
          type - the type of this raw object.
          +
          elements - an array of elements. Each element in the array is a + RawObject, a simple + type instance, or null.
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the type argument is not an array + type.
          +
          +
        • +
        + + + +
          +
        • +

          RawObject

          +
          public RawObject(RawType type,
          +                 java.lang.String enumConstant)
          +
          Creates a raw object with the given enum value for an enum type.
          +
          +
          Parameters:
          +
          type - the type of this raw object.
          +
          enumConstant - the String value of this enum constant; must be + one of the Strings returned by RawType.getEnumConstants().
          +
          Throws:
          +
          java.lang.IllegalArgumentException - if the type argument is not an enum type.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getType

          +
          public RawType getType()
          +
          Returns the raw type information for this raw object. + +

          Note that if this object is unevolved, the returned type may be + different from the current type returned by EntityModel.getRawType for the same class name. + This can only occur in a Conversion.convert.

          +
          +
          Returns:
          +
          the RawType.
          +
          +
        • +
        + + + +
          +
        • +

          getValues

          +
          public java.util.Map<java.lang.String,java.lang.Object> getValues()
          +
          Returns a map of field name to value for a complex type, or null for an + array type or an enum type. The map contains a String key for each + declared field in the class. Each value in the map is a RawObject, a simple + type instance, or null. + +

          There will be an entry in the map for every field declared in this + type, as determined by RawType.getFields() for the type returned + by getType(). Values in the map may be null for fields with + non-primitive types.

          +
          +
          Returns:
          +
          the map of field name to value, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getElements

          +
          public java.lang.Object[] getElements()
          +
          Returns the array of elements for an array type, or null for a complex + type or an enum type. Each element in the array is a RawObject, + a simple type instance, + or null.
          +
          +
          Returns:
          +
          the array of elements, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getEnum

          +
          public java.lang.String getEnum()
          +
          Returns the enum constant String for an enum type, or null for a complex + type or an array type. The String returned will be one of the Strings + returned by RawType.getEnumConstants().
          +
          +
          Returns:
          +
          the enum constant String, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getSuper

          +
          public RawObject getSuper()
          +
          Returns the instance of the superclass, or null if the superclass is + Object or Enum.
          +
          +
          Returns:
          +
          the instance of the superclass, or null.
          +
          +
        • +
        + + + +
          +
        • +

          equals

          +
          public boolean equals(java.lang.Object other)
          +
          +
          Overrides:
          +
          equals in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          hashCode

          +
          public int hashCode()
          +
          +
          Overrides:
          +
          hashCode in class java.lang.Object
          +
          +
        • +
        + + + +
          +
        • +

          toString

          +
          public java.lang.String toString()
          +
          Returns an XML representation of the raw object.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/persist/raw/RawStore.html b/docs/java/com/sleepycat/persist/raw/RawStore.html
new file mode 100644
index 0000000..e53c38d
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/RawStore.html
@@ -0,0 +1,502 @@

    RawStore (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.persist.raw
    +

    Class RawStore

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.persist.raw.RawStore
      • +
      +
    • +
    +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Closeable, java.lang.AutoCloseable
      +
      +
      +
      +
      public class RawStore
      extends java.lang.Object
      implements java.io.Closeable

      Provides access to the raw data in a store for use by general purpose tools. A RawStore provides access to stored entities without using entity classes or key classes. Keys are represented as simple type objects or, for composite keys, as RawObject instances, and entities are represented as RawObject instances.

      RawStore objects are thread-safe. Multiple threads may safely call the methods of a shared RawStore object.

      When using a RawStore, the current persistent class definitions are not used. Instead, the previously stored metadata and class definitions are used. This has several implications:

      1. An EntityModel may not be specified using StoreConfig.setModel(com.sleepycat.persist.model.EntityModel). In other words, the configured model must be null (the default).

      2. When storing entities, their format will not automatically be evolved to the current class definition, even if the current class definition has changed.
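
      To make the usage concrete, a sketch that prints every entity of one class without having the entity classes on the classpath; the store name and entity class name are invented placeholders:

          import com.sleepycat.je.Environment;
          import com.sleepycat.persist.EntityCursor;
          import com.sleepycat.persist.PrimaryIndex;
          import com.sleepycat.persist.raw.RawObject;
          import com.sleepycat.persist.raw.RawStore;

          class RawDumpExample {
              static void dump(Environment env) {
                  /* Null config: default properties; the store must exist. */
                  RawStore store = new RawStore(env, "MyStore", null);
                  try {
                      PrimaryIndex<Object, RawObject> index =
                          store.getPrimaryIndex("com.example.Person");
                      EntityCursor<RawObject> cursor = index.entities();
                      try {
                          for (RawObject entity : cursor) {
                              System.out.println(entity); /* toString is XML */
                          }
                      } finally {
                          cursor.close();
                      }
                  } finally {
                      store.close();
                  }
              }
          }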
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Summary

        Constructor and Description
        RawStore(Environment env, java.lang.String storeName, StoreConfig config)
            Opens an entity store for raw data access.
      • +
      + +
        +
      • + + +

        Method Summary

        + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        Modifier and Type    Method and Description
        void                 close()
                             Closes all databases and sequences that were opened by this store.
        StoreConfig          getConfig()
                             Returns a copy of the entity store configuration.
        Environment          getEnvironment()
                             Returns the environment associated with this store.
        EntityModel          getModel()
                             Returns the last configured and stored entity model for this store.
        Mutations            getMutations()
                             Returns the set of mutations that were configured and stored previously.
        PrimaryIndex<java.lang.Object,RawObject>
                             getPrimaryIndex(java.lang.String entityClass)
                             Opens the primary index for a given entity class.
        SecondaryIndex<java.lang.Object,java.lang.Object,RawObject>
                             getSecondaryIndex(java.lang.String entityClass, java.lang.String keyName)
                             Opens the secondary index for a given entity class and secondary key name.
        java.lang.String     getStoreName()
                             Returns the name of this store.
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          RawStore

          +
          public RawStore(Environment env,
          +                java.lang.String storeName,
          +                StoreConfig config)
          +         throws StoreNotFoundException,
          +                DatabaseException
          +
          Opens an entity store for raw data access.
          +
          +
          Parameters:
          +
          env - an open Berkeley DB environment.
          +
          storeName - the name of the entity store within the given + environment.
          +
          config - the store configuration, or null to use default + configuration properties.
          +
          Throws:
          +
          StoreNotFoundException - when the AllowCreate configuration parameter is false + and the store's internal catalog database does not exist.
          +
          java.lang.IllegalArgumentException - if the Environment is + read-only and the config ReadOnly property is false.
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getPrimaryIndex

          +
          public PrimaryIndex<java.lang.Object,RawObject> getPrimaryIndex(java.lang.String entityClass)
          +                                                         throws DatabaseException
          +
          Opens the primary index for a given entity class.
          +
          +
          Parameters:
          +
          entityClass - the name of the entity class.
          +
          Returns:
          +
          the PrimaryIndex.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          getSecondaryIndex

          +
          public SecondaryIndex<java.lang.Object,java.lang.Object,RawObject> getSecondaryIndex(java.lang.String entityClass,
          +                                                                                     java.lang.String keyName)
          +                                                                              throws DatabaseException
          +
          Opens the secondary index for a given entity class and secondary key + name.
          +
          +
          Parameters:
          +
          entityClass - the name of the entity class.
          +
          keyName - the secondary key name.
          +
          Returns:
          +
          the SecondaryIndex.
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        + + + +
          +
        • +

          getEnvironment

          +
          public Environment getEnvironment()
          +
          Returns the environment associated with this store.
          +
          +
          Returns:
          +
          the Environment.
          +
          +
        • +
        + + + +
          +
        • +

          getConfig

          +
          public StoreConfig getConfig()
          +
          Returns a copy of the entity store configuration.
          +
          +
          Returns:
          +
          the StoreConfig.
          +
          +
        • +
        + + + +
          +
        • +

          getStoreName

          +
          public java.lang.String getStoreName()
          +
          Returns the name of this store.
          +
          +
          Returns:
          +
          the store name.
          +
          +
        • +
        + + + +
          +
        • +

          getModel

          +
          public EntityModel getModel()
          +
          Returns the last configured and stored entity model for this store.
          +
          +
          Returns:
          +
          the EntityModel.
          +
          +
        • +
        + + + +
          +
        • +

          getMutations

          +
          public Mutations getMutations()
          +
          Returns the set of mutations that were configured and stored previously.
          +
          +
          Returns:
          +
          the Mutations.
          +
          +
        • +
        + + + +
          +
        • +

          close

          +
          public void close()
          +           throws DatabaseException
          +
          Closes all databases and sequences that were opened by this store. No databases opened via this store may be in use.

          WARNING: To guard against memory leaks, the application should discard all references to the closed handle. While BDB makes an effort to discard references from closed objects to the allocated memory for an environment, this behavior is not guaranteed. The safe course of action for an application is to discard all references to closed BDB objects.

          +
          +
          Specified by:
          +
          close in interface java.io.Closeable
          +
          Specified by:
          +
          close in interface java.lang.AutoCloseable
          +
          Throws:
          +
          DatabaseException - the base class for all BDB exceptions.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +
    + + +

diff --git a/docs/java/com/sleepycat/persist/raw/RawType.html b/docs/java/com/sleepycat/persist/raw/RawType.html
new file mode 100644
index 0000000..2e9dd01
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/RawType.html
@@ -0,0 +1,606 @@

    RawType (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.persist.raw
    +

    Interface RawType

    +
    +
    +
    +
      +
    • +
      +
      +
      public interface RawType

      The type definition for a simple or complex persistent type, or an array of persistent types.

      RawType objects are thread-safe. Multiple threads may safely call the methods of a shared RawType object.
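
      For example, a small sketch that prints the declared fields of a complex type, walking the superclass chain with the methods described below:

          import com.sleepycat.persist.raw.RawField;
          import com.sleepycat.persist.raw.RawType;

          class RawTypeExample {
              static void describe(RawType type) {
                  for (RawType t = type; t != null; t = t.getSuperType()) {
                      System.out.println(t.getClassName() +
                                         " version " + t.getVersion());
                      /* getFields is null for simple and array types. */
                      if (t.getFields() != null) {
                          for (RawField field : t.getFields().values()) {
                              RawType ft = field.getType();
                              System.out.println("  " + field.getName() + " : " +
                                  (ft == null ? "(interface or Object)"
                                              : ft.getClassName()));
                          }
                      }
                  }
              }
          }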

      +
      +
      Author:
      +
      Mark Hayes
      +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Summary

        Modifier and Type    Method and Description
        ClassMetadata        getClassMetadata()
                             Returns the original model class metadata used to create this class, or null if this is not a model class.
        java.lang.String     getClassName()
                             Returns the class name for this type in the format specified by Class.getName().
        RawType              getComponentType()
                             Returns the array component type, or null if this is not an array type.
        int                  getDimensions()
                             Returns the number of array dimensions, or zero if this is not an array type.
        EntityMetadata       getEntityMetadata()
                             Returns the original model entity metadata used to create this class, or null if this is not an entity class.
        java.util.List<java.lang.String>
                             getEnumConstants()
                             Returns an unmodifiable list of the names of the enum instances, or null if this is not an enum type.
        java.util.Map<java.lang.String,RawField>
                             getFields()
                             Returns a map of field name to raw field for each non-static non-transient field declared in this class, or null if this is not a complex type (in other words, this is a simple type or an array type).
        int                  getId()
                             Returns the internal unique ID for this type.
        RawType              getSuperType()
                             Returns the type of the superclass, or null if the superclass is Object or this is not a complex type (in other words, this is a simple type or an array type).
        int                  getVersion()
                             Returns the class version for this type.
        boolean              isArray()
                             Returns whether this is an array type.
        boolean              isDeleted()
                             Returns whether this type has been deleted using a class Deleter mutation.
        boolean              isEnum()
                             Returns whether this is an enum type.
        boolean              isPrimitive()
                             Returns whether this type is a Java primitive: char, byte, short, int, long, float or double.
        boolean              isSimple()
                             Returns whether this is a simple type: primitive, primitive wrapper, BigInteger, BigDecimal, String or Date.
        java.lang.String     toString()
                             Returns an XML representation of the raw type.
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          getClassName

          +
          java.lang.String getClassName()
          +
          Returns the class name for this type in the format specified by Class.getName(). + +

          If this class currently exists (has not been removed or renamed) then + the class name may be passed to Class.forName(java.lang.String) to get the current + Class object. However, if this raw type is not the current + version of the class, this type information may differ from that of the + current Class.

          +
          +
          Returns:
          +
          the class name.
          +
          +
        • +
        + + + +
          +
        • +

          getVersion

          +
          int getVersion()
          +
          Returns the class version for this type. For simple types, zero is + always returned.
          +
          +
          Returns:
          +
          the version.
          +
          See Also:
          +
          Entity.version(), +Persistent.version()
          +
          +
        • +
        + + + +
          +
        • +

          getId

          +
          int getId()
          +
          Returns the internal unique ID for this type.
          +
          +
          Returns:
          +
          the ID.
          +
          +
        • +
        + + + +
          +
        • +

          isSimple

          +
          boolean isSimple()
          +
          Returns whether this is a + simple type: + primitive, primitive wrapper, BigInteger, BigDecimal, String or Date. + +

          If true is returned, isPrimitive() can be called for more + information, and a raw value of this type is represented as a simple + type object (not as a RawObject).

          + +

          If false is returned, this is a complex type, an array type (see + isArray()), or an enum type, and a raw value of this type is + represented as a RawObject.

          +
          +
          Returns:
          +
          whether this is a simple type.
          +
          +
        • +
        + + + +
          +
        • +

          isPrimitive

          +
          boolean isPrimitive()
          +
          Returns whether this type is a Java primitive: char, byte, short, int, + long, float or double. + +

          If true is returned, this is also a simple type. In other words, + primitive types are a subset of simple types.

          + +

          If true is returned, a raw value of this type is represented as a + non-null instance of the primitive type's wrapper class. For example, + an int raw value is represented as an + Integer.

          +
          +
          Returns:
          +
          whether this is a Java primitive.
          +
          +
        • +
        + + + +
          +
        • +

          isEnum

          +
          boolean isEnum()
          +
          Returns whether this is an enum type. + +

          If true is returned, a value of this type is a RawObject and + the enum constant String is available via RawObject.getEnum().

          + +

          If false is returned, then this is a complex type, an array type (see + isArray()), or a simple type (see isSimple()).

          +
          +
          Returns:
          +
          whether this is an enum type.
          +
          +
        • +
        + + + +
          +
        • +

          getEnumConstants

          +
          java.util.List<java.lang.String> getEnumConstants()
          +
          Returns an unmodifiable list of the names of the enum instances, or null + if this is not an enum type.
          +
          +
          Returns:
          +
          the list of enum names.
          +
          +
        • +
        + + + +
          +
        • +

          isArray

          +
          boolean isArray()
          +
          Returns whether this is an array type. Raw value arrays are represented + as RawObject instances. + +

          If true is returned, the array component type is returned by getComponentType() and the number of array dimensions is returned by + getDimensions().

          + +

          If false is returned, then this is a complex type, an enum type (see + isEnum()), or a simple type (see isSimple()).

          +
          +
          Returns:
          +
          whether this is an array type.
          +
          +
        • +
        + + + +
          +
        • +

          getDimensions

          +
          int getDimensions()
          +
          Returns the number of array dimensions, or zero if this is not an array + type.
          +
          +
          Returns:
          +
          the number of array dimensions, or zero if this is not an array + type.
          +
          +
        • +
        + + + +
          +
        • +

          getComponentType

          +
          RawType getComponentType()
          +
          Returns the array component type, or null if this is not an array type.
          +
          +
          Returns:
          +
          the array component type, or null if this is not an array type.
          +
          +
        • +
        + + + +
          +
        • +

          getFields

          +
          java.util.Map<java.lang.String,RawField> getFields()
          +
          Returns a map of field name to raw field for each non-static + non-transient field declared in this class, or null if this is not a + complex type (in other words, this is a simple type or an array type).
          +
          +
          Returns:
          +
          a map of field name to raw field, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getSuperType

          +
          RawType getSuperType()
          +
          Returns the type of the superclass, or null if the superclass is Object + or this is not a complex type (in other words, this is a simple type or + an array type).
          +
          +
          Returns:
          +
          the type of the superclass, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getClassMetadata

          +
          ClassMetadata getClassMetadata()
          +
          Returns the original model class metadata used to create this class, or + null if this is not a model class.
          +
          +
          Returns:
          +
          the model class metadata, or null.
          +
          +
        • +
        + + + +
          +
        • +

          getEntityMetadata

          +
          EntityMetadata getEntityMetadata()
          +
          Returns the original model entity metadata used to create this class, or + null if this is not an entity class.
          +
          +
          Returns:
          +
          the model entity metadata, or null.
          +
          +
        • +
        + + + + + + + +
          +
        • +

          toString

          +
          java.lang.String toString()
          +
          Returns an XML representation of the raw type.
          +
          +
          Overrides:
          +
          toString in class java.lang.Object
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/persist/raw/class-use/RawField.html b/docs/java/com/sleepycat/persist/raw/class-use/RawField.html
new file mode 100644
index 0000000..bc336f2
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/class-use/RawField.html
@@ -0,0 +1,175 @@

    Uses of Interface com.sleepycat.persist.raw.RawField (Oracle - Berkeley DB Java Edition API)

    Uses of Interface
    com.sleepycat.persist.raw.RawField

    +
    +
    +
      +
    • + + + + + + + + + + + + +
      Packages that use RawField 
      PackageDescription
      com.sleepycat.persist.raw +
      Raw data access for general purpose tools and manual conversions.
      +
      +
    • +
    • +
        +
      • + + +

        Uses of RawField in com.sleepycat.persist.raw

        + + + + + + + + + + + + +
        Methods in com.sleepycat.persist.raw that return types with arguments of type RawField 
        Modifier and TypeMethod and Description
        java.util.Map<java.lang.String,RawField>RawType.getFields() +
        Returns a map of field name to raw field for each non-static + non-transient field declared in this class, or null if this is not a + complex type (in other words, this is a simple type or an array type).
        +
        +
      • +
      +
    • +
    +

diff --git a/docs/java/com/sleepycat/persist/raw/class-use/RawObject.html b/docs/java/com/sleepycat/persist/raw/class-use/RawObject.html
new file mode 100644
index 0000000..6733e76
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/class-use/RawObject.html
@@ -0,0 +1,240 @@

    Uses of Class com.sleepycat.persist.raw.RawObject (Oracle - Berkeley DB Java Edition API)

    Uses of Class
    com.sleepycat.persist.raw.RawObject


diff --git a/docs/java/com/sleepycat/persist/raw/class-use/RawStore.html b/docs/java/com/sleepycat/persist/raw/class-use/RawStore.html
new file mode 100644
index 0000000..1c5a390
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/class-use/RawStore.html
@@ -0,0 +1,129 @@

    Uses of Class com.sleepycat.persist.raw.RawStore (Oracle - Berkeley DB Java Edition API)

    Uses of Class
    com.sleepycat.persist.raw.RawStore

    +
    +
    No usage of com.sleepycat.persist.raw.RawStore

diff --git a/docs/java/com/sleepycat/persist/raw/class-use/RawType.html b/docs/java/com/sleepycat/persist/raw/class-use/RawType.html
new file mode 100644
index 0000000..b0b3ea6
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/class-use/RawType.html
@@ -0,0 +1,279 @@

    Uses of Interface com.sleepycat.persist.raw.RawType (Oracle - Berkeley DB Java Edition API)

    Uses of Interface
    com.sleepycat.persist.raw.RawType

    +
    +
    +
      +
    • + + + + + + + + + + + + + + + + +
      Packages that use RawType 
      PackageDescription
      com.sleepycat.persist.model +
      Annotations for defining a persistent object model.
      +
      com.sleepycat.persist.raw +
      Raw data access for general purpose tools and manual conversions.
      +
      +
    • +
    • +
        +
      • + + +

        Uses of RawType in com.sleepycat.persist.model

        + + + + + + + + + + + + + + + + +
        Methods in com.sleepycat.persist.model that return RawType 
        Modifier and TypeMethod and Description
        RawTypeEntityModel.getRawType(java.lang.String className) +
        Returns the type information for the current version of a given class, + or null if the class is not currently persistent.
        +
        RawTypeEntityModel.getRawTypeVersion(java.lang.String className, + int version) +
        Returns the type information for a given version of a given class, + or null if the given version of the class is unknown.
        +
        + + + + + + + + + + + + + + + + +
        Methods in com.sleepycat.persist.model that return types with arguments of type RawType 
        Modifier and TypeMethod and Description
        java.util.List<RawType>EntityModel.getAllRawTypes() +
        Returns all versions of all known types.
        +
        java.util.List<RawType>EntityModel.getAllRawTypeVersions(java.lang.String className) +
        Returns all known versions of type information for a given class name, + or null if no persistent version of the class is known.
        +
        +
      • +
      • + + +

        Uses of RawType in com.sleepycat.persist.raw

        + + + + + + + + + + + + + + + + + + + + + + + + +
        Methods in com.sleepycat.persist.raw that return RawType 
        Modifier and TypeMethod and Description
        RawTypeRawType.getComponentType() +
        Returns the array component type, or null if this is not an array type.
        +
        RawTypeRawType.getSuperType() +
        Returns the type of the superclass, or null if the superclass is Object + or this is not a complex type (in other words, this is a simple type or + an array type).
        +
        RawTypeRawObject.getType() +
        Returns the raw type information for this raw object.
        +
        RawTypeRawField.getType() +
        Returns the type of the field, without expanding parameterized types, + or null if the type is an interface type or the Object class.
        +
        + + + + + + + + + + + + + + + + +
        Constructors in com.sleepycat.persist.raw with parameters of type RawType 
        Constructor and Description
        RawObject(RawType type, + java.util.Map<java.lang.String,java.lang.Object> values, + RawObject superObject) +
        Creates a raw object with a given set of field values for a complex + type.
        +
        RawObject(RawType type, + java.lang.Object[] elements) +
        Creates a raw object with the given array elements for an array type.
        +
        RawObject(RawType type, + java.lang.String enumConstant) +
        Creates a raw object with the given enum value for an enum type.
        +
        +
      • +
      +
    • +
    +

diff --git a/docs/java/com/sleepycat/persist/raw/package-frame.html b/docs/java/com/sleepycat/persist/raw/package-frame.html
new file mode 100644
index 0000000..f066167
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/package-frame.html
@@ -0,0 +1,26 @@

    com.sleepycat.persist.raw (Oracle - Berkeley DB Java Edition API)

    com.sleepycat.persist.raw

    +
    +

    Interfaces

    + +

    Classes

    + +
diff --git a/docs/java/com/sleepycat/persist/raw/package-summary.html b/docs/java/com/sleepycat/persist/raw/package-summary.html
new file mode 100644
index 0000000..cfb080d
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/package-summary.html
@@ -0,0 +1,188 @@

    com.sleepycat.persist.raw (Oracle - Berkeley DB Java Edition API)

    Package com.sleepycat.persist.raw

    +
    +
    Raw data access for general purpose tools and manual conversions.
    +
    +

    See: Description

    +
    +
    +
      +
    • + + + + + + + + + + + + + + + + +
      Interface Summary 
      InterfaceDescription
      RawField +
      The definition of a field in a RawType.
      +
      RawType +
      The type definition for a simple or complex persistent type, or an array + of persistent types.
      +
      +
    • +
    • + + + + + + + + + + + + + + + + +
      Class Summary 
      ClassDescription
      RawObject +
      A raw instance that can be used with a RawStore or Conversion.
      +
      RawStore +
      Provides access to the raw data in a store for use by general purpose tools.
      +
      +
    • +
    + + + +

    Package com.sleepycat.persist.raw Description

    +
    Raw data access for general purpose tools and manual conversions.
    +

diff --git a/docs/java/com/sleepycat/persist/raw/package-tree.html b/docs/java/com/sleepycat/persist/raw/package-tree.html
new file mode 100644
index 0000000..0c77c92
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/package-tree.html
@@ -0,0 +1,148 @@

    com.sleepycat.persist.raw Class Hierarchy (Oracle - Berkeley DB Java Edition API)

    Hierarchy For Package com.sleepycat.persist.raw


    Class Hierarchy

    • java.lang.Object
        • com.sleepycat.persist.raw.RawObject
        • com.sleepycat.persist.raw.RawStore (implements java.io.Closeable)

    Interface Hierarchy


diff --git a/docs/java/com/sleepycat/persist/raw/package-use.html b/docs/java/com/sleepycat/persist/raw/package-use.html
new file mode 100644
index 0000000..f7c74be
--- /dev/null
+++ b/docs/java/com/sleepycat/persist/raw/package-use.html
@@ -0,0 +1,206 @@

    Uses of Package com.sleepycat.persist.raw (Oracle - Berkeley DB Java Edition API)

    Uses of Package
    com.sleepycat.persist.raw


diff --git a/docs/java/com/sleepycat/util/ClassResolver.Stream.html b/docs/java/com/sleepycat/util/ClassResolver.Stream.html
new file mode 100644
index 0000000..927ef16
--- /dev/null
+++ b/docs/java/com/sleepycat/util/ClassResolver.Stream.html
@@ -0,0 +1,371 @@

    ClassResolver.Stream (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.util
    +

    Class ClassResolver.Stream

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.io.InputStream
      • +
      • +
          +
        • java.io.ObjectInputStream
        • +
        • +
            +
          • com.sleepycat.util.ClassResolver.Stream
          • +
          +
        • +
        +
      • +
      +
    • +
    +
    +
      +
    • +
      +
      All Implemented Interfaces:
      +
      java.io.Closeable, java.io.DataInput, java.io.ObjectInput, java.io.ObjectStreamConstants, java.lang.AutoCloseable
      +
      +
      +
      Direct Known Subclasses:
      +
      SerialInput
      +
      +
      +
      Enclosing class:
      +
      ClassResolver
      +
      +
      +
      +
      public static class ClassResolver.Stream
      extends java.io.ObjectInputStream

      A specialized ObjectInputStream that supports use of a user-specified ClassLoader.

      If the loader param and the thread-context loader are both null, or if they throw ClassNotFoundException, then ObjectInputStream.resolveClass is called, which has its own special rules for class loading.
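
      A minimal sketch of using this class in place of ObjectInputStream; the input stream and class loader are supplied by the caller:

          import java.io.IOException;
          import java.io.InputStream;
          import com.sleepycat.util.ClassResolver;

          class StreamExample {
              static Object deserialize(InputStream in, ClassLoader loader)
                      throws IOException, ClassNotFoundException {
                  ClassResolver.Stream ois = new ClassResolver.Stream(in, loader);
                  try {
                      return ois.readObject();
                  } finally {
                      ois.close();
                  }
              }
          }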
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Nested Class Summary

        +
          +
        • + + +

          Nested classes/interfaces inherited from class java.io.ObjectInputStream

          +java.io.ObjectInputStream.GetField
        • +
        +
      • +
      + +
        +
      • + + +

        Field Summary

        +
          +
        • + + +

          Fields inherited from interface java.io.ObjectStreamConstants

          +baseWireHandle, PROTOCOL_VERSION_1, PROTOCOL_VERSION_2, SC_BLOCK_DATA, SC_ENUM, SC_EXTERNALIZABLE, SC_SERIALIZABLE, SC_WRITE_METHOD, STREAM_MAGIC, STREAM_VERSION, SUBCLASS_IMPLEMENTATION_PERMISSION, SUBSTITUTION_PERMISSION, TC_ARRAY, TC_BASE, TC_BLOCKDATA, TC_BLOCKDATALONG, TC_CLASS, TC_CLASSDESC, TC_ENDBLOCKDATA, TC_ENUM, TC_EXCEPTION, TC_LONGSTRING, TC_MAX, TC_NULL, TC_OBJECT, TC_PROXYCLASSDESC, TC_REFERENCE, TC_RESET, TC_STRING
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        Constructor and Description
        Stream(java.io.InputStream in, java.lang.ClassLoader classLoader)
      • +
      + +
        +
      • + + +

        Method Summary

        Modifier and Type          Method and Description
        protected java.lang.Class  resolveClass(java.io.ObjectStreamClass desc)
          +
        • + + +

          Methods inherited from class java.io.ObjectInputStream

          +available, close, defaultReadObject, enableResolveObject, read, read, readBoolean, readByte, readChar, readClassDescriptor, readDouble, readFields, readFloat, readFully, readFully, readInt, readLine, readLong, readObject, readObjectOverride, readShort, readStreamHeader, readUnshared, readUnsignedByte, readUnsignedShort, readUTF, registerValidation, resolveObject, resolveProxyClass, skipBytes
        • +
        +
          +
        • + + +

          Methods inherited from class java.io.InputStream

          +mark, markSupported, read, reset, skip
        • +
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
          +
        • + + +

          Methods inherited from interface java.io.ObjectInput

          +read, skip
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          Stream

          +
          public Stream(java.io.InputStream in,
          +              java.lang.ClassLoader classLoader)
          +       throws java.io.IOException
          +
          +
          Throws:
          +
          java.io.IOException
          +
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          resolveClass

          +
          protected java.lang.Class resolveClass(java.io.ObjectStreamClass desc)
          +                                throws java.io.IOException,
          +                                       java.lang.ClassNotFoundException
          +
          +
          Overrides:
          +
          resolveClass in class java.io.ObjectInputStream
          +
          Throws:
          +
          java.io.IOException
          +
          java.lang.ClassNotFoundException
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/util/ClassResolver.html b/docs/java/com/sleepycat/util/ClassResolver.html
new file mode 100644
index 0000000..9eab999
--- /dev/null
+++ b/docs/java/com/sleepycat/util/ClassResolver.html
@@ -0,0 +1,329 @@

    ClassResolver (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.util
    +

    Class ClassResolver

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • com.sleepycat.util.ClassResolver
      • +
      +
    • +
    +
    +
      +
    • +
      +
      +
      public class ClassResolver
      extends java.lang.Object

      Implements policies for loading user-supplied classes. The resolveClass(java.lang.String, java.lang.ClassLoader) method should be used to load all user-supplied classes, and the ClassResolver.Stream class should be used as a replacement for ObjectInputStream to deserialize instances of user-supplied classes.

      The ClassLoader specified as a param should be the one configured using EnvironmentConfig.setClassLoader. This loader is used if it is non-null. If the loader param is null but a non-null thread-context loader is available, the latter is used. If the loader param and thread-context loader are both null, or if they fail to load a class by throwing ClassNotFoundException, then the default Java mechanisms for determining the class loader are used.
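
      For example, a sketch of resolving a user-supplied class name, where "com.example.Person" is an invented placeholder:

          import com.sleepycat.util.ClassResolver;

          class ResolveExample {
              static Class loadUserClass(ClassLoader loader)
                      throws ClassNotFoundException {
                  /* A null loader falls back to the thread-context loader and
                     then to the default Java mechanisms. */
                  return ClassResolver.resolveClass("com.example.Person", loader);
              }
          }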

      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Nested Class Summary

        + + + + + + + + + + +
        Nested Classes 
        Modifier and TypeClass and Description
        static class ClassResolver.Stream +
        A specialized ObjectInputStream that supports use of a user-specified + ClassLoader.
        +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        ClassResolver() 
        +
      • +
      + +
        +
      • + + +

        Method Summary

        Modifier and Type         Method and Description
        static java.lang.Class    resolveClass(java.lang.String className, java.lang.ClassLoader classLoader)
                                  A specialized Class.forName method that supports use of a user-specified ClassLoader.
        +
          +
        • + + +

          Methods inherited from class java.lang.Object

          +clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
        • +
        +
      • +
      +
    • +
    +
    +
    +
      +
    • + +
        +
      • + + +

        Constructor Detail

        + + + +
          +
        • +

          ClassResolver

          +
          public ClassResolver()
          +
        • +
        +
      • +
      + +
        +
      • + + +

        Method Detail

        + + + +
          +
        • +

          resolveClass

          +
          public static java.lang.Class resolveClass(java.lang.String className,
          +                                           java.lang.ClassLoader classLoader)
          +                                    throws java.lang.ClassNotFoundException
          +
          A specialized Class.forName method that supports use of a user-specified ClassLoader.

          If the loader param and the thread-context loader are both null, or if they throw ClassNotFoundException, then Class.forName is called and the "current loader" (the one used to load JE) will be used.
          +
          +
          Parameters:
          +
          className - the class name.
          +
          classLoader - the ClassLoader.
          +
          Returns:
          +
          the Class.
          +
          Throws:
          +
          java.lang.ClassNotFoundException - if the class is not found.
          +
          +
        • +
        +
      • +
      +
    • +
    +
    +

diff --git a/docs/java/com/sleepycat/util/ConfigBeanInfoBase.html b/docs/java/com/sleepycat/util/ConfigBeanInfoBase.html
new file mode 100644
index 0000000..48fea6c
--- /dev/null
+++ b/docs/java/com/sleepycat/util/ConfigBeanInfoBase.html
@@ -0,0 +1,570 @@

    ConfigBeanInfoBase (Oracle - Berkeley DB Java Edition API)
    +
    com.sleepycat.util
    +

    Class ConfigBeanInfoBase

    +
    +
    +
      +
    • java.lang.Object
    • +
    • +
        +
      • java.beans.SimpleBeanInfo
      • +
      • +
          +
        • com.sleepycat.util.ConfigBeanInfoBase
        • +
        +
      • +
      +
    • +
    +
    + +
    +
    +
      +
    • + +
        +
      • + + +

        Field Summary

        + + + + + + + + + + + + + + + + + + +
        Fields 
        Modifier and TypeField and Description
        protected static java.util.ArrayList<java.lang.String>getterAndSetterMethods 
        protected static java.util.ArrayList<java.lang.String>ignoreMethods 
        protected static java.util.ArrayList<java.lang.String>propertiesName 
        +
          +
        • + + +

          Fields inherited from interface java.beans.BeanInfo

          +ICON_COLOR_16x16, ICON_COLOR_32x32, ICON_MONO_16x16, ICON_MONO_32x32
        • +
        +
      • +
      + +
        +
      • + + +

        Constructor Summary

        + + + + + + + + +
        Constructors 
        Constructor and Description
        ConfigBeanInfoBase() 
        +
      • +
      + +
        +
      • + + +

        Method Summary

All Methods Static Methods Instance Methods Concrete Methods
Modifier and Type                                  Method and Description
protected static java.beans.BeanDescriptor        getBdescriptor(java.lang.Class cls)
java.beans.BeanDescriptor                         getBeanDescriptor(java.lang.Class cls)
                                                  Gets the bean's BeanDescriptor.
int                                               getDefaultEventIndex()
                                                  A bean may have a "default" event: the event most commonly used by humans when using the bean.
int                                               getDefaultPropertyIndex()
                                                  A bean may have a "default" property: the property most commonly chosen for update by humans who are customizing the bean.
java.beans.EventSetDescriptor[]                   getEventSetDescriptors()
                                                  Gets the bean's EventSetDescriptors.
java.awt.Image                                    getIcon(int iconKind)
                                                  Returns an image object that can be used to represent the bean in toolboxes, toolbars, etc.
protected static java.beans.PropertyDescriptor[]  getPdescriptor(java.lang.Class cls)
protected static void                             getPropertiesInfo(java.lang.Class cls)
java.beans.PropertyDescriptor[]                   getPropertyDescriptors(java.lang.Class cls)
                                                  Gets the bean's PropertyDescriptors.

Methods inherited from class java.beans.SimpleBeanInfo

getAdditionalBeanInfo, getBeanDescriptor, getMethodDescriptors, getPropertyDescriptors, loadImage

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Field Detail

propertiesName

protected static java.util.ArrayList<java.lang.String> propertiesName

getterAndSetterMethods

protected static java.util.ArrayList<java.lang.String> getterAndSetterMethods

ignoreMethods

protected static java.util.ArrayList<java.lang.String> ignoreMethods

        Constructor Detail

ConfigBeanInfoBase

public ConfigBeanInfoBase()

        Method Detail

getPropertiesInfo

protected static void getPropertiesInfo(java.lang.Class cls)

getPdescriptor

protected static java.beans.PropertyDescriptor[] getPdescriptor(java.lang.Class cls)

getBdescriptor

protected static java.beans.BeanDescriptor getBdescriptor(java.lang.Class cls)

getBeanDescriptor

public java.beans.BeanDescriptor getBeanDescriptor(java.lang.Class cls)

Gets the bean's BeanDescriptor.

Parameters:
cls - the Class.

Returns:
BeanDescriptor describing the editable properties of this bean. May return null if the information should be obtained by automatic analysis.

getPropertyDescriptors

public java.beans.PropertyDescriptor[] getPropertyDescriptors(java.lang.Class cls)

Gets the bean's PropertyDescriptors.

Parameters:
cls - the Class.

Returns:
An array of PropertyDescriptors describing the editable properties supported by this bean. May return null if the information should be obtained by automatic analysis.

If a property is indexed, then its entry in the result array will belong to the IndexedPropertyDescriptor subclass of PropertyDescriptor. A client of getPropertyDescriptors can use "instanceof" to check whether a given PropertyDescriptor is an IndexedPropertyDescriptor.

getEventSetDescriptors

public java.beans.EventSetDescriptor[] getEventSetDescriptors()

Gets the bean's EventSetDescriptors.

Specified by:
getEventSetDescriptors in interface java.beans.BeanInfo

Overrides:
getEventSetDescriptors in class java.beans.SimpleBeanInfo

Returns:
An array of EventSetDescriptors describing the kinds of events fired by this bean. May return null if the information should be obtained by automatic analysis.

getDefaultPropertyIndex

public int getDefaultPropertyIndex()

A bean may have a "default" property: the property most commonly chosen for update by humans who are customizing the bean.

Specified by:
getDefaultPropertyIndex in interface java.beans.BeanInfo

Overrides:
getDefaultPropertyIndex in class java.beans.SimpleBeanInfo

Returns:
Index of the default property in the PropertyDescriptor array returned by getPropertyDescriptors. Returns -1 if there is no default property.

getDefaultEventIndex

public int getDefaultEventIndex()

A bean may have a "default" event: the event most commonly used by humans when using the bean.

Specified by:
getDefaultEventIndex in interface java.beans.BeanInfo

Overrides:
getDefaultEventIndex in class java.beans.SimpleBeanInfo

Returns:
Index of the default event in the EventSetDescriptor array returned by getEventSetDescriptors. Returns -1 if there is no default event.

getIcon

public java.awt.Image getIcon(int iconKind)

Returns an image object that can be used to represent the bean in toolboxes, toolbars, etc. Icon images will typically be GIFs, but may in future include other formats.

Beans aren't required to provide icons and may return null from this method.

There are four possible flavors of icons (16x16 color, 32x32 color, 16x16 mono, 32x32 mono). If a bean chooses to support only a single icon, we recommend 16x16 color.

We recommend that icons have a "transparent" background so they can be rendered onto an existing background.

Specified by:
getIcon in interface java.beans.BeanInfo

Overrides:
getIcon in class java.beans.SimpleBeanInfo

Parameters:
iconKind - The kind of icon requested. This should be one of the constant values ICON_COLOR_16x16, ICON_COLOR_32x32, ICON_MONO_16x16, or ICON_MONO_32x32.

Returns:
An image object representing the requested icon. May return null if no suitable icon is available.
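Example (a hypothetical sketch; the bean class inspected here is arbitrary, and it is an assumption that ConfigBeanInfoBase subclasses are consumed through the standard JavaBeans introspection machinery):

    import java.beans.BeanInfo;
    import java.beans.Introspector;
    import java.beans.PropertyDescriptor;

    public class BeanInfoExample {
        public static void main(String[] args) throws Exception {
            // Introspector locates a XxxBeanInfo class (such as one extending
            // ConfigBeanInfoBase) for the bean class being inspected.
            BeanInfo info = Introspector.getBeanInfo(java.util.Date.class);
            for (PropertyDescriptor pd : info.getPropertyDescriptors()) {
                System.out.println(pd.getName());
            }
        }
    }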

diff --git a/docs/java/com/sleepycat/util/ExceptionUnwrapper.html b/docs/java/com/sleepycat/util/ExceptionUnwrapper.html
new file mode 100644
index 0000000..f7a9c1f
--- /dev/null
+++ b/docs/java/com/sleepycat/util/ExceptionUnwrapper.html
@@ -0,0 +1,320 @@
ExceptionUnwrapper (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

    Class ExceptionUnwrapper

java.lang.Object
  com.sleepycat.util.ExceptionUnwrapper

public class ExceptionUnwrapper
extends java.lang.Object

Unwraps nested exceptions by calling the ExceptionWrapper.getCause() method for exceptions that implement the ExceptionWrapper interface. Does not currently support the Java 1.4 Throwable.getCause() method.

Author:
Mark Hayes

        Constructor Summary

Constructors
Constructor and Description
ExceptionUnwrapper()

        Method Summary

All Methods Static Methods Concrete Methods
Modifier and Type           Method and Description
static java.lang.Exception  unwrap(java.lang.Exception e)
                            Unwraps an Exception and returns the underlying Exception, or throws an Error if the underlying Throwable is an Error.
static java.lang.Throwable  unwrapAny(java.lang.Throwable e)
                            Unwraps an Exception and returns the underlying Throwable.

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Constructor Detail

ExceptionUnwrapper

public ExceptionUnwrapper()

        Method Detail

unwrap

public static java.lang.Exception unwrap(java.lang.Exception e)

Unwraps an Exception and returns the underlying Exception, or throws an Error if the underlying Throwable is an Error.

Parameters:
e - is the Exception to unwrap.

Returns:
the underlying Exception.

Throws:
java.lang.Error - if the underlying Throwable is an Error.
java.lang.IllegalArgumentException - if the underlying Throwable is not an Exception or an Error.

unwrapAny

public static java.lang.Throwable unwrapAny(java.lang.Throwable e)

Unwraps an Exception and returns the underlying Throwable.

Parameters:
e - is the Exception to unwrap.

Returns:
the underlying Throwable.
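Example (an illustrative sketch; it assumes RuntimeExceptionWrapper's public Throwable constructor, documented elsewhere in this package):

    import com.sleepycat.util.ExceptionUnwrapper;
    import com.sleepycat.util.RuntimeExceptionWrapper;
    import java.io.IOException;

    public class UnwrapExample {
        public static void main(String[] args) {
            try {
                // A checked exception rethrown through an API that only
                // permits RuntimeException.
                throw new RuntimeExceptionWrapper(new IOException("disk error"));
            } catch (RuntimeException e) {
                Exception cause = ExceptionUnwrapper.unwrap(e);
                System.out.println(cause); // java.io.IOException: disk error
            }
        }
    }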

diff --git a/docs/java/com/sleepycat/util/ExceptionWrapper.html b/docs/java/com/sleepycat/util/ExceptionWrapper.html
new file mode 100644
index 0000000..5c60ff4
--- /dev/null
+++ b/docs/java/com/sleepycat/util/ExceptionWrapper.html
@@ -0,0 +1,270 @@
ExceptionWrapper (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

    Interface ExceptionWrapper

All Known Implementing Classes:
IOExceptionWrapper, RuntimeExceptionWrapper

public interface ExceptionWrapper

Interface implemented by exceptions that can contain nested exceptions.

Author:
Mark Hayes

        Method Detail

getDetail

java.lang.Throwable getDetail()

Deprecated. Replaced by getCause().

Returns the nested exception or null if none is present.

Returns:
the nested exception or null if none is present.

getCause

java.lang.Throwable getCause()

Returns the nested exception or null if none is present.

This method is intentionally defined with the same signature as the java.lang.Throwable.getCause method in Java 1.4 and greater. By defining this method to return a nested exception, the Java 1.4 runtime will print the nested stack trace.

Returns:
the nested exception or null if none is present.
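Example (a minimal, hypothetical implementation of this interface; MyWrappedException is not part of JE):

    import com.sleepycat.util.ExceptionWrapper;

    public class MyWrappedException extends Exception implements ExceptionWrapper {
        private final Throwable nested;

        public MyWrappedException(Throwable nested) {
            super(nested.getMessage());
            this.nested = nested;
        }

        @Deprecated
        public Throwable getDetail() {
            return nested;
        }

        @Override
        public Throwable getCause() {
            return nested;
        }
    }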

diff --git a/docs/java/com/sleepycat/util/FastInputStream.html b/docs/java/com/sleepycat/util/FastInputStream.html
new file mode 100644
index 0000000..7d2ce7d
--- /dev/null
+++ b/docs/java/com/sleepycat/util/FastInputStream.html
@@ -0,0 +1,710 @@
FastInputStream (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

    Class FastInputStream

java.lang.Object
  java.io.InputStream
    com.sleepycat.util.FastInputStream

All Implemented Interfaces:
java.io.Closeable, java.lang.AutoCloseable

Direct Known Subclasses:
TupleInput
public class FastInputStream
extends java.io.InputStream

A replacement for ByteArrayInputStream that does not synchronize every byte read.

This class extends InputStream and its read() methods allow it to be used as a standard input stream. In addition, it provides readFast() methods that are not declared to throw IOException. IOException is never thrown by this class.

Author:
Mark Hayes
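Example (an illustrative sketch using the constructors and readFast() methods documented below):

    import com.sleepycat.util.FastInputStream;

    public class FastInputExample {
        public static void main(String[] args) {
            byte[] data = {1, 2, 3};
            FastInputStream in = new FastInputStream(data);
            int b;
            // readFast() returns -1 at end of stream and never throws
            // IOException, so no try/catch is needed.
            while ((b = in.readFast()) != -1) {
                System.out.println(b);
            }
        }
    }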

        Field Summary

Fields
Modifier and Type  Field and Description
protected byte[]   buf
protected int      len
protected int      mark
protected int      off

        Constructor Summary

Constructors
Constructor and Description
FastInputStream(byte[] buffer)
    Creates an input stream.
FastInputStream(byte[] buffer, int offset, int length)
    Creates an input stream.

        Method Summary

All Methods Instance Methods Concrete Methods
Modifier and Type  Method and Description
int       available()
byte[]    getBufferBytes()
          Returns the underlying data being read.
int       getBufferLength()
          Returns the end of the buffer being read.
int       getBufferOffset()
          Returns the offset at which data is being read from the buffer.
void      mark(int readLimit)
boolean   markSupported()
int       read()
int       read(byte[] toBuf)
int       read(byte[] toBuf, int offset, int length)
int       readFast()
          Equivalent to read() but does not throw IOException.
int       readFast(byte[] toBuf)
          Equivalent to read(byte[]) but does not throw IOException.
int       readFast(byte[] toBuf, int offset, int length)
          Equivalent to read(byte[],int,int) but does not throw IOException.
void      reset()
long      skip(long count)
void      skipFast(int count)
          Equivalent to skip() but takes an int parameter instead of a long, and does not check whether the count given is larger than the number of remaining bytes.

Methods inherited from class java.io.InputStream

close

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Field Detail

len

protected int len

off

protected int off

mark

protected int mark

buf

protected byte[] buf

        Constructor Detail

FastInputStream

public FastInputStream(byte[] buffer)

Creates an input stream.

Parameters:
buffer - the data to read.

FastInputStream

public FastInputStream(byte[] buffer,
                       int offset,
                       int length)

Creates an input stream.

Parameters:
buffer - the data to read.
offset - the byte offset at which to begin reading.
length - the number of bytes to read.

        Method Detail

available

public int available()

Overrides:
available in class java.io.InputStream

markSupported

public boolean markSupported()

Overrides:
markSupported in class java.io.InputStream

mark

public void mark(int readLimit)

Overrides:
mark in class java.io.InputStream

reset

public void reset()

Overrides:
reset in class java.io.InputStream

skip

public long skip(long count)

Overrides:
skip in class java.io.InputStream

read

public int read()

Specified by:
read in class java.io.InputStream

read

public int read(byte[] toBuf)

Overrides:
read in class java.io.InputStream

read

public int read(byte[] toBuf,
                int offset,
                int length)

Overrides:
read in class java.io.InputStream

skipFast

public final void skipFast(int count)

Equivalent to skip() but takes an int parameter instead of a long, and does not check whether the count given is larger than the number of remaining bytes.

Parameters:
count - the number of bytes to skip.

See Also:
skip(long)

readFast

public final int readFast()

Equivalent to read() but does not throw IOException.

Returns:
the next byte of data, or -1 if at the end of the stream.

See Also:
read()

readFast

public final int readFast(byte[] toBuf)

Equivalent to read(byte[]) but does not throw IOException.

Parameters:
toBuf - the buffer into which the data is read.

Returns:
the number of bytes read, or -1 if at the end of the stream.

See Also:
read(byte[])

readFast

public final int readFast(byte[] toBuf,
                          int offset,
                          int length)

Equivalent to read(byte[],int,int) but does not throw IOException.

Parameters:
toBuf - the buffer into which the data is read.
offset - the start offset in the array at which the data is written.
length - the maximum number of bytes to read.

Returns:
the number of bytes read, or -1 if at the end of the stream.

See Also:
read(byte[],int,int)

getBufferBytes

public final byte[] getBufferBytes()

Returns the underlying data being read.

Returns:
the underlying data.

getBufferOffset

public final int getBufferOffset()

Returns the offset at which data is being read from the buffer.

Returns:
the offset at which data is being read.

getBufferLength

public final int getBufferLength()

Returns the end of the buffer being read.

Returns:
the end of the buffer.

diff --git a/docs/java/com/sleepycat/util/FastOutputStream.html b/docs/java/com/sleepycat/util/FastOutputStream.html
new file mode 100644
index 0000000..312455a
--- /dev/null
+++ b/docs/java/com/sleepycat/util/FastOutputStream.html
@@ -0,0 +1,783 @@
FastOutputStream (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

    Class FastOutputStream

java.lang.Object
  java.io.OutputStream
    com.sleepycat.util.FastOutputStream

All Implemented Interfaces:
java.io.Closeable, java.io.Flushable, java.lang.AutoCloseable

Direct Known Subclasses:
TupleOutput
public class FastOutputStream
extends java.io.OutputStream

A replacement for ByteArrayOutputStream that does not synchronize every byte written.

This class extends OutputStream and its write() methods allow it to be used as a standard output stream. In addition, it provides writeFast() methods that are not declared to throw IOException. IOException is never thrown by this class.

Author:
Mark Hayes
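Example (an illustrative sketch using the constructors and writeFast() methods documented below):

    import com.sleepycat.util.FastOutputStream;

    public class FastOutputExample {
        public static void main(String[] args) {
            FastOutputStream out = new FastOutputStream();
            // writeFast() never throws IOException, so no try/catch is needed.
            out.writeFast(0x01);
            out.writeFast(new byte[] {2, 3, 4});
            byte[] result = out.toByteArray();
            System.out.println(result.length); // 4
        }
    }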

        Field Summary

Fields
Modifier and Type  Field and Description
static int  DEFAULT_BUMP_SIZE
            The default amount that the buffer is increased when it is full.
static int  DEFAULT_INIT_SIZE
            The default initial size of the buffer if no initialSize parameter is specified.

        Constructor Summary

Constructors
Constructor and Description
FastOutputStream()
    Creates an output stream with default sizes.
FastOutputStream(byte[] buffer)
    Creates an output stream with a given initial buffer and a default bump size.
FastOutputStream(byte[] buffer, int bumpSize)
    Creates an output stream with a given initial buffer and a given bump size.
FastOutputStream(int initialSize)
    Creates an output stream with a default bump size and a given initial size.
FastOutputStream(int initialSize, int bumpSize)
    Creates an output stream with a given bump size and initial size.

        Method Summary

All Methods Instance Methods Concrete Methods
Modifier and Type  Method and Description
void      addSize(int sizeAdded)
          Skip the given number of bytes in the buffer.
byte[]    getBufferBytes()
          Returns the buffer owned by this object.
int       getBufferLength()
          Returns the length used in the internal buffer, i.e., the offset at which data will be written next.
int       getBufferOffset()
          Returns the offset of the internal buffer.
void      makeSpace(int sizeNeeded)
          Ensure that at least the given number of bytes are available in the internal buffer.
void      reset()
int       size()
byte[]    toByteArray()
java.lang.String  toString()
java.lang.String  toString(java.lang.String encoding)
void      write(byte[] fromBuf)
void      write(byte[] fromBuf, int offset, int length)
void      write(int b)
void      writeFast(byte[] fromBuf)
          Equivalent to write(byte[]) but does not throw IOException.
void      writeFast(byte[] fromBuf, int offset, int length)
          Equivalent to write(byte[],int,int) but does not throw IOException.
void      writeFast(int b)
          Equivalent to write(int) but does not throw IOException.
void      writeTo(java.io.OutputStream out)

Methods inherited from class java.io.OutputStream

close, flush

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

        Field Detail

DEFAULT_INIT_SIZE

public static final int DEFAULT_INIT_SIZE

The default initial size of the buffer if no initialSize parameter is specified. This constant is 100 bytes.

See Also:
Constant Field Values

DEFAULT_BUMP_SIZE

public static final int DEFAULT_BUMP_SIZE

The default amount that the buffer is increased when it is full. This constant is zero, which means to double the current buffer size.

See Also:
Constant Field Values

        Constructor Detail

FastOutputStream

public FastOutputStream()

Creates an output stream with default sizes.

FastOutputStream

public FastOutputStream(int initialSize)

Creates an output stream with a default bump size and a given initial size.

Parameters:
initialSize - the initial size of the buffer.

FastOutputStream

public FastOutputStream(int initialSize,
                        int bumpSize)

Creates an output stream with a given bump size and initial size.

Parameters:
initialSize - the initial size of the buffer.
bumpSize - the amount to increment the buffer.

FastOutputStream

public FastOutputStream(byte[] buffer)

Creates an output stream with a given initial buffer and a default bump size.

Parameters:
buffer - the initial buffer; will be owned by this object.

FastOutputStream

public FastOutputStream(byte[] buffer,
                        int bumpSize)

Creates an output stream with a given initial buffer and a given bump size.

Parameters:
buffer - the initial buffer; will be owned by this object.
bumpSize - the amount to increment the buffer. If zero (the default), the current buffer size will be doubled when the buffer is full.

        Method Detail

size

public int size()

reset

public void reset()

write

public void write(int b)

Specified by:
write in class java.io.OutputStream

write

public void write(byte[] fromBuf)

Overrides:
write in class java.io.OutputStream

write

public void write(byte[] fromBuf,
                  int offset,
                  int length)

Overrides:
write in class java.io.OutputStream

writeTo

public void writeTo(java.io.OutputStream out)
             throws java.io.IOException

Throws:
java.io.IOException

toString

public java.lang.String toString()

Overrides:
toString in class java.lang.Object

toString

public java.lang.String toString(java.lang.String encoding)
                          throws java.io.UnsupportedEncodingException

Throws:
java.io.UnsupportedEncodingException

toByteArray

public byte[] toByteArray()

writeFast

public final void writeFast(int b)

Equivalent to write(int) but does not throw IOException.

Parameters:
b - the byte to write.

See Also:
write(int)

writeFast

public final void writeFast(byte[] fromBuf)

Equivalent to write(byte[]) but does not throw IOException.

Parameters:
fromBuf - the buffer to write.

See Also:
write(byte[])

writeFast

public final void writeFast(byte[] fromBuf,
                            int offset,
                            int length)

Equivalent to write(byte[],int,int) but does not throw IOException.

Parameters:
fromBuf - the buffer to write.
offset - the start offset in the buffer.
length - the number of bytes to write.

See Also:
write(byte[],int,int)

getBufferBytes

public byte[] getBufferBytes()

Returns the buffer owned by this object.

Returns:
the buffer.

getBufferOffset

public int getBufferOffset()

Returns the offset of the internal buffer.

Returns:
always zero currently.

getBufferLength

public int getBufferLength()

Returns the length used in the internal buffer, i.e., the offset at which data will be written next.

Returns:
the buffer length.

makeSpace

public void makeSpace(int sizeNeeded)

Ensure that at least the given number of bytes are available in the internal buffer.

Parameters:
sizeNeeded - the number of bytes desired.

addSize

public void addSize(int sizeAdded)

Skip the given number of bytes in the buffer.

Parameters:
sizeAdded - number of bytes to skip.

diff --git a/docs/java/com/sleepycat/util/IOExceptionWrapper.html b/docs/java/com/sleepycat/util/IOExceptionWrapper.html
new file mode 100644
index 0000000..591a539
--- /dev/null
+++ b/docs/java/com/sleepycat/util/IOExceptionWrapper.html
@@ -0,0 +1,353 @@
IOExceptionWrapper (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

    Class IOExceptionWrapper

java.lang.Object
  java.lang.Throwable
    java.lang.Exception
      java.io.IOException
        com.sleepycat.util.IOExceptionWrapper

All Implemented Interfaces:
ExceptionWrapper, java.io.Serializable

public class IOExceptionWrapper
extends java.io.IOException
implements ExceptionWrapper

An IOException that can contain nested exceptions.

Author:
Mark Hayes

See Also:
Serialized Form
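Example (an illustrative sketch of wrapping a non-IO cause so it can propagate through an API that only declares IOException):

    import com.sleepycat.util.IOExceptionWrapper;
    import java.io.IOException;

    public class WrapExample {
        static void doIo() throws IOException {
            try {
                throw new IllegalStateException("not ready");
            } catch (RuntimeException e) {
                // Preserve the original cause inside an IOException.
                throw new IOExceptionWrapper(e);
            }
        }

        public static void main(String[] args) {
            try {
                doIo();
            } catch (IOException e) {
                System.out.println(e.getCause()); // java.lang.IllegalStateException
            }
        }
    }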

        Constructor Summary

Constructors
Constructor and Description
IOExceptionWrapper(java.lang.Throwable e)

        Method Summary

All Methods Instance Methods Concrete Methods Deprecated Methods
Modifier and Type    Method and Description
java.lang.Throwable  getCause()
                     Returns the nested exception or null if none is present.
java.lang.Throwable  getDetail()
                     Deprecated. Replaced by getCause().

Methods inherited from class java.lang.Throwable

addSuppressed, fillInStackTrace, getLocalizedMessage, getMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

        Constructor Detail

IOExceptionWrapper

public IOExceptionWrapper(java.lang.Throwable e)

        Method Detail

getDetail

public java.lang.Throwable getDetail()

Deprecated. Replaced by getCause().

Description copied from interface: ExceptionWrapper
Returns the nested exception or null if none is present.

Specified by:
getDetail in interface ExceptionWrapper

Returns:
the nested exception or null if none is present.

getCause

public java.lang.Throwable getCause()

Description copied from interface: ExceptionWrapper
Returns the nested exception or null if none is present.

This method is intentionally defined with the same signature as the java.lang.Throwable.getCause method in Java 1.4 and greater. By defining this method to return a nested exception, the Java 1.4 runtime will print the nested stack trace.

Specified by:
getCause in interface ExceptionWrapper

Overrides:
getCause in class java.lang.Throwable

Returns:
the nested exception or null if none is present.

diff --git a/docs/java/com/sleepycat/util/PackedInteger.html b/docs/java/com/sleepycat/util/PackedInteger.html
new file mode 100644
index 0000000..6aa61be
--- /dev/null
+++ b/docs/java/com/sleepycat/util/PackedInteger.html
@@ -0,0 +1,763 @@
PackedInteger (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

    Class PackedInteger

java.lang.Object
  com.sleepycat.util.PackedInteger

public class PackedInteger
extends java.lang.Object

Static methods for reading and writing packed integers.

Most applications should use the classes in the com.sleepycat.bind.tuple package rather than using this class directly.

See Also:
Integer Formats
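Example (an illustrative round trip using the methods and constants documented below):

    import com.sleepycat.util.PackedInteger;

    public class PackedExample {
        public static void main(String[] args) {
            // MAX_LENGTH (5) bytes is always enough for a packed int.
            byte[] buf = new byte[PackedInteger.MAX_LENGTH];
            int next = PackedInteger.writeInt(buf, 0, 123456);
            System.out.println("bytes used: " + next); // getWriteIntLength(123456)
            int value = PackedInteger.readInt(buf, 0);
            System.out.println(value); // 123456
        }
    }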

        Field Summary

Fields
Modifier and Type  Field and Description
static int  MAX_LENGTH
            The maximum number of bytes needed to store an int value (5).
static int  MAX_LONG_LENGTH
            The maximum number of bytes needed to store a long value (9).

        Constructor Summary

Constructors
Constructor and Description
PackedInteger()

        Method Summary

All Methods Static Methods Concrete Methods
Modifier and Type  Method and Description
static int   getReadIntLength(byte[] buf, int off)
             Returns the number of bytes that would be read by readInt(byte[], int).
static int   getReadLongLength(byte[] buf, int off)
             Returns the number of bytes that would be read by readLong(byte[], int).
static int   getReadSortedIntLength(byte[] buf, int off)
             Returns the number of bytes that would be read by readSortedInt(byte[], int).
static int   getReadSortedLongLength(byte[] buf, int off)
             Returns the number of bytes that would be read by readSortedLong(byte[], int).
static int   getWriteIntLength(int value)
             Returns the number of bytes that would be written by writeInt(byte[], int, int).
static int   getWriteLongLength(long value)
             Returns the number of bytes that would be written by writeLong(byte[], int, long).
static int   getWriteSortedIntLength(int value)
             Returns the number of bytes that would be written by writeSortedInt(byte[], int, int).
static int   getWriteSortedLongLength(long value)
             Returns the number of bytes that would be written by writeSortedLong(byte[], int, long).
static int   readInt(byte[] buf, int off)
             Reads a packed integer at the given buffer offset and returns it.
static long  readLong(byte[] buf, int off)
             Reads a packed long integer at the given buffer offset and returns it.
static int   readSortedInt(byte[] buf, int off)
             Reads a sorted packed integer at the given buffer offset and returns it.
static long  readSortedLong(byte[] buf, int off)
             Reads a sorted packed long integer at the given buffer offset and returns it.
static int   writeInt(byte[] buf, int offset, int value)
             Writes a packed integer starting at the given buffer offset and returns the next offset to be written.
static int   writeLong(byte[] buf, int offset, long value)
             Writes a packed long integer starting at the given buffer offset and returns the next offset to be written.
static int   writeSortedInt(byte[] buf, int offset, int value)
             Writes a packed sorted integer starting at the given buffer offset and returns the next offset to be written.
static int   writeSortedLong(byte[] buf, int offset, long value)
             Writes a packed sorted long integer starting at the given buffer offset and returns the next offset to be written.

Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

        Field Detail

MAX_LENGTH

public static final int MAX_LENGTH

The maximum number of bytes needed to store an int value (5).

See Also:
Constant Field Values

MAX_LONG_LENGTH

public static final int MAX_LONG_LENGTH

The maximum number of bytes needed to store a long value (9).

See Also:
Constant Field Values

        Constructor Detail

PackedInteger

public PackedInteger()

        Method Detail

        + + + +
          +
        • +

          readInt

          +
          public static int readInt(byte[] buf,
          +                          int off)
          +
          Reads a packed integer at the given buffer offset and returns it.
          +
          +
          Parameters:
          +
          buf - the buffer to read from.
          +
          off - the offset in the buffer at which to start reading.
          +
          Returns:
          +
          the integer that was read.
          +
          +
        • +
        + + + +
          +
        • +

          readLong

          +
          public static long readLong(byte[] buf,
          +                            int off)
          +
          Reads a packed long integer at the given buffer offset and returns it.
          +
          +
          Parameters:
          +
          buf - the buffer to read from.
          +
          off - the offset in the buffer at which to start reading.
          +
          Returns:
          +
          the long integer that was read.
          +
          +
        • +
        + + + +
          +
        • +

          getReadIntLength

          +
          public static int getReadIntLength(byte[] buf,
          +                                   int off)
          +
          Returns the number of bytes that would be read by readInt(byte[], int). + +

          Because the length is stored in the first byte, this method may be + called with only the first byte of the packed integer in the given + buffer. This method only accesses one byte at the given offset.

          +
          +
          Parameters:
          +
          buf - the buffer to read from.
          +
          off - the offset in the buffer at which to start reading.
          +
          Returns:
          +
          the number of bytes that would be read.
          +
          +
        • +
        + + + +
          +
        • +

          getReadLongLength

          +
          public static int getReadLongLength(byte[] buf,
          +                                    int off)
          +
          Returns the number of bytes that would be read by readLong(byte[], int). + +

          Because the length is stored in the first byte, this method may be + called with only the first byte of the packed integer in the given + buffer. This method only accesses one byte at the given offset.

          +
          +
          Parameters:
          +
          buf - the buffer to read from.
          +
          off - the offset in the buffer at which to start reading.
          +
          Returns:
          +
          the number of bytes that would be read.
          +
          +
        • +
        + + + +
          +
        • +

          writeInt

          +
          public static int writeInt(byte[] buf,
          +                           int offset,
          +                           int value)
          +
          Writes a packed integer starting at the given buffer offset and returns + the next offset to be written.
          +
          +
          Parameters:
          +
          buf - the buffer to write to.
          +
          offset - the offset in the buffer at which to start writing.
          +
          value - the integer to be written.
          +
          Returns:
          +
          the offset past the bytes written.
          +
          +
        • +
        + + + +
          +
        • +

          writeLong

          +
          public static int writeLong(byte[] buf,
          +                            int offset,
          +                            long value)
          +
          Writes a packed long integer starting at the given buffer offset and + returns the next offset to be written.
          +
          +
          Parameters:
          +
          buf - the buffer to write to.
          +
          offset - the offset in the buffer at which to start writing.
          +
          value - the long integer to be written.
          +
          Returns:
          +
          the offset past the bytes written.
          +
          +
        • +
        + + + +
          +
        • +

          getWriteIntLength

          +
          public static int getWriteIntLength(int value)
          +
          Returns the number of bytes that would be written by writeInt(byte[], int, int).
          +
          +
          Parameters:
          +
          value - the integer to be written.
          +
          Returns:
          +
          the number of bytes that would be used to write the given + integer.
          +
          +
        • +
        + + + +
          +
        • +

          getWriteLongLength

          +
          public static int getWriteLongLength(long value)
          +
          Returns the number of bytes that would be written by writeLong(byte[], int, long).
          +
          +
          Parameters:
          +
          value - the long integer to be written.
          +
          Returns:
          +
          the number of bytes that would be used to write the given long + integer.
          +
          +
        • +
        + + + +
          +
        • +

          readSortedInt

          +
          public static int readSortedInt(byte[] buf,
          +                                int off)
          +
          Reads a sorted packed integer at the given buffer offset and returns it.
          +
          +
          Parameters:
          +
          buf - the buffer to read from.
          +
          off - the offset in the buffer at which to start reading.
          +
          Returns:
          +
          the integer that was read.
          +
          +
        • +
        + + + +
          +
        • +

          readSortedLong

          +
          public static long readSortedLong(byte[] buf,
          +                                  int off)
          +
          Reads a sorted packed long integer at the given buffer offset and + returns it.
          +
          +
          Parameters:
          +
          buf - the buffer to read from.
          +
          off - the offset in the buffer at which to start reading.
          +
          Returns:
          +
          the long integer that was read.
          +
          +
        • +
        + + + +
          +
        • +

          getReadSortedIntLength

          +
          public static int getReadSortedIntLength(byte[] buf,
          +                                         int off)
          +
          Returns the number of bytes that would be read by readSortedInt(byte[], int). + +

          Because the length is stored in the first byte, this method may be + called with only the first byte of the packed integer in the given + buffer. This method only accesses one byte at the given offset.

          +
          +
          Parameters:
          +
          buf - the buffer to read from.
          +
          off - the offset in the buffer at which to start reading.
          +
          Returns:
          +
          the number of bytes that would be read.
          +
          +
getReadSortedLongLength

public static int getReadSortedLongLength(byte[] buf,
                                          int off)

Returns the number of bytes that would be read by readSortedLong(byte[], int).

Because the length is stored in the first byte, this method may be called with only the first byte of the packed integer in the given buffer. This method only accesses one byte at the given offset.

Parameters:
buf - the buffer to read from.
off - the offset in the buffer at which to start reading.
Returns:
the number of bytes that would be read.
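Because only the byte at the given offset is examined, these methods make it cheap to walk a buffer of back-to-back sorted packed integers. A minimal sketch; the 5-bytes-per-entry sizing is an assumption (one length byte plus at most four value bytes per packed int):

    import com.sleepycat.util.PackedInteger;

    public class SortedPackedScan {
        public static void main(String[] args) {
            // Pack three sorted integers back to back.
            byte[] buf = new byte[3 * 5]; // assumes a packed int needs at most 5 bytes
            int end = PackedInteger.writeSortedInt(buf, 0, -1);
            end = PackedInteger.writeSortedInt(buf, end, 0);
            end = PackedInteger.writeSortedInt(buf, end, 1000);

            // Walk the buffer; only the first byte of each entry is read
            // to determine its length.
            int off = 0;
            while (off < end) {
                int len = PackedInteger.getReadSortedIntLength(buf, off);
                int value = PackedInteger.readSortedInt(buf, off);
                System.out.println(value + " (" + len + " bytes)");
                off += len;
            }
        }
    }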
writeSortedInt

public static int writeSortedInt(byte[] buf,
                                 int offset,
                                 int value)

Writes a packed sorted integer starting at the given buffer offset and returns the next offset to be written.

Parameters:
buf - the buffer to write to.
offset - the offset in the buffer at which to start writing.
value - the integer to be written.
Returns:
the offset past the bytes written.
writeSortedLong

public static int writeSortedLong(byte[] buf,
                                  int offset,
                                  long value)

Writes a packed sorted long integer starting at the given buffer offset and returns the next offset to be written.

Parameters:
buf - the buffer to write to.
offset - the offset in the buffer at which to start writing.
value - the long integer to be written.
Returns:
the offset past the bytes written.
getWriteSortedIntLength

public static int getWriteSortedIntLength(int value)

Returns the number of bytes that would be written by writeSortedInt(byte[], int, int).

Parameters:
value - the integer to be written.
Returns:
the number of bytes that would be used to write the given integer.
getWriteSortedLongLength

public static int getWriteSortedLongLength(long value)

Returns the number of bytes that would be written by writeSortedLong(byte[], int, long).

Parameters:
value - the long integer to be written.
Returns:
the number of bytes that would be used to write the given long integer.
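Taken together, the sorted variants compose into a size-write-read round trip; a minimal sketch:

    import com.sleepycat.util.PackedInteger;

    public class SortedPackedRoundTrip {
        public static void main(String[] args) {
            long value = -42L;

            byte[] buf = new byte[PackedInteger.getWriteSortedLongLength(value)];
            int next = PackedInteger.writeSortedLong(buf, 0, value);
            // next == buf.length: the write used exactly the predicted bytes.

            long readBack = PackedInteger.readSortedLong(buf, 0);
            System.out.println(readBack); // -42
        }
    }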
diff --git a/docs/java/com/sleepycat/util/RuntimeExceptionWrapper.html b/docs/java/com/sleepycat/util/RuntimeExceptionWrapper.html
new file mode 100644
index 0000000..2dab439
--- /dev/null
+++ b/docs/java/com/sleepycat/util/RuntimeExceptionWrapper.html
@@ -0,0 +1,353 @@

RuntimeExceptionWrapper (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

Class RuntimeExceptionWrapper

java.lang.Object
    java.lang.Throwable
        java.lang.Exception
            java.lang.RuntimeException
                com.sleepycat.util.RuntimeExceptionWrapper
All Implemented Interfaces:
ExceptionWrapper, java.io.Serializable

public class RuntimeExceptionWrapper
extends java.lang.RuntimeException
implements ExceptionWrapper

A RuntimeException that can contain nested exceptions.

Author:
Mark Hayes
See Also:
Serialized Form
Constructor Summary

Constructors
Constructor and Description
RuntimeExceptionWrapper(java.lang.Throwable e)

Method Summary

Modifier and Type                  Method and Description
java.lang.Throwable                getDetail()
                                   Deprecated. Replaced by Throwable.getCause().
static java.lang.RuntimeException  wrapIfNeeded(java.lang.Throwable e)
                                   Wraps the given exception if it is not a RuntimeException.

Methods inherited from class java.lang.Throwable
addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
Constructor Detail

RuntimeExceptionWrapper

public RuntimeExceptionWrapper(java.lang.Throwable e)
Method Detail

wrapIfNeeded

public static java.lang.RuntimeException wrapIfNeeded(java.lang.Throwable e)

Wraps the given exception if it is not a RuntimeException.

Parameters:
e - any exception.
Returns:
e if it is a RuntimeException, otherwise a RuntimeExceptionWrapper for e.
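A typical use is rethrowing a checked exception through an API that only permits unchecked exceptions, keeping the original reachable as the cause. A brief sketch; doCheckedWork is a hypothetical stand-in for any method that throws a checked exception:

    import com.sleepycat.util.RuntimeExceptionWrapper;
    import java.io.IOException;

    public class WrapIfNeededExample {
        static void doCheckedWork() throws IOException { // hypothetical
            throw new IOException("simulated failure");
        }

        public static void main(String[] args) {
            try {
                doCheckedWork();
            } catch (Exception e) {
                // A RuntimeException passes through unchanged; anything
                // else is wrapped, so the result is always safe to rethrow.
                RuntimeException re = RuntimeExceptionWrapper.wrapIfNeeded(e);
                System.out.println(re.getCause()); // the original IOException
            }
        }
    }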
getDetail

public java.lang.Throwable getDetail()

Deprecated. Replaced by Throwable.getCause().

Description copied from interface: ExceptionWrapper
Returns the nested exception or null if none is present.

Specified by:
getDetail in interface ExceptionWrapper
Returns:
the nested exception or null if none is present.
diff --git a/docs/java/com/sleepycat/util/UtfOps.html b/docs/java/com/sleepycat/util/UtfOps.html
new file mode 100644
index 0000000..ead3126
--- /dev/null
+++ b/docs/java/com/sleepycat/util/UtfOps.html
@@ -0,0 +1,554 @@

UtfOps (Oracle - Berkeley DB Java Edition API)
com.sleepycat.util

Class UtfOps

java.lang.Object
    com.sleepycat.util.UtfOps

public class UtfOps
extends java.lang.Object

UTF operations with more flexibility than is provided by DataInput and DataOutput.

Author:
Mark Hayes
Constructor Summary

Constructors
Constructor and Description
UtfOps()

Method Summary

Modifier and Type        Method and Description
static int               bytesToChars(byte[] bytes, int byteOffset, char[] chars, int charOffset, int len, boolean isByteLen)
                         Converts byte arrays into character arrays.
static java.lang.String  bytesToString(byte[] bytes, int offset, int length)
                         Converts byte arrays into strings.
static void              charsToBytes(char[] chars, int charOffset, byte[] bytes, int byteOffset, int charLength)
                         Converts character arrays into byte arrays.
static int               getByteLength(char[] chars)
                         Returns the byte length of the UTF string that would be created by converting the given characters to UTF.
static int               getByteLength(char[] chars, int offset, int length)
                         Returns the byte length of the UTF string that would be created by converting the given characters to UTF.
static int               getCharLength(byte[] bytes)
                         Returns the number of characters represented by the given UTF string.
static int               getCharLength(byte[] bytes, int offset, int length)
                         Returns the number of characters represented by the given UTF string.
static int               getZeroTerminatedByteLength(byte[] bytes, int offset)
                         Returns the byte length of a null terminated UTF string, not including the terminator.
static byte[]            stringToBytes(java.lang.String string)
                         Converts strings to byte arrays.

Methods inherited from class java.lang.Object
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Constructor Detail

UtfOps

public UtfOps()
Method Detail

getZeroTerminatedByteLength

public static int getZeroTerminatedByteLength(byte[] bytes,
                                              int offset)
                                       throws java.lang.IndexOutOfBoundsException

Returns the byte length of a null terminated UTF string, not including the terminator.

Parameters:
bytes - the data containing the UTF string.
offset - the beginning of the string to measure.
Returns:
the number of bytes.
Throws:
java.lang.IndexOutOfBoundsException - if no zero terminator is found.
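This makes it possible, for example, to measure several zero-terminated UTF strings packed into one buffer without copying them; a minimal sketch:

    import com.sleepycat.util.UtfOps;

    public class ZeroTerminatedExample {
        public static void main(String[] args) {
            // Two zero-terminated UTF strings packed into one buffer.
            byte[] buf = {'h', 'i', 0, 'b', 'y', 'e', 0};

            int len = UtfOps.getZeroTerminatedByteLength(buf, 0);
            System.out.println(UtfOps.bytesToString(buf, 0, len));    // hi

            int off = len + 1; // skip the zero terminator
            int len2 = UtfOps.getZeroTerminatedByteLength(buf, off);
            System.out.println(UtfOps.bytesToString(buf, off, len2)); // bye
        }
    }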
getByteLength

public static int getByteLength(char[] chars)

Returns the byte length of the UTF string that would be created by converting the given characters to UTF.

Parameters:
chars - the characters that would be converted.
Returns:
the byte length of the equivalent UTF data.
getByteLength

public static int getByteLength(char[] chars,
                                int offset,
                                int length)

Returns the byte length of the UTF string that would be created by converting the given characters to UTF.

Parameters:
chars - the characters that would be converted.
offset - the first character to be converted.
length - the number of characters to be converted.
Returns:
the byte length of the equivalent UTF data.
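Characters outside the ASCII range expand to more than one byte, so the byte length can exceed the character count; a quick illustration, assuming the two-byte encoding of 'é' used by DataOutput-style UTF:

    import com.sleepycat.util.UtfOps;

    public class ByteLengthExample {
        public static void main(String[] args) {
            char[] chars = "café".toCharArray();

            // 'é' occupies two bytes, so 4 characters need 5 bytes.
            System.out.println(UtfOps.getByteLength(chars));       // 5
            // Measure only the first three characters ("caf").
            System.out.println(UtfOps.getByteLength(chars, 0, 3)); // 3
        }
    }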
getCharLength

public static int getCharLength(byte[] bytes)
                         throws java.lang.IllegalArgumentException,
                                java.lang.IndexOutOfBoundsException

Returns the number of characters represented by the given UTF string.

Parameters:
bytes - the UTF string.
Returns:
the number of characters.
Throws:
java.lang.IndexOutOfBoundsException - if a UTF character sequence at the end of the data is not complete.
java.lang.IllegalArgumentException - if an illegal UTF sequence is encountered.
getCharLength

public static int getCharLength(byte[] bytes,
                                int offset,
                                int length)
                         throws java.lang.IllegalArgumentException,
                                java.lang.IndexOutOfBoundsException

Returns the number of characters represented by the given UTF string.

Parameters:
bytes - the data containing the UTF string.
offset - the first byte to be converted.
length - the number of bytes to be converted.
Returns:
the number of characters.
Throws:
java.lang.IndexOutOfBoundsException - if a UTF character sequence at the end of the data is not complete.
java.lang.IllegalArgumentException - if an illegal UTF sequence is encountered.
bytesToChars

public static int bytesToChars(byte[] bytes,
                               int byteOffset,
                               char[] chars,
                               int charOffset,
                               int len,
                               boolean isByteLen)
                        throws java.lang.IllegalArgumentException,
                               java.lang.IndexOutOfBoundsException

Converts byte arrays into character arrays.

Parameters:
bytes - the source byte data to convert
byteOffset - the offset into the byte array at which to start the conversion
chars - the destination array
charOffset - the offset into chars at which to begin the copy
len - the amount of data to copy into chars
isByteLen - if true then len is a measure of bytes, otherwise len is a measure of characters
Returns:
the byte offset after converting the bytes.
Throws:
java.lang.IndexOutOfBoundsException - if a UTF character sequence at the end of the data is not complete.
java.lang.IllegalArgumentException - if an illegal UTF sequence is encountered.
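The isByteLen flag controls whether len counts source bytes or destination characters; a minimal sketch using the byte-counted form:

    import com.sleepycat.util.UtfOps;

    public class BytesToCharsExample {
        public static void main(String[] args) {
            byte[] utf = UtfOps.stringToBytes("hello");
            char[] chars = new char[UtfOps.getCharLength(utf)];

            // isByteLen == true: len counts bytes in the source, and the
            // return value is the byte offset just past the converted data.
            int nextByte = UtfOps.bytesToChars(utf, 0, chars, 0, utf.length, true);

            System.out.println(new String(chars));      // hello
            System.out.println(nextByte == utf.length); // true
        }
    }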
charsToBytes

public static void charsToBytes(char[] chars,
                                int charOffset,
                                byte[] bytes,
                                int byteOffset,
                                int charLength)

Converts character arrays into byte arrays.

Parameters:
chars - the source character data to convert
charOffset - the offset into the character array at which to start the conversion
bytes - the destination array
byteOffset - the offset into bytes at which to begin the copy
charLength - the number of characters to copy into bytes
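Since charsToBytes returns void, the destination must be sized up front, which is what getByteLength is for; a minimal sketch:

    import com.sleepycat.util.UtfOps;

    public class CharsToBytesExample {
        public static void main(String[] args) {
            char[] chars = "naïve".toCharArray();

            // getByteLength reports exactly how many bytes the conversion
            // produces, so the destination array fits precisely.
            byte[] bytes = new byte[UtfOps.getByteLength(chars)];
            UtfOps.charsToBytes(chars, 0, bytes, 0, chars.length);

            System.out.println(chars.length + " chars -> " + bytes.length + " bytes");
        }
    }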
bytesToString

public static java.lang.String bytesToString(byte[] bytes,
                                             int offset,
                                             int length)
                                      throws java.lang.IllegalArgumentException,
                                             java.lang.IndexOutOfBoundsException

Converts byte arrays into strings.

Parameters:
bytes - the source byte data to convert
offset - the offset into the byte array at which to start the conversion
length - the number of bytes to be converted.
Returns:
the string.
Throws:
java.lang.IndexOutOfBoundsException - if a UTF character sequence at the end of the data is not complete.
java.lang.IllegalArgumentException - if an illegal UTF sequence is encountered.
stringToBytes

public static byte[] stringToBytes(java.lang.String string)

Converts strings to byte arrays.

Parameters:
string - the string to convert.
Returns:
the UTF byte array.
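stringToBytes and bytesToString are inverses, so a round trip is a convenient sanity check:

    import com.sleepycat.util.UtfOps;

    public class UtfRoundTrip {
        public static void main(String[] args) {
            String original = "Straße";
            byte[] utf = UtfOps.stringToBytes(original);
            String restored = UtfOps.bytesToString(utf, 0, utf.length);
            System.out.println(original.equals(restored)); // true
        }
    }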
diff --git a/docs/java/com/sleepycat/util/class-use/ClassResolver.Stream.html b/docs/java/com/sleepycat/util/class-use/ClassResolver.Stream.html
new file mode 100644
index 0000000..bd39963
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/ClassResolver.Stream.html
@@ -0,0 +1,174 @@

Uses of Class
com.sleepycat.util.ClassResolver.Stream
diff --git a/docs/java/com/sleepycat/util/class-use/ClassResolver.html b/docs/java/com/sleepycat/util/class-use/ClassResolver.html
new file mode 100644
index 0000000..8f10cd6
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/ClassResolver.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.util.ClassResolver

No usage of com.sleepycat.util.ClassResolver
diff --git a/docs/java/com/sleepycat/util/class-use/ConfigBeanInfoBase.html b/docs/java/com/sleepycat/util/class-use/ConfigBeanInfoBase.html
new file mode 100644
index 0000000..d3a1a7f
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/ConfigBeanInfoBase.html
@@ -0,0 +1,222 @@

Uses of Class
com.sleepycat.util.ConfigBeanInfoBase
diff --git a/docs/java/com/sleepycat/util/class-use/ExceptionUnwrapper.html b/docs/java/com/sleepycat/util/class-use/ExceptionUnwrapper.html
new file mode 100644
index 0000000..7861e68
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/ExceptionUnwrapper.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.util.ExceptionUnwrapper

No usage of com.sleepycat.util.ExceptionUnwrapper
diff --git a/docs/java/com/sleepycat/util/class-use/ExceptionWrapper.html b/docs/java/com/sleepycat/util/class-use/ExceptionWrapper.html
new file mode 100644
index 0000000..f2d5990
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/ExceptionWrapper.html
@@ -0,0 +1,179 @@

Uses of Interface
com.sleepycat.util.ExceptionWrapper
diff --git a/docs/java/com/sleepycat/util/class-use/FastInputStream.html b/docs/java/com/sleepycat/util/class-use/FastInputStream.html
new file mode 100644
index 0000000..0d44f3d
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/FastInputStream.html
@@ -0,0 +1,174 @@

Uses of Class
com.sleepycat.util.FastInputStream
diff --git a/docs/java/com/sleepycat/util/class-use/FastOutputStream.html b/docs/java/com/sleepycat/util/class-use/FastOutputStream.html
new file mode 100644
index 0000000..d484d18
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/FastOutputStream.html
@@ -0,0 +1,201 @@

Uses of Class
com.sleepycat.util.FastOutputStream
diff --git a/docs/java/com/sleepycat/util/class-use/IOExceptionWrapper.html b/docs/java/com/sleepycat/util/class-use/IOExceptionWrapper.html
new file mode 100644
index 0000000..1dcfbef
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/IOExceptionWrapper.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.util.IOExceptionWrapper

No usage of com.sleepycat.util.IOExceptionWrapper
diff --git a/docs/java/com/sleepycat/util/class-use/PackedInteger.html b/docs/java/com/sleepycat/util/class-use/PackedInteger.html
new file mode 100644
index 0000000..9427967
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/PackedInteger.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.util.PackedInteger

No usage of com.sleepycat.util.PackedInteger
diff --git a/docs/java/com/sleepycat/util/class-use/RuntimeExceptionWrapper.html b/docs/java/com/sleepycat/util/class-use/RuntimeExceptionWrapper.html
new file mode 100644
index 0000000..d0600d3
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/RuntimeExceptionWrapper.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.util.RuntimeExceptionWrapper

No usage of com.sleepycat.util.RuntimeExceptionWrapper
diff --git a/docs/java/com/sleepycat/util/class-use/UtfOps.html b/docs/java/com/sleepycat/util/class-use/UtfOps.html
new file mode 100644
index 0000000..c2c215f
--- /dev/null
+++ b/docs/java/com/sleepycat/util/class-use/UtfOps.html
@@ -0,0 +1,129 @@

Uses of Class
com.sleepycat.util.UtfOps

No usage of com.sleepycat.util.UtfOps
diff --git a/docs/java/com/sleepycat/util/package-frame.html b/docs/java/com/sleepycat/util/package-frame.html
new file mode 100644
index 0000000..16277be
--- /dev/null
+++ b/docs/java/com/sleepycat/util/package-frame.html
@@ -0,0 +1,36 @@

com.sleepycat.util (Oracle - Berkeley DB Java Edition API)

com.sleepycat.util
diff --git a/docs/java/com/sleepycat/util/package-summary.html b/docs/java/com/sleepycat/util/package-summary.html
new file mode 100644
index 0000000..8b99ddc
--- /dev/null
+++ b/docs/java/com/sleepycat/util/package-summary.html
@@ -0,0 +1,243 @@

com.sleepycat.util (Oracle - Berkeley DB Java Edition API)

Package com.sleepycat.util

General utilities used throughout Berkeley DB.

See: Description

Interface Summary

ExceptionWrapper
    Interface implemented by exceptions that can contain nested exceptions.

Class Summary

ClassResolver
    Implements policies for loading user-supplied classes.
ClassResolver.Stream
    A specialized ObjectInputStream that supports use of a user-specified ClassLoader.
ConfigBeanInfoBase
ExceptionUnwrapper
    Unwraps nested exceptions by calling the ExceptionWrapper.getCause() method for exceptions that implement the ExceptionWrapper interface.
FastInputStream
    A replacement for ByteArrayInputStream that does not synchronize every byte read.
FastOutputStream
    A replacement for ByteArrayOutputStream that does not synchronize every byte written.
PackedInteger
    Static methods for reading and writing packed integers.
UtfOps
    UTF operations with more flexibility than is provided by DataInput and DataOutput.

Exception Summary

IOExceptionWrapper
    An IOException that can contain nested exceptions.
RuntimeExceptionWrapper
    A RuntimeException that can contain nested exceptions.

Package com.sleepycat.util Description

General utilities used throughout Berkeley DB.
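As a quick illustration of the two stream replacements summarized above, here is a sketch that assumes they mirror the ByteArrayOutputStream/ByteArrayInputStream constructors and toByteArray() method they replace:

    import com.sleepycat.util.FastInputStream;
    import com.sleepycat.util.FastOutputStream;
    import java.io.IOException;

    public class FastStreamExample {
        public static void main(String[] args) throws IOException {
            // Unsynchronized drop-ins for the java.io byte-array streams.
            FastOutputStream out = new FastOutputStream();
            out.write(new byte[] {1, 2, 3});

            // Assumes a ByteArrayInputStream-style byte[] constructor.
            FastInputStream in = new FastInputStream(out.toByteArray());
            System.out.println(in.read()); // 1
        }
    }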
diff --git a/docs/java/com/sleepycat/util/package-tree.html b/docs/java/com/sleepycat/util/package-tree.html
new file mode 100644
index 0000000..47acf8e
--- /dev/null
+++ b/docs/java/com/sleepycat/util/package-tree.html
@@ -0,0 +1,187 @@

com.sleepycat.util Class Hierarchy (Oracle - Berkeley DB Java Edition API)

Hierarchy For Package com.sleepycat.util

Class Hierarchy

Interface Hierarchy
diff --git a/docs/java/com/sleepycat/util/package-use.html b/docs/java/com/sleepycat/util/package-use.html
new file mode 100644
index 0000000..a2d3d69
--- /dev/null
+++ b/docs/java/com/sleepycat/util/package-use.html
@@ -0,0 +1,292 @@

Uses of Package com.sleepycat.util (Oracle - Berkeley DB Java Edition API)

Uses of Package
com.sleepycat.util
diff --git a/docs/java/constant-values.html b/docs/java/constant-values.html
new file mode 100644
index 0000000..2439bc2
--- /dev/null
+++ b/docs/java/constant-values.html
@@ -0,0 +1,1836 @@

Constant Field Values (Oracle - Berkeley DB Java Edition API)

Constant Field Values

Contents

com.sleepycat.*
diff --git a/docs/java/deprecated-list.html b/docs/java/deprecated-list.html
new file mode 100644
index 0000000..6e9ff82
--- /dev/null
+++ b/docs/java/deprecated-list.html
@@ -0,0 +1,1015 @@

Deprecated List (Oracle - Berkeley DB Java Edition API)

Deprecated Classes

com.sleepycat.je.jmx.JEMBeanHelper
    As of JE 4, JEMBeanHelper is deprecated in favor of the concrete MBeans available by default with a JE environment. These MBeans can be registered and enabled by the environment by setting the following JVM property: JEMonitor: This MBean provides general stats monitoring and access to basic environment level operations.
    JEMBeanHelper is a utility class for an MBean implementation that wants to add management of a JE environment to its capabilities. MBean implementations can contain a JEMBeanHelper instance to get MBean metadata for JE and to set attributes, get attributes, and invoke operations.
    com.sleepycat.je.jmx.JEMonitor and the example program jmx.JEApplicationMBean are two MBean implementations which provide support for different application use cases. See those classes for examples of how to use JEMBeanHelper.

com.sleepycat.je.LockStats
    As of 4.0.10, replaced by Environment.getStats(StatsConfig).
diff --git a/docs/java/help-doc.html b/docs/java/help-doc.html
new file mode 100644
index 0000000..c2bea6d
--- /dev/null
+++ b/docs/java/help-doc.html
@@ -0,0 +1,234 @@

API Help (Oracle - Berkeley DB Java Edition API)

How This API Document Is Organized

This API (Application Programming Interface) document has pages corresponding to the items in the navigation bar, described as follows.

Overview

The Overview page is the front page of this API document and provides a list of all packages with a summary for each. This page can also contain an overall description of the set of packages.

Package

Each package has a page that contains a list of its classes and interfaces, with a summary for each. This page can contain six categories:

• Interfaces (italic)
• Classes
• Enums
• Exceptions
• Errors
• Annotation Types

Class/Interface

Each class, interface, nested class and nested interface has its own separate page. Each of these pages has three sections consisting of a class/interface description, summary tables, and detailed member descriptions:

• Class inheritance diagram
• Direct Subclasses
• All Known Subinterfaces
• All Known Implementing Classes
• Class/interface declaration
• Class/interface description

• Nested Class Summary
• Field Summary
• Constructor Summary
• Method Summary

• Field Detail
• Constructor Detail
• Method Detail

Each summary entry contains the first sentence from the detailed description for that item. The summary entries are alphabetical, while the detailed descriptions are in the order they appear in the source code. This preserves the logical groupings established by the programmer.

Annotation Type

Each annotation type has its own separate page with the following sections:

• Annotation Type declaration
• Annotation Type description
• Required Element Summary
• Optional Element Summary
• Element Detail

Enum

Each enum has its own separate page with the following sections:

• Enum declaration
• Enum description
• Enum Constant Summary
• Enum Constant Detail

Use

Each documented package, class and interface has its own Use page. This page describes what packages, classes, methods, constructors and fields use any part of the given class or package. Given a class or interface A, its Use page includes subclasses of A, fields declared as A, methods that return A, and methods and constructors with parameters of type A. You can access this page by first going to the package, class or interface, then clicking on the "Use" link in the navigation bar.

Tree (Class Hierarchy)

There is a Class Hierarchy page for all packages, plus a hierarchy for each package. Each hierarchy page contains a list of classes and a list of interfaces. The classes are organized by inheritance structure starting with java.lang.Object. The interfaces do not inherit from java.lang.Object.

• When viewing the Overview page, clicking on "Tree" displays the hierarchy for all packages.
• When viewing a particular package, class or interface page, clicking "Tree" displays the hierarchy for only that package.

Deprecated API

The Deprecated API page lists all of the API that have been deprecated. A deprecated API is not recommended for use, generally due to improvements, and a replacement API is usually given. Deprecated APIs may be removed in future implementations.

Index

The Index contains an alphabetic list of all classes, interfaces, constructors, methods, and fields.

Prev/Next

These links take you to the next or previous class, interface, package, or related page.

Frames/No Frames

These links show and hide the HTML frames. All pages are available with or without frames.

All Classes

The All Classes link shows all classes and interfaces except non-static nested types.

Serialized Form

Each serializable or externalizable class has a description of its serialization fields and methods. This information is of interest to re-implementors, not to developers using the API. While there is no link in the navigation bar, you can get to this information by going to any serialized class and clicking "Serialized Form" in the "See also" section of the class description.

Constant Field Values

The Constant Field Values page lists the static final fields and their values.

This help file applies to API documentation generated using the standard doclet.
diff --git a/docs/java/index-all.html b/docs/java/index-all.html
new file mode 100644
index 0000000..ff4eddc
--- /dev/null
+++ b/docs/java/index-all.html
@@ -0,0 +1,12102 @@

Index (Oracle - Berkeley DB Java Edition API)
A B C D E F G H I J K L M N O P Q R S T U V W X

A

abort() - Method in class com.sleepycat.je.Transaction
    Cause an abnormal termination of the transaction.
abortTransaction() - Method in class com.sleepycat.collections.CurrentTransaction
    Aborts the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction.
acksPending() - Method in exception com.sleepycat.je.rep.InsufficientAcksException
    Returns the number of Replicas that did not respond with an acknowledgment within the Replica commit timeout period.
acksRequired() - Method in exception com.sleepycat.je.rep.InsufficientAcksException
    Returns the number of acknowledgments required by the commit policy.
ackTimeout() - Method in exception com.sleepycat.je.rep.InsufficientAcksException
    Returns the acknowledgment timeout that was in effect at the time of the exception.
add(Map.Entry<K, V>) - Method in class com.sleepycat.collections.StoredEntrySet
    Adds the specified element to this set if it is not already present (optional operation).
add(E) - Method in class com.sleepycat.collections.StoredIterator
    Inserts the specified element into the list or inserts a duplicate into other types of collections (optional operation).
add(K) - Method in class com.sleepycat.collections.StoredKeySet
    Adds the specified key to this set if it is not already present (optional operation).
add(E) - Method in class com.sleepycat.collections.StoredValueSet
    Adds the specified entity to this set if it is not already present (optional operation).
addAll(Collection<? extends E>) - Method in class com.sleepycat.collections.StoredCollection
    Adds all of the elements in the specified collection to this collection (optional operation).
addClassToEvolve(String) - Method in class com.sleepycat.persist.evolve.EvolveConfig
    Adds an entity class for a primary index to be converted.
addCondition(SecondaryIndex<SK, PK, E>, SK) - Method in class com.sleepycat.persist.EntityJoin
    Adds a secondary key condition to the equality join.
addConfiguredFileset(FileSet) - Method in class com.sleepycat.persist.model.ClassEnhancerTask
addConverter(Converter) - Method in class com.sleepycat.persist.evolve.Mutations
    Adds a converter mutation.
addDeleter(Deleter) - Method in class com.sleepycat.persist.evolve.Mutations
    Adds a deleter mutation.
addOperations() - Method in class com.sleepycat.je.jmx.JEDiagnostics
addOperations() - Method in class com.sleepycat.je.jmx.JEMBean
    Adds MBean operations into the list.
addOperations() - Method in class com.sleepycat.je.jmx.JEMonitor
addRenamer(Renamer) - Method in class com.sleepycat.persist.evolve.Mutations
    Adds a renamer mutation.
addSize(int) - Method in class com.sleepycat.util.FastOutputStream
    Skip the given number of bytes in the buffer.
ADLER32_CHUNK_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    By default, JE passes an entire log record to the Adler32 class for checksumming.
ALLOW_ARBITER_ACK - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    Boolean flag; if set to true, an Arbiter may acknowledge a transaction when a replication node is not available.
AnnotationModel - Class in com.sleepycat.persist.model
    The default annotation-based entity model.
AnnotationModel() - Constructor for class com.sleepycat.persist.model.AnnotationModel
    Constructs a model for annotated entity classes.
append(V) - Method in class com.sleepycat.collections.StoredMap
    Appends a given value, returning the newly assigned key.
AppStateMonitor - Interface in com.sleepycat.je.rep
    A mechanism for adding application-specific information when asynchronously tracking the state of a running JE HA application.
Arbiter - Class in com.sleepycat.je.rep.arbiter
    Provides a mechanism to allow write availability for the replication group even when the number of replication nodes is less than a majority.
Arbiter(ArbiterConfig) - Constructor for class com.sleepycat.je.rep.arbiter.Arbiter
    An Arbiter used in elections and transaction acknowledgments.
ARBITER_OUTPUT_QUEUE_SIZE - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    The size of the queue used to hold commit records that the Feeder uses to request acknowledgment from an Arbiter.
ArbiterConfig - Class in com.sleepycat.je.rep.arbiter
    The configuration parameters for an Arbiter.
ArbiterConfig() - Constructor for class com.sleepycat.je.rep.arbiter.ArbiterConfig
    Arbiter configuration.
ArbiterConfig(Properties) - Constructor for class com.sleepycat.je.rep.arbiter.ArbiterConfig
    Arbiter configuration.
ArbiterMutableConfig - Class in com.sleepycat.je.rep.arbiter
    The mutable configuration parameters for an Arbiter.
ArbiterStats - Class in com.sleepycat.je.rep.arbiter
    Statistics for an Arbiter.
areDuplicatesAllowed() - Method in class com.sleepycat.collections.StoredContainer
    Returns whether duplicate keys are allowed in this container.
areDuplicatesOrdered() - Method in class com.sleepycat.collections.StoredContainer
    Returns whether duplicate keys are allowed and sorted by element value.
areKeyRangesAllowed() - Method in class com.sleepycat.collections.StoredContainer
    Returns whether key ranges are allowed in this container.
areKeysRenumbered() - Method in class com.sleepycat.collections.StoredContainer
    Returns whether keys are renumbered when insertions and deletions occur.
assignKey(DatabaseEntry) - Method in interface com.sleepycat.collections.PrimaryKeyAssigner
    Assigns a new primary key value into the given buffer.
AtomicLongMax - Class in com.sleepycat.je.rep.util
    An atomic long that maintains a maximum value.
AtomicLongMax(long) - Constructor for class com.sleepycat.je.rep.util.AtomicLongMax
ATT_CACHE_PERCENT - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_CACHE_PERCENT - Static variable in class com.sleepycat.je.jmx.JEMonitor
ATT_CACHE_SIZE - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_CACHE_SIZE - Static variable in class com.sleepycat.je.jmx.JEMonitor
ATT_CONSOLEHANDLER_LEVEL - Static variable in class com.sleepycat.je.jmx.JEDiagnostics
ATT_ENV_HOME - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_ENV_HOME - Static variable in class com.sleepycat.je.jmx.JEMonitor
ATT_FILEHANDLER_LEVEL - Static variable in class com.sleepycat.je.jmx.JEDiagnostics
ATT_IS_READ_ONLY - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_IS_READ_ONLY - Static variable in class com.sleepycat.je.jmx.JEMonitor
ATT_IS_SERIALIZABLE - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_IS_SERIALIZABLE - Static variable in class com.sleepycat.je.jmx.JEMonitor
ATT_IS_TRANSACTIONAL - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_IS_TRANSACTIONAL - Static variable in class com.sleepycat.je.jmx.JEMonitor
ATT_LOCK_TIMEOUT - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_LOCK_TIMEOUT - Static variable in class com.sleepycat.je.jmx.JEMonitor
ATT_OPEN - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_SET_READ_ONLY - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_SET_SERIALIZABLE - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_SET_TRANSACTIONAL - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_TXN_TIMEOUT - Static variable in class com.sleepycat.je.jmx.JEMBeanHelper
    Deprecated.
ATT_TXN_TIMEOUT - Static variable in class com.sleepycat.je.jmx.JEMonitor
available() - Method in class com.sleepycat.je.util.LogVerificationInputStream
available() - Method in class com.sleepycat.util.FastInputStream

B

beginTransaction(TransactionConfig) - Method in class com.sleepycat.collections.CurrentTransaction
    Begins a new transaction for this environment and associates it with the current thread.
beginTransaction(Transaction, TransactionConfig) - Method in class com.sleepycat.je.Environment
    Creates a new transaction in the database environment.
BigDecimalBinding - Class in com.sleepycat.bind.tuple
    A concrete TupleBinding for an unsorted BigDecimal value.
BigDecimalBinding() - Constructor for class com.sleepycat.bind.tuple.BigDecimalBinding
bigDecimalToEntry(BigDecimal, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.BigDecimalBinding
    Converts a BigDecimal value into an entry buffer.
bigDecimalToEntry(BigDecimal, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedBigDecimalBinding
    Converts a BigDecimal value into an entry buffer.
BigIntegerBinding - Class in com.sleepycat.bind.tuple
    A concrete TupleBinding for a BigInteger value.
BigIntegerBinding() - Constructor for class com.sleepycat.bind.tuple.BigIntegerBinding
bigIntegerToEntry(BigInteger, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.BigIntegerBinding
    Converts a BigInteger value into an entry buffer.
BinaryEqualityComparator - Interface in com.sleepycat.je
    A tag interface used to mark a BTree or duplicate comparator class as a binary equality comparator, that is, a comparator that considers two keys (byte arrays) to be equal if and only if they have the same length and are equal byte-per-byte.
BIND_INADDR_ANY - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    When this configuration parameter is set to true, it binds the HA socket to INADDR_ANY, so that HA services are available on all network interfaces.
BooleanBinding - Class in com.sleepycat.bind.tuple
    A concrete TupleBinding for a Boolean primitive wrapper or a boolean primitive.
BooleanBinding() - Constructor for class com.sleepycat.bind.tuple.BooleanBinding
booleanToEntry(boolean, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.BooleanBinding
    Converts a simple boolean value into an entry buffer.
BtreeStats - Class in com.sleepycat.je
    The BtreeStats object is used to return Btree database statistics.
BtreeStats() - Constructor for class com.sleepycat.je.BtreeStats
buf - Variable in class com.sleepycat.util.FastInputStream
ByteArrayBinding - Class in com.sleepycat.bind
    A pass-through EntryBinding that uses the entry's byte array as the key or data object.
ByteArrayBinding() - Constructor for class com.sleepycat.bind.ByteArrayBinding
    Creates a byte array binding.
ByteBinding - Class in com.sleepycat.bind.tuple
    A concrete TupleBinding for a Byte primitive wrapper or a byte primitive.
ByteBinding() - Constructor for class com.sleepycat.bind.tuple.ByteBinding
bytesToChars(byte[], int, char[], int, int, boolean) - Static method in class com.sleepycat.util.UtfOps
    Converts byte arrays into character arrays.
bytesToString(byte[], int, int) - Static method in class com.sleepycat.util.UtfOps
    Converts byte arrays into strings.
byteToEntry(byte, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.ByteBinding
    Converts a simple byte value into an entry buffer.

    C

    +
    +
    CacheMode - Enum in com.sleepycat.je
    +
    +
    Modes that can be specified for control over caching of records in the JE + in-memory cache.
    +
    +
    CharacterBinding - Class in com.sleepycat.bind.tuple
    +
    +
    A concrete TupleBinding for a Character primitive + wrapper or a char primitive.
    +
    +
    CharacterBinding() - Constructor for class com.sleepycat.bind.tuple.CharacterBinding
    +
     
    +
    charsToBytes(char[], int, byte[], int, int) - Static method in class com.sleepycat.util.UtfOps
    +
    +
    Converts character arrays into byte arrays.
    +
    +
    charToEntry(char, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.CharacterBinding
    +
    +
    Converts a simple char value into an entry buffer.
    +
    +
    checkpoint(CheckpointConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Synchronously checkpoint the database environment.
    +
    +
    CheckpointConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of a checkpoint operation invoked from Environment.checkpoint.
    +
    +
    CheckpointConfig() - Constructor for class com.sleepycat.je.CheckpointConfig
    +
    +
    An instance created using the default constructor is initialized with + the system's default settings.
    +
    +
    CHECKPOINTER_BYTES_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Ask the checkpointer to run every time we write this many bytes to the + log.
    +
    +
    CHECKPOINTER_DEADLOCK_RETRY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of times to retry a checkpoint if it runs into a deadlock.
    +
    +
    CHECKPOINTER_HIGH_PRIORITY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, the checkpointer uses more resources in order to complete the + checkpoint in a shorter time interval.
    +
    +
    CHECKPOINTER_WAKEUP_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The checkpointer wakeup interval in microseconds.
    +
    +
    ClassCatalog - Interface in com.sleepycat.bind.serial
    +
    +
    A catalog of class description information for use during object + serialization.
    +
    +
    ClassEnhancer - Class in com.sleepycat.persist.model
    +
    +
Enhances the bytecode of persistent classes to provide efficient access to fields and constructors, and to avoid special security policy settings for accessing non-public members.
    +
    +
    ClassEnhancer() - Constructor for class com.sleepycat.persist.model.ClassEnhancer
    +
    +
    Creates a class enhancer that searches all packages.
    +
    +
    ClassEnhancer(Set<String>) - Constructor for class com.sleepycat.persist.model.ClassEnhancer
    +
    +
    Creates a class enhancer that searches a given set of packages.
    +
    +
    ClassEnhancerTask - Class in com.sleepycat.persist.model
    +
    +
    An ant task for running the ClassEnhancer.
    +
    +
    ClassEnhancerTask() - Constructor for class com.sleepycat.persist.model.ClassEnhancerTask
    +
     
    +
    classForName(String) - Static method in class com.sleepycat.persist.model.EntityModel
    +
    +
Deprecated. Use EntityModel.resolveClass(java.lang.String) instead. This method does not use the environment's ClassLoader property.
    +
    +
    +
    ClassMetadata - Class in com.sleepycat.persist.model
    +
    +
    The metadata for a persistent class.
    +
    +
    ClassMetadata(String, int, String, boolean, PrimaryKeyMetadata, Map<String, SecondaryKeyMetadata>, List<FieldMetadata>) - Constructor for class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Used by an EntityModel to construct persistent class metadata.
    +
    +
    ClassMetadata(String, int, String, boolean, PrimaryKeyMetadata, Map<String, SecondaryKeyMetadata>, List<FieldMetadata>, Collection<FieldMetadata>) - Constructor for class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Used by an EntityModel to construct persistent class metadata.
    +
    +
    className - Variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    ClassResolver - Class in com.sleepycat.util
    +
    +
    Implements policies for loading user-supplied classes.
    +
    +
    ClassResolver() - Constructor for class com.sleepycat.util.ClassResolver
    +
     
    +
    ClassResolver.Stream - Class in com.sleepycat.util
    +
    +
A specialized ObjectInputStream that supports use of a user-specified ClassLoader.
    +
    +
    CLEANER_ADJUST_UTILIZATION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated in JE 6.3. Adjustments are no longer needed because LN log sizes have been stored in the Btree since JE 6.0.
    +
    +
    +
    CLEANER_BACKGROUND_PROACTIVE_MIGRATION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated. This parameter is ignored and proactive migration is no longer supported due to its negative impact on eviction and checkpointing. To reduce a cleaner backlog, configure more cleaner threads.
    +
    +
    +
    CLEANER_BYTES_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The cleaner checks disk utilization every time we write this many bytes to the log.
    +
    +
    CLEANER_DEADLOCK_RETRY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of times to retry cleaning if a deadlock occurs.
    +
    +
    CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Tracking of detailed cleaning information will use no more than this percentage of the cache.
    +
    +
    CLEANER_EXPUNGE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
If true (the default setting), the cleaner deletes log files after successful cleaning.
    +
    +
    CLEANER_FETCH_OBSOLETE_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
If true, the cleaner will fetch records to determine their size and more accurately calculate log utilization.
    +
    +
    CLEANER_FORCE_CLEAN_FILES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Specifies a list of files or file ranges to be cleaned at a time when no other log cleaning is necessary.
    +
    +
    CLEANER_FOREGROUND_PROACTIVE_MIGRATION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated. This parameter is ignored and proactive migration is no longer supported due to its negative impact on eviction and Btree splits. To reduce a cleaner backlog, configure more cleaner threads.
    +
    +
    +
    CLEANER_LAZY_MIGRATION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated. This parameter is ignored and lazy migration is no longer supported due to its negative impact on eviction and checkpointing. To reduce a cleaner backlog, configure more cleaner threads.
    +
    +
    +
    CLEANER_LOCK_TIMEOUT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The lock timeout for cleaner transactions in microseconds.
    +
    +
    CLEANER_LOOK_AHEAD_CACHE_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The look ahead cache size for cleaning in bytes.
    +
    +
    CLEANER_MAX_BATCH_FILES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated in 7.0. No longer used because the cleaner no longer has a backlog.
    +
    +
    +
    CLEANER_MIN_AGE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The minimum age of a file (number of files between it and the active file) to qualify it for cleaning under any conditions.
    +
    +
    CLEANER_MIN_FILE_UTILIZATION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
A log file will be cleaned if its utilization percentage is below this value, irrespective of total utilization.
    +
    +
    CLEANER_MIN_UTILIZATION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The cleaner will keep the total disk space utilization percentage above this value.
    +
    +
    CLEANER_READ_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The read buffer size for cleaning.
    +
    +
    CLEANER_THREADS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of threads allocated by the cleaner for log file processing.
    +
    +
    CLEANER_UPGRADE_TO_LOG_VERSION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
All log files having a log version prior to the specified version will be cleaned at a time when no other log cleaning is necessary.
    +
    +
    CLEANER_USE_DELETED_DIR - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
When EnvironmentConfig.CLEANER_EXPUNGE is false, the CLEANER_USE_DELETED_DIR parameter determines whether successfully cleaned files are moved to the "deleted" sub-directory.
    +
    +
    CLEANER_WAKEUP_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The cleaner checks whether cleaning is needed if this interval elapses without any writing, to handle the case where cleaning or checkpointing is necessary to reclaim disk space, but writing has stopped.
    +
    +
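The CLEANER_* entries above are String-named parameters applied through EnvironmentConfig.setConfigParam. A sketch (the particular values are illustrative, not recommendations):

    import com.sleepycat.je.EnvironmentConfig;

    final class CleanerTuningExample {
        // Keeps overall disk utilization above 60% and runs two cleaner threads.
        static EnvironmentConfig tunedConfig() {
            EnvironmentConfig ec = new EnvironmentConfig();
            ec.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, "60");
            ec.setConfigParam(EnvironmentConfig.CLEANER_THREADS, "2");
            return ec;
        }
    }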
    cleanLog() - Method in class com.sleepycat.je.Environment
    +
    +
Synchronously invokes log file (data file) cleaning until the target disk space utilization has been reached; this method is called periodically by the cleaner background threads.
    +
    +
    cleanLogFile() - Method in class com.sleepycat.je.Environment
    +
    +
Synchronously invokes cleaning of a single log file (data file), if the target disk space utilization has not been reached.
    +
    +
    clear() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
Removes all mappings or elements from this map or collection (optional operation).
    +
    +
    CLEAR - Static variable in class com.sleepycat.je.StatsConfig
    +
    +
A convenience instance for which setClear(true) has been called, and all other properties have default values.
    +
    +
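For example, the CLEAR instance can be passed to Environment.getStats to read and reset the counters in one call; a sketch assuming an open Environment:

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;
    import com.sleepycat.je.StatsConfig;

    final class StatsExample {
        // Returns the current statistics and zeroes the counters afterwards.
        static EnvironmentStats readAndClear(Environment env) {
            return env.getStats(StatsConfig.CLEAR);
        }
    }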
    clone() - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.JoinConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.ReadOptions
    +
     
    +
    clone() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
     
    +
    clone() - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
     
    +
    clone() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.StatsConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns a copy of this configuration object.
    +
    +
    clone() - Method in class com.sleepycat.je.WriteOptions
    +
     
    +
    clone() - Method in class com.sleepycat.persist.evolve.EvolveConfig
    +
    +
    Returns a shallow copy of the configuration.
    +
    +
    clone() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns a shallow copy of the configuration.
    +
    +
    cloneConfig() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
Deprecated. As of JE 4.0.13, replaced by DatabaseConfig.clone().

    +
    +
    +
    cloneConfig() - Method in class com.sleepycat.persist.evolve.EvolveConfig
    +
    +
Deprecated. As of JE 4.0.13, replaced by EvolveConfig.clone().
    +
    +
    +
    cloneConfig() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
Deprecated. As of JE 4.0.13, replaced by StoreConfig.clone().
    +
    +
    +
    close() - Method in interface com.sleepycat.bind.serial.ClassCatalog
    +
    +
    Close a catalog database and release any cached resources.
    +
    +
    close() - Method in class com.sleepycat.bind.serial.StoredClassCatalog
    +
     
    +
    close(Iterator<?>) - Static method in class com.sleepycat.collections.StoredIterator
    +
    +
    Closes the given iterator using StoredIterator.close() if it is a StoredIterator.
    +
    +
    close() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Closes this iterator.
    +
    +
    close() - Method in class com.sleepycat.je.Cursor
    +
    +
    Discards the cursor.
    +
    +
    close() - Method in class com.sleepycat.je.Database
    +
    +
    Discards the database handle.
    +
    +
    close() - Method in class com.sleepycat.je.DiskOrderedCursor
    +
    +
    Discards the cursor.
    +
    +
    close() - Method in class com.sleepycat.je.Environment
    +
    +
    The Environment.close method closes the Berkeley DB environment.
    +
    +
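The close() entries above follow a strict ordering: cursors must be closed before their database, and databases before the environment. A sketch of a shutdown path under that assumption:

    import com.sleepycat.je.Cursor;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.Environment;

    final class CloseOrderExample {
        // Innermost handles first: cursor, then database, then environment.
        static void shutDown(Cursor cursor, Database db, Environment env) {
            cursor.close();
            db.close();
            env.close();
        }
    }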
    close() - Method in interface com.sleepycat.je.ForwardCursor
    +
    +
    Discards the cursor.
    +
    +
    close() - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    close() - Method in class com.sleepycat.je.JoinCursor
    +
    +
    Closes the cursors that have been opened by this join cursor.
    +
    +
    close() - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
Close this ReplicatedEnvironment and release any resources used by the handle.
    +
    +
    close() - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
Closes a secondary database and disassociates it from its primary database.
    +
    +
    close() - Method in class com.sleepycat.je.Sequence
    +
    +
    Closes a sequence.
    +
    +
close() - Method in class com.sleepycat.je.util.LogVerificationInputStream
+
 
+
close() - Method in class com.sleepycat.je.util.LogVerificationReadableByteChannel
+
 
+
close() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Closes the cursor.
    +
    +
    close() - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Closes all databases and sequences that were opened via this store.
    +
    +
    close() - Method in interface com.sleepycat.persist.ForwardCursor
    +
    +
    Closes the cursor.
    +
    +
    close() - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
    Closes all databases and sequences that were opened by this model.
    +
    +
    closeClass(Class) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Closes the primary and secondary databases for the given entity class that were opened via this store.
    +
    +
    com.sleepycat.bind - package com.sleepycat.bind
    +
    +
    Bindings between database entries and Java objects.
    +
    +
    com.sleepycat.bind.serial - package com.sleepycat.bind.serial
    +
    +
    Bindings that use Java serialization.
    +
    +
    com.sleepycat.bind.tuple - package com.sleepycat.bind.tuple
    +
    +
    Bindings that use sequences of primitive fields, or tuples.
    +
    +
    com.sleepycat.collections - package com.sleepycat.collections
    +
    +
    Data access based on the standard Java collections API.
    +
    +
    com.sleepycat.je - package com.sleepycat.je
    +
    +
Foundation for creating environments, databases and transactions; provides cursor-based data access.
    +
    +
    com.sleepycat.je.jmx - package com.sleepycat.je.jmx
    +
    +
    Implementations of JMX MBeans for JE.
    +
    +
    com.sleepycat.je.rep - package com.sleepycat.je.rep
    +
    +
Berkeley DB Java Edition High Availability (JE HA) enables replication of JE environments.
    +
    +
    com.sleepycat.je.rep.arbiter - package com.sleepycat.je.rep.arbiter
    +
    +
Provides a mechanism to allow write availability for the replication group even when the number of replication nodes is less than a majority.
    +
    +
    com.sleepycat.je.rep.monitor - package com.sleepycat.je.rep.monitor
    +
    +
BDB JE HA support for applications that need to track the composition of a replication group, in order to perform tasks such as load balancing and request routing.
    +
    +
    com.sleepycat.je.rep.util - package com.sleepycat.je.rep.util
    +
    +
    BDB JE High Availability command line utilities and helper classes.
    +
    +
    com.sleepycat.je.util - package com.sleepycat.je.util
    +
    +
    Supporting utilities.
    +
    +
    com.sleepycat.persist - package com.sleepycat.persist
    +
    +
The Direct Persistence Layer (DPL) adds a persistent object model to the Berkeley DB transactional engine.
    +
    +
    com.sleepycat.persist.evolve - package com.sleepycat.persist.evolve
    +
    +
    Utilities for managing class evolution of persistent objects.
    +
    +
    com.sleepycat.persist.model - package com.sleepycat.persist.model
    +
    +
    Annotations for defining a persistent object model.
    +
    +
    com.sleepycat.persist.raw - package com.sleepycat.persist.raw
    +
    +
    Raw data access for general purpose tools and manual conversions.
    +
    +
    com.sleepycat.util - package com.sleepycat.util
    +
    +
    General utilities used throughout Berkeley DB.
    +
    +
    commit() - Method in class com.sleepycat.je.Transaction
    +
    +
    End the transaction.
    +
    +
    commit(Durability) - Method in class com.sleepycat.je.Transaction
    +
    +
    End the transaction using the specified durability requirements.
    +
    +
    commit(Xid, boolean) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    COMMIT_NO_SYNC - Static variable in class com.sleepycat.je.Durability
    +
    +
A convenience constant that defines a durability policy with COMMIT_NO_SYNC for local commit synchronization.
    +
    +
    COMMIT_SYNC - Static variable in class com.sleepycat.je.Durability
    +
    +
A convenience constant that defines a durability policy with COMMIT_SYNC for local commit synchronization.
    +
    +
    COMMIT_WRITE_NO_SYNC - Static variable in class com.sleepycat.je.Durability
    +
    +
A convenience constant that defines a durability policy with COMMIT_WRITE_NO_SYNC for local commit synchronization.
    +
    +
    commitNoSync() - Method in class com.sleepycat.je.Transaction
    +
    +
End the transaction, not writing to stable storage and not committing synchronously.
    +
    +
    CommitPointConsistencyPolicy - Class in com.sleepycat.je.rep
    +
    +
A consistency policy which ensures that the environment on a Replica node is at least as current as denoted by the specified CommitToken.
    +
    +
    CommitPointConsistencyPolicy(CommitToken, long, TimeUnit) - Constructor for class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
    +
Defines how current a Replica needs to be in terms of a specific transaction that was committed on the Master.
    +
    +
    commitSync() - Method in class com.sleepycat.je.Transaction
    +
    +
End the transaction, writing to stable storage and committing synchronously.
    +
    +
    CommitToken - Class in com.sleepycat.je
    +
    +
Defines an opaque token that can be used to identify a specific transaction commit in a replicated environment.
    +
    +
    commitTransaction() - Method in class com.sleepycat.collections.CurrentTransaction
    +
    +
Commits the transaction that is active for the current thread for this environment and makes the parent transaction (if any) the current transaction.
    +
    +
    commitWriteNoSync() - Method in class com.sleepycat.je.Transaction
    +
    +
End the transaction, writing to stable storage but not committing synchronously.
    +
    +
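A sketch tying the commit entries together: beginning a transaction and committing it with one of the convenience Durability constants (assumes an open, transactional Environment; the writes themselves are elided):

    import com.sleepycat.je.Durability;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.Transaction;

    final class CommitExample {
        static void writeDurably(Environment env) {
            Transaction txn = env.beginTransaction(null, null);
            try {
                // ... perform writes under txn ...
                txn.commit(Durability.COMMIT_SYNC); // flush and fsync before returning
            } catch (RuntimeException e) {
                txn.abort();
                throw e;
            }
        }
    }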
    comparator() - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
    Returns null since comparators are not supported.
    +
    +
    comparator() - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
    Returns null since comparators are not supported.
    +
    +
    comparator() - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
    Returns null since comparators are not supported.
    +
    +
    comparator() - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
    Returns null since comparators are not supported.
    +
    +
    compareDuplicates(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Database
    +
    +
Compares two data elements using either the default comparator if no duplicate comparator has been set or the duplicate comparator if one has been set.
    +
    +
    compareKeys(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Database
    +
    +
Compares two keys using either the default comparator if no BTree comparator has been set or the BTree comparator if one has been set.
    +
    +
    compareTo(CommitToken) - Method in class com.sleepycat.je.CommitToken
    +
    +
    Implements the Comparable interface.
    +
    +
    compareTo(JEVersion) - Method in class com.sleepycat.je.JEVersion
    +
     
    +
    compress() - Method in class com.sleepycat.je.Environment
    +
    +
Synchronously invokes the compressor mechanism which compacts in memory data structures after delete operations.
    +
    +
    COMPRESSOR_DEADLOCK_RETRY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of times to retry a compression run if a deadlock occurs.
    +
    +
    COMPRESSOR_LOCK_TIMEOUT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The lock timeout for compressor transactions in microseconds.
    +
    +
    COMPRESSOR_PURGE_ROOT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated as of 3.3.87. Compression of the root node no longer has any benefit and this feature has been removed. This parameter has no effect.
    +
    +
    +
    COMPRESSOR_WAKEUP_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The compressor thread wakeup interval in microseconds.
    +
    +
    ConfigBeanInfoBase - Class in com.sleepycat.util
    +
     
    +
    ConfigBeanInfoBase() - Constructor for class com.sleepycat.util.ConfigBeanInfoBase
    +
     
    +
    configuredCollection(Collection<E>, CursorConfig) - Static method in class com.sleepycat.collections.StoredCollections
    +
    +
    Creates a configured collection from a given stored collection.
    +
    +
    configuredList(List<E>, CursorConfig) - Static method in class com.sleepycat.collections.StoredCollections
    +
    +
    Creates a configured list from a given stored list.
    +
    +
    configuredMap(Map<K, V>, CursorConfig) - Static method in class com.sleepycat.collections.StoredCollections
    +
    +
    Creates a configured map from a given stored map.
    +
    +
    configuredSet(Set<E>, CursorConfig) - Static method in class com.sleepycat.collections.StoredCollections
    +
    +
    Creates a configured set from a given stored set.
    +
    +
    configuredSortedMap(SortedMap<K, V>, CursorConfig) - Static method in class com.sleepycat.collections.StoredCollections
    +
    +
    Creates a configured sorted map from a given stored sorted map.
    +
    +
    configuredSortedSet(SortedSet<E>, CursorConfig) - Static method in class com.sleepycat.collections.StoredCollections
    +
    +
    Creates a configured sorted set from a given stored sorted set.
    +
    +
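The configured* methods all follow the same shape: they wrap an existing stored view with a CursorConfig. A sketch for configuredMap (READ_UNCOMMITTED is one of CursorConfig's standard convenience instances):

    import com.sleepycat.collections.StoredCollections;
    import com.sleepycat.je.CursorConfig;
    import java.util.Map;

    final class ConfiguredViewExample {
        // Returns a view of the same stored data whose cursors read
        // uncommitted (dirty) records.
        static <K, V> Map<K, V> dirtyReadView(Map<K, V> storedMap) {
            return StoredCollections.configuredMap(storedMap, CursorConfig.READ_UNCOMMITTED);
        }
    }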
    CONSISTENCY_POLICY - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The default consistency policy used by a replica.
    +
    +
    CONSOLE_LOGGING_LEVEL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Trace messages equal and above this level will be logged to the console.
    +
    +
    ConsoleHandler - Class in com.sleepycat.je.util
    +
    +
JE instances of java.util.logging.Logger are configured to use this implementation of java.util.logging.ConsoleHandler.
    +
    +
    ConsoleHandler(Formatter, EnvironmentImpl) - Constructor for class com.sleepycat.je.util.ConsoleHandler
    +
     
    +
    CONSOLEHANDLER_LEVEL - Static variable in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    contains(Object) - Method in class com.sleepycat.collections.StoredEntrySet
    +
    +
    Returns true if this set contains the specified element.
    +
    +
    contains(Object) - Method in class com.sleepycat.collections.StoredKeySet
    +
    +
    Returns true if this set contains the specified key.
    +
    +
    contains(Object) - Method in class com.sleepycat.collections.StoredValueSet
    +
    +
    Returns true if this set contains the specified element.
    +
    +
    contains(K) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Checks for existence of a key in this index.
    +
    +
    contains(Transaction, K, LockMode) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Checks for existence of a key in this index.
    +
    +
    containsAll(Collection<?>) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
Returns true if this collection contains all of the elements in the specified collection.
    +
    +
    containsKey(Object) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Returns true if this map contains the specified key.
    +
    +
    containsValue(Object) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Returns true if this map contains the specified value.
    +
    +
    Conversion - Interface in com.sleepycat.persist.evolve
    +
    +
Converts an old version of an object value to conform to the current class or field definition.
    +
    +
    convert() - Method in class com.sleepycat.je.rep.util.DbEnableReplication
    +
    +
Modify the log files in the environment directory to add a modicum of replication-required metadata.
    +
    +
    convert(Object) - Method in interface com.sleepycat.persist.evolve.Conversion
    +
    +
Converts an old version of an object value to conform to the current class or field definition.
    +
    +
    Converter - Class in com.sleepycat.persist.evolve
    +
    +
A mutation for converting an old version of an object value to conform to the current class or field definition.
    +
    +
    Converter(String, int, Conversion) - Constructor for class com.sleepycat.persist.evolve.Converter
    +
    +
Creates a mutation for converting all instances of the given class version to the current version of the class.
    +
    +
    Converter(String, int, String, Conversion) - Constructor for class com.sleepycat.persist.evolve.Converter
    +
    +
Creates a mutation for converting all values of the given field in the given class version to a type compatible with the current declared type of the field.
    +
    +
    convertProxy() - Method in interface com.sleepycat.persist.model.PersistentProxy
    +
    +
Returns a new proxied class instance to which the state of this proxy instance has been copied.
    +
    +
    convertRawObject(RawObject) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
Converts a given raw object to a live object according to the current class definitions.
    +
    +
    copy() - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
     
    +
    count() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
Returns the number of elements having the same key value as the key value of the element last returned by next() or previous().
    +
    +
    count() - Method in class com.sleepycat.je.Cursor
    +
    +
Returns a count of the number of data items for the key to which the cursor refers.
    +
    +
    count() - Method in class com.sleepycat.je.Database
    +
    +
    Counts the key/data pairs in the database.
    +
    +
    count(long) - Method in class com.sleepycat.je.Database
    +
    +
    Counts the key/data pairs in the database.
    +
    +
    count() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Returns the number of values (duplicates) for the key at the cursor position, or returns zero if all values for the key have been deleted.
    +
    +
    count() - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Returns a non-transactional count of the entities in this index.
    +
    +
    count(long) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Returns a non-transactional count of the entities in this index.
    +
    +
    countEstimate() - Method in class com.sleepycat.je.Cursor
    +
    +
Returns a rough estimate of the count of the number of data items for the key to which the cursor refers.
    +
    +
    countEstimate() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Returns a rough estimate of the number of values (duplicates) for the key at the cursor position, or returns zero if all values for the key have been deleted.
    +
    +
    createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
     
    +
    createSecondaryKey(PK, D) - Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
    +
    Creates the index key object from primary key and data objects.
    +
    +
    createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator
    +
     
    +
    createSecondaryKey(TupleInput, D, TupleOutput) - Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator
    +
    +
Creates the index key entry from primary key tuple entry and deserialized data entry.
    +
    +
    createSecondaryKey(TupleInput, D, TupleOutput) - Method in class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator
    +
     
    +
    createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator
    +
     
    +
    createSecondaryKey(TupleInput, TupleInput, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator
    +
    +
    Creates the index key from primary key tuple and data tuple.
    +
    +
    createSecondaryKey(TupleInput, TupleInput, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator
    +
     
    +
    createSecondaryKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - Method in interface com.sleepycat.je.SecondaryKeyCreator
    +
    +
    Creates a secondary key entry, given a primary key and data entry.
    +
    +
    createSecondaryKeys(SecondaryDatabase, DatabaseEntry, DatabaseEntry, Set<DatabaseEntry>) - Method in interface com.sleepycat.je.SecondaryMultiKeyCreator
    +
    +
    Creates a secondary key entry, given a primary key and data entry.
    +
    +
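A sketch of implementing the SecondaryKeyCreator interface listed above; the fixed four-byte field is a made-up record layout used purely for illustration:

    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.SecondaryDatabase;
    import com.sleepycat.je.SecondaryKeyCreator;

    // Indexes records on the first four bytes of the primary data.
    final class PrefixKeyCreator implements SecondaryKeyCreator {
        @Override
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result) {
            result.setData(data.getData(), 0, 4);
            return true; // returning false means "no secondary key for this record"
        }
    }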
    current() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Returns the value at the cursor position, or null if the value at the cursor position has been deleted.
    +
    +
    current(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Returns the value at the cursor position, or null if the value at the cursor position has been deleted.
    +
    +
    CURRENT_VERSION - Static variable in class com.sleepycat.je.JEVersion
    +
    +
    Release version.
    +
    +
    currentClass - Variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    CurrentTransaction - Class in com.sleepycat.collections
    +
    +
Provides access to the current transaction for the current thread within the context of a Berkeley DB environment.
    +
    +
    Cursor - Class in com.sleepycat.je
    +
    +
    A database cursor.
    +
    +
    CursorConfig - Class in com.sleepycat.je
    +
    +
Specifies the attributes of a database cursor.
    +
    +
    CursorConfig() - Constructor for class com.sleepycat.je.CursorConfig
    +
    +
An instance created using the default constructor is initialized with the system's default settings.
    +
    +
    CustomStats - Interface in com.sleepycat.je
    +
    +
    A custom statistics object.
    +
    +

    D

    +
    +
    Database - Class in com.sleepycat.je
    +
    +
    A database handle.
    +
    +
    DatabaseComparator - Interface in com.sleepycat.je
    +
    +
Implemented by btree and duplicate comparators that need to be initialized before they are used or need access to the environment's ClassLoader property.
    +
    +
    DatabaseConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of a database.
    +
    +
    DatabaseConfig() - Constructor for class com.sleepycat.je.DatabaseConfig
    +
    +
An instance created using the default constructor is initialized with the system's default settings.
    +
    +
    DatabaseEntry - Class in com.sleepycat.je
    +
    +
    Encodes database key and data items as a byte array.
    +
    +
    DatabaseEntry() - Constructor for class com.sleepycat.je.DatabaseEntry
    +
    +
    Constructs a DatabaseEntry with null data.
    +
    +
    DatabaseEntry(byte[]) - Constructor for class com.sleepycat.je.DatabaseEntry
    +
    +
    Constructs a DatabaseEntry with a given byte array.
    +
    +
    DatabaseEntry(byte[], int, int) - Constructor for class com.sleepycat.je.DatabaseEntry
    +
    +
    Constructs a DatabaseEntry with a given byte array, offset and size.
    +
    +
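The three DatabaseEntry constructors above in a short sketch (the string key and buffer contents are arbitrary examples):

    import com.sleepycat.je.DatabaseEntry;
    import java.nio.charset.StandardCharsets;

    final class EntryExample {
        static void build() {
            DatabaseEntry out = new DatabaseEntry();            // filled in by a read
            DatabaseEntry key =
                new DatabaseEntry("k1".getBytes(StandardCharsets.UTF_8));
            byte[] buf = {0, 1, 2, 3, 4};
            DatabaseEntry slice = new DatabaseEntry(buf, 1, 3); // bytes 1..3 of buf
        }
    }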
    DatabaseException - Exception in com.sleepycat.je
    +
    +
    The root of all BDB JE-defined exceptions.
    +
    +
    DatabaseExistsException - Exception in com.sleepycat.je
    +
    +
Thrown by Environment.openDatabase and Environment.openSecondaryDatabase if the database already exists and the DatabaseConfig ExclusiveCreate parameter is true.
    +
    +
    DatabaseNotFoundException - Exception in com.sleepycat.je
    +
    +
Thrown when an operation requires a database and that database does not exist.
    +
    +
    DatabasePreemptedException - Exception in com.sleepycat.je.rep
    +
    +
Thrown when attempting to use a Database handle that was forcibly closed by replication.
    +
    +
    DatabaseStats - Class in com.sleepycat.je
    +
    +
    Statistics for a single database.
    +
    +
    DatabaseStats() - Constructor for class com.sleepycat.je.DatabaseStats
    +
     
    +
    dataBinding - Variable in class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
     
    +
    dataBinding - Variable in class com.sleepycat.bind.serial.TupleSerialBinding
    +
     
    +
    dataBinding - Variable in class com.sleepycat.bind.serial.TupleSerialKeyCreator
    +
     
    +
    DbBackup - Class in com.sleepycat.je.util
    +
    +
DbBackup is a helper class for stopping and restarting JE background activity in an open environment in order to simplify backup operations.
    +
    +
    DbBackup(Environment) - Constructor for class com.sleepycat.je.util.DbBackup
    +
    +
    Creates a DbBackup helper for a full backup.
    +
    +
    DbBackup(Environment, long) - Constructor for class com.sleepycat.je.util.DbBackup
    +
    +
    Creates a DbBackup helper for an incremental backup.
    +
    +
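A sketch of the usual DbBackup pattern: freeze log deletion, copy the files in the backup set, then unfreeze (startBackup, getLogFilesInBackupSet and endBackup are DbBackup's documented methods; the actual copy step is left as a comment):

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.util.DbBackup;

    final class BackupExample {
        static void fullBackup(Environment env) {
            DbBackup backup = new DbBackup(env);
            backup.startBackup(); // stops the cleaner from deleting log files
            try {
                for (String file : backup.getLogFilesInBackupSet()) {
                    // copy <envHome>/file to the backup location here
                }
            } finally {
                backup.endBackup(); // re-enables normal log file deletion
            }
        }
    }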
    DbCacheSize - Class in com.sleepycat.je.util
    +
    +
    Estimates the in-memory cache size needed to hold a specified data set.
    +
    +
    DbDeleteReservedFiles - Class in com.sleepycat.je.util
    +
    +
Command line utility used to delete reserved files explicitly, when attempting to recover from a disk-full condition.
    +
    +
    DbDump - Class in com.sleepycat.je.util
    +
    +
    Dump the contents of a database.
    +
    +
    DbDump(Environment, String, PrintStream, String, boolean) - Constructor for class com.sleepycat.je.util.DbDump
    +
    +
Deprecated. Please use the 4-arg ctor without outputDirectory instead.
    +
    +
    +
    DbDump(Environment, String, PrintStream, boolean) - Constructor for class com.sleepycat.je.util.DbDump
    +
    +
    Create a DbDump object for a specific environment and database.
    +
    +
    DbEnableReplication - Class in com.sleepycat.je.rep.util
    +
    +
A utility to convert an existing, non-replicated JE environment for replication.
    +
    +
    DbEnableReplication(File, String, String, String) - Constructor for class com.sleepycat.je.rep.util.DbEnableReplication
    +
    +
    Create a DbEnableReplication object for this node.
    +
    +
    DbFilterStats - Class in com.sleepycat.je.util
    +
    +
Transform one or more je.stat.csv statistics files and write the output to stdout.
    +
    +
    DbFilterStats() - Constructor for class com.sleepycat.je.util.DbFilterStats
    +
     
    +
    DbGroupAdmin - Class in com.sleepycat.je.rep.util
    +
    +
    DbGroupAdmin supplies the functionality of the administrative class ReplicationGroupAdmin in a convenient command line utility.
    +
    +
    DbGroupAdmin(String, Set<InetSocketAddress>) - Constructor for class com.sleepycat.je.rep.util.DbGroupAdmin
    +
    +
    Create a DbGroupAdmin instance for programmatic use.
    +
    +
    DbLoad - Class in com.sleepycat.je.util
    +
    +
    Loads a database from a dump file generated by DbDump.
    +
    +
    DbLoad() - Constructor for class com.sleepycat.je.util.DbLoad
    +
    +
    Creates a DbLoad object.
    +
    +
    dbName - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    DbPing - Class in com.sleepycat.je.rep.util
    +
    +
This class provides the utility to request the current state of a replica in a JE replication group; see NodeState for more details.
    +
    +
    DbPing(ReplicationNode, String, int) - Constructor for class com.sleepycat.je.rep.util.DbPing
    +
    +
    Create a DbPing instance for programmatic use.
    +
    +
    DbPrintLog - Class in com.sleepycat.je.util
    +
    +
    Dumps the contents of the log in XML format to System.out.
    +
    +
    DbPrintLog() - Constructor for class com.sleepycat.je.util.DbPrintLog
    +
     
    +
    DbResetRepGroup - Class in com.sleepycat.je.rep.util
    +
    +
A utility to reset the members of a replication group, replacing the group with a new group consisting of a single new member as described by the arguments supplied to the utility.
    +
    +
    DbResetRepGroup(File, String, String, String) - Constructor for class com.sleepycat.je.rep.util.DbResetRepGroup
    +
    +
    Create a DbResetRepGroup object for this node.
    +
    +
    DbScavenger - Class in com.sleepycat.je.util
    +
    +
    Used to retrieve as much data as possible from a corrupted environment.
    +
    +
    DbScavenger(Environment, String, boolean, boolean, boolean) - Constructor for class com.sleepycat.je.util.DbScavenger
    +
    +
    Create a DbScavenger object for a specific environment.
    +
    +
    DbSpace - Class in com.sleepycat.je.util
    +
    +
    DbSpace displays the disk space utilization for an environment.
    +
    +
    DbSpace(Environment, boolean, boolean, boolean) - Constructor for class com.sleepycat.je.util.DbSpace
    +
    +
Creates a DbSpace object for calculating utilization using an open Environment.
    +
    +
    DbStat - Class in com.sleepycat.je.util
    +
     
    +
    DbStat(Environment, String) - Constructor for class com.sleepycat.je.util.DbStat
    +
     
    +
    DbTruncateLog - Class in com.sleepycat.je.util
    +
    +
DbTruncateLog is a utility that lets the user truncate the JE log starting at a specified file and offset, through the last log file, inclusive.
    +
    +
    DbTruncateLog() - Constructor for class com.sleepycat.je.util.DbTruncateLog
    +
     
    +
    DbVerify - Class in com.sleepycat.je.util
    +
    +
    Verifies the internal structures of a database.
    +
    +
    DbVerify(Environment, String, boolean) - Constructor for class com.sleepycat.je.util.DbVerify
    +
    +
Deprecated.
    +
    +
    DbVerifyLog - Class in com.sleepycat.je.util
    +
    +
    Verifies the checksums in one or more log files.
    +
    +
    DbVerifyLog(Environment) - Constructor for class com.sleepycat.je.util.DbVerifyLog
    +
    +
    Creates a utility object for verifying the checksums in log files.
    +
    +
    DbVerifyLog(Environment, int) - Constructor for class com.sleepycat.je.util.DbVerifyLog
    +
    +
    Creates a utility object for verifying log files.
    +
    +
    DeadlockException - Exception in com.sleepycat.je
    +
    +
    Thrown when a deadlock is detected.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.CheckpointConfig
    +
    +
    Default configuration used if null is passed to Environment.checkpoint.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.CursorConfig
    +
    +
Default configuration used if null is passed to methods that create a cursor.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.DatabaseConfig
    +
    +
An instance created using the default constructor is initialized with the system's default settings.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
Default configuration used if null is passed to methods that create a cursor.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.JoinConfig
    +
    +
    Default configuration used if null is passed to Database.join.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
An instance created using the default constructor is initialized with the default settings.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.SecondaryConfig
    +
     
    +
    DEFAULT - Static variable in class com.sleepycat.je.SequenceConfig
    +
    +
Default configuration used if null is passed to methods that create a sequence.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.StatsConfig
    +
    +
    A convenience instance embodying the default configuration.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.TransactionConfig
    +
    +
Default configuration used if null is passed to methods that create a transaction.
    +
    +
    DEFAULT - Static variable in class com.sleepycat.je.VerifyConfig
    +
     
    +
    DEFAULT - Static variable in class com.sleepycat.persist.StoreConfig
    +
    +
The default store configuration containing properties as if the configuration were constructed and not modified.
    +
    +
    DEFAULT_BUMP_SIZE - Static variable in class com.sleepycat.util.FastOutputStream
    +
    +
    The default amount that the buffer is increased when it is full.
    +
    +
    DEFAULT_INIT_SIZE - Static variable in class com.sleepycat.util.FastOutputStream
    +
    +
The default initial size of the buffer if no initialSize parameter is specified.
    +
    +
    DEFAULT_ITERATOR_BLOCK_SIZE - Static variable in class com.sleepycat.collections.StoredCollection
    +
    +
    The default number of records read at one time by iterators.
    +
    +
    DEFAULT_MAX_RETRIES - Static variable in class com.sleepycat.collections.TransactionRunner
    +
    +
    The default maximum number of retries.
    +
    +
    DEFAULT_PORT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The default port used for replication.
    +
    +
    delete(WriteOptions) - Method in class com.sleepycat.je.Cursor
    +
    +
    Deletes the record to which the cursor refers.
    +
    +
    delete() - Method in class com.sleepycat.je.Cursor
    +
    +
    Deletes the record to which the cursor refers.
    +
    +
    delete(Transaction, DatabaseEntry, WriteOptions) - Method in class com.sleepycat.je.Database
    +
    +
    Removes records with a given key from the database.
    +
    +
    delete(Transaction, DatabaseEntry) - Method in class com.sleepycat.je.Database
    +
    +
    Removes records with a given key from the database.
    +
    +
    delete(WriteOptions) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Delete the record to which the cursor refers from the primary database and all secondary indices.
    +
    +
    delete() - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Delete the record to which the cursor refers from the primary database and all secondary indices.
    +
    +
    delete(Transaction, DatabaseEntry, WriteOptions) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Deletes the record associated with the given secondary key.
    +
    +
    delete(Transaction, DatabaseEntry) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Deletes the record associated with the given secondary key.
    +
    +
    delete() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Deletes the entity at the cursor position.
    +
    +
    delete(WriteOptions) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Deletes the entity at the cursor position, using a WriteOptions parameter and returning an OperationResult.
    +
    +
    delete(K) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Deletes all entities with a given index key.
    +
    +
    delete(Transaction, K) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Deletes all entities with a given index key.
    +
    +
    delete(Transaction, K, WriteOptions) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
Deletes all entities with a given index key, using a WriteOptions parameter and returning an OperationResult.
    +
    +
    DeleteAction - Enum in com.sleepycat.persist.model
    +
    +
Specifies the action to take when a related entity is deleted having a primary key value that exists as a secondary key value for this entity.
    +
    +
    DeleteConstraintException - Exception in com.sleepycat.je
    +
    +
Thrown when an attempt is made to delete a key from a foreign key database, when that key is referenced by a secondary database, and the secondary is configured to cause an abort in this situation.
    +
    +
    DeletedClassException - Exception in com.sleepycat.persist.evolve
    +
    +
While reading from an index, an instance of a deleted class version was encountered.
    +
    +
    Deleter - Class in com.sleepycat.persist.evolve
    +
    +
    A mutation for deleting an entity class or field.
    +
    +
    Deleter(String, int) - Constructor for class com.sleepycat.persist.evolve.Deleter
    +
    +
    Creates a mutation for deleting an entity class.
    +
    +
    Deleter(String, int, String) - Constructor for class com.sleepycat.persist.evolve.Deleter
    +
    +
Creates a mutation for deleting the given field from all instances of the given class version.
    +
    +
    DESCRIPTION - Variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    DESIGNATED_PRIMARY - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Identifies the Primary node in a two node group.
    +
    +
    DiskLimitException - Exception in com.sleepycat.je
    +
    +
Thrown when a write operation cannot be performed because a disk limit has been violated.
    +
    +
    DiskOrderedCursor - Class in com.sleepycat.je
    +
    +
DiskOrderedCursor returns records in unsorted order in exchange for generally faster retrieval times.
    +
    +
    DiskOrderedCursorConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of a DiskOrderedCursor.
    +
    +
    DiskOrderedCursorConfig() - Constructor for class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
An instance created using the default constructor is initialized with the system's default settings.
    +
    +
    DiskOrderedCursorProducerException - Exception in com.sleepycat.je
    +
    +
Thrown by ForwardCursor.getNext when a DiskOrderedCursor producer thread throws an exception.
    +
    +
    doAggressiveScavengerRun - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    doRegister(Environment) - Method in class com.sleepycat.je.jmx.JEMBean
    +
    +
    For EnvironmentImpl.MBeanRegistrar interface.
    +
    +
    doRegisterMBean(Environment) - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    doRegisterMBean(Environment) - Method in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    doRegisterMBean(Environment) - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    DOS_PRODUCER_QUEUE_TIMEOUT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The timeout for Disk Ordered Scan producer thread queue offers in milliseconds.
    +
    +
    doScavengerRun - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    DoubleBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for an unsorted Double primitive wrapper or an unsorted double primitive.
    +
    +
    DoubleBinding() - Constructor for class com.sleepycat.bind.tuple.DoubleBinding
    +
     
    +
    doubleToEntry(double, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.DoubleBinding
    +
    +
    Converts a simple double value into an entry buffer.
    +
    +
    doubleToEntry(double, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedDoubleBinding
    +
    +
    Converts a simple double value into an entry buffer.
    +
    +
    doUnregister() - Method in class com.sleepycat.je.jmx.JEMBean
    +
    +
    For EnvironmentImpl.MBeanRegistrar interface.
    +
    +
    doWork() - Method in interface com.sleepycat.collections.TransactionWorker
    +
    +
    Perform the work for a single transaction.
    +
    +
    dump() - Method in class com.sleepycat.je.util.DbDump
    +
    +
    Perform the dump.
    +
    +
    dump(File, String, String, long, long, boolean, boolean, boolean, boolean, boolean, boolean, String) - Method in class com.sleepycat.je.util.DbPrintLog
    +
    +
    Dump a JE log into human readable form.
    +
    +
    dump() - Method in class com.sleepycat.je.util.DbScavenger
    +
    +
    Start the scavenger run.
    +
    +
    dumpGroup() - Method in class com.sleepycat.je.rep.util.DbGroupAdmin
    +
    +
    Display group information.
    +
    +
    dumpOne(PrintStream, byte[], boolean) - Method in class com.sleepycat.je.util.DbDump
    +
     
    +
    dup(boolean) - Method in class com.sleepycat.je.Cursor
    +
    +
Returns a new cursor with the same transaction and locker ID as the original cursor.
    +
    +
    dup(boolean) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Returns a new SecondaryCursor for the same transaction as the original cursor.
    +
    +
    dup() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Duplicates the cursor at the cursor position.
    +
    +
    DuplicateDataException - Exception in com.sleepycat.je
    +
    +
Thrown by Cursor.putCurrent if the old and new data are not equal according to the configured duplicate comparator or default comparator.
    +
    +
    duplicates(K) - Method in class com.sleepycat.collections.StoredMap
    +
    +
Returns a new collection containing the values mapped to the given key in this map.
    +
    +
    duplicatesMap(K, EntryBinding) - Method in class com.sleepycat.collections.StoredMap
    +
    +
Returns a new map from primary key to value for the subset of records having a given secondary key (duplicates).
    +
    +
    dupSecondary(boolean) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Deprecated. As of JE 4.0.13, replaced by Cursor.dup(boolean).

    +
    +
    +
    Durability - Class in com.sleepycat.je
    +
    +
Durability defines the overall durability characteristics associated with a transaction.
    +
    +
    Durability(Durability.SyncPolicy, Durability.SyncPolicy, Durability.ReplicaAckPolicy) - Constructor for class com.sleepycat.je.Durability
    +
    +
    Creates an instance of a Durability specification.
    +
    +
    Durability.ReplicaAckPolicy - Enum in com.sleepycat.je
    +
    +
A replicated environment makes it possible to increase an application's transaction commit guarantees by committing changes to its replicas on the network.
    +
    +
    Durability.SyncPolicy - Enum in com.sleepycat.je
    +
    +
Defines the synchronization policy to be used when committing a transaction.
    +
    +
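A sketch of the three-part Durability constructor listed above (the particular policy combination is illustrative only):

    import com.sleepycat.je.Durability;

    final class DurabilityExample {
        // Local commits flush to the OS without fsync; a simple majority of
        // replicas must acknowledge, each using NO_SYNC locally.
        static final Durability RELAXED = new Durability(
                Durability.SyncPolicy.WRITE_NO_SYNC,           // local sync
                Durability.SyncPolicy.NO_SYNC,                 // replica sync
                Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);  // ack policy
    }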

    E

    +
    +
    ELECTABLE_GROUP_SIZE_OVERRIDE - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
An escape mechanism to modify the way in which the number of electable nodes, and consequently the quorum requirements for elections and commit acknowledgments, is calculated.
    +
    +
    ELECTIONS_PRIMARY_RETRIES - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
The number of times an unsuccessful election will be retried by a designated Primary in a two node group before it is activated and becomes the Master.
    +
    +
    ELECTIONS_REBROADCAST_PERIOD - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
The time interval between rebroadcasts of election results by the master node to all nodes not currently connected to it.
    +
    +
    end(Xid, int) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    endBackup() - Method in class com.sleepycat.je.util.DbBackup
    +
    +
End backup mode, thereby re-enabling normal deletion of log files by the JE log cleaner.
    +
    +
    enhance(String, byte[]) - Method in class com.sleepycat.persist.model.ClassEnhancer
    +
    +
    Enhances the given class bytes if the class is annotated with Entity or Persistent.
    +
    +
    ensureConsistency(EnvironmentImpl) - Method in class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
     
    +
    entities() - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing all entities in this index.
    +
    +
    entities(Transaction, CursorConfig) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing all entities in this index.
    +
    +
    entities(K, boolean, K, boolean) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing entities in a key range.
    +
    +
    entities(Transaction, K, boolean, K, boolean, CursorConfig) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing entities in a key range.
    +
    +
    entities() - Method in class com.sleepycat.persist.EntityJoin
    +
    +
    Opens a cursor that returns the entities qualifying for the join.
    +
    +
    entities(Transaction, CursorConfig) - Method in class com.sleepycat.persist.EntityJoin
    +
    +
    Opens a cursor that returns the entities qualifying for the join.
    +
    +
    Entity - Annotation Type in com.sleepycat.persist.model
    +
    +
    Indicates a persistent entity class.
    +
    +
    EntityBinding<E> - Interface in com.sleepycat.bind
    +
    +
    A binding between a key-value entry pair and an entity object.
    +
    +
    EntityConverter - Class in com.sleepycat.persist.evolve
    +
    +
    A subclass of Converter that allows specifying keys to be deleted.
    +
    +
    EntityConverter(String, int, Conversion, Set<String>) - Constructor for class com.sleepycat.persist.evolve.EntityConverter
    +
    +
Creates a mutation for converting all instances of the given entity class version to the current version of the class.
    +
    +
    EntityCursor<V> - Interface in com.sleepycat.persist
    +
    +
Traverses entity values or key values and allows deleting or updating the entity at the current cursor position.
    +
    +
    EntityIndex<K,V> - Interface in com.sleepycat.persist
    +
    +
The interface for accessing keys and entities via a primary or secondary index.
    +
    +
    EntityJoin<PK,E> - Class in com.sleepycat.persist
    +
    +
    Performs an equality join on two or more secondary keys.
    +
    +
    EntityJoin(PrimaryIndex<PK, E>) - Constructor for class com.sleepycat.persist.EntityJoin
    +
    +
    Creates a join object for a given primary index.
    +
    +
    EntityMetadata - Class in com.sleepycat.persist.model
    +
    +
    The metadata for a persistent entity class.
    +
    +
    EntityMetadata(String, PrimaryKeyMetadata, Map<String, SecondaryKeyMetadata>) - Constructor for class com.sleepycat.persist.model.EntityMetadata
    +
    +
    Used by an EntityModel to construct entity metadata.
    +
    +
    EntityModel - Class in com.sleepycat.persist.model
    +
    +
    The base class for classes that provide entity model metadata.
    +
    +
    EntityModel() - Constructor for class com.sleepycat.persist.model.EntityModel
    +
    +
    The default constructor for use by subclasses.
    +
    +
    EntityResult<V> - Class in com.sleepycat.persist
    +
    +
    Used to return an entity value from a 'get' operation along with an + OperationResult.
    +
    +
    EntityStore - Class in com.sleepycat.persist
    +
    +
    A store for managing persistent entity objects.
    +
    +
    EntityStore(Environment, String, StoreConfig) - Constructor for class com.sleepycat.persist.EntityStore
    +
    +
    Opens an entity store in a given environment.
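    A minimal opening sketch, assuming an open Environment env and the hypothetical Employee entity class sketched earlier:

        StoreConfig storeConfig = new StoreConfig();
        storeConfig.setAllowCreate(true);
        storeConfig.setTransactional(true);
        EntityStore store = new EntityStore(env, "employeeStore", storeConfig);
        PrimaryIndex<Long, Employee> byId =
            store.getPrimaryIndex(Long.class, Employee.class);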
    +
    +
    EntryBinding<E> - Interface in com.sleepycat.bind
    +
    +
    A binding between a key or data entry and a key or data object.
    +
    +
    entrySet() - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Returns a set view of the mappings contained in this map.
    +
    +
    entryToBigDecimal(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.BigDecimalBinding
    +
    +
    Converts an entry buffer into a BigDecimal value.
    +
    +
    entryToBigDecimal(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedBigDecimalBinding
    +
    +
    Converts an entry buffer into a BigDecimal value.
    +
    +
    entryToBigInteger(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.BigIntegerBinding
    +
    +
    Converts an entry buffer into a BigInteger value.
    +
    +
    entryToBoolean(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.BooleanBinding
    +
    +
    Converts an entry buffer into a simple boolean value.
    +
    +
    entryToByte(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.ByteBinding
    +
    +
    Converts an entry buffer into a simple byte value.
    +
    +
    entryToChar(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.CharacterBinding
    +
    +
    Converts an entry buffer into a simple char value.
    +
    +
    entryToDouble(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.DoubleBinding
    +
    +
    Converts an entry buffer into a simple double value.
    +
    +
    entryToDouble(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedDoubleBinding
    +
    +
    Converts an entry buffer into a simple double value.
    +
    +
    entryToFloat(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.FloatBinding
    +
    +
    Converts an entry buffer into a simple float value.
    +
    +
    entryToFloat(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedFloatBinding
    +
    +
    Converts an entry buffer into a simple float value.
    +
    +
    entryToInput(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
    Utility method to create a new tuple input object for reading the data + from a given buffer.
    +
    +
    entryToInt(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.IntegerBinding
    +
    +
    Converts an entry buffer into a simple int value.
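    A round-trip sketch of the static helpers on these primitive bindings:

        DatabaseEntry entry = new DatabaseEntry();
        IntegerBinding.intToEntry(42, entry);          // value -> entry buffer
        int value = IntegerBinding.entryToInt(entry);  // entry buffer -> 42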
    +
    +
    entryToInt(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.PackedIntegerBinding
    +
    +
    Converts an entry buffer into a simple int value.
    +
    +
    entryToInt(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedPackedIntegerBinding
    +
    +
    Converts an entry buffer into a simple int value.
    +
    +
    entryToLong(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.LongBinding
    +
    +
    Converts an entry buffer into a simple long value.
    +
    +
    entryToLong(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.PackedLongBinding
    +
    +
    Converts an entry buffer into a simple Long value.
    +
    +
    entryToLong(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedPackedLongBinding
    +
    +
    Converts an entry buffer into a simple Long value.
    +
    +
    entryToObject(DatabaseEntry) - Method in class com.sleepycat.bind.ByteArrayBinding
    +
     
    +
    entryToObject(DatabaseEntry, DatabaseEntry) - Method in interface com.sleepycat.bind.EntityBinding
    +
    +
    Converts key and data entry buffers into an entity Object.
    +
    +
    entryToObject(DatabaseEntry) - Method in interface com.sleepycat.bind.EntryBinding
    +
    +
Converts an entry buffer into an Object.
    +
    +
    entryToObject(DatabaseEntry) - Method in class com.sleepycat.bind.serial.SerialBinding
    +
    +
Deserializes an object from an entry buffer.
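    A sketch of wiring a SerialBinding to a class catalog; dbConfig, env, and the Serializable Person class are assumptions:

        Database catalogDb = env.openDatabase(null, "classCatalog", dbConfig);
        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
        SerialBinding<Person> binding = new SerialBinding<>(catalog, Person.class);
        Person p = binding.entryToObject(dataEntry);   // deserializes the entry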
    +
    +
    entryToObject(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.bind.serial.SerialSerialBinding
    +
     
    +
    entryToObject(K, D) - Method in class com.sleepycat.bind.serial.SerialSerialBinding
    +
    +
    Constructs an entity object from deserialized key and data objects.
    +
    +
    entryToObject(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.bind.serial.TupleSerialBinding
    +
     
    +
    entryToObject(TupleInput, D) - Method in class com.sleepycat.bind.serial.TupleSerialBinding
    +
    +
    Constructs an entity object from TupleInput key entry and + deserialized data entry objects.
    +
    +
    entryToObject(TupleInput, E) - Method in class com.sleepycat.bind.serial.TupleSerialMarshalledBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.BigDecimalBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.BigIntegerBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.BooleanBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.ByteBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.CharacterBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.DoubleBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.FloatBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.IntegerBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.LongBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.PackedIntegerBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.PackedLongBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.ShortBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.SortedBigDecimalBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.SortedDoubleBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.SortedFloatBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.SortedPackedIntegerBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.SortedPackedLongBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.StringBinding
    +
     
    +
    entryToObject(DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.TupleBinding
    +
    +
    Constructs a key or data object from a TupleInput entry.
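    A sketch of a concrete TupleBinding for a hypothetical Point class, showing the entryToObject/objectToEntry pair:

        public class PointBinding extends TupleBinding<Point> {
            @Override
            public Point entryToObject(TupleInput input) {
                return new Point(input.readInt(), input.readInt());
            }
            @Override
            public void objectToEntry(Point point, TupleOutput output) {
                output.writeInt(point.x);   // fields written in the order read back
                output.writeInt(point.y);
            }
        }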
    +
    +
    entryToObject(DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleInputBinding
    +
     
    +
    entryToObject(TupleInput) - Method in class com.sleepycat.bind.tuple.TupleMarshalledBinding
    +
     
    +
    entryToObject(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleTupleBinding
    +
     
    +
    entryToObject(TupleInput, TupleInput) - Method in class com.sleepycat.bind.tuple.TupleTupleBinding
    +
    +
    Constructs an entity object from TupleInput key and data + entries.
    +
    +
    entryToObject(TupleInput, TupleInput) - Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding
    +
     
    +
    entryToShort(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.ShortBinding
    +
    +
    Converts an entry buffer into a simple short value.
    +
    +
    entryToString(DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.StringBinding
    +
    +
    Converts an entry buffer into a simple String value.
    +
    +
    env - Variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    env - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    env - Variable in class com.sleepycat.je.util.DbLoad
    +
     
    +
    ENV_BACKGROUND_READ_LIMIT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum number of read operations performed by JE background + activities (e.g., cleaning) before sleeping to ensure that application + threads can perform I/O.
    +
    +
    ENV_BACKGROUND_SLEEP_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The duration that JE background activities will sleep when the EnvironmentConfig.ENV_BACKGROUND_WRITE_LIMIT or EnvironmentConfig.ENV_BACKGROUND_READ_LIMIT is + reached.
    +
    +
    ENV_BACKGROUND_WRITE_LIMIT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum number of write operations performed by JE background + activities (e.g., checkpointing and eviction) before sleeping to ensure + that application threads can perform I/O.
    +
    +
    ENV_CHECK_LEAKS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Debugging support: check leaked locks and txns at env close.
    +
    +
    ENV_CONSISTENCY_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The amount of time to wait for a Replica to become consistent with the + Master, when a ReplicatedEnvironment handle is created and + no ConsistencyPolicy is specified.
    +
    +
    ENV_DB_EVICTION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, enable eviction of metadata for closed databases.
    +
    +
    ENV_DUP_CONVERT_PRELOAD_ALL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true (the default) preload all duplicates databases at once when + upgrading from JE 4.1 and earlier.
    +
    +
    ENV_EXPIRATION_ENABLED - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true (the default), expired data is filtered from queries and purged + by the cleaner.
    +
    +
    ENV_FAIR_LATCHES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, use latches instead of synchronized blocks to implement the + lock table and log write mutexes.
    +
    +
    ENV_FORCED_YIELD - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Debugging support: call Thread.yield() at strategic points.
    +
    +
    ENV_IS_LOCKING - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the database environment for no locking.
    +
    +
    ENV_IS_TRANSACTIONAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the use of transactions.
    +
    +
    ENV_LATCH_TIMEOUT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The timeout for detecting internal latch timeouts, so that deadlocks can + be detected.
    +
    +
    ENV_READ_ONLY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the database environment to be read-only, and any attempt to + modify a database will fail.
    +
    +
    ENV_RECOVERY_FORCE_CHECKPOINT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, a checkpoint is forced following recovery, even if the + log ends with a checkpoint.
    +
    +
    ENV_RECOVERY_FORCE_NEW_FILE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Used after performing a restore from backup to force creation of a new + log file prior to recovery.
    +
    +
    ENV_RUN_CHECKPOINTER - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, starts up the checkpointer thread.
    +
    +
    ENV_RUN_CLEANER - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, starts up the cleaner thread.
    +
    +
    ENV_RUN_EVICTOR - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, eviction is done by a pool of evictor threads, as well as being + done inline by application threads.
    +
    +
    ENV_RUN_IN_COMPRESSOR - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, starts up the INCompressor thread.
    +
    +
    ENV_RUN_OFFHEAP_EVICTOR - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, off-heap eviction is done by a pool of evictor threads, as well + as being done inline by application threads.
    +
    +
    ENV_RUN_VERIFIER - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Whether to run the background verifier.
    +
    +
    ENV_SETUP_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The maximum amount of time for the internal housekeeping, like + elections, syncup with the master, etc.
    +
    +
    ENV_TTL_CLOCK_TOLERANCE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The interval added to the system clock time for determining that a + record may have expired.
    +
    +
    ENV_UNKNOWN_STATE_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Permits opening of a ReplicatedEnvironment handle in the + ReplicatedEnvironment.State.UNKNOWN state, if a Master cannot be + determined within this timeout period.
    +
    +
    envHome - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    Environment - Class in com.sleepycat.je
    +
    +
    A database environment.
    +
    +
    Environment(File, EnvironmentConfig) - Constructor for class com.sleepycat.je.Environment
    +
    +
    Creates a database environment handle.
    +
    +
    EnvironmentConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of an environment.
    +
    +
    EnvironmentConfig() - Constructor for class com.sleepycat.je.EnvironmentConfig
    +
    +
    Creates an EnvironmentConfig initialized with the system default + settings.
    +
    +
    EnvironmentConfig(Properties) - Constructor for class com.sleepycat.je.EnvironmentConfig
    +
    +
    Creates an EnvironmentConfig which includes the properties specified in + the properties parameter.
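    A minimal sketch of opening an environment with a hand-built configuration; the home directory path is a placeholder:

        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(true);
        Environment env = new Environment(new File("/path/to/envHome"), envConfig);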
    +
    +
    EnvironmentFailureException - Exception in com.sleepycat.je
    +
    +
    Indicates that a failure has occurred that could impact the Environment as a whole.
    +
    +
    EnvironmentLockedException - Exception in com.sleepycat.je
    +
    +
    Thrown by the Environment constructor when an environment cannot be + opened for write access because another process has the same environment + open for write access.
    +
    +
    EnvironmentMutableConfig - Class in com.sleepycat.je
    +
    +
    Specifies the environment attributes that may be changed after the + environment has been opened.
    +
    +
    EnvironmentMutableConfig() - Constructor for class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    An instance created using the default constructor is initialized with + the system's default settings.
    +
    +
    EnvironmentNotFoundException - Exception in com.sleepycat.je
    +
    +
Thrown by the Environment constructor when the EnvironmentConfig + AllowCreate property is false (environment creation is not permitted), but + there are no log files in the environment directory.
    +
    +
    EnvironmentStats - Class in com.sleepycat.je
    +
    +
    Statistics for a single environment.
    +
    +
    EnvironmentWedgedException - Exception in com.sleepycat.je
    +
    +
    Thrown by the Environment.close() when the current process must be + shut down and restarted before re-opening the Environment.
    +
    +
    equals(Object) - Method in class com.sleepycat.collections.MapEntryParameter
    +
    +
    Compares this entry to a given entry as specified by Map.Entry.equals(java.lang.Object).
    +
    +
    equals(Object) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Compares the specified object with this collection for equality.
    +
    +
    equals(Object) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Compares the specified object with this map for equality.
    +
    +
    equals(Object) - Method in class com.sleepycat.je.CommitToken
    +
     
    +
    equals(Object) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Compares the data of two entries for byte-by-byte equality.
    +
    +
    equals(Object) - Method in class com.sleepycat.je.Durability
    +
     
    +
    equals(Object) - Method in class com.sleepycat.je.JEVersion
    +
     
    +
    equals(Object) - Method in class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
     
    +
    equals(Object) - Method in class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
     
    +
    equals(Object) - Method in class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
     
    +
    equals(Object) - Method in interface com.sleepycat.persist.evolve.Conversion
    +
    +
The standard equals method that must be implemented by + conversion classes.
    +
    +
    equals(Object) - Method in class com.sleepycat.persist.evolve.Converter
    +
    +
    Returns true if the conversion objects are equal in this object and + given object, and if the Mutation.equals(java.lang.Object) superclass method + returns true.
    +
    +
    equals(Object) - Method in class com.sleepycat.persist.evolve.EntityConverter
    +
    +
    Returns true if the deleted and renamed keys are equal in this object + and given object, and if the Converter.equals(java.lang.Object) superclass method + returns true.
    +
    +
    equals(Object) - Method in class com.sleepycat.persist.evolve.Mutation
    +
    +
    Returns true if the class name, class version and field name are equal + in this object and given object.
    +
    +
    equals(Object) - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns true if this collection has the same set of mutations as the + given collection and all mutations are equal.
    +
    +
    equals(Object) - Method in class com.sleepycat.persist.evolve.Renamer
    +
    +
    Returns true if the new class name is equal in this object and given + object, and if the Mutation.equals(java.lang.Object) method returns true.
    +
    +
    equals(Object) - Method in class com.sleepycat.persist.model.ClassMetadata
    +
     
    +
    equals(Object) - Method in class com.sleepycat.persist.model.EntityMetadata
    +
     
    +
    equals(Object) - Method in class com.sleepycat.persist.model.FieldMetadata
    +
     
    +
    equals(Object) - Method in class com.sleepycat.persist.model.PrimaryKeyMetadata
    +
     
    +
    equals(Object) - Method in class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
     
    +
    equals(Object) - Method in class com.sleepycat.persist.raw.RawObject
    +
     
    +
    evictMemory() - Method in class com.sleepycat.je.Environment
    +
    +
    Synchronously invokes the mechanism for keeping memory usage within the + cache size boundaries.
    +
    +
    EVICTOR_ALLOW_BIN_DELTAS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Allow Bottom Internal Nodes (BINs) to be written in a delta format + during eviction.
    +
    +
    EVICTOR_CORE_THREADS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The minimum number of threads in the eviction thread pool.
    +
    +
    EVICTOR_CRITICAL_PERCENTAGE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    At this percentage over the allotted cache, critical eviction will + start.
    +
    +
    EVICTOR_DEADLOCK_RETRY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
as of JE 4.1, since the single evictor thread has + been replaced by a more robust thread pool.
    +
    +
    +
    EVICTOR_EVICT_BYTES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    When eviction occurs, the evictor will push memory usage to this number + of bytes below EnvironmentConfig.MAX_MEMORY.
    +
    +
    EVICTOR_FORCED_YIELD - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Call Thread.yield() at each check for cache overflow.
    +
    +
    EVICTOR_KEEP_ALIVE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The duration that excess threads in the eviction thread pool will stay + idle; after this period, idle threads will terminate.
    +
    +
    EVICTOR_LRU_ONLY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
    as of JE 6.0. This parameter is ignored by the new, + more efficient and more accurate evictor.
    +
    +
    +
    EVICTOR_MAX_THREADS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum number of threads in the eviction thread pool.
    +
    +
    EVICTOR_N_LRU_LISTS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of LRU lists in the main JE cache.
    +
    +
    EVICTOR_NODES_PER_SCAN - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
    as of JE 6.0. This parameter is ignored by the new, more + efficient and more accurate evictor.
    +
    +
    +
    evolve(EvolveConfig) - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Performs conversion of unevolved objects in order to reduce lazy + conversion overhead.
    +
    +
    EvolveConfig - Class in com.sleepycat.persist.evolve
    +
    +
    Configuration properties for eager conversion of unevolved objects.
    +
    +
    EvolveConfig() - Constructor for class com.sleepycat.persist.evolve.EvolveConfig
    +
    +
    Creates an evolve configuration with default properties.
    +
    +
    EvolveConfigBeanInfo - Class in com.sleepycat.persist.evolve
    +
     
    +
    EvolveConfigBeanInfo() - Constructor for class com.sleepycat.persist.evolve.EvolveConfigBeanInfo
    +
     
    +
    EvolveEvent - Class in com.sleepycat.persist.evolve
    +
    +
    The event passed to the EvolveListener interface during eager entity + evolution.
    +
    +
    EvolveListener - Interface in com.sleepycat.persist.evolve
    +
    +
    The listener interface called during eager entity evolution.
    +
    +
    evolveProgress(EvolveEvent) - Method in interface com.sleepycat.persist.evolve.EvolveListener
    +
    +
    The listener method called during eager entity evolution.
    +
    +
    EvolveStats - Class in com.sleepycat.persist.evolve
    +
    +
    Statistics accumulated during eager entity evolution.
    +
    +
    EXCEEDED_TIME - Static variable in class com.sleepycat.je.PreloadStatus
    +
    +
    Database.preload + took more than maxMillisecs.
    +
    +
    ExceptionEvent - Class in com.sleepycat.je
    +
    +
    A class representing an exception event.
    +
    +
    ExceptionEvent(Exception, String) - Constructor for class com.sleepycat.je.ExceptionEvent
    +
     
    +
    ExceptionEvent(Exception) - Constructor for class com.sleepycat.je.ExceptionEvent
    +
     
    +
    ExceptionListener - Interface in com.sleepycat.je
    +
    +
    A callback to notify the application program when an exception occurs in a + JE Daemon thread.
    +
    +
    exceptionThrown(ExceptionEvent) - Method in interface com.sleepycat.je.ExceptionListener
    +
    +
    This method is called if an exception is seen in a JE Daemon thread.
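    A sketch of installing a listener through the mutable environment configuration; env is an open Environment:

        EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
        mutableConfig.setExceptionListener(event ->
            System.err.println("JE daemon exception in " + event.getThreadName() +
                               ": " + event.getException()));
        env.setMutableConfig(mutableConfig);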
    +
    +
    ExceptionUnwrapper - Class in com.sleepycat.util
    +
    +
    Unwraps nested exceptions by calling the ExceptionWrapper.getCause() method for exceptions that implement the + ExceptionWrapper interface.
    +
    +
    ExceptionUnwrapper() - Constructor for class com.sleepycat.util.ExceptionUnwrapper
    +
     
    +
    ExceptionWrapper - Interface in com.sleepycat.util
    +
    +
    Interface implemented by exceptions that can contain nested exceptions.
    +
    +
    execute(InsufficientLogException, NetworkRestoreConfig) - Method in class com.sleepycat.je.rep.NetworkRestore
    +
    +
    Restores the log files from one of the members of the replication group.
    +
    +
    execute(String[]) - Method in class com.sleepycat.je.util.DbFilterStats
    +
    +
    Performs the processing of the DbFilterStats command.
    +
    +
    execute() - Method in class com.sleepycat.persist.model.ClassEnhancerTask
    +
     
    +
    + + + +

    F

    +
    +
    FastInputStream - Class in com.sleepycat.util
    +
    +
    A replacement for ByteArrayInputStream that does not synchronize every + byte read.
    +
    +
    FastInputStream(byte[]) - Constructor for class com.sleepycat.util.FastInputStream
    +
    +
    Creates an input stream.
    +
    +
    FastInputStream(byte[], int, int) - Constructor for class com.sleepycat.util.FastInputStream
    +
    +
    Creates an input stream.
    +
    +
    FastOutputStream - Class in com.sleepycat.util
    +
    +
A replacement for ByteArrayOutputStream that does not synchronize every + byte written.
    +
    +
    FastOutputStream() - Constructor for class com.sleepycat.util.FastOutputStream
    +
    +
    Creates an output stream with default sizes.
    +
    +
    FastOutputStream(int) - Constructor for class com.sleepycat.util.FastOutputStream
    +
    +
    Creates an output stream with a default bump size and a given initial + size.
    +
    +
    FastOutputStream(int, int) - Constructor for class com.sleepycat.util.FastOutputStream
    +
    +
    Creates an output stream with a given bump size and initial size.
    +
    +
    FastOutputStream(byte[]) - Constructor for class com.sleepycat.util.FastOutputStream
    +
    +
    Creates an output stream with a given initial buffer and a default + bump size.
    +
    +
    FastOutputStream(byte[], int) - Constructor for class com.sleepycat.util.FastOutputStream
    +
    +
    Creates an output stream with a given initial buffer and a given + bump size.
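    A round-trip sketch showing how the two stream classes pair, reading back the written buffer without copying:

        FastOutputStream out = new FastOutputStream();
        out.write(new byte[] {1, 2, 3});
        FastInputStream in = new FastInputStream(
            out.getBufferBytes(), 0, out.getBufferLength());
        int first = in.read();   // 1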
    +
    +
    FEEDER_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    A heartbeat is exchanged between the feeder and replica to ensure they + are alive.
    +
    +
    FieldMetadata - Class in com.sleepycat.persist.model
    +
    +
    The metadata for a key field.
    +
    +
    FieldMetadata(String, String, String) - Constructor for class com.sleepycat.persist.model.FieldMetadata
    +
    +
    Used by an EntityModel to construct field metadata.
    +
    +
    FILE_LOGGING_LEVEL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Trace messages equal and above this level will be logged to the je.info + file, which is in the Environment home directory.
    +
    +
    FileHandler - Class in com.sleepycat.je.util
    +
    +
    JE instances of java.util.logging.Logger are configured to use this + implementation of java.util.logging.FileHandler.
    +
    +
    FileHandler(String, int, int, Formatter, EnvironmentImpl) - Constructor for class com.sleepycat.je.util.FileHandler
    +
     
    +
    FILEHANDLER_LEVEL - Static variable in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    FILLED_CACHE - Static variable in class com.sleepycat.je.PreloadStatus
    +
    +
    Database.preload + filled maxBytes of the cache.
    +
    +
    first() - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
    Returns the first (lowest) element currently in this sorted set.
    +
    +
    first() - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
    Returns the first (lowest) element currently in this sorted set.
    +
    +
    first() - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
    Returns the first (lowest) element currently in this sorted set.
    +
    +
    first() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the first value and returns it, or returns null if + the cursor range is empty.
    +
    +
    first(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the first value and returns it, or returns null if + the cursor range is empty.
    +
    +
    firstKey() - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
    Returns the first (lowest) key currently in this sorted map.
    +
    +
    FloatBinding - Class in com.sleepycat.bind.tuple
    +
    +
    A concrete TupleBinding for an unsorted Float + primitive wrapper or an unsorted float primitive.
    +
    +
    FloatBinding() - Constructor for class com.sleepycat.bind.tuple.FloatBinding
    +
     
    +
    floatToEntry(float, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.FloatBinding
    +
    +
    Converts a simple float value into an entry buffer.
    +
    +
    floatToEntry(float, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedFloatBinding
    +
    +
    Converts a simple float value into an entry buffer.
    +
    +
    flushLog(boolean) - Method in class com.sleepycat.je.Environment
    +
    +
    Writes buffered data to the log, and optionally performs an fsync to + guarantee that data is written to the physical device.
    +
    +
    ForeignConstraintException - Exception in com.sleepycat.je
    +
    +
    Thrown when an attempt to write a primary database record would insert a + secondary record with a key that does not exist in a foreign key database, + when the secondary key is configured as a foreign key.
    +
    +
    ForeignKeyDeleteAction - Enum in com.sleepycat.je
    +
    +
    The action taken when a referenced record in the foreign key database is + deleted.
    +
    +
    ForeignKeyNullifier - Interface in com.sleepycat.je
    +
    +
    The interface implemented for setting single-valued foreign keys to null.
    +
    +
    ForeignMultiKeyNullifier - Interface in com.sleepycat.je
    +
    +
    The interface implemented for setting multi-valued foreign keys to null.
    +
    +
    forget(Xid) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    formatUsingPrintable - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    ForwardCursor - Interface in com.sleepycat.je
    +
    +
    The interface for forward-moving Cursor operations.
    +
    +
    ForwardCursor<V> - Interface in com.sleepycat.persist
    +
    +
    Cursor operations limited to traversing forward.
    +
    +
    FREE_DISK - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    A lower limit on the number of bytes of free space to maintain on a + volume and per JE Environment.
    +
    +
    + + + +

    G

    +
    +
    get(Object) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Returns the value to which this map maps the specified key.
    +
    +
    get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.Cursor
    +
    +
    Moves the cursor to a record according to the specified Get + type.
    +
    +
    get(Transaction, DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.Database
    +
    +
    Retrieves a record according to the specified Get type.
    +
    +
    get(Transaction, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Database
    +
    +
    Retrieves the key/data pair with the given key.
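    A sketch of the Get/ReadOptions style of lookup; db is an open Database and the key bytes are placeholders:

        DatabaseEntry key = new DatabaseEntry("someKey".getBytes());
        DatabaseEntry data = new DatabaseEntry();
        OperationResult result = db.get(null, key, data, Get.SEARCH, null);
        if (result != null) {
            // data.getData() holds the record found by exact key match
        }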
    +
    +
    get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.DiskOrderedCursor
    +
     
    +
    get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in interface com.sleepycat.je.ForwardCursor
    +
    +
    Moves the cursor to a record according to the specified Get + type.
    +
    +
    Get - Enum in com.sleepycat.je
    +
    +
    The operation type passed to "get" methods on databases and cursors.
    +
    +
    get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.JoinCursor
    +
    +
    Returns the next primary key and data resulting from the join operation.
    +
    +
    get() - Method in class com.sleepycat.je.rep.util.AtomicLongMax
    +
    +
    Gets the current value.
    +
    +
    get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to a record according to the specified Get + type.
    +
    +
    get(DatabaseEntry, DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to a record according to the specified Get + type.
    +
    +
    get(Transaction, DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Moves the cursor to a record according to the specified Get + type.
    +
    +
    get(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, Get, ReadOptions) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Retrieves a record according to the specified Get type.
    +
    +
    get(Transaction, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryDatabase
    +
     
    +
    get(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Retrieves the key/data pair with the given key.
    +
    +
    get(Transaction, int) - Method in class com.sleepycat.je.Sequence
    +
    +
    Returns the next available element in the sequence and changes the + sequence value by delta.
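    A sketch of allocating values from a sequence; db is an open Database and the key name is a placeholder:

        SequenceConfig seqConfig = new SequenceConfig();
        seqConfig.setAllowCreate(true);
        Sequence seq = db.openSequence(
            null, new DatabaseEntry("mySeq".getBytes()), seqConfig);
        long next = seq.get(null, 1);   // advances the sequence by a delta of 1
        seq.close();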
    +
    +
    get(Get, ReadOptions) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor according to the specified Get type and returns + the value at the updated position.
    +
    +
    get(K) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Gets an entity via a key of this index.
    +
    +
    get(Transaction, K, LockMode) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Gets an entity via a key of this index.
    +
    +
    get(Transaction, K, Get, ReadOptions) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Gets an entity via a key of this index, using Get type and ReadOptions + parameters, and returning an EntityResult.
    +
    +
    get(PK) - Method in class com.sleepycat.persist.PrimaryIndex
    +
     
    +
    get(Transaction, PK, LockMode) - Method in class com.sleepycat.persist.PrimaryIndex
    +
     
    +
    get(Transaction, PK, Get, ReadOptions) - Method in class com.sleepycat.persist.PrimaryIndex
    +
     
    +
    get(SK) - Method in class com.sleepycat.persist.SecondaryIndex
    +
     
    +
    get(Transaction, SK, LockMode) - Method in class com.sleepycat.persist.SecondaryIndex
    +
     
    +
    get(Transaction, SK, Get, ReadOptions) - Method in class com.sleepycat.persist.SecondaryIndex
    +
     
    +
    getAcks() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
The number of transactions that have been + acknowledged.
    +
    +
    getAckWaitMs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The total time in milliseconds that the master spent waiting for the + Durability.ReplicaAckPolicy to be satisfied during successful transaction + commits.
    +
    +
    getActiveFeeders() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the number of current active Feeders running on this node.
    +
    +
    getActiveLogSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Bytes used by all active data files: files required for basic JE operation."
    +
    +
    getActiveTxns() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    Return the array of active transactions.
    +
    +
    getAdminBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of bytes of JE main cache used for cleaner and checkpointer metadata, in bytes."
    +
    +
    getAggressive() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns true if the Environment.verify and Database.verify are configured to perform fine granularity consistency + checking that includes verifying in memory constructs.
    +
    +
    getAllowCreate() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns true if the Environment.openDatabase method is configured to create the database + if it does not already exist.
    +
    +
    getAllowCreate() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
Returns a flag that specifies whether this environment may be created if it does not already exist.
    +
    +
    getAllowCreate() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns true if the Database.openSequence method is configured to create the sequence if it + does not already exist.
    +
    +
    getAllowCreate() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns whether creation of a new store is allowed.
    +
    +
    getAllowNestedTransactions() - Method in class com.sleepycat.collections.TransactionRunner
    +
    +
    Returns whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread.
    +
    +
    getAllowPopulate() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns whether automatic population of the secondary is allowed.
    +
    +
    getAllRawTypes() - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns all versions of all known types.
    +
    +
    getAllRawTypeVersions(String) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns all known versions of type information for a given class name, + or null if no persistent version of the class is known.
    +
    +
    getAppState() - Method in interface com.sleepycat.je.rep.AppStateMonitor
    +
    +
    Return a byte array which holds information about the application's + state.
    +
    +
    getAppState() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the application state which is obtained via + AppStateMonitor.getAppState().
    +
    +
    getArbiterHome() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Gets the Arbiter home directory.
    +
    +
    getArbiterMutableConfig() - Method in class com.sleepycat.je.rep.arbiter.Arbiter
    +
    +
    Returns the Arbiter mutable attributes.
    +
    +
    getArbiterNodes() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
    Returns the subset of nodes in the group that participates in elections + but does not have a copy of the data and cannot become a master.
    +
    +
    getAttribute(String) - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    getAttribute(Environment, String) - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Get an attribute value for the given environment.
    +
    +
    getAttribute(String) - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    getAttributeList() - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    getAttributeList() - Method in class com.sleepycat.je.jmx.JEMBean
    +
    +
    Get attribute metadata for this MBean.
    +
    +
    getAttributeList(Environment) - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Get MBean attribute metadata for this environment.
    +
    +
    getAttributeList() - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    getAttributes(String[]) - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    getAttributes(String[]) - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    getAutoCommitNoSync() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
Returns true if the auto-commit operations on the sequence are configured + to not flush the transaction log.
    +
    +
    getAvailableLogSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Bytes available for write operations when unprotected reserved files are deleted: free space + reservedLogSize - protectedLogSize."
    +
    +
    getAvailableReplicas() - Method in exception com.sleepycat.je.rep.InsufficientReplicasException
    +
    +
    Returns the set of Replicas that were in contact with the master at the + time of the commit operation.
    +
    +
    getAvgBatchCacheMode() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 + always.
    +
    +
    +
    getAvgBatchCritical() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 + always.
    +
    +
    +
    getAvgBatchDaemon() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 + always.
    +
    +
    +
    getAvgBatchEvictorThread() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 + always.
    +
    +
    +
    getAvgBatchManual() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 + always.
    +
    +
    +
    getBaseClass() - Method in class com.sleepycat.bind.serial.SerialBinding
    +
    +
    Returns the base class for this binding.
    +
    +
    getBatchDelay(TimeUnit) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns the batch delay.
    +
    +
    getBatchSize() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns the batchSize value.
    +
    +
    getBdescriptor(Class) - Static method in class com.sleepycat.util.ConfigBeanInfoBase
    +
     
    +
    getBeanDescriptor() - Method in class com.sleepycat.je.rep.monitor.MonitorConfigBeanInfo
    +
     
    +
    getBeanDescriptor() - Method in class com.sleepycat.persist.evolve.EvolveConfigBeanInfo
    +
     
    +
    getBeanDescriptor() - Method in class com.sleepycat.persist.StoreConfigBeanInfo
    +
     
    +
    getBeanDescriptor(Class) - Method in class com.sleepycat.util.ConfigBeanInfoBase
    +
    +
    Gets the bean's BeanDescriptors.
    +
    +
    getBigDecimalByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Returns the byte length of an unsorted BigDecimal.
    +
    +
    getBigDecimalMaxByteLength(BigDecimal) - Static method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Returns the maximum byte length that would be output for a given BigDecimal value if TupleOutput.writeBigDecimal(java.math.BigDecimal) were called.
    +
    +
    getBigIntegerByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Returns the byte length of a BigInteger.
    +
    +
    getBigIntegerByteLength(BigInteger) - Static method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Returns the exact byte length that would be output for a given + BigInteger value if TupleOutput.writeBigInteger(java.math.BigInteger) were + called.
    +
    +
    getBINEntriesHistogram() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns an array representing a histogram of the number of Bottom + Internal Nodes with various percentages of non-deleted entry counts.
    +
    +
    getBINsByLevel() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the count of Bottom Internal Nodes per level, indexed by level.
    +
    +
    getBINsOnly() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Returns true if the DiskOrderedCursor is configured to scan BINs only, + returning all record keys and only those record data that are embedded + in the BINs.
    +
    +
    getBottomInternalNodeCount() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the number of Bottom Internal Nodes in the database tree.
    +
    +
    getBtreeComparator() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the Comparator used for key comparison on this database.
    +
    +
    getBtreeComparatorByClassName() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
Returns true if the btree comparator is set by class name, not by a + serializable Comparator object.
    +
    +
    getBufferBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The total memory currently consumed by log buffers, in bytes.
    +
    +
    getBufferBytes() - Method in class com.sleepycat.util.FastInputStream
    +
    +
    Returns the underlying data being read.
    +
    +
    getBufferBytes() - Method in class com.sleepycat.util.FastOutputStream
    +
    +
    Returns the buffer owned by this object.
    +
    +
    getBufferLength() - Method in class com.sleepycat.util.FastInputStream
    +
    +
    Returns the end of the buffer being read.
    +
    +
    getBufferLength() - Method in class com.sleepycat.util.FastOutputStream
    +
    +
    Returns the length used in the internal buffer, i.e., the offset at + which data will be written next.
    +
    +
    getBufferOffset() - Method in class com.sleepycat.util.FastInputStream
    +
    +
    Returns the offset at which data is being read from the buffer.
    +
    +
    getBufferOffset() - Method in class com.sleepycat.util.FastOutputStream
    +
    +
    Returns the offset of the internal buffer.
    +
    +
    getByteLength(char[]) - Static method in class com.sleepycat.util.UtfOps
    +
    +
    Returns the byte length of the UTF string that would be created by + converting the given characters to UTF.
    +
    +
    getByteLength(char[], int, int) - Static method in class com.sleepycat.util.UtfOps
    +
    +
    Returns the byte length of the UTF string that would be created by + converting the given characters to UTF.
    +
    +
    getCacheDataBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    Please use EnvironmentStats.getDataBytes() to get the amount of cache + used for data and use EnvironmentStats.getAdminBytes(), EnvironmentStats.getLockBytes() and + EnvironmentStats.getBufferBytes() to get other components of the total cache usage + (EnvironmentStats.getCacheTotalBytes()).
    +
    +
    +
    getCacheMode() - Method in class com.sleepycat.je.Cursor
    +
    +
    Returns the default CacheMode used for subsequent operations + performed using this cursor.
    +
    +
    getCacheMode() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the default CacheMode used for operations performed on + this database, or null if the environment default is used.
    +
    +
    getCacheMode() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Returns the default CacheMode used for operations performed in + this environment, or null if CacheMode.DEFAULT is used.
    +
    +
    getCacheMode() - Method in class com.sleepycat.je.ReadOptions
    +
    +
    Returns the CacheMode to be used for the operation, or null + if the Cursor, Database or Environment default will be used.
    +
    +
    getCacheMode() - Method in class com.sleepycat.je.WriteOptions
    +
    +
    Returns the CacheMode to be used for the operation, or null + if the Cursor, Database or Environment default will be used.
    +
    +
    getCacheMode() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Returns the default CacheMode used for subsequent operations + performed using this cursor.
    +
    +
    getCachePercent() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    A convenience method for getting EnvironmentConfig.MAX_MEMORY_PERCENT.
    +
    +
    getCacheSize() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Returns the memory available to the database system, in bytes.
    +
    +
    getCacheSize() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
Returns the number of elements cached by a sequence handle.
    +
    +
    getCacheSize() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the number of values that will be cached in this handle.
    +
    +
    getCacheTotalBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Total amount of JE main cache in use, in bytes."
    +
    +
    getCatalog() - Method in class com.sleepycat.collections.TupleSerialFactory
    +
    +
    Returns the class catalog associated with this factory.
    +
    +
    getCause() - Method in interface com.sleepycat.util.ExceptionWrapper
    +
    +
    Returns the nested exception or null if none is present.
    +
    +
    getCause() - Method in exception com.sleepycat.util.IOExceptionWrapper
    +
     
    +
    getChangeType() - Method in class com.sleepycat.je.rep.monitor.GroupChangeEvent
    +
    +
    Returns the type of the change (the addition of a new member or the + removal of an existing member) made to the group.
    +
    +
    getCharLength(byte[]) - Static method in class com.sleepycat.util.UtfOps
    +
    +
    Returns the number of characters represented by the given UTF string.
    +
    +
    getCharLength(byte[], int, int) - Static method in class com.sleepycat.util.UtfOps
    +
    +
    Returns the number of characters represented by the given UTF string.
    +
    +
    getClassesToEvolve() - Method in class com.sleepycat.persist.evolve.EvolveConfig
    +
    +
    Returns an unmodifiable set of the entity classes to be evolved.
    +
    +
    getClassFormat(byte[]) - Method in interface com.sleepycat.bind.serial.ClassCatalog
    +
    +
    Return the ObjectStreamClass for the given class ID.
    +
    +
    getClassFormat(byte[]) - Method in class com.sleepycat.bind.serial.StoredClassCatalog
    +
     
    +
    getClassID(ObjectStreamClass) - Method in interface com.sleepycat.bind.serial.ClassCatalog
    +
    +
    Return the class ID for the current version of the given class + description.
    +
    +
    getClassID(ObjectStreamClass) - Method in class com.sleepycat.bind.serial.StoredClassCatalog
    +
     
    +
    getClassLoader() - Method in interface com.sleepycat.bind.serial.ClassCatalog
    +
    +
    Returns the ClassLoader to be used by bindings that use this catalog, or + null if a default class loader should be used.
    +
    +
    getClassLoader() - Method in class com.sleepycat.bind.serial.SerialBinding
    +
    +
    Returns the class loader to be used during deserialization, or null if a + default class loader should be used.
    +
    +
    getClassLoader() - Method in class com.sleepycat.bind.serial.StoredClassCatalog
    +
    +
    For BDB JE, returns the ClassLoader property of the catalog database + environment.
    +
    +
    getClassLoader() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns the ClassLoader for loading user-supplied classes by name, or + null if no specified ClassLoader is configured.
    +
    +
    getClassMetadata(String) - Method in class com.sleepycat.persist.model.AnnotationModel
    +
     
    +
    getClassMetadata(String) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns the metadata for a given persistent class name, including proxy + classes and entity classes.
    +
    +
    getClassMetadata() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns the original model class metadata used to create this class, or + null if this is not a model class.
    +
    +
    getClassName() - Method in class com.sleepycat.persist.evolve.Mutation
    +
    +
    Returns the class to which this mutation applies.
    +
    +
    getClassName() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Returns the name of the persistent class.
    +
    +
    getClassName() - Method in class com.sleepycat.persist.model.EntityMetadata
    +
    +
    Returns the name of the entity class.
    +
    +
    getClassName() - Method in class com.sleepycat.persist.model.FieldMetadata
    +
    +
    Returns the class name of the field type.
    +
    +
    getClassName() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns the class name for this type in the format specified by Class.getName().
    +
    +
    getClassVersion() - Method in class com.sleepycat.persist.evolve.Mutation
    +
    +
    Returns the class version to which this mutation applies.
    +
    +
    getCleanerBacklog() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    in 7.0, always returns zero. Use EnvironmentStats.getCurrentMinUtilization() and EnvironmentStats.getCurrentMaxUtilization() to + monitor cleaner behavior.
    +
    +
    +
    getClear() - Method in class com.sleepycat.je.StatsConfig
    +
    +
    Returns true if the statistics operation is configured to reset + statistics after they are returned.
    +
    +
    getCollection() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Returns the collection associated with this iterator.
    +
    +
    getCommitPolicy() - Method in exception com.sleepycat.je.rep.InsufficientReplicasException
    +
    +
    Returns the Replica ack policy that was in effect for the transaction.
    +
    +
    getCommitToken() - Method in class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
    +
    Return the CommitToken used to create this consistency + policy.
    +
    +
    getCommitToken() - Method in class com.sleepycat.je.Transaction
    +
    +
    This method is intended for use with a replicated environment.
    +
    +
    getComponentType() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns the array component type, or null if this is not an array type.
    +
    +
    getCompositeKeyFields() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
Returns an unmodifiable list of metadata for the fields making up a + composite key, or null if this is not a composite key class.
    +
    +
    getConfig() - Method in class com.sleepycat.je.Cursor
    +
    +
    Returns this cursor's configuration.
    +
    +
    getConfig() - Method in class com.sleepycat.je.Database
    +
    +
    Returns this Database object's configuration.
    +
    +
    getConfig() - Method in class com.sleepycat.je.DiskOrderedCursor
    +
    +
    Returns this cursor's configuration.
    +
    +
    getConfig() - Method in class com.sleepycat.je.Environment
    +
    +
    Returns this object's configuration.
    +
    +
    getConfig() - Method in class com.sleepycat.je.JoinCursor
    +
    +
    Returns this object's configuration.
    +
    +
    getConfig() - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Returns a copy of the secondary configuration of this database.
    +
    +
    getConfig() - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns a copy of the entity store configuration.
    +
    +
    getConfig() - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
    Returns a copy of the entity store configuration.
    +
    +
    getConfigParam(String) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Returns the value for this configuration parameter.
    +
    +
    getConfigParam(String) - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Return the value for this parameter.
    +
    +
    getConnection(String, EnvironmentConfig) - Method in interface com.sleepycat.je.jca.ra.JEConnectionFactory
    +
     
    +
    getConnection(String, EnvironmentConfig, TransactionConfig) - Method in interface com.sleepycat.je.jca.ra.JEConnectionFactory
    +
     
    +
    getConsistencyPolicy() - Method in exception com.sleepycat.je.rep.ReplicaConsistencyException
    +
    +
    Returns the Replica consistency policy that could not be satisfied.
    +
    +
    getConsistencyPolicy() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the default consistency policy associated with the configuration.
    +
    +
    getConsistencyPolicy() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns the consistency policy associated with the configuration.
    +
    +
    getConsoleLoggingLevel() - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
    +
    Gets the console logging level.
    +
    +
    getConstructors() - Method in class com.sleepycat.je.jmx.JEMBean
    +
    +
    Get constructor metadata for this MBean.
    +
    +
    getConversion() - Method in class com.sleepycat.persist.evolve.Converter
    +
    +
    Returns the converter instance specified to the constructor.
    +
    +
    getConverter(String, int, String) - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns the converter mutation for the given class, version and field, or null if none exists.
    +
    +
    getConverters() - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns an unmodifiable collection of all converter mutations.
    +
    +
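    The getConverter/getConverters and getDeleter/getDeleters accessors read back mutations that were registered before the store was opened. A sketch of registering mutations; the class names, field name and versions ("old.pkg.Person", "obsolete", 1, 2) are illustrative only:

        import com.sleepycat.persist.StoreConfig;
        import com.sleepycat.persist.evolve.Deleter;
        import com.sleepycat.persist.evolve.Mutations;
        import com.sleepycat.persist.evolve.Renamer;

        static StoreConfig configWithMutations() {
            Mutations mutations = new Mutations();
            // Class "old.pkg.Person" at version 1 was renamed.
            mutations.addRenamer(new Renamer("old.pkg.Person", 1, "new.pkg.Person"));
            // Field "obsolete" was dropped from version 2 of the renamed class.
            mutations.addDeleter(new Deleter("new.pkg.Person", 2, "obsolete"));
            StoreConfig config = new StoreConfig();
            config.setMutations(mutations);
            return config;
        }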
    getCorrectedAvgLNSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    in JE 5.0.56, use EnvironmentStats.getCorrectedAvgLNSize() instead.
    +
    +
    +
    getCurrent(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Returns the key/data pair to which the cursor refers.
    +
    +
    getCurrent(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.DiskOrderedCursor
    +
     
    +
    getCurrent(DatabaseEntry, DatabaseEntry, LockMode) - Method in interface com.sleepycat.je.ForwardCursor
    +
    +
    Returns the key/data pair to which the cursor refers.
    +
    +
    getCurrent(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.JoinCursor
    +
    +
    This operation is not allowed on a join cursor.
    +
    +
    getCurrent(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Returns the key/data pair to which the cursor refers.
    +
    +
    getCurrent(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Returns the key/data pair to which the cursor refers.
    +
    +
    getCurrent() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the current value of the sequence in the database.
    +
    +
    getCurrentMaxUtilization() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "The current maximum (upper bound) log utilization as a percentage."
    +
    +
    getCurrentMinUtilization() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "The current minimum (lower bound) log utilization as a percentage."
    +
    +
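    These two bounds replace the deprecated cleaner-backlog statistic noted earlier in this index. A minimal monitoring sketch, assuming an open Environment:

        import com.sleepycat.je.Environment;
        import com.sleepycat.je.EnvironmentStats;

        static void printUtilization(Environment env) {
            EnvironmentStats stats = env.getStats(null); // default StatsConfig
            // Actual log utilization lies between these two percentages.
            int min = stats.getCurrentMinUtilization();
            int max = stats.getCurrentMaxUtilization();
            System.out.println("Log utilization: " + min + "% to " + max + "%");
        }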
    getCurrentTxnEndVLSN() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the latest transaction end VLSN on this replication node.
    +
    +
    getCursorConfig() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
    Returns the cursor configuration that is used for all operations performed via this container.
    +
    +
    getCursorsBins() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of BINs encountered by the INCompressor that had cursors referring to them when the compressor ran.
    +
    +
    getCustomStats() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Gets the custom statistics object.
    +
    +
    getData() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Returns the byte array.
    +
    +
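    getData returns the backing byte array, which may be larger than the record itself; getOffset and getSize delimit the valid region. A sketch of a read that honors all three, assuming db is an open Database storing UTF-8 values:

        import java.nio.charset.StandardCharsets;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.LockMode;
        import com.sleepycat.je.OperationStatus;

        static String readValue(Database db, String keyString) {
            DatabaseEntry key =
                new DatabaseEntry(keyString.getBytes(StandardCharsets.UTF_8));
            DatabaseEntry data = new DatabaseEntry();
            if (db.get(null, key, data, LockMode.DEFAULT)
                    == OperationStatus.SUCCESS) {
                // Use offset/size: the returned array may hold extra bytes.
                return new String(data.getData(), data.getOffset(),
                                  data.getSize(), StandardCharsets.UTF_8);
            }
            return null; // key not present
        }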
    getDataAdminBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Amount of JE main cache used for holding per-database cleaner utilization metadata, in bytes."
    +
    +
    getDatabase() - Method in class com.sleepycat.je.Cursor
    +
    +
    Returns the Database handle associated with this Cursor.
    +
    +
    getDatabase() - Method in class com.sleepycat.je.DiskOrderedCursor
    +
    +
    Returns the Database handle for the database that contains the latest record returned by getNext().
    +
    +
    getDatabase() - Method in interface com.sleepycat.je.ForwardCursor
    +
    +
    Returns the Database handle associated with this ForwardCursor.
    +
    +
    getDatabase() - Method in class com.sleepycat.je.JoinCursor
    +
    +
    Returns the primary database handle associated with this cursor.
    +
    +
    getDatabase() - Method in exception com.sleepycat.je.rep.DatabasePreemptedException
    +
    +
    Returns the database handle that was forcibly closed.
    +
    +
    getDatabase() - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Returns the Database handle associated with this Cursor.
    +
    +
    getDatabase() - Method in class com.sleepycat.je.Sequence
    +
    +
    Returns the Database handle associated with this sequence.
    +
    +
    getDatabase() - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Returns the underlying database for this index.
    +
    +
    getDatabase() - Method in class com.sleepycat.persist.SecondaryIndex
    +
    +
    Returns the underlying secondary database for this index.
    +
    +
    getDatabaseName() - Method in class com.sleepycat.je.Database
    +
    +
    Returns the database name.
    +
    +
    getDatabaseName() - Method in exception com.sleepycat.je.rep.DatabasePreemptedException
    +
    +
    Returns the name of the database that was forcibly closed.
    +
    +
    getDatabaseNames() - Method in class com.sleepycat.je.Environment
    +
    +
    Returns a List of database names for the database environment.
    +
    +
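    A short sketch of enumerating the databases in an environment, assuming env is open:

        import java.util.List;
        import com.sleepycat.je.Environment;

        static void listDatabases(Environment env) {
            List<String> names = env.getDatabaseNames();
            for (String name : names) {
                System.out.println(name);
            }
        }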
    getDataBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Amount of JE main cache used for holding data, keys and internal Btree nodes, in bytes."
    +
    +
    getDataNodes() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
    Returns the subset of nodes in the group that store replication data.
    +
    +
    getDbClosedBins() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of BINs encountered by the INCompressor that had their database closed between the time they were put on the compressor queue and when the compressor ran.
    +
    +
    getDBINsByLevel() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Deprecated. +
    as of 5.0, returns an empty array.
    +
    +
    +
    getDeclaringClassName() - Method in class com.sleepycat.persist.model.FieldMetadata
    +
    +
    Returns the name of the class where the field is declared.
    +
    +
    getDecrement() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns true if the sequence is configured to decrement.
    +
    +
    getDefaultEventIndex() - Method in class com.sleepycat.util.ConfigBeanInfoBase
    +
    +
    A bean may have a "default" event that is the event that will most commonly be used by humans when using the bean.
    +
    +
    getDefaultPropertyIndex() - Method in class com.sleepycat.util.ConfigBeanInfoBase
    +
    +
    A bean may have a "default" property that is the property that will most commonly be initially chosen for update by humans who are customizing the bean.
    +
    +
    getDeferredWrite() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the deferred-write option.
    +
    +
    getDeferredWrite() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the deferred-write configuration property.
    +
    +
    getDeleteAction() - Method in class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
    +
    Returns the action to take when a related entity is deleted having a primary key value that exists as a secondary key value for this entity.
    +
    +
    getDeletedKeys() - Method in class com.sleepycat.persist.evolve.EntityConverter
    +
    +
    Returns the set of key names that are to be deleted.
    +
    +
    getDeletedLeafNodeCount() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the number of deleted data records in the database tree that are pending removal by the compressor.
    +
    +
    getDeleter(String, int, String) - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns the deleter mutation for the given class, version and field, or null if none exists.
    +
    +
    getDeleters() - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns an unmodifiable collection of all deleter mutations.
    +
    +
    getDesignatedPrimary() - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Determines whether this node is the currently designated Primary.
    +
    +
    getDetail() - Method in interface com.sleepycat.util.ExceptionWrapper
    +
    +
    Deprecated.
    +
    +
    getDetail() - Method in exception com.sleepycat.util.IOExceptionWrapper
    +
    +
    Deprecated.
    +
    +
    getDetail() - Method in exception com.sleepycat.util.RuntimeExceptionWrapper
    +
    +
    Deprecated. +
    replaced by Throwable.getCause().
    +
    +
    +
    getDimensions() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns the number of array dimensions, or zero if this is not an array type.
    +
    +
    getDINsByLevel() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Deprecated. +
    as of 5.0, returns an empty array.
    +
    +
    +
    getDirtyLRUSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of INs in the dirty/priority-2 LRU "
    +
    +
    getDOSBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Amount of JE main cache consumed by disk-ordered cursor and Database.count operations, in bytes."
    +
    +
    getDTVLSN() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
    The highest commit DTVLSN that has been acknowledged.
    +
    +
    getDupCountLeafNodeCount() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Deprecated. +
    as of 5.0, returns zero.
    +
    +
    +
    getDuplicateBottomInternalNodeCount() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Deprecated. +
    as of 5.0, returns zero.
    +
    +
    +
    getDuplicateComparator() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the Comparator used for duplicate record comparison on this database.
    +
    +
    getDuplicateComparatorByClassName() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns true if the duplicate comparator is set by class name, not by serializable Comparator object.
    +
    +
    getDuplicateInternalNodeCount() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Deprecated. +
    as of 5.0, returns zero.
    +
    +
    +
    getDuplicateTreeMaxDepth() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Deprecated. +
    as of 5.0, returns zero.
    +
    +
    +
    getDurability() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Convenience method for getting EnvironmentConfig.TXN_DURABILITY.
    +
    +
    getDurability() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns the durability associated with the configuration.
    +
    +
    getEarliestTransactionCommitTime() - Method in exception com.sleepycat.je.rep.RollbackException
    +
    +
    Return the time in milliseconds of the earliest transaction commit that has been rolled back.
    +
    +
    getEarliestTransactionCommitTime() - Method in exception com.sleepycat.je.rep.RollbackProhibitedException
    +
    +
    Return the time in milliseconds of the earliest transaction commit that will be rolled back if the log is truncated to the location specified by RollbackProhibitedException.getTruncationFileNumber() and RollbackProhibitedException.getTruncationFileOffset().
    +
    +
    getEarliestTransactionId() - Method in exception com.sleepycat.je.rep.RollbackException
    +
    +
    Return the id of the earliest transaction commit that has been rolled back.
    +
    +
    getEarliestTransactionId() - Method in exception com.sleepycat.je.rep.RollbackProhibitedException
    +
    +
    Return the id of the earliest transaction commit that will be rolled back if the log is truncated to the location specified by RollbackProhibitedException.getTruncationFileNumber() and RollbackProhibitedException.getTruncationFileOffset().
    +
    +
    getElectableGroupSizeOverride() - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Returns the value associated with the override.
    +
    +
    getElectableNodes() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
    Returns the subset of nodes in the group with replicated environments that participate in elections and can become masters, ignoring node priority.
    +
    +
    getElementClassName() - Method in class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
    +
    Returns the class name of the array or collection element for a ONE_TO_MANY or MANY_TO_MANY relationship, or null for a ONE_TO_ONE or MANY_TO_ONE relationship.
    +
    +
    getElements() - Method in class com.sleepycat.persist.raw.RawObject
    +
    +
    Returns the array of elements for an array type, or null for a complex type or an enum type.
    +
    +
    getEndOfLog() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The location of the next entry to be written to the log.
    +
    +
    getEntityBinding(Class<V>) - Method in class com.sleepycat.collections.TupleSerialFactory
    +
     
    +
    getEntityBinding() - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Returns the entity binding for this index.
    +
    +
    getEntityClass() - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Returns the entity class for this index.
    +
    +
    getEntityClassName() - Method in class com.sleepycat.persist.evolve.EvolveEvent
    +
    +
    The class name of the current entity class being converted.
    +
    +
    getEntityMetadata(String) - Method in class com.sleepycat.persist.model.AnnotationModel
    +
     
    +
    getEntityMetadata(String) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns the metadata for a given entity class name.
    +
    +
    getEntityMetadata() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns the original model entity metadata used to create this class, or null if this is not an entity class.
    +
    +
    getEnum() - Method in class com.sleepycat.persist.raw.RawObject
    +
    +
    Returns the enum constant String for an enum type, or null for a complex type or an array type.
    +
    +
    getEnumConstants() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns an unmodifiable list of the names of the enum instances, or null if this is not an enum type.
    +
    +
    getEnvironment() - Method in class com.sleepycat.collections.CurrentTransaction
    +
    +
    Returns the underlying Berkeley DB environment.
    +
    +
    getEnvironment() - Method in class com.sleepycat.je.Database
    +
    +
    Returns the Environment handle for the database environment underlying the Database.
    +
    +
    getEnvironment() - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    getEnvironment() - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns the environment associated with this store.
    +
    +
    getEnvironment() - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
    Returns the environment associated with this store.
    +
    +
    getEnvironmentCreationTime() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The time the Environment was created.
    +
    +
    getEnvironmentHome() - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Return the target environment directory.
    +
    +
    getEnvironmentOpenConfig() - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    If the helper was instantiated with canConfigure==true, it shows environment configuration attributes.
    +
    +
    getEstimatedAvgLNSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    in JE 5.0.56, use EnvironmentStats.getCorrectedAvgLNSize() instead.
    +
    +
    +
    getEvent() - Method in exception com.sleepycat.je.rep.StateChangeException
    +
    +
    Returns the event that resulted in this exception.
    +
    +
    getEventSetDescriptors() - Method in class com.sleepycat.util.ConfigBeanInfoBase
    +
    +
    Gets the bean's EventSetDescriptors.
    +
    +
    getEventTime() - Method in class com.sleepycat.je.rep.StateChangeEvent
    +
    +
    Returns the time (in nanosecond units) the event occurred, as reported by System.nanoTime().
    +
    +
    getEvolveListener() - Method in class com.sleepycat.persist.evolve.EvolveConfig
    +
    +
    Returns the progress listener that is notified each time an entity is read.
    +
    +
    getException() - Method in class com.sleepycat.je.ExceptionEvent
    +
    +
    Returns the exception in the event.
    +
    +
    getExceptionListener() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Returns the exception listener, if set.
    +
    +
    getExclusiveCreate() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns true if the Environment.openDatabase method is configured to fail if the database already exists.
    +
    +
    getExclusiveCreate() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns true if the Database.openSequence method is configured to fail if the database already exists.
    +
    +
    getExclusiveCreate() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns whether opening an existing store is prohibited.
    +
    +
    getExpirationTime() - Method in class com.sleepycat.je.OperationResult
    +
    +
    Returns the expiration time of the record, in milliseconds, or zero if the record has no TTL and does not expire.
    +
    +
    getExpirationTime() - Method in exception com.sleepycat.je.SecondaryReferenceException
    +
    +
    Returns the expiration time of the record being accessed during the failure.
    +
    +
    getFast() - Method in class com.sleepycat.je.StatsConfig
    +
    +
    Returns true if the statistics operation is configured to return only the values which do not require expensive actions.
    +
    +
    getFieldName() - Method in class com.sleepycat.persist.evolve.Mutation
    +
    +
    Returns the field name to which this mutation applies, or null if this mutation applies to the class itself.
    +
    +
    getFieldNames() - Method in interface com.sleepycat.je.CustomStats
    +
    +
    The field names that are output to the je.stats.csv file.
    +
    +
    getFields() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns a map of field name to raw field for each non-static non-transient field declared in this class, or null if this is not a complex type (in other words, this is a simple type or an array type).
    +
    +
    getFieldValues() - Method in interface com.sleepycat.je.CustomStats
    +
    +
    The field values that are output to the je.stats.csv file.
    +
    +
    getFileDeletionBacklog() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    in 7.5, always returns zero. Use EnvironmentStats.getProtectedLogSize() and EnvironmentStats.getProtectedLogSizeMap() to monitor file protection.
    +
    +
    +
    getFileLoggingLevel() - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
    +
    Gets the file logging level.
    +
    +
    getFirst(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Moves the cursor to the first key/data pair of the database, and returns that pair.
    +
    +
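    getFirst positions the cursor at the start and getNext advances it; together they form the usual full-scan idiom. A sketch, assuming db is an open Database:

        import com.sleepycat.je.Cursor;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.LockMode;
        import com.sleepycat.je.OperationStatus;

        static long scanAll(Database db) {
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            long count = 0;
            Cursor cursor = db.openCursor(null, null);
            try {
                OperationStatus status =
                    cursor.getFirst(key, data, LockMode.DEFAULT);
                while (status == OperationStatus.SUCCESS) {
                    count++; // process key/data here
                    status = cursor.getNext(key, data, LockMode.DEFAULT);
                }
            } finally {
                cursor.close(); // always release the cursor's locks
            }
            return count;
        }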
    getFirst(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to the first key/data pair of the database, and returns that pair.
    +
    +
    getFirst(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Move the cursor to the first key/data pair of the database, and return that pair.
    +
    +
    getForce() - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Returns the configuration of the checkpoint force option.
    +
    +
    getForeignKeyDatabase() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns the database used to check the foreign key integrity constraint, or null if no foreign key constraint will be checked.
    +
    +
    getForeignKeyDeleteAction() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns the action taken when a referenced record in the foreign key database is deleted.
    +
    +
    getForeignKeyNullifier() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns the user-supplied object used for setting single-valued foreign keys to null.
    +
    +
    getForeignMultiKeyNullifier() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns the user-supplied object used for setting multi-valued foreign keys to null.
    +
    +
    getFSyncMaxTime() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The maximum number of milliseconds used to perform a single fsync.
    +
    +
    getFSyncs() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
    The number of file fsyncs.
    +
    +
    getFSyncTime() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The total number of milliseconds used to perform fsyncs.
    +
    +
    getGroup() - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Returns the current composition of the group.
    +
    +
    getGroup() - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    Returns a description of the replication group as known by this node.
    +
    +
    getGroup() - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Returns the current composition of the group from the Master.
    +
    +
    getGroupName() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Gets the name associated with the replication group.
    +
    +
    getGroupName() - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Returns the name of the group associated with the Monitor.
    +
    +
    getGroupName() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Gets the name associated with the replication group.
    +
    +
    getGroupName() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the name of the group which the node joins.
    +
    +
    getGroupName() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Gets the name associated with the replication group.
    +
    +
    getGroupName() - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Returns the name of the replication group.
    +
    +
    getHeartbeatInterval() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Gets the heartbeat interval in milliseconds.
    +
    +
    getHelperHosts() - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
    +
    Returns the string identifying one or more helper host and port pairs in this format:
    +
    +
    getHelperHosts() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns the string identifying one or more helper host and port pairs in this format:
    +
    +
    getHelperHosts() - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Returns the string identifying one or more helper host and port pairs in this format:
    +
    +
    getHelperSockets() - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Returns the helper sockets being used to contact a replication group member, in order to query for the information.
    +
    +
    getHome() - Method in class com.sleepycat.je.Environment
    +
    +
    Returns the database environment's home directory.
    +
    +
    getHostName() - Method in interface com.sleepycat.je.rep.ReplicationNode
    +
    +
    Returns the host name associated with the node.
    +
    +
    getIcon(int) - Method in class com.sleepycat.util.ConfigBeanInfoBase
    +
    +
    This method returns an image object that can be used to represent the bean in toolboxes, toolbars, etc.
    +
    +
    getId() - Method in class com.sleepycat.je.Transaction
    +
    +
    Return the transaction's unique ID.
    +
    +
    getId() - Method in class com.sleepycat.je.TransactionStats.Active
    +
    +
    The transaction ID of the transaction.
    +
    +
    getId() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns the internal unique ID for this type.
    +
    +
    getImmutableSecondaryKey() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns whether the secondary key is immutable.
    +
    +
    getInCompQueueSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of entries in the INCompressor queue when the getStats() call was made.
    +
    +
    getInitialValue() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns the initial value for a sequence.
    +
    +
    getINsByLevel() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the count of Internal Nodes per level, indexed by level.
    +
    +
    getInstance(Environment) - Static method in class com.sleepycat.collections.CurrentTransaction
    +
    +
    Gets the CurrentTransaction accessor for a specified Berkeley DB environment.
    +
    +
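    CurrentTransaction keeps one transaction per thread for use with the stored-collections API. A minimal sketch of the begin/commit/abort pattern, assuming a transactional Environment; the work itself is application-defined:

        import com.sleepycat.collections.CurrentTransaction;
        import com.sleepycat.je.Environment;

        static void runInTransaction(Environment env, Runnable work) {
            CurrentTransaction ct = CurrentTransaction.getInstance(env);
            ct.beginTransaction(null); // default TransactionConfig
            try {
                work.run(); // stored-collection operations join this txn
                ct.commitTransaction();
            } catch (RuntimeException e) {
                ct.abortTransaction();
                throw e;
            }
        }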
    getInternalMemoryLimit() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Returns the maximum amount of JE Cache Memory that the DiskOrderedScan can use at one time.
    +
    +
    getInternalMemoryLimit() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Returns the maximum amount of non-JE Cache Memory that preload can use at one time.
    +
    +
    getInternalNodeCount() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the number of Internal Nodes in the database tree.
    +
    +
    getInvalidatingException() - Method in class com.sleepycat.je.Environment
    +
    +
    Returns the exception that caused the environment to be invalidated, or null if the environment was not invalidated by an exception.
    +
    +
    getIteratorBlockSize() - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Returns the number of records read at one time by iterators returned by the StoredCollection.iterator() method.
    +
    +
    getJEVersion() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the current JEVersion that this node runs on.
    +
    +
    getJoinTime() - Method in class com.sleepycat.je.rep.monitor.JoinGroupEvent
    +
    +
    Returns the time at which the node joined the group.
    +
    +
    getJoinTime() - Method in class com.sleepycat.je.rep.monitor.LeaveGroupEvent
    +
     
    +
    getJoinTime() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the time when this node joins the replication group.
    +
    +
    getKBytes() - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Returns the checkpoint log data threshold, in kilobytes.
    +
    +
    getKey() - Method in class com.sleepycat.collections.MapEntryParameter
    +
    +
    Returns the key of this entry.
    +
    +
    getKey() - Method in class com.sleepycat.je.Sequence
    +
    +
    Returns the DatabaseEntry used to open this sequence.
    +
    +
    getKeyBinding() - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Returns the primary key binding for this index.
    +
    +
    getKeyBinding() - Method in class com.sleepycat.persist.SecondaryIndex
    +
    +
    Returns the secondary key binding for the index.
    +
    +
    getKeyClass() - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Returns the primary key class for this index.
    +
    +
    getKeyClass() - Method in class com.sleepycat.persist.SecondaryIndex
    +
    +
    Returns the secondary key class for this index.
    +
    +
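    These index accessors come from the Direct Persistence Layer. A sketch of opening a primary and a secondary index; the Person entity class below is a hypothetical example, not part of the library:

        import com.sleepycat.persist.EntityStore;
        import com.sleepycat.persist.PrimaryIndex;
        import com.sleepycat.persist.SecondaryIndex;
        import com.sleepycat.persist.model.Entity;
        import com.sleepycat.persist.model.PrimaryKey;
        import com.sleepycat.persist.model.Relationship;
        import com.sleepycat.persist.model.SecondaryKey;

        @Entity
        class Person {
            @PrimaryKey
            long id;
            @SecondaryKey(relate = Relationship.MANY_TO_ONE)
            String name;
        }

        static void openIndexes(EntityStore store) {
            PrimaryIndex<Long, Person> primary =
                store.getPrimaryIndex(Long.class, Person.class);
            SecondaryIndex<String, Long, Person> byName =
                store.getSecondaryIndex(primary, String.class, "name");
            // The getters in this index expose the same classes at runtime:
            assert primary.getKeyClass() == Long.class;
            assert byName.getKeyClass() == String.class;
        }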
    getKeyCreator(Class<V>, String) - Method in class com.sleepycat.collections.TupleSerialFactory
    +
    +
    Creates a SecondaryKeyCreator object for use in configuring a SecondaryDatabase.
    +
    +
    getKeyCreator() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns the user-supplied object used for creating single-valued secondary keys.
    +
    +
    getKeyName() - Method in class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
    +
    Returns the key name, which may be different from the field name.
    +
    +
    getKeyPrefixing() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the key prefixing configuration.
    +
    +
    getKeysDatabase() - Method in class com.sleepycat.persist.SecondaryIndex
    +
    +
    Returns the underlying secondary database that is not associated with the primary database and is used for the SecondaryIndex.keysIndex.
    +
    +
    getKeysOnly() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Returns true if the DiskOrderedCursor is configured to return only keys.
    +
    +
    getKnownClasses() - Method in class com.sleepycat.persist.model.AnnotationModel
    +
     
    +
    getKnownClasses() - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns the names of all known persistent classes.
    +
    +
    getKnownMasterTxnEndVLSN() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the transaction end VLSN on the master known by this node.
    +
    +
    getKnownSpecialClasses() - Method in class com.sleepycat.persist.model.AnnotationModel
    +
     
    +
    getKnownSpecialClasses() - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns the names of all known persistent enum and array classes that may be used to store persistent data.
    +
    +
    getLast(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Moves the cursor to the last key/data pair of the database, and returns that pair.
    +
    +
    getLast(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to the last key/data pair of the database, and returns that pair.
    +
    +
    getLast(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Move the cursor to the last key/data pair of the database, and return that pair.
    +
    +
    getLastCheckpointEnd() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The location in the log of the last checkpoint end.
    +
    +
    getLastCheckpointId() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The Id of the last checkpoint.
    +
    +
    getLastCheckpointInterval() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Byte length from last checkpoint start to the previous checkpoint start.
    +
    +
    getLastCheckpointStart() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The location in the log of the last checkpoint start.
    +
    +
    getLastCommitTimestamp() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The commit timestamp of the last committed transaction on the master, or 0 if not known or this node is not the master.
    +
    +
    getLastCommitVLSN() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The VLSN of the last committed transaction on the master, or 0 if not known or this node is not the master.
    +
    +
    getLastFileInBackupSet() - Method in class com.sleepycat.je.util.DbBackup
    +
    +
    Can only be called in backup mode, after startBackup() has been called.
    +
    +
    getLastKnownUtilization() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    +
    getLastValue() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the last cached value of the sequence.
    +
    +
    getLeafNodeCount() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the number of leaf nodes in the database tree, which can equal the number of records.
    +
    +
    getLeaveReason() - Method in class com.sleepycat.je.rep.monitor.LeaveGroupEvent
    +
    +
    Returns the reason why the node left the group.
    +
    +
    getLeaveTime() - Method in class com.sleepycat.je.rep.monitor.LeaveGroupEvent
    +
    +
    Returns the time at which the node left the group.
    +
    +
    getLNSizeCorrectionFactor() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    in JE 6.3. Adjustments are no longer needed because LN log sizes have been stored in the Btree since JE 6.0.
    +
    +
    +
    getLoadLNs() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Return the configuration of the preload load LNs option.
    +
    +
    getLocalSync() - Method in class com.sleepycat.je.Durability
    +
    +
    Returns the transaction synchronization policy to be used locally when committing a transaction.
    +
    +
    getLocalWrite() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns whether local-write is configured for this transaction.
    +
    +
    getLockBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of bytes of JE cache used for holding locks and transactions, in bytes."
    +
    +
    getLocking() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns true if the database environment is configured for locking.
    +
    +
    getLockMode() - Method in class com.sleepycat.je.ReadOptions
    +
    +
    Returns the LockMode to be used for the operation.
    +
    +
    getLockStats(StatsConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Deprecated. +
    as of 4.0.10, replaced by Environment.getStats(StatsConfig).
    +
    +
    +
    getLockTimeout(TimeUnit) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns the lock timeout setting.
    +
    +
    getLockTimeout() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated.
    +
    +
    getLockTimeout(TimeUnit) - Method in class com.sleepycat.je.Transaction
    +
    +
    Returns the lock request timeout value for the transaction.
    +
    +
    getLogFilesInBackupSet() - Method in class com.sleepycat.je.util.DbBackup
    +
    +
    Get the minimum list of files that must be copied for this backup.
    +
    +
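    The usual hot-backup sequence brackets the copy loop with startBackup/endBackup so the cleaner cannot delete files while they are being copied. A sketch, assuming env is open and backupDir exists:

        import java.io.File;
        import java.nio.file.Files;
        import java.nio.file.Path;
        import com.sleepycat.je.Environment;
        import com.sleepycat.je.util.DbBackup;

        static void hotBackup(Environment env, Path backupDir) throws Exception {
            DbBackup backup = new DbBackup(env);
            backup.startBackup(); // pins the file set against cleaning
            try {
                for (String name : backup.getLogFilesInBackupSet()) {
                    Path src = new File(env.getHome(), name).toPath();
                    Files.copy(src, backupDir.resolve(name));
                }
            } finally {
                backup.endBackup(); // releases the pinned files
            }
        }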
    getLogFilesInBackupSet(long) - Method in class com.sleepycat.je.util.DbBackup
    +
    +
    Deprecated. +
    replaced by DbBackup.getLogFilesInBackupSet(); pass lastFileInPrevBackup to the DbBackup.DbBackup(Environment,long) constructor.
    +
    +
    +
    getLogFilesInSnapshot() - Method in class com.sleepycat.je.util.DbBackup
    +
    +
    Get the list of all active files that are needed for the environment at the point of time when backup mode started, i.e., the current snapshot.
    +
    +
    getLoggingHandler() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns the custom java.util.logging.Handler specified by the application.
    +
    +
    getLoggingHandler() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Returns the custom java.util.logging.Handler specified by the application.
    +
    +
    getLogProviders() - Method in exception com.sleepycat.je.rep.InsufficientLogException
    +
    +
    Returns the members of the replication group that can serve as candidate log providers to supply the logs needed by this node.
    +
    +
    getLogProviders() - Method in class com.sleepycat.je.rep.NetworkRestoreConfig
    +
    +
    Returns the candidate list of data nodes, either ELECTABLE or SECONDARY members, that may be used to obtain log files.
    +
    +
    getLogVersion() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the log version of this node.
    +
    +
    getLSNBatchSize() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Returns the maximum number of LSNs to be sorted that this DiskOrderedCursor is configured for.
    +
    +
    getLSNBatchSize() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Preload is implemented to optimize I/O cost by fetching the records of a Database by disk order, so that disk accesses are sequential rather than random.
    +
    +
    getMainTreeMaxDepth() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the maximum depth of the main database tree.
    +
    +
    getMajor() - Method in class com.sleepycat.je.JEVersion
    +
    +
    Major number of the release version.
    +
    +
    getMaster() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
    The current master node.
    +
    +
    getMasterName() - Method in class com.sleepycat.je.rep.monitor.MemberChangeEvent
    +
    +
    Returns the name of the master at the time of this event.
    +
    +
    getMasterName() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the name of the current master known by this node.
    +
    +
    getMasterNodeName() - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Identifies the master of the replication group, resulting from the last successful election.
    +
    +
    getMasterNodeName() - Method in class com.sleepycat.je.rep.StateChangeEvent
    +
    +
    Returns the node name identifying the master at the time of the event.
    +
    +
    getMasterNodeName() - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Returns the node name associated with the master.
    +
    +
    getMax() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the maximum permitted value of the sequence.
    +
    +
    getMaxBytes() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Return the number of bytes in the cache to stop the preload at.
    +
    +
    getMaxClockDelta(TimeUnit) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the maximum acceptable clock skew between this Replica and its Feeder, which is the node that is the source of its replication stream.
    +
    +
    getMaxDisk() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    A convenience method for getting EnvironmentConfig.MAX_DISK.
    +
    +
    getMaxMillisecs() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Return the number of millisecs to stop the preload after.
    +
    +
    getMaxRetries() - Method in class com.sleepycat.collections.TransactionRunner
    +
    +
    Returns the maximum number of retries that will be performed when deadlocks are detected.
    +
    +
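    TransactionRunner wraps the begin/commit/abort-and-retry loop around a TransactionWorker. A minimal sketch; the body of doWork is application-defined:

        import com.sleepycat.collections.TransactionRunner;
        import com.sleepycat.collections.TransactionWorker;
        import com.sleepycat.je.Environment;

        static void runWithRetries(Environment env) throws Exception {
            TransactionRunner runner = new TransactionRunner(env);
            runner.setMaxRetries(10); // retried on lock conflicts, then rethrown
            runner.run(new TransactionWorker() {
                public void doWork() {
                    // transactional work against stored collections goes here
                }
            });
        }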
    getMaxSeedMillisecs() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Deprecated. +
    this method returns zero and will be removed in a future release.
    +
    +
    +
    getMaxSeedNodes() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Deprecated. +
    this method returns zero and will be removed in a future release.
    +
    +
    +
    getMBeanInfo() - Method in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    getMember(String) - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
    Get administrative information about a node by its node name.
    +
    +
    getMessage() - Method in exception com.sleepycat.je.DatabaseException
    +
     
    +
    getMessage() - Method in exception com.sleepycat.je.EnvironmentFailureException
    +
     
    +
    getMin() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the minimum permitted value of the sequence.
    +
    +
    getMinimizeRecoveryTime() - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Returns the configuration of the minimize recovery time option.
    +
    +
    getMinor() - Method in class com.sleepycat.je.JEVersion
    +
    +
    Minor number of the release version.
    +
    +
    getMinutes() - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Returns the checkpoint time threshold, in minutes.
    +
    +
    getMixedLRUSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of INs in the mixed/priority-1 LRU "
    +
    +
    getModel() - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns the current entity model for this store.
    +
    +
    getModel() - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
    Returns the last configured and stored entity model for this store.
    +
    +
    getModel() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the entity model that defines entity classes and index keys.
    +
    +
    getMonitorNodes() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
    Returns the subset of nodes in the group that monitor group membership but do not maintain replicated environments.
    +
    +
    getMonitorSocketAddress() - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Returns the socket used by this monitor to listen for group changes.
    +
    +
    getMultiKeyCreator() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns the user-supplied object used for creating multi-valued secondary keys.
    +
    +
    getMutableConfig() - Method in class com.sleepycat.je.Environment
    +
    +
    Returns database environment attributes.
    +
    +
    getMutations() - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns the set of mutations that were configured when the store was opened, or if none were configured, the set of mutations that were configured and stored previously.
    +
    +
    getMutations() - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
    Returns the set of mutations that were configured and stored previously.
    +
    +
    getMutations() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the configured mutations for performing lazy evolution of stored instances.
    +
    +
    getNAborts() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    The number of transactions that have aborted.
    +
    +
    getNAcquiresNoWaiters() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of acquires of lock table latch with no contention.
    +
    +
    getNAcquiresNoWaiters() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Number of acquires of lock table latch with no contention.
    +
    +
    getNAcquiresNoWaitSuccessful() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful no-wait acquires of the lock table latch.
    +
    +
    getNAcquiresNoWaitSuccessful() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Number of successful no-wait acquires of the lock table latch.
    +
    +
    getNAcquiresNoWaitUnSuccessful() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of unsuccessful no-wait acquires of the lock table latch.
    +
    +
    getNAcquiresNoWaitUnSuccessful() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Number of unsuccessful no-wait acquires of the lock table latch.
    +
    +
    getNAcquiresSelfOwned() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of acquires of lock table latch when it was already owned by the caller.
    +
    +
    getNAcquiresSelfOwned() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Number of acquires of lock table latch when it was already owned by the caller.
    +
    +
    getNAcquiresWithContention() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of acquires of lock table latch when it was already owned by another thread.
    +
    +
    getNAcquiresWithContention() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Number of acquires of lock table latch when it was already owned by another thread.
    +
    +
    getNActive() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    The number of transactions that are currently active.
    +
    +
    getName() - Method in class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
    +
    Returns the name:"CommitPointConsistencyPolicy", associated with this policy.
    +
    +
    getName() - Method in class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
    +
    Returns the name:"NoConsistencyRequiredPolicy", associated with this policy.
    +
    +
    getName() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
    Returns the name associated with the group.
    +
    +
    getName() - Method in interface com.sleepycat.je.rep.ReplicationNode
    +
    +
    Returns the unique name associated with the node.
    +
    +
    getName() - Method in class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
    +
    Returns the name:"TimeConsistencyPolicy", associated with this policy.
    +
    +
    getName() - Method in interface com.sleepycat.je.ReplicaConsistencyPolicy
    +
    +
    Returns the name used to identify the policy.
    +
    +
    getName() - Method in class com.sleepycat.je.Transaction
    +
    +
    Get the user visible name for the transaction.
    +
    +
    getName() - Method in class com.sleepycat.je.TransactionStats.Active
    +
    +
    The transaction name, including the thread name if available.
    +
    +
    getName() - Method in class com.sleepycat.persist.model.FieldMetadata
    +
    +
    Returns the field name.
    +
    +
    getName() - Method in interface com.sleepycat.persist.raw.RawField
    +
    +
    Returns the name of the field.
    +
    +
    getNBatchesCacheMode() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBatchesCritical() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBatchesDaemon() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBatchesEvictorThread() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBatchesManual() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBegins() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    The number of transactions that have begun.
    +
    +
    getNBINDeltaBlindOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "The number of operations performed blindly in BIN deltas"
    +
    +
    getNBinDeltaDeleteOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of user (non-internal) Cursor and Database delete operations performed in BIN deltas.
    +
    +
    getNBinDeltaGetOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of user (non-internal) Cursor and Database get operations performed in BIN deltas.
    +
    +
    getNBinDeltaInsertOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of user (non-internal) Cursor and Database insert operations performed in BIN deltas (these are insertions performed via the various put methods).
    +
    +
    getNBINDeltasCleaned() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of BIN-deltas cleaned."
    +
    +
    getNBINDeltasDead() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of BIN-deltas that were not found in the tree anymore (deleted)."
    +
    +
    getNBINDeltasFetchMiss() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BIN-deltas (partial BINs) fetched to satisfy btree operations that were not in main cache."
    +
    +
    getNBINDeltasMigrated() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of BIN-deltas migrated."
    +
    +
    getNBINDeltasObsolete() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of BIN-deltas obsolete."
    +
    +
    getNBinDeltaUpdateOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of user (non-internal) Cursor and Database update operations performed in BIN deltas (these are updates performed via the various put methods).
    +
    +
    getNBINsEvictedCacheMode() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBINsEvictedCritical() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBINsEvictedDaemon() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBINsEvictedEvictorThread() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBINsEvictedManual() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    This statistic has been removed. The method returns 0 always.
    +
    +
    +
    getNBINsFetch() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BINs (bottom internal nodes) and BIN-deltas requested by btree operations."
    +
    +
    getNBINsFetchMiss() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of full BINs (bottom internal nodes) and BIN-deltas fetched to satisfy btree operations that were not in main cache."
    +
    +
    getNBINsFetchMissRatio() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "The BIN fetch miss ratio (nBINsFetchMiss / nBINsFetch)"
    +
    +
    getNBINsLoaded() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Returns the number of BINs that were loaded into the cache during the preload() operation.
    +
    +
    getNBINsMutated() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated.
    +
    +
    getNBINsStripped() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated.
    +
    +
    getNBytesEvictedCacheMode() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of bytes evicted by operations for which CacheMode.EVICT_BIN is specified."
    +
    +
    getNBytesEvictedCritical() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of bytes evicted in the application thread because the cache is over budget."
    +
    +
    getNBytesEvictedDeamon() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of bytes evicted by JE deamon threads."
    +
    +
    getNBytesEvictedEvictorThread() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of bytes evicted by evictor pool threads."
    +
    +
    getNBytesEvictedManual() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of bytes evicted by the Environment.evictMemory or during Environment startup."
    +
    +
    getNBytesReadFromWriteQueue() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of bytes read to fulfill file read operations by reading out of the pending write queue.
    +
    +
    getNBytesWrittenFromWriteQueue() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of bytes written from the pending write queue.
    +
    +
    getNCachedBINDeltas() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BIN-deltas (partial BINs) in main cache. This is a subset of the nCachedBINs value."
    +
    +
    getNCachedBINs() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BINs (bottom internal nodes) and BIN-deltas in main cache."
    +
    +
    getNCachedGets() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the number of times that Sequence.get was called and a cached value was returned.
    +
    +
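    getNCachedGets, together with getCurrent and getLastValue above, shows how much allocation traffic the sequence cache absorbs. A sketch of opening and using a cached sequence; the key name "ids" and the cache size are illustrative:

        import java.nio.charset.StandardCharsets;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.Sequence;
        import com.sleepycat.je.SequenceConfig;
        import com.sleepycat.je.SequenceStats;
        import com.sleepycat.je.StatsConfig;

        static void useSequence(Database db) {
            SequenceConfig config = new SequenceConfig();
            config.setAllowCreate(true);
            config.setCacheSize(100); // most gets served without I/O
            Sequence seq = db.openSequence(null,
                new DatabaseEntry("ids".getBytes(StandardCharsets.UTF_8)),
                config);
            long id = seq.get(null, 1); // allocate one value
            SequenceStats stats = seq.getStats(new StatsConfig());
            System.out.println("value " + id
                + ", cached gets: " + stats.getNCachedGets());
            seq.close();
        }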
    getNCachedUpperINs() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of upper INs (non-bottom internal nodes) in main cache."
    +
    +
    getNCacheMiss() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The total number of requests for database objects which were not in memory.
    +
    +
    getNCheckpoints() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The total number of checkpoints run so far.
    +
    +
    getNCleanerDeletions() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of cleaner file deletions."
    +
    +
    getNCleanerDiskRead() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of disk reads by the cleaner."
    +
    +
    getNCleanerEntriesRead() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of log entries read by the cleaner."
    +
    +
    getNCleanerProbeRuns() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. +
    in JE 6.3, always returns zero.
    +
    +
    +
    getNCleanerRevisalRuns() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of cleaner runs that ended in revising expiration info, but not in any cleaning."
    +
    +
    getNCleanerRuns() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of cleaner runs, including two-pass runs."
    +
    +
    getNCleanerTwoPassRuns() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of cleaner two-pass runs."
    +
    +
    getNClusterLNsProcessed() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs processed because they qualify for clustering."
    +
    +
    getNCommits() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    The number of transactions that have committed.
    +
    +
    getNConverted() - Method in class com.sleepycat.persist.evolve.EvolveStats
    +
    +
    The total number of entities converted during eager evolution.
    +
    +
    getNCountMemoryExceeded() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Returns the number of times that the internal memory budget specified by PreloadConfig.setInternalMemoryLimit() was exceeded.
    +
    +
    getNDBINsLoaded() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Deprecated. Returns zero for data written using JE 5.0 and later, but may return non-zero values when reading older data.
    +
    +
    +
    getNDeltaINFlush() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The accumulated number of Delta INs flushed to the log.
    +
    +
    getNDINsLoaded() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Deprecated. Returns zero for data written using JE 5.0 and later, but may return non-zero values when reading older data.
    +
    +
    +
    getNDirtyNodesEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of dirty target nodes logged and evicted."
    +
    +
    getNDupCountLNsLoaded() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Deprecated. Returns zero for data written using JE 5.0 and later, but may return non-zero values when reading older data.
    +
    +
    +
    getNeedReset() - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Tell the MBean if the available set of functionality has changed.
    +
    +
    getNEmbeddedLNs() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Returns the number of embedded LNs encountered during the preload() operation.
    +
    +
    getNEvictionRuns() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of times the background eviction thread is awoken."
    +
    +
    getNEvictPasses() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated.
    +
    +
    getNewName() - Method in class com.sleepycat.persist.evolve.Renamer
    +
    +
    Returns the new class or field name specified in the constructor.
    +
    +
    getNext(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Moves the cursor to the next key/data pair and returns that pair.
    +
    +
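    A sketch of the canonical getNext scan loop over an open Database (the db parameter is assumed); getNext returns OperationStatus.NOTFOUND once the cursor is exhausted.

        // Sketch: iterate every key/data pair in a database with Cursor.getNext.
        import com.sleepycat.je.Cursor;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.LockMode;
        import com.sleepycat.je.OperationStatus;

        class ScanExample {
            static void scan(Database db) {
                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                Cursor c = db.openCursor(null, null);
                try {
                    while (c.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                        System.out.println(key.getData().length + "-byte key read");
                    }
                } finally {
                    c.close(); // always release the cursor
                }
            }
        }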
    getNext(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.DiskOrderedCursor
    +
     
    +
    getNext(DatabaseEntry, DatabaseEntry, LockMode) - Method in interface com.sleepycat.je.ForwardCursor
    +
    +
    Moves the cursor to the next key/data pair and returns that pair.
    +
    +
    getNext(DatabaseEntry, LockMode) - Method in class com.sleepycat.je.JoinCursor
    +
    +
    Returns the next primary key resulting from the join operation.
    +
    +
    getNext(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.JoinCursor
    +
    +
    Returns the next primary key and data resulting from the join operation.
    +
    +
    getNext(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to the next key/data pair and returns that pair.
    +
    +
    getNext(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Move the cursor to the next key/data pair and return that pair.
    +
    +
    getNextDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    If the next key/data pair of the database is a duplicate data record for the current key/data pair, moves the cursor to the next key/data pair of the database and returns that pair.
    +
    +
    getNextDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    If the next key/data pair of the database is a duplicate data record for the current key/data pair, moves the cursor to the next key/data pair of the database and returns that pair.
    +
    +
    getNextDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    If the next key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the next key/data pair of the database and return that pair.
    +
    +
    getNextNoDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Moves the cursor to the next non-duplicate key/data pair and returns that pair.
    +
    +
    getNextNoDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to the next non-duplicate key/data pair and returns that pair.
    +
    +
    getNextNoDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Move the cursor to the next non-duplicate key/data pair and return that pair.
    +
    +
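    A sketch of how getNextNoDup and getNextDup combine to walk a database opened with sorted duplicates (the db parameter and duplicate configuration are assumed): the outer call visits the first record of each distinct key, and the inner call walks that key's remaining duplicates.

        // Sketch: count duplicates per distinct key.
        import com.sleepycat.je.Cursor;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.LockMode;
        import com.sleepycat.je.OperationStatus;

        class DupScanExample {
            static void countDups(Database db) { // assumed: setSortedDuplicates(true)
                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                Cursor c = db.openCursor(null, null);
                try {
                    while (c.getNextNoDup(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                        int count = 1;
                        while (c.getNextDup(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
                            count++;
                        }
                        System.out.println("key has " + count + " record(s)");
                    }
                } finally {
                    c.close();
                }
            }
        }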
    getNFeedersCreated() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of Feeder threads since this node was started.
    +
    +
    getNFeedersShutdown() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of Feeder threads that were shut down, either because this node or the Replica terminated the connection.
    +
    +
    getNFileOpens() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of times a log file has been opened.
    +
    +
    getNFSyncRequests() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of fsyncs requested through the group commit manager.
    +
    +
    getNFSyncs() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of fsyncs issued through the group commit manager.
    +
    +
    getNFSyncTimeouts() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of fsync requests submitted to the group commit manager which timed out.
    +
    +
    getNFullBINFlush() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The accumulated number of full BINs flushed to the log.
    +
    +
    getNFullBINsMiss() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of times a BIN-delta had to be mutated to a full BIN (and as a result a full BIN had to be read in from the log)."
    +
    +
    getNFullINFlush() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The accumulated number of full INs flushed to the log.
    +
    +
    getNGets() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the number of times that Sequence.get was called successfully.
    +
    +
    getNINCompactKeyIN() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of INs that use a compact key representation to minimize the key object representation overhead."
    +
    +
    getNINNoTarget() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of INs that use a compact representation when none of its child nodes are in the main cache."
    +
    +
    getNINsCleaned() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of INs cleaned."
    +
    +
    getNINsDead() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of INs that were not found in the tree anymore (deleted)."
    +
    +
    getNINsLoaded() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Returns the number of INs that were loaded into the cache during the preload() operation.
    +
    +
    getNINsMigrated() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of INs migrated."
    +
    +
    getNINsObsolete() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of INs obsolete."
    +
    +
    getNINSparseTarget() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of INs that use a compact sparse array representation to point to child nodes in the main cache."
    +
    +
    getNLNQueueHits() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs processed without a tree lookup."
    +
    +
    getNLNsCleaned() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs cleaned."
    +
    +
    getNLNsDead() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs that were not found in the tree anymore (deleted)."
    +
    +
    getNLNsEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs evicted as a result of LRU-based eviction (but not CacheMode.EVICT_LN)."
    +
    +
    getNLNsExpired() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of obsolete LNs that were expired."
    +
    +
    getNLNsFetch() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs (data records) requested by btree operations."
    +
    +
    getNLNsFetchMiss() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs (data records) requested by btree operations that were not in main cache."
    +
    +
    getNLNsLoaded() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Returns the number of LNs that were loaded into the cache during the preload() operation.
    +
    +
    getNLNsLocked() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs encountered that were locked."
    +
    +
    getNLNsMarked() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs in temporary DBs that were dirtied by the cleaner and subsequently logging during checkpoint/eviction."
    +
    +
    getNLNsMigrated() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs that were migrated forward in the log by the cleaner."
    +
    +
    getNLNsObsolete() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs obsolete."
    +
    +
    getNLogBuffers() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of log buffers currently instantiated.
    +
    +
    getNLogFSyncs() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The total number of fsyncs of the JE log.
    +
    +
    getNMarkedLNsProcessed() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs processed because they were previously marked for migration."
    +
    +
    getNMaxReplicaLag() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The lag (in VLSNs) associated with the replica that's farthest behind in replaying the replication stream.
    +
    +
    getNMaxReplicaLagName() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The name of the replica that's farthest behind in replaying the replication stream.
    +
    +
    getNNodesEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of target nodes (INs) evicted from the main cache."
    +
    +
    getNNodesExplicitlyEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated.
    +
    +
    getNNodesMovedToDirtyLRU() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of nodes (INs) moved from the mixed/priority-1 to the dirty/priority-2 LRU list."
    +
    +
    getNNodesMutated() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of target BINs mutated to BIN-deltas."
    +
    +
    getNNodesPutBack() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of target nodes (INs) moved to the cold end of the LRU list without any action taken on them."
    +
    +
    getNNodesScanned() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. This statistic has no meaning after the implementation of the new evictor in JE 6.0. The method always returns 0.
    +
    +
    +
    getNNodesSelected() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated.
    +
    +
    getNNodesSkipped() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of nodes (INs) that did not require any action."
    +
    +
    getNNodesStripped() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of target BINs whose child LNs were evicted (stripped)."
    +
    +
    getNNodesTargeted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of nodes (INs) selected as eviction targets."
    +
    +
    getNNotResident() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of requests for database objects not contained within the in-memory data structures.
    +
    +
    getNodeHostname() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns the hostname component of the nodeHost property.
    +
    +
    getNodeHostname() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the hostname component of the nodeHost property.
    +
    +
    getNodeHostPort() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Returns the hostname and port associated with this node.
    +
    +
    getNodeHostPort() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns the hostname and port associated with this node.
    +
    +
    getNodeHostPort() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the hostname and port associated with this node.
    +
    +
    getNodeMaxDupTreeEntries() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Deprecated. This property no longer has any effect and zero is always returned; DatabaseConfig.getNodeMaxEntries() should be used instead.
    +
    +
    +
    getNodeMaxEntries() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the maximum number of children a B+Tree node can have.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns the user-defined nodeName for the Environment.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Returns the unique name associated with this Arbiter.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Returns the group-wide unique name associated with the monitor.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.rep.monitor.MonitorChangeEvent
    +
    +
    Returns the name of the node associated with the event.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns the unique name associated with this monitor.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the name of the node whose state is requested.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    Returns the unique name used to identify this replicated environment.
    +
    +
    getNodeName() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the unique name associated with this node.
    +
    +
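    A sketch (hypothetical group, node, and host values) showing where the nodeHost property queried by getNodeHostname/getNodePort comes from: it is set as a single "host:port" string on the ReplicationConfig.

        // Sketch: configure a replicated node's identity and address.
        import com.sleepycat.je.rep.ReplicationConfig;

        class RepConfigExample {
            static ReplicationConfig make() {
                ReplicationConfig rc = new ReplicationConfig();
                rc.setGroupName("demoGroup");          // hypothetical group name
                rc.setNodeName("node1");               // hypothetical node name
                rc.setNodeHostPort("localhost:5001");  // combined host:port property
                System.out.println(rc.getNodeHostname() + ":" + rc.getNodePort());
                return rc;
            }
        }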
    getNodePort() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns the port component of the nodeHost property.
    +
    +
    getNodePort() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the port component of the nodeHost property.
    +
    +
    getNodePriority() - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Returns the election priority associated with the node.
    +
    +
    getNodes() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
    Returns the set of all nodes in the group.
    +
    +
    getNodeState() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the replication state of this node.
    +
    +
    getNodeState() - Method in class com.sleepycat.je.rep.util.DbPing
    +
     
    +
    getNodeState(ReplicationNode, int) - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Returns the state of a replicated node and the state of the application in which the node is running.
    +
    +
    getNodeType() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the NodeType of this node.
    +
    +
    getNonEmptyBins() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of BINs encountered by the INCompressor that were not actually empty when the compressor ran.
    +
    +
    getNonSticky() - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Returns the non-sticky setting.
    +
    +
    getNOpenFiles() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of files currently open in the file cache.
    +
    +
    getNoSort() - Method in class com.sleepycat.je.JoinConfig
    +
    +
    Returns whether automatic sorting of the input cursors is disabled.
    +
    +
    getNoSync() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Deprecated.
    +
    +
    getNotificationInfo() - Method in class com.sleepycat.je.jmx.JEMBean
    +
    +
    Get notification metadata for this MBean.
    +
    +
    getNotificationInfo(Environment) - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    No notifications are supported.
    +
    +
    getNoWait() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns true if the transaction is configured to not wait if a lock request cannot be immediately granted.
    +
    +
    getNOwners() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Total lock owners in lock table.
    +
    +
    getNOwners() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Total lock owners in lock table.
    +
    +
    getNPendingLNsLocked() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of pending LNs that could not be locked for migration because of a long duration application lock."
    +
    +
    getNPendingLNsProcessed() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs processed because they were previously locked."
    +
    +
    getNProtocolBytesRead() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of bytes of Replication Stream read over the network.
    +
    +
    getNProtocolBytesWritten() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of Replication Stream bytes written over the network.
    +
    +
    getNProtocolEntriesWrittenOldVersion() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Returns the number of messages containing log entries that were written to the replication stream using the previous log format to support replication to a replica running an earlier version during an upgrade.
    +
    +
    getNProtocolMessageBatches() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of Replication Stream message batches written to the network.
    +
    +
    getNProtocolMessagesBatched() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of Replication Stream messages that were written as part of a message batch instead of being written individually.
    +
    +
    getNProtocolMessagesRead() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of Replication Stream messages read over the network.
    +
    +
    getNProtocolMessagesWritten() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The total number of Replication Stream messages written over the network.
    +
    +
    getNRandomReadBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of bytes read which required repositioning the disk head more than 1MB from the previous file position.
    +
    +
    getNRandomReads() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of disk reads which required repositioning the disk head more than 1MB from the previous file position.
    +
    +
    getNRandomWriteBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of bytes written which required repositioning the disk head more than 1MB from the previous file position.
    +
    +
    getNRandomWrites() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of disk writes which required repositioning the disk head by more than 1MB from the previous file position.
    +
    +
    getNRead() - Method in class com.sleepycat.persist.evolve.EvolveStats
    +
    +
    The total number of entities read during eager evolution.
    +
    +
    getNReadLocks() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Total read locks currently held.
    +
    +
    getNReadLocks() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Total read locks currently held.
    +
    +
    getNReadsFromWriteQueue() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of file read operations which were fulfilled by reading out of the pending write queue.
    +
    +
    getNReleases() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of releases of the lock table latch.
    +
    +
    getNReleases() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Number of releases of the lock table latch.
    +
    +
    getNRepeatFaultReads() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of reads which had to be repeated when faulting in an object from disk because the read chunk size controlled by je.log.faultReadSize is too small.
    +
    +
    getNRepeatIteratorReads() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of attempts to read a log entry larger than the read buffer size during which the log buffer couldn\'t be grown enough to accommodate the object."
    +
    +
    getNReplayAborts() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of abort records which were replayed while the node was in the Replica state.
    +
    +
    getNReplayCommitAcks() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of commit log records that needed to be acknowledged to the Master by this node when it was a Replica.
    +
    +
    getNReplayCommitNoSyncs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of commitNoSync() calls executed when satisfying transaction commit acknowledgment requests from the Master.
    +
    +
    getNReplayCommits() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of commit log records that were replayed by this node when it was a Replica.
    +
    +
    getNReplayCommitSyncs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of commitSync() calls executed when satisfying transaction commit acknowledgment requests from the Master.
    +
    +
    getNReplayCommitWriteNoSyncs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of commitWriteNoSync() calls executed when satisfying transaction commit acknowledgment requests from the Master.
    +
    +
    getNReplayGroupCommitMaxExceeded() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of group commits that were initiated due to the max group size being exceeded.
    +
    +
    getNReplayGroupCommits() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of group commit operations.
    +
    +
    getNReplayGroupCommitTimeouts() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of group commits that were initiated due to the group timeout interval being exceeded.
    +
    +
    getNReplayGroupCommitTxns() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of replay transaction commits that were part of a group commit operation.
    +
    +
    getNReplayLNs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of data records (creation, update, deletion) which were replayed while the node was in the Replica state.
    +
    +
    getNReplayNameLNs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of NameLN records which were replayed while the node was in the Replica state.
    +
    +
    getNRequests() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Total number of lock requests to date.
    +
    +
    getNRequests() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Total number of lock requests to date.
    +
    +
    getNRootNodesEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of database root nodes (INs) evicted."
    +
    +
    getNSequentialReadBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of bytes read which did not require repositioning the disk head more than 1MB from the previous file position.
    +
    +
    getNSequentialReads() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of disk reads which did not require repositioning the disk head more than 1MB from the previous file position.
    +
    +
    getNSequentialWriteBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of bytes written which did not require repositioning the disk head more than 1MB from the previous file position.
    +
    +
    getNSequentialWrites() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of disk writes which did not require repositioning the disk head by more than 1MB from the previous file position.
    +
    +
    getNSharedCacheEnvironments() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of Environments sharing the main cache."
    +
    +
    getNTempBufferWrites() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of writes which had to be completed using the temporary marshalling buffer because the fixed size log buffers specified by je.log.totalBufferBytes and je.log.numBuffers were not large enough.
    +
    +
    getNThreadUnavailable() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of eviction tasks that were submitted to the background evictor pool, but were refused because all eviction threads were busy."
    +
    +
    getNToBeCleanedLNsProcessed() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Accumulated number of LNs processed because they are soon to be cleaned."
    +
    +
    getNTotalLocks() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Total locks currently in lock table.
    +
    +
    getNTotalLocks() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Total locks currently in lock table.
    +
    +
    getNTxnsAcked() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of transactions that were successfully acknowledged based upon the Durability.ReplicaAckPolicy policy associated with the transaction commit.
    +
    +
    getNTxnsNotAcked() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of transactions that were not acknowledged as required by the Durability.ReplicaAckPolicy policy associated with the transaction commit.
    +
    +
    getNumericVersionString() - Method in class com.sleepycat.je.JEVersion
    +
    +
    The numeric version string, without the patch tag.
    +
    +
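    A sketch showing the version accessors indexed here, applied to the running library's JEVersion.CURRENT_VERSION constant:

        // Sketch: query the JE library version at runtime.
        import com.sleepycat.je.JEVersion;

        class VersionExample {
            public static void main(String[] args) {
                JEVersion v = JEVersion.CURRENT_VERSION;
                System.out.println(v.getNumericVersionString()); // e.g. "7.5.11", no patch tag
                System.out.println("patch number: " + v.getPatch());
            }
        }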
    getNumRetries() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns the number of times a ping thread attempts to contact a node before deeming it unreachable.
    +
    +
    getNUpperINsEvictedCacheMode() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. This statistic has been removed. The method always returns 0.
    +
    +
    +
    getNUpperINsEvictedCritical() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. This statistic has been removed. The method always returns 0.
    +
    +
    +
    getNUpperINsEvictedDaemon() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. This statistic has been removed. The method always returns 0.
    +
    +
    +
    getNUpperINsEvictedEvictorThread() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. This statistic has been removed. The method always returns 0.
    +
    +
    +
    getNUpperINsEvictedManual() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Deprecated. This statistic has been removed. The method always returns 0.
    +
    +
    +
    getNUpperINsFetch() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of Upper INs (non-bottom internal nodes) requested by btree operations."
    +
    +
    getNUpperINsFetchMiss() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of Upper INs (non-bottom internal nodes) requested by btree operations that were not in main cache."
    +
    +
    getNWaiters() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Total transactions waiting for locks.
    +
    +
    getNWaiters() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Total transactions waiting for locks.
    +
    +
    getNWaits() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Total number of lock waits to date.
    +
    +
    getNWaits() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Total number of lock waits to date.
    +
    +
    getNWriteLocks() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Total write locks currently held.
    +
    +
    getNWriteLocks() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Total write locks currently held.
    +
    +
    getNWriteQueueOverflow() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of write operations which would overflow the Write Queue.
    +
    +
    getNWriteQueueOverflowFailures() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of write operations which would overflow the Write Queue and could not be queued.
    +
    +
    getNWritesFromWriteQueue() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of file write operations executed from the pending write queue.
    +
    +
    getNXAAborts() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    The number of XA transactions that have aborted.
    +
    +
    getNXACommits() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    The number of XA transactions that have committed.
    +
    +
    getNXAPrepares() - Method in class com.sleepycat.je.TransactionStats
    +
    +
    The number of XA transactions that have been prepared.
    +
    +
    getOffHeapAllocFailures() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of off-heap allocation failures due to lack of system memory."
    +
    +
    getOffHeapAllocOverflows() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of off-heap allocation attempts that exceeded the cache size."
    +
    +
    getOffHeapBINsLoaded() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BINs loaded from the off-heap cache."
    +
    +
    getOffHeapBINsStored() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BINs stored into the off-heap cache."
    +
    +
    getOffHeapCachedBINDeltas() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BIN-deltas residing in the off-heap cache."
    +
    +
    getOffHeapCachedBINs() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BINs (full BINs and BIN-deltas) residing in the off-heap cache."
    +
    +
    getOffHeapCachedLNs() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs residing in the off-heap cache."
    +
    +
    getOffHeapCacheSize() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    A convenience method for getting EnvironmentConfig.MAX_OFF_HEAP_MEMORY.
    +
    +
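    A sketch of how the off-heap cache size is typically adjusted through the mutable config; the setOffHeapCacheSize setter is assumed here as the counterpart of the getter indexed above.

        // Sketch: enable a 1 GB off-heap cache on an open Environment.
        import com.sleepycat.je.Environment;
        import com.sleepycat.je.EnvironmentMutableConfig;

        class OffHeapExample {
            static void enableOffHeap(Environment env) {
                EnvironmentMutableConfig mc = env.getMutableConfig();
                mc.setOffHeapCacheSize(1024L * 1024 * 1024); // assumed setter for MAX_OFF_HEAP_MEMORY
                env.setMutableConfig(mc);
                System.out.println("off-heap bytes: " + mc.getOffHeapCacheSize());
            }
        }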
    getOffHeapCriticalNodesTargeted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of nodes targeted in \'critical eviction\' mode."
    +
    +
    getOffHeapDirtyNodesEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of target BINs evicted from the off-heap cache that were dirty and therefore were logged."
    +
    +
    getOffHeapLNsEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs evicted from the off-heap cache as a result of BIN stripping."
    +
    +
    getOffHeapLNsLoaded() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs loaded from the off-heap cache."
    +
    +
    getOffHeapLNsStored() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs stored into the off-heap cache."
    +
    +
    getOffHeapLRUSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LRU entries used for the off-heap cache."
    +
    +
    getOffHeapNodesEvicted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of target BINs (including BIN-deltas) evicted from the off-heap cache."
    +
    +
    getOffHeapNodesMutated() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of off-heap target BINs mutated to BIN-deltas."
    +
    +
    getOffHeapNodesSkipped() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of off-heap target BINs on which no action was taken."
    +
    +
    getOffHeapNodesStripped() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of target BINs whose off-heap child LNs were evicted (stripped)."
    +
    +
    getOffHeapNodesTargeted() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of BINs selected as off-heap eviction targets."
    +
    +
    getOffHeapThreadUnavailable() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of eviction tasks that were submitted to the background off-heap evictor pool, but were refused because all eviction threads were busy."
    +
    +
    getOffHeapTotalBlocks() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Total number of memory blocks in off-heap cache."
    +
    +
    getOffHeapTotalBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Total number of estimated bytes in off-heap cache."
    +
    +
    getOffset() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Returns the byte offset into the data array.
    +
    +
    getOperationList(Environment) - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Get MBean operation metadata for this environment.
    +
    +
    getOverrideBtreeComparator() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the override setting for the btree comparator.
    +
    +
    getOverrideDuplicateComparator() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the override setting for the duplicate comparator.
    +
    +
    getOwnerTxnIds() - Method in exception com.sleepycat.je.LockConflictException
    +
    +
    Returns an array of longs containing transaction ids of owners at the time of the timeout.
    +
    +
    getPackedIntByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Returns the byte length of a packed integer.
    +
    +
    getPackedLongByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Returns the byte length of a packed long integer.
    +
    +
    getParentId() - Method in class com.sleepycat.je.TransactionStats.Active
    +
    +
    The transaction ID of the parent transaction (or 0, if no parent).
    +
    +
    getPartial() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Returns whether this DatabaseEntry is configured to read or write partial records.
    +
    +
    getPartialLength() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Returns the length of the partial record being read or written by the application, in bytes.
    +
    +
    getPartialOffset() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Returns the offset of the partial record being read or written by the application, in bytes.
    +
    +
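    A sketch of the partial-record mechanism these three getters describe: setPartial(offset, length, true) configures a DatabaseEntry so a read fetches only the requested byte range (db and keyBytes are assumed inputs).

        // Sketch: fetch only the first 16 bytes of a record's data.
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.LockMode;

        class PartialReadExample {
            static void readPrefix(Database db, byte[] keyBytes) {
                DatabaseEntry key = new DatabaseEntry(keyBytes);
                DatabaseEntry data = new DatabaseEntry();
                data.setPartial(0, 16, true); // offset 0, length 16, partial enabled
                db.get(null, key, data, LockMode.DEFAULT);
                // The getters indexed above now reflect the partial configuration:
                assert data.getPartial() && data.getPartialOffset() == 0
                       && data.getPartialLength() == 16;
            }
        }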
    getPatch() - Method in class com.sleepycat.je.JEVersion
    +
    +
    Patch number of the release version.
    +
    +
    getPdescriptor(Class) - Static method in class com.sleepycat.util.ConfigBeanInfoBase
    +
     
    +
    getPendingLNQueueSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Number of LNs pending because they were locked and could not be migrated."
    +
    +
    getPermissibleLag(TimeUnit) - Method in class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
    +
    Returns the allowed time lag associated with this policy.
    +
    +
    getPersistentFields() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Returns an unmodifiable list of metadata for the persistent fields in this class, or null if the default rules for persistent fields should be used.
    +
    +
    getPort() - Method in interface com.sleepycat.je.rep.ReplicationNode
    +
    +
    Returns the port number associated with the node.
    +
    +
    getPrev(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Moves the cursor to the previous key/data pair and returns that pair.
    +
    +
    getPrev(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to the previous key/data pair and returns that pair.
    +
    +
    getPrev(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Move the cursor to the previous key/data pair and return that pair.
    +
    +
    getPrevDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    If the previous key/data pair of the database is a duplicate data record for the current key/data pair, moves the cursor to the previous key/data pair of the database and returns that pair.
    +
    +
    getPrevDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    If the previous key/data pair of the database is a duplicate data record for the current key/data pair, moves the cursor to the previous key/data pair of the database and returns that pair.
    +
    +
    getPrevDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    If the previous key/data pair of the database is a duplicate data record for the current key/data pair, move the cursor to the previous key/data pair of the database and return that pair.
    +
    +
    getPrevNoDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Moves the cursor to the previous non-duplicate key/data pair and returns that pair.
    +
    +
    getPrevNoDup(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Moves the cursor to the previous non-duplicate key/data pair and returns that pair.
    +
    +
    getPrevNoDup(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Move the cursor to the previous non-duplicate key/data pair and return that pair.
    +
    +
    getPriDeleteFailOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of failed primary DB deletion operations.
    +
    +
    getPriDeleteOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful primary DB deletion operations.
    +
    +
    getPriInsertFailOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of failed primary DB insertion operations.
    +
    +
    getPriInsertOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful primary DB insertion operations.
    +
    +
    getPrimaryConfig(Class) - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns the default primary database Berkeley DB engine API configuration for an entity class.
    +
    +
    getPrimaryDatabase() - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    Returns the primary Database associated with this cursor.
    +
    +
    getPrimaryDatabase() - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Returns the primary database associated with this secondary database.
    +
    +
    getPrimaryIndex(Class<PK>, Class<E>) - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns the primary index for a given entity class, opening it if necessary.
    +
    +
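    A sketch of the Direct Persistence Layer call indexed above; the Person entity class is hypothetical, invented only to show the primary-key/index pairing.

        // Sketch: open a DPL primary index and store an entity.
        import com.sleepycat.persist.EntityStore;
        import com.sleepycat.persist.PrimaryIndex;
        import com.sleepycat.persist.model.Entity;
        import com.sleepycat.persist.model.PrimaryKey;

        @Entity
        class Person {                       // hypothetical entity class
            @PrimaryKey
            long id;
            String name;
        }

        class DplExample {
            static void store(EntityStore store) {
                PrimaryIndex<Long, Person> byId =
                    store.getPrimaryIndex(Long.class, Person.class);
                Person p = new Person();
                p.id = 1;
                p.name = "Ada";
                byId.put(p);                 // insert or update by primary key
            }
        }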
    getPrimaryIndex(String) - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
    Opens the primary index for a given entity class.
    +
    +
    getPrimaryIndex() - Method in class com.sleepycat.persist.SecondaryIndex
    +
    +
    Returns the primary index associated with this secondary index.
    +
    +
    getPrimaryKey() - Method in exception com.sleepycat.je.SecondaryReferenceException
    +
    +
    Returns the primary key being accessed during the failure.
    +
    +
    getPrimaryKey() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Returns the primary key metadata for a key declared in this class, or null if none is declared.
    +
    +
    getPrimaryKey() - Method in class com.sleepycat.persist.model.EntityMetadata
    +
    +
    Returns the primary key metadata for this entity.
    +
    +
    getPrimitiveBinding(Class<T>) - Static method in class com.sleepycat.bind.tuple.TupleBinding
    +
    +
    Creates a tuple binding for a primitive Java class.
    +
    +
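    A sketch of the built-in primitive binding this entry describes, round-tripping an Integer through a DatabaseEntry:

        // Sketch: serialize and deserialize with a primitive TupleBinding.
        import com.sleepycat.bind.tuple.TupleBinding;
        import com.sleepycat.je.DatabaseEntry;

        class BindingExample {
            public static void main(String[] args) {
                TupleBinding<Integer> binding =
                    TupleBinding.getPrimitiveBinding(Integer.class);
                DatabaseEntry entry = new DatabaseEntry();
                binding.objectToEntry(42, entry);          // write 42 into the entry
                Integer back = binding.entryToObject(entry);
                System.out.println(back);                  // prints 42
            }
        }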
    getPrintInfo() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns true if Environment.verify and Database.verify are configured to print basic verification information.
    +
    +
    getPriPositionOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful primary DB position operations.
    +
    +
    getPriSearchFailOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of failed primary DB key search operations.
    +
    +
    getPriSearchOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful primary DB key search operations.
    +
    +
    getPriUpdateOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful primary DB update operations.
    +
    +
    getProcessedBins() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    The number of BINs that were successfully processed by the INCompressor.
    +
    +
    getProgressListener() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Return the ProgressListener for this PreloadConfig.
    +
    +
    getPropagateExceptions() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns true if Environment.verify and Database.verify are configured to propagate exceptions found during verification.
    +
    +
    getPropertiesInfo(Class) - Static method in class com.sleepycat.util.ConfigBeanInfoBase
    +
     
    +
    getPropertyDescriptors() - Method in class com.sleepycat.je.rep.monitor.MonitorConfigBeanInfo
    +
     
    +
    getPropertyDescriptors() - Method in class com.sleepycat.persist.evolve.EvolveConfigBeanInfo
    +
     
    +
    getPropertyDescriptors() - Method in class com.sleepycat.persist.StoreConfigBeanInfo
    +
     
    +
    getPropertyDescriptors(Class) - Method in class com.sleepycat.util.ConfigBeanInfoBase
    +
    +
    Gets the bean's PropertyDescriptors.
    +
    +
    getProps() - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
     
    +
    getProtectedLogSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Bytes used by all protected data files: the subset of reserved files that are temporarily protected and cannot be deleted."
    +
    +
    getProtectedLogSizeMap() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "A breakdown of protectedLogSize as a map of protecting entity name to protected size in bytes."
    +
    +
    getProtocolBytesReadRate() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Bytes read throughput, in terms of bytes received from the replication network channels per second.
    +
    +
    getProtocolBytesWriteRate() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Bytes written throughput, in terms of bytes written to the replication network channels per second.
    +
    +
    getProtocolMessageReadRate() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Incoming replication message throughput, in terms of messages received from the replication network channels per second.
    +
    +
    getProtocolMessageWriteRate() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Outgoing message throughput, in terms of messages written to the replication network channels per second.
    +
    +
    getProtocolReadNanos() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of nanoseconds spent reading from the network channel.
    +
    +
    getProtocolWriteNanos() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The number of nanoseconds spent writing to the network channel.
    +
    +
    getProxiedClassName() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Returns the class name of the proxied class if this class is a PersistentProxy, or null otherwise.
    +
    +
    getQueueSize() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Returns the maximum number of entries in the queue before the DiskOrderedCursor producer thread blocks.
    +
    +
    getRangeMax() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns the maximum value for the sequence.
    +
    +
    getRangeMin() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns the minimum value for the sequence.
    +
    +
    getRawType(String) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns the type information for the current version of a given class, or null if the class is not currently persistent.
    +
    +
    getRawTypeVersion(String, int) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns the type information for a given version of a given class, or null if the given version of the class is unknown.
    +
    +
    getReadCommitted() - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Returns true if read operations performed by the cursor are configured to obey read committed isolation.
    +
    +
    getReadCommitted() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns true if the transaction is configured for read committed isolation.
    +
    +
    getReadIntLength(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be read by PackedInteger.readInt(byte[], int).
    +
    +
    getReadLongLength(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be read by PackedInteger.readLong(byte[], int).
    +
    +
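    A sketch of the packed-integer length helpers indexed above: the getRead*Length methods report how many bytes a packed value occupies, so a decoder can advance its offset without decoding twice.

        // Sketch: write, measure, and read back a packed int.
        import com.sleepycat.util.PackedInteger;

        class PackedExample {
            public static void main(String[] args) {
                byte[] buf = new byte[PackedInteger.MAX_LENGTH];
                int end = PackedInteger.writeInt(buf, 0, 1234567); // offset past the value
                int len = PackedInteger.getReadIntLength(buf, 0);  // bytes the value occupies
                System.out.println(PackedInteger.readInt(buf, 0) + " in " + len + " bytes");
                assert len == end; // value was written starting at offset 0
            }
        }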
    getReadOnly() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns true if the database is configured in read-only mode.
    +
    +
    getReadOnly() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns true if the database environment is configured to be read only.
    +
    +
    getReadOnly() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns whether read-only is configured for this transaction.
    +
    +
    getReadOnly() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the read-only configuration property.
    +
    +
    getReadSortedIntLength(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be read by PackedInteger.readSortedInt(byte[], int).
    +
    +
    getReadSortedLongLength(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be read by PackedInteger.readSortedLong(byte[], int).
    +
    +
    getReadUncommitted() - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Returns true if read operations performed by the cursor are configured to return modified but not yet committed data.
    +
    +
    getReadUncommitted() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns true if read operations performed by the transaction are configured to return modified but not yet committed data.
    +
    +
    getReceiveBufferSize() - Method in class com.sleepycat.je.rep.NetworkRestoreConfig
    +
    +
    Returns the size of the receive buffer associated with the socket used to transfer files during the NetworkRestore operation.
    +
    +
    getRecoveryProgressListener() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Return the ProgressListener to be used at this environment startup.
    +
    +
    getRelatches() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    Returns the number of latch upgrades (relatches) required while operating on this database's BTree.
    +
    +
    getRelatchesRequired() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Returns the number of latch upgrades (relatches) required while operating on this Environment.
    +
    +
    getRelatedEntity() - Method in class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
    +
    Returns the class name of the related (foreign) entity, for which foreign key constraints are specified using the SecondaryKey.relatedEntity() annotation.
    +
    +
    getRelationship() - Method in class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
    +
    Returns the relationship between instances of the entity class and the secondary keys.
    +
    +
    getRenamer(String, int, String) - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns the renamer mutation for the given class, version and field, or null if none exists.
    +
    +
    getRenamers() - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns an unmodifiable collection of all renamer mutations.
    +
    +
    getRepConfig() - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    Return the replication configuration that has been used to create this handle.
    +
    +
    getRepenvUUID() - Method in class com.sleepycat.je.CommitToken
    +
     
    +
    getRepGroup() - Method in class com.sleepycat.je.rep.monitor.GroupChangeEvent
    +
    +
    Returns the current description of the replication group.
    +
    +
    getReplayElapsedTxnTime() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The total elapsed time in milliseconds spent replaying committed and aborted transactions.
    +
    +
    getReplayMaxCommitProcessingNanos() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The maximum time taken to replay a transaction commit operation.
    +
    +
    getReplayMinCommitProcessingNanos() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The minimum time taken to replay a transaction commit operation.
    +
    +
    getReplayQueueOverflow() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
    The number of attempts to queue a response when the queue was full.
    +
    +
    getReplayTotalCommitProcessingNanos() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The total time spent to replay all commit operations.
    +
    +
    getReplicaAck() - Method in class com.sleepycat.je.Durability
    +
    +
    Returns the replica acknowledgment policy used by the master when committing changes to a replicated environment.
    +
    +
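    A sketch of how a Durability carrying the replica-side policies returned by getReplicaAck/getReplicaSync is constructed:

        // Sketch: master syncs, replicas skip sync, a simple majority must ack.
        import com.sleepycat.je.Durability;
        import com.sleepycat.je.Durability.ReplicaAckPolicy;
        import com.sleepycat.je.Durability.SyncPolicy;

        class DurabilityExample {
            public static void main(String[] args) {
                Durability d = new Durability(SyncPolicy.SYNC,     // master sync policy
                                              SyncPolicy.NO_SYNC,  // replica sync policy
                                              ReplicaAckPolicy.SIMPLE_MAJORITY);
                System.out.println(d.getReplicaAck() + " / " + d.getReplicaSync());
            }
        }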
    getReplicaAckTimeout(TimeUnit) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Returns the configured replica timeout value.
    +
    +
    getReplicaDelayMap() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Returns a map from replica node name to the delay, in milliseconds, between when a transaction was committed on the master and when the master learned that the transaction was processed on the replica, if known.
    +
    +
    getReplicaLastCommitTimestampMap() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Returns a map from replica node name to the commit timestamp of the last committed transaction that was processed on the replica, if known.
    +
    +
    getReplicaLastCommitVLSNMap() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Returns a map from replica node name to the VLSN of the last committed transaction that was processed on the replica, if known.
    +
    +
    getReplicaSync() - Method in class com.sleepycat.je.Durability
    +
    +
    Returns the transaction synchronization policy to be used by the replica as it replays a transaction that needs an acknowledgment.
    +
    +
    getReplicated() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the replicated property for the database.
    +
    +
    getReplicated() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the replicated property for the store.
    +
    +
    getReplicaVLSNLagMap() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Returns a map from replica node name to the lag, in VLSNs, between the replication state of the replica and the master, if known.
    +
    +
    getReplicaVLSNRateMap() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Returns a map from replica node name to a moving average of the rate, in VLSNs per minute, that the replica is processing replication data, if known.
    +
    +
    getRepMutableConfig() - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
     
    +
    getRepStats(StatsConfig) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    Returns statistics associated with this environment.
    +
    +
    getRequiredEvictBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
Deprecated.
The method always returns 0.
    +
    +
    +
    getRequiredNodeCount() - Method in exception com.sleepycat.je.rep.InsufficientReplicasException
    +
    +
Returns the number of nodes (including the master) that were required to be active in order to satisfy the Replica ack policy.
    +
    +
    getReservedLogSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Bytes used by all reserved data files: files that have beencleaned and can be deleted if they are not protected."
    +
    +
    getRetainLogFiles() - Method in class com.sleepycat.je.rep.NetworkRestoreConfig
    +
    +
Returns a boolean indicating whether existing log files should be retained or deleted.
    +
    +
    getRetryInterval() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Returns the number of milliseconds between ping thread retries.
    +
    +
    getRootSplits() - Method in class com.sleepycat.je.BtreeStats
    +
    +
    The number of times the root of the BTree was split.
    +
    +
    getSearchBoth(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
Moves the cursor to the specified key/data pair, where both the key and data items must match.
    +
    +
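A hedged sketch of an exact key/data lookup with this method; the helper name is illustrative and `db` is assumed to be an open Database (typically one configured for sorted duplicates):

    import com.sleepycat.je.Cursor;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.LockMode;
    import com.sleepycat.je.OperationStatus;
    import java.nio.charset.StandardCharsets;

    public class SearchBothExample {
        // True only when this exact key/data pair exists in the database.
        static boolean pairExists(Database db, String key, String value) {
            DatabaseEntry k = new DatabaseEntry(key.getBytes(StandardCharsets.UTF_8));
            DatabaseEntry v = new DatabaseEntry(value.getBytes(StandardCharsets.UTF_8));
            Cursor cursor = db.openCursor(null, null);
            try {
                return cursor.getSearchBoth(k, v, LockMode.DEFAULT)
                        == OperationStatus.SUCCESS;
            } finally {
                cursor.close(); // cursors must always be closed
            }
        }
    }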
    getSearchBoth(Transaction, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Database
    +
    +
Retrieves the key/data pair with the given key and data value, that is, both the key and data items must match.
    +
    +
    getSearchBoth(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    This operation is not allowed with this method signature.
    +
    +
    getSearchBoth(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Move the cursor to the specified secondary and primary key, where both the primary and secondary key items must match.
    +
    +
    getSearchBoth(Transaction, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    This operation is not allowed with this method signature.
    +
    +
    getSearchBoth(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
Retrieves the key/data pair with the specified secondary and primary key, that is, both the primary and secondary key items must match.
    +
    +
    getSearchBothRange(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
Moves the cursor to the specified key and closest matching data item of the database.
    +
    +
    getSearchBothRange(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    This operation is not allowed with this method signature.
    +
    +
    getSearchBothRange(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Move the cursor to the specified secondary key and closest matching primary key of the database.
    +
    +
    getSearchKey(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
Moves the cursor to the given key of the database, and returns the datum associated with the given key.
    +
    +
    getSearchKey(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Moves the cursor to the given key of the database, and returns the datum associated with the given key.
    +
    +
    getSearchKey(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Move the cursor to the given key of the database, and return the datum associated with the given key.
    +
    +
    getSearchKeyRange(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
Moves the cursor to the closest matching key of the database, and returns the data item associated with the matching key.
    +
    +
    getSearchKeyRange(DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Moves the cursor to the closest matching key of the database, and returns the data item associated with the matching key.
    +
    +
    getSearchKeyRange(DatabaseEntry, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
Move the cursor to the closest matching key of the database, and return the data item associated with the matching key.
    +
    +
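Unlike the exact-match methods, the range variants position the cursor at the closest matching key instead of failing, which makes them the natural starting point for an ordered scan. A sketch under the assumption that `db` is an open Database; names are illustrative:

    import com.sleepycat.je.Cursor;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.LockMode;
    import com.sleepycat.je.OperationStatus;
    import java.nio.charset.StandardCharsets;

    public class KeyRangeScanExample {
        // Prints every key greater than or equal to fromKey, in key order.
        static void scanFrom(Database db, String fromKey) {
            DatabaseEntry key =
                new DatabaseEntry(fromKey.getBytes(StandardCharsets.UTF_8));
            DatabaseEntry data = new DatabaseEntry();
            Cursor cursor = db.openCursor(null, null);
            try {
                // Position at the smallest key >= fromKey, then walk forward.
                OperationStatus status =
                    cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
                while (status == OperationStatus.SUCCESS) {
                    System.out.println(
                        new String(key.getData(), StandardCharsets.UTF_8));
                    status = cursor.getNext(key, data, LockMode.DEFAULT);
                }
            } finally {
                cursor.close();
            }
        }
    }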
    getSecDeleteOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful secondary DB deletion operations.
    +
    +
    getSecInsertOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful secondary DB insertion operations.
    +
    +
    getSecondaryBulkLoad() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the bulk-load-secondaries configuration property.
    +
    +
    getSecondaryConfig() - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
Deprecated.
    As of JE 4.0.13, replaced by SecondaryDatabase.getConfig().
    +
    +
    +
    getSecondaryConfig(Class, String) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Returns the default secondary database Berkeley DB engine API configuration for an entity class and key name.
    +
    +
    getSecondaryDatabaseName() - Method in exception com.sleepycat.je.SecondaryReferenceException
    +
    +
Returns the name of the secondary database being accessed during the failure.
    +
    +
    getSecondaryDatabases() - Method in class com.sleepycat.je.Database
    +
    +
    Returns a list of all SecondaryDatabase objects associated with a primary database.
    +
    +
    getSecondaryDatabases() - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
Returns an empty list, since this database is itself a secondary database.
    +
    +
    getSecondaryIndex(PrimaryIndex<PK, E>, Class<SK>, String) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Returns a secondary index for a given primary index and secondary key, opening it if necessary.
    +
    +
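A minimal DPL sketch of opening a secondary index against its primary index; the Employee entity and the key names are illustrative, not part of this API:

    import com.sleepycat.persist.EntityStore;
    import com.sleepycat.persist.PrimaryIndex;
    import com.sleepycat.persist.SecondaryIndex;
    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;
    import com.sleepycat.persist.model.Relationship;
    import com.sleepycat.persist.model.SecondaryKey;

    @Entity
    class Employee {
        @PrimaryKey
        long id;

        @SecondaryKey(relate = Relationship.MANY_TO_ONE)
        String department;
    }

    public class SecondaryIndexExample {
        static Employee anyInDept(EntityStore store, String dept) {
            PrimaryIndex<Long, Employee> byId =
                store.getPrimaryIndex(Long.class, Employee.class);
            // Opens (or returns the already open) index on "department".
            SecondaryIndex<String, Long, Employee> byDept =
                store.getSecondaryIndex(byId, String.class, "department");
            return byDept.get(dept); // some matching entity, or null
        }
    }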
    getSecondaryIndex(String, String) - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
Opens the secondary index for a given entity class and secondary key name.
    +
    +
    getSecondaryKey() - Method in exception com.sleepycat.je.SecondaryReferenceException
    +
    +
    Returns the secondary key being accessed during the failure.
    +
    +
    getSecondaryKeys() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
Returns an unmodifiable map of key name (which may be different from field name) to secondary key metadata for all secondary keys declared in this class, or null if no secondary keys are declared in this class.
    +
    +
    getSecondaryKeys() - Method in class com.sleepycat.persist.model.EntityMetadata
    +
    +
Returns an unmodifiable map of key name to secondary key metadata, or an empty map if no secondary keys are defined for this entity.
    +
    +
    getSecondaryNodes() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
Returns the subset of nodes in the group with replicated environments that do not participate in elections and cannot become masters.
    +
    +
    getSecPositionOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful secondary DB position operations.
    +
    +
    getSecSearchFailOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of failed secondary DB key search operations.
    +
    +
    getSecSearchOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful secondary DB key search operations.
    +
    +
    getSecUpdateOps() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    Number of successful secondary DB update operations.
    +
    +
    getSequence(String) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Returns a named sequence for using Berkeley DB engine API directly, opening it if necessary.
    +
    +
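A small sketch of allocating values from a named store sequence; the sequence name is illustrative:

    import com.sleepycat.je.Sequence;
    import com.sleepycat.persist.EntityStore;

    public class SequenceExample {
        static long nextId(EntityStore store) {
            // Opened on first use; later calls return the same handle.
            Sequence seq = store.getSequence("employee-id");
            return seq.get(null, 1); // allocate one value, auto-commit
        }
    }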
    getSequenceConfig(String) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Returns the default Berkeley DB engine API configuration for a named key sequence.
    +
    +
    getSequenceName() - Method in class com.sleepycat.persist.model.PrimaryKeyMetadata
    +
    +
    Returns the name of the sequence for assigning key values.
    +
    +
    getSerialBufferSize() - Method in class com.sleepycat.bind.serial.SerialBase
    +
    +
    Returns the initial byte size of the output buffer.
    +
    +
    getSerializableIsolation() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
Returns true if the transaction has been explicitly configured to have serializable (degree 3) isolation.
    +
    +
    getSerialOutput(Object) - Method in class com.sleepycat.bind.serial.SerialBase
    +
    +
Returns an empty SerialOutput instance that will be used by the serial binding or key creator.
    +
    +
    getSharedCache() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
A convenience method for getting the EnvironmentConfig.SHARED_CACHE parameter.
    +
    +
    getSharedCacheTotalBytes() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Total amount of the shared JE main cache in use, in bytes."
    +
    +
    getShowProgressInterval() - Method in class com.sleepycat.je.StatsConfig
    +
    +
    Returns the showProgressInterval value, if set.
    +
    +
    getShowProgressInterval() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns the showProgressInterval value, if set.
    +
    +
    getShowProgressStream() - Method in class com.sleepycat.je.StatsConfig
    +
    +
Returns the PrintStream on which the progress messages will be displayed during long running statistics gathering operations.
    +
    +
    getShowProgressStream() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
Returns the PrintStream on which the progress messages will be displayed during long running verify operations.
    +
    +
    getSize() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Returns the byte size of the data array.
    +
    +
    getSocketAddress() - Method in class com.sleepycat.je.rep.monitor.NewMasterEvent
    +
    +
Returns the socket address associated with the new master.
    +
    +
    getSocketAddress() - Method in interface com.sleepycat.je.rep.ReplicationNode
    +
    +
The socket address used by other nodes in the replication group to communicate with this node.
    +
    +
    getSocketConnectTimeout() - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
Returns the socketConnection timeout, in milliseconds, used when the ping thread attempts to establish a connection with a replication node.
    +
    +
    getSortedBigDecimalByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Returns the byte length of a sorted BigDecimal.
    +
    +
    getSortedBigDecimalMaxByteLength(BigDecimal) - Static method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Returns the maximum byte length that would be output for a given BigDecimal value if TupleOutput.writeSortedBigDecimal(java.math.BigDecimal) were called.
    +
    +
    getSortedDuplicates() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
Returns true if the database is configured to support records with duplicate keys.
    +
    +
    getSortedPackedIntByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Returns the byte length of a sorted packed integer.
    +
    +
    getSortedPackedLongByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Returns the byte length of a sorted packed long integer.
    +
    +
    getSplitBins() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
The number of BINs encountered by the INCompressor that were split between the time they were put on the compressor queue and when the compressor ran.
    +
    +
    getState() - Method in class com.sleepycat.je.rep.arbiter.Arbiter
    +
    +
    Gets the Arbiter state.
    +
    +
    getState() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
    The ReplicatedEnvironment.State of the node.
    +
    +
    getState() - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
Returns the current state of the node associated with this replication environment.
    +
    +
    getState() - Method in class com.sleepycat.je.rep.StateChangeEvent
    +
    +
    Returns the state that the node has transitioned to.
    +
    +
    getState() - Method in class com.sleepycat.je.Transaction
    +
    +
    Returns the current state of the transaction.
    +
    +
    getStateChangeListener() - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
Returns the listener used to receive asynchronous replication node state change events.
    +
    +
    getStats(StatsConfig) - Method in class com.sleepycat.je.Database
    +
    +
    Returns database statistics.
    +
    +
    getStats(StatsConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Returns the general database environment statistics.
    +
    +
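A sketch of a typical stats poll; setClear(true) resets the per-interval counters so each call reports activity since the previous one. The getters shown are the log-size accessors indexed elsewhere in this file:

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;
    import com.sleepycat.je.StatsConfig;

    public class StatsExample {
        static void dumpLogSizes(Environment env) {
            StatsConfig config = new StatsConfig();
            config.setClear(true); // reset counters after this call
            EnvironmentStats stats = env.getStats(config);
            System.out.println("total log bytes:    " + stats.getTotalLogSize());
            System.out.println("reserved log bytes: " + stats.getReservedLogSize());
        }
    }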
    getStats(StatsConfig) - Method in class com.sleepycat.je.rep.arbiter.Arbiter
    +
    +
    Gets the Arbiter statistics.
    +
    +
    getStats(StatsConfig) - Method in class com.sleepycat.je.Sequence
    +
    +
    Returns statistical information about the sequence.
    +
    +
    getStats() - Method in class com.sleepycat.persist.evolve.EvolveEvent
    +
    +
    The cumulative statistics gathered during eager evolution.
    +
    +
    getStatsConfig(Object[]) - Method in class com.sleepycat.je.jmx.JEMBean
    +
    +
Helper for creating a StatsConfig object to use as an operation parameter.
    +
    +
    getStatus() - Method in class com.sleepycat.je.PreloadStats
    +
    +
    Returns the PreloadStatus value for the preload() operation.
    +
    +
    getStoreName() - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns the name of this store.
    +
    +
    getStoreName() - Method in class com.sleepycat.persist.raw.RawStore
    +
    +
    Returns the name of this store.
    +
    +
    getStoreNames(Environment) - Static method in class com.sleepycat.persist.EntityStore
    +
    +
    Returns the names of all entity stores in the given environment.
    +
    +
    getStreamHeader() - Static method in class com.sleepycat.bind.serial.SerialOutput
    +
    +
Returns the fixed stream header used for all serialized streams in PROTOCOL_VERSION_2 format.
    +
    +
    getStringByteLength() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
Returns the byte length of a null-terminated UTF string in the data buffer, including the terminator.
    +
    +
    getSubclassIndex(PrimaryIndex<PK, E1>, Class<E2>, Class<SK>, String) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Returns a secondary index for a secondary key in an entity subclass, opening it if necessary.
    +
    +
    getSuper() - Method in class com.sleepycat.persist.raw.RawObject
    +
    +
Returns the instance of the superclass, or null if the superclass is Object or Enum.
    +
    +
    getSuperType() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
Returns the type of the superclass, or null if the superclass is Object or this is not a complex type (in other words, this is a simple type or an array type).
    +
    +
    getSync() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
Returns true if the transaction is configured to write and synchronously flush the log when it commits.
    +
    +
    getSyncupProgressListener() - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Return the ProgressListener to be used at this environment startup.
    +
    +
    getSystemLoad() - Method in class com.sleepycat.je.rep.NodeState
    +
    +
    Returns the system load average for the last minute.
    +
    +
    getTemporary() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the temporary database option.
    +
    +
    getTemporary() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the temporary configuration property.
    +
    +
    getterAndSetterMethods - Static variable in class com.sleepycat.util.ConfigBeanInfoBase
    +
     
    +
    getThreadName() - Method in class com.sleepycat.je.ExceptionEvent
    +
    +
    Returns the name of the daemon thread that threw the exception.
    +
    +
    getThreadTransaction() - Method in class com.sleepycat.je.Environment
    +
    +
Returns the transaction associated with this thread if implied transactions are being used.
    +
    +
    getTimeout(TimeUnit) - Method in class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
    +
    Return the timeout specified when creating this consistency policy.
    +
    +
    getTimeout(TimeUnit) - Method in class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
    +
    Always returns 0, no timeout is needed for this policy.
    +
    +
    getTimeout(TimeUnit) - Method in class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
    +
    Returns the consistency timeout associated with this policy.
    +
    +
    getTimeout(TimeUnit) - Method in interface com.sleepycat.je.ReplicaConsistencyPolicy
    +
    +
    The timeout associated with the consistency policy.
    +
    +
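How a consistency policy and its timeout are typically attached to a transaction begun on a replica; the lag and timeout values are illustrative:

    import com.sleepycat.je.TransactionConfig;
    import com.sleepycat.je.rep.TimeConsistencyPolicy;
    import java.util.concurrent.TimeUnit;

    public class ConsistencyExample {
        static TransactionConfig lagBoundedReads() {
            // Replica may lag the master by at most 2 seconds; wait up to
            // 10 seconds at beginTransaction for it to catch up.
            TimeConsistencyPolicy policy = new TimeConsistencyPolicy(
                2, TimeUnit.SECONDS, 10, TimeUnit.SECONDS);
            TransactionConfig config = new TransactionConfig();
            config.setConsistencyPolicy(policy);
            return config;
        }
    }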
    getTips() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
     
    +
    getTotalLogSize() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
    "Total bytes used by data files on disk: activeLogSize + reservedLogSize."
    +
    +
    getTotalTxnMs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    The total time in milliseconds spent in replicated transactions.
    +
    +
    getTrackerLagConsistencyWaitMs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
The total time (in msec) for which a Replica held back an Environment.beginTransaction(Transaction,TransactionConfig) operation to satisfy the TimeConsistencyPolicy.
    +
    +
    getTrackerLagConsistencyWaits() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
The number of times a Replica held back an Environment.beginTransaction(Transaction,TransactionConfig) operation to satisfy the TimeConsistencyPolicy.
    +
    +
    getTrackerVLSNConsistencyWaitMs() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
The total time (in msec) for which a Replica held back an Environment.beginTransaction(Transaction,TransactionConfig) operation to satisfy the CommitPointConsistencyPolicy.
    +
    +
    getTrackerVLSNConsistencyWaits() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
The number of times a Replica held back an Environment.beginTransaction(Transaction,TransactionConfig) operation to satisfy the CommitPointConsistencyPolicy.
    +
    +
    getTransaction() - Method in class com.sleepycat.collections.CurrentTransaction
    +
    +
Returns the transaction associated with the current thread for this environment, or null if no transaction is active.
    +
    +
    getTransaction() - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    getTransactional() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns true if the database open is enclosed within a transaction.
    +
    +
    getTransactional() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Returns true if the database environment is configured for transactions.
    +
    +
    getTransactional() - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Returns the transactional configuration property.
    +
    +
    getTransactionConfig() - Method in class com.sleepycat.collections.TransactionRunner
    +
    + +
    +
    getTransactionStats(StatsConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Returns the database environment's transactional statistics.
    +
    +
    getTransactionTimeout() - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    getTruncationFileNumber() - Method in exception com.sleepycat.je.rep.RollbackProhibitedException
    +
     
    +
    getTruncationFileOffset() - Method in exception com.sleepycat.je.rep.RollbackProhibitedException
    +
    +
The JE log must be truncated to this offset in the specified file in order for this node to rejoin the group.
    +
    +
    getTTL() - Method in class com.sleepycat.je.WriteOptions
    +
    +
    Returns the Time-To-Live property for a 'put' operation.
    +
    +
    getTTLUnit() - Method in class com.sleepycat.je.WriteOptions
    +
    +
    Returns the Time-To-Live time unit for a 'put' operation.
    +
    +
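A sketch of how the TTL read back by getTTL()/getTTLUnit() is set on a write; the 30-day value is illustrative and `db` is assumed to be an open Database on a JE release that supports TTL:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.OperationResult;
    import com.sleepycat.je.Put;
    import com.sleepycat.je.WriteOptions;
    import java.util.concurrent.TimeUnit;

    public class TtlExample {
        static void putExpiring(Database db, DatabaseEntry key, DatabaseEntry data) {
            // The record expires roughly 30 days after it is written.
            WriteOptions options = new WriteOptions().setTTL(30, TimeUnit.DAYS);
            OperationResult result = db.put(null, key, data, Put.OVERWRITE, options);
            System.out.println("overwrote existing record: " + result.isUpdate());
        }
    }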
    getTupleBufferSize() - Method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
    Returns the initial byte size of the output buffer.
    +
    +
    getTupleOutput(BigDecimal) - Method in class com.sleepycat.bind.tuple.BigDecimalBinding
    +
     
    +
    getTupleOutput(BigInteger) - Method in class com.sleepycat.bind.tuple.BigIntegerBinding
    +
     
    +
    getTupleOutput(Boolean) - Method in class com.sleepycat.bind.tuple.BooleanBinding
    +
     
    +
    getTupleOutput(Byte) - Method in class com.sleepycat.bind.tuple.ByteBinding
    +
     
    +
    getTupleOutput(Character) - Method in class com.sleepycat.bind.tuple.CharacterBinding
    +
     
    +
    getTupleOutput(Double) - Method in class com.sleepycat.bind.tuple.DoubleBinding
    +
     
    +
    getTupleOutput(Float) - Method in class com.sleepycat.bind.tuple.FloatBinding
    +
     
    +
    getTupleOutput(Integer) - Method in class com.sleepycat.bind.tuple.IntegerBinding
    +
     
    +
    getTupleOutput(Long) - Method in class com.sleepycat.bind.tuple.LongBinding
    +
     
    +
    getTupleOutput(Integer) - Method in class com.sleepycat.bind.tuple.PackedIntegerBinding
    +
     
    +
    getTupleOutput(Long) - Method in class com.sleepycat.bind.tuple.PackedLongBinding
    +
     
    +
    getTupleOutput(Short) - Method in class com.sleepycat.bind.tuple.ShortBinding
    +
     
    +
    getTupleOutput(BigDecimal) - Method in class com.sleepycat.bind.tuple.SortedBigDecimalBinding
    +
     
    +
    getTupleOutput(Double) - Method in class com.sleepycat.bind.tuple.SortedDoubleBinding
    +
     
    +
    getTupleOutput(Float) - Method in class com.sleepycat.bind.tuple.SortedFloatBinding
    +
     
    +
    getTupleOutput(Integer) - Method in class com.sleepycat.bind.tuple.SortedPackedIntegerBinding
    +
     
    +
    getTupleOutput(Long) - Method in class com.sleepycat.bind.tuple.SortedPackedLongBinding
    +
     
    +
    getTupleOutput(String) - Method in class com.sleepycat.bind.tuple.StringBinding
    +
     
    +
    getTupleOutput(E) - Method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
Returns an empty TupleOutput instance that will be used by the tuple binding or key creator.
    +
    +
    getTxnNoSync() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
Deprecated.
    +
    +
    getTxnSerializableIsolation() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
A convenience method for getting EnvironmentConfig.TXN_SERIALIZABLE_ISOLATION.
    +
    +
    getTxnTimeout(TimeUnit) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    A convenience method for getting EnvironmentConfig.TXN_TIMEOUT.
    +
    +
    getTxnTimeout() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
    +
    +
    getTxnTimeout(TimeUnit) - Method in class com.sleepycat.je.Transaction
    +
    +
    Returns the timeout value for the transaction lifetime.
    +
    +
    getTxnWriteNoSync() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
Deprecated.
    +
    +
    getType() - Method in interface com.sleepycat.je.rep.ReplicationNode
    +
    +
    Returns the type associated with the node.
    +
    +
    getType() - Method in interface com.sleepycat.persist.raw.RawField
    +
    +
Returns the type of the field, without expanding parameterized types, or null if the type is an interface type or the Object class.
    +
    +
    getType() - Method in class com.sleepycat.persist.raw.RawObject
    +
    +
    Returns the raw type information for this raw object.
    +
    +
    getUnknownStateTimeout(TimeUnit) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Returns the Unknown state timeout.
    +
    +
    getUpdateTTL() - Method in class com.sleepycat.je.WriteOptions
    +
    +
    Returns the update-TTL property for a 'put' operation.
    +
    +
    getUseExistingConfig() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Return the value of the useExistingConfig property.
    +
    +
    getValue() - Method in class com.sleepycat.collections.MapEntryParameter
    +
    +
    Returns the value of this entry.
    +
    +
    getValue() - Method in class com.sleepycat.je.SequenceStats
    +
    +
    Returns the current cached value of the sequence.
    +
    +
    getValues() - Method in class com.sleepycat.persist.raw.RawObject
    +
    +
Returns a map of field name to value for a complex type, or null for an array type or an enum type.
    +
    +
    getVerbose() - Method in class com.sleepycat.persist.model.ClassEnhancer
    +
    +
    Gets verbose mode.
    +
    +
    getVerifyDataRecords() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns the verifyDataRecords value.
    +
    +
    getVerifySecondaries() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns the verifySecondaries value.
    +
    +
    getVersion() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Returns the version of this persistent class.
    +
    +
    getVersion() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns the class version for this type.
    +
    +
    getVersionString() - Method in class com.sleepycat.je.JEVersion
    +
    +
    Release version, suitable for display.
    +
    +
    getVLSN() - Method in class com.sleepycat.je.CommitToken
    +
     
    +
    getVLSN() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
The highest commit VLSN that has been acknowledged.
    +
    +
    getVLSNRate() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
A moving average of the rate replication data is being generated by the master, in VLSNs per minute, or 0 if not known or this node is not the master.
    +
    +
    getWaiterTxnIds() - Method in exception com.sleepycat.je.LockConflictException
    +
    +
Returns an array of longs containing transaction ids of waiters at the time of the timeout.
    +
    +
    getWrap() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
Returns true if the sequence will wrap around when it is incremented (decremented) past the specified maximum (minimum) value.
    +
    +
    getWriteIntLength(int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be written by PackedInteger.writeInt(byte[], int, int).
    +
    +
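The length methods let a caller size a buffer exactly before writing. A small self-contained sketch:

    import com.sleepycat.util.PackedInteger;

    public class PackedIntegerExample {
        public static void main(String[] args) {
            int value = 123456;
            // Size the buffer exactly, then write at offset 0.
            byte[] buf = new byte[PackedInteger.getWriteIntLength(value)];
            PackedInteger.writeInt(buf, 0, value);
            // readInt decodes the same packed format.
            int readBack = PackedInteger.readInt(buf, 0);
            System.out.println(buf.length + " bytes, value " + readBack);
        }
    }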
    getWriteLongLength(long) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be written by PackedInteger.writeLong(byte[], int, long).
    +
    +
    getWriteNoSync() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
Deprecated.
    +
    +
    getWrites() - Method in class com.sleepycat.je.rep.arbiter.ArbiterStats
    +
    +
    The number of file writes.
    +
    +
    getWriteSortedIntLength(int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be written by PackedInteger.writeSortedInt(byte[], int, int).
    +
    +
    getWriteSortedLongLength(long) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Returns the number of bytes that would be written by PackedInteger.writeSortedLong(byte[], int, long).
    +
    +
    getZeroTerminatedByteLength(byte[], int) - Static method in class com.sleepycat.util.UtfOps
    +
    +
Returns the byte length of a null terminated UTF string, not including the terminator.
    +
    +
    GROUP_NAME - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The name for the replication group.
    +
    +
    GroupChangeEvent - Class in com.sleepycat.je.rep.monitor
    +
    +
    The event generated when the group composition changes.
    +
    +
    GroupChangeEvent.GroupChangeType - Enum in com.sleepycat.je.rep.monitor
    +
    +
    The kind of GroupChangeEvent.
    +
    +
    GroupShutdownException - Exception in com.sleepycat.je.rep
    +
    +
Thrown when an attempt is made to access an environment that was shut down by the Master as a result of a call to ReplicatedEnvironment.shutdownGroup(long, TimeUnit).
    +
    +

    H

    +
    +
    HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
By default, if a checksum exception is found at the end of the log during Environment startup, JE will assume the checksum is due to previously interrupted I/O and will quietly truncate the log and restart.
    +
    +
    handleException(Exception, int, int) - Method in class com.sleepycat.collections.TransactionRunner
    +
    +
Handles exceptions that occur during a transaction, and may implement transaction retry policy.
    +
    +
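handleException is called from TransactionRunner.run, which wraps a unit of work in a transaction and retries it on lock conflicts. A sketch of typical use; the retry count is illustrative:

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;
    import com.sleepycat.je.Environment;

    public class RunnerExample {
        static void runWithRetries(Environment env) throws Exception {
            TransactionRunner runner = new TransactionRunner(env);
            runner.setMaxRetries(10); // retry lock conflicts up to 10 times
            runner.run(new TransactionWorker() {
                public void doWork() throws Exception {
                    // All reads and writes here share one transaction; it is
                    // committed on normal return and aborted on exception.
                }
            });
        }
    }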
    hashCode() - Method in class com.sleepycat.collections.MapEntryParameter
    +
    +
    Computes a hash code as specified by Map.Entry.hashCode().
    +
    +
    hashCode() - Method in class com.sleepycat.collections.StoredCollection
    +
     
    +
    hashCode() - Method in class com.sleepycat.collections.StoredMap
    +
     
    +
    hashCode() - Method in class com.sleepycat.je.CommitToken
    +
     
    +
    hashCode() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Returns a hash code based on the data value.
    +
    +
    hashCode() - Method in class com.sleepycat.je.Durability
    +
     
    +
    hashCode() - Method in class com.sleepycat.je.JEVersion
    +
     
    +
    hashCode() - Method in class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
     
    +
    hashCode() - Method in class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
     
    +
    hashCode() - Method in class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.evolve.Converter
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.evolve.EntityConverter
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.evolve.Mutation
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.evolve.Mutations
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.evolve.Renamer
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.model.EntityMetadata
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.model.FieldMetadata
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.model.PrimaryKeyMetadata
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
     
    +
    hashCode() - Method in class com.sleepycat.persist.raw.RawObject
    +
     
    +
    hasNext() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
Returns true if this iterator has more elements when traversing in the forward direction.
    +
    +
    hasPrevious() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
Returns true if this iterator has more elements when traversing in the reverse direction.
    +
    +
    headMap(K) - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
Returns a view of the portion of this sorted map whose keys are strictly less than toKey.
    +
    +
    headMap(K, boolean) - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
Returns a view of the portion of this sorted map whose elements are strictly less than toKey, optionally including toKey.
    +
    +
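The head/tail/sub views returned by these methods are live views backed by the same database, not copies. A sketch, assuming `map` wraps an open JE database; the key type and bound are illustrative:

    import com.sleepycat.collections.StoredSortedMap;
    import java.util.Map;
    import java.util.SortedMap;

    public class HeadMapExample {
        static void printHead(StoredSortedMap<String, String> map) {
            // Live view of all entries whose keys are strictly less than "m".
            SortedMap<String, String> head = map.headMap("m");
            for (Map.Entry<String, String> e : head.entrySet()) {
                System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }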
    headSet(Map.Entry<K, V>) - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly less than toMapEntry.
    +
    +
    headSet(Map.Entry<K, V>, boolean) - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly less than toMapEntry, optionally including toMapEntry.
    +
    +
    headSet(K) - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly less than toKey.
    +
    +
    headSet(K, boolean) - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly less than toKey, optionally including toKey.
    +
    +
    headSet(E) - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly less than toValue.
    +
    +
    headSet(E, boolean) - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly less than toValue, optionally including toValue.
    +
    +
    HELPER_HOSTS - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
Deprecated.
    +
    +
    HELPER_HOSTS - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
The string identifying one or more helper host and port pairs in this format:
    +
    +

    I

    +
    +
    ignoreMethods - Static variable in class com.sleepycat.util.ConfigBeanInfoBase
    +
     
    +
    IncompatibleClassException - Exception in com.sleepycat.persist.evolve
    +
    +
A class has been changed incompatibly and no mutation has been configured to handle the change or a new class version number has not been assigned.
    +
    +
    IncompatibleClassException(String) - Constructor for exception com.sleepycat.persist.evolve.IncompatibleClassException
    +
     
    +
    indexKeyBinding - Variable in class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
     
    +
    IndexNotAvailableException - Exception in com.sleepycat.persist
    +
    +
Thrown by the getPrimaryIndex, getSecondaryIndex and getSubclassIndex when an index has not yet been created.
    +
    +
    initClassFields() - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    initClassFields() - Method in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    initClassFields() - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    initialize(ClassLoader) - Method in interface com.sleepycat.je.DatabaseComparator
    +
    +
Called to initialize a comparator object after it is instantiated or deserialized, and before it is used.
    +
    +
    initialize(EntityModel) - Method in interface com.sleepycat.persist.evolve.Conversion
    +
    +
Initializes the conversion, allowing it to obtain raw type information from the entity model.
    +
    +
    initializeProxy(T) - Method in interface com.sleepycat.persist.model.PersistentProxy
    +
    +
Copies the state of a given proxied class instance to this proxy instance.
    +
    +
    inputToEntry(TupleInput, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
Utility method to set the data in an entry buffer to the data in a tuple input object.
    +
    +
    INSUFFICIENT_REPLICAS_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
The amount of time that an Environment.beginTransaction(com.sleepycat.je.Transaction, com.sleepycat.je.TransactionConfig) on the Master will wait for a sufficient number of electable Replicas, as determined by the default Durability policy, to contact the Master.
    +
    +
    InsufficientAcksException - Exception in com.sleepycat.je.rep
    +
    +
This exception is thrown at the time of a commit in a Master, if the Master could not obtain transaction commit acknowledgments from its Replicas in accordance with the Durability.ReplicaAckPolicy currently in effect and within the requested timeout interval.
    +
    +
    InsufficientLogException - Exception in com.sleepycat.je.rep
    +
    +
This exception indicates that the log files constituting the Environment are insufficient and cannot be used as the basis for continuing with the replication stream provided by the current master.
    +
    +
    InsufficientReplicasException - Exception in com.sleepycat.je.rep
    +
    +
Thrown by Environment.beginTransaction(com.sleepycat.je.Transaction, com.sleepycat.je.TransactionConfig) and Transaction.commit() when these operations are initiated at a Master which is not in contact with a quorum of Replicas as determined by the Durability.ReplicaAckPolicy that is in effect for the operation.
    +
    +
    InsufficientReplicasException(Locker, Durability.ReplicaAckPolicy, int, Set<String>) - Constructor for exception com.sleepycat.je.rep.InsufficientReplicasException
    +
    +
    Creates a Commit exception.
    +
    +
    IntegerBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for an Integer primitive wrapper or an int primitive.
    +
    +
    IntegerBinding() - Constructor for class com.sleepycat.bind.tuple.IntegerBinding
    +
     
    +
    intToEntry(int, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.IntegerBinding
    +
    +
    Converts a simple int value into an entry buffer.
    +
    +
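A minimal round trip through IntegerBinding's static helpers:

    import com.sleepycat.bind.tuple.IntegerBinding;
    import com.sleepycat.je.DatabaseEntry;

    public class BindingExample {
        public static void main(String[] args) {
            DatabaseEntry entry = new DatabaseEntry();
            IntegerBinding.intToEntry(42, entry);         // sorted 4-byte form
            int value = IntegerBinding.entryToInt(entry); // round-trips to 42
            System.out.println(value);
        }
    }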
    intToEntry(int, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.PackedIntegerBinding
    +
    +
Converts a simple int value into an entry buffer, using PackedInteger format.
    +
    +
    intToEntry(int, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedPackedIntegerBinding
    +
    +
Converts a simple int value into an entry buffer, using SortedPackedInteger format.
    +
    +
    invoke(String, Object[], String[]) - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    invoke(Environment, String, Object[], String[]) - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Invoke an operation for the given environment.
    +
    +
    invoke(String, Object[], String[]) - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    IOExceptionWrapper - Exception in com.sleepycat.util
    +
    +
    An IOException that can contain nested exceptions.
    +
    +
    IOExceptionWrapper(Throwable) - Constructor for exception com.sleepycat.util.IOExceptionWrapper
    +
     
    +
    isActive() - Method in enum com.sleepycat.je.rep.ReplicatedEnvironment.State
    +
     
    +
    isArbiter() - Method in enum com.sleepycat.je.rep.NodeType
    +
    +
    Returns whether this is the NodeType.ARBITER type.
    +
    +
    isArray() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns whether this is an array type.
    +
    +
    isClosed() - Method in class com.sleepycat.je.Environment
    +
    +
    Returns whether the environment has been closed by the application.
    +
    +
    isCorrupted() - Method in exception com.sleepycat.je.EnvironmentFailureException
    +
    +
Whether the EnvironmentFailureException indicates that the log is corrupt, meaning that a network restore (or restore from backup) should be performed.
    +
    +
    isDataNode() - Method in enum com.sleepycat.je.rep.NodeType
    +
    +
    Returns whether this type represents a data node, either NodeType.ELECTABLE or NodeType.SECONDARY.
    +
    +
    isDeleted() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
Returns whether this type has been deleted using a class Deleter mutation.
    +
    +
    isDetached() - Method in enum com.sleepycat.je.rep.ReplicatedEnvironment.State
    +
     
    +
    isElectable() - Method in enum com.sleepycat.je.rep.NodeType
    +
    +
    Returns whether this is the NodeType.ELECTABLE type.
    +
    +
    isEmpty() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
    Returns true if this map or collection contains no mappings or elements.
    +
    +
    isEmpty() - Method in class com.sleepycat.persist.evolve.Mutations
    +
    +
    Returns true if no mutations are present.
    +
    +
    isEntityClass() - Method in class com.sleepycat.persist.model.ClassMetadata
    +
    +
    Returns whether this class is an entity class.
    +
    +
    isEnum() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns whether this is an enum type.
    +
    +
    isInternalHandle() - Method in class com.sleepycat.je.Environment
    +
     
    +
    isMaster() - Method in enum com.sleepycat.je.rep.ReplicatedEnvironment.State
    +
     
    +
    isMonitor() - Method in enum com.sleepycat.je.rep.NodeType
    +
    +
    Returns whether this is the NodeType.MONITOR type.
    +
    +
    isOpen() - Method in class com.sleepycat.je.util.LogVerificationReadableByteChannel
    +
    isOpen() - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Returns whether the model is associated with an open store.
    +
    +
    isOrdered() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
    Returns whether keys are ordered in this container.
    +
    +
    isPrimitive() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
Returns whether this type is a Java primitive: char, byte, short, int, long, float or double.
    +
    +
    isReadModifyWrite() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
Returns whether write-locks will be obtained when reading with this cursor.
    +
    +
    isReplica() - Method in enum com.sleepycat.je.rep.ReplicatedEnvironment.State
    +
     
    +
    isSameRM(XAResource) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    isSecondary() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
Returns whether this container is a view on a secondary database rather than directly on a primary database.
    +
    +
    isSecondary() - Method in enum com.sleepycat.je.rep.NodeType
    +
    +
    Returns whether this is the NodeType.SECONDARY type.
    +
    +
    isSimple() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
Returns whether this is a simple type: primitive, primitive wrapper, BigInteger, BigDecimal, String or Date.
    +
    +
    isTransactional() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
Returns whether the databases underlying this container are transactional.
    +
    +
    isUnknown() - Method in enum com.sleepycat.je.rep.ReplicatedEnvironment.State
    +
     
    +
    isUpdate() - Method in class com.sleepycat.je.OperationResult
    +
    +
Returns whether the operation was an update, for distinguishing inserts and updates performed by a Put.OVERWRITE operation.
    +
    +
    isValid() - Method in class com.sleepycat.je.Environment
    +
    +
    Returns whether this Environment is open, valid and can be used.
    +
    +
    isValid() - Method in class com.sleepycat.je.Transaction
    +
    +
Returns whether this Transaction is open, which is equivalent to when Transaction.getState() returns Transaction.State.OPEN.
    +
    +
    isWriteAllowed() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
Returns true if this is a read-write container or false if this is a read-only container.
    +
    +
    iterator() - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Returns an iterator over the elements in this collection.
    +
    +
    iterator(boolean) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
Deprecated.
Please use StoredCollection.storedIterator() or StoredCollection.storedIterator(boolean) instead. Because the iterator returned must be closed, the method name iterator is confusing since standard Java iterators do not need to be closed.
    +
    +
    +
    iterator(Iterator<E>) - Static method in class com.sleepycat.collections.StoredCollections
    +
    +
    Clones an iterator preserving its current position.
    +
    +
    iterator() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Returns an iterator over the key range, starting with the value following the current position or at the first value if the cursor is uninitialized.
    +
    +
    iterator(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Returns an iterator over the key range, starting with the value following the current position or at the first value if the cursor is uninitialized.
    +
    +
    iterator() - Method in interface com.sleepycat.persist.ForwardCursor
    +
    +
Returns an iterator over the key range, starting with the value following the current position or at the first value if the cursor is uninitialized.
    +
    +
    iterator(LockMode) - Method in interface com.sleepycat.persist.ForwardCursor
    +
    +
Returns an iterator over the key range, starting with the value following the current position or at the first value if the cursor is uninitialized.
    +
    +

    J

    +
    +
    JEConnection - Class in com.sleepycat.je.jca.ra
    +
    +
    A JEConnection provides access to JE services.
    +
    +
    JEConnection(JEManagedConnection) - Constructor for class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    JEConnectionFactory - Interface in com.sleepycat.je.jca.ra
    +
    +
    An application may obtain a JEConnection in this manner:
    +
    +
    JEDiagnostics - Class in com.sleepycat.je.jmx
    +
    +
JEDiagnostics is a debugging mbean for a non-replicated JE Environment.
    +
    +
    JEDiagnostics(Environment) - Constructor for class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    JEDiagnostics() - Constructor for class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    JEException - Exception in com.sleepycat.je.jca.ra
    +
     
    +
    JEException(String) - Constructor for exception com.sleepycat.je.jca.ra.JEException
    +
     
    +
    JEMBean - Class in com.sleepycat.je.jmx
    +
     
    +
    JEMBean(Environment) - Constructor for class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    JEMBean() - Constructor for class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    JEMBeanHelper - Class in com.sleepycat.je.jmx
    +
    +
Deprecated.
As of JE 4, JEMBeanHelper is deprecated in favor of the concrete MBeans available by default with a JE environment. These MBeans can be registered and enabled by the environment by setting the following JVM property: JEMonitor: This MBean provides general stats monitoring and access to basic environment level operations.

JEMBeanHelper is a utility class for the MBean implementation which wants to add management of a JE environment to its capabilities. MBean implementations can contain a JEMBeanHelper instance to get MBean metadata for JE and to set attributes, get attributes, and invoke operations.

com.sleepycat.je.jmx.JEMonitor and the example program jmx.JEApplicationMBean are two MBean implementations which support different application use cases. See those classes for examples of how to use JEMBeanHelper.

    +
    +
    +
    JEMBeanHelper(File, boolean) - Constructor for class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Instantiate a helper, specifying environment home and open capabilities.
    +
    +
    JEMonitor - Class in com.sleepycat.je.jmx
    +
    +
JEMonitor is a JMX MBean which makes statistics and basic administrative operations available.
    +
    +
    JEMonitor(Environment) - Constructor for class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    JEMonitor() - Constructor for class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    jeName - Variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    JEVersion - Class in com.sleepycat.je
    +
    +
    Berkeley DB Java Edition version information.
    +
    +
    JEVersion(String) - Constructor for class com.sleepycat.je.JEVersion
    +
     
    +
    join(StoredContainer[], Object[], JoinConfig) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
Returns an iterator representing an equality join of the indices and index key values specified.
    +
    +
    join(Cursor[], JoinConfig) - Method in class com.sleepycat.je.Database
    +
    +
Creates a specialized join cursor for use in performing equality or natural joins on secondary indices.
    +
    +
    join(Cursor[], JoinConfig) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    This operation is not allowed on a secondary database.
    +
    +
    JoinConfig - Class in com.sleepycat.je
    +
    +
    The configuration properties of a JoinCursor.
    +
    +
    JoinConfig() - Constructor for class com.sleepycat.je.JoinConfig
    +
    +
    Creates an instance with the system's default settings.
    +
    +
    JoinCursor - Class in com.sleepycat.je
    +
    +
A specialized join cursor for use in performing equality or natural joins on secondary indices.
    +
    +
    JoinGroupEvent - Class in com.sleepycat.je.rep.monitor
    +
    +
    The event generated when a node joins the group.
    +
    +

    K

    +
    +
    KeyField - Annotation Type in com.sleepycat.persist.model
    +
    +
Indicates the sorting position of a key field in a composite key class when the Comparable interface is not implemented.
    +
    +
    keys() - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing all keys in this index.
    +
    +
    keys(Transaction, CursorConfig) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing all keys in this index.
    +
    +
    keys(K, boolean, K, boolean) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing keys in a key range.
    +
    +
    keys(Transaction, K, boolean, K, boolean, CursorConfig) - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Opens a cursor for traversing keys in a key range.
    +
    +
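A sketch of traversing all keys in an index with keys(); the PrimaryIndex parameter is illustrative, and the cursor must be closed when iteration ends:

    import com.sleepycat.persist.EntityCursor;
    import com.sleepycat.persist.PrimaryIndex;

    public class KeysExample {
        static void printKeys(PrimaryIndex<Long, ?> index) {
            EntityCursor<Long> keys = index.keys();
            try {
                for (Long key : keys) { // EntityCursor is Iterable
                    System.out.println(key);
                }
            } finally {
                keys.close(); // always close cursors
            }
        }
    }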
    keys() - Method in class com.sleepycat.persist.EntityJoin
    +
    +
Opens a cursor that returns the primary keys of entities qualifying for the join.
    +
    +
    keys(Transaction, CursorConfig) - Method in class com.sleepycat.persist.EntityJoin
    +
    +
Opens a cursor that returns the primary keys of entities qualifying for the join.
    +
    +
    keySet() - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Returns a set view of the keys contained in this map.
    +
    +
    keysIndex() - Method in class com.sleepycat.persist.SecondaryIndex
    +
    +
    Returns a read-only keys index that maps secondary key to primary key.
    +
    +

    L

    +
    +
    last() - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
    Returns the last (highest) element currently in this sorted set.
    +
    +
    last() - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
    Returns the last (highest) element currently in this sorted set.
    +
    +
    last() - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
    Returns the last (highest) element currently in this sorted set.
    +
    +
    last() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Moves the cursor to the last value and returns it, or returns null if the cursor range is empty.
    +
    +
    last(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Moves the cursor to the last value and returns it, or returns null if the cursor range is empty.
    +
    +
    lastKey() - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
    Returns the last (highest) element currently in this sorted map.
    +
    +
    LeaveGroupEvent - Class in com.sleepycat.je.rep.monitor
    +
    +
    The event generated when a node leaves the group.
    +
    +
    LeaveGroupEvent.LeaveReason - Enum in com.sleepycat.je.rep.monitor
    +
    +
    The reason for why the node leaves the group.
    +
    +
    len - Variable in class com.sleepycat.util.FastInputStream
    +
     
    +
    load() - Method in class com.sleepycat.je.util.DbLoad
    +
     
    +
    LOCK_DEADLOCK_DETECT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Whether to perform deadlock detection when a lock conflict occurs.
    +
    +
    LOCK_DEADLOCK_DETECT_DELAY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The delay after a lock conflict, before performing deadlock detection.
    +
    +
    LOCK_N_LOCK_TABLES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Number of Lock Tables.
    +
    +
    LOCK_OLD_LOCK_EXCEPTIONS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
    since JE 6.5; has no effect, as if it were set to false.
    +
    +
    +
    LOCK_TIMEOUT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the default lock timeout.
    +
    +
    LockConflictException - Exception in com.sleepycat.je
    +
    +
The common base class for all exceptions that result from record lock conflicts during read and write operations.
    +
    +
    LockMode - Enum in com.sleepycat.je
    +
    +
    Record lock modes for read operations.
    +
    +
    LockNotAvailableException - Exception in com.sleepycat.je
    +
    +
    Thrown when a non-blocking operation fails to get a lock.
    +
    +
    LockNotGrantedException - Exception in com.sleepycat.je
    +
    +
Deprecated.
    +
    +
    LockPreemptedException - Exception in com.sleepycat.je.rep
    +
    +
Thrown when a lock has been "stolen", or preempted, from a transaction in a replicated environment.
    +
    +
    LockStats - Class in com.sleepycat.je
    +
    +
Deprecated.
    as of 4.0.10, replaced by Environment.getStats(StatsConfig).

    +
    +
    +
    LockTimeoutException - Exception in com.sleepycat.je
    +
    +
Thrown when multiple threads are competing for a lock and the lock timeout interval is exceeded for the current operation.
    +
    +
    LOG_BUFFER_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum starting size of a JE log buffer.
    +
    +
    LOG_CHECKSUM_READ - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, perform a checksum check when reading entries from log.
    +
    +
    LOG_CHUNKED_NIO - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
    NIO is no longer used by JE and this parameter has no + effect.
    +
    +
    +
    LOG_DETECT_FILE_DELETE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, periodically detect unexpected file deletions.
    +
    +
    LOG_DETECT_FILE_DELETE_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The interval used to check for unexpected file deletions.
    +
    +
    LOG_DIRECT_NIO - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
    NIO is no longer used by JE and this parameter has no + effect.
    +
    +
    +
    LOG_FAULT_READ_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The buffer size for faulting in objects from disk, in bytes.
    +
    +
    LOG_FILE_CACHE_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The size of the file handle cache.
    +
    +
    LOG_FILE_MAX - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum size of each individual JE log file, in bytes.
    +
    +
    LOG_FLUSH_NO_SYNC_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum time interval between committing a transaction with + NO_SYNC durability, and + making the transaction durable with respect to the file system.
    +
    +
    LOG_FLUSH_SYNC_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum time interval between committing a transaction with + NO_SYNC or WRITE_NO_SYNC durability, + and making the transaction durable with respect to the storage device.
    +
    +
    LOG_FLUSH_TASK_INTERVAL - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Deprecated. +
    as of 7.2. Replaced by EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL. For compatibility with + earlier releases, if this parameter is specified its value will be used + as the flush sync interval; in this case, EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL may not also be specified.
    +
    +
    +
    LOG_FSYNC_TIME_LIMIT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If the time taken by an fsync exceeds this limit, a WARNING level + message is logged.
    +
    +
    LOG_FSYNC_TIMEOUT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The timeout limit for group file sync, in microseconds.
    +
    +
    LOG_GROUP_COMMIT_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The time interval in nanoseconds during which transactions may be + grouped to amortize the cost of write and/or fsync when a transaction + commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC on the local + machine.
    +
    +
    LOG_GROUP_COMMIT_THRESHOLD - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The threshold value impacts the number of transactions that may be + grouped to amortize the cost of write and/or fsync when a + transaction commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC + on the local machine.
    +
    +
    LOG_ITERATOR_MAX_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum read buffer size for log iterators, which are used when + scanning the log during activities like log cleaning and environment + open, in bytes.
    +
    +
    LOG_ITERATOR_READ_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The read buffer size for log iterators, which are used when scanning the + log during activities like log cleaning and environment open, in bytes.
    +
    +
    LOG_MEM_ONLY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, operates in an in-memory test mode without flushing the log to + disk.
    +
    +
    LOG_N_DATA_DIRECTORIES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
    as of 7.3. This feature is not known to provide benefits + beyond that of a simple RAID configuration, and will be removed in the + next release, which is slated for mid-April, 2017.
    +
    +
    +
    LOG_NUM_BUFFERS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of JE log buffers.
    +
    +
    LOG_TOTAL_BUFFER_BYTES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The total memory taken by log buffers, in bytes.
    +
    +
    LOG_USE_NIO - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
    NIO is no longer used by JE and this parameter has no + effect.
    +
    +
    +
    LOG_USE_ODSYNC - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true (default is false) O_DSYNC is used to open JE log files.
    +
    +
    LOG_USE_WRITE_QUEUE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true (default is true) the Write Queue is used for file I/O + operations which are blocked by concurrent I/O operations.
    +
    +
    LOG_VERIFY_CHECKSUMS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, perform a checksum verification just before and after writing + to the log.
    +
    +
    LOG_WRITE_QUEUE_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The size of the Write Queue.
    +
    +
    LogOverwriteException - Exception in com.sleepycat.je.rep
    +
    +
    Thrown when one or more log files are modified (overwritten) as the result + of a replication operation.
    +
    +
    LogVerificationException - Exception in com.sleepycat.je.util
    +
    +
    Thrown during log verification if a checksum cannot be verified or a log + entry is determined to be invalid by examining its contents.
    +
    +
    LogVerificationException(String) - Constructor for exception com.sleepycat.je.util.LogVerificationException
    +
     
    +
    LogVerificationException(String, Throwable) - Constructor for exception com.sleepycat.je.util.LogVerificationException
    +
     
    +
    LogVerificationInputStream - Class in com.sleepycat.je.util
    +
    +
    Verifies the checksums in an InputStream for a log file in a JE + Environment.
    +
    +
    LogVerificationInputStream(Environment, InputStream, String) - Constructor for class com.sleepycat.je.util.LogVerificationInputStream
    +
    +
    Creates a verification input stream.
    +
    +
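    A hedged usage sketch for the stream (LogVerificationException extends IOException, so one throws clause covers both; dir and fileName are illustrative):

        import java.io.BufferedInputStream;
        import java.io.File;
        import java.io.FileInputStream;
        import java.io.IOException;
        import java.io.InputStream;
        import com.sleepycat.je.Environment;
        import com.sleepycat.je.util.LogVerificationInputStream;

        class VerifyLogFile {
            // Reads a .jdb file to EOF; a checksum failure surfaces as a
            // LogVerificationException from read().
            static void verify(Environment env, String dir, String fileName)
                    throws IOException {
                InputStream in = new BufferedInputStream(
                    new FileInputStream(new File(dir, fileName)));
                LogVerificationInputStream vin =
                    new LogVerificationInputStream(env, in, fileName);
                try {
                    byte[] buf = new byte[64 * 1024];
                    while (vin.read(buf) != -1) {
                        // discard the data; reading is what drives verification
                    }
                } finally {
                    vin.close();
                }
            }
        }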
    LogVerificationReadableByteChannel - Class in com.sleepycat.je.util
    +
    +
    Verifies the checksums in a ReadableByteChannel for a log file in a + JE Environment.
    +
    +
    LogVerificationReadableByteChannel(Environment, ReadableByteChannel, String) - Constructor for class com.sleepycat.je.util.LogVerificationReadableByteChannel
    +
    +
    Creates a verification input stream.
    +
    +
    LogWriteException - Exception in com.sleepycat.je
    +
    +
    Thrown when an IOException or other failure occurs when writing to + the JE log.
    +
    +
    LongBinding - Class in com.sleepycat.bind.tuple
    +
    +
    A concrete TupleBinding for a Long primitive + wrapper or a long primitive.
    +
    +
    LongBinding() - Constructor for class com.sleepycat.bind.tuple.LongBinding
    +
     
    +
    longToEntry(long, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.LongBinding
    +
    +
    Converts a simple long value into an entry buffer.
    +
    +
    longToEntry(long, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.PackedLongBinding
    +
    +
    Converts a simple Long value into an entry buffer, using + PackedLong format.
    +
    +
    longToEntry(long, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.SortedPackedLongBinding
    +
    +
    Converts a simple Long value into an entry buffer, using + SortedPackedLong format.
    +
    +
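    The static longToEntry/entryToLong pair gives a compact round trip for long keys; a minimal sketch:

        import com.sleepycat.bind.tuple.LongBinding;
        import com.sleepycat.je.DatabaseEntry;

        class LongKeys {
            static DatabaseEntry toEntry(long value) {
                DatabaseEntry entry = new DatabaseEntry();
                LongBinding.longToEntry(value, entry); // fixed 8-byte tuple format
                return entry;
            }

            static long fromEntry(DatabaseEntry entry) {
                return LongBinding.entryToLong(entry);
            }
        }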
    + + + +

    M

    +
    +
    main(String[]) - Static method in class com.sleepycat.je.rep.util.DbEnableReplication
    +
    +
    Usage:
    +
    +
    main(String...) - Static method in class com.sleepycat.je.rep.util.DbGroupAdmin
    +
    +
    Usage:
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.rep.util.DbPing
    +
    +
    Usage:
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.rep.util.DbResetRepGroup
    +
    +
    Usage:
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbCacheSize
    +
    +
    Runs DbCacheSize as a command line utility.
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbDeleteReservedFiles
    +
     
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbDump
    +
    +
    The main used by the DbDump utility.
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbFilterStats
    +
    +
    The main used by the DbFilterStats utility.
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbLoad
    +
    +
    The main used by the DbLoad utility.
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbPrintLog
    +
    +
    The main used by the DbPrintLog utility.
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbSpace
    +
     
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbStat
    +
     
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbTruncateLog
    +
    +
    Usage:
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbVerify
    +
    +
    The main used by the DbVerify utility.
    +
    +
    main(String[]) - Static method in class com.sleepycat.je.util.DbVerifyLog
    +
     
    +
    main(String[]) - Static method in class com.sleepycat.persist.model.ClassEnhancer
    +
    +
    Enhances classes in the directories specified.
    +
    +
    makeSpace(int) - Method in class com.sleepycat.util.FastOutputStream
    +
    +
    Ensure that at least the given number of bytes are available in the + internal buffer.
    +
    +
    map() - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Returns a standard Java map based on this entity index.
    +
    +
    map() - Method in class com.sleepycat.persist.PrimaryIndex
    +
     
    +
    map() - Method in class com.sleepycat.persist.SecondaryIndex
    +
     
    +
    MapEntryParameter<K,V> - Class in com.sleepycat.collections
    +
    +
    A simple Map.Entry implementation that can be used as an + input parameter.
    +
    +
    MapEntryParameter(K, V) - Constructor for class com.sleepycat.collections.MapEntryParameter
    +
    +
    Creates a map entry with a given key and value.
    +
    +
    mark - Variable in class com.sleepycat.util.FastInputStream
    +
     
    +
    mark(int) - Method in class com.sleepycat.util.FastInputStream
    +
     
    +
    markSupported() - Method in class com.sleepycat.util.FastInputStream
    +
     
    +
    marshalEntry(TupleOutput) - Method in interface com.sleepycat.bind.tuple.MarshalledTupleEntry
    +
    +
    Construct the key or data tuple entry from the key or data object.
    +
    +
    MarshalledTupleEntry - Interface in com.sleepycat.bind.tuple
    +
    +
    A marshalling interface implemented by key, data or entity classes that + are represented as tuples.
    +
    +
    MarshalledTupleKeyEntity - Interface in com.sleepycat.bind.tuple
    +
    +
    A marshalling interface implemented by entity classes that represent keys as + tuples.
    +
    +
    marshalPrimaryKey(TupleOutput) - Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity
    +
    +
    Extracts the entity's primary key and writes it to the key output.
    +
    +
    marshalSecondaryKey(String, TupleOutput) - Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity
    +
    +
    Extracts the entity's secondary key and writes it to the key output.
    +
    +
    MasterReplicaTransitionException - Exception in com.sleepycat.je.rep
    +
    +
    Deprecated. +
    as of JE 5.0.88 because the environment no longer needs to + restart when transitioning from master to replica.
    +
    +
    +
    MasterReplicaTransitionException(EnvironmentImpl, Exception) - Constructor for exception com.sleepycat.je.rep.MasterReplicaTransitionException
    +
    +
    Deprecated.
    +
    MasterStateException - Exception in com.sleepycat.je.rep
    +
    +
    This exception indicates that the application attempted an operation that is + not permitted when it is in the ReplicatedEnvironment.State.MASTER + state.
    +
    +
    MasterTransferFailureException - Exception in com.sleepycat.je.rep
    +
    +
    Thrown by ReplicatedEnvironment.transferMaster(java.util.Set<java.lang.String>, int, java.util.concurrent.TimeUnit) if a Master Transfer + operation cannot be completed within the allotted time.
    +
    +
    MAX_CLOCK_DELTA - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Sets the maximum acceptable clock skew between this Replica and its + Feeder, which is the node that is the source of its replication stream.
    +
    +
    MAX_DISK - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    An upper limit on the number of bytes used for data storage.
    +
    +
    MAX_LENGTH - Static variable in class com.sleepycat.util.PackedInteger
    +
    +
    The maximum number of bytes needed to store an int value (5).
    +
    +
    MAX_LONG_LENGTH - Static variable in class com.sleepycat.util.PackedInteger
    +
    +
    The maximum number of bytes needed to store a long value (9).
    +
    +
    MAX_MEMORY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the JE main cache size in bytes.
    +
    +
    MAX_MEMORY_PERCENT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the JE main cache size as a percentage of the JVM maximum + memory.
    +
    +
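    MAX_MEMORY and MAX_MEMORY_PERCENT map to the typed setters on EnvironmentConfig; the absolute size, when non-zero, takes precedence over the percentage. A sketch (the sizes are arbitrary):

        import com.sleepycat.je.EnvironmentConfig;

        class CacheSizing {
            static EnvironmentConfig sized() {
                EnvironmentConfig config = new EnvironmentConfig();
                // Absolute main cache size (MAX_MEMORY): 512 MB.
                config.setCacheSize(512L * 1024 * 1024);
                // Alternatively, a fraction of the JVM max heap
                // (MAX_MEMORY_PERCENT):
                // config.setCachePercent(60);
                return config;
            }
        }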
    MAX_MESSAGE_SIZE - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The maximum message size which will be accepted by a node (to prevent + DOS attacks).
    +
    +
    MAX_OFF_HEAP_MEMORY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the number of bytes to be used as a secondary, off-heap cache.
    +
    +
    MemberChangeEvent - Class in com.sleepycat.je.rep.monitor
    +
    +
    MemberChangeEvent is the base class for all member status change events.
    +
    +
    MemberNotFoundException - Exception in com.sleepycat.je.rep
    +
    +
    Thrown when an operation requires a replication group member and that member + is not present in the replication group.
    +
    +
    minAckNodes(int) - Method in enum com.sleepycat.je.Durability.ReplicaAckPolicy
    +
    +
    Returns the minimum number of ELECTABLE replicas required to + implement the ReplicaAckPolicy for a given replication group size.
    +
    +
    Monitor - Class in com.sleepycat.je.rep.monitor
    +
    +
    Provides a lightweight mechanism to track the current master node and the + members of the replication group.
    +
    +
    Monitor(ReplicationConfig) - Constructor for class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Deprecated. +
    As of JE 5, replaced by + Monitor.Monitor(MonitorConfig)
    +
    +
    +
    Monitor(MonitorConfig) - Constructor for class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Creates a monitor instance.
    +
    +
    MonitorChangeEvent - Class in com.sleepycat.je.rep.monitor
    +
    +
    MonitorChangeEvent is the base class for all Monitor events.
    +
    +
    MonitorChangeListener - Interface in com.sleepycat.je.rep.monitor
    +
    +
    Applications can register for Monitor event notification through + Monitor.startListener(com.sleepycat.je.rep.monitor.MonitorChangeListener).
    +
    +
    MonitorConfig - Class in com.sleepycat.je.rep.monitor
    +
    +
    Specifies the attributes used by a replication Monitor.
    +
    +
    MonitorConfig() - Constructor for class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    An instance created using the default constructor is initialized with + the default settings.
    +
    +
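    A hedged sketch of bringing a Monitor up (group, node, and host names are illustrative):

        import com.sleepycat.je.rep.monitor.Monitor;
        import com.sleepycat.je.rep.monitor.MonitorChangeListener;
        import com.sleepycat.je.rep.monitor.MonitorConfig;

        class GroupMonitor {
            // Tracks the master and group membership without joining
            // the group as a data node.
            static Monitor start(MonitorChangeListener listener) {
                MonitorConfig config = new MonitorConfig();
                config.setGroupName("myRepGroup");
                config.setNodeName("monitor1");
                config.setNodeHostPort("monhost:7000");
                config.setHelperHosts("node1:5001,node2:5002");

                Monitor monitor = new Monitor(config);
                monitor.register();              // announce the monitor to the group
                monitor.startListener(listener); // begin receiving change events
                return monitor;
            }
        }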
    MonitorConfigBeanInfo - Class in com.sleepycat.je.rep.monitor
    +
     
    +
    MonitorConfigBeanInfo() - Constructor for class com.sleepycat.je.rep.monitor.MonitorConfigBeanInfo
    +
     
    +
    Mutation - Class in com.sleepycat.persist.evolve
    +
    +
    The base class for all mutations.
    +
    +
    Mutations - Class in com.sleepycat.persist.evolve
    +
    +
    A collection of mutations for configuring class evolution.
    +
    +
    Mutations() - Constructor for class com.sleepycat.persist.evolve.Mutations
    +
    +
    Creates an empty set of mutations.
    +
    +
    + + + +

    N

    +
    +
    NAME - Static variable in class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
    +
    The name:"CommitPointConsistencyPolicy" associated with this policy.
    +
    +
    NAME - Static variable in class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
    +
    The name:"NoConsistencyRequiredPolicy" associated with this policy.
    +
    +
    NAME - Static variable in class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
    +
    The name:"TimeConsistencyPolicy" associated with this policy.
    +
    +
    NetworkRestore - Class in com.sleepycat.je.rep
    +
    +
    Obtains log files for a Replica from other members of the replication + group.
    +
    +
    NetworkRestore() - Constructor for class com.sleepycat.je.rep.NetworkRestore
    +
    +
    Creates an instance of NetworkRestore suitable for restoring the logs at + this node.
    +
    +
    NetworkRestoreConfig - Class in com.sleepycat.je.rep
    +
    +
    NetworkRestoreConfig defines the configuration parameters used to configure + a NetworkRestore operation.
    +
    +
    NetworkRestoreConfig() - Constructor for class com.sleepycat.je.rep.NetworkRestoreConfig
    +
     
    +
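    The usual trigger for NetworkRestore is an InsufficientLogException from opening a replica that has fallen too far behind; a sketch of the recover-and-retry pattern:

        import java.io.File;
        import com.sleepycat.je.EnvironmentConfig;
        import com.sleepycat.je.rep.InsufficientLogException;
        import com.sleepycat.je.rep.NetworkRestore;
        import com.sleepycat.je.rep.NetworkRestoreConfig;
        import com.sleepycat.je.rep.ReplicatedEnvironment;
        import com.sleepycat.je.rep.ReplicationConfig;

        class RestoreThenOpen {
            static ReplicatedEnvironment open(File home,
                                              ReplicationConfig repConfig,
                                              EnvironmentConfig envConfig) {
                try {
                    return new ReplicatedEnvironment(home, repConfig, envConfig);
                } catch (InsufficientLogException e) {
                    // Copy the missing log files from another group member,
                    // then retry the open.
                    new NetworkRestore().execute(e, new NetworkRestoreConfig());
                    return new ReplicatedEnvironment(home, repConfig, envConfig);
                }
            }
        }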
    newMap(Database, Class<K>, Class<V>, boolean) - Method in class com.sleepycat.collections.TupleSerialFactory
    +
    +
    Creates a map from a previously opened Database object.
    +
    +
    NewMasterEvent - Class in com.sleepycat.je.rep.monitor
    +
    +
    The event generated upon detecting a new Master.
    +
    +
    newOutput() - Static method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
    Deprecated. + +
    +
    +
    newOutput(byte[]) - Static method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
    Deprecated. + +
    +
    +
    newSortedMap(Database, Class<K>, Class<V>, boolean) - Method in class com.sleepycat.collections.TupleSerialFactory
    +
    +
    Creates a sorted map from a previously opened Database object.
    +
    +
    next() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Returns the next element in the iteration.
    +
    +
    next() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the next value and returns it, or returns null + if there are no more values in the cursor range.
    +
    +
    next(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the next value and returns it, or returns null + if there are no more values in the cursor range.
    +
    +
    next() - Method in interface com.sleepycat.persist.ForwardCursor
    +
    +
    Moves the cursor to the next value and returns it, or returns null + if there are no more values in the cursor range.
    +
    +
    next(LockMode) - Method in interface com.sleepycat.persist.ForwardCursor
    +
    +
    Moves the cursor to the next value and returns it, or returns null + if there are no more values in the cursor range.
    +
    +
    nextDup() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the next value with the same key (duplicate) and + returns it, or returns null if no more values are present for the key at + the current position.
    +
    +
    nextDup(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the next value with the same key (duplicate) and + returns it, or returns null if no more values are present for the key at + the current position.
    +
    +
    nextIndex() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Returns the index of the element that would be returned by a subsequent + call to next.
    +
    +
    nextNoDup() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the next value with a different key and returns it, + or returns null if there are no more unique keys in the cursor range.
    +
    +
    nextNoDup(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the next value with a different key and returns it, + or returns null if there are no more unique keys in the cursor range.
    +
    +
    NO_CONSISTENCY - Static variable in class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
    +
    Convenience instance.
    +
    +
    NoConsistencyRequiredPolicy - Class in com.sleepycat.je.rep
    +
    +
    A consistency policy that lets a transaction on a replica using this policy + proceed regardless of the state of the Replica relative to the Master.
    +
    +
    NoConsistencyRequiredPolicy() - Constructor for class com.sleepycat.je.rep.NoConsistencyRequiredPolicy
    +
    +
    Create a NoConsistencyRequiredPolicy.
    +
    +
    NODE_DUP_TREE_MAX_ENTRIES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. +
    this property no longer has any effect; DatabaseConfig.setNodeMaxEntries(int) should be used instead.
    +
    +
    +
    NODE_HOST_PORT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Names the hostname and port associated with this node in the + replication group, e.g.
    +
    +
    NODE_MAX_ENTRIES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum number of entries in an internal btree node.
    +
    +
    NODE_NAME - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The node name uniquely identifies this node within the replication + group.
    +
    +
    NODE_PRIORITY - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    The election priority associated with this node.
    +
    +
    NODE_TYPE - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The type of this node.
    +
    +
    NodeState - Class in com.sleepycat.je.rep
    +
    +
    The current state of a replication node and the application this node is + running in.
    +
    +
    NodeType - Enum in com.sleepycat.je.rep
    +
    +
    The different types of nodes that can be in a replication group.
    +
    +
    notify(NewMasterEvent) - Method in interface com.sleepycat.je.rep.monitor.MonitorChangeListener
    +
    +
    The method is invoked whenever there is a new master associated with the + replication group.
    +
    +
    notify(GroupChangeEvent) - Method in interface com.sleepycat.je.rep.monitor.MonitorChangeListener
    +
    +
    The method is invoked whenever there is a change in the composition of + the replication group.
    +
    +
    notify(JoinGroupEvent) - Method in interface com.sleepycat.je.rep.monitor.MonitorChangeListener
    +
    +
    The method is invoked whenever a node joins the group, by successfully + opening its first + ReplicatedEnvironment handle.
    +
    +
    notify(LeaveGroupEvent) - Method in interface com.sleepycat.je.rep.monitor.MonitorChangeListener
    +
    +
    The method is invoked whenever a node leaves the group by closing its + last ReplicatedEnvironment handle.
    +
    +
    NotPersistent - Annotation Type in com.sleepycat.persist.model
    +
    +
    Overrides the default rules for field persistence and defines a field as + being non-persistent even when it is not declared with the + transient keyword.
    +
    +
    NotTransient - Annotation Type in com.sleepycat.persist.model
    +
    +
    Overrides the default rules for field persistence and defines a field as + being persistent even when it is declared with the transient + keyword.
    +
    +
    nullifyForeignKey(SecondaryDatabase, DatabaseEntry) - Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
     
    +
    nullifyForeignKey(D) - Method in class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
    +
    Clears the index key in a data object.
    +
    +
    nullifyForeignKey(SecondaryDatabase, DatabaseEntry) - Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator
    +
     
    +
    nullifyForeignKey(D) - Method in class com.sleepycat.bind.serial.TupleSerialKeyCreator
    +
    +
    Clears the index key in the deserialized data entry.
    +
    +
    nullifyForeignKey(D) - Method in class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator
    +
     
    +
    nullifyForeignKey(String) - Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity
    +
    +
    Clears the entity's secondary key fields for the given key name.
    +
    +
    nullifyForeignKey(SecondaryDatabase, DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator
    +
     
    +
    nullifyForeignKey(TupleInput, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleKeyCreator
    +
    +
    Clears the index key in the tuple data entry.
    +
    +
    nullifyForeignKey(TupleInput, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator
    +
     
    +
    nullifyForeignKey(SecondaryDatabase, DatabaseEntry) - Method in interface com.sleepycat.je.ForeignKeyNullifier
    +
    +
    Sets the foreign key reference to null in the datum of the primary + database.
    +
    +
    nullifyForeignKey(SecondaryDatabase, DatabaseEntry, DatabaseEntry, DatabaseEntry) - Method in interface com.sleepycat.je.ForeignMultiKeyNullifier
    +
    +
    Sets the foreign key reference to null in the datum of the primary + database.
    +
    +
    + + + +

    O

    +
    +
    objectToData(E, DatabaseEntry) - Method in interface com.sleepycat.bind.EntityBinding
    +
    +
    Extracts the data entry from an entity Object.
    +
    +
    objectToData(E, DatabaseEntry) - Method in class com.sleepycat.bind.serial.SerialSerialBinding
    +
     
    +
    objectToData(E) - Method in class com.sleepycat.bind.serial.SerialSerialBinding
    +
    +
    Extracts a data object from an entity object.
    +
    +
    objectToData(E, DatabaseEntry) - Method in class com.sleepycat.bind.serial.TupleSerialBinding
    +
     
    +
    objectToData(E) - Method in class com.sleepycat.bind.serial.TupleSerialBinding
    +
    +
    Extracts a data object from an entity object.
    +
    +
    objectToData(E) - Method in class com.sleepycat.bind.serial.TupleSerialMarshalledBinding
    +
     
    +
    objectToData(E, DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleTupleBinding
    +
     
    +
    objectToData(E, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleBinding
    +
    +
    Extracts a key tuple from an entity object.
    +
    +
    objectToData(E, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding
    +
     
    +
    objectToEntry(byte[], DatabaseEntry) - Method in class com.sleepycat.bind.ByteArrayBinding
    +
     
    +
    objectToEntry(E, DatabaseEntry) - Method in interface com.sleepycat.bind.EntryBinding
    +
    +
    Converts an Object into an entry buffer.
    +
    +
    objectToEntry(E, DatabaseEntry) - Method in class com.sleepycat.bind.serial.SerialBinding
    +
    +
    Serialize an object into an entry buffer.
    +
    +
    objectToEntry(BigDecimal, TupleOutput) - Method in class com.sleepycat.bind.tuple.BigDecimalBinding
    +
     
    +
    objectToEntry(BigInteger, TupleOutput) - Method in class com.sleepycat.bind.tuple.BigIntegerBinding
    +
     
    +
    objectToEntry(Boolean, TupleOutput) - Method in class com.sleepycat.bind.tuple.BooleanBinding
    +
     
    +
    objectToEntry(Byte, TupleOutput) - Method in class com.sleepycat.bind.tuple.ByteBinding
    +
     
    +
    objectToEntry(Character, TupleOutput) - Method in class com.sleepycat.bind.tuple.CharacterBinding
    +
     
    +
    objectToEntry(Double, TupleOutput) - Method in class com.sleepycat.bind.tuple.DoubleBinding
    +
     
    +
    objectToEntry(Float, TupleOutput) - Method in class com.sleepycat.bind.tuple.FloatBinding
    +
     
    +
    objectToEntry(Integer, TupleOutput) - Method in class com.sleepycat.bind.tuple.IntegerBinding
    +
     
    +
    objectToEntry(Long, TupleOutput) - Method in class com.sleepycat.bind.tuple.LongBinding
    +
     
    +
    objectToEntry(Integer, TupleOutput) - Method in class com.sleepycat.bind.tuple.PackedIntegerBinding
    +
     
    +
    objectToEntry(Long, TupleOutput) - Method in class com.sleepycat.bind.tuple.PackedLongBinding
    +
     
    +
    objectToEntry(Short, TupleOutput) - Method in class com.sleepycat.bind.tuple.ShortBinding
    +
     
    +
    objectToEntry(BigDecimal, TupleOutput) - Method in class com.sleepycat.bind.tuple.SortedBigDecimalBinding
    +
     
    +
    objectToEntry(Double, TupleOutput) - Method in class com.sleepycat.bind.tuple.SortedDoubleBinding
    +
     
    +
    objectToEntry(Float, TupleOutput) - Method in class com.sleepycat.bind.tuple.SortedFloatBinding
    +
     
    +
    objectToEntry(Integer, TupleOutput) - Method in class com.sleepycat.bind.tuple.SortedPackedIntegerBinding
    +
     
    +
    objectToEntry(Long, TupleOutput) - Method in class com.sleepycat.bind.tuple.SortedPackedLongBinding
    +
     
    +
    objectToEntry(String, TupleOutput) - Method in class com.sleepycat.bind.tuple.StringBinding
    +
     
    +
    objectToEntry(E, DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleBinding
    +
     
    +
    objectToEntry(E, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleBinding
    +
    +
    Converts a key or data object to a tuple entry.
    +
    +
    objectToEntry(TupleInput, DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleInputBinding
    +
     
    +
    objectToEntry(E, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleMarshalledBinding
    +
     
    +
    objectToKey(E, DatabaseEntry) - Method in interface com.sleepycat.bind.EntityBinding
    +
    +
    Extracts the key entry from an entity Object.
    +
    +
    objectToKey(E, DatabaseEntry) - Method in class com.sleepycat.bind.serial.SerialSerialBinding
    +
     
    +
    objectToKey(E) - Method in class com.sleepycat.bind.serial.SerialSerialBinding
    +
    +
    Extracts a key object from an entity object.
    +
    +
    objectToKey(E, DatabaseEntry) - Method in class com.sleepycat.bind.serial.TupleSerialBinding
    +
     
    +
    objectToKey(E, TupleOutput) - Method in class com.sleepycat.bind.serial.TupleSerialBinding
    +
    +
    Extracts a key tuple from an entity object.
    +
    +
    objectToKey(E, TupleOutput) - Method in class com.sleepycat.bind.serial.TupleSerialMarshalledBinding
    +
     
    +
    objectToKey(E, DatabaseEntry) - Method in class com.sleepycat.bind.tuple.TupleTupleBinding
    +
     
    +
    objectToKey(E, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleBinding
    +
    +
    Extracts a key tuple from an entity object.
    +
    +
    objectToKey(E, TupleOutput) - Method in class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding
    +
     
    +
    off - Variable in class com.sleepycat.util.FastInputStream
    +
     
    +
    OFFHEAP_CHECKSUM - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Can be used to add a checksum to each off-heap block when the block is + written, and validate the checksum when the block is read, for debugging + purposes.
    +
    +
    OFFHEAP_CORE_THREADS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The minimum number of threads in the off-heap eviction thread pool.
    +
    +
    OFFHEAP_EVICT_BYTES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The off-heap evictor will attempt to keep memory usage this number of + bytes below EnvironmentConfig.MAX_OFF_HEAP_MEMORY.
    +
    +
    OFFHEAP_KEEP_ALIVE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The duration that excess threads in the off-heap eviction thread pool + will stay idle; after this period, idle threads will terminate.
    +
    +
    OFFHEAP_MAX_THREADS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The maximum number of threads in the off-heap eviction thread pool.
    +
    +
    OFFHEAP_N_LRU_LISTS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of LRU lists in the off-heap JE cache.
    +
    +
    OP_RESET_LOGGING - Static variable in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    OP_RESET_LOGGING_LEVEL - Static variable in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    openCursor(Transaction, CursorConfig) - Method in class com.sleepycat.je.Database
    +
    +
    Returns a cursor into the database.
    +
    +
    openCursor(DiskOrderedCursorConfig) - Method in class com.sleepycat.je.Database
    +
    +
    Create a DiskOrderedCursor to iterate over the records in 'this' + Database.
    +
    +
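    openCursor is the entry point for base-API iteration; a minimal full-scan sketch:

        import com.sleepycat.je.Cursor;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.LockMode;
        import com.sleepycat.je.OperationStatus;

        class FullScan {
            static int countRecords(Database db) {
                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                int count = 0;
                Cursor cursor = db.openCursor(null, null); // no txn, default config
                try {
                    while (cursor.getNext(key, data, LockMode.DEFAULT)
                               == OperationStatus.SUCCESS) {
                        count++;
                    }
                } finally {
                    cursor.close();
                }
                return count;
            }
        }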
    openCursor(Transaction, CursorConfig) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    Obtain a cursor on a database, returning a SecondaryCursor.
    +
    +
    openDatabase(Transaction, String, DatabaseConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Opens, and optionally creates, a Database.
    +
    +
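    And the corresponding open-or-create sequence for the database itself (envHome and "myDb" are illustrative):

        import java.io.File;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseConfig;
        import com.sleepycat.je.Environment;
        import com.sleepycat.je.EnvironmentConfig;

        class OpenDb {
            static Database open(File envHome) {
                EnvironmentConfig envConfig = new EnvironmentConfig();
                envConfig.setAllowCreate(true);           // create env if absent
                Environment env = new Environment(envHome, envConfig);

                DatabaseConfig dbConfig = new DatabaseConfig();
                dbConfig.setAllowCreate(true);            // create db if absent
                return env.openDatabase(null, "myDb", dbConfig);
            }
        }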
    openDatabase(String, DatabaseConfig) - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    openDiskOrderedCursor(Database[], DiskOrderedCursorConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Create a DiskOrderedCursor to iterate over the records of a given set + of databases.
    +
    +
    openEnv(boolean) - Method in class com.sleepycat.je.util.DbDump
    +
     
    +
    openSecondaryCursor(Transaction, CursorConfig) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    + +
    +
    openSecondaryDatabase(Transaction, String, Database, SecondaryConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Opens and optionally creates a SecondaryDatabase.
    +
    +
    openSecondaryDatabase(String, Database, SecondaryConfig) - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    openSequence(Transaction, DatabaseEntry, SequenceConfig) - Method in class com.sleepycat.je.Database
    +
    +
    Opens a sequence in the database.
    +
    +
    OperationFailureException - Exception in com.sleepycat.je
    +
    +
    Indicates that a failure has occurred that impacts the current operation + and/or transaction.
    +
    +
    operationList - Variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    OperationResult - Class in com.sleepycat.je
    +
    +
    The result of an operation that successfully reads or writes a record.
    +
    +
    OperationStatus - Enum in com.sleepycat.je
    +
    +
    Status values from database operations.
    +
    +
    outputDirectory - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    outputFile - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    outputToEntry(TupleOutput, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
    Utility method to set the data in an entry buffer to the data in a tuple + output object.
    +
    +
    + + + +

    P

    +
    +
    PackedInteger - Class in com.sleepycat.util
    +
    +
    Static methods for reading and writing packed integers.
    +
    +
    PackedInteger() - Constructor for class com.sleepycat.util.PackedInteger
    +
     
    +
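    A round-trip sketch for the static read/write pair (writeInt returns the offset just past the bytes written):

        import com.sleepycat.util.PackedInteger;

        class PackedRoundTrip {
            static void demo(int value) {
                byte[] buf = new byte[PackedInteger.MAX_LENGTH]; // 5 bytes max
                int nextOffset = PackedInteger.writeInt(buf, 0, value);
                int decoded = PackedInteger.readInt(buf, 0);
                System.out.println(decoded + " used " + nextOffset + " byte(s)");
            }
        }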
    PackedIntegerBinding - Class in com.sleepycat.bind.tuple
    +
    +
    A concrete TupleBinding for an unsorted Integer + primitive wrapper or an unsorted int primitive, that stores the + value in the smallest number of bytes possible.
    +
    +
    PackedIntegerBinding() - Constructor for class com.sleepycat.bind.tuple.PackedIntegerBinding
    +
     
    +
    PackedLongBinding - Class in com.sleepycat.bind.tuple
    +
    +
    A concrete TupleBinding for an unsorted Long + primitive wrapper or an unsorted long primitive, that stores + the value in the smallest number of bytes possible.
    +
    +
    PackedLongBinding() - Constructor for class com.sleepycat.bind.tuple.PackedLongBinding
    +
     
    +
    parse(String) - Static method in class com.sleepycat.je.Durability
    +
    +
    Parses the string and returns the durability it represents.
    +
    +
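    The parse format mirrors Durability.toString(): local sync policy, replica sync policy, then replica ack policy, comma-separated. For example:

        import com.sleepycat.je.Durability;

        class DurabilityParse {
            static Durability example() {
                // local WRITE_NO_SYNC, replica NO_SYNC, ack from a simple majority
                return Durability.parse("WRITE_NO_SYNC,NO_SYNC,SIMPLE_MAJORITY");
            }
        }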
    parseArgs(String[]) - Method in class com.sleepycat.je.util.DbDump
    +
     
    +
    PartialComparator - Interface in com.sleepycat.je
    +
    +
    A tag interface used to mark a B-tree or duplicate comparator class as a + partial comparator.
    +
    +
    Persistent - Annotation Type in com.sleepycat.persist.model
    +
    +
    Identifies a persistent class that is not an Entity class or a + simple type.
    +
    +
    PersistentProxy<T> - Interface in com.sleepycat.persist.model
    +
    +
    Implemented by a proxy class to represent the persistent state of a + (non-persistent) proxied class.
    +
    +
    preload(long) - Method in class com.sleepycat.je.Database
    +
    +
    Deprecated. +
    As of JE 2.0.83, replaced by Database.preload(PreloadConfig).

    +
    +
    +
    preload(long, long) - Method in class com.sleepycat.je.Database
    +
    +
    Deprecated. +
    As of JE 2.0.101, replaced by Database.preload(PreloadConfig).

    +
    +
    +
    preload(PreloadConfig) - Method in class com.sleepycat.je.Database
    +
    +
    Preloads the cache.
    +
    +
    preload(Database[], PreloadConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Preloads the cache with multiple databases.
    +
    +
    PreloadConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of an application invoked preload operation.
    +
    +
    PreloadConfig() - Constructor for class com.sleepycat.je.PreloadConfig
    +
    +
    Default configuration used if null is passed to Database.preload.
    +
    +
    PreloadConfig.Phases - Enum in com.sleepycat.je
    +
    +
    Preload progress listeners report this phase value, along with a + count of the number of times that the preload has fetched from disk.
    +
    +
    PreloadStats - Class in com.sleepycat.je
    +
    + +
    +
    PreloadStatus - Class in com.sleepycat.je
    +
    +
    Describes the result of the Database.preload operation.
    +
    +
    PreloadStatus(String) - Constructor for class com.sleepycat.je.PreloadStatus
    +
     
    +
    premain(String, Instrumentation) - Static method in class com.sleepycat.persist.model.ClassEnhancer
    +
    +
    Enhances classes as specified by a JVM -javaagent argument.
    +
    +
    prepare(Xid) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    prev() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the previous value and returns it, or returns null + if there are no preceding values in the cursor range.
    +
    +
    prev(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the previous value and returns it, or returns null + if there are no preceding values in the cursor range.
    +
    +
    prevDup() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the previous value with the same key (duplicate) and + returns it, or returns null if no preceding values are present for the + key at the current position.
    +
    +
    prevDup(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the previous value with the same key (duplicate) and + returns it, or returns null if no preceding values are present for the + key at the current position.
    +
    +
    previous() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Returns the previous element in the iteration.
    +
    +
    previousIndex() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Returns the index of the element that would be returned by a subsequent + call to previous.
    +
    +
    prevNoDup() - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the preceding value with a different key and returns + it, or returns null if there are no preceding unique keys in the cursor + range.
    +
    +
    prevNoDup(LockMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Moves the cursor to the preceding value with a different key and returns + it, or returns null if there are no preceding unique keys in the cursor + range.
    +
    +
    PrimaryIndex<PK,E> - Class in com.sleepycat.persist
    +
    +
    The primary index for an entity class and its primary key.
    +
    +
    PrimaryIndex(Database, Class<PK>, EntryBinding<PK>, Class<E>, EntityBinding<E>) - Constructor for class com.sleepycat.persist.PrimaryIndex
    +
    +
    Creates a primary index without using an EntityStore.
    +
    +
    PrimaryKey - Annotation Type in com.sleepycat.persist.model
    +
    +
    Indicates the primary key field of an entity class.
    +
    +
    PrimaryKeyAssigner - Interface in com.sleepycat.collections
    +
    +
    An interface implemented to assign new primary key values.
    +
    +
    primaryKeyBinding - Variable in class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
     
    +
    PrimaryKeyMetadata - Class in com.sleepycat.persist.model
    +
    +
    The metadata for a primary key field.
    +
    +
    PrimaryKeyMetadata(String, String, String, String) - Constructor for class com.sleepycat.persist.model.PrimaryKeyMetadata
    +
    +
    Used by an EntityModel to construct primary key metadata.
    +
    +
    print(PrintStream) - Method in class com.sleepycat.je.util.DbSpace
    +
    +
    Calculates utilization and prints a report to the given output stream.
    +
    +
    printHeader(PrintStream, boolean, boolean) - Method in class com.sleepycat.je.util.DbDump
    +
     
    +
    printStartupInfo(PrintStream) - Method in class com.sleepycat.je.Environment
    +
    +
    Print a detailed report about the costs of different phases of + environment startup.
    +
    +
    printStartupInfo(PrintStream) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    Print a detailed report about the costs of different phases of + environment startup.
    +
    +
    printUsage(String) - Method in class com.sleepycat.je.util.DbDump
    +
     
    +
    progress(T, long, long) - Method in interface com.sleepycat.je.ProgressListener
    +
    +
    Called by BDB JE to indicate to the user that progress has been + made on a potentially long running or asynchronous operation.
    +
    +
    ProgressListener<T extends java.lang.Enum<T>> - Interface in com.sleepycat.je
    +
    +
    ProgressListener provides feedback to the application that progress is being + made on a potentially long running or asynchronous JE operation.
    +
    +
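    As an illustration of the callback shape, a hedged sketch wiring a listener into a preload (assuming PreloadConfig.setProgressListener, which reports PreloadConfig.Phases values):

        import com.sleepycat.je.PreloadConfig;
        import com.sleepycat.je.ProgressListener;

        class PreloadProgress {
            static PreloadConfig configured() {
                PreloadConfig config = new PreloadConfig();
                config.setProgressListener(
                    new ProgressListener<PreloadConfig.Phases>() {
                        @Override
                        public boolean progress(PreloadConfig.Phases phase,
                                                long n, long total) {
                            System.out.println(phase + ": " + n + "/" + total);
                            return true; // returning false cancels the operation
                        }
                    });
                return config;
            }
        }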
    propertiesName - Static variable in class com.sleepycat.util.ConfigBeanInfoBase
    +
     
    +
    PROTOCOL_OLD_STRING_ENCODING - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    When set to true, which is currently the default, the + replication network protocol will use the JVM platform default charset + (text encoding) for node names and host names.
    +
    +
    put(K, V) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Associates the specified value with the specified key in this map + (optional operation).
    +
    +
    put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) - Method in class com.sleepycat.je.Cursor
    +
    +
    Inserts or updates a record according to the specified Put + type.
    +
    +
    put(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Cursor
    +
    +
    Stores a key/data pair into the database.
    +
    +
    put(Transaction, DatabaseEntry, DatabaseEntry, Put, WriteOptions) - Method in class com.sleepycat.je.Database
    +
    +
    Inserts or updates a record according to the specified Put + type.
    +
    +
    put(Transaction, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Database
    +
    +
    Stores the key/data pair into the database.
    +
    +
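    A minimal base-API write using the bindings indexed earlier (a null transaction means auto-commit):

        import com.sleepycat.bind.tuple.LongBinding;
        import com.sleepycat.bind.tuple.StringBinding;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.OperationStatus;

        class BasePut {
            static OperationStatus store(Database db, long id, String name) {
                DatabaseEntry key = new DatabaseEntry();
                DatabaseEntry data = new DatabaseEntry();
                LongBinding.longToEntry(id, key);
                StringBinding.stringToEntry(name, data);
                return db.put(null, key, data); // insert or overwrite
            }
        }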
    Put - Enum in com.sleepycat.je
    +
    +
    The operation type passed to "put" methods on databases and cursors.
    +
    +
    put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    This operation is not allowed on a secondary cursor.
    +
    +
    put(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    This operation is not allowed on a secondary cursor.
    +
    +
    put(Transaction, DatabaseEntry, DatabaseEntry, Put, WriteOptions) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    This operation is not allowed on a secondary database.
    +
    +
    put(Transaction, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    This operation is not allowed on a secondary database.
    +
    +
    put(E) - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Inserts an entity and returns null, or updates it if the primary key + already exists and returns the existing entity.
    +
    +
    put(Transaction, E) - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Inserts an entity and returns null, or updates it if the primary key + already exists and returns the existing entity.
    +
    +
    put(Transaction, E, Put, WriteOptions) - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Inserts or updates an entity, using Put type and WriteOptions + parameters, and returning an OperationResult.
    +
    +
    putAll(Map<? extends K, ? extends V>) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Copies all of the mappings from the specified map to this map (optional + operation).
    +
    +
    putCurrent(DatabaseEntry) - Method in class com.sleepycat.je.Cursor
    +
    +
    Replaces the data in the key/data pair at the current cursor position.
    +
    +
    putCurrent(DatabaseEntry) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    This operation is not allowed on a secondary cursor.
    +
    +
    putIfAbsent(K, V) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    If the specified key is not already associated with a value, associate + it with the given value.
    +
    +
    putNoDupData(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Cursor
    +
    +
    Stores a key/data pair into the database.
    +
    +
    putNoDupData(Transaction, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Database
    +
    +
    Stores the key/data pair into the database if it does not already appear + in the database.
    +
    +
    putNoDupData(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    This operation is not allowed on a secondary cursor.
    +
    +
    putNoDupData(Transaction, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    This operation is not allowed on a secondary database.
    +
    +
    putNoOverwrite(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Cursor
    +
    +
    Stores a key/data pair into the database.
    +
    +
    putNoOverwrite(Transaction, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.Database
    +
    +
    Stores the key/data pair into the database if the key does not already + appear in the database.
    +
    +
    putNoOverwrite(DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.SecondaryCursor
    +
    +
    This operation is not allowed on a secondary cursor.
    +
    +
    putNoOverwrite(Transaction, DatabaseEntry, DatabaseEntry) - Method in class com.sleepycat.je.SecondaryDatabase
    +
    +
    This operation is not allowed on a secondary database.
    +
    +
    putNoOverwrite(E) - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Inserts an entity and returns true, or returns false if the primary key + already exists.
    +
    +
    putNoOverwrite(Transaction, E) - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Inserts an entity and returns true, or returns false if the primary key + already exists.
    +
    +
    putNoReturn(E) - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Inserts an entity, or updates it if the primary key already exists (does + not return the existing entity).
    +
    +
    putNoReturn(Transaction, E) - Method in class com.sleepycat.persist.PrimaryIndex
    +
    +
    Inserts an entity, or updates it if the primary key already exists (does + not return the existing entity).
    +
    +
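    The three PrimaryIndex put flavors differ only in what they report back; a sketch for any entity type E keyed by Long:

        import com.sleepycat.persist.PrimaryIndex;

        class PutVariants {
            static <E> void store(PrimaryIndex<Long, E> index, E entity) {
                E previous = index.put(entity);             // null if the key was new
                boolean inserted = index.putNoOverwrite(entity); // false if key exists
                index.putNoReturn(entity);                  // upsert, no read-back
                System.out.println(previous + " inserted=" + inserted);
            }
        }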
    + + + +

    Q

    +
    +
    QuorumPolicy - Enum in com.sleepycat.je.rep
    +
    +
    The quorum policy determines the number of nodes that must participate to + pick the winner of an election, and therefore the master of the group.
    +
    +
    quorumSize(int) - Method in enum com.sleepycat.je.rep.QuorumPolicy
    +
    +
    Returns the minimum number of nodes needed to meet the quorum policy.
    +
    +
    + + + +

    R

    +
    +
    RawField - Interface in com.sleepycat.persist.raw
    +
    +
    The definition of a field in a RawType.
    +
    +
    RawObject - Class in com.sleepycat.persist.raw
    +
    +
    A raw instance that can be used with a RawStore or Conversion.
    +
    +
    RawObject(RawType, Map<String, Object>, RawObject) - Constructor for class com.sleepycat.persist.raw.RawObject
    +
    +
    Creates a raw object with a given set of field values for a complex + type.
    +
    +
    RawObject(RawType, Object[]) - Constructor for class com.sleepycat.persist.raw.RawObject
    +
    +
    Creates a raw object with the given array elements for an array type.
    +
    +
    RawObject(RawType, String) - Constructor for class com.sleepycat.persist.raw.RawObject
    +
    +
    Creates a raw object with the given enum value for an enum type.
    +
    +
    RawStore - Class in com.sleepycat.persist.raw
    +
    +
    Provides access to the raw data in a store for use by general purpose tools.
    +
    +
    RawStore(Environment, String, StoreConfig) - Constructor for class com.sleepycat.persist.raw.RawStore
    +
    +
    Opens an entity store for raw data access.
    +
    +
    RawType - Interface in com.sleepycat.persist.raw
    +
    +
    The type definition for a simple or complex persistent type, or an array + of persistent types.
    +
    +
    read() - Method in class com.sleepycat.je.util.LogVerificationInputStream
    +
    read(byte[]) - Method in class com.sleepycat.je.util.LogVerificationInputStream
    +
    read(byte[], int, int) - Method in class com.sleepycat.je.util.LogVerificationInputStream
    +
    read(ByteBuffer) - Method in class com.sleepycat.je.util.LogVerificationReadableByteChannel
    +
    read() - Method in class com.sleepycat.util.FastInputStream
    +
     
    +
    read(byte[]) - Method in class com.sleepycat.util.FastInputStream
    +
     
    +
    read(byte[], int, int) - Method in class com.sleepycat.util.FastInputStream
    +
     
    +
    READ_COMMITTED - Static variable in class com.sleepycat.je.CursorConfig
    +
    +
    A convenience instance to configure a cursor for read committed + isolation.
    +
    +
    READ_ONLY_TXN - Static variable in class com.sleepycat.je.Durability
    +
    +
    Deprecated. + +
    +
    +
    READ_UNCOMMITTED - Static variable in class com.sleepycat.je.CursorConfig
    +
    +
    A convenience instance to configure read operations performed by the + cursor to return modified but not yet committed data.
    +
    +
    readBigDecimal() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsorted BigDecimal.
    +
    +
    readBigInteger() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a BigInteger.
    +
    +
    readBoolean() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a boolean (one byte) unsigned value from the buffer and returns + true if it is non-zero and false if it is zero.
    +
    +
    readByte() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a signed byte (one byte) value from the buffer.
    +
    +
    readBytes(int) - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads the specified number of bytes from the buffer, converting each + unsigned byte value to a character of the resulting string.
    +
    +
    readBytes(char[]) - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads the specified number of bytes from the buffer, converting each + unsigned byte value to a character of the resulting array.
    +
    +
    readChar() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a char (two byte) unsigned value from the buffer.
    +
    +
    readChars(int) - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads the specified number of characters from the buffer, converting + each two byte unsigned value to a character of the resulting string.
    +
    +
    readChars(char[]) - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads the specified number of characters from the buffer, converting + each two byte unsigned value to a character of the resulting array.
    +
    +
    readClassDescriptor() - Method in class com.sleepycat.bind.serial.SerialInput
    +
     
    +
    readDouble() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsorted double (eight byte) value from the buffer.
    +
    +
    readFast() - Method in class com.sleepycat.util.FastInputStream
    +
    +
    Equivalent to read() but does not throw + IOException.
    +
    +
    readFast(byte[]) - Method in class com.sleepycat.util.FastInputStream
    +
    +
    Equivalent to read(byte[]) but does not throw + IOException.
    +
    +
    readFast(byte[], int, int) - Method in class com.sleepycat.util.FastInputStream
    +
    +
    Equivalent to read(byte[],int,int) but does not throw + IOException.
    +
    +
    readFloat() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsorted float (four byte) value from the buffer.
    +
    +
    readInt() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a signed int (four byte) value from the buffer.
    +
    +
    readInt(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Reads a packed integer at the given buffer offset and returns it.
    +
    +
    readLong() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a signed long (eight byte) value from the buffer.
    +
    +
    readLong(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Reads a packed long integer at the given buffer offset and returns it.
    +
    +
    ReadOptions - Class in com.sleepycat.je
    +
    +
    Options for calling methods that read records.
    +
    +
    ReadOptions() - Constructor for class com.sleepycat.je.ReadOptions
    +
    +
    Constructs a ReadOptions object with default values for all properties.
    +
    +
    readPackedInt() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsorted packed integer.
    +
    +
    readPackedLong() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsorted packed long integer.
    +
    +
    readShort() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a signed short (two byte) value from the buffer.
    +
    +
    readSortedBigDecimal() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a sorted BigDecimal, with support for correct default + sorting.
    +
    +
    readSortedDouble() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a sorted double (eight byte) value from the buffer.
    +
    +
    readSortedFloat() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a sorted float (four byte) value from the buffer.
    +
    +
    readSortedInt(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Reads a sorted packed integer at the given buffer offset and returns it.
    +
    +
    readSortedLong(byte[], int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
    Reads a sorted packed long integer at the given buffer offset and + returns it.
    +
    +
    readSortedPackedInt() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a sorted packed integer.
    +
    +
    readSortedPackedLong() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a sorted packed long integer.
    +
    +
    readString() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a null-terminated UTF string from the data buffer and converts + the data from UTF to Unicode.
    +
    +
    readString(int) - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a string of the specified number of UTF characters from the data + buffer and converts the data from UTF to Unicode.
    +
    +
    readString(char[]) - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads a string of the specified number of UTF characters from the data + buffer and converts the data from UTF to Unicode.
    +
    +
    readUnsignedByte() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsigned byte (one byte) value from the buffer.
    +
    +
    readUnsignedInt() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsigned int (four byte) value from the buffer.
    +
    +
    readUnsignedShort() - Method in class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Reads an unsigned short (two byte) value from the buffer.
    +
    +
    recover(int) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    RecoveryProgress - Enum in com.sleepycat.je
    +
    +
    Describes the different phases of initialization that + may be executed when an Environment is instantiated.
    +
    +
    register() - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Registers the monitor with the group so that it can be kept informed + of the outcome of elections and group membership changes.
    +
    +
    registerAppStateMonitor(AppStateMonitor) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    Registers an AppStateMonitor to receive the application state + in which this ReplicatedEnvironment is running.
    +
    +
    registerClass(Class) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Registers a persistent class, most importantly, a PersistentProxy class or entity subclass.
    +
    +
    Relationship - Enum in com.sleepycat.persist.model
    +
    +
    Defines the relationship between instances of the entity class and the + secondary keys.
    +
    +
    remove(Object) - Method in class com.sleepycat.collections.StoredEntrySet
    +
    +
    Removes the specified element from this set if it is present (optional + operation).
    +
    +
    remove() - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Removes the last element that was returned by next or previous (optional + operation).
    +
    +
    remove(Object) - Method in class com.sleepycat.collections.StoredKeySet
    +
    +
    Removes the specified key from this set if it is present (optional + operation).
    +
    +
    remove(Object) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Removes the mapping for this key from this map if present (optional + operation).
    +
    +
    remove(Object, Object) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Remove entry for key only if currently mapped to given value.
    +
    +
    remove(Object) - Method in class com.sleepycat.collections.StoredValueSet
    +
    +
    Removes the specified value from this set if it is present (optional + operation).
    +
    +
    removeAll(Collection<?>) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Removes all this collection's elements that are also contained in the + specified collection (optional operation).
    +
    +
    removeDatabase(Transaction, String) - Method in class com.sleepycat.je.Environment
    +
    +
    Removes a database from the environment, discarding all records in the + database and removing the database name itself.
    +
    +
    removeDatabase(String) - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    removeFileProtection(String) - Method in class com.sleepycat.je.util.DbBackup
    +
    +
    Removes protection for a file in the backup set.
    +
    +
    removeMember(String) - Method in class com.sleepycat.je.rep.util.DbGroupAdmin
    +
    +
    Remove a node from the replication group.
    +
    +
    removeMember(String) - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Removes this node from the group, so that it is no longer a member of + the group.
    +
    +
    removeSequence(Transaction, DatabaseEntry) - Method in class com.sleepycat.je.Database
    +
    +
    Removes the sequence from the database.
    +
    +
    renameDatabase(Transaction, String, String) - Method in class com.sleepycat.je.Environment
    +
    +
    Renames a database, without removing the records it contains.
    +
    +
    Renamer - Class in com.sleepycat.persist.evolve
    +
    +
    A mutation for renaming a class or field without changing the instance or + field value.
    +
    +
    Renamer(String, int, String) - Constructor for class com.sleepycat.persist.evolve.Renamer
    +
    +
    Creates a mutation for renaming the class of all instances of the given + class version.
    +
    +
    Renamer(String, int, String, String) - Constructor for class com.sleepycat.persist.evolve.Renamer
    +
    +
    Creates a mutation for renaming the given field for all instances of the + given class version.
    +
    +
    REP_STREAM_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Deprecated. +
    and no longer used as of JE 7.5. Reserved files are now + retained based on available disk space; + EnvironmentConfig.MAX_DISK and + EnvironmentConfig.FREE_DISK should be used instead. + However, this param is still used when some, but not all, nodes in a + group have been upgraded to 7.5 or later.
    +
    +
    +
    replace(K, V) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Replace entry for key only if currently mapped to some value.
    +
    +
    replace(K, V, V) - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Replace entry for key only if currently mapped to given value.
    +
    +
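    The two-argument remove and the replace methods follow java.util.Map's conditional semantics. A minimal sketch, assuming an already-open Database handle (opening elided) and the tuple StringBinding for both keys and values:

        import com.sleepycat.bind.tuple.StringBinding;
        import com.sleepycat.collections.StoredMap;
        import com.sleepycat.je.Database;

        public class StoredMapDemo {
            static void demo(Database db) { // db: an open Database (opening elided)
                StringBinding binding = new StringBinding();
                StoredMap<String, String> map =
                    new StoredMap<>(db, binding, binding, /*writeAllowed=*/ true);

                map.put("k", "v1");
                map.replace("k", "v1", "v2"); // succeeds: currently mapped to "v1"
                map.remove("k", "other");     // no-op: current value is "v2"
            }
        }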
    REPLAY_COST_PERCENT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Deprecated. +
    and no longer used as of JE 7.5. Reserved files are now + retained based on available disk space; + EnvironmentConfig.MAX_DISK and + EnvironmentConfig.FREE_DISK should be used instead.
    +
    +
    +
    REPLAY_DB_HANDLE_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    + +
    +
    REPLAY_DB_HANDLE_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    The maximum amount of time that an inactive database handle is kept open + during a replay of the replication stream.
    +
    +
    REPLAY_FREE_DISK_PERCENT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Deprecated. +
    and no longer needed as of JE 7.5. Reserved files are now + retained based on available disk space; + EnvironmentConfig.MAX_DISK and + EnvironmentConfig.FREE_DISK should be used instead. + However, this param is still used when it has been specified and + is non-zero, and FREE_DISK has not been specified. In this case, + REPLAY_FREE_DISK_PERCENT overrides the FREE_DISK default value. If + both REPLAY_FREE_DISK_PERCENT and FREE_DISK are specified, an + IllegalArgumentException is thrown.
    +
    +
    +
    REPLAY_MAX_OPEN_DB_HANDLES - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    + +
    +
    REPLAY_MAX_OPEN_DB_HANDLES - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    The maximum number of most recently used database handles that + are kept open during the replay of the replication stream.
    +
    +
    REPLAY_TXN_LOCK_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The maximum amount of time for a replay transaction to wait for a lock.
    +
    +
    REPLICA_ACK_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The amount of time that the + Transaction.commit(com.sleepycat.je.Durability) + on the Master will wait for a sufficient number of acknowledgments from + electable Replicas.
    +
    +
    REPLICA_GROUP_COMMIT_INTERVAL - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The time interval during which transactions may be grouped to amortize + the cost of fsync when a transaction commits with SyncPolicy.SYNC on the + Replica.
    +
    +
    REPLICA_MAX_GROUP_COMMIT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The maximum number of transactions that can be grouped to amortize the + cost of an fsync when a transaction commits with SyncPolicy.SYNC on the + Replica.
    +
    +
    REPLICA_RECEIVE_BUFFER_SIZE - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    The size of the TCP receive buffer associated with the socket used + by the replica to transfer the replication stream.
    +
    +
    REPLICA_TIMEOUT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    A heartbeat is exchanged between the feeder and replica to ensure they + are alive.
    +
    +
    ReplicaConsistencyException - Exception in com.sleepycat.je.rep
    +
    +
    This exception is thrown by a Replica to indicate it could not meet the + consistency requirements as defined by the + ReplicaConsistencyPolicy in effect for the transaction, within + the allowed timeout period.
    +
    +
    ReplicaConsistencyException(String, ReplicaConsistencyPolicy) - Constructor for exception com.sleepycat.je.rep.ReplicaConsistencyException
    +
     
    +
    ReplicaConsistencyPolicy - Interface in com.sleepycat.je
    +
    +
    The interface for Consistency policies used to provide consistency + guarantees at a Replica.
    +
    +
    ReplicaStateException - Exception in com.sleepycat.je.rep
    +
    +
    This exception indicates that the application attempted an operation that is + not permitted when it is in the ReplicatedEnvironment.State.REPLICA + state.
    +
    +
    ReplicatedEnvironment - Class in com.sleepycat.je.rep
    +
    +
    A replicated database environment that is a node in a replication + group.
    +
    +
    ReplicatedEnvironment(File, ReplicationConfig, EnvironmentConfig, ReplicaConsistencyPolicy, QuorumPolicy) - Constructor for class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    Creates a replicated environment handle and starts participating in the + replication group as either a Master or a Replica.
    +
    +
    ReplicatedEnvironment(File, ReplicationConfig, EnvironmentConfig) - Constructor for class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
    A convenience constructor that defaults the replica consistency policy + and the initial election policy to be used.
    +
    +
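    A minimal sketch using the three-argument convenience constructor; the group name, node name, and host:port values are placeholders:

        import java.io.File;
        import com.sleepycat.je.EnvironmentConfig;
        import com.sleepycat.je.rep.ReplicatedEnvironment;
        import com.sleepycat.je.rep.ReplicationConfig;

        public class RepEnvDemo {
            static ReplicatedEnvironment open(File envHome) {
                // Placeholder group name, node name, and host:port.
                ReplicationConfig repConfig =
                    new ReplicationConfig("myGroup", "node1", "host1:5001");
                repConfig.setHelperHosts("host1:5001");

                EnvironmentConfig envConfig = new EnvironmentConfig();
                envConfig.setAllowCreate(true);
                envConfig.setTransactional(true); // replication requires transactions

                // Joins the group as Master or Replica, per the election outcome.
                return new ReplicatedEnvironment(envHome, repConfig, envConfig);
            }
        }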
    ReplicatedEnvironment.State - Enum in com.sleepycat.je.rep
    +
    +
    The replication node state determines the operations that the + application can perform against its replicated environment.
    +
    +
    ReplicatedEnvironmentStats - Class in com.sleepycat.je.rep
    +
    +
    Statistics for a replicated environment.
    +
    +
    ReplicationConfig - Class in com.sleepycat.je.rep
    +
    +
    Specifies the immutable attributes of a replicated environment.
    +
    +
    ReplicationConfig() - Constructor for class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Creates a ReplicationConfig initialized with the system default + settings.
    +
    +
    ReplicationConfig(String, String, String) - Constructor for class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Creates a ReplicationConfig initialized with the system default + settings and the specified group name, node name, and hostname/port + values.
    +
    +
    ReplicationConfig(Properties) - Constructor for class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Creates a ReplicationConfig which includes the properties specified in + the properties parameter.
    +
    +
    ReplicationGroup - Class in com.sleepycat.je.rep
    +
    +
    An administrative view of the collection of nodes that form the replication + group.
    +
    +
    ReplicationGroupAdmin - Class in com.sleepycat.je.rep.util
    +
    +
    Administrative APIs for use by applications which do not have direct access + to a replicated environment.
    +
    +
    ReplicationGroupAdmin(String, Set<InetSocketAddress>) - Constructor for class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Constructs a group admin object.
    +
    +
    ReplicationMutableConfig - Class in com.sleepycat.je.rep
    +
    +
    Specifies the attributes that may be changed after a ReplicatedEnvironment has been created.
    +
    +
    ReplicationMutableConfig() - Constructor for class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Create a ReplicationMutableConfig initialized with the system + default settings.
    +
    +
    ReplicationNode - Interface in com.sleepycat.je.rep
    +
    +
    An administrative view of a node in a replication group.
    +
    +
    ReplicaWriteException - Exception in com.sleepycat.je.rep
    +
    +
    This exception indicates that an update operation or transaction commit + or abort was attempted while in the + ReplicatedEnvironment.State.REPLICA state.
    +
    +
    reset() - Method in class com.sleepycat.je.rep.util.DbResetRepGroup
    +
    +
    Replaces the existing group with the new group having a single new node + as described by the constructor arguments.
    +
    +
    reset() - Method in class com.sleepycat.util.FastInputStream
    +
     
    +
    reset() - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    resetLoggingParams - Static variable in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    resetMBeanInfo() - Method in class com.sleepycat.je.jmx.JEMBean
    +
    +
    Create the available management interface for this environment.
    +
    +
    resolveClass(String) - Method in class com.sleepycat.persist.model.EntityModel
    +
    +
    Should be called by entity model implementations instead of calling + Class.forName whenever loading an application class.
    +
    +
    resolveClass(String, ClassLoader) - Static method in class com.sleepycat.util.ClassResolver
    +
    +
    A specialized Class.forName method that supports use of a user-specified + ClassLoader.
    +
    +
    resolveClass(ObjectStreamClass) - Method in class com.sleepycat.util.ClassResolver.Stream
    +
     
    +
    RestartRequiredException - Exception in com.sleepycat.je.rep
    +
    +
    RestartRequiredException serves as the base class for all exceptions which + make it impossible for HA to proceed without some form of corrective action + on the part of the user, followed by a restart of the application.
    +
    +
    RestartRequiredException(EnvironmentImpl, EnvironmentFailureReason) - Constructor for exception com.sleepycat.je.rep.RestartRequiredException
    +
     
    +
    RestartRequiredException(EnvironmentImpl, EnvironmentFailureReason, Exception) - Constructor for exception com.sleepycat.je.rep.RestartRequiredException
    +
     
    +
    RestartRequiredException(EnvironmentImpl, EnvironmentFailureReason, String) - Constructor for exception com.sleepycat.je.rep.RestartRequiredException
    +
     
    +
    RestartRequiredException(String, RestartRequiredException) - Constructor for exception com.sleepycat.je.rep.RestartRequiredException
    +
    +
    For internal use only.
    +
    +
    result() - Method in class com.sleepycat.persist.EntityResult
    +
    +
    Returns the OperationResult resulting from the operation.
    +
    +
    retainAll(Collection<?>) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Retains only the elements in this collection that are contained in the + specified collection (optional operation).
    +
    +
    rollback(Xid) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    RollbackException - Exception in com.sleepycat.je.rep
    +
    +
    This asynchronous exception indicates that a new master has been selected, + that this Replica's log is ahead of the current Master, + and that the Replica was unable to roll back without a + recovery.
    +
    +
    RollbackProhibitedException - Exception in com.sleepycat.je.rep
    +
    +
    This exception may be thrown by a Replica during the + replication stream sync-up phase of startup.
    +
    +
    run(TransactionWorker) - Method in class com.sleepycat.collections.TransactionRunner
    +
    +
    Calls the TransactionWorker.doWork() method and, for transactional + environments, may begin and end a transaction.
    +
    +
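    A minimal sketch of running work under a TransactionRunner, assuming an open transactional Environment; the runner begins a transaction, calls doWork(), and commits, retrying on deadlock:

        import com.sleepycat.collections.TransactionRunner;
        import com.sleepycat.collections.TransactionWorker;
        import com.sleepycat.je.Environment;

        public class RunnerDemo {
            static void updateAtomically(Environment env) throws Exception {
                TransactionRunner runner = new TransactionRunner(env);
                runner.setMaxRetries(3); // retry up to 3 times on deadlock
                runner.run(new TransactionWorker() {
                    public void doWork() {
                        // Transactional reads/writes against env go here; the
                        // runner commits on success and aborts on exception.
                    }
                });
            }
        }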
    RUN_LOG_FLUSH_TASK - Static variable in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Deprecated. +
    as of 7.2. Log flushing can be disabled by setting EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL and EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL to zero. For compatibility + with earlier releases, if this parameter is specified as false, no log + flushing will be performed; in this case, EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL and EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL may not also be specified.
    +
    +
    +
    RunRecoveryException - Exception in com.sleepycat.je
    +
    +
    Deprecated. + +
    +
    +
    RuntimeExceptionWrapper - Exception in com.sleepycat.util
    +
    +
    A RuntimeException that can contain nested exceptions.
    +
    +
    RuntimeExceptionWrapper(Throwable) - Constructor for exception com.sleepycat.util.RuntimeExceptionWrapper
    +
     
    +
    + + + +

    S

    +
    +
    SecondaryConfig - Class in com.sleepycat.je
    +
    +
    The configuration properties of a SecondaryDatabase extend + those of a primary Database.
    +
    +
    SecondaryConfig() - Constructor for class com.sleepycat.je.SecondaryConfig
    +
    +
    Creates an instance with the system's default settings.
    +
    +
    SecondaryConstraintException - Exception in com.sleepycat.je
    +
    +
    Base class for exceptions thrown when a write operation fails because of a + secondary constraint.
    +
    +
    SecondaryCursor - Class in com.sleepycat.je
    +
    +
    A database cursor for a secondary database.
    +
    +
    SecondaryDatabase - Class in com.sleepycat.je
    +
    +
    A secondary database handle.
    +
    +
    SecondaryIndex<SK,PK,E> - Class in com.sleepycat.persist
    +
    +
    The secondary index for an entity class and a secondary key.
    +
    +
    SecondaryIndex(SecondaryDatabase, Database, PrimaryIndex<PK, E>, Class<SK>, EntryBinding<SK>) - Constructor for class com.sleepycat.persist.SecondaryIndex
    +
    +
    Creates a secondary index without using an EntityStore.
    +
    +
    SecondaryIntegrityException - Exception in com.sleepycat.je
    +
    +
    Thrown when an integrity problem is detected while accessing a secondary + database, including access to secondaries while writing to a primary + database.
    +
    +
    SecondaryKey - Annotation Type in com.sleepycat.persist.model
    +
    +
    Indicates a secondary key field of an entity class.
    +
    +
    SecondaryKeyCreator - Interface in com.sleepycat.je
    +
    +
    The interface implemented for extracting single-valued secondary keys from + primary records.
    +
    +
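    A minimal implementation sketch; the rule of taking the first four bytes of the primary record's data as the secondary key is invented purely for illustration. The creator is installed via SecondaryConfig.setKeyCreator (see the setKeyCreator entry below):

        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.SecondaryDatabase;
        import com.sleepycat.je.SecondaryKeyCreator;

        // Illustrative creator: uses the first four bytes of the primary
        // record's data as the secondary key (the rule itself is made up).
        public class PrefixKeyCreator implements SecondaryKeyCreator {
            public boolean createSecondaryKey(SecondaryDatabase secondary,
                                              DatabaseEntry key,
                                              DatabaseEntry data,
                                              DatabaseEntry result) {
                if (data.getSize() < 4) {
                    return false; // record contributes no secondary key
                }
                result.setData(data.getData(), data.getOffset(), 4);
                return true;
            }
        }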
    SecondaryKeyMetadata - Class in com.sleepycat.persist.model
    +
    +
    The metadata for a secondary key field.
    +
    +
    SecondaryKeyMetadata(String, String, String, String, String, Relationship, String, DeleteAction) - Constructor for class com.sleepycat.persist.model.SecondaryKeyMetadata
    +
    +
    Used by an EntityModel to construct secondary key metadata.
    +
    +
    SecondaryMultiKeyCreator - Interface in com.sleepycat.je
    +
    +
    The interface implemented for extracting multi-valued secondary keys from + primary records.
    +
    +
    SecondaryReferenceException - Exception in com.sleepycat.je
    +
    +
    Base class for exceptions thrown when a read or write operation fails + because of a secondary constraint or integrity problem.
    +
    +
    Sequence - Class in com.sleepycat.je
    +
    +
    A Sequence handle is used to manipulate a sequence record in a + database.
    +
    +
    SequenceConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of a sequence.
    +
    +
    SequenceConfig() - Constructor for class com.sleepycat.je.SequenceConfig
    +
    +
    An instance created using the default constructor is initialized with + the system's default settings.
    +
    +
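    A minimal sketch of configuring and using a sequence, assuming an already-open Database (opening elided); the sequence name is a placeholder:

        import java.nio.charset.StandardCharsets;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;
        import com.sleepycat.je.Sequence;
        import com.sleepycat.je.SequenceConfig;

        public class SequenceDemo {
            static long nextId(Database db) { // db: an open Database (opening elided)
                SequenceConfig config = new SequenceConfig();
                config.setAllowCreate(true);  // create the sequence record if absent
                config.setInitialValue(1000);
                config.setCacheSize(50);      // values cached per handle

                DatabaseEntry key =
                    new DatabaseEntry("mySequence".getBytes(StandardCharsets.UTF_8));
                Sequence seq = db.openSequence(null, key, config);
                long id = seq.get(null, 1);   // atomically allocate the next value
                seq.close();
                return id;
            }
        }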
    SequenceExistsException - Exception in com.sleepycat.je
    +
    +
    Thrown by Database.openSequence if the + sequence record already exists and the SequenceConfig + ExclusiveCreate parameter is true.
    +
    +
    SequenceIntegrityException - Exception in com.sleepycat.je
    +
    +
    Thrown by Sequence.get if the sequence record has been + deleted.
    +
    +
    SequenceNotFoundException - Exception in com.sleepycat.je
    +
    +
    Thrown by Database.openSequence if the + sequence record does not exist and the SequenceConfig AllowCreate + parameter is false.
    +
    +
    SequenceOverflowException - Exception in com.sleepycat.je
    +
    +
    Thrown by Sequence.get if the end of the sequence is + reached and wrapping is not configured.
    +
    +
    SequenceStats - Class in com.sleepycat.je
    +
    +
    A SequenceStats object is used to return sequence statistics.
    +
    +
    SerialBase - Class in com.sleepycat.bind.serial
    +
    +
    A base class for serial bindings creators that provides control over the + allocation of the output buffer.
    +
    +
    SerialBase() - Constructor for class com.sleepycat.bind.serial.SerialBase
    +
    +
    Initializes the initial output buffer size to zero.
    +
    +
    SerialBinding<E> - Class in com.sleepycat.bind.serial
    +
    +
    A concrete EntryBinding that treats a key or data entry as + a serialized object.
    +
    +
    SerialBinding(ClassCatalog, Class<E>) - Constructor for class com.sleepycat.bind.serial.SerialBinding
    +
    +
    Creates a serial binding.
    +
    +
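    A minimal sketch of a serial binding; MyValue is a hypothetical Serializable class, and catalogDb is an already-open Database used to hold the shared class catalog (opening elided):

        import java.io.Serializable;
        import com.sleepycat.bind.serial.SerialBinding;
        import com.sleepycat.bind.serial.StoredClassCatalog;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseEntry;

        public class SerialBindingDemo {
            static class MyValue implements Serializable {} // hypothetical class

            static MyValue roundTrip(Database catalogDb) {
                // The catalog database stores shared class descriptions.
                StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
                SerialBinding<MyValue> binding =
                    new SerialBinding<>(catalog, MyValue.class);

                DatabaseEntry entry = new DatabaseEntry();
                binding.objectToEntry(new MyValue(), entry); // serialize
                return binding.entryToObject(entry);         // deserialize
            }
        }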
    SerialInput - Class in com.sleepycat.bind.serial
    +
    +
    A specialized ObjectInputStream that gets class description + information from a ClassCatalog.
    +
    +
    SerialInput(InputStream, ClassCatalog) - Constructor for class com.sleepycat.bind.serial.SerialInput
    +
    +
    Creates a serial input stream.
    +
    +
    SerialInput(InputStream, ClassCatalog, ClassLoader) - Constructor for class com.sleepycat.bind.serial.SerialInput
    +
    +
    Creates a serial input stream.
    +
    +
    SerialOutput - Class in com.sleepycat.bind.serial
    +
    +
    A specialized ObjectOutputStream that stores class description + information in a ClassCatalog.
    +
    +
    SerialOutput(OutputStream, ClassCatalog) - Constructor for class com.sleepycat.bind.serial.SerialOutput
    +
    +
    Creates a serial output stream.
    +
    +
    SerialSerialBinding<K,D,E> - Class in com.sleepycat.bind.serial
    +
    +
    An abstract EntityBinding that treats an entity's key entry and + data entry as serialized objects.
    +
    +
    SerialSerialBinding(ClassCatalog, Class<K>, Class<D>) - Constructor for class com.sleepycat.bind.serial.SerialSerialBinding
    +
    +
    Creates a serial-serial entity binding.
    +
    +
    SerialSerialBinding(SerialBinding<K>, SerialBinding<D>) - Constructor for class com.sleepycat.bind.serial.SerialSerialBinding
    +
    +
    Creates a serial-serial entity binding.
    +
    +
    SerialSerialKeyCreator<PK,D,SK> - Class in com.sleepycat.bind.serial
    +
    +
    An abstract key creator that uses a serial key and a serial data entry.
    +
    +
    SerialSerialKeyCreator(ClassCatalog, Class<PK>, Class<D>, Class<SK>) - Constructor for class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
    +
    Creates a serial-serial key creator.
    +
    +
    SerialSerialKeyCreator(SerialBinding<PK>, SerialBinding<D>, SerialBinding<SK>) - Constructor for class com.sleepycat.bind.serial.SerialSerialKeyCreator
    +
    +
    Creates a serial-serial key creator.
    +
    +
    server - Variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    set(E) - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Replaces the last element returned by next or previous with the + specified element (optional operation).
    +
    +
    set(long) - Method in class com.sleepycat.je.rep.util.AtomicLongMax
    +
    +
    Sets the value to newValue and returns the old value.
    +
    +
    setAggressive(boolean) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Configures Environment.verify and Database.verify to perform fine-granularity consistency checking that + includes verifying in-memory constructs.
    +
    +
    setAllowCreate(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Configures the Environment.openDatabase method to create the database if it does not + already exist.
    +
    +
    setAllowCreate(boolean) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, creates the database environment if it doesn't already exist.
    +
    +
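    The environment-level and database-level allowCreate flags are typically set together on first open. A minimal sketch; the database name is a placeholder:

        import java.io.File;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.DatabaseConfig;
        import com.sleepycat.je.Environment;
        import com.sleepycat.je.EnvironmentConfig;

        public class OpenDemo {
            static Database open(File envHome) { // envHome: an existing directory
                EnvironmentConfig envConfig = new EnvironmentConfig();
                envConfig.setAllowCreate(true);   // create environment files if absent
                envConfig.setTransactional(true);
                Environment env = new Environment(envHome, envConfig);

                DatabaseConfig dbConfig = new DatabaseConfig();
                dbConfig.setAllowCreate(true);    // create the database if absent
                dbConfig.setTransactional(true);
                return env.openDatabase(null, "myDb", dbConfig);
            }
        }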
    setAllowCreate(boolean) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Configures the Database.openSequence method to create the sequence if it does not + already exist.
    +
    +
    setAllowCreate(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Specifies whether creation of a new store is allowed.
    +
    +
    setAllowNestedTransactions(boolean) - Method in class com.sleepycat.collections.TransactionRunner
    +
    +
    Changes whether nested transactions will be created if + run() is called when a transaction is already active for + the current thread.
    +
    +
    setAllowPopulate(boolean) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Specifies whether automatic population of the secondary is allowed.
    +
    +
    setArbiterHome(String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Sets the Arbiter home directory.
    +
    +
    setArbiterMutableConfig(ArbiterMutableConfig) - Method in class com.sleepycat.je.rep.arbiter.Arbiter
    +
    +
    Sets the Arbiter mutable attributes.
    +
    +
    setAttribute(Attribute) - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    setAttribute(Environment, Attribute) - Method in class com.sleepycat.je.jmx.JEMBeanHelper
    +
    +
    Deprecated.
    +
    Set an attribute value for the given environment.
    +
    +
    setAttribute(Attribute) - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    setAttributes(AttributeList) - Method in class com.sleepycat.je.jmx.JEDiagnostics
    +
     
    +
    setAttributes(AttributeList) - Method in class com.sleepycat.je.jmx.JEMonitor
    +
     
    +
    setAutoCommitNoSync(boolean) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Configures auto-commit operations on the sequence to not flush the + transaction log.
    +
    +
    setBatchDelay(long, TimeUnit) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Configures the delay between batches.
    +
    +
    setBatchSize(int) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Configures the number of records verified per batch.
    +
    +
    setBINsOnly(boolean) - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Specify whether the DiskOrderedCursor should scan the BINs only.
    +
    +
    setBtreeComparator(Comparator<byte[]>) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    By default, a byte by byte lexicographic comparison is used for btree + keys.
    +
    +
    setBtreeComparator(Class<? extends Comparator<byte[]>>) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    By default, a byte by byte lexicographic comparison is used for btree + keys.
    +
    +
    setCacheMode(CacheMode) - Method in class com.sleepycat.je.Cursor
    +
    +
    Sets the CacheMode default used for subsequent operations + performed using this cursor.
    +
    +
    setCacheMode(CacheMode) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Sets the default CacheMode used for operations performed on this + database.
    +
    +
    setCacheMode(CacheMode) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Sets the default CacheMode used for operations performed in this + environment.
    +
    +
    setCacheMode(CacheMode) - Method in class com.sleepycat.je.ReadOptions
    +
    +
    Sets the CacheMode to be used for the operation.
    +
    +
    setCacheMode(CacheMode) - Method in class com.sleepycat.je.WriteOptions
    +
    +
    Sets the CacheMode to be used for the operation.
    +
    +
    setCacheMode(CacheMode) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Changes the CacheMode default used for subsequent operations + performed using this cursor.
    +
    +
    setCachePercent(int) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    A convenience method for setting EnvironmentConfig.MAX_MEMORY_PERCENT.
    +
    +
    setCacheSize(long) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    A convenience method for setting EnvironmentConfig.MAX_MEMORY.
    +
    +
    setCacheSize(int) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Configures the number of elements cached by a sequence handle.
    +
    +
    setClassLoader(ClassLoader) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configure the environment to use a specified ClassLoader for loading + user-supplied classes by name.
    +
    +
    setClear(boolean) - Method in class com.sleepycat.je.StatsConfig
    +
    +
    Configures the statistics operation to reset statistics after they are + returned.
    +
    +
    setConfigParam(String, String) - Method in class com.sleepycat.je.EnvironmentConfig
    +
     
    +
    setConfigParam(String, String) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Set this configuration parameter.
    +
    +
    setConfigParam(String, String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Documentation inherited from ArbiterMutableConfig.setConfigParam.
    +
    +
    setConfigParam(String, String) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
     
    +
    setConfigParam(String, String) - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Set this configuration parameter with this value.
    +
    +
    setConsistencyPolicy(ReplicaConsistencyPolicy) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Sets the consistency policy to be associated with the configuration.
    +
    +
    setConsistencyPolicy(ReplicaConsistencyPolicy) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Associates a consistency policy with this configuration.
    +
    +
    setConsoleLoggingLevel(String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
    +
    Trace messages equal to and above this level will be logged to the + console.
    +
    +
    setCustomStats(CustomStats) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Sets the custom statistics object.
    +
    +
    setData(byte[]) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Sets the byte array.
    +
    +
    setData(byte[], int, int) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Sets the byte array, offset and size.
    +
    +
    setDbName(String) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    Sets the database name to load.
    +
    +
    setDecrement(boolean) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Specifies that the sequence should be decremented.
    +
    +
    setDeferredWrite(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Sets the deferred-write option.
    +
    +
    setDeferredWrite(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Sets the deferred-write configuration property.
    +
    +
    setDesignatedPrimary(boolean) - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    If isPrimary is true, designate this node as a Primary.
    +
    +
    setDumpCorruptedBounds(boolean) - Method in class com.sleepycat.je.util.DbScavenger
    +
    +
    Set to true if corrupted boundaries should be dumped out.
    +
    +
    setDuplicateComparator(Comparator<byte[]>) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    By default, a byte by byte lexicographic comparison is used for + duplicate data items in a duplicate set.
    +
    +
    setDuplicateComparator(Class<? extends Comparator<byte[]>>) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    By default, a byte by byte lexicographic comparison is used for + duplicate data items in a duplicate set.
    +
    +
    setDurability(Durability) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Convenience method for setting EnvironmentConfig.TXN_DURABILITY.
    +
    +
    setDurability(Durability) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Configures the durability associated with a transaction when it commits.
    +
    +
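    A minimal sketch of attaching a durability policy to a transaction, assuming an open transactional Environment:

        import com.sleepycat.je.Durability;
        import com.sleepycat.je.Environment;
        import com.sleepycat.je.Transaction;
        import com.sleepycat.je.TransactionConfig;

        public class DurabilityDemo {
            static Transaction begin(Environment env) {
                TransactionConfig config = new TransactionConfig();
                // COMMIT_SYNC writes and fsyncs the log at commit time.
                config.setDurability(Durability.COMMIT_SYNC);
                return env.beginTransaction(null, config);
            }
        }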
    setElectableGroupSizeOverride(int) - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Sets the size used to determine the number of electable nodes.
    +
    +
    setEndFile(long) - Method in class com.sleepycat.je.util.DbSpace
    +
    +
    Sets the ending file number, which is an upper bound on the range of + files for which utilization is reported and (optionally) recalculated.
    +
    +
    setEnv(Environment) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    Sets the Environment to load from.
    +
    +
    setEvolveListener(EvolveListener) - Method in class com.sleepycat.persist.evolve.EvolveConfig
    +
    +
    Sets a progress listener that is notified each time an entity is read.
    +
    +
    setExceptionListener(ExceptionListener) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Sets the exception listener for an Environment.
    +
    +
    setExclusiveCreate(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Configure the Environment.openDatabase method to fail if the database already exists.
    +
    +
    setExclusiveCreate(boolean) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Configures the Database.openSequence method to fail if the database already exists.
    +
    +
    setExclusiveCreate(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Specifies whether opening an existing store is prohibited.
    +
    +
    setExpirationTime(long, TimeUnit) - Method in class com.sleepycat.je.WriteOptions
    +
    +
    A convenience method to set the TTL based on a given expiration time + and the current system time.
    +
    +
    setFast(boolean) - Method in class com.sleepycat.je.StatsConfig
    +
    +
    Configures the statistics operation to return only the values which do + not incur some performance penalty.
    +
    +
    setFileLoggingLevel(String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
    +
    Trace messages equal to and above this level will be logged to the je.info + file, which is in the Arbiter home directory.
    +
    +
    setForce(boolean) - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Configures the checkpoint force option.
    +
    +
    setForeignKeyDatabase(Database) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Defines a foreign key integrity constraint for a given foreign key + database.
    +
    +
    setForeignKeyDeleteAction(ForeignKeyDeleteAction) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Specifies the action taken when a referenced record in the foreign key + database is deleted.
    +
    +
    setForeignKeyNullifier(ForeignKeyNullifier) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Specifies the user-supplied object used for setting single-valued + foreign keys to null.
    +
    +
    setForeignMultiKeyNullifier(ForeignMultiKeyNullifier) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Specifies the user-supplied object used for setting multi-valued foreign + keys to null.
    +
    +
    setGroupName(String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Sets the name for the replication group.
    +
    +
    setGroupName(String) - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Sets the name for the replication group.
    +
    +
    setGroupName(String) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Sets the name for the replication group.
    +
    +
    setHeartbeatInterval(int) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Sets the heartbeat interval.
    +
    +
    setHelperHosts(String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
    +
    Identifies one or more helper nodes by their host and port pairs in this + format:
    +
    +
    setHelperHosts(String) - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Identifies one or more helper nodes by their host and port pairs in this + format:
    +
    +
    setHelperHosts(String) - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Identifies one or more helper nodes by their host and port pairs in this + format:
    +
    +
    setHelperSockets(Set<InetSocketAddress>) - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
    Sets the helper sockets being used to contact a replication group + member, in order to query for the information.
    +
    +
    setIgnoreUnknownConfig(boolean) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    Sets whether to ignore unknown parameters in the config file.
    +
    +
    setImmutableSecondaryKey(boolean) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Specifies whether the secondary key is immutable.
    +
    +
    setInitialValue(long) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Sets the initial value for a sequence.
    +
    +
    setInputReader(BufferedReader) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    Sets the BufferedReader to load from.
    +
    +
    setInternalMemoryLimit(long) - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Set the maximum amount of JE Cache Memory that the DiskOrderedScan + can use at one time.
    +
    +
    setInternalMemoryLimit(long) - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Set the maximum amount of non JE Cache Memory that preload can use at + one time.
    +
    +
    setIteratorBlockSize(int) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Changes the number of records read at one time by iterators returned by + the StoredCollection.iterator() method.
    +
    +
    setKBytes(int) - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Configures the checkpoint log data threshold, in kilobytes.
    +
    +
    setKeyCreator(SecondaryKeyCreator) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Specifies the user-supplied object used for creating single-valued + secondary keys.
    +
    +
    setKeyPrefixing(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Configure the database to support key prefixing.
    +
    +
    setKeysOnly(boolean) - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Specify whether the DiskOrderedCursor should return only the key or key + + data.
    +
    +
    setLoadLNs(boolean) - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Configure the preload load LNs option.
    +
    +
    setLocalWrite(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Configures this transaction to allow writing to non-replicated + Databases in a + ReplicatedEnvironment.
    +
    +
    setLocking(boolean) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Convenience method for setting + EnvironmentConfig.ENV_IS_LOCKING.
    +
    +
    setLockMode(LockMode) - Method in class com.sleepycat.je.ReadOptions
    +
    +
    Sets the LockMode to be used for the operation.
    +
    +
    setLockTimeout(long, TimeUnit) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Convenience method for setting EnvironmentConfig.LOCK_TIMEOUT.
    +
    +
    setLockTimeout(long) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Deprecated. + +
    +
    +
    setLockTimeout(long, TimeUnit) - Method in class com.sleepycat.je.Transaction
    +
    +
    Configures the lock request timeout value for the transaction.
    +
    +
    setLockTimeout(long) - Method in class com.sleepycat.je.Transaction
    +
    +
    Deprecated. + +
    +
    +
    setLoggingHandler(Handler) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Set a java.util.logging.Handler which will be used by all + java.util.logging.Loggers instantiated by this Environment.
    +
    +
    setLoggingHandler(Handler) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
     
    +
    setLogProviders(List<ReplicationNode>) - Method in class com.sleepycat.je.rep.NetworkRestoreConfig
    +
    +
    Sets the prioritized list of data nodes, either ELECTABLE or SECONDARY + members, used to select a node from which to obtain log files for the + NetworkRestore operation.
    +
    +
    setLSNBatchSize(long) - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Set the maximum number of LSNs to gather and sort at any one time.
    +
    +
    setLSNBatchSize(long) - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Set the maximum number of LSNs to gather and sort at any one time.
    +
    +
    setManagedConnection(JEManagedConnection, JELocalTransaction) - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    setMaxBytes(long) - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Configure the maximum number of bytes to preload.
    +
    +
    setMaxClockDelta(long, TimeUnit) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Sets the maximum acceptable clock skew between this Replica and its + Feeder, which is the node that is the source of its replication + stream.
    +
    +
    setMaxDisk(long) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    A convenience method for setting EnvironmentConfig.MAX_DISK.
    +
    +
    setMaxMillisecs(long) - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Configure the maximum number of milliseconds to execute preload.
    +
    +
    setMaxRetries(int) - Method in class com.sleepycat.collections.TransactionRunner
    +
    +
    Changes the maximum number of retries that will be performed when + deadlocks are detected.
    +
    +
    setMaxSeedMillisecs(long) - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Deprecated. +
    this method has no effect and will be removed in a future + release.
    +
    +
    +
    setMaxSeedNodes(long) - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Deprecated. +
    this method has no effect and will be removed in a future + release.
    +
    +
    +
    setMinimizeRecoveryTime(boolean) - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Configures the minimize recovery time option.
    +
    +
    setMinutes(int) - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Configures the checkpoint time threshold, in minutes.
    +
    +
    setModel(EntityModel) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Sets the entity model that defines entity classes and index keys.
    +
    +
    setMultiKeyCreator(SecondaryMultiKeyCreator) - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Specifies the user-supplied object used for creating multi-valued + secondary keys.
    +
    +
    setMutableConfig(EnvironmentMutableConfig) - Method in class com.sleepycat.je.Environment
    +
    +
    Sets database environment attributes.
    +
    +
    setMutations(Mutations) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Configures mutations for performing lazy evolution of stored instances.
    +
    +
    setName(String) - Method in class com.sleepycat.je.Transaction
    +
    +
    Set the user visible name for the transaction.
    +
    +
    setNodeHostPort(String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Sets the hostname and port associated with this arbiter.
    +
    +
    setNodeHostPort(String) - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Sets the hostname and port associated with this monitor.
    +
    +
    setNodeHostPort(String) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Sets the hostname and port associated with this node.
    +
    +
    setNodeMaxDupTreeEntries(int) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Deprecated. +
    this property no longer has any effect; DatabaseConfig.setNodeMaxEntries(int) should be used instead.
    +
    +
    +
    setNodeMaxEntries(int) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Configures the Environment.openDatabase method to have a B+Tree fanout of + nodeMaxEntries.
    +
    +
    setNodeName(String) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Sets the user defined nodeName for the Environment.
    +
    +
    setNodeName(String) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Sets the name to be associated with this Arbiter.
    +
    +
    setNodeName(String) - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Sets the name to be associated with this monitor.
    +
    +
    setNodeName(String) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Sets the name to be associated with this node.
    +
    +
    setNodePriority(int) - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
    Sets the election priority for the node.
    +
    +
    setNodeType(NodeType) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Sets the type of this node.
    +
    +
    setNonSticky(boolean) - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Configures the behavior of the cursor when a cursor movement operation + returns OperationStatus.NOTFOUND.
    +
    +
    setNoOverwrite(boolean) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    Sets whether the load should overwrite existing data or not.
    +
    +
    setNoSort(boolean) - Method in class com.sleepycat.je.JoinConfig
    +
    +
    Specifies whether automatic sorting of the input cursors is disabled.
    +
    +
    setNoSync(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    + +
    +
    setNoWait(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Configures the transaction to not wait if a lock request cannot be + immediately granted.
    +
    +
    setNumRetries(int) - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Sets the number of times a ping thread attempts to contact a node + before deeming it unreachable.
    +
    +
    setOffHeapCacheSize(long) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    A convenience method for setting + EnvironmentConfig.MAX_OFF_HEAP_MEMORY.
    +
    +
    setOffset(int) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Sets the byte offset into the data array.
    +
    +
    setOverrideBtreeComparator(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Set to true if the database exists and the btree comparator specified + in this configuration object should override the current comparator.
    +
    +
    setOverrideDuplicateComparator(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Set to true if the database exists and the duplicate comparator + specified in this configuration object should override the current + comparator.
    +
    +
    setPartial(int, int, boolean) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Configures this DatabaseEntry to read or write partial records.
    +
    +
    setPartial(boolean) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Configures this DatabaseEntry to read or write partial records.
    +
    +
    setPartialLength(int) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Sets the length of the partial record being read or written by the + application, in bytes.
    +
    +
    setPartialOffset(int) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Sets the offset of the partial record being read or written by the + application, in bytes.
    +
    +
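    The three-argument setPartial combines the individual offset, length, and partial-flag setters. A minimal sketch:

        import com.sleepycat.je.DatabaseEntry;

        public class PartialDemo {
            static DatabaseEntry partialWindow() {
                DatabaseEntry data = new DatabaseEntry();
                // Read or write only bytes 10..109 of the record...
                data.setPartial(10, 100, true);
                // ...which is equivalent to the three individual setters:
                data.setPartialOffset(10);
                data.setPartialLength(100);
                data.setPartial(true);
                return data;
            }
        }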
    setPrimaryConfig(Class, DatabaseConfig) - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Configures the primary database for an entity class using the Berkeley + DB engine API.
    +
    +
    setPrintInfo(boolean) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Configures Environment.verify and Database.verify to print basic verification information.
    +
    +
    setProgressInterval(long) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    If progressInterval is set, progress status messages are generated to + stdout at set percentages of the load.
    +
    +
    setProgressListener(ProgressListener<PreloadConfig.Phases>) - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Configure the preload operation to make periodic calls to a ProgressListener to provide feedback on preload progress.
    +
    +
    setPropagateExceptions(boolean) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Configures Environment.verify and Database.verify to propagate exceptions found during verification.
    +
    +
    setQueueSize(int) - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Set the queue size for entries being passed between the + DiskOrderedCursor producer thread and the application's consumer + thread.
    +
    +
    setRange(long, long) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Configures a sequence range.
    +
    +
    setReadCommitted(boolean) - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Configures read operations performed by the cursor to obey read + committed isolation.
    +
    +
    setReadCommitted(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Configures the transaction for read committed isolation.
    +
    +
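    Read-committed isolation can be requested per cursor as well as per transaction. A minimal cursor-level sketch, assuming an open Database and Transaction:

        import com.sleepycat.je.Cursor;
        import com.sleepycat.je.CursorConfig;
        import com.sleepycat.je.Database;
        import com.sleepycat.je.Transaction;

        public class ReadCommittedDemo {
            static Cursor open(Database db, Transaction txn) {
                CursorConfig config = new CursorConfig();
                config.setReadCommitted(true); // this cursor reads read-committed
                return db.openCursor(txn, config);
            }
        }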
    setReadDelay(long, TimeUnit) - Method in class com.sleepycat.je.util.DbVerifyLog
    +
    +
    Configures the delay between file reads during verification.
    +
    +
    setReadModifyWrite(boolean) - Method in class com.sleepycat.collections.StoredIterator
    +
    +
    Changes whether write-locks will be obtained when reading with this + cursor.
    +
    +
    setReadOnly(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Configures the database in read-only mode.
    +
    +
    setReadOnly(boolean) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Convenience method for setting EnvironmentConfig.ENV_READ_ONLY.
    +
    +
    setReadOnly(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Configures this transaction to disallow write operations, regardless of + whether writes are allowed for the Environment or the + Databases that are accessed.
    +
    +
    setReadOnly(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Sets the read-only configuration property.
    +
    +
    setReadUncommitted(boolean) - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Configures read operations performed by the cursor to return modified + but not yet committed data.
    +
    +
    setReadUncommitted(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Configures read operations performed by the transaction to return + modified but not yet committed data.
    +
    +
    setRecalculate(boolean) - Method in class com.sleepycat.je.util.DbSpace
    +
    +
    Sets the recalculation property, which if true causes a more expensive + recalculation of utilization to be performed for debugging purposes.
    +
    +
    setReceiveBufferSize(int) - Method in class com.sleepycat.je.rep.NetworkRestoreConfig
    +
    +
    Sets the size of the receive buffer associated with the socket used to + transfer files during the NetworkRestore operation.
    +
    +
    setRecoveryProgressListener(ProgressListener<RecoveryProgress>) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configure the environment to make periodic calls to a ProgressListener to + provide feedback on environment startup (recovery).
    +
    +
    setReplicaAckTimeout(long, TimeUnit) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Set the replica commit timeout.
    +
    +
    setReplicated(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Configures a database to be replicated or non-replicated, in a + replicated Environment.
    +
    +
    setReplicated(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Configures a store to be replicated or non-replicated, in a replicated + Environment.
    +
    +
    setRepMutableConfig(ReplicationMutableConfig) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
     
    +
    setRetainLogFiles(boolean) - Method in class com.sleepycat.je.rep.NetworkRestoreConfig
    +
    +
    If true, retains obsolete log files by renaming them instead of deleting + them.
    +
    +
    setRetryInterval(long) - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
    Sets the number of milliseconds between ping thread retries.
    +
    +
    setSecondaryBulkLoad(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Sets the bulk-load-secondaries configuration property.
    +
    +
    setSecondaryConfig(Class, String, SecondaryConfig) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Configures a secondary database for an entity class and key name using the Berkeley DB engine API.
    +
    +
    setSequenceConfig(String, SequenceConfig) - Method in class com.sleepycat.persist.EntityStore
    +
    +
    Configures a named key sequence using the Berkeley DB engine API.
    +
    +
    setSerialBufferSize(int) - Method in class com.sleepycat.bind.serial.SerialBase
    +
    +
Sets the initial byte size of the output buffer that is allocated by the default implementation of SerialBase.getSerialOutput(java.lang.Object).
    +
    +
    setSerializableIsolation(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Configures this transaction to have serializable (degree 3) isolation.
    +
    +
    setSharedCache(boolean) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
A convenience method for setting the EnvironmentConfig.SHARED_CACHE parameter.
    +
    +
    setShowProgressInterval(int) - Method in class com.sleepycat.je.StatsConfig
    +
    +
When the statistics operation is configured to display progress, the showProgressInterval is the number of LNs between each progress report.
    +
    +
    setShowProgressInterval(int) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
When the verify operation is configured to display progress, the showProgressInterval is the number of LNs between each progress report.
    +
    +
    setShowProgressStream(PrintStream) - Method in class com.sleepycat.je.StatsConfig
    +
    +
Configures the statistics operation to display progress to the PrintStream argument.
    +
    +
    setShowProgressStream(PrintStream) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
Configures the verify operation to display progress to the PrintStream argument.
    +
    +
    setSize(int) - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
    Sets the byte size of the data array.
    +
    +
    setSocketConnectTimeout(int) - Method in class com.sleepycat.je.rep.monitor.MonitorConfig
    +
    +
Sets the socketConnection timeout, in milliseconds, used when the ping thread attempts to establish a connection with a replication node.
    +
    +
    setSortedDuplicates(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Configures the database to support records with duplicate keys.
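A sketch of opening a database that permits duplicate keys (env is an assumed open, transactional Environment; the database name is hypothetical):

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setTransactional(true);
    // Keep multiple data items per key, in sorted order.
    dbConfig.setSortedDuplicates(true);
    Database db = env.openDatabase(null, "tagsDb", dbConfig);

Note that sorted duplicates must be chosen when the database is created; the property cannot be changed later.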
    +
    +
    setStartFile(long) - Method in class com.sleepycat.je.util.DbSpace
    +
    +
Sets the start file number, which is a lower bound on the range of files for which utilization is reported and (optionally) recalculated.
    +
    +
    setStateChangeListener(StateChangeListener) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
Sets the listener used to receive asynchronous replication node state change events.
    +
    +
    setSync(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    +
Configures the transaction to write and synchronously flush the log when it commits.
    +
    +
    setSyncupProgressListener(ProgressListener<SyncupProgress>) - Method in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
    Configure the environment to make periodic calls to a ProgressListener to provide feedback on replication stream sync-up.
    +
    +
    setTemporary(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Sets the temporary database option.
    +
    +
    setTemporary(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Sets the temporary configuration property.
    +
    +
    setTextFileMode(boolean) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    Sets whether the load data is in text file format.
    +
    +
    setThreadTransaction(Transaction) - Method in class com.sleepycat.je.Environment
    +
    +
Sets the transaction associated with this thread if implied transactions are being used.
    +
    +
    setTime(long) - Method in class com.sleepycat.je.util.DbSpace
    +
    +
    Sets the time for calculating expired data.
    +
    +
    setTotalLoadBytes(long) - Method in class com.sleepycat.je.util.DbLoad
    +
    +
    Used for progress status messages.
    +
    +
    setTransactional(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Encloses the database open within a transaction.
    +
    +
    setTransactional(boolean) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
Convenience method for setting EnvironmentConfig.ENV_IS_TRANSACTIONAL.
    +
    +
    setTransactional(boolean) - Method in class com.sleepycat.persist.StoreConfig
    +
    +
    Sets the transactional configuration property.
    +
    +
    setTransactionConfig(TransactionConfig) - Method in class com.sleepycat.collections.TransactionRunner
    +
    + +
    +
    setTransactionTimeout(int) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    setTTL(int) - Method in class com.sleepycat.je.WriteOptions
    +
    +
Sets the Time-To-Live property for a 'put' operation, using TimeUnit.DAYS as the TTL unit.
    +
    +
    setTTL(int, TimeUnit) - Method in class com.sleepycat.je.WriteOptions
    +
    +
Sets the Time-To-Live property for a 'put' operation, using the given TimeUnit.
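A sketch of a TTL write (db is an assumed open Database; JE accepts only TimeUnit.HOURS and TimeUnit.DAYS as TTL units):

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Put;
    import com.sleepycat.je.WriteOptions;

    DatabaseEntry key = new DatabaseEntry("session-42".getBytes());
    DatabaseEntry data = new DatabaseEntry("payload".getBytes());

    // The record expires roughly 36 hours after it is written.
    WriteOptions options = new WriteOptions().setTTL(36, TimeUnit.HOURS);
    db.put(null, key, data, Put.OVERWRITE, options);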
    +
    +
    setTupleBufferSize(int) - Method in class com.sleepycat.bind.tuple.TupleBase
    +
    +
Sets the initial byte size of the output buffer that is allocated by the default implementation of TupleBase.getTupleOutput(E).
    +
    +
    setTxnNoSync(boolean) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    + +
    +
    setTxnSerializableIsolation(boolean) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
A convenience method for setting EnvironmentConfig.TXN_SERIALIZABLE_ISOLATION.
    +
    +
    setTxnTimeout(long, TimeUnit) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    A convenience method for setting EnvironmentConfig.TXN_TIMEOUT.
    +
    +
    setTxnTimeout(long) - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
    +
    +
    setTxnTimeout(long, TimeUnit) - Method in class com.sleepycat.je.Transaction
    +
    +
    Configures the timeout value for the transaction lifetime.
    +
    +
    setTxnTimeout(long) - Method in class com.sleepycat.je.Transaction
    +
    +
Deprecated.
    +
    +
    setTxnWriteNoSync(boolean) - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    + +
    +
    setUnknownStateTimeout(long, TimeUnit) - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
Time to wait for the discovery of the Master during the instantiation of the Arbiter.
    +
    +
    setUpdateTTL(boolean) - Method in class com.sleepycat.je.WriteOptions
    +
    +
    Sets the update-TTL property for a 'put' operation.
    +
    +
    setUseExistingConfig(boolean) - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
Setting useExistingConfig to true allows a program to open a database without knowing a priori what its configuration is.
    +
    +
    setValue(V) - Method in class com.sleepycat.collections.MapEntryParameter
    +
    +
Always throws UnsupportedOperationException since this object is not attached to a map.
    +
    +
    setVerbose(boolean) - Method in class com.sleepycat.persist.model.ClassEnhancer
    +
    +
    Sets verbose mode.
    +
    +
    setVerbose(boolean) - Method in class com.sleepycat.persist.model.ClassEnhancerTask
    +
     
    +
    setVerifyDataRecords(boolean) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
Configures verification to read and verify the leaf node (LN) of a primary data record.
    +
    +
    setVerifySecondaries(boolean) - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Configures verification to verify secondary database integrity.
    +
    +
    setWrap(boolean) - Method in class com.sleepycat.je.SequenceConfig
    +
    +
Specifies that the sequence should wrap around when it is incremented (decremented) past the specified maximum (minimum) value.
    +
    +
    setWriteNoSync(boolean) - Method in class com.sleepycat.je.TransactionConfig
    +
    + +
    +
    SHARED_CACHE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    If true, the shared cache is used by this environment.
    +
    +
    ShortBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for a Short primitive wrapper or a short primitive.
    +
    +
    ShortBinding() - Constructor for class com.sleepycat.bind.tuple.ShortBinding
    +
     
    +
    shortToEntry(short, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.ShortBinding
    +
    +
    Converts a simple short value into an entry buffer.
    +
    +
    shutdown() - Method in class com.sleepycat.je.rep.arbiter.Arbiter
    +
    +
    Shutdown the Arbiter.
    +
    +
    shutdown() - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
    Release monitor resources and shut down the monitor.
    +
    +
    shutdownGroup(long, TimeUnit) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
Closes this handle and shuts down the Replication Group by forcing all active Replicas to exit.
    +
    +
    size() - Method in class com.sleepycat.collections.StoredCollection
    +
     
    +
    size() - Method in class com.sleepycat.collections.StoredContainer
    +
    +
Returns a non-transactional count of the records in the collection or map.
    +
    +
    size() - Method in class com.sleepycat.collections.StoredMap
    +
     
    +
    size() - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    skip(long) - Method in class com.sleepycat.je.util.LogVerificationInputStream
    +
    skip(long) - Method in class com.sleepycat.util.FastInputStream
    +
     
    +
    skipFast(int) - Method in class com.sleepycat.util.FastInputStream
    +
    +
Equivalent to skip() but takes an int parameter instead of a long, and does not check whether the count given is larger than the number of remaining bytes.
    +
    +
    skipNext(long, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
Skips forward a given number of key/data pairs and returns the number by which the cursor is moved.
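A sketch of skipping ahead with a cursor (db is an assumed open Database; the cursor must already be positioned, hence the getFirst call):

    import com.sleepycat.je.Cursor;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.LockMode;
    import com.sleepycat.je.OperationStatus;

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    Cursor cursor = db.openCursor(null, null);
    try {
        if (cursor.getFirst(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
            // Move up to 1000 records forward; fewer are skipped if the
            // end of the database is reached first.
            long moved = cursor.skipNext(1000, key, data, LockMode.DEFAULT);
        }
    } finally {
        cursor.close();
    }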
    +
    +
    skipPrev(long, DatabaseEntry, DatabaseEntry, LockMode) - Method in class com.sleepycat.je.Cursor
    +
    +
Skips backward a given number of key/data pairs and returns the number by which the cursor is moved.
    +
    +
    SortedBigDecimalBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for a sorted BigDecimal value.
    +
    +
    SortedBigDecimalBinding() - Constructor for class com.sleepycat.bind.tuple.SortedBigDecimalBinding
    +
     
    +
    SortedDoubleBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for a sorted Double primitive wrapper or a sorted double primitive.
    +
    +
    SortedDoubleBinding() - Constructor for class com.sleepycat.bind.tuple.SortedDoubleBinding
    +
     
    +
    SortedFloatBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for a sorted Float primitive wrapper or a sorted float primitive.
    +
    +
    SortedFloatBinding() - Constructor for class com.sleepycat.bind.tuple.SortedFloatBinding
    +
     
    +
    sortedMap() - Method in interface com.sleepycat.persist.EntityIndex
    +
    +
    Returns a standard Java sorted map based on this entity index.
    +
    +
    sortedMap() - Method in class com.sleepycat.persist.PrimaryIndex
    +
     
    +
    sortedMap() - Method in class com.sleepycat.persist.SecondaryIndex
    +
     
    +
    SortedPackedIntegerBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for a sorted Integer primitive wrapper or a sorted int primitive, that stores the value in the smallest number of bytes possible.
    +
    +
    SortedPackedIntegerBinding() - Constructor for class com.sleepycat.bind.tuple.SortedPackedIntegerBinding
    +
     
    +
    SortedPackedLongBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding for a sorted Long primitive wrapper or a sorted long primitive, that stores the value in the smallest number of bytes possible.
    +
    +
    SortedPackedLongBinding() - Constructor for class com.sleepycat.bind.tuple.SortedPackedLongBinding
    +
     
    +
    Splitter - Class in com.sleepycat.je.util
    +
    +
    Splitter is used to split a string based on a delimiter.
    +
    +
    Splitter(char) - Constructor for class com.sleepycat.je.util.Splitter
    +
     
    +
    start(Xid, int) - Method in class com.sleepycat.je.XAEnvironment
    +
     
    +
    startBackup() - Method in class com.sleepycat.je.util.DbBackup
    +
    +
Start backup mode in order to determine the definitive backup set needed at this point in time.
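A sketch of the intended startBackup/endBackup bracket (env is an assumed open Environment; the copy step is application-specific):

    import com.sleepycat.je.util.DbBackup;

    DbBackup backup = new DbBackup(env);
    backup.startBackup();
    try {
        // Files in the backup set are protected from the cleaner while
        // backup mode is active, so they may be copied safely.
        for (String file : backup.getLogFilesInBackupSet()) {
            // copy file to the backup location
        }
    } finally {
        backup.endBackup();
    }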
    +
    +
    startListener(MonitorChangeListener) - Method in class com.sleepycat.je.rep.monitor.Monitor
    +
    +
Starts the listener so it's actively listening for election results and broadcasts of replication group changes.
    +
    +
    STARTUP_DUMP_THRESHOLD - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
If environment startup exceeds this duration, startup statistics are logged and can be found in the je.info file.
    +
    +
    stateChange(StateChangeEvent) - Method in interface com.sleepycat.je.rep.StateChangeListener
    +
    +
    The notification method.
    +
    +
    StateChangeEvent - Class in com.sleepycat.je.rep
    +
    +
Communicates the state change at a node to the StateChangeListener.
    +
    +
    StateChangeException - Exception in com.sleepycat.je.rep
    +
    +
Provides a synchronous mechanism for informing an application about a change in the state of the replication node.
    +
    +
    StateChangeException(String, Exception) - Constructor for exception com.sleepycat.je.rep.StateChangeException
    +
    +
Used when no state change event is available.
    +
    +
    StateChangeListener - Interface in com.sleepycat.je.rep
    +
    +
An asynchronous mechanism for tracking the State of the replicated environment and choosing how to route database operations.
    +
    +
    statParams - Static variable in class com.sleepycat.je.jmx.JEMBean
    +
     
    +
    stats(PrintStream) - Method in class com.sleepycat.je.util.DbStat
    +
     
    +
    STATS_COLLECT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
If true, collect and log statistics.
    +
    +
    STATS_COLLECT_INTERVAL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The duration of the statistics capture interval.
    +
    +
    STATS_FILE_DIRECTORY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The directory to save the statistics log file.
    +
    +
    STATS_FILE_ROW_COUNT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Log file maximum row count for Stat collection.
    +
    +
    STATS_MAX_FILES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Maximum number of statistics log files to retain.
    +
    +
    StatsConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of a statistics retrieval operation.
    +
    +
    StatsConfig() - Constructor for class com.sleepycat.je.StatsConfig
    +
    +
An instance created using the default constructor is initialized with the system's default settings.
    +
    +
    STIFLE_DEFAULT_ERROR_MANAGER - Static variable in class com.sleepycat.je.util.FileHandler
    +
     
    +
    StoreConfig - Class in com.sleepycat.persist
    +
    +
    Configuration properties used with an EntityStore or RawStore.
    +
    +
    StoreConfig() - Constructor for class com.sleepycat.persist.StoreConfig
    +
    +
    Creates an entity store configuration object with default properties.
    +
    +
    StoreConfigBeanInfo - Class in com.sleepycat.persist
    +
     
    +
    StoreConfigBeanInfo() - Constructor for class com.sleepycat.persist.StoreConfigBeanInfo
    +
     
    +
    StoredClassCatalog - Class in com.sleepycat.bind.serial
    +
    +
    A ClassCatalog that is stored in a Database.
    +
    +
    StoredClassCatalog(Database) - Constructor for class com.sleepycat.bind.serial.StoredClassCatalog
    +
    +
    Creates a catalog based on a given database.
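A sketch of sharing one class catalog across serial bindings (catalogDb is an assumed open Database reserved for the catalog, and MyValue is a hypothetical Serializable class):

    import com.sleepycat.bind.serial.SerialBinding;
    import com.sleepycat.bind.serial.StoredClassCatalog;

    StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
    // The binding stores class descriptions once in the catalog rather
    // than in every record.
    SerialBinding<MyValue> valueBinding =
        new SerialBinding<>(catalog, MyValue.class);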
    +
    +
    StoredCollection<E> - Class in com.sleepycat.collections
    +
    +
An abstract base class for all stored collections.
    +
    +
    StoredCollections - Class in com.sleepycat.collections
    +
    +
    Static methods operating on collections and maps.
    +
    +
    StoredContainer - Class in com.sleepycat.collections
    +
    +
An abstract base class for all stored collections and maps.
    +
    +
    StoredEntrySet<K,V> - Class in com.sleepycat.collections
    +
    +
    The Set returned by Map.entrySet().
    +
    +
    storedIterator() - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Returns an iterator over the elements in this collection.
    +
    +
    storedIterator(boolean) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
Returns a read or read-write iterator over the elements in this collection.
    +
    +
    StoredIterator<E> - Class in com.sleepycat.collections
    +
    +
    The Iterator returned by all stored collections.
    +
    +
    StoredKeySet<K> - Class in com.sleepycat.collections
    +
    +
The Set returned by Map.keySet() and which can also be constructed directly if a Map is not needed.
    +
    +
    StoredKeySet(Database, EntryBinding<K>, boolean) - Constructor for class com.sleepycat.collections.StoredKeySet
    +
    +
    Creates a key set view of a Database.
    +
    +
    StoredMap<K,V> - Class in com.sleepycat.collections
    +
    +
    A Map view of a Database.
    +
    +
    StoredMap(Database, EntryBinding<K>, EntryBinding<V>, boolean) - Constructor for class com.sleepycat.collections.StoredMap
    +
    +
    Creates a map view of a Database.
    +
    +
    StoredMap(Database, EntryBinding<K>, EntryBinding<V>, PrimaryKeyAssigner) - Constructor for class com.sleepycat.collections.StoredMap
    +
    +
    Creates a map view of a Database with a PrimaryKeyAssigner.
    +
    +
    StoredMap(Database, EntryBinding<K>, EntityBinding<V>, boolean) - Constructor for class com.sleepycat.collections.StoredMap
    +
    +
    Creates a map entity view of a Database.
    +
    +
    StoredMap(Database, EntryBinding<K>, EntityBinding<V>, PrimaryKeyAssigner) - Constructor for class com.sleepycat.collections.StoredMap
    +
    +
    Creates a map entity view of a Database with a PrimaryKeyAssigner.
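A sketch of wrapping a Database in a StoredMap (db is an assumed open Database whose keys and values were written with the matching tuple bindings):

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.collections.StoredMap;

    StoredMap<String, Integer> map = new StoredMap<>(
        db,
        TupleBinding.getPrimitiveBinding(String.class),
        TupleBinding.getPrimitiveBinding(Integer.class),
        true);  // final argument: writes are allowed through the map
    map.put("counter", 42);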
    +
    +
    StoredSortedEntrySet<K,V> - Class in com.sleepycat.collections
    +
    +
    The SortedSet returned by Map.entrySet().
    +
    +
    StoredSortedKeySet<K> - Class in com.sleepycat.collections
    +
    +
The SortedSet returned by Map.keySet() and which can also be constructed directly if a Map is not needed.
    +
    +
    StoredSortedKeySet(Database, EntryBinding<K>, boolean) - Constructor for class com.sleepycat.collections.StoredSortedKeySet
    +
    +
    Creates a sorted key set view of a Database.
    +
    +
    StoredSortedMap<K,V> - Class in com.sleepycat.collections
    +
    +
    A SortedMap view of a Database.
    +
    +
    StoredSortedMap(Database, EntryBinding<K>, EntryBinding<V>, boolean) - Constructor for class com.sleepycat.collections.StoredSortedMap
    +
    +
    Creates a sorted map view of a Database.
    +
    +
    StoredSortedMap(Database, EntryBinding<K>, EntryBinding<V>, PrimaryKeyAssigner) - Constructor for class com.sleepycat.collections.StoredSortedMap
    +
    +
    Creates a sorted map view of a Database with a PrimaryKeyAssigner.
    +
    +
    StoredSortedMap(Database, EntryBinding<K>, EntityBinding<V>, boolean) - Constructor for class com.sleepycat.collections.StoredSortedMap
    +
    +
    Creates a sorted map entity view of a Database.
    +
    +
    StoredSortedMap(Database, EntryBinding<K>, EntityBinding<V>, PrimaryKeyAssigner) - Constructor for class com.sleepycat.collections.StoredSortedMap
    +
    +
    Creates a sorted map entity view of a Database with a PrimaryKeyAssigner.
    +
    +
    StoredSortedValueSet<E> - Class in com.sleepycat.collections
    +
    +
The SortedSet returned by Map.values() and which can also be constructed directly if a Map is not needed.
    +
    +
    StoredSortedValueSet(Database, EntityBinding<E>, boolean) - Constructor for class com.sleepycat.collections.StoredSortedValueSet
    +
    +
    Creates a sorted value set entity view of a Database.
    +
    +
    StoredValueSet<E> - Class in com.sleepycat.collections
    +
    +
The Set returned by Map.values() and Map.duplicates(), and which can also be constructed directly if a Map is not needed.
    +
    +
    StoredValueSet(Database, EntryBinding<E>, boolean) - Constructor for class com.sleepycat.collections.StoredValueSet
    +
    +
    Creates a value set view of a Database.
    +
    +
    StoredValueSet(Database, EntityBinding<E>, boolean) - Constructor for class com.sleepycat.collections.StoredValueSet
    +
    +
    Creates a value set entity view of a Database.
    +
    +
    StoreExistsException - Exception in com.sleepycat.persist
    +
    +
Thrown by the EntityStore constructor when the ExclusiveCreate configuration parameter is true and the store's internal catalog database already exists.
    +
    +
    StoreNotFoundException - Exception in com.sleepycat.persist
    +
    +
Thrown by the EntityStore constructor when the AllowCreate configuration parameter is false and the store's internal catalog database does not exist.
    +
    +
    Stream(InputStream, ClassLoader) - Constructor for class com.sleepycat.util.ClassResolver.Stream
    +
     
    +
    StringBinding - Class in com.sleepycat.bind.tuple
    +
    +
    A concrete TupleBinding for a simple String value.
    +
    +
    StringBinding() - Constructor for class com.sleepycat.bind.tuple.StringBinding
    +
     
    +
    stringToBytes(String) - Static method in class com.sleepycat.util.UtfOps
    +
    +
    Converts strings to byte arrays.
    +
    +
    stringToEntry(String, DatabaseEntry) - Static method in class com.sleepycat.bind.tuple.StringBinding
    +
    +
    Converts a simple String value into an entry buffer.
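A sketch of the static conversion helpers (round-tripping a String through a DatabaseEntry):

    import com.sleepycat.bind.tuple.StringBinding;
    import com.sleepycat.je.DatabaseEntry;

    DatabaseEntry entry = new DatabaseEntry();
    StringBinding.stringToEntry("hello", entry);
    // ... use entry as a key or data item ...
    String back = StringBinding.entryToString(entry);  // "hello"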
    +
    +
    subIndex(SK) - Method in class com.sleepycat.persist.SecondaryIndex
    +
    +
Returns an index that maps primary key to entity for the subset of entities having a given secondary key (duplicates).
    +
    +
    subMap(K, K) - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
Returns a view of the portion of this sorted map whose elements range from fromKey, inclusive, to toKey, exclusive.
    +
    +
    subMap(K, boolean, K, boolean) - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
Returns a view of the portion of this sorted map whose elements are strictly greater than fromKey and strictly less than toKey, optionally including fromKey and toKey.
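A sketch of both subMap forms (sortedMap is an assumed StoredSortedMap<String, Integer> over a database with string keys):

    import java.util.SortedMap;

    // Keys from "a" (inclusive) up to "m" (exclusive).
    SortedMap<String, Integer> half = sortedMap.subMap("a", "m");

    // Explicit inclusivity on both bounds.
    SortedMap<String, Integer> range = sortedMap.subMap("a", true, "m", true);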
    +
    +
    subSet(Map.Entry<K, V>, Map.Entry<K, V>) - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
Returns a view of the portion of this sorted set whose elements range from fromMapEntry, inclusive, to toMapEntry, exclusive.
    +
    +
    subSet(Map.Entry<K, V>, boolean, Map.Entry<K, V>, boolean) - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly greater than fromMapEntry and strictly less than toMapEntry, optionally including fromMapEntry and toMapEntry.
    +
    +
    subSet(K, K) - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
Returns a view of the portion of this sorted set whose elements range from fromKey, inclusive, to toKey, exclusive.
    +
    +
    subSet(K, boolean, K, boolean) - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly greater than fromKey and strictly less than toKey, optionally including fromKey and toKey.
    +
    +
    subSet(E, E) - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
Returns a view of the portion of this sorted set whose elements range from fromValue, inclusive, to toValue, exclusive.
    +
    +
    subSet(E, boolean, E, boolean) - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly greater than fromValue and strictly less than toValue, optionally including fromValue and toValue.
    +
    +
    SUCCESS - Static variable in class com.sleepycat.je.PreloadStatus
    +
    +
Database.preload was successful.
    +
    +
    sync() - Method in class com.sleepycat.je.Database
    +
    +
Flushes any cached information for this database to disk; only applicable for deferred-write databases.
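A sketch of the deferred-write pattern that makes sync() meaningful (env is an assumed open Environment; the database name is hypothetical):

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    // Writes accumulate in the cache instead of being logged per operation.
    dbConfig.setDeferredWrite(true);
    Database db = env.openDatabase(null, "scratchDb", dbConfig);
    // ... many writes ...
    db.sync();  // flush the accumulated changes to the log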
    +
    +
    sync() - Method in class com.sleepycat.je.Environment
    +
    +
    Synchronously flushes database environment databases to stable storage.
    +
    +
    sync() - Method in class com.sleepycat.persist.EntityStore
    +
    +
Flushes to disk each modified index that was opened in deferred-write mode.
    +
    +
    SyncupProgress - Enum in com.sleepycat.je.rep
    +
    +
Describes the different phases of replication stream syncup that are executed when a replica starts working with a new replication group master.
    +
    +

    T

    +
    +
    tailMap(K) - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
Returns a view of the portion of this sorted map whose elements are greater than or equal to fromKey.
    +
    +
    tailMap(K, boolean) - Method in class com.sleepycat.collections.StoredSortedMap
    +
    +
Returns a view of the portion of this sorted map whose elements are strictly greater than fromKey, optionally including fromKey.
    +
    +
    tailSet(Map.Entry<K, V>) - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
Returns a view of the portion of this sorted set whose elements are greater than or equal to fromMapEntry.
    +
    +
    tailSet(Map.Entry<K, V>, boolean) - Method in class com.sleepycat.collections.StoredSortedEntrySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly greater than fromMapEntry, optionally including fromMapEntry.
    +
    +
    tailSet(K) - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
Returns a view of the portion of this sorted set whose elements are greater than or equal to fromKey.
    +
    +
    tailSet(K, boolean) - Method in class com.sleepycat.collections.StoredSortedKeySet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly greater than fromKey, optionally including fromKey.
    +
    +
    tailSet(E) - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
Returns a view of the portion of this sorted set whose elements are greater than or equal to fromValue.
    +
    +
    tailSet(E, boolean) - Method in class com.sleepycat.collections.StoredSortedValueSet
    +
    +
Returns a view of the portion of this sorted set whose elements are strictly greater than fromValue, optionally including fromValue.
    +
    +
    ThreadInterruptedException - Exception in com.sleepycat.je
    +
    +
Thrown when java.lang.InterruptedException (a thread interrupt) or java.nio.channels.ClosedChannelException (which also results from a thread interrupt) occurs in any JE method.
    +
    +
    TimeConsistencyPolicy - Class in com.sleepycat.je.rep
    +
    +
A consistency policy which describes the amount of time the Replica is allowed to lag the Master.
    +
    +
    TimeConsistencyPolicy(long, TimeUnit, long, TimeUnit) - Constructor for class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
    +
Specifies the amount of time by which the Replica is allowed to lag the master when initiating a transaction.
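A sketch of applying the policy to a transaction on a replica (a replicated environment is assumed):

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.TransactionConfig;
    import com.sleepycat.je.rep.TimeConsistencyPolicy;

    // Require the replica to be within 2 seconds of the master, waiting
    // up to 5 seconds for it to catch up before the transaction fails.
    TimeConsistencyPolicy policy =
        new TimeConsistencyPolicy(2, TimeUnit.SECONDS, 5, TimeUnit.SECONDS);
    TransactionConfig txnConfig = new TransactionConfig();
    txnConfig.setConsistencyPolicy(policy);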
    +
    +
    toArray() - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Returns an array of all the elements in this collection.
    +
    +
    toArray(T[]) - Method in class com.sleepycat.collections.StoredCollection
    +
    +
Returns an array of all the elements in this collection whose runtime type is that of the specified array.
    +
    +
    toByteArray() - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    tokenize(String) - Method in class com.sleepycat.je.util.Splitter
    +
     
    +
    toList() - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Returns a copy of this collection as an ArrayList.
    +
    +
    toReadOptions() - Method in enum com.sleepycat.je.LockMode
    +
    +
Returns a ReadOptions with this LockMode property, and default values for all other properties.
    +
    +
    toString() - Method in class com.sleepycat.collections.MapEntryParameter
    +
    +
    Converts the entry to a string representation for debugging.
    +
    +
    toString() - Method in class com.sleepycat.collections.StoredCollection
    +
    +
    Converts the collection to a string representation for debugging.
    +
    +
    toString() - Method in class com.sleepycat.collections.StoredEntrySet
    +
     
    +
    toString() - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Converts the map to a string representation for debugging.
    +
    +
    toString() - Method in class com.sleepycat.je.BtreeStats
    +
    +
For convenience, the BtreeStats class has a toString method that lists all the data fields.
    +
    +
    toString() - Method in class com.sleepycat.je.CheckpointConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.CommitToken
    +
     
    +
    toString() - Method in class com.sleepycat.je.CursorConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.DatabaseConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.DatabaseEntry
    +
    +
Returns all the attributes of the database entry in text form, including the underlying data.
    +
    +
    toString() - Method in class com.sleepycat.je.DiskOrderedCursorConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.Durability
    +
    +
Returns the string representation of durability in the format defined by the string form of the Durability constructor.
    +
    +
    toString() - Method in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Display configuration values.
    +
    +
    toString() - Method in class com.sleepycat.je.EnvironmentMutableConfig
    +
    +
    Display configuration values.
    +
    +
    toString() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
Returns a String representation of the stats in the form of <stat>=<value>
    +
    +
    toString() - Method in class com.sleepycat.je.ExceptionEvent
    +
     
    +
    toString() - Method in enum com.sleepycat.je.ForeignKeyDeleteAction
    +
     
    +
    toString() - Method in class com.sleepycat.je.JEVersion
    +
     
    +
    toString() - Method in class com.sleepycat.je.JoinConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in enum com.sleepycat.je.LockMode
    +
     
    +
    toString() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
For convenience, LockTable.toString will display all stats in an easily readable format.
    +
    +
    toString() - Method in enum com.sleepycat.je.OperationStatus
    +
    toString() - Method in class com.sleepycat.je.PreloadConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.PreloadStats
    +
    +
Returns a String representation of the stats in the form of <stat>=<value>
    +
    +
    toString() - Method in class com.sleepycat.je.PreloadStatus
    +
     
    +
    toString() - Method in class com.sleepycat.je.rep.arbiter.ArbiterConfig
    +
    +
    Display configuration values.
    +
    +
    toString() - Method in class com.sleepycat.je.rep.arbiter.ArbiterMutableConfig
    +
    +
    Display configuration values.
    +
    +
    toString() - Method in class com.sleepycat.je.rep.CommitPointConsistencyPolicy
    +
     
    +
    toString() - Method in exception com.sleepycat.je.rep.InsufficientLogException
    +
     
    +
    toString() - Method in class com.sleepycat.je.rep.monitor.GroupChangeEvent
    +
     
    +
    toString() - Method in class com.sleepycat.je.rep.monitor.JoinGroupEvent
    +
     
    +
    toString() - Method in class com.sleepycat.je.rep.monitor.LeaveGroupEvent
    +
     
    +
    toString() - Method in class com.sleepycat.je.rep.monitor.NewMasterEvent
    +
     
    +
    toString() - Method in class com.sleepycat.je.rep.NodeState
    +
     
    +
    toString() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
    +
    Returns a string representation of the statistics.
    +
    +
    toString() - Method in class com.sleepycat.je.rep.ReplicationGroup
    +
    +
Returns a formatted version of the information held in a ReplicationGroup.
    +
    +
    toString() - Method in class com.sleepycat.je.rep.ReplicationMutableConfig
    +
    +
Lists the configuration parameters and values that have been set in this configuration object.
    +
    +
    toString() - Method in class com.sleepycat.je.rep.TimeConsistencyPolicy
    +
     
    +
    toString() - Method in class com.sleepycat.je.SecondaryConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.SequenceConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.SequenceStats
    +
     
    +
    toString() - Method in class com.sleepycat.je.StatsConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.Transaction
    +
     
    +
    toString() - Method in class com.sleepycat.je.TransactionConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.je.TransactionStats.Active
    +
     
    +
    toString() - Method in class com.sleepycat.je.TransactionStats
    +
    toString() - Method in class com.sleepycat.je.VerifyConfig
    +
    +
    Returns the values for each configuration attribute.
    +
    +
    toString() - Method in class com.sleepycat.persist.evolve.Converter
    +
     
    +
    toString() - Method in class com.sleepycat.persist.evolve.Deleter
    +
     
    +
    toString() - Method in class com.sleepycat.persist.evolve.EntityConverter
    +
     
    +
    toString() - Method in class com.sleepycat.persist.evolve.Mutation
    +
     
    +
    toString() - Method in class com.sleepycat.persist.evolve.Mutations
    +
     
    +
    toString() - Method in class com.sleepycat.persist.evolve.Renamer
    +
     
    +
    toString() - Method in class com.sleepycat.persist.model.FieldMetadata
    +
     
    +
    toString() - Method in class com.sleepycat.persist.raw.RawObject
    +
    +
    Returns an XML representation of the raw object.
    +
    +
    toString() - Method in interface com.sleepycat.persist.raw.RawType
    +
    +
    Returns an XML representation of the raw type.
    +
    +
    toString() - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    toString(String) - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    toStringVerbose() - Method in class com.sleepycat.je.BtreeStats
    +
     
    +
    toStringVerbose() - Method in class com.sleepycat.je.EnvironmentStats
    +
    +
Returns a String representation of the stats which includes stats descriptions in addition to <stat>=<value>
    +
    +
    toStringVerbose() - Method in class com.sleepycat.je.LockStats
    +
    +
    Deprecated.
    +
    Like #toString, display all stats.
    +
    +
    toStringVerbose() - Method in class com.sleepycat.je.rep.ReplicatedEnvironmentStats
    +
     
    +
    toStringVerbose() - Method in class com.sleepycat.je.SequenceStats
    +
     
    +
    toStringVerbose() - Method in class com.sleepycat.je.TransactionStats
    +
     
    +
    TRACE_CONSOLE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
in favor of CONSOLE_LOGGING_LEVEL. As of JE 4.0, use the standard java.util.logging configuration methodologies. To enable console output, set com.sleepycat.je.util.ConsoleHandler.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager. To set the handler level programmatically, set "com.sleepycat.je.util.ConsoleHandler.level" in the EnvironmentConfig object.
    +
    +
    +
    TRACE_DB - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, event tracing to the .jdb files has been separated from the java.util.logging mechanism. This parameter has no effect.
    +
    +
    +
    TRACE_FILE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
in favor of FILE_LOGGING_LEVEL. As of JE 4.0, use the standard java.util.logging configuration methodologies. To enable logging output to the je.info files, set com.sleepycat.je.util.FileHandler.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager. To set the handler level programmatically, set "com.sleepycat.je.util.FileHandler.level" in the EnvironmentConfig object.
    +
    +
    +
    TRACE_FILE_COUNT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, use the standard java.util.logging configuration methodologies. To set the FileHandler output file count, set com.sleepycat.je.util.FileHandler.count = <NUMBER> through the java.util.logging configuration file, or through the java.util.logging.LogManager.
    +
    +
    +
    TRACE_FILE_LIMIT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, use the standard java.util.logging configuration methodologies. To set the FileHandler output file size, set com.sleepycat.je.util.FileHandler.limit = <NUMBER> through the java.util.logging configuration file, or through the java.util.logging.LogManager.
    +
    +
    +
    TRACE_LEVEL - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, use the standard java.util.logging configuration methodologies. Set logging levels using class names through the java.util.logging configuration file, or through the java.util.logging.LogManager.
    +
    +
    +
    TRACE_LEVEL_CLEANER - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, use the standard java.util.logging configuration methodologies. To see cleaner logging, set com.sleepycat.je.cleaner.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager.
    +
    +
    +
    TRACE_LEVEL_EVICTOR - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, use the standard java.util.logging configuration methodologies. To see evictor logging, set com.sleepycat.je.evictor.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager.
    +
    +
    +
    TRACE_LEVEL_LOCK_MANAGER - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, use the standard java.util.logging configuration methodologies. To see locking logging, set com.sleepycat.je.txn.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager.
    +
    +
    +
    TRACE_LEVEL_RECOVERY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
As of JE 4.0, use the standard java.util.logging configuration methodologies. To see recovery logging, set com.sleepycat.je.recovery.level = <LEVEL> through the java.util.logging configuration file, or through the java.util.logging.LogManager.
    +
    +
    +
    Transaction - Class in com.sleepycat.je
    +
    +
    The Transaction object is the handle for a transaction.
    +
    +
    Transaction.State - Enum in com.sleepycat.je
    +
    +
    The current state of the transaction.
    +
    +
    TransactionConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of a database environment transaction.
    +
    +
    TransactionConfig() - Constructor for class com.sleepycat.je.TransactionConfig
    +
    +
An instance created using the default constructor is initialized with the system's default settings.
    +
    +
    TransactionRunner - Class in com.sleepycat.collections
    +
    +
Starts a transaction, calls TransactionWorker.doWork(), and handles transaction retry and exceptions.
    +
    +
    TransactionRunner(Environment) - Constructor for class com.sleepycat.collections.TransactionRunner
    +
    +
    Creates a transaction runner for a given Berkeley DB environment.
    +
    +
    TransactionRunner(Environment, int, TransactionConfig) - Constructor for class com.sleepycat.collections.TransactionRunner
    +
    +
Creates a transaction runner for a given Berkeley DB environment and with a given number of maximum retries.
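A sketch of running work under a TransactionRunner (env and map are assumed handles; run() declares a checked Exception that the caller handles):

    import com.sleepycat.collections.TransactionRunner;
    import com.sleepycat.collections.TransactionWorker;

    TransactionRunner runner = new TransactionRunner(env);
    try {
        runner.run(new TransactionWorker() {
            public void doWork() {
                // Everything here executes in one transaction; the runner
                // retries automatically on lock conflicts.
                map.put("key", 1);
            }
        });
    } catch (Exception e) {
        // the transaction was aborted and retries were exhausted
    }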
    +
    +
    TransactionStats - Class in com.sleepycat.je
    +
    +
    Transaction statistics for a database environment.
    +
    +
    TransactionStats.Active - Class in com.sleepycat.je
    +
    +
    The Active class represents an active transaction.
    +
    +
    TransactionTimeoutException - Exception in com.sleepycat.je
    +
    +
    Thrown when the transaction timeout interval is exceeded.
    +
    +
    TransactionWorker - Interface in com.sleepycat.collections
    +
    +
    The interface implemented to perform the work within a transaction.
    +
    +
    transferMaster(Set<String>, int, TimeUnit) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
Transfers the current master state from this node to one of the electable replicas supplied in the argument list.
    +
    +
    transferMaster(Set<String>, int, TimeUnit, boolean) - Method in class com.sleepycat.je.rep.ReplicatedEnvironment
    +
    +
Transfers the current master state from this node to one of the replicas supplied in the argument list.
    +
    +
    transferMaster(String, String) - Method in class com.sleepycat.je.rep.util.DbGroupAdmin
    +
    +
Transfers the master role from the current master to one of the electable replicas specified in the argument list.
    +
    +
    transferMaster(Set<String>, int, TimeUnit, boolean) - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
Transfers the master state from the current master to one of the electable replicas supplied in the argument list.
    +
    +
    transform(ClassLoader, String, Class<?>, ProtectionDomain, byte[]) - Method in class com.sleepycat.persist.model.ClassEnhancer
    +
     
    +
    TREE_BIN_DELTA - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
If more than this percentage of entries are changed on a BIN, log a full version instead of a delta.
    +
    +
    TREE_COMPACT_MAX_KEY_LENGTH - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Specifies the maximum unprefixed key length for use in the compact in-memory key representation.
    +
    +
    TREE_MAX_DELTA - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Deprecated.
as of JE 6.0. The EnvironmentConfig.TREE_BIN_DELTA param alone now determines whether a delta is logged.
    +
    +
    +
    TREE_MAX_EMBEDDED_LN - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The maximum size (in bytes) of a record's data portion that will cause the record to be embedded in its parent BIN.
    +
    +
    TREE_MIN_MEMORY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The minimum bytes allocated out of the memory cache to hold Btree data including internal nodes and record keys and data.
    +
    +
    truncateClass(Class) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Deletes all instances of this entity class and its (non-entity) subclasses.
    +
    +
    truncateClass(Transaction, Class) - Method in class com.sleepycat.persist.EntityStore
    +
    +
Deletes all instances of this entity class and its (non-entity) subclasses.
    +
    +
    truncateDatabase(Transaction, String, boolean) - Method in class com.sleepycat.je.Environment
    +
    +
Empties the database, discarding all the records it contains, without removing the database name.
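A sketch of truncation (env is an assumed open Environment, "myDb" a hypothetical database name; all handles for the database must be closed first):

    // The third argument requests a count of the discarded records.
    long discarded = env.truncateDatabase(null, "myDb", true);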
    +
    +
    truncateDatabase(String, boolean) - Method in class com.sleepycat.je.jca.ra.JEConnection
    +
     
    +
    TupleBase<E> - Class in com.sleepycat.bind.tuple
    +
    +
A base class for tuple bindings and tuple key creators that provides control over the allocation of the output buffer.
    +
    +
    TupleBase() - Constructor for class com.sleepycat.bind.tuple.TupleBase
    +
    +
    Initializes the initial output buffer size to zero.
    +
    +
    TupleBinding<E> - Class in com.sleepycat.bind.tuple
    +
    +
An abstract EntryBinding that treats a key or data entry as a tuple; it includes predefined bindings for Java primitive types.
    +
    +
    TupleBinding() - Constructor for class com.sleepycat.bind.tuple.TupleBinding
    +
    +
    Creates a tuple binding.
    +
    +
    TupleInput - Class in com.sleepycat.bind.tuple
    +
    +
An InputStream with DataInput-like methods for reading tuple fields.
    +
    +
    TupleInput(byte[]) - Constructor for class com.sleepycat.bind.tuple.TupleInput
    +
    +
    Creates a tuple input object for reading a byte array of tuple data.
    +
    +
    TupleInput(byte[], int, int) - Constructor for class com.sleepycat.bind.tuple.TupleInput
    +
    +
Creates a tuple input object for reading a byte array of tuple data at a given offset for a given length.
    +
    +
    TupleInput(TupleOutput) - Constructor for class com.sleepycat.bind.tuple.TupleInput
    +
    +
Creates a tuple input object from the data contained in a tuple output object.
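A sketch of a tuple round trip; fields must be read back in the order they were written:

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    TupleOutput out = new TupleOutput();
    out.writeString("alpha");
    out.writeInt(7);

    TupleInput in = new TupleInput(out);
    String s = in.readString();  // "alpha"
    int n = in.readInt();        // 7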
    +
    +
    TupleInputBinding - Class in com.sleepycat.bind.tuple
    +
    +
A concrete EntryBinding that uses the TupleInput object as the key or data object.
    +
    +
    TupleInputBinding() - Constructor for class com.sleepycat.bind.tuple.TupleInputBinding
    +
    +
    Creates a tuple input binding.
    +
    +
    TupleMarshalledBinding<E extends MarshalledTupleEntry> - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleBinding that delegates to the MarshalledTupleEntry interface of the data or key object.
    +
    +
    TupleMarshalledBinding(Class<E>) - Constructor for class com.sleepycat.bind.tuple.TupleMarshalledBinding
    +
    +
    Creates a tuple marshalled binding object.
    +
    +
    TupleOutput - Class in com.sleepycat.bind.tuple
    +
    +
An OutputStream with DataOutput-like methods for writing tuple fields.
    +
    +
    TupleOutput() - Constructor for class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Creates a tuple output object for writing a byte array of tuple data.
    +
    +
    TupleOutput(byte[]) - Constructor for class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Creates a tuple output object for writing a byte array of tuple data, using a given buffer.
    +
    +
    TupleSerialBinding<D,E> - Class in com.sleepycat.bind.serial
    +
    +
An abstract EntityBinding that treats an entity's key entry as a tuple and its data entry as a serialized object.
    +
    +
    TupleSerialBinding(ClassCatalog, Class<D>) - Constructor for class com.sleepycat.bind.serial.TupleSerialBinding
    +
    +
    Creates a tuple-serial entity binding.
    +
    +
    TupleSerialBinding(SerialBinding<D>) - Constructor for class com.sleepycat.bind.serial.TupleSerialBinding
    +
    +
    Creates a tuple-serial entity binding.
    +
    +
    TupleSerialFactory - Class in com.sleepycat.collections
    +
    +
    Creates stored collections having tuple keys and serialized entity values.
    +
    +
    TupleSerialFactory(ClassCatalog) - Constructor for class com.sleepycat.collections.TupleSerialFactory
    +
    +
Creates a tuple-serial factory for a given environment and class catalog.
    +
    +
    TupleSerialKeyCreator<D> - Class in com.sleepycat.bind.serial
    +
    +
An abstract key creator that uses a tuple key and a serial data entry.
    +
    +
    TupleSerialKeyCreator(ClassCatalog, Class<D>) - Constructor for class com.sleepycat.bind.serial.TupleSerialKeyCreator
    +
    +
    Creates a tuple-serial key creator.
    +
    +
    TupleSerialKeyCreator(SerialBinding<D>) - Constructor for class com.sleepycat.bind.serial.TupleSerialKeyCreator
    +
    +
    Creates a tuple-serial key creator.
    +
    +
    TupleSerialMarshalledBinding<E extends MarshalledTupleKeyEntity> - Class in com.sleepycat.bind.serial
    +
    +
A concrete TupleSerialBinding that delegates to the MarshalledTupleKeyEntity interface of the entity class.
    +
    +
    TupleSerialMarshalledBinding(ClassCatalog, Class<E>) - Constructor for class com.sleepycat.bind.serial.TupleSerialMarshalledBinding
    +
    +
    Creates a tuple-serial marshalled binding object.
    +
    +
    TupleSerialMarshalledBinding(SerialBinding<E>) - Constructor for class com.sleepycat.bind.serial.TupleSerialMarshalledBinding
    +
    +
    Creates a tuple-serial marshalled binding object.
    +
    +
    TupleSerialMarshalledKeyCreator<D extends MarshalledTupleKeyEntity> - Class in com.sleepycat.bind.serial
    +
    +
    A concrete key creator that works in conjunction with a TupleSerialMarshalledBinding.
    +
    +
    TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding<D>, String) - Constructor for class com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator
    +
    +
    Creates a tuple-serial marshalled key creator.
    +
    +
    TupleTupleBinding<E> - Class in com.sleepycat.bind.tuple
    +
    +
An abstract EntityBinding that treats an entity's key entry and data entry as tuples.
    +
    +
    TupleTupleBinding() - Constructor for class com.sleepycat.bind.tuple.TupleTupleBinding
    +
    +
    Creates a tuple-tuple entity binding.
    +
    +
    TupleTupleKeyCreator<E> - Class in com.sleepycat.bind.tuple
    +
    +
    An abstract key creator that uses a tuple key and a tuple data entry.
    +
    +
    TupleTupleKeyCreator() - Constructor for class com.sleepycat.bind.tuple.TupleTupleKeyCreator
    +
    +
    Creates a tuple-tuple key creator.
    +
    +
    TupleTupleMarshalledBinding<E extends MarshalledTupleEntry & MarshalledTupleKeyEntity> - Class in com.sleepycat.bind.tuple
    +
    +
A concrete TupleTupleBinding that delegates to the MarshalledTupleEntry and MarshalledTupleKeyEntity interfaces of the entity class.
    +
    +
    TupleTupleMarshalledBinding(Class<E>) - Constructor for class com.sleepycat.bind.tuple.TupleTupleMarshalledBinding
    +
    +
    Creates a tuple-tuple marshalled binding object.
    +
    +
    TupleTupleMarshalledKeyCreator<E extends MarshalledTupleEntry & MarshalledTupleKeyEntity> - Class in com.sleepycat.bind.tuple
    +
    +
    A concrete key creator that works in conjunction with a TupleTupleMarshalledBinding.
    +
    +
    TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding<E>, String) - Constructor for class com.sleepycat.bind.tuple.TupleTupleMarshalledKeyCreator
    +
    +
    Creates a tuple-tuple marshalled key creator.
    +
    +
    TXN_DEADLOCK_STACK_TRACE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Set this parameter to true to add stacktrace information to deadlock (lock timeout) exception messages.
    +
    +
    TXN_DUMP_LOCKS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Dump the lock table when a lock timeout is encountered, for debugging assistance.
    +
    +
    TXN_DURABILITY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the default durability associated with transactions.
    +
    +
    TXN_ROLLBACK_DISABLED - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
In rare cases, a node may need to roll back committed transactions in order to rejoin a replication group.
    +
    +
    TXN_ROLLBACK_LIMIT - Static variable in class com.sleepycat.je.rep.ReplicationConfig
    +
    +
In rare cases, a node may need to roll back committed transactions in order to rejoin a replication group.
    +
    +
    TXN_SERIALIZABLE_ISOLATION - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Configures all transactions for this environment to have Serializable (Degree 3) isolation.
    +
    +
    TXN_TIMEOUT - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Configures the transaction timeout.
    +
    +

    U

    +
    +
    UniqueConstraintException - Exception in com.sleepycat.je
    +
    +
Thrown when an attempt to write a primary database record would insert a secondary record with a duplicate key, for secondaries that represent one-to-one and one-to-many relationships.
    +
    +
    UnknownMasterException - Exception in com.sleepycat.je.rep
    +
    +
Indicates that the underlying operation requires communication with a Master, but that a Master was not available.
    +
    +
    UnknownMasterException(Locker, StateChangeEvent) - Constructor for exception com.sleepycat.je.rep.UnknownMasterException
    +
     
    +
    UnknownMasterException(String) - Constructor for exception com.sleepycat.je.rep.UnknownMasterException
    +
    +
Used when the inability to determine a master is not related to a state change.
    +
    +
    UnknownMasterException(String, Exception) - Constructor for exception com.sleepycat.je.rep.UnknownMasterException
    +
    +
Used when the inability to determine a master is not related to a state change, but to an inability to communicate with a node identified as a master.
    +
    +
    unmarshalEntry(TupleInput) - Method in interface com.sleepycat.bind.tuple.MarshalledTupleEntry
    +
    +
    Construct the key or data object from the key or data tuple entry.
    +
    +
    unmarshalPrimaryKey(TupleInput) - Method in interface com.sleepycat.bind.tuple.MarshalledTupleKeyEntity
    +
    +
Completes construction of the entity by setting its primary key from the stored primary key.
    +
    +
    unwrap(Exception) - Static method in class com.sleepycat.util.ExceptionUnwrapper
    +
    +
Unwraps an Exception and returns the underlying Exception, or throws an Error if the underlying Throwable is an Error.
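A sketch of recovering the original exception from a wrapped one (map is an assumed stored collection whose bindings may throw checked exceptions):

    import com.sleepycat.util.ExceptionUnwrapper;
    import com.sleepycat.util.RuntimeExceptionWrapper;

    try {
        map.get("key");
    } catch (RuntimeExceptionWrapper e) {
        // Rethrows Errors; otherwise returns the wrapped Exception.
        Exception cause = ExceptionUnwrapper.unwrap(e);
    }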
    +
    +
    unwrapAny(Throwable) - Static method in class com.sleepycat.util.ExceptionUnwrapper
    +
    +
    Unwraps an Exception and returns the underlying Throwable.
    +
    +
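A minimal sketch of unwrapping, pairing ExceptionUnwrapper with RuntimeExceptionWrapper.wrapIfNeeded (indexed under W below):

    import java.io.IOException;

    import com.sleepycat.util.ExceptionUnwrapper;
    import com.sleepycat.util.RuntimeExceptionWrapper;

    public class UnwrapSketch {
        public static void main(String[] args) {
            try {
                // Wrap a checked exception the way the collections API does internally.
                throw RuntimeExceptionWrapper.wrapIfNeeded(new IOException("disk error"));
            } catch (RuntimeException e) {
                // Recover the original checked exception for inspection.
                Throwable cause = ExceptionUnwrapper.unwrapAny(e);
                System.out.println(cause); // java.io.IOException: disk error
            }
        }
    }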
    update(V) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
    Replaces the entity at the cursor position with the given entity.
    +
    +
    update(V, WriteOptions) - Method in interface com.sleepycat.persist.EntityCursor
    +
    +
Replaces the entity at the cursor position with the given entity, using a WriteOptions parameter and returning an OperationResult.
    +
    +
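A minimal sketch of cursor-based updates; the Employee entity and the EntityStore setup are hypothetical:

    import com.sleepycat.persist.EntityCursor;
    import com.sleepycat.persist.EntityStore;
    import com.sleepycat.persist.PrimaryIndex;
    import com.sleepycat.persist.model.Entity;
    import com.sleepycat.persist.model.PrimaryKey;

    public class CursorUpdateSketch {
        @Entity
        static class Employee {
            @PrimaryKey
            long id;
            double salary;
        }

        // Assumes 'store' is an open EntityStore, configured elsewhere.
        static void giveRaise(EntityStore store) {
            PrimaryIndex<Long, Employee> index =
                store.getPrimaryIndex(Long.class, Employee.class);
            EntityCursor<Employee> cursor = index.entities();
            try {
                for (Employee e = cursor.first(); e != null; e = cursor.next()) {
                    e.salary *= 1.05;  // modify the current entity
                    cursor.update(e);  // replace the record at the cursor position
                }
            } finally {
                cursor.close();
            }
        }
    }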
    updateAddress(String, String, int) - Method in class com.sleepycat.je.rep.util.DbGroupAdmin
    +
    +
    Update the network address for a specified node.
    +
    +
    updateAddress(String, String, int) - Method in class com.sleepycat.je.rep.util.ReplicationGroupAdmin
    +
    +
Update the network address for a specified member of the replication group.
    +
    +
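A minimal sketch of updating a member's address through ReplicationGroupAdmin; the group name, helper address, node name, and new address are all hypothetical:

    import java.net.InetSocketAddress;
    import java.util.Collections;
    import java.util.Set;

    import com.sleepycat.je.rep.util.ReplicationGroupAdmin;

    public class UpdateAddressSketch {
        public static void main(String[] args) {
            Set<InetSocketAddress> helpers =
                Collections.singleton(new InetSocketAddress("helper-host", 5001));
            ReplicationGroupAdmin admin =
                new ReplicationGroupAdmin("myGroup", helpers);
            // Move node "node3" to a new host/port; the node should be offline.
            admin.updateAddress("node3", "new-host", 5103);
        }
    }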
    updateMax(long) - Method in class com.sleepycat.je.rep.util.AtomicLongMax
    +
    +
    Updates the max value if the argument is greater than the current max.
    +
    +
    USER_HALT_REQUEST - Static variable in class com.sleepycat.je.PreloadStatus
    +
    +
The user requested that preload stop during a call to ProgressListener.progress().
    +
    +
    UtfOps - Class in com.sleepycat.util
    +
    +
UTF operations with more flexibility than is provided by DataInput and DataOutput.
    +
    +
    UtfOps() - Constructor for class com.sleepycat.util.UtfOps
    +
     
    +
    + + + +

    V

    +
    +
    value() - Method in class com.sleepycat.persist.EntityResult
    +
    +
    Returns the entity value resulting from the operation.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.CacheMode
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.Durability.ReplicaAckPolicy
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.Durability.SyncPolicy
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.ForeignKeyDeleteAction
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.Get
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.LockMode
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.OperationStatus
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.PreloadConfig.Phases
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.Put
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.RecoveryProgress
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.rep.NodeType
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.rep.QuorumPolicy
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.rep.ReplicatedEnvironment.State
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.rep.SyncupProgress
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.je.Transaction.State
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.persist.model.DeleteAction
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    valueOf(String) - Static method in enum com.sleepycat.persist.model.Relationship
    +
    +
    Returns the enum constant of this type with the specified name.
    +
    +
    values() - Method in class com.sleepycat.collections.StoredMap
    +
    +
    Returns a collection view of the values contained in this map.
    +
    +
    values() - Static method in enum com.sleepycat.je.CacheMode
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
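Both values() and valueOf(String) follow the standard java.lang.Enum contract; a minimal sketch using CacheMode:

    import com.sleepycat.je.CacheMode;

    public class EnumSketch {
        public static void main(String[] args) {
            // Constants in declaration order.
            for (CacheMode mode : CacheMode.values()) {
                System.out.println(mode);
            }
            // Round-trip a constant through its name.
            System.out.println(CacheMode.valueOf("DEFAULT") == CacheMode.DEFAULT); // true
        }
    }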
    values() - Static method in enum com.sleepycat.je.Durability.ReplicaAckPolicy
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.Durability.SyncPolicy
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.ForeignKeyDeleteAction
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.Get
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.LockMode
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.OperationStatus
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.PreloadConfig.Phases
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.Put
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.RecoveryProgress
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.rep.NodeType
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.rep.QuorumPolicy
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.rep.ReplicatedEnvironment.State
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.rep.SyncupProgress
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.je.Transaction.State
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.persist.model.DeleteAction
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    values() - Static method in enum com.sleepycat.persist.model.Relationship
    +
    +
Returns an array containing the constants of this enum type, in the order they are declared.
    +
    +
    verbose - Variable in class com.sleepycat.je.util.DbDump
    +
     
    +
    verify(VerifyConfig) - Method in class com.sleepycat.je.Database
    +
    +
    Verifies the integrity of the database.
    +
    +
    verify(VerifyConfig, PrintStream) - Method in class com.sleepycat.je.Environment
    +
    +
Returns whether the database environment is consistent and correct.
    +
    +
    verify(PrintStream) - Method in class com.sleepycat.je.util.DbVerify
    +
    +
Deprecated.
    +
    +
    verify(long, long) - Method in class com.sleepycat.je.util.DbVerifyLog
    +
    +
    Verifies the given range of log files in the environment.
    +
    +
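A minimal sketch combining these verify methods, assuming an open Environment and Database passed in by the caller:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.VerifyConfig;

    public class VerifySketch {
        static boolean check(Environment env, Database db) {
            VerifyConfig config = new VerifyConfig();
            config.setPrintInfo(true);             // report details while verifying
            db.verify(config);                     // verify one database's Btree
            return env.verify(config, System.err); // verify the whole environment
        }
    }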
    VERIFY_BTREE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Whether the background verifier should perform Btree verification, as if the DbVerify utility were run.
    +
    +
    VERIFY_BTREE_BATCH_DELAY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
The delay between batches during Btree verification.
    +
    +
    VERIFY_BTREE_BATCH_SIZE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The number of records verified per batch during Btree verification.
    +
    +
    VERIFY_DATA_RECORDS - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Whether to verify data records (leaf nodes, or LNs) during Btree verification.
    +
    +
    VERIFY_LOG - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
Whether the background verifier should verify checksums in the log, as if the DbVerifyLog utility were run.
    +
    +
    VERIFY_LOG_READ_DELAY - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    The delay between reads during log verification.
    +
    +
    VERIFY_SCHEDULE - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
A crontab-format string indicating when to start the background verifier.
    +
    +
    VERIFY_SECONDARIES - Static variable in class com.sleepycat.je.EnvironmentConfig
    +
    +
    Whether to verify secondary index references during Btree verification.
    +
    +
    verifyAll() - Method in class com.sleepycat.je.util.DbVerifyLog
    +
    +
    Verifies all log files in the environment.
    +
    +
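The VERIFY_* parameters above drive the background verifier; a minimal sketch enabling daily verification (the schedule value is one possible choice):

    import com.sleepycat.je.EnvironmentConfig;

    public class BackgroundVerifierSketch {
        static EnvironmentConfig configure() {
            EnvironmentConfig config = new EnvironmentConfig();
            // Run the background verifier daily at midnight (crontab format).
            config.setConfigParam(EnvironmentConfig.VERIFY_SCHEDULE, "0 0 * * *");
            config.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "true");
            config.setConfigParam(EnvironmentConfig.VERIFY_DATA_RECORDS, "true");
            config.setConfigParam(EnvironmentConfig.VERIFY_SECONDARIES, "true");
            config.setConfigParam(EnvironmentConfig.VERIFY_LOG, "true");
            return config;
        }
    }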
    VerifyConfig - Class in com.sleepycat.je
    +
    +
    Specifies the attributes of a verification operation.
    +
    +
    VerifyConfig() - Constructor for class com.sleepycat.je.VerifyConfig
    +
    +
An instance created using the default constructor is initialized with the system's default settings.
    +
    +
    VersionMismatchException - Exception in com.sleepycat.je
    +
    +
Thrown by the Environment constructor when an environment cannot be opened because the version of the existing log is not compatible with the version of JE that is running.
    +
    +
    + + + +

    W

    +
    +
    wrapIfNeeded(Throwable) - Static method in exception com.sleepycat.util.RuntimeExceptionWrapper
    +
    +
    Wraps the given exception if it is not a RuntimeException.
    +
    +
    write(int) - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    write(byte[]) - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    write(byte[], int, int) - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    writeBigDecimal(BigDecimal) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsorted BigDecimal.
    +
    +
    writeBigInteger(BigInteger) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes a BigInteger.
    +
    +
    writeBoolean(boolean) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes a boolean (one byte) unsigned value to the buffer, writing one if the value is true and zero if it is false.
    +
    +
    writeByte(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes a signed byte (one byte) value to the buffer.
    +
    +
    writeBytes(String) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes the specified bytes to the buffer, converting each character to an unsigned byte value.
    +
    +
    writeBytes(char[]) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes the specified bytes to the buffer, converting each character to an unsigned byte value.
    +
    +
    writeChar(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes a char (two byte) unsigned value to the buffer.
    +
    +
    writeChars(String) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes the specified characters to the buffer, converting each character to a two byte unsigned value.
    +
    +
    writeChars(char[]) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes the specified characters to the buffer, converting each character to a two byte unsigned value.
    +
    +
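A minimal round-trip sketch for the TupleOutput write methods in this section, read back with TupleInput:

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class TupleSketch {
        public static void main(String[] args) {
            TupleOutput out = new TupleOutput();
            out.writeString("smith");   // UTF chars plus a null terminator byte
            out.writeInt(42);           // signed four-byte value
            out.writeBoolean(true);     // one byte: 1 for true, 0 for false

            TupleInput in = new TupleInput(out.toByteArray());
            System.out.println(in.readString());  // smith
            System.out.println(in.readInt());     // 42
            System.out.println(in.readBoolean()); // true
        }
    }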
    writeClassDescriptor(ObjectStreamClass) - Method in class com.sleepycat.bind.serial.SerialOutput
    +
     
    +
    writeDouble(double) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsorted double (eight byte) value to the buffer.
    +
    +
    writeFast(int) - Method in class com.sleepycat.util.FastOutputStream
    +
    +
Equivalent to write(int) but does not throw IOException.
    +
    +
    writeFast(byte[]) - Method in class com.sleepycat.util.FastOutputStream
    +
    +
Equivalent to write(byte[]) but does not throw IOException.
    +
    +
    writeFast(byte[], int, int) - Method in class com.sleepycat.util.FastOutputStream
    +
    +
Equivalent to write(byte[],int,int) but does not throw IOException.
    +
    +
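A minimal sketch of the writeFast variants, which mirror write but declare no IOException:

    import com.sleepycat.util.FastOutputStream;

    public class FastOutputSketch {
        public static void main(String[] args) {
            FastOutputStream out = new FastOutputStream();
            out.writeFast(0x7f);                          // one byte
            out.writeFast(new byte[] {1, 2, 3});          // whole array
            out.writeFast(new byte[] {4, 5, 6, 7}, 1, 2); // slice: offset 1, length 2
            System.out.println(out.size());               // 6
        }
    }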
    writeFloat(float) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsorted float (four byte) value to the buffer.
    +
    +
    writeInt(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes a signed int (four byte) value to the buffer.
    +
    +
    writeInt(byte[], int, int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
Writes a packed integer starting at the given buffer offset and returns the next offset to be written.
    +
    +
    writeLong(long) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes a signed long (eight byte) value to the buffer.
    +
    +
    writeLong(byte[], int, long) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
Writes a packed long integer starting at the given buffer offset and returns the next offset to be written.
    +
    +
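A minimal sketch of packed-integer encoding with PackedInteger:

    import com.sleepycat.util.PackedInteger;

    public class PackedIntSketch {
        public static void main(String[] args) {
            byte[] buf = new byte[PackedInteger.MAX_LENGTH];
            int nextOffset = PackedInteger.writeInt(buf, 0, 1234);
            System.out.println(nextOffset);                    // bytes consumed
            System.out.println(PackedInteger.readInt(buf, 0)); // 1234
        }
    }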
    WriteOptions - Class in com.sleepycat.je
    +
    +
    Options for calling methods that write (insert, update or delete) records.
    +
    +
    WriteOptions() - Constructor for class com.sleepycat.je.WriteOptions
    +
    +
    Constructs a WriteOptions object with default values for all properties.
    +
    +
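A minimal sketch of building a WriteOptions instance with chained setters, usable with methods such as EntityCursor.update(V, WriteOptions); the cache mode and TTL shown are arbitrary example choices:

    import java.util.concurrent.TimeUnit;

    import com.sleepycat.je.CacheMode;
    import com.sleepycat.je.WriteOptions;

    public class WriteOptionsSketch {
        static WriteOptions exampleOptions() {
            return new WriteOptions()
                .setCacheMode(CacheMode.UNCHANGED) // leave cache state as-is
                .setTTL(30, TimeUnit.DAYS);        // records expire after 30 days
        }
    }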
    writePackedInt(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsorted packed integer.
    +
    +
    writePackedLong(long) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsorted packed long integer.
    +
    +
    writeShort(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes a signed short (two byte) value to the buffer.
    +
    +
    writeSortedBigDecimal(BigDecimal) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes a sorted BigDecimal.
    +
    +
    writeSortedDouble(double) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes a sorted double (eight byte) value to the buffer.
    +
    +
    writeSortedFloat(float) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes a sorted float (four byte) value to the buffer.
    +
    +
    writeSortedInt(byte[], int, int) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
Writes a packed sorted integer starting at the given buffer offset and returns the next offset to be written.
    +
    +
    writeSortedLong(byte[], int, long) - Static method in class com.sleepycat.util.PackedInteger
    +
    +
Writes a packed sorted long integer starting at the given buffer offset and returns the next offset to be written.
    +
    +
    writeSortedPackedInt(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes a sorted packed integer.
    +
    +
    writeSortedPackedLong(long) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes a sorted packed long integer.
    +
    +
    writeString(String) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes the specified characters to the buffer, converting each character to UTF format, and adding a null terminator byte.
    +
    +
    writeString(char[]) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
Writes the specified characters to the buffer, converting each character to UTF format.
    +
    +
    writeTo(OutputStream) - Method in class com.sleepycat.util.FastOutputStream
    +
     
    +
    writeUnsignedByte(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsigned byte (one byte) value to the buffer.
    +
    +
    writeUnsignedInt(long) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsigned int (four byte) value to the buffer.
    +
    +
    writeUnsignedShort(int) - Method in class com.sleepycat.bind.tuple.TupleOutput
    +
    +
    Writes an unsigned short (two byte) value to the buffer.
    +
    +
    + + + +

    X

    +
    +
    XAEnvironment - Class in com.sleepycat.je
    +
    +
    An Environment that implements XAResource.
    +
    +
    XAEnvironment(File, EnvironmentConfig) - Constructor for class com.sleepycat.je.XAEnvironment
    +
    +
    Create a database environment handle.
    +
    +
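A minimal sketch of opening an XAEnvironment; the home directory is hypothetical:

    import java.io.File;

    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.XAEnvironment;

    public class XaSketch {
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            config.setTransactional(true);
            XAEnvironment env = new XAEnvironment(new File("/tmp/je-xa"), config);
            // XAEnvironment implements javax.transaction.xa.XAResource, so it
            // can be enlisted with a JTA transaction manager.
            env.close();
        }
    }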
    XAFailureException - Exception in com.sleepycat.je
    +
    +
Thrown if an attempt is made to use a Transaction after it has been invalidated as the result of an XA failure.
    +
    +
    +A B C D E F G H I J K L M N O P Q R S T U V W X 
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/index.html b/docs/java/index.html new file mode 100644 index 0000000..34a2012 --- /dev/null +++ b/docs/java/index.html @@ -0,0 +1,75 @@ + + + + + +Oracle - Berkeley DB Java Edition API + + + + + + + + + +<noscript> +<div>JavaScript is disabled on your browser.</div> +</noscript> +<h2>Frame Alert</h2> +<p>This document is designed to be viewed using the frames feature. If you see this message, you are using a non-frame-capable web client. Link to <a href="overview-summary.html">Non-frame version</a>.</p> + + + diff --git a/docs/java/overview-frame.html b/docs/java/overview-frame.html new file mode 100644 index 0000000..11bb2ec --- /dev/null +++ b/docs/java/overview-frame.html @@ -0,0 +1,39 @@ + + + + + +Overview List (Oracle - Berkeley DB Java Edition API) + + + + + +

    Berkeley DB Java Edition
    version 7.5.11 +

    + + +

     

    + + diff --git a/docs/java/overview-summary.html b/docs/java/overview-summary.html new file mode 100644 index 0000000..c974ad6 --- /dev/null +++ b/docs/java/overview-summary.html @@ -0,0 +1,270 @@ + + + + + +Overview (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Berkeley DB Java Edition Packages 
    PackageDescription
    com.sleepycat.je +
Foundation for creating environments, databases and transactions; provides cursor-based data access.
    +
    com.sleepycat.je.jca.ra +
Support for the Java Connector Architecture, which provides a standard for connecting the J2EE platform to legacy enterprise information systems (EIS), such as ERP systems, database systems, and legacy applications not written in Java.
    +
    com.sleepycat.je.jmx +
    Implementations of JMX MBeans for JE.
    +
    com.sleepycat.je.rep +
+Berkeley DB Java Edition High Availability (JE HA) enables replication of JE environments.
    +
    com.sleepycat.je.rep.arbiter +
Provides a mechanism to allow write availability for the replication group even when the number of replication nodes is less than a majority.
    +
    com.sleepycat.je.rep.monitor +
BDB JE HA support for applications that need to track the composition of a replication group, in order to do tasks such as load balancing and request routing.
    +
    com.sleepycat.je.rep.util +
    BDB JE High Availability command line utilities and helper classes.
    +
    com.sleepycat.je.util +
    Supporting utilities.
    +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    Berkeley DB Direct Persistence Layer (DPL) Packages 
    PackageDescription
    com.sleepycat.persist +
The Direct Persistence Layer (DPL) adds a persistent object model to the Berkeley DB transactional engine.
    +
    com.sleepycat.persist.evolve +
    Utilities for managing class evolution of persistent objects.
    +
    com.sleepycat.persist.model +
    Annotations for defining a persistent object model.
    +
    com.sleepycat.persist.raw +
    Raw data access for general purpose tools and manual conversions.
    +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Berkeley DB Bind and Collections Packages 
    PackageDescription
    com.sleepycat.bind +
    Bindings between database entries and Java objects.
    +
    com.sleepycat.bind.serial +
    Bindings that use Java serialization.
    +
    com.sleepycat.bind.tuple +
    Bindings that use sequences of primitive fields, or tuples.
    +
    com.sleepycat.collections +
    Data access based on the standard Java collections API.
    +
    com.sleepycat.util +
    General utilities used throughout Berkeley DB.
    +
    +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/overview-tree.html b/docs/java/overview-tree.html new file mode 100644 index 0000000..c02ce5f --- /dev/null +++ b/docs/java/overview-tree.html @@ -0,0 +1,720 @@ + + + + + +Class Hierarchy (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + + +
    +

    Class Hierarchy

    + +

    Interface Hierarchy

    + +

    Annotation Type Hierarchy

    +
      +
    • com.sleepycat.persist.model.NotTransient (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.Entity (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.KeyField (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.PrimaryKey (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.SecondaryKey (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.Persistent (implements java.lang.annotation.Annotation)
    • +
    • com.sleepycat.persist.model.NotPersistent (implements java.lang.annotation.Annotation)
    • +
    +

    Enum Hierarchy

    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/package-list b/docs/java/package-list new file mode 100644 index 0000000..4f7bbd7 --- /dev/null +++ b/docs/java/package-list @@ -0,0 +1,17 @@ +com.sleepycat.bind +com.sleepycat.bind.serial +com.sleepycat.bind.tuple +com.sleepycat.collections +com.sleepycat.je +com.sleepycat.je.jca.ra +com.sleepycat.je.jmx +com.sleepycat.je.rep +com.sleepycat.je.rep.arbiter +com.sleepycat.je.rep.monitor +com.sleepycat.je.rep.util +com.sleepycat.je.util +com.sleepycat.persist +com.sleepycat.persist.evolve +com.sleepycat.persist.model +com.sleepycat.persist.raw +com.sleepycat.util diff --git a/docs/java/script.js b/docs/java/script.js new file mode 100644 index 0000000..b346356 --- /dev/null +++ b/docs/java/script.js @@ -0,0 +1,30 @@ +function show(type) +{ + count = 0; + for (var key in methods) { + var row = document.getElementById(key); + if ((methods[key] & type) != 0) { + row.style.display = ''; + row.className = (count++ % 2) ? rowColor : altColor; + } + else + row.style.display = 'none'; + } + updateTabs(type); +} + +function updateTabs(type) +{ + for (var value in tabs) { + var sNode = document.getElementById(tabs[value][0]); + var spanNode = sNode.firstChild; + if (value == type) { + sNode.className = activeTableTab; + spanNode.innerHTML = tabs[value][1]; + } + else { + sNode.className = tableTab; + spanNode.innerHTML = "" + tabs[value][1] + ""; + } + } +} diff --git a/docs/java/serialized-form.html b/docs/java/serialized-form.html new file mode 100644 index 0000000..1e58755 --- /dev/null +++ b/docs/java/serialized-form.html @@ -0,0 +1,1820 @@ + + + + + +Serialized Form (Oracle - Berkeley DB Java Edition API) + + + + + + + + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +
    +

    Serialized Form

    +
    +
    + +
    + +
    + + + + + + + +
    Berkeley DB Java Edition
    version 7.5.11 +
    +
    + + +

    Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.

    + + diff --git a/docs/java/standard-stylesheet.css b/docs/java/standard-stylesheet.css new file mode 100644 index 0000000..98055b2 --- /dev/null +++ b/docs/java/standard-stylesheet.css @@ -0,0 +1,574 @@ +/* Javadoc style sheet */ +/* +Overall document style +*/ + +@import url('resources/fonts/dejavu.css'); + +body { + background-color:#ffffff; + color:#353833; + font-family:'DejaVu Sans', Arial, Helvetica, sans-serif; + font-size:14px; + margin:0; +} +a:link, a:visited { + text-decoration:none; + color:#4A6782; +} +a:hover, a:focus { + text-decoration:none; + color:#bb7a2a; +} +a:active { + text-decoration:none; + color:#4A6782; +} +a[name] { + color:#353833; +} +a[name]:hover { + text-decoration:none; + color:#353833; +} +pre { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; +} +h1 { + font-size:20px; +} +h2 { + font-size:18px; +} +h3 { + font-size:16px; + font-style:italic; +} +h4 { + font-size:13px; +} +h5 { + font-size:12px; +} +h6 { + font-size:11px; +} +ul { + list-style-type:disc; +} +code, tt { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + padding-top:4px; + margin-top:8px; + line-height:1.4em; +} +dt code { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + padding-top:4px; +} +table tr td dt code { + font-family:'DejaVu Sans Mono', monospace; + font-size:14px; + vertical-align:top; + padding-top:4px; +} +sup { + font-size:8px; +} +/* +Document title and Copyright styles +*/ +.clear { + clear:both; + height:0px; + overflow:hidden; +} +.aboutLanguage { + float:right; + padding:0px 21px; + font-size:11px; + z-index:200; + margin-top:-9px; +} +.legalCopy { + margin-left:.5em; +} +.bar a, .bar a:link, .bar a:visited, .bar a:active { + color:#FFFFFF; + text-decoration:none; +} +.bar a:hover, .bar a:focus { + color:#bb7a2a; +} +.tab { + background-color:#0066FF; + color:#ffffff; + padding:8px; + width:5em; + font-weight:bold; +} +/* +Navigation bar styles +*/ +.bar { + background-color:#4D7A97; + color:#FFFFFF; + padding:.8em .5em .4em .8em; + height:auto;/*height:1.8em;*/ + font-size:11px; + margin:0; +} +.topNav { + background-color:#4D7A97; + color:#FFFFFF; + float:left; + padding:0; + width:100%; + clear:right; + height:2.8em; + padding-top:10px; + overflow:hidden; + font-size:12px; +} +.bottomNav { + margin-top:10px; + background-color:#4D7A97; + color:#FFFFFF; + float:left; + padding:0; + width:100%; + clear:right; + height:2.8em; + padding-top:10px; + overflow:hidden; + font-size:12px; +} +.subNav { + background-color:#dee3e9; + float:left; + width:100%; + overflow:hidden; + font-size:12px; +} +.subNav div { + clear:left; + float:left; + padding:0 0 5px 6px; + text-transform:uppercase; +} +ul.navList, ul.subNavList { + float:left; + margin:0 25px 0 0; + padding:0; +} +ul.navList li{ + list-style:none; + float:left; + padding: 5px 6px; + text-transform:uppercase; +} +ul.subNavList li{ + list-style:none; + float:left; +} +.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited { + color:#FFFFFF; + text-decoration:none; + text-transform:uppercase; +} +.topNav a:hover, .bottomNav a:hover { + text-decoration:none; + color:#bb7a2a; + text-transform:uppercase; +} +.navBarCell1Rev { + background-color:#F8981D; + color:#253441; + margin: auto 5px; +} +.skipNav { + position:absolute; + top:auto; + left:-9999px; + overflow:hidden; +} +/* +Page header and footer styles +*/ +.header, .footer { + clear:both; + margin:0 20px; + padding:5px 0 0 0; +} +.indexHeader { + margin:10px; + 
position:relative; +} +.indexHeader span{ + margin-right:15px; +} +.indexHeader h1 { + font-size:13px; +} +.title { + color:#2c4557; + margin:10px 0; +} +.subTitle { + margin:5px 0 0 0; +} +.header ul { + margin:0 0 15px 0; + padding:0; +} +.footer ul { + margin:20px 0 5px 0; +} +.header ul li, .footer ul li { + list-style:none; + font-size:13px; +} +/* +Heading styles +*/ +div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 { + background-color:#dee3e9; + border:1px solid #d0d9e0; + margin:0 0 6px -8px; + padding:7px 5px; +} +ul.blockList ul.blockList ul.blockList li.blockList h3 { + background-color:#dee3e9; + border:1px solid #d0d9e0; + margin:0 0 6px -8px; + padding:7px 5px; +} +ul.blockList ul.blockList li.blockList h3 { + padding:0; + margin:15px 0; +} +ul.blockList li.blockList h2 { + padding:0px 0 20px 0; +} +/* +Page layout container styles +*/ +.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer { + clear:both; + padding:10px 20px; + position:relative; +} +.indexContainer { + margin:10px; + position:relative; + font-size:12px; +} +.indexContainer h2 { + font-size:13px; + padding:0 0 3px 0; +} +.indexContainer ul { + margin:0; + padding:0; +} +.indexContainer ul li { + list-style:none; + padding-top:2px; +} +.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt { + font-size:12px; + font-weight:bold; + margin:10px 0 0 0; + color:#4E4E4E; +} +.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd { + margin:5px 0 10px 0px; + font-size:14px; + font-family:'DejaVu Sans Mono',monospace; +} +.serializedFormContainer dl.nameValue dt { + margin-left:1px; + font-size:1.1em; + display:inline; + font-weight:bold; +} +.serializedFormContainer dl.nameValue dd { + margin:0 0 0 1px; + font-size:1.1em; + display:inline; +} +/* +List styles +*/ +ul.horizontal li { + display:inline; + font-size:0.9em; +} +ul.inheritance { + margin:0; + padding:0; +} +ul.inheritance li { + display:inline; + list-style:none; +} +ul.inheritance li ul.inheritance { + margin-left:15px; + padding-left:15px; + padding-top:1px; +} +ul.blockList, ul.blockListLast { + margin:10px 0 10px 0; + padding:0; +} +ul.blockList li.blockList, ul.blockListLast li.blockList { + list-style:none; + margin-bottom:15px; + line-height:1.4; +} +ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast li.blockList { + padding:0px 20px 5px 10px; + border:1px solid #ededed; + background-color:#f8f8f8; +} +ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList { + padding:0 0 5px 8px; + background-color:#ffffff; + border:none; +} +ul.blockList ul.blockList ul.blockList ul.blockList li.blockList { + margin-left:0; + padding-left:0; + padding-bottom:15px; + border:none; +} +ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast { + list-style:none; + border-bottom:none; + padding-bottom:0; +} +table tr td dl, table tr td dl dt, table tr td dl dd { + margin-top:0; + margin-bottom:1px; +} +/* +Table styles +*/ +.overviewSummary, .memberSummary, .typeSummary, .useSummary, .constantsSummary, .deprecatedSummary { + width:100%; + border-left:1px solid #EEE; + border-right:1px solid #EEE; + border-bottom:1px solid #EEE; +} +.overviewSummary, .memberSummary { + padding:0px; +} +.overviewSummary caption, .memberSummary caption, .typeSummary 
caption, +.useSummary caption, .constantsSummary caption, .deprecatedSummary caption { + position:relative; + text-align:left; + background-repeat:no-repeat; + color:#253441; + font-weight:bold; + clear:none; + overflow:hidden; + padding:0px; + padding-top:10px; + padding-left:1px; + margin:0px; + white-space:pre; +} +.overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link, +.useSummary caption a:link, .constantsSummary caption a:link, .deprecatedSummary caption a:link, +.overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover, +.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover, +.overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active, +.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active, +.overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited, +.useSummary caption a:visited, .constantsSummary caption a:visited, .deprecatedSummary caption a:visited { + color:#FFFFFF; +} +.overviewSummary caption span, .memberSummary caption span, .typeSummary caption span, +.useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + padding-bottom:7px; + display:inline-block; + float:left; + background-color:#F8981D; + border: none; + height:16px; +} +.memberSummary caption span.activeTableTab span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + margin-right:3px; + display:inline-block; + float:left; + background-color:#F8981D; + height:16px; +} +.memberSummary caption span.tableTab span { + white-space:nowrap; + padding-top:5px; + padding-left:12px; + padding-right:12px; + margin-right:3px; + display:inline-block; + float:left; + background-color:#4D7A97; + height:16px; +} +.memberSummary caption span.tableTab, .memberSummary caption span.activeTableTab { + padding-top:0px; + padding-left:0px; + padding-right:0px; + background-image:none; + float:none; + display:inline; +} +.overviewSummary .tabEnd, .memberSummary .tabEnd, .typeSummary .tabEnd, +.useSummary .tabEnd, .constantsSummary .tabEnd, .deprecatedSummary .tabEnd { + display:none; + width:5px; + position:relative; + float:left; + background-color:#F8981D; +} +.memberSummary .activeTableTab .tabEnd { + display:none; + width:5px; + margin-right:3px; + position:relative; + float:left; + background-color:#F8981D; +} +.memberSummary .tableTab .tabEnd { + display:none; + width:5px; + margin-right:3px; + position:relative; + background-color:#4D7A97; + float:left; + +} +.overviewSummary td, .memberSummary td, .typeSummary td, +.useSummary td, .constantsSummary td, .deprecatedSummary td { + text-align:left; + padding:0px 0px 12px 10px; +} +th.colOne, th.colFirst, th.colLast, .useSummary th, .constantsSummary th, +td.colOne, td.colFirst, td.colLast, .useSummary td, .constantsSummary td{ + vertical-align:top; + padding-right:0px; + padding-top:8px; + padding-bottom:3px; +} +th.colFirst, th.colLast, th.colOne, .constantsSummary th { + background:#dee3e9; + text-align:left; + padding:8px 3px 3px 7px; +} +td.colFirst, th.colFirst { + white-space:nowrap; + font-size:13px; +} +td.colLast, th.colLast { + font-size:13px; +} +td.colOne, th.colOne { + font-size:13px; +} +.overviewSummary td.colFirst, .overviewSummary th.colFirst, +.useSummary td.colFirst, 
.useSummary th.colFirst, +.overviewSummary td.colOne, .overviewSummary th.colOne, +.memberSummary td.colFirst, .memberSummary th.colFirst, +.memberSummary td.colOne, .memberSummary th.colOne, +.typeSummary td.colFirst{ + width:25%; + vertical-align:top; +} +td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover { + font-weight:bold; +} +.tableSubHeadingColor { + background-color:#EEEEFF; +} +.altColor { + background-color:#FFFFFF; +} +.rowColor { + background-color:#EEEEEF; +} +/* +Content styles +*/ +.description pre { + margin-top:0; +} +.deprecatedContent { + margin:0; + padding:10px 0; +} +.docSummary { + padding:0; +} + +ul.blockList ul.blockList ul.blockList li.blockList h3 { + font-style:normal; +} + +div.block { + font-size:14px; + font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif; +} + +td.colLast div { + padding-top:0px; +} + + +td.colLast a { + padding-bottom:3px; +} +/* +Formatting effect styles +*/ +.sourceLineNo { + color:green; + padding:0 30px 0 0; +} +h1.hidden { + visibility:hidden; + overflow:hidden; + font-size:10px; +} +.block { + display:block; + margin:3px 10px 2px 0px; + color:#474747; +} +.deprecatedLabel, .descfrmTypeLabel, .memberNameLabel, .memberNameLink, +.overrideSpecifyLabel, .packageHierarchyLabel, .paramLabel, .returnLabel, +.seeLabel, .simpleTagLabel, .throwsLabel, .typeNameLabel, .typeNameLink { + font-weight:bold; +} +.deprecationComment, .emphasizedPhrase, .interfaceName { + font-style:italic; +} + +div.block div.block span.deprecationComment, div.block div.block span.emphasizedPhrase, +div.block div.block span.interfaceName { + font-style:normal; +} + +div.contentContainer ul.blockList li.blockList h2{ + padding-bottom:0px; +} diff --git a/docs/java/style.css b/docs/java/style.css new file mode 100644 index 0000000..9dc28c5 --- /dev/null +++ b/docs/java/style.css @@ -0,0 +1,15 @@ +/* Javadoc style sheet for Java 7 and later versions */ + +/* Import standard style sheet for defaults */ +@import url('standard-stylesheet.css'); + +/* + * Modify the style for code samples to use a pale blue background with + * a thin, medium blue border that matches the style of various + * headings. 
+ */ +pre.code { + border: 1px solid #9eadc0; /* Medium blue */ + padding: 2px; + background-color: #dee3e9; /* Pale blue */ +} diff --git a/docs/jconsole/Choose-MBean.JPG b/docs/jconsole/Choose-MBean.JPG new file mode 100644 index 0000000..1540394 Binary files /dev/null and b/docs/jconsole/Choose-MBean.JPG differ diff --git a/docs/jconsole/Choose-group.JPG b/docs/jconsole/Choose-group.JPG new file mode 100644 index 0000000..cfd100e Binary files /dev/null and b/docs/jconsole/Choose-group.JPG differ diff --git a/docs/jconsole/Display-cumulative-stats.JPG b/docs/jconsole/Display-cumulative-stats.JPG new file mode 100644 index 0000000..2dbba6c Binary files /dev/null and b/docs/jconsole/Display-cumulative-stats.JPG differ diff --git a/docs/jconsole/Graph-stat.JPG b/docs/jconsole/Graph-stat.JPG new file mode 100644 index 0000000..91f6125 Binary files /dev/null and b/docs/jconsole/Graph-stat.JPG differ diff --git a/docs/jconsole/Interval-setting.JPG b/docs/jconsole/Interval-setting.JPG new file mode 100644 index 0000000..028b89a Binary files /dev/null and b/docs/jconsole/Interval-setting.JPG differ diff --git a/docs/jconsole/JConsole-plugin.html b/docs/jconsole/JConsole-plugin.html new file mode 100644 index 0000000..cb0b009 --- /dev/null +++ b/docs/jconsole/JConsole-plugin.html @@ -0,0 +1,407 @@ + + + + + Berkeley DB Java Edition MBeans and JConsole Plugin + + + +
    + +
    +

    Monitoring and Debugging Berkeley DB Java Edition with JMX

    +
    + +Monitoring and Diagnostic MBeans
+Displaying Statistics Graphically With the JE JConsole Plugin
    + + +
    +

    Monitoring and Diagnostic MBeans

    + +

    Overview

    +

+Berkeley DB Java Edition provides monitoring and debugging support through four JMX Dynamic MBeans. JEMonitor and RepJEMonitor make JE statistics and basic administrative operations available, and are mainly used for monitoring a JE application. JEDiagnostics and RepJEDiagnostics make JE logging output configurable dynamically, and are mainly used for debugging.

    +

+A non-replicated (non-HA) JE Environment can only be accessed via JEMonitor and JEDiagnostics, while a replicated JE Environment can only be accessed via RepJEMonitor and RepJEDiagnostics.

    +

+The functionality provided by the JE MBeans can be accessed through the standard MBean attribute/operation interface available through JConsole or another management console. In addition, environment statistics can be viewed and exported through the JE JConsole Plugin.

    Enabling MBeans in your JE Application

    +

To register and enable the MBeans for a JE application, set the JEMonitor system property to true. For example:

    java -DJEMonitor=true -cp <je.jar> <JE application>

+Setting -DJEMonitor=true will register both the monitoring and diagnostic MBeans for the application. For example, a non-HA environment will register both JEMonitor and JEDiagnostics, while a replicated environment will register both RepJEMonitor and RepJEDiagnostics.

    +

    Attributes and Operations available through JEMonitor and RepJEMonitor

    +

+JEMonitor monitors a non-replicated JE application. RepJEMonitor monitors a replicated JE application and provides all the attributes and operations of JEMonitor, along with additional operations only applicable for replicated environments.

    +

    Monitoring Attributes

    +

+JEMonitor and RepJEMonitor have the following attributes:
    +
    + + + + +
    Monitor attributes
    +
    +
    +

+Attribute names and values are listed in the area outlined in red. Most of the attributes are immutable and cannot be changed through JEMonitor, with the exception of cachePercent and cacheSize. Detailed information about the attributes can be obtained by clicking on the attribute name in the list outlined in blue.

    +

    Monitoring Operations

    +

+JEMonitor provides the following operations, which can be invoked on the monitored, running JE application:
    +
    + + + + +
    Monitor operations
    +
    +
    +

+These operations mimic functionality available through the com.sleepycat.je.Environment class. getEnvConfig and getEnvironmentStats are of particular value for obtaining information about the environment configuration and current statistics. More information about each operation is available through a tool tip that displays when the mouse hovers over the operation button.

+As stated above, RepJEMonitor provides two additional operations to monitor a replicated JE application:
    +
    + + + + +
    Monitor operations
    +
    +
    +

+The additional operations are outlined in red. getReplicationStats displays replication-specific statistics, while dumpReplicationState displays information about the replication group composition, current node state, etc.

    JEDiagnostics and RepJEDiagnostics

    +

+JEDiagnostics and RepJEDiagnostics currently have the same attributes and operations.

    + +

    Diagnostic Attributes

    +

+The JEDiagnostics and RepJEDiagnostics attributes are:
    +
    + + + + +
    Diagnostic attributes
    +
    +
    +

+Attribute names and values are listed in the area outlined in red. These attributes manage the output levels for ConsoleHandler, FileHandler and MemoryHandler, and let you change logging output for a running JE application. This is useful when doing detailed debugging, as described in Chapter 12 of the Getting Started Guide. Detailed information for each attribute is listed in the blue area and can be displayed by clicking on the attribute.

    +

    Diagnostic Operations

    +

    +JEDiagnostics and RepJEDiagnostics currently support the same operations: +
    +
    + + + + +
    Diagnostic operations
    +
    +
    +

    +

+resetLoggerLevel allows you to reset the level for a JE logger, while pushMemoryHandler lets you flush any logging output which has been buffered in memory. Both are used only in debugging situations.

    +
    +
    + + + +
    +

    Berkeley DB Java Edition JConsole Plugin

    + + +

    Overview

    +

+The BDB JE JConsole plugins let you monitor and graphically display information from running JE applications using the jconsole utility which is distributed with the JDK. Two plugin jars are provided: one for monitoring non-HA JE applications (JE_HOME/lib/JEJConsole.jar), and another for monitoring JE HA applications (JE_HOME/lib/RepJEJConsole.jar). The former lets jconsole monitor and display EnvironmentStats while the latter shows both EnvironmentStats and ReplicatedEnvironmentStats.

    +

    +The plugins can: +

      +
• Display stats from a running JE application,
• Optionally log those stats into a log file in CSV format,
• Graph those stats so that you can directly see the changes.
    +

    +

+The plugins are based on the JE MBeans described above and use the MBean operations to periodically obtain statistics which are displayed in a table or graph. JEJConsole invokes JEMonitor.getEnvironmentStats while RepJEJConsole invokes both RepJEMonitor.getEnvironmentStats and RepJEMonitor.getReplicationStats. Note that JEJConsole can be used to monitor both JE non-replicated and replicated applications, but in the latter case will not display the "JE Replicated Statistics" tab shown in the second screen shot below.

    +

+See the javadoc for EnvironmentStats and ReplicatedEnvironmentStats for more information about the meaning of the statistics.

    +

    +
    +A screenshot of the JEJConsole plugin: +
    +
    + + + + +
    JEStats
    +
    +
    +The RepJEJConsole plugin: +
    + + + + +
    RepJEStats
    +

    +

    Using The Plugins

    +

+jconsole can only monitor applications that have registered a DynamicMBean. Both JE and JE HA will automatically register an appropriate DynamicMBean when an Environment or ReplicatedEnvironment is created, if the JEMonitor system property is set to true (e.g. using -DJEMonitor=true on the command line).

+To use the JE and JE Replication plugins, invoke jconsole with the -pluginpath option to specify one of the libraries. For example:

    +    jconsole -pluginpath JE_HOME/lib/JEJConsole.jar

    +or +

    +    jconsole -pluginpath JE_HOME/lib/RepJEJConsole.jar

    +

    +

+When the plugin starts up, a menu will appear which lets you choose the process to monitor. Your JE application should appear if you have set -DJEMonitor=true.

+Note: There is a known problem with discovering Java processes on Windows platforms when the temporary directory is on a FAT type file system. In that case, a Java application may need to set -XX:+PerfBypassFileSystemCheck on the Java command line in order for the process to appear on the connection menu.

After connecting to the process, a "JE Statistics" tab will be shown in jconsole. The tab will be named "JE Replicated Statistics" when using the RepJEJConsole.jar plugin. The tab provides various options:

    +
      +
    • +Choose JE MBean +

+A JE application may have more than one Environment, and therefore multiple DynamicMBeans. The plugin lets you select which Environment you want to look at with the "Choose JE MBean" box:

      +

      Choose MBean
      +

      +
    • +
    • +Set Collection Interval +

+The default interval for collecting environment stats is 10 seconds. You can change this by entering a new value in the "Collection interval (secs):" field and then pressing the Enter key:

      +

      Graph
      +

      +
    • +
    • +Display cumulative stats +

+By default, statistics are reset after each collection period, and the value displayed pertains only to the collection interval. For example, if the collection interval is 10 seconds, the plugin will display values for the first 10 seconds, the second 10 seconds, etc. You may choose instead to display statistics in a cumulative way, so that the displayed values accumulate as the application runs, instead of resetting in each interval. To do so, click the "Display cumulative stats" checkbox:

      +

      Clear stats
      +

      +
    • +
    • +Limit the Display to Non-Zero values +

+JE provides numerous stats. Depending on your application, some of them may be 0 and therefore irrelevant for analyzing performance. You can hide these stats by clicking the "Hide zero values" checkbox:

      +

      Non-zero
      +

      +
    • +
    • +Choose a File for Logging Stats +

+You may specify the file to write selected stats to with the "Record Statistics To..." button. Currently, only CSV format is supported:

      +

      save log
      +

      +
    • +
    • +Start Recording +

+You can begin recording stats to the selected file by pressing the "Start Recording" button. While recording is enabled, you cannot change the recording interval, log file, or the specific stats being logged:

      +

      start recording
      +

      +
    • +
    • +Stop Recording +

+You can stop recording stats by pressing the "Stop Recording" button. You can only change the recording interval, log file, or the specific stats being logged when recording is stopped:

      +

      stop recording
      +

      +
    • +
    • +Choose Stats Group to Display +

+JE Environment stats are divided into several groups. You can specify which groups to display by checking the appropriate groups:

      +

      choose group
      +

      +
    • +
    • +Stop Logging a Stat +

+All stats are logged by default. If you don't want to log a particular stat, you can right-click on that stat and uncheck "Log This Stat":

      +

      unlog
      +

      +
    • +
    • +Graph a Stat +

+You may graph a particular stat by right-clicking on the stat and selecting "Graph This Stat". For example, if you right-click on nMarkLNsProcessed and select Graph This Stat...

      +

      graph


      +... then a new window with the dynamic graph will be displayed: +

      +
      show graph
      +

      +
    • +
    • +Show tips +

      +Each stat has a mouse-over which describes its meaning. For example: +

      +

      show tips
      +

      +
    • +
    +

    +Please report bugs to the Berkeley DB Java Edition OTN forum. +

    +
    diff --git a/docs/jconsole/JEDiagnostics-attributes.JPG b/docs/jconsole/JEDiagnostics-attributes.JPG new file mode 100644 index 0000000..bf24e39 Binary files /dev/null and b/docs/jconsole/JEDiagnostics-attributes.JPG differ diff --git a/docs/jconsole/JEDiagnostics-operations.JPG b/docs/jconsole/JEDiagnostics-operations.JPG new file mode 100644 index 0000000..17b0a7a Binary files /dev/null and b/docs/jconsole/JEDiagnostics-operations.JPG differ diff --git a/docs/jconsole/JEMonitor-attributes.JPG b/docs/jconsole/JEMonitor-attributes.JPG new file mode 100644 index 0000000..5edde41 Binary files /dev/null and b/docs/jconsole/JEMonitor-attributes.JPG differ diff --git a/docs/jconsole/JEMonitor-operations.JPG b/docs/jconsole/JEMonitor-operations.JPG new file mode 100644 index 0000000..5e60322 Binary files /dev/null and b/docs/jconsole/JEMonitor-operations.JPG differ diff --git a/docs/jconsole/JEStats-plugin.JPG b/docs/jconsole/JEStats-plugin.JPG new file mode 100644 index 0000000..200ef07 Binary files /dev/null and b/docs/jconsole/JEStats-plugin.JPG differ diff --git a/docs/jconsole/MBean-operations.JPG b/docs/jconsole/MBean-operations.JPG new file mode 100644 index 0000000..2c5be7e Binary files /dev/null and b/docs/jconsole/MBean-operations.JPG differ diff --git a/docs/jconsole/Mutable-configs.JPG b/docs/jconsole/Mutable-configs.JPG new file mode 100644 index 0000000..49fb016 Binary files /dev/null and b/docs/jconsole/Mutable-configs.JPG differ diff --git a/docs/jconsole/Non-zero.JPG b/docs/jconsole/Non-zero.JPG new file mode 100644 index 0000000..bb87b44 Binary files /dev/null and b/docs/jconsole/Non-zero.JPG differ diff --git a/docs/jconsole/RepJEMonitor-operations.JPG b/docs/jconsole/RepJEMonitor-operations.JPG new file mode 100644 index 0000000..ebc67fc Binary files /dev/null and b/docs/jconsole/RepJEMonitor-operations.JPG differ diff --git a/docs/jconsole/RepJEStats-plugin.JPG b/docs/jconsole/RepJEStats-plugin.JPG new file mode 100644 index 0000000..c8db590 Binary files /dev/null and b/docs/jconsole/RepJEStats-plugin.JPG differ diff --git a/docs/jconsole/Save-log.JPG b/docs/jconsole/Save-log.JPG new file mode 100644 index 0000000..c7c6d67 Binary files /dev/null and b/docs/jconsole/Save-log.JPG differ diff --git a/docs/jconsole/Show-graph.JPG b/docs/jconsole/Show-graph.JPG new file mode 100644 index 0000000..1b5f553 Binary files /dev/null and b/docs/jconsole/Show-graph.JPG differ diff --git a/docs/jconsole/Show-tips.JPG b/docs/jconsole/Show-tips.JPG new file mode 100644 index 0000000..e10549a Binary files /dev/null and b/docs/jconsole/Show-tips.JPG differ diff --git a/docs/jconsole/Start-record.JPG b/docs/jconsole/Start-record.JPG new file mode 100644 index 0000000..917cecd Binary files /dev/null and b/docs/jconsole/Start-record.JPG differ diff --git a/docs/jconsole/Stop-record.JPG b/docs/jconsole/Stop-record.JPG new file mode 100644 index 0000000..7939f74 Binary files /dev/null and b/docs/jconsole/Stop-record.JPG differ diff --git a/docs/jconsole/Unlog-stat.JPG b/docs/jconsole/Unlog-stat.JPG new file mode 100644 index 0000000..766b071 Binary files /dev/null and b/docs/jconsole/Unlog-stat.JPG differ diff --git a/docs/relnotes.html b/docs/relnotes.html new file mode 100644 index 0000000..2f62609 --- /dev/null +++ b/docs/relnotes.html @@ -0,0 +1,80 @@ + + + + + Berkeley DB Java Edition Release Notes + + + + +
    +
    +

    Oracle Berkeley DB Java Edition 12c R2
    +Release Notes

    +
    + + +

    Release 7.5.11, 2017-10-31 09:36:36 UTC

    + +

    Overview

    +

+Berkeley DB Java Edition (JE) is a 100% pure Java embedded, transactional data store. It supplies a key/object data model with indexing, full transactional ACID support, POJO APIs and High Availability/Replication.

+As of version 7.3, JE is licensed under the Apache 2.0 license. See the LICENSE file for the complete license.

+Note that in JE 7.0 and later, the Java compatibility requirement for JE is Java SE 8.

+JE is compatible with Java SE 8 (64-bit) and later, and has been tested and certified against Oracle JDK 1.8.0_121 and IBM J9 7.0. We encourage you to upgrade to the latest Java releases to take advantage of the latest bug fixes and performance improvements.

+Be sure to see the change log for information about upgrading from earlier releases.

    +This release contains a number of changes, including: +

      +
    1. + MAX_DISK and FREE_DISK configuration parameters have been added for + limiting disk usage, and read operations are now allowed when these + thresholds are exceeded. Previously no thresholds were used, and the + Environment was invalidated and closed when the volume was filled. + Allowing read operations now provides partial availability even when + writes are no longer possible. New statistics for disk usage + monitoring have also been added. [#25220] +
      +
+ NOTE: Behavior has changed regarding the retention of data + files reserved for replication, and specifying the new + MAX_DISK parameter is strongly recommended for HA + applications. Use of the new disk monitoring statistics, + rather than monitoring of raw disk utilization, is especially + important for HA applications due to the presence of reserved + data files. (A configuration sketch follows this list.) +
      +
      +
2. + The data verifier has been enhanced to perform Btree verification in + order to detect internal corruption, including secondary index + corruption. Btree verification is performed by the background data + verifier, the DbVerify utility, the DbVerify.verify method, the + Database.verify method and the Environment.verify method. [#25960] +
 +
3. + Several important bug fixes are included. +
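To make item 1 concrete, here is a minimal sketch of opening an Environment with the new disk limits. It is a sketch only: it assumes the parameters are exposed as EnvironmentConfig.MAX_DISK and EnvironmentConfig.FREE_DISK, and the environment path and byte values are arbitrary placeholders, not recommendations.

    import java.io.File;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class DiskLimitSketch {
        public static void main(String[] args) {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            // Assumed constant: cap this environment's total disk usage at ~100 GB.
            envConfig.setConfigParam(EnvironmentConfig.MAX_DISK, "100000000000");
            // Assumed constant: stop writes when under ~5 GB of free space remains.
            envConfig.setConfigParam(EnvironmentConfig.FREE_DISK, "5000000000");
            Environment env = new Environment(new File("/path/to/envHome"), envConfig);
            // After a threshold is exceeded, reads remain possible; only writes fail.
            env.close();
        }
    }

The Btree verification in item 2 can be exercised from the command line as well; a typical invocation of the DbVerify utility mentioned above would look like java -cp je.jar com.sleepycat.je.util.DbVerify -h /path/to/envHome -s databaseName (invocation hedged; check the utility's usage message for your release).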
    +For the complete and detailed list of changes, see +the change log page.
    +

    +Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved. +
+
+
+
diff --git a/docs/sleepycat-idoc.css b/docs/sleepycat-idoc.css
new file mode 100644
index 0000000..bef30e0
--- /dev/null
+++ b/docs/sleepycat-idoc.css
@@ -0,0 +1,56 @@
+body { width: 600px;
+width: 40em;
+ background-color: white;
+ margin-left: 30pt;
+ font-family: Arial, Helvetica, sans-serif;
+ }
+
+div.docMain { width: 600px;
+ position: absolute;
+ left: 30pt;
+ top: 120pt;}
+
+div.contact { width: 40em;
+ position: absolute;
+ left: 100pt;
+ top: 120pt;
+ font-size: 80%;
+ }
+
+div.contact a { padding-right: 2em;
+
+ }
+div.releaseDoc { width: 40em;
+ position: absolute;
+ left: 30pt;
+ top: 180pt;}
+
+div.releaseDoc a { padding-right: 2em; }
+
+div.docColA { width: 20em;
+ position: absolute;
+ left: 30pt;
+ top: 250pt;
+ }
+
+div.docColB { width: 20em;
+ position: absolute;
+ left: 25em;
+ top: 250pt;
+ }
+
+h1 { font-family: Verdana, serif;
+ font-size: 110%;
+ margin-top: 1em;
+ }
+
+div.contact h1 { font-size: 140%; }
+
+h2 { font-family: Verdana, serif;
+ font-size: 130%;
+ margin-top: 1em;
+ }
+p.releaseInfo { font-style: italic;
+ font-weight: bold;
+ }
+
diff --git a/docs/sleepycat/legal.html b/docs/sleepycat/legal.html
new file mode 100644
index 0000000..226d06b
--- /dev/null
+++ b/docs/sleepycat/legal.html
@@ -0,0 +1,41 @@
+
+
+
+
+Berkeley DB: Oracle Legal Notices
+
+
+
+

    +Oracle +

    +

    Oracle Legal Notices

    +

    Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved.

    +

This product and publication are protected by copyright and distributed +under licenses restricting its use, copying and distribution. Permission +to use this publication or portions of this publication is granted by +Oracle provided that the above copyright notice appears in all copies +and that use of such publications is for non-commercial use only and no +modifications of the publication are made.

    +

    RESTRICTED RIGHTS: Use, duplication, or disclosure by the U.S. Government +is subject to restrictions of FAR 52.227-14(g)(2)(6/87) and FAR +52.227-19(6/87), or DFAR 252.227-7015(b)(6/95) and DFAR 227.7202-3(a).

    +

    Sleepycat and the names of Oracle products referenced herein +are trademarks, registered trademarks or service marks of Oracle.

    +

    All other brand, company and product names referenced in this publication +may be trademarks, registered trademarks or service marks of their +respective holders and are used here for informational purposes only.

    +

    THIS PRODUCT IS PROVIDED BY ORACLE "AS IS" AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL ORACLE BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.

    + + diff --git a/docs/sleepycat/license_je.html b/docs/sleepycat/license_je.html new file mode 100644 index 0000000..ba70884 --- /dev/null +++ b/docs/sleepycat/license_je.html @@ -0,0 +1,24 @@ + + + + +Berkeley DB Java Edition Product License + + + + +

    Berkeley DB Java Edition Product License

    +

    +The license that applies to this copy of the Berkeley DB Java Edition +software may be found in the "LICENSE" file included in each Berkeley +DB Java Edition distribution.

    +

    For a license to use the Berkeley DB Java Edition software under +conditions other than those described in the "LICENSE" file, or to +purchase support for this software, please send email to +berkeleydb-info_us@oracle.com.

    + +

    Copyright (c) 1996,2007 Oracle and/or its affiliates. All rights reserved. + + diff --git a/docs/traceLogging.html b/docs/traceLogging.html new file mode 100644 index 0000000..1788376 --- /dev/null +++ b/docs/traceLogging.html @@ -0,0 +1,109 @@ + + + + + Berkeley DB Java Edition Trace Logging + + + +

    +Oracle +

    + +

    Trace logging in Berkeley DB Java Edition

+JE uses the java.util.logging +package to log operations and trace +messages. This information is particularly useful for monitoring the +activities of a replicated JE High Availability application. +

+The default output lists replication node startup and shutdown +operations. Initial configuration problems should show up +during the startup operation. You are strongly advised to run your +production application with this level of logging. +

+Note that an application java.util.logging.Handler may be specified for +each environment through EnvironmentConfig.setLoggingHandler(). +

    +See the Logging +section in + Chapter 12, Administering Berkeley DB Java Edition Applications + in the Getting Started Guide for +details on logging configuration and locations. +
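As a minimal sketch of the setLoggingHandler() note above (the ConsoleHandler, the INFO level, and the environment path are illustrative choices only; INFO matches the default output shown below):

    import java.io.File;
    import java.util.logging.ConsoleHandler;
    import java.util.logging.Level;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class LoggingHandlerSketch {
        public static void main(String[] args) {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            // Route this environment's log records to the console at INFO level.
            ConsoleHandler handler = new ConsoleHandler();
            handler.setLevel(Level.INFO);
            envConfig.setLoggingHandler(handler);
            Environment env = new Environment(new File("/path/to/envHome"), envConfig);
            env.close();
        }
    }

Because JE's loggers live under the com.sleepycat.je namespace, levels can also be adjusted through the standard java.util.logging configuration file without code changes.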

    Example of logging output

    + +The first column is the date in <year>-<month>-<day> +format. Times are given in the UTC timezone.

    +The default information when a master node (Node 1) comes up: +
    +2009-09-17 17:36:22:421 UTC INFO [Node 1]  Started ServiceDispatcher. HostPort=localhost:5500
    +2009-09-17 17:36:22:521 UTC INFO [Node 1]  Refreshed 0 monitors.
    +2009-09-17 17:36:22:529 UTC INFO [Node 1]  Current group size: 0
    +2009-09-17 17:36:22:530 UTC INFO [Node 1]  New node Node 1(-1) unknown to rep group
    +2009-09-17 17:36:22:537 UTC INFO [Node 1]  Nascent group. Node 1 is master by virtue of being the first node.
    +2009-09-17 17:36:22:571 UTC INFO [Node 1]  Successfully added node: Node 1(1) HostPort=localhost:5500 [ELECTABLE]
    +2009-09-17 17:36:22:572 UTC INFO [Node 1]  Refreshed 0 monitors.
    +2009-09-17 17:36:22:580 UTC INFO [Node 1]  Node Node 1 started
    +2009-09-17 17:36:22:593 UTC INFO [Node 1]  Feeder manager accepting requests.
    +
    + +The master's perspective when a replica (Node 2) wants to be added, then subsequently joins and syncs up. +
    +2009-09-17 17:36:22:676 UTC INFO [Node 1]  Feeder accepted connection from java.nio.channels.SocketChannel[connected local=/127.0.0.1:5500 remote=/127.0.0.1:59415]
    +2009-09-17 17:36:22:710 UTC INFO [Node 1]  Feeder-replica handshake start
    +2009-09-17 17:36:22:729 UTC INFO [Node 1]  Successfully added node: Node 2(2) HostPort=localhost:5501 [ELECTABLE]
    +2009-09-17 17:36:22:730 UTC INFO [Node 1]  Refreshed 0 monitors.
    +2009-09-17 17:36:22:732 UTC INFO [Node 1]  Feeder-replica Node 2 handshake completed.
    +2009-09-17 17:36:22:735 UTC INFO [Node 1]  Feeder-replica Node 2 syncup started. Feeder range: first=1 last=14 sync=14 txnEnd=14
    +2009-09-17 17:36:22:747 UTC INFO [Node 1]  Feeder-replica Node 2 start stream at VLSN: 1
    +2009-09-17 17:36:22:748 UTC INFO [Node 1]  Feeder-replica Node 2 syncup ended. Elapsed time: 13ms
    +2009-09-17 17:36:22:750 UTC INFO [Node 1]  Feeder output thread for replica Node 2 started at VLSN: 1 Master at: 14 VLSN delta: 13 Socket: (Node 2(2))java.nio.channels.SocketChannel[connected local=/127.0.0.1:5500 remote=/127.0.0.1:59415]
    +
    +A replica (Node 5) comes up. We can tell who the master (Node 1) is: +
    +2009-09-17 17:36:23:179 UTC INFO [Node 5]  Started ServiceDispatcher. HostPort=localhost:5504
    +2009-09-17 17:36:23:181 UTC INFO [Node 5]  Refreshed 0 monitors.
    +2009-09-17 17:36:23:182 UTC INFO [Node 5]  Current group size: 0
    +2009-09-17 17:36:23:182 UTC INFO [Node 5]  New node Node 5(-1) unknown to rep group
    +2009-09-17 17:36:23:186 UTC INFO [Node 5]  New node Node 5 located master: Node 1(1)
    +2009-09-17 17:36:23:188 UTC INFO [Node 5]  Node Node 5 started
    +2009-09-17 17:36:23:188 UTC INFO [Node 5]  Replica loop started with master: Node 1(1)
    +2009-09-17 17:36:23:192 UTC INFO [Node 5]  Replica-feeder handshake start
    +2009-09-17 17:36:23:274 UTC INFO [Node 5]  Replica-feeder Node 1 handshake completed.
    +2009-09-17 17:36:23:274 UTC INFO [Node 5]  Replica-feeder Node 1 syncup started. Replica range: first=-1 last=-1 sync=-1 txnEnd=-1
    +2009-09-17 17:36:23:275 UTC INFO [Node 5]  Replica-feeder Node 1 start stream at VLSN: 1
    +2009-09-17 17:36:23:276 UTC INFO [Node 5]  Replica-feeder Node 1 syncup ended. Elapsed time: 2ms
    +2009-09-17 17:36:23:280 UTC INFO [Node 5]  Replica initialization completed. Replica VLSN: -1  Heartbeat master commit VLSN: 29 VLSN delta: 30
    +
+A master node (Node 1) is shutting down and closing connections to its replicas (Nodes 2, 3, 4, 5). +
    +2009-09-17 17:36:39:502 UTC INFO [Node 1]  Shutting down node Node 1(1)
    +2009-09-17 17:36:39:502 UTC INFO [Node 1]  Elections shutdown initiated
    +2009-09-17 17:36:39:504 UTC INFO [Node 1]  Elections shutdown completed
    +2009-09-17 17:36:39:504 UTC INFO [Node 1]  Feeder manager soft shutdown.
    +2009-09-17 17:36:39:505 UTC INFO [Node 1]  Shutting down feeder for replica Node 4 write time:  214ms Avg write time: 19us
    +2009-09-17 17:36:40:502 UTC INFO [Node 1]  Feeder output for replica Node 4 shutdown. feeder VLSN: 11,126 currentCommitVLSN: 11,025
    +2009-09-17 17:36:40:503 UTC INFO [Node 1]  Thread[Feeder Output for Node 4,5,] has exited.
    +2009-09-17 17:36:40:503 UTC INFO [Node 1]  Thread[Feeder Input for Node 4,5,] has exited.
    +2009-09-17 17:36:40:504 UTC INFO [Node 1]  Shutting down feeder for replica Node 2 write time:  212ms Avg write time: 19us
    +2009-09-17 17:36:41:503 UTC INFO [Node 1]  Feeder output for replica Node 2 shutdown. feeder VLSN: 11,126 currentCommitVLSN: 11,025
    +2009-09-17 17:36:41:504 UTC INFO [Node 1]  Thread[Feeder Output for Node 2,5,] has exited.
    +2009-09-17 17:36:41:504 UTC INFO [Node 1]  Thread[Feeder Input for Node 2,5,] has exited.
    +2009-09-17 17:36:41:504 UTC INFO [Node 1]  Shutting down feeder for replica Node 3 write time:  230ms Avg write time: 20us
    +2009-09-17 17:36:42:504 UTC INFO [Node 1]  Feeder output for replica Node 3 shutdown. feeder VLSN: 11,126 currentCommitVLSN: 11,025
    +2009-09-17 17:36:42:504 UTC INFO [Node 1]  Thread[Feeder Output for Node 3,5,] has exited.
    +2009-09-17 17:36:42:505 UTC INFO [Node 1]  Thread[Feeder Input for Node 3,5,] has exited.
    +2009-09-17 17:36:42:505 UTC INFO [Node 1]  Shutting down feeder for replica Node 5 write time:  219ms Avg write time: 19us
    +2009-09-17 17:36:43:502 UTC INFO [Node 1]  Feeder output for replica Node 5 shutdown. feeder VLSN: 11,126 currentCommitVLSN: 11,025
    +2009-09-17 17:36:43:503 UTC INFO [Node 1]  Thread[Feeder Output for Node 5,5,] has exited.
    +2009-09-17 17:36:43:503 UTC INFO [Node 1]  Thread[Feeder Input for Node 5,5,] has exited.
    +2009-09-17 17:36:43:503 UTC INFO [Node 1]  Feeder manager exited. CurrentCommit VLSN: 11,025
    +2009-09-17 17:36:43:503 UTC INFO [Node 1]  RepNode main thread shutting down. 
    +2009-09-17 17:36:43:504 UTC INFO [Node 1]  RepNode main thread: DETACHED Node 1(1) exited.
    +2009-09-17 17:36:43:504 UTC INFO [Node 1]  ServiceDispatcher shutdown starting. HostPort=localhost:5500 Registered services: []
    +2009-09-17 17:36:43:505 UTC INFO [Node 1]  ServiceDispatcher shutdown completed. HostPort=localhost:5500
    +2009-09-17 17:36:43:505 UTC INFO [Node 1]  Node 1(1) shutdown completed.
    +
+
+
+
diff --git a/examples/collections/access/AccessExample.java b/examples/collections/access/AccessExample.java
new file mode 100644
index 0000000..130c118
--- /dev/null
+++ b/examples/collections/access/AccessExample.java
@@ -0,0 +1,276 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package collections.access;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintStream;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.ByteArrayBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+/**
+ * AccessExample mirrors the functionality of a class by the same name used to
+ * demonstrate the com.sleepycat.je Java API. This version makes use of the new
+ * com.sleepycat.collections.* collections style classes to make life easier.
+ *
+ * @author Gregory Burd
+ */
+public class AccessExample
+    implements Runnable {
+
+    private static boolean create = true;
+    private static final int EXIT_FAILURE = 1;
+
+    public static void usage() {
+
+        System.out.println("usage: java " + AccessExample.class.getName() +
+                           " [-r] [database]\n");
+        System.exit(EXIT_FAILURE);
+    }
+
+    public static void main(String[] argv) {
+
+        boolean removeExistingDatabase = false;
+        String databaseName = "access.db";
+
+        for (int i = 0; i < argv.length; i++) {
+            if (argv[i].equals("-r")) {
+                removeExistingDatabase = true;
+            } else if (argv[i].equals("-?")) {
+                usage();
+            } else if (argv[i].startsWith("-")) {
+                usage();
+            } else {
+                if ((argv.length - i) != 1)
+                    usage();
+                databaseName = argv[i];
+                break;
+            }
+        }
+
+        try {
+            EnvironmentConfig envConfig = new EnvironmentConfig();
+            envConfig.setTransactional(true);
+            if (create) {
+                envConfig.setAllowCreate(true);
+            }
+            Environment env = new Environment(new File("."), envConfig);
+            if (removeExistingDatabase) {
+                env.removeDatabase(null, databaseName);
+            }
+
+            // create the app and run it
+            AccessExample app = new AccessExample(env, databaseName);
+            app.run();
+            app.close();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(1);
+        } catch (FileNotFoundException e) {
+            e.printStackTrace();
+            System.exit(1);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+        System.exit(0);
+    }
+
+    private Database db;
+    private SortedMap<byte[], byte[]> map;
+    private Environment env;
+
+    /**
+     * Constructor for the AccessExample object
+     */
+    public AccessExample(Environment env, String databaseName)
+        throws Exception {
+
+        this.env = env;
+
+        /*
+         * Let's mimic the db.AccessExample 100% and use plain old byte arrays
+         * to store the key and data strings.
+         */
+        ByteArrayBinding keyBinding = new ByteArrayBinding();
+        ByteArrayBinding dataBinding = new ByteArrayBinding();
+
+        /* Open a data store. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        if (create) {
+            dbConfig.setAllowCreate(true);
+        }
+        this.db = env.openDatabase(null, databaseName, dbConfig);
+
+        /*
+         * Now create a collection style map view of the data store so that it
+         * is easy to work with the data in the database.
+         */
+        this.map = new StoredSortedMap<byte[], byte[]>
+            (db, keyBinding, dataBinding, true);
+    }
+
+    /**
+     * Close the database and environment.
+     */
+    void close()
+        throws DatabaseException {
+
+        db.close();
+        env.close();
+    }
+
+    /**
+     * Main processing method for the AccessExample object
+     */
+    public void run() {
+
+        /*
+         * Insert records into the stored sorted map, where the key
+         * is the user input and the data is the user input in reverse order.
+         */
+        final InputStreamReader reader = new InputStreamReader(System.in);
+
+        for (; ; ) {
+            final String line = askForLine(reader, System.out, "input> ");
+            if (line == null) {
+                break;
+            }
+
+            final String reversed =
+                (new StringBuilder(line)).reverse().toString();
+
+            log("adding: \"" +
+                line + "\" : \"" +
+                reversed + "\"");
+
+            /* Do the work to add the key/data pair to the map here. */
+            TransactionRunner tr = new TransactionRunner(env);
+            try {
+                tr.run(new TransactionWorker() {
+                    public void doWork() {
+                        try {
+                            if (!map.containsKey(line.getBytes("UTF-8")))
+                                map.put(line.getBytes("UTF-8"),
+                                        reversed.getBytes("UTF-8"));
+                            else
+                                System.out.println("Key " + line +
+                                                   " already exists.");
+                        } catch (Exception e) {
+                            System.err.println("doWork: " + e);
+                        }
+                    }
+                });
+            } catch (com.sleepycat.je.DatabaseException e) {
+                System.err.println("AccessExample: " + e);
+                System.exit(1);
+            } catch (java.lang.Exception e) {
+                System.err.println("AccessExample: " + e);
+                System.exit(1);
+            }
+        }
+        System.out.println("");
+
+        /*
+         * Traverse and print the map's key/data pairs here, using an
+         * iterator over the map entries.
+         */
+        Iterator<Map.Entry<byte[], byte[]>> iter = map.entrySet().iterator();
+        System.out.println("Reading data");
+        while (iter.hasNext()) {
+            Map.Entry<byte[], byte[]> entry = iter.next();
+            log("found \"" +
+                new String(entry.getKey()) +
+                "\" key with data \"" +
+                new String(entry.getValue()) + "\"");
+        }
+    }
+
+    /**
+     * Prompts for a line, and keeps prompting until a non-blank line is
+     * returned. Returns null on error.
+     *
+     * @param reader stream from which to read user input
+     * @param out stream on which to prompt for user input
+     * @param prompt prompt to use to solicit input
+     * @return the string supplied by the user
+     */
+    String askForLine(InputStreamReader reader,
+                      PrintStream out,
+                      String prompt) {
+
+        String result = "";
+        while (result != null && result.length() == 0) {
+            out.print(prompt);
+            out.flush();
+            result = getLine(reader);
+        }
+        return result;
+    }
+
+    /**
+     * Read a single line. Gets the line attribute of the AccessExample object.
+     * Not terribly efficient, but does the job. Works for reading a line from
+     * stdin or a file.
+     *
+     * @param reader stream from which to read the line
+     *
+     * @return either a String or null on EOF; if EOF appears in the middle of
+     * a line, returns that line, then null on next call.
+     */
+    String getLine(InputStreamReader reader) {
+
+        StringBuilder b = new StringBuilder();
+        int c;
+        try {
+            while ((c = reader.read()) != -1 && c != '\n') {
+                if (c != '\r') {
+                    b.append((char) c);
+                }
+            }
+        } catch (IOException ioe) {
+            c = -1;
+        }
+
+        if (c == -1 && b.length() == 0) {
+            return null;
+        } else {
+            return b.toString();
+        }
+    }
+
+    /**
+     * A simple log method.
+     *
+     * @param s The string to be logged.
+     */
+    private void log(String s) {
+
+        System.out.println(s);
+        System.out.flush();
+    }
+}
diff --git a/examples/collections/hello/HelloDatabaseWorld.java b/examples/collections/hello/HelloDatabaseWorld.java
new file mode 100644
index 0000000..67d225f
--- /dev/null
+++ b/examples/collections/hello/HelloDatabaseWorld.java
@@ -0,0 +1,157 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package collections.hello;
+
+import java.io.File;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.bind.serial.ClassCatalog;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.collections.TransactionRunner;
+import com.sleepycat.collections.TransactionWorker;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+
+/**
+ * @author Mark Hayes
+ */
+public class HelloDatabaseWorld implements TransactionWorker {
+
+    private static final String[] INT_NAMES = {
+        "Hello", "Database", "World",
+    };
+    private static boolean create = true;
+
+    private Environment env;
+    private ClassCatalog catalog;
+    private Database db;
+    private SortedMap<Integer, String> map;
+
+    /** Creates the environment and runs a transaction */
+    public static void main(String[] argv)
+        throws Exception {
+
+        String dir = "./tmp";
+
+        // environment is transactional
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        if (create) {
+            envConfig.setAllowCreate(true);
+        }
+        Environment env = new Environment(new File(dir), envConfig);
+
+        // create the application and run a transaction
+        HelloDatabaseWorld worker = new HelloDatabaseWorld(env);
+        TransactionRunner runner = new TransactionRunner(env);
+        try {
+            // open and access the database within a transaction
+            runner.run(worker);
+        } finally {
+            // close the database outside the transaction
+            worker.close();
+        }
+    }
+
+    /** Creates the database for this application */
+    private HelloDatabaseWorld(Environment env)
+        throws Exception {
+
+        this.env = env;
+        open();
+    }
+
+    /** Performs work within a transaction. */
+    public void doWork() {
+        writeAndRead();
+    }
+
+    /** Opens the database and creates the Map.
+     */
+    private void open()
+        throws Exception {
+
+        // use a generic database configuration
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        if (create) {
+            dbConfig.setAllowCreate(true);
+        }
+
+        // catalog is needed for serial bindings (java serialization)
+        Database catalogDb = env.openDatabase(null, "catalog", dbConfig);
+        catalog = new StoredClassCatalog(catalogDb);
+
+        // use Integer tuple binding for key entries
+        TupleBinding<Integer> keyBinding =
+            TupleBinding.getPrimitiveBinding(Integer.class);
+
+        // use String serial binding for data entries
+        SerialBinding<String> dataBinding =
+            new SerialBinding<String>(catalog, String.class);
+
+        this.db = env.openDatabase(null, "helloworld", dbConfig);
+
+        // create a map view of the database
+        this.map = new StoredSortedMap<Integer, String>
+            (db, keyBinding, dataBinding, true);
+    }
+
+    /** Closes the database. */
+    private void close()
+        throws Exception {
+
+        if (catalog != null) {
+            catalog.close();
+            catalog = null;
+        }
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    /** Writes and reads the database via the Map. */
+    private void writeAndRead() {
+
+        // check for existing data
+        Integer key = new Integer(0);
+        String val = map.get(key);
+        if (val == null) {
+            System.out.println("Writing data");
+            // write in reverse order to show that keys are sorted
+            for (int i = INT_NAMES.length - 1; i >= 0; i -= 1) {
+                map.put(new Integer(i), INT_NAMES[i]);
+            }
+        }
+        // get iterator over map entries
+        Iterator<Map.Entry<Integer, String>> iter = map.entrySet().iterator();
+        System.out.println("Reading data");
+        while (iter.hasNext()) {
+            Map.Entry<Integer, String> entry = iter.next();
+            System.out.println(entry.getKey().toString() + ' ' +
+                               entry.getValue());
+        }
+    }
+}
diff --git a/examples/collections/ship/basic/PartData.java b/examples/collections/ship/basic/PartData.java
new file mode 100644
index 0000000..26fd794
--- /dev/null
+++ b/examples/collections/ship/basic/PartData.java
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package collections.ship.basic;
+
+import java.io.Serializable;
+
+/**
+ * A PartData serves as the data in the key/data pair for a part entity.
+ *
+ *

    In this sample, PartData is used both as the storage entry for the + * data as well as the object binding to the data. Because it is used + * directly as storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/basic/PartKey.java b/examples/collections/ship/basic/PartKey.java new file mode 100644 index 0000000..6e49794 --- /dev/null +++ b/examples/collections/ship/basic/PartKey.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.io.Serializable; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

    In this sample, PartKey is used both as the storage entry for the key as + * well as the object binding to the key. Because it is used directly as + * storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class PartKey implements Serializable { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/basic/Sample.java b/examples/collections/ship/basic/Sample.java new file mode 100644 index 0000000..1e3934a --- /dev/null +++ b/examples/collections/ship/basic/Sample.java @@ -0,0 +1,256 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.util.Iterator; +import java.util.Map; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java collections.ship.basic.Sample
+ *      [-h <home directory> ]
    + * 
    + * + *

    The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. The home directory must exist + * before running the sample. To recreate the sample database from scratch, + * delete all files in the home directory before running the sample.

    + * + * @author Mark Hayes + */ +public class Sample { + + private final SampleDatabase db; + private final SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed. + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() { + printEntries("Parts", + views.getPartEntrySet().iterator()); + printEntries("Suppliers", + views.getSupplierEntrySet().iterator()); + printEntries("Shipments", + views.getShipmentEntrySet().iterator()); + } + } + + /** + * Populate the part entities in the database. If the part map is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Map parts = views.getPartMap(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.put(new PartKey("P1"), + new PartData("Nut", "Red", + new Weight(12.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P2"), + new PartData("Bolt", "Green", + new Weight(17.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P3"), + new PartData("Screw", "Blue", + new Weight(17.0, Weight.GRAMS), + "Rome")); + parts.put(new PartKey("P4"), + new PartData("Screw", "Red", + new Weight(14.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P5"), + new PartData("Cam", "Blue", + new Weight(12.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P6"), + new PartData("Cog", "Red", + new Weight(19.0, Weight.GRAMS), + "London")); + } + } + + /** + * Populate the supplier entities in the database. 
If the supplier map is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Map suppliers = views.getSupplierMap(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.put(new SupplierKey("S1"), + new SupplierData("Smith", 20, "London")); + suppliers.put(new SupplierKey("S2"), + new SupplierData("Jones", 10, "Paris")); + suppliers.put(new SupplierKey("S3"), + new SupplierData("Blake", 30, "Paris")); + suppliers.put(new SupplierKey("S4"), + new SupplierData("Clark", 20, "London")); + suppliers.put(new SupplierKey("S5"), + new SupplierData("Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment map + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Map shipments = views.getShipmentMap(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.put(new ShipmentKey("P1", "S1"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P3", "S1"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P4", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P5", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P6", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P1", "S2"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S2"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P2", "S3"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P2", "S4"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P4", "S4"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P5", "S4"), + new ShipmentData(400)); + } + } + + /** + * Print the key/value objects returned by an iterator of Map.Entry + * objects. + */ + private void printEntries(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + Map.Entry entry = (Map.Entry) iterator.next(); + System.out.println(entry.getKey().toString()); + System.out.println(entry.getValue().toString()); + } + } +} diff --git a/examples/collections/ship/basic/SampleDatabase.java b/examples/collections/ship/basic/SampleDatabase.java new file mode 100644 index 0000000..b33f2c6 --- /dev/null +++ b/examples/collections/ship/basic/SampleDatabase.java @@ -0,0 +1,133 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.io.File; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. 
+ * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Close all databases and the environment. + */ + public void close() + throws DatabaseException { + + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } +} diff --git a/examples/collections/ship/basic/SampleViews.java b/examples/collections/ship/basic/SampleViews.java new file mode 100644 index 0000000..299c5bc --- /dev/null +++ b/examples/collections/ship/basic/SampleViews.java @@ -0,0 +1,127 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package collections.ship.basic; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.collections.StoredEntrySet; +import com.sleepycat.collections.StoredMap; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredMap partMap; + private StoredMap supplierMap; + private StoredMap shipmentMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // In this sample, the stored key and data entries are used directly + // rather than mapping them to separate objects. Therefore, no binding + // classes are defined here and the SerialBinding class is used. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new SerialBinding(catalog, PartKey.class); + EntryBinding partDataBinding = + new SerialBinding(catalog, PartData.class); + EntryBinding supplierKeyBinding = + new SerialBinding(catalog, SupplierKey.class); + EntryBinding supplierDataBinding = + new SerialBinding(catalog, SupplierData.class); + EntryBinding shipmentKeyBinding = + new SerialBinding(catalog, ShipmentKey.class); + EntryBinding shipmentDataBinding = + new SerialBinding(catalog, ShipmentData.class); + + // Create map views for all stores and indices. + // StoredSortedMap is not used since the stores and indices are + // ordered by serialized key objects, which do not provide a very + // useful ordering. + // + partMap = + new StoredMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredMap and StoredEntrySet + // classes, which provide additional methods. The entry sets could be + // obtained directly from the Map.entrySet() method, but convenience + // methods are provided here to return them in order to avoid down-casting + // elsewhere. + + /** + * Return a map view of the part storage container. + */ + public final StoredMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public final StoredMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public final StoredMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entry set view of the part storage container. + */ + public final StoredEntrySet getPartEntrySet() { + + return (StoredEntrySet) partMap.entrySet(); + } + + /** + * Return an entry set view of the supplier storage container. + */ + public final StoredEntrySet getSupplierEntrySet() { + + return (StoredEntrySet) supplierMap.entrySet(); + } + + /** + * Return an entry set view of the shipment storage container. 
+ */ + public final StoredEntrySet getShipmentEntrySet() { + + return (StoredEntrySet) shipmentMap.entrySet(); + } +} diff --git a/examples/collections/ship/basic/ShipmentData.java b/examples/collections/ship/basic/ShipmentData.java new file mode 100644 index 0000000..920f456 --- /dev/null +++ b/examples/collections/ship/basic/ShipmentData.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the data in the key/data pair for a shipment + * entity. + * + *

    In this sample, ShipmentData is used both as the storage entry for the + * data as well as the object binding to the data. Because it is used + * directly as storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/examples/collections/ship/basic/ShipmentKey.java b/examples/collections/ship/basic/ShipmentKey.java new file mode 100644 index 0000000..cab352c --- /dev/null +++ b/examples/collections/ship/basic/ShipmentKey.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.io.Serializable; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

    In this sample, ShipmentKey is used both as the storage entry for the + * key as well as the object binding to the key. Because it is used directly + * as storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentKey implements Serializable { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/examples/collections/ship/basic/SupplierData.java b/examples/collections/ship/basic/SupplierData.java new file mode 100644 index 0000000..2a26aef --- /dev/null +++ b/examples/collections/ship/basic/SupplierData.java @@ -0,0 +1,62 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.io.Serializable; + +/** + * A SupplierData serves as the data in the key/data pair for a supplier + * entity. + * + *

    In this sample, SupplierData is used both as the storage entry for the + * data as well as the object binding to the data. Because it is used + * directly as storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/basic/SupplierKey.java b/examples/collections/ship/basic/SupplierKey.java new file mode 100644 index 0000000..3fbc2b4 --- /dev/null +++ b/examples/collections/ship/basic/SupplierKey.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.io.Serializable; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

    In this sample, SupplierKey is used both as the storage entry for the key + * as well as the object binding to the key. Because it is used directly as + * storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierKey implements Serializable { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/basic/Weight.java b/examples/collections/ship/basic/Weight.java new file mode 100644 index 0000000..0ac722f --- /dev/null +++ b/examples/collections/ship/basic/Weight.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.basic; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Serial serialized objects; therefore Weight must be Serializable.

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/examples/collections/ship/entity/Part.java b/examples/collections/ship/entity/Part.java new file mode 100644 index 0000000..d7d415e --- /dev/null +++ b/examples/collections/ship/entity/Part.java @@ -0,0 +1,77 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

    In this sample, Part is created from the stored key/data entry using a + * SerialSerialBinding. See {@link SampleViews.PartBinding} for details. + * Since this class is not used directly for data storage, it does not need to + * be Serializable.

    + * + * @author Mark Hayes + */ +public class Part { + + private String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/entity/PartData.java b/examples/collections/ship/entity/PartData.java new file mode 100644 index 0000000..6427d2a --- /dev/null +++ b/examples/collections/ship/entity/PartData.java @@ -0,0 +1,70 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.Serializable; + +/** + * A PartData serves as the value in the key/value pair for a part entity. + * + *

    In this sample, PartData is used only as the storage data for the + * value, while the Part object is used as the value's object representation. + * Because it is used directly as storage data using serial format, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/entity/PartKey.java b/examples/collections/ship/entity/PartKey.java new file mode 100644 index 0000000..aae269b --- /dev/null +++ b/examples/collections/ship/entity/PartKey.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.Serializable; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

    In this sample, PartKey is used both as the storage entry for the key as + * well as the object binding to the key. Because it is used directly as + * storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class PartKey implements Serializable { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/entity/Sample.java b/examples/collections/ship/entity/Sample.java new file mode 100644 index 0000000..97e42ec --- /dev/null +++ b/examples/collections/ship/entity/Sample.java @@ -0,0 +1,237 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java collections.ship.entity.Sample
+ *      [-h <home-directory>]
    + * 
    + * + *

    The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. The home directory must exist + * before running the sample. To recreate the sample database from scratch, + * delete all files in the home directory before running the sample.

+ * + * @author Mark Hayes + */ +public class Sample { + + private final SampleDatabase db; + private final SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h <home-directory>]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys.
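The run() method above hands each worker to a TransactionRunner; a rough sketch of the same pattern with an ad-hoc worker (illustrative only -- the part P7 is hypothetical and exception handling is elided):

    // Illustrative sketch: one transaction, with automatic deadlock retries.
    TransactionRunner runner = new TransactionRunner(db.getEnvironment());
    runner.run(new TransactionWorker() {
        public void doWork() {
            // All reads and writes here commit or abort as a unit.
            views.getPartSet().add(new Part("P7", "Washer", "Grey",
                new Weight(5.0, Weight.GRAMS), "Oslo"));
        }
    });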

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } +} diff --git a/examples/collections/ship/entity/SampleDatabase.java b/examples/collections/ship/entity/SampleDatabase.java new file mode 100644 index 0000000..dbf02d7 --- /dev/null +++ b/examples/collections/ship/entity/SampleDatabase.java @@ -0,0 +1,326 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.File; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialSerialKeyCreator; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. 
A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator( + new SupplierByCityKeyCreator(javaCatalog, + SupplierKey.class, + SupplierData.class, + String.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + supplierDb, secConfig); + + secConfig.setForeignKeyDatabase(partDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator( + new ShipmentByPartKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + PartKey.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + shipmentDb, secConfig); + + secConfig.setForeignKeyDatabase(supplierDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator( + new ShipmentBySupplierKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + SupplierKey.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + shipmentDb, secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class SupplierByCityKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the supplier key class. + * @param valueClass is the supplier value class. + * @param indexKeyClass is the city key class. 
+ */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + SupplierData supplierData = (SupplierData) valueInput; + return supplierData.getCity(); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentByPartKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the part key class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new PartKey(shipmentKey.getPartNumber()); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentBySupplierKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the supplier key class. + */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The part + * key is stored in the shipment key, so the shipment value is not + * used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new SupplierKey(shipmentKey.getSupplierNumber()); + } + } +} diff --git a/examples/collections/ship/entity/SampleViews.java b/examples/collections/ship/entity/SampleViews.java new file mode 100644 index 0000000..d6b8702 --- /dev/null +++ b/examples/collections/ship/entity/SampleViews.java @@ -0,0 +1,311 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.SerialSerialBinding; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredValueSet; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object. For keys, however, + // the stored entry is used directly via a SerialBinding and no + // special binding class is needed. + // + ClassCatalog catalog = db.getClassCatalog(); + SerialBinding partKeyBinding = + new SerialBinding(catalog, PartKey.class); + EntityBinding partDataBinding = + new PartBinding(catalog, PartKey.class, PartData.class); + SerialBinding supplierKeyBinding = + new SerialBinding(catalog, SupplierKey.class); + EntityBinding supplierDataBinding = + new SupplierBinding(catalog, SupplierKey.class, + SupplierData.class); + SerialBinding shipmentKeyBinding = + new SerialBinding(catalog, ShipmentKey.class); + EntityBinding shipmentDataBinding = + new ShipmentBinding(catalog, ShipmentKey.class, + ShipmentData.class); + SerialBinding cityKeyBinding = + new SerialBinding(catalog, String.class); + + // Create map views for all stores and indices. + // Although StoredSortedMap is used here, the stores and indices are + // ordered by serialized key objects, which do not provide a very + // useful ordering. + // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods.
The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredValueSet getPartSet() { + + return (StoredValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredValueSet getSupplierSet() { + + return (StoredValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredValueSet getShipmentSet() { + + return (StoredValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * PartBinding is used to bind the stored key/data entry pair for a part + * to a combined data object (entity). + */ + private static class PartBinding extends SerialSerialBinding { + + /** + * Construct the binding object. + */ + private PartBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + super(classCatalog, keyClass, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(Object keyInput, Object dataInput) { + + PartKey key = (PartKey) keyInput; + PartData data = (PartData) dataInput; + return new Part(key.getNumber(), data.getName(), data.getColor(), + data.getWeight(), data.getCity()); + } + + /** + * Create the stored key from the entity. + */ + public Object objectToKey(Object object) { + + Part part = (Part) object; + return new PartKey(part.getNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Part part = (Part) object; + return new PartData(part.getName(), part.getColor(), + part.getWeight(), part.getCity()); + } + } + + /** + * SupplierBinding is used to bind the stored key/data entry pair for a + * supplier to a combined data object (entity). + */ + private static class SupplierBinding extends SerialSerialBinding { + + /** + * Construct the binding object. + */ + private SupplierBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + super(classCatalog, keyClass, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(Object keyInput, Object dataInput) { + + SupplierKey key = (SupplierKey) keyInput; + SupplierData data = (SupplierData) dataInput; + return new Supplier(key.getNumber(), data.getName(), + data.getStatus(), data.getCity()); + } + + /** + * Create the stored key from the entity. 
+ */ + public Object objectToKey(Object object) { + + Supplier supplier = (Supplier) object; + return new SupplierKey(supplier.getNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Supplier supplier = (Supplier) object; + return new SupplierData(supplier.getName(), supplier.getStatus(), + supplier.getCity()); + } + } + + /** + * ShipmentBinding is used to bind the stored key/data entry pair for a + * shipment to a combined data object (entity). + */ + private static class ShipmentBinding extends SerialSerialBinding { + + /** + * Construct the binding object. + */ + private ShipmentBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + super(classCatalog, keyClass, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(Object keyInput, Object dataInput) { + + ShipmentKey key = (ShipmentKey) keyInput; + ShipmentData data = (ShipmentData) dataInput; + return new Shipment(key.getPartNumber(), key.getSupplierNumber(), + data.getQuantity()); + } + + /** + * Create the stored key from the entity. + */ + public Object objectToKey(Object object) { + + Shipment shipment = (Shipment) object; + return new ShipmentKey(shipment.getPartNumber(), + shipment.getSupplierNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Shipment shipment = (Shipment) object; + return new ShipmentData(shipment.getQuantity()); + } + } +} diff --git a/examples/collections/ship/entity/Shipment.java b/examples/collections/ship/entity/Shipment.java new file mode 100644 index 0000000..5b6b709 --- /dev/null +++ b/examples/collections/ship/entity/Shipment.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

    In this sample, Shipment is created from the stored key/data entry + * using a SerialSerialBinding. See {@link SampleViews.ShipmentBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable.
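A possible usage sketch (assuming the db and views objects opened by Sample in this package): the binding assembles a Shipment from the stored key and data on each lookup.

    // Illustrative sketch: read one shipment entity through the map view.
    Shipment s = (Shipment) views.getShipmentMap()
                                 .get(new ShipmentKey("P1", "S1"));
    System.out.println(s);   // prints [Shipment: part=P1 supplier=S1 quantity=300]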

    + * + * @author Mark Hayes + */ +public class Shipment { + + private String partNumber; + private String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } +} diff --git a/examples/collections/ship/entity/ShipmentData.java b/examples/collections/ship/entity/ShipmentData.java new file mode 100644 index 0000000..bfa4a4f --- /dev/null +++ b/examples/collections/ship/entity/ShipmentData.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the value in the key/value pair for a shipment + * entity. + * + *

    In this sample, ShipmentData is used only as the storage data for the + * value, while the Shipment object is used as the value's object + * representation. Because it is used directly as storage data using + * serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/examples/collections/ship/entity/ShipmentKey.java b/examples/collections/ship/entity/ShipmentKey.java new file mode 100644 index 0000000..fbc2e12 --- /dev/null +++ b/examples/collections/ship/entity/ShipmentKey.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.Serializable; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

In this sample, ShipmentKey is used both as the storage entry for the + * key and as the object binding to the key. Because it is used directly + * as storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentKey implements Serializable { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/examples/collections/ship/entity/Supplier.java b/examples/collections/ship/entity/Supplier.java new file mode 100644 index 0000000..6492f84 --- /dev/null +++ b/examples/collections/ship/entity/Supplier.java @@ -0,0 +1,68 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

    In this sample, Supplier is created from the stored key/data entry + * using a SerialSerialBinding. See {@link SampleViews.SupplierBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class Supplier { + + private String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/entity/SupplierData.java b/examples/collections/ship/entity/SupplierData.java new file mode 100644 index 0000000..3fa73c3 --- /dev/null +++ b/examples/collections/ship/entity/SupplierData.java @@ -0,0 +1,63 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.Serializable; + +/** + * A SupplierData serves as the value in the key/value pair for a supplier + * entity. + * + *

    In this sample, SupplierData is used only as the storage data for the + * value, while the Supplier object is used as the value's object + * representation. Because it is used directly as storage data using + * serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/entity/SupplierKey.java b/examples/collections/ship/entity/SupplierKey.java new file mode 100644 index 0000000..b36a579 --- /dev/null +++ b/examples/collections/ship/entity/SupplierKey.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.Serializable; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

In this sample, SupplierKey is used both as the storage entry for the + * key and as the object binding to the key. Because it is used directly + * as storage data using serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierKey implements Serializable { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/entity/Weight.java b/examples/collections/ship/entity/Weight.java new file mode 100644 index 0000000..8b19299 --- /dev/null +++ b/examples/collections/ship/entity/Weight.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.entity; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Serial serialized objects; therefore Weight must be Serializable.

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/examples/collections/ship/factory/Part.java b/examples/collections/ship/factory/Part.java new file mode 100644 index 0000000..8a3db80 --- /dev/null +++ b/examples/collections/ship/factory/Part.java @@ -0,0 +1,111 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

    In this sample, Part is bound to the stored key/data entry by + * implementing the MarshalledTupleKeyEntity interface.

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) + * are transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a PartData class completely.
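A short sketch of that round trip (illustrative only; in the sample this is done internally by the tuple-serial binding rather than by hand):

    // Illustrative sketch: the key travels as a tuple, not as serialized data.
    Part part = new Part("P1", "Nut", "Red",
                         new Weight(12.0, Weight.GRAMS), "London");
    TupleOutput keyOutput = new TupleOutput();
    part.marshalPrimaryKey(keyOutput);              // writes the part number
    part.unmarshalPrimaryKey(new TupleInput(keyOutput.toByteArray()));
    // getNumber() returns "P1" again; the transient key field was restored
    // from the tuple, never from the serialized data portion.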

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

    + * + * @author Mark Hayes + */ +public class Part implements Serializable, MarshalledTupleKeyEntity { + + private transient String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } + + // --- MarshalledTupleKeyEntity implementation --- + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + throw new UnsupportedOperationException(keyName); + } + + public boolean nullifyForeignKey(String keyName) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/examples/collections/ship/factory/PartKey.java b/examples/collections/ship/factory/PartKey.java new file mode 100644 index 0000000..4eae699 --- /dev/null +++ b/examples/collections/ship/factory/PartKey.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import com.sleepycat.bind.tuple.MarshalledTupleEntry; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

    In this sample, PartKey is bound to the stored key tuple entry by + * implementing the MarshalledTupleEntry interface.
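A similar sketch for the key alone (illustrative only): the no-argument constructor declared below exists so the binding can instantiate the object before filling it from the tuple.

    // Illustrative sketch: marshal a PartKey into a tuple and back.
    PartKey key = new PartKey("P1");
    TupleOutput out = new TupleOutput();
    key.marshalEntry(out);
    PartKey copy = new PartKey();     // no-arg constructor used by the binding
    copy.unmarshalEntry(new TupleInput(out.toByteArray()));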

    + * + * @author Mark Hayes + */ +public class PartKey implements MarshalledTupleEntry { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } + + // --- MarshalledTupleEntry implementation --- + + public PartKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void marshalEntry(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalEntry(TupleInput keyInput) { + + this.number = keyInput.readString(); + } +} diff --git a/examples/collections/ship/factory/Sample.java b/examples/collections/ship/factory/Sample.java new file mode 100644 index 0000000..f15fcb8 --- /dev/null +++ b/examples/collections/ship/factory/Sample.java @@ -0,0 +1,237 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java collections.ship.factory.Sample
+ *      [-h <home-directory>]
    + * 
    + * + *

The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. To specify a different home + * directory, use the -h option. The home directory must exist before + * running the sample. To recreate the sample database from scratch, delete + * all files in the home directory before running the sample.

+ * + * @author Mark Hayes + */ +public class Sample { + + private final SampleDatabase db; + private final SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h <home-directory>]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws Exception { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws Exception { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys.
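Because the factory bindings store keys in tuple format, the sorted views in this package also have a meaningful key order, so range queries are possible as well (an illustrative sketch, not part of the sample, assuming the views object from this class):

    // Illustrative sketch: StoredSortedMap implements java.util.SortedMap,
    // so tuple-ordered keys support range views such as headMap().
    java.util.SortedMap lowParts = views.getPartMap().headMap(new PartKey("P3"));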

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } +} diff --git a/examples/collections/ship/factory/SampleDatabase.java b/examples/collections/ship/factory/SampleDatabase.java new file mode 100644 index 0000000..6f92737 --- /dev/null +++ b/examples/collections/ship/factory/SampleDatabase.java @@ -0,0 +1,221 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import java.io.File; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.collections.TupleSerialFactory; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + private TupleSerialFactory factory; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Use the TupleSerialFactory for a Serial/Tuple-based database + // where marshalling interfaces are used. + // + factory = new TupleSerialFactory(javaCatalog); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store.
+ // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(factory.getKeyCreator(Supplier.class, + Supplier.CITY_KEY)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + supplierDb, secConfig); + + secConfig.setForeignKeyDatabase(partDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(factory.getKeyCreator(Shipment.class, + Shipment.PART_KEY)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + shipmentDb, secConfig); + + secConfig.setForeignKeyDatabase(supplierDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(factory.getKeyCreator(Shipment.class, + Shipment.SUPPLIER_KEY)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + shipmentDb, secConfig); + } + + /** + * Return the tuple-serial factory. + */ + public final TupleSerialFactory getFactory() { + + return factory; + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all databases and the environment. + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } +} diff --git a/examples/collections/ship/factory/SampleViews.java b/examples/collections/ship/factory/SampleViews.java new file mode 100644 index 0000000..3a16a26 --- /dev/null +++ b/examples/collections/ship/factory/SampleViews.java @@ -0,0 +1,147 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; +import com.sleepycat.collections.TupleSerialFactory; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Use the TupleSerialFactory for a Serial/Tuple-based database + // where marshalling interfaces are used. + // + TupleSerialFactory factory = db.getFactory(); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). + // + partMap = + factory.newSortedMap(db.getPartDatabase(), + PartKey.class, Part.class, true); + supplierMap = + factory.newSortedMap(db.getSupplierDatabase(), + SupplierKey.class, Supplier.class, true); + shipmentMap = + factory.newSortedMap(db.getShipmentDatabase(), + ShipmentKey.class, Shipment.class, true); + shipmentByPartMap = + factory.newSortedMap(db.getShipmentByPartDatabase(), + PartKey.class, Shipment.class, true); + shipmentBySupplierMap = + factory.newSortedMap(db.getShipmentBySupplierDatabase(), + SupplierKey.class, Shipment.class, true); + supplierByCityMap = + factory.newSortedMap(db.getSupplierByCityDatabase(), + String.class, Supplier.class, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredMap and StoredValueSet + // classes, which provide additional methods. The entity sets could be + // obtained directly from the Map.values() method but convenience methods + // are provided here to return them in order to avoid down-casting + // elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. 
+ */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } +} diff --git a/examples/collections/ship/factory/Shipment.java b/examples/collections/ship/factory/Shipment.java new file mode 100644 index 0000000..0539387 --- /dev/null +++ b/examples/collections/ship/factory/Shipment.java @@ -0,0 +1,107 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *
+ * In this sample, Shipment is bound to the stored key/data entry by
+ * implementing the MarshalledTupleKeyEntity interface.
+ *
+ * The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object. To do this, the key field(s)
+ * are transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a ShipmentData class completely.
+ *
+ * Since this class is used directly for data storage, it must be
+ * Serializable.
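+ *
+ * The following is a sketch (not part of the sample itself) of the
+ * primary key round trip these methods provide; the shipment and copy
+ * instances are hypothetical:
+ * <pre>
+ * TupleOutput out = new TupleOutput();
+ * shipment.marshalPrimaryKey(out);          // writes part, then supplier
+ * TupleInput in = new TupleInput(out.getBufferBytes(), 0,
+ *                                out.getBufferLength());
+ * copy.unmarshalPrimaryKey(in);             // restores the transient keys
+ * </pre>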
    + * + * @author Mark Hayes + */ +public class Shipment implements Serializable, MarshalledTupleKeyEntity { + + static final String PART_KEY = "part"; + static final String SUPPLIER_KEY = "supplier"; + + private transient String partNumber; + private transient String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } + + // --- MarshalledTupleKeyEntity implementation --- + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(PART_KEY)) { + keyOutput.writeString(this.partNumber); + return true; + } else if (keyName.equals(SUPPLIER_KEY)) { + keyOutput.writeString(this.supplierNumber); + return true; + } else { + throw new UnsupportedOperationException(keyName); + } + } + + public boolean nullifyForeignKey(String keyName) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/examples/collections/ship/factory/ShipmentKey.java b/examples/collections/ship/factory/ShipmentKey.java new file mode 100644 index 0000000..cf8e73d --- /dev/null +++ b/examples/collections/ship/factory/ShipmentKey.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import com.sleepycat.bind.tuple.MarshalledTupleEntry; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *
+ * In this sample, ShipmentKey is bound to the stored key tuple entry by
+ * implementing the MarshalledTupleEntry interface.
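+ *
+ * Note that marshalEntry writes the part number before the supplier
+ * number, so in the btree the shipment keys sort first by part and then
+ * by supplier.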
    + * + * @author Mark Hayes + */ +public class ShipmentKey implements MarshalledTupleEntry { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } + + // --- MarshalledTupleEntry implementation --- + + public ShipmentKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void marshalEntry(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } + + public void unmarshalEntry(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } +} diff --git a/examples/collections/ship/factory/Supplier.java b/examples/collections/ship/factory/Supplier.java new file mode 100644 index 0000000..931e726 --- /dev/null +++ b/examples/collections/ship/factory/Supplier.java @@ -0,0 +1,113 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *
+ * In this sample, Supplier is bound to the stored key/data entry by
+ * implementing the MarshalledTupleKeyEntity interface.
+ *
+ * The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object. To do this, the key field(s)
+ * are transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a SupplierData class completely.
+ *
+ * Since this class is used directly for data storage, it must be
+ * Serializable.
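+ *
+ * Note that marshalSecondaryKey returns false when the city field is
+ * null; returning false tells the key creator not to add a record for
+ * this supplier in the by-city index.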
    + * + * @author Mark Hayes + */ +public class Supplier implements Serializable, MarshalledTupleKeyEntity { + + static final String CITY_KEY = "city"; + + private transient String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } + + // --- MarshalledTupleKeyEntity implementation --- + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(CITY_KEY)) { + if (this.city != null) { + keyOutput.writeString(this.city); + return true; + } else { + return false; + } + } else { + throw new UnsupportedOperationException(keyName); + } + } + + public boolean nullifyForeignKey(String keyName) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/examples/collections/ship/factory/SupplierKey.java b/examples/collections/ship/factory/SupplierKey.java new file mode 100644 index 0000000..506d8ba --- /dev/null +++ b/examples/collections/ship/factory/SupplierKey.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import com.sleepycat.bind.tuple.MarshalledTupleEntry; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *
+ * In this sample, SupplierKey is bound to the stored key tuple entry by
+ * implementing the MarshalledTupleEntry interface.
    + * + * @author Mark Hayes + */ +public class SupplierKey implements MarshalledTupleEntry { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } + + // --- MarshalledTupleEntry implementation --- + + public SupplierKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void marshalEntry(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public void unmarshalEntry(TupleInput keyInput) { + + this.number = keyInput.readString(); + } +} diff --git a/examples/collections/ship/factory/Weight.java b/examples/collections/ship/factory/Weight.java new file mode 100644 index 0000000..c116332 --- /dev/null +++ b/examples/collections/ship/factory/Weight.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.factory; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *
+ * In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable.
    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/examples/collections/ship/index/PartData.java b/examples/collections/ship/index/PartData.java new file mode 100644 index 0000000..b986821 --- /dev/null +++ b/examples/collections/ship/index/PartData.java @@ -0,0 +1,69 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.Serializable; + +/** + * A PartData serves as the data in the key/data pair for a part entity. + * + *
+ * In this sample, PartData is used both as the stored data and as the
+ * object bound to that data. Because it is stored directly using the
+ * serial format, it must be Serializable.
    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/index/PartKey.java b/examples/collections/ship/index/PartKey.java new file mode 100644 index 0000000..0e72ce3 --- /dev/null +++ b/examples/collections/ship/index/PartKey.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.Serializable; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *
+ * In this sample, PartKey is used both as the stored key and as the
+ * object bound to that key. Because it is stored directly using the
+ * serial format, it must be Serializable.
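+ *
+ * Because keys in this variant of the sample are stored in serial (Java
+ * serialization) format rather than tuple format, the btree orders them
+ * by their serialized bytes, which is not a meaningful sort order for
+ * range queries.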
    + * + * @author Mark Hayes + */ +public class PartKey implements Serializable { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/index/Sample.java b/examples/collections/ship/index/Sample.java new file mode 100644 index 0000000..07baecf --- /dev/null +++ b/examples/collections/ship/index/Sample.java @@ -0,0 +1,280 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.util.Iterator; +import java.util.Map; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
+ * <pre>
+ * java collections.ship.index.Sample
+ *      [-h <home-directory> ]
+ * </pre>
+ *
+ * The default for the home directory is ./tmp -- the tmp subdirectory of
+ * the current directory where the sample is run. The home directory must
+ * exist before running the sample. To recreate the sample database from
+ * scratch, delete all files in the home directory before running the
+ * sample.
    + * + * @author Mark Hayes + */ +public class Sample { + + private final SampleDatabase db; + private final SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *
+ * Note the use of special iterator() methods. These are used here
+ * with indices to find the shipments for certain keys.
    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() { + printEntries("Parts", + views.getPartEntrySet().iterator()); + printEntries("Suppliers", + views.getSupplierEntrySet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printEntries("Shipments", + views.getShipmentEntrySet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part map is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Map parts = views.getPartMap(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.put(new PartKey("P1"), + new PartData("Nut", "Red", + new Weight(12.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P2"), + new PartData("Bolt", "Green", + new Weight(17.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P3"), + new PartData("Screw", "Blue", + new Weight(17.0, Weight.GRAMS), + "Rome")); + parts.put(new PartKey("P4"), + new PartData("Screw", "Red", + new Weight(14.0, Weight.GRAMS), + "London")); + parts.put(new PartKey("P5"), + new PartData("Cam", "Blue", + new Weight(12.0, Weight.GRAMS), + "Paris")); + parts.put(new PartKey("P6"), + new PartData("Cog", "Red", + new Weight(19.0, Weight.GRAMS), + "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier map is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Map suppliers = views.getSupplierMap(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.put(new SupplierKey("S1"), + new SupplierData("Smith", 20, "London")); + suppliers.put(new SupplierKey("S2"), + new SupplierData("Jones", 10, "Paris")); + suppliers.put(new SupplierKey("S3"), + new SupplierData("Blake", 30, "Paris")); + suppliers.put(new SupplierKey("S4"), + new SupplierData("Clark", 20, "London")); + suppliers.put(new SupplierKey("S5"), + new SupplierData("Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment map + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Map shipments = views.getShipmentMap(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.put(new ShipmentKey("P1", "S1"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P3", "S1"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P4", "S1"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P5", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P6", "S1"), + new ShipmentData(100)); + shipments.put(new ShipmentKey("P1", "S2"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P2", "S2"), + new ShipmentData(400)); + shipments.put(new ShipmentKey("P2", "S3"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P2", "S4"), + new ShipmentData(200)); + shipments.put(new ShipmentKey("P4", "S4"), + new ShipmentData(300)); + shipments.put(new ShipmentKey("P5", "S4"), + new ShipmentData(400)); + } + } + + /** + * Print the key/value objects returned by an iterator of Map.Entry + * objects. 
+ */ + private void printEntries(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + Map.Entry entry = (Map.Entry) iterator.next(); + System.out.println(entry.getKey().toString()); + System.out.println(entry.getValue().toString()); + } + } + + /** + * Print the objects returned by an iterator of value objects. + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } +} diff --git a/examples/collections/ship/index/SampleDatabase.java b/examples/collections/ship/index/SampleDatabase.java new file mode 100644 index 0000000..7640d52 --- /dev/null +++ b/examples/collections/ship/index/SampleDatabase.java @@ -0,0 +1,326 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.File; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialSerialKeyCreator; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. 
+ // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. + // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator( + new SupplierByCityKeyCreator(javaCatalog, + SupplierKey.class, + SupplierData.class, + String.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + supplierDb, secConfig); + + secConfig.setForeignKeyDatabase(partDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator( + new ShipmentByPartKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + PartKey.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + shipmentDb, secConfig); + + secConfig.setForeignKeyDatabase(supplierDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator( + new ShipmentBySupplierKeyCreator(javaCatalog, + ShipmentKey.class, + ShipmentData.class, + SupplierKey.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + shipmentDb, secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. 
+ */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class SupplierByCityKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the supplier key class. + * @param valueClass is the supplier value class. + * @param indexKeyClass is the city key class. + */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + SupplierData supplierData = (SupplierData) valueInput; + return supplierData.getCity(); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentByPartKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the part key class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new PartKey(shipmentKey.getPartNumber()); + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class SerialSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys and value are all + * of the serial format. + */ + private static class ShipmentBySupplierKeyCreator + extends SerialSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param primaryKeyClass is the shipment key class. + * @param valueClass is the shipment value class. + * @param indexKeyClass is the supplier key class. 
+ */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class primaryKeyClass, + Class valueClass, + Class indexKeyClass) { + + super(catalog, primaryKeyClass, valueClass, indexKeyClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The part + * key is stored in the shipment key, so the shipment value is not + * used. + */ + public Object createSecondaryKey(Object primaryKeyInput, + Object valueInput) { + + ShipmentKey shipmentKey = (ShipmentKey) primaryKeyInput; + return new SupplierKey(shipmentKey.getSupplierNumber()); + } + } +} diff --git a/examples/collections/ship/index/SampleViews.java b/examples/collections/ship/index/SampleViews.java new file mode 100644 index 0000000..3ba600e --- /dev/null +++ b/examples/collections/ship/index/SampleViews.java @@ -0,0 +1,166 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.collections.StoredEntrySet; +import com.sleepycat.collections.StoredSortedMap; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, the stored key and data entries are used directly + // rather than mapping them to separate objects. Therefore, no binding + // classes are defined here and the SerialBinding class is used. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new SerialBinding(catalog, PartKey.class); + EntryBinding partDataBinding = + new SerialBinding(catalog, PartData.class); + EntryBinding supplierKeyBinding = + new SerialBinding(catalog, SupplierKey.class); + EntryBinding supplierDataBinding = + new SerialBinding(catalog, SupplierData.class); + EntryBinding shipmentKeyBinding = + new SerialBinding(catalog, ShipmentKey.class); + EntryBinding shipmentDataBinding = + new SerialBinding(catalog, ShipmentData.class); + EntryBinding cityKeyBinding = + new SerialBinding(catalog, String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is not used since the stores and indices are + // ordered by serialized key objects, which do not provide a very + // useful ordering. 
+ // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredEntrySet classes, which provide additional methods. The entry + // sets could be obtained directly from the Map.entrySet() method, but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public final StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public final StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public final StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entry set view of the part storage container. + */ + public final StoredEntrySet getPartEntrySet() { + + return (StoredEntrySet) partMap.entrySet(); + } + + /** + * Return an entry set view of the supplier storage container. + */ + public final StoredEntrySet getSupplierEntrySet() { + + return (StoredEntrySet) supplierMap.entrySet(); + } + + /** + * Return an entry set view of the shipment storage container. + */ + public final StoredEntrySet getShipmentEntrySet() { + + return (StoredEntrySet) shipmentMap.entrySet(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } +} diff --git a/examples/collections/ship/index/ShipmentData.java b/examples/collections/ship/index/ShipmentData.java new file mode 100644 index 0000000..4f0e007 --- /dev/null +++ b/examples/collections/ship/index/ShipmentData.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the data in the key/data pair for a shipment + * entity. + * + *
+ * In this sample, ShipmentData is used both as the stored data and as
+ * the object bound to that data. Because it is stored directly using the
+ * serial format, it must be Serializable.
    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/examples/collections/ship/index/ShipmentKey.java b/examples/collections/ship/index/ShipmentKey.java new file mode 100644 index 0000000..204231a --- /dev/null +++ b/examples/collections/ship/index/ShipmentKey.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.Serializable; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *
+ * In this sample, ShipmentKey is used both as the stored key and as the
+ * object bound to that key. Because it is stored directly using the
+ * serial format, it must be Serializable.
    + * + * @author Mark Hayes + */ +public class ShipmentKey implements Serializable { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/examples/collections/ship/index/SupplierData.java b/examples/collections/ship/index/SupplierData.java new file mode 100644 index 0000000..8db0aec --- /dev/null +++ b/examples/collections/ship/index/SupplierData.java @@ -0,0 +1,62 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.Serializable; + +/** + * A SupplierData serves as the data in the key/data pair for a supplier + * entity. + * + *
+ * In this sample, SupplierData is used both as the stored data and as
+ * the object bound to that data. Because it is stored directly using the
+ * serial format, it must be Serializable.
    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/index/SupplierKey.java b/examples/collections/ship/index/SupplierKey.java new file mode 100644 index 0000000..f5f748d --- /dev/null +++ b/examples/collections/ship/index/SupplierKey.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.Serializable; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *
+ * In this sample, SupplierKey is used both as the stored key and as the
+ * object bound to that key. Because it is stored directly using the
+ * serial format, it must be Serializable.
    + * + * @author Mark Hayes + */ +public class SupplierKey implements Serializable { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/index/Weight.java b/examples/collections/ship/index/Weight.java new file mode 100644 index 0000000..e99e675 --- /dev/null +++ b/examples/collections/ship/index/Weight.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.index; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *
+ * In this sample, Weight is embedded in part data values which are stored
+ * as Java serialized objects; therefore Weight must be Serializable.
    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/examples/collections/ship/marshal/MarshalledEntity.java b/examples/collections/ship/marshal/MarshalledEntity.java new file mode 100644 index 0000000..892d3b2 --- /dev/null +++ b/examples/collections/ship/marshal/MarshalledEntity.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * MarshalledEntity is implemented by entity (combined key/data) objects and + * called by {@link SampleViews.MarshalledEntityBinding}. In this sample, + * MarshalledEntity is implemented by {@link Part}, {@link Supplier}, and + * {@link Shipment}. This interface is package-protected rather than public + * to hide the marshalling interface from other users of the data objects. + * Note that a MarshalledEntity must also have a no arguments constructor so + * that it can be instantiated by the binding. + * + * @author Mark Hayes + */ +interface MarshalledEntity { + + /** + * Extracts the entity's primary key and writes it to the key output. + */ + void marshalPrimaryKey(TupleOutput keyOutput); + + /** + * Completes construction of the entity by setting its primary key from the + * stored primary key. + */ + void unmarshalPrimaryKey(TupleInput keyInput); + + /** + * Extracts the entity's index key and writes it to the key output. + */ + boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput); +} diff --git a/examples/collections/ship/marshal/MarshalledKey.java b/examples/collections/ship/marshal/MarshalledKey.java new file mode 100644 index 0000000..15e8cd9 --- /dev/null +++ b/examples/collections/ship/marshal/MarshalledKey.java @@ -0,0 +1,41 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * MarshalledKey is implemented by key objects and called by {@link + * SampleViews.MarshalledKeyBinding}. 
In this sample, MarshalledKey is + * implemented by {@link PartKey}, {@link SupplierKey}, and {@link + * ShipmentKey}. This interface is package-protected rather than public to + * hide the marshalling interface from other users of the data objects. Note + * that a MarshalledKey must also have a no arguments constructor so + * that it can be instantiated by the binding. + * + * @author Mark Hayes + */ +interface MarshalledKey { + + /** + * Construct the key tuple entry from the key object. + */ + void marshalKey(TupleOutput keyOutput); + + /** + * Construct the key object from the key tuple entry. + */ + void unmarshalKey(TupleInput keyInput); +} diff --git a/examples/collections/ship/marshal/Part.java b/examples/collections/ship/marshal/Part.java new file mode 100644 index 0000000..3191948 --- /dev/null +++ b/examples/collections/ship/marshal/Part.java @@ -0,0 +1,121 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *
+ * In this sample, Part is bound to the stored key/data entry by
+ * implementing the MarshalledEntity interface, which is called by {@link
+ * SampleViews.MarshalledEntityBinding}.
+ *
+ * The binding is "tricky" in that it uses this class for both the stored
+ * data entry and the combined entity object. To do this, the key field(s)
+ * are transient and are set by the binding after the data object has been
+ * deserialized. This avoids the use of a PartData class completely.
+ *
+ * Since this class is used directly for data storage, it must be
+ * Serializable.
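+ *
+ * A sketch, assuming the binding extends TupleSerialBinding, of how the
+ * transient key is restored when an entity is read:
+ * <pre>
+ * public Object entryToObject(TupleInput keyInput, Object dataInput) {
+ *     Part part = (Part) dataInput;       // non-key fields, deserialized
+ *     part.unmarshalPrimaryKey(keyInput); // restore the transient key
+ *     return part;
+ * }
+ * </pre>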
    + * + * @author Mark Hayes + */ +public class Part implements Serializable, MarshalledEntity { + + private transient String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + final void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } + + // --- MarshalledEntity implementation --- + + Part() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + throw new UnsupportedOperationException(keyName); + } +} diff --git a/examples/collections/ship/marshal/PartKey.java b/examples/collections/ship/marshal/PartKey.java new file mode 100644 index 0000000..0d1f462 --- /dev/null +++ b/examples/collections/ship/marshal/PartKey.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *
+ * In this sample, PartKey is bound to the stored key tuple entry by
+ * implementing the MarshalledKey interface, which is called by {@link
+ * SampleViews.MarshalledKeyBinding}.
    + * + * @author Mark Hayes + */ +public class PartKey implements MarshalledKey { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } + + // --- MarshalledKey implementation --- + + PartKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } +} diff --git a/examples/collections/ship/marshal/Sample.java b/examples/collections/ship/marshal/Sample.java new file mode 100644 index 0000000..38e2187 --- /dev/null +++ b/examples/collections/ship/marshal/Sample.java @@ -0,0 +1,238 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java collections.ship.marshal.Sample
+ *      [-h <home-directory>]
    + * 
    + * + *

The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. To specify a different home + * directory, use the -h option. The home directory must exist before + * running the sample. To recreate the sample database from scratch, delete + * all files in the home directory before running the sample.

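For orientation, here is a minimal sketch of the TransactionRunner pattern that the run() method below relies on. Both classes are from com.sleepycat.collections; the variable env is assumed to be an already-open transactional Environment.

    TransactionRunner runner = new TransactionRunner(env);
    runner.run(new TransactionWorker() {
        public void doWork() throws Exception {
            // everything here executes in a single transaction; the runner
            // commits on success, aborts on exception, and re-invokes the
            // worker when a deadlock is detected
        }
    });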
    + * + * @author Mark Hayes + */ +public class Sample { + + private final SampleDatabase db; + private final SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys.

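A condensed sketch of the duplicates() idiom used below, where views is the SampleViews instance: the returned collection is a live view over the index, and it is empty (not null) when no entity matches the key.

    java.util.Collection matches =
        views.getShipmentByPartMap().duplicates(new PartKey("P1"));
    for (java.util.Iterator i = matches.iterator(); i.hasNext();) {
        System.out.println(i.next()); // each element is a Shipment for part P1
    }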
    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } +} diff --git a/examples/collections/ship/marshal/SampleDatabase.java b/examples/collections/ship/marshal/SampleDatabase.java new file mode 100644 index 0000000..a54e48f --- /dev/null +++ b/examples/collections/ship/marshal/SampleDatabase.java @@ -0,0 +1,255 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import java.io.File; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialKeyCreator; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. 
+ // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog, + Supplier.class, + Supplier.CITY_KEY)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + supplierDb, secConfig); + + secConfig.setForeignKeyDatabase(partDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog, + Shipment.class, + Shipment.PART_KEY)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + shipmentDb, secConfig); + + secConfig.setForeignKeyDatabase(supplierDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(new MarshalledKeyCreator(javaCatalog, + Shipment.class, + Shipment.SUPPLIER_KEY)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + shipmentDb, secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for MarshalledEntity objects. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class MarshalledKeyCreator + extends TupleSerialKeyCreator { + + private String keyName; + + /** + * Construct the key creator. + * @param catalog is the class catalog. 
+ * @param valueClass is the supplier value class. + * @param keyName is the key name passed to the marshalling methods. + */ + private MarshalledKeyCreator(ClassCatalog catalog, + Class valueClass, + String keyName) { + + super(catalog, valueClass); + this.keyName = keyName; + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + // the primary key is unmarshalled before marshalling the index + // key, to account for cases where the index key is composed of + // data elements from the primary key + MarshalledEntity entity = (MarshalledEntity) valueInput; + entity.unmarshalPrimaryKey(primaryKeyInput); + return entity.marshalSecondaryKey(keyName, indexKeyOutput); + } + } +} diff --git a/examples/collections/ship/marshal/SampleViews.java b/examples/collections/ship/marshal/SampleViews.java new file mode 100644 index 0000000..83ed503 --- /dev/null +++ b/examples/collections/ship/marshal/SampleViews.java @@ -0,0 +1,281 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object; a "tricky" binding + // that uses transient fields is used--see PartBinding, etc, for + // details. For keys, a one-to-one binding is implemented with + // EntryBinding classes to bind the stored tuple entry to a key Object. 
+ // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new MarshalledKeyBinding(PartKey.class); + EntityBinding partDataBinding = + new MarshalledEntityBinding(catalog, Part.class); + EntryBinding supplierKeyBinding = + new MarshalledKeyBinding(SupplierKey.class); + EntityBinding supplierDataBinding = + new MarshalledEntityBinding(catalog, Supplier.class); + EntryBinding shipmentKeyBinding = + new MarshalledKeyBinding(ShipmentKey.class); + EntityBinding shipmentDataBinding = + new MarshalledEntityBinding(catalog, Shipment.class); + EntryBinding cityKeyBinding = + TupleBinding.getPrimitiveBinding(String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). + // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods. The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * MarshalledKeyBinding is used to bind the stored key tuple entry to a key + * object representation. To do this, it calls the MarshalledKey interface + * implemented by the key class. 
+ */ + private static class MarshalledKeyBinding extends TupleBinding { + + private Class keyClass; + + /** + * Construct the binding object. + */ + private MarshalledKeyBinding(Class keyClass) { + + // The key class will be used to instantiate the key object. + // + if (!MarshalledKey.class.isAssignableFrom(keyClass)) { + throw new IllegalArgumentException(keyClass.toString() + + " does not implement MarshalledKey"); + } + this.keyClass = keyClass; + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + try { + MarshalledKey key = (MarshalledKey) keyClass.newInstance(); + key.unmarshalKey(input); + return key; + } catch (IllegalAccessException e) { + throw new RuntimeExceptionWrapper(e); + } catch (InstantiationException e) { + throw new RuntimeExceptionWrapper(e); + } + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + MarshalledKey key = (MarshalledKey) object; + key.marshalKey(output); + } + } + + /** + * MarshalledEntityBinding is used to bind the stored key/data entry pair + * to a combined to an entity object representation. To do this, it calls + * the MarshalledEntity interface implemented by the entity class. + * + *

The binding is "tricky" in that it uses the entity class for both + * the stored data entry and the combined entity object. To do this, the + * entity's key field(s) are transient and are set by the binding after the + * data object has been deserialized. This avoids the use of a "data" class + * completely.

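The transient-key trick can be seen in isolation with plain Java serialization. This sketch (the class name TransientKeyDemo is invented for illustration) shows that the key bytes never enter the serialized data entry and must be restored from the stored key entry afterwards, which is what entryToObject below does.

    package collections.ship.marshal;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class TransientKeyDemo {
        public static void main(String[] args) throws Exception {
            Part part = new Part("P1", "Nut", "Red",
                                 new Weight(12.0, Weight.GRAMS), "London");

            // Serialize and deserialize, as the serial binding would.
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bytes);
            out.writeObject(part);
            out.flush();
            ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(bytes.toByteArray()));
            Part copy = (Part) in.readObject();
            System.out.println(copy.getNumber()); // null: transient, not stored

            // The binding then restores the key from the stored key entry.
            TupleOutput keyEntry = new TupleOutput();
            keyEntry.writeString("P1");
            copy.unmarshalPrimaryKey(new TupleInput(keyEntry));
            System.out.println(copy.getNumber()); // P1
        }
    }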
    + */ + private static class MarshalledEntityBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private MarshalledEntityBinding(ClassCatalog classCatalog, + Class entityClass) { + + super(classCatalog, entityClass); + + // The entity class will be used to instantiate the entity object. + // + if (!MarshalledEntity.class.isAssignableFrom(entityClass)) { + throw new IllegalArgumentException(entityClass.toString() + + " does not implement MarshalledEntity"); + } + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput tupleInput, Object javaInput) { + + MarshalledEntity entity = (MarshalledEntity) javaInput; + entity.unmarshalPrimaryKey(tupleInput); + return entity; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + MarshalledEntity entity = (MarshalledEntity) object; + entity.marshalPrimaryKey(output); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } +} diff --git a/examples/collections/ship/marshal/Shipment.java b/examples/collections/ship/marshal/Shipment.java new file mode 100644 index 0000000..bbd840c --- /dev/null +++ b/examples/collections/ship/marshal/Shipment.java @@ -0,0 +1,118 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

    In this sample, Shipment is bound to the stored key/data entry by + * implementing the MarshalledEntity interface, which is called by {@link + * SampleViews.MarshalledEntityBinding}.

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a ShipmentData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

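A small sketch of how the secondary key creator exercises the Shipment class below. The fragment must live in this package, since PART_KEY is package-private; the variable names are invented for illustration.

    Shipment shipment = new Shipment("P1", "S1", 300);
    TupleOutput indexKey = new TupleOutput();
    boolean present = shipment.marshalSecondaryKey(Shipment.PART_KEY, indexKey);
    // present is true and indexKey now holds the tuple-encoded part number,
    // which becomes this shipment's entry in the shipment-by-part index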
    + * + * @author Mark Hayes + */ +public class Shipment implements Serializable, MarshalledEntity { + + static final String PART_KEY = "part"; + static final String SUPPLIER_KEY = "supplier"; + + private transient String partNumber; + private transient String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } + + // --- MarshalledEntity implementation --- + + Shipment() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(PART_KEY)) { + keyOutput.writeString(this.partNumber); + return true; + } else if (keyName.equals(SUPPLIER_KEY)) { + keyOutput.writeString(this.supplierNumber); + return true; + } else { + throw new UnsupportedOperationException(keyName); + } + } +} diff --git a/examples/collections/ship/marshal/ShipmentKey.java b/examples/collections/ship/marshal/ShipmentKey.java new file mode 100644 index 0000000..e4ce932 --- /dev/null +++ b/examples/collections/ship/marshal/ShipmentKey.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

    In this sample, ShipmentKey is bound to the stored key tuple entry by + * implementing the MarshalledKey interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}.

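Note that marshalKey below writes the part number before the supplier number; because tuple entries compare byte by byte, this makes the shipment store sort primarily by part. A sketch:

    ShipmentKey key = new ShipmentKey("P1", "S1");
    TupleOutput out = new TupleOutput();
    key.marshalKey(out); // encodes "P1" first, then "S1"
    // swapping the two writeString calls would reorder the entire store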
    + * + * @author Mark Hayes + */ +public class ShipmentKey implements MarshalledKey { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } + + // --- MarshalledKey implementation --- + + ShipmentKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalKey(TupleInput keyInput) { + + this.partNumber = keyInput.readString(); + this.supplierNumber = keyInput.readString(); + } + + public void marshalKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.partNumber); + keyOutput.writeString(this.supplierNumber); + } +} diff --git a/examples/collections/ship/marshal/Supplier.java b/examples/collections/ship/marshal/Supplier.java new file mode 100644 index 0000000..99d8687 --- /dev/null +++ b/examples/collections/ship/marshal/Supplier.java @@ -0,0 +1,123 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

    In this sample, Supplier is bound to the stored key/data entry by + * implementing the MarshalledEntity interface, which is called by {@link + * SampleViews.MarshalledEntityBinding}.

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a SupplierData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

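One detail worth calling out in the Supplier class below: marshalSecondaryKey returns false when the city is null, which tells the key creator to skip the index entry entirely. A sketch, assuming this package (S9 and Doe are made-up values):

    Supplier supplier = new Supplier("S9", "Doe", 10, null);
    TupleOutput out = new TupleOutput();
    boolean indexed = supplier.marshalSecondaryKey(Supplier.CITY_KEY, out);
    // indexed is false: S9 simply has no entry in the supplier-by-city index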
    + * + * @author Mark Hayes + */ +public class Supplier implements Serializable, MarshalledEntity { + + static final String CITY_KEY = "city"; + + private transient String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } + + // --- MarshalledEntity implementation --- + + Supplier() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if (keyName.equals(CITY_KEY)) { + if (this.city != null) { + keyOutput.writeString(this.city); + return true; + } else { + return false; + } + } else { + throw new UnsupportedOperationException(keyName); + } + } +} diff --git a/examples/collections/ship/marshal/SupplierKey.java b/examples/collections/ship/marshal/SupplierKey.java new file mode 100644 index 0000000..b670714 --- /dev/null +++ b/examples/collections/ship/marshal/SupplierKey.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

    In this sample, SupplierKey is bound to the stored key tuple entry by + * implementing the MarshalledKey interface, which is called by {@link + * SampleViews.MarshalledKeyBinding}.

    + * + * @author Mark Hayes + */ +public class SupplierKey implements MarshalledKey { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } + + // --- MarshalledKey implementation --- + + SupplierKey() { + + // A no-argument constructor is necessary only to allow the binding to + // instantiate objects of this class. + } + + public void unmarshalKey(TupleInput keyInput) { + + this.number = keyInput.readString(); + } + + public void marshalKey(TupleOutput keyOutput) { + + keyOutput.writeString(this.number); + } +} diff --git a/examples/collections/ship/marshal/Weight.java b/examples/collections/ship/marshal/Weight.java new file mode 100644 index 0000000..f470235 --- /dev/null +++ b/examples/collections/ship/marshal/Weight.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.marshal; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Java serialized objects; therefore Weight must be Serializable.

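The practical consequence: when a part is added, its Weight is written into the same serialized data entry, so a non-Serializable field type would make the add fail at bind time. A sketch, where views is the SampleViews instance and P7/Gear/Oslo are made-up values:

    views.getPartSet().add(new Part("P7", "Gear", "Grey",
                                    new Weight(21.0, Weight.OUNCES), "Oslo"));
    // the Weight instance is serialized inside the Part data entry; if Weight
    // were not Serializable, the add would fail with a wrapped
    // java.io.NotSerializableException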
    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/examples/collections/ship/sentity/Part.java b/examples/collections/ship/sentity/Part.java new file mode 100644 index 0000000..2f4ff8b --- /dev/null +++ b/examples/collections/ship/sentity/Part.java @@ -0,0 +1,95 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +import java.io.Serializable; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

    In this sample, Part is created from the stored key/data entry using a + * TupleSerialEntityBinding. See {@link SampleViews.PartBinding} for details. + *

    + * + *

    The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a PartData class completely.

    + * + *

    Since this class is used directly for data storage, it must be + * Serializable.

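A sketch of the entryToObject logic that the entity binding applies to this class; SampleViews.PartBinding, shown later in this file, does essentially this:

    public Object entryToObject(TupleInput keyInput, Object dataInput) {
        String number = keyInput.readString(); // read the key tuple
        Part part = (Part) dataInput;          // deserialized value, number == null
        part.setKey(number);                   // restore the transient key field
        return part;
    }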
    + * + * @author Mark Hayes + */ +public class Part implements Serializable { + + private transient String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + final void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/sentity/PartKey.java b/examples/collections/ship/sentity/PartKey.java new file mode 100644 index 0000000..2edbf16 --- /dev/null +++ b/examples/collections/ship/sentity/PartKey.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

    In this sample, PartKey is bound to the key's tuple storage entry using + * a TupleBinding. Because it is not used directly as storage data, it does + * not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class PartKey { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/sentity/Sample.java b/examples/collections/ship/sentity/Sample.java new file mode 100644 index 0000000..1479bd6 --- /dev/null +++ b/examples/collections/ship/sentity/Sample.java @@ -0,0 +1,238 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
    + * java collections.ship.sentity.Sample
+ *      [-h <home-directory>]
    + * 
    + * + *

The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. To specify a different home + * directory, use the -h option. The home directory must exist before + * running the sample. To recreate the sample database from scratch, delete + * all files in the home directory before running the sample.

    + * + * @author Mark Hayes + */ +public class Sample { + + private final SampleDatabase db; + private final SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

    Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys.

    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } +} diff --git a/examples/collections/ship/sentity/SampleDatabase.java b/examples/collections/ship/sentity/SampleDatabase.java new file mode 100644 index 0000000..811ba12 --- /dev/null +++ b/examples/collections/ship/sentity/SampleDatabase.java @@ -0,0 +1,318 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +import java.io.File; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialKeyCreator; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. 
+ // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog, + Supplier.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + supplierDb, secConfig); + + secConfig.setForeignKeyDatabase(partDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog, + Shipment.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + shipmentDb, secConfig); + + secConfig.setForeignKeyDatabase(supplierDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(new ShipmentBySupplierKeyCreator(javaCatalog, + Shipment.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + shipmentDb, secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class SupplierByCityKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param valueClass is the supplier value class. 
+ */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class valueClass) { + + super(catalog, valueClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + Supplier supplier = (Supplier) valueInput; + String city = supplier.getCity(); + if (city != null) { + indexKeyOutput.writeString(supplier.getCity()); + return true; + } else { + return false; + } + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentByPartKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + String partNumber = primaryKeyInput.readString(); + // don't bother reading the supplierNumber + indexKeyOutput.writeString(partNumber); + return true; + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentBySupplierKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The + * supplier key is stored in the shipment key, so the shipment value is + * not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + primaryKeyInput.readString(); // skip the partNumber + String supplierNumber = primaryKeyInput.readString(); + indexKeyOutput.writeString(supplierNumber); + return true; + } + } +} diff --git a/examples/collections/ship/sentity/SampleViews.java b/examples/collections/ship/sentity/SampleViews.java new file mode 100644 index 0000000..f7d9879 --- /dev/null +++ b/examples/collections/ship/sentity/SampleViews.java @@ -0,0 +1,424 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package collections.ship.sentity; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object; a "tricky" binding + // that uses transient fields is used--see PartBinding, etc, for + // details. For keys, a one-to-one binding is implemented with + // EntryBinding classes to bind the stored tuple entry to a key Object. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new PartKeyBinding(); + EntityBinding partDataBinding = + new PartBinding(catalog, Part.class); + EntryBinding supplierKeyBinding = + new SupplierKeyBinding(); + EntityBinding supplierDataBinding = + new SupplierBinding(catalog, Supplier.class); + EntryBinding shipmentKeyBinding = + new ShipmentKeyBinding(); + EntityBinding shipmentDataBinding = + new ShipmentBinding(catalog, Shipment.class); + EntryBinding cityKeyBinding = + TupleBinding.getPrimitiveBinding(String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). + // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods. The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. + + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. 
+ */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * PartKeyBinding is used to bind the stored key tuple entry for a part to + * a key object representation. + */ + private static class PartKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private PartKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new PartKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + PartKey key = (PartKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * PartBinding is used to bind the stored key/data entry pair for a part + * to a combined data object (entity). + * + *

<p>The binding is "tricky" in that it uses the Part class for both the + * stored data entry and the combined entity object. To do this, Part's + * key field(s) are transient and are set by the binding after the data + * object has been deserialized. This avoids the use of a PartData class + * completely.</p>
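To see why the trick works, here is a minimal, self-contained sketch (not part of the sample) that relies only on standard java.io serialization semantics: the transient key is skipped on write, which is what lets the entity double as the stored data, and the binding restores the key afterwards. The class and values are illustrative.

    import java.io.*;

    class TransientKeyDemo {
        // Mirrors the sample's pattern: transient primary key, ordinary data.
        static class Entity implements Serializable {
            transient String key;
            String payload;
            Entity(String key, String payload) { this.key = key; this.payload = payload; }
            void setKey(String key) { this.key = key; } // what the binding calls
        }

        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bytes);
            out.writeObject(new Entity("P1", "Nut"));
            out.flush();
            Entity copy = (Entity) new ObjectInputStream(
                new ByteArrayInputStream(bytes.toByteArray())).readObject();
            System.out.println(copy.key); // null -- the transient key was not stored
            copy.setKey("P1");            // the binding patches it back from the key entry
            System.out.println(copy.key); // P1
        }
    }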
    + */ + private static class PartBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private PartBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + Part part = (Part) dataInput; + part.setKey(number); + return part; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Part part = (Part) object; + output.writeString(part.getNumber()); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } + + /** + * SupplierKeyBinding is used to bind the stored key tuple entry for a + * supplier to a key object representation. + */ + private static class SupplierKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private SupplierKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new SupplierKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + SupplierKey key = (SupplierKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * SupplierBinding is used to bind the stored key/data entry pair for a + * supplier to a combined data object (entity). + * + *

<p>The binding is "tricky" in that it uses the Supplier class for both + * the stored data entry and the combined entity object. To do this, + * Supplier's key field(s) are transient and are set by the binding after + * the data object has been deserialized. This avoids the use of a + * SupplierData class completely.</p>
    + */ + private static class SupplierBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private SupplierBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + Supplier supplier = (Supplier) dataInput; + supplier.setKey(number); + return supplier; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Supplier supplier = (Supplier) object; + output.writeString(supplier.getNumber()); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } + + /** + * ShipmentKeyBinding is used to bind the stored key tuple entry for a + * shipment to a key object representation. + */ + private static class ShipmentKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private ShipmentKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String partNumber = input.readString(); + String supplierNumber = input.readString(); + return new ShipmentKey(partNumber, supplierNumber); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + ShipmentKey key = (ShipmentKey) object; + output.writeString(key.getPartNumber()); + output.writeString(key.getSupplierNumber()); + } + } + + /** + * ShipmentBinding is used to bind the stored key/data entry pair for a + * shipment to a combined data object (entity). + * + *

<p>The binding is "tricky" in that it uses the Shipment class for both + * the stored data entry and the combined entity object. To do this, + * Shipment's key field(s) are transient and are set by the binding after + * the data object has been deserialized. This avoids the use of a + * ShipmentData class completely.</p>
    + */ + private static class ShipmentBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private ShipmentBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String partNumber = keyInput.readString(); + String supplierNumber = keyInput.readString(); + Shipment shipment = (Shipment) dataInput; + shipment.setKey(partNumber, supplierNumber); + return shipment; + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Shipment shipment = (Shipment) object; + output.writeString(shipment.getPartNumber()); + output.writeString(shipment.getSupplierNumber()); + } + + /** + * Return the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + public Object objectToData(Object object) { + + return object; + } + } +} diff --git a/examples/collections/ship/sentity/Shipment.java b/examples/collections/ship/sentity/Shipment.java new file mode 100644 index 0000000..245ac9a --- /dev/null +++ b/examples/collections/ship/sentity/Shipment.java @@ -0,0 +1,80 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +import java.io.Serializable; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

<p>In this sample, Shipment is created from the stored key/data entry + * using a TupleSerialBinding. See {@link SampleViews.ShipmentBinding} for + * details.</p> + * + * <p>The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) + * are transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a ShipmentData class completely.</p> + * + * <p>Since this class is used directly for data storage, it must be + * Serializable.</p>
    + * + * @author Mark Hayes + */ +public class Shipment implements Serializable { + + private transient String partNumber; + private transient String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } +} diff --git a/examples/collections/ship/sentity/ShipmentKey.java b/examples/collections/ship/sentity/ShipmentKey.java new file mode 100644 index 0000000..b5c2300 --- /dev/null +++ b/examples/collections/ship/sentity/ShipmentKey.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

<p>In this sample, ShipmentKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.</p>
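Because the key tuple writes the part number first, shipments sort primarily by part and only then by supplier. A small sketch of the raw tuple round trip, using the same TupleInput/TupleOutput classes the sample's bindings already use (values illustrative):

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    class ShipmentKeyTupleDemo {
        public static void main(String[] args) {
            // The stored key entry is simply two UTF strings in order.
            TupleOutput out = new TupleOutput();
            out.writeString("P1"); // partNumber first: dominates the sort order
            out.writeString("S1"); // supplierNumber second

            TupleInput in = new TupleInput(out.toByteArray());
            ShipmentKey key = new ShipmentKey(in.readString(), in.readString());
            System.out.println(key); // [ShipmentKey: supplier=S1 part=P1]
        }
    }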
    + * + * @author Mark Hayes + */ +public class ShipmentKey { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/examples/collections/ship/sentity/Supplier.java b/examples/collections/ship/sentity/Supplier.java new file mode 100644 index 0000000..52362c7 --- /dev/null +++ b/examples/collections/ship/sentity/Supplier.java @@ -0,0 +1,87 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +import java.io.Serializable; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

<p>In this sample, Supplier is created from the stored key/data entry + * using a TupleSerialBinding. See {@link SampleViews.SupplierBinding} for + * details.</p> + * + * <p>The binding is "tricky" in that it uses this class for both the stored + * data entry and the combined entity object. To do this, the key field(s) are + * transient and are set by the binding after the data object has been + * deserialized. This avoids the use of a SupplierData class completely.</p> + * + * <p>Since this class is used directly for data storage, it must be + * Serializable.</p>
    + * + * @author Mark Hayes + */ +public class Supplier implements Serializable { + + private transient String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + /** + * Set the transient key fields after deserializing. This method is only + * called by data bindings. + */ + void setKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/sentity/SupplierKey.java b/examples/collections/ship/sentity/SupplierKey.java new file mode 100644 index 0000000..8e04c72 --- /dev/null +++ b/examples/collections/ship/sentity/SupplierKey.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

<p>In this sample, SupplierKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.</p>
    + * + * @author Mark Hayes + */ +public class SupplierKey { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/sentity/Weight.java b/examples/collections/ship/sentity/Weight.java new file mode 100644 index 0000000..db222fd --- /dev/null +++ b/examples/collections/ship/sentity/Weight.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.sentity; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

<p>In this sample, Weight is embedded in part data values which are stored + * as Java serialized objects; therefore Weight must be Serializable.</p>
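A hedged aside, not present in the sample: because Weight travels through standard Java serialization, changing the shape of the class changes the stored stream, and pinning an explicit serialVersionUID is the usual guard so that compatible changes (such as an added field) can still read old records instead of failing with InvalidClassException.

    import java.io.Serializable;

    // Illustrative only -- the sample's Weight does not declare this field.
    public class Weight implements Serializable {
        private static final long serialVersionUID = 1L; // pin the stream version

        private double amount;
        private String units;

        public Weight(double amount, String units) {
            this.amount = amount;
            this.units = units;
        }
    }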
    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/examples/collections/ship/tuple/Part.java b/examples/collections/ship/tuple/Part.java new file mode 100644 index 0000000..504a771 --- /dev/null +++ b/examples/collections/ship/tuple/Part.java @@ -0,0 +1,77 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +/** + * A Part represents the combined key/data pair for a part entity. + * + *

<p>In this sample, Part is created from the stored key/data entry using a + * TupleSerialBinding. See {@link SampleViews.PartBinding} for details. + * Since this class is not directly used for data storage, it does not need to + * be Serializable.</p>
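A brief usage sketch, assuming a SampleDatabase/SampleViews pair opened as in Sample.java and code running inside a TransactionWorker; the part "P9" and its attributes are invented for illustration. On write the binding splits the entity into a PartKey tuple and a serialized PartData value; on read it recombines them.

    // Inside TransactionWorker.doWork(), with 'views' as in Sample.java:
    views.getPartSet().add(
        new Part("P9", "Washer", "Grey", new Weight(2.0, Weight.GRAMS), "Oslo"));

    // Reading through the map view recombines key and data into one entity.
    Part p = (Part) views.getPartMap().get(new PartKey("P9"));
    System.out.println(p); // [Part: number=P9 name=Washer color=Grey ...]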
    + * + * @author Mark Hayes + */ +public class Part { + + private String number; + private String name; + private String color; + private Weight weight; + private String city; + + public Part(String number, String name, String color, Weight weight, + String city) { + + this.number = number; + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Part: number=" + number + + " name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/tuple/PartData.java b/examples/collections/ship/tuple/PartData.java new file mode 100644 index 0000000..f53e11a --- /dev/null +++ b/examples/collections/ship/tuple/PartData.java @@ -0,0 +1,70 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +import java.io.Serializable; + +/** + * A PartData serves as the value in the key/value pair for a part entity. + * + *

<p>In this sample, PartData is used only as the storage data for the + * value, while the Part object is used as the value's object representation. + * Because it is used directly as storage data using serial format, it must be + * Serializable.</p>
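What the serial format buys, sketched here with SerialBinding (a simpler relative of the TupleSerialBinding this sample actually uses; the variable names are illustrative): each record carries only a compact format identifier plus the serialized fields, while the full class description is written once into the shared catalog database.

    // Assuming env and dbConfig are configured as in SampleDatabase:
    Database catalogDb = env.openDatabase(null, "java_class_catalog", dbConfig);
    StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);

    // com.sleepycat.bind.serial.SerialBinding: plain serialized values.
    SerialBinding partDataBinding = new SerialBinding(catalog, PartData.class);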
    + * + * @author Mark Hayes + */ +public class PartData implements Serializable { + + private String name; + private String color; + private Weight weight; + private String city; + + public PartData(String name, String color, Weight weight, String city) { + + this.name = name; + this.color = color; + this.weight = weight; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final String getColor() { + + return color; + } + + public final Weight getWeight() { + + return weight; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[PartData: name=" + name + + " color=" + color + + " weight=" + weight + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/tuple/PartKey.java b/examples/collections/ship/tuple/PartKey.java new file mode 100644 index 0000000..062856b --- /dev/null +++ b/examples/collections/ship/tuple/PartKey.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +/** + * A PartKey serves as the key in the key/data pair for a part entity. + * + *

<p>In this sample, PartKey is bound to the key's tuple storage entry using + * a TupleBinding. Because it is not used directly as storage data, it does + * not need to be Serializable.</p>
    + * + * @author Mark Hayes + */ +public class PartKey { + + private String number; + + public PartKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[PartKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/tuple/Sample.java b/examples/collections/ship/tuple/Sample.java new file mode 100644 index 0000000..89e59af --- /dev/null +++ b/examples/collections/ship/tuple/Sample.java @@ -0,0 +1,237 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +import java.util.Iterator; +import java.util.Set; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.DatabaseException; + +/** + * Sample is the main entry point for the sample program and may be run as + * follows: + * + *
<pre> + * java collections.ship.tuple.Sample + *      [-h <home-directory> ] + * </pre>

<p>The default for the home directory is ./tmp -- the tmp subdirectory of + * the current directory where the sample is run. The home directory must exist + * before running the sample. To recreate the sample database from scratch, + * delete all files in the home directory before running the sample.</p>
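Because the environment open fails when the directory is missing, a minimal pre-flight check (path illustrative) is simply the following, after which the sample can be run as, for example, java collections.ship.tuple.Sample -h ./tmp:

    // Create the environment home directory if it does not already exist.
    java.io.File home = new java.io.File("./tmp");
    if (!home.exists() && !home.mkdirs()) {
        throw new IllegalStateException("cannot create " + home);
    }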
    + * + * @author Mark Hayes + */ +public class Sample { + + private final SampleDatabase db; + private final SampleViews views; + + /** + * Run the sample program. + */ + public static void main(String[] args) { + + System.out.println("\nRunning sample: " + Sample.class); + + // Parse the command line arguments. + // + String homeDir = "./tmp"; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h") && i < args.length - 1) { + i += 1; + homeDir = args[i]; + } else { + System.err.println("Usage:\n java " + Sample.class.getName() + + "\n [-h ]"); + System.exit(2); + } + } + + // Run the sample. + // + Sample sample = null; + try { + sample = new Sample(homeDir); + sample.run(); + } catch (Exception e) { + // If an exception reaches this point, the last transaction did not + // complete. If the exception is RunRecoveryException, follow + // the Berkeley DB recovery procedures before running again. + e.printStackTrace(); + } finally { + if (sample != null) { + try { + // Always attempt to close the database cleanly. + sample.close(); + } catch (Exception e) { + System.err.println("Exception during database close:"); + e.printStackTrace(); + } + } + } + } + + /** + * Open the database and views. + */ + private Sample(String homeDir) + throws DatabaseException { + + db = new SampleDatabase(homeDir); + views = new SampleViews(db); + } + + /** + * Close the database cleanly. + */ + private void close() + throws DatabaseException { + + db.close(); + } + + /** + * Run two transactions to populate and print the database. A + * TransactionRunner is used to ensure consistent handling of transactions, + * including deadlock retries. But the best transaction handling mechanism + * to use depends on the application. + */ + private void run() + throws Exception { + + TransactionRunner runner = new TransactionRunner(db.getEnvironment()); + runner.run(new PopulateDatabase()); + runner.run(new PrintDatabase()); + } + + /** + * Populate the database in a single transaction. + */ + private class PopulateDatabase implements TransactionWorker { + + public void doWork() { + addSuppliers(); + addParts(); + addShipments(); + } + } + + /** + * Print the database in a single transaction. All entities are printed + * and the indices are used to print the entities for certain keys. + * + *

<p>Note the use of special iterator() methods. These are used here + * with indices to find the shipments for certain keys.</p>
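A hedged aside on resource handling: the plain iterator() calls used below return block iterators that do not pin a database cursor, but the related storedIterator() method does, and such iterators should be closed. StoredIterator.close(Iterator) is documented as a no-op for ordinary iterators, so a defensive pattern looks like:

    Iterator i = views.getShipmentSet().storedIterator();
    try {
        while (i.hasNext()) {
            System.out.println(i.next());
        }
    } finally {
        StoredIterator.close(i); // releases the underlying cursor
    }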
    + */ + private class PrintDatabase implements TransactionWorker { + + public void doWork() { + printValues("Parts", + views.getPartSet().iterator()); + printValues("Suppliers", + views.getSupplierSet().iterator()); + printValues("Suppliers for City Paris", + views.getSupplierByCityMap().duplicates( + "Paris").iterator()); + printValues("Shipments", + views.getShipmentSet().iterator()); + printValues("Shipments for Part P1", + views.getShipmentByPartMap().duplicates( + new PartKey("P1")).iterator()); + printValues("Shipments for Supplier S1", + views.getShipmentBySupplierMap().duplicates( + new SupplierKey("S1")).iterator()); + } + } + + /** + * Populate the part entities in the database. If the part set is not + * empty, assume that this has already been done. + */ + private void addParts() { + + Set parts = views.getPartSet(); + if (parts.isEmpty()) { + System.out.println("Adding Parts"); + parts.add(new Part("P1", "Nut", "Red", + new Weight(12.0, Weight.GRAMS), "London")); + parts.add(new Part("P2", "Bolt", "Green", + new Weight(17.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P3", "Screw", "Blue", + new Weight(17.0, Weight.GRAMS), "Rome")); + parts.add(new Part("P4", "Screw", "Red", + new Weight(14.0, Weight.GRAMS), "London")); + parts.add(new Part("P5", "Cam", "Blue", + new Weight(12.0, Weight.GRAMS), "Paris")); + parts.add(new Part("P6", "Cog", "Red", + new Weight(19.0, Weight.GRAMS), "London")); + } + } + + /** + * Populate the supplier entities in the database. If the supplier set is + * not empty, assume that this has already been done. + */ + private void addSuppliers() { + + Set suppliers = views.getSupplierSet(); + if (suppliers.isEmpty()) { + System.out.println("Adding Suppliers"); + suppliers.add(new Supplier("S1", "Smith", 20, "London")); + suppliers.add(new Supplier("S2", "Jones", 10, "Paris")); + suppliers.add(new Supplier("S3", "Blake", 30, "Paris")); + suppliers.add(new Supplier("S4", "Clark", 20, "London")); + suppliers.add(new Supplier("S5", "Adams", 30, "Athens")); + } + } + + /** + * Populate the shipment entities in the database. If the shipment set + * is not empty, assume that this has already been done. + */ + private void addShipments() { + + Set shipments = views.getShipmentSet(); + if (shipments.isEmpty()) { + System.out.println("Adding Shipments"); + shipments.add(new Shipment("P1", "S1", 300)); + shipments.add(new Shipment("P2", "S1", 200)); + shipments.add(new Shipment("P3", "S1", 400)); + shipments.add(new Shipment("P4", "S1", 200)); + shipments.add(new Shipment("P5", "S1", 100)); + shipments.add(new Shipment("P6", "S1", 100)); + shipments.add(new Shipment("P1", "S2", 300)); + shipments.add(new Shipment("P2", "S2", 400)); + shipments.add(new Shipment("P2", "S3", 200)); + shipments.add(new Shipment("P2", "S4", 200)); + shipments.add(new Shipment("P4", "S4", 300)); + shipments.add(new Shipment("P5", "S4", 400)); + } + } + + /** + * Print the objects returned by an iterator of entity value objects. + */ + private void printValues(String label, Iterator iterator) { + + System.out.println("\n--- " + label + " ---"); + while (iterator.hasNext()) { + System.out.println(iterator.next().toString()); + } + } +} diff --git a/examples/collections/ship/tuple/SampleDatabase.java b/examples/collections/ship/tuple/SampleDatabase.java new file mode 100644 index 0000000..5d7992f --- /dev/null +++ b/examples/collections/ship/tuple/SampleDatabase.java @@ -0,0 +1,318 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +import java.io.File; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialKeyCreator; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; + +/** + * SampleDatabase defines the storage containers, indices and foreign keys + * for the sample database. + * + * @author Mark Hayes + */ +public class SampleDatabase { + + private static final String CLASS_CATALOG = "java_class_catalog"; + private static final String SUPPLIER_STORE = "supplier_store"; + private static final String PART_STORE = "part_store"; + private static final String SHIPMENT_STORE = "shipment_store"; + private static final String SHIPMENT_PART_INDEX = "shipment_part_index"; + private static final String SHIPMENT_SUPPLIER_INDEX = + "shipment_supplier_index"; + private static final String SUPPLIER_CITY_INDEX = "supplier_city_index"; + + private Environment env; + private Database partDb; + private Database supplierDb; + private Database shipmentDb; + private SecondaryDatabase supplierByCityDb; + private SecondaryDatabase shipmentByPartDb; + private SecondaryDatabase shipmentBySupplierDb; + private StoredClassCatalog javaCatalog; + + /** + * Open all storage containers, indices, and catalogs. + */ + public SampleDatabase(String homeDirectory) + throws DatabaseException { + + // Open the Berkeley DB environment in transactional mode. + // + System.out.println("Opening environment in: " + homeDirectory); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(new File(homeDirectory), envConfig); + + // Set the Berkeley DB config for opening all stores. + // + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + // Create the Serial class catalog. This holds the serialized class + // format for all database records of serial format. + // + Database catalogDb = env.openDatabase(null, CLASS_CATALOG, dbConfig); + javaCatalog = new StoredClassCatalog(catalogDb); + + // Open the Berkeley DB database for the part, supplier and shipment + // stores. The stores are opened with no duplicate keys allowed. + // + partDb = env.openDatabase(null, PART_STORE, dbConfig); + + supplierDb = env.openDatabase(null, SUPPLIER_STORE, dbConfig); + + shipmentDb = env.openDatabase(null, SHIPMENT_STORE, dbConfig); + + // Open the SecondaryDatabase for the city index of the supplier store, + // and for the part and supplier indices of the shipment store. 
+ // Duplicate keys are allowed since more than one supplier may be in + // the same city, and more than one shipment may exist for the same + // supplier or part. A foreign key constraint is defined for the + // supplier and part indices to ensure that a shipment only refers to + // existing part and supplier keys. The CASCADE delete action means + // that shipments will be deleted if their associated part or supplier + // is deleted. + // + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + + secConfig.setKeyCreator(new SupplierByCityKeyCreator(javaCatalog, + SupplierData.class)); + supplierByCityDb = env.openSecondaryDatabase(null, SUPPLIER_CITY_INDEX, + supplierDb, secConfig); + + secConfig.setForeignKeyDatabase(partDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(new ShipmentByPartKeyCreator(javaCatalog, + ShipmentData.class)); + shipmentByPartDb = env.openSecondaryDatabase(null, SHIPMENT_PART_INDEX, + shipmentDb, secConfig); + + secConfig.setForeignKeyDatabase(supplierDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); + secConfig.setKeyCreator(new ShipmentBySupplierKeyCreator(javaCatalog, + ShipmentData.class)); + shipmentBySupplierDb = env.openSecondaryDatabase(null, + SHIPMENT_SUPPLIER_INDEX, + shipmentDb, secConfig); + } + + /** + * Return the storage environment for the database. + */ + public final Environment getEnvironment() { + + return env; + } + + /** + * Return the class catalog. + */ + public final StoredClassCatalog getClassCatalog() { + + return javaCatalog; + } + + /** + * Return the part storage container. + */ + public final Database getPartDatabase() { + + return partDb; + } + + /** + * Return the supplier storage container. + */ + public final Database getSupplierDatabase() { + + return supplierDb; + } + + /** + * Return the shipment storage container. + */ + public final Database getShipmentDatabase() { + + return shipmentDb; + } + + /** + * Return the shipment-by-part index. + */ + public final SecondaryDatabase getShipmentByPartDatabase() { + + return shipmentByPartDb; + } + + /** + * Return the shipment-by-supplier index. + */ + public final SecondaryDatabase getShipmentBySupplierDatabase() { + + return shipmentBySupplierDb; + } + + /** + * Return the supplier-by-city index. + */ + public final SecondaryDatabase getSupplierByCityDatabase() { + + return supplierByCityDb; + } + + /** + * Close all stores (closing a store automatically closes its indices). + */ + public void close() + throws DatabaseException { + + // Close secondary databases, then primary databases. + supplierByCityDb.close(); + shipmentByPartDb.close(); + shipmentBySupplierDb.close(); + partDb.close(); + supplierDb.close(); + shipmentDb.close(); + // And don't forget to close the catalog and the environment. + javaCatalog.close(); + env.close(); + } + + /** + * The SecondaryKeyCreator for the SupplierByCity index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class SupplierByCityKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the city key extractor. + * @param catalog is the class catalog. + * @param valueClass is the supplier value class. 
+ */ + private SupplierByCityKeyCreator(ClassCatalog catalog, + Class valueClass) { + + super(catalog, valueClass); + } + + /** + * Extract the city key from a supplier key/value pair. The city key + * is stored in the supplier value, so the supplier key is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + SupplierData supplierData = (SupplierData) valueInput; + String city = supplierData.getCity(); + if (city != null) { + indexKeyOutput.writeString(supplierData.getCity()); + return true; + } else { + return false; + } + } + } + + /** + * The SecondaryKeyCreator for the ShipmentByPart index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentByPartKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the part key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentByPartKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the part key from a shipment key/value pair. The part key + * is stored in the shipment key, so the shipment value is not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + String partNumber = primaryKeyInput.readString(); + // don't bother reading the supplierNumber + indexKeyOutput.writeString(partNumber); + return true; + } + } + + /** + * The SecondaryKeyCreator for the ShipmentBySupplier index. This is an + * extension of the abstract class TupleSerialKeyCreator, which implements + * SecondaryKeyCreator for the case where the data keys are of the format + * TupleFormat and the data values are of the format SerialFormat. + */ + private static class ShipmentBySupplierKeyCreator + extends TupleSerialKeyCreator { + + /** + * Construct the supplier key extractor. + * @param catalog is the class catalog. + * @param valueClass is the shipment value class. + */ + private ShipmentBySupplierKeyCreator(ClassCatalog catalog, + Class valueClass) { + super(catalog, valueClass); + } + + /** + * Extract the supplier key from a shipment key/value pair. The + * supplier key is stored in the shipment key, so the shipment value is + * not used. + */ + public boolean createSecondaryKey(TupleInput primaryKeyInput, + Object valueInput, + TupleOutput indexKeyOutput) { + + primaryKeyInput.readString(); // skip the partNumber + String supplierNumber = primaryKeyInput.readString(); + indexKeyOutput.writeString(supplierNumber); + return true; + } + } +} diff --git a/examples/collections/ship/tuple/SampleViews.java b/examples/collections/ship/tuple/SampleViews.java new file mode 100644 index 0000000..0ec42bc --- /dev/null +++ b/examples/collections/ship/tuple/SampleViews.java @@ -0,0 +1,401 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; + +/** + * SampleViews defines the data bindings and collection views for the sample + * database. + * + * @author Mark Hayes + */ +public class SampleViews { + + private StoredSortedMap partMap; + private StoredSortedMap supplierMap; + private StoredSortedMap shipmentMap; + private StoredSortedMap shipmentByPartMap; + private StoredSortedMap shipmentBySupplierMap; + private StoredSortedMap supplierByCityMap; + + /** + * Create the data bindings and collection views. + */ + public SampleViews(SampleDatabase db) { + + // Create the data bindings. + // In this sample, EntityBinding classes are used to bind the stored + // key/data entry pair to a combined data object. For keys, a + // one-to-one binding is implemented with EntryBinding classes to bind + // the stored tuple entry to a key Object. + // + ClassCatalog catalog = db.getClassCatalog(); + EntryBinding partKeyBinding = + new PartKeyBinding(); + EntityBinding partDataBinding = + new PartBinding(catalog, PartData.class); + EntryBinding supplierKeyBinding = + new SupplierKeyBinding(); + EntityBinding supplierDataBinding = + new SupplierBinding(catalog, SupplierData.class); + EntryBinding shipmentKeyBinding = + new ShipmentKeyBinding(); + EntityBinding shipmentDataBinding = + new ShipmentBinding(catalog, ShipmentData.class); + EntryBinding cityKeyBinding = + TupleBinding.getPrimitiveBinding(String.class); + + // Create map views for all stores and indices. + // StoredSortedMap is used since the stores and indices are ordered + // (they use the DB_BTREE access method). + // + partMap = + new StoredSortedMap(db.getPartDatabase(), + partKeyBinding, partDataBinding, true); + supplierMap = + new StoredSortedMap(db.getSupplierDatabase(), + supplierKeyBinding, supplierDataBinding, true); + shipmentMap = + new StoredSortedMap(db.getShipmentDatabase(), + shipmentKeyBinding, shipmentDataBinding, true); + shipmentByPartMap = + new StoredSortedMap(db.getShipmentByPartDatabase(), + partKeyBinding, shipmentDataBinding, true); + shipmentBySupplierMap = + new StoredSortedMap(db.getShipmentBySupplierDatabase(), + supplierKeyBinding, shipmentDataBinding, true); + supplierByCityMap = + new StoredSortedMap(db.getSupplierByCityDatabase(), + cityKeyBinding, supplierDataBinding, true); + } + + // The views returned below can be accessed using the java.util.Map or + // java.util.Set interfaces, or using the StoredSortedMap and + // StoredValueSet classes, which provide additional methods. The entity + // sets could be obtained directly from the Map.values() method but + // convenience methods are provided here to return them in order to avoid + // down-casting elsewhere. 
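Because the views honor the standard collection contracts, generic java.util code works against them unchanged; a short sketch, assuming an open SampleDatabase (the key "P1" matches the sample data):

    java.util.Map parts = views.getPartMap();
    System.out.println("parts stored: " + parts.size());
    if (parts.containsKey(new PartKey("P1"))) {
        System.out.println(parts.get(new PartKey("P1")));
    }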
+ + /** + * Return a map view of the part storage container. + */ + public StoredSortedMap getPartMap() { + + return partMap; + } + + /** + * Return a map view of the supplier storage container. + */ + public StoredSortedMap getSupplierMap() { + + return supplierMap; + } + + /** + * Return a map view of the shipment storage container. + */ + public StoredSortedMap getShipmentMap() { + + return shipmentMap; + } + + /** + * Return an entity set view of the part storage container. + */ + public StoredSortedValueSet getPartSet() { + + return (StoredSortedValueSet) partMap.values(); + } + + /** + * Return an entity set view of the supplier storage container. + */ + public StoredSortedValueSet getSupplierSet() { + + return (StoredSortedValueSet) supplierMap.values(); + } + + /** + * Return an entity set view of the shipment storage container. + */ + public StoredSortedValueSet getShipmentSet() { + + return (StoredSortedValueSet) shipmentMap.values(); + } + + /** + * Return a map view of the shipment-by-part index. + */ + public StoredSortedMap getShipmentByPartMap() { + + return shipmentByPartMap; + } + + /** + * Return a map view of the shipment-by-supplier index. + */ + public StoredSortedMap getShipmentBySupplierMap() { + + return shipmentBySupplierMap; + } + + /** + * Return a map view of the supplier-by-city index. + */ + public final StoredSortedMap getSupplierByCityMap() { + + return supplierByCityMap; + } + + /** + * PartKeyBinding is used to bind the stored key tuple entry for a part to + * a key object representation. + */ + private static class PartKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private PartKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new PartKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + PartKey key = (PartKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * PartBinding is used to bind the stored key/data entry pair for a part + * to a combined data object (entity). + */ + private static class PartBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private PartBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + PartData data = (PartData) dataInput; + return new Part(number, data.getName(), data.getColor(), + data.getWeight(), data.getCity()); + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Part part = (Part) object; + output.writeString(part.getNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Part part = (Part) object; + return new PartData(part.getName(), part.getColor(), + part.getWeight(), part.getCity()); + } + } + + /** + * SupplierKeyBinding is used to bind the stored key tuple entry for a + * supplier to a key object representation. + */ + private static class SupplierKeyBinding extends TupleBinding { + + /** + * Construct the binding object. 
+ */ + private SupplierKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String number = input.readString(); + return new SupplierKey(number); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + SupplierKey key = (SupplierKey) object; + output.writeString(key.getNumber()); + } + } + + /** + * SupplierBinding is used to bind the stored key/data entry pair for a + * supplier to a combined data object (entity). + */ + private static class SupplierBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private SupplierBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String number = keyInput.readString(); + SupplierData data = (SupplierData) dataInput; + return new Supplier(number, data.getName(), + data.getStatus(), data.getCity()); + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Supplier supplier = (Supplier) object; + output.writeString(supplier.getNumber()); + } + + /** + * Create the stored data from the entity. + */ + public Object objectToData(Object object) { + + Supplier supplier = (Supplier) object; + return new SupplierData(supplier.getName(), supplier.getStatus(), + supplier.getCity()); + } + } + + /** + * ShipmentKeyBinding is used to bind the stored key tuple entry for a + * shipment to a key object representation. + */ + private static class ShipmentKeyBinding extends TupleBinding { + + /** + * Construct the binding object. + */ + private ShipmentKeyBinding() { + } + + /** + * Create the key object from the stored key tuple entry. + */ + public Object entryToObject(TupleInput input) { + + String partNumber = input.readString(); + String supplierNumber = input.readString(); + return new ShipmentKey(partNumber, supplierNumber); + } + + /** + * Create the stored key tuple entry from the key object. + */ + public void objectToEntry(Object object, TupleOutput output) { + + ShipmentKey key = (ShipmentKey) object; + output.writeString(key.getPartNumber()); + output.writeString(key.getSupplierNumber()); + } + } + + /** + * ShipmentBinding is used to bind the stored key/data entry pair for a + * shipment to a combined data object (entity). + */ + private static class ShipmentBinding extends TupleSerialBinding { + + /** + * Construct the binding object. + */ + private ShipmentBinding(ClassCatalog classCatalog, Class dataClass) { + + super(classCatalog, dataClass); + } + + /** + * Create the entity by combining the stored key and data. + */ + public Object entryToObject(TupleInput keyInput, Object dataInput) { + + String partNumber = keyInput.readString(); + String supplierNumber = keyInput.readString(); + ShipmentData data = (ShipmentData) dataInput; + return new Shipment(partNumber, supplierNumber, + data.getQuantity()); + } + + /** + * Create the stored key from the entity. + */ + public void objectToKey(Object object, TupleOutput output) { + + Shipment shipment = (Shipment) object; + output.writeString(shipment.getPartNumber()); + output.writeString(shipment.getSupplierNumber()); + } + + /** + * Create the stored data from the entity. 
+ */ + public Object objectToData(Object object) { + + Shipment shipment = (Shipment) object; + return new ShipmentData(shipment.getQuantity()); + } + } +} diff --git a/examples/collections/ship/tuple/Shipment.java b/examples/collections/ship/tuple/Shipment.java new file mode 100644 index 0000000..cbe13a0 --- /dev/null +++ b/examples/collections/ship/tuple/Shipment.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +/** + * A Shipment represents the combined key/data pair for a shipment entity. + * + *

<p>In this sample, Shipment is created from the stored key/data entry + * using a TupleSerialBinding. See {@link SampleViews.ShipmentBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable.</p>
    + * + * @author Mark Hayes + */ +public class Shipment { + + private String partNumber; + private String supplierNumber; + private int quantity; + + public Shipment(String partNumber, String supplierNumber, int quantity) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + this.quantity = quantity; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[Shipment: part=" + partNumber + + " supplier=" + supplierNumber + + " quantity=" + quantity + ']'; + } +} diff --git a/examples/collections/ship/tuple/ShipmentData.java b/examples/collections/ship/tuple/ShipmentData.java new file mode 100644 index 0000000..77b0619 --- /dev/null +++ b/examples/collections/ship/tuple/ShipmentData.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +import java.io.Serializable; + +/** + * A ShipmentData serves as the value in the key/value pair for a shipment + * entity. + * + *

In this sample, ShipmentData is used only as the storage data for the + * value, while the Shipment object is used as the value's object + * representation. Because it is used directly as storage data + * in serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentData implements Serializable { + + private int quantity; + + public ShipmentData(int quantity) { + + this.quantity = quantity; + } + + public final int getQuantity() { + + return quantity; + } + + public String toString() { + + return "[ShipmentData: quantity=" + quantity + ']'; + } +} diff --git a/examples/collections/ship/tuple/ShipmentKey.java b/examples/collections/ship/tuple/ShipmentKey.java new file mode 100644 index 0000000..6075100 --- /dev/null +++ b/examples/collections/ship/tuple/ShipmentKey.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +/** + * A ShipmentKey serves as the key in the key/data pair for a shipment entity. + * + *

    In this sample, ShipmentKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class ShipmentKey { + + private String partNumber; + private String supplierNumber; + + public ShipmentKey(String partNumber, String supplierNumber) { + + this.partNumber = partNumber; + this.supplierNumber = supplierNumber; + } + + public final String getPartNumber() { + + return partNumber; + } + + public final String getSupplierNumber() { + + return supplierNumber; + } + + public String toString() { + + return "[ShipmentKey: supplier=" + supplierNumber + + " part=" + partNumber + ']'; + } +} diff --git a/examples/collections/ship/tuple/Supplier.java b/examples/collections/ship/tuple/Supplier.java new file mode 100644 index 0000000..55a587e --- /dev/null +++ b/examples/collections/ship/tuple/Supplier.java @@ -0,0 +1,68 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +/** + * A Supplier represents the combined key/data pair for a supplier entity. + * + *

In this sample, Supplier is created from the stored key/data entry + * using a TupleSerialBinding. See {@link SampleViews.SupplierBinding} for + * details. Since this class is not used directly for data storage, it does + * not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class Supplier { + + private String number; + private String name; + private int status; + private String city; + + public Supplier(String number, String name, int status, String city) { + + this.number = number; + this.name = name; + this.status = status; + this.city = city; + } + + public final String getNumber() { + + return number; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[Supplier: number=" + number + + " name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/tuple/SupplierData.java b/examples/collections/ship/tuple/SupplierData.java new file mode 100644 index 0000000..2ca2381 --- /dev/null +++ b/examples/collections/ship/tuple/SupplierData.java @@ -0,0 +1,63 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +import java.io.Serializable; + +/** + * A SupplierData serves as the value in the key/value pair for a supplier + * entity. + * + *

In this sample, SupplierData is used only as the storage data for the + * value, while the Supplier object is used as the value's object + * representation. Because it is used directly as storage data + * in serial format, it must be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierData implements Serializable { + + private String name; + private int status; + private String city; + + public SupplierData(String name, int status, String city) { + + this.name = name; + this.status = status; + this.city = city; + } + + public final String getName() { + + return name; + } + + public final int getStatus() { + + return status; + } + + public final String getCity() { + + return city; + } + + public String toString() { + + return "[SupplierData: name=" + name + + " status=" + status + + " city=" + city + ']'; + } +} diff --git a/examples/collections/ship/tuple/SupplierKey.java b/examples/collections/ship/tuple/SupplierKey.java new file mode 100644 index 0000000..03ed993 --- /dev/null +++ b/examples/collections/ship/tuple/SupplierKey.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +/** + * A SupplierKey serves as the key in the key/data pair for a supplier entity. + * + *

    In this sample, SupplierKey is bound to the key's tuple storage entry + * using a TupleBinding. Because it is not used directly as storage data, it + * does not need to be Serializable.

    + * + * @author Mark Hayes + */ +public class SupplierKey { + + private String number; + + public SupplierKey(String number) { + + this.number = number; + } + + public final String getNumber() { + + return number; + } + + public String toString() { + + return "[SupplierKey: number=" + number + ']'; + } +} diff --git a/examples/collections/ship/tuple/Weight.java b/examples/collections/ship/tuple/Weight.java new file mode 100644 index 0000000..8d71f2f --- /dev/null +++ b/examples/collections/ship/tuple/Weight.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package collections.ship.tuple; + +import java.io.Serializable; + +/** + * Weight represents a weight amount and unit of measure. + * + *

    In this sample, Weight is embedded in part data values which are stored + * as Java serialized objects; therefore Weight must be Serializable.

    + * + * @author Mark Hayes + */ +public class Weight implements Serializable { + + public final static String GRAMS = "grams"; + public final static String OUNCES = "ounces"; + + private double amount; + private String units; + + public Weight(double amount, String units) { + + this.amount = amount; + this.units = units; + } + + public final double getAmount() { + + return amount; + } + + public final String getUnits() { + + return units; + } + + public String toString() { + + return "[" + amount + ' ' + units + ']'; + } +} diff --git a/examples/jca/HOWTO-jboss.txt b/examples/jca/HOWTO-jboss.txt new file mode 100644 index 0000000..0d91a6e --- /dev/null +++ b/examples/jca/HOWTO-jboss.txt @@ -0,0 +1,217 @@ +How to use the Berkeley DB Java Edition JCA Resource Adapter in JBoss 3.2.6 + +Prerequisites: + +JBoss 3.2.6 +ant 1.5.4 or later +J2EE jar files (available in the JBoss distribution) + +This HOWTO describes: + + (1) how to build and deploy the Berkeley DB Java Edition JCA Resource + Adapter under the JBoss Application Server (v3.2.6). + + (2) how to run a simple smoke test to test that the RA has been + deployed correctly. + + (3) some notes on writing applications that use the RA. + +The Berkeley DB Java Edition (JE) JCA code depends on J2EE libraries, +but the regular JE code does not require these libraries in order +to build. Therefore, the "ant compile" target only builds the +non-J2EE based code. To build the JE JCA libraries and examples, it is +necessary to have the appropriate J2EE jar files available and to use +additional ant targets. + +Building the Resource Adapter +----------------------------- + +- Edit /src/com/sleepycat/je/jca/ra/ra.xml. + + (1) Search for "" + + (2) Select the appropriate value (LocalTransaction, NoTransaction, or + XATransaction), and comment out or delete the other two. Don't use + multiple values of . + + (3) Change the value of the to refer to the JE + environment directory. JBoss needs this to grant access permission + to JE, otherwise security exceptions will result. + + Note: + + If you use XATransaction, all your databases must be transactional. + +- Edit /build.properties: + + (1) Set j2ee.jarfile to an appropriate j2ee.jar. For example, + + j2ee.jarfile = /client/jbossall-client.jar + + The value specified for j2ee.jarfile should contain all the classes + necessary for proper execution of the JCA Resource Adapter (for + example, JNDI). The jbossall-client.jar is sufficient. + + (2) Set example.resources to an appropriate value, e.g. + + example.resources = /examples/resources/jboss + + The example.resources property should contain a jndi.properties file + that is correct for the target environment. If you are using the + jndi.properties supplied in the {examples.resources} directory, + review it to make sure it has correct values. + +- With the current directory set to , execute + + ant jca + + This creates a jejca.rar Resource Adapter Archive in /build/lib. + The jejca.rar archive contains a je.jar file. + +- Deploy the JE Resource Adapter (/build/lib/jejca.rar), + using an appropriate JBoss deployment tool or by simply copying it + to the JBoss deployment directory. For example, + + copy /build/lib/jejca.rar /server/default/deploy + +- If the JBoss server is not already running, start it now. + +Building the "SimpleBean" Example: +---------------------------------- + +The SimpleBean example is an EJB that has two methods, get() and +put(), which get and put data using the JE Resource Adapter on the +JBoss server. 
You can use this example to test the JE Resource +Adapter that you just deployed. + +- Edit /build.properties: + + (1) Set example.jca.srcdir to /examples/jca/jboss + + example.jca.srcdir = /examples/jca/jboss + + This is the directory where the JBoss specific deployment descriptor + for the "simple" EJB resides. + + (2) Set example.jca.descriptorname to jboss.xml. + + example.jca.desciptorname = jboss.xml + + This is the name of the jboss specific deployment descriptor for the + "simple" EJB. + +- Edit the source code for SimpleBean to refer to the correct + directory for the JE Environment. The JE Environment directory is + the same one that was specified in the ra.xml file under the + tag. This directory should exist and + the JBoss server should have write permission to that directory. + The source code for SimpleBean is in + + /examples/jca/simple/SimpleBean.java + + To set the directory, change the value of JE_ENV at the top of the + class. For example, + + private final String JE_ENV = "/tmp/je_store"; + +- Edit the jboss.xml descriptor in + + /examples/jca/jboss/jboss.xml + + to use the jndi-name that corresponds to the transaction-support + value in the ra.xml file above. That is, select one of the + following three lines and comment out or remove the other two: + + java:/LocalTransJE + java:/NoTransJE + java:/XATransJE + +- Build the SimpleBean example and jar file. + + ant jca-examples + + This builds a jejca-example.jar file and places it in the + /build/lib directory. The jar file contains the SimpleBean + classes, and the ejb-jar.xml and jboss.xml descriptor files. + +- Deploy the jejca-example jar by copying it to a deployment directory + (or use an appropriate deployment tool). For example, + + copy /build/lib/jejca-example.jar /server/default/deploy + +- Depending on which transaction support you have selected, examine the + corresponding RA service configuration file in + + /examples/jca/jboss + + (e.g. je-localtx-ds.xml). Ensure that the jndi-name matches the + name that you selected in the jboss.xml file in the same directory. + +- Deploy the RA service configuration file (e.g. je-localtx-ds.xml) by + copying it to the JBoss server deployment directory or using an + appropriate deployment tool. For example, + + copy /examples/jca/jboss/je-localtx-ds.xml + /server/default/deploy + +Running the "SimpleBean" Example: +--------------------------------- + +- Verify that the JBoss server has been started. + +- Run the client: + + ant testex-jejcasimple -Dkey=foo -Ddata=bar + + This should produce: + + Buildfile: build.xml + + testex-jejcasimple: + [java] Created Simple + [java] Simple.get('foo') = bar + + BUILD SUCCESSFUL + Total time: 3 seconds + +If you don't see + + [java] Simple.get('foo') = bar + +printed (for example, you see Simple.get('foo') = null), there may be +a configuration problem. Check the server logfile for details. + +Implementation Notes for Applications Using the RA +-------------------------------------------------- + +Please refer to the SimpleBean example in + + /examples/jca/simple/SimpleBean.java + +- Obtain a JEConnection using the + + JEConnectionFactory.getConnection() + + method and passing it an environment home directory and + EnvironmentConfig object. 
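  For example (a minimal sketch mirroring the SimpleBean example; the
  JNDI name ra/JEConnectionFactory and the environment directory are
  the ones configured above and are illustrative):

      EnvironmentConfig envConfig = new EnvironmentConfig();
      envConfig.setTransactional(true);
      envConfig.setAllowCreate(true);
      Context enc = (Context) new InitialContext().lookup("java:comp/env");
      JEConnectionFactory dcf =
          (JEConnectionFactory) enc.lookup("ra/JEConnectionFactory");
      JEConnection dc = dcf.getConnection("/tmp/je_store", envConfig);
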
Once the JEConnection has been obtained, + you can obtain the Environment handle by calling + + JEConnection.getEnvironment(); + +- Database handle cache available + +Because bean business methods may be relatively short, the underlying +ManagedConnection object for JE provides a Database handle cache. +This speeds up the Database open operation since the handle +(generally) already exists in the cache. Normally, a program opens a +database using the Environment.openDatabase() method. In the EJB +environment, the program should call JEConnection.openDatabase() +instead. Database handles obtained using this method should not be +close()'d as the ManagedConnection code does that when the +ManagedConnection is closed. + +- Databases under XA must be transactional + +If you are using the XATransaction environment (as specified in the +ra.xml file), all JE Databases used in that environment must be +transactional. diff --git a/examples/jca/HOWTO-oc4j.txt b/examples/jca/HOWTO-oc4j.txt new file mode 100644 index 0000000..2456a03 --- /dev/null +++ b/examples/jca/HOWTO-oc4j.txt @@ -0,0 +1,266 @@ +How to use the Berkeley DB Java Edition JCA Resource Adapter in the +Oracle Containers for J2EE version 10.1.3.2.0. + +Prerequisites: + +- OC4J version 10.1.3.2.0 +- ant 1.5.4 or later +- J2EE jar files (available in the OC4J distribution) + +This HOWTO describes: + + (1) how to build and deploy the Berkeley DB Java Edition JCA Resource + Adapter under OC4J version 10.1.3.2.0 + + (2) how to run a simple smoke test to test that the RA has been + deployed correctly. + + (3) some notes on writing applications that use the RA. + +The Berkeley DB Java Edition (JE) JCA code depends on J2EE libraries, +but the regular JE code does not require these libraries in order to +be built. Therefore, the "ant compile" target only builds the +non-J2EE based code. To build the JE JCA libraries and examples, it +is necessary to have the appropriate J2EE jar files available and to +use additional ant targets. + +Building the Resource Adapter +----------------------------- + +- Edit /src/com/sleepycat/je/jca/ra/ra.xml. + + (1) Search for "" + + (2) Select the appropriate value (LocalTransaction, NoTransaction, or + XATransaction) and comment out or delete the other two. Don't use + multiple values of . + + (3) Change the value of the to refer to the + JE environment directory. OC4J needs this to file grant access + permission to JE, otherwise security exceptions will result. + +- Edit /build.properties: + + (1) Set j2ee.jarfile to an appropriate j2ee.jar. For example, + + j2ee.jarfile = /j2ee/home/lib/ejb.jar: + /oc4j/j2ee/home/lib/connector.jar: + /oc4j/j2ee/home/lib/oc4j-internal.jar + + The value specified for j2ee.jarfile should contain all the classes + necessary for proper execution of the JCA Resource Adapter (for + example, JNDI). + + (2) Set example.resources to an appropriate value, e.g. + + example.resources = /examples/resources/oc4j/oc4j.jar + + The oc4j.jar file contains an application-client.xml file which looks + like this: + +bash-3.00$ cat examples/resources/oc4j/META-INF/application-client.xml + + + SimpleBean + Session + jca.simple.SimpleHome + jca.simple.Simple + + + +- With the current directory set to , execute + + ant jca + + This creates a jejca.rar Resource Adapter Archive in + /build/lib. The jejca.rar contains a je.jar file. + +- If OC4J is not already started, start it now. + + oc4j -start + + Note: + + The server can be stopped with the asadmin stop-domain command. e.g. 
+ + oc4j -shutdown -port 23791 -password + +or + + java -jar /j2ee/home/admin_client.jar + deployer:oc4j:localhost:23791 oc4jadmin -shutdown + +- Deploy the JE Resource Adapter (/build/lib/jejca.rar), + using the Oracle Application Server Control web tool: + + http://:8888 + login if necessary + Applications tab + In the "View" pulldown, select 'Standalone Resource Adapters' + Select "Deploy" + In the "Archive" section of the screen, enter the file name of the + jejca.rar file (/build/lib/jejca.rar). + Select "Next" + Enter "JEConnection" for Resource Adapter Name + Select "Next" + Select "Deploy" + +- Create the connector connection pool and connector resource: + + After the RA has been successfully deployed, select "Return" + On the Home | Applications | Stand Alone Resource Adapters page, + select "Berkeley DB Java Edition JCA Adapter" link + Select "Connection Factories" + Under "Shared Connection Pools", select "Create" + Enter "JEConnectionPool" for Connection Pool Name and Select "OK" + Above "JNDI Location" select "Create" + Make sure that com.sleepycat.je.jca.ra.JEConnectionFactory is selected + in the pull down menu and select "Continue" + Enter "ra/JEConnectionPool" for "JNDI Location" + Select "Use Shared Connection Pool" and chose "JEConnectionPool" in the + menu. Select "Finish" + +- If you change or rebuild the jejca.rar file, you must redeploy the Resource + Adapter file with the same steps above. + +Building the "SimpleBean" Example: +---------------------------------- + +The SimpleBean example is an EJB that has two methods, get() and +put(), which get and put data using the JE Resource Adapter on the +OC4J server. You can use this example to test the JE RA that you +just deployed. + +- Edit /build.properties: + + (1) Set example.jca.srcdir to /examples/jca/oc4j + + example.jca.srcdir = /examples/jca/oc4j + + This is the directory where the OC4J specific deployment descriptor + for the "simple" EJB resides. + + (2) Set example.jca.descriptorname to orion-ejb-jar.xml. + + example.jca.desciptorname = orion-ejb-jar.xml + + This is the name of the OC4J specific deployment descriptor for the + "simple" EJB. + +- Edit /examples/jca/simple/ejb-jar.xml + Uncomment the section at the end of the file. + +- Edit the source code for SimpleBean to refer to the correct + directory for the JE Environment. The JE Environment directory is + the same one that was specified in the ra.xml file under the + tag. This directory should exist and + the OC4J server should have write permission to that directory. + The source code for SimpleBean is in + + /examples/jca/simple/SimpleBean.java + + To set the directory, change the value of JE_ENV at the top of the + class. For example, + + private final String JE_ENV = "/tmp/je_store"; + +- Edit the source code for SimpleBean to have the correct value for + TRANSACTIONAL. If you set it to true, you should also set the + proper value in the ra.xml for (either + LocalTransaction or XATransaction). + +- Edit the SimpleClient.java file to have correct values for the JNDI lookup + properties (java.naming.*). + +- Edit the SimpleClient.java file to change the value of OC4J to true. + +- Build the SimpleBean example and jar file. + + ant jca-examples + + This builds a jejca-example.jar file and places it in the + /build/lib directory. The jar file contains the SimpleBean + classes, and the ejb-jar.xml and sun-ejb-jar.xml descriptor files. 
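  (As a quick sanity check before deploying, assuming the JDK's jar
  tool is on your PATH, you can list the contents of the archive, e.g.

      jar tf build/lib/jejca-example.jar

  and verify that the SimpleBean classes and the descriptor files are
  present.)
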
+ +- Deploy the jejca-example jar using the Oracle Application Server + Control web tool: + + http://:8888 + login if necessary + Applications tab + Select "Applications" from the "View" pulldown. + Select "Deploy" + Enter the location of the jejca-example.jar file + (/build/lib/jejca-example.jar) in the + "Location on Server" box in the "Archive" section. + Select "Next". + Enter "Simple" in the "Application Name" box. Select "Next". + On the "Deploy: Deployment Settings" page, click the pencil next to + "Map Environment References". + In the "Map Resource References" section, enter "ra/JEConnectionFactory" + in the form box for the "ra/JEConnectionFactory" Resource Reference. + Select "OK". + Select "Deploy". + + +Running the "SimpleBean" Example: +--------------------------------- + +- Verify that the OC4J server has been started. + +- Run the client: + + ant testex-jejcasimple -Dkey=foo -Ddata=bar + + This should produce: + + Buildfile: build.xml + + testex-jejcasimple: + [java] Created Simple + [java] Simple.get('foo') = bar + + BUILD SUCCESSFUL + Total time: 3 seconds + +If you don't see + + [java] Simple.get('foo') = bar + +printed (for example, you see Simple.get('foo') = null), there may be +a configuration problem. Check the server.log for details. + +Implementation Notes for Applications Using the RA +-------------------------------------------------- + +Please refer to the SimpleBean example in + + /examples/jca/simple/SimpleBean.java + +- Obtain a JEConnection using the + + JEConnectionFactory.getConnection() + + method and passing it an environment home directory and + EnvironmentConfig object. Once the JEConnection has been obtained, + you can obtain the Environment handle by calling + + JEConnection.getEnvironment(); + +- Database handle cache available + +Because bean business methods may be relatively short, the underlying +ManagedConnection object for JE provides a Database handle cache. +This speeds up the Database open operation since the handle +(generally) already exists in the cache. Normally, a program opens a +database using the Environment.openDatabase() method. In the EJB +environment, the program should call JEConnection.openDatabase() +instead. Database handles obtained using this method should not be +close()'d as the ManagedConnection code does that when the +ManagedConnection is closed. + +- Databases under XA must be transactional + +If you are using the XATransaction environment (as specified in the +ra.xml file), all JE Databases used in that environment must be +transactional. diff --git a/examples/jca/HOWTO-sjsas.txt b/examples/jca/HOWTO-sjsas.txt new file mode 100644 index 0000000..0767240 --- /dev/null +++ b/examples/jca/HOWTO-sjsas.txt @@ -0,0 +1,248 @@ +How to use the Berkeley DB Java Edition JCA Resource Adapter in the +Sun Java System Application Server 8.1 or Glassfish 2.1.1. + +Prerequisites: + +- Sun Java System Application Server 8.1/Glassfish 2.1.1 +- ant 1.5.4 or later +- J2EE jar files (available in the SJSAS distribution) + +This HOWTO describes: + + (1) how to build and deploy the Berkeley DB Java Edition JCA Resource + Adapter under the Sun Java System Application Server (v8.1). + + (2) how to run a simple smoke test to test that the RA has been + deployed correctly. + + (3) some notes on writing applications that use the RA. + +The Berkeley DB Java Edition (JE) JCA code depends on J2EE libraries, +but the regular JE code does not require these libraries in order to +be built. 
Therefore, the "ant compile" target only builds the +non-J2EE based code. To build the JE JCA libraries and examples, it +is necessary to have the appropriate J2EE jar files available and to +use additional ant targets. + +Building the Resource Adapter +----------------------------- + +- Edit /src/com/sleepycat/je/jca/ra/ra.xml. + + (1) Search for "" + + (2) Select the appropriate value (LocalTransaction, NoTransaction, or + XATransaction) and comment out or delete the other two. Don't use + multiple values of . + + (3) Change the value of the to refer to the + JE environment directory. SJSAS needs this to file grant access + permission to JE, otherwise security exceptions will result. + +- Edit /domains/domain1/config/server.policy to include + + permission java.io.FilePermission + "/tmp/je_store/*", "read,write"; + + permission java.util.logging.LoggingPermission "control"; + + in the section with the comment: + + // Basic set of required permissions granted to all remaining code + + You chould grant java.io.FilePermission to the server for the + directory where your JE environment will reside (i.e. /tmp/je_store, + or whatever directory you are using). + +- Edit /build.properties: + + (1) Set j2ee.jarfile to an appropriate j2ee.jar. For example, + + j2ee.jarfile = /lib/j2ee.jar + + The value specified for j2ee.jarfile should contain all the classes + necessary for proper execution of the JCA Resource Adapter (for + example, JNDI). The j2ee.jar file noted here meets all the + necessary requirements. + + (2) Set example.resources to an appropriate value, e.g. + + example.resources = /lib/appserv-rt.jar + + The example.resources property should contain a jndi.properties file + that is correct for the target environment. appserv-rt.jar contains + an appropriate jndi.properties. + +- With the current directory set to , execute + + ant jca + + This creates a jejca.rar Resource Adapter Archive in + /build/lib. The jejca.rar contains a je.jar file. + +- If SJSAS is not already started, start it now. + + asadmin start-domain domain1 + + Note: + + The server can be stopped with the asadmin stop-domain command. e.g. + + asadmin stop-domain + +- Deploy the JE Resource Adapter (/build/lib/jejca.rar), + using the asadmin tool: + + asadmin deploy --user admin --host localhost --port 4848 \ + --force=true --name JEConnection --upload=true \ + build/lib/jejca.rar + + Note: + + If you redeploy the .rar file, you may need to restart the server + with asadmin. + +- Create the connector connection pool and connector resource: + + asadmin create-connector-connection-pool --user admin \ + --host localhost --port 4848 --raname JEConnection \ + --connectiondefinition \ + com.sleepycat.je.jca.ra.JEConnectionFactory \ + JEConnectionPool + + asadmin create-connector-resource --user admin --host localhost \ + --port 4848 --poolname JEConnectionPool ra/JEConnectionFactory + + Note: + + The connector resource and connection pool can be deleted with the + delete-connector-resource and delete-connector-connection-pool options + to asadmin. For example, + + asadmin delete-connector-resource --user admin --host localhost \ + --port 4848 ra/JEConnectionFactory + + asadmin delete-connector-connection-pool --user admin \ + --host localhost --port 4848 --cascade=true JEConnectionPool + +Building the "SimpleBean" Example: +---------------------------------- + +The SimpleBean example is an EJB that has two methods, get() and +put(), which get and put data using the JE Resource Adapter on the +SJSAS server. 
You can use this example to test the JE RA that you +just deployed. + +- Edit /build.properties: + + (1) Set example.jca.srcdir to /examples/jca/sjsas8_1 + + example.jca.srcdir = /examples/jca/sjsas8_1 + + This is the directory where the SJSAS specific deployment descriptor + for the "simple" EJB resides. + + (2) Set example.jca.descriptorname to sun-ejb-jar.xml. + + example.jca.desciptorname = sun-ejb-jar.xml + + This is the name of the SJSAS specific deployment descriptor for the + "simple" EJB. + +- Edit the source code for SimpleBean to refer to the correct + directory for the JE Environment. The JE Environment directory is + the same one that was specified in the ra.xml file under the + tag. This directory should exist and + the SJSAS server should have write permission to that directory. + The source code for SimpleBean is in + + /examples/jca/simple/SimpleBean.java + + To set the directory, change the value of JE_ENV at the top of the + class. For example, + + private final String JE_ENV = "/tmp/je_store"; + +- Edit the sun-ejb-jar.xml descriptor in + + /examples/jca/sjsas8_1/sun-ejb-jar.xml + + and ensure that the jndi-name and res-ref-name correspond to the + name of the connector resource that was created above during the RA + deployment. It should be "ra/JEConnectionFactory". + +- Build the SimpleBean example and jar file. + + ant jca-examples + + This builds a jejca-example.jar file and places it in the + /build/lib directory. The jar file contains the SimpleBean + classes, and the ejb-jar.xml and sun-ejb-jar.xml descriptor files. + +- Deploy the jejca-example jar using the asadmin tool. + + asadmin deploy --user admin --host localhost --port 4848 \ + --force=true --name Simple --upload=true \ + build/lib/jejca-example.jar + +Running the "SimpleBean" Example: +--------------------------------- + +- Verify that the SJSAS server has been started. + +- Run the client: + + ant testex-jejcasimple -Dkey=foo -Ddata=bar + + This should produce: + + Buildfile: build.xml + + testex-jejcasimple: + [java] Created Simple + [java] Simple.get('foo') = bar + + BUILD SUCCESSFUL + Total time: 3 seconds + +If you don't see + + [java] Simple.get('foo') = bar + +printed (for example, you see Simple.get('foo') = null), there may be +a configuration problem. Check the server.log for details. + +Implementation Notes for Applications Using the RA +-------------------------------------------------- + +Please refer to the SimpleBean example in + + /examples/jca/simple/SimpleBean.java + +- Obtain a JEConnection using the + + JEConnectionFactory.getConnection() + + method and passing it an environment home directory and + EnvironmentConfig object. Once the JEConnection has been obtained, + you can obtain the Environment handle by calling + + JEConnection.getEnvironment(); + +- Database handle cache available + +Because bean business methods may be relatively short, the underlying +ManagedConnection object for JE provides a Database handle cache. +This speeds up the Database open operation since the handle +(generally) already exists in the cache. Normally, a program opens a +database using the Environment.openDatabase() method. In the EJB +environment, the program should call JEConnection.openDatabase() +instead. Database handles obtained using this method should not be +close()'d as the ManagedConnection code does that when the +ManagedConnection is closed. 
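  In code, the pattern looks like this (a minimal sketch; the database
  name "db" and the DatabaseConfig settings are illustrative and must
  match your application):

      JEConnection dc = getConnection(JE_ENV);   // as in SimpleBean
      try {
          Database db = dc.openDatabase("db", dbConfig); // cached handle
          // ... read and write using db ...
      } finally {
          // do NOT call db.close(); closing the connection releases
          // the cached handles
          dc.close();
      }
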
+ +- Databases under XA must be transactional + +If you are using the XATransaction environment (as specified in the +ra.xml file), all JE Databases used in that environment must be +transactional. diff --git a/examples/jca/jboss/jboss.xml b/examples/jca/jboss/jboss.xml new file mode 100644 index 0000000..a2e83bb --- /dev/null +++ b/examples/jca/jboss/jboss.xml @@ -0,0 +1,16 @@ + + + + + SimpleBean + + ra/JEConnectionFactory + java:/LocalTransJE + + + + + diff --git a/examples/jca/jboss/je-localtx-ds.xml b/examples/jca/jboss/je-localtx-ds.xml new file mode 100644 index 0000000..384fb1b --- /dev/null +++ b/examples/jca/jboss/je-localtx-ds.xml @@ -0,0 +1,9 @@ + + + + LocalTransJE + + Berkeley DB Java Edition JCA Adapter + + diff --git a/examples/jca/jboss/je-no-tx-ds.xml b/examples/jca/jboss/je-no-tx-ds.xml new file mode 100644 index 0000000..924029d --- /dev/null +++ b/examples/jca/jboss/je-no-tx-ds.xml @@ -0,0 +1,8 @@ + + + + NoTransJE + Berkeley DB Java Edition JCA Adapter + + diff --git a/examples/jca/jboss/je-xa-ds.xml b/examples/jca/jboss/je-xa-ds.xml new file mode 100644 index 0000000..07a5f98 --- /dev/null +++ b/examples/jca/jboss/je-xa-ds.xml @@ -0,0 +1,9 @@ + + + + XATransJE + + Berkeley DB Java Edition JCA Adapter + + diff --git a/examples/jca/oc4j/orion-ejb-jar.xml b/examples/jca/oc4j/orion-ejb-jar.xml new file mode 100644 index 0000000..a1f54b2 --- /dev/null +++ b/examples/jca/oc4j/orion-ejb-jar.xml @@ -0,0 +1,2 @@ + + diff --git a/examples/jca/simple/Simple.java b/examples/jca/simple/Simple.java new file mode 100644 index 0000000..0522e4c --- /dev/null +++ b/examples/jca/simple/Simple.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package jca.simple; + +import java.rmi.RemoteException; +import javax.ejb.EJBObject; + +public interface Simple extends EJBObject { + + public void put(String key, String data) + throws RemoteException; + + public String get(String key) + throws RemoteException; + + public void removeDatabase() + throws RemoteException; +} diff --git a/examples/jca/simple/SimpleBean.java b/examples/jca/simple/SimpleBean.java new file mode 100644 index 0000000..946673e --- /dev/null +++ b/examples/jca/simple/SimpleBean.java @@ -0,0 +1,210 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package jca.simple; + +import javax.ejb.SessionBean; +import javax.ejb.SessionContext; +import javax.naming.Context; +import javax.naming.InitialContext; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.jca.ra.JEConnection; +import com.sleepycat.je.jca.ra.JEConnectionFactory; + +public class SimpleBean implements SessionBean { + + /* + * Set this to something appropriate for your environment. Make sure it + * matches the ra.xml. + */ + private final String JE_ENV = "/export/home/cwl/work-jca/je_store"; + private final boolean TRANSACTIONAL = true; + + private SessionContext sessionCtx; + + public void ejbCreate() { + } + + public void ejbRemove() { + } + + public void setSessionContext(SessionContext context) { + sessionCtx = context; + } + + public void unsetSessionContext() { + sessionCtx = null; + } + + public void ejbActivate() { + } + + public void ejbPassivate() { + } + + public void put(String key, String data) { + try { + @SuppressWarnings("unused") + Environment env = null; + @SuppressWarnings("unused") + Transaction txn = null; + Database db = null; + @SuppressWarnings("unused") + SecondaryDatabase secDb = null; + Cursor cursor = null; + JEConnection dc = null; + try { + dc = getConnection(JE_ENV); + + env = dc.getEnvironment(); + DatabaseConfig dbConfig = new DatabaseConfig(); + SecondaryConfig secDbConfig = new SecondaryConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(TRANSACTIONAL); + secDbConfig.setAllowCreate(true); + secDbConfig.setTransactional(TRANSACTIONAL); + secDbConfig.setKeyCreator(new MyKeyCreator()); + + /* + * Use JEConnection.openDatabase() to obtain a cached Database + * handle. Do not call close() on Database handles obtained + * using this method. + */ + db = dc.openDatabase("db", dbConfig); + secDb = dc.openSecondaryDatabase("secDb", db, secDbConfig); + cursor = db.openCursor(null, null); + cursor.put(new DatabaseEntry(key.getBytes("UTF-8")), + new DatabaseEntry(data.getBytes("UTF-8"))); + } finally { + if (cursor != null) { + cursor.close(); + } + if (dc != null) { + dc.close(); + } + } + } catch (Exception e) { + System.err.println("Failure in put" + e); + } + } + + public void removeDatabase() { + try { + JEConnection dc = null; + try { + dc = getConnection(JE_ENV); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(TRANSACTIONAL); + + /* + * Once you have removed a database from the environment, + * do not try to open it anymore. 
+ */ + dc.removeDatabase("db"); + } finally { + if (dc != null) { + dc.close(); + } + } + } catch (Exception e) { + System.err.println("Failure in remove " + e); + e.printStackTrace(); + } + } + + public String get(String key) { + try { + @SuppressWarnings("unused") + Environment env = null; + @SuppressWarnings("unused") + Transaction txn = null; + Database db = null; + Cursor cursor = null; + JEConnection dc = null; + try { + dc = getConnection(JE_ENV); + + env = dc.getEnvironment(); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(TRANSACTIONAL); + + /* + * Use JEConnection.openDatabase() to obtain a cached Database + * handle. Do not call close() on Database handles obtained + * using this method. + */ + db = dc.openDatabase("db", dbConfig); + cursor = db.openCursor(null, null); + DatabaseEntry data = new DatabaseEntry(); + cursor.getSearchKey(new DatabaseEntry(key.getBytes("UTF-8")), + data, + null); + return new String(data.getData(), "UTF-8"); + } finally { + if (cursor != null) { + cursor.close(); + } + if (dc != null) { + dc.close(); + } + } + } catch (Exception e) { + System.err.println("Failure in get" + e); + e.printStackTrace(); + } + return null; + } + + private JEConnection getConnection(String envDir) { + try { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + InitialContext iniCtx = new InitialContext(); + Context enc = (Context) iniCtx.lookup("java:comp/env"); + Object ref = enc.lookup("ra/JEConnectionFactory"); + JEConnectionFactory dcf = (JEConnectionFactory) ref; + JEConnection dc = dcf.getConnection(envDir, envConfig); + return dc; + } catch(Exception e) { + System.err.println("Failure in getConnection " + e); + } + return null; + } + + private static class MyKeyCreator implements SecondaryKeyCreator { + + MyKeyCreator() { + } + + public boolean createSecondaryKey(SecondaryDatabase secondaryDb, + DatabaseEntry keyEntry, + DatabaseEntry dataEntry, + DatabaseEntry resultEntry) { + return false; + } + } +} diff --git a/examples/jca/simple/SimpleClient.java b/examples/jca/simple/SimpleClient.java new file mode 100644 index 0000000..c39b342 --- /dev/null +++ b/examples/jca/simple/SimpleClient.java @@ -0,0 +1,48 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package jca.simple; + +import javax.naming.InitialContext; + +import java.util.Hashtable; + +public class SimpleClient { + + public static void main(String args[]) + throws Exception { + + final boolean OC4J = false; + + InitialContext iniCtx = null; + Hashtable env = new Hashtable(); + if (OC4J) { + env.put("java.naming.factory.initial", + "com.evermind.server.ApplicationClientInitialContextFactory"); + env.put("java.naming.provider.url","ormi://localhost:23791/Simple"); + env.put("java.naming.security.principal","oc4jadmin"); + env.put("java.naming.security.credentials","oc4jadmin"); + iniCtx = new InitialContext(env); + } else { + iniCtx = new InitialContext(); + } + + Object ref = iniCtx.lookup("SimpleBean"); + SimpleHome home = (SimpleHome) ref; + Simple simple = home.create(); + System.out.println("Created Simple"); + simple.put(args[0], args[1]); + System.out.println("Simple.get('" + args[0] + "') = " + + simple.get(args[0])); + } +} diff --git a/examples/jca/simple/SimpleHome.java b/examples/jca/simple/SimpleHome.java new file mode 100644 index 0000000..a60688f --- /dev/null +++ b/examples/jca/simple/SimpleHome.java @@ -0,0 +1,24 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package jca.simple; + +import java.rmi.RemoteException; +import javax.ejb.CreateException; +import javax.ejb.EJBHome; + +public interface SimpleHome extends EJBHome { + + public Simple create() + throws RemoteException, CreateException; +} diff --git a/examples/jca/simple/ejb-jar.xml b/examples/jca/simple/ejb-jar.xml new file mode 100644 index 0000000..e9950b2 --- /dev/null +++ b/examples/jca/simple/ejb-jar.xml @@ -0,0 +1,37 @@ + + + + + + + SimpleBean + SimpleBean + jca.simple.SimpleHome + jca.simple.Simple + jca.simple.SimpleBean + Stateless + Container + + ra/JEConnectionFactory + com.sleepycat.je.jca.ra.JEConnectionFactory + Container + Shareable + + + + + + diff --git a/examples/jca/sjsas8_1/sun-ejb-jar.xml b/examples/jca/sjsas8_1/sun-ejb-jar.xml new file mode 100644 index 0000000..bc791de --- /dev/null +++ b/examples/jca/sjsas8_1/sun-ejb-jar.xml @@ -0,0 +1,21 @@ + + +EBJAR +1 + + SimpleBean + SimpleBean + false + + ra/JEConnectionFactory + ra/JEConnectionFactory + + + false + + + + diff --git a/examples/je/BindingExample.java b/examples/je/BindingExample.java new file mode 100644 index 0000000..02af109 --- /dev/null +++ b/examples/je/BindingExample.java @@ -0,0 +1,242 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package je; + +import java.io.File; +import java.io.Serializable; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; + +/** + * BindingExample operates in the same way as SimpleExample, but uses a + * IntegerBinding and a SerialBinding to map between Java objects and stored + * DatabaseEntry objects. + */ +class BindingExample { + private static final int EXIT_SUCCESS = 0; + private static final int EXIT_FAILURE = 1; + + private int numRecords; // num records to insert or retrieve + private int offset; // where we want to start inserting + private boolean doInsert; // if true, insert, else retrieve + private File envDir; + + public BindingExample(int numRecords, + boolean doInsert, + File envDir, + int offset) { + this.numRecords = numRecords; + this.doInsert = doInsert; + this.envDir = envDir; + this.offset = offset; + } + + /** + * Usage string + */ + public static void usage() { + System.out.println("usage: java " + + "je.BindingExample " + + " " + + " [offset]"); + System.exit(EXIT_FAILURE); + } + + /** + * Main + */ + public static void main(String argv[]) { + + if (argv.length < 2) { + usage(); + return; + } + File envHomeDirectory = new File(argv[0]); + + boolean doInsertArg = false; + if (argv[1].equalsIgnoreCase("insert")) { + doInsertArg = true; + } else if (argv[1].equalsIgnoreCase("retrieve")) { + doInsertArg = false; + } else { + usage(); + } + + int startOffset = 0; + int numRecordsVal = 0; + + if (doInsertArg) { + + if (argv.length > 2) { + numRecordsVal = Integer.parseInt(argv[2]); + } else { + usage(); + return; + } + + if (argv.length > 3) { + startOffset = Integer.parseInt(argv[3]); + } + } + + try { + BindingExample app = new BindingExample(numRecordsVal, + doInsertArg, + envHomeDirectory, + startOffset); + app.run(); + } catch (DatabaseException e) { + e.printStackTrace(); + System.exit(EXIT_FAILURE); + } + System.exit(EXIT_SUCCESS); + } + + /** + * Insert or retrieve data + */ + public void run() throws DatabaseException { + /* Create a new, transactional database environment */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + Environment exampleEnv = new Environment(envDir, envConfig); + + /* Make a database within that environment */ + Transaction txn = exampleEnv.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + Database exampleDb = exampleEnv.openDatabase(txn, + "bindingsDb", + dbConfig); + + /* + * In our example, the database record is composed of an integer + * key and and instance of the MyData class as data. + * + * A class catalog database is needed for storing class descriptions + * for the serial binding used below. This avoids storing class + * descriptions redundantly in each record. 
+ */ + DatabaseConfig catalogConfig = new DatabaseConfig(); + catalogConfig.setTransactional(true); + catalogConfig.setAllowCreate(true); + Database catalogDb = exampleEnv.openDatabase(txn, + "catalogDb", + catalogConfig); + StoredClassCatalog catalog = new StoredClassCatalog(catalogDb); + + /* + * Create a serial binding for MyData data objects. Serial bindings + * can be used to store any Serializable object. + */ + EntryBinding dataBinding = + new SerialBinding(catalog, MyData.class); + + txn.commit(); + + /* + * Further below we'll use a tuple binding (IntegerBinding + * specifically) for integer keys. Tuples, unlike serialized Java + * objects, have a well defined sort order. + */ + + /* DatabaseEntry represents the key and data of each record */ + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + + if (doInsert) { + + /* put some data in */ + for (int i = offset; i < numRecords + offset; i++) { + + StringBuilder stars = new StringBuilder(); + for (int j = 0; j < i; j++) { + stars.append('*'); + } + MyData data = new MyData(i, stars.toString()); + + IntegerBinding.intToEntry(i, keyEntry); + dataBinding.objectToEntry(data, dataEntry); + + txn = exampleEnv.beginTransaction(null, null); + OperationStatus status = + exampleDb.put(txn, keyEntry, dataEntry); + + /* + * Note that put will throw a DatabaseException when + * error conditions are found such as deadlock. + * However, the status return conveys a variety of + * information. For example, the put might succeed, + * or it might not succeed if the record exists + * and duplicates were not + */ + if (status != OperationStatus.SUCCESS) { + throw new RuntimeException("Data insertion got status " + + status); + } + txn.commit(); + } + } else { + + /* retrieve the data */ + Cursor cursor = exampleDb.openCursor(null, null); + + while (cursor.getNext(keyEntry, dataEntry, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + + int key = IntegerBinding.entryToInt(keyEntry); + MyData data = dataBinding.entryToObject(dataEntry); + + System.out.println("key=" + key + " data=" + data); + } + cursor.close(); + } + + catalogDb.close(); + exampleDb.close(); + exampleEnv.close(); + } + + @SuppressWarnings("serial") + private static class MyData implements Serializable { + + private int num; + private String msg; + + MyData(int number, String message) { + this.num = number; + this.msg = message; + } + + public String toString() { + return String.valueOf(num) + ' ' + msg; + } + } +} diff --git a/examples/je/MeasureInsertSize.java b/examples/je/MeasureInsertSize.java new file mode 100644 index 0000000..ce29ea6 --- /dev/null +++ b/examples/je/MeasureInsertSize.java @@ -0,0 +1,236 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package je; + +import java.io.File; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; + +/** + * MeasureInsertSize inserts a given set of key/value pairs in order to measure + * the disk space consumed by a given data set. + * + * To see how much disk space is consumed, simply add up the size of the log + * files or for a rough estimate multiply the number of files by 10 MB. + * + * This program does sequential inserts. For random inserts, more disk space + * will be consumed, especially if the entire data set does not fit in the + * cache. + * + * This program does not insert into secondary databases, but can be used to + * measure the size of a secondary by specifying the key and data sizes of the + * secondary records. The data size for a secondary record should be specified + * as the size of the primary key. + * + * Checkpoints are performed by this program as usual, and checkpoints will + * added to the size of the log. This is realistic for a typical application + * but note that a smaller disk size can be obtained using a bulk load. + * + * For command line parameters see the usage() method. + */ +public class MeasureInsertSize { + + private File home; + private int records; + private int keySize; + private int dataSize = -1; + private int insertsPerTxn; + private boolean deferredWrite; + private Environment env; + private Database db; + + public static void main(String args[]) { + try { + MeasureInsertSize example = new MeasureInsertSize(args); + example.open(); + example.doInserts(); + example.close(); + System.exit(0); + } catch (Exception e) { + e.printStackTrace(); + System.exit(1); + } + } + + public MeasureInsertSize(String[] args) { + for (int i = 0; i < args.length; i += 1) { + String name = args[i]; + String val = null; + if (i < args.length - 1 && !args[i + 1].startsWith("-")) { + i += 1; + val = args[i]; + } + if (name.equals("-h")) { + if (val == null) { + usage("No value after -h"); + } + home = new File(val); + } else if (name.equals("-records")) { + if (val == null) { + usage("No value after -records"); + } + try { + records = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (records <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-key")) { + if (val == null) { + usage("No value after -key"); + } + try { + keySize = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (keySize < 4) { + usage(val + " is not four or greater"); + } + } else if (name.equals("-data")) { + if (val == null) { + usage("No value after -data"); + } + try { + dataSize = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (dataSize < 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-txn")) { + if (val == null) { + usage("No value after -txn"); + } + try { + insertsPerTxn = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + } else if (name.equals("-deferredwrite")) { + deferredWrite = true; + } else { + usage("Unknown arg: " + name); + } + } + + if (home == null) { + usage("-h not specified"); + 
} + + if (records == 0) { + usage("-records not specified"); + } + + if (keySize == -1) { + usage("-key not specified"); + } + + if (dataSize == -1) { + usage("-data not specified"); + } + } + + private void usage(String msg) { + + if (msg != null) { + System.out.println(msg); + } + + System.out.println + ("usage:" + + "\njava " + MeasureInsertSize.class.getName() + + "\n -h " + + "\n # Environment home directory; required" + + "\n -records " + + "\n # Total records (key/data pairs); required" + + "\n -key " + + "\n # Average key bytes per record; required" + + "\n -data " + + "\n # Average data bytes per record; required" + + "\n [-txn ]" + + "\n # Inserts per txn; default: 0 (non-transactional)" + + "\n [-deferredwrite]" + + "\n # Use a Deferred Write database"); + + System.exit(2); + } + + private boolean isTransactional() { + return insertsPerTxn > 0; + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(isTransactional()); + env = new Environment(home, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(isTransactional()); + dbConfig.setDeferredWrite(deferredWrite); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void close() + throws DatabaseException { + + db.close(); + env.close(); + } + + public void doInserts() + throws DatabaseException { + + DatabaseEntry data = new DatabaseEntry(new byte[dataSize]); + DatabaseEntry key = new DatabaseEntry(); + byte[] keyBuffer = new byte[keySize]; + byte[] keyPadding = new byte[keySize - 4]; + + Transaction txn = null; + + for (int i = 1; i <= records; i += 1) { + + TupleOutput keyOutput = new TupleOutput(keyBuffer); + keyOutput.writeInt(i); + keyOutput.writeFast(keyPadding); + TupleBinding.outputToEntry(keyOutput, key); + + if (isTransactional() && txn == null) { + txn = env.beginTransaction(null, null); + } + + db.put(txn, key, data); + + if (txn != null && i % insertsPerTxn == 0) { + txn.commit(); + txn = null; + } + } + } +} diff --git a/examples/je/SecondaryExample.java b/examples/je/SecondaryExample.java new file mode 100644 index 0000000..3428507 --- /dev/null +++ b/examples/je/SecondaryExample.java @@ -0,0 +1,330 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package je;
+
+import java.io.File;
+import java.io.Serializable;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.Transaction;
+
+/**
+ * SecondaryExample operates in the same way as BindingExample, but adds a
+ * SecondaryDatabase for accessing the primary database by a secondary key.
+ */
+class SecondaryExample {
+    private static final int EXIT_SUCCESS = 0;
+    private static final int EXIT_FAILURE = 1;
+
+    private final int numRecords;   // num records to insert or retrieve
+    private final int offset;       // where we want to start inserting
+    private final boolean doInsert; // if true, insert, else retrieve
+    private final File envDir;
+
+    public SecondaryExample(int numRecords,
+                            boolean doInsert,
+                            File envDir,
+                            int offset) {
+        this.numRecords = numRecords;
+        this.doInsert = doInsert;
+        this.envDir = envDir;
+        this.offset = offset;
+    }
+
+    /**
+     * Usage string
+     */
+    public static void usage() {
+        System.out.println("usage: java " +
+                           "je.SecondaryExample " +
+                           "<dir> <insert|retrieve> " +
+                           "<numRecords> [offset]");
+        System.exit(EXIT_FAILURE);
+    }
+
+    public static void main(String argv[]) {
+
+        if (argv.length < 2) {
+            usage();
+            return;
+        }
+        File envHomeDirectory = new File(argv[0]);
+
+        boolean doInsertArg = false;
+        if (argv[1].equalsIgnoreCase("insert")) {
+            doInsertArg = true;
+        } else if (argv[1].equalsIgnoreCase("retrieve")) {
+            doInsertArg = false;
+        } else {
+            usage();
+        }
+
+        int startOffset = 0;
+        int numRecordsVal = 0;
+
+        if (doInsertArg) {
+
+            if (argv.length > 2) {
+                numRecordsVal = Integer.parseInt(argv[2]);
+            } else {
+                usage();
+                return;
+            }
+
+            if (argv.length > 3) {
+                startOffset = Integer.parseInt(argv[3]);
+            }
+        }
+
+        try {
+            SecondaryExample app = new SecondaryExample(numRecordsVal,
+                                                        doInsertArg,
+                                                        envHomeDirectory,
+                                                        startOffset);
+            app.run();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(EXIT_FAILURE);
+        }
+        System.exit(EXIT_SUCCESS);
+    }
+
+    /**
+     * Insert or retrieve data.
+     */
+    public void run() throws DatabaseException {
+
+        /* Create a new, transactional database environment. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        Environment exampleEnv = new Environment(envDir, envConfig);
+
+        /*
+         * Make a database within that environment. Because this will be used
+         * as a primary database, it must not allow duplicates. The primary key
+         * of a primary database must be unique.
+         */
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database exampleDb =
+            exampleEnv.openDatabase(txn, "bindingsDb", dbConfig);
+
+        /*
+         * In our example, the database record is composed of an integer key
+         * and an instance of the MyData class as data.
+         *
+         * A class catalog database is needed for storing class descriptions
+         * for the serial binding used below. This avoids storing class
+         * descriptions redundantly in each record.
+         */
+        DatabaseConfig catalogConfig = new DatabaseConfig();
+        catalogConfig.setTransactional(true);
+        catalogConfig.setAllowCreate(true);
+        Database catalogDb =
+            exampleEnv.openDatabase(txn, "catalogDb", catalogConfig);
+        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
+
+        /*
+         * Create a serial binding for MyData data objects. Serial
+         * bindings can be used to store any Serializable object.
+         */
+        EntryBinding dataBinding =
+            new SerialBinding(catalog, MyData.class);
+
+        /*
+         * Further below we'll use a tuple binding (IntegerBinding
+         * specifically) for integer keys. Tuples, unlike serialized
+         * Java objects, have a well defined sort order.
+         */
+
+        /*
+         * Define a String tuple binding for a secondary key. The
+         * secondary key is the msg field of the MyData object.
+         */
+        EntryBinding secKeyBinding =
+            TupleBinding.getPrimitiveBinding(String.class);
+
+        /*
+         * Open a secondary database to allow accessing the primary
+         * database by the secondary key value.
+         */
+        SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+        secConfig.setKeyCreator(new MyKeyCreator(secKeyBinding, dataBinding));
+        SecondaryDatabase exampleSecDb =
+            exampleEnv.openSecondaryDatabase(txn, "bindingsSecDb",
+                                             exampleDb, secConfig);
+        txn.commit();
+
+        /* DatabaseEntry represents the key and data of each record. */
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        if (doInsert) {
+
+            /*
+             * Put some data in. Note that the primary database is always used
+             * to add data. Adding or changing data in the secondary database
+             * is not allowed; however, deleting through the secondary database
+             * is allowed.
+             */
+            for (int i = offset; i < numRecords + offset; i++) {
+                txn = exampleEnv.beginTransaction(null, null);
+                StringBuilder stars = new StringBuilder();
+                for (int j = 0; j < i; j++) {
+                    stars.append('*');
+                }
+                MyData data = new MyData(i, stars.toString());
+
+                IntegerBinding.intToEntry(i, keyEntry);
+                dataBinding.objectToEntry(data, dataEntry);
+
+                OperationStatus status =
+                    exampleDb.put(txn, keyEntry, dataEntry);
+
+                /*
+                 * Note that put will throw a DatabaseException when error
+                 * conditions are found such as deadlock. However, the status
+                 * return conveys a variety of information. For example, the
+                 * put might succeed, or it might not succeed if the record
+                 * exists and duplicates were not allowed.
+                 */
+                if (status != OperationStatus.SUCCESS) {
+                    throw new RuntimeException("Data insertion got status " +
+                                               status);
+                }
+                txn.commit();
+            }
+        } else {
+
+            /*
+             * Retrieve the data by secondary key by opening a cursor on the
+             * secondary database. The key parameter for a secondary cursor is
+             * always the secondary key, but the data parameter is always the
+             * data of the primary database. You can cast the cursor to a
+             * SecondaryCursor and use additional method signatures for
+             * retrieving the primary key also. Or you can call
+             * openSecondaryCursor() to avoid casting.
+ */ + txn = exampleEnv.beginTransaction(null, null); + Cursor cursor = exampleSecDb.openCursor(txn, null); + + while (cursor.getNext(keyEntry, dataEntry, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + + String key = secKeyBinding.entryToObject(keyEntry); + MyData data = dataBinding.entryToObject(dataEntry); + + System.out.println("key=" + key + " data=" + data); + } + cursor.close(); + txn.commit(); + } + + /* + * Always close secondary databases before closing their associated + * primary database. + */ + catalogDb.close(); + exampleSecDb.close(); + exampleDb.close(); + exampleEnv.close(); + } + + @SuppressWarnings("serial") + private static class MyData implements Serializable { + + private final int num; + private final String msg; + + MyData(int number, String message) { + this.num = number; + this.msg = message; + } + + String getMessage() { + return msg; + } + + @Override + public String toString() { + return String.valueOf(num) + ' ' + msg; + } + } + + /** + * A key creator that knows how to extract the secondary key from the data + * entry of the primary database. To do so, it uses both the dataBinding + * of the primary database and the secKeyBinding. + */ + private static class MyKeyCreator implements SecondaryKeyCreator { + + private final EntryBinding secKeyBinding; + private final EntryBinding dataBinding; + + MyKeyCreator(EntryBinding secKeyBinding, + EntryBinding dataBinding) { + this.secKeyBinding = secKeyBinding; + this.dataBinding = dataBinding; + } + + public boolean createSecondaryKey(SecondaryDatabase secondaryDb, + DatabaseEntry keyEntry, + DatabaseEntry dataEntry, + DatabaseEntry resultEntry) { + + /* + * Convert the data entry to a MyData object, extract the secondary + * key value from it, and then convert it to the resulting + * secondary key entry. + */ + MyData data = dataBinding.entryToObject(dataEntry); + String key = data.getMessage(); + if (key != null) { + secKeyBinding.objectToEntry(key, resultEntry); + return true; + } else { + + /* + * The message property of MyData is optional, so if it is null + * then return false to prevent it from being indexed. Note + * that if a required key is missing or an error occurs, an + * exception should be thrown by this method. + */ + return false; + } + } + } +} diff --git a/examples/je/SequenceExample.java b/examples/je/SequenceExample.java new file mode 100644 index 0000000..86c0f7b --- /dev/null +++ b/examples/je/SequenceExample.java @@ -0,0 +1,93 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
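The retrieval loop above prints only the secondary key and the data. A minimal fragment showing the alternative the comment describes, recovering the primary key as well; it assumes the exampleSecDb, secKeyBinding, and txn handles opened earlier in SecondaryExample:

    // Iterate a secondary index and also recover each record's primary key.
    DatabaseEntry secKey = new DatabaseEntry();
    DatabaseEntry priKey = new DatabaseEntry();
    DatabaseEntry priData = new DatabaseEntry();
    SecondaryCursor secCursor = exampleSecDb.openSecondaryCursor(txn, null);
    try {
        while (secCursor.getNext(secKey, priKey, priData, LockMode.DEFAULT) ==
               OperationStatus.SUCCESS) {
            System.out.println("secondary key=" +
                               secKeyBinding.entryToObject(secKey) +
                               " primary key=" +
                               IntegerBinding.entryToInt(priKey));
        }
    } finally {
        secCursor.close();
    }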
+ */
+
+package je;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Sequence;
+import com.sleepycat.je.SequenceConfig;
+
+public class SequenceExample {
+    private static final int EXIT_SUCCESS = 0;
+    private static final int EXIT_FAILURE = 1;
+    private static final String DB_NAME = "sequence.db";
+    private static final String KEY_NAME = "my_sequence";
+
+    public SequenceExample() {
+    }
+
+    public static void usage() {
+        System.out.println("usage: java " +
+                           "je.SequenceExample " +
+                           "<envHomeDirectory>");
+        System.exit(EXIT_FAILURE);
+    }
+
+    public static void main(String[] argv) {
+
+        if (argv.length != 1) {
+            usage();
+            return;
+        }
+        File envHomeDirectory = new File(argv[0]);
+
+        try {
+            SequenceExample app = new SequenceExample();
+            app.run(envHomeDirectory);
+        } catch (Exception e) {
+            e.printStackTrace();
+            System.exit(EXIT_FAILURE);
+        }
+        System.exit(EXIT_SUCCESS);
+    }
+
+    public void run(File envHomeDirectory)
+        throws DatabaseException, IOException {
+
+        /* Create the environment object. */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHomeDirectory, envConfig);
+
+        /* Create the database object. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, DB_NAME, dbConfig);
+
+        /* Create the sequence object. */
+        SequenceConfig config = new SequenceConfig();
+        config.setAllowCreate(true);
+        DatabaseEntry key =
+            new DatabaseEntry(KEY_NAME.getBytes("UTF-8"));
+        Sequence seq = db.openSequence(null, key, config);
+
+        /* Allocate a few sequence numbers. */
+        for (int i = 0; i < 10; i++) {
+            long seqnum = seq.get(null, 1);
+            System.out.println("Got sequence number: " + seqnum);
+        }
+
+        /* Close all. */
+        seq.close();
+        db.close();
+        env.close();
+    }
+}
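Each seq.get() call above goes to the sequence record in the database. A sketch of the cached variation, using SequenceConfig.setCacheSize() so a handle claims a block of values up front; the cache size of 100 is arbitrary:

    // Variation: cache a block of sequence values per handle to cut write traffic.
    SequenceConfig config = new SequenceConfig();
    config.setAllowCreate(true);
    config.setCacheSize(100); // claim 100 values at a time from the database
    Sequence seq = db.openSequence(null, key, config);
    long seqnum = seq.get(null, 1); // usually served from the cache
    // Note: values still sitting in the cache may be skipped if the
    // application exits, so cached sequences can have gaps.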
diff --git a/examples/je/SimpleExample.java b/examples/je/SimpleExample.java
new file mode 100644
index 0000000..f8feaf5
--- /dev/null
+++ b/examples/je/SimpleExample.java
@@ -0,0 +1,207 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package je;
+
+import java.io.File;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+
+/**
+ * SimpleExample creates a database environment, a database, and a database
+ * cursor, then inserts and retrieves data.
+ */
+class SimpleExample {
+    private static final int EXIT_SUCCESS = 0;
+    private static final int EXIT_FAILURE = 1;
+
+    private int numRecords;   // num records to insert or retrieve
+    private int offset;       // where we want to start inserting
+    private boolean doInsert; // if true, insert, else retrieve
+    private File envDir;
+
+    public SimpleExample(int numRecords,
+                         boolean doInsert,
+                         File envDir,
+                         int offset) {
+        this.numRecords = numRecords;
+        this.doInsert = doInsert;
+        this.envDir = envDir;
+        this.offset = offset;
+    }
+
+    /**
+     * Usage string
+     */
+    public static void usage() {
+        System.out.println("usage: java " +
+                           "je.SimpleExample " +
+                           "<dir> <insert|retrieve> " +
+                           "<numRecords> [offset]");
+        System.exit(EXIT_FAILURE);
+    }
+
+    /**
+     * Main
+     */
+    public static void main(String argv[]) {
+
+        if (argv.length < 2) {
+            usage();
+            return;
+        }
+        File envHomeDirectory = new File(argv[0]);
+
+        boolean doInsertArg = false;
+        if (argv[1].equalsIgnoreCase("insert")) {
+            doInsertArg = true;
+        } else if (argv[1].equalsIgnoreCase("retrieve")) {
+            doInsertArg = false;
+        } else {
+            usage();
+        }
+
+        int startOffset = 0;
+        int numRecordsVal = 0;
+
+        if (doInsertArg) {
+
+            if (argv.length > 2) {
+                numRecordsVal = Integer.parseInt(argv[2]);
+            } else {
+                usage();
+                return;
+            }
+
+            if (argv.length > 3) {
+                startOffset = Integer.parseInt(argv[3]);
+            }
+        }
+
+        try {
+            SimpleExample app = new SimpleExample(numRecordsVal,
+                                                  doInsertArg,
+                                                  envHomeDirectory,
+                                                  startOffset);
+            app.run();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(EXIT_FAILURE);
+        }
+        System.exit(EXIT_SUCCESS);
+    }
+
+    /**
+     * Insert or retrieve data
+     */
+    public void run() throws DatabaseException {
+        /* Create a new, transactional database environment */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        Environment exampleEnv = new Environment(envDir, envConfig);
+
+        /*
+         * Make a database within that environment
+         *
+         * Notice that we use an explicit transaction to
+         * perform this database open, and that we
+         * immediately commit the transaction once the
+         * database is opened. This is required if we
+         * want transactional support for the database.
+         * However, we could have used autocommit to
+         * perform the same thing by simply passing a
+         * null txn handle to openDatabase().
+         */
+        Transaction txn = exampleEnv.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        Database exampleDb = exampleEnv.openDatabase(txn,
+                                                     "simpleDb",
+                                                     dbConfig);
+        txn.commit();
+
+        /*
+         * Insert or retrieve data. In our example, database records are
+         * integer pairs.
+         */
+
+        /* DatabaseEntry represents the key and data of each record */
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        if (doInsert) {
+
+            /* put some data in */
+            for (int i = offset; i < numRecords + offset; i++) {
+                /*
+                 * Note that autocommit mode, described in the Getting
+                 * Started Guide, is an alternative to explicitly
+                 * creating the transaction object.
+                 */
+                txn = exampleEnv.beginTransaction(null, null);
+
+                /* Use a binding to convert the int into a DatabaseEntry. */
+                IntegerBinding.intToEntry(i, keyEntry);
+                IntegerBinding.intToEntry(i+1, dataEntry);
+                OperationStatus status =
+                    exampleDb.put(txn, keyEntry, dataEntry);
+
+                /*
+                 * Note that put will throw a DatabaseException when
+                 * error conditions are found such as deadlock.
+                 * However, the status return conveys a variety of
+                 * information. For example, the put might succeed,
+                 * or it might not succeed if the record already exists
+                 * and the database was not configured for duplicate
+                 * records.
+                 */
+                if (status != OperationStatus.SUCCESS) {
+                    throw new RuntimeException("Data insertion got status " +
                                               status);
+                }
+                txn.commit();
+            }
+        } else {
+            /* retrieve the data */
+            Cursor cursor = exampleDb.openCursor(null, null);
+
+            while (cursor.getNext(keyEntry, dataEntry, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                System.out.println("key=" +
+                                   IntegerBinding.entryToInt(keyEntry) +
+                                   " data=" +
+                                   IntegerBinding.entryToInt(dataEntry));
+
+            }
+            cursor.close();
+        }
+
+        exampleDb.close();
+        exampleEnv.close();
+
+    }
+}
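The comments in SimpleExample twice mention autocommit as the alternative to an explicit transaction. A minimal sketch of that alternative, reusing the handles from run() above; with a transactional database, a null Transaction wraps the single operation in its own committed transaction:

    // Autocommit: no beginTransaction()/commit() bracket is needed.
    IntegerBinding.intToEntry(1, keyEntry);
    IntegerBinding.intToEntry(2, dataEntry);
    exampleDb.put(null, keyEntry, dataEntry); // implicitly committed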
diff --git a/examples/je/ToManyExample.java b/examples/je/ToManyExample.java
new file mode 100644
index 0000000..2ee7ea6
--- /dev/null
+++ b/examples/je/ToManyExample.java
@@ -0,0 +1,475 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package je;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.ForeignKeyDeleteAction;
+import com.sleepycat.je.ForeignMultiKeyNullifier;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryMultiKeyCreator;
+import com.sleepycat.je.Transaction;
+
+/**
+ * An example of using many-to-many and one-to-many secondary indices.
+ */
+public class ToManyExample {
+
+    private final Environment env;
+    private Database catalogDb;
+    private Database animalDb;
+    private Database personDb;
+    private SecondaryDatabase personByEmail;
+    private SecondaryDatabase personByAnimal;
+    private EntryBinding keyBinding;
+    private EntryBinding personBinding;
+    private EntryBinding animalBinding;
+
+    /**
+     * Runs the example program, given a single "-h HOME" argument.
+ */ + public static void main(String[] args) { + + if (args.length != 2 || !"-h".equals(args[0])) { + System.out.println("Usage: java " + + ToManyExample.class.getName() + + " -h ENV_HOME"); + System.exit(1); + } + String homeDir = args[1]; + + try { + ToManyExample example = new ToManyExample(homeDir); + example.exec(); + example.close(); + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + + /** + * Opens the environment and all databases. + */ + private ToManyExample(String homeDir) throws DatabaseException { + + /* Open the environment. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(new File(homeDir), envConfig); + + /* Open/create all databases in a transaction. */ + Transaction txn = env.beginTransaction(null, null); + try { + /* A standard (no duplicates) database config. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + /* The catalog is used for the serial binding. */ + catalogDb = env.openDatabase(txn, "catalog", dbConfig); + StoredClassCatalog catalog = new StoredClassCatalog(catalogDb); + personBinding = new SerialBinding(catalog, null); + animalBinding = new SerialBinding(catalog, null); + keyBinding = new StringBinding(); + + /* Open the person and animal primary DBs. */ + animalDb = env.openDatabase(txn, "animal", dbConfig); + personDb = env.openDatabase(txn, "person", dbConfig); + + /* + * A standard secondary config; duplicates, key creators and key + * nullifiers are specified below. + */ + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setAllowCreate(true); + secConfig.setTransactional(true); + + /* + * Open the secondary database for personByEmail. This is a + * one-to-many index because duplicates are not configured. + */ + secConfig.setSortedDuplicates(false); + secConfig.setMultiKeyCreator(new EmailKeyCreator()); + personByEmail = env.openSecondaryDatabase(txn, "personByEmail", + personDb, secConfig); + + /* + * Open the secondary database for personByAnimal. This is a + * many-to-many index because duplicates are configured. Foreign + * key constraints are specified to ensure that all animal keys + * exist in the animal database. + */ + secConfig.setSortedDuplicates(true); + secConfig.setMultiKeyCreator(new AnimalKeyCreator()); + secConfig.setForeignMultiKeyNullifier(new AnimalKeyNullifier()); + secConfig.setForeignKeyDatabase(animalDb); + secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY); + personByAnimal = env.openSecondaryDatabase(txn, "personByAnimal", + personDb, secConfig); + + txn.commit(); + } catch (DatabaseException e) { + txn.abort(); + throw e; + } catch (RuntimeException e) { + txn.abort(); + throw e; + } + } + + /** + * Closes all databases and the environment. + */ + private void close() throws DatabaseException { + + if (personByEmail != null) { + personByEmail.close(); + } + if (personByAnimal != null) { + personByAnimal.close(); + } + if (catalogDb != null) { + catalogDb.close(); + } + if (personDb != null) { + personDb.close(); + } + if (animalDb != null) { + animalDb.close(); + } + if (env != null) { + env.close(); + } + } + + /** + * Adds, updates, prints and deletes Person records with many-to-many and + * one-to-many secondary indices. 
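The constructor above configures personByAnimal with ForeignKeyDeleteAction.NULLIFY, which is why deleting an animal later only trims favorites. For comparison, a sketch of all three policies JE offers for deletes in the foreign key database; a real secConfig takes exactly one of these, they are listed together only to contrast them:

    // What happens when a referenced animal record is deleted:
    secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.ABORT);   // refuse the delete
    secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE); // delete referencing person records
    secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY); // invoke the nullifier, as above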
+ */ + private void exec() + throws DatabaseException { + + System.out.println + ("\nInsert some animals."); + Animal dogs = insertAndPrintAnimal("dogs", true); + Animal fish = insertAndPrintAnimal("fish", false); + Animal horses = insertAndPrintAnimal("horses", true); + Animal donkeys = insertAndPrintAnimal("donkeys", true); + + System.out.println + ("\nInsert a new empty person."); + Person kathy = new Person(); + kathy.name = "Kathy"; + putPerson(kathy); + printPerson("Kathy"); + + System.out.println + ("\nAdd favorites/addresses and update the record."); + kathy.favoriteAnimals.add(horses.name); + kathy.favoriteAnimals.add(dogs.name); + kathy.favoriteAnimals.add(fish.name); + kathy.emailAddresses.add("kathy@kathy.com"); + kathy.emailAddresses.add("kathy@yahoo.com"); + putPerson(kathy); + printPerson("Kathy"); + + System.out.println + ("\nChange favorites and addresses and update the person record."); + kathy.favoriteAnimals.remove(fish.name); + kathy.favoriteAnimals.add(donkeys.name); + kathy.emailAddresses.add("kathy@gmail.com"); + kathy.emailAddresses.remove("kathy@yahoo.com"); + putPerson(kathy); + printPerson("Kathy"); + + System.out.println + ("\nInsert another person with some of the same favorites."); + Person mark = new Person(); + mark.favoriteAnimals.add(dogs.name); + mark.favoriteAnimals.add(horses.name); + mark.name = "Mark"; + putPerson(mark); + printPerson("Mark"); + + System.out.println + ("\nPrint by favorite animal index."); + printByIndex(personByAnimal); + + System.out.println + ("\nPrint by email address index."); + printByIndex(personByEmail); + + System.out.println + ("\nDelete 'dogs' and print again by favorite animal index."); + deleteAnimal(dogs.name); + printPerson("Kathy"); + printPerson("Mark"); + printByIndex(personByAnimal); + + System.out.println + ("\nDelete both records and print again (should print nothing)."); + deletePerson("Kathy"); + deletePerson("Mark"); + printPerson("Kathy"); + printPerson("Mark"); + printByIndex(personByAnimal); + printByIndex(personByEmail); + } + + /** + * Inserts an animal record and prints it. Uses auto-commit. + */ + private Animal insertAndPrintAnimal(String name, boolean furry) + throws DatabaseException { + + Animal animal = new Animal(); + animal.name = name; + animal.furry = furry; + + DatabaseEntry key = new DatabaseEntry(); + keyBinding.objectToEntry(name, key); + + DatabaseEntry data = new DatabaseEntry(); + animalBinding.objectToEntry(animal, data); + + OperationStatus status = animalDb.putNoOverwrite(null, key, data); + if (status == OperationStatus.SUCCESS) { + System.out.println(animal); + } else { + System.out.println("Animal was not inserted: " + name + + " (" + status + ')'); + } + + return animal; + } + + /** + * Deletes an animal. Uses auto-commit. + */ + private boolean deleteAnimal(String name) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + keyBinding.objectToEntry(name, key); + + OperationStatus status = animalDb.delete(null, key); + return status == OperationStatus.SUCCESS; + } + + /** + * Gets a person by name and prints it. 
+ */ + private void printPerson(String name) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + keyBinding.objectToEntry(name, key); + + DatabaseEntry data = new DatabaseEntry(); + + OperationStatus status = personDb.get(null, key, data, null); + if (status == OperationStatus.SUCCESS) { + Person person = personBinding.entryToObject(data); + person.name = keyBinding.entryToObject(key); + System.out.println(person); + } else { + System.out.println("Person not found: " + name); + } + } + + /** + * Prints all person records by a given secondary index. + */ + private void printByIndex(SecondaryDatabase secDb) + throws DatabaseException { + + DatabaseEntry secKey = new DatabaseEntry(); + DatabaseEntry priKey = new DatabaseEntry(); + DatabaseEntry priData = new DatabaseEntry(); + + SecondaryCursor cursor = secDb.openSecondaryCursor(null, null); + try { + while (cursor.getNext(secKey, priKey, priData, null) == + OperationStatus.SUCCESS) { + Person person = personBinding.entryToObject(priData); + person.name = keyBinding.entryToObject(priKey); + System.out.println("Index key [" + + keyBinding.entryToObject(secKey) + + "] maps to primary key [" + + person.name + ']'); + } + } finally { + cursor.close(); + } + } + + /** + * Inserts or updates a person. Uses auto-commit. + */ + private void putPerson(Person person) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + keyBinding.objectToEntry(person.name, key); + + DatabaseEntry data = new DatabaseEntry(); + personBinding.objectToEntry(person, data); + + personDb.put(null, key, data); + } + + /** + * Deletes a person. Uses auto-commit. + */ + private boolean deletePerson(String name) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + keyBinding.objectToEntry(name, key); + + OperationStatus status = personDb.delete(null, key); + return status == OperationStatus.SUCCESS; + } + + /** + * A person object. + */ + @SuppressWarnings("serial") + private static class Person implements Serializable { + + /** The primary key. */ + private transient String name; + + /** A many-to-many set of keys. */ + private final Set favoriteAnimals = new HashSet(); + + /** A one-to-many set of keys. */ + private final Set emailAddresses = new HashSet(); + + @Override + public String toString() { + return "Person {" + + "\n Name: " + name + + "\n FavoriteAnimals: " + favoriteAnimals + + "\n EmailAddresses: " + emailAddresses + + "\n}"; + } + } + + /** + * An animal object. + */ + @SuppressWarnings("serial") + private static class Animal implements Serializable { + + /** The primary key. */ + private transient String name; + + /** A non-indexed property. */ + private boolean furry; + + @Override + public String toString() { + return "Animal {" + + "\n Name: " + name + + "\n Furry: " + furry + + "\n}"; + } + } + + /** + * Returns the set of email addresses for a person. This is an example + * of a multi-key creator for a to-many index. + */ + private class EmailKeyCreator implements SecondaryMultiKeyCreator { + + public void createSecondaryKeys(SecondaryDatabase secondary, + DatabaseEntry primaryKey, + DatabaseEntry primaryData, + Set results) { + Person person = personBinding.entryToObject(primaryData); + copyKeysToEntries(person.emailAddresses, results); + } + } + + /** + * Returns the set of favorite animals for a person. This is an example + * of a multi-key creator for a to-many index. 
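EmailKeyCreator above (and AnimalKeyCreator just below) show the whole SecondaryMultiKeyCreator contract: add one DatabaseEntry per derived key to the results set, or add nothing to leave the record unindexed. A hypothetical third index following the same pattern, assuming a nicknames Set were added to Person (that field does not exist in this example) and reusing the copyKeysToEntries utility shown further below:

    // Hypothetical: index each person by every nickname.
    private class NicknameKeyCreator implements SecondaryMultiKeyCreator {

        public void createSecondaryKeys(SecondaryDatabase secondary,
                                        DatabaseEntry primaryKey,
                                        DatabaseEntry primaryData,
                                        Set results) {
            Person person = (Person) personBinding.entryToObject(primaryData);
            copyKeysToEntries(person.nicknames, results); // hypothetical field
        }
    }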
+ */ + private class AnimalKeyCreator implements SecondaryMultiKeyCreator { + + public void createSecondaryKeys(SecondaryDatabase secondary, + DatabaseEntry primaryKey, + DatabaseEntry primaryData, + Set results) { + Person person = personBinding.entryToObject(primaryData); + copyKeysToEntries(person.favoriteAnimals, results); + } + } + + /** + * A utility method to copy a set of keys (Strings) into a set of + * DatabaseEntry objects. + */ + private void copyKeysToEntries(Set keys, + Set entries) { + + for (Iterator i = keys.iterator(); i.hasNext();) { + DatabaseEntry entry = new DatabaseEntry(); + keyBinding.objectToEntry(i.next(), entry); + entries.add(entry); + } + } + + /** + * Removes a given key from the set of favorite animals for a person. This + * is an example of a nullifier for a to-many index. The nullifier is + * called when an animal record is deleted because we configured this + * secondary with ForeignKeyDeleteAction.NULLIFY. + */ + private class AnimalKeyNullifier implements ForeignMultiKeyNullifier { + + public boolean nullifyForeignKey(SecondaryDatabase secondary, + DatabaseEntry primaryKey, + DatabaseEntry primaryData, + DatabaseEntry secKey) { + Person person = personBinding.entryToObject(primaryData); + String key = keyBinding.entryToObject(secKey); + if (person.favoriteAnimals.remove(key)) { + personBinding.objectToEntry(person, primaryData); + return true; + } else { + return false; + } + } + } +} diff --git a/examples/je/gettingStarted/ExampleDatabasePut.java b/examples/je/gettingStarted/ExampleDatabasePut.java new file mode 100644 index 0000000..b40fa59 --- /dev/null +++ b/examples/je/gettingStarted/ExampleDatabasePut.java @@ -0,0 +1,237 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +// file: ExampleDatabasePut.java + +package je.gettingStarted; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Transaction; + +public class ExampleDatabasePut { + + private static File myDbEnvPath = new File("/tmp/JEDB"); + private static File inventoryFile = new File("./inventory.txt"); + private static File vendorsFile = new File("./vendors.txt"); + + // DatabaseEntries used for loading records + private static DatabaseEntry theKey = new DatabaseEntry(); + private static DatabaseEntry theData = new DatabaseEntry(); + + // Encapsulates the environment and databases. 
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+
+    private static void usage() {
+        System.out.println("ExampleDatabasePut [-h <database environment path>]");
+        System.out.println("                   [-i <inventory file>] [-v <vendors file>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleDatabasePut edp = new ExampleDatabasePut();
+        try {
+            edp.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleDatabasePut: " + dbe.toString());
+            dbe.printStackTrace();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+
+    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      false);      // is this environment read-only?
+
+        System.out.println("loading vendors db....");
+        loadVendorsDb();
+
+        System.out.println("loading inventory db....");
+        loadInventoryDb();
+    }
+
+    private void loadVendorsDb()
+        throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List<String[]> vendors = loadFile(vendorsFile, 8);
+
+        // Now load the data into the database. The vendor's name is the
+        // key, and the data is a Vendor class object.
+
+        // Need a serial binding for the data
+        EntryBinding dataBinding =
+            new SerialBinding(myDbEnv.getClassCatalog(), Vendor.class);
+
+        for (int i = 0; i < vendors.size(); i++) {
+            String[] sArray = vendors.get(i);
+            Vendor theVendor = new Vendor();
+            theVendor.setVendorName(sArray[0]);
+            theVendor.setAddress(sArray[1]);
+            theVendor.setCity(sArray[2]);
+            theVendor.setState(sArray[3]);
+            theVendor.setZipcode(sArray[4]);
+            theVendor.setBusinessPhoneNumber(sArray[5]);
+            theVendor.setRepName(sArray[6]);
+            theVendor.setRepPhoneNumber(sArray[7]);
+
+            // The key is the vendor's name.
+            // ASSUMES THE VENDOR'S NAME IS UNIQUE!
+            String vendorName = theVendor.getVendorName();
+            try {
+                theKey = new DatabaseEntry(vendorName.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            // Convert the Vendor object to a DatabaseEntry object
+            // using our SerialBinding
+            dataBinding.objectToEntry(theVendor, theData);
+
+            // Put it in the database. These puts are transactionally protected
+            // (we're using autocommit).
+            myDbEnv.getVendorDB().put(null, theKey, theData);
+        }
+    }
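loadVendorsDb() above assumes vendor names are unique; a plain put() silently overwrites an earlier record with the same name. A defensive variation of that put, a fragment assuming the same handles and an OperationStatus import; putNoOverwrite() returns KEYEXIST instead of replacing:

    // Surface duplicate vendor names instead of silently overwriting.
    OperationStatus status =
        myDbEnv.getVendorDB().putNoOverwrite(null, theKey, theData);
    if (status == OperationStatus.KEYEXIST) {
        System.err.println("Duplicate vendor name: " +
                           theVendor.getVendorName());
    }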
+
+    private void loadInventoryDb()
+        throws DatabaseException {
+
+        // loadFile opens a flat-text file that contains our data
+        // and loads it into a list for us to work with. The integer
+        // parameter represents the number of fields expected in the
+        // file.
+        List<String[]> inventoryArray = loadFile(inventoryFile, 6);
+
+        // Now load the data into the database. The item's sku is the
+        // key, and the data is an Inventory class object.
+
+        // Need a tuple binding for the Inventory class.
+        TupleBinding inventoryBinding = new InventoryBinding();
+
+        // Start a transaction. All inventory items get loaded using a
+        // single transaction.
+        Transaction txn = myDbEnv.getEnv().beginTransaction(null, null);
+
+        for (int i = 0; i < inventoryArray.size(); i++) {
+            String[] sArray = inventoryArray.get(i);
+            String sku = sArray[1];
+            try {
+                theKey = new DatabaseEntry(sku.getBytes("UTF-8"));
+            } catch (IOException willNeverOccur) {}
+
+            Inventory theInventory = new Inventory();
+            theInventory.setItemName(sArray[0]);
+            theInventory.setSku(sArray[1]);
+            theInventory.setVendorPrice(Float.parseFloat(sArray[2]));
+            theInventory.setVendorInventory(Integer.parseInt(sArray[3]));
+            theInventory.setCategory(sArray[4]);
+            theInventory.setVendor(sArray[5]);
+
+            // Place the Inventory object on the DatabaseEntry object using
+            // the tuple binding we implemented in InventoryBinding.java
+            inventoryBinding.objectToEntry(theInventory, theData);
+
+            // Put it in the database. Note that this causes our secondary database
+            // to be automatically updated for us.
+            try {
+                myDbEnv.getInventoryDB().put(txn, theKey, theData);
+            } catch (DatabaseException dbe) {
+                System.out.println("Error putting entry " + sku);
+                txn.abort();
+                throw dbe;
+            }
+        }
+        // Commit the transaction. The data is now safely written to the
+        // inventory database.
+        txn.commit();
+    }
+
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        myDbEnvPath = new File(args[++i]);
+                        break;
+                    case 'i':
+                        inventoryFile = new File(args[++i]);
+                        break;
+                    case 'v':
+                        vendorsFile = new File(args[++i]);
+                        break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+
+    private List<String[]> loadFile(File theFile, int numFields) {
+        List<String[]> records = new ArrayList<String[]>();
+        try {
+            String theLine = null;
+            FileInputStream fis = new FileInputStream(theFile);
+            BufferedReader br = new BufferedReader(new InputStreamReader(fis));
+            while((theLine=br.readLine()) != null) {
+                String[] theLineArray = theLine.split("#");
+                if (theLineArray.length != numFields) {
+                    System.out.println("Malformed line found in " + theFile.getPath());
+                    System.out.println("Line was: '" + theLine + "'");
+                    System.out.println("length found was: " + theLineArray.length);
+                    System.exit(-1);
+                }
+                records.add(theLineArray);
+            }
+            // Close the input stream handle
+            fis.close();
+        } catch (FileNotFoundException e) {
+            System.err.println(theFile.getPath() + " does not exist.");
+            e.printStackTrace();
+            usage();
+        } catch (IOException e) {
+            System.err.println("IO Exception: " + e.toString());
+            e.printStackTrace();
+            System.exit(-1);
+        }
+        return records;
+    }
+
+    protected ExampleDatabasePut() {}
+}
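loadFile() above expects '#'-separated fields; the field order for inventory records is fixed by loadInventoryDb(). A short sketch of parsing one line, using a record taken verbatim from the inventory.txt data further below:

    // The flat-file layout loadFile() expects for a 6-field inventory line.
    String theLine = "Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce";
    String[] fields = theLine.split("#");
    // fields[0]=item name, [1]=sku, [2]=vendor price,
    // [3]=vendor inventory, [4]=category, [5]=vendor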
diff --git a/examples/je/gettingStarted/ExampleInventoryRead.java b/examples/je/gettingStarted/ExampleInventoryRead.java
new file mode 100644
index 0000000..8f28e43
--- /dev/null
+++ b/examples/je/gettingStarted/ExampleInventoryRead.java
@@ -0,0 +1,213 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+// file ExampleInventoryRead
+
+package je.gettingStarted;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.bind.serial.SerialBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.SecondaryCursor;
+
+public class ExampleInventoryRead {
+
+    private static File myDbEnvPath =
+        new File("/tmp/JEDB");
+
+    // Encapsulates the database environment and databases.
+    private static MyDbEnv myDbEnv = new MyDbEnv();
+
+    private static TupleBinding inventoryBinding;
+    private static EntryBinding vendorBinding;
+
+    // The item to locate if the -s switch is used
+    private static String locateItem;
+
+    private static void usage() {
+        System.out.println("ExampleInventoryRead [-h <env directory>] " +
+                           "[-s <item to locate>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        ExampleInventoryRead eir = new ExampleInventoryRead();
+        try {
+            eir.run(args);
+        } catch (DatabaseException dbe) {
+            System.err.println("ExampleInventoryRead: " + dbe.toString());
+            dbe.printStackTrace();
+        } finally {
+            myDbEnv.close();
+        }
+        System.out.println("All done.");
+    }
+
+    private void run(String args[])
+        throws DatabaseException {
+        // Parse the arguments list
+        parseArgs(args);
+
+        myDbEnv.setup(myDbEnvPath, // path to the environment home
+                      true);       // is this environment read-only?
+
+        // Setup our bindings.
+        inventoryBinding = new InventoryBinding();
+        vendorBinding =
+            new SerialBinding(myDbEnv.getClassCatalog(),
+                              Vendor.class);
+
+        if (locateItem != null) {
+            showItem();
+        } else {
+            showAllInventory();
+        }
+    }
+
+    private void showItem() throws DatabaseException {
+
+        SecondaryCursor secCursor = null;
+        try {
+            // searchKey is the key that we want to find in the
+            // secondary db.
+            DatabaseEntry searchKey =
+                new DatabaseEntry(locateItem.getBytes("UTF-8"));
+
+            // foundKey and foundData are populated from the primary
+            // entry that is associated with the secondary db key.
+            DatabaseEntry foundKey = new DatabaseEntry();
+            DatabaseEntry foundData = new DatabaseEntry();
+
+            // open a secondary cursor
+            secCursor =
+                myDbEnv.getNameIndexDB().openSecondaryCursor(null, null);
+
+            // Search for the secondary database entry.
+            OperationStatus retVal =
+                secCursor.getSearchKey(searchKey, foundKey,
+                                       foundData, LockMode.DEFAULT);
+
+            // Display the entry, if one is found.
Repeat until no more + // secondary duplicate entries are found + while(retVal == OperationStatus.SUCCESS) { + Inventory theInventory = + (Inventory)inventoryBinding.entryToObject(foundData); + displayInventoryRecord(foundKey, theInventory); + retVal = secCursor.getNextDup(searchKey, foundKey, + foundData, LockMode.DEFAULT); + } + } catch (Exception e) { + System.err.println("Error on inventory secondary cursor:"); + System.err.println(e.toString()); + e.printStackTrace(); + } finally { + if (secCursor != null) { + secCursor.close(); + } + } + } + + private void showAllInventory() + throws DatabaseException { + // Get a cursor + Cursor cursor = myDbEnv.getInventoryDB().openCursor(null, null); + + // DatabaseEntry objects used for reading records + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + try { // always want to make sure the cursor gets closed + while (cursor.getNext(foundKey, foundData, + LockMode.DEFAULT) == OperationStatus.SUCCESS) { + Inventory theInventory = + (Inventory)inventoryBinding.entryToObject(foundData); + displayInventoryRecord(foundKey, theInventory); + } + } catch (Exception e) { + System.err.println("Error on inventory cursor:"); + System.err.println(e.toString()); + e.printStackTrace(); + } finally { + cursor.close(); + } + } + + private void displayInventoryRecord(DatabaseEntry theKey, + Inventory theInventory) + throws DatabaseException { + + DatabaseEntry searchKey = null; + try { + String theSKU = new String(theKey.getData(), "UTF-8"); + System.out.println(theSKU + ":"); + System.out.println("\t " + theInventory.getItemName()); + System.out.println("\t " + theInventory.getCategory()); + System.out.println("\t " + theInventory.getVendor()); + System.out.println("\t\tNumber in stock: " + + theInventory.getVendorInventory()); + System.out.println("\t\tPrice per unit: " + + theInventory.getVendorPrice()); + System.out.println("\t\tContact: "); + + searchKey = + new DatabaseEntry(theInventory.getVendor().getBytes("UTF-8")); + } catch (IOException willNeverOccur) {} + DatabaseEntry foundVendor = new DatabaseEntry(); + + if (myDbEnv.getVendorDB().get(null, searchKey, foundVendor, + LockMode.DEFAULT) != OperationStatus.SUCCESS) { + System.out.println("Could not find vendor: " + + theInventory.getVendor() + "."); + System.exit(-1); + } else { + Vendor theVendor = + (Vendor)vendorBinding.entryToObject(foundVendor); + System.out.println("\t\t " + theVendor.getAddress()); + System.out.println("\t\t " + theVendor.getCity() + ", " + + theVendor.getState() + " " + theVendor.getZipcode()); + System.out.println("\t\t Business Phone: " + + theVendor.getBusinessPhoneNumber()); + System.out.println("\t\t Sales Rep: " + + theVendor.getRepName()); + System.out.println("\t\t " + + theVendor.getRepPhoneNumber()); + } + } + + protected ExampleInventoryRead() {} + + private static void parseArgs(String args[]) { + for(int i = 0; i < args.length; ++i) { + if (args[i].startsWith("-")) { + switch(args[i].charAt(1)) { + case 'h': + myDbEnvPath = new File(args[++i]); + break; + case 's': + locateItem = new String(args[++i]); + break; + default: + usage(); + } + } + } + } +} diff --git a/examples/je/gettingStarted/Inventory.java b/examples/je/gettingStarted/Inventory.java new file mode 100644 index 0000000..4d5ef62 --- /dev/null +++ b/examples/je/gettingStarted/Inventory.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +// file Inventory.java + +package je.gettingStarted; + +public class Inventory { + + private String sku; + private String itemName; + private String category; + private String vendor; + private int vendorInventory; + private float vendorPrice; + + public void setSku(String data) { + sku = data; + } + + public void setItemName(String data) { + itemName = data; + } + + public void setCategory(String data) { + category = data; + } + + public void setVendorInventory(int data) { + vendorInventory = data; + } + + public void setVendor(String data) { + vendor = data; + } + + public void setVendorPrice(float data) { + vendorPrice = data; + } + + public String getSku() { + return sku; + } + + public String getItemName() { + return itemName; + } + + public String getCategory() { + return category; + } + + public int getVendorInventory() { + return vendorInventory; + } + + public String getVendor() { + return vendor; + } + + public float getVendorPrice() { + return vendorPrice; + } +} diff --git a/examples/je/gettingStarted/InventoryBinding.java b/examples/je/gettingStarted/InventoryBinding.java new file mode 100644 index 0000000..66e0222 --- /dev/null +++ b/examples/je/gettingStarted/InventoryBinding.java @@ -0,0 +1,59 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +// file InventoryBinding.java + +package je.gettingStarted; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +public class InventoryBinding extends TupleBinding { + + // Implement this abstract method. Used to convert + // a DatabaseEntry to an Inventory object. + public Object entryToObject(TupleInput ti) { + + String sku = ti.readString(); + String itemName = ti.readString(); + String category = ti.readString(); + String vendor = ti.readString(); + int vendorInventory = ti.readInt(); + float vendorPrice = ti.readFloat(); + + Inventory inventory = new Inventory(); + inventory.setSku(sku); + inventory.setItemName(itemName); + inventory.setCategory(category); + inventory.setVendor(vendor); + inventory.setVendorInventory(vendorInventory); + inventory.setVendorPrice(vendorPrice); + + return inventory; + } + + // Implement this abstract method. Used to convert a + // Inventory object to a DatabaseEntry object. 
+ public void objectToEntry(Object object, TupleOutput to) { + + Inventory inventory = (Inventory)object; + + to.writeString(inventory.getSku()); + to.writeString(inventory.getItemName()); + to.writeString(inventory.getCategory()); + to.writeString(inventory.getVendor()); + to.writeInt(inventory.getVendorInventory()); + to.writeFloat(inventory.getVendorPrice()); + } +} diff --git a/examples/je/gettingStarted/ItemNameKeyCreator.java b/examples/je/gettingStarted/ItemNameKeyCreator.java new file mode 100644 index 0000000..95fec59 --- /dev/null +++ b/examples/je/gettingStarted/ItemNameKeyCreator.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +// file ItemNameKeyCreator.java + +package je.gettingStarted; + +import java.io.IOException; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; + +public class ItemNameKeyCreator implements SecondaryKeyCreator { + + private final TupleBinding theBinding; + + // Use the constructor to set the tuple binding + ItemNameKeyCreator(TupleBinding binding) { + theBinding = binding; + } + + // Abstract method that we must implement + public boolean createSecondaryKey(SecondaryDatabase secDb, + DatabaseEntry keyEntry, // From the primary + DatabaseEntry dataEntry, // From the primary + DatabaseEntry resultEntry) { // set the key data on this. + if (dataEntry != null) { + // Convert dataEntry to an Inventory object + Inventory inventoryItem = + (Inventory)theBinding.entryToObject(dataEntry); + // Get the item name and use that as the key + String theItem = inventoryItem.getItemName(); + try { + resultEntry.setData(theItem.getBytes("UTF-8")); + } catch (IOException willNeverOccur) {} + } + return true; + } +} diff --git a/examples/je/gettingStarted/MyDbEnv.java b/examples/je/gettingStarted/MyDbEnv.java new file mode 100644 index 0000000..010cbc5 --- /dev/null +++ b/examples/je/gettingStarted/MyDbEnv.java @@ -0,0 +1,167 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
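A note on the tuple binding defined above: objectToEntry() must write fields in exactly the order entryToObject() reads them, or the round trip silently scrambles the record. A minimal sketch of that round trip, using values from one of the inventory.txt records below:

    // Round-trip an Inventory object through InventoryBinding.
    InventoryBinding binding = new InventoryBinding();
    DatabaseEntry entry = new DatabaseEntry();

    Inventory in = new Inventory();
    in.setSku("OranfruiRu6Ghr");
    in.setItemName("Oranges");
    in.setCategory("fruits");
    in.setVendor("TriCounty Produce");
    in.setVendorInventory(451);
    in.setVendorPrice(0.71f);

    binding.objectToEntry(in, entry);                          // to tuple bytes
    Inventory out = (Inventory) binding.entryToObject(entry);  // and back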
+ */ + +// file MyDbEnv.java + +package je.gettingStarted; + +import java.io.File; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; + +public class MyDbEnv { + + private Environment myEnv; + + // The databases that our application uses + private Database vendorDb; + private Database inventoryDb; + private Database classCatalogDb; + private SecondaryDatabase itemNameIndexDb; + + // Needed for object serialization + private StoredClassCatalog classCatalog; + + // Our constructor does nothing + public MyDbEnv() {} + + // The setup() method opens all our databases and the environment + // for us. + public void setup(File envHome, boolean readOnly) + throws DatabaseException { + + EnvironmentConfig myEnvConfig = new EnvironmentConfig(); + DatabaseConfig myDbConfig = new DatabaseConfig(); + SecondaryConfig mySecConfig = new SecondaryConfig(); + + // If the environment is read-only, then + // make the databases read-only too. + myEnvConfig.setReadOnly(readOnly); + myDbConfig.setReadOnly(readOnly); + mySecConfig.setReadOnly(readOnly); + + // If the environment is opened for write, then we want to be + // able to create the environment and databases if + // they do not exist. + myEnvConfig.setAllowCreate(!readOnly); + myDbConfig.setAllowCreate(!readOnly); + mySecConfig.setAllowCreate(!readOnly); + + // Allow transactions if we are writing to the database + myEnvConfig.setTransactional(!readOnly); + myDbConfig.setTransactional(!readOnly); + mySecConfig.setTransactional(!readOnly); + + // Open the environment + myEnv = new Environment(envHome, myEnvConfig); + + // Now open, or create and open, our databases + // Open the vendors and inventory databases + vendorDb = myEnv.openDatabase(null, + "VendorDB", + myDbConfig); + + inventoryDb = myEnv.openDatabase(null, + "InventoryDB", + myDbConfig); + + // Open the class catalog db. This is used to + // optimize class serialization. + classCatalogDb = + myEnv.openDatabase(null, + "ClassCatalogDB", + myDbConfig); + + // Create our class catalog + classCatalog = new StoredClassCatalog(classCatalogDb); + + // Need a tuple binding for the Inventory class. + // We use the InventoryBinding class + // that we implemented for this purpose. + TupleBinding inventoryBinding = new InventoryBinding(); + + // Open the secondary database. We use this to create a + // secondary index for the inventory database + + // We want to maintain an index for the inventory entries based + // on the item name. So, instantiate the appropriate key creator + // and open a secondary database. 
+ ItemNameKeyCreator keyCreator = + new ItemNameKeyCreator(inventoryBinding); + + // Set up additional secondary properties + // Need to allow duplicates for our secondary database + mySecConfig.setSortedDuplicates(true); + mySecConfig.setAllowPopulate(true); // Allow autopopulate + mySecConfig.setKeyCreator(keyCreator); + + // Now open it + itemNameIndexDb = + myEnv.openSecondaryDatabase( + null, + "itemNameIndex", // index name + inventoryDb, // the primary db that we're indexing + mySecConfig); // the secondary config + } + + // getter methods + + // Needed for things like beginning transactions + public Environment getEnv() { + return myEnv; + } + + public Database getVendorDB() { + return vendorDb; + } + + public Database getInventoryDB() { + return inventoryDb; + } + + public SecondaryDatabase getNameIndexDB() { + return itemNameIndexDb; + } + + public StoredClassCatalog getClassCatalog() { + return classCatalog; + } + + //Close the environment + public void close() { + if (myEnv != null) { + try { + //Close the secondary before closing the primaries + itemNameIndexDb.close(); + vendorDb.close(); + inventoryDb.close(); + classCatalogDb.close(); + + // Finally, close the environment. + myEnv.close(); + } catch(DatabaseException dbe) { + System.err.println("Error closing MyDbEnv: " + + dbe.toString()); + System.exit(-1); + } + } + } +} diff --git a/examples/je/gettingStarted/Vendor.java b/examples/je/gettingStarted/Vendor.java new file mode 100644 index 0000000..aa7b076 --- /dev/null +++ b/examples/je/gettingStarted/Vendor.java @@ -0,0 +1,94 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
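MyDbEnv above bundles the environment, the three primary databases, and the item-name secondary behind setup() and close(). A sketch of the lifecycle both gettingStarted programs follow, using only the methods shown in MyDbEnv:

    // Typical MyDbEnv lifecycle, as ExampleDatabasePut and
    // ExampleInventoryRead use it.
    MyDbEnv dbEnv = new MyDbEnv();
    try {
        dbEnv.setup(new File("/tmp/JEDB"), true); // true = read-only
        // ... reads via dbEnv.getInventoryDB(), dbEnv.getNameIndexDB() ...
    } finally {
        dbEnv.close(); // closes the secondary first, then the primaries,
                       // then the environment
    }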
+ */ + +// file Vendor.java +package je.gettingStarted; + +import java.io.Serializable; + +public class Vendor implements Serializable { + + private String repName; + private String address; + private String city; + private String state; + private String zipcode; + private String bizPhoneNumber; + private String repPhoneNumber; + private String vendor; + + public void setRepName(String data) { + repName = data; + } + + public void setAddress(String data) { + address = data; + } + + public void setCity(String data) { + city = data; + } + + public void setState(String data) { + state = data; + } + + public void setZipcode(String data) { + zipcode = data; + } + + public void setBusinessPhoneNumber(String data) { + bizPhoneNumber = data; + } + + public void setRepPhoneNumber(String data) { + repPhoneNumber = data; + } + + public void setVendorName(String data) { + vendor = data; + } + + public String getRepName() { + return repName; + } + + public String getAddress() { + return address; + } + + public String getCity() { + return city; + } + + public String getState() { + return state; + } + + public String getZipcode() { + return zipcode; + } + + public String getBusinessPhoneNumber() { + return bizPhoneNumber; + } + + public String getRepPhoneNumber() { + return repPhoneNumber; + } + + public String getVendorName() { + return vendor; + } + +} diff --git a/examples/je/gettingStarted/inventory.txt b/examples/je/gettingStarted/inventory.txt new file mode 100644 index 0000000..385c980 --- /dev/null +++ b/examples/je/gettingStarted/inventory.txt @@ -0,0 +1,800 @@ +Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce +Oranges#OranfruiXRPFn1#0.73#263#fruits#Simply Fresh +Oranges#OranfruiLEuzQj#0.69#261#fruits#Off the Vine +Apples#ApplfruiZls4Du#1.20#472#fruits#TriCounty Produce +Apples#Applfrui8fewZe#1.21#402#fruits#Simply Fresh +Apples#ApplfruiXoT6xG#1.20#728#fruits#Off the Vine +Bananas#BanafruipIlluX#0.50#207#fruits#TriCounty Produce +Bananas#BanafruiEQhWuj#0.50#518#fruits#Simply Fresh +Bananas#BanafruimpRgPO#0.50#741#fruits#Off the Vine +Almonds#AlmofruiPPCLz8#0.55#600#fruits#TriCounty Produce +Almonds#AlmofruidMyKmp#0.54#745#fruits#Simply Fresh +Almonds#Almofrui7K0xzH#0.53#405#fruits#Off the Vine +Allspice#AllsfruibJGK4R#0.94#669#fruits#TriCounty Produce +Allspice#Allsfruilfvoeg#0.94#244#fruits#Simply Fresh +Allspice#Allsfruio12BOS#0.95#739#fruits#Off the Vine +Apricot#AprifruijphEpM#0.89#560#fruits#TriCounty Produce +Apricot#AprifruiU1zIDn#0.91#980#fruits#Simply Fresh +Apricot#AprifruichcwYS#0.95#668#fruits#Off the Vine +Avocado#AvocfruiwYYomu#0.99#379#fruits#TriCounty Produce +Avocado#AvocfruiT6IwWE#1.02#711#fruits#Simply Fresh +Avocado#AvocfruisbK1h5#0.97#856#fruits#Off the Vine +Bael Fruit#BaelfruilAU7Hj#0.41#833#fruits#TriCounty Produce +Bael Fruit#BaelfruiX2KvqV#0.40#770#fruits#Simply Fresh +Bael Fruit#Baelfruidjne4e#0.39#778#fruits#Off the Vine +Betel Nut#BetefruiQYdHqQ#0.34#926#fruits#TriCounty Produce +Betel Nut#Betefrui32BKAz#0.37#523#fruits#Simply Fresh +Betel Nut#BetefruisaWzY4#0.34#510#fruits#Off the Vine +Black Walnut#BlacfruiXxIuMU#0.57#923#fruits#TriCounty Produce +Black Walnut#BlacfruiZXgY9t#0.59#312#fruits#Simply Fresh +Black Walnut#BlacfruikWO0vz#0.60#877#fruits#Off the Vine +Blueberry#BluefruiCbxb4t#1.02#276#fruits#TriCounty Produce +Blueberry#BluefruiBuCfgO#1.03#522#fruits#Simply Fresh +Blueberry#Bluefruixz8MkE#1.01#278#fruits#Off the Vine +Boysenberry#BoysfruizxyMuz#1.05#239#fruits#TriCounty Produce +Boysenberry#Boysfrui3hTRQu#1.09#628#fruits#Simply Fresh 
+Boysenberry#BoysfruinpLvr3#1.02#349#fruits#Off the Vine +Breadnut#Breafrui0kDPs6#0.31#558#fruits#TriCounty Produce +Breadnut#Breafrui44s3og#0.32#879#fruits#Simply Fresh +Breadnut#BreafruiwyLKhJ#0.30#407#fruits#Off the Vine +Cactus#Cactfruiyo2ddH#0.56#601#fruits#TriCounty Produce +Cactus#CactfruixTOLv5#0.54#477#fruits#Simply Fresh +Cactus#Cactfrui4ioUav#0.55#896#fruits#Off the Vine +California Wild Grape#CalifruiZsWAa6#0.78#693#fruits#TriCounty Produce +California Wild Grape#Califruid84xyt#0.83#293#fruits#Simply Fresh +California Wild Grape#CalifruiLSJFoJ#0.81#543#fruits#Off the Vine +Cashew#CashfruihaOFVP#0.37#221#fruits#TriCounty Produce +Cashew#Cashfruizzcw1E#0.38#825#fruits#Simply Fresh +Cashew#CashfruiqtMe2Q#0.38#515#fruits#Off the Vine +Chico Sapote#ChicfruiY534SX#0.47#216#fruits#TriCounty Produce +Chico Sapote#ChicfruiSqL3Lc#0.45#476#fruits#Simply Fresh +Chico Sapote#ChicfruiurzIp4#0.47#200#fruits#Off the Vine +Chinese Jello#ChinfruiyRg75u#0.64#772#fruits#TriCounty Produce +Chinese Jello#ChinfruiuIUj0X#0.65#624#fruits#Simply Fresh +Chinese Jello#ChinfruiwXbRrL#0.67#719#fruits#Off the Vine +Common Guava#Commfruib6znSI#0.80#483#fruits#TriCounty Produce +Common Guava#Commfrui6eUivL#0.81#688#fruits#Simply Fresh +Common Guava#CommfruibWKnz3#0.84#581#fruits#Off the Vine +Crabapple#CrabfruioY2L63#0.94#582#fruits#TriCounty Produce +Crabapple#Crabfruijxcxyt#0.94#278#fruits#Simply Fresh +Crabapple#CrabfruibvWd8K#0.95#213#fruits#Off the Vine +Cranberry#CranfruiJxmKr5#0.83#923#fruits#TriCounty Produce +Cranberry#CranfruiPlklAF#0.84#434#fruits#Simply Fresh +Cranberry#Cranfrui3G5XL9#0.84#880#fruits#Off the Vine +Damson Plum#DamsfruibMRMwe#0.98#782#fruits#TriCounty Produce +Damson Plum#DamsfruiV6wFLk#1.03#400#fruits#Simply Fresh +Damson Plum#DamsfruiLhqFrQ#0.98#489#fruits#Off the Vine +Date Palm#DatefruigS31GU#1.14#315#fruits#TriCounty Produce +Date Palm#DatefruipKPaJK#1.09#588#fruits#Simply Fresh +Date Palm#Datefrui5fTyNS#1.14#539#fruits#Off the Vine +Dragon's Eye#DragfruirGJ3aI#0.28#315#fruits#TriCounty Produce +Dragon's Eye#DragfruiBotxqt#0.27#705#fruits#Simply Fresh +Dragon's Eye#DragfruiPsSnV9#0.29#482#fruits#Off the Vine +East Indian Wine Palm#EastfruiNXFJuG#0.43#992#fruits#TriCounty Produce +East Indian Wine Palm#Eastfruiq06fRr#0.40#990#fruits#Simply Fresh +East Indian Wine Palm#Eastfrui4QUwl2#0.43#351#fruits#Off the Vine +English Walnut#EnglfruiBMtHtW#1.04#787#fruits#TriCounty Produce +English Walnut#EnglfruiHmVzxV#1.03#779#fruits#Simply Fresh +English Walnut#Englfrui18Tc9n#1.06#339#fruits#Off the Vine +False Mangosteen#FalsfruibkmYqH#0.66#971#fruits#TriCounty Produce +False Mangosteen#FalsfruipBsbcX#0.68#250#fruits#Simply Fresh +False Mangosteen#FalsfruiPrFfhe#0.70#386#fruits#Off the Vine +Fried Egg Tree#FriefruiihHUdc#0.29#649#fruits#TriCounty Produce +Fried Egg Tree#FriefruimdD1rf#0.28#527#fruits#Simply Fresh +Fried Egg Tree#FriefruivyAzYq#0.29#332#fruits#Off the Vine +Genipap#GenifruiDtKusQ#0.62#986#fruits#TriCounty Produce +Genipap#GenifruiXq32eP#0.61#326#fruits#Simply Fresh +Genipap#Genifruiphwwyq#0.61#794#fruits#Off the Vine +Ginger#GingfruiQLbRZI#0.28#841#fruits#TriCounty Produce +Ginger#GingfruiS8kK4p#0.29#432#fruits#Simply Fresh +Ginger#GingfruioL3Y4S#0.27#928#fruits#Off the Vine +Grapefruit#Grapfruih86Zxh#1.07#473#fruits#TriCounty Produce +Grapefruit#GrapfruiwL1v0N#1.08#878#fruits#Simply Fresh +Grapefruit#GrapfruihmJzWm#1.02#466#fruits#Off the Vine +Hackberry#HackfruiQjomN7#0.22#938#fruits#TriCounty Produce +Hackberry#HackfruiWS0eKp#0.20#780#fruits#Simply Fresh 
+Hackberry#Hackfrui0MIv6J#0.21#345#fruits#Off the Vine +Honey Locust#HonefruiebXGRc#1.08#298#fruits#TriCounty Produce +Honey Locust#HonefruiPSqILB#1.00#427#fruits#Simply Fresh +Honey Locust#Honefrui6UXtvW#1.03#422#fruits#Off the Vine +Japanese Plum#JapafruihTmoYR#0.40#658#fruits#TriCounty Produce +Japanese Plum#JapafruifGqz0l#0.40#700#fruits#Simply Fresh +Japanese Plum#JapafruiufWkLx#0.39#790#fruits#Off the Vine +Jojoba#JojofruisE0wTh#0.97#553#fruits#TriCounty Produce +Jojoba#JojofruiwiYLp2#1.02#969#fruits#Simply Fresh +Jojoba#JojofruigMD1ej#0.96#899#fruits#Off the Vine +Jostaberry#JostfruiglsEGV#0.50#300#fruits#TriCounty Produce +Jostaberry#JostfruiV3oo1h#0.52#423#fruits#Simply Fresh +Jostaberry#JostfruiUBerur#0.53#562#fruits#Off the Vine +Kangaroo Apple#KangfruiEQknz8#0.60#661#fruits#TriCounty Produce +Kangaroo Apple#KangfruiNabdFq#0.60#377#fruits#Simply Fresh +Kangaroo Apple#Kangfrui7hky1i#0.60#326#fruits#Off the Vine +Ken's Red#Ken'fruinPUSIm#0.21#337#fruits#TriCounty Produce +Ken's Red#Ken'fruiAoZlpl#0.21#902#fruits#Simply Fresh +Ken's Red#Ken'frui5rmbd4#0.22#972#fruits#Off the Vine +Ketembilla#Ketefrui3yAKxQ#0.31#303#fruits#TriCounty Produce +Ketembilla#KetefruiROn6F5#0.34#283#fruits#Simply Fresh +Ketembilla#Ketefrui16Rsts#0.33#887#fruits#Off the Vine +King Orange#KingfruisOFzWk#0.74#429#fruits#TriCounty Produce +King Orange#KingfruiBmzRJT#0.74#500#fruits#Simply Fresh +King Orange#KingfruiGsrgRX#0.78#994#fruits#Off the Vine +Kola Nut#KolafruiBbtAuw#0.58#991#fruits#TriCounty Produce +Kola Nut#KolafruirbnLVS#0.62#733#fruits#Simply Fresh +Kola Nut#Kolafrui1ItXJx#0.58#273#fruits#Off the Vine +Kuko#Kukofrui6YH5Ds#0.41#647#fruits#TriCounty Produce +Kuko#Kukofrui7WZaZK#0.39#241#fruits#Simply Fresh +Kuko#Kukofruig9MQFT#0.40#204#fruits#Off the Vine +Kumquat#KumqfruiT6WKQL#0.73#388#fruits#TriCounty Produce +Kumquat#KumqfruidLiFLU#0.70#393#fruits#Simply Fresh +Kumquat#KumqfruiL6zhQX#0.71#994#fruits#Off the Vine +Kwai Muk#KwaifruiQK1zOE#1.10#249#fruits#TriCounty Produce +Kwai Muk#KwaifruifbCRlT#1.14#657#fruits#Simply Fresh +Kwai Muk#Kwaifruipe7T2m#1.09#617#fruits#Off the Vine +Lanzone#LanzfruijsPf1v#0.34#835#fruits#TriCounty Produce +Lanzone#LanzfruibU3QoL#0.34#404#fruits#Simply Fresh +Lanzone#LanzfruiYgHwv6#0.34#237#fruits#Off the Vine +Lemon#Lemofrui4Tgsg2#0.46#843#fruits#TriCounty Produce +Lemon#LemofruivK6qvj#0.43#207#fruits#Simply Fresh +Lemon#LemofruiXSXqJ0#0.44#910#fruits#Off the Vine +Lemon Grass#LemofruiVFgVh5#0.40#575#fruits#TriCounty Produce +Lemon Grass#LemofruiWIelvi#0.41#386#fruits#Simply Fresh +Lemon Grass#LemofruiGVAow0#0.39#918#fruits#Off the Vine +Lilly-pilly#LillfruiEQnW1m#1.21#974#fruits#TriCounty Produce +Lilly-pilly#LillfruiMqVuR5#1.23#303#fruits#Simply Fresh +Lilly-pilly#LillfruiVGH9p4#1.17#512#fruits#Off the Vine +Ling Nut#LingfruiGtOf8X#0.85#540#fruits#TriCounty Produce +Ling Nut#LingfruiuP0Jf9#0.83#200#fruits#Simply Fresh +Ling Nut#LingfruiuO5qf5#0.81#319#fruits#Off the Vine +Lipote#LipofruisxD2Qc#0.85#249#fruits#TriCounty Produce +Lipote#LipofruiHNdIqL#0.85#579#fruits#Simply Fresh +Lipote#LipofruiSQ2pKK#0.83#472#fruits#Off the Vine +Litchee#Litcfrui1R6Ydz#0.99#806#fruits#TriCounty Produce +Litchee#LitcfruiwtDM79#1.01#219#fruits#Simply Fresh +Litchee#LitcfruilpPZbC#1.05#419#fruits#Off the Vine +Longan#LongfruiEI0lWF#1.02#573#fruits#TriCounty Produce +Longan#LongfruiPQxxSF#1.04#227#fruits#Simply Fresh +Longan#LongfruisdI812#0.99#993#fruits#Off the Vine +Love-in-a-mist#LovefruiKYPW70#0.69#388#fruits#TriCounty Produce +Love-in-a-mist#LovefruiHrgjDa#0.67#478#fruits#Simply 
Fresh +Love-in-a-mist#LovefruipSOWVz#0.71#748#fruits#Off the Vine +Lychee#LychfruiicVLnY#0.38#276#fruits#TriCounty Produce +Lychee#LychfruiGY6yJr#0.38#602#fruits#Simply Fresh +Lychee#LychfruiTzDCq2#0.40#572#fruits#Off the Vine +Mabolo#MabofruiSY8RQS#0.97#263#fruits#TriCounty Produce +Mabolo#MabofruiOWWk0n#0.98#729#fruits#Simply Fresh +Mabolo#MabofruixQLOTF#0.98#771#fruits#Off the Vine +Macadamia Nut#MacafruiZppJPw#1.22#888#fruits#TriCounty Produce +Macadamia Nut#MacafruiI7XFMV#1.24#484#fruits#Simply Fresh +Macadamia Nut#Macafrui4x8bxV#1.20#536#fruits#Off the Vine +Madagascar Plum#MadafruiVj5fDf#1.14#596#fruits#TriCounty Produce +Madagascar Plum#MadafruivJhAFI#1.15#807#fruits#Simply Fresh +Madagascar Plum#Madafrui7MTe1x#1.17#355#fruits#Off the Vine +Magnolia Vine#MagnfruiigN4Y1#1.17#321#fruits#TriCounty Produce +Magnolia Vine#MagnfruicKtiHd#1.15#353#fruits#Simply Fresh +Magnolia Vine#MagnfruiLPDSCp#1.23#324#fruits#Off the Vine +Mamey#Mamefrui5rjLF6#0.36#683#fruits#TriCounty Produce +Mamey#MamefruiM6ndnR#0.38#404#fruits#Simply Fresh +Mamey#Mamefruiq9KntD#0.36#527#fruits#Off the Vine +Mandarin Orange#MandfruiRKpmKL#0.42#352#fruits#TriCounty Produce +Mandarin Orange#Mandfrui1V0KLG#0.42#548#fruits#Simply Fresh +Mandarin Orange#Mandfruig2o9Fg#0.41#686#fruits#Off the Vine +Marany Nut#MarafruiqkrwoJ#1.14#273#fruits#TriCounty Produce +Marany Nut#MarafruiCGKpke#1.12#482#fruits#Simply Fresh +Marany Nut#MarafruiB1YE5x#1.09#412#fruits#Off the Vine +Marula#MarufruiXF4biH#0.22#403#fruits#TriCounty Produce +Marula#MarufruidZiVKZ#0.23#317#fruits#Simply Fresh +Marula#MarufruiIS8BEp#0.21#454#fruits#Off the Vine +Mayhaw#MayhfruiCSrm7k#0.24#220#fruits#TriCounty Produce +Mayhaw#MayhfruiNRDzWs#0.25#710#fruits#Simply Fresh +Mayhaw#MayhfruiIUCyEg#0.24#818#fruits#Off the Vine +Meiwa Kumquat#MeiwfruiYhv3AY#0.21#997#fruits#TriCounty Produce +Meiwa Kumquat#MeiwfruiyzQFNR#0.22#347#fruits#Simply Fresh +Meiwa Kumquat#Meiwfruict4OUp#0.21#923#fruits#Off the Vine +Mexican Barberry#Mexifrui2P2dXi#0.28#914#fruits#TriCounty Produce +Mexican Barberry#MexifruiywUTMI#0.29#782#fruits#Simply Fresh +Mexican Barberry#MexifruijPHu5X#0.29#367#fruits#Off the Vine +Meyer Lemon#Meyefruin9901J#0.38#824#fruits#TriCounty Produce +Meyer Lemon#MeyefruiNeQpjO#0.37#617#fruits#Simply Fresh +Meyer Lemon#MeyefruiYEVznZ#0.37#741#fruits#Off the Vine +Mississippi Honeyberry#Missfruipb5iW3#0.95#595#fruits#TriCounty Produce +Mississippi Honeyberry#MissfruiINiDbB#0.96#551#fruits#Simply Fresh +Mississippi Honeyberry#MissfruiNUQ82a#0.93#396#fruits#Off the Vine +Monkey Pot#MonkfruiXlTW4j#0.90#896#fruits#TriCounty Produce +Monkey Pot#Monkfrui1p7a4h#0.88#344#fruits#Simply Fresh +Monkey Pot#Monkfrui4eKggb#0.92#917#fruits#Off the Vine +Monos Plum#Monofrui0Mv9aV#1.11#842#fruits#TriCounty Produce +Monos Plum#Monofrui6iTGQY#1.14#570#fruits#Simply Fresh +Monos Plum#MonofruiNu2uGH#1.13#978#fruits#Off the Vine +Moosewood#MoosfruiMXEGex#0.86#969#fruits#TriCounty Produce +Moosewood#Moosfrui8805mB#0.86#963#fruits#Simply Fresh +Moosewood#MoosfruiOsnDFL#0.88#594#fruits#Off the Vine +Natal Orange#NatafruitB8Kh2#0.42#332#fruits#TriCounty Produce +Natal Orange#NatafruiOhqRrd#0.42#982#fruits#Simply Fresh +Natal Orange#NatafruiRObMf6#0.41#268#fruits#Off the Vine +Nectarine#NectfruilNfeD8#0.36#601#fruits#TriCounty Produce +Nectarine#NectfruiQfjt6b#0.35#818#fruits#Simply Fresh +Nectarine#Nectfrui5U7U96#0.37#930#fruits#Off the Vine +Neem Tree#NeemfruiCruEMF#0.24#222#fruits#TriCounty Produce +Neem Tree#NeemfruiGv0pv5#0.24#645#fruits#Simply Fresh +Neem 
Tree#NeemfruiUFPVfk#0.25#601#fruits#Off the Vine +New Zealand Spinach#New fruihDIgec#0.87#428#fruits#TriCounty Produce +New Zealand Spinach#New fruiaoR9TP#0.87#630#fruits#Simply Fresh +New Zealand Spinach#New fruiy8LBul#0.94#570#fruits#Off the Vine +Olosapo#OlosfruiGXvaMm#0.76#388#fruits#TriCounty Produce +Olosapo#OlosfruiESlpB3#0.76#560#fruits#Simply Fresh +Olosapo#OlosfruiFNEkER#0.76#962#fruits#Off the Vine +Oregon Grape#OregfruiWxhzrf#1.14#892#fruits#TriCounty Produce +Oregon Grape#OregfruiMgjHUn#1.20#959#fruits#Simply Fresh +Oregon Grape#OregfruiC5UCxX#1.17#419#fruits#Off the Vine +Otaheite Apple#OtahfruilT0iFj#0.21#579#fruits#TriCounty Produce +Otaheite Apple#Otahfrui92PyMY#0.22#857#fruits#Simply Fresh +Otaheite Apple#OtahfruiLGD1EH#0.20#807#fruits#Off the Vine +Oyster Plant#OystfruimGxOsj#0.77#835#fruits#TriCounty Produce +Oyster Plant#Oystfrui1kudBX#0.81#989#fruits#Simply Fresh +Oyster Plant#OystfruiaX3uO2#0.80#505#fruits#Off the Vine +Panama Berry#PanafruiZG0Vp4#1.19#288#fruits#TriCounty Produce +Panama Berry#PanafruiobvXPE#1.21#541#fruits#Simply Fresh +Panama Berry#PanafruipaW8F3#1.16#471#fruits#Off the Vine +Peach Tomato#PeacfruiQpovYH#1.20#475#fruits#TriCounty Produce +Peach Tomato#PeacfruixYXLTN#1.18#655#fruits#Simply Fresh +Peach Tomato#PeacfruiILDYAp#1.23#876#fruits#Off the Vine +Peanut#Peanfruiy8M7pt#0.69#275#fruits#TriCounty Produce +Peanut#PeanfruiEimbED#0.65#307#fruits#Simply Fresh +Peanut#Peanfruic452Vc#0.68#937#fruits#Off the Vine +Peanut Butter Fruit#PeanfruixEDt9Y#0.27#628#fruits#TriCounty Produce +Peanut Butter Fruit#PeanfruiST0T0R#0.27#910#fruits#Simply Fresh +Peanut Butter Fruit#Peanfrui7jeRN2#0.27#938#fruits#Off the Vine +Pear#PearfruiB5YmSJ#0.20#945#fruits#TriCounty Produce +Pear#PearfruiA93XZx#0.21#333#fruits#Simply Fresh +Pear#PearfruioNKiIf#0.21#715#fruits#Off the Vine +Pecan#PecafruiiTIv1Z#0.26#471#fruits#TriCounty Produce +Pecan#PecafruiMGkqla#0.26#889#fruits#Simply Fresh +Pecan#Pecafrui1szYz2#0.25#929#fruits#Off the Vine +Purple Passion Fruit#Purpfrui4mMGkD#1.04#914#fruits#TriCounty Produce +Purple Passion Fruit#Purpfrui5XOW3K#1.06#423#fruits#Simply Fresh +Purple Passion Fruit#PurpfruifDTAgW#1.05#549#fruits#Off the Vine +Red Mulberry#Red fruiVLOXIW#1.24#270#fruits#TriCounty Produce +Red Mulberry#Red fruiXNXt4a#1.21#836#fruits#Simply Fresh +Red Mulberry#Red fruiUseWLG#1.21#795#fruits#Off the Vine +Red Princess#Red fruigJLR4V#0.23#829#fruits#TriCounty Produce +Red Princess#Red fruinVKps5#0.23#558#fruits#Simply Fresh +Red Princess#Red frui0jl9mg#0.24#252#fruits#Off the Vine +Striped Screw Pine#StrifruiUKzjoU#0.60#226#fruits#TriCounty Produce +Striped Screw Pine#StrifruivWLDzH#0.64#685#fruits#Simply Fresh +Striped Screw Pine#StrifruiiF7CGH#0.60#983#fruits#Off the Vine +Tapioca#Tapifruib4LCqt#0.40#955#fruits#TriCounty Produce +Tapioca#TapifruiwgQLj9#0.41#889#fruits#Simply Fresh +Tapioca#TapifruiZ6Igg3#0.41#655#fruits#Off the Vine +Tavola#Tavofrui0k9XOt#1.16#938#fruits#TriCounty Produce +Tavola#Tavofrui8DuRxL#1.08#979#fruits#Simply Fresh +Tavola#TavofruiNZEuJZ#1.16#215#fruits#Off the Vine +Tea#TeafruiL0357s#1.11#516#fruits#TriCounty Produce +Tea#TeafruiD5soTf#1.13#970#fruits#Simply Fresh +Tea#TeafruiOWq4oO#1.19#357#fruits#Off the Vine +Ugli Fruit#UglifruipKNCpf#0.24#501#fruits#TriCounty Produce +Ugli Fruit#UglifruifbDrzc#0.24#642#fruits#Simply Fresh +Ugli Fruit#Uglifruiwx8or4#0.24#280#fruits#Off the Vine +Vegetable Brain#VegefruieXLBoc#0.73#355#fruits#TriCounty Produce +Vegetable Brain#Vegefruik5FSdl#0.71#498#fruits#Simply Fresh +Vegetable 
Brain#VegefruiKBfzN0#0.72#453#fruits#Off the Vine +White Walnut#Whitfruit3oVHL#0.30#501#fruits#TriCounty Produce +White Walnut#WhitfruiHygydw#0.30#913#fruits#Simply Fresh +White Walnut#WhitfruieNtplo#0.30#401#fruits#Off the Vine +Wood Apple#WoodfruijVPRqA#0.68#501#fruits#TriCounty Produce +Wood Apple#Woodfrui4Zk69T#0.68#616#fruits#Simply Fresh +Wood Apple#WoodfruiuSLHZK#0.70#474#fruits#Off the Vine +Yellow Horn#Yellfrui5igjjf#1.18#729#fruits#TriCounty Produce +Yellow Horn#Yellfrui0DiPqa#1.13#517#fruits#Simply Fresh +Yellow Horn#Yellfrui0ljvqC#1.14#853#fruits#Off the Vine +Yellow Sapote#YellfruilGmCfq#0.93#204#fruits#TriCounty Produce +Yellow Sapote#Yellfrui4J2mke#0.88#269#fruits#Simply Fresh +Yellow Sapote#Yellfrui6PuXaL#0.86#575#fruits#Off the Vine +Ylang-ylang#Ylanfrui3rmByO#0.76#429#fruits#TriCounty Produce +Ylang-ylang#YlanfruiA80Nkq#0.76#886#fruits#Simply Fresh +Ylang-ylang#YlanfruinUEm5d#0.72#747#fruits#Off the Vine +Zapote Blanco#ZapofruisZ5sMA#0.67#428#fruits#TriCounty Produce +Zapote Blanco#ZapofruilKxl7N#0.65#924#fruits#Simply Fresh +Zapote Blanco#ZapofruiAe6Eu1#0.68#255#fruits#Off the Vine +Zulu Nut#Zulufrui469K4k#0.71#445#fruits#TriCounty Produce +Zulu Nut#ZulufruiWbz6vU#0.71#653#fruits#Simply Fresh +Zulu Nut#Zulufrui0LJnWK#0.71#858#fruits#Off the Vine +Artichoke#ArtivegeIuqmS4#0.71#282#vegetables#The Pantry +Artichoke#Artivegebljjnf#0.69#66#vegetables#TriCounty Produce +Artichoke#ArtivegeTa2lcF#0.70#618#vegetables#Off the Vine +Asparagus#AspavegezC0cDl#0.23#70#vegetables#The Pantry +Asparagus#AspavegeM1q5Kt#0.24#546#vegetables#TriCounty Produce +Asparagus#AspavegeXWbCb8#0.24#117#vegetables#Off the Vine +Basil#Basivegev08fzf#0.31#213#vegetables#The Pantry +Basil#BasivegeF3Uha7#0.29#651#vegetables#TriCounty Produce +Basil#BasivegeqR8SHC#0.31#606#vegetables#Off the Vine +Bean#BeanvegegCFUOp#0.27#794#vegetables#The Pantry +Bean#BeanvegeqMSEVq#0.27#468#vegetables#TriCounty Produce +Bean#Beanvege4IGUwX#0.27#463#vegetables#Off the Vine +Beet#BeetvegedEv4Ic#0.35#120#vegetables#The Pantry +Beet#Beetvegegi1bz1#0.35#540#vegetables#TriCounty Produce +Beet#BeetvegemztZcN#0.36#386#vegetables#Off the Vine +Blackeyed Pea#Blacvege3TPldr#0.86#133#vegetables#The Pantry +Blackeyed Pea#Blacvege3Zqnep#0.88#67#vegetables#TriCounty Produce +Blackeyed Pea#Blacvege3khffZ#0.90#790#vegetables#Off the Vine +Cabbage#CabbvegeY0c4Fw#0.82#726#vegetables#The Pantry +Cabbage#CabbvegeoaK7Co#0.85#439#vegetables#TriCounty Produce +Cabbage#CabbvegeVvO646#0.82#490#vegetables#Off the Vine +Carrot#CarrvegeEbI0sw#0.45#717#vegetables#The Pantry +Carrot#CarrvegeEZndWL#0.49#284#vegetables#TriCounty Produce +Carrot#CarrvegewUkHao#0.47#122#vegetables#Off the Vine +Cauliflower#Caulvege1CPeNG#0.68#756#vegetables#The Pantry +Cauliflower#CaulvegedrPqib#0.66#269#vegetables#TriCounty Produce +Cauliflower#CaulvegeT6cka8#0.65#728#vegetables#Off the Vine +Chayote#ChayvegePRReGE#0.14#233#vegetables#The Pantry +Chayote#Chayvegep058f7#0.14#88#vegetables#TriCounty Produce +Chayote#ChayvegeoxO40S#0.14#611#vegetables#Off the Vine +Corn#CornvegeukXkv6#0.72#632#vegetables#The Pantry +Corn#CornvegePnPREC#0.72#609#vegetables#TriCounty Produce +Corn#CornvegeO0GwoQ#0.70#664#vegetables#Off the Vine +Cucumber#CucuvegeEqQeA7#0.94#499#vegetables#The Pantry +Cucumber#CucuvegewmKbJ1#0.94#738#vegetables#TriCounty Produce +Cucumber#CucuvegeUW6JaA#0.94#565#vegetables#Off the Vine +Cantaloupe#CantvegeIHs9vJ#0.66#411#vegetables#The Pantry +Cantaloupe#CantvegeEaDdST#0.66#638#vegetables#TriCounty Produce +Cantaloupe#CantvegewWQEa0#0.64#682#vegetables#Off 
the Vine +Carraway#CarrvegewuL4Ma#0.32#740#vegetables#The Pantry +Carraway#CarrvegeyiWfBj#0.32#265#vegetables#TriCounty Produce +Carraway#CarrvegeMjb1i9#0.31#732#vegetables#Off the Vine +Celeriac#CelevegeoTBicd#0.74#350#vegetables#The Pantry +Celeriac#CelevegeCNABoZ#0.70#261#vegetables#TriCounty Produce +Celeriac#Celevege9LUeww#0.70#298#vegetables#Off the Vine +Celery#Celevegej40ZCc#0.59#740#vegetables#The Pantry +Celery#CelevegerYlVRy#0.58#734#vegetables#TriCounty Produce +Celery#Celevege67eimC#0.58#619#vegetables#Off the Vine +Chervil#ChervegeuH4Dge#0.09#502#vegetables#The Pantry +Chervil#Chervegea1OyKO#0.09#299#vegetables#TriCounty Produce +Chervil#Chervegeq56gMO#0.09#474#vegetables#Off the Vine +Chicory#Chicvege79qoQ8#0.09#709#vegetables#The Pantry +Chicory#ChicvegeTSVBQq#0.10#477#vegetables#TriCounty Produce +Chicory#Chicvege6qpcyi#0.10#282#vegetables#Off the Vine +Chinese Cabbage#ChinvegeFNsSRn#0.78#408#vegetables#The Pantry +Chinese Cabbage#Chinvege2ldNr3#0.80#799#vegetables#TriCounty Produce +Chinese Cabbage#ChinvegeK3R2Td#0.80#180#vegetables#Off the Vine +Chinese Beans#ChinvegebxbyPy#0.45#654#vegetables#The Pantry +Chinese Beans#ChinvegewKGwgx#0.45#206#vegetables#TriCounty Produce +Chinese Beans#ChinvegevVjzC0#0.47#643#vegetables#Off the Vine +Chines Kale#ChinvegeCfdkss#0.70#239#vegetables#The Pantry +Chines Kale#Chinvege6V6Dne#0.65#548#vegetables#TriCounty Produce +Chines Kale#ChinvegeB7vE3x#0.66#380#vegetables#Off the Vine +Chinese Radish#ChinvegeXcM4eq#0.22#190#vegetables#The Pantry +Chinese Radish#ChinvegeTdUBqN#0.22#257#vegetables#TriCounty Produce +Chinese Radish#ChinvegeMXMms8#0.22#402#vegetables#Off the Vine +Chinese Mustard#ChinvegeRDdpdl#0.33#149#vegetables#The Pantry +Chinese Mustard#ChinvegeABDhNd#0.32#320#vegetables#TriCounty Produce +Chinese Mustard#Chinvege8NPwa2#0.34#389#vegetables#Off the Vine +Cilantro#CilavegeQXBEsW#0.60#674#vegetables#The Pantry +Cilantro#CilavegeRgjkUG#0.60#355#vegetables#TriCounty Produce +Cilantro#CilavegelT2msu#0.59#464#vegetables#Off the Vine +Collard#CollvegesTGGNw#0.32#745#vegetables#The Pantry +Collard#CollvegeAwdor5#0.32#124#vegetables#TriCounty Produce +Collard#CollvegeQe900L#0.30#796#vegetables#Off the Vine +Coriander#CorivegeXxp4xY#0.26#560#vegetables#The Pantry +Coriander#Corivege9xBAT0#0.27#321#vegetables#TriCounty Produce +Coriander#CorivegeCfNjBx#0.27#709#vegetables#Off the Vine +Dandelion#DandvegeJNcnbr#0.11#285#vegetables#The Pantry +Dandelion#DandvegeGwBkHZ#0.11#733#vegetables#TriCounty Produce +Dandelion#DandvegeZfwVqn#0.11#57#vegetables#Off the Vine +Daikon Radish#DaikvegeHHsd7M#0.61#743#vegetables#The Pantry +Daikon Radish#DaikvegeIu17yC#0.62#459#vegetables#TriCounty Produce +Daikon Radish#DaikvegePzFjqf#0.63#296#vegetables#Off the Vine +Eggplant#EggpvegeKJtydN#0.55#200#vegetables#The Pantry +Eggplant#EggpvegeQMKrNs#0.53#208#vegetables#TriCounty Produce +Eggplant#EggpvegeN0WnSo#0.51#761#vegetables#Off the Vine +English Pea#Englvegea1ytIn#0.40#457#vegetables#The Pantry +English Pea#EnglvegerU9Vty#0.37#263#vegetables#TriCounty Produce +English Pea#EnglvegeCmkd3y#0.39#430#vegetables#Off the Vine +Fennel#Fennvegebz2UM7#0.76#545#vegetables#The Pantry +Fennel#FennvegeQzjtZ3#0.78#795#vegetables#TriCounty Produce +Fennel#FennvegeXSrW61#0.75#79#vegetables#Off the Vine +Garlic#GarlvegesR2yel#0.76#478#vegetables#The Pantry +Garlic#GarlvegeEQvt8W#0.77#349#vegetables#TriCounty Produce +Garlic#GarlvegedljBdK#0.80#708#vegetables#Off the Vine +Ginger#GingvegeMNiTc2#0.88#563#vegetables#The Pantry 
+Ginger#Gingvegeq366Sn#0.89#738#vegetables#TriCounty Produce +Ginger#GingvegeznyyVj#0.89#598#vegetables#Off the Vine +Horseradish#HorsvegemSwISt#0.12#622#vegetables#The Pantry +Horseradish#HorsvegetCOS0x#0.11#279#vegetables#TriCounty Produce +Horseradish#Horsvegew6XXaS#0.12#478#vegetables#Off the Vine +Japanese Eggplant#JapavegeTdKDCL#0.57#539#vegetables#The Pantry +Japanese Eggplant#JapavegevsJfGa#0.58#782#vegetables#TriCounty Produce +Japanese Eggplant#JapavegeCIrIxd#0.57#777#vegetables#Off the Vine +Jerusalem Artichoke#Jeruvege928cr0#0.13#231#vegetables#The Pantry +Jerusalem Artichoke#JeruvegeC2v086#0.14#123#vegetables#TriCounty Produce +Jerusalem Artichoke#JeruvegeehCYzi#0.14#196#vegetables#Off the Vine +Jicama#JicavegeRWYj9n#0.75#79#vegetables#The Pantry +Jicama#JicavegeGk5LKH#0.71#292#vegetables#TriCounty Produce +Jicama#JicavegeUjpaX1#0.70#308#vegetables#Off the Vine +Kale#Kalevegext6RNT#0.55#765#vegetables#The Pantry +Kale#KalevegeFsp17B#0.53#107#vegetables#TriCounty Produce +Kale#KalevegeAffBTS#0.57#573#vegetables#Off the Vine +Kiwifruit#KiwivegeloZBKJ#0.60#769#vegetables#The Pantry +Kiwifruit#KiwivegenCQAHw#0.59#307#vegetables#TriCounty Produce +Kiwifruit#Kiwivege0Gi3P2#0.59#235#vegetables#Off the Vine +Kohlrabi#KohlvegeJFKZDl#0.26#406#vegetables#The Pantry +Kohlrabi#Kohlvege32UTAj#0.28#613#vegetables#TriCounty Produce +Kohlrabi#KohlvegejNQC1M#0.28#326#vegetables#Off the Vine +Leek#Leekvege5iaFtg#0.70#580#vegetables#The Pantry +Leek#Leekvegei9Wxbz#0.68#188#vegetables#TriCounty Produce +Leek#LeekvegewY4mAc#0.70#473#vegetables#Off the Vine +Lettuce#LettvegesK9wDR#0.55#716#vegetables#The Pantry +Lettuce#LettvegeWzMyCM#0.57#83#vegetables#TriCounty Produce +Lettuce#LettvegeHgfGG8#0.56#268#vegetables#Off the Vine +Melons#Melovege6t93WF#0.11#252#vegetables#The Pantry +Melons#Melovegeq9kz7T#0.12#558#vegetables#TriCounty Produce +Melons#Melovege9kLTXN#0.12#382#vegetables#Off the Vine +Mushroom#MushvegeSq53h8#0.59#365#vegetables#The Pantry +Mushroom#Mushvegedq6lYP#0.59#444#vegetables#TriCounty Produce +Mushroom#Mushvege8o27D2#0.55#467#vegetables#Off the Vine +Okra#OkravegeTszQSL#0.55#62#vegetables#The Pantry +Okra#OkravegeJBWmfh#0.58#165#vegetables#TriCounty Produce +Okra#OkravegeD6tF9n#0.55#77#vegetables#Off the Vine +Onion#OniovegejwimQo#0.80#186#vegetables#The Pantry +Onion#OniovegeUOwwks#0.80#417#vegetables#TriCounty Produce +Onion#OniovegezcRDrc#0.80#435#vegetables#Off the Vine +Oregano#OregvegetlU7Ez#0.71#119#vegetables#The Pantry +Oregano#Oregvege9h9ZKy#0.70#173#vegetables#TriCounty Produce +Oregano#OregvegebXr0PJ#0.70#773#vegetables#Off the Vine +Parsley#ParsvegeXFEjjN#0.83#502#vegetables#The Pantry +Parsley#ParsvegejAg5C4#0.80#454#vegetables#TriCounty Produce +Parsley#ParsvegehAtH2H#0.84#523#vegetables#Off the Vine +Parsnip#Parsvegee9Lp6D#0.46#626#vegetables#The Pantry +Parsnip#ParsvegeSxXHSA#0.47#411#vegetables#TriCounty Produce +Parsnip#Parsvegea0stPf#0.44#403#vegetables#Off the Vine +Pea#Peavegecq4SxR#0.18#342#vegetables#The Pantry +Pea#Peavege46Gdp9#0.18#255#vegetables#TriCounty Produce +Pea#Peavegeov1gc5#0.18#251#vegetables#Off the Vine +Pepper#PeppvegeUcBYRp#0.33#52#vegetables#The Pantry +Pepper#PeppvegeB60btP#0.35#107#vegetables#TriCounty Produce +Pepper#PeppvegeG4tP3e#0.34#481#vegetables#Off the Vine +Pigeon Pea#Pigevegec5bAtm#0.94#391#vegetables#The Pantry +Pigeon Pea#Pigevegeb93eLi#0.91#447#vegetables#TriCounty Produce +Pigeon Pea#PigevegejEBDRa#0.89#259#vegetables#Off the Vine +Irish Potato#IrisvegeJNQqby#0.72#355#vegetables#The Pantry +Irish 
Potato#Irisvegewq1PLd#0.72#601#vegetables#TriCounty Produce +Irish Potato#IrisvegeAfFLdO#0.68#740#vegetables#Off the Vine +Pumpkin#PumpvegeiYsPR8#0.25#776#vegetables#The Pantry +Pumpkin#PumpvegelqP1Kh#0.25#189#vegetables#TriCounty Produce +Pumpkin#Pumpvegeb3nQU5#0.26#207#vegetables#Off the Vine +Radish#RadivegeNwwSBJ#0.16#613#vegetables#The Pantry +Radish#Radivege0tIBnL#0.16#779#vegetables#TriCounty Produce +Radish#RadivegeNLqJCf#0.16#731#vegetables#Off the Vine +Rhubarb#RhubvegeREfOti#0.12#301#vegetables#The Pantry +Rhubarb#Rhubvege4Jc3b7#0.12#557#vegetables#TriCounty Produce +Rhubarb#RhubvegeaXqF7H#0.12#378#vegetables#Off the Vine +Rosemary#Rosevege16QStc#0.73#380#vegetables#The Pantry +Rosemary#RosevegeNf6Oem#0.75#622#vegetables#TriCounty Produce +Rosemary#RosevegeFgsOyN#0.74#631#vegetables#Off the Vine +Rutabaga#RutavegecUYfQ3#0.55#676#vegetables#The Pantry +Rutabaga#RutavegejOG5DF#0.55#273#vegetables#TriCounty Produce +Rutabaga#RutavegewEVjzV#0.53#452#vegetables#Off the Vine +Salsify#SalsvegeViS9HF#0.11#537#vegetables#The Pantry +Salsify#Salsvegemd3HAL#0.11#753#vegetables#TriCounty Produce +Salsify#SalsvegeuRCnmq#0.10#787#vegetables#Off the Vine +Savory#Savovegee4DRWl#0.21#456#vegetables#The Pantry +Savory#SavovegerZ90Xm#0.21#642#vegetables#TriCounty Produce +Savory#Savovegeje7yy7#0.22#328#vegetables#Off the Vine +Sesame#Sesavege4NAWZE#0.84#54#vegetables#The Pantry +Sesame#SesavegeMTc9IN#0.84#458#vegetables#TriCounty Produce +Sesame#SesavegegOwAjo#0.83#125#vegetables#Off the Vine +Shallots#ShalvegeUO2pDO#0.26#599#vegetables#The Pantry +Shallots#ShalvegeY1sekb#0.27#647#vegetables#TriCounty Produce +Shallots#ShalvegeSDC8VY#0.27#369#vegetables#Off the Vine +Sugar Snap Peas#SugavegepUZDTl#0.47#308#vegetables#The Pantry +Sugar Snap Peas#Sugavege1XyzNH#0.48#205#vegetables#TriCounty Produce +Sugar Snap Peas#SugavegeJuaG7f#0.46#348#vegetables#Off the Vine +Soybean#SoybvegeqxSVRL#0.70#639#vegetables#The Pantry +Soybean#SoybvegezEMjOG#0.68#423#vegetables#TriCounty Produce +Soybean#SoybvegebanSFq#0.67#268#vegetables#Off the Vine +Spaghetti Squash#SpagvegeMNO1yC#0.12#753#vegetables#The Pantry +Spaghetti Squash#SpagvegeilpUaD#0.13#604#vegetables#TriCounty Produce +Spaghetti Squash#SpagvegeAOoZNX#0.13#431#vegetables#Off the Vine +Spinach#SpinvegeegXXou#0.10#742#vegetables#The Pantry +Spinach#SpinvegeVcqXL6#0.11#708#vegetables#TriCounty Produce +Spinach#SpinvegetZ26DN#0.11#625#vegetables#Off the Vine +Sweet Potato#SweevegepNDQWb#0.94#720#vegetables#The Pantry +Sweet Potato#Sweevegepnw7Tm#0.90#377#vegetables#TriCounty Produce +Sweet Potato#Sweevegeyk0C82#0.89#242#vegetables#Off the Vine +Swiss Chard#SwisvegeksalTA#0.54#545#vegetables#The Pantry +Swiss Chard#SwisvegeKm2Kze#0.54#472#vegetables#TriCounty Produce +Swiss Chard#SwisvegehteuMk#0.56#142#vegetables#Off the Vine +Taro#Tarovege3fpGV6#0.87#155#vegetables#The Pantry +Taro#TarovegerZkmof#0.86#371#vegetables#TriCounty Produce +Taro#TarovegeXKPuzc#0.89#443#vegetables#Off the Vine +Tarragon#TarrvegeCzVC6U#0.18#491#vegetables#The Pantry +Tarragon#TarrvegesIkEfS#0.17#65#vegetables#TriCounty Produce +Tarragon#TarrvegerZsKFP#0.18#180#vegetables#Off the Vine +Thyme#Thymvege8Rv72c#0.41#442#vegetables#The Pantry +Thyme#ThymvegeJoUdQS#0.42#237#vegetables#TriCounty Produce +Thyme#ThymvegeRck5uO#0.43#491#vegetables#Off the Vine +Tomato#Tomavegey0NHGK#0.31#60#vegetables#The Pantry +Tomato#TomavegeKAjRUn#0.30#630#vegetables#TriCounty Produce +Tomato#TomavegePZOHlH#0.30#70#vegetables#Off the Vine +Turnip#TurnvegeRVQiV5#0.44#580#vegetables#The Pantry 
+Turnip#TurnvegeVjIX9D#0.45#743#vegetables#TriCounty Produce +Turnip#TurnvegelFhvuJ#0.44#219#vegetables#Off the Vine +Watercress#WatevegelwzPLQ#0.54#230#vegetables#The Pantry +Watercress#Watevege8oeDCT#0.54#774#vegetables#TriCounty Produce +Watercress#Watevegexr8L1t#0.55#185#vegetables#Off the Vine +Watermelon#WatevegeL83MRH#0.19#698#vegetables#The Pantry +Watermelon#WatevegeR2S4Dq#0.21#488#vegetables#TriCounty Produce +Watermelon#WatevegepFPXQu#0.21#439#vegetables#Off the Vine +Kamote#KamovegegdON75#0.13#218#vegetables#The Pantry +Kamote#KamovegevupDBf#0.13#98#vegetables#TriCounty Produce +Kamote#KamovegeSQX7IA#0.14#703#vegetables#Off the Vine +Alogbati#AlogvegeB1WaJU#0.41#775#vegetables#The Pantry +Alogbati#AlogvegeVr5cPP#0.40#789#vegetables#TriCounty Produce +Alogbati#AlogvegeyTUQzy#0.40#416#vegetables#Off the Vine +Ampalaya#AmpavegemR9fSd#0.85#107#vegetables#The Pantry +Ampalaya#AmpavegeJDu9Im#0.90#676#vegetables#TriCounty Produce +Ampalaya#AmpavegepL8GH5#0.86#728#vegetables#Off the Vine +Dahon ng sili#Dahovege6X9grk#0.11#369#vegetables#The Pantry +Dahon ng sili#DahovegeiHZjQT#0.11#141#vegetables#TriCounty Produce +Dahon ng sili#DahovegeoCDAH8#0.12#517#vegetables#Off the Vine +Gabi#GabivegeVm4Xk3#0.44#396#vegetables#The Pantry +Gabi#Gabivegeu6woqK#0.42#722#vegetables#TriCounty Produce +Gabi#GabivegezcA7q1#0.42#394#vegetables#Off the Vine +Kabute#Kabuvege6Tqrif#0.16#123#vegetables#The Pantry +Kabute#KabuvegeA3uYdG#0.15#183#vegetables#TriCounty Produce +Kabute#KabuvegeXW6ZiI#0.16#624#vegetables#Off the Vine +Kamoteng Kahoy#KamovegeAdW37X#0.42#782#vegetables#The Pantry +Kamoteng Kahoy#KamovegetFlqpC#0.42#515#vegetables#TriCounty Produce +Kamoteng Kahoy#KamovegeMvxoLn#0.40#166#vegetables#Off the Vine +Kangkong#KangvegeSFTvEz#0.35#759#vegetables#The Pantry +Kangkong#KangvegeRLR6gL#0.34#695#vegetables#TriCounty Produce +Kangkong#Kangvege9BFo14#0.35#783#vegetables#Off the Vine +Labanos#Labavege3qrWJL#0.94#514#vegetables#The Pantry +Labanos#LabavegekgVWDH#0.89#210#vegetables#TriCounty Produce +Labanos#LabavegeiVPgMx#0.89#207#vegetables#Off the Vine +Labong#LabovegeX3O8yz#0.85#722#vegetables#The Pantry +Labong#LabovegeI1wSEs#0.87#472#vegetables#TriCounty Produce +Labong#LabovegeOPiQht#0.85#740#vegetables#Off the Vine +Malunggay#MaluvegeHkwAFm#0.30#252#vegetables#The Pantry +Malunggay#Maluvegez6TiSY#0.30#245#vegetables#TriCounty Produce +Malunggay#MaluvegewzY37D#0.31#405#vegetables#Off the Vine +Munggo#MungvegeqeuwGw#0.25#362#vegetables#The Pantry +Munggo#MungvegeNhqWvL#0.26#360#vegetables#TriCounty Produce +Munggo#MungvegeGxNxQC#0.25#555#vegetables#Off the Vine +Pechay#PechvegezDeHFZ#0.36#401#vegetables#The Pantry +Pechay#Pechvegehi4Fcx#0.35#723#vegetables#TriCounty Produce +Pechay#Pechvege8Pq8Eo#0.36#141#vegetables#Off the Vine +Sigarilyas#SigavegeMJrtlV#0.88#335#vegetables#The Pantry +Sigarilyas#SigavegeLhsoOB#0.87#768#vegetables#TriCounty Produce +Sigarilyas#SigavegeS6RJcA#0.93#356#vegetables#Off the Vine +Sitaw#Sitavege0hMi9z#0.65#153#vegetables#The Pantry +Sitaw#Sitavegeez1g6N#0.67#561#vegetables#TriCounty Produce +Sitaw#Sitavege0BCNeF#0.66#674#vegetables#Off the Vine +Talong#TalovegevZjVK6#0.10#530#vegetables#The Pantry +Talong#TalovegexX4MRw#0.09#305#vegetables#TriCounty Produce +Talong#TalovegeO3U2ze#0.10#126#vegetables#Off the Vine +Toge#TogevegeYelJUw#0.54#449#vegetables#The Pantry +Toge#Togevegeilr1xK#0.54#274#vegetables#TriCounty Produce +Toge#Togevegesvjnyn#0.51#316#vegetables#Off the Vine +Ube#UbevegeoPnxvb#0.56#397#vegetables#The Pantry 
+Ube#Ubevege2CNyve#0.55#450#vegetables#TriCounty Produce +Ube#UbevegeC43sVj#0.55#263#vegetables#Off the Vine +Upo#UpovegecOGRqC#0.22#404#vegetables#The Pantry +Upo#Upovegekjl2wl#0.22#541#vegetables#TriCounty Produce +Upo#UpovegemTTTwI#0.23#459#vegetables#Off the Vine +Edamame#EdamvegeVYtk8z#0.79#296#vegetables#The Pantry +Edamame#Edamvege608vXi#0.78#700#vegetables#TriCounty Produce +Edamame#Edamvege1jiqGY#0.75#115#vegetables#Off the Vine +Hairy melon#HairvegeFYFHIw#0.71#789#vegetables#The Pantry +Hairy melon#HairvegeS7AAqI#0.72#302#vegetables#TriCounty Produce +Hairy melon#HairvegeO6WJHL#0.72#444#vegetables#Off the Vine +Burdock#BurdvegeyLstLV#0.56#761#vegetables#The Pantry +Burdock#BurdvegeZsqAjT#0.56#582#vegetables#TriCounty Produce +Burdock#BurdvegeycF7mo#0.55#566#vegetables#Off the Vine +Snake gourd#SnakvegesfHGvt#0.92#626#vegetables#The Pantry +Snake gourd#SnakvegedlNiBk#0.92#669#vegetables#TriCounty Produce +Snake gourd#Snakvegec5n1UM#0.92#143#vegetables#Off the Vine +Wasabi#Wasavege5P5pZp#0.67#751#vegetables#The Pantry +Wasabi#Wasavege6EEE9r#0.68#559#vegetables#TriCounty Produce +Wasabi#Wasavege1ve7TY#0.65#61#vegetables#Off the Vine +Yam#YamvegeRN9ONH#0.57#438#vegetables#The Pantry +Yam#YamvegeWjdzeA#0.56#564#vegetables#TriCounty Produce +Yam#YamvegeI1AnyI#0.56#456#vegetables#Off the Vine +Apple Fritters#AppldessDj96hw#6.12#0#desserts#Mom's Kitchen +Apple Fritters#AppldessrN1kvM#6.06#0#desserts#The Baking Pan +Banana Split#Banadess7tpjkJ#10.86#0#desserts#Mom's Kitchen +Banana Split#Banadessfif758#11.07#0#desserts#The Baking Pan +Blueberry Boy Bait#BluedesseX2LVU#3.72#0#desserts#Mom's Kitchen +Blueberry Boy Bait#Bluedess9zLhaH#3.93#0#desserts#The Baking Pan +Candied Cranberries#CanddessjW92p3#1.77#0#desserts#Mom's Kitchen +Candied Cranberries#CanddesskhtVoQ#1.72#0#desserts#The Baking Pan +Daiquiri Souffle#DaiqdessebnYcy#9.54#0#desserts#Mom's Kitchen +Daiquiri Souffle#DaiqdessfM1DnX#9.72#0#desserts#The Baking Pan +Bananas Flambe#BanadesscczumD#6.94#0#desserts#Mom's Kitchen +Bananas Flambe#Banadess8qNfxd#7.07#0#desserts#The Baking Pan +Pie, Apple#Pie,desshcSHhT#7.88#0#desserts#Mom's Kitchen +Pie, Apple#Pie,dessTbiwDp#7.88#0#desserts#The Baking Pan +Pie, Pumpkin#Pie,desswhPBPB#6.00#0#desserts#Mom's Kitchen +Pie, Pumpkin#Pie,dessDg3NWl#6.24#0#desserts#The Baking Pan +Pie, Blueberry#Pie,dessw9VdgD#2.14#0#desserts#Mom's Kitchen +Pie, Blueberry#Pie,dessiSjZKD#2.12#0#desserts#The Baking Pan +Pie, Pecan#Pie,dess2NqhNR#12.70#0#desserts#Mom's Kitchen +Pie, Pecan#Pie,dessB1LfcE#12.33#0#desserts#The Baking Pan +Pie, Cranberry Apple#Pie,dess1mL7IS#10.16#0#desserts#Mom's Kitchen +Pie, Cranberry Apple#Pie,dessmDhkUA#10.16#0#desserts#The Baking Pan +Pie, Banana Cream#Pie,dessH80DuG#7.35#0#desserts#Mom's Kitchen +Pie, Banana Cream#Pie,dessf1YvFb#7.08#0#desserts#The Baking Pan +Pie, Key Lime#Pie,desshtli5N#4.85#0#desserts#Mom's Kitchen +Pie, Key Lime#Pie,dessMwQkKm#5.13#0#desserts#The Baking Pan +Pie, Lemon Meringue#Pie,dess9naVkX#3.74#0#desserts#Mom's Kitchen +Pie, Lemon Meringue#Pie,dessKYcNML#3.67#0#desserts#The Baking Pan +Pie, Caramel#Pie,dessSUuiIU#2.27#0#desserts#Mom's Kitchen +Pie, Caramel#Pie,dessvo8uHh#2.33#0#desserts#The Baking Pan +Pie, Raspberry#Pie,dessUHhMlS#2.36#0#desserts#Mom's Kitchen +Pie, Raspberry#Pie,dessJflbf5#2.36#0#desserts#The Baking Pan +Ice Cream, Chocolate#Ice desseXuyxx#1.44#0#desserts#Mom's Kitchen +Ice Cream, Chocolate#Ice dessASBohf#1.41#0#desserts#The Baking Pan +Ice Cream, Vanilla#Ice dessYnzbbt#11.92#0#desserts#Mom's Kitchen +Ice Cream, Vanilla#Ice 
dessUBBKp8#11.58#0#desserts#The Baking Pan +Ice Cream, Strawberry#Ice dessfTwKhD#1.90#0#desserts#Mom's Kitchen +Ice Cream, Strawberry#Ice dessaO9Fxf#1.99#0#desserts#The Baking Pan +Ice Cream, Rocky Road#Ice dessyIri3P#13.10#0#desserts#Mom's Kitchen +Ice Cream, Rocky Road#Ice dessZuLr8F#13.48#0#desserts#The Baking Pan +Ice Cream, Mint Chocolate Chip#Ice dessV1IGG7#5.75#0#desserts#Mom's Kitchen +Ice Cream, Mint Chocolate Chip#Ice dessX1gEQ4#5.64#0#desserts#The Baking Pan +Ice Cream Sundae#Ice dessbhlAXt#5.62#0#desserts#Mom's Kitchen +Ice Cream Sundae#Ice dessByapxl#5.72#0#desserts#The Baking Pan +Cobbler, Peach#CobbdessYUGeOB#10.14#0#desserts#Mom's Kitchen +Cobbler, Peach#CobbdessXfEtUK#10.43#0#desserts#The Baking Pan +Cobbler, Berry-Pecan#Cobbdessx3htak#5.36#0#desserts#Mom's Kitchen +Cobbler, Berry-Pecan#Cobbdesse4FUVI#5.41#0#desserts#The Baking Pan +Cobbler, Blueberry#CobbdessbiI0oF#3.78#0#desserts#Mom's Kitchen +Cobbler, Blueberry#CobbdessMXxbBN#3.57#0#desserts#The Baking Pan +Cobbler, Cherry#CobbdessNSa8QW#12.58#0#desserts#Mom's Kitchen +Cobbler, Cherry#CobbdessA1dADa#12.10#0#desserts#The Baking Pan +Cobbler, Huckleberry#Cobbdess3t6O8d#3.99#0#desserts#Mom's Kitchen +Cobbler, Huckleberry#CobbdessGI9euK#3.88#0#desserts#The Baking Pan +Cobbler, Rhubarb#Cobbdess22X40Z#9.54#0#desserts#Mom's Kitchen +Cobbler, Rhubarb#CobbdessPfnCT0#9.27#0#desserts#The Baking Pan +Cobbler, Strawberry#CobbdessI78188#12.43#0#desserts#Mom's Kitchen +Cobbler, Strawberry#CobbdessH3LdgQ#12.20#0#desserts#The Baking Pan +Cobbler, Zucchini#Cobbdess5rK4dP#11.24#0#desserts#Mom's Kitchen +Cobbler, Zucchini#Cobbdess4Ez8kS#10.51#0#desserts#The Baking Pan +Brownies#BrowdessmogdTl#7.62#0#desserts#Mom's Kitchen +Brownies#Browdess84Qc1z#7.55#0#desserts#The Baking Pan +Fudge Bar#Fudgdess8iXSyf#11.72#0#desserts#Mom's Kitchen +Fudge Bar#FudgdessakU1Id#12.29#0#desserts#The Baking Pan +Cookies, Oatmeal#Cookdessnq9Oya#2.84#0#desserts#Mom's Kitchen +Cookies, Oatmeal#CookdessBhgp7p#2.68#0#desserts#The Baking Pan +Cookies, Chocolate Chip#CookdessRVszsZ#12.73#0#desserts#Mom's Kitchen +Cookies, Chocolate Chip#CookdessSOoHmT#12.26#0#desserts#The Baking Pan +Cookies, Peanut Butter#Cookdess2UcMI2#7.82#0#desserts#Mom's Kitchen +Cookies, Peanut Butter#Cookdess1cILme#7.46#0#desserts#The Baking Pan +Mousse, Chocolate#MousdessDpN4sQ#6.25#0#desserts#Mom's Kitchen +Mousse, Chocolate#Mousdess8FyFT8#5.96#0#desserts#The Baking Pan +Mousse, Blueberry Maple#MousdessacwrkO#7.28#0#desserts#Mom's Kitchen +Mousse, Blueberry Maple#MousdessbiCMFg#7.21#0#desserts#The Baking Pan +Mousse, Chocolate Banana#MousdessIeW4qz#5.13#0#desserts#Mom's Kitchen +Mousse, Chocolate Banana#Mousdess1De9oL#5.08#0#desserts#The Baking Pan +Mousse, Cherry#Mousdesss1bF8H#13.05#0#desserts#Mom's Kitchen +Mousse, Cherry#Mousdess0ujevx#12.43#0#desserts#The Baking Pan +Mousse, Eggnog#MousdessZ38hXj#9.07#0#desserts#Mom's Kitchen +Mousse, Eggnog#Mousdesshs05ST#8.81#0#desserts#The Baking Pan +Mousse, Strawberry#MousdessHCDlBK#5.58#0#desserts#Mom's Kitchen +Mousse, Strawberry#MousdessSZ4PyW#5.36#0#desserts#The Baking Pan +Sherbet, Cantaloupe#Sherdess3DCxUg#3.11#0#desserts#Mom's Kitchen +Sherbet, Cantaloupe#Sherdesscp2VIz#2.99#0#desserts#The Baking Pan +Sherbet, Lemon Milk#Sherdess1JVFOS#7.57#0#desserts#Mom's Kitchen +Sherbet, Lemon Milk#SherdessC865vu#7.57#0#desserts#The Baking Pan +Sherbet, Orange Crush#Sherdess8W8Mb9#4.32#0#desserts#Mom's Kitchen +Sherbet, Orange Crush#SherdessxmVJBF#4.16#0#desserts#The Baking Pan +Sherbet, Blueberry#SherdessFAgxqp#3.46#0#desserts#Mom's Kitchen +Sherbet, 
Blueberry#SherdessMPL87u#3.60#0#desserts#The Baking Pan +Sherbet, Raspberry#Sherdesse86ugA#6.08#0#desserts#Mom's Kitchen +Sherbet, Raspberry#Sherdesslc1etR#5.85#0#desserts#The Baking Pan +Sherbet, Strawberry#SherdessFwv09m#4.63#0#desserts#Mom's Kitchen +Sherbet, Strawberry#SherdessKB0H7q#4.81#0#desserts#The Baking Pan +Tart, Apple#TartdessrsTyXA#3.35#0#desserts#Mom's Kitchen +Tart, Apple#Tartdessp7pyiy#3.13#0#desserts#The Baking Pan +Tart, Almond#TartdessC7FARL#6.62#0#desserts#Mom's Kitchen +Tart, Almond#Tartdess1V1A1c#6.68#0#desserts#The Baking Pan +Tart, Blueberry#TartdesssQZRXX#10.28#0#desserts#Mom's Kitchen +Tart, Blueberry#TartdessUSJSuc#10.28#0#desserts#The Baking Pan +Tart, Chocolate-Pear#Tartdess2pdOE4#5.67#0#desserts#Mom's Kitchen +Tart, Chocolate-Pear#TartdessL3aEDd#5.51#0#desserts#The Baking Pan +Tart, Lemon Fudge#Tartdess9DhZUT#3.88#0#desserts#Mom's Kitchen +Tart, Lemon Fudge#TartdesshzLOWt#3.96#0#desserts#The Baking Pan +Tart, Pecan#TartdessvSbXzd#11.80#0#desserts#Mom's Kitchen +Tart, Pecan#Tartdess6YXJec#11.04#0#desserts#The Baking Pan +Tart, Pineapple#TartdesseMfJFe#9.01#0#desserts#Mom's Kitchen +Tart, Pineapple#TartdessA2Wftr#8.44#0#desserts#The Baking Pan +Tart, Pear#Tartdess4a1BUc#10.09#0#desserts#Mom's Kitchen +Tart, Pear#TartdessNw8YPG#10.68#0#desserts#The Baking Pan +Tart, Raspberry#TartdessAVnpP6#6.18#0#desserts#Mom's Kitchen +Tart, Raspberry#TartdessfVxZFf#5.95#0#desserts#The Baking Pan +Tart, Strawberry#Tartdess4IUcZW#4.75#0#desserts#Mom's Kitchen +Tart, Strawberry#Tartdess2BeEDb#4.61#0#desserts#The Baking Pan +Tart, Raspberry#TartdesshyBd24#1.85#0#desserts#Mom's Kitchen +Tart, Raspberry#Tartdess5fqxgy#1.94#0#desserts#The Baking Pan +Trifle, Berry#TrifdessmEkbU2#12.48#0#desserts#Mom's Kitchen +Trifle, Berry#TrifdessAV9Ix8#12.60#0#desserts#The Baking Pan +Trifle, American#TrifdesscsdSCd#4.70#0#desserts#Mom's Kitchen +Trifle, American#TrifdessTArskm#4.35#0#desserts#The Baking Pan +Trifle, English#TrifdessX87q8T#8.20#0#desserts#Mom's Kitchen +Trifle, English#Trifdess52l955#8.12#0#desserts#The Baking Pan +Trifle, Orange#TrifdesslUwxwe#9.74#0#desserts#Mom's Kitchen +Trifle, Orange#TrifdessFrfCHP#10.22#0#desserts#The Baking Pan +Trifle, Pumpkin#TrifdessJKFN96#4.72#0#desserts#Mom's Kitchen +Trifle, Pumpkin#TrifdessMNw4EV#4.95#0#desserts#The Baking Pan +Trifle, Scottish#TrifdessFa0JdK#13.63#0#desserts#Mom's Kitchen +Trifle, Scottish#TrifdessAAUQCN#14.03#0#desserts#The Baking Pan +Trifle, Sherry#TrifdesscuttJg#4.42#0#desserts#Mom's Kitchen +Trifle, Sherry#TrifdesspRGpfP#4.21#0#desserts#The Baking Pan +Trifle, Strawberry#TrifdessAd5TpV#3.58#0#desserts#Mom's Kitchen +Trifle, Strawberry#Trifdess1rtW0A#3.58#0#desserts#The Baking Pan +Trifle, Scotch Whiskey#Trifdess2zJsGi#5.44#0#desserts#Mom's Kitchen +Trifle, Scotch Whiskey#TrifdessL8nuI6#5.18#0#desserts#The Baking Pan +Cheesecake, Amaretto#CheedessOJBqfD#11.89#0#desserts#Mom's Kitchen +Cheesecake, Amaretto#CheedessVnDf14#11.89#0#desserts#The Baking Pan +Cheesecake, Apple#Cheedessuks1YK#11.22#0#desserts#Mom's Kitchen +Cheesecake, Apple#CheedessMYKaKK#11.01#0#desserts#The Baking Pan +Cheesecake, Apricot#CheedessKUxTYY#12.34#0#desserts#Mom's Kitchen +Cheesecake, Apricot#CheedessMvB1pr#11.88#0#desserts#The Baking Pan +Cheesecake, Australian#CheedessQ9WAIn#2.70#0#desserts#Mom's Kitchen +Cheesecake, Australian#CheedessE6Jyjc#2.53#0#desserts#The Baking Pan +Cheesecake, Arkansas#CheedessTbqzmw#6.98#0#desserts#Mom's Kitchen +Cheesecake, Arkansas#CheedesstWJZfC#6.66#0#desserts#The Baking Pan +Cheesecake, 
Blueberry#Cheedessyo51KL#8.07#0#desserts#Mom's Kitchen +Cheesecake, Blueberry#Cheedess4Hz7P4#8.62#0#desserts#The Baking Pan +Cheesecake, Cherry#CheedessEahRkC#4.40#0#desserts#Mom's Kitchen +Cheesecake, Cherry#Cheedess3Nx4jZ#4.65#0#desserts#The Baking Pan +Cheesecake, Cran-Raspberry#CheedessrJsr9i#13.47#0#desserts#Mom's Kitchen +Cheesecake, Cran-Raspberry#CheedesshcuXCy#14.00#0#desserts#The Baking Pan +Cheesecake, German Chocolate#CheedesswayvJL#12.03#0#desserts#Mom's Kitchen +Cheesecake, German Chocolate#CheedessebTAeB#11.58#0#desserts#The Baking Pan +Cheesecake, Turtle#CheedessLqgeIA#12.19#0#desserts#Mom's Kitchen +Cheesecake, Turtle#CheedessvyNohA#12.07#0#desserts#The Baking Pan +Brownies, Apple#BrowdessIDW1Cc#5.44#0#desserts#Mom's Kitchen +Brownies, Apple#BrowdessyRMrAH#5.14#0#desserts#The Baking Pan +Brownies, Fudge#BrowdessmIHIFJ#5.19#0#desserts#Mom's Kitchen +Brownies, Fudge#BrowdessqewJ38#5.10#0#desserts#The Baking Pan +Brownies, Almond Macaroon#BrowdessniK7QI#10.57#0#desserts#Mom's Kitchen +Brownies, Almond Macaroon#BrowdessgkXURH#10.36#0#desserts#The Baking Pan +Brownies, Butterscotch#BrowdesslpA06E#7.16#0#desserts#Mom's Kitchen +Brownies, Butterscotch#BrowdessK5hofE#7.30#0#desserts#The Baking Pan +Brownies, Caramel#BrowdessVGfoA8#3.07#0#desserts#Mom's Kitchen +Brownies, Caramel#Browdess5jvVMM#3.13#0#desserts#The Baking Pan +Brownies, Cherry#Browdessyoa66A#3.39#0#desserts#Mom's Kitchen +Brownies, Cherry#BrowdessIg2JuF#3.39#0#desserts#The Baking Pan +Brownies, Chocolate Chip#Browdessb9dc59#6.18#0#desserts#Mom's Kitchen +Brownies, Chocolate Chip#BrowdessvW4nOx#6.43#0#desserts#The Baking Pan +Brownies, Coconut#BrowdessWPHrVR#3.06#0#desserts#Mom's Kitchen +Brownies, Coconut#BrowdessGVBlML#2.86#0#desserts#The Baking Pan +Brownies, Cream Cheese#Browdess1OyRay#12.74#0#desserts#Mom's Kitchen +Brownies, Cream Cheese#Browdess2fRsNv#12.61#0#desserts#The Baking Pan +Brownies, Fudge Mint#Browdessl7DP7k#11.45#0#desserts#Mom's Kitchen +Brownies, Fudge Mint#Browdessv70VKQ#11.34#0#desserts#The Baking Pan +Brownies, Mint Chip#BrowdessDDMvF7#1.81#0#desserts#Mom's Kitchen +Brownies, Mint Chip#Browdess0j9PBD#1.84#0#desserts#The Baking Pan +Cake, Angel Food#CakedessEaqGaE#11.18#0#desserts#Mom's Kitchen +Cake, Angel Food#CakedessJyAyFe#11.18#0#desserts#The Baking Pan +Cake, Chocolate#CakedessKLXFbn#10.11#0#desserts#Mom's Kitchen +Cake, Chocolate#CakedessfNP5Hg#9.91#0#desserts#The Baking Pan +Cake, Carrot#CakedessUTgMoV#4.20#0#desserts#Mom's Kitchen +Cake, Carrot#CakedessQdkaYg#4.00#0#desserts#The Baking Pan +Cake, Lemon Blueberry#CakedessepkeEW#11.73#0#desserts#Mom's Kitchen +Cake, Lemon Blueberry#CakedessHTKyQs#12.42#0#desserts#The Baking Pan +Cake Triple Fudge#CakedessiZ75lR#7.92#0#desserts#Mom's Kitchen +Cake Triple Fudge#CakedessWRrSXP#8.00#0#desserts#The Baking Pan +Cake, Walnut#CakedessveYVXZ#10.83#0#desserts#Mom's Kitchen +Cake, Walnut#Cakedesse22rT5#11.04#0#desserts#The Baking Pan +Cake, French Apple#CakedessjA2Kxv#1.95#0#desserts#Mom's Kitchen +Cake, French Apple#CakedessNBHCk0#1.86#0#desserts#The Baking Pan +Cake, Fig#CakedessOncX4y#6.82#0#desserts#Mom's Kitchen +Cake, Fig#CakedessTJtffn#7.08#0#desserts#The Baking Pan +Cake, Maple#CakedessnoGPRF#3.04#0#desserts#Mom's Kitchen +Cake, Maple#CakedessfVattM#3.22#0#desserts#The Baking Pan +Cake, Devil's Food#CakedessiXcDCt#4.73#0#desserts#Mom's Kitchen +Cake, Devil's Food#CakedessnBZk45#4.82#0#desserts#The Baking Pan +Cake, Double-Lemon#CakedesskeS0Vd#3.46#0#desserts#Mom's Kitchen +Cake, Double-Lemon#Cakedess50vx53#3.60#0#desserts#The Baking Pan 
+Sorbet, Blackberry#SorbdessQoa0CE#9.88#0#desserts#Mom's Kitchen +Sorbet, Blackberry#SorbdessqoOYzv#9.78#0#desserts#The Baking Pan diff --git a/examples/je/gettingStarted/vendors.txt b/examples/je/gettingStarted/vendors.txt new file mode 100644 index 0000000..528e1b1 --- /dev/null +++ b/examples/je/gettingStarted/vendors.txt @@ -0,0 +1,6 @@ +TriCounty Produce#309 S. Main Street#Middle Town#MN#55432#763 555 5761#Mort Dufresne#763 555 5765 +Simply Fresh#15612 Bogart Lane#Harrigan#WI#53704#420 333 3912#Cheryl Swedberg#420 333 3952 +Off the Vine#133 American Ct.#Centennial#IA#52002#563 121 3800#Bob King#563 121 3800 x54 +The Pantry#1206 N. Creek Way#Middle Town#MN#55432#763 555 3391#Sully Beckstrom#763 555 3391 +Mom's Kitchen#53 Yerman Ct.#Middle Town#MN#55432#763 554 9200#Maggie Kultgen#763 554 9200 x12 +The Baking Pan#1415 53rd Ave.#Dutchin#MN#56304#320 442 2277#Mike Roan#320 442 6879 diff --git a/examples/je/rep/quote/Command.java b/examples/je/rep/quote/Command.java new file mode 100644 index 0000000..0c25ac1 --- /dev/null +++ b/examples/je/rep/quote/Command.java @@ -0,0 +1,100 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.rep.quote; + +import java.util.StringTokenizer; + +/** + * An enumeration of the commands used by the stock quotes example. + */ +enum Command { + + PRINT(true), /* Prints all the stocks currently in the database */ + UPDATE, /* Update the info associated with the stock */ + QUIT(true), /* Quit the application */ + NONE; /* An internal pseudo command indicating no command */ + + /* Indicates whether the command is manifest, that is its the enum name + itself. */ + final private boolean manifest; + + Command(boolean manifest) { + this.manifest = manifest; + } + + /** + * A non-manifest command + */ + Command() { + this(false); + } + + /** + * Determines the command denoted by the line. + * + * @param line the text as typed in at the console. + * + * @return the command represented by the line, or NONE if the line is + * empty. + * + * @throws InvalidCommandException if no recognizable command was found on + * a non-empty line. + */ + static Command getCommand(String line) throws InvalidCommandException { + StringTokenizer tokenizer = new StringTokenizer(line); + if (!tokenizer.hasMoreTokens()) { + return NONE; + } + String command = tokenizer.nextToken(); + + /* Check for a manifest command */ + for (Command c : Command.values()) { + if (c.manifest && c.name().equalsIgnoreCase(command)) { + if (!tokenizer.hasMoreTokens()) { + return c; + } + /* Extra token. 
*/ + throw new InvalidCommandException( + "Unexpected argument: " + tokenizer.nextToken() + + " for command: " + command); + } + } + /* A stock update command, token following arg must be a price*/ + if (!tokenizer.hasMoreTokens()) { + throw new InvalidCommandException("Unknown command: " + command + + "\n" + StockQuotes.usage()); + } + String price = tokenizer.nextToken(); + + try { + Float.parseFloat(price); + if (tokenizer.hasMoreTokens()) { + throw new InvalidCommandException + ("Extraneous argument: " + tokenizer.nextToken()); + } + } catch (NumberFormatException e) { + throw new InvalidCommandException + ("Stock price must be a numeric value, not: " + price); + } + + return UPDATE; + } + + @SuppressWarnings("serial") + static class InvalidCommandException extends Exception { + InvalidCommandException(String error) { + super(error); + } + } +} diff --git a/examples/je/rep/quote/DataAccessor.java b/examples/je/rep/quote/DataAccessor.java new file mode 100644 index 0000000..7758255 --- /dev/null +++ b/examples/je/rep/quote/DataAccessor.java @@ -0,0 +1,27 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.rep.quote; + +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; + +class DataAccessor { + /* Quote Accessor */ + final PrimaryIndex quoteById; + + DataAccessor(EntityStore store) { + /* Primary index of the Employee database. */ + quoteById = store.getPrimaryIndex(String.class, Quote.class); + } +} diff --git a/examples/je/rep/quote/HARouter.java b/examples/je/rep/quote/HARouter.java new file mode 100644 index 0000000..ba59929 --- /dev/null +++ b/examples/je/rep/quote/HARouter.java @@ -0,0 +1,568 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+package je.rep.quote;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import je.rep.quote.Command.InvalidCommandException;
+
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.ReplicationGroup;
+import com.sleepycat.je.rep.ReplicationNode;
+import com.sleepycat.je.rep.UnknownMasterException;
+import com.sleepycat.je.rep.monitor.GroupChangeEvent;
+import com.sleepycat.je.rep.monitor.JoinGroupEvent;
+import com.sleepycat.je.rep.monitor.LeaveGroupEvent;
+import com.sleepycat.je.rep.monitor.Monitor;
+import com.sleepycat.je.rep.monitor.MonitorChangeListener;
+import com.sleepycat.je.rep.monitor.MonitorConfig;
+import com.sleepycat.je.rep.monitor.NewMasterEvent;
+
+/**
+ * This example illustrates the use of an HA-aware router to forward high
+ * level requests to replication nodes implemented by
+ * {@link RouterDrivenStockQuotes}. The router is built using the APIs provided
+ * by the {@link com.sleepycat.je.rep.monitor.Monitor Monitor}; it's a
+ * standalone application and does not itself access a JE Environment. The
+ * router forwards logical requests that represent some service provided by
+ * the application. It only has knowledge of whether a request will potentially
+ * require a write to the database, but does not have any other application
+ * level logic, nor does it access a JE environment. The HARouter accepts a
+ * request from the console and dispatches it to the application running on the
+ * master if it's a write request, or to one of the replicas if it's a read
+ * request. The HARouter keeps track of the current Master via the events that
+ * are delivered to the Monitor.
+ *

    + * It's the HARouter instead of each individual node (as in the + * {@link UpdateForwardingStockQuotes} example) that tracks the current Master + * via the {@link com.sleepycat.je.rep.monitor.Monitor Monitor}. Since the + * router ensures that writes are directed to the master node, the logic in + * the node itself is simpler: the node simply services the requests forwarded + * to it by the router on a port dedicated for this purpose. + *
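+ * A minimal sketch of that Monitor wiring, assuming the same group and node
+ * settings that this class parses into repConfig (the real setup lives in
+ * this example's startup code, so treat the exact values below as
+ * illustrations rather than the implementation):
+ * <pre>
+ * // Sketch only: register a MONITOR node and listen for group events.
+ * MonitorConfig monConfig = new MonitorConfig();
+ * monConfig.setGroupName(StockQuotes.REP_GROUP_NAME);
+ * monConfig.setNodeName("router1");                  // assumed node name
+ * monConfig.setNodeHostPort("node.acme.com:6000");   // assumed host:port
+ * monConfig.setHelperHosts("node.acme.com:5001");    // assumed helper
+ * Monitor monitor = new Monitor(monConfig);
+ * monitor.register();                     // contact a helper and join the group
+ * monitor.startListener(new RouterChangeListener()); // deliver master/group events
+ * </pre>
+ *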

    + * The protocol used to communicate between the router and the nodes has been + * deliberately kept very simple. In particular, it makes limited provisions + * for error reporting back to the router. + *
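+ * As a rough illustration of that protocol, a forwarding helper such as
+ * QuoteUtil.forwardRequest can be pictured as writing the request line to the
+ * node's application port and relaying whatever comes back. The code below is
+ * an assumption-laden sketch (it presumes java.net.Socket, java.io.PrintWriter
+ * and java.io.BufferedReader are imported), not the actual helper:
+ * <pre>
+ * // Hypothetical sketch: send one request line, echo any response lines.
+ * try (Socket socket = new Socket(addr.getAddress(), addr.getPort());
+ *      PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
+ *      BufferedReader in = new BufferedReader(
+ *              new InputStreamReader(socket.getInputStream()))) {
+ *     out.println(requestLine);           // e.g. "print" or "MSFT 21.5"
+ *     String response;
+ *     while ((response = in.readLine()) != null) {
+ *         System.out.println(response);   // relay the node's reply, if any
+ *     }
+ * }
+ * </pre>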

    + * The router requires the following arguments: + * + *

    + * java je.rep.quote.HARouter -nodeName <nodeName> \
    + *                            -nodeHost <host:port> \
    + *                            -helperHost <host:port>"
    + *  The arguments are described below:
    + *   -nodeName identifies the monitor name associated with this Router
    + *   -nodeHost the hostname:port combination used by the Monitor to listen for
    + *             election results and group level changes.
    + *   -helperHost one or more nodes that may be used by the Monitor to locate the
    + *               Master and register the Monitor with the Master.
+ * </pre>
    + * + * Note that the arguments are similar to the ones used to start a replication + * node. A key difference is that the -env option is absent, since the router + * is standalone and is not associated with one. + *

    + * The router can be started as follows: + * + *

    + * java je.rep.quote.HARouter -nodeName n1 \
    + *                            -nodeHost node.acme.com:6000 \
    + *                            -helperHost node.acme.com:5001
+ * </pre>
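+ * <p>
+ * Once the router and the nodes are up, a console session at the router might
+ * look like the following; the prompt text and output formatting shown here
+ * are illustrative guesses, not verbatim program output:
+ * <pre>
+ * HARouter> MSFT 21.50      (an update: routed to the current master)
+ * HARouter> print           (a read: routed round-robin to an active node)
+ * MSFT: 21.50
+ * HARouter> quit            (broadcast to every node, shutting the group down)
+ * </pre>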
    + * + * The replication nodes involved in the routing can be started as described in + * {@link RouterDrivenStockQuotes}. The Router and the nodes can be started in + * any convenient order. + * + * @see RouterDrivenStockQuotes + */ + +public class HARouter { + + /* + * The displacement from the node's port to the app port on which it + * listens for messages from the router. + */ + private static final int APP_PORT_DISPLACEMENT = 100; + + /* + * Lock used to coordinate writes to the router's notion of the current + * master. + */ + private final Object masterLock = new Object(); + + /* Holds the current masterId, protected by masterLock. */ + private volatile String masterName = null; + + /* + * The address on which the app node that is currently a master is + * listening for requests, protected by masterLock.. + */ + private InetSocketAddress appMasterAddress; + + /* + * Lock used to coordinate access to nodeAddressMapping, activeAppAddresses + * and lastReadAddress. + */ + private final Object groupLock = new Object(); + + /* + * Map keyed by node name to yield the address at which the node is + * listening for routed requests. All access to these mappings must be + * synchronized via groupLock. + */ + private static final Map nodeAddressMapping = + new HashMap(); + + /* + * List of addresses of active nodes. All access to this list must be + * synchronized via groupLock. + */ + private static final List activeAppAddresses = + new LinkedList(); + + /* + * Tracks the position in the activeAppAddresses list to which a read + * request was last dispatched. + */ + private int lastReadAddress = 0; + + /* The configuration for the replication group */ + private static final ReplicationConfig repConfig = new ReplicationConfig(); + + /** + * Round robins through the list of App dispatch addresses returning each + * address in sequence. + * + * @return the next dispatch address + */ + private InetSocketAddress getNextDispatchAddress() { + synchronized (groupLock) { + lastReadAddress++; + if (lastReadAddress == activeAppAddresses.size()) { + lastReadAddress = 0; + } + return activeAppAddresses.get(lastReadAddress); + } + } + + /** + * Routes a quit request to all the nodes in the replication group, thus + * shutting them down. + */ + private void routeQuit() { + synchronized (groupLock) { + for (InetSocketAddress address : nodeAddressMapping.values()) { + try { + QuoteUtil.forwardRequest(address, "quit", System.err); + } catch (IOException e) { + // Ignore exceptions during shutdown + } + } + } + } + + /** + * Routes a read request in round-robin fashion to active nodes in the + * replication group. It gives up if none of the nodes is available. + * + * @param line the text of the read request + */ + private void routeReadCommand(String line) { + IOException lastException = null; + int retries = 0; + synchronized (groupLock) { + retries = activeAppAddresses.size(); + } + while (retries-- > 0) { + try { + QuoteUtil.forwardRequest(getNextDispatchAddress(), + line, + System.err); + return; + } catch (IOException e) { + lastException = e; + + /* + * A more realistic implementation would remove the node + * from the list and arrange for it to be returned when it + * was up again. But this code is only intended to be a demo. + */ + continue; + } + } + System.err.println("None of the nodes were available to " + + "service the request " + + " Sample exception: " + lastException); + } + + /** + * Routes an update command to the master node if one is available. 
+ * + * @param line the text of the update request. + */ + private void routeUpdateCommand(String line) { + final InetSocketAddress targetAddress; + final String targetNodeName; + synchronized (masterLock) { + /* Copy a consistent pair. */ + targetAddress = appMasterAddress; + targetNodeName = masterName; + } + try { + QuoteUtil.forwardRequest(targetAddress, line, System.err); + } catch (IOException e) { + /* Group could be in the midst of a transition to a new master. */ + System.err.println("Could not connect to master: " + + targetNodeName + " Exception: " + e); + } + } + + /** + * The central method used to sort out and dispatch individual requests. + * + * @throws Exception + */ + void doWork() throws Exception { + BufferedReader stdin = + new BufferedReader(new InputStreamReader(System.in)); + + while (true) { + + /** + * Generate prompt, read input. Valid inputs are: 1) quit + * (tells this local node to quit) 2) print 3) stockSymbol + * stockValue + */ + String line = QuoteUtil.promptAndRead( + "HARouter", null, false, System.out, stdin); + if (line == null) { + return; + } + try { + switch (Command.getCommand(line)) { + case NONE: + break; + case PRINT: + routeReadCommand(line); + break; + case QUIT: + /* Broadcast it to all the nodes. */ + routeQuit(); + return; + case UPDATE: + routeUpdateCommand(line); + break; + } + } catch (InvalidCommandException e) { + System.err.println(e.getMessage()); + continue; + } + } + } + + /** + * Creates a Router instance and initializes it using the command line + * arguments passed into it. + * + * @param argv command line arguments. + */ + HARouter(String[] argv) { + parseParams(argv); + } + + /** + * Parse the command line parameters and initialize the router with + * configuration information about the replication group and the + * Monitor that is running as part of this Router. + */ + void parseParams(String[] argv) + throws IllegalArgumentException { + + int argc = 0; + int nArgs = argv.length; + + if (nArgs == 0) { + usage("-nodeName, -nodeHost, and -helperHost are required."); + } + String nodeName = null; + while (argc < nArgs) { + String thisArg = argv[argc++]; + + if (thisArg.equals("-nodeName")) { + /* the node id */ + if (argc < nArgs) { + nodeName = argv[argc++]; + repConfig.setNodeName(nodeName); + } else { + usage("-nodeName requires an argument"); + } + } else if (thisArg.equals("-nodeHost")) { + /* The node hostname, port pair. */ + if (argc < nArgs) { + repConfig.setNodeHostPort(argv[argc++]); + } else { + usage("-nodeHost requires an argument"); + } + } else if (thisArg.equals("-helperHost")) { + /* The helper node hostname, port pair. */ + if (argc < nArgs) { + repConfig.setHelperHosts(argv[argc++]); + } else { + usage("-helperHost requires an argument"); + } + } else { + usage("Invalid parameter: " + thisArg); + } + } + repConfig.setNodeType(NodeType.MONITOR); + repConfig.setGroupName(StockQuotes.REP_GROUP_NAME); + + if (nodeName == null) { + usage("-nodeName is a required parameter"); + } + } + + /** + * Provides invocation usage help information in response to an error + * condition. + * + * @param message an explanation of the condition that provoked the + * display of usage information. + */ + void usage(String message){ + System.out.println(); + System.out.println(message); + System.out.println(); + System.out.print("usage: " + getClass().getName()); + System.out.println(" -nodeName -nodeHost " + + " -helperHost "); + System.out. 
+ println("\t -nodeName the unique node name for this monitor\n" + + "\t -nodeHost the unique hostname and port pair\n" + + "\t -helperHost the hostname and port pair associated with " + + " the helper node\n" ); + System.exit(0); + } + + class RouterChangeListener implements MonitorChangeListener { + + public void notify(NewMasterEvent newMasterEvent) { + updateMaster(newMasterEvent.getNodeName(), + newMasterEvent.getSocketAddress()); + } + + public void notify(GroupChangeEvent groupChangeEvent) { + updateGroup(groupChangeEvent); + } + + public void notify(JoinGroupEvent joinGroupEvent) { + addActiveNode(joinGroupEvent); + } + + public void notify(LeaveGroupEvent leaveGroupEvent) { + removeActiveNode(leaveGroupEvent); + } + } + + /** + * Maps a socket address used by a replication node to the corresponding + * socket address used by the application to listen for routed messages. + * This method achieves the mapping by reusing a port some fixed distance + * away from the replication port in use. + * + * @param nodeAddress the socket address for the replication port + * @return the corresponding socket address used by the application. + */ + static InetSocketAddress getAppSocketAddress(InetSocketAddress + nodeAddress) { + return new InetSocketAddress(nodeAddress.getHostName(), + nodeAddress.getPort()+ + APP_PORT_DISPLACEMENT); + } + + /** + * Initialize the routers knowledge regarding the set of nodes in the + * replication group. + * + * @param electableNodes set of electable nodes + */ + private void initGroup(Set electableNodes) { + System.err.println("Group size: " + electableNodes.size()); + synchronized (groupLock) { + nodeAddressMapping.clear(); + for (ReplicationNode node : electableNodes) { + nodeAddressMapping.put + (node.getName(), + getAppSocketAddress(node.getSocketAddress())); + } + } + } + + /** + * Update the electable nodes list of the group when a GroupChangeEvent + * happens. Also remove the node in the active nodes list if the node is + * active when its GroupChangeType is REMOVE. + * + * @param event the GroupChangeEvent fired + */ + private void updateGroup(GroupChangeEvent event) { + synchronized (groupLock) { + ReplicationGroup group = event.getRepGroup(); + String nodeName = event.getNodeName(); + + switch (event.getChangeType()) { + case REMOVE: + System.err.println("Node: " + nodeName + + " is removed from the group."); + removeActiveNode(nodeName); + nodeAddressMapping.remove(nodeName); + break; + case ADD: + System.err.println("Node: " + nodeName + + " is added to the group."); + ReplicationNode node = group.getMember(nodeName); + nodeAddressMapping.put + (nodeName, + getAppSocketAddress(node.getSocketAddress())); + break; + default: + throw new IllegalStateException("Unknown event: " + + event.getChangeType()); + } + } + } + + /** + * Updates the active nodes list when a node joins the group, so that the + * routers know the current active nodes in the group. + * + * @param event the JoinGroupEvent notifying a node joins the group. + */ + private void addActiveNode(JoinGroupEvent event) { + System.err.println("Node: " + event.getNodeName() + + " with current master: " + event.getMasterName() + + " joins the group at: " + event.getJoinTime()); + synchronized (groupLock) { + InetSocketAddress address = + nodeAddressMapping.get(event.getNodeName()); + activeAppAddresses.add(address); + } + } + + /** + * Updates the active nodes list when a node leaves the group, so that the + * routers know the current active nodes in the group. 
+ * + * @param event the LeaveGroupEvent notifying a node leaves the group. + */ + private void removeActiveNode(LeaveGroupEvent event) { + System.err.println("Node: " + event.getNodeName() + + " with current master: " + event.getMasterName() + + " joins the group at: " + event.getJoinTime() + + ", leaves the group at: " + event.getLeaveTime() + + ", because of: " + event.getLeaveReason()); + synchronized (groupLock) { + removeActiveNode(event.getNodeName()); + } + } + + /** + * Remove a node from the active nodes list. + * + * @param nodeName name of the node which leaves the group. + */ + private void removeActiveNode(String nodeName) { + InetSocketAddress address = + nodeAddressMapping.get(nodeName); + activeAppAddresses.remove(address); + } + + /** + * Updates the routers's notion of the master. + * + * @param newMasterName the new master + * @param masterNodeAddress the socket address + */ + private void updateMaster(String newMasterName, + InetSocketAddress masterNodeAddress){ + System.err.println("Current Master node: " + newMasterName); + synchronized(masterLock) { + masterName = newMasterName; + appMasterAddress = getAppSocketAddress(masterNodeAddress); + } + } + + public static void main(String[] argv) throws Exception { + final HARouter router = new HARouter(argv); + + MonitorConfig monConfig = new MonitorConfig(); + monConfig.setNodeName(repConfig.getNodeName()); + monConfig.setGroupName(repConfig.getGroupName()); + monConfig.setNodeHostPort(repConfig.getNodeHostPort()); + monConfig.setHelperHosts(repConfig.getHelperHosts()); + + final Monitor monitor = new Monitor(monConfig); + + final int retrySleepMs = 2000; + final int retryPeriodMins = 5; + int maxRetries = (retryPeriodMins*60*1000)/retrySleepMs; + + while (true) { + try { + + /* + * Register this monitor, so it's a member of the replication + * group. + */ + ReplicationNode master = monitor.register(); + + /* + * Set up the initial state by explicitly querying the helper + * nodes. Once this is accomplished, the Router is kept current + * via the Monitor APIs. + */ + ReplicationGroup repGroup = monitor.getGroup(); + router.initGroup(repGroup.getElectableNodes()); + router.updateMaster(master.getName(), + master.getSocketAddress()); + + /* Start the listener, so it can listen for group changes. */ + monitor.startListener(router.new RouterChangeListener()); + + break; + } catch (UnknownMasterException unknownMasterException) { + if (maxRetries-- == 0) { + /* Don't have a functioning group. */ + throw unknownMasterException; + } + System.err.println + (new Date() + + " Waiting for a new master to be established." + + unknownMasterException); + Thread.sleep(retrySleepMs); + } + } + router.doWork(); + } +} diff --git a/examples/je/rep/quote/Quote.java b/examples/je/rep/quote/Quote.java new file mode 100644 index 0000000..5e23765 --- /dev/null +++ b/examples/je/rep/quote/Quote.java @@ -0,0 +1,40 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package je.rep.quote; + +import java.io.Serializable; + +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; + +@Entity +class Quote implements Serializable{ + /** + * + */ + private static final long serialVersionUID = 1L; + + @PrimaryKey + String stockSymbol; + + float lastTrade; + + Quote(String symbol, float price) { + this.stockSymbol = symbol; + this.lastTrade = price; + } + + @SuppressWarnings("unused") + private Quote() {} // Needed for DPL deserialization +} diff --git a/examples/je/rep/quote/QuoteUtil.java b/examples/je/rep/quote/QuoteUtil.java new file mode 100644 index 0000000..860d85b --- /dev/null +++ b/examples/je/rep/quote/QuoteUtil.java @@ -0,0 +1,176 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.rep.quote; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.StringTokenizer; + +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.StoreConfig; + +/** + * Common utility methods for the StockQuote examples. + */ +class QuoteUtil { + + /** + * Opens a transactional EntityStore in the given replicated environment. + */ + static EntityStore openEntityStore(ReplicatedEnvironment env, + String storeName) { + + final StoreConfig storeConfig = new StoreConfig(); + + /* An Entity Store in a replicated environment must be transactional.*/ + storeConfig.setTransactional(true); + + /* Note that both Master and Replica open the store for write. */ + storeConfig.setReadOnly(false); + storeConfig.setAllowCreate(true); + + return new EntityStore(env, storeName, storeConfig); + } + + /** + * Display a prompt for this node. If this node accepts input, read and + * return the input. + * + * @param name a descriptive string for the prompt + * + * @param nodeName the name or null, if the prompt is not from a rep node + * + * @param isMaster true if the node is currently the master + * + * @param stdin the Reader providing command input + * + * @return the string that was typed in, in response to the prompt. + * + * @throws IOException + */ + static String promptAndRead(String name, + String nodeName, + boolean isMaster, + PrintStream promptStream, + BufferedReader stdin) + throws IOException { + + if (promptStream != null) { + StringBuilder sb = new StringBuilder(); + sb.append(name); + if (nodeName != null) { + sb.append("-").append(nodeName).append(" "); + if (isMaster) { + sb.append("(master)"); + } else { + sb.append("(replica)"); + } + } + promptStream.print(sb.toString()); + promptStream.print("> "); + } + return stdin.readLine(); + } + + /** + * Forwards the request line to the target and prints out the results + * of the command at the current console. 
+ * + * @param target the socket on which the application is listening + * + * @param commandLine the command to be executed on the remote target + * + * @param printStream the stream used to capture the output from the + * forwarded request + */ + static void forwardRequest(InetSocketAddress target, + String commandLine, + PrintStream printStream) + throws IOException { + + /* Open a connection to the current master. */ + Socket socket = new Socket(); + PrintStream out = null; + BufferedReader in = null; + try { + socket.connect(target); + out = new PrintStream(socket.getOutputStream(), true); + out.println(commandLine); + in = new BufferedReader(new InputStreamReader(socket + .getInputStream())); + while (true) { + String line = in.readLine(); + if (line == null) { + break; + } + printStream.println(line); + } + } finally { + QuoteUtil.closeSocketAndStreams(socket, in, out); + } + } + + /** + * Utility to close socket and its streams. + * + * @param socket to be closed + * @param in input reader to be closed + * @param out output stream to be closed + */ + static void closeSocketAndStreams(Socket socket, + BufferedReader in, + PrintStream out) { + try { + if (in != null) { + in.close(); + } + } catch (IOException e) { + // Ignore exceptions during cleanup + } + try { + if (out != null) { + out.close(); + } + } catch (RuntimeException e) { + // Ignore exceptions during cleanup + } + try { + if (socket != null) { + socket.close(); + } + } catch (IOException e) { + // Ignore exceptions during cleanup + } + } + + /** + * Parses a line to return a new Quote. + * + * @param line the line containing the quote + * + * @return a Quote + */ + static Quote parseQuote(String line) { + StringTokenizer tokenizer = new StringTokenizer(line); + String stockSymbol = tokenizer.nextToken(); + float stockValue = Float.parseFloat(tokenizer.nextToken()); + + return new Quote(stockSymbol, stockValue); + } +} diff --git a/examples/je/rep/quote/RouterDrivenStockQuotes.java b/examples/je/rep/quote/RouterDrivenStockQuotes.java new file mode 100644 index 0000000..0fd95dd --- /dev/null +++ b/examples/je/rep/quote/RouterDrivenStockQuotes.java @@ -0,0 +1,160 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package je.rep.quote; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; + +/** + * This class is based on {@link StockQuotes} and illustrates use of an + * HA-aware router (implemented by {@link HARouter}), in conjunction with the + * {@link com.sleepycat.je.rep.monitor.Monitor Monitor} class, to direct + * application requests, based upon the type of request (read or write) and the + * state (Master or Replica) of a node in the replication group. This example + * is meant to illustrate how a software load balancer might be integrated with + * JE HA, where {@code HARouter} plays the role of the load balancer for + * purposes of the example. + *

    + * Be sure to read the {@link je.rep.quote Example Overview} first to put this + * example into context. + *

    + * In this example, unlike StockQuotes, only the HARouter has a + * console associated with it. It accepts commands typed into its console and + * forwards them as appropriate to the Master and Replicas in the group. The + * logic for tracking the Master resides in HARouter, and + * information about the state of the replication group is supplied by the + * {@link com.sleepycat.je.rep.monitor.Monitor Monitor}. While this example + * uses just one HARouter instance for the entire group, production + * applications could use multiple router instances to avoid single points of + * failure. + *
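+ *
+ * As an editor's sketch (not part of the original example), the
+ * Monitor-based tracking mentioned above follows the pattern used in
+ * {@link HARouter#main}; the node name and host:port values below are
+ * placeholders:
+ *
+ *   MonitorConfig monConfig = new MonitorConfig();
+ *   monConfig.setGroupName(StockQuotes.REP_GROUP_NAME);
+ *   monConfig.setNodeName("monitor1");                 // placeholder
+ *   monConfig.setNodeHostPort("router.acme.com:6000"); // placeholder
+ *   monConfig.setHelperHosts("node.acme.com:5001");
+ *   Monitor monitor = new Monitor(monConfig);
+ *   ReplicationNode master = monitor.register();  // join as a monitor node
+ *   monitor.startListener(myChangeListener);      // receive group events
+ *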

    + * Each node, which in this example is an instance of + * RouterDrivenStockQuotes, establishes a server socket on which + * it can listen for requests from HARouter. The node that is currently the + * Master will expect both write and read requests from HARouter, while nodes + * that are Replicas will only expect read requests from the router. + *
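+ *
+ * A minimal sketch of that listening loop, simplified from the
+ * doWork method later in this class (error handling omitted;
+ * appPort stands for the node's HA port plus 100):
+ *
+ *   ServerSocket serverSocket = new ServerSocket(appPort);
+ *   while (true) {
+ *       Socket socket = serverSocket.accept();
+ *       BufferedReader in = new BufferedReader(
+ *           new InputStreamReader(socket.getInputStream()));
+ *       PrintStream out = new PrintStream(socket.getOutputStream(), true);
+ *       String line = in.readLine();     // a command routed by HARouter
+ *       // ... execute the command and write its results to out ...
+ *   }
+ *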

    + * The request flow between nodes in this example is shown below. + *

    + * ------------               Read requests
    + * | HARouter |------------------------------------||
    + * | Instance |---------------------||             ||
    + * ------------                     ||             ||
    + *  ||                              ||             ||
    + *  || Write requests               ||             ||
    + *  \/                              ||             ||
    + * ---------------------------      ||             ||
    + * | RouterDrivenStockQuotes |      ||             ||
    + * | Instance 1: Master      |      ||             ||
    + * ---------------------------      \/             ||
    + *                ---------------------------      ||
    + *                | RouterDrivenStockQuotes |      ||
    + *                | Instance 2: Replica     |      ||
    + *                ---------------------------      \/
    + *                               ---------------------------
    + *                               | RouterDrivenStockQuotes |
    + *                               | Instance 3: Replica     |
    + *                               ---------------------------
    + *
    + *                                       ...more Replica instances...
    + * 
    + *

+ * This example is intended to be illustrative. It forwards requests as text,
+ * and receives responses in text form. Actual applications may, for example,
+ * forward HTTP requests, or use some other application-level network protocol
+ * to forward such requests.
+ *
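+ *
+ * As an illustrative sketch (the address and command below are
+ * placeholders), the text-based forwarding amounts to a single call to
+ * QuoteUtil.forwardRequest from this package; note that it may throw
+ * IOException if the target node is down:
+ *
+ *   QuoteUtil.forwardRequest(
+ *       new InetSocketAddress("node.acme.com", 5101), // app address
+ *       "IBM 120.5",                                  // update command text
+ *       System.out);                                  // echoes the response
+ *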

    + * Please review the javadoc in {@link StockQuotes} for a detailed description + * of the arguments that must be supplied at startup. The only difference is + * that you must use the name of this class when invoking the JVM. For example, + * the first node can be started as follows: + * + *

    + * java je.rep.quote.RouterDrivenStockQuotes -env /tmp/stockQuotes1 \
    + *                                           -nodeName n1 \
    + *                                           -nodeHost node.acme.com:5001 \
    + *                                           -helperHost node.acme.com:5001
    + * 
    + * + * In addition to starting the nodes, you will also need to start the + * {@link HARouter} as described in its javadoc. + * + * @see HARouter + */ +public class RouterDrivenStockQuotes extends StockQuotes { + + RouterDrivenStockQuotes(String[] params) throws Exception { + super(params); + } + + /** + * Overrides the method in the base class to receive requests using the + * socket connection established by the router instead of prompting the + * user for console input. + */ + @Override + void doWork() + throws IOException, InterruptedException { + + /* + * Get socket address on which it can be contacted with requests by + * the Router. + */ + InetSocketAddress appSocketAddress = + HARouter.getAppSocketAddress(repEnv.getRepConfig(). + getNodeSocketAddress()); + final ServerSocket serverSocket = + new ServerSocket(appSocketAddress.getPort()); + System.err.println("Node: " + repEnv.getNodeName() + + " joined replication group: " + + repEnv.getGroup().getName() + + " in state: " + repEnv.getState() + ".\n" + + this.getClass().getSimpleName() + + " ready to service Router requests at: " + + serverSocket + "\n"); + try { + for (boolean done = false; !done;) { + + Socket socket = null; + BufferedReader in = null; + PrintStream out = null; + + try { + socket = serverSocket.accept(); + + in = new BufferedReader + (new InputStreamReader(socket.getInputStream())); + + String line = getCommandLine(null, + socket.getInputStream()); + out = new PrintStream(socket.getOutputStream(), true); + done = doCommand(line, out); + } finally { + QuoteUtil.closeSocketAndStreams(socket, in, out); + } + } + } finally { + serverSocket.close(); + } + } + + public static void main(String[] argv) throws Exception { + StockQuotes stockQuotes = new RouterDrivenStockQuotes(argv); + stockQuotes.runExample(); + } +} diff --git a/examples/je/rep/quote/RunTransaction.java b/examples/je/rep/quote/RunTransaction.java new file mode 100644 index 0000000..6b2aa30 --- /dev/null +++ b/examples/je/rep/quote/RunTransaction.java @@ -0,0 +1,292 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.rep.quote; + +import java.io.PrintStream; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.NoConsistencyRequiredPolicy; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; + +/** + * Utility class to begin and commit/abort a transaction and handle exceptions + * according to this application's policies. 
The doTransactionWork method is + * abstract and must be implemented by callers. The transaction is run and + * doTransactionWork is called by the run() method of this class. The + * onReplicaWrite and onRetryFailure methods may optionally be overridden. + */ +public abstract class RunTransaction { + + /* The maximum number of times to retry the transaction. */ + private static final int TRANSACTION_RETRY_MAX = 10; + + /* + * The number of seconds to wait between retries when a sufficient + * number of replicas are not available for a transaction. + */ + private static final int INSUFFICIENT_REPLICA_RETRY_SEC = 1; + + /* Amount of time to wait to let a replica catch up before retrying. */ + private static final int CONSISTENCY_RETRY_SEC = 1; + + /* Amount of time to wait after a lock conflict. */ + private static final int LOCK_CONFLICT_RETRY_SEC = 1; + + private final ReplicatedEnvironment env; + private final PrintStream out; + + /** + * Creates the runner. + */ + RunTransaction(ReplicatedEnvironment repEnv, PrintStream printStream) { + env = repEnv; + out = printStream; + } + + /** + * Runs a transaction, calls the doTransactionWork method, and retries as + * needed. + *

    + * If the transaction is read only, it uses Durability.READ_ONLY_TXN for + * the Transaction. Since this Durability policy does not call for any + * acknowledgments, it eliminates the possibility of a {@link + * InsufficientReplicasException} being thrown from the call to {@link + * Environment#beginTransaction} for a read only transaction on a Master, + * which is an overly stringent requirement. This makes the Master more + * available for read operations. + * + * @param readOnly determines whether the transaction to be run is read + * only. + */ + public void run(boolean readOnly) + throws InterruptedException, EnvironmentFailureException { + + OperationFailureException exception = null; + boolean success = false; + long sleepMillis = 0; + TransactionConfig txnConfig = setupTxnConfig(readOnly); + + for (int i = 0; i < TRANSACTION_RETRY_MAX; i++) { + /* Sleep before retrying. */ + if (sleepMillis != 0) { + Thread.sleep(sleepMillis); + sleepMillis = 0; + } + Transaction txn = null; + try { + txn = env.beginTransaction(null, txnConfig); + doTransactionWork(txn); /* CALL APP-SPECIFIC CODE */ + txn.commit(); + success = true; + return; + } catch (InsufficientReplicasException insufficientReplicas) { + + /* + * Retry the transaction. Give Replicas a chance to contact + * this Master, in case they have not had a chance to do so + * following an election. + */ + exception = insufficientReplicas; + out.println(insufficientReplicas.toString() + + "\n Retrying ..."); + sleepMillis = INSUFFICIENT_REPLICA_RETRY_SEC * 1000; + + if (i > 1) { + + /* + * As an example of a possible application choice, + * elect to execute this operation with lower durability. + * That makes the node more available, but puts the + * data at greater risk. + */ + txnConfig = lowerDurability(txnConfig); + } + continue; + } catch (InsufficientAcksException insufficientReplicas) { + + /* + * Transaction has been committed at this node. The other + * acknowledgments may be late in arriving, or may never arrive + * because the replica just went down. + */ + + /* + * INSERT APP-SPECIFIC CODE HERE: For example, repeat + * idempotent changes to ensure they went through. + * + * Note that 'success' is false at this point, although some + * applications may consider the transaction to be complete. + */ + out.println(insufficientReplicas.toString()); + txn = null; + return; + } catch (ReplicaWriteException replicaWrite) { + + /* + * Attempted a modification while in the Replica state. + * + * CALL APP-SPECIFIC CODE HERE: Cannot accomplish the changes + * on this node, redirect the write to the new master and retry + * the transaction there. This could be done by forwarding the + * request to the master here, or by returning an error to the + * requester and retrying the request at a higher level. + */ + onReplicaWrite(replicaWrite); + return; + } catch (LockConflictException lockConflict) { + + /* + * Retry the transaction. Note that LockConflictException + * covers the HA LockPreemptedException. + */ + exception = lockConflict; + out.println(lockConflict.toString() + "\n Retrying ..."); + sleepMillis = LOCK_CONFLICT_RETRY_SEC * 1000; + continue; + } catch (ReplicaConsistencyException replicaConsistency) { + + /* + * Retry the transaction to see if the replica becomes + * consistent. If consistency couldn't be satisfied, we can + * choose to relax the timeout associated with the + * ReplicaConsistencyPolicy, or to do a read with + * NoConsistencyRequiredPolicy. 
+ */ + exception = replicaConsistency; + out.println(replicaConsistency.toString() + + "\n Retrying ..."); + sleepMillis = CONSISTENCY_RETRY_SEC * 1000; + continue; + } finally { + + if (!success) { + if (txn != null) { + txn.abort(); + } + + /* + * INSERT APP-SPECIFIC CODE HERE: Perform any app-specific + * cleanup. + */ + } + } + } + + /* + * CALL APP-SPECIFIC CODE HERE: Transaction failed, despite retries. + */ + onRetryFailure(exception); + } + + /** + * Must be implemented to perform operations using the given Transaction. + */ + public abstract void doTransactionWork(Transaction txn); + + /** + * May be optionally overridden to handle a ReplicaWriteException. After + * this method is called, the RunTransaction constructor will return. By + * default, this method throws the ReplicaWriteException. + */ + public void onReplicaWrite(ReplicaWriteException replicaWrite) { + throw replicaWrite; + } + + /** + * May be optionally overridden to handle a failure after the + * TRANSACTION_RETRY_MAX has been exceeded. After this method is called, + * the RunTransaction constructor will return. By default, this method + * prints the last exception. + */ + public void onRetryFailure(OperationFailureException lastException) { + out.println("Failed despite retries." + + ((lastException == null) ? + "" : + " Encountered exception:" + lastException)); + } + + /** + * Reduce the Durability level so that we don't require any + * acknowledgments from replicas. An example of using lower durability + * requirements. + */ + private TransactionConfig lowerDurability(TransactionConfig txnConfig) { + + out.println("\nLowering durability, execute update although " + + "replicas not available. Update may not be durable."); + TransactionConfig useTxnConfig = txnConfig; + if (useTxnConfig == null) { + useTxnConfig = new TransactionConfig(); + } + + useTxnConfig.setDurability(new Durability(SyncPolicy.WRITE_NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE)); + return useTxnConfig; + } + + /** + * Create an optimal transaction configuration. + */ + private TransactionConfig setupTxnConfig(boolean readOnly) { + + if (!readOnly) { + /* + * A read/write transaction can just use the default transaction + * configuration. A null value for the configuration param means + * defaults should be used. + */ + return null; + } + + if (env.getState().isUnknown()) { + + /* + * This node is not in touch with the replication group master and + * because of that, can't fulfill any consistency checks. As an * + * example of a possible application choice, change the + * consistency * characteristics for this specific transaction and + * avoid a * ReplicaConsistencyException by lowering the + * consistency requirement now. + */ + out.println("\nLowering consistency, permit access of data " + + " currently on this node."); + return new TransactionConfig().setConsistencyPolicy + (NoConsistencyRequiredPolicy.NO_CONSISTENCY); + } + + /* + * We can optimize a read operation by specifying a lower + * durability. Since Durability.READ_ONLY_TXN does not call for any + * acknowledgments, it eliminates the possibility of a {@link + * InsufficientReplicasException} being thrown from the call to {@link + * Environment#beginTransaction} for a read only transaction on a + * Master. 
+ */ + return new TransactionConfig().setDurability + (Durability.READ_ONLY_TXN); + } +} diff --git a/examples/je/rep/quote/SimpleRouter.java b/examples/je/rep/quote/SimpleRouter.java new file mode 100644 index 0000000..9429bc3 --- /dev/null +++ b/examples/je/rep/quote/SimpleRouter.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package je.rep.quote; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.InetSocketAddress; +import java.util.LinkedList; +import java.util.List; + +import je.rep.quote.Command.InvalidCommandException; + +/** + * This example illustrates the use of a simple HA-unaware router that is used + * in conjunction with {@link UpdateForwardingStockQuotes}. The router is + * unaware of the state (Master or Replica) of each + * node and simply forwards requests entered at the router's console to each + * node in the group in Round Robin fashion. + *
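+ *
+ * The round-robin selection itself is a one-liner; as an editor's sketch,
+ * equivalent to the getNextDispatchAddress method below (names are
+ * illustrative):
+ *
+ *   lastIndex = (lastIndex + 1) % appAddresses.size();
+ *   InetSocketAddress next = appAddresses.get(lastIndex);
+ *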

+ * The UpdateForwardingStockQuotes instance will in turn,
+ * if necessary, forward any write requests to the current master and return
+ * the results back to SimpleRouter. UpdateForwardingStockQuotes
+ * instances do not have their own consoles; they only service requests
+ * delivered over the network by this router.
+ *

+ * SimpleRouter takes host:port pairs as arguments, one pair for
+ * each instance of the UpdateForwardingStockQuotes application.
+ * The port numbers in this case are application, not HA, port numbers on which
+ * the UpdateForwardingStockQuotes application listens for
+ * application messages forwarded by SimpleRouter. They must
+ * therefore be different from the ports used internally by HA, that is, from
+ * the HA port numbers specified as arguments to
+ * UpdateForwardingStockQuotes. The application port number is
+ * computed in this example by adding
+ * HARouter.APP_PORT_DISPLACEMENT (default value 100) to the HA
+ * port number associated with the node. So, if node "n1" uses port 5001 for
+ * HA, it must (based upon the conventions used in these examples) use port
+ * 5101 for application-level communication.
+ *
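+ *
+ * An editor's sketch of that convention (the displacement of 100 is a
+ * convention of these examples, not an HA requirement):
+ *
+ *   int haPort  = 5001;            // HA port configured for node "n1"
+ *   int appPort = haPort + 100;    // application port, 5101
+ *   InetSocketAddress appAddress =
+ *       new InetSocketAddress("node.acme.com", appPort);
+ *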

    + * SimpleRouter can thus be invoked as follows: + * + *

    + * java je.rep.quote.SimpleRouter node.acme.com:5101 node.acme.com:5102 node.acme.com:5103
    + * 
+ *
+ * for a three-node group. In this case, the applications will use ports 5101
+ * through 5103 for application messages, while HA will use ports 5001 through
+ * 5003.
+ *
+ *

    + * SimpleRouter and UpdateForwardingStockQuotes can be started in any order. + * + * @see UpdateForwardingStockQuotes + */ +public class SimpleRouter { + + /* + * List of addresses on which the application is listening for routed + * requests. All access to this list must be synchronized via groupLock. + */ + private static final List appAddresses= + new LinkedList(); + + /* + * Tracks the position in the appAddresses list to which a read request was + * last dispatched. + */ + private int lastReadAddress = 0; + + /** + * Round robins through the list of App dispatch addresses returning each + * address in sequence. + * + * @return the next dispatch address + */ + private InetSocketAddress getNextDispatchAddress() { + lastReadAddress++; + if (lastReadAddress == appAddresses.size()) { + lastReadAddress = 0; + } + return appAddresses.get(lastReadAddress); + } + + /** + * Routes a quit request to all the nodes in the replication group, thus + * shutting them down. + */ + private void routeQuit() { + for (InetSocketAddress appAddress : appAddresses) { + try { + QuoteUtil.forwardRequest(appAddress, "quit", System.err); + } catch (IOException e) { + // Ignore exceptions during shutdown + } + } + } + + /** + * Routes a request in round-robin fashion to each of the nodes in the + * replication group. It gives up if none of the nodes are available. + * + * @param line the text of the read request + */ + private void routeCommand(String line) { + IOException lastException = null; + int retries = appAddresses.size(); + while (retries-- > 0) { + try { + QuoteUtil.forwardRequest(getNextDispatchAddress(), + line, + System.err); + return; + } catch (IOException e) { + lastException = e; + + /* + * A more realistic implementation would remove the node + * from the list and arrange for it to be returned when it + * was up again. But this code is only intended to be a demo. + */ + continue; + } + } + System.err.println("None of the nodes at:" + appAddresses.toString() + + " were available to service the request." + + " Exception: " + lastException); + } + + /** + * The central method used to sort out and dispatch individual requests. + * + * @throws Exception + */ + void doWork() throws Exception { + BufferedReader stdin = + new BufferedReader(new InputStreamReader(System.in)); + while (true) { + String line = QuoteUtil.promptAndRead + ("SRouter", null, false, System.out, stdin); + + if (line == null) { + return; + } + + try { + switch (Command.getCommand(line)) { + + case NONE: + break; + + case PRINT: + case UPDATE: + routeCommand(line); + break; + + case QUIT: + /* Broadcast it to all the nodes. */ + routeQuit(); + return; + + default: + StockQuotes.usage(); + break; + } + } catch (InvalidCommandException e) { + System.err.println(e.getMessage()); + continue; + } + } + } + + /** + * Creates a Router instance and initializes it using the command line + * arguments passed into it. + * + * @param argv command line arguments. + */ + SimpleRouter(String[] argv) { + parseParams(argv); + } + + /** + * Parse the command line parameters and initialize the router with + * configuration information about the replication group and the + * Monitor that is running as part of this Router. 
+ */ + private void parseParams(String[] argv) + throws IllegalArgumentException { + + if (argv.length == 0) { + usage("Insufficient arguments"); + } + + for (String hostPort : argv) { + + int portStart = hostPort.indexOf(':'); + if (portStart < 0) { + usage("Bad argument:" + hostPort); + } + String hostname = hostPort.substring(0,portStart); + int port; + port = Integer.parseInt(hostPort.substring(portStart+1)); + + appAddresses.add(new InetSocketAddress(hostname, port)); + } + } + + /** + * Provides invocation usage help information in response to an error + * condition. + * + * @param message an explanation of the condition that provoked the + * display of usage information. + */ + void usage(String message){ + System.out.println(); + System.out.println(message); + System.out.println(); + System.out.print("usage: " + getClass().getName()); + System.out.println(" [,]+"); + System.out. + println("\t the hostname and port number pair, e.g. " + + "foo.bar.com:6000, on which the application is listening " + + "for forwarded requests."); + System.exit(0); + } + + public static void main(String[] argv) throws Exception { + final SimpleRouter router = new SimpleRouter(argv); + router.doWork(); + } +} diff --git a/examples/je/rep/quote/StockQuotes.java b/examples/je/rep/quote/StockQuotes.java new file mode 100644 index 0000000..fa174b4 --- /dev/null +++ b/examples/je/rep/quote/StockQuotes.java @@ -0,0 +1,693 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.rep.quote; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.util.concurrent.TimeUnit; + +import je.rep.quote.Command.InvalidCommandException; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.RollbackException; +import com.sleepycat.je.rep.TimeConsistencyPolicy; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; + +/** + * The most basic demonstration of a replicated application. It's intended to + * help gain an understanding of basic HA concepts and demonstrate use of the + * HA APIs to create a replicated environment and issue read and write + * transactions. + *

    + * Be sure to read the {@link je.rep.quote Example Overview} first to put this + * example into context. + *

+ * The program can be used to start up multiple stock quote servers by
+ * supplying the following arguments:
+ *

    + * java je.rep.quote.StockQuotes -env <environment home> \
    + *                               -nodeName <nodeName> \
    + *                               -nodeHost <hostname:port> \
    + *                               -helperHost <hostname:port>
    + * 
    + * + * The argument names resemble the {@link ReplicationConfig} names to draw + * attention to the connection between the program argument names and + * ReplicationConfig APIs. + * + *
    + *  -env        a pre-existing directory for the replicated JE environment
+ *  -nodeName   the name used to uniquely identify this node in the
+ *              replication group
    + *  -nodeHost   the unique hostname, port pair for this node
    + *  -helperHost the hostname, port pair combination for the helper node. It's
    + *              the same as the nodeHost only if this node is intended to
+ *              become the initial Master during the formation of the
    + *              replication group.
    + * 
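+ *
+ * As an editor's sketch, the last three arguments map one-for-one onto
+ * ReplicationConfig calls (values are from the sample invocation below);
+ * -env simply names the environment directory:
+ *
+ *   ReplicationConfig rc = new ReplicationConfig();
+ *   rc.setGroupName(REP_GROUP_NAME);            // fixed by this example
+ *   rc.setNodeName("n1");                       // -nodeName
+ *   rc.setNodeHostPort("node.acme.com:5001");   // -nodeHost
+ *   rc.setHelperHosts("node.acme.com:5001");    // -helperHost
+ *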
    + * + * A typical demo session begins with a set of commands such as the following + * to start each node. The first node can be started as below: + * + *
    + * java je.rep.quote.StockQuotes -env dir1 -nodeName n1 \
    + *                               -nodeHost node.acme.com:5001 \
    + *                               -helperHost node.acme.com:5001
    + * 
    + * + * Note that the helperHost and the nodeHost are the + * same, since it's the first node in the group. HA uses this fact to start a + * brand new replication group of size one, with this node as the master if + * there is no existing environment in the environment directory + * dir1. + *
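+ *
+ * In code terms (an editor's sketch; the constructor call is the same one
+ * used by this class's getEnvironment method):
+ *
+ *   ReplicatedEnvironment repEnv =
+ *       new ReplicatedEnvironment(new File("dir1"), repConfig, envConfig);
+ *   // With an empty dir1, and nodeHost equal to helperHost, this call
+ *   // founds a new group of size one and the node becomes its master.
+ *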

+ * Nodes can be added to the group by using a variation of the above. The
+ * second and third nodes can be started as follows:
+ *

    + * java je.rep.quote.StockQuotes -env dir2 -nodeName n2 \
    + *                               -nodeHost node.acme.com:5002 \
    + *                               -helperHost node.acme.com:5001
    + *
    + * java je.rep.quote.StockQuotes -env dir3 -nodeName n3 \
    + *                               -nodeHost node.acme.com:5003 \
    + *                               -helperHost node.acme.com:5002
    + * 
+ *
+ * Note that each node has its own unique node name and a distinct directory
+ * for its replicated environment. Any subsequent node can use the first node
+ * as a helper to get itself going. In fact, you can pick any node already in
+ * the group to serve as a helper. So, for example, when adding the third
+ * node, either node 1 or node 2 could serve as the helper. The helper node
+ * simply provides a mechanism to help a new node get itself admitted into the
+ * group; it is no longer needed once the node becomes part of the group.
+ *

+ * When initially running the example, please use a group of at least three
+ * nodes. A two-node group is a special case, and it is best to learn how to
+ * run larger groups first. For more information, see
+ * Two-Node Replication Groups. When initially creating the nodes, it is
+ * also important to start the master first.
+ *

+ * Once the nodes have been created, however, the order in which they are
+ * started does not matter. Starting the master (the node whose
+ * helperHost and nodeHost are the same) first
+ * minimizes the initial overall group startup time, since the master
+ * initializes the replicated environment and is ready to start accepting and
+ * processing commands even as the other nodes concurrently join the group.
+ *

+ * The above commands start up a group with three nodes all running locally on
+ * the same machine. You can start up nodes on different machines connected by
+ * a TCP/IP network by executing the above commands on the respective machines.
+ * It's important in this case that the clocks on these machines be reasonably
+ * synchronized, that is, within a couple of seconds of each other. You can do
+ * this manually, but it's best to use a protocol like NTP for this purpose.
+ *

+ * Upon subsequent restarts the nodes will automatically hold an election and
+ * select one of the nodes in the group to be the master. The choice of master
+ * is made visible by the master/replica prompt that the application uses to
+ * make the distinction clear. Note that at least a simple majority of nodes
+ * (for example, two nodes of a three-node group) must be started before the
+ * application will respond with a prompt, because it's only after a simple
+ * majority of nodes is available that an election can be held and a master
+ * elected. For a two-node group, both nodes must be started before an
+ * election can be held.
+ *

    + * Commands are submitted directly at the command prompt in the console + * established by the application at each node. Update commands are only + * accepted at the console associated with the current master, identified by + * the master prompt as below: + * + *

+ *     StockQuotes-2 (master)>
    + * + * After issuing a few commands, you may want to experiment with shutting down + * or killing some number of the replicated environments and bringing them back + * up to see how the application behaves. + *

    + * If you type stock updates at an application that is currently running as a + * replica node, the update is refused and you must manually re-enter the + * updates on the console associated with the master. This is of course quite + * cumbersome and serves as motivation for the subsequent examples. + *
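+ *
+ * An editor's sketch of how that refusal surfaces to the application; in
+ * this example the handling actually lives in RunTransaction and its
+ * onReplicaWrite hook:
+ *
+ *   try {
+ *       Transaction txn = repEnv.beginTransaction(null, null);
+ *       dao.quoteById.put(txn, new Quote("IBM", 120.5f));
+ *       txn.commit();
+ *   } catch (ReplicaWriteException e) {
+ *       // This node is a Replica; re-enter the update on the master.
+ *   }
+ *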

    + * As shown below, there is no routing of requests between nodes in this + * example, which is why write requests fail when they are issued on a Replica + * node. + *

    + * -----------------------
    + * | StockQuotes         | Read and Write requests both succeed,
    + * | Instance 1: Master  | because this is the Master.
    + * -----------------------
    + *
    + *      -----------------------
    + *      | StockQuotes         | Read requests succeed,
    + *      | Instance 2: Replica | but Write requests fail on a Replica.
    + *      -----------------------
    + *
    + *           -----------------------
    + *           | StockQuotes         | Read requests succeed,
    + *           | Instance 3: Replica | but Write requests fail on a Replica.
    + *           -----------------------
    + *
    + *               ...more Replica instances...
    + * 
    + *

+ * See {@link UpdateForwardingStockQuotes} for an example that uses
+ * {@link SimpleRouter}, along with application-supplied inter-node request
+ * routing, to direct write requests to the master.
+ *

    + * See {@link RouterDrivenStockQuotes} along with {@link HARouter}for an + * example that uses an external router built using the + * {@link com.sleepycat.je.rep.monitor.Monitor Monitor} to route write + * requests externally to the master and provide primitive load balancing + * across the nodes in the replication group. + */ +public class StockQuotes { + private static final String STORE_NAME = "Quotes"; + + /* The name of the replication group used by this application. */ + static final String REP_GROUP_NAME = "StockQuotesRepGroup"; + + static DataAccessor dao = null; + static EntityStore store = null; + private File envHome; + EnvironmentConfig envConfig; + + final ReplicationConfig repConfig; + ReplicatedEnvironment repEnv; + + /* The maximum number of times to retry handle creation. */ + private static int REP_HANDLE_RETRY_MAX = 5; + + /** + * Updates the stock price. This command can only be accomplished on the + * master. The method handles all transaction related exceptions and + * retries the update if appropriate. All environment invalidating + * exceptions are propagated up to the caller. + * + * @param line the command line + * + * @param printStream the output stream for messages + * + * @throws InterruptedException + */ + void updateStock(final String line, final PrintStream printStream) + throws InterruptedException { + + final Quote quote = QuoteUtil.parseQuote(line); + if (quote == null) { + return; + } + + new RunTransaction(repEnv, printStream) { + + @Override + public void doTransactionWork(Transaction txn) { + dao.quoteById.put(txn, quote); + /* Output local indication of processing. */ + System.out.println(repEnv.getNodeName() + + " processed update request: " + line); + } + + @Override + public void onReplicaWrite(ReplicaWriteException replicaWrite) { + /* Attempted a modification while in the replica state. */ + printStream.println + (repEnv.getNodeName() + + " is not currently the master. Perform the update at" + + " the node that's currently the master."); + } + }.run(false /*readOnly*/); + } + + /** + * Parses the command and dispatches it to the method that implements the + * command. + * + * @param commandLine the command to be executed + * + * @param out the stream to which the results are written + * + * @return true if we input has been exhausted, or a Quit was encountered + * + * @throws InterruptedException + */ + final boolean doCommand(String commandLine, PrintStream out) + throws InterruptedException { + + if (commandLine == null) { + return true; + } + try { + Command command = Command.getCommand(commandLine); + switch (command) { + case NONE: + break; + case PRINT: + printStocks(out); + break; + case QUIT: + quit(out); + return true; + case UPDATE: + updateStock(commandLine, out); + break; + } + } catch (InvalidCommandException e) { + out.println(e.getMessage()); + } + return false; + } + + /** + * Prints all stocks in the database. Retries the operation a fixed number + * of times, before giving up. Note that this method can be invoked on + * either the master or the replica. 
+ * + * @throws InterruptedException + */ + private void printStocks(final PrintStream out) + throws InterruptedException { + + new RunTransaction(repEnv, out) { + + @Override + public void doTransactionWork(Transaction txn) { + + final EntityCursor quotes = + dao.quoteById.entities(txn, null); + try { + out.println("\tSymbol\tPrice"); + out.println("\t======\t====="); + + int count = 0; + for (Quote quote : quotes) { + out.println("\t" + quote.stockSymbol + + "\t" + quote.lastTrade); + count++; + } + out.println("\n\t" + count + " stock" + + ((count == 1) ? "" : "s") + + " listed.\n"); + } finally { + quotes.close(); + } + } + + }.run(true /*readOnly*/); + + /* Output local indication of processing. */ + System.out.println(repEnv.getNodeName() + " processed print request"); + } + + /** + * Runs the example. It handles environment invalidating exceptions, + * re-initializing the environment handle and the dao when such an + * exception is encountered. + * + * @throws Exception to propagate any IO or Interrupt exceptions + */ + final void runExample() + throws Exception { + + while (true) { + + try { + initialize(); + doWork(); + shutdown(); + return; + /* Exit the application. */ + } catch (InsufficientLogException insufficientLog) { + /* Restore the log files from another node in the group. */ + NetworkRestore networkRestore = new NetworkRestore(); + networkRestore.execute(insufficientLog, + new NetworkRestoreConfig()); + continue; + } catch (RollbackException rollback) { + + /* + * Any transient state that is dependent on the environment + * must be re-synchronized to account for transactions that + * may have been rolled back. + */ + continue; + } finally { + if (repEnv != null) { + repEnv.close(); + repEnv = null; + } + } + } + } + + /** + * The command execution loop. + * + * @throws Exception + */ + void doWork() + throws Exception { + + boolean done = false; + + while (!done) { + String line = getCommandLine(System.out, System.in); + done = doCommand(line, System.out); + } + } + + /** + * Generate prompt, read input. Valid inputs are: + * + * 1) quit (tells this local node to quit) + * 2) print + * 3) stockSymbol stockValue + * + * @param inputStream + */ + String getCommandLine(PrintStream promptStream, InputStream inputStream) + throws IOException { + BufferedReader stdin = + new BufferedReader(new InputStreamReader(inputStream)); + return QuoteUtil.promptAndRead("StockQuotes", + repEnv.getNodeName(), + repEnv.getState().isMaster(), + promptStream, + stdin); + } + + /** + * Shuts down the application. If this node was the master, then some other + * node will be elected in its place, if a simple majority of nodes + * survives this shutdown. + */ + void shutdown() { + + store.close(); + store = null; + + repEnv.close(); + repEnv = null; + } + + /** + * The StockQuotes example. + */ + StockQuotes(String[] params) + throws Exception { + + /* + * Set envHome and generate a ReplicationConfig. Note that + * ReplicationConfig and EnvironmentConfig values could all be + * specified in the je.properties file, as is shown in the properties + * file included in the example. + */ + repConfig = new ReplicationConfig(); + + /* Set consistency policy for replica. */ + TimeConsistencyPolicy consistencyPolicy = new TimeConsistencyPolicy + (1, TimeUnit.SECONDS, /* 1 sec of lag */ + 3, TimeUnit.SECONDS /* Wait up to 3 sec */); + repConfig.setConsistencyPolicy(consistencyPolicy); + + /* Wait up to two seconds for commit acknowledgments. 
*/ + repConfig.setReplicaAckTimeout(2, TimeUnit.SECONDS); + parseParams(params); + + /* + * A replicated environment must be opened with transactions enabled. + * Environments on a master must be read/write, while environments + * on a client can be read/write or read/only. Since the master's + * identity may change, it's most convenient to open the environment + * in the default read/write mode. All write operations will be + * refused on the client though. + */ + envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + Durability durability = + new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); + envConfig.setDurability(durability); + envConfig.setAllowCreate(true); + } + + /** + * Initializes the Environment, entity store and data accessor used by the + * example. It's invoked when the application is first started up, and + * subsequently, if the environment needs to be re-established due to an + * exception. + * + * @throws InterruptedException + */ + void initialize() + throws InterruptedException { + + /* Initialize the replication handle. */ + repEnv = getEnvironment(); + + /* + * The following two operations -- opening the EntityStore and + * initializing it by calling EntityStore.getPrimaryIndex -- don't + * require an explicit transaction because they use auto-commit + * internally. A specialized RunTransaction class for auto-commit + * could be defined, but for simplicity the RunTransaction class is + * used here and the txn parameter is simply ignored. + */ + new RunTransaction(repEnv, System.out) { + + @Override + public void doTransactionWork(Transaction txn) { + /* Initialize the entity store. */ + store = QuoteUtil.openEntityStore(repEnv, STORE_NAME); + } + + @Override + public void onRetryFailure + (OperationFailureException lastException) { + + /* Restart the initialization process. */ + throw lastException; + } + }.run(false /*readOnly*/); + + new RunTransaction(repEnv, System.out) { + + @Override + public void doTransactionWork(Transaction txn) { + + /* Initialize the data access object. */ + dao = new DataAccessor(store); + } + + @Override + public void onRetryFailure + (OperationFailureException lastException) { + + /* Restart the initialization process. */ + throw lastException; + } + }.run(false /*readOnly*/); + } + + /** + * Implements the "quit" command. Subclasses can override to take + * additional cleanup measures. + */ + public void quit(PrintStream out) { + out.println(repEnv.getNodeName() + " exited."); + } + + /** + * Creates the replicated environment handle and returns it. It will retry + * indefinitely if a master could not be established because a sufficient + * number of nodes were not available, or there were networking issues, + * etc. + * + * @return the newly created replicated environment handle + * + * @throws InterruptedException if the operation was interrupted + */ + ReplicatedEnvironment getEnvironment() + throws InterruptedException { + + DatabaseException exception = null; + + /* + * In this example we retry REP_HANDLE_RETRY_MAX times, but a + * production HA application may retry indefinitely. + */ + for (int i = 0; i < REP_HANDLE_RETRY_MAX; i++) { + try { + return new ReplicatedEnvironment(envHome, + repConfig, + envConfig); + + } catch (UnknownMasterException unknownMaster) { + exception = unknownMaster; + + /* + * Indicates there is a group level problem: insufficient nodes + * for an election, network connectivity issues, etc. 
Wait and
+                 * retry to allow the problem to be resolved.
+                 */
+                System.err.println("Master could not be established. " +
+                                   "Exception message: " +
+                                   unknownMaster.getMessage() +
+                                   " Will retry after 5 seconds.");
+                Thread.sleep(5 * 1000);
+
+                continue;
+            }
+        }
+        /* Failed despite retries. */
+        if (exception != null) {
+            throw exception;
+        }
+        /* Don't expect to get here. */
+        throw new IllegalStateException("Failed despite retries");
+    }
+
+    /**
+     * Parses the command line parameters for a replication node and sets up
+     * any configuration parameters.
+     */
+    void parseParams(String[] argv)
+        throws IllegalArgumentException {
+
+        int argc = 0;
+        int nArgs = argv.length;
+
+        if (nArgs == 0) {
+            usage("-env, -nodeName, -nodeHost, and -helperHost are required.");
+        }
+        String nodeName = null;
+        String nodeHost = null;
+        while (argc < nArgs) {
+            String thisArg = argv[argc++];
+
+            if (thisArg.equals("-env")) {
+                if (argc < nArgs) {
+                    envHome = new File(argv[argc++]);
+                } else {
+                    usage("-env requires an argument");
+                }
+            } else if (thisArg.equals("-nodeName")) {
+                /* The node name. */
+                if (argc < nArgs) {
+                    nodeName = argv[argc++];
+                    repConfig.setNodeName(nodeName);
+                } else {
+                    usage("-nodeName requires an argument");
+                }
+            } else if (thisArg.equals("-nodeHost")) {
+                /*
+                 * The node hostname, port pair. Check for the argument
+                 * before consuming it, as the other options do.
+                 */
+                if (argc < nArgs) {
+                    nodeHost = argv[argc++];
+                    repConfig.setNodeHostPort(nodeHost);
+                } else {
+                    usage("-nodeHost requires an argument");
+                }
+            } else if (thisArg.equals("-helperHost")) {
+                /* The helper node hostname, port pair. */
+                if (argc < nArgs) {
+                    repConfig.setHelperHosts(argv[argc++]);
+                } else {
+                    usage("-helperHost requires an argument");
+                }
+            } else {
+                usage("Unknown argument: " + thisArg);
+            }
+        }
+        if (envHome == null) {
+            usage("-env is a required parameter");
+        }
+
+        if (nodeName == null) {
+            usage("-nodeName is a required parameter");
+        }
+
+        if (nodeHost == null) {
+            usage("-nodeHost is a required parameter");
+        }
+
+        /* The helper host can be skipped once a node has joined the group. */
+        repConfig.setGroupName(REP_GROUP_NAME);
+    }
+
+    /**
+     * Provides command level usage information.
+     */
+    static String usage() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("Valid commands are:\n");
+        builder.append("\tprint - displays stocks in database\n");
+        builder.append("\tquit - exit the application\n");
+        builder.append("\t<stock> <value> - " +
+                       "inserts this stock/value pair into the database");
+
+        return builder.toString();
+    }
+
+    /**
+     * Provides invocation usage help information in response to an error
+     * condition.
+     *
+     * @param message an explanation of the condition that provoked the
+     * display of usage information.
+ */ + void usage(String message) { + System.out.println(); + System.out.println(message); + System.out.println(); + System.out.print("usage: " + getClass().getName()); + + System.out.println + (" -env -nodeName " + + "-nodeHost -helperHost "); + + System.out.println + ("\t -env the replicated environment directory\n" + + "\t -nodeName the unique name associated with this node\n" + + "\t -nodeHost the hostname and port pair associated with " + + " this node\n" + + "\t -helperHost the hostname and port pair associated with " + + " the helper node\n"); + + System.out.println("All parameters may also be expressed as " + + "properties in a je.properties file."); + System.exit(0); + } + + public static void main(String[] argv) throws Exception { + StockQuotes stockQuotes = new StockQuotes(argv); + stockQuotes.runExample(); + } +} diff --git a/examples/je/rep/quote/StockQuotesRMIForwarding.java b/examples/je/rep/quote/StockQuotesRMIForwarding.java new file mode 100644 index 0000000..c3e23e9 --- /dev/null +++ b/examples/je/rep/quote/StockQuotesRMIForwarding.java @@ -0,0 +1,364 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.rep.quote; + +import java.io.PrintStream; +import java.rmi.NotBoundException; +import java.rmi.Remote; +import java.rmi.RemoteException; +import java.rmi.registry.LocateRegistry; +import java.rmi.registry.Registry; +import java.rmi.server.UnicastRemoteObject; +import java.util.Date; + +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; + +/** + * This example is a small variation on the basic {@link StockQuotes} example. + * Instead of rejecting update requests made at a Replica's console, it + * illustrates how RMI could be used to forward write requests to a Master. The + * example is otherwise identical to StockQuotes and you should + * read the javadoc associated with it before proceeding with this example. The + * discussion that follows thus focusses entirely on the RMI based + * write-forwarding aspects of this example. + *

+ * Each node in this example is an RMI server and hosts an RMI registry. The
+ * registry contains exactly one binding associated with the name:
+ * {@link StockQuotesRMIForwarding#RMI_NAME RMI_NAME}. The object associated
+ * with the RMI binding (an instance of {@link WriteServicesImpl}) makes
+ * available all the high-level database write operations that are part of the
+ * application. When this node is the Master, Replicas will use the remote
+ * methods to invoke write operations on it. All nodes are RMI servers, but
+ * only the current Master is actually used to serve write requests while it
+ * is in the Master state. The Replicas play the role of RMI clients, making
+ * remote method calls to the Master to forward their write requests.
+ *
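+ * For illustration only (a sketch, not part of the original javadoc), the
+ * registry setup performed by this class's constructor amounts to the
+ * following, where RMI_NAME and RMI_PORT_DISPLACEMENT are the constants
+ * defined below:
+ *
+ *     // Create the per-node registry and bind the write service in it.
+ *     Registry registry = LocateRegistry.createRegistry
+ *         (repConfig.getNodePort() + RMI_PORT_DISPLACEMENT);
+ *     WriteServices services = new WriteServicesImpl(System.out);
+ *     UnicastRemoteObject.exportObject(services, 0); // anonymous port
+ *     registry.rebind(RMI_NAME, services);
+ *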

    + * Please review the javadoc in {@link StockQuotes} for a detailed description + * of the arguments that must be supplied at startup. The only difference is + * that you must use the name of this class when invoking the Java VM. + *

    + * For example, the first node can be started as follows: + * + *

+ * java je.rep.quote.StockQuotesRMIForwarding -env /tmp/stockQuotes1 \
+ *                                            -nodeName n1 \
+ *                                            -nodeHost node.acme.com:5001 \
+ *                                            -helperHost node.acme.com:5001
    + * 
    + *
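+ *
+ * Additional nodes are started the same way (an illustrative sketch; adjust
+ * directories, names and ports to your setup), each with its own environment
+ * directory and node name, using the first node as the helper:
+ *
+ * java je.rep.quote.StockQuotesRMIForwarding -env /tmp/stockQuotes2 \
+ *                                            -nodeName n2 \
+ *                                            -nodeHost node.acme.com:5002 \
+ *                                            -helperHost node.acme.com:5001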

+ * This instance of the application will therefore use port 5001 for HA, and,
+ * by convention, port 5101 (5001 + RMI_PORT_DISPLACEMENT) for
+ * the RMI registry. If you are running on multiple machines you may (depending
+ * upon your DNS setup) need to specify the
+ * java.rmi.server.hostname property to ensure that RMI does not
+ * associate loopback addresses with entries in its registry.
+ */
+public class StockQuotesRMIForwarding extends StockQuotes {
+
+    /* This name is bound to the WriteServices reference in the registry. */
+    public static final String RMI_NAME = "StockQuotes";
+
+    /*
+     * The displacement from the node's port to the port on which the
+     * registry is established. It's just a simple way to find the registry
+     * port associated with a node based upon its port.
+     */
+    private static final int RMI_PORT_DISPLACEMENT = 100;
+
+    /* The RMI registry associated with this node. */
+    private Registry nodeRegistry = null;
+
+    /* The implementation of the write services made available by this node. */
+    private final WriteServices writeServices;
+
+    /* The current master as maintained by the Listener. */
+    private volatile MasterInfo masterInfo = new MasterInfo();
+
+    /**
+     * The Listener used to react to StateChangeEvents. It maintains the name
+     * of the current master.
+     */
+    private class Listener implements StateChangeListener {
+
+        /**
+         * The protocol method used to service StateChangeEvent notifications.
+         */
+        public void stateChange(StateChangeEvent stateChangeEvent)
+            throws RuntimeException {
+
+            switch (stateChangeEvent.getState()) {
+
+                case MASTER:
+                case REPLICA:
+                    String masterName = stateChangeEvent.getMasterNodeName();
+                    masterInfo = new MasterInfo(masterName);
+
+                    System.err.println("Master: " + masterName +
+                                       " at " + new Date());
+                    break;
+
+                default:
+                    masterInfo = new MasterInfo();
+                    System.err.println("Unknown master. " +
+                                       " Node state: " +
+                                       stateChangeEvent.getState());
+                    break;
+            }
+        }
+    }
+
+    /**
+     * Updates the stock price, forwarding the write request if the node is
+     * not currently the master, or if the node changes status while a
+     * transaction is in progress.
+     *
+     * @param line the validated command line
+     *
+     * @param printStream the output stream
+     *
+     * @throws InterruptedException
+     */
+    @Override
+    void updateStock(final String line, final PrintStream printStream)
+        throws InterruptedException {
+
+        final Quote quote = QuoteUtil.parseQuote(line);
+
+        if (repEnv.getState().isReplica()) {
+            forwardStockUpdate(quote);
+            return;
+        }
+
+        new RunTransaction(repEnv, printStream) {
+
+            @Override
+            public void doTransactionWork(Transaction txn) {
+                dao.quoteById.put(txn, quote);
+                /* Output local indication of processing. */
+                System.out.println(repEnv.getNodeName() +
+                                   " processed update request: " + line);
+            }
+
+            @Override
+            public void onReplicaWrite(ReplicaWriteException replicaWrite) {
+                /* Forward to the current master. */
+                forwardStockUpdate(quote);
+            }
+        }.run(false /*readOnly*/);
+    }
+
+    /**
+     * An update request on the replica. Forward it to the current master, if
+     * we have one that's reachable.
+     *
+     * @param quote the parsed quote to forward
+     */
+    private void forwardStockUpdate(Quote quote) {
+        try {
+
+            if (masterInfo.name == null) {
+                System.out.println("Could not update:" + quote.stockSymbol +
+                                   " Master is unknown.
"); + return; + } + + masterInfo.reference.update(quote); + System.out.println(repEnv.getNodeName() + " forwarded " + + quote.stockSymbol + " update to " + + masterInfo.name); + + } catch (RemoteException e) { + + if (e.getCause() instanceof ReplicaWriteException) { + forwardStockUpdate(quote); + return; + } + System.out.println("Could not connect to master: " + + masterInfo.name + " Exception: " + e); + } + } + + /** + * The constructor. It sets up the RMI registry and binds the remote + * reference. + * + * @see StockQuotes#StockQuotes(String[]) + */ + private StockQuotesRMIForwarding(String[] params) + throws Exception { + + super(params); + + nodeRegistry = LocateRegistry. + createRegistry(repConfig.getNodePort() + RMI_PORT_DISPLACEMENT); + writeServices = new WriteServicesImpl(System.out); + UnicastRemoteObject.exportObject(writeServices, 0); + nodeRegistry.rebind(RMI_NAME, writeServices); + } + + /** + * Sets up the state change listener. + * + * @throws InterruptedException + */ + @Override + void initialize() + throws InterruptedException { + + super.initialize(); + /* Track Master state changes, so we can forward appropriately. */ + repEnv.setStateChangeListener(new Listener()); + } + + /** + * Performs the RMI associated cleanup so that the RMI serve can be + * shutdown cleanly. + */ + @Override + public void quit(PrintStream out) { + super.quit(out); + try { + UnicastRemoteObject.unexportObject(writeServices, true); + nodeRegistry.unbind(RMI_NAME); + UnicastRemoteObject.unexportObject(nodeRegistry,true); + } catch (Exception e) { + throw new IllegalStateException(e); + } + } + + public static void main(String[] argv) + throws Exception { + + StockQuotes stockQuotes = new StockQuotesRMIForwarding(argv); + stockQuotes.runExample(); + } + + /** + * The class supplies the RMI implementation of the write methods. + */ + public class WriteServicesImpl implements WriteServices { + final PrintStream printStream; + + public WriteServicesImpl(PrintStream printStream) { + + super(); + this.printStream = printStream; + } + + private static final long serialVersionUID = 1L; + + /** + * The update operation invoked by a Replica on this Master. + * + *

    Note that this method is executed in an RMI thread and does not + * handle the environment failure level exceptions: + * InsufficientLogException and + * RollbackException exception in order to keep the + * example simple. Production code would handle the exception here and + * coordinate with the main thread of control and other RMI threads to + * take corrective actions and re-estabblish the environment and + * database handles. + */ + public void update(final Quote quote) + throws RemoteException { + + try { + new RunTransaction(repEnv, printStream) { + + @Override + public void doTransactionWork(Transaction txn) { + dao.quoteById.put(txn, quote); + /* Output local indication of processing. */ + System.out.println + (repEnv.getNodeName() + + " processed remote update request. " + + " Stock:" + quote.stockSymbol + + " Price:" + quote.lastTrade); + } + + @Override + public void onReplicaWrite(ReplicaWriteException rwe) { + /* Attempted modification while in the replica state. */ + throw rwe; + } + }.run(false /*readOnly*/); + } catch (InterruptedException e) { + throw new RemoteException("Update for stock:" + + quote.stockSymbol + + " interrupted.", e); + } catch (ReplicaWriteException e) { + String errorMessage = repEnv.getNodeName() + + " is not currently the master. Perform the update at" + + " the node that's currently the master:" + masterInfo.name; + throw new RemoteException("Update for stock:" + + quote.stockSymbol + + " failed. " + errorMessage, e); + } + } + } + + /** + * Internal class used to treat the master name and remote reference as + * a fixed pair. + */ + private class MasterInfo { + /* The node name of the master*/ + final String name; + + /* The remote reference to the Master with the above name */ + final WriteServices reference; + + public MasterInfo(String masterName) { + this.name = masterName; + ReplicationNode masterNode = repEnv.getGroup().getMember(name); + Registry currentMasterRegistry; + try { + currentMasterRegistry = LocateRegistry. + getRegistry(masterNode.getHostName(), + masterNode.getPort() + RMI_PORT_DISPLACEMENT); + reference = (WriteServices)currentMasterRegistry. + lookup(RMI_NAME); + } catch (RemoteException e) { + throw new RuntimeException(e); + } catch (NotBoundException e) { + throw new RuntimeException(e); + } + } + + public MasterInfo() { + name = null; + reference = null; + } + } + + /* Define the remote interface. */ + public interface WriteServices extends Remote { + + /** + * The "write" operation which will update the price associated with + * the Stock. + */ + void update(Quote quote) throws RemoteException; + } +} diff --git a/examples/je/rep/quote/UpdateForwardingStockQuotes.java b/examples/je/rep/quote/UpdateForwardingStockQuotes.java new file mode 100644 index 0000000..d100a59 --- /dev/null +++ b/examples/je/rep/quote/UpdateForwardingStockQuotes.java @@ -0,0 +1,267 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package je.rep.quote; + +import java.io.IOException; +import java.io.PrintStream; +import java.net.InetSocketAddress; +import java.util.Date; + +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicationGroup; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; + +/** + * This class is based on {@link RouterDrivenStockQuotes} and illustrates use + * of an HA unaware router (implemented by {@link SimpleRouter}), that load + * balances requests (both read and write) across all the nodes in a + * replication group. This example is meant to illustrate how a load balancer + * appliance might fit into the JE HA architecture, where {@code SimpleRouter} + * plays the role of the load balancer appliance for purposes of the example. + *

    + * Be sure to read the {@link je.rep.quote Example Overview} first to put this + * example into context. + *

    + * The router is unaware of the state (Master or Replica) of each node, or the + * type (read or write) of the request. Nodes use the {@link + * com.sleepycat.je.rep.StateChangeListener StateChangeListener} to track the + * node that is currently the master and redirect write requests to it. That + * is, unlike the {@link RouterDrivenStockQuotes} example, it's the nodes and + * not the router that keeps track of the current master. + *
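+ *
+ * The essence of that tracking is sketched below; the Listener class defined
+ * in this file is the complete version:
+ *
+ *     // Sketch: remember the master's name while the node is active.
+ *     class MasterTracker implements StateChangeListener {
+ *         volatile String masterName;
+ *         public void stateChange(StateChangeEvent event) {
+ *             masterName = event.getState().isActive() ?
+ *                          event.getMasterNodeName() : null;
+ *         }
+ *     }
+ *     repEnv.setStateChangeListener(new MasterTracker());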

    + * In this example, unlike StockQuotes, only the + * {@link SimpleRouter} has a console associated with it. It accepts commands + * typed into its console and forwards them as appropriate to the nodes in the + * group. The logic for tracking the Master resides in each node, and is + * supplied by the {@link com.sleepycat.je.rep.StateChangeListener + * StateChangeListener}. + *

    + * Each node, which in this example is an instance of + * UpdateForwardingStockQuotes, establishes a server socket on + * which it can listen for requests from SimpleRouter. Read + * requests are processed directly by the node. Write requests are redirected + * to the current master and the result is communicated back to + * SimpleRouter. + *

    + * The request flow between nodes in this example is shown below. + *

    + * ----------------       Read and Write requests
    + * | SimpleRouter |------------------------------------||
    + * | Instance     |---------------------||             ||
    + * ----------------      ||             ||             ||
    + *                       ||             ||             ||
    + *                       \/             ||             ||
    + * -------------------------------      ||             ||
    + * | UpdateForwardingStockQuotes |      ||             ||
    + * | Instance 1: Master          |      ||             ||
    + * -------------------------------      \/             ||
    + *   /\           -------------------------------      ||
    + *   ||           | UpdateForwardingStockQuotes |      ||
    + *   ||---------- | Instance 2: Replica         |      ||
    + *   || Write     -------------------------------      \/
    + *   || requests                 -------------------------------
    + *   ||                          | UpdateForwardingStockQuotes |
    + *   ||--------------------------| Instance 3: Replica         |
    + *                               -------------------------------
    + *
    + *                                       ...more Replica instances...
    + * 
    + *

+ * This example is intended to be illustrative. It forwards requests as text
+ * and receives responses in text form. Actual applications may, for example,
+ * forward HTTP requests, or use some other application-level network protocol
+ * to forward such requests.
+ *
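+ * For example (a sketch using this example's own helper, where
+ * masterAppAddress stands for the address computed by
+ * getUpdateRequestProcessorSocket below), a replica forwards a write command
+ * verbatim as a single line of text:
+ *
+ *     QuoteUtil.forwardRequest(masterAppAddress, "MSFT 250", System.out);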

+ * Please review the javadoc in {@link StockQuotes} for a detailed description
+ * of the arguments that must be supplied at startup. The only difference is
+ * that you must use the name of this class when invoking the Java VM.
+ *

    + * For example, the first node can be started as follows: + * + *

    + * java je.rep.quote.UpdateForwardingStockQuotes -env /tmp/stockQuotes1 \
    + *                                               -nodeName n1 \
    + *                                               -nodeHost node.acme.com:5001 \
    + *                                               -helperHost node.acme.com:5001
    + * 
    + *

    + * This instance of the application will therefore use port 5001 for HA, and, + * by convention, port 5101 (5001 + HARouter.APP_PORT_DISPLACEMENT) + * for application messages sent to it. + *
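+ *
+ * That is, a node's application address is derived from its HA address by a
+ * fixed displacement (sketch):
+ *
+ *     // HA port 5001 + APP_PORT_DISPLACEMENT = application port 5101.
+ *     InetSocketAddress appAddress =
+ *         HARouter.getAppSocketAddress(nodeSocketAddress);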

    + * In addition to starting the nodes, you will also need to start the + * {@link SimpleRouter} as described in its javadoc. + * + * @see SimpleRouter + */ +public class UpdateForwardingStockQuotes extends RouterDrivenStockQuotes { + + /* The current master as maintained by the Listener. */ + private volatile String currentmasterName = null; + + /** + * The Listener used to react to StateChangeEvents. It maintains the name + * of the current master. + */ + private class Listener implements StateChangeListener { + + /** + * The protocol method used to service StateChangeEvent notifications. + */ + public void stateChange(StateChangeEvent stateChangeEvent) + throws RuntimeException { + + switch (stateChangeEvent.getState()) { + + case MASTER: + case REPLICA: + currentmasterName = stateChangeEvent.getMasterNodeName(); + System.err.println("Master: " + currentmasterName + + " at " + new Date()); + break; + + default: + currentmasterName = null; + System.err.println("Unknown master. " + + " Node state: " + + stateChangeEvent.getState()); + break; + } + } + } + + /** + * Returns a socket to the UpdateRequestProcessor associated with the + * current master. + * + * @return the socket to the UpdateRequestProcessor associated with the + * master, or null if the node does not know if a current master. + */ + private InetSocketAddress getUpdateRequestProcessorSocket() { + final String master = currentmasterName; + + if (master == null) { + return null; + } + + ReplicationGroup group = repEnv.getGroup(); + + InetSocketAddress nodeSocketAddress = + group.getMember(master).getSocketAddress(); + + return HARouter.getAppSocketAddress(nodeSocketAddress); + } + + /** + * Updates the stock price. Forward the write request, if the node is not + * currently the master, or if the node changes status while a transaction + * is in progress. + * + * @param line the command line + * + * @param printStream the output stream for message results + * + * @throws InterruptedException + */ + @Override + void updateStock(final String line, final PrintStream printStream) + throws InterruptedException { + + if (repEnv.getState().isReplica()) { + forwardStockUpdate(line, printStream); + return; + } + + final Quote quote = QuoteUtil.parseQuote(line); + if (quote == null) { + return; + } + + new RunTransaction(repEnv, printStream) { + + @Override + public void doTransactionWork(Transaction txn) { + dao.quoteById.put(txn, quote); + /* Output local indication of processing. */ + System.out.println(repEnv.getNodeName() + + " processed update request: " + line); + } + + @Override + public void onReplicaWrite(ReplicaWriteException replicaWrite) { + /* Forward to the current master */ + forwardStockUpdate(line, printStream); + } + }.run(false /*readOnly*/); + } + + /** + * + * An update request on the replica. Forward it to the current master, if + * we have one that's reachable. + * + * @param stockUpdateLine the command to forward + * + * @param printStream the stream used to capture the output from the + * forwarded request + */ + void forwardStockUpdate(String stockUpdateLine, PrintStream printStream) { + try { + QuoteUtil.forwardRequest(getUpdateRequestProcessorSocket(), + stockUpdateLine, + printStream); + System.out.println(repEnv.getNodeName() + " forwarded " + + stockUpdateLine + " to " + currentmasterName); + + } catch (IOException e) { + printStream.println("Could not connect to master: " + + currentmasterName + " Exception: " + e); + } + } + + /** + * The constructor. 
It forwards to the base StockQuotes for initialization + * and sets up the Listener. + * + * @see StockQuotes#StockQuotes(String[]) + */ + private UpdateForwardingStockQuotes(String[] params) + throws Exception { + + super(params); + } + + /** + * Sets up the state change listener. + * + * @throws InterruptedException + */ + @Override + void initialize() + throws InterruptedException { + + super.initialize(); + /* Track Master state changes, so we can forward appropriately. */ + repEnv.setStateChangeListener(new Listener()); + } + + public static void main(String[] argv) + throws Exception { + + StockQuotes stockQuotes = new UpdateForwardingStockQuotes(argv); + stockQuotes.runExample(); + } +} diff --git a/examples/je/rep/quote/package.html b/examples/je/rep/quote/package.html new file mode 100644 index 0000000..88d88fd --- /dev/null +++ b/examples/je/rep/quote/package.html @@ -0,0 +1,71 @@ + + + + + +JE Replication Stock Quote example. + +

    Example Overview

    +This example is a simple but complete demonstration of a replicated +application. The application is a mock stock ticker which stores stock values +in a replicated JE environment. The following commands are accepted: +
+
• <stock> <number> : enter this stock price into the database
• print : print all the stocks and current prices held in the database
• quit : shut down
+
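For example (illustrative only; the exact prompt is produced by
QuoteUtil.promptAndRead and reflects the node's name and state), a console
session at a node might look like:

    StockQuotes-n1> MSFT 250
    StockQuotes-n1> print
        Symbol  Price
        ======  =====
        MSFT    250

        1 stock listed.
    StockQuotes-n1> quit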

    +There are three versions of the example which illustrate different application +designs and aspects of JE functionality. Please be sure to walk through the +three examples in the order listed below, since the information in one example +builds on the one before it. The javadoc description for each class describes +the example and explains how to run it. More detailed information is found in +the example source code. +

      +
1. {@link je.rep.quote.StockQuotes StockQuotes}: This example is the most
basic demonstration of a replicated application. It's intended to help gain an
understanding of basic HA concepts and demonstrate use of the HA APIs to create
a replicated environment and issue read and write transactions.

2. {@link je.rep.quote.RouterDrivenStockQuotes RouterDrivenStockQuotes} and
{@link je.rep.quote.HARouter HARouter}: This example is based on {@code
StockQuotes} and illustrates use of an HA-aware router (implemented by
{@code HARouter}), in conjunction with the {@link
com.sleepycat.je.rep.monitor.Monitor Monitor} class, to direct application
requests, based upon the type of request (read or write) and the state (Master
or Replica) of a node in the replication group. This example is meant to
illustrate how a software load balancer might be integrated with JE HA, where
{@code HARouter} plays the role of the load balancer for purposes of the
example.

3. {@link je.rep.quote.UpdateForwardingStockQuotes UpdateForwardingStockQuotes}
and {@link je.rep.quote.SimpleRouter SimpleRouter}: This example is based on
{@code RouterDrivenStockQuotes} and illustrates use of an HA-unaware router
(implemented by {@code SimpleRouter}) that load-balances requests (both read
and write) across all the nodes in a replication group. This example is meant
to illustrate how a load balancer appliance might fit into the JE HA
architecture, where {@code SimpleRouter} plays the role of the load balancer
appliance for purposes of the example.
+Disclaimer: This example is intended to be illustrative. The example
is single-threaded, while actual applications may be multithreaded. The
example forwards requests as text and receives responses in text form, while
actual applications may, for example, forward HTTP requests, or use some other
application-level network protocol to forward such requests. The example opens
and closes a socket to send each request, while actual applications will
typically use a connection management facility.

+The example
{@link je.rep.quote.StockQuotesRMIForwarding StockQuotesRMIForwarding},
a minor variation on the basic {@link je.rep.quote.StockQuotes StockQuotes}
example, is also included in this package. It's intended to help illustrate how
RMI could be used to forward write requests from a Replica to the Master.

    + + diff --git a/examples/je/txn/DBWriter.java b/examples/je/txn/DBWriter.java new file mode 100644 index 0000000..9645b3b --- /dev/null +++ b/examples/je/txn/DBWriter.java @@ -0,0 +1,180 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.txn; + +import java.util.Random; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; + +public class DBWriter extends Thread +{ + private Database myDb = null; + private Environment myEnv = null; + private EntryBinding dataBinding = null; + private Random generator = new Random(); + + private static int MAX_RETRY = 20; + + private static String[] keys = {"key 1", "key 2", "key 3", + "key 4", "key 5", "key 6", + "key 7", "key 8", "key 9", + "key 10"}; + + // Constructor. Get our DB handles from here + DBWriter(Environment env, Database db, StoredClassCatalog scc) { + myDb = db; + myEnv = env; + dataBinding = new SerialBinding(scc, PayloadData.class); + } + + // Thread method that writes a series of records + // to the database using transaction protection. + // Deadlock handling is demonstrated here. + public void run () { + Transaction txn = null; + + // Perform 50 transactions + for (int i=0; i<50; i++) { + + boolean retry = true; + int retry_count = 0; + // while loop is used for deadlock retries + while (retry) { + // try block used for deadlock detection and + // general db exception handling + try { + + // Get a transaction + txn = myEnv.beginTransaction(null, null); + + // Write 10 records to the db + // for each transaction + for (int j = 0; j < 10; j++) { + // Get the key + DatabaseEntry key = new DatabaseEntry(); + StringBinding.stringToEntry(keys[j], key); + + // Get the data + PayloadData pd = new PayloadData(i+j, getName(), + generator.nextDouble()); + DatabaseEntry data = new DatabaseEntry(); + dataBinding.objectToEntry(pd, data); + + // Do the put + myDb.put(txn, key, data); + } + + // commit + System.out.println(getName() + " : committing txn : " + i); + + System.out.println(getName() + " : Found " + + countRecords(null) + " records in the database."); + try { + txn.commit(); + txn = null; + } catch (DatabaseException e) { + System.err.println("Error on txn commit: " + + e.toString()); + } + retry = false; + + } catch (LockConflictException de) { + System.out.println("################# " + getName() + + " : caught deadlock"); + // retry if necessary + if (retry_count < MAX_RETRY) { + System.err.println(getName() + + " : Retrying operation."); + retry = true; + retry_count++; + } else { + System.err.println(getName() + + " : out of retries. 
Giving up."); + retry = false; + } + } catch (DatabaseException e) { + // abort and don't retry + retry = false; + System.err.println(getName() + + " : caught exception: " + e.toString()); + e.printStackTrace(); + } finally { + if (txn != null) { + try { + txn.abort(); + } catch (Exception e) { + System.err.println("Error aborting transaction: " + + e.toString()); + e.printStackTrace(); + } + } + } + } + } + } + + // This simply counts the number of records contained in the + // database and returns the result. You can use this method + // in three ways: + // + // First call it with an active txn handle. + // Secondly, configure the cursor for dirty reads + // Third, call count_records AFTER the writer has committed + // its transaction. + // + // If you do none of these things, the writer thread will + // self-deadlock. + // + // Note that this method exists only for illustrative purposes. + // A more straight-forward way to count the number of records in + // a database is to use the Database.getStats() method. + private int countRecords(Transaction txn) throws DatabaseException { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + int count = 0; + Cursor cursor = null; + + try { + // Get the cursor + CursorConfig cc = new CursorConfig(); + cc.setReadUncommitted(true); + cursor = myDb.openCursor(txn, cc); + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + + count++; + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + + return count; + + } +} diff --git a/examples/je/txn/PayloadData.java b/examples/je/txn/PayloadData.java new file mode 100644 index 0000000..68ec3d5 --- /dev/null +++ b/examples/je/txn/PayloadData.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package je.txn; + +import java.io.Serializable; + +public class PayloadData implements Serializable { + private int oID; + private String threadName; + private double doubleData; + + PayloadData(int id, String name, double data) { + oID = id; + threadName = name; + doubleData = data; + } + + public double getDoubleData() { return doubleData; } + public int getID() { return oID; } + public String getThreadName() { return threadName; } +} diff --git a/examples/je/txn/TxnGuide.java b/examples/je/txn/TxnGuide.java new file mode 100644 index 0000000..542e38f --- /dev/null +++ b/examples/je/txn/TxnGuide.java @@ -0,0 +1,161 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +// File TxnGuide.java + +package je.txn; + +import java.io.File; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; + +public class TxnGuide { + + private static String myEnvPath = "./"; + private static String dbName = "mydb.db"; + private static String cdbName = "myclassdb.db"; + + // DB handles + private static Database myDb = null; + private static Database myClassDb = null; + private static Environment myEnv = null; + + private static int NUMTHREADS = 5; + + private static void usage() { + System.out.println("TxnGuide [-h ]"); + System.exit(-1); + } + + public static void main(String args[]) { + try { + // Parse the arguments list + parseArgs(args); + // Open the environment and databases + openEnv(); + // Get our class catalog (used to serialize objects) + StoredClassCatalog classCatalog = + new StoredClassCatalog(myClassDb); + + // Start the threads + DBWriter[] threadArray; + threadArray = new DBWriter[NUMTHREADS]; + for (int i = 0; i < NUMTHREADS; i++) { + threadArray[i] = new DBWriter(myEnv, myDb, classCatalog); + threadArray[i].start(); + } + + for (int i = 0; i < NUMTHREADS; i++) { + threadArray[i].join(); + } + } catch (Exception e) { + System.err.println("TxnGuide: " + e.toString()); + e.printStackTrace(); + } finally { + closeEnv(); + } + System.out.println("All done."); + } + + private static void openEnv() throws DatabaseException { + System.out.println("opening env"); + + // Set up the environment. + EnvironmentConfig myEnvConfig = new EnvironmentConfig(); + myEnvConfig.setAllowCreate(true); + myEnvConfig.setTransactional(true); + // Environment handles are free-threaded in JE, + // so we do not have to do anything to cause the + // environment handle to be free-threaded. + + // Set up the database + DatabaseConfig myDbConfig = new DatabaseConfig(); + myDbConfig.setAllowCreate(true); + myDbConfig.setTransactional(true); + myDbConfig.setSortedDuplicates(true); + // no DatabaseConfig.setThreaded() method available. + // db handles in java are free-threaded so long as the + // env is also free-threaded. + + // Open the environment + myEnv = new Environment(new File(myEnvPath), // Env home + myEnvConfig); + + // Open the database. Do not provide a txn handle. This open + // is autocommitted because DatabaseConfig.setTransactional() + // is true. 
+ myDb = myEnv.openDatabase(null, // txn handle + dbName, // Database file name + myDbConfig); + + // Used by the bind API for serializing objects + // Class database must not support duplicates + myDbConfig.setSortedDuplicates(false); + myClassDb = myEnv.openDatabase(null, // txn handle + cdbName, // Database file name + myDbConfig); + } + + private static void closeEnv() { + System.out.println("Closing env and databases"); + if (myDb != null ) { + try { + myDb.close(); + } catch (DatabaseException e) { + System.err.println("closeEnv: myDb: " + + e.toString()); + e.printStackTrace(); + } + } + + if (myClassDb != null ) { + try { + myClassDb.close(); + } catch (DatabaseException e) { + System.err.println("closeEnv: myClassDb: " + + e.toString()); + e.printStackTrace(); + } + } + + if (myEnv != null ) { + try { + myEnv.close(); + } catch (DatabaseException e) { + System.err.println("closeEnv: " + e.toString()); + e.printStackTrace(); + } + } + } + + private TxnGuide() {} + + private static void parseArgs(String args[]) { + for(int i = 0; i < args.length; ++i) { + if (args[i].startsWith("-")) { + switch(args[i].charAt(1)) { + case 'h': + myEnvPath = new String(args[++i]); + break; + default: + usage(); + } + } + } + } +} diff --git a/examples/jmx/JEApplicationMBean.java b/examples/jmx/JEApplicationMBean.java new file mode 100644 index 0000000..457339b --- /dev/null +++ b/examples/jmx/JEApplicationMBean.java @@ -0,0 +1,329 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package jmx; + +import java.io.File; +import java.lang.reflect.Constructor; +import java.util.List; + +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.AttributeNotFoundException; +import javax.management.DynamicMBean; +import javax.management.InvalidAttributeValueException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanConstructorInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanNotificationInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanParameterInfo; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.jmx.JEMBeanHelper; + +/** + * JEApplicationMBean is an example of how a JE application can incorporate JE + * monitoring into its existing MBean. It may be installed as is, or used as a + * starting point for building a MBean which includes JE support. + *

    + * JE management is divided between the JEApplicationMBean class and + * JEMBeanHelper class. JEApplicationMBean contains an instance of + * JEMBeanHelper, which knows about JE attributes, operations and + * notifications. JEApplicationMBean itself has the responsibility of + * configuring, opening and closing the JE environment along with any other + * resources used by the application, and maintains a + * com.sleepycat.je.Environment handle. + *
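+ *
+ * The division of labor reduces most DynamicMBean methods to a thin
+ * delegation, as this fragment from the class below illustrates:
+ *
+ *     public Object getAttribute(String attributeName)
+ *         throws AttributeNotFoundException, MBeanException {
+ *         // JEMBeanHelper knows how to resolve JE attributes.
+ *         return jeHelper.getAttribute(targetEnv, attributeName);
+ *     }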

+ * The approach taken for accessing the environment is an application-specific
+ * choice. Some of the salient considerations are (see the sketch after the
+ * list below):
+ *

+ *
+ * • Applications may open one or many Environment objects per process
+ *   against a given environment.
+ *
+ * • All Environment handles reference the same underlying JE environment
+ *   implementation object.
+ *
+ * • The first Environment object instantiated in the process does the real
+ *   work of configuring and opening the environment. Follow-on instantiations
+ *   of Environment merely increment a reference count. Likewise,
+ *   Environment.close() only does real work when it's called by the last
+ *   Environment object in the process.
+ *
    + *
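+ *
+ * A sketch of the reference-counting behavior described above, assuming a
+ * valid home directory and configuration:
+ *
+ *     Environment e1 = new Environment(home, config); // the real open
+ *     Environment e2 = new Environment(home, config); // increments the count
+ *     e2.close();  // merely decrements the count
+ *     e1.close();  // last handle in the process: the real close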

    + * Another MBean approach for environment access can be seen in + * com.sleepycat.je.jmx.JEMonitor. That MBean does not take responsibility for + * opening and closing environments, and can only operate against already-open + * environments. + */ + +public class JEApplicationMBean implements DynamicMBean { + + private static final String DESCRIPTION = + "A MBean for an application which uses JE. Provides open and close " + + "operations which configure and open a JE environment as part of the "+ + "applications's resources. Also supports general JE monitoring."; + + private MBeanInfo mbeanInfo; // this MBean's visible interface. + private JEMBeanHelper jeHelper; // gets JE management interface + private Environment targetEnv; // saved environment handle + + /** + * This MBean provides an open operation to open the JE environment. + */ + public static final String OP_OPEN = "openJE"; + + /** + * This MBean provides a close operation to release the JE environment. + * Note that environments must be closed to release resources. + */ + public static final String OP_CLOSE = "closeJE"; + + /** + * Instantiate a JEApplicationMBean + * + * @param environmentHome home directory of the target JE environment. + */ + public JEApplicationMBean(String environmentHome) { + + File environmentDirectory = new File(environmentHome); + jeHelper = new JEMBeanHelper(environmentDirectory, true); + resetMBeanInfo(); + } + + /** + * @see DynamicMBean#getAttribute + */ + public Object getAttribute(String attributeName) + throws AttributeNotFoundException, + MBeanException { + + return jeHelper.getAttribute(targetEnv, attributeName); + } + + /** + * @see DynamicMBean#setAttribute + */ + public void setAttribute(Attribute attribute) + throws AttributeNotFoundException, + InvalidAttributeValueException { + + jeHelper.setAttribute(targetEnv, attribute); + } + + /** + * @see DynamicMBean#getAttributes + */ + public AttributeList getAttributes(String[] attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("Attributes cannot be null"); + } + + /* Get each requested attribute. */ + AttributeList results = new AttributeList(); + for (int i = 0; i < attributes.length; i++) { + try { + String name = attributes[i]; + Object value = jeHelper.getAttribute(targetEnv, name); + results.add(new Attribute(name, value)); + } catch (Exception e) { + e.printStackTrace(); + } + } + return results; + } + + /** + * @see DynamicMBean#setAttributes + */ + public AttributeList setAttributes(AttributeList attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("attribute list can't be null"); + } + + /* Set each attribute specified. */ + AttributeList results = new AttributeList(); + for (int i = 0; i < attributes.size(); i++) { + Attribute attr = (Attribute) attributes.get(i); + try { + /* Set new value. */ + jeHelper.setAttribute(targetEnv, attr); + + /* + * Add the name and new value to the result list. Be sure + * to ask the MBean for the new value, rather than simply + * using attr.getValue(), because the new value may not + * be same if it is modified according to the JE + * implementation. 
+ */ + String name = attr.getName(); + Object newValue = jeHelper.getAttribute(targetEnv, name); + results.add(new Attribute(name, newValue)); + } catch (Exception e) { + e.printStackTrace(); + } + } + return results; + } + + /** + * @see DynamicMBean#invoke + */ + public Object invoke(String actionName, + Object[] params, + String[] signature) + throws MBeanException { + + Object result = null; + + if (actionName == null) { + throw new IllegalArgumentException("actionName cannot be null"); + } + + if (actionName.equals(OP_OPEN)) { + openEnvironment(); + return null; + } else if (actionName.equals(OP_CLOSE)) { + closeEnvironment(); + return null; + } else { + result = jeHelper.invoke(targetEnv, actionName, params, signature); + } + + return result; + } + + /** + * @see DynamicMBean#getMBeanInfo + */ + public MBeanInfo getMBeanInfo() { + return mbeanInfo; + } + + /** + * Create the available management interface for this environment. + * The attributes and operations available vary according to + * environment configuration. + * + */ + private synchronized void resetMBeanInfo() { + + /* + * Get JE attributes, operation and notification information + * from JEMBeanHelper. An application may choose to add functionality + * of its own when constructing the MBeanInfo. + */ + + /* Attributes. */ + List attributeList = jeHelper.getAttributeList(targetEnv); + MBeanAttributeInfo[] attributeInfo = + new MBeanAttributeInfo[attributeList.size()]; + attributeList.toArray(attributeInfo); + + /* Constructors. */ + Constructor[] constructors = this.getClass().getConstructors(); + MBeanConstructorInfo[] constructorInfo = + new MBeanConstructorInfo[constructors.length]; + for (int i = 0; i < constructors.length; i++) { + constructorInfo[i] = + new MBeanConstructorInfo(this.getClass().getName(), + constructors[i]); + } + + /* Operations. */ + + /* + * Get the list of operations available from the jeHelper. Then add + * an open and close operation. + */ + List operationList = jeHelper.getOperationList(targetEnv); + if (targetEnv == null) { + operationList.add( + new MBeanOperationInfo(OP_OPEN, + "Configure and open the JE environment.", + new MBeanParameterInfo[0], // no params + "java.lang.Boolean", + MBeanOperationInfo.ACTION_INFO)); + } else { + operationList.add( + new MBeanOperationInfo(OP_CLOSE, + "Close the JE environment.", + new MBeanParameterInfo[0], // no params + "void", + MBeanOperationInfo.ACTION_INFO)); + } + + MBeanOperationInfo[] operationInfo = + new MBeanOperationInfo[operationList.size()]; + operationList.toArray(operationInfo); + + /* Notifications. */ + MBeanNotificationInfo[] notificationInfo = + jeHelper.getNotificationInfo(targetEnv); + + /* Generate the MBean description. */ + mbeanInfo = new MBeanInfo(this.getClass().getName(), + DESCRIPTION, + attributeInfo, + constructorInfo, + operationInfo, + notificationInfo); + } + + /** + * Open a JE environment using the configuration specified through + * MBean attributes and recorded within the JEMBeanHelper. + */ + private void openEnvironment() + throws MBeanException { + + try { + if (targetEnv == null) { + /* + * The environment configuration has been set through + * mbean attributes managed by the JEMBeanHelper. + */ + targetEnv = + new Environment(jeHelper.getEnvironmentHome(), + jeHelper.getEnvironmentOpenConfig()); + resetMBeanInfo(); + } + } catch (DatabaseException e) { + throw new MBeanException(e); + } + } + + /** + * Release the environment handle contained within the MBean to properly + * release resources. 
+ */ + private void closeEnvironment() + throws MBeanException { + + try { + if (targetEnv != null) { + targetEnv.close(); + targetEnv = null; + resetMBeanInfo(); + } + } catch (DatabaseException e) { + throw new MBeanException(e); + } + } +} diff --git a/examples/jmx/README.txt b/examples/jmx/README.txt new file mode 100644 index 0000000..17fbdcc --- /dev/null +++ b/examples/jmx/README.txt @@ -0,0 +1,47 @@ +JE provides a fully functional JMX MBean in com.sleepycat.je.jmx.JEMonitor. +To use this MBean, build and deploy jejmx.jar: + + 1. cd + 2. modify /build.properties and set j2ee.jarfile to an + appropriate J2EE jar. + 3. ant jmx + +This builds a jejmx.jar in /build/lib which contains the +MBean. A sample JBoss service descriptor can be found in +je-jboss-service.xml in this directory. The MBean can be deployed +by modifying the service file to point to a JE environment, and +then copying the service file, jejmx.jar, and je.jar to the JBoss +deployment directory. + +JEMonitor expects another component in the JVM to configure and open +the JE environment; it will only access a JE environment that is +already active. It is intended for these use cases: + +- The application wants to add database monitoring with minimal effort and + little knowledge of JMX. Configuring JEMonitor within the JMX container + provides monitoring without requiring application code changes. + +- An application already supports JMX and wants to add database monitoring + without modifying its existing MBean. The user can configure JEMonitor in + the JMX container in conjunction with other application MBeans that are + non-overlapping with JE monitoring. No application code changes are + required. + +Users may want to incorporate JE management functionality into their +own MBeans, expecially if their application configures and opens the +JE environment. This can be done by using the utility class +com.sleepycat.je.jmx.JEMBeanHelper and an example implementation, +com.sleepycat.je.JEApplicationMBean which is provided in this +directory. This MBean differs from JEMonitor by supporting environment +configuration and creation from within the MBean. JEApplicationMBean +may be deployed, or used as a starting point for an alternate +implementation. To build the example, + + 1. cd + 2. modify /build.properties and set j2ee.jarfile to an + appropriate J2EE jar. + 3. ant jmx-examples + +This creates a jejmx-example.jar in /build/lib that can be +copied to the appropriate deployment directory. See the +je-jboss-service.xml file for an example of how this might be done for JBoss. diff --git a/examples/jmx/je-jboss-service.xml b/examples/jmx/je-jboss-service.xml new file mode 100644 index 0000000..9bd3ec3 --- /dev/null +++ b/examples/jmx/je-jboss-service.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/persist/CustomKeyOrderExample.java b/examples/persist/CustomKeyOrderExample.java new file mode 100644 index 0000000..16ac310 --- /dev/null +++ b/examples/persist/CustomKeyOrderExample.java @@ -0,0 +1,128 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist; + +import java.io.File; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; + +public class CustomKeyOrderExample { + + @Entity + static class Person { + + @PrimaryKey + ReverseOrder name; + + Person(String name) { + this.name = new ReverseOrder(name); + } + + private Person() {} // For deserialization + + @Override + public String toString() { + return name.value; + } + } + + @Persistent + static class ReverseOrder implements Comparable { + + @KeyField(1) + String value; + + ReverseOrder(String value) { + this.value = value; + } + + private ReverseOrder() {} // For deserialization + + public int compareTo(ReverseOrder o) { + return o.value.compareTo(value); + } + } + + public static void main(String[] args) + throws DatabaseException { + + if (args.length != 2 || !"-h".equals(args[0])) { + System.err.println + ("Usage: java " + CustomKeyOrderExample.class.getName() + + " -h "); + System.exit(2); + } + CustomKeyOrderExample example = + new CustomKeyOrderExample(new File(args[1])); + example.run(); + example.close(); + } + + private Environment env; + private EntityStore store; + + private CustomKeyOrderExample(File envHome) + throws DatabaseException { + + /* Open a transactional Berkeley DB engine environment. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + + /* Open a transactional entity store. */ + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + store = new EntityStore(env, "TestStore", storeConfig); + } + + private void run() + throws DatabaseException { + + PrimaryIndex index = + store.getPrimaryIndex(ReverseOrder.class, Person.class); + + index.put(new Person("Andy")); + index.put(new Person("Lisa")); + index.put(new Person("Zola")); + + /* Print the entities in key order. */ + EntityCursor people = index.entities(); + try { + for (Person person : people) { + System.out.println(person); + } + } finally { + people.close(); + } + } + + private void close() + throws DatabaseException { + + store.close(); + env.close(); + } +} diff --git a/examples/persist/DplDump.java b/examples/persist/DplDump.java new file mode 100644 index 0000000..1d8ce60 --- /dev/null +++ b/examples/persist/DplDump.java @@ -0,0 +1,166 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist; + +import java.io.File; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.IndexNotAvailableException; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawStore; +import com.sleepycat.persist.raw.RawType; + +/** + * Dumps a store or all stores to standard output in raw XML format. This + * sample is intended to be modifed to dump in application specific ways. + * @see #usage + */ +public class DplDump { + + private File envHome; + private String storeName; + private boolean dumpMetadata; + private Environment env; + + public static void main(String[] args) { + try { + DplDump dump = new DplDump(args); + dump.open(); + dump.execute(); + dump.close(); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + private DplDump(String[] args) { + + for (int i = 0; i < args.length; i += 1) { + String name = args[i]; + String val = null; + if (i < args.length - 1 && !args[i + 1].startsWith("-")) { + i += 1; + val = args[i]; + } + if (name.equals("-h")) { + if (val == null) { + usage("No value after -h"); + } + envHome = new File(val); + } else if (name.equals("-s")) { + if (val == null) { + usage("No value after -s"); + } + storeName = val; + } else if (name.equals("-meta")) { + dumpMetadata = true; + } else { + usage("Unknown arg: " + name); + } + } + + if (envHome == null) { + usage("-h not specified"); + } + } + + private void usage(String msg) { + + if (msg != null) { + System.out.println(msg); + } + + System.out.println + ("usage:" + + "\njava " + DplDump.class.getName() + + "\n -h " + + "\n # Environment home directory" + + "\n [-meta]" + + "\n # Dump metadata; default: false" + + "\n [-s ]" + + "\n # Store to dump; default: dump all stores"); + + System.exit(2); + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setReadOnly(true); + env = new Environment(envHome, envConfig); + } + + private void close() + throws DatabaseException { + + env.close(); + } + + private void execute() + throws DatabaseException { + + if (storeName != null) { + dump(); + } else { + for (String name : EntityStore.getStoreNames(env)) { + storeName = name; + dump(); + } + } + } + + private void dump() + throws DatabaseException { + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setReadOnly(true); + RawStore store = new RawStore(env, storeName, storeConfig); + + EntityModel model = store.getModel(); + if (dumpMetadata) { + for (RawType type : model.getAllRawTypes()) { + System.out.println(type); + } + } else { + for (String clsName : model.getKnownClasses()) { + if (model.getEntityMetadata(clsName) != null) { + final PrimaryIndex index; + try { + index = store.getPrimaryIndex(clsName); + } catch 
(IndexNotAvailableException e) { + System.err.println("Skipping primary index that is " + + "not yet available: " + clsName); + continue; + } + EntityCursor entities = index.entities(); + for (RawObject entity : entities) { + System.out.println(entity); + } + entities.close(); + } + } + } + + store.close(); + } +} diff --git a/examples/persist/EventExample.java b/examples/persist/EventExample.java new file mode 100644 index 0000000..14c65fa --- /dev/null +++ b/examples/persist/EventExample.java @@ -0,0 +1,418 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist; + +import java.io.File; +import java.io.Serializable; +import java.util.Calendar; +import java.util.Date; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; + +/** + * EventExample is a trivial example which stores Java objects that represent + * an event. Events are primarily indexed by a timestamp, but have other + * attributes, such as price, account reps, customer name and quantity. + * Some of those other attributes are indexed. + *
    + * The example simply shows the creation of a JE environment and database,
    + * inserting some events, and retrieving the events.
    + * <p>
    + * This example is meant to be paired with its twin, EventExampleDPL.java.
    + * EventExample.java and EventExampleDPL.java perform the same functionality,
    + * but use the Base API and the Direct Persistence Layer API, respectively.
    + * This may be a useful way to compare the two APIs.
    + * <p>
    + * To run the example:
    + * <pre>
    + * cd jehome/examples
    + * javac je/EventExample.java
    + * java -cp "../lib/je.jar;." je.EventExample -h &lt;environment home&gt;
    + * </pre>
    + */ +public class EventExample { + + /* + * The Event class embodies our example event and is the application + * data. JE data records are represented at key/data tuples. In this + * example, the key portion of the record is the event time, and the data + * portion is the Event instance. + */ + @SuppressWarnings("serial") + static class Event implements Serializable { + + /* This example will add secondary indices on price and accountReps. */ + private final int price; + private final Set accountReps; + + private final String customerName; + private int quantity; + + Event(int price, + String customerName) { + + this.price = price; + this.customerName = customerName; + this.accountReps = new HashSet(); + } + + void addRep(String rep) { + accountReps.add(rep); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(" price=").append(price); + sb.append(" customerName=").append(customerName); + sb.append(" reps="); + if (accountReps.size() == 0) { + sb.append("none"); + } else { + for (String rep: accountReps) { + sb.append(rep).append(" "); + } + } + return sb.toString(); + } + + int getPrice() { + return price; + } + } + + /* A JE environment is roughly equivalent to a relational database. */ + private final Environment env; + + /* + * A JE table is roughly equivalent to a relational table with a + * primary index. + */ + private Database eventDb; + + /* A secondary database indexes an additional field of the data record */ + private SecondaryDatabase eventByPriceDb; + + /* + * The catalogs and bindings are used to convert Java objects to the byte + * array format used by JE key/data in the base API. The Direct Persistence + * Layer API supports Java objects as arguments directly. + */ + private Database catalogDb; + private EntryBinding eventBinding; + + /* Used for generating example data. */ + private final Calendar cal; + + /* + * First manually make a directory to house the JE environment. + * Usage: java -cp je.jar EventExample -h + * All JE on-disk storage is held within envHome. + */ + public static void main(String[] args) + throws DatabaseException { + + if (args.length != 2 || !"-h".equals(args[0])) { + System.err.println + ("Usage: java " + EventExample.class.getName() + + " -h "); + System.exit(2); + } + EventExample example = new EventExample(new File(args[1])); + example.run(); + example.close(); + } + + private EventExample(File envHome) + throws DatabaseException { + + /* Open a transactional Berkeley DB engine environment. */ + System.out.println("-> Creating a JE environment"); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + + init(); + cal = Calendar.getInstance(); + } + + /** + * Create all primary and secondary indices. + */ + private void init() + throws DatabaseException { + + System.out.println("-> Creating a JE database"); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + eventDb = env.openDatabase(null, // use auto-commit txn + "eventDb", // database name + dbConfig); + + /* + * In our example, the database record is composed of a key portion + * which represents the event timestamp, and a data portion holds an + * instance of the Event class. + * + * JE's base API accepts and returns key and data as byte arrays, so we + * need some support for marshaling between objects and byte arrays. 
We + * call this binding, and supply a package of helper classes to support + * this. It's entirely possible to do all binding on your own. + * + * A class catalog database is needed for storing class descriptions + * for the serial binding used below. This avoids storing class + * descriptions redundantly in each record. + */ + DatabaseConfig catalogConfig = new DatabaseConfig(); + catalogConfig.setTransactional(true); + catalogConfig.setAllowCreate(true); + catalogDb = env.openDatabase(null, "catalogDb", catalogConfig); + StoredClassCatalog catalog = new StoredClassCatalog(catalogDb); + + /* + * Create a serial binding for Event data objects. Serial + * bindings can be used to store any Serializable object. + * We can use some pre-defined binding classes to convert + * primitives like the long key value to the a byte array. + */ + eventBinding = new SerialBinding(catalog, Event.class); + + /* + * Open a secondary database to allow accessing the primary + * database a secondary key value. In this case, access events + * by price. + */ + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + secConfig.setKeyCreator(new PriceKeyCreator(eventBinding)); + eventByPriceDb = env.openSecondaryDatabase(null, + "priceDb", + eventDb, + secConfig); + + } + + private void run() + throws DatabaseException { + + Random rand = new Random(); + + /* DatabaseEntry represents the key and data of each record */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* + * Create a set of events. Each insertion is a separate, auto-commit + * transaction. + */ + System.out.println("-> Inserting 4 events"); + LongBinding.longToEntry(makeDate(1), key); + eventBinding.objectToEntry(new Event(100, "Company_A"), + data); + eventDb.put(null, key, data); + + LongBinding.longToEntry(makeDate(2), key); + eventBinding.objectToEntry(new Event(2, "Company_B"), + data); + eventDb.put(null, key, data); + + LongBinding.longToEntry(makeDate(3), key); + eventBinding.objectToEntry(new Event(20, "Company_C"), + data); + eventDb.put(null, key, data); + + LongBinding.longToEntry(makeDate(4), key); + eventBinding.objectToEntry(new Event(40, "CompanyD"), + data); + eventDb.put(null, key, data); + + /* Load a whole set of events transactionally. */ + Transaction txn = env.beginTransaction(null, null); + int maxPrice = 50; + System.out.println("-> Inserting some randomly generated events"); + for (int i = 0; i < 25; i++) { + long time = makeDate(rand.nextInt(365)); + Event e = new Event(rand.nextInt(maxPrice),"Company_X"); + if ((i%2) ==0) { + e.addRep("Jane"); + e.addRep("Nikunj"); + } else { + e.addRep("Yongmin"); + } + LongBinding.longToEntry(time, key); + eventBinding.objectToEntry(e, data); + eventDb.put(txn, key, data); + } + txn.commitWriteNoSync(); + + /* + * Windows of events - display the events between June 1 and Aug 31 + */ + System.out.println("\n-> Display the events between June 1 and Aug 31"); + long endDate = makeDate(Calendar.AUGUST, 31); + + /* Position the cursor and print the first event. 
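+ * getSearchKeyRange positions the cursor at the smallest key greater
+ * than or equal to the search key, so the June 1 window below works
+ * even when no event was recorded exactly on that date.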
*/ + Cursor eventWindow = eventDb.openCursor(null, null); + LongBinding.longToEntry(makeDate(Calendar.JUNE, 1), key); + + if ((eventWindow.getSearchKeyRange(key, data, null)) != + OperationStatus.SUCCESS) { + System.out.println("No events found!"); + eventWindow.close(); + return; + } + try { + printEvents(key, data, eventWindow, endDate); + } finally { + eventWindow.close(); + } + + /* + * Display all events, ordered by a secondary index on price. + */ + System.out.println("\n-> Display all events, ordered by price"); + SecondaryCursor priceCursor = + eventByPriceDb.openSecondaryCursor(null, null); + try { + printEvents(priceCursor); + } finally { + priceCursor.close(); + } + } + + private void close() + throws DatabaseException { + + eventByPriceDb.close(); + eventDb.close(); + catalogDb.close(); + env.close(); + } + + /** + * Print all events covered by this cursor up to the end date. We know + * that the cursor operates on long keys and Event data items, but there's + * no type-safe way of expressing that within the JE base API. + */ + private void printEvents(DatabaseEntry firstKey, + DatabaseEntry firstData, + Cursor cursor, + long endDate) + throws DatabaseException { + + System.out.println("time=" + + new Date(LongBinding.entryToLong(firstKey)) + + eventBinding.entryToObject(firstData)); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + while (cursor.getNext(key, data, null) == + OperationStatus.SUCCESS) { + if (LongBinding.entryToLong(key) > endDate) { + break; + } + System.out.println("time=" + + new Date(LongBinding.entryToLong(key)) + + eventBinding.entryToObject(data)); + } + } + + private void printEvents(SecondaryCursor cursor) + throws DatabaseException { + DatabaseEntry timeKey = new DatabaseEntry(); + DatabaseEntry priceKey = new DatabaseEntry(); + DatabaseEntry eventData = new DatabaseEntry(); + + while (cursor.getNext(priceKey, timeKey, eventData, null) == + OperationStatus.SUCCESS) { + System.out.println("time=" + + new Date(LongBinding.entryToLong(timeKey)) + + eventBinding.entryToObject(eventData)); + } + } + + /** + * Little utility for making up java.util.Dates for different days, just + * to generate test data. + */ + private long makeDate(int day) { + + cal.set((Calendar.DAY_OF_YEAR), day); + return cal.getTime().getTime(); + } + /** + * Little utility for making up java.util.Dates for different days, just + * to make the test data easier to read. + */ + private long makeDate(int month, int day) { + + cal.set((Calendar.MONTH), month); + cal.set((Calendar.DAY_OF_MONTH), day); + return cal.getTime().getTime(); + } + + /** + * A key creator that knows how to extract the secondary key from the data + * entry of the primary database. To do so, it uses both the dataBinding + * of the primary database and the secKeyBinding. + */ + private static class PriceKeyCreator implements SecondaryKeyCreator { + + private final EntryBinding dataBinding; + + PriceKeyCreator(EntryBinding eventBinding) { + this.dataBinding = eventBinding; + } + + public boolean createSecondaryKey(SecondaryDatabase secondaryDb, + DatabaseEntry keyEntry, + DatabaseEntry dataEntry, + DatabaseEntry resultEntry) { + + /* + * Convert the data entry to an Event object, extract the secondary + * key value from it, and then convert it to the resulting + * secondary key entry. 
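+ * For instance, an Event priced at 100 yields an integer key entry via
+ * IntegerBinding; JE pairs that entry with the record's primary key (its
+ * timestamp) to form the corresponding record in the priceDb secondary.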
+ */ + Event e = (Event) dataBinding.entryToObject(dataEntry); + int price = e.getPrice(); + IntegerBinding.intToEntry(price, resultEntry); + return true; + } + } +} diff --git a/examples/persist/EventExampleDPL.java b/examples/persist/EventExampleDPL.java new file mode 100644 index 0000000..c35dcd5 --- /dev/null +++ b/examples/persist/EventExampleDPL.java @@ -0,0 +1,275 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; + +import java.io.File; +import java.util.Calendar; +import java.util.Date; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; + +/** + * EventExampleDPL is a trivial example which stores Java objects that + * represent an event. Events are primarily indexed by a timestamp, but have + * other attributes, such as price, account reps, customer name and + * quantity. Some of those other attributes are indexed. + *
    + * The example simply shows the creation of a BDB environment and database,
    + * inserting some events, and retrieving the events using the Direct
    + * Persistence layer.
    + * <p>
    + * This example is meant to be paired with its twin, EventExample.java.
    + * EventExample.java and EventExampleDPL.java perform the same functionality,
    + * but use the Base API and the Direct Persistence Layer API, respectively.
    + * This may be a useful way to compare the two APIs.
    + * <p>
    + * To run the example:
    + * <pre>
    + * javac EventExampleDPL.java
    + * java EventExampleDPL -h &lt;environment home&gt;
    + * </pre>
    + */ +public class EventExampleDPL { + + /* + * The Event class embodies our example event and is the application + * data. The @Entity annotation indicates that this class defines the + * objects stored in a BDB database. + */ + @Entity + static class Event { + + @PrimaryKey + private Date time; + + @SecondaryKey(relate=MANY_TO_ONE) + private int price; + + private Set accountReps; + + private String customerName; + private int quantity; + + Event(Date time, + int price, + String customerName) { + + this.time = time; + this.price = price; + this.customerName = customerName; + this.accountReps = new HashSet(); + } + + private Event() {} // For deserialization + + void addRep(String rep) { + accountReps.add(rep); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("time=").append(time); + sb.append(" price=").append(price); + sb.append(" customerName=").append(customerName); + sb.append(" reps="); + if (accountReps.size() == 0) { + sb.append("none"); + } else { + for (String rep: accountReps) { + sb.append(rep).append(" "); + } + } + return sb.toString(); + } + } + + /* A BDB environment is roughly equivalent to a relational database. */ + private Environment env; + private EntityStore store; + + /* + * Event accessors let us access events by the primary index (time) + * as well as by the rep and price fields + */ + PrimaryIndex eventByTime; + SecondaryIndex eventByPrice; + + /* Used for generating example data. */ + private Calendar cal; + + /* + * First manually make a directory to house the BDB environment. + * Usage: java EventExampleDPL -h + * All BDB on-disk storage is held within envHome. + */ + public static void main(String[] args) + throws DatabaseException { + + if (args.length != 2 || !"-h".equals(args[0])) { + System.err.println + ("Usage: java " + EventExampleDPL.class.getName() + + " -h "); + System.exit(2); + } + EventExampleDPL example = new EventExampleDPL(new File(args[1])); + example.run(); + example.close(); + } + + private EventExampleDPL(File envHome) + throws DatabaseException { + + /* Open a transactional Berkeley DB engine environment. */ + System.out.println("-> Creating a BDB environment"); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + + /* Initialize the data access object. */ + init(); + cal = Calendar.getInstance(); + } + + /** + * Create all primary and secondary indices. + */ + private void init() + throws DatabaseException { + + /* Open a transactional entity store. */ + System.out.println("-> Creating a BDB database"); + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + store = new EntityStore(env, "ExampleStore", storeConfig); + + eventByTime = store.getPrimaryIndex(Date.class, Event.class); + eventByPrice = store.getSecondaryIndex(eventByTime, + Integer.class, + "price"); + } + + private void run() + throws DatabaseException { + + Random rand = new Random(); + + /* + * Create a set of events. Each insertion is a separate, auto-commit + * transaction. + */ + System.out.println("-> Inserting 4 events"); + eventByTime.put(new Event(makeDate(1), 100, "Company_A")); + eventByTime.put(new Event(makeDate(2), 2, "Company_B")); + eventByTime.put(new Event(makeDate(3), 20, "Company_C")); + eventByTime.put(new Event(makeDate(4), 40, "CompanyD")); + + /* Load a whole set of events transactionally. 
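+ * commitWriteNoSync, used below, writes the commit record to the log
+ * but does not force it to disk: the commit survives a process crash,
+ * yet can be lost if the operating system crashes before the file
+ * system buffers are flushed.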
*/ + Transaction txn = env.beginTransaction(null, null); + int maxPrice = 50; + System.out.println("-> Inserting some randomly generated events"); + for (int i = 0; i < 25; i++) { + Event e = new Event(makeDate(rand.nextInt(365)), + rand.nextInt(maxPrice), + "Company_X"); + if ((i%2) ==0) { + e.addRep("Bob"); + e.addRep("Nikunj"); + } else { + e.addRep("Yongmin"); + } + eventByTime.put(e); + } + txn.commitWriteNoSync(); + + /* + * Windows of events - display the events between June 1 and Aug 31 + */ + System.out.println("\n-> Display the events between June 1 and Aug 31"); + Date startDate = makeDate(Calendar.JUNE, 1); + Date endDate = makeDate(Calendar.AUGUST, 31); + + EntityCursor eventWindow = + eventByTime.entities(startDate, true, endDate, true); + printEvents(eventWindow); + + /* + * Display all events, ordered by a secondary index on price. + */ + System.out.println("\n-> Display all events, ordered by price"); + EntityCursor byPriceEvents = eventByPrice.entities(); + printEvents(byPriceEvents); + } + + private void close() + throws DatabaseException { + + store.close(); + env.close(); + } + + /** + * Print all events covered by this cursor. + */ + private void printEvents(EntityCursor eCursor) + throws DatabaseException { + try { + for (Event e: eCursor) { + System.out.println(e); + } + } finally { + /* Be sure to close the cursor. */ + eCursor.close(); + } + } + + /** + * Little utility for making up java.util.Dates for different days, just + * to generate test data. + */ + private Date makeDate(int day) { + + cal.set((Calendar.DAY_OF_YEAR), day); + return cal.getTime(); + } + + /** + * Little utility for making up java.util.Dates for different days, just + * to make the test data easier to read. + */ + private Date makeDate(int month, int day) { + + cal.set((Calendar.MONTH), month); + cal.set((Calendar.DAY_OF_MONTH), day); + return cal.getTime(); + } +} diff --git a/examples/persist/PersonExample.java b/examples/persist/PersonExample.java new file mode 100644 index 0000000..1e78695 --- /dev/null +++ b/examples/persist/PersonExample.java @@ -0,0 +1,258 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package persist; + +import java.io.File; +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityIndex; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import static com.sleepycat.persist.model.DeleteAction.NULLIFY; +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE; +import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY; +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY; + +public class PersonExample { + + /* An entity class. */ + @Entity + static class Person { + + @PrimaryKey + String ssn; + + String name; + Address address; + + @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class) + String parentSsn; + + @SecondaryKey(relate=ONE_TO_MANY) + Set emailAddresses = new HashSet(); + + @SecondaryKey(relate=MANY_TO_MANY, + relatedEntity=Employer.class, + onRelatedEntityDelete=NULLIFY) + Set employerIds = new HashSet(); + + Person(String name, String ssn, String parentSsn) { + this.name = name; + this.ssn = ssn; + this.parentSsn = parentSsn; + } + + private Person() {} // For deserialization + } + + /* Another entity class. */ + @Entity + static class Employer { + + @PrimaryKey(sequence="ID") + long id; + + @SecondaryKey(relate=ONE_TO_ONE) + String name; + + Address address; + + Employer(String name) { + this.name = name; + } + + private Employer() {} // For deserialization + } + + /* A persistent class used in other classes. */ + @Persistent + static class Address { + String street; + String city; + String state; + int zipCode; + private Address() {} // For deserialization + } + + /* The data accessor class for the entity model. */ + static class PersonAccessor { + + /* Person accessors */ + PrimaryIndex personBySsn; + SecondaryIndex personByParentSsn; + SecondaryIndex personByEmailAddresses; + SecondaryIndex personByEmployerIds; + + /* Employer accessors */ + PrimaryIndex employerById; + SecondaryIndex employerByName; + + /* Opens all primary and secondary indices. 
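+ * Each getSecondaryIndex call below opens the secondary database for one
+ * @SecondaryKey field; the key class argument must match that field's
+ * declared key type (e.g. Long.class for the employerIds element type).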
*/ + public PersonAccessor(EntityStore store) + throws DatabaseException { + + personBySsn = store.getPrimaryIndex( + String.class, Person.class); + + personByParentSsn = store.getSecondaryIndex( + personBySsn, String.class, "parentSsn"); + + personByEmailAddresses = store.getSecondaryIndex( + personBySsn, String.class, "emailAddresses"); + + personByEmployerIds = store.getSecondaryIndex( + personBySsn, Long.class, "employerIds"); + + employerById = store.getPrimaryIndex( + Long.class, Employer.class); + + employerByName = store.getSecondaryIndex( + employerById, String.class, "name"); + } + } + + public static void main(String[] args) + throws DatabaseException { + + if (args.length != 2 || !"-h".equals(args[0])) { + System.err.println + ("Usage: java " + PersonExample.class.getName() + + " -h "); + System.exit(2); + } + PersonExample example = new PersonExample(new File(args[1])); + example.run(); + example.close(); + } + + private Environment env; + private EntityStore store; + private PersonAccessor dao; + + private PersonExample(File envHome) + throws DatabaseException { + + /* Open a transactional Berkeley DB engine environment. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + + /* Open a transactional entity store. */ + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + store = new EntityStore(env, "PersonStore", storeConfig); + + /* Initialize the data access object. */ + dao = new PersonAccessor(store); + } + + private void run() + throws DatabaseException { + + /* + * Add a parent and two children using the Person primary index. + * Specifying a non-null parentSsn adds the child Person to the + * sub-index of children for that parent key. + */ + dao.personBySsn.put + (new Person("Bob Smith", "111-11-1111", null)); + dao.personBySsn.put + (new Person("Mary Smith", "333-33-3333", "111-11-1111")); + dao.personBySsn.put + (new Person("Jack Smith", "222-22-2222", "111-11-1111")); + + /* Print the children of a parent using a sub-index and a cursor. */ + EntityCursor children = + dao.personByParentSsn.subIndex("111-11-1111").entities(); + try { + for (Person child : children) { + System.out.println(child.ssn + ' ' + child.name); + } + } finally { + children.close(); + } + + /* Get Bob by primary key using the primary index. */ + Person bob = dao.personBySsn.get("111-11-1111"); + assert bob != null; + + /* + * Create two employers if they do not already exist. Their primary + * keys are assigned from a sequence. + */ + Employer gizmoInc = dao.employerByName.get("Gizmo Inc"); + if (gizmoInc == null) { + gizmoInc = new Employer("Gizmo Inc"); + dao.employerById.put(gizmoInc); + } + Employer gadgetInc = dao.employerByName.get("Gadget Inc"); + if (gadgetInc == null) { + gadgetInc = new Employer("Gadget Inc"); + dao.employerById.put(gadgetInc); + } + + /* Bob has two jobs and two email addresses. */ + bob.employerIds.add(gizmoInc.id); + bob.employerIds.add(gadgetInc.id); + bob.emailAddresses.add("bob@bob.com"); + bob.emailAddresses.add("bob@gmail.com"); + + /* Update Bob's record. */ + dao.personBySsn.put(bob); + + /* Bob can now be found by both email addresses. */ + bob = dao.personByEmailAddresses.get("bob@bob.com"); + assert bob != null; + bob = dao.personByEmailAddresses.get("bob@gmail.com"); + assert bob != null; + + /* Bob can also be found as an employee of both employers. 
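+ * subIndex(id) narrows the MANY_TO_MANY employer index to a single
+ * employer, yielding an index of the matching Person entities keyed by
+ * their primary key, so contains(ssn) tests Bob's membership directly.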
*/ + EntityIndex employees; + employees = dao.personByEmployerIds.subIndex(gizmoInc.id); + assert employees.contains("111-11-1111"); + employees = dao.personByEmployerIds.subIndex(gadgetInc.id); + assert employees.contains("111-11-1111"); + + /* + * When an employer is deleted, the onRelatedEntityDelete=NULLIFY for + * the employerIds key causes the deleted ID to be removed from Bob's + * employerIds. + */ + dao.employerById.delete(gizmoInc.id); + bob = dao.personBySsn.get("111-11-1111"); + assert bob != null; + assert !bob.employerIds.contains(gizmoInc.id); + } + + private void close() + throws DatabaseException { + + store.close(); + env.close(); + } +} diff --git a/examples/persist/ScalaPersonExample.scala b/examples/persist/ScalaPersonExample.scala new file mode 100644 index 0000000..56507b7 --- /dev/null +++ b/examples/persist/ScalaPersonExample.scala @@ -0,0 +1,123 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved. + * + */ + +import java.io.File + +import com.sleepycat.je.{Environment, EnvironmentConfig} +import com.sleepycat.persist.{EntityCursor,EntityStore,StoreConfig} +import com.sleepycat.persist.model.{Entity,PrimaryKey,SecondaryKey} +import com.sleepycat.persist.model.Relationship.ONE_TO_ONE + +/** + * Simple example of using Berkeley DB Java Edition (JE) with Scala. The JE + * Direct Persistence Layer (DPL) is used in this example, which requires Java + * 1.5, so the scalac -target:jvm-1.5 option is required when compiling. The + * -Ygenerics option must also be used because DPL generics are used in this + * example. + * + * scalac -Ygenerics -target:jvm-1.5 -cp je-x.y.z.jar ScalaPersonExample.scala + * + * To run the example: + * + * mkdir ./tmp + * scala -cp ".;je-x.y.z.jar" ScalaPersonExample + * + * Note that classOf[java.lang.String] and classOf[java.lang.Long] are used + * rather than classOf[String] and classOf[Long]. The latter use the Scala + * types rather than the Java types and cause run-time errors. + * + * This example was tested with Scala 2.6.1-RC1 and JE 3.2.30. + * + * See: + * http://www.scala-lang.org/ + * http://www.oracle.com/technology/products/berkeley-db/je + */ +object ScalaPersonExample extends Application { + + /** + * A persistent Entity is defined using DPL annotations. + */ + @Entity + class Person(nameParam: String, addressParam: String) { + + @PrimaryKey{val sequence="ID"} + var id: long = 0 + + @SecondaryKey{val relate=ONE_TO_ONE} + var name: String = nameParam + + var address: String = addressParam + + private def this() = this(null, null) // default ctor for unmarshalling + + override def toString = "Person: " + id + ' ' + name + ' ' + address + } + + /* Open the JE Environment. */ + val envConfig = new EnvironmentConfig() + envConfig.setAllowCreate(true) + envConfig.setTransactional(true) + val env = new Environment(new File("./tmp"), envConfig) + + /* Open the DPL Store. */ + val storeConfig = new StoreConfig() + storeConfig.setAllowCreate(true) + storeConfig.setTransactional(true) + val store = new EntityStore(env, "ScalaPersonExample", storeConfig) + + /* The PrimaryIndex maps the Long primary key to Person. */ + val priIndex = + store.getPrimaryIndex(classOf[java.lang.Long], classOf[Person]) + + /* The SecondaryIndex maps the String secondary key to Person. */ + val secIndex = + store.getSecondaryIndex(priIndex, classOf[java.lang.String], "name") + + /* Insert some entities if the primary index is empty. 
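+ * The ids asserted below come from the "ID" sequence named in the
+ * @PrimaryKey annotation: JE assigns 1 and 2 to the first two entities
+ * put under this store.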
*/ + val txn = env.beginTransaction(null, null) + if (priIndex.get(txn, 1L, null) == null) { + val person1 = new Person("Zola", "#1 Zola Street") + val person2 = new Person("Abby", "#1 Abby Street") + priIndex.put(txn, person1) + priIndex.put(txn, person2) + assert(person1.id == 1) // assigned from the ID sequence + assert(person2.id == 2) // assigned from the ID sequence + txn.commit() + println("--- Entities were inserted ---") + } else { + txn.abort() + println("--- Entities already exist ---") + } + + /* Get entities by primary and secondary keys. */ + println("--- Get by primary key ---") + println(priIndex.get(1L)) + println(priIndex.get(2L)) + assert(priIndex.get(3L) == null) + println("--- Get by secondary key ---") + println(secIndex.get("Zola")) + println(secIndex.get("Abby")) + assert(secIndex.get("xxx") == null) + + /* Iterate entities in primary and secondary key order. */ + def printAll[T](cursor: EntityCursor[T]) { + val person = cursor.next() + if (person == null) { + cursor.close() + } else { + println(person) + printAll(cursor) // tail recursion + } + } + println("--- Iterate by primary key ---") + printAll(priIndex.entities()) + println("--- Iterate by secondary key ---") + printAll(secIndex.entities()) + + store.close() + env.close() +} diff --git a/examples/persist/gettingStarted/DataAccessor.java b/examples/persist/gettingStarted/DataAccessor.java new file mode 100644 index 0000000..56d9ebe --- /dev/null +++ b/examples/persist/gettingStarted/DataAccessor.java @@ -0,0 +1,48 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist.gettingStarted; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; + +public class DataAccessor { + // Open the indices + public DataAccessor(EntityStore store) + throws DatabaseException { + + // Primary key for Inventory classes + inventoryBySku = store.getPrimaryIndex( + String.class, Inventory.class); + + // Secondary key for Inventory classes + // Last field in the getSecondaryIndex() method must be + // the name of a class member; in this case, an Inventory.class + // data member. + inventoryByName = store.getSecondaryIndex( + inventoryBySku, String.class, "itemName"); + + // Primary key for Vendor class + vendorByName = store.getPrimaryIndex( + String.class, Vendor.class); + } + + // Inventory Accessors + PrimaryIndex inventoryBySku; + SecondaryIndex inventoryByName; + + // Vendor Accessors + PrimaryIndex vendorByName; +} diff --git a/examples/persist/gettingStarted/ExampleDatabasePut.java b/examples/persist/gettingStarted/ExampleDatabasePut.java new file mode 100644 index 0000000..15c4715 --- /dev/null +++ b/examples/persist/gettingStarted/ExampleDatabasePut.java @@ -0,0 +1,194 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist.gettingStarted; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.je.DatabaseException; + +public class ExampleDatabasePut { + + private static File myDbEnvPath = new File("/tmp/JEDB"); + private static File inventoryFile = new File("./inventory.txt"); + private static File vendorsFile = new File("./vendors.txt"); + + private DataAccessor da; + + // Encapsulates the environment and data store. + private static MyDbEnv myDbEnv = new MyDbEnv(); + + private static void usage() { + System.out.println("ExampleDatabasePut [-h ]"); + System.out.println(" [-i ] [-v ]"); + System.exit(-1); + } + + + public static void main(String args[]) { + ExampleDatabasePut edp = new ExampleDatabasePut(); + try { + edp.run(args); + } catch (DatabaseException dbe) { + System.err.println("ExampleDatabasePut: " + dbe.toString()); + dbe.printStackTrace(); + dbe.printStackTrace(); + } catch (Exception e) { + System.out.println("Exception: " + e.toString()); + e.printStackTrace(); + } finally { + myDbEnv.close(); + } + System.out.println("All done."); + } + + + private void run(String args[]) + throws DatabaseException { + // Parse the arguments list + parseArgs(args); + + myDbEnv.setup(myDbEnvPath, // Path to the environment home + false); // Environment read-only? + + // Open the data accessor. This is used to store + // persistent objects. + da = new DataAccessor(myDbEnv.getEntityStore()); + + System.out.println("loading vendors db...."); + loadVendorsDb(); + + System.out.println("loading inventory db...."); + loadInventoryDb(); + } + + private void loadVendorsDb() + throws DatabaseException { + + // loadFile opens a flat-text file that contains our data + // and loads it into a list for us to work with. The integer + // parameter represents the number of fields expected in the + // file. + List vendors = loadFile(vendorsFile, 8); + + // Now load the data into the store. + for (int i = 0; i < vendors.size(); i++) { + String[] sArray = (String[])vendors.get(i); + Vendor theVendor = new Vendor(); + theVendor.setVendorName(sArray[0]); + theVendor.setAddress(sArray[1]); + theVendor.setCity(sArray[2]); + theVendor.setState(sArray[3]); + theVendor.setZipcode(sArray[4]); + theVendor.setBusinessPhoneNumber(sArray[5]); + theVendor.setRepName(sArray[6]); + theVendor.setRepPhoneNumber(sArray[7]); + + // Put it in the store. Because we do not explicitly set + // a transaction here, and because the store was opened + // with transactional support, auto commit is used for each + // write to the store. + da.vendorByName.put(theVendor); + } + } + + private void loadInventoryDb() + throws DatabaseException { + + // loadFile opens a flat-text file that contains our data + // and loads it into a list for us to work with. The integer + // parameter represents the number of fields expected in the + // file. 
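+ // A line of inventory.txt carries 6 '#'-separated fields, e.g.:
+ //   Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce
+ // i.e. itemName#sku#price#vendorInventory#category#vendor.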
+ List inventoryArray = loadFile(inventoryFile, 6); + + // Now load the data into the store. The item's sku is the + // key, and the data is an Inventory class object. + + for (int i = 0; i < inventoryArray.size(); i++) { + String[] sArray = (String[])inventoryArray.get(i); + String sku = sArray[1]; + + Inventory theInventory = new Inventory(); + theInventory.setItemName(sArray[0]); + theInventory.setSku(sArray[1]); + theInventory.setVendorPrice((new Float(sArray[2])).floatValue()); + theInventory.setVendorInventory((new Integer(sArray[3])).intValue()); + theInventory.setCategory(sArray[4]); + theInventory.setVendor(sArray[5]); + + // Put it in the store. Note that this causes our secondary key + // to be automatically updated for us. + da.inventoryBySku.put(theInventory); + } + } + + + private static void parseArgs(String args[]) { + for(int i = 0; i < args.length; ++i) { + if (args[i].startsWith("-")) { + switch(args[i].charAt(1)) { + case 'h': + myDbEnvPath = new File(args[++i]); + break; + case 'i': + inventoryFile = new File(args[++i]); + break; + case 'v': + vendorsFile = new File(args[++i]); + break; + default: + usage(); + } + } + } + } + + private List loadFile(File theFile, int numFields) { + List records = new ArrayList(); + try { + String theLine = null; + FileInputStream fis = new FileInputStream(theFile); + BufferedReader br = new BufferedReader(new InputStreamReader(fis)); + while((theLine=br.readLine()) != null) { + String[] theLineArray = theLine.split("#"); + if (theLineArray.length != numFields) { + System.out.println("Malformed line found in " + theFile.getPath()); + System.out.println("Line was: '" + theLine); + System.out.println("length found was: " + theLineArray.length); + System.exit(-1); + } + records.add(theLineArray); + } + // Close the input stream handle + fis.close(); + } catch (FileNotFoundException e) { + System.err.println(theFile.getPath() + " does not exist."); + e.printStackTrace(); + usage(); + } catch (IOException e) { + System.err.println("IO Exception: " + e.toString()); + e.printStackTrace(); + System.exit(-1); + } + return records; + } + + protected ExampleDatabasePut() {} +} diff --git a/examples/persist/gettingStarted/ExampleInventoryRead.java b/examples/persist/gettingStarted/ExampleInventoryRead.java new file mode 100644 index 0000000..e838f9f --- /dev/null +++ b/examples/persist/gettingStarted/ExampleInventoryRead.java @@ -0,0 +1,156 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist.gettingStarted; + +import java.io.File; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.persist.EntityCursor; + +public class ExampleInventoryRead { + + private static File myDbEnvPath = + new File("/tmp/JEDB"); + + private DataAccessor da; + + // Encapsulates the database environment. 
+ private static MyDbEnv myDbEnv = new MyDbEnv(); + + // The item to locate if the -s switch is used + private static String locateItem; + + private static void usage() { + System.out.println("ExampleInventoryRead [-h ]" + + "[-s ]"); + System.exit(-1); + } + + public static void main(String args[]) { + ExampleInventoryRead eir = new ExampleInventoryRead(); + try { + eir.run(args); + } catch (DatabaseException dbe) { + System.err.println("ExampleInventoryRead: " + dbe.toString()); + dbe.printStackTrace(); + } finally { + myDbEnv.close(); + } + System.out.println("All done."); + } + + private void run(String args[]) + throws DatabaseException { + // Parse the arguments list + parseArgs(args); + + myDbEnv.setup(myDbEnvPath, // path to the environment home + true); // is this environment read-only? + + // Open the data accessor. This is used to retrieve + // persistent objects. + da = new DataAccessor(myDbEnv.getEntityStore()); + + // If a item to locate is provided on the command line, + // show just the inventory items using the provided name. + // Otherwise, show everything in the inventory. + if (locateItem != null) { + showItem(); + } else { + showAllInventory(); + } + } + + // Shows all the inventory items that exist for a given + // inventory name. + private void showItem() throws DatabaseException { + + // Use the inventory name secondary key to retrieve + // these objects. + EntityCursor items = + da.inventoryByName.subIndex(locateItem).entities(); + try { + for (Inventory item : items) { + displayInventoryRecord(item); + } + } finally { + items.close(); + } + } + + // Displays all the inventory items in the store + private void showAllInventory() + throws DatabaseException { + + // Get a cursor that will walk every + // inventory object in the store. 
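+ // entities() with no bounds iterates the whole primary index in sku
+ // order; the try/finally that follows guarantees the cursor is closed
+ // even if a display call throws. (A bounded variant such as
+ // da.inventoryBySku.entities("A", true, "M", true) would scan only
+ // part of the key range; shown here purely as an illustration.)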
+ EntityCursor items = + da.inventoryBySku.entities(); + + try { + for (Inventory item : items) { + displayInventoryRecord(item); + } + } finally { + items.close(); + } + } + + private void displayInventoryRecord(Inventory theInventory) + throws DatabaseException { + + System.out.println(theInventory.getSku() + ":"); + System.out.println("\t " + theInventory.getItemName()); + System.out.println("\t " + theInventory.getCategory()); + System.out.println("\t " + theInventory.getVendor()); + System.out.println("\t\tNumber in stock: " + + theInventory.getVendorInventory()); + System.out.println("\t\tPrice per unit: " + + theInventory.getVendorPrice()); + System.out.println("\t\tContact: "); + + Vendor theVendor = + da.vendorByName.get(theInventory.getVendor()); + assert theVendor != null; + + System.out.println("\t\t " + theVendor.getAddress()); + System.out.println("\t\t " + theVendor.getCity() + ", " + + theVendor.getState() + " " + theVendor.getZipcode()); + System.out.println("\t\t Business Phone: " + + theVendor.getBusinessPhoneNumber()); + System.out.println("\t\t Sales Rep: " + + theVendor.getRepName()); + System.out.println("\t\t " + + theVendor.getRepPhoneNumber()); + } + + protected ExampleInventoryRead() {} + + private static void parseArgs(String args[]) { + for(int i = 0; i < args.length; ++i) { + if (args[i].startsWith("-")) { + switch(args[i].charAt(1)) { + case 'h': + myDbEnvPath = new File(args[++i]); + break; + case 's': + locateItem = args[++i]; + break; + default: + usage(); + } + } + } + } +} diff --git a/examples/persist/gettingStarted/Inventory.java b/examples/persist/gettingStarted/Inventory.java new file mode 100644 index 0000000..2334ed8 --- /dev/null +++ b/examples/persist/gettingStarted/Inventory.java @@ -0,0 +1,84 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package persist.gettingStarted; + +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import static com.sleepycat.persist.model.Relationship.*; +import com.sleepycat.persist.model.SecondaryKey; + +@Entity +public class Inventory { + + // Primary key is sku + @PrimaryKey + private String sku; + + @SecondaryKey(relate=MANY_TO_ONE) + private String itemName; + + private String category; + private String vendor; + private int vendorInventory; + private float vendorPrice; + + public void setSku(String data) { + sku = data; + } + + public void setItemName(String data) { + itemName = data; + } + + public void setCategory(String data) { + category = data; + } + + public void setVendorInventory(int data) { + vendorInventory = data; + } + + public void setVendor(String data) { + vendor = data; + } + + public void setVendorPrice(float data) { + vendorPrice = data; + } + + public String getSku() { + return sku; + } + + public String getItemName() { + return itemName; + } + + public String getCategory() { + return category; + } + + public int getVendorInventory() { + return vendorInventory; + } + + public String getVendor() { + return vendor; + } + + public float getVendorPrice() { + return vendorPrice; + } + +} diff --git a/examples/persist/gettingStarted/MyDbEnv.java b/examples/persist/gettingStarted/MyDbEnv.java new file mode 100644 index 0000000..b4b15cd --- /dev/null +++ b/examples/persist/gettingStarted/MyDbEnv.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist.gettingStarted; + +import java.io.File; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; + +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.StoreConfig; + +public class MyDbEnv { + + private Environment myEnv; + private EntityStore store; + + // Our constructor does nothing + public MyDbEnv() {} + + // The setup() method opens the environment and store + // for us. + public void setup(File envHome, boolean readOnly) + throws DatabaseException { + + EnvironmentConfig myEnvConfig = new EnvironmentConfig(); + StoreConfig storeConfig = new StoreConfig(); + + myEnvConfig.setReadOnly(readOnly); + storeConfig.setReadOnly(readOnly); + + // If the environment is opened for write, then we want to be + // able to create the environment and entity store if + // they do not exist. 
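+ // Creating files makes no sense for a read-only open, so allowCreate
+ // is simply the negation of readOnly.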
+ myEnvConfig.setAllowCreate(!readOnly); + storeConfig.setAllowCreate(!readOnly); + + // Open the environment and entity store + myEnv = new Environment(envHome, myEnvConfig); + store = new EntityStore(myEnv, "EntityStore", storeConfig); + + } + + // Return a handle to the entity store + public EntityStore getEntityStore() { + return store; + } + + // Return a handle to the environment + public Environment getEnv() { + return myEnv; + } + + // Close the store and environment + public void close() { + if (store != null) { + try { + store.close(); + } catch(DatabaseException dbe) { + System.err.println("Error closing store: " + + dbe.toString()); + System.exit(-1); + } + } + + if (myEnv != null) { + try { + // Finally, close the store and environment. + myEnv.close(); + } catch(DatabaseException dbe) { + System.err.println("Error closing MyDbEnv: " + + dbe.toString()); + System.exit(-1); + } + } + } +} diff --git a/examples/persist/gettingStarted/Vendor.java b/examples/persist/gettingStarted/Vendor.java new file mode 100644 index 0000000..bf09bd4 --- /dev/null +++ b/examples/persist/gettingStarted/Vendor.java @@ -0,0 +1,100 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist.gettingStarted; + +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; + +@Entity +public class Vendor { + + private String repName; + private String address; + private String city; + private String state; + private String zipcode; + private String bizPhoneNumber; + private String repPhoneNumber; + + // Primary key is the vendor's name + // This assumes that the vendor's name is + // unique in the database. 
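+ // If two vendors shared a name, the second put() would silently
+ // replace the first, since put() on a primary index overwrites any
+ // existing entity with the same key.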
+ @PrimaryKey + private String vendor; + + public void setRepName(String data) { + repName = data; + } + + public void setAddress(String data) { + address = data; + } + + public void setCity(String data) { + city = data; + } + + public void setState(String data) { + state = data; + } + + public void setZipcode(String data) { + zipcode = data; + } + + public void setBusinessPhoneNumber(String data) { + bizPhoneNumber = data; + } + + public void setRepPhoneNumber(String data) { + repPhoneNumber = data; + } + + public void setVendorName(String data) { + vendor = data; + } + + public String getRepName() { + return repName; + } + + public String getAddress() { + return address; + } + + public String getCity() { + return city; + } + + public String getState() { + return state; + } + + public String getZipcode() { + return zipcode; + } + + public String getBusinessPhoneNumber() { + return bizPhoneNumber; + } + + public String getRepPhoneNumber() { + return repPhoneNumber; + } + + public String getVendorName() { + return vendor; + } + +} diff --git a/examples/persist/gettingStarted/inventory.txt b/examples/persist/gettingStarted/inventory.txt new file mode 100644 index 0000000..385c980 --- /dev/null +++ b/examples/persist/gettingStarted/inventory.txt @@ -0,0 +1,800 @@ +Oranges#OranfruiRu6Ghr#0.71#451#fruits#TriCounty Produce +Oranges#OranfruiXRPFn1#0.73#263#fruits#Simply Fresh +Oranges#OranfruiLEuzQj#0.69#261#fruits#Off the Vine +Apples#ApplfruiZls4Du#1.20#472#fruits#TriCounty Produce +Apples#Applfrui8fewZe#1.21#402#fruits#Simply Fresh +Apples#ApplfruiXoT6xG#1.20#728#fruits#Off the Vine +Bananas#BanafruipIlluX#0.50#207#fruits#TriCounty Produce +Bananas#BanafruiEQhWuj#0.50#518#fruits#Simply Fresh +Bananas#BanafruimpRgPO#0.50#741#fruits#Off the Vine +Almonds#AlmofruiPPCLz8#0.55#600#fruits#TriCounty Produce +Almonds#AlmofruidMyKmp#0.54#745#fruits#Simply Fresh +Almonds#Almofrui7K0xzH#0.53#405#fruits#Off the Vine +Allspice#AllsfruibJGK4R#0.94#669#fruits#TriCounty Produce +Allspice#Allsfruilfvoeg#0.94#244#fruits#Simply Fresh +Allspice#Allsfruio12BOS#0.95#739#fruits#Off the Vine +Apricot#AprifruijphEpM#0.89#560#fruits#TriCounty Produce +Apricot#AprifruiU1zIDn#0.91#980#fruits#Simply Fresh +Apricot#AprifruichcwYS#0.95#668#fruits#Off the Vine +Avocado#AvocfruiwYYomu#0.99#379#fruits#TriCounty Produce +Avocado#AvocfruiT6IwWE#1.02#711#fruits#Simply Fresh +Avocado#AvocfruisbK1h5#0.97#856#fruits#Off the Vine +Bael Fruit#BaelfruilAU7Hj#0.41#833#fruits#TriCounty Produce +Bael Fruit#BaelfruiX2KvqV#0.40#770#fruits#Simply Fresh +Bael Fruit#Baelfruidjne4e#0.39#778#fruits#Off the Vine +Betel Nut#BetefruiQYdHqQ#0.34#926#fruits#TriCounty Produce +Betel Nut#Betefrui32BKAz#0.37#523#fruits#Simply Fresh +Betel Nut#BetefruisaWzY4#0.34#510#fruits#Off the Vine +Black Walnut#BlacfruiXxIuMU#0.57#923#fruits#TriCounty Produce +Black Walnut#BlacfruiZXgY9t#0.59#312#fruits#Simply Fresh +Black Walnut#BlacfruikWO0vz#0.60#877#fruits#Off the Vine +Blueberry#BluefruiCbxb4t#1.02#276#fruits#TriCounty Produce +Blueberry#BluefruiBuCfgO#1.03#522#fruits#Simply Fresh +Blueberry#Bluefruixz8MkE#1.01#278#fruits#Off the Vine +Boysenberry#BoysfruizxyMuz#1.05#239#fruits#TriCounty Produce +Boysenberry#Boysfrui3hTRQu#1.09#628#fruits#Simply Fresh +Boysenberry#BoysfruinpLvr3#1.02#349#fruits#Off the Vine +Breadnut#Breafrui0kDPs6#0.31#558#fruits#TriCounty Produce +Breadnut#Breafrui44s3og#0.32#879#fruits#Simply Fresh +Breadnut#BreafruiwyLKhJ#0.30#407#fruits#Off the Vine +Cactus#Cactfruiyo2ddH#0.56#601#fruits#TriCounty Produce 
+Cactus#CactfruixTOLv5#0.54#477#fruits#Simply Fresh +Cactus#Cactfrui4ioUav#0.55#896#fruits#Off the Vine +California Wild Grape#CalifruiZsWAa6#0.78#693#fruits#TriCounty Produce +California Wild Grape#Califruid84xyt#0.83#293#fruits#Simply Fresh +California Wild Grape#CalifruiLSJFoJ#0.81#543#fruits#Off the Vine +Cashew#CashfruihaOFVP#0.37#221#fruits#TriCounty Produce +Cashew#Cashfruizzcw1E#0.38#825#fruits#Simply Fresh +Cashew#CashfruiqtMe2Q#0.38#515#fruits#Off the Vine +Chico Sapote#ChicfruiY534SX#0.47#216#fruits#TriCounty Produce +Chico Sapote#ChicfruiSqL3Lc#0.45#476#fruits#Simply Fresh +Chico Sapote#ChicfruiurzIp4#0.47#200#fruits#Off the Vine +Chinese Jello#ChinfruiyRg75u#0.64#772#fruits#TriCounty Produce +Chinese Jello#ChinfruiuIUj0X#0.65#624#fruits#Simply Fresh +Chinese Jello#ChinfruiwXbRrL#0.67#719#fruits#Off the Vine +Common Guava#Commfruib6znSI#0.80#483#fruits#TriCounty Produce +Common Guava#Commfrui6eUivL#0.81#688#fruits#Simply Fresh +Common Guava#CommfruibWKnz3#0.84#581#fruits#Off the Vine +Crabapple#CrabfruioY2L63#0.94#582#fruits#TriCounty Produce +Crabapple#Crabfruijxcxyt#0.94#278#fruits#Simply Fresh +Crabapple#CrabfruibvWd8K#0.95#213#fruits#Off the Vine +Cranberry#CranfruiJxmKr5#0.83#923#fruits#TriCounty Produce +Cranberry#CranfruiPlklAF#0.84#434#fruits#Simply Fresh +Cranberry#Cranfrui3G5XL9#0.84#880#fruits#Off the Vine +Damson Plum#DamsfruibMRMwe#0.98#782#fruits#TriCounty Produce +Damson Plum#DamsfruiV6wFLk#1.03#400#fruits#Simply Fresh +Damson Plum#DamsfruiLhqFrQ#0.98#489#fruits#Off the Vine +Date Palm#DatefruigS31GU#1.14#315#fruits#TriCounty Produce +Date Palm#DatefruipKPaJK#1.09#588#fruits#Simply Fresh +Date Palm#Datefrui5fTyNS#1.14#539#fruits#Off the Vine +Dragon's Eye#DragfruirGJ3aI#0.28#315#fruits#TriCounty Produce +Dragon's Eye#DragfruiBotxqt#0.27#705#fruits#Simply Fresh +Dragon's Eye#DragfruiPsSnV9#0.29#482#fruits#Off the Vine +East Indian Wine Palm#EastfruiNXFJuG#0.43#992#fruits#TriCounty Produce +East Indian Wine Palm#Eastfruiq06fRr#0.40#990#fruits#Simply Fresh +East Indian Wine Palm#Eastfrui4QUwl2#0.43#351#fruits#Off the Vine +English Walnut#EnglfruiBMtHtW#1.04#787#fruits#TriCounty Produce +English Walnut#EnglfruiHmVzxV#1.03#779#fruits#Simply Fresh +English Walnut#Englfrui18Tc9n#1.06#339#fruits#Off the Vine +False Mangosteen#FalsfruibkmYqH#0.66#971#fruits#TriCounty Produce +False Mangosteen#FalsfruipBsbcX#0.68#250#fruits#Simply Fresh +False Mangosteen#FalsfruiPrFfhe#0.70#386#fruits#Off the Vine +Fried Egg Tree#FriefruiihHUdc#0.29#649#fruits#TriCounty Produce +Fried Egg Tree#FriefruimdD1rf#0.28#527#fruits#Simply Fresh +Fried Egg Tree#FriefruivyAzYq#0.29#332#fruits#Off the Vine +Genipap#GenifruiDtKusQ#0.62#986#fruits#TriCounty Produce +Genipap#GenifruiXq32eP#0.61#326#fruits#Simply Fresh +Genipap#Genifruiphwwyq#0.61#794#fruits#Off the Vine +Ginger#GingfruiQLbRZI#0.28#841#fruits#TriCounty Produce +Ginger#GingfruiS8kK4p#0.29#432#fruits#Simply Fresh +Ginger#GingfruioL3Y4S#0.27#928#fruits#Off the Vine +Grapefruit#Grapfruih86Zxh#1.07#473#fruits#TriCounty Produce +Grapefruit#GrapfruiwL1v0N#1.08#878#fruits#Simply Fresh +Grapefruit#GrapfruihmJzWm#1.02#466#fruits#Off the Vine +Hackberry#HackfruiQjomN7#0.22#938#fruits#TriCounty Produce +Hackberry#HackfruiWS0eKp#0.20#780#fruits#Simply Fresh +Hackberry#Hackfrui0MIv6J#0.21#345#fruits#Off the Vine +Honey Locust#HonefruiebXGRc#1.08#298#fruits#TriCounty Produce +Honey Locust#HonefruiPSqILB#1.00#427#fruits#Simply Fresh +Honey Locust#Honefrui6UXtvW#1.03#422#fruits#Off the Vine +Japanese Plum#JapafruihTmoYR#0.40#658#fruits#TriCounty Produce 
+Japanese Plum#JapafruifGqz0l#0.40#700#fruits#Simply Fresh +Japanese Plum#JapafruiufWkLx#0.39#790#fruits#Off the Vine +Jojoba#JojofruisE0wTh#0.97#553#fruits#TriCounty Produce +Jojoba#JojofruiwiYLp2#1.02#969#fruits#Simply Fresh +Jojoba#JojofruigMD1ej#0.96#899#fruits#Off the Vine +Jostaberry#JostfruiglsEGV#0.50#300#fruits#TriCounty Produce +Jostaberry#JostfruiV3oo1h#0.52#423#fruits#Simply Fresh +Jostaberry#JostfruiUBerur#0.53#562#fruits#Off the Vine +Kangaroo Apple#KangfruiEQknz8#0.60#661#fruits#TriCounty Produce +Kangaroo Apple#KangfruiNabdFq#0.60#377#fruits#Simply Fresh +Kangaroo Apple#Kangfrui7hky1i#0.60#326#fruits#Off the Vine +Ken's Red#Ken'fruinPUSIm#0.21#337#fruits#TriCounty Produce +Ken's Red#Ken'fruiAoZlpl#0.21#902#fruits#Simply Fresh +Ken's Red#Ken'frui5rmbd4#0.22#972#fruits#Off the Vine +Ketembilla#Ketefrui3yAKxQ#0.31#303#fruits#TriCounty Produce +Ketembilla#KetefruiROn6F5#0.34#283#fruits#Simply Fresh +Ketembilla#Ketefrui16Rsts#0.33#887#fruits#Off the Vine +King Orange#KingfruisOFzWk#0.74#429#fruits#TriCounty Produce +King Orange#KingfruiBmzRJT#0.74#500#fruits#Simply Fresh +King Orange#KingfruiGsrgRX#0.78#994#fruits#Off the Vine +Kola Nut#KolafruiBbtAuw#0.58#991#fruits#TriCounty Produce +Kola Nut#KolafruirbnLVS#0.62#733#fruits#Simply Fresh +Kola Nut#Kolafrui1ItXJx#0.58#273#fruits#Off the Vine +Kuko#Kukofrui6YH5Ds#0.41#647#fruits#TriCounty Produce +Kuko#Kukofrui7WZaZK#0.39#241#fruits#Simply Fresh +Kuko#Kukofruig9MQFT#0.40#204#fruits#Off the Vine +Kumquat#KumqfruiT6WKQL#0.73#388#fruits#TriCounty Produce +Kumquat#KumqfruidLiFLU#0.70#393#fruits#Simply Fresh +Kumquat#KumqfruiL6zhQX#0.71#994#fruits#Off the Vine +Kwai Muk#KwaifruiQK1zOE#1.10#249#fruits#TriCounty Produce +Kwai Muk#KwaifruifbCRlT#1.14#657#fruits#Simply Fresh +Kwai Muk#Kwaifruipe7T2m#1.09#617#fruits#Off the Vine +Lanzone#LanzfruijsPf1v#0.34#835#fruits#TriCounty Produce +Lanzone#LanzfruibU3QoL#0.34#404#fruits#Simply Fresh +Lanzone#LanzfruiYgHwv6#0.34#237#fruits#Off the Vine +Lemon#Lemofrui4Tgsg2#0.46#843#fruits#TriCounty Produce +Lemon#LemofruivK6qvj#0.43#207#fruits#Simply Fresh +Lemon#LemofruiXSXqJ0#0.44#910#fruits#Off the Vine +Lemon Grass#LemofruiVFgVh5#0.40#575#fruits#TriCounty Produce +Lemon Grass#LemofruiWIelvi#0.41#386#fruits#Simply Fresh +Lemon Grass#LemofruiGVAow0#0.39#918#fruits#Off the Vine +Lilly-pilly#LillfruiEQnW1m#1.21#974#fruits#TriCounty Produce +Lilly-pilly#LillfruiMqVuR5#1.23#303#fruits#Simply Fresh +Lilly-pilly#LillfruiVGH9p4#1.17#512#fruits#Off the Vine +Ling Nut#LingfruiGtOf8X#0.85#540#fruits#TriCounty Produce +Ling Nut#LingfruiuP0Jf9#0.83#200#fruits#Simply Fresh +Ling Nut#LingfruiuO5qf5#0.81#319#fruits#Off the Vine +Lipote#LipofruisxD2Qc#0.85#249#fruits#TriCounty Produce +Lipote#LipofruiHNdIqL#0.85#579#fruits#Simply Fresh +Lipote#LipofruiSQ2pKK#0.83#472#fruits#Off the Vine +Litchee#Litcfrui1R6Ydz#0.99#806#fruits#TriCounty Produce +Litchee#LitcfruiwtDM79#1.01#219#fruits#Simply Fresh +Litchee#LitcfruilpPZbC#1.05#419#fruits#Off the Vine +Longan#LongfruiEI0lWF#1.02#573#fruits#TriCounty Produce +Longan#LongfruiPQxxSF#1.04#227#fruits#Simply Fresh +Longan#LongfruisdI812#0.99#993#fruits#Off the Vine +Love-in-a-mist#LovefruiKYPW70#0.69#388#fruits#TriCounty Produce +Love-in-a-mist#LovefruiHrgjDa#0.67#478#fruits#Simply Fresh +Love-in-a-mist#LovefruipSOWVz#0.71#748#fruits#Off the Vine +Lychee#LychfruiicVLnY#0.38#276#fruits#TriCounty Produce +Lychee#LychfruiGY6yJr#0.38#602#fruits#Simply Fresh +Lychee#LychfruiTzDCq2#0.40#572#fruits#Off the Vine +Mabolo#MabofruiSY8RQS#0.97#263#fruits#TriCounty Produce 
+Mabolo#MabofruiOWWk0n#0.98#729#fruits#Simply Fresh +Mabolo#MabofruixQLOTF#0.98#771#fruits#Off the Vine +Macadamia Nut#MacafruiZppJPw#1.22#888#fruits#TriCounty Produce +Macadamia Nut#MacafruiI7XFMV#1.24#484#fruits#Simply Fresh +Macadamia Nut#Macafrui4x8bxV#1.20#536#fruits#Off the Vine +Madagascar Plum#MadafruiVj5fDf#1.14#596#fruits#TriCounty Produce +Madagascar Plum#MadafruivJhAFI#1.15#807#fruits#Simply Fresh +Madagascar Plum#Madafrui7MTe1x#1.17#355#fruits#Off the Vine +Magnolia Vine#MagnfruiigN4Y1#1.17#321#fruits#TriCounty Produce +Magnolia Vine#MagnfruicKtiHd#1.15#353#fruits#Simply Fresh +Magnolia Vine#MagnfruiLPDSCp#1.23#324#fruits#Off the Vine +Mamey#Mamefrui5rjLF6#0.36#683#fruits#TriCounty Produce +Mamey#MamefruiM6ndnR#0.38#404#fruits#Simply Fresh +Mamey#Mamefruiq9KntD#0.36#527#fruits#Off the Vine +Mandarin Orange#MandfruiRKpmKL#0.42#352#fruits#TriCounty Produce +Mandarin Orange#Mandfrui1V0KLG#0.42#548#fruits#Simply Fresh +Mandarin Orange#Mandfruig2o9Fg#0.41#686#fruits#Off the Vine +Marany Nut#MarafruiqkrwoJ#1.14#273#fruits#TriCounty Produce +Marany Nut#MarafruiCGKpke#1.12#482#fruits#Simply Fresh +Marany Nut#MarafruiB1YE5x#1.09#412#fruits#Off the Vine +Marula#MarufruiXF4biH#0.22#403#fruits#TriCounty Produce +Marula#MarufruidZiVKZ#0.23#317#fruits#Simply Fresh +Marula#MarufruiIS8BEp#0.21#454#fruits#Off the Vine +Mayhaw#MayhfruiCSrm7k#0.24#220#fruits#TriCounty Produce +Mayhaw#MayhfruiNRDzWs#0.25#710#fruits#Simply Fresh +Mayhaw#MayhfruiIUCyEg#0.24#818#fruits#Off the Vine +Meiwa Kumquat#MeiwfruiYhv3AY#0.21#997#fruits#TriCounty Produce +Meiwa Kumquat#MeiwfruiyzQFNR#0.22#347#fruits#Simply Fresh +Meiwa Kumquat#Meiwfruict4OUp#0.21#923#fruits#Off the Vine +Mexican Barberry#Mexifrui2P2dXi#0.28#914#fruits#TriCounty Produce +Mexican Barberry#MexifruiywUTMI#0.29#782#fruits#Simply Fresh +Mexican Barberry#MexifruijPHu5X#0.29#367#fruits#Off the Vine +Meyer Lemon#Meyefruin9901J#0.38#824#fruits#TriCounty Produce +Meyer Lemon#MeyefruiNeQpjO#0.37#617#fruits#Simply Fresh +Meyer Lemon#MeyefruiYEVznZ#0.37#741#fruits#Off the Vine +Mississippi Honeyberry#Missfruipb5iW3#0.95#595#fruits#TriCounty Produce +Mississippi Honeyberry#MissfruiINiDbB#0.96#551#fruits#Simply Fresh +Mississippi Honeyberry#MissfruiNUQ82a#0.93#396#fruits#Off the Vine +Monkey Pot#MonkfruiXlTW4j#0.90#896#fruits#TriCounty Produce +Monkey Pot#Monkfrui1p7a4h#0.88#344#fruits#Simply Fresh +Monkey Pot#Monkfrui4eKggb#0.92#917#fruits#Off the Vine +Monos Plum#Monofrui0Mv9aV#1.11#842#fruits#TriCounty Produce +Monos Plum#Monofrui6iTGQY#1.14#570#fruits#Simply Fresh +Monos Plum#MonofruiNu2uGH#1.13#978#fruits#Off the Vine +Moosewood#MoosfruiMXEGex#0.86#969#fruits#TriCounty Produce +Moosewood#Moosfrui8805mB#0.86#963#fruits#Simply Fresh +Moosewood#MoosfruiOsnDFL#0.88#594#fruits#Off the Vine +Natal Orange#NatafruitB8Kh2#0.42#332#fruits#TriCounty Produce +Natal Orange#NatafruiOhqRrd#0.42#982#fruits#Simply Fresh +Natal Orange#NatafruiRObMf6#0.41#268#fruits#Off the Vine +Nectarine#NectfruilNfeD8#0.36#601#fruits#TriCounty Produce +Nectarine#NectfruiQfjt6b#0.35#818#fruits#Simply Fresh +Nectarine#Nectfrui5U7U96#0.37#930#fruits#Off the Vine +Neem Tree#NeemfruiCruEMF#0.24#222#fruits#TriCounty Produce +Neem Tree#NeemfruiGv0pv5#0.24#645#fruits#Simply Fresh +Neem Tree#NeemfruiUFPVfk#0.25#601#fruits#Off the Vine +New Zealand Spinach#New fruihDIgec#0.87#428#fruits#TriCounty Produce +New Zealand Spinach#New fruiaoR9TP#0.87#630#fruits#Simply Fresh +New Zealand Spinach#New fruiy8LBul#0.94#570#fruits#Off the Vine +Olosapo#OlosfruiGXvaMm#0.76#388#fruits#TriCounty Produce 
+Olosapo#OlosfruiESlpB3#0.76#560#fruits#Simply Fresh +Olosapo#OlosfruiFNEkER#0.76#962#fruits#Off the Vine +Oregon Grape#OregfruiWxhzrf#1.14#892#fruits#TriCounty Produce +Oregon Grape#OregfruiMgjHUn#1.20#959#fruits#Simply Fresh +Oregon Grape#OregfruiC5UCxX#1.17#419#fruits#Off the Vine +Otaheite Apple#OtahfruilT0iFj#0.21#579#fruits#TriCounty Produce +Otaheite Apple#Otahfrui92PyMY#0.22#857#fruits#Simply Fresh +Otaheite Apple#OtahfruiLGD1EH#0.20#807#fruits#Off the Vine +Oyster Plant#OystfruimGxOsj#0.77#835#fruits#TriCounty Produce +Oyster Plant#Oystfrui1kudBX#0.81#989#fruits#Simply Fresh +Oyster Plant#OystfruiaX3uO2#0.80#505#fruits#Off the Vine +Panama Berry#PanafruiZG0Vp4#1.19#288#fruits#TriCounty Produce +Panama Berry#PanafruiobvXPE#1.21#541#fruits#Simply Fresh +Panama Berry#PanafruipaW8F3#1.16#471#fruits#Off the Vine +Peach Tomato#PeacfruiQpovYH#1.20#475#fruits#TriCounty Produce +Peach Tomato#PeacfruixYXLTN#1.18#655#fruits#Simply Fresh +Peach Tomato#PeacfruiILDYAp#1.23#876#fruits#Off the Vine +Peanut#Peanfruiy8M7pt#0.69#275#fruits#TriCounty Produce +Peanut#PeanfruiEimbED#0.65#307#fruits#Simply Fresh +Peanut#Peanfruic452Vc#0.68#937#fruits#Off the Vine +Peanut Butter Fruit#PeanfruixEDt9Y#0.27#628#fruits#TriCounty Produce +Peanut Butter Fruit#PeanfruiST0T0R#0.27#910#fruits#Simply Fresh +Peanut Butter Fruit#Peanfrui7jeRN2#0.27#938#fruits#Off the Vine +Pear#PearfruiB5YmSJ#0.20#945#fruits#TriCounty Produce +Pear#PearfruiA93XZx#0.21#333#fruits#Simply Fresh +Pear#PearfruioNKiIf#0.21#715#fruits#Off the Vine +Pecan#PecafruiiTIv1Z#0.26#471#fruits#TriCounty Produce +Pecan#PecafruiMGkqla#0.26#889#fruits#Simply Fresh +Pecan#Pecafrui1szYz2#0.25#929#fruits#Off the Vine +Purple Passion Fruit#Purpfrui4mMGkD#1.04#914#fruits#TriCounty Produce +Purple Passion Fruit#Purpfrui5XOW3K#1.06#423#fruits#Simply Fresh +Purple Passion Fruit#PurpfruifDTAgW#1.05#549#fruits#Off the Vine +Red Mulberry#Red fruiVLOXIW#1.24#270#fruits#TriCounty Produce +Red Mulberry#Red fruiXNXt4a#1.21#836#fruits#Simply Fresh +Red Mulberry#Red fruiUseWLG#1.21#795#fruits#Off the Vine +Red Princess#Red fruigJLR4V#0.23#829#fruits#TriCounty Produce +Red Princess#Red fruinVKps5#0.23#558#fruits#Simply Fresh +Red Princess#Red frui0jl9mg#0.24#252#fruits#Off the Vine +Striped Screw Pine#StrifruiUKzjoU#0.60#226#fruits#TriCounty Produce +Striped Screw Pine#StrifruivWLDzH#0.64#685#fruits#Simply Fresh +Striped Screw Pine#StrifruiiF7CGH#0.60#983#fruits#Off the Vine +Tapioca#Tapifruib4LCqt#0.40#955#fruits#TriCounty Produce +Tapioca#TapifruiwgQLj9#0.41#889#fruits#Simply Fresh +Tapioca#TapifruiZ6Igg3#0.41#655#fruits#Off the Vine +Tavola#Tavofrui0k9XOt#1.16#938#fruits#TriCounty Produce +Tavola#Tavofrui8DuRxL#1.08#979#fruits#Simply Fresh +Tavola#TavofruiNZEuJZ#1.16#215#fruits#Off the Vine +Tea#TeafruiL0357s#1.11#516#fruits#TriCounty Produce +Tea#TeafruiD5soTf#1.13#970#fruits#Simply Fresh +Tea#TeafruiOWq4oO#1.19#357#fruits#Off the Vine +Ugli Fruit#UglifruipKNCpf#0.24#501#fruits#TriCounty Produce +Ugli Fruit#UglifruifbDrzc#0.24#642#fruits#Simply Fresh +Ugli Fruit#Uglifruiwx8or4#0.24#280#fruits#Off the Vine +Vegetable Brain#VegefruieXLBoc#0.73#355#fruits#TriCounty Produce +Vegetable Brain#Vegefruik5FSdl#0.71#498#fruits#Simply Fresh +Vegetable Brain#VegefruiKBfzN0#0.72#453#fruits#Off the Vine +White Walnut#Whitfruit3oVHL#0.30#501#fruits#TriCounty Produce +White Walnut#WhitfruiHygydw#0.30#913#fruits#Simply Fresh +White Walnut#WhitfruieNtplo#0.30#401#fruits#Off the Vine +Wood Apple#WoodfruijVPRqA#0.68#501#fruits#TriCounty Produce +Wood 
Apple#Woodfrui4Zk69T#0.68#616#fruits#Simply Fresh +Wood Apple#WoodfruiuSLHZK#0.70#474#fruits#Off the Vine +Yellow Horn#Yellfrui5igjjf#1.18#729#fruits#TriCounty Produce +Yellow Horn#Yellfrui0DiPqa#1.13#517#fruits#Simply Fresh +Yellow Horn#Yellfrui0ljvqC#1.14#853#fruits#Off the Vine +Yellow Sapote#YellfruilGmCfq#0.93#204#fruits#TriCounty Produce +Yellow Sapote#Yellfrui4J2mke#0.88#269#fruits#Simply Fresh +Yellow Sapote#Yellfrui6PuXaL#0.86#575#fruits#Off the Vine +Ylang-ylang#Ylanfrui3rmByO#0.76#429#fruits#TriCounty Produce +Ylang-ylang#YlanfruiA80Nkq#0.76#886#fruits#Simply Fresh +Ylang-ylang#YlanfruinUEm5d#0.72#747#fruits#Off the Vine +Zapote Blanco#ZapofruisZ5sMA#0.67#428#fruits#TriCounty Produce +Zapote Blanco#ZapofruilKxl7N#0.65#924#fruits#Simply Fresh +Zapote Blanco#ZapofruiAe6Eu1#0.68#255#fruits#Off the Vine +Zulu Nut#Zulufrui469K4k#0.71#445#fruits#TriCounty Produce +Zulu Nut#ZulufruiWbz6vU#0.71#653#fruits#Simply Fresh +Zulu Nut#Zulufrui0LJnWK#0.71#858#fruits#Off the Vine +Artichoke#ArtivegeIuqmS4#0.71#282#vegetables#The Pantry +Artichoke#Artivegebljjnf#0.69#66#vegetables#TriCounty Produce +Artichoke#ArtivegeTa2lcF#0.70#618#vegetables#Off the Vine +Asparagus#AspavegezC0cDl#0.23#70#vegetables#The Pantry +Asparagus#AspavegeM1q5Kt#0.24#546#vegetables#TriCounty Produce +Asparagus#AspavegeXWbCb8#0.24#117#vegetables#Off the Vine +Basil#Basivegev08fzf#0.31#213#vegetables#The Pantry +Basil#BasivegeF3Uha7#0.29#651#vegetables#TriCounty Produce +Basil#BasivegeqR8SHC#0.31#606#vegetables#Off the Vine +Bean#BeanvegegCFUOp#0.27#794#vegetables#The Pantry +Bean#BeanvegeqMSEVq#0.27#468#vegetables#TriCounty Produce +Bean#Beanvege4IGUwX#0.27#463#vegetables#Off the Vine +Beet#BeetvegedEv4Ic#0.35#120#vegetables#The Pantry +Beet#Beetvegegi1bz1#0.35#540#vegetables#TriCounty Produce +Beet#BeetvegemztZcN#0.36#386#vegetables#Off the Vine +Blackeyed Pea#Blacvege3TPldr#0.86#133#vegetables#The Pantry +Blackeyed Pea#Blacvege3Zqnep#0.88#67#vegetables#TriCounty Produce +Blackeyed Pea#Blacvege3khffZ#0.90#790#vegetables#Off the Vine +Cabbage#CabbvegeY0c4Fw#0.82#726#vegetables#The Pantry +Cabbage#CabbvegeoaK7Co#0.85#439#vegetables#TriCounty Produce +Cabbage#CabbvegeVvO646#0.82#490#vegetables#Off the Vine +Carrot#CarrvegeEbI0sw#0.45#717#vegetables#The Pantry +Carrot#CarrvegeEZndWL#0.49#284#vegetables#TriCounty Produce +Carrot#CarrvegewUkHao#0.47#122#vegetables#Off the Vine +Cauliflower#Caulvege1CPeNG#0.68#756#vegetables#The Pantry +Cauliflower#CaulvegedrPqib#0.66#269#vegetables#TriCounty Produce +Cauliflower#CaulvegeT6cka8#0.65#728#vegetables#Off the Vine +Chayote#ChayvegePRReGE#0.14#233#vegetables#The Pantry +Chayote#Chayvegep058f7#0.14#88#vegetables#TriCounty Produce +Chayote#ChayvegeoxO40S#0.14#611#vegetables#Off the Vine +Corn#CornvegeukXkv6#0.72#632#vegetables#The Pantry +Corn#CornvegePnPREC#0.72#609#vegetables#TriCounty Produce +Corn#CornvegeO0GwoQ#0.70#664#vegetables#Off the Vine +Cucumber#CucuvegeEqQeA7#0.94#499#vegetables#The Pantry +Cucumber#CucuvegewmKbJ1#0.94#738#vegetables#TriCounty Produce +Cucumber#CucuvegeUW6JaA#0.94#565#vegetables#Off the Vine +Cantaloupe#CantvegeIHs9vJ#0.66#411#vegetables#The Pantry +Cantaloupe#CantvegeEaDdST#0.66#638#vegetables#TriCounty Produce +Cantaloupe#CantvegewWQEa0#0.64#682#vegetables#Off the Vine +Carraway#CarrvegewuL4Ma#0.32#740#vegetables#The Pantry +Carraway#CarrvegeyiWfBj#0.32#265#vegetables#TriCounty Produce +Carraway#CarrvegeMjb1i9#0.31#732#vegetables#Off the Vine +Celeriac#CelevegeoTBicd#0.74#350#vegetables#The Pantry +Celeriac#CelevegeCNABoZ#0.70#261#vegetables#TriCounty 
Produce +Celeriac#Celevege9LUeww#0.70#298#vegetables#Off the Vine +Celery#Celevegej40ZCc#0.59#740#vegetables#The Pantry +Celery#CelevegerYlVRy#0.58#734#vegetables#TriCounty Produce +Celery#Celevege67eimC#0.58#619#vegetables#Off the Vine +Chervil#ChervegeuH4Dge#0.09#502#vegetables#The Pantry +Chervil#Chervegea1OyKO#0.09#299#vegetables#TriCounty Produce +Chervil#Chervegeq56gMO#0.09#474#vegetables#Off the Vine +Chicory#Chicvege79qoQ8#0.09#709#vegetables#The Pantry +Chicory#ChicvegeTSVBQq#0.10#477#vegetables#TriCounty Produce +Chicory#Chicvege6qpcyi#0.10#282#vegetables#Off the Vine +Chinese Cabbage#ChinvegeFNsSRn#0.78#408#vegetables#The Pantry +Chinese Cabbage#Chinvege2ldNr3#0.80#799#vegetables#TriCounty Produce +Chinese Cabbage#ChinvegeK3R2Td#0.80#180#vegetables#Off the Vine +Chinese Beans#ChinvegebxbyPy#0.45#654#vegetables#The Pantry +Chinese Beans#ChinvegewKGwgx#0.45#206#vegetables#TriCounty Produce +Chinese Beans#ChinvegevVjzC0#0.47#643#vegetables#Off the Vine +Chines Kale#ChinvegeCfdkss#0.70#239#vegetables#The Pantry +Chines Kale#Chinvege6V6Dne#0.65#548#vegetables#TriCounty Produce +Chines Kale#ChinvegeB7vE3x#0.66#380#vegetables#Off the Vine +Chinese Radish#ChinvegeXcM4eq#0.22#190#vegetables#The Pantry +Chinese Radish#ChinvegeTdUBqN#0.22#257#vegetables#TriCounty Produce +Chinese Radish#ChinvegeMXMms8#0.22#402#vegetables#Off the Vine +Chinese Mustard#ChinvegeRDdpdl#0.33#149#vegetables#The Pantry +Chinese Mustard#ChinvegeABDhNd#0.32#320#vegetables#TriCounty Produce +Chinese Mustard#Chinvege8NPwa2#0.34#389#vegetables#Off the Vine +Cilantro#CilavegeQXBEsW#0.60#674#vegetables#The Pantry +Cilantro#CilavegeRgjkUG#0.60#355#vegetables#TriCounty Produce +Cilantro#CilavegelT2msu#0.59#464#vegetables#Off the Vine +Collard#CollvegesTGGNw#0.32#745#vegetables#The Pantry +Collard#CollvegeAwdor5#0.32#124#vegetables#TriCounty Produce +Collard#CollvegeQe900L#0.30#796#vegetables#Off the Vine +Coriander#CorivegeXxp4xY#0.26#560#vegetables#The Pantry +Coriander#Corivege9xBAT0#0.27#321#vegetables#TriCounty Produce +Coriander#CorivegeCfNjBx#0.27#709#vegetables#Off the Vine +Dandelion#DandvegeJNcnbr#0.11#285#vegetables#The Pantry +Dandelion#DandvegeGwBkHZ#0.11#733#vegetables#TriCounty Produce +Dandelion#DandvegeZfwVqn#0.11#57#vegetables#Off the Vine +Daikon Radish#DaikvegeHHsd7M#0.61#743#vegetables#The Pantry +Daikon Radish#DaikvegeIu17yC#0.62#459#vegetables#TriCounty Produce +Daikon Radish#DaikvegePzFjqf#0.63#296#vegetables#Off the Vine +Eggplant#EggpvegeKJtydN#0.55#200#vegetables#The Pantry +Eggplant#EggpvegeQMKrNs#0.53#208#vegetables#TriCounty Produce +Eggplant#EggpvegeN0WnSo#0.51#761#vegetables#Off the Vine +English Pea#Englvegea1ytIn#0.40#457#vegetables#The Pantry +English Pea#EnglvegerU9Vty#0.37#263#vegetables#TriCounty Produce +English Pea#EnglvegeCmkd3y#0.39#430#vegetables#Off the Vine +Fennel#Fennvegebz2UM7#0.76#545#vegetables#The Pantry +Fennel#FennvegeQzjtZ3#0.78#795#vegetables#TriCounty Produce +Fennel#FennvegeXSrW61#0.75#79#vegetables#Off the Vine +Garlic#GarlvegesR2yel#0.76#478#vegetables#The Pantry +Garlic#GarlvegeEQvt8W#0.77#349#vegetables#TriCounty Produce +Garlic#GarlvegedljBdK#0.80#708#vegetables#Off the Vine +Ginger#GingvegeMNiTc2#0.88#563#vegetables#The Pantry +Ginger#Gingvegeq366Sn#0.89#738#vegetables#TriCounty Produce +Ginger#GingvegeznyyVj#0.89#598#vegetables#Off the Vine +Horseradish#HorsvegemSwISt#0.12#622#vegetables#The Pantry +Horseradish#HorsvegetCOS0x#0.11#279#vegetables#TriCounty Produce +Horseradish#Horsvegew6XXaS#0.12#478#vegetables#Off the Vine +Japanese 
Eggplant#JapavegeTdKDCL#0.57#539#vegetables#The Pantry +Japanese Eggplant#JapavegevsJfGa#0.58#782#vegetables#TriCounty Produce +Japanese Eggplant#JapavegeCIrIxd#0.57#777#vegetables#Off the Vine +Jerusalem Artichoke#Jeruvege928cr0#0.13#231#vegetables#The Pantry +Jerusalem Artichoke#JeruvegeC2v086#0.14#123#vegetables#TriCounty Produce +Jerusalem Artichoke#JeruvegeehCYzi#0.14#196#vegetables#Off the Vine +Jicama#JicavegeRWYj9n#0.75#79#vegetables#The Pantry +Jicama#JicavegeGk5LKH#0.71#292#vegetables#TriCounty Produce +Jicama#JicavegeUjpaX1#0.70#308#vegetables#Off the Vine +Kale#Kalevegext6RNT#0.55#765#vegetables#The Pantry +Kale#KalevegeFsp17B#0.53#107#vegetables#TriCounty Produce +Kale#KalevegeAffBTS#0.57#573#vegetables#Off the Vine +Kiwifruit#KiwivegeloZBKJ#0.60#769#vegetables#The Pantry +Kiwifruit#KiwivegenCQAHw#0.59#307#vegetables#TriCounty Produce +Kiwifruit#Kiwivege0Gi3P2#0.59#235#vegetables#Off the Vine +Kohlrabi#KohlvegeJFKZDl#0.26#406#vegetables#The Pantry +Kohlrabi#Kohlvege32UTAj#0.28#613#vegetables#TriCounty Produce +Kohlrabi#KohlvegejNQC1M#0.28#326#vegetables#Off the Vine +Leek#Leekvege5iaFtg#0.70#580#vegetables#The Pantry +Leek#Leekvegei9Wxbz#0.68#188#vegetables#TriCounty Produce +Leek#LeekvegewY4mAc#0.70#473#vegetables#Off the Vine +Lettuce#LettvegesK9wDR#0.55#716#vegetables#The Pantry +Lettuce#LettvegeWzMyCM#0.57#83#vegetables#TriCounty Produce +Lettuce#LettvegeHgfGG8#0.56#268#vegetables#Off the Vine +Melons#Melovege6t93WF#0.11#252#vegetables#The Pantry +Melons#Melovegeq9kz7T#0.12#558#vegetables#TriCounty Produce +Melons#Melovege9kLTXN#0.12#382#vegetables#Off the Vine +Mushroom#MushvegeSq53h8#0.59#365#vegetables#The Pantry +Mushroom#Mushvegedq6lYP#0.59#444#vegetables#TriCounty Produce +Mushroom#Mushvege8o27D2#0.55#467#vegetables#Off the Vine +Okra#OkravegeTszQSL#0.55#62#vegetables#The Pantry +Okra#OkravegeJBWmfh#0.58#165#vegetables#TriCounty Produce +Okra#OkravegeD6tF9n#0.55#77#vegetables#Off the Vine +Onion#OniovegejwimQo#0.80#186#vegetables#The Pantry +Onion#OniovegeUOwwks#0.80#417#vegetables#TriCounty Produce +Onion#OniovegezcRDrc#0.80#435#vegetables#Off the Vine +Oregano#OregvegetlU7Ez#0.71#119#vegetables#The Pantry +Oregano#Oregvege9h9ZKy#0.70#173#vegetables#TriCounty Produce +Oregano#OregvegebXr0PJ#0.70#773#vegetables#Off the Vine +Parsley#ParsvegeXFEjjN#0.83#502#vegetables#The Pantry +Parsley#ParsvegejAg5C4#0.80#454#vegetables#TriCounty Produce +Parsley#ParsvegehAtH2H#0.84#523#vegetables#Off the Vine +Parsnip#Parsvegee9Lp6D#0.46#626#vegetables#The Pantry +Parsnip#ParsvegeSxXHSA#0.47#411#vegetables#TriCounty Produce +Parsnip#Parsvegea0stPf#0.44#403#vegetables#Off the Vine +Pea#Peavegecq4SxR#0.18#342#vegetables#The Pantry +Pea#Peavege46Gdp9#0.18#255#vegetables#TriCounty Produce +Pea#Peavegeov1gc5#0.18#251#vegetables#Off the Vine +Pepper#PeppvegeUcBYRp#0.33#52#vegetables#The Pantry +Pepper#PeppvegeB60btP#0.35#107#vegetables#TriCounty Produce +Pepper#PeppvegeG4tP3e#0.34#481#vegetables#Off the Vine +Pigeon Pea#Pigevegec5bAtm#0.94#391#vegetables#The Pantry +Pigeon Pea#Pigevegeb93eLi#0.91#447#vegetables#TriCounty Produce +Pigeon Pea#PigevegejEBDRa#0.89#259#vegetables#Off the Vine +Irish Potato#IrisvegeJNQqby#0.72#355#vegetables#The Pantry +Irish Potato#Irisvegewq1PLd#0.72#601#vegetables#TriCounty Produce +Irish Potato#IrisvegeAfFLdO#0.68#740#vegetables#Off the Vine +Pumpkin#PumpvegeiYsPR8#0.25#776#vegetables#The Pantry +Pumpkin#PumpvegelqP1Kh#0.25#189#vegetables#TriCounty Produce +Pumpkin#Pumpvegeb3nQU5#0.26#207#vegetables#Off the Vine 
+Radish#RadivegeNwwSBJ#0.16#613#vegetables#The Pantry +Radish#Radivege0tIBnL#0.16#779#vegetables#TriCounty Produce +Radish#RadivegeNLqJCf#0.16#731#vegetables#Off the Vine +Rhubarb#RhubvegeREfOti#0.12#301#vegetables#The Pantry +Rhubarb#Rhubvege4Jc3b7#0.12#557#vegetables#TriCounty Produce +Rhubarb#RhubvegeaXqF7H#0.12#378#vegetables#Off the Vine +Rosemary#Rosevege16QStc#0.73#380#vegetables#The Pantry +Rosemary#RosevegeNf6Oem#0.75#622#vegetables#TriCounty Produce +Rosemary#RosevegeFgsOyN#0.74#631#vegetables#Off the Vine +Rutabaga#RutavegecUYfQ3#0.55#676#vegetables#The Pantry +Rutabaga#RutavegejOG5DF#0.55#273#vegetables#TriCounty Produce +Rutabaga#RutavegewEVjzV#0.53#452#vegetables#Off the Vine +Salsify#SalsvegeViS9HF#0.11#537#vegetables#The Pantry +Salsify#Salsvegemd3HAL#0.11#753#vegetables#TriCounty Produce +Salsify#SalsvegeuRCnmq#0.10#787#vegetables#Off the Vine +Savory#Savovegee4DRWl#0.21#456#vegetables#The Pantry +Savory#SavovegerZ90Xm#0.21#642#vegetables#TriCounty Produce +Savory#Savovegeje7yy7#0.22#328#vegetables#Off the Vine +Sesame#Sesavege4NAWZE#0.84#54#vegetables#The Pantry +Sesame#SesavegeMTc9IN#0.84#458#vegetables#TriCounty Produce +Sesame#SesavegegOwAjo#0.83#125#vegetables#Off the Vine +Shallots#ShalvegeUO2pDO#0.26#599#vegetables#The Pantry +Shallots#ShalvegeY1sekb#0.27#647#vegetables#TriCounty Produce +Shallots#ShalvegeSDC8VY#0.27#369#vegetables#Off the Vine +Sugar Snap Peas#SugavegepUZDTl#0.47#308#vegetables#The Pantry +Sugar Snap Peas#Sugavege1XyzNH#0.48#205#vegetables#TriCounty Produce +Sugar Snap Peas#SugavegeJuaG7f#0.46#348#vegetables#Off the Vine +Soybean#SoybvegeqxSVRL#0.70#639#vegetables#The Pantry +Soybean#SoybvegezEMjOG#0.68#423#vegetables#TriCounty Produce +Soybean#SoybvegebanSFq#0.67#268#vegetables#Off the Vine +Spaghetti Squash#SpagvegeMNO1yC#0.12#753#vegetables#The Pantry +Spaghetti Squash#SpagvegeilpUaD#0.13#604#vegetables#TriCounty Produce +Spaghetti Squash#SpagvegeAOoZNX#0.13#431#vegetables#Off the Vine +Spinach#SpinvegeegXXou#0.10#742#vegetables#The Pantry +Spinach#SpinvegeVcqXL6#0.11#708#vegetables#TriCounty Produce +Spinach#SpinvegetZ26DN#0.11#625#vegetables#Off the Vine +Sweet Potato#SweevegepNDQWb#0.94#720#vegetables#The Pantry +Sweet Potato#Sweevegepnw7Tm#0.90#377#vegetables#TriCounty Produce +Sweet Potato#Sweevegeyk0C82#0.89#242#vegetables#Off the Vine +Swiss Chard#SwisvegeksalTA#0.54#545#vegetables#The Pantry +Swiss Chard#SwisvegeKm2Kze#0.54#472#vegetables#TriCounty Produce +Swiss Chard#SwisvegehteuMk#0.56#142#vegetables#Off the Vine +Taro#Tarovege3fpGV6#0.87#155#vegetables#The Pantry +Taro#TarovegerZkmof#0.86#371#vegetables#TriCounty Produce +Taro#TarovegeXKPuzc#0.89#443#vegetables#Off the Vine +Tarragon#TarrvegeCzVC6U#0.18#491#vegetables#The Pantry +Tarragon#TarrvegesIkEfS#0.17#65#vegetables#TriCounty Produce +Tarragon#TarrvegerZsKFP#0.18#180#vegetables#Off the Vine +Thyme#Thymvege8Rv72c#0.41#442#vegetables#The Pantry +Thyme#ThymvegeJoUdQS#0.42#237#vegetables#TriCounty Produce +Thyme#ThymvegeRck5uO#0.43#491#vegetables#Off the Vine +Tomato#Tomavegey0NHGK#0.31#60#vegetables#The Pantry +Tomato#TomavegeKAjRUn#0.30#630#vegetables#TriCounty Produce +Tomato#TomavegePZOHlH#0.30#70#vegetables#Off the Vine +Turnip#TurnvegeRVQiV5#0.44#580#vegetables#The Pantry +Turnip#TurnvegeVjIX9D#0.45#743#vegetables#TriCounty Produce +Turnip#TurnvegelFhvuJ#0.44#219#vegetables#Off the Vine +Watercress#WatevegelwzPLQ#0.54#230#vegetables#The Pantry +Watercress#Watevege8oeDCT#0.54#774#vegetables#TriCounty Produce +Watercress#Watevegexr8L1t#0.55#185#vegetables#Off the Vine 
+Watermelon#WatevegeL83MRH#0.19#698#vegetables#The Pantry +Watermelon#WatevegeR2S4Dq#0.21#488#vegetables#TriCounty Produce +Watermelon#WatevegepFPXQu#0.21#439#vegetables#Off the Vine +Kamote#KamovegegdON75#0.13#218#vegetables#The Pantry +Kamote#KamovegevupDBf#0.13#98#vegetables#TriCounty Produce +Kamote#KamovegeSQX7IA#0.14#703#vegetables#Off the Vine +Alogbati#AlogvegeB1WaJU#0.41#775#vegetables#The Pantry +Alogbati#AlogvegeVr5cPP#0.40#789#vegetables#TriCounty Produce +Alogbati#AlogvegeyTUQzy#0.40#416#vegetables#Off the Vine +Ampalaya#AmpavegemR9fSd#0.85#107#vegetables#The Pantry +Ampalaya#AmpavegeJDu9Im#0.90#676#vegetables#TriCounty Produce +Ampalaya#AmpavegepL8GH5#0.86#728#vegetables#Off the Vine +Dahon ng sili#Dahovege6X9grk#0.11#369#vegetables#The Pantry +Dahon ng sili#DahovegeiHZjQT#0.11#141#vegetables#TriCounty Produce +Dahon ng sili#DahovegeoCDAH8#0.12#517#vegetables#Off the Vine +Gabi#GabivegeVm4Xk3#0.44#396#vegetables#The Pantry +Gabi#Gabivegeu6woqK#0.42#722#vegetables#TriCounty Produce +Gabi#GabivegezcA7q1#0.42#394#vegetables#Off the Vine +Kabute#Kabuvege6Tqrif#0.16#123#vegetables#The Pantry +Kabute#KabuvegeA3uYdG#0.15#183#vegetables#TriCounty Produce +Kabute#KabuvegeXW6ZiI#0.16#624#vegetables#Off the Vine +Kamoteng Kahoy#KamovegeAdW37X#0.42#782#vegetables#The Pantry +Kamoteng Kahoy#KamovegetFlqpC#0.42#515#vegetables#TriCounty Produce +Kamoteng Kahoy#KamovegeMvxoLn#0.40#166#vegetables#Off the Vine +Kangkong#KangvegeSFTvEz#0.35#759#vegetables#The Pantry +Kangkong#KangvegeRLR6gL#0.34#695#vegetables#TriCounty Produce +Kangkong#Kangvege9BFo14#0.35#783#vegetables#Off the Vine +Labanos#Labavege3qrWJL#0.94#514#vegetables#The Pantry +Labanos#LabavegekgVWDH#0.89#210#vegetables#TriCounty Produce +Labanos#LabavegeiVPgMx#0.89#207#vegetables#Off the Vine +Labong#LabovegeX3O8yz#0.85#722#vegetables#The Pantry +Labong#LabovegeI1wSEs#0.87#472#vegetables#TriCounty Produce +Labong#LabovegeOPiQht#0.85#740#vegetables#Off the Vine +Malunggay#MaluvegeHkwAFm#0.30#252#vegetables#The Pantry +Malunggay#Maluvegez6TiSY#0.30#245#vegetables#TriCounty Produce +Malunggay#MaluvegewzY37D#0.31#405#vegetables#Off the Vine +Munggo#MungvegeqeuwGw#0.25#362#vegetables#The Pantry +Munggo#MungvegeNhqWvL#0.26#360#vegetables#TriCounty Produce +Munggo#MungvegeGxNxQC#0.25#555#vegetables#Off the Vine +Pechay#PechvegezDeHFZ#0.36#401#vegetables#The Pantry +Pechay#Pechvegehi4Fcx#0.35#723#vegetables#TriCounty Produce +Pechay#Pechvege8Pq8Eo#0.36#141#vegetables#Off the Vine +Sigarilyas#SigavegeMJrtlV#0.88#335#vegetables#The Pantry +Sigarilyas#SigavegeLhsoOB#0.87#768#vegetables#TriCounty Produce +Sigarilyas#SigavegeS6RJcA#0.93#356#vegetables#Off the Vine +Sitaw#Sitavege0hMi9z#0.65#153#vegetables#The Pantry +Sitaw#Sitavegeez1g6N#0.67#561#vegetables#TriCounty Produce +Sitaw#Sitavege0BCNeF#0.66#674#vegetables#Off the Vine +Talong#TalovegevZjVK6#0.10#530#vegetables#The Pantry +Talong#TalovegexX4MRw#0.09#305#vegetables#TriCounty Produce +Talong#TalovegeO3U2ze#0.10#126#vegetables#Off the Vine +Toge#TogevegeYelJUw#0.54#449#vegetables#The Pantry +Toge#Togevegeilr1xK#0.54#274#vegetables#TriCounty Produce +Toge#Togevegesvjnyn#0.51#316#vegetables#Off the Vine +Ube#UbevegeoPnxvb#0.56#397#vegetables#The Pantry +Ube#Ubevege2CNyve#0.55#450#vegetables#TriCounty Produce +Ube#UbevegeC43sVj#0.55#263#vegetables#Off the Vine +Upo#UpovegecOGRqC#0.22#404#vegetables#The Pantry +Upo#Upovegekjl2wl#0.22#541#vegetables#TriCounty Produce +Upo#UpovegemTTTwI#0.23#459#vegetables#Off the Vine +Edamame#EdamvegeVYtk8z#0.79#296#vegetables#The Pantry 
+Edamame#Edamvege608vXi#0.78#700#vegetables#TriCounty Produce +Edamame#Edamvege1jiqGY#0.75#115#vegetables#Off the Vine +Hairy melon#HairvegeFYFHIw#0.71#789#vegetables#The Pantry +Hairy melon#HairvegeS7AAqI#0.72#302#vegetables#TriCounty Produce +Hairy melon#HairvegeO6WJHL#0.72#444#vegetables#Off the Vine +Burdock#BurdvegeyLstLV#0.56#761#vegetables#The Pantry +Burdock#BurdvegeZsqAjT#0.56#582#vegetables#TriCounty Produce +Burdock#BurdvegeycF7mo#0.55#566#vegetables#Off the Vine +Snake gourd#SnakvegesfHGvt#0.92#626#vegetables#The Pantry +Snake gourd#SnakvegedlNiBk#0.92#669#vegetables#TriCounty Produce +Snake gourd#Snakvegec5n1UM#0.92#143#vegetables#Off the Vine +Wasabi#Wasavege5P5pZp#0.67#751#vegetables#The Pantry +Wasabi#Wasavege6EEE9r#0.68#559#vegetables#TriCounty Produce +Wasabi#Wasavege1ve7TY#0.65#61#vegetables#Off the Vine +Yam#YamvegeRN9ONH#0.57#438#vegetables#The Pantry +Yam#YamvegeWjdzeA#0.56#564#vegetables#TriCounty Produce +Yam#YamvegeI1AnyI#0.56#456#vegetables#Off the Vine +Apple Fritters#AppldessDj96hw#6.12#0#desserts#Mom's Kitchen +Apple Fritters#AppldessrN1kvM#6.06#0#desserts#The Baking Pan +Banana Split#Banadess7tpjkJ#10.86#0#desserts#Mom's Kitchen +Banana Split#Banadessfif758#11.07#0#desserts#The Baking Pan +Blueberry Boy Bait#BluedesseX2LVU#3.72#0#desserts#Mom's Kitchen +Blueberry Boy Bait#Bluedess9zLhaH#3.93#0#desserts#The Baking Pan +Candied Cranberries#CanddessjW92p3#1.77#0#desserts#Mom's Kitchen +Candied Cranberries#CanddesskhtVoQ#1.72#0#desserts#The Baking Pan +Daiquiri Souffle#DaiqdessebnYcy#9.54#0#desserts#Mom's Kitchen +Daiquiri Souffle#DaiqdessfM1DnX#9.72#0#desserts#The Baking Pan +Bananas Flambe#BanadesscczumD#6.94#0#desserts#Mom's Kitchen +Bananas Flambe#Banadess8qNfxd#7.07#0#desserts#The Baking Pan +Pie, Apple#Pie,desshcSHhT#7.88#0#desserts#Mom's Kitchen +Pie, Apple#Pie,dessTbiwDp#7.88#0#desserts#The Baking Pan +Pie, Pumpkin#Pie,desswhPBPB#6.00#0#desserts#Mom's Kitchen +Pie, Pumpkin#Pie,dessDg3NWl#6.24#0#desserts#The Baking Pan +Pie, Blueberry#Pie,dessw9VdgD#2.14#0#desserts#Mom's Kitchen +Pie, Blueberry#Pie,dessiSjZKD#2.12#0#desserts#The Baking Pan +Pie, Pecan#Pie,dess2NqhNR#12.70#0#desserts#Mom's Kitchen +Pie, Pecan#Pie,dessB1LfcE#12.33#0#desserts#The Baking Pan +Pie, Cranberry Apple#Pie,dess1mL7IS#10.16#0#desserts#Mom's Kitchen +Pie, Cranberry Apple#Pie,dessmDhkUA#10.16#0#desserts#The Baking Pan +Pie, Banana Cream#Pie,dessH80DuG#7.35#0#desserts#Mom's Kitchen +Pie, Banana Cream#Pie,dessf1YvFb#7.08#0#desserts#The Baking Pan +Pie, Key Lime#Pie,desshtli5N#4.85#0#desserts#Mom's Kitchen +Pie, Key Lime#Pie,dessMwQkKm#5.13#0#desserts#The Baking Pan +Pie, Lemon Meringue#Pie,dess9naVkX#3.74#0#desserts#Mom's Kitchen +Pie, Lemon Meringue#Pie,dessKYcNML#3.67#0#desserts#The Baking Pan +Pie, Caramel#Pie,dessSUuiIU#2.27#0#desserts#Mom's Kitchen +Pie, Caramel#Pie,dessvo8uHh#2.33#0#desserts#The Baking Pan +Pie, Raspberry#Pie,dessUHhMlS#2.36#0#desserts#Mom's Kitchen +Pie, Raspberry#Pie,dessJflbf5#2.36#0#desserts#The Baking Pan +Ice Cream, Chocolate#Ice desseXuyxx#1.44#0#desserts#Mom's Kitchen +Ice Cream, Chocolate#Ice dessASBohf#1.41#0#desserts#The Baking Pan +Ice Cream, Vanilla#Ice dessYnzbbt#11.92#0#desserts#Mom's Kitchen +Ice Cream, Vanilla#Ice dessUBBKp8#11.58#0#desserts#The Baking Pan +Ice Cream, Strawberry#Ice dessfTwKhD#1.90#0#desserts#Mom's Kitchen +Ice Cream, Strawberry#Ice dessaO9Fxf#1.99#0#desserts#The Baking Pan +Ice Cream, Rocky Road#Ice dessyIri3P#13.10#0#desserts#Mom's Kitchen +Ice Cream, Rocky Road#Ice dessZuLr8F#13.48#0#desserts#The Baking Pan +Ice Cream, Mint 
Chocolate Chip#Ice dessV1IGG7#5.75#0#desserts#Mom's Kitchen +Ice Cream, Mint Chocolate Chip#Ice dessX1gEQ4#5.64#0#desserts#The Baking Pan +Ice Cream Sundae#Ice dessbhlAXt#5.62#0#desserts#Mom's Kitchen +Ice Cream Sundae#Ice dessByapxl#5.72#0#desserts#The Baking Pan +Cobbler, Peach#CobbdessYUGeOB#10.14#0#desserts#Mom's Kitchen +Cobbler, Peach#CobbdessXfEtUK#10.43#0#desserts#The Baking Pan +Cobbler, Berry-Pecan#Cobbdessx3htak#5.36#0#desserts#Mom's Kitchen +Cobbler, Berry-Pecan#Cobbdesse4FUVI#5.41#0#desserts#The Baking Pan +Cobbler, Blueberry#CobbdessbiI0oF#3.78#0#desserts#Mom's Kitchen +Cobbler, Blueberry#CobbdessMXxbBN#3.57#0#desserts#The Baking Pan +Cobbler, Cherry#CobbdessNSa8QW#12.58#0#desserts#Mom's Kitchen +Cobbler, Cherry#CobbdessA1dADa#12.10#0#desserts#The Baking Pan +Cobbler, Huckleberry#Cobbdess3t6O8d#3.99#0#desserts#Mom's Kitchen +Cobbler, Huckleberry#CobbdessGI9euK#3.88#0#desserts#The Baking Pan +Cobbler, Rhubarb#Cobbdess22X40Z#9.54#0#desserts#Mom's Kitchen +Cobbler, Rhubarb#CobbdessPfnCT0#9.27#0#desserts#The Baking Pan +Cobbler, Strawberry#CobbdessI78188#12.43#0#desserts#Mom's Kitchen +Cobbler, Strawberry#CobbdessH3LdgQ#12.20#0#desserts#The Baking Pan +Cobbler, Zucchini#Cobbdess5rK4dP#11.24#0#desserts#Mom's Kitchen +Cobbler, Zucchini#Cobbdess4Ez8kS#10.51#0#desserts#The Baking Pan +Brownies#BrowdessmogdTl#7.62#0#desserts#Mom's Kitchen +Brownies#Browdess84Qc1z#7.55#0#desserts#The Baking Pan +Fudge Bar#Fudgdess8iXSyf#11.72#0#desserts#Mom's Kitchen +Fudge Bar#FudgdessakU1Id#12.29#0#desserts#The Baking Pan +Cookies, Oatmeal#Cookdessnq9Oya#2.84#0#desserts#Mom's Kitchen +Cookies, Oatmeal#CookdessBhgp7p#2.68#0#desserts#The Baking Pan +Cookies, Chocolate Chip#CookdessRVszsZ#12.73#0#desserts#Mom's Kitchen +Cookies, Chocolate Chip#CookdessSOoHmT#12.26#0#desserts#The Baking Pan +Cookies, Peanut Butter#Cookdess2UcMI2#7.82#0#desserts#Mom's Kitchen +Cookies, Peanut Butter#Cookdess1cILme#7.46#0#desserts#The Baking Pan +Mousse, Chocolate#MousdessDpN4sQ#6.25#0#desserts#Mom's Kitchen +Mousse, Chocolate#Mousdess8FyFT8#5.96#0#desserts#The Baking Pan +Mousse, Blueberry Maple#MousdessacwrkO#7.28#0#desserts#Mom's Kitchen +Mousse, Blueberry Maple#MousdessbiCMFg#7.21#0#desserts#The Baking Pan +Mousse, Chocolate Banana#MousdessIeW4qz#5.13#0#desserts#Mom's Kitchen +Mousse, Chocolate Banana#Mousdess1De9oL#5.08#0#desserts#The Baking Pan +Mousse, Cherry#Mousdesss1bF8H#13.05#0#desserts#Mom's Kitchen +Mousse, Cherry#Mousdess0ujevx#12.43#0#desserts#The Baking Pan +Mousse, Eggnog#MousdessZ38hXj#9.07#0#desserts#Mom's Kitchen +Mousse, Eggnog#Mousdesshs05ST#8.81#0#desserts#The Baking Pan +Mousse, Strawberry#MousdessHCDlBK#5.58#0#desserts#Mom's Kitchen +Mousse, Strawberry#MousdessSZ4PyW#5.36#0#desserts#The Baking Pan +Sherbet, Cantaloupe#Sherdess3DCxUg#3.11#0#desserts#Mom's Kitchen +Sherbet, Cantaloupe#Sherdesscp2VIz#2.99#0#desserts#The Baking Pan +Sherbet, Lemon Milk#Sherdess1JVFOS#7.57#0#desserts#Mom's Kitchen +Sherbet, Lemon Milk#SherdessC865vu#7.57#0#desserts#The Baking Pan +Sherbet, Orange Crush#Sherdess8W8Mb9#4.32#0#desserts#Mom's Kitchen +Sherbet, Orange Crush#SherdessxmVJBF#4.16#0#desserts#The Baking Pan +Sherbet, Blueberry#SherdessFAgxqp#3.46#0#desserts#Mom's Kitchen +Sherbet, Blueberry#SherdessMPL87u#3.60#0#desserts#The Baking Pan +Sherbet, Raspberry#Sherdesse86ugA#6.08#0#desserts#Mom's Kitchen +Sherbet, Raspberry#Sherdesslc1etR#5.85#0#desserts#The Baking Pan +Sherbet, Strawberry#SherdessFwv09m#4.63#0#desserts#Mom's Kitchen +Sherbet, Strawberry#SherdessKB0H7q#4.81#0#desserts#The Baking Pan +Tart, 
Apple#TartdessrsTyXA#3.35#0#desserts#Mom's Kitchen +Tart, Apple#Tartdessp7pyiy#3.13#0#desserts#The Baking Pan +Tart, Almond#TartdessC7FARL#6.62#0#desserts#Mom's Kitchen +Tart, Almond#Tartdess1V1A1c#6.68#0#desserts#The Baking Pan +Tart, Blueberry#TartdesssQZRXX#10.28#0#desserts#Mom's Kitchen +Tart, Blueberry#TartdessUSJSuc#10.28#0#desserts#The Baking Pan +Tart, Chocolate-Pear#Tartdess2pdOE4#5.67#0#desserts#Mom's Kitchen +Tart, Chocolate-Pear#TartdessL3aEDd#5.51#0#desserts#The Baking Pan +Tart, Lemon Fudge#Tartdess9DhZUT#3.88#0#desserts#Mom's Kitchen +Tart, Lemon Fudge#TartdesshzLOWt#3.96#0#desserts#The Baking Pan +Tart, Pecan#TartdessvSbXzd#11.80#0#desserts#Mom's Kitchen +Tart, Pecan#Tartdess6YXJec#11.04#0#desserts#The Baking Pan +Tart, Pineapple#TartdesseMfJFe#9.01#0#desserts#Mom's Kitchen +Tart, Pineapple#TartdessA2Wftr#8.44#0#desserts#The Baking Pan +Tart, Pear#Tartdess4a1BUc#10.09#0#desserts#Mom's Kitchen +Tart, Pear#TartdessNw8YPG#10.68#0#desserts#The Baking Pan +Tart, Raspberry#TartdessAVnpP6#6.18#0#desserts#Mom's Kitchen +Tart, Raspberry#TartdessfVxZFf#5.95#0#desserts#The Baking Pan +Tart, Strawberry#Tartdess4IUcZW#4.75#0#desserts#Mom's Kitchen +Tart, Strawberry#Tartdess2BeEDb#4.61#0#desserts#The Baking Pan +Tart, Raspberry#TartdesshyBd24#1.85#0#desserts#Mom's Kitchen +Tart, Raspberry#Tartdess5fqxgy#1.94#0#desserts#The Baking Pan +Trifle, Berry#TrifdessmEkbU2#12.48#0#desserts#Mom's Kitchen +Trifle, Berry#TrifdessAV9Ix8#12.60#0#desserts#The Baking Pan +Trifle, American#TrifdesscsdSCd#4.70#0#desserts#Mom's Kitchen +Trifle, American#TrifdessTArskm#4.35#0#desserts#The Baking Pan +Trifle, English#TrifdessX87q8T#8.20#0#desserts#Mom's Kitchen +Trifle, English#Trifdess52l955#8.12#0#desserts#The Baking Pan +Trifle, Orange#TrifdesslUwxwe#9.74#0#desserts#Mom's Kitchen +Trifle, Orange#TrifdessFrfCHP#10.22#0#desserts#The Baking Pan +Trifle, Pumpkin#TrifdessJKFN96#4.72#0#desserts#Mom's Kitchen +Trifle, Pumpkin#TrifdessMNw4EV#4.95#0#desserts#The Baking Pan +Trifle, Scottish#TrifdessFa0JdK#13.63#0#desserts#Mom's Kitchen +Trifle, Scottish#TrifdessAAUQCN#14.03#0#desserts#The Baking Pan +Trifle, Sherry#TrifdesscuttJg#4.42#0#desserts#Mom's Kitchen +Trifle, Sherry#TrifdesspRGpfP#4.21#0#desserts#The Baking Pan +Trifle, Strawberry#TrifdessAd5TpV#3.58#0#desserts#Mom's Kitchen +Trifle, Strawberry#Trifdess1rtW0A#3.58#0#desserts#The Baking Pan +Trifle, Scotch Whiskey#Trifdess2zJsGi#5.44#0#desserts#Mom's Kitchen +Trifle, Scotch Whiskey#TrifdessL8nuI6#5.18#0#desserts#The Baking Pan +Cheesecake, Amaretto#CheedessOJBqfD#11.89#0#desserts#Mom's Kitchen +Cheesecake, Amaretto#CheedessVnDf14#11.89#0#desserts#The Baking Pan +Cheesecake, Apple#Cheedessuks1YK#11.22#0#desserts#Mom's Kitchen +Cheesecake, Apple#CheedessMYKaKK#11.01#0#desserts#The Baking Pan +Cheesecake, Apricot#CheedessKUxTYY#12.34#0#desserts#Mom's Kitchen +Cheesecake, Apricot#CheedessMvB1pr#11.88#0#desserts#The Baking Pan +Cheesecake, Australian#CheedessQ9WAIn#2.70#0#desserts#Mom's Kitchen +Cheesecake, Australian#CheedessE6Jyjc#2.53#0#desserts#The Baking Pan +Cheesecake, Arkansas#CheedessTbqzmw#6.98#0#desserts#Mom's Kitchen +Cheesecake, Arkansas#CheedesstWJZfC#6.66#0#desserts#The Baking Pan +Cheesecake, Blueberry#Cheedessyo51KL#8.07#0#desserts#Mom's Kitchen +Cheesecake, Blueberry#Cheedess4Hz7P4#8.62#0#desserts#The Baking Pan +Cheesecake, Cherry#CheedessEahRkC#4.40#0#desserts#Mom's Kitchen +Cheesecake, Cherry#Cheedess3Nx4jZ#4.65#0#desserts#The Baking Pan +Cheesecake, Cran-Raspberry#CheedessrJsr9i#13.47#0#desserts#Mom's Kitchen +Cheesecake, 
Cran-Raspberry#CheedesshcuXCy#14.00#0#desserts#The Baking Pan +Cheesecake, German Chocolate#CheedesswayvJL#12.03#0#desserts#Mom's Kitchen +Cheesecake, German Chocolate#CheedessebTAeB#11.58#0#desserts#The Baking Pan +Cheesecake, Turtle#CheedessLqgeIA#12.19#0#desserts#Mom's Kitchen +Cheesecake, Turtle#CheedessvyNohA#12.07#0#desserts#The Baking Pan +Brownies, Apple#BrowdessIDW1Cc#5.44#0#desserts#Mom's Kitchen +Brownies, Apple#BrowdessyRMrAH#5.14#0#desserts#The Baking Pan +Brownies, Fudge#BrowdessmIHIFJ#5.19#0#desserts#Mom's Kitchen +Brownies, Fudge#BrowdessqewJ38#5.10#0#desserts#The Baking Pan +Brownies, Almond Macaroon#BrowdessniK7QI#10.57#0#desserts#Mom's Kitchen +Brownies, Almond Macaroon#BrowdessgkXURH#10.36#0#desserts#The Baking Pan +Brownies, Butterscotch#BrowdesslpA06E#7.16#0#desserts#Mom's Kitchen +Brownies, Butterscotch#BrowdessK5hofE#7.30#0#desserts#The Baking Pan +Brownies, Caramel#BrowdessVGfoA8#3.07#0#desserts#Mom's Kitchen +Brownies, Caramel#Browdess5jvVMM#3.13#0#desserts#The Baking Pan +Brownies, Cherry#Browdessyoa66A#3.39#0#desserts#Mom's Kitchen +Brownies, Cherry#BrowdessIg2JuF#3.39#0#desserts#The Baking Pan +Brownies, Chocolate Chip#Browdessb9dc59#6.18#0#desserts#Mom's Kitchen +Brownies, Chocolate Chip#BrowdessvW4nOx#6.43#0#desserts#The Baking Pan +Brownies, Coconut#BrowdessWPHrVR#3.06#0#desserts#Mom's Kitchen +Brownies, Coconut#BrowdessGVBlML#2.86#0#desserts#The Baking Pan +Brownies, Cream Cheese#Browdess1OyRay#12.74#0#desserts#Mom's Kitchen +Brownies, Cream Cheese#Browdess2fRsNv#12.61#0#desserts#The Baking Pan +Brownies, Fudge Mint#Browdessl7DP7k#11.45#0#desserts#Mom's Kitchen +Brownies, Fudge Mint#Browdessv70VKQ#11.34#0#desserts#The Baking Pan +Brownies, Mint Chip#BrowdessDDMvF7#1.81#0#desserts#Mom's Kitchen +Brownies, Mint Chip#Browdess0j9PBD#1.84#0#desserts#The Baking Pan +Cake, Angel Food#CakedessEaqGaE#11.18#0#desserts#Mom's Kitchen +Cake, Angel Food#CakedessJyAyFe#11.18#0#desserts#The Baking Pan +Cake, Chocolate#CakedessKLXFbn#10.11#0#desserts#Mom's Kitchen +Cake, Chocolate#CakedessfNP5Hg#9.91#0#desserts#The Baking Pan +Cake, Carrot#CakedessUTgMoV#4.20#0#desserts#Mom's Kitchen +Cake, Carrot#CakedessQdkaYg#4.00#0#desserts#The Baking Pan +Cake, Lemon Blueberry#CakedessepkeEW#11.73#0#desserts#Mom's Kitchen +Cake, Lemon Blueberry#CakedessHTKyQs#12.42#0#desserts#The Baking Pan +Cake Triple Fudge#CakedessiZ75lR#7.92#0#desserts#Mom's Kitchen +Cake Triple Fudge#CakedessWRrSXP#8.00#0#desserts#The Baking Pan +Cake, Walnut#CakedessveYVXZ#10.83#0#desserts#Mom's Kitchen +Cake, Walnut#Cakedesse22rT5#11.04#0#desserts#The Baking Pan +Cake, French Apple#CakedessjA2Kxv#1.95#0#desserts#Mom's Kitchen +Cake, French Apple#CakedessNBHCk0#1.86#0#desserts#The Baking Pan +Cake, Fig#CakedessOncX4y#6.82#0#desserts#Mom's Kitchen +Cake, Fig#CakedessTJtffn#7.08#0#desserts#The Baking Pan +Cake, Maple#CakedessnoGPRF#3.04#0#desserts#Mom's Kitchen +Cake, Maple#CakedessfVattM#3.22#0#desserts#The Baking Pan +Cake, Devil's Food#CakedessiXcDCt#4.73#0#desserts#Mom's Kitchen +Cake, Devil's Food#CakedessnBZk45#4.82#0#desserts#The Baking Pan +Cake, Double-Lemon#CakedesskeS0Vd#3.46#0#desserts#Mom's Kitchen +Cake, Double-Lemon#Cakedess50vx53#3.60#0#desserts#The Baking Pan +Sorbet, Blackberry#SorbdessQoa0CE#9.88#0#desserts#Mom's Kitchen +Sorbet, Blackberry#SorbdessqoOYzv#9.78#0#desserts#The Baking Pan diff --git a/examples/persist/gettingStarted/vendors.txt b/examples/persist/gettingStarted/vendors.txt new file mode 100644 index 0000000..528e1b1 --- /dev/null +++ b/examples/persist/gettingStarted/vendors.txt @@ 
-0,0 +1,6 @@
+TriCounty Produce#309 S. Main Street#Middle Town#MN#55432#763 555 5761#Mort Dufresne#763 555 5765
+Simply Fresh#15612 Bogart Lane#Harrigan#WI#53704#420 333 3912#Cheryl Swedberg#420 333 3952
+Off the Vine#133 American Ct.#Centennial#IA#52002#563 121 3800#Bob King#563 121 3800 x54
+The Pantry#1206 N. Creek Way#Middle Town#MN#55432#763 555 3391#Sully Beckstrom#763 555 3391
+Mom's Kitchen#53 Yerman Ct.#Middle Town#MN#55432#763 554 9200#Maggie Kultgen#763 554 9200 x12
+The Baking Pan#1415 53rd Ave.#Dutchin#MN#56304#320 442 2277#Mike Roan#320 442 6879
diff --git a/examples/persist/sqlapp/DataAccessor.java b/examples/persist/sqlapp/DataAccessor.java
new file mode 100644
index 0000000..da35846
--- /dev/null
+++ b/examples/persist/sqlapp/DataAccessor.java
@@ -0,0 +1,228 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package persist.sqlapp;
+
+import java.util.Collection;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityIndex;
+import com.sleepycat.persist.EntityJoin;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.ForwardCursor;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+
+/**
+ * The data accessor class for the entity model.
+ *
+ * @author chao
+ */
+class DataAccessor {
+    /* Employee Accessors */
+    PrimaryIndex<Integer, Employee> employeeById;
+    SecondaryIndex<String, Integer, Employee> employeeByName;
+    SecondaryIndex<Float, Integer, Employee> employeeBySalary;
+    SecondaryIndex<Integer, Integer, Employee> employeeByManagerId;
+    SecondaryIndex<Integer, Integer, Employee> employeeByDepartmentId;
+
+    /* Department Accessors */
+    PrimaryIndex<Integer, Department> departmentById;
+    SecondaryIndex<String, Integer, Department> departmentByName;
+
+    /** Opens all primary and secondary indices. */
+    public DataAccessor(EntityStore store)
+        throws DatabaseException {
+
+        /* Primary index of the Employee database. */
+        employeeById =
+            store.getPrimaryIndex(Integer.class, Employee.class);
+
+        /* Secondary indices of the Employee database. */
+        employeeByName = store.getSecondaryIndex(employeeById,
+                                                 String.class,
+                                                 "employeeName");
+        employeeBySalary = store.getSecondaryIndex(employeeById,
+                                                   Float.class,
+                                                   "salary");
+        employeeByManagerId = store.getSecondaryIndex(employeeById,
+                                                      Integer.class,
+                                                      "managerId");
+        employeeByDepartmentId = store.getSecondaryIndex(employeeById,
+                                                         Integer.class,
+                                                         "departmentId");
+
+        /* Primary index of the Department database. */
+        departmentById =
+            store.getPrimaryIndex(Integer.class, Department.class);
+        /* Secondary index of the Department database. */
+        departmentByName = store.getSecondaryIndex(departmentById,
+                                                   String.class,
+                                                   "departmentName");
+    }
+
+    /**
+     * Do a prefix query, similar to the SQL statement:
+     *
    +     * SELECT * FROM table WHERE col LIKE 'prefix%';
    +     * 
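+     * A usage sketch (mirroring the call made from SQLApp.runApp(), which
+     * lists every employee whose name starts with "M"):
+     *
+     *   EntityCursor<Employee> cursor =
+     *       dao.doPrefixQuery(dao.employeeByName, "M");
+     *   try {
+     *       for (Employee e : cursor) { System.out.println(e); }
+     *   } finally {
+     *       cursor.close();
+     *   }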
+     *
+     * @param index the entity index on a String key to search
+     * @param prefix the non-empty key prefix to match
+     * @return a cursor over the matching entities; the caller must close it
+     * @throws DatabaseException
+     */
+    public <V> EntityCursor<V> doPrefixQuery(EntityIndex<String, V> index,
+                                             String prefix)
+        throws DatabaseException {
+
+        assert (index != null);
+        assert (prefix.length() > 0);
+
+        /*
+         * Open a cursor for traversing entities in a key range: the
+         * exclusive upper bound is formed by incrementing the last
+         * character of the prefix.
+         */
+        char[] ca = prefix.toCharArray();
+        final int lastCharIndex = ca.length - 1;
+        ca[lastCharIndex]++;
+        return doRangeQuery(index, prefix, true, String.valueOf(ca), false);
+    }
+
+    /**
+     * Do a range query, similar to the SQL statement:
+     *
    +     * SELECT * FROM table WHERE col >= fromKey AND col <= toKey;
    +     * 
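+     * A usage sketch (equivalent to the call SQLApp.runApp() makes to
+     * select salaries between 6000 and 8000, both bounds inclusive):
+     *
+     *   EntityCursor<Employee> cursor =
+     *       dao.doRangeQuery(dao.employeeBySalary, 6000f, true, 8000f, true);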
+     *
+     * @param index the entity index to search
+     * @param fromKey the lower bound of the key range
+     * @param fromInclusive whether fromKey itself is included
+     * @param toKey the upper bound of the key range
+     * @param toInclusive whether toKey itself is included
+     * @return a cursor over the matching entities; the caller must close it
+     * @throws DatabaseException
+     */
+    public <K, V> EntityCursor<V> doRangeQuery(EntityIndex<K, V> index,
+                                               K fromKey,
+                                               boolean fromInclusive,
+                                               K toKey,
+                                               boolean toInclusive)
+        throws DatabaseException {
+
+        assert (index != null);
+
+        /* Open a cursor for traversing entities in the key range. */
+        return index.entities(fromKey,
+                              fromInclusive,
+                              toKey,
+                              toInclusive);
+    }
+
+    /**
+     * Do an "AND" join on a single primary database, similar to the SQL:
+     *
    +     * SELECT * FROM table WHERE col1 = key1 AND col2 = key2;
    +     * 
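+     * A usage sketch (the equality join SQLApp.runApp() performs; both
+     * conditions must hold for an employee to be returned):
+     *
+     *   ForwardCursor<Employee> cursor =
+     *       dao.doTwoConditionsJoin(dao.employeeById,
+     *                               dao.employeeByName, "Victor Hugo",
+     *                               dao.employeeByDepartmentId, 2);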
+     *
+     * @param pk the primary index of the entity database
+     * @param sk1 the first secondary index to match against
+     * @param key1 the value sk1 must equal
+     * @param sk2 the second secondary index to match against
+     * @param key2 the value sk2 must equal
+     * @return a cursor over the entities satisfying both conditions
+     * @throws DatabaseException
+     */
+    public <PK, SK1, SK2, E> ForwardCursor<E>
+        doTwoConditionsJoin(PrimaryIndex<PK, E> pk,
+                            SecondaryIndex<SK1, PK, E> sk1,
+                            SK1 key1,
+                            SecondaryIndex<SK2, PK, E> sk2,
+                            SK2 key2)
+        throws DatabaseException {
+
+        assert (pk != null);
+        assert (sk1 != null);
+        assert (sk2 != null);
+
+        EntityJoin<PK, E> join = new EntityJoin<PK, E>(pk);
+        join.addCondition(sk1, key1);
+        join.addCondition(sk2, key2);
+
+        return join.entities();
+    }
+
+    /**
+     * Query the Employee database by Department's secondary key: deptName.
+     *
    +     * SELECT * FROM employee e, department d
    +     * WHERE e.departmentId = d.departmentId
    +     * AND d.departmentName = 'deptName';
    +     * 
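+     * The join needs no explicit loop over Employee: departmentName is a
+     * ONE_TO_ONE secondary key, so the Department row is fetched directly,
+     * and subIndex() then restricts employeeByDepartmentId to that single
+     * departmentId. As called from SQLApp.runApp():
+     *
+     *   dao.getEmployeeByDeptName("Engineering");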
+     *
+     * @param deptName the department name to look up
+     * @throws DatabaseException
+     */
+    public void getEmployeeByDeptName(String deptName)
+        throws DatabaseException {
+
+        Department dept = departmentByName.get(deptName);
+        /* Do an inner join on Department and Employee. */
+        EntityCursor<Employee> empCursor =
+            employeeByDepartmentId.
+                subIndex(dept.getDepartmentId()).entities();
+        try {
+            for (Employee emp : empCursor) {
+                System.out.println(emp);
+            }
+            System.out.println();
+        } finally {
+            empCursor.close();
+        }
+    }
+
+    /**
+     * Query the Employee database by adding a filter on Department's
+     * non-secondary-key field: deptLocation.
+     *
    +     * SELECT * FROM employee e, department d
    +     * WHERE e.departmentId = d.departmentId
    +     * AND d.location = 'deptLocation';
    +     * 
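+     * Since location is a plain field with no secondary index, this query
+     * cannot use an index on Department; the method scans the whole
+     * Department database and filters in application code. As called from
+     * SQLApp.runApp():
+     *
+     *   dao.getEmployeeByDeptLocation("North America");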
+     *
+     * @param deptLocation the department location to filter on
+     * @throws DatabaseException
+     */
+    public void getEmployeeByDeptLocation(String deptLocation)
+        throws DatabaseException {
+
+        /* Iterate over the Department database. */
+        Collection<Department> departments =
+            departmentById.sortedMap().values();
+        for (Department dept : departments) {
+            if (dept.getLocation().equals(deptLocation)) {
+                /* A nested loop to do an equi-join. */
+                EntityCursor<Employee> empCursor =
+                    employeeByDepartmentId.
+                        subIndex(dept.getDepartmentId()).
+                            entities();
+                try {
+                    /* Iterate over all employees in dept. */
+                    for (Employee emp : empCursor) {
+                        System.out.println(emp);
+                    }
+                } finally {
+                    empCursor.close();
+                }
+            }
+        }
+    }
+}
diff --git a/examples/persist/sqlapp/Department.java b/examples/persist/sqlapp/Department.java
new file mode 100644
index 0000000..ae4bcd4
--- /dev/null
+++ b/examples/persist/sqlapp/Department.java
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package persist.sqlapp;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+/**
+ * The Department entity class.
+ *
+ * @author chao
+ */
+@Entity
+class Department {
+
+    @PrimaryKey
+    int departmentId;
+
+    @SecondaryKey(relate = ONE_TO_ONE)
+    String departmentName;
+
+    String location;
+
+    public Department(int departmentId,
+                      String departmentName,
+                      String location) {
+
+        this.departmentId = departmentId;
+        this.departmentName = departmentName;
+        this.location = location;
+    }
+
+    @SuppressWarnings("unused")
+    private Department() {} // Needed for deserialization.
+
+    public int getDepartmentId() {
+        return departmentId;
+    }
+
+    public String getDepartmentName() {
+        return departmentName;
+    }
+
+    public String getLocation() {
+        return location;
+    }
+
+    @Override
+    public String toString() {
+        return this.departmentId + ", " +
+               this.departmentName + ", " +
+               this.location;
+    }
+}
diff --git a/examples/persist/sqlapp/Employee.java b/examples/persist/sqlapp/Employee.java
new file mode 100644
index 0000000..0e9129a
--- /dev/null
+++ b/examples/persist/sqlapp/Employee.java
@@ -0,0 +1,102 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package persist.sqlapp;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+/**
+ * The Employee entity class.
+ * + * @author chao + */ +@Entity +class Employee { + + @PrimaryKey + int employeeId; + + /* Many Employees may have the same name. */ + @SecondaryKey(relate = MANY_TO_ONE) + String employeeName; + + /* Many Employees may have the same salary. */ + @SecondaryKey(relate = MANY_TO_ONE) + float salary; + + @SecondaryKey(relate = MANY_TO_ONE, relatedEntity=Employee.class, + onRelatedEntityDelete=NULLIFY) + Integer managerId; // Use "Integer" to allow null values. + + @SecondaryKey(relate = MANY_TO_ONE, relatedEntity=Department.class, + onRelatedEntityDelete=NULLIFY) + Integer departmentId; + + String address; + + public Employee(int employeeId, + String employeeName, + float salary, + Integer managerId, + int departmentId, + String address) { + + this.employeeId = employeeId; + this.employeeName = employeeName; + this.salary = salary; + this.managerId = managerId; + this.departmentId = departmentId; + this.address = address; + } + + @SuppressWarnings("unused") + private Employee() {} // Needed for deserialization. + + public String getAddress() { + return address; + } + + public int getDepartmentId() { + return departmentId; + } + + public int getEmployeeId() { + return employeeId; + } + + public String getEmployeeName() { + return employeeName; + } + + public Integer getManagerId() { + return managerId; + } + + public float getSalary() { + return salary; + } + + @Override + public String toString() { + return this.employeeId + ", " + + this.employeeName + ", " + + this.salary + ", " + + this.managerId + ", " + + this.departmentId + ", " + + this.address; + } +} diff --git a/examples/persist/sqlapp/SQLApp.java b/examples/persist/sqlapp/SQLApp.java new file mode 100644 index 0000000..02b5c78 --- /dev/null +++ b/examples/persist/sqlapp/SQLApp.java @@ -0,0 +1,329 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package persist.sqlapp; + +import java.io.File; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.ForwardCursor; +import com.sleepycat.persist.StoreConfig; + +/** + * An example shows how some common SQL queries are implemented using DPL. + * + * @see #usage + * + * @author chao + */ +public class SQLApp { + + private static String envDir = "./tmp"; + private static boolean cleanEnvOnExit = false; + private static Environment env = null; + private static EntityStore store = null; + private static DataAccessor dao = null; + + /** + * Setup a Berkeley DB engine environment, and preload some example records. + * + * @throws com.sleepycat.je.DatabaseException + */ + public void setup() + throws DatabaseException { + + /* Open a transactional Berkeley DB engine environment. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(new File(envDir), envConfig); + + /* Open a transactional entity store. 
*/ + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + store = new EntityStore(env, "SQLAppStore", storeConfig); + + /* Initialize the data access object. */ + dao = new DataAccessor(store); + + /* Preload some example records. */ + loadDepartmentDb(); + loadEmployeeDb(); + } + + /* Load the department database. */ + private void loadDepartmentDb() + throws DatabaseException { + + dao.departmentById.put + (new Department(1, "CEO Office", "North America")); + dao.departmentById.put + (new Department(2, "Sales", "EURO")); + dao.departmentById.put + (new Department(3, "HR", "MEA")); + dao.departmentById.put + (new Department(4, "Engineering", "APAC")); + dao.departmentById.put + (new Department(5, "Support", "LATAM")); + } + + /* Load the employee database. */ + private void loadEmployeeDb() + throws DatabaseException { + + /* Add the corporate's CEO using the Employee primary index. */ + dao.employeeById.put(new Employee(1, // employeeId + "Abraham Lincoln", //employeeName + 10000.0f, //salary + null, //managerId + 1, //departmentId + "Washington D.C., USA")); //address + + /* Add 4 managers responsible for 4 departments. */ + dao.employeeById.put(new Employee(2, + "Augustus", + 9000.0f, + 1, + 2, + "Rome, Italy")); + dao.employeeById.put(new Employee(3, + "Cleopatra", + 7000.0f, + 1, + 3, + "Cairo, Egypt")); + dao.employeeById.put(new Employee(4, + "Confucius", + 7500.0f, + 1, + 4, + "Beijing, China")); + dao.employeeById.put(new Employee(5, + "Toussaint Louverture", + 6800.0f, + 1, + 5, + "Port-au-Prince, Haiti")); + + /* Add 2 employees per department. */ + dao.employeeById.put(new Employee(6, + "William Shakespeare", + 7300.0f, + 2, + 2, + "London, England")); + dao.employeeById.put(new Employee(7, + "Victor Hugo", + 7000.0f, + 2, + 2, + "Paris, France")); + dao.employeeById.put(new Employee(8, + "Yitzhak Rabin", + 6500.0f, + 3, + 3, + "Jerusalem, Israel")); + dao.employeeById.put(new Employee(9, + "Nelson Rolihlahla Mandela", + 6400.0f, + 3, + 3, + "Cape Town, South Africa")); + dao.employeeById.put(new Employee(10, + "Meiji Emperor", + 6600.0f, + 4, + 4, + "Tokyo, Japan")); + dao.employeeById.put(new Employee(11, + "Mohandas Karamchand Gandhi", + 7600.0f, + 4, + 4, + "New Delhi, India")); + dao.employeeById.put(new Employee(12, + "Ayrton Senna da Silva", + 5600.0f, + 5, + 5, + "Brasilia, Brasil")); + dao.employeeById.put(new Employee(13, + "Ronahlinho De Assis Moreira", + 6100.0f, + 5, + 5, + "Brasilia, Brasil")); + } + + /** Run the SQL examples. */ + public void runApp() + throws DatabaseException { + + /* Print departmemt database contents order by departmentId. */ + System.out.println("SELECT * FROM department ORDER BY departmentId;"); + printQueryResults(dao.departmentById.entities()); + + /* Print departmemt database contents order by departmentName. */ + System.out.println("SELECT * FROM department " + + "ORDER BY departmentName;"); + printQueryResults(dao.departmentByName.entities()); + + /* Print employee database contents order by employeeId. */ + System.out.println("SELECT * FROM employee ORDER BY employeeId;"); + printQueryResults(dao.employeeById.entities()); + + /* Do a prefix query. */ + System.out.println("SELECT * FROM employee WHERE employeeName " + + "LIKE 'M%';"); + printQueryResults(dao.doPrefixQuery(dao.employeeByName, "M")); + + /* Do a range query. 
+    /** Run the SQL examples. */
+    public void runApp()
+        throws DatabaseException {
+
+        /* Print department database contents ordered by departmentId. */
+        System.out.println("SELECT * FROM department ORDER BY departmentId;");
+        printQueryResults(dao.departmentById.entities());
+
+        /* Print department database contents ordered by departmentName. */
+        System.out.println("SELECT * FROM department " +
+                           "ORDER BY departmentName;");
+        printQueryResults(dao.departmentByName.entities());
+
+        /* Print employee database contents ordered by employeeId. */
+        System.out.println("SELECT * FROM employee ORDER BY employeeId;");
+        printQueryResults(dao.employeeById.entities());
+
+        /* Do a prefix query. */
+        System.out.println("SELECT * FROM employee WHERE employeeName " +
+                           "LIKE 'M%';");
+        printQueryResults(dao.doPrefixQuery(dao.employeeByName, "M"));
+
+        /* Do a range query. */
+        System.out.println("SELECT * FROM employee WHERE salary >= 6000 AND " +
+                           "salary <= 8000;");
+        printQueryResults(dao.doRangeQuery(dao.employeeBySalary,
+                                           Float.valueOf(6000), //fromKey
+                                           true,                //fromInclusive
+                                           Float.valueOf(8000), //toKey
+                                           true));              //toInclusive
+
+        /* Two conditions join on a single primary database. */
+        System.out.println("SELECT * FROM employee " +
+                           "WHERE employeeName = 'Victor Hugo' " +
+                           "AND departmentId = 2;");
+        printQueryResults(dao.doTwoConditionsJoin(dao.employeeById,
+                                                  dao.employeeByName,
+                                                  "Victor Hugo",
+                                                  dao.employeeByDepartmentId,
+                                                  Integer.valueOf(2)));
+
+        /*
+         * Two conditions join on two primary databases combined with a
+         * secondary key search.
+         */
+        System.out.println("SELECT * FROM employee e, department d " +
+                           "WHERE e.departmentId = d.departmentId " +
+                           "AND d.departmentName = 'Engineering';");
+        dao.getEmployeeByDeptName("Engineering");
+
+        /*
+         * Two conditions join on two primary databases combined with
+         * filtering on a non-secondary-key field.
+         */
+        System.out.println("SELECT * FROM employee e, department d " +
+                           "WHERE e.departmentId = d.departmentId " +
+                           "AND d.location = 'North America';");
+        dao.getEmployeeByDeptLocation("North America");
+    }
+
+    /** Print query results. */
+    public void printQueryResults(ForwardCursor<?> cursor)
+        throws DatabaseException {
+
+        try {
+            for (Object entity : cursor) {
+                System.out.println(entity);
+            }
+            System.out.println();
+        } finally {
+            cursor.close();
+        }
+    }
+
+    /**
+     * Close the store and environment.
+     */
+    public void close() {
+
+        if (store != null) {
+            try {
+                store.close();
+            } catch (DatabaseException dbe) {
+                System.err.println("Error closing store: " +
+                                   dbe.toString());
+                System.exit(-1);
+            }
+        }
+
+        if (env != null) {
+            try {
+                // Finally, close environment.
+                env.close();
+            } catch (DatabaseException dbe) {
+                System.err.println("Error closing env: " +
+                                   dbe.toString());
+                System.exit(-1);
+            }
+        }
+
+        if (cleanEnvOnExit) {
+            removeDbFiles();
+        }
+    }
+
+    private void removeDbFiles() {
+        File file = new File(envDir);
+        for (File f : file.listFiles()) {
+            f.delete();
+        }
+    }
+
+    /**
+     * @param args the command line arguments
+     */
+    public static void main(String[] args) {
+
+        /* Parse the arguments list. */
+        parseArgs(args);
+
+        try {
+            SQLApp s = new SQLApp();
+            s.setup();
+            s.runApp();
+            s.close();
+        } catch (DatabaseException e) {
+            e.printStackTrace();
+            System.exit(-1);
+        }
+    }
+
+    /* Parse input arguments. */
+    private static void parseArgs(String args[]) {
+        for(int i = 0; i < args.length; ++i) {
+            if (args[i].startsWith("-")) {
+                switch(args[i].charAt(1)) {
+                    case 'h':
+                        envDir = args[++i];
+                        break;
+                    case 'd':
+                        cleanEnvOnExit = true;
+                        break;
+                    default:
+                        usage();
+                }
+            }
+        }
+    }
+
+    private static void usage() {
+        System.out.println("Usage: java SQLApp" +
+                           "\n [-h <env directory>] " +
+                           "# environment home directory" +
+                           "\n [-d] # clean environment after program exits");
+        System.exit(-1);
+    }
+}
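The DataAccessor methods called above (doPrefixQuery, doRangeQuery, doTwoConditionsJoin) are defined elsewhere in this commit. For orientation, the sketch below shows roughly how a two-condition AND join is typically expressed with the DPL's com.sleepycat.persist.EntityJoin; the class name, parameter names, and key values here are illustrative assumptions, not code from this repository.

package persist.sqlapp;

import com.sleepycat.je.DatabaseException;
import com.sleepycat.persist.EntityJoin;
import com.sleepycat.persist.ForwardCursor;
import com.sleepycat.persist.PrimaryIndex;
import com.sleepycat.persist.SecondaryIndex;

class TwoConditionJoinSketch {
    /* AND two secondary-key conditions over one primary index. */
    static void printMatches(PrimaryIndex<Integer, Employee> byId,
                             SecondaryIndex<String, Integer, Employee> byName,
                             SecondaryIndex<Integer, Integer, Employee> byDeptId)
        throws DatabaseException {

        EntityJoin<Integer, Employee> join = new EntityJoin<>(byId);
        join.addCondition(byName, "Victor Hugo");
        join.addCondition(byDeptId, 2);

        // Entities satisfying all conditions are returned by one cursor.
        ForwardCursor<Employee> cursor = join.entities();
        try {
            for (Employee e : cursor) {
                System.out.println(e);
            }
        } finally {
            cursor.close();
        }
    }
}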
diff --git a/examples/persist/txn/PayloadDataEntity.java b/examples/persist/txn/PayloadDataEntity.java
new file mode 100644
index 0000000..9d69d19
--- /dev/null
+++ b/examples/persist/txn/PayloadDataEntity.java
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package persist.txn;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import static com.sleepycat.persist.model.Relationship.*;
+
+@Entity
+public class PayloadDataEntity {
+    @PrimaryKey
+    private int oID;
+
+    @SecondaryKey(relate=MANY_TO_ONE)
+    private String threadName;
+
+    private double doubleData;
+
+    PayloadDataEntity() {}
+
+    public double getDoubleData() { return doubleData; }
+    public int getID() { return oID; }
+    public String getThreadName() { return threadName; }
+
+    public void setDoubleData(double dd) { doubleData = dd; }
+    public void setID(int id) { oID = id; }
+    public void setThreadName(String tn) { threadName = tn; }
+}
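Before the writer thread below, a minimal view of the protocol it exercises: every DPL write in this example goes through a PrimaryIndex under an explicit transaction that is either committed or aborted. A hedged sketch under that assumption (the class and method names are invented for illustration):

package persist.txn;

import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.Environment;
import com.sleepycat.je.Transaction;
import com.sleepycat.persist.EntityStore;
import com.sleepycat.persist.PrimaryIndex;

class SingleWriteSketch {
    /* Write one entity under an explicit transaction. */
    static void writeOne(Environment env, EntityStore store, int id)
        throws DatabaseException {

        PrimaryIndex<Integer, PayloadDataEntity> index =
            store.getPrimaryIndex(Integer.class, PayloadDataEntity.class);

        Transaction txn = env.beginTransaction(null, null);
        boolean committed = false;
        try {
            PayloadDataEntity pd = new PayloadDataEntity();
            pd.setID(id);
            pd.setThreadName(Thread.currentThread().getName());
            pd.setDoubleData(0.0);
            index.put(txn, pd);     // insert or overwrite by primary key
            txn.commit();
            committed = true;
        } finally {
            if (!committed) {
                txn.abort();        // never leave the txn open
            }
        }
    }
}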
diff --git a/examples/persist/txn/StoreWriter.java b/examples/persist/txn/StoreWriter.java
new file mode 100644
index 0000000..bf5a24a
--- /dev/null
+++ b/examples/persist/txn/StoreWriter.java
@@ -0,0 +1,177 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package persist.txn;
+
+import java.util.Random;
+
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+
+public class StoreWriter extends Thread
+{
+    private EntityStore myStore = null;
+    private Environment myEnv = null;
+    private PrimaryIndex<Integer, PayloadDataEntity> pdIndex;
+    private Random generator = new Random();
+    private boolean passTxn = false;
+
+    private static final int MAX_RETRY = 20;
+
+    // Constructor. Get our handles from here.
+    StoreWriter(Environment env, EntityStore store)
+        throws DatabaseException {
+        myStore = store;
+        myEnv = env;
+
+        // Open the data accessor. This is used to store persistent
+        // objects.
+        pdIndex = myStore.getPrimaryIndex(Integer.class,
+                                          PayloadDataEntity.class);
+    }
+
+    // Thread method that writes a series of objects
+    // to the store using transaction protection.
+    // Deadlock handling is demonstrated here.
+    public void run () {
+        Transaction txn = null;
+
+        // Perform 50 transactions
+        for (int i=0; i<50; i++) {
+
+            boolean retry = true;
+            int retry_count = 0;
+            // while loop is used for deadlock retries
+            while (retry) {
+                // try block used for deadlock detection and
+                // general exception handling
+                try {
+
+                    // Get a transaction
+                    txn = myEnv.beginTransaction(null, null);
+
+                    // Write 10 PayloadDataEntity objects to the
+                    // store for each transaction
+                    for (int j = 0; j < 10; j++) {
+                        // Instantiate an object
+                        PayloadDataEntity pd = new PayloadDataEntity();
+
+                        // Set the Object ID. This is used as the primary key.
+                        pd.setID(i + j);
+
+                        // The thread name is used as a secondary key, and
+                        // it is retrieved by this class's getName() method.
+                        pd.setThreadName(getName());
+
+                        // The last bit of data that we use is a double
+                        // that we generate randomly. This data is not
+                        // indexed.
+                        pd.setDoubleData(generator.nextDouble());
+
+                        // Do the put
+                        pdIndex.put(txn, pd);
+                    }
+
+                    // commit
+                    System.out.println(getName() + " : committing txn : " + i);
+                    System.out.println(getName() + " : Found " +
+                        countObjects(null) + " objects in the store.");
+                    try {
+                        txn.commit();
+                        txn = null;
+                    } catch (DatabaseException e) {
+                        System.err.println("Error on txn commit: " +
+                            e.toString());
+                    }
+                    retry = false;
+
+                } catch (LockConflictException de) {
+                    System.out.println("################# " + getName() +
+                        " : caught deadlock");
+                    // retry if necessary
+                    if (retry_count < MAX_RETRY) {
+                        System.err.println(getName() +
+                            " : Retrying operation.");
+                        retry = true;
+                        retry_count++;
+                    } else {
+                        System.err.println(getName() +
+                            " : out of retries. Giving up.");
+                        retry = false;
+                    }
+                } catch (DatabaseException e) {
+                    // abort and don't retry
+                    retry = false;
+                    System.err.println(getName() +
+                        " : caught exception: " + e.toString());
+                    e.printStackTrace();
+                } finally {
+                    if (txn != null) {
+                        try {
+                            txn.abort();
+                        } catch (Exception e) {
+                            System.err.println("Error aborting transaction: " +
+                                e.toString());
+                            e.printStackTrace();
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    // This simply counts the number of objects contained in the
+    // store and returns the result. You can use this method
+    // in three ways:
+    //
+    // First, call it with an active txn handle.
+    //
+    // Second, configure the cursor for dirty (read-uncommitted) reads.
+    //
+    // Third, call countObjects AFTER the writer has committed
+    // its transaction.
+    //
+    // If you do none of these things, the writer thread will
+    // self-deadlock.
+    private int countObjects(Transaction txn) throws DatabaseException {
+        int count = 0;
+
+        CursorConfig cc = new CursorConfig();
+        // This is ignored if the store is not opened with uncommitted read
+        // support.
+        cc.setReadUncommitted(true);
+        EntityCursor<PayloadDataEntity> cursor = pdIndex.entities(txn, cc);
+
+        try {
+            for (PayloadDataEntity pdi : cursor) {
+                count++;
+            }
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+
+        return count;
+    }
+}
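The retry logic above is the heart of the example: a LockConflictException aborts the transaction and the whole unit of work is retried from scratch with a fresh transaction. Distilled into a reusable form, the pattern looks roughly like this (an illustrative sketch, not part of the original code; TxnWork and RetrySketch are invented names):

package persist.txn;

import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.Environment;
import com.sleepycat.je.LockConflictException;
import com.sleepycat.je.Transaction;

class RetrySketch {
    interface TxnWork {
        void run(Transaction txn) throws DatabaseException;
    }

    /* Run work in a transaction, retrying on lock conflicts. */
    static void runWithRetries(Environment env, TxnWork work, int maxRetries)
        throws DatabaseException {

        int attempt = 0;
        while (true) {
            Transaction txn = env.beginTransaction(null, null);
            try {
                work.run(txn);
                txn.commit();
                txn = null;        // committed; skip the abort below
                return;
            } catch (LockConflictException lce) {
                if (++attempt > maxRetries) {
                    throw lce;     // out of retries, as StoreWriter gives up
                }
                // else fall through, abort in finally, and retry
            } finally {
                if (txn != null) {
                    txn.abort();   // never leave the txn open
                }
            }
        }
    }
}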
diff --git a/examples/persist/txn/TxnGuideDPL.java b/examples/persist/txn/TxnGuideDPL.java
new file mode 100644
index 0000000..6954d9b
--- /dev/null
+++ b/examples/persist/txn/TxnGuideDPL.java
@@ -0,0 +1,134 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+// File TxnGuideDPL.java
+
+package persist.txn;
+
+import java.io.File;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+
+public class TxnGuideDPL {
+
+    private static String myEnvPath = "./";
+    private static String storeName = "exampleStore";
+
+    // Handles
+    private static EntityStore myStore = null;
+    private static Environment myEnv = null;
+
+    private static final int NUMTHREADS = 5;
+
+    private static void usage() {
+        System.out.println("TxnGuideDPL [-h <env directory>]");
+        System.exit(-1);
+    }
+
+    public static void main(String args[]) {
+        try {
+            // Parse the arguments list
+            parseArgs(args);
+            // Open the environment and store
+            openEnv();
+
+            // Start the threads
+            StoreWriter[] threadArray;
+            threadArray = new StoreWriter[NUMTHREADS];
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i] = new StoreWriter(myEnv, myStore);
+                threadArray[i].start();
+            }
+
+            for (int i = 0; i < NUMTHREADS; i++) {
+                threadArray[i].join();
+            }
+        } catch (Exception e) {
+            System.err.println("TxnGuideDPL: " + e.toString());
+            e.printStackTrace();
+        } finally {
+            closeEnv();
+        }
+        System.out.println("All done.");
+    }
+
+    private static void openEnv() throws DatabaseException {
+        System.out.println("opening env and store");
+
+        // Set up the environment.
+        EnvironmentConfig myEnvConfig = new EnvironmentConfig();
+        myEnvConfig.setAllowCreate(true);
+        myEnvConfig.setTransactional(true);
+        // Environment handles are free-threaded by default in JE,
+        // so we do not have to do anything to cause the
+        // environment handle to be free-threaded.
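        // [Editorial aside, not part of the original example.] A
        // transactional environment could also choose an explicit
        // durability policy at this point, for instance:
        //
        //     myEnvConfig.setDurability(Durability.COMMIT_SYNC);
        //
        // (com.sleepycat.je.Durability). This example simply relies on
        // JE's default durability.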
+ + // Set up the entity store + StoreConfig myStoreConfig = new StoreConfig(); + myStoreConfig.setAllowCreate(true); + myStoreConfig.setTransactional(true); + + // Open the environment + myEnv = new Environment(new File(myEnvPath), // Env home + myEnvConfig); + + // Open the store + myStore = new EntityStore(myEnv, storeName, myStoreConfig); + + } + + private static void closeEnv() { + System.out.println("Closing env and store"); + if (myStore != null ) { + try { + myStore.close(); + } catch (DatabaseException e) { + System.err.println("closeEnv: myStore: " + + e.toString()); + e.printStackTrace(); + } + } + + if (myEnv != null ) { + try { + myEnv.close(); + } catch (DatabaseException e) { + System.err.println("closeEnv: " + e.toString()); + e.printStackTrace(); + } + } + } + + private TxnGuideDPL() {} + + private static void parseArgs(String args[]) { + int nArgs = args.length; + for(int i = 0; i < args.length; ++i) { + if (args[i].startsWith("-")) { + switch(args[i].charAt(1)) { + case 'h': + if (i < nArgs - 1) { + myEnvPath = new String(args[++i]); + } + break; + default: + usage(); + } + } + } + } +} diff --git a/examples/resources/jboss/jndi.properties b/examples/resources/jboss/jndi.properties new file mode 100644 index 0000000..cbbd871 --- /dev/null +++ b/examples/resources/jboss/jndi.properties @@ -0,0 +1,4 @@ +### JBossNS properties +java.naming.factory.initial=org.jnp.interfaces.NamingContextFactory +java.naming.provider.url=jnp://localhost:1099 +java.naming.factory.url.pkgs=org.jboss.naming:org.jnp.interfaces diff --git a/ivy.xml b/ivy.xml new file mode 100644 index 0000000..15eb6c0 --- /dev/null +++ b/ivy.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + diff --git a/src/com/sleepycat/asm/AnnotationVisitor.java b/src/com/sleepycat/asm/AnnotationVisitor.java new file mode 100644 index 0000000..165f31a --- /dev/null +++ b/src/com/sleepycat/asm/AnnotationVisitor.java @@ -0,0 +1,169 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A visitor to visit a Java annotation. The methods of this class must be + * called in the following order: ( visit | visitEnum | + * visitAnnotation | visitArray )* visitEnd. + * + * @author Eric Bruneton + * @author Eugene Kuleshov + */ +public abstract class AnnotationVisitor { + + /** + * The ASM API version implemented by this visitor. The value of this field + * must be one of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + */ + protected final int api; + + /** + * The annotation visitor to which this visitor must delegate method calls. + * May be null. + */ + protected AnnotationVisitor av; + + /** + * Constructs a new {@link AnnotationVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + */ + public AnnotationVisitor(final int api) { + this(api, null); + } + + /** + * Constructs a new {@link AnnotationVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + * @param av + * the annotation visitor to which this visitor must delegate + * method calls. May be null. + */ + public AnnotationVisitor(final int api, final AnnotationVisitor av) { + if (api != Opcodes.ASM4 && api != Opcodes.ASM5) { + throw new IllegalArgumentException(); + } + this.api = api; + this.av = av; + } + + /** + * Visits a primitive value of the annotation. + * + * @param name + * the value name. + * @param value + * the actual value, whose type must be {@link Byte}, + * {@link Boolean}, {@link Character}, {@link Short}, + * {@link Integer} , {@link Long}, {@link Float}, {@link Double}, + * {@link String} or {@link Type} or OBJECT or ARRAY sort. This + * value can also be an array of byte, boolean, short, char, int, + * long, float or double values (this is equivalent to using + * {@link #visitArray visitArray} and visiting each array element + * in turn, but is more convenient). + */ + public void visit(String name, Object value) { + if (av != null) { + av.visit(name, value); + } + } + + /** + * Visits an enumeration value of the annotation. + * + * @param name + * the value name. + * @param desc + * the class descriptor of the enumeration class. + * @param value + * the actual enumeration value. + */ + public void visitEnum(String name, String desc, String value) { + if (av != null) { + av.visitEnum(name, desc, value); + } + } + + /** + * Visits a nested annotation value of the annotation. + * + * @param name + * the value name. + * @param desc + * the class descriptor of the nested annotation class. + * @return a visitor to visit the actual nested annotation value, or + * null if this visitor is not interested in visiting this + * nested annotation. The nested annotation value must be fully + * visited before calling other methods on this annotation + * visitor. 
+ */ + public AnnotationVisitor visitAnnotation(String name, String desc) { + if (av != null) { + return av.visitAnnotation(name, desc); + } + return null; + } + + /** + * Visits an array value of the annotation. Note that arrays of primitive + * types (such as byte, boolean, short, char, int, long, float or double) + * can be passed as value to {@link #visit visit}. This is what + * {@link ClassReader} does. + * + * @param name + * the value name. + * @return a visitor to visit the actual array value elements, or + * null if this visitor is not interested in visiting these + * values. The 'name' parameters passed to the methods of this + * visitor are ignored. All the array values must be visited + * before calling other methods on this annotation visitor. + */ + public AnnotationVisitor visitArray(String name) { + if (av != null) { + return av.visitArray(name); + } + return null; + } + + /** + * Visits the end of the annotation. + */ + public void visitEnd() { + if (av != null) { + av.visitEnd(); + } + } +} diff --git a/src/com/sleepycat/asm/AnnotationWriter.java b/src/com/sleepycat/asm/AnnotationWriter.java new file mode 100644 index 0000000..6dd4009 --- /dev/null +++ b/src/com/sleepycat/asm/AnnotationWriter.java @@ -0,0 +1,371 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * An {@link AnnotationVisitor} that generates annotations in bytecode form. + * + * @author Eric Bruneton + * @author Eugene Kuleshov + */ +final class AnnotationWriter extends AnnotationVisitor { + + /** + * The class writer to which this annotation must be added. + */ + private final ClassWriter cw; + + /** + * The number of values in this annotation. + */ + private int size; + + /** + * true if values are named, false otherwise. Annotation + * writers used for annotation default and annotation arrays use unnamed + * values. 
+ */ + private final boolean named; + + /** + * The annotation values in bytecode form. This byte vector only contains + * the values themselves, i.e. the number of values must be stored as a + * unsigned short just before these bytes. + */ + private final ByteVector bv; + + /** + * The byte vector to be used to store the number of values of this + * annotation. See {@link #bv}. + */ + private final ByteVector parent; + + /** + * Where the number of values of this annotation must be stored in + * {@link #parent}. + */ + private final int offset; + + /** + * Next annotation writer. This field is used to store annotation lists. + */ + AnnotationWriter next; + + /** + * Previous annotation writer. This field is used to store annotation lists. + */ + AnnotationWriter prev; + + // ------------------------------------------------------------------------ + // Constructor + // ------------------------------------------------------------------------ + + /** + * Constructs a new {@link AnnotationWriter}. + * + * @param cw + * the class writer to which this annotation must be added. + * @param named + * true if values are named, false otherwise. + * @param bv + * where the annotation values must be stored. + * @param parent + * where the number of annotation values must be stored. + * @param offset + * where in parent the number of annotation values must + * be stored. + */ + AnnotationWriter(final ClassWriter cw, final boolean named, + final ByteVector bv, final ByteVector parent, final int offset) { + super(Opcodes.ASM5); + this.cw = cw; + this.named = named; + this.bv = bv; + this.parent = parent; + this.offset = offset; + } + + // ------------------------------------------------------------------------ + // Implementation of the AnnotationVisitor abstract class + // ------------------------------------------------------------------------ + + @Override + public void visit(final String name, final Object value) { + ++size; + if (named) { + bv.putShort(cw.newUTF8(name)); + } + if (value instanceof String) { + bv.put12('s', cw.newUTF8((String) value)); + } else if (value instanceof Byte) { + bv.put12('B', cw.newInteger(((Byte) value).byteValue()).index); + } else if (value instanceof Boolean) { + int v = ((Boolean) value).booleanValue() ? 1 : 0; + bv.put12('Z', cw.newInteger(v).index); + } else if (value instanceof Character) { + bv.put12('C', cw.newInteger(((Character) value).charValue()).index); + } else if (value instanceof Short) { + bv.put12('S', cw.newInteger(((Short) value).shortValue()).index); + } else if (value instanceof Type) { + bv.put12('c', cw.newUTF8(((Type) value).getDescriptor())); + } else if (value instanceof byte[]) { + byte[] v = (byte[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('B', cw.newInteger(v[i]).index); + } + } else if (value instanceof boolean[]) { + boolean[] v = (boolean[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('Z', cw.newInteger(v[i] ? 
1 : 0).index); + } + } else if (value instanceof short[]) { + short[] v = (short[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('S', cw.newInteger(v[i]).index); + } + } else if (value instanceof char[]) { + char[] v = (char[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('C', cw.newInteger(v[i]).index); + } + } else if (value instanceof int[]) { + int[] v = (int[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('I', cw.newInteger(v[i]).index); + } + } else if (value instanceof long[]) { + long[] v = (long[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('J', cw.newLong(v[i]).index); + } + } else if (value instanceof float[]) { + float[] v = (float[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('F', cw.newFloat(v[i]).index); + } + } else if (value instanceof double[]) { + double[] v = (double[]) value; + bv.put12('[', v.length); + for (int i = 0; i < v.length; i++) { + bv.put12('D', cw.newDouble(v[i]).index); + } + } else { + Item i = cw.newConstItem(value); + bv.put12(".s.IFJDCS".charAt(i.type), i.index); + } + } + + @Override + public void visitEnum(final String name, final String desc, + final String value) { + ++size; + if (named) { + bv.putShort(cw.newUTF8(name)); + } + bv.put12('e', cw.newUTF8(desc)).putShort(cw.newUTF8(value)); + } + + @Override + public AnnotationVisitor visitAnnotation(final String name, + final String desc) { + ++size; + if (named) { + bv.putShort(cw.newUTF8(name)); + } + // write tag and type, and reserve space for values count + bv.put12('@', cw.newUTF8(desc)).putShort(0); + return new AnnotationWriter(cw, true, bv, bv, bv.length - 2); + } + + @Override + public AnnotationVisitor visitArray(final String name) { + ++size; + if (named) { + bv.putShort(cw.newUTF8(name)); + } + // write tag, and reserve space for array size + bv.put12('[', 0); + return new AnnotationWriter(cw, false, bv, bv, bv.length - 2); + } + + @Override + public void visitEnd() { + if (parent != null) { + byte[] data = parent.data; + data[offset] = (byte) (size >>> 8); + data[offset + 1] = (byte) size; + } + } + + // ------------------------------------------------------------------------ + // Utility methods + // ------------------------------------------------------------------------ + + /** + * Returns the size of this annotation writer list. + * + * @return the size of this annotation writer list. + */ + int getSize() { + int size = 0; + AnnotationWriter aw = this; + while (aw != null) { + size += aw.bv.length; + aw = aw.next; + } + return size; + } + + /** + * Puts the annotations of this annotation writer list into the given byte + * vector. + * + * @param out + * where the annotations must be put. + */ + void put(final ByteVector out) { + int n = 0; + int size = 2; + AnnotationWriter aw = this; + AnnotationWriter last = null; + while (aw != null) { + ++n; + size += aw.bv.length; + aw.visitEnd(); // in case user forgot to call visitEnd + aw.prev = last; + last = aw; + aw = aw.next; + } + out.putInt(size); + out.putShort(n); + aw = last; + while (aw != null) { + out.putByteArray(aw.bv.data, 0, aw.bv.length); + aw = aw.prev; + } + } + + /** + * Puts the given annotation lists into the given byte vector. + * + * @param panns + * an array of annotation writer lists. + * @param off + * index of the first annotation to be written. + * @param out + * where the annotations must be put. 
+ */ + static void put(final AnnotationWriter[] panns, final int off, + final ByteVector out) { + int size = 1 + 2 * (panns.length - off); + for (int i = off; i < panns.length; ++i) { + size += panns[i] == null ? 0 : panns[i].getSize(); + } + out.putInt(size).putByte(panns.length - off); + for (int i = off; i < panns.length; ++i) { + AnnotationWriter aw = panns[i]; + AnnotationWriter last = null; + int n = 0; + while (aw != null) { + ++n; + aw.visitEnd(); // in case user forgot to call visitEnd + aw.prev = last; + last = aw; + aw = aw.next; + } + out.putShort(n); + aw = last; + while (aw != null) { + out.putByteArray(aw.bv.data, 0, aw.bv.length); + aw = aw.prev; + } + } + } + + /** + * Puts the given type reference and type path into the given bytevector. + * LOCAL_VARIABLE and RESOURCE_VARIABLE target types are not supported. + * + * @param typeRef + * a reference to the annotated type. See {@link TypeReference}. + * @param typePath + * the path to the annotated type argument, wildcard bound, array + * element type, or static inner type within 'typeRef'. May be + * null if the annotation targets 'typeRef' as a whole. + * @param out + * where the type reference and type path must be put. + */ + static void putTarget(int typeRef, TypePath typePath, ByteVector out) { + switch (typeRef >>> 24) { + case 0x00: // CLASS_TYPE_PARAMETER + case 0x01: // METHOD_TYPE_PARAMETER + case 0x16: // METHOD_FORMAL_PARAMETER + out.putShort(typeRef >>> 16); + break; + case 0x13: // FIELD + case 0x14: // METHOD_RETURN + case 0x15: // METHOD_RECEIVER + out.putByte(typeRef >>> 24); + break; + case 0x47: // CAST + case 0x48: // CONSTRUCTOR_INVOCATION_TYPE_ARGUMENT + case 0x49: // METHOD_INVOCATION_TYPE_ARGUMENT + case 0x4A: // CONSTRUCTOR_REFERENCE_TYPE_ARGUMENT + case 0x4B: // METHOD_REFERENCE_TYPE_ARGUMENT + out.putInt(typeRef); + break; + // case 0x10: // CLASS_EXTENDS + // case 0x11: // CLASS_TYPE_PARAMETER_BOUND + // case 0x12: // METHOD_TYPE_PARAMETER_BOUND + // case 0x17: // THROWS + // case 0x42: // EXCEPTION_PARAMETER + // case 0x43: // INSTANCEOF + // case 0x44: // NEW + // case 0x45: // CONSTRUCTOR_REFERENCE + // case 0x46: // METHOD_REFERENCE + default: + out.put12(typeRef >>> 24, (typeRef & 0xFFFF00) >> 8); + break; + } + if (typePath == null) { + out.putByte(0); + } else { + int length = typePath.b[typePath.offset] * 2 + 1; + out.putByteArray(typePath.b, typePath.offset, length); + } + } +} diff --git a/src/com/sleepycat/asm/Attribute.java b/src/com/sleepycat/asm/Attribute.java new file mode 100644 index 0000000..4d380a3 --- /dev/null +++ b/src/com/sleepycat/asm/Attribute.java @@ -0,0 +1,255 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A non standard class, field, method or code attribute. + * + * @author Eric Bruneton + * @author Eugene Kuleshov + */ +public class Attribute { + + /** + * The type of this attribute. + */ + public final String type; + + /** + * The raw value of this attribute, used only for unknown attributes. + */ + byte[] value; + + /** + * The next attribute in this attribute list. May be null. + */ + Attribute next; + + /** + * Constructs a new empty attribute. + * + * @param type + * the type of the attribute. + */ + protected Attribute(final String type) { + this.type = type; + } + + /** + * Returns true if this type of attribute is unknown. The default + * implementation of this method always returns true. + * + * @return true if this type of attribute is unknown. + */ + public boolean isUnknown() { + return true; + } + + /** + * Returns true if this type of attribute is a code attribute. + * + * @return true if this type of attribute is a code attribute. + */ + public boolean isCodeAttribute() { + return false; + } + + /** + * Returns the labels corresponding to this attribute. + * + * @return the labels corresponding to this attribute, or null if + * this attribute is not a code attribute that contains labels. + */ + protected Label[] getLabels() { + return null; + } + + /** + * Reads a {@link #type type} attribute. This method must return a + * new {@link Attribute} object, of type {@link #type type}, + * corresponding to the len bytes starting at the given offset, in + * the given class reader. + * + * @param cr + * the class that contains the attribute to be read. + * @param off + * index of the first byte of the attribute's content in + * {@link ClassReader#b cr.b}. The 6 attribute header bytes, + * containing the type and the length of the attribute, are not + * taken into account here. + * @param len + * the length of the attribute's content. + * @param buf + * buffer to be used to call {@link ClassReader#readUTF8 + * readUTF8}, {@link ClassReader#readClass(int,char[]) readClass} + * or {@link ClassReader#readConst readConst}. + * @param codeOff + * index of the first byte of code's attribute content in + * {@link ClassReader#b cr.b}, or -1 if the attribute to be read + * is not a code attribute. The 6 attribute header bytes, + * containing the type and the length of the attribute, are not + * taken into account here. + * @param labels + * the labels of the method's code, or null if the + * attribute to be read is not a code attribute. + * @return a new {@link Attribute} object corresponding to the given + * bytes. 
+ */ + protected Attribute read(final ClassReader cr, final int off, + final int len, final char[] buf, final int codeOff, + final Label[] labels) { + Attribute attr = new Attribute(type); + attr.value = new byte[len]; + System.arraycopy(cr.b, off, attr.value, 0, len); + return attr; + } + + /** + * Returns the byte array form of this attribute. + * + * @param cw + * the class to which this attribute must be added. This + * parameter can be used to add to the constant pool of this + * class the items that corresponds to this attribute. + * @param code + * the bytecode of the method corresponding to this code + * attribute, or null if this attribute is not a code + * attributes. + * @param len + * the length of the bytecode of the method corresponding to this + * code attribute, or null if this attribute is not a + * code attribute. + * @param maxStack + * the maximum stack size of the method corresponding to this + * code attribute, or -1 if this attribute is not a code + * attribute. + * @param maxLocals + * the maximum number of local variables of the method + * corresponding to this code attribute, or -1 if this attribute + * is not a code attribute. + * @return the byte array form of this attribute. + */ + protected ByteVector write(final ClassWriter cw, final byte[] code, + final int len, final int maxStack, final int maxLocals) { + ByteVector v = new ByteVector(); + v.data = value; + v.length = value.length; + return v; + } + + /** + * Returns the length of the attribute list that begins with this attribute. + * + * @return the length of the attribute list that begins with this attribute. + */ + final int getCount() { + int count = 0; + Attribute attr = this; + while (attr != null) { + count += 1; + attr = attr.next; + } + return count; + } + + /** + * Returns the size of all the attributes in this attribute list. + * + * @param cw + * the class writer to be used to convert the attributes into + * byte arrays, with the {@link #write write} method. + * @param code + * the bytecode of the method corresponding to these code + * attributes, or null if these attributes are not code + * attributes. + * @param len + * the length of the bytecode of the method corresponding to + * these code attributes, or null if these attributes + * are not code attributes. + * @param maxStack + * the maximum stack size of the method corresponding to these + * code attributes, or -1 if these attributes are not code + * attributes. + * @param maxLocals + * the maximum number of local variables of the method + * corresponding to these code attributes, or -1 if these + * attributes are not code attributes. + * @return the size of all the attributes in this attribute list. This size + * includes the size of the attribute headers. + */ + final int getSize(final ClassWriter cw, final byte[] code, final int len, + final int maxStack, final int maxLocals) { + Attribute attr = this; + int size = 0; + while (attr != null) { + cw.newUTF8(attr.type); + size += attr.write(cw, code, len, maxStack, maxLocals).length + 6; + attr = attr.next; + } + return size; + } + + /** + * Writes all the attributes of this attribute list in the given byte + * vector. + * + * @param cw + * the class writer to be used to convert the attributes into + * byte arrays, with the {@link #write write} method. + * @param code + * the bytecode of the method corresponding to these code + * attributes, or null if these attributes are not code + * attributes. 
+ * @param len + * the length of the bytecode of the method corresponding to + * these code attributes, or null if these attributes + * are not code attributes. + * @param maxStack + * the maximum stack size of the method corresponding to these + * code attributes, or -1 if these attributes are not code + * attributes. + * @param maxLocals + * the maximum number of local variables of the method + * corresponding to these code attributes, or -1 if these + * attributes are not code attributes. + * @param out + * where the attributes must be written. + */ + final void put(final ClassWriter cw, final byte[] code, final int len, + final int maxStack, final int maxLocals, final ByteVector out) { + Attribute attr = this; + while (attr != null) { + ByteVector b = attr.write(cw, code, len, maxStack, maxLocals); + out.putShort(cw.newUTF8(attr.type)).putInt(b.length); + out.putByteArray(b.data, 0, b.length); + attr = attr.next; + } + } +} diff --git a/src/com/sleepycat/asm/ByteVector.java b/src/com/sleepycat/asm/ByteVector.java new file mode 100644 index 0000000..530b6b1 --- /dev/null +++ b/src/com/sleepycat/asm/ByteVector.java @@ -0,0 +1,339 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A dynamically extensible vector of bytes. This class is roughly equivalent to + * a DataOutputStream on top of a ByteArrayOutputStream, but is more efficient. + * + * @author Eric Bruneton + */ +public class ByteVector { + + /** + * The content of this vector. + */ + byte[] data; + + /** + * Actual number of bytes in this vector. + */ + int length; + + /** + * Constructs a new {@link ByteVector ByteVector} with a default initial + * size. + */ + public ByteVector() { + data = new byte[64]; + } + + /** + * Constructs a new {@link ByteVector ByteVector} with the given initial + * size. + * + * @param initialSize + * the initial size of the byte vector to be constructed. 
+ */ + public ByteVector(final int initialSize) { + data = new byte[initialSize]; + } + + /** + * Puts a byte into this byte vector. The byte vector is automatically + * enlarged if necessary. + * + * @param b + * a byte. + * @return this byte vector. + */ + public ByteVector putByte(final int b) { + int length = this.length; + if (length + 1 > data.length) { + enlarge(1); + } + data[length++] = (byte) b; + this.length = length; + return this; + } + + /** + * Puts two bytes into this byte vector. The byte vector is automatically + * enlarged if necessary. + * + * @param b1 + * a byte. + * @param b2 + * another byte. + * @return this byte vector. + */ + ByteVector put11(final int b1, final int b2) { + int length = this.length; + if (length + 2 > data.length) { + enlarge(2); + } + byte[] data = this.data; + data[length++] = (byte) b1; + data[length++] = (byte) b2; + this.length = length; + return this; + } + + /** + * Puts a short into this byte vector. The byte vector is automatically + * enlarged if necessary. + * + * @param s + * a short. + * @return this byte vector. + */ + public ByteVector putShort(final int s) { + int length = this.length; + if (length + 2 > data.length) { + enlarge(2); + } + byte[] data = this.data; + data[length++] = (byte) (s >>> 8); + data[length++] = (byte) s; + this.length = length; + return this; + } + + /** + * Puts a byte and a short into this byte vector. The byte vector is + * automatically enlarged if necessary. + * + * @param b + * a byte. + * @param s + * a short. + * @return this byte vector. + */ + ByteVector put12(final int b, final int s) { + int length = this.length; + if (length + 3 > data.length) { + enlarge(3); + } + byte[] data = this.data; + data[length++] = (byte) b; + data[length++] = (byte) (s >>> 8); + data[length++] = (byte) s; + this.length = length; + return this; + } + + /** + * Puts an int into this byte vector. The byte vector is automatically + * enlarged if necessary. + * + * @param i + * an int. + * @return this byte vector. + */ + public ByteVector putInt(final int i) { + int length = this.length; + if (length + 4 > data.length) { + enlarge(4); + } + byte[] data = this.data; + data[length++] = (byte) (i >>> 24); + data[length++] = (byte) (i >>> 16); + data[length++] = (byte) (i >>> 8); + data[length++] = (byte) i; + this.length = length; + return this; + } + + /** + * Puts a long into this byte vector. The byte vector is automatically + * enlarged if necessary. + * + * @param l + * a long. + * @return this byte vector. + */ + public ByteVector putLong(final long l) { + int length = this.length; + if (length + 8 > data.length) { + enlarge(8); + } + byte[] data = this.data; + int i = (int) (l >>> 32); + data[length++] = (byte) (i >>> 24); + data[length++] = (byte) (i >>> 16); + data[length++] = (byte) (i >>> 8); + data[length++] = (byte) i; + i = (int) l; + data[length++] = (byte) (i >>> 24); + data[length++] = (byte) (i >>> 16); + data[length++] = (byte) (i >>> 8); + data[length++] = (byte) i; + this.length = length; + return this; + } + + /** + * Puts an UTF8 string into this byte vector. The byte vector is + * automatically enlarged if necessary. + * + * @param s + * a String whose UTF8 encoded length must be less than 65536. + * @return this byte vector. 
+ */ + public ByteVector putUTF8(final String s) { + int charLength = s.length(); + if (charLength > 65535) { + throw new IllegalArgumentException(); + } + int len = length; + if (len + 2 + charLength > data.length) { + enlarge(2 + charLength); + } + byte[] data = this.data; + // optimistic algorithm: instead of computing the byte length and then + // serializing the string (which requires two loops), we assume the byte + // length is equal to char length (which is the most frequent case), and + // we start serializing the string right away. During the serialization, + // if we find that this assumption is wrong, we continue with the + // general method. + data[len++] = (byte) (charLength >>> 8); + data[len++] = (byte) charLength; + for (int i = 0; i < charLength; ++i) { + char c = s.charAt(i); + if (c >= '\001' && c <= '\177') { + data[len++] = (byte) c; + } else { + length = len; + return encodeUTF8(s, i, 65535); + } + } + length = len; + return this; + } + + /** + * Puts an UTF8 string into this byte vector. The byte vector is + * automatically enlarged if necessary. The string length is encoded in two + * bytes before the encoded characters, if there is space for that (i.e. if + * this.length - i - 2 >= 0). + * + * @param s + * the String to encode. + * @param i + * the index of the first character to encode. The previous + * characters are supposed to have already been encoded, using + * only one byte per character. + * @param maxByteLength + * the maximum byte length of the encoded string, including the + * already encoded characters. + * @return this byte vector. + */ + ByteVector encodeUTF8(final String s, int i, int maxByteLength) { + int charLength = s.length(); + int byteLength = i; + char c; + for (int j = i; j < charLength; ++j) { + c = s.charAt(j); + if (c >= '\001' && c <= '\177') { + byteLength++; + } else if (c > '\u07FF') { + byteLength += 3; + } else { + byteLength += 2; + } + } + if (byteLength > maxByteLength) { + throw new IllegalArgumentException(); + } + int start = length - i - 2; + if (start >= 0) { + data[start] = (byte) (byteLength >>> 8); + data[start + 1] = (byte) byteLength; + } + if (length + byteLength - i > data.length) { + enlarge(byteLength - i); + } + int len = length; + for (int j = i; j < charLength; ++j) { + c = s.charAt(j); + if (c >= '\001' && c <= '\177') { + data[len++] = (byte) c; + } else if (c > '\u07FF') { + data[len++] = (byte) (0xE0 | c >> 12 & 0xF); + data[len++] = (byte) (0x80 | c >> 6 & 0x3F); + data[len++] = (byte) (0x80 | c & 0x3F); + } else { + data[len++] = (byte) (0xC0 | c >> 6 & 0x1F); + data[len++] = (byte) (0x80 | c & 0x3F); + } + } + length = len; + return this; + } + + /** + * Puts an array of bytes into this byte vector. The byte vector is + * automatically enlarged if necessary. + * + * @param b + * an array of bytes. May be null to put len + * null bytes into this byte vector. + * @param off + * index of the fist byte of b that must be copied. + * @param len + * number of bytes of b that must be copied. + * @return this byte vector. + */ + public ByteVector putByteArray(final byte[] b, final int off, final int len) { + if (length + len > data.length) { + enlarge(len); + } + if (b != null) { + System.arraycopy(b, off, data, length, len); + } + length += len; + return this; + } + + /** + * Enlarge this byte vector so that it can receive n more bytes. + * + * @param size + * number of additional bytes that this byte vector should be + * able to receive. 
+ */ + private void enlarge(final int size) { + int length1 = 2 * data.length; + int length2 = length + size; + byte[] newData = new byte[length1 > length2 ? length1 : length2]; + System.arraycopy(data, 0, newData, 0, length); + data = newData; + } +} diff --git a/src/com/sleepycat/asm/ClassReader.java b/src/com/sleepycat/asm/ClassReader.java new file mode 100644 index 0000000..b9358fc --- /dev/null +++ b/src/com/sleepycat/asm/ClassReader.java @@ -0,0 +1,2496 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +import java.io.IOException; +import java.io.InputStream; + +/** + * A Java class parser to make a {@link ClassVisitor} visit an existing class. + * This class parses a byte array conforming to the Java class file format and + * calls the appropriate visit methods of a given class visitor for each field, + * method and bytecode instruction encountered. + * + * @author Eric Bruneton + * @author Eugene Kuleshov + */ +public class ClassReader { + + /** + * True to enable signatures support. + */ + static final boolean SIGNATURES = true; + + /** + * True to enable annotations support. + */ + static final boolean ANNOTATIONS = true; + + /** + * True to enable stack map frames support. + */ + static final boolean FRAMES = true; + + /** + * True to enable bytecode writing support. + */ + static final boolean WRITER = true; + + /** + * True to enable JSR_W and GOTO_W support. + */ + static final boolean RESIZE = true; + + /** + * Flag to skip method code. If this class is set CODE + * attribute won't be visited. This can be used, for example, to retrieve + * annotations for methods and method parameters. + */ + public static final int SKIP_CODE = 1; + + /** + * Flag to skip the debug information in the class. If this flag is set the + * debug information of the class is not visited, i.e. 
the + * {@link MethodVisitor#visitLocalVariable visitLocalVariable} and + * {@link MethodVisitor#visitLineNumber visitLineNumber} methods will not be + * called. + */ + public static final int SKIP_DEBUG = 2; + + /** + * Flag to skip the stack map frames in the class. If this flag is set the + * stack map frames of the class is not visited, i.e. the + * {@link MethodVisitor#visitFrame visitFrame} method will not be called. + * This flag is useful when the {@link ClassWriter#COMPUTE_FRAMES} option is + * used: it avoids visiting frames that will be ignored and recomputed from + * scratch in the class writer. + */ + public static final int SKIP_FRAMES = 4; + + /** + * Flag to expand the stack map frames. By default stack map frames are + * visited in their original format (i.e. "expanded" for classes whose + * version is less than V1_6, and "compressed" for the other classes). If + * this flag is set, stack map frames are always visited in expanded format + * (this option adds a decompression/recompression step in ClassReader and + * ClassWriter which degrades performances quite a lot). + */ + public static final int EXPAND_FRAMES = 8; + + /** + * The class to be parsed. The content of this array must not be + * modified. This field is intended for {@link Attribute} sub classes, and + * is normally not needed by class generators or adapters. + */ + public final byte[] b; + + /** + * The start index of each constant pool item in {@link #b b}, plus one. The + * one byte offset skips the constant pool item tag that indicates its type. + */ + private final int[] items; + + /** + * The String objects corresponding to the CONSTANT_Utf8 items. This cache + * avoids multiple parsing of a given CONSTANT_Utf8 constant pool item, + * which GREATLY improves performances (by a factor 2 to 3). This caching + * strategy could be extended to all constant pool items, but its benefit + * would not be so great for these items (because they are much less + * expensive to parse than CONSTANT_Utf8 items). + */ + private final String[] strings; + + /** + * Maximum length of the strings contained in the constant pool of the + * class. + */ + private final int maxStringLength; + + /** + * Start index of the class header information (access, name...) in + * {@link #b b}. + */ + public final int header; + + // ------------------------------------------------------------------------ + // Constructors + // ------------------------------------------------------------------------ + + /** + * Constructs a new {@link ClassReader} object. + * + * @param b + * the bytecode of the class to be read. + */ + public ClassReader(final byte[] b) { + this(b, 0, b.length); + } + + /** + * Constructs a new {@link ClassReader} object. + * + * @param b + * the bytecode of the class to be read. + * @param off + * the start offset of the class data. + * @param len + * the length of the class data. 
+ */ + public ClassReader(final byte[] b, final int off, final int len) { + this.b = b; + // checks the class version + if (readShort(off + 6) > Opcodes.V1_8) { + throw new IllegalArgumentException(); + } + // parses the constant pool + items = new int[readUnsignedShort(off + 8)]; + int n = items.length; + strings = new String[n]; + int max = 0; + int index = off + 10; + for (int i = 1; i < n; ++i) { + items[i] = index + 1; + int size; + switch (b[index]) { + case ClassWriter.FIELD: + case ClassWriter.METH: + case ClassWriter.IMETH: + case ClassWriter.INT: + case ClassWriter.FLOAT: + case ClassWriter.NAME_TYPE: + case ClassWriter.INDY: + size = 5; + break; + case ClassWriter.LONG: + case ClassWriter.DOUBLE: + size = 9; + ++i; + break; + case ClassWriter.UTF8: + size = 3 + readUnsignedShort(index + 1); + if (size > max) { + max = size; + } + break; + case ClassWriter.HANDLE: + size = 4; + break; + // case ClassWriter.CLASS: + // case ClassWriter.STR: + // case ClassWriter.MTYPE + default: + size = 3; + break; + } + index += size; + } + maxStringLength = max; + // the class header information starts just after the constant pool + header = index; + } + + /** + * Returns the class's access flags (see {@link Opcodes}). This value may + * not reflect Deprecated and Synthetic flags when bytecode is before 1.5 + * and those flags are represented by attributes. + * + * @return the class access flags + * + * @see ClassVisitor#visit(int, int, String, String, String, String[]) + */ + public int getAccess() { + return readUnsignedShort(header); + } + + /** + * Returns the internal name of the class (see + * {@link Type#getInternalName() getInternalName}). + * + * @return the internal class name + * + * @see ClassVisitor#visit(int, int, String, String, String, String[]) + */ + public String getClassName() { + return readClass(header + 2, new char[maxStringLength]); + } + + /** + * Returns the internal of name of the super class (see + * {@link Type#getInternalName() getInternalName}). For interfaces, the + * super class is {@link Object}. + * + * @return the internal name of super class, or null for + * {@link Object} class. + * + * @see ClassVisitor#visit(int, int, String, String, String, String[]) + */ + public String getSuperName() { + return readClass(header + 4, new char[maxStringLength]); + } + + /** + * Returns the internal names of the class's interfaces (see + * {@link Type#getInternalName() getInternalName}). + * + * @return the array of internal names for all implemented interfaces or + * null. + * + * @see ClassVisitor#visit(int, int, String, String, String, String[]) + */ + public String[] getInterfaces() { + int index = header + 6; + int n = readUnsignedShort(index); + String[] interfaces = new String[n]; + if (n > 0) { + char[] buf = new char[maxStringLength]; + for (int i = 0; i < n; ++i) { + index += 2; + interfaces[i] = readClass(index, buf); + } + } + return interfaces; + } + + /** + * Copies the constant pool data into the given {@link ClassWriter}. Should + * be called before the {@link #accept(ClassVisitor,int)} method. + * + * @param classWriter + * the {@link ClassWriter} to copy constant pool into. 
+ */ + void copyPool(final ClassWriter classWriter) { + char[] buf = new char[maxStringLength]; + int ll = items.length; + Item[] items2 = new Item[ll]; + for (int i = 1; i < ll; i++) { + int index = items[i]; + int tag = b[index - 1]; + Item item = new Item(i); + int nameType; + switch (tag) { + case ClassWriter.FIELD: + case ClassWriter.METH: + case ClassWriter.IMETH: + nameType = items[readUnsignedShort(index + 2)]; + item.set(tag, readClass(index, buf), readUTF8(nameType, buf), + readUTF8(nameType + 2, buf)); + break; + case ClassWriter.INT: + item.set(readInt(index)); + break; + case ClassWriter.FLOAT: + item.set(Float.intBitsToFloat(readInt(index))); + break; + case ClassWriter.NAME_TYPE: + item.set(tag, readUTF8(index, buf), readUTF8(index + 2, buf), + null); + break; + case ClassWriter.LONG: + item.set(readLong(index)); + ++i; + break; + case ClassWriter.DOUBLE: + item.set(Double.longBitsToDouble(readLong(index))); + ++i; + break; + case ClassWriter.UTF8: { + String s = strings[i]; + if (s == null) { + index = items[i]; + s = strings[i] = readUTF(index + 2, + readUnsignedShort(index), buf); + } + item.set(tag, s, null, null); + break; + } + case ClassWriter.HANDLE: { + int fieldOrMethodRef = items[readUnsignedShort(index + 1)]; + nameType = items[readUnsignedShort(fieldOrMethodRef + 2)]; + item.set(ClassWriter.HANDLE_BASE + readByte(index), + readClass(fieldOrMethodRef, buf), + readUTF8(nameType, buf), readUTF8(nameType + 2, buf)); + break; + } + case ClassWriter.INDY: + if (classWriter.bootstrapMethods == null) { + copyBootstrapMethods(classWriter, items2, buf); + } + nameType = items[readUnsignedShort(index + 2)]; + item.set(readUTF8(nameType, buf), readUTF8(nameType + 2, buf), + readUnsignedShort(index)); + break; + // case ClassWriter.STR: + // case ClassWriter.CLASS: + // case ClassWriter.MTYPE + default: + item.set(tag, readUTF8(index, buf), null, null); + break; + } + + int index2 = item.hashCode % items2.length; + item.next = items2[index2]; + items2[index2] = item; + } + + int off = items[1] - 1; + classWriter.pool.putByteArray(b, off, header - off); + classWriter.items = items2; + classWriter.threshold = (int) (0.75d * ll); + classWriter.index = ll; + } + + /** + * Copies the bootstrap method data into the given {@link ClassWriter}. + * Should be called before the {@link #accept(ClassVisitor,int)} method. + * + * @param classWriter + * the {@link ClassWriter} to copy bootstrap methods into. 
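+ *
+ * <p>For reference, the attribute content copied here is laid out as
+ * follows in the JVM specification ("BootstrapMethods" attribute):
+ * <pre>
+ * u2 num_bootstrap_methods;
+ * {   u2 bootstrap_method_ref;      // CONSTANT_MethodHandle_info index
+ *     u2 num_bootstrap_arguments;
+ *     u2 bootstrap_arguments[num_bootstrap_arguments];
+ * } bootstrap_methods[num_bootstrap_methods];
+ * </pre>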
+ */ + private void copyBootstrapMethods(final ClassWriter classWriter, + final Item[] items, final char[] c) { + // finds the "BootstrapMethods" attribute + int u = getAttributes(); + boolean found = false; + for (int i = readUnsignedShort(u); i > 0; --i) { + String attrName = readUTF8(u + 2, c); + if ("BootstrapMethods".equals(attrName)) { + found = true; + break; + } + u += 6 + readInt(u + 4); + } + if (!found) { + return; + } + // copies the bootstrap methods in the class writer + int bootstrapMethodCount = readUnsignedShort(u + 8); + for (int j = 0, v = u + 10; j < bootstrapMethodCount; j++) { + int position = v - u - 10; + int hashCode = readConst(readUnsignedShort(v), c).hashCode(); + for (int k = readUnsignedShort(v + 2); k > 0; --k) { + hashCode ^= readConst(readUnsignedShort(v + 4), c).hashCode(); + v += 2; + } + v += 4; + Item item = new Item(j); + item.set(position, hashCode & 0x7FFFFFFF); + int index = item.hashCode % items.length; + item.next = items[index]; + items[index] = item; + } + int attrSize = readInt(u + 4); + ByteVector bootstrapMethods = new ByteVector(attrSize + 62); + bootstrapMethods.putByteArray(b, u + 10, attrSize - 2); + classWriter.bootstrapMethodsCount = bootstrapMethodCount; + classWriter.bootstrapMethods = bootstrapMethods; + } + + /** + * Constructs a new {@link ClassReader} object. + * + * @param is + * an input stream from which to read the class. + * @throws IOException + * if a problem occurs during reading. + */ + public ClassReader(final InputStream is) throws IOException { + this(readClass(is, false)); + } + + /** + * Constructs a new {@link ClassReader} object. + * + * @param name + * the binary qualified name of the class to be read. + * @throws IOException + * if an exception occurs during reading. + */ + public ClassReader(final String name) throws IOException { + this(readClass( + ClassLoader.getSystemResourceAsStream(name.replace('.', '/') + + ".class"), true)); + } + + /** + * Reads the bytecode of a class. + * + * @param is + * an input stream from which to read the class. + * @param close + * true to close the input stream after reading. + * @return the bytecode read from the given input stream. + * @throws IOException + * if a problem occurs during reading. + */ + private static byte[] readClass(final InputStream is, boolean close) + throws IOException { + if (is == null) { + throw new IOException("Class not found"); + } + try { + byte[] b = new byte[is.available()]; + int len = 0; + while (true) { + int n = is.read(b, len, b.length - len); + if (n == -1) { + if (len < b.length) { + byte[] c = new byte[len]; + System.arraycopy(b, 0, c, 0, len); + b = c; + } + return b; + } + len += n; + if (len == b.length) { + int last = is.read(); + if (last < 0) { + return b; + } + byte[] c = new byte[b.length + 1000]; + System.arraycopy(b, 0, c, 0, len); + c[len++] = (byte) last; + b = c; + } + } + } finally { + if (close) { + is.close(); + } + } + } + + // ------------------------------------------------------------------------ + // Public methods + // ------------------------------------------------------------------------ + + /** + * Makes the given visitor visit the Java class of this {@link ClassReader}. + * This class is the one specified in the constructor (see + * {@link #ClassReader(byte[]) ClassReader}). + * + * @param classVisitor + * the visitor that must visit this class. + * @param flags + * option flags that can be used to modify the default behavior + * of this class.
See {@link #SKIP_DEBUG}, {@link #EXPAND_FRAMES}, + * {@link #SKIP_FRAMES}, {@link #SKIP_CODE}. + */ + public void accept(final ClassVisitor classVisitor, final int flags) { + accept(classVisitor, new Attribute[0], flags); + } + + /** + * Makes the given visitor visit the Java class of this {@link ClassReader}. + * This class is the one specified in the constructor (see + * {@link #ClassReader(byte[]) ClassReader}). + * + * @param classVisitor + * the visitor that must visit this class. + * @param attrs + * prototypes of the attributes that must be parsed during the + * visit of the class. Any attribute whose type is not equal to + * the type of one of the prototypes will not be parsed: its byte + * array value will be passed unchanged to the ClassWriter. + * This may corrupt it if this value contains references to + * the constant pool, or has syntactic or semantic links with a + * class element that has been transformed by a class adapter + * between the reader and the writer. + * @param flags + * option flags that can be used to modify the default behavior + * of this class. See {@link #SKIP_DEBUG}, {@link #EXPAND_FRAMES}, + * {@link #SKIP_FRAMES}, {@link #SKIP_CODE}. + */ + public void accept(final ClassVisitor classVisitor, + final Attribute[] attrs, final int flags) { + int u = header; // current offset in the class file + char[] c = new char[maxStringLength]; // buffer used to read strings + + Context context = new Context(); + context.attrs = attrs; + context.flags = flags; + context.buffer = c; + + // reads the class declaration + int access = readUnsignedShort(u); + String name = readClass(u + 2, c); + String superClass = readClass(u + 4, c); + String[] interfaces = new String[readUnsignedShort(u + 6)]; + u += 8; + for (int i = 0; i < interfaces.length; ++i) { + interfaces[i] = readClass(u, c); + u += 2; + } + + // reads the class attributes + String signature = null; + String sourceFile = null; + String sourceDebug = null; + String enclosingOwner = null; + String enclosingName = null; + String enclosingDesc = null; + int anns = 0; + int ianns = 0; + int tanns = 0; + int itanns = 0; + int innerClasses = 0; + Attribute attributes = null; + + u = getAttributes(); + for (int i = readUnsignedShort(u); i > 0; --i) { + String attrName = readUTF8(u + 2, c); + // tests are sorted in decreasing frequency order + // (based on frequencies observed on typical classes) + if ("SourceFile".equals(attrName)) { + sourceFile = readUTF8(u + 8, c); + } else if ("InnerClasses".equals(attrName)) { + innerClasses = u + 8; + } else if ("EnclosingMethod".equals(attrName)) { + enclosingOwner = readClass(u + 8, c); + int item = readUnsignedShort(u + 10); + if (item != 0) { + enclosingName = readUTF8(items[item], c); + enclosingDesc = readUTF8(items[item] + 2, c); + } + } else if (SIGNATURES && "Signature".equals(attrName)) { + signature = readUTF8(u + 8, c); + } else if (ANNOTATIONS + && "RuntimeVisibleAnnotations".equals(attrName)) { + anns = u + 8; + } else if (ANNOTATIONS + && "RuntimeVisibleTypeAnnotations".equals(attrName)) { + tanns = u + 8; + } else if ("Deprecated".equals(attrName)) { + access |= Opcodes.ACC_DEPRECATED; + } else if ("Synthetic".equals(attrName)) { + access |= Opcodes.ACC_SYNTHETIC + | ClassWriter.ACC_SYNTHETIC_ATTRIBUTE; + } else if ("SourceDebugExtension".equals(attrName)) { + int len = readInt(u + 4); + sourceDebug = readUTF(u + 8, len, new char[len]); + } else if (ANNOTATIONS + && "RuntimeInvisibleAnnotations".equals(attrName)) { + ianns = u + 8; + } else if (ANNOTATIONS + && 
"RuntimeInvisibleTypeAnnotations".equals(attrName)) { + itanns = u + 8; + } else if ("BootstrapMethods".equals(attrName)) { + int[] bootstrapMethods = new int[readUnsignedShort(u + 8)]; + for (int j = 0, v = u + 10; j < bootstrapMethods.length; j++) { + bootstrapMethods[j] = v; + v += 2 + readUnsignedShort(v + 2) << 1; + } + context.bootstrapMethods = bootstrapMethods; + } else { + Attribute attr = readAttribute(attrs, attrName, u + 8, + readInt(u + 4), c, -1, null); + if (attr != null) { + attr.next = attributes; + attributes = attr; + } + } + u += 6 + readInt(u + 4); + } + + // visits the class declaration + classVisitor.visit(readInt(items[1] - 7), access, name, signature, + superClass, interfaces); + + // visits the source and debug info + if ((flags & SKIP_DEBUG) == 0 + && (sourceFile != null || sourceDebug != null)) { + classVisitor.visitSource(sourceFile, sourceDebug); + } + + // visits the outer class + if (enclosingOwner != null) { + classVisitor.visitOuterClass(enclosingOwner, enclosingName, + enclosingDesc); + } + + // visits the class annotations and type annotations + if (ANNOTATIONS && anns != 0) { + for (int i = readUnsignedShort(anns), v = anns + 2; i > 0; --i) { + v = readAnnotationValues(v + 2, c, true, + classVisitor.visitAnnotation(readUTF8(v, c), true)); + } + } + if (ANNOTATIONS && ianns != 0) { + for (int i = readUnsignedShort(ianns), v = ianns + 2; i > 0; --i) { + v = readAnnotationValues(v + 2, c, true, + classVisitor.visitAnnotation(readUTF8(v, c), false)); + } + } + if (ANNOTATIONS && tanns != 0) { + for (int i = readUnsignedShort(tanns), v = tanns + 2; i > 0; --i) { + v = readAnnotationTarget(context, v); + v = readAnnotationValues(v + 2, c, true, + classVisitor.visitTypeAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), true)); + } + } + if (ANNOTATIONS && itanns != 0) { + for (int i = readUnsignedShort(itanns), v = itanns + 2; i > 0; --i) { + v = readAnnotationTarget(context, v); + v = readAnnotationValues(v + 2, c, true, + classVisitor.visitTypeAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), false)); + } + } + + // visits the attributes + while (attributes != null) { + Attribute attr = attributes.next; + attributes.next = null; + classVisitor.visitAttribute(attributes); + attributes = attr; + } + + // visits the inner classes + if (innerClasses != 0) { + int v = innerClasses + 2; + for (int i = readUnsignedShort(innerClasses); i > 0; --i) { + classVisitor.visitInnerClass(readClass(v, c), + readClass(v + 2, c), readUTF8(v + 4, c), + readUnsignedShort(v + 6)); + v += 8; + } + } + + // visits the fields and methods + u = header + 10 + 2 * interfaces.length; + for (int i = readUnsignedShort(u - 2); i > 0; --i) { + u = readField(classVisitor, context, u); + } + u += 2; + for (int i = readUnsignedShort(u - 2); i > 0; --i) { + u = readMethod(classVisitor, context, u); + } + + // visits the end of the class + classVisitor.visitEnd(); + } + + /** + * Reads a field and makes the given visitor visit it. + * + * @param classVisitor + * the visitor that must visit the field. + * @param context + * information about the class being parsed. + * @param u + * the start offset of the field in the class file. + * @return the offset of the first byte following the field in the class. 
+ */ + private int readField(final ClassVisitor classVisitor, + final Context context, int u) { + // reads the field declaration + char[] c = context.buffer; + int access = readUnsignedShort(u); + String name = readUTF8(u + 2, c); + String desc = readUTF8(u + 4, c); + u += 6; + + // reads the field attributes + String signature = null; + int anns = 0; + int ianns = 0; + int tanns = 0; + int itanns = 0; + Object value = null; + Attribute attributes = null; + + for (int i = readUnsignedShort(u); i > 0; --i) { + String attrName = readUTF8(u + 2, c); + // tests are sorted in decreasing frequency order + // (based on frequencies observed on typical classes) + if ("ConstantValue".equals(attrName)) { + int item = readUnsignedShort(u + 8); + value = item == 0 ? null : readConst(item, c); + } else if (SIGNATURES && "Signature".equals(attrName)) { + signature = readUTF8(u + 8, c); + } else if ("Deprecated".equals(attrName)) { + access |= Opcodes.ACC_DEPRECATED; + } else if ("Synthetic".equals(attrName)) { + access |= Opcodes.ACC_SYNTHETIC + | ClassWriter.ACC_SYNTHETIC_ATTRIBUTE; + } else if (ANNOTATIONS + && "RuntimeVisibleAnnotations".equals(attrName)) { + anns = u + 8; + } else if (ANNOTATIONS + && "RuntimeVisibleTypeAnnotations".equals(attrName)) { + tanns = u + 8; + } else if (ANNOTATIONS + && "RuntimeInvisibleAnnotations".equals(attrName)) { + ianns = u + 8; + } else if (ANNOTATIONS + && "RuntimeInvisibleTypeAnnotations".equals(attrName)) { + itanns = u + 8; + } else { + Attribute attr = readAttribute(context.attrs, attrName, u + 8, + readInt(u + 4), c, -1, null); + if (attr != null) { + attr.next = attributes; + attributes = attr; + } + } + u += 6 + readInt(u + 4); + } + u += 2; + + // visits the field declaration + FieldVisitor fv = classVisitor.visitField(access, name, desc, + signature, value); + if (fv == null) { + return u; + } + + // visits the field annotations and type annotations + if (ANNOTATIONS && anns != 0) { + for (int i = readUnsignedShort(anns), v = anns + 2; i > 0; --i) { + v = readAnnotationValues(v + 2, c, true, + fv.visitAnnotation(readUTF8(v, c), true)); + } + } + if (ANNOTATIONS && ianns != 0) { + for (int i = readUnsignedShort(ianns), v = ianns + 2; i > 0; --i) { + v = readAnnotationValues(v + 2, c, true, + fv.visitAnnotation(readUTF8(v, c), false)); + } + } + if (ANNOTATIONS && tanns != 0) { + for (int i = readUnsignedShort(tanns), v = tanns + 2; i > 0; --i) { + v = readAnnotationTarget(context, v); + v = readAnnotationValues(v + 2, c, true, + fv.visitTypeAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), true)); + } + } + if (ANNOTATIONS && itanns != 0) { + for (int i = readUnsignedShort(itanns), v = itanns + 2; i > 0; --i) { + v = readAnnotationTarget(context, v); + v = readAnnotationValues(v + 2, c, true, + fv.visitTypeAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), false)); + } + } + + // visits the field attributes + while (attributes != null) { + Attribute attr = attributes.next; + attributes.next = null; + fv.visitAttribute(attributes); + attributes = attr; + } + + // visits the end of the field + fv.visitEnd(); + + return u; + } + + /** + * Reads a method and makes the given visitor visit it. + * + * @param classVisitor + * the visitor that must visit the method. + * @param context + * information about the class being parsed. + * @param u + * the start offset of the method in the class file. + * @return the offset of the first byte following the method in the class. 
+ */ + private int readMethod(final ClassVisitor classVisitor, + final Context context, int u) { + // reads the method declaration + char[] c = context.buffer; + context.access = readUnsignedShort(u); + context.name = readUTF8(u + 2, c); + context.desc = readUTF8(u + 4, c); + u += 6; + + // reads the method attributes + int code = 0; + int exception = 0; + String[] exceptions = null; + String signature = null; + int methodParameters = 0; + int anns = 0; + int ianns = 0; + int tanns = 0; + int itanns = 0; + int dann = 0; + int mpanns = 0; + int impanns = 0; + int firstAttribute = u; + Attribute attributes = null; + + for (int i = readUnsignedShort(u); i > 0; --i) { + String attrName = readUTF8(u + 2, c); + // tests are sorted in decreasing frequency order + // (based on frequencies observed on typical classes) + if ("Code".equals(attrName)) { + if ((context.flags & SKIP_CODE) == 0) { + code = u + 8; + } + } else if ("Exceptions".equals(attrName)) { + exceptions = new String[readUnsignedShort(u + 8)]; + exception = u + 10; + for (int j = 0; j < exceptions.length; ++j) { + exceptions[j] = readClass(exception, c); + exception += 2; + } + } else if (SIGNATURES && "Signature".equals(attrName)) { + signature = readUTF8(u + 8, c); + } else if ("Deprecated".equals(attrName)) { + context.access |= Opcodes.ACC_DEPRECATED; + } else if (ANNOTATIONS + && "RuntimeVisibleAnnotations".equals(attrName)) { + anns = u + 8; + } else if (ANNOTATIONS + && "RuntimeVisibleTypeAnnotations".equals(attrName)) { + tanns = u + 8; + } else if (ANNOTATIONS && "AnnotationDefault".equals(attrName)) { + dann = u + 8; + } else if ("Synthetic".equals(attrName)) { + context.access |= Opcodes.ACC_SYNTHETIC + | ClassWriter.ACC_SYNTHETIC_ATTRIBUTE; + } else if (ANNOTATIONS + && "RuntimeInvisibleAnnotations".equals(attrName)) { + ianns = u + 8; + } else if (ANNOTATIONS + && "RuntimeInvisibleTypeAnnotations".equals(attrName)) { + itanns = u + 8; + } else if (ANNOTATIONS + && "RuntimeVisibleParameterAnnotations".equals(attrName)) { + mpanns = u + 8; + } else if (ANNOTATIONS + && "RuntimeInvisibleParameterAnnotations".equals(attrName)) { + impanns = u + 8; + } else if ("MethodParameters".equals(attrName)) { + methodParameters = u + 8; + } else { + Attribute attr = readAttribute(context.attrs, attrName, u + 8, + readInt(u + 4), c, -1, null); + if (attr != null) { + attr.next = attributes; + attributes = attr; + } + } + u += 6 + readInt(u + 4); + } + u += 2; + + // visits the method declaration + MethodVisitor mv = classVisitor.visitMethod(context.access, + context.name, context.desc, signature, exceptions); + if (mv == null) { + return u; + } + + /* + * if the returned MethodVisitor is in fact a MethodWriter, it means + * there is no method adapter between the reader and the writer. If, in + * addition, the writer's constant pool was copied from this reader + * (mw.cw.cr == this), and the signature and exceptions of the method + * have not been changed, then it is possible to skip all visit events + * and just copy the original code of the method to the writer (the + * access, name and descriptor can have been changed, this is not + * important since they are not copied as is from the reader). 
+ */ + if (WRITER && mv instanceof MethodWriter) { + MethodWriter mw = (MethodWriter) mv; + if (mw.cw.cr == this && signature == mw.signature) { + boolean sameExceptions = false; + if (exceptions == null) { + sameExceptions = mw.exceptionCount == 0; + } else if (exceptions.length == mw.exceptionCount) { + sameExceptions = true; + for (int j = exceptions.length - 1; j >= 0; --j) { + exception -= 2; + if (mw.exceptions[j] != readUnsignedShort(exception)) { + sameExceptions = false; + break; + } + } + } + if (sameExceptions) { + /* + * we do not copy directly the code into MethodWriter to + * save a byte array copy operation. The real copy will be + * done in ClassWriter.toByteArray(). + */ + mw.classReaderOffset = firstAttribute; + mw.classReaderLength = u - firstAttribute; + return u; + } + } + } + + // visit the method parameters + if (methodParameters != 0) { + for (int i = b[methodParameters] & 0xFF, v = methodParameters + 1; i > 0; --i, v = v + 4) { + mv.visitParameter(readUTF8(v, c), readUnsignedShort(v + 2)); + } + } + + // visits the method annotations + if (ANNOTATIONS && dann != 0) { + AnnotationVisitor dv = mv.visitAnnotationDefault(); + readAnnotationValue(dann, c, null, dv); + if (dv != null) { + dv.visitEnd(); + } + } + if (ANNOTATIONS && anns != 0) { + for (int i = readUnsignedShort(anns), v = anns + 2; i > 0; --i) { + v = readAnnotationValues(v + 2, c, true, + mv.visitAnnotation(readUTF8(v, c), true)); + } + } + if (ANNOTATIONS && ianns != 0) { + for (int i = readUnsignedShort(ianns), v = ianns + 2; i > 0; --i) { + v = readAnnotationValues(v + 2, c, true, + mv.visitAnnotation(readUTF8(v, c), false)); + } + } + if (ANNOTATIONS && tanns != 0) { + for (int i = readUnsignedShort(tanns), v = tanns + 2; i > 0; --i) { + v = readAnnotationTarget(context, v); + v = readAnnotationValues(v + 2, c, true, + mv.visitTypeAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), true)); + } + } + if (ANNOTATIONS && itanns != 0) { + for (int i = readUnsignedShort(itanns), v = itanns + 2; i > 0; --i) { + v = readAnnotationTarget(context, v); + v = readAnnotationValues(v + 2, c, true, + mv.visitTypeAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), false)); + } + } + if (ANNOTATIONS && mpanns != 0) { + readParameterAnnotations(mv, context, mpanns, true); + } + if (ANNOTATIONS && impanns != 0) { + readParameterAnnotations(mv, context, impanns, false); + } + + // visits the method attributes + while (attributes != null) { + Attribute attr = attributes.next; + attributes.next = null; + mv.visitAttribute(attributes); + attributes = attr; + } + + // visits the method code + if (code != 0) { + mv.visitCode(); + readCode(mv, context, code); + } + + // visits the end of the method + mv.visitEnd(); + + return u; + } + + /** + * Reads the bytecode of a method and makes the given visitor visit it. + * + * @param mv + * the visitor that must visit the method's code. + * @param context + * information about the class being parsed. + * @param u + * the start offset of the code attribute in the class file. 
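+ *
+ * <p>For reference, u points at the attribute content (just past the six
+ * byte attribute header), which the JVM specification lays out as:
+ * <pre>
+ * u2 max_stack;
+ * u2 max_locals;
+ * u4 code_length;
+ * u1 code[code_length];
+ * u2 exception_table_length;
+ * {   u2 start_pc; u2 end_pc; u2 handler_pc; u2 catch_type;
+ * } exception_table[exception_table_length];
+ * u2 attributes_count;
+ * attribute_info attributes[attributes_count];
+ * </pre>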
+ */ + private void readCode(final MethodVisitor mv, final Context context, int u) { + // reads the header + byte[] b = this.b; + char[] c = context.buffer; + int maxStack = readUnsignedShort(u); + int maxLocals = readUnsignedShort(u + 2); + int codeLength = readInt(u + 4); + u += 8; + + // reads the bytecode to find the labels + int codeStart = u; + int codeEnd = u + codeLength; + Label[] labels = context.labels = new Label[codeLength + 2]; + readLabel(codeLength + 1, labels); + while (u < codeEnd) { + int offset = u - codeStart; + int opcode = b[u] & 0xFF; + switch (ClassWriter.TYPE[opcode]) { + case ClassWriter.NOARG_INSN: + case ClassWriter.IMPLVAR_INSN: + u += 1; + break; + case ClassWriter.LABEL_INSN: + readLabel(offset + readShort(u + 1), labels); + u += 3; + break; + case ClassWriter.LABELW_INSN: + readLabel(offset + readInt(u + 1), labels); + u += 5; + break; + case ClassWriter.WIDE_INSN: + opcode = b[u + 1] & 0xFF; + if (opcode == Opcodes.IINC) { + u += 6; + } else { + u += 4; + } + break; + case ClassWriter.TABL_INSN: + // skips 0 to 3 padding bytes + u = u + 4 - (offset & 3); + // reads instruction + readLabel(offset + readInt(u), labels); + for (int i = readInt(u + 8) - readInt(u + 4) + 1; i > 0; --i) { + readLabel(offset + readInt(u + 12), labels); + u += 4; + } + u += 12; + break; + case ClassWriter.LOOK_INSN: + // skips 0 to 3 padding bytes + u = u + 4 - (offset & 3); + // reads instruction + readLabel(offset + readInt(u), labels); + for (int i = readInt(u + 4); i > 0; --i) { + readLabel(offset + readInt(u + 12), labels); + u += 8; + } + u += 8; + break; + case ClassWriter.VAR_INSN: + case ClassWriter.SBYTE_INSN: + case ClassWriter.LDC_INSN: + u += 2; + break; + case ClassWriter.SHORT_INSN: + case ClassWriter.LDCW_INSN: + case ClassWriter.FIELDORMETH_INSN: + case ClassWriter.TYPE_INSN: + case ClassWriter.IINC_INSN: + u += 3; + break; + case ClassWriter.ITFMETH_INSN: + case ClassWriter.INDYMETH_INSN: + u += 5; + break; + // case MANA_INSN: + default: + u += 4; + break; + } + } + + // reads the try catch entries to find the labels, and also visits them + for (int i = readUnsignedShort(u); i > 0; --i) { + Label start = readLabel(readUnsignedShort(u + 2), labels); + Label end = readLabel(readUnsignedShort(u + 4), labels); + Label handler = readLabel(readUnsignedShort(u + 6), labels); + String type = readUTF8(items[readUnsignedShort(u + 8)], c); + mv.visitTryCatchBlock(start, end, handler, type); + u += 8; + } + u += 2; + + // reads the code attributes + int[] tanns = null; // start index of each visible type annotation + int[] itanns = null; // start index of each invisible type annotation + int tann = 0; // current index in tanns array + int itann = 0; // current index in itanns array + int ntoff = -1; // next visible type annotation code offset + int nitoff = -1; // next invisible type annotation code offset + int varTable = 0; + int varTypeTable = 0; + boolean zip = true; + boolean unzip = (context.flags & EXPAND_FRAMES) != 0; + int stackMap = 0; + int stackMapSize = 0; + int frameCount = 0; + Context frame = null; + Attribute attributes = null; + + for (int i = readUnsignedShort(u); i > 0; --i) { + String attrName = readUTF8(u + 2, c); + if ("LocalVariableTable".equals(attrName)) { + if ((context.flags & SKIP_DEBUG) == 0) { + varTable = u + 8; + for (int j = readUnsignedShort(u + 8), v = u; j > 0; --j) { + int label = readUnsignedShort(v + 10); + if (labels[label] == null) { + readLabel(label, labels).status |= Label.DEBUG; + } + label += readUnsignedShort(v + 12); + if 
(labels[label] == null) { + readLabel(label, labels).status |= Label.DEBUG; + } + v += 10; + } + } + } else if ("LocalVariableTypeTable".equals(attrName)) { + varTypeTable = u + 8; + } else if ("LineNumberTable".equals(attrName)) { + if ((context.flags & SKIP_DEBUG) == 0) { + for (int j = readUnsignedShort(u + 8), v = u; j > 0; --j) { + int label = readUnsignedShort(v + 10); + if (labels[label] == null) { + readLabel(label, labels).status |= Label.DEBUG; + } + labels[label].line = readUnsignedShort(v + 12); + v += 4; + } + } + } else if (ANNOTATIONS + && "RuntimeVisibleTypeAnnotations".equals(attrName)) { + tanns = readTypeAnnotations(mv, context, u + 8, true); + ntoff = tanns.length == 0 || readByte(tanns[0]) < 0x43 ? -1 + : readUnsignedShort(tanns[0] + 1); + } else if (ANNOTATIONS + && "RuntimeInvisibleTypeAnnotations".equals(attrName)) { + itanns = readTypeAnnotations(mv, context, u + 8, false); + nitoff = itanns.length == 0 || readByte(itanns[0]) < 0x43 ? -1 + : readUnsignedShort(itanns[0] + 1); + } else if (FRAMES && "StackMapTable".equals(attrName)) { + if ((context.flags & SKIP_FRAMES) == 0) { + stackMap = u + 10; + stackMapSize = readInt(u + 4); + frameCount = readUnsignedShort(u + 8); + } + /* + * here we do not extract the labels corresponding to the + * attribute content. This would require a full parsing of the + * attribute, which would need to be repeated in the second + * phase (see below). Instead the content of the attribute is + * read one frame at a time (i.e. after a frame has been + * visited, the next frame is read), and the labels it contains + * are also extracted one frame at a time. Thanks to the + * ordering of frames, having only a "one frame lookahead" is + * not a problem, i.e. it is not possible to see an offset + * smaller than the offset of the current insn and for which no + * Label exist. + */ + /* + * This is not true for UNINITIALIZED type offsets. We solve + * this by parsing the stack map table without a full decoding + * (see below). + */ + } else if (FRAMES && "StackMap".equals(attrName)) { + if ((context.flags & SKIP_FRAMES) == 0) { + zip = false; + stackMap = u + 10; + stackMapSize = readInt(u + 4); + frameCount = readUnsignedShort(u + 8); + } + /* + * IMPORTANT! here we assume that the frames are ordered, as in + * the StackMapTable attribute, although this is not guaranteed + * by the attribute format. + */ + } else { + for (int j = 0; j < context.attrs.length; ++j) { + if (context.attrs[j].type.equals(attrName)) { + Attribute attr = context.attrs[j].read(this, u + 8, + readInt(u + 4), c, codeStart - 8, labels); + if (attr != null) { + attr.next = attributes; + attributes = attr; + } + } + } + } + u += 6 + readInt(u + 4); + } + u += 2; + + // generates the first (implicit) stack map frame + if (FRAMES && stackMap != 0) { + /* + * for the first explicit frame the offset is not offset_delta + 1 + * but only offset_delta; setting the implicit frame offset to -1 + * allow the use of the "offset_delta + 1" rule in all cases + */ + frame = context; + frame.offset = -1; + frame.mode = 0; + frame.localCount = 0; + frame.localDiff = 0; + frame.stackCount = 0; + frame.local = new Object[maxLocals]; + frame.stack = new Object[maxStack]; + if (unzip) { + getImplicitFrame(context); + } + /* + * Finds labels for UNINITIALIZED frame types. Instead of decoding + * each element of the stack map table, we look for 3 consecutive + * bytes that "look like" an UNINITIALIZED type (tag 8, offset + * within code bounds, NEW instruction at this offset). 
We may find + * false positives (i.e. not real UNINITIALIZED types), but this + * should be rare, and the only consequence will be the creation of + * an unneeded label. This is better than creating a label for each + * NEW instruction, and faster than fully decoding the whole stack + * map table. + */ + for (int i = stackMap; i < stackMap + stackMapSize - 2; ++i) { + if (b[i] == 8) { // UNINITIALIZED FRAME TYPE + int v = readUnsignedShort(i + 1); + if (v >= 0 && v < codeLength) { + if ((b[codeStart + v] & 0xFF) == Opcodes.NEW) { + readLabel(v, labels); + } + } + } + } + } + + // visits the instructions + u = codeStart; + while (u < codeEnd) { + int offset = u - codeStart; + + // visits the label and line number for this offset, if any + Label l = labels[offset]; + if (l != null) { + mv.visitLabel(l); + if ((context.flags & SKIP_DEBUG) == 0 && l.line > 0) { + mv.visitLineNumber(l.line, l); + } + } + + // visits the frame for this offset, if any + while (FRAMES && frame != null + && (frame.offset == offset || frame.offset == -1)) { + // if there is a frame for this offset, makes the visitor visit + // it, and reads the next frame if there is one. + if (frame.offset != -1) { + if (!zip || unzip) { + mv.visitFrame(Opcodes.F_NEW, frame.localCount, + frame.local, frame.stackCount, frame.stack); + } else { + mv.visitFrame(frame.mode, frame.localDiff, frame.local, + frame.stackCount, frame.stack); + } + } + if (frameCount > 0) { + stackMap = readFrame(stackMap, zip, unzip, frame); + --frameCount; + } else { + frame = null; + } + } + + // visits the instruction at this offset + int opcode = b[u] & 0xFF; + switch (ClassWriter.TYPE[opcode]) { + case ClassWriter.NOARG_INSN: + mv.visitInsn(opcode); + u += 1; + break; + case ClassWriter.IMPLVAR_INSN: + if (opcode > Opcodes.ISTORE) { + opcode -= 59; // ISTORE_0 + mv.visitVarInsn(Opcodes.ISTORE + (opcode >> 2), + opcode & 0x3); + } else { + opcode -= 26; // ILOAD_0 + mv.visitVarInsn(Opcodes.ILOAD + (opcode >> 2), opcode & 0x3); + } + u += 1; + break; + case ClassWriter.LABEL_INSN: + mv.visitJumpInsn(opcode, labels[offset + readShort(u + 1)]); + u += 3; + break; + case ClassWriter.LABELW_INSN: + mv.visitJumpInsn(opcode - 33, labels[offset + readInt(u + 1)]); + u += 5; + break; + case ClassWriter.WIDE_INSN: + opcode = b[u + 1] & 0xFF; + if (opcode == Opcodes.IINC) { + mv.visitIincInsn(readUnsignedShort(u + 2), readShort(u + 4)); + u += 6; + } else { + mv.visitVarInsn(opcode, readUnsignedShort(u + 2)); + u += 4; + } + break; + case ClassWriter.TABL_INSN: { + // skips 0 to 3 padding bytes + u = u + 4 - (offset & 3); + // reads instruction + int label = offset + readInt(u); + int min = readInt(u + 4); + int max = readInt(u + 8); + Label[] table = new Label[max - min + 1]; + u += 12; + for (int i = 0; i < table.length; ++i) { + table[i] = labels[offset + readInt(u)]; + u += 4; + } + mv.visitTableSwitchInsn(min, max, labels[label], table); + break; + } + case ClassWriter.LOOK_INSN: { + // skips 0 to 3 padding bytes + u = u + 4 - (offset & 3); + // reads instruction + int label = offset + readInt(u); + int len = readInt(u + 4); + int[] keys = new int[len]; + Label[] values = new Label[len]; + u += 8; + for (int i = 0; i < len; ++i) { + keys[i] = readInt(u); + values[i] = labels[offset + readInt(u + 4)]; + u += 8; + } + mv.visitLookupSwitchInsn(labels[label], keys, values); + break; + } + case ClassWriter.VAR_INSN: + mv.visitVarInsn(opcode, b[u + 1] & 0xFF); + u += 2; + break; + case ClassWriter.SBYTE_INSN: + mv.visitIntInsn(opcode, b[u + 1]); + u += 2; + break; 
+ case ClassWriter.SHORT_INSN: + mv.visitIntInsn(opcode, readShort(u + 1)); + u += 3; + break; + case ClassWriter.LDC_INSN: + mv.visitLdcInsn(readConst(b[u + 1] & 0xFF, c)); + u += 2; + break; + case ClassWriter.LDCW_INSN: + mv.visitLdcInsn(readConst(readUnsignedShort(u + 1), c)); + u += 3; + break; + case ClassWriter.FIELDORMETH_INSN: + case ClassWriter.ITFMETH_INSN: { + int cpIndex = items[readUnsignedShort(u + 1)]; + boolean itf = b[cpIndex - 1] == ClassWriter.IMETH; + String iowner = readClass(cpIndex, c); + cpIndex = items[readUnsignedShort(cpIndex + 2)]; + String iname = readUTF8(cpIndex, c); + String idesc = readUTF8(cpIndex + 2, c); + if (opcode < Opcodes.INVOKEVIRTUAL) { + mv.visitFieldInsn(opcode, iowner, iname, idesc); + } else { + mv.visitMethodInsn(opcode, iowner, iname, idesc, itf); + } + if (opcode == Opcodes.INVOKEINTERFACE) { + u += 5; + } else { + u += 3; + } + break; + } + case ClassWriter.INDYMETH_INSN: { + int cpIndex = items[readUnsignedShort(u + 1)]; + int bsmIndex = context.bootstrapMethods[readUnsignedShort(cpIndex)]; + Handle bsm = (Handle) readConst(readUnsignedShort(bsmIndex), c); + int bsmArgCount = readUnsignedShort(bsmIndex + 2); + Object[] bsmArgs = new Object[bsmArgCount]; + bsmIndex += 4; + for (int i = 0; i < bsmArgCount; i++) { + bsmArgs[i] = readConst(readUnsignedShort(bsmIndex), c); + bsmIndex += 2; + } + cpIndex = items[readUnsignedShort(cpIndex + 2)]; + String iname = readUTF8(cpIndex, c); + String idesc = readUTF8(cpIndex + 2, c); + mv.visitInvokeDynamicInsn(iname, idesc, bsm, bsmArgs); + u += 5; + break; + } + case ClassWriter.TYPE_INSN: + mv.visitTypeInsn(opcode, readClass(u + 1, c)); + u += 3; + break; + case ClassWriter.IINC_INSN: + mv.visitIincInsn(b[u + 1] & 0xFF, b[u + 2]); + u += 3; + break; + // case MANA_INSN: + default: + mv.visitMultiANewArrayInsn(readClass(u + 1, c), b[u + 3] & 0xFF); + u += 4; + break; + } + + // visit the instruction annotations, if any + while (tanns != null && tann < tanns.length && ntoff <= offset) { + if (ntoff == offset) { + int v = readAnnotationTarget(context, tanns[tann]); + readAnnotationValues(v + 2, c, true, + mv.visitInsnAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), true)); + } + ntoff = ++tann >= tanns.length || readByte(tanns[tann]) < 0x43 ? -1 + : readUnsignedShort(tanns[tann] + 1); + } + while (itanns != null && itann < itanns.length && nitoff <= offset) { + if (nitoff == offset) { + int v = readAnnotationTarget(context, itanns[itann]); + readAnnotationValues(v + 2, c, true, + mv.visitInsnAnnotation(context.typeRef, + context.typePath, readUTF8(v, c), false)); + } + nitoff = ++itann >= itanns.length + || readByte(itanns[itann]) < 0x43 ? 
-1 + : readUnsignedShort(itanns[itann] + 1); + } + } + if (labels[codeLength] != null) { + mv.visitLabel(labels[codeLength]); + } + + // visits the local variable tables + if ((context.flags & SKIP_DEBUG) == 0 && varTable != 0) { + int[] typeTable = null; + if (varTypeTable != 0) { + u = varTypeTable + 2; + typeTable = new int[readUnsignedShort(varTypeTable) * 3]; + for (int i = typeTable.length; i > 0;) { + typeTable[--i] = u + 6; // signature + typeTable[--i] = readUnsignedShort(u + 8); // index + typeTable[--i] = readUnsignedShort(u); // start + u += 10; + } + } + u = varTable + 2; + for (int i = readUnsignedShort(varTable); i > 0; --i) { + int start = readUnsignedShort(u); + int length = readUnsignedShort(u + 2); + int index = readUnsignedShort(u + 8); + String vsignature = null; + if (typeTable != null) { + for (int j = 0; j < typeTable.length; j += 3) { + if (typeTable[j] == start && typeTable[j + 1] == index) { + vsignature = readUTF8(typeTable[j + 2], c); + break; + } + } + } + mv.visitLocalVariable(readUTF8(u + 4, c), readUTF8(u + 6, c), + vsignature, labels[start], labels[start + length], + index); + u += 10; + } + } + + // visits the local variables type annotations + if (tanns != null) { + for (int i = 0; i < tanns.length; ++i) { + if ((readByte(tanns[i]) >> 1) == (0x40 >> 1)) { + int v = readAnnotationTarget(context, tanns[i]); + v = readAnnotationValues(v + 2, c, true, + mv.visitLocalVariableAnnotation(context.typeRef, + context.typePath, context.start, + context.end, context.index, readUTF8(v, c), + true)); + } + } + } + if (itanns != null) { + for (int i = 0; i < itanns.length; ++i) { + if ((readByte(itanns[i]) >> 1) == (0x40 >> 1)) { + int v = readAnnotationTarget(context, itanns[i]); + v = readAnnotationValues(v + 2, c, true, + mv.visitLocalVariableAnnotation(context.typeRef, + context.typePath, context.start, + context.end, context.index, readUTF8(v, c), + false)); + } + } + } + + // visits the code attributes + while (attributes != null) { + Attribute attr = attributes.next; + attributes.next = null; + mv.visitAttribute(attributes); + attributes = attr; + } + + // visits the max stack and max locals values + mv.visitMaxs(maxStack, maxLocals); + } + + /** + * Parses a type annotation table to find the labels, and to visit the try + * catch block annotations. + * + * @param u + * the start offset of a type annotation table. + * @param mv + * the method visitor to be used to visit the try catch block + * annotations. + * @param context + * information about the class being parsed. + * @param visible + * if the type annotation table to parse contains runtime visible + * annotations. + * @return the start offset of each type annotation in the parsed table. 
+ */ + private int[] readTypeAnnotations(final MethodVisitor mv, + final Context context, int u, boolean visible) { + char[] c = context.buffer; + int[] offsets = new int[readUnsignedShort(u)]; + u += 2; + for (int i = 0; i < offsets.length; ++i) { + offsets[i] = u; + int target = readInt(u); + switch (target >>> 24) { + case 0x00: // CLASS_TYPE_PARAMETER + case 0x01: // METHOD_TYPE_PARAMETER + case 0x16: // METHOD_FORMAL_PARAMETER + u += 2; + break; + case 0x13: // FIELD + case 0x14: // METHOD_RETURN + case 0x15: // METHOD_RECEIVER + u += 1; + break; + case 0x40: // LOCAL_VARIABLE + case 0x41: // RESOURCE_VARIABLE + for (int j = readUnsignedShort(u + 1); j > 0; --j) { + int start = readUnsignedShort(u + 3); + int length = readUnsignedShort(u + 5); + readLabel(start, context.labels); + readLabel(start + length, context.labels); + u += 6; + } + u += 3; + break; + case 0x47: // CAST + case 0x48: // CONSTRUCTOR_INVOCATION_TYPE_ARGUMENT + case 0x49: // METHOD_INVOCATION_TYPE_ARGUMENT + case 0x4A: // CONSTRUCTOR_REFERENCE_TYPE_ARGUMENT + case 0x4B: // METHOD_REFERENCE_TYPE_ARGUMENT + u += 4; + break; + // case 0x10: // CLASS_EXTENDS + // case 0x11: // CLASS_TYPE_PARAMETER_BOUND + // case 0x12: // METHOD_TYPE_PARAMETER_BOUND + // case 0x17: // THROWS + // case 0x42: // EXCEPTION_PARAMETER + // case 0x43: // INSTANCEOF + // case 0x44: // NEW + // case 0x45: // CONSTRUCTOR_REFERENCE + // case 0x46: // METHOD_REFERENCE + default: + u += 3; + break; + } + int pathLength = readByte(u); + if ((target >>> 24) == 0x42) { + TypePath path = pathLength == 0 ? null : new TypePath(b, u); + u += 1 + 2 * pathLength; + u = readAnnotationValues(u + 2, c, true, + mv.visitTryCatchAnnotation(target, path, + readUTF8(u, c), visible)); + } else { + u = readAnnotationValues(u + 3 + 2 * pathLength, c, true, null); + } + } + return offsets; + } + + /** + * Parses the header of a type annotation to extract its target_type and + * target_path (the result is stored in the given context), and returns the + * start offset of the rest of the type_annotation structure (i.e. the + * offset to the type_index field, which is followed by + * num_element_value_pairs and then the name,value pairs). + * + * @param context + * information about the class being parsed. This is where the + * extracted target_type and target_path must be stored. + * @param u + * the start offset of a type_annotation structure. + * @return the start offset of the rest of the type_annotation structure. 
+ */ + private int readAnnotationTarget(final Context context, int u) { + int target = readInt(u); + switch (target >>> 24) { + case 0x00: // CLASS_TYPE_PARAMETER + case 0x01: // METHOD_TYPE_PARAMETER + case 0x16: // METHOD_FORMAL_PARAMETER + target &= 0xFFFF0000; + u += 2; + break; + case 0x13: // FIELD + case 0x14: // METHOD_RETURN + case 0x15: // METHOD_RECEIVER + target &= 0xFF000000; + u += 1; + break; + case 0x40: // LOCAL_VARIABLE + case 0x41: { // RESOURCE_VARIABLE + target &= 0xFF000000; + int n = readUnsignedShort(u + 1); + context.start = new Label[n]; + context.end = new Label[n]; + context.index = new int[n]; + u += 3; + for (int i = 0; i < n; ++i) { + int start = readUnsignedShort(u); + int length = readUnsignedShort(u + 2); + context.start[i] = readLabel(start, context.labels); + context.end[i] = readLabel(start + length, context.labels); + context.index[i] = readUnsignedShort(u + 4); + u += 6; + } + break; + } + case 0x47: // CAST + case 0x48: // CONSTRUCTOR_INVOCATION_TYPE_ARGUMENT + case 0x49: // METHOD_INVOCATION_TYPE_ARGUMENT + case 0x4A: // CONSTRUCTOR_REFERENCE_TYPE_ARGUMENT + case 0x4B: // METHOD_REFERENCE_TYPE_ARGUMENT + target &= 0xFF0000FF; + u += 4; + break; + // case 0x10: // CLASS_EXTENDS + // case 0x11: // CLASS_TYPE_PARAMETER_BOUND + // case 0x12: // METHOD_TYPE_PARAMETER_BOUND + // case 0x17: // THROWS + // case 0x42: // EXCEPTION_PARAMETER + // case 0x43: // INSTANCEOF + // case 0x44: // NEW + // case 0x45: // CONSTRUCTOR_REFERENCE + // case 0x46: // METHOD_REFERENCE + default: + target &= (target >>> 24) < 0x43 ? 0xFFFFFF00 : 0xFF000000; + u += 3; + break; + } + int pathLength = readByte(u); + context.typeRef = target; + context.typePath = pathLength == 0 ? null : new TypePath(b, u); + return u + 1 + 2 * pathLength; + } + + /** + * Reads parameter annotations and makes the given visitor visit them. + * + * @param mv + * the visitor that must visit the annotations. + * @param context + * information about the class being parsed. + * @param v + * start offset in {@link #b b} of the annotations to be read. + * @param visible + * true if the annotations to be read are visible at + * runtime. + */ + private void readParameterAnnotations(final MethodVisitor mv, + final Context context, int v, final boolean visible) { + int i; + int n = b[v++] & 0xFF; + // workaround for a bug in javac (javac compiler generates a parameter + // annotation array whose size is equal to the number of parameters in + // the Java source file, while it should generate an array whose size is + // equal to the number of parameters in the method descriptor - which + // includes the synthetic parameters added by the compiler). This work- + // around supposes that the synthetic parameters are the first ones. + int synthetics = Type.getArgumentTypes(context.desc).length - n; + AnnotationVisitor av; + for (i = 0; i < synthetics; ++i) { + // virtual annotation to detect synthetic parameters in MethodWriter + av = mv.visitParameterAnnotation(i, "Ljava/lang/Synthetic;", false); + if (av != null) { + av.visitEnd(); + } + } + char[] c = context.buffer; + for (; i < n + synthetics; ++i) { + int j = readUnsignedShort(v); + v += 2; + for (; j > 0; --j) { + av = mv.visitParameterAnnotation(i, readUTF8(v, c), visible); + v = readAnnotationValues(v + 2, c, true, av); + } + } + } + + /** + * Reads the values of an annotation and makes the given visitor visit them. 
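+ *
+ * <p>For reference: when {@code named} is true this parses the JVM
+ * specification's element_value_pairs list,
+ * <pre>
+ * u2 num_element_value_pairs;
+ * {   u2 element_name_index;
+ *     element_value value;
+ * } element_value_pairs[num_element_value_pairs];
+ * </pre>
+ * and when {@code named} is false (array elements) the name index is
+ * absent, leaving just a counted list of element_value items.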
+ * + * @param v + * the start offset in {@link #b b} of the values to be read + * (including the unsigned short that gives the number of + * values). + * @param buf + * buffer to be used to call {@link #readUTF8 readUTF8}, + * {@link #readClass(int,char[]) readClass} or {@link #readConst + * readConst}. + * @param named + * if the annotation values are named or not. + * @param av + * the visitor that must visit the values. + * @return the end offset of the annotation values. + */ + private int readAnnotationValues(int v, final char[] buf, + final boolean named, final AnnotationVisitor av) { + int i = readUnsignedShort(v); + v += 2; + if (named) { + for (; i > 0; --i) { + v = readAnnotationValue(v + 2, buf, readUTF8(v, buf), av); + } + } else { + for (; i > 0; --i) { + v = readAnnotationValue(v, buf, null, av); + } + } + if (av != null) { + av.visitEnd(); + } + return v; + } + + /** + * Reads a value of an annotation and makes the given visitor visit it. + * + * @param v + * the start offset in {@link #b b} of the value to be read + * (not including the value name constant pool index). + * @param buf + * buffer to be used to call {@link #readUTF8 readUTF8}, + * {@link #readClass(int,char[]) readClass} or {@link #readConst + * readConst}. + * @param name + * the name of the value to be read. + * @param av + * the visitor that must visit the value. + * @return the end offset of the annotation value. + */ + private int readAnnotationValue(int v, final char[] buf, final String name, + final AnnotationVisitor av) { + int i; + if (av == null) { + switch (b[v] & 0xFF) { + case 'e': // enum_const_value + return v + 5; + case '@': // annotation_value + return readAnnotationValues(v + 3, buf, true, null); + case '[': // array_value + return readAnnotationValues(v + 1, buf, false, null); + default: + return v + 3; + } + } + switch (b[v++] & 0xFF) { + case 'I': // pointer to CONSTANT_Integer + case 'J': // pointer to CONSTANT_Long + case 'F': // pointer to CONSTANT_Float + case 'D': // pointer to CONSTANT_Double + av.visit(name, readConst(readUnsignedShort(v), buf)); + v += 2; + break; + case 'B': // pointer to CONSTANT_Byte + av.visit(name, + new Byte((byte) readInt(items[readUnsignedShort(v)]))); + v += 2; + break; + case 'Z': // pointer to CONSTANT_Boolean + av.visit(name, + readInt(items[readUnsignedShort(v)]) == 0 ? 
Boolean.FALSE + : Boolean.TRUE); + v += 2; + break; + case 'S': // pointer to CONSTANT_Short + av.visit(name, new Short( + (short) readInt(items[readUnsignedShort(v)]))); + v += 2; + break; + case 'C': // pointer to CONSTANT_Char + av.visit(name, new Character( + (char) readInt(items[readUnsignedShort(v)]))); + v += 2; + break; + case 's': // pointer to CONSTANT_Utf8 + av.visit(name, readUTF8(v, buf)); + v += 2; + break; + case 'e': // enum_const_value + av.visitEnum(name, readUTF8(v, buf), readUTF8(v + 2, buf)); + v += 4; + break; + case 'c': // class_info + av.visit(name, Type.getType(readUTF8(v, buf))); + v += 2; + break; + case '@': // annotation_value + v = readAnnotationValues(v + 2, buf, true, + av.visitAnnotation(name, readUTF8(v, buf))); + break; + case '[': // array_value + int size = readUnsignedShort(v); + v += 2; + if (size == 0) { + return readAnnotationValues(v - 2, buf, false, + av.visitArray(name)); + } + switch (this.b[v++] & 0xFF) { + case 'B': + byte[] bv = new byte[size]; + for (i = 0; i < size; i++) { + bv[i] = (byte) readInt(items[readUnsignedShort(v)]); + v += 3; + } + av.visit(name, bv); + --v; + break; + case 'Z': + boolean[] zv = new boolean[size]; + for (i = 0; i < size; i++) { + zv[i] = readInt(items[readUnsignedShort(v)]) != 0; + v += 3; + } + av.visit(name, zv); + --v; + break; + case 'S': + short[] sv = new short[size]; + for (i = 0; i < size; i++) { + sv[i] = (short) readInt(items[readUnsignedShort(v)]); + v += 3; + } + av.visit(name, sv); + --v; + break; + case 'C': + char[] cv = new char[size]; + for (i = 0; i < size; i++) { + cv[i] = (char) readInt(items[readUnsignedShort(v)]); + v += 3; + } + av.visit(name, cv); + --v; + break; + case 'I': + int[] iv = new int[size]; + for (i = 0; i < size; i++) { + iv[i] = readInt(items[readUnsignedShort(v)]); + v += 3; + } + av.visit(name, iv); + --v; + break; + case 'J': + long[] lv = new long[size]; + for (i = 0; i < size; i++) { + lv[i] = readLong(items[readUnsignedShort(v)]); + v += 3; + } + av.visit(name, lv); + --v; + break; + case 'F': + float[] fv = new float[size]; + for (i = 0; i < size; i++) { + fv[i] = Float + .intBitsToFloat(readInt(items[readUnsignedShort(v)])); + v += 3; + } + av.visit(name, fv); + --v; + break; + case 'D': + double[] dv = new double[size]; + for (i = 0; i < size; i++) { + dv[i] = Double + .longBitsToDouble(readLong(items[readUnsignedShort(v)])); + v += 3; + } + av.visit(name, dv); + --v; + break; + default: + v = readAnnotationValues(v - 3, buf, false, av.visitArray(name)); + } + } + return v; + } + + /** + * Computes the implicit frame of the method currently being parsed (as + * defined in the given {@link Context}) and stores it in the given context. + * + * @param frame + * information about the class being parsed. 
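+ *
+ * <p>A worked example: for an instance method {@code int f(long x, String s)}
+ * declared in a class whose internal name is {@code pkg/A} (both names are
+ * hypothetical), the implicit frame computed here is
+ * <pre>local = { "pkg/A", Opcodes.LONG, "java/lang/String" }</pre>
+ * For a constructor the first slot is {@link Opcodes#UNINITIALIZED_THIS}
+ * instead, since 'this' is not yet initialized at offset 0.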
+ */ + private void getImplicitFrame(final Context frame) { + String desc = frame.desc; + Object[] locals = frame.local; + int local = 0; + if ((frame.access & Opcodes.ACC_STATIC) == 0) { + if ("<init>".equals(frame.name)) { + locals[local++] = Opcodes.UNINITIALIZED_THIS; + } else { + locals[local++] = readClass(header + 2, frame.buffer); + } + } + int i = 1; + loop: while (true) { + int j = i; + switch (desc.charAt(i++)) { + case 'Z': + case 'C': + case 'B': + case 'S': + case 'I': + locals[local++] = Opcodes.INTEGER; + break; + case 'F': + locals[local++] = Opcodes.FLOAT; + break; + case 'J': + locals[local++] = Opcodes.LONG; + break; + case 'D': + locals[local++] = Opcodes.DOUBLE; + break; + case '[': + while (desc.charAt(i) == '[') { + ++i; + } + if (desc.charAt(i) == 'L') { + ++i; + while (desc.charAt(i) != ';') { + ++i; + } + } + locals[local++] = desc.substring(j, ++i); + break; + case 'L': + while (desc.charAt(i) != ';') { + ++i; + } + locals[local++] = desc.substring(j + 1, i++); + break; + default: + break loop; + } + } + frame.localCount = local; + } + + /** + * Reads a stack map frame and stores the result in the given + * {@link Context} object. + * + * @param stackMap + * the start offset of a stack map frame in the class file. + * @param zip + * whether the stack map frame at stackMap is compressed. + * @param unzip + * whether the stack map frame must be uncompressed. + * @param frame + * where the parsed stack map frame must be stored. + * @return the offset of the first byte following the parsed frame. + */ + private int readFrame(int stackMap, boolean zip, boolean unzip, + Context frame) { + char[] c = frame.buffer; + Label[] labels = frame.labels; + int tag; + int delta; + if (zip) { + tag = b[stackMap++] & 0xFF; + } else { + tag = MethodWriter.FULL_FRAME; + frame.offset = -1; + } + frame.localDiff = 0; + if (tag < MethodWriter.SAME_LOCALS_1_STACK_ITEM_FRAME) { + delta = tag; + frame.mode = Opcodes.F_SAME; + frame.stackCount = 0; + } else if (tag < MethodWriter.RESERVED) { + delta = tag - MethodWriter.SAME_LOCALS_1_STACK_ITEM_FRAME; + stackMap = readFrameType(frame.stack, 0, stackMap, c, labels); + frame.mode = Opcodes.F_SAME1; + frame.stackCount = 1; + } else { + delta = readUnsignedShort(stackMap); + stackMap += 2; + if (tag == MethodWriter.SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED) { + stackMap = readFrameType(frame.stack, 0, stackMap, c, labels); + frame.mode = Opcodes.F_SAME1; + frame.stackCount = 1; + } else if (tag >= MethodWriter.CHOP_FRAME + && tag < MethodWriter.SAME_FRAME_EXTENDED) { + frame.mode = Opcodes.F_CHOP; + frame.localDiff = MethodWriter.SAME_FRAME_EXTENDED - tag; + frame.localCount -= frame.localDiff; + frame.stackCount = 0; + } else if (tag == MethodWriter.SAME_FRAME_EXTENDED) { + frame.mode = Opcodes.F_SAME; + frame.stackCount = 0; + } else if (tag < MethodWriter.FULL_FRAME) { + int local = unzip ? 
frame.localCount : 0; + for (int i = tag - MethodWriter.SAME_FRAME_EXTENDED; i > 0; i--) { + stackMap = readFrameType(frame.local, local++, stackMap, c, + labels); + } + frame.mode = Opcodes.F_APPEND; + frame.localDiff = tag - MethodWriter.SAME_FRAME_EXTENDED; + frame.localCount += frame.localDiff; + frame.stackCount = 0; + } else { // if (tag == FULL_FRAME) { + frame.mode = Opcodes.F_FULL; + int n = readUnsignedShort(stackMap); + stackMap += 2; + frame.localDiff = n; + frame.localCount = n; + for (int local = 0; n > 0; n--) { + stackMap = readFrameType(frame.local, local++, stackMap, c, + labels); + } + n = readUnsignedShort(stackMap); + stackMap += 2; + frame.stackCount = n; + for (int stack = 0; n > 0; n--) { + stackMap = readFrameType(frame.stack, stack++, stackMap, c, + labels); + } + } + } + frame.offset += delta + 1; + readLabel(frame.offset, labels); + return stackMap; + } + + /** + * Reads a stack map frame type and stores it at the given index in the + * given array. + * + * @param frame + * the array where the parsed type must be stored. + * @param index + * the index in 'frame' where the parsed type must be stored. + * @param v + * the start offset of the stack map frame type to read. + * @param buf + * a buffer to read strings. + * @param labels + * the labels of the method currently being parsed, indexed by + * their offset. If the parsed type is an Uninitialized type, a + * new label for the corresponding NEW instruction is stored in + * this array if it does not already exist. + * @return the offset of the first byte after the parsed type. + */ + private int readFrameType(final Object[] frame, final int index, int v, + final char[] buf, final Label[] labels) { + int type = b[v++] & 0xFF; + switch (type) { + case 0: + frame[index] = Opcodes.TOP; + break; + case 1: + frame[index] = Opcodes.INTEGER; + break; + case 2: + frame[index] = Opcodes.FLOAT; + break; + case 3: + frame[index] = Opcodes.DOUBLE; + break; + case 4: + frame[index] = Opcodes.LONG; + break; + case 5: + frame[index] = Opcodes.NULL; + break; + case 6: + frame[index] = Opcodes.UNINITIALIZED_THIS; + break; + case 7: // Object + frame[index] = readClass(v, buf); + v += 2; + break; + default: // Uninitialized + frame[index] = readLabel(readUnsignedShort(v), labels); + v += 2; + } + return v; + } + + /** + * Returns the label corresponding to the given offset. The default + * implementation of this method creates a label for the given offset if it + * has not been already created. + * + * @param offset + * a bytecode offset in a method. + * @param labels + * the already created labels, indexed by their offset. If a + * label already exists for offset this method must not create a + * new one. Otherwise it must store the new label in this array. + * @return a non null Label, which must be equal to labels[offset]. + */ + protected Label readLabel(int offset, Label[] labels) { + if (labels[offset] == null) { + labels[offset] = new Label(); + } + return labels[offset]; + } + + /** + * Returns the start index of the attribute_info structure of this class. + * + * @return the start index of the attribute_info structure of this class. 
+ */ + private int getAttributes() { + // skips the header + int u = header + 8 + readUnsignedShort(header + 6) * 2; + // skips fields and methods + for (int i = readUnsignedShort(u); i > 0; --i) { + for (int j = readUnsignedShort(u + 8); j > 0; --j) { + u += 6 + readInt(u + 12); + } + u += 8; + } + u += 2; + for (int i = readUnsignedShort(u); i > 0; --i) { + for (int j = readUnsignedShort(u + 8); j > 0; --j) { + u += 6 + readInt(u + 12); + } + u += 8; + } + // the attribute_info structure starts just after the methods + return u + 2; + } + + /** + * Reads an attribute in {@link #b b}. + * + * @param attrs + * prototypes of the attributes that must be parsed during the + * visit of the class. Any attribute whose type is not equal to + * the type of one of the prototypes is ignored (i.e. an empty + * {@link Attribute} instance is returned). + * @param type + * the type of the attribute. + * @param off + * index of the first byte of the attribute's content in + * {@link #b b}. The 6 attribute header bytes, containing the + * type and the length of the attribute, are not taken into + * account here (they have already been read). + * @param len + * the length of the attribute's content. + * @param buf + * buffer to be used to call {@link #readUTF8 readUTF8}, + * {@link #readClass(int,char[]) readClass} or {@link #readConst + * readConst}. + * @param codeOff + * index of the first byte of the code attribute's content in + * {@link #b b}, or -1 if the attribute to be read is not a code + * attribute. The 6 attribute header bytes, containing the type + * and the length of the attribute, are not taken into account + * here. + * @param labels + * the labels of the method's code, or null if the + * attribute to be read is not a code attribute. + * @return the attribute that has been read, or null to skip this + * attribute. + */ + private Attribute readAttribute(final Attribute[] attrs, final String type, + final int off, final int len, final char[] buf, final int codeOff, + final Label[] labels) { + for (int i = 0; i < attrs.length; ++i) { + if (attrs[i].type.equals(type)) { + return attrs[i].read(this, off, len, buf, codeOff, labels); + } + } + return new Attribute(type).read(this, off, len, null, -1, null); + } + + // ------------------------------------------------------------------------ + // Utility methods: low level parsing + // ------------------------------------------------------------------------ + + /** + * Returns the number of constant pool items in {@link #b b}. + * + * @return the number of constant pool items in {@link #b b}. + */ + public int getItemCount() { + return items.length; + } + + /** + * Returns the start index of the constant pool item in {@link #b b}, plus + * one. This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param item + * the index of a constant pool item. + * @return the start index of the constant pool item in {@link #b b}, plus + * one. + */ + public int getItem(final int item) { + return items[item]; + } + + /** + * Returns the maximum length of the strings contained in the constant pool + * of the class. + * + * @return the maximum length of the strings contained in the constant pool + * of the class. + */ + public int getMaxStringLength() { + return maxStringLength; + } + + /** + * Reads a byte value in {@link #b b}. This method is intended for + * {@link Attribute} sub classes, and is normally not needed by class + * generators or adapters.
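+ *
+ * <p>A hedged sketch of the intended use: a custom {@link Attribute}
+ * subclass decoding its own content with these low level readers (the
+ * attribute name "MyAttr" and its one-short payload are hypothetical):
+ * <pre>{@code
+ * class MyAttr extends Attribute {
+ *     int value;
+ *     MyAttr() { super("MyAttr"); }
+ *     protected Attribute read(ClassReader cr, int off, int len,
+ *             char[] buf, int codeOff, Label[] labels) {
+ *         MyAttr a = new MyAttr();
+ *         a.value = cr.readUnsignedShort(off); // content starts at off
+ *         return a;
+ *     }
+ * }
+ * }</pre>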
+ * + * @param index + * the start index of the value to be read in {@link #b b}. + * @return the read value. + */ + public int readByte(final int index) { + return b[index] & 0xFF; + } + + /** + * Reads an unsigned short value in {@link #b b}. This method is intended + * for {@link Attribute} sub classes, and is normally not needed by class + * generators or adapters. + * + * @param index + * the start index of the value to be read in {@link #b b}. + * @return the read value. + */ + public int readUnsignedShort(final int index) { + byte[] b = this.b; + return ((b[index] & 0xFF) << 8) | (b[index + 1] & 0xFF); + } + + /** + * Reads a signed short value in {@link #b b}. This method is intended + * for {@link Attribute} sub classes, and is normally not needed by class + * generators or adapters. + * + * @param index + * the start index of the value to be read in {@link #b b}. + * @return the read value. + */ + public short readShort(final int index) { + byte[] b = this.b; + return (short) (((b[index] & 0xFF) << 8) | (b[index + 1] & 0xFF)); + } + + /** + * Reads a signed int value in {@link #b b}. This method is intended for + * {@link Attribute} sub classes, and is normally not needed by class + * generators or adapters. + * + * @param index + * the start index of the value to be read in {@link #b b}. + * @return the read value. + */ + public int readInt(final int index) { + byte[] b = this.b; + return ((b[index] & 0xFF) << 24) | ((b[index + 1] & 0xFF) << 16) + | ((b[index + 2] & 0xFF) << 8) | (b[index + 3] & 0xFF); + } + + /** + * Reads a signed long value in {@link #b b}. This method is intended for + * {@link Attribute} sub classes, and is normally not needed by class + * generators or adapters. + * + * @param index + * the start index of the value to be read in {@link #b b}. + * @return the read value. + */ + public long readLong(final int index) { + long l1 = readInt(index); + long l0 = readInt(index + 4) & 0xFFFFFFFFL; + return (l1 << 32) | l0; + } + + /** + * Reads an UTF8 string constant pool item in {@link #b b}. This method + * is intended for {@link Attribute} sub classes, and is normally not needed + * by class generators or adapters. + * + * @param index + * the start index of an unsigned short value in {@link #b b}, + * whose value is the index of an UTF8 constant pool item. + * @param buf + * buffer to be used to read the item. This buffer must be + * sufficiently large. It is not automatically resized. + * @return the String corresponding to the specified UTF8 item. + */ + public String readUTF8(int index, final char[] buf) { + int item = readUnsignedShort(index); + if (index == 0 || item == 0) { + return null; + } + String s = strings[item]; + if (s != null) { + return s; + } + index = items[item]; + return strings[item] = readUTF(index + 2, readUnsignedShort(index), buf); + } + + /** + * Reads UTF8 string in {@link #b b}. + * + * @param index + * start offset of the UTF8 string to be read. + * @param utfLen + * length of the UTF8 string to be read. + * @param buf + * buffer to be used to read the string. This buffer must be + * sufficiently large. It is not automatically resized. + * @return the String corresponding to the specified UTF8 string. 
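+ * For example, the two byte sequence 0xC3 0xA9 decodes to the single
+ * character 'é' (U+00E9).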
+ */ + private String readUTF(int index, final int utfLen, final char[] buf) { + int endIndex = index + utfLen; + byte[] b = this.b; + int strLen = 0; + int c; + int st = 0; + char cc = 0; + while (index < endIndex) { + c = b[index++]; + switch (st) { + case 0: + c = c & 0xFF; + if (c < 0x80) { // 0xxxxxxx + buf[strLen++] = (char) c; + } else if (c < 0xE0 && c > 0xBF) { // 110x xxxx 10xx xxxx + cc = (char) (c & 0x1F); + st = 1; + } else { // 1110 xxxx 10xx xxxx 10xx xxxx + cc = (char) (c & 0x0F); + st = 2; + } + break; + + case 1: // byte 2 of 2-byte char or byte 3 of 3-byte char + buf[strLen++] = (char) ((cc << 6) | (c & 0x3F)); + st = 0; + break; + + case 2: // byte 2 of 3-byte char + cc = (char) ((cc << 6) | (c & 0x3F)); + st = 1; + break; + } + } + return new String(buf, 0, strLen); + } + + /** + * Reads a class constant pool item in {@link #b b}. This method is + * intended for {@link Attribute} sub classes, and is normally not needed by + * class generators or adapters. + * + * @param index + * the start index of an unsigned short value in {@link #b b}, + * whose value is the index of a class constant pool item. + * @param buf + * buffer to be used to read the item. This buffer must be + * sufficiently large. It is not automatically resized. + * @return the String corresponding to the specified class item. + */ + public String readClass(final int index, final char[] buf) { + // computes the start index of the CONSTANT_Class item in b + // and reads the CONSTANT_Utf8 item designated by + // the first two bytes of this CONSTANT_Class item + return readUTF8(items[readUnsignedShort(index)], buf); + } + + /** + * Reads a numeric or string constant pool item in {@link #b b}. This + * method is intended for {@link Attribute} sub classes, and is normally not + * needed by class generators or adapters. + * + * @param item + * the index of a constant pool item. + * @param buf + * buffer to be used to read the item. This buffer must be + * sufficiently large. It is not automatically resized. + * @return the {@link Integer}, {@link Float}, {@link Long}, {@link Double}, + * {@link String}, {@link Type} or {@link Handle} corresponding to + * the given constant pool item. 
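+ * For example (an illustrative sketch, not part of the original source),
+ * the operand of an LDC instruction, a one byte constant pool index, can
+ * be resolved to its boxed value as follows, where insnOff is the offset
+ * of the LDC opcode:
+ *
+ * <pre>
+ * Object cst = cr.readConst(cr.readByte(insnOff + 1), buf);
+ * </pre>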
+ */ + public Object readConst(final int item, final char[] buf) { + int index = items[item]; + switch (b[index - 1]) { + case ClassWriter.INT: + return new Integer(readInt(index)); + case ClassWriter.FLOAT: + return new Float(Float.intBitsToFloat(readInt(index))); + case ClassWriter.LONG: + return new Long(readLong(index)); + case ClassWriter.DOUBLE: + return new Double(Double.longBitsToDouble(readLong(index))); + case ClassWriter.CLASS: + return Type.getObjectType(readUTF8(index, buf)); + case ClassWriter.STR: + return readUTF8(index, buf); + case ClassWriter.MTYPE: + return Type.getMethodType(readUTF8(index, buf)); + default: // case ClassWriter.HANDLE_BASE + [1..9]: + int tag = readByte(index); + int[] items = this.items; + int cpIndex = items[readUnsignedShort(index + 1)]; + String owner = readClass(cpIndex, buf); + cpIndex = items[readUnsignedShort(cpIndex + 2)]; + String name = readUTF8(cpIndex, buf); + String desc = readUTF8(cpIndex + 2, buf); + return new Handle(tag, owner, name, desc); + } + } +} diff --git a/src/com/sleepycat/asm/ClassVisitor.java b/src/com/sleepycat/asm/ClassVisitor.java new file mode 100644 index 0000000..34d785c --- /dev/null +++ b/src/com/sleepycat/asm/ClassVisitor.java @@ -0,0 +1,320 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A visitor to visit a Java class. The methods of this class must be called in + * the following order: visit [ visitSource ] [ + * visitOuterClass ] ( visitAnnotation | + * visitTypeAnnotation | visitAttribute )* ( + * visitInnerClass | visitField | visitMethod )* + * visitEnd. + * + * @author Eric Bruneton + */ +public abstract class ClassVisitor { + + /** + * The ASM API version implemented by this visitor. The value of this field + * must be one of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. 
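+ * <p>
+ * For example (an illustrative sketch, not part of the original source),
+ * a delegating subclass fixes this version in its constructor:
+ *
+ * <pre>
+ * class MyClassVisitor extends ClassVisitor {
+ *     MyClassVisitor(ClassVisitor cv) {
+ *         super(Opcodes.ASM5, cv);
+ *     }
+ * }
+ * </pre>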
+ */ + protected final int api; + + /** + * The class visitor to which this visitor must delegate method calls. May + * be null. + */ + protected ClassVisitor cv; + + /** + * Constructs a new {@link ClassVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + */ + public ClassVisitor(final int api) { + this(api, null); + } + + /** + * Constructs a new {@link ClassVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + * @param cv + * the class visitor to which this visitor must delegate method + * calls. May be null. + */ + public ClassVisitor(final int api, final ClassVisitor cv) { + if (api != Opcodes.ASM4 && api != Opcodes.ASM5) { + throw new IllegalArgumentException(); + } + this.api = api; + this.cv = cv; + } + + /** + * Visits the header of the class. + * + * @param version + * the class version. + * @param access + * the class's access flags (see {@link Opcodes}). This parameter + * also indicates if the class is deprecated. + * @param name + * the internal name of the class (see + * {@link Type#getInternalName() getInternalName}). + * @param signature + * the signature of this class. May be null if the class + * is not a generic one, and does not extend or implement generic + * classes or interfaces. + * @param superName + * the internal of name of the super class (see + * {@link Type#getInternalName() getInternalName}). For + * interfaces, the super class is {@link Object}. May be + * null, but only for the {@link Object} class. + * @param interfaces + * the internal names of the class's interfaces (see + * {@link Type#getInternalName() getInternalName}). May be + * null. + */ + public void visit(int version, int access, String name, String signature, + String superName, String[] interfaces) { + if (cv != null) { + cv.visit(version, access, name, signature, superName, interfaces); + } + } + + /** + * Visits the source of the class. + * + * @param source + * the name of the source file from which the class was compiled. + * May be null. + * @param debug + * additional debug information to compute the correspondance + * between source and compiled elements of the class. May be + * null. + */ + public void visitSource(String source, String debug) { + if (cv != null) { + cv.visitSource(source, debug); + } + } + + /** + * Visits the enclosing class of the class. This method must be called only + * if the class has an enclosing class. + * + * @param owner + * internal name of the enclosing class of the class. + * @param name + * the name of the method that contains the class, or + * null if the class is not enclosed in a method of its + * enclosing class. + * @param desc + * the descriptor of the method that contains the class, or + * null if the class is not enclosed in a method of its + * enclosing class. + */ + public void visitOuterClass(String owner, String name, String desc) { + if (cv != null) { + cv.visitOuterClass(owner, name, desc); + } + } + + /** + * Visits an annotation of the class. + * + * @param desc + * the class descriptor of the annotation class. + * @param visible + * true if the annotation is visible at runtime. + * @return a visitor to visit the annotation values, or null if + * this visitor is not interested in visiting this annotation. 
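+ * For example (illustrative, not part of the original source), a class
+ * annotated with java.lang.Deprecated, which is retained at runtime, is
+ * visited as:
+ *
+ * <pre>
+ * cv.visitAnnotation("Ljava/lang/Deprecated;", true);
+ * </pre>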
+ */ + public AnnotationVisitor visitAnnotation(String desc, boolean visible) { + if (cv != null) { + return cv.visitAnnotation(desc, visible); + } + return null; + } + + /** + * Visits an annotation on a type in the class signature. + * + * @param typeRef + * a reference to the annotated type. The sort of this type + * reference must be {@link TypeReference#CLASS_TYPE_PARAMETER + * CLASS_TYPE_PARAMETER}, + * {@link TypeReference#CLASS_TYPE_PARAMETER_BOUND + * CLASS_TYPE_PARAMETER_BOUND} or + * {@link TypeReference#CLASS_EXTENDS CLASS_EXTENDS}. See + * {@link TypeReference}. + * @param typePath + * the path to the annotated type argument, wildcard bound, array + * element type, or static inner type within 'typeRef'. May be + * null if the annotation targets 'typeRef' as a whole. + * @param desc + * the class descriptor of the annotation class. + * @param visible + * true if the annotation is visible at runtime. + * @return a visitor to visit the annotation values, or null if + * this visitor is not interested in visiting this annotation. + */ + public AnnotationVisitor visitTypeAnnotation(int typeRef, + TypePath typePath, String desc, boolean visible) { + if (api < Opcodes.ASM5) { + throw new RuntimeException(); + } + if (cv != null) { + return cv.visitTypeAnnotation(typeRef, typePath, desc, visible); + } + return null; + } + + /** + * Visits a non standard attribute of the class. + * + * @param attr + * an attribute. + */ + public void visitAttribute(Attribute attr) { + if (cv != null) { + cv.visitAttribute(attr); + } + } + + /** + * Visits information about an inner class. This inner class is not + * necessarily a member of the class being visited. + * + * @param name + * the internal name of an inner class (see + * {@link Type#getInternalName() getInternalName}). + * @param outerName + * the internal name of the class to which the inner class + * belongs (see {@link Type#getInternalName() getInternalName}). + * May be null for not member classes. + * @param innerName + * the (simple) name of the inner class inside its enclosing + * class. May be null for anonymous inner classes. + * @param access + * the access flags of the inner class as originally declared in + * the enclosing class. + */ + public void visitInnerClass(String name, String outerName, + String innerName, int access) { + if (cv != null) { + cv.visitInnerClass(name, outerName, innerName, access); + } + } + + /** + * Visits a field of the class. + * + * @param access + * the field's access flags (see {@link Opcodes}). This parameter + * also indicates if the field is synthetic and/or deprecated. + * @param name + * the field's name. + * @param desc + * the field's descriptor (see {@link Type Type}). + * @param signature + * the field's signature. May be null if the field's + * type does not use generic types. + * @param value + * the field's initial value. This parameter, which may be + * null if the field does not have an initial value, + * must be an {@link Integer}, a {@link Float}, a {@link Long}, a + * {@link Double} or a {@link String} (for int, + * float, long or String fields + * respectively). This parameter is only used for static + * fields. Its value is ignored for non static fields, which + * must be initialized through bytecode instructions in + * constructors or methods. + * @return a visitor to visit field annotations and attributes, or + * null if this class visitor is not interested in visiting + * these annotations and attributes. 
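+ * For example (illustrative, not part of the original source), a field
+ * declared in Java as "private int count;" corresponds to the call:
+ *
+ * <pre>
+ * cv.visitField(Opcodes.ACC_PRIVATE, "count", "I", null, null);
+ * </pre>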
+ */ + public FieldVisitor visitField(int access, String name, String desc, + String signature, Object value) { + if (cv != null) { + return cv.visitField(access, name, desc, signature, value); + } + return null; + } + + /** + * Visits a method of the class. This method must return a new + * {@link MethodVisitor} instance (or null) each time it is called, + * i.e., it should not return a previously returned visitor. + * + * @param access + * the method's access flags (see {@link Opcodes}). This + * parameter also indicates if the method is synthetic and/or + * deprecated. + * @param name + * the method's name. + * @param desc + * the method's descriptor (see {@link Type Type}). + * @param signature + * the method's signature. May be null if the method + * parameters, return type and exceptions do not use generic + * types. + * @param exceptions + * the internal names of the method's exception classes (see + * {@link Type#getInternalName() getInternalName}). May be + * null. + * @return an object to visit the byte code of the method, or null + * if this class visitor is not interested in visiting the code of + * this method. + */ + public MethodVisitor visitMethod(int access, String name, String desc, + String signature, String[] exceptions) { + if (cv != null) { + return cv.visitMethod(access, name, desc, signature, exceptions); + } + return null; + } + + /** + * Visits the end of the class. This method, which is the last one to be + * called, is used to inform the visitor that all the fields and methods of + * the class have been visited. + */ + public void visitEnd() { + if (cv != null) { + cv.visitEnd(); + } + } +} diff --git a/src/com/sleepycat/asm/ClassWriter.java b/src/com/sleepycat/asm/ClassWriter.java new file mode 100644 index 0000000..7f8ce28 --- /dev/null +++ b/src/com/sleepycat/asm/ClassWriter.java @@ -0,0 +1,1776 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +package com.sleepycat.asm; + +/** + * A {@link ClassVisitor} that generates classes in bytecode form. More + * precisely this visitor generates a byte array conforming to the Java class + * file format. It can be used alone, to generate a Java class "from scratch", + * or with one or more {@link ClassReader ClassReader} and adapter class visitor + * to generate a modified class from one or more existing Java classes. + * + * @author Eric Bruneton + */ +public class ClassWriter extends ClassVisitor { + + /** + * Flag to automatically compute the maximum stack size and the maximum + * number of local variables of methods. If this flag is set, then the + * arguments of the {@link MethodVisitor#visitMaxs visitMaxs} method of the + * {@link MethodVisitor} returned by the {@link #visitMethod visitMethod} + * method will be ignored, and computed automatically from the signature and + * the bytecode of each method. + * + * @see #ClassWriter(int) + */ + public static final int COMPUTE_MAXS = 1; + + /** + * Flag to automatically compute the stack map frames of methods from + * scratch. If this flag is set, then the calls to the + * {@link MethodVisitor#visitFrame} method are ignored, and the stack map + * frames are recomputed from the methods bytecode. The arguments of the + * {@link MethodVisitor#visitMaxs visitMaxs} method are also ignored and + * recomputed from the bytecode. In other words, computeFrames implies + * computeMaxs. + * + * @see #ClassWriter(int) + */ + public static final int COMPUTE_FRAMES = 2; + + /** + * Pseudo access flag to distinguish between the synthetic attribute and the + * synthetic access flag. + */ + static final int ACC_SYNTHETIC_ATTRIBUTE = 0x40000; + + /** + * Factor to convert from ACC_SYNTHETIC_ATTRIBUTE to Opcode.ACC_SYNTHETIC. + */ + static final int TO_ACC_SYNTHETIC = ACC_SYNTHETIC_ATTRIBUTE + / Opcodes.ACC_SYNTHETIC; + + /** + * The type of instructions without any argument. + */ + static final int NOARG_INSN = 0; + + /** + * The type of instructions with an signed byte argument. + */ + static final int SBYTE_INSN = 1; + + /** + * The type of instructions with an signed short argument. + */ + static final int SHORT_INSN = 2; + + /** + * The type of instructions with a local variable index argument. + */ + static final int VAR_INSN = 3; + + /** + * The type of instructions with an implicit local variable index argument. + */ + static final int IMPLVAR_INSN = 4; + + /** + * The type of instructions with a type descriptor argument. + */ + static final int TYPE_INSN = 5; + + /** + * The type of field and method invocations instructions. + */ + static final int FIELDORMETH_INSN = 6; + + /** + * The type of the INVOKEINTERFACE/INVOKEDYNAMIC instruction. + */ + static final int ITFMETH_INSN = 7; + + /** + * The type of the INVOKEDYNAMIC instruction. + */ + static final int INDYMETH_INSN = 8; + + /** + * The type of instructions with a 2 bytes bytecode offset label. + */ + static final int LABEL_INSN = 9; + + /** + * The type of instructions with a 4 bytes bytecode offset label. + */ + static final int LABELW_INSN = 10; + + /** + * The type of the LDC instruction. + */ + static final int LDC_INSN = 11; + + /** + * The type of the LDC_W and LDC2_W instructions. + */ + static final int LDCW_INSN = 12; + + /** + * The type of the IINC instruction. + */ + static final int IINC_INSN = 13; + + /** + * The type of the TABLESWITCH instruction. + */ + static final int TABL_INSN = 14; + + /** + * The type of the LOOKUPSWITCH instruction. 
+ */ + static final int LOOK_INSN = 15; + + /** + * The type of the MULTIANEWARRAY instruction. + */ + static final int MANA_INSN = 16; + + /** + * The type of the WIDE instruction. + */ + static final int WIDE_INSN = 17; + + /** + * The instruction types of all JVM opcodes. + */ + static final byte[] TYPE; + + /** + * The type of CONSTANT_Class constant pool items. + */ + static final int CLASS = 7; + + /** + * The type of CONSTANT_Fieldref constant pool items. + */ + static final int FIELD = 9; + + /** + * The type of CONSTANT_Methodref constant pool items. + */ + static final int METH = 10; + + /** + * The type of CONSTANT_InterfaceMethodref constant pool items. + */ + static final int IMETH = 11; + + /** + * The type of CONSTANT_String constant pool items. + */ + static final int STR = 8; + + /** + * The type of CONSTANT_Integer constant pool items. + */ + static final int INT = 3; + + /** + * The type of CONSTANT_Float constant pool items. + */ + static final int FLOAT = 4; + + /** + * The type of CONSTANT_Long constant pool items. + */ + static final int LONG = 5; + + /** + * The type of CONSTANT_Double constant pool items. + */ + static final int DOUBLE = 6; + + /** + * The type of CONSTANT_NameAndType constant pool items. + */ + static final int NAME_TYPE = 12; + + /** + * The type of CONSTANT_Utf8 constant pool items. + */ + static final int UTF8 = 1; + + /** + * The type of CONSTANT_MethodType constant pool items. + */ + static final int MTYPE = 16; + + /** + * The type of CONSTANT_MethodHandle constant pool items. + */ + static final int HANDLE = 15; + + /** + * The type of CONSTANT_InvokeDynamic constant pool items. + */ + static final int INDY = 18; + + /** + * The base value for all CONSTANT_MethodHandle constant pool items. + * Internally, ASM store the 9 variations of CONSTANT_MethodHandle into 9 + * different items. + */ + static final int HANDLE_BASE = 20; + + /** + * Normal type Item stored in the ClassWriter {@link ClassWriter#typeTable}, + * instead of the constant pool, in order to avoid clashes with normal + * constant pool items in the ClassWriter constant pool's hash table. + */ + static final int TYPE_NORMAL = 30; + + /** + * Uninitialized type Item stored in the ClassWriter + * {@link ClassWriter#typeTable}, instead of the constant pool, in order to + * avoid clashes with normal constant pool items in the ClassWriter constant + * pool's hash table. + */ + static final int TYPE_UNINIT = 31; + + /** + * Merged type Item stored in the ClassWriter {@link ClassWriter#typeTable}, + * instead of the constant pool, in order to avoid clashes with normal + * constant pool items in the ClassWriter constant pool's hash table. + */ + static final int TYPE_MERGED = 32; + + /** + * The type of BootstrapMethods items. These items are stored in a special + * class attribute named BootstrapMethods and not in the constant pool. + */ + static final int BSM = 33; + + /** + * The class reader from which this class writer was constructed, if any. + */ + ClassReader cr; + + /** + * Minor and major version numbers of the class to be generated. + */ + int version; + + /** + * Index of the next item to be added in the constant pool. + */ + int index; + + /** + * The constant pool of this class. + */ + final ByteVector pool; + + /** + * The constant pool's hash table data. + */ + Item[] items; + + /** + * The threshold of the constant pool's hash table. + */ + int threshold; + + /** + * A reusable key used to look for items in the {@link #items} hash table. 
+ */ + final Item key; + + /** + * A reusable key used to look for items in the {@link #items} hash table. + */ + final Item key2; + + /** + * A reusable key used to look for items in the {@link #items} hash table. + */ + final Item key3; + + /** + * A reusable key used to look for items in the {@link #items} hash table. + */ + final Item key4; + + /** + * A type table used to temporarily store internal names that will not + * necessarily be stored in the constant pool. This type table is used by + * the control flow and data flow analysis algorithm used to compute stack + * map frames from scratch. This array associates to each index i + * the Item whose index is i. All Item objects stored in this array + * are also stored in the {@link #items} hash table. These two arrays allow + * to retrieve an Item from its index or, conversely, to get the index of an + * Item from its value. Each Item stores an internal name in its + * {@link Item#strVal1} field. + */ + Item[] typeTable; + + /** + * Number of elements in the {@link #typeTable} array. + */ + private short typeCount; + + /** + * The access flags of this class. + */ + private int access; + + /** + * The constant pool item that contains the internal name of this class. + */ + private int name; + + /** + * The internal name of this class. + */ + String thisName; + + /** + * The constant pool item that contains the signature of this class. + */ + private int signature; + + /** + * The constant pool item that contains the internal name of the super class + * of this class. + */ + private int superName; + + /** + * Number of interfaces implemented or extended by this class or interface. + */ + private int interfaceCount; + + /** + * The interfaces implemented or extended by this class or interface. More + * precisely, this array contains the indexes of the constant pool items + * that contain the internal names of these interfaces. + */ + private int[] interfaces; + + /** + * The index of the constant pool item that contains the name of the source + * file from which this class was compiled. + */ + private int sourceFile; + + /** + * The SourceDebug attribute of this class. + */ + private ByteVector sourceDebug; + + /** + * The constant pool item that contains the name of the enclosing class of + * this class. + */ + private int enclosingMethodOwner; + + /** + * The constant pool item that contains the name and descriptor of the + * enclosing method of this class. + */ + private int enclosingMethod; + + /** + * The runtime visible annotations of this class. + */ + private AnnotationWriter anns; + + /** + * The runtime invisible annotations of this class. + */ + private AnnotationWriter ianns; + + /** + * The runtime visible type annotations of this class. + */ + private AnnotationWriter tanns; + + /** + * The runtime invisible type annotations of this class. + */ + private AnnotationWriter itanns; + + /** + * The non standard attributes of this class. + */ + private Attribute attrs; + + /** + * The number of entries in the InnerClasses attribute. + */ + private int innerClassesCount; + + /** + * The InnerClasses attribute. + */ + private ByteVector innerClasses; + + /** + * The number of entries in the BootstrapMethods attribute. + */ + int bootstrapMethodsCount; + + /** + * The BootstrapMethods attribute. + */ + ByteVector bootstrapMethods; + + /** + * The fields of this class. These fields are stored in a linked list of + * {@link FieldWriter} objects, linked to each other by their + * {@link FieldWriter#fv} field. 
This field stores the first element of this + * list. + */ + FieldWriter firstField; + + /** + * The fields of this class. These fields are stored in a linked list of + * {@link FieldWriter} objects, linked to each other by their + * {@link FieldWriter#fv} field. This field stores the last element of this + * list. + */ + FieldWriter lastField; + + /** + * The methods of this class. These methods are stored in a linked list of + * {@link MethodWriter} objects, linked to each other by their + * {@link MethodWriter#mv} field. This field stores the first element of + * this list. + */ + MethodWriter firstMethod; + + /** + * The methods of this class. These methods are stored in a linked list of + * {@link MethodWriter} objects, linked to each other by their + * {@link MethodWriter#mv} field. This field stores the last element of this + * list. + */ + MethodWriter lastMethod; + + /** + * true if the maximum stack size and number of local variables + * must be automatically computed. + */ + private boolean computeMaxs; + + /** + * true if the stack map frames must be recomputed from scratch. + */ + private boolean computeFrames; + + /** + * true if the stack map tables of this class are invalid. The + * {@link MethodWriter#resizeInstructions} method cannot transform existing + * stack map tables, and so produces potentially invalid classes when it is + * executed. In this case the class is reread and rewritten with the + * {@link #COMPUTE_FRAMES} option (the resizeInstructions method can resize + * stack map tables when this option is used). + */ + boolean invalidFrames; + + // ------------------------------------------------------------------------ + // Static initializer + // ------------------------------------------------------------------------ + + /** + * Computes the instruction types of JVM opcodes. 
+ */ + static { + int i; + byte[] b = new byte[220]; + String s = "AAAAAAAAAAAAAAAABCLMMDDDDDEEEEEEEEEEEEEEEEEEEEAAAAAAAADD" + + "DDDEEEEEEEEEEEEEEEEEEEEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + + "AAAAAAAAAAAAAAAAANAAAAAAAAAAAAAAAAAAAAJJJJJJJJJJJJJJJJDOPAA" + + "AAAAGGGGGGGHIFBFAAFFAARQJJKKJJJJJJJJJJJJJJJJJJ"; + for (i = 0; i < b.length; ++i) { + b[i] = (byte) (s.charAt(i) - 'A'); + } + TYPE = b; + + // code to generate the above string + // + // // SBYTE_INSN instructions + // b[Constants.NEWARRAY] = SBYTE_INSN; + // b[Constants.BIPUSH] = SBYTE_INSN; + // + // // SHORT_INSN instructions + // b[Constants.SIPUSH] = SHORT_INSN; + // + // // (IMPL)VAR_INSN instructions + // b[Constants.RET] = VAR_INSN; + // for (i = Constants.ILOAD; i <= Constants.ALOAD; ++i) { + // b[i] = VAR_INSN; + // } + // for (i = Constants.ISTORE; i <= Constants.ASTORE; ++i) { + // b[i] = VAR_INSN; + // } + // for (i = 26; i <= 45; ++i) { // ILOAD_0 to ALOAD_3 + // b[i] = IMPLVAR_INSN; + // } + // for (i = 59; i <= 78; ++i) { // ISTORE_0 to ASTORE_3 + // b[i] = IMPLVAR_INSN; + // } + // + // // TYPE_INSN instructions + // b[Constants.NEW] = TYPE_INSN; + // b[Constants.ANEWARRAY] = TYPE_INSN; + // b[Constants.CHECKCAST] = TYPE_INSN; + // b[Constants.INSTANCEOF] = TYPE_INSN; + // + // // (Set)FIELDORMETH_INSN instructions + // for (i = Constants.GETSTATIC; i <= Constants.INVOKESTATIC; ++i) { + // b[i] = FIELDORMETH_INSN; + // } + // b[Constants.INVOKEINTERFACE] = ITFMETH_INSN; + // b[Constants.INVOKEDYNAMIC] = INDYMETH_INSN; + // + // // LABEL(W)_INSN instructions + // for (i = Constants.IFEQ; i <= Constants.JSR; ++i) { + // b[i] = LABEL_INSN; + // } + // b[Constants.IFNULL] = LABEL_INSN; + // b[Constants.IFNONNULL] = LABEL_INSN; + // b[200] = LABELW_INSN; // GOTO_W + // b[201] = LABELW_INSN; // JSR_W + // // temporary opcodes used internally by ASM - see Label and + // MethodWriter + // for (i = 202; i < 220; ++i) { + // b[i] = LABEL_INSN; + // } + // + // // LDC(_W) instructions + // b[Constants.LDC] = LDC_INSN; + // b[19] = LDCW_INSN; // LDC_W + // b[20] = LDCW_INSN; // LDC2_W + // + // // special instructions + // b[Constants.IINC] = IINC_INSN; + // b[Constants.TABLESWITCH] = TABL_INSN; + // b[Constants.LOOKUPSWITCH] = LOOK_INSN; + // b[Constants.MULTIANEWARRAY] = MANA_INSN; + // b[196] = WIDE_INSN; // WIDE + // + // for (i = 0; i < b.length; ++i) { + // System.err.print((char)('A' + b[i])); + // } + // System.err.println(); + } + + // ------------------------------------------------------------------------ + // Constructor + // ------------------------------------------------------------------------ + + /** + * Constructs a new {@link ClassWriter} object. + * + * @param flags + * option flags that can be used to modify the default behavior + * of this class. See {@link #COMPUTE_MAXS}, + * {@link #COMPUTE_FRAMES}. + */ + public ClassWriter(final int flags) { + super(Opcodes.ASM5); + index = 1; + pool = new ByteVector(); + items = new Item[256]; + threshold = (int) (0.75d * items.length); + key = new Item(); + key2 = new Item(); + key3 = new Item(); + key4 = new Item(); + this.computeMaxs = (flags & COMPUTE_MAXS) != 0; + this.computeFrames = (flags & COMPUTE_FRAMES) != 0; + } + + /** + * Constructs a new {@link ClassWriter} object and enables optimizations for + * "mostly add" bytecode transformations. These optimizations are the + * following: + * + *
+ * <ul>
+ * <li>The constant pool from the original class is copied as is in the new
+ * class, which saves time. New constant pool entries will be added at the
+ * end if necessary, but unused constant pool entries won't be
+ * removed.</li>
+ * <li>Methods that are not transformed are copied as is in the new class,
+ * directly from the original class bytecode (i.e. without emitting visit
+ * events for all the method instructions), which saves a lot of
+ * time. Untransformed methods are detected by the fact that the
+ * {@link ClassReader} receives {@link MethodVisitor} objects that come from
+ * a {@link ClassWriter} (and not from any other {@link ClassVisitor}
+ * instance).</li>
+ * </ul>
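+ * <p>
+ * An illustrative sketch of a "mostly add" transformation (not part of the
+ * original source; MyClassAdapter and originalBytecode are hypothetical):
+ *
+ * <pre>
+ * ClassReader cr = new ClassReader(originalBytecode);
+ * ClassWriter cw = new ClassWriter(cr, 0);
+ * cr.accept(new MyClassAdapter(cw), 0);
+ * byte[] transformed = cw.toByteArray();
+ * </pre>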
    + * + * @param classReader + * the {@link ClassReader} used to read the original class. It + * will be used to copy the entire constant pool from the + * original class and also to copy other fragments of original + * bytecode where applicable. + * @param flags + * option flags that can be used to modify the default behavior + * of this class. These option flags do not affect methods + * that are copied as is in the new class. This means that the + * maximum stack size nor the stack frames will be computed for + * these methods. See {@link #COMPUTE_MAXS}, + * {@link #COMPUTE_FRAMES}. + */ + public ClassWriter(final ClassReader classReader, final int flags) { + this(flags); + classReader.copyPool(this); + this.cr = classReader; + } + + // ------------------------------------------------------------------------ + // Implementation of the ClassVisitor abstract class + // ------------------------------------------------------------------------ + + @Override + public final void visit(final int version, final int access, + final String name, final String signature, final String superName, + final String[] interfaces) { + this.version = version; + this.access = access; + this.name = newClass(name); + thisName = name; + if (ClassReader.SIGNATURES && signature != null) { + this.signature = newUTF8(signature); + } + this.superName = superName == null ? 0 : newClass(superName); + if (interfaces != null && interfaces.length > 0) { + interfaceCount = interfaces.length; + this.interfaces = new int[interfaceCount]; + for (int i = 0; i < interfaceCount; ++i) { + this.interfaces[i] = newClass(interfaces[i]); + } + } + } + + @Override + public final void visitSource(final String file, final String debug) { + if (file != null) { + sourceFile = newUTF8(file); + } + if (debug != null) { + sourceDebug = new ByteVector().encodeUTF8(debug, 0, + Integer.MAX_VALUE); + } + } + + @Override + public final void visitOuterClass(final String owner, final String name, + final String desc) { + enclosingMethodOwner = newClass(owner); + if (name != null && desc != null) { + enclosingMethod = newNameType(name, desc); + } + } + + @Override + public final AnnotationVisitor visitAnnotation(final String desc, + final boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write type, and reserve space for values count + bv.putShort(newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(this, true, bv, bv, 2); + if (visible) { + aw.next = anns; + anns = aw; + } else { + aw.next = ianns; + ianns = aw; + } + return aw; + } + + @Override + public final AnnotationVisitor visitTypeAnnotation(int typeRef, + TypePath typePath, final String desc, final boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write target_type and target_info + AnnotationWriter.putTarget(typeRef, typePath, bv); + // write type, and reserve space for values count + bv.putShort(newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(this, true, bv, bv, + bv.length - 2); + if (visible) { + aw.next = tanns; + tanns = aw; + } else { + aw.next = itanns; + itanns = aw; + } + return aw; + } + + @Override + public final void visitAttribute(final Attribute attr) { + attr.next = attrs; + attrs = attr; + } + + @Override + public final void visitInnerClass(final String name, + final String outerName, final String innerName, final int access) { + if (innerClasses == null) { + innerClasses = new ByteVector(); + } + // 
Sec. 4.7.6 of the JVMS states "Every CONSTANT_Class_info entry in the + // constant_pool table which represents a class or interface C that is + // not a package member must have exactly one corresponding entry in the + // classes array". To avoid duplicates we keep track in the intVal field + // of the Item of each CONSTANT_Class_info entry C whether an inner + // class entry has already been added for C (this field is unused for + // class entries, and changing its value does not change the hashcode + // and equality tests). If so we store the index of this inner class + // entry (plus one) in intVal. This hack allows duplicate detection in + // O(1) time. + Item nameItem = newClassItem(name); + if (nameItem.intVal == 0) { + ++innerClassesCount; + innerClasses.putShort(nameItem.index); + innerClasses.putShort(outerName == null ? 0 : newClass(outerName)); + innerClasses.putShort(innerName == null ? 0 : newUTF8(innerName)); + innerClasses.putShort(access); + nameItem.intVal = innerClassesCount; + } else { + // Compare the inner classes entry nameItem.intVal - 1 with the + // arguments of this method and throw an exception if there is a + // difference? + } + } + + @Override + public final FieldVisitor visitField(final int access, final String name, + final String desc, final String signature, final Object value) { + return new FieldWriter(this, access, name, desc, signature, value); + } + + @Override + public final MethodVisitor visitMethod(final int access, final String name, + final String desc, final String signature, final String[] exceptions) { + return new MethodWriter(this, access, name, desc, signature, + exceptions, computeMaxs, computeFrames); + } + + @Override + public final void visitEnd() { + } + + // ------------------------------------------------------------------------ + // Other public methods + // ------------------------------------------------------------------------ + + /** + * Returns the bytecode of the class that was build with this class writer. + * + * @return the bytecode of the class that was build with this class writer. 
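+ * <p>
+ * A minimal usage sketch (illustrative, not part of the original source;
+ * the class name is hypothetical):
+ *
+ * <pre>
+ * ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+ * cw.visit(Opcodes.V1_5, Opcodes.ACC_PUBLIC, "org/example/Empty", null,
+ *         "java/lang/Object", null);
+ * cw.visitEnd();
+ * byte[] bytecode = cw.toByteArray();
+ * </pre>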
+ */ + public byte[] toByteArray() { + if (index > 0xFFFF) { + throw new RuntimeException("Class file too large!"); + } + // computes the real size of the bytecode of this class + int size = 24 + 2 * interfaceCount; + int nbFields = 0; + FieldWriter fb = firstField; + while (fb != null) { + ++nbFields; + size += fb.getSize(); + fb = (FieldWriter) fb.fv; + } + int nbMethods = 0; + MethodWriter mb = firstMethod; + while (mb != null) { + ++nbMethods; + size += mb.getSize(); + mb = (MethodWriter) mb.mv; + } + int attributeCount = 0; + if (bootstrapMethods != null) { + // we put it as first attribute in order to improve a bit + // ClassReader.copyBootstrapMethods + ++attributeCount; + size += 8 + bootstrapMethods.length; + newUTF8("BootstrapMethods"); + } + if (ClassReader.SIGNATURES && signature != 0) { + ++attributeCount; + size += 8; + newUTF8("Signature"); + } + if (sourceFile != 0) { + ++attributeCount; + size += 8; + newUTF8("SourceFile"); + } + if (sourceDebug != null) { + ++attributeCount; + size += sourceDebug.length + 6; + newUTF8("SourceDebugExtension"); + } + if (enclosingMethodOwner != 0) { + ++attributeCount; + size += 10; + newUTF8("EnclosingMethod"); + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + ++attributeCount; + size += 6; + newUTF8("Deprecated"); + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((version & 0xFFFF) < Opcodes.V1_5 + || (access & ACC_SYNTHETIC_ATTRIBUTE) != 0) { + ++attributeCount; + size += 6; + newUTF8("Synthetic"); + } + } + if (innerClasses != null) { + ++attributeCount; + size += 8 + innerClasses.length; + newUTF8("InnerClasses"); + } + if (ClassReader.ANNOTATIONS && anns != null) { + ++attributeCount; + size += 8 + anns.getSize(); + newUTF8("RuntimeVisibleAnnotations"); + } + if (ClassReader.ANNOTATIONS && ianns != null) { + ++attributeCount; + size += 8 + ianns.getSize(); + newUTF8("RuntimeInvisibleAnnotations"); + } + if (ClassReader.ANNOTATIONS && tanns != null) { + ++attributeCount; + size += 8 + tanns.getSize(); + newUTF8("RuntimeVisibleTypeAnnotations"); + } + if (ClassReader.ANNOTATIONS && itanns != null) { + ++attributeCount; + size += 8 + itanns.getSize(); + newUTF8("RuntimeInvisibleTypeAnnotations"); + } + if (attrs != null) { + attributeCount += attrs.getCount(); + size += attrs.getSize(this, null, 0, -1, -1); + } + size += pool.length; + // allocates a byte vector of this size, in order to avoid unnecessary + // arraycopy operations in the ByteVector.enlarge() method + ByteVector out = new ByteVector(size); + out.putInt(0xCAFEBABE).putInt(version); + out.putShort(index).putByteArray(pool.data, 0, pool.length); + int mask = Opcodes.ACC_DEPRECATED | ACC_SYNTHETIC_ATTRIBUTE + | ((access & ACC_SYNTHETIC_ATTRIBUTE) / TO_ACC_SYNTHETIC); + out.putShort(access & ~mask).putShort(name).putShort(superName); + out.putShort(interfaceCount); + for (int i = 0; i < interfaceCount; ++i) { + out.putShort(interfaces[i]); + } + out.putShort(nbFields); + fb = firstField; + while (fb != null) { + fb.put(out); + fb = (FieldWriter) fb.fv; + } + out.putShort(nbMethods); + mb = firstMethod; + while (mb != null) { + mb.put(out); + mb = (MethodWriter) mb.mv; + } + out.putShort(attributeCount); + if (bootstrapMethods != null) { + out.putShort(newUTF8("BootstrapMethods")); + out.putInt(bootstrapMethods.length + 2).putShort( + bootstrapMethodsCount); + out.putByteArray(bootstrapMethods.data, 0, bootstrapMethods.length); + } + if (ClassReader.SIGNATURES && signature != 0) { + out.putShort(newUTF8("Signature")).putInt(2).putShort(signature); + } + if 
(sourceFile != 0) { + out.putShort(newUTF8("SourceFile")).putInt(2).putShort(sourceFile); + } + if (sourceDebug != null) { + int len = sourceDebug.length; + out.putShort(newUTF8("SourceDebugExtension")).putInt(len); + out.putByteArray(sourceDebug.data, 0, len); + } + if (enclosingMethodOwner != 0) { + out.putShort(newUTF8("EnclosingMethod")).putInt(4); + out.putShort(enclosingMethodOwner).putShort(enclosingMethod); + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + out.putShort(newUTF8("Deprecated")).putInt(0); + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((version & 0xFFFF) < Opcodes.V1_5 + || (access & ACC_SYNTHETIC_ATTRIBUTE) != 0) { + out.putShort(newUTF8("Synthetic")).putInt(0); + } + } + if (innerClasses != null) { + out.putShort(newUTF8("InnerClasses")); + out.putInt(innerClasses.length + 2).putShort(innerClassesCount); + out.putByteArray(innerClasses.data, 0, innerClasses.length); + } + if (ClassReader.ANNOTATIONS && anns != null) { + out.putShort(newUTF8("RuntimeVisibleAnnotations")); + anns.put(out); + } + if (ClassReader.ANNOTATIONS && ianns != null) { + out.putShort(newUTF8("RuntimeInvisibleAnnotations")); + ianns.put(out); + } + if (ClassReader.ANNOTATIONS && tanns != null) { + out.putShort(newUTF8("RuntimeVisibleTypeAnnotations")); + tanns.put(out); + } + if (ClassReader.ANNOTATIONS && itanns != null) { + out.putShort(newUTF8("RuntimeInvisibleTypeAnnotations")); + itanns.put(out); + } + if (attrs != null) { + attrs.put(this, null, 0, -1, -1, out); + } + if (invalidFrames) { + anns = null; + ianns = null; + attrs = null; + innerClassesCount = 0; + innerClasses = null; + bootstrapMethodsCount = 0; + bootstrapMethods = null; + firstField = null; + lastField = null; + firstMethod = null; + lastMethod = null; + computeMaxs = false; + computeFrames = true; + invalidFrames = false; + new ClassReader(out.data).accept(this, ClassReader.SKIP_FRAMES); + return toByteArray(); + } + return out.data; + } + + // ------------------------------------------------------------------------ + // Utility methods: constant pool management + // ------------------------------------------------------------------------ + + /** + * Adds a number or string constant to the constant pool of the class being + * build. Does nothing if the constant pool already contains a similar item. + * + * @param cst + * the value of the constant to be added to the constant pool. + * This parameter must be an {@link Integer}, a {@link Float}, a + * {@link Long}, a {@link Double}, a {@link String} or a + * {@link Type}. + * @return a new or already existing constant item with the given value. + */ + Item newConstItem(final Object cst) { + if (cst instanceof Integer) { + int val = ((Integer) cst).intValue(); + return newInteger(val); + } else if (cst instanceof Byte) { + int val = ((Byte) cst).intValue(); + return newInteger(val); + } else if (cst instanceof Character) { + int val = ((Character) cst).charValue(); + return newInteger(val); + } else if (cst instanceof Short) { + int val = ((Short) cst).intValue(); + return newInteger(val); + } else if (cst instanceof Boolean) { + int val = ((Boolean) cst).booleanValue() ? 
1 : 0; + return newInteger(val); + } else if (cst instanceof Float) { + float val = ((Float) cst).floatValue(); + return newFloat(val); + } else if (cst instanceof Long) { + long val = ((Long) cst).longValue(); + return newLong(val); + } else if (cst instanceof Double) { + double val = ((Double) cst).doubleValue(); + return newDouble(val); + } else if (cst instanceof String) { + return newString((String) cst); + } else if (cst instanceof Type) { + Type t = (Type) cst; + int s = t.getSort(); + if (s == Type.OBJECT) { + return newClassItem(t.getInternalName()); + } else if (s == Type.METHOD) { + return newMethodTypeItem(t.getDescriptor()); + } else { // s == primitive type or array + return newClassItem(t.getDescriptor()); + } + } else if (cst instanceof Handle) { + Handle h = (Handle) cst; + return newHandleItem(h.tag, h.owner, h.name, h.desc); + } else { + throw new IllegalArgumentException("value " + cst); + } + } + + /** + * Adds a number or string constant to the constant pool of the class being + * build. Does nothing if the constant pool already contains a similar item. + * This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param cst + * the value of the constant to be added to the constant pool. + * This parameter must be an {@link Integer}, a {@link Float}, a + * {@link Long}, a {@link Double} or a {@link String}. + * @return the index of a new or already existing constant item with the + * given value. + */ + public int newConst(final Object cst) { + return newConstItem(cst).index; + } + + /** + * Adds an UTF8 string to the constant pool of the class being build. Does + * nothing if the constant pool already contains a similar item. This + * method is intended for {@link Attribute} sub classes, and is normally not + * needed by class generators or adapters. + * + * @param value + * the String value. + * @return the index of a new or already existing UTF8 item. + */ + public int newUTF8(final String value) { + key.set(UTF8, value, null, null); + Item result = get(key); + if (result == null) { + pool.putByte(UTF8).putUTF8(value); + result = new Item(index++, key); + put(result); + } + return result.index; + } + + /** + * Adds a class reference to the constant pool of the class being build. + * Does nothing if the constant pool already contains a similar item. + * This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param value + * the internal name of the class. + * @return a new or already existing class reference item. + */ + Item newClassItem(final String value) { + key2.set(CLASS, value, null, null); + Item result = get(key2); + if (result == null) { + pool.put12(CLASS, newUTF8(value)); + result = new Item(index++, key2); + put(result); + } + return result; + } + + /** + * Adds a class reference to the constant pool of the class being build. + * Does nothing if the constant pool already contains a similar item. + * This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param value + * the internal name of the class. + * @return the index of a new or already existing class reference item. + */ + public int newClass(final String value) { + return newClassItem(value).index; + } + + /** + * Adds a method type reference to the constant pool of the class being + * build. Does nothing if the constant pool already contains a similar item. 
+ * This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param methodDesc + * method descriptor of the method type. + * @return a new or already existing method type reference item. + */ + Item newMethodTypeItem(final String methodDesc) { + key2.set(MTYPE, methodDesc, null, null); + Item result = get(key2); + if (result == null) { + pool.put12(MTYPE, newUTF8(methodDesc)); + result = new Item(index++, key2); + put(result); + } + return result; + } + + /** + * Adds a method type reference to the constant pool of the class being + * build. Does nothing if the constant pool already contains a similar item. + * This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param methodDesc + * method descriptor of the method type. + * @return the index of a new or already existing method type reference + * item. + */ + public int newMethodType(final String methodDesc) { + return newMethodTypeItem(methodDesc).index; + } + + /** + * Adds a handle to the constant pool of the class being build. Does nothing + * if the constant pool already contains a similar item. This method is + * intended for {@link Attribute} sub classes, and is normally not needed by + * class generators or adapters. + * + * @param tag + * the kind of this handle. Must be {@link Opcodes#H_GETFIELD}, + * {@link Opcodes#H_GETSTATIC}, {@link Opcodes#H_PUTFIELD}, + * {@link Opcodes#H_PUTSTATIC}, {@link Opcodes#H_INVOKEVIRTUAL}, + * {@link Opcodes#H_INVOKESTATIC}, + * {@link Opcodes#H_INVOKESPECIAL}, + * {@link Opcodes#H_NEWINVOKESPECIAL} or + * {@link Opcodes#H_INVOKEINTERFACE}. + * @param owner + * the internal name of the field or method owner class. + * @param name + * the name of the field or method. + * @param desc + * the descriptor of the field or method. + * @return a new or an already existing method type reference item. + */ + Item newHandleItem(final int tag, final String owner, final String name, + final String desc) { + key4.set(HANDLE_BASE + tag, owner, name, desc); + Item result = get(key4); + if (result == null) { + if (tag <= Opcodes.H_PUTSTATIC) { + put112(HANDLE, tag, newField(owner, name, desc)); + } else { + put112(HANDLE, + tag, + newMethod(owner, name, desc, + tag == Opcodes.H_INVOKEINTERFACE)); + } + result = new Item(index++, key4); + put(result); + } + return result; + } + + /** + * Adds a handle to the constant pool of the class being build. Does nothing + * if the constant pool already contains a similar item. This method is + * intended for {@link Attribute} sub classes, and is normally not needed by + * class generators or adapters. + * + * @param tag + * the kind of this handle. Must be {@link Opcodes#H_GETFIELD}, + * {@link Opcodes#H_GETSTATIC}, {@link Opcodes#H_PUTFIELD}, + * {@link Opcodes#H_PUTSTATIC}, {@link Opcodes#H_INVOKEVIRTUAL}, + * {@link Opcodes#H_INVOKESTATIC}, + * {@link Opcodes#H_INVOKESPECIAL}, + * {@link Opcodes#H_NEWINVOKESPECIAL} or + * {@link Opcodes#H_INVOKEINTERFACE}. + * @param owner + * the internal name of the field or method owner class. + * @param name + * the name of the field or method. + * @param desc + * the descriptor of the field or method. + * @return the index of a new or already existing method type reference + * item. 
+ */ + public int newHandle(final int tag, final String owner, final String name, + final String desc) { + return newHandleItem(tag, owner, name, desc).index; + } + + /** + * Adds an invokedynamic reference to the constant pool of the class being + * build. Does nothing if the constant pool already contains a similar item. + * This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param name + * name of the invoked method. + * @param desc + * descriptor of the invoke method. + * @param bsm + * the bootstrap method. + * @param bsmArgs + * the bootstrap method constant arguments. + * + * @return a new or an already existing invokedynamic type reference item. + */ + Item newInvokeDynamicItem(final String name, final String desc, + final Handle bsm, final Object... bsmArgs) { + // cache for performance + ByteVector bootstrapMethods = this.bootstrapMethods; + if (bootstrapMethods == null) { + bootstrapMethods = this.bootstrapMethods = new ByteVector(); + } + + int position = bootstrapMethods.length; // record current position + + int hashCode = bsm.hashCode(); + bootstrapMethods.putShort(newHandle(bsm.tag, bsm.owner, bsm.name, + bsm.desc)); + + int argsLength = bsmArgs.length; + bootstrapMethods.putShort(argsLength); + + for (int i = 0; i < argsLength; i++) { + Object bsmArg = bsmArgs[i]; + hashCode ^= bsmArg.hashCode(); + bootstrapMethods.putShort(newConst(bsmArg)); + } + + byte[] data = bootstrapMethods.data; + int length = (1 + 1 + argsLength) << 1; // (bsm + argCount + arguments) + hashCode &= 0x7FFFFFFF; + Item result = items[hashCode % items.length]; + loop: while (result != null) { + if (result.type != BSM || result.hashCode != hashCode) { + result = result.next; + continue; + } + + // because the data encode the size of the argument + // we don't need to test if these size are equals + int resultPosition = result.intVal; + for (int p = 0; p < length; p++) { + if (data[position + p] != data[resultPosition + p]) { + result = result.next; + continue loop; + } + } + break; + } + + int bootstrapMethodIndex; + if (result != null) { + bootstrapMethodIndex = result.index; + bootstrapMethods.length = position; // revert to old position + } else { + bootstrapMethodIndex = bootstrapMethodsCount++; + result = new Item(bootstrapMethodIndex); + result.set(position, hashCode); + put(result); + } + + // now, create the InvokeDynamic constant + key3.set(name, desc, bootstrapMethodIndex); + result = get(key3); + if (result == null) { + put122(INDY, bootstrapMethodIndex, newNameType(name, desc)); + result = new Item(index++, key3); + put(result); + } + return result; + } + + /** + * Adds an invokedynamic reference to the constant pool of the class being + * build. Does nothing if the constant pool already contains a similar item. + * This method is intended for {@link Attribute} sub classes, and is + * normally not needed by class generators or adapters. + * + * @param name + * name of the invoked method. + * @param desc + * descriptor of the invoke method. + * @param bsm + * the bootstrap method. + * @param bsmArgs + * the bootstrap method constant arguments. + * + * @return the index of a new or already existing invokedynamic reference + * item. + */ + public int newInvokeDynamic(final String name, final String desc, + final Handle bsm, final Object... bsmArgs) { + return newInvokeDynamicItem(name, desc, bsm, bsmArgs).index; + } + + /** + * Adds a field reference to the constant pool of the class being build. 
+
+    /**
+     * Adds a field reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     *
+     * @param owner
+     *            the internal name of the field's owner class.
+     * @param name
+     *            the field's name.
+     * @param desc
+     *            the field's descriptor.
+     * @return a new or already existing field reference item.
+     */
+    Item newFieldItem(final String owner, final String name, final String desc) {
+        key3.set(FIELD, owner, name, desc);
+        Item result = get(key3);
+        if (result == null) {
+            put122(FIELD, newClass(owner), newNameType(name, desc));
+            result = new Item(index++, key3);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a field reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item. This
+     * method is intended for {@link Attribute} subclasses, and is normally
+     * not needed by class generators or adapters.
+     *
+     * @param owner
+     *            the internal name of the field's owner class.
+     * @param name
+     *            the field's name.
+     * @param desc
+     *            the field's descriptor.
+     * @return the index of a new or already existing field reference item.
+     */
+    public int newField(final String owner, final String name, final String desc) {
+        return newFieldItem(owner, name, desc).index;
+    }
+
+    /**
+     * Adds a method reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     *
+     * @param owner
+     *            the internal name of the method's owner class.
+     * @param name
+     *            the method's name.
+     * @param desc
+     *            the method's descriptor.
+     * @param itf
+     *            true if owner is an interface.
+     * @return a new or already existing method reference item.
+     */
+    Item newMethodItem(final String owner, final String name,
+            final String desc, final boolean itf) {
+        int type = itf ? IMETH : METH;
+        key3.set(type, owner, name, desc);
+        Item result = get(key3);
+        if (result == null) {
+            put122(type, newClass(owner), newNameType(name, desc));
+            result = new Item(index++, key3);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a method reference to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item. This
+     * method is intended for {@link Attribute} subclasses, and is normally
+     * not needed by class generators or adapters.
+     *
+     * @param owner
+     *            the internal name of the method's owner class.
+     * @param name
+     *            the method's name.
+     * @param desc
+     *            the method's descriptor.
+     * @param itf
+     *            true if owner is an interface.
+     * @return the index of a new or already existing method reference item.
+     */
+    public int newMethod(final String owner, final String name,
+            final String desc, final boolean itf) {
+        return newMethodItem(owner, name, desc, itf).index;
+    }
+
+    /**
+     * Adds an integer to the constant pool of the class being built. Does
+     * nothing if the constant pool already contains a similar item.
+     *
+     * @param value
+     *            the int value.
+     * @return a new or already existing int item.
+     */
+    Item newInteger(final int value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(INT).putInt(value);
+            result = new Item(index++, key);
+            put(result);
+        }
+        return result;
+    }
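A single field reference interns a small graph of pool entries: the CONSTANT_Fieldref points at a CONSTANT_Class and a CONSTANT_NameAndType, which in turn point at CONSTANT_Utf8 entries. A minimal sketch of the resulting deduplication, assuming the bundled ASM 5 API; FieldRefDemo is a hypothetical caller:

    import com.sleepycat.asm.ClassWriter;

    // Sketch: the second identical call finds the existing Item in the hash
    // table and returns the same pool index; the pool does not grow.
    class FieldRefDemo {
        static int demo(ClassWriter cw) {
            int first = cw.newField("java/lang/System", "out",
                    "Ljava/io/PrintStream;");
            int again = cw.newField("java/lang/System", "out",
                    "Ljava/io/PrintStream;");
            assert first == again; // deduplicated by the Item hash table
            return first;
        }
    }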
+
+    /**
+     * Adds a float to the constant pool of the class being built. Does
+     * nothing if the constant pool already contains a similar item.
+     *
+     * @param value
+     *            the float value.
+     * @return a new or already existing float item.
+     */
+    Item newFloat(final float value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(FLOAT).putInt(key.intVal);
+            result = new Item(index++, key);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a long to the constant pool of the class being built. Does nothing
+     * if the constant pool already contains a similar item.
+     *
+     * @param value
+     *            the long value.
+     * @return a new or already existing long item.
+     */
+    Item newLong(final long value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(LONG).putLong(value);
+            result = new Item(index, key);
+            index += 2;
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a double to the constant pool of the class being built. Does
+     * nothing if the constant pool already contains a similar item.
+     *
+     * @param value
+     *            the double value.
+     * @return a new or already existing double item.
+     */
+    Item newDouble(final double value) {
+        key.set(value);
+        Item result = get(key);
+        if (result == null) {
+            pool.putByte(DOUBLE).putLong(key.longVal);
+            result = new Item(index, key);
+            index += 2;
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a string to the constant pool of the class being built. Does
+     * nothing if the constant pool already contains a similar item.
+     *
+     * @param value
+     *            the String value.
+     * @return a new or already existing string item.
+     */
+    private Item newString(final String value) {
+        key2.set(STR, value, null, null);
+        Item result = get(key2);
+        if (result == null) {
+            pool.put12(STR, newUTF8(value));
+            result = new Item(index++, key2);
+            put(result);
+        }
+        return result;
+    }
+
+    /**
+     * Adds a name and type to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item. This
+     * method is intended for {@link Attribute} subclasses, and is normally
+     * not needed by class generators or adapters.
+     *
+     * @param name
+     *            a name.
+     * @param desc
+     *            a type descriptor.
+     * @return the index of a new or already existing name and type item.
+     */
+    public int newNameType(final String name, final String desc) {
+        return newNameTypeItem(name, desc).index;
+    }
+
+    /**
+     * Adds a name and type to the constant pool of the class being built.
+     * Does nothing if the constant pool already contains a similar item.
+     *
+     * @param name
+     *            a name.
+     * @param desc
+     *            a type descriptor.
+     * @return a new or already existing name and type item.
+     */
+    Item newNameTypeItem(final String name, final String desc) {
+        key2.set(NAME_TYPE, name, desc, null);
+        Item result = get(key2);
+        if (result == null) {
+            put122(NAME_TYPE, newUTF8(name), newUTF8(desc));
+            result = new Item(index++, key2);
+            put(result);
+        }
+        return result;
+    }
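The bit-level view behind these methods: newFloat and newDouble write the raw IEEE 754 bit pattern that Item stored in key.intVal or key.longVal (hence the pool.putInt(key.intVal) call above), and CONSTANT_Long/CONSTANT_Double entries occupy two consecutive pool slots per JVMS 4.4.5, which is why newLong and newDouble advance index by 2 instead of 1. A small illustration of the bit patterns involved:

    // Sketch: raw bit patterns of constant pool float/double payloads.
    int floatBits = Float.floatToRawIntBits(3.5f);     // 0x40600000
    long doubleBits = Double.doubleToRawLongBits(3.5); // 0x400C000000000000L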
+
+    /**
+     * Adds the given internal name to {@link #typeTable} and returns its
+     * index. Does nothing if the type table already contains this internal
+     * name.
+     *
+     * @param type
+     *            the internal name to be added to the type table.
+     * @return the index of this internal name in the type table.
+     */
+    int addType(final String type) {
+        key.set(TYPE_NORMAL, type, null, null);
+        Item result = get(key);
+        if (result == null) {
+            result = addType(key);
+        }
+        return result.index;
+    }
+
+    /**
+     * Adds the given "uninitialized" type to {@link #typeTable} and returns
+     * its index. This method is used for UNINITIALIZED types, made of an
+     * internal name and a bytecode offset.
+     *
+     * @param type
+     *            the internal name to be added to the type table.
+     * @param offset
+     *            the bytecode offset of the NEW instruction that created this
+     *            UNINITIALIZED type value.
+     * @return the index of this internal name in the type table.
+     */
+    int addUninitializedType(final String type, final int offset) {
+        key.type = TYPE_UNINIT;
+        key.intVal = offset;
+        key.strVal1 = type;
+        key.hashCode = 0x7FFFFFFF & (TYPE_UNINIT + type.hashCode() + offset);
+        Item result = get(key);
+        if (result == null) {
+            result = addType(key);
+        }
+        return result.index;
+    }
+
+    /**
+     * Adds the given Item to {@link #typeTable}.
+     *
+     * @param item
+     *            the value to be added to the type table.
+     * @return the added Item, which is a new Item instance with the same
+     *         value as the given Item.
+     */
+    private Item addType(final Item item) {
+        ++typeCount;
+        Item result = new Item(typeCount, key);
+        put(result);
+        if (typeTable == null) {
+            typeTable = new Item[16];
+        }
+        if (typeCount == typeTable.length) {
+            Item[] newTable = new Item[2 * typeTable.length];
+            System.arraycopy(typeTable, 0, newTable, 0, typeTable.length);
+            typeTable = newTable;
+        }
+        typeTable[typeCount] = result;
+        return result;
+    }
+
+    /**
+     * Returns the index of the common super type of the two given types. This
+     * method calls {@link #getCommonSuperClass} and caches the result in the
+     * {@link #items} hash table to speed up future calls with the same
+     * parameters.
+     *
+     * @param type1
+     *            index of an internal name in {@link #typeTable}.
+     * @param type2
+     *            index of an internal name in {@link #typeTable}.
+     * @return the index of the common super type of the two given types.
+     */
+    int getMergedType(final int type1, final int type2) {
+        key2.type = TYPE_MERGED;
+        key2.longVal = type1 | (((long) type2) << 32);
+        key2.hashCode = 0x7FFFFFFF & (TYPE_MERGED + type1 + type2);
+        Item result = get(key2);
+        if (result == null) {
+            String t = typeTable[type1].strVal1;
+            String u = typeTable[type2].strVal1;
+            key2.intVal = addType(getCommonSuperClass(t, u));
+            result = new Item((short) 0, key2);
+            put(result);
+        }
+        return result.intVal;
+    }
+
+    /**
+     * Returns the common super type of the two given types. The default
+     * implementation of this method loads the two given classes and uses the
+     * java.lang.Class methods to find the common super class. It can be
+     * overridden to compute this common super type in other ways, in
+     * particular without actually loading any class, or to take into account
+     * the class that is currently being generated by this ClassWriter, which
+     * of course cannot be loaded since it is under construction.
+     *
+     * @param type1
+     *            the internal name of a class.
+     * @param type2
+     *            the internal name of another class.
+     * @return the internal name of the common super class of the two given
+     *         classes.
+     */
+    protected String getCommonSuperClass(final String type1, final String type2) {
+        Class<?> c, d;
+        ClassLoader classLoader = getClass().getClassLoader();
+        try {
+            c = Class.forName(type1.replace('/', '.'), false, classLoader);
+            d = Class.forName(type2.replace('/', '.'), false, classLoader);
+        } catch (Exception e) {
+            throw new RuntimeException(e.toString());
+        }
+        if (c.isAssignableFrom(d)) {
+            return type1;
+        }
+        if (d.isAssignableFrom(c)) {
+            return type2;
+        }
+        if (c.isInterface() || d.isInterface()) {
+            return "java/lang/Object";
+        } else {
+            do {
+                c = c.getSuperclass();
+            } while (!c.isAssignableFrom(d));
+            return c.getName().replace('.', '/');
+        }
+    }
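Because the default getCommonSuperClass loads both classes with Class.forName, frame computation can trigger unwanted class loading or fail for classes the writer's class loader cannot see. A common workaround is to override it. A minimal sketch, not part of this import, that answers conservatively (real implementations usually consult a bytecode-derived class hierarchy instead of returning Object unconditionally):

    import com.sleepycat.asm.ClassWriter;

    // Sketch: avoid loading application classes during COMPUTE_FRAMES.
    // Widening every merge to java/lang/Object is always verifiable, at the
    // cost of less precise stack map frames.
    class NonLoadingClassWriter extends ClassWriter {
        NonLoadingClassWriter(int flags) {
            super(flags);
        }

        @Override
        protected String getCommonSuperClass(String type1, String type2) {
            if (type1.equals(type2)) {
                return type1;
            }
            return "java/lang/Object"; // conservative common super type
        }
    }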
+
+    /**
+     * Returns the constant pool's hash table item which is equal to the given
+     * item.
+     *
+     * @param key
+     *            a constant pool item.
+     * @return the constant pool's hash table item which is equal to the given
+     *         item, or null if there is no such item.
+     */
+    private Item get(final Item key) {
+        Item i = items[key.hashCode % items.length];
+        while (i != null && (i.type != key.type || !key.isEqualTo(i))) {
+            i = i.next;
+        }
+        return i;
+    }
+
+    /**
+     * Puts the given item in the constant pool's hash table. The hash table
+     * must not already contain this item.
+     *
+     * @param i
+     *            the item to be added to the constant pool's hash table.
+     */
+    private void put(final Item i) {
+        if (index + typeCount > threshold) {
+            int ll = items.length;
+            int nl = ll * 2 + 1;
+            Item[] newItems = new Item[nl];
+            for (int l = ll - 1; l >= 0; --l) {
+                Item j = items[l];
+                while (j != null) {
+                    int index = j.hashCode % newItems.length;
+                    Item k = j.next;
+                    j.next = newItems[index];
+                    newItems[index] = j;
+                    j = k;
+                }
+            }
+            items = newItems;
+            threshold = (int) (nl * 0.75);
+        }
+        int index = i.hashCode % items.length;
+        i.next = items[index];
+        items[index] = i;
+    }
+
+    /**
+     * Puts one byte and two shorts into the constant pool.
+     *
+     * @param b
+     *            a byte.
+     * @param s1
+     *            a short.
+     * @param s2
+     *            another short.
+     */
+    private void put122(final int b, final int s1, final int s2) {
+        pool.put12(b, s1).putShort(s2);
+    }
+
+    /**
+     * Puts two bytes and one short into the constant pool.
+     *
+     * @param b1
+     *            a byte.
+     * @param b2
+     *            another byte.
+     * @param s
+     *            a short.
+     */
+    private void put112(final int b1, final int b2, final int s) {
+        pool.put11(b1, b2).putShort(s);
+    }
+}
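For orientation, this is how the interning methods above are driven in practice: every visit* call on a ClassWriter funnels names and descriptors through newUTF8/newClass/newField and friends, and toByteArray() serializes the accumulated constant pool followed by the class body. A minimal sketch, assuming the bundled ASM 5 API; example/Empty is a hypothetical class name:

    import com.sleepycat.asm.ClassWriter;
    import com.sleepycat.asm.Opcodes;

    // Sketch: generate the smallest possible class file. The visit() call
    // alone already interns several CONSTANT_Utf8 and CONSTANT_Class items.
    class EmptyClassGenerator {
        static byte[] generate() {
            ClassWriter cw = new ClassWriter(0);
            cw.visit(Opcodes.V1_5, Opcodes.ACC_PUBLIC, "example/Empty", null,
                    "java/lang/Object", null);
            cw.visitEnd();
            return cw.toByteArray();
        }
    }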
diff --git a/src/com/sleepycat/asm/Context.java b/src/com/sleepycat/asm/Context.java
new file mode 100644
index 0000000..477d83b
--- /dev/null
+++ b/src/com/sleepycat/asm/Context.java
@@ -0,0 +1,145 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2011 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package com.sleepycat.asm;
+
+/**
+ * Information about a class being parsed in a {@link ClassReader}.
+ *
+ * @author Eric Bruneton
+ */
+class Context {
+
+    /**
+     * Prototypes of the attributes that must be parsed for this class.
+     */
+    Attribute[] attrs;
+
+    /**
+     * The {@link ClassReader} option flags for the parsing of this class.
+     */
+    int flags;
+
+    /**
+     * The buffer used to read strings.
+     */
+    char[] buffer;
+
+    /**
+     * The start index of each bootstrap method.
+     */
+    int[] bootstrapMethods;
+
+    /**
+     * The access flags of the method currently being parsed.
+     */
+    int access;
+
+    /**
+     * The name of the method currently being parsed.
+     */
+    String name;
+
+    /**
+     * The descriptor of the method currently being parsed.
+     */
+    String desc;
+
+    /**
+     * The label objects, indexed by bytecode offset, of the method currently
+     * being parsed (only bytecode offsets for which a label is needed have a
+     * non-null associated Label object).
+     */
+    Label[] labels;
+
+    /**
+     * The target of the type annotation currently being parsed.
+     */
+    int typeRef;
+
+    /**
+     * The path of the type annotation currently being parsed.
+     */
+    TypePath typePath;
+
+    /**
+     * The offset of the latest stack map frame that has been parsed.
+     */
+    int offset;
+
+    /**
+     * The labels corresponding to the start of the local variable ranges in
+     * the local variable type annotation currently being parsed.
+     */
+    Label[] start;
+
+    /**
+     * The labels corresponding to the end of the local variable ranges in the
+     * local variable type annotation currently being parsed.
+     */
+    Label[] end;
+
+    /**
+     * The local variable indices for each local variable range in the local
+     * variable type annotation currently being parsed.
+     */
+    int[] index;
+
+    /**
+     * The encoding of the latest stack map frame that has been parsed.
+     */
+    int mode;
+
+    /**
+     * The number of locals in the latest stack map frame that has been
+     * parsed.
+     */
+    int localCount;
+
+    /**
+     * The number of locals in the latest stack map frame that has been
+     * parsed, minus the number of locals in the previous frame.
+     */
+    int localDiff;
+
+    /**
+     * The local values of the latest stack map frame that has been parsed.
+     */
+    Object[] local;
+
+    /**
+     * The stack size of the latest stack map frame that has been parsed.
+     */
+    int stackCount;
+
+    /**
+     * The stack values of the latest stack map frame that has been parsed.
+     */
+    Object[] stack;
+}
diff --git a/src/com/sleepycat/asm/Edge.java b/src/com/sleepycat/asm/Edge.java
new file mode 100644
index 0000000..c78cfd4
--- /dev/null
+++ b/src/com/sleepycat/asm/Edge.java
@@ -0,0 +1,75 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2011 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * An edge in the control flow graph of a method body. See {@link Label Label}.
+ *
+ * @author Eric Bruneton
+ */
+class Edge {
+
+    /**
+     * Denotes a normal control flow graph edge.
+     */
+    static final int NORMAL = 0;
+
+    /**
+     * Denotes a control flow graph edge corresponding to an exception
+     * handler. More precisely, any {@link Edge} whose {@link #info} is
+     * strictly positive corresponds to an exception handler. The actual value
+     * of {@link #info} is the index, in the {@link ClassWriter} type table,
+     * of the exception that is caught.
+     */
+    static final int EXCEPTION = 0x7FFFFFFF;
+
+    /**
+     * Information about this control flow graph edge. If
+     * {@link ClassWriter#COMPUTE_MAXS} is used this field is the (relative)
+     * stack size in the basic block from which this edge originates. This
+     * size is equal to the stack size at the "jump" instruction to which this
+     * edge corresponds, relative to the stack size at the beginning of the
+     * originating basic block. If {@link ClassWriter#COMPUTE_FRAMES} is used,
+     * this field is the kind of this control flow graph edge (i.e. NORMAL or
+     * EXCEPTION).
+     */
+    int info;
+
+    /**
+     * The successor block of the basic block from which this edge originates.
+     */
+    Label successor;
+
+    /**
+     * The next edge in the list of successors of the originating basic block.
+     * See {@link Label#successors successors}.
+     */
+    Edge next;
+}
diff --git a/src/com/sleepycat/asm/FieldVisitor.java b/src/com/sleepycat/asm/FieldVisitor.java
new file mode 100644
index 0000000..5c8ad8c
--- /dev/null
+++ b/src/com/sleepycat/asm/FieldVisitor.java
@@ -0,0 +1,150 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2011 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A visitor to visit a Java field. The methods of this class must be called in + * the following order: ( visitAnnotation | + * visitTypeAnnotation | visitAttribute )* visitEnd. + * + * @author Eric Bruneton + */ +public abstract class FieldVisitor { + + /** + * The ASM API version implemented by this visitor. The value of this field + * must be one of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + */ + protected final int api; + + /** + * The field visitor to which this visitor must delegate method calls. May + * be null. + */ + protected FieldVisitor fv; + + /** + * Constructs a new {@link FieldVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + */ + public FieldVisitor(final int api) { + this(api, null); + } + + /** + * Constructs a new {@link FieldVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + * @param fv + * the field visitor to which this visitor must delegate method + * calls. May be null. + */ + public FieldVisitor(final int api, final FieldVisitor fv) { + if (api != Opcodes.ASM4 && api != Opcodes.ASM5) { + throw new IllegalArgumentException(); + } + this.api = api; + this.fv = fv; + } + + /** + * Visits an annotation of the field. + * + * @param desc + * the class descriptor of the annotation class. + * @param visible + * true if the annotation is visible at runtime. + * @return a visitor to visit the annotation values, or null if + * this visitor is not interested in visiting this annotation. + */ + public AnnotationVisitor visitAnnotation(String desc, boolean visible) { + if (fv != null) { + return fv.visitAnnotation(desc, visible); + } + return null; + } + + /** + * Visits an annotation on the type of the field. + * + * @param typeRef + * a reference to the annotated type. The sort of this type + * reference must be {@link TypeReference#FIELD FIELD}. See + * {@link TypeReference}. + * @param typePath + * the path to the annotated type argument, wildcard bound, array + * element type, or static inner type within 'typeRef'. May be + * null if the annotation targets 'typeRef' as a whole. + * @param desc + * the class descriptor of the annotation class. + * @param visible + * true if the annotation is visible at runtime. + * @return a visitor to visit the annotation values, or null if + * this visitor is not interested in visiting this annotation. 
+     */
+    public AnnotationVisitor visitTypeAnnotation(int typeRef,
+            TypePath typePath, String desc, boolean visible) {
+        if (api < Opcodes.ASM5) {
+            throw new RuntimeException();
+        }
+        if (fv != null) {
+            return fv.visitTypeAnnotation(typeRef, typePath, desc, visible);
+        }
+        return null;
+    }
+
+    /**
+     * Visits a non-standard attribute of the field.
+     *
+     * @param attr
+     *            an attribute.
+     */
+    public void visitAttribute(Attribute attr) {
+        if (fv != null) {
+            fv.visitAttribute(attr);
+        }
+    }
+
+    /**
+     * Visits the end of the field. This method, which is the last one to be
+     * called, is used to inform the visitor that all the annotations and
+     * attributes of the field have been visited.
+     */
+    public void visitEnd() {
+        if (fv != null) {
+            fv.visitEnd();
+        }
+    }
+}
diff --git a/src/com/sleepycat/asm/FieldWriter.java b/src/com/sleepycat/asm/FieldWriter.java
new file mode 100644
index 0000000..787cc62
--- /dev/null
+++ b/src/com/sleepycat/asm/FieldWriter.java
@@ -0,0 +1,329 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2011 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A {@link FieldVisitor} that generates Java fields in bytecode form.
+ *
+ * @author Eric Bruneton
+ */
+final class FieldWriter extends FieldVisitor {
+
+    /**
+     * The class writer to which this field must be added.
+     */
+    private final ClassWriter cw;
+
+    /**
+     * Access flags of this field.
+     */
+    private final int access;
+
+    /**
+     * The index of the constant pool item that contains the name of this
+     * field.
+     */
+    private final int name;
+
+    /**
+     * The index of the constant pool item that contains the descriptor of
+     * this field.
+     */
+    private final int desc;
+
+    /**
+     * The index of the constant pool item that contains the signature of this
+     * field.
+     */
+    private int signature;
+
+    /**
+     * The index of the constant pool item that contains the constant value of
+     * this field.
+ */ + private int value; + + /** + * The runtime visible annotations of this field. May be null. + */ + private AnnotationWriter anns; + + /** + * The runtime invisible annotations of this field. May be null. + */ + private AnnotationWriter ianns; + + /** + * The runtime visible type annotations of this field. May be null. + */ + private AnnotationWriter tanns; + + /** + * The runtime invisible type annotations of this field. May be + * null. + */ + private AnnotationWriter itanns; + + /** + * The non standard attributes of this field. May be null. + */ + private Attribute attrs; + + // ------------------------------------------------------------------------ + // Constructor + // ------------------------------------------------------------------------ + + /** + * Constructs a new {@link FieldWriter}. + * + * @param cw + * the class writer to which this field must be added. + * @param access + * the field's access flags (see {@link Opcodes}). + * @param name + * the field's name. + * @param desc + * the field's descriptor (see {@link Type}). + * @param signature + * the field's signature. May be null. + * @param value + * the field's constant value. May be null. + */ + FieldWriter(final ClassWriter cw, final int access, final String name, + final String desc, final String signature, final Object value) { + super(Opcodes.ASM5); + if (cw.firstField == null) { + cw.firstField = this; + } else { + cw.lastField.fv = this; + } + cw.lastField = this; + this.cw = cw; + this.access = access; + this.name = cw.newUTF8(name); + this.desc = cw.newUTF8(desc); + if (ClassReader.SIGNATURES && signature != null) { + this.signature = cw.newUTF8(signature); + } + if (value != null) { + this.value = cw.newConstItem(value).index; + } + } + + // ------------------------------------------------------------------------ + // Implementation of the FieldVisitor abstract class + // ------------------------------------------------------------------------ + + @Override + public AnnotationVisitor visitAnnotation(final String desc, + final boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, 2); + if (visible) { + aw.next = anns; + anns = aw; + } else { + aw.next = ianns; + ianns = aw; + } + return aw; + } + + @Override + public AnnotationVisitor visitTypeAnnotation(final int typeRef, + final TypePath typePath, final String desc, final boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write target_type and target_info + AnnotationWriter.putTarget(typeRef, typePath, bv); + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, + bv.length - 2); + if (visible) { + aw.next = tanns; + tanns = aw; + } else { + aw.next = itanns; + itanns = aw; + } + return aw; + } + + @Override + public void visitAttribute(final Attribute attr) { + attr.next = attrs; + attrs = attr; + } + + @Override + public void visitEnd() { + } + + // ------------------------------------------------------------------------ + // Utility methods + // ------------------------------------------------------------------------ + + /** + * Returns the size of this field. + * + * @return the size of this field. 
+ */ + int getSize() { + int size = 8; + if (value != 0) { + cw.newUTF8("ConstantValue"); + size += 8; + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((cw.version & 0xFFFF) < Opcodes.V1_5 + || (access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) != 0) { + cw.newUTF8("Synthetic"); + size += 6; + } + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + cw.newUTF8("Deprecated"); + size += 6; + } + if (ClassReader.SIGNATURES && signature != 0) { + cw.newUTF8("Signature"); + size += 8; + } + if (ClassReader.ANNOTATIONS && anns != null) { + cw.newUTF8("RuntimeVisibleAnnotations"); + size += 8 + anns.getSize(); + } + if (ClassReader.ANNOTATIONS && ianns != null) { + cw.newUTF8("RuntimeInvisibleAnnotations"); + size += 8 + ianns.getSize(); + } + if (ClassReader.ANNOTATIONS && tanns != null) { + cw.newUTF8("RuntimeVisibleTypeAnnotations"); + size += 8 + tanns.getSize(); + } + if (ClassReader.ANNOTATIONS && itanns != null) { + cw.newUTF8("RuntimeInvisibleTypeAnnotations"); + size += 8 + itanns.getSize(); + } + if (attrs != null) { + size += attrs.getSize(cw, null, 0, -1, -1); + } + return size; + } + + /** + * Puts the content of this field into the given byte vector. + * + * @param out + * where the content of this field must be put. + */ + void put(final ByteVector out) { + final int FACTOR = ClassWriter.TO_ACC_SYNTHETIC; + int mask = Opcodes.ACC_DEPRECATED | ClassWriter.ACC_SYNTHETIC_ATTRIBUTE + | ((access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) / FACTOR); + out.putShort(access & ~mask).putShort(name).putShort(desc); + int attributeCount = 0; + if (value != 0) { + ++attributeCount; + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((cw.version & 0xFFFF) < Opcodes.V1_5 + || (access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) != 0) { + ++attributeCount; + } + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + ++attributeCount; + } + if (ClassReader.SIGNATURES && signature != 0) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && anns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && ianns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && tanns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && itanns != null) { + ++attributeCount; + } + if (attrs != null) { + attributeCount += attrs.getCount(); + } + out.putShort(attributeCount); + if (value != 0) { + out.putShort(cw.newUTF8("ConstantValue")); + out.putInt(2).putShort(value); + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((cw.version & 0xFFFF) < Opcodes.V1_5 + || (access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) != 0) { + out.putShort(cw.newUTF8("Synthetic")).putInt(0); + } + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + out.putShort(cw.newUTF8("Deprecated")).putInt(0); + } + if (ClassReader.SIGNATURES && signature != 0) { + out.putShort(cw.newUTF8("Signature")); + out.putInt(2).putShort(signature); + } + if (ClassReader.ANNOTATIONS && anns != null) { + out.putShort(cw.newUTF8("RuntimeVisibleAnnotations")); + anns.put(out); + } + if (ClassReader.ANNOTATIONS && ianns != null) { + out.putShort(cw.newUTF8("RuntimeInvisibleAnnotations")); + ianns.put(out); + } + if (ClassReader.ANNOTATIONS && tanns != null) { + out.putShort(cw.newUTF8("RuntimeVisibleTypeAnnotations")); + tanns.put(out); + } + if (ClassReader.ANNOTATIONS && itanns != null) { + out.putShort(cw.newUTF8("RuntimeInvisibleTypeAnnotations")); + itanns.put(out); + } + if (attrs != null) { + attrs.put(cw, null, 0, -1, -1, out); + } + } +} diff --git a/src/com/sleepycat/asm/Frame.java b/src/com/sleepycat/asm/Frame.java 
new file mode 100644
index 0000000..acac65c
--- /dev/null
+++ b/src/com/sleepycat/asm/Frame.java
@@ -0,0 +1,1462 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2011 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * Information about the input and output stack map frames of a basic block.
+ *
+ * @author Eric Bruneton
+ */
+final class Frame {
+
+    /*
+     * Frames are computed in a two-step process: during the visit of each
+     * instruction, the state of the frame at the end of the current basic
+     * block is updated by simulating the action of the instruction on the
+     * previous state of this so-called "output frame". In visitMaxs, a fixed
+     * point algorithm is used to compute the "input frame" of each basic
+     * block, i.e. the stack map frame at the beginning of the basic block,
+     * starting from the input frame of the first basic block (which is
+     * computed from the method descriptor), and by using the previously
+     * computed output frames to compute the input state of the other blocks.
+     *
+     * All output and input frames are stored as arrays of integers. Reference
+     * and array types are represented by an index into a type table (which is
+     * not the same as the constant pool of the class, in order to avoid
+     * adding unnecessary constants in the pool - not all computed frames will
+     * end up being stored in the stack map table). This allows very fast type
+     * comparisons.
+     *
+     * Output stack map frames are computed relative to the input frame of the
+     * basic block, which is not yet known when output frames are computed. It
+     * is therefore necessary to be able to represent abstract types such as
+     * "the type at position x in the input frame locals" or "the type at
+     * position x from the top of the input frame stack" or even "the type at
+     * position x in the input frame, with y more (or less) array dimensions".
+     * This explains the rather complicated type format used in output frames.
+     *
+     * This format is the following: DIM KIND VALUE (4, 4 and 24 bits). DIM is
+     * a signed number of array dimensions (from -8 to 7). KIND is either
+     * BASE, LOCAL or STACK. BASE is used for types that are not relative to
+     * the input frame. LOCAL is used for types that are relative to the input
+     * local variable types. STACK is used for types that are relative to the
+     * input stack types. VALUE depends on KIND. For LOCAL types, it is an
+     * index in the input local variable types. For STACK types, it is a
+     * position relative to the top of the input frame stack. For BASE types,
+     * it is either one of the constants defined below, or for OBJECT and
+     * UNINITIALIZED types, a tag and an index in the type table.
+     *
+     * Output frames can contain types of any kind and with a positive or
+     * negative dimension (and even unassigned types, represented by 0 - which
+     * does not correspond to any valid type value). Input frames can only
+     * contain BASE types of positive or zero dimension. In all cases the type
+     * table contains only internal type names (array type descriptors are
+     * forbidden - dimensions must be represented through the DIM field).
+     *
+     * The LONG and DOUBLE types are always represented by using two slots
+     * (LONG + TOP or DOUBLE + TOP), for local variable types as well as in
+     * the operand stack. This is necessary to be able to simulate DUPx_y
+     * instructions, whose effect would be dependent on the actual type values
+     * if types were always represented by a single slot in the stack (and
+     * this is not possible, since actual type values are not always known -
+     * cf LOCAL and STACK type kinds).
+     */
+
+    /**
+     * Mask to get the dimension of a frame type. This dimension is a signed
+     * integer between -8 and 7.
+     */
+    static final int DIM = 0xF0000000;
+
+    /**
+     * Constant to be added to a type to get a type with one more dimension.
+     */
+    static final int ARRAY_OF = 0x10000000;
+
+    /**
+     * Constant to be added to a type to get a type with one less dimension.
+     */
+    static final int ELEMENT_OF = 0xF0000000;
+
+    /**
+     * Mask to get the kind of a frame type.
+     *
+     * @see #BASE
+     * @see #LOCAL
+     * @see #STACK
+     */
+    static final int KIND = 0xF000000;
+
+    /**
+     * Flag used for LOCAL and STACK types. Indicates that if this type
+     * happens to be a long or double type (during the computations of input
+     * frames), then it must be set to TOP because the second word of this
+     * value has been reused to store other data in the basic block. Hence the
+     * first word no longer stores a valid long or double value.
+     */
+    static final int TOP_IF_LONG_OR_DOUBLE = 0x800000;
+
+    /**
+     * Mask to get the value of a frame type.
+     */
+    static final int VALUE = 0x7FFFFF;
+
+    /**
+     * Mask to get the kind of base types.
+     */
+    static final int BASE_KIND = 0xFF00000;
+
+    /**
+     * Mask to get the value of base types.
+     */
+    static final int BASE_VALUE = 0xFFFFF;
+
+    /**
+     * Kind of the types that are not relative to an input stack map frame.
+     */
+    static final int BASE = 0x1000000;
+
+    /**
+     * Base kind of the base reference types. The BASE_VALUE of such types is
+     * an index into the type table.
+     */
+    static final int OBJECT = BASE | 0x700000;
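A worked reading of the DIM KIND VALUE packing described above, using the package-private constants just defined; the type-table index 7 for String[][] is purely illustrative:

    // Encode "two array dimensions of the reference type at type-table
    // index 7" (say, String[][]): dim = 2, base kind = OBJECT, value = 7.
    int encoded = (2 << 28) | Frame.OBJECT | 7;
    int dim = encoded >> 28;                     // 2 (DIM is signed)
    int baseKind = encoded & Frame.BASE_KIND;    // Frame.OBJECT
    int tableIndex = encoded & Frame.BASE_VALUE; // 7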
+
+    /**
+     * Base kind of the uninitialized base types. The BASE_VALUE of such types
+     * is an index into the type table (the Item at that index contains both
+     * an instruction offset and an internal class name).
+     */
+    static final int UNINITIALIZED = BASE | 0x800000;
+
+    /**
+     * Kind of the types that are relative to the local variable types of an
+     * input stack map frame. The value of such types is a local variable
+     * index.
+     */
+    private static final int LOCAL = 0x2000000;
+
+    /**
+     * Kind of the types that are relative to the stack of an input stack map
+     * frame. The value of such types is a position relative to the top of
+     * this stack.
+     */
+    private static final int STACK = 0x3000000;
+
+    /**
+     * The TOP type. This is a BASE type.
+     */
+    static final int TOP = BASE | 0;
+
+    /**
+     * The BOOLEAN type. This is a BASE type mainly used for array types.
+     */
+    static final int BOOLEAN = BASE | 9;
+
+    /**
+     * The BYTE type. This is a BASE type mainly used for array types.
+     */
+    static final int BYTE = BASE | 10;
+
+    /**
+     * The CHAR type. This is a BASE type mainly used for array types.
+     */
+    static final int CHAR = BASE | 11;
+
+    /**
+     * The SHORT type. This is a BASE type mainly used for array types.
+     */
+    static final int SHORT = BASE | 12;
+
+    /**
+     * The INTEGER type. This is a BASE type.
+     */
+    static final int INTEGER = BASE | 1;
+
+    /**
+     * The FLOAT type. This is a BASE type.
+     */
+    static final int FLOAT = BASE | 2;
+
+    /**
+     * The DOUBLE type. This is a BASE type.
+     */
+    static final int DOUBLE = BASE | 3;
+
+    /**
+     * The LONG type. This is a BASE type.
+     */
+    static final int LONG = BASE | 4;
+
+    /**
+     * The NULL type. This is a BASE type.
+     */
+    static final int NULL = BASE | 5;
+
+    /**
+     * The UNINITIALIZED_THIS type. This is a BASE type.
+     */
+    static final int UNINITIALIZED_THIS = BASE | 6;
+
+    /**
+     * The stack size variation corresponding to each JVM instruction. This
+     * stack variation is equal to the size of the values produced by an
+     * instruction, minus the size of the values consumed by this instruction.
+     */
+    static final int[] SIZE;
+
+    /**
+     * Computes the stack size variation corresponding to each JVM instruction.
+ */ + static { + int i; + int[] b = new int[202]; + String s = "EFFFFFFFFGGFFFGGFFFEEFGFGFEEEEEEEEEEEEEEEEEEEEDEDEDDDDD" + + "CDCDEEEEEEEEEEEEEEEEEEEEBABABBBBDCFFFGGGEDCDCDCDCDCDCDCDCD" + + "CDCEEEEDDDDDDDCDCDCEFEFDDEEFFDEDEEEBDDBBDDDDDDCCCCCCCCEFED" + + "DDCDCDEEEEEEEEEEFEEEEEEDDEEDDEE"; + for (i = 0; i < b.length; ++i) { + b[i] = s.charAt(i) - 'E'; + } + SIZE = b; + + // code to generate the above string + // + // int NA = 0; // not applicable (unused opcode or variable size opcode) + // + // b = new int[] { + // 0, //NOP, // visitInsn + // 1, //ACONST_NULL, // - + // 1, //ICONST_M1, // - + // 1, //ICONST_0, // - + // 1, //ICONST_1, // - + // 1, //ICONST_2, // - + // 1, //ICONST_3, // - + // 1, //ICONST_4, // - + // 1, //ICONST_5, // - + // 2, //LCONST_0, // - + // 2, //LCONST_1, // - + // 1, //FCONST_0, // - + // 1, //FCONST_1, // - + // 1, //FCONST_2, // - + // 2, //DCONST_0, // - + // 2, //DCONST_1, // - + // 1, //BIPUSH, // visitIntInsn + // 1, //SIPUSH, // - + // 1, //LDC, // visitLdcInsn + // NA, //LDC_W, // - + // NA, //LDC2_W, // - + // 1, //ILOAD, // visitVarInsn + // 2, //LLOAD, // - + // 1, //FLOAD, // - + // 2, //DLOAD, // - + // 1, //ALOAD, // - + // NA, //ILOAD_0, // - + // NA, //ILOAD_1, // - + // NA, //ILOAD_2, // - + // NA, //ILOAD_3, // - + // NA, //LLOAD_0, // - + // NA, //LLOAD_1, // - + // NA, //LLOAD_2, // - + // NA, //LLOAD_3, // - + // NA, //FLOAD_0, // - + // NA, //FLOAD_1, // - + // NA, //FLOAD_2, // - + // NA, //FLOAD_3, // - + // NA, //DLOAD_0, // - + // NA, //DLOAD_1, // - + // NA, //DLOAD_2, // - + // NA, //DLOAD_3, // - + // NA, //ALOAD_0, // - + // NA, //ALOAD_1, // - + // NA, //ALOAD_2, // - + // NA, //ALOAD_3, // - + // -1, //IALOAD, // visitInsn + // 0, //LALOAD, // - + // -1, //FALOAD, // - + // 0, //DALOAD, // - + // -1, //AALOAD, // - + // -1, //BALOAD, // - + // -1, //CALOAD, // - + // -1, //SALOAD, // - + // -1, //ISTORE, // visitVarInsn + // -2, //LSTORE, // - + // -1, //FSTORE, // - + // -2, //DSTORE, // - + // -1, //ASTORE, // - + // NA, //ISTORE_0, // - + // NA, //ISTORE_1, // - + // NA, //ISTORE_2, // - + // NA, //ISTORE_3, // - + // NA, //LSTORE_0, // - + // NA, //LSTORE_1, // - + // NA, //LSTORE_2, // - + // NA, //LSTORE_3, // - + // NA, //FSTORE_0, // - + // NA, //FSTORE_1, // - + // NA, //FSTORE_2, // - + // NA, //FSTORE_3, // - + // NA, //DSTORE_0, // - + // NA, //DSTORE_1, // - + // NA, //DSTORE_2, // - + // NA, //DSTORE_3, // - + // NA, //ASTORE_0, // - + // NA, //ASTORE_1, // - + // NA, //ASTORE_2, // - + // NA, //ASTORE_3, // - + // -3, //IASTORE, // visitInsn + // -4, //LASTORE, // - + // -3, //FASTORE, // - + // -4, //DASTORE, // - + // -3, //AASTORE, // - + // -3, //BASTORE, // - + // -3, //CASTORE, // - + // -3, //SASTORE, // - + // -1, //POP, // - + // -2, //POP2, // - + // 1, //DUP, // - + // 1, //DUP_X1, // - + // 1, //DUP_X2, // - + // 2, //DUP2, // - + // 2, //DUP2_X1, // - + // 2, //DUP2_X2, // - + // 0, //SWAP, // - + // -1, //IADD, // - + // -2, //LADD, // - + // -1, //FADD, // - + // -2, //DADD, // - + // -1, //ISUB, // - + // -2, //LSUB, // - + // -1, //FSUB, // - + // -2, //DSUB, // - + // -1, //IMUL, // - + // -2, //LMUL, // - + // -1, //FMUL, // - + // -2, //DMUL, // - + // -1, //IDIV, // - + // -2, //LDIV, // - + // -1, //FDIV, // - + // -2, //DDIV, // - + // -1, //IREM, // - + // -2, //LREM, // - + // -1, //FREM, // - + // -2, //DREM, // - + // 0, //INEG, // - + // 0, //LNEG, // - + // 0, //FNEG, // - + // 0, //DNEG, // - + // -1, //ISHL, // - + // -1, //LSHL, // - + // -1, //ISHR, // - + // -1, //LSHR, // - + // 
-1, //IUSHR, // - + // -1, //LUSHR, // - + // -1, //IAND, // - + // -2, //LAND, // - + // -1, //IOR, // - + // -2, //LOR, // - + // -1, //IXOR, // - + // -2, //LXOR, // - + // 0, //IINC, // visitIincInsn + // 1, //I2L, // visitInsn + // 0, //I2F, // - + // 1, //I2D, // - + // -1, //L2I, // - + // -1, //L2F, // - + // 0, //L2D, // - + // 0, //F2I, // - + // 1, //F2L, // - + // 1, //F2D, // - + // -1, //D2I, // - + // 0, //D2L, // - + // -1, //D2F, // - + // 0, //I2B, // - + // 0, //I2C, // - + // 0, //I2S, // - + // -3, //LCMP, // - + // -1, //FCMPL, // - + // -1, //FCMPG, // - + // -3, //DCMPL, // - + // -3, //DCMPG, // - + // -1, //IFEQ, // visitJumpInsn + // -1, //IFNE, // - + // -1, //IFLT, // - + // -1, //IFGE, // - + // -1, //IFGT, // - + // -1, //IFLE, // - + // -2, //IF_ICMPEQ, // - + // -2, //IF_ICMPNE, // - + // -2, //IF_ICMPLT, // - + // -2, //IF_ICMPGE, // - + // -2, //IF_ICMPGT, // - + // -2, //IF_ICMPLE, // - + // -2, //IF_ACMPEQ, // - + // -2, //IF_ACMPNE, // - + // 0, //GOTO, // - + // 1, //JSR, // - + // 0, //RET, // visitVarInsn + // -1, //TABLESWITCH, // visiTableSwitchInsn + // -1, //LOOKUPSWITCH, // visitLookupSwitch + // -1, //IRETURN, // visitInsn + // -2, //LRETURN, // - + // -1, //FRETURN, // - + // -2, //DRETURN, // - + // -1, //ARETURN, // - + // 0, //RETURN, // - + // NA, //GETSTATIC, // visitFieldInsn + // NA, //PUTSTATIC, // - + // NA, //GETFIELD, // - + // NA, //PUTFIELD, // - + // NA, //INVOKEVIRTUAL, // visitMethodInsn + // NA, //INVOKESPECIAL, // - + // NA, //INVOKESTATIC, // - + // NA, //INVOKEINTERFACE, // - + // NA, //INVOKEDYNAMIC, // visitInvokeDynamicInsn + // 1, //NEW, // visitTypeInsn + // 0, //NEWARRAY, // visitIntInsn + // 0, //ANEWARRAY, // visitTypeInsn + // 0, //ARRAYLENGTH, // visitInsn + // NA, //ATHROW, // - + // 0, //CHECKCAST, // visitTypeInsn + // 0, //INSTANCEOF, // - + // -1, //MONITORENTER, // visitInsn + // -1, //MONITOREXIT, // - + // NA, //WIDE, // NOT VISITED + // NA, //MULTIANEWARRAY, // visitMultiANewArrayInsn + // -1, //IFNULL, // visitJumpInsn + // -1, //IFNONNULL, // - + // NA, //GOTO_W, // - + // NA, //JSR_W, // - + // }; + // for (i = 0; i < b.length; ++i) { + // System.err.print((char)('E' + b[i])); + // } + // System.err.println(); + } + + /** + * The label (i.e. basic block) to which these input and output stack map + * frames correspond. + */ + Label owner; + + /** + * The input stack map frame locals. + */ + int[] inputLocals; + + /** + * The input stack map frame stack. + */ + int[] inputStack; + + /** + * The output stack map frame locals. + */ + private int[] outputLocals; + + /** + * The output stack map frame stack. + */ + private int[] outputStack; + + /** + * Relative size of the output stack. The exact semantics of this field + * depends on the algorithm that is used. + * + * When only the maximum stack size is computed, this field is the size of + * the output stack relatively to the top of the input stack. + * + * When the stack map frames are completely computed, this field is the + * actual number of types in {@link #outputStack}. + */ + private int outputStackTop; + + /** + * Number of types that are initialized in the basic block. + * + * @see #initializations + */ + private int initializationCount; + + /** + * The types that are initialized in the basic block. A constructor + * invocation on an UNINITIALIZED or UNINITIALIZED_THIS type must replace + * every occurence of this type in the local variables and in the + * operand stack. 
This cannot be done during the first phase of the
+     * algorithm since, during this phase, the local variables and the operand
+     * stack are not completely computed. It is therefore necessary to store
+     * the types on which constructors are invoked in the basic block, in
+     * order to do this replacement during the second phase of the algorithm,
+     * where the frames are fully computed. Note that this array can contain
+     * types that are relative to input locals or to the input stack (see
+     * below for the description of the algorithm).
+     */
+    private int[] initializations;
+
+    /**
+     * Returns the output frame local variable type at the given index.
+     *
+     * @param local
+     *            the index of the local that must be returned.
+     * @return the output frame local variable type at the given index.
+     */
+    private int get(final int local) {
+        if (outputLocals == null || local >= outputLocals.length) {
+            // this local has never been assigned in this basic block,
+            // so it is still equal to its value in the input frame
+            return LOCAL | local;
+        } else {
+            int type = outputLocals[local];
+            if (type == 0) {
+                // this local has never been assigned in this basic block,
+                // so it is still equal to its value in the input frame
+                type = outputLocals[local] = LOCAL | local;
+            }
+            return type;
+        }
+    }
+
+    /**
+     * Sets the output frame local variable type at the given index.
+     *
+     * @param local
+     *            the index of the local that must be set.
+     * @param type
+     *            the value of the local that must be set.
+     */
+    private void set(final int local, final int type) {
+        // creates and/or resizes the output local variables array if necessary
+        if (outputLocals == null) {
+            outputLocals = new int[10];
+        }
+        int n = outputLocals.length;
+        if (local >= n) {
+            int[] t = new int[Math.max(local + 1, 2 * n)];
+            System.arraycopy(outputLocals, 0, t, 0, n);
+            outputLocals = t;
+        }
+        // sets the local variable
+        outputLocals[local] = type;
+    }
+
+    /**
+     * Pushes a new type onto the output frame stack.
+     *
+     * @param type
+     *            the type that must be pushed.
+     */
+    private void push(final int type) {
+        // creates and/or resizes the output stack array if necessary
+        if (outputStack == null) {
+            outputStack = new int[10];
+        }
+        int n = outputStack.length;
+        if (outputStackTop >= n) {
+            int[] t = new int[Math.max(outputStackTop + 1, 2 * n)];
+            System.arraycopy(outputStack, 0, t, 0, n);
+            outputStack = t;
+        }
+        // pushes the type on the output stack
+        outputStack[outputStackTop++] = type;
+        // updates the maximum height reached by the output stack, if needed
+        int top = owner.inputStackTop + outputStackTop;
+        if (top > owner.outputStackMax) {
+            owner.outputStackMax = top;
+        }
+    }
+
+    /**
+     * Pushes a new type onto the output frame stack.
+     *
+     * @param cw
+     *            the ClassWriter to which this label belongs.
+     * @param desc
+     *            the descriptor of the type to be pushed. Can also be a
+     *            method descriptor (in this case this method pushes its
+     *            return type onto the output frame stack).
+     */
+    private void push(final ClassWriter cw, final String desc) {
+        int type = type(cw, desc);
+        if (type != 0) {
+            push(type);
+            if (type == LONG || type == DOUBLE) {
+                push(TOP);
+            }
+        }
+    }
+
+    /**
+     * Returns the int encoding of the given type.
+     *
+     * @param cw
+     *            the ClassWriter to which this label belongs.
+     * @param desc
+     *            a type descriptor.
+     * @return the int encoding of the given type.
+     */
+    private static int type(final ClassWriter cw, final String desc) {
+        String t;
+        int index = desc.charAt(0) == '(' ?
desc.indexOf(')') + 1 : 0; + switch (desc.charAt(index)) { + case 'V': + return 0; + case 'Z': + case 'C': + case 'B': + case 'S': + case 'I': + return INTEGER; + case 'F': + return FLOAT; + case 'J': + return LONG; + case 'D': + return DOUBLE; + case 'L': + // stores the internal name, not the descriptor! + t = desc.substring(index + 1, desc.length() - 1); + return OBJECT | cw.addType(t); + // case '[': + default: + // extracts the dimensions and the element type + int data; + int dims = index + 1; + while (desc.charAt(dims) == '[') { + ++dims; + } + switch (desc.charAt(dims)) { + case 'Z': + data = BOOLEAN; + break; + case 'C': + data = CHAR; + break; + case 'B': + data = BYTE; + break; + case 'S': + data = SHORT; + break; + case 'I': + data = INTEGER; + break; + case 'F': + data = FLOAT; + break; + case 'J': + data = LONG; + break; + case 'D': + data = DOUBLE; + break; + // case 'L': + default: + // stores the internal name, not the descriptor + t = desc.substring(dims + 1, desc.length() - 1); + data = OBJECT | cw.addType(t); + } + return (dims - index) << 28 | data; + } + } + + /** + * Pops a type from the output frame stack and returns its value. + * + * @return the type that has been popped from the output frame stack. + */ + private int pop() { + if (outputStackTop > 0) { + return outputStack[--outputStackTop]; + } else { + // if the output frame stack is empty, pops from the input stack + return STACK | -(--owner.inputStackTop); + } + } + + /** + * Pops the given number of types from the output frame stack. + * + * @param elements + * the number of types that must be popped. + */ + private void pop(final int elements) { + if (outputStackTop >= elements) { + outputStackTop -= elements; + } else { + // if the number of elements to be popped is greater than the number + // of elements in the output stack, clear it, and pops the remaining + // elements from the input stack. + owner.inputStackTop -= elements - outputStackTop; + outputStackTop = 0; + } + } + + /** + * Pops a type from the output frame stack. + * + * @param desc + * the descriptor of the type to be popped. Can also be a method + * descriptor (in this case this method pops the types + * corresponding to the method arguments). + */ + private void pop(final String desc) { + char c = desc.charAt(0); + if (c == '(') { + pop((Type.getArgumentsAndReturnSizes(desc) >> 2) - 1); + } else if (c == 'J' || c == 'D') { + pop(2); + } else { + pop(1); + } + } + + /** + * Adds a new type to the list of types on which a constructor is invoked in + * the basic block. + * + * @param var + * a type on a which a constructor is invoked. + */ + private void init(final int var) { + // creates and/or resizes the initializations array if necessary + if (initializations == null) { + initializations = new int[2]; + } + int n = initializations.length; + if (initializationCount >= n) { + int[] t = new int[Math.max(initializationCount + 1, 2 * n)]; + System.arraycopy(initializations, 0, t, 0, n); + initializations = t; + } + // stores the type to be initialized + initializations[initializationCount++] = var; + } + + /** + * Replaces the given type with the appropriate type if it is one of the + * types on which a constructor is invoked in the basic block. + * + * @param cw + * the ClassWriter to which this label belongs. + * @param t + * a type + * @return t or, if t is one of the types on which a constructor is invoked + * in the basic block, the type corresponding to this constructor. 
+ */ + private int init(final ClassWriter cw, final int t) { + int s; + if (t == UNINITIALIZED_THIS) { + s = OBJECT | cw.addType(cw.thisName); + } else if ((t & (DIM | BASE_KIND)) == UNINITIALIZED) { + String type = cw.typeTable[t & BASE_VALUE].strVal1; + s = OBJECT | cw.addType(type); + } else { + return t; + } + for (int j = 0; j < initializationCount; ++j) { + int u = initializations[j]; + int dim = u & DIM; + int kind = u & KIND; + if (kind == LOCAL) { + u = dim + inputLocals[u & VALUE]; + } else if (kind == STACK) { + u = dim + inputStack[inputStack.length - (u & VALUE)]; + } + if (t == u) { + return s; + } + } + return t; + } + + /** + * Initializes the input frame of the first basic block from the method + * descriptor. + * + * @param cw + * the ClassWriter to which this label belongs. + * @param access + * the access flags of the method to which this label belongs. + * @param args + * the formal parameter types of this method. + * @param maxLocals + * the maximum number of local variables of this method. + */ + void initInputFrame(final ClassWriter cw, final int access, + final Type[] args, final int maxLocals) { + inputLocals = new int[maxLocals]; + inputStack = new int[0]; + int i = 0; + if ((access & Opcodes.ACC_STATIC) == 0) { + if ((access & MethodWriter.ACC_CONSTRUCTOR) == 0) { + inputLocals[i++] = OBJECT | cw.addType(cw.thisName); + } else { + inputLocals[i++] = UNINITIALIZED_THIS; + } + } + for (int j = 0; j < args.length; ++j) { + int t = type(cw, args[j].getDescriptor()); + inputLocals[i++] = t; + if (t == LONG || t == DOUBLE) { + inputLocals[i++] = TOP; + } + } + while (i < maxLocals) { + inputLocals[i++] = TOP; + } + } + + /** + * Simulates the action of the given instruction on the output stack frame. + * + * @param opcode + * the opcode of the instruction. + * @param arg + * the operand of the instruction, if any. + * @param cw + * the class writer to which this label belongs. + * @param item + * the operand of the instructions, if any. 
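+ *            <p>(Added commentary, not in the original sources -- sketch
+ *            of the simulation: for IADD this method pops two INTEGER
+ *            types and pushes one INTEGER; for LADD it pops four slots,
+ *            i.e. two longs each occupying a LONG and a TOP slot, and
+ *            pushes LONG followed by TOP.)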
+ */ + void execute(final int opcode, final int arg, final ClassWriter cw, + final Item item) { + int t1, t2, t3, t4; + switch (opcode) { + case Opcodes.NOP: + case Opcodes.INEG: + case Opcodes.LNEG: + case Opcodes.FNEG: + case Opcodes.DNEG: + case Opcodes.I2B: + case Opcodes.I2C: + case Opcodes.I2S: + case Opcodes.GOTO: + case Opcodes.RETURN: + break; + case Opcodes.ACONST_NULL: + push(NULL); + break; + case Opcodes.ICONST_M1: + case Opcodes.ICONST_0: + case Opcodes.ICONST_1: + case Opcodes.ICONST_2: + case Opcodes.ICONST_3: + case Opcodes.ICONST_4: + case Opcodes.ICONST_5: + case Opcodes.BIPUSH: + case Opcodes.SIPUSH: + case Opcodes.ILOAD: + push(INTEGER); + break; + case Opcodes.LCONST_0: + case Opcodes.LCONST_1: + case Opcodes.LLOAD: + push(LONG); + push(TOP); + break; + case Opcodes.FCONST_0: + case Opcodes.FCONST_1: + case Opcodes.FCONST_2: + case Opcodes.FLOAD: + push(FLOAT); + break; + case Opcodes.DCONST_0: + case Opcodes.DCONST_1: + case Opcodes.DLOAD: + push(DOUBLE); + push(TOP); + break; + case Opcodes.LDC: + switch (item.type) { + case ClassWriter.INT: + push(INTEGER); + break; + case ClassWriter.LONG: + push(LONG); + push(TOP); + break; + case ClassWriter.FLOAT: + push(FLOAT); + break; + case ClassWriter.DOUBLE: + push(DOUBLE); + push(TOP); + break; + case ClassWriter.CLASS: + push(OBJECT | cw.addType("java/lang/Class")); + break; + case ClassWriter.STR: + push(OBJECT | cw.addType("java/lang/String")); + break; + case ClassWriter.MTYPE: + push(OBJECT | cw.addType("java/lang/invoke/MethodType")); + break; + // case ClassWriter.HANDLE_BASE + [1..9]: + default: + push(OBJECT | cw.addType("java/lang/invoke/MethodHandle")); + } + break; + case Opcodes.ALOAD: + push(get(arg)); + break; + case Opcodes.IALOAD: + case Opcodes.BALOAD: + case Opcodes.CALOAD: + case Opcodes.SALOAD: + pop(2); + push(INTEGER); + break; + case Opcodes.LALOAD: + case Opcodes.D2L: + pop(2); + push(LONG); + push(TOP); + break; + case Opcodes.FALOAD: + pop(2); + push(FLOAT); + break; + case Opcodes.DALOAD: + case Opcodes.L2D: + pop(2); + push(DOUBLE); + push(TOP); + break; + case Opcodes.AALOAD: + pop(1); + t1 = pop(); + push(ELEMENT_OF + t1); + break; + case Opcodes.ISTORE: + case Opcodes.FSTORE: + case Opcodes.ASTORE: + t1 = pop(); + set(arg, t1); + if (arg > 0) { + t2 = get(arg - 1); + // if t2 is of kind STACK or LOCAL we cannot know its size! + if (t2 == LONG || t2 == DOUBLE) { + set(arg - 1, TOP); + } else if ((t2 & KIND) != BASE) { + set(arg - 1, t2 | TOP_IF_LONG_OR_DOUBLE); + } + } + break; + case Opcodes.LSTORE: + case Opcodes.DSTORE: + pop(1); + t1 = pop(); + set(arg, t1); + set(arg + 1, TOP); + if (arg > 0) { + t2 = get(arg - 1); + // if t2 is of kind STACK or LOCAL we cannot know its size! 
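+ // (Added commentary: example -- a LONG stored at local arg-1 also owns
+ // slot arg, so the store into slot arg just performed destroys its
+ // second half and the LONG at arg-1 must be downgraded to TOP below.
+ // When t2 is a STACK or LOCAL placeholder its size is unknown at this
+ // point, so the TOP_IF_LONG_OR_DOUBLE flag defers that decision to
+ // merge time.)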
+ if (t2 == LONG || t2 == DOUBLE) { + set(arg - 1, TOP); + } else if ((t2 & KIND) != BASE) { + set(arg - 1, t2 | TOP_IF_LONG_OR_DOUBLE); + } + } + break; + case Opcodes.IASTORE: + case Opcodes.BASTORE: + case Opcodes.CASTORE: + case Opcodes.SASTORE: + case Opcodes.FASTORE: + case Opcodes.AASTORE: + pop(3); + break; + case Opcodes.LASTORE: + case Opcodes.DASTORE: + pop(4); + break; + case Opcodes.POP: + case Opcodes.IFEQ: + case Opcodes.IFNE: + case Opcodes.IFLT: + case Opcodes.IFGE: + case Opcodes.IFGT: + case Opcodes.IFLE: + case Opcodes.IRETURN: + case Opcodes.FRETURN: + case Opcodes.ARETURN: + case Opcodes.TABLESWITCH: + case Opcodes.LOOKUPSWITCH: + case Opcodes.ATHROW: + case Opcodes.MONITORENTER: + case Opcodes.MONITOREXIT: + case Opcodes.IFNULL: + case Opcodes.IFNONNULL: + pop(1); + break; + case Opcodes.POP2: + case Opcodes.IF_ICMPEQ: + case Opcodes.IF_ICMPNE: + case Opcodes.IF_ICMPLT: + case Opcodes.IF_ICMPGE: + case Opcodes.IF_ICMPGT: + case Opcodes.IF_ICMPLE: + case Opcodes.IF_ACMPEQ: + case Opcodes.IF_ACMPNE: + case Opcodes.LRETURN: + case Opcodes.DRETURN: + pop(2); + break; + case Opcodes.DUP: + t1 = pop(); + push(t1); + push(t1); + break; + case Opcodes.DUP_X1: + t1 = pop(); + t2 = pop(); + push(t1); + push(t2); + push(t1); + break; + case Opcodes.DUP_X2: + t1 = pop(); + t2 = pop(); + t3 = pop(); + push(t1); + push(t3); + push(t2); + push(t1); + break; + case Opcodes.DUP2: + t1 = pop(); + t2 = pop(); + push(t2); + push(t1); + push(t2); + push(t1); + break; + case Opcodes.DUP2_X1: + t1 = pop(); + t2 = pop(); + t3 = pop(); + push(t2); + push(t1); + push(t3); + push(t2); + push(t1); + break; + case Opcodes.DUP2_X2: + t1 = pop(); + t2 = pop(); + t3 = pop(); + t4 = pop(); + push(t2); + push(t1); + push(t4); + push(t3); + push(t2); + push(t1); + break; + case Opcodes.SWAP: + t1 = pop(); + t2 = pop(); + push(t1); + push(t2); + break; + case Opcodes.IADD: + case Opcodes.ISUB: + case Opcodes.IMUL: + case Opcodes.IDIV: + case Opcodes.IREM: + case Opcodes.IAND: + case Opcodes.IOR: + case Opcodes.IXOR: + case Opcodes.ISHL: + case Opcodes.ISHR: + case Opcodes.IUSHR: + case Opcodes.L2I: + case Opcodes.D2I: + case Opcodes.FCMPL: + case Opcodes.FCMPG: + pop(2); + push(INTEGER); + break; + case Opcodes.LADD: + case Opcodes.LSUB: + case Opcodes.LMUL: + case Opcodes.LDIV: + case Opcodes.LREM: + case Opcodes.LAND: + case Opcodes.LOR: + case Opcodes.LXOR: + pop(4); + push(LONG); + push(TOP); + break; + case Opcodes.FADD: + case Opcodes.FSUB: + case Opcodes.FMUL: + case Opcodes.FDIV: + case Opcodes.FREM: + case Opcodes.L2F: + case Opcodes.D2F: + pop(2); + push(FLOAT); + break; + case Opcodes.DADD: + case Opcodes.DSUB: + case Opcodes.DMUL: + case Opcodes.DDIV: + case Opcodes.DREM: + pop(4); + push(DOUBLE); + push(TOP); + break; + case Opcodes.LSHL: + case Opcodes.LSHR: + case Opcodes.LUSHR: + pop(3); + push(LONG); + push(TOP); + break; + case Opcodes.IINC: + set(arg, INTEGER); + break; + case Opcodes.I2L: + case Opcodes.F2L: + pop(1); + push(LONG); + push(TOP); + break; + case Opcodes.I2F: + pop(1); + push(FLOAT); + break; + case Opcodes.I2D: + case Opcodes.F2D: + pop(1); + push(DOUBLE); + push(TOP); + break; + case Opcodes.F2I: + case Opcodes.ARRAYLENGTH: + case Opcodes.INSTANCEOF: + pop(1); + push(INTEGER); + break; + case Opcodes.LCMP: + case Opcodes.DCMPL: + case Opcodes.DCMPG: + pop(4); + push(INTEGER); + break; + case Opcodes.JSR: + case Opcodes.RET: + throw new RuntimeException( + "JSR/RET are not supported with computeFrames option"); + case Opcodes.GETSTATIC: + push(cw, item.strVal3); + 
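+ // (Added commentary: the push above decodes strVal3, the field
+ // descriptor, into a frame type; e.g. a "J" descriptor pushes LONG
+ // plus a TOP filler slot, as implemented in push(ClassWriter, String).)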
break; + case Opcodes.PUTSTATIC: + pop(item.strVal3); + break; + case Opcodes.GETFIELD: + pop(1); + push(cw, item.strVal3); + break; + case Opcodes.PUTFIELD: + pop(item.strVal3); + pop(); + break; + case Opcodes.INVOKEVIRTUAL: + case Opcodes.INVOKESPECIAL: + case Opcodes.INVOKESTATIC: + case Opcodes.INVOKEINTERFACE: + pop(item.strVal3); + if (opcode != Opcodes.INVOKESTATIC) { + t1 = pop(); + if (opcode == Opcodes.INVOKESPECIAL + && item.strVal2.charAt(0) == '<') { + init(t1); + } + } + push(cw, item.strVal3); + break; + case Opcodes.INVOKEDYNAMIC: + pop(item.strVal2); + push(cw, item.strVal2); + break; + case Opcodes.NEW: + push(UNINITIALIZED | cw.addUninitializedType(item.strVal1, arg)); + break; + case Opcodes.NEWARRAY: + pop(); + switch (arg) { + case Opcodes.T_BOOLEAN: + push(ARRAY_OF | BOOLEAN); + break; + case Opcodes.T_CHAR: + push(ARRAY_OF | CHAR); + break; + case Opcodes.T_BYTE: + push(ARRAY_OF | BYTE); + break; + case Opcodes.T_SHORT: + push(ARRAY_OF | SHORT); + break; + case Opcodes.T_INT: + push(ARRAY_OF | INTEGER); + break; + case Opcodes.T_FLOAT: + push(ARRAY_OF | FLOAT); + break; + case Opcodes.T_DOUBLE: + push(ARRAY_OF | DOUBLE); + break; + // case Opcodes.T_LONG: + default: + push(ARRAY_OF | LONG); + break; + } + break; + case Opcodes.ANEWARRAY: + String s = item.strVal1; + pop(); + if (s.charAt(0) == '[') { + push(cw, '[' + s); + } else { + push(ARRAY_OF | OBJECT | cw.addType(s)); + } + break; + case Opcodes.CHECKCAST: + s = item.strVal1; + pop(); + if (s.charAt(0) == '[') { + push(cw, s); + } else { + push(OBJECT | cw.addType(s)); + } + break; + // case Opcodes.MULTIANEWARRAY: + default: + pop(arg); + push(cw, item.strVal1); + break; + } + } + + /** + * Merges the input frame of the given basic block with the input and output + * frames of this basic block. Returns true if the input frame of + * the given label has been changed by this operation. + * + * @param cw + * the ClassWriter to which this label belongs. + * @param frame + * the basic block whose input frame must be updated. + * @param edge + * the kind of the {@link Edge} between this label and 'label'. + * See {@link Edge#info}. + * @return true if the input frame of the given label has been + * changed by this operation. 
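+ *            <p>(Added note, not in the original sources: for a normal
+ *            control flow edge the output frame computed in this basic
+ *            block is merged into 'frame'; for an exception edge
+ *            (edge &gt; 0) only the locals are merged and the target's
+ *            stack is reduced to the single caught exception type
+ *            encoded in 'edge'.)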
+ */ + boolean merge(final ClassWriter cw, final Frame frame, final int edge) { + boolean changed = false; + int i, s, dim, kind, t; + + int nLocal = inputLocals.length; + int nStack = inputStack.length; + if (frame.inputLocals == null) { + frame.inputLocals = new int[nLocal]; + changed = true; + } + + for (i = 0; i < nLocal; ++i) { + if (outputLocals != null && i < outputLocals.length) { + s = outputLocals[i]; + if (s == 0) { + t = inputLocals[i]; + } else { + dim = s & DIM; + kind = s & KIND; + if (kind == BASE) { + t = s; + } else { + if (kind == LOCAL) { + t = dim + inputLocals[s & VALUE]; + } else { + t = dim + inputStack[nStack - (s & VALUE)]; + } + if ((s & TOP_IF_LONG_OR_DOUBLE) != 0 + && (t == LONG || t == DOUBLE)) { + t = TOP; + } + } + } + } else { + t = inputLocals[i]; + } + if (initializations != null) { + t = init(cw, t); + } + changed |= merge(cw, t, frame.inputLocals, i); + } + + if (edge > 0) { + for (i = 0; i < nLocal; ++i) { + t = inputLocals[i]; + changed |= merge(cw, t, frame.inputLocals, i); + } + if (frame.inputStack == null) { + frame.inputStack = new int[1]; + changed = true; + } + changed |= merge(cw, edge, frame.inputStack, 0); + return changed; + } + + int nInputStack = inputStack.length + owner.inputStackTop; + if (frame.inputStack == null) { + frame.inputStack = new int[nInputStack + outputStackTop]; + changed = true; + } + + for (i = 0; i < nInputStack; ++i) { + t = inputStack[i]; + if (initializations != null) { + t = init(cw, t); + } + changed |= merge(cw, t, frame.inputStack, i); + } + for (i = 0; i < outputStackTop; ++i) { + s = outputStack[i]; + dim = s & DIM; + kind = s & KIND; + if (kind == BASE) { + t = s; + } else { + if (kind == LOCAL) { + t = dim + inputLocals[s & VALUE]; + } else { + t = dim + inputStack[nStack - (s & VALUE)]; + } + if ((s & TOP_IF_LONG_OR_DOUBLE) != 0 + && (t == LONG || t == DOUBLE)) { + t = TOP; + } + } + if (initializations != null) { + t = init(cw, t); + } + changed |= merge(cw, t, frame.inputStack, nInputStack + i); + } + return changed; + } + + /** + * Merges the type at the given index in the given type array with the given + * type. Returns true if the type array has been modified by this + * operation. + * + * @param cw + * the ClassWriter to which this label belongs. + * @param t + * the type with which the type array element must be merged. + * @param types + * an array of types. + * @param index + * the index of the type that must be merged in 'types'. + * @return true if the type array has been modified by this + * operation. 
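+ *            <p>(Added illustrative example: merging the types for
+ *            java/lang/Integer and java/lang/Long yields their closest
+ *            common superclass, here java/lang/Number; merging int[]
+ *            with float[] yields java/lang/Object, the only common
+ *            supertype of these two array types.)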
+ */ + private static boolean merge(final ClassWriter cw, int t, + final int[] types, final int index) { + int u = types[index]; + if (u == t) { + // if the types are equal, merge(u,t)=u, so there is no change + return false; + } + if ((t & ~DIM) == NULL) { + if (u == NULL) { + return false; + } + t = NULL; + } + if (u == 0) { + // if types[index] has never been assigned, merge(u,t)=t + types[index] = t; + return true; + } + int v; + if ((u & BASE_KIND) == OBJECT || (u & DIM) != 0) { + // if u is a reference type of any dimension + if (t == NULL) { + // if t is the NULL type, merge(u,t)=u, so there is no change + return false; + } else if ((t & (DIM | BASE_KIND)) == (u & (DIM | BASE_KIND))) { + // if t and u have the same dimension and same base kind + if ((u & BASE_KIND) == OBJECT) { + // if t is also a reference type, and if u and t have the + // same dimension merge(u,t) = dim(t) | common parent of the + // element types of u and t + v = (t & DIM) | OBJECT + | cw.getMergedType(t & BASE_VALUE, u & BASE_VALUE); + } else { + // if u and t are array types, but not with the same element + // type, merge(u,t) = dim(u) - 1 | java/lang/Object + int vdim = ELEMENT_OF + (u & DIM); + v = vdim | OBJECT | cw.addType("java/lang/Object"); + } + } else if ((t & BASE_KIND) == OBJECT || (t & DIM) != 0) { + // if t is any other reference or array type, the merged type + // is min(udim, tdim) | java/lang/Object, where udim is the + // array dimension of u, minus 1 if u is an array type with a + // primitive element type (and similarly for tdim). + int tdim = (((t & DIM) == 0 || (t & BASE_KIND) == OBJECT) ? 0 + : ELEMENT_OF) + (t & DIM); + int udim = (((u & DIM) == 0 || (u & BASE_KIND) == OBJECT) ? 0 + : ELEMENT_OF) + (u & DIM); + v = Math.min(tdim, udim) | OBJECT + | cw.addType("java/lang/Object"); + } else { + // if t is any other type, merge(u,t)=TOP + v = TOP; + } + } else if (u == NULL) { + // if u is the NULL type, merge(u,t)=t, + // or TOP if t is not a reference type + v = (t & BASE_KIND) == OBJECT || (t & DIM) != 0 ? t : TOP; + } else { + // if u is any other type, merge(u,t)=TOP whatever t + v = TOP; + } + if (u != v) { + types[index] = v; + return true; + } + return false; + } +} diff --git a/src/com/sleepycat/asm/Handle.java b/src/com/sleepycat/asm/Handle.java new file mode 100644 index 0000000..e26022f --- /dev/null +++ b/src/com/sleepycat/asm/Handle.java @@ -0,0 +1,170 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +package com.sleepycat.asm; + +/** + * A reference to a field or a method. + * + * @author Remi Forax + * @author Eric Bruneton + */ +public final class Handle { + + /** + * The kind of field or method designated by this Handle. Should be + * {@link Opcodes#H_GETFIELD}, {@link Opcodes#H_GETSTATIC}, + * {@link Opcodes#H_PUTFIELD}, {@link Opcodes#H_PUTSTATIC}, + * {@link Opcodes#H_INVOKEVIRTUAL}, {@link Opcodes#H_INVOKESTATIC}, + * {@link Opcodes#H_INVOKESPECIAL}, {@link Opcodes#H_NEWINVOKESPECIAL} or + * {@link Opcodes#H_INVOKEINTERFACE}. + */ + final int tag; + + /** + * The internal name of the class that owns the field or method designated + * by this handle. + */ + final String owner; + + /** + * The name of the field or method designated by this handle. + */ + final String name; + + /** + * The descriptor of the field or method designated by this handle. + */ + final String desc; + + /** + * Constructs a new field or method handle. + * + * @param tag + * the kind of field or method designated by this Handle. Must be + * {@link Opcodes#H_GETFIELD}, {@link Opcodes#H_GETSTATIC}, + * {@link Opcodes#H_PUTFIELD}, {@link Opcodes#H_PUTSTATIC}, + * {@link Opcodes#H_INVOKEVIRTUAL}, + * {@link Opcodes#H_INVOKESTATIC}, + * {@link Opcodes#H_INVOKESPECIAL}, + * {@link Opcodes#H_NEWINVOKESPECIAL} or + * {@link Opcodes#H_INVOKEINTERFACE}. + * @param owner + * the internal name of the class that owns the field or method + * designated by this handle. + * @param name + * the name of the field or method designated by this handle. + * @param desc + * the descriptor of the field or method designated by this + * handle. + */ + public Handle(int tag, String owner, String name, String desc) { + this.tag = tag; + this.owner = owner; + this.name = name; + this.desc = desc; + } + + /** + * Returns the kind of field or method designated by this handle. + * + * @return {@link Opcodes#H_GETFIELD}, {@link Opcodes#H_GETSTATIC}, + * {@link Opcodes#H_PUTFIELD}, {@link Opcodes#H_PUTSTATIC}, + * {@link Opcodes#H_INVOKEVIRTUAL}, {@link Opcodes#H_INVOKESTATIC}, + * {@link Opcodes#H_INVOKESPECIAL}, + * {@link Opcodes#H_NEWINVOKESPECIAL} or + * {@link Opcodes#H_INVOKEINTERFACE}. + */ + public int getTag() { + return tag; + } + + /** + * Returns the internal name of the class that owns the field or method + * designated by this handle. + * + * @return the internal name of the class that owns the field or method + * designated by this handle. + */ + public String getOwner() { + return owner; + } + + /** + * Returns the name of the field or method designated by this handle. + * + * @return the name of the field or method designated by this handle. + */ + public String getName() { + return name; + } + + /** + * Returns the descriptor of the field or method designated by this handle. + * + * @return the descriptor of the field or method designated by this handle. 
+ */ + public String getDesc() { + return desc; + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Handle)) { + return false; + } + Handle h = (Handle) obj; + return tag == h.tag && owner.equals(h.owner) && name.equals(h.name) + && desc.equals(h.desc); + } + + @Override + public int hashCode() { + return tag + owner.hashCode() * name.hashCode() * desc.hashCode(); + } + + /** + * Returns the textual representation of this handle. The textual + * representation is: + * + *
+     * <pre>
+     * owner '.' name desc ' ' '(' tag ')'
+     * </pre>
    + * + * . As this format is unambiguous, it can be parsed if necessary. + */ + @Override + public String toString() { + return owner + '.' + name + desc + " (" + tag + ')'; + } +} diff --git a/src/com/sleepycat/asm/Handler.java b/src/com/sleepycat/asm/Handler.java new file mode 100644 index 0000000..550e41c --- /dev/null +++ b/src/com/sleepycat/asm/Handler.java @@ -0,0 +1,121 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * Information about an exception handler block. + * + * @author Eric Bruneton + */ +class Handler { + + /** + * Beginning of the exception handler's scope (inclusive). + */ + Label start; + + /** + * End of the exception handler's scope (exclusive). + */ + Label end; + + /** + * Beginning of the exception handler's code. + */ + Label handler; + + /** + * Internal name of the type of exceptions handled by this handler, or + * null to catch any exceptions. + */ + String desc; + + /** + * Constant pool index of the internal name of the type of exceptions + * handled by this handler, or 0 to catch any exceptions. + */ + int type; + + /** + * Next exception handler block info. + */ + Handler next; + + /** + * Removes the range between start and end from the given exception + * handlers. + * + * @param h + * an exception handler list. + * @param start + * the start of the range to be removed. + * @param end + * the end of the range to be removed. Maybe null. + * @return the exception handler list with the start-end range removed. + */ + static Handler remove(Handler h, Label start, Label end) { + if (h == null) { + return null; + } else { + h.next = remove(h.next, start, end); + } + int hstart = h.start.position; + int hend = h.end.position; + int s = start.position; + int e = end == null ? Integer.MAX_VALUE : end.position; + // if [hstart,hend[ and [s,e[ intervals intersect... 
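+ // (Added example: a handler covering [10,50[ minus the removed range
+ // [20,30[ leaves [10,20[ and [30,50[; the last branch below produces
+ // this by truncating h and chaining a new Handler g for the second
+ // half.)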
+ if (s < hend && e > hstart) { + if (s <= hstart) { + if (e >= hend) { + // [hstart,hend[ fully included in [s,e[, h removed + h = h.next; + } else { + // [hstart,hend[ minus [s,e[ = [e,hend[ + h.start = end; + } + } else if (e >= hend) { + // [hstart,hend[ minus [s,e[ = [hstart,s[ + h.end = start; + } else { + // [hstart,hend[ minus [s,e[ = [hstart,s[ + [e,hend[ + Handler g = new Handler(); + g.start = end; + g.end = h.end; + g.handler = h.handler; + g.desc = h.desc; + g.type = h.type; + g.next = h.next; + h.end = start; + h.next = g; + } + } + return h; + } +} diff --git a/src/com/sleepycat/asm/Item.java b/src/com/sleepycat/asm/Item.java new file mode 100644 index 0000000..6df175d --- /dev/null +++ b/src/com/sleepycat/asm/Item.java @@ -0,0 +1,312 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A constant pool item. Constant pool items can be created with the 'newXXX' + * methods in the {@link ClassWriter} class. + * + * @author Eric Bruneton + */ +final class Item { + + /** + * Index of this item in the constant pool. + */ + int index; + + /** + * Type of this constant pool item. A single class is used to represent all + * constant pool item types, in order to minimize the bytecode size of this + * package. The value of this field is one of {@link ClassWriter#INT}, + * {@link ClassWriter#LONG}, {@link ClassWriter#FLOAT}, + * {@link ClassWriter#DOUBLE}, {@link ClassWriter#UTF8}, + * {@link ClassWriter#STR}, {@link ClassWriter#CLASS}, + * {@link ClassWriter#NAME_TYPE}, {@link ClassWriter#FIELD}, + * {@link ClassWriter#METH}, {@link ClassWriter#IMETH}, + * {@link ClassWriter#MTYPE}, {@link ClassWriter#INDY}. + * + * MethodHandle constant 9 variations are stored using a range of 9 values + * from {@link ClassWriter#HANDLE_BASE} + 1 to + * {@link ClassWriter#HANDLE_BASE} + 9. 
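+ * (Added note: in other words, a method handle item whose tag is T is
+ * stored with type HANDLE_BASE + T, so the nine tag values cannot
+ * collide with the other item type constants.)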
+ * + * Special Item types are used for Items that are stored in the ClassWriter + * {@link ClassWriter#typeTable}, instead of the constant pool, in order to + * avoid clashes with normal constant pool items in the ClassWriter constant + * pool's hash table. These special item types are + * {@link ClassWriter#TYPE_NORMAL}, {@link ClassWriter#TYPE_UNINIT} and + * {@link ClassWriter#TYPE_MERGED}. + */ + int type; + + /** + * Value of this item, for an integer item. + */ + int intVal; + + /** + * Value of this item, for a long item. + */ + long longVal; + + /** + * First part of the value of this item, for items that do not hold a + * primitive value. + */ + String strVal1; + + /** + * Second part of the value of this item, for items that do not hold a + * primitive value. + */ + String strVal2; + + /** + * Third part of the value of this item, for items that do not hold a + * primitive value. + */ + String strVal3; + + /** + * The hash code value of this constant pool item. + */ + int hashCode; + + /** + * Link to another constant pool item, used for collision lists in the + * constant pool's hash table. + */ + Item next; + + /** + * Constructs an uninitialized {@link Item}. + */ + Item() { + } + + /** + * Constructs an uninitialized {@link Item} for constant pool element at + * given position. + * + * @param index + * index of the item to be constructed. + */ + Item(final int index) { + this.index = index; + } + + /** + * Constructs a copy of the given item. + * + * @param index + * index of the item to be constructed. + * @param i + * the item that must be copied into the item to be constructed. + */ + Item(final int index, final Item i) { + this.index = index; + type = i.type; + intVal = i.intVal; + longVal = i.longVal; + strVal1 = i.strVal1; + strVal2 = i.strVal2; + strVal3 = i.strVal3; + hashCode = i.hashCode; + } + + /** + * Sets this item to an integer item. + * + * @param intVal + * the value of this item. + */ + void set(final int intVal) { + this.type = ClassWriter.INT; + this.intVal = intVal; + this.hashCode = 0x7FFFFFFF & (type + intVal); + } + + /** + * Sets this item to a long item. + * + * @param longVal + * the value of this item. + */ + void set(final long longVal) { + this.type = ClassWriter.LONG; + this.longVal = longVal; + this.hashCode = 0x7FFFFFFF & (type + (int) longVal); + } + + /** + * Sets this item to a float item. + * + * @param floatVal + * the value of this item. + */ + void set(final float floatVal) { + this.type = ClassWriter.FLOAT; + this.intVal = Float.floatToRawIntBits(floatVal); + this.hashCode = 0x7FFFFFFF & (type + (int) floatVal); + } + + /** + * Sets this item to a double item. + * + * @param doubleVal + * the value of this item. + */ + void set(final double doubleVal) { + this.type = ClassWriter.DOUBLE; + this.longVal = Double.doubleToRawLongBits(doubleVal); + this.hashCode = 0x7FFFFFFF & (type + (int) doubleVal); + } + + /** + * Sets this item to an item that do not hold a primitive value. + * + * @param type + * the type of this item. + * @param strVal1 + * first part of the value of this item. + * @param strVal2 + * second part of the value of this item. + * @param strVal3 + * third part of the value of this item. 
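+ *            (Added illustrative example: for a FIELD item the three
+ *            parts are the owner's internal name, the field name and
+ *            its type descriptor, e.g. "java/lang/System", "out",
+ *            "Ljava/io/PrintStream;".)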
+ */ + void set(final int type, final String strVal1, final String strVal2, + final String strVal3) { + this.type = type; + this.strVal1 = strVal1; + this.strVal2 = strVal2; + this.strVal3 = strVal3; + switch (type) { + case ClassWriter.CLASS: + this.intVal = 0; // intVal of a class must be zero, see visitInnerClass + case ClassWriter.UTF8: + case ClassWriter.STR: + case ClassWriter.MTYPE: + case ClassWriter.TYPE_NORMAL: + hashCode = 0x7FFFFFFF & (type + strVal1.hashCode()); + return; + case ClassWriter.NAME_TYPE: { + hashCode = 0x7FFFFFFF & (type + strVal1.hashCode() + * strVal2.hashCode()); + return; + } + // ClassWriter.FIELD: + // ClassWriter.METH: + // ClassWriter.IMETH: + // ClassWriter.HANDLE_BASE + 1..9 + default: + hashCode = 0x7FFFFFFF & (type + strVal1.hashCode() + * strVal2.hashCode() * strVal3.hashCode()); + } + } + + /** + * Sets the item to an InvokeDynamic item. + * + * @param name + * invokedynamic's name. + * @param desc + * invokedynamic's desc. + * @param bsmIndex + * zero based index into the class attribute BootrapMethods. + */ + void set(String name, String desc, int bsmIndex) { + this.type = ClassWriter.INDY; + this.longVal = bsmIndex; + this.strVal1 = name; + this.strVal2 = desc; + this.hashCode = 0x7FFFFFFF & (ClassWriter.INDY + bsmIndex + * strVal1.hashCode() * strVal2.hashCode()); + } + + /** + * Sets the item to a BootstrapMethod item. + * + * @param position + * position in byte in the class attribute BootrapMethods. + * @param hashCode + * hashcode of the item. This hashcode is processed from the + * hashcode of the bootstrap method and the hashcode of all + * bootstrap arguments. + */ + void set(int position, int hashCode) { + this.type = ClassWriter.BSM; + this.intVal = position; + this.hashCode = hashCode; + } + + /** + * Indicates if the given item is equal to this one. This method assumes + * that the two items have the same {@link #type}. + * + * @param i + * the item to be compared to this one. Both items must have the + * same {@link #type}. + * @return true if the given item if equal to this one, + * false otherwise. + */ + boolean isEqualTo(final Item i) { + switch (type) { + case ClassWriter.UTF8: + case ClassWriter.STR: + case ClassWriter.CLASS: + case ClassWriter.MTYPE: + case ClassWriter.TYPE_NORMAL: + return i.strVal1.equals(strVal1); + case ClassWriter.TYPE_MERGED: + case ClassWriter.LONG: + case ClassWriter.DOUBLE: + return i.longVal == longVal; + case ClassWriter.INT: + case ClassWriter.FLOAT: + return i.intVal == intVal; + case ClassWriter.TYPE_UNINIT: + return i.intVal == intVal && i.strVal1.equals(strVal1); + case ClassWriter.NAME_TYPE: + return i.strVal1.equals(strVal1) && i.strVal2.equals(strVal2); + case ClassWriter.INDY: { + return i.longVal == longVal && i.strVal1.equals(strVal1) + && i.strVal2.equals(strVal2); + } + // case ClassWriter.FIELD: + // case ClassWriter.METH: + // case ClassWriter.IMETH: + // case ClassWriter.HANDLE_BASE + 1..9 + default: + return i.strVal1.equals(strVal1) && i.strVal2.equals(strVal2) + && i.strVal3.equals(strVal3); + } + } + +} diff --git a/src/com/sleepycat/asm/Label.java b/src/com/sleepycat/asm/Label.java new file mode 100644 index 0000000..9b7f924 --- /dev/null +++ b/src/com/sleepycat/asm/Label.java @@ -0,0 +1,560 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A label represents a position in the bytecode of a method. Labels are used + * for jump, goto, and switch instructions, and for try catch blocks. A label + * designates the instruction that is just after. Note however that there + * can be other elements between a label and the instruction it designates (such + * as other labels, stack map frames, line numbers, etc.). + * + * @author Eric Bruneton + */ +public class Label { + + /** + * Indicates if this label is only used for debug attributes. Such a label + * is not the start of a basic block, the target of a jump instruction, or + * an exception handler. It can be safely ignored in control flow graph + * analysis algorithms (for optimization purposes). + */ + static final int DEBUG = 1; + + /** + * Indicates if the position of this label is known. + */ + static final int RESOLVED = 2; + + /** + * Indicates if this label has been updated, after instruction resizing. + */ + static final int RESIZED = 4; + + /** + * Indicates if this basic block has been pushed in the basic block stack. + * See {@link MethodWriter#visitMaxs visitMaxs}. + */ + static final int PUSHED = 8; + + /** + * Indicates if this label is the target of a jump instruction, or the start + * of an exception handler. + */ + static final int TARGET = 16; + + /** + * Indicates if a stack map frame must be stored for this label. + */ + static final int STORE = 32; + + /** + * Indicates if this label corresponds to a reachable basic block. + */ + static final int REACHABLE = 64; + + /** + * Indicates if this basic block ends with a JSR instruction. + */ + static final int JSR = 128; + + /** + * Indicates if this basic block ends with a RET instruction. + */ + static final int RET = 256; + + /** + * Indicates if this basic block is the start of a subroutine. + */ + static final int SUBROUTINE = 512; + + /** + * Indicates if this subroutine basic block has been visited by a + * visitSubroutine(null, ...) call. 
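+ * (Added cross-reference: the subroutine membership bits consulted once
+ * this flag is set live in the srcAndRefPositions array, which is
+ * reused as a bit set after all forward references have been resolved.)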
+ */ + static final int VISITED = 1024; + + /** + * Indicates if this subroutine basic block has been visited by a + * visitSubroutine(!null, ...) call. + */ + static final int VISITED2 = 2048; + + /** + * Field used to associate user information to a label. Warning: this field + * is used by the ASM tree package. In order to use it with the ASM tree + * package you must override the + * {@link org.objectweb.asm.tree.MethodNode#getLabelNode} method. + */ + public Object info; + + /** + * Flags that indicate the status of this label. + * + * @see #DEBUG + * @see #RESOLVED + * @see #RESIZED + * @see #PUSHED + * @see #TARGET + * @see #STORE + * @see #REACHABLE + * @see #JSR + * @see #RET + */ + int status; + + /** + * The line number corresponding to this label, if known. + */ + int line; + + /** + * The position of this label in the code, if known. + */ + int position; + + /** + * Number of forward references to this label, times two. + */ + private int referenceCount; + + /** + * Informations about forward references. Each forward reference is + * described by two consecutive integers in this array: the first one is the + * position of the first byte of the bytecode instruction that contains the + * forward reference, while the second is the position of the first byte of + * the forward reference itself. In fact the sign of the first integer + * indicates if this reference uses 2 or 4 bytes, and its absolute value + * gives the position of the bytecode instruction. This array is also used + * as a bitset to store the subroutines to which a basic block belongs. This + * information is needed in {@linked MethodWriter#visitMaxs}, after all + * forward references have been resolved. Hence the same array can be used + * for both purposes without problems. + */ + private int[] srcAndRefPositions; + + // ------------------------------------------------------------------------ + + /* + * Fields for the control flow and data flow graph analysis algorithms (used + * to compute the maximum stack size or the stack map frames). A control + * flow graph contains one node per "basic block", and one edge per "jump" + * from one basic block to another. Each node (i.e., each basic block) is + * represented by the Label object that corresponds to the first instruction + * of this basic block. Each node also stores the list of its successors in + * the graph, as a linked list of Edge objects. + * + * The control flow analysis algorithms used to compute the maximum stack + * size or the stack map frames are similar and use two steps. The first + * step, during the visit of each instruction, builds information about the + * state of the local variables and the operand stack at the end of each + * basic block, called the "output frame", relatively to the frame + * state at the beginning of the basic block, which is called the "input + * frame", and which is unknown during this step. The second step, in + * {@link MethodWriter#visitMaxs}, is a fix point algorithm that computes + * information about the input frame of each basic block, from the input + * state of the first basic block (known from the method signature), and by + * the using the previously computed relative output frames. + * + * The algorithm used to compute the maximum stack size only computes the + * relative output and absolute input stack heights, while the algorithm + * used to compute stack map frames computes relative output frames and + * absolute input frames. + */ + + /** + * Start of the output stack relatively to the input stack. 
The exact + * semantics of this field depends on the algorithm that is used. + * + * When only the maximum stack size is computed, this field is the number of + * elements in the input stack. + * + * When the stack map frames are completely computed, this field is the + * offset of the first output stack element relatively to the top of the + * input stack. This offset is always negative or null. A null offset means + * that the output stack must be appended to the input stack. A -n offset + * means that the first n output stack elements must replace the top n input + * stack elements, and that the other elements must be appended to the input + * stack. + */ + int inputStackTop; + + /** + * Maximum height reached by the output stack, relatively to the top of the + * input stack. This maximum is always positive or null. + */ + int outputStackMax; + + /** + * Information about the input and output stack map frames of this basic + * block. This field is only used when {@link ClassWriter#COMPUTE_FRAMES} + * option is used. + */ + Frame frame; + + /** + * The successor of this label, in the order they are visited. This linked + * list does not include labels used for debug info only. If + * {@link ClassWriter#COMPUTE_FRAMES} option is used then, in addition, it + * does not contain successive labels that denote the same bytecode position + * (in this case only the first label appears in this list). + */ + Label successor; + + /** + * The successors of this node in the control flow graph. These successors + * are stored in a linked list of {@link Edge Edge} objects, linked to each + * other by their {@link Edge#next} field. + */ + Edge successors; + + /** + * The next basic block in the basic block stack. This stack is used in the + * main loop of the fix point algorithm used in the second step of the + * control flow analysis algorithms. It is also used in + * {@link #visitSubroutine} to avoid using a recursive method. + * + * @see MethodWriter#visitMaxs + */ + Label next; + + // ------------------------------------------------------------------------ + // Constructor + // ------------------------------------------------------------------------ + + /** + * Constructs a new label. + */ + public Label() { + } + + // ------------------------------------------------------------------------ + // Methods to compute offsets and to manage forward references + // ------------------------------------------------------------------------ + + /** + * Returns the offset corresponding to this label. This offset is computed + * from the start of the method's bytecode. This method is intended for + * {@link Attribute} sub classes, and is normally not needed by class + * generators or adapters. + * + * @return the offset corresponding to this label. + * @throws IllegalStateException + * if this label is not resolved yet. + */ + public int getOffset() { + if ((status & RESOLVED) == 0) { + throw new IllegalStateException( + "Label offset position has not been resolved yet"); + } + return position; + } + + /** + * Puts a reference to this label in the bytecode of a method. If the + * position of the label is known, the offset is computed and written + * directly. Otherwise, a null offset is written and a new forward reference + * is declared for this label. + * + * @param owner + * the code writer that calls this method. + * @param out + * the bytecode of the method. + * @param source + * the position of first byte of the bytecode instruction that + * contains this label. 
+ * @param wideOffset + * true if the reference must be stored in 4 bytes, or + * false if it must be stored with 2 bytes. + * @throws IllegalArgumentException + * if this label has not been created by the given code writer. + */ + void put(final MethodWriter owner, final ByteVector out, final int source, + final boolean wideOffset) { + if ((status & RESOLVED) == 0) { + if (wideOffset) { + addReference(-1 - source, out.length); + out.putInt(-1); + } else { + addReference(source, out.length); + out.putShort(-1); + } + } else { + if (wideOffset) { + out.putInt(position - source); + } else { + out.putShort(position - source); + } + } + } + + /** + * Adds a forward reference to this label. This method must be called only + * for a true forward reference, i.e. only if this label is not resolved + * yet. For backward references, the offset of the reference can be, and + * must be, computed and stored directly. + * + * @param sourcePosition + * the position of the referencing instruction. This position + * will be used to compute the offset of this forward reference. + * @param referencePosition + * the position where the offset for this forward reference must + * be stored. + */ + private void addReference(final int sourcePosition, + final int referencePosition) { + if (srcAndRefPositions == null) { + srcAndRefPositions = new int[6]; + } + if (referenceCount >= srcAndRefPositions.length) { + int[] a = new int[srcAndRefPositions.length + 6]; + System.arraycopy(srcAndRefPositions, 0, a, 0, + srcAndRefPositions.length); + srcAndRefPositions = a; + } + srcAndRefPositions[referenceCount++] = sourcePosition; + srcAndRefPositions[referenceCount++] = referencePosition; + } + + /** + * Resolves all forward references to this label. This method must be called + * when this label is added to the bytecode of the method, i.e. when its + * position becomes known. This method fills in the blanks that where left + * in the bytecode by each forward reference previously added to this label. + * + * @param owner + * the code writer that calls this method. + * @param position + * the position of this label in the bytecode. + * @param data + * the bytecode of the method. + * @return true if a blank that was left for this label was to + * small to store the offset. In such a case the corresponding jump + * instruction is replaced with a pseudo instruction (using unused + * opcodes) using an unsigned two bytes offset. These pseudo + * instructions will need to be replaced with true instructions with + * wider offsets (4 bytes instead of 2). This is done in + * {@link MethodWriter#resizeInstructions}. + * @throws IllegalArgumentException + * if this label has already been resolved, or if it has not + * been created by the given code writer. + */ + boolean resolve(final MethodWriter owner, final int position, + final byte[] data) { + boolean needUpdate = false; + this.status |= RESOLVED; + this.position = position; + int i = 0; + while (i < referenceCount) { + int source = srcAndRefPositions[i++]; + int reference = srcAndRefPositions[i++]; + int offset; + if (source >= 0) { + offset = position - source; + if (offset < Short.MIN_VALUE || offset > Short.MAX_VALUE) { + /* + * changes the opcode of the jump instruction, in order to + * be able to find it later (see resizeInstructions in + * MethodWriter). 
These temporary opcodes are similar to + * jump instruction opcodes, except that the 2 bytes offset + * is unsigned (and can therefore represent values from 0 to + * 65535, which is sufficient since the size of a method is + * limited to 65535 bytes). + */ + int opcode = data[reference - 1] & 0xFF; + if (opcode <= Opcodes.JSR) { + // changes IFEQ ... JSR to opcodes 202 to 217 + data[reference - 1] = (byte) (opcode + 49); + } else { + // changes IFNULL and IFNONNULL to opcodes 218 and 219 + data[reference - 1] = (byte) (opcode + 20); + } + needUpdate = true; + } + data[reference++] = (byte) (offset >>> 8); + data[reference] = (byte) offset; + } else { + offset = position + source + 1; + data[reference++] = (byte) (offset >>> 24); + data[reference++] = (byte) (offset >>> 16); + data[reference++] = (byte) (offset >>> 8); + data[reference] = (byte) offset; + } + } + return needUpdate; + } + + /** + * Returns the first label of the series to which this label belongs. For an + * isolated label or for the first label in a series of successive labels, + * this method returns the label itself. For other labels it returns the + * first label of the series. + * + * @return the first label of the series to which this label belongs. + */ + Label getFirst() { + return !ClassReader.FRAMES || frame == null ? this : frame.owner; + } + + // ------------------------------------------------------------------------ + // Methods related to subroutines + // ------------------------------------------------------------------------ + + /** + * Returns true is this basic block belongs to the given subroutine. + * + * @param id + * a subroutine id. + * @return true is this basic block belongs to the given subroutine. + */ + boolean inSubroutine(final long id) { + if ((status & Label.VISITED) != 0) { + return (srcAndRefPositions[(int) (id >>> 32)] & (int) id) != 0; + } + return false; + } + + /** + * Returns true if this basic block and the given one belong to a common + * subroutine. + * + * @param block + * another basic block. + * @return true if this basic block and the given one belong to a common + * subroutine. + */ + boolean inSameSubroutine(final Label block) { + if ((status & VISITED) == 0 || (block.status & VISITED) == 0) { + return false; + } + for (int i = 0; i < srcAndRefPositions.length; ++i) { + if ((srcAndRefPositions[i] & block.srcAndRefPositions[i]) != 0) { + return true; + } + } + return false; + } + + /** + * Marks this basic block as belonging to the given subroutine. + * + * @param id + * a subroutine id. + * @param nbSubroutines + * the total number of subroutines in the method. + */ + void addToSubroutine(final long id, final int nbSubroutines) { + if ((status & VISITED) == 0) { + status |= VISITED; + srcAndRefPositions = new int[nbSubroutines / 32 + 1]; + } + srcAndRefPositions[(int) (id >>> 32)] |= (int) id; + } + + /** + * Finds the basic blocks that belong to a given subroutine, and marks these + * blocks as belonging to this subroutine. This method follows the control + * flow graph to find all the blocks that are reachable from the current + * block WITHOUT following any JSR target. + * + * @param JSR + * a JSR block that jumps to this subroutine. If this JSR is not + * null it is added to the successor of the RET blocks found in + * the subroutine. + * @param id + * the id of this subroutine. + * @param nbSubroutines + * the total number of subroutines in the method. 
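+ *            <p>(Added note: the traversal below is an iterative
+ *            depth-first walk that reuses the 'next' field of each
+ *            Label as an intrusive work list, precisely to avoid the
+ *            stack overflow a recursive walk could cause on very large
+ *            methods.)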
+ */ + void visitSubroutine(final Label JSR, final long id, final int nbSubroutines) { + // user managed stack of labels, to avoid using a recursive method + // (recursivity can lead to stack overflow with very large methods) + Label stack = this; + while (stack != null) { + // removes a label l from the stack + Label l = stack; + stack = l.next; + l.next = null; + + if (JSR != null) { + if ((l.status & VISITED2) != 0) { + continue; + } + l.status |= VISITED2; + // adds JSR to the successors of l, if it is a RET block + if ((l.status & RET) != 0) { + if (!l.inSameSubroutine(JSR)) { + Edge e = new Edge(); + e.info = l.inputStackTop; + e.successor = JSR.successors.successor; + e.next = l.successors; + l.successors = e; + } + } + } else { + // if the l block already belongs to subroutine 'id', continue + if (l.inSubroutine(id)) { + continue; + } + // marks the l block as belonging to subroutine 'id' + l.addToSubroutine(id, nbSubroutines); + } + // pushes each successor of l on the stack, except JSR targets + Edge e = l.successors; + while (e != null) { + // if the l block is a JSR block, then 'l.successors.next' leads + // to the JSR target (see {@link #visitJumpInsn}) and must + // therefore not be followed + if ((l.status & Label.JSR) == 0 || e != l.successors.next) { + // pushes e.successor on the stack if it not already added + if (e.successor.next == null) { + e.successor.next = stack; + stack = e.successor; + } + } + e = e.next; + } + } + } + + // ------------------------------------------------------------------------ + // Overriden Object methods + // ------------------------------------------------------------------------ + + /** + * Returns a string representation of this label. + * + * @return a string representation of this label. + */ + @Override + public String toString() { + return "L" + System.identityHashCode(this); + } +} diff --git a/src/com/sleepycat/asm/MethodVisitor.java b/src/com/sleepycat/asm/MethodVisitor.java new file mode 100644 index 0000000..af189c5 --- /dev/null +++ b/src/com/sleepycat/asm/MethodVisitor.java @@ -0,0 +1,880 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * A visitor to visit a Java method. The methods of this class must be called in + * the following order: ( visitParameter )* [ + * visitAnnotationDefault ] ( visitAnnotation | + * visitTypeAnnotation | visitAttribute )* [ + * visitCode ( visitFrame | visitXInsn | + * visitLabel | visitInsnAnnotation | + * visitTryCatchBlock | visitTryCatchBlockAnnotation | + * visitLocalVariable | visitLocalVariableAnnotation | + * visitLineNumber )* visitMaxs ] visitEnd. In + * addition, the visitXInsn and visitLabel methods must + * be called in the sequential order of the bytecode instructions of the visited + * code, visitInsnAnnotation must be called after the annotated + * instruction, visitTryCatchBlock must be called before the + * labels passed as arguments have been visited, + * visitTryCatchBlockAnnotation must be called after the + * corresponding try catch block has been visited, and the + * visitLocalVariable, visitLocalVariableAnnotation and + * visitLineNumber methods must be called after the labels + * passed as arguments have been visited. + * + * @author Eric Bruneton + */ +public abstract class MethodVisitor { + + /** + * The ASM API version implemented by this visitor. The value of this field + * must be one of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + */ + protected final int api; + + /** + * The method visitor to which this visitor must delegate method calls. May + * be null. + */ + protected MethodVisitor mv; + + /** + * Constructs a new {@link MethodVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + */ + public MethodVisitor(final int api) { + this(api, null); + } + + /** + * Constructs a new {@link MethodVisitor}. + * + * @param api + * the ASM API version implemented by this visitor. Must be one + * of {@link Opcodes#ASM4} or {@link Opcodes#ASM5}. + * @param mv + * the method visitor to which this visitor must delegate method + * calls. May be null. + */ + public MethodVisitor(final int api, final MethodVisitor mv) { + if (api != Opcodes.ASM4 && api != Opcodes.ASM5) { + throw new IllegalArgumentException(); + } + this.api = api; + this.mv = mv; + } + + // ------------------------------------------------------------------------- + // Parameters, annotations and non standard attributes + // ------------------------------------------------------------------------- + + /** + * Visits a parameter of this method. + * + * @param name + * parameter name or null if none is provided. + * @param access + * the parameter's access flags, only ACC_FINAL, + * ACC_SYNTHETIC or/and ACC_MANDATED are + * allowed (see {@link Opcodes}). + */ + public void visitParameter(String name, int access) { + if (api < Opcodes.ASM5) { + throw new RuntimeException(); + } + if (mv != null) { + mv.visitParameter(name, access); + } + } + + /** + * Visits the default value of this annotation interface method. 
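+ *
+ * (Added illustrative example: for "int timeout() default 30;" declared
+ * in an annotation interface, the visitor returned here would receive
+ * exactly one call such as visit(name, 30) -- the name being ignored --
+ * followed by visitEnd().)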
+     *
+     * @return a visitor to visit the actual default value of this
+     *         annotation interface method, or null if this visitor is
+     *         not interested in visiting this default value. The 'name'
+     *         parameters passed to the methods of this annotation visitor
+     *         are ignored. Moreover, exactly one visit method must be
+     *         called on this annotation visitor, followed by visitEnd.
+     */
+    public AnnotationVisitor visitAnnotationDefault() {
+        if (mv != null) {
+            return mv.visitAnnotationDefault();
+        }
+        return null;
+    }
+
+    /**
+     * Visits an annotation of this method.
+     *
+     * @param desc
+     *            the class descriptor of the annotation class.
+     * @param visible
+     *            true if the annotation is visible at runtime.
+     * @return a visitor to visit the annotation values, or null if
+     *         this visitor is not interested in visiting this annotation.
+     */
+    public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+        if (mv != null) {
+            return mv.visitAnnotation(desc, visible);
+        }
+        return null;
+    }
+
+    /**
+     * Visits an annotation on a type in the method signature.
+     *
+     * @param typeRef
+     *            a reference to the annotated type. The sort of this type
+     *            reference must be {@link TypeReference#METHOD_TYPE_PARAMETER
+     *            METHOD_TYPE_PARAMETER},
+     *            {@link TypeReference#METHOD_TYPE_PARAMETER_BOUND
+     *            METHOD_TYPE_PARAMETER_BOUND},
+     *            {@link TypeReference#METHOD_RETURN METHOD_RETURN},
+     *            {@link TypeReference#METHOD_RECEIVER METHOD_RECEIVER},
+     *            {@link TypeReference#METHOD_FORMAL_PARAMETER
+     *            METHOD_FORMAL_PARAMETER} or {@link TypeReference#THROWS
+     *            THROWS}. See {@link TypeReference}.
+     * @param typePath
+     *            the path to the annotated type argument, wildcard bound,
+     *            array element type, or static inner type within 'typeRef'.
+     *            May be null if the annotation targets 'typeRef' as a whole.
+     * @param desc
+     *            the class descriptor of the annotation class.
+     * @param visible
+     *            true if the annotation is visible at runtime.
+     * @return a visitor to visit the annotation values, or null if
+     *         this visitor is not interested in visiting this annotation.
+     */
+    public AnnotationVisitor visitTypeAnnotation(int typeRef,
+            TypePath typePath, String desc, boolean visible) {
+        if (api < Opcodes.ASM5) {
+            throw new RuntimeException();
+        }
+        if (mv != null) {
+            return mv.visitTypeAnnotation(typeRef, typePath, desc, visible);
+        }
+        return null;
+    }
+
+    /**
+     * Visits an annotation of a parameter of this method.
+     *
+     * @param parameter
+     *            the parameter index.
+     * @param desc
+     *            the class descriptor of the annotation class.
+     * @param visible
+     *            true if the annotation is visible at runtime.
+     * @return a visitor to visit the annotation values, or null if
+     *         this visitor is not interested in visiting this annotation.
+     */
+    public AnnotationVisitor visitParameterAnnotation(int parameter,
+            String desc, boolean visible) {
+        if (mv != null) {
+            return mv.visitParameterAnnotation(parameter, desc, visible);
+        }
+        return null;
+    }
+
+    /**
+     * Visits a non standard attribute of this method.
+     *
+     * @param attr
+     *            an attribute.
+     */
+    public void visitAttribute(Attribute attr) {
+        if (mv != null) {
+            mv.visitAttribute(attr);
+        }
+    }
+
+    /**
+     * Starts the visit of the method's code, if any (i.e. non abstract
+     * method).
+     */
+    public void visitCode() {
+        if (mv != null) {
+            mv.visitCode();
+        }
+    }
+
+    /**
+     * Visits the current state of the local variables and operand stack
+     * elements.
+     * This method must(*) be called just before any instruction i that
+     * follows an unconditional branch instruction such as GOTO or THROW,
+     * that is the target of a jump instruction, or that starts an exception
+     * handler block. The visited types must describe the values of the
+     * local variables and of the operand stack elements just before i is
+     * executed.
+     *
+     * (*) this is mandatory only for classes whose version is greater than
+     * or equal to {@link Opcodes#V1_6 V1_6}.
+     *
+     * The frames of a method must be given either in expanded form, or in
+     * compressed form (all frames must use the same format, i.e. you must
+     * not mix expanded and compressed frames within a single method):
+     *
+     * - In expanded form, all frames must have the F_NEW type.
+     * - In compressed form, frames are basically "deltas" from the state of
+     *   the previous frame:
+     *   - {@link Opcodes#F_SAME} representing a frame with exactly the same
+     *     locals as the previous frame and with an empty stack.
+     *   - {@link Opcodes#F_SAME1} representing a frame with exactly the
+     *     same locals as the previous frame and with a single value on the
+     *     stack (nStack is 1 and stack[0] contains the value for the type
+     *     of the stack item).
+     *   - {@link Opcodes#F_APPEND} representing a frame whose locals are
+     *     the same as the locals in the previous frame, except that
+     *     additional locals are defined (nLocal is 1, 2 or 3 and local
+     *     elements contains the values representing the added types).
+     *   - {@link Opcodes#F_CHOP} representing a frame whose locals are the
+     *     same as the locals in the previous frame, except that the last
+     *     1-3 locals are absent, and with an empty stack (nLocal is 1, 2
+     *     or 3).
+     *   - {@link Opcodes#F_FULL} representing complete frame data.
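+     *
+     * For example (an illustrative sketch): a block that appends one int
+     * local and is reached with an empty stack can be described by the
+     * compressed frame visitFrame(Opcodes.F_APPEND, 1, new Object[] {
+     * Opcodes.INTEGER }, 0, null), or by an equivalent F_NEW frame giving
+     * the full locals array.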
+     *
+     * In both cases the first frame, corresponding to the method's
+     * parameters and access flags, is implicit and must not be visited.
+     * Also, it is illegal to visit two or more frames for the same code
+     * location (i.e., at least one instruction must be visited between two
+     * calls to visitFrame).
+     *
+     * @param type
+     *            the type of this stack map frame. Must be
+     *            {@link Opcodes#F_NEW} for expanded frames, or
+     *            {@link Opcodes#F_FULL}, {@link Opcodes#F_APPEND},
+     *            {@link Opcodes#F_CHOP}, {@link Opcodes#F_SAME} or
+     *            {@link Opcodes#F_SAME1} for compressed frames.
+     * @param nLocal
+     *            the number of local variables in the visited frame.
+     * @param local
+     *            the local variable types in this frame. This array must
+     *            not be modified. Primitive types are represented by
+     *            {@link Opcodes#TOP}, {@link Opcodes#INTEGER},
+     *            {@link Opcodes#FLOAT}, {@link Opcodes#LONG},
+     *            {@link Opcodes#DOUBLE}, {@link Opcodes#NULL} or
+     *            {@link Opcodes#UNINITIALIZED_THIS} (long and double are
+     *            represented by a single element). Reference types are
+     *            represented by String objects (representing internal
+     *            names), and uninitialized types by Label objects (this
+     *            label designates the NEW instruction that created this
+     *            uninitialized value).
+     * @param nStack
+     *            the number of operand stack elements in the visited frame.
+     * @param stack
+     *            the operand stack types in this frame. This array must not
+     *            be modified. Its content has the same format as the
+     *            "local" array.
+     * @throws IllegalStateException
+     *             if a frame is visited just after another one, without any
+     *             instruction between the two (unless this frame is an
+     *             Opcodes#F_SAME frame, in which case it is silently
+     *             ignored).
+     */
+    public void visitFrame(int type, int nLocal, Object[] local, int nStack,
+            Object[] stack) {
+        if (mv != null) {
+            mv.visitFrame(type, nLocal, local, nStack, stack);
+        }
+    }
+
+    // -------------------------------------------------------------------------
+    // Normal instructions
+    // -------------------------------------------------------------------------
+
+    /**
+     * Visits a zero operand instruction.
+     *
+     * @param opcode
+     *            the opcode of the instruction to be visited. This opcode is
+     *            either NOP, ACONST_NULL, ICONST_M1, ICONST_0, ICONST_1,
+     *            ICONST_2, ICONST_3, ICONST_4, ICONST_5, LCONST_0, LCONST_1,
+     *            FCONST_0, FCONST_1, FCONST_2, DCONST_0, DCONST_1, IALOAD,
+     *            LALOAD, FALOAD, DALOAD, AALOAD, BALOAD, CALOAD, SALOAD,
+     *            IASTORE, LASTORE, FASTORE, DASTORE, AASTORE, BASTORE,
+     *            CASTORE, SASTORE, POP, POP2, DUP, DUP_X1, DUP_X2, DUP2,
+     *            DUP2_X1, DUP2_X2, SWAP, IADD, LADD, FADD, DADD, ISUB, LSUB,
+     *            FSUB, DSUB, IMUL, LMUL, FMUL, DMUL, IDIV, LDIV, FDIV, DDIV,
+     *            IREM, LREM, FREM, DREM, INEG, LNEG, FNEG, DNEG, ISHL, LSHL,
+     *            ISHR, LSHR, IUSHR, LUSHR, IAND, LAND, IOR, LOR, IXOR, LXOR,
+     *            I2L, I2F, I2D, L2I, L2F, L2D, F2I, F2L, F2D, D2I, D2L, D2F,
+     *            I2B, I2C, I2S, LCMP, FCMPL, FCMPG, DCMPL, DCMPG, IRETURN,
+     *            LRETURN, FRETURN, DRETURN, ARETURN, RETURN, ARRAYLENGTH,
+     *            ATHROW, MONITORENTER, or MONITOREXIT.
+     */
+    public void visitInsn(int opcode) {
+        if (mv != null) {
+            mv.visitInsn(opcode);
+        }
+    }
+
+    /**
+     * Visits an instruction with a single int operand.
+     *
+     * @param opcode
+     *            the opcode of the instruction to be visited. This opcode is
+     *            either BIPUSH, SIPUSH or NEWARRAY.
+     * @param operand
+     *            the operand of the instruction to be visited.
    + * When opcode is BIPUSH, operand value should be between + * Byte.MIN_VALUE and Byte.MAX_VALUE.
    + * When opcode is SIPUSH, operand value should be between + * Short.MIN_VALUE and Short.MAX_VALUE.
    + * When opcode is NEWARRAY, operand value should be one of + * {@link Opcodes#T_BOOLEAN}, {@link Opcodes#T_CHAR}, + * {@link Opcodes#T_FLOAT}, {@link Opcodes#T_DOUBLE}, + * {@link Opcodes#T_BYTE}, {@link Opcodes#T_SHORT}, + * {@link Opcodes#T_INT} or {@link Opcodes#T_LONG}. + */ + public void visitIntInsn(int opcode, int operand) { + if (mv != null) { + mv.visitIntInsn(opcode, operand); + } + } + + /** + * Visits a local variable instruction. A local variable instruction is an + * instruction that loads or stores the value of a local variable. + * + * @param opcode + * the opcode of the local variable instruction to be visited. + * This opcode is either ILOAD, LLOAD, FLOAD, DLOAD, ALOAD, + * ISTORE, LSTORE, FSTORE, DSTORE, ASTORE or RET. + * @param var + * the operand of the instruction to be visited. This operand is + * the index of a local variable. + */ + public void visitVarInsn(int opcode, int var) { + if (mv != null) { + mv.visitVarInsn(opcode, var); + } + } + + /** + * Visits a type instruction. A type instruction is an instruction that + * takes the internal name of a class as parameter. + * + * @param opcode + * the opcode of the type instruction to be visited. This opcode + * is either NEW, ANEWARRAY, CHECKCAST or INSTANCEOF. + * @param type + * the operand of the instruction to be visited. This operand + * must be the internal name of an object or array class (see + * {@link Type#getInternalName() getInternalName}). + */ + public void visitTypeInsn(int opcode, String type) { + if (mv != null) { + mv.visitTypeInsn(opcode, type); + } + } + + /** + * Visits a field instruction. A field instruction is an instruction that + * loads or stores the value of a field of an object. + * + * @param opcode + * the opcode of the type instruction to be visited. This opcode + * is either GETSTATIC, PUTSTATIC, GETFIELD or PUTFIELD. + * @param owner + * the internal name of the field's owner class (see + * {@link Type#getInternalName() getInternalName}). + * @param name + * the field's name. + * @param desc + * the field's descriptor (see {@link Type Type}). + */ + public void visitFieldInsn(int opcode, String owner, String name, + String desc) { + if (mv != null) { + mv.visitFieldInsn(opcode, owner, name, desc); + } + } + + /** + * Visits a method instruction. A method instruction is an instruction that + * invokes a method. + * + * @param opcode + * the opcode of the type instruction to be visited. This opcode + * is either INVOKEVIRTUAL, INVOKESPECIAL, INVOKESTATIC or + * INVOKEINTERFACE. + * @param owner + * the internal name of the method's owner class (see + * {@link Type#getInternalName() getInternalName}). + * @param name + * the method's name. + * @param desc + * the method's descriptor (see {@link Type Type}). + */ + @Deprecated + public void visitMethodInsn(int opcode, String owner, String name, + String desc) { + if (api >= Opcodes.ASM5) { + boolean itf = opcode == Opcodes.INVOKEINTERFACE; + visitMethodInsn(opcode, owner, name, desc, itf); + return; + } + if (mv != null) { + mv.visitMethodInsn(opcode, owner, name, desc); + } + } + + /** + * Visits a method instruction. A method instruction is an instruction that + * invokes a method. + * + * @param opcode + * the opcode of the type instruction to be visited. This opcode + * is either INVOKEVIRTUAL, INVOKESPECIAL, INVOKESTATIC or + * INVOKEINTERFACE. + * @param owner + * the internal name of the method's owner class (see + * {@link Type#getInternalName() getInternalName}). + * @param name + * the method's name. 
+ * @param desc + * the method's descriptor (see {@link Type Type}). + * @param itf + * if the method's owner class is an interface. + */ + public void visitMethodInsn(int opcode, String owner, String name, + String desc, boolean itf) { + if (api < Opcodes.ASM5) { + if (itf != (opcode == Opcodes.INVOKEINTERFACE)) { + throw new IllegalArgumentException( + "INVOKESPECIAL/STATIC on interfaces require ASM 5"); + } + visitMethodInsn(opcode, owner, name, desc); + return; + } + if (mv != null) { + mv.visitMethodInsn(opcode, owner, name, desc, itf); + } + } + + /** + * Visits an invokedynamic instruction. + * + * @param name + * the method's name. + * @param desc + * the method's descriptor (see {@link Type Type}). + * @param bsm + * the bootstrap method. + * @param bsmArgs + * the bootstrap method constant arguments. Each argument must be + * an {@link Integer}, {@link Float}, {@link Long}, + * {@link Double}, {@link String}, {@link Type} or {@link Handle} + * value. This method is allowed to modify the content of the + * array so a caller should expect that this array may change. + */ + public void visitInvokeDynamicInsn(String name, String desc, Handle bsm, + Object... bsmArgs) { + if (mv != null) { + mv.visitInvokeDynamicInsn(name, desc, bsm, bsmArgs); + } + } + + /** + * Visits a jump instruction. A jump instruction is an instruction that may + * jump to another instruction. + * + * @param opcode + * the opcode of the type instruction to be visited. This opcode + * is either IFEQ, IFNE, IFLT, IFGE, IFGT, IFLE, IF_ICMPEQ, + * IF_ICMPNE, IF_ICMPLT, IF_ICMPGE, IF_ICMPGT, IF_ICMPLE, + * IF_ACMPEQ, IF_ACMPNE, GOTO, JSR, IFNULL or IFNONNULL. + * @param label + * the operand of the instruction to be visited. This operand is + * a label that designates the instruction to which the jump + * instruction may jump. + */ + public void visitJumpInsn(int opcode, Label label) { + if (mv != null) { + mv.visitJumpInsn(opcode, label); + } + } + + /** + * Visits a label. A label designates the instruction that will be visited + * just after it. + * + * @param label + * a {@link Label Label} object. + */ + public void visitLabel(Label label) { + if (mv != null) { + mv.visitLabel(label); + } + } + + // ------------------------------------------------------------------------- + // Special instructions + // ------------------------------------------------------------------------- + + /** + * Visits a LDC instruction. Note that new constant types may be added in + * future versions of the Java Virtual Machine. To easily detect new + * constant types, implementations of this method should check for + * unexpected constant types, like this: + * + *
    +     * if (cst instanceof Integer) {
    +     *     // ...
    +     * } else if (cst instanceof Float) {
    +     *     // ...
    +     * } else if (cst instanceof Long) {
    +     *     // ...
    +     * } else if (cst instanceof Double) {
    +     *     // ...
    +     * } else if (cst instanceof String) {
    +     *     // ...
    +     * } else if (cst instanceof Type) {
    +     *     int sort = ((Type) cst).getSort();
    +     *     if (sort == Type.OBJECT) {
    +     *         // ...
    +     *     } else if (sort == Type.ARRAY) {
    +     *         // ...
    +     *     } else if (sort == Type.METHOD) {
    +     *         // ...
    +     *     } else {
    +     *         // throw an exception
    +     *     }
    +     * } else if (cst instanceof Handle) {
    +     *     // ...
    +     * } else {
    +     *     // throw an exception
    +     * }
    +     * 
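+     *
+     * For example (a sketch), constants are typically pushed as follows:
+     *
+     *     mv.visitLdcInsn("hello");             // String constant
+     *     mv.visitLdcInsn(Integer.valueOf(42)); // int constant
+     *     mv.visitLdcInsn(Type.getType("[I"));  // int[].class constant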
    + * + * @param cst + * the constant to be loaded on the stack. This parameter must be + * a non null {@link Integer}, a {@link Float}, a {@link Long}, a + * {@link Double}, a {@link String}, a {@link Type} of OBJECT or + * ARRAY sort for .class constants, for classes whose + * version is 49.0, a {@link Type} of METHOD sort or a + * {@link Handle} for MethodType and MethodHandle constants, for + * classes whose version is 51.0. + */ + public void visitLdcInsn(Object cst) { + if (mv != null) { + mv.visitLdcInsn(cst); + } + } + + /** + * Visits an IINC instruction. + * + * @param var + * index of the local variable to be incremented. + * @param increment + * amount to increment the local variable by. + */ + public void visitIincInsn(int var, int increment) { + if (mv != null) { + mv.visitIincInsn(var, increment); + } + } + + /** + * Visits a TABLESWITCH instruction. + * + * @param min + * the minimum key value. + * @param max + * the maximum key value. + * @param dflt + * beginning of the default handler block. + * @param labels + * beginnings of the handler blocks. labels[i] is the + * beginning of the handler block for the min + i key. + */ + public void visitTableSwitchInsn(int min, int max, Label dflt, + Label... labels) { + if (mv != null) { + mv.visitTableSwitchInsn(min, max, dflt, labels); + } + } + + /** + * Visits a LOOKUPSWITCH instruction. + * + * @param dflt + * beginning of the default handler block. + * @param keys + * the values of the keys. + * @param labels + * beginnings of the handler blocks. labels[i] is the + * beginning of the handler block for the keys[i] key. + */ + public void visitLookupSwitchInsn(Label dflt, int[] keys, Label[] labels) { + if (mv != null) { + mv.visitLookupSwitchInsn(dflt, keys, labels); + } + } + + /** + * Visits a MULTIANEWARRAY instruction. + * + * @param desc + * an array type descriptor (see {@link Type Type}). + * @param dims + * number of dimensions of the array to allocate. + */ + public void visitMultiANewArrayInsn(String desc, int dims) { + if (mv != null) { + mv.visitMultiANewArrayInsn(desc, dims); + } + } + + /** + * Visits an annotation on an instruction. This method must be called just + * after the annotated instruction. It can be called several times + * for the same instruction. + * + * @param typeRef + * a reference to the annotated type. The sort of this type + * reference must be {@link TypeReference#INSTANCEOF INSTANCEOF}, + * {@link TypeReference#NEW NEW}, + * {@link TypeReference#CONSTRUCTOR_REFERENCE + * CONSTRUCTOR_REFERENCE}, {@link TypeReference#METHOD_REFERENCE + * METHOD_REFERENCE}, {@link TypeReference#CAST CAST}, + * {@link TypeReference#CONSTRUCTOR_INVOCATION_TYPE_ARGUMENT + * CONSTRUCTOR_INVOCATION_TYPE_ARGUMENT}, + * {@link TypeReference#METHOD_INVOCATION_TYPE_ARGUMENT + * METHOD_INVOCATION_TYPE_ARGUMENT}, + * {@link TypeReference#CONSTRUCTOR_REFERENCE_TYPE_ARGUMENT + * CONSTRUCTOR_REFERENCE_TYPE_ARGUMENT}, or + * {@link TypeReference#METHOD_REFERENCE_TYPE_ARGUMENT + * METHOD_REFERENCE_TYPE_ARGUMENT}. See {@link TypeReference}. + * @param typePath + * the path to the annotated type argument, wildcard bound, array + * element type, or static inner type within 'typeRef'. May be + * null if the annotation targets 'typeRef' as a whole. + * @param desc + * the class descriptor of the annotation class. + * @param visible + * true if the annotation is visible at runtime. + * @return a visitor to visit the annotation values, or null if + * this visitor is not interested in visiting this annotation. 
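+     *
+     *         For example (a sketch; NonNull is a hypothetical annotation):
+     *         a cast can be annotated, just after its
+     *         visitTypeInsn(CHECKCAST, ...), with
+     *         visitInsnAnnotation(TypeReference.newTypeReference(
+     *         TypeReference.CAST).getValue(), null, "LNonNull;", true).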
+ */ + public AnnotationVisitor visitInsnAnnotation(int typeRef, + TypePath typePath, String desc, boolean visible) { + if (api < Opcodes.ASM5) { + throw new RuntimeException(); + } + if (mv != null) { + return mv.visitInsnAnnotation(typeRef, typePath, desc, visible); + } + return null; + } + + // ------------------------------------------------------------------------- + // Exceptions table entries, debug information, max stack and max locals + // ------------------------------------------------------------------------- + + /** + * Visits a try catch block. + * + * @param start + * beginning of the exception handler's scope (inclusive). + * @param end + * end of the exception handler's scope (exclusive). + * @param handler + * beginning of the exception handler's code. + * @param type + * internal name of the type of exceptions handled by the + * handler, or null to catch any exceptions (for + * "finally" blocks). + * @throws IllegalArgumentException + * if one of the labels has already been visited by this visitor + * (by the {@link #visitLabel visitLabel} method). + */ + public void visitTryCatchBlock(Label start, Label end, Label handler, + String type) { + if (mv != null) { + mv.visitTryCatchBlock(start, end, handler, type); + } + } + + /** + * Visits an annotation on an exception handler type. This method must be + * called after the {@link #visitTryCatchBlock} for the annotated + * exception handler. It can be called several times for the same exception + * handler. + * + * @param typeRef + * a reference to the annotated type. The sort of this type + * reference must be {@link TypeReference#EXCEPTION_PARAMETER + * EXCEPTION_PARAMETER}. See {@link TypeReference}. + * @param typePath + * the path to the annotated type argument, wildcard bound, array + * element type, or static inner type within 'typeRef'. May be + * null if the annotation targets 'typeRef' as a whole. + * @param desc + * the class descriptor of the annotation class. + * @param visible + * true if the annotation is visible at runtime. + * @return a visitor to visit the annotation values, or null if + * this visitor is not interested in visiting this annotation. + */ + public AnnotationVisitor visitTryCatchAnnotation(int typeRef, + TypePath typePath, String desc, boolean visible) { + if (api < Opcodes.ASM5) { + throw new RuntimeException(); + } + if (mv != null) { + return mv.visitTryCatchAnnotation(typeRef, typePath, desc, visible); + } + return null; + } + + /** + * Visits a local variable declaration. + * + * @param name + * the name of a local variable. + * @param desc + * the type descriptor of this local variable. + * @param signature + * the type signature of this local variable. May be + * null if the local variable type does not use generic + * types. + * @param start + * the first instruction corresponding to the scope of this local + * variable (inclusive). + * @param end + * the last instruction corresponding to the scope of this local + * variable (exclusive). + * @param index + * the local variable's index. + * @throws IllegalArgumentException + * if one of the labels has not already been visited by this + * visitor (by the {@link #visitLabel visitLabel} method). + */ + public void visitLocalVariable(String name, String desc, String signature, + Label start, Label end, int index) { + if (mv != null) { + mv.visitLocalVariable(name, desc, signature, start, end, index); + } + } + + /** + * Visits an annotation on a local variable type. + * + * @param typeRef + * a reference to the annotated type. 
+     *            The sort of this type reference must be
+     *            {@link TypeReference#LOCAL_VARIABLE LOCAL_VARIABLE} or
+     *            {@link TypeReference#RESOURCE_VARIABLE RESOURCE_VARIABLE}.
+     *            See {@link TypeReference}.
+     * @param typePath
+     *            the path to the annotated type argument, wildcard bound,
+     *            array element type, or static inner type within 'typeRef'.
+     *            May be null if the annotation targets 'typeRef' as a whole.
+     * @param start
+     *            the first instructions corresponding to the continuous
+     *            ranges that make up the scope of this local variable
+     *            (inclusive).
+     * @param end
+     *            the last instructions corresponding to the continuous
+     *            ranges that make up the scope of this local variable
+     *            (exclusive). This array must have the same size as the
+     *            'start' array.
+     * @param index
+     *            the local variable's index in each range. This array must
+     *            have the same size as the 'start' array.
+     * @param desc
+     *            the class descriptor of the annotation class.
+     * @param visible
+     *            true if the annotation is visible at runtime.
+     * @return a visitor to visit the annotation values, or null if
+     *         this visitor is not interested in visiting this annotation.
+     */
+    public AnnotationVisitor visitLocalVariableAnnotation(int typeRef,
+            TypePath typePath, Label[] start, Label[] end, int[] index,
+            String desc, boolean visible) {
+        if (api < Opcodes.ASM5) {
+            throw new RuntimeException();
+        }
+        if (mv != null) {
+            return mv.visitLocalVariableAnnotation(typeRef, typePath, start,
+                    end, index, desc, visible);
+        }
+        return null;
+    }
+
+    /**
+     * Visits a line number declaration.
+     *
+     * @param line
+     *            a line number. This number refers to the source file from
+     *            which the class was compiled.
+     * @param start
+     *            the first instruction corresponding to this line number.
+     * @throws IllegalArgumentException
+     *             if start has not already been visited by this
+     *             visitor (by the {@link #visitLabel visitLabel} method).
+     */
+    public void visitLineNumber(int line, Label start) {
+        if (mv != null) {
+            mv.visitLineNumber(line, start);
+        }
+    }
+
+    /**
+     * Visits the maximum stack size and the maximum number of local
+     * variables of the method.
+     *
+     * @param maxStack
+     *            maximum stack size of the method.
+     * @param maxLocals
+     *            maximum number of local variables for the method.
+     */
+    public void visitMaxs(int maxStack, int maxLocals) {
+        if (mv != null) {
+            mv.visitMaxs(maxStack, maxLocals);
+        }
+    }
+
+    /**
+     * Visits the end of the method. This method, which is the last one to
+     * be called, is used to inform the visitor that all the annotations and
+     * attributes of the method have been visited.
+     */
+    public void visitEnd() {
+        if (mv != null) {
+            mv.visitEnd();
+        }
+    }
+}
diff --git a/src/com/sleepycat/asm/MethodWriter.java b/src/com/sleepycat/asm/MethodWriter.java
new file mode 100644
index 0000000..ea925df
--- /dev/null
+++ b/src/com/sleepycat/asm/MethodWriter.java
@@ -0,0 +1,2913 @@
+/***
+ * ASM: a very small and fast Java bytecode manipulation framework
+ * Copyright (c) 2000-2011 INRIA, France Telecom
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package com.sleepycat.asm;
+
+/**
+ * A {@link MethodVisitor} that generates methods in bytecode form. Each visit
+ * method of this class appends the bytecode corresponding to the visited
+ * instruction to a byte vector, in the order these methods are called.
+ *
+ * @author Eric Bruneton
+ * @author Eugene Kuleshov
+ */
+class MethodWriter extends MethodVisitor {
+
+    /**
+     * Pseudo access flag used to denote constructors.
+     */
+    static final int ACC_CONSTRUCTOR = 0x80000;
+
+    /**
+     * Frame has exactly the same locals as the previous stack map frame and
+     * the number of stack items is zero.
+     */
+    static final int SAME_FRAME = 0; // to 63 (0-3f)
+
+    /**
+     * Frame has exactly the same locals as the previous stack map frame and
+     * the number of stack items is 1.
+     */
+    static final int SAME_LOCALS_1_STACK_ITEM_FRAME = 64; // to 127 (40-7f)
+
+    /**
+     * Reserved for future use.
+     */
+    static final int RESERVED = 128;
+
+    /**
+     * Frame has exactly the same locals as the previous stack map frame and
+     * the number of stack items is 1. Offset is bigger than 63.
+     */
+    static final int SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED = 247; // f7
+
+    /**
+     * Frame where current locals are the same as the locals in the previous
+     * frame, except that the k last locals are absent. The value of k is
+     * given by the formula 251-frame_type.
+     */
+    static final int CHOP_FRAME = 248; // to 250 (f8-fA)
+
+    /**
+     * Frame has exactly the same locals as the previous stack map frame and
+     * the number of stack items is zero. Offset is bigger than 63.
+     */
+    static final int SAME_FRAME_EXTENDED = 251; // fb
+
+    /**
+     * Frame where current locals are the same as the locals in the previous
+     * frame, except that k additional locals are defined. The value of k is
+     * given by the formula frame_type-251.
+     */
+    static final int APPEND_FRAME = 252; // to 254 // fc-fe
+
+    /**
+     * Full frame.
+     */
+    static final int FULL_FRAME = 255; // ff
+
+    /**
+     * Indicates that the stack map frames must be recomputed from scratch.
+     * In this case the maximum stack size and number of local variables are
+     * also recomputed from scratch.
+     *
+     * @see #compute
+     */
+    private static final int FRAMES = 0;
+
+    /**
+     * Indicates that the maximum stack size and number of local variables
+     * must be automatically computed.
+     *
+     * @see #compute
+     */
+    private static final int MAXS = 1;
+
+    /**
+     * Indicates that nothing must be automatically computed.
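+     * (For reference, the constructor below selects the mode as: compute =
+     * computeFrames ? FRAMES : (computeMaxs ? MAXS : NOTHING).)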
+ * + * @see #compute + */ + private static final int NOTHING = 2; + + /** + * The class writer to which this method must be added. + */ + final ClassWriter cw; + + /** + * Access flags of this method. + */ + private int access; + + /** + * The index of the constant pool item that contains the name of this + * method. + */ + private final int name; + + /** + * The index of the constant pool item that contains the descriptor of this + * method. + */ + private final int desc; + + /** + * The descriptor of this method. + */ + private final String descriptor; + + /** + * The signature of this method. + */ + String signature; + + /** + * If not zero, indicates that the code of this method must be copied from + * the ClassReader associated to this writer in cw.cr. More + * precisely, this field gives the index of the first byte to copied from + * cw.cr.b. + */ + int classReaderOffset; + + /** + * If not zero, indicates that the code of this method must be copied from + * the ClassReader associated to this writer in cw.cr. More + * precisely, this field gives the number of bytes to copied from + * cw.cr.b. + */ + int classReaderLength; + + /** + * Number of exceptions that can be thrown by this method. + */ + int exceptionCount; + + /** + * The exceptions that can be thrown by this method. More precisely, this + * array contains the indexes of the constant pool items that contain the + * internal names of these exception classes. + */ + int[] exceptions; + + /** + * The annotation default attribute of this method. May be null. + */ + private ByteVector annd; + + /** + * The runtime visible annotations of this method. May be null. + */ + private AnnotationWriter anns; + + /** + * The runtime invisible annotations of this method. May be null. + */ + private AnnotationWriter ianns; + + /** + * The runtime visible type annotations of this method. May be null + * . + */ + private AnnotationWriter tanns; + + /** + * The runtime invisible type annotations of this method. May be + * null. + */ + private AnnotationWriter itanns; + + /** + * The runtime visible parameter annotations of this method. May be + * null. + */ + private AnnotationWriter[] panns; + + /** + * The runtime invisible parameter annotations of this method. May be + * null. + */ + private AnnotationWriter[] ipanns; + + /** + * The number of synthetic parameters of this method. + */ + private int synthetics; + + /** + * The non standard attributes of the method. + */ + private Attribute attrs; + + /** + * The bytecode of this method. + */ + private ByteVector code = new ByteVector(); + + /** + * Maximum stack size of this method. + */ + private int maxStack; + + /** + * Maximum number of local variables for this method. + */ + private int maxLocals; + + /** + * Number of local variables in the current stack map frame. + */ + private int currentLocals; + + /** + * Number of stack map frames in the StackMapTable attribute. + */ + private int frameCount; + + /** + * The StackMapTable attribute. + */ + private ByteVector stackMap; + + /** + * The offset of the last frame that was written in the StackMapTable + * attribute. + */ + private int previousFrameOffset; + + /** + * The last frame that was written in the StackMapTable attribute. + * + * @see #frame + */ + private int[] previousFrame; + + /** + * The current stack map frame. The first element contains the offset of the + * instruction to which the frame corresponds, the second element is the + * number of locals and the third one is the number of stack elements. 
+     * The local variables start at index 3 and are followed by the operand
+     * stack values. In summary frame[0] = offset, frame[1] = nLocal,
+     * frame[2] = nStack, and frame[3] is the first local variable type.
+     * All types are encoded as integers, with the same format as the one
+     * used in {@link Label}, but limited to BASE types.
+     */
+    private int[] frame;
+
+    /**
+     * Number of elements in the exception handler list.
+     */
+    private int handlerCount;
+
+    /**
+     * The first element in the exception handler list.
+     */
+    private Handler firstHandler;
+
+    /**
+     * The last element in the exception handler list.
+     */
+    private Handler lastHandler;
+
+    /**
+     * Number of entries in the MethodParameters attribute.
+     */
+    private int methodParametersCount;
+
+    /**
+     * The MethodParameters attribute.
+     */
+    private ByteVector methodParameters;
+
+    /**
+     * Number of entries in the LocalVariableTable attribute.
+     */
+    private int localVarCount;
+
+    /**
+     * The LocalVariableTable attribute.
+     */
+    private ByteVector localVar;
+
+    /**
+     * Number of entries in the LocalVariableTypeTable attribute.
+     */
+    private int localVarTypeCount;
+
+    /**
+     * The LocalVariableTypeTable attribute.
+     */
+    private ByteVector localVarType;
+
+    /**
+     * Number of entries in the LineNumberTable attribute.
+     */
+    private int lineNumberCount;
+
+    /**
+     * The LineNumberTable attribute.
+     */
+    private ByteVector lineNumber;
+
+    /**
+     * The start offset of the last visited instruction.
+     */
+    private int lastCodeOffset;
+
+    /**
+     * The runtime visible type annotations of the code. May be null.
+     */
+    private AnnotationWriter ctanns;
+
+    /**
+     * The runtime invisible type annotations of the code. May be null.
+     */
+    private AnnotationWriter ictanns;
+
+    /**
+     * The non standard attributes of the method's code.
+     */
+    private Attribute cattrs;
+
+    /**
+     * Indicates if some jump instructions are too small and need to be
+     * resized.
+     */
+    private boolean resize;
+
+    /**
+     * The number of subroutines in this method.
+     */
+    private int subroutines;
+
+    // ------------------------------------------------------------------------
+
+    /*
+     * Fields for the control flow graph analysis algorithm (used to compute
+     * the maximum stack size). A control flow graph contains one node per
+     * "basic block", and one edge per "jump" from one basic block to
+     * another. Each node (i.e., each basic block) is represented by the
+     * Label object that corresponds to the first instruction of this basic
+     * block. Each node also stores the list of its successors in the graph,
+     * as a linked list of Edge objects.
+     */
+
+    /**
+     * Indicates what must be automatically computed.
+     *
+     * @see #FRAMES
+     * @see #MAXS
+     * @see #NOTHING
+     */
+    private final int compute;
+
+    /**
+     * A list of labels. This list is the list of basic blocks in the
+     * method, i.e. a list of Label objects linked to each other by their
+     * {@link Label#successor} field, in the order they are visited by
+     * {@link MethodVisitor#visitLabel}, and starting with the first basic
+     * block.
+     */
+    private Label labels;
+
+    /**
+     * The previous basic block.
+     */
+    private Label previousBlock;
+
+    /**
+     * The current basic block.
+     */
+    private Label currentBlock;
+
+    /**
+     * The (relative) stack size after the last visited instruction. This
+     * size is relative to the beginning of the current basic block, i.e.,
+     * the true stack size after the last visited instruction is equal to
+     * the {@link Label#inputStackTop beginStackSize} of the current basic
+     * block plus stackSize.
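+     *
+     * For example (an illustrative walk-through): from the start of a basic
+     * block, visiting ILOAD, ILOAD, IADD leaves stackSize at 1 and
+     * maxStackSize at 2, since each load pushes one word and IADD pops two
+     * and pushes one.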
+ */ + private int stackSize; + + /** + * The (relative) maximum stack size after the last visited instruction. + * This size is relative to the beginning of the current basic block, i.e., + * the true maximum stack size after the last visited instruction is equal + * to the {@link Label#inputStackTop beginStackSize} of the current basic + * block plus stackSize. + */ + private int maxStackSize; + + // ------------------------------------------------------------------------ + // Constructor + // ------------------------------------------------------------------------ + + /** + * Constructs a new {@link MethodWriter}. + * + * @param cw + * the class writer in which the method must be added. + * @param access + * the method's access flags (see {@link Opcodes}). + * @param name + * the method's name. + * @param desc + * the method's descriptor (see {@link Type}). + * @param signature + * the method's signature. May be null. + * @param exceptions + * the internal names of the method's exceptions. May be + * null. + * @param computeMaxs + * true if the maximum stack size and number of local + * variables must be automatically computed. + * @param computeFrames + * true if the stack map tables must be recomputed from + * scratch. + */ + MethodWriter(final ClassWriter cw, final int access, final String name, + final String desc, final String signature, + final String[] exceptions, final boolean computeMaxs, + final boolean computeFrames) { + super(Opcodes.ASM5); + if (cw.firstMethod == null) { + cw.firstMethod = this; + } else { + cw.lastMethod.mv = this; + } + cw.lastMethod = this; + this.cw = cw; + this.access = access; + if ("".equals(name)) { + this.access |= ACC_CONSTRUCTOR; + } + this.name = cw.newUTF8(name); + this.desc = cw.newUTF8(desc); + this.descriptor = desc; + if (ClassReader.SIGNATURES) { + this.signature = signature; + } + if (exceptions != null && exceptions.length > 0) { + exceptionCount = exceptions.length; + this.exceptions = new int[exceptionCount]; + for (int i = 0; i < exceptionCount; ++i) { + this.exceptions[i] = cw.newClass(exceptions[i]); + } + } + this.compute = computeFrames ? FRAMES : (computeMaxs ? MAXS : NOTHING); + if (computeMaxs || computeFrames) { + // updates maxLocals + int size = Type.getArgumentsAndReturnSizes(descriptor) >> 2; + if ((access & Opcodes.ACC_STATIC) != 0) { + --size; + } + maxLocals = size; + currentLocals = size; + // creates and visits the label for the first basic block + labels = new Label(); + labels.status |= Label.PUSHED; + visitLabel(labels); + } + } + + // ------------------------------------------------------------------------ + // Implementation of the MethodVisitor abstract class + // ------------------------------------------------------------------------ + + @Override + public void visitParameter(String name, int access) { + if (methodParameters == null) { + methodParameters = new ByteVector(); + } + ++methodParametersCount; + methodParameters.putShort((name == null) ? 
0 : cw.newUTF8(name)) + .putShort(access); + } + + @Override + public AnnotationVisitor visitAnnotationDefault() { + if (!ClassReader.ANNOTATIONS) { + return null; + } + annd = new ByteVector(); + return new AnnotationWriter(cw, false, annd, null, 0); + } + + @Override + public AnnotationVisitor visitAnnotation(final String desc, + final boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, 2); + if (visible) { + aw.next = anns; + anns = aw; + } else { + aw.next = ianns; + ianns = aw; + } + return aw; + } + + @Override + public AnnotationVisitor visitTypeAnnotation(final int typeRef, + final TypePath typePath, final String desc, final boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write target_type and target_info + AnnotationWriter.putTarget(typeRef, typePath, bv); + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, + bv.length - 2); + if (visible) { + aw.next = tanns; + tanns = aw; + } else { + aw.next = itanns; + itanns = aw; + } + return aw; + } + + @Override + public AnnotationVisitor visitParameterAnnotation(final int parameter, + final String desc, final boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + if ("Ljava/lang/Synthetic;".equals(desc)) { + // workaround for a bug in javac with synthetic parameters + // see ClassReader.readParameterAnnotations + synthetics = Math.max(synthetics, parameter + 1); + return new AnnotationWriter(cw, false, bv, null, 0); + } + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, 2); + if (visible) { + if (panns == null) { + panns = new AnnotationWriter[Type.getArgumentTypes(descriptor).length]; + } + aw.next = panns[parameter]; + panns[parameter] = aw; + } else { + if (ipanns == null) { + ipanns = new AnnotationWriter[Type.getArgumentTypes(descriptor).length]; + } + aw.next = ipanns[parameter]; + ipanns[parameter] = aw; + } + return aw; + } + + @Override + public void visitAttribute(final Attribute attr) { + if (attr.isCodeAttribute()) { + attr.next = cattrs; + cattrs = attr; + } else { + attr.next = attrs; + attrs = attr; + } + } + + @Override + public void visitCode() { + } + + @Override + public void visitFrame(final int type, final int nLocal, + final Object[] local, final int nStack, final Object[] stack) { + if (!ClassReader.FRAMES || compute == FRAMES) { + return; + } + + if (type == Opcodes.F_NEW) { + if (previousFrame == null) { + visitImplicitFirstFrame(); + } + currentLocals = nLocal; + int frameIndex = startFrame(code.length, nLocal, nStack); + for (int i = 0; i < nLocal; ++i) { + if (local[i] instanceof String) { + frame[frameIndex++] = Frame.OBJECT + | cw.addType((String) local[i]); + } else if (local[i] instanceof Integer) { + frame[frameIndex++] = ((Integer) local[i]).intValue(); + } else { + frame[frameIndex++] = Frame.UNINITIALIZED + | cw.addUninitializedType("", + ((Label) local[i]).position); + } + } + for (int i = 0; i < nStack; ++i) { + if (stack[i] instanceof String) { + frame[frameIndex++] = Frame.OBJECT + | cw.addType((String) stack[i]); + } else if (stack[i] instanceof Integer) { 
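+                    // Integer elements are the primitive type codes
+                    // (Opcodes.TOP, Opcodes.INTEGER, ...) described in the
+                    // visitFrame javadoc; they are stored in the frame
+                    // array as-is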
+ frame[frameIndex++] = ((Integer) stack[i]).intValue(); + } else { + frame[frameIndex++] = Frame.UNINITIALIZED + | cw.addUninitializedType("", + ((Label) stack[i]).position); + } + } + endFrame(); + } else { + int delta; + if (stackMap == null) { + stackMap = new ByteVector(); + delta = code.length; + } else { + delta = code.length - previousFrameOffset - 1; + if (delta < 0) { + if (type == Opcodes.F_SAME) { + return; + } else { + throw new IllegalStateException(); + } + } + } + + switch (type) { + case Opcodes.F_FULL: + currentLocals = nLocal; + stackMap.putByte(FULL_FRAME).putShort(delta).putShort(nLocal); + for (int i = 0; i < nLocal; ++i) { + writeFrameType(local[i]); + } + stackMap.putShort(nStack); + for (int i = 0; i < nStack; ++i) { + writeFrameType(stack[i]); + } + break; + case Opcodes.F_APPEND: + currentLocals += nLocal; + stackMap.putByte(SAME_FRAME_EXTENDED + nLocal).putShort(delta); + for (int i = 0; i < nLocal; ++i) { + writeFrameType(local[i]); + } + break; + case Opcodes.F_CHOP: + currentLocals -= nLocal; + stackMap.putByte(SAME_FRAME_EXTENDED - nLocal).putShort(delta); + break; + case Opcodes.F_SAME: + if (delta < 64) { + stackMap.putByte(delta); + } else { + stackMap.putByte(SAME_FRAME_EXTENDED).putShort(delta); + } + break; + case Opcodes.F_SAME1: + if (delta < 64) { + stackMap.putByte(SAME_LOCALS_1_STACK_ITEM_FRAME + delta); + } else { + stackMap.putByte(SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED) + .putShort(delta); + } + writeFrameType(stack[0]); + break; + } + + previousFrameOffset = code.length; + ++frameCount; + } + + maxStack = Math.max(maxStack, nStack); + maxLocals = Math.max(maxLocals, currentLocals); + } + + @Override + public void visitInsn(final int opcode) { + lastCodeOffset = code.length; + // adds the instruction to the bytecode of the method + code.putByte(opcode); + // update currentBlock + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(opcode, 0, null, null); + } else { + // updates current and max stack sizes + int size = stackSize + Frame.SIZE[opcode]; + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + // if opcode == ATHROW or xRETURN, ends current block (no successor) + if ((opcode >= Opcodes.IRETURN && opcode <= Opcodes.RETURN) + || opcode == Opcodes.ATHROW) { + noSuccessor(); + } + } + } + + @Override + public void visitIntInsn(final int opcode, final int operand) { + lastCodeOffset = code.length; + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(opcode, operand, null, null); + } else if (opcode != Opcodes.NEWARRAY) { + // updates current and max stack sizes only for NEWARRAY + // (stack size variation = 0 for BIPUSH or SIPUSH) + int size = stackSize + 1; + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + } + // adds the instruction to the bytecode of the method + if (opcode == Opcodes.SIPUSH) { + code.put12(opcode, operand); + } else { // BIPUSH or NEWARRAY + code.put11(opcode, operand); + } + } + + @Override + public void visitVarInsn(final int opcode, final int var) { + lastCodeOffset = code.length; + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(opcode, var, null, null); + } else { + // updates current and max stack sizes + if (opcode == Opcodes.RET) { + // no stack change, but end of current block (no successor) + currentBlock.status |= 
Label.RET; + // save 'stackSize' here for future use + // (see {@link #findSubroutineSuccessors}) + currentBlock.inputStackTop = stackSize; + noSuccessor(); + } else { // xLOAD or xSTORE + int size = stackSize + Frame.SIZE[opcode]; + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + } + } + if (compute != NOTHING) { + // updates max locals + int n; + if (opcode == Opcodes.LLOAD || opcode == Opcodes.DLOAD + || opcode == Opcodes.LSTORE || opcode == Opcodes.DSTORE) { + n = var + 2; + } else { + n = var + 1; + } + if (n > maxLocals) { + maxLocals = n; + } + } + // adds the instruction to the bytecode of the method + if (var < 4 && opcode != Opcodes.RET) { + int opt; + if (opcode < Opcodes.ISTORE) { + /* ILOAD_0 */ + opt = 26 + ((opcode - Opcodes.ILOAD) << 2) + var; + } else { + /* ISTORE_0 */ + opt = 59 + ((opcode - Opcodes.ISTORE) << 2) + var; + } + code.putByte(opt); + } else if (var >= 256) { + code.putByte(196 /* WIDE */).put12(opcode, var); + } else { + code.put11(opcode, var); + } + if (opcode >= Opcodes.ISTORE && compute == FRAMES && handlerCount > 0) { + visitLabel(new Label()); + } + } + + @Override + public void visitTypeInsn(final int opcode, final String type) { + lastCodeOffset = code.length; + Item i = cw.newClassItem(type); + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(opcode, code.length, cw, i); + } else if (opcode == Opcodes.NEW) { + // updates current and max stack sizes only if opcode == NEW + // (no stack change for ANEWARRAY, CHECKCAST, INSTANCEOF) + int size = stackSize + 1; + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + } + // adds the instruction to the bytecode of the method + code.put12(opcode, i.index); + } + + @Override + public void visitFieldInsn(final int opcode, final String owner, + final String name, final String desc) { + lastCodeOffset = code.length; + Item i = cw.newFieldItem(owner, name, desc); + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(opcode, 0, cw, i); + } else { + int size; + // computes the stack size variation + char c = desc.charAt(0); + switch (opcode) { + case Opcodes.GETSTATIC: + size = stackSize + (c == 'D' || c == 'J' ? 2 : 1); + break; + case Opcodes.PUTSTATIC: + size = stackSize + (c == 'D' || c == 'J' ? -2 : -1); + break; + case Opcodes.GETFIELD: + size = stackSize + (c == 'D' || c == 'J' ? 1 : 0); + break; + // case Constants.PUTFIELD: + default: + size = stackSize + (c == 'D' || c == 'J' ? -3 : -2); + break; + } + // updates current and max stack sizes + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + } + // adds the instruction to the bytecode of the method + code.put12(opcode, i.index); + } + + @Override + public void visitMethodInsn(final int opcode, final String owner, + final String name, final String desc, final boolean itf) { + lastCodeOffset = code.length; + Item i = cw.newMethodItem(owner, name, desc, itf); + int argSize = i.intVal; + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(opcode, 0, cw, i); + } else { + /* + * computes the stack size variation. In order not to recompute + * several times this variation for the same Item, we use the + * intVal field of this item to store this variation, once it + * has been computed. 
More precisely this intVal field stores + * the sizes of the arguments and of the return value + * corresponding to desc. + */ + if (argSize == 0) { + // the above sizes have not been computed yet, + // so we compute them... + argSize = Type.getArgumentsAndReturnSizes(desc); + // ... and we save them in order + // not to recompute them in the future + i.intVal = argSize; + } + int size; + if (opcode == Opcodes.INVOKESTATIC) { + size = stackSize - (argSize >> 2) + (argSize & 0x03) + 1; + } else { + size = stackSize - (argSize >> 2) + (argSize & 0x03); + } + // updates current and max stack sizes + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + } + // adds the instruction to the bytecode of the method + if (opcode == Opcodes.INVOKEINTERFACE) { + if (argSize == 0) { + argSize = Type.getArgumentsAndReturnSizes(desc); + i.intVal = argSize; + } + code.put12(Opcodes.INVOKEINTERFACE, i.index).put11(argSize >> 2, 0); + } else { + code.put12(opcode, i.index); + } + } + + @Override + public void visitInvokeDynamicInsn(final String name, final String desc, + final Handle bsm, final Object... bsmArgs) { + lastCodeOffset = code.length; + Item i = cw.newInvokeDynamicItem(name, desc, bsm, bsmArgs); + int argSize = i.intVal; + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(Opcodes.INVOKEDYNAMIC, 0, cw, i); + } else { + /* + * computes the stack size variation. In order not to recompute + * several times this variation for the same Item, we use the + * intVal field of this item to store this variation, once it + * has been computed. More precisely this intVal field stores + * the sizes of the arguments and of the return value + * corresponding to desc. + */ + if (argSize == 0) { + // the above sizes have not been computed yet, + // so we compute them... + argSize = Type.getArgumentsAndReturnSizes(desc); + // ... and we save them in order + // not to recompute them in the future + i.intVal = argSize; + } + int size = stackSize - (argSize >> 2) + (argSize & 0x03) + 1; + + // updates current and max stack sizes + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + } + // adds the instruction to the bytecode of the method + code.put12(Opcodes.INVOKEDYNAMIC, i.index); + code.putShort(0); + } + + @Override + public void visitJumpInsn(final int opcode, final Label label) { + lastCodeOffset = code.length; + Label nextInsn = null; + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(opcode, 0, null, null); + // 'label' is the target of a jump instruction + label.getFirst().status |= Label.TARGET; + // adds 'label' as a successor of this basic block + addSuccessor(Edge.NORMAL, label); + if (opcode != Opcodes.GOTO) { + // creates a Label for the next basic block + nextInsn = new Label(); + } + } else { + if (opcode == Opcodes.JSR) { + if ((label.status & Label.SUBROUTINE) == 0) { + label.status |= Label.SUBROUTINE; + ++subroutines; + } + currentBlock.status |= Label.JSR; + addSuccessor(stackSize + 1, label); + // creates a Label for the next basic block + nextInsn = new Label(); + /* + * note that, by construction in this method, a JSR block + * has at least two successors in the control flow graph: + * the first one leads the next instruction after the JSR, + * while the second one leads to the JSR target. 
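+                     * For example, the edge to the JSR target is added
+                     * above with addSuccessor(stackSize + 1, label), the +1
+                     * accounting for the return address that the JSR
+                     * instruction pushes on the operand stack.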
+                     */
+                } else {
+                    // updates current stack size (max stack size unchanged
+                    // because stack size variation always negative in this
+                    // case)
+                    stackSize += Frame.SIZE[opcode];
+                    addSuccessor(stackSize, label);
+                }
+            }
+        }
+        // adds the instruction to the bytecode of the method
+        if ((label.status & Label.RESOLVED) != 0
+                && label.position - code.length < Short.MIN_VALUE) {
+            /*
+             * case of a backward jump with an offset < -32768. In this case
+             * we automatically replace GOTO with GOTO_W, JSR with JSR_W and
+             * IFxxx L with IFNOTxxx L' GOTO_W L, where IFNOTxxx is the
+             * "opposite" opcode of IFxxx (i.e., IFNE for IFEQ) and where L'
+             * designates the instruction just after the GOTO_W.
+             */
+            if (opcode == Opcodes.GOTO) {
+                code.putByte(200); // GOTO_W
+            } else if (opcode == Opcodes.JSR) {
+                code.putByte(201); // JSR_W
+            } else {
+                // if the IF instruction is transformed into IFNOT GOTO_W the
+                // next instruction becomes the target of the IFNOT
+                // instruction
+                if (nextInsn != null) {
+                    nextInsn.status |= Label.TARGET;
+                }
+                code.putByte(opcode <= 166 ? ((opcode + 1) ^ 1) - 1
+                        : opcode ^ 1);
+                code.putShort(8); // jump offset
+                code.putByte(200); // GOTO_W
+            }
+            label.put(this, code, code.length - 1, true);
+        } else {
+            /*
+             * case of a backward jump with an offset >= -32768, or of a
+             * forward jump with, of course, an unknown offset. In these
+             * cases we store the offset in 2 bytes (which will be increased
+             * in resizeInstructions, if needed).
+             */
+            code.putByte(opcode);
+            label.put(this, code, code.length - 1, false);
+        }
+        if (currentBlock != null) {
+            if (nextInsn != null) {
+                // if the jump instruction is not a GOTO, the next
+                // instruction is also a successor of this instruction.
+                // Calling visitLabel adds the label of this next instruction
+                // as a successor of the current block, and starts a new
+                // basic block
+                visitLabel(nextInsn);
+            }
+            if (opcode == Opcodes.GOTO) {
+                noSuccessor();
+            }
+        }
+    }
+
+    @Override
+    public void visitLabel(final Label label) {
+        // resolves previous forward references to label, if any
+        resize |= label.resolve(this, code.length, code.data);
+        // updates currentBlock
+        if ((label.status & Label.DEBUG) != 0) {
+            return;
+        }
+        if (compute == FRAMES) {
+            if (currentBlock != null) {
+                if (label.position == currentBlock.position) {
+                    // successive labels, do not start a new basic block
+                    currentBlock.status |= (label.status & Label.TARGET);
+                    label.frame = currentBlock.frame;
+                    return;
+                }
+                // ends current block (with one new successor)
+                addSuccessor(Edge.NORMAL, label);
+            }
+            // begins a new current block
+            currentBlock = label;
+            if (label.frame == null) {
+                label.frame = new Frame();
+                label.frame.owner = label;
+            }
+            // updates the basic block list
+            if (previousBlock != null) {
+                if (label.position == previousBlock.position) {
+                    previousBlock.status |= (label.status & Label.TARGET);
+                    label.frame = previousBlock.frame;
+                    currentBlock = previousBlock;
+                    return;
+                }
+                previousBlock.successor = label;
+            }
+            previousBlock = label;
+        } else if (compute == MAXS) {
+            if (currentBlock != null) {
+                // ends current block (with one new successor)
+                currentBlock.outputStackMax = maxStackSize;
+                addSuccessor(stackSize, label);
+            }
+            // begins a new current block
+            currentBlock = label;
+            // resets the relative current and max stack sizes
+            stackSize = 0;
+            maxStackSize = 0;
+            // updates the basic block list
+            if (previousBlock != null) {
+                previousBlock.successor = label;
+            }
+            previousBlock = label;
+        }
+    }
+
+    @Override
+    public void visitLdcInsn(final Object cst)
{ + lastCodeOffset = code.length; + Item i = cw.newConstItem(cst); + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(Opcodes.LDC, 0, cw, i); + } else { + int size; + // computes the stack size variation + if (i.type == ClassWriter.LONG || i.type == ClassWriter.DOUBLE) { + size = stackSize + 2; + } else { + size = stackSize + 1; + } + // updates current and max stack sizes + if (size > maxStackSize) { + maxStackSize = size; + } + stackSize = size; + } + } + // adds the instruction to the bytecode of the method + int index = i.index; + if (i.type == ClassWriter.LONG || i.type == ClassWriter.DOUBLE) { + code.put12(20 /* LDC2_W */, index); + } else if (index >= 256) { + code.put12(19 /* LDC_W */, index); + } else { + code.put11(Opcodes.LDC, index); + } + } + + @Override + public void visitIincInsn(final int var, final int increment) { + lastCodeOffset = code.length; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(Opcodes.IINC, var, null, null); + } + } + if (compute != NOTHING) { + // updates max locals + int n = var + 1; + if (n > maxLocals) { + maxLocals = n; + } + } + // adds the instruction to the bytecode of the method + if ((var > 255) || (increment > 127) || (increment < -128)) { + code.putByte(196 /* WIDE */).put12(Opcodes.IINC, var) + .putShort(increment); + } else { + code.putByte(Opcodes.IINC).put11(var, increment); + } + } + + @Override + public void visitTableSwitchInsn(final int min, final int max, + final Label dflt, final Label... labels) { + lastCodeOffset = code.length; + // adds the instruction to the bytecode of the method + int source = code.length; + code.putByte(Opcodes.TABLESWITCH); + code.putByteArray(null, 0, (4 - code.length % 4) % 4); + dflt.put(this, code, source, true); + code.putInt(min).putInt(max); + for (int i = 0; i < labels.length; ++i) { + labels[i].put(this, code, source, true); + } + // updates currentBlock + visitSwitchInsn(dflt, labels); + } + + @Override + public void visitLookupSwitchInsn(final Label dflt, final int[] keys, + final Label[] labels) { + lastCodeOffset = code.length; + // adds the instruction to the bytecode of the method + int source = code.length; + code.putByte(Opcodes.LOOKUPSWITCH); + code.putByteArray(null, 0, (4 - code.length % 4) % 4); + dflt.put(this, code, source, true); + code.putInt(labels.length); + for (int i = 0; i < labels.length; ++i) { + code.putInt(keys[i]); + labels[i].put(this, code, source, true); + } + // updates currentBlock + visitSwitchInsn(dflt, labels); + } + + private void visitSwitchInsn(final Label dflt, final Label[] labels) { + // Label currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(Opcodes.LOOKUPSWITCH, 0, null, null); + // adds current block successors + addSuccessor(Edge.NORMAL, dflt); + dflt.getFirst().status |= Label.TARGET; + for (int i = 0; i < labels.length; ++i) { + addSuccessor(Edge.NORMAL, labels[i]); + labels[i].getFirst().status |= Label.TARGET; + } + } else { + // updates current stack size (max stack size unchanged) + --stackSize; + // adds current block successors + addSuccessor(stackSize, dflt); + for (int i = 0; i < labels.length; ++i) { + addSuccessor(stackSize, labels[i]); + } + } + // ends current block + noSuccessor(); + } + } + + @Override + public void visitMultiANewArrayInsn(final String desc, final int dims) { + lastCodeOffset = code.length; + Item i = cw.newClassItem(desc); + // Label 
currentBlock = this.currentBlock; + if (currentBlock != null) { + if (compute == FRAMES) { + currentBlock.frame.execute(Opcodes.MULTIANEWARRAY, dims, cw, i); + } else { + // updates current stack size (max stack size unchanged because + // stack size variation always negative or null) + stackSize += 1 - dims; + } + } + // adds the instruction to the bytecode of the method + code.put12(Opcodes.MULTIANEWARRAY, i.index).putByte(dims); + } + + @Override + public AnnotationVisitor visitInsnAnnotation(int typeRef, + TypePath typePath, String desc, boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write target_type and target_info + typeRef = (typeRef & 0xFF0000FF) | (lastCodeOffset << 8); + AnnotationWriter.putTarget(typeRef, typePath, bv); + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, + bv.length - 2); + if (visible) { + aw.next = ctanns; + ctanns = aw; + } else { + aw.next = ictanns; + ictanns = aw; + } + return aw; + } + + @Override + public void visitTryCatchBlock(final Label start, final Label end, + final Label handler, final String type) { + ++handlerCount; + Handler h = new Handler(); + h.start = start; + h.end = end; + h.handler = handler; + h.desc = type; + h.type = type != null ? cw.newClass(type) : 0; + if (lastHandler == null) { + firstHandler = h; + } else { + lastHandler.next = h; + } + lastHandler = h; + } + + @Override + public AnnotationVisitor visitTryCatchAnnotation(int typeRef, + TypePath typePath, String desc, boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write target_type and target_info + AnnotationWriter.putTarget(typeRef, typePath, bv); + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, + bv.length - 2); + if (visible) { + aw.next = ctanns; + ctanns = aw; + } else { + aw.next = ictanns; + ictanns = aw; + } + return aw; + } + + @Override + public void visitLocalVariable(final String name, final String desc, + final String signature, final Label start, final Label end, + final int index) { + if (signature != null) { + if (localVarType == null) { + localVarType = new ByteVector(); + } + ++localVarTypeCount; + localVarType.putShort(start.position) + .putShort(end.position - start.position) + .putShort(cw.newUTF8(name)).putShort(cw.newUTF8(signature)) + .putShort(index); + } + if (localVar == null) { + localVar = new ByteVector(); + } + ++localVarCount; + localVar.putShort(start.position) + .putShort(end.position - start.position) + .putShort(cw.newUTF8(name)).putShort(cw.newUTF8(desc)) + .putShort(index); + if (compute != NOTHING) { + // updates max locals + char c = desc.charAt(0); + int n = index + (c == 'J' || c == 'D' ? 
2 : 1); + if (n > maxLocals) { + maxLocals = n; + } + } + } + + @Override + public AnnotationVisitor visitLocalVariableAnnotation(int typeRef, + TypePath typePath, Label[] start, Label[] end, int[] index, + String desc, boolean visible) { + if (!ClassReader.ANNOTATIONS) { + return null; + } + ByteVector bv = new ByteVector(); + // write target_type and target_info + bv.putByte(typeRef >>> 24).putShort(start.length); + for (int i = 0; i < start.length; ++i) { + bv.putShort(start[i].position) + .putShort(end[i].position - start[i].position) + .putShort(index[i]); + } + if (typePath == null) { + bv.putByte(0); + } else { + int length = typePath.b[typePath.offset] * 2 + 1; + bv.putByteArray(typePath.b, typePath.offset, length); + } + // write type, and reserve space for values count + bv.putShort(cw.newUTF8(desc)).putShort(0); + AnnotationWriter aw = new AnnotationWriter(cw, true, bv, bv, + bv.length - 2); + if (visible) { + aw.next = ctanns; + ctanns = aw; + } else { + aw.next = ictanns; + ictanns = aw; + } + return aw; + } + + @Override + public void visitLineNumber(final int line, final Label start) { + if (lineNumber == null) { + lineNumber = new ByteVector(); + } + ++lineNumberCount; + lineNumber.putShort(start.position); + lineNumber.putShort(line); + } + + @Override + public void visitMaxs(final int maxStack, final int maxLocals) { + if (resize) { + // replaces the temporary jump opcodes introduced by Label.resolve. + if (ClassReader.RESIZE) { + resizeInstructions(); + } else { + throw new RuntimeException("Method code too large!"); + } + } + if (ClassReader.FRAMES && compute == FRAMES) { + // completes the control flow graph with exception handler blocks + Handler handler = firstHandler; + while (handler != null) { + Label l = handler.start.getFirst(); + Label h = handler.handler.getFirst(); + Label e = handler.end.getFirst(); + // computes the kind of the edges to 'h' + String t = handler.desc == null ? "java/lang/Throwable" + : handler.desc; + int kind = Frame.OBJECT | cw.addType(t); + // h is an exception handler + h.status |= Label.TARGET; + // adds 'h' as a successor of labels between 'start' and 'end' + while (l != e) { + // creates an edge to 'h' + Edge b = new Edge(); + b.info = kind; + b.successor = h; + // adds it to the successors of 'l' + b.next = l.successors; + l.successors = b; + // goes to the next label + l = l.successor; + } + handler = handler.next; + } + + // creates and visits the first (implicit) frame + Frame f = labels.frame; + Type[] args = Type.getArgumentTypes(descriptor); + f.initInputFrame(cw, access, args, this.maxLocals); + visitFrame(f); + + /* + * fix point algorithm: mark the first basic block as 'changed' + * (i.e. put it in the 'changed' list) and, while there are changed + * basic blocks, choose one, mark it as unchanged, and update its + * successors (which can be changed in the process). 
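+ * + * In sketch form (hypothetical names; the loop below implements this + * with an intrusive worklist threaded through Label.next): + * + * changed = { first basic block } + * while changed is not empty: + * remove a block b from changed + * for each successor s of b: + * if merging b's output frame into s's input frame changes s, + * add s to changed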
+ */ + int max = 0; + Label changed = labels; + while (changed != null) { + // removes a basic block from the list of changed basic blocks + Label l = changed; + changed = changed.next; + l.next = null; + f = l.frame; + // a reachable jump target must be stored in the stack map + if ((l.status & Label.TARGET) != 0) { + l.status |= Label.STORE; + } + // all visited labels are reachable, by definition + l.status |= Label.REACHABLE; + // updates the (absolute) maximum stack size + int blockMax = f.inputStack.length + l.outputStackMax; + if (blockMax > max) { + max = blockMax; + } + // updates the successors of the current basic block + Edge e = l.successors; + while (e != null) { + Label n = e.successor.getFirst(); + boolean change = f.merge(cw, n.frame, e.info); + if (change && n.next == null) { + // if n has changed and is not already in the 'changed' + // list, adds it to this list + n.next = changed; + changed = n; + } + e = e.next; + } + } + + // visits all the frames that must be stored in the stack map + Label l = labels; + while (l != null) { + f = l.frame; + if ((l.status & Label.STORE) != 0) { + visitFrame(f); + } + if ((l.status & Label.REACHABLE) == 0) { + // finds start and end of dead basic block + Label k = l.successor; + int start = l.position; + int end = (k == null ? code.length : k.position) - 1; + // if non empty basic block + if (end >= start) { + max = Math.max(max, 1); + // replaces instructions with NOP ... NOP ATHROW + for (int i = start; i < end; ++i) { + code.data[i] = Opcodes.NOP; + } + code.data[end] = (byte) Opcodes.ATHROW; + // emits a frame for this unreachable block + int frameIndex = startFrame(start, 0, 1); + frame[frameIndex] = Frame.OBJECT + | cw.addType("java/lang/Throwable"); + endFrame(); + // removes the start-end range from the exception + // handlers + firstHandler = Handler.remove(firstHandler, l, k); + } + } + l = l.successor; + } + + handler = firstHandler; + handlerCount = 0; + while (handler != null) { + handlerCount += 1; + handler = handler.next; + } + + this.maxStack = max; + } else if (compute == MAXS) { + // completes the control flow graph with exception handler blocks + Handler handler = firstHandler; + while (handler != null) { + Label l = handler.start; + Label h = handler.handler; + Label e = handler.end; + // adds 'h' as a successor of labels between 'start' and 'end' + while (l != e) { + // creates an edge to 'h' + Edge b = new Edge(); + b.info = Edge.EXCEPTION; + b.successor = h; + // adds it to the successors of 'l' + if ((l.status & Label.JSR) == 0) { + b.next = l.successors; + l.successors = b; + } else { + // if l is a JSR block, adds b after the first two edges + // to preserve the hypothesis about JSR block successors + // order (see {@link #visitJumpInsn}) + b.next = l.successors.next.next; + l.successors.next.next = b; + } + // goes to the next label + l = l.successor; + } + handler = handler.next; + } + + if (subroutines > 0) { + // completes the control flow graph with the RET successors + /* + * first step: finds the subroutines. This step determines, for + * each basic block, to which subroutine(s) it belongs. 
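+ * + * Subroutine ids are encoded below as bits in a set of 32-bit words + * packed into a long: in ((id / 32L) << 32) | (1L << (id % 32)) the + * high 32 bits select the word and the low 32 bits hold the bit mask, + * so, for example, id = 1 maps to bit 1 of word 0 and id = 33 maps to + * bit 1 of word 1. A basic block may belong to several subroutines, + * i.e. have several bits set.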
+ */ + // finds the basic blocks that belong to the "main" subroutine + int id = 0; + labels.visitSubroutine(null, 1, subroutines); + // finds the basic blocks that belong to the real subroutines + Label l = labels; + while (l != null) { + if ((l.status & Label.JSR) != 0) { + // the subroutine is defined by l's TARGET, not by l + Label subroutine = l.successors.next.successor; + // if this subroutine has not been visited yet... + if ((subroutine.status & Label.VISITED) == 0) { + // ...assigns it a new id and finds its basic blocks + id += 1; + subroutine.visitSubroutine(null, (id / 32L) << 32 + | (1L << (id % 32)), subroutines); + } + } + l = l.successor; + } + // second step: finds the successors of RET blocks + l = labels; + while (l != null) { + if ((l.status & Label.JSR) != 0) { + Label L = labels; + while (L != null) { + L.status &= ~Label.VISITED2; + L = L.successor; + } + // the subroutine is defined by l's TARGET, not by l + Label subroutine = l.successors.next.successor; + subroutine.visitSubroutine(l, 0, subroutines); + } + l = l.successor; + } + } + + /* + * control flow analysis algorithm: while the block stack is not + * empty, pop a block from this stack, update the max stack size, + * compute the true (non relative) begin stack size of the + * successors of this block, and push these successors onto the + * stack (unless they have already been pushed onto the stack). + * Note: by hypothesis, the {@link Label#inputStackTop} of the + * blocks in the block stack are the true (non relative) beginning + * stack sizes of these blocks. + */ + int max = 0; + Label stack = labels; + while (stack != null) { + // pops a block from the stack + Label l = stack; + stack = stack.next; + // computes the true (non relative) max stack size of this block + int start = l.inputStackTop; + int blockMax = start + l.outputStackMax; + // updates the global max stack size + if (blockMax > max) { + max = blockMax; + } + // analyzes the successors of the block + Edge b = l.successors; + if ((l.status & Label.JSR) != 0) { + // ignores the first edge of JSR blocks (virtual successor) + b = b.next; + } + while (b != null) { + l = b.successor; + // if this successor has not already been pushed... + if ((l.status & Label.PUSHED) == 0) { + // computes its true beginning stack size... + l.inputStackTop = b.info == Edge.EXCEPTION ? 1 : start + + b.info; + // ...and pushes it onto the stack + l.status |= Label.PUSHED; + l.next = stack; + stack = l; + } + b = b.next; + } + } + this.maxStack = Math.max(maxStack, max); + } else { + this.maxStack = maxStack; + this.maxLocals = maxLocals; + } + } + + @Override + public void visitEnd() { + } + + // ------------------------------------------------------------------------ + // Utility methods: control flow analysis algorithm + // ------------------------------------------------------------------------ + + /** + * Adds a successor to the {@link #currentBlock currentBlock} block. + * + * @param info + * information about the control flow edge to be added. + * @param successor + * the successor block to be added to the current block. + */ + private void addSuccessor(final int info, final Label successor) { + // creates and initializes an Edge object... + Edge b = new Edge(); + b.info = info; + b.successor = successor; + // ...and adds it to the successor list of the currentBlock block + b.next = currentBlock.successors; + currentBlock.successors = b; + } + + /** + * Ends the current basic block. 
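+ * It is called, for instance, after an unconditional GOTO (see + * visitJumpInsn above) or after a TABLESWITCH or LOOKUPSWITCH (see + * visitSwitchInsn above).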
This method must be used in the case where + * the current basic block does not have any successor. + */ + private void noSuccessor() { + if (compute == FRAMES) { + Label l = new Label(); + l.frame = new Frame(); + l.frame.owner = l; + l.resolve(this, code.length, code.data); + previousBlock.successor = l; + previousBlock = l; + } else { + currentBlock.outputStackMax = maxStackSize; + } + currentBlock = null; + } + + // ------------------------------------------------------------------------ + // Utility methods: stack map frames + // ------------------------------------------------------------------------ + + /** + * Visits a frame that has been computed from scratch. + * + * @param f + * the frame that must be visited. + */ + private void visitFrame(final Frame f) { + int i, t; + int nTop = 0; + int nLocal = 0; + int nStack = 0; + int[] locals = f.inputLocals; + int[] stacks = f.inputStack; + // computes the number of locals (ignores TOP types that are just after + // a LONG or a DOUBLE, and all trailing TOP types) + for (i = 0; i < locals.length; ++i) { + t = locals[i]; + if (t == Frame.TOP) { + ++nTop; + } else { + nLocal += nTop + 1; + nTop = 0; + } + if (t == Frame.LONG || t == Frame.DOUBLE) { + ++i; + } + } + // computes the stack size (ignores TOP types that are just after + // a LONG or a DOUBLE) + for (i = 0; i < stacks.length; ++i) { + t = stacks[i]; + ++nStack; + if (t == Frame.LONG || t == Frame.DOUBLE) { + ++i; + } + } + // visits the frame and its content + int frameIndex = startFrame(f.owner.position, nLocal, nStack); + for (i = 0; nLocal > 0; ++i, --nLocal) { + t = locals[i]; + frame[frameIndex++] = t; + if (t == Frame.LONG || t == Frame.DOUBLE) { + ++i; + } + } + for (i = 0; i < stacks.length; ++i) { + t = stacks[i]; + frame[frameIndex++] = t; + if (t == Frame.LONG || t == Frame.DOUBLE) { + ++i; + } + } + endFrame(); + } + + /** + * Visit the implicit first frame of this method. + */ + private void visitImplicitFirstFrame() { + // There can be at most descriptor.length() + 1 locals + int frameIndex = startFrame(0, descriptor.length() + 1, 0); + if ((access & Opcodes.ACC_STATIC) == 0) { + if ((access & ACC_CONSTRUCTOR) == 0) { + frame[frameIndex++] = Frame.OBJECT | cw.addType(cw.thisName); + } else { + frame[frameIndex++] = 6; // Opcodes.UNINITIALIZED_THIS; + } + } + int i = 1; + loop: while (true) { + int j = i; + switch (descriptor.charAt(i++)) { + case 'Z': + case 'C': + case 'B': + case 'S': + case 'I': + frame[frameIndex++] = 1; // Opcodes.INTEGER; + break; + case 'F': + frame[frameIndex++] = 2; // Opcodes.FLOAT; + break; + case 'J': + frame[frameIndex++] = 4; // Opcodes.LONG; + break; + case 'D': + frame[frameIndex++] = 3; // Opcodes.DOUBLE; + break; + case '[': + while (descriptor.charAt(i) == '[') { + ++i; + } + if (descriptor.charAt(i) == 'L') { + ++i; + while (descriptor.charAt(i) != ';') { + ++i; + } + } + frame[frameIndex++] = Frame.OBJECT + | cw.addType(descriptor.substring(j, ++i)); + break; + case 'L': + while (descriptor.charAt(i) != ';') { + ++i; + } + frame[frameIndex++] = Frame.OBJECT + | cw.addType(descriptor.substring(j + 1, i++)); + break; + default: + break loop; + } + } + frame[1] = frameIndex - 3; + endFrame(); + } + + /** + * Starts the visit of a stack map frame. + * + * @param offset + * the offset of the instruction to which the frame corresponds. + * @param nLocal + * the number of local variables in the frame. + * @param nStack + * the number of stack elements in the frame. 
+ * @return the index of the next element to be written in this frame. + */ + private int startFrame(final int offset, final int nLocal, final int nStack) { + int n = 3 + nLocal + nStack; + if (frame == null || frame.length < n) { + frame = new int[n]; + } + frame[0] = offset; + frame[1] = nLocal; + frame[2] = nStack; + return 3; + } + + /** + * Checks if the visit of the current frame {@link #frame} is finished and, + * if so, writes it in the StackMapTable attribute. + */ + private void endFrame() { + if (previousFrame != null) { // do not write the first frame + if (stackMap == null) { + stackMap = new ByteVector(); + } + writeFrame(); + ++frameCount; + } + previousFrame = frame; + frame = null; + } + + /** + * Compresses and writes the current frame {@link #frame} in the StackMapTable + * attribute. + */ + private void writeFrame() { + int clocalsSize = frame[1]; + int cstackSize = frame[2]; + if ((cw.version & 0xFFFF) < Opcodes.V1_6) { + stackMap.putShort(frame[0]).putShort(clocalsSize); + writeFrameTypes(3, 3 + clocalsSize); + stackMap.putShort(cstackSize); + writeFrameTypes(3 + clocalsSize, 3 + clocalsSize + cstackSize); + return; + } + int localsSize = previousFrame[1]; + int type = FULL_FRAME; + int k = 0; + int delta; + if (frameCount == 0) { + delta = frame[0]; + } else { + delta = frame[0] - previousFrame[0] - 1; + } + if (cstackSize == 0) { + k = clocalsSize - localsSize; + switch (k) { + case -3: + case -2: + case -1: + type = CHOP_FRAME; + localsSize = clocalsSize; + break; + case 0: + type = delta < 64 ? SAME_FRAME : SAME_FRAME_EXTENDED; + break; + case 1: + case 2: + case 3: + type = APPEND_FRAME; + break; + } + } else if (clocalsSize == localsSize && cstackSize == 1) { + type = delta < 63 ? SAME_LOCALS_1_STACK_ITEM_FRAME + : SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED; + } + if (type != FULL_FRAME) { + // verify if locals are the same + int l = 3; + for (int j = 0; j < localsSize; j++) { + if (frame[l] != previousFrame[l]) { + type = FULL_FRAME; + break; + } + l++; + } + } + switch (type) { + case SAME_FRAME: + stackMap.putByte(delta); + break; + case SAME_LOCALS_1_STACK_ITEM_FRAME: + stackMap.putByte(SAME_LOCALS_1_STACK_ITEM_FRAME + delta); + writeFrameTypes(3 + clocalsSize, 4 + clocalsSize); + break; + case SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED: + stackMap.putByte(SAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED).putShort( + delta); + writeFrameTypes(3 + clocalsSize, 4 + clocalsSize); + break; + case SAME_FRAME_EXTENDED: + stackMap.putByte(SAME_FRAME_EXTENDED).putShort(delta); + break; + case CHOP_FRAME: + stackMap.putByte(SAME_FRAME_EXTENDED + k).putShort(delta); + break; + case APPEND_FRAME: + stackMap.putByte(SAME_FRAME_EXTENDED + k).putShort(delta); + writeFrameTypes(3 + localsSize, 3 + clocalsSize); + break; + // case FULL_FRAME: + default: + stackMap.putByte(FULL_FRAME).putShort(delta).putShort(clocalsSize); + writeFrameTypes(3, 3 + clocalsSize); + stackMap.putShort(cstackSize); + writeFrameTypes(3 + clocalsSize, 3 + clocalsSize + cstackSize); + } + } + + /** + * Writes some types of the current frame {@link #frame} into the + * StackMapTableAttribute. This method converts types from the format used + * in {@link Label} to the format used in StackMapTable attributes. In + * particular, it converts type table indexes to constant pool indexes. + * + * @param start + * index of the first type in {@link #frame} to write. + * @param end + * index of the last type in {@link #frame} to write (exclusive).
+ */ + private void writeFrameTypes(final int start, final int end) { + for (int i = start; i < end; ++i) { + int t = frame[i]; + int d = t & Frame.DIM; + if (d == 0) { + int v = t & Frame.BASE_VALUE; + switch (t & Frame.BASE_KIND) { + case Frame.OBJECT: + stackMap.putByte(7).putShort( + cw.newClass(cw.typeTable[v].strVal1)); + break; + case Frame.UNINITIALIZED: + stackMap.putByte(8).putShort(cw.typeTable[v].intVal); + break; + default: + stackMap.putByte(v); + } + } else { + StringBuilder sb = new StringBuilder(); + d >>= 28; + while (d-- > 0) { + sb.append('['); + } + if ((t & Frame.BASE_KIND) == Frame.OBJECT) { + sb.append('L'); + sb.append(cw.typeTable[t & Frame.BASE_VALUE].strVal1); + sb.append(';'); + } else { + switch (t & 0xF) { + case 1: + sb.append('I'); + break; + case 2: + sb.append('F'); + break; + case 3: + sb.append('D'); + break; + case 9: + sb.append('Z'); + break; + case 10: + sb.append('B'); + break; + case 11: + sb.append('C'); + break; + case 12: + sb.append('S'); + break; + default: + sb.append('J'); + } + } + stackMap.putByte(7).putShort(cw.newClass(sb.toString())); + } + } + } + + private void writeFrameType(final Object type) { + if (type instanceof String) { + stackMap.putByte(7).putShort(cw.newClass((String) type)); + } else if (type instanceof Integer) { + stackMap.putByte(((Integer) type).intValue()); + } else { + stackMap.putByte(8).putShort(((Label) type).position); + } + } + + // ------------------------------------------------------------------------ + // Utility methods: dump bytecode array + // ------------------------------------------------------------------------ + + /** + * Returns the size of the bytecode of this method. + * + * @return the size of the bytecode of this method. + */ + final int getSize() { + if (classReaderOffset != 0) { + return 6 + classReaderLength; + } + int size = 8; + if (code.length > 0) { + if (code.length > 65536) { + throw new RuntimeException("Method code too large!"); + } + cw.newUTF8("Code"); + size += 18 + code.length + 8 * handlerCount; + if (localVar != null) { + cw.newUTF8("LocalVariableTable"); + size += 8 + localVar.length; + } + if (localVarType != null) { + cw.newUTF8("LocalVariableTypeTable"); + size += 8 + localVarType.length; + } + if (lineNumber != null) { + cw.newUTF8("LineNumberTable"); + size += 8 + lineNumber.length; + } + if (stackMap != null) { + boolean zip = (cw.version & 0xFFFF) >= Opcodes.V1_6; + cw.newUTF8(zip ? 
"StackMapTable" : "StackMap"); + size += 8 + stackMap.length; + } + if (ClassReader.ANNOTATIONS && ctanns != null) { + cw.newUTF8("RuntimeVisibleTypeAnnotations"); + size += 8 + ctanns.getSize(); + } + if (ClassReader.ANNOTATIONS && ictanns != null) { + cw.newUTF8("RuntimeInvisibleTypeAnnotations"); + size += 8 + ictanns.getSize(); + } + if (cattrs != null) { + size += cattrs.getSize(cw, code.data, code.length, maxStack, + maxLocals); + } + } + if (exceptionCount > 0) { + cw.newUTF8("Exceptions"); + size += 8 + 2 * exceptionCount; + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((cw.version & 0xFFFF) < Opcodes.V1_5 + || (access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) != 0) { + cw.newUTF8("Synthetic"); + size += 6; + } + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + cw.newUTF8("Deprecated"); + size += 6; + } + if (ClassReader.SIGNATURES && signature != null) { + cw.newUTF8("Signature"); + cw.newUTF8(signature); + size += 8; + } + if (methodParameters != null) { + cw.newUTF8("MethodParameters"); + size += 7 + methodParameters.length; + } + if (ClassReader.ANNOTATIONS && annd != null) { + cw.newUTF8("AnnotationDefault"); + size += 6 + annd.length; + } + if (ClassReader.ANNOTATIONS && anns != null) { + cw.newUTF8("RuntimeVisibleAnnotations"); + size += 8 + anns.getSize(); + } + if (ClassReader.ANNOTATIONS && ianns != null) { + cw.newUTF8("RuntimeInvisibleAnnotations"); + size += 8 + ianns.getSize(); + } + if (ClassReader.ANNOTATIONS && tanns != null) { + cw.newUTF8("RuntimeVisibleTypeAnnotations"); + size += 8 + tanns.getSize(); + } + if (ClassReader.ANNOTATIONS && itanns != null) { + cw.newUTF8("RuntimeInvisibleTypeAnnotations"); + size += 8 + itanns.getSize(); + } + if (ClassReader.ANNOTATIONS && panns != null) { + cw.newUTF8("RuntimeVisibleParameterAnnotations"); + size += 7 + 2 * (panns.length - synthetics); + for (int i = panns.length - 1; i >= synthetics; --i) { + size += panns[i] == null ? 0 : panns[i].getSize(); + } + } + if (ClassReader.ANNOTATIONS && ipanns != null) { + cw.newUTF8("RuntimeInvisibleParameterAnnotations"); + size += 7 + 2 * (ipanns.length - synthetics); + for (int i = ipanns.length - 1; i >= synthetics; --i) { + size += ipanns[i] == null ? 0 : ipanns[i].getSize(); + } + } + if (attrs != null) { + size += attrs.getSize(cw, null, 0, -1, -1); + } + return size; + } + + /** + * Puts the bytecode of this method in the given byte vector. + * + * @param out + * the byte vector into which the bytecode of this method must be + * copied. 
+ */ + final void put(final ByteVector out) { + final int FACTOR = ClassWriter.TO_ACC_SYNTHETIC; + int mask = ACC_CONSTRUCTOR | Opcodes.ACC_DEPRECATED + | ClassWriter.ACC_SYNTHETIC_ATTRIBUTE + | ((access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) / FACTOR); + out.putShort(access & ~mask).putShort(name).putShort(desc); + if (classReaderOffset != 0) { + out.putByteArray(cw.cr.b, classReaderOffset, classReaderLength); + return; + } + int attributeCount = 0; + if (code.length > 0) { + ++attributeCount; + } + if (exceptionCount > 0) { + ++attributeCount; + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((cw.version & 0xFFFF) < Opcodes.V1_5 + || (access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) != 0) { + ++attributeCount; + } + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + ++attributeCount; + } + if (ClassReader.SIGNATURES && signature != null) { + ++attributeCount; + } + if (methodParameters != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && annd != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && anns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && ianns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && tanns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && itanns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && panns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && ipanns != null) { + ++attributeCount; + } + if (attrs != null) { + attributeCount += attrs.getCount(); + } + out.putShort(attributeCount); + if (code.length > 0) { + int size = 12 + code.length + 8 * handlerCount; + if (localVar != null) { + size += 8 + localVar.length; + } + if (localVarType != null) { + size += 8 + localVarType.length; + } + if (lineNumber != null) { + size += 8 + lineNumber.length; + } + if (stackMap != null) { + size += 8 + stackMap.length; + } + if (ClassReader.ANNOTATIONS && ctanns != null) { + size += 8 + ctanns.getSize(); + } + if (ClassReader.ANNOTATIONS && ictanns != null) { + size += 8 + ictanns.getSize(); + } + if (cattrs != null) { + size += cattrs.getSize(cw, code.data, code.length, maxStack, + maxLocals); + } + out.putShort(cw.newUTF8("Code")).putInt(size); + out.putShort(maxStack).putShort(maxLocals); + out.putInt(code.length).putByteArray(code.data, 0, code.length); + out.putShort(handlerCount); + if (handlerCount > 0) { + Handler h = firstHandler; + while (h != null) { + out.putShort(h.start.position).putShort(h.end.position) + .putShort(h.handler.position).putShort(h.type); + h = h.next; + } + } + attributeCount = 0; + if (localVar != null) { + ++attributeCount; + } + if (localVarType != null) { + ++attributeCount; + } + if (lineNumber != null) { + ++attributeCount; + } + if (stackMap != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && ctanns != null) { + ++attributeCount; + } + if (ClassReader.ANNOTATIONS && ictanns != null) { + ++attributeCount; + } + if (cattrs != null) { + attributeCount += cattrs.getCount(); + } + out.putShort(attributeCount); + if (localVar != null) { + out.putShort(cw.newUTF8("LocalVariableTable")); + out.putInt(localVar.length + 2).putShort(localVarCount); + out.putByteArray(localVar.data, 0, localVar.length); + } + if (localVarType != null) { + out.putShort(cw.newUTF8("LocalVariableTypeTable")); + out.putInt(localVarType.length + 2).putShort(localVarTypeCount); + out.putByteArray(localVarType.data, 0, localVarType.length); + } + if (lineNumber != null) { + out.putShort(cw.newUTF8("LineNumberTable")); + 
out.putInt(lineNumber.length + 2).putShort(lineNumberCount); + out.putByteArray(lineNumber.data, 0, lineNumber.length); + } + if (stackMap != null) { + boolean zip = (cw.version & 0xFFFF) >= Opcodes.V1_6; + out.putShort(cw.newUTF8(zip ? "StackMapTable" : "StackMap")); + out.putInt(stackMap.length + 2).putShort(frameCount); + out.putByteArray(stackMap.data, 0, stackMap.length); + } + if (ClassReader.ANNOTATIONS && ctanns != null) { + out.putShort(cw.newUTF8("RuntimeVisibleTypeAnnotations")); + ctanns.put(out); + } + if (ClassReader.ANNOTATIONS && ictanns != null) { + out.putShort(cw.newUTF8("RuntimeInvisibleTypeAnnotations")); + ictanns.put(out); + } + if (cattrs != null) { + cattrs.put(cw, code.data, code.length, maxLocals, maxStack, out); + } + } + if (exceptionCount > 0) { + out.putShort(cw.newUTF8("Exceptions")).putInt( + 2 * exceptionCount + 2); + out.putShort(exceptionCount); + for (int i = 0; i < exceptionCount; ++i) { + out.putShort(exceptions[i]); + } + } + if ((access & Opcodes.ACC_SYNTHETIC) != 0) { + if ((cw.version & 0xFFFF) < Opcodes.V1_5 + || (access & ClassWriter.ACC_SYNTHETIC_ATTRIBUTE) != 0) { + out.putShort(cw.newUTF8("Synthetic")).putInt(0); + } + } + if ((access & Opcodes.ACC_DEPRECATED) != 0) { + out.putShort(cw.newUTF8("Deprecated")).putInt(0); + } + if (ClassReader.SIGNATURES && signature != null) { + out.putShort(cw.newUTF8("Signature")).putInt(2) + .putShort(cw.newUTF8(signature)); + } + if (methodParameters != null) { + out.putShort(cw.newUTF8("MethodParameters")); + out.putInt(methodParameters.length + 1).putByte( + methodParametersCount); + out.putByteArray(methodParameters.data, 0, methodParameters.length); + } + if (ClassReader.ANNOTATIONS && annd != null) { + out.putShort(cw.newUTF8("AnnotationDefault")); + out.putInt(annd.length); + out.putByteArray(annd.data, 0, annd.length); + } + if (ClassReader.ANNOTATIONS && anns != null) { + out.putShort(cw.newUTF8("RuntimeVisibleAnnotations")); + anns.put(out); + } + if (ClassReader.ANNOTATIONS && ianns != null) { + out.putShort(cw.newUTF8("RuntimeInvisibleAnnotations")); + ianns.put(out); + } + if (ClassReader.ANNOTATIONS && tanns != null) { + out.putShort(cw.newUTF8("RuntimeVisibleTypeAnnotations")); + tanns.put(out); + } + if (ClassReader.ANNOTATIONS && itanns != null) { + out.putShort(cw.newUTF8("RuntimeInvisibleTypeAnnotations")); + itanns.put(out); + } + if (ClassReader.ANNOTATIONS && panns != null) { + out.putShort(cw.newUTF8("RuntimeVisibleParameterAnnotations")); + AnnotationWriter.put(panns, synthetics, out); + } + if (ClassReader.ANNOTATIONS && ipanns != null) { + out.putShort(cw.newUTF8("RuntimeInvisibleParameterAnnotations")); + AnnotationWriter.put(ipanns, synthetics, out); + } + if (attrs != null) { + attrs.put(cw, null, 0, -1, -1, out); + } + } + + // ------------------------------------------------------------------------ + // Utility methods: instruction resizing (used to handle GOTO_W and JSR_W) + // ------------------------------------------------------------------------ + + /** + * Resizes and replaces the temporary instructions inserted by + * {@link Label#resolve} for wide forward jumps, while keeping jump offsets + * and instruction addresses consistent. This may require resizing other + * existing instructions, or even introducing new instructions: for + * example, increasing the size of an instruction by 2 in the middle of a + * method can increase the offset of an IFEQ instruction from 32766 to + * 32768, in which case IFEQ 32766 must be replaced with IFNEQ 8 GOTO_W + * 32765.
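+ * (In this replacement, the IFNEQ offset of 8 skips the 3-byte IFNEQ + * itself and the 5-byte GOTO_W that follows it, and the GOTO_W offset + * is 32765 rather than 32768 because the GOTO_W opcode starts 3 bytes + * after the position of the original IFEQ.)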
This, in turn, may require increasing the size of another jump + * instruction, and so on... All these operations are handled automatically + * by this method. + * <p>
+ * This method must be called after the whole method that is being built + * has been visited. In particular, the {@link Label Label} objects used + * to construct the method are no longer valid after this method has been + * called. + */ + private void resizeInstructions() { + byte[] b = code.data; // bytecode of the method + int u, v, label; // indexes in b + int i, j; // loop indexes + /* + * 1st step: As explained above, resizing an instruction may require + * resizing another one, which may require resizing yet another one, and + * so on. The first step of the algorithm consists in finding all the + * instructions that need to be resized, without modifying the code. + * This is done by the following "fix point" algorithm: + * + * Parse the code to find the jump instructions whose offset will need + * more than 2 bytes to be stored (the future offset is computed from + * the current offset and from the number of bytes that will be inserted + * or removed between the source and target instructions). For each such + * instruction, add an entry in (a copy of) the indexes and sizes + * arrays (if this has not already been done in a previous iteration!). + * + * If at least one entry has been added during the previous step, go + * back to the beginning, otherwise stop. + * + * In fact, the real algorithm is complicated by the fact that the size + * of TABLESWITCH and LOOKUPSWITCH instructions depends on their + * position in the bytecode (because of padding). In order to ensure the + * convergence of the algorithm, the number of bytes to be added or + * removed from these instructions is overestimated during the previous + * loop, and computed exactly only after the loop is finished (this + * requires another pass to parse the bytecode of the method). + */ + int[] allIndexes = new int[0]; // copy of indexes + int[] allSizes = new int[0]; // copy of sizes + boolean[] resize; // instructions to be resized + int newOffset; // future offset of a jump instruction + + resize = new boolean[code.length]; + + // 3 = loop again, 2 = loop ended, 1 = last pass, 0 = done + int state = 3; + do { + if (state == 3) { + state = 2; + } + u = 0; + while (u < b.length) { + int opcode = b[u] & 0xFF; // opcode of current instruction + int insert = 0; // bytes to be added after this instruction + + switch (ClassWriter.TYPE[opcode]) { + case ClassWriter.NOARG_INSN: + case ClassWriter.IMPLVAR_INSN: + u += 1; + break; + case ClassWriter.LABEL_INSN: + if (opcode > 201) { + // converts temporary opcodes 202 to 217, 218 and + // 219 to IFEQ ... JSR (inclusive), IFNULL and + // IFNONNULL + opcode = opcode < 218 ? opcode - 49 : opcode - 20; + label = u + readUnsignedShort(b, u + 1); + } else { + label = u + readShort(b, u + 1); + } + newOffset = getNewOffset(allIndexes, allSizes, u, label); + if (newOffset < Short.MIN_VALUE + || newOffset > Short.MAX_VALUE) { + if (!resize[u]) { + if (opcode == Opcodes.GOTO || opcode == Opcodes.JSR) { + // two additional bytes will be required to + // replace this GOTO or JSR instruction with + // a GOTO_W or a JSR_W + insert = 2; + } else { + // five additional bytes will be required to + // replace this IFxxx <l> instruction with + // IFNOTxxx <l'> GOTO_W <l>, where IFNOTxxx + // is the "opposite" opcode of IFxxx (i.e., + // IFNE for IFEQ) and where <l'> designates + // the instruction just after the GOTO_W.
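+ // (byte accounting: IFxxx <l> occupies 1 opcode byte + // plus a 2-byte offset, i.e. 3 bytes, while + // IFNOTxxx <l'> (3 bytes) followed by GOTO_W <l> + // (1 opcode byte plus a 4-byte offset, i.e. 5 bytes) + // occupies 8 bytes in total)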
+ insert = 5; + } + resize[u] = true; + } + } + u += 3; + break; + case ClassWriter.LABELW_INSN: + u += 5; + break; + case ClassWriter.TABL_INSN: + if (state == 1) { + // true number of bytes to be added (or removed) + // from this instruction = (future number of padding + // bytes - current number of padding bytes) - + // previously overestimated variation = + // = ((3 - newOffset%4) - (3 - u%4)) - u%4 + // = (-newOffset%4 + u%4) - u%4 + // = -(newOffset & 3) + newOffset = getNewOffset(allIndexes, allSizes, 0, u); + insert = -(newOffset & 3); + } else if (!resize[u]) { + // overestimation of the number of bytes to be + // added to this instruction = 3 - current number + // of padding bytes = 3 - (3 - u%4) = u%4 = u & 3 + insert = u & 3; + resize[u] = true; + } + // skips instruction + u = u + 4 - (u & 3); + u += 4 * (readInt(b, u + 8) - readInt(b, u + 4) + 1) + 12; + break; + case ClassWriter.LOOK_INSN: + if (state == 1) { + // like TABL_INSN + newOffset = getNewOffset(allIndexes, allSizes, 0, u); + insert = -(newOffset & 3); + } else if (!resize[u]) { + // like TABL_INSN + insert = u & 3; + resize[u] = true; + } + // skips instruction + u = u + 4 - (u & 3); + u += 8 * readInt(b, u + 4) + 8; + break; + case ClassWriter.WIDE_INSN: + opcode = b[u + 1] & 0xFF; + if (opcode == Opcodes.IINC) { + u += 6; + } else { + u += 4; + } + break; + case ClassWriter.VAR_INSN: + case ClassWriter.SBYTE_INSN: + case ClassWriter.LDC_INSN: + u += 2; + break; + case ClassWriter.SHORT_INSN: + case ClassWriter.LDCW_INSN: + case ClassWriter.FIELDORMETH_INSN: + case ClassWriter.TYPE_INSN: + case ClassWriter.IINC_INSN: + u += 3; + break; + case ClassWriter.ITFMETH_INSN: + case ClassWriter.INDYMETH_INSN: + u += 5; + break; + // case ClassWriter.MANA_INSN: + default: + u += 4; + break; + } + if (insert != 0) { + // adds a new (u, insert) entry in the allIndexes and + // allSizes arrays + int[] newIndexes = new int[allIndexes.length + 1]; + int[] newSizes = new int[allSizes.length + 1]; + System.arraycopy(allIndexes, 0, newIndexes, 0, + allIndexes.length); + System.arraycopy(allSizes, 0, newSizes, 0, allSizes.length); + newIndexes[allIndexes.length] = u; + newSizes[allSizes.length] = insert; + allIndexes = newIndexes; + allSizes = newSizes; + if (insert > 0) { + state = 3; + } + } + } + if (state < 3) { + --state; + } + } while (state != 0); + + // 2nd step: + // copies the bytecode of the method into a new bytevector, updates the + // offsets, and inserts (or removes) bytes as requested. + + ByteVector newCode = new ByteVector(code.length); + + u = 0; + while (u < code.length) { + int opcode = b[u] & 0xFF; + switch (ClassWriter.TYPE[opcode]) { + case ClassWriter.NOARG_INSN: + case ClassWriter.IMPLVAR_INSN: + newCode.putByte(opcode); + u += 1; + break; + case ClassWriter.LABEL_INSN: + if (opcode > 201) { + // changes temporary opcodes 202 to 217 (inclusive), 218 + // and 219 to IFEQ ... JSR (inclusive), IFNULL and + // IFNONNULL + opcode = opcode < 218 ? opcode - 49 : opcode - 20; + label = u + readUnsignedShort(b, u + 1); + } else { + label = u + readShort(b, u + 1); + } + newOffset = getNewOffset(allIndexes, allSizes, u, label); + if (resize[u]) { + // replaces GOTO with GOTO_W, JSR with JSR_W and IFxxx + // <l> with IFNOTxxx <l'> GOTO_W <l>, where IFNOTxxx is + // the "opposite" opcode of IFxxx (i.e., IFNE for IFEQ) + // and where <l'> designates the instruction just after + // the GOTO_W.
+ if (opcode == Opcodes.GOTO) { + newCode.putByte(200); // GOTO_W + } else if (opcode == Opcodes.JSR) { + newCode.putByte(201); // JSR_W + } else { + newCode.putByte(opcode <= 166 ? ((opcode + 1) ^ 1) - 1 + : opcode ^ 1); + newCode.putShort(8); // jump offset + newCode.putByte(200); // GOTO_W + // newOffset now computed from start of GOTO_W + newOffset -= 3; + } + newCode.putInt(newOffset); + } else { + newCode.putByte(opcode); + newCode.putShort(newOffset); + } + u += 3; + break; + case ClassWriter.LABELW_INSN: + label = u + readInt(b, u + 1); + newOffset = getNewOffset(allIndexes, allSizes, u, label); + newCode.putByte(opcode); + newCode.putInt(newOffset); + u += 5; + break; + case ClassWriter.TABL_INSN: + // skips 0 to 3 padding bytes + v = u; + u = u + 4 - (v & 3); + // reads and copies instruction + newCode.putByte(Opcodes.TABLESWITCH); + newCode.putByteArray(null, 0, (4 - newCode.length % 4) % 4); + label = v + readInt(b, u); + u += 4; + newOffset = getNewOffset(allIndexes, allSizes, v, label); + newCode.putInt(newOffset); + j = readInt(b, u); + u += 4; + newCode.putInt(j); + j = readInt(b, u) - j + 1; + u += 4; + newCode.putInt(readInt(b, u - 4)); + for (; j > 0; --j) { + label = v + readInt(b, u); + u += 4; + newOffset = getNewOffset(allIndexes, allSizes, v, label); + newCode.putInt(newOffset); + } + break; + case ClassWriter.LOOK_INSN: + // skips 0 to 3 padding bytes + v = u; + u = u + 4 - (v & 3); + // reads and copies instruction + newCode.putByte(Opcodes.LOOKUPSWITCH); + newCode.putByteArray(null, 0, (4 - newCode.length % 4) % 4); + label = v + readInt(b, u); + u += 4; + newOffset = getNewOffset(allIndexes, allSizes, v, label); + newCode.putInt(newOffset); + j = readInt(b, u); + u += 4; + newCode.putInt(j); + for (; j > 0; --j) { + newCode.putInt(readInt(b, u)); + u += 4; + label = v + readInt(b, u); + u += 4; + newOffset = getNewOffset(allIndexes, allSizes, v, label); + newCode.putInt(newOffset); + } + break; + case ClassWriter.WIDE_INSN: + opcode = b[u + 1] & 0xFF; + if (opcode == Opcodes.IINC) { + newCode.putByteArray(b, u, 6); + u += 6; + } else { + newCode.putByteArray(b, u, 4); + u += 4; + } + break; + case ClassWriter.VAR_INSN: + case ClassWriter.SBYTE_INSN: + case ClassWriter.LDC_INSN: + newCode.putByteArray(b, u, 2); + u += 2; + break; + case ClassWriter.SHORT_INSN: + case ClassWriter.LDCW_INSN: + case ClassWriter.FIELDORMETH_INSN: + case ClassWriter.TYPE_INSN: + case ClassWriter.IINC_INSN: + newCode.putByteArray(b, u, 3); + u += 3; + break; + case ClassWriter.ITFMETH_INSN: + case ClassWriter.INDYMETH_INSN: + newCode.putByteArray(b, u, 5); + u += 5; + break; + // case MANA_INSN: + default: + newCode.putByteArray(b, u, 4); + u += 4; + break; + } + } + + // updates the stack map frame labels + if (compute == FRAMES) { + Label l = labels; + while (l != null) { + /* + * Detects the labels that are just after an IF instruction that + * has been resized with the IFNOT GOTO_W pattern. These labels + * are now the target of a jump instruction (the IFNOT + * instruction). Note that we need the original label position + * here. getNewOffset must therefore never have been called for + * this label. 
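+ * + * For example, a label at position p that immediately follows a 3-byte + * IFxxx instruction starting at p - 3 becomes, once that IFxxx has been + * replaced with the IFNOTxxx GOTO_W pattern, the target of the new + * IFNOTxxx jump; this is why resize[l.position - 3] is tested below.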
+ */ + u = l.position - 3; + if (u >= 0 && resize[u]) { + l.status |= Label.TARGET; + } + getNewOffset(allIndexes, allSizes, l); + l = l.successor; + } + // Update the offsets in the uninitialized types + for (i = 0; i < cw.typeTable.length; ++i) { + Item item = cw.typeTable[i]; + if (item != null && item.type == ClassWriter.TYPE_UNINIT) { + item.intVal = getNewOffset(allIndexes, allSizes, 0, + item.intVal); + } + } + // The stack map frames are not serialized yet, so we don't need + // to update them. They will be serialized in visitMaxs. + } else if (frameCount > 0) { + /* + * Resizing an existing stack map frame table is really hard. Not + * only must the table be parsed to update the offsets, but new + * frames may be needed for jump instructions that were inserted by + * this method. And updating the offsets or inserting frames can + * change the format of the following frames, in case of packed + * frames. In practice the whole table must be recomputed. For this, + * the frames are marked as potentially invalid. This will cause the + * whole class to be reread and rewritten with the COMPUTE_FRAMES + * option (see the ClassWriter.toByteArray method). This is not very + * efficient but is much easier and requires much less code than any + * other method I can think of. + */ + cw.invalidFrames = true; + } + // updates the exception handler block labels + Handler h = firstHandler; + while (h != null) { + getNewOffset(allIndexes, allSizes, h.start); + getNewOffset(allIndexes, allSizes, h.end); + getNewOffset(allIndexes, allSizes, h.handler); + h = h.next; + } + // updates the instruction addresses in the + // local var and line number tables + for (i = 0; i < 2; ++i) { + ByteVector bv = i == 0 ? localVar : localVarType; + if (bv != null) { + b = bv.data; + u = 0; + while (u < bv.length) { + label = readUnsignedShort(b, u); + newOffset = getNewOffset(allIndexes, allSizes, 0, label); + writeShort(b, u, newOffset); + label += readUnsignedShort(b, u + 2); + newOffset = getNewOffset(allIndexes, allSizes, 0, label) + - newOffset; + writeShort(b, u + 2, newOffset); + u += 10; + } + } + } + if (lineNumber != null) { + b = lineNumber.data; + u = 0; + while (u < lineNumber.length) { + writeShort( + b, + u, + getNewOffset(allIndexes, allSizes, 0, + readUnsignedShort(b, u))); + u += 4; + } + } + // updates the labels of the other attributes + Attribute attr = cattrs; + while (attr != null) { + Label[] labels = attr.getLabels(); + if (labels != null) { + for (i = labels.length - 1; i >= 0; --i) { + getNewOffset(allIndexes, allSizes, labels[i]); + } + } + attr = attr.next; + } + + // replaces old bytecodes with new ones + code = newCode; + } + + /** + * Reads an unsigned short value in the given byte array. + * + * @param b + * a byte array. + * @param index + * the start index of the value to be read. + * @return the read value. + */ + static int readUnsignedShort(final byte[] b, final int index) { + return ((b[index] & 0xFF) << 8) | (b[index + 1] & 0xFF); + } + + /** + * Reads a signed short value in the given byte array. + * + * @param b + * a byte array. + * @param index + * the start index of the value to be read. + * @return the read value.
+ */ + static int readInt(final byte[] b, final int index) { + return ((b[index] & 0xFF) << 24) | ((b[index + 1] & 0xFF) << 16) + | ((b[index + 2] & 0xFF) << 8) | (b[index + 3] & 0xFF); + } + + /** + * Writes a short value in the given byte array. + * + * @param b + * a byte array. + * @param index + * where the first byte of the short value must be written. + * @param s + * the value to be written in the given byte array. + */ + static void writeShort(final byte[] b, final int index, final int s) { + b[index] = (byte) (s >>> 8); + b[index + 1] = (byte) s; + } + + /** + * Computes the future value of a bytecode offset. + * <p>
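+ * For example, with indexes = { 10 } and sizes = { 2 }, i.e. 2 bytes + * inserted at the end of the instruction ending at index 10, an offset + * measured from begin = 4 to end = 20 crosses the insertion point and + * becomes (20 - 4) + 2 = 18, an offset from begin = 20 to end = 4 + * becomes (4 - 20) - 2 = -18, and an offset from begin = 12 to end = 20 + * does not cross it and stays 8. + * <p>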
    + * Note: it is possible to have several entries for the same instruction in + * the indexes and sizes: two entries (index=a,size=b) and + * (index=a,size=b') are equivalent to a single entry (index=a,size=b+b'). + * + * @param indexes + * current positions of the instructions to be resized. Each + * instruction must be designated by the index of its last + * byte, plus one (or, in other words, by the index of the + * first byte of the next instruction). + * @param sizes + * the number of bytes to be added to the above + * instructions. More precisely, for each i < len, + * sizes[i] bytes will be added at the end of the + * instruction designated by indexes[i] or, if + * sizes[i] is negative, the last | + * sizes[i]| bytes of the instruction will be removed + * (the instruction size must not become negative or + * null). + * @param begin + * index of the first byte of the source instruction. + * @param end + * index of the first byte of the target instruction. + * @return the future value of the given bytecode offset. + */ + static int getNewOffset(final int[] indexes, final int[] sizes, + final int begin, final int end) { + int offset = end - begin; + for (int i = 0; i < indexes.length; ++i) { + if (begin < indexes[i] && indexes[i] <= end) { + // forward jump + offset += sizes[i]; + } else if (end < indexes[i] && indexes[i] <= begin) { + // backward jump + offset -= sizes[i]; + } + } + return offset; + } + + /** + * Updates the offset of the given label. + * + * @param indexes + * current positions of the instructions to be resized. Each + * instruction must be designated by the index of its last + * byte, plus one (or, in other words, by the index of the + * first byte of the next instruction). + * @param sizes + * the number of bytes to be added to the above + * instructions. More precisely, for each i < len, + * sizes[i] bytes will be added at the end of the + * instruction designated by indexes[i] or, if + * sizes[i] is negative, the last | + * sizes[i]| bytes of the instruction will be removed + * (the instruction size must not become negative or + * null). + * @param label + * the label whose offset must be updated. + */ + static void getNewOffset(final int[] indexes, final int[] sizes, + final Label label) { + if ((label.status & Label.RESIZED) == 0) { + label.position = getNewOffset(indexes, sizes, 0, label.position); + label.status |= Label.RESIZED; + } + } +} diff --git a/src/com/sleepycat/asm/Opcodes.java b/src/com/sleepycat/asm/Opcodes.java new file mode 100644 index 0000000..0a82c0b --- /dev/null +++ b/src/com/sleepycat/asm/Opcodes.java @@ -0,0 +1,361 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +/** + * Defines the JVM opcodes, access flags and array type codes. This interface + * does not define all the JVM opcodes because some opcodes are automatically + * handled. For example, the xLOAD and xSTORE opcodes are automatically replaced + * by xLOAD_n and xSTORE_n opcodes when possible. The xLOAD_n and xSTORE_n + * opcodes are therefore not defined in this interface. Likewise for LDC, + * automatically replaced by LDC_W or LDC2_W when necessary, WIDE, GOTO_W and + * JSR_W. + * + * @author Eric Bruneton + * @author Eugene Kuleshov + */ +public interface Opcodes { + + // ASM API versions + + int ASM4 = 4 << 16 | 0 << 8 | 0; + int ASM5 = 5 << 16 | 0 << 8 | 0; + + // versions + + int V1_1 = 3 << 16 | 45; + int V1_2 = 0 << 16 | 46; + int V1_3 = 0 << 16 | 47; + int V1_4 = 0 << 16 | 48; + int V1_5 = 0 << 16 | 49; + int V1_6 = 0 << 16 | 50; + int V1_7 = 0 << 16 | 51; + int V1_8 = 0 << 16 | 52; + + // access flags + + int ACC_PUBLIC = 0x0001; // class, field, method + int ACC_PRIVATE = 0x0002; // class, field, method + int ACC_PROTECTED = 0x0004; // class, field, method + int ACC_STATIC = 0x0008; // field, method + int ACC_FINAL = 0x0010; // class, field, method, parameter + int ACC_SUPER = 0x0020; // class + int ACC_SYNCHRONIZED = 0x0020; // method + int ACC_VOLATILE = 0x0040; // field + int ACC_BRIDGE = 0x0040; // method + int ACC_VARARGS = 0x0080; // method + int ACC_TRANSIENT = 0x0080; // field + int ACC_NATIVE = 0x0100; // method + int ACC_INTERFACE = 0x0200; // class + int ACC_ABSTRACT = 0x0400; // class, method + int ACC_STRICT = 0x0800; // method + int ACC_SYNTHETIC = 0x1000; // class, field, method, parameter + int ACC_ANNOTATION = 0x2000; // class + int ACC_ENUM = 0x4000; // class(?) field inner + int ACC_MANDATED = 0x8000; // parameter + + // ASM specific pseudo access flags + + int ACC_DEPRECATED = 0x20000; // class, field, method + + // types for NEWARRAY + + int T_BOOLEAN = 4; + int T_CHAR = 5; + int T_FLOAT = 6; + int T_DOUBLE = 7; + int T_BYTE = 8; + int T_SHORT = 9; + int T_INT = 10; + int T_LONG = 11; + + // tags for Handle + + int H_GETFIELD = 1; + int H_GETSTATIC = 2; + int H_PUTFIELD = 3; + int H_PUTSTATIC = 4; + int H_INVOKEVIRTUAL = 5; + int H_INVOKESTATIC = 6; + int H_INVOKESPECIAL = 7; + int H_NEWINVOKESPECIAL = 8; + int H_INVOKEINTERFACE = 9; + + // stack map frame types + + /** + * Represents an expanded frame. See {@link ClassReader#EXPAND_FRAMES}. + */ + int F_NEW = -1; + + /** + * Represents a compressed frame with complete frame data. + */ + int F_FULL = 0; + + /** + * Represents a compressed frame where locals are the same as the locals in + * the previous frame, except that additional 1-3 locals are defined, and + * with an empty stack. 
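+ * For example, if the previous frame had locals [String, int] and the + * current frame has locals [String, int, float] with an empty stack, + * the current frame can be encoded as an F_APPEND frame declaring only + * the additional float local.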
+ */ + int F_APPEND = 1; + + /** + * Represents a compressed frame where locals are the same as the locals in + * the previous frame, except that the last 1-3 locals are absent and with + * an empty stack. + */ + int F_CHOP = 2; + + /** + * Represents a compressed frame with exactly the same locals as the + * previous frame and with an empty stack. + */ + int F_SAME = 3; + + /** + * Represents a compressed frame with exactly the same locals as the + * previous frame and with a single value on the stack. + */ + int F_SAME1 = 4; + + Integer TOP = new Integer(0); + Integer INTEGER = new Integer(1); + Integer FLOAT = new Integer(2); + Integer DOUBLE = new Integer(3); + Integer LONG = new Integer(4); + Integer NULL = new Integer(5); + Integer UNINITIALIZED_THIS = new Integer(6); + + // opcodes // visit method (- = idem) + + int NOP = 0; // visitInsn + int ACONST_NULL = 1; // - + int ICONST_M1 = 2; // - + int ICONST_0 = 3; // - + int ICONST_1 = 4; // - + int ICONST_2 = 5; // - + int ICONST_3 = 6; // - + int ICONST_4 = 7; // - + int ICONST_5 = 8; // - + int LCONST_0 = 9; // - + int LCONST_1 = 10; // - + int FCONST_0 = 11; // - + int FCONST_1 = 12; // - + int FCONST_2 = 13; // - + int DCONST_0 = 14; // - + int DCONST_1 = 15; // - + int BIPUSH = 16; // visitIntInsn + int SIPUSH = 17; // - + int LDC = 18; // visitLdcInsn + // int LDC_W = 19; // - + // int LDC2_W = 20; // - + int ILOAD = 21; // visitVarInsn + int LLOAD = 22; // - + int FLOAD = 23; // - + int DLOAD = 24; // - + int ALOAD = 25; // - + // int ILOAD_0 = 26; // - + // int ILOAD_1 = 27; // - + // int ILOAD_2 = 28; // - + // int ILOAD_3 = 29; // - + // int LLOAD_0 = 30; // - + // int LLOAD_1 = 31; // - + // int LLOAD_2 = 32; // - + // int LLOAD_3 = 33; // - + // int FLOAD_0 = 34; // - + // int FLOAD_1 = 35; // - + // int FLOAD_2 = 36; // - + // int FLOAD_3 = 37; // - + // int DLOAD_0 = 38; // - + // int DLOAD_1 = 39; // - + // int DLOAD_2 = 40; // - + // int DLOAD_3 = 41; // - + // int ALOAD_0 = 42; // - + // int ALOAD_1 = 43; // - + // int ALOAD_2 = 44; // - + // int ALOAD_3 = 45; // - + int IALOAD = 46; // visitInsn + int LALOAD = 47; // - + int FALOAD = 48; // - + int DALOAD = 49; // - + int AALOAD = 50; // - + int BALOAD = 51; // - + int CALOAD = 52; // - + int SALOAD = 53; // - + int ISTORE = 54; // visitVarInsn + int LSTORE = 55; // - + int FSTORE = 56; // - + int DSTORE = 57; // - + int ASTORE = 58; // - + // int ISTORE_0 = 59; // - + // int ISTORE_1 = 60; // - + // int ISTORE_2 = 61; // - + // int ISTORE_3 = 62; // - + // int LSTORE_0 = 63; // - + // int LSTORE_1 = 64; // - + // int LSTORE_2 = 65; // - + // int LSTORE_3 = 66; // - + // int FSTORE_0 = 67; // - + // int FSTORE_1 = 68; // - + // int FSTORE_2 = 69; // - + // int FSTORE_3 = 70; // - + // int DSTORE_0 = 71; // - + // int DSTORE_1 = 72; // - + // int DSTORE_2 = 73; // - + // int DSTORE_3 = 74; // - + // int ASTORE_0 = 75; // - + // int ASTORE_1 = 76; // - + // int ASTORE_2 = 77; // - + // int ASTORE_3 = 78; // - + int IASTORE = 79; // visitInsn + int LASTORE = 80; // - + int FASTORE = 81; // - + int DASTORE = 82; // - + int AASTORE = 83; // - + int BASTORE = 84; // - + int CASTORE = 85; // - + int SASTORE = 86; // - + int POP = 87; // - + int POP2 = 88; // - + int DUP = 89; // - + int DUP_X1 = 90; // - + int DUP_X2 = 91; // - + int DUP2 = 92; // - + int DUP2_X1 = 93; // - + int DUP2_X2 = 94; // - + int SWAP = 95; // - + int IADD = 96; // - + int LADD = 97; // - + int FADD = 98; // - + int DADD = 99; // - + int ISUB = 100; // - + int LSUB = 101; // - + int FSUB = 102; // - 
+ int DSUB = 103; // - + int IMUL = 104; // - + int LMUL = 105; // - + int FMUL = 106; // - + int DMUL = 107; // - + int IDIV = 108; // - + int LDIV = 109; // - + int FDIV = 110; // - + int DDIV = 111; // - + int IREM = 112; // - + int LREM = 113; // - + int FREM = 114; // - + int DREM = 115; // - + int INEG = 116; // - + int LNEG = 117; // - + int FNEG = 118; // - + int DNEG = 119; // - + int ISHL = 120; // - + int LSHL = 121; // - + int ISHR = 122; // - + int LSHR = 123; // - + int IUSHR = 124; // - + int LUSHR = 125; // - + int IAND = 126; // - + int LAND = 127; // - + int IOR = 128; // - + int LOR = 129; // - + int IXOR = 130; // - + int LXOR = 131; // - + int IINC = 132; // visitIincInsn + int I2L = 133; // visitInsn + int I2F = 134; // - + int I2D = 135; // - + int L2I = 136; // - + int L2F = 137; // - + int L2D = 138; // - + int F2I = 139; // - + int F2L = 140; // - + int F2D = 141; // - + int D2I = 142; // - + int D2L = 143; // - + int D2F = 144; // - + int I2B = 145; // - + int I2C = 146; // - + int I2S = 147; // - + int LCMP = 148; // - + int FCMPL = 149; // - + int FCMPG = 150; // - + int DCMPL = 151; // - + int DCMPG = 152; // - + int IFEQ = 153; // visitJumpInsn + int IFNE = 154; // - + int IFLT = 155; // - + int IFGE = 156; // - + int IFGT = 157; // - + int IFLE = 158; // - + int IF_ICMPEQ = 159; // - + int IF_ICMPNE = 160; // - + int IF_ICMPLT = 161; // - + int IF_ICMPGE = 162; // - + int IF_ICMPGT = 163; // - + int IF_ICMPLE = 164; // - + int IF_ACMPEQ = 165; // - + int IF_ACMPNE = 166; // - + int GOTO = 167; // - + int JSR = 168; // - + int RET = 169; // visitVarInsn + int TABLESWITCH = 170; // visitTableSwitchInsn + int LOOKUPSWITCH = 171; // visitLookupSwitchInsn + int IRETURN = 172; // visitInsn + int LRETURN = 173; // - + int FRETURN = 174; // - + int DRETURN = 175; // - + int ARETURN = 176; // - + int RETURN = 177; // - + int GETSTATIC = 178; // visitFieldInsn + int PUTSTATIC = 179; // - + int GETFIELD = 180; // - + int PUTFIELD = 181; // - + int INVOKEVIRTUAL = 182; // visitMethodInsn + int INVOKESPECIAL = 183; // - + int INVOKESTATIC = 184; // - + int INVOKEINTERFACE = 185; // - + int INVOKEDYNAMIC = 186; // visitInvokeDynamicInsn + int NEW = 187; // visitTypeInsn + int NEWARRAY = 188; // visitIntInsn + int ANEWARRAY = 189; // visitTypeInsn + int ARRAYLENGTH = 190; // visitInsn + int ATHROW = 191; // - + int CHECKCAST = 192; // visitTypeInsn + int INSTANCEOF = 193; // - + int MONITORENTER = 194; // visitInsn + int MONITOREXIT = 195; // - + // int WIDE = 196; // NOT VISITED + int MULTIANEWARRAY = 197; // visitMultiANewArrayInsn + int IFNULL = 198; // visitJumpInsn + int IFNONNULL = 199; // - + // int GOTO_W = 200; // - + // int JSR_W = 201; // - +} diff --git a/src/com/sleepycat/asm/Type.java b/src/com/sleepycat/asm/Type.java new file mode 100644 index 0000000..eef8f51 --- /dev/null +++ b/src/com/sleepycat/asm/Type.java @@ -0,0 +1,896 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2011 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2.
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +package com.sleepycat.asm; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; + +/** + * A Java field or method type. This class can be used to make it easier to + * manipulate type and method descriptors. + * + * @author Eric Bruneton + * @author Chris Nokleberg + */ +public class Type { + + /** + * The sort of the void type. See {@link #getSort getSort}. + */ + public static final int VOID = 0; + + /** + * The sort of the boolean type. See {@link #getSort getSort}. + */ + public static final int BOOLEAN = 1; + + /** + * The sort of the char type. See {@link #getSort getSort}. + */ + public static final int CHAR = 2; + + /** + * The sort of the byte type. See {@link #getSort getSort}. + */ + public static final int BYTE = 3; + + /** + * The sort of the short type. See {@link #getSort getSort}. + */ + public static final int SHORT = 4; + + /** + * The sort of the int type. See {@link #getSort getSort}. + */ + public static final int INT = 5; + + /** + * The sort of the float type. See {@link #getSort getSort}. + */ + public static final int FLOAT = 6; + + /** + * The sort of the long type. See {@link #getSort getSort}. + */ + public static final int LONG = 7; + + /** + * The sort of the double type. See {@link #getSort getSort}. + */ + public static final int DOUBLE = 8; + + /** + * The sort of array reference types. See {@link #getSort getSort}. + */ + public static final int ARRAY = 9; + + /** + * The sort of object reference types. See {@link #getSort getSort}. + */ + public static final int OBJECT = 10; + + /** + * The sort of method types. See {@link #getSort getSort}. + */ + public static final int METHOD = 11; + + /** + * The void type. + */ + public static final Type VOID_TYPE = new Type(VOID, null, ('V' << 24) + | (5 << 16) | (0 << 8) | 0, 1); + + /** + * The boolean type. + */ + public static final Type BOOLEAN_TYPE = new Type(BOOLEAN, null, ('Z' << 24) + | (0 << 16) | (5 << 8) | 1, 1); + + /** + * The char type. + */ + public static final Type CHAR_TYPE = new Type(CHAR, null, ('C' << 24) + | (0 << 16) | (6 << 8) | 1, 1); + + /** + * The byte type. + */ + public static final Type BYTE_TYPE = new Type(BYTE, null, ('B' << 24) + | (0 << 16) | (5 << 8) | 1, 1); + + /** + * The short type. 
+ */ + public static final Type SHORT_TYPE = new Type(SHORT, null, ('S' << 24) + | (0 << 16) | (7 << 8) | 1, 1); + + /** + * The int type. + */ + public static final Type INT_TYPE = new Type(INT, null, ('I' << 24) + | (0 << 16) | (0 << 8) | 1, 1); + + /** + * The float type. + */ + public static final Type FLOAT_TYPE = new Type(FLOAT, null, ('F' << 24) + | (2 << 16) | (2 << 8) | 1, 1); + + /** + * The long type. + */ + public static final Type LONG_TYPE = new Type(LONG, null, ('J' << 24) + | (1 << 16) | (1 << 8) | 2, 1); + + /** + * The double type. + */ + public static final Type DOUBLE_TYPE = new Type(DOUBLE, null, ('D' << 24) + | (3 << 16) | (3 << 8) | 2, 1); + + // ------------------------------------------------------------------------ + // Fields + // ------------------------------------------------------------------------ + + /** + * The sort of this Java type. + */ + private final int sort; + + /** + * A buffer containing the internal name of this Java type. This field is + * only used for reference types. + */ + private final char[] buf; + + /** + * The offset of the internal name of this Java type in {@link #buf buf} or, + * for primitive types, the size, descriptor and getOpcode offsets for this + * type (byte 0 contains the size, byte 1 the descriptor, byte 2 the offset + * for IALOAD or IASTORE, byte 3 the offset for all other instructions). + */ + private final int off; + + /** + * The length of the internal name of this Java type. + */ + private final int len; + + // ------------------------------------------------------------------------ + // Constructors + // ------------------------------------------------------------------------ + + /** + * Constructs a reference type. + * + * @param sort + * the sort of the reference type to be constructed. + * @param buf + * a buffer containing the descriptor of the previous type. + * @param off + * the offset of this descriptor in the previous buffer. + * @param len + * the length of this descriptor. + */ + private Type(final int sort, final char[] buf, final int off, final int len) { + this.sort = sort; + this.buf = buf; + this.off = off; + this.len = len; + } + + /** + * Returns the Java type corresponding to the given type descriptor. + * + * @param typeDescriptor + * a field or method type descriptor. + * @return the Java type corresponding to the given type descriptor. + */ + public static Type getType(final String typeDescriptor) { + return getType(typeDescriptor.toCharArray(), 0); + } + + /** + * Returns the Java type corresponding to the given internal name. + * + * @param internalName + * an internal name. + * @return the Java type corresponding to the given internal name. + */ + public static Type getObjectType(final String internalName) { + char[] buf = internalName.toCharArray(); + return new Type(buf[0] == '[' ? ARRAY : OBJECT, buf, 0, buf.length); + } + + /** + * Returns the Java type corresponding to the given method descriptor. + * Equivalent to Type.getType(methodDescriptor). + * + * @param methodDescriptor + * a method descriptor. + * @return the Java type corresponding to the given method descriptor. + */ + public static Type getMethodType(final String methodDescriptor) { + return getType(methodDescriptor.toCharArray(), 0); + } + + /** + * Returns the Java method type corresponding to the given argument and + * return types. + * + * @param returnType + * the return type of the method. + * @param argumentTypes + * the argument types of the method. 
+ * @return the Java type corresponding to the given argument and return + * types. + */ + public static Type getMethodType(final Type returnType, + final Type... argumentTypes) { + return getType(getMethodDescriptor(returnType, argumentTypes)); + } + + /** + * Returns the Java type corresponding to the given class. + * + * @param c + * a class. + * @return the Java type corresponding to the given class. + */ + public static Type getType(final Class c) { + if (c.isPrimitive()) { + if (c == Integer.TYPE) { + return INT_TYPE; + } else if (c == Void.TYPE) { + return VOID_TYPE; + } else if (c == Boolean.TYPE) { + return BOOLEAN_TYPE; + } else if (c == Byte.TYPE) { + return BYTE_TYPE; + } else if (c == Character.TYPE) { + return CHAR_TYPE; + } else if (c == Short.TYPE) { + return SHORT_TYPE; + } else if (c == Double.TYPE) { + return DOUBLE_TYPE; + } else if (c == Float.TYPE) { + return FLOAT_TYPE; + } else /* if (c == Long.TYPE) */{ + return LONG_TYPE; + } + } else { + return getType(getDescriptor(c)); + } + } + + /** + * Returns the Java method type corresponding to the given constructor. + * + * @param c + * a {@link Constructor Constructor} object. + * @return the Java method type corresponding to the given constructor. + */ + public static Type getType(final Constructor c) { + return getType(getConstructorDescriptor(c)); + } + + /** + * Returns the Java method type corresponding to the given method. + * + * @param m + * a {@link Method Method} object. + * @return the Java method type corresponding to the given method. + */ + public static Type getType(final Method m) { + return getType(getMethodDescriptor(m)); + } + + /** + * Returns the Java types corresponding to the argument types of the given + * method descriptor. + * + * @param methodDescriptor + * a method descriptor. + * @return the Java types corresponding to the argument types of the given + * method descriptor. + */ + public static Type[] getArgumentTypes(final String methodDescriptor) { + char[] buf = methodDescriptor.toCharArray(); + int off = 1; + int size = 0; + while (true) { + char car = buf[off++]; + if (car == ')') { + break; + } else if (car == 'L') { + while (buf[off++] != ';') { + } + ++size; + } else if (car != '[') { + ++size; + } + } + Type[] args = new Type[size]; + off = 1; + size = 0; + while (buf[off] != ')') { + args[size] = getType(buf, off); + off += args[size].len + (args[size].sort == OBJECT ? 2 : 0); + size += 1; + } + return args; + } + + /** + * Returns the Java types corresponding to the argument types of the given + * method. + * + * @param method + * a method. + * @return the Java types corresponding to the argument types of the given + * method. + */ + public static Type[] getArgumentTypes(final Method method) { + Class[] classes = method.getParameterTypes(); + Type[] types = new Type[classes.length]; + for (int i = classes.length - 1; i >= 0; --i) { + types[i] = getType(classes[i]); + } + return types; + } + + /** + * Returns the Java type corresponding to the return type of the given + * method descriptor. + * + * @param methodDescriptor + * a method descriptor. + * @return the Java type corresponding to the return type of the given + * method descriptor. + */ + public static Type getReturnType(final String methodDescriptor) { + char[] buf = methodDescriptor.toCharArray(); + return getType(buf, methodDescriptor.indexOf(')') + 1); + } + + /** + * Returns the Java type corresponding to the return type of the given + * method. + * + * @param method + * a method. 
+ * @return the Java type corresponding to the return type of the given + * method. + */ + public static Type getReturnType(final Method method) { + return getType(method.getReturnType()); + } + + /** + * Computes the size of the arguments and of the return value of a method. + * + * @param desc + * the descriptor of a method. + * @return the size of the arguments of the method (plus one for the + * implicit this argument), argSize, and the size of its return + * value, retSize, packed into a single int i = + * (argSize << 2) | retSize (argSize is therefore equal to + * i >> 2, and retSize to i & 0x03). + */ + public static int getArgumentsAndReturnSizes(final String desc) { + int n = 1; + int c = 1; + while (true) { + char car = desc.charAt(c++); + if (car == ')') { + car = desc.charAt(c); + return n << 2 + | (car == 'V' ? 0 : (car == 'D' || car == 'J' ? 2 : 1)); + } else if (car == 'L') { + while (desc.charAt(c++) != ';') { + } + n += 1; + } else if (car == '[') { + while ((car = desc.charAt(c)) == '[') { + ++c; + } + if (car == 'D' || car == 'J') { + n -= 1; + } + } else if (car == 'D' || car == 'J') { + n += 2; + } else { + n += 1; + } + } + } + + /** + * Returns the Java type corresponding to the given type descriptor. For + * method descriptors, buf is supposed to contain nothing more than the + * descriptor itself. + * + * @param buf + * a buffer containing a type descriptor. + * @param off + * the offset of this descriptor in the previous buffer. + * @return the Java type corresponding to the given type descriptor. + */ + private static Type getType(final char[] buf, final int off) { + int len; + switch (buf[off]) { + case 'V': + return VOID_TYPE; + case 'Z': + return BOOLEAN_TYPE; + case 'C': + return CHAR_TYPE; + case 'B': + return BYTE_TYPE; + case 'S': + return SHORT_TYPE; + case 'I': + return INT_TYPE; + case 'F': + return FLOAT_TYPE; + case 'J': + return LONG_TYPE; + case 'D': + return DOUBLE_TYPE; + case '[': + len = 1; + while (buf[off + len] == '[') { + ++len; + } + if (buf[off + len] == 'L') { + ++len; + while (buf[off + len] != ';') { + ++len; + } + } + return new Type(ARRAY, buf, off, len + 1); + case 'L': + len = 1; + while (buf[off + len] != ';') { + ++len; + } + return new Type(OBJECT, buf, off + 1, len - 1); + // case '(': + default: + return new Type(METHOD, buf, off, buf.length - off); + } + } + + // ------------------------------------------------------------------------ + // Accessors + // ------------------------------------------------------------------------ + + /** + * Returns the sort of this Java type. + * + * @return {@link #VOID VOID}, {@link #BOOLEAN BOOLEAN}, {@link #CHAR CHAR}, + * {@link #BYTE BYTE}, {@link #SHORT SHORT}, {@link #INT INT}, + * {@link #FLOAT FLOAT}, {@link #LONG LONG}, {@link #DOUBLE DOUBLE}, + * {@link #ARRAY ARRAY}, {@link #OBJECT OBJECT} or {@link #METHOD + * METHOD}. + */ + public int getSort() { + return sort; + } + + /** + * Returns the number of dimensions of this array type. This method should + * only be used for an array type. + * + * @return the number of dimensions of this array type. + */ + public int getDimensions() { + int i = 1; + while (buf[off + i] == '[') { + ++i; + } + return i; + } + + /** + * Returns the type of the elements of this array type. This method should + * only be used for an array type. + * + * @return Returns the type of the elements of this array type. 
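+ *
+ * <p>(Editor's illustration, not in the original javadoc: for the array
+ * type returned by {@code Type.getType("[[I")}, {@code getDimensions()}
+ * is 2 and {@code getElementType()} is {@code Type.INT_TYPE}.)</p>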
+ */ + public Type getElementType() { + return getType(buf, off + getDimensions()); + } + + /** + * Returns the binary name of the class corresponding to this type. This + * method must not be used on method types. + * + * @return the binary name of the class corresponding to this type. + */ + public String getClassName() { + switch (sort) { + case VOID: + return "void"; + case BOOLEAN: + return "boolean"; + case CHAR: + return "char"; + case BYTE: + return "byte"; + case SHORT: + return "short"; + case INT: + return "int"; + case FLOAT: + return "float"; + case LONG: + return "long"; + case DOUBLE: + return "double"; + case ARRAY: + StringBuilder sb = new StringBuilder(getElementType().getClassName()); + for (int i = getDimensions(); i > 0; --i) { + sb.append("[]"); + } + return sb.toString(); + case OBJECT: + return new String(buf, off, len).replace('/', '.'); + default: + return null; + } + } + + /** + * Returns the internal name of the class corresponding to this object or + * array type. The internal name of a class is its fully qualified name (as + * returned by Class.getName()), where '.' are replaced by '/'. This method + * should only be used for an object or array type. + * + * @return the internal name of the class corresponding to this object type. + */ + public String getInternalName() { + return new String(buf, off, len); + } + + /** + * Returns the argument types of methods of this type. This method should + * only be used for method types. + * + * @return the argument types of methods of this type. + */ + public Type[] getArgumentTypes() { + return getArgumentTypes(getDescriptor()); + } + + /** + * Returns the return type of methods of this type. This method should only + * be used for method types. + * + * @return the return type of methods of this type. + */ + public Type getReturnType() { + return getReturnType(getDescriptor()); + } + + /** + * Returns the size of the arguments and of the return value of methods of + * this type. This method should only be used for method types. + * + * @return the size of the arguments (plus one for the implicit this + * argument), argSize, and the size of the return value, retSize, + * packed into a single + * int i = (argSize << 2) | retSize + * (argSize is therefore equal to i >> 2, + * and retSize to i & 0x03). + */ + public int getArgumentsAndReturnSizes() { + return getArgumentsAndReturnSizes(getDescriptor()); + } + + // ------------------------------------------------------------------------ + // Conversion to type descriptors + // ------------------------------------------------------------------------ + + /** + * Returns the descriptor corresponding to this Java type. + * + * @return the descriptor corresponding to this Java type. + */ + public String getDescriptor() { + StringBuffer buf = new StringBuffer(); + getDescriptor(buf); + return buf.toString(); + } + + /** + * Returns the descriptor corresponding to the given argument and return + * types. + * + * @param returnType + * the return type of the method. + * @param argumentTypes + * the argument types of the method. + * @return the descriptor corresponding to the given argument and return + * types. + */ + public static String getMethodDescriptor(final Type returnType, + final Type...
argumentTypes) { + StringBuffer buf = new StringBuffer(); + buf.append('('); + for (int i = 0; i < argumentTypes.length; ++i) { + argumentTypes[i].getDescriptor(buf); + } + buf.append(')'); + returnType.getDescriptor(buf); + return buf.toString(); + } + + /** + * Appends the descriptor corresponding to this Java type to the given + * string buffer. + * + * @param buf + * the string buffer to which the descriptor must be appended. + */ + private void getDescriptor(final StringBuffer buf) { + if (this.buf == null) { + // descriptor is in byte 3 of 'off' for primitive types (buf == + // null) + buf.append((char) ((off & 0xFF000000) >>> 24)); + } else if (sort == OBJECT) { + buf.append('L'); + buf.append(this.buf, off, len); + buf.append(';'); + } else { // sort == ARRAY || sort == METHOD + buf.append(this.buf, off, len); + } + } + + // ------------------------------------------------------------------------ + // Direct conversion from classes to type descriptors, + // without intermediate Type objects + // ------------------------------------------------------------------------ + + /** + * Returns the internal name of the given class. The internal name of a + * class is its fully qualified name, as returned by Class.getName(), where + * '.' are replaced by '/'. + * + * @param c + * an object or array class. + * @return the internal name of the given class. + */ + public static String getInternalName(final Class c) { + return c.getName().replace('.', '/'); + } + + /** + * Returns the descriptor corresponding to the given Java type. + * + * @param c + * an object class, a primitive class or an array class. + * @return the descriptor corresponding to the given class. + */ + public static String getDescriptor(final Class c) { + StringBuffer buf = new StringBuffer(); + getDescriptor(buf, c); + return buf.toString(); + } + + /** + * Returns the descriptor corresponding to the given constructor. + * + * @param c + * a {@link Constructor Constructor} object. + * @return the descriptor of the given constructor. + */ + public static String getConstructorDescriptor(final Constructor c) { + Class[] parameters = c.getParameterTypes(); + StringBuffer buf = new StringBuffer(); + buf.append('('); + for (int i = 0; i < parameters.length; ++i) { + getDescriptor(buf, parameters[i]); + } + return buf.append(")V").toString(); + } + + /** + * Returns the descriptor corresponding to the given method. + * + * @param m + * a {@link Method Method} object. + * @return the descriptor of the given method. + */ + public static String getMethodDescriptor(final Method m) { + Class[] parameters = m.getParameterTypes(); + StringBuffer buf = new StringBuffer(); + buf.append('('); + for (int i = 0; i < parameters.length; ++i) { + getDescriptor(buf, parameters[i]); + } + buf.append(')'); + getDescriptor(buf, m.getReturnType()); + return buf.toString(); + } + + /** + * Appends the descriptor of the given class to the given string buffer. + * + * @param buf + * the string buffer to which the descriptor must be appended. + * @param c + * the class whose descriptor must be computed. 
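+ *
+ * <p>(Editor's illustration, not in the original javadoc: for
+ * {@code String[].class} this appends {@code "[Ljava/lang/String;"},
+ * and for {@code int.class} it appends {@code "I"}.)</p>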
+ */ + private static void getDescriptor(final StringBuffer buf, final Class c) { + Class d = c; + while (true) { + if (d.isPrimitive()) { + char car; + if (d == Integer.TYPE) { + car = 'I'; + } else if (d == Void.TYPE) { + car = 'V'; + } else if (d == Boolean.TYPE) { + car = 'Z'; + } else if (d == Byte.TYPE) { + car = 'B'; + } else if (d == Character.TYPE) { + car = 'C'; + } else if (d == Short.TYPE) { + car = 'S'; + } else if (d == Double.TYPE) { + car = 'D'; + } else if (d == Float.TYPE) { + car = 'F'; + } else /* if (d == Long.TYPE) */{ + car = 'J'; + } + buf.append(car); + return; + } else if (d.isArray()) { + buf.append('['); + d = d.getComponentType(); + } else { + buf.append('L'); + String name = d.getName(); + int len = name.length(); + for (int i = 0; i < len; ++i) { + char car = name.charAt(i); + buf.append(car == '.' ? '/' : car); + } + buf.append(';'); + return; + } + } + } + + // ------------------------------------------------------------------------ + // Corresponding size and opcodes + // ------------------------------------------------------------------------ + + /** + * Returns the size of values of this type. This method must not be used for + * method types. + * + * @return the size of values of this type, i.e., 2 for long and + * double, 0 for void and 1 otherwise. + */ + public int getSize() { + // the size is in byte 0 of 'off' for primitive types (buf == null) + return buf == null ? (off & 0xFF) : 1; + } + + /** + * Returns a JVM instruction opcode adapted to this Java type. This method + * must not be used for method types. + * + * @param opcode + * a JVM instruction opcode. This opcode must be one of ILOAD, + * ISTORE, IALOAD, IASTORE, IADD, ISUB, IMUL, IDIV, IREM, INEG, + * ISHL, ISHR, IUSHR, IAND, IOR, IXOR and IRETURN. + * @return an opcode that is similar to the given opcode, but adapted to + * this Java type. For example, if this type is float and + * opcode is IRETURN, this method returns FRETURN. + */ + public int getOpcode(final int opcode) { + if (opcode == Opcodes.IALOAD || opcode == Opcodes.IASTORE) { + // the offset for IALOAD or IASTORE is in byte 1 of 'off' for + // primitive types (buf == null) + return opcode + (buf == null ? (off & 0xFF00) >> 8 : 4); + } else { + // the offset for other instructions is in byte 2 of 'off' for + // primitive types (buf == null) + return opcode + (buf == null ? (off & 0xFF0000) >> 16 : 4); + } + } + + // ------------------------------------------------------------------------ + // Equals, hashCode and toString + // ------------------------------------------------------------------------ + + /** + * Tests if the given object is equal to this type. + * + * @param o + * the object to be compared to this type. + * @return true if the given object is equal to this type. + */ + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Type)) { + return false; + } + Type t = (Type) o; + if (sort != t.sort) { + return false; + } + if (sort >= ARRAY) { + if (len != t.len) { + return false; + } + for (int i = off, j = t.off, end = i + len; i < end; i++, j++) { + if (buf[i] != t.buf[j]) { + return false; + } + } + } + return true; + } + + /** + * Returns a hash code value for this type. + * + * @return a hash code value for this type. 
+ */ + @Override + public int hashCode() { + int hc = 13 * sort; + if (sort >= ARRAY) { + for (int i = off, end = i + len; i < end; i++) { + hc = 17 * (hc + buf[i]); + } + } + return hc; + } + + /** + * Returns a string representation of this type. + * + * @return the descriptor of this type. + */ + @Override + public String toString() { + return getDescriptor(); + } +} diff --git a/src/com/sleepycat/asm/TypePath.java b/src/com/sleepycat/asm/TypePath.java new file mode 100644 index 0000000..1789acd --- /dev/null +++ b/src/com/sleepycat/asm/TypePath.java @@ -0,0 +1,193 @@ +/*** + * ASM: a very small and fast Java bytecode manipulation framework + * Copyright (c) 2000-2013 INRIA, France Telecom + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +package com.sleepycat.asm; + +/** + * The path to a type argument, wildcard bound, array element type, or static + * inner type within an enclosing type. + * + * @author Eric Bruneton + */ +public class TypePath { + + /** + * A type path step that steps into the element type of an array type. See + * {@link #getStep getStep}. + */ + public final static int ARRAY_ELEMENT = 0; + + /** + * A type path step that steps into the nested type of a class type. See + * {@link #getStep getStep}. + */ + public final static int INNER_TYPE = 1; + + /** + * A type path step that steps into the bound of a wildcard type. See + * {@link #getStep getStep}. + */ + public final static int WILDCARD_BOUND = 2; + + /** + * A type path step that steps into a type argument of a generic type. See + * {@link #getStep getStep}. + */ + public final static int TYPE_ARGUMENT = 3; + + /** + * The byte array where the path is stored, in Java class file format. + */ + byte[] b; + + /** + * The offset of the first byte of the type path in 'b'. + */ + int offset; + + /** + * Creates a new type path. + * + * @param b + * the byte array containing the type path in Java class file + * format. + * @param offset + * the offset of the first byte of the type path in 'b'. 
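+ *
+ * <p>(Editor's note, derived from the accessors below: {@code b[offset]}
+ * holds the number of steps in the path, and each step then occupies two
+ * bytes, the step kind followed by its type argument index.)</p>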
+ */ + TypePath(byte[] b, int offset) { + this.b = b; + this.offset = offset; + } + + /** + * Returns the length of this path. + * + * @return the length of this path. + */ + public int getLength() { + return b[offset]; + } + + /** + * Returns the value of the given step of this path. + * + * @param index + * an index between 0 and {@link #getLength()}, exclusive. + * @return {@link #ARRAY_ELEMENT ARRAY_ELEMENT}, {@link #INNER_TYPE + * INNER_TYPE}, {@link #WILDCARD_BOUND WILDCARD_BOUND}, or + * {@link #TYPE_ARGUMENT TYPE_ARGUMENT}. + */ + public int getStep(int index) { + return b[offset + 2 * index + 1]; + } + + /** + * Returns the index of the type argument that the given step is stepping + * into. This method should only be used for steps whose value is + * {@link #TYPE_ARGUMENT TYPE_ARGUMENT}. + * + * @param index + * an index between 0 and {@link #getLength()}, exclusive. + * @return the index of the type argument that the given step is stepping + * into. + */ + public int getStepArgument(int index) { + return b[offset + 2 * index + 2]; + } + + /** + * Converts a type path in string form, in the format used by + * {@link #toString()}, into a TypePath object. + * + * @param typePath + * a type path in string form, in the format used by + * {@link #toString()}. May be null or empty. + * @return the corresponding TypePath object, or null if the path is empty. + */ + public static TypePath fromString(final String typePath) { + if (typePath == null || typePath.length() == 0) { + return null; + } + int n = typePath.length(); + ByteVector out = new ByteVector(n); + out.putByte(0); + for (int i = 0; i < n;) { + char c = typePath.charAt(i++); + if (c == '[') { + out.put11(ARRAY_ELEMENT, 0); + } else if (c == '.') { + out.put11(INNER_TYPE, 0); + } else if (c == '*') { + out.put11(WILDCARD_BOUND, 0); + } else if (c >= '0' && c <= '9') { + int typeArg = c - '0'; + while (i < n && (c = typePath.charAt(i)) >= '0' && c <= '9') { + typeArg = typeArg * 10 + c - '0'; + i += 1; + } + out.put11(TYPE_ARGUMENT, typeArg); + } + } + out.data[0] = (byte) (out.length / 2); + return new TypePath(out.data, 0); + } + + /** + * Returns a string representation of this type path. {@link #ARRAY_ELEMENT + * ARRAY_ELEMENT} steps are represented with '[', {@link #INNER_TYPE + * INNER_TYPE} steps with '.', {@link #WILDCARD_BOUND WILDCARD_BOUND} steps + * with '*' and {@link #TYPE_ARGUMENT TYPE_ARGUMENT} steps with their type + * argument index in decimal form. + */ + @Override + public String toString() { + int length = getLength(); + StringBuilder result = new StringBuilder(length * 2); + for (int i = 0; i < length; ++i) { + switch (getStep(i)) { + case ARRAY_ELEMENT: + result.append('['); + break; + case INNER_TYPE: + result.append('.'); + break; + case WILDCARD_BOUND: + result.append('*'); + break; + case TYPE_ARGUMENT: + result.append(getStepArgument(i)); + break; + default: + result.append('_'); + } + } + return result.toString(); + } +} diff --git a/src/com/sleepycat/asm/package-info.java b/src/com/sleepycat/asm/package-info.java new file mode 100644 index 0000000..3621fae --- /dev/null +++ b/src/com/sleepycat/asm/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: ASM library classes (copy of 3rd party sources) used by {@link + * com.sleepycat.persist DPL} for bytecode generation. + */ +package com.sleepycat.asm; diff --git a/src/com/sleepycat/bind/ByteArrayBinding.java b/src/com/sleepycat/bind/ByteArrayBinding.java new file mode 100644 index 0000000..a3b1f2e --- /dev/null +++ b/src/com/sleepycat/bind/ByteArrayBinding.java @@ -0,0 +1,56 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A pass-through EntryBinding that uses the entry's byte array as + * the key or data object. + * + * @author Mark Hayes + */ +public class ByteArrayBinding implements EntryBinding { + + /* + * We can return the same byte[] for 0 length arrays. + */ + private static byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0]; + + /** + * Creates a byte array binding. + */ + public ByteArrayBinding() { + } + + // javadoc is inherited + public byte[] entryToObject(DatabaseEntry entry) { + + int len = entry.getSize(); + if (len == 0) { + return ZERO_LENGTH_BYTE_ARRAY; + } else { + byte[] bytes = new byte[len]; + System.arraycopy(entry.getData(), entry.getOffset(), + bytes, 0, bytes.length); + return bytes; + } + } + + // javadoc is inherited + public void objectToEntry(byte[] object, DatabaseEntry entry) { + + entry.setData(object, 0, object.length); + } +} diff --git a/src/com/sleepycat/bind/EntityBinding.java b/src/com/sleepycat/bind/EntityBinding.java new file mode 100644 index 0000000..cf06af2 --- /dev/null +++ b/src/com/sleepycat/bind/EntityBinding.java @@ -0,0 +1,59 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A binding between a key-value entry pair and an entity object. + * + *
<p><em>WARNING:</em> Binding instances are typically shared by multiple + * threads and binding methods are called without any special synchronization. + * Therefore, bindings must be thread safe. In general no shared state should + * be used and any caching of computed values must be done with proper + * synchronization.</p>
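+ *
+ * <p>(Editor's sketch, not part of the original javadoc: an implementation
+ * typically delegates to {@link EntryBinding} instances for the key and data;
+ * the {@code Part} class and the two bindings below are hypothetical.)</p>
+ * <pre>
+ *    public Part entryToObject(DatabaseEntry key, DatabaseEntry data) {
+ *        return new Part(keyBinding.entryToObject(key),
+ *                        dataBinding.entryToObject(data));
+ *    }
+ * </pre>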
    + * + * @author Mark Hayes + */ +public interface EntityBinding { + + /** + * Converts key and data entry buffers into an entity Object. + * + * @param key is the source key entry. + * + * @param data is the source data entry. + * + * @return the resulting Object. + */ + E entryToObject(DatabaseEntry key, DatabaseEntry data); + + /** + * Extracts the key entry from an entity Object. + * + * @param object is the source Object. + * + * @param key is the destination entry buffer. + */ + void objectToKey(E object, DatabaseEntry key); + + /** + * Extracts the data entry from an entity Object. + * + * @param object is the source Object. + * + * @param data is the destination entry buffer. + */ + void objectToData(E object, DatabaseEntry data); +} diff --git a/src/com/sleepycat/bind/EntryBinding.java b/src/com/sleepycat/bind/EntryBinding.java new file mode 100644 index 0000000..4d6dcbb --- /dev/null +++ b/src/com/sleepycat/bind/EntryBinding.java @@ -0,0 +1,48 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A binding between a key or data entry and a key or data object. + * + *
<p><em>WARNING:</em> Binding instances are typically shared by multiple + * threads and binding methods are called without any special synchronization. + * Therefore, bindings must be thread safe. In general no shared state should + * be used and any caching of computed values must be done with proper + * synchronization.</p>
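+ *
+ * <p>(Editor's sketch, not part of the original javadoc: a binding converts
+ * in both directions; {@code myBinding} is any hypothetical
+ * {@code EntryBinding} implementation, e.g. a serial or tuple binding.)</p>
+ * <pre>
+ *    DatabaseEntry entry = new DatabaseEntry();
+ *    myBinding.objectToEntry(value, entry);        // object to bytes
+ *    Object copy = myBinding.entryToObject(entry); // bytes back to object
+ * </pre>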
+ * + * @author Mark Hayes + */ +public interface EntryBinding { + + /** + * Converts an entry buffer into an Object. + * + * @param entry is the source entry buffer. + * + * @return the resulting Object. + */ + E entryToObject(DatabaseEntry entry); + + /** + * Converts an Object into an entry buffer. + * + * @param object is the source Object. + * + * @param entry is the destination entry buffer. + */ + void objectToEntry(E object, DatabaseEntry entry); +} diff --git a/src/com/sleepycat/bind/RecordNumberBinding.java b/src/com/sleepycat/bind/RecordNumberBinding.java new file mode 100644 index 0000000..a4f4a8f --- /dev/null +++ b/src/com/sleepycat/bind/RecordNumberBinding.java @@ -0,0 +1,77 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; + +/** + * + * @hidden + * + * An EntryBinding that treats a record number key entry as a + * Long key object. + * + *
<p>Record numbers are returned as Long objects, although on + * input any Number object may be used.</p>
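+ *
+ * <p>(Editor's sketch, not part of the original javadoc: the static
+ * utilities below round-trip a record number through an entry buffer.)</p>
+ * <pre>
+ *    DatabaseEntry entry = new DatabaseEntry();
+ *    RecordNumberBinding.recordNumberToEntry(42L, entry);
+ *    long recNum = RecordNumberBinding.entryToRecordNumber(entry); // 42
+ * </pre>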
+ * + * @author Mark Hayes + */ +public class RecordNumberBinding implements EntryBinding { + + /** + * Creates a record number binding. + */ + public RecordNumberBinding() { + } + + // javadoc is inherited + public Long entryToObject(DatabaseEntry entry) { + + return Long.valueOf(entryToRecordNumber(entry)); + } + + // javadoc is inherited + public void objectToEntry(Object object, DatabaseEntry entry) { + + recordNumberToEntry(((Number) object).longValue(), entry); + } + + /** + * Utility method for use by bindings to translate an entry buffer to a + * record number integer. + * + * @param entry the entry buffer. + * + * @return the record number. + */ + public static long entryToRecordNumber(DatabaseEntry entry) { + + return DbCompat.getRecordNumber(entry) & 0xFFFFFFFFL; + } + + /** + * Utility method for use by bindings to translate a record number integer + * to an entry buffer. + * + * @param recordNumber the record number. + * + * @param entry the entry buffer to hold the record number. + */ + public static void recordNumberToEntry(long recordNumber, + DatabaseEntry entry) { + entry.setData(new byte[4], 0, 4); + DbCompat.setRecordNumber(entry, (int) recordNumber); + } +} diff --git a/src/com/sleepycat/bind/package.html b/src/com/sleepycat/bind/package.html new file mode 100644 index 0000000..30a3100 --- /dev/null +++ b/src/com/sleepycat/bind/package.html @@ -0,0 +1,9 @@ + + +Bindings between database entries and Java objects. + +@see [Getting Started Guide] + + + diff --git a/src/com/sleepycat/bind/serial/ClassCatalog.java b/src/com/sleepycat/bind/serial/ClassCatalog.java new file mode 100644 index 0000000..644bb60 --- /dev/null +++ b/src/com/sleepycat/bind/serial/ClassCatalog.java @@ -0,0 +1,111 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import java.io.Closeable; +import java.io.ObjectStreamClass; + +import com.sleepycat.je.DatabaseException; + +/** + * A catalog of class description information for use during object + * serialization. + * + *
<p>A catalog is used to store class descriptions separately from serialized + * objects, to avoid redundantly stored information with each object. + * When serialized objects are stored in a database, a {@link + * StoredClassCatalog} should be used.</p>
    + * + *
<p>This information is used for serialization of class descriptors or + * java.io.ObjectStreamClass objects, each of which represents a unique class + * format. For each unique format, a unique class ID is assigned by the + * catalog. The class ID can then be used in the serialization stream in place + * of the full class information. When used with {@link SerialInput} and + * {@link SerialOutput} or any of the serial bindings, the use of the catalog + * is transparent to the application.</p>
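+ *
+ * <p>(Editor's sketch, not part of the original javadoc: the ID/format round
+ * trip at the heart of the catalog; {@code MyClass} and the opened
+ * {@code catalog} are hypothetical.)</p>
+ * <pre>
+ *    ObjectStreamClass desc = ObjectStreamClass.lookup(MyClass.class);
+ *    byte[] id = catalog.getClassID(desc);  // compact ID used in the stream
+ *    ObjectStreamClass same = catalog.getClassFormat(id);
+ * </pre>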
    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public interface ClassCatalog + /* */ + extends Closeable + /* */ + { + + /** + * Close a catalog database and release any cached resources. + * + * @throws DatabaseException if an error occurs closing the catalog + * database. + */ + public void close() + throws DatabaseException; + + /** + * Return the class ID for the current version of the given class + * description. + * This is used for storing in serialization streams in place of a full + * class descriptor, since it is much more compact. To get back the + * ObjectStreamClass for a class ID, call {@link #getClassFormat(byte[])}. + * This function causes a new class ID to be assigned if the class + * description has changed. + * + * @param classDesc The class description for which to return the + * class ID. + * + * @return The class ID for the current version of the class. + * + * @throws DatabaseException if an error occurs accessing the catalog + * database. + * + * @throws ClassNotFoundException if the class does not exist. + */ + public byte[] getClassID(ObjectStreamClass classDesc) + throws DatabaseException, ClassNotFoundException; + + /** + * Return the ObjectStreamClass for the given class ID. This may or may + * not be the current class format, depending on whether the class has + * changed since the class ID was generated. + * + * @param classID The class ID for which to return the class format. + * + * @return The class format for the given class ID, which may or may not + * represent the current version of the class. + * + * @throws DatabaseException if an error occurs accessing the catalog + * database. + * + * @throws ClassNotFoundException if the class does not exist. + */ + public ObjectStreamClass getClassFormat(byte[] classID) + throws DatabaseException, ClassNotFoundException; + + /** + * Returns the ClassLoader to be used by bindings that use this catalog, or + * null if a default class loader should be used. The ClassLoader is used + * by {@link SerialBinding} to load classes whose description is stored in + * the catalog. + * + *
<p>In BDB JE, the implementation of this method in {@link + * StoredClassCatalog} returns the ClassLoader property of the catalog + * database Environment. This ensures that the Environment's ClassLoader + * property is used for loading all user-supplied classes.</p>
    + * + * @return the ClassLoader or null. + */ + public ClassLoader getClassLoader(); +} diff --git a/src/com/sleepycat/bind/serial/SerialBase.java b/src/com/sleepycat/bind/serial/SerialBase.java new file mode 100644 index 0000000..cf3c093 --- /dev/null +++ b/src/com/sleepycat/bind/serial/SerialBase.java @@ -0,0 +1,101 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.util.FastOutputStream; + +/** + * A base class for serial bindings creators that provides control over the + * allocation of the output buffer. + * + *
<p>Serial bindings append data to a {@link FastOutputStream} instance. This + * object has a byte array buffer that is resized when it is full. The + * reallocation of this buffer can be a performance factor for some + * applications using large objects. To manage this issue, the {@link + * #setSerialBufferSize} method may be used to control the initial size of the + * buffer, and the {@link #getSerialOutput} method may be overridden by + * subclasses to take over creation of the FastOutputStream object.</p>
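+ *
+ * <p>(Editor's sketch, not part of the original javadoc: a binding that
+ * serializes large objects might reserve a bigger initial buffer; the
+ * binding variable is hypothetical.)</p>
+ * <pre>
+ *    SerialBinding binding = ...;            // SerialBinding extends SerialBase
+ *    binding.setSerialBufferSize(64 * 1024); // start with a 64 KB buffer
+ * </pre>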
    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public class SerialBase { + + private int outputBufferSize; + + /** + * Initializes the initial output buffer size to zero. + * + *
<p>Unless {@link #setSerialBufferSize} is called, the default {@link + * FastOutputStream#DEFAULT_INIT_SIZE} size will be used.</p>
    + */ + public SerialBase() { + outputBufferSize = 0; + } + + /** + * Sets the initial byte size of the output buffer that is allocated by the + * default implementation of {@link #getSerialOutput}. + * + *
<p>If this property is zero (the default), the default {@link + * FastOutputStream#DEFAULT_INIT_SIZE} size is used.</p>
+ * + * @param byteSize the initial byte size of the output buffer, or zero to + * use the default size. + */ + public void setSerialBufferSize(int byteSize) { + outputBufferSize = byteSize; + } + + /** + * Returns the initial byte size of the output buffer. + * + * @return the initial byte size of the output buffer. + * + * @see #setSerialBufferSize + */ + public int getSerialBufferSize() { + return outputBufferSize; + } + + /** + * Returns an empty FastOutputStream instance that will be used by the serial + * binding or key creator. + * + *
<p>The default implementation of this method creates a new FastOutputStream + * with an initial buffer size that can be changed using the {@link + * #setSerialBufferSize} method.</p>
    + * + *
<p>This method may be overridden to return a FastOutputStream instance. + * For example, an instance per thread could be created and returned by + * this method. If a FastOutputStream instance is reused, be sure to call + * its {@link FastOutputStream#reset} method before each use.</p>
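+ *
+ * <p>(Editor's sketch, not part of the original javadoc: one way to reuse a
+ * per-thread stream, as suggested above.)</p>
+ * <pre>
+ *    private final ThreadLocal&lt;FastOutputStream&gt; perThread =
+ *        new ThreadLocal&lt;FastOutputStream&gt;() {
+ *            protected FastOutputStream initialValue() {
+ *                return new FastOutputStream();
+ *            }
+ *        };
+ *
+ *    protected FastOutputStream getSerialOutput(Object object) {
+ *        FastOutputStream fo = perThread.get();
+ *        fo.reset(); // required before each reuse
+ *        return fo;
+ *    }
+ * </pre>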
    + * + * @param object is the object to be written to the serial output, and may + * be used by subclasses to determine the size of the output buffer. + * + * @return an empty FastOutputStream instance. + * + * @see #setSerialBufferSize + */ + protected FastOutputStream getSerialOutput(Object object) { + int byteSize = getSerialBufferSize(); + if (byteSize != 0) { + return new FastOutputStream(byteSize); + } else { + return new FastOutputStream(); + } + } +} diff --git a/src/com/sleepycat/bind/serial/SerialBinding.java b/src/com/sleepycat/bind/serial/SerialBinding.java new file mode 100644 index 0000000..1f2cc65 --- /dev/null +++ b/src/com/sleepycat/bind/serial/SerialBinding.java @@ -0,0 +1,196 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import java.io.IOException; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.util.FastInputStream; +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A concrete EntryBinding that treats a key or data entry as + * a serialized object. + * + *
<p>This binding stores objects in serialized object format. The + * deserialized objects are returned by the binding, and their + * Class must implement the Serializable + * interface.</p>
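+ *
+ * <p>(Editor's sketch, not part of the original javadoc: typical use with a
+ * hypothetical opened {@link StoredClassCatalog} {@code catalog} and a
+ * hypothetical {@code MyData} class.)</p>
+ * <pre>
+ *    EntryBinding binding = new SerialBinding(catalog, MyData.class);
+ *    DatabaseEntry entry = new DatabaseEntry();
+ *    binding.objectToEntry(myData, entry);
+ *    MyData copy = (MyData) binding.entryToObject(entry);
+ * </pre>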
    + * + *
<p>For key bindings, a tuple binding is usually a better choice than a + * serial binding. A tuple binding gives a reasonable sort order, and works + * with comparators in all cases -- see below.</p>
    + * + *
<p><em>WARNING:</em> SerialBinding should not be used with Berkeley DB Java + * Edition for key bindings, when a custom comparator is used. In JE, + * comparators are instantiated and called internally at times when databases + * are not accessible. Because serial bindings depend on the class catalog + * database, a serial binding cannot be used during these times. An attempt + * to use a serial binding with a custom comparator will result in a + * NullPointerException during environment open or close.</p>
    + * + *
<p><strong>Class Evolution</strong></p>
    + * + *
<p>{@code SerialBinding} and other classes in this package use standard Java + * serialization and all rules of Java serialization apply. This includes the + * rules for class evolution. Once an instance of a class is stored, the class + * must maintain its {@code serialVersionUID} and follow the rules defined in + * the Java specification. To use a new incompatible version of a class, a + * different {@link ClassCatalog} must be used or the class catalog database + * must be truncated.</p>
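+ *
+ * <p>(Editor's sketch, not part of the original javadoc: pinning the stream
+ * format of a stored class; {@code MyData} is hypothetical.)</p>
+ * <pre>
+ *    public class MyData implements java.io.Serializable {
+ *        private static final long serialVersionUID = 1L; // keep stable
+ *        // fields and methods
+ *    }
+ * </pre>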
    + * + *
<p>If more advanced class evolution features are required, consider using + * the {@link com.sleepycat.persist.evolve Direct Persistence Layer}.</p>
    + * + * @author Mark Hayes + */ +public class SerialBinding extends SerialBase implements EntryBinding { + + private ClassCatalog classCatalog; + private Class baseClass; + + /** + * Creates a serial binding. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param baseClass is the base class for serialized objects stored using + * this binding -- all objects using this binding must be an instance of + * this class. Note that if this parameter is non-null, then this binding + * will not support serialization of null values. + */ + public SerialBinding(ClassCatalog classCatalog, Class baseClass) { + + if (classCatalog == null) { + throw new NullPointerException("classCatalog must be non-null"); + } + this.classCatalog = classCatalog; + this.baseClass = baseClass; + } + + /** + * Returns the base class for this binding. + * + * @return the base class for this binding. + */ + public final Class getBaseClass() { + + return baseClass; + } + + /** + * Returns the class loader to be used during deserialization, or null if a + * default class loader should be used. The default implementation of this + * method returns {@link ClassCatalog#getClassLoader()}, if it returns a + * non-null value. If {@link ClassCatalog#getClassLoader()} returns null, + * then Thread.currentThread().getContextClassLoader() is + * returned. + * + *
<p>This method may be overridden to return a dynamically determined + * class loader. For example, getBaseClass().getClassLoader() + * could be called to use the class loader for the base class, assuming + * that a base class has been specified.</p>
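+ *
+ * <p>(Editor's sketch, not part of the original javadoc: the override
+ * suggested above, assuming a non-null base class was supplied.)</p>
+ * <pre>
+ *    public ClassLoader getClassLoader() {
+ *        return getBaseClass().getClassLoader();
+ *    }
+ * </pre>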
    + * + *
<p>If this method returns null, a default class loader will be used as + * determined by the java.io.ObjectInputStream.resolveClass + * method.</p>
    + * + * @return the ClassLoader or null. + */ + public ClassLoader getClassLoader() { + + final ClassLoader loader = classCatalog.getClassLoader(); + if (loader != null) { + return loader; + } + return Thread.currentThread().getContextClassLoader(); + } + + /** + * Deserialize an object from an entry buffer. May only be called for data + * that was serialized using {@link #objectToEntry}, since the fixed + * serialization header is assumed to not be included in the input data. + * {@link SerialInput} is used to deserialize the object. + * + * @param entry is the input serialized entry. + * + * @return the output deserialized object. + */ + public E entryToObject(DatabaseEntry entry) { + + int length = entry.getSize(); + byte[] hdr = SerialOutput.getStreamHeader(); + byte[] bufWithHeader = new byte[length + hdr.length]; + + System.arraycopy(hdr, 0, bufWithHeader, 0, hdr.length); + System.arraycopy(entry.getData(), entry.getOffset(), + bufWithHeader, hdr.length, length); + + try { + SerialInput jin = new SerialInput( + new FastInputStream(bufWithHeader, 0, bufWithHeader.length), + classCatalog, + getClassLoader()); + return (E) jin.readObject(); + } catch (IOException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } catch (ClassNotFoundException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } + + /** + * Serialize an object into an entry buffer. The fixed serialization + * header is not included in the output data to save space, and therefore + * to deserialize the data the complementary {@link #entryToObject} method + * must be used. {@link SerialOutput} is used to serialize the object. + * + *
<p>Note that this method sets the DatabaseEntry offset property to a + * non-zero value and the size property to a value less than the length of + * the byte array.</p>
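+ *
+ * <p>(Editor's sketch, not part of the original javadoc: code reading the
+ * entry directly must therefore honor the offset and size.)</p>
+ * <pre>
+ *    byte[] bytes = new byte[entry.getSize()];
+ *    System.arraycopy(entry.getData(), entry.getOffset(),
+ *                     bytes, 0, bytes.length);
+ * </pre>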
    + * + * @param object is the input deserialized object. + * + * @param entry is the output serialized entry. + * + * @throws IllegalArgumentException if the object is not an instance of the + * base class for this binding, including if the object is null and a + * non-null base class was specified. + */ + public void objectToEntry(E object, DatabaseEntry entry) { + + if (baseClass != null && !baseClass.isInstance(object)) { + throw new IllegalArgumentException + (((object != null) ? + ("Data object class (" + object.getClass() + ')') : + "Null value") + + " is not an instance of binding's base class (" + + baseClass + ')'); + } + FastOutputStream fo = getSerialOutput(object); + try { + SerialOutput jos = new SerialOutput(fo, classCatalog); + jos.writeObject(object); + } catch (IOException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + + byte[] hdr = SerialOutput.getStreamHeader(); + entry.setData(fo.getBufferBytes(), hdr.length, + fo.getBufferLength() - hdr.length); + } +} diff --git a/src/com/sleepycat/bind/serial/SerialInput.java b/src/com/sleepycat/bind/serial/SerialInput.java new file mode 100644 index 0000000..98efe40 --- /dev/null +++ b/src/com/sleepycat/bind/serial/SerialInput.java @@ -0,0 +1,106 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.ObjectStreamClass; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.util.ClassResolver; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A specialized ObjectInputStream that gets class description + * information from a ClassCatalog. It is used by + * SerialBinding. + * + *

+ * <p>This class is used instead of an {@link ObjectInputStream}, which it
+ * extends, to read an object stream written by the {@link SerialOutput} class.
+ * For reading objects from a database normally one of the serial binding
+ * classes is used.  {@link SerialInput} is used when an {@link
+ * ObjectInputStream} is needed along with compact storage.  A {@link
+ * ClassCatalog} must be supplied, however, to store shared class
+ * descriptions.</p>
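+ *
+ * <p>A minimal usage sketch, assuming an open catalog database and a stream
+ * previously written by {@link SerialOutput} (the variable names here are
+ * hypothetical):</p>
+ * <pre>
+ * ClassCatalog catalog = new StoredClassCatalog(catalogDb);
+ * SerialInput in = new SerialInput(compactStream, catalog);
+ * MyData object = (MyData) in.readObject();
+ * </pre>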

    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public class SerialInput extends ClassResolver.Stream { + + private ClassCatalog classCatalog; + + /** + * Creates a serial input stream. + * + * @param in is the input stream from which compact serialized objects will + * be read. + * + * @param classCatalog is the catalog containing the class descriptions + * for the serialized objects. + * + * @throws IOException if an I/O error occurs while reading stream header. + */ + public SerialInput(InputStream in, ClassCatalog classCatalog) + throws IOException { + + this(in, classCatalog, null); + } + + /** + * Creates a serial input stream. + * + * @param in is the input stream from which compact serialized objects will + * be read. + * + * @param classCatalog is the catalog containing the class descriptions + * for the serialized objects. + * + * @param classLoader is the class loader to use, or null if a default + * class loader should be used. + * + * @throws IOException if an I/O error occurs while reading stream header. + */ + public SerialInput(InputStream in, + ClassCatalog classCatalog, + ClassLoader classLoader) + throws IOException { + + super(in, classLoader); + this.classCatalog = classCatalog; + } + + @Override + protected ObjectStreamClass readClassDescriptor() + throws IOException, ClassNotFoundException { + + try { + byte len = readByte(); + byte[] id = new byte[len]; + readFully(id); + + return classCatalog.getClassFormat(id); + } catch (DatabaseException e) { + + /* + * Do not throw IOException from here since ObjectOutputStream + * will write the exception to the stream, which causes another + * call here, etc. + */ + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } +} diff --git a/src/com/sleepycat/bind/serial/SerialOutput.java b/src/com/sleepycat/bind/serial/SerialOutput.java new file mode 100644 index 0000000..a6eed73 --- /dev/null +++ b/src/com/sleepycat/bind/serial/SerialOutput.java @@ -0,0 +1,124 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; +import java.io.ObjectStreamConstants; +import java.io.OutputStream; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A specialized ObjectOutputStream that stores class description + * information in a ClassCatalog. It is used by + * SerialBinding. + * + *

+ * <p>This class is used instead of an {@link ObjectOutputStream}, which it
+ * extends, to write a compact object stream.  For writing objects to a
+ * database normally one of the serial binding classes is used.  {@link
+ * SerialOutput} is used when an {@link ObjectOutputStream} is needed along
+ * with compact storage.  A {@link ClassCatalog} must be supplied, however,
+ * to store shared class descriptions.</p>

+ *

+ * <p>The {@link ClassCatalog} is used to store class definitions rather
+ * than embedding them in the stream.  Instead, a compact class format
+ * identifier is embedded, and that identifier is later used by {@link
+ * SerialInput} to load the class format and deserialize the object.</p>
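+ *
+ * <p>A minimal usage sketch, mirroring {@link SerialInput} (the stream and
+ * catalog variables are hypothetical):</p>
+ * <pre>
+ * SerialOutput out = new SerialOutput(someOutputStream, catalog);
+ * out.writeObject(myObject);  // class description goes to the catalog
+ * </pre>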

    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public class SerialOutput extends ObjectOutputStream { + + /* + * Serialization version constants. Instead of hardcoding these we get them + * by creating a SerialOutput, which itself guarantees that we'll always + * use a PROTOCOL_VERSION_2 header. + */ + private final static byte[] STREAM_HEADER; + static { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + new SerialOutput(baos, null); + } catch (IOException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + STREAM_HEADER = baos.toByteArray(); + } + + private ClassCatalog classCatalog; + + /** + * Creates a serial output stream. + * + * @param out is the output stream to which the compact serialized objects + * will be written. + * + * @param classCatalog is the catalog to which the class descriptions for + * the serialized objects will be written. + * + * @throws IOException if an I/O error occurs while writing stream header. + */ + public SerialOutput(OutputStream out, ClassCatalog classCatalog) + throws IOException { + + super(out); + this.classCatalog = classCatalog; + + /* guarantee that we'll always use the same serialization format */ + + useProtocolVersion(ObjectStreamConstants.PROTOCOL_VERSION_2); + } + + // javadoc is inherited + protected void writeClassDescriptor(ObjectStreamClass classdesc) + throws IOException { + + try { + byte[] id = classCatalog.getClassID(classdesc); + writeByte(id.length); + write(id); + } catch (DatabaseException e) { + + /* + * Do not throw IOException from here since ObjectOutputStream + * will write the exception to the stream, which causes another + * call here, etc. + */ + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } catch (ClassNotFoundException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } + + /** + * Returns the fixed stream header used for all serialized streams in + * PROTOCOL_VERSION_2 format. To save space this header can be removed and + * serialized streams before storage and inserted before deserializing. + * {@link SerialOutput} always uses PROTOCOL_VERSION_2 serialization format + * to guarantee that this header is fixed. {@link SerialBinding} removes + * this header from serialized streams automatically. + * + * @return the fixed stream header. + */ + public static byte[] getStreamHeader() { + + return STREAM_HEADER; + } +} diff --git a/src/com/sleepycat/bind/serial/SerialSerialBinding.java b/src/com/sleepycat/bind/serial/SerialSerialBinding.java new file mode 100644 index 0000000..e4381e5 --- /dev/null +++ b/src/com/sleepycat/bind/serial/SerialSerialBinding.java @@ -0,0 +1,123 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * An abstract EntityBinding that treats an entity's key entry and + * data entry as serialized objects. + * + *

+ * <p>This class takes care of serializing and deserializing the key and
+ * data entry automatically.  Its three abstract methods must be implemented
+ * by a concrete subclass to convert the deserialized objects to/from an
+ * entity object.</p>
+ * <ul>
+ * <li>{@link #entryToObject(Object,Object)}</li>
+ * <li>{@link #objectToKey(Object)}</li>
+ * <li>{@link #objectToData(Object)}</li>
+ * </ul>
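+ *
+ * <p>For example, a concrete subclass might look like this (the PartKey,
+ * PartData and Part classes are hypothetical):</p>
+ * <pre>
+ * class PartBinding extends SerialSerialBinding&lt;PartKey,PartData,Part&gt; {
+ *     PartBinding(ClassCatalog catalog) {
+ *         super(catalog, PartKey.class, PartData.class);
+ *     }
+ *     public Part entryToObject(PartKey key, PartData data) {
+ *         return new Part(key, data);
+ *     }
+ *     public PartKey objectToKey(Part part) {
+ *         return part.getKey();
+ *     }
+ *     public PartData objectToData(Part part) {
+ *         return part.getData();
+ *     }
+ * }
+ * </pre>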
    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public abstract class SerialSerialBinding implements EntityBinding { + + private SerialBinding keyBinding; + private SerialBinding dataBinding; + + /** + * Creates a serial-serial entity binding. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param keyClass is the key base class. + * + * @param dataClass is the data base class. + */ + public SerialSerialBinding(ClassCatalog classCatalog, + Class keyClass, + Class dataClass) { + + this(new SerialBinding(classCatalog, keyClass), + new SerialBinding(classCatalog, dataClass)); + } + + /** + * Creates a serial-serial entity binding. + * + * @param keyBinding is the key binding. + * + * @param dataBinding is the data binding. + */ + public SerialSerialBinding(SerialBinding keyBinding, + SerialBinding dataBinding) { + + this.keyBinding = keyBinding; + this.dataBinding = dataBinding; + } + + // javadoc is inherited + public E entryToObject(DatabaseEntry key, DatabaseEntry data) { + + return entryToObject(keyBinding.entryToObject(key), + dataBinding.entryToObject(data)); + } + + // javadoc is inherited + public void objectToKey(E object, DatabaseEntry key) { + + K keyObject = objectToKey(object); + keyBinding.objectToEntry(keyObject, key); + } + + // javadoc is inherited + public void objectToData(E object, DatabaseEntry data) { + + D dataObject = objectToData(object); + dataBinding.objectToEntry(dataObject, data); + } + + /** + * Constructs an entity object from deserialized key and data objects. + * + * @param keyInput is the deserialized key object. + * + * @param dataInput is the deserialized data object. + * + * @return the entity object constructed from the key and data. + */ + public abstract E entryToObject(K keyInput, D dataInput); + + /** + * Extracts a key object from an entity object. + * + * @param object is the entity object. + * + * @return the deserialized key object. + */ + public abstract K objectToKey(E object); + + /** + * Extracts a data object from an entity object. + * + * @param object is the entity object. + * + * @return the deserialized data object. + */ + public abstract D objectToData(E object); +} diff --git a/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java b/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java new file mode 100644 index 0000000..6786367 --- /dev/null +++ b/src/com/sleepycat/bind/serial/SerialSerialKeyCreator.java @@ -0,0 +1,152 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.ForeignKeyNullifier; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; + +/** + * A abstract key creator that uses a serial key and a serial data entry. + * This class takes care of serializing and deserializing the key and data + * entry automatically. 
+ * The following abstract method must be implemented by a concrete subclass
+ * to create the index key using these objects:
+ * <ul>
+ * <li>{@link #createSecondaryKey(Object,Object)}</li>
+ * </ul>

+ * <p>If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was
+ * specified when opening the secondary database, the following method must
+ * be overridden to nullify the foreign index key.  If NULLIFY was not
+ * specified, this method need not be overridden.</p>
+ * <ul>
+ * <li>{@link #nullifyForeignKey(Object)}</li>
+ * </ul>
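+ *
+ * <p>A sketch of a concrete key creator that indexes suppliers by city
+ * (the SupplierKey and SupplierData classes are hypothetical):</p>
+ * <pre>
+ * class SupplierByCityKeyCreator
+ *     extends SerialSerialKeyCreator&lt;SupplierKey,SupplierData,String&gt; {
+ *
+ *     SupplierByCityKeyCreator(ClassCatalog catalog) {
+ *         super(catalog, SupplierKey.class, SupplierData.class, String.class);
+ *     }
+ *     public String createSecondaryKey(SupplierKey key, SupplierData data) {
+ *         return data.getCity();  // null means no index key for this record
+ *     }
+ * }
+ * </pre>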
    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public abstract class SerialSerialKeyCreator + implements SecondaryKeyCreator, ForeignKeyNullifier { + + protected SerialBinding primaryKeyBinding; + protected SerialBinding dataBinding; + protected SerialBinding indexKeyBinding; + + /** + * Creates a serial-serial key creator. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param primaryKeyClass is the primary key base class. + * + * @param dataClass is the data base class. + * + * @param indexKeyClass is the index key base class. + */ + public SerialSerialKeyCreator(ClassCatalog classCatalog, + Class primaryKeyClass, + Class dataClass, + Class indexKeyClass) { + + this(new SerialBinding(classCatalog, primaryKeyClass), + new SerialBinding(classCatalog, dataClass), + new SerialBinding(classCatalog, indexKeyClass)); + } + + /** + * Creates a serial-serial entity binding. + * + * @param primaryKeyBinding is the primary key binding. + * + * @param dataBinding is the data binding. + * + * @param indexKeyBinding is the index key binding. + */ + public SerialSerialKeyCreator(SerialBinding primaryKeyBinding, + SerialBinding dataBinding, + SerialBinding indexKeyBinding) { + + this.primaryKeyBinding = primaryKeyBinding; + this.dataBinding = dataBinding; + this.indexKeyBinding = indexKeyBinding; + } + + // javadoc is inherited + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyEntry, + DatabaseEntry dataEntry, + DatabaseEntry indexKeyEntry) { + PK primaryKeyInput = + primaryKeyBinding.entryToObject(primaryKeyEntry); + D dataInput = dataBinding.entryToObject(dataEntry); + SK indexKey = createSecondaryKey(primaryKeyInput, dataInput); + if (indexKey != null) { + indexKeyBinding.objectToEntry(indexKey, indexKeyEntry); + return true; + } else { + return false; + } + } + + // javadoc is inherited + public boolean nullifyForeignKey(SecondaryDatabase db, + DatabaseEntry dataEntry) { + D data = dataBinding.entryToObject(dataEntry); + data = nullifyForeignKey(data); + if (data != null) { + dataBinding.objectToEntry(data, dataEntry); + return true; + } else { + return false; + } + } + + /** + * Creates the index key object from primary key and data objects. + * + * @param primaryKey is the deserialized source primary key entry, or + * null if no primary key entry is used to construct the index key. + * + * @param data is the deserialized source data entry, or null if no + * data entry is used to construct the index key. + * + * @return the destination index key object, or null to indicate that + * the key is not present. + */ + public abstract SK createSecondaryKey(PK primaryKey, D data); + + /** + * Clears the index key in a data object. + * + *

+ * <p>On entry the data parameter contains the index key to be cleared.  It
+ * should be changed by this method such that {@link #createSecondaryKey}
+ * will return false.  Other fields in the data object should remain
+ * unchanged.</p>
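+ *
+ * <p>For example, continuing the hypothetical supplier-by-city key creator
+ * above, an override might clear the city field:</p>
+ * <pre>
+ * public SupplierData nullifyForeignKey(SupplierData data) {
+ *     if (data.getCity() == null) {
+ *         return null;       // key not present, no change necessary
+ *     }
+ *     data.setCity(null);    // clear the index key field
+ *     return data;
+ * }
+ * </pre>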

    + * + * @param data is the source and destination data object. + * + * @return the destination data object, or null to indicate that the + * key is not present and no change is necessary. The data returned may + * be the same object passed as the data parameter or a newly created + * object. + */ + public D nullifyForeignKey(D data) { + + return null; + } +} diff --git a/src/com/sleepycat/bind/serial/StoredClassCatalog.java b/src/com/sleepycat/bind/serial/StoredClassCatalog.java new file mode 100644 index 0000000..a28be2d --- /dev/null +++ b/src/com/sleepycat/bind/serial/StoredClassCatalog.java @@ -0,0 +1,499 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; +import java.io.Serializable; +import java.math.BigInteger; +import java.util.HashMap; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.UtfOps; + +/** + * A ClassCatalog that is stored in a Database. + * + *

+ * <p>A single StoredClassCatalog object is normally used along
+ * with a set of databases that store serialized objects.</p>
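+ *
+ * <p>A typical setup sketch, assuming an open Environment (the database
+ * name and entity class here are hypothetical):</p>
+ * <pre>
+ * DatabaseConfig dbConfig = new DatabaseConfig();
+ * dbConfig.setAllowCreate(true);
+ * Database catalogDb = env.openDatabase(null, "classCatalog", dbConfig);
+ * ClassCatalog catalog = new StoredClassCatalog(catalogDb);
+ * SerialBinding binding = new SerialBinding(catalog, MyData.class);
+ * </pre>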

    + * + * @author Mark Hayes + * + * @see Class Evolution + */ +public class StoredClassCatalog implements ClassCatalog { + + /* + * Record types ([key] [data]): + * + * [0] [next class ID] + * [1 / class ID] [ObjectStreamClass (class format)] + * [2 / class name] [ClassInfo (has 8 byte class ID)] + */ + private static final byte REC_LAST_CLASS_ID = (byte) 0; + private static final byte REC_CLASS_FORMAT = (byte) 1; + private static final byte REC_CLASS_INFO = (byte) 2; + + private static final byte[] LAST_CLASS_ID_KEY = {REC_LAST_CLASS_ID}; + + private Database db; + private HashMap classMap; + private HashMap formatMap; + private LockMode writeLockMode; + private boolean cdbMode; + private boolean txnMode; + + /** + * Creates a catalog based on a given database. To save resources, only a + * single catalog object should be used for each unique catalog database. + * + * @param database an open database to use as the class catalog. It must + * be a BTREE database and must not allow duplicates. + * + * @throws DatabaseException if an error occurs accessing the database. + * + * @throws IllegalArgumentException if the database is not a BTREE database + * or if it configured to allow duplicates. + */ + public StoredClassCatalog(Database database) + throws DatabaseException, IllegalArgumentException { + + db = database; + DatabaseConfig dbConfig = db.getConfig(); + EnvironmentConfig envConfig = db.getEnvironment().getConfig(); + + writeLockMode = (DbCompat.getInitializeLocking(envConfig) || + envConfig.getTransactional()) ? LockMode.RMW + : LockMode.DEFAULT; + cdbMode = DbCompat.getInitializeCDB(envConfig); + txnMode = dbConfig.getTransactional(); + + if (!DbCompat.isTypeBtree(dbConfig)) { + throw new IllegalArgumentException + ("The class catalog must be a BTREE database."); + } + if (DbCompat.getSortedDuplicates(dbConfig) || + DbCompat.getUnsortedDuplicates(dbConfig)) { + throw new IllegalArgumentException + ("The class catalog database must not allow duplicates."); + } + + /* + * Create the class format and class info maps. Note that these are not + * synchronized, and therefore the methods that use them are + * synchronized. + */ + classMap = new HashMap(); + formatMap = new HashMap(); + + DatabaseEntry key = new DatabaseEntry(LAST_CLASS_ID_KEY); + DatabaseEntry data = new DatabaseEntry(); + if (dbConfig.getReadOnly()) { + /* Check that the class ID record exists. */ + OperationStatus status = db.get(null, key, data, null); + if (status != OperationStatus.SUCCESS) { + throw DbCompat.unexpectedState + ("A read-only catalog database may not be empty"); + } + } else { + /* Add the initial class ID record if it doesn't exist. */ + data.setData(new byte[1]); // zero ID + + /* + * Query the record before writing it to the database, to avoid + * ReplicaWriteException while opening a StoredClassCatalog on the + * replicas. 
+ */ + OperationStatus status = db.get(null, key, data, null); + if (status == OperationStatus.NOTFOUND) { + db.putNoOverwrite(null, key, data); + } + } + } + + // javadoc is inherited + public synchronized void close() + throws DatabaseException { + + if (db != null) { + db.close(); + } + db = null; + formatMap = null; + classMap = null; + } + + // javadoc is inherited + public synchronized byte[] getClassID(ObjectStreamClass classFormat) + throws DatabaseException, ClassNotFoundException { + + ClassInfo classInfo = getClassInfo(classFormat); + return classInfo.getClassID(); + } + + // javadoc is inherited + public synchronized ObjectStreamClass getClassFormat(byte[] classID) + throws DatabaseException, ClassNotFoundException { + + return getClassFormat(classID, new DatabaseEntry()); + } + + /** + * Internal function for getting the class format. Allows passing the + * DatabaseEntry object for the data, so the bytes of the class format can + * be examined afterwards. + */ + private ObjectStreamClass getClassFormat(byte[] classID, + DatabaseEntry data) + throws DatabaseException, ClassNotFoundException { + + /* First check the map and, if found, add class info to the map. */ + + BigInteger classIDObj = new BigInteger(classID); + ObjectStreamClass classFormat = formatMap.get(classIDObj); + if (classFormat == null) { + + /* Make the class format key. */ + + byte[] keyBytes = new byte[classID.length + 1]; + keyBytes[0] = REC_CLASS_FORMAT; + System.arraycopy(classID, 0, keyBytes, 1, classID.length); + DatabaseEntry key = new DatabaseEntry(keyBytes); + + /* Read the class format. */ + + OperationStatus status = db.get(null, key, data, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + throw new ClassNotFoundException("Catalog class ID not found"); + } + try { + ObjectInputStream ois = + new ObjectInputStream( + new ByteArrayInputStream(data.getData(), + data.getOffset(), + data.getSize())); + classFormat = (ObjectStreamClass) ois.readObject(); + } catch (IOException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + + /* Update the class format map. */ + + formatMap.put(classIDObj, classFormat); + } + return classFormat; + } + + /** + * Get the ClassInfo for a given class name, adding it and its + * ObjectStreamClass to the database if they are not already present, and + * caching both of them using the class info and class format maps. When a + * class is first loaded from the database, the stored ObjectStreamClass is + * compared to the current ObjectStreamClass loaded by the Java class + * loader; if they are different, a new class ID is assigned for the + * current format. + */ + private ClassInfo getClassInfo(ObjectStreamClass classFormat) + throws DatabaseException, ClassNotFoundException { + + /* + * First check for a cached copy of the class info, which if + * present always contains the class format object. + */ + String className = classFormat.getName(); + ClassInfo classInfo = classMap.get(className); + if (classInfo != null) { + return classInfo; + } else { + /* Make class info key. */ + char[] nameChars = className.toCharArray(); + byte[] keyBytes = new byte[1 + UtfOps.getByteLength(nameChars)]; + keyBytes[0] = REC_CLASS_INFO; + UtfOps.charsToBytes(nameChars, 0, keyBytes, 1, nameChars.length); + DatabaseEntry key = new DatabaseEntry(keyBytes); + + /* Read class info. 
*/ + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = db.get(null, key, data, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + + /* + * Not found in the database, write class info and class + * format. + */ + classInfo = putClassInfo(new ClassInfo(), className, key, + classFormat); + } else { + /* + * Read class info to get the class format key, then read class + * format. + */ + classInfo = new ClassInfo(data); + DatabaseEntry formatData = new DatabaseEntry(); + ObjectStreamClass storedClassFormat = + getClassFormat(classInfo.getClassID(), formatData); + + /* + * Compare the stored class format to the current class format, + * and if they are different then generate a new class ID. + */ + if (!areClassFormatsEqual(storedClassFormat, + getBytes(formatData), + classFormat)) { + classInfo = putClassInfo(classInfo, className, key, + classFormat); + } + + /* Update the class info map. */ + classInfo.setClassFormat(classFormat); + classMap.put(className, classInfo); + } + } + return classInfo; + } + + /** + * Assign a new class ID (increment the current ID record), write the + * ObjectStreamClass record for this new ID, and update the ClassInfo + * record with the new ID also. The ClassInfo passed as an argument is the + * one to be updated. + */ + private ClassInfo putClassInfo(ClassInfo classInfo, + String className, + DatabaseEntry classKey, + ObjectStreamClass classFormat) + throws DatabaseException { + + /* An intent-to-write cursor is needed for CDB. */ + CursorConfig cursorConfig = null; + if (cdbMode) { + cursorConfig = new CursorConfig(); + DbCompat.setWriteCursor(cursorConfig, true); + } + Cursor cursor = null; + Transaction txn = null; + try { + if (txnMode) { + txn = db.getEnvironment().beginTransaction(null, null); + } + cursor = db.openCursor(txn, cursorConfig); + + /* Get the current class ID. */ + DatabaseEntry key = new DatabaseEntry(LAST_CLASS_ID_KEY); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = cursor.getSearchKey(key, data, + writeLockMode); + if (status != OperationStatus.SUCCESS) { + throw DbCompat.unexpectedState("Class ID not initialized"); + } + byte[] idBytes = getBytes(data); + + /* Increment the ID by one and write the updated record. */ + idBytes = incrementID(idBytes); + data.setData(idBytes); + cursor.put(key, data); + + /* + * Write the new class format record whose key is the ID just + * assigned. + */ + byte[] keyBytes = new byte[1 + idBytes.length]; + keyBytes[0] = REC_CLASS_FORMAT; + System.arraycopy(idBytes, 0, keyBytes, 1, idBytes.length); + key.setData(keyBytes); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos; + try { + oos = new ObjectOutputStream(baos); + oos.writeObject(classFormat); + } catch (IOException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + data.setData(baos.toByteArray()); + + cursor.put(key, data); + + /* + * Write the new class info record, using the key passed in; this + * is done last so that a reader who gets the class info record + * first will always find the corresponding class format record. + */ + classInfo.setClassID(idBytes); + classInfo.toDbt(data); + + cursor.put(classKey, data); + + /* + * Update the maps before closing the cursor, so that the cursor + * lock prevents other writers from duplicating this entry. 
+ */ + classInfo.setClassFormat(classFormat); + classMap.put(className, classInfo); + formatMap.put(new BigInteger(idBytes), classFormat); + return classInfo; + } finally { + if (cursor != null) { + cursor.close(); + } + if (txn != null) { + txn.commit(); + } + } + } + + private static byte[] incrementID(byte[] key) { + + BigInteger id = new BigInteger(key); + id = id.add(BigInteger.valueOf(1)); + return id.toByteArray(); + } + + /** + * Holds the class format key for a class, maintains a reference to the + * ObjectStreamClass. Other fields can be added when we need to store more + * information per class. + */ + private static class ClassInfo implements Serializable { + static final long serialVersionUID = 3845446969989650562L; + + private byte[] classID; + private transient ObjectStreamClass classFormat; + + ClassInfo() { + } + + ClassInfo(DatabaseEntry dbt) { + + byte[] data = dbt.getData(); + int len = data[0]; + classID = new byte[len]; + System.arraycopy(data, 1, classID, 0, len); + } + + void toDbt(DatabaseEntry dbt) { + + byte[] data = new byte[1 + classID.length]; + data[0] = (byte) classID.length; + System.arraycopy(classID, 0, data, 1, classID.length); + dbt.setData(data); + } + + void setClassID(byte[] classID) { + + this.classID = classID; + } + + byte[] getClassID() { + + return classID; + } + + ObjectStreamClass getClassFormat() { + + return classFormat; + } + + void setClassFormat(ObjectStreamClass classFormat) { + + this.classFormat = classFormat; + } + } + + /** + * Return whether two class formats are equal. This determines whether a + * new class format is needed for an object being serialized. Formats must + * be identical in all respects, or a new format is needed. + */ + private static boolean areClassFormatsEqual(ObjectStreamClass format1, + byte[] format1Bytes, + ObjectStreamClass format2) { + try { + if (format1Bytes == null) { // using cached format1 object + format1Bytes = getObjectBytes(format1); + } + byte[] format2Bytes = getObjectBytes(format2); + return java.util.Arrays.equals(format2Bytes, format1Bytes); + } catch (IOException e) { return false; } + } + + /* + * We can return the same byte[] for 0 length arrays. + */ + private static byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0]; + + private static byte[] getBytes(DatabaseEntry dbt) { + byte[] b = dbt.getData(); + if (b == null) { + return null; + } + if (dbt.getOffset() == 0 && b.length == dbt.getSize()) { + return b; + } + int len = dbt.getSize(); + if (len == 0) { + return ZERO_LENGTH_BYTE_ARRAY; + } else { + byte[] t = new byte[len]; + System.arraycopy(b, dbt.getOffset(), t, 0, t.length); + return t; + } + } + + private static byte[] getObjectBytes(Object o) + throws IOException { + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(o); + return baos.toByteArray(); + } + + /** + * For BDB JE, returns the ClassLoader property of the catalog database + * environment. This ensures that the Environment's ClassLoader property + * is used for loading all user-supplied classes. + * + *

+ * <p>For BDB, this method returns null because no Environment ClassLoader
+ * property is available.  This method may be overridden to return a
+ * ClassLoader.</p>
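+ *
+ * <p>For example, an override might supply an application class loader
+ * (a sketch; the loader variable is hypothetical):</p>
+ * <pre>
+ * StoredClassCatalog catalog = new StoredClassCatalog(catalogDb) {
+ *     public ClassLoader getClassLoader() {
+ *         return myAppClassLoader;
+ *     }
+ * };
+ * </pre>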

    + */ + public ClassLoader getClassLoader() { + try { + return DbCompat.getClassLoader(db.getEnvironment()); + } catch (DatabaseException e) { + + /* + * DatabaseException is declared to be thrown by getEnvironment in + * DB (not JE), but this should never happen in practice. + */ + throw new RuntimeException(e); + } + } +} diff --git a/src/com/sleepycat/bind/serial/TupleSerialBinding.java b/src/com/sleepycat/bind/serial/TupleSerialBinding.java new file mode 100644 index 0000000..4e689bd --- /dev/null +++ b/src/com/sleepycat/bind/serial/TupleSerialBinding.java @@ -0,0 +1,121 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.DatabaseEntry; + +/** + * An abstract EntityBinding that treats an entity's key entry as + * a tuple and its data entry as a serialized object. + * + *

+ * <p>This class takes care of serializing and deserializing the data entry,
+ * and converting the key entry to/from {@link TupleInput} and {@link
+ * TupleOutput} objects.  Its three abstract methods must be implemented by a
+ * concrete subclass to convert these objects to/from an entity object.</p>
+ * <ul>
+ * <li>{@link #entryToObject(TupleInput,Object)}</li>
+ * <li>{@link #objectToKey(Object,TupleOutput)}</li>
+ * <li>{@link #objectToData(Object)}</li>
+ * </ul>
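+ *
+ * <p>For example, a concrete subclass might store a part number as the
+ * tuple key and a serialized data object as the entry data (the Part and
+ * PartData classes are hypothetical):</p>
+ * <pre>
+ * class PartBinding extends TupleSerialBinding&lt;PartData,Part&gt; {
+ *     PartBinding(ClassCatalog catalog) {
+ *         super(catalog, PartData.class);
+ *     }
+ *     public Part entryToObject(TupleInput keyInput, PartData data) {
+ *         return new Part(keyInput.readString(), data);
+ *     }
+ *     public void objectToKey(Part part, TupleOutput keyOutput) {
+ *         keyOutput.writeString(part.getNumber());
+ *     }
+ *     public PartData objectToData(Part part) {
+ *         return part.getData();
+ *     }
+ * }
+ * </pre>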
    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public abstract class TupleSerialBinding extends TupleBase + implements EntityBinding { + + protected SerialBinding dataBinding; + + /** + * Creates a tuple-serial entity binding. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param baseClass is the base class. + */ + public TupleSerialBinding(ClassCatalog classCatalog, + Class baseClass) { + + this(new SerialBinding(classCatalog, baseClass)); + } + + /** + * Creates a tuple-serial entity binding. + * + * @param dataBinding is the data binding. + */ + public TupleSerialBinding(SerialBinding dataBinding) { + + this.dataBinding = dataBinding; + } + + // javadoc is inherited + public E entryToObject(DatabaseEntry key, DatabaseEntry data) { + + return entryToObject(entryToInput(key), + dataBinding.entryToObject(data)); + } + + // javadoc is inherited + public void objectToKey(E object, DatabaseEntry key) { + + TupleOutput output = getTupleOutput(object); + objectToKey(object, output); + outputToEntry(output, key); + } + + // javadoc is inherited + public void objectToData(E object, DatabaseEntry data) { + + D dataObject = objectToData(object); + dataBinding.objectToEntry(dataObject, data); + } + + /** + * Constructs an entity object from {@link TupleInput} key entry and + * deserialized data entry objects. + * + * @param keyInput is the {@link TupleInput} key entry object. + * + * @param dataInput is the deserialized data entry object. + * + * @return the entity object constructed from the key and data. + */ + public abstract E entryToObject(TupleInput keyInput, D dataInput); + + /** + * Extracts a key tuple from an entity object. + * + * @param object is the entity object. + * + * @param keyOutput is the {@link TupleOutput} to which the key should be + * written. + */ + public abstract void objectToKey(E object, TupleOutput keyOutput); + + /** + * Extracts a data object from an entity object. + * + * @param object is the entity object. + * + * @return the deserialized data object. + */ + public abstract D objectToData(E object); +} diff --git a/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java b/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java new file mode 100644 index 0000000..00b16d1 --- /dev/null +++ b/src/com/sleepycat/bind/serial/TupleSerialKeyCreator.java @@ -0,0 +1,146 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.ForeignKeyNullifier; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; + +/** + * A abstract key creator that uses a tuple key and a serial data entry. 
+ * This class takes care of serializing and deserializing the data entry,
+ * and converting the key entry to/from {@link TupleInput} and {@link
+ * TupleOutput} objects.
+ * The following abstract method must be implemented by a concrete subclass
+ * to create the index key using these objects:
+ * <ul>
+ * <li>{@link #createSecondaryKey(TupleInput,Object,TupleOutput)}</li>
+ * </ul>
+ *
+ * <p>If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was
+ * specified when opening the secondary database, the following method must
+ * be overridden to nullify the foreign index key.  If NULLIFY was not
+ * specified, this method need not be overridden.</p>
+ * <ul>
+ * <li>{@link #nullifyForeignKey(Object)}</li>
+ * </ul>
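+ *
+ * <p>A sketch of a concrete key creator (the SupplierData class is
+ * hypothetical):</p>
+ * <pre>
+ * class CityKeyCreator extends TupleSerialKeyCreator&lt;SupplierData&gt; {
+ *     CityKeyCreator(ClassCatalog catalog) {
+ *         super(catalog, SupplierData.class);
+ *     }
+ *     public boolean createSecondaryKey(TupleInput primaryKeyInput,
+ *                                       SupplierData data,
+ *                                       TupleOutput indexKeyOutput) {
+ *         if (data.getCity() == null) {
+ *             return false;  // optional key is not present
+ *         }
+ *         indexKeyOutput.writeString(data.getCity());
+ *         return true;
+ *     }
+ * }
+ * </pre>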
    + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public abstract class TupleSerialKeyCreator extends TupleBase + implements SecondaryKeyCreator, ForeignKeyNullifier { + + protected SerialBinding dataBinding; + + /** + * Creates a tuple-serial key creator. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param dataClass is the data base class. + */ + public TupleSerialKeyCreator(ClassCatalog classCatalog, + Class dataClass) { + + this(new SerialBinding(classCatalog, dataClass)); + } + + /** + * Creates a tuple-serial key creator. + * + * @param dataBinding is the data binding. + */ + public TupleSerialKeyCreator(SerialBinding dataBinding) { + + this.dataBinding = dataBinding; + } + + // javadoc is inherited + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyEntry, + DatabaseEntry dataEntry, + DatabaseEntry indexKeyEntry) { + TupleOutput output = getTupleOutput(null); + TupleInput primaryKeyInput = entryToInput(primaryKeyEntry); + D dataInput = dataBinding.entryToObject(dataEntry); + if (createSecondaryKey(primaryKeyInput, dataInput, output)) { + outputToEntry(output, indexKeyEntry); + return true; + } else { + return false; + } + } + + // javadoc is inherited + public boolean nullifyForeignKey(SecondaryDatabase db, + DatabaseEntry dataEntry) { + D data = dataBinding.entryToObject(dataEntry); + data = nullifyForeignKey(data); + if (data != null) { + dataBinding.objectToEntry(data, dataEntry); + return true; + } else { + return false; + } + } + + /** + * Creates the index key entry from primary key tuple entry and + * deserialized data entry. + * + * @param primaryKeyInput is the {@link TupleInput} for the primary key + * entry, or null if no primary key entry is used to construct the index + * key. + * + * @param dataInput is the deserialized data entry, or null if no data + * entry is used to construct the index key. + * + * @param indexKeyOutput is the destination index key tuple. For index + * keys which are optionally present, no tuple entry should be output to + * indicate that the key is not present or null. + * + * @return true if a key was created, or false to indicate that the key is + * not present. + */ + public abstract boolean createSecondaryKey(TupleInput primaryKeyInput, + D dataInput, + TupleOutput indexKeyOutput); + + /** + * Clears the index key in the deserialized data entry. + * + *

+ * <p>On entry the data parameter contains the index key to be cleared.  It
+ * should be changed by this method such that {@link #createSecondaryKey}
+ * will return false.  Other fields in the data object should remain
+ * unchanged.</p>

    + * + * @param data is the source and destination deserialized data + * entry. + * + * @return the destination data object, or null to indicate that the + * key is not present and no change is necessary. The data returned may + * be the same object passed as the data parameter or a newly created + * object. + */ + public D nullifyForeignKey(D data) { + + return null; + } +} diff --git a/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java b/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java new file mode 100644 index 0000000..2292b6d --- /dev/null +++ b/src/com/sleepycat/bind/serial/TupleSerialMarshalledBinding.java @@ -0,0 +1,98 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A concrete TupleSerialBinding that delegates to the + * MarshalledTupleKeyEntity interface of the entity class. + * + *

+ * <p>The {@link MarshalledTupleKeyEntity} interface must be implemented by
+ * the entity class to convert between the key/data entry and entity
+ * object.</p>
+ *
+ * <p>The binding is "tricky" in that it uses the entity class for both the
+ * stored data entry and the combined entity object.  To do this, the
+ * entity's key field(s) are transient and are set by the binding after the
+ * data object has been deserialized.  This avoids the use of a "data" class
+ * completely.</p>

    + * + * @see MarshalledTupleKeyEntity + * @see Class Evolution + * + * @author Mark Hayes + */ +public class TupleSerialMarshalledBinding + extends TupleSerialBinding { + + /** + * Creates a tuple-serial marshalled binding object. + * + * @param classCatalog is the catalog to hold shared class information and + * for a database should be a {@link StoredClassCatalog}. + * + * @param baseClass is the base class for serialized objects stored using + * this binding -- all objects using this binding must be an instance of + * this class. + */ + public TupleSerialMarshalledBinding(ClassCatalog classCatalog, + Class baseClass) { + + this(new SerialBinding(classCatalog, baseClass)); + } + + /** + * Creates a tuple-serial marshalled binding object. + * + * @param dataBinding is the binding used for serializing and deserializing + * the entity object. + */ + public TupleSerialMarshalledBinding(SerialBinding dataBinding) { + + super(dataBinding); + } + + // javadoc is inherited + public E entryToObject(TupleInput tupleInput, E javaInput) { + + /* + * Creates the entity by combining the stored key and data. + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + if (tupleInput != null) { // may be null if not used by key extractor + javaInput.unmarshalPrimaryKey(tupleInput); + } + return javaInput; + } + + // javadoc is inherited + public void objectToKey(E object, TupleOutput output) { + + /* Creates the stored key from the entity. */ + object.marshalPrimaryKey(output); + } + + // javadoc is inherited + public E objectToData(E object) { + + /* + * Returns the entity as the stored data. There is nothing to do here + * since the entity's key fields are transient. + */ + return object; + } +} diff --git a/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java b/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java new file mode 100644 index 0000000..3523eaf --- /dev/null +++ b/src/com/sleepycat/bind/serial/TupleSerialMarshalledKeyCreator.java @@ -0,0 +1,83 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * A concrete key creator that works in conjunction with a {@link + * TupleSerialMarshalledBinding}. This key creator works by calling the + * methods of the {@link MarshalledTupleKeyEntity} interface to create and + * clear the index key fields. + * + * @see Class Evolution + * + * @author Mark Hayes + */ +public class TupleSerialMarshalledKeyCreator + extends TupleSerialKeyCreator { + + private TupleSerialMarshalledBinding binding; + private String keyName; + + /** + * Creates a tuple-serial marshalled key creator. + * + * @param binding is the binding used for the tuple-serial entity. + * + * @param keyName is the key name passed to the {@link + * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the + * index key. 
+ */ + public TupleSerialMarshalledKeyCreator(TupleSerialMarshalledBinding + binding, + String keyName) { + + super(binding.dataBinding); + this.binding = binding; + this.keyName = keyName; + + if (dataBinding == null) { + throw new NullPointerException("dataBinding may not be null"); + } + } + + // javadoc is inherited + public boolean createSecondaryKey(TupleInput primaryKeyInput, + D dataInput, + TupleOutput indexKeyOutput) { + + /* + * The primary key is unmarshalled before marshalling the index key, to + * account for cases where the index key includes fields taken from the + * primary key. + */ + MarshalledTupleKeyEntity entity = + binding.entryToObject(primaryKeyInput, dataInput); + + return entity.marshalSecondaryKey(keyName, indexKeyOutput); + } + + // javadoc is inherited + public D nullifyForeignKey(D dataInput) { + + MarshalledTupleKeyEntity entity = + binding.entryToObject(null, dataInput); + + return entity.nullifyForeignKey(keyName) ? dataInput : null; + } +} diff --git a/src/com/sleepycat/bind/serial/package.html b/src/com/sleepycat/bind/serial/package.html new file mode 100644 index 0000000..774b62b --- /dev/null +++ b/src/com/sleepycat/bind/serial/package.html @@ -0,0 +1,9 @@ + + +Bindings that use Java serialization. + +@see [Getting Started Guide] + + + diff --git a/src/com/sleepycat/bind/tuple/BigDecimalBinding.java b/src/com/sleepycat/bind/tuple/BigDecimalBinding.java new file mode 100644 index 0000000..936357d --- /dev/null +++ b/src/com/sleepycat/bind/tuple/BigDecimalBinding.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import java.math.BigDecimal; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for an unsorted BigDecimal + * value. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance
+ * of this class can be used with any stored collection.</li>
+ * </ol>
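+ *
+ * <p>For example, using the static methods directly:</p>
+ * <pre>
+ * DatabaseEntry entry = new DatabaseEntry();
+ * BigDecimalBinding.bigDecimalToEntry(new BigDecimal("12.34"), entry);
+ * BigDecimal value = BigDecimalBinding.entryToBigDecimal(entry);
+ * </pre>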
    + * + * @see BigDecimal Formats + */ +public class BigDecimalBinding extends TupleBinding { + + // javadoc is inherited + public BigDecimal entryToObject(TupleInput input) { + + return input.readBigDecimal(); + } + + // javadoc is inherited + public void objectToEntry(BigDecimal object, TupleOutput output) { + + output.writeBigDecimal(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(BigDecimal object) { + + return sizedOutput(object); + } + + /** + * Converts an entry buffer into a BigDecimal value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static BigDecimal entryToBigDecimal(DatabaseEntry entry) { + + return entryToInput(entry).readBigDecimal(); + } + + /** + * Converts a BigDecimal value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void bigDecimalToEntry(BigDecimal val, DatabaseEntry entry) { + + outputToEntry(sizedOutput(val).writeBigDecimal(val), entry); + } + + /** + * Returns a tuple output object of the maximum size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput(BigDecimal val) { + + int len = TupleOutput.getBigDecimalMaxByteLength(val); + return new TupleOutput(new byte[len]); + } +} diff --git a/src/com/sleepycat/bind/tuple/BigIntegerBinding.java b/src/com/sleepycat/bind/tuple/BigIntegerBinding.java new file mode 100644 index 0000000..861188e --- /dev/null +++ b/src/com/sleepycat/bind/tuple/BigIntegerBinding.java @@ -0,0 +1,81 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import java.math.BigInteger; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a BigInteger value. + * + *

+ * <p>This class produces byte array values that by default (without a
+ * custom comparator) sort correctly.</p>
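+ *
+ * <p>For example, using the static methods directly:</p>
+ * <pre>
+ * DatabaseEntry entry = new DatabaseEntry();
+ * BigIntegerBinding.bigIntegerToEntry(BigInteger.valueOf(42), entry);
+ * BigInteger value = BigIntegerBinding.entryToBigInteger(entry);
+ * </pre>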

    + * + * @see Integer Formats + */ +public class BigIntegerBinding extends TupleBinding { + + // javadoc is inherited + public BigInteger entryToObject(TupleInput input) { + + return input.readBigInteger(); + } + + // javadoc is inherited + public void objectToEntry(BigInteger object, TupleOutput output) { + + output.writeBigInteger(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(BigInteger object) { + + return sizedOutput(object); + } + + /** + * Converts an entry buffer into a BigInteger value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static BigInteger entryToBigInteger(DatabaseEntry entry) { + + return entryToInput(entry).readBigInteger(); + } + + /** + * Converts a BigInteger value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void bigIntegerToEntry(BigInteger val, DatabaseEntry entry) { + + outputToEntry(sizedOutput(val).writeBigInteger(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput(BigInteger val) { + + int len = TupleOutput.getBigIntegerByteLength(val); + return new TupleOutput(new byte[len]); + } +} diff --git a/src/com/sleepycat/bind/tuple/BooleanBinding.java b/src/com/sleepycat/bind/tuple/BooleanBinding.java new file mode 100644 index 0000000..5d3093a --- /dev/null +++ b/src/com/sleepycat/bind/tuple/BooleanBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a Boolean primitive + * wrapper or a boolean primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance
+ * of this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
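+ *
+ * <p>For example, each approach in a short sketch:</p>
+ * <pre>
+ * // 1. Static methods with a DatabaseEntry.
+ * DatabaseEntry entry = new DatabaseEntry();
+ * BooleanBinding.booleanToEntry(true, entry);
+ * boolean value = BooleanBinding.entryToBoolean(entry);
+ *
+ * // 2. A binding instance for use with stored collections.
+ * EntryBinding&lt;Boolean&gt; binding =
+ *     TupleBinding.getPrimitiveBinding(Boolean.class);
+ * </pre>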
    + * + * @see Integer Formats + */ +public class BooleanBinding extends TupleBinding { + + private static final int BOOLEAN_SIZE = 1; + + // javadoc is inherited + public Boolean entryToObject(TupleInput input) { + + return input.readBoolean(); + } + + // javadoc is inherited + public void objectToEntry(Boolean object, TupleOutput output) { + + output.writeBoolean(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Boolean object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple boolean value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static boolean entryToBoolean(DatabaseEntry entry) { + + return entryToInput(entry).readBoolean(); + } + + /** + * Converts a simple boolean value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void booleanToEntry(boolean val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeBoolean(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[BOOLEAN_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/ByteBinding.java b/src/com/sleepycat/bind/tuple/ByteBinding.java new file mode 100644 index 0000000..c68e6e8 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/ByteBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a Byte primitive + * wrapper or a byte primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance
+ * of this class can be used with any stored collection.  The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + * + * @see Integer Formats + */ +public class ByteBinding extends TupleBinding { + + private static final int BYTE_SIZE = 1; + + // javadoc is inherited + public Byte entryToObject(TupleInput input) { + + return input.readByte(); + } + + // javadoc is inherited + public void objectToEntry(Byte object, TupleOutput output) { + + output.writeByte(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Byte object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple byte value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static byte entryToByte(DatabaseEntry entry) { + + return entryToInput(entry).readByte(); + } + + /** + * Converts a simple byte value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void byteToEntry(byte val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeByte(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[BYTE_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/CharacterBinding.java b/src/com/sleepycat/bind/tuple/CharacterBinding.java new file mode 100644 index 0000000..47358fb --- /dev/null +++ b/src/com/sleepycat/bind/tuple/CharacterBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a Character primitive + * wrapper or a char primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + * + * @see Integer Formats + */ +public class CharacterBinding extends TupleBinding { + + private static final int CHAR_SIZE = 2; + + // javadoc is inherited + public Character entryToObject(TupleInput input) { + + return input.readChar(); + } + + // javadoc is inherited + public void objectToEntry(Character object, TupleOutput output) { + + output.writeChar(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Character object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple char value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static char entryToChar(DatabaseEntry entry) { + + return entryToInput(entry).readChar(); + } + + /** + * Converts a simple char value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void charToEntry(char val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeChar(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[CHAR_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/DoubleBinding.java b/src/com/sleepycat/bind/tuple/DoubleBinding.java new file mode 100644 index 0000000..bb8bceb --- /dev/null +++ b/src/com/sleepycat/bind/tuple/DoubleBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for an unsorted Double + * primitive wrapper or an unsorted double primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + * + * @see Floating Point Formats + */ +public class DoubleBinding extends TupleBinding { + + private static final int DOUBLE_SIZE = 8; + + // javadoc is inherited + public Double entryToObject(TupleInput input) { + + return input.readDouble(); + } + + // javadoc is inherited + public void objectToEntry(Double object, TupleOutput output) { + + output.writeDouble(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Double object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple double value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static double entryToDouble(DatabaseEntry entry) { + + return entryToInput(entry).readDouble(); + } + + /** + * Converts a simple double value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void doubleToEntry(double val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeDouble(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[DOUBLE_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/FloatBinding.java b/src/com/sleepycat/bind/tuple/FloatBinding.java new file mode 100644 index 0000000..6897377 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/FloatBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for an unsorted Float + * primitive wrapper or an unsorted float primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + * + * @see Floating Point Formats + */ +public class FloatBinding extends TupleBinding { + + private static final int FLOAT_SIZE = 4; + + // javadoc is inherited + public Float entryToObject(TupleInput input) { + + return input.readFloat(); + } + + // javadoc is inherited + public void objectToEntry(Float object, TupleOutput output) { + + output.writeFloat(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Float object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple float value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static float entryToFloat(DatabaseEntry entry) { + + return entryToInput(entry).readFloat(); + } + + /** + * Converts a simple float value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void floatToEntry(float val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeFloat(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[FLOAT_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/IntegerBinding.java b/src/com/sleepycat/bind/tuple/IntegerBinding.java new file mode 100644 index 0000000..5fc3026 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/IntegerBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a Integer primitive + * wrapper or an int primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
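The second style relies on a shared binding instance; a sketch, assuming the generic signatures that the stripped type parameters in this listing originally carried:

import com.sleepycat.bind.EntryBinding;
import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.je.DatabaseEntry;

public class IntegerBindingExample {
    public static void main(String[] args) {
        // Style 2: one binding instance, as a stored collection would use it.
        EntryBinding<Integer> binding =
            TupleBinding.getPrimitiveBinding(Integer.class);

        DatabaseEntry entry = new DatabaseEntry();
        binding.objectToEntry(2021, entry);        // four-byte tuple format
        Integer value = binding.entryToObject(entry);
        System.out.println(value);                 // 2021
    }
}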
    + * + * @see Integer Formats + */ +public class IntegerBinding extends TupleBinding { + + private static final int INT_SIZE = 4; + + // javadoc is inherited + public Integer entryToObject(TupleInput input) { + + return input.readInt(); + } + + // javadoc is inherited + public void objectToEntry(Integer object, TupleOutput output) { + + output.writeInt(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Integer object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple int value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static int entryToInt(DatabaseEntry entry) { + + return entryToInput(entry).readInt(); + } + + /** + * Converts a simple int value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void intToEntry(int val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeInt(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[INT_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/LongBinding.java b/src/com/sleepycat/bind/tuple/LongBinding.java new file mode 100644 index 0000000..f0c17f9 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/LongBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a Long primitive + * wrapper or a long primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + * + * @see Integer Formats + */ +public class LongBinding extends TupleBinding { + + private static final int LONG_SIZE = 8; + + // javadoc is inherited + public Long entryToObject(TupleInput input) { + + return input.readLong(); + } + + // javadoc is inherited + public void objectToEntry(Long object, TupleOutput output) { + + output.writeLong(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Long object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple long value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static long entryToLong(DatabaseEntry entry) { + + return entryToInput(entry).readLong(); + } + + /** + * Converts a simple long value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void longToEntry(long val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeLong(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[LONG_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java b/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java new file mode 100644 index 0000000..bc863e2 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/MarshalledTupleEntry.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +/** + * A marshalling interface implemented by key, data or entity classes that + * are represented as tuples. + * + *

    Key classes implement this interface to marshal their key entry. Data or + * entity classes implement this interface to marshal their data entry. + * Implementations of this interface must have a public no arguments + * constructor so that they can be instantiated by a binding, prior to calling + * the {@link #unmarshalEntry} method.

    + * + *

    Note that implementing this interface is not necessary when the object is + * a Java simple type, for example: String, Integer, etc. These types can be + * used with built-in bindings returned by {@link + * TupleBinding#getPrimitiveBinding}.
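A hypothetical data class implementing the interface might look like the following sketch; the PartData name and fields are invented, and the public no-argument constructor is the one the binding requires:

import com.sleepycat.bind.tuple.MarshalledTupleEntry;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PartData implements MarshalledTupleEntry {

    private String name;
    private int quantity;

    public PartData() {}   // required: bindings instantiate via this constructor

    public void marshalEntry(TupleOutput dataOutput) {
        dataOutput.writeString(name);    // fields must be unmarshalled
        dataOutput.writeInt(quantity);   // in exactly this order
    }

    public void unmarshalEntry(TupleInput dataInput) {
        name = dataInput.readString();
        quantity = dataInput.readInt();
    }
}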

    + * + * @author Mark Hayes + * @see TupleTupleMarshalledBinding + */ +public interface MarshalledTupleEntry { + + /** + * Construct the key or data tuple entry from the key or data object. + * + * @param dataOutput is the output tuple. + */ + void marshalEntry(TupleOutput dataOutput); + + /** + * Construct the key or data object from the key or data tuple entry. + * + * @param dataInput is the input tuple. + */ + void unmarshalEntry(TupleInput dataInput); +} diff --git a/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java b/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java new file mode 100644 index 0000000..f6bb84b --- /dev/null +++ b/src/com/sleepycat/bind/tuple/MarshalledTupleKeyEntity.java @@ -0,0 +1,80 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +/** + * A marshalling interface implemented by entity classes that represent keys as + * tuples. Since MarshalledTupleKeyEntity objects are instantiated + * using Java deserialization, no particular constructor is required by classes + * that implement this interface. + * + *

Note that a marshalled tuple key extractor is somewhat less efficient + * than a non-marshalled tuple key extractor because more conversions are + * needed. A marshalled key extractor must convert the entry to an object in + * order to extract the key fields, while an unmarshalled key extractor does + * not.

    + * + * @author Mark Hayes + * @see TupleTupleMarshalledBinding + * @see com.sleepycat.bind.serial.TupleSerialMarshalledBinding + */ +public interface MarshalledTupleKeyEntity { + + /** + * Extracts the entity's primary key and writes it to the key output. + * + * @param keyOutput is the output tuple. + */ + void marshalPrimaryKey(TupleOutput keyOutput); + + /** + * Completes construction of the entity by setting its primary key from the + * stored primary key. + * + * @param keyInput is the input tuple. + */ + void unmarshalPrimaryKey(TupleInput keyInput); + + /** + * Extracts the entity's secondary key and writes it to the key output. + * + * @param keyName identifies the secondary key. + * + * @param keyOutput is the output tuple. + * + * @return true if a key was created, or false to indicate that the key is + * not present. + */ + boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput); + + /** + * Clears the entity's secondary key fields for the given key name. + * + *

    The specified index key should be changed by this method such that + * {@link #marshalSecondaryKey} for the same key name will return false. + * Other fields in the data object should remain unchanged.

    + * + *

    If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was + * specified when opening the secondary database, this method is called + * when the entity for this foreign key is deleted. If NULLIFY was not + * specified, this method will not be called and may always return + * false.
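A sketch of an entity satisfying this contract, with an invented "supplier" secondary key where a null field means the key is absent:

import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class PartEntity implements MarshalledTupleKeyEntity {

    private long partNumber;    // primary key
    private String supplier;    // optional secondary key

    public void marshalPrimaryKey(TupleOutput keyOutput) {
        keyOutput.writeLong(partNumber);
    }

    public void unmarshalPrimaryKey(TupleInput keyInput) {
        partNumber = keyInput.readLong();
    }

    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
        if ("supplier".equals(keyName) && supplier != null) {
            keyOutput.writeString(supplier);
            return true;        // key created
        }
        return false;           // key not present
    }

    public boolean nullifyForeignKey(String keyName) {
        if ("supplier".equals(keyName) && supplier != null) {
            supplier = null;    // marshalSecondaryKey will now return false
            return true;
        }
        return false;           // key not present; no change needed
    }
}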

    + * + * @param keyName identifies the secondary key. + * + * @return true if the key was cleared, or false to indicate that the key + * is not present and no change is necessary. + */ + boolean nullifyForeignKey(String keyName); +} diff --git a/src/com/sleepycat/bind/tuple/PackedIntegerBinding.java b/src/com/sleepycat/bind/tuple/PackedIntegerBinding.java new file mode 100644 index 0000000..5fd22d2 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/PackedIntegerBinding.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.util.PackedInteger; + +/** + * A concrete TupleBinding for an unsorted Integer + * primitive wrapper or an unsorted int primitive, that stores the + * value in the smallest number of bytes possible. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
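The space saving is easy to observe by comparing entry sizes against the fixed-size binding; the expected one-byte result for a small value is an assumption based on the packed format's stated goal of using the fewest bytes possible:

import com.sleepycat.bind.tuple.IntegerBinding;
import com.sleepycat.bind.tuple.PackedIntegerBinding;
import com.sleepycat.je.DatabaseEntry;

public class PackedSizeExample {
    public static void main(String[] args) {
        DatabaseEntry fixed = new DatabaseEntry();
        DatabaseEntry packed = new DatabaseEntry();

        IntegerBinding.intToEntry(7, fixed);           // always 4 bytes
        PackedIntegerBinding.intToEntry(7, packed);    // fewest bytes possible

        System.out.println(fixed.getSize());           // 4
        System.out.println(packed.getSize());          // expected: 1
    }
}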
    + * + * @see Integer Formats + */ +public class PackedIntegerBinding extends TupleBinding { + + // javadoc is inherited + public Integer entryToObject(TupleInput input) { + + return input.readPackedInt(); + } + + // javadoc is inherited + public void objectToEntry(Integer object, TupleOutput output) { + + output.writePackedInt(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Integer object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple int value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static int entryToInt(DatabaseEntry entry) { + + return entryToInput(entry).readPackedInt(); + } + + /** + * Converts a simple int value into an entry buffer, using + * PackedInteger format. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void intToEntry(int val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writePackedInt(val), entry); + } + + /** + * Returns a tuple output object of the maximum size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[PackedInteger.MAX_LENGTH]); + } +} diff --git a/src/com/sleepycat/bind/tuple/PackedLongBinding.java b/src/com/sleepycat/bind/tuple/PackedLongBinding.java new file mode 100644 index 0000000..2f02f68 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/PackedLongBinding.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.util.PackedInteger; + +/** + * A concrete TupleBinding for an unsorted Long + * primitive wrapper or an unsorted long primitive, that stores + * the value in the smallest number of bytes possible. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
    + * + * @see Integer Formats + */ +public class PackedLongBinding extends TupleBinding { + + // javadoc is inherited + public Long entryToObject(TupleInput input) { + + return input.readPackedLong(); + } + + // javadoc is inherited + public void objectToEntry(Long object, TupleOutput output) { + + output.writePackedLong(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Long object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple Long value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static Long entryToLong(DatabaseEntry entry) { + + return entryToInput(entry).readPackedLong(); + } + + /** + * Converts a simple Long value into an entry buffer, using + * PackedLong format. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void longToEntry(long val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writePackedLong(val), entry); + } + + /** + * Returns a tuple output object of the maximum size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[PackedInteger.MAX_LONG_LENGTH]); + } +} diff --git a/src/com/sleepycat/bind/tuple/ShortBinding.java b/src/com/sleepycat/bind/tuple/ShortBinding.java new file mode 100644 index 0000000..d08c666 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/ShortBinding.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a Short primitive + * wrapper or a short primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
    + * + * @see Integer Formats + */ +public class ShortBinding extends TupleBinding { + + private static final int SHORT_SIZE = 2; + + // javadoc is inherited + public Short entryToObject(TupleInput input) { + + return input.readShort(); + } + + // javadoc is inherited + public void objectToEntry(Short object, TupleOutput output) { + + output.writeShort(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Short object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple short value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static short entryToShort(DatabaseEntry entry) { + + return entryToInput(entry).readShort(); + } + + /** + * Converts a simple short value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void shortToEntry(short val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeShort(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[SHORT_SIZE]); + } +} diff --git a/src/com/sleepycat/bind/tuple/SortedBigDecimalBinding.java b/src/com/sleepycat/bind/tuple/SortedBigDecimalBinding.java new file mode 100644 index 0000000..5de9dc6 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/SortedBigDecimalBinding.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import java.math.BigDecimal; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a sorted BigDecimal + * value. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
    + * + * @see BigDecimal Formats + */ +public class SortedBigDecimalBinding extends TupleBinding { + + // javadoc is inherited + public BigDecimal entryToObject(TupleInput input) { + + return input.readSortedBigDecimal(); + } + + // javadoc is inherited + public void objectToEntry(BigDecimal object, TupleOutput output) { + + output.writeSortedBigDecimal(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(BigDecimal object) { + + return sizedOutput(object); + } + + /** + * Converts an entry buffer into a BigDecimal value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static BigDecimal entryToBigDecimal(DatabaseEntry entry) { + + return entryToInput(entry).readSortedBigDecimal(); + } + + /** + * Converts a BigDecimal value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void bigDecimalToEntry(BigDecimal val, DatabaseEntry entry) { + + outputToEntry(sizedOutput(val).writeSortedBigDecimal(val), entry); + } + + /** + * Returns a tuple output object of the maximum size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput(BigDecimal val) { + + int len = TupleOutput.getSortedBigDecimalMaxByteLength(val); + return new TupleOutput(new byte[len]); + } +} diff --git a/src/com/sleepycat/bind/tuple/SortedDoubleBinding.java b/src/com/sleepycat/bind/tuple/SortedDoubleBinding.java new file mode 100644 index 0000000..70cfc62 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/SortedDoubleBinding.java @@ -0,0 +1,77 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a sorted Double + * primitive wrapper or a sorted double primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
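The difference from DoubleBinding shows up only in byte-wise key ordering; this sketch compares the sorted encodings of a negative and a positive value the way a default byte-by-byte key comparator would:

import com.sleepycat.bind.tuple.SortedDoubleBinding;
import com.sleepycat.je.DatabaseEntry;

public class SortedDoubleExample {
    public static void main(String[] args) {
        DatabaseEntry neg = new DatabaseEntry();
        DatabaseEntry pos = new DatabaseEntry();
        SortedDoubleBinding.doubleToEntry(-1.5, neg);
        SortedDoubleBinding.doubleToEntry(1.5, pos);

        // Unsigned lexicographic comparison of the key bytes: with the
        // sorted format the negative value must sort first.
        byte[] a = neg.getData();
        byte[] b = pos.getData();
        int cmp = 0;
        for (int i = 0; i < a.length && cmp == 0; i++) {
            cmp = (a[i] & 0xff) - (b[i] & 0xff);
        }
        System.out.println(cmp < 0);   // expected: true
    }
}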
    + * + * @see Floating Point Formats + */ +public class SortedDoubleBinding extends TupleBinding { + + /* javadoc is inherited */ + public Double entryToObject(TupleInput input) { + + return input.readSortedDouble(); + } + + /* javadoc is inherited */ + public void objectToEntry(Double object, TupleOutput output) { + + output.writeSortedDouble(object); + } + + /* javadoc is inherited */ + protected TupleOutput getTupleOutput(Double object) { + + return DoubleBinding.sizedOutput(); + } + + /** + * Converts an entry buffer into a simple double value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static double entryToDouble(DatabaseEntry entry) { + + return entryToInput(entry).readSortedDouble(); + } + + /** + * Converts a simple double value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void doubleToEntry(double val, DatabaseEntry entry) { + + outputToEntry(DoubleBinding.sizedOutput().writeSortedDouble(val), + entry); + } +} diff --git a/src/com/sleepycat/bind/tuple/SortedFloatBinding.java b/src/com/sleepycat/bind/tuple/SortedFloatBinding.java new file mode 100644 index 0000000..86b2e12 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/SortedFloatBinding.java @@ -0,0 +1,76 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a sorted Float + * primitive wrapper or sorted a float primitive. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
    + * + * @see Floating Point Formats + */ +public class SortedFloatBinding extends TupleBinding { + + /* javadoc is inherited */ + public Float entryToObject(TupleInput input) { + + return input.readSortedFloat(); + } + + /* javadoc is inherited */ + public void objectToEntry(Float object, TupleOutput output) { + + output.writeSortedFloat(object); + } + + /* javadoc is inherited */ + protected TupleOutput getTupleOutput(Float object) { + + return FloatBinding.sizedOutput(); + } + + /** + * Converts an entry buffer into a simple float value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static float entryToFloat(DatabaseEntry entry) { + + return entryToInput(entry).readSortedFloat(); + } + + /** + * Converts a simple float value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void floatToEntry(float val, DatabaseEntry entry) { + + outputToEntry(FloatBinding.sizedOutput().writeSortedFloat(val), entry); + } +} diff --git a/src/com/sleepycat/bind/tuple/SortedPackedIntegerBinding.java b/src/com/sleepycat/bind/tuple/SortedPackedIntegerBinding.java new file mode 100644 index 0000000..2ebd94e --- /dev/null +++ b/src/com/sleepycat/bind/tuple/SortedPackedIntegerBinding.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.util.PackedInteger; + +/** + * A concrete TupleBinding for a sorted Integer + * primitive wrapper or a sorted int primitive, that stores the + * value in the smallest number of bytes possible. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
    + * + * @see Integer Formats + */ +public class SortedPackedIntegerBinding extends TupleBinding { + + // javadoc is inherited + public Integer entryToObject(TupleInput input) { + + return input.readSortedPackedInt(); + } + + // javadoc is inherited + public void objectToEntry(Integer object, TupleOutput output) { + + output.writeSortedPackedInt(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Integer object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple int value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static int entryToInt(DatabaseEntry entry) { + + return entryToInput(entry).readSortedPackedInt(); + } + + /** + * Converts a simple int value into an entry buffer, using + * SortedPackedInteger format. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void intToEntry(int val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeSortedPackedInt(val), entry); + } + + /** + * Returns a tuple output object of the maximum size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[PackedInteger.MAX_LENGTH]); + } +} diff --git a/src/com/sleepycat/bind/tuple/SortedPackedLongBinding.java b/src/com/sleepycat/bind/tuple/SortedPackedLongBinding.java new file mode 100644 index 0000000..322d7a2 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/SortedPackedLongBinding.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.util.PackedInteger; + +/** + * A concrete TupleBinding for a sorted Long + * primitive wrapper or a sorted long primitive, that stores the + * value in the smallest number of bytes possible. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection.</li>
+ * </ol>
    + * + * @see Integer Formats + */ +public class SortedPackedLongBinding extends TupleBinding { + + // javadoc is inherited + public Long entryToObject(TupleInput input) { + + return input.readSortedPackedLong(); + } + + // javadoc is inherited + public void objectToEntry(Long object, TupleOutput output) { + + output.writeSortedPackedLong(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(Long object) { + + return sizedOutput(); + } + + /** + * Converts an entry buffer into a simple Long value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static Long entryToLong(DatabaseEntry entry) { + + return entryToInput(entry).readSortedPackedLong(); + } + + /** + * Converts a simple Long value into an entry buffer, using + * SortedPackedLong format. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void longToEntry(long val, DatabaseEntry entry) { + + outputToEntry(sizedOutput().writeSortedPackedLong(val), entry); + } + + /** + * Returns a tuple output object of the maximum size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput() { + + return new TupleOutput(new byte[PackedInteger.MAX_LONG_LENGTH]); + } +} diff --git a/src/com/sleepycat/bind/tuple/StringBinding.java b/src/com/sleepycat/bind/tuple/StringBinding.java new file mode 100644 index 0000000..f81fa38 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/StringBinding.java @@ -0,0 +1,90 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.UtfOps; +import com.sleepycat.je.DatabaseEntry; + +/** + * A concrete TupleBinding for a simple String value. + * + *

+ * <p>There are two ways to use this class:</p>
+ * <ol>
+ * <li>When using the {@link com.sleepycat.je} package directly, the static
+ * methods in this class can be used to convert between primitive values and
+ * {@link DatabaseEntry} objects.</li>
+ * <li>When using the {@link com.sleepycat.collections} package, an instance of
+ * this class can be used with any stored collection. The easiest way to
+ * obtain a binding instance is with the {@link
+ * TupleBinding#getPrimitiveBinding} method.</li>
+ * </ol>
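A short round trip; note that a null String is itself a legal value in this format, as the sizedOutput implementation below accounts for:

import com.sleepycat.bind.tuple.StringBinding;
import com.sleepycat.je.DatabaseEntry;

public class StringBindingExample {
    public static void main(String[] args) {
        DatabaseEntry entry = new DatabaseEntry();

        StringBinding.stringToEntry("tuple", entry);
        System.out.println(StringBinding.entryToString(entry));   // tuple

        StringBinding.stringToEntry(null, entry);                 // null is legal
        System.out.println(StringBinding.entryToString(entry));   // null
    }
}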
    + * + * @see String Formats + */ +public class StringBinding extends TupleBinding { + + // javadoc is inherited + public String entryToObject(TupleInput input) { + + return input.readString(); + } + + // javadoc is inherited + public void objectToEntry(String object, TupleOutput output) { + + output.writeString(object); + } + + // javadoc is inherited + protected TupleOutput getTupleOutput(String object) { + + return sizedOutput(object); + } + + /** + * Converts an entry buffer into a simple String value. + * + * @param entry is the source entry buffer. + * + * @return the resulting value. + */ + public static String entryToString(DatabaseEntry entry) { + + return entryToInput(entry).readString(); + } + + /** + * Converts a simple String value into an entry buffer. + * + * @param val is the source value. + * + * @param entry is the destination entry buffer. + */ + public static void stringToEntry(String val, DatabaseEntry entry) { + + outputToEntry(sizedOutput(val).writeString(val), entry); + } + + /** + * Returns a tuple output object of the exact size needed, to avoid + * wasting space when a single primitive is output. + */ + private static TupleOutput sizedOutput(String val) { + + int stringLength = + (val == null) ? 1 : UtfOps.getByteLength(val.toCharArray()); + stringLength++; // null terminator + return new TupleOutput(new byte[stringLength]); + } +} diff --git a/src/com/sleepycat/bind/tuple/TupleBase.java b/src/com/sleepycat/bind/tuple/TupleBase.java new file mode 100644 index 0000000..d925486 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/TupleBase.java @@ -0,0 +1,170 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.je.DatabaseEntry; + +/** + * A base class for tuple bindings and tuple key creators that provides control + * over the allocation of the output buffer. + * + *

    Tuple bindings and key creators append data to a {@link TupleOutput} + * instance, which is also a {@link com.sleepycat.util.FastOutputStream} + * instance. This object has a byte array buffer that is resized when it is + * full. The reallocation of this buffer can be a performance factor for + * some applications using large objects. To manage this issue, the {@link + * #setTupleBufferSize} method may be used to control the initial size of the + * buffer, and the {@link #getTupleOutput} method may be overridden by + * subclasses to take over creation of the TupleOutput object.
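For instance, a binding for large objects could raise the initial buffer size, or take over output creation entirely; a sketch of both options (the 1 KB figure is arbitrary):

import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.bind.tuple.TupleOutput;

public abstract class LargeObjectBinding<E> extends TupleBinding<E> {

    public LargeObjectBinding() {
        // Option 1: start with a larger initial buffer so that writing
        // big tuples does not trigger repeated reallocation.
        setTupleBufferSize(1024);
    }

    // Option 2: take over creation of the output object, for example
    // to size it from the object being written.
    @Override
    protected TupleOutput getTupleOutput(E object) {
        return new TupleOutput(new byte[1024]);
    }
}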

    + */ +public class TupleBase { + + private int outputBufferSize; + + /** + * Initializes the initial output buffer size to zero. + * + *

    Unless {@link #setTupleBufferSize} is called, the default {@link + * com.sleepycat.util.FastOutputStream#DEFAULT_INIT_SIZE} size will be + * used.

    + */ + public TupleBase() { + outputBufferSize = 0; + } + + /** + * Sets the initial byte size of the output buffer that is allocated by the + * default implementation of {@link #getTupleOutput}. + * + *

    If this property is zero (the default), the default {@link + * com.sleepycat.util.FastOutputStream#DEFAULT_INIT_SIZE} size is used.

    + * + * @param byteSize the initial byte size of the output buffer, or zero to + * use the default size. + */ + public void setTupleBufferSize(int byteSize) { + outputBufferSize = byteSize; + } + + /** + * Returns the initial byte size of the output buffer. + * + * @return the initial byte size of the output buffer. + * + * @see #setTupleBufferSize + */ + public int getTupleBufferSize() { + return outputBufferSize; + } + + /** + * Returns an empty TupleOutput instance that will be used by the tuple + * binding or key creator. + * + *

    The default implementation of this method creates a new TupleOutput + * with an initial buffer size that can be changed using the {@link + * #setTupleBufferSize} method.

    + * + *

    This method may be overridden to return a TupleOutput instance. For + * example, an instance per thread could be created and returned by this + * method. If a TupleOutput instance is reused, be sure to call its + * {@link com.sleepycat.util.FastOutputStream#reset} method before each + * use.

    + * + * @param object is the object to be written to the tuple output, and may + * be used by subclasses to determine the size of the output buffer. + * + * @return an empty TupleOutput instance. + * + * @see #setTupleBufferSize + */ + protected TupleOutput getTupleOutput(E object) { + int byteSize = getTupleBufferSize(); + if (byteSize != 0) { + return new TupleOutput(new byte[byteSize]); + } else { + return new TupleOutput(); + } + } + + /** + * Utility method to set the data in a entry buffer to the data in a tuple + * output object. + * + * @param output is the source tuple output object. + * + * @param entry is the destination entry buffer. + */ + public static void outputToEntry(TupleOutput output, DatabaseEntry entry) { + + entry.setData(output.getBufferBytes(), output.getBufferOffset(), + output.getBufferLength()); + } + + /** + * Utility method to set the data in a entry buffer to the data in a tuple + * input object. + * + * @param input is the source tuple input object. + * + * @param entry is the destination entry buffer. + */ + public static void inputToEntry(TupleInput input, DatabaseEntry entry) { + + entry.setData(input.getBufferBytes(), input.getBufferOffset(), + input.getBufferLength()); + } + + /** + * Utility method to create a new tuple input object for reading the data + * from a given buffer. If an existing input is reused, it is reset before + * returning it. + * + * @param entry is the source entry buffer. + * + * @return the new tuple input object. + */ + public static TupleInput entryToInput(DatabaseEntry entry) { + + return new TupleInput(entry.getData(), entry.getOffset(), + entry.getSize()); + } + + /** + * Utility method for use by bindings to create a tuple output object. + * + * @return a new tuple output object. + * + * @deprecated replaced by {@link #getTupleOutput} + */ + public static TupleOutput newOutput() { + + return new TupleOutput(); + } + + /** + * Utility method for use by bindings to create a tuple output object + * with a specific starting size. + * + * @param buffer is the byte array to use as the buffer. + * + * @return a new tuple output object. + * + * @deprecated replaced by {@link #getTupleOutput} + */ + public static TupleOutput newOutput(byte[] buffer) { + + return new TupleOutput(buffer); + } +} diff --git a/src/com/sleepycat/bind/tuple/TupleBinding.java b/src/com/sleepycat/bind/tuple/TupleBinding.java new file mode 100644 index 0000000..4262624 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/TupleBinding.java @@ -0,0 +1,135 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * An abstract EntryBinding that treats a key or data entry as a + * tuple; it includes predefined bindings for Java primitive types. + * + *

    This class takes care of converting the entries to/from {@link + * TupleInput} and {@link TupleOutput} objects. Its two abstract methods must + * be implemented by a concrete subclass to convert between tuples and key or + * data objects.

+ * <ul>
+ * <li>{@link #entryToObject(TupleInput)}</li>
+ * <li>{@link #objectToEntry(Object,TupleOutput)}</li>
+ * </ul>
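A concrete subclass for an invented two-field key could look like this sketch:

import com.sleepycat.bind.tuple.TupleBinding;
import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

/** Hypothetical key class with a group id and a name. */
class GroupNameKey {
    int group;
    String name;
}

public class GroupNameKeyBinding extends TupleBinding<GroupNameKey> {

    public GroupNameKey entryToObject(TupleInput input) {
        GroupNameKey key = new GroupNameKey();
        key.group = input.readInt();       // read fields in the order written
        key.name = input.readString();
        return key;
    }

    public void objectToEntry(GroupNameKey object, TupleOutput output) {
        output.writeInt(object.group);
        output.writeString(object.name);
    }
}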
    + * + * @param is the class representing the key or data. + * + * @see Tuple Formats + * + * @author Mark Hayes + */ +public abstract class TupleBinding + extends TupleBase + implements EntryBinding { + + private static final Map primitives = + new HashMap(); + static { + addPrimitive(String.class, String.class, new StringBinding()); + addPrimitive(Character.class, Character.TYPE, new CharacterBinding()); + addPrimitive(Boolean.class, Boolean.TYPE, new BooleanBinding()); + addPrimitive(Byte.class, Byte.TYPE, new ByteBinding()); + addPrimitive(Short.class, Short.TYPE, new ShortBinding()); + addPrimitive(Integer.class, Integer.TYPE, new IntegerBinding()); + addPrimitive(Long.class, Long.TYPE, new LongBinding()); + addPrimitive(Float.class, Float.TYPE, new FloatBinding()); + addPrimitive(Double.class, Double.TYPE, new DoubleBinding()); + } + + private static void addPrimitive(Class cls1, Class cls2, + TupleBinding binding) { + primitives.put(cls1, binding); + primitives.put(cls2, binding); + } + + /** + * Creates a tuple binding. + */ + public TupleBinding() { + } + + // javadoc is inherited + public E entryToObject(DatabaseEntry entry) { + + return entryToObject(entryToInput(entry)); + } + + // javadoc is inherited + public void objectToEntry(E object, DatabaseEntry entry) { + + TupleOutput output = getTupleOutput(object); + objectToEntry(object, output); + outputToEntry(output, entry); + } + + /** + * Constructs a key or data object from a {@link TupleInput} entry. + * + * @param input is the tuple key or data entry. + * + * @return the key or data object constructed from the entry. + */ + public abstract E entryToObject(TupleInput input); + + /** + * Converts a key or data object to a tuple entry. + * + * @param object is the key or data object. + * + * @param output is the tuple entry to which the key or data should be + * written. + */ + public abstract void objectToEntry(E object, TupleOutput output); + + /** + * Creates a tuple binding for a primitive Java class. The following + * Java classes are supported. + *
+ * <ul>
+ * <li>String</li>
+ * <li>Character</li>
+ * <li>Boolean</li>
+ * <li>Byte</li>
+ * <li>Short</li>
+ * <li>Integer</li>
+ * <li>Long</li>
+ * <li>Float</li>
+ * <li>Double</li>
+ * </ul>
+ *

    Note: {@link #getPrimitiveBinding} returns bindings that do + * not sort negative floating point numbers correctly by default. See + * {@link SortedFloatBinding} and {@link SortedDoubleBinding} for + * details.
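In practice that note means choosing the sorted binding explicitly when key byte order must match numeric order; a sketch:

import com.sleepycat.bind.EntryBinding;
import com.sleepycat.bind.tuple.SortedFloatBinding;
import com.sleepycat.bind.tuple.TupleBinding;

public class FloatBindingChoice {
    public static void main(String[] args) {
        // Negative values do not sort correctly as raw key bytes:
        EntryBinding<Float> unsorted =
            TupleBinding.getPrimitiveBinding(Float.class);

        // Byte order matches numeric order, using a different stored format:
        EntryBinding<Float> sorted = new SortedFloatBinding();

        System.out.println(unsorted.getClass().getSimpleName()); // FloatBinding
        System.out.println(sorted.getClass().getSimpleName());   // SortedFloatBinding
    }
}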

    + * + * @param the primitive Java class. + * + * @param cls the primitive Java class. + * + * @return a new binding for the primitive class or null if the cls + * parameter is not one of the supported classes. + */ + public static TupleBinding getPrimitiveBinding(Class cls) { + + return primitives.get(cls); + } +} diff --git a/src/com/sleepycat/bind/tuple/TupleInput.java b/src/com/sleepycat/bind/tuple/TupleInput.java new file mode 100644 index 0000000..88c65b7 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/TupleInput.java @@ -0,0 +1,865 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import com.sleepycat.util.FastInputStream; +import com.sleepycat.util.PackedInteger; +import com.sleepycat.util.UtfOps; + +/** + * An InputStream with DataInput-like methods for + * reading tuple fields. It is used by TupleBinding. + * + *

    This class has many methods that have the same signatures as methods in + * the {@link java.io.DataInput} interface. The reason this class does not + * implement {@link java.io.DataInput} is because it would break the interface + * contract for those methods because of data format differences.
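Because the formats differ, each field must be read back with the TupleInput counterpart of the method that wrote it; a TupleInput can be constructed directly over a TupleOutput's buffer for a quick test:

import com.sleepycat.bind.tuple.TupleInput;
import com.sleepycat.bind.tuple.TupleOutput;

public class TupleInputExample {
    public static void main(String[] args) {
        TupleOutput out = new TupleOutput();
        out.writeString("je");
        out.writeInt(5);
        out.writeSortedDouble(-2.5);

        TupleInput in = new TupleInput(out);          // reads the same buffer
        System.out.println(in.readString());          // je
        System.out.println(in.readInt());             // 5
        System.out.println(in.readSortedDouble());    // -2.5
    }
}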

    + * + * @see Tuple Formats + * + * @author Mark Hayes + */ +public class TupleInput extends FastInputStream { + + /** + * Creates a tuple input object for reading a byte array of tuple data. A + * reference to the byte array will be kept by this object (it will not be + * copied) and therefore the byte array should not be modified while this + * object is in use. + * + * @param buffer is the byte array to be read and should contain data in + * tuple format. + */ + public TupleInput(byte[] buffer) { + + super(buffer); + } + + /** + * Creates a tuple input object for reading a byte array of tuple data at + * a given offset for a given length. A reference to the byte array will + * be kept by this object (it will not be copied) and therefore the byte + * array should not be modified while this object is in use. + * + * @param buffer is the byte array to be read and should contain data in + * tuple format. + * + * @param offset is the byte offset at which to begin reading. + * + * @param length is the number of bytes to be read. + */ + public TupleInput(byte[] buffer, int offset, int length) { + + super(buffer, offset, length); + } + + /** + * Creates a tuple input object from the data contained in a tuple output + * object. A reference to the tuple output's byte array will be kept by + * this object (it will not be copied) and therefore the tuple output + * object should not be modified while this object is in use. + * + * @param output is the tuple output object containing the data to be read. + */ + public TupleInput(TupleOutput output) { + + super(output.getBufferBytes(), output.getBufferOffset(), + output.getBufferLength()); + } + + // --- begin DataInput compatible methods --- + + /** + * Reads a null-terminated UTF string from the data buffer and converts + * the data from UTF to Unicode. + * Reads values that were written using {@link + * TupleOutput#writeString(String)}. + * + * @return the converted string. + * + * @throws IndexOutOfBoundsException if no null terminating byte is found + * in the buffer. + * + * @throws IllegalArgumentException malformed UTF data is encountered. + * + * @see String Formats + */ + public final String readString() + throws IndexOutOfBoundsException, IllegalArgumentException { + + byte[] myBuf = buf; + int myOff = off; + if (available() >= 2 && + myBuf[myOff] == TupleOutput.NULL_STRING_UTF_VALUE && + myBuf[myOff + 1] == 0) { + skip(2); + return null; + } else { + int byteLen = UtfOps.getZeroTerminatedByteLength(myBuf, myOff); + skip(byteLen + 1); + return UtfOps.bytesToString(myBuf, myOff, byteLen); + } + } + + /** + * Reads a char (two byte) unsigned value from the buffer. + * Reads values that were written using {@link TupleOutput#writeChar}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final char readChar() + throws IndexOutOfBoundsException { + + return (char) readUnsignedShort(); + } + + /** + * Reads a boolean (one byte) unsigned value from the buffer and returns + * true if it is non-zero and false if it is zero. + * Reads values that were written using {@link TupleOutput#writeBoolean}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. 
+ * + * @see Integer Formats + */ + public final boolean readBoolean() + throws IndexOutOfBoundsException { + + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + return (c != 0); + } + + /** + * Reads a signed byte (one byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeByte}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final byte readByte() + throws IndexOutOfBoundsException { + + return (byte) (readUnsignedByte() ^ 0x80); + } + + /** + * Reads a signed short (two byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeShort}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final short readShort() + throws IndexOutOfBoundsException { + + return (short) (readUnsignedShort() ^ 0x8000); + } + + /** + * Reads a signed int (four byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeInt}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final int readInt() + throws IndexOutOfBoundsException { + + return (int) (readUnsignedInt() ^ 0x80000000); + } + + /** + * Reads a signed long (eight byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeLong}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final long readLong() + throws IndexOutOfBoundsException { + + return readUnsignedLong() ^ 0x8000000000000000L; + } + + /** + * Reads an unsorted float (four byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeFloat}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Floating Point + * Formats + */ + public final float readFloat() + throws IndexOutOfBoundsException { + + return Float.intBitsToFloat((int) readUnsignedInt()); + } + + /** + * Reads an unsorted double (eight byte) value from the buffer. + * Reads values that were written using {@link TupleOutput#writeDouble}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Floating Point + * Formats + */ + public final double readDouble() + throws IndexOutOfBoundsException { + + return Double.longBitsToDouble(readUnsignedLong()); + } + + /** + * Reads a sorted float (four byte) value from the buffer. + * Reads values that were written using {@link + * TupleOutput#writeSortedFloat}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Floating Point + * Formats + */ + public final float readSortedFloat() + throws IndexOutOfBoundsException { + + int val = (int) readUnsignedInt(); + val ^= (val < 0) ? 0x80000000 : 0xffffffff; + return Float.intBitsToFloat(val); + } + + /** + * Reads a sorted double (eight byte) value from the buffer. 
+ * Reads values that were written using {@link + * TupleOutput#writeSortedDouble}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Floating Point + * Formats + */ + public final double readSortedDouble() + throws IndexOutOfBoundsException { + + long val = readUnsignedLong(); + val ^= (val < 0) ? 0x8000000000000000L : 0xffffffffffffffffL; + return Double.longBitsToDouble(val); + } + + /** + * Reads an unsigned byte (one byte) value from the buffer. + * Reads values that were written using {@link + * TupleOutput#writeUnsignedByte}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final int readUnsignedByte() + throws IndexOutOfBoundsException { + + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + return c; + } + + /** + * Reads an unsigned short (two byte) value from the buffer. + * Reads values that were written using {@link + * TupleOutput#writeUnsignedShort}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final int readUnsignedShort() + throws IndexOutOfBoundsException { + + int c1 = readFast(); + int c2 = readFast(); + if ((c1 | c2) < 0) { + throw new IndexOutOfBoundsException(); + } + return ((c1 << 8) | c2); + } + + // --- end DataInput compatible methods --- + + /** + * Reads an unsigned int (four byte) value from the buffer. + * Reads values that were written using {@link + * TupleOutput#writeUnsignedInt}. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final long readUnsignedInt() + throws IndexOutOfBoundsException { + + long c1 = readFast(); + long c2 = readFast(); + long c3 = readFast(); + long c4 = readFast(); + if ((c1 | c2 | c3 | c4) < 0) { + throw new IndexOutOfBoundsException(); + } + return ((c1 << 24) | (c2 << 16) | (c3 << 8) | c4); + } + + /** + * This method is private since an unsigned long cannot be treated as + * such in Java, nor converted to a BigInteger of the same value. + */ + private final long readUnsignedLong() + throws IndexOutOfBoundsException { + + long c1 = readFast(); + long c2 = readFast(); + long c3 = readFast(); + long c4 = readFast(); + long c5 = readFast(); + long c6 = readFast(); + long c7 = readFast(); + long c8 = readFast(); + if ((c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8) < 0) { + throw new IndexOutOfBoundsException(); + } + return ((c1 << 56) | (c2 << 48) | (c3 << 40) | (c4 << 32) | + (c5 << 24) | (c6 << 16) | (c7 << 8) | c8); + } + + /** + * Reads the specified number of bytes from the buffer, converting each + * unsigned byte value to a character of the resulting string. + * Reads values that were written using {@link TupleOutput#writeBytes}. + * + * @param length is the number of bytes to be read. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. 
+ * + * @see Integer Formats + */ + public final String readBytes(int length) + throws IndexOutOfBoundsException { + + StringBuilder buf = new StringBuilder(length); + for (int i = 0; i < length; i++) { + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + buf.append((char) c); + } + return buf.toString(); + } + + /** + * Reads the specified number of characters from the buffer, converting + * each two byte unsigned value to a character of the resulting string. + * Reads values that were written using {@link TupleOutput#writeChars}. + * + * @param length is the number of characters to be read. + * + * @return the value read from the buffer. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final String readChars(int length) + throws IndexOutOfBoundsException { + + StringBuilder buf = new StringBuilder(length); + for (int i = 0; i < length; i++) { + buf.append(readChar()); + } + return buf.toString(); + } + + /** + * Reads the specified number of bytes from the buffer, converting each + * unsigned byte value to a character of the resulting array. + * Reads values that were written using {@link TupleOutput#writeBytes}. + * + * @param chars is the array to receive the data and whose length is used + * to determine the number of bytes to be read. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final void readBytes(char[] chars) + throws IndexOutOfBoundsException { + + for (int i = 0; i < chars.length; i++) { + int c = readFast(); + if (c < 0) { + throw new IndexOutOfBoundsException(); + } + chars[i] = (char) c; + } + } + + /** + * Reads the specified number of characters from the buffer, converting + * each two byte unsigned value to a character of the resulting array. + * Reads values that were written using {@link TupleOutput#writeChars}. + * + * @param chars is the array to receive the data and whose length is used + * to determine the number of characters to be read. + * + * @throws IndexOutOfBoundsException if not enough bytes are available in + * the buffer. + * + * @see Integer Formats + */ + public final void readChars(char[] chars) + throws IndexOutOfBoundsException { + + for (int i = 0; i < chars.length; i++) { + chars[i] = readChar(); + } + } + + /** + * Reads the specified number of UTF characters string from the data + * buffer and converts the data from UTF to Unicode. + * Reads values that were written using {@link + * TupleOutput#writeString(char[])}. + * + * @param length is the number of characters to be read. + * + * @return the converted string. + * + * @throws IndexOutOfBoundsException if no null terminating byte is found + * in the buffer. + * + * @throws IllegalArgumentException malformed UTF data is encountered. + * + * @see String Formats + */ + public final String readString(int length) + throws IndexOutOfBoundsException, IllegalArgumentException { + + char[] chars = new char[length]; + readString(chars); + return new String(chars); + } + + /** + * Reads the specified number of UTF characters string from the data + * buffer and converts the data from UTF to Unicode. + * Reads values that were written using {@link + * TupleOutput#writeString(char[])}. + * + * @param chars is the array to receive the data and whose length is used + * to determine the number of characters to be read. 
+ * + * @throws IndexOutOfBoundsException if no null terminating byte is found + * in the buffer. + * + * @throws IllegalArgumentException malformed UTF data is encountered. + * + * @see String Formats + */ + public final void readString(char[] chars) + throws IndexOutOfBoundsException, IllegalArgumentException { + + off = UtfOps.bytesToChars(buf, off, chars, 0, chars.length, false); + } + + /** + * Returns the byte length of a null-terminated UTF string in the data + * buffer, including the terminator. Used with string values that were + * written using {@link TupleOutput#writeString(String)}. + * + * @return the byte length. + * + * @throws IndexOutOfBoundsException if no null terminating byte is found + * in the buffer. + * + * @throws IllegalArgumentException malformed UTF data is encountered. + * + * @see String Formats + */ + public final int getStringByteLength() + throws IndexOutOfBoundsException, IllegalArgumentException { + + if (available() >= 2 && + buf[off] == TupleOutput.NULL_STRING_UTF_VALUE && + buf[off + 1] == 0) { + return 2; + } else { + return UtfOps.getZeroTerminatedByteLength(buf, off) + 1; + } + } + + /** + * Reads an unsorted packed integer. + * + * @return the int value. + * + * @see Integer Formats + */ + public final int readPackedInt() { + + int len = PackedInteger.getReadIntLength(buf, off); + int val = PackedInteger.readInt(buf, off); + + off += len; + return val; + } + + /** + * Returns the byte length of a packed integer. + * + * @return the byte length. + * + * @see Integer Formats + */ + public final int getPackedIntByteLength() { + return PackedInteger.getReadIntLength(buf, off); + } + + /** + * Reads an unsorted packed long integer. + * + * @return the long value. + * + * @see Integer Formats + */ + public final long readPackedLong() { + + int len = PackedInteger.getReadLongLength(buf, off); + long val = PackedInteger.readLong(buf, off); + + off += len; + return val; + } + + /** + * Returns the byte length of a packed long integer. + * + * @return the byte length. + * + * @see Integer Formats + */ + public final int getPackedLongByteLength() { + return PackedInteger.getReadLongLength(buf, off); + } + + /** + * Reads a sorted packed integer. + * + * @return the int value. + * + * @see Integer Formats + */ + public final int readSortedPackedInt() { + + int len = PackedInteger.getReadSortedIntLength(buf, off); + int val = PackedInteger.readSortedInt(buf, off); + + off += len; + return val; + } + + /** + * Returns the byte length of a sorted packed integer. + * + * @return the byte length. + * + * @see Integer Formats + */ + public final int getSortedPackedIntByteLength() { + return PackedInteger.getReadSortedIntLength(buf, off); + } + + /** + * Reads a sorted packed long integer. + * + * @return the long value. + * + * @see Integer Formats + */ + public final long readSortedPackedLong() { + + int len = PackedInteger.getReadSortedLongLength(buf, off); + long val = PackedInteger.readSortedLong(buf, off); + + off += len; + return val; + } + + /** + * Returns the byte length of a sorted packed long integer. + * + * @return the byte length. + * + * @see Integer Formats + */ + public final int getSortedPackedLongByteLength() { + return PackedInteger.getReadSortedLongLength(buf, off); + } + + /** + * Reads a {@code BigInteger}. + * + * @return the non-null BigInteger value. 
+     *
+     * @see Integer Formats
+     */
+    public final BigInteger readBigInteger() {
+
+        int len = readShort();
+        if (len < 0) {
+            len = (- len);
+        }
+        byte[] a = new byte[len];
+        a[0] = readByte();
+        readFast(a, 1, a.length - 1);
+        return new BigInteger(a);
+    }
+
+    /**
+     * Returns the byte length of a {@code BigInteger}.
+     *
+     * @return the byte length.
+     *
+     * @see Integer Formats
+     */
+    public final int getBigIntegerByteLength() {
+
+        int saveOff = off;
+        int len = readShort();
+        off = saveOff;
+        if (len < 0) {
+            len = (- len);
+        }
+        return len + 2;
+    }
+
+    /**
+     * Reads an unsorted {@code BigDecimal}.
+     *
+     * @return the non-null BigDecimal value.
+     *
+     * @see BigDecimal Formats
+     */
+    public final BigDecimal readBigDecimal() {
+
+        int scale = readPackedInt();
+        int len = readPackedInt();
+        byte[] a = new byte[len];
+        readFast(a, 0, len);
+        BigInteger unscaledVal = new BigInteger(a);
+        return new BigDecimal(unscaledVal, scale);
+    }
+
+    /**
+     * Returns the byte length of an unsorted {@code BigDecimal}.
+     *
+     * @return the byte length.
+     *
+     * @see BigDecimal Formats
+     */
+    public final int getBigDecimalByteLength() {
+
+        /* First get the length of the scale. */
+        int scaleLen = getPackedIntByteLength();
+        int saveOff = off;
+        off += scaleLen;
+
+        /*
+         * Then get the length of the value which stores the length of the
+         * following bytes.
+         */
+        int lenOfUnscaleValLen = getPackedIntByteLength();
+
+        /* Finally get the length of the following bytes. */
+        int unscaledValLen = readPackedInt();
+        off = saveOff;
+        return scaleLen + lenOfUnscaleValLen + unscaledValLen;
+    }
+
+    /**
+     * Reads a sorted {@code BigDecimal}, with support for correct default
+     * sorting.
+     *
+     * @return the non-null BigDecimal value.
+     *
+     * @see BigDecimal Formats
+     */
+    public final BigDecimal readSortedBigDecimal() {
+        /* Get the sign of the BigDecimal. */
+        int sign = readByte();
+
+        /* Get the exponent of the BigDecimal. */
+        int exponent = readSortedPackedInt();
+
+        /* Get the normalized BigDecimal. */
+        BigDecimal normalizedVal = readSortedNormalizedBigDecimal();
+
+        /*
+         * After getting the normalized BigDecimal, we need to scale the value
+         * with the exponent.
+         */
+        return normalizedVal.scaleByPowerOfTen(exponent * sign);
+    }
+
+    /**
+     * Reads a sorted {@code BigDecimal} in normalized format with a single
+     * digit to the left of the decimal point.
+     */
+    private BigDecimal readSortedNormalizedBigDecimal() {
+
+        StringBuilder valStr = new StringBuilder(32);
+        int subVal = readSortedPackedInt();
+        int sign = subVal < 0 ? -1 : 1;
+
+        /* Read through the buf, until we meet the terminator byte. */
+        while (subVal != -1) {
+
+            /* Adjust the sub-value back to the original. */
+            subVal = subVal < 0 ? subVal + 1 : subVal;
+            String groupDigits = Integer.toString(Math.abs(subVal));
+
+            /*
+             * subVal < 100000000 means some leading zeros have been removed;
+             * we have to add them back.
+             */
+            if (groupDigits.length() < 9) {
+                final int insertLen = 9 - groupDigits.length();
+                for (int i = 0; i < insertLen; i++) {
+                    valStr.append("0");
+                }
+            }
+            valStr.append(groupDigits);
+            subVal = readSortedPackedInt();
+        }
+
+        BigInteger digitsVal = new BigInteger(valStr.toString());
+        if (sign < 0) {
+            digitsVal = digitsVal.negate();
+        }
+        /* The normalized decimal has one digit in the integer part. */
+        int scale = valStr.length() - 1;
+
+        /*
+         * Since we may pad trailing zeros for serialization, when doing
+         * de-serialization, we need to delete the trailing zeros.
+         */
+        return new BigDecimal(digitsVal, scale).stripTrailingZeros();
+    }
+
+    /**
+     * Returns the byte length of a sorted {@code BigDecimal}.
+     *
+     * @return the byte length.
+     *
+     * @see BigDecimal Formats
+     */
+    public final int getSortedBigDecimalByteLength() {
+
+        /* Save the original position, and read past the sign byte. */
+        int saveOff = off++;
+
+        /* Get the length of the exponent. */
+        int len = getSortedPackedIntByteLength(); /* the exponent */
+
+        /* Skip to the digit part. */
+        off += len;
+
+        /*
+         * Travel through the following SortedPackedIntegers, until we meet
+         * the terminator byte.
+         */
+        int subVal = readSortedPackedInt();
+        while (subVal != -1) {
+            subVal = readSortedPackedInt();
+        }
+
+        /*
+         * off is the value of end offset, while saveOff is the beginning
+         * offset.
+         */
+        len = off - saveOff;
+        off = saveOff;
+        return len;
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/TupleInputBinding.java b/src/com/sleepycat/bind/tuple/TupleInputBinding.java
new file mode 100644
index 0000000..f40ebfd
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleInputBinding.java
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.bind.EntryBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * A concrete EntryBinding that uses the TupleInput
+ * object as the key or data object.
+ *
+ * A concrete tuple binding for key or data entries which are {@link
+ * TupleInput} objects. This binding is used when tuples themselves are the
+ * objects, rather than using application defined objects. A {@link TupleInput}
+ * must always be used. To convert a {@link TupleOutput} to a {@link
+ * TupleInput}, use the {@link TupleInput#TupleInput(TupleOutput)} constructor.
+ *
+ * @author Mark Hayes
+ */
+public class TupleInputBinding implements EntryBinding<TupleInput> {
+
+    /**
+     * Creates a tuple input binding.
+     */
+    public TupleInputBinding() {
+    }
+
+    // javadoc is inherited
+    public TupleInput entryToObject(DatabaseEntry entry) {
+
+        return TupleBinding.entryToInput(entry);
+    }
+
+    // javadoc is inherited
+    public void objectToEntry(TupleInput object, DatabaseEntry entry) {
+
+        TupleBinding.inputToEntry(object, entry);
+    }
+}
diff --git a/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java b/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java
new file mode 100644
index 0000000..2e184f5
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleMarshalledBinding.java
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A concrete TupleBinding that delegates to the + * MarshalledTupleEntry interface of the data or key object. + * + *

This class works by calling the methods of the {@link MarshalledTupleEntry} interface, which must be implemented by the key or data class, to convert between the key or data entry and the object.

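A hypothetical implementation sketch (class and field names are not part of this import); marshalEntry and unmarshalEntry are the MarshalledTupleEntry methods this binding invokes:

    public class PartKey implements MarshalledTupleEntry {
        private String number;

        public PartKey() {                     // public no-arg constructor required
        }

        public void marshalEntry(TupleOutput out) {
            out.writeString(number);           // write fields in a fixed order
        }

        public void unmarshalEntry(TupleInput in) {
            number = in.readString();          // read them back in the same order
        }
    }

Such a class would then be bound with new TupleMarshalledBinding(PartKey.class).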
    + * + * @author Mark Hayes + */ +public class TupleMarshalledBinding + extends TupleBinding { + + private Class cls; + + /** + * Creates a tuple marshalled binding object. + * + *

The given class is used to instantiate key or data objects using {@link Class#newInstance}, and therefore must be a public class and have a public no-arguments constructor. It must also implement the {@link MarshalledTupleEntry} interface.

    + * + * @param cls is the class of the key or data objects. + */ + public TupleMarshalledBinding(Class cls) { + + this.cls = cls; + + /* The class will be used to instantiate the object. */ + if (!MarshalledTupleEntry.class.isAssignableFrom(cls)) { + throw new IllegalArgumentException + (cls.toString() + " does not implement MarshalledTupleEntry"); + } + } + + // javadoc is inherited + public E entryToObject(TupleInput input) { + + try { + E obj = cls.newInstance(); + obj.unmarshalEntry(input); + return obj; + } catch (IllegalAccessException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } catch (InstantiationException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } + + // javadoc is inherited + public void objectToEntry(E object, TupleOutput output) { + + object.marshalEntry(output); + } +} diff --git a/src/com/sleepycat/bind/tuple/TupleOutput.java b/src/com/sleepycat/bind/tuple/TupleOutput.java new file mode 100644 index 0000000..835d715 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/TupleOutput.java @@ -0,0 +1,779 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.PackedInteger; +import com.sleepycat.util.UtfOps; + +/** + * An OutputStream with DataOutput-like methods for + * writing tuple fields. It is used by TupleBinding. + * + *

This class has many methods that have the same signatures as methods in the {@link java.io.DataOutput} interface. This class does not implement {@link java.io.DataOutput} because doing so would break the interface contract for those methods, owing to data format differences.

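Because every write method returns this, a composite key can be built by chaining calls; a short sketch under the same illustrative assumptions as above:

    TupleOutput key = new TupleOutput();
    key.writeString("smith").writeLong(19991231L).writeSortedDouble(3.14);
    byte[] raw = key.toByteArray();   // serialized tuple, sortable byte-by-byte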
    + * + * @see Tuple Formats + * + * @author Mark Hayes + */ +public class TupleOutput extends FastOutputStream { + + /** + * We represent a null string as a single FF UTF character, which cannot + * occur in a UTF encoded string. + */ + static final int NULL_STRING_UTF_VALUE = ((byte) 0xFF); + + /** + * Creates a tuple output object for writing a byte array of tuple data. + */ + public TupleOutput() { + + super(); + } + + /** + * Creates a tuple output object for writing a byte array of tuple data, + * using a given buffer. A new buffer will be allocated only if the number + * of bytes needed is greater than the length of this buffer. A reference + * to the byte array will be kept by this object and therefore the byte + * array should not be modified while this object is in use. + * + * @param buffer is the byte array to use as the buffer. + */ + public TupleOutput(byte[] buffer) { + + super(buffer); + } + + // --- begin DataOutput compatible methods --- + + /** + * Writes the specified bytes to the buffer, converting each character to + * an unsigned byte value. + * Writes values that can be read using {@link TupleInput#readBytes}. + * + * @param val is the string containing the values to be written. + * Only characters with values below 0x100 may be written using this + * method, since the high-order 8 bits of all characters are discarded. + * + * @return this tuple output object. + * + * @throws NullPointerException if the val parameter is null. + * + * @see Integer Formats + */ + public final TupleOutput writeBytes(String val) { + + writeBytes(val.toCharArray()); + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to a two byte unsigned value. + * Writes values that can be read using {@link TupleInput#readChars}. + * + * @param val is the string containing the characters to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the val parameter is null. + * + * @see Integer Formats + */ + public final TupleOutput writeChars(String val) { + + writeChars(val.toCharArray()); + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to UTF format, and adding a null terminator byte. + * Writes values that can be read using {@link TupleInput#readString()}. + * + * @param val is the string containing the characters to be written. + * + * @return this tuple output object. + * + * @see String Formats + */ + public final TupleOutput writeString(String val) { + + if (val != null) { + writeString(val.toCharArray()); + } else { + writeFast(NULL_STRING_UTF_VALUE); + } + writeFast(0); + return this; + } + + /** + * Writes a char (two byte) unsigned value to the buffer. + * Writes values that can be read using {@link TupleInput#readChar}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeChar(int val) { + + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } + + /** + * Writes a boolean (one byte) unsigned value to the buffer, writing one + * if the value is true and zero if it is false. + * Writes values that can be read using {@link TupleInput#readBoolean}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeBoolean(boolean val) { + + writeFast(val ? 
(byte)1 : (byte)0); + return this; + } + + /** + * Writes an signed byte (one byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readByte}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeByte(int val) { + + writeUnsignedByte(val ^ 0x80); + return this; + } + + /** + * Writes an signed short (two byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readShort}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeShort(int val) { + + writeUnsignedShort(val ^ 0x8000); + return this; + } + + /** + * Writes an signed int (four byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readInt}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeInt(int val) { + + writeUnsignedInt(val ^ 0x80000000); + return this; + } + + /** + * Writes an signed long (eight byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readLong}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeLong(long val) { + + writeUnsignedLong(val ^ 0x8000000000000000L); + return this; + } + + /** + * Writes an unsorted float (four byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readFloat}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Floating Point + * Formats + */ + public final TupleOutput writeFloat(float val) { + + writeUnsignedInt(Float.floatToIntBits(val)); + return this; + } + + /** + * Writes an unsorted double (eight byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readDouble}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Floating Point + * Formats + */ + public final TupleOutput writeDouble(double val) { + + writeUnsignedLong(Double.doubleToLongBits(val)); + return this; + } + + /** + * Writes a sorted float (four byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readSortedFloat}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Floating Point + * Formats + */ + public final TupleOutput writeSortedFloat(float val) { + + int intVal = Float.floatToIntBits(val); + intVal ^= (intVal < 0) ? 0xffffffff : 0x80000000; + writeUnsignedInt(intVal); + return this; + } + + /** + * Writes a sorted double (eight byte) value to the buffer. + * Writes values that can be read using {@link TupleInput#readSortedDouble}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Floating Point + * Formats + */ + public final TupleOutput writeSortedDouble(double val) { + + long longVal = Double.doubleToLongBits(val); + longVal ^= (longVal < 0) ? 0xffffffffffffffffL : 0x8000000000000000L; + writeUnsignedLong(longVal); + return this; + } + + // --- end DataOutput compatible methods --- + + /** + * Writes the specified bytes to the buffer, converting each character to + * an unsigned byte value. 
+ * Writes values that can be read using {@link TupleInput#readBytes}. + * + * @param chars is the array of values to be written. + * Only characters with values below 0x100 may be written using this + * method, since the high-order 8 bits of all characters are discarded. + * + * @return this tuple output object. + * + * @throws NullPointerException if the chars parameter is null. + * + * @see Integer Formats + */ + public final TupleOutput writeBytes(char[] chars) { + + for (int i = 0; i < chars.length; i++) { + writeFast((byte) chars[i]); + } + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to a two byte unsigned value. + * Writes values that can be read using {@link TupleInput#readChars}. + * + * @param chars is the array of characters to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the chars parameter is null. + * + * @see Integer Formats + */ + public final TupleOutput writeChars(char[] chars) { + + for (int i = 0; i < chars.length; i++) { + writeFast((byte) (chars[i] >>> 8)); + writeFast((byte) chars[i]); + } + return this; + } + + /** + * Writes the specified characters to the buffer, converting each character + * to UTF format. + * Writes values that can be read using {@link TupleInput#readString(int)} + * or {@link TupleInput#readString(char[])}. + * + * @param chars is the array of characters to be written. + * + * @return this tuple output object. + * + * @throws NullPointerException if the chars parameter is null. + * + * @see String Formats + */ + public final TupleOutput writeString(char[] chars) { + + if (chars.length == 0) return this; + + int utfLength = UtfOps.getByteLength(chars); + + makeSpace(utfLength); + UtfOps.charsToBytes(chars, 0, getBufferBytes(), getBufferLength(), + chars.length); + addSize(utfLength); + return this; + } + + /** + * Writes an unsigned byte (one byte) value to the buffer. + * Writes values that can be read using {@link + * TupleInput#readUnsignedByte}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeUnsignedByte(int val) { + + writeFast(val); + return this; + } + + /** + * Writes an unsigned short (two byte) value to the buffer. + * Writes values that can be read using {@link + * TupleInput#readUnsignedShort}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeUnsignedShort(int val) { + + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } + + /** + * Writes an unsigned int (four byte) value to the buffer. + * Writes values that can be read using {@link + * TupleInput#readUnsignedInt}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeUnsignedInt(long val) { + + writeFast((byte) (val >>> 24)); + writeFast((byte) (val >>> 16)); + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } + + /** + * This method is private since an unsigned long cannot be treated as + * such in Java, nor converted to a BigInteger of the same value. 
+ */ + private final TupleOutput writeUnsignedLong(long val) { + + writeFast((byte) (val >>> 56)); + writeFast((byte) (val >>> 48)); + writeFast((byte) (val >>> 40)); + writeFast((byte) (val >>> 32)); + writeFast((byte) (val >>> 24)); + writeFast((byte) (val >>> 16)); + writeFast((byte) (val >>> 8)); + writeFast((byte) val); + return this; + } + + /** + * Writes an unsorted packed integer. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writePackedInt(int val) { + + makeSpace(PackedInteger.MAX_LENGTH); + + int oldLen = getBufferLength(); + int newLen = PackedInteger.writeInt(getBufferBytes(), oldLen, val); + + addSize(newLen - oldLen); + return this; + } + + /** + * Writes an unsorted packed long integer. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writePackedLong(long val) { + + makeSpace(PackedInteger.MAX_LONG_LENGTH); + + int oldLen = getBufferLength(); + int newLen = PackedInteger.writeLong(getBufferBytes(), oldLen, val); + + addSize(newLen - oldLen); + return this; + } + + /** + * Writes a sorted packed integer. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeSortedPackedInt(int val) { + + makeSpace(PackedInteger.MAX_LENGTH); + int oldLen = getBufferLength(); + int newLen = PackedInteger.writeSortedInt(getBufferBytes(), oldLen, + val); + addSize(newLen - oldLen); + return this; + } + + /** + * Writes a sorted packed long integer. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see Integer Formats + */ + public final TupleOutput writeSortedPackedLong(long val) { + + makeSpace(PackedInteger.MAX_LONG_LENGTH); + + int oldLen = getBufferLength(); + int newLen = PackedInteger.writeSortedLong(getBufferBytes(), oldLen, + val); + + addSize(newLen - oldLen); + return this; + } + + /** + * Writes a {@code BigInteger}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @throws NullPointerException if val is null. + * + * @throws IllegalArgumentException if the byte array representation of val + * is larger than 0x7fff bytes. + * + * @see Integer Formats + */ + public final TupleOutput writeBigInteger(BigInteger val) { + + byte[] a = val.toByteArray(); + if (a.length > Short.MAX_VALUE) { + throw new IllegalArgumentException + ("BigInteger byte array is larger than 0x7fff bytes"); + } + int firstByte = a[0]; + writeShort((firstByte < 0) ? (- a.length) : a.length); + writeByte(firstByte); + writeFast(a, 1, a.length - 1); + return this; + } + + /** + * Returns the exact byte length that would would be output for a given + * {@code BigInteger} value if {@link TupleOutput#writeBigInteger} were + * called. + * + * @param val the BigInteger + * + * @return the byte length. + * + * @see Integer Formats + */ + public static int getBigIntegerByteLength(BigInteger val) { + return 2 /* length bytes */ + + (val.bitLength() + 1 /* sign bit */ + 7 /* round up */) / 8; + } + + /** + * Writes an unsorted {@code BigDecimal}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @throws NullPointerException if val is null. 
+ * + * @see BigDecimal + * Formats + */ + public final TupleOutput writeBigDecimal(BigDecimal val) { + + /* + * The byte format for a BigDecimal value is: + * Byte 0 ~ L: The scale part written as a PackedInteger. + * Byte L+1 ~ M: The length of the unscaled value written as a + * PackedInteger. + * Byte M+1 ~ N: The BigDecimal.toByteArray array, written + * without modification. + * + * Get the scale and the unscaled value of this BigDecimal. + */ + int scale = val.scale(); + BigInteger unscaledVal = val.unscaledValue(); + + /* Store the scale. */ + writePackedInt(scale); + byte[] a = unscaledVal.toByteArray(); + int len = a.length; + + /* Store the length of the following bytes. */ + writePackedInt(len); + + /* Store the bytes of the BigDecimal, without modification. */ + writeFast(a, 0, len); + return this; + } + + /** + * Returns the maximum byte length that would be output for a given {@code + * BigDecimal} value if {@link TupleOutput#writeBigDecimal} were called. + * + * @param val the BigDecimal. + * + * @return the byte length. + * + * @see BigDecimal + * Formats + */ + public static int getBigDecimalMaxByteLength(BigDecimal val) { + + BigInteger unscaledVal = val.unscaledValue(); + return PackedInteger.MAX_LENGTH * 2 + + unscaledVal.toByteArray().length; + } + + /** + * Writes a sorted {@code BigDecimal}. + * + * @param val is the value to write to the buffer. + * + * @return this tuple output object. + * + * @see BigDecimal + * Formats + */ + public final TupleOutput writeSortedBigDecimal(BigDecimal val) { + + /* + * We have several options for the serialization of sorted BigDecimal. + * The reason for choosing this method is that it is simpler and more + * compact, and in some cases, comparison time will be less. For other + * methods and detailed discussion, please refer to [#18379]. + * + * First, we need to do the normalization, which means we normalize a + * given BigDecimal into two parts: decimal part and the exponent part. + * The decimal part contains one integer (non zero). For example, + * 1234.56 will be normalized to 1.23456E3; + * 123.4E100 will be normalized to 1.234E102; + * -123.4E-100 will be normalized to -1.234E-98. + * + * After the normalization, the byte format is: + * Byte 0: sign (-1 represents negative, 0 represents zero, and 1 + * represents positive). + * Byte 1 ~ 5: the exponent with sign, and written as a + * SortedPackedInteger value. + * Byte 6 ~ N: the normalized decimal part with sign. + * + * Get the scale and the unscaled value of this BigDecimal.. + */ + BigDecimal valNoTrailZeros = val.stripTrailingZeros(); + int scale = valNoTrailZeros.scale(); + BigInteger unscaledVal = valNoTrailZeros.unscaledValue(); + int sign = valNoTrailZeros.signum(); + + /* Then do the normalization. */ + String unscaledValStr = unscaledVal.abs().toString(); + int normalizedScale = unscaledValStr.length() - 1; + BigDecimal normalizedVal = new BigDecimal(unscaledVal, + normalizedScale); + int exponent = (normalizedScale - scale) * sign; + + /* Start serializing each part. */ + writeByte(sign); + writeSortedPackedInt(exponent); + writeSortedNormalizedBigDecimal(normalizedVal); + return this; + } + + /** + * Writes a normalized {@code BigDecimal}. + */ + private final TupleOutput writeSortedNormalizedBigDecimal(BigDecimal val) { + + /* + * The byte format for a sorted normalized {@code BigDecimal} value is: + * Byte 0 ~ N: Store all digits with sign. Each 9 digits is + * regarded as one integer, and written as a + * SortedPackedInteger value. 
If there are not enough + * 9 digits, pad trailing zeros. Since we may pad + * trailing zeros for serialization, when doing + * de-serialization, we need to delete the trailing + * zeros. In order to designate a special value as the + * terminator byte, we set + * val = (val < 0) ? (val - 1) : val. + * Byte N + 1: Terminator byte. The terminator byte is -1, and + * written as a SortedPackedInteger value. + */ + + /* get the precision, scale and sign of the BigDecimal. */ + int precision = val.precision(); + int scale = val.scale(); + int sign = val.signum(); + + /* Start the serialization of the whole digits. */ + String digitsStr = val.abs().toPlainString(); + + /* + * The default capacity of a StringBuilder is 16 chars, which is + * enough to hold a group of digits having 9 digits. + */ + StringBuilder groupDigits = new StringBuilder(); + for (int i = 0; i < digitsStr.length();) { + char digit = digitsStr.charAt(i++); + + /* Ignore the decimal. */ + if (digit != '.') { + groupDigits.append(digit); + } + + /* + * For the last group of the digits, if there are not 9 digits, pad + * trailing zeros. + */ + if (i == digitsStr.length() && groupDigits.length() < 9) { + final int insertLen = 9 - groupDigits.length(); + for (int k = 0; k < insertLen; k++) { + groupDigits.append("0"); + } + } + + /* Group every 9 digits as an Integer. */ + if (groupDigits.length() == 9) { + int subVal = Integer.valueOf(groupDigits.toString()); + if (sign < 0) { + subVal = -subVal; + } + + /* + * Reset the sub-value, so the value -1 will be designated as + * the terminator byte. + */ + subVal = subVal < 0 ? subVal - 1 : subVal; + writeSortedPackedInt(subVal); + groupDigits.setLength(0); + } + } + + /* Write the terminator byte. */ + writeSortedPackedInt(-1); + return this; + } + + /** + * Returns the maximum byte length that would be output for a given {@code + * BigDecimal} value if {@link TupleOutput#writeSortedBigDecimal} were + * called. + * + * @param val the BigDecimal. + * + * @return the byte length. + * + * @see BigDecimal + * Formats + */ + public static int getSortedBigDecimalMaxByteLength(BigDecimal val) { + + String digitsStr = val.stripTrailingZeros().unscaledValue().abs(). + toString(); + + int numOfGroups = (digitsStr.length() + 8 /* round up */) / 9; + + return 1 /* sign */ + + PackedInteger.MAX_LENGTH /* exponent */ + + PackedInteger.MAX_LENGTH * numOfGroups /* all the digits */ + + 1; /* terminator byte */ + } +} diff --git a/src/com/sleepycat/bind/tuple/TupleTupleBinding.java b/src/com/sleepycat/bind/tuple/TupleTupleBinding.java new file mode 100644 index 0000000..ee57752 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/TupleTupleBinding.java @@ -0,0 +1,100 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * An abstract EntityBinding that treats an entity's key entry and + * data entry as tuples. + * + *

This class takes care of converting the entries to/from {@link TupleInput} and {@link TupleOutput} objects. Its three abstract methods must be implemented by a concrete subclass to convert between tuples and entity objects; a hypothetical subclass is sketched after the list below.

• {@link #entryToObject(TupleInput,TupleInput)}
• {@link #objectToKey(Object,TupleOutput)}
• {@link #objectToData(Object,TupleOutput)}
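The sketch, assuming a hypothetical Part entity with a matching constructor and getters:

    public class PartBinding extends TupleTupleBinding<Part> {

        public Part entryToObject(TupleInput keyInput, TupleInput dataInput) {
            String number = keyInput.readString();
            String supplier = dataInput.readString();
            double weight = dataInput.readSortedDouble();
            return new Part(number, supplier, weight);
        }

        public void objectToKey(Part object, TupleOutput output) {
            output.writeString(object.getNumber());
        }

        public void objectToData(Part object, TupleOutput output) {
            output.writeString(object.getSupplier());
            output.writeSortedDouble(object.getWeight());
        }
    }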
+ *
+ * @author Mark Hayes
+ */
+public abstract class TupleTupleBinding<E> extends TupleBase<E>
+    implements EntityBinding<E> {
+
+    /**
+     * Creates a tuple-tuple entity binding.
+     */
+    public TupleTupleBinding() {
+    }
+
+    // javadoc is inherited
+    public E entryToObject(DatabaseEntry key, DatabaseEntry data) {
+
+        return entryToObject(TupleBinding.entryToInput(key),
+                             TupleBinding.entryToInput(data));
+    }
+
+    // javadoc is inherited
+    public void objectToKey(E object, DatabaseEntry key) {
+
+        TupleOutput output = getTupleOutput(object);
+        objectToKey(object, output);
+        outputToEntry(output, key);
+    }
+
+    // javadoc is inherited
+    public void objectToData(E object, DatabaseEntry data) {
+
+        TupleOutput output = getTupleOutput(object);
+        objectToData(object, output);
+        outputToEntry(output, data);
+    }
+
+    // abstract methods
+
+    /**
+     * Constructs an entity object from {@link TupleInput} key and data
+     * entries.
+     *
+     * @param keyInput is the {@link TupleInput} key entry object.
+     *
+     * @param dataInput is the {@link TupleInput} data entry object.
+     *
+     * @return the entity object constructed from the key and data.
+     */
+    public abstract E entryToObject(TupleInput keyInput, TupleInput dataInput);
+
+    /**
+     * Extracts a key tuple from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @param output is the {@link TupleOutput} to which the key should be
+     * written.
+     */
+    public abstract void objectToKey(E object, TupleOutput output);
+
+    /**
+     * Extracts a data tuple from an entity object.
+     *
+     * @param object is the entity object.
+     *
+     * @param output is the {@link TupleOutput} to which the data should be
+     * written.
+     */
+    public abstract void objectToData(E object, TupleOutput output);
+}
diff --git a/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java b/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java
new file mode 100644
index 0000000..e42c394
--- /dev/null
+++ b/src/com/sleepycat/bind/tuple/TupleTupleKeyCreator.java
@@ -0,0 +1,119 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.bind.tuple;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.ForeignKeyNullifier;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+
+/**
+ * An abstract key creator that uses a tuple key and a tuple data entry. This
+ * class takes care of converting the key and data entry to/from {@link
+ * TupleInput} and {@link TupleOutput} objects.
+ * The following abstract method must be implemented by a concrete subclass
+ * to create the index key using these objects:
• {@link #createSecondaryKey(TupleInput,TupleInput,TupleOutput)}

If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was specified when opening the secondary database, the following method must be overridden to nullify the foreign index key. If NULLIFY was not specified, this method need not be overridden.

• {@link #nullifyForeignKey(TupleInput,TupleOutput)}

If {@link com.sleepycat.je.ForeignKeyDeleteAction#NULLIFY} was specified when creating the secondary, this method is called when the entity for this foreign key is deleted. If NULLIFY was not specified, this method will not be called and may always return false.

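For instance, a hypothetical creator that indexes a city field stored in the data tuple (the Supplier type and record layout are assumptions, not part of this import):

    public class ByCityKeyCreator extends TupleTupleKeyCreator<Supplier> {

        public boolean createSecondaryKey(TupleInput primaryKeyInput,
                                          TupleInput dataInput,
                                          TupleOutput indexKeyOutput) {
            dataInput.readString();               // skip the supplier name
            String city = dataInput.readString(); // the indexed field
            if (city == null) {
                return false;                     // no index key present
            }
            indexKeyOutput.writeString(city);
            return true;
        }
    }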
    + * + * @author Mark Hayes + */ +public abstract class TupleTupleKeyCreator extends TupleBase + implements SecondaryKeyCreator, ForeignKeyNullifier { + + /** + * Creates a tuple-tuple key creator. + */ + public TupleTupleKeyCreator() { + } + + // javadoc is inherited + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry primaryKeyEntry, + DatabaseEntry dataEntry, + DatabaseEntry indexKeyEntry) { + TupleOutput output = getTupleOutput(null); + TupleInput primaryKeyInput = entryToInput(primaryKeyEntry); + TupleInput dataInput = entryToInput(dataEntry); + if (createSecondaryKey(primaryKeyInput, dataInput, output)) { + outputToEntry(output, indexKeyEntry); + return true; + } else { + return false; + } + } + + // javadoc is inherited + public boolean nullifyForeignKey(SecondaryDatabase db, + DatabaseEntry dataEntry) { + TupleOutput output = getTupleOutput(null); + if (nullifyForeignKey(entryToInput(dataEntry), output)) { + outputToEntry(output, dataEntry); + return true; + } else { + return false; + } + } + + /** + * Creates the index key from primary key tuple and data tuple. + * + * @param primaryKeyInput is the {@link TupleInput} for the primary key + * entry. + * + * @param dataInput is the {@link TupleInput} for the data entry. + * + * @param indexKeyOutput is the destination index key tuple. + * + * @return true if a key was created, or false to indicate that the key is + * not present. + */ + public abstract boolean createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput); + + /** + * Clears the index key in the tuple data entry. The dataInput should be + * read and then written to the dataOutput, clearing the index key in the + * process. + * + *

The secondary key should be output or removed by this method such that {@link #createSecondaryKey} will return false. Other fields in the data object should remain unchanged.

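Continuing the hypothetical city-index example above, a matching override might copy the data tuple while writing a null in the key field's place:

    public boolean nullifyForeignKey(TupleInput dataInput,
                                     TupleOutput dataOutput) {
        String name = dataInput.readString();   // preserve other fields
        dataInput.readString();                 // consume the old city value
        dataOutput.writeString(name);
        dataOutput.writeString((String) null);  // createSecondaryKey now returns false
        return true;
    }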
    + * + * @param dataInput is the {@link TupleInput} for the data entry. + * + * @param dataOutput is the destination {@link TupleOutput}. + * + * @return true if the key was cleared, or false to indicate that the key + * is not present and no change is necessary. + */ + public boolean nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) { + + return false; + } +} diff --git a/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java new file mode 100644 index 0000000..f89713b --- /dev/null +++ b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledBinding.java @@ -0,0 +1,100 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A concrete TupleTupleBinding that delegates to the + * MarshalledTupleEntry and + * MarshalledTupleKeyEntity interfaces of the entity class. + * + *

This class calls the methods of the {@link MarshalledTupleEntry} interface to convert between the data entry and entity object. It calls the methods of the {@link MarshalledTupleKeyEntity} interface to convert between the key entry and the entity object. These two interfaces must both be implemented by the entity class.

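A sketch of an entity implementing both interfaces (field names and the "supplier" key name are illustrative); the method signatures match the calls made by the bindings in this import:

    public class Part implements MarshalledTupleEntry, MarshalledTupleKeyEntity {
        private transient String number;   // primary key, kept out of the data entry
        private String supplier;           // secondary key field

        public Part() {
        }

        public void marshalEntry(TupleOutput out) { out.writeString(supplier); }
        public void unmarshalEntry(TupleInput in) { supplier = in.readString(); }

        public void marshalPrimaryKey(TupleOutput out) { out.writeString(number); }
        public void unmarshalPrimaryKey(TupleInput in) { number = in.readString(); }

        public boolean marshalSecondaryKey(String keyName, TupleOutput out) {
            if ("supplier".equals(keyName) && supplier != null) {
                out.writeString(supplier);
                return true;
            }
            return false;
        }

        public boolean nullifyForeignKey(String keyName) {
            if ("supplier".equals(keyName)) {
                supplier = null;
                return true;
            }
            return false;
        }
    }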
    + * + * @author Mark Hayes + */ +public class TupleTupleMarshalledBinding + extends TupleTupleBinding { + + private Class cls; + + /** + * Creates a tuple-tuple marshalled binding object. + * + *

The given class is used to instantiate entity objects using {@link Class#newInstance}, and therefore must be a public class and have a public no-arguments constructor. It must also implement the {@link MarshalledTupleEntry} and {@link MarshalledTupleKeyEntity} interfaces.

    + * + * @param cls is the class of the entity objects. + */ + public TupleTupleMarshalledBinding(Class cls) { + + this.cls = cls; + + // The entity class will be used to instantiate the entity object. + // + if (!MarshalledTupleKeyEntity.class.isAssignableFrom(cls)) { + throw new IllegalArgumentException + (cls.toString() + + " does not implement MarshalledTupleKeyEntity"); + } + if (!MarshalledTupleEntry.class.isAssignableFrom(cls)) { + throw new IllegalArgumentException + (cls.toString() + " does not implement MarshalledTupleEntry"); + } + } + + // javadoc is inherited + public E entryToObject(TupleInput keyInput, TupleInput dataInput) { + + /* + * This "tricky" binding returns the stored data as the entity, but + * first it sets the transient key fields from the stored key. + */ + E obj; + try { + obj = cls.newInstance(); + } catch (IllegalAccessException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } catch (InstantiationException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + if (dataInput != null) { // may be null if used by key extractor + obj.unmarshalEntry(dataInput); + } + if (keyInput != null) { // may be null if used by key extractor + obj.unmarshalPrimaryKey(keyInput); + } + return obj; + } + + // javadoc is inherited + public void objectToKey(E object, TupleOutput output) { + + object.marshalPrimaryKey(output); + } + + // javadoc is inherited + public void objectToData(E object, TupleOutput output) { + + object.marshalEntry(output); + } +} diff --git a/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java new file mode 100644 index 0000000..e31b26d --- /dev/null +++ b/src/com/sleepycat/bind/tuple/TupleTupleMarshalledKeyCreator.java @@ -0,0 +1,78 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple; + +/** + * A concrete key creator that works in conjunction with a {@link + * TupleTupleMarshalledBinding}. This key creator works by calling the + * methods of the {@link MarshalledTupleKeyEntity} interface to create and + * clear the index key. + * + *

Note that a marshalled tuple key creator is somewhat less efficient than a non-marshalled tuple key creator because more conversions are needed. A marshalled key creator must convert the entry to an object in order to create the key, while a non-marshalled key creator does not.

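Wiring the pieces together, assuming the hypothetical Part entity sketched above:

    TupleTupleMarshalledBinding<Part> binding =
        new TupleTupleMarshalledBinding<Part>(Part.class);
    TupleTupleMarshalledKeyCreator<Part> keyCreator =
        new TupleTupleMarshalledKeyCreator<Part>(binding, "supplier");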
    + * + * @author Mark Hayes + */ +public class TupleTupleMarshalledKeyCreator + extends TupleTupleKeyCreator { + + private String keyName; + private TupleTupleMarshalledBinding binding; + + /** + * Creates a tuple-tuple marshalled key creator. + * + * @param binding is the binding used for the tuple-tuple entity. + * + * @param keyName is the key name passed to the {@link + * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the + * index key. + */ + public TupleTupleMarshalledKeyCreator(TupleTupleMarshalledBinding + binding, + String keyName) { + + this.binding = binding; + this.keyName = keyName; + } + + // javadoc is inherited + public boolean createSecondaryKey(TupleInput primaryKeyInput, + TupleInput dataInput, + TupleOutput indexKeyOutput) { + + /* The primary key is unmarshalled before marshalling the index key, to + * account for cases where the index key includes fields taken from the + * primary key. + */ + E entity = binding.entryToObject(primaryKeyInput, dataInput); + return entity.marshalSecondaryKey(keyName, indexKeyOutput); + } + + // javadoc is inherited + public boolean nullifyForeignKey(TupleInput dataInput, + TupleOutput dataOutput) { + + E entity = binding.entryToObject(null, dataInput); + if (entity.nullifyForeignKey(keyName)) { + binding.objectToData(entity, dataOutput); + return true; + } else { + return false; + } + } +} diff --git a/src/com/sleepycat/bind/tuple/package.html b/src/com/sleepycat/bind/tuple/package.html new file mode 100644 index 0000000..8fce171 --- /dev/null +++ b/src/com/sleepycat/bind/tuple/package.html @@ -0,0 +1,394 @@ + + +Bindings that use sequences of primitive fields, or tuples. + + +For a general discussion of bindings, see the +Getting Started Guide. + + +

    Tuple Formats

    + +

The serialization formats for tuple bindings are designed for compactness, +serialization speed, and proper default sorting.

    + +

When a format is used for database keys, it is important to use default +sorting for best performance. Although a custom comparator may be specified +for a {@link com.sleepycat.je.DatabaseConfig#setBtreeComparator database} or +entity index, custom comparators often reduce performance because comparators +are called very frequently during Btree operations.

    + +

    For proper default sorting, the byte array of the stored format must be +designed so that a byte-by-byte unsigned comparison results in the natural sort +order, as defined by the {@link java.lang.Comparable#compareTo} method of the +data type. For example, the natural sort order for integers is the standard +mathematical definition, and is implemented by {@code Integer.compareTo}, +{@code Long.compareTo}, etc. This is called default natural +sorting.
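
As a rough sketch only (the helper name is hypothetical and not part of this
package), the following shows the unsigned byte-by-byte comparison that is
applied to stored key bytes by default.

+
+ // Hypothetical sketch: unsigned byte-array comparison, the default
+ // ordering applied to stored key bytes.
+ static int compareUnsignedBytes(byte[] a, byte[] b) {
+     int n = Math.min(a.length, b.length);
+     for (int i = 0; i < n; i += 1) {
+         int cmp = (a[i] & 0xff) - (b[i] & 0xff);
+         if (cmp != 0) {
+             return cmp;
+         }
+     }
+     return a.length - b.length;
+ }
+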

    + +

Although most tuple formats provide default natural sorting, not all of them +do. Certain formats do not provide default natural sorting for historical +reasons (see the discussion of packed integer and float formats below). Other +formats sacrifice default natural sorting for other performance factors (see +the discussion of BigDecimal formats below).

    + + +

Another performance factor has to do with the amount of memory used by keys +in the Btree. Keys are stored in their serialized form in the Btree. If keys +are small (currently 16 bytes or less), Btree memory can be optimized. +Optimized memory storage is based on the maximum size of all keys in a single +Btree node. A single Btree node holds N adjacent key values, where N is 128 +by default and can be {@link com.sleepycat.je.DatabaseConfig#setNodeMaxEntries +configured} for each database or index.
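
For example, a minimal configuration sketch (the value 256 is illustrative
only):

+
+ // Illustrative only: raise the per-node entry count above the default.
+ DatabaseConfig config = new DatabaseConfig();
+ config.setNodeMaxEntries(256);
+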

    + + +

    String Formats

    + +

    All {@code String} formats support default natural sorting.

    + +

Strings are stored as a byte array of UTF encoded characters, either with a +length that must be known by the application or as a zero-terminated byte +array. The UTF encoding is described below.

    +
      +
    • Null strings are UTF encoded as { 0xFF }, which is not allowed in a +standard UTF encoding. This allows null strings, as distinct from empty or +zero length strings, to be represented. Using default sorting, null strings +will be ordered last. +
    • +
    • Zero (0x0000) character values are UTF encoded as non-zero values, and +therefore embedded zeros in the string are supported. The sequence { 0xC0, +0x80 } is used to encode a zero character. This UTF encoding is the same one +used by the native Java UTF libraries and is called +Modified UTF-8. +However, this encoding of zero does impact the lexicographical ordering, and +zeros will not be sorted first (the natural order) or last. +
    • +
    • For all character values other than zero, the standard UTF encoding is +used, and the default sorting is the same as the Unicode lexicographical +character ordering. +
    • +
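
As a brief sketch of the encodings described above, an embedded zero
character and a null string both round-trip through the multi-value binding
methods (listed further below):

+
+ // Embedded zero chars and null strings round-trip intact.
+ TupleOutput out = new TupleOutput();
+ out.writeString("a\u0000b");       // zero char stored as { 0xC0, 0x80 }
+ out.writeString((String) null);    // null string stored as { 0xFF }
+ TupleInput in = new TupleInput(out);
+ String s1 = in.readString();       // "a\u0000b"
+ String s2 = in.readString();       // null
+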
    + +

    Binding classes and methods are provided for zero-terminated and +known-length {@code String} values.

    +
      +
    • Single-value binding classes for zero-terminated {@code String} +values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.StringBinding}
      • +
      +
    • Multi-value binding methods for zero-terminated and known-length {@code +String} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeString(String)}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readString}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#getStringByteLength}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeString(char[])}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readString(char[])}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readString(int)}
      • +
      +
    + +
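
A minimal usage sketch for the single-value binding listed above:

+
+ // Convert between String and DatabaseEntry with the static helpers.
+ DatabaseEntry entry = new DatabaseEntry();
+ StringBinding.stringToEntry("hello", entry);
+ String s = StringBinding.entryToString(entry);   // "hello"
+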

    Integer Formats

    + +

    Fixed Size Integer Formats

    + +

    All fixed size integer formats support default natural sorting.

    + +

    The size of the stored value depends on the type, and ranges (as one would +expect) from 1 byte for type {@code byte} and class {@code Byte}, to 8 bytes for +type {@code long} and class {@code Long}.

    + +

    Signed numbers are stored in the buffer in MSB (most significant byte first) +order with their sign bit (high-order bit) inverted to cause negative numbers +to be sorted first when comparing values as unsigned byte arrays, as done in a +database.
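
A rough sketch of this transformation for an {@code int} (the method name is
hypothetical; the bindings listed below perform the equivalent work):

+
+ // Flip the sign bit, then write most significant byte first, so that
+ // unsigned byte comparison yields the natural signed ordering.
+ static byte[] encodeSortedInt(int val) {
+     int bits = val ^ 0x80000000;
+     return new byte[] {
+         (byte) (bits >>> 24), (byte) (bits >>> 16),
+         (byte) (bits >>> 8), (byte) bits
+     };
+ }
+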

    + +
      +
    • Single-value binding classes for signed, fixed size integers.
    • +
        +
      • {@link com.sleepycat.bind.tuple.ByteBinding}
      • +
      • {@link com.sleepycat.bind.tuple.ShortBinding}
      • +
      • {@link com.sleepycat.bind.tuple.IntegerBinding}
      • +
      • {@link com.sleepycat.bind.tuple.LongBinding}
      • +
      +
    • Multi-value binding methods for signed, fixed size integers.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeByte}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readByte}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeShort}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readShort}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeInt}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readInt}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeLong}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readLong}
      • +
      +
    + +

    Unsigned numbers, including characters, are stored in MSB order with no +change to their sign bit. Arrays of characters and unsigned bytes may also be +stored and may be treated as {@code String} values. For booleans, {@code true} +is stored as the unsigned byte value one and {@code false} as the unsigned byte +value zero.

    + +
      +
    • Single-value binding classes for characters and booleans.
    • +
        +
      • {@link com.sleepycat.bind.tuple.BooleanBinding}
      • +
      • {@link com.sleepycat.bind.tuple.CharacterBinding}
      • +
      +
    • Multi-value binding methods for unsigned, fixed size integers, characters +and booleans.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeBoolean}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readBoolean}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeChar}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readChar}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeUnsignedByte}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readUnsignedByte}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeUnsignedShort}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readUnsignedShort}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeUnsignedInt}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readUnsignedInt}
      • +
      +
    • Multi-value binding methods for character arrays and unsigned byte arrays +that may be treated as {@code String} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeChars(String)}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readChars(int)}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeChars(char[])}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readChars(char[])}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeBytes(String)}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readBytes(int)}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeBytes(char[])}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readBytes(char[])}
      • +
      +
    + +

    Packed Integer Formats

    + +

    The packed integer format stores integers with small absolute values in a +single byte. The size increases as the absolute value increases, up to a +maximum of 5 bytes for {@code int} values and 9 bytes for {@code long} +values.

    + +

    The packed integer format can be used for integer values between {@link +java.lang.Long#MIN_VALUE} and {@link java.lang.Long#MAX_VALUE}. However, +different bindings and methods are provided for type {@code int} and {@code +long}, to avoid unsafe casting from {@code long} to {@code int} when {@code +int} values are used.

    + +

    Because the same packed format is used for {@code int} and {@code long} +values, stored {@code int} values may be expanded to {@code long} values +without introducing a format incompatibility. In other words, you can treat +previously stored packed {@code int} values as packed {@code long} values.
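
For example (a sketch using the sorted variety introduced just below):

+
+ // A stored packed int may later be read as a packed long.
+ TupleOutput out = new TupleOutput();
+ out.writeSortedPackedInt(7);
+ TupleInput in = new TupleInput(out);
+ long val = in.readSortedPackedLong();   // 7L, same packed format
+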

    + +

    Packed integer formats come in two varieties: those that support default +natural sorting and those that don't. The formats of the two varieties are +incompatible. For new applications, the format that supports default natural +sorting should normally be used. There is no performance advantage to using +the unsorted format.

    + +

    The format with support for default natural sorting stores values in the +inclusive range [-119,120] in a single byte.
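
For example (sizes follow from the description above; a sketch only):

+
+ // Values in the inclusive range [-119,120] occupy a single byte.
+ TupleOutput out = new TupleOutput();
+ out.writeSortedPackedInt(120);
+ int size = out.size();                  // 1 byte so far
+ out.writeSortedPackedInt(-5000);        // larger magnitude, more bytes
+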

    +
      +
    • Single-value binding classes for packed integers with default natural +sorting.
    • +
        +
      • {@link com.sleepycat.bind.tuple.SortedPackedIntegerBinding}
      • +
      • {@link com.sleepycat.bind.tuple.SortedPackedLongBinding}
      • +
      +
    • Multi-value binding methods for packed integers with default natural +sorting.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeSortedPackedInt}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readSortedPackedInt}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#getSortedPackedIntByteLength}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeSortedPackedLong}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readSortedPackedLong}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#getSortedPackedLongByteLength}
      • +
      +
    + +

    The unsorted packed integer format is an older, legacy format that is used +internally and supported for compatibility. It stores values in the inclusive +range [-119,119] in a single byte. Because default natural sorting is not +supported, this format should not be used for keys. However, it so happens +that packed integers in the inclusive range [0,630] are sorted correctly by +default, and this may be useful for some applications.

    +
      +
    • Single-value binding classes for legacy, unsorted packed integers.
    • +
        +
      • {@link com.sleepycat.bind.tuple.PackedIntegerBinding}
      • +
      • {@link com.sleepycat.bind.tuple.PackedLongBinding}
      • +
      +
    • Multi-value binding methods for legacy, unsorted packed integers.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writePackedInt}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readPackedInt}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#getPackedIntByteLength}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writePackedLong}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readPackedLong}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#getPackedLongByteLength}
      • +
      +
    + +

    BigInteger Formats

    + +

    All {@code BigInteger} formats support default natural sorting.

    + +

    {@code BigInteger} values are variable length and are stored as signed +values with a preceding byte length. The length has the same sign as the +value, in order to support default natural sorting.

    + +

The length is stored as a 2-byte (short), fixed size, signed integer. +Supported values are therefore limited to those with a byte array ({@link +java.math.BigInteger#toByteArray}) representation with a size of 0x7fff bytes +or less. The maximum {@code BigInteger} value is (2<sup>0x3fff7</sup> - 1) +and the minimum value is (-2<sup>0x3fff7</sup>).
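
A round-trip sketch using the methods listed below (assuming {@code
java.math.BigInteger} is imported):

+
+ // BigInteger round trip; the stored form sorts naturally by default.
+ TupleOutput out = new TupleOutput();
+ out.writeBigInteger(new BigInteger("123456789012345678901234567890"));
+ TupleInput in = new TupleInput(out);
+ BigInteger val = in.readBigInteger();
+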

    + +
      +
    • Single-value binding classes for {@code BigInteger} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.BigIntegerBinding}
      • +
      +
    • Multi-value binding methods for {@code BigInteger} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeBigInteger}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readBigInteger}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#getBigIntegerByteLength}
      • +
      +
    + +

    Floating Point Formats

    + +

Floats and doubles are stored in fixed size formats of 4 and 8 bytes, +respectively. Floats and doubles are stored using two different +representations: a representation with default natural sorting, and an +unsorted, integer-bit (IEEE 754) representation. For new applications, the +format that supports default natural sorting should normally be used. There is +no performance advantage to using the unsorted format.

    + +

    For {@code float} values, Float.floatToIntBits and the following +bit manipulations are used to convert the signed float value to a +representation that is sorted correctly by default.

    +
    + int intVal = Float.floatToIntBits(val);
    + intVal ^= (intVal < 0) ? 0xffffffff : 0x80000000;
    +
    + +

For {@code double} values, Double.doubleToLongBits and the +following bit manipulations are used to convert the signed double value to a +representation that is sorted correctly by default.

    +
    + long longVal = Double.doubleToLongBits(val);
    + longVal ^= (longVal < 0) ? 0xffffffffffffffffL : 0x8000000000000000L;
    +
    + +

    In both cases, the resulting {@code int} or {@code long} value is stored as +an unsigned value.
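
A brief usage sketch of the sorted bindings listed below:

+
+ // Negative values precede positive values in the stored byte ordering.
+ TupleOutput out = new TupleOutput();
+ out.writeSortedFloat(-1.5f);
+ out.writeSortedDouble(2.0);
+ TupleInput in = new TupleInput(out);
+ float f = in.readSortedFloat();         // -1.5f
+ double d = in.readSortedDouble();       // 2.0
+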

    + +
      +
    • Single-value binding classes for {@code float} and {@code double} values +with default natural sorting.
    • +
        +
      • {@link com.sleepycat.bind.tuple.SortedFloatBinding}
      • +
      • {@link com.sleepycat.bind.tuple.SortedDoubleBinding}
      • +
      +
    • Multi-value binding methods for {@code float} and {@code double} values +with default natural sorting.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeSortedFloat}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readSortedFloat}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeSortedDouble}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readSortedDouble}
      • +
      +
    + +

    The unsorted floating point format is an older, legacy format that is +supported for compatibility. With this format, only zero and positive values +have default natural sorting; negative values do not.

    + +
      +
    • Single-value binding classes for legacy, unsorted {@code float} and {@code +double} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.FloatBinding}
      • +
      • {@link com.sleepycat.bind.tuple.DoubleBinding}
      • +
      +
    • Multi-value binding methods for legacy, unsorted {@code float} and {@code +double} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeFloat}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readFloat}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeDouble}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readDouble}
      • +
      +
    + +

    BigDecimal Formats

    + +

    {@code BigDecimal} values are stored using two different, variable length +representations: a representation that supports default natural sorting, and an +unsorted representation. Differences between the two formats are: +

      +
    • The {@code BigDecimal} format with default natural sorting should normally +be used for database keys.
    • +
        +
      • Default natural sorting is supported.
      • +
• The stored value is typically around 3 bytes larger than the unsorted + format, and is a minimum of 8 bytes.
      • +
      • More computation is required for serialization than the unsorted + format.
      • +
      • Trailing zeros after the decimal place are stripped, meaning that + precision is not preserved.
      • +
      +
    • The unsorted {@code BigDecimal} format should normally be used for non-key +values.
    • +
        +
      • Default natural sorting is not supported.
      • +
• The stored value is typically around 3 bytes smaller than the sorted + format, and is a minimum of 3 bytes.
      • +
      • Less computation is required for serialization than the sorted + format.
      • +
      • Trailing zeros after the decimal place are preserved, meaning that + precision is preserved.
      • +
      +
    + +

    Both formats store the scale or exponent separately from the unscaled value, +and the stored size does not increase proportionally as the absolute value of +the scale or exponent increases.
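
A sketch of the precision difference described above (assuming {@code
java.math.BigDecimal} is imported):

+
+ // The sorted format strips trailing zeros; the unsorted format keeps them.
+ TupleOutput out = new TupleOutput();
+ out.writeSortedBigDecimal(new BigDecimal("1.500"));
+ out.writeBigDecimal(new BigDecimal("1.500"));
+ TupleInput in = new TupleInput(out);
+ BigDecimal sorted = in.readSortedBigDecimal();   // 1.5
+ BigDecimal unsorted = in.readBigDecimal();       // 1.500
+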

    + +
      +
    • Single-value binding classes for {@code BigDecimal} values with default +natural sorting.
    • +
        +
      • {@link com.sleepycat.bind.tuple.SortedBigDecimalBinding}
      • +
      +
    • Multi-value binding methods for {@code BigDecimal} values with default +natural sorting.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeSortedBigDecimal}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#getSortedBigDecimalMaxByteLength}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readSortedBigDecimal}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#getSortedBigDecimalByteLength}
      • +
      +
    • Single-value binding classes for unsorted {@code BigDecimal} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.BigDecimalBinding}
      • +
      +
    • Multi-value binding methods for unsorted {@code BigDecimal} values.
    • +
        +
      • {@link com.sleepycat.bind.tuple.TupleOutput#writeBigDecimal}
      • +
      • {@link com.sleepycat.bind.tuple.TupleOutput#getBigDecimalMaxByteLength}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#readBigDecimal}
      • +
      • {@link com.sleepycat.bind.tuple.TupleInput#getBigDecimalByteLength}
      • +
      +
    + + + diff --git a/src/com/sleepycat/collections/BaseIterator.java b/src/com/sleepycat/collections/BaseIterator.java new file mode 100644 index 0000000..42a09d0 --- /dev/null +++ b/src/com/sleepycat/collections/BaseIterator.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.ListIterator; + +/** + * Common interface for BlockIterator and StoredIterator. This is an abstract + * class rather than in interface to prevent exposing these methods in javadoc. + */ +abstract class BaseIterator implements ListIterator { + + abstract ListIterator dup(); + + abstract boolean isCurrentData(Object currentData); + + abstract boolean moveToIndex(int index); +} diff --git a/src/com/sleepycat/collections/BlockIterator.java b/src/com/sleepycat/collections/BlockIterator.java new file mode 100644 index 0000000..f047a7e --- /dev/null +++ b/src/com/sleepycat/collections/BlockIterator.java @@ -0,0 +1,779 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.ListIterator; +import java.util.NoSuchElementException; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.keyrange.KeyRange; + +/** + * An iterator that does not need closing because a cursor is not kept open + * across method calls. A cursor is opened to read a block of records at a + * time and then closed before the method returns. + * + * @author Mark Hayes + */ +class BlockIterator extends BaseIterator { + + private StoredCollection coll; + private boolean writeAllowed; + + /** + * Slots for a block of record keys and values. The priKeys array is only + * used for secondary databases; otherwise it is set to the keys array. + */ + private byte[][] keys; + private byte[][] priKeys; + private byte[][] values; + + /** + * The slot index of the record that would be returned by next(). + * nextIndex is always greater or equal to zero. If the next record is not + * available, then nextIndex is equal to keys.length or keys[nextIndex] is + * null. + * + * If the block is empty, then either the iterator is uninitialized or the + * key range is empty. Either way, nextIndex will be the array length and + * all array values will be null. This is the initial state set by the + * constructor. If remove() is used to delete all records in the key + * range, it will restore the iterator to this initial state. 
The block + * must never be allowed to become empty when the key range is non-empty, + * since then the iterator's position would be lost. [#15858] + */ + private int nextIndex; + + /** + * The slot index of the record last returned by next() or previous(), or + * the record inserted by add(). dataIndex is -1 if the data record is not + * available. If greater or equal to zero, the slot at dataIndex is always + * non-null. + */ + private int dataIndex; + + /** + * The iterator data last returned by next() or previous(). This value is + * set to null if dataIndex is -1, or if the state of the iterator is such + * that set() or remove() cannot be called. For example, after add() this + * field is set to null, even though the dataIndex is still valid. + */ + private E dataObject; + + /** + * Creates an iterator. + */ + BlockIterator(StoredCollection coll, + boolean writeAllowed, + int blockSize) { + + this.coll = coll; + this.writeAllowed = writeAllowed; + + keys = new byte[blockSize][]; + priKeys = coll.isSecondary() ? (new byte[blockSize][]) : keys; + values = new byte[blockSize][]; + + nextIndex = blockSize; + dataIndex = -1; + dataObject = null; + } + + /** + * Copy constructor. + */ + private BlockIterator(BlockIterator o) { + + coll = o.coll; + writeAllowed = o.writeAllowed; + + keys = copyArray(o.keys); + priKeys = coll.isSecondary() ? copyArray(o.priKeys) : keys; + values = copyArray(o.values); + + nextIndex = o.nextIndex; + dataIndex = o.dataIndex; + dataObject = o.dataObject; + } + + /** + * Copies an array of byte arrays. + */ + private byte[][] copyArray(byte[][] a) { + + byte[][] b = new byte[a.length][]; + for (int i = 0; i < b.length; i += 1) { + if (a[i] != null) { + b[i] = KeyRange.copyBytes(a[i]); + } + } + return b; + } + + /** + * Returns whether the element at nextIndex is available. + */ + private boolean isNextAvailable() { + + return (nextIndex < keys.length) && + (keys[nextIndex] != null); + } + + /** + * Returns whether the element at nextIndex-1 is available. + */ + private boolean isPrevAvailable() { + + return (nextIndex > 0) && + (keys[nextIndex - 1] != null); + } + + /** + * Returns the record number at the given slot position. + */ + private int getRecordNumber(int i) { + + if (coll.view.btreeRecNumDb) { + DataCursor cursor = null; + try { + cursor = new DataCursor(coll.view, false); + if (!moveCursor(i, cursor)) { + throw new IllegalStateException(); + } + return cursor.getCurrentRecordNumber(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + coll.closeCursor(cursor); + } + } else { + DatabaseEntry entry = new DatabaseEntry(keys[i]); + return DbCompat.getRecordNumber(entry); + } + } + + /** + * Sets dataObject to the iterator data for the element at dataIndex. + */ + private void makeDataObject() { + + int i = dataIndex; + DatabaseEntry keyEntry = new DatabaseEntry(keys[i]); + DatabaseEntry priKeyEntry = (keys != priKeys) + ? (new DatabaseEntry(priKeys[i])) + : keyEntry; + DatabaseEntry valuesEntry = new DatabaseEntry(values[i]); + + dataObject = coll.makeIteratorData(this, keyEntry, priKeyEntry, + valuesEntry); + } + + /** + * Sets all slots to null. + */ + private void clearSlots() { + + for (int i = 0; i < keys.length; i += 1) { + keys[i] = null; + priKeys[i] = null; + values[i] = null; + } + } + + /** + * Sets a given slot using the data in the given cursor. 
+ */ + private void setSlot(int i, DataCursor cursor) { + + keys[i] = KeyRange.getByteArray(cursor.getKeyThang()); + + if (keys != priKeys) { + priKeys[i] = KeyRange.getByteArray + (cursor.getPrimaryKeyThang()); + } + + values[i] = KeyRange.getByteArray(cursor.getValueThang()); + } + + /** + * Inserts an added record at a given slot position and shifts other slots + * accordingly. Also adjusts nextIndex and sets dataIndex to -1. + */ + private void insertSlot(int i, DataCursor cursor) { + + if (i < keys.length) { + for (int j = keys.length - 1; j > i; j -= 1) { + + /* Shift right. */ + keys[j] = keys[j - 1]; + priKeys[j] = priKeys[j - 1]; + values[j] = values[j - 1]; + + /* Bump key in recno-renumber database. */ + if (coll.view.recNumRenumber && keys[j] != null) { + bumpRecordNumber(j); + } + } + nextIndex += 1; + } else { + if (i != keys.length) { + throw DbCompat.unexpectedState(); + } + i -= 1; + for (int j = 0; j < i; j += 1) { + /* Shift left. */ + keys[j] = keys[j + 1]; + priKeys[j] = priKeys[j + 1]; + values[j] = values[j + 1]; + } + } + setSlot(i, cursor); + dataIndex = -1; + } + + /** + * Increments the record number key at the given slot. + */ + private void bumpRecordNumber(int i) { + + DatabaseEntry entry = new DatabaseEntry(keys[i]); + DbCompat.setRecordNumber(entry, + DbCompat.getRecordNumber(entry) + 1); + keys[i] = entry.getData(); + } + + /** + * Deletes the given slot, adjusts nextIndex and sets dataIndex to -1. + */ + private void deleteSlot(int i) { + + for (int j = i + 1; j < keys.length; j += 1) { + keys[j - 1] = keys[j]; + priKeys[j - 1] = priKeys[j]; + values[j - 1] = values[j]; + } + int last = keys.length - 1; + keys[last] = null; + priKeys[last] = null; + values[last] = null; + + if (nextIndex > i) { + nextIndex -= 1; + } + dataIndex = -1; + } + + /** + * Moves the cursor to the key/data at the given slot, and returns false + * if the reposition (search) fails. + */ + private boolean moveCursor(int i, DataCursor cursor) + throws DatabaseException { + + return cursor.repositionExact(keys[i], priKeys[i], values[i], false); + } + + // --- begin Iterator/ListIterator methods --- + + public boolean hasNext() { + + if (isNextAvailable()) { + return true; + } + DataCursor cursor = null; + try { + cursor = new DataCursor(coll.view, writeAllowed); + int prev = nextIndex - 1; + boolean found = false; + + if (keys[prev] == null) { + /* Get the first record for an uninitialized iterator. */ + OperationStatus status = cursor.getFirst(false); + if (status == OperationStatus.SUCCESS) { + found = true; + nextIndex = 0; + } + } else { + /* Reposition to the last known key/data pair. */ + int repos = cursor.repositionRange + (keys[prev], priKeys[prev], values[prev], false); + + if (repos == DataCursor.REPOS_EXACT) { + + /* + * The last known key/data pair was found and will now be + * in slot zero. + */ + found = true; + nextIndex = 1; + + /* The data object is now in slot zero or not available. */ + if (dataIndex == prev) { + dataIndex = 0; + } else { + dataIndex = -1; + dataObject = null; + } + } else if (repos == DataCursor.REPOS_NEXT) { + + /* + * The last known key/data pair was not found, but the + * following record was found and it will be in slot zero. + */ + found = true; + nextIndex = 0; + + /* The data object is no longer available. */ + dataIndex = -1; + dataObject = null; + } else { + if (repos != DataCursor.REPOS_EOF) { + throw DbCompat.unexpectedState(); + } + } + } + + if (found) { + /* Clear all slots first in case an exception occurs below. 
*/ + clearSlots(); + + /* Attempt to fill all slots with records. */ + int i = 0; + boolean done = false; + while (!done) { + setSlot(i, cursor); + i += 1; + if (i < keys.length) { + OperationStatus status = coll.iterateDuplicates() ? + cursor.getNext(false) : + cursor.getNextNoDup(false); + if (status != OperationStatus.SUCCESS) { + done = true; + } + } else { + done = true; + } + } + + } + + /* + * If REPOS_EXACT was returned above, make sure we retrieved + * the following record. + */ + return isNextAvailable(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + coll.closeCursor(cursor); + } + } + + public boolean hasPrevious() { + + if (isPrevAvailable()) { + return true; + } + if (!isNextAvailable()) { + return false; + } + DataCursor cursor = null; + try { + cursor = new DataCursor(coll.view, writeAllowed); + int last = keys.length - 1; + int next = nextIndex; + boolean found = false; + + /* Reposition to the last known key/data pair. */ + int repos = cursor.repositionRange + (keys[next], priKeys[next], values[next], false); + + if (repos == DataCursor.REPOS_EXACT || + repos == DataCursor.REPOS_NEXT) { + + /* + * The last known key/data pair, or the record following it, + * was found and will now be in the last slot. + */ + found = true; + nextIndex = last; + + /* The data object is now in the last slot or not available. */ + if (dataIndex == next && repos == DataCursor.REPOS_EXACT) { + dataIndex = last; + } else { + dataIndex = -1; + dataObject = null; + } + } else { + if (repos != DataCursor.REPOS_EOF) { + throw DbCompat.unexpectedState(); + } + } + + if (found) { + /* Clear all slots first in case an exception occurs below. */ + clearSlots(); + + /* Attempt to fill all slots with records. */ + int i = last; + boolean done = false; + while (!done) { + setSlot(i, cursor); + i -= 1; + if (i >= 0) { + OperationStatus status = coll.iterateDuplicates() ? + cursor.getPrev(false) : + cursor.getPrevNoDup(false); + if (status != OperationStatus.SUCCESS) { + done = true; + } + } else { + done = true; + } + } + } + + /* + * Make sure we retrieved the preceding record after the reposition + * above. + */ + return isPrevAvailable(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + coll.closeCursor(cursor); + } + } + + public E next() { + + if (hasNext()) { + dataIndex = nextIndex; + nextIndex += 1; + makeDataObject(); + return dataObject; + } else { + throw new NoSuchElementException(); + } + } + + public E previous() { + + if (hasPrevious()) { + nextIndex -= 1; + dataIndex = nextIndex; + makeDataObject(); + return dataObject; + } else { + throw new NoSuchElementException(); + } + } + + public int nextIndex() { + + if (!coll.view.recNumAccess) { + throw new UnsupportedOperationException + ("Record number access not supported"); + } + + return hasNext() ? (getRecordNumber(nextIndex) - + coll.getIndexOffset()) + : Integer.MAX_VALUE; + } + + public int previousIndex() { + + if (!coll.view.recNumAccess) { + throw new UnsupportedOperationException + ("Record number access not supported"); + } + + return hasPrevious() ? 
(getRecordNumber(nextIndex - 1) - + coll.getIndexOffset()) + : (-1); + } + + public void set(E value) { + + if (dataObject == null) { + throw new IllegalStateException(); + } + if (!coll.hasValues()) { + throw new UnsupportedOperationException(); + } + DataCursor cursor = null; + boolean doAutoCommit = coll.beginAutoCommit(); + try { + cursor = new DataCursor(coll.view, writeAllowed); + if (moveCursor(dataIndex, cursor)) { + cursor.putCurrent(value); + setSlot(dataIndex, cursor); + coll.closeCursor(cursor); + coll.commitAutoCommit(doAutoCommit); + } else { + throw new IllegalStateException(); + } + } catch (Exception e) { + coll.closeCursor(cursor); + throw coll.handleException(e, doAutoCommit); + } + } + + public void remove() { + + if (dataObject == null) { + throw new IllegalStateException(); + } + DataCursor cursor = null; + boolean doAutoCommit = coll.beginAutoCommit(); + try { + cursor = new DataCursor(coll.view, writeAllowed); + if (moveCursor(dataIndex, cursor)) { + cursor.delete(); + deleteSlot(dataIndex); + dataObject = null; + + /* + * Repopulate the block after removing all records, using the + * cursor position of the deleted record as a starting point. + * First try moving forward, since the user was moving forward. + * (It is possible to delete all records in the block only by + * moving forward, i.e, when nextIndex is greater than + * dataIndex.) + */ + if (nextIndex == 0 && keys[0] == null) { + OperationStatus status; + for (int i = 0; i < keys.length; i += 1) { + status = coll.iterateDuplicates() ? + cursor.getNext(false) : + cursor.getNextNoDup(false); + if (status == OperationStatus.SUCCESS) { + setSlot(i, cursor); + } else { + break; + } + } + + /* + * If no records are found past the cursor position, try + * moving backward. If no records are found before the + * cursor position, leave nextIndex set to keys.length, + * which is the same as the initial iterator state and is + * appropriate for an empty key range. + */ + if (keys[0] == null) { + nextIndex = keys.length; + for (int i = nextIndex - 1; i >= 0; i -= 1) { + status = coll.iterateDuplicates() ? + cursor.getPrev(false) : + cursor.getPrevNoDup(false); + if (status == OperationStatus.SUCCESS) { + setSlot(i, cursor); + } else { + break; + } + } + } + } + coll.closeCursor(cursor); + coll.commitAutoCommit(doAutoCommit); + } else { + throw new IllegalStateException(); + } + } catch (Exception e) { + coll.closeCursor(cursor); + throw coll.handleException(e, doAutoCommit); + } + } + + public void add(E value) { + + /* + * The checkIterAddAllowed method ensures that one of the following two + * conditions holds and throws UnsupportedOperationException otherwise: + * 1- This is a list iterator for a recno-renumber database. + * 2- This is a collection iterator for a duplicates database. + */ + coll.checkIterAddAllowed(); + OperationStatus status = OperationStatus.SUCCESS; + DataCursor cursor = null; + boolean doAutoCommit = coll.beginAutoCommit(); + try { + if (coll.view.keysRenumbered || !coll.areDuplicatesOrdered()) { + + /* + * This is a recno-renumber database or a btree/hash database + * with unordered duplicates. + */ + boolean hasPrev = hasPrevious(); + if (!hasPrev && !hasNext()) { + + /* The collection is empty. */ + if (coll.view.keysRenumbered) { + + /* Append to an empty recno-renumber database. 
*/ + status = coll.view.append(value, null, null); + + } else if (coll.view.dupsAllowed && + coll.view.range.isSingleKey()) { + + /* + * When inserting a duplicate into a single-key range, + * the main key is fixed, so we can always insert into + * an empty collection. + */ + cursor = new DataCursor(coll.view, writeAllowed); + cursor.useRangeKey(); + status = cursor.putNoDupData(null, value, null, true); + coll.closeCursor(cursor); + cursor = null; + } else { + throw new IllegalStateException + ("Collection is empty, cannot add() duplicate"); + } + + /* + * Move past the record just inserted (the cursor should be + * closed above to prevent multiple open cursors in certain + * DB core modes). + */ + if (status == OperationStatus.SUCCESS) { + next(); + dataIndex = nextIndex - 1; + } + } else { + + /* + * The collection is non-empty. If hasPrev is true then + * the element at (nextIndex - 1) is available; otherwise + * the element at nextIndex is available. + */ + cursor = new DataCursor(coll.view, writeAllowed); + int insertIndex = hasPrev ? (nextIndex - 1) : nextIndex; + + if (!moveCursor(insertIndex, cursor)) { + throw new IllegalStateException(); + } + + /* + * For a recno-renumber database or a database with + * unsorted duplicates, insert before the iterator 'next' + * position, or after the 'prev' position. Then adjust + * the slots to account for the inserted record. + */ + status = hasPrev ? cursor.putAfter(value) + : cursor.putBefore(value); + if (status == OperationStatus.SUCCESS) { + insertSlot(nextIndex, cursor); + } + } + } else { + /* This is a btree/hash database with ordered duplicates. */ + cursor = new DataCursor(coll.view, writeAllowed); + + if (coll.view.range.isSingleKey()) { + + /* + * When inserting a duplicate into a single-key range, + * the main key is fixed. + */ + cursor.useRangeKey(); + } else { + + /* + * When inserting into a multi-key range, the main key + * is the last dataIndex accessed by next(), previous() + * or add(). + */ + if (dataIndex < 0 || !moveCursor(dataIndex, cursor)) { + throw new IllegalStateException(); + } + } + + /* + * For a hash/btree with duplicates, insert the duplicate, + * put the new record in slot zero, and set the next index + * to slot one (past the new record). + */ + status = cursor.putNoDupData(null, value, null, true); + if (status == OperationStatus.SUCCESS) { + clearSlots(); + setSlot(0, cursor); + dataIndex = 0; + nextIndex = 1; + } + } + + if (status == OperationStatus.KEYEXIST) { + throw new IllegalArgumentException("Duplicate value"); + } else if (status != OperationStatus.SUCCESS) { + DbCompat.unexpectedState("Could not insert: " + status); + } + + /* Prevent subsequent set() or remove() call. */ + dataObject = null; + + coll.closeCursor(cursor); + coll.commitAutoCommit(doAutoCommit); + } catch (Exception e) { + /* Catch RuntimeExceptions too. 
*/ + coll.closeCursor(cursor); + throw coll.handleException(e, doAutoCommit); + } + } + + // --- end Iterator/ListIterator methods --- + + // --- begin BaseIterator methods --- + + final ListIterator dup() { + + return new BlockIterator(this); + } + + final boolean isCurrentData(Object currentData) { + + return (dataObject == currentData); + } + + final boolean moveToIndex(int index) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(coll.view, writeAllowed); + OperationStatus status = + cursor.getSearchKey(Integer.valueOf(index), null, false); + boolean retVal; + if (status == OperationStatus.SUCCESS) { + clearSlots(); + setSlot(0, cursor); + nextIndex = 0; + retVal = true; + } else { + retVal = false; + } + return retVal; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + coll.closeCursor(cursor); + } + } + + // --- end BaseIterator methods --- +} diff --git a/src/com/sleepycat/collections/CurrentTransaction.java b/src/com/sleepycat/collections/CurrentTransaction.java new file mode 100644 index 0000000..1bb71aa --- /dev/null +++ b/src/com/sleepycat/collections/CurrentTransaction.java @@ -0,0 +1,523 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.lang.ref.WeakReference; +import java.util.ArrayList; +import java.util.List; +import java.util.WeakHashMap; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * Provides access to the current transaction for the current thread within the + * context of a Berkeley DB environment. This class provides explicit + * transaction control beyond that provided by the {@link TransactionRunner} + * class. However, both methods of transaction control manage per-thread + * transactions. + * + * @author Mark Hayes + */ +public class CurrentTransaction { + + /* For internal use, this class doubles as an Environment wrapper. */ + + private static WeakHashMap envMap = + new WeakHashMap(); + + private LockMode writeLockMode; + private boolean cdbMode; + private boolean txnMode; + private boolean lockingMode; + private ThreadLocal localTrans = new ThreadLocal(); + private ThreadLocal localCdbCursors; + + /* + * Use a WeakReference to the Environment to avoid pinning the environment + * in the envMap. The WeakHashMap envMap uses the Environment as a weak + * key, but this won't prevent GC of the Environment if the map's value has + * a hard reference to the Environment. 
[#15444] + */ + private WeakReference envRef; + + /** + * Gets the CurrentTransaction accessor for a specified Berkeley DB + * environment. This method always returns the same reference when called + * more than once with the same environment parameter. + * + * @param env is an open Berkeley DB environment. + * + * @return the CurrentTransaction accessor for the given environment, or + * null if the environment is not transactional. + */ + public static CurrentTransaction getInstance(Environment env) { + + CurrentTransaction currentTxn = getInstanceInternal(env); + return currentTxn.isTxnMode() ? currentTxn : null; + } + + /** + * Gets the CurrentTransaction accessor for a specified Berkeley DB + * environment. Unlike getInstance(), this method never returns null. + * + * @param env is an open Berkeley DB environment. + */ + static CurrentTransaction getInstanceInternal(Environment env) { + synchronized (envMap) { + CurrentTransaction ct = envMap.get(env); + if (ct == null) { + ct = new CurrentTransaction(env); + envMap.put(env, ct); + } + return ct; + } + } + + private CurrentTransaction(Environment env) { + envRef = new WeakReference(env); + try { + EnvironmentConfig config = env.getConfig(); + txnMode = config.getTransactional(); + lockingMode = DbCompat.getInitializeLocking(config); + if (txnMode || lockingMode) { + writeLockMode = LockMode.RMW; + } else { + writeLockMode = LockMode.DEFAULT; + } + cdbMode = DbCompat.getInitializeCDB(config); + if (cdbMode) { + localCdbCursors = new ThreadLocal(); + } + } catch (DatabaseException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } + + /** + * Returns whether environment is configured for locking. + */ + final boolean isLockingMode() { + + return lockingMode; + } + + /** + * Returns whether this is a transactional environment. + */ + final boolean isTxnMode() { + + return txnMode; + } + + /** + * Returns whether this is a Concurrent Data Store environment. + */ + final boolean isCdbMode() { + + return cdbMode; + } + + /** + * Return the LockMode.RMW or null, depending on whether locking is + * enabled. LockMode.RMW will cause an error if passed when locking + * is not enabled. Locking is enabled if locking or transactions were + * specified for this environment. + */ + final LockMode getWriteLockMode() { + + return writeLockMode; + } + + /** + * Returns the underlying Berkeley DB environment. + * + * @return the Environment. + */ + public final Environment getEnvironment() { + + return envRef.get(); + } + + /** + * Returns the transaction associated with the current thread for this + * environment, or null if no transaction is active. + * + * @return the Transaction. + */ + public final Transaction getTransaction() { + + Trans trans = (Trans) localTrans.get(); + return (trans != null) ? trans.txn : null; + } + + /** + * Returns whether auto-commit may be performed by the collections API. + * True is returned if no collections API transaction is currently active, + * and no XA transaction is currently active. + */ + boolean isAutoCommitAllowed() + throws DatabaseException { + + return getTransaction() == null && + DbCompat.getThreadTransaction(getEnvironment()) == null; + } + + /** + * Begins a new transaction for this environment and associates it with + * the current thread. If a transaction is already active for this + * environment and thread, a nested transaction will be created. 
+ * + * @param config the transaction configuration used for calling + * {@link Environment#beginTransaction}, or null to use the default + * configuration. + * + * @return the new transaction. + * + * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the Master + * in a replicated environment could not contact a quorum of replicas as + * determined by the {@link com.sleepycat.je.Durability.ReplicaAckPolicy}. + * + * @throws com.sleepycat.je.rep.ReplicaConsistencyException if a replica + * in a replicated environment cannot become consistent within the timeout + * period. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException if the transaction cannot be started, in which + * case any existing transaction is not affected. + * + * @throws IllegalStateException if a transaction is already active and + * nested transactions are not supported by the environment. + */ + public final Transaction beginTransaction(TransactionConfig config) + throws DatabaseException { + + Environment env = getEnvironment(); + Trans trans = (Trans) localTrans.get(); + if (trans != null) { + if (trans.txn != null) { + if (!DbCompat.NESTED_TRANSACTIONS) { + throw new IllegalStateException + ("Nested transactions are not supported"); + } + Transaction parentTxn = trans.txn; + trans = new Trans(trans, config); + trans.txn = env.beginTransaction(parentTxn, config); + localTrans.set(trans); + } else { + trans.txn = env.beginTransaction(null, config); + trans.config = config; + } + } else { + trans = new Trans(null, config); + trans.txn = env.beginTransaction(null, config); + localTrans.set(trans); + } + return trans.txn; + } + + /** + * Commits the transaction that is active for the current thread for this + * environment and makes the parent transaction (if any) the current + * transaction. + * + * @return the parent transaction or null if the committed transaction was + * not nested. + * + * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the master + * in a replicated environment could not contact a quorum of replicas as + * determined by the {@link com.sleepycat.je.Durability.ReplicaAckPolicy}. + * The application must abort the transaction and can choose to retry it. + * + * @throws com.sleepycat.je.rep.InsufficientAcksException if the master in + * a replicated environment did not receive enough replica acknowledgments, + * although the commit succeeded locally. + * + * @throws com.sleepycat.je.rep.ReplicaWriteException if a write operation + * was performed with this transaction, but this node is now a Replica. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException if an error occurs committing the transaction. + * The transaction will still be closed and the parent transaction will + * become the current transaction. + * + * @throws IllegalStateException if no transaction is active for the + * current thread for this environment. 
+ */ + public final Transaction commitTransaction() + throws DatabaseException, IllegalStateException { + + Trans trans = (Trans) localTrans.get(); + if (trans != null && trans.txn != null) { + Transaction parent = closeTxn(trans); + trans.txn.commit(); + return parent; + } else { + throw new IllegalStateException("No transaction is active"); + } + } + + /** + * Aborts the transaction that is active for the current thread for this + * environment and makes the parent transaction (if any) the current + * transaction. + * + * @return the parent transaction or null if the aborted transaction was + * not nested. + * + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException if an error occurs aborting the transaction. + * The transaction will still be closed and the parent transaction will + * become the current transaction. + * + * @throws IllegalStateException if no transaction is active for the + * current thread for this environment. + */ + public final Transaction abortTransaction() + throws DatabaseException, IllegalStateException { + + Trans trans = (Trans) localTrans.get(); + if (trans != null && trans.txn != null) { + Transaction parent = closeTxn(trans); + trans.txn.abort(); + return parent; + } else { + throw new IllegalStateException("No transaction is active"); + } + } + + /** + * Returns whether the current transaction is a readUncommitted + * transaction. + */ + final boolean isReadUncommitted() { + + Trans trans = (Trans) localTrans.get(); + if (trans != null && trans.config != null) { + return trans.config.getReadUncommitted(); + } else { + return false; + } + } + + private Transaction closeTxn(Trans trans) { + + localTrans.set(trans.parent); + return (trans.parent != null) ? trans.parent.txn : null; + } + + private static class Trans { + + private Trans parent; + private Transaction txn; + private TransactionConfig config; + + private Trans(Trans parent, TransactionConfig config) { + + this.parent = parent; + this.config = config; + } + } + + /** + * Opens a cursor for a given database, dup'ing an existing CDB cursor if + * one is open for the current thread. + */ + Cursor openCursor(Database db, + CursorConfig cursorConfig, + boolean writeCursor, + Transaction txn) + throws DatabaseException { + + if (cdbMode) { + CdbCursors cdbCursors = null; + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap == null) { + cdbCursorsMap = new WeakHashMap(); + localCdbCursors.set(cdbCursorsMap); + } else { + cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + } + if (cdbCursors == null) { + cdbCursors = new CdbCursors(); + cdbCursorsMap.put(db, cdbCursors); + } + + /* + * In CDB mode the cursorConfig specified by the user is ignored + * and only the writeCursor parameter is honored. This is the only + * meaningful cursor attribute for CDB, and here we count on + * writeCursor flag being set correctly by the caller. 
+ */ + List cursors; + CursorConfig cdbConfig; + if (writeCursor) { + if (cdbCursors.readCursors.size() > 0) { + + /* + * Although CDB allows opening a write cursor when a read + * cursor is open, a self-deadlock will occur if a write is + * attempted for a record that is read-locked; we should + * avoid self-deadlocks at all costs + */ + throw new IllegalStateException + ("Cannot open CDB write cursor when read cursor " + + "is open"); + } + cursors = cdbCursors.writeCursors; + cdbConfig = new CursorConfig(); + DbCompat.setWriteCursor(cdbConfig, true); + } else { + cursors = cdbCursors.readCursors; + cdbConfig = null; + } + Cursor cursor; + if (cursors.size() > 0) { + Cursor other = ((Cursor) cursors.get(0)); + cursor = other.dup(false); + } else { + cursor = db.openCursor(null, cdbConfig); + } + cursors.add(cursor); + return cursor; + } else { + return db.openCursor(txn, cursorConfig); + } + } + + /** + * Duplicates a cursor for a given database. + * + * @param writeCursor true to open a write cursor in a CDB environment, and + * ignored for other environments. + * + * @param samePosition is passed through to Cursor.dup(). + * + * @return the open cursor. + * + * @throws DatabaseException if a database problem occurs. + */ + Cursor dupCursor(Cursor cursor, boolean writeCursor, boolean samePosition) + throws DatabaseException { + + if (cdbMode) { + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap != null) { + Database db = cursor.getDatabase(); + CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + if (cdbCursors != null) { + List cursors = writeCursor ? cdbCursors.writeCursors + : cdbCursors.readCursors; + if (cursors.contains(cursor)) { + Cursor newCursor = cursor.dup(samePosition); + cursors.add(newCursor); + return newCursor; + } + } + } + throw new IllegalStateException("Cursor to dup not tracked"); + } else { + return cursor.dup(samePosition); + } + } + + /** + * Closes a cursor. + * + * @param cursor the cursor to close. + * + * @throws DatabaseException if a database problem occurs. + */ + void closeCursor(Cursor cursor) + throws DatabaseException { + + if (cursor == null) { + return; + } + if (cdbMode) { + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap != null) { + Database db = cursor.getDatabase(); + CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + if (cdbCursors != null) { + if (cdbCursors.readCursors.remove(cursor) || + cdbCursors.writeCursors.remove(cursor)) { + cursor.close(); + return; + } + } + } + throw new IllegalStateException + ("Closing CDB cursor that was not known to be open"); + } else { + cursor.close(); + } + } + + /** + * Returns true if a CDB cursor is open and therefore a Database write + * operation should not be attempted since a self-deadlock may result. 
+ */ + boolean isCDBCursorOpen(Database db) { + if (cdbMode) { + WeakHashMap cdbCursorsMap = (WeakHashMap) localCdbCursors.get(); + if (cdbCursorsMap != null) { + CdbCursors cdbCursors = (CdbCursors) cdbCursorsMap.get(db); + + if (cdbCursors != null && + (cdbCursors.readCursors.size() > 0 || + cdbCursors.writeCursors.size() > 0)) { + return true; + } + } + } + return false; + } + + static final class CdbCursors { + + List writeCursors = new ArrayList(); + List readCursors = new ArrayList(); + } +} diff --git a/src/com/sleepycat/collections/DataCursor.java b/src/com/sleepycat/collections/DataCursor.java new file mode 100644 index 0000000..c163b55 --- /dev/null +++ b/src/com/sleepycat/collections/DataCursor.java @@ -0,0 +1,930 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.compat.DbCompat.OpReadOptions; +import com.sleepycat.compat.DbCompat.OpResult; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.JoinConfig; +import com.sleepycat.je.JoinCursor; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.keyrange.RangeCursor; + +/** + * Represents a Berkeley DB cursor and adds support for indices, bindings and + * key ranges. + * + *

    This class operates on a view and takes care of reading and updating + * indices, calling bindings, constraining access to a key range, etc.

    + * + * @author Mark Hayes + */ +final class DataCursor implements Cloneable { + + /** Repositioned exactly to the key/data pair given. */ + static final int REPOS_EXACT = 0; + /** Repositioned on a record following the key/data pair given. */ + static final int REPOS_NEXT = 1; + /** Repositioned failed, no records on or after the key/data pair given. */ + static final int REPOS_EOF = 2; + + private RangeCursor cursor; + private JoinCursor joinCursor; + private DataView view; + private KeyRange range; + private boolean writeAllowed; + private boolean readUncommitted; + private DatabaseEntry keyThang; + private DatabaseEntry valueThang; + private DatabaseEntry primaryKeyThang; + private DatabaseEntry otherThang; + private DataCursor[] indexCursorsToClose; + + /** + * Creates a cursor for a given view. + */ + DataCursor(DataView view, boolean writeAllowed) + throws DatabaseException { + + init(view, writeAllowed, null, null); + } + + /** + * Creates a cursor for a given view. + */ + DataCursor(DataView view, boolean writeAllowed, CursorConfig config) + throws DatabaseException { + + init(view, writeAllowed, config, null); + } + + /** + * Creates a cursor for a given view and single key range. + * Used by unit tests. + */ + DataCursor(DataView view, boolean writeAllowed, Object singleKey) + throws DatabaseException { + + init(view, writeAllowed, null, view.subRange(view.range, singleKey)); + } + + /** + * Creates a cursor for a given view and key range. + * Used by unit tests. + */ + DataCursor(DataView view, boolean writeAllowed, + Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive) + throws DatabaseException { + + init(view, writeAllowed, null, + view.subRange + (view.range, beginKey, beginInclusive, endKey, endInclusive)); + } + + /** + * Creates a join cursor. + */ + DataCursor(DataView view, DataCursor[] indexCursors, + JoinConfig joinConfig, boolean closeIndexCursors) + throws DatabaseException { + + if (view.isSecondary()) { + throw new IllegalArgumentException( + "The primary collection in a join must not be a secondary " + + "database"); + } + Cursor[] cursors = new Cursor[indexCursors.length]; + for (int i = 0; i < cursors.length; i += 1) { + cursors[i] = indexCursors[i].cursor.getCursor(); + } + joinCursor = view.db.join(cursors, joinConfig); + init(view, false, null, null); + if (closeIndexCursors) { + indexCursorsToClose = indexCursors; + } + } + + /** + * Clones a cursor preserving the current position. + */ + DataCursor cloneCursor() + throws DatabaseException { + + checkNoJoinCursor(); + + DataCursor o; + try { + o = (DataCursor) super.clone(); + } catch (CloneNotSupportedException neverHappens) { + return null; + } + + o.initThangs(); + KeyRange.copy(keyThang, o.keyThang); + KeyRange.copy(valueThang, o.valueThang); + if (primaryKeyThang != keyThang) { + KeyRange.copy(primaryKeyThang, o.primaryKeyThang); + } + + o.cursor = cursor.dup(true); + return o; + } + + /** + * Returns the internal range cursor. + */ + RangeCursor getCursor() { + return cursor; + } + + /** + * Constructor helper. + */ + private void init(DataView view, + boolean writeAllowed, + CursorConfig config, + KeyRange range) + throws DatabaseException { + + if (config == null) { + config = view.cursorConfig; + } + this.view = view; + this.writeAllowed = writeAllowed && view.writeAllowed; + this.range = (range != null) ? 
range : view.range; + readUncommitted = config.getReadUncommitted() || + view.currentTxn.isReadUncommitted(); + initThangs(); + + if (joinCursor == null) { + cursor = new MyRangeCursor + (this.range, config, view, this.writeAllowed); + } + } + + /** + * Constructor helper. + */ + private void initThangs() { + keyThang = new DatabaseEntry(); + primaryKeyThang = view.isSecondary() ? (new DatabaseEntry()) + : keyThang; + valueThang = new DatabaseEntry(); + } + + /** + * Set entries from given byte arrays. + */ + private void setThangs(byte[] keyBytes, + byte[] priKeyBytes, + byte[] valueBytes) { + + keyThang.setData(KeyRange.copyBytes(keyBytes)); + + if (keyThang != primaryKeyThang) { + primaryKeyThang.setData(KeyRange.copyBytes(priKeyBytes)); + } + + valueThang.setData(KeyRange.copyBytes(valueBytes)); + } + + /** + * Closes the associated cursor. + */ + void close() + throws DatabaseException { + + if (joinCursor != null) { + JoinCursor toClose = joinCursor; + joinCursor = null; + toClose.close(); + } + if (cursor != null) { + Cursor toClose = cursor.getCursor(); + cursor = null; + view.currentTxn.closeCursor(toClose); + } + if (indexCursorsToClose != null) { + DataCursor[] toClose = indexCursorsToClose; + indexCursorsToClose = null; + for (int i = 0; i < toClose.length; i += 1) { + toClose[i].close(); + } + } + } + + /** + * Repositions to a given raw key/data pair, or just past it if that record + * has been deleted. + * + * @return REPOS_EXACT, REPOS_NEXT or REPOS_EOF. + */ + int repositionRange(byte[] lastKeyBytes, + byte[] lastPriKeyBytes, + byte[] lastValueBytes, + boolean lockForWrite) + throws DatabaseException { + + OpReadOptions options = OpReadOptions.make(getLockMode(lockForWrite)); + OpResult result; + + /* Use the given key/data byte arrays. */ + setThangs(lastKeyBytes, lastPriKeyBytes, lastValueBytes); + + /* Position on or after the given key/data pair. */ + + if (!view.dupsAllowed) { + /* + * No-dups is the simple case. Search for key >= lastKey, and then + * compare to see if we found lastKey again. + */ + result = cursor.getSearchKeyRange( + keyThang, primaryKeyThang, valueThang, options); + + if (!result.isSuccess()) { + return REPOS_EOF; + } + + return KeyRange.equalBytes( + lastKeyBytes, 0, lastKeyBytes.length, + keyThang.getData(), keyThang.getOffset(), keyThang.getSize()) ? + REPOS_EXACT : REPOS_NEXT; + } + + /* + * Duplicates are more complex. + * + * Search for key == lastKey && data >= lastData, and then compare to + * see if we found lastData again. + */ + result = cursor.getSearchBothRange( + keyThang, primaryKeyThang, valueThang, options); + + if (result.isSuccess()) { + + DatabaseEntry thang = + view.isSecondary() ? primaryKeyThang : valueThang; + + byte[] bytes = + view.isSecondary() ? lastPriKeyBytes : lastValueBytes; + + return KeyRange.equalBytes( + bytes, 0, bytes.length, + thang.getData(), thang.getOffset(), thang.getSize()) ? + REPOS_EXACT : REPOS_NEXT; + } + + /* + * The record with lastKey/lastData must have been deleted and there + * are no records with key == lastkey && data > lastData. + * + * Search for key >= lastKey, but keep in mind that we will probably + * land on the first record with lastKey. + */ + result = cursor.getSearchKeyRange( + keyThang, primaryKeyThang, valueThang, options); + + if (!result.isSuccess()) { + return REPOS_EOF; + } + + /* + * If we are positioned on the first dup of lastKey, skip over its + * records to the first record with the next key. 
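+ *
+ * If the search instead lands on a key greater than lastKey, that record
+ * is already the REPOS_NEXT answer and no skipping is needed.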
+ */ + if (KeyRange.equalBytes(lastKeyBytes, 0, lastKeyBytes.length, + keyThang.getData(), keyThang.getOffset(), keyThang.getSize())) { + + result = cursor.getNextNoDup(keyThang, primaryKeyThang, + valueThang, options); + + if (!result.isSuccess()) { + return REPOS_EOF; + } + } + + return REPOS_NEXT; + } + + /** + * Repositions to a given raw key/data pair. + * + * @throws IllegalStateException when the database has unordered keys or + * unordered duplicates. + * + * @return whether the search succeeded. + */ + boolean repositionExact(byte[] keyBytes, + byte[] priKeyBytes, + byte[] valueBytes, + boolean lockForWrite) + throws DatabaseException { + + OpReadOptions options = OpReadOptions.make(getLockMode(lockForWrite)); + OpResult result; + + /* Use the given key/data byte arrays. */ + setThangs(keyBytes, priKeyBytes, valueBytes); + + /* Position on the given key/data pair. */ + if (view.recNumRenumber) { + /* getSearchBoth doesn't work with recno-renumber databases. */ + result = cursor.getSearchKey(keyThang, primaryKeyThang, + valueThang, options); + } else { + result = cursor.getSearchBoth(keyThang, primaryKeyThang, + valueThang, options); + } + + return result.isSuccess(); + } + + /** + * Returns the view for this cursor. + */ + DataView getView() { + + return view; + } + + /** + * Returns the range for this cursor. + */ + KeyRange getRange() { + + return range; + } + + /** + * Returns whether write is allowed for this cursor, as specified to the + * constructor. + */ + boolean isWriteAllowed() { + + return writeAllowed; + } + + /** + * Returns the key object for the last record read. + */ + Object getCurrentKey() { + return view.makeKey(keyThang, primaryKeyThang); + } + + /** + * Returns the value object for the last record read. + */ + Object getCurrentValue() { + return view.makeValue(primaryKeyThang, valueThang); + } + + /** + * Returns the internal key entry. + */ + DatabaseEntry getKeyThang() { + return keyThang; + } + + /** + * Returns the internal primary key entry, which is the same object as the + * key entry if the cursor is not for a secondary database. + */ + DatabaseEntry getPrimaryKeyThang() { + return primaryKeyThang; + } + + /** + * Returns the internal value entry. + */ + DatabaseEntry getValueThang() { + return valueThang; + } + + /** + * Returns whether record number access is allowed. + */ + boolean hasRecNumAccess() { + + return view.recNumAccess; + } + + /** + * Returns the record number for the last record read. + */ + int getCurrentRecordNumber() + throws DatabaseException { + + if (view.btreeRecNumDb) { + /* BTREE-RECNO access. */ + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + DbCompat.getCurrentRecordNumber(cursor.getCursor(), otherThang, + getLockMode(false)); + return DbCompat.getRecordNumber(otherThang); + } else { + /* QUEUE or RECNO database. */ + return DbCompat.getRecordNumber(keyThang); + } + } + + /** + * Binding version of Cursor.getCurrent(), no join cursor allowed. + */ + OperationStatus getCurrent(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getCurrent( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(getLockMode(lockForWrite))).status(); + } + + /** + * Binding version of Cursor.getFirst(), join cursor is allowed. 
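+ *
+ * <p>When this cursor wraps a join cursor, the call is forwarded to
+ * JoinCursor.getNext(), so "first" means the first join result.</p>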
+ */ + OperationStatus getFirst(boolean lockForWrite) + throws DatabaseException { + + LockMode lockMode = getLockMode(lockForWrite); + if (joinCursor != null) { + return joinCursor.getNext(keyThang, valueThang, lockMode); + } else { + return cursor.getFirst( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(lockMode)).status(); + } + } + + /** + * Binding version of Cursor.getNext(), join cursor is allowed. + */ + OperationStatus getNext(boolean lockForWrite) + throws DatabaseException { + + LockMode lockMode = getLockMode(lockForWrite); + if (joinCursor != null) { + return joinCursor.getNext(keyThang, valueThang, lockMode); + } else { + return cursor.getNext( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(lockMode)).status(); + } + } + + /** + * Binding version of Cursor.getNext(), join cursor is allowed. + */ + OperationStatus getNextNoDup(boolean lockForWrite) + throws DatabaseException { + + LockMode lockMode = getLockMode(lockForWrite); + OpReadOptions options = OpReadOptions.make(lockMode); + if (joinCursor != null) { + return joinCursor.getNext(keyThang, valueThang, lockMode); + } else if (view.dupsView) { + return cursor.getNext + (keyThang, primaryKeyThang, valueThang, options).status(); + } else { + return cursor.getNextNoDup + (keyThang, primaryKeyThang, valueThang, options).status(); + } + } + + /** + * Binding version of Cursor.getNextDup(), no join cursor allowed. + */ + OperationStatus getNextDup(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + if (view.dupsView) { + return null; + } else { + return cursor.getNextDup( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(getLockMode(lockForWrite))).status(); + } + } + + /** + * Binding version of Cursor.getLast(), no join cursor allowed. + */ + OperationStatus getLast(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getLast( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(getLockMode(lockForWrite))).status(); + } + + /** + * Binding version of Cursor.getPrev(), no join cursor allowed. + */ + OperationStatus getPrev(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + return cursor.getPrev( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(getLockMode(lockForWrite))).status(); + } + + /** + * Binding version of Cursor.getPrevNoDup(), no join cursor allowed. + */ + OperationStatus getPrevNoDup(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + LockMode lockMode = getLockMode(lockForWrite); + OpReadOptions options = OpReadOptions.make(lockMode); + if (view.dupsView) { + return null; + } else { + return cursor.getPrevNoDup( + keyThang, primaryKeyThang, valueThang, options).status(); + } + } + + /** + * Binding version of Cursor.getPrevDup(), no join cursor allowed. + */ + OperationStatus getPrevDup(boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + if (view.dupsView) { + return null; + } else { + return cursor.getPrevDup( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(getLockMode(lockForWrite))).status(); + } + } + + /** + * Binding version of Cursor.getSearchKey(), no join cursor allowed. + * Searches by record number in a BTREE-RECNO db with RECNO access. 
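+ *
+ * <p>For a duplicates view the key parameter is a primary key: the
+ * secondary key is fixed by the view and the search is performed with
+ * getSearchBoth().</p>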
+ */ + OperationStatus getSearchKey(Object key, Object value, + boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + if (view.dupsView) { + if (view.useKey(key, value, primaryKeyThang, view.dupsRange)) { + KeyRange.copy(view.dupsKey, keyThang); + return cursor.getSearchBoth( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(getLockMode(lockForWrite))).status(); + } + } else { + if (view.useKey(key, value, keyThang, range)) { + return doGetSearchKey(lockForWrite); + } + } + return OperationStatus.NOTFOUND; + } + + /** + * Pass-thru version of Cursor.getSearchKey(). + * Searches by record number in a BTREE-RECNO db with RECNO access. + */ + private OperationStatus doGetSearchKey(boolean lockForWrite) + throws DatabaseException { + + OpReadOptions options = OpReadOptions.make(getLockMode(lockForWrite)); + if (view.btreeRecNumAccess) { + return cursor.getSearchRecordNumber( + keyThang, primaryKeyThang, valueThang, options).status(); + } else { + return cursor.getSearchKey( + keyThang, primaryKeyThang, valueThang, options).status(); + } + } + + /** + * Binding version of Cursor.getSearchKeyRange(), no join cursor allowed. + */ + OperationStatus getSearchKeyRange(Object key, Object value, + boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + OpReadOptions options = OpReadOptions.make(getLockMode(lockForWrite)); + if (view.dupsView) { + if (view.useKey(key, value, primaryKeyThang, view.dupsRange)) { + KeyRange.copy(view.dupsKey, keyThang); + return cursor.getSearchBothRange( + keyThang, primaryKeyThang, valueThang, options).status(); + } + } else { + if (view.useKey(key, value, keyThang, range)) { + return cursor.getSearchKeyRange( + keyThang, primaryKeyThang, valueThang, options).status(); + } + } + return OperationStatus.NOTFOUND; + } + + /** + * Find the given key and value using getSearchBoth if possible or a + * sequential scan otherwise, no join cursor allowed. + */ + OperationStatus findBoth(Object key, Object value, boolean lockForWrite) + throws DatabaseException { + + checkNoJoinCursor(); + OpReadOptions options = OpReadOptions.make(getLockMode(lockForWrite)); + view.useValue(value, valueThang, null); + if (view.dupsView) { + if (view.useKey(key, value, primaryKeyThang, view.dupsRange)) { + KeyRange.copy(view.dupsKey, keyThang); + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + OperationStatus status = cursor.getSearchBoth( + keyThang, primaryKeyThang, otherThang, options).status(); + if (status == OperationStatus.SUCCESS && + KeyRange.equalBytes(otherThang, valueThang)) { + return status; + } + } + } else if (view.useKey(key, value, keyThang, range)) { + if (view.isSecondary()) { + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + OperationStatus status = cursor.getSearchKey( + keyThang, primaryKeyThang, otherThang, options).status(); + while (status == OperationStatus.SUCCESS) { + if (KeyRange.equalBytes(otherThang, valueThang)) { + return status; + } + status = cursor.getNextDup( + keyThang, primaryKeyThang, otherThang, + options).status(); + } + /* if status != SUCCESS set range cursor to invalid? */ + } else { + return cursor.getSearchBoth( + keyThang, null, valueThang, options).status(); + } + } + return OperationStatus.NOTFOUND; + } + + /** + * Find the given value using getSearchBoth if possible or a sequential + * scan otherwise, no join cursor allowed. 
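+ *
+ * <p>The getSearchBoth() shortcut applies only when an entity binding can
+ * derive the key from the value, the view is not a secondary view, and
+ * either the first duplicate is wanted or duplicates are not configured;
+ * otherwise records are fetched and compared by value bytes.</p>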
+ */ + OperationStatus findValue(Object value, boolean findFirst) + throws DatabaseException { + + checkNoJoinCursor(); + + if (view.entityBinding != null && !view.isSecondary() && + (findFirst || !view.dupsAllowed)) { + return findBoth(null, value, false); + } else { + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + view.useValue(value, otherThang, null); + OperationStatus status = findFirst ? getFirst(false) + : getLast(false); + while (status == OperationStatus.SUCCESS) { + if (KeyRange.equalBytes(valueThang, otherThang)) { + break; + } + status = findFirst ? getNext(false) : getPrev(false); + } + return status; + } + } + + /** + * Calls Cursor.count(), no join cursor allowed. + */ + int count() + throws DatabaseException { + + checkNoJoinCursor(); + if (view.dupsView) { + return 1; + } else { + return cursor.count(); + } + } + + /** + * Binding version of Cursor.putCurrent(). + */ + OperationStatus putCurrent(Object value) + throws DatabaseException { + + checkWriteAllowed(false); + view.useValue(value, valueThang, keyThang); + + /* + * Workaround for a DB core problem: With HASH type a put() with + * different data is allowed. + */ + boolean hashWorkaround = (view.dupsOrdered && !view.ordered); + if (hashWorkaround) { + if (otherThang == null) { + otherThang = new DatabaseEntry(); + } + cursor.getCurrent( + keyThang, primaryKeyThang, otherThang, OpReadOptions.EMPTY); + if (KeyRange.equalBytes(valueThang, otherThang)) { + return OperationStatus.SUCCESS; + } else { + throw new IllegalArgumentException( + "Current data differs from put data with sorted duplicates"); + } + } + + return cursor.putCurrent(valueThang); + } + + /** + * Binding version of Cursor.putAfter(). + */ + OperationStatus putAfter(Object value) + throws DatabaseException { + + checkWriteAllowed(false); + view.useValue(value, valueThang, null); /* why no key check? */ + return cursor.putAfter(keyThang, valueThang); + } + + /** + * Binding version of Cursor.putBefore(). + */ + OperationStatus putBefore(Object value) + throws DatabaseException { + + checkWriteAllowed(false); + view.useValue(value, valueThang, keyThang); + return cursor.putBefore(keyThang, valueThang); + } + + /** + * Binding version of Cursor.put(), optionally returning the old value and + * optionally using the current key instead of the key parameter. + */ + OperationStatus put(Object key, Object value, Object[] oldValue, + boolean useCurrentKey) + throws DatabaseException { + + initForPut(key, value, oldValue, useCurrentKey); + return cursor.put(keyThang, valueThang); + } + + /** + * Binding version of Cursor.putNoOverwrite(), optionally using the current + * key instead of the key parameter. + */ + OperationStatus putNoOverwrite(Object key, Object value, + boolean useCurrentKey) + throws DatabaseException { + + initForPut(key, value, null, useCurrentKey); + return cursor.putNoOverwrite(keyThang, valueThang); + } + + /** + * Binding version of Cursor.putNoDupData(), optionally returning the old + * value and optionally using the current key instead of the key parameter. + */ + OperationStatus putNoDupData(Object key, Object value, Object[] oldValue, + boolean useCurrentKey) + throws DatabaseException { + + initForPut(key, value, oldValue, useCurrentKey); + if (view.dupsOrdered) { + return cursor.putNoDupData(keyThang, valueThang); + } else { + if (view.dupsAllowed) { + /* Unordered duplicates. 
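+ * putNoDupData must be emulated: getSearchBoth() checks whether the
+ * exact key/data pair already exists (KEYEXIST), and otherwise a
+ * plain put() adds the duplicate.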
*/ + OperationStatus status = cursor.getSearchBoth( + keyThang, primaryKeyThang, valueThang, + OpReadOptions.make(getLockMode(false))).status(); + if (status == OperationStatus.SUCCESS) { + return OperationStatus.KEYEXIST; + } else { + return cursor.put(keyThang, valueThang); + } + } else { + /* No duplicates. */ + return cursor.putNoOverwrite(keyThang, valueThang); + } + } + } + + /** + * Do setup for a put() operation. + */ + private void initForPut(Object key, Object value, Object[] oldValue, + boolean useCurrentKey) + throws DatabaseException { + + checkWriteAllowed(false); + if (!useCurrentKey && !view.useKey(key, value, keyThang, range)) { + throw new IllegalArgumentException("key out of range"); + } + if (oldValue != null) { + oldValue[0] = null; + if (!view.dupsAllowed) { + OperationStatus status = doGetSearchKey(true); + if (status == OperationStatus.SUCCESS) { + oldValue[0] = getCurrentValue(); + } + } + } + view.useValue(value, valueThang, keyThang); + } + + /** + * Sets the key entry to the begin key of a single key range, so the next + * time a putXxx() method is called that key will be used. + */ + void useRangeKey() { + if (!range.isSingleKey()) { + throw DbCompat.unexpectedState(); + } + KeyRange.copy(range.getSingleKey(), keyThang); + } + + /** + * Perform an arbitrary database 'delete' operation. + */ + OperationStatus delete() + throws DatabaseException { + + checkWriteAllowed(true); + return cursor.delete(); + } + + /** + * Returns the lock mode to use for a getXxx() operation. + */ + LockMode getLockMode(boolean lockForWrite) { + + /* Read-uncommmitted takes precedence over write-locking. */ + + if (readUncommitted) { + return LockMode.READ_UNCOMMITTED; + } else if (lockForWrite) { + return view.currentTxn.getWriteLockMode(); + } else { + return LockMode.DEFAULT; + } + } + + /** + * Throws an exception if a join cursor is in use. + */ + private void checkNoJoinCursor() { + + if (joinCursor != null) { + throw new UnsupportedOperationException + ("Not allowed with a join cursor"); + } + } + + /** + * Throws an exception if write is not allowed or if a join cursor is in + * use. + */ + private void checkWriteAllowed(boolean allowSecondary) { + + checkNoJoinCursor(); + + if (!writeAllowed || (!allowSecondary && view.isSecondary())) { + throw new UnsupportedOperationException + ("Writing is not allowed"); + } + } +} diff --git a/src/com/sleepycat/collections/DataView.java b/src/com/sleepycat/collections/DataView.java new file mode 100644 index 0000000..3789f55 --- /dev/null +++ b/src/com/sleepycat/collections/DataView.java @@ -0,0 +1,685 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.JoinConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.keyrange.KeyRangeException; + +/** + * Represents a Berkeley DB database and adds support for indices, bindings and + * key ranges. + * + *
+ * <p>This class defines a view and takes care of reading and updating indices,
+ * calling bindings, constraining access to a key range, etc.</p>
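+ *
+ * <p>Views are cheap to derive and share the underlying database handle.
+ * An illustrative chain (db, bindings and key objects are hypothetical):</p>
+ * <pre>
+ *     DataView base = new DataView(db, keyBinding, valueBinding,
+ *                                  null, true, null);
+ *     DataView sub = base.subView(begin, true, end, false, null);
+ *     DataView keys = sub.keySetView();
+ * </pre>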
    + * + * @author Mark Hayes + */ +final class DataView implements Cloneable { + + Database db; + SecondaryDatabase secDb; + CurrentTransaction currentTxn; + KeyRange range; + EntryBinding keyBinding; + EntryBinding valueBinding; + EntityBinding entityBinding; + PrimaryKeyAssigner keyAssigner; + SecondaryKeyCreator secKeyCreator; + CursorConfig cursorConfig; // Used for all operations via this view + boolean writeAllowed; // Read-write view + boolean ordered; // Not a HASH Db + boolean keyRangesAllowed; // BTREE only + boolean recNumAllowed; // QUEUE, RECNO, or BTREE-RECNUM Db + boolean recNumAccess; // recNumAllowed && using a rec num binding + boolean btreeRecNumDb; // BTREE-RECNUM Db + boolean btreeRecNumAccess; // recNumAccess && BTREE-RECNUM Db + boolean recNumRenumber; // RECNO-RENUM Db + boolean keysRenumbered; // recNumRenumber || btreeRecNumAccess + boolean dupsAllowed; // Dups configured + boolean dupsOrdered; // Sorted dups configured + boolean transactional; // Db is transactional + boolean readUncommittedAllowed; // Read-uncommited is optional in DB-CORE + + /* + * If duplicatesView is called, dupsView will be true and dupsKey will be + * the secondary key used as the "single key" range. dupRange will be set + * as the range of the primary key values if subRange is subsequently + * called, to further narrow the view. + */ + DatabaseEntry dupsKey; + boolean dupsView; + KeyRange dupsRange; + + /** + * Creates a view for a given database and bindings. The initial key range + * of the view will be open. + */ + DataView(Database database, EntryBinding keyBinding, + EntryBinding valueBinding, EntityBinding entityBinding, + boolean writeAllowed, PrimaryKeyAssigner keyAssigner) + throws IllegalArgumentException { + + if (database == null) { + throw new IllegalArgumentException("database is null"); + } + db = database; + try { + currentTxn = + CurrentTransaction.getInstanceInternal(db.getEnvironment()); + DatabaseConfig dbConfig; + if (db instanceof SecondaryDatabase) { + secDb = (SecondaryDatabase) database; + SecondaryConfig secConfig = secDb.getSecondaryConfig(); + secKeyCreator = secConfig.getKeyCreator(); + dbConfig = secConfig; + } else { + dbConfig = db.getConfig(); + } + ordered = !DbCompat.isTypeHash(dbConfig); + keyRangesAllowed = DbCompat.isTypeBtree(dbConfig); + recNumAllowed = DbCompat.isTypeQueue(dbConfig) || + DbCompat.isTypeRecno(dbConfig) || + DbCompat.getBtreeRecordNumbers(dbConfig); + recNumRenumber = DbCompat.getRenumbering(dbConfig); + dupsAllowed = DbCompat.getSortedDuplicates(dbConfig) || + DbCompat.getUnsortedDuplicates(dbConfig); + dupsOrdered = DbCompat.getSortedDuplicates(dbConfig); + transactional = currentTxn.isTxnMode() && + dbConfig.getTransactional(); + readUncommittedAllowed = DbCompat.getReadUncommitted(dbConfig); + btreeRecNumDb = recNumAllowed && DbCompat.isTypeBtree(dbConfig); + range = new KeyRange(dbConfig.getBtreeComparator()); + } catch (DatabaseException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + this.writeAllowed = writeAllowed; + this.keyBinding = keyBinding; + this.valueBinding = valueBinding; + this.entityBinding = entityBinding; + this.keyAssigner = keyAssigner; + cursorConfig = CursorConfig.DEFAULT; + + if (valueBinding != null && entityBinding != null) + throw new IllegalArgumentException + ("both valueBinding and entityBinding are non-null"); + + if (keyBinding instanceof com.sleepycat.bind.RecordNumberBinding) { + if (!recNumAllowed) { + throw new IllegalArgumentException + ("RecordNumberBinding requires 
DB_BTREE/DB_RECNUM, " + + "DB_RECNO, or DB_QUEUE"); + } + recNumAccess = true; + if (btreeRecNumDb) { + btreeRecNumAccess = true; + } + } + keysRenumbered = recNumRenumber || btreeRecNumAccess; + } + + /** + * Clones the view. + */ + private DataView cloneView() { + + try { + return (DataView) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + throw DbCompat.unexpectedState(); + } + } + + /** + * Return a new key-set view derived from this view by setting the + * entity and value binding to null. + * + * @return the derived view. + */ + DataView keySetView() { + + if (keyBinding == null) { + throw new UnsupportedOperationException("Must have keyBinding"); + } + DataView view = cloneView(); + view.valueBinding = null; + view.entityBinding = null; + return view; + } + + /** + * Return a new value-set view derived from this view by setting the + * key binding to null. + * + * @return the derived view. + */ + DataView valueSetView() { + + if (valueBinding == null && entityBinding == null) { + throw new UnsupportedOperationException + ("Must have valueBinding or entityBinding"); + } + DataView view = cloneView(); + view.keyBinding = null; + return view; + } + + /** + * Return a new value-set view for single key range. + * + * @param singleKey the single key value. + * + * @return the derived view. + * + * @throws DatabaseException if a database problem occurs. + * + * @throws KeyRangeException if the specified range is not within the + * current range. + */ + DataView valueSetView(Object singleKey) + throws DatabaseException, KeyRangeException { + + /* + * Must do subRange before valueSetView since the latter clears the + * key binding needed for the former. + */ + KeyRange singleKeyRange = subRange(range, singleKey); + DataView view = valueSetView(); + view.range = singleKeyRange; + return view; + } + + /** + * Return a new value-set view for key range, optionally changing + * the key binding. + */ + DataView subView(Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive, + EntryBinding keyBinding) + throws DatabaseException, KeyRangeException { + + DataView view = cloneView(); + view.setRange(beginKey, beginInclusive, endKey, endInclusive); + if (keyBinding != null) view.keyBinding = keyBinding; + return view; + } + + /** + * Return a new duplicates view for a given secondary key. + */ + DataView duplicatesView(Object secondaryKey, + EntryBinding primaryKeyBinding) + throws DatabaseException, KeyRangeException { + + if (!isSecondary()) { + throw new UnsupportedOperationException + ("Only allowed for maps on secondary databases"); + } + if (dupsView) { + throw DbCompat.unexpectedState(); + } + DataView view = cloneView(); + view.range = subRange(view.range, secondaryKey); + view.dupsKey = view.range.getSingleKey(); + view.dupsView = true; + view.keyBinding = primaryKeyBinding; + return view; + } + + /** + * Returns a new view with a specified cursor configuration. + */ + DataView configuredView(CursorConfig config) { + + DataView view = cloneView(); + view.cursorConfig = (config != null) ? + DbCompat.cloneCursorConfig(config) : CursorConfig.DEFAULT; + return view; + } + + /** + * Returns the current transaction for the view or null if the environment + * is non-transactional. + */ + CurrentTransaction getCurrentTxn() { + + return transactional ? currentTxn : null; + } + + /** + * Sets this view's range to a subrange with the given parameters. 
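+ *
+ * <p>For a duplicates view this narrows the primary-key range (dupsRange);
+ * otherwise it replaces the view's main key range. Ranges are allowed
+ * only for BTREE databases.</p>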
+ */ + private void setRange(Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive) + throws DatabaseException, KeyRangeException { + + if ((beginKey != null || endKey != null) && !keyRangesAllowed) { + throw new UnsupportedOperationException + ("Key ranges allowed only for BTREE databases"); + } + KeyRange useRange = useSubRange(); + useRange = subRange + (useRange, beginKey, beginInclusive, endKey, endInclusive); + if (dupsView) { + dupsRange = useRange; + } else { + range = useRange; + } + } + + /** + * Returns the key thang for a single key range, or null if a single key + * range is not used. + */ + DatabaseEntry getSingleKeyThang() { + + return range.getSingleKey(); + } + + /** + * Returns the environment for the database. + */ + final Environment getEnv() { + + return currentTxn.getEnvironment(); + } + + /** + * Returns whether this is a view on a secondary database rather + * than directly on a primary database. + */ + final boolean isSecondary() { + + return (secDb != null); + } + + /** + * Returns whether no records are present in the view. + * + * Auto-commit must be performed by the caller. + */ + boolean isEmpty() + throws DatabaseException { + + DataCursor cursor = new DataCursor(this, false); + try { + return cursor.getFirst(false) != OperationStatus.SUCCESS; + } finally { + cursor.close(); + } + } + + /** + * Appends a value and returns the new key. If a key assigner is used + * it assigns the key, otherwise a QUEUE or RECNO database is required. + * + * Auto-commit must be performed by the caller. + */ + OperationStatus append(Object value, + Object[] retPrimaryKey, + Object[] retValue) + throws DatabaseException { + + /* + * Flags will be NOOVERWRITE if used with assigner, or APPEND + * otherwise. + * Requires: if value param, value or entity binding + * Requires: if retPrimaryKey, primary key binding (no index). + * Requires: if retValue, value or entity binding + */ + DatabaseEntry keyThang = new DatabaseEntry(); + DatabaseEntry valueThang = new DatabaseEntry(); + useValue(value, valueThang, null); + OperationStatus status; + if (keyAssigner != null) { + keyAssigner.assignKey(keyThang); + if (!range.check(keyThang)) { + throw new IllegalArgumentException + ("assigned key out of range"); + } + DataCursor cursor = new DataCursor(this, true); + try { + status = cursor.getCursor().putNoOverwrite(keyThang, + valueThang); + } finally { + cursor.close(); + } + } else { + /* Assume QUEUE/RECNO access method. */ + if (currentTxn.isCDBCursorOpen(db)) { + throw new IllegalStateException + ("cannot open CDB write cursor when read cursor is open"); + } + status = DbCompat.append(db, useTransaction(), + keyThang, valueThang); + if (status == OperationStatus.SUCCESS && !range.check(keyThang)) { + db.delete(useTransaction(), keyThang); + throw new IllegalArgumentException + ("appended record number out of range"); + } + } + if (status == OperationStatus.SUCCESS) { + returnPrimaryKeyAndValue(keyThang, valueThang, + retPrimaryKey, retValue); + } + return status; + } + + /** + * Returns the current transaction if the database is transaction, or null + * if the database is not transactional or there is no current transaction. + */ + Transaction useTransaction() { + return transactional ? currentTxn.getTransaction() : null; + } + + /** + * Deletes all records in the current range. + * + * Auto-commit must be performed by the caller. 
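+ *
+ * <p>When keys are renumbered, each delete can shift record positions, so
+ * the loop re-reads the first record after every delete instead of
+ * advancing the cursor.</p>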
+ */ + void clear() + throws DatabaseException { + + DataCursor cursor = new DataCursor(this, true); + try { + OperationStatus status = OperationStatus.SUCCESS; + while (status == OperationStatus.SUCCESS) { + if (keysRenumbered) { + status = cursor.getFirst(true); + } else { + status = cursor.getNext(true); + } + if (status == OperationStatus.SUCCESS) { + cursor.delete(); + } + } + } finally { + cursor.close(); + } + } + + /** + * Returns a cursor for this view that reads only records having the + * specified index key values. + */ + DataCursor join(DataView[] indexViews, Object[] indexKeys, + JoinConfig joinConfig) + throws DatabaseException { + + DataCursor joinCursor = null; + DataCursor[] indexCursors = new DataCursor[indexViews.length]; + try { + for (int i = 0; i < indexViews.length; i += 1) { + indexCursors[i] = new DataCursor(indexViews[i], false); + indexCursors[i].getSearchKey(indexKeys[i], null, false); + } + joinCursor = new DataCursor(this, indexCursors, joinConfig, true); + return joinCursor; + } finally { + if (joinCursor == null) { + // An exception is being thrown, so close cursors we opened. + for (int i = 0; i < indexCursors.length; i += 1) { + if (indexCursors[i] != null) { + try { indexCursors[i].close(); } + catch (Exception e) { + /* FindBugs, this is ok. */ + } + } + } + } + } + } + + /** + * Returns a cursor for this view that reads only records having the + * index key values at the specified cursors. + */ + DataCursor join(DataCursor[] indexCursors, JoinConfig joinConfig) + throws DatabaseException { + + return new DataCursor(this, indexCursors, joinConfig, false); + } + + /** + * Returns primary key and value if return parameters are non-null. + */ + private void returnPrimaryKeyAndValue(DatabaseEntry keyThang, + DatabaseEntry valueThang, + Object[] retPrimaryKey, + Object[] retValue) { + // Requires: if retPrimaryKey, primary key binding (no index). + // Requires: if retValue, value or entity binding + + if (retPrimaryKey != null) { + if (keyBinding == null) { + throw new IllegalArgumentException + ("returning key requires primary key binding"); + } else if (isSecondary()) { + throw new IllegalArgumentException + ("returning key requires unindexed view"); + } else { + retPrimaryKey[0] = keyBinding.entryToObject(keyThang); + } + } + if (retValue != null) { + retValue[0] = makeValue(keyThang, valueThang); + } + } + + /** + * Populates the key entry and returns whether the key is within range. 
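+ *
+ * <p>When the key parameter is null, the key is derived from the value:
+ * directly via the entity binding, or, for a secondary view, by running
+ * the entity's primary key and data through the secondary key creator.</p>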
+ */ + boolean useKey(Object key, Object value, DatabaseEntry keyThang, + KeyRange checkRange) + throws DatabaseException { + + if (key != null) { + if (keyBinding == null) { + throw new IllegalArgumentException + ("non-null key with null key binding"); + } + keyBinding.objectToEntry(key, keyThang); + } else { + if (value == null) { + throw new IllegalArgumentException("null key and null value"); + } + if (entityBinding == null) { + throw new IllegalStateException + ("EntityBinding required to derive key from value"); + } + if (!dupsView && isSecondary()) { + DatabaseEntry primaryKeyThang = new DatabaseEntry(); + entityBinding.objectToKey(value, primaryKeyThang); + DatabaseEntry valueThang = new DatabaseEntry(); + entityBinding.objectToData(value, valueThang); + secKeyCreator.createSecondaryKey(secDb, primaryKeyThang, + valueThang, keyThang); + } else { + entityBinding.objectToKey(value, keyThang); + } + } + if (recNumAccess && DbCompat.getRecordNumber(keyThang) <= 0) { + return false; + } + if (checkRange != null && !checkRange.check(keyThang)) { + return false; + } + return true; + } + + /** + * Returns whether data keys can be derived from the value/entity binding + * of this view, which determines whether a value/entity object alone is + * sufficient for operations that require keys. + */ + final boolean canDeriveKeyFromValue() { + + return (entityBinding != null); + } + + /** + * Populates the value entry and throws an exception if the primary key + * would be changed via an entity binding. + */ + void useValue(Object value, DatabaseEntry valueThang, + DatabaseEntry checkKeyThang) { + if (valueBinding != null) { + /* Allow binding to handle null value. */ + valueBinding.objectToEntry(value, valueThang); + } else if (entityBinding != null) { + if (value == null) { + throw new IllegalArgumentException + ("null value with entity binding"); + } + entityBinding.objectToData(value, valueThang); + if (checkKeyThang != null) { + DatabaseEntry thang = new DatabaseEntry(); + entityBinding.objectToKey(value, thang); + if (!KeyRange.equalBytes(thang, checkKeyThang)) { + throw new IllegalArgumentException + ("cannot change primary key"); + } + } + } else { + if (value != null) { + throw new IllegalArgumentException + ("non-null value with null value/entity binding"); + } + valueThang.setData(KeyRange.ZERO_LENGTH_BYTE_ARRAY); + valueThang.setOffset(0); + valueThang.setSize(0); + } + } + + /** + * Converts a key entry to a key object. + */ + Object makeKey(DatabaseEntry keyThang, DatabaseEntry priKeyThang) { + + if (keyBinding == null) { + throw new UnsupportedOperationException(); + } else { + DatabaseEntry thang = dupsView ? priKeyThang : keyThang; + if (thang.getSize() == 0) { + return null; + } else { + return keyBinding.entryToObject(thang); + } + } + } + + /** + * Converts a key-value entry pair to a value object. + */ + Object makeValue(DatabaseEntry primaryKeyThang, DatabaseEntry valueThang) { + + Object value; + if (valueBinding != null) { + value = valueBinding.entryToObject(valueThang); + } else if (entityBinding != null) { + value = entityBinding.entryToObject(primaryKeyThang, + valueThang); + } else { + throw new UnsupportedOperationException + ("Requires valueBinding or entityBinding"); + } + return value; + } + + /** + * Intersects the given key and the current range. 
+ */ + KeyRange subRange(KeyRange useRange, Object singleKey) + throws DatabaseException, KeyRangeException { + + return useRange.subRange(makeRangeKey(singleKey)); + } + + /** + * Intersects the given range and the current range. + */ + KeyRange subRange(KeyRange useRange, + Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive) + throws DatabaseException, KeyRangeException { + + if (beginKey == endKey && beginInclusive && endInclusive) { + return subRange(useRange, beginKey); + } + if (!ordered) { + throw new UnsupportedOperationException + ("Cannot use key ranges on an unsorted database"); + } + DatabaseEntry beginThang = + (beginKey != null) ? makeRangeKey(beginKey) : null; + DatabaseEntry endThang = + (endKey != null) ? makeRangeKey(endKey) : null; + + return useRange.subRange(beginThang, beginInclusive, + endThang, endInclusive); + } + + /** + * Returns the range to use for sub-ranges. Returns range if this is not a + * dupsView, or the dupsRange if this is a dupsView, creating dupsRange if + * necessary. + */ + KeyRange useSubRange() + throws DatabaseException { + + if (dupsView) { + synchronized (this) { + if (dupsRange == null) { + DatabaseConfig config = + secDb.getPrimaryDatabase().getConfig(); + dupsRange = new KeyRange(config.getBtreeComparator()); + } + } + return dupsRange; + } else { + return range; + } + } + + /** + * Given a key object, make a key entry that can be used in a range. + */ + private DatabaseEntry makeRangeKey(Object key) + throws DatabaseException { + + DatabaseEntry thang = new DatabaseEntry(); + if (keyBinding != null) { + useKey(key, null, thang, null); + } else { + useKey(null, key, thang, null); + } + return thang; + } +} diff --git a/src/com/sleepycat/collections/MapEntryParameter.java b/src/com/sleepycat/collections/MapEntryParameter.java new file mode 100644 index 0000000..4341951 --- /dev/null +++ b/src/com/sleepycat/collections/MapEntryParameter.java @@ -0,0 +1,129 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Map; + +/** + * A simple Map.Entry implementation that can be used as in + * input parameter. Since a MapEntryParameter is not obtained + * from a map, it is not attached to any map in particular. To emphasize that + * changing this object does not change the map, the {@link #setValue} method + * always throws UnsupportedOperationException. + * + *
+ * <p>Warning: Use of this interface violates the Java Collections
+ * interface contract, since it states that Map.Entry objects
+ * should only be obtained from Map.entrySet() sets, while this
+ * class allows constructing them directly. However, it is useful for
+ * performing operations on an entry set such as add(), contains(), etc. For
+ * restrictions see {@link #getValue} and {@link #setValue}.</p>
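+ *
+ * <p>An illustrative use as an input argument (the map variable is
+ * hypothetical):</p>
+ * <pre>
+ *     Map.Entry entry = new MapEntryParameter("key", "value");
+ *     boolean present = map.entrySet().contains(entry);
+ * </pre>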
    + * + * @author Mark Hayes + */ +public class MapEntryParameter implements Map.Entry { + + private K key; + private V value; + + /** + * Creates a map entry with a given key and value. + * + * @param key is the key to use. + * + * @param value is the value to use. + */ + public MapEntryParameter(K key, V value) { + + this.key = key; + this.value = value; + } + + /** + * Computes a hash code as specified by {@link + * java.util.Map.Entry#hashCode}. + * + * @return the computed hash code. + */ + public int hashCode() { + + return ((key == null) ? 0 : key.hashCode()) ^ + ((value == null) ? 0 : value.hashCode()); + } + + /** + * Compares this entry to a given entry as specified by {@link + * java.util.Map.Entry#equals}. + * + * @return the computed hash code. + */ + public boolean equals(Object other) { + + if (!(other instanceof Map.Entry)) { + return false; + } + + Map.Entry e = (Map.Entry) other; + + return ((key == null) ? (e.getKey() == null) + : key.equals(e.getKey())) && + ((value == null) ? (e.getValue() == null) + : value.equals(e.getValue())); + } + + /** + * Returns the key of this entry. + * + * @return the key of this entry. + */ + public final K getKey() { + + return key; + } + + /** + * Returns the value of this entry. Note that this will be the value + * passed to the constructor or the last value passed to {@link #setValue}. + * It will not reflect changes made to a Map. + * + * @return the value of this entry. + */ + public final V getValue() { + + return value; + } + + /** + * Always throws UnsupportedOperationException since this + * object is not attached to a map. + */ + public V setValue(V newValue) { + + throw new UnsupportedOperationException(); + } + + final void setValueInternal(V newValue) { + + this.value = newValue; + } + + /** + * Converts the entry to a string representation for debugging. + * + * @return the string representation. + */ + public String toString() { + + return "[key [" + key + "] value [" + value + ']'; + } +} diff --git a/src/com/sleepycat/collections/MyRangeCursor.java b/src/com/sleepycat/collections/MyRangeCursor.java new file mode 100644 index 0000000..cba25d3 --- /dev/null +++ b/src/com/sleepycat/collections/MyRangeCursor.java @@ -0,0 +1,79 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.keyrange.RangeCursor; + +class MyRangeCursor extends RangeCursor { + + private DataView view; + private boolean isRecnoOrQueue; + private boolean writeCursor; + + MyRangeCursor(KeyRange range, + CursorConfig config, + DataView view, + boolean writeAllowed) + throws DatabaseException { + + super(range, view.dupsRange, view.dupsOrdered, + openCursor(view, config, writeAllowed)); + this.view = view; + isRecnoOrQueue = view.recNumAllowed && !view.btreeRecNumDb; + writeCursor = isWriteCursor(config, writeAllowed); + } + + /** + * Returns true if a write cursor is requested by the user via the cursor + * config, or if this is a writable cursor and the user has not specified a + * cursor config. For CDB, a special cursor must be created for writing. + * See CurrentTransaction.openCursor. + */ + private static boolean isWriteCursor(CursorConfig config, + boolean writeAllowed) { + return DbCompat.getWriteCursor(config) || + (config == CursorConfig.DEFAULT && writeAllowed); + } + + private static Cursor openCursor(DataView view, + CursorConfig config, + boolean writeAllowed) + throws DatabaseException { + + return view.currentTxn.openCursor + (view.db, config, isWriteCursor(config, writeAllowed), + view.useTransaction()); + } + + protected Cursor dupCursor(Cursor cursor, boolean samePosition) + throws DatabaseException { + + return view.currentTxn.dupCursor(cursor, writeCursor, samePosition); + } + + protected void closeCursor(Cursor cursor) + throws DatabaseException { + + view.currentTxn.closeCursor(cursor); + } + + protected boolean checkRecordNumber() { + return isRecnoOrQueue; + } +} diff --git a/src/com/sleepycat/collections/PrimaryKeyAssigner.java b/src/com/sleepycat/collections/PrimaryKeyAssigner.java new file mode 100644 index 0000000..87546e5 --- /dev/null +++ b/src/com/sleepycat/collections/PrimaryKeyAssigner.java @@ -0,0 +1,39 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; + +/** + * An interface implemented to assign new primary key values. + * An implementation of this interface is passed to the {@link StoredMap} + * or {@link StoredSortedMap} constructor to assign primary keys for that + * store. Key assignment occurs when StoredMap.append() is called. + * + * @author Mark Hayes + */ +public interface PrimaryKeyAssigner { + + /** + * Assigns a new primary key value into the given buffer. + * + * @param keyData the buffer. + * + * @throws DatabaseException to stop the operation and cause this exception + * to be propagated to the caller of StoredMap.append(). 
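+ *
+ * <p>A minimal assigner sketch (assumes IntegerBinding from the
+ * com.sleepycat.bind.tuple package; a production assigner would persist
+ * its counter):</p>
+ * <pre>
+ *     class SequenceAssigner implements PrimaryKeyAssigner {
+ *         private int next = 1;
+ *         public synchronized void assignKey(DatabaseEntry keyData) {
+ *             IntegerBinding.intToEntry(next++, keyData);
+ *         }
+ *     }
+ * </pre>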
+ */ + void assignKey(DatabaseEntry keyData) + throws DatabaseException; +} diff --git a/src/com/sleepycat/collections/StoredCollection.java b/src/com/sleepycat/collections/StoredCollection.java new file mode 100644 index 0000000..95e13e7 --- /dev/null +++ b/src/com/sleepycat/collections/StoredCollection.java @@ -0,0 +1,742 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseEntry; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +/* */ +import com.sleepycat.je.JoinConfig; +/* */ +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A abstract base class for all stored collections. This class, and its + * base class {@link StoredContainer}, provide implementations of most methods + * in the {@link Collection} interface. Other methods, such as {@link #add} + * and {@link #remove}, are provided by concrete classes that extend this + * class. + * + *
+ * <p>In addition, this class provides the following methods for stored
+ * collections only. Note that the use of these methods is not compatible with
+ * the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #getIteratorBlockSize}</li>
+ * <li>{@link #setIteratorBlockSize}</li>
+ * <li>{@link #storedIterator()}</li>
+ * <li>{@link #storedIterator(boolean)}</li>
+ * <li>{@link #join}</li>
+ * <li>{@link #toList()}</li>
+ * </ul>
    + * + * @author Mark Hayes + */ +public abstract class StoredCollection extends StoredContainer + implements Collection { + + /** + * The default number of records read at one time by iterators. + * @see #setIteratorBlockSize + */ + public static final int DEFAULT_ITERATOR_BLOCK_SIZE = 10; + + private int iteratorBlockSize = DEFAULT_ITERATOR_BLOCK_SIZE; + + StoredCollection(DataView view) { + + super(view); + } + + /** + * Returns the number of records read at one time by iterators returned by + * the {@link #iterator} method. By default this value is {@link + * #DEFAULT_ITERATOR_BLOCK_SIZE}. + * + * @return the number of records. + */ + public int getIteratorBlockSize() { + + return iteratorBlockSize; + } + + /** + * Changes the number of records read at one time by iterators returned by + * the {@link #iterator} method. By default this value is {@link + * #DEFAULT_ITERATOR_BLOCK_SIZE}. + * + * @param blockSize the number of records. + * + * @throws IllegalArgumentException if the blockSize is less than two. + */ + public void setIteratorBlockSize(int blockSize) { + + if (blockSize < 2) { + throw new IllegalArgumentException + ("blockSize is less than two: " + blockSize); + } + + iteratorBlockSize = blockSize; + } + + final boolean add(Object key, Object value) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = + cursor.putNoDupData(key, value, null, false); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + BlockIterator blockIterator() { + return new BlockIterator(this, isWriteAllowed(), iteratorBlockSize); + } + + /** + * Returns an iterator over the elements in this collection. + * The iterator will be read-only if the collection is read-only. + * This method conforms to the {@link Collection#iterator} interface. + * + *
+ * <p>The iterator returned by this method does not keep a database cursor
+ * open and therefore it does not need to be closed. It reads blocks of
+ * records as needed, opening and closing a cursor to read each block of
+ * records. The number of records per block is 10 by default and can be
+ * changed with {@link #setIteratorBlockSize}.</p>
    + * + *
+ * <p>Because this iterator does not keep a cursor open, if it is used
+ * without transactions, the iterator does not have cursor
+ * stability characteristics. In other words, the record at the
+ * current iterator position can be changed or deleted by another thread.
+ * To prevent this from happening, call this method within a transaction or
+ * use the {@link #storedIterator()} method instead.</p>
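+ *
+ * <p>An illustrative sketch (coll and process are hypothetical):</p>
+ * <pre>
+ *     coll.setIteratorBlockSize(100);
+ *     Iterator i = coll.iterator();  // no close required
+ *     while (i.hasNext()) {
+ *         process(i.next());
+ *     }
+ * </pre>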
    + * + * @return a standard {@link Iterator} for this collection. + * + * @see #isWriteAllowed + */ + public Iterator iterator() { + return blockIterator(); + } + + /** + * Returns an iterator over the elements in this collection. + * The iterator will be read-only if the collection is read-only. + * This method does not exist in the standard {@link Collection} interface. + * + *
+ * <p>If {@code Iterator.set} or {@code Iterator.remove} will be called
+ * and the underlying Database is transactional, then a transaction must be
+ * active when calling this method and must remain active while using the
+ * iterator.</p>
    + * + *
+ * <p>Warning: The iterator returned must be explicitly
+ * closed using {@link StoredIterator#close()} or {@link
+ * StoredIterator#close(java.util.Iterator)} to release the underlying
+ * database cursor resources.</p>
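+ *
+ * <p>A close-safe usage sketch (coll and process are hypothetical):</p>
+ * <pre>
+ *     StoredIterator i = coll.storedIterator();
+ *     try {
+ *         while (i.hasNext()) {
+ *             process(i.next());
+ *         }
+ *     } finally {
+ *         StoredIterator.close(i);
+ *     }
+ * </pre>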
    + * + * @return a {@link StoredIterator} for this collection. + * + * @see #isWriteAllowed + */ + public StoredIterator storedIterator() { + + return storedIterator(isWriteAllowed()); + } + + /** + * Returns a read or read-write iterator over the elements in this + * collection. + * This method does not exist in the standard {@link Collection} interface. + * + *
+ * <p>If {@code Iterator.set} or {@code Iterator.remove} will be called
+ * and the underlying Database is transactional, then a transaction must be
+ * active when calling this method and must remain active while using the
+ * iterator.</p>
    + * + *
+ * <p>Warning: The iterator returned must be explicitly
+ * closed using {@link StoredIterator#close()} or {@link
+ * StoredIterator#close(java.util.Iterator)} to release the underlying
+ * database cursor resources.</p>
    + * + * @param writeAllowed is true to open a read-write iterator or false to + * open a read-only iterator. If the collection is read-only the iterator + * will always be read-only. + * + * @return a {@link StoredIterator} for this collection. + * + * @throws IllegalStateException if writeAllowed is true but the collection + * is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + * + * @see #isWriteAllowed + */ + public StoredIterator storedIterator(boolean writeAllowed) { + + try { + return new StoredIterator(this, writeAllowed && isWriteAllowed(), + null); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * @param writeAllowed is true to open a read-write iterator or false to + * open a read-only iterator. If the collection is read-only the iterator + * will always be read-only. + * + * @return a {@link StoredIterator} for this collection. + * + * @deprecated Please use {@link #storedIterator()} or {@link + * #storedIterator(boolean)} instead. Because the iterator returned must + * be closed, the method name {@code iterator} is confusing since standard + * Java iterators do not need to be closed. + */ + public StoredIterator iterator(boolean writeAllowed) { + + return storedIterator(writeAllowed); + } + + /** + * Returns an array of all the elements in this collection. + * This method conforms to the {@link Collection#toArray()} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public Object[] toArray() { + + ArrayList list = new ArrayList(); + StoredIterator i = null; + try { + i = storedIterator(); + while (i.hasNext()) { + list.add(i.next()); + } + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + StoredIterator.close(i); + } + return list.toArray(); + } + + /** + * Returns an array of all the elements in this collection whose runtime + * type is that of the specified array. + * This method conforms to the {@link Collection#toArray(Object[])} + * interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public T[] toArray(T[] a) { + + int j = 0; + StoredIterator i = null; + try { + i = storedIterator(); + while (j < a.length && i.hasNext()) { + a[j++] = (T) i.next(); + } + if (j < a.length) { + a[j] = null; + } else if (i.hasNext()) { + ArrayList list = new ArrayList(Arrays.asList(a)); + while (i.hasNext()) { + list.add((T) i.next()); + } + a = list.toArray(a); + } + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + StoredIterator.close(i); + } + return a; + } + + /** + * Returns true if this collection contains all of the elements in the + * specified collection. + * This method conforms to the {@link Collection#containsAll} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. 
+ * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean containsAll(Collection coll) { + Iterator i = null; + try { + i = storedOrExternalIterator(coll); + while (i.hasNext()) { + if (!contains(i.next())) { + return false; + } + } + return true; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + StoredIterator.close(i); + } + } + + /** + * Adds all of the elements in the specified collection to this collection + * (optional operation). + * This method calls the {@link #add(Object)} method of the concrete + * collection class, which may or may not be supported. + * This method conforms to the {@link Collection#addAll} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only, or + * if the collection is indexed, or if the add method is not supported by + * the concrete collection. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean addAll(Collection coll) { + Iterator i = null; + boolean doAutoCommit = beginAutoCommit(); + try { + i = storedOrExternalIterator(coll); + boolean changed = false; + while (i.hasNext()) { + if (add(i.next())) { + changed = true; + } + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + return changed; + } catch (Exception e) { + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Removes all this collection's elements that are also contained in the + * specified collection (optional operation). + * This method conforms to the {@link Collection#removeAll} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean removeAll(Collection coll) { + Iterator i = null; + boolean doAutoCommit = beginAutoCommit(); + try { + boolean changed = false; + i = storedOrExternalIterator(coll); + while (i.hasNext()) { + if (remove(i.next())) { + changed = true; + } + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + return changed; + } catch (Exception e) { + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Retains only the elements in this collection that are contained in the + * specified collection (optional operation). + * This method conforms to the {@link Collection#removeAll} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only. 
+ * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean retainAll(Collection coll) { + + StoredIterator i = null; + boolean doAutoCommit = beginAutoCommit(); + try { + boolean changed = false; + i = storedIterator(); + while (i.hasNext()) { + if (!coll.contains(i.next())) { + i.remove(); + changed = true; + } + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + return changed; + } catch (Exception e) { + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Compares the specified object with this collection for equality. + * A value comparison is performed by this method and the stored values + * are compared rather than calling the equals() method of each element. + * This method conforms to the {@link Collection#equals} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean equals(Object other) { + + if (other instanceof Collection) { + Collection otherColl = StoredCollection.copyCollection(other); + StoredIterator i = null; + try { + i = storedIterator(); + boolean otherHasAll = true; + while (i.hasNext()) { + if (!otherColl.remove(i.next())) { + otherHasAll = false; + break; + } + } + return otherHasAll && otherColl.isEmpty(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + StoredIterator.close(i); + } + } else { + return false; + } + } + + /* + * Add this in to keep FindBugs from whining at us about implementing + * equals(), but not hashCode(). + */ + public int hashCode() { + return super.hashCode(); + } + + /** + * Returns a copy of this collection as an ArrayList. This is the same as + * {@link #toArray()} but returns a collection instead of an array. + * + * @return an {@link ArrayList} containing a copy of all elements in this + * collection. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public List toList() { + + ArrayList list = new ArrayList(); + StoredIterator i = null; + try { + i = storedIterator(); + while (i.hasNext()) { + list.add(i.next()); + } + return list; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + StoredIterator.close(i); + } + } + + /** + * Converts the collection to a string representation for debugging. + * WARNING: The returned string may be very large. + * + * @return the string representation. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). 
+ */ + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append("["); + StoredIterator i = null; + try { + i = storedIterator(); + while (i.hasNext()) { + if (buf.length() > 1) buf.append(','); + buf.append(i.next().toString()); + } + buf.append(']'); + return buf.toString(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + StoredIterator.close(i); + } + } + + // Inherit javadoc + public int size() { + + boolean countDups = iterateDuplicates(); + if (DbCompat.DATABASE_COUNT && countDups && !view.range.hasBound()) { + try { + return (int) DbCompat.getDatabaseCount(view.db); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } else { + int count = 0; + CursorConfig cursorConfig = view.currentTxn.isLockingMode() ? + CursorConfig.READ_UNCOMMITTED : null; + DataCursor cursor = null; + /* Auto-commit is not needed because ReadUncommitted is used. */ + try { + cursor = new DataCursor(view, false, cursorConfig); + OperationStatus status = cursor.getFirst(false); + while (status == OperationStatus.SUCCESS) { + if (countDups) { + count += cursor.count(); + } else { + count += 1; + } + status = cursor.getNextNoDup(false); + } + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + return count; + } + } + + /** + * Returns an iterator representing an equality join of the indices and + * index key values specified. + * This method does not exist in the standard {@link Collection} interface. + * + *

+ * <p><em>Warning:</em> The iterator returned must be explicitly + * closed using {@link StoredIterator#close()} or {@link + * StoredIterator#close(java.util.Iterator)} to release the underlying + * database cursor resources.</p>

    + * + *

+ * <p>The returned iterator supports only the two methods: hasNext() and + * next(). All other methods will throw UnsupportedOperationException.</p>

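A hedged sketch of the equality join described above; `parts`, `byColor`, and `bySize` are hypothetical names, the latter two being stored containers backed by secondary indices of the same primary database as `parts`:

    // Sketch: select elements whose secondary keys match both index values.
    StoredContainer[] indices = { byColor, bySize };  // hypothetical indices
    Object[] indexKeys = { "red", "large" };          // one key per index
    StoredIterator<Part> i = parts.join(indices, indexKeys, null); // null = default JoinConfig
    try {
        while (i.hasNext()) {  // only hasNext() and next() are supported
            Part p = i.next(); // 'Part' is a hypothetical element type
            // process p ...
        }
    } finally {
        i.close();             // release the underlying join cursor
    }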
    + * + * @param indices is an array of indices with elements corresponding to + * those in the indexKeys array. + * + * @param indexKeys is an array of index key values identifying the + * elements to be selected. + * + * @param joinConfig is the join configuration, or null to use the + * default configuration. + * + * @return an iterator over the elements in this collection that match + * all specified index key values. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws IllegalArgumentException if this collection is indexed or if a + * given index does not have the same store as this collection. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public StoredIterator join(StoredContainer[] indices, + Object[] indexKeys, + JoinConfig joinConfig) { + + try { + DataView[] indexViews = new DataView[indices.length]; + for (int i = 0; i < indices.length; i += 1) { + indexViews[i] = indices[i].view; + } + DataCursor cursor = view.join(indexViews, indexKeys, joinConfig); + return new StoredIterator(this, false, cursor); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + final E getFirstOrLast(boolean doGetFirst) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status; + if (doGetFirst) { + status = cursor.getFirst(false); + } else { + status = cursor.getLast(false); + } + return (status == OperationStatus.SUCCESS) ? + makeIteratorData(null, cursor) : + null; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + E makeIteratorData(BaseIterator iterator, DataCursor cursor) { + + return makeIteratorData(iterator, + cursor.getKeyThang(), + cursor.getPrimaryKeyThang(), + cursor.getValueThang()); + } + + abstract E makeIteratorData(BaseIterator iterator, + DatabaseEntry keyEntry, + DatabaseEntry priKeyEntry, + DatabaseEntry valueEntry); + + abstract boolean hasValues(); + + boolean iterateDuplicates() { + + return true; + } + + void checkIterAddAllowed() + throws UnsupportedOperationException { + + if (!areDuplicatesAllowed()) { + throw new UnsupportedOperationException("Duplicates required"); + } + } + + int getIndexOffset() { + + return 0; + } + + private static Collection copyCollection(Object other) { + + if (other instanceof StoredCollection) { + return ((StoredCollection) other).toList(); + } else { + return new ArrayList((Collection) other); + } + } +} diff --git a/src/com/sleepycat/collections/StoredCollections.java b/src/com/sleepycat/collections/StoredCollections.java new file mode 100644 index 0000000..1c48569 --- /dev/null +++ b/src/com/sleepycat/collections/StoredCollections.java @@ -0,0 +1,209 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; + +import com.sleepycat.je.CursorConfig; + +/** + * Static methods operating on collections and maps. + * + *

+ * <p>This class consists exclusively of static methods that operate on or + * return stored collections and maps, jointly called containers. It contains + * methods for changing certain properties of a container. Because container + * properties are immutable, these methods always return a new container + * instance. This allows stored container instances to be used safely by + * multiple threads. Creating the new container instance is not expensive and + * creates only two new objects.</p>

    + * + *

+ * <p>When a container is created with a particular property, all containers + * and iterators derived from that container will inherit the property. For + * example, if a read-uncommitted Map is created then calls to its subMap(), + * values(), entrySet(), and keySet() methods will create read-uncommitted + * containers also.</p>

    + * + *

+ * <p>Method names beginning with "configured" create a new container with a + * specified {@link CursorConfig} from a given stored container. This allows + * configuring a container for read-committed isolation, read-uncommitted + * isolation, or any other property supported by CursorConfig. + * All operations performed with the resulting container will be performed with + * the specified cursor configuration.</p>

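For illustration (names hypothetical; imports of java.util.SortedMap and com.sleepycat.je.CursorConfig assumed), deriving a read-uncommitted view of an existing stored sorted map might look like this; the original map is unchanged and both instances remain usable concurrently:

    // Sketch: all operations through 'dirtyReadMap' use READ_UNCOMMITTED, and
    // views derived from it (subMap, keySet, ...) inherit that configuration.
    SortedMap<String, String> dirtyReadMap =
        StoredCollections.configuredSortedMap(map, CursorConfig.READ_UNCOMMITTED);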
    + */ +public class StoredCollections { + + private StoredCollections() {} + + /** + * Creates a configured collection from a given stored collection. + * + * @param storedCollection the base collection. + * + * @param config is the cursor configuration to be used for all operations + * performed via the new collection instance; null may be specified to use + * the default configuration. + * + * @param the element class. + * + * @return the configured collection. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static Collection configuredCollection(Collection + storedCollection, + CursorConfig config) { + return (Collection) + ((StoredContainer) storedCollection).configuredClone(config); + } + + /** + * Creates a configured list from a given stored list. + * + *

+ * <p>Note that this method may not be called in the JE product, since the + * StoredList class is not supported.</p>

    + * + * @param storedList the base list. + * + * @param config is the cursor configuration to be used for all operations + * performed via the new list instance; null may be specified to use the + * default configuration. + * + * @param the element class. + * + * @return the configured list. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static List configuredList(List storedList, + CursorConfig config) { + return (List) ((StoredContainer) storedList).configuredClone(config); + } + + /** + * Creates a configured map from a given stored map. + * + * @param storedMap the base map. + * + * @param config is the cursor configuration to be used for all operations + * performed via the new map instance; null may be specified to use the + * default configuration. + * + * @param the key class. + * + * @param the value class. + * + * @return the configured map. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static Map configuredMap(Map storedMap, + CursorConfig config) { + return (Map) ((StoredContainer) storedMap).configuredClone(config); + } + + /** + * Creates a configured set from a given stored set. + * + * @param storedSet the base set. + * + * @param config is the cursor configuration to be used for all operations + * performed via the new set instance; null may be specified to use the + * default configuration. + * + * @param the element class. + * + * @return the configured set. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static Set configuredSet(Set storedSet, + CursorConfig config) { + return (Set) ((StoredContainer) storedSet).configuredClone(config); + } + + /** + * Creates a configured sorted map from a given stored sorted map. + * + * @param storedSortedMap the base map. + * + * @param config is the cursor configuration to be used for all operations + * performed via the new map instance; null may be specified to use the + * default configuration. + * + * @param the key class. + * + * @param the value class. + * + * @return the configured map. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static SortedMap configuredSortedMap + (SortedMap storedSortedMap, CursorConfig config) { + return (SortedMap) + ((StoredContainer) storedSortedMap).configuredClone(config); + } + + /** + * Creates a configured sorted set from a given stored sorted set. + * + * @param storedSortedSet the base set. + * + * @param config is the cursor configuration to be used for all operations + * performed via the new set instance; null may be specified to use the + * default configuration. + * + * @param the element class. + * + * @return the configured set. + * + * @throws ClassCastException if the given container is not a + * StoredContainer. + */ + public static SortedSet configuredSortedSet(SortedSet + storedSortedSet, + CursorConfig config) { + return (SortedSet) + ((StoredContainer) storedSortedSet).configuredClone(config); + } + + /** + * Clones an iterator preserving its current position. + * + * @param iter an iterator to clone. + * + * @param the element class. + * + * @return a new {@code Iterator} having the same position as the given + * iterator. + * + * @throws ClassCastException if the given iterator was not obtained via a + * {@link StoredCollection} method. 
+ */ + public static Iterator iterator(Iterator iter) { + + return ((BaseIterator) iter).dup(); + } +} diff --git a/src/com/sleepycat/collections/StoredContainer.java b/src/com/sleepycat/collections/StoredContainer.java new file mode 100644 index 0000000..c09f85c --- /dev/null +++ b/src/com/sleepycat/collections/StoredContainer.java @@ -0,0 +1,476 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Collection; +import java.util.Iterator; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A abstract base class for all stored collections and maps. This class + * provides implementations of methods that are common to the {@link + * java.util.Collection} and the {@link java.util.Map} interfaces, namely + * {@link #clear}, {@link #isEmpty} and {@link #size}. + * + *

+ * <p>In addition, this class provides the following methods for stored + * collections only. Note that the use of these methods is not compatible with + * the standard Java collections interface; a usage sketch follows the list + * below.</p>

+ * <ul> + * <li>{@link #isWriteAllowed()}</li> + * <li>{@link #isSecondary()}</li> + * <li>{@link #isOrdered()}</li> + * <li>{@link #areKeyRangesAllowed()}</li> + * <li>{@link #areDuplicatesAllowed()}</li> + * <li>{@link #areDuplicatesOrdered()}</li> + * <li>{@link #areKeysRenumbered()}</li> + * <li>{@link #getCursorConfig()}</li> + * <li>{@link #isTransactional()}</li> + * </ul>
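A small sketch of the property checks these methods enable; `set` is a hypothetical stored collection cast to StoredContainer:

    // Sketch: inspect container properties before relying on optional behavior.
    StoredContainer c = (StoredContainer) set;
    if (c.isOrdered() && c.areKeyRangesAllowed()) {
        // range views (subMap/headMap/tailMap style) are available
    }
    if (!c.isWriteAllowed()) {
        // mutating methods would throw UnsupportedOperationException
    }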
    + * + * @author Mark Hayes + */ +public abstract class StoredContainer implements Cloneable { + + DataView view; + + StoredContainer(DataView view) { + + this.view = view; + } + + /** + * Returns true if this is a read-write container or false if this is a + * read-only container. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether write is allowed. + */ + public final boolean isWriteAllowed() { + + return view.writeAllowed; + } + + /** + * Returns the cursor configuration that is used for all operations + * performed via this container. + * For example, if CursorConfig.getReadUncommitted returns + * true, data will be read that is modified but not committed. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return the cursor configuration, or null if no configuration has been + * specified. + */ + public final CursorConfig getCursorConfig() { + + return DbCompat.cloneCursorConfig(view.cursorConfig); + } + + /** + * Returns whether the databases underlying this container are + * transactional. + * Even in a transactional environment, a database will be transactional + * only if it was opened within a transaction or if the auto-commit option + * was specified when it was opened. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether the database is transactional. + */ + public final boolean isTransactional() { + + return view.transactional; + } + + /** + * Clones a container with a specified cursor configuration. + */ + final StoredContainer configuredClone(CursorConfig config) { + + try { + StoredContainer cont = (StoredContainer) clone(); + cont.view = cont.view.configuredView(config); + cont.initAfterClone(); + return cont; + } catch (CloneNotSupportedException willNeverOccur) { return null; } + } + + /** + * Override this method to initialize view-dependent fields. + */ + void initAfterClone() { + } + + /** + * Returns whether duplicate keys are allowed in this container. + * Duplicates are optionally allowed for HASH and BTREE databases. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + *

+ * <p>Note that the JE product only supports BTREE databases.</p>

    + * + * @return whether duplicates are allowed. + */ + public final boolean areDuplicatesAllowed() { + + return view.dupsAllowed; + } + + /** + * Returns whether duplicate keys are allowed and sorted by element value. + * Duplicates are optionally sorted for HASH and BTREE databases. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + *

+ * <p>Note that the JE product only supports BTREE databases, and + * duplicates are always sorted.</p>

    + * + * @return whether duplicates are ordered. + */ + public final boolean areDuplicatesOrdered() { + + return view.dupsOrdered; + } + + /** + * Returns whether keys are renumbered when insertions and deletions occur. + * Keys are optionally renumbered for RECNO databases. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + *

+ * <p>Note that the JE product does not support RECNO databases, and + * therefore keys are never renumbered.</p>

    + * + * @return whether keys are renumbered. + */ + public final boolean areKeysRenumbered() { + + return view.keysRenumbered; + } + + /** + * Returns whether keys are ordered in this container. + * Keys are ordered for BTREE, RECNO and QUEUE databases. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + *

+ * <p>Note that the JE product only supports BTREE databases, and + * therefore keys are always ordered.</p>

    + * + * @return whether keys are ordered. + */ + public final boolean isOrdered() { + + return view.ordered; + } + + /** + * Returns whether key ranges are allowed in this container. + * Key ranges are allowed only for BTREE databases. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + *

+ * <p>Note that the JE product only supports BTREE databases, and + * therefore key ranges are always allowed.</p>

    + * + * @return whether keys are ordered. + */ + public final boolean areKeyRangesAllowed() { + + return view.keyRangesAllowed; + } + + /** + * Returns whether this container is a view on a secondary database rather + * than directly on a primary database. + * This method does not exist in the standard {@link java.util.Map} or + * {@link java.util.Collection} interfaces. + * + * @return whether the view is for a secondary database. + */ + public final boolean isSecondary() { + + return view.isSecondary(); + } + + /** + * Returns a non-transactional count of the records in the collection or + * map. This method conforms to the {@link java.util.Collection#size} and + * {@link java.util.Map#size} interfaces. + * + * + *

+ * <p>This operation is faster than obtaining a count by scanning the + * collection manually, and will not perturb the current contents of the + * cache. However, the count is not guaranteed to be accurate if there are + * concurrent updates.</p>

    + * + * + * @return the number of records. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public abstract int size(); + + /** + * Returns true if this map or collection contains no mappings or elements. + * This method conforms to the {@link java.util.Collection#isEmpty} and + * {@link java.util.Map#isEmpty} interfaces. + * + * @return whether the container is empty. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean isEmpty() { + + try { + return view.isEmpty(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Removes all mappings or elements from this map or collection (optional + * operation). + * This method conforms to the {@link java.util.Collection#clear} and + * {@link java.util.Map#clear} interfaces. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the container is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public void clear() { + + boolean doAutoCommit = beginAutoCommit(); + try { + view.clear(); + commitAutoCommit(doAutoCommit); + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + Object getValue(Object key) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + if (OperationStatus.SUCCESS == + cursor.getSearchKey(key, null, false)) { + return cursor.getCurrentValue(); + } else { + return null; + } + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + Object putKeyValue(final Object key, final Object value) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + Object[] oldValue = new Object[1]; + cursor.put(key, value, oldValue, false); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return oldValue[0]; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + final boolean removeKey(final Object key, final Object[] oldVal) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + boolean found = false; + OperationStatus status = cursor.getSearchKey(key, null, true); + while (status == OperationStatus.SUCCESS) { + cursor.delete(); + found = true; + if (oldVal != null && oldVal[0] == null) { + oldVal[0] = cursor.getCurrentValue(); + } + status = areDuplicatesAllowed() ? 
+ cursor.getNextDup(true): OperationStatus.NOTFOUND; + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return found; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + boolean containsKey(Object key) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + return (OperationStatus.SUCCESS == + cursor.getSearchKey(key, null, false)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + final boolean removeValue(Object value) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = cursor.findValue(value, true); + if (status == OperationStatus.SUCCESS) { + cursor.delete(); + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + boolean containsValue(Object value) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status = cursor.findValue(value, true); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + /** + * Returns a StoredIterator if the given collection is a StoredCollection, + * else returns a regular/external Iterator. The iterator returned should + * be closed with the static method StoredIterator.close(Iterator). + */ + final Iterator storedOrExternalIterator(Collection coll) { + + if (coll instanceof StoredCollection) { + return ((StoredCollection) coll).storedIterator(); + } else { + return coll.iterator(); + } + } + + final void closeCursor(DataCursor cursor) { + + if (cursor != null) { + try { + cursor.close(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + } + + final boolean beginAutoCommit() { + if (view.transactional) { + final CurrentTransaction currentTxn = view.getCurrentTxn(); + try { + if (currentTxn.isAutoCommitAllowed()) { + currentTxn.beginTransaction(null); + return true; + } + } catch (DatabaseException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } + return false; + } + + final void commitAutoCommit(boolean doAutoCommit) + throws DatabaseException { + + if (doAutoCommit) { + view.getCurrentTxn().commitTransaction(); + } + } + + final RuntimeException handleException(Exception e, boolean doAutoCommit) { + + if (doAutoCommit) { + try { + view.getCurrentTxn().abortTransaction(); + } catch (DatabaseException ignored) { + /* Klockwork - ok */ + } + } + return StoredContainer.convertException(e); + } + + static RuntimeException convertException(Exception e) { + + return RuntimeExceptionWrapper.wrapIfNeeded(e); + } +} diff --git a/src/com/sleepycat/collections/StoredEntrySet.java b/src/com/sleepycat/collections/StoredEntrySet.java new file mode 100644 index 0000000..3d91e9f --- /dev/null +++ b/src/com/sleepycat/collections/StoredEntrySet.java @@ -0,0 +1,208 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Map; +import java.util.Set; + +import com.sleepycat.je.DatabaseEntry; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The Set returned by Map.entrySet(). This class may not be instantiated + * directly. Contrary to what is stated by {@link Map#entrySet} this class + * does support the {@link #add} and {@link #addAll} methods. + * + *

+ * <p>The {@link java.util.Map.Entry#setValue} method of the Map.Entry objects + * that are returned by this class and its iterators behaves just as the {@link + * StoredIterator#set} method does.</p>

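As a sketch (hypothetical names; the generic signatures of the real API are assumed), updating values in place through the entry set:

    // Sketch: bump every value via Map.Entry.setValue(), which writes through
    // to the database just as StoredIterator.set() does.  'map' is a
    // hypothetical StoredMap<String, Integer>; for a transactional database a
    // transaction must be active when the iterator is created.
    StoredEntrySet<String, Integer> entries =
        (StoredEntrySet<String, Integer>) map.entrySet();
    StoredIterator<Map.Entry<String, Integer>> i = entries.storedIterator(true);
    try {
        while (i.hasNext()) {
            Map.Entry<String, Integer> e = i.next();
            e.setValue(e.getValue() + 1);
        }
    } finally {
        i.close();
    }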
    + * + * @author Mark Hayes + */ +public class StoredEntrySet + extends StoredCollection> + implements Set> { + + StoredEntrySet(DataView mapView) { + + super(mapView); + } + + /** + * Adds the specified element to this set if it is not already present + * (optional operation). + * This method conforms to the {@link Set#add} interface. + * + * @param mapEntry must be a {@link java.util.Map.Entry} instance. + * + * @return true if the key-value pair was added to the set (and was not + * previously present). + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws ClassCastException if the mapEntry is not a {@link + * java.util.Map.Entry} instance. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean add(Map.Entry mapEntry) { + + return add(mapEntry.getKey(), mapEntry.getValue()); + } + + /** + * Removes the specified element from this set if it is present (optional + * operation). + * This method conforms to the {@link Set#remove} interface. + * + * @param mapEntry is a {@link java.util.Map.Entry} instance to be removed. + * + * @return true if the key-value pair was removed from the set, or false if + * the mapEntry is not a {@link java.util.Map.Entry} instance or is not + * present in the set. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean remove(Object mapEntry) { + + if (!(mapEntry instanceof Map.Entry)) { + return false; + } + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + Map.Entry entry = (Map.Entry) mapEntry; + OperationStatus status = + cursor.findBoth(entry.getKey(), entry.getValue(), true); + if (status == OperationStatus.SUCCESS) { + cursor.delete(); + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns true if this set contains the specified element. + * This method conforms to the {@link Set#contains} interface. + * + * @param mapEntry is a {@link java.util.Map.Entry} instance to be checked. + * + * @return true if the key-value pair is present in the set, or false if + * the mapEntry is not a {@link java.util.Map.Entry} instance or is not + * present in the set. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). 
+ */ + public boolean contains(Object mapEntry) { + + if (!(mapEntry instanceof Map.Entry)) { + return false; + } + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + Map.Entry entry = (Map.Entry) mapEntry; + OperationStatus status = + cursor.findBoth(entry.getKey(), entry.getValue(), false); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + // javadoc is inherited + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append("["); + StoredIterator i = null; + try { + i = storedIterator(); + while (i.hasNext()) { + Map.Entry entry = (Map.Entry) i.next(); + if (buf.length() > 1) buf.append(','); + Object key = entry.getKey(); + Object val = entry.getValue(); + if (key != null) buf.append(key.toString()); + buf.append('='); + if (val != null) buf.append(val.toString()); + } + buf.append(']'); + return buf.toString(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + StoredIterator.close(i); + } + } + + Map.Entry makeIteratorData(BaseIterator iterator, + DatabaseEntry keyEntry, + DatabaseEntry priKeyEntry, + DatabaseEntry valueEntry) { + + return new StoredMapEntry(view.makeKey(keyEntry, priKeyEntry), + view.makeValue(priKeyEntry, valueEntry), + this, iterator); + } + + boolean hasValues() { + + return true; + } +} diff --git a/src/com/sleepycat/collections/StoredIterator.java b/src/com/sleepycat/collections/StoredIterator.java new file mode 100644 index 0000000..7f19817 --- /dev/null +++ b/src/com/sleepycat/collections/StoredIterator.java @@ -0,0 +1,736 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.io.Closeable; +import java.util.Iterator; +import java.util.ListIterator; +import java.util.NoSuchElementException; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The Iterator returned by all stored collections. + * + *

+ * <p>While in general this class conforms to the {@link Iterator} interface, + * it is important to note that all iterators for stored collections must be + * explicitly closed with {@link #close()}. The static method {@link + * #close(java.util.Iterator)} allows calling close for all iterators without + * harm to iterators that are not from stored collections, and also avoids + * casting. If a stored iterator is not closed, unpredictable behavior + * including process death may result.</p>

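A sketch of the cast-free cleanup idiom this enables; `source` and `handle` are hypothetical, and the iterator may be either stored or ordinary:

    // Sketch: close(Iterator) is safe for any iterator; it closes a
    // StoredIterator and does nothing for ordinary java.util iterators.
    Iterator<String> i = source.iterator(); // stored or ordinary, unknown here
    try {
        while (i.hasNext()) {
            handle(i.next()); // 'handle' is a hypothetical consumer
        }
    } finally {
        StoredIterator.close(i);
    }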
    + * + *

+ * <p>This class implements the {@link Iterator} interface for all stored + * iterators. It also implements {@link ListIterator} because some list + * iterator methods apply to all stored iterators, for example, {@link + * #previous} and {@link #hasPrevious}. Other list iterator methods are always + * supported for lists, but for other types of collections are only supported + * under certain conditions. See {@link #nextIndex}, {@link #previousIndex}, + * {@link #add} and {@link #set} for details.</p>

    + * + *

+ * <p>In addition, this class provides the following methods for stored + * collection iterators only. Note that the use of these methods is not + * compatible with the standard Java collections interface; a usage sketch + * follows the list below.</p>

+ * <ul> + * <li>{@link #close()}</li> + * <li>{@link #close(Iterator)}</li> + * <li>{@link #count()}</li> + * <li>{@link #getCollection}</li> + * <li>{@link #setReadModifyWrite}</li> + * <li>{@link #isReadModifyWrite}</li> + * </ul>
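A hedged sketch combining several of these methods in a read-modify-write scan; `coll`, `Part`, and `discount` are hypothetical, the store is assumed transactional with a transaction active, and set() retains the restrictions documented below:

    // Sketch: take write locks while reading to avoid lock-upgrade deadlocks,
    // then replace each element via set().
    StoredIterator<Part> i = coll.storedIterator(true); // write-allowed iterator
    i.setReadModifyWrite(true);                         // read with write locks
    try {
        while (i.hasNext()) {
            Part p = i.next();
            i.set(discount(p)); // replaces the element last returned by next()
        }
    } finally {
        i.close();
    }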
    + * + * @author Mark Hayes + */ +public class StoredIterator extends BaseIterator + implements ListIterator, Cloneable + /* */ + , Closeable + /* */ + { + + /** + * Closes the given iterator using {@link #close()} if it is a {@link + * StoredIterator}. If the given iterator is not a {@link StoredIterator}, + * this method does nothing. + * + * @param i is the iterator to close. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public static void close(Iterator i) { + + if (i instanceof StoredIterator) { + ((StoredIterator) i).close(); + } + } + + private static final int MOVE_NEXT = 1; + private static final int MOVE_PREV = 2; + private static final int MOVE_FIRST = 3; + + private boolean lockForWrite; + private StoredCollection coll; + private DataCursor cursor; + private int toNext; + private int toPrevious; + private int toCurrent; + private boolean writeAllowed; + private boolean setAndRemoveAllowed; + private E currentData; + + StoredIterator(StoredCollection coll, + boolean writeAllowed, + DataCursor joinCursor) { + try { + this.coll = coll; + this.writeAllowed = writeAllowed; + if (joinCursor == null) + this.cursor = new DataCursor(coll.view, writeAllowed); + else + this.cursor = joinCursor; + reset(); + } catch (Exception e) { + try { + /* Ensure that the cursor is closed. [#10516] */ + close(); + } catch (Exception ignored) { + /* Klockwork - ok */ + } + throw StoredContainer.convertException(e); + } + } + + /** + * Returns whether write-locks will be obtained when reading with this + * cursor. + * Obtaining write-locks can prevent deadlocks when reading and then + * modifying data. + * + * @return the write-lock setting. + */ + public final boolean isReadModifyWrite() { + + return lockForWrite; + } + + /** + * Changes whether write-locks will be obtained when reading with this + * cursor. + * Obtaining write-locks can prevent deadlocks when reading and then + * modifying data. + * + * @param lockForWrite the write-lock setting. + */ + public void setReadModifyWrite(boolean lockForWrite) { + + this.lockForWrite = lockForWrite; + } + + // --- begin Iterator/ListIterator methods --- + + /** + * Returns true if this iterator has more elements when traversing in the + * forward direction. False is returned if the iterator has been closed. + * This method conforms to the {@link Iterator#hasNext} interface. + * + * @return whether {@link #next()} will succeed. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean hasNext() { + + if (cursor == null) { + return false; + } + try { + if (toNext != 0) { + OperationStatus status = move(toNext); + if (status == OperationStatus.SUCCESS) { + toNext = 0; + toPrevious = MOVE_PREV; + toCurrent = MOVE_PREV; + } + } + return (toNext == 0); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns true if this iterator has more elements when traversing in the + * reverse direction. It returns false if the iterator has been closed. + * This method conforms to the {@link ListIterator#hasPrevious} interface. + * + * @return whether {@link #previous()} will succeed. 
+ * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean hasPrevious() { + + if (cursor == null) { + return false; + } + try { + if (toPrevious != 0) { + OperationStatus status = move(toPrevious); + if (status == OperationStatus.SUCCESS) { + toPrevious = 0; + toNext = MOVE_NEXT; + toCurrent = MOVE_NEXT; + } + } + return (toPrevious == 0); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns the next element in the iteration. + * This method conforms to the {@link Iterator#next} interface. + * + * @return the next element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public E next() { + + try { + if (toNext != 0) { + OperationStatus status = move(toNext); + if (status == OperationStatus.SUCCESS) { + toNext = 0; + } + } + if (toNext == 0) { + currentData = coll.makeIteratorData(this, cursor); + toNext = MOVE_NEXT; + toPrevious = 0; + toCurrent = 0; + setAndRemoveAllowed = true; + return currentData; + } + // else throw NoSuchElementException below + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + throw new NoSuchElementException(); + } + + /** + * Returns the next element in the iteration. + * This method conforms to the {@link ListIterator#previous} interface. + * + * @return the previous element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public E previous() { + + try { + if (toPrevious != 0) { + OperationStatus status = move(toPrevious); + if (status == OperationStatus.SUCCESS) { + toPrevious = 0; + } + } + if (toPrevious == 0) { + currentData = coll.makeIteratorData(this, cursor); + toPrevious = MOVE_PREV; + toNext = 0; + toCurrent = 0; + setAndRemoveAllowed = true; + return currentData; + } + // else throw NoSuchElementException below + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + throw new NoSuchElementException(); + } + + /** + * Returns the index of the element that would be returned by a subsequent + * call to next. + * This method conforms to the {@link ListIterator#nextIndex} interface + * except that it returns Integer.MAX_VALUE for stored lists when + * positioned at the end of the list, rather than returning the list size + * as specified by the ListIterator interface. This is because the database + * size is not available. + * + * @return the next index. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * + * @throws UnsupportedOperationException if this iterator's collection does + * not use record number keys. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public int nextIndex() { + + if (!coll.view.recNumAccess) { + throw new UnsupportedOperationException + ("Record number access not supported"); + } + try { + return hasNext() ? (cursor.getCurrentRecordNumber() - + coll.getIndexOffset()) + : Integer.MAX_VALUE; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns the index of the element that would be returned by a subsequent + * call to previous. + * This method conforms to the {@link ListIterator#previousIndex} + * interface. + * + * @return the previous index. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if this iterator's collection does + * not use record number keys. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public int previousIndex() { + + if (!coll.view.recNumAccess) { + throw new UnsupportedOperationException + ("Record number access not supported"); + } + try { + return hasPrevious() ? (cursor.getCurrentRecordNumber() - + coll.getIndexOffset()) + : (-1); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Replaces the last element returned by next or previous with the + * specified element (optional operation). + * This method conforms to the {@link ListIterator#set} interface. + * + *

+ * <p>In order to call this method, if the underlying Database is + * transactional then a transaction must be active when creating the + * iterator.</p>

    + * + * @param value the new value. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a {@link + * StoredKeySet} (the set returned by {@link java.util.Map#keySet}), or if + * duplicates are sorted since this would change the iterator position, or + * if the collection is indexed, or if the collection is read-only. + * + * @throws IllegalArgumentException if an entity value binding is used and + * the primary key of the value given is different than the existing stored + * primary key. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public void set(E value) { + + if (!coll.hasValues()) { + throw new UnsupportedOperationException(); + } + if (!setAndRemoveAllowed) { + throw new IllegalStateException(); + } + try { + moveToCurrent(); + cursor.putCurrent(value); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Removes the last element that was returned by next or previous (optional + * operation). + * This method conforms to the {@link ListIterator#remove} interface except + * that when the collection is a list and the RECNO-RENUMBER access method + * is not used, list indices will not be renumbered. + * + *

+ * <p>In order to call this method, if the underlying Database is + * transactional then a transaction must be active when creating the + * iterator.</p>

    + * + *

+ * <p>Note that for the JE product, RECNO-RENUMBER databases are not + * supported, and therefore list indices are never renumbered by this + * method.</p>

    + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public void remove() { + + if (!setAndRemoveAllowed) { + throw new IllegalStateException(); + } + try { + moveToCurrent(); + cursor.delete(); + setAndRemoveAllowed = false; + toNext = MOVE_NEXT; + toPrevious = MOVE_PREV; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Inserts the specified element into the list or inserts a duplicate into + * other types of collections (optional operation). + * This method conforms to the {@link ListIterator#add} interface when + * the collection is a list and the RECNO-RENUMBER access method is used. + * Otherwise, this method may only be called when duplicates are allowed. + * If duplicates are unsorted, the new value will be inserted in the same + * manner as list elements. + * If duplicates are sorted, the new value will be inserted in sort order. + * + *

+ * <p>Note that for the JE product, RECNO-RENUMBER databases are not + * supported, and therefore this method may only be used to add + * duplicates.</p>

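For illustration only (hypothetical names; assumes a database opened with duplicates allowed and `dups` is a stored collection of the values for one key), inserting a duplicate through the iterator:

    // Sketch: position on an existing record, then add a duplicate.  With
    // unsorted duplicates the value is inserted relative to the cursor
    // position; with sorted duplicates it is inserted in sort order.
    StoredIterator<String> i = dups.storedIterator(true);
    try {
        if (i.hasNext()) {
            i.next();         // establish a position (collection must be non-empty)
            i.add("new-dup"); // IllegalArgumentException if a sorted duplicate exists
        }
    } finally {
        i.close();
    }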
    + * + * @param value the new value. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the collection is a list and the RECNO-RENUMBER access method was not + * used, or if the collection is not a list and duplicates are not allowed. + * + * @throws IllegalStateException if the collection is empty and is not a + * list with RECNO-RENUMBER access. + * + * @throws IllegalArgumentException if a duplicate value is being added + * that already exists and duplicates are sorted. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public void add(E value) { + + coll.checkIterAddAllowed(); + try { + OperationStatus status = OperationStatus.SUCCESS; + if (toNext != 0 && toPrevious != 0) { // database is empty + if (coll.view.keysRenumbered) { // recno-renumber database + /* + * Close cursor during append and then reopen to support + * CDB restriction that append may not be called with a + * cursor open; note the append will still fail if the + * application has another cursor open. + */ + close(); + status = coll.view.append(value, null, null); + cursor = new DataCursor(coll.view, writeAllowed); + reset(); + next(); // move past new record + } else { // hash/btree with duplicates + throw new IllegalStateException + ("Collection is empty, cannot add() duplicate"); + } + } else { // database is not empty + boolean putBefore = false; + if (coll.view.keysRenumbered) { // recno-renumber database + moveToCurrent(); + if (hasNext()) { + status = cursor.putBefore(value); + putBefore = true; + } else { + status = cursor.putAfter(value); + } + } else { // hash/btree with duplicates + if (coll.areDuplicatesOrdered()) { + status = cursor.putNoDupData(null, value, null, true); + } else if (toNext == 0) { + status = cursor.putBefore(value); + putBefore = true; + } else { + status = cursor.putAfter(value); + } + } + if (putBefore) { + toPrevious = 0; + toNext = MOVE_NEXT; + } + } + if (status == OperationStatus.KEYEXIST) { + throw new IllegalArgumentException("Duplicate value"); + } else if (status != OperationStatus.SUCCESS) { + throw DbCompat.unexpectedState("Could not insert: " + status); + } + setAndRemoveAllowed = false; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + // --- end Iterator/ListIterator methods --- + + /** + * Resets cursor to an uninitialized state. + */ + private void reset() { + + toNext = MOVE_FIRST; + toPrevious = MOVE_PREV; + toCurrent = 0; + currentData = null; + /* + * Initialize cursor at beginning to avoid "initial previous == last" + * behavior when cursor is uninitialized. + * + * FindBugs whines about us ignoring the return value from hasNext(). + */ + hasNext(); + } + + /** + * Returns the number of elements having the same key value as the key + * value of the element last returned by next() or previous(). If no + * duplicates are allowed, 1 is always returned. + * This method does not exist in the standard {@link Iterator} or {@link + * ListIterator} interfaces. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. 
+ * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @return the number of duplicates. + * + * @throws IllegalStateException if next() or previous() has not been + * called for this iterator, or if remove() or add() were called after + * the last call to next() or previous(). + */ + public int count() { + + if (!setAndRemoveAllowed) { + throw new IllegalStateException(); + } + try { + moveToCurrent(); + return cursor.count(); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Closes this iterator. + * This method does not exist in the standard {@link Iterator} or {@link + * ListIterator} interfaces. + * + *

+ * <p>After being closed, only the {@link #hasNext} and {@link
+ * #hasPrevious} methods may be called and these will return false. {@link
+ * #close()} may also be called again and will do nothing. If other
+ * methods are called a NullPointerException will generally be
+ * thrown.</p>
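+ *
+ * <p>A typical close pattern, as a minimal sketch (the {@code set} variable
+ * and its element type are assumptions for illustration; {@code
+ * storedIterator()} is the method used elsewhere in this package):</p>
+ * <pre>{@code
+ * StoredIterator<String> i = set.storedIterator();
+ * try {
+ *     while (i.hasNext()) {
+ *         String value = i.next();
+ *         // process value
+ *     }
+ * } finally {
+ *     i.close();
+ * }
+ * }</pre>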

    + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public void close() { + + if (cursor != null) { + coll.closeCursor(cursor); + cursor = null; + } + } + + /** + * Returns the collection associated with this iterator. + * This method does not exist in the standard {@link Iterator} or {@link + * ListIterator} interfaces. + * + * @return the collection associated with this iterator. + */ + public final StoredCollection getCollection() { + + return coll; + } + + // --- begin BaseIterator methods --- + + final ListIterator dup() { + + try { + StoredIterator o = (StoredIterator) super.clone(); + o.cursor = cursor.cloneCursor(); + return o; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + final boolean isCurrentData(Object currentData) { + + return (this.currentData == currentData); + } + + final boolean moveToIndex(int index) { + + try { + OperationStatus status = + cursor.getSearchKey(Integer.valueOf(index), + null, lockForWrite); + setAndRemoveAllowed = (status == OperationStatus.SUCCESS); + return setAndRemoveAllowed; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + // --- end BaseIterator methods --- + + private void moveToCurrent() + throws DatabaseException { + + if (toCurrent != 0) { + move(toCurrent); + toCurrent = 0; + } + } + + private OperationStatus move(int direction) + throws DatabaseException { + + switch (direction) { + case MOVE_NEXT: + if (coll.iterateDuplicates()) { + return cursor.getNext(lockForWrite); + } else { + return cursor.getNextNoDup(lockForWrite); + } + case MOVE_PREV: + if (coll.iterateDuplicates()) { + return cursor.getPrev(lockForWrite); + } else { + return cursor.getPrevNoDup(lockForWrite); + } + case MOVE_FIRST: + return cursor.getFirst(lockForWrite); + default: + throw DbCompat.unexpectedState(String.valueOf(direction)); + } + } +} diff --git a/src/com/sleepycat/collections/StoredKeySet.java b/src/com/sleepycat/collections/StoredKeySet.java new file mode 100644 index 0000000..358133c --- /dev/null +++ b/src/com/sleepycat/collections/StoredKeySet.java @@ -0,0 +1,175 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Set; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The Set returned by Map.keySet() and which can also be constructed directly + * if a Map is not needed. + * Since this collection is a set it only contains one element for each key, + * even when duplicates are allowed. Key set iterators are therefore + * particularly useful for enumerating the unique keys of a store or index that + * allows duplicates. 
+ * + * @author Mark Hayes + */ +public class StoredKeySet extends StoredCollection implements Set { + + /** + * Creates a key set view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public StoredKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, null, + writeAllowed, null)); + } + + StoredKeySet(DataView keySetView) { + + super(keySetView); + } + + /** + * Adds the specified key to this set if it is not already present + * (optional operation). + * This method conforms to the {@link Set#add} interface. + * + *

+ * <p>WARNING: When a key is added the value in the underlying data store
+ * will be empty, i.e., the byte array will be zero length. Such a record
+ * cannot be accessed using the Map interface unless the value binding
+ * supports zero length byte arrays.</p>
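+ *
+ * <p>A minimal sketch (the {@code db} variable and the use of a {@code
+ * StringBinding} are assumptions for illustration):</p>
+ * <pre>{@code
+ * StoredKeySet<String> keys =
+ *     new StoredKeySet<String>(db, new StringBinding(), true);
+ * keys.add("key1"); // stored with a zero length value
+ * }</pre>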

    + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean add(K key) { + + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = cursor.putNoOverwrite(key, null, false); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Removes the specified key from this set if it is present (optional + * operation). + * If duplicates are allowed, this method removes all duplicates for the + * given key. + * This method conforms to the {@link Set#remove} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean remove(Object key) { + + return removeKey(key, null); + } + + /** + * Returns true if this set contains the specified key. + * This method conforms to the {@link Set#contains} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean contains(Object key) { + + return containsKey(key); + } + + boolean hasValues() { + + return false; + } + + K makeIteratorData(BaseIterator iterator, + DatabaseEntry keyEntry, + DatabaseEntry priKeyEntry, + DatabaseEntry valueEntry) { + + return (K) view.makeKey(keyEntry, priKeyEntry); + } + + boolean iterateDuplicates() { + + return false; + } +} diff --git a/src/com/sleepycat/collections/StoredList.java b/src/com/sleepycat/collections/StoredList.java new file mode 100644 index 0000000..3690bf7 --- /dev/null +++ b/src/com/sleepycat/collections/StoredList.java @@ -0,0 +1,746 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.keyrange.KeyRangeException; + +/** + * + * @hidden + * + * A List view of a {@link Database}. + * + *

+ * <p>For all stored lists the keys of the underlying Database
+ * must have record number format, and therefore the store or index must be a
+ * RECNO, RECNO-RENUMBER, QUEUE, or BTREE-RECNUM database. Only RECNO-RENUMBER
+ * allows true list behavior where record numbers are renumbered following the
+ * position of an element that is added or removed. For the other access
+ * methods (RECNO, QUEUE, and BTREE-RECNUM), stored Lists are most useful as
+ * read-only collections where record numbers are not required to be
+ * sequential.</p>

    + * + *

+ * <p>In addition to the standard List methods, this class provides the
+ * following methods for stored lists only. Note that the use of these methods
+ * is not compatible with the standard Java collections interface.</p>

+ *
+ * <ul>
+ * <li>{@link #append(Object)}</li>
+ * </ul>
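+ *
+ * <p>A construction sketch (the {@code db} and {@code valueBinding}
+ * variables and the {@code MyValue} type are assumptions, and the database
+ * is assumed to support appends):</p>
+ * <pre>{@code
+ * StoredList<MyValue> list = new StoredList<MyValue>(db, valueBinding, true);
+ * int index = list.append(value); // assigns and returns the record number
+ * }</pre>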
    + * @author Mark Hayes + */ +public class StoredList extends StoredCollection implements List { + + private static final EntryBinding DEFAULT_KEY_BINDING = + new IndexKeyBinding(1); + + private int baseIndex = 1; + private boolean isSubList; + + /** + * Creates a list view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public StoredList(Database database, + EntryBinding valueBinding, + boolean writeAllowed) { + + super(new DataView(database, DEFAULT_KEY_BINDING, valueBinding, null, + writeAllowed, null)); + } + + /** + * Creates a list entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public StoredList(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, DEFAULT_KEY_BINDING, null, + valueEntityBinding, writeAllowed, null)); + } + + /** + * Creates a list view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created list. + * + * @param database is the Database underlying the new collection. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param keyAssigner is used by the {@link #add} and {@link #append} + * methods to assign primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public StoredList(Database database, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, DEFAULT_KEY_BINDING, valueBinding, + null, true, keyAssigner)); + } + + /** + * Creates a list entity view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created list. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param keyAssigner is used by the {@link #add} and {@link #append} + * methods to assign primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). 
+ */ + public StoredList(Database database, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, DEFAULT_KEY_BINDING, null, + valueEntityBinding, true, keyAssigner)); + } + + private StoredList(DataView view, int baseIndex) { + + super(view); + this.baseIndex = baseIndex; + this.isSubList = true; + } + + /** + * Inserts the specified element at the specified position in this list + * (optional operation). + * This method conforms to the {@link List#add(int, Object)} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the RECNO-RENUMBER access method was not used. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public void add(int index, E value) { + + checkIterAddAllowed(); + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + OperationStatus status = + cursor.getSearchKey(Long.valueOf(index), null, false); + if (status == OperationStatus.SUCCESS) { + cursor.putBefore(value); + closeCursor(cursor); + } else { + closeCursor(cursor); + cursor = null; + view.append(value, null, null); + } + commitAutoCommit(doAutoCommit); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Appends the specified element to the end of this list (optional + * operation). + * This method conforms to the {@link List#add(Object)} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the RECNO-RENUMBER access method was not used. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean add(E value) { + + checkIterAddAllowed(); + boolean doAutoCommit = beginAutoCommit(); + try { + view.append(value, null, null); + commitAutoCommit(doAutoCommit); + return true; + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + /** + * Appends a given value returning the newly assigned index. + * If a {@link com.sleepycat.collections.PrimaryKeyAssigner} is associated + * with Store for this list, it will be used to assigned the returned + * index. Otherwise the Store must be a QUEUE or RECNO database and the + * next available record number is assigned as the index. This method does + * not exist in the standard {@link List} interface. + * + * @param value the value to be appended. + * + * @return the assigned index. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only, or if the Store has no {@link + * com.sleepycat.collections.PrimaryKeyAssigner} and is not a QUEUE or + * RECNO database. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public int append(E value) { + + boolean doAutoCommit = beginAutoCommit(); + try { + Object[] key = new Object[1]; + view.append(value, key, null); + commitAutoCommit(doAutoCommit); + return ((Number) key[0]).intValue(); + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + void checkIterAddAllowed() + throws UnsupportedOperationException { + + if (isSubList) { + throw new UnsupportedOperationException("Cannot add to subList"); + } + if (!view.keysRenumbered) { // RECNO-RENUM + throw new UnsupportedOperationException + ("Requires renumbered keys"); + } + } + + /** + * Inserts all of the elements in the specified collection into this list + * at the specified position (optional operation). + * This method conforms to the {@link List#addAll(int, Collection)} + * interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is indexed, or if the collection is read-only, or if + * the RECNO-RENUMBER access method was not used. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean addAll(int index, Collection coll) { + + checkIterAddAllowed(); + DataCursor cursor = null; + Iterator i = null; + boolean doAutoCommit = beginAutoCommit(); + try { + i = storedOrExternalIterator(coll); + if (!i.hasNext()) { + return false; + } + cursor = new DataCursor(view, true); + OperationStatus status = + cursor.getSearchKey(Long.valueOf(index), null, false); + if (status == OperationStatus.SUCCESS) { + while (i.hasNext()) { + cursor.putBefore(i.next()); + } + closeCursor(cursor); + } else { + closeCursor(cursor); + cursor = null; + while (i.hasNext()) { + view.append(i.next(), null, null); + } + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + return true; + } catch (Exception e) { + closeCursor(cursor); + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns true if this list contains the specified element. + * This method conforms to the {@link List#contains} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean contains(Object value) { + + return containsValue(value); + } + + /** + * Returns the element at the specified position in this list. + * This method conforms to the {@link List#get} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public E get(int index) { + + return (E) getValue(Long.valueOf(index)); + } + + /** + * Returns the index in this list of the first occurrence of the specified + * element, or -1 if this list does not contain this element. + * This method conforms to the {@link List#indexOf} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public int indexOf(Object value) { + + return indexOf(value, true); + } + + /** + * Returns the index in this list of the last occurrence of the specified + * element, or -1 if this list does not contain this element. + * This method conforms to the {@link List#lastIndexOf} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public int lastIndexOf(Object value) { + + return indexOf(value, false); + } + + private int indexOf(Object value, boolean findFirst) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status = cursor.findValue(value, findFirst); + return (status == OperationStatus.SUCCESS) ? + (cursor.getCurrentRecordNumber() - baseIndex) : + (-1); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + int getIndexOffset() { + + return baseIndex; + } + + /** + * Returns a list iterator of the elements in this list (in proper + * sequence). + * The iterator will be read-only if the collection is read-only. + * This method conforms to the {@link List#listIterator()} interface. + * + *

+ * <p>For information on cursor stability and iterator block size, see
+ * {@link #iterator()}.</p>

    + * + * @return a {@link ListIterator} for this collection. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + * + * @see #isWriteAllowed + */ + public ListIterator listIterator() { + + return blockIterator(); + } + + /** + * Returns a list iterator of the elements in this list (in proper + * sequence), starting at the specified position in this list. + * The iterator will be read-only if the collection is read-only. + * This method conforms to the {@link List#listIterator(int)} interface. + * + *

+ * <p>For information on cursor stability and iterator block size, see
+ * {@link #iterator()}.</p>
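+ *
+ * <p>For example, to begin iterating at a given position (the {@code list}
+ * variable and {@code MyValue} type are assumptions):</p>
+ * <pre>{@code
+ * // throws IndexOutOfBoundsException if index 10 does not exist
+ * ListIterator<MyValue> i = list.listIterator(10);
+ * }</pre>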

    + * + * @return a {@link ListIterator} for this collection. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + * + * @see #isWriteAllowed + */ + public ListIterator listIterator(int index) { + + BlockIterator i = blockIterator(); + if (i.moveToIndex(index)) { + return i; + } else { + throw new IndexOutOfBoundsException(String.valueOf(index)); + } + } + + /** + * Removes the element at the specified position in this list (optional + * operation). + * This method conforms to the {@link List#remove(int)} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public E remove(int index) { + + try { + Object[] oldVal = new Object[1]; + removeKey(Long.valueOf(index), oldVal); + return (E) oldVal[0]; + } catch (IllegalArgumentException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } + } + + /** + * Removes the first occurrence in this list of the specified element + * (optional operation). + * This method conforms to the {@link List#remove(Object)} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is a sublist, or + * if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean remove(Object value) { + + return removeValue(value); + } + + /** + * Replaces the element at the specified position in this list with the + * specified element (optional operation). + * This method conforms to the {@link List#set} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only. + * + * @throws IllegalArgumentException if an entity value binding is used and + * the primary key of the value given is different than the existing stored + * primary key. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public E set(int index, E value) { + + try { + return (E) putKeyValue(Long.valueOf(index), value); + } catch (IllegalArgumentException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } + } + + /** + * Returns a view of the portion of this list between the specified + * fromIndex, inclusive, and toIndex, exclusive. + * Note that add() and remove() may not be called for the returned sublist. + * This method conforms to the {@link List#subList} interface. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). 
+ */ + public List subList(int fromIndex, int toIndex) { + + if (fromIndex < 0 || fromIndex > toIndex) { + throw new IndexOutOfBoundsException(String.valueOf(fromIndex)); + } + try { + int newBaseIndex = baseIndex + fromIndex; + return new StoredList( + view.subView(Long.valueOf(fromIndex), true, + Long.valueOf(toIndex), false, + new IndexKeyBinding(newBaseIndex)), + newBaseIndex); + } catch (KeyRangeException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Compares the specified object with this list for equality. + * A value comparison is performed by this method and the stored values + * are compared rather than calling the equals() method of each element. + * This method conforms to the {@link List#equals} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean equals(Object other) { + + if (!(other instanceof List)) return false; + List otherList = (List) other; + StoredIterator i1 = null; + ListIterator i2 = null; + try { + i1 = storedIterator(); + i2 = storedOrExternalListIterator(otherList); + while (i1.hasNext()) { + if (!i2.hasNext()) { + return false; + } + if (i1.nextIndex() != i2.nextIndex()) { + return false; + } + Object o1 = i1.next(); + Object o2 = i2.next(); + if (o1 == null) { + if (o2 != null) { + return false; + } + } else { + if (!o1.equals(o2)) { + return false; + } + } + } + return !i2.hasNext(); + } finally { + StoredIterator.close(i1); + StoredIterator.close(i2); + } + } + + /** + * Returns a StoredIterator if the given collection is a StoredCollection, + * else returns a regular/external ListIterator. The iterator returned + * should be closed with the static method StoredIterator.close(Iterator). + */ + final ListIterator storedOrExternalListIterator(List list) { + + if (list instanceof StoredCollection) { + return ((StoredCollection) list).storedIterator(); + } else { + return list.listIterator(); + } + } + + /* + * Add this in to keep FindBugs from whining at us about implementing + * equals(), but not hashCode(). + */ + public int hashCode() { + return super.hashCode(); + } + + E makeIteratorData(BaseIterator iterator, + DatabaseEntry keyEntry, + DatabaseEntry priKeyEntry, + DatabaseEntry valueEntry) { + + return (E) view.makeValue(priKeyEntry, valueEntry); + } + + boolean hasValues() { + + return true; + } + + private static class IndexKeyBinding extends RecordNumberBinding { + + private int baseIndex; + + private IndexKeyBinding(int baseIndex) { + + this.baseIndex = baseIndex; + } + + @Override + public Long entryToObject(DatabaseEntry data) { + + return Long.valueOf(entryToRecordNumber(data) - baseIndex); + } + + @Override + public void objectToEntry(Object object, DatabaseEntry data) { + + recordNumberToEntry(((Number) object).intValue() + baseIndex, + data); + } + } +} diff --git a/src/com/sleepycat/collections/StoredMap.java b/src/com/sleepycat/collections/StoredMap.java new file mode 100644 index 0000000..7dc20a1 --- /dev/null +++ b/src/com/sleepycat/collections/StoredMap.java @@ -0,0 +1,817 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.Database; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.keyrange.KeyRangeException; + +/** + * A Map view of a {@link Database}. + * + *

+ * <p>In addition to the standard Map methods, this class provides the
+ * following methods for stored maps only. Note that the use of these methods
+ * is not compatible with the standard Java collections interface.</p>

+ *
+ * <ul>
+ * <li>{@link #duplicates}</li>
+ * <li>{@link #duplicatesMap}</li>
+ * <li>{@link #append}</li>
+ * </ul>
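+ *
+ * <p>A usage sketch (the {@code db}, {@code keyBinding} and {@code
+ * valueBinding} variables are assumptions for illustration):</p>
+ * <pre>{@code
+ * StoredMap<String, String> map =
+ *     new StoredMap<String, String>(db, keyBinding, valueBinding, true);
+ * map.put("color", "red");
+ * String value = map.get("color");
+ * }</pre>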
    + * + * @author Mark Hayes + */ +public class StoredMap extends StoredContainer + implements ConcurrentMap { + + private StoredKeySet keySet; + private StoredEntrySet entrySet; + private StoredValueSet valueSet; + + /** + * Creates a map view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, valueBinding, null, + writeAllowed, null)); + initView(); + } + + /** + * Creates a map view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, valueBinding, null, + true, keyAssigner)); + initView(); + } + + /** + * Creates a map entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + writeAllowed, null)); + initView(); + } + + /** + * Creates a map entity view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. 
+ * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + true, keyAssigner)); + initView(); + } + + StoredMap(DataView view) { + + super(view); + initView(); + } + + /** + * Override this method to initialize view-dependent fields. + */ + void initAfterClone() { + initView(); + } + + /** + * The keySet, entrySet and valueSet are created during Map construction + * rather than lazily when requested (as done with the java.util.Map + * implementations). This is done to avoid synchronization every time they + * are requested. Since they are requested often but a StoredMap is + * created infrequently, this gives the best performance. The additional + * views are small objects and are cheap to construct. + */ + private void initView() { + + /* entrySet */ + if (areKeyRangesAllowed()) { + entrySet = new StoredSortedEntrySet(view); + } else { + entrySet = new StoredEntrySet(view); + } + + /* keySet */ + DataView newView = view.keySetView(); + if (areKeyRangesAllowed()) { + keySet = new StoredSortedKeySet(newView); + } else { + keySet = new StoredKeySet(newView); + } + + /* valueSet */ + newView = view.valueSetView(); + if (areKeyRangesAllowed() && newView.canDeriveKeyFromValue()) { + valueSet = new StoredSortedValueSet(newView); + } else { + valueSet = new StoredValueSet(newView); + } + } + + /** + * Returns the value to which this map maps the specified key. If + * duplicates are allowed, this method returns the first duplicate, in the + * order in which duplicates are configured, that maps to the specified + * key. + * + * This method conforms to the {@link Map#get} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public V get(Object key) { + + return (V) getValue(key); + } + + /** + * Associates the specified value with the specified key in this map + * (optional operation). If duplicates are allowed and the specified key + * is already mapped to a value, this method appends the new duplicate + * after the existing duplicates. This method conforms to the {@link + * Map#put} interface. + * + *

+ * <p>The key parameter may be null if an entity binding is used and the
+ * key will be derived from the value (entity) parameter. If an entity
+ * binding is used and the key parameter is non-null, then the key
+ * parameter must be equal to the key derived from the value parameter.</p>
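+ *
+ * <p>For example, with an entity binding the key may be omitted (the
+ * {@code entity} variable is an assumption):</p>
+ * <pre>{@code
+ * map.put(null, entity); // the key is derived from the entity by the binding
+ * }</pre>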

    + * + * @return the previous value associated with specified key, or null if + * there was no mapping for the key or if duplicates are allowed. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only. + * + * @throws IllegalArgumentException if an entity value binding is used and + * the primary key of the value given is different than the existing stored + * primary key. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public V put(K key, V value) { + + return (V) putKeyValue(key, value); + } + + /** + * Appends a given value returning the newly assigned key. If a {@link + * PrimaryKeyAssigner} is associated with Store for this map, it will be + * used to assigned the returned key. Otherwise the Store must be a QUEUE + * or RECNO database and the next available record number is assigned as + * the key. This method does not exist in the standard {@link Map} + * interface. + * + *

+ * <p>Note that for the JE product, QUEUE and RECNO databases are not
+ * supported, and therefore a PrimaryKeyAssigner must be associated with
+ * the map in order to call this method.</p>
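+ *
+ * <p>A sketch using a key assigner (the {@code assigner} implementation and
+ * the {@code MyValue} type are assumptions):</p>
+ * <pre>{@code
+ * StoredMap<Long, MyValue> map =
+ *     new StoredMap<Long, MyValue>(db, keyBinding, valueBinding, assigner);
+ * Long key = map.append(value); // key assigned by the PrimaryKeyAssigner
+ * }</pre>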

    + * + * @param value the value to be appended. + * + * @return the assigned key. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is indexed, or + * if the collection is read-only, or if the Store has no {@link + * PrimaryKeyAssigner} and is not a QUEUE or RECNO database. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public K append(V value) { + + boolean doAutoCommit = beginAutoCommit(); + try { + Object[] key = new Object[1]; + view.append(value, key, null); + commitAutoCommit(doAutoCommit); + return (K) key[0]; + } catch (Exception e) { + throw handleException(e, doAutoCommit); + } + } + + /** + * Removes the mapping for this key from this map if present (optional + * operation). If duplicates are allowed, this method removes all + * duplicates for the given key. This method conforms to the {@link + * Map#remove} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public V remove(Object key) { + + Object[] oldVal = new Object[1]; + removeKey(key, oldVal); + return (V) oldVal[0]; + } + + /** + * If the specified key is not already associated with a value, associate + * it with the given value. This method conforms to the {@link + * ConcurrentMap#putIfAbsent} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public V putIfAbsent(K key, V value) { + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + V oldValue; + while (true) { + OperationStatus status = + cursor.putNoOverwrite(key, value, false /*useCurrentKey*/); + if (status == OperationStatus.SUCCESS) { + /* We inserted the key. Return null. */ + oldValue = null; + break; + } else { + status = cursor.getSearchKey(key, null /*value*/, + false /*lockForWrite*/); + if (status == OperationStatus.SUCCESS) { + /* The key is present. Return the current value. */ + oldValue = (V) cursor.getCurrentValue(); + break; + } else { + + /* + * If Serializable isolation is not configured, another + * thread can delete the record after our attempt to + * insert it failed above. Loop back and try again. + */ + continue; + } + } + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return oldValue; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Remove entry for key only if currently mapped to given value. This + * method conforms to the {@link ConcurrentMap#remove(Object,Object)} + * interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. 
+ * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean remove(Object key, Object value) { + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true, key); + OperationStatus status = cursor.getFirst(true /*lockForWrite*/); + boolean removed; + if (status == OperationStatus.SUCCESS && + cursor.getCurrentValue().equals(value)) { + cursor.delete(); + removed = true; + } else { + removed = false; + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return removed; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Replace entry for key only if currently mapped to some value. This + * method conforms to the {@link ConcurrentMap#replace(Object,Object)} + * interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public V replace(K key, V value) { + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true, key); + OperationStatus status = cursor.getFirst(true /*lockForWrite*/); + V oldValue; + if (status == OperationStatus.SUCCESS) { + oldValue = (V) cursor.getCurrentValue(); + cursor.putCurrent(value); + } else { + oldValue = null; + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return oldValue; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Replace entry for key only if currently mapped to given value. This + * method conforms to the {@link + * ConcurrentMap#replace(Object,Object,Object)} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean replace(K key, V oldValue, V newValue) { + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true, key); + OperationStatus status = cursor.getFirst(true /*lockForWrite*/); + boolean replaced; + if (status == OperationStatus.SUCCESS && + cursor.getCurrentValue().equals(oldValue)) { + cursor.putCurrent(newValue); + replaced = true; + } else { + replaced = false; + } + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return replaced; + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns true if this map contains the specified key. This method + * conforms to the {@link Map#containsKey} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean containsKey(Object key) { + + return super.containsKey(key); + } + + /** + * Returns true if this map contains the specified value. When an entity + * binding is used, this method returns whether the map contains the + * primary key and value mapping of the entity. This method conforms to + * the {@link Map#containsValue} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean containsValue(Object value) { + + return super.containsValue(value); + } + + /** + * Copies all of the mappings from the specified map to this map (optional + * operation). When duplicates are allowed, the mappings in the specified + * map are effectively appended to the existing mappings in this map, that + * is no previously existing mappings in this map are replaced. This + * method conforms to the {@link Map#putAll} interface. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only, or + * if the collection is indexed. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public void putAll(Map map) { + + boolean doAutoCommit = beginAutoCommit(); + Iterator i = null; + try { + Collection coll = map.entrySet(); + i = storedOrExternalIterator(coll); + while (i.hasNext()) { + Map.Entry entry = (Map.Entry) i.next(); + putKeyValue(entry.getKey(), entry.getValue()); + } + StoredIterator.close(i); + commitAutoCommit(doAutoCommit); + } catch (Exception e) { + StoredIterator.close(i); + throw handleException(e, doAutoCommit); + } + } + + /** + * Returns a set view of the keys contained in this map. A {@link + * java.util.SortedSet} is returned if the map supports key ranges. The + * returned collection will be read-only if the map is read-only. This + * method conforms to the {@link Map#keySet()} interface. + * + *

+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
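+ *
+ * <p>For example, iterators over the returned set may be closed with the
+ * static {@link StoredIterator#close(java.util.Iterator)} method:</p>
+ * <pre>{@code
+ * Iterator<K> i = map.keySet().iterator();
+ * try {
+ *     // iterate over the keys
+ * } finally {
+ *     StoredIterator.close(i);
+ * }
+ * }</pre>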

    + * + * @return a {@link StoredKeySet} or a {@link StoredSortedKeySet} for this + * map. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + * + * @see #areKeyRangesAllowed + * @see #isWriteAllowed + */ + public Set keySet() { + + return keySet; + } + + /** + * Returns a set view of the mappings contained in this map. A {@link + * java.util.SortedSet} is returned if the map supports key ranges. The + * returned collection will be read-only if the map is read-only. This + * method conforms to the {@link Map#entrySet()} interface. + * + *

+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>

    + * + * @return a {@link StoredEntrySet} or a {@link StoredSortedEntrySet} for + * this map. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + * + * @see #areKeyRangesAllowed + * @see #isWriteAllowed + */ + public Set> entrySet() { + + return entrySet; + } + + /** + * Returns a collection view of the values contained in this map. A {@link + * java.util.SortedSet} is returned if the map supports key ranges and the + * value/entity binding can be used to derive the map's key from its + * value/entity object. The returned collection will be read-only if the + * map is read-only. This method conforms to the {@link Map#values()} + * interface. + * + *

+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>

    + * + * @return a {@link StoredValueSet} or a {@link StoredSortedValueSet} for + * this map. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + * + * @see #areKeyRangesAllowed + * @see #isWriteAllowed + */ + public Collection values() { + + return valueSet; + } + + /** + * Returns a new collection containing the values mapped to the given key + * in this map. This collection's iterator() method is particularly useful + * for iterating over the duplicates for a given key, since this is not + * supported by the standard Map interface. This method does not exist in + * the standard {@link Map} interface. + * + *

+ * <p>If no mapping for the given key is present, an empty collection is
+ * returned. If duplicates are not allowed, at most a single value will be
+ * in the collection returned. If duplicates are allowed, the returned
+ * collection's add() method may be used to add values for the given
+ * key.</p>
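+ *
+ * <p>For example, to enumerate the values for one key (the {@code map} and
+ * {@code key} variables are assumptions):</p>
+ * <pre>{@code
+ * for (V value : map.duplicates(key)) {
+ *     // process value
+ * }
+ * }</pre>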

    + * + * @param key is the key for which values are to be returned. + * + * @return the new collection. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public Collection duplicates(K key) { + + try { + DataView newView = view.valueSetView(key); + return new StoredValueSet(newView); + } catch (KeyRangeException e) { + return Collections.EMPTY_SET; + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Returns a new map from primary key to value for the subset of records + * having a given secondary key (duplicates). This method does not exist + * in the standard {@link Map} interface. + * + *

+ * <p>If no mapping for the given secondary key is present, an empty map is
+ * returned. If duplicates are not allowed, the returned map will contain at
+ * most a single entry.</p>
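+ *
+ * <p>A sketch mapping primary keys to values for one secondary key (the
+ * {@code secondaryKey} and {@code primaryKeyBinding} variables and the
+ * {@code MyValue} type are assumptions):</p>
+ * <pre>{@code
+ * Map<Long, MyValue> dups =
+ *     map.duplicatesMap(secondaryKey, primaryKeyBinding);
+ * }</pre>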

    + * + * @param secondaryKey is the secondary key for which duplicates values + * will be represented by the returned map. + * + * @param primaryKeyBinding is the binding used for keys in the returned + * map. + * + * @param the primary key class. + * + * @return the new map. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public Map duplicatesMap(K secondaryKey, + EntryBinding primaryKeyBinding) { + try { + DataView newView = + view.duplicatesView(secondaryKey, primaryKeyBinding); + if (isOrdered()) { + return new StoredSortedMap(newView); + } else { + return new StoredMap(newView); + } + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } + + /** + * Compares the specified object with this map for equality. A value + * comparison is performed by this method and the stored values are + * compared rather than calling the equals() method of each element. This + * method conforms to the {@link Map#equals} interface. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public boolean equals(Object other) { + + if (other instanceof Map) { + return entrySet().equals(((Map) other).entrySet()); + } else { + return false; + } + } + + /* + * Add this in to keep FindBugs from whining at us about implementing + * equals(), but not hashCode(). + */ + public int hashCode() { + return super.hashCode(); + } + + // Inherit javadoc + public int size() { + return values().size(); + } + + /** + * Converts the map to a string representation for debugging. WARNING: All + * mappings will be converted to strings and returned and therefore the + * returned string may be very large. + * + * @return the string representation. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public String toString() { + + return entrySet().toString(); + } +} diff --git a/src/com/sleepycat/collections/StoredMapEntry.java b/src/com/sleepycat/collections/StoredMapEntry.java new file mode 100644 index 0000000..cfe4692 --- /dev/null +++ b/src/com/sleepycat/collections/StoredMapEntry.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections; + +/** + * @author Mark Hayes + */ +final class StoredMapEntry extends MapEntryParameter { + + private BaseIterator iter; + private StoredCollection coll; + + StoredMapEntry(Object key, + Object value, + StoredCollection coll, + BaseIterator iter) { + + super(key, value); + this.coll = coll; + this.iter = iter; + } + + public Object setValue(Object newValue) { + + Object oldValue; + if (iter != null && iter.isCurrentData(this)) { + oldValue = getValue(); + iter.set(newValue); + } else { + if (coll.view.dupsAllowed) { + throw new IllegalStateException("May not insert duplicates"); + } + oldValue = coll.putKeyValue(getKey(), newValue); + } + setValueInternal(newValue); + return oldValue; + } +} diff --git a/src/com/sleepycat/collections/StoredSortedEntrySet.java b/src/com/sleepycat/collections/StoredSortedEntrySet.java new file mode 100644 index 0000000..109fd6f --- /dev/null +++ b/src/com/sleepycat/collections/StoredSortedEntrySet.java @@ -0,0 +1,260 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.Map; +import java.util.SortedSet; + +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The SortedSet returned by Map.entrySet(). This class may not be + * instantiated directly. Contrary to what is stated by {@link Map#entrySet} + * this class does support the {@link #add} and {@link #addAll} methods. + * + *
+ * <p>The {@link java.util.Map.Entry#setValue} method of the Map.Entry objects
+ * that are returned by this class and its iterators behaves just as the {@link
+ * StoredIterator#set} method does.</p>
+ *
+ * <p>In addition to the standard SortedSet methods, this class provides the
+ * following methods for stored sorted sets only. Note that the use of these
+ * methods is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #headSet(Map.Entry, boolean)}</li>
+ * <li>{@link #tailSet(Map.Entry, boolean)}</li>
+ * <li>{@link #subSet(Map.Entry, boolean, Map.Entry, boolean)}</li>
+ * </ul>
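+ *
+ * <p>For example, a minimal usage sketch, assuming {@code map} is a
+ * hypothetical StoredSortedMap opened elsewhere:</p>
+ * <pre>
+ *  Iterator i = map.entrySet().iterator();
+ *  try {
+ *      while (i.hasNext()) {
+ *          Map.Entry entry = (Map.Entry) i.next();
+ *          // setValue writes through to the database, just as
+ *          // StoredIterator.set does
+ *          entry.setValue("updated:" + entry.getValue());
+ *      }
+ *  } finally {
+ *      StoredIterator.close(i); // stored iterators must be closed
+ *  }
+ * </pre>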
    + * + * @author Mark Hayes + */ +public class StoredSortedEntrySet + extends StoredEntrySet + implements SortedSet> { + + StoredSortedEntrySet(DataView mapView) { + + super(mapView); + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedSet#comparator} + * interface. + * + * @return null. + */ + public Comparator> comparator() { + + return null; + } + + /** + * Returns the first (lowest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#first} interface. + * + * @return the first element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public Map.Entry first() { + + return getFirstOrLast(true); + } + + /** + * Returns the last (highest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#last} interface. + * + * @return the last element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public Map.Entry last() { + + return getFirstOrLast(false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toMapEntry. + * This method conforms to the {@link SortedSet#headSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toMapEntry the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet> headSet(Map.Entry toMapEntry) { + + return subSet(null, false, toMapEntry, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toMapEntry, optionally including toMapEntry. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toMapEntry is the upper bound. + * + * @param toInclusive is true to include toMapEntry. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet> headSet(Map.Entry toMapEntry, + boolean toInclusive) { + + return subSet(null, false, toMapEntry, toInclusive); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * greater than or equal to fromMapEntry. + * This method conforms to the {@link SortedSet#tailSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromMapEntry is the lower bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet> tailSet(Map.Entry fromMapEntry) { + + return subSet(fromMapEntry, true, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromMapEntry, optionally including fromMapEntry. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromMapEntry is the lower bound. + * + * @param fromInclusive is true to include fromMapEntry. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet> tailSet(Map.Entry fromMapEntry, + boolean fromInclusive) { + + return subSet(fromMapEntry, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements range + * from fromMapEntry, inclusive, to toMapEntry, exclusive. + * This method conforms to the {@link SortedSet#subSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromMapEntry is the lower bound. + * + * @param toMapEntry is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet> subSet(Map.Entry fromMapEntry, + Map.Entry toMapEntry) { + + return subSet(fromMapEntry, true, toMapEntry, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromMapEntry and strictly less than toMapEntry, + * optionally including fromMapEntry and toMapEntry. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
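+ *
+ * <p>A minimal sketch, assuming {@code entries} is an instance of this class
+ * and {@code low} and {@code high} are hypothetical Map.Entry objects; only
+ * their keys are used to bound the range:</p>
+ * <pre>
+ *  // include both endpoints, which the standard subSet method cannot do
+ *  SortedSet range = entries.subSet(low, true, high, true);
+ * </pre>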
    + * + * @param fromMapEntry is the lower bound. + * + * @param fromInclusive is true to include fromMapEntry. + * + * @param toMapEntry is the upper bound. + * + * @param toInclusive is true to include toMapEntry. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet> subSet(Map.Entry fromMapEntry, + boolean fromInclusive, + Map.Entry toMapEntry, + boolean toInclusive) { + + Object fromKey = (fromMapEntry != null) ? fromMapEntry.getKey() : null; + Object toKey = (toMapEntry != null) ? toMapEntry.getKey() : null; + try { + return new StoredSortedEntrySet( + view.subView(fromKey, fromInclusive, toKey, toInclusive, null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/src/com/sleepycat/collections/StoredSortedKeySet.java b/src/com/sleepycat/collections/StoredSortedKeySet.java new file mode 100644 index 0000000..004c17c --- /dev/null +++ b/src/com/sleepycat/collections/StoredSortedKeySet.java @@ -0,0 +1,279 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.SortedSet; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.Database; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The SortedSet returned by Map.keySet() and which can also be constructed + * directly if a Map is not needed. + * Since this collection is a set it only contains one element for each key, + * even when duplicates are allowed. Key set iterators are therefore + * particularly useful for enumerating the unique keys of a store or index that + * allows duplicates. + * + *
+ * <p>In addition to the standard SortedSet methods, this class provides the
+ * following methods for stored sorted sets only. Note that the use of these
+ * methods is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #headSet(Object, boolean)}</li>
+ * <li>{@link #tailSet(Object, boolean)}</li>
+ * <li>{@link #subSet(Object, boolean, Object, boolean)}</li>
+ * </ul>
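+ *
+ * <p>A minimal construction sketch, assuming {@code db} is a hypothetical
+ * open Database whose keys were written with a String tuple binding:</p>
+ * <pre>
+ *  EntryBinding keyBinding = TupleBinding.getPrimitiveBinding(String.class);
+ *  SortedSet keys = new StoredSortedKeySet(db, keyBinding, false);
+ *  // each key appears once, even if the database allows duplicates
+ * </pre>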
    + * + * @author Mark Hayes + */ +public class StoredSortedKeySet + extends StoredKeySet + implements SortedSet { + + /** + * Creates a sorted key set view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredSortedKeySet(Database database, + EntryBinding keyBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, null, + writeAllowed, null)); + } + + StoredSortedKeySet(DataView keySetView) { + + super(keySetView); + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedSet#comparator} + * interface. + * + * @return null. + */ + public Comparator comparator() { + + return null; + } + + /** + * Returns the first (lowest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#first} interface. + * + * @return the first element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public K first() { + + return getFirstOrLast(true); + } + + /** + * Returns the last (highest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#last} interface. + * + * @return the last element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public K last() { + + return getFirstOrLast(false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toKey. + * This method conforms to the {@link SortedSet#headSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toKey is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet headSet(K toKey) { + + return subSet(null, false, toKey, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toKey, optionally including toKey. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet headSet(K toKey, boolean toInclusive) { + + return subSet(null, false, toKey, toInclusive); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * greater than or equal to fromKey. + * This method conforms to the {@link SortedSet#tailSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromKey is the lower bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet tailSet(K fromKey) { + + return subSet(fromKey, true, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromKey, optionally including fromKey. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet tailSet(K fromKey, boolean fromInclusive) { + + return subSet(fromKey, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements range + * from fromKey, inclusive, to toKey, exclusive. + * This method conforms to the {@link SortedSet#subSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromKey is the lower bound. + * + * @param toKey is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet subSet(K fromKey, K toKey) { + + return subSet(fromKey, true, toKey, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromKey and strictly less than toKey, + * optionally including fromKey and toKey. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
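+ *
+ * <p>A minimal sketch, assuming {@code keys} is an instance of this class
+ * with String keys:</p>
+ * <pre>
+ *  // keys strictly greater than "m", up to and including "z"
+ *  SortedSet range = keys.subSet("m", false, "z", true);
+ * </pre>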
    + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet subSet(K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive) { + try { + return new StoredSortedKeySet( + view.subView(fromKey, fromInclusive, toKey, toInclusive, null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/src/com/sleepycat/collections/StoredSortedMap.java b/src/com/sleepycat/collections/StoredSortedMap.java new file mode 100644 index 0000000..a1d21eb --- /dev/null +++ b/src/com/sleepycat/collections/StoredSortedMap.java @@ -0,0 +1,390 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.SortedMap; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.Database; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * A SortedMap view of a {@link Database}. + * + *
+ * <p>In addition to the standard SortedMap methods, this class provides the
+ * following methods for stored sorted maps only. Note that the use of these
+ * methods is not compatible with the standard Java collections interface.</p>
+ * <ul>
+ * <li>{@link #headMap(Object, boolean)}</li>
+ * <li>{@link #tailMap(Object, boolean)}</li>
+ * <li>{@link #subMap(Object, boolean, Object, boolean)}</li>
+ * </ul>
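+ *
+ * <p>A minimal construction sketch, assuming {@code db} is a hypothetical
+ * open Database with Long keys and String values stored as tuples:</p>
+ * <pre>
+ *  EntryBinding keyBinding = TupleBinding.getPrimitiveBinding(Long.class);
+ *  EntryBinding valueBinding = TupleBinding.getPrimitiveBinding(String.class);
+ *  SortedMap map = new StoredSortedMap(db, keyBinding, valueBinding, true);
+ * </pre>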
    + * + * @author Mark Hayes + */ +public class StoredSortedMap + extends StoredMap + implements SortedMap { + + /** + * Creates a sorted map view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, valueBinding, null, + writeAllowed, null)); + } + + /** + * Creates a sorted map view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredSortedMap(Database database, + EntryBinding keyBinding, + EntryBinding valueBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, valueBinding, null, + true, keyAssigner)); + } + + /** + * Creates a sorted map entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + writeAllowed, null)); + } + + /** + * Creates a sorted map entity view of a {@link Database} with a {@link + * PrimaryKeyAssigner}. Writing is allowed for the created map. + * + * @param database is the Database underlying the new collection. + * + * @param keyBinding is the binding used to translate between key buffers + * and key objects. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param keyAssigner is used by the {@link #append} method to assign + * primary keys. 
+ * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredSortedMap(Database database, + EntryBinding keyBinding, + EntityBinding valueEntityBinding, + PrimaryKeyAssigner keyAssigner) { + + super(new DataView(database, keyBinding, null, valueEntityBinding, + true, keyAssigner)); + } + + StoredSortedMap(DataView mapView) { + + super(mapView); + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedMap#comparator} + * interface. + * + * @return null. + */ + public Comparator comparator() { + + return null; + } + + /** + * Returns the first (lowest) key currently in this sorted map. + * This method conforms to the {@link SortedMap#firstKey} interface. + * + * @return the first key. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public K firstKey() { + + return getFirstOrLastKey(true); + } + + /** + * Returns the last (highest) element currently in this sorted map. + * This method conforms to the {@link SortedMap#lastKey} interface. + * + * @return the last key. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public K lastKey() { + + return getFirstOrLastKey(false); + } + + private K getFirstOrLastKey(boolean doGetFirst) { + + DataCursor cursor = null; + try { + cursor = new DataCursor(view, false); + OperationStatus status; + if (doGetFirst) { + status = cursor.getFirst(false); + } else { + status = cursor.getLast(false); + } + return (K) ((status == OperationStatus.SUCCESS) ? + cursor.getCurrentKey() : + null); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } finally { + closeCursor(cursor); + } + } + + /** + * Returns a view of the portion of this sorted set whose keys are + * strictly less than toKey. + * This method conforms to the {@link SortedMap#headMap} interface. + * + *
+ * <p>Note that the return value is a StoredSortedMap and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toKey is the upper bound. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedMap headMap(K toKey) { + + return subMap(null, false, toKey, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * strictly less than toKey, optionally including toKey. + * This method does not exist in the standard {@link SortedMap} interface. + * + *
+ * <p>Note that the return value is a StoredSortedMap and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedMap headMap(K toKey, boolean toInclusive) { + + return subMap(null, false, toKey, toInclusive); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * greater than or equal to fromKey. + * This method conforms to the {@link SortedMap#tailMap} interface. + * + *
+ * <p>Note that the return value is a StoredSortedMap and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromKey is the lower bound. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedMap tailMap(K fromKey) { + + return subMap(fromKey, true, null, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * strictly greater than fromKey, optionally including fromKey. + * This method does not exist in the standard {@link SortedMap} interface. + * + *
+ * <p>Note that the return value is a StoredSortedMap and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedMap tailMap(K fromKey, boolean fromInclusive) { + + return subMap(fromKey, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements range + * from fromKey, inclusive, to toKey, exclusive. + * This method conforms to the {@link SortedMap#subMap} interface. + * + *
+ * <p>Note that the return value is a StoredSortedMap and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromKey is the lower bound. + * + * @param toKey is the upper bound. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedMap subMap(K fromKey, K toKey) { + + return subMap(fromKey, true, toKey, false); + } + + /** + * Returns a view of the portion of this sorted map whose elements are + * strictly greater than fromKey and strictly less than toKey, + * optionally including fromKey and toKey. + * This method does not exist in the standard {@link SortedMap} interface. + * + *
+ * <p>Note that the return value is a StoredSortedMap and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
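+ *
+ * <p>A minimal sketch, assuming {@code map} is an instance of this class and
+ * {@code lowKey} and {@code highKey} are hypothetical keys:</p>
+ * <pre>
+ *  // include both endpoints, which the standard subMap method cannot do
+ *  SortedMap range = map.subMap(lowKey, true, highKey, true);
+ * </pre>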
    + * + * @param fromKey is the lower bound. + * + * @param fromInclusive is true to include fromKey. + * + * @param toKey is the upper bound. + * + * @param toInclusive is true to include toKey. + * + * @return the submap. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedMap subMap(K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive) { + try { + return new StoredSortedMap( + view.subView(fromKey, fromInclusive, toKey, toInclusive, null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/src/com/sleepycat/collections/StoredSortedValueSet.java b/src/com/sleepycat/collections/StoredSortedValueSet.java new file mode 100644 index 0000000..78e673d --- /dev/null +++ b/src/com/sleepycat/collections/StoredSortedValueSet.java @@ -0,0 +1,291 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Comparator; +import java.util.SortedSet; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.je.Database; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The SortedSet returned by Map.values() and which can also be constructed + * directly if a Map is not needed. + * Although this collection is a set it may contain duplicate values. Only if + * an entity value binding is used are all elements guaranteed to be unique. + * + *
+ * <p>In addition to the standard SortedSet methods, this class provides the
+ * following methods for stored sorted value sets only. Note that the use of
+ * these methods is not compatible with the standard Java collections
+ * interface.</p>
+ * <ul>
+ * <li>{@link #headSet(Object, boolean)}</li>
+ * <li>{@link #tailSet(Object, boolean)}</li>
+ * <li>{@link #subSet(Object, boolean, Object, boolean)}</li>
+ * </ul>
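+ *
+ * <p>A minimal construction sketch, assuming {@code db} is open and
+ * {@code entityBinding} is a hypothetical EntityBinding that can derive the
+ * key from the value, which this class requires:</p>
+ * <pre>
+ *  SortedSet values = new StoredSortedValueSet(db, entityBinding, false);
+ * </pre>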
    + * + * @author Mark Hayes + */ +public class StoredSortedValueSet + extends StoredValueSet + implements SortedSet { + + /* + * No valueBinding ctor is possible since key cannot be derived. + */ + + /** + * Creates a sorted value set entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public StoredSortedValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, null, null, valueEntityBinding, + writeAllowed, null)); + checkKeyDerivation(); + } + + StoredSortedValueSet(DataView valueSetView) { + + super(valueSetView); + checkKeyDerivation(); + } + + private void checkKeyDerivation() { + + if (!view.canDeriveKeyFromValue()) { + throw new IllegalArgumentException("Cannot derive key from value"); + } + } + + /** + * Returns null since comparators are not supported. The natural ordering + * of a stored collection is data byte order, whether the data classes + * implement the {@link java.lang.Comparable} interface or not. + * This method does not conform to the {@link SortedSet#comparator} + * interface. + * + * @return null. + */ + public Comparator comparator() { + + return null; + } + + /** + * Returns the first (lowest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#first} interface. + * + * @return the first element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public E first() { + + return getFirstOrLast(true); + } + + /** + * Returns the last (highest) element currently in this sorted set. + * This method conforms to the {@link SortedSet#last} interface. + * + * @return the last element. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public E last() { + + return getFirstOrLast(false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toValue. + * This method conforms to the {@link SortedSet#headSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toValue the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet headSet(E toValue) { + + return subSet(null, false, toValue, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly less than toValue, optionally including toValue. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param toValue is the upper bound. + * + * @param toInclusive is true to include toValue. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet headSet(E toValue, boolean toInclusive) { + + return subSet(null, false, toValue, toInclusive); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * greater than or equal to fromValue. + * This method conforms to the {@link SortedSet#tailSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromValue is the lower bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet tailSet(E fromValue) { + + return subSet(fromValue, true, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromValue, optionally including fromValue. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromValue is the lower bound. + * + * @param fromInclusive is true to include fromValue. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet tailSet(E fromValue, boolean fromInclusive) { + + return subSet(fromValue, fromInclusive, null, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements range + * from fromValue, inclusive, to toValue, exclusive. + * This method conforms to the {@link SortedSet#subSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromValue is the lower bound. + * + * @param toValue is the upper bound. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet subSet(E fromValue, E toValue) { + + return subSet(fromValue, true, toValue, false); + } + + /** + * Returns a view of the portion of this sorted set whose elements are + * strictly greater than fromValue and strictly less than toValue, + * optionally including fromValue and toValue. + * This method does not exist in the standard {@link SortedSet} interface. + * + *
+ * <p>Note that the return value is a StoredCollection and must be treated
+ * as such; for example, its iterators must be explicitly closed.</p>
    + * + * @param fromValue is the lower bound. + * + * @param fromInclusive is true to include fromValue. + * + * @param toValue is the upper bound. + * + * @param toInclusive is true to include toValue. + * + * @return the subset. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C edition). + */ + public SortedSet subSet(E fromValue, + boolean fromInclusive, + E toValue, + boolean toInclusive) { + try { + return new StoredSortedValueSet(view.subView + (fromValue, fromInclusive, toValue, toInclusive, null)); + } catch (Exception e) { + throw StoredContainer.convertException(e); + } + } +} diff --git a/src/com/sleepycat/collections/StoredValueSet.java b/src/com/sleepycat/collections/StoredValueSet.java new file mode 100644 index 0000000..cfa20e7 --- /dev/null +++ b/src/com/sleepycat/collections/StoredValueSet.java @@ -0,0 +1,220 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import java.util.Set; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +/* */ +import com.sleepycat.je.EnvironmentFailureException; // for javadoc +import com.sleepycat.je.OperationFailureException; // for javadoc +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The Set returned by Map.values() and Map.duplicates(), and which can also be + * constructed directly if a Map is not needed. + * Although this collection is a set it may contain duplicate values. Only if + * an entity value binding is used are all elements guaranteed to be unique. + * + * @author Mark Hayes + */ +public class StoredValueSet extends StoredCollection implements Set { + + /* + * This class is also used internally for the set returned by duplicates(). + */ + + /** + * Creates a value set view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueBinding is the binding used to translate between value + * buffers and value objects. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public StoredValueSet(Database database, + EntryBinding valueBinding, + boolean writeAllowed) { + + super(new DataView(database, null, valueBinding, null, + writeAllowed, null)); + } + + /** + * Creates a value set entity view of a {@link Database}. + * + * @param database is the Database underlying the new collection. + * + * @param valueEntityBinding is the binding used to translate between + * key/value buffers and entity value objects. 
+ * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @throws IllegalArgumentException if formats are not consistently + * defined or a parameter is invalid. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public StoredValueSet(Database database, + EntityBinding valueEntityBinding, + boolean writeAllowed) { + + super(new DataView(database, null, null, valueEntityBinding, + writeAllowed, null)); + } + + StoredValueSet(DataView valueSetView) { + + super(valueSetView); + } + + /** + * Adds the specified entity to this set if it is not already present + * (optional operation). + * This method conforms to the {@link Set#add} interface. + * + * @param entity is the entity to be added. + * + * @return true if the entity was added, that is the key-value pair + * represented by the entity was not previously present in the collection. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only, + * if the collection is indexed, or if an entity binding is not used. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean add(E entity) { + + if (view.isSecondary()) { + throw new UnsupportedOperationException + ("Add not allowed with index"); + } else if (view.range.isSingleKey()) { + /* entity is actually just a value in this case */ + if (!view.dupsAllowed) { + throw new UnsupportedOperationException("Duplicates required"); + } + DataCursor cursor = null; + boolean doAutoCommit = beginAutoCommit(); + try { + cursor = new DataCursor(view, true); + cursor.useRangeKey(); + OperationStatus status = + cursor.putNoDupData(null, entity, null, true); + closeCursor(cursor); + commitAutoCommit(doAutoCommit); + return (status == OperationStatus.SUCCESS); + } catch (Exception e) { + closeCursor(cursor); + throw handleException(e, doAutoCommit); + } + } else if (view.entityBinding == null) { + throw new UnsupportedOperationException + ("Add requires entity binding"); + } else { + return add(null, entity); + } + } + + /** + * Returns true if this set contains the specified element. + * This method conforms to the {@link java.util.Set#contains} + * interface. + * + * @param value the value to check. + * + * @return whether the set contains the given value. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean contains(Object value) { + + return containsValue(value); + } + + /** + * Removes the specified value from this set if it is present (optional + * operation). + * If an entity binding is used, the key-value pair represented by the + * given entity is removed. If an entity binding is used, the first + * occurrence of a key-value pair with the given value is removed. + * This method conforms to the {@link Set#remove} interface. 
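+ *
+ * <p>A minimal sketch, assuming an entity binding is in use and {@code
+ * entity} is a hypothetical previously stored entity object:</p>
+ * <pre>
+ *  // removes the key-value pair represented by the entity
+ *  boolean wasPresent = valueSet.remove(entity);
+ * </pre>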
+ * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws UnsupportedOperationException if the collection is read-only. + * + * @throws RuntimeExceptionWrapper if a checked exception is thrown, + * including a {@code DatabaseException} on BDB (C Edition). + */ + public boolean remove(Object value) { + + return removeValue(value); + } + + E makeIteratorData(BaseIterator iterator, + DatabaseEntry keyEntry, + DatabaseEntry priKeyEntry, + DatabaseEntry valueEntry) { + + return (E) view.makeValue(priKeyEntry, valueEntry); + } + + boolean hasValues() { + + return true; + } +} diff --git a/src/com/sleepycat/collections/TransactionRunner.java b/src/com/sleepycat/collections/TransactionRunner.java new file mode 100644 index 0000000..979f1dc --- /dev/null +++ b/src/com/sleepycat/collections/TransactionRunner.java @@ -0,0 +1,349 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.util.ExceptionUnwrapper; + +/** + * Starts a transaction, calls {@link TransactionWorker#doWork}, and handles + * transaction retry and exceptions. To perform a transaction, the user + * implements the {@link TransactionWorker} interface and passes an instance of + * that class to the {@link #run run} method. + * + *
+ * <p>A single TransactionRunner instance may be used by any number of threads
+ * for any number of transactions.</p>
+ *
+ * <p>The behavior of the run() method depends on whether the environment is
+ * transactional, whether nested transactions are enabled, and whether a
+ * transaction is already active.</p>
+ *
+ * <ul>
+ * <li>When the run() method is called in a transactional environment and no
+ * transaction is active for the current thread, a new transaction is started
+ * before calling doWork(). If LockConflictException is thrown by doWork(),
+ * the transaction will be aborted and the process will be repeated up to the
+ * maximum number of retries. If another exception is thrown by doWork() or
+ * the maximum number of retries has occurred, the transaction will be aborted
+ * and the exception will be rethrown by the run() method. If no exception is
+ * thrown by doWork(), the transaction will be committed. The run() method
+ * will not attempt to commit or abort a transaction if it has already been
+ * committed or aborted by doWork().</li>
+ *
+ * <li>When the run() method is called and a transaction is active for the
+ * current thread, and nested transactions are enabled, a nested transaction is
+ * started before calling doWork(). The transaction that is active when
+ * calling the run() method will become the parent of the nested transaction.
+ * The nested transaction will be committed or aborted by the run() method
+ * following the same rules described above. Note that nested transactions may
+ * not be enabled for the JE product, since JE does not support nested
+ * transactions.</li>
+ *
+ * <li>When the run() method is called in a non-transactional environment, the
+ * doWork() method is called without starting a transaction. The run() method
+ * will return without committing or aborting a transaction, and any exceptions
+ * thrown by the doWork() method will be thrown by the run() method.</li>
+ *
+ * <li>When the run() method is called and a transaction is active for the
+ * current thread and nested transactions are not enabled (the default), the
+ * same rules as above apply. All the operations performed by the doWork()
+ * method will be part of the currently active transaction.</li>
+ * </ul>
+ *
+ * <p>In a transactional environment, the rules described above support nested
+ * calls to the run() method and guarantee that the outermost call will cause
+ * the transaction to be committed or aborted. This is true whether or not
+ * nested transactions are supported or enabled. Note that nested transactions
+ * are provided as an optimization for improving concurrency but do not change
+ * the meaning of the outermost transaction. Nested transactions are not
+ * currently supported by the JE product.</p>
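+ *
+ * <p>A minimal usage sketch, assuming {@code env} is a hypothetical open
+ * transactional Environment and {@code map} is a transactional stored
+ * collection:</p>
+ * <pre>
+ *  TransactionRunner runner = new TransactionRunner(env);
+ *  runner.run(new TransactionWorker() {
+ *      public void doWork() throws Exception {
+ *          // all operations here execute in a single transaction and are
+ *          // retried automatically on LockConflictException
+ *          map.put("key", "value");
+ *      }
+ *  });
+ * </pre>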
    + * + * @author Mark Hayes + */ +public class TransactionRunner { + + /** The default maximum number of retries. */ + public static final int DEFAULT_MAX_RETRIES = 10; + + private CurrentTransaction currentTxn; + private int maxRetries; + private TransactionConfig config; + private boolean allowNestedTxn; + + /** + * Creates a transaction runner for a given Berkeley DB environment. + * The default maximum number of retries ({@link #DEFAULT_MAX_RETRIES}) and + * a null (default) {@link TransactionConfig} will be used. + * + * @param env is the environment for running transactions. + */ + public TransactionRunner(Environment env) { + + this(env, DEFAULT_MAX_RETRIES, null); + } + + /** + * Creates a transaction runner for a given Berkeley DB environment and + * with a given number of maximum retries. + * + * @param env is the environment for running transactions. + * + * @param maxRetries is the maximum number of retries that will be + * performed when deadlocks are detected. + * + * @param config the transaction configuration used for calling + * {@link Environment#beginTransaction}, or null to use the default + * configuration. The configuration object is not cloned, and + * any modifications to it will impact subsequent transactions. + */ + public TransactionRunner(Environment env, + int maxRetries, + TransactionConfig config) { + + this.currentTxn = CurrentTransaction.getInstance(env); + this.maxRetries = maxRetries; + this.config = config; + } + + /** + * Returns the maximum number of retries that will be performed when + * deadlocks are detected. + * + * @return the maximum number of retries. + */ + public int getMaxRetries() { + + return maxRetries; + } + + /** + * Changes the maximum number of retries that will be performed when + * deadlocks are detected. + * Calling this method does not impact transactions already running. + * + * @param maxRetries the maximum number of retries. + */ + public void setMaxRetries(int maxRetries) { + + this.maxRetries = maxRetries; + } + + /** + * Returns whether nested transactions will be created if + * run() is called when a transaction is already active for + * the current thread. + * By default this property is false. + * + * @return whether nested transactions will be created. + * + *
+ * <p>Note that this method always returns false in the JE product, since
+ * nested transactions are not supported by JE.</p>
    + */ + public boolean getAllowNestedTransactions() { + + return allowNestedTxn; + } + + /** + * Changes whether nested transactions will be created if + * run() is called when a transaction is already active for + * the current thread. + * Calling this method does not impact transactions already running. + * + * @param allowNestedTxn whether nested transactions will be created. + * + *
+ * <p>Note that true may not be passed to this method in the JE product,
+ * since nested transactions are not supported by JE.</p>
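+ *
+ * <p>A minimal sketch; on JE this call is expected to throw:</p>
+ * <pre>
+ *  runner.setAllowNestedTransactions(true); // UnsupportedOperationException
+ * </pre>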
    + */ + public void setAllowNestedTransactions(boolean allowNestedTxn) { + + if (allowNestedTxn && !DbCompat.NESTED_TRANSACTIONS) { + throw new UnsupportedOperationException + ("Nested transactions are not supported."); + } + this.allowNestedTxn = allowNestedTxn; + } + + /** + * Returns the transaction configuration used for calling + * {@link Environment#beginTransaction}. + * + *
+ * <p>If this property is null, the default configuration is used. The
+ * configuration object is not cloned, and any modifications to it will
+ * impact subsequent transactions.</p>
    + * + * @return the transaction configuration. + */ + public TransactionConfig getTransactionConfig() { + + return config; + } + + /** + * Changes the transaction configuration used for calling + * {@link Environment#beginTransaction}. + * + *
+ * <p>If this property is null, the default configuration is used. The
+ * configuration object is not cloned, and any modifications to it will
+ * impact subsequent transactions.</p>
    + * + * @param config the transaction configuration. + */ + public void setTransactionConfig(TransactionConfig config) { + + this.config = config; + } + + /** + * Calls the {@link TransactionWorker#doWork} method and, for transactional + * environments, may begin and end a transaction. If the environment given + * is non-transactional, a transaction will not be used but the doWork() + * method will still be called. See the class description for more + * information. + * + * @param worker the TransactionWorker. + * + * @throws LockConflictException when it is thrown by doWork() and the + * maximum number of retries has occurred. The transaction will have been + * aborted by this method. + * + * @throws Exception when any other exception is thrown by doWork(). The + * exception will first be unwrapped by calling {@link + * ExceptionUnwrapper#unwrap}. The transaction will have been aborted by + * this method. + */ + public void run(TransactionWorker worker) + throws DatabaseException, Exception { + + if (currentTxn != null && + (allowNestedTxn || currentTxn.getTransaction() == null)) { + /* Transactional and (not nested or nested txns allowed). */ + int useMaxRetries = maxRetries; + for (int retries = 0;; retries += 1) { + Transaction txn = null; + try { + txn = currentTxn.beginTransaction(config); + worker.doWork(); + if (txn != null && txn == currentTxn.getTransaction()) { + currentTxn.commitTransaction(); + } + return; + } catch (Throwable e) { + e = ExceptionUnwrapper.unwrapAny(e); + if (txn != null && txn == currentTxn.getTransaction()) { + try { + currentTxn.abortTransaction(); + } catch (Throwable e2) { + + /* + * We print this stack trace so that the + * information is not lost when we throw the + * original exception. + */ + if (DbCompat. + TRANSACTION_RUNNER_PRINT_STACK_TRACES) { + e2.printStackTrace(); + } + /* Force the original exception to be thrown. */ + retries = useMaxRetries; + } + } + /* An Error should not require special handling. */ + if (e instanceof Error) { + throw (Error) e; + } + /* Allow a subclass to determine retry policy. */ + Exception ex = (Exception) e; + useMaxRetries = + handleException(ex, retries, useMaxRetries); + if (retries >= useMaxRetries) { + throw ex; + } + } + } + } else { + /* Non-transactional or (nested and no nested txns allowed). */ + try { + worker.doWork(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } + } + + /** + * Handles exceptions that occur during a transaction, and may implement + * transaction retry policy. The transaction is aborted by the {@link + * #run run} method before calling this method. + * + *
+ * <p>The default implementation of this method throws the {@code
+ * exception} parameter if it is not an instance of {@link
+ * LockConflictException} and otherwise returns the {@code maxRetries}
+ * parameter value. This method can be overridden to throw a different
+ * exception or return a different number of retries. For example:</p>
+ * <ul>
+ * <li>This method could call {@code Thread.sleep} for a short interval to
+ * allow other transactions to finish.</li>
+ *
+ * <li>This method could return a different {@code maxRetries} value
+ * depending on the {@code exception} that occurred.</li>
+ *
+ * <li>This method could throw an application-defined exception when the
+ * {@code retries} value is greater than or equal to {@code maxRetries} and
+ * a {@link LockConflictException} occurs, to override the default behavior,
+ * which is to throw the {@link LockConflictException}.</li>
+ * </ul>
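+ *
+ * <p>A hedged sketch of such an override; the 10 millisecond backoff interval
+ * is illustrative only:</p>
+ * <pre>
+ *  TransactionRunner runner = new TransactionRunner(env) {
+ *      public int handleException(Exception exception,
+ *                                 int retries,
+ *                                 int maxRetries)
+ *          throws Exception {
+ *          if (exception instanceof LockConflictException) {
+ *              Thread.sleep(10); // let competing transactions finish
+ *              return maxRetries;
+ *          }
+ *          throw exception;
+ *      }
+ *  };
+ * </pre>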
    + * + * @param exception an exception that was thrown by the {@link + * TransactionWorker#doWork} method or thrown when beginning or committing + * the transaction. If the {@code retries} value is greater or equal to + * {@code maxRetries} when this method returns normally, this exception + * will be thrown by the {@link #run run} method. + * + * @param retries the current value of a counter that starts out at zero + * and is incremented when each retry is performed. + * + * @param maxRetries the maximum retries to be performed. By default, + * this value is set to {@link #getMaxRetries}. This method may return a + * different maximum retries value to override that default. + * + * @return the maximum number of retries to perform. The + * default policy is to return the {@code maxRetries} parameter value + * if the {@code exception} parameter value is an instance of {@link + * LockConflictException}. + * + * @throws Exception to cause the exception to be thrown by the {@link + * #run run} method. The default policy is to throw the {@code exception} + * parameter value if it is not an instance of {@link + * LockConflictException}. + * + * @since 3.4 + */ + public int handleException(Exception exception, + int retries, + int maxRetries) + throws Exception { + + if (exception instanceof LockConflictException) { + return maxRetries; + } else { + throw exception; + } + } +} diff --git a/src/com/sleepycat/collections/TransactionWorker.java b/src/com/sleepycat/collections/TransactionWorker.java new file mode 100644 index 0000000..cd1c253 --- /dev/null +++ b/src/com/sleepycat/collections/TransactionWorker.java @@ -0,0 +1,39 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import com.sleepycat.util.ExceptionUnwrapper; + +/** + * The interface implemented to perform the work within a transaction. + * To run a transaction, an instance of this interface is passed to the + * {@link TransactionRunner#run} method. + * + * @author Mark Hayes + */ +public interface TransactionWorker { + + /** + * Perform the work for a single transaction. + * + * @throws Exception the exception to be thrown to the caller of + * {@link TransactionRunner#run(TransactionWorker)}. The exception will + * first be unwrapped by calling {@link ExceptionUnwrapper#unwrap}, and the + * transaction will be aborted. + * + * @see TransactionRunner#run + */ + void doWork() + throws Exception; +} diff --git a/src/com/sleepycat/collections/TupleSerialFactory.java b/src/com/sleepycat/collections/TupleSerialFactory.java new file mode 100644 index 0000000..66b7819 --- /dev/null +++ b/src/com/sleepycat/collections/TupleSerialFactory.java @@ -0,0 +1,172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.TupleSerialMarshalledBinding; +import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator; +import com.sleepycat.bind.tuple.MarshalledTupleEntry; // for javadoc +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleMarshalledBinding; +import com.sleepycat.je.Database; + +/** + * Creates stored collections having tuple keys and serialized entity values. + * The entity classes must be Serializable and must implement the + * MarshalledTupleKeyEntity interface. The key classes must either implement + * the MarshalledTupleEntry interface or be one of the Java primitive type + * classes. Underlying binding objects are created automatically. + * + * @author Mark Hayes + */ +public class TupleSerialFactory { + + private ClassCatalog catalog; + + /** + * Creates a tuple-serial factory for a given class catalog. + * + * @param catalog the ClassCatalog. + */ + public TupleSerialFactory(ClassCatalog catalog) { + + this.catalog = catalog; + } + + /** + * Returns the class catalog associated with this factory. + * + * @return the catalog. + */ + public final ClassCatalog getCatalog() { + + return catalog; + } + + /** + * Creates a map from a previously opened Database object. + * + * @param db the previously opened Database object. + * + * @param keyClass is the class used for map keys. It must implement the + * {@link MarshalledTupleEntry} interface or be one of the Java primitive + * type classes. + * + * @param valueBaseClass the base class of the entity values for this + * store. It must implement the {@link MarshalledTupleKeyEntity} + * interface. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @param <K> the key class. + * + * @param <V> the value base class. + * + * @return the map. + */ + public <K, V extends MarshalledTupleKeyEntity> StoredMap<K, V> + newMap(Database db, + Class<K> keyClass, + Class<V> valueBaseClass, + boolean writeAllowed) { + + return new StoredMap<K, V>(db, + getKeyBinding(keyClass), + getEntityBinding(valueBaseClass), + writeAllowed); + } + + /** + * Creates a sorted map from a previously opened Database object. + * + * @param db the previously opened Database object. + * + * @param keyClass is the class used for map keys. It must implement the + * {@link MarshalledTupleEntry} interface or be one of the Java primitive + * type classes. + * + * @param valueBaseClass the base class of the entity values for this + * store. It must implement the {@link MarshalledTupleKeyEntity} + * interface. + * + * @param writeAllowed is true to create a read-write collection or false + * to create a read-only collection. + * + * @param <K> the key class. + * + * @param <V> the value base class. + * + * @return the sorted map.
+ */ + public <K, V extends MarshalledTupleKeyEntity> StoredSortedMap<K, V> + newSortedMap(Database db, + Class<K> keyClass, + Class<V> valueBaseClass, + boolean writeAllowed) { + + return new StoredSortedMap<K, V>(db, + getKeyBinding(keyClass), + getEntityBinding(valueBaseClass), + writeAllowed); + } + + /** + * Creates a SecondaryKeyCreator object for use in configuring + * a SecondaryDatabase. The returned object implements + * the {@link com.sleepycat.je.SecondaryKeyCreator} interface. + * + * @param valueBaseClass the base class of the entity values for this + * store. It must implement the {@link MarshalledTupleKeyEntity} + * interface. + * + * @param keyName is the key name passed to the {@link + * MarshalledTupleKeyEntity#marshalSecondaryKey} method to identify the + * secondary key. + * + * @param <V> the value base class. + * + * @return the key creator. + */ + public <V extends MarshalledTupleKeyEntity> + TupleSerialMarshalledKeyCreator<V> + getKeyCreator(Class<V> valueBaseClass, String keyName) { + + return new TupleSerialMarshalledKeyCreator<V> + (getEntityBinding(valueBaseClass), keyName); + } + + public <V extends MarshalledTupleKeyEntity> + TupleSerialMarshalledBinding<V> + getEntityBinding(Class<V> baseClass) { + + return new TupleSerialMarshalledBinding<V>(catalog, baseClass); + } + + private <K> EntryBinding<K> getKeyBinding(Class<K> keyClass) { + + EntryBinding<K> binding = TupleBinding.getPrimitiveBinding(keyClass); + if (binding == null) { + + /* + * Cannot use type param here because it does not implement + * MarshalledTupleEntry if it is a primitive class. + */ + binding = new TupleMarshalledBinding(keyClass); + } + return binding; + } +} diff --git a/src/com/sleepycat/collections/package.html b/src/com/sleepycat/collections/package.html new file mode 100644 index 0000000..99078a4 --- /dev/null +++ b/src/com/sleepycat/collections/package.html @@ -0,0 +1,19 @@ + + + + + +Data access based on the standard Java collections API. + +Examples can be found in je/examples/collections. Build and run directions are +in the installation notes. + + + diff --git a/src/com/sleepycat/compat/DbCompat.java b/src/com/sleepycat/compat/DbCompat.java new file mode 100644 index 0000000..ce0e03a --- /dev/null +++ b/src/com/sleepycat/compat/DbCompat.java @@ -0,0 +1,709 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
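As a usage sketch for the TupleSerialFactory above (the pre-opened database handles and the String-keyed entity class are assumptions, not part of this import):

import com.sleepycat.bind.serial.StoredClassCatalog;
import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
import com.sleepycat.collections.StoredSortedMap;
import com.sleepycat.collections.TupleSerialFactory;
import com.sleepycat.je.Database;

class FactoryExample {
    /* catalogDb and dataDb are assumed to be previously opened Database
     * handles; entityClass is any Serializable entity class that
     * implements MarshalledTupleKeyEntity and uses String primary keys. */
    static <V extends MarshalledTupleKeyEntity> StoredSortedMap<String, V>
        openEntityMap(Database catalogDb, Database dataDb,
                      Class<V> entityClass) {

        TupleSerialFactory factory =
            new TupleSerialFactory(new StoredClassCatalog(catalogDb));
        /* String keys use the built-in primitive binding; entity values
         * are serialized via the class catalog. */
        return factory.newSortedMap(dataDb, String.class, entityClass,
                                    true /*writeAllowed*/);
    }
}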
+ */ + +package com.sleepycat.compat; + +import java.util.Comparator; +import java.util.regex.Pattern; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseExistsException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.ReadOptions; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.WriteOptions; + +/** + * A minimal set of BDB DB-JE compatibility constants and static methods, for + * internal use only. + * + * Two versions of this class, with the same public interface but different + * implementations, are maintained in parallel in the DB and JE source trees. + * By the use of the constants and methods in this class, along with a script + * that moves the source code from JE to DB, the source code in certain + * packages is kept "portable" and is shared by the two products. The script + * translates the package names from com.sleepycat.je to com.sleepycat.db, and + * performs other fix-ups as described further below. + * + * The JE directories that contain portable code are: + * + * src/com/sleepycat/bind + * /collections + * /persist + * /util + * test/com/sleepycat/bind + * /collections + * /persist + * /util + * + * In DB, these sources are stored in the following locations: + * + * Sources: + * src/java + * Tests: + * test/java/compat + * + * To keep this source code portable there are additional coding rules, above + * and beyond the standard rules (such as coding style) for all JE code. + * + * + In general we should try to use the JE/DB public API, since it is usually + * the same or similar in both products. If we use internal APIs, they will + * always be different and will require special handling. + * + * + When there are differences between products, the first choice for + * handling the difference is to use a DbCompat static method or constant. + * This keeps the source code the same for both products (except in this + * DbCompat class). + * + * + When JE-only code is needed -- for example, some APIs only exist in JE, + * and special handling of JE exceptions is sometimes needed -- the + * following special comment syntax can be used to bracket the JE-only code: + * + * <!-- begin JE only --> + * JE-only code goes here + * <!-- end JE only --> + * + * This syntax must be used inside of a comment: either inside a javadoc + * section as shown above, or inside a single-line comment (space before + * last slash is to prevent ending this javadoc comment): + * + * /* <!-- begin JE only --> * / + * JE-only code goes here + * /* <!-- end JE only --> * / + * + * All lines between the <!-- begin JE only --> and <!-- end JE only --> + * lines, and including these lines, will be removed by the script that + * transfers code from JE to DB. + * + * + When DB-only code is needed, the code will exist in the JE product but + * will never be executed. For DB-only APIs, we hide the API from the user + * with the @hidden javadoc tag. The @hidden tag is ignored on the DB side.
+ * We do not have a way to remove DB-only code completely from the JE + * product, because we do not use a preprocessor for building JE. + * + * + Because DatabaseException (and all subclasses) are checked exceptions in + * DB but runtime exceptions in JE, we cannot omit the 'throws' declaration. + * Another difference is that DB normally throws DatabaseException for all + * errors, while JE has many specific subclasses for specific errors. + * Therefore, any method that calls a DB API method (for example, + * Database.get or put) will have a "throws DatabaseException" clause. + * + * + Special consideration is needed for the @throws clauses in javadoc. We do + * want to javadoc the JE-only exceptions that are thrown, so the @throws + * for these exceptions should be inside the "begin/end JE only" brackets. + * We also need to document the fact that DB may throw DatabaseException for + * almost any method, so we do that with a final @throws clause that looks + * like this: + * + * @throws DatabaseException the base class for all BDB exceptions. + * + * This is a compromise. JE doesn't throw this exception, but we've + * described it in a way that still makes some sense for JE, sort of. + * + * + Other special handling can be implemented in the transfer script, which + * uses SED. Entire files can be excluded from the transfer, for example, + * the JE-only exception classes. Name changes can also be made using SED, + * for example: s/LockConflictException/DeadlockException/. See the + * db/dist/s_je2db script for details. + */ +public class DbCompat { + + /* Capabilities */ + + public static final boolean CDB = false; + public static final boolean JOIN = true; + public static final boolean NESTED_TRANSACTIONS = false; + public static final boolean INSERTION_ORDERED_DUPLICATES = false; + public static final boolean SEPARATE_DATABASE_FILES = false; + public static final boolean MEMORY_SUBSYSTEM = false; + public static final boolean LOCK_SUBSYSTEM = false; + public static final boolean HASH_METHOD = false; + public static final boolean RECNO_METHOD = false; + public static final boolean QUEUE_METHOD = false; + public static final boolean BTREE_RECNUM_METHOD = false; + public static final boolean OPTIONAL_READ_UNCOMMITTED = false; + public static final boolean SECONDARIES = true; + public static boolean TRANSACTION_RUNNER_PRINT_STACK_TRACES = true; + public static final boolean DATABASE_COUNT = true; + public static final boolean NEW_JE_EXCEPTIONS = true; + public static final boolean POPULATE_ENFORCES_CONSTRAINTS = true; + + /** + * For read-only cursor operations on a replicated node, we must use a + * transaction to satisfy HA requirements. However, we use a Durability + * that avoids consistency checks on the Master, and we use ReadCommitted + * isolation since that gives the same behavior as a non-transactional + * cursor: locks are released when the cursor is moved or closed. + */ + public static final TransactionConfig READ_ONLY_TXN_CONFIG; + + /** Used on JE only, simply to avoid warnings about "if (true) ...".
*/ + public static final boolean IS_JE = true; + + static { + READ_ONLY_TXN_CONFIG = new TransactionConfig(); + READ_ONLY_TXN_CONFIG.setDurability(Durability.READ_ONLY_TXN); + READ_ONLY_TXN_CONFIG.setReadCommitted(true); + } + + public static boolean getInitializeCache(EnvironmentConfig config) { + return true; + } + + public static boolean getInitializeLocking(EnvironmentConfig config) { + return config.getLocking(); + } + + public static boolean getInitializeCDB(EnvironmentConfig config) { + return false; + } + + public static boolean isReplicated(Environment env) { + return DbInternal.getNonNullEnvImpl(env).isReplicated(); + } + + public static boolean isTypeBtree(DatabaseConfig dbConfig) { + return true; + } + + public static boolean isTypeHash(DatabaseConfig dbConfig) { + return false; + } + + public static boolean isTypeQueue(DatabaseConfig dbConfig) { + return false; + } + + public static boolean isTypeRecno(DatabaseConfig dbConfig) { + return false; + } + + public static boolean getBtreeRecordNumbers(DatabaseConfig dbConfig) { + return false; + } + + public static boolean getReadUncommitted(DatabaseConfig dbConfig) { + return true; + } + + public static boolean getRenumbering(DatabaseConfig dbConfig) { + return false; + } + + public static boolean getSortedDuplicates(DatabaseConfig dbConfig) { + return dbConfig.getSortedDuplicates(); + } + + public static boolean getUnsortedDuplicates(DatabaseConfig dbConfig) { + return false; + } + + public static boolean getDeferredWrite(DatabaseConfig dbConfig) { + return dbConfig.getDeferredWrite(); + } + + // XXX Remove this when DB and JE support CursorConfig.cloneConfig + public static CursorConfig cloneCursorConfig(CursorConfig config) { + CursorConfig newConfig = new CursorConfig(); + newConfig.setReadCommitted(config.getReadCommitted()); + newConfig.setReadUncommitted(config.getReadUncommitted()); + return newConfig; + } + + public static boolean getWriteCursor(CursorConfig config) { + return false; + } + + public static void setWriteCursor(CursorConfig config, boolean write) { + if (write) { + throw new UnsupportedOperationException(); + } + } + + public static void setRecordNumber(DatabaseEntry entry, int recNum) { + throw new UnsupportedOperationException(); + } + + public static int getRecordNumber(DatabaseEntry entry) { + throw new UnsupportedOperationException(); + } + + public static String getDatabaseFile(Database db) { + return null; + } + + public static long getDatabaseCount(Database db) + throws DatabaseException { + + return db.count(); + } + + public static OperationStatus getCurrentRecordNumber(Cursor cursor, + DatabaseEntry key, + LockMode lockMode) + throws DatabaseException { + + throw new UnsupportedOperationException(); + } + + public static OperationStatus getSearchRecordNumber(Cursor cursor, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + throw new UnsupportedOperationException(); + } + + public static OperationStatus getSearchRecordNumber(SecondaryCursor cursor, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + throw new UnsupportedOperationException(); + } + + public static OperationStatus putAfter(Cursor cursor, + DatabaseEntry key, + DatabaseEntry data) + throws DatabaseException { + + throw new UnsupportedOperationException(); + } + + public static OperationStatus putBefore(Cursor cursor, + DatabaseEntry key, + DatabaseEntry data) + throws DatabaseException { + + throw new 
UnsupportedOperationException(); + } + + public static OperationStatus append(Database db, + Transaction txn, + DatabaseEntry key, + DatabaseEntry data) { + throw new UnsupportedOperationException(); + } + + public static Transaction getThreadTransaction(Environment env) + throws DatabaseException { + + return env.getThreadTransaction(); + } + + public static ClassLoader getClassLoader(Environment env) { + return DbInternal.getNonNullEnvImpl(env).getClassLoader(); + } + + /* Methods used by the collections tests. */ + + public static void setInitializeCache(EnvironmentConfig config, + boolean val) { + if (!val) { + throw new UnsupportedOperationException(); + } + } + + public static void setInitializeLocking(EnvironmentConfig config, + boolean val) { + if (!val) { + throw new UnsupportedOperationException(); + } + } + + public static void setInitializeCDB(EnvironmentConfig config, + boolean val) { + if (val) { + throw new UnsupportedOperationException(); + } + } + + public static void setLockDetectModeOldest(EnvironmentConfig config) { + /* JE does this by default, since it uses timeouts. */ + } + + public static void setSerializableIsolation(TransactionConfig config, + boolean val) { + config.setSerializableIsolation(val); + } + + public static boolean setImportunate(final Transaction txn, + final boolean importunate) { + final boolean oldVal = DbInternal.getTxn(txn).getImportunate(); + DbInternal.getTxn(txn).setImportunate(importunate); + return oldVal; + } + + public static void setBtreeComparator(DatabaseConfig dbConfig, + Comparator comparator) { + dbConfig.setBtreeComparator(comparator); + } + + public static void setTypeBtree(DatabaseConfig dbConfig) { + } + + public static void setTypeHash(DatabaseConfig dbConfig) { + throw new UnsupportedOperationException(); + } + + public static void setTypeRecno(DatabaseConfig dbConfig) { + throw new UnsupportedOperationException(); + } + + public static void setTypeQueue(DatabaseConfig dbConfig) { + throw new UnsupportedOperationException(); + } + + public static void setBtreeRecordNumbers(DatabaseConfig dbConfig, + boolean val) { + throw new UnsupportedOperationException(); + } + + public static void setReadUncommitted(DatabaseConfig dbConfig, + boolean val) { + } + + public static void setRenumbering(DatabaseConfig dbConfig, + boolean val) { + throw new UnsupportedOperationException(); + } + + public static void setSortedDuplicates(DatabaseConfig dbConfig, + boolean val) { + dbConfig.setSortedDuplicates(val); + } + + public static void setUnsortedDuplicates(DatabaseConfig dbConfig, + boolean val) { + if (val) { + throw new UnsupportedOperationException(); + } + } + + public static void setDeferredWrite(DatabaseConfig dbConfig, boolean val) { + dbConfig.setDeferredWrite(val); + } + + public static void setRecordLength(DatabaseConfig dbConfig, int val) { + if (val != 0) { + throw new UnsupportedOperationException(); + } + } + + public static void setRecordPad(DatabaseConfig dbConfig, int val) { + throw new UnsupportedOperationException(); + } + + public static boolean databaseExists(Environment env, + String fileName, + String dbName) { + assert fileName == null; + return env.getDatabaseNames().contains(dbName); + } + + /** + * Returns null if the database is not found (and AllowCreate is false) or + * already exists (and ExclusiveCreate is true). 
+ */ + public static Database openDatabase(Environment env, + Transaction txn, + String fileName, + String dbName, + DatabaseConfig config) { + assert fileName == null; + try { + return env.openDatabase(txn, dbName, config); + } catch (DatabaseNotFoundException e) { + return null; + } catch (DatabaseExistsException e) { + return null; + } + } + + /** + * Returns null if the database is not found (and AllowCreate is false) or + * already exists (and ExclusiveCreate is true). + */ + public static SecondaryDatabase + openSecondaryDatabase(Environment env, + Transaction txn, + String fileName, + String dbName, + Database primaryDatabase, + SecondaryConfig config) { + assert fileName == null; + try { + return env.openSecondaryDatabase(txn, dbName, primaryDatabase, + config); + } catch (DatabaseNotFoundException e) { + return null; + } catch (DatabaseExistsException e) { + return null; + } + } + + /** + * Returns false if the database is not found. + */ + public static boolean truncateDatabase(Environment env, + Transaction txn, + String fileName, + String dbName) { + assert fileName == null; + try { + env.truncateDatabase(txn, dbName, false /*returnCount*/); + return true; + } catch (DatabaseNotFoundException e) { + return false; + } + } + + /** + * Returns false if the database is not found. + */ + public static boolean removeDatabase(Environment env, + Transaction txn, + String fileName, + String dbName) { + assert fileName == null; + try { + env.removeDatabase(txn, dbName); + return true; + } catch (DatabaseNotFoundException e) { + return false; + } + } + + /** + * Returns false if the database is not found. + */ + public static boolean renameDatabase(Environment env, + Transaction txn, + String oldFileName, + String oldDbName, + String newFileName, + String newDbName) { + assert oldFileName == null; + assert newFileName == null; + try { + env.renameDatabase(txn, oldDbName, newDbName); + return true; + } catch (DatabaseNotFoundException e) { + return false; + } + } + + /** + * Fires an assertion if the database is not found (and AllowCreate is + * false) or already exists (and ExclusiveCreate is true). + */ + public static Database testOpenDatabase(Environment env, + Transaction txn, + String file, + String name, + DatabaseConfig config) { + try { + return env.openDatabase(txn, makeTestDbName(file, name), config); + } catch (DatabaseNotFoundException e) { + assert false; + return null; + } catch (DatabaseExistsException e) { + assert false; + return null; + } + } + + /** + * Fires an assertion if the database is not found (and AllowCreate is + * false) or already exists (and ExclusiveCreate is true). + */ + public static SecondaryDatabase + testOpenSecondaryDatabase(Environment env, + Transaction txn, + String file, + String name, + Database primary, + SecondaryConfig config) { + try { + return env.openSecondaryDatabase(txn, makeTestDbName(file, name), + primary, config); + } catch (DatabaseNotFoundException e) { + assert false; + return null; + } catch (DatabaseExistsException e) { + assert false; + return null; + } + } + + private static String makeTestDbName(String file, String name) { + if (file == null) { + return name; + } else { + if (name != null) { + return file + '.' 
+ name; + } else { + return file; + } + } + } + + public static RuntimeException unexpectedException(Exception cause) { + return EnvironmentFailureException.unexpectedException(cause); + } + + public static RuntimeException unexpectedException(String msg, + Exception cause) { + return EnvironmentFailureException.unexpectedException(msg, cause); + } + + public static RuntimeException unexpectedState(String msg) { + return EnvironmentFailureException.unexpectedState(msg); + } + + public static RuntimeException unexpectedState() { + return EnvironmentFailureException.unexpectedState(); + } + + public static void enableDeadlockDetection(EnvironmentConfig envConfig, + boolean isCDB) { + // do nothing in JE, deadlock detection is always on + } + + public static Object getErrorHandler(Environment env) + throws DatabaseException { + + return null; + } + + public static void setErrorHandler(Environment env, Object errHandler) + throws DatabaseException { + } + + public static void suppressError(Environment env, + final Pattern errPattern) + throws DatabaseException{ + } + + /* + * Abstraction for a result, wrapping an OperationResult in JE, or an + * OperationStatus in either product. The jeResult field + * and make(OperationResult) method will not appear in DB core, and must be + * accessed only by "JE only" code. + */ + public static class OpResult { + + public static final OpResult SUCCESS = + new OpResult(DbInternal.DEFAULT_RESULT); + + public static final OpResult FAILURE = new OpResult(null); + + public final OperationResult jeResult; + + private OpResult(OperationResult result) { + jeResult = result; + } + + public boolean isSuccess() { + return jeResult != null; + } + + public OperationStatus status() { + return isSuccess() ? + OperationStatus.SUCCESS : OperationStatus.NOTFOUND; + } + + public static OpResult make(OperationResult result) { + return (result != null) ? (new OpResult(result)) : FAILURE; + } + + public static OpResult make(OperationStatus status) { + return (status == OperationStatus.SUCCESS) ? + SUCCESS : FAILURE; + } + } + + /* + * Abstraction for read options, wrapping a ReadOptions in JE, or a + * LockMode in either product. The jeOptions field and make(ReadOptions) + * method will not appear in DB core, and must be accessed only by + * "JE only" code. + */ + public static class OpReadOptions { + + public static final OpReadOptions EMPTY = + new OpReadOptions(null); + + public final ReadOptions jeOptions; + + private OpReadOptions(ReadOptions options) { + jeOptions = options; + } + + public LockMode getLockMode() { + return (jeOptions != null) ? jeOptions.getLockMode() : null; + } + + public static OpReadOptions make(ReadOptions options) { + return (options != null) ? + new OpReadOptions(options) : EMPTY; + } + + public static OpReadOptions make(LockMode lockMode) { + return (lockMode != null) ? + new OpReadOptions(lockMode.toReadOptions()) : EMPTY; + } + } + + /* + * Abstraction for write options, wrapping a WriteOptions in JE, but always + * empty in DB core. The jeOptions field and make(WriteOptions) method will + * not appear in DB core, and must be accessed only by "JE only" code. + */ + public static class OpWriteOptions { + + public static final OpWriteOptions EMPTY = + new OpWriteOptions(null); + + public final WriteOptions jeOptions; + + private OpWriteOptions(WriteOptions options) { + jeOptions = options; + } + + public static OpWriteOptions make(WriteOptions options) { + return (options != null) ? 
+ new OpWriteOptions(options) : EMPTY; + } + } +} diff --git a/src/com/sleepycat/compat/package-info.java b/src/com/sleepycat/compat/package-info.java new file mode 100644 index 0000000..3722bb3 --- /dev/null +++ b/src/com/sleepycat/compat/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: BDB/JE compatibility layer for bind, collections, DPL packages. + */ +package com.sleepycat.compat; \ No newline at end of file diff --git a/src/com/sleepycat/je/BinaryEqualityComparator.java b/src/com/sleepycat/je/BinaryEqualityComparator.java new file mode 100644 index 0000000..332328e --- /dev/null +++ b/src/com/sleepycat/je/BinaryEqualityComparator.java @@ -0,0 +1,72 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * A tag interface used to mark a BTree or duplicate comparator class as a + * binary equality comparator, that is, a comparator that considers + * two keys (byte arrays) to be equal if and only if they have the same + * length and they are equal byte-per-byte. + *
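For example, a comparator carrying this tag interface might look like the following sketch (an illustration, not part of this import); it would be configured via DatabaseConfig.setBtreeComparator or setDuplicateComparator:

import java.io.Serializable;
import java.util.Comparator;
import com.sleepycat.je.BinaryEqualityComparator;

/* Compares keys lexicographically as unsigned bytes; two keys compare
 * equal only when they have the same length and identical bytes, so the
 * BinaryEqualityComparator tag is accurate and enables blind puts. */
public class ByteWiseComparator
    implements Comparator<byte[]>, BinaryEqualityComparator, Serializable {

    public int compare(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i += 1) {
            int cmp = (a[i] & 0xff) - (b[i] & 0xff);
            if (cmp != 0) {
                return cmp;
            }
        }
        return a.length - b.length;
    }
}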

+ * If both the BTree and duplicate comparators used by a database are + * binary-equality comparators, then certain internal optimizations can be + * enabled. Specifically, the "BIN-delta blind-puts" optimization described + * below is made possible. + *

    + * We say that a record operation (insertion, update, or deletion) is performed + * blindly in a BIN-delta when the delta does not contain a slot with the + * operation's key and we don't need to access the full BIN to check whether + * such a slot exists there or to extract any information from the full-BIN + * slot, if it exists. Performing a blind operation involves inserting the + * record in the BIN-delta, and in case of deletion, marking the BIN slot as + * deleted. When the delta and the full BIN are merged at a later time, the + * blind operation will be translated to an insertion, update, or delete + * depending on whether the full BIN contained the record or not. + *

    + * Normally, blind puts are not possible: we need to know whether the put + * is actually an update or an insertion, i.e., whether the key exists in + * the full BIN or not. Furthermore, in case of update we also need to + * know the location of the previous record version to make the current + * update abortable. However, it is possible to answer at least the key + * existence question by adding a small amount of extra information in + * the deltas. If we do so, puts that are actual insertions can be done + * blindly. + *

+ * To answer whether a key exists in a full BIN or not, each BIN-delta + * stores a bloom filter, which is a very compact, approximate + * representation of the set of keys in the full BIN. Bloom filters can + * answer set membership questions with no false negatives and very low + * probability of false positives. As a result, put operations that are + * actual insertions can almost always be performed blindly. + *
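As a generic illustration of the idea (a toy sketch, not JE's internal filter), the membership contract that makes blind puts safe looks like this:

import java.util.BitSet;

/* A toy bloom filter for illustration only. */
class TinyBloomFilter {
    private static final int SIZE = 1024;
    private final BitSet bits = new BitSet(SIZE);

    void add(byte[] key) {
        for (int seed = 1; seed <= 3; seed += 1) {
            bits.set(hash(key, seed));
        }
    }

    /* No false negatives: a false return proves the key is absent, so a
     * put may be applied blindly to the BIN-delta. A true return may be
     * a false positive, requiring a fetch of the full BIN. */
    boolean mightContain(byte[] key) {
        for (int seed = 1; seed <= 3; seed += 1) {
            if (!bits.get(hash(key, seed))) {
                return false;
            }
        }
        return true;
    }

    private int hash(byte[] key, int seed) {
        int h = seed;
        for (byte b : key) {
            h = 31 * h + (b & 0xff);
        }
        return (h & 0x7fffffff) % SIZE;
    }
}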

+ * Because bloom filters work by applying hash functions on keys (where each + * key byte participates in the hash computation), an additional requirement + * for blind puts is that a database uses "binary equality" comparators, that + * is, comparators that consider two keys to be equal if and only if they + * have the same length and they are equal byte-per-byte. Implementing the + * BinaryEqualityComparator interface marks an actual comparator as having the + * "binary equality" property. + *

    + * Comparators are configured using + * {@link DatabaseConfig#setBtreeComparator(java.util.Comparator)} or + * {@link DatabaseConfig#setBtreeComparator(Class)}, and + * {@link DatabaseConfig#setDuplicateComparator(java.util.Comparator)} or + * {@link DatabaseConfig#setDuplicateComparator(Class)}. + *

    + * As described in the javadoc for these methods, comparators must be used + * with great caution, since a badly behaved comparator can cause B-tree + * corruption. + */ +public interface BinaryEqualityComparator { +} diff --git a/src/com/sleepycat/je/BtreeStats.java b/src/com/sleepycat/je/BtreeStats.java new file mode 100644 index 0000000..c8df042 --- /dev/null +++ b/src/com/sleepycat/je/BtreeStats.java @@ -0,0 +1,265 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BINS_BYLEVEL; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BIN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_DELETED_LN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.GROUP_NAME; +import static com.sleepycat.je.dbi.BTreeStatDefinition.GROUP_DESC; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_INS_BYLEVEL; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_IN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_LN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_MAINTREE_MAXDEPTH; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_RELATCHES_REQUIRED; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_ROOT_SPLITS; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BIN_ENTRIES_HISTOGRAM; + +import com.sleepycat.je.utilint.StatGroup; + +/** + * The BtreeStats object is used to return Btree database statistics. + */ +public class BtreeStats extends DatabaseStats { + + private static final long serialVersionUID = 298825033L; + + private StatGroup dbImplStats; + private StatGroup treeStats; + + public BtreeStats() { + dbImplStats = new StatGroup(GROUP_NAME, GROUP_DESC); + treeStats = new StatGroup(GROUP_NAME, GROUP_DESC); + } + + /** + * @hidden + * Internal use only. + */ + public void setDbImplStats(StatGroup stats) { + dbImplStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setTreeStats(StatGroup tStats) { + this.treeStats = tStats; + } + + /** + * Returns the number of Bottom Internal Nodes in the database tree. + * + *

    The information is included only if the {@link + * com.sleepycat.je.Database#getStats Database.getStats} call was not + * configured by the {@link com.sleepycat.je.StatsConfig#setFast + * StatsConfig.setFast} method.

    + * + * @return number of Bottom Internal Nodes in the database tree. + */ + public long getBottomInternalNodeCount() { + return dbImplStats.getLong(BTREE_BIN_COUNT); + } + + /** + * @deprecated as of 5.0, returns zero. + */ + public long getDuplicateBottomInternalNodeCount() { + return 0; + } + + /** + * Returns the number of deleted data records in the database tree that + * are pending removal by the compressor. + * + *

    The information is included only if the {@link + * com.sleepycat.je.Database#getStats Database.getStats} call was not + * configured by the {@link com.sleepycat.je.StatsConfig#setFast + * StatsConfig.setFast} method.

    + * + * @return number of deleted data records in the database tree that are + * pending removal by the compressor. + */ + public long getDeletedLeafNodeCount() { + return dbImplStats.getLong(BTREE_DELETED_LN_COUNT); + } + + /** + * @deprecated as of 5.0, returns zero. + */ + public long getDupCountLeafNodeCount() { + return 0; + } + + /** + * Returns the number of Internal Nodes in the database tree. + * + *

    The information is included only if the {@link + * com.sleepycat.je.Database#getStats Database.getStats} call was not + * configured by the {@link com.sleepycat.je.StatsConfig#setFast + * StatsConfig.setFast} method.

    + * + * @return number of Internal Nodes in the database tree. + */ + public long getInternalNodeCount() { + return dbImplStats.getLong(BTREE_IN_COUNT); + } + + /** + * @deprecated as of 5.0, returns zero. + */ + public long getDuplicateInternalNodeCount() { + return 0; + } + + /** + * Returns the number of leaf nodes in the database tree, which can equal + * the number of records. This is calculated without locks or transactions, + * and therefore is only an accurate count of the current number of records + * when the database is quiescent. + * + *

    The information is included only if the {@link + * com.sleepycat.je.Database#getStats Database.getStats} call was not + * configured by the {@link com.sleepycat.je.StatsConfig#setFast + * StatsConfig.setFast} method.

    + * + * @return number of leaf nodes in the database tree, which can equal the + * number of records. This is calculated without locks or transactions, and + * therefore is only an accurate count of the current number of records + * when the database is quiescent. + */ + public long getLeafNodeCount() { + return dbImplStats.getLong(BTREE_LN_COUNT); + } + + /** + * Returns the maximum depth of the main database tree. + * + *

    The information is included only if the {@link + * com.sleepycat.je.Database#getStats Database.getStats} call was not + * configured by the {@link com.sleepycat.je.StatsConfig#setFast + * StatsConfig.setFast} method.

    + * + * @return maximum depth of the main database tree. + */ + public int getMainTreeMaxDepth() { + return dbImplStats.getInt(BTREE_MAINTREE_MAXDEPTH); + } + + /** + * @deprecated as of 5.0, returns zero. + */ + public int getDuplicateTreeMaxDepth() { + return 0; + } + + /** + * Returns the count of Internal Nodes per level, indexed by level. + * + *

    The information is included only if the {@link + * com.sleepycat.je.Database#getStats Database.getStats} call was not + * configured by the {@link com.sleepycat.je.StatsConfig#setFast + * StatsConfig.setFast} method.

    + * + * @return count of Internal Nodes per level, indexed by level. + */ + public long[] getINsByLevel() { + return dbImplStats.getLongArray(BTREE_INS_BYLEVEL); + } + + /** + * Returns the count of Bottom Internal Nodes per level, indexed by level. + * + *

    The information is included only if the {@link + * com.sleepycat.je.Database#getStats Database.getStats} call was not + * configured by the {@link com.sleepycat.je.StatsConfig#setFast + * StatsConfig.setFast} method.

    + * + * @return count of Bottom Internal Nodes per level, indexed by level. + */ + public long[] getBINsByLevel() { + return dbImplStats.getLongArray(BTREE_BINS_BYLEVEL); + } + + /** + * Returns an array representing a histogram of the number of Bottom + * Internal Nodes with various percentages of non-deleted entry counts. + * The array is 10 elements and each element represents a range of 10%. + * + *
    +     * element [0]: # BINs with 0% to 9% entries used by non-deleted values
    +     * element [1]: # BINs with 10% to 19% entries used by non-deleted values
    +     * element [2]: # BINs with 20% to 29% entries used by non-deleted values
    +     * ...
+     * element [9]: # BINs with 90% to 100% entries used by non-deleted values
    +     * 
    + * + * @return an array representing a histogram of the number of BINs with + * various percentages of non-deleted entries. + */ + public long[] getBINEntriesHistogram() { + return dbImplStats.getLongArray(BTREE_BIN_ENTRIES_HISTOGRAM); + } + + /** + * @deprecated as of 5.0, returns an empty array. + */ + public long[] getDINsByLevel() { + return new long[0]; + } + + /** + * @deprecated as of 5.0, returns an empty array. + */ + public long[] getDBINsByLevel() { + return new long[0]; + } + + /** + * Returns the number of latch upgrades (relatches) required while + * operating on this database's BTree. Latch upgrades are required when an + * operation assumes that a shared (read) latch will be sufficient but + * later determines that an exclusive (write) latch will actually be + * required. + * + * @return number of latch upgrades (relatches) required. + */ + public long getRelatches() { + return treeStats.getLong(BTREE_RELATCHES_REQUIRED); + } + + /** + * The number of times the root of the BTree was split. + * + * @return number of times the root was split. + */ + public int getRootSplits() { + return treeStats.getInt(BTREE_ROOT_SPLITS); + } + + /** + * For convenience, the BtreeStats class has a toString method that lists + * all the data fields. + */ + @Override + public String toString() { + return dbImplStats.toString() + "\n" + treeStats.toString(); + } + + public String toStringVerbose() { + return + dbImplStats.toStringVerbose() + "\n" + treeStats.toStringVerbose(); + } +} diff --git a/src/com/sleepycat/je/CacheMode.java b/src/com/sleepycat/je/CacheMode.java new file mode 100644 index 0000000..156cd79 --- /dev/null +++ b/src/com/sleepycat/je/CacheMode.java @@ -0,0 +1,357 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je; + +/** + * Modes that can be specified for control over caching of records in the JE + * in-memory cache. When a record is stored or retrieved, the cache mode + * determines how long the record is subsequently retained in the JE in-memory + * cache, relative to other records in the cache. + * + *
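A sketch of reading the BtreeStats getters above (the open db handle is an assumption; full, non-fast stats are required per the javadoc):

import com.sleepycat.je.BtreeStats;
import com.sleepycat.je.Database;
import com.sleepycat.je.StatsConfig;

class StatsExample {
    static void printBinOccupancy(Database db) {
        StatsConfig config = new StatsConfig();
        config.setFast(false); /* full stats: the Btree counts need it */
        BtreeStats stats = (BtreeStats) db.getStats(config);

        long[] histogram = stats.getBINEntriesHistogram();
        for (int i = 0; i < histogram.length; i += 1) {
            /* bucket i covers roughly (i*10)% to (i*10+9)% occupancy */
            System.out.println("bucket " + i + ": "
                               + histogram[i] + " BINs");
        }
        System.out.println("BINs=" + stats.getBottomInternalNodeCount()
                           + " INs=" + stats.getInternalNodeCount()
                           + " LNs=" + stats.getLeafNodeCount());
    }
}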

    When the cache overflows, JE must evict some records from the cache. By + * default, JE uses a Least Recently Used (LRU) algorithm for determining which + * records to evict. With the LRU algorithm, JE makes a best effort to evict + * the "coldest" (least recently used or accessed) records and to retain the + * "hottest" records in the cache for as long as possible.

    + * + *

    When an {@link EnvironmentMutableConfig#setOffHeapCacheSize off-heap + * cache} is configured, records evicted from the main cache are placed in + * the off-heap cache, and a separate LRU is used to determine when to evict + * a record from the off-heap cache.

    + * + *

    JE uses an approximate LRU approach with some exceptions and special + * cases.

    + *
      + *
    • + * Individual records (LNs or Leaf Nodes) do not appear on the LRU + * list, i.e., their "hotness" is not explicitly tracked. Instead, + * their containing Btree node (BIN or bottom internal node) appears on + * the LRU list. Each BIN contains roughly 100 LNs + * (see {@link com.sleepycat.je.EnvironmentConfig#NODE_MAX_ENTRIES}). + * When an LN is accessed, its BIN is moved to the hot end of the LRU + * list, implying that all other LNs in the same BIN also are treated + * as if they are hot. The same applies if the BIN is moved to the cold + * end of the LRU list. The above statement applies to the off-heap + * cache also, when one is configured. + *
    • + *
    • + * When a BIN contains LNs and the BIN reaches the cold end of the LRU + * list, memory can be reclaimed by evicting the LNs, and eviction of + * the BIN is deferred. The empty BIN is moved to the hot end of the + * LRU list. When an off-heap cache is configured, the eviction of LNs + * in this manner occurs independently in both caches. + *
    • + *
• + * When a BIN contains no LNs, it may be evicted entirely. When the + * BIN's parent node becomes empty, it may also be evicted, and so on. + * The BINs and INs are evicted on the basis of an LRU, but with two + * exceptions:

      + * 1) Dirty BINs and INs are evicted only after eviction of all + * non-dirty BINs and INs. This is important to reduce logging and + * associated cleaning costs. When an off-heap cache is configured, + * BINs and INs are evicted from the main cache without regard to + * whether they are dirty. Dirty BINs and INs are evicted last, as + * just described, only from the off-heap cache. + *

+ * 2) A BIN may be mutated to a BIN-delta to reclaim memory, rather + * than being evicted entirely. A BIN-delta contains only the dirty + * entries (for LNs recently logged). A BIN-delta is used when its + * size relative to the full BIN will be small enough so that it will + * be more efficient, both on disk and in memory, to store the delta + * rather than the full BIN. + * (see {@link com.sleepycat.je.EnvironmentConfig#TREE_BIN_DELTA}). + * The advantage of keeping a BIN-delta in cache is that some + * operations, particularly record insertions, can be performed using + * the delta without having the complete BIN in cache. When a BIN is + * mutated to a BIN-delta to reclaim memory, it is placed at the hot + * end of the LRU list. When an off-heap cache is configured, BINs are + * not mutated to BIN-deltas in the main cache, but rather this is done + * only in the off-heap cache.

    • + *
    • + * To reduce contention among threads on the LRU list, multiple LRU + * lists may be configured. See + * {@link com.sleepycat.je.EnvironmentConfig#EVICTOR_N_LRU_LISTS}. + * As described in the javadoc for this parameter, there is a trade-off + * between thread contention and the accuracy of the LRU. This + * parameter determines the number of main cache LRU lists as well as + * the number of off-heap cache LRU lists, when an off-heap cache is + * configured. + *
    • + *
• + * A non-default cache mode may be explicitly specified to override + * the normal LRU behavior described above. See the CacheMode + * enumeration values for details. The behavior of each CacheMode + * when an off-heap cache is configured is also described. + *
    • + *
    + * + *
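The tuning parameters mentioned in the list above are set through EnvironmentConfig; a brief sketch with illustrative values only (see each parameter's javadoc before choosing real values):

import com.sleepycat.je.EnvironmentConfig;

EnvironmentConfig envConfig = new EnvironmentConfig();
envConfig.setAllowCreate(true);
/* More LRU lists reduce contention at some cost in LRU accuracy. */
envConfig.setConfigParam(EnvironmentConfig.EVICTOR_N_LRU_LISTS, "8");
/* Smaller BINs mean fewer LNs treated as hot/cold together. */
envConfig.setConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES, "64");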

    When no cache mode is explicitly specified, the default cache mode is + * {@link #DEFAULT}. The default mode causes the normal LRU algorithm to be + * used.

    + * + *

    An explicit cache mode may be specified as an {@link + * EnvironmentConfig#setCacheMode Environment property}, a {@link + * DatabaseConfig#setCacheMode Database property}, a {@link + * Cursor#setCacheMode Cursor property}, or on a per-operation basis using + * {@link ReadOptions#setCacheMode(CacheMode)} or {@link + * WriteOptions#setCacheMode(CacheMode)}. If none are specified, {@link + * #DEFAULT} is used. If more than one non-null property is specified, the + * Cursor property overrides the Database and Environment properties, and the + * Database property overrides the Environment property.
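For example, the precedence rules above allow a per-operation override of any Database or Environment setting (a sketch; the open db handle is an assumption):

import com.sleepycat.je.CacheMode;
import com.sleepycat.je.Cursor;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.LockMode;
import com.sleepycat.je.OperationStatus;

class CacheModeExample {
    /* The Cursor property set here overrides both the Database and
     * Environment cache mode properties, for this scan only. */
    static long scanWithoutPerturbingCache(Database db) {
        long count = 0;
        Cursor cursor = db.openCursor(null, null);
        try {
            cursor.setCacheMode(CacheMode.UNCHANGED);
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
                   OperationStatus.SUCCESS) {
                count += 1; /* process the record here */
            }
        } finally {
            cursor.close();
        }
        return count;
    }
}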

    + * + *

    When all records in a given Database, or all Databases, should be treated + * the same with respect to caching, using the Database and/or Environment + * cache mode properties is sufficient. For applications that need finer + * grained control, the Cursor cache mode property can be used to provide a + * specific cache mode for individual records or operations. The Cursor cache + * mode property can be changed at any time, and the cache mode specified will + * apply to subsequent operations performed with that Cursor.

    + * + *

    In a Replicated Environment where a non-default cache mode is desired, + * the cache mode can be configured on the Master node as described above. + * However, it is important to configure the cache mode on the Replica nodes + * using an Environment property. That way, the cache mode will apply to + * write operations that are replayed on the Replica for all + * Databases, even if the Databases are not open by the application on the + * Replica. Since all nodes may be Replicas at some point in their life cycle, + * it is recommended to configure the desired cache mode as an Environment + * property on all nodes in a Replicated Environment.

    + * + *

On a Replica, per-Database control over the cache mode for write + * operations is possible by opening the Database on the Replica and + * configuring the cache mode. Per-Cursor control (meaning per-record or + * per-operation) of the cache mode is not possible on a Replica for + * write operations. For read operations, both per-Database + * and per-Cursor control is possible on the Replica, as described above.

    + *

    + * The cache related stats in {@link EnvironmentStats} can provide some measure + * of the effectiveness of the cache mode choice. + * + * @see Cache Statistics: + * Sizing + */ +public enum CacheMode { + + /** + * The record's hotness is changed to "most recently used" by the + * operation. + * + *

    This cache mode is used when the application does not need explicit + * control over the cache and a standard LRU approach is sufficient.

    + * + *

    Note that {@code null} may be specified to use the {@code DEFAULT} + * mode.

    + * + *

    Specifically: + *

      + *
    • The BIN containing the record's LN will remain in the main + * cache, and it is moved to the hot end of its LRU list.
    • + * + *
    • When an off-heap cache is configured, the record's LN and BIN + * will be loaded into the main cache only. They will be removed from + * the off-heap cache, if they were present there. However, if other + * LNs belonging to this BIN were present in the off-heap cache, they + * will remain there.
    • + *
        + */ + DEFAULT, + + /** + * @deprecated please use {@link #DEFAULT} instead. As of JE 4.0, this mode + * functions exactly as if {@link #DEFAULT} were specified. + */ + KEEP_HOT, + + /** + * The record's hotness or coldness is unchanged by the operation where + * this cache mode is specified. + * + *

        This cache mode is normally used when the application prefers that + * the operation should not perturb the cache, for example, when scanning + * over all records in a database.

        + * + *

        Specifically: + *

          + *
        • A record's LN and BIN must be loaded into the main cache in + * order to perform the operation. However, they may be removed from + * the main cache after the operation, to avoid a net change to the + * cache, according to the rules below.
        • + * + *
        • If the record's LN was not present in the main cache prior to + * the operation, then the LN will be evicted from the main cache + * after the operation. The LN will not be added to, or removed from, + * the off-heap cache.
        • + * + *
        • When the LN is to be evicted from the main cache (according to + * the above rules) and the operation is not performed via a cursor, + * the LN is evicted when the operation is complete. When a cursor is + * used, the LN is evicted when the cursor is moved to a different + * record or closed.
        • + * + *
        • If the record's BIN was not present in the main cache prior to + * the operation, the action taken depends on whether the BIN is dirty + * and whether an off-heap cache is configured. + *
            + *
          • When the BIN is not dirty, the BIN (and LN) will be evicted + * from the main cache after the operation. The BIN (and LN) will + * not be added to, or removed from, the off-heap cache.
          • + * + *
          • When the BIN is dirty and an off-heap cache is + * not configured, the BIN will not be evicted from the + * main cache and will be moved to the hot end of its main cache + * LRU list. This is done to reduce logging.
          • + * + *
          • When the BIN is dirty and an off-heap cache is + * configured, we evict the BIN from the main cache even when it + * is dirty because the BIN (and LN) will be stored in the off-heap + * cache and the BIN will not be logged. The BIN will be placed at + * the hot end of its off-heap LRU list.
          • + * + *
• Note that when this operation loads the BIN and the BIN + * becomes dirty, it is normally because this operation is a write + * operation. However, other concurrent threads can also dirty the + * BIN.
          • + *
          + * + *
        • When the BIN is to be evicted from the main cache (according + * to the above rules) and the operation is not performed via a + * cursor, the BIN is evicted when the operation is complete. When a + * cursor is used, the BIN is evicted only when the cursor moves to a + * different BIN or is closed. Because of the way BINs are evicted, + * when multiple operations are performed using a single cursor and + * not perturbing the cache is desired, it is important to use this + * cache mode for all of the operations.
        • + * + *
        • When the BIN was present in the main cache prior to the + * operation, its position in the LRU list will not be changed. Its + * position in the off-heap LRU list, if it is present in the off-heap + * cache, will also not be changed.
        • + *
        + */ + UNCHANGED, + + /** + * @deprecated please use {@link #UNCHANGED} instead. As of JE 4.0, this + * mode functions exactly as if {@link #UNCHANGED} were specified. + */ + MAKE_COLD, + + /** + * The record's LN is evicted after the operation, and the containing + * BIN is moved to the hot end of the LRU list. + * + *

        This cache mode is normally used when not all LNs will fit into the + * main cache, and the application prefers to read the LN from the log file + * or load it from the off-heap cache when the record is accessed again, + * rather than have it take up space in the main cache and potentially + * cause expensive Java GC. By using this mode, the file system cache or + * off-heap cache can be relied on for holding LNs, which complements the + * use of the JE cache to hold BINs and INs.

        + * + *

        Note that using this mode for all operations will prevent the cache + * from filling, if all internal nodes fit in cache.

        + * + *

        Specifically: + *

          + *
        • The record's LN will be evicted from the main cache after the + * operation. The LN will be added to the off-heap cache, if it is not + * already present and an off-heap cache is configured.
        • + * + *
        • When the operation is not performed via a cursor, the LN is + * evicted when the operation is complete. When a cursor is used, the + * LN is evicted when the cursor is moved to a different record or + * closed.
        • + *
        + * + * @since 3.3.98 + */ + EVICT_LN, + + /** + * The record's BIN (and its LNs) are evicted after the operation. + * + *

This cache mode is normally used when not all BINs will fit into the + * main cache, and the application prefers to read the LN and BIN from the + * log file or load them from the off-heap cache when the record is accessed + * again, rather than have them take up space in the JE cache and + * potentially cause expensive Java GC.

        + * + *

        Because this mode evicts all LNs in the BIN, even if they are "hot" + * from the perspective of a different accessor, this mode should be used + * with caution. One valid use case is where all accessors use this mode; + * in this case the cache mode might be set on a per-Database or + * per-Environment basis.

        + * + *

        Note that using this mode for all operations will prevent the cache + * from filling, if all upper internal nodes fit in cache.

        + * + *

        Specifically: + *

          + *
        • The record's LN will be evicted from the main cache after the + * operation. The LN will be added to the off-heap cache, if it is not + * already present and an off-heap cache is configured.
        • + * + *
        • When the operation is not performed via a cursor, the LN is + * evicted when the operation is complete. When a cursor is used, the + * LN is evicted when the cursor is moved to a different record or + * closed.
        • + * + *
        • Whether the BIN is evicted depends on whether the BIN is dirty + * and whether an off-heap cache is configured. + *
            + *
          • When the BIN is not dirty, the BIN (and LN) will be evicted + * from the main cache after the operation. The BIN (and LN) will + * be added to the off-heap cache, if they are not already present + * and an off-heap cache is configured. The BIN will be placed at + * the hot end of its off-heap LRU list.
          • + * + *
          • When the BIN is dirty and an off-heap cache is + * not configured, the BIN will not be evicted from the + * main cache and will be moved to the hot end of its main cache + * LRU list. This is done to reduce logging.
          • + * + *
          • When the BIN is dirty and an off-heap cache is + * configured, we evict the BIN from the main cache even when it + * is dirty because the BIN (and LN) will be stored in the off-heap + * cache and the BIN will not be logged. The BIN will be placed at + * the hot end of its off-heap LRU list.
          • + * + *
• Note that the BIN may have been dirtied by this operation, if + * it is a write operation, or by earlier write operations.
          • + *
          + * + *
        • When the BIN is to be evicted from the main cache (according + * to the above rules) and the operation is not performed via a + * cursor, the BIN is evicted when the operation is complete. When a + * cursor is used, the BIN is evicted only when the cursor moves to a + * different BIN or is closed. Because of the way BINs are evicted, + * when multiple operations are performed using a single cursor and + * not perturbing the cache is desired, it is important to use this + * cache mode for all of the operations.
        • + *
        + * + * @since 4.0.97 + */ + EVICT_BIN, + + /** + * @hidden + * For internal use only. + * Placeholder to avoid DPL class evolution errors. Never actually used. + * @since 4.0.97 + */ + DYNAMIC +} diff --git a/src/com/sleepycat/je/CheckpointConfig.java b/src/com/sleepycat/je/CheckpointConfig.java new file mode 100644 index 0000000..be9712e --- /dev/null +++ b/src/com/sleepycat/je/CheckpointConfig.java @@ -0,0 +1,200 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Specifies the attributes of a checkpoint operation invoked from {@link + * com.sleepycat.je.Environment#checkpoint Environment.checkpoint}. + */ +public class CheckpointConfig implements Cloneable { + + /** + * Default configuration used if null is passed to {@link + * com.sleepycat.je.Environment#checkpoint Environment.checkpoint}. + */ + public static final CheckpointConfig DEFAULT = new CheckpointConfig(); + + private boolean force = false; + private int kBytes = 0; + private int minutes = 0; + private boolean minimizeRecoveryTime = false; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public CheckpointConfig() { + } + + /** + * Configures the checkpoint log data threshold, in kilobytes. + * + *
+ * <p>The default is 0 for this class and the database environment.
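+ *
+ * <p>An editorial sketch (the {@code env} handle is assumed): with this
+ * configuration, {@code Environment.checkpoint} performs a checkpoint
+ * only if more than 10 MB of log data has been written since the last
+ * checkpoint:
+ * <pre>
+ *     env.checkpoint(new CheckpointConfig().setKBytes(10240));
+ * </pre>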
        + * + * @param kBytes If the kBytes parameter is non-zero, a checkpoint will + * be performed if more than kBytes of log data have been written since + * the last checkpoint. + * + * @return this + */ + public CheckpointConfig setKBytes(int kBytes) { + setKBytesVoid(kBytes); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setKBytesVoid(int kBytes) { + this.kBytes = kBytes; + } + + /** + * Returns the checkpoint log data threshold, in kilobytes. + * + *
+ * <p>This method may be called at any time during the life of the
+ * application.
        + * + * @return The checkpoint log data threshold, in kilobytes. + */ + public int getKBytes() { + return kBytes; + } + + /** + * Configures the checkpoint time threshold, in minutes. + * + *
+ * <p>The default is 0 for this class and the database environment.
        + * + * @param minutes If the minutes parameter is non-zero, a checkpoint is + * performed if more than min minutes have passed since the last + * checkpoint. + * + * @return this + */ + public CheckpointConfig setMinutes(int minutes) { + setMinutesVoid(minutes); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMinutesVoid(int minutes) { + this.minutes = minutes; + } + + /** + * Returns the checkpoint time threshold, in minutes. + * + * @return The checkpoint time threshold, in minutes. + */ + public int getMinutes() { + return minutes; + } + + /** + * Configures the checkpoint force option. + * + *
+ * <p>The default is false for this class and the BDB JE environment.
        + * + * @param force If set to true, force a checkpoint, even if there has + * been no activity since the last checkpoint. + * + * @return this + */ + public CheckpointConfig setForce(boolean force) { + setForceVoid(force); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setForceVoid(boolean force) { + this.force = force; + } + + /** + * Returns the configuration of the checkpoint force option. + * + * @return The configuration of the checkpoint force option. + */ + public boolean getForce() { + return force; + } + + /** + * Configures the minimize recovery time option. + * + *
+ * <p>The default is false for this class and the BDB JE environment.
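+ *
+ * <p>An editorial sketch (the {@code env} handle is assumed) combining
+ * this option with a forced checkpoint:
+ * <pre>
+ *     CheckpointConfig cc = new CheckpointConfig();
+ *     cc.setForce(true);                 // run even with no activity
+ *     cc.setMinimizeRecoveryTime(true);  // slower checkpoint, faster open
+ *     env.checkpoint(cc);
+ * </pre>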
        + * + * @param minimizeRecoveryTime If set to true, the checkpoint will itself + * take longer but will cause a subsequent recovery (Environment.open) to + * finish more quickly. + * + * @return this + */ + public CheckpointConfig + setMinimizeRecoveryTime(boolean minimizeRecoveryTime) { + setMinimizeRecoveryTimeVoid(minimizeRecoveryTime); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMinimizeRecoveryTimeVoid(boolean minimizeRecoveryTime) { + this.minimizeRecoveryTime = minimizeRecoveryTime; + } + + /** + * Returns the configuration of the minimize recovery time option. + * + * @return The configuration of the minimize recovery time option. + */ + public boolean getMinimizeRecoveryTime() { + return minimizeRecoveryTime; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public CheckpointConfig clone() { + try { + return (CheckpointConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + return "minutes=" + minutes + + "\nkBytes=" + kBytes + + "\nforce=" + force + + "\nminimizeRecoveryTime=" + minimizeRecoveryTime + + "\n"; + } +} diff --git a/src/com/sleepycat/je/CheckpointConfigBeanInfo.java b/src/com/sleepycat/je/CheckpointConfigBeanInfo.java new file mode 100644 index 0000000..58df615 --- /dev/null +++ b/src/com/sleepycat/je/CheckpointConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class CheckpointConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(CheckpointConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(CheckpointConfig.class); + } +} diff --git a/src/com/sleepycat/je/CommitToken.java b/src/com/sleepycat/je/CommitToken.java new file mode 100644 index 0000000..e74e7b0 --- /dev/null +++ b/src/com/sleepycat/je/CommitToken.java @@ -0,0 +1,131 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je; + +import java.io.Serializable; +import java.util.UUID; + +import com.sleepycat.je.utilint.VLSN; + +/** + * Defines an opaque token that can be used to identify a specific transaction + * commit in a replicated environment. It's unique relative to its environment. + *
+ * <p>
        + * Since CommitTokens identify a point in the serialized transaction schedule + * created on the master, it's meaningful to compare commit tokens, + * as described in the {@link #compareTo(CommitToken)} method below. + * CommitTokens are obtained from {@link Transaction#getCommitToken()} + * + * @see com.sleepycat.je.rep.CommitPointConsistencyPolicy + */ +public class CommitToken implements Serializable, Comparable { + + private static final long serialVersionUID = 1L; + private final UUID repenvUUID; + private final long vlsn; + + /** + * @hidden + * For internal use only. + * Creates a CommitToken suitable for use in a consistency policy. + * + * @param repenvUUID identifies the replicated environment associated with + * the vlsn + * @param vlsn the vlsn representing the state of the database. + */ + public CommitToken(UUID repenvUUID, long vlsn) { + if (repenvUUID == null) { + throw EnvironmentFailureException.unexpectedState + ("The UUID must not be null"); + } + + if (vlsn == VLSN.NULL_VLSN_SEQUENCE) { + throw EnvironmentFailureException.unexpectedState + ("The vlsn must not be null"); + } + + this.repenvUUID = repenvUUID; + this.vlsn = vlsn; + } + + public UUID getRepenvUUID() { + return repenvUUID; + } + + public long getVLSN() { + return vlsn; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((repenvUUID == null) ? 0 : repenvUUID.hashCode()); + result = prime * result + (int) (vlsn ^ (vlsn >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof CommitToken)) { + return false; + } + CommitToken other = (CommitToken) obj; + if (repenvUUID == null) { + if (other.repenvUUID != null) { + return false; + } + } else if (!repenvUUID.equals(other.repenvUUID)) { + return false; + } + if (vlsn != other.vlsn) { + return false; + } + return true; + } + + /** + * Implements the Comparable interface. Note that it's not meaningful to + * compare commit tokens across environments, since they represent + * states in unrelated serialized transaction streams. + *
+ * <p>
        + * CommitToken(1) < CommitToken(2) implies that CommitToken(1) represents + * a state of the database that preceded the state defined by + * CommitToken(2). + * @throws IllegalArgumentException if two tokens from different + * environments are compared. + */ + public int compareTo(CommitToken other) { + if (! repenvUUID.equals(other.repenvUUID)) { + throw new IllegalArgumentException + ("Comparisons across environments are not meaningful. " + + "This environment: " + repenvUUID + + " other environment: " + other.getRepenvUUID()); + } + final long compare = vlsn - other.vlsn; + return (compare < 0) ? -1 : ((compare == 0) ? 0 : 1); + } + + @Override + public String toString() { + return "UUID: " + repenvUUID + " VLSN: " + vlsn; + } +} diff --git a/src/com/sleepycat/je/Cursor.java b/src/com/sleepycat/je/Cursor.java new file mode 100644 index 0000000..7d99cb8 --- /dev/null +++ b/src/com/sleepycat/je/Cursor.java @@ -0,0 +1,5542 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.Collection; +import java.util.Comparator; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.CursorImpl.LockStanding; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DupKeyData; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.ExpirationInfo; +import com.sleepycat.je.dbi.GetMode; +import com.sleepycat.je.dbi.PutMode; +import com.sleepycat.je.dbi.RangeConstraint; +import com.sleepycat.je.dbi.RangeRestartException; +import com.sleepycat.je.dbi.SearchMode; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.dbi.TriggerManager; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.CountEstimator; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.txn.BuddyLocker; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.utilint.DatabaseUtil; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * A database cursor. Cursors are used for operating on collections of records, + * for iterating over a database, and for saving handles to individual records, + * so that they can be modified after they have been read. + * + *
+ * <p>Cursors which are opened with a transaction instance are
+ * transactional cursors and may be used by multiple threads, but only
+ * serially. That is, the application must serialize access to the
+ * handle. Non-transactional cursors, opened with a null transaction
+ * instance, may not be used by multiple threads.
+ *
+ * <p>If the cursor is to be used to perform operations on behalf of a
+ * transaction, the cursor must be opened and closed within the context
+ * of that single transaction.
+ *
+ * <p>Once the cursor {@link #close} method has been called, the handle
+ * may not be accessed again, regardless of the {@code close} method's
+ * success or failure, with one exception: the {@code close} method
+ * itself may be called any number of times to simplify error handling.
+ *
+ * <p>To obtain a cursor with default attributes:
+ * <pre>
+ *     Cursor cursor = myDatabase.openCursor(txn, null);
+ * </pre>
+ *
+ * <p>To customize the attributes of a cursor, use a CursorConfig object.
+ * <pre>
+ *     CursorConfig config = new CursorConfig();
+ *     config.setReadUncommitted(true);
+ *     Cursor cursor = myDatabase.openCursor(txn, config);
+ * </pre>
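+ *
+ * <p>An editorial sketch (not part of the original Javadoc) of the
+ * open/use/close pattern for a transactional cursor, assuming existing
+ * {@code myEnv} and {@code myDatabase} handles:
+ * <pre>
+ *     Transaction txn = myEnv.beginTransaction(null, null);
+ *     Cursor cursor = myDatabase.openCursor(txn, null);
+ *     try {
+ *         // perform all operations for this cursor on behalf of txn
+ *     } finally {
+ *         cursor.close();  // always close before commit or abort
+ *     }
+ *     txn.commit();
+ * </pre>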
+ * <p>Modifications to the database during a sequential scan will be
+ * reflected in the scan; that is, records inserted behind a cursor will
+ * not be returned while records inserted in front of a cursor will be
+ * returned.
+ *
+ * <p>By default, a cursor is "sticky", meaning that the prior position
+ * is maintained by cursor movement operations, and the cursor stays at
+ * the prior position when the operation does not succeed. However, it
+ * is possible to configure a cursor as non-sticky to enable certain
+ * performance benefits. See {@link CursorConfig#setNonSticky} for
+ * details.
+ *
+ * <h3>Using Null and Partial DatabaseEntry Parameters</h3>
+ *
+ * <p>Null can be passed for DatabaseEntry output parameters if the
+ * value is not needed. The {@link DatabaseEntry#setPartial
+ * DatabaseEntry Partial} property can also be used to optimize in
+ * certain cases. These provide varying degrees of performance benefits
+ * that depend on the specific operation, as described below.
+ *
+ * <p>When retrieving a record with a {@link Database} or {@link Cursor}
+ * method, if only the key is needed by the application then the
+ * retrieval of the data item can be suppressed by passing null. If null
+ * is passed as the data parameter, the data item will not be returned
+ * by the {@code Database} or {@code Cursor} method.
+ *
+ * <p>Suppressing the return of the data item potentially has a large
+ * performance benefit. In this case, if the record data is not already
+ * in the JE cache, it will not be read from disk. The performance
+ * benefit is potentially large because random access disk reads may be
+ * reduced. Example use cases are:
+ * <ul>
+ * <li>Scanning all records in key order, when the data is not
+ * needed.</li>
+ * <li>Skipping over records quickly with {@code READ_UNCOMMITTED}
+ * isolation to select records for further processing by examining the
+ * key value.</li>
+ * </ul>
+ *
+ * <p>Note that by "record data" we mean both the {@code data} parameter
+ * for a regular or primary DB, and the {@code pKey} parameter for a
+ * secondary DB. However, the performance advantage of a key-only
+ * operation does not apply to databases configured for duplicates. For
+ * a duplicates DB, the data is always available along with the key and
+ * does not have to be fetched separately.
+ *
+ * <p>The Partial property may also be used to retrieve or update only a
+ * portion of a data item. This avoids copying the entire record between
+ * the JE cache and the application data parameter. However, this
+ * feature has less of a performance benefit than one might assume,
+ * since the entire record is always read or written to the database,
+ * and the entire record is cached. A partial update may be performed
+ * only with {@link Cursor#putCurrent Cursor.putCurrent}.
+ *
+ * <p>A null or partial DatabaseEntry output parameter may also be used
+ * in other cases, for example, to retrieve a partial key item. However,
+ * in practice this has limited value since the entire key is usually
+ * needed by the application, and the benefit of copying a portion of
+ * the key is generally very small.
+ *
+ * <p>Historical note: Prior to JE 7.0, null could not be passed for
+ * output parameters. Instead, {@code DatabaseEntry.setPartial(0, 0,
+ * true)} was called for a data parameter to avoid reading the record's
+ * data. Now, null can be passed instead.
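+ *
+ * <p>An editorial sketch (not part of the original Javadoc) of a
+ * key-only scan that passes null for the data parameter so record data
+ * is never fetched; {@code myDatabase} is assumed:
+ * <pre>
+ *     Cursor cursor = myDatabase.openCursor(null, null);
+ *     try {
+ *         DatabaseEntry key = new DatabaseEntry();
+ *         while (cursor.get(key, null, Get.NEXT, null) != null) {
+ *             // examine key.getData(); the data item was not fetched
+ *         }
+ *     } finally {
+ *         cursor.close();
+ *     }
+ * </pre>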
        + */ +public class Cursor implements ForwardCursor { + + static final ReadOptions DEFAULT_READ_OPTIONS = new ReadOptions(); + static final WriteOptions DEFAULT_WRITE_OPTIONS = new WriteOptions(); + + private static final DatabaseEntry EMPTY_DUP_DATA = + new DatabaseEntry(new byte[0]); + + static final DatabaseEntry NO_RETURN_DATA = new DatabaseEntry(); + + static { + NO_RETURN_DATA.setPartial(0, 0, true); + } + + /** + * The CursorConfig used to configure this cursor. + */ + CursorConfig config; + + /* User Transacational, or null if none. */ + private Transaction transaction; + + /** + * Handle under which this cursor was created; may be null when the cursor + * is used internally. + */ + private Database dbHandle; + + /** + * Database implementation. + */ + private DatabaseImpl dbImpl; + + /** + * The underlying cursor. + */ + CursorImpl cursorImpl; // Used by subclasses. + + private boolean updateOperationsProhibited; + + /* Attributes */ + private boolean readUncommittedDefault; + private boolean serializableIsolationDefault; + + private boolean nonSticky = false; + + private CacheMode defaultCacheMode; + + /* + * For range searches, it establishes the upper bound (K2) of the search + * range via a function that returns false if a key is >= K2. + */ + private RangeConstraint rangeConstraint; + + private Logger logger; + + /** + * Creates a cursor for a given user transaction with + * retainNonTxnLocks=false. + * + *
+ * <p>If txn is null, a non-transactional cursor will be created that
+ * releases locks for the prior operation when the next operation
+ * succeeds.
        + */ + Cursor(final Database dbHandle, + final Transaction txn, + CursorConfig cursorConfig) { + + if (cursorConfig == null) { + cursorConfig = CursorConfig.DEFAULT; + } + + /* Check that Database is open for internal Cursor usage. */ + final DatabaseImpl dbImpl = dbHandle.checkOpen(); + + /* Do not allow auto-commit when creating a user cursor. */ + Locker locker = LockerFactory.getReadableLocker( + dbHandle, txn, cursorConfig.getReadCommitted()); + + init(dbHandle, dbImpl, locker, cursorConfig, + false /*retainNonTxnLocks*/); + } + + /** + * Creates a cursor for a given locker with retainNonTxnLocks=false. + * + *
+ * <p>If locker is null or is non-transactional, a non-transactional
+ * cursor will be created that releases locks for the prior operation
+ * when the next operation succeeds.
        + */ + Cursor(final Database dbHandle, Locker locker, CursorConfig cursorConfig) { + + if (cursorConfig == null) { + cursorConfig = CursorConfig.DEFAULT; + } + + /* Check that Database is open for internal Cursor usage. */ + final DatabaseImpl dbImpl = dbHandle.checkOpen(); + + locker = LockerFactory.getReadableLocker( + dbHandle, locker, cursorConfig.getReadCommitted()); + + init(dbHandle, dbImpl, locker, cursorConfig, + false /*retainNonTxnLocks*/); + } + + /** + * Creates a cursor for a given locker and retainNonTxnLocks parameter. + * + *
+ * <p>The locker parameter must be non-null. With this constructor, we
+ * use the given locker and retainNonTxnLocks parameter without applying
+ * any special rules for different lockers -- the caller must supply the
+ * correct locker and retainNonTxnLocks combination.
        + */ + Cursor(final Database dbHandle, + final Locker locker, + CursorConfig cursorConfig, + final boolean retainNonTxnLocks) { + + if (cursorConfig == null) { + cursorConfig = CursorConfig.DEFAULT; + } + + /* Check that Database is open for internal Cursor usage. */ + final DatabaseImpl dbImpl = dbHandle.checkOpen(); + + init(dbHandle, dbImpl, locker, cursorConfig, retainNonTxnLocks); + } + + /** + * Creates a cursor for a given locker and retainNonTxnLocks parameter, + * without a Database handle. + * + *
+ * <p>The locker parameter must be non-null. With this constructor, we
+ * use the given locker and retainNonTxnLocks parameter without applying
+ * any special rules for different lockers -- the caller must supply the
+ * correct locker and retainNonTxnLocks combination.
        + */ + Cursor(final DatabaseImpl databaseImpl, + final Locker locker, + CursorConfig cursorConfig, + final boolean retainNonTxnLocks) { + + if (cursorConfig == null) { + cursorConfig = CursorConfig.DEFAULT; + } + + /* Check that Database is open for internal Cursor usage. */ + if (dbHandle != null) { + dbHandle.checkOpen(); + } + + init(null /*dbHandle*/, databaseImpl, locker, cursorConfig, + retainNonTxnLocks); + } + + private void init(final Database dbHandle, + final DatabaseImpl databaseImpl, + final Locker locker, + final CursorConfig cursorConfig, + final boolean retainNonTxnLocks) { + assert locker != null; + + /* + * Allow locker to perform "open cursor" actions, such as consistency + * checks for a non-transactional locker on a Replica. + */ + try { + locker.openCursorHook(databaseImpl); + } catch (RuntimeException e) { + locker.operationEnd(); + throw e; + } + + cursorImpl = new CursorImpl( + databaseImpl, locker, retainNonTxnLocks, isSecondaryCursor()); + + transaction = locker.getTransaction(); + + /* Perform eviction for user cursors. */ + cursorImpl.setAllowEviction(true); + + readUncommittedDefault = + cursorConfig.getReadUncommitted() || + locker.isReadUncommittedDefault(); + + serializableIsolationDefault = + cursorImpl.getLocker().isSerializableIsolation(); + + /* Be sure to keep this logic in sync with checkUpdatesAllowed. */ + updateOperationsProhibited = + locker.isReadOnly() || + (dbHandle != null && !dbHandle.isWritable()) || + (databaseImpl.isTransactional() && !locker.isTransactional()) || + (databaseImpl.isReplicated() == locker.isLocalWrite()); + + this.dbImpl = databaseImpl; + if (dbHandle != null) { + this.dbHandle = dbHandle; + dbHandle.addCursor(this); + } + + this.config = cursorConfig; + this.logger = databaseImpl.getEnv().getLogger(); + + nonSticky = cursorConfig.getNonSticky(); + + setCacheMode(null); + } + + /** + * Copy constructor. + */ + Cursor(final Cursor cursor, final boolean samePosition) { + + readUncommittedDefault = cursor.readUncommittedDefault; + serializableIsolationDefault = cursor.serializableIsolationDefault; + updateOperationsProhibited = cursor.updateOperationsProhibited; + + cursorImpl = cursor.cursorImpl.cloneCursor(samePosition); + dbImpl = cursor.dbImpl; + dbHandle = cursor.dbHandle; + if (dbHandle != null) { + dbHandle.addCursor(this); + } + config = cursor.config; + logger = dbImpl.getEnv().getLogger(); + defaultCacheMode = cursor.defaultCacheMode; + nonSticky = cursor.nonSticky; + } + + boolean isSecondaryCursor() { + return false; + } + + /** + * Sets non-sticky mode. + * + * @see CursorConfig#setNonSticky + */ + void setNonSticky(final boolean nonSticky) { + this.nonSticky = nonSticky; + } + + /** + * Internal entrypoint. + */ + CursorImpl getCursorImpl() { + return cursorImpl; + } + + /** + * Returns the Database handle associated with this Cursor. + * + * @return The Database handle associated with this Cursor. + */ + public Database getDatabase() { + return dbHandle; + } + + /** + * Always returns non-null, while getDatabase() returns null if no handle + * is associated with this cursor. + */ + DatabaseImpl getDatabaseImpl() { + return dbImpl; + } + + /** + * Returns this cursor's configuration. + * + *
+ * <p>This may differ from the configuration used to open this object if
+ * the cursor existed previously.
        + * + * @return This cursor's configuration. + */ + public CursorConfig getConfig() { + try { + return config.clone(); + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Returns the default {@code CacheMode} used for subsequent operations + * performed using this cursor. If {@link #setCacheMode} has not been + * called with a non-null value, the configured Database or Environment + * default is returned. + * + * @return the {@code CacheMode} default used for subsequent operations + * using this cursor. + */ + public CacheMode getCacheMode() { + return defaultCacheMode; + } + + /** + * Sets the {@code CacheMode} default used for subsequent operations + * performed using this cursor. This method may be used to override the + * defaults specified using {@link DatabaseConfig#setCacheMode} and {@link + * EnvironmentConfig#setCacheMode}. Note that the default is always + * overridden by a non-null cache mode that is specified via + * {@link ReadOptions} or {@link WriteOptions}. + * + * @param cacheMode is the default {@code CacheMode} used for subsequent + * operations using this cursor, or null to configure the Database or + * Environment default. + * + * @see CacheMode for further details. + */ + public void setCacheMode(final CacheMode cacheMode) { + + this.defaultCacheMode = + (cacheMode != null) ? cacheMode : dbImpl.getDefaultCacheMode(); + } + + /** + * @hidden + * For internal use only. + * Used by KVStore. + * + * A RangeConstraint is used by search-range and next/previous methods to + * prevent keys that are not inside the range from being returned. + * + * This method is not yet part of the public API because it has not been + * designed with future-proofing or generality in mind, and has not been + * reviewed. + */ + public void setRangeConstraint(RangeConstraint rangeConstraint) { + if (dbImpl.getSortedDuplicates()) { + throw new UnsupportedOperationException("Not allowed with dups"); + } + this.rangeConstraint = rangeConstraint; + } + + private void setPrefixConstraint(final Cursor c, final byte[] keyBytes2) { + c.rangeConstraint = new RangeConstraint() { + public boolean inBounds(byte[] checkKey) { + return DupKeyData.compareMainKey( + checkKey, keyBytes2, dbImpl.getBtreeComparator()) == 0; + } + }; + } + + private void setPrefixConstraint(final Cursor c, + final DatabaseEntry key2) { + c.rangeConstraint = new RangeConstraint() { + public boolean inBounds(byte[] checkKey) { + return DupKeyData.compareMainKey( + checkKey, key2.getData(), key2.getOffset(), + key2.getSize(), dbImpl.getBtreeComparator()) == 0; + } + }; + } + + private boolean checkRangeConstraint(final DatabaseEntry key) { + assert key.getOffset() == 0; + assert key.getData().length == key.getSize(); + + if (rangeConstraint == null) { + return true; + } + + return rangeConstraint.inBounds(key.getData()); + } + + /** + * Discards the cursor. + * + *
+ * <p>The cursor handle may not be used again after this method has been
+ * called, regardless of the method's success or failure, with one
+ * exception: the {@code close} method itself may be called any number
+ * of times.
+ *
+ * <p>WARNING: To guard against memory leaks, the application should
+ * discard all references to the closed handle. While BDB makes an
+ * effort to discard references from closed objects to the allocated
+ * memory for an environment, this behavior is not guaranteed. The safe
+ * course of action for an application is to discard all references to
+ * closed BDB objects.
        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + public void close() { + try { + if (cursorImpl.isClosed()) { + return; + } + + /* + * Do not call checkState here, to allow closing a cursor after an + * operation failure. [#17015] + */ + checkEnv(); + cursorImpl.close(); + if (dbHandle != null) { + dbHandle.removeCursor(this); + dbHandle = null; + } + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Returns a new cursor with the same transaction and locker ID as the + * original cursor. + * + *
+ * <p>This is useful when an application is using locking and requires
+ * two or more cursors in the same thread of control.
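+ *
+ * <p>An editorial sketch: cloning a positioned cursor so that two
+ * cursors can be moved independently from the same record:
+ * <pre>
+ *     Cursor cursor2 = cursor.dup(true /*samePosition*/);
+ *     try {
+ *         // move cursor2 without disturbing the original cursor
+ *     } finally {
+ *         cursor2.close();
+ *     }
+ * </pre>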
        + * + * @param samePosition If true, the newly created cursor is initialized + * to refer to the same position in the database as the original cursor + * (if any) and hold the same locks (if any). If false, or the original + * cursor does not hold a database position and locks, the returned + * cursor is uninitialized and will behave like a newly created cursor. + * + * @return A new cursor with the same transaction and locker ID as the + * original cursor. + * + * @throws com.sleepycat.je.rep.DatabasePreemptedException in a replicated + * environment if the master has truncated, removed or renamed the + * database. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed. + */ + public Cursor dup(final boolean samePosition) { + try { + checkOpenAndState(false); + return new Cursor(this, samePosition); + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Deletes the record to which the cursor refers. When the database has + * associated secondary databases, this method also deletes the associated + * index records. + * + *
+ * <p>The cursor position is unchanged after a delete, and subsequent
+ * calls to cursor functions expecting the cursor to refer to an
+ * existing record will fail.
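+ *
+ * <p>An editorial sketch: deleting the record at the cursor position
+ * and detecting a record that was already deleted:
+ * <pre>
+ *     OperationResult r = cursor.delete(null);
+ *     if (r == null) {
+ *         // the record at the cursor position was already deleted
+ *     }
+ * </pre>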
        + * + * @param options the WriteOptions, or null to use default options. + * + * @return the OperationResult if the record is deleted, else null if the + * record at the cursor position has already been deleted. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is transactional + * but this cursor was not opened with a non-null transaction parameter, + * or the database is read-only. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @since 7.0 + */ + public OperationResult delete(final WriteOptions options) { + + checkOpenAndState(true); + + trace(Level.FINEST, "Cursor.delete: ", null); + + final CacheMode cacheMode = + options != null ? options.getCacheMode() : null; + + return deleteInternal(dbImpl.getRepContext(), cacheMode); + } + + /** + * Deletes the record to which the cursor refers. When the database has + * associated secondary databases, this method also deletes the associated + * index records. + * + *
+ * <p>The cursor position is unchanged after a delete, and subsequent
+ * calls to cursor functions expecting the cursor to refer to an
+ * existing record will fail.
+ *
+ * <p>Calling this method is equivalent to calling {@link
+ * #delete(WriteOptions)}.
        + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY + * OperationStatus.KEYEMPTY} if the record at the cursor position has + * already been deleted; otherwise, {@link + * com.sleepycat.je.OperationStatus#SUCCESS OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is transactional + * but this cursor was not opened with a non-null transaction parameter, + * or the database is read-only. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + */ + public OperationStatus delete() { + final OperationResult result = delete(null); + return result == null ? + OperationStatus.KEYEMPTY : OperationStatus.SUCCESS; + } + + /** + * Inserts or updates a record according to the specified {@link Put} + * type. + * + *
+ * <p>If the operation succeeds, the record will be locked according to
+ * the {@link ReadOptions#getLockMode() lock mode} specified, the cursor
+ * will be positioned on the record, and a non-null OperationResult will
+ * be returned. If the operation fails because the record already exists
+ * (or does not exist, depending on the putType), null is returned.
+ *
+ * <p>When the database has associated secondary databases, this method
+ * also inserts or deletes associated index records as necessary.
+ *
+ * <p>The following table lists each allowed operation. See the
+ * individual {@link Put} operations for more information.
+ *
+ * <table>
+ * <tr>
+ *   <th>Put operation</th>
+ *   <th>Description</th>
+ *   <th>Returns null when?</th>
+ *   <th>Other special rules</th>
+ * </tr>
+ * <tr>
+ *   <td>{@link Put#OVERWRITE}</td>
+ *   <td>Inserts or updates a record depending on whether a matching
+ *   record is already present.</td>
+ *   <td>Never returns null.</td>
+ *   <td>Without duplicates, a matching record is one with the same key;
+ *   with duplicates, it is one with the same key and data.</td>
+ * </tr>
+ * <tr>
+ *   <td>{@link Put#NO_OVERWRITE}</td>
+ *   <td>Inserts a record if a record with a matching key is not already
+ *   present.</td>
+ *   <td>When an existing record matches.</td>
+ *   <td>If the database has duplicate keys, a record is inserted only
+ *   if there are no records with a matching key.</td>
+ * </tr>
+ * <tr>
+ *   <td>{@link Put#NO_DUP_DATA}</td>
+ *   <td>Inserts a record in a database with duplicate keys if a record
+ *   with a matching key and data is not already present.</td>
+ *   <td>When an existing record matches.</td>
+ *   <td>Without duplicates, this operation is not allowed.</td>
+ * </tr>
+ * <tr>
+ *   <td>{@link Put#CURRENT}</td>
+ *   <td>Updates the data of the record at the cursor position.</td>
+ *   <td>When the record at the cursor position has been deleted.</td>
+ *   <td>With duplicates, the data must be considered equal by the
+ *   duplicate comparator, meaning that changing the data is only
+ *   possible if a custom duplicate comparator is configured.
+ *   <p>Cannot be used to update the key of an existing record and in
+ *   fact the key parameter must be null.
+ *   <p>A partial data item may be specified to optimize for partial
+ *   data update.</td>
+ * </tr>
+ * </table>
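+ *
+ * <p>An editorial sketch of an insert-if-absent using {@link
+ * Put#NO_OVERWRITE}; construction of {@code key} and {@code data} is
+ * assumed:
+ * <pre>
+ *     OperationResult r = cursor.put(key, data, Put.NO_OVERWRITE, null);
+ *     if (r == null) {
+ *         // a record with this key already exists
+ *     }
+ * </pre>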
        + * + * @param key the key used as + * input. Must be null when + * putType is {@code Put.CURRENT}. + * + * @param data the data used as + * input. May be partial only when + * putType is {@code Put.CURRENT}. + * + * @param putType the Put operation type. May not be null. + * + * @param options the WriteOptions, or null to use default options. + * + * @return the OperationResult if the record is written, else null. + * + * @throws DuplicateDataException if putType is Put.CURRENT and the old and + * new data are not equal according to the configured duplicate comparator + * or default comparator. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is transactional + * but this cursor was not opened with a non-null transaction parameter, + * or the database is read-only, or putType is Put.NO_DUP_DATA and the + * database is not configured for duplicates. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null putType, a null input key/data parameter, + * an input key/data parameter with a null data array, a partial key/data + * input parameter. + * + * @since 7.0 + */ + public OperationResult put( + final DatabaseEntry key, + final DatabaseEntry data, + final Put putType, + final WriteOptions options) { + + try { + checkOpen(); + + trace( + Level.FINEST, "Cursor.put: ", String.valueOf(putType), + key, data, null); + + return putInternal(key, data, putType, options); + + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Performs the put() operation except for state checking and tracing. + * + * Allows passing a throughput stat index so it can be called for Database + * and SecondaryCursor operations. + */ + OperationResult putInternal( + final DatabaseEntry key, + final DatabaseEntry data, + final Put putType, + WriteOptions options) { + + DatabaseUtil.checkForNullParam(putType, "putType"); + + if (putType == Put.CURRENT) { + if (key != null) { + throw new IllegalArgumentException( + "The key must be null for Put.Current"); + } + } else { + DatabaseUtil.checkForNullDbt(key, "key", true); + } + + if (key != null) { + DatabaseUtil.checkForPartial(key, "key"); + } + + DatabaseUtil.checkForNullDbt(data, "data", true); + + checkState(putType == Put.CURRENT /*mustBeInitialized*/); + + if (options == null) { + options = DEFAULT_WRITE_OPTIONS; + } + + return putInternal( + key, data, options.getCacheMode(), + ExpirationInfo.getInfo(options), + putType.getPutMode()); + } + + /** + * Stores a key/data pair into the database. + * + *
+ * <p>Calling this method is equivalent to calling {@link
+ * #put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)} with
+ * {@link Put#OVERWRITE}.
+ *
+ * <p>If the put method succeeds, the cursor is positioned to refer to
+ * the newly inserted item.
+ *
+ * <p>If the key already appears in the database and duplicates are
+ * supported, the new data value is inserted at the correct sorted
+ * location, unless the new data value also appears in the database
+ * already. In the latter case, although the given key/data pair
+ * compares equal to an existing key/data pair, the two records may not
+ * be identical if custom comparators are used, in which case the
+ * existing record will be replaced with the new record. If the key
+ * already appears in the database and duplicates are not supported,
+ * the data associated with the key will be replaced.
        + * + * @param key the key used as + * input.. + * + * @param data the data used as + * input. + * + * @return {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is transactional + * but this cursor was not opened with a non-null transaction parameter, + * or the database is read-only. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus put( + final DatabaseEntry key, + final DatabaseEntry data) { + + final OperationResult result = put(key, data, Put.OVERWRITE, null); + + EnvironmentFailureException.assertState(result != null); + return OperationStatus.SUCCESS; + } + + /** + * Stores a key/data pair into the database. + * + *
+ * <p>Calling this method is equivalent to calling {@link
+ * #put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)} with
+ * {@link Put#NO_OVERWRITE}.
+ *
+ * <p>If the putNoOverwrite method succeeds, the cursor is positioned to
+ * refer to the newly inserted item.
+ *
+ * <p>If the key already appears in the database, putNoOverwrite will
+ * return {@link com.sleepycat.je.OperationStatus#KEYEXIST
+ * OperationStatus.KEYEXIST}.
        + * + * @param key the key used as + * input.. + * + * @param data the data used as + * input. + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEXIST + * OperationStatus.KEYEXIST} if the key already appears in the database, + * else {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS} + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is transactional + * but this cursor was not opened with a non-null transaction parameter, + * or the database is read-only. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus putNoOverwrite( + final DatabaseEntry key, + final DatabaseEntry data) { + + final OperationResult result = put( + key, data, Put.NO_OVERWRITE, null); + + return result == null ? + OperationStatus.KEYEXIST : OperationStatus.SUCCESS; + } + + /** + * Stores a key/data pair into the database. The database must be + * configured for duplicates. + * + *
+ * <p>Calling this method is equivalent to calling {@link
+ * #put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)} with
+ * {@link Put#NO_DUP_DATA}.
+ *
+ * <p>If the putNoDupData method succeeds, the cursor is positioned to
+ * refer to the newly inserted item.
+ *
+ * <p>Insert the specified key/data pair into the database, unless a
+ * key/data pair comparing equally to it already exists in the database.
+ * If a matching key/data pair already exists in the database, {@link
+ * com.sleepycat.je.OperationStatus#KEYEXIST OperationStatus.KEYEXIST}
+ * is returned.
        + * + * @param key the key used as + * input.. + * + * @param data the data used as + * input. + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEXIST + * OperationStatus.KEYEXIST} if the key/data pair already appears in the + * database, else {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS} + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is transactional + * but this cursor was not opened with a non-null transaction parameter, or + * the database is read-only, or the database is not configured for + * duplicates. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus putNoDupData( + final DatabaseEntry key, + final DatabaseEntry data) { + + final OperationResult result = put( + key, data, Put.NO_DUP_DATA, null); + + return result == null ? + OperationStatus.KEYEXIST : OperationStatus.SUCCESS; + } + + /** + * Replaces the data in the key/data pair at the current cursor position. + * + *
+ * <p>Calling this method is equivalent to calling {@link
+ * #put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)} with
+ * {@link Put#CURRENT}.
+ *
+ * <p>Overwrite the data of the key/data pair to which the cursor refers
+ * with the specified data item. This method will return
+ * OperationStatus.KEYEMPTY if the cursor currently refers to an
+ * already-deleted key/data pair.
+ *
+ * <p>For a database that does not support duplicates, the data may be
+ * changed by this method. If duplicates are supported, the data may be
+ * changed only if a custom partial comparator is configured and the
+ * comparator considers the old and new data to be equal (that is, the
+ * comparator returns zero). For more information on partial comparators
+ * see {@link DatabaseConfig#setDuplicateComparator}.
+ *
+ * <p>If the old and new data are unequal according to the comparator, a
+ * {@link DuplicateDataException} is thrown. Changing the data in this
+ * case would change the sort order of the record, which would change
+ * the cursor position, and this is not allowed. To change the sort
+ * order of a record, delete it and then re-insert it.
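+ *
+ * <p>An editorial sketch of a partial update with putCurrent, replacing
+ * the byte range [0, 4) of the stored data; the offsets are
+ * illustrative and {@code newBytes} is assumed:
+ * <pre>
+ *     DatabaseEntry partial = new DatabaseEntry(newBytes);
+ *     partial.setPartial(0, 4, true);
+ *     OperationStatus status = cursor.putCurrent(partial);
+ * </pre>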
        + * + * @param data the data used as + * input. + * A partial data item may be + * specified to optimize for partial data update. + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY + * OperationStatus.KEYEMPTY} if the key/pair at the cursor position has + * been deleted; otherwise, {@link + * com.sleepycat.je.OperationStatus#SUCCESS OperationStatus.SUCCESS}. + * + * @throws DuplicateDataException if the old and new data are not equal + * according to the configured duplicate comparator or default comparator. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is transactional + * but this cursor was not opened with a non-null transaction parameter, + * or the database is read-only. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus putCurrent(final DatabaseEntry data) { + + final OperationResult result = put(null, data, Put.CURRENT, null); + + return result == null ? + OperationStatus.KEYEMPTY : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to a record according to the specified {@link Get} + * type. + * + *
+ * <p>If the operation succeeds, the record at the resulting cursor
+ * position will be locked according to the {@link
+ * ReadOptions#getLockMode() lock mode} specified, the key and/or data
+ * will be returned via the (non-null) DatabaseEntry parameters, and a
+ * non-null OperationResult will be returned. If the operation fails
+ * because the record requested is not found, null is returned.
+ *
+ * <p>The following table lists each allowed operation and whether the
+ * key and data parameters are input or output parameters. Also
+ * specified is whether the cursor must be initialized (positioned on a
+ * record) before calling this method. See the individual {@link Get}
+ * operations for more information.
+ *
+ * <table>
+ * <tr><th>Get operation</th><th>Description</th>
+ *     <th>'key' parameter</th><th>'data' parameter</th>
+ *     <th>Cursor position must be initialized?</th></tr>
+ * <tr><td>{@link Get#SEARCH}</td>
+ *     <td>Searches using an exact match by key.</td>
+ *     <td>input</td><td>output</td><td>no</td></tr>
+ * <tr><td>{@link Get#SEARCH_BOTH}</td>
+ *     <td>Searches using an exact match by key and data.</td>
+ *     <td>input</td><td>input</td><td>no</td></tr>
+ * <tr><td>{@link Get#SEARCH_GTE}</td>
+ *     <td>Searches using a GTE match by key.</td>
+ *     <td>input/output</td><td>output</td><td>no</td></tr>
+ * <tr><td>{@link Get#SEARCH_BOTH_GTE}</td>
+ *     <td>Searches using an exact match by key and a GTE match by
+ *     data.</td>
+ *     <td>input</td><td>input/output</td><td>no</td></tr>
+ * <tr><td>{@link Get#CURRENT}</td>
+ *     <td>Accesses the current record.</td>
+ *     <td>output</td><td>output</td><td>yes</td></tr>
+ * <tr><td>{@link Get#FIRST}</td>
+ *     <td>Finds the first record in the database.</td>
+ *     <td>output</td><td>output</td><td>no</td></tr>
+ * <tr><td>{@link Get#LAST}</td>
+ *     <td>Finds the last record in the database.</td>
+ *     <td>output</td><td>output</td><td>no</td></tr>
+ * <tr><td>{@link Get#NEXT}</td>
+ *     <td>Moves to the next record.</td>
+ *     <td>output</td><td>output</td><td>no**</td></tr>
+ * <tr><td>{@link Get#NEXT_DUP}</td>
+ *     <td>Moves to the next record with the same key.</td>
+ *     <td>output</td><td>output</td><td>yes</td></tr>
+ * <tr><td>{@link Get#NEXT_NO_DUP}</td>
+ *     <td>Moves to the next record with a different key.</td>
+ *     <td>output</td><td>output</td><td>no**</td></tr>
+ * <tr><td>{@link Get#PREV}</td>
+ *     <td>Moves to the previous record.</td>
+ *     <td>output</td><td>output</td><td>no**</td></tr>
+ * <tr><td>{@link Get#PREV_DUP}</td>
+ *     <td>Moves to the previous record with the same key.</td>
+ *     <td>output</td><td>output</td><td>yes</td></tr>
+ * <tr><td>{@link Get#PREV_NO_DUP}</td>
+ *     <td>Moves to the previous record with a different key.</td>
+ *     <td>output</td><td>output</td><td>no**</td></tr>
+ * </table>
+ *
+ * <p>** - For these 'next' and 'previous' operations the cursor may be
+ * uninitialized, in which case the cursor will be moved to the first or
+ * last record, respectively.
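+ *
+ * <p>An editorial sketch: positioning the cursor at the first record
+ * whose key is greater than or equal to a given key
+ * ({@code searchKeyBytes} is assumed):
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry(searchKeyBytes);
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     if (cursor.get(key, data, Get.SEARCH_GTE, null) != null) {
+ *         // 'key' now holds the matching key (input/output parameter)
+ *     }
+ * </pre>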
        + * + * @param key the key input or output parameter, depending on getType. + * + * @param data the data input or output parameter, depending on getType. + * + * @param getType the Get operation type. May not be null. + * + * @param options the ReadOptions, or null to use default options. + * + * @return the OperationResult if the record requested is found, else null. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * the cursor is uninitialized (not positioned on a record) and this is not + * permitted (see above), or the non-transactional cursor was created in a + * different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null getType, a null input key/data parameter, + * an input key/data parameter with a null data array, a partial key/data + * input parameter, and specifying a {@link ReadOptions#getLockMode() + * lock mode} of READ_COMMITTED. + * + * @since 7.0 + */ + public OperationResult get( + final DatabaseEntry key, + final DatabaseEntry data, + final Get getType, + ReadOptions options) { + + try { + checkOpen(); + + if (options == null) { + options = DEFAULT_READ_OPTIONS; + } + + final LockMode lockMode = options.getLockMode(); + + trace( + Level.FINEST, "Cursor.get: ", String.valueOf(getType), + key, data, lockMode); + + return getInternal(key, data, getType, options, lockMode); + + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Performs the get() operation except for state checking and tracing. + * + * The LockMode is passed because for Database operations it is sometimes + * different than ReadOptions.getLockMode. + * + * Allows passing a throughput stat index so it can be called for Database + * and SecondaryCursor operations. + */ + OperationResult getInternal( + DatabaseEntry key, + DatabaseEntry data, + Get getType, + final ReadOptions options, + final LockMode lockMode) { + + DatabaseUtil.checkForNullParam(getType, "getType"); + + final CacheMode cacheMode = options.getCacheMode(); + final SearchMode searchMode = getType.getSearchMode(); + + if (searchMode != null) { + checkState(false /*mustBeInitialized*/); + + DatabaseUtil.checkForNullDbt(key, "key", true); + DatabaseUtil.checkForPartial(key, "key"); + + if (searchMode.isDataSearch()) { + DatabaseUtil.checkForNullDbt(data, "data", true); + DatabaseUtil.checkForPartial(data, "data"); + } else { + if (data == null) { + data = NO_RETURN_DATA; + } + } + + return search(key, data, lockMode, cacheMode, searchMode, true); + } + + if (key == null) { + key = NO_RETURN_DATA; + } + if (data == null) { + data = NO_RETURN_DATA; + } + + GetMode getMode = getType.getGetMode(); + + if (getType.getAllowNextPrevUninitialized() && + cursorImpl.isNotInitialized()) { + + assert getMode != null; + getType = getMode.isForward() ? 
Get.FIRST : Get.LAST; + getMode = null; + } + + if (getMode != null) { + checkState(true /*mustBeInitialized*/); + + return retrieveNext(key, data, lockMode, cacheMode, getMode); + } + + if (getType == Get.CURRENT) { + checkState(true /*mustBeInitialized*/); + + return getCurrentInternal(key, data, lockMode, cacheMode); + } + + assert getType == Get.FIRST || getType == Get.LAST; + checkState(false /*mustBeInitialized*/); + + return position(key, data, lockMode, cacheMode, getType == Get.FIRST); + } + + /** + * Returns the key/data pair to which the cursor refers. + * + *
+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#CURRENT}.
        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY + * OperationStatus.KEYEMPTY} if the key/pair at the cursor position has + * been deleted; otherwise, {@link + * com.sleepycat.je.OperationStatus#SUCCESS OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getCurrent( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.CURRENT, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.KEYEMPTY : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the first key/data pair of the database, and returns + * that pair. If the first key has duplicate values, the first data item + * in the set of duplicates is returned. + * + *
+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#FIRST}.
        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getFirst( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.FIRST, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the last key/data pair of the database, and returns + * that pair. If the last key has duplicate values, the last data item in + * the set of duplicates is returned. + * + *
+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#LAST}.
        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getLast( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.LAST, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the next key/data pair and returns that pair. + * + *

        Calling this method is equivalent to calling {@link + * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with + * {@link Get#NEXT}.

        + * + *

+ * <p>If the cursor is not yet initialized, it is moved to the first
+ * key/data pair of the database, and that pair is returned. Otherwise,
+ * the cursor is moved to the next key/data pair of the database, and that
+ * pair is returned. In the presence of duplicate key values, the value of
+ * the key may not change.</p>
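+ * <p>For example, a full forward scan might look like the following
+ * sketch (assuming {@code cursor} is a newly opened cursor):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     // An uninitialized cursor starts at the first record.
+ *     while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+ *            OperationStatus.SUCCESS) {
+ *         // Process one key/data pair per iteration.
+ *     }
+ * </pre>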

        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getNext( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.NEXT, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * If the next key/data pair of the database is a duplicate data record for + * the current key/data pair, moves the cursor to the next key/data pair of + * the database and returns that pair. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#NEXT_DUP}.</p>
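+ * <p>For example, to visit the remaining duplicates of the key at the
+ * current position (an illustrative sketch; {@code cursor} is assumed to
+ * be positioned on a record in a database configured for sorted
+ * duplicates):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     while (cursor.getNextDup(key, data, LockMode.DEFAULT) ==
+ *            OperationStatus.SUCCESS) {
+ *         // data holds the next duplicate for the current key.
+ *     }
+ * </pre>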

        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getNextDup( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.NEXT_DUP, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the next non-duplicate key/data pair and returns + * that pair. If the matching key has duplicate values, the first data + * item in the set of duplicates is returned. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#NEXT_NO_DUP}.</p>

        + * + *

+ * <p>If the cursor is not yet initialized, it is moved to the first
+ * key/data pair of the database, and that pair is returned. Otherwise,
+ * the cursor is moved to the next non-duplicate key of the database, and
+ * that key/data pair is returned.</p>
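+ * <p>For example, to visit each distinct key once, skipping duplicates
+ * (an illustrative sketch; {@code cursor} is a newly opened cursor):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     while (cursor.getNextNoDup(key, data, LockMode.DEFAULT) ==
+ *            OperationStatus.SUCCESS) {
+ *         // One iteration per distinct key; data is the first duplicate.
+ *     }
+ * </pre>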

        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getNextNoDup( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.NEXT_NO_DUP, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the previous key/data pair and returns that pair. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#PREV}.</p>

        + * + *

+ * <p>If the cursor is not yet initialized, it is moved to the last
+ * key/data pair of the database, and that pair is returned. Otherwise,
+ * the cursor is moved to the previous key/data pair of the database, and
+ * that pair is returned. In the presence of duplicate key values, the
+ * value of the key may not change.</p>
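+ * <p>For example, a full reverse scan might look like the following
+ * sketch (assuming {@code cursor} is a newly opened cursor):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     // An uninitialized cursor starts at the last record.
+ *     while (cursor.getPrev(key, data, LockMode.DEFAULT) ==
+ *            OperationStatus.SUCCESS) {
+ *         // Records are visited in descending key order.
+ *     }
+ * </pre>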

        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getPrev( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.PREV, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * If the previous key/data pair of the database is a duplicate data record + * for the current key/data pair, moves the cursor to the previous key/data + * pair of the database and returns that pair. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#PREV_DUP}.</p>

        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getPrevDup( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.PREV_DUP, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the previous non-duplicate key/data pair and returns + * that pair. If the matching key has duplicate values, the last data item + * in the set of duplicates is returned. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#PREV_NO_DUP}.</p>

        + * + *

+ * <p>If the cursor is not yet initialized, it is moved to the last
+ * key/data pair of the database, and that pair is returned. Otherwise,
+ * the cursor is moved to the previous non-duplicate key of the database,
+ * and that key/data pair is returned.</p>

        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getPrevNoDup( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.PREV_NO_DUP, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Skips forward a given number of key/data pairs and returns the number by + * which the cursor is moved. + * + *

+ * <p>Without regard to performance, calling this method is equivalent to
+ * repeatedly calling {@link #getNext getNext} with {@link
+ * LockMode#READ_UNCOMMITTED} to skip over the desired number of key/data
+ * pairs, and then calling {@link #getCurrent getCurrent} with the {@code
+ * lockMode} parameter to return the final key/data pair.</p>

        + * + *

+ * <p>With regard to performance, this method is optimized to skip over
+ * key/value pairs using a smaller number of Btree operations. When there
+ * is no contention on the bottom internal nodes (BINs) and all BINs are in
+ * cache, the number of Btree operations is reduced by roughly two orders
+ * of magnitude, where the exact number depends on the {@link
+ * EnvironmentConfig#NODE_MAX_ENTRIES} setting. When there is contention
+ * on BINs or fetching BINs is required, the scan is broken up into smaller
+ * operations to avoid blocking other threads for long time periods.</p>

        + * + *

+ * <p>If the returned count is greater than zero, then the key/data pair
+ * at the new cursor position is also returned. If zero is returned, then
+ * there are no key/value pairs that follow the cursor position and a
+ * key/data pair is not returned.</p>
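+ * <p>For example, a simple paging scheme might position the cursor at the
+ * start of the next page (an illustrative sketch; {@code cursor} is
+ * assumed to be positioned on a record, and {@code pageSize} is a page
+ * size chosen by the application):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     long moved = cursor.skipNext(pageSize, key, data, LockMode.DEFAULT);
+ *     if (moved == pageSize) {
+ *         // key and data hold the first record of the next page.
+ *     } else {
+ *         // Fewer than pageSize records remain; moved may be zero.
+ *     }
+ * </pre>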

        + * + * @param maxCount the maximum number of key/data pairs to skip, i.e., the + * maximum number by which the cursor should be moved; must be greater + * than zero. + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return the number of key/data pairs skipped, i.e., the number by which + * the cursor has moved; if zero is returned, the cursor position is + * unchanged and the key/data pair is not returned. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public long skipNext( + final long maxCount, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + checkOpenAndState(true); + if (maxCount <= 0) { + throw new IllegalArgumentException("maxCount must be positive: " + + maxCount); + } + trace(Level.FINEST, "Cursor.skipNext: ", lockMode); + + return skipInternal( + maxCount, true /*forward*/, key, data, lockMode, null); + } + + /** + * Skips backward a given number of key/data pairs and returns the number + * by which the cursor is moved. + * + *

+ * <p>Without regard to performance, calling this method is equivalent to
+ * repeatedly calling {@link #getPrev getPrev} with {@link
+ * LockMode#READ_UNCOMMITTED} to skip over the desired number of key/data
+ * pairs, and then calling {@link #getCurrent getCurrent} with the {@code
+ * lockMode} parameter to return the final key/data pair.</p>

        + * + *

+ * <p>With regard to performance, this method is optimized to skip over
+ * key/value pairs using a smaller number of Btree operations. When there
+ * is no contention on the bottom internal nodes (BINs) and all BINs are in
+ * cache, the number of Btree operations is reduced by roughly two orders
+ * of magnitude, where the exact number depends on the {@link
+ * EnvironmentConfig#NODE_MAX_ENTRIES} setting. When there is contention
+ * on BINs or fetching BINs is required, the scan is broken up into smaller
+ * operations to avoid blocking other threads for long time periods.</p>

        + * + *

+ * <p>If the returned count is greater than zero, then the key/data pair
+ * at the new cursor position is also returned. If zero is returned, then
+ * there are no key/value pairs that precede the cursor position and a
+ * key/data pair is not returned.</p>

        + * + *

+ * <p>In a replicated environment, an explicit transaction must have been
+ * specified when opening the cursor, unless read-uncommitted isolation is
+ * specified via the {@link CursorConfig} or {@link LockMode}
+ * parameter.</p>

        + * + * @param maxCount the maximum number of key/data pairs to skip, i.e., the + * maximum number by which the cursor should be moved; must be greater + * than zero. + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return the number of key/data pairs skipped, i.e., the number by which + * the cursor has moved; if zero is returned, the cursor position is + * unchanged and the key/data pair is not returned. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public long skipPrev( + final long maxCount, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + checkOpenAndState(true); + if (maxCount <= 0) { + throw new IllegalArgumentException("maxCount must be positive: " + + maxCount); + } + trace(Level.FINEST, "Cursor.skipPrev: ", lockMode); + + return skipInternal( + maxCount, false /*forward*/, key, data, lockMode, null); + } + + /** + * Moves the cursor to the given key of the database, and returns the datum + * associated with the given key. If the matching key has duplicate + * values, the first data item in the set of duplicates is returned. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#SEARCH}.</p>
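+ * <p>For example, an exact-match lookup (an illustrative sketch;
+ * {@code keyBytes} is assumed to hold the application-encoded search
+ * key):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry(keyBytes);
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     if (cursor.getSearchKey(key, data, LockMode.DEFAULT) ==
+ *         OperationStatus.SUCCESS) {
+ *         // data holds the first duplicate for the exact key match.
+ *     }
+ * </pre>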

        + * + * @param key the key used as + * input. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getSearchKey( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.SEARCH, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the closest matching key of the database, and + * returns the data item associated with the matching key. If the matching + * key has duplicate values, the first data item in the set of duplicates + * is returned. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#SEARCH_GTE}.</p>

        + * + *

+ * <p>The returned key/data pair is for the smallest key greater than or
+ * equal to the specified key (as determined by the key comparison
+ * function), permitting partial key matches and range searches.</p>
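+ * <p>For example, to begin a range scan at the first key greater than or
+ * equal to a search key (an illustrative sketch; {@code keyBytes} is
+ * assumed to hold the application-encoded search key):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry(keyBytes);
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     OperationStatus status =
+ *         cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
+ *     while (status == OperationStatus.SUCCESS) {
+ *         // key holds the matching or next greater key.
+ *         status = cursor.getNext(key, data, LockMode.DEFAULT);
+ *     }
+ * </pre>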

        + * + * @param key the key used as + * input and returned as output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes + * are used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getSearchKeyRange( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.SEARCH_GTE, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the specified key/data pair, where both the key and + * data items must match. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#SEARCH_BOTH}.</p>

        + * + * @param key the key used as + * input. + * + * @param data the data used as + * input. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getSearchBoth( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.SEARCH_BOTH, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to the specified key and closest matching data item of + * the database. + * + *

+ * <p>Calling this method is equivalent to calling {@link
+ * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with
+ * {@link Get#SEARCH_BOTH_GTE}.</p>

        + * + *

+ * <p>In the case of any database supporting sorted duplicate sets, the
+ * returned key/data pair is for the smallest data item greater than or
+ * equal to the specified data item (as determined by the duplicate
+ * comparison function), permitting partial matches and range searches in
+ * duplicate data sets.</p>

        + * + *

+ * <p>In the case of databases that do not support sorted duplicate sets,
+ * this method is equivalent to {@link #getSearchBoth getSearchBoth}.</p>
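+ * <p>For example, to position within the duplicate set of a known key at
+ * the first data item greater than or equal to a given datum (an
+ * illustrative sketch; {@code keyBytes} and {@code dataBytes} are assumed
+ * to hold application-encoded values):</p>
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry(keyBytes);
+ *     DatabaseEntry data = new DatabaseEntry(dataBytes);
+ *     if (cursor.getSearchBothRange(key, data, LockMode.DEFAULT) ==
+ *         OperationStatus.SUCCESS) {
+ *         // data holds the matching or next greater duplicate.
+ *     }
+ * </pre>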

        + * + * @param key the key used as + * input. + * + * @param data the data used as + * input and returned as output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getSearchBothRange( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.SEARCH_BOTH_GTE, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Returns a count of the number of data items for the key to which the + * cursor refers. + * + *

+ * <p>If the database is configured for duplicates, the database is
+ * scanned internally, without taking any record locks, to count the
+ * number of non-deleted entries. Although the internal scan is more
+ * efficient under some conditions, the result is the same as if a cursor
+ * were used to iterate over the entries using {@link
+ * LockMode#READ_UNCOMMITTED}.</p>

        + * + *

+ * <p>If the database is not configured for duplicates, the count returned
+ * is always zero or one, depending on whether the record at the cursor
+ * position is deleted.</p>

        + * + *

+ * <p>The cost of this method is directly proportional to the number of
+ * records scanned.</p>
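+ * <p>For example (an illustrative sketch; {@code cursor} is assumed to be
+ * positioned on a record in a database configured for duplicates):</p>
+ * <pre>
+ *     int dupCount = cursor.count();
+ *     // dupCount is the number of non-deleted duplicates for this key.
+ * </pre>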

        + * + * @return A count of the number of data items for the key to which the + * cursor refers. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + */ + public int count() { + + checkOpenAndState(true); + trace(Level.FINEST, "Cursor.count: ", null); + + return countInternal(); + } + + /** + * Returns a rough estimate of the count of the number of data items for + * the key to which the cursor refers. + * + *

+ * <p>If the database is configured for duplicates, a quick estimate of
+ * the number of records is computed using information in the Btree.
+ * Because the Btree is unbalanced, in some cases the estimate may be off
+ * by a factor of two or more. The estimate is accurate when the number of
+ * records is less than the configured {@link
+ * DatabaseConfig#setNodeMaxEntries NodeMaxEntries}.</p>

        + * + *

+ * <p>If the database is not configured for duplicates, the count returned
+ * is always zero or one, depending on whether the record at the cursor
+ * position is deleted.</p>

        + * + *

+ * <p>The cost of this method is fixed, rather than being proportional to
+ * the number of records scanned. Because its accuracy is variable, it
+ * should normally be used only when an exact count is not required and a
+ * fixed-cost operation is needed, such as for query optimization. For
+ * example, this method is used internally for determining the index
+ * processing order in a {@link JoinCursor}.</p>
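+ * <p>For example, an application choosing between the exact and estimated
+ * counts might use a sketch like the following:</p>
+ * <pre>
+ *     // Exact, but proportional to the number of duplicates:
+ *     int exact = cursor.count();
+ *     // Approximate, but fixed cost, e.g. for query planning:
+ *     long estimate = cursor.countEstimate();
+ * </pre>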

        + * + * @return an estimate of the count of the number of data items for the key + * to which the cursor refers. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + */ + public long countEstimate() { + + checkOpenAndState(true); + trace(Level.FINEST, "Cursor.countEstimate: ", null); + + return countEstimateInternal(); + } + + /** + * Version of deleteInternal that does not check disk limits. Used for + * replication stream replay. + * + * Notifies triggers and prevents phantoms. Note that although deleteNotify + * is called, secondaries are not populated because this cursor is internal + * and has no associated Database handle. + */ + OperationResult deleteForReplay(final ReplicationContext repContext) { + return deleteNotify(repContext, null); + } + + /** + * Internal version of delete() that does no parameter checking. Notify + * triggers, update secondaries and enforce foreign key constraints. + */ + OperationResult deleteInternal(final ReplicationContext repContext, + final CacheMode cacheMode) { + checkUpdatesAllowed(); + return deleteNotify(repContext, cacheMode); + } + + /** + * Implementation of deleteInternal that does not check disk limits. + * + * Note that this algorithm is duplicated in Database and Cursor for + * efficiency reasons: in Cursor delete we must separately fetch the key + * and data, while in Database delete we know the key and have to search + * anyway so we can get the old data when we search. The two algorithms + * need to be kept in sync. + */ + private OperationResult deleteNotify(final ReplicationContext repContext, + final CacheMode cacheMode) { + + final boolean hasUserTriggers = (dbImpl.getTriggers() != null); + final boolean hasAssociations = (dbHandle != null) && + dbHandle.hasSecondaryOrForeignKeyAssociations(); + + if (hasAssociations) { + try { + dbImpl.getEnv().getSecondaryAssociationLock(). + readLock().lockInterruptibly(); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(dbImpl.getEnv(), e); + } + } + try { + /* The key is needed if there are secondaries or triggers. */ + final DatabaseEntry key; + if (hasAssociations || hasUserTriggers) { + key = new DatabaseEntry(); + key.setData(cursorImpl.getCurrentKey()); + } else { + key = null; + } + + /* + * Get secondaries from the association and determine whether the + * old data is needed. + */ + final Collection secondaries; + final Collection fkSecondaries; + final boolean needOldData; + if (hasAssociations) { + secondaries = dbHandle.secAssoc.getSecondaries(key); + fkSecondaries = dbHandle.foreignKeySecondaries; + needOldData = hasUserTriggers || + SecondaryDatabase.needOldDataForDelete(secondaries); + } else { + secondaries = null; + fkSecondaries = null; + needOldData = hasUserTriggers; + } + + /* + * Get old data only if needed. Even if the old data is not + * needed, if there are associations we must lock the record with + * RMW before calling onForeignKeyDelete. + */ + final DatabaseEntry oldData = + needOldData ? 
(new DatabaseEntry()) : null; + + final OperationResult readResult; + + if (needOldData || hasAssociations) { + readResult = getCurrentInternal( + key, oldData, LockMode.RMW, cacheMode); + + if (readResult == null) { + return null; + } + } else { + readResult = null; + } + + /* + * Enforce foreign key constraints first, so that + * ForeignKeyDeleteAction.ABORT is applied before deletions. + */ + final Locker locker = cursorImpl.getLocker(); + if (fkSecondaries != null) { + for (final SecondaryDatabase secDb : fkSecondaries) { + secDb.onForeignKeyDelete(locker, key, cacheMode); + } + } + + /* + * The actual deletion. + */ + final OperationResult deleteResult = + deleteNoNotify(cacheMode, repContext); + + if (deleteResult == null) { + return null; + } + + /* + * Update secondaries after actual deletion, so that replica replay + * will lock the primary before the secondaries. This locking order + * is required for secondary deadlock avoidance. + */ + if (secondaries != null) { + int nWrites = 0; + + for (final SecondaryDatabase secDb : secondaries) { + nWrites += secDb.updateSecondary( + locker, null /*cursor*/, key, + oldData, null /*newData*/, cacheMode, + 0 /*expirationTime*/, false /*expirationUpdated*/, + readResult.getExpirationTime()); + } + + cursorImpl.setNSecondaryWrites(nWrites); + } + + /* Run triggers after actual deletion. */ + if (hasUserTriggers) { + TriggerManager.runDeleteTriggers(locker, dbImpl, key, oldData); + } + + return deleteResult; + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } finally { + if (hasAssociations) { + dbImpl.getEnv().getSecondaryAssociationLock(). + readLock().unlock(); + } + } + } + + /** + * Delete at current position. Does not notify triggers (does not perform + * secondary updates). + */ + OperationResult deleteNoNotify(final CacheMode cacheMode, + final ReplicationContext repContext) { + + synchronized (getTxnSynchronizer()) { + checkTxnState(); + + /* + * No need to use a dup cursor, since this operation does not + * change the cursor position. + */ + beginUseExistingCursor(cacheMode); + + final OperationResult result = + cursorImpl.deleteCurrentRecord(repContext); + + if (result != null) { + dbImpl.getEnv().incDeleteOps(dbImpl); + } + + endUseExistingCursor(); + return result; + } + } + + /** + * Version of putInternal that allows passing an existing LN, does not + * interpret duplicates, and does not check disk limits. Used for + * replication stream replay. + * + * Notifies triggers and prevents phantoms. Note that although putNotify + * is called, secondaries are not populated because this cursor is internal + * and has no associated Database handle. + */ + OperationResult putForReplay( + final DatabaseEntry key, + final DatabaseEntry data, + final LN ln, + final int expiration, + final boolean expirationInHours, + final PutMode putMode, + final ReplicationContext repContext) { + + assert putMode != PutMode.CURRENT; + + final ExpirationInfo expInfo = new ExpirationInfo( + expiration, expirationInHours, true /*updateExpiration*/); + + synchronized (getTxnSynchronizer()) { + checkTxnState(); + + return putNotify( + key, data, ln, null, expInfo, putMode, repContext); + } + } + + /** + * Internal version of put that does no parameter checking. Interprets + * duplicates, notifies triggers, and prevents phantoms. 
+ */ + OperationResult putInternal( + final DatabaseEntry key, + final DatabaseEntry data, + final CacheMode cacheMode, + final ExpirationInfo expInfo, + final PutMode putMode) { + + checkUpdatesAllowed(expInfo); + + synchronized (getTxnSynchronizer()) { + checkTxnState(); + + if (dbImpl.getSortedDuplicates()) { + return putHandleDups( + key, data, cacheMode, expInfo, putMode); + } + + if (putMode == PutMode.NO_DUP_DATA) { + throw new UnsupportedOperationException( + "Database is not configured for duplicate data."); + } + + return putNoDups( + key, data, cacheMode, expInfo, putMode); + } + } + + /** + * Interpret duplicates for the various 'putXXX' operations. + */ + private OperationResult putHandleDups( + final DatabaseEntry key, + final DatabaseEntry data, + final CacheMode cacheMode, + final ExpirationInfo expInfo, + final PutMode putMode) { + + switch (putMode) { + case OVERWRITE: + return dupsPutOverwrite(key, data, cacheMode, expInfo); + case NO_OVERWRITE: + return dupsPutNoOverwrite(key, data, cacheMode, expInfo); + case NO_DUP_DATA: + return dupsPutNoDupData(key, data, cacheMode, expInfo); + case CURRENT: + return dupsPutCurrent(data, cacheMode, expInfo); + default: + throw EnvironmentFailureException.unexpectedState( + putMode.toString()); + } + } + + /** + * Interpret duplicates for the put() operation. + */ + private OperationResult dupsPutOverwrite( + final DatabaseEntry key, + final DatabaseEntry data, + final CacheMode cacheMode, + final ExpirationInfo expInfo) { + + final DatabaseEntry twoPartKey = DupKeyData.combine(key, data); + + return putNoDups( + twoPartKey, EMPTY_DUP_DATA, cacheMode, expInfo, + PutMode.OVERWRITE); + } + + /** + * Interpret duplicates for putNoOverwrite() operation. + * + * The main purpose of this method is to guarantee that when two threads + * call putNoOverwrite concurrently, only one of them will succeed. In + * other words, if putNoOverwrite is called for all dup insertions, there + * will always be at most one dup per key. + * + * Next key locking must be used to prevent two insertions, since there is + * no other way to block an insertion of dup Y in another thread, while + * inserting dup X in the current thread. This is tested by AtomicPutTest. + * + * Although this method does extra searching and locking compared to + * putNoOverwrite for a non-dup DB (or to putNoDupData for a dup DB), that + * is not considered a significant issue because this method is rarely, if + * ever, used by applications (for dup DBs that is). It exists primarily + * for compatibility with the DB core API. + */ + private OperationResult dupsPutNoOverwrite( + final DatabaseEntry key, + final DatabaseEntry data, + final CacheMode cacheMode, + final ExpirationInfo expInfo) { + + final DatabaseEntry key2 = new DatabaseEntry(); + final DatabaseEntry data2 = new DatabaseEntry(); + + try (final Cursor c = dup(false /*samePosition*/)) { + c.setNonSticky(true); + + /* Lock next key (or EOF if none) exclusively, before we insert. */ + setEntry(key, key2); + + OperationResult result = c.dupsGetSearchKeyRange( + key2, data2, LockMode.RMW, cacheMode); + + if (result != null && key.equals(key2)) { + /* Key exists, no need for further checks. */ + return null; + } + if (result == null) { + /* No next key exists, lock EOF. */ + c.cursorImpl.lockEof(LockType.WRITE); + } + + /* While next key is locked, check for key existence again. 
*/ + setEntry(key, key2); + + result = c.dupsGetSearchKey(key2, data2, LockMode.RMW, cacheMode); + + if (result != null) { + return null; + } + + /* Insertion can safely be done now. */ + result = c.dupsPutNoDupData(key, data, cacheMode, expInfo); + + if (result == null) { + return null; + } + + /* We successfully inserted the first dup for the key. */ + swapCursor(c); + return result; + } + } + + /** + * Interpret duplicates for putNoDupData operation. + */ + private OperationResult dupsPutNoDupData( + final DatabaseEntry key, + final DatabaseEntry data, + final CacheMode cacheMode, + final ExpirationInfo expInfo) { + + final DatabaseEntry twoPartKey = DupKeyData.combine(key, data); + + return putNoDups( + twoPartKey, EMPTY_DUP_DATA, cacheMode, expInfo, + PutMode.NO_OVERWRITE); + } + + /** + * Interpret duplicates for putCurrent operation. + * + * Get old key/data, replace data portion, and put new key/data. + * + * Arguably we could skip the replacement if there is no user defined + * comparison function and the new data is the same. + */ + private OperationResult dupsPutCurrent( + final DatabaseEntry newData, + final CacheMode cacheMode, + final ExpirationInfo expInfo) { + + final DatabaseEntry oldTwoPartKey = + new DatabaseEntry(cursorImpl.getCurrentKey()); + + final DatabaseEntry key = new DatabaseEntry(); + DupKeyData.split(oldTwoPartKey, key, null); + + final DatabaseEntry newTwoPartKey = DupKeyData.combine(key, newData); + + return putNoDups( + newTwoPartKey, EMPTY_DUP_DATA, cacheMode, expInfo, + PutMode.CURRENT); + } + + /** + * Eventually, all insertions/updates are happening via this method. + */ + private OperationResult putNoDups( + final DatabaseEntry key, + final DatabaseEntry data, + final CacheMode cacheMode, + final ExpirationInfo expInfo, + final PutMode putMode) { + + final LN ln = (putMode == PutMode.CURRENT) ? + null : + LN.makeLN(dbImpl.getEnv(), data); + + return putNotify( + key, data, ln, cacheMode, expInfo, putMode, + dbImpl.getRepContext()); + } + + /** + * This single method is used for all put operations in order to notify + * triggers and perform secondary updates in one place. Prevents phantoms. + * Does not interpret duplicates. + * + * WARNING: When the cursor has no Database handle, which is true when + * called from the replication replayer, this method notifies user triggers + * but does not do secondary updates. This is correct for replication + * because secondary updates are part of the replication stream. However, + * it is fragile because other operations, when no Database handle is used, + * will not perform secondary updates. This isn't currently a problem + * because a Database handle is present for all user operations. But it is + * fragile and needs work. + * + * @param putMode One of OVERWRITE, NO_OVERWITE, CURRENT. (NO_DUPS_DATA + * has been converted to NO_OVERWRITE). Note: OVERWRITE may perform an + * insertion or an update, NO_OVERWRITE performs insertion only, and + * CURRENT updates the slot where the cursor is currently positioned at. + * + * @param key The new key value for the BIN slot S to be inserted/updated. + * Cannot be partial. For a no-dups DB, it is null if the putMode is + * CURRENT. For dups DBs it is a 2-part key: if the putMode is CURRENT, + * it combines the current primary key of slot S with the original, + * user-provided data; for OVERWRITE and NO_OVERWRITE, it combines the + * original, user-provided key and data. 
In case of update, "key" must + * compare equal to S.key (otherwise DuplicateDataException is thrown), + * but the 2 keys may not be identical if custom comparators are used. + * So, S.key will actually be replaced by "key". + * + * @param data The new data for the LN associated with the BIN slot. For + * dups DBs it is EMPTY_DUPS_DATA. Note: for dups DBs the original, + * user-provided "data" must not be partial. + * + * @param ln LN to be inserted, if insertion is allowed by putMode. null + * for CURRENT (since insertion is not allowed), not null for other modes. + */ + private OperationResult putNotify( + DatabaseEntry key, + final DatabaseEntry data, + final LN ln, + final CacheMode cacheMode, + ExpirationInfo expInfo, + final PutMode putMode, + final ReplicationContext repContext) { + + final boolean hasUserTriggers = (dbImpl.getTriggers() != null); + final boolean hasAssociations = (dbHandle != null) && + dbHandle.hasSecondaryOrForeignKeyAssociations(); + + if (hasAssociations) { + try { + dbImpl.getEnv().getSecondaryAssociationLock(). + readLock().lockInterruptibly(); + + } catch (InterruptedException e) { + throw new ThreadInterruptedException(dbImpl.getEnv(), e); + } + } + + try { + final OperationResult result; + DatabaseEntry replaceKey = null; + + if (putMode == PutMode.CURRENT) { + if (key == null) { + /* + * This is a no-dups DB. The slot key will not be affected + * by the update. However, if there are indexes/triggers, + * the value of the key is needed to update/apply the + * indexes/triggers after the update. So, it must be + * returned by the putCurrentNoNotify() call below. + * Furthermore, for indexes, the value of the key is needed + * before the update as well, to determine which indexes + * actually must be updated and whether the old data is + * also needed to do the index updates. So, we read the + * value of the key here by what is effectively a + * dirty-read. + */ + if (hasAssociations || hasUserTriggers) { + key = new DatabaseEntry(); + /* + * Latch this.bin and make "key" point to the + * slot key; then unlatch this.bin. + */ + key.setData(cursorImpl.getCurrentKey()); + } + } else { + /* + * This is a dups DB. The slot key must be replaced by the + * given 2-part key. We don't need the pre-update slot key. + */ + replaceKey = key; + } + } + + /* + * - oldData: if needed, will be set to the LN data before the + * update. + * - newData: if needed, will be set to the full LN data after + * the update; may be different than newData only if newData + * is partial. + */ + DatabaseEntry oldData = null; + DatabaseEntry newData = null; + + /* + * Get secondaries from the association and determine whether the + * old data and new data is needed. + */ + Collection secondaries = null; + + if (hasAssociations || hasUserTriggers) { + + if (data.getPartial()) { + newData = new DatabaseEntry(); + } + + if (hasUserTriggers) { + oldData = new DatabaseEntry(); + } + + if (hasAssociations) { + secondaries = dbHandle.secAssoc.getSecondaries(key); + if (oldData == null && + SecondaryDatabase.needOldDataForUpdate(secondaries)) { + oldData = new DatabaseEntry(); + } + + /* + * Even if the TTL is not specified or changed, we need the + * ExpirationUpdated and OldExpirationTime for the + * secondary update. + */ + if (expInfo == null) { + expInfo = new ExpirationInfo(0, false, false); + } + } + } + + /* Perform the actual put operation. 
*/ + if (putMode == PutMode.CURRENT) { + + result = putCurrentNoNotify( + replaceKey, data, oldData, newData, cacheMode, + expInfo, repContext); + + } else { + + result = putNoNotify( + key, data, ln, cacheMode, expInfo, putMode, + oldData, newData, repContext); + } + + if (result == null) { + return null; + } + + /* + * If returned data is null, then + * 1. this is an insertion not an update, or + * 2. an expired LN was purged and the data could not be fetched. + * + * The latter case is acceptable because the old data is needed + * only to delete secondary records, and if the LN expired then + * those secondary records will also expire naturally. The old + * expirationTime is passed to updateSecondary below, which will + * prevent secondary integrity errors. + */ + if (oldData != null && oldData.getData() == null) { + oldData = null; + } + + if (newData == null) { + newData = data; + } + + /* + * Update secondaries and notify triggers. Pass newData, not data, + * since data may be partial. + */ + final Locker locker = cursorImpl.getLocker(); + + if (secondaries != null) { + int nWrites = 0; + + for (final SecondaryDatabase secDb : secondaries) { + + if (!result.isUpdate() || + secDb.updateMayChangeSecondary()) { + + nWrites += secDb.updateSecondary( + locker, null, key, oldData, newData, + cacheMode, + result.getExpirationTime(), + expInfo.getExpirationUpdated(), + expInfo.getOldExpirationTime()); + } + } + + cursorImpl.setNSecondaryWrites(nWrites); + } + + if (hasUserTriggers) { + TriggerManager.runPutTriggers( + locker, dbImpl, key, oldData, newData); + } + + return result; + + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } finally { + if (hasAssociations) { + dbImpl.getEnv().getSecondaryAssociationLock(). + readLock().unlock(); + } + } + } + + /** + * Search for the key and perform insertion or update. Does not notify + * triggers or perform secondary updates. Prevents phantoms. + * + * @param putMode is either OVERWRITE, NO_OEVERWRITE, or BLIND_INSERTION + * + * @param key The new key value for the BIN slot S to be inserted/updated. + * Cannot be partial. For dups DBs it is a 2-part key combining the + * original, user-provided key and data. In case of update, "key" must + * compare equal to S.key (otherwise DuplicateDataException is thrown), + * but the 2 keys may not be identical if custom comparators are used. + * So, S.key will actually be replaced by "key". + * + * @param data In case of update, the new data to (perhaps partially) + * replace the data of the LN associated with the BIN slot. For dups DBs + * it is EMPTY_DUPS_DATA. Note: for dups DBs the original, user-provided + * "data" must not be partial. + * + * @param ln is normally a new LN node that is created for insertion, and + * will be discarded if an update occurs. However, HA will pass an + * existing node. + * + * @param returnOldData To receive, in case of update, the old LN data + * (before the update). It is needed only by DBs with indexes/triggers; + * will be null otherwise. + * + * @param returnNewData To receive the full data of the new or updated LN. + * It is needed only by DBs with indexes/triggers and only if "data" is + * partial; will be null otherwise. Note: "returnNewData" may be different + * than "data" only if "data" is partial. + + * @return OperationResult where isUpdate() distinguishes insertions and + * updates. 
+ */ + private OperationResult putNoNotify( + final DatabaseEntry key, + final DatabaseEntry data, + final LN ln, + final CacheMode cacheMode, + final ExpirationInfo expInfo, + final PutMode putMode, + final DatabaseEntry returnOldData, + final DatabaseEntry returnNewData, + final ReplicationContext repContext) { + + assert key != null; + assert ln != null; + assert putMode != null; + assert putMode != PutMode.CURRENT; + + Locker nextKeyLocker = null; + CursorImpl nextKeyCursor = null; + CursorImpl dup = null; + OperationResult result = null; + boolean success = false; + + try { + final EnvironmentImpl envImpl = dbImpl.getEnv(); + + /* + * If other transactions are serializable, lock the next key. + * BUG ???? What if a serializable txn starts after the check + * below returns false? At least, if this cursor is using a + * serializable txn, it SHOULD do next key locking unconditionally. + */ + Locker cursorLocker = cursorImpl.getLocker(); + + if (envImpl.getTxnManager(). + areOtherSerializableTransactionsActive(cursorLocker)) { + + /* + * nextKeyCursor is created with retainNonTxnLocks == true, + * and as a result, releaseNonTxnLocks() will not be called + * on nextKeyLocker when nextKeyCursor is reset or closed. + * That's why in the finally clause below we explicitly call + * nextKeyLocker.operationEnd() + */ + nextKeyLocker = BuddyLocker.createBuddyLocker( + envImpl, cursorLocker); + + nextKeyCursor = new CursorImpl(dbImpl, nextKeyLocker); + + /* Perform eviction for user cursors. */ + nextKeyCursor.setAllowEviction(true); + nextKeyCursor.lockNextKeyForInsert(key); + } + + dup = beginMoveCursor(false /*samePosition*/, cacheMode); + + /* Perform operation. */ + result = dup.insertOrUpdateRecord( + key, data, ln, expInfo, putMode, + returnOldData, returnNewData, repContext); + + if (result == null) { + if (putMode == PutMode.NO_OVERWRITE) { + envImpl.incInsertFailOps(dbImpl); + } + } else { + if (!result.isUpdate()) { + envImpl.incInsertOps(dbImpl); + } else { + envImpl.incUpdateOps(dbImpl); + } + } + + /* Note that status is used in the finally. */ + success = true; + return result; + + } finally { + + try { + if (dup != null) { + endMoveCursor(dup, result != null); + } + + if (nextKeyCursor != null) { + nextKeyCursor.close(); + } + + /* Release the next-key lock. */ + if (nextKeyLocker != null) { + nextKeyLocker.operationEnd(); + } + } catch (Exception e) { + if (success) { + throw e; + } else { + /* + * Log the exception thrown by the cleanup actions and + * allow the original exception to be thrown + */ + LoggerUtils.traceAndLogException( + dbImpl.getEnv(), "Cursor", "putNoNotify", "", e); + } + } + } + } + + /** + * Update the data at the current position. No new LN, dup cursor, or + * phantom handling is needed. Does not interpret duplicates. + * + * @param key The new key value for the BIN slot S to be updated. Cannot + * be partial. For a no-dups DB, it is null. For dups DBs it is a 2-part + * key combining the current primary key of slot S with the original, + * user-provided data. "key" (if not null) must compare equal to S.key + * (otherwise DuplicateDataException is thrown), but the 2 keys may not + * be identical if custom comparators are used. So, S.key will actually + * be replaced by "key". + * + * @param data The new data to (perhaps partially) replace the data of the + * LN associated with the BIN slot. For dups DBs it is EMPTY_DUPS_DATA. + * Note: for dups DBs the original, user-provided "data" must not be + * partial. 
+ * + * @param returnOldData To receive the old LN data (before the update). + * It is needed only by DBs with indexes/triggers; will be null otherwise. + * + * @param returnNewData To receive the full data of the updated LN. + * It is needed only by DBs with indexes/triggers and only if "data" is + * partial; will be null otherwise. Note: "returnNewData" may be different + * than "data" only if "data" is partial. + */ + private OperationResult putCurrentNoNotify( + final DatabaseEntry key, + final DatabaseEntry data, + final DatabaseEntry returnOldData, + final DatabaseEntry returnNewData, + final CacheMode cacheMode, + final ExpirationInfo expInfo, + final ReplicationContext repContext) { + + assert data != null; + + beginUseExistingCursor(cacheMode); + + final OperationResult result = cursorImpl.updateCurrentRecord( + key, data, expInfo, returnOldData, returnNewData, repContext); + + if (result != null) { + dbImpl.getEnv().incUpdateOps(dbImpl); + } + + endUseExistingCursor(); + return result; + } + + /** + * Returns the current key and data. There is no need to use a dup cursor + * or prevent phantoms. + */ + OperationResult getCurrentInternal( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + synchronized (getTxnSynchronizer()) { + + checkTxnState(); + + if (dbImpl.getSortedDuplicates()) { + return getCurrentHandleDups(key, data, lockMode, cacheMode); + } + + return getCurrentNoDups(key, data, lockMode, cacheMode); + } + } + + /** + * Used to lock without returning key/data. When called with + * LockMode.READ_UNCOMMITTED, it simply checks for a deleted record. + */ + OperationResult checkCurrent(final LockMode lockMode, + final CacheMode cacheMode) { + + return getCurrentNoDups(null, null, lockMode, cacheMode); + } + + /** + * Interpret duplicates for getCurrent operation. + */ + private OperationResult getCurrentHandleDups( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final DatabaseEntry twoPartKey = new DatabaseEntry(); + + final OperationResult result = getCurrentNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + return result; + } + + private OperationResult getCurrentNoDups( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + boolean success = false; + + beginUseExistingCursor(cacheMode); + + final LockType lockType = getLockType(lockMode, false); + + try { + final OperationResult result = cursorImpl.lockAndGetCurrent( + key, data, lockType, lockMode == LockMode.READ_UNCOMMITTED_ALL, + false /*isLatched*/, false /*unlatch*/); + + success = true; + return result; + + } finally { + + if (success && + !dbImpl.isInternalDb() && + cursorImpl.getBIN() != null && + cursorImpl.getBIN().isBINDelta()) { + dbImpl.getEnv().incBinDeltaGets(); + } + + cursorImpl.releaseBIN(); + endUseExistingCursor(); + } + } + + /** + * Internal version of getFirst/getLast that does no parameter checking. + * Interprets duplicates. 
+ */ + OperationResult position( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final boolean first) { + + synchronized (getTxnSynchronizer()) { + + checkTxnState(); + + final OperationResult result; + + if (dbImpl.getSortedDuplicates()) { + result = positionHandleDups( + key, data, lockMode, cacheMode, first); + } else { + result = positionNoDups( + key, data, lockMode, cacheMode, first); + } + + if (result != null) { + dbImpl.getEnv().incPositionOps(dbImpl); + } + + return result; + } + } + + /** + * Interpret duplicates for getFirst and getLast operations. + */ + private OperationResult positionHandleDups( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final boolean first) { + + final DatabaseEntry twoPartKey = new DatabaseEntry(); + + final OperationResult result = positionNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, first); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + return result; + } + + /** + * Does not interpret duplicates. Prevents phantoms. + */ + private OperationResult positionNoDups( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final boolean first) { + + try { + if (!isSerializableIsolation(lockMode)) { + + return positionAllowPhantoms( + key, data, lockMode, cacheMode, false /*rangeLock*/, + first); + } + + /* + * Perform range locking to prevent phantoms and handle restarts. + */ + while (true) { + try { + /* Range lock the EOF node before getLast. */ + if (!first) { + cursorImpl.lockEof(LockType.RANGE_READ); + } + + /* Perform operation. Use a range lock for getFirst. */ + final OperationResult result = positionAllowPhantoms( + key, data, lockMode, cacheMode, first /*rangeLock*/, + first); + + /* + * Range lock the EOF node when getFirst returns null. + */ + if (first && result == null) { + cursorImpl.lockEof(LockType.RANGE_READ); + } + + return result; + } catch (RangeRestartException e) { + // continue + } + } + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Positions without preventing phantoms. + */ + private OperationResult positionAllowPhantoms( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final boolean rangeLock, + final boolean first) { + + assert (key != null && data != null); + + OperationResult result = null; + + final CursorImpl dup = + beginMoveCursor(false /*samePosition*/, cacheMode); + + try { + /* Search for first or last slot. */ + if (!dup.positionFirstOrLast(first)) { + /* Tree is empty. */ + result = null; + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + } else { + /* + * Found and latched first/last BIN in this tree. + * BIN may be empty. + */ + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + + final LockType lockType = getLockType(lockMode, rangeLock); + + final boolean dirtyReadAll = + lockMode == LockMode.READ_UNCOMMITTED_ALL; + + result = dup.lockAndGetCurrent( + key, data, lockType, dirtyReadAll, + true /*isLatched*/, false /*unlatch*/); + + if (result == null) { + /* + * The BIN may be empty or the slot we're pointing at may + * be deleted. 
+ */ + result = dup.getNext( + key, data, lockType, dirtyReadAll, first, + true /*isLatched*/, null /*rangeConstraint*/); + } + } + } finally { + dup.releaseBIN(); + endMoveCursor(dup, result != null); + } + return result; + } + + /** + * Retrieves the next or previous record. Prevents phantoms. + */ + OperationResult retrieveNext( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final GetMode getMode) { + + synchronized (getTxnSynchronizer()) { + final OperationResult result; + + if (dbImpl.getSortedDuplicates()) { + result = retrieveNextHandleDups( + key, data, lockMode, cacheMode, getMode); + } else { + result = retrieveNextNoDups( + key, data, lockMode, cacheMode, getMode); + } + + if (result != null) { + dbImpl.getEnv().incPositionOps(dbImpl); + } + + return result; + } + } + + /** + * Interpret duplicates for getNext/Prev/etc operations. + */ + private OperationResult retrieveNextHandleDups( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final GetMode getMode) { + + switch (getMode) { + case NEXT: + case PREV: + return dupsGetNextOrPrev(key, data, lockMode, cacheMode, getMode); + case NEXT_DUP: + return dupsGetNextOrPrevDup( + key, data, lockMode, cacheMode, GetMode.NEXT); + case PREV_DUP: + return dupsGetNextOrPrevDup( + key, data, lockMode, cacheMode, GetMode.PREV); + case NEXT_NODUP: + return dupsGetNextNoDup(key, data, lockMode, cacheMode); + case PREV_NODUP: + return dupsGetPrevNoDup(key, data, lockMode, cacheMode); + default: + throw EnvironmentFailureException.unexpectedState( + getMode.toString()); + } + } + + /** + * Interpret duplicates for getNext and getPrev. + */ + private OperationResult dupsGetNextOrPrev( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final GetMode getMode) { + + final DatabaseEntry twoPartKey = new DatabaseEntry(); + + final OperationResult result = retrieveNextNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, getMode); + + if (result == null) { + return null; + } + DupKeyData.split(twoPartKey, key, data); + return result; + } + + /** + * Interpret duplicates for getNextDup and getPrevDup. + * + * Move the cursor forward or backward by one record, and check the key + * prefix to detect going out of the bounds of the duplicate set. + */ + private OperationResult dupsGetNextOrPrevDup( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final GetMode getMode) { + + final byte[] currentKey = cursorImpl.getCurrentKey(); + + try (final Cursor c = dup(true /*samePosition*/)) { + c.setNonSticky(true); + setPrefixConstraint(c, currentKey); + final DatabaseEntry twoPartKey = new DatabaseEntry(); + + final OperationResult result = c.retrieveNextNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, getMode); + + if (result == null) { + return null; + } + DupKeyData.split(twoPartKey, key, data); + swapCursor(c); + return result; + } + } + + /** + * Interpret duplicates for getNextNoDup. + * + * Using a special comparator, search for first duplicate in the duplicate + * set following the one for the current key. For details see + * DupKeyData.NextNoDupComparator. 
+ */ + private OperationResult dupsGetNextNoDup( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final byte[] currentKey = cursorImpl.getCurrentKey(); + final DatabaseEntry twoPartKey = DupKeyData.removeData(currentKey); + + try (final Cursor c = dup(false /*samePosition*/)) { + c.setNonSticky(true); + + final Comparator searchComparator = + new DupKeyData.NextNoDupComparator( + dbImpl.getBtreeComparator()); + + final OperationResult result = c.searchNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, + SearchMode.SET_RANGE, searchComparator); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + + swapCursor(c); + return result; + } + } + + /** + * Interpret duplicates for getPrevNoDup. + * + * Move the cursor to the first duplicate in the duplicate set, then to the + * previous record. If this fails because all dups at the current position + * have been deleted, move the cursor backward to find the previous key. + * + * Note that we lock the first duplicate to enforce Serializable isolation. + */ + private OperationResult dupsGetPrevNoDup( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final byte[] currentKey = cursorImpl.getCurrentKey(); + final DatabaseEntry twoPartKey = DupKeyData.removeData(currentKey); + Cursor c = dup(false /*samePosition*/); + try { + c.setNonSticky(true); + setPrefixConstraint(c, currentKey); + + OperationResult result = c.searchNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, + SearchMode.SET_RANGE, null /*comparator*/); + + if (result != null) { + c.rangeConstraint = null; + + result = c.retrieveNextNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, + GetMode.PREV); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + swapCursor(c); + return result; + } + } finally { + c.close(); + } + + c = dup(true /*samePosition*/); + + try { + c.setNonSticky(true); + while (true) { + final OperationResult result = + c.retrieveNextNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, + GetMode.PREV); + + if (result == null) { + return null; + } + + if (!haveSameDupPrefix(twoPartKey, currentKey)) { + DupKeyData.split(twoPartKey, key, data); + swapCursor(c); + return result; + } + } + } finally { + c.close(); + } + } + + /** + * Does not interpret duplicates. Prevents phantoms. + */ + private OperationResult retrieveNextNoDups( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final GetMode getModeParam) { + + final GetMode getMode; + switch (getModeParam) { + case NEXT_DUP: + case PREV_DUP: + return null; + case NEXT_NODUP: + getMode = GetMode.NEXT; + break; + case PREV_NODUP: + getMode = GetMode.PREV; + break; + default: + getMode = getModeParam; + } + + try { + if (!isSerializableIsolation(lockMode)) { + + /* + * No need to prevent phantoms. + */ + assert (getMode == GetMode.NEXT || getMode == GetMode.PREV); + + final CursorImpl dup = + beginMoveCursor(true /*samePosition*/, cacheMode); + + OperationResult result = null; + try { + result = dup.getNext( + key, data, getLockType(lockMode, false), + lockMode == LockMode.READ_UNCOMMITTED_ALL, + getMode.isForward(), false /*isLatched*/, + rangeConstraint); + + return result; + } finally { + endMoveCursor(dup, result != null); + } + } + + /* + * Perform range locking to prevent phantoms and handle restarts. 
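+ * For 'prev' operations, first range lock the current position (see + * rangeLockCurrentPosition); for 'next' operations, advance using a + * range lock and lock the EOF node when no record follows. A + * RangeRestartException is caught below and the operation retried.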
+ */ + while (true) { + try { + /* Get a range lock for 'prev' operations. */ + if (!getMode.isForward()) { + rangeLockCurrentPosition(); + } + /* Use a range lock if performing a 'next' operation. */ + final LockType lockType = + getLockType(lockMode, getMode.isForward()); + + /* Do not modify key/data params until SUCCESS. */ + final DatabaseEntry tryKey = cloneEntry(key); + final DatabaseEntry tryData = cloneEntry(data); + + /* Perform the operation with a null rangeConstraint. */ + OperationResult result = retrieveNextCheckForInsertion( + tryKey, tryData, lockType, cacheMode, getMode); + + if (getMode.isForward() && result == null) { + /* NEXT: lock the EOF node. */ + cursorImpl.lockEof(LockType.RANGE_READ); + } + + /* Finally check rangeConstraint. */ + if (result != null && !checkRangeConstraint(tryKey)) { + result = null; + } + + /* + * Only overwrite key/data on SUCCESS, after all locking. + */ + if (result != null) { + setEntry(tryKey, key); + setEntry(tryData, data); + } + + return result; + } catch (RangeRestartException e) { + // continue + } + } + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * For 'prev' operations, upgrades to a range lock at the current position. + * If there are no records at the current position, get a range lock on the + * next record or, if not found, on the logical EOF node. Do not modify + * the current cursor position, use a separate cursor. + */ + private void rangeLockCurrentPosition() { + + final DatabaseEntry tempKey = new DatabaseEntry(); + final DatabaseEntry tempData = new DatabaseEntry(); + tempKey.setPartial(0, 0, true); + tempData.setPartial(0, 0, true); + + OperationResult result; + + CursorImpl dup = cursorImpl.cloneCursor(true /*samePosition*/); + + try { + result = dup.lockAndGetCurrent( + tempKey, tempData, LockType.RANGE_READ); + + if (result == null) { + + while (true) { + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + + result = dup.getNext( + tempKey, tempData, LockType.RANGE_READ, + false /*dirtyReadAll*/, true /*forward*/, + false /*isLatched*/, null /*rangeConstraint*/); + + if (cursorImpl.checkForInsertion(GetMode.NEXT, dup)) { + dup.close(cursorImpl); + dup = cursorImpl.cloneCursor(true /*samePosition*/); + continue; + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + break; + } + } + } finally { + dup.close(cursorImpl); + } + + if (result == null) { + cursorImpl.lockEof(LockType.RANGE_READ); + } + } + + /** + * Retrieves and checks for insertions, for serializable isolation. + */ + private OperationResult retrieveNextCheckForInsertion( + final DatabaseEntry key, + final DatabaseEntry data, + final LockType lockType, + final CacheMode cacheMode, + final GetMode getMode) { + + assert (key != null && data != null); + assert (getMode == GetMode.NEXT || getMode == GetMode.PREV); + + while (true) { + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + + /* + * Force cloning of the cursor because the caller may need to + * restart the operation from the previous position. In addition, + * checkForInsertion depends on having two CursorImpls for + * comparison, at the old and new position. 
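+ * Hence forceClone is passed as true below, even when this cursor is + * configured non-sticky.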
+ */ + final CursorImpl dup = beginMoveCursor( + true /*samePosition*/, true /*forceClone*/, cacheMode); + + boolean doEndMoveCursor = true; + + try { + final OperationResult result = dup.getNext( + key, data, lockType, false /*dirtyReadAll*/, + getMode.isForward(), false /*isLatched*/, + null /*rangeConstraint*/); + + if (!cursorImpl.checkForInsertion(getMode, dup)) { + + doEndMoveCursor = false; + endMoveCursor(dup, result != null); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + + return result; + } + } finally { + if (doEndMoveCursor) { + endMoveCursor(dup, false); + } + } + } + } + + private long skipInternal( + final long maxCount, + final boolean forward, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final LockType lockType = getLockType(lockMode, false); + + synchronized (getTxnSynchronizer()) { + checkTxnState(); + while (true) { + + /* + * Force cloning of the cursor since we may need to restart + * the operation at the previous position. + */ + final CursorImpl dup = beginMoveCursor( + true /*samePosition*/, true /*forceClone*/, cacheMode); + boolean success = false; + try { + final long count = dup.skip(forward, maxCount, + null /*rangeConstraint*/); + if (count <= 0) { + return 0; + } + final OperationResult result = + getCurrentWithCursorImpl(dup, key, data, lockType); + + if (result == null) { + /* Retry if deletion occurs while unlatched. */ + continue; + } + success = true; + return count; + } finally { + endMoveCursor(dup, success); + } + } + } + } + + /** + * Convenience method that does lockAndGetCurrent, with and without dups, + * using a CursorImpl. Does no setup or save/restore of cursor state. + */ + private OperationResult getCurrentWithCursorImpl( + final CursorImpl c, + final DatabaseEntry key, + final DatabaseEntry data, + final LockType lockType) { + + if (!dbImpl.getSortedDuplicates()) { + return c.lockAndGetCurrent(key, data, lockType); + } + + final DatabaseEntry twoPartKey = new DatabaseEntry(); + + final OperationResult result = + c.lockAndGetCurrent(twoPartKey, NO_RETURN_DATA, lockType); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + return result; + } + + /** + * Performs search by key, data, or both. Prevents phantoms. 
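+ * + * Each SearchMode used here lines up with a public Cursor method: SET + * with getSearchKey, SET_RANGE with getSearchKeyRange, BOTH with + * getSearchBoth, and BOTH_RANGE with getSearchBothRange. A minimal + * sketch of the SET case through the public API (illustrative only; + * assumes an open cursor and stored keyBytes): + * + *   DatabaseEntry key = new DatabaseEntry(keyBytes); + *   DatabaseEntry data = new DatabaseEntry(); + *   OperationStatus status = cursor.getSearchKey(key, data, null); + *   // SUCCESS if a matching, non-deleted record was found and locked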
+ */ + OperationResult search( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + SearchMode searchMode, + final boolean countOpStat) { + + synchronized (getTxnSynchronizer()) { + final OperationResult result; + + checkTxnState(); + + if (dbImpl.getSortedDuplicates()) { + + switch (searchMode) { + case SET: + result = dupsGetSearchKey(key, data, lockMode, cacheMode); + break; + case SET_RANGE: + result = dupsGetSearchKeyRange( + key, data, lockMode, cacheMode); + break; + case BOTH: + result = dupsGetSearchBoth(key, data, lockMode, cacheMode); + break; + case BOTH_RANGE: + result = dupsGetSearchBothRange( + key, data, lockMode, cacheMode); + break; + default: + throw EnvironmentFailureException.unexpectedState( + searchMode.toString()); + } + } else { + if (searchMode == SearchMode.BOTH_RANGE) { + searchMode = SearchMode.BOTH; + } + result = searchNoDups( + key, data, lockMode, cacheMode, searchMode, + null /*comparator*/); + } + + if (countOpStat) { + if (result != null) { + dbImpl.getEnv().incSearchOps(dbImpl); + } else { + dbImpl.getEnv().incSearchFailOps(dbImpl); + } + } + + return result; + } + } + + /** + * Version of search that does not interpret duplicates. Used for + * replication stream replay. Prevents phantoms. + */ + OperationResult searchForReplay( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final SearchMode searchMode) { + + synchronized (getTxnSynchronizer()) { + + checkTxnState(); + + return searchNoDups( + key, data, lockMode, cacheMode, searchMode, + null /*comparator*/); + } + } + + /** + * Interpret duplicates for getSearchKey operation. + * + * Use key as prefix to find first duplicate using a range search. Compare + * result to prefix to see whether we went out of the bounds of the + * duplicate set, i.e., whether NOTFOUND should be returned. + * + * Even if the user-provided "key" exists in the DB, the twoPartKey built + * here out of "key" compares < any of the BIN-slot keys that comprise the + * duplicates-set of "key". So there is no way to get an exact key match + * by a BTree search. Instead, we do a constrained range search: we forbid + * the cursor to advance past the duplicates-set of "key" by using an + * appropriate range constraint. + */ + private OperationResult dupsGetSearchKey( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final DatabaseEntry twoPartKey = new DatabaseEntry( + DupKeyData.makePrefixKey(key.getData(), + key.getOffset(), + key.getSize())); + + final RangeConstraint savedRangeConstraint = rangeConstraint; + + try { + setPrefixConstraint(this, key); + + final OperationResult result = searchNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, + SearchMode.SET_RANGE, null /*comparator*/); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + + return result; + } finally { + rangeConstraint = savedRangeConstraint; + } + } + + /** + * Interpret duplicates for getSearchKeyRange operation. + * + * Do range search for key prefix. 
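+ * + * The prefix key built by DupKeyData.makePrefixKey sorts before every + * two-part key whose main-key portion equals the given key, so the + * SET_RANGE search below lands on the first duplicate of the matching + * dup set, or on the first slot of a later dup set when the key is + * absent.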
+ */ + private OperationResult dupsGetSearchKeyRange( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final DatabaseEntry twoPartKey = new DatabaseEntry( + DupKeyData.makePrefixKey(key.getData(), + key.getOffset(), + key.getSize())); + + final OperationResult result = searchNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, + SearchMode.SET_RANGE, null /*comparator*/); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + return result; + } + + /** + * Interpret duplicates for getSearchBoth operation. + * + * Do exact search for combined key. + */ + private OperationResult dupsGetSearchBoth( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final DatabaseEntry twoPartKey = DupKeyData.combine(key, data); + + final OperationResult result = searchNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, SearchMode.BOTH, + null /*comparator*/); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + return result; + } + + /** + * Interpret duplicates for getSearchBothRange operation. + * + * Do range search for combined key. Compare result to prefix to see + * whether we went out of the bounds of the duplicate set, i.e., whether + * null should be returned. + */ + private OperationResult dupsGetSearchBothRange( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + + final DatabaseEntry twoPartKey = DupKeyData.combine(key, data); + + final RangeConstraint savedRangeConstraint = rangeConstraint; + + try { + setPrefixConstraint(this, key); + + final OperationResult result = searchNoDups( + twoPartKey, NO_RETURN_DATA, lockMode, cacheMode, + SearchMode.SET_RANGE, null /*comparator*/); + + if (result == null) { + return null; + } + + DupKeyData.split(twoPartKey, key, data); + + return result; + } finally { + rangeConstraint = savedRangeConstraint; + } + } + + /** + * Does not interpret duplicates. Prevents phantoms. + */ + private OperationResult searchNoDups( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final SearchMode searchMode, + final Comparator comparator) { + + /* + * searchMode cannot be BOTH_RANGE, because for non-dups DBs BOTH_RANGE + * is converted to BOTH, and for dup DBs BOTH_RANGE is converted to + * SET_RANGE. + */ + assert(searchMode != SearchMode.BOTH_RANGE); + + try { + if (!isSerializableIsolation(lockMode)) { + + if (searchMode.isExactSearch()) { + + assert(comparator == null); + + return searchExact( + key, data, lockMode, cacheMode, searchMode); + } + + while (true) { + try { + return searchRange( + key, data, lockMode, cacheMode, comparator); + } catch (RangeRestartException e) { + // continue + } + } + } + + /* + * Perform range locking to prevent phantoms and handle restarts. + */ + while (true) { + + OperationResult result; + + try { + /* + * Do not use a range lock for the initial search, but + * switch to a range lock when advancing forward. + */ + final LockType searchLockType; + final LockType advanceLockType; + searchLockType = getLockType(lockMode, false); + advanceLockType = getLockType(lockMode, true); + + /* Do not modify key/data params until SUCCESS. 
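+ * The operation below works on the tryKey/tryData clones and copies + * them back into the caller's entries only after all locking has + * succeeded, so a retry after RangeRestartException starts from the + * unmodified arguments.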
*/ + final DatabaseEntry tryKey = cloneEntry(key); + final DatabaseEntry tryData = cloneEntry(data); + + /* + * If the searchMode is SET or BOTH (i.e., we are looking + * for an exact key match) we do an artificial range search + * to range lock the next key. If an exact match for the + * search key is not found, we still want to advance to the + * next slot in order to RANGE lock it, but contrary to a + * normal range scan, we want to return NOTFOUND to the + * caller and we want to consider this as an operation + * failure so that the position of the cursor won't change, + * even though we advance to the following slot in order + * to range lock it. We achieve this by passing the + * exact-match searchMode to searchRangeSerializable. + */ + result = searchRangeSerializable( + tryKey, tryData, searchLockType, advanceLockType, + comparator, cacheMode, searchMode); + + if (result != null) { + setEntry(tryKey, key); + setEntry(tryData, data); + } + + return result; + } catch (RangeRestartException e) { + // continue + } + } + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Search for a "valid" BIN slot whose key is equal to the given "key". + * A slot is "valid" only if after locking it, neither its PD nor its KD + * flags are set. If no slot exists, return NOTFOUND. Otherwise, copy + * the key and the LN of the found slot into "key" and "data" respectively + * (if "key"/"data" request so) and return either NOTFOUND if searchMode + * == BOTH and "data" does not match the LN of the found slot, or SUCCESS + * otherwise. + * + * Note: On return from this method no latches are held by this cursor. + * + * Note: If the method returns NOTFOUND or raises an exception, any non- + * transactional locks acquired by this method are released. + * + * Note: On SUCCESS, if this is a sticky cursor, any non-transactional + * locks held by this cursor before calling this method are released. + * + * Note: this method is never called when the desired isolation is + * "serializable", because in order to do next-slot-locking, a range + * search is required. + * + * @param key It is used as the search key, as well as to receive the key + * of the BIN slot found by this method, if any. If the DB contains + * duplicates, the key is in the "two-part-key" format (see + * dbi/DupKeyData.java) so that it can be compared with the two-part keys + * stored in the BTree (which contain both a primary key and a data + * portion). The search key itself may or may not contain a data portion. + * + * @param data A DatabaseEntry to compare against the LN of the slot found + * by the search (if searchMode == BOTH) as well as to receive the data of + * that LN. If the DB contains duplicates, it is equal to NO_RETURN_DATA, + * because the LN will be empty (the full record is contained in the key). + * + * @param searchMode Either SET or BOTH. + * + * @return NOTFOUND if (a) no valid slot exists with a key == the search + * key, or (b) searchMode == BOTH and "data" does not match the LN of the + * found slot. SUCCESS otherwise.
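+ * + * For illustration, assuming a non-duplicates DB holding {A->1, B->2}: + * a SET search for key A returns SUCCESS and data 1, while a BOTH + * search for (A, 2) locks the slot but returns NOTFOUND because the + * stored LN does not match the given data.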
+ */ + private OperationResult searchExact( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final SearchMode searchMode) { + + assert(key != null && data != null); + assert(searchMode == SearchMode.SET || searchMode == SearchMode.BOTH); + + boolean success = false; + OperationResult result = null; + + DatabaseEntry origData = new DatabaseEntry( + data.getData(), data.getOffset(), data.getSize()); + + final boolean dataRequested = + !data.getPartial() || data.getPartialLength() != 0; + + final LockType lockType = getLockType(lockMode, false); + + final boolean dirtyReadAll = + lockMode == LockMode.READ_UNCOMMITTED_ALL; + + final CursorImpl dup = + beginMoveCursor(false /*samePosition*/, cacheMode); + + try { + /* + * Search for a BIN slot whose key is == the search key. If such a + * slot is found, lock it and check whether it is valid. + */ + if (dup.searchExact( + key, lockType, dirtyReadAll, dataRequested) == null) { + success = true; + return null; + } + + /* + * The search found and locked a valid BIN slot whose key is + * equal to the search key. Copy into "data" the LN of this + * slot (if "data" requests so). Also copy into "key" the key of + * the found slot if a partial key comparator is used, since then + * it may be different than the given key. + */ + result = dup.getCurrent( + dbImpl.allowsKeyUpdates() ? key : null, data); + + /* Check for data match, if asked so. */ + if (result != null && + searchMode == SearchMode.BOTH && + !checkDataMatch(origData, data)) { + result = null; + } + + success = true; + + } finally { + + if (success && + !dbImpl.isInternalDb() && + dup.getBIN() != null && + dup.getBIN().isBINDelta()) { + dbImpl.getEnv().incBinDeltaGets(); + } + + dup.releaseBIN(); + endMoveCursor(dup, result != null); + } + + return result; + } + + /** + * Search for the 1st "valid" BIN slot whose key is in the range [K1, K2), + * where (a) K1 is a given key, (b) K2 is determined by + * this.rangeConstraint, or is +INFINITY if this.rangeConstraint == null, + * and (c) a slot is "valid" only if after locking it, neither its PD nor + * its KD flags are set. + * + * If such a slot is found, copy its key and its associated LN into "key" + * and "data" respectively (if "key"/"data" request so). Note that the + * fact that the slot is valid implies that it has been locked. + * + * Note: On return from this method no latches are held by this cursor. + * + * Note: If the method returns NOTFOUND or raises an exception, any non- + * transactional locks acquired by this method are released. + * + * Note: On SUCCESS, if this is a sticky cursor, any non-transactional + * locks held by this cursor before calling this method are released. + * + * @param key It is used as the search key, as well as to receive the key + * of the BIN slot found by this method, if any. If the DB contains + * duplicates, the key is in the "two-part-key" format (see + * dbi/DupKeyData.java) so that it can be compared with the two-part keys + * stored in the BTree (which contain both a primary key and a data + * portion). The search key itself may or may not contain a data portion. + * + * @param data A DatabaseEntry to receive the data of the LN associated + * with the found slot, if any. If the DB contains duplicates, it is equal + * to NO_RETURN_DATA, because the LN will be empty (the full record is + * contained in the key). + * + * @param comparator Comparator to use to compare the search key against + * the BTree keys. 
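+ * A null comparator selects the database's default key comparator + * (see searchRangeAdvanceAndCheckKey below).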
+ * + * @return NOTFOUND if no valid slot exists in the [K1, K2) range; SUCCESS + * otherwise. + * + * @throws RangeRestartException if the search should be restarted by the + * caller. + */ + private OperationResult searchRange( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + Comparator comparator) + throws RangeRestartException { + + assert(key != null && data != null); + + boolean success = false; + boolean incStats = !dbImpl.isInternalDb(); + OperationResult result = null; + + final LockType lockType = getLockType(lockMode, false); + + final boolean dirtyReadAll = + lockMode == LockMode.READ_UNCOMMITTED_ALL; + + final CursorImpl dup = + beginMoveCursor(false /*samePosition*/, cacheMode); + + try { + /* Search for a BIN slot whose key is the max key <= K1. */ + final int searchResult = dup.searchRange(key, comparator); + + if ((searchResult & CursorImpl.FOUND) == 0) { + /* The tree is completely empty (has no nodes at all) */ + success = true; + return null; + } + + /* + * The search positioned dup on the BIN that should contain K1 + * and this BIN is now latched. If the BIN does contain K1, + * dup.index points to K1's slot. Otherwise, dup.index points + * to the right-most slot whose key is < K1 (or dup.index is -1 + * if K1 is < than all keys in the BIN). Note: if foundLast is + * true, dup is positioned on the very last slot of the BTree. + */ + final boolean exactKeyMatch = + ((searchResult & CursorImpl.EXACT_KEY) != 0); + final boolean foundLast = + ((searchResult & CursorImpl.FOUND_LAST) != 0); + + /* + * If we found K1, lock the slot and check whether it is valid. + * If so, copy out its key and associated LN. + */ + if (exactKeyMatch) { + result = dup.lockAndGetCurrent( + key, data, lockType, dirtyReadAll, + true /*isLatched*/, false /*unlatch*/); + } + + /* + * If K1 is not in the BTree or its slot is not valid, advance + * dup until (a) the rangeConstraint (if any) returns false, or + * (b) there are no more slots, or (c) we find a valid slot. If + * (c), check whether the slot key is < K1. This can happen if + * K1 was not in the BTree (so dup is now on a key K0 < K1) and + * another txn inserted new keys < K1 while we were trying to + * advance dup. If so, a RestartException is thrown. Otherwise, + * the slot key and LN are copied into "key" and "data" (if + * "key"/"data" request so). + */ + if (!exactKeyMatch || result == null) { + result = null; + if (!foundLast) { + result = searchRangeAdvanceAndCheckKey( + dup, key, data, lockType, dirtyReadAll, + comparator, rangeConstraint); + + /* + * Don't inc throughput stats because the bin is released by + * searchRangeAdvanceAndCheckKey(). This is ok because + * searchRangeAdvanceAndCheckKey() will cause mutation + * to a full bin anyway. + */ + incStats = false; + } + } + + success = true; + + } finally { + + if (success && + incStats && + dup.getBIN() != null && + dup.getBIN().isBINDelta()) { + dbImpl.getEnv().incBinDeltaGets(); + } + + dup.releaseBIN(); + endMoveCursor(dup, result != null); + } + + return result; + } + + /** + * Search for the 1st "valid" BIN slot whose key is in the range [K1, K2), + * where (a) K1 is a given key, (b) K2 is determined by + * this.rangeConstraint, or is +INFINITY if this.rangeConstraint == null, + * and (c) a slot is "valid" only if after locking it, neither its PD nor + * its KD flags are set. + * + * If such a slot is found, copy its key and its associated LN into + * "key" and "data" respectively (if "key"/"data" request so).
Note that the + * fact that the slot is valid implies that it has been locked. If the + * key of the found slot is == K1, it is locked with a non-range lock. If + * the key is > K1, the slot is locked with a range lock. + * + * If no slot is found, lock the EOF with a range lock. + * + * Note: On return from this method no latches are held by this cursor. + * + * Note: This Cursor's locker should be a Txn, so there are no non- + * transactional locks to be released. + * + * @param key It is used as the search key, as well as to receive the key + * of the BIN slot found by this method, if any. If the DB contains + * duplicates, the key is in the "two-part-key" format (see + * dbi/DupKeyData.java) so that it can be compared with the two-part keys + * stored in the BTree (which contain both a primary key and a data + * portion). The search key itself may or may not contain a data portion. + * + * @param data A DatabaseEntry to receive the data of the LN associated + * with the found slot, if any. If the DB contains duplicates, it is equal + * to NO_RETURN_DATA, because the LN will be empty (the full record is + * contained in the key). + * + * @param searchLockType LockType to use for locking the slot if its key + * is == search key. Normally, this is a READ or WRITE lock. + * + * @param advanceLockType LockType to use for locking the slot if its key + * is > search key. Normally, this is a READ_RANGE or WRITE_RANGE lock. + * + * @param comparator Comparator to use to compare the search key against + * the BTree keys. + * + * @param searchMode If SET or BOTH, we are actually looking for an exact + * match on K1. If so and K1 is not in the BTree, we want the cursor to + * advance temporarily to the next slot in order to range-lock it, but + * then return NOTFOUND. NOTFOUND is returned also if K1 is found, but + * searchMode is BOTH and the data associated with the K1 slot does not + * match the given data. + * + * @return NOTFOUND if no valid slot exists in the [K1, K2) range, or the + * searchMode is SET or BOTH (an exact-key search) and the key of the + * found slot is > K1; SUCCESS otherwise. + * + * @throws RangeRestartException if the search should be restarted by the + * caller. + */ + private OperationResult searchRangeSerializable( + final DatabaseEntry key, + final DatabaseEntry data, + final LockType searchLockType, + final LockType advanceLockType, + final Comparator comparator, + final CacheMode cacheMode, + final SearchMode searchMode) + throws RangeRestartException { + + assert(key != null && data != null); + + boolean success = false; + boolean incStats = !dbImpl.isInternalDb(); + + OperationResult result = null; + boolean exactSearch = searchMode.isExactSearch(); + boolean keyChange = false; + boolean mustLockEOF = false; + + DatabaseEntry origData = null; + if (exactSearch) { + origData = new DatabaseEntry( + data.getData(), data.getOffset(), data.getSize()); + } + + final CursorImpl dup = + beginMoveCursor(false /*samePosition*/, cacheMode); + + try { + /* Search for a BIN slot whose key is the max key <= K1. */ + final int searchResult = dup.searchRange(key, comparator); + + if ((searchResult & CursorImpl.FOUND) != 0) { + + /* + * The search positioned dup on the BIN that should contain K1 + * and this BIN is now latched. If the BIN does contain K1, + * dup.index points to K1's slot. Otherwise, dup.index points + * to the right-most slot whose key is < K1 (or dup.index is -1 + * if K1 is < than all keys in the BIN). Note: if foundLast is + * true, dup is positioned on the very last slot of the BTree.
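+ * For illustration, assuming BIN keys {10, 20, 30} and K1 = 25: the + * search latches the BIN with exactKeyMatch false and dup.index on + * key 20, and the advance below moves to key 30 and range locks it.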
+ */ + final boolean exactKeyMatch = + ((searchResult & CursorImpl.EXACT_KEY) != 0); + final boolean foundLast = + ((searchResult & CursorImpl.FOUND_LAST) != 0); + + /* + * If we found K1, lock the slot and check whether it is valid. + * If so, copy out its key and associated LN. + */ + if (exactKeyMatch) { + result = dup.lockAndGetCurrent( + key, data, searchLockType, false /*dirtyReadAll*/, + true /*isLatched*/, false /*unlatch*/); + } + + /* + * If K1 is not in the BTree or its slot is not valid, advance + * dup until (a) there are no more slots, or (b) we find a + * valid slot. If (b), check whether the slot key is < K1. This + * can happen if K1 was not in the BTree (so dup is now on a + * key K0 < K1) and another txn inserted new keys < K1 while we + * were trying to advance dup. If so, a RestartException is + * thrown. Otherwise, the slot key and LN are copied into "key" + * and "data" (if "key"/"data" request so). + */ + if (!exactKeyMatch || result == null) { + result = null; + if (!foundLast) { + result = searchRangeAdvanceAndCheckKey( + dup, key, data, advanceLockType, + false /*dirtyReadAll*/, comparator, + null /*rangeConstraint*/); + + keyChange = (result != null); + incStats = false; + } + + mustLockEOF = (result == null); + } + + /* + * Consider this search op a failure if we are actually looking + * for an exact key match and we didn't find the search key. + */ + if (result != null && exactSearch) { + if (keyChange) { + result = null; + } else if (searchMode == SearchMode.BOTH && + !checkDataMatch(origData, data)) { + result = null; + } + } + + /* Finally check rangeConstraint. */ + if (result != null && + !exactSearch && + !checkRangeConstraint(key)) { + result = null; + } + } else { + /* The tree is completely empty (has no nodes at all) */ + mustLockEOF = true; + } + + success = true; + + } finally { + + if (success && + incStats && + dup.getBIN() != null && + dup.getBIN().isBINDelta()) { + dbImpl.getEnv().incBinDeltaGets(); + } + + dup.releaseBIN(); + endMoveCursor(dup, result != null); + } + + /* + * Lock the EOF node if no records follow the key. + * + * BUG ????? At this point no latches are held by this cursor, so + * another transaction can insert new slots at the end of the DB + * and then commit. I think the fix is to request the eof lock in + * non-blocking mode with the BIN latched and restart the search + * if the lock is denied. + */ + if (mustLockEOF) { + cursorImpl.lockEof(LockType.RANGE_READ); + } + + return result; + } + + /* + * Helper method for searchRange and searchRangeSerializable + * + * @throws RangeRestartException if the search should be restarted by the + * caller. + */ + private OperationResult searchRangeAdvanceAndCheckKey( + final CursorImpl dup, + final DatabaseEntry key, + final DatabaseEntry data, + final LockType lockType, + final boolean dirtyReadAll, + Comparator comparator, + final RangeConstraint rangeConstraint) + throws RangeRestartException { + + if (comparator == null) { + comparator = dbImpl.getKeyComparator(); + } + + DatabaseEntry origKey = new DatabaseEntry( + key.getData(), key.getOffset(), key.getSize()); + + DatabaseEntry nextKey = key; + if (key.getPartial()) { + nextKey = new DatabaseEntry( + key.getData(), key.getOffset(), key.getSize()); + } + + OperationResult result = dup.getNext( + nextKey, data, lockType, dirtyReadAll, true /*forward*/, + true /*isLatched*/, rangeConstraint); + + /* + * Check whether the dup.getNext() landed on slot whose key is < K1. 
+ * This can happen if K1 was not in the BTree (so before dup.getNext() + * is called, dup is on a key K0 < K1) and another txn inserted new + * keys < K1 while we were trying to advance dup. Such an insertion is + * possible because if dup must move to the next BIN, it releases all + * latches for a while, so the inserter can come in, split the current + * BIN and insert its keys on the right split-sibling. Finally, dup + * moves to the right split-sibling and lands on a wrong slot. + */ + if (result != null) { + int c = Key.compareKeys(nextKey, origKey, comparator); + if (c < 0) { + key.setData(origKey.getData(), + origKey.getOffset(), + origKey.getSize()); + + throw new RangeRestartException(); + + } else if (key.getPartial()) { + LN.setEntry(key, nextKey); + } + } + + return result; + } + + /** + * For a non-duplicates database, the data must match exactly when + * getSearchBoth or getSearchBothRange is called. + */ + private boolean checkDataMatch( + DatabaseEntry data1, + DatabaseEntry data2) { + + final int size1 = data1.getSize(); + final int size2 = data2.getSize(); + if (size1 != size2) { + return false; + } + return Key.compareUnsignedBytes( + data1.getData(), data1.getOffset(), size1, + data2.getData(), data2.getOffset(), size2) == 0; + } + + /** + * Counts duplicates without parameter checking. No need to dup the cursor + * because we never change the position. + */ + int countInternal() { + synchronized (getTxnSynchronizer()) { + checkTxnState(); + if (dbImpl.getSortedDuplicates()) { + return countHandleDups(); + } + return countNoDups(); + } + } + + /** + * Count duplicates by skipping over the entries in the dup set key range. + */ + private int countHandleDups() { + final byte[] currentKey = cursorImpl.getCurrentKey(); + final DatabaseEntry twoPartKey = DupKeyData.removeData(currentKey); + + try (final Cursor c = dup(false /*samePosition*/)) { + c.setNonSticky(true); + setPrefixConstraint(c, currentKey); + + /* Move cursor to first key in this dup set. */ + OperationResult result = c.searchNoDups( + twoPartKey, NO_RETURN_DATA, LockMode.READ_UNCOMMITTED, + CacheMode.UNCHANGED, SearchMode.SET_RANGE, null /*comparator*/); + + if (result == null) { + return 0; + } + + /* Skip over entries in the dup set. */ + long count = 1 + c.cursorImpl.skip( + true /*forward*/, 0 /*maxCount*/, c.rangeConstraint); + + if (count > Integer.MAX_VALUE) { + throw new IllegalStateException( + "count exceeded integer size: " + count); + } + + return (int) count; + + } + } + + /** + * When there are no duplicates, the count is either 0 or 1, and is very + * cheap to determine. + */ + private int countNoDups() { + try { + beginUseExistingCursor(CacheMode.UNCHANGED); + + final OperationResult result = cursorImpl.lockAndGetCurrent( + null /*foundKey*/, null /*foundData*/, LockType.NONE); + + endUseExistingCursor(); + + return (result != null) ? 1 : 0; + } catch (Error E) { + dbImpl.getEnv().invalidate(E); + throw E; + } + } + + /** + * Estimates duplicate count without parameter checking. No need to dup + * the cursor because we never change the position. + */ + long countEstimateInternal() { + if (dbImpl.getSortedDuplicates()) { + return countEstimateHandleDups(); + } + return countNoDups(); + } + + /** + * Estimate duplicate count using the end point positions. 
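+ * + * In outline: position one cursor on the first duplicate of the + * current dup set, position a second cursor on the first record of + * the following dup set (or on the last record of this set when no + * set follows), and let CountEstimator.count estimate the number of + * records between the two positions.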
+ */ + private long countEstimateHandleDups() { + final byte[] currentKey = cursorImpl.getCurrentKey(); + final DatabaseEntry twoPartKey = DupKeyData.removeData(currentKey); + + try (final Cursor c1 = dup(false /*samePosition*/)) { + c1.setNonSticky(true); + setPrefixConstraint(c1, currentKey); + + /* Move cursor 1 to first key in this dup set. */ + OperationResult result = c1.searchNoDups( + twoPartKey, NO_RETURN_DATA, LockMode.READ_UNCOMMITTED, + CacheMode.UNCHANGED, SearchMode.SET_RANGE, + null /*comparator*/); + + if (result == null) { + return 0; + } + + /* Move cursor 2 to first key in the following dup set. */ + try (Cursor c2 = c1.dup(true /*samePosition*/)) { + c2.setNonSticky(true); + + result = c2.dupsGetNextNoDup( + twoPartKey, NO_RETURN_DATA, LockMode.READ_UNCOMMITTED, + CacheMode.UNCHANGED); + + final boolean c2Inclusive; + if (result != null) { + c2Inclusive = false; + } else { + c2Inclusive = true; + + /* + * There is no following dup set. Go to the last record in + * the database. If we land on a newly inserted dup set, + * go to the prev record until we find the last record in + * the original dup set. + */ + result = c2.positionNoDups( + twoPartKey, NO_RETURN_DATA, LockMode.READ_UNCOMMITTED, + CacheMode.UNCHANGED, false /*first*/); + + if (result == null) { + return 0; + } + + while (!haveSameDupPrefix(twoPartKey, currentKey)) { + result = c2.retrieveNextNoDups( + twoPartKey, NO_RETURN_DATA, + LockMode.READ_UNCOMMITTED, CacheMode.UNCHANGED, + GetMode.PREV); + + if (result == null) { + return 0; + } + } + } + + /* Estimate the count between the two cursor positions. */ + return CountEstimator.count( + dbImpl, c1.cursorImpl, true, c2.cursorImpl, c2Inclusive); + + } + } + } + + /** + * Reads the primary data for a primary key that was retrieved from a + * secondary DB via this secondary cursor ("this" may also be a regular + * Cursor in the role of a secondary cursor). This method is in the + * Cursor class, rather than in SecondaryCursor, to support joins with + * plain Cursors [#21258]. + * + * When a true status is returned by this method, the caller should return + * a successful result. When false is returned, the caller should treat + * this as a deleted record and either skip the record (in the case of + * position, search, and retrieveNext) or return failure/null (in the case + * of getCurrent). False can be returned only when read-uncommitted is used + * or the primary record has expired. + * + * @param priDb primary database as input. + * + * @param key secondary key as input. + * + * @param pKey primary key as input. + * + * @param data the data returned as output. + * + * @param lockMode the lock mode to use for the primary read; if null, use + * the default lock mode. + * + * @param secDirtyRead whether we used dirty-read for reading the secondary + * record. It is true if the user's configured isolation mode (or lockMode + * param) is dirty-read, or we used dirty-read for the secondary read to + * avoid deadlocks (this is done when the user's isolation mode is + * READ_COMMITTED or REPEATABLE_READ). + * + * @param lockPrimaryOnly If false, then we are not using dirty-read for + * secondary deadlock avoidance. If true, this secondary cursor's + * reference to the primary will be checked after the primary record has + * been locked. + * + * @param verifyPrimary If true, we are only checking integrity and we read + * the primary even though the data is not requested. + * + * @param locker is the Locker to use for accessing the primary record.
+ * + * @param secDb is the Database handle of the secondary database. Note + * that the dbHandle field may be null and should not be used by this + * method. + * + * @param secAssoc is the SecondaryAssociation of the secondary database. + * It is used to check whether the secondary database is still in the + * SecondaryAssociation before throwing SecondaryIntegrityException. If + * not, we will not throw SecondaryIntegrityException. + * + * @return true if the primary was read successfully, or false in one of + * the following cases: + * + When using read-uncommitted and the primary has been deleted. + * + When using read-uncommitted and the primary has been updated and no + * longer contains the secondary key. + * + When the primary record has expired (whether or not read-uncommitted + * is used). + * + * @throws SecondaryIntegrityException to indicate a corrupt secondary + * reference if the primary record is deleted (as opposed to expired) and + * read-uncommitted is not used. + */ + boolean readPrimaryAfterGet( + final Database priDb, + final DatabaseEntry key, + final DatabaseEntry pKey, + DatabaseEntry data, + final LockMode lockMode, + final boolean secDirtyRead, + final boolean lockPrimaryOnly, + final boolean verifyPrimary, + final Locker locker, + final Database secDb, + final SecondaryAssociation secAssoc) { + + final boolean priDirtyRead = isReadUncommittedMode(lockMode); + final DatabaseImpl priDbImpl = priDb.getDbImpl(); + + /* + * If we only lock the primary (and check the sec cursor), we must be + * using sec dirty-read for deadlock avoidance (whether or not the user + * requested dirty-read). Otherwise, we should be using sec dirty-read + * iff the user requested it. + */ + if (lockPrimaryOnly) { + assert secDirtyRead && !priDirtyRead; + } else { + assert secDirtyRead == priDirtyRead; + } + + final boolean dataRequested = + !data.getPartial() || data.getPartialLength() > 0; + + /* + * In most cases, there is no need to read the primary if no data is + * requested. In these cases, a lock on the secondary has been + * acquired (if the caller did not specify dirty-read). + * + * But for btree verification, we need to check whether the primary + * record still exists without requesting the data. + */ + if (!dataRequested && !verifyPrimary) { + data.setData(LogUtils.ZERO_LENGTH_BYTE_ARRAY); + return true; + } + + /* + * If partial data is requested along with read-uncommitted, then we + * must read all data in order to call the key creator below. [#14966] + */ + DatabaseEntry copyToPartialEntry = null; + + if (priDirtyRead && data.getPartial()) { + copyToPartialEntry = data; + data = new DatabaseEntry(); + } + + /* + * Do not release non-transactional locks when reading the primary + * cursor. They are held until all locks for this operation are + * released by the secondary cursor [#15573].
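+ * This is why the primary CursorImpl below is created with + * retainNonTxnLocks set to true.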
+ */ + final CursorImpl priCursor = new CursorImpl( + priDbImpl, locker, true /*retainNonTxnLocks*/, + false /*isSecondaryCursor*/); + + try { + final LockType priLockType = getLockType(lockMode, false); + + final boolean dirtyReadAll = + lockMode == LockMode.READ_UNCOMMITTED_ALL; + + LockStanding priLockStanding = priCursor.searchExact( + pKey, priLockType, dirtyReadAll, dataRequested); + + try { + if (priLockStanding != null) { + if (priCursor.getCurrent(null, data) == null) { + priCursor.revertLock(priLockStanding); + priLockStanding = null; + } + } + } finally { + priCursor.releaseBIN(); + } + + if (priLockStanding != null && lockPrimaryOnly) { + if (!ensureReferenceToPrimary(pKey, priLockType)) { + priCursor.revertLock(priLockStanding); + priLockStanding = null; + } + } + + if (priLockStanding == null) { + /* + * If using read-uncommitted and the primary is deleted, the + * primary must have been deleted after reading the secondary. + * We cannot verify this by checking if the secondary is + * deleted, because it may have been reinserted. [#22603] + * + * If the secondary is expired (within TTL clock tolerance), + * then the record must have expired after the secondary read. + * + * In either case, return false to skip this record. + */ + if (secDirtyRead || cursorImpl.isProbablyExpired()) { + return false; + } + + /* + * TODO: determine whether we need to do the following check for + * all usage scenarios of readPrimaryAfterGet. If so, we + * may get the SecondaryAssociation via the secDb. + * + * If secDb has been removed from SecondaryAssociation, the + * operations on the primary database after removing it + * may cause an inconsistency between the secondary record and + * the corresponding primary record. For this case, just return + * false to skip this record. + */ + if (secAssoc != null) { + boolean stillExist = false; + for (SecondaryDatabase db : secAssoc.getSecondaries(pKey)) { + if (db == secDb) { + stillExist = true; + break; + } + } + if (!stillExist) { + return false; + } + } + + /* + * When the primary is deleted, secondary keys are deleted + * first. So if the above check fails, we know the secondary + * reference is corrupt. + */ + throw secDb.secondaryRefersToMissingPrimaryKey( + locker, key, pKey, cursorImpl.getExpirationTime()); + } + + /* + * If using read-uncommitted and the primary was found, check to + * see if the primary was updated so that it no longer contains the + * secondary key. If it has been, return false. + */ + if (priDirtyRead && checkForPrimaryUpdate(key, pKey, data)) { + return false; + } + + /* + * When a partial entry was requested but we read all the data, + * copy the requested partial data to the caller's entry. [#14966] + */ + if (copyToPartialEntry != null) { + LN.setEntry(copyToPartialEntry, data.getData()); + } + + /* Copy primary record info to secondary cursor. */ + cursorImpl.setPriInfo(priCursor); + + priDbImpl.getEnv().incSearchOps(priDbImpl); + + return true; + } finally { + priCursor.close(); + } + } + + /** + * Checks whether this secondary cursor still refers to the primary key, + * and locks the secondary record if necessary. + * + * This is used for deadlock avoidance with secondary DBs. The initial + * secondary index read is done without locking. After the primary has + * been locked, we check here to ensure that the primary/secondary + * relationship is still in place. There are two cases: + * + * 1.
If the secondary DB has duplicates, the key contains the sec/pri + * relationship and the presence of the secondary record (that is not + * deleted) is sufficient to ensure the sec/pri relationship. + * + * 2. If the secondary DB does not allow duplicates, then the primary key + * (the data of the secondary record) must additionally be compared to + * the original search key. This detects the case where the secondary + * record was updated to refer to a different primary key. + * + * In addition, this method locks the secondary record if it would expire + * within {@link EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. This is needed to + * support repeatable-read. The lock prevents expiration of the secondary. + */ + private boolean ensureReferenceToPrimary( + final DatabaseEntry matchPriKey, + final LockType lockType) { + + assert lockType != LockType.NONE; + + /* + * To check whether the reference is still valid, because the primary + * is locked and the secondary can only be deleted after locking the + * primary, it is sufficient to check whether the secondary PD and KD + * flags are set. There is no need to lock the secondary, because it is + * protected from changes by the lock on the primary. + * + * If this technique were used with serializable isolation then + * checking the PD/KD flags wouldn't be sufficient -- locking the + * secondary would be necessary to prevent phantoms. With serializable + * isolation, a lock on the secondary record is acquired up front by + * SecondaryCursor. + */ + cursorImpl.latchBIN(); + try { + final BIN bin = cursorImpl.getBIN(); + final int index = cursorImpl.getIndex(); + + if (bin.isDeleted(index)) { + return false; + } + + final EnvironmentImpl envImpl = dbImpl.getEnv(); + + /* Additionally, lock the secondary if it expires soon. */ + final long expirationTime = TTL.expirationToSystemTime( + bin.getExpiration(index), bin.isExpirationInHours()); + + if (envImpl.expiresWithin( + expirationTime, envImpl.getTtlMaxTxnTime())) { + cursorImpl.lockLN(lockType); + } + } finally { + cursorImpl.releaseBIN(); + } + + /* + * If there are no duplicates, check the secondary data (primary key). + * No need to actually lock (use LockType.NONE) since the primary lock + * protects the secondary from changes. + */ + if (!cursorImpl.hasDuplicates()) { + final DatabaseEntry secData = new DatabaseEntry(); + + if (cursorImpl.lockAndGetCurrent( + null, secData, LockType.NONE) == null) { + return false; + } + + if (!secData.equals(matchPriKey)) { + return false; + } + } + + return true; + } + + /** + * Checks for secondary corruption caused by a primary record update + * during a read-uncommitted read. Checking in this method is not possible + * because there is no secondary key creator available. It is overridden + * by SecondaryCursor. + * + * This method is in the Cursor class, rather than in SecondaryCursor, to + * support joins with plain Cursors [#21258]. + */ + boolean checkForPrimaryUpdate( + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data) { + return false; + } + + /** + * Returns whether the two keys have the same prefix. + * + * @param twoPartKey1 combined key with zero offset and size equal to the + * data array length. + * + * @param keyBytes2 combined key byte array.
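+ * + * For illustration, writing two-part keys as [mainKey|data]: [K|a] + * and [K|b] have the same prefix, while [K|a] and [L|x] do not.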
+ */ + private boolean haveSameDupPrefix( + final DatabaseEntry twoPartKey1, + final byte[] keyBytes2) { + + assert twoPartKey1.getOffset() == 0; + assert twoPartKey1.getData().length == twoPartKey1.getSize(); + + return DupKeyData.compareMainKey( + twoPartKey1.getData(), keyBytes2, + dbImpl.getBtreeComparator()) == 0; + } + + /** + * Called to start an operation that potentially moves the cursor. + * + * If the cursor is not initialized already, the method simply returns + * this.cursorImpl. This avoids the overhead of cloning this.cursorImpl + * when this is a sticky cursor or forceClone is true. + * + * If the cursor is initialized, the actions taken here depend on whether + * cloning is required (either because this is a sticky cursor or + * because forceClone is true). + * + * (a) No cloning: + * - If same position is true, (1) the current LN (if any) is evicted, if + * the cachemode so dictates, and (2) non-txn locks are released, if + * retainNonTxnLocks is false. this.cursorImpl remains registered at its + * current BIN. + * - If same position is false, this.cursorImpl is "reset", i.e., (1) it is + * deregistered from its current position, (2) cachemode eviction is + * performed, (3) non-txn locks are released, if retainNonTxnLocks is + * false, and (4) this.cursorImpl is marked uninitialized. + * - this.cursorImpl is returned. + * + * Note: In cases where only non-transactional locks are held, releasing + * them before the move prevents more than one lock from being held during + * a cursor move, which helps to avoid deadlocks. + * + * (b) Cloning: + * - this.cursorImpl is cloned. + * - If same position is true, the clone is registered at the same position + * as this.cursorImpl. + * - If same position is false, the clone is marked uninitialized. + * - If this.cursorImpl uses a locker that may acquire non-txn locks and + * retainNonTxnLocks is false, the clone cursorImpl gets a new locker + * of the same kind as this.cursorImpl. This allows for the non-txn locks + * acquired by the clone to be released independently from the non-txn + * locks of this.cursorImpl. + * - The clone cursorImpl is returned. + * + * In all cases, critical eviction is performed, if necessary, before the + * method returns. This is done by CursorImpl.cloneCursor()/reset(), or is + * done here explicitly when the cursor is not cloned or reset. + * + * In all cases, the cursor returned must be passed to endMoveCursor() to + * close the correct cursor. + * + * @param samePosition If true, this cursor's position is used for the new + * cursor and addCursor is called on the new cursor; if non-sticky, this + * cursor's position is unchanged. If false, the new cursor will be + * uninitialized; if non-sticky, this cursor is reset. + * + * @param forceClone is true to clone an initialized cursor even if + * non-sticky is configured. Used when cloning is needed to support + * internal algorithms, namely when the algorithm may restart the operation + * and samePosition is true. + * + * @see CursorImpl#performCacheModeEviction for a description of how the + * cache mode is used. This method ensures that the correct cache mode + * is used before each operation. + */ + private CursorImpl beginMoveCursor( + final boolean samePosition, + final boolean forceClone, + final CacheMode cacheMode) { + + /* + * It doesn't make sense to force cloning if the new cursor will be + * uninitialized. + */ + assert !(forceClone && !samePosition); + + /* Must set cache mode before calling criticalEviction or reset.
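+ * The mode set here governs any cache mode eviction those calls + * perform (see CursorImpl.performCacheModeEviction).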
*/ + cursorImpl.setCacheMode( + cacheMode != null ? cacheMode : defaultCacheMode); + + if (cursorImpl.isNotInitialized()) { + cursorImpl.criticalEviction(); + return cursorImpl; + } + + if (nonSticky && !forceClone) { + if (samePosition) { + cursorImpl.beforeNonStickyOp(); + } else { + cursorImpl.reset(); + } + return cursorImpl; + } + + final CursorImpl dup = cursorImpl.cloneCursor(samePosition); + dup.setClosingLocker(cursorImpl); + return dup; + } + + private CursorImpl beginMoveCursor(final boolean samePosition, + final CacheMode cacheMode) { + return beginMoveCursor(samePosition, false /*forceClone*/, cacheMode); + } + + /** + * Called to end an operation that potentially moves the cursor. + * + * The actions taken here depend on whether cloning was done in + * beginMoveCursor() or not: + * + * (a) No cloning: + * - If the op is successful, only critical eviction is done. + * - If the op is not successful, this.cursorImpl is "reset", i.e., + * (1) it is deregistered from its current position, (2) cachemode + * eviction is performed, (3) non-txn locks are released, if + * retainNonTxnLocks is false, and (4) this.cursorImpl is marked + * uninitialized. + * + * (b) Cloning: + * - If the op is successful, this.cursorImpl is closed and then it is + * set to the clone cursorImpl. + * - If the op is not successful, the clone cursorImpl is closed. + * - In either case, closing a cursorImpl involves deregistering it from + * its current position, performing cachemode eviction, releasing its + * non-transactional locks and closing its locker, if retainNonTxnLocks + * is false and the locker is not a Txn, and finally marking the + * cursorImpl as closed. + * + * In all cases, critical eviction is performed after each cursor operation. + * This is done by CursorImpl.reset() and close(), or is done here explicitly + * when the cursor is not cloned. + */ + private void endMoveCursor(final CursorImpl dup, final boolean success) { + + dup.clearClosingLocker(); + + if (dup == cursorImpl) { + if (success) { + cursorImpl.afterNonStickyOp(); + } else { + cursorImpl.reset(); + } + } else { + if (success) { + cursorImpl.close(dup); + cursorImpl = dup; + } else { + dup.close(cursorImpl); + } + } + } + + /** + * Called to start an operation that does not move the cursor, and + * therefore does not clone the cursor. Either beginUseExistingCursor / + * endUseExistingCursor or beginMoveCursor / endMoveCursor must be used for + * each operation. + */ + private void beginUseExistingCursor(final CacheMode cacheMode) { + /* Must set cache mode before calling criticalEviction. */ + cursorImpl.setCacheMode( + cacheMode != null ? cacheMode : defaultCacheMode); + cursorImpl.criticalEviction(); + } + + /** + * Called to end an operation that does not move the cursor. + */ + private void endUseExistingCursor() { + cursorImpl.criticalEviction(); + } + + /** + * Swaps the CursorImpls of this cursor and the given cursor. + */ + private void swapCursor(Cursor other) { + final CursorImpl otherImpl = other.cursorImpl; + other.cursorImpl = this.cursorImpl; + this.cursorImpl = otherImpl; + } + + boolean advanceCursor(final DatabaseEntry key, final DatabaseEntry data) { + return cursorImpl.advanceCursor(key, data); + } + + private LockType getLockType( + final LockMode lockMode, + final boolean rangeLock) { + + if (isReadUncommittedMode(lockMode)) { + return LockType.NONE; + } else if (lockMode == null || lockMode == LockMode.DEFAULT) { + return rangeLock ?
LockType.RANGE_READ: LockType.READ; + } else if (lockMode == LockMode.RMW) { + return rangeLock ? LockType.RANGE_WRITE: LockType.WRITE; + } else if (lockMode == LockMode.READ_COMMITTED) { + throw new IllegalArgumentException( + lockMode.toString() + " not allowed with Cursor methods, " + + "use CursorConfig.setReadCommitted instead."); + } else { + assert false : lockMode; + return LockType.NONE; + } + } + + /** + * Returns whether the given lock mode will cause a read-uncommitted when + * used with this cursor, taking into account the default cursor + * configuration. + */ + boolean isReadUncommittedMode(final LockMode lockMode) { + + return (lockMode == LockMode.READ_UNCOMMITTED || + lockMode == LockMode.READ_UNCOMMITTED_ALL || + (readUncommittedDefault && + (lockMode == null || lockMode == LockMode.DEFAULT))); + } + + boolean isSerializableIsolation(final LockMode lockMode) { + + return serializableIsolationDefault && + !isReadUncommittedMode(lockMode); + } + + private void checkUpdatesAllowed(final ExpirationInfo expInfo) { + + checkUpdatesAllowed(); + + if (dbImpl.isReplicated() && + expInfo != null && expInfo.expiration > 0) { + + /* Throws IllegalStateException if TTL is not available. */ + dbImpl.getEnv().checkTTLAvailable(); + } + } + + private void checkUpdatesAllowed() { + + if (updateOperationsProhibited) { + throw updatesProhibitedException(cursorImpl.getLocker()); + } + + if (!dbImpl.getDbType().isInternal()) { + + final String diskLimitViolation = + dbImpl.getEnv().getDiskLimitViolation(); + + if (diskLimitViolation != null) { + throw new DiskLimitException( + cursorImpl.getLocker(), diskLimitViolation); + } + } + } + + private UnsupportedOperationException updatesProhibitedException( + final Locker locker) { + + final StringBuilder str = new StringBuilder(200); + + str.append("Write operation is not allowed because "); + + /* Be sure to keep this logic in sync with init(). */ + if (locker.isReadOnly()) { + str.append("the Transaction is configured as read-only."); + } else if (dbHandle != null && !dbHandle.isWritable()) { + str.append("the Database is configured as read-only."); + } else if (dbImpl.isTransactional() && !locker.isTransactional()) { + str.append("a Transaction was not supplied to openCursor "); + str.append("and the Database is transactional."); + } else if (dbImpl.isReplicated() && locker.isLocalWrite()) { + str.append("the Database is replicated and Transaction is "); + str.append("configured as local-write."); + } else if (!dbImpl.isReplicated() && !locker.isLocalWrite()) { + str.append("the Database is not replicated and the "); + str.append("Transaction is not configured as local-write."); + } else { + assert false; + } + + throw new UnsupportedOperationException(str.toString()); + } + + /** + * Checks the cursor state. + */ + void checkState(final boolean mustBeInitialized) { + cursorImpl.checkCursorState( + mustBeInitialized, false /*mustNotBeInitialized*/); + } + + /** + * Checks the environment, DB handle, and cursor state. + */ + void checkOpenAndState(final boolean mustBeInitialized) { + checkEnv(); + checkOpen(); + checkState(mustBeInitialized); + } + + /** + * Checks the environment and DB handle. + */ + void checkOpen() { + checkEnv(); + if (dbHandle != null) { + dbHandle.checkOpen(); + } + } + + /** + * @throws EnvironmentFailureException if the underlying environment is + * invalid. + */ + void checkEnv() { + cursorImpl.checkEnv(); + } + + /** + * Returns an object used for synchronizing transactions that are used in + * multiple threads. 
+ * + * For a transactional locker, the Transaction is returned to prevent + * concurrent access using this transaction from multiple threads. The + * Transaction.commit and abort methods are synchronized so they do not run + * concurrently with operations using the Transaction. Note that the Txn + * cannot be used for synchronization because locking order is BIN first, + * then Txn. + * + * For a non-transactional locker, 'this' is returned because no special + * blocking is needed. Other mechanisms are used to prevent + * non-transactional usage access by multiple threads (see ThreadLocker). + * In the future we may wish to use the getTxnSynchronizer for + * synchronizing non-transactional access as well; however, note that a new + * locker is created for each operation. + */ + private Object getTxnSynchronizer() { + return (transaction != null) ? transaction : this; + } + + private void checkTxnState() { + if (transaction == null) { + return; + } + transaction.checkOpen(); + transaction.getTxn().checkState(false /*calledByAbort*/); + } + + /** + * Sends trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + void trace( + final Level level, + final String methodName, + final String getOrPutType, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + if (logger.isLoggable(level)) { + final StringBuilder sb = new StringBuilder(); + sb.append(methodName); + sb.append(getOrPutType); + traceCursorImpl(sb); + if (key != null) { + sb.append(" key=").append(key.dumpData()); + } + if (data != null) { + sb.append(" data=").append(data.dumpData()); + } + if (lockMode != null) { + sb.append(" lockMode=").append(lockMode); + } + LoggerUtils.logMsg( + logger, dbImpl.getEnv(), level, sb.toString()); + } + } + + /** + * Sends trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + void trace( + final Level level, + final String methodName, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + if (logger.isLoggable(level)) { + final StringBuilder sb = new StringBuilder(); + sb.append(methodName); + traceCursorImpl(sb); + if (key != null) { + sb.append(" key=").append(key.dumpData()); + } + if (data != null) { + sb.append(" data=").append(data.dumpData()); + } + if (lockMode != null) { + sb.append(" lockMode=").append(lockMode); + } + LoggerUtils.logMsg( + logger, dbImpl.getEnv(), level, sb.toString()); + } + } + + /** + * Sends trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. 
+ */ + void trace( + final Level level, + final String methodName, + final LockMode lockMode) { + + if (logger.isLoggable(level)) { + final StringBuilder sb = new StringBuilder(); + sb.append(methodName); + traceCursorImpl(sb); + if (lockMode != null) { + sb.append(" lockMode=").append(lockMode); + } + LoggerUtils.logMsg( + logger, dbImpl.getEnv(), level, sb.toString()); + } + } + + private void traceCursorImpl(final StringBuilder sb) { + sb.append(" locker=").append(cursorImpl.getLocker().getId()); + sb.append(" bin=").append(cursorImpl.getCurrentNodeId()); + sb.append(" idx=").append(cursorImpl.getIndex()); + } + + /** + * Clone entry contents in a new returned entry. + */ + private static DatabaseEntry cloneEntry(DatabaseEntry from) { + final DatabaseEntry to = new DatabaseEntry(); + setEntry(from, to); + return to; + } + + /** + * Copy entry contents to another entry. + */ + private static void setEntry(DatabaseEntry from, DatabaseEntry to) { + to.setPartial(from.getPartialOffset(), from.getPartialLength(), + from.getPartial()); + to.setData(from.getData(), from.getOffset(), from.getSize()); + } +} diff --git a/src/com/sleepycat/je/CursorConfig.java b/src/com/sleepycat/je/CursorConfig.java new file mode 100644 index 0000000..03afc8f --- /dev/null +++ b/src/com/sleepycat/je/CursorConfig.java @@ -0,0 +1,249 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Specifies the attributes of database cursor. An instance created with the + * default constructor is initialized with the system's default settings. + */ +public class CursorConfig implements Cloneable { + + /** + * Default configuration used if null is passed to methods that create a + * cursor. + */ + public static final CursorConfig DEFAULT = new CursorConfig(); + + /** + * A convenience instance to configure read operations performed by the + * cursor to return modified but not yet committed data. + */ + public static final CursorConfig READ_UNCOMMITTED = new CursorConfig(); + + /** + * A convenience instance to configure a cursor for read committed + * isolation. + * + * This ensures the stability of the current data item read by the cursor + * but permits data read by this cursor to be modified or deleted prior to + * the commit of the transaction. + */ + public static final CursorConfig READ_COMMITTED = new CursorConfig(); + + static { + READ_UNCOMMITTED.setReadUncommitted(true); + READ_COMMITTED.setReadCommitted(true); + } + + private boolean readUncommitted = false; + private boolean readCommitted = false; + private boolean nonSticky = false; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public CursorConfig() { + } + + /** + * Configures read operations performed by the cursor to return modified + * but not yet committed data. + * + * @param readUncommitted If true, configure read operations performed by + * the cursor to return modified but not yet committed data. 
+ * + * @see LockMode#READ_UNCOMMITTED + * + * @return this + */ + public CursorConfig setReadUncommitted(boolean readUncommitted) { + setReadUncommittedVoid(readUncommitted); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReadUncommittedVoid(boolean readUncommitted) { + this.readUncommitted = readUncommitted; + } + + /** + * Returns true if read operations performed by the cursor are configured + * to return modified but not yet committed data. + * + * @return true if read operations performed by the cursor are configured + * to return modified but not yet committed data. + * + * @see LockMode#READ_UNCOMMITTED + */ + public boolean getReadUncommitted() { + return readUncommitted; + } + + /** + * Configures read operations performed by the cursor to obey read + * committed isolation. Read committed isolation provides for cursor + * stability but not repeatable reads. Data items which have been + * previously read by this transaction may be deleted or modified by other + * transactions before the cursor is closed or the transaction completes. + * + * @param readCommitted If true, configure read operations performed by + * the cursor to obey read committed isolation. + * + * @see LockMode#READ_COMMITTED + * + * @return this + */ + public CursorConfig setReadCommitted(boolean readCommitted) { + setReadCommittedVoid(readCommitted); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReadCommittedVoid(boolean readCommitted) { + this.readCommitted = readCommitted; + } + + /** + * Returns true if read operations performed by the cursor are configured + * to obey read committed isolation. + * + * @return true if read operations performed by the cursor are configured + * to obey read committed isolation. + * + * @see LockMode#READ_COMMITTED + */ + public boolean getReadCommitted() { + return readCommitted; + } + + /** + * Configures the behavior of the cursor when a cursor movement operation + * returns {@link OperationStatus#NOTFOUND}. + * + * By default, a cursor is "sticky", meaning that the prior position is + * maintained by cursor movement operations, and the cursor stays at the + * prior position when the operation does not succeed. For example, if + * {@link Cursor#getFirst} is called successfully, and then + * {@link Cursor#getNext} is called, if {@code getNext} returns + * {@code NOTFOUND} the cursor will remain positioned on the first record. + *

+ * <p>
+ * Also, as part of maintaining the prior position, the lock on the record
+ * at the current position will be held (at least) until after a cursor
+ * movement operation succeeds and acquires a lock on the record at the new
+ * position. In the example above, a lock on the first record will still
+ * be held after {@code getNext} returns {@code NOTFOUND}.
+ * <p>
+ * If the cursor is configured to be non-sticky, the prior position is
+ * not maintained, and this has certain performance advantages:
+ * <ul>
+ * <li>
+ * Some processing is avoided because the prior position is not
+ * maintained.
+ * </li>
+ * <li>
+ * The lock on the record at the prior position is released before
+ * acquiring the lock on the record at the new position (when the
+ * cursor movement operation succeeds.) This can help to prevent
+ * deadlocks in certain situations. Namely, if the cursor's isolation
+ * mode allows locks to be released when moving to a new position, then
+ * only one lock at a time will be held by the cursor. Holding multiple
+ * locks at a time can cause deadlocks, when locks are acquired in
+ * different orders by different threads, for example, when one cursor
+ * is scanning forward and another cursor is scanning backward. Note
+ * that this optimization does not apply to repeatable-read or
+ * serializable isolation, since these modes require that locks are
+ * not released by cursor movement operations.
+ * </li>
+ * </ul>
+ * <p>
+ * However, when the cursor is configured as non-sticky and {@code getNext}
+ * returns {@code NOTFOUND} in the example above, the cursor position will
+ * be uninitialized, as if it had just been opened. Also, the lock on the
+ * first record will have been released (except when repeatable-read or
+ * serializable isolation is configured.) To move to the first record (and
+ * lock it), {@code getFirst} must be called again.
+ * <p>
+ * Also note that in certain circumstances, internal algorithms require
+ * that the prior position is retained, and the operation will behave as if
+ * the cursor is sticky. Specifically, these are only the following
+ * methods, and only when called on a database with duplicates configured:
+ * <ul>
+ * <li>{@link Cursor#putNoOverwrite}</li>
+ * <li>{@link Cursor#getNextDup}</li>
+ * <li>{@link Cursor#getPrevDup}</li>
+ * <li>{@link Cursor#getNextNoDup}</li>
+ * <li>{@link Cursor#getPrevNoDup}</li>
+ * </ul>
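+ * <p>For illustration, a hypothetical forward scan with a non-sticky
+ * cursor ({@code db} is assumed to be an open Database handle):</p>
+ * <pre>
+ *     CursorConfig config = new CursorConfig().setNonSticky(true);
+ *     try (Cursor cursor = db.openCursor(null, config)) {
+ *         DatabaseEntry key = new DatabaseEntry();
+ *         DatabaseEntry data = new DatabaseEntry();
+ *         while (cursor.get(key, data, Get.NEXT, null) != null) {
+ *             // process the record; when get returns null the cursor
+ *             // position is uninitialized, as described above
+ *         }
+ *     }
+ * </pre>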
        + * + * @param nonSticky if false (the default), the prior position is + * maintained by cursor movement operations, and the cursor stays at the + * prior position when {@code NOTFOUND} is returned. If true, the prior + * position is not maintained, and the cursor is reinitialized when + * {@code NOTFOUND} is returned. + * + * @return this + */ + public CursorConfig setNonSticky(boolean nonSticky) { + setNonStickyVoid(nonSticky); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNonStickyVoid(boolean nonSticky) { + this.nonSticky = nonSticky; + } + + /** + * Returns the non-sticky setting. + * + * @see #setNonSticky + */ + public boolean getNonSticky() { + return nonSticky; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public CursorConfig clone() { + try { + return (CursorConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + return "readUncommitted=" + readUncommitted + + "\nreadCommitted=" + readCommitted + + "\n"; + } +} diff --git a/src/com/sleepycat/je/CursorConfigBeanInfo.java b/src/com/sleepycat/je/CursorConfigBeanInfo.java new file mode 100644 index 0000000..be7cdac --- /dev/null +++ b/src/com/sleepycat/je/CursorConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class CursorConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(CursorConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(CursorConfig.class); + } +} diff --git a/src/com/sleepycat/je/CustomStats.java b/src/com/sleepycat/je/CustomStats.java new file mode 100644 index 0000000..24fbb73 --- /dev/null +++ b/src/com/sleepycat/je/CustomStats.java @@ -0,0 +1,44 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * A custom statistics object. Custom statistics allow for customization + * of statistics that are written at periodic intervals to the je.stats.csv + * file. 
The field names returned from the getFieldNames() method are used as + * column headers in the je.stats.csv file. The getFieldNames() method is only + * called once when the environment is opened. The field values are associated + * with the field names in the order of the returned array. The + * getFieldValues() method is called when a row is written to the statistics + * file. The semantics of the values are implementation specific. The values + * may represent totals, incremental (since the last getFieldValues() call), or + * stateless (computed at the time the statistic is requested). + */ +public interface CustomStats { + + /** + * The field names that are output to the je.stats.csv file. + * + * @return Array of strings that represent the field names. + */ + String[] getFieldNames(); + + /** + * The field values that are output to the je.stats.csv file. + * + * @return Array of strings that represent a value for the + * associated field name. + */ + String[] getFieldValues(); +} diff --git a/src/com/sleepycat/je/Database.java b/src/com/sleepycat/je/Database.java new file mode 100644 index 0000000..6863848 --- /dev/null +++ b/src/com/sleepycat/je/Database.java @@ -0,0 +1,2519 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.GetMode; +import com.sleepycat.je.dbi.SearchMode; +import com.sleepycat.je.dbi.TriggerManager; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.txn.HandleLocker; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.utilint.DatabaseUtil; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * A database handle. + *

+ * <p>Database attributes are specified in the {@link
+ * com.sleepycat.je.DatabaseConfig DatabaseConfig} class. Database handles are
+ * free-threaded and may be used concurrently by multiple threads.</p>
+ *
+ * <p>To open an existing database with default attributes:</p>
+ *
+ * <pre>
+ *     Environment env = new Environment(home, null);
+ *     Database myDatabase = env.openDatabase(null, "mydatabase", null);
+ * </pre>
+ *
+ * <p>To create a transactional database that supports duplicates:</p>
+ *
+ * <pre>
+ *     DatabaseConfig dbConfig = new DatabaseConfig();
+ *     dbConfig.setTransactional(true);
+ *     dbConfig.setAllowCreate(true);
+ *     dbConfig.setSortedDuplicates(true);
+ *     Database db = env.openDatabase(txn, "mydatabase", dbConfig);
+ * </pre>
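+ * <p>For illustration, a hypothetical scan over all records using a
+ * cursor (error handling omitted):</p>
+ *
+ * <pre>
+ *     Cursor cursor = myDatabase.openCursor(null, null);
+ *     try {
+ *         DatabaseEntry foundKey = new DatabaseEntry();
+ *         DatabaseEntry foundData = new DatabaseEntry();
+ *         while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) ==
+ *                OperationStatus.SUCCESS) {
+ *             // process foundKey/foundData
+ *         }
+ *     } finally {
+ *         cursor.close();
+ *     }
+ * </pre>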
+ */ +public class Database implements Closeable { + + static final CursorConfig DEFAULT_CURSOR_CONFIG = + CursorConfig.DEFAULT.clone().setNonSticky(true); + + static final CursorConfig READ_COMMITTED_CURSOR_CONFIG = + CursorConfig.READ_COMMITTED.clone().setNonSticky(true); + + /* + * DbState embodies the Database handle state. + */ + enum DbState { + OPEN, CLOSED, INVALID, PREEMPTED, CORRUPTED + } + + /* + * The current state of the handle. When the state is non-open, the + * databaseImpl should not be accessed, since the databaseImpl is set to + * null during close. This check does not guarantee that an NPE will not + * occur, since we do not synchronize -- the state could change after + * checking it and before accessing the databaseImpl. But the check makes + * an NPE unlikely. + */ + private volatile DbState state; + + /* The DatabasePreemptedException cause when state == PREEMPTED. */ + private volatile OperationFailureException preemptedCause; + + /* The SecondaryIntegrityException cause when state == CORRUPTED. */ + private volatile OperationFailureException corruptedCause; + + /* + * The envHandle field cannot be declared as final because it is + * initialized by methods called by the ctor. However, after construction + * it is non-null and should be treated as final. + */ + Environment envHandle; + + /* + * The databaseImpl field is set to null during close to avoid OOME. It + * should normally only be accessed via the checkOpen and getDbImpl + * methods. It is guaranteed to be non-null if state == DbState.OPEN. + */ + private DatabaseImpl databaseImpl; + + /* + * Used to store per-Database handle properties: allow create, + * exclusive create, read only and use existing config. Other Database-wide + * properties are stored in DatabaseImpl. + */ + DatabaseConfig configuration; + + /* True if this handle permits write operations. */ + private boolean isWritable; + + /* Records how many cursors are open on this database. */ + private final AtomicInteger openCursors = new AtomicInteger(); + + /* + * Locker that owns the NameLN lock held while the Database is open. + * + * The handleLocker field is set to null during close to avoid OOME. It + * is only accessed during close, while synchronized, so checking for null + * is unnecessary. + */ + private HandleLocker handleLocker; + + /* + * If a user-supplied SecondaryAssociation is configured, this field + * contains it. Otherwise, it contains an internal SecondaryAssociation + * that uses the simpleAssocSecondaries to store associations between a + * single primary and its secondaries. + */ + SecondaryAssociation secAssoc; + Collection<SecondaryDatabase> simpleAssocSecondaries; + + /* + * Secondaries whose keys have values constrained to the primary keys in + * this database. + */ + Collection<SecondaryDatabase> foreignKeySecondaries; + + final Logger logger; + + /** + * Creates a database but does not open or fully initialize it. Is + * protected for use in compat package. + */ + Database(final Environment env) { + this.envHandle = env; + logger = getEnv().getLogger(); + } + + /** + * Creates a database, called by Environment. + */ + DatabaseImpl initNew(final Environment env, + final Locker locker, + final String databaseName, + final DatabaseConfig dbConfig) { + + dbConfig.validateForNewDb(); + + init(env, dbConfig); + + /* Make the databaseImpl. */ + databaseImpl = getEnv().getDbTree().createDb( + locker, databaseName, dbConfig, handleLocker); + databaseImpl.addReferringHandle(this); + return databaseImpl; + } + + /** + * Opens a database, called by Environment.
+ */ + void initExisting(final Environment env, + final Locker locker, + final DatabaseImpl dbImpl, + final String databaseName, + final DatabaseConfig dbConfig) { + + /* + * Make sure the configuration used for the open is compatible with the + * existing databaseImpl. + */ + validateConfigAgainstExistingDb(locker, databaseName, dbConfig, + dbImpl); + + init(env, dbConfig); + this.databaseImpl = dbImpl; + dbImpl.addReferringHandle(this); + } + + private void init(final Environment env, final DatabaseConfig config) { + assert handleLocker != null; + envHandle = env; + configuration = config.clone(); + isWritable = !configuration.getReadOnly(); + secAssoc = makeSecondaryAssociation(); + state = DbState.OPEN; + } + + SecondaryAssociation makeSecondaryAssociation() { + foreignKeySecondaries = new CopyOnWriteArraySet<>(); + + if (configuration.getSecondaryAssociation() != null) { + if (configuration.getSortedDuplicates()) { + throw new IllegalArgumentException( + "Duplicates not allowed for a primary database"); + } + simpleAssocSecondaries = Collections.emptySet(); + return configuration.getSecondaryAssociation(); + } + + simpleAssocSecondaries = new CopyOnWriteArraySet<>(); + + return new SecondaryAssociation() { + + public boolean isEmpty() { + return simpleAssocSecondaries.isEmpty(); + } + + public Database getPrimary(@SuppressWarnings("unused") + DatabaseEntry primaryKey) { + return Database.this; + } + + public Collection + getSecondaries(@SuppressWarnings("unused") + DatabaseEntry primaryKey) { + return simpleAssocSecondaries; + } + }; + } + + /** + * Used to remove references to this database from other objects, when this + * database is closed. We don't remove references from cursors or + * secondaries here, because it's an error to close a database before its + * cursors and to close a primary before its secondaries. + */ + void removeReferringAssociations() { + envHandle.removeReferringHandle(this); + } + + /** + * Sees if this new handle's configuration is compatible with the + * pre-existing database. + */ + private void validateConfigAgainstExistingDb(Locker locker, + final String databaseName, + final DatabaseConfig config, + final DatabaseImpl dbImpl) { + /* + * The sortedDuplicates, temporary, and replicated properties are + * persistent and immutable. But they do not need to be specified if + * the useExistingConfig property is set. + */ + if (!config.getUseExistingConfig()) { + validatePropertyMatches( + "sortedDuplicates", dbImpl.getSortedDuplicates(), + config.getSortedDuplicates()); + validatePropertyMatches( + "temporary", dbImpl.isTemporary(), + config.getTemporary()); + /* Only check replicated if the environment is replicated. */ + if (getEnv().isReplicated()) { + validatePropertyMatches( + "replicated", dbImpl.isReplicated(), + config.getReplicated()); + } + } + + /* + * The transactional and deferredWrite properties are kept constant + * while any handles are open, and set when the first handle is opened. + * But if an existing handle is open and the useExistingConfig property + * is set, then they do not need to be specified. 
+ */ + if (dbImpl.hasOpenHandles()) { + if (!config.getUseExistingConfig()) { + validatePropertyMatches( + "transactional", dbImpl.isTransactional(), + config.getTransactional()); + validatePropertyMatches( + "deferredWrite", dbImpl.isDurableDeferredWrite(), + config.getDeferredWrite()); + } + } else { + dbImpl.setTransactional(config.getTransactional()); + dbImpl.setDeferredWrite(config.getDeferredWrite()); + if (config.getDeferredWrite()) { + mutateDeferredWriteBINDeltas(dbImpl); + } + } + + /* + * If this database handle uses the existing config, we shouldn't + * search for and write any changed attributes to the log. + */ + if (config.getUseExistingConfig()) { + return; + } + + /* Write any changed, persistent attributes to the log. */ + boolean dbImplModified = false; + + /* Only re-set the comparators if the override is allowed. */ + if (config.getOverrideBtreeComparator()) { + dbImplModified |= dbImpl.setBtreeComparator( + config.getBtreeComparator(), + config.getBtreeComparatorByClassName()); + } + + if (config.getOverrideDuplicateComparator()) { + dbImplModified |= dbImpl.setDuplicateComparator( + config.getDuplicateComparator(), + config.getDuplicateComparatorByClassName()); + } + + dbImplModified |= dbImpl.setTriggers(locker, + databaseName, + config.getTriggers(), + config.getOverrideTriggers()); + + /* Check if KeyPrefixing property is updated. */ + boolean newKeyPrefixing = config.getKeyPrefixing(); + if (newKeyPrefixing != dbImpl.getKeyPrefixing()) { + dbImplModified = true; + if (newKeyPrefixing) { + dbImpl.setKeyPrefixing(); + } else { + dbImpl.clearKeyPrefixing(); + } + } + + /* + * Check if NodeMaxEntries properties are updated. + */ + int newNodeMaxEntries = config.getNodeMaxEntries(); + if (newNodeMaxEntries != 0 && + newNodeMaxEntries != dbImpl.getNodeMaxTreeEntries()) { + dbImplModified = true; + dbImpl.setNodeMaxTreeEntries(newNodeMaxEntries); + } + + /* Do not write LNs in a read-only environment. Also see [#15743]. */ + EnvironmentImpl envImpl = getEnv(); + if (dbImplModified && !envImpl.isReadOnly()) { + + /* Write a new NameLN to the log. */ + try { + envImpl.getDbTree().updateNameLN(locker, dbImpl.getName(), + null); + } catch (LockConflictException e) { + throw new IllegalStateException( + "DatabaseConfig properties may not be updated when the " + + "database is already open; first close other open " + + "handles for this database.", e); + } + + /* Dirty the root. */ + envImpl.getDbTree().modifyDbRoot(dbImpl); + } + + /* CacheMode is changed for all handles, but is not persistent. */ + dbImpl.setCacheMode(config.getCacheMode()); + } + + /** + * Mutate BIN-deltas to full BINs for the given DW DB. [#25999] + *

        + * Any BIN-deltas in cache must be mutated to full BINs, since + * BIN-deltas are not allowed in DW mode. This can be expensive, and is + * only a workaround for the underlying problem that BIN-deltas are not + * supported in DW mode. The mutation is necessary when a db transitions + * from non-DW to DW mode. In that case there may be BIN-deltas in + * cache, even if the DB has not yet been opened since BIN-deltas may + * be placed in the Btree by recovery. This method is not optimized + * because ultimately BIN-deltas will be supported in DW mode or DW mode + * will be discontinued. + *

+ * At the time this method is called, the DW flag on this object has + * already been set. Therefore, this workaround doesn't guarantee that + * internal operations on the db (e.g., compression or eviction) won't + * operate on a BIN-delta in a DW DB. Consequently, we cannot assume/assert + * that a cached BIN-delta does not exist in a DW DB. Such assertions were + * removed as part of this workaround. + *

        + * Setting the DW flag before mutating the BINs is necessary to ensure + * that no new BIN-deltas appear in cache during this process. BINs loaded + * into cache for a DW db are mutated to full BINs in IN.postFetchInit. + * + * @see BIN#shouldLogDelta + */ + private void mutateDeferredWriteBINDeltas(DatabaseImpl dbImpl) { + + final OffHeapCache ohCache = getEnv().getOffHeapCache(); + + for (final IN in : getEnv().getInMemoryINs()) { + if (in.getDatabase() != dbImpl) { + continue; + } + in.latchNoUpdateLRU(); + try { + if (in.isBIN()) { + in.mutateToFullBIN(false); + continue; + } + if (ohCache == null || + in.getNormalizedLevel() != 2) { + continue; + } + for (int i = 0; i < in.getNEntries(); i += 1) { + if (in.getOffHeapBINId(i) < 0) { + continue; + } + final IN child = in.loadIN(i, CacheMode.UNCHANGED); + if (child == null) { + continue; + } + child.latchNoUpdateLRU(); + try { + child.mutateToFullBIN(false); + } finally { + child.releaseLatch(); + } + } + } finally { + in.releaseLatch(); + } + } + } + + /** + * @throws IllegalArgumentException via Environment.openDatabase and + * openSecondaryDatabase. + */ + private void validatePropertyMatches(final String propName, + final boolean existingValue, + final boolean newValue) { + if (newValue != existingValue) { + throw new IllegalArgumentException( + "You can't open a Database with a " + propName + + " configuration of " + newValue + + " if the underlying database was created with a " + + propName + " setting of " + existingValue + '.'); + } + } + + /** + * Discards the database handle. + *

        + * When closing the last open handle for a deferred-write database, any + * cached database information is flushed to disk as if {@link #sync} were + * called. + *

        + * The database handle should not be closed while any other handle that + * refers to it is not yet closed; for example, database handles should not + * be closed while cursor handles into the database remain open, or + * transactions that include operations on the database have not yet been + * committed or aborted. Specifically, this includes {@link + * com.sleepycat.je.Cursor Cursor} and {@link com.sleepycat.je.Transaction + * Transaction} handles. + *

        + * When multiple threads are using the {@link com.sleepycat.je.Database + * Database} handle concurrently, only a single thread may call this + * method. + *

        + * When called on a database that is the primary database for a secondary + * index, the primary database should be closed only after all secondary + * indices which reference it have been closed. + *

        + * The database handle may not be accessed again after this method is + * called, regardless of the method's success or failure, with one + * exception: the {@code close} method itself may be called any number of + * times.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.
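+ * <p>For illustration, a hypothetical pattern that guarantees the handle
+ * is closed and its reference dropped ({@code env} is assumed to be an
+ * open Environment):</p>
+ * <pre>
+ *     Database db = env.openDatabase(null, "mydatabase", null);
+ *     try {
+ *         // read and write records
+ *     } finally {
+ *         db.close();
+ *         db = null; // discard the reference, per the warning above
+ *     }
+ * </pre>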

        + * + * @see DatabaseConfig#setDeferredWrite DatabaseConfig.setDeferredWrite + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if cursors associated with this database + * are still open. + */ + public void close() { + try { + closeInternal(true /*doSyncDw*/, true /*deleteTempDb*/, + DbState.CLOSED, null /*preemptedException*/); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /* + * This method is private for now because it is incomplete. To fully + * implement it we must clear all dirty nodes for the database that is + * closed, since otherwise they will be flushed during the next checkpoint. + */ + @SuppressWarnings("unused") + private void closeNoSync() { + try { + closeInternal(false /*doSyncDw*/, true /*deleteTempDb*/, + DbState.CLOSED, null /*preemptedException*/); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Marks the handle as preempted when the handle lock is stolen by the HA + * replayer, during replay of a naming operation (remove, truncate or + * rename). This causes DatabasePreemptedException to be thrown on all + * subsequent use of the handle or cursors opened on this handle. [#17015] + */ + synchronized void setPreempted(final String dbName, final String msg) { + + /* + * Return silently when the DB is closed, because the calling thread is + * performing an DbTree operation, and a "database closed" exception + * should not be thrown there. + */ + final DatabaseImpl dbImpl = databaseImpl; + + if (dbImpl == null) { + return; + } + + final OperationFailureException preemptedException = + dbImpl.getEnv().createDatabasePreemptedException( + msg, dbName, this); + + closeInternal(false /*doSyncDw*/, false /*deleteTempDb*/, + DbState.PREEMPTED, preemptedException); + } + + synchronized void setCorrupted(SecondaryIntegrityException sie) { + if (state != DbState.OPEN) { + return; + } + corruptedCause = sie; + state = DbState.CORRUPTED; + } + + boolean isCorrupted() { + return corruptedCause != null; + } + + /** + * Invalidates the handle when the transaction used to open the database + * is aborted. + * + * Note that this method (unlike close) does not perform sync and removal + * of DW DBs. A DW DB cannot be transactional. + */ + synchronized void invalidate() { + closeInternal(false /*doSyncDw*/, false /*deleteTempDb*/, + DbState.INVALID, null /*preemptedException*/); + } + + EnvironmentImpl getEnv() { + return envHandle.getNonNullEnvImpl(); + } + + private void closeInternal( + final boolean doSyncDw, + final boolean deleteTempDb, + final DbState newState, + final OperationFailureException preemptedException) { + + /* + * We acquire the SecondaryAssociationLatch exclusively because + * associations are changed by removeReferringAssociations, and + * operations using the associations should not run concurrently with + * close. + */ + try { + final EnvironmentImpl envImpl = getEnv(); + try { + envImpl.getSecondaryAssociationLock(). + writeLock().lockInterruptibly(); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(envImpl, e); + } + try { + closeInternalWork( + doSyncDw, deleteTempDb, newState, preemptedException); + } finally { + envImpl.getSecondaryAssociationLock().writeLock().unlock(); + } + } finally { + minimalClose(newState, preemptedException); + } + } + + /** + * Nulls-out indirect references to the environment, to allow GC. 
It also + * sets the state to the given non-open state, if the state is currently + * open. We must set the state to non-open before setting references to + * null. + * + * The app may hold the Database references longer than expected. In + * particular during an Environment re-open we need to give GC a fighting + * chance while handles from two environments are temporarily referenced. + * + * Note that this is needed even when the db or env is invalid. + */ + synchronized void minimalClose( + final DbState newState, + final OperationFailureException preemptedException) { + + assert newState != DbState.OPEN; + + if (state == DbState.OPEN) { + state = newState; + preemptedCause = preemptedException; + } + + databaseImpl = null; + handleLocker = null; + } + + private void closeInternalWork( + final boolean doSyncDw, + final boolean deleteTempDb, + final DbState newState, + final OperationFailureException preemptedException) { + + assert newState != DbState.OPEN; + + final StringBuilder handleRefErrors = new StringBuilder(); + RuntimeException triggerException = null; + final DatabaseImpl dbImpl; + + synchronized (this) { + + /* + * Do nothing if handle was previously closed. + * + * When the state is set to CLOSED, INVALID and PREEMPTED, the + * database has been closed. So for these states, do not close the + * database again. + * + * The CORRUPTED state is set when SecondaryIntegrityException is + * thrown, but the database is not closed at that time. + * This state is currently only set for a secondary database. + * For this state, we want to let the user close the database. + * + * Besides, if the database was not opened, just return. + */ + if (state == DbState.CLOSED || state == DbState.INVALID || + state == DbState.PREEMPTED || state == null) { + return; + } + + /* + * databaseImpl and handleLocker are set to null only while + * synchronized, at which time the state is also changed to + * non-open. So they should not be null here. + */ + dbImpl = databaseImpl; + assert dbImpl != null; + assert handleLocker != null; + + /* + * Check env only after checking for closed db, to mimic close() + * behavior for Cursors, etc, and avoid unnecessary exception + * handling. [#21264] + */ + final EnvironmentImpl envImpl = checkEnv(); + + /* + * The state should be changed ASAP during close, so that + * addCursor and removeCursor will see the updated state ASAP. + */ + state = newState; + preemptedCause = preemptedException; + + /* + * Throw an IllegalStateException if there are open cursors or + * associated secondaries. + */ + if (newState == DbState.CLOSED) { + if (openCursors.get() != 0) { + handleRefErrors.append(" "). + append(openCursors.get()). + append(" open cursors."); + } + if (simpleAssocSecondaries != null && + simpleAssocSecondaries.size() > 0) { + handleRefErrors.append(" "). + append(simpleAssocSecondaries.size()). + append(" associated SecondaryDatabases."); + } + if (foreignKeySecondaries != null && + foreignKeySecondaries.size() > 0) { + handleRefErrors.append(" "). + append(foreignKeySecondaries.size()). + append( + " associated foreign key SecondaryDatabases."); + } + } + + trace(Level.FINEST, "Database.close: ", null, null); + + removeReferringAssociations(); + + dbImpl.removeReferringHandle(this); + envImpl.getDbTree().releaseDb(dbImpl); + + /* + * If the handle was preempted, we mark the locker as + * only-abortable with the DatabasePreemptedException. 
If + * the handle locker is a user txn, this causes the + * DatabasePreemptedException to be thrown if the user + * attempts to commit, or continue to use, the txn, rather + * than throwing a LockConflictException. [#17015] + */ + if (newState == DbState.PREEMPTED) { + handleLocker.setOnlyAbortable(preemptedException); + } + + /* + * Tell our protecting txn that we're closing. If this type + * of transaction doesn't live beyond the life of the + * handle, it will release the db handle lock. + */ + if (newState == DbState.CLOSED) { + if (isWritable() && (dbImpl.noteWriteHandleClose() == 0)) { + try { + TriggerManager.runCloseTriggers(handleLocker, dbImpl); + } catch (RuntimeException e) { + triggerException = e; + } + } + handleLocker.operationEnd(true); + } else { + handleLocker.operationEnd(false); + } + } + + /* + * Notify the database when a handle is closed. This should not be + * done while synchronized since it may perform database removal or + * sync. Statements above are synchronized but this section must not + * be. + * + * Note that handleClosed may throw an exception, so any following code + * may not be executed. + */ + dbImpl.handleClosed(doSyncDw, deleteTempDb); + + /* Throw exceptions for previously encountered problems. */ + if (handleRefErrors.length() > 0) { + throw new IllegalStateException( + "Database closed while still referenced by other handles." + + handleRefErrors.toString()); + } + if (triggerException != null) { + throw triggerException; + } + } + + /** + * Flushes any cached information for this database to disk; only + * applicable for deferred-write databases. + *

        Note that deferred-write databases are automatically flushed to disk + * when the {@link #close} method is called. + * + * @see DatabaseConfig#setDeferredWrite DatabaseConfig.setDeferredWrite + * + * @throws com.sleepycat.je.rep.DatabasePreemptedException in a replicated + * environment if the master has truncated, removed or renamed the + * database. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this is not a deferred-write + * database, or this database is read-only. + * + * @throws IllegalStateException if the database has been closed. + */ + public void sync() { + + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + trace(Level.FINEST, "Database.sync", null, null, null, null); + + dbImpl.sync(true); + } + + /** + * Opens a sequence in the database. + * + * @param txn For a transactional database, an explicit transaction may + * be specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param key The key {@link DatabaseEntry} of the sequence. + * + * @param config The sequence attributes. If null, default attributes are + * used. + * + * @return a new Sequence handle. + * + * @throws SequenceExistsException if the sequence record already exists + * and the {@code SequenceConfig ExclusiveCreate} parameter is true. + * + * @throws SequenceNotFoundException if the sequence record does not exist + * and the {@code SequenceConfig AllowCreate} parameter is false. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. If the sequence does not exist and the {@link + * SequenceConfig#setAllowCreate AllowCreate} parameter is true, then one + * of the Write + * Operation Failures may also occur. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this database is read-only, or + * this database is configured for duplicates. + * + * @throws IllegalStateException if the Sequence record is deleted by + * another thread during this method invocation, or the database has been + * closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code SequenceConfig} parameter. + */ + public Sequence openSequence(final Transaction txn, + final DatabaseEntry key, + final SequenceConfig config) { + try { + checkEnv(); + DatabaseUtil.checkForNullDbt(key, "key", true); + checkOpen(); + trace(Level.FINEST, "Database.openSequence", txn, key, null, null); + + return new Sequence(this, txn, key, config); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Removes the sequence from the database. This method should not be + * called if there are open handles on this sequence. + * + * @param txn For a transactional database, an explicit transaction may be + * specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param key The key {@link com.sleepycat.je.DatabaseEntry + * DatabaseEntry} of the sequence. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * @throws UnsupportedOperationException if this database is read-only. + */ + public void removeSequence(final Transaction txn, + final DatabaseEntry key) { + try { + delete(txn, key); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Returns a cursor into the database. + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the database is non-transactional, null must be + * specified. For a transactional database, the transaction is optional + * for read-only access and required for read-write access. + * + * @param cursorConfig The cursor attributes. If null, default attributes + * are used. + * + * @return A database cursor. + * + * @throws com.sleepycat.je.rep.DatabasePreemptedException in a replicated + * environment if the master has truncated, removed or renamed the + * database. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code CursorConfig} parameter. + */ + public Cursor openCursor(final Transaction txn, + CursorConfig cursorConfig) { + try { + checkEnv(); + checkOpen(); + + if (cursorConfig == null) { + cursorConfig = CursorConfig.DEFAULT; + } + + if (cursorConfig.getReadUncommitted() && + cursorConfig.getReadCommitted()) { + throw new IllegalArgumentException( + "Only one may be specified: " + + "ReadCommitted or ReadUncommitted"); + } + + trace(Level.FINEST, "Database.openCursor", txn, cursorConfig); + return newDbcInstance(txn, cursorConfig); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Create a DiskOrderedCursor to iterate over the records in 'this' + * Database. Because the retrieval is based on Log Sequence Number (LSN) + * order rather than key order, records are returned in unsorted order in + * exchange for generally faster retrieval. LSN order approximates disk + * sector order. + *

        + * See {@link DiskOrderedCursor} for more details and a description of the + * consistency guarantees provided by the scan. + *
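+ * <p>For illustration, a hypothetical disk-ordered scan that closes the
+ * cursor in a finally block, so that log file deletion is re-enabled
+ * (see the warning below); {@code db} is assumed to be an open handle:</p>
+ * <pre>
+ *     DiskOrderedCursor dc =
+ *         db.openCursor(new DiskOrderedCursorConfig());
+ *     try {
+ *         DatabaseEntry key = new DatabaseEntry();
+ *         DatabaseEntry data = new DatabaseEntry();
+ *         while (dc.getNext(key, data, null) ==
+ *                OperationStatus.SUCCESS) {
+ *             // records are returned in LSN (approximate disk) order
+ *         }
+ *     } finally {
+ *         dc.close();
+ *     }
+ * </pre>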

        + * WARNING: After calling this method, deletion of log files by + * the JE log cleaner will be disabled until {@link + * DiskOrderedCursor#close()} is called. To prevent unbounded growth of + * disk usage, be sure to call {@link DiskOrderedCursor#close()} to + * re-enable log file deletion. + */ + public DiskOrderedCursor openCursor(DiskOrderedCursorConfig cursorConfig) { + try { + checkEnv(); + checkOpen(); + + if (cursorConfig == null) { + cursorConfig = DiskOrderedCursorConfig.DEFAULT; + } + + trace(Level.FINEST, "Database.openForwardCursor", + null, cursorConfig); + + Database[] dbs = new Database[1]; + dbs[0] = this; + + return new DiskOrderedCursor(dbs, cursorConfig); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Is overridden by SecondaryDatabase. + */ + Cursor newDbcInstance(final Transaction txn, + final CursorConfig cursorConfig) { + return new Cursor(this, txn, cursorConfig); + } + + /** + * @hidden + * For internal use only. + * + * @deprecated in favor of {@link #populateSecondaries(Transaction, + * DatabaseEntry, DatabaseEntry, long, CacheMode)}. + */ + public void populateSecondaries(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) { + + populateSecondaries(txn, key, data, 0 /*expirationTime*/, null); + } + + /** + * @hidden + * For internal use only. + * + * Given the {@code key}, {@code data} and {@code expirationTime} for a + * locked primary DB record, update the corresponding secondary database + * (index) records, for secondaries enabled for incremental population. + *

+ * The secondaries associated with the primary record are determined by calling + * {@link SecondaryAssociation#getSecondaries}. For each of these + * secondaries, {@link SecondaryDatabase#isIncrementalPopulationEnabled} is + * called to determine whether incremental population is enabled. If so, + * appropriate secondary records are inserted and deleted so that the + * index accurately reflects the current state of the primary record. + *

        + * Note that for a given primary record, this method will not modify the + * secondary database if the secondary has already been updated for the + * primary record, due to concurrent primary write operations. Due to this + * behavior, certain integrity checks are not performed as documented in + * {@link SecondaryDatabase#startIncrementalPopulation}. + *

        + * The primary record must be locked (read or write locked) when this + * method is called. Therefore, the caller should not use dirty-read to + * read the primary record. The simplest way to ensure that the primary + * record is locked is to use a cursor to read primary records, and call + * this method while the cursor is still positioned on the primary record. + *
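+ * <p>For illustration, a hypothetical loop that reads each primary record
+ * with a cursor (so the record remains locked) and then updates the
+ * secondaries being populated; {@code primaryDb} and {@code txn} are
+ * assumptions:</p>
+ * <pre>
+ *     try (Cursor cursor = primaryDb.openCursor(txn, null)) {
+ *         DatabaseEntry key = new DatabaseEntry();
+ *         DatabaseEntry data = new DatabaseEntry();
+ *         OperationResult r;
+ *         while ((r = cursor.get(key, data, Get.NEXT, null)) != null) {
+ *             primaryDb.populateSecondaries(
+ *                 txn, key, data, r.getExpirationTime(), null);
+ *         }
+ *     }
+ * </pre>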

+ * It is the caller's responsibility to pass all primary records to this + * method that contain index keys for a secondary DB being incrementally + * populated, before calling {@link + * SecondaryDatabase#endIncrementalPopulation} on that secondary DB. + * + * @param txn is the transaction to be used to write secondary records. If + * null and the secondary DBs are transactional, auto-commit will be used. + * + * @param key is the key of the locked primary record. + * + * @param data is the data of the locked primary record. + * + * @param expirationTime the expiration time of the locked primary record. + * This can be obtained from {@link OperationResult#getExpirationTime()} + * when reading the primary record. + * + * @param cacheMode the CacheMode to use, or null for the Database default. + */ + public void populateSecondaries(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final long expirationTime, + CacheMode cacheMode) { + try { + checkEnv(); + DatabaseUtil.checkForNullDbt(key, "key", true); + DatabaseUtil.checkForNullDbt(data, "data", true); + final DatabaseImpl dbImpl = checkOpen(); + trace(Level.FINEST, "populateSecondaries", null, key, data, null); + + final Collection<SecondaryDatabase> secondaries = + secAssoc.getSecondaries(key); + + final Locker locker = LockerFactory.getWritableLocker( + envHandle, txn, dbImpl.isInternalDb(), isTransactional(), + dbImpl.isReplicated()); // autoTxnIsReplicated + + boolean success = false; + + if (cacheMode == null) { + cacheMode = dbImpl.getDefaultCacheMode(); + } + + try { + for (final SecondaryDatabase secDb : secondaries) { + if (secDb.isIncrementalPopulationEnabled()) { + + secDb.updateSecondary( + locker, null /*cursor*/, key /*priKey*/, + null /*oldData*/, data /*newData*/, cacheMode, + expirationTime, false /*expirationUpdated*/, + expirationTime); + } + } + success = true; + } finally { + locker.operationEnd(success); + } + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Removes records with a given key from the database. In the presence of + * duplicate keys, all records associated with the given key will be + * removed. When the database has associated secondary databases, this + * method also deletes the associated index records. + * + * @param txn For a transactional database, an explicit transaction may + * be specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param key the key used as + * input. + * + * @param options the WriteOptions, or null to use default options. + * + * @return the OperationResult if the record is deleted, else null if the + * given key was not found in the database. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this database is read-only. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null input key parameter, an input key parameter + * with a null data array, or a partial key input parameter.
+ * + * @since 7.0 + */ + public OperationResult delete(final Transaction txn, + final DatabaseEntry key, + final WriteOptions options) { + try { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + trace(Level.FINEST, "Database.delete", txn, key, null, null); + + final CacheMode cacheMode = + options != null ? options.getCacheMode() : null; + + OperationResult result = null; + + final Locker locker = LockerFactory.getWritableLocker( + envHandle, txn, + dbImpl.isInternalDb(), + isTransactional(), + dbImpl.isReplicated()); // autoTxnIsReplicated + + try { + result = deleteInternal(locker, key, cacheMode); + } finally { + if (locker != null) { + locker.operationEnd(result != null); + } + } + + return result; + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Internal version of delete() that does no parameter checking. Notify + * triggers, update secondaries and enforce foreign key constraints. + * Deletes all duplicates. + */ + OperationResult deleteInternal(final Locker locker, + final DatabaseEntry key, + final CacheMode cacheMode) { + + final DatabaseEntry noData = new DatabaseEntry(); + noData.setPartial(0, 0, true); + + try (final Cursor cursor = new Cursor(this, locker, null)) { + cursor.setNonSticky(true); + + final LockMode lockMode = + cursor.isSerializableIsolation(LockMode.RMW) ? + LockMode.RMW : LockMode.READ_UNCOMMITTED_ALL; + + OperationResult searchResult = cursor.search( + key, noData, lockMode, cacheMode, SearchMode.SET, false); + + final DatabaseImpl dbImpl = getDbImpl(); + OperationResult anyResult = null; + + while (searchResult != null) { + + final OperationResult deleteResult = cursor.deleteInternal( + dbImpl.getRepContext(), cacheMode); + + if (deleteResult != null) { + anyResult = deleteResult; + } + + if (!dbImpl.getSortedDuplicates()) { + break; + } + + searchResult = cursor.retrieveNext( + key, noData, lockMode, cacheMode, GetMode.NEXT_DUP); + } + + if (anyResult == null) { + dbImpl.getEnv().incDeleteFailOps(dbImpl); + } + + return anyResult; + } + } + + /** + * Removes records with a given key from the database. In the presence of + * duplicate keys, all records associated with the given key will be + * removed. When the database has associated secondary databases, this + * method also deletes the associated index records. + * + *

        Calling this method is equivalent to calling {@link + * #delete(Transaction, DatabaseEntry, WriteOptions)}.
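+ * <p>For illustration, a hypothetical deletion by key ({@code db} is
+ * assumed to be an open Database handle):</p>
+ * <pre>
+ *     DatabaseEntry key =
+ *         new DatabaseEntry("aKey".getBytes(StandardCharsets.UTF_8));
+ *     OperationStatus status = db.delete(null, key);
+ *     if (status == OperationStatus.NOTFOUND) {
+ *         // no record with this key existed
+ *     }
+ * </pre>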

        + * + * @param txn For a transactional database, an explicit transaction may + * be specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param key the key used as + * input. + * + * @return The method will return {@link + * com.sleepycat.je.OperationStatus#NOTFOUND OperationStatus.NOTFOUND} if + * the given key is not found in the database; otherwise {@link + * com.sleepycat.je.OperationStatus#SUCCESS OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this database is read-only. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null input key parameter, an input key parameter + * with a null data array, or a partial key input parameter. + */ + public OperationStatus delete(final Transaction txn, + final DatabaseEntry key) { + final OperationResult result = delete(txn, key, null); + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Retrieves a record according to the specified {@link Get} type. + * + *

        If the operation succeeds, the record will be locked according to the + * {@link ReadOptions#getLockMode() lock mode} specified, the key and/or + * data will be returned via the (non-null) DatabaseEntry parameters, and a + * non-null OperationResult will be returned. If the operation fails + * because the record requested is not found, null is returned.
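+ * <p>For illustration, a hypothetical read that requests a write (RMW)
+ * lock through the read options; {@code db}, {@code txn}, {@code key} and
+ * {@code data} are assumptions:</p>
+ * <pre>
+ *     ReadOptions options = new ReadOptions().setLockMode(LockMode.RMW);
+ *     OperationResult result =
+ *         db.get(txn, key, data, Get.SEARCH, options);
+ * </pre>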

        + * + *

        The following table lists each allowed operation and whether the key + * and data parameters are input or + * output parameters. See the individual {@link Get} operations for + * more information.

        + * + *
+ *
+ * <table border="1">
+ * <tr>
+ *     <th>Get operation</th>
+ *     <th>Description</th>
+ *     <th>'key' parameter</th>
+ *     <th>'data' parameter</th>
+ * </tr>
+ * <tr>
+ *     <td>{@link Get#SEARCH}</td>
+ *     <td>Searches using an exact match by key.</td>
+ *     <td>input</td>
+ *     <td>output</td>
+ * </tr>
+ * <tr>
+ *     <td>{@link Get#SEARCH_BOTH}</td>
+ *     <td>Searches using an exact match by key and data.</td>
+ *     <td>input</td>
+ *     <td>input</td>
+ * </tr>
+ * </table>
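+ *
+ * <p>For illustration only, a minimal sketch ({@code db} and {@code txn}
+ * are assumed to exist):
+ * <pre>{@code
+ * DatabaseEntry key =
+ *     new DatabaseEntry("key-1".getBytes(StandardCharsets.UTF_8));
+ * DatabaseEntry data = new DatabaseEntry();
+ * OperationResult result = db.get(txn, key, data, Get.SEARCH, null);
+ * if (result != null) {
+ *     // data now holds the record's value
+ * }
+ * }</pre>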
        + * + * @param txn For a transactional database, an explicit transaction may be + * specified to transaction-protect the operation, or null may be specified + * to perform the operation without transaction protection. For a + * non-transactional database, null must be specified. + * + * @param key the key input parameter. + * + * @param data the data input or output parameter, depending on getType. + * + * @param getType the Get operation type. May not be null. + * + * @param options the ReadOptions, or null to use default options. + * + * @return the OperationResult if the record requested is found, else null. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null getType, a null input key/data parameter, + * an input key/data parameter with a null data array, and a partial + * key/data input parameter. + * + * @since 7.0 + */ + public OperationResult get( + final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final Get getType, + ReadOptions options) { + + try { + checkEnv(); + checkOpen(); + + if (options == null) { + options = Cursor.DEFAULT_READ_OPTIONS; + } + + LockMode lockMode = options.getLockMode(); + + trace( + Level.FINEST, "Database.get", String.valueOf(getType), txn, + key, null, lockMode); + + checkLockModeWithoutTxn(txn, lockMode); + + final CursorConfig cursorConfig; + + if (lockMode == LockMode.READ_COMMITTED) { + cursorConfig = READ_COMMITTED_CURSOR_CONFIG; + lockMode = null; + } else { + cursorConfig = DEFAULT_CURSOR_CONFIG; + } + + OperationResult result = null; + + final Locker locker = LockerFactory.getReadableLocker( + this, txn, cursorConfig.getReadCommitted()); + + try { + try (final Cursor cursor = + new Cursor(this, locker, cursorConfig)) { + + result = cursor.getInternal( + key, data, getType, options, lockMode); + } + } finally { + locker.operationEnd(result != null); + } + + return result; + + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Retrieves the key/data pair with the given key. If the matching key has + * duplicate values, the first data item in the set of duplicates is + * returned. Retrieval of duplicates requires the use of {@link Cursor} + * operations. + * + *

        Calling this method is equivalent to calling {@link + * #get(Transaction, DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with + * {@link Get#SEARCH}.
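+ *
+ * <p>For illustration, a sketch of this status-based form (assumes
+ * {@code db} and {@code txn} exist):
+ * <pre>{@code
+ * DatabaseEntry key =
+ *     new DatabaseEntry("key-1".getBytes(StandardCharsets.UTF_8));
+ * DatabaseEntry data = new DatabaseEntry();
+ * OperationStatus status = db.get(txn, key, data, LockMode.DEFAULT);
+ * }</pre>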

        + * + * @param txn For a transactional database, an explicit transaction may be + * specified to transaction-protect the operation, or null may be specified + * to perform the operation without transaction protection. For a + * non-transactional database, null must be specified. + * + * @param key the key used as + * input. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus get(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) { + final OperationResult result = get( + txn, key, data, Get.SEARCH, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Retrieves the key/data pair with the given key and data value, that is, + * both the key and data items must match. + * + *

        Calling this method is equivalent to calling {@link + * #get(Transaction, DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with + * {@link Get#SEARCH_BOTH}.
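+ *
+ * <p>For illustration, a sketch (assumes {@code db} and {@code txn} exist,
+ * and that {@code key} and {@code data} carry the values to match):
+ * <pre>{@code
+ * OperationStatus status =
+ *     db.getSearchBoth(txn, key, data, LockMode.DEFAULT);
+ * boolean present = (status == OperationStatus.SUCCESS);
+ * }</pre>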

        + * + * @param txn For a transactional database, an explicit transaction may be + * specified to transaction-protect the operation, or null may be specified + * to perform the operation without transaction protection. For a + * non-transactional database, null must be specified. + * + * @param key the key used as + * input. + * + * @param data the data used as + * input. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getSearchBoth(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) { + final OperationResult result = get( + txn, key, data, Get.SEARCH_BOTH, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Inserts or updates a record according to the specified {@link Put} + * type. + * + *

        If the operation succeeds, the record will be inserted or updated
 + * and a non-null OperationResult will be returned. If the operation fails
 + * because the record already exists (or does not exist, depending on the
 + * putType), null is returned.

        + * + *

        When the database has associated secondary databases, this method + * also inserts or deletes associated index records as necessary.

        + * + *

        The following table lists each allowed operation. See the individual + * {@link Put} operations for more information.

        + * + *
+ *
+ * <table border="1">
+ * <tr>
+ *     <th>Put operation</th>
+ *     <th>Description</th>
+ *     <th>Returns null when?</th>
+ *     <th>Other special rules</th>
+ * </tr>
+ * <tr>
+ *     <td>{@link Put#OVERWRITE}</td>
+ *     <td>Inserts or updates a record depending on whether a matching
+ *     record is already present.</td>
+ *     <td>Never returns null.</td>
+ *     <td>Without duplicates, a matching record is one with the same key;
+ *     with duplicates, it is one with the same key and data.</td>
+ * </tr>
+ * <tr>
+ *     <td>{@link Put#NO_OVERWRITE}</td>
+ *     <td>Inserts a record if a record with a matching key is not already
+ *     present.</td>
+ *     <td>When an existing record matches.</td>
+ *     <td>If the database has duplicate keys, a record is inserted only if
+ *     there are no records with a matching key.</td>
+ * </tr>
+ * <tr>
+ *     <td>{@link Put#NO_DUP_DATA}</td>
+ *     <td>Inserts a record in a database with duplicate keys if a record
+ *     with a matching key and data is not already present.</td>
+ *     <td>When an existing record matches.</td>
+ *     <td>Without duplicates, this operation is not allowed.</td>
+ * </tr>
+ * </table>
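+ *
+ * <p>For illustration only, a minimal sketch ({@code db} and {@code txn}
+ * are assumed to exist):
+ * <pre>{@code
+ * DatabaseEntry key =
+ *     new DatabaseEntry("key-1".getBytes(StandardCharsets.UTF_8));
+ * DatabaseEntry data =
+ *     new DatabaseEntry("value-1".getBytes(StandardCharsets.UTF_8));
+ * OperationResult result = db.put(txn, key, data, Put.NO_OVERWRITE, null);
+ * if (result == null) {
+ *     // a record with this key already existed; nothing was written
+ * }
+ * }</pre>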
        + * + * @param txn For a transactional database, an explicit transaction may be + * specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param key the key used as + * input. + * + * @param data the data used as + * input. + * + * @param putType the Put operation type. May not be null. + * + * @param options the WriteOptions, or null to use default options. + * + * @return the OperationResult if the record is written, else null. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the database is read-only, or + * putType is Put.NO_DUP_DATA and the database is not configured for + * duplicates. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null putType, a null input key/data parameter, + * an input key/data parameter with a null data array, a partial key/data + * input parameter, or when putType is Put.CURRENT. + * + * @since 7.0 + */ + public OperationResult put( + final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final Put putType, + final WriteOptions options) { + + try { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + if (putType == Put.CURRENT) { + throw new IllegalArgumentException( + "putType may not be Put.CURRENT"); + } + + OperationResult result = null; + + trace( + Level.FINEST, "Database.put", String.valueOf(putType), txn, + key, data, null); + + final Locker locker = LockerFactory.getWritableLocker( + envHandle, txn, + dbImpl.isInternalDb(), + isTransactional(), + dbImpl.isReplicated()); // autoTxnIsReplicated + + try { + try (final Cursor cursor = + new Cursor(this, locker, DEFAULT_CURSOR_CONFIG)) { + + result = cursor.putInternal(key, data, putType, options); + } + } finally { + locker.operationEnd(result != null); + } + + return result; + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Stores the key/data pair into the database. + * + *

        Calling this method is equivalent to calling {@link + * #put(Transaction, DatabaseEntry, DatabaseEntry, Put, WriteOptions)} with + * {@link Put#OVERWRITE}.

        + * + *

        If the key already appears in the database and duplicates are not + * configured, the data associated with the key will be replaced. If the + * key already appears in the database and sorted duplicates are + * configured, the new data value is inserted at the correct sorted + * location.
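+ *
+ * <p>For illustration, a sketch (assumes {@code db}, {@code txn},
+ * {@code key} and {@code data} exist):
+ * <pre>{@code
+ * OperationStatus status = db.put(txn, key, data);
+ * // status is always OperationStatus.SUCCESS for this overwrite form
+ * }</pre>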

+ *
+ *
+ * @param txn For a transactional database, an explicit transaction may
+ * be specified, or null may be specified to use auto-commit. For a
+ * non-transactional database, null must be specified.
+ *
+ * @param key the key used as
+ * input.
+ *
+ * @param data the data used as
+ * input.
+ *
+ * @return {@link com.sleepycat.je.OperationStatus#SUCCESS
+ * OperationStatus.SUCCESS}.
+ *
+ * @throws OperationFailureException if one of the Write
+ * Operation Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws UnsupportedOperationException if this database is read-only.
+ *
+ * @throws IllegalStateException if the database has been closed.
+ */
+ public OperationStatus put(final Transaction txn,
+ final DatabaseEntry key,
+ final DatabaseEntry data) {
+ final OperationResult result = put(
+ txn, key, data, Put.OVERWRITE, null);
+
+ EnvironmentFailureException.assertState(result != null);
+ return OperationStatus.SUCCESS;
+ }
+
+ /**
+ * Stores the key/data pair into the database if the key does not already
+ * appear in the database.
+ *
+ *

        Calling this method is equivalent to calling {@link + * #put(Transaction, DatabaseEntry, DatabaseEntry, Put, WriteOptions)} with + * {@link Put#NO_OVERWRITE}.

        + * + *

        This method will return {@link
 + * com.sleepycat.je.OperationStatus#KEYEXIST OperationStatus.KEYEXIST} if
 + * the key already exists in the database, even if the database supports
 + * duplicates.
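+ *
+ * <p>For illustration, a sketch (assumes {@code db}, {@code txn},
+ * {@code key} and {@code data} exist):
+ * <pre>{@code
+ * OperationStatus status = db.putNoOverwrite(txn, key, data);
+ * if (status == OperationStatus.KEYEXIST) {
+ *     // a record with this key already existed; nothing was written
+ * }
+ * }</pre>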

+ *
+ *
+ * @param txn For a transactional database, an explicit transaction may
+ * be specified, or null may be specified to use auto-commit. For a
+ * non-transactional database, null must be specified.
+ *
+ * @param key the key used as
+ * input.
+ *
+ * @param data the data used as
+ * input.
+ *
+ * @return {@link com.sleepycat.je.OperationStatus#KEYEXIST
+ * OperationStatus.KEYEXIST} if the key already appears in the database,
+ * else {@link com.sleepycat.je.OperationStatus#SUCCESS
+ * OperationStatus.SUCCESS}
+ *
+ * @throws OperationFailureException if one of the Write
+ * Operation Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws UnsupportedOperationException if this database is read-only.
+ *
+ * @throws IllegalStateException if the database has been closed.
+ */
+ public OperationStatus putNoOverwrite(final Transaction txn,
+ final DatabaseEntry key,
+ final DatabaseEntry data) {
+ final OperationResult result = put(
+ txn, key, data, Put.NO_OVERWRITE, null);
+
+ return result == null ?
+ OperationStatus.KEYEXIST : OperationStatus.SUCCESS;
+ }
+
+ /**
+ * Stores the key/data pair into the database if it does not already appear
+ * in the database.
+ *
+ *

        Calling this method is equivalent to calling {@link + * #put(Transaction, DatabaseEntry, DatabaseEntry, Put, WriteOptions)} with + * {@link Put#NO_DUP_DATA}.

        + * + *

        This method may only be called if the underlying database has been + * configured to support sorted duplicates.
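+ *
+ * <p>For illustration, a sketch (assumes {@code db} was opened with
+ * {@code DatabaseConfig.setSortedDuplicates(true)}):
+ * <pre>{@code
+ * OperationStatus status = db.putNoDupData(txn, key, data);
+ * if (status == OperationStatus.KEYEXIST) {
+ *     // this exact key/data pair was already present
+ * }
+ * }</pre>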

+ *
+ *
+ * @param txn For a transactional database, an explicit transaction may
+ * be specified, or null may be specified to use auto-commit. For a
+ * non-transactional database, null must be specified.
+ *
+ * @param key the key used as
+ * input.
+ *
+ * @param data the data used as
+ * input.
+ *
+ * @return {@link com.sleepycat.je.OperationStatus#KEYEXIST
+ * OperationStatus.KEYEXIST} if the key/data pair already appears in the
+ * database, else {@link com.sleepycat.je.OperationStatus#SUCCESS
+ * OperationStatus.SUCCESS}
+ *
+ * @throws OperationFailureException if one of the Write
+ * Operation Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws UnsupportedOperationException if this database is not configured
+ * for duplicates, or this database is read-only.
+ *
+ * @throws IllegalStateException if the database has been closed.
+ */
+ public OperationStatus putNoDupData(final Transaction txn,
+ final DatabaseEntry key,
+ final DatabaseEntry data) {
+ final OperationResult result = put(
+ txn, key, data, Put.NO_DUP_DATA, null);
+
+ return result == null ?
+ OperationStatus.KEYEXIST : OperationStatus.SUCCESS;
+ }
+
+ /**
+ * Creates a specialized join cursor for use in performing equality or
+ * natural joins on secondary indices.
+ *
+ *

        Each cursor in the cursors array must have been + * initialized to refer to the key on which the underlying database should + * be joined. Typically, this initialization is done by calling {@link + * Cursor#getSearchKey Cursor.getSearchKey}.

        + * + *

        Once the cursors have been passed to this method, they should not be + * accessed or modified until the newly created join cursor has been + * closed, or else inconsistent results may be returned. However, the + * position of the cursors will not be changed by this method or by the + * methods of the join cursor.
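+ *
+ * <p>For illustration, a sketch of an equality join over two secondary
+ * indices ({@code sec1}, {@code sec2}, {@code secKey1}, {@code secKey2}
+ * and {@code primaryDb} are assumed to exist):
+ * <pre>{@code
+ * Cursor c1 = sec1.openCursor(null, null);
+ * Cursor c2 = sec2.openCursor(null, null);
+ * DatabaseEntry tmp = new DatabaseEntry();
+ * c1.getSearchKey(secKey1, tmp, LockMode.DEFAULT);
+ * c2.getSearchKey(secKey2, tmp, LockMode.DEFAULT);
+ * JoinCursor join = primaryDb.join(new Cursor[] {c1, c2}, null);
+ * DatabaseEntry key = new DatabaseEntry();
+ * DatabaseEntry data = new DatabaseEntry();
+ * while (join.getNext(key, data, LockMode.DEFAULT) ==
+ *        OperationStatus.SUCCESS) {
+ *     // each iteration yields a primary record matching both keys
+ * }
+ * join.close();
+ * c1.close();
+ * c2.close();
+ * }</pre>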

        + * + * @param cursors an array of cursors associated with this primary + * database. + * + * @param config The join attributes. If null, default attributes are + * used. + * + * @return a specialized cursor that returns the results of the equality + * join operation. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code JoinConfig} parameter. + * + * @see JoinCursor + */ + public JoinCursor join(final Cursor[] cursors, final JoinConfig config) { + try { + EnvironmentImpl env = checkEnv(); + checkOpen(); + DatabaseUtil.checkForNullParam(cursors, "cursors"); + if (cursors.length == 0) { + throw new IllegalArgumentException( + "At least one cursor is required."); + } + + /* + * Check that all cursors use the same locker, if any cursor is + * transactional. And if non-transactional, that all databases are + * in the same environment. + */ + Locker locker = cursors[0].getCursorImpl().getLocker(); + if (!locker.isTransactional()) { + for (int i = 1; i < cursors.length; i += 1) { + Locker locker2 = cursors[i].getCursorImpl().getLocker(); + if (locker2.isTransactional()) { + throw new IllegalArgumentException( + "All cursors must use the same transaction."); + } + EnvironmentImpl env2 = + cursors[i].getDatabaseImpl().getEnv(); + if (env != env2) { + throw new IllegalArgumentException( + "All cursors must use the same environment."); + } + } + } else { + for (int i = 1; i < cursors.length; i += 1) { + Locker locker2 = cursors[i].getCursorImpl().getLocker(); + if (locker.getTxnLocker() != locker2.getTxnLocker()) { + throw new IllegalArgumentException( + "All cursors must use the same transaction."); + } + } + } + + /* Create the join cursor. */ + return new JoinCursor(this, cursors, config); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Preloads the cache. This method should only be called when there are no + * operations being performed on the database in other threads. Executing + * preload during concurrent updates may result in some or all of the tree + * being loaded into the JE cache. Executing preload during any other + * types of operations may result in JE exceeding its allocated cache + * size. preload() effectively locks the entire database and therefore will + * lock out the checkpointer, cleaner, and compressor, as well as not allow + * eviction to occur. + * + * @deprecated As of JE 2.0.83, replaced by {@link + * Database#preload(PreloadConfig)}.

        + * + * @param maxBytes The maximum number of bytes to load. If maxBytes is 0, + * je.evictor.maxMemory is used. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + */ + public void preload(final long maxBytes) { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + PreloadConfig config = new PreloadConfig(); + config.setMaxBytes(maxBytes); + + dbImpl.preload(config); + } + + /** + * Preloads the cache. This method should only be called when there are no + * operations being performed on the database in other threads. Executing + * preload during concurrent updates may result in some or all of the tree + * being loaded into the JE cache. Executing preload during any other + * types of operations may result in JE exceeding its allocated cache + * size. preload() effectively locks the entire database and therefore will + * lock out the checkpointer, cleaner, and compressor, as well as not allow + * eviction to occur. + * + * @deprecated As of JE 2.0.101, replaced by {@link + * Database#preload(PreloadConfig)}.

        + * + * @param maxBytes The maximum number of bytes to load. If maxBytes is 0, + * je.evictor.maxMemory is used. + * + * @param maxMillisecs The maximum time in milliseconds to use when + * preloading. Preloading stops once this limit has been reached. If + * maxMillisecs is 0, preloading can go on indefinitely or until maxBytes + * (if non-0) is reached. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + */ + public void preload(final long maxBytes, final long maxMillisecs) { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + PreloadConfig config = new PreloadConfig(); + config.setMaxBytes(maxBytes); + config.setMaxMillisecs(maxMillisecs); + + dbImpl.preload(config); + } + + /** + * Preloads the cache. This method should only be called when there are no + * operations being performed on the database in other threads. Executing + * preload during concurrent updates may result in some or all of the tree + * being loaded into the JE cache. Executing preload during any other + * types of operations may result in JE exceeding its allocated cache + * size. preload() effectively locks the entire database and therefore will + * lock out the checkpointer, cleaner, and compressor, as well as not allow + * eviction to occur. If the database is replicated and the environment is + * in the replica state, then the replica may become temporarily + * disconnected from the master if the replica needs to replay changes + * against the database and is locked out because the time taken by the + * preload operation exceeds {@link + * com.sleepycat.je.rep.ReplicationConfig#FEEDER_TIMEOUT}. + *
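+ * <p>For illustration, a sketch that bounds the preload (assumes
+ * {@code db} is an open Database):
+ * <pre>{@code
+ * PreloadConfig config = new PreloadConfig();
+ * config.setMaxBytes(50L * 1024 * 1024); // stop after ~50 MB is cached
+ * config.setMaxMillisecs(30 * 1000);     // or after 30 seconds
+ * PreloadStats stats = db.preload(config);
+ * }</pre>
+ *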

+ * While this method preloads a single database, {@link
+ * Environment#preload} lets you preload multiple databases.
+ *
+ * @param config The PreloadConfig object that specifies the parameters
+ * of the preload.
+ *
+ * @return A PreloadStats object with various statistics about the
+ * preload() operation.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws IllegalStateException if the database has been closed.
+ *
+ * @throws IllegalArgumentException if {@code PreloadConfig.getMaxBytes} is
+ * greater than the size of the JE cache.
+ */
+ public PreloadStats preload(final PreloadConfig config) {
+ checkEnv();
+ final DatabaseImpl dbImpl = checkOpen();
+
+ PreloadConfig useConfig =
+ (config == null) ? new PreloadConfig() : config;
+
+ return dbImpl.preload(useConfig);
+ }
+
+ /**
+ * Counts the key/data pairs in the database. This operation is faster than
+ * obtaining a count from a cursor-based scan of the database, and will not
+ * perturb the current contents of the cache. However, the count is not
+ * guaranteed to be accurate if there are concurrent updates. Note that
+ * this method does scan a significant portion of the database and should
+ * be considered a fairly expensive operation.
+ *

+ * This operation uses an internal infrastructure and algorithm that is
+ * similar to the one used for the {@link DiskOrderedCursor}. Specifically,
+ * it will disable deletion of log files by the JE log cleaner during its
+ * execution and will consume a certain amount of memory (but without
+ * affecting the memory that is available for the JE cache). To avoid
+ * excessive memory consumption (and a potential {@code OutOfMemoryError})
+ * this method places an internal limit on its memory consumption. If this
+ * limit is reached, the method will still work properly, but its
+ * performance will degrade. To specify a different memory limit than the
+ * one used by this method, use the
+ * {@link Database#count(long memoryLimit)} method.
+ *

        + * Currently, the internal memory limit is calculated as 10% of the + * difference between the max JVM memory (the value returned by + * Runtime.getRuntime().maxMemory()) and the configured JE cache size. + * + * @return The count of key/data pairs in the database. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @see Cache + * Statistics: Unexpected Sizes + */ + public long count() { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + return dbImpl.count(0); + } + + /** + * Counts the key/data pairs in the database. This operation is faster than + * obtaining a count from a cursor based scan of the database, and will not + * perturb the current contents of the cache. However, the count is not + * guaranteed to be accurate if there are concurrent updates. Note that + * this method does scan a significant portion of the database and should + * be considered a fairly expensive operation. + *

+ * This operation uses an internal infrastructure and algorithm that is
+ * similar to the one used for the {@link DiskOrderedCursor}. Specifically,
+ * it will disable deletion of log files by the JE log cleaner during its
+ * execution and will consume a certain amount of memory (but without
+ * affecting the memory that is available for the JE cache). To avoid
+ * excessive memory consumption (and a potential {@code OutOfMemoryError})
+ * this method takes as input an upper bound on the memory it may consume.
+ * If this limit is reached, the method will still work properly, but its
+ * performance will degrade.
+ *
+ * @param memoryLimit The maximum memory (in bytes) that may be consumed
+ * by this method.
+ *
+ * @return The count of key/data pairs in the database.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws IllegalStateException if the database has been closed.
+ *
+ * @see Cache
+ * Statistics: Unexpected Sizes
+ */
+ public long count(long memoryLimit) {
+ checkEnv();
+ final DatabaseImpl dbImpl = checkOpen();
+
+ return dbImpl.count(memoryLimit);
+ }
+
+ /**
+ * Returns database statistics.
+ *
+ *

        If this method has not been configured to avoid expensive operations
 + * (using the {@link com.sleepycat.je.StatsConfig#setFast
 + * StatsConfig.setFast} method), it will access some or all of the pages in
 + * the database, incurring a severe performance penalty as well as possibly
 + * flushing the underlying cache.

        + * + *

        In the presence of multiple threads or processes accessing an active + * database, the information returned by this method may be + * out-of-date.
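+ *
+ * <p>For illustration, a sketch that avoids the expensive full scan
+ * (assumes {@code db} is an open Database):
+ * <pre>{@code
+ * StatsConfig config = new StatsConfig();
+ * config.setFast(true); // return cheaply available statistics only
+ * DatabaseStats stats = db.getStats(config);
+ * }</pre>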

        + * + * @param config The statistics returned; if null, default statistics are + * returned. + * + * @return Database statistics. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + */ + public DatabaseStats getStats(StatsConfig config) { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + if (config == null) { + config = StatsConfig.DEFAULT; + } + + return dbImpl.stat(config); + } + + /** + * Verifies the integrity of the database. + * + *

        Verification is an expensive operation that should normally only be + * used for troubleshooting and debugging.
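+ *
+ * <p>For illustration, a sketch using default verification settings
+ * (assumes {@code db} is an open Database):
+ * <pre>{@code
+ * DatabaseStats stats = db.verify(null); // null selects the default config
+ * }</pre>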

        + * + * @param config Configures the verify operation; if null, the default + * operation is performed. + * + * @return Database statistics. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if a corruption is detected, or if + * an unexpected, internal or environment-wide failure occurs. If a + * persistent corruption is detected, + * {@link EnvironmentFailureException#isCorrupted()} will return true. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public DatabaseStats verify(VerifyConfig config) { + try { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + if (config == null) { + config = VerifyConfig.DEFAULT; + } + + return dbImpl.verify(config); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Returns the database name. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return The database name. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + */ + public String getDatabaseName() { + try { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + return dbImpl.getName(); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /* + * Non-transactional database name, safe to access when creating error + * messages. + */ + String getDebugName() { + final DatabaseImpl dbImpl = databaseImpl; + return (dbImpl == null) ? "[closed]" : dbImpl.getDebugName(); + } + + /** + * Returns this Database object's configuration. + * + *

        This may differ from the configuration used to open this object if + * the database existed previously.

        + * + *

        Unlike most Database methods, this method may be called after the + * database is closed.

        + * + * @return This Database object's configuration. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + */ + public DatabaseConfig getConfig() { + + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + + try { + return DatabaseConfig.combineConfig(dbImpl, configuration); + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Equivalent to getConfig().getTransactional() but cheaper. + */ + boolean isTransactional() { + final DatabaseImpl dbImpl = checkOpen(); + return dbImpl.isTransactional(); + } + + /** + * Returns the {@link com.sleepycat.je.Environment Environment} handle for + * the database environment underlying the {@link + * com.sleepycat.je.Database Database}. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return The {@link com.sleepycat.je.Environment Environment} handle + * for the database environment underlying the {@link + * com.sleepycat.je.Database Database}. + */ + public Environment getEnvironment() { + return envHandle; + } + + /** + * Returns a list of all {@link com.sleepycat.je.SecondaryDatabase + * SecondaryDatabase} objects associated with a primary database. + * + *

        If no secondaries are associated with this database, an empty list is + * returned.

        + */ + /* + * Replacement for above paragraph when SecondaryAssociation is published: + *

        If no secondaries are associated with this database, or a {@link + * SecondaryAssociation} is {@link DatabaseConfig#setSecondaryAssociation + * configured}, an empty list is returned.

        + */ + public List getSecondaryDatabases() { + return new ArrayList<>(simpleAssocSecondaries); + } + + /** + * Compares two keys using either the default comparator if no BTree + * comparator has been set or the BTree comparator if one has been set. + * + * @return -1 if entry1 compares less than entry2, + * 0 if entry1 compares equal to entry2, + * 1 if entry1 compares greater than entry2 + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if either entry is a partial + * DatabaseEntry, or is null. + */ + public int compareKeys(final DatabaseEntry entry1, + final DatabaseEntry entry2) { + return doCompareKeys(entry1, entry2, false/*duplicates*/); + } + + /** + * Compares two data elements using either the default comparator if no + * duplicate comparator has been set or the duplicate comparator if one has + * been set. + * + * @return -1 if entry1 compares less than entry2, + * 0 if entry1 compares equal to entry2, + * 1 if entry1 compares greater than entry2 + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if either entry is a partial + * DatabaseEntry, or is null. + */ + public int compareDuplicates(final DatabaseEntry entry1, + final DatabaseEntry entry2) { + return doCompareKeys(entry1, entry2, true/*duplicates*/); + } + + private int doCompareKeys(final DatabaseEntry entry1, + final DatabaseEntry entry2, + final boolean duplicates) { + try { + checkEnv(); + final DatabaseImpl dbImpl = checkOpen(); + DatabaseUtil.checkForNullDbt(entry1, "entry1", true); + DatabaseUtil.checkForNullDbt(entry2, "entry2", true); + DatabaseUtil.checkForPartial(entry1, "entry1"); + DatabaseUtil.checkForPartial(entry2, "entry2"); + + return dbImpl.compareEntries(entry1, entry2, duplicates); + + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /* + * Helpers, not part of the public API + */ + + /** + * Returns true if the Database was opened read/write. + * + * @return true if the Database was opened read/write. + */ + boolean isWritable() { + return isWritable; + } + + /** + * Returns the non-null, underlying getDbImpl. + * + * This method should always be called to access the databaseImpl, to guard + * against NPE when the database has been closed after the initial checks. + * + * However, callers should additionally call checkOpen at API entry points + * to reject the operation as soon as possible. Plus, if the database has + * been closed, this method may return non-null because the databaseImpl + * field is not volatile. + * + * @throws IllegalStateException if the database has been closed since + * checkOpen was last called. + */ + DatabaseImpl getDbImpl() { + + final DatabaseImpl dbImpl = databaseImpl; + + if (dbImpl != null) { + return dbImpl; + } + + checkOpen(); + + /* + * checkOpen should have thrown an exception, but we'll throw again + * here just in case. + */ + throw new IllegalStateException("Database is closed. State=" + state); + } + + /** + * Called during database open to set the handleLocker field. 
+ * @see HandleLocker + */ + HandleLocker initHandleLocker(EnvironmentImpl envImpl, + Locker openDbLocker) { + handleLocker = HandleLocker.createHandleLocker(envImpl, openDbLocker); + return handleLocker; + } + + @SuppressWarnings("unused") + void removeCursor(final ForwardCursor ignore) + throws DatabaseException { + + /* + * Do not call checkOpen if the handle was preempted or corrupted, to + * allow closing a cursor after an operation failure. [#17015] + */ + if (state != DbState.PREEMPTED && state != DbState.CORRUPTED) { + checkOpen(); + } + openCursors.getAndDecrement(); + } + + @SuppressWarnings("unused") + void addCursor(final ForwardCursor ignore) { + checkOpen(); + openCursors.getAndIncrement(); + } + + DatabaseImpl checkOpen() { + switch (state) { + case OPEN: + return databaseImpl; + case CLOSED: + throw new IllegalStateException("Database was closed."); + case INVALID: + throw new IllegalStateException( + "The Transaction used to open the Database was aborted."); + case PREEMPTED: + throw preemptedCause.wrapSelf(preemptedCause.getMessage()); + case CORRUPTED: + throw corruptedCause.wrapSelf(corruptedCause.getMessage()); + default: + assert false : state; + return null; + } + } + + /** + * @throws EnvironmentFailureException if the underlying environment is + * invalid. + * @throws IllegalStateException if the environment is not open. + */ + EnvironmentImpl checkEnv() { + return envHandle.checkOpen(); + } + + void checkLockModeWithoutTxn(final Transaction userTxn, + final LockMode lockMode) { + if (userTxn == null && LockMode.RMW.equals(lockMode)) { + throw new IllegalArgumentException( + lockMode + " is meaningless and can not be specified " + + "when a null (autocommit) transaction is used. There " + + "will never be a follow on operation which will promote " + + "the lock to WRITE."); + } + } + + /** + * Sends trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + void trace(final Level level, + final String methodName, + final String getOrPutType, + final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + if (logger.isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(methodName).append(" "); + sb.append(getOrPutType); + if (txn != null) { + sb.append(" txnId=").append(txn.getId()); + } + sb.append(" key=").append(key.dumpData()); + if (data != null) { + sb.append(" data=").append(data.dumpData()); + } + if (lockMode != null) { + sb.append(" lockMode=").append(lockMode); + } + LoggerUtils.logMsg( + logger, getEnv(), level, sb.toString()); + } + } + + /** + * Sends trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. 
+ */ + void trace(final Level level, + final String methodName, + final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + if (logger.isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(methodName); + if (txn != null) { + sb.append(" txnId=").append(txn.getId()); + } + sb.append(" key=").append(key.dumpData()); + if (data != null) { + sb.append(" data=").append(data.dumpData()); + } + if (lockMode != null) { + sb.append(" lockMode=").append(lockMode); + } + LoggerUtils.logMsg( + logger, getEnv(), level, sb.toString()); + } + } + + /** + * Sends trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + void trace(final Level level, + final String methodName, + final Transaction txn, + final Object config) { + + if (logger.isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(methodName); + sb.append(" name=").append(getDebugName()); + if (txn != null) { + sb.append(" txnId=").append(txn.getId()); + } + if (config != null) { + sb.append(" config=").append(config); + } + LoggerUtils.logMsg( + logger, getEnv(), level, sb.toString()); + } + } + + boolean hasSecondaryOrForeignKeyAssociations() { + return (!secAssoc.isEmpty() || !foreignKeySecondaries.isEmpty()); + } + + SecondaryAssociation getSecondaryAssociation() { + return secAssoc; + } + + /** + * Creates a SecondaryIntegrityException using the information given. + * + * This method is in the Database class, rather than in SecondaryDatabase, + * to support joins with plain Cursors [#21258]. + */ + SecondaryIntegrityException secondaryRefersToMissingPrimaryKey( + final Locker locker, + final DatabaseEntry secKey, + final DatabaseEntry priKey, + final long expirationTime) { + + return new SecondaryIntegrityException( + this, + locker, + "Secondary refers to a missing key in the primary database", + getDebugName(), secKey, priKey, expirationTime); + } +} diff --git a/src/com/sleepycat/je/DatabaseComparator.java b/src/com/sleepycat/je/DatabaseComparator.java new file mode 100644 index 0000000..5af97aa --- /dev/null +++ b/src/com/sleepycat/je/DatabaseComparator.java @@ -0,0 +1,34 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; +import java.util.Comparator; + +/** + * Implemented by btree and duplicate comparators that need to be initialized + * before they are used or need access to the environment's ClassLoader + * property. + * @since 5.0 + */ +public interface DatabaseComparator extends Comparator, Serializable { + + /** + * Called to initialize a comparator object after it is instantiated or + * deserialized, and before it is used. + * + * @param loader is the environment's ClassLoader property. 
+ */ + public void initialize(ClassLoader loader); +} diff --git a/src/com/sleepycat/je/DatabaseConfig.java b/src/com/sleepycat/je/DatabaseConfig.java new file mode 100644 index 0000000..58660d8 --- /dev/null +++ b/src/com/sleepycat/je/DatabaseConfig.java @@ -0,0 +1,1772 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.trigger.ReplicatedDatabaseTrigger; +import com.sleepycat.je.trigger.PersistentTrigger; +import com.sleepycat.je.trigger.Trigger; + +/** + *

        Specifies the attributes of a database.

        + * + *

        There are two groups of database attributes: per-database handle + * attributes, and database-wide attributes. An attribute may be + * persistent/transient or mutable/immutable:

+ *
+ * <table border="1">
+ * <tr>
+ *     <th>Scope</th>
+ *     <th>Mutable</th>
+ *     <th>Persistent</th>
+ *     <th>Attribute</th>
+ * </tr>
+ * <tr>
+ *     <td rowspan="4">Database-wide attribute</td>
+ *     <td>True</td>
+ *     <td>True</td>
+ *     <td>{@link DatabaseConfig#getBtreeComparator() btree comparator}<br>
+ *         {@link DatabaseConfig#getDuplicateComparator() duplicate comparator}<br>
+ *         {@link DatabaseConfig#getKeyPrefixing() key prefixing}<br>
+ *         {@link DatabaseConfig#getNodeMaxEntries() nodeMaxEntries}</td>
+ * </tr>
+ * <tr>
+ *     <td>True</td>
+ *     <td>False</td>
+ *     <td>{@link DatabaseConfig#getDeferredWrite() deferred write}<br>
+ *         {@link DatabaseConfig#getTransactional() transactional}</td>
+ * </tr>
+ * <tr>
+ *     <td>False</td>
+ *     <td>True</td>
+ *     <td>{@link DatabaseConfig#getSortedDuplicates() sorted duplicates}</td>
+ * </tr>
+ * <tr>
+ *     <td>False</td>
+ *     <td>False</td>
+ *     <td>{@link DatabaseConfig#getTemporary() temporary}</td>
+ * </tr>
+ * <tr>
+ *     <td>Per-database handle attributes</td>
+ *     <td>False</td>
+ *     <td>False</td>
+ *     <td>{@link DatabaseConfig#getAllowCreate() allow create}<br>
+ *         {@link DatabaseConfig#getExclusiveCreate() exclusive create}<br>
+ *         {@link DatabaseConfig#getReadOnly() read only}<br>
+ *         {@link DatabaseConfig#getCacheMode() cache mode}<br>
+ *         {@link DatabaseConfig#getUseExistingConfig() use existing config}</td>
+ * </tr>
+ * </table>
+ *

        + * + *

        Persistent attributes will be saved in the log and remain in effect + * every time the environment is reopened. Transient attributes only remain + * in effect until:

+ *
+ * <ul>
+ * <li>the database configuration is updated, or</li>
+ * <li>the database handle (per-database handle attributes) is closed, or all
+ * handles for this database (database-wide attributes) are closed.</li>
+ * </ul>
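+ *
+ * <p>For illustration, a minimal configuration sketch ({@code env} is
+ * assumed to be an open Environment):
+ * <pre>{@code
+ * DatabaseConfig config = new DatabaseConfig();
+ * config.setAllowCreate(true);   // create the database if absent
+ * config.setTransactional(true); // enclose operations in transactions
+ * Database db = env.openDatabase(null, "sampleDb", config);
+ * }</pre>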
        + */ +public class DatabaseConfig implements Cloneable { + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public static final DatabaseConfig DEFAULT = new DatabaseConfig(); + + private boolean allowCreate = false; + private boolean exclusiveCreate = false; + private boolean transactional = false; + private boolean readOnly = false; + private boolean sortedDuplicates = false; + private boolean deferredWrite = false; + private boolean temporary = false; + private boolean keyPrefixing = false; + private boolean replicated = true; + + private int nodeMaxEntries; + private Comparator btreeComparator = null; + private Comparator duplicateComparator = null; + private boolean btreeComparatorByClassName = false; + private boolean duplicateComparatorByClassName = false; + private boolean overrideBtreeComparator = false; + private boolean overrideDuplicateComparator = false; + private boolean useExistingConfig = false; + private CacheMode cacheMode = null; + private SecondaryAssociation secAssociation = null; + + /* User defined triggers associated with this database. */ + private List triggers = new LinkedList(); + private boolean overrideTriggers; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public DatabaseConfig() { + } + + /** + * Configures the {@link com.sleepycat.je.Environment#openDatabase + * Environment.openDatabase} method to create the database if it does not + * already exist. + * + * @param allowCreate If true, configure the {@link + * com.sleepycat.je.Environment#openDatabase Environment.openDatabase} + * method to create the database if it does not already exist. + * + * @return this + */ + public DatabaseConfig setAllowCreate(boolean allowCreate) { + setAllowCreateVoid(allowCreate); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setAllowCreateVoid(boolean allowCreate) { + this.allowCreate = allowCreate; + } + + /** + * Returns true if the {@link com.sleepycat.je.Environment#openDatabase + * Environment.openDatabase} method is configured to create the database + * if it does not already exist. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the {@link com.sleepycat.je.Environment#openDatabase + * Environment.openDatabase} method is configured to create the database + * if it does not already exist. + */ + public boolean getAllowCreate() { + return allowCreate; + } + + /** + * Configure the {@link com.sleepycat.je.Environment#openDatabase + * Environment.openDatabase} method to fail if the database already exists. + * + *

        The exclusiveCreate mode is only meaningful if specified with the + * allowCreate mode.

        + * + * @param exclusiveCreate If true, configure the {@link + * com.sleepycat.je.Environment#openDatabase Environment.openDatabase} + * method to fail if the database already exists. + * + * @return this + */ + public DatabaseConfig setExclusiveCreate(boolean exclusiveCreate) { + setExclusiveCreateVoid(exclusiveCreate); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setExclusiveCreateVoid(boolean exclusiveCreate) { + this.exclusiveCreate = exclusiveCreate; + } + + /** + * Returns true if the {@link com.sleepycat.je.Environment#openDatabase + * Environment.openDatabase} method is configured to fail if the database + * already exists. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the {@link com.sleepycat.je.Environment#openDatabase + * Environment.openDatabase} method is configured to fail if the database + * already exists. + */ + public boolean getExclusiveCreate() { + return exclusiveCreate; + } + + /** + * Configures the database to support records with duplicate keys. + * + *

        When duplicate keys are configured for a database, key prefixing is + * also implicitly configured. Without key prefixing, databases with + * duplicates would store keys inefficiently. Key prefixing is therefore + * mandatory for databases with duplicates.

        + * + *

        Although two records may have the same key, they may not also have + * the same data item. Two identical records, that have the same key and + * data, may not be stored in a database.

        + * + *

        The ordering of duplicates in the database is determined by the + * duplicate comparison function. See {@link #setDuplicateComparator}. If + * the application does not specify a duplicate comparison function, a + * default lexical comparison will be used.

        + * + *

        If a primary database is to be associated with one or more secondary + * databases, it may not be configured for duplicates.

        + * + *

        Calling this method affects the database, including all threads of + * control accessing the database.

        + * + *

        If the database already exists when the database is opened, any + * database configuration specified by this method must be the same as the + * existing database or an error will be returned.
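+ *
+ * <p>For illustration, a sketch enabling duplicates at creation time:
+ * <pre>{@code
+ * DatabaseConfig config = new DatabaseConfig();
+ * config.setAllowCreate(true);
+ * config.setSortedDuplicates(true); // key prefixing is implied
+ * }</pre>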

        + * + * @param sortedDuplicates If true, configure the database to support + * duplicate data items. A value of false is illegal to this method, that + * is, once set, the configuration cannot be cleared. + * + * @return this + */ + public DatabaseConfig setSortedDuplicates(boolean sortedDuplicates) { + setSortedDuplicatesVoid(sortedDuplicates); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSortedDuplicatesVoid(boolean sortedDuplicates) { + this.sortedDuplicates = sortedDuplicates; + if (sortedDuplicates) { + setKeyPrefixingVoid(true); + } + } + + /** + * Returns true if the database is configured to support records with + * duplicate keys. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the database is configured to support records with + * duplicate keys. + */ + public boolean getSortedDuplicates() { + return sortedDuplicates; + } + + /** + * Returns the key prefixing configuration. Note that key prefixing is + * always enabled for a database with duplicates configured. + * + * @return true if key prefixing has been enabled in this database. + */ + public boolean getKeyPrefixing() { + return keyPrefixing; + } + + /** + * Configure the database to support key prefixing. + * + *

        Key prefixing causes the representation of keys in the b-tree + * internal nodes to be split in each BIN (bottom internal node) between + * the common prefix of all keys and the suffixes. Using this often + * results in a more space-efficient representation in both the + * in-memory and on-disk formats. In general the cost of maintaining + * the prefix separately is low compared to the benefit, and therefore + * enabling key prefixing is strongly recommended.

        + * + *

        When duplicate keys are configured for a database, key prefixing is + * also implicitly configured. Without key prefixing, databases with + * duplicates would store keys inefficiently. Key prefixing is therefore + * mandatory for databases with duplicates.

        + * + * @param keyPrefixing If true, enables keyPrefixing for the database. + * + * @return this + * + * @throws IllegalStateException if the keyPrefixing argument is false and + * {@link #setSortedDuplicates} has been called to configure duplicates. + * Key prefixing is therefore mandatory for databases with duplicates. + * + * @see Cache + * Statistics: Size Optimizations + */ + public DatabaseConfig setKeyPrefixing(boolean keyPrefixing) { + setKeyPrefixingVoid(keyPrefixing); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setKeyPrefixingVoid(boolean keyPrefixing) { + if (!keyPrefixing && sortedDuplicates) { + throw new IllegalStateException + ("Key prefixing is mandatory for databases with duplicates"); + } + this.keyPrefixing = keyPrefixing; + } + + /** + * Encloses the database open within a transaction. + * + *

        If the call succeeds, the open operation will be recoverable. If the + * call fails, no database will have been created.

        + * + *

        All future operations on this database, which are not explicitly
 + * enclosed in a transaction by the application, will be enclosed in a
 + * transaction within the library.

        + * + * @param transactional If true, enclose the database open within a + * transaction. + * + * @return this + */ + public DatabaseConfig setTransactional(boolean transactional) { + setTransactionalVoid(transactional); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTransactionalVoid(boolean transactional) { + this.transactional = transactional; + } + + /** + * Returns true if the database open is enclosed within a transaction. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the database open is enclosed within a transaction. + */ + public boolean getTransactional() { + return transactional; + } + + /** + * Configures the database in read-only mode. + * + *

        Any attempt to modify items in the database will fail, regardless of + * the actual permissions of any underlying files.

        + * + * @param readOnly If true, configure the database in read-only mode. + * + * @return this + */ + public DatabaseConfig setReadOnly(boolean readOnly) { + setReadOnlyVoid(readOnly); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReadOnlyVoid(boolean readOnly) { + this.readOnly = readOnly; + } + + /** + * Returns true if the database is configured in read-only mode. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the database is configured in read-only mode. + */ + public boolean getReadOnly() { + return readOnly; + } + + /** + * Configures the {@link com.sleepycat.je.Environment#openDatabase + * Environment.openDatabase} method to have a B+Tree fanout of + * nodeMaxEntries. + * + *

        The nodeMaxEntries parameter is only meaningful if specified with the + * allowCreate mode. See {@link EnvironmentConfig#NODE_MAX_ENTRIES} for the + * valid value range, and the default value.

        + * + * @param nodeMaxEntries The maximum children per B+Tree node. + * + * @return this + */ + public DatabaseConfig setNodeMaxEntries(int nodeMaxEntries) { + setNodeMaxEntriesVoid(nodeMaxEntries); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeMaxEntriesVoid(int nodeMaxEntries) { + this.nodeMaxEntries = nodeMaxEntries; + } + + /** + * @deprecated this property no longer has any effect; {@link + * #setNodeMaxEntries} should be used instead. + */ + public DatabaseConfig setNodeMaxDupTreeEntries(int nodeMaxDupTreeEntries) { + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeMaxDupTreeEntriesVoid(int nodeMaxDupTreeEntries) { + } + + /** + * Returns the maximum number of children a B+Tree node can have. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return The maximum number of children a B+Tree node can have. + */ + public int getNodeMaxEntries() { + return nodeMaxEntries; + } + + /** + * @deprecated this property no longer has any effect and zero is always + * returned; {@link #getNodeMaxEntries} should be used instead. + */ + public int getNodeMaxDupTreeEntries() { + return 0; + } + + /** + * By default, a byte by byte lexicographic comparison is used for btree + * keys. To customize the comparison, supply a different Comparator. + * + *

        Note that there are two ways to set the comparator: by specifying the + * class or by specifying a serializable object. This method is used to + * specify a serializable object. The comparator class must implement + * java.util.Comparator and must be serializable. JE will serialize the + * Comparator and deserialize it when subsequently opening the + * database.

        + * + *

        If a comparator needs to be initialized before it is used or needs + * access to the environment's ClassLoader property, it may implement the + * {@link DatabaseComparator} interface.

        + * + *

        The Comparator.compare() method is passed the byte arrays that are + * stored in the database. If you know how your data is organized in the + * byte array, then you can write a comparison routine that directly + * examines the contents of the arrays. Otherwise, you have to reconstruct + * your original objects, and then perform the comparison. See the Getting Started Guide for examples.
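+ *
+ * <p>For illustration, a sketch of a comparator that orders keys by a
+ * 4-byte big-endian integer prefix (a hypothetical key layout; assumes
+ * every key is at least 4 bytes):
+ * <pre>{@code
+ * class IntPrefixComparator implements Comparator<byte[]>, Serializable {
+ *     public int compare(byte[] a, byte[] b) {
+ *         int x = java.nio.ByteBuffer.wrap(a).getInt();
+ *         int y = java.nio.ByteBuffer.wrap(b).getInt();
+ *         return Integer.compare(x, y);
+ *     }
+ * }
+ * }</pre>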

        + * + *

        WARNING: There are several special considerations that must + * be taken into account when implementing a comparator.

        + *

          + *
• Comparator instances are shared by multiple threads and comparator + * methods are called without any special synchronization. Therefore, + * comparators must be thread safe. In general no shared state should be + * used and any caching of computed values must be done with proper + * synchronization. (A sketch of such a comparator follows this list.)
• Because records are stored in the order determined by the + * Comparator, the Comparator's behavior must not change over time and + * therefore should not be dependent on any state that may change over + * time. In addition, although it is possible to change the comparator + * for an existing database, care must be taken that the new comparator + * provides compatible results with the previous comparator, or database + * corruption will occur.
• JE uses comparators internally in a wide variety of circumstances, + * so custom comparators must be sure to return valid values for any two + * arbitrary keys. The user must not make any assumptions about the + * range of key values that might be compared. For example, it is possible + * for the comparator to be used against previously deleted values.
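A minimal sketch of a comparator that satisfies the constraints above: it is stateless (and therefore trivially thread safe), its ordering is stable over time, it is serializable, and it is well defined for arbitrary byte arrays. The reverse-lexicographic ordering is only an illustrative choice, not anything prescribed by JE:

    // Hypothetical example; not part of JE itself.
    public class ReverseLexComparator
            implements java.util.Comparator<byte[]>, java.io.Serializable {

        private static final long serialVersionUID = 1L;

        public int compare(byte[] a, byte[] b) {
            // Unsigned lexicographic comparison, negated to reverse order.
            int min = Math.min(a.length, b.length);
            for (int i = 0; i < min; i++) {
                int diff = (a[i] & 0xff) - (b[i] & 0xff);
                if (diff != 0) {
                    return -diff;
                }
            }
            return b.length - a.length;
        }
    }

    // Registered as a serializable object:
    //   dbConfig.setBtreeComparator(new ReverseLexComparator());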
        + * + *

        A special type of comparator is a partial comparator, which + * allows for the keys of a database to be updated, but only if the updates + * do not change the relative order of the keys. For example, if a database + * uses strings as keys and a case-insensitive comparator, it is possible + * to change the case of characters in the keys, as this will not change + * the ordering of the keys. Another example is when the keys contain + * multiple fields but uniquely identify each record with a single field. + * The partial comparator could then compare only the single identifying + * field, allowing the rest of the fields to be updated. A query + * ({@link Cursor#getSearchKey Cursor.getSearchKey}, for example) could + * then be performed by passing a partial key that contains only the + * identifying field. + * + *

        WARNING: To allow for key updates in situations + * like those described above, all partial comparators must implement the + * {@link PartialComparator} tag interface. Otherwise, BDB JE will raise + * an exception if an attempt is made to update a key in a database whose + * comparators do not implement PartialComparator. See "Upgrading from JE + * 5.0 or earlier" in the change log and the {@link PartialComparator} + * javadoc for more information.
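For illustration, a sketch of a partial comparator, assuming a hypothetical key layout of a fixed 4-byte identifier followed by mutable payload bytes (all keys are assumed to be at least 4 bytes long):

    // Hypothetical example; the 4-byte id layout is an assumption.
    public class IdPrefixComparator
            implements java.util.Comparator<byte[]>, PartialComparator,
                       java.io.Serializable {

        private static final long serialVersionUID = 1L;

        public int compare(byte[] a, byte[] b) {
            // Only the 4-byte id prefix determines the order, so the
            // payload bytes that follow it may be updated in place.
            for (int i = 0; i < 4; i++) {
                int diff = (a[i] & 0xff) - (b[i] & 0xff);
                if (diff != 0) {
                    return diff;
                }
            }
            return 0;
        }
    }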

        + *

+ * Another special type of comparator is a binary equality + * comparator, which considers two keys to be equal if and only if they + * have the same length and they are equal byte-per-byte. All binary + * equality comparators must implement the {@link BinaryEqualityComparator} + * interface. The significance of binary equality comparators is that they + * make possible certain internal optimizations, like the "blind puts" + * optimization, described in + * {@link BinaryEqualityComparator}. + *

        + * The comparator for an existing database will not be overridden unless + * setOverrideBtreeComparator() is set to true. + * + * @return this + */ + public DatabaseConfig setBtreeComparator( + Comparator btreeComparator) { + + setBtreeComparatorVoid(btreeComparator); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setBtreeComparatorVoid(Comparator btreeComparator) { + + /* Note: comparator may be null */ + this.btreeComparator = validateComparator(btreeComparator, "Btree"); + this.btreeComparatorByClassName = false; + } + + /** + * By default, a byte by byte lexicographic comparison is used for btree + * keys. To customize the comparison, supply a different Comparator. + * + *

        Note that there are two ways to set the comparator: by specifying the + * class or by specifying a serializable object. This method is used to + * specify a Comparator class. The comparator class must implement + * java.util.Comparator and must have a public zero-parameter constructor. + * JE will store the class name and instantiate the Comparator by class + * name (using Class.forName and newInstance) + * when subsequently opening the database. Because the Comparator is + * instantiated using its default constructor, it should not be dependent + * on other constructor parameters.
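For example, registration by class name might look like the following, reusing the hypothetical ReverseLexComparator sketched earlier (it already has the required public zero-parameter constructor):

    DatabaseConfig cfg = new DatabaseConfig();
    cfg.setBtreeComparator(ReverseLexComparator.class);
    // JE records only the class name here; a fresh instance is created
    // via the zero-parameter constructor when the database is reopened.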

        + * + *

        The Comparator.compare() method is passed the byte arrays that are + * stored in the database. If you know how your data is organized in the + * byte array, then you can write a comparison routine that directly + * examines the contents of the arrays. Otherwise, you have to reconstruct + * your original objects, and then perform the comparison. See the Getting Started Guide for examples.

        + * + *

        If a comparator needs to be initialized before it is used or needs + * access to the environment's ClassLoader property, it may implement the + * {@link DatabaseComparator} interface.

        + * + *

        WARNING: There are several special considerations that must + * be taken into account when implementing a comparator.

        + *

          + *
• Comparator instances are shared by multiple threads and comparator + * methods are called without any special synchronization. Therefore, + * comparators must be thread safe. In general no shared state should be + * used and any caching of computed values must be done with proper + * synchronization.
• Because records are stored in the order determined by the + * Comparator, the Comparator's behavior must not change over time and + * therefore should not be dependent on any state that may change over + * time. In addition, although it is possible to change the comparator + * for an existing database, care must be taken that the new comparator + * provides compatible results with the previous comparator, or database + * corruption will occur.
• JE uses comparators internally in a wide variety of circumstances, + * so custom comparators must be sure to return valid values for any two + * arbitrary keys. The user must not make any assumptions about the + * range of key values that might be compared. For example, it is possible + * for the comparator to be used against previously deleted values.
        + * + *

        A special type of comparator is a partial comparator, which + * allows for the keys of a database to be updated, but only if the updates + * do not change the relative order of the keys. For example, if a database + * uses strings as keys and a case-insensitive comparator, it is possible + * to change the case of characters in the keys, as this will not change the + * ordering of the keys. Another example is when the keys contain multiple + * fields but uniquely identify each record with a single field. The + * partial comparator could then compare only the single identifying field, + * allowing the rest of the fields to be updated. A query + * ({@link Cursor#getSearchKey Cursor.getSearchKey}, for example) could + * then be performed by passing a partial key that contains only the + * identifying field. + * + *

        WARNING: To allow for key updates in situations + * like those described above, all partial comparators must implement the + * {@link PartialComparator} tag interface. See "Upgrading from JE 5.0 + * or earlier" in the change log and the {@link PartialComparator} javadoc + * for more information.

+ * + * Another special type of comparator is a binary equality + * comparator, which considers two keys to be equal if and only if they + * have the same length and they are equal byte-per-byte. All binary + * equality comparators must implement the {@link BinaryEqualityComparator} + * interface. The significance of binary equality comparators is that they + * make possible certain internal optimizations, like the "blind puts" + * optimization, described in + * {@link BinaryEqualityComparator}. + *

+ * The comparator for an existing database will not be overridden unless + * setOverrideBtreeComparator() is set to true. + * + * @return this + */ + public DatabaseConfig setBtreeComparator( + Class<? extends Comparator> + btreeComparatorClass) { + + setBtreeComparatorVoid(btreeComparatorClass); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setBtreeComparatorVoid(Class<? extends Comparator> + btreeComparatorClass) { + + /* Note: comparator may be null */ + this.btreeComparator = validateComparator(btreeComparatorClass, + "Btree"); + this.btreeComparatorByClassName = true; + } + + /** + * Returns the Comparator used for key comparison on this database. + */ + public Comparator getBtreeComparator() { + return btreeComparator; + } + + /** + * Returns true if the btree comparator is set by class name, not by + * serializable Comparator object. + * + * @return true if the comparator is set by class name, not by serializable + * Comparator object. + */ + public boolean getBtreeComparatorByClassName() { + return btreeComparatorByClassName; + } + + /** + * Sets to true if the database exists and the btree comparator specified + * in this configuration object should override the current comparator. + * + * @param override Set to true to override the existing comparator. + * + * @return this + */ + public DatabaseConfig setOverrideBtreeComparator(boolean override) { + setOverrideBtreeComparatorVoid(override); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setOverrideBtreeComparatorVoid(boolean override) { + overrideBtreeComparator = override; + } + + /** + * Returns the override setting for the btree comparator. + */ + public boolean getOverrideBtreeComparator() { + return overrideBtreeComparator; + } + + /** + * By default, a byte by byte lexicographic comparison is used for + * duplicate data items in a duplicate set. To customize the comparison, + * supply a different Comparator. + * + *

        Note that there are two ways to set the comparator: by specifying the + * class or by specifying a serializable object. This method is used to + * specify a serializable object. The comparator class must implement + * java.util.Comparator and must be serializable. JE will serialize the + * Comparator and deserialize it when subsequently opening the + * database.

        + * + *

        The Comparator.compare() method is passed the byte arrays that are + * stored in the database. If you know how your data is organized in the + * byte array, then you can write a comparison routine that directly + * examines the contents of the arrays. Otherwise, you have to reconstruct + * your original objects, and then perform the comparison. See the Getting Started Guide for examples.

        + * + *

        If a comparator needs to be initialized before it is used or needs + * access to the environment's ClassLoader property, it may implement the + * {@link DatabaseComparator} interface.

        + * + *

        WARNING: There are several special considerations that must + * be taken into account when implementing a comparator.

        + *

          + *
• Comparator instances are shared by multiple threads and comparator + * methods are called without any special synchronization. Therefore, + * comparators must be thread safe. In general no shared state should be + * used and any caching of computed values must be done with proper + * synchronization.
• Because records are stored in the order determined by the + * Comparator, the Comparator's behavior must not change over time and + * therefore should not be dependent on any state that may change over + * time. In addition, although it is possible to change the comparator + * for an existing database, care must be taken that the new comparator + * provides compatible results with the previous comparator, or database + * corruption will occur.
• JE uses comparators internally in a wide variety of circumstances, + * so custom comparators must be sure to return valid values for any two + * arbitrary keys. The user must not make any assumptions about the + * range of key values that might be compared. For example, it is possible + * for the comparator to be used against previously deleted values.
        + * + *

        A special type of comparator is a partial comparator, which + * allows for the keys of a database to be updated, but only if the updates + * do not change the relative order of the keys. For example, if a database + * uses strings as keys and a case-insensitive comparator, it is possible to + * change the case of characters in the keys, as this will not change the + * ordering of the keys. Another example is when the keys contain multiple + * fields but uniquely identify each record with a single field. The + * partial comparator could then compare only the single identifying field, + * allowing the rest of the fields to be updated. A query + * ({@link Cursor#getSearchKey Cursor.getSearchKey}, for example) could + * then be performed by passing a partial key that contains only the + * identifying field. + * + *

        When using a partial duplicates comparator, it is possible to update + * the data for a duplicate record, as long as only the non-identifying + * fields in the data are changed. See + * {@link Cursor#putCurrent Cursor.putCurrent} for more information.
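A sketch of such an update, assuming a partial duplicate comparator is configured, a hypothetical keyBytes array, and a data layout whose identifying field is left untouched:

    Cursor cursor = db.openCursor(null, null);
    try {
        DatabaseEntry key = new DatabaseEntry(keyBytes);
        DatabaseEntry data = new DatabaseEntry();
        if (cursor.getSearchKey(key, data, LockMode.RMW) ==
                OperationStatus.SUCCESS) {
            byte[] updated = data.getData().clone();
            // ... modify only the non-identifying bytes of 'updated' ...
            cursor.putCurrent(new DatabaseEntry(updated));
        }
    } finally {
        cursor.close();
    }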

        + * + *

        WARNING: To allow for key updates in situations + * like those described above, all partial comparators must implement the + * {@link PartialComparator} tag interface. See "Upgrading from JE 5.0 + * or earlier" in the change log and the {@link PartialComparator} javadoc + * for more information.

        + * + *

+ * Another special type of comparator is a binary equality + * comparator, which considers two keys to be equal if and only if they + * have the same length and they are equal byte-per-byte. All binary + * equality comparators must implement the {@link BinaryEqualityComparator} + * interface. The significance of binary equality comparators is that they + * make possible certain internal optimizations, like the "blind puts" + * optimization, described in + * {@link BinaryEqualityComparator}. + *

        + * The comparator for an existing database will not be overridden unless + * setOverrideDuplicateComparator() is set to true. + */ + public DatabaseConfig setDuplicateComparator( + Comparator duplicateComparator) { + + /* Note: comparator may be null */ + setDuplicateComparatorVoid(duplicateComparator); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDuplicateComparatorVoid( + Comparator + duplicateComparator) { + + /* Note: comparator may be null */ + this.duplicateComparator = + validateComparator(duplicateComparator, "Duplicate"); + this.duplicateComparatorByClassName = false; + } + + /** + * By default, a byte by byte lexicographic comparison is used for + * duplicate data items in a duplicate set. To customize the comparison, + * supply a different Comparator. + * + *

        Note that there are two ways to set the comparator: by specifying the + * class or by specifying a serializable object. This method is used to + * specify a Comparator class. The comparator class must implement + * java.util.Comparator and must have a public zero-parameter constructor. + * JE will store the class name and instantiate the Comparator by class + * name (using Class.forName and newInstance) + * when subsequently opening the database. Because the Comparator is + * instantiated using its default constructor, it should not be dependent + * on other constructor parameters.

        + * + *

        The Comparator.compare() method is passed the byte arrays that are + * stored in the database. If you know how your data is organized in the + * byte array, then you can write a comparison routine that directly + * examines the contents of the arrays. Otherwise, you have to reconstruct + * your original objects, and then perform the comparison. See the Getting Started Guide for examples.

        + * + *

        If a comparator needs to be initialized before it is used or needs + * access to the environment's ClassLoader property, it may implement the + * {@link DatabaseComparator} interface.

        + * + *

        WARNING: There are several special considerations that must + * be taken into account when implementing a comparator.

        + *

          + *
• Comparator instances are shared by multiple threads and comparator + * methods are called without any special synchronization. Therefore, + * comparators must be thread safe. In general no shared state should be + * used and any caching of computed values must be done with proper + * synchronization.
• Because records are stored in the order determined by the + * Comparator, the Comparator's behavior must not change over time and + * therefore should not be dependent on any state that may change over + * time. In addition, although it is possible to change the comparator + * for an existing database, care must be taken that the new comparator + * provides compatible results with the previous comparator, or database + * corruption will occur.
• JE uses comparators internally in a wide variety of circumstances, + * so custom comparators must be sure to return valid values for any two + * arbitrary keys. The user must not make any assumptions about the + * range of key values that might be compared. For example, it is possible + * for the comparator to be used against previously deleted values.
        + * + *

        A special type of comparator is a partial comparator, which + * allows for the keys of a database to be updated, but only if the updates + * do not change the relative order of the keys. For example, if a database + * uses strings as keys and a case-insensitive comparator, it is possible to + * change the case of characters in the keys, as this will not change the + * ordering of the keys. Another example is when the keys contain multiple + * fields but uniquely identify each record with a single field. The + * partial comparator could then compare only the single identifying field, + * allowing the rest of the fields to be updated. A query + * ({@link Cursor#getSearchKey Cursor.getSearchKey}, for example) could + * then be performed by passing a partial key that contains only the + * identifying field. + * + *

        When using a partial duplicates comparator, it is possible to update + * the data for a duplicate record, as long as only the non-identifying + * fields in the data are changed. See + * {@link Cursor#putCurrent Cursor.putCurrent} for more information.

        + * + *

        WARNING: To allow for key updates in situations + * like those described above, all partial comparators must implement the + * {@link PartialComparator} tag interface. See "Upgrading from JE 5.0 + * or earlier" in the change log and the {@link PartialComparator} javadoc + * for more information.

        + *

+ * Another special type of comparator is a binary equality + * comparator, which considers two keys to be equal if and only if they + * have the same length and they are equal byte-per-byte. All binary + * equality comparators must implement the {@link BinaryEqualityComparator} + * interface. The significance of binary equality comparators is that they + * make possible certain internal optimizations, like the "blind puts" + * optimization, described in + * {@link BinaryEqualityComparator}. + *

+ * The comparator for an existing database will not be overridden unless + * setOverrideDuplicateComparator() is set to true. + * + * @return this + */ + public DatabaseConfig setDuplicateComparator( + Class<? extends Comparator> + duplicateComparatorClass) { + + setDuplicateComparatorVoid(duplicateComparatorClass); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDuplicateComparatorVoid( + Class<? extends Comparator> + duplicateComparatorClass) { + + /* Note: comparator may be null */ + this.duplicateComparator = + validateComparator(duplicateComparatorClass, "Duplicate"); + this.duplicateComparatorByClassName = true; + } + + /** + * Returns the Comparator used for duplicate record comparison on this + * database. + */ + public Comparator getDuplicateComparator() { + return duplicateComparator; + } + + /** + * Returns true if the duplicate comparator is set by class name, not by + * serializable Comparator object. + * + * @return true if the duplicate comparator is set by class name, not by + * serializable Comparator object. + */ + public boolean getDuplicateComparatorByClassName() { + return duplicateComparatorByClassName; + } + + /** + * Sets to true if the database exists and the duplicate comparator + * specified in this configuration object should override the current + * comparator. + * + * @param override Set to true to override the existing comparator. + * + * @return this + */ + public DatabaseConfig setOverrideDuplicateComparator(boolean override) { + setOverrideDuplicateComparatorVoid(override); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setOverrideDuplicateComparatorVoid(boolean override) { + overrideDuplicateComparator = override; + } + + /** + * Returns the override setting for the duplicate comparator. + */ + public boolean getOverrideDuplicateComparator() { + return overrideDuplicateComparator; + } + + /** + * @hidden + * For internal use only. + * + * Specifies the list of triggers associated with the database; triggers + * are executed in the order specified by this list. + *

        + * This configuration parameter is only meaningful when configuring a + * Primary database. + *

        + * The type of the trigger specified in the list must match the type of + * database being configured. For example, the trigger object must + * implement the ReplicatedDatabaseTrigger interface if it's + * used to configure a replicated database. + *

        + * Some of the incorrect uses of this parameter are detected during calls + * to {@link Environment#openDatabase Environment.openDatabase} or + * {@link Environment#openSecondaryDatabase + * Environment.openSecondaryDatabase} and will result in an + * IllegalArgumentException. + * + * @param triggers the list of database triggers to be associated with the + * environment. + * + * @throws IllegalArgumentException If the triggers in the list do not have + * unique names, have conflicting types (e.g. only a subset implement + * {@link ReplicatedDatabaseTrigger ReplicatedDatabaseTrigger}), or do not + * implement {@link ReplicatedDatabaseTrigger ReplicatedDatabaseTrigger} + * for a replicated database. + * + * @return this + */ + public DatabaseConfig setTriggers(List triggers) { + setTriggersVoid(triggers); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTriggersVoid(List triggers) { + this.triggers = triggers; + + if ((triggers == null) || (triggers.size() == 0)) { + return; + } + + checkTriggers(triggers); + } + + /** + * @hidden + * For internal use only. + * + * Returns the list of configured database triggers. + */ + public List getTriggers() { + return triggers; + } + + /** + * @hidden + * For internal use only. + * + * Set to true if the database exists and the {@link PersistentTrigger}s in + * the trigger list specified in this configuration object should override + * those in the current list of triggers. Note that any transient triggers + * that are specified are always configured, because they do not override + * existing triggers. + * + * @param override Set to true to override the existing comparator. + * + * @return this + */ + public DatabaseConfig setOverrideTriggers(boolean override) { + setOverrideTriggersVoid(override); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setOverrideTriggersVoid(boolean override) { + overrideTriggers = override; + } + + /** + * @hidden + * For internal use only. + * + * Returns the override setting for triggers. + */ + public boolean getOverrideTriggers() { + return overrideTriggers; + } + + /** + * Sets the temporary database option. + * + *

        Temporary databases operate internally in deferred-write mode to + * provide reduced disk I/O and increased concurrency. But unlike an + * ordinary deferred-write database, the information in a temporary + * database is not durable or persistent. + * + *

        A temporary database is not flushed to disk when the database is + * closed or when a checkpoint is performed, and the Database.sync method + * may not be called. When all handles for a temporary database are + * closed, the database is automatically removed. If a crash occurs before + * closing a temporary database, the database will be automatically removed + * when the environment is re-opened. + * + *

        Note that although temporary databases can page to disk if the cache + * is not large enough to hold the databases, they are much more efficient + * if the database remains in memory. See the JE FAQ on the Oracle + * Technology Network site for information on how to estimate the cache + * size needed by a given database. + * + *

+ * See the Database chapter of the Getting Started Guide for a full + * description of temporary databases. + *
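A sketch of the resulting lifecycle (env is an assumed open Environment handle):

    DatabaseConfig tmpCfg = new DatabaseConfig();
    tmpCfg.setAllowCreate(true);
    tmpCfg.setTemporary(true);
    Database scratch = env.openDatabase(null, "scratch", tmpCfg);
    // ... read and write as usual; nothing is made durable ...
    scratch.close();  // last handle closed: the database is removed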

        + * @param temporary if true, the database will be opened as a temporary + * database. + * + * @return this + */ + public DatabaseConfig setTemporary(boolean temporary) { + setTemporaryVoid(temporary); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTemporaryVoid(boolean temporary) { + this.temporary = temporary; + } + + /** + * Returns the temporary database option. + * @return boolean if true, the database is temporary. + */ + public boolean getTemporary() { + return temporary; + } + + /** + * Sets the deferred-write option. + * + *

Deferred-write databases have reduced disk I/O and improved + * concurrency. Disk I/O is reduced when data records are frequently + * modified or deleted. The information in a deferred-write database is + * not guaranteed to be durable or persistent until {@link Database#close} + * or {@link Database#sync} is called, or a checkpoint is performed. Since + * the usual write-ahead logging system is relaxed in order to improve + * performance, if the environment crashes before a {@link Database#sync} + * or {@link Database#close}, none, all, or an unpredictable set of the + * operations previously done may be persistent.

        After a deferred-write database is closed it may be re-opened as an + * ordinary transactional or non-transactional database. For example, this + * can be used to initially load a large data set in deferred-write mode + * and then switch to transactional mode for subsequent operations. + * + *
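A sketch of that load-then-switch pattern (env and the bulk-load loop are assumed):

    DatabaseConfig loadCfg = new DatabaseConfig();
    loadCfg.setAllowCreate(true);
    loadCfg.setDeferredWrite(true);
    Database db = env.openDatabase(null, "bulk", loadCfg);
    // ... perform the initial load with put() calls ...
    db.sync();   // flush the loaded records durably
    db.close();

    DatabaseConfig txnCfg = new DatabaseConfig();
    txnCfg.setTransactional(true);
    db = env.openDatabase(null, "bulk", txnCfg);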

        Note that although deferred-write databases can page to disk if the + * cache is not large enough to hold the databases, they are much more + * efficient if the database remains in memory. See the JE FAQ on the + * Oracle Technology Network site for information on how to estimate the + * cache size needed by a given database. + * + *

+ * See the Database chapter of the Getting Started Guide for a full + * description of deferred-write databases. + * + *

+ * @param deferredWrite if true, the database will be opened as a + * deferred-write database. + * + * @return this + */ + public DatabaseConfig setDeferredWrite(boolean deferredWrite) { + setDeferredWriteVoid(deferredWrite); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDeferredWriteVoid(boolean deferredWrite) { + this.deferredWrite = deferredWrite; + } + + /** + * Returns the deferred-write option. + * + * @return boolean if true, deferred-write is enabled. + */ + public boolean getDeferredWrite() { + return deferredWrite; + } + + /** + * Used to set the comparator when filling in a configuration from an + * existing database. + */ + void setBtreeComparatorInternal(Comparator comparator, + boolean byClassName) { + btreeComparator = comparator; + btreeComparatorByClassName = byClassName; + } + + /** + * Used to set the comparator when filling in a configuration from an + * existing database. + */ + void setDuplicateComparatorInternal(Comparator comparator, + boolean byClassName) { + duplicateComparator = comparator; + duplicateComparatorByClassName = byClassName; + } + + /** + * Setting useExistingConfig to true allows a program to open a database + * without knowing a priori what its configuration is. For example, if you + * want to open a database without knowing whether it contains sorted + * duplicates or not, you can set this property to true. In general, this + * is used by the JE utilities to avoid having to know the configuration + * of a database. The databases should be opened readOnly when this + * property is set to true. + * + * @param useExistingConfig true if this Database should be opened using + * the existing configuration. + * + * @return this + */ + public DatabaseConfig setUseExistingConfig(boolean useExistingConfig) { + setUseExistingConfigVoid(useExistingConfig); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setUseExistingConfigVoid(boolean useExistingConfig) { + this.useExistingConfig = useExistingConfig; + } + + /** + * Return the value of the useExistingConfig property. + * + * @return the value of the useExistingConfig property. + */ + public boolean getUseExistingConfig() { + return useExistingConfig; + } + + /** + * Sets the default {@code CacheMode} used for operations performed on this + * database. If this property is non-null, it overrides the default + * specified using {@link EnvironmentConfig#setCacheMode} for operations on + * this database. The default cache mode may be overridden on a per-record + * or per-operation basis using {@link Cursor#setCacheMode}, {@link + * ReadOptions#setCacheMode(CacheMode)} or {@link + * WriteOptions#setCacheMode(CacheMode)}. + * + * @param cacheMode is the default {@code CacheMode} used for operations + * performed on this database. If {@code null} is specified, the + * environment default will be used. + * + * @see CacheMode for further details. + * + * @since 4.0.97 + */ + public DatabaseConfig setCacheMode(final CacheMode cacheMode) { + setCacheModeVoid(cacheMode); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setCacheModeVoid(final CacheMode cacheMode) { + this.cacheMode = cacheMode; + } + + /** + * Returns the default {@code CacheMode} used for operations performed on + * this database, or null if the environment default is used.
+ * + * @return the default {@code CacheMode} used for operations performed on + * this database, or null if the environment default is used. + * + * @see #setCacheMode + * + * @since 4.0.97 + */ + public CacheMode getCacheMode() { + return cacheMode; + } + + /** + * Configures a database to be replicated or non-replicated, in a + * replicated Environment. By default this property is true, meaning that + * by default a database is replicated in a replicated Environment. + *

        + * In a non-replicated Environment, this property is ignored. All + * databases are non-replicated in a non-replicated Environment. + * + * @see + * Non-replicated + * Databases in a Replicated Environment + */ + public DatabaseConfig setReplicated(boolean replicated) { + setReplicatedVoid(replicated); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReplicatedVoid(boolean replicated) { + this.replicated = replicated; + } + + /** + * Returns the replicated property for the database. + *

        + * This method returns true by default. However, in a non-replicated + * Environment, this property is ignored. All databases are non-replicated + * in a non-replicated Environment. + * + * @see #setReplicated + */ + public boolean getReplicated() { + return replicated; + } + + /** + * @hidden + * For internal use only. + * + * Configures a SecondaryAssociation that is used to define + * primary-secondary associations for a group of primary and secondary + * databases. The same SecondaryAssociation instance must be configured on + * the primary and secondary databases. + * + * @see SecondaryAssociation + */ + public DatabaseConfig + setSecondaryAssociation(SecondaryAssociation association) { + setSecondaryAssociationVoid(association); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSecondaryAssociationVoid(SecondaryAssociation association) { + secAssociation = association; + } + + /** + * @hidden + * For internal use only. + * + * Returns the configured SecondaryAssociation. + * + * @see #setSecondaryAssociation + * @see SecondaryAssociation + */ + public SecondaryAssociation getSecondaryAssociation() { + return secAssociation; + } + + /** + * Returns a copy of this configuration object. + * + * @deprecated As of JE 4.0.13, replaced by {@link + * DatabaseConfig#clone()}.

+ */ + public DatabaseConfig cloneConfig() { + return clone(); + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public DatabaseConfig clone() { + try { + return (DatabaseConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * For JCA Database handle caching. + * + * @throws IllegalArgumentException via JEConnection.openDatabase. + */ + void validate(DatabaseConfig config) + throws DatabaseException { + + if (config == null) { + config = DatabaseConfig.DEFAULT; + } + + boolean txnMatch = (config.transactional == transactional); + boolean roMatch = (config.readOnly == readOnly); + boolean sdMatch = (config.sortedDuplicates == sortedDuplicates); + boolean dwMatch = (config.getDeferredWrite() == deferredWrite); + boolean btCmpMatch = true; + if (config.overrideBtreeComparator) { + if (btreeComparator == null) { + btCmpMatch = (config.btreeComparator == null); + } else if (config.btreeComparatorByClassName != + btreeComparatorByClassName) { + btCmpMatch = false; + } else if (btreeComparatorByClassName) { + btCmpMatch = btreeComparator.getClass() == + config.btreeComparator.getClass(); + } else { + btCmpMatch = Arrays.equals + (DatabaseImpl.objectToBytes + (btreeComparator, "Btree"), + DatabaseImpl.objectToBytes + (config.btreeComparator, "Btree")); + } + } + boolean dtCmpMatch = true; + if (config.overrideDuplicateComparator) { + if (duplicateComparator == null) { + dtCmpMatch = (config.duplicateComparator == null); + } else if (config.duplicateComparatorByClassName != + duplicateComparatorByClassName) { + dtCmpMatch = false; + } else if (duplicateComparatorByClassName) { + dtCmpMatch = duplicateComparator.getClass() == + config.duplicateComparator.getClass(); + } else { + dtCmpMatch = Arrays.equals + (DatabaseImpl.objectToBytes + (duplicateComparator, "Duplicate"), + DatabaseImpl.objectToBytes + (config.duplicateComparator, "Duplicate")); + } + } + + if (txnMatch && + roMatch && + sdMatch && + dwMatch && + btCmpMatch && + dtCmpMatch) { + return; + } + String message = genDatabaseConfigMismatchMessage + (txnMatch, roMatch, sdMatch, dwMatch, + btCmpMatch, dtCmpMatch); + throw new IllegalArgumentException(message); + } + + private String genDatabaseConfigMismatchMessage(boolean txnMatch, + boolean roMatch, + boolean sdMatch, + boolean dwMatch, + boolean btCmpMatch, + boolean dtCmpMatch) { + StringBuilder ret = new StringBuilder + ("The following DatabaseConfig parameters for the\n" + + "cached Database do not match the parameters for the\n" + + "requested Database:\n"); + if (!txnMatch) { + ret.append(" Transactional\n"); + } + + if (!roMatch) { + ret.append(" Read-Only\n"); + } + + if (!sdMatch) { + ret.append(" Sorted Duplicates\n"); + } + + if (!dwMatch) { + ret.append(" Deferred Write\n"); + } + + if (!btCmpMatch) { + ret.append(" Btree Comparator\n"); + } + + if (!dtCmpMatch) { + ret.append(" Duplicate Comparator\n"); + } + + return ret.toString(); + } + + /** + * Checks that this comparator can be serialized by JE.
+ * + * @throws IllegalArgumentException via setBtreeComparator and + * setDuplicateComparator + */ + private Comparator validateComparator( + Comparator comparator, + String type) + throws IllegalArgumentException { + + if (comparator == null) { + return null; + } + + try { + DatabaseImpl.comparatorToBytes(comparator, false /*byClassName*/, + type); + return comparator; + } catch (DatabaseException e) { + throw new IllegalArgumentException + (type + " comparator is not valid.", e); + } + } + + /** + * Checks that this comparator class can be instantiated by JE. + * + * @throws IllegalArgumentException via setBtreeComparator and + * setDuplicateComparator + */ + private Comparator validateComparator( + Class<? extends Comparator> comparatorClass, + String type) + throws IllegalArgumentException { + + if (comparatorClass == null) { + return null; + } + + if (!Comparator.class.isAssignableFrom(comparatorClass)) { + throw new IllegalArgumentException + (comparatorClass.getName() + + " is not valid as a " + type + + " comparator because it does not " + + "implement java.util.Comparator."); + } + + try { + return DatabaseImpl.instantiateComparator(comparatorClass, type); + } catch (DatabaseException e) { + throw new IllegalArgumentException + (type + " comparator is not valid. " + + "Perhaps you have not implemented a zero-parameter " + + "constructor for the comparator or the comparator class " + + "cannot be found.", + e); + } + } + + /** + * Checks that this database configuration is valid for a new, non-existent + * database. + * + * @throws IllegalArgumentException via Environment.openDatabase and + * openSecondaryDatabase + */ + void validateForNewDb() + throws DatabaseException { + + if (readOnly) { + throw new IllegalArgumentException + ("DatabaseConfig.setReadOnly() must be set to false " + + "when creating a Database"); + } + + if (transactional && deferredWrite) { + throw new IllegalArgumentException + ("deferredWrite mode is not supported for transactional " + + "databases"); + } + } + + /** + * For unit tests, checks that the database configuration attributes that + * are saved persistently are equal. + */ + boolean persistentEquals(DatabaseConfig other) { + if (sortedDuplicates != other.sortedDuplicates) { + return false; + } + + if (temporary != other.temporary) { + return false; + } + + if (replicated != other.replicated) { + return false; + } + + if (nodeMaxEntries != other.nodeMaxEntries) { + return false; + } + + if (((btreeComparator == null) && (other.btreeComparator != null)) || + ((btreeComparator != null) && (other.btreeComparator == null))) { + return false; + } + + if (btreeComparator != null) { + if (btreeComparator.getClass() != + other.btreeComparator.getClass()) { + return false; + } + } + + if (((duplicateComparator == null) && + (other.duplicateComparator != null)) || + ((duplicateComparator != null) && + (other.duplicateComparator == null))) { + return false; + } + + if ((duplicateComparator != null)) { + if (duplicateComparator.getClass() != + other.duplicateComparator.getClass()) { + return false; + } + } + + return true; + } + + /** + * Perform validations at database open time on the completed DbConfig + * object. Inter-attribute checks are done here.
+ */ + void validateOnDbOpen(String databaseName, + boolean dbIsReplicated) { + + if ((getDeferredWrite() && getTemporary()) || + (getDeferredWrite() && getTransactional()) || + (getTemporary() && getTransactional())) { + throw new IllegalArgumentException + ("Attempted to open Database " + databaseName + + " and two or more of the following exclusive properties" + + " are true: deferredWrite, temporary, transactional"); + } + + if ((triggers != null) && (triggers.size() > 0)) { + + final boolean replicatedTriggers = checkTriggers(triggers); + if (dbIsReplicated && !replicatedTriggers) { + throw new IllegalArgumentException + ("For a replicated Database, triggers must implement " + + ReplicatedDatabaseTrigger.class.getName()); + } + } + } + + /** + * Checks that the triggers in the list have consistent definitions. + * + * @param triggerList the list of triggers to be checked + * + * @return true if the list consists of just replicated triggers, false if + * it consists entirely of non-replicated triggers. + * + * @throws IllegalArgumentException if the list had triggers with duplicate + * names or the types were not consistent. + */ + boolean checkTriggers(List triggerList) { + + final boolean replicatedTrigger = + triggerList.get(0) instanceof ReplicatedDatabaseTrigger; + + final Set triggerNames = new HashSet(); + + for (Trigger trigger : triggerList) { + + /* + * Note that we do not disallow the unsupported PersistentTrigger + * or ReplicatedDatabaseTrigger interfaces here, to enable the + * continued testing of these partially implemented features. + * + if (trigger instanceof PersistentTrigger) { + throw new IllegalArgumentException + ("PersistentTrigger not supported: " + trigger.getName()); + } + */ + + if (!triggerNames.add(trigger.getName())) { + throw new IllegalArgumentException + ("Duplicate trigger name:" + trigger.getName()); + } + if (replicatedTrigger != + (trigger instanceof ReplicatedDatabaseTrigger)) { + throw new IllegalArgumentException + ("Conflicting trigger types in list:" + triggerList); + } + } + return replicatedTrigger; + } + + /** + * Combine the per-Database handle and Database-wide properties for a + * database handle. + * + * @param dbImpl the underlying DatabaseImpl for a database handle, which + * provides the Database-wide properties + * + * @param dbHandleConfig DatabaseConfig field for the same database handle, + * which provides the per-Database properties. + * + * @return a DatabaseConfig which includes the correct Database-wide and + * per-Database handle properties. + */ + static DatabaseConfig combineConfig(DatabaseImpl dbImpl, + DatabaseConfig dbHandleConfig) { + + DatabaseConfig showConfig = dbHandleConfig.cloneConfig(); + + /* + * Set the Database-wide properties from the DatabaseImpl, since they + * might have changed from other database handles. + * + * Note: sorted duplicates, temporary and replicated attributes are not + * mutable and were checked at handle creation to make sure these + * properties in dbHandleConfig are consistent with + * DatabaseImpl. They're still set here in case the useExistingConfig + * property is set, and those fields were not initialized. + */ + if (dbImpl != null) { + /* mutable, persistent, database wide attributes.
*/ + showConfig.setBtreeComparatorInternal + (dbImpl.getBtreeComparator(), + dbImpl.getBtreeComparatorByClass()); + showConfig.setDuplicateComparatorInternal + (dbImpl.getDuplicateComparator(), + dbImpl.getDuplicateComparatorByClass()); + showConfig.setKeyPrefixing(dbImpl.getKeyPrefixing()); + showConfig.setNodeMaxEntries(dbImpl.getNodeMaxTreeEntries()); + showConfig.setTriggers(dbImpl.getTriggers()); + + /* mutable, but non-persistent attributes. */ + showConfig.setTransactional(dbImpl.isTransactional()); + showConfig.setDeferredWrite(dbImpl.isDurableDeferredWrite()); + + /* not mutable, but initialized in the showConfig. */ + showConfig.setReplicated(dbImpl.isReplicated()); + showConfig.setSortedDuplicates(dbImpl.getSortedDuplicates()); + showConfig.setTemporary(dbImpl.isTemporary()); + } + + return showConfig; + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + return "allowCreate=" + allowCreate + + "\nexclusiveCreate=" + exclusiveCreate + + "\ntransactional=" + transactional + + "\nreadOnly=" + readOnly + + "\nsortedDuplicates=" + sortedDuplicates + + "\ndeferredWrite=" + deferredWrite + + "\ntemporary=" + temporary + + "\nkeyPrefixing=" + keyPrefixing + + "\n"; + } +} diff --git a/src/com/sleepycat/je/DatabaseConfigBeanInfo.java b/src/com/sleepycat/je/DatabaseConfigBeanInfo.java new file mode 100644 index 0000000..5a0ea1a --- /dev/null +++ b/src/com/sleepycat/je/DatabaseConfigBeanInfo.java @@ -0,0 +1,35 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class DatabaseConfigBeanInfo extends ConfigBeanInfoBase { + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(DatabaseConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(DatabaseConfig.class); + } +} diff --git a/src/com/sleepycat/je/DatabaseEntry.java b/src/com/sleepycat/je/DatabaseEntry.java new file mode 100644 index 0000000..c5481ee --- /dev/null +++ b/src/com/sleepycat/je/DatabaseEntry.java @@ -0,0 +1,533 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; + +import com.sleepycat.je.tree.Key; +import com.sleepycat.util.keyrange.KeyRange; + +/** + * Encodes database key and data items as a byte array. + * + *

        Storage and retrieval for the {@link com.sleepycat.je.Database Database} + * and {@link com.sleepycat.je.Cursor Cursor} methods are based on key/data + * pairs. Both key and data items are represented by DatabaseEntry objects. + * Key and data byte arrays may refer to arrays of zero length up to arrays of + * essentially unlimited length.

        + * + *

        The DatabaseEntry class provides simple access to an underlying object + * whose elements can be examined or changed. DatabaseEntry objects can be + * subclassed, providing a way to associate with it additional data or + * references to other structures.

        + * + *

        Access to DatabaseEntry objects is not re-entrant. In particular, if + * multiple threads simultaneously access the same DatabaseEntry object using + * {@link com.sleepycat.je.Database Database} or {@link com.sleepycat.je.Cursor + * Cursor} methods, the results are undefined.

        + * + *

        DatabaseEntry objects may be used in conjunction with the object mapping + * support provided in the {@link com.sleepycat.bind} package.
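A minimal read sketch (db is an assumed open Database handle):

    DatabaseEntry key = new DatabaseEntry(
        "alpha".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    DatabaseEntry data = new DatabaseEntry();
    if (db.get(null, key, data, LockMode.DEFAULT) ==
            OperationStatus.SUCCESS) {
        byte[] valueBytes = data.getData();  // newly allocated by JE
    }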

        + * + *

        Input and Output Parameters

        + * + *

        DatabaseEntry objects are used for both input values (for example, when + * writing to a database or specifying a search parameter) and output values + * (for example, when reading from a database). For every CRUD method + * ({@code get}, {@code put}, etc), each of the method's DatabaseEntry + * parameters ({@code key}, {@code data}, etc) may be input or output + * parameters, and this is specified by the method's documentation.

        + * + *

        Input Parameters

        + * + *

        An input parameter is required by the JE method. The parameter may not be + * null, and the caller is also responsible for initializing the data of the + * DatabaseEntry to a non-null byte array.

        + * + *

        Input parameters normally may not be {@link #setPartial(int,int,boolean) + * partial}. However, this is allowed under certain circumstances, namely + * the {@link Cursor#putCurrent} method allows specifying a partial data + * parameter in order to update only part of the record's data value. Input + * parameters are NOT allowed to be partial unless this is explicitly stated in + * the method documentation.

        + * + *

        Although an input parameter is always used for input, in some cases it + * may be also used for output. For example, the {@link + * Cursor#getSearchKeyRange} method is passed a key parameter that is used as + * input, but since a record with a different key (greater or equal to the key + * given) may be found, the key parameter is also used to return the key + * that was found. Such parameters are documented as "input/output" + * parameters.

        + * + *

        Another example is when a custom key comparator is used and a key + * parameter is passed to a search method. The input parameter may match a + * record's key even if the bytes are not equal, and the key of the record + * found will be returned via the parameter. The same thing is true of data (or + * primary key) parameters when a custom duplicate comparator is used. Because + * of this, all input parameters of "get" methods can potentially be used for + * output, however, they are not explicitly documented to be input/output + * parameters.

        + * + *

        Output Parameters

        + * + *

        An output parameter is not required by the JE method. It is used to + * optionally return a value to the caller. Null may be passed for the + * parameter if no returned value is needed. Passing null is a common way to + * optimize read operations when only the record's key, and not the record's + * data, is required. By passing null for the data parameter, a read from + * disk can be avoided when the data is not already cached. In addition, all + * output parameters may be {@link #setPartial(int,int,boolean) partial} to + * allow only returning a part of the data byte array. See Using Null and Partial DatabaseEntry + * Parameters for more information.
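For example, a key-only existence probe might look like this (db and keyBytes are assumed):

    DatabaseEntry key = new DatabaseEntry(keyBytes);
    // Null data entry: JE may satisfy this without fetching the data.
    boolean exists =
        db.get(null, key, null, LockMode.DEFAULT) == OperationStatus.SUCCESS;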

        + * + *

For output parameters, the byte array specified by the caller will not be + * used and may be null. The JE method will always allocate a new byte + * array. Therefore, after calling a method that returns output parameters, + * the application can safely keep a reference to the byte array returned by + * {@link #getData} without danger that the array will be overwritten in a + * subsequent call.

        + * + *

        Historical note: Prior to JE 7.0, null could not be passed for output + * parameters. Instead, {@code DatabaseEntry.setPartial(0, 0, true)} was called + * for a data parameter to avoid reading the record's data. Now, null can be + * passed instead.

        + * + *

        Offset and Size Properties

        + * + *

        By default the Offset property is zero and the Size property is the + * length of the byte array. However, to allow for optimizations involving the + * partial use of a byte array, the Offset and Size may be set to non-default + * values.

        + * + *

        For output parameters, the Size will always be set to the length of the + * byte array and the Offset will always be set to zero.

        + * + *

        However, for input parameters the Offset and Size are set to non-default + * values by the built-in tuple and serial bindings. For example, with a tuple + * or serial binding the byte array is grown dynamically as data is output, and + * the Size is set to the number of bytes actually used. For a serial binding, + * the Offset is set to a non-zero value in order to implement an optimization + * having to do with the serialization stream header.

        + * + *

        WARNING: In callbacks that are passed DatabaseEntry parameters, the + * application should always honor the Size and Offset properties, rather than + * assuming they have default values.

+ */ +public class DatabaseEntry implements Serializable { + private static final long serialVersionUID = 1L; + + /* Currently, JE stores all data records as byte arrays */ + private byte[] data; + private int dlen = 0; + private int doff = 0; + private int offset = 0; + private int size = 0; + private boolean partial = false; + + /* FindBugs - ignore not "final" since a user can set this. */ + /** @hidden + * The maximum number of bytes to show when toString() is called. + */ + public static int MAX_DUMP_BYTES = 100; + + /** + * Returns all the attributes of the database entry in text form, including + * the underlying data. The maximum number of bytes that will be formatted + * is taken from the static variable DatabaseEntry.MAX_DUMP_BYTES, which + * defaults to 100. MAX_DUMP_BYTES may be changed by an application if it + * wishes to cause more bytes to be formatted. + */ + @Override + public String toString() { + /* The opening of this builder was lost to markup stripping; the + * "<DatabaseEntry data=..." form below is a reconstruction. */ + StringBuilder sb = new StringBuilder("<DatabaseEntry data=\""); + sb.append(dumpData()); + if (size > MAX_DUMP_BYTES) { + sb.append(" ... ").append((size - MAX_DUMP_BYTES) + + " bytes not shown "); + } + sb.append("\"/>"); + return sb.toString(); + } + + /* + * Constructors + */ + + /** + * Constructs a DatabaseEntry with null data. The offset and size are set + * to zero. + */ + public DatabaseEntry() { + } + + /** + * Constructs a DatabaseEntry with a given byte array. The offset is set + * to zero; the size is set to the length of the array, or to zero if null + * is passed. + * + * @param data Byte array wrapped by the DatabaseEntry. + */ + public DatabaseEntry(byte[] data) { + this.data = data; + if (data != null) { + this.size = data.length; + } + } + + /** + * Constructs a DatabaseEntry with a given byte array, offset and size. + * + * @param data Byte array wrapped by the DatabaseEntry. + * + * @param offset Offset in the first byte in the byte array to be included. + * + * @param size Number of bytes in the byte array to be included. + */ + public DatabaseEntry(byte[] data, int offset, int size) { + this.data = data; + this.offset = offset; + this.size = size; + } + + /* + * Accessors + */ + + /** + * Returns the byte array. + * + *

        For a DatabaseEntry that is used as an output parameter, the byte + * array will always be a newly allocated array. The byte array specified + * by the caller will not be used and may be null.

        + * + * @return The byte array. + */ + public byte[] getData() { + return data; + } + + /** + * Sets the byte array. The offset is set to zero; the size is set to the + * length of the array, or to zero if null is passed. + * + * @param data Byte array wrapped by the DatabaseEntry. + */ + public void setData(byte[] data) { + this.data = data; + offset = 0; + size = (data == null) ? 0 : data.length; + } + + /** + * Sets the byte array, offset and size. + * + * @param data Byte array wrapped by the DatabaseEntry. + * + * @param offset Offset in the first byte in the byte array to be included. + * + * @param size Number of bytes in the byte array to be included. + */ + public void setData(byte[] data, int offset, int size) { + this.data = data; + this.offset = offset; + this.size = size; + } + + /** + * Configures this DatabaseEntry to read or write partial records. + * + *

        By default the specified data (byte array, offset and size) + * corresponds to the full stored key or data item. Optionally, the + * Partial property can be set to true, and the PartialOffset and + * PartialLength properties are used to specify the portion of the key or + * data item to be read or written.

        + * + *

Note that the Partial properties are set only by the caller. They + * will never be set by a Database or Cursor method, nor will they ever be + * set by bindings. Therefore, the application can assume that the Partial + * properties are not set, unless the application itself sets them + * explicitly.
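For instance, to retrieve at most the first 16 bytes of a record's data (db and keyBytes are assumed):

    DatabaseEntry key = new DatabaseEntry(keyBytes);
    DatabaseEntry data = new DatabaseEntry();
    data.setPartial(0, 16, true);  // partial offset 0, partial length 16
    if (db.get(null, key, data, LockMode.DEFAULT) ==
            OperationStatus.SUCCESS) {
        byte[] prefix = data.getData();  // at most 16 bytes
    }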

        + * + *

For a DatabaseEntry that is used as an output parameter, the size + * will always be the length of the data array.

+ * + * @return Number of bytes in the byte array to be included. + */ + public int getSize() { + return size; + } + + /** + * Sets the byte size of the data array. + * + * An ArrayIndexOutOfBoundsException is thrown if the data, offset, and + * size parameters refer to elements of the data array which do not exist. + * Note that this exception will not be thrown by setSize() or setOffset(), + * but will be thrown by various JE methods if "this" is inconsistent and + * is used as an input parameter to those methods. It is the caller's + * responsibility to ensure that size, offset, and data.length are + * consistent. + * + * @param size Number of bytes in the byte array to be included. + */ + public void setSize(int size) { + this.size = size; + } + + /** + * Dumps the data as a byte array, for tracing purposes. + */ + String dumpData() { + return Key.DUMP_TYPE.dumpByteArray( + KeyRange.getByteArray(this, MAX_DUMP_BYTES)); + } + + /** + * Compares the data of two entries for byte-by-byte equality. + * + *

        In either entry, if the offset is non-zero or the size is not equal + * to the data array length, then only the data bounded by these values is + * compared. The data array length and offset need not be the same in both + * entries for them to be considered equal.

        + * + *

        If the data array is null in one entry, then to be considered equal + * both entries must have a null data array.
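A small sketch of the bounded comparison just described; the backing arrays and offsets differ, but the bounded bytes match:

    DatabaseEntry a = new DatabaseEntry(new byte[] { 9, 1, 2, 3 }, 1, 3); // bytes {1, 2, 3}
    DatabaseEntry b = new DatabaseEntry(new byte[] { 1, 2, 3 });          // bytes {1, 2, 3}
    boolean same = a.equals(b); // true: only the bounded bytes are compared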

        + * + *

        If the partial property is set in either entry, then to be considered + * equal both entries must have the same partial properties: partial, + * partialOffset and partialLength. + */ + @Override + public boolean equals(Object o) { + if (!(o instanceof DatabaseEntry)) { + return false; + } + DatabaseEntry e = (DatabaseEntry) o; + if (partial || e.partial) { + if (partial != e.partial || + dlen != e.dlen || + doff != e.doff) { + return false; + } + } + if (data == null && e.data == null) { + return true; + } + if (data == null || e.data == null) { + return false; + } + if (size != e.size) { + return false; + } + for (int i = 0; i < size; i += 1) { + if (data[offset + i] != e.data[e.offset + i]) { + return false; + } + } + return true; + } + + /** + * Returns a hash code based on the data value. + */ + @Override + public int hashCode() { + int hash = 0; + if (data != null) { + for (int i = 0; i < size; i += 1) { + hash += data[offset + i]; + } + } + return hash; + } +} diff --git a/src/com/sleepycat/je/DatabaseException.java b/src/com/sleepycat/je/DatabaseException.java new file mode 100644 index 0000000..9abcc72 --- /dev/null +++ b/src/com/sleepycat/je/DatabaseException.java @@ -0,0 +1,163 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * The root of all BDB JE-defined exceptions. + * + *

        Exceptions thrown by BDB JE fall into three categories.

        + *
          + *
1. When a method is used incorrectly as the result of an application + * programming error, a standard Java runtime exception is thrown: {@link + * IllegalArgumentException}, {@link IllegalStateException} or {@link + * UnsupportedOperationException}. These exceptions have the standard meaning + * defined by their javadoc. Note that JE throws {@link + * IllegalArgumentException} rather than {@link NullPointerException} when a + * required parameter is null. + *
2. When an operation failure occurs, {@link OperationFailureException} or + * one of its subclasses is thrown. See {@link OperationFailureException} for + * details. + *
3. When an {@code Environment} failure occurs, {@link + * EnvironmentFailureException} or one of its subclasses is thrown. See {@link + * EnvironmentFailureException} for details. A minimal catch structure for + * all three categories is sketched below. + *
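A minimal catch structure for the three categories, where op.run() stands in for a hypothetical application operation against an open environment:

    try {
        op.run();
    } catch (OperationFailureException e) {
        // category 2: the operation failed; typically abort the transaction and retry
    } catch (EnvironmentFailureException e) {
        // category 3: the environment is invalid; close and re-open it
    } catch (IllegalArgumentException | IllegalStateException e) {
        // category 1: application programming error; fix the calling code
    }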
        + * + *

        {@link OperationFailureException} and {@link EnvironmentFailureException} + * are the only two direct subclasses of {@code DatabaseException}.

        + * + *

        (Actually the above statement is not strictly correct. {@link + * EnvironmentFailureException} extends {@link RunRecoveryException} which + * extends {@code DatabaseException}. {@link RunRecoveryException} exists for + * backward compatibility and has been deprecated. {@link + * EnvironmentFailureException} should be used instead.)

        + * + *

Note that in some cases, certain methods return status values without + * throwing an exception. This occurs in situations that are not normally + * considered an error, but in which some informational status is returned. For + * example, {@link com.sleepycat.je.Database#get Database.get} returns {@link + * com.sleepycat.je.OperationStatus#NOTFOUND OperationStatus.NOTFOUND} when a + * requested key does not appear in the database.
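Sketched with hypothetical handles (db is an assumed open Database):

    DatabaseEntry key = new DatabaseEntry(new byte[] { 1 });
    DatabaseEntry data = new DatabaseEntry();
    OperationStatus status = db.get(null, key, data, LockMode.DEFAULT);
    if (status == OperationStatus.NOTFOUND) {
        // the key is absent: an informational status, not a DatabaseException
    }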

+ */ +@SuppressWarnings("javadoc") +public abstract class DatabaseException extends RuntimeException { + + private static final long serialVersionUID = 1535562945L; + + /* String appended to original message, see addErrorMessage. */ + private String extraInfo = null; + + /* Per-thread re-thrown stack traces, see addRethrownStackTrace. */ + private transient ThreadLocal<String> rethrownStackTraces = + new ThreadLocal<String>(); + + /** + * For internal use only. + * @hidden + */ + public DatabaseException(Throwable t) { + super(getVersionHeader() + t.toString(), t); + } + + /** + * For internal use only. + * @hidden + */ + public DatabaseException(String message) { + super(getVersionHeader() + message); + } + + /** + * For internal use only. + * @hidden + */ + public DatabaseException(String message, Throwable t) { + super((getVersionHeader() + message), t); + } + + /** + * For internal use only. + * @hidden + * Utility for generating the version at the start of the exception + * message. Public for unit tests. + */ + public static String getVersionHeader() { + return "(JE " + JEVersion.CURRENT_VERSION + ") "; + } + + /** + * For internal use only. + * @hidden + * + * Support the addition of extra error information. Use this approach + * rather than wrapping exceptions whenever possible for two reasons: + * 1) so the user can catch the original exception class and handle it + * appropriately, and 2) because the EnvironmentFailureException hierarchy + * does some intricate things with setting the environment as invalid. + * + * @param newExtraInfo the message to add, not including separator space. + */ + public void addErrorMessage(String newExtraInfo) { + + if (extraInfo == null) { + extraInfo = " " + newExtraInfo; + } else { + extraInfo = extraInfo + ' ' + newExtraInfo; + } + } + + /** + * For internal use only. + * @hidden + * + * Adds the current stack trace to the exception message, before it is + * re-thrown in a different thread. The full stack trace will then show + * both where it was generated and where it was re-thrown. Use this + * approach rather than wrapping (via wrapSelf) when user code relies on + * the getCause method to return a specific exception, and wrapping would + * change the cause exception to something unexpected. + */ + public void addRethrownStackTrace() { + + final Exception localEx = new Exception( + "Stacktrace where exception below was rethrown (" + + getClass().getName() + ")"); + + rethrownStackTraces.set(LoggerUtils.getStackTrace(localEx)); + } + + @Override + public String getMessage() { + + /* + * If extraInfo and rethrownStackTrace are null, don't allocate memory + * by constructing a new string. An OutOfMemoryError (or related Error) + * may have occurred, and we'd rather not cause another one here. + */ + final String msg = (extraInfo != null) ? + (super.getMessage() + extraInfo) : + super.getMessage(); + + final String rethrownStackTrace = rethrownStackTraces.get(); + if (rethrownStackTrace == null) { + return msg; + } + + return rethrownStackTrace + "\n" + msg; + } +} diff --git a/src/com/sleepycat/je/DatabaseExistsException.java b/src/com/sleepycat/je/DatabaseExistsException.java new file mode 100644 index 0000000..a51e315 --- /dev/null +++ b/src/com/sleepycat/je/DatabaseExistsException.java @@ -0,0 +1,56 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown by {@link Environment#openDatabase Environment.openDatabase} and + * {@link Environment#openSecondaryDatabase Environment.openSecondaryDatabase} + * if the database already exists and the {@code DatabaseConfig + * ExclusiveCreate} parameter is true. + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.
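A minimal sketch, assuming an open Environment env; the database name is hypothetical:

    DatabaseConfig config = new DatabaseConfig();
    config.setAllowCreate(true);
    config.setExclusiveCreate(true);
    try {
        Database db = env.openDatabase(null, "myDb", config);
    } catch (DatabaseExistsException e) {
        // "myDb" already exists; any supplied Transaction remains valid
    }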

        + * + * @since 4.0 + */ +public class DatabaseExistsException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public DatabaseExistsException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private DatabaseExistsException(String message, + DatabaseExistsException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DatabaseExistsException(msg, this); + } +} diff --git a/src/com/sleepycat/je/DatabaseNotFoundException.java b/src/com/sleepycat/je/DatabaseNotFoundException.java new file mode 100644 index 0000000..0534ddc --- /dev/null +++ b/src/com/sleepycat/je/DatabaseNotFoundException.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown when an operation requires a database and that database does not + * exist. + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.

        + */ +public class DatabaseNotFoundException extends OperationFailureException { + + private static final long serialVersionUID = 1895430616L; + + /** + * For internal use only. + * @hidden + */ + public DatabaseNotFoundException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private DatabaseNotFoundException(String message, + DatabaseNotFoundException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DatabaseNotFoundException(msg, this); + } +} diff --git a/src/com/sleepycat/je/DatabaseStats.java b/src/com/sleepycat/je/DatabaseStats.java new file mode 100644 index 0000000..138ce94 --- /dev/null +++ b/src/com/sleepycat/je/DatabaseStats.java @@ -0,0 +1,26 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; + +/** + * Statistics for a single database. + */ +public abstract class DatabaseStats implements Serializable { + private static final long serialVersionUID = 1L; + + // no public constructor + protected DatabaseStats() {} +} diff --git a/src/com/sleepycat/je/DbInternal.java b/src/com/sleepycat/je/DbInternal.java new file mode 100644 index 0000000..7d7e349 --- /dev/null +++ b/src/com/sleepycat/je/DbInternal.java @@ -0,0 +1,726 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.Properties; + +import com.sleepycat.compat.DbCompat.OpReadOptions; +import com.sleepycat.compat.DbCompat.OpResult; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DiskOrderedCursorImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.GetMode; +import com.sleepycat.je.dbi.PutMode; +import com.sleepycat.je.dbi.SearchMode; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.keyrange.RangeCursor; + +/** + * @hidden + * For internal use only. It serves to shelter methods that must be public to + * be used by other BDB JE packages but that are not part of the public API + * available to applications. 
+ */ +public class DbInternal { + + public static OperationResult DEFAULT_RESULT = + new OperationResult(0, false); + + /** + * Proxy to Database.invalidate() + */ + public static void invalidate(final Database db) { + db.invalidate(); + } + + /** + * Proxy to Database.setPreempted() + */ + public static void setPreempted(final Database db, + final String dbName, + final String msg) { + db.setPreempted(dbName, msg); + } + + /** + * Proxy to Environment.getMaybeNullEnvImpl. + * + * This method does not check whether the returned envImpl is valid. + * + * WARNING: This method will be phased out over time and normally + * getNonNullEnvImpl should be called instead. + * + * @return the underlying EnvironmentImpl, or null if the env has + * been closed. + */ + public static EnvironmentImpl getEnvironmentImpl(final Environment env) { + return env.getMaybeNullEnvImpl(); + } + + /** + * Proxy to Environment.getNonNullEnvImpl + * + * This method is called to access the underlying EnvironmentImpl when an + * env is expected to be open, to guard against NPE when the env has been + * closed. + * + * This method does not check whether the env is valid. + * + * @return the non-null, underlying EnvironmentImpl. + * + * @throws IllegalStateException if the env has been closed. + */ + public static EnvironmentImpl getNonNullEnvImpl(final Environment env) { + return env.getNonNullEnvImpl(); + } + + /** + * Proxy to Environment.clearEnvImpl + */ + public static void clearEnvImpl(final Environment env) { + env.clearEnvImpl(); + } + + /** + * Proxy to Environment.checkOpen + */ + public static EnvironmentImpl checkOpen(final Environment env) { + return env.checkOpen(); + } + + /** + * Proxy to Environment.closeInternalHandle + */ + public static void closeInternalHandle(final Environment env) { + env.closeInternalHandle(); + } + + /** + * Proxy to Database.getDebugName + */ + public static String getDbDebugName(final Database dbHandle) { + return dbHandle.getDebugName(); + } + + /** + * Proxy to SecondaryDatabase.getSecondaryAssociation + */ + public static SecondaryAssociation getSecondaryAssociation( + final Database db) { + return db.getSecondaryAssociation(); + } + + /** + * Proxy to SecondaryDatabase.getPrivateSecondaryConfig + */ + public static SecondaryConfig getPrivateSecondaryConfig( + final SecondaryDatabase secDb) { + return secDb.getPrivateSecondaryConfig(); + } + + /** + * Proxy to Cursor.readPrimaryAfterGet + */ + public static boolean readPrimaryAfterGet( + final Cursor cursor, + final Database priDb, + final DatabaseEntry key, + final DatabaseEntry pKey, + DatabaseEntry data, + final LockMode lockMode, + final boolean secDirtyRead, + final boolean lockPrimaryOnly, + final boolean allowNoData, + final Locker locker, + final SecondaryDatabase secDb, + final SecondaryAssociation secAssoc) { + + return cursor.readPrimaryAfterGet( + priDb, key, pKey, data, lockMode, secDirtyRead, + lockPrimaryOnly, allowNoData, locker, secDb, secAssoc); + } + + /** + * Proxy to Cursor.position(). + */ + public static OperationResult position( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final boolean first) { + + return cursor.position(key, data, lockMode, null, first); + } + + /** + * Proxy to Cursor.search(). 
+ */ + public static OperationResult search( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final SearchMode searchMode, + final boolean countOpStat) { + + return cursor.search( + key, data, lockMode, cacheMode, searchMode, countOpStat); + } + + /** + * Proxy to Cursor.searchForReplay(). + */ + public static OperationResult searchForReplay( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final SearchMode searchMode) { + + return cursor.searchForReplay(key, data, lockMode, null, searchMode); + } + + /** + * Proxy to Cursor.retrieveNext(). + */ + public static OperationResult retrieveNext( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode, + final GetMode getMode) + throws DatabaseException { + + return cursor.retrieveNext(key, data, lockMode, null, getMode); + } + + /** + * Proxy to Cursor.advanceCursor() + */ + public static boolean advanceCursor( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry data) { + + return cursor.advanceCursor(key, data); + } + + /** + * Proxy to Cursor.deleteInternal() + */ + public static OperationResult deleteInternal( + final Cursor cursor, + final ReplicationContext repContext) { + + return cursor.deleteInternal(repContext, null); + } + + /** + * Proxy to Cursor.deleteForReplay() + */ + public static OperationResult deleteForReplay( + final Cursor cursor, + final ReplicationContext repContext) { + + return cursor.deleteForReplay(repContext); + } + + /** + * Proxy to Cursor.putForReplay() + */ + public static OperationResult putForReplay( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry data, + final LN ln, + final int expiration, + final boolean expirationInHours, + final PutMode putMode, + final ReplicationContext repContext) { + + return cursor.putForReplay( + key, data, ln, expiration, expirationInHours, putMode, repContext); + } + + /** + * Search mode used with the internal search and searchBoth methods. + */ + public enum Search { + + /** + * Match the smallest value greater than the key or data param. + */ + GT, + + /** + * Match the smallest value greater than or equal to the key or data + * param. + */ + GTE, + + /** + * Match the largest value less than the key or data param. + */ + LT, + + /** + * Match the largest value less than or equal to the key or data param. + */ + LTE, + } + + /** + * @deprecated use new-style API below instead. + */ + public static OperationStatus search( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final Search searchMode, + final LockMode lockMode) { + + final OperationResult result = search( + cursor, key, pKey, data, searchMode, getReadOptions(lockMode)); + + return result != null ? + OperationStatus.SUCCESS : OperationStatus.NOTFOUND; + } + + /** + * Finds the key according to the Search param. If dups are configured, GT + * and GTE will land on the first dup for the matching key, while LT and + * LTE will land on the last dup for the matching key. + * + * search() and searchBoth() in this class may eventually be exposed as + * public JE Cursor methods, but this isn't practical now for the following + * reasons: + * + * + The API design needs more thought. Perhaps Search.EQ should be added. + * Perhaps existing Cursor methods should be deprecated. 
+ * + This implementation moves the cursor multiple times and does not + * release locks on the intermediate records. + * + * + This could be implemented more efficiently using lower level cursor + * code. For example, an LTE search would actually be more efficient than + * the existing GTE search (getSearchKeyRange and getSearchBothRange). + * + * These methods are used by KVStore. + */ + public static OperationResult search( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final Search searchMode, + final ReadOptions options) { + + final DatabaseImpl dbImpl = cursor.getDatabaseImpl(); + KeyRange range = new KeyRange(dbImpl.getBtreeComparator()); + final boolean first; + + switch (searchMode) { + case GT: + case GTE: + range = range.subRange( + key, searchMode == Search.GTE, null, false); + first = true; + break; + case LT: + case LTE: + range = range.subRange( + null, false, key, searchMode == Search.LTE); + first = false; + break; + default: + throw EnvironmentFailureException.unexpectedState(); + } + + final RangeCursor rangeCursor = new RangeCursor( + range, null, dbImpl.getSortedDuplicates(), cursor); + + final OpReadOptions opReadOptions = OpReadOptions.make(options); + + final OpResult result = (first) ? + rangeCursor.getFirst(key, pKey, data, opReadOptions) : + rangeCursor.getLast(key, pKey, data, opReadOptions); + + /* RangeCursor should not have dup'd the cursor. */ + assert cursor == rangeCursor.getCursor(); + + return result.jeResult; + } + + /** + * @deprecated use new-style API below instead. + */ + public static OperationStatus searchBoth( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final Search searchMode, + final LockMode lockMode) { + + final OperationResult result = searchBoth( + cursor, key, pKey, data, searchMode, getReadOptions(lockMode)); + + return result != null ? + OperationStatus.SUCCESS : OperationStatus.NOTFOUND; + } + + /** + * Searches within the dups for the given key and finds the dup matching the + * pKey value, according to the Search param. + * + * See search() for more discussion. + */ + public static OperationResult searchBoth( + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final Search searchMode, + final ReadOptions options) { + + final LockMode lockMode = + options != null ? options.getLockMode() : null; + + final DatabaseImpl dbImpl = cursor.getDatabaseImpl(); + KeyRange range = new KeyRange(dbImpl.getBtreeComparator()); + range = range.subRange(key); + KeyRange pKeyRange = new KeyRange(dbImpl.getDuplicateComparator()); + final boolean first; + + switch (searchMode) { + case GT: + case GTE: + pKeyRange = pKeyRange.subRange( + pKey, searchMode == Search.GTE, null, false); + first = true; + break; + case LT: + case LTE: + pKeyRange = pKeyRange.subRange( + null, false, pKey, searchMode == Search.LTE); + first = false; + break; + default: + throw EnvironmentFailureException.unexpectedState(); + } + + final RangeCursor rangeCursor = new RangeCursor( + range, pKeyRange, dbImpl.getSortedDuplicates(), cursor); + + final OpReadOptions opReadOptions = OpReadOptions.make(options); + + final OpResult result = (first) ? + rangeCursor.getFirst(key, pKey, data, opReadOptions) : + rangeCursor.getLast(key, pKey, data, opReadOptions); + + /* RangeCursor should not have dup'd the cursor. 
*/ + assert cursor == rangeCursor.getCursor(); + + return result.jeResult; + } + + /** + * Proxy to Cursor.getCursorImpl() + */ + public static CursorImpl getCursorImpl(Cursor cursor) { + return cursor.getCursorImpl(); + } + + /** + * Create a Cursor for internal use from a DatabaseImpl. + */ + public static Cursor makeCursor(final DatabaseImpl databaseImpl, + final Locker locker, + final CursorConfig cursorConfig) { + return makeCursor(databaseImpl, locker, cursorConfig, true); + } + + /** + * Create a Cursor for internal use from a DatabaseImpl for btree + * corruption, which can set retainNonTxnLocks. + */ + public static Cursor makeCursor(final DatabaseImpl databaseImpl, + final Locker locker, + final CursorConfig cursorConfig, + boolean retainNonTxnLocks) { + final Cursor cursor = new Cursor( + databaseImpl, locker, cursorConfig, retainNonTxnLocks); + /* Internal cursors don't need to be sticky. */ + cursor.setNonSticky(true); + return cursor; + } + + /** + * Create a Cursor from a DatabaseHandle. + */ + public static Cursor makeCursor(final Database dbHandle, + final Locker locker, + final CursorConfig cursorConfig) { + return makeCursor(dbHandle, locker, cursorConfig, false); + } + + public static Cursor makeCursor(final Database dbHandle, + final Locker locker, + final CursorConfig cursorConfig, + boolean retainNonTxnLocks) { + return new Cursor(dbHandle, locker, cursorConfig, retainNonTxnLocks); + } + + /** + * @deprecated use {@link CursorConfig#setNonSticky} instead. + */ + public static void setNonCloning(final Cursor cursor, + final boolean nonSticky) { + cursor.setNonSticky(nonSticky); + } + + public static boolean isCorrupted(Database db) { + return db.isCorrupted(); + } + + /** + * Proxy to Database.getDbImpl() + */ + public static DatabaseImpl getDbImpl(final Database db) { + return db.getDbImpl(); + } + + /** + * Proxy to JoinCursor.getSortedCursors() + */ + public static Cursor[] getSortedCursors(final JoinCursor cursor) { + return cursor.getSortedCursors(); + } + + /** + * Proxy to EnvironmentConfig.setLoadPropertyFile() + */ + public static void setLoadPropertyFile(final EnvironmentConfig config, + final boolean loadProperties) { + config.setLoadPropertyFile(loadProperties); + } + + /** + * Proxy to EnvironmentConfig.setCreateUP() + */ + public static void setCreateUP(final EnvironmentConfig config, + final boolean val) { + config.setCreateUP(val); + } + + /** + * Proxy to EnvironmentConfig.getCreateUP() + */ + public static boolean getCreateUP(final EnvironmentConfig config) { + return config.getCreateUP(); + } + + /** + * Proxy to EnvironmentConfig.setCreateEP() + */ + public static void setCreateEP(final EnvironmentConfig config, + final boolean val) { + config.setCreateEP(val); + } + + /** + * Proxy to EnvironmentConfig.getCreateEP() + */ + public static boolean getCreateEP(final EnvironmentConfig config) { + return config.getCreateEP(); + } + + /** + * Proxy to EnvironmentConfig.setCheckpointUP() + */ + public static void setCheckpointUP(final EnvironmentConfig config, + final boolean checkpointUP) { + config.setCheckpointUP(checkpointUP); + } + + /** + * Proxy to EnvironmentConfig.getCheckpointUP() + */ + public static boolean getCheckpointUP(final EnvironmentConfig config) { + return config.getCheckpointUP(); + } + + /** + * Proxy to EnvironmentConfig.setTxnReadCommitted() + */ + public static void setTxnReadCommitted(final EnvironmentConfig config, + final boolean txnReadCommitted) { + config.setTxnReadCommitted(txnReadCommitted); + } + + /** + * Proxy to 
EnvironmentConfig.getTxnReadCommitted() + */ + public static boolean getTxnReadCommitted(final EnvironmentConfig config) { + return config.getTxnReadCommitted(); + } + + /** + * Proxy to EnvironmentMutableConfig.cloneMutableConfig() + */ + public static EnvironmentMutableConfig + cloneMutableConfig(final EnvironmentMutableConfig config) { + return config.cloneMutableConfig(); + } + + /** + * Proxy to EnvironmentMutableConfig.checkImmutablePropsForEquality() + */ + public static void + checkImmutablePropsForEquality(final EnvironmentMutableConfig config, + final Properties handleConfigProps) + throws IllegalArgumentException { + + config.checkImmutablePropsForEquality(handleConfigProps); + } + + /** + * Proxy to EnvironmentMutableConfig.copyMutablePropsTo() + */ + public static void + copyMutablePropsTo(final EnvironmentMutableConfig config, + final EnvironmentMutableConfig toConfig) { + config.copyMutablePropsTo(toConfig); + } + + /** + * Proxy to EnvironmentMutableConfig.validateParams. + */ + public static void + disableParameterValidation(final EnvironmentMutableConfig config) { + config.setValidateParams(false); + } + + /** + * Proxy to EnvironmentMutableConfig.getProps + */ + public static Properties getProps(final EnvironmentMutableConfig config) { + return config.getProps(); + } + + /** + * Proxy to DatabaseConfig.setUseExistingConfig() + */ + public static void setUseExistingConfig(final DatabaseConfig config, + final boolean useExistingConfig) { + config.setUseExistingConfig(useExistingConfig); + } + + /** + * Proxy to DatabaseConfig.validate(DatabaseConfig) + */ + public static void validate(final DatabaseConfig config1, + final DatabaseConfig config2) + throws DatabaseException { + + config1.validate(config2); + } + + /** + * Proxy to Transaction.getLocker() + */ + public static Locker getLocker(final Transaction txn) + throws DatabaseException { + + return txn.getLocker(); + } + + /** + * Proxy to Transaction.getEnvironment() + */ + public static Environment getEnvironment(final Transaction txn) + throws DatabaseException { + + return txn.getEnvironment(); + } + + /** + * Proxy to Environment.getDefaultTxnConfig() + */ + public static TransactionConfig + getDefaultTxnConfig(final Environment env) { + return env.getDefaultTxnConfig(); + } + + public static Transaction + beginInternalTransaction(final Environment env, + final TransactionConfig config) { + return env.beginInternalTransaction(config); + } + + public static ExceptionEvent makeExceptionEvent(final Exception e, + final String n) { + return new ExceptionEvent(e, n); + } + + public static Txn getTxn(final Transaction transaction) { + return transaction.getTxn(); + } + + public static DiskOrderedCursorImpl + getDiskOrderedCursorImpl(final DiskOrderedCursor cursor) { + + return cursor.getCursorImpl(); + } + + public static OperationResult makeResult(final long time) { + + return time == 0 ? 
+ DEFAULT_RESULT : + new OperationResult(time, false /*update*/); + } + + public static OperationResult makeResult( + final int expiration, + final boolean expirationInHours) { + + return makeResult( + TTL.expirationToSystemTime(expiration, expirationInHours)); + } + + public static OperationResult makeUpdateResult( + final int expiration, + final boolean expirationInHours) { + + return new OperationResult( + TTL.expirationToSystemTime(expiration, expirationInHours), + true /*update*/); + } + + public static ReadOptions getReadOptions(LockMode lockMode) { + if (lockMode == null) { + lockMode = LockMode.DEFAULT; + } + return lockMode.toReadOptions(); + } +} diff --git a/src/com/sleepycat/je/DeadlockException.java b/src/com/sleepycat/je/DeadlockException.java new file mode 100644 index 0000000..d95c9de --- /dev/null +++ b/src/com/sleepycat/je/DeadlockException.java @@ -0,0 +1,84 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when a deadlock is detected. When this exception is thrown, JE + * detected a deadlock and chose one transaction (or non-transactional + * operation), the "victim", to invalidate in order to break the deadlock. + * Note that this is different than a {@link LockTimeoutException lock + * timeout} or {@link TransactionTimeoutException}, which occur for other + * reasons. + * + *

        For more information on deadlock detection, see + * {@link EnvironmentConfig#LOCK_DEADLOCK_DETECT}. As described there, a + * {@code DeadlockException} is normally thrown when a random victim is + * selected; in this case the exception message will contain the string: + * {@code This locker was chosen randomly as the victim}. If the deadlock + * exception is thrown in a non-victim thread, due to live lock or an + * unresponsive thread, the message will contain the string: + * {@code Unable to break deadlock using random victim selection within the + * timeout interval}.

        + * + *

        TODO: describe how to debug using info included with the exception.

        + * + *

        Normally, applications should catch the base class {@link + * LockConflictException} rather than catching one of its subclasses. All lock + * conflicts are typically handled in the same way, which is normally to abort + * and retry the transaction. See {@link LockConflictException} for more + * information.
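A minimal abort-and-retry sketch, assuming an open Environment env and a hypothetical doWork(txn) helper; the retry bound is an assumed policy:

    final int MAX_RETRIES = 5;
    for (int i = 0; i < MAX_RETRIES; i++) {
        Transaction txn = env.beginTransaction(null, null);
        try {
            doWork(txn);        // application reads/writes
            txn.commit();
            break;
        } catch (LockConflictException e) {
            txn.abort();        // covers DeadlockException and its siblings; then retry
        }
    }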

        + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.

        + */ +public class DeadlockException extends LockConflictException { + + private static final long serialVersionUID = 729943514L; + + /** + * For internal use only. + * @hidden + */ + DeadlockException(String message) { + super(message); + } + + /** + * For internal use only. + * @hidden + */ + public DeadlockException(Locker locker, String message) { + super(locker, message); + } + + /** + * For internal use only. + * @hidden + */ + DeadlockException(String message, + DeadlockException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DeadlockException(msg, this); + } +} diff --git a/src/com/sleepycat/je/DeleteConstraintException.java b/src/com/sleepycat/je/DeleteConstraintException.java new file mode 100644 index 0000000..6b66291 --- /dev/null +++ b/src/com/sleepycat/je/DeleteConstraintException.java @@ -0,0 +1,78 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when an attempt is made to delete a key from a foreign key database, + * when that key is referenced by a secondary database, and the secondary is + * configured to cause an abort in this situation. + * + *

        When using the base API ({@code com.sleepycat.je}), this can occur when a + * {@link SecondaryDatabase} is configured to be associated with a foreign key + * database (see {@link SecondaryConfig#setForeignKeyDatabase}), and is also + * configured with the {@link ForeignKeyDeleteAction#ABORT} delete action (see + * {@link SecondaryConfig#setForeignKeyDeleteAction}). Note that {@code ABORT} + * is the default setting.
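A sketch of that base-API configuration, assuming open handles env, priDb and fkDb; the key creator shown is a trivial stand-in:

    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setAllowCreate(true);
    secConfig.setKeyCreator(new SecondaryKeyCreator() {
        public boolean createSecondaryKey(SecondaryDatabase sdb, DatabaseEntry k,
                                          DatabaseEntry d, DatabaseEntry result) {
            result.setData(d.getData()); // use the whole data item as the secondary key
            return true;
        }
    });
    secConfig.setForeignKeyDatabase(fkDb);
    secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.ABORT); // the default
    SecondaryDatabase secDb =
        env.openSecondaryDatabase(null, "mySecondary", priDb, secConfig);
    // Deleting a key from fkDb that secDb references now throws DeleteConstraintException.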

        + * + *

        When using the DPL ({@code com.sleepycat.persist}), this can occur when a + * {@link com.sleepycat.persist.model.SecondaryKey} is defined with a {@link + * com.sleepycat.persist.model.SecondaryKey#relatedEntity}, and {@link + * com.sleepycat.persist.model.SecondaryKey#onRelatedEntityDelete} is {@link + * com.sleepycat.persist.model.DeleteAction#ABORT} (which is the default).

        + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.

        + * + * @see
        Special considerations + * for using Secondary Databases with and without Transactions + * + * @since 4.0 + */ +public class DeleteConstraintException extends SecondaryConstraintException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public DeleteConstraintException(Locker locker, + String message, + String secDbName, + DatabaseEntry secKey, + DatabaseEntry priKey, + long expirationTime) { + super(locker, message, secDbName, secKey, priKey, expirationTime); + } + + /** + * For internal use only. + * @hidden + */ + private DeleteConstraintException(String message, + DeleteConstraintException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DeleteConstraintException(msg, this); + } +} diff --git a/src/com/sleepycat/je/DiskLimitException.java b/src/com/sleepycat/je/DiskLimitException.java new file mode 100644 index 0000000..df07d4a --- /dev/null +++ b/src/com/sleepycat/je/DiskLimitException.java @@ -0,0 +1,93 @@ +/*- + * + * This file is part of Oracle Berkeley DB Java Edition + * Copyright (C) 2002, 2016 Oracle and/or its affiliates. All rights reserved. + * + * Oracle Berkeley DB Java Edition is free software: you can redistribute it + * and/or modify it under the terms of the GNU Affero General Public License + * as published by the Free Software Foundation, version 3. + * + * Oracle Berkeley DB Java Edition is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero + * General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License in + * the LICENSE file along with Oracle Berkeley DB Java Edition. If not, see + * . + * + * An active Oracle commercial licensing agreement for this product + * supercedes this license. + * + * For more information please contact: + * + * Vice President Legal, Development + * Oracle America, Inc. + * 5OP-10 + * 500 Oracle Parkway + * Redwood Shores, CA 94065 + * + * or + * + * berkeleydb-info_us@oracle.com + * + * [This line intentionally left blank.] + * [This line intentionally left blank.] + * [This line intentionally left blank.] + * [This line intentionally left blank.] + * [This line intentionally left blank.] + * [This line intentionally left blank.] + * EOF + * + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when a write operation cannot be performed because a disk limit has + * been violated. It may also be thrown by {@link Environment#checkpoint} + * {@link Environment#sync} and {@link Environment#close} (when it performs + * a checkpoint). + * + * @see EnvironmentConfig#MAX_DISK + * @see EnvironmentConfig#FREE_DISK + * @since 7.5 + */ +public class DiskLimitException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + * + * @param locker is non-null to mark the txn abort-only, or null in cases + * where no txn/locker is involved. + */ + public DiskLimitException(Locker locker, String message) { + super( + locker /*locker*/, + locker != null /*abortOnly*/, + message, + null /*cause*/); + } + + /** + * For internal use only. 
+ * @hidden + */ + private DiskLimitException(String message, DiskLimitException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DiskLimitException(msg, this); + } +} diff --git a/src/com/sleepycat/je/DiskOrderedCursor.java b/src/com/sleepycat/je/DiskOrderedCursor.java new file mode 100644 index 0000000..3039aff --- /dev/null +++ b/src/com/sleepycat/je/DiskOrderedCursor.java @@ -0,0 +1,381 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DiskOrderedCursorImpl; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * DiskOrderedCursor returns records in unsorted order in exchange for + * generally faster retrieval times. Instead of key order, an approximation of + * disk order is used, which results in less I/O. This can be useful when the + * application needs to scan all records in one or more databases, and will be + * applying filtering logic which does not need key ordered retrieval. + * A DiskOrderedCursor is created using the {@link + * Database#openCursor(DiskOrderedCursorConfig)} method or the {@link + * Environment#openDiskOrderedCursor(Database[], DiskOrderedCursorConfig)} + * method. + *

        + * WARNING: After opening a DiskOrderedCursor, deletion of log files + * by the JE log cleaner will be disabled until {@link #close()} is called. To + * prevent unbounded growth of disk usage, be sure to call {@link #close()} to + * re-enable log file deletion. + *
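A minimal scan sketch, assuming an open Database db; the try/finally provides the close() this warning requires:

    DiskOrderedCursor cursor = db.openCursor(DiskOrderedCursorConfig.DEFAULT);
    try {
        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        while (cursor.getNext(key, data, LockMode.READ_UNCOMMITTED) ==
               OperationStatus.SUCCESS) {
            // process key/data; records arrive in approximate disk order
        }
    } finally {
        cursor.close(); // re-enables log file deletion by the cleaner
    }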

        + * Optional configurations: the following options are available to + * tune the DiskOrderedCursor. + *

        + * The DiskOrderedCursor creates a background producer thread which prefetches + * some target records and inserts them in a queue for use by the cursor. The + * parameter {@link EnvironmentConfig#DOS_PRODUCER_QUEUE_TIMEOUT} applies to + * this background thread, and controls the timeout which governs the blocking + * queue. + *

        + * See {@link DiskOrderedCursorConfig} for additional options. + *

        + *

        Consistency Guarantees

        + *

        + * The consistency guarantees provided by a DiskOrderedCursor are, at best, the + * same as those provided by READ_UNCOMMITTED (see {@link LockMode}). With + * READ_UNCOMMITTED, changes made by all transactions, including uncommitted + * transactions, may be returned by the scan. Also, a record returned by the + * scan is not locked, and may be modified or deleted by the application after + * it is returned, including modification or deletion of the record at the + * cursor position. + *

+ * In other words, the records returned by the scan correspond to the state + * of the database (as if READ_UNCOMMITTED were used) at the beginning of the + * scan plus some, but not all, changes made by the application after the start + * of the scan. The user should not rely on the scan returning any changes + * made after the start of the scan. For example, if the record referred to by + * the DiskOrderedCursor is deleted after the DiskOrderedCursor is positioned + * at that record, getCurrent() will still return the key and value of that + * record and OperationStatus.SUCCESS. + * + * If a transactionally correct data set is required (as defined by + * READ_COMMITTED), the application must ensure that all transactions that + * write to the database are committed before the beginning of the scan. + * During the scan, no records in the databases being scanned may be + * inserted, deleted, or modified. While such quiescence is possible, it is + * not the expected use case for a DiskOrderedCursor.

        + *

        Performance Considerations

        + *

        + * The internal algorithm used to approximate disk ordered reads is as follows. + * For simplicity, the algorithm description assumes that a single database is + * being scanned, but the algorithm is almost the same when multiple databases + * are involved. + * An internal producer thread is used to scan the database. This thread is + * created and started when the {@code DiskOrderedCursor} is created, and is + * destroyed by {@link DiskOrderedCursor#close}. Scanning consists of two + * phases. In phase I the in-cache Btree of the scanned database is traversed + * in key order. The LSNs (physical record addresses) of the data to be + * fetched are accumulated in a memory buffer. Btree latches are held during + * the traversal, but only for short durations. In phase II the accumulated + * LSNs are sorted into disk order, fetched one at a time in that order, and + * the fetched data is added to a blocking queue. The {@code getNext} method + * in this class removes the next entry from the queue. This approach allows + * concurrent access to the Database during both phases of the scan, including + * access by the application's consumer thread (the thread calling {@code + * getNext}). + *

        + * Phase I does not always process the entire Btree. During phase I if the + * accumulation of LSNs causes the {@link + * DiskOrderedCursorConfig#setInternalMemoryLimit internal memory limit} or + * {@link DiskOrderedCursorConfig#setLSNBatchSize LSN batch size} to be + * exceeded, phase I is ended and phase II begins. In this case, after phase + * II finishes, phase I resumes where it left off in the Btree traversal. + * Phase I and II are repeated until the entire database is scanned. + *

+ * By default, the internal memory limit and LSN batch size are unbounded (see + * {@link DiskOrderedCursorConfig}). For a database with a large number of + * records, this could cause an {@code OutOfMemoryError}. Therefore, it is + * strongly recommended that either the internal memory limit or LSN batch size + * is configured to limit the use of memory during the scan. On the other + * hand, the efficiency of the scan is proportional to the amount of memory + * used. If enough memory is available, the ideal case would be that the + * database is scanned in a single iteration of phase I and II. The more + * iterations, the more random IO will occur.

        + * Another factor is the {@link DiskOrderedCursorConfig#setQueueSize queue + * size}. During the phase I Btree traversal, data that is resident in the JE + * cache will be added to the queue immediately, rather than waiting until + * phase II and fetching it, but only if the queue is not full. Therefore, + * increasing the size of the queue can avoid fetching data that is resident in + * the JE cache. Also, increasing the queue size can improve parallelism of + * the work done by the producer and consumer threads. + *
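Pulling these knobs together, a configuration sketch with purely illustrative values (db is an assumed open Database):

    DiskOrderedCursorConfig config = new DiskOrderedCursorConfig()
        .setInternalMemoryLimit(100L * 1024 * 1024) // bound phase I/II memory use
        .setLSNBatchSize(1_000_000)                 // or bound the LSN batch instead
        .setQueueSize(10_000)                       // larger queue: more producer/consumer overlap
        .setKeysOnly(true);                         // only if data values are not needed
    DiskOrderedCursor cursor = db.openCursor(config);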

        + * Also note that a {@link DiskOrderedCursorConfig#setKeysOnly keys-only} scan + * is much more efficient than the default keys-and-data scan. With a + * keys-only scan, only the BINs (bottom internal nodes) of the Btree need to + * be fetched; the LNs (leaf nodes) do not. This is also true of databases + * {@link DatabaseConfig#setSortedDuplicates configured for duplicates}, even + * for a keys-and-data scan, since internally the key and data are both + * contained in the BIN. + * + * @since 5.0 + */ +public class DiskOrderedCursor implements ForwardCursor { + + private final Database[] dbHandles; + + private final DatabaseImpl[] dbImpls; + + private final DiskOrderedCursorConfig config; + + private final DiskOrderedCursorImpl dosCursorImpl; + + private final Logger logger; + + DiskOrderedCursor( + final Database[] dbHandles, + final DiskOrderedCursorConfig config) { + + this.dbHandles = dbHandles; + this.config = config; + + assert(dbHandles != null && dbHandles.length > 0); + + dbImpls = new DatabaseImpl[dbHandles.length]; + + boolean dups = false; + int i = 0; + + try { + for (; i < dbHandles.length; ++i) { + + Database db = dbHandles[i]; + DatabaseImpl dbImpl; + + synchronized (db) { + db.addCursor(this); + dbImpl = db.getDbImpl(); + } + + assert(dbImpl != null); + + if (i == 0) { + dups = dbImpl.getSortedDuplicates(); + + } else if (dbImpl.getSortedDuplicates() != dups) { + throw new IllegalArgumentException( + "In a multi-database disk ordered cursor " + + "either all or none of the databases should support " + + "duplicates"); + } + + dbImpls[i] = dbImpl; + } + + dosCursorImpl = new DiskOrderedCursorImpl(dbImpls, config); + + this.logger = dbImpls[0].getEnv().getLogger(); + + } catch (final Throwable e) { + for (int j = 0; j < i; ++j) { + dbHandles[j].removeCursor(this); + } + + throw e; + } + } + + /** + * Returns the Database handle for the database that contains the + * latest record returned by getNext(). + * + * @return The Database handle associated with this Cursor. + */ + @Override + public Database getDatabase() { + return dbHandles[dosCursorImpl.getCurrDb()]; + } + + /** + * Discards the cursor. + * + *

        The cursor handle may not be used again after this method has been + * called, regardless of the method's success or failure.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + @Override + public void close() + throws DatabaseException { + + if (dosCursorImpl.isClosed()) { + return; + } + try { + dosCursorImpl.checkEnv(); + + dosCursorImpl.close(); + + for (int i = 0; i < dbHandles.length; ++i) { + dbHandles[i].removeCursor(this); + } + + } catch (Error E) { + dbImpls[0].getEnv().invalidate(E); + throw E; + } + } + + /** + * @param options the ReadOptions, or null to use default options. + * For DiskOrderedCursors, {@link ReadOptions#getLockMode} must return + * null, {@link com.sleepycat.je.LockMode#DEFAULT} or + * {@link com.sleepycat.je.LockMode#READ_UNCOMMITTED}, and no locking + * is performed. + */ + @Override + public OperationResult get( + final DatabaseEntry key, + final DatabaseEntry data, + final Get getType, + final ReadOptions options) { + + try { + checkState(); + checkLockMode((options != null) ? options.getLockMode() : null); + trace(Level.FINEST, getType); + + switch (getType) { + case CURRENT: + return dosCursorImpl.getCurrent(key, data); + case NEXT: + return dosCursorImpl.getNext(key, data); + default: + throw new IllegalArgumentException( + "Get type not allowed: " + getType); + } + + } catch (Error E) { + dbImpls[0].getEnv().invalidate(E); + throw E; + } + } + + /** + * @param lockMode the locking attributes. For DiskOrderedCursors this + * parameter must be either null, {@link com.sleepycat.je.LockMode#DEFAULT} + * or {@link com.sleepycat.je.LockMode#READ_UNCOMMITTED}, and no locking + * is performed. + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY + * OperationStatus.KEYEMPTY} if there are no more records in the + * DiskOrderedCursor set, otherwise, {@link + * com.sleepycat.je.OperationStatus#SUCCESS OperationStatus.SUCCESS}. If + * the record referred to by a DiskOrderedCursor is deleted after the + * ForwardCursor is positioned at that record, getCurrent() will still + * return the key and value of that record and OperationStatus.SUCCESS. + */ + @Override + public OperationStatus getCurrent( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.CURRENT, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.KEYEMPTY : OperationStatus.SUCCESS; + } + + /** + * @param lockMode the locking attributes. For DiskOrderedCursors this + * parameter must be either null, {@link com.sleepycat.je.LockMode#DEFAULT} + * or {@link com.sleepycat.je.LockMode#READ_UNCOMMITTED}, and no locking + * is performed. + */ + @Override + public OperationStatus getNext( + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.NEXT, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Returns this cursor's configuration. + * + *

        This may differ from the configuration used to open this object if + * the cursor existed previously.

        + * + * @return This cursor's configuration. + */ + public DiskOrderedCursorConfig getConfig() { + try { + return config.clone(); + } catch (Error E) { + dbImpls[0].getEnv().invalidate(E); + throw E; + } + } + + private void checkLockMode(final LockMode lockMode) { + if (lockMode == null || + lockMode == LockMode.DEFAULT || + lockMode == LockMode.READ_UNCOMMITTED) { + return; + } + + throw new IllegalArgumentException( + "lockMode must be null or LockMode.READ_UNCOMMITTED"); + } + + /** + * Checks the environment and cursor state. + */ + private void checkState() { + dosCursorImpl.checkEnv(); + } + + /** + * Sends trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + private void trace(final Level level, final Get getType) { + + if (logger.isLoggable(level)) { + LoggerUtils.logMsg( + logger, dbImpls[0].getEnv(), level, getType.toString()); + } + } + + /** + * For testing and other internal use. + */ + DiskOrderedCursorImpl getCursorImpl() { + return dosCursorImpl; + } +} diff --git a/src/com/sleepycat/je/DiskOrderedCursorConfig.java b/src/com/sleepycat/je/DiskOrderedCursorConfig.java new file mode 100644 index 0000000..16918a2 --- /dev/null +++ b/src/com/sleepycat/je/DiskOrderedCursorConfig.java @@ -0,0 +1,376 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + + +/** + * Specifies the attributes of a DiskOrderedCursor. + * @since 5.0 + */ +public class DiskOrderedCursorConfig implements Cloneable { + + /** + * Default configuration used if null is passed to methods that create a + * cursor. + */ + public static final DiskOrderedCursorConfig DEFAULT = + new DiskOrderedCursorConfig(); + + private boolean binsOnly = false; + + private boolean keysOnly = false; + + private boolean countOnly = false; + + private long lsnBatchSize = Long.MAX_VALUE; + + private long internalMemoryLimit = Long.MAX_VALUE; + + private int queueSize = 1000; + + private boolean serialDBScan = false; + + private boolean debug = false; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public DiskOrderedCursorConfig() { + } + + /** + * Specify whether the DiskOrderedCursor should return only the key or key + * + data. The default value is false (key + data). If keyOnly is true, + * the performance of the disk ordered scan will be better, because the + * Cursor only descends to the BIN level. + * + * @param keysOnly If true, return only keys from this cursor. + * + * @return this + */ + public DiskOrderedCursorConfig setKeysOnly(final boolean keysOnly) { + setKeysOnlyVoid(keysOnly); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setKeysOnlyVoid(final boolean keysOnly) { + this.keysOnly = keysOnly; + } + + /** + * Returns true if the DiskOrderedCursor is configured to return only + * keys. 
Returns false if it is configured to return keys + data. + * + * @return true if the DiskOrderedCursor is configured to return keys only. + */ + public boolean getKeysOnly() { + return keysOnly; + } + + /** + * For internal use only. + * @hidden + */ + public DiskOrderedCursorConfig setCountOnly(boolean val) { + setCountOnlyVoid(val); + return this; + } + + /** + * For internal use only. + * @hidden + */ + public void setCountOnlyVoid(boolean val) { + countOnly = val; + } + + /** + * For internal use only. + * @hidden + */ + public boolean getCountOnly() { + return countOnly; + } + + /** + * Specify whether the DiskOrderedCursor should scan the BINs only. If + * true, the performance of the disk ordered scan will be better, because + * LNs are not read from disk. However, in this case, the data portion + * of a record will be returned only if it is embedded in the BIN; + * otherwise only the key will be returned. + * + * @param binsOnly If true, return keys and, if available, the associated + * embedded data. + * + * @return this + */ + public DiskOrderedCursorConfig setBINsOnly(final boolean binsOnly) { + setBINsOnlyVoid(binsOnly); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setBINsOnlyVoid(final boolean binsOnly) { + this.binsOnly = binsOnly; + } + + /** + * Returns true if the DiskOrderedCursor is configured to scan BINs only, + * returning all record keys and only those record data that are embedded + * in the BINs. + * + * @return true if the DiskOrderedCursor is configured to scan BINs only. + */ + public boolean getBINsOnly() { + return binsOnly; + } + + /** + * Set the maximum number of LSNs to gather and sort at any one time. The + * default is an unlimited number of LSNs. Setting this lower causes the + * DiskOrderedScan to use less memory, but it sorts and processes LSNs more + * frequently thereby causing slower performance. Setting this higher will + * in general improve performance at the expense of memory. Each LSN uses + * 16 bytes of memory. + * + * @param lsnBatchSize the maximum number of LSNs to accumulate and sort + * per batch. + * + * @return this + */ + public DiskOrderedCursorConfig setLSNBatchSize(final long lsnBatchSize) { + setLSNBatchSizeVoid(lsnBatchSize); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLSNBatchSizeVoid(final long lsnBatchSize) { + this.lsnBatchSize = lsnBatchSize; + } + + /** + * Returns the maximum number of LSNs to be sorted that this + * DiskOrderedCursor is configured for. + * + * @return the maximum number of LSNs to be sorted that this + * DiskOrderedCursor is configured for. + */ + public long getLSNBatchSize() { + return lsnBatchSize; + } + + /** + * Set the maximum amount of JE Cache Memory that the DiskOrderedScan + * can use at one time. The default is an unlimited amount of memory. + * Setting this lower causes the DiskOrderedScan to use less memory, but it + * sorts and processes LSNs more frequently thereby generally causing slower + * performance. Setting this higher will in general improve performance at + * the expense of JE cache memory. + * + * @param internalMemoryLimit the maximum number of non JE Cache bytes to + * use. 
+ * + * @return this + * + * @see Cache + * Statistics: Unexpected Sizes + */ + public DiskOrderedCursorConfig setInternalMemoryLimit( + final long internalMemoryLimit) { + setInternalMemoryLimitVoid(internalMemoryLimit); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setInternalMemoryLimitVoid(final long internalMemoryLimit) { + this.internalMemoryLimit = internalMemoryLimit; + } + + /** + * Returns the maximum amount of JE Cache Memory that the + * DiskOrderedScan can use at one time. + * + * @return the maximum amount of non JE Cache Memory that the DiskOrderedScan + * can use at one time. + */ + public long getInternalMemoryLimit() { + return internalMemoryLimit; + } + + /** + * Set the queue size for entries being passed between the + * DiskOrderedCursor producer thread and the application's consumer + * thread. If the queue size reaches this number of entries, the producer + * thread will block until the application thread removes one or more + * entries (by calling ForwardCursor.getNext()). The default is 1000. + * + * @param queueSize the maximum number of entries the queue can hold before + * the producer thread blocks. + * + * @return this + */ + public DiskOrderedCursorConfig setQueueSize(final int queueSize) { + setQueueSizeVoid(queueSize); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setQueueSizeVoid(final int queueSize) { + this.queueSize = queueSize; + } + + /** + * Returns the maximum number of entries in the queue before the + * DiskOrderedCursor producer thread blocks. + * + * @return the maximum number of entries in the queue before the + * DiskOrderedCursor producer thread blocks. + */ + public int getQueueSize() { + return queueSize; + } + + /** + * @deprecated this method has no effect and will be removed in a future + * release. + */ + public DiskOrderedCursorConfig setMaxSeedMillisecs( + final long maxSeedMillisecs) { + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxSeedMillisecsVoid(final long maxSeedMillisecs) { + } + + /** + * @deprecated this method returns zero and will be removed in a future + * release. + */ + public long getMaxSeedMillisecs() { + return 0; + } + + /** + * @deprecated this method has no effect and will be removed in a future + * release. + */ + public DiskOrderedCursorConfig setMaxSeedNodes(final long maxSeedNodes) { + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxSeedNodesVoid(final long maxSeedNodes) { + } + + /** + * @deprecated this method returns zero and will be removed in a future + * release. + */ + public long getMaxSeedNodes() { + return 0; + } + + /** + * @hidden + */ + public DiskOrderedCursorConfig setSerialDBScan(final boolean v) { + setSerialDBScanVoid(v); + return this; + } + + /** + * @hidden + */ + public void setSerialDBScanVoid(final boolean v) { + this.serialDBScan = v; + } + + /** + * @hidden + */ + public boolean getSerialDBScan() { + return this.serialDBScan; + } + + /** + * @hidden + */ + public DiskOrderedCursorConfig setDebug(final boolean v) { + setDebugVoid(v); + return this; + } + + /** + * @hidden + */ + public void setDebugVoid(final boolean v) { + this.debug = v; + } + + /** + * @hidden + */ + public boolean getDebug() { + return this.debug; + } + + /** + * Returns a copy of this configuration object.
+ */ + @Override + public DiskOrderedCursorConfig clone() { + try { + return (DiskOrderedCursorConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + return "keysOnly=" + keysOnly + + "\nlsnBatchSize=" + lsnBatchSize + + "\ninternalMemoryLimit=" + internalMemoryLimit + + "\nqueueSize=" + queueSize; + } +} diff --git a/src/com/sleepycat/je/DiskOrderedCursorConfigBeanInfo.java b/src/com/sleepycat/je/DiskOrderedCursorConfigBeanInfo.java new file mode 100644 index 0000000..2418dc4 --- /dev/null +++ b/src/com/sleepycat/je/DiskOrderedCursorConfigBeanInfo.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class DiskOrderedCursorConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(DiskOrderedCursorConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + + /* + * setMaxSeedTestHook is only used for unit tests, and the + * setMaxSeedTestHookVoid method is not necessary, so add + * "setMaxSeedTestHook" to the ignoreMethods list. + */ + ignoreMethods.add("setMaxSeedTestHook"); + return getPdescriptor(DiskOrderedCursorConfig.class); + } +} diff --git a/src/com/sleepycat/je/DiskOrderedCursorProducerException.java b/src/com/sleepycat/je/DiskOrderedCursorProducerException.java new file mode 100644 index 0000000..42fbf82 --- /dev/null +++ b/src/com/sleepycat/je/DiskOrderedCursorProducerException.java @@ -0,0 +1,44 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown by {@link ForwardCursor#getNext ForwardCursor.getNext} when a + * {@link DiskOrderedCursor} producer thread throws an exception. + * This exception wraps that thrown exception. + * + * @since 5.0 + */ +public class DiskOrderedCursorProducerException + extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public DiskOrderedCursorProducerException(String message, Throwable cause) { + super(null /*locker*/, false /*abortOnly*/, message, cause); + } + + /** + * For internal use only.
+ * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DiskOrderedCursorProducerException(msg, this); + } +} diff --git a/src/com/sleepycat/je/DuplicateDataException.java b/src/com/sleepycat/je/DuplicateDataException.java new file mode 100644 index 0000000..8e2ba56 --- /dev/null +++ b/src/com/sleepycat/je/DuplicateDataException.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown by {@link Cursor#putCurrent Cursor.putCurrent} if the old and new + * data are not equal according to the configured duplicate comparator or + * default comparator. + * + *

        If the old and new data are unequal according to the comparator, this + * would change the sort order of the record, which would change the cursor + * position, and this is not allowed. To change the sort order of a record, + * delete it and then re-insert it.
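A minimal sketch of that delete/re-insert pattern (illustrative only, not from the source; cursor is assumed to be a Cursor positioned on the record, and newKey/newData are assumed DatabaseEntry values holding the replacement record):

    // Instead of putCurrent() with data that would sort differently:
    OperationStatus status = cursor.delete();
    if (status == OperationStatus.SUCCESS) {
        // Re-inserting lets the record take its new sort position.
        status = cursor.put(newKey, newData);
    }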

        + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.

        + * + * @since 4.0 + */ +public class DuplicateDataException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public DuplicateDataException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private DuplicateDataException(String message, + DuplicateDataException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DuplicateDataException(msg, this); + } +} diff --git a/src/com/sleepycat/je/Durability.java b/src/com/sleepycat/je/Durability.java new file mode 100644 index 0000000..0f4b47f --- /dev/null +++ b/src/com/sleepycat/je/Durability.java @@ -0,0 +1,360 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.StringTokenizer; + +/** + * Durability defines the overall durability characteristics associated with a + * transaction. When operating on a local environment the durability of a + * transaction is completely determined by the local {@link SyncPolicy} that is + * in effect. When using replication, the overall durability is a function of + * the local {@link SyncPolicy} plus the {@link ReplicaAckPolicy} used by the + * master and the {@link SyncPolicy} in effect at each Replica. + */ +public class Durability { + + /** + * A convenience constant that defines a durability policy with COMMIT_SYNC + * for local commit synchronization. + * + * The replicated environment policies default to COMMIT_NO_SYNC for + * commits of replicated transactions that need acknowledgment and + * SIMPLE_MAJORITY for the acknowledgment policy. + */ + public static final Durability COMMIT_SYNC = + new Durability(SyncPolicy.SYNC, // localSync + SyncPolicy.NO_SYNC, // replicaSync + ReplicaAckPolicy.SIMPLE_MAJORITY); // replicaAck + + /** + * A convenience constant that defines a durability policy with + * COMMIT_NO_SYNC for local commit synchronization. + * + * The replicated environment policies default to COMMIT_NO_SYNC for + * commits of replicated transactions that need acknowledgment and + * SIMPLE_MAJORITY for the acknowledgment policy. + */ + public static final Durability COMMIT_NO_SYNC = + new Durability(SyncPolicy.NO_SYNC, // localSync + SyncPolicy.NO_SYNC, // replicaSync + ReplicaAckPolicy.SIMPLE_MAJORITY); // replicaAck + + /** + * A convenience constant that defines a durability policy with + * COMMIT_WRITE_NO_SYNC for local commit synchronization. + * + * The replicated environment policies default to COMMIT_NO_SYNC for + * commits of replicated transactions that need acknowledgment and + * SIMPLE_MAJORITY for the acknowledgment policy. 
+ */ + public static final Durability COMMIT_WRITE_NO_SYNC = + new Durability(SyncPolicy.WRITE_NO_SYNC,// localSync + SyncPolicy.NO_SYNC, // replicaSync + ReplicaAckPolicy.SIMPLE_MAJORITY); // replicaAck + + /** + * A convenience constant that defines a durability policy, with + * ReplicaAckPolicy.NONE for use with a read only transaction. + * A read only transaction on a Master, using this Durability, will thus + * not be held up, or throw InsufficientReplicasException, if + * the Master is not in contact with a sufficient number of Replicas at the + * time the transaction was initiated.

        + * + * It's worth noting that since the transaction is read only, the sync + * policies, although specified as NO_SYNC, do not really + * matter. + * + * @deprecated use {@link TransactionConfig#setReadOnly} instead. + */ + public static final Durability READ_ONLY_TXN = + new Durability(SyncPolicy.NO_SYNC, // localSync + SyncPolicy.NO_SYNC, // replicaSync + ReplicaAckPolicy.NONE); // replicaAck + + /** + * Defines the synchronization policy to be used when committing a + * transaction. High levels of synchronization offer a greater guarantee + * that the transaction is persistent to disk, but trade that off for + * lower performance. + */ + public enum SyncPolicy { + + /** + * Write and synchronously flush the log on transaction commit. + * Transactions exhibit all the ACID (atomicity, consistency, + * isolation, and durability) properties. + *

        + * This is the default. + */ + SYNC, + + /** + * Do not write or synchronously flush the log on transaction commit. + * Transactions exhibit the ACI (atomicity, consistency, and isolation) + * properties, but not D (durability); that is, database integrity will + * be maintained, but if the application or system fails, it is + * possible some number of the most recently committed transactions may + * be undone during recovery. The number of transactions at risk is + * governed by how many log updates can fit into the log buffer, how + * often the operating system flushes dirty buffers to disk, and how + * often the log is checkpointed. + */ + NO_SYNC, + + /** + * Write but do not synchronously flush the log on transaction commit. + * Transactions exhibit the ACI (atomicity, consistency, and isolation) + * properties, but not D (durability); that is, database integrity will + * be maintained, but if the operating system fails, it is possible + * some number of the most recently committed transactions may be + * undone during recovery. The number of transactions at risk is + * governed by how often the operating system flushes dirty buffers to + * disk, and how often the log is checkpointed. + */ + WRITE_NO_SYNC + }; + + /** + * A replicated environment makes it possible to increase an application's + * transaction commit guarantees by committing changes to its replicas on + * the network. ReplicaAckPolicy defines the policy for how such network + * commits are handled. + *
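To make the sync-policy trade-offs above concrete, here is an illustrative pairing of local and replica sync policies (a sketch, assuming env is an open, transactional Environment; the variable names are only examples):

    Durability fastWrites = new Durability(
        Durability.SyncPolicy.WRITE_NO_SYNC,   // local: write, but no fsync
        Durability.SyncPolicy.NO_SYNC,         // replicas: neither write nor fsync
        Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
    TransactionConfig txnConfig = new TransactionConfig();
    txnConfig.setDurability(fastWrites);
    Transaction txn = env.beginTransaction(null, txnConfig);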

        + * The choice of a ReplicaAckPolicy must be consistent across all the + * replicas in a replication group, to ensure that the policy is + * consistently enforced in the event of an election. + * + *

        Note that SECONDARY nodes are not included in the set of replicas + * that must acknowledge transaction commits. + */ + public enum ReplicaAckPolicy { + + /** + * All ELECTABLE replicas must acknowledge that they have committed the + * transaction. This policy should be selected only if your replication + * group has a small number of ELECTABLE replicas, and those replicas + * are on extremely reliable networks and servers. + */ + ALL, + + /** + * No transaction commit acknowledgments are required and the master + * will never wait for replica acknowledgments. In this case, + * transaction durability is determined entirely by the type of commit + * that is being performed on the master. + */ + NONE, + + /** + * A simple majority of ELECTABLE replicas must acknowledge that they + * have committed the transaction. This acknowledgment policy, in + * conjunction with an election policy which requires at least a simple + * majority, ensures that the changes made by the transaction remains + * durable if a new election is held. + *

        + * This is the default. + */ + SIMPLE_MAJORITY; + + /** + * Returns the minimum number of ELECTABLE replicas required to + * implement the ReplicaAckPolicy for a given replication group size. + * + * @param groupSize the number of ELECTABLE replicas in the replication + * group + * + * @return the number of ELECTABLE replicas needed + */ + public int minAckNodes(int groupSize) { + switch (this) { + case ALL: + return groupSize; + case NONE: + return 1; + case SIMPLE_MAJORITY: + return (groupSize / 2 + 1); + default: + throw EnvironmentFailureException.unexpectedState + ("Unknown ack policy: " + this); + } + } + } + + /* The sync policy in effect on the local node. */ + private final SyncPolicy localSync; + + /* The sync policy in effect on a replica. */ + final private SyncPolicy replicaSync; + + /* The replica acknowledgment policy to be used. */ + final private ReplicaAckPolicy replicaAck; + + /** + * Creates an instance of a Durability specification. + * + * @param localSync the SyncPolicy to be used when committing the + * transaction locally. + * @param replicaSync the SyncPolicy to be used remotely, as part of a + * transaction acknowledgment, at a Replica node. + * @param replicaAck the acknowledgment policy used when obtaining + * transaction acknowledgments from Replicas. + */ + public Durability(SyncPolicy localSync, + SyncPolicy replicaSync, + ReplicaAckPolicy replicaAck) { + this.localSync = localSync; + this.replicaSync = replicaSync; + this.replicaAck = replicaAck; + } + + /** + * Parses the string and returns the durability it represents. The string + * must have the following format: + *

        + * + * SyncPolicy[,SyncPolicy[,ReplicaAckPolicy]] + * + *

        + * The first SyncPolicy in the above format applies to the Master, and the + * optional second SyncPolicy to the replica. Specific SyncPolicy or + * ReplicaAckPolicy values are denoted by the name of the enumeration + * value. + *

For example, the string sync,sync,simple_majority describes a durability + * policy where the master and replica both use {@link SyncPolicy#SYNC} + * to commit transactions and {@link ReplicaAckPolicy#SIMPLE_MAJORITY} to + * acknowledge a transaction commit.

{@link SyncPolicy#NO_SYNC} is the default value for a node's + * SyncPolicy.
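As an illustrative sketch of this format (consistent with the parse() implementation below; case is ignored and trailing fields may be omitted):

    Durability d1 = Durability.parse("sync"); // replica NO_SYNC, ack SIMPLE_MAJORITY
    Durability d2 = Durability.parse("SYNC,NO_SYNC,SIMPLE_MAJORITY");
    assert d1.equals(d2); // both name the same policy
    // SIMPLE_MAJORITY requires groupSize/2 + 1 acks, e.g. 3 of 5 electable nodes:
    assert d1.getReplicaAck().minAckNodes(5) == 3;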

        + * {@link ReplicaAckPolicy#SIMPLE_MAJORITY} is the default for the + * ReplicaAckPolicy. + * + * @param durabilityString the durability string in the above format + * + * @return the Durability resulting from the parse, or null if the + * durabilityString argument was itself null. + * + * @throws IllegalArgumentException if the durabilityString is invalid. + */ + public static Durability parse(String durabilityString) { + if (durabilityString == null) { + return null; + } + StringTokenizer tokenizer = + new StringTokenizer(durabilityString.toUpperCase(), ","); + + if (!tokenizer.hasMoreTokens()) { + throw new IllegalArgumentException + ("Bad string format: " + '"' + durabilityString + '"'); + } + SyncPolicy localSync = + SyncPolicy.valueOf(tokenizer.nextToken()); + SyncPolicy replicaSync = tokenizer.hasMoreTokens() ? + SyncPolicy.valueOf(tokenizer.nextToken()) : + SyncPolicy.NO_SYNC; + ReplicaAckPolicy replicaAck = tokenizer.hasMoreTokens() ? + ReplicaAckPolicy.valueOf(tokenizer.nextToken()) : + ReplicaAckPolicy.SIMPLE_MAJORITY; + return new Durability(localSync, replicaSync, replicaAck); + } + + /** + * Returns the string representation of durability in the format defined + * by string form of the Durability constructor. + * + * @see #parse(String) + */ + @Override + public String toString() { + return localSync.toString() + "," + + replicaSync.toString() + "," + + replicaAck.toString(); + } + + /** + * Returns the transaction synchronization policy to be used locally when + * committing a transaction. + */ + public SyncPolicy getLocalSync() { + return localSync; + } + + /** + * Returns the transaction synchronization policy to be used by the replica + * as it replays a transaction that needs an acknowledgment. + */ + public SyncPolicy getReplicaSync() { + return replicaSync; + } + + /** + * Returns the replica acknowledgment policy used by the master when + * committing changes to a replicated environment. + */ + public ReplicaAckPolicy getReplicaAck() { + return replicaAck; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((localSync == null) ? 0 : localSync.hashCode()); + result = prime * result + + ((replicaAck == null) ? 0 : replicaAck.hashCode()); + result = prime * result + + ((replicaSync == null) ? 0 : replicaSync.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof Durability)) { + return false; + } + Durability other = (Durability) obj; + if (localSync == null) { + if (other.localSync != null) { + return false; + } + } else if (!localSync.equals(other.localSync)) { + return false; + } + if (replicaAck == null) { + if (other.replicaAck != null) { + return false; + } + } else if (!replicaAck.equals(other.replicaAck)) { + return false; + } + if (replicaSync == null) { + if (other.replicaSync != null) { + return false; + } + } else if (!replicaSync.equals(other.replicaSync)) { + return false; + } + return true; + } +} diff --git a/src/com/sleepycat/je/Environment.java b/src/com/sleepycat/je/Environment.java new file mode 100644 index 0000000..8bab31a --- /dev/null +++ b/src/com/sleepycat/je/Environment.java @@ -0,0 +1,2555 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Closeable; +import java.io.File; +import java.io.PrintStream; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Level; +import javax.transaction.xa.Xid; + +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.DbTree.TruncateDbResult; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.RepConfigProxy; +import com.sleepycat.je.dbi.StartupTracker.Phase; +import com.sleepycat.je.dbi.TriggerManager; +import com.sleepycat.je.txn.HandleLocker; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.DatabaseUtil; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.Pair; + +/** + * A database environment. Environments include support for some or all of + * caching, locking, logging and transactions. + * + *

        To open an existing environment with default attributes the application + * may use a default environment configuration object or null: + *

        + *
        + *      // Open an environment handle with default attributes.
        + *     Environment env = new Environment(home, new EnvironmentConfig());
        + *     
        + *
        + * or + *
        + *     Environment env = new Environment(home, null);
        + * 
        + *

        Note that many Environment objects may access a single environment.

        + *

        To create an environment or customize attributes, the application should + * customize the configuration class. For example:

        + *
        + *     EnvironmentConfig envConfig = new EnvironmentConfig();
        + *     envConfig.setTransactional(true);
        + *     envConfig.setAllowCreate(true);
        + *     envConfig.setCacheSize(1000000);
        + *     Environment newlyCreatedEnv = new Environment(home, envConfig);
        + * 
        + * + *

        Note that environment configuration parameters can also be set through + * the <environment home>/je.properties file. This file takes precedence + * over any programmatically specified configuration parameters so that + * configuration changes can be made without recompiling. Environment + * configuration follows this order of precedence:

        + * + *
          + *
        1. Configuration parameters specified in + * <environment home>/je.properties take first precedence. + *
2. Configuration parameters set in the EnvironmentConfig object used at + * Environment construction are next, as illustrated below.
3. Any configuration parameters not set by the application are set to + * system defaults, described along with the parameter name String constants + * in the EnvironmentConfig class.
        + * + *
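For instance (an illustrative sketch, not from the source; the parameter and the sizes are only examples, and home is an assumed File for the environment directory), a je.properties entry silently overrides the programmatic setting:

    // Suppose <environment home>/je.properties contains: je.maxMemory=104857600
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    envConfig.setConfigParam(EnvironmentConfig.MAX_MEMORY, "52428800");
    // The file setting wins: the environment runs with a 104857600-byte cache.
    Environment env = new Environment(home, envConfig);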

        An environment handle is an Environment instance. More than one + * Environment instance may be created for the same physical directory, which + * is the same as saying that more than one Environment handle may be open at + * one time for a given environment.

        + * + * The Environment handle should not be closed while any other handle remains + * open that is using it as a reference (for example, {@link + * com.sleepycat.je.Database Database} or {@link com.sleepycat.je.Transaction + * Transaction}. Once {@link com.sleepycat.je.Environment#close + * Environment.close} is called, this object may not be accessed again. + */ +public class Environment implements Closeable { + + /** + * envImpl is a reference to the shared underlying environment. + * + * The envImpl field is set to null during close to avoid OOME. It + * should normally only be accessed via the checkOpen and + * getNonNullEnvImpl methods. During close, while synchronized, it is safe + * to access it directly. + */ + private volatile EnvironmentImpl environmentImpl; + + /* + * If the env was invalided (even if the env is now closed) this contains + * the first EFE that invalidated it. Contains null if the env was not + * invalidated. + * + * This reference is shared with EnvironmentImpl, to allow the invalidating + * exception to be returned after close, when environmentImpl is null. + * The EFE does not reference the EnvironmentImpl, so GC is not effected. + * + * This field cannot be declared as final because it is initialized by + * methods called by the ctor. However, after construction it is non-null + * and should be treated as final. + */ + private AtomicReference invalidatingEFE; + + private TransactionConfig defaultTxnConfig; + private EnvironmentMutableConfig handleConfig; + private final EnvironmentConfig appliedFinalConfig; + + private final Map referringDbs; + private final Map referringDbTxns; + + /** + * @hidden + * The name of the cleaner daemon thread. This constant is passed to an + * ExceptionEvent's threadName argument when an exception is thrown in the + * cleaner daemon thread. + */ + public static final String CLEANER_NAME = "Cleaner"; + + /** + * @hidden + * The name of the IN Compressor daemon thread. This constant is passed to + * an ExceptionEvent's threadName argument when an exception is thrown in + * the IN Compressor daemon thread. + */ + public static final String INCOMP_NAME = "INCompressor"; + + /** + * @hidden + * The name of the Checkpointer daemon thread. This constant is passed to + * an ExceptionEvent's threadName argument when an exception is thrown in + * the Checkpointer daemon thread. + */ + public static final String CHECKPOINTER_NAME = "Checkpointer"; + + /** + * @hidden + * The name of the StatCapture daemon thread. This constant is passed to + * an ExceptionEvent's threadName argument when an exception is thrown in + * the StatCapture daemon thread. + */ + public static final String STATCAPTURE_NAME = "StatCapture"; + + /** + * @hidden + * The name of the log flusher daemon thread. + */ + public static final String LOG_FLUSHER_NAME = "LogFlusher"; + + /** + * @hidden + * The name of the deletion detector daemon thread. + */ + public static final String FILE_DELETION_DETECTOR_NAME = + "FileDeletionDetector"; + + /** + * @hidden + * The name of the data corruption verifier daemon thread. + */ + public static final String DATA_CORRUPTION_VERIFIER_NAME = + "DataCorruptionVerifier"; + + /** + * Creates a database environment handle. + * + * @param envHome The database environment's home directory. + * + * @param configuration The database environment attributes. If null, + * default attributes are used. 
+ * + * @throws EnvironmentNotFoundException if the environment does not exist + * (does not contain at least one log file) and the {@code + * EnvironmentConfig AllowCreate} parameter is false. + * + * @throws EnvironmentLockedException when an environment cannot be opened + * for write access because another process has the same environment open + * for write access. Warning: This exception should be + * handled when an environment is opened by more than one process. + * + * @throws VersionMismatchException when the existing log is not compatible + * with the version of JE that is running. This occurs when a later + * version of JE was used to create the log. Warning: + * This exception should be handled when more than one version of JE may be + * used to access an environment. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this environment was previously + * opened for replication and is not being opened read-only. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code EnvironmentConfig} parameter. + */ + public Environment(File envHome, EnvironmentConfig configuration) + throws EnvironmentNotFoundException, + EnvironmentLockedException, + VersionMismatchException, + DatabaseException, + IllegalArgumentException { + + this(envHome, configuration, null /*repConfigProxy*/, + null /*envImplParam*/); + } + + /** + * @hidden + * Internal common constructor. + * + * @param envImpl is non-null only when used by EnvironmentImpl to + * create an InternalEnvironment. + */ + protected Environment(File envHome, + EnvironmentConfig envConfig, + RepConfigProxy repConfigProxy, + EnvironmentImpl envImpl) { + + referringDbs = new ConcurrentHashMap(); + referringDbTxns = new ConcurrentHashMap(); + + DatabaseUtil.checkForNullParam(envHome, "envHome"); + + appliedFinalConfig = + setupHandleConfig(envHome, envConfig, repConfigProxy); + + if (envImpl != null) { + /* We're creating an InternalEnvironment in EnvironmentImpl. */ + environmentImpl = envImpl; + } else { + /* Open a new or existing environment in the shared pool. */ + environmentImpl = + makeEnvironmentImpl(envHome, envConfig, repConfigProxy); + } + } + + /** + * @hidden + * makeEnvironmentImpl() is called both by the Environment constructor and + * by the ReplicatedEnvironment constructor when recreating the environment + * for a hard recovery. + */ + protected EnvironmentImpl makeEnvironmentImpl( + File envHome, + EnvironmentConfig envConfig, + RepConfigProxy repConfigProxy) { + + environmentImpl = DbEnvPool.getInstance().getEnvironment( + envHome, + appliedFinalConfig, + envConfig != null /*checkImmutableParams*/, + setupRepConfig(envHome, repConfigProxy, envConfig)); + + environmentImpl.registerMBean(this); + + invalidatingEFE = environmentImpl.getInvalidatingExceptionReference(); + + return environmentImpl; + } + + /** + * Validate the parameters specified in the environment config. Applies + * the configurations specified in the je.properties file to override any + * programmatically set configurations. Create a copy to save in this + * handle. The main reason to return a config instead of using the + * handleConfig field is to return an EnvironmentConfig instead of a + * EnvironmentMutableConfig. 
+ */ + private EnvironmentConfig setupHandleConfig(File envHome, + EnvironmentConfig envConfig, + RepConfigProxy repConfig) + throws IllegalArgumentException { + + /* If the user specified a null object, use the default */ + EnvironmentConfig baseConfig = (envConfig == null) ? + EnvironmentConfig.DEFAULT : envConfig; + + /* Make a copy, apply je.properties, and init the handle config. */ + EnvironmentConfig useConfig = baseConfig.clone(); + + /* Apply the je.properties file. */ + if (useConfig.getLoadPropertyFile()) { + DbConfigManager.applyFileConfig(envHome, + DbInternal.getProps(useConfig), + false); // forReplication + } + copyToHandleConfig(useConfig, useConfig, repConfig); + return useConfig; + } + + /** + * @hidden + * Obtain a validated replication configuration. In a non-HA environment, + * return null. + */ + protected RepConfigProxy + setupRepConfig(final File envHome, + final RepConfigProxy repConfigProxy, + final EnvironmentConfig envConfig) { + + return null; + } + + /** + * The Environment.close method closes the Berkeley DB environment. + * + *

        When the last environment handle is closed, allocated resources are + * freed, and daemon threads are stopped, even if they are performing work. + * For example, if the cleaner is still cleaning the log, it will be + * stopped at the next reasonable opportunity and perform no more cleaning + * operations. After stopping background threads, a final checkpoint is + * performed by this method, in order to reduce the time to recover the + * next time the environment is opened.

        + * + *

        When minimizing recovery time is desired, it is often useful to stop + * all application activity and perform an additional checkpoint prior to + * calling {@code close}. This additional checkpoint will write most of + * dirty Btree information, so that that the final checkpoint is very + * small (and recovery is fast). To ensure that recovery time is minimized, + * the log cleaner threads should also be stopped prior to the extra + * checkpoint. This prevents log cleaning from dirtying the Btree, which + * can make the final checkpoint larger (and recovery time longer). The + * recommended procedure for minimizing recovery time is:

        + * + *
        +     *     // Stop/finish all application operations that are using JE.
        +     *     ...
        +     *
        +     *     // Stop the cleaner daemon threads.
        +     *     EnvironmentMutableConfig config = env.getMutableConfig();
        +     *     config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
        +     *     env.setMutableConfig(config);
        +     *
        +     *     // Perform an extra checkpoint
        +     *     env.checkpoint(new CheckpointConfig().setForce(true));
        +     *
        +     *     // Finally, close the environment.
        +     *     env.close();
        +     * 
        + * + *

        The Environment handle should not be closed while any other handle + * that refers to it is not yet closed; for example, database environment + * handles must not be closed while database handles remain open, or + * transactions in the environment have not yet committed or aborted. + * Specifically, this includes {@link com.sleepycat.je.Database Database}, + * and {@link com.sleepycat.je.Transaction Transaction} handles.
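A sketch of the shutdown ordering this implies (illustrative; txn and db stand for whatever handles the application still holds):

    txn.commit();  // resolve open transactions first
    db.close();    // then close Database handles
    env.close();   // and only then close the Environment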

        + * + *

        If this handle has already been closed, this method does nothing and + * returns without throwing an exception.

        + * + *

        In multithreaded applications, only a single thread should call + * Environment.close.

        + * + *

        The environment handle may not be used again after this method has + * been called, regardless of the method's success or failure, with one + * exception: the {@code close} method itself may be called any number of + * times.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws EnvironmentWedgedException when the current process must be + * shut down and restarted before re-opening the Environment. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws DiskLimitException if the final checkpoint cannot be performed + * because a disk limit has been violated. The Environment will be closed, + * but this exception will be thrown so that the application is aware that + * a checkpoint was not performed. + * + * @throws IllegalStateException if any open databases or transactions + * refer to this handle. The Environment will be closed, but this exception + * will be thrown so that the application is aware that not all databases + * and transactions were closed. + */ + public synchronized void close() + throws DatabaseException { + + if (environmentImpl == null) { + return; + } + + if (!environmentImpl.isValid()) { + + /* + * We're trying to close on an environment that has seen a fatal + * exception. Try to do the minimum, such as closing file + * descriptors, to support re-opening the environment in the same + * JVM. + */ + try { + environmentImpl.closeAfterInvalid(); + } finally { + clearEnvImpl(); + + for (Database db : referringDbs.keySet()) { + db.minimalClose(Database.DbState.CLOSED, null); + } + } + return; + } + + final StringBuilder errors = new StringBuilder(); + try { + checkForCloseErrors(errors); + + try { + environmentImpl.close(); + } catch (DatabaseException e) { + e.addErrorMessage(errors.toString()); + throw e; + } catch (RuntimeException e) { + if (errors.length() > 0) { + throw new IllegalStateException(errors.toString(), e); + } + throw e; + } + + if (errors.length() > 0) { + throw new IllegalStateException(errors.toString()); + } + } finally { + clearEnvImpl(); + } + } + + /** + * Set environmentImpl to null during close, to allow GC when the app may + * hold on to a reference to the Environment handle for some time period. + */ + void clearEnvImpl() { + environmentImpl = null; + } + + /** + * Close an InternalEnvironment handle. We do not call + * EnvironmentImpl.close here, since an InternalEnvironment is not + * registered like a non-internal handle. However, we must call + * checkForCloseErrors to auto-close internal databases, as well as check + * for errors. + */ + synchronized void closeInternalHandle() { + final StringBuilder errors = new StringBuilder(); + checkForCloseErrors(errors); + if (errors.length() > 0) { + throw new IllegalStateException(errors.toString()); + } + } + + private void checkForCloseErrors(StringBuilder errors) { + + checkOpenDbs(errors); + + checkOpenTxns(errors); + + if (!isInternalHandle()) { + + /* + * Only check for open XA transactions against user created + * environment handles. + */ + checkOpenXATransactions(errors); + } + } + + /** + * Appends error messages to the errors argument if there are + * open XA transactions associated with the underlying EnvironmentImpl. 
+ */ + private void checkOpenXATransactions(final StringBuilder errors) { + Xid[] openXids = getNonNullEnvImpl().getTxnManager().XARecover(); + if (openXids != null && openXids.length > 0) { + errors.append("There "); + int nXATxns = openXids.length; + if (nXATxns == 1) { + errors.append("is 1 existing XA transaction opened"); + errors.append(" in the Environment.\n"); + errors.append("It"); + } else { + errors.append("are "); + errors.append(nXATxns); + errors.append(" existing transactions opened in"); + errors.append(" the Environment.\n"); + errors.append("They"); + } + errors.append(" will be left open ...\n"); + } + } + + /** + * Appends error messages to the errors argument if there are open + * transactions associated with the environment. + */ + private void checkOpenTxns(final StringBuilder errors) { + int nTxns = (referringDbTxns == null) ? 0 : referringDbTxns.size(); + if (nTxns == 0) { + return; + } + + errors.append("There "); + if (nTxns == 1) { + errors.append("is 1 existing transaction opened"); + errors.append(" against the Environment.\n"); + } else { + errors.append("are "); + errors.append(nTxns); + errors.append(" existing transactions opened against"); + errors.append(" the Environment.\n"); + } + errors.append("Aborting open transactions ...\n"); + + for (Transaction txn : referringDbTxns.keySet()) { + try { + errors.append("aborting " + txn); + txn.abort(); + } catch (RuntimeException e) { + if (!environmentImpl.isValid()) { + /* Propagate if env is invalidated. */ + throw e; + } + errors.append("\nWhile aborting transaction "); + errors.append(txn.getId()); + errors.append(" encountered exception: "); + errors.append(e).append("\n"); + } + } + } + + /** + * Appends error messages to the errors argument if there are open database + * handles associated with the environment. + */ + private void checkOpenDbs(final StringBuilder errors) { + + if (referringDbs.isEmpty()) { + return; + } + + int nOpenUserDbs = 0; + + for (Database db : referringDbs.keySet()) { + String dbName = ""; + try { + + /* + * Save the db name before we attempt the close, it's + * unavailable after the close. + */ + dbName = db.getDebugName(); + + if (!db.getDbImpl().isInternalDb()) { + nOpenUserDbs += 1; + errors.append("Unclosed Database: "); + errors.append(dbName).append("\n"); + } + db.close(); + } catch (RuntimeException e) { + if (!environmentImpl.isValid()) { + /* Propagate if env is invalidated. */ + throw e; + } + errors.append("\nWhile closing Database "); + errors.append(dbName); + errors.append(" encountered exception: "); + errors.append(LoggerUtils.getStackTrace(e)).append("\n"); + } + } + + if (nOpenUserDbs > 0) { + errors.append("Databases left open: "); + errors.append(nOpenUserDbs).append("\n"); + } + } + + /** + * Opens, and optionally creates, a Database. + * + * @param txn For a transactional database, an explicit transaction may be + * specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param databaseName The name of the database. + * + * @param dbConfig The database attributes. If null, default attributes + * are used. + * + * @return Database handle. + * + * @throws DatabaseExistsException if the database already exists and the + * {@code DatabaseConfig ExclusiveCreate} parameter is true. + * + * @throws DatabaseNotFoundException if the database does not exist and the + * {@code DatabaseConfig AllowCreate} parameter is false. 
+ * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. If the database does not exist and the {@link + * DatabaseConfig#setAllowCreate AllowCreate} parameter is true, then one + * of the Write + * Operation Failures may also occur. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code DatabaseConfig} property. + * + * @throws IllegalStateException if DatabaseConfig properties are changed + * and there are other open handles for this database. + */ + public synchronized Database openDatabase(Transaction txn, + String databaseName, + DatabaseConfig dbConfig) + throws DatabaseNotFoundException, + DatabaseExistsException, + IllegalArgumentException, + IllegalStateException { + + final EnvironmentImpl envImpl = checkOpen(); + + if (dbConfig == null) { + dbConfig = DatabaseConfig.DEFAULT; + } + + try { + final Database db = new Database(this); + + setupDatabase( + envImpl, txn, db, databaseName, dbConfig, + false /*isInternalDb*/); + + return db; + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Opens and optionally creates a SecondaryDatabase. + * + *

        Note that the associations between primary and secondary databases + * are not stored persistently. Whenever a primary database is opened for + * write access by the application, the appropriate associated secondary + * databases should also be opened by the application. This is necessary + * to ensure data integrity when changes are made to the primary + * database.
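For illustration, re-establishing the association at open time might look like this (a sketch; the database names are examples and MyKeyCreator is a hypothetical SecondaryKeyCreator implementation):

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setTransactional(true);
    dbConfig.setAllowCreate(true);
    Database primary = env.openDatabase(null, "orders", dbConfig);

    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setTransactional(true);
    secConfig.setAllowCreate(true);
    secConfig.setKeyCreator(new MyKeyCreator()); // hypothetical key creator
    SecondaryDatabase index =
        env.openSecondaryDatabase(null, "ordersByCustomer", primary, secConfig);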

        + * + * @param txn For a transactional database, an explicit transaction may be + * specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param databaseName The name of the database. + * + * @param primaryDatabase the primary database with which the secondary + * database will be associated. The primary database must not be + * configured for duplicates. + * + * @param dbConfig The secondary database attributes. If null, default + * attributes are used. + * + * @return Database handle. + * + * @throws DatabaseExistsException if the database already exists and the + * {@code DatabaseConfig ExclusiveCreate} parameter is true. + * + * @throws DatabaseNotFoundException if the database does not exist and the + * {@code DatabaseConfig AllowCreate} parameter is false. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. If the database does not exist and the {@link + * DatabaseConfig#setAllowCreate AllowCreate} parameter is true, then one + * of the Write + * Operation Failures may also occur. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code SecondaryConfig} property. + * + * @throws IllegalStateException if DatabaseConfig properties are changed + * and there are other open handles for this database. + */ + public synchronized SecondaryDatabase openSecondaryDatabase( + Transaction txn, + String databaseName, + Database primaryDatabase, + SecondaryConfig dbConfig) + throws DatabaseNotFoundException, + DatabaseExistsException, + DatabaseException, + IllegalArgumentException, + IllegalStateException { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + envImpl.getSecondaryAssociationLock(). + writeLock().lockInterruptibly(); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(envImpl, e); + } + try { + if (dbConfig == null) { + dbConfig = SecondaryConfig.DEFAULT; + } + final SecondaryDatabase db = + new SecondaryDatabase(this, dbConfig, primaryDatabase); + + setupDatabase( + envImpl, txn, db, databaseName, dbConfig, + false /*isInternalDb*/); + + return db; + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } finally { + envImpl.getSecondaryAssociationLock().writeLock().unlock(); + } + } + + /** + * The meat of open database processing. + * + * Currently, only external DBs are opened via this method, but we may + * allow internal DB opens in the future. 
+ * + * @param txn may be null + * @param newDb is the Database handle which houses this database + * + * @throws IllegalArgumentException via openDatabase and + * openSecondaryDatabase + * + * @see HandleLocker + */ + private void setupDatabase(EnvironmentImpl envImpl, + Transaction txn, + Database newDb, + String databaseName, + DatabaseConfig dbConfig, + boolean isInternalDb) + throws DatabaseNotFoundException, DatabaseExistsException { + + DatabaseUtil.checkForNullParam(databaseName, "databaseName"); + + LoggerUtils.envLogMsg(Level.FINEST, envImpl, + "Environment.open: " + " name=" + databaseName + + " dbConfig=" + dbConfig); + + final boolean autoTxnIsReplicated = + dbConfig.getReplicated() && envImpl.isReplicated(); + + /* + * Check that the open configuration is valid and doesn't conflict with + * the envImpl configuration. + */ + dbConfig.validateOnDbOpen(databaseName, autoTxnIsReplicated); + + validateDbConfigAgainstEnv( + envImpl, dbConfig, databaseName, isInternalDb); + + /* Perform eviction before each operation that allocates memory. */ + envImpl.criticalEviction(false /*backgroundIO*/); + + DatabaseImpl database = null; + boolean operationOk = false; + HandleLocker handleLocker = null; + final Locker locker = LockerFactory.getWritableLocker + (this, txn, isInternalDb, dbConfig.getTransactional(), + autoTxnIsReplicated, null); + try { + + /* + * Create the handle locker and lock the NameLN of an existing + * database. A read lock on the NameLN is acquired for both locker + * and handleLocker. Note: getDb may return a deleted database. + */ + handleLocker = newDb.initHandleLocker(envImpl, locker); + database = envImpl.getDbTree().getDb(locker, databaseName, + handleLocker, false); + + boolean dbCreated = false; + final boolean databaseExists = + (database != null) && !database.isDeleted(); + + if (databaseExists) { + if (dbConfig.getAllowCreate() && + dbConfig.getExclusiveCreate()) { + throw new DatabaseExistsException + ("Database " + databaseName + " already exists"); + } + + newDb.initExisting(this, locker, database, databaseName, + dbConfig); + } else { + /* Release deleted DB. [#13415] */ + envImpl.getDbTree().releaseDb(database); + database = null; + + if (!isInternalDb && + DbTree.isReservedDbName(databaseName)) { + throw new IllegalArgumentException + (databaseName + " is a reserved database name."); + } + + if (!dbConfig.getAllowCreate()) { + throw new DatabaseNotFoundException("Database " + + databaseName + + " not found."); + } + + /* + * Init a new DB. This calls DbTree.createDb and the new + * database is returned. A write lock on the NameLN is + * acquired by locker and a read lock by the handleLocker. + */ + database = newDb.initNew(this, locker, databaseName, dbConfig); + dbCreated = true; + } + + /* + * The open is successful. We add the opened database handle to + * this environment to track open handles in general, and to the + * locker so that it can be invalidated by a user txn abort. + */ + operationOk = true; + addReferringHandle(newDb); + locker.addOpenedDatabase(newDb); + + /* Run triggers before any subsequent auto commits. */ + final boolean firstWriteHandle = + newDb.isWritable() && + (newDb.getDbImpl().noteWriteHandleOpen() == 1); + + if (dbCreated || firstWriteHandle) { + TriggerManager.runOpenTriggers(locker, newDb, dbCreated); + } + } finally { + + /* + * If the open fails, decrement the DB usage count, release + * handle locks and remove references from other objects. 
In other + * cases this is done by Database.close() or invalidate(), the + * latter in the case of a user txn abort. + */ + if (!operationOk) { + envImpl.getDbTree().releaseDb(database); + if (handleLocker != null) { + handleLocker.operationEnd(false); + } + newDb.removeReferringAssociations(); + } + + /* + * Tell the locker that this operation is over. Some types of + * lockers (BasicLocker and auto Txn) will actually finish. + */ + locker.operationEnd(operationOk); + } + } + + /** + * @throws IllegalArgumentException via openDatabase and + * openSecondaryDatabase + */ + private void validateDbConfigAgainstEnv(EnvironmentImpl envImpl, + DatabaseConfig dbConfig, + String databaseName, + boolean isInternalDb) + throws IllegalArgumentException { + + /* + * R/W database handles on a replicated database must be transactional, + * for now. In the future we may support non-transactional database + * handles. + */ + if (envImpl.isReplicated() && + dbConfig.getReplicated() && + !dbConfig.getReadOnly()) { + if (!dbConfig.getTransactional()) { + throw new IllegalArgumentException + ("Read/Write Database instances for replicated " + + "database " + databaseName + " must be transactional."); + } + } + + /* Check operation's transactional status against the Environment */ + if (!isInternalDb && + dbConfig.getTransactional() && + !(envImpl.isTransactional())) { + throw new IllegalArgumentException + ("Attempted to open Database " + databaseName + + " transactionally, but parent Environment is" + + " not transactional"); + } + + /* Check read/write status */ + if (envImpl.isReadOnly() && (!dbConfig.getReadOnly())) { + throw new IllegalArgumentException + ("Attempted to open Database " + databaseName + + " as writable but parent Environment is read only "); + } + } + + /** + * Removes a database from the environment, discarding all records in the + * database and removing the database name itself. + * + *

        Compared to deleting all the records in a database individually, + * {@code removeDatabase} is a very efficient operation. Some internal + * housekeeping information is updated, but the database records are not + * read or written, and very little I/O is needed.

        + * + *

        When called on a database configured with secondary indices, the + * application is responsible for also removing all associated secondary + * indices. To guarantee integrity, a primary database and all of its + * secondary databases should be removed atomically using a single + * transaction.
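A sketch of the atomic removal recommended above (illustrative names; all Database handles are assumed to be closed beforehand):

    Transaction txn = env.beginTransaction(null, null);
    try {
        env.removeDatabase(txn, "ordersByCustomer"); // secondary index first
        env.removeDatabase(txn, "orders");           // then the primary
        txn.commit();
    } catch (DatabaseException e) {
        txn.abort();
        throw e;
    }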

        + * + *

        Applications should not remove a database with open {@link Database + * Database} handles. If the database is open with the same transaction as + * passed in the {@code txn} parameter, {@link IllegalStateException} is + * thrown by this method. If the database is open using a different + * transaction, this method will block until all database handles are + * closed, or until the conflict is resolved by throwing {@link + * LockConflictException}.

        + * + * @param txn For a transactional environment, an explicit transaction + * may be specified or null may be specified to use auto-commit. For a + * non-transactional environment, null must be specified. + * + * @param databaseName The database to be removed. + * + * @throws DatabaseNotFoundException if the database does not exist. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this is a read-only + * environment. + * + * @throws IllegalStateException if the database is currently open using + * the transaction passed in the {@code txn} parameter, or if this handle + * or the underlying environment has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public void removeDatabase(final Transaction txn, + final String databaseName) + throws DatabaseNotFoundException { + + DatabaseUtil.checkForNullParam(databaseName, "databaseName"); + + new DbNameOperation(txn) { + + Pair runWork(final Locker locker) + throws DatabaseNotFoundException, + DbTree.NeedRepLockerException { + + final DatabaseImpl dbImpl = + dbTree.dbRemove(locker, databaseName, null /*checkId*/); + + return new Pair<>(dbImpl, null); + } + + void runTriggers(final Locker locker, final DatabaseImpl dbImpl) { + TriggerManager.runRemoveTriggers(locker, dbImpl); + } + }.run(); + } + + /** + * Renames a database, without removing the records it contains. + * + *

        Applications should not rename a database with open {@link Database + * Database} handles. If the database is open with the same transaction as + * passed in the {@code txn} parameter, {@link IllegalStateException} is + * thrown by this method. If the database is open using a different + * transaction, this method will block until all database handles are + * closed, or until the conflict is resolved by throwing {@link + * LockConflictException}.

+ * + * @param txn For a transactional environment, an explicit transaction + * may be specified or null may be specified to use auto-commit. For a + * non-transactional environment, null must be specified. + * + * @param databaseName The database to be renamed. + * + * @param newName The new name of the database. + * + * @throws DatabaseNotFoundException if the database does not exist. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this is a read-only + * environment. + * + * @throws IllegalStateException if the database is currently open using + * the transaction passed in the {@code txn} parameter, or if this handle + * or the underlying environment has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public void renameDatabase(final Transaction txn, + final String databaseName, + final String newName) + throws DatabaseNotFoundException { + + DatabaseUtil.checkForNullParam(databaseName, "databaseName"); + DatabaseUtil.checkForNullParam(newName, "newName"); + + new DbNameOperation(txn) { + + Pair runWork(final Locker locker) + throws DatabaseNotFoundException, + DbTree.NeedRepLockerException { + + final DatabaseImpl dbImpl = + dbTree.dbRename(locker, databaseName, newName); + + return new Pair<>(dbImpl, null); + } + + void runTriggers(final Locker locker, final DatabaseImpl dbImpl) { + TriggerManager.runRenameTriggers(locker, dbImpl, newName); + } + }.run(); + } + + /** + * Empties the database, discarding all the records it contains, without + * removing the database name. + * + *

+     * <p>Compared to deleting all the records in a database individually,
+     * {@code truncateDatabase} is a very efficient operation. Some internal
+     * housekeeping information is updated, but the database records are not
+     * read or written, and very little I/O is needed.

        + * + *

+     * <p>When called on a database configured with secondary indices, the
+     * application is responsible for also truncating all associated secondary
+     * indices. To guarantee integrity, a primary database and all of its
+     * secondary databases should be truncated atomically using a single
+     * transaction.
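+     *
+     * <p>For illustration only, a sketch of truncating a primary database and
+     * one associated secondary atomically (assumes {@code env} is an open,
+     * transactional handle; the database names are hypothetical):
+     *
+     * <pre>
+     *     Transaction txn = env.beginTransaction(null, null);
+     *     boolean committed = false;
+     *     try {
+     *         // false: do not count the discarded records.
+     *         env.truncateDatabase(txn, "primaryDb", false);
+     *         env.truncateDatabase(txn, "primaryDbIndex", false);
+     *         txn.commit();
+     *         committed = true;
+     *     } finally {
+     *         if (!committed) {
+     *             txn.abort();
+     *         }
+     *     }
+     * </pre>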

        + * + *

        Applications should not truncate a database with open {@link Database + * Database} handles. If the database is open with the same transaction as + * passed in the {@code txn} parameter, {@link IllegalStateException} is + * thrown by this method. If the database is open using a different + * transaction, this method will block until all database handles are + * closed, or until the conflict is resolved by throwing {@link + * LockConflictException}.

        + * + * @param txn For a transactional environment, an explicit transaction may + * be specified or null may be specified to use auto-commit. For a + * non-transactional environment, null must be specified. + * + * @param databaseName The database to be truncated. + * + * @param returnCount If true, count and return the number of records + * discarded. + * + * @return The number of records discarded, or -1 if returnCount is false. + * + * @throws DatabaseNotFoundException if the database does not exist. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this is a read-only + * environment. + * + * @throws IllegalStateException if the database is currently open using + * the transaction passed in the {@code txn} parameter, or if this handle + * or the underlying environment has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public long truncateDatabase(final Transaction txn, + final String databaseName, + final boolean returnCount) + throws DatabaseNotFoundException { + + DatabaseUtil.checkForNullParam(databaseName, "databaseName"); + + return (new DbNameOperation(txn) { + + Pair runWork(final Locker locker) + throws DatabaseNotFoundException, + DbTree.NeedRepLockerException { + + final TruncateDbResult result = + dbTree.truncate(locker, databaseName, returnCount); + + return new Pair<>(result.newDb, result.recordCount); + } + + void runTriggers(final Locker locker, final DatabaseImpl dbImpl) { + TriggerManager.runTruncateTriggers(locker, dbImpl); + } + }).run(); + } + + /** + * Runs a DB naming operation: remove, truncate or rename. The common code + * is factored out here. In particular this class handles non-replicated + * DBs in a replicated environment, when auto-commit is used. + *

        + * For a non-replicated DB, an auto-commit txn must be created by calling + * LockerFactory.getWritableLocker with the autoTxnIsReplicated param set + * to false. If autoTxnIsReplicated is set to true in a replicated + * environment, HA consistency checks will be made when the txn is begun + * and acks will be enforced at commit. For example, for an HA node in an + * unknown state, the consistency checks would fail and prevent performing + * the operation on the local/non-replicated DB. + *

        + * Unfortunately, we need to create a txn/locker in order to query the DB + * metadata, to determine whether it is replicated. Therefore, we always + * attempt the operation initially with autoTxnIsReplicated set to false. + * The DbTree name operation methods (see DbTree.lockNameLN) will throw an + * internal exception (NeedRepLockerException) if a non-replicated + * auto-commit txn is used on a replicated DB. That signals this class to + * retry the operation with autoTxnIsReplicated set to true. + *

        + * Via an unlikely series of DB renaming it is possible that on the 2nd try + * with a replicated txn, we find that the DB is non-replicated. However, + * there is little harm in proceeding, since the consistency check is + * already done. + */ + private abstract class DbNameOperation { + + private final EnvironmentImpl envImpl; + private final Transaction txn; + final DbTree dbTree; + + DbNameOperation(final Transaction txn) { + this.txn = txn; + this.envImpl = checkOpen(); + checkWritable(envImpl); + + dbTree = envImpl.getDbTree(); + } + + /** Run the DB name operation. */ + abstract Pair runWork(final Locker locker) + throws DatabaseNotFoundException, DbTree.NeedRepLockerException; + + /** Run triggers after a successful DB name operation. */ + abstract void runTriggers(final Locker locker, + final DatabaseImpl dbImpl); + + /** + * Try the operation with autoTxnIsReplicated=false, and then again + * with autoTxnIsReplicated=true if NeedRepLockerException is thrown. + */ + R run() throws DatabaseNotFoundException { + try { + return runOnce(getWritableLocker(false)); + } catch (DbTree.NeedRepLockerException e) { + try { + return runOnce(getWritableLocker(true)); + } catch (DbTree.NeedRepLockerException e2) { + /* Should never happen. */ + throw EnvironmentFailureException.unexpectedException( + envImpl, e); + } + } + } + + private R runOnce(final Locker locker) + throws DatabaseNotFoundException, DbTree.NeedRepLockerException { + + boolean success = false; + try { + final Pair results = runWork(locker); + final DatabaseImpl dbImpl = results.first(); + if (dbImpl == null) { + /* Should never happen. */ + throw EnvironmentFailureException.unexpectedState(envImpl); + } + success = true; + runTriggers(locker, dbImpl); + return results.second(); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } finally { + locker.operationEnd(success); + } + } + + private Locker getWritableLocker(boolean autoTxnIsReplicated) { + return LockerFactory.getWritableLocker( + Environment.this, txn, false /*isInternalDb*/, + envImpl.isTransactional(), autoTxnIsReplicated); + } + } + + /** + * For unit testing. Returns the current memory usage in bytes for all + * btrees in the envImpl. + */ + long getMemoryUsage() + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + return envImpl.getMemoryBudget().getCacheMemoryUsage(); + } + + /** + * Returns the database environment's home directory. + * + * This method may be called when the environment has been invalidated, but + * not yet closed. In other words, {@link EnvironmentFailureException} is + * never thrown by this method. + * + * @return The database environment's home directory. + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle has been closed. + */ + public File getHome() + throws DatabaseException { + + final EnvironmentImpl envImpl = getNonNullEnvImpl(); + + return envImpl.getEnvironmentHome(); + } + + /* + * Transaction management + */ + + /** + * Returns the default txn config for this environment handle. + */ + TransactionConfig getDefaultTxnConfig() { + return defaultTxnConfig; + } + + /** + * Copies the handle properties out of the config properties, and + * initializes the default transaction config. + */ + private void copyToHandleConfig(EnvironmentMutableConfig useConfig, + EnvironmentConfig initStaticConfig, + RepConfigProxy initRepConfig) { + + /* + * Create the new objects, initialize them, then change the instance + * fields. This avoids synchronization issues. 
+ */ + EnvironmentMutableConfig newHandleConfig = + new EnvironmentMutableConfig(); + useConfig.copyHandlePropsTo(newHandleConfig); + this.handleConfig = newHandleConfig; + + TransactionConfig newTxnConfig = + TransactionConfig.DEFAULT.clone(); + newTxnConfig.setNoSync(handleConfig.getTxnNoSync()); + newTxnConfig.setWriteNoSync(handleConfig.getTxnWriteNoSync()); + newTxnConfig.setDurability(handleConfig.getDurability()); + + if (initStaticConfig != null) { + newTxnConfig.setSerializableIsolation + (initStaticConfig.getTxnSerializableIsolation()); + newTxnConfig.setReadCommitted + (initStaticConfig.getTxnReadCommitted()); + } else { + newTxnConfig.setSerializableIsolation + (defaultTxnConfig.getSerializableIsolation()); + newTxnConfig.setReadCommitted + (defaultTxnConfig.getReadCommitted()); + newTxnConfig.setConsistencyPolicy + (defaultTxnConfig.getConsistencyPolicy()); + } + if (initRepConfig != null) { + newTxnConfig.setConsistencyPolicy + (initRepConfig.getConsistencyPolicy()); + } + this.defaultTxnConfig = newTxnConfig; + } + + /** + * Creates a new transaction in the database environment. + * + *

+     * <p>Transaction handles are free-threaded; transaction handles may be
+     * used concurrently by multiple threads.

        + * + *

+     * <p>Cursors may not span transactions; that is, each cursor must be
+     * opened and closed within a single transaction. The parent parameter is
+     * a placeholder for nested transactions, and must currently be null.
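+     *
+     * <p>For illustration only, a sketch of the typical usage pattern
+     * (assumes {@code env} is an open, transactional handle and {@code db}
+     * is an open Database):
+     *
+     * <pre>
+     *     Transaction txn = env.beginTransaction(null, null);
+     *     Cursor cursor = db.openCursor(txn, null);
+     *     try {
+     *         // ... read and write records via the cursor ...
+     *     } finally {
+     *         // Close the cursor before the transaction ends.
+     *         cursor.close();
+     *     }
+     *     txn.commit();
+     * </pre>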

        + * + * @param txnConfig The transaction attributes. If null, default + * attributes are used. + * + * @return The newly created transaction's handle. + * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the Master + * in a replicated environment could not contact a quorum of replicas as + * determined by the {@link ReplicaAckPolicy}. + * + * @throws com.sleepycat.je.rep.ReplicaConsistencyException if a replica + * in a replicated environment cannot become consistent within the timeout + * period. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this is not a transactional + * environment. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code TransactionConfig} parameter. + */ + public Transaction beginTransaction(Transaction parent, + TransactionConfig txnConfig) + throws DatabaseException, + IllegalArgumentException { + + try { + return beginTransactionInternal(parent, txnConfig, + false /*isInternalTxn*/); + } catch (Error E) { + invalidate(E); + throw E; + } + } + + /** + * Like beginTransaction, but does not require that the Environment is + * transactional. + */ + Transaction beginInternalTransaction(TransactionConfig txnConfig) { + return beginTransactionInternal(null /*parent*/, txnConfig, + true /*isInternalTxn*/); + } + + /** + * @throws IllegalArgumentException via beginTransaction. + * @throws UnsupportedOperationException via beginTransaction. + */ + private Transaction beginTransactionInternal(Transaction parent, + TransactionConfig txnConfig, + boolean isInternalTxn ) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + if (parent != null) { + throw new IllegalArgumentException + ("Parent txn is non-null. " + + "Nested transactions are not supported."); + } + + if (!isInternalTxn && !envImpl.isTransactional()) { + throw new UnsupportedOperationException + ("Transactions can not be used in a non-transactional " + + "environment"); + } + + checkTxnConfig(txnConfig); + + /* + * Apply txn config defaults. We don't need to clone unless we have to + * apply the env default, since we don't hold onto a txn config + * reference. + */ + TransactionConfig useConfig = null; + if (txnConfig == null) { + useConfig = defaultTxnConfig; + } else { + if (defaultTxnConfig.getNoSync() || + defaultTxnConfig.getWriteNoSync()) { + + /* + * The environment sync settings have been set, check if any + * were set in the user's txn config. If none were set in the + * user's config, apply the environment defaults + */ + if (!txnConfig.getNoSync() && + !txnConfig.getSync() && + !txnConfig.getWriteNoSync()) { + useConfig = txnConfig.clone(); + if (defaultTxnConfig.getWriteNoSync()) { + useConfig.setWriteNoSync(true); + } else { + useConfig.setNoSync(true); + } + } + } + + if ((defaultTxnConfig.getDurability() != null) && + (txnConfig.getDurability() == null)) { + + /* + * Inherit transaction durability from the environment in the + * absence of an explicit transaction config durability. 
+ */ + if (useConfig == null) { + useConfig = txnConfig.clone(); + } + useConfig.setDurability(defaultTxnConfig.getDurability()); + } + + if ((defaultTxnConfig.getConsistencyPolicy() != null) && + (txnConfig.getConsistencyPolicy() == null)) { + if (useConfig == null) { + useConfig = txnConfig.clone(); + } + useConfig.setConsistencyPolicy + (defaultTxnConfig.getConsistencyPolicy()); + } + + /* Apply isolation level default. */ + if (!txnConfig.getSerializableIsolation() && + !txnConfig.getReadCommitted() && + !txnConfig.getReadUncommitted()) { + if (defaultTxnConfig.getSerializableIsolation()) { + if (useConfig == null) { + useConfig = txnConfig.clone(); + } + useConfig.setSerializableIsolation(true); + } else if (defaultTxnConfig.getReadCommitted()) { + if (useConfig == null) { + useConfig = txnConfig.clone(); + } + useConfig.setReadCommitted(true); + } + } + + /* No environment level defaults applied. */ + if (useConfig == null) { + useConfig = txnConfig; + } + } + Txn internalTxn = envImpl.txnBegin(parent, useConfig); + Transaction txn = new Transaction(this, internalTxn); + addReferringHandle(txn); + return txn; + } + + /** + * Checks the txnConfig object to ensure that its correctly configured and + * is compatible with the configuration of the Environment. + * + * @param txnConfig the configuration being checked. + * + * @throws IllegalArgumentException via beginTransaction + */ + private void checkTxnConfig(TransactionConfig txnConfig) + throws IllegalArgumentException { + + if (txnConfig == null) { + return; + } + if ((txnConfig.getSerializableIsolation() && + txnConfig.getReadUncommitted()) || + (txnConfig.getSerializableIsolation() && + txnConfig.getReadCommitted()) || + (txnConfig.getReadUncommitted() && + txnConfig.getReadCommitted())) { + throw new IllegalArgumentException + ("Only one may be specified: SerializableIsolation, " + + "ReadCommitted or ReadUncommitted"); + } + if ((txnConfig.getDurability() != null) && + ((defaultTxnConfig.getSync() || + defaultTxnConfig.getNoSync() || + defaultTxnConfig.getWriteNoSync()))) { + throw new IllegalArgumentException + ("Mixed use of deprecated durability API for the " + + "Environment with the new durability API for " + + "TransactionConfig.setDurability()"); + } + if ((defaultTxnConfig.getDurability() != null) && + ((txnConfig.getSync() || + txnConfig.getNoSync() || + txnConfig.getWriteNoSync()))) { + throw new IllegalArgumentException + ("Mixed use of new durability API for the " + + "Environment with the deprecated durability API for " + + "TransactionConfig."); + } + } + + /** + * Synchronously checkpoint the database environment. + *

        + * This is an optional action for the application since this activity + * is, by default, handled by a database environment owned background + * thread. + *

        + * A checkpoint has the side effect of flushing all preceding + * non-transactional write operations, as well as any preceding + * transactions that were committed with {@link + * Durability.SyncPolicy#NO_SYNC no-sync durability}. However, for best + * performance, checkpoints should be used only to bound recovery time. + * {@link #flushLog} can be used to write buffered data for durability + * purposes. + * + * @param ckptConfig The checkpoint attributes. If null, default + * attributes are used. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws DiskLimitException if the checkpoint cannot be performed + * because a disk limit has been violated. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public void checkpoint(CheckpointConfig ckptConfig) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + if (ckptConfig == null) { + ckptConfig = CheckpointConfig.DEFAULT; + } + + try { + envImpl.invokeCheckpoint(ckptConfig, "api"); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Synchronously flushes database environment databases to stable storage. + * Calling this method is equivalent to forcing a checkpoint and setting + * {@link CheckpointConfig#setMinimizeRecoveryTime} to true. + *

        + * A checkpoint has the side effect of flushing all preceding + * non-transactional write operations, as well as any preceding + * transactions that were committed with {@link + * Durability.SyncPolicy#NO_SYNC no-sync durability}. However, for best + * performance, checkpoints should be used only to bound recovery time. + * {@link #flushLog} can be used to write buffered data for durability + * purposes. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws DiskLimitException if the sync cannot be performed + * because a disk limit has been violated. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public void sync() + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + final CheckpointConfig config = new CheckpointConfig(); + config.setForce(true); + config.setMinimizeRecoveryTime(true); + + envImpl.invokeCheckpoint(config, "sync"); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Writes buffered data to the log, and optionally performs an fsync to + * guarantee that data is written to the physical device. + *

+     * <p>This method is used to make durable, by writing to the log, all
+     * preceding non-transactional write operations, as well as any preceding
+     * transactions that were committed with {@link
+     * Durability.SyncPolicy#NO_SYNC no-sync durability}. If the {@code fsync}
+     * parameter is true, it can also be used to flush all logged data to the
+     * physical storage device, by performing an fsync.
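+     *
+     * <p>For illustration only, a sketch of pairing no-sync commits with a
+     * periodic flush (assumes {@code env} and {@code txn} already exist):
+     *
+     * <pre>
+     *     // Commit with no-sync durability for throughput.
+     *     txn.commit(Durability.COMMIT_NO_SYNC);
+     *
+     *     // Later, make all preceding no-sync commits durable.
+     *     env.flushLog(true);  // true: fsync as well as write
+     * </pre>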

        + * Note that this method does not flush previously unwritten data + * in deferred-write databases; that is done by calling {@link + * Database#sync} or performing a checkpoint. + * + * @param fsync is true to perform an fsync as well as a file write, or + * false to perform only a file write. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public void flushLog(boolean fsync) { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + envImpl.flushLog(fsync); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Synchronously invokes log file (data file) cleaning until the target + * disk space utilization has been reached; this method is called + * periodically by the cleaner background threads. + * + *

        Zero or more log files will be cleaned as necessary to bring the + * current {@link EnvironmentStats#getCurrentMinUtilization disk space + * utilization} of the environment above the configured {@link + * EnvironmentConfig#CLEANER_MIN_UTILIZATION utilization threshold}. + * + *

+     * <p>Note that this method does not perform the complete task of cleaning
+     * a log file. Eviction and checkpointing log the Btree information that
+     * is marked dirty by the cleaner, and a full checkpoint following
+     * cleaning is necessary before cleaned files will be deleted (or
+     * renamed). Checkpoints occur periodically and when the environment is
+     * closed.

        + * + *

        This is an optional action for the application since this activity + * is, by default, handled by one or more Environment-owned background + * threads.

        + * + *

+     * <p>The intended use case for the {@code cleanLog} method is when the
+     * application wishes to disable the built-in cleaner threads using the
+     * {@link EnvironmentConfig#ENV_RUN_CLEANER} property. To replace the
+     * functionality of the cleaner threads, the application should call
+     * {@code cleanLog} periodically.
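+     *
+     * <p>For illustration only, a sketch of such a periodic cleaning task
+     * (assumes {@code env} is open and ENV_RUN_CLEANER was set to "false"):
+     *
+     * <pre>
+     *     int nCleaned = env.cleanLog();
+     *     if (nCleaned &gt; 0) {
+     *         // Optionally force a checkpoint so that the cleaned files
+     *         // can be deleted sooner.
+     *         env.checkpoint(new CheckpointConfig().setForce(true));
+     *     }
+     * </pre>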

        + * + *

        Note that because this method cleans multiple files before returning, + * in an attempt to reach the target utilization, it may not return for a + * long time when there is a large {@link + * EnvironmentStats#getCleanerBacklog backlog} of files to be cleaned. This + * method cannot be aborted except by closing the environment. If the + * application needs the ability to abort the cleaning process, the + * {@link #cleanLogFile} method should be used instead.

        + * + *

        Note that in certain unusual situations the cleaner may not be able + * to make forward progress and the target utilization will never be + * reached. For example, this can occur if the target utilization is set + * too high or checkpoints are performed too often. To guard against + * cleaning "forever", this method will return when all files have been + * cleaned, even when the target utilization has not been reached.

        + * + * @return The number of log files that were cleaned, and that will be + * deleted (or renamed) when a qualifying checkpoint occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this is a read-only or + * memory-only environment. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public int cleanLog() + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + return envImpl.invokeCleaner(true /*cleanMultipleFiles*/); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Synchronously invokes cleaning of a single log file (data file), if + * the target disk space utilization has not been reached. + * + *

        One log file will be cleaned if the current {@link + * EnvironmentStats#getCurrentMinUtilization disk space utilization} of the + * environment is below the configured {@link + * EnvironmentConfig#CLEANER_MIN_UTILIZATION utilization threshold}. No + * files will be cleaned if disk space utilization is currently above the + * threshold. The lowest utilized file is selected for cleaning, since it + * has the lowest cleaning cost.

        + * + *

+     * <p>Note that this method does not perform the complete task of cleaning
+     * a log file. Eviction and checkpointing log the Btree information that
+     * is marked dirty by the cleaner, and a full checkpoint following
+     * cleaning is necessary before cleaned files will be deleted (or
+     * renamed). Checkpoints occur periodically and when the environment is
+     * closed.

        + * + *

+     * <p>The intended use case for the {@code cleanLogFile} method is "batch
+     * cleaning". This is when the application disables the cleaner threads
+     * (using the {@link EnvironmentConfig#ENV_RUN_CLEANER} property)
+     * for maximum performance during active periods, and calls {@code
+     * cleanLogFile} during periods when the application is quiescent or less
+     * active than usual. Similarly, there may be times when an application
+     * wishes to perform cleaning explicitly until the target utilization is
+     * reached, rather than relying on the cleaner's background threads. For
+     * example, some applications may wish to perform batch cleaning prior to
+     * closing the environment, to reclaim as much disk space as possible at
+     * that time.

        + * + *

        To clean until the target utilization threshold is reached, {@code + * cleanLogFile} can be called in a loop until it returns {@code false}. + * When there is a large {@link EnvironmentStats#getCleanerBacklog + * backlog} of files to be cleaned, the application may wish to limit the + * amount of cleaning. Batch cleaning can be aborted simply by breaking out + * of the loop. The cleaning of a single file is not a long operation; it + * should take several minutes at most. For example:

        + * + *
        +     *     boolean cleaningAborted;
        +     *     boolean anyCleaned = false;
        +     *
        +     *     while (!cleaningAborted && env.cleanLogFile()) {
        +     *         anyCleaned = true;
        +     *     }
        +     * 
        + * + *

        Note that in certain unusual situations the cleaner may not be able + * to make forward progress and the target utilization will never be + * reached. For example, this can occur if the target utilization is set + * too high or checkpoints are performed too often. To guard against + * cleaning "forever", the application may wish to cancel the batch + * cleaning (break out of the loop) when the cleaning time or number of + * files cleaned exceeds some reasonable limit.

        + * + *

        As mentioned above, the cleaned log files will not be deleted until + * the next full checkpoint. If the application wishes to reclaim this disk + * space as soon as possible, an explicit checkpoint may be performed after + * the batch cleaning operation. For example:

        + * + *
        +     *     if (anyCleaned) {
        +     *         env.checkpoint(new CheckpointConfig().setForce(true));
        +     *     }
        +     * 
        + * + *

        However, even an explicit checkpoint is not guaranteed to delete the + * cleaned log files if, at the time the file was cleaned, records in the + * file were locked or were part of a database that was being removed, due + * to concurrent application activity that was accessing records or + * removing databases. In this case the files will be deleted only after + * these operations are complete and a subsequent checkpoint is performed. + * To guarantee that the cleaned files will be deleted, an application may + * stop all concurrent activity (ensure all operations and transactions + * have ended) and then perform a checkpoint.

        + * + *

        When closing the environment and minimizing recovery time is desired + * (see {@link #close}), as well as reclaiming disk space, the recommended + * procedure is as follows:

        + + *
        +     *     // Stop/finish all application operations that are using JE.
        +     *     ...
        +     *
        +     *     // Stop the cleaner daemon threads.
        +     *     EnvironmentMutableConfig config = env.getMutableConfig();
        +     *     config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
        +     *     env.setMutableConfig(config);
        +     *
        +     *     // Perform batch cleaning.
        +     *     while (!cleaningAborted && env.cleanLogFile()) {
        +     *     }
        +     *
        +     *     // Perform an extra checkpoint
        +     *     env.checkpoint(new CheckpointConfig().setForce(true));
        +     *
        +     *     // Finally, close the environment.
        +     *     env.close();
        +     * 
        + * + * @return true if one log was cleaned, or false if none were cleaned. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if this is a read-only or + * memory-only environment. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public boolean cleanLogFile() + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + return envImpl.invokeCleaner(false /*cleanMultipleFiles*/) > 0; + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Synchronously invokes the mechanism for keeping memory usage within the + * cache size boundaries. + * + *

        This is an optional action for the application since this activity + * is, by default, handled by a database environment owned background + * thread.

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public void evictMemory() + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + envImpl.invokeEvictor(); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Synchronously invokes the compressor mechanism which compacts in memory + * data structures after delete operations. + * + *

        This is an optional action for the application since this activity + * is, by default, handled by a database environment owned background + * thread.

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public void compress() + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + envImpl.invokeCompressor(); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Preloads the cache with multiple databases. This method should only be + * called when there are no operations being performed on the specified + * databases in other threads. Executing preload during concurrent updates + * of the specified databases may result in some or all of the tree being + * loaded into the JE cache. Executing preload during any other types of + * operations may result in JE exceeding its allocated cache + * size. preload() effectively locks all of the specified database and + * therefore will lock out the checkpointer, cleaner, and compressor, as + * well as not allow eviction to occur. If databases are replicated and + * the environment is in the replica state, then the replica may become + * temporarily disconnected from the master if the replica needs to replay + * changes against the database and is locked out because the time taken by + * the preload operation exceeds {@link + * com.sleepycat.je.rep.ReplicationConfig#FEEDER_TIMEOUT}. + * + * @param config The PreloadConfig object that specifies the parameters + * of the preload. + * + * @return A PreloadStats object with the result of the preload operation + * and various statistics about the preload() operation. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if any of the databases has been closed. + * + * @see Database#preload(PreloadConfig) + */ + public PreloadStats preload(final Database[] databases, + PreloadConfig config) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + DatabaseUtil.checkForZeroLengthArrayParam(databases, "databases"); + + if (config == null) { + config = new PreloadConfig(); + } + + try { + final int nDbs = databases.length; + final DatabaseImpl[] dbImpls = new DatabaseImpl[nDbs]; + for (int i = 0; i < nDbs; i += 1) { + dbImpls[i] = DbInternal.getDbImpl(databases[i]); + } + return envImpl.preload(dbImpls, config); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * + * Create a DiskOrderedCursor to iterate over the records of a given set + * of databases. Because the retrieval is based on Log Sequence Number + * (LSN) order rather than key order, records are returned in unsorted + * order in exchange for generally faster retrieval. LSN order + * approximates disk sector order. + *

        + * See {@link DiskOrderedCursor} for more details and a description of the + * consistency guarantees provided by the scan. + *

+     * <p>WARNING: After calling this method, deletion of log files by
+     * the JE log cleaner will be disabled until {@link
+     * DiskOrderedCursor#close()} is called. To prevent unbounded growth of
+     * disk usage, be sure to call {@link DiskOrderedCursor#close()} to
+     * re-enable log file deletion.
+     *
+     * @param databases An array containing the handles of the databases that
+     * are to be scanned. All these handles must be currently open.
+     * Furthermore, all the databases must belong to this environment, and
+     * either all of them should support duplicates or none of them should.
+     * Note: this method does not make a copy of this array,
+     * and as a result, the contents of the array should not be modified
+     * while the returned DiskOrderedCursor is still in use.
+     *
+     * @param config The DiskOrderedCursorConfig object that specifies the
+     * parameters of the disk ordered scan.
+     *
+     * @return the new DiskOrderedCursor object.
+     *
+     * @throws IllegalArgumentException if (a) the databases parameter is
+     * null or an empty array, or (b) any of the handles in the databases
+     * parameter is null, or (c) the databases do not all belong to this
+     * environment, or (d) some databases support duplicates and some don't.
+     *
+     * @throws IllegalStateException if any of the databases has been
+     * closed or invalidated.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    public DiskOrderedCursor openDiskOrderedCursor(
+        final Database[] databases,
+        DiskOrderedCursorConfig config)
+        throws DatabaseException {
+
+        final EnvironmentImpl envImpl = checkOpen();
+
+        DatabaseUtil.checkForZeroLengthArrayParam(databases, "databases");
+
+        if (config == null) {
+            config = DiskOrderedCursorConfig.DEFAULT;
+        }
+
+        try {
+            int nDbs = databases.length;
+
+            for (int i = 0; i < nDbs; i += 1) {
+
+                if (databases[i] == null) {
+                    throw new IllegalArgumentException(
+                        "The handle at position " + i + " of the databases " +
+                        "array is null.");
+                }
+
+                if (databases[i].getEnvironment() != this) {
+                    throw new IllegalArgumentException(
+                        "The handle at position " + i + " of the databases " +
+                        "array points to a database that does not belong " +
+                        "to this environment");
+                }
+            }
+
+            return new DiskOrderedCursor(databases, config);
+
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+
+    /**
+     * Returns this object's configuration.
+     *
+     * @return This object's configuration.
+     *

        Unlike most Environment methods, this method may be called if the + * environment is invalid, but not yet closed.

        + * + * @throws IllegalStateException if this handle has been closed. + */ + public EnvironmentConfig getConfig() + throws DatabaseException { + + final EnvironmentImpl envImpl = getNonNullEnvImpl(); + + try { + final EnvironmentConfig config = envImpl.cloneConfig(); + handleConfig.copyHandlePropsTo(config); + config.fillInEnvironmentGeneratedProps(envImpl); + return config; + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Sets database environment attributes. + * + *

        Attributes only apply to a specific Environment object and are not + * necessarily shared by other Environment objects accessing this + * database environment.

        + * + *

+     * <p>Unlike most Environment methods, this method may be called if the
+     * environment is invalid, but not yet closed.
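+     *
+     * <p>For illustration only, a sketch of the read-modify-write pattern
+     * for mutable attributes (assumes {@code env} is an open handle):
+     *
+     * <pre>
+     *     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+     *     mutableConfig.setConfigParam(
+     *         EnvironmentConfig.ENV_RUN_CLEANER, "false");
+     *     env.setMutableConfig(mutableConfig);
+     * </pre>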

        + * + * @param mutableConfig The database environment attributes. If null, + * default attributes are used. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle has been closed. + */ + public synchronized void setMutableConfig( + EnvironmentMutableConfig mutableConfig) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + DatabaseUtil.checkForNullParam(mutableConfig, "mutableConfig"); + + /* + * This method is synchronized so that we atomically call both + * EnvironmentImpl.setMutableConfig and copyToHandleConfig. This + * ensures that the handle and the EnvironmentImpl properties match. + */ + try { + + /* + * Change the mutable properties specified in the given + * configuration. + */ + envImpl.setMutableConfig(mutableConfig); + + /* Reset the handle config properties. */ + copyToHandleConfig(mutableConfig, null, null); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Returns database environment attributes. + * + *

        Unlike most Environment methods, this method may be called if the + * environment is invalid, but not yet closed.

        + * + * @return Environment attributes. + * + * @throws IllegalStateException if this handle has been closed. + */ + public EnvironmentMutableConfig getMutableConfig() + throws DatabaseException { + + final EnvironmentImpl envImpl = getNonNullEnvImpl(); + + try { + final EnvironmentMutableConfig config = + envImpl.cloneMutableConfig(); + handleConfig.copyHandlePropsTo(config); + config.fillInEnvironmentGeneratedProps(envImpl); + return config; + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Returns the general database environment statistics. + * + * @param config The general statistics attributes. If null, default + * attributes are used. + * + * @return The general database environment statistics. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public EnvironmentStats getStats(StatsConfig config) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + if (config == null) { + config = StatsConfig.DEFAULT; + } + + try { + return envImpl.loadStats(config); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Returns the database environment's locking statistics. + * + * @param config The locking statistics attributes. If null, default + * attributes are used. + * + * @return The database environment's locking statistics. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + * + * @deprecated as of 4.0.10, replaced by {@link + * Environment#getStats(StatsConfig)}.

        + */ + public LockStats getLockStats(StatsConfig config) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + if (config == null) { + config = StatsConfig.DEFAULT; + } + + try { + return envImpl.lockStat(config); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Returns the database environment's transactional statistics. + * + * @param config The transactional statistics attributes. If null, + * default attributes are used. + * + * @return The database environment's transactional statistics. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public TransactionStats getTransactionStats(StatsConfig config) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + if (config == null) { + config = StatsConfig.DEFAULT; + } + + try { + return envImpl.txnStat(config); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Returns a List of database names for the database environment. + * + *

        Each element in the list is a String.

+     *
+     * @return A List of database names for the database environment.
+     *
+     * @throws OperationFailureException if one of the Read Operation
+     * Failures occurs.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     *
+     * @throws IllegalStateException if this handle or the underlying
+     * environment has been closed.
+     */
+    public List<String> getDatabaseNames()
+        throws DatabaseException {
+
+        final EnvironmentImpl envImpl = checkOpen();
+
+        try {
+            return envImpl.getDbTree().getDbNames();
+        } catch (Error E) {
+            envImpl.invalidate(E);
+            throw E;
+        }
+    }
+
+    /**
+     * Returns whether the database environment is consistent and correct.
+     *

        Verification is an expensive operation that should normally only be + * used for troubleshooting and debugging.

        + * + * @param config The verification attributes. If null, default + * attributes are used. + * + * @param out is unused. To specify the output stream for verification + * information, use {@link VerifyConfig#setShowProgressStream}. + * + * @return true if the database environment is consistent and correct. + * Currently true is always returned when this method returns normally, + * i.e., when no exception is thrown. + * + * @throws EnvironmentFailureException if a corruption is detected, or if + * an unexpected, internal or environment-wide failure occurs. If a + * persistent corruption is detected, + * {@link EnvironmentFailureException#isCorrupted()} will return true. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public boolean verify(VerifyConfig config, + @SuppressWarnings("unused") PrintStream out) + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + if (config == null) { + config = VerifyConfig.DEFAULT; + } + + try { + envImpl.verify(config); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + + return true; + } + + /** + * Returns the transaction associated with this thread if implied + * transactions are being used. Implied transactions are used in an XA or + * JCA "Local Transaction" environment. In an XA environment the + * XAEnvironment.start() entrypoint causes a transaction to be created and + * become associated with the calling thread. Subsequent API calls + * implicitly use that transaction. XAEnvironment.end() causes the + * transaction to be disassociated with the thread. In a JCA Local + * Transaction environment, the call to JEConnectionFactory.getConnection() + * causes a new transaction to be created and associated with the calling + * thread. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public Transaction getThreadTransaction() + throws DatabaseException { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + return envImpl.getTxnManager().getTxnForThread(); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Sets the transaction associated with this thread if implied transactions + * are being used. Implied transactions are used in an XA or JCA "Local + * Transaction" environment. In an XA environment the + * XAEnvironment.start() entrypoint causes a transaction to be created and + * become associated with the calling thread. Subsequent API calls + * implicitly use that transaction. XAEnvironment.end() causes the + * transaction to be disassociated with the thread. In a JCA Local + * Transaction environment, the call to JEConnectionFactory.getConnection() + * causes a new transaction to be created and associated with the calling + * thread. + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public void setThreadTransaction(Transaction txn) { + + final EnvironmentImpl envImpl = checkOpen(); + + try { + envImpl.getTxnManager().setTxnForThread(txn); + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * Returns whether this {@code Environment} is open, valid and can be used. + * + *

        When an {@link EnvironmentFailureException}, or one of its + * subclasses, is caught, the {@code isValid} method can be called to + * determine whether the {@code Environment} can continue to be used, or + * should be closed. Some EnvironmentFailureExceptions invalidate the + * environment and others do not.

        + * + *

+     * <p>If this method returns false, the environment may have been closed
+     * by the application, or may have been invalidated by an exception and
+     * not yet closed. The {@link #isClosed()} method may be used to
+     * distinguish between these two cases, and {@link
+     * #getInvalidatingException()} can be used to return the exception. Note
+     * that it is safe to call {@link #close} redundantly, so it is safe to
+     * always call {@link #close} when this method returns false.
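+     *
+     * <p>For illustration only, a sketch of a recovery check (assumes
+     * {@code env} is an Environment handle held by the application):
+     *
+     * <pre>
+     *     if (!env.isValid()) {
+     *         EnvironmentFailureException cause =
+     *             env.getInvalidatingException();
+     *         // Report the cause if non-null, then close; close may be
+     *         // called redundantly.
+     *         env.close();
+     *     }
+     * </pre>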

        + */ + public boolean isValid() { + final EnvironmentImpl envImpl = environmentImpl; + return envImpl != null && envImpl.isValid(); + } + + /** + * Returns whether the environment has been closed by the application. + * + *

+     * <p>If this method returns true, {@link #close()} has been called. If
+     * the environment was previously invalidated by an exception, it will be
+     * returned by {@link #getInvalidatingException()}.

        + * + *

        If this method returns false, the environment may or may not be + * usable, since it may have been invalidated by an exception but not yet + * closed. To determine whether it was invalidated, call {@link #isValid()} + * or {@link #getInvalidatingException()}.

        + * + * @return whether the environment has been closed by the application. + * + * @since 7.2 + */ + public boolean isClosed() { + final EnvironmentImpl envImpl = environmentImpl; + return envImpl == null || envImpl.isClosed(); + } + + /** + * Returns the exception that caused the environment to be invalidated, or + * null if the environment was not invalidated by an exception. + * + *

        This method may be used to determine whether the environment was + * invalidated by an exception, by checking for a non-null return value. + * This method will return the invalidating exception, regardless of + * whether the environment is closed. Note that {@link #isValid()} will + * return false when the environment is closed, even when it was not + * invalidated by an exception.

        + * + *

        This method may also be used to identify and handle the original + * invalidating exception, when more than one exception is thrown. When an + * environment is first invalidated by an EnvironmentFailureException, the + * exception is saved so that it can be returned by this method. Other + * EnvironmentFailureExceptions may be thrown later as side effects of the + * original problem, or possibly as separate problems. It is normally the + * first invalidating exception that is most relevant.

        + * + * @return the invalidating exception or null. + * + * @since 7.2 + */ + public EnvironmentFailureException getInvalidatingException() { + assert invalidatingEFE != null; + return invalidatingEFE.get(); + } + + /** + * Print a detailed report about the costs of different phases of + * environment startup. This report is by default logged to the je.info + * file if startup takes longer than je.env.startupThreshold. + * + *

        Unlike most Environment methods, this method may be called if the + * environment is invalid, but not yet closed.

        + * + * @throws IllegalStateException if this handle or the underlying + * environment has been closed. + */ + public void printStartupInfo(PrintStream out) { + final EnvironmentImpl envImpl = getNonNullEnvImpl(); + envImpl.getStartupTracker().displayStats(out, Phase.TOTAL_ENV_OPEN); + } + + /* + * Non public api -- helpers + */ + + /** + * Let the Environment remember what's opened against it. + */ + private void addReferringHandle(Database db) { + referringDbs.put(db, db); + } + + /** + * Lets the Environment remember what's opened against it. + */ + private void addReferringHandle(Transaction txn) { + referringDbTxns.put(txn, txn); + } + + /** + * The referring db has been closed. + */ + void removeReferringHandle(Database db) { + referringDbs.remove(db); + } + + /** + * The referring Transaction has been closed. + */ + void removeReferringHandle(Transaction txn) { + referringDbTxns.remove(txn); + } + + /** + * @throws EnvironmentFailureException if the underlying environment is + * invalid. + * @throws IllegalStateException if the environment is not open. + */ + EnvironmentImpl checkOpen() { + final EnvironmentImpl envImpl = getNonNullEnvImpl(); + envImpl.checkOpen(); + return envImpl; + } + + /** + * Returns the non-null, underlying EnvironmentImpl. + * + * This method is called to access the environmentImpl field, to guard + * against NPE when the environment has been closed. + * + * This method does not check whether the env is valid. For API method + * calls, checkOpen is called at API entry points to check validity. The + * validity of the env should also be checked before critical operations + * (e.g., disk writes), after idle periods, and periodically during time + * consuming operations. + * + * @throws IllegalStateException if the env has been closed. + */ + EnvironmentImpl getNonNullEnvImpl() { + + final EnvironmentImpl envImpl = environmentImpl; + + if (envImpl == null) { + throw new IllegalStateException("Environment is closed."); + } + + return envImpl; + } + + /** + * Returns the underlying EnvironmentImpl, or null if the env has been + * closed. + * + * WARNING: This method will be phased out over time and normally + * getNonNullEnvImpl should be called instead. + */ + EnvironmentImpl getMaybeNullEnvImpl() { + return environmentImpl; + } + + /* Returns true, if this is a handle allocated internally by JE. */ + protected boolean isInternalHandle() { + return false; + } + + /** + * @throws UnsupportedOperationException via the database operation methods + * (remove, truncate, rename) and potentially other methods that require a + * writable environment. + */ + private void checkWritable(final EnvironmentImpl envImpl ) { + if (envImpl.isReadOnly()) { + throw new UnsupportedOperationException + ("Environment is Read-Only."); + } + } + + void invalidate(Error e) { + final EnvironmentImpl envImpl = environmentImpl; + if (envImpl == null) { + return; + } + envImpl.invalidate(e); + } +} diff --git a/src/com/sleepycat/je/EnvironmentConfig.java b/src/com/sleepycat/je/EnvironmentConfig.java new file mode 100644 index 0000000..9e26015 --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentConfig.java @@ -0,0 +1,4740 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.File; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Handler; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.util.DbCacheSize; +import com.sleepycat.je.util.DbVerify; +import com.sleepycat.je.util.DbVerifyLog; + +/** + * Specifies the attributes of an environment. + * + *

        To change the default settings for a database environment, an application + * creates a configuration object, customizes settings and uses it for + * environment construction. The set methods of this class validate the + * configuration values when the method is invoked. An + * IllegalArgumentException is thrown if the value is not valid for that + * attribute.

        + * + *

+ * <p>Most parameters are described by the parameter name String constants in
+ * this class. These parameters can be specified individually by calling
+ * {@link #setConfigParam}, through a Properties object passed to {@link
+ * #EnvironmentConfig(Properties)}, or via properties in the je.properties
+ * files located in the environment home directory.
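+ *
+ * <p>For illustration only, a sketch of the Properties-based form (the
+ * parameter chosen here is arbitrary):
+ *
+ * <pre>
+ *     Properties props = new Properties();
+ *     props.setProperty(EnvironmentConfig.MAX_MEMORY_PERCENT, "60");
+ *     EnvironmentConfig envConfig = new EnvironmentConfig(props);
+ * </pre>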

        + * + *

+ * <p>For example, an application can change the default lock timeout
+ * setting with:

        + * + *
        + *     envConfig.setConfigParam(EnvironmentConfig.LOCK_TIMEOUT, "250 ms");
        + * 
        + * + *

        Some commonly used environment attributes have convenience setter/getter + * methods defined in this class. For example, to change the default + * lock timeout setting for an environment, the application can instead do + * the following:

        + *
        + *     // customize an environment configuration
        + *     EnvironmentConfig envConfig = new EnvironmentConfig();
        + *     // will throw if timeout value is invalid
        + *     envConfig.setLockTimeout(250, TimeUnit.MILLISECONDS);
        + *     // Open the environment using this configuration.
        + *     Environment myEnvironment = new Environment(home, envConfig);
        + * 
        + * + *

        Parameter values are applied using this order of precedence:

        + *
+ * <ol>
+ * <li>Configuration parameters specified in je.properties take first
+ * precedence.</li>
+ * <li>Configuration parameters set in the EnvironmentConfig object used at
+ * Environment construction are next.</li>
+ * <li>Any configuration parameters not set by the application are set to
+ * system defaults, described along with the parameter name String
+ * constants in this class.</li>
+ * </ol>
        + * + *

        However, a small number of parameters do not have string constants in + * this class, and cannot be set using {@link #setConfigParam}, a Properties + * object, or the je.properties file. These parameters can only be changed + * via the following setter methods:

        + *
+ * <ul>
+ * <li>{@link #setAllowCreate}</li>
+ * <li>{@link #setCacheMode}</li>
+ * <li>{@link #setClassLoader}</li>
+ * <li>{@link #setCustomStats}</li>
+ * <li>{@link #setExceptionListener}</li>
+ * <li>{@link #setLoggingHandler}</li>
+ * <li>{@link #setNodeName}</li>
+ * <li>{@link #setRecoveryProgressListener}</li>
+ * </ul>
        + * + *

        An EnvironmentConfig can be used to specify both mutable and immutable + * environment properties. Immutable properties may be specified when the + * first Environment handle (instance) is opened for a given physical + * environment. When more handles are opened for the same environment, the + * following rules apply:

        + *
+ * <ol>
+ * <li>Immutable properties must equal the original values specified when
+ * constructing an Environment handle for an already open environment. When a
+ * mismatch occurs, an exception is thrown.</li>
+ * <li>Mutable properties are ignored when constructing an Environment handle
+ * for an already open environment.</li>
+ * </ol>
        + * + *

        After an Environment has been constructed, its mutable properties may be + * changed using {@link Environment#setMutableConfig}. See {@link + * EnvironmentMutableConfig} for a list of mutable properties; all other + * properties are immutable. Whether a property is mutable or immutable is + * also described along with the parameter name String constants in this + * class.

        + * + *

        Getting the Current Environment Properties

        + * + * To get the current "live" properties of an environment after constructing it + * or changing its properties, you must call {@link Environment#getConfig} or + * {@link Environment#getMutableConfig}. The original EnvironmentConfig or + * EnvironmentMutableConfig object used to set the properties is not kept up to + * date as properties are changed, and does not reflect property validation or + * properties that are computed. + * + *

        Time Duration Properties

        + * + *

        Several environment and transaction configuration properties are time + * durations. For these properties, a time unit is specified along with an + * integer duration value.

        + * + *

        When specific setter and getter methods exist for a time duration + * property, these methods have a {@link TimeUnit} argument. Examples are + * {@link #setLockTimeout(long,TimeUnit)} and {@link + * #getLockTimeout(TimeUnit)}. Note that the {@link TimeUnit} argument may + * be null only when the duration value is zero; there is no default unit that + * is used when null is specified.

        + * + *

        When a time duration is specified as a string value, the following format + * is used.

        + * + *
+ *    {@code <value> [ <unit> ]}
        + * + *

+ * <p>The {@code <value>} is an integer. The {@code <unit>} name, if present,
+ * must be preceded by one or more spaces or tabs.

        + * + *

+ * <p>The following {@code <unit>} names are allowed. Both {@link TimeUnit}
+ * names and IEEE standard abbreviations are allowed. Unit names are case
+ * insensitive.

+ *
+ * <table border="1">
+ * <tr><th>IEEE abbreviation</th><th>TimeUnit name</th><th>Definition</th></tr>
+ * <tr><td>{@code ns}</td><td>{@code NANOSECONDS}</td>
+ * <td>one billionth (10<sup>-9</sup>) of a second</td></tr>
+ * <tr><td>{@code us}</td><td>{@code MICROSECONDS}</td>
+ * <td>one millionth (10<sup>-6</sup>) of a second</td></tr>
+ * <tr><td>{@code ms}</td><td>{@code MILLISECONDS}</td>
+ * <td>one thousandth (10<sup>-3</sup>) of a second</td></tr>
+ * <tr><td>{@code s}</td><td>{@code SECONDS}</td><td>1 second</td></tr>
+ * <tr><td>{@code min}</td><td>&nbsp;</td><td>60 seconds</td></tr>
+ * <tr><td>{@code h}</td><td>&nbsp;</td><td>3600 seconds</td></tr>
+ * </table>
+ *

Examples are:

    3 seconds
    3 s
    500 ms
    1000000 (microseconds is implied)

The maximum duration value is currently Integer.MAX_VALUE milliseconds. This translates to almost 25 days (2147483647999999 ns, 2147483647999 us, 2147483647 ms, 2147483 s, 35791 min, 596 h).

Note that when the {@code <unit>} is omitted, microseconds is implied. This default is supported for compatibility with JE 3.3 and earlier. In JE 3.3 and earlier, explicit time units were not used and durations were always implicitly specified in microseconds. The older methods that do not have a {@link TimeUnit} argument, such as {@link #setLockTimeout(long)} and {@link #getLockTimeout()}, use microsecond durations and have been deprecated.

public class EnvironmentConfig extends EnvironmentMutableConfig {

    private static final long serialVersionUID = 1L;

    /**
     * @hidden
     * For internal use, to allow null as a valid value for the config
     * parameter.
     */
    public static final EnvironmentConfig DEFAULT = new EnvironmentConfig();

Configures the JE main cache size in bytes.

Either MAX_MEMORY or MAX_MEMORY_PERCENT may be used to configure the cache size. When MAX_MEMORY is zero (its default value), MAX_MEMORY_PERCENT determines the cache size. See {@link #MAX_MEMORY_PERCENT} for more information.

When using MAX_MEMORY, take care to ensure that the overhead of the JVM does not leave less free space in the heap than intended. Some JVMs have more overhead than others, and some JVMs allocate their overhead within the specified heap size (the -Xmx value). To be sure that enough free space is available, use MAX_MEMORY_PERCENT rather than MAX_MEMORY.

When using the Oracle NoSQL DB product

    Name      Type  Mutable  Default  Minimum  Maximum
    {@value}  Long  Yes      0        -none-   -none-

@see #setCacheSize
@see #MAX_MEMORY_PERCENT
@see Cache Statistics: Sizing

    public static final String MAX_MEMORY = "je.maxMemory";

Configures the JE main cache size as a percentage of the JVM maximum memory.

The system will evict database objects when it comes within a prescribed margin of the limit.

By default, JE sets the cache size to:

    (MAX_MEMORY_PERCENT * JVM maximum memory) / 100

where JVM maximum memory is specified by the JVM -Xmx flag. Note that the actual heap size may be somewhat less, depending on JVM overheads. The value used in the calculation above is the actual heap size as returned by {@link Runtime#maxMemory()}.

The above calculation applies when {@link #MAX_MEMORY} is zero, which is its default value. Setting MAX_MEMORY to a non-zero value overrides the percentage-based calculation and sets the cache size explicitly.
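As a sketch of the two sizing styles (the percentage and byte values here are arbitrary):

    import com.sleepycat.je.EnvironmentConfig;

    public class CacheSizing {
        public static EnvironmentConfig percentBased() {
            EnvironmentConfig c = new EnvironmentConfig();
            // With MAX_MEMORY left at its default of zero, the cache size is
            // (75 * Runtime.maxMemory()) / 100.
            c.setCachePercent(75);
            return c;
        }

        public static EnvironmentConfig explicit() {
            EnvironmentConfig c = new EnvironmentConfig();
            // A non-zero byte size overrides the percentage calculation.
            c.setCacheSize(512L * 1024 * 1024); // 512 MB
            return c;
        }
    }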

The following details apply to setting the cache size to a percentage of the JVM heap size (this parameter) as well as to a byte size ({@link #MAX_MEMORY}).

If {@link #SHARED_CACHE} is set to true, MAX_MEMORY and MAX_MEMORY_PERCENT specify the total size of the shared cache, and changing these parameters will change the size of the shared cache. New environments that join the cache may alter the cache size if their configuration uses a different cache size parameter.

The size of the cache is often directly proportional to operation performance. See Cache Statistics for information on understanding and monitoring the cache. It is strongly recommended that the cache is large enough to hold all INs. See {@link DbCacheSize} for information on sizing the cache.

To take full advantage of JE cache memory, it is strongly recommended that compressed oops (-XX:+UseCompressedOops) is specified when a 64-bit JVM is used and the maximum heap size is less than 32 GB. As described in the referenced documentation, compressed oops is sometimes the default JVM mode even when it is not explicitly specified in the Java command. However, if compressed oops is desired then it must be explicitly specified in the Java command when running DbCacheSize or a JE application. If it is not explicitly specified then JE will not be aware of it, even if it is the JVM default setting, and will not take it into account when calculating cache memory sizes.

Note that log write buffers may be flushed to disk if the cache size is changed after the environment has been opened.

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  Yes      60       1        90

@see #setCachePercent
@see #MAX_MEMORY
@see Cache Statistics: Sizing

    public static final String MAX_MEMORY_PERCENT = "je.maxMemoryPercent";

Configures the number of bytes to be used as a secondary, off-heap cache.

The off-heap cache is used to hold record data and Btree nodes when these are evicted from the "main cache" because it overflows. Eviction occurs according to an LRU algorithm and takes into account the user-specified {@link CacheMode}. When the off-heap cache overflows, eviction occurs there also according to the same algorithm.

The main cache is in the Java heap and consists primarily of the Java objects making up the in-memory Btree data structure. Btree objects are not serialized in the main cache, so no object materialization is needed to access the Btree there. Access to records in the main cache is therefore very fast, but the main cache has drawbacks as well: 1) The larger the main cache, the more likely it is to have Java GC performance problems. 2) When the Java heap exceeds 32GB, the "compressed OOPs" setting no longer applies and less data will fit in the same amount of memory. For these reasons, JE applications often configure a heap of 32GB or less, and a main cache that is significantly less than 32GB, leaving any additional machine memory for use by the file system cache.

The use of the file system cache has performance benefits, but also has its own drawbacks: 1) There is significant redundancy between the main cache and the file system cache, because all data and Btree information that is logged (written) by JE appears in the file system and may also appear in the main cache. 2) It is not possible for dirty Btree information to be placed in the file system cache without logging it; this logging may be otherwise unnecessary, and it creates additional work for the JE cleaner. In other words, the size of the main cache alone determines the maximum size of the in-memory "dirty set".

The off-heap cache is stored outside the Java heap using a native platform memory allocator. The current implementation relies on internals that are specific to the Oracle and IBM JDKs; however, a memory allocator interface that can be implemented for other situations is being considered for a future release. Records and Btree objects are serialized when they are placed in the off-heap cache, and they must be materialized when they are moved back to the main cache in order to access them. This serialization and materialization adds some CPU overhead and thread contention, as compared to accessing data directly in the main cache. The off-heap cache can contain dirty Btree information, so it can be used to increase the maximum size of the in-memory "dirty set".

NOTE: If an off-heap cache is configured but cannot be used because the native allocator is not available in the JDK that is used, an {@code IllegalStateException} will be thrown by the {@link Environment} or {@link com.sleepycat.je.rep.ReplicatedEnvironment} constructor. In the current release, this means that the {@code sun.misc.Unsafe} class must contain the {@code allocateMemory} method and related methods, as defined in the Oracle JDK.

When configuring an off-heap cache you can think of the performance trade-offs in two ways. First, if the off-heap cache is considered to be a replacement for the file system cache, the serialization and materialization overhead is not increased. In this case, the use of the off-heap cache is clearly beneficial, and using the off-heap cache "instead of" the file system cache is normally recommended. Second, the off-heap cache can be used along with a main cache that is reduced in size in order to compensate for Java GC problems. In this case, the trade-off is between the additional serialization, materialization and contention overheads of the off-heap cache, as compared to the Java GC overhead.

When dividing up available memory for the JVM heap, the off-heap cache, and other uses, please be aware that the file system cache and the off-heap cache are different in one important respect. The file system cache automatically shrinks when memory is needed by the OS or other processes, while the off-heap cache does not. Therefore, it is best to be conservative about leaving memory free for other uses, and it is not a good idea to size the off-heap cache such that all machine memory will be allocated. If off-heap allocations or other allocations fail because there is no available memory, the process is likely to die without any exception being thrown. In one test on Linux, for example, the process was killed abruptly by the OS and the only indication of the problem was the following shown by {@code dmesg}.

    Out of memory: Kill process 28768 (java) score 974 or sacrifice child
    Killed process 28768 (java)
       total-vm:278255336kB, anon-rss:257274420kB, file-rss:0kB

WARNING: Although this configuration property is mutable, it cannot be changed from zero to non-zero, or non-zero to zero. In other words, the size of the off-heap cache can be changed after initially configuring a non-zero size, but the off-heap cache cannot be turned on and off dynamically. An attempt to do so will cause an {@code IllegalArgumentException} to be thrown by the {@link Environment} or {@link com.sleepycat.je.rep.ReplicatedEnvironment} constructor.
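A minimal sketch of enabling the off-heap cache at first environment open (the 1 GB size is arbitrary):

    import com.sleepycat.je.EnvironmentConfig;

    public class OffHeapSetup {
        public static EnvironmentConfig configure() {
            EnvironmentConfig c = new EnvironmentConfig();
            c.setAllowCreate(true);
            // Must be non-zero at the first open if the off-heap cache will
            // ever be used; it can then be resized (but not disabled) later.
            c.setOffHeapCacheSize(1024L * 1024 * 1024); // 1 GB
            return c;
        }
    }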

    Name      Type  Mutable  Default  Minimum  Maximum
    {@value}  Long  Yes      0        0        -none-

@see #setOffHeapCacheSize(long)
@see Cache Statistics: Sizing

    public static final String MAX_OFF_HEAP_MEMORY = "je.maxOffHeapMemory";

If true, the shared cache is used by this environment.

By default this parameter is false and this environment uses a private cache. If this parameter is set to true, this environment will use a cache that is shared with all other open environments in this process that also set this parameter to true. There is a single shared cache per process.

By using the shared cache, multiple open environments will make better use of memory because the cache LRU algorithm is applied across all information in all environments sharing the cache. For example, if one environment is open but not recently used, then it will only use a small portion of the cache, leaving the rest of the cache for environments that have been recently used.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false
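A sketch of two environments sharing one process-wide cache; the directories are illustrative:

    import java.io.File;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class SharedCacheSetup {
        public static void main(String[] args) {
            EnvironmentConfig c = new EnvironmentConfig();
            c.setAllowCreate(true);
            c.setSharedCache(true);   // both environments share one LRU cache
            c.setCachePercent(50);    // total size of the shared cache

            Environment envA = new Environment(new File("/tmp/env-a"), c);
            Environment envB = new Environment(new File("/tmp/env-b"), c);
            // ... use the environments ...
            envB.close();
            envA.close();
        }
    }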

@see #setSharedCache
@see Cache Statistics: Sizing

    public static final String SHARED_CACHE = "je.sharedCache";

An upper limit on the number of bytes used for data storage. Works with {@link #FREE_DISK} to define the storage limit. If the limit is exceeded, write operations will be prohibited.

If set to zero (the default), no usage limit is enforced, meaning that all space on the storage volume, minus {@link #FREE_DISK}, may be used. If MAX_DISK is non-zero, FREE_DISK is subtracted from MAX_DISK to determine the usage threshold for prohibiting write operations. If multiple JE environments share the same storage volume, setting MAX_DISK to a non-zero value is strongly recommended.

Note: An exception to the rule above is when MAX_DISK is less than or equal to 10GB and FREE_DISK is not explicitly specified. See {@link #FREE_DISK} for more information.

Both the FREE_DISK and MAX_DISK thresholds (if configured) are checked during a write operation. If either threshold is crossed, the behavior of the JE environment is as follows (see the sketch after the scenarios below for an example of handling the exception):

• Application write operations will throw {@link DiskLimitException}. DiskLimitException extends {@link OperationFailureException} and will invalidate the transaction, but will not invalidate the environment. Read operations may continue even when write operations are prohibited.
• When using NoSQL DB, the above item applies to client CRUD operations as well as operations performed on internal metadata. When a disk limit is violated, NoSQL DB will throw exceptions for client write operations and for operations that update internal metadata. Related exceptions may be logged for other internal write operations. Such exceptions will be derived from the JE DiskLimitException.
• {@link Environment#checkpoint}, {@link Environment#sync} and {@link Database#sync} will throw DiskLimitException.
• {@link Environment#close} may throw DiskLimitException when a final checkpoint is performed. However, the environment will be properly closed in other respects.
• The JE evictor will not log dirty nodes when the cache overflows, and therefore dirty nodes cannot be evicted from cache. So although read operations are allowed, cache thrashing may occur if all INs do not fit in cache as {@link DbCacheSize recommended}.
• In an HA environment a disk limit may be violated on a replica node but not the master node. In this case, a DiskLimitException will not be thrown by a write operation on the master node. Instead, {@link com.sleepycat.je.rep.InsufficientAcksException} or {@link com.sleepycat.je.rep.InsufficientReplicasException} will be thrown if the {@link Durability#getReplicaAck() ack requirements} are not met.

JE uses a log-structured storage system where data files often become gradually obsolete over time (see {@link #CLEANER_MIN_UTILIZATION}). The JE cleaner is responsible for reclaiming obsolete space by cleaning and deleting data files. In a standalone (non-HA) environment, data files are normally deleted quickly after being cleaned, but may be reserved and protected temporarily by a {@link com.sleepycat.je.util.DbBackup} or {@link DiskOrderedCursor}. These reserved files will be deleted as soon as they are no longer protected.

In an HA environment, JE will retain as many reserved files as possible to support replication to nodes that are out of contact. All cleaned files are reserved (not deleted) until a disk limit is approached, at which time they are deleted, as long as they are not protected. Reserved files are protected when they are needed for replication to active nodes or for feeding an active network restore.

For more information on reserved and protected data files, see {@link EnvironmentStats#getActiveLogSize()}, {@link EnvironmentStats#getReservedLogSize()}, {@link EnvironmentStats#getProtectedLogSize()}, {@link EnvironmentStats#getProtectedLogSizeMap()}, {@link EnvironmentStats#getAvailableLogSize()} and {@link EnvironmentStats#getTotalLogSize}.

When multiple JE environments share the same storage volume, the FREE_DISK amount will be maintained for each environment. The following scenario illustrates use of a single shared volume with capacity 300GB:

• JE-1 and JE-2 each have MAX_DISK=100GB and FREE_DISK=5GB, and
• 100GB is used for fixed miscellaneous storage.

Each JE environment will use no more than 95GB, so at least 10GB will remain free overall. In other words, if both JE environments reach their threshold and write operations are prohibited, each JE environment will have 5GB of free space for recovery (10GB total).

On the other hand, when an external service is also consuming disk space and its usage of disk space is variable over time, the situation is more complex and JE cannot always guarantee that FREE_DISK is honored. The following scenario includes multiple JE environments as well as an external service, all sharing a 300GB volume.

• JE-1 and JE-2 each have MAX_DISK=100GB and FREE_DISK=5GB,
• an external service is expected to use up to 50GB, and
• 50GB is used for fixed miscellaneous storage.

Assuming that the external service stays within its 50GB limit then, as in the previous example, each JE environment will normally use no more than 95GB, and at least 10GB will remain free overall. However, if the external service exceeds its threshold, JE will make a best effort to prohibit write operations in order to honor the FREE_DISK limit, but this is not always possible, as illustrated by the following sequence of events:

• If the external service uses all its allocated space, 50GB, and the JE environments are each using 75GB, then there will be 50GB free overall (25GB for each JE environment). Write operations are allowed in both JE environments.
• If the external service then exceeds its limit by 25GB and uses 75GB, there will be only 25GB free overall. But each JE environment is still under its 95GB limit and there is still more than 5GB free overall, so write operations are still allowed.
• If each JE environment uses an additional 10GB of space, there will be only 5GB free overall. Each JE environment is using only 85GB, which is under its 95GB limit. But the 5GB FREE_DISK limit for the volume overall has been reached and therefore JE write operations will be prohibited.

Leaving only 5GB of free space in the prior scenario is not ideal, but it is at least enough for one JE environment at a time to be recovered. The reality is that when an external entity exceeds its expected disk usage, JE cannot always compensate. For example, if the external service continues to use more space in the scenario above, the volume will eventually be filled completely.
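For illustration, a sketch of configuring a disk limit and handling the resulting exception; the sizes and database name are arbitrary:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.DiskLimitException;
    import com.sleepycat.je.EnvironmentConfig;

    public class DiskLimits {
        public static EnvironmentConfig configure() {
            EnvironmentConfig c = new EnvironmentConfig();
            c.setAllowCreate(true);
            c.setMaxDisk(100L * 1024 * 1024 * 1024); // MAX_DISK = 100GB
            // FREE_DISK keeps its 5GB default, so writes are prohibited
            // once this environment's usage reaches roughly 95GB.
            return c;
        }

        public static void tryWrite(Database db,
                                    DatabaseEntry key, DatabaseEntry data) {
            try {
                db.put(null, key, data);
            } catch (DiskLimitException e) {
                // The transaction is invalidated but the environment is not;
                // reads may continue while space is reclaimed or freed.
                System.err.println("Disk limit exceeded: " + e.getMessage());
            }
        }
    }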

    Name      Type  Mutable  Default  Minimum  Maximum
    {@value}  Long  Yes      0        0        -none-

@see #FREE_DISK
@see #setMaxDisk(long)
@see #getMaxDisk()
@since 7.5

    public static final String MAX_DISK = "je.maxDisk";

A lower limit on the number of bytes of free space to maintain on a volume and per JE Environment. Works with {@link #MAX_DISK} to define the storage limit. If the limit is exceeded, write operations will be prohibited.

The default FREE_DISK value is 5GB. This value is designed to be large enough to allow manual recovery after exceeding a disk threshold.

If FREE_DISK is set to zero, no free space limit is enforced. This is not recommended, since manual recovery may be very difficult or impossible when the volume is completely full.

If non-zero, this parameter is used in two ways:

• FREE_DISK determines the minimum amount of free space left on the storage volume. If less than this amount is free, write operations are prohibited.
• If MAX_DISK is configured, FREE_DISK is subtracted from MAX_DISK to determine the usage threshold for prohibiting write operations. See {@link #MAX_DISK} for more information.

  Note that this subtraction could make testing inconvenient when a small value is specified for MAX_DISK and FREE_DISK is not also specified. For example, if MAX_DISK is 1GB and FREE_DISK is 5GB (its default value), then no writing would be allowed (MAX_DISK minus FREE_DISK is negative 4GB). To address this, the subtraction is performed only if one of two conditions is met:

  1. FREE_DISK is explicitly specified, or
  2. MAX_DISK is greater than 10GB.

    Name      Type  Mutable  Default              Minimum  Maximum
    {@value}  Long  Yes      5,368,709,120 (5GB)  -none-   -none-

@see #MAX_DISK
@since 7.5

    public static final String FREE_DISK = "je.freeDisk";

If true, a checkpoint is forced following recovery, even if the log ends with a checkpoint.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

    public static final String ENV_RECOVERY_FORCE_CHECKPOINT =
        "je.env.recoveryForceCheckpoint";

Used after performing a restore from backup to force creation of a new log file prior to recovery.

As of JE 6.3, the use of this parameter is unnecessary except in special cases. See the "Restoring from a backup" section in the DbBackup javadoc for more information.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

@see Restoring from a backup

    public static final String ENV_RECOVERY_FORCE_NEW_FILE =
        "je.env.recoveryForceNewFile";

By default, if a checksum exception is found at the end of the log during Environment startup, JE will assume the checksum error is due to previously interrupted I/O and will quietly truncate the log and restart. If this property is set to true, when a ChecksumException occurs in the last log file during recovery, JE will instead attempt to continue reading past the corrupted record to see if there are commit records following the corruption. If there are, an EnvironmentFailureException is thrown to indicate the presence of committed transactions. The user may then need to run DbTruncateLog to truncate the log for further recovery, after doing manual analysis of the log. Setting this property is suitable when the application wants to guard against unusual cases.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

    public static final String HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION =
        "je.haltOnCommitAfterChecksumException";

If true, starts up the INCompressor thread.

    Name      Type     Mutable  Default
    {@value}  Boolean  Yes      true

    public static final String ENV_RUN_IN_COMPRESSOR =
        "je.env.runINCompressor";

If true, starts up the checkpointer thread.

    Name      Type     Mutable  Default
    {@value}  Boolean  Yes      true

    public static final String ENV_RUN_CHECKPOINTER = "je.env.runCheckpointer";

If true, starts up the cleaner thread.

    Name      Type     Mutable  Default
    {@value}  Boolean  Yes      true

    public static final String ENV_RUN_CLEANER = "je.env.runCleaner";

If true, eviction is done by a pool of evictor threads, as well as being done inline by application threads. If false, the evictor pool is not used, regardless of the values of {@link #EVICTOR_CORE_THREADS} and {@link #EVICTOR_MAX_THREADS}.

    Name      Type     Mutable  Default
    {@value}  Boolean  Yes      true

    public static final String ENV_RUN_EVICTOR = "je.env.runEvictor";

If true, off-heap eviction is done by a pool of evictor threads, as well as being done inline by application threads. If false, the evictor pool is not used, regardless of the values of {@link #OFFHEAP_CORE_THREADS} and {@link #OFFHEAP_MAX_THREADS}.

    Name      Type     Mutable  Default
    {@value}  Boolean  Yes      true

    public static final String ENV_RUN_OFFHEAP_EVICTOR =
        "je.env.runOffHeapEvictor";

The maximum number of read operations performed by JE background activities (e.g., cleaning) before sleeping to ensure that application threads can perform I/O. If zero (the default) then no limitation on I/O is enforced.

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  Yes      0        0        -none-

@see #ENV_BACKGROUND_SLEEP_INTERVAL

    public static final String ENV_BACKGROUND_READ_LIMIT =
        "je.env.backgroundReadLimit";

The maximum number of write operations performed by JE background activities (e.g., checkpointing and eviction) before sleeping to ensure that application threads can perform I/O. If zero (the default) then no limitation on I/O is enforced.

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  Yes      0        0        -none-

@see #ENV_BACKGROUND_SLEEP_INTERVAL

    public static final String ENV_BACKGROUND_WRITE_LIMIT =
        "je.env.backgroundWriteLimit";

The duration that JE background activities will sleep when the {@link #ENV_BACKGROUND_WRITE_LIMIT} or {@link #ENV_BACKGROUND_READ_LIMIT} is reached. If {@link #ENV_BACKGROUND_WRITE_LIMIT} and {@link #ENV_BACKGROUND_READ_LIMIT} are zero, this setting is not used.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  Yes      1 ms     1 ms     24 d

@see Time Duration Properties

    public static final String ENV_BACKGROUND_SLEEP_INTERVAL =
        "je.env.backgroundSleepInterval";

Debugging support: check leaked locks and txns at env close.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       true

    public static final String ENV_CHECK_LEAKS = "je.env.checkLeaks";

Debugging support: call Thread.yield() at strategic points.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

    public static final String ENV_FORCED_YIELD = "je.env.forcedYield";

Configures the use of transactions.

This should be set to true when transactional guarantees such as atomicity of multiple operations and durability are important.

If true, create an environment that is capable of performing transactions. If true is not passed, transactions may not be used. For licensing purposes, the use of this method distinguishes the use of the Transactional product. Note that if transactions are not used, specifying true does not create additional overhead in the environment.
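A sketch of opening a transactional environment and committing one write; the directory, database name and key/value are illustrative:

    import java.io.File;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.Transaction;

    public class TxnSetup {
        public static void main(String[] args) {
            EnvironmentConfig ec = new EnvironmentConfig();
            ec.setAllowCreate(true);
            ec.setTransactional(true); // same effect as je.env.isTransactional=true

            Environment env = new Environment(new File("/tmp/je-env"), ec);
            DatabaseConfig dc = new DatabaseConfig();
            dc.setAllowCreate(true);
            dc.setTransactional(true);
            Database db = env.openDatabase(null, "demo", dc);

            Transaction txn = env.beginTransaction(null, null);
            db.put(txn, new DatabaseEntry("k".getBytes()),
                        new DatabaseEntry("v".getBytes()));
            txn.commit(); // atomic and durable per the configured policy

            db.close();
            env.close();
        }
    }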

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

@see #setTransactional

    public static final String ENV_IS_TRANSACTIONAL = "je.env.isTransactional";

Configures the database environment for no locking.

If true, create the environment with record locking. This property should be set to false only in special circumstances when it is safe to run without record locking.

This configuration option should be used when locking guarantees such as consistency and isolation are not important. If locking mode is disabled (it is enabled by default), the cleaner is automatically disabled. The user is responsible for invoking the cleaner and ensuring that there are no concurrent operations while the cleaner is running.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       true

@see #setLocking

    public static final String ENV_IS_LOCKING = "je.env.isLocking";

Configures the database environment to be read-only, and any attempt to modify a database will fail.

A read-only environment has several limitations and is recommended only in special circumstances. Note that there is no performance advantage to opening an environment read-only.

The primary reason for opening an environment read-only is to open a single environment in multiple JVM processes. Only one JVM process at a time may open the environment read-write. See {@link EnvironmentLockedException}.

When the environment is open read-only, the following limitations apply.

• In the read-only environment no writes may be performed, as expected, and databases must be opened read-only using {@link DatabaseConfig#setReadOnly}.
• The read-only environment receives a snapshot of the data that is effectively frozen at the time the environment is opened. If the application has the environment open read-write in another JVM process and modifies the environment's databases in any way, the read-only version of the data will not be updated until the read-only JVM process closes and reopens the environment (and by extension all databases in that environment).
• If the read-only environment is opened while the environment is in use by another JVM process in read-write mode, opening the environment read-only (recovery) is likely to take longer than it does after a clean shutdown. This is due to the fact that the read-write JVM process is writing and checkpoints are occurring that are not coordinated with the read-only JVM process. The effect is similar to opening an environment after a crash.
• In a read-only environment, the JE cache will contain information that cannot be evicted because it was reconstructed by recovery and cannot be flushed to disk. This means that the read-only environment may not be suitable for operations that use large amounts of memory, and poor performance may result if this is attempted.
• In a read-write environment, the log cleaner will be prohibited from deleting log files for as long as the environment is open read-only in another JVM process. This may cause disk usage to rise, and for this reason it is not recommended that an environment is kept open read-only in this manner for long periods.

For these reasons, it is recommended that a read-only environment be used only for short periods and for operations that are not performance critical or memory intensive. With few exceptions, all application functions that require access to a JE environment should be built into a single application so that they can be performed in the JVM process where the environment is open read-write.

In most applications, opening an environment read-only can and should be avoided.
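When it is needed, a sketch of a second process opening the same environment read-only; the directory and database name are illustrative:

    import java.io.File;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class ReadOnlyOpen {
        public static void main(String[] args) {
            EnvironmentConfig ec = new EnvironmentConfig();
            ec.setReadOnly(true); // same effect as je.env.isReadOnly=true

            Environment env = new Environment(new File("/tmp/je-env"), ec);
            DatabaseConfig dc = new DatabaseConfig();
            dc.setReadOnly(true); // databases must also be opened read-only
            Database db = env.openDatabase(null, "demo", dc);
            // ... perform reads against the frozen snapshot ...
            db.close();
            env.close();
        }
    }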

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

@see #setReadOnly

    public static final String ENV_READ_ONLY = "je.env.isReadOnly";

If true, use latches instead of synchronized blocks to implement the lock table and log write mutexes. Latches require that threads queue to obtain the mutex in question, and therefore guarantee that there will be no mutex starvation, but they do incur a performance penalty. Latches should not be necessary in most cases, so synchronized blocks are the default. An application that puts heavy load on JE with threads of different priorities might find it useful to use latches. In a Java 5 JVM, where java.util.concurrent.locks.ReentrantLock is used for the latch implementation, this parameter determines whether the locks are 'fair'. This parameter is 'static' across all environments.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

    public static final String ENV_FAIR_LATCHES = "je.env.fairLatches";

The timeout for detecting internal latch timeouts, so that deadlocks can be detected. Latches are held internally for very short durations. If, due to unforeseen problems, a deadlock occurs, a timeout will occur after the duration specified by this parameter. When a latch timeout occurs:

• The Environment is invalidated and must be closed.
• An {@link EnvironmentFailureException} is thrown.
• A full thread dump is logged at level SEVERE.

If this happens, the thread dump in the je.info file should be preserved so it can be used to analyze the problem.

Most applications should not change this parameter. The default value, 5 minutes, should be much longer than a latch is ever held.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  No       5 min    1 ms     -none-

@see Time Duration Properties
@since 6.2

    public static final String ENV_LATCH_TIMEOUT = "je.env.latchTimeout";

The interval added to the system clock time for determining that a record may have expired. Used when an internal integrity error may be present, but may also be due to a record that expired and the system clock was moved back.

For example, say a record expires and then the clock is moved back by one hour to correct a daylight savings time error. Because the LN and BIN slot for an expired record are purged separately (see Time-To-Live), in this case the LN was purged but the BIN slot was not purged. When accessing the record's key via the BIN slot, it will appear that it is not expired. But then when accessing the data, the LN will not be accessible. Normally this would be considered a fatal integrity error, but since the record will expire within the 2 hour limit, it is simply treated as an expired record.

Most applications should not change this parameter. The default value, two hours, is enough to account for minor clock adjustments or accidentally setting the clock one hour off.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  No       2 h      1 ms     -none-

@see Time Duration Properties
@since 7.0

    public static final String ENV_TTL_CLOCK_TOLERANCE =
        "je.env.ttlClockTolerance";

If true (the default), expired data is filtered from queries and purged by the cleaner. This might be set to false to recover data after an extended down time.

WARNING: Disabling expiration is intended for special-purpose access for data recovery only. When this parameter is set to false, records that have expired may or may not have been purged, so they may or may not be accessible. In addition, it is possible for the key and data of a record to expire independently, so the key may be accessible (if the data is not requested by the read operation), while the record will appear to be deleted when the data is requested. The same is true of primary and secondary records, which are also purged independently. A record may be accessible by primary key but not secondary key, and vice-versa.

    Name      Type     Mutable  Default
    {@value}  Boolean  Yes      true

    public static final String ENV_EXPIRATION_ENABLED =
        "je.env.expirationEnabled";

If true, enable eviction of metadata for closed databases. There is no known benefit to setting this parameter to false.

This parameter is unlikely to be needed for tuning, but is sometimes useful for debugging and testing.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       true

@see Cache Statistics: Debugging

    public static final String ENV_DB_EVICTION = "je.env.dbEviction";

If true (the default) preload all duplicates databases at once when upgrading from JE 4.1 and earlier. If false, preload each duplicates database individually instead. Preloading all databases at once gives a performance advantage if the JE cache is roughly large enough to contain the internal nodes for all duplicates databases. Preloading each database individually gives a performance advantage if the JE cache is roughly large enough to contain the internal nodes for a single duplicates database.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       true

    public static final String ENV_DUP_CONVERT_PRELOAD_ALL =
        "je.env.dupConvertPreloadAll";

By default, JE passes an entire log record to the Adler32 class for checksumming. This can cause problems with the GC in some cases if the records are large and there is concurrency. Setting this parameter will cause JE to pass chunks of the log record to the checksumming class so that the GC does not block. 0 means do not chunk.

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  Yes      0        0        1048576 (1M)

    public static final String ADLER32_CHUNK_SIZE = "je.adler32.chunkSize";

The total memory taken by log buffers, in bytes. If 0, use 7% of je.maxMemory. If 0 and je.sharedCache=true, use 7% divided by N where N is the number of environments sharing the global cache.

    Name      Type  Mutable  Default  Minimum                                                               Maximum
    {@value}  Long  No       0        {@value com.sleepycat.je.config.EnvironmentParams#LOG_MEM_SIZE_MIN}  -none-

    public static final String LOG_TOTAL_BUFFER_BYTES =
        "je.log.totalBufferBytes";

The number of JE log buffers.

    Name      Type     Mutable  Default                                                                     Minimum  Maximum
    {@value}  Integer  No       {@value com.sleepycat.je.config.EnvironmentParams#NUM_LOG_BUFFERS_DEFAULT}  2        -none-

    public static final String LOG_NUM_BUFFERS = "je.log.numBuffers";

The maximum starting size of a JE log buffer. JE silently restricts this value to be no more than the configured maximum log file size (je.log.fileMax).

    Name      Type     Mutable  Default       Minimum    Maximum
    {@value}  Integer  No       1048576 (1M)  1024 (1K)  -none-

    public static final String LOG_BUFFER_SIZE = "je.log.bufferSize";

The buffer size for faulting in objects from disk, in bytes.

    Name      Type     Mutable  Default    Minimum  Maximum
    {@value}  Integer  No       2048 (2K)  32       -none-

    public static final String LOG_FAULT_READ_SIZE = "je.log.faultReadSize";

The read buffer size for log iterators, which are used when scanning the log during activities like log cleaning and environment open, in bytes. This may grow as the system encounters larger log entries.

    Name      Type     Mutable  Default    Minimum  Maximum
    {@value}  Integer  No       8192 (8K)  128      -none-

    public static final String LOG_ITERATOR_READ_SIZE =
        "je.log.iteratorReadSize";

The maximum read buffer size for log iterators, which are used when scanning the log during activities like log cleaning and environment open, in bytes.

    Name      Type     Mutable  Default         Minimum  Maximum
    {@value}  Integer  No       16777216 (16M)  128      -none-

    public static final String LOG_ITERATOR_MAX_SIZE =
        "je.log.iteratorMaxSize";

The maximum size of each individual JE log file, in bytes.

    Name      Type  Mutable  Default         Minimum       Maximum
    {@value}  Long  No       10000000 (10M)  1000000 (1M)  1073741824 (1G)

    public static final String LOG_FILE_MAX = "je.log.fileMax";

The JE environment can be spread across multiple subdirectories. Environment subdirectories may be used to spread an environment's .jdb files over multiple directories, and therefore over multiple disks or file systems. Environment subdirectories reside in the environment home directory and are named data001/ through dataNNN/, consecutively, where NNN is the value of je.log.nDataDirectories. A typical configuration would be to have each of the dataNNN/ names be symbolic links to actual directories which each reside on separate file systems or disks.

If 0, all log files (*.jdb) will reside in the environment home directory passed to the Environment constructor. A non-zero value indicates the number of environment subdirectories to use for holding the environment log files.

If data subdirectories are used (i.e. je.log.nDataDirectories > 0), this parameter must be set when the environment is initially created. Like the environment home directory, each and every one of the dataNNN/ subdirectories must also be present and writable. This parameter must be set to the same value for all subsequent openings of the environment or an exception will be thrown.

If the set of existing dataNNN/ subdirectories is not equivalent to the set { 1 ... je.log.nDataDirectories } when the environment is opened, an EnvironmentFailureException will be thrown, and the Environment will fail to be opened.

This parameter should be set using the je.properties file rather than the EnvironmentConfig. If not, JE command line utilities that open the Environment will throw an exception because they will not know of the non-zero value of this parameter.

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  No       0        0        256

@deprecated as of 7.3. This feature is not known to provide benefits beyond that of a simple RAID configuration, and will be removed in the next release, which is slated for mid-April, 2017.

    public static final String LOG_N_DATA_DIRECTORIES =
        "je.log.nDataDirectories";

If true, perform a checksum check when reading entries from log.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       true

    public static final String LOG_CHECKSUM_READ = "je.log.checksumRead";

If true, perform a checksum verification just before and after writing to the log. This is primarily used for debugging.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

    public static final String LOG_VERIFY_CHECKSUMS = "je.log.verifyChecksums";

If true, operates in an in-memory test mode without flushing the log to disk. An environment directory must be specified, but it need not exist and no files are written. The system operates until it runs out of memory, at which time an OutOfMemoryError is thrown. Because the entire log is kept in memory, this mode is normally useful only for testing.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

    public static final String LOG_MEM_ONLY = "je.log.memOnly";

The size of the file handle cache.

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  No       100      3        -none-

    public static final String LOG_FILE_CACHE_SIZE = "je.log.fileCacheSize";

If true, periodically detect unexpected file deletions. Normally all file deletions should be performed as a result of JE log cleaning. If an external file deletion is detected, JE assumes this was accidental. This will cause the environment to be invalidated and all methods will throw {@link EnvironmentFailureException}.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       true

@since 7.2

    public static final String LOG_DETECT_FILE_DELETE =
        "je.log.detectFileDelete";

The interval used to check for unexpected file deletions.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  No       1000 ms  1 ms     -none-

@see Time Duration Properties

    public static final String LOG_DETECT_FILE_DELETE_INTERVAL =
        "je.log.detectFileDeleteInterval";

The timeout limit for group file sync, in microseconds.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  No       500 ms   10 ms    24 d

@see Time Duration Properties

    public static final String LOG_FSYNC_TIMEOUT = "je.log.fsyncTimeout";

If the time taken by an fsync exceeds this limit, a WARNING level message is logged. If this parameter is set to zero, a message will not be logged. By default, this parameter is 5 seconds.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  No       5 s      zero     30 s

@since 7.0
@see EnvironmentStats#getFSyncMaxTime()
@see Time Duration Properties

    public static final String LOG_FSYNC_TIME_LIMIT = "je.log.fsyncTimeLimit";

The time interval in nanoseconds during which transactions may be grouped to amortize the cost of write and/or fsync when a transaction commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC on the local machine.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  No       0        0        -none-

@see Time Duration Properties
@since 5.0.76
@see #LOG_GROUP_COMMIT_THRESHOLD

    public static final String LOG_GROUP_COMMIT_INTERVAL =
        "je.log.groupCommitInterval";

The threshold value impacts the number of transactions that may be grouped to amortize the cost of write and/or fsync when a transaction commits with SyncPolicy#SYNC or SyncPolicy#WRITE_NO_SYNC on the local machine.

Specifying larger values can result in more transactions being grouped together, decreasing average commit times.
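For example, a sketch of enabling group commit via string parameters; the interval and threshold values are arbitrary starting points, not recommendations:

    import com.sleepycat.je.EnvironmentConfig;

    public class GroupCommit {
        public static EnvironmentConfig configure() {
            EnvironmentConfig c = new EnvironmentConfig();
            // Wait up to 2 ms for additional commits to share one fsync.
            c.setConfigParam(EnvironmentConfig.LOG_GROUP_COMMIT_INTERVAL, "2 ms");
            // Flush early once 16 transactions are waiting.
            c.setConfigParam(EnvironmentConfig.LOG_GROUP_COMMIT_THRESHOLD, "16");
            return c;
        }
    }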

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  No       0        0        -none-

@since 5.0.76
@see #LOG_GROUP_COMMIT_INTERVAL

    public static final String LOG_GROUP_COMMIT_THRESHOLD =
        "je.log.groupCommitThreshold";

The maximum time interval between committing a transaction with {@link Durability.SyncPolicy#COMMIT_NO_SYNC NO_SYNC} or {@link Durability.SyncPolicy#COMMIT_WRITE_NO_SYNC WRITE_NO_SYNC} durability, and making the transaction durable with respect to the storage device. To provide this guarantee, a JE background thread is used to flush any data buffered by JE to the file system, and also perform an fsync to force any data buffered by the file system to the storage device. If this parameter is set to zero, this JE background task is disabled and no such guarantee is provided.

Separately, the {@link #LOG_FLUSH_NO_SYNC_INTERVAL} flushing provides a guarantee that data is periodically flushed to the file system. To guard against data loss due to an OS crash (and to improve performance) we recommend that the file system is configured to periodically flush dirty pages to the storage device. This parameter, {@code LOG_FLUSH_SYNC_INTERVAL}, provides a fallback for flushing to the storage device, in case the file system is not adequately configured.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  Yes      20 s     0        -none-

@see Time Duration Properties
@since 7.2

    public static final String LOG_FLUSH_SYNC_INTERVAL =
        "je.log.flushSyncInterval";

The maximum time interval between committing a transaction with {@link Durability.SyncPolicy#COMMIT_NO_SYNC NO_SYNC} durability, and making the transaction durable with respect to the file system. To provide this guarantee, a JE background thread is used to flush any data buffered by JE to the file system. If this parameter is set to zero, this JE background task is disabled and no such guarantee is provided.

Frequent periodic flushing to the file system provides improved durability for NO_SYNC transactions. Without this flushing, if application write operations stop, then some number of NO_SYNC transactions would be left in JE memory buffers and would be lost in the event of a crash. For HA applications, this flushing reduces the possibility of {@link com.sleepycat.je.rep.RollbackProhibitedException}. Note that periodic flushing reduces the time window where a crash can cause transaction loss and {@code RollbackProhibitedException}, but the window cannot be closed completely when using NO_SYNC durability.

    Name      Type              Mutable  Default  Minimum  Maximum
    {@value}  {@link Duration}  Yes      5 s      0        -none-

@see Time Duration Properties
@since 7.2

    public static final String LOG_FLUSH_NO_SYNC_INTERVAL =
        "je.log.flushNoSyncInterval";

If true (default is false) O_DSYNC is used to open JE log files.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       false

    public static final String LOG_USE_ODSYNC = "je.log.useODSYNC";

@deprecated NIO is no longer used by JE and this parameter has no effect.

    public static final String LOG_USE_NIO = "je.log.useNIO";

If true (default is true) the Write Queue is used for file I/O operations which are blocked by concurrent I/O operations.

    Name      Type     Mutable  Default
    {@value}  Boolean  No       true

    public static final String LOG_USE_WRITE_QUEUE = "je.log.useWriteQueue";

The size of the Write Queue.

    Name      Type     Mutable  Default  Minimum  Maximum
    {@value}  Integer  No       1MB      4KB      32MB

    public static final String LOG_WRITE_QUEUE_SIZE = "je.log.writeQueueSize";

@deprecated NIO is no longer used by JE and this parameter has no effect.

    public static final String LOG_DIRECT_NIO = "je.log.directNIO";

@deprecated NIO is no longer used by JE and this parameter has no effect.

    public static final String LOG_CHUNKED_NIO = "je.log.chunkedNIO";

Whether to run the background verifier.

If true (the default), the verifier runs according to the schedule given by {@link #VERIFY_SCHEDULE}. Each time the verifier runs, it performs checksum verification if the {@link #VERIFY_LOG} setting is true and performs Btree verification if the {@link #VERIFY_BTREE} setting is true.

When corruption is detected, the Environment will be invalidated and an EnvironmentFailureException will be thrown. Applications catching this exception can call the new {@link EnvironmentFailureException#isCorrupted()} method to determine whether corruption was detected.

If isCorrupted returns true, a full restore (an HA {@link com.sleepycat.je.rep.NetworkRestore} or restore from backup) should be performed to avoid further problems. The advantage of performing verification frequently is that a problem may be detected sooner than it would be otherwise. For HA applications, this means that the network restore can be done while the other nodes in the group are up, minimizing exposure to additional failures.

When index corruption is detected, the environment is not invalidated. Instead, the corrupt index (secondary database) is marked as corrupt in memory and a warning message is logged. All subsequent access to the index will throw {@link SecondaryIntegrityException}. To correct the problem, the application may perform a full restore or rebuild the corrupt index.
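A sketch of tuning the verifier and reacting to detected corruption; the 3 a.m. schedule is arbitrary:

    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.EnvironmentFailureException;

    public class VerifierSetup {
        public static EnvironmentConfig configure() {
            EnvironmentConfig c = new EnvironmentConfig();
            c.setConfigParam(EnvironmentConfig.ENV_RUN_VERIFIER, "true");
            // Crontab format, local time: run daily at 3 a.m.
            c.setConfigParam(EnvironmentConfig.VERIFY_SCHEDULE, "0 3 * * *");
            return c;
        }

        public static void handle(EnvironmentFailureException e) {
            if (e.isCorrupted()) {
                // Persistent corruption: schedule a network restore or a
                // restore from backup rather than simply reopening.
            }
        }
    }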

    Name      Type     Mutable  Default
    {@value}  Boolean  Yes      true

@since 7.3

    public static final String ENV_RUN_VERIFIER = "je.env.runVerifier";

A crontab-format string indicating when to start the background verifier.

See https://en.wikipedia.org/wiki/Cron#Configuration_file. Note that times and dates are specified in local time, not UTC time.

The data verifier will run at most once per scheduled interval. If the complete verification (log verification followed by Btree verification) takes longer than the scheduled interval, then the next verification will start at the next increment of the interval. For example, if the default schedule is used (once per day at midnight), and verification takes 25 hours, then verification will occur once every two days (48 hours), starting at midnight.

    Name      Type    Mutable  Default
    {@value}  String  Yes      "0 0 * * *" (run once a day at midnight, local time)

@since 7.3

    public static final String VERIFY_SCHEDULE = "je.env.verifySchedule";

Whether the background verifier should verify checksums in the log, as if the {@link DbVerifyLog} utility were run.

If true, the entire log is read sequentially and verified. The size of the read buffer is determined by LOG_ITERATOR_READ_SIZE.

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>true</td></tr>
+ * </table>

        + * + * @since 7.3 + */ + public static final String VERIFY_LOG = "je.env.verifyLog"; + + /** + * The delay between reads during {@link #VERIFY_LOG log verification}. + * A delay between reads is needed to allow other JE components, such as + * HA, to make timely progress. + *

+ * With the default 100ms delay and a read buffer size of 131072 (128K) + * bytes, verifying a 1GB file adds a total delay of about 13 minutes. + *

        + * This parameter applies only to the {@link #ENV_RUN_VERIFIER background + * verifier}. It does not apply to use of {@link DbVerifyLog}. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td>
+ * <td>100 ms</td><td>0 ms</td><td>10 s</td></tr>
+ * </table>

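For intuition about the default, a back-of-envelope calculation under the assumptions stated above (128K read buffer, 100ms delay per read):

    // reads per 1GB file:  1GB / 128K buffer         =  8192 reads
    // total added delay:   8192 reads * 100 ms/read ~=  819 s ~= 13.7 min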
        + * + * @since 7.5 + */ + public static final String VERIFY_LOG_READ_DELAY = + "je.env.verifyLogReadDelay"; + + /** + * Whether the background verifier should perform Btree verification, + * as if the {@link DbVerify} utility were run. + *

        + * If true, the Btree of all databases, external and internal, is + * verified. The in-memory cache is used for verification and internal + * data structures are checked. References to data records (log sequence + * numbers, or LSNs) are checked to ensure they do not refer to deleted + * files -- this is the most common type of corruption. Additional + * checks are performed, depending on the settings for {@link + * #VERIFY_SECONDARIES} and {@link #VERIFY_DATA_RECORDS}. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>true</td></tr>
+ * </table>

        + * + * @since 7.5 + */ + public static final String VERIFY_BTREE = "je.env.verifyBtree"; + + /** + * Whether to verify secondary index references during Btree verification. + *

        + * An index record contains a reference to a primary key, and the + * verification involves checking that a record for the primary key exists. + *

        + * Note that secondary index references are verified only for each + * {@link SecondaryDatabase} (and {@link + * com.sleepycat.persist.SecondaryIndex SecondaryIndex}) that is currently + * open. The relationship between a secondary and primary database is not + * stored persistently, so JE is not aware of the relationship unless the + * secondary database has been opened by the application. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>true</td></tr>
+ * </table>

        + * + * @since 7.5 + */ + public static final String VERIFY_SECONDARIES = "je.env.verifySecondaries"; + + /** + * Whether to verify data records (leaf nodes, or LNs) during Btree + * verification. + *

        + * Regardless of this parameter's value, the Btree reference to the data + * record (the log sequence number, or LSN) is checked to ensure that + * it doesn't refer to a file that has been deleted by the JE cleaner -- + * this sort of "dangling reference" is the most common type of + * corruption. If this parameter value is true, the LN is additionally + * fetched from disk (if not in cache) to verify that the LSN refers to + * a valid log entry. Because LNs are often not cached, this can cause + * expensive random IO, and the default value for this parameter is false + * for this reason. Some applications may choose to set this parameter to + * true, for example, when using a storage device with fast random + * IO (an SSD). + *

        + * Note that Btree internal nodes (INs) are always fetched from disk + * during verification, if they are not in cache, and this can result + * in random IO. Verification was implemented with the assumption that + * most INs will be in cache. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>false</td></tr>
+ * </table>

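A sketch of opting into the more expensive checks, reasonable when random IO is cheap (e.g. on an SSD) per the discussion above; envConfig is the EnvironmentConfig from the earlier sketch.

    // Fetch LNs from disk during verification to fully validate LSNs,
    // and check secondary index references for open secondaries.
    envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "true");
    envConfig.setConfigParam(EnvironmentConfig.VERIFY_SECONDARIES, "true");
    envConfig.setConfigParam(EnvironmentConfig.VERIFY_DATA_RECORDS, "true");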
        + * + * @since 7.5 + */ + public static final String VERIFY_DATA_RECORDS = + "je.env.verifyDataRecords"; + + /** + * @hidden + * Whether to verify references to obsolete records during Btree + * verification. + *

+ * For performance reasons, the JE cleaner maintains a set of + * references (log sequence numbers, or LSNs) to obsolete records. + * If such a reference is incorrect and the record at the LSN is + * actually active, the cleaner may delete a data file without + * migrating the active record, and this will result in a dangling + * reference from the Btree. + *

        + * If this parameter's value is true, all active LSNs in the Btree are + * checked to ensure they are not in the cleaner's set of obsolete LSNs. + * To perform this check efficiently, the set of all obsolete LSNs must + * be fetched from disk and kept in memory during the verification run, + * and the default value for this parameter is false for this reason. + * Some applications may choose to set this parameter to true, when the + * use of more Java heap memory is worth the additional safety measure. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>false</td></tr>
+ * </table>

        + * + * @since 7.5 + */ + public static final String VERIFY_OBSOLETE_RECORDS = + "je.env.verifyObsoleteRecords"; + + /** + * The number of records verified per batch during {@link #VERIFY_BTREE + * Btree verification}. In order to give database remove/truncate the + * opportunity to execute, records are verified in batches and there is + * a {@link #VERIFY_BTREE_BATCH_DELAY delay} between batches. + *

        + * This parameter applies only to the {@link #ENV_RUN_VERIFIER background + * verifier}. It does not apply to use of {@link DbVerify}. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>1000</td><td>1</td><td>10000</td></tr>
+ * </table>

        + */ + public static final String VERIFY_BTREE_BATCH_SIZE = + "je.env.verifyBtreeBatchSize"; + + /** + * The delay between batches during {@link #VERIFY_BTREE Btree + * verification}. In order to give database remove/truncate the + * opportunity to execute, records are verified in {@link + * #VERIFY_BTREE_BATCH_SIZE batches} and there is a delay between batches. + *

        + * A 10ms delay, the default value, should be enough to allow other + * threads to run. A large value, for example 1s, would result in a total + * delay of 28 hours when verifying 100m records or 100k batches. + *

        + * This parameter applies only to the {@link #ENV_RUN_VERIFIER background + * verifier}. It does not apply to use of {@link DbVerify}. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td>
+ * <td>10 ms</td><td>0 ms</td><td>10 s</td></tr>
+ * </table>

        + */ + public static final String VERIFY_BTREE_BATCH_DELAY = + "je.env.verifyBtreeBatchDelay"; + + /** + * The maximum number of entries in an internal btree node. This can be + * set per-database using the DatabaseConfig object. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>128</td><td>4</td><td>32767 (32K)</td></tr>
+ * </table>

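Since je.nodeMaxEntries is not mutable and applies environment-wide, a per-database override via DatabaseConfig is often preferable. A minimal sketch; 256 is an illustrative fanout and "exampleDb" a hypothetical name.

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setNodeMaxEntries(256);   // overrides je.nodeMaxEntries
    Database db = env.openDatabase(null, "exampleDb", dbConfig);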
        + */ + public static final String NODE_MAX_ENTRIES = "je.nodeMaxEntries"; + + /** + * @deprecated this property no longer has any effect; {@link + * DatabaseConfig#setNodeMaxEntries} should be used instead. + */ + public static final String NODE_DUP_TREE_MAX_ENTRIES = + "je.nodeDupTreeMaxEntries"; + + /** + * The maximum size (in bytes) of a record's data portion that will cause + * the record to be embedded in its parent LN. + *

        + * Normally, records (key-value pairs) are stored on disk as individual + * byte sequences called LNs (leaf nodes) and they are accessed via a + * Btree. The nodes of the Btree are called INs (Internal Nodes) and the + * INs at the bottom layer of the Btree are called BINs (Bottom Internal + * Nodes). Conceptually, each BIN contains an array of slots. A slot + * represents an associated data record. Among other things, it stores + * the key of the record and the most recent disk address of that record. + * Records and INs share the disk space (are stored in the same kind of + * files), but LNs are stored separately from BINs, i.e., there is no + * clustering or co-location of a BIN and its child LNs. + *

        + * With embedded LNs, a whole record may be stored inside a BIN (i.e., + * a BIN slot may contain both the key and the data portion of a record). + * Specifically, a record will be "embedded" if the size (in bytes) of its + * data portion is less than or equal to the value of the + * TREE_MAX_EMBEDDED_LN configuration parameter. The decision to embed a + * record or not is taken on a record-by-record basis. As a result, a BIN + * may contain both embedded and non-embedded records. The "embeddedness" + * of a record is a dynamic property: a size-changing update may turn a + * non-embedded record to an embedded one or vice-versa. + *

        + * Notice that even though a record may be embedded, when the record is + * inserted, updated, or deleted an LN for that record is still generated + * and written to disk. This is because LNs also act as log records, + * which are needed during recovery and/or transaction abort to undo/redo + * operations that are/are-not currently reflected in the BINs. However, + * during normal processing, these LNs will never be fetched from disk. + *

        + * Obviously, embedding records has the performance advantage that no + * extra disk read is needed to fetch the record data (i.e., the LN) + * during read operations. This is especially true for operations like + * cursor scans and for random searches within key ranges whose + * containing BINs can fit in the JE cache (in other words when there + * is locality of reference). Furthermore, embedded records do not need + * to be migrated during cleaning; they are considered obsolete by default, + * because they will never be needed again after their containing log file + * is deleted. This makes cleaning faster, and more importantly, avoids + * the dirtying of the parent BINs, which would otherwise cause even more + * cleaning later. + *

        + * On the other hand, embedded LNs make the BINs larger, which can lead to + * more cache eviction of BINs and the associated performance problems. + * When eviction does occur, performance can deteriorate as the size of + * the data portion of the records grows. This is especially true for + * insertion-only workloads. Therefore, increasing the value of + * TREE_MAX_EMBEDDED_LN beyond the default value of 16 bytes should be + * done "carefully": by considering the kind of workloads that will be run + * against BDB-JE and their relative importance and expected response + * times, and by running performance tests with both embedded and + * non-embedded LNs. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>16</td><td>0</td><td>Integer.MAX_VALUE</td></tr>
+ * </table>

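A sketch of raising the embedding threshold, assuming the workload analysis described above has been done; 64 is an illustrative value, and since the parameter is not mutable it must be set before the environment is opened.

    // Embed records whose data portion is at most 64 bytes in the BIN.
    envConfig.setConfigParam(EnvironmentConfig.TREE_MAX_EMBEDDED_LN, "64");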
+ * + * @see Cache + * Statistics: Size Optimizations + */ + public static final String TREE_MAX_EMBEDDED_LN = "je.tree.maxEmbeddedLN"; + + /** + * @deprecated as of JE 6.0. The {@link #TREE_BIN_DELTA} param alone now + * determines whether a delta is logged. + */ + public static final String TREE_MAX_DELTA = "je.tree.maxDelta"; + + /** + * If more than this percentage of entries are changed on a BIN, log a + * full version instead of a delta. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>25</td><td>0</td><td>75</td></tr>
+ * </table>

        + */ + public static final String TREE_BIN_DELTA = "je.tree.binDelta"; + + /** + * The minimum bytes allocated out of the memory cache to hold Btree data + * including internal nodes and record keys and data. If the specified + * value is larger than the size initially available in the cache, it will + * be truncated to the amount available. + * + *

        {@code TREE_MIN_MEMORY} is the minimum for a single environment. By + * default, 500 KB or the size initially available in the cache is used, + * whichever is smaller.

        + * + *

        This param is only likely to be needed for tuning of Environments + * with extremely small cache sizes. It is sometimes also useful for + * debugging and testing.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Long</td><td>Yes</td>
+ * <td>512000 (500K)</td><td>51200 (50K)</td><td>-none-</td></tr>
+ * </table>

        + * + * @see Cache Statistics: + * Debugging + */ + public static final String TREE_MIN_MEMORY = "je.tree.minMemory"; + + /** + * Specifies the maximum unprefixed key length for use in the compact + * in-memory key representation. + * + *

        In the Btree, the JE in-memory cache, the default representation for + * keys uses a byte array object per key. The per-key object overhead of + * this approach ranges from 20 to 32 bytes, depending on the JVM + * platform.

        + * + *

        To reduce memory overhead, a compact representation can instead be + * used where keys will be represented inside a single byte array instead + * of having one byte array per key. Within the single array, all keys are + * assigned a storage size equal to that taken up by the largest key, plus + * one byte to hold the actual key length. The use of the fixed size array + * reduces Java GC activity as well as memory overhead.

        + * + *

        In order for the compact representation to reduce memory usage, all + * keys in a database, or in a Btree internal node, must be roughly the + * same size. The more fully populated the internal node, the more the + * savings with this representation since the single byte array is sized to + * hold the maximum number of keys in the internal node, regardless of the + * actual number of keys that are present.

        + * + *

        It's worth noting that the storage savings of the compact + * representation are realized in addition to the storage benefits of key + * prefixing (if it is configured), since the keys stored in the key array + * are the smaller key values after the prefix has been stripped, reducing + * the length of the key and making it more likely that it's small enough + * for this specialized representation. This configuration parameter + * ({@code TREE_COMPACT_MAX_KEY_LENGTH}) is the maximum key length, not + * including the common prefix, for the keys in a Btree internal node + * stored using the compact representation. See {@link + * DatabaseConfig#setKeyPrefixing}.

        + * + *

        The compact representation is used automatically when both of the + * following conditions hold.

+ * <ul>
+ * <li>All keys in a Btree internal node must have an unprefixed length
+ * that is less than or equal to the length specified by this parameter
+ * ({@code TREE_COMPACT_MAX_KEY_LENGTH}).</li>
+ * <li>If key lengths vary by large amounts within an internal node, the
+ * wasted space of the fixed length storage may negate the benefits of the
+ * compact representation and cause more memory to be used than with the
+ * default representation. In that case, the default representation will
+ * be used.</li>
+ * </ul>

        If this configuration parameter is set to zero, the compact + * representation will not be used.

        + * + *

        The default value of this configuration parameter is 16 bytes. The + * potential drawbacks of specifying a larger length are:

+ * <ul>
+ * <li>Insertion and deletion for larger keys move bytes proportional to
+ * the storage length of the keys.</li>
+ * <li>With the compact representation, all operations create temporary
+ * byte arrays for each key involved in the operation. Larger byte arrays
+ * mean more work for the Java GC, even though these objects are short
+ * lived.</li>
+ * </ul>

        Mutation of the key representation between the default and compact + * approaches is automatic on a per-Btree internal node basis. For + * example, if a key that exceeds the configured length is added to a node + * that uses the compact representation, the node is automatically + * mutated to the default representation. A best effort is made to + * prevent frequent mutations that could increase Java GC activity.

        + * + *

        To determine how often the compact representation is used in a + * running application, see {@link EnvironmentStats#getNINCompactKeyIN}.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>16</td><td>0</td><td>256</td></tr>
+ * </table>

        + * + * @see DatabaseConfig#setKeyPrefixing + * @see EnvironmentStats#getNINCompactKeyIN + * + * @see Cache + * Statistics: Size Optimizations + * + * @since 5.0 + */ + public static final String TREE_COMPACT_MAX_KEY_LENGTH = + "je.tree.compactMaxKeyLength"; + + /** + * The compressor thread wakeup interval in microseconds. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td>
+ * <td>5 s</td><td>1 s</td><td>75 min</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + */ + public static final String COMPRESSOR_WAKEUP_INTERVAL = + "je.compressor.wakeupInterval"; + + /** + * The number of times to retry a compression run if a deadlock occurs. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>3</td><td>0</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String COMPRESSOR_DEADLOCK_RETRY = + "je.compressor.deadlockRetry"; + + /** + * The lock timeout for compressor transactions in microseconds. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td>
+ * <td>500 ms</td><td>0</td><td>75 min</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + */ + public static final String COMPRESSOR_LOCK_TIMEOUT = + "je.compressor.lockTimeout"; + + /** + * @deprecated as of 3.3.87. Compression of the root node no longer has + * any benefit and this feature has been removed. This parameter has no + * effect. + */ + public static final String COMPRESSOR_PURGE_ROOT = + "je.compressor.purgeRoot"; + + /** + * When eviction occurs, the evictor will push memory usage to this number + * of bytes below {@link #MAX_MEMORY}. No more than 50% of je.maxMemory + * will be evicted per eviction cycle, regardless of this setting. + * + *

        When using the shared cache feature, the value of this property is + * applied the first time the cache is set up. New environments that + * join the cache do not alter the cache setting.

        + * + *

        This parameter impacts + * how often background + * evictor threads are awoken as well as the size of latency spikes + * caused by + * critical + * eviction.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Long</td><td>No</td>
+ * <td>524288 (512K)</td><td>1024 (1K)</td><td>-none-</td></tr>
+ * </table>

        + * + * @see Cache Statistics: + * Eviction + * + * @see Cache + * Statistics: Critical Eviction + */ + public static final String EVICTOR_EVICT_BYTES = "je.evictor.evictBytes"; + + /** + * @deprecated as of JE 6.0. This parameter is ignored by the new, more + * efficient and more accurate evictor. + */ + public static final String EVICTOR_NODES_PER_SCAN = + "je.evictor.nodesPerScan"; + + /** + * At this percentage over the allotted cache, critical eviction will + * start. For example, if this parameter is 5, then when the cache size is + * 5% over its maximum or 105% full, critical eviction will start. + *

        + * Critical eviction is eviction performed in application threads as part + * of normal database access operations. Background eviction, on the other + * hand, is performed in JE evictor threads as well as during log cleaning + * and checkpointing. Background eviction is unconditionally started when + * the cache size exceeds its maximum. When critical eviction is also + * performed (concurrently with background eviction), it helps to ensure + * that the cache size does not continue to grow, but can have a negative + * impact on operation latency. + *

        + * By default this parameter is zero, which means that critical eviction + * will start as soon as the cache size exceeds its maximum. Some + * applications may wish to set this parameter to a non-zero value to + * improve operation latency, when eviction is a significant performance + * factor and latency requirements are not being satisfied. + *

        + * When setting this parameter to a non-zero value, for example 5, be sure + * to reserve enough heap memory for the cache size to be over its + * configured maximum, for example 105% full. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>0</td><td>0</td><td>1000</td></tr>
+ * </table>

+ * + * @see Cache + * Statistics: Critical Eviction + */ + public static final String EVICTOR_CRITICAL_PERCENTAGE = + "je.evictor.criticalPercentage"; + + /** + * @deprecated as of JE 4.1, since the single evictor thread has + * been replaced by a more robust thread pool. + */ + public static final String EVICTOR_DEADLOCK_RETRY = + "je.evictor.deadlockRetry"; + + /** + * @deprecated as of JE 6.0. This parameter is ignored by the new, + * more efficient and more accurate evictor. + */ + public static final String EVICTOR_LRU_ONLY = "je.evictor.lruOnly"; + + /** + * The number of LRU lists in the main JE cache. + * + *

        Ideally, all nodes managed by an LRU eviction policy should appear in + * a single LRU list, ordered by the "hotness" of each node. However, + * such a list is accessed very frequently by multiple threads, and can + * become a synchronization bottleneck. To avoid this problem, the + * evictor can employ multiple LRU lists. The nLRULists parameter + * specifies the number of LRU lists to be used. Increasing the number + * of LRU lists alleviates any potential synchronization bottleneck, but + * it also decreases the quality of the LRU approximation.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>4</td><td>1</td><td>32</td></tr>
+ * </table>

        + * + * @see Cache Statistics: LRU List + * Contention + */ + public static final String EVICTOR_N_LRU_LISTS = "je.evictor.nLRULists"; + + /** + * Call Thread.yield() at each check for cache overflow. This potentially + * improves GC performance, but little testing has been done and the actual + * benefit is unknown. + * + *

        When using the shared cache feature, the value of this property is + * applied the first time the cache is set up. New environments that + * join the cache do not alter the cache setting.

        + * + *

        This param is unlikely to be needed for tuning, but is sometimes + * useful for debugging and testing.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>false</td></tr>
+ * </table>

        + * + * @see Cache Statistics: + * Debugging + */ + public static final String EVICTOR_FORCED_YIELD = "je.evictor.forcedYield"; + + /** + * The minimum number of threads in the eviction thread pool. + *

        + * These threads help keep memory usage within cache bounds, offloading + * work from application threads. + *

        + * {@link #EVICTOR_CORE_THREADS}, {@link #EVICTOR_MAX_THREADS} and {@link + * #EVICTOR_KEEP_ALIVE} are used to configure the core, max and keepalive + * attributes for the {@link java.util.concurrent.ThreadPoolExecutor} which + * implements the eviction thread pool. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>1</td><td>0</td><td>Integer.MAX_VALUE</td></tr>
+ * </table>

        + */ + public static final String EVICTOR_CORE_THREADS = "je.evictor.coreThreads"; + + /** + * The maximum number of threads in the eviction thread pool. + *

+ * These threads help keep memory usage within cache bounds, offloading work + * from application threads. If the eviction thread pool receives more + * work, it will allocate up to this number of threads. These threads will + * terminate if they are idle for more than the time indicated by {@link + * #EVICTOR_KEEP_ALIVE}. + *

        + * {@link #EVICTOR_CORE_THREADS}, {@link #EVICTOR_MAX_THREADS} and {@link + * #EVICTOR_KEEP_ALIVE} are used to configure the core, max and keepalive + * attributes for the {@link java.util.concurrent.ThreadPoolExecutor} which + * implements the eviction thread pool. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>10</td><td>1</td><td>Integer.MAX_VALUE</td></tr>
+ * </table>

        + */ + public static final String EVICTOR_MAX_THREADS = "je.evictor.maxThreads"; + + /** + * The duration that excess threads in the eviction thread pool will stay + * idle; after this period, idle threads will terminate. + *

        + * {@link #EVICTOR_CORE_THREADS}, {@link #EVICTOR_MAX_THREADS} and {@link + * #EVICTOR_KEEP_ALIVE} are used to configure the core, max and keepalive + * attributes for the {@link java.util.concurrent.ThreadPoolExecutor} which + * implements the eviction thread pool. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td>
+ * <td>10 min</td><td>1 s</td><td>1 d</td></tr>
+ * </table>

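Taken together, these three parameters map directly onto a ThreadPoolExecutor's core size, maximum size and keep-alive time. A sketch with illustrative values, reusing the envConfig from the earlier sketch:

    envConfig.setConfigParam(EnvironmentConfig.EVICTOR_CORE_THREADS, "2");
    envConfig.setConfigParam(EnvironmentConfig.EVICTOR_MAX_THREADS, "8");
    envConfig.setConfigParam(EnvironmentConfig.EVICTOR_KEEP_ALIVE, "5 min");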
        + * + * @see Time Duration + * Properties + */ + public static final String EVICTOR_KEEP_ALIVE = "je.evictor.keepAlive"; + + /** + * Allow Bottom Internal Nodes (BINs) to be written in a delta format + * during eviction. Using a delta format will improve write and log + * cleaning performance. There is no known performance benefit to setting + * this parameter to false. + * + *

        This param is unlikely to be needed for tuning, but is sometimes + * useful for debugging and testing.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>true</td></tr>
+ * </table>

        + * + * @see Cache Statistics: + * Debugging + */ + public static final String EVICTOR_ALLOW_BIN_DELTAS = + "je.evictor.allowBinDeltas"; + + /** + * The off-heap evictor will attempt to keep memory usage this number of + * bytes below {@link #MAX_OFF_HEAP_MEMORY}. + *

+ * If this value is too small, memory usage may exceed the maximum and then + * "critical eviction" is needed, which will increase operation latency in + * the application threads. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Long</td><td>No</td>
+ * <td>52428800 (50MB)</td><td>1024 (1K)</td><td>-none-</td></tr>
+ * </table>

        + * + * @see Cache + * Statistics: Critical Eviction + */ + public static final String OFFHEAP_EVICT_BYTES = "je.offHeap.evictBytes"; + + /** + * The number of LRU lists in the off-heap JE cache. + * + *

        Ideally, all nodes managed by an LRU eviction policy should appear in + * a single LRU list, ordered by the "hotness" of each node. However, + * such a list is accessed very frequently by multiple threads, and can + * become a synchronization bottleneck. To avoid this problem, the + * evictor can employ multiple LRU lists. The nLRULists parameter + * specifies the number of LRU lists to be used. Increasing the number + * of LRU lists alleviates any potential synchronization bottleneck, but + * it also decreases the quality of the LRU approximation.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>4</td><td>1</td><td>32</td></tr>
+ * </table>

+ * + * @see Cache Statistics: LRU List + * Contention + */ + public static final String OFFHEAP_N_LRU_LISTS = "je.offHeap.nLRULists"; + + /** + * Can be used to add a checksum to each off-heap block when the block is + * written, and validate the checksum when the block is read, for debugging + * purposes. Setting this param to true adds memory and CPU overhead, and + * it should normally be set to false in a production environment. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>false</td></tr>
+ * </table>

        + * + * @see Cache Statistics: + * Debugging + */ + public static final String OFFHEAP_CHECKSUM = "je.offHeap.checksum"; + + /** + * The minimum number of threads in the off-heap eviction thread pool. + *

        + * These threads help keep memory usage within cache bounds, offloading + * work from application threads. + *

        + * {@link #OFFHEAP_CORE_THREADS}, {@link #OFFHEAP_MAX_THREADS} and {@link + * #OFFHEAP_KEEP_ALIVE} are used to configure the core, max and keepalive + * attributes for the {@link java.util.concurrent.ThreadPoolExecutor} which + * implements the eviction thread pool. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>1</td><td>0</td><td>Integer.MAX_VALUE</td></tr>
+ * </table>

        + */ + public static final String OFFHEAP_CORE_THREADS = "je.offHeap.coreThreads"; + + /** + * The maximum number of threads in the off-heap eviction thread pool. + *

+ * These threads help keep memory usage within cache bounds, offloading + * work from application threads. If the eviction thread pool receives + * more work, it will allocate up to this number of threads. These + * threads will terminate if they are idle for more than the time + * indicated by {@link #OFFHEAP_KEEP_ALIVE}. + *

        + * If the number of threads is too small, memory usage may exceed the + * maximum and then "critical eviction" is needed, which will increase + * operation latency in the application threads. + *

        + * {@link #OFFHEAP_CORE_THREADS}, {@link #OFFHEAP_MAX_THREADS} and {@link + * #OFFHEAP_KEEP_ALIVE} are used to configure the core, max and keepalive + * attributes for the {@link java.util.concurrent.ThreadPoolExecutor} which + * implements the eviction thread pool. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>3</td><td>1</td><td>Integer.MAX_VALUE</td></tr>
+ * </table>

        + */ + public static final String OFFHEAP_MAX_THREADS = "je.offHeap.maxThreads"; + + /** + * The duration that excess threads in the off-heap eviction thread pool + * will stay idle; after this period, idle threads will terminate. + *

        + * {@link #OFFHEAP_CORE_THREADS}, {@link #OFFHEAP_MAX_THREADS} and {@link + * #OFFHEAP_KEEP_ALIVE} are used to configure the core, max and keepalive + * attributes for the {@link java.util.concurrent.ThreadPoolExecutor} which + * implements the eviction thread pool. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td>
+ * <td>10 min</td><td>1 s</td><td>1 d</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + */ + public static final String OFFHEAP_KEEP_ALIVE = "je.offHeap.keepAlive"; + + /** + * Ask the checkpointer to run every time we write this many bytes to the + * log. If set, supersedes {@link #CHECKPOINTER_WAKEUP_INTERVAL}. To use + * time based checkpointing, set this to 0. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Long</td><td>No</td>
+ * <td>20000000 (20M)</td><td>0</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String CHECKPOINTER_BYTES_INTERVAL = + "je.checkpointer.bytesInterval"; + + /** + * The checkpointer wakeup interval in microseconds. By default, this + * is inactive and we wakeup the checkpointer as a function of the + * number of bytes written to the log ({@link + * #CHECKPOINTER_BYTES_INTERVAL}). + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td>
+ * <td>0</td><td>1 s</td><td>75 min</td></tr>
+ * </table>

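A sketch of switching from the default byte-based trigger to time-based checkpointing, per the rule above that a zero bytes interval enables the wakeup interval; the 5 min interval is an illustrative choice.

    envConfig.setConfigParam(
        EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, "0");
    envConfig.setConfigParam(
        EnvironmentConfig.CHECKPOINTER_WAKEUP_INTERVAL, "5 min");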
        + * + * @see Time Duration + * Properties + */ + public static final String CHECKPOINTER_WAKEUP_INTERVAL = + "je.checkpointer.wakeupInterval"; + + /** + * The number of times to retry a checkpoint if it runs into a deadlock. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>3</td><td>0</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String CHECKPOINTER_DEADLOCK_RETRY = + "je.checkpointer.deadlockRetry"; + + /** + * If true, the checkpointer uses more resources in order to complete the + * checkpoint in a shorter time interval. Btree latches are held and other + * threads are blocked for a longer period. When set to true, application + * response time may be longer during a checkpoint. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>false</td></tr>
+ * </table>

        + */ + public static final String CHECKPOINTER_HIGH_PRIORITY = + "je.checkpointer.highPriority"; + + /** + * The cleaner will keep the total disk space utilization percentage above + * this value. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>50</td><td>0</td><td>90</td></tr>
+ * </table>

        + */ + public static final String CLEANER_MIN_UTILIZATION = + "je.cleaner.minUtilization"; + + /** + * A log file will be cleaned if its utilization percentage is below this + * value, irrespective of total utilization. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>5</td><td>0</td><td>50</td></tr>
+ * </table>

        + */ + public static final String CLEANER_MIN_FILE_UTILIZATION = + "je.cleaner.minFileUtilization"; + + /** + * The cleaner checks disk utilization every time we write this many bytes + * to the log. If zero (and by default) it is set to either the {@link + * #LOG_FILE_MAX} value divided by four, or to 100 MB, whichever is + * smaller. + * + *

        When overriding the default value, use caution to ensure that the + * cleaner is woken frequently enough, so that reserved files are deleted + * quickly enough to avoid violating a disk limit.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Long</td><td>Yes</td>
+ * <td>0</td><td>0</td><td>-none-</td></tr>
+ * </table>

        + * + * @see #CLEANER_WAKEUP_INTERVAL + */ + public static final String CLEANER_BYTES_INTERVAL = + "je.cleaner.bytesInterval"; + + /** + * The cleaner checks whether cleaning is needed if this interval elapses + * without any writing, to handle the case where cleaning or checkpointing + * is necessary to reclaim disk space, but writing has stopped. This + * addresses the problem that {@link #CLEANER_BYTES_INTERVAL} may not cause + * cleaning, and {@link #CHECKPOINTER_BYTES_INTERVAL} may not cause + * checkpointing, when enough writing has not occurred to exceed these + * intervals. + * + *

        If this parameter is set to zero, the cleaner wakeup interval is + * disabled, and cleaning and checkpointing will occur only via {@link + * #CLEANER_BYTES_INTERVAL}, {@link #CHECKPOINTER_BYTES_INTERVAL}, and + * {@link #CHECKPOINTER_WAKEUP_INTERVAL}.

        + * + *

        For example, if a database were removed or truncated, or large + * records were deleted, the amount written to the log may not exceed + * CLEANER_BYTES_INTERVAL. If writing were to stop at that point, no + * cleaning would occur, if it were not for the wakeup interval.

        + * + *

        In addition, even when cleaning is performed, a checkpoint is + * additionally needed to reclaim disk space. This may not occur if + * {@link #CHECKPOINTER_BYTES_INTERVAL} or + * {@link #CHECKPOINTER_WAKEUP_INTERVAL} does not happen to cause a + * checkpoint after write operations have stopped. If files have been + * cleaned and a checkpoint is needed to reclaim space, and write + * operations have stopped, a checkpoint will be scheduled when the + * CLEANER_WAKEUP_INTERVAL elapses. The checkpoint will be performed in the + * JE checkpointer thread if it is not disabled, or when + * {@link Environment#checkpoint} is called.

        + * + *

        In test environments it is fairly common for application writing to + * stop, and then to expect cleaning to occur as a result of the last set + * of operations. This situation may also arise in production environments, + * for example, during repair of an out-of-disk situation.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td>
+ * <td>10 s</td><td>0</td><td>10 h</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + * + * @see #CLEANER_BYTES_INTERVAL + * + * @since 7.1 + */ + public static final String CLEANER_WAKEUP_INTERVAL = + "je.cleaner.wakeupInterval"; + + /** + * If true, the cleaner will fetch records to determine their size and more + * accurately calculate log utilization. Normally when a record is updated + * or deleted without first being read (sometimes called a blind + * delete/update), the size of the previous version of the record is + * unknown and therefore the cleaner's utilization calculations may be + * incorrect. Setting this parameter to true will cause a record to be + * read during a blind delete/update, in order to determine its size. This + * will ensure that the cleaner's utilization calculations are correct, but + * will cause more (potentially random) IO. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>false</td></tr>
+ * </table>

        + * + * @see #CLEANER_ADJUST_UTILIZATION + */ + public static final String CLEANER_FETCH_OBSOLETE_SIZE = + "je.cleaner.fetchObsoleteSize"; + + /** + * @deprecated in JE 6.3. Adjustments are no longer needed because LN log + * sizes have been stored in the Btree since JE 6.0. + */ + public static final String CLEANER_ADJUST_UTILIZATION = + "je.cleaner.adjustUtilization"; + + /** + * The number of times to retry cleaning if a deadlock occurs. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>3</td><td>0</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String CLEANER_DEADLOCK_RETRY = + "je.cleaner.deadlockRetry"; + + /** + * The lock timeout for cleaner transactions in microseconds. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td>
+ * <td>500 ms</td><td>0</td><td>75 min</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + */ + public static final String CLEANER_LOCK_TIMEOUT = "je.cleaner.lockTimeout"; + + /** + * If true (the default setting), the cleaner deletes log files after + * successful cleaning. + * + * This parameter may be set to false for diagnosing log cleaning problems. + * For example, if a bug causes a LOG_FILE_NOT_FOUND exception, when + * reproducing the problem it is often necessary to avoid deleting files so + * they can be used for diagnosis. When this parameter is false: + *
+ * <ul>
+ * <li>Rather than delete files that are successfully cleaned, the cleaner
+ * renames them.</li>
+ * <li>When renaming a file, its extension is changed from ".jdb" to ".del"
+ * and its last modification date is set to the current time.</li>
+ * <li>Depending on the setting of the {@link #CLEANER_USE_DELETED_DIR}
+ * parameter, the file is either renamed in its current data directory
+ * (the default), or moved into the "deleted" sub-directory.</li>
+ * </ul>

        + * When this parameter is set to false, disk usage may grow without bounds + * and the application is responsible for removing the cleaned files. It + * may be necessary to write a script for deleting the least recently + * cleaned files when disk usage is low. The .del extension and the last + * modification time can be leveraged to write such a script. The "deleted" + * sub-directory can be used to avoid granting write or delete permissions + * for the main data directory to the script. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>true</td></tr>
+ * </table>

        + */ + public static final String CLEANER_EXPUNGE = "je.cleaner.expunge"; + + /** + * When {@link #CLEANER_EXPUNGE} is false, the {@code + * CLEANER_USE_DELETED_DIR} parameter determines whether successfully + * cleaned files are moved to the "deleted" sub-directory. + * + * {@code CLEANER_USE_DELETED_DIR} applies only when {@link + * #CLEANER_EXPUNGE} is false. When {@link #CLEANER_EXPUNGE} is true, + * successfully cleaned files are deleted and the {@code + * CLEANER_USE_DELETED_DIR} parameter setting is ignored. + *

        + * When {@code CLEANER_USE_DELETED_DIR} is true (and {@code + * CLEANER_EXPUNGE} is false), the cleaner will move successfully cleaned + * data files (".jdb" files) to the "deleted" sub-directory of the + * Environment directory, in addition to changing the file extension to + * "*.del". In this case, the "deleted" sub-directory must have been + * created by the application before opening the Environment. This allows + * the application to control permissions on this sub-directory. When + * multiple data directories are used ({@link #LOG_N_DATA_DIRECTORIES}), a + * "deleted" sub-directory must be created under each data directory. Note + * that {@link java.io.File#renameTo(File)} is used to move the file, and + * this method may or may not support moving the file to a different volume + * (when the "deleted" directory is a file system link) on a particular + * platform. + *

        + * When {@code CLEANER_USE_DELETED_DIR} is false (and {@code + * CLEANER_EXPUNGE} is false), the cleaner will change the file extension + * of successfully cleaned data files from ".jdb" to ".del", but will not + * move the files to a different directory. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>false</td></tr>
+ * </table>

        + */ + public static final String CLEANER_USE_DELETED_DIR = + "je.cleaner.useDeletedDir"; + + /** + * The minimum age of a file (number of files between it and the active + * file) to qualify it for cleaning under any conditions. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>2</td><td>1</td><td>1000</td></tr>
+ * </table>

        + */ + public static final String CLEANER_MIN_AGE = "je.cleaner.minAge"; + + /** + * @deprecated in 7.0. No longer used because the cleaner no longer has a + * backlog. + */ + public static final String CLEANER_MAX_BATCH_FILES = + "je.cleaner.maxBatchFiles"; + + /** + * The read buffer size for cleaning. If zero (the default), then {@link + * #LOG_ITERATOR_READ_SIZE} value is used. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>0</td><td>128</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String CLEANER_READ_SIZE = "je.cleaner.readSize"; + + /** + * Tracking of detailed cleaning information will use no more than this + * percentage of the cache. The default value is 2% of {@link + * #MAX_MEMORY}. If 0 and {@link #SHARED_CACHE} is true, use 2% divided by + * N where N is the number of environments sharing the global cache. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>2</td><td>1</td><td>90</td></tr>
+ * </table>

        + */ + public static final String CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE = + "je.cleaner.detailMaxMemoryPercentage"; + + /** + * Specifies a list of files or file ranges to be cleaned at a time when no + * other log cleaning is necessary. This parameter is intended for use in + * forcing the cleaning of a large number of log files. File numbers are + * in hex and are comma separated or hyphen separated to specify ranges, + * e.g.: '9,a,b-d' will clean 5 files. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

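A sketch matching the '9,a,b-d' example above (file numbers in hex), reusing the envConfig from the earlier sketch:

    // Clean files 0x9, 0xa, 0xb, 0xc and 0xd when the cleaner is
    // otherwise idle.
    envConfig.setConfigParam(
        EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "9,a,b-d");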
        + */ + public static final String CLEANER_FORCE_CLEAN_FILES = + "je.cleaner.forceCleanFiles"; + + /** + * All log files having a log version prior to the specified version will + * be cleaned at a time when no other log cleaning is necessary. Intended + * for use in upgrading old format log files forward to the current log + * format version, e.g., to take advantage of format improvements; note + * that log upgrading is optional. The default value zero (0) specifies + * that no upgrading will occur. The value negative one (-1) specifies + * upgrading to the current log version. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>0</td><td>-1</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String CLEANER_UPGRADE_TO_LOG_VERSION = + "je.cleaner.upgradeToLogVersion"; + + /** + * The number of threads allocated by the cleaner for log file processing. + * If the cleaner backlog becomes large, try increasing this value. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>1</td><td>1</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String CLEANER_THREADS = "je.cleaner.threads"; + + /** + * The look ahead cache size for cleaning in bytes. Increasing this value + * can reduce the number of Btree lookups. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>Yes</td>
+ * <td>8192 (8K)</td><td>0</td><td>-none-</td></tr>
+ * </table>

        + */ + public static final String CLEANER_LOOK_AHEAD_CACHE_SIZE = + "je.cleaner.lookAheadCacheSize"; + + /** + * @deprecated This parameter is ignored and proactive migration is no + * longer supported due to its negative impact on eviction and Btree + * splits. To reduce a cleaner backlog, configure more cleaner threads. + */ + public static final String CLEANER_FOREGROUND_PROACTIVE_MIGRATION = + "je.cleaner.foregroundProactiveMigration"; + + /** + * @deprecated This parameter is ignored and proactive migration is no + * longer supported due to its negative impact on eviction and + * checkpointing. To reduce a cleaner backlog, configure more cleaner + * threads. + */ + public static final String CLEANER_BACKGROUND_PROACTIVE_MIGRATION = + "je.cleaner.backgroundProactiveMigration"; + + /** + * @deprecated This parameter is ignored and lazy migration is no longer + * supported due to its negative impact on eviction and checkpointing. + * To reduce a cleaner backlog, configure more cleaner threads. + */ + public static final String CLEANER_LAZY_MIGRATION = + "je.cleaner.lazyMigration"; + + /** + * The timeout for Disk Ordered Scan producer thread queue offers in + * milliseconds. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td>
+ * <td>10 s</td><td>0</td><td>75 min</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + */ + public static final String DOS_PRODUCER_QUEUE_TIMEOUT = + "je.env.diskOrderedScanLockTimeout"; + + /** + * Number of Lock Tables. Set this to a value other than 1 when an + * application has multiple threads performing concurrent JE operations. + * It should be set to a prime number, and in general not higher than the + * number of application threads performing JE operations. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ * <td>1</td><td>1</td><td>32767 (32K)</td></tr>
+ * </table>

        + */ + public static final String LOCK_N_LOCK_TABLES = "je.lock.nLockTables"; + + /** + * Configures the default lock timeout. It may be overridden on a + * per-transaction basis by calling + * {@link Transaction#setLockTimeout(long, TimeUnit)}. + * + *

        A value of zero disables lock timeouts. This is not recommended, even + * when the application expects that deadlocks will not occur or will be + * easily resolved. A lock timeout is a fall-back that guards against + * unexpected "live lock", unresponsive threads, or application failure to + * close a cursor or to commit or abort a transaction.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td>
+ * <td>500 ms</td><td>0</td><td>75 min</td></tr>
+ * </table>

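A sketch of the per-transaction override mentioned above; the 2-second timeout is illustrative, and TimeUnit is java.util.concurrent.TimeUnit.

    Transaction txn = env.beginTransaction(null, null);
    txn.setLockTimeout(2, TimeUnit.SECONDS);
    // ... perform operations under txn, then commit or abort ...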
        + * + * @see #setLockTimeout(long,TimeUnit) + * @see Time Duration + * Properties + */ + public static final String LOCK_TIMEOUT = "je.lock.timeout"; + + /** + * Whether to perform deadlock detection when a lock conflict occurs. + * By default, deadlock detection is enabled (this parameter is true) in + * order to reduce thread wait times when there are deadlocks. + *

        + * Deadlock detection is performed as follows. + *

+ * <ol>
+ * <li>When a lock is requested by a record read or write operation, JE
+ * checks for lock conflicts with another transaction or another thread
+ * performing a non-transactional operation. If there is no conflict, the
+ * lock is acquired and the operation returns normally.</li>
+ * <li>When there is a conflict, JE performs deadlock detection. However,
+ * before performing deadlock detection, JE waits for the
+ * {@link #LOCK_DEADLOCK_DETECT_DELAY} interval, if it is non-zero. This
+ * delay is useful for avoiding the overhead of deadlock detection when
+ * normal, short-lived contention (not a deadlock) is the reason for the
+ * conflict. If the lock is acquired during the delay, the thread wakes up
+ * and the operation returns normally.</li>
+ * <li>If a deadlock is detected, {@link DeadlockException} is thrown in
+ * one of the threads participating in the deadlock, called the "victim".
+ * The victim is chosen at random to prevent a repeated pattern of
+ * deadlocks, called "live lock". A non-victim thread that detects a
+ * deadlock will notify the victim and perform short delays, waiting for
+ * the deadlock to be broken; if the lock is acquired, the operation
+ * returns normally.</li>
+ * <li>It is possible for live lock to occur in spite of using random
+ * victim selection. It is also possible that a deadlock is not broken
+ * because the victim thread is unresponsive or the application fails to
+ * close a cursor or to commit or abort a transaction. In these cases, if
+ * the lock or transaction timeout expires without acquiring the lock, a
+ * {@code DeadlockException} is thrown for the last deadlock detected, in
+ * the thread that detected the deadlock. In this case, {@code
+ * DeadlockException} may be thrown by more than one thread participating
+ * in the deadlock.</li>
+ * <li>When no deadlock is detected, JE waits for the lock or transaction
+ * timeout to expire. If the lock is acquired during this delay, the
+ * thread wakes up and the operation returns normally.</li>
+ * <li>When the lock or transaction timeout expires without acquiring the
+ * lock, JE checks for deadlocks one final time. If a deadlock is
+ * detected, {@code DeadlockException} is thrown; otherwise,
+ * {@link LockTimeoutException} or {@link TransactionTimeoutException} is
+ * thrown.</li>
+ * </ol>

        + * Deadlock detection may be disabled (by setting this parameter to false) + * in applications that are known to be free of deadlocks, and this may + * provide a slight performance improvement in certain scenarios. However, + * this is not recommended because deadlock-free operation is difficult to + * guarantee. If deadlock detection is disabled, JE skips steps 2, 3 and 4 + * above. However, deadlock detection is always performed in the last step, + * and {@code DeadlockException} may be thrown. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>true</td></tr>
+ * </table>

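Because DeadlockException can surface in any participating thread, callers typically wrap transactional work in a retry loop. A minimal sketch, where doWork and MAX_RETRIES are hypothetical application code:

    for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
        Transaction txn = env.beginTransaction(null, null);
        boolean done = false;
        try {
            doWork(txn);              // hypothetical operation
            txn.commit();
            done = true;
        } catch (LockConflictException lce) {
            // DeadlockException extends LockConflictException; aborting
            // releases locks so other participants can make progress.
            txn.abort();
        }
        if (done) {
            break;
        }
    }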
        + * + * @since 7.1 + */ + public static final String LOCK_DEADLOCK_DETECT = "je.lock.deadlockDetect"; + + /** + * The delay after a lock conflict, before performing deadlock detection. + * + * This delay is used to avoid the overhead of deadlock detection when + * normal contention (not a deadlock) is the reason for the conflict. See + * {@link #LOCK_DEADLOCK_DETECT} for more information. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td>
+ * <td>0</td><td>0</td><td>75 min</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + * + * @since 7.1 + */ + public static final String LOCK_DEADLOCK_DETECT_DELAY = + "je.lock.deadlockDetectDelay"; + + /** + * Used in JE releases 3.4 through 6.4 to throw old-style lock exceptions + * for compatibility with JE release 3.3 and earlier. + * + * @deprecated since JE 6.5; has no effect, as if it were set to false. + */ + public static final String LOCK_OLD_LOCK_EXCEPTIONS = + "je.lock.oldLockExceptions"; + + /** + * Configures the transaction timeout. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td>
+ * <td>0</td><td>0</td><td>75 min</td></tr>
+ * </table>

        + * + * @see #setTxnTimeout + * @see Time Duration + * Properties + */ + public static final String TXN_TIMEOUT = "je.txn.timeout"; + + /** + * Configures all transactions for this environment to have Serializable + * (Degree 3) isolation. By setting Serializable isolation, phantoms will + * be prevented. By default transactions provide Repeatable Read + * isolation. + * + * The default is false for the database environment. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>false</td></tr>
+ * </table>

        + * + * @see #setTxnSerializableIsolation + */ + public static final String TXN_SERIALIZABLE_ISOLATION = + "je.txn.serializableIsolation"; + + /** + * Configures the default durability associated with transactions. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>Yes</td><td>null</td></tr>
+ * </table>

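A sketch using the string form parsed by Durability.parse; the value below (SYNC on the local machine, with the replication fields irrelevant for a standalone environment) is an illustrative choice.

    envConfig.setConfigParam(
        EnvironmentConfig.TXN_DURABILITY, "SYNC,NO_SYNC,NONE");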
        + * + * The format of the durability string is described at + * {@link Durability#parse(String)} + * + * @see Durability + * @see #setDurability + */ + public static final String TXN_DURABILITY = "je.txn.durability"; + + /** + * Set this parameter to true to add stacktrace information to deadlock + * (lock timeout) exception messages. The stack trace will show where each + * lock was taken. The default is false, and true should only be used + * during debugging because of the added memory/processing cost. This + * parameter is 'static' across all environments. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>false</td></tr>
+ * </table>

        + */ + public static final String TXN_DEADLOCK_STACK_TRACE = + "je.txn.deadlockStackTrace"; + + /** + * Dump the lock table when a lock timeout is encountered, for debugging + * assistance. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>Yes</td><td>false</td></tr>
+ * </table>

        + */ + public static final String TXN_DUMP_LOCKS = "je.txn.dumpLocks"; + + /** + * @deprecated in favor of FILE_LOGGING_LEVEL As of JE 4.0, + * use the standard java.util.logging configuration methodologies. To + * enable logging output to the je.info files, set + * com.sleepycat.je.util.FileHandler.level = {@literal } through the + * java.util.logging configuration file, or through the + * java.util.logging.LogManager. To set the handler level programmatically, + * set "com.sleepycat.je.util.FileHandler.level" in the EnvironmentConfig + * object. + */ + public static final String TRACE_FILE = "java.util.logging.FileHandler.on"; + + /** + * @deprecated in favor of CONSOLE_LOGGING_LEVEL As of JE + * 4.0, use the standard java.util.logging configuration + * methodologies. To enable console output, set + * com.sleepycat.je.util.ConsoleHandler.level = {@literal } through + * the java.util.logging configuration file, or through the + * java.util.logging.LogManager. To set the handler level programmatically, + * set "com.sleepycat.je.util.ConsoleHandler.level" in the + * EnvironmentConfig object. + */ + public static final String TRACE_CONSOLE = + "java.util.logging.ConsoleHandler.on"; + + /** + * @deprecated As of JE 4.0, event tracing to the .jdb files has been + * separated from the java.util.logging mechanism. This parameter has + * no effect. + */ + public static final String TRACE_DB = "java.util.logging.DbLogHandler.on"; + + /** + * @deprecated As of JE 4.0, use the standard java.util.logging + * configuration methodologies. To set the FileHandler output file size, + * set com.sleepycat.je.util.FileHandler.limit = {@literal } + * through the java.util.logging configuration file, or through the + * java.util.logging.LogManager. + */ + public static final String TRACE_FILE_LIMIT = + "java.util.logging.FileHandler.limit"; + + /** + * @deprecated As of JE 4.0, use the standard java.util.logging + * configuration methodologies. To set the FileHandler output file count, + * set com.sleepycat.je.util.FileHandler.count = {@literal } + * through the java.util.logging configuration file, or through the + * java.util.logging.LogManager. + */ + public static final String TRACE_FILE_COUNT = + "java.util.logging.FileHandler.count"; + + /** + * @deprecated As of JE 4.0, use the standard java.util.logging + * configuration methodologies. Set logging levels using class names + * through the java.util.logging configuration file, or through the + * java.util.logging.LogManager. + */ + public static final String TRACE_LEVEL = "java.util.logging.level"; + + /** + * Trace messages equal and above this level will be logged to the + * console. Value should be one of the predefined + * java.util.logging.Level values. + *

        + * Setting this parameter in the je.properties file or through {@link + * EnvironmentConfig#setConfigParam} is analogous to setting + * the property in the java.util.logging properties file or MBean. + * It is preferred to use the standard java.util.logging mechanisms for + * configuring java.util.logging.Handler, but this JE parameter is provided + * because the java.util.logging API doesn't provide a method to set + * handler levels programmatically. + * + *

Name: {@value}    Type: String    Mutable: No    Default: "OFF"

        + * @see Chapter 12. Logging + */ + public static final String CONSOLE_LOGGING_LEVEL = + "com.sleepycat.je.util.ConsoleHandler.level"; + + /** + * Trace messages equal and above this level will be logged to the je.info + * file, which is in the Environment home directory. Value should + * be one of the predefined java.util.logging.Level values. + *

+ * Setting this parameter in the je.properties file or through {@link + * EnvironmentConfig#setConfigParam} is analogous to setting + * the property in the java.util.logging properties file or MBean. + * It is preferred to use the standard java.util.logging mechanisms for + * configuring java.util.logging.Handler, but this JE parameter is provided + * because the java.util.logging API doesn't provide a method to set + * handler levels programmatically. + *

Name: {@value}    Type: String    Mutable: No    Default: "INFO"

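A hedged sketch (not in the original source) of configuring both handler levels; because neither parameter is mutable, they must be set before the Environment is opened:

    // Sketch: send INFO and above to the je.info file, but only SEVERE
    // messages to the console. Values are java.util.logging.Level names.
    EnvironmentConfig config = new EnvironmentConfig();
    config.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL, "INFO");
    config.setConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL, "SEVERE");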
        + * @see Chapter 12. Logging + */ + public static final String FILE_LOGGING_LEVEL = + "com.sleepycat.je.util.FileHandler.level"; + + /** + * @deprecated As of JE 4.0, use the standard java.util.logging + * configuration methodologies. To see locking logging, set + * com.sleepycat.je.txn.level = {@literal } through the + * java.util.logging configuration file, or through the + * java.util.logging.LogManager. + */ + public static final String TRACE_LEVEL_LOCK_MANAGER = + "java.util.logging.level.lockMgr"; + + /** + * @deprecated As of JE 4.0, use the standard java.util.logging + * configuration methodologies. To see recovery logging, set + * com.sleepycat.je.recovery.level = {@literal } through the + * java.util.logging configuration file, or through the + * java.util.logging.LogManager. + */ + public static final String TRACE_LEVEL_RECOVERY = + "java.util.logging.level.recovery"; + + /** + * @deprecated As of JE 4.0, use the standard java.util.logging + * configuration methodologies. To see evictor logging, set + * com.sleepycat.je.evictor.level = {@literal } through the + * java.util.logging configuration file, or through the + * java.util.logging.LogManager. + */ + public static final String TRACE_LEVEL_EVICTOR = + "java.util.logging.level.evictor"; + + /** + * @deprecated As of JE 4.0, use the standard java.util.logging + * configuration methodologies. To see cleaner logging, set + * com.sleepycat.je.cleaner.level = {@literal } through the + * java.util.logging configuration file, or through the + * java.util.logging.LogManager. + */ + public static final String TRACE_LEVEL_CLEANER = + "java.util.logging.level.cleaner"; + + /** + * If environment startup exceeds this duration, startup statistics are + * logged and can be found in the je.info file. + * + *

Name: {@value}    Type: {@link Duration}    Mutable: No    Default: 5 min    Minimum: 0    Maximum: none

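For illustration (a sketch, not original source): time-duration parameters such as this one accept a "number unit" string, so lowering the threshold to one minute could look like:

    // Sketch: dump startup statistics when recovery takes over a minute.
    EnvironmentConfig config = new EnvironmentConfig();
    config.setConfigParam(EnvironmentConfig.STARTUP_DUMP_THRESHOLD, "1 min");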
+ * + * @see Time Duration + * Properties + */ + public static final String STARTUP_DUMP_THRESHOLD = + "je.env.startupThreshold"; + + /** + * If true, collect and log statistics. The statistics are logged in CSV + * format and written to the log file at a user-specified interval. + * The logging occurs per-Environment when the Environment is opened + * in read/write mode. Statistics are written to a file named je.stat.csv. + * Successively older files are named by adding "0", "1", "2", etc into + * the file name. The file name format is je.stat.[version number].csv. + * + *

Name: {@value}    Type: Boolean    Mutable: Yes    Default: True    Minimum: 0    Maximum: none

        + */ + public static final String STATS_COLLECT = + "je.stats.collect"; + + /** + * Maximum number of statistics log files to retain. The rotating set of + * files, as each file reaches a given size limit, is closed, rotated out, + * and a new file opened. The name of the log file is je.stat.csv. + * Successively older files are named by adding "0", "1", "2", etc into + * the file name. The file name format is je.stat.[version number].csv. + * + *

Name: {@value}    Type: Integer    Mutable: Yes    Default: 10    Minimum: 1    Maximum: -none-

        + */ + public static final String STATS_MAX_FILES = + "je.stats.max.files"; + + /** + * Log file maximum row count for Stat collection. When the number of + * rows in the statistics file reaches the maximum row count, the file + * is closed, rotated out, and a new file opened. The name of the log + * file is je.stat.csv. Successively older files are named by adding "0", + * "1", "2", etc into the file name. The file name format is + * je.stat.[version number].csv. + * + *

Name: {@value}    Type: Integer    Mutable: Yes    Default: 1440    Minimum: 1    Maximum: -none-

        + */ + public static final String STATS_FILE_ROW_COUNT = + "je.stats.file.row.count"; + + /** + * The duration of the statistics capture interval. Statistics are captured + * and written to the log file at this interval. + * + *

Name: {@value}    Type: {@link Duration}    Mutable: Yes    Default: 1 min    Minimum: 1 s    Maximum: 24 d

        + * + * @see Time Duration + * Properties + */ + public static final String STATS_COLLECT_INTERVAL = + "je.stats.collect.interval"; + + /** + * The directory to save the statistics log file. + * + *

Name: {@value}    Type: String    Mutable: No    Default: "NULL" -> Environment home directory

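A combined sketch (illustrative, not original source) of the statistics parameters above; the directory path used here is hypothetical:

    // Sketch: capture stats to je.stat.csv every 30 seconds, keep at most
    // five rotated files, and write them outside the environment home.
    EnvironmentConfig config = new EnvironmentConfig();
    config.setConfigParam(EnvironmentConfig.STATS_COLLECT, "true");
    config.setConfigParam(EnvironmentConfig.STATS_COLLECT_INTERVAL, "30 s");
    config.setConfigParam(EnvironmentConfig.STATS_MAX_FILES, "5");
    config.setConfigParam(EnvironmentConfig.STATS_FILE_DIRECTORY, "/var/tmp/je-stats");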
        + */ + public static final String STATS_FILE_DIRECTORY = + "je.stats.file.directory"; + + /** + * For unit testing, to prevent using the utilization profile and + * expiration profile DB. + */ + private transient boolean createUP = true; + private transient boolean createEP = true; + + /** + * For unit testing, to prevent writing utilization data during checkpoint. + */ + private transient boolean checkpointUP = true; + + private boolean allowCreate = false; + + /** + * For unit testing, to set readCommitted as the default. + */ + private transient boolean txnReadCommitted = false; + + private String nodeName = null; + + /** + * The loggingHandler is an instance and cannot be serialized. + */ + private transient Handler loggingHandler; + + private transient + ProgressListener recoveryProgressListener; + + private transient ClassLoader classLoader; + + private transient PreloadConfig dupConvertPreloadConfig; + + private CustomStats customStats; + + /** + * Creates an EnvironmentConfig initialized with the system default + * settings. + */ + public EnvironmentConfig() { + super(); + } + + /** + * Creates an EnvironmentConfig which includes the properties specified in + * the properties parameter. + * + * @param properties Supported properties are described in this class + * + * @throws IllegalArgumentException If any properties read from the + * properties param are invalid. + */ + public EnvironmentConfig(Properties properties) + throws IllegalArgumentException { + + super(properties); + } + + /** + * If true, creates the database environment if it doesn't already exist. + * + * @param allowCreate If true, the database environment is created if it + * doesn't already exist. + * + * @return this + */ + public EnvironmentConfig setAllowCreate(boolean allowCreate) { + + setAllowCreateVoid(allowCreate); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setAllowCreateVoid(boolean allowCreate) { + this.allowCreate = allowCreate; + } + + /** + * Returns a flag that specifies if we may create this environment. + * + * @return true if we may create this environment. + */ + public boolean getAllowCreate() { + + return allowCreate; + } + + /** + * Convenience method for setting {@link EnvironmentConfig#LOCK_TIMEOUT}. + * + * @param timeout The lock timeout for all transactional and + * non-transactional operations, or zero to disable lock timeouts. + * + * @param unit the {@code TimeUnit} of the timeout value. May be null only + * if timeout is zero. + * + * @return this + * + * @throws IllegalArgumentException if the value of timeout is invalid + * + * @see EnvironmentConfig#LOCK_TIMEOUT + * @see Transaction#setLockTimeout(long,TimeUnit) + */ + public EnvironmentConfig setLockTimeout(long timeout, TimeUnit unit) + throws IllegalArgumentException { + + setLockTimeoutVoid(timeout, unit); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLockTimeoutVoid(long timeout, TimeUnit unit) + throws IllegalArgumentException { + + DbConfigManager.setDurationVal(props, EnvironmentParams.LOCK_TIMEOUT, + timeout, unit, validateParams); + } + + /** + * Configures the lock timeout, in microseconds. This method is equivalent + * to: + * + *
        setLockTimeout(long, TimeUnit.MICROSECONDS);
        + * + * @deprecated as of 4.0, replaced by {@link #setLockTimeout(long, + * TimeUnit)}. + */ + public EnvironmentConfig setLockTimeout(long timeout) + throws IllegalArgumentException { + + setLockTimeoutVoid(timeout); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLockTimeoutVoid(long timeout) + throws IllegalArgumentException { + + setLockTimeout(timeout, TimeUnit.MICROSECONDS); + } + + /** + * Returns the lock timeout setting. + * + * @param unit the {@code TimeUnit} of the returned value. May not be null. + * + * A value of 0 means no timeout is set. + */ + public long getLockTimeout(TimeUnit unit) { + + return DbConfigManager.getDurationVal + (props, EnvironmentParams.LOCK_TIMEOUT, unit); + } + + /** + * Returns the lock timeout setting, in microseconds. This method is + * equivalent to: + * + *
        getLockTimeout(TimeUnit.MICROSECONDS);
        + * + * @deprecated as of 4.0, replaced by {@link #getLockTimeout(TimeUnit)}. + */ + public long getLockTimeout() { + return getLockTimeout(TimeUnit.MICROSECONDS); + } + + /** + * Convenience method for setting {@link EnvironmentConfig#ENV_READ_ONLY}. + * + * @param readOnly If true, configure the database environment to be read + * only, and any attempt to modify a database will fail. + * + * @return this + */ + public EnvironmentConfig setReadOnly(boolean readOnly) { + + setReadOnlyVoid(readOnly); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReadOnlyVoid(boolean readOnly) { + + DbConfigManager.setBooleanVal(props, EnvironmentParams.ENV_RDONLY, + readOnly, validateParams); + } + + /** + * Returns true if the database environment is configured to be read only. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the database environment is configured to be read only. + */ + public boolean getReadOnly() { + + return DbConfigManager.getBooleanVal(props, + EnvironmentParams.ENV_RDONLY); + } + + /** + * Convenience method for setting + * {@link EnvironmentConfig#ENV_IS_TRANSACTIONAL}. + * + * @param transactional If true, configure the database environment for + * transactions. + * + * @return this + */ + public EnvironmentConfig setTransactional(boolean transactional) { + + setTransactionalVoid(transactional); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTransactionalVoid(boolean transactional) { + + DbConfigManager.setBooleanVal(props, EnvironmentParams.ENV_INIT_TXN, + transactional, validateParams); + } + + /** + * Returns true if the database environment is configured for transactions. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the database environment is configured for transactions. + */ + public boolean getTransactional() { + + return DbConfigManager.getBooleanVal(props, + EnvironmentParams.ENV_INIT_TXN); + } + + /** + * Convenience method for setting + * {@link EnvironmentConfig#ENV_IS_LOCKING}. + * + * @param locking If false, configure the database environment for no + * locking. The default is true. + * + * @return this + */ + public EnvironmentConfig setLocking(boolean locking) { + + setLockingVoid(locking); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLockingVoid(boolean locking) { + + DbConfigManager.setBooleanVal(props, + EnvironmentParams.ENV_INIT_LOCKING, + locking, validateParams); + } + + /** + * Returns true if the database environment is configured for locking. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the database environment is configured for locking. + */ + public boolean getLocking() { + + return DbConfigManager.getBooleanVal + (props, EnvironmentParams.ENV_INIT_LOCKING); + } + + /** + * A convenience method for setting {@link EnvironmentConfig#TXN_TIMEOUT}. + * + * @param timeout The transaction timeout. A value of 0 turns off + * transaction timeouts. + * + * @param unit the {@code TimeUnit} of the timeout value. May be null only + * if timeout is zero. + * + * @return this + * + * @throws IllegalArgumentException If the value of timeout is negative + * + * @see EnvironmentConfig#TXN_TIMEOUT + * @see Transaction#setTxnTimeout + */ + public EnvironmentConfig setTxnTimeout(long timeout, TimeUnit unit) + throws IllegalArgumentException { + + setTxnTimeoutVoid(timeout, unit); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTxnTimeoutVoid(long timeout, TimeUnit unit) + throws IllegalArgumentException { + + DbConfigManager.setDurationVal(props, EnvironmentParams.TXN_TIMEOUT, + timeout, unit, validateParams); + } + + /** + * Configures the transaction timeout, in microseconds. This method is + * equivalent to: + * + *
        setTxnTimeout(long, TimeUnit.MICROSECONDS);
        + * + * @deprecated as of 4.0, replaced by {@link #setTxnTimeout(long, + * TimeUnit)}. + */ + public EnvironmentConfig setTxnTimeout(long timeout) + throws IllegalArgumentException { + + setTxnTimeoutVoid(timeout); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTxnTimeoutVoid(long timeout) + throws IllegalArgumentException { + + setTxnTimeout(timeout, TimeUnit.MICROSECONDS); + } + + /** + * A convenience method for getting {@link EnvironmentConfig#TXN_TIMEOUT}. + * + *

        A value of 0 means transaction timeouts are not configured.

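A minimal sketch (not original source) of the convenience setter and getter pair, assuming java.util.concurrent.TimeUnit is imported:

    // Sketch: give transactions a 500 ms timeout and read it back.
    EnvironmentConfig config = new EnvironmentConfig();
    config.setTransactional(true);
    config.setTxnTimeout(500, TimeUnit.MILLISECONDS);
    long micros = config.getTxnTimeout(TimeUnit.MICROSECONDS); // 500000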
        + * + * @param unit the {@code TimeUnit} of the returned value. May not be null. + * + * @return The transaction timeout. + */ + public long getTxnTimeout(TimeUnit unit) { + return DbConfigManager.getDurationVal + (props, EnvironmentParams.TXN_TIMEOUT, unit); + } + + /** + * Returns the transaction timeout, in microseconds. This method is + * equivalent to: + * + *
        getTxnTimeout(TimeUnit.MICROSECONDS);
        + * + * @deprecated as of 4.0, replaced by {@link #getTxnTimeout(TimeUnit)}. + */ + public long getTxnTimeout() { + return getTxnTimeout(TimeUnit.MICROSECONDS); + } + + /** + * A convenience method for setting + * {@link EnvironmentConfig#TXN_SERIALIZABLE_ISOLATION}. + * + * @see LockMode + * + * @return this + */ + public EnvironmentConfig + setTxnSerializableIsolation(boolean txnSerializableIsolation) { + + setTxnSerializableIsolationVoid(txnSerializableIsolation); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void + setTxnSerializableIsolationVoid(boolean txnSerializableIsolation) { + + DbConfigManager.setBooleanVal + (props, EnvironmentParams.TXN_SERIALIZABLE_ISOLATION, + txnSerializableIsolation, validateParams); + } + + /** + * A convenience method for getting + * {@link EnvironmentConfig#TXN_SERIALIZABLE_ISOLATION}. + * + * @return true if the environment has been configured to have repeatable + * read isolation. + * + * @see LockMode + */ + public boolean getTxnSerializableIsolation() { + + return DbConfigManager.getBooleanVal + (props, EnvironmentParams.TXN_SERIALIZABLE_ISOLATION); + } + + /** + * For unit testing, sets readCommitted as the default. + */ + void setTxnReadCommitted(boolean txnReadCommitted) { + + this.txnReadCommitted = txnReadCommitted; + } + + /** + * For unit testing, to set readCommitted as the default. + */ + boolean getTxnReadCommitted() { + + return txnReadCommitted; + } + + /** + * A convenience method for setting the + * {@link EnvironmentConfig#SHARED_CACHE} parameter. + * + * @param sharedCache If true, the shared cache is used by this + * environment. + * + * @return this + */ + public EnvironmentConfig setSharedCache(boolean sharedCache) { + + setSharedCacheVoid(sharedCache); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSharedCacheVoid(boolean sharedCache) { + + DbConfigManager.setBooleanVal + (props, EnvironmentParams.ENV_SHARED_CACHE, sharedCache, + validateParams); + } + + /** + * A convenience method for getting the + * {@link EnvironmentConfig#SHARED_CACHE} parameter. + * + * @return true if the shared cache is used by this environment. @see + * #setSharedCache + */ + public boolean getSharedCache() { + return DbConfigManager.getBooleanVal + (props, EnvironmentParams.ENV_SHARED_CACHE); + } + + /** + * Sets the user defined nodeName for the Environment. If set, exception + * messages, logging messages, and thread names will have this nodeName + * included in them. If a user has multiple Environments in a single JVM, + * setting this to a string unique to each Environment may make it easier + * to diagnose certain exception conditions as well as thread dumps. + * + * @return this + */ + public EnvironmentConfig setNodeName(String nodeName) { + setNodeNameVoid(nodeName); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeNameVoid(String nodeName) { + this.nodeName = nodeName; + } + + /** + * Returns the user defined nodeName for the Environment. + */ + public String getNodeName() { + return nodeName; + } + + /** + * Sets the custom statistics object. + * + * @return this + */ + public EnvironmentConfig setCustomStats(CustomStats customStats) { + this.customStats = customStats; + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. 
+ */ + public void setCustomStatsVoid(CustomStats customStats) { + this.customStats = customStats; + } + + /** + * Gets the custom statistics object. + * + * @return customStats + */ + public CustomStats getCustomStats() { + return customStats; + } + + /** + * Set a java.util.logging.Handler which will be used by all + * java.util.logging.Loggers instantiated by this Environment. This lets + * the application specify a handler which + *
          + *
        • requires a constructor with arguments
        • is specific to this environment, which is important if the + * application is using multiple environments within the same process. + *
        + * Note that {@link Handler} is not serializable, and the logging + * handler should be set within the same process. + */ + public EnvironmentConfig setLoggingHandler(Handler handler) { + setLoggingHandlerVoid(handler); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLoggingHandlerVoid(Handler handler){ + loggingHandler = handler; + } + + /** + * Returns the custom java.util.logging.Handler specified by the + * application. + */ + public Handler getLoggingHandler() { + return loggingHandler; + } + + /* Documentation inherited from EnvironmentMutableConfig.setConfigParam. */ + @Override + public EnvironmentConfig setConfigParam(String paramName, String value) + throws IllegalArgumentException { + + DbConfigManager.setConfigParam(props, + paramName, + value, + false, /* requireMutablity */ + validateParams, + false /* forReplication */, + true /* verifyForReplication */); + return this; + } + + /** + * Configure the environment to make periodic calls to a ProgressListener to + * provide feedback on environment startup (recovery). The + * ProgressListener.progress() method is called at different stages of + * the recovery process. See {@link RecoveryProgress} for information about + * those stages. + *
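A sketch (not original source) combining both hooks; it assumes ProgressListener declares the single progress(phase, n, total) callback named in the text above, and returns true so startup continues:

    // Sketch: per-environment logging handler plus recovery feedback.
    EnvironmentConfig config = new EnvironmentConfig();
    config.setAllowCreate(true);
    config.setLoggingHandler(new java.util.logging.ConsoleHandler());
    config.setRecoveryProgressListener((phase, n, total) -> {
        System.out.println("recovery " + phase + " " + n + "/" + total);
        return true; // returning false aborts environment startup
    });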

        + * When using progress listeners, review the information at {@link + * ProgressListener#progress} to avoid any unintended disruption to + * environment startup. + * @param progressListener The ProgressListener to callback during + * environment startup (recovery). + */ + public EnvironmentConfig setRecoveryProgressListener + (final ProgressListener progressListener) { + setRecoveryProgressListenerVoid(progressListener); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setRecoveryProgressListenerVoid + (final ProgressListener progressListener) { + this.recoveryProgressListener = progressListener; + } + + /** + * Return the ProgressListener to be used at this environment startup. + */ + public ProgressListener getRecoveryProgressListener() { + return recoveryProgressListener; + } + + /** + * Configure the environment to use a specified ClassLoader for loading + * user-supplied classes by name. + */ + public EnvironmentConfig setClassLoader(final ClassLoader classLoader) { + setClassLoaderVoid(classLoader); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setClassLoaderVoid(final ClassLoader classLoader) { + this.classLoader = classLoader; + } + + /** + * Returns the ClassLoader for loading user-supplied classes by name, or + * null if no specified ClassLoader is configured. + */ + public ClassLoader getClassLoader() { + return classLoader; + } + + /** + * @hidden + * Configure the environment to use a specified PreloadConfig for + * duplicates database conversion. + */ + public EnvironmentConfig + setDupConvertPreloadConfig(final PreloadConfig preloadConfig) { + setDupConvertPreloadConfigVoid(preloadConfig); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void + setDupConvertPreloadConfigVoid(final PreloadConfig preloadConfig) { + this.dupConvertPreloadConfig = preloadConfig; + } + + /** + * @hidden + * Returns the PreloadConfig for duplicates database conversion, or + * null if no PreloadConfig is configured. + */ + public PreloadConfig getDupConvertPreloadConfig() { + return dupConvertPreloadConfig; + } + + /** + * For unit testing, to prevent use of the utilization profile DB. + */ + void setCreateUP(boolean createUP) { + this.createUP = createUP; + } + + /** + * For unit testing, to prevent use of the utilization profile DB. + */ + boolean getCreateUP() { + return createUP; + } + + /** + * For unit testing, to prevent use of the expiration profile DB. + */ + void setCreateEP(boolean createUP) { + this.createEP = createUP; + } + + /** + * For unit testing, to prevent use of the expiration profile DB. + */ + boolean getCreateEP() { + return createEP; + } + + /** + * For unit testing, to prevent writing utilization data during checkpoint. + */ + void setCheckpointUP(boolean checkpointUP) { + this.checkpointUP = checkpointUP; + } + + /** + * For unit testing, to prevent writing utilization data during checkpoint. + */ + boolean getCheckpointUP() { + return checkpointUP; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public EnvironmentConfig clone() { + return (EnvironmentConfig) super.clone(); + } + + /** + * Display configuration values. 
+ */ + @Override + public String toString() { + return " nodeName=" + nodeName + + " allowCreate=" + allowCreate + + " recoveryProgressListener=" + + (recoveryProgressListener != null) + + " classLoader=" + (classLoader != null) + + " customStats=" + (customStats != null) + + super.toString(); + } +} diff --git a/src/com/sleepycat/je/EnvironmentConfigBeanInfo.java b/src/com/sleepycat/je/EnvironmentConfigBeanInfo.java new file mode 100644 index 0000000..d81a9c0 --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentConfigBeanInfo.java @@ -0,0 +1,35 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class EnvironmentConfigBeanInfo + extends EnvironmentMutableConfigBeanInfo { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(EnvironmentConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(EnvironmentConfig.class); + } +} diff --git a/src/com/sleepycat/je/EnvironmentFailureException.java b/src/com/sleepycat/je/EnvironmentFailureException.java new file mode 100644 index 0000000..3cb7f86 --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentFailureException.java @@ -0,0 +1,499 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.PrintStream; + +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Indicates that a failure has occurred that could impact the {@code + * Environment} as a whole. For failures that impact only the current + * operation and/or transaction, see {@link OperationFailureException}). For + * an overview of all exceptions thrown by JE, see {@link DatabaseException}. + * + *

        Depending on the nature of the failure, this exception may indicate that + * {@link Environment#close} must be called. The application should catch + * {@code EnvironmentFailureException} and then call {@link + * Environment#isValid}. If {@code false} is returned, all {@code Environment} + * handles (instances) must be closed and re-opened in order to run recovery + * and continue operating. If {@code true} is returned, the {@code + * Environment} can continue operating without being closed and re-opened. + * Also note that {@link Environment#isValid} may be called at any time, not + * just during exception handling.

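A hedged sketch (not original source) of the handling pattern just described; env, envHome, envConfig, database, key, and data are assumed to exist:

    // Sketch: decide whether the environment must be re-opened.
    try {
        database.put(null, key, data);          // any JE operation
    } catch (EnvironmentFailureException efe) {
        if (!env.isValid()) {
            env.close();                        // close every handle
            env = new Environment(envHome, envConfig); // re-open runs recovery
        }
        // else: the environment is still usable; handle or retry.
    }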
        + * + *

        The use of the {@link Environment#isValid} method allows JE to determine + * dynamically whether the failure requires recovery or not, and allows for + * this determination to change in future releases. Over time, internal + * improvements to error handling may allow more error conditions to be handled + * without invalidating the {@code Environment}.

        + * + *

        (Although this exception class extends {@link RunRecoveryException}, it + * does not always indicate that recovery is necessary, as described above. + * {@code RunRecoveryException} has been deprecated and {@code + * EnvironmentFailureException} should be used instead.)

        + * + *

        If an {@code EnvironmentFailureException} consistently occurs soon after + * opening the Environment, this may indicate a persistent problem. It may + * indicate a system problem or a persistent storage problem. In this case, + * human intervention is normally required and restoring from a backup may be + * necessary.

        + * + *

        Note that subclasses of {@code EnvironmentFailureException} indicate how + * to handle the exception in more specific ways.

        + *
          + *
        • If {@code Thread.interrupt} is called for a thread performing JE + * operations, a {@link ThreadInterruptedException} is thrown. Since + * interrupting a thread is intentional, it does not indicate a persistent + * problem and human intervention is not normally required. + *
        • If an {@code IOException} occurs while writing to the JE log, a + * {@link LogWriteException} is thrown. Although an {@code IOException} can + * occur for different reasons, it is a hint that the disk may be full and + * applications may wish to attempt recovery after making more disk space + * available. + *
        • For replicated environments, see the subclasses of {@code + * EnvironmentFailureException} in the {@link com.sleepycat.je.rep} package for + * more information. Such exceptions may require special handling. + *
        + * + *

        If {@link Environment#close} is not called after an {@code + * EnvironmentFailureException} invalidates the {@code Environment}, all + * subsequent method calls for the {@code Environment} will throw the same + * exception. This provides more than one opportunity to catch and handle the + * specific exception subclass that caused the failure.

        + */ +@SuppressWarnings("deprecation") +public class EnvironmentFailureException extends RunRecoveryException { + + /* + * Classes that extend EnvironmentFailureException should be aware that + * their constructors should not be seen as atomic. If the failure reason + * mandates it, the environment may be invalidated. At invalidation time, + * the exception is saved within the environment as the precipitating + * failure, and may be seen and used by other threads, and the sub class + * instance may be seen before construction is complete. The subclass + * should take care if it has any fields that are initialized in the + * constructor, after the call to super(). + * + * Any overloadings of getMessage() should also assume that they may be + * called asynchronously before the subclass if fully initialized. + */ + + private static final long serialVersionUID = 1; + + private volatile boolean alreadyThrown; + private EnvironmentFailureReason reason; + + /** + * Only used by makeJavaErrorWrapper. + */ + private EnvironmentFailureException(EnvironmentFailureReason reason) { + super(reason.toString()); + this.reason = reason; + } + + /** + * Only used by unexpectedState and unexpectedException. + */ + private EnvironmentFailureException(EnvironmentFailureReason reason, + String message, + Throwable cause) { + this(null /*envImpl*/, reason, message, cause); + } + + /** + * For internal use only. + * @hidden + */ + public EnvironmentFailureException(EnvironmentImpl envImpl, + EnvironmentFailureReason reason) { + this(envImpl, reason, null /*message*/, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + public EnvironmentFailureException(EnvironmentImpl envImpl, + EnvironmentFailureReason reason, + Throwable cause) { + this(envImpl, reason, null /*message*/, cause); + } + + /** + * For internal use only. + * @hidden + */ + public EnvironmentFailureException(EnvironmentImpl envImpl, + EnvironmentFailureReason reason, + String message) { + this(envImpl, reason, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + public EnvironmentFailureException(EnvironmentImpl envImpl, + EnvironmentFailureReason reason, + String message, + Throwable cause) { + super(makeMsg(envImpl, reason, message, cause), cause); + this.reason = reason; + + if (reason.invalidatesEnvironment()) { + /* + * If the environment exists, invalidate it. Note that there are + * cases, such as if an exception occurred during recovery, that + * the environment will not exist. + */ + if (envImpl == null) { + if (reason.envShouldExist()) { + assert envImpl != null; + } + } else { + envImpl.invalidate(this); + } + } + } + + private static String makeMsg(EnvironmentImpl envImpl, + EnvironmentFailureReason reason, + String message, + Throwable cause) { + StringBuilder s = new StringBuilder(300); + if (envImpl != null) { + s.append(envImpl.getName()).append(" "); + } + + if (message != null) { + s.append(message); + s.append(' '); + } else if (cause != null) { + s.append(cause.toString()); + s.append(' '); + } + assert reason != null; + s.append(reason); + + /* + * Use the current environment status for reporting in the exception + * message. This is more information than simply whether this + * exception caused an invalidate, since previous exceptions may have + * occurred. 
+ */ + if (reason.invalidatesEnvironment() || + (envImpl != null && !envImpl.isValid())) { + s.append(" Environment is invalid and must be closed."); + } + return s.toString(); + } + + /** + * For internal use only. + * @hidden + * Only for use by wrapSelf methods. + */ + protected EnvironmentFailureException(String message, + EnvironmentFailureException cause) { + super(message, cause); + assert cause != null; + reason = cause.reason; + } + + /** + * For internal use only. + * @hidden + * Must be overridden by every concrete subclass to return an instance of + * its own class, constructed with the given msg and this exception as + * parameters, e.g.: return new MyClass(msg, this); + */ + public EnvironmentFailureException wrapSelf(String msg) { + assert EnvironmentFailureException.class == this.getClass() : + "Missing overriding " + this.getClass().getName() + + ".wrapSelf() method"; + return new EnvironmentFailureException(msg, this); + } + + /** + * For internal use only. + * @hidden + * Remember that this was already thrown. That way, if we re-throw it + * because another handle uses the environment after the fatal throw, the + * message is more clear. + */ + public void setAlreadyThrown(boolean alreadyThrown) { + this.alreadyThrown = alreadyThrown; + } + + @Override + public String getMessage() { + + /* + * Don't allocate memory after a Java Error occurs. Note that for + * a Java Error, addErrorMessage is never called, so super.getMessage + * will not allocate memory either. + */ + if (reason == EnvironmentFailureReason.JAVA_ERROR || !alreadyThrown) { + return super.getMessage(); + } + + return "Environment invalid because of previous exception: " + + super.getMessage(); + } + + /** + * For internal use only. + * @hidden + */ + public EnvironmentFailureReason getReason() { + return reason; + } + + /** + * Whether the EnvironmentFailureException indicates that the log is + * corrupt, meaning that a network restore (or restore from backup) should + * be performed. + *

+ * This method currently returns true only when corruption has been + * detected and is persistent. This may have been detected by verifying + * checksums in the disk data log, and in this case the corruption + * indicates a media/disk failure. The checksum error may have + * been detected when accessing data normally via the JE API, or by the + * background data verifier (see {@link EnvironmentConfig#VERIFY_LOG}). + * Or a persistent Btree corruption may have been detected by the data + * verifier (see {@link EnvironmentConfig#VERIFY_BTREE}) or by the + * {@link Environment#verify(VerifyConfig, PrintStream)} or + * {@link Database#verify(VerifyConfig)} methods. This method will + * return true in all such cases. + *

        + * Additionally, when a persistent corruption is detected and the + * Environment is open for read-write access, a marker file named + * 7fffffff.jdb is created in the Environment directory that will + * prevent re-opening the environment. If an attempt is made to + * re-open the Environment, the original EnvironmentFailureException + * will be thrown. This is meant to safeguard against using a corrupt + * environment when the original exception is accidentally overlooked. + * While the marker file can be deleted to allow re-opening the + * environment, this is normally unsafe and is not recommended. + * + * @return true if the environment is corrupt. + * + * @since 7.3 + */ + public boolean isCorrupted() { + return reason == EnvironmentFailureReason.LOG_CHECKSUM || + reason == EnvironmentFailureReason.BTREE_CORRUPTION; + } + + /** + * For internal use only. + * @hidden + * + * May ONLY be used for EnvironmentImpl.preallocatedEFE. + */ + public static EnvironmentFailureException makeJavaErrorWrapper() { + return new EnvironmentFailureException( + EnvironmentFailureReason.JAVA_ERROR); + } + + /** + * For internal use only. + * @hidden + * + * Promotes the given cause exception and message to an + * EnvironmentFailureException. + * + * If the cause is not an EnvironmentFailureException, wraps the cause + * exception in an EnvironmentFailureException along with the message. If + * the cause is an EnvironmentFailureException, adds the message to it. + * + * @return the resulting EnvironmentFailureException. + */ + public static EnvironmentFailureException + promote(EnvironmentImpl envImpl, + EnvironmentFailureReason reason, + String message, + Throwable cause) { + if (cause instanceof EnvironmentFailureException) { + EnvironmentFailureException e = + (EnvironmentFailureException) cause; + e.addErrorMessage(message); + return e; + } + + return new EnvironmentFailureException + (envImpl, reason, message, cause); + } + + /** + * For internal use only. + * @hidden + * + * Creates an exception indicating that an unexpected exception was caught + * internally. Used in place of an assert, when an exception is preferred. + * Used when the Environment should *not* be invalidated. + */ + public static EnvironmentFailureException + unexpectedException(Exception cause) { + return new EnvironmentFailureException + (EnvironmentFailureReason.UNEXPECTED_EXCEPTION, + null /*message*/, cause); + } + + /** + * For internal use only. + * @hidden + * + * Creates an exception indicating that an unexpected exception was caught + * internally. Used in place of an assert, when an exception is preferred. + * Used when the Environment *should* be invalidated. + */ + public static EnvironmentFailureException + unexpectedException(EnvironmentImpl envImpl, Exception cause) { + return new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.UNEXPECTED_EXCEPTION_FATAL, + null /*message*/, cause); + } + + /** + * For internal use only. + * @hidden + * + * Creates an exception indicating that an unexpected exception was caught + * internally. Used in place of an assert, when an exception is preferred. + * Used when the Environment should *not* be invalidated. + */ + public static EnvironmentFailureException + unexpectedException(String message, Exception cause) { + return new EnvironmentFailureException + (EnvironmentFailureReason.UNEXPECTED_EXCEPTION, message, + cause); + } + + /** + * For internal use only. 
+ * @hidden + * + * Creates an exception indicating that an unexpected exception was caught + * internally. Used in place of an assert, when an exception is preferred. + * Used when the Environment *should* be invalidated. + */ + public static EnvironmentFailureException + unexpectedException(EnvironmentImpl envImpl, + String message, + Exception cause) { + return new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.UNEXPECTED_EXCEPTION_FATAL, + message, cause); + } + + /** + * For internal use only. + * @hidden + * + * Creates an exception indicating that unexpected internal state was + * detected. Used in place of an assert, when an exception is preferred. + * Used when the Environment should *not* be invalidated. + */ + public static EnvironmentFailureException unexpectedState() { + return new EnvironmentFailureException + (EnvironmentFailureReason.UNEXPECTED_STATE, + null /*message*/, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + * + * Creates an exception indicating that unexpected internal state was + * detected. Used in place of an assert, when an exception is preferred. + * Used when the Environment *should* be invalidated. + */ + public static EnvironmentFailureException + unexpectedState(EnvironmentImpl envImpl) { + return new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.UNEXPECTED_STATE_FATAL, + null /*message*/, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + * + * Creates an exception indicating that unexpected internal state was + * detected. Used in place of an assert, when an exception is preferred. + * Used when the Environment should *not* be invalidated. + */ + public static EnvironmentFailureException unexpectedState(String message) { + return new EnvironmentFailureException + (EnvironmentFailureReason.UNEXPECTED_STATE, message, + null /*cause*/); + } + + /** + * For internal use only. + * @hidden + * + * Creates an exception indicating that unexpected internal state was + * detected. Used in place of an assert, when an exception is preferred. + * Used when the Environment *should* be invalidated. + */ + public static EnvironmentFailureException + unexpectedState(EnvironmentImpl envImpl, String message) { + return new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.UNEXPECTED_STATE_FATAL, + message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + * + * Convenience method that throws an UNEXPECTED_STATE exception (non-fatal) + * if the given condition is false. + */ + public static void assertState(boolean cond) { + + /* + * Implementation in assertState(boolean,String) is repeated to reduce + * assertion overhead. + */ + if (!cond) { + throw unexpectedState((String) null); + } + } + + /** + * For internal use only. + * @hidden + * + * Convenience method that throws an UNEXPECTED_STATE exception (non-fatal) + * if the given condition is false. + */ + public static void assertState(boolean cond, String message) { + + /* + * Implementation in assertState(boolean) is repeated to reduce + * assertion overhead. + */ + if (!cond) { + throw unexpectedState(message); + } + } +} diff --git a/src/com/sleepycat/je/EnvironmentLockedException.java b/src/com/sleepycat/je/EnvironmentLockedException.java new file mode 100644 index 0000000..587b69a --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentLockedException.java @@ -0,0 +1,57 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Thrown by the {@link Environment} constructor when an environment cannot be + * opened for write access because another process has the same environment + * open for write access. + * + *

        Warning: This exception should be handled when an + * environment is opened by more than one process.

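An illustrative sketch (not original source), with envHome and envConfig assumed:

    // Sketch: handle the case where another process has write access.
    try {
        Environment env = new Environment(envHome, envConfig);
        // ... use the environment ...
    } catch (EnvironmentLockedException e) {
        // Retry later, or re-open with EnvironmentConfig.setReadOnly(true).
        System.err.println("Environment is locked: " + e.getMessage());
    }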
        + */ +public class EnvironmentLockedException extends EnvironmentFailureException { + + private static final long serialVersionUID = 629594964L; + + /** + * For internal use only. + * @hidden + */ + public EnvironmentLockedException(EnvironmentImpl envImpl, + String message) { + super(envImpl, EnvironmentFailureReason.ENV_LOCKED, message); + } + + /** + * For internal use only. + * @hidden + */ + private EnvironmentLockedException(String message, + EnvironmentLockedException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public EnvironmentFailureException wrapSelf(String msg) { + return new EnvironmentLockedException(msg, this); + } +} diff --git a/src/com/sleepycat/je/EnvironmentMutableConfig.java b/src/com/sleepycat/je/EnvironmentMutableConfig.java new file mode 100644 index 0000000..c3b6870 --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentMutableConfig.java @@ -0,0 +1,770 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.Properties; + +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Specifies the environment attributes that may be changed after the + * environment has been opened. EnvironmentMutableConfig is a parameter to + * {@link Environment#setMutableConfig} and is returned by {@link + * Environment#getMutableConfig}. + * + *

There are two types of mutable environment properties: per-environment + * handle properties, and environment-wide properties.

        + * + *

        Per-Environment Handle Properties

        + * + *

        Per-environment handle properties apply only to a single Environment + * instance. For example, to change the default transaction commit behavior + * for a single environment handle, do this:

        + * + *
        + *     // Specify no-sync behavior for a given handle.
        + *     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
        + *     mutableConfig.setDurability(Durability.COMMIT_NO_SYNC);
        + *     env.setMutableConfig(mutableConfig);
        + * 
        + * + *

        The per-environment handle properties are listed below. These properties + * are accessed using the setter and getter methods listed, as shown in the + * example above.

        + * + *
          + *
        • {@link #setDurability}, {@link #getDurability}
        • {@link #setTxnNoSync}, {@link #getTxnNoSync} deprecated
        • {@link #setTxnWriteNoSync}, {@link #getTxnWriteNoSync} deprecated
        + * + *

        Environment-Wide Mutable Properties

        + * + *

        Environment-wide mutable properties are those that can be changed for an + * environment as a whole, irrespective of which environment instance (for the + * same physical environment) is used. For example, to stop the cleaner daemon + * thread, do this:

        + * + *
        + *     // Stop the cleaner daemon threads for the environment.
        + *     EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
        + *     mutableConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
        + *     env.setMutableConfig(mutableConfig);
        + * 
        + * + *

        The environment-wide mutable properties are documented as such for each + * EnvironmentConfig String constant.

        + * + *

        Getting the Current Environment Properties

        + * + * To get the current "live" properties of an environment after constructing it + * or changing its properties, you must call {@link Environment#getConfig} or + * {@link Environment#getMutableConfig}. The original EnvironmentConfig or + * EnvironmentMutableConfig object used to set the properties is not kept up to + * date as properties are changed, and does not reflect property validation or + * properties that are computed. + * + * @see EnvironmentConfig + */ +public class EnvironmentMutableConfig implements Cloneable, Serializable { + private static final long serialVersionUID = 1L; + + /* + * Change copyHandlePropsTo and Environment.copyToHandleConfig when adding + * fields here. + */ + private boolean txnNoSync = false; + private boolean txnWriteNoSync = false; + + /** + * Cache size is a category of property that is calculated within the + * environment. It is only supplied when returning the cache size to the + * application and never used internally; internal code directly checks + * with the MemoryBudget class; + */ + private long cacheSize; + + private long offHeapCacheSize; + + /** + * Note that in the implementation we choose not to extend Properties in + * order to keep the configuration type safe. + */ + Properties props; + + /** + * For unit testing, to prevent loading of je.properties. + */ + private transient boolean loadPropertyFile = true; + + /** + * Internal boolean that says whether or not to validate params. Setting + * it to false means that parameter value validatation won't be performed + * during setVal() calls. Only should be set to false by unit tests using + * DbInternal. + */ + transient boolean validateParams = true; + + private transient ExceptionListener exceptionListener = null; + private CacheMode cacheMode; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public EnvironmentMutableConfig() { + props = new Properties(); + } + + /** + * Used by EnvironmentConfig to construct from properties. + */ + EnvironmentMutableConfig(Properties properties) + throws IllegalArgumentException { + + DbConfigManager.validateProperties(properties, + false, // isRepConfigInstance + getClass().getName()); + /* For safety, copy the passed in properties. */ + props = new Properties(); + props.putAll(properties); + } + + /** + * Configures the database environment for asynchronous transactions. + * + * @param noSync If true, do not write or synchronously flush the log on + * transaction commit. This means that transactions exhibit the ACI + * (Atomicity, Consistency, and Isolation) properties, but not D + * (Durability); that is, database integrity is maintained, but if the JVM + * or operating system fails, it is possible some number of the most + * recently committed transactions may be undone during recovery. The + * number of transactions at risk is governed by how many updates fit into + * a log buffer, how often the operating system flushes dirty buffers to + * disk, and how often the database environment is checkpointed. + * + *

        This attribute is false by default for this class and for the + * database environment.

        + * + * @deprecated replaced by {@link #setDurability} + */ + public EnvironmentMutableConfig setTxnNoSync(boolean noSync) { + setTxnNoSyncVoid(noSync); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTxnNoSyncVoid(boolean noSync) { + TransactionConfig.checkMixedMode + (false, noSync, txnWriteNoSync, getDurability()); + txnNoSync = noSync; + } + + /** + * Returns true if the database environment is configured for asynchronous + * transactions. + * + * @return true if the database environment is configured for asynchronous + * transactions. + * + * @deprecated replaced by {@link #getDurability} + */ + public boolean getTxnNoSync() { + return txnNoSync; + } + + /** + * Configures the database environment for transactions which write but do + * not flush the log. + * + * @param writeNoSync If true, write but do not synchronously flush the log + * on transaction commit. This means that transactions exhibit the ACI + * (Atomicity, Consistency, and Isolation) properties, but not D + * (Durability); that is, database integrity is maintained, but if the + * operating system fails, it is possible some number of the most recently + * committed transactions may be undone during recovery. The number of + * transactions at risk is governed by how often the operating system + * flushes dirty buffers to disk, and how often the database environment is + * checkpointed. + * + *

        The motivation for this attribute is to provide a transaction that + * has more durability than asynchronous (nosync) transactions, but has + * higher performance than synchronous transactions.

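For orientation (a sketch, not original source): the Durability constants that roughly correspond to these deprecated flags are COMMIT_SYNC (neither flag set), COMMIT_WRITE_NO_SYNC (this attribute), and COMMIT_NO_SYNC (setTxnNoSync); env is assumed to be an open Environment:

    // Sketch: the modern replacement for setTxnWriteNoSync(true).
    EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
    mutableConfig.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
    env.setMutableConfig(mutableConfig);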
        + * + *

        This attribute is false by default for this class and for the + * database environment.

        + * + * @deprecated replaced by {@link #setDurability} + */ + public EnvironmentMutableConfig setTxnWriteNoSync(boolean writeNoSync) { + setTxnWriteNoSyncVoid(writeNoSync); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setTxnWriteNoSyncVoid(boolean writeNoSync) { + TransactionConfig.checkMixedMode + (false, txnNoSync, writeNoSync, getDurability()); + txnWriteNoSync = writeNoSync; + } + + /** + * Returns true if the database environment is configured for transactions + * which write but do not flush the log. + * + * @return true if the database environment is configured for transactions + * which write but do not flush the log. + * + * @deprecated replaced by {@link #getDurability} + */ + public boolean getTxnWriteNoSync() { + return txnWriteNoSync; + } + + /** + * Convenience method for setting {@link EnvironmentConfig#TXN_DURABILITY}. + * + * @param durability the new durability definition + * + * @return this + * + * @see Durability + */ + public EnvironmentMutableConfig setDurability(Durability durability) { + setDurabilityVoid(durability); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDurabilityVoid(Durability durability) { + TransactionConfig.checkMixedMode + (false, txnNoSync, txnWriteNoSync, durability); + + if (durability == null) { + props.remove(EnvironmentParams.JE_DURABILITY); + } else { + DbConfigManager.setVal(props, EnvironmentParams.JE_DURABILITY, + durability.toString(), + validateParams); + } + } + + /** + * Convenience method for setting {@link EnvironmentConfig#TXN_DURABILITY}. + * + * @return the durability setting currently associated with this config. + */ + public Durability getDurability() { + String value = DbConfigManager.getVal(props, + EnvironmentParams.JE_DURABILITY); + return Durability.parse(value); + } + + /** + * A convenience method for setting {@link EnvironmentConfig#MAX_MEMORY}. + * + * @param totalBytes The memory available to the database system, in bytes. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * + * @return this + * + * @see EnvironmentConfig#MAX_MEMORY + */ + public EnvironmentMutableConfig setCacheSize(long totalBytes) + throws IllegalArgumentException { + + setCacheSizeVoid(totalBytes); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setCacheSizeVoid(long totalBytes) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, EnvironmentParams.MAX_MEMORY, + Long.toString(totalBytes), validateParams); + } + + /** + * Returns the memory available to the database system, in bytes. A valid + * value is only available if this EnvironmentConfig object has been + * returned from Environment.getConfig(). + * + * @return The memory available to the database system, in bytes. + */ + public long getCacheSize() { + + /* + * CacheSize is filled in from the EnvironmentImpl by way of + * fillInEnvironmentGeneratedProps. + */ + return cacheSize; + } + + /** + * A convenience method for setting {@link + * EnvironmentConfig#MAX_MEMORY_PERCENT}. + * + * @param percent The percent of JVM memory to allocate to the JE cache. + * + * @throws IllegalArgumentException if an invalid parameter is specified. 
+ * + * @return this + * + * @see EnvironmentConfig#MAX_MEMORY_PERCENT + */ + public EnvironmentMutableConfig setCachePercent(int percent) + throws IllegalArgumentException { + + setCachePercentVoid(percent); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setCachePercentVoid(int percent) + throws IllegalArgumentException { + + DbConfigManager.setIntVal(props, EnvironmentParams.MAX_MEMORY_PERCENT, + percent, validateParams); + } + + /** + * A convenience method for getting {@link + * EnvironmentConfig#MAX_MEMORY_PERCENT}. + * + * @return the percentage value used in the JE cache size calculation. + */ + public int getCachePercent() { + + return DbConfigManager.getIntVal(props, + EnvironmentParams.MAX_MEMORY_PERCENT); + } + + /** + * A convenience method for setting + * {@link EnvironmentConfig#MAX_OFF_HEAP_MEMORY}. + */ + public EnvironmentMutableConfig setOffHeapCacheSize(long totalBytes) + throws IllegalArgumentException { + + setOffHeapCacheSizeVoid(totalBytes); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setOffHeapCacheSizeVoid(long totalBytes) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, EnvironmentParams.MAX_OFF_HEAP_MEMORY, + Long.toString(totalBytes), validateParams); + } + + /** + * A convenience method for getting + * {@link EnvironmentConfig#MAX_OFF_HEAP_MEMORY}. + */ + public long getOffHeapCacheSize() { + + /* + * CacheSize is filled in from the EnvironmentImpl by way of + * fillInEnvironmentGeneratedProps. + */ + return offHeapCacheSize; + } + + /** + * A convenience method for setting {@link EnvironmentConfig#MAX_DISK}. + * + * @param totalBytes is an upper limit on the number of bytes used for + * data storage, or zero if no limit is desired. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * + * @return this + * + * @see EnvironmentConfig#MAX_DISK + */ + public EnvironmentMutableConfig setMaxDisk(long totalBytes) + throws IllegalArgumentException { + + setMaxDiskVoid(totalBytes); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxDiskVoid(long totalBytes) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, EnvironmentParams.MAX_DISK, + Long.toString(totalBytes), validateParams); + } + + /** + * A convenience method for getting {@link EnvironmentConfig#MAX_DISK}. + * + * @return the upper limit on the number of bytes used for data storage, + * or zero if no limit is set. + * + * @see EnvironmentConfig#MAX_DISK + */ + public long getMaxDisk() { + + return DbConfigManager.getLongVal(props, EnvironmentParams.MAX_DISK); + } + + /** + * Sets the exception listener for an Environment. The listener is called + * when a daemon thread throws an exception, in order to provide a + * notification mechanism for these otherwise asynchronous exceptions. + * Daemon thread exceptions are also printed through stderr. + *

        + * Not all daemon exceptions are fatal, and the application bears + * responsibility for choosing how to respond to the notification. Since + * exceptions may repeat, the application should also choose how to handle + * a spate of exceptions. For example, the application may choose to act + * upon each notification, or it may choose to batch up its responses + * by implementing the listener so it stores exceptions, and only acts + * when a certain number have been received. + * @param exceptionListener the callback to be executed when an exception + * occurs. + * + * @return this + */ + public EnvironmentMutableConfig + setExceptionListener(ExceptionListener exceptionListener) { + + setExceptionListenerVoid(exceptionListener); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setExceptionListenerVoid(ExceptionListener exceptionListener) { + this.exceptionListener = exceptionListener; + } + + /** + * Returns the exception listener, if set. + */ + public ExceptionListener getExceptionListener() { + return exceptionListener; + } + + /** + * Sets the default {@code CacheMode} used for operations performed in this + * environment. The default cache mode may be overridden on a per-database + * basis using {@link DatabaseConfig#setCacheMode}, and on a per-record or + * per-operation basis using {@link Cursor#setCacheMode}, {@link + * ReadOptions#setCacheMode(CacheMode)} or {@link + * WriteOptions#setCacheMode(CacheMode)}. + * + * @param cacheMode is the default {@code CacheMode} used for operations + * performed in this environment. If {@code null} is specified, {@link + * CacheMode#DEFAULT} will be used. + * + * @see CacheMode for further details. + * + * @since 4.0.97 + */ + public EnvironmentMutableConfig setCacheMode(final CacheMode cacheMode) { + setCacheModeVoid(cacheMode); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setCacheModeVoid(final CacheMode cacheMode) { + this.cacheMode = cacheMode; + } + + /** + * Returns the default {@code CacheMode} used for operations performed in + * this environment, or null if {@link CacheMode#DEFAULT} is used. + * + * @return the default {@code CacheMode} used for operations performed on + * this database, or null if {@link CacheMode#DEFAULT} is used. + * + * @see #setCacheMode + * + * @since 4.0.97 + */ + public CacheMode getCacheMode() { + return cacheMode; + } + + /** + * Set this configuration parameter. First validate the value specified for + * the configuration parameter; if it is valid, the value is set in the + * configuration. + * + * @param paramName the configuration parameter name, one of the String + * constants in this class + * + * @param value The configuration value + * + * @return this + * + * @throws IllegalArgumentException if the paramName or value is invalid. + */ + public EnvironmentMutableConfig setConfigParam(String paramName, + String value) + throws IllegalArgumentException { + + DbConfigManager.setConfigParam(props, + paramName, + value, + true, /* require mutability. */ + validateParams, + false /* forReplication */, + true /* verifyForReplication */); + return this; + } + + /** + * Returns the value for this configuration parameter. + * + * @param paramName a valid configuration parameter, one of the String + * constants in this class. + * @return the configuration value. + * @throws IllegalArgumentException if the paramName is invalid. 
+ */ + public String getConfigParam(String paramName) + throws IllegalArgumentException { + + return DbConfigManager.getConfigParam(props, paramName); + } + + /** + * @hidden + * For internal use only. + */ + public boolean isConfigParamSet(String paramName) { + return props.containsKey(paramName); + } + + /* + * Helpers + */ + void setValidateParams(boolean validateParams) { + this.validateParams = validateParams; + } + + /** + * @hidden + * Used by unit tests. + */ + boolean getValidateParams() { + return validateParams; + } + + /** + * Checks that the immutable values in the environment config used to open + * an environment match those in the config object saved by the underlying + * shared EnvironmentImpl. + * @param handleConfigProps are the config property values that were + * specified by configuration object from the Environment. + */ + void checkImmutablePropsForEquality(Properties handleConfigProps) + throws IllegalArgumentException { + + Iterator iter = + EnvironmentParams.SUPPORTED_PARAMS.keySet().iterator(); + while (iter.hasNext()) { + String paramName = iter.next(); + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + assert param != null; + if (!param.isMutable() && !param.isForReplication()) { + String paramVal = props.getProperty(paramName); + String useParamVal = handleConfigProps.getProperty(paramName); + if ((paramVal != null) ? + (!paramVal.equals(useParamVal)) : + (useParamVal != null)) { + throw new IllegalArgumentException + (paramName + " is set to " + + useParamVal + + " in the config parameter" + + " which is incompatible" + + " with the value of " + + paramVal + " in the" + + " underlying environment"); + } + } + } + } + + /** + * @hidden + * For internal use only. + * Overrides Object.clone() to clone all properties, used by this class and + * EnvironmentConfig. + */ + @Override + protected EnvironmentMutableConfig clone() { + + try { + EnvironmentMutableConfig copy = + (EnvironmentMutableConfig) super.clone(); + copy.props = (Properties) props.clone(); + return copy; + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Used by Environment to create a copy of the application supplied + * configuration. Done this way to provide non-public cloning. + */ + EnvironmentMutableConfig cloneMutableConfig() { + EnvironmentMutableConfig copy = (EnvironmentMutableConfig) clone(); + /* Remove all immutable properties. */ + copy.clearImmutableProps(); + return copy; + } + + /** + * Copies the per-handle properties of this object to the given config + * object. + */ + void copyHandlePropsTo(EnvironmentMutableConfig other) { + other.txnNoSync = txnNoSync; + other.txnWriteNoSync = txnWriteNoSync; + other.setDurability(getDurability()); + } + + /** + * Copies all mutable props to the given config object. 
+ * Unchecked suppress here because Properties don't play well with + * generics in Java 1.5 + */ + @SuppressWarnings("unchecked") + void copyMutablePropsTo(EnvironmentMutableConfig toConfig) { + + Properties toProps = toConfig.props; + Enumeration propNames = props.propertyNames(); + while (propNames.hasMoreElements()) { + String paramName = (String) propNames.nextElement(); + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + assert param != null; + if (param.isMutable()) { + String newVal = props.getProperty(paramName); + toProps.setProperty(paramName, newVal); + } + } + toConfig.exceptionListener = this.exceptionListener; + toConfig.cacheMode = this.cacheMode; + } + + /** + * Fills in the properties calculated by the environment to the given + * config object. + */ + void fillInEnvironmentGeneratedProps(EnvironmentImpl envImpl) { + cacheSize = envImpl.getMemoryBudget().getMaxMemory(); + offHeapCacheSize = envImpl.getOffHeapCache().getMaxMemory(); + } + + /** + * Removes all immutable props. + * Unchecked suppress here because Properties don't play well with + * generics in Java 1.5 + */ + @SuppressWarnings("unchecked") + private void clearImmutableProps() { + Enumeration propNames = props.propertyNames(); + while (propNames.hasMoreElements()) { + String paramName = (String) propNames.nextElement(); + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + assert param != null; + if (!param.isMutable()) { + props.remove(paramName); + } + } + } + + Properties getProps() { + return props; + } + + /** + * For unit testing, to prevent loading of je.properties. + */ + void setLoadPropertyFile(boolean loadPropertyFile) { + this.loadPropertyFile = loadPropertyFile; + } + + /** + * For unit testing, to prevent loading of je.properties. + */ + boolean getLoadPropertyFile() { + return loadPropertyFile; + } + + /** + * Testing support + * @hidden + */ + public int getNumExplicitlySetParams() { + return props.size(); + } + + /** + * Display configuration values. + */ + @Override + public String toString() { + return " cacheSize=" + cacheSize + + " offHeapCacheSize=" + offHeapCacheSize + + " cacheMode=" + cacheMode + + " txnNoSync=" + txnNoSync + + " txnWriteNoSync=" + txnWriteNoSync + + " exceptionListener=" + (exceptionListener != null) + + " map=" + props.toString(); + } +} diff --git a/src/com/sleepycat/je/EnvironmentMutableConfigBeanInfo.java b/src/com/sleepycat/je/EnvironmentMutableConfigBeanInfo.java new file mode 100644 index 0000000..169e3ac --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentMutableConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. 
+ */ +public class EnvironmentMutableConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(EnvironmentMutableConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(EnvironmentMutableConfig.class); + } +} diff --git a/src/com/sleepycat/je/EnvironmentNotFoundException.java b/src/com/sleepycat/je/EnvironmentNotFoundException.java new file mode 100644 index 0000000..13b2bb1 --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentNotFoundException.java @@ -0,0 +1,56 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Thrown by the {@link Environment} constructor when {@code EnvironmentConfig + * AllowCreate} property is false (environment creation is not permitted), but + * there are no log files in the environment directory. + * + * @since 4.0 + */ +public class EnvironmentNotFoundException extends EnvironmentFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public EnvironmentNotFoundException(EnvironmentImpl envImpl, + String message) { + super(envImpl, EnvironmentFailureReason.ENV_NOT_FOUND, message); + } + + /** + * For internal use only. + * @hidden + */ + private EnvironmentNotFoundException(String message, + EnvironmentNotFoundException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public EnvironmentFailureException wrapSelf(String msg) { + return new EnvironmentNotFoundException(msg, this); + } +} diff --git a/src/com/sleepycat/je/EnvironmentStats.java b/src/com/sleepycat/je/EnvironmentStats.java new file mode 100644 index 0000000..50ac09d --- /dev/null +++ b/src/com/sleepycat/je/EnvironmentStats.java @@ -0,0 +1,4280 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je; + +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_ACTIVE_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_AVAILABLE_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_CLEANED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_DEAD; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_MIGRATED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_OBSOLETE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_CLUSTER_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_DELETIONS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_DISK_READS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_ENTRIES_READ; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_CLEANED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_DEAD; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_MIGRATED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_OBSOLETE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNQUEUE_HITS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_CLEANED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_DEAD; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_EXPIRED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_LOCKED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_MARKED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_MIGRATED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_OBSOLETE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MARKED_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MAX_UTILIZATION; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MIN_UTILIZATION; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LNS_LOCKED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LN_QUEUE_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE_MAP; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_REPEAT_ITERATOR_READS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_RESERVED_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_REVISAL_RUNS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_RUNS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TOTAL_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TO_BE_CLEANED_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TWO_PASS_RUNS; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_DELETES; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_GETS; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_INSERTS; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_UPDATES; 
+import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_CREATION_TIME; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_RELATCHES_REQUIRED; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_ADMIN_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DATA_ADMIN_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DATA_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DOS_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_LOCK_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_SHARED_CACHE_TOTAL_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_TOTAL_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_DELETE; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_DELETE_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_INSERT; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_INSERT_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_POSITION; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_SEARCH; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_SEARCH_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_UPDATE; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_DELETE; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_INSERT; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_POSITION; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_SEARCH; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_SEARCH_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_UPDATE; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_DELTA_BLIND_OPS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_DELTA_FETCH_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH_MISS_RATIO; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_BINS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_BIN_DELTAS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_COMPACT_KEY; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_NO_TARGET; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_SPARSE_TARGET; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_UPPER_INS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_DIRTY_NODES_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_EVICTION_RUNS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_LNS_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_MOVED_TO_PRI2_LRU; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_MUTATED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_PUT_BACK; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_SKIPPED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_STRIPPED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_TARGETED; +import static 
com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_ROOT_NODES_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_SHARED_CACHE_ENVS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.FULL_BIN_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.LN_FETCH; +import static com.sleepycat.je.evictor.EvictorStatDefinition.LN_FETCH_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.PRI1_LRU_SIZE; +import static com.sleepycat.je.evictor.EvictorStatDefinition.PRI2_LRU_SIZE; +import static com.sleepycat.je.evictor.EvictorStatDefinition.THREAD_UNAVAILABLE; +import static com.sleepycat.je.evictor.EvictorStatDefinition.UPPER_IN_FETCH; +import static com.sleepycat.je.evictor.EvictorStatDefinition.UPPER_IN_FETCH_MISS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_CURSORS_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_DBCLOSED_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_NON_EMPTY_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_PROCESSED_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_QUEUE_SIZE; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_SPLIT_BINS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_CONTENTION; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_SUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_UNSUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NO_WAITERS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_RELEASES; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_SELF_OWNED; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_BYTES_READ_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_FILE_OPENS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_LOG_FSYNCS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_OPEN_FILES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_READS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_READ_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_WRITES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_WRITE_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_READS_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_READS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_READ_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_WRITES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_WRITE_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITES_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_FSYNCS; +import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_FSYNC_REQUESTS; +import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_TIMEOUTS; +import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_FSYNC_MAX_TIME; +import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_FSYNC_TIME; +import static 
com.sleepycat.je.log.LogStatDefinition.LBFP_BUFFER_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.LBFP_LOG_BUFFERS; +import static com.sleepycat.je.log.LogStatDefinition.LBFP_MISS; +import static com.sleepycat.je.log.LogStatDefinition.LBFP_NOT_RESIDENT; +import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_END_OF_LOG; +import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_REPEAT_FAULT_READS; +import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_TEMP_BUFFER_WRITES; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_CHECKPOINTS; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_DELTA_IN_FLUSH; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_FULL_BIN_FLUSH; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_FULL_IN_FLUSH; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPTID; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_END; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_INTERVAL; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_START; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_OWNERS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_REQUESTS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_TOTAL; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITERS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.je.cleaner.CleanerStatDefinition; +import com.sleepycat.je.dbi.DbiStatDefinition; +import com.sleepycat.je.evictor.Evictor.EvictionSource; +import com.sleepycat.je.evictor.EvictorStatDefinition; +import com.sleepycat.je.evictor.OffHeapStatDefinition; +import com.sleepycat.je.incomp.INCompStatDefinition; +import com.sleepycat.je.log.LogStatDefinition; +import com.sleepycat.je.recovery.CheckpointStatDefinition; +import com.sleepycat.je.txn.LockStatDefinition; +import com.sleepycat.je.util.DbBackup; +import com.sleepycat.je.util.DbCacheSize; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Statistics for a single environment. Statistics provide indicators for + * system monitoring and performance tuning. + * + *

        Each statistic has a name and a getter method in this class. For example, + * the {@code cacheTotalBytes} stat is returned by the {@link + * #getCacheTotalBytes()} method. Statistics are categorized into several + * groups, for example, {@code cacheTotalBytes} is in the {@code Cache} + * group. Each stat and group has a name and a description.
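+ *
+ * <p>As a minimal sketch (assuming an already-open {@link Environment}
+ * named {@code env}), statistics can be retrieved and printed like so:</p>
+ *
+ * <pre>
+ * StatsConfig statsConfig = new StatsConfig();
+ * statsConfig.setClear(false); // keep counters accumulating across calls
+ * EnvironmentStats stats = env.getStats(statsConfig);
+ * System.out.println(stats.toString());        // stat names and values
+ * System.out.println(stats.toStringVerbose()); // adds descriptions
+ * </pre>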

        + * + *

        Viewing the statistics through {@link #toString()} shows the stat names + * and values organized by group. Viewing the stats with {@link + * #toStringVerbose()} additionally shows the description of each stat and + * group.

        + * + *

Statistics are periodically output in CSV format to the je.stat.csv file + * (see {@link EnvironmentConfig#STATS_COLLECT}). The column header in the .csv + * file has {@code group:stat} format, where 'group' is the group name and + * 'stat' is the stat name. In Oracle NoSQL DB, in addition to the .csv + * file, JE stats are output in the .stat files.
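+ *
+ * <p>A hedged sketch of controlling this output when opening the
+ * environment (whether collection is enabled by default may vary by
+ * release):</p>
+ *
+ * <pre>
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * envConfig.setAllowCreate(true);
+ * // Enable periodic stats collection to the je.stat.csv file.
+ * envConfig.setConfigParam(EnvironmentConfig.STATS_COLLECT, "true");
+ * </pre>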

        + * + *

        Stat values may also be obtained via JMX using the {@link JEMonitor mbean}. + * In Oracle NoSQL DB, JE stats are obtained via a different JMX interface in + * JSON format. The JSON format uses property names of the form {@code + * group_stat} where 'group' is the group name and 'stat' is the stat name.

        + * + *

        The stat groups are listed below. Each group name links to a summary of + * the statistics in the group.

+ *
+ * <table>
+ * <tr><th>Group Name</th><th>Description</th></tr>
+ * <tr><td>{@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_DESC}</td></tr>
+ * <tr><td>{@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_DESC}</td></tr>
+ * <tr><td>{@value com.sleepycat.je.log.LogStatDefinition#GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.log.LogStatDefinition#GROUP_DESC}</td></tr>
+ * <tr><td>{@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_DESC}</td></tr>
+ * <tr><td>{@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_DESC}</td></tr>
+ * <tr><td>{@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_DESC}</td></tr>
+ * <tr><td>{@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.txn.LockStatDefinition#GROUP_DESC}</td></tr>
+ * <tr><td>{@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_NAME}</td>
+ * <td>{@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_DESC}</td></tr>
+ * </table>
+ *

        The following sections describe each group of stats along with some + * common strategies for using them for monitoring and performance tuning.

        + * + *

        Cache Statistics

        + * + *

        Group Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Description: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_DESC}

        + * + *

        Group Name: {@value + * com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME} + *
        Description: {@value + * com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_DESC}

        + * + *

The JE cache consists of the main (in-heap) cache and an optional + * off-heap cache. The vast majority of the cache is occupied by Btree nodes, + * including internal nodes (INs) and leaf nodes (LNs). INs contain record keys + * while LNs contain record data.

        + * + *

        Each IN refers to a configured maximum number of child nodes ({@link + * EnvironmentConfig#NODE_MAX_ENTRIES}). The INs form a Btree of at least 2 + * levels. With a large data set the Btree will normally have 4 or 5 levels. + * The top level is a single node, the root IN. Levels are numbered from the + * bottom up, starting with level 1 for bottom level INs (BINs). Levels are + * added at the top when the root IN splits.

        + * + *

        When an off-heap cache is configured, it serves as an overflow for the + * main cache. See {@link EnvironmentConfig#MAX_OFF_HEAP_MEMORY}.

        + * + *

        Cache Statistics: Sizing

        + * + *

        Operation performance is often directly proportional to how much of the + * active data set is cached. BINs and LNs form the vast majority of the cache. + * Caching of BINs and LNs have different performance impacts, and behavior + * varies depending on whether an off-heap cache is configured and which {@link + * CacheMode} is used.

        + * + *

        Main cache current usage is indicated by the following stats. Note that + * there is currently no stat for the number of LNs in the main cache.

        + *
          + *
        • {@link #getCacheTotalBytes()}
        • + *
        • {@link #getNCachedBINs()}
        • + *
        • {@link #getNCachedBINDeltas()}
        • + *
        • {@link #getNCachedUpperINs()}
        • + *
        + * + *

        Off-heap cache current usage is indicated by:

        + *
          + *
        • {@link #getOffHeapTotalBytes()}
        • + *
        • {@link #getOffHeapCachedLNs()}
        • + *
        • {@link #getOffHeapCachedBINs()}
        • + *
        • {@link #getOffHeapCachedBINDeltas()}
        • + *
        + *

        + * + *

        A cache miss is considered a miss only when the object is not found in + * either cache. Misses often result in file I/O and are a primary indicator + * of cache performance. Fetches (access requests) and misses are indicated + * by:

        + *
          + *
        • {@link #getNLNsFetch()}
        • + *
        • {@link #getNLNsFetchMiss()}
        • + *
        • {@link #getNBINsFetch()}
        • + *
        • {@link #getNBINsFetchMiss()}
        • + *
        • {@link #getNBINDeltasFetchMiss()}
        • + *
        • {@link #getNFullBINsMiss()}
        • + *
        • {@link #getNUpperINsFetch()}
        • + *
        • {@link #getNUpperINsFetchMiss()}
        • + *
        + * + *

        When the number of LN misses ({@code nLNsFetchMiss}) or the number of + * BIN misses ({@code nBINsFetchMiss + nFullBINsMiss}) are significant, the + * JE cache may be undersized, as discussed below. But note that it is not + * practical to correlate the number of fetches and misses directly to + * application operations, because LNs are sometimes + * {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}, BINs are sometimes + * accessed multiple times per operation, and internal Btree accesses are + * included in the stat values.
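+ *
+ * <p>With those caveats, coarse miss ratios are still a useful trend
+ * indicator. A sketch (assuming {@code stats} was obtained via {@code
+ * Environment.getStats}):</p>
+ *
+ * <pre>
+ * long lnFetch = stats.getNLNsFetch();
+ * long lnMiss = stats.getNLNsFetchMiss();
+ * long binFetch = stats.getNBINsFetch();
+ * long binMiss = stats.getNBINsFetchMiss() + stats.getNFullBINsMiss();
+ * double lnMissRatio = (lnFetch > 0) ? ((double) lnMiss / lnFetch) : 0;
+ * double binMissRatio = (binFetch > 0) ? ((double) binMiss / binFetch) : 0;
+ * </pre>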

        + * + *

        Ideally, all BINs and LNs for the active data set should fit in cache so + * that operations do not result in fetch misses, which often perform random + * read I/O. When this is not practical, which is often the case for large + * data sets, the next best thing is to ensure that all BINs fit in cache, + * so that an operation will perform at most one random read I/O to fetch + * the LN. The {@link DbCacheSize} javadoc describes how to size the cache + * to ensure that all BINs and/or LNs fit in cache.

        + * + *

        Normally {@link EnvironmentConfig#MAX_MEMORY_PERCENT} determines the JE + * cache size as a value relative to the JVM heap size, i.e., the heap size + * determines the cache size.
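+ *
+ * <p>Since the cache size parameters are mutable, they can be adjusted on a
+ * live environment. A sketch (assuming an open {@code Environment} named
+ * {@code env}; the values shown are illustrative):</p>
+ *
+ * <pre>
+ * EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+ * mutableConfig.setCachePercent(70);             // 70% of the JVM heap
+ * // or an absolute size: mutableConfig.setCacheSize(1024L * 1024 * 1024);
+ * env.setMutableConfig(mutableConfig);
+ * </pre>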

        + * + *

        For configuring cache size and behavior, see:

        + *
          + *
        • {@link EnvironmentConfig#MAX_MEMORY_PERCENT}
        • + *
        • {@link EnvironmentConfig#MAX_MEMORY}
        • + *
        • {@link EnvironmentConfig#MAX_OFF_HEAP_MEMORY}
        • + *
        • {@link EnvironmentConfig#setCacheMode(CacheMode)}
        • + *
        • {@link CacheMode}
        • + *
        • {@link DbCacheSize}
        • + *
        + * + *

        When using Oracle NoSQL DB, a sizing exercise and {@link DbCacheSize} are + * used to determine the cache size needed to hold all BINs in memory. The + * memory available to each node is divided between a 32 GB heap for the JVM + * process (so that CompressedOops may be used) and the off-heap cache (when + * more than 32 GB of memory is available).

        + * + *

It is also important not to configure the cache size too large, relative + * to the JVM heap size. If there is not enough free space in the heap, Java + * GC pauses may become a problem. Increasing the default value for {@code + * MAX_MEMORY_PERCENT}, or setting {@code MAX_MEMORY} (which overrides {@code + * MAX_MEMORY_PERCENT}), should be done carefully.

        + * + *

        Java GC performance may also be improved by using {@link + * CacheMode#EVICT_LN}. Record data sizes should also be kept below 1 MB to + * avoid "humongous objects" (see Java GC documentation).
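+ *
+ * <p>A sketch of selecting {@link CacheMode#EVICT_LN} as the environment
+ * default (assuming an open {@code Environment} named {@code env}):</p>
+ *
+ * <pre>
+ * EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+ * mutableConfig.setCacheMode(CacheMode.EVICT_LN); // evict LNs eagerly
+ * env.setMutableConfig(mutableConfig);
+ * </pre>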

        + * + *

        When using Oracle NoSQL DB, by default, {@code MAX_MEMORY_PERCENT} is + * set to 70% and {@link CacheMode#EVICT_LN} is used. The LOB (large object) + * API is implemented using multiple JE records per LOB where the data size of + * each record is 1 MB or less.

        + * + *

        When a shared cache is configured, the main and off-heap cache may be + * shared by multiple JE Environments in a single JVM process. See:

        + *
          + *
        • {@link EnvironmentConfig#SHARED_CACHE}
        • + *
        • {@link #getSharedCacheTotalBytes()}
        • + *
        • {@link #getNSharedCacheEnvironments()}
        • + *
        + * + *

        When using Oracle NoSQL DB, the JE shared cache feature is not used + * because each node only uses a single JE Environment.

        + * + *

        Cache Statistics: Size + * Optimizations

        + * + *

        Since a large portion of an IN consists of record keys, JE uses + * {@link DatabaseConfig#setKeyPrefixing(boolean) key prefix compression}. + * Ideally, key suffixes are small enough to be stored using the {@link + * EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH compact key format}. The + * following stat indicates the number of INs using this compact format:

        + *
          + *
        • {@link #getNINCompactKeyIN()}
        • + *
        + * + *

        Configuration params impacting key prefixing and the compact key format + * are:

        + *
          + *
        • {@link DatabaseConfig#setKeyPrefixing(boolean)}
        • + *
        • {@link EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH}
        • + *
        + * + *

        Enabling key prefixing for all databases is strongly recommended. When + * using Oracle NoSQL DB, key prefixing is always enabled.

        + * + *

        Another configuration param impacting BIN cache size is {@code + * TREE_MAX_EMBEDDED_LN}. There is currently no stat indicating the number of + * embedded LNs. See:

        + *
          + *
        • {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN}
        • + *
        + * + *

        Cache Statistics: Unexpected + * Sizes

        + * + *

        Although the Btree normally occupies the vast majority of the cache, it + * is possible that record locks occupy unexpected amounts of cache when + * large transactions are used, or when cursors or transactions are left open + * due to application bugs. The following stat indicates the amount of cache + * used by record locks:

        + *
          + *
        • {@link #getLockBytes()}
        • + *
        + * + *

        To reduce the amount of memory used for record locks:

        + *
          + *
        • Use a small number of write operations per transaction. Write + * locks are held until the end of a transaction.
        • + *
        • For transactions using Serializable isolation or RepeatableRead + * isolation (the default), use a small number of read operations per + * transaction.
        • + *
        • To read large numbers of records, use {@link + * LockMode#READ_COMMITTED} isolation or use a null Transaction (which + * implies ReadCommitted). With ReadCommitted isolation, locks are + * released after each read operation. Using {@link + * LockMode#READ_UNCOMMITTED} will also avoid record locks, but does not + * provide any transactional guarantees.
        • + *
        • Ensure that all cursors and transactions are closed + * promptly.
        • + *
        + * + *

        Note that the above guidelines are also important for reducing contention + * when records are accessed concurrently from multiple threads and + * transactions. When using Oracle NoSQL DB, the application should avoid + * performing a large number of write operations in a single request. For read + * operations, NoSQL DB uses ReadCommitted isolation to avoid accumulation of + * locks.
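+ *
+ * <p>An illustrative sketch of a scan that avoids accumulating read locks
+ * (assuming an open {@code Database} named {@code db}):</p>
+ *
+ * <pre>
+ * // A null transaction plus READ_COMMITTED config releases each record
+ * // lock after the read completes.
+ * Cursor cursor = db.openCursor(null, CursorConfig.READ_COMMITTED);
+ * try {
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+ *            OperationStatus.SUCCESS) {
+ *         // process the record
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }
+ * </pre>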

        + * + *

        Another unexpected use of cache is possible when using a {@link + * DiskOrderedCursor} or when calling {@link Database#count()}. The amount of + * cache used by these operations is indicated by:

        + *
          + *
        • {@link #getDOSBytes()}
        • + *
        + * + *

        {@code DiskOrderedCursor} and {@code Database.count} should normally be + * explicitly constrained to use a maximum amount of cache memory. See:

        + *
          + *
        • {@link DiskOrderedCursorConfig#setInternalMemoryLimit(long)}
        • + *
        • {@link Database#count(long)}
        • + *
        + * + *

        Oracle NoSQL DB does not currently use {@code DiskOrderedCursor} or + * {@code Database.count}.
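+ *
+ * <p>For applications that do use them, a sketch of constraining the memory
+ * used by a {@code DiskOrderedCursor} (assuming an open {@code Database}
+ * named {@code db}; the limit shown is illustrative):</p>
+ *
+ * <pre>
+ * DiskOrderedCursorConfig docConfig = new DiskOrderedCursorConfig();
+ * docConfig.setInternalMemoryLimit(10L * 1024 * 1024); // cap at 10 MB
+ * DiskOrderedCursor cursor = db.openCursor(docConfig);
+ * try {
+ *     // iterate with cursor.getNext(...)
+ * } finally {
+ *     cursor.close();
+ * }
+ * </pre>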

        + * + *

        Cache Statistics: Eviction

        + * + *

Eviction is the removal of a Btree node from the cache in order to make room + * for newly added nodes. See {@link CacheMode} for a description of + * eviction.

        + * + *

        Normally eviction is performed via background threads in the eviction + * thread pools. Disabling the eviction pool threads is not recommended.

        + *
          + *
        • {@link EnvironmentConfig#ENV_RUN_EVICTOR}
        • + *
        • {@link EnvironmentConfig#ENV_RUN_OFFHEAP_EVICTOR}
        • + *
        + * + *

Eviction stats are an important indicator of cache efficiency and provide a + * deeper understanding of cache behavior. Main cache eviction is indicated + * by:

        + *
          + *
        • {@link #getNLNsEvicted()}
        • + *
        • {@link #getNNodesMutated()}
        • + *
        • {@link #getNNodesEvicted()}
        • + *
        • {@link #getNDirtyNodesEvicted()}
        • + *
        + * + *

        Note that objects evicted from the main cache are moved to the off-heap + * cache whenever possible.

        + * + *

        Off-heap cache eviction is indicated by:

        + *
          + *
        • {@link #getOffHeapLNsEvicted()}
        • + *
        • {@link #getOffHeapNodesMutated()}
        • + *
        • {@link #getOffHeapNodesEvicted()}
        • + *
        • {@link #getOffHeapDirtyNodesEvicted()}
        • + *
        + * + *

        When analyzing Java GC performance, the most relevant stats are {@code + * NLNsEvicted}, {@code NNodesMutated} and {@code NNodesEvicted}, which all + * indicate eviction from the main cache based on LRU. Large values for these + * stats indicate that many old generation Java objects are being GC'd, which + * is often a cause of GC pauses.

        + * + *

Note that when {@link CacheMode#EVICT_LN} is used or when LNs are {@link + * EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}, {@code NLNsEvicted} will + * be close to zero because LNs are not evicted based on LRU. Likewise, if an + * off-heap cache is configured, {@code NNodesMutated} will be close to zero + * because BIN mutation takes place in the off-heap cache. If any of the three + * values are large, this points to a potential GC performance problem. The GC + * logs should be consulted to confirm this.

        + * + *

        Large values for {@code NDirtyNodesEvicted} or {@code + * OffHeapDirtyNodesEvicted} indicate that the cache is severely undersized and + * there is a risk of using all available disk space and severe performance + * problems. Dirty nodes are evicted last (after evicting all non-dirty nodes) + * because they must be written to disk. This causes excessive writing and JE + * log cleaning may be unproductive.

        + * + *

        Note that when an off-heap cache is configured, {@code + * NDirtyNodesEvicted} will be zero because dirty nodes in the main cache are + * moved to the off-heap cache if they don't fit in the main cache, and are + * evicted completely and written to disk only when they don't fit in the + * off-heap cache.

        + * + *

        Another type of eviction tuning for the main cache involves changing the + * number of bytes evicted each time an evictor thread is awoken:

        + *
          + *
        • {@link EnvironmentConfig#EVICTOR_EVICT_BYTES}
        • + *
        + * + *

If the number of bytes is too large, it may cause a noticeable spike in + * eviction activity, reducing resources available to other threads. If the + * number of bytes is too small, the overhead of waking the evictor threads + * more often may be noticeable. The default value for this parameter is + * generally a good compromise. This parameter also impacts critical eviction, + * which is described next.
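+ *
+ * <p>A sketch of overriding the batch size at environment open time (the
+ * value shown is illustrative, not a recommendation):</p>
+ *
+ * <pre>
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * // Evict roughly 1 MB per evictor wakeup.
+ * envConfig.setConfigParam(
+ *     EnvironmentConfig.EVICTOR_EVICT_BYTES, String.valueOf(1024 * 1024));
+ * </pre>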

        + * + *

        Note that the corresponding parameter for the off-heap cache, {@link + * EnvironmentConfig#OFFHEAP_EVICT_BYTES}, works differently and is described + * in the next section.

        + * + *

        Cache Statistics: Critical + * Eviction

        + * + *

        The following stats indicate that critical eviction is occurring:

        + *
          + *
        • {@link #getNBytesEvictedCritical()}
        • + *
        • {@link #getNBytesEvictedCacheMode()}
        • + *
        • {@link #getNBytesEvictedDeamon()}
        • + *
        • {@link #getNBytesEvictedEvictorThread()}
        • + *
        • {@link #getNBytesEvictedManual()}
        • + *
        • {@link #getOffHeapCriticalNodesTargeted()}
        • + *
        • {@link #getOffHeapNodesTargeted()}
        • + *
        + * + *

        Eviction is performed by eviction pool threads, calls to {@link + * Environment#evictMemory()} in application background threads, or via {@link + * CacheMode#EVICT_LN} or {@link CacheMode#EVICT_BIN}. If these mechanisms are + * not sufficient to evict memory from cache as quickly as CRUD operations are + * adding memory to cache, then critical eviction comes into play. Critical + * eviction is performed in-line in the thread performing the CRUD operation, + * which is very undesirable since it increases operation latency.

        + * + *

        Critical eviction in the main cache is indicated by large values for + * {@code NBytesEvictedCritical}, as compared to the other {@code + * NBytesEvictedXXX} stats. Critical eviction in the off-heap cache is + * indicated by large values for {@code OffHeapCriticalNodesTargeted} compared + * to {@code OffHeapNodesTargeted}.
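+ *
+ * <p>A monitoring sketch that computes the critical share of main cache
+ * eviction (assuming {@code stats} came from {@code Environment.getStats};
+ * the threshold is illustrative):</p>
+ *
+ * <pre>
+ * long critical = stats.getNBytesEvictedCritical();
+ * long total = critical
+ *     + stats.getNBytesEvictedEvictorThread()
+ *     + stats.getNBytesEvictedCacheMode()
+ *     + stats.getNBytesEvictedManual()
+ *     + stats.getNBytesEvictedDeamon();
+ * if (total > 0 && ((double) critical / total) > 0.2) {
+ *     // Much eviction is happening in operation threads; consider the
+ *     // tuning options below.
+ * }
+ * </pre>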

        + * + *

        Additional stats indicating that background eviction threads may be + * insufficient are:

        + *
          + *
        • {@link #getNThreadUnavailable()}
        • + *
        • {@link #getOffHeapThreadUnavailable()}
        • + *
        + * + *

        Critical eviction can sometimes be reduced by changing {@link + * EnvironmentConfig#EVICTOR_CRITICAL_PERCENTAGE} or modifying the eviction + * thread pool parameters.

        + *
          + *
        • {@link EnvironmentConfig#EVICTOR_CRITICAL_PERCENTAGE}
        • + *
        • {@link EnvironmentConfig#EVICTOR_CORE_THREADS}
        • + *
        • {@link EnvironmentConfig#EVICTOR_MAX_THREADS}
        • + *
        • {@link EnvironmentConfig#EVICTOR_KEEP_ALIVE}
        • + *
        • {@link EnvironmentConfig#OFFHEAP_CORE_THREADS}
        • + *
        • {@link EnvironmentConfig#OFFHEAP_MAX_THREADS}
        • + *
        • {@link EnvironmentConfig#OFFHEAP_KEEP_ALIVE}
        • + *
        + * + *

        When using Oracle NoSQL DB, {@code EVICTOR_CRITICAL_PERCENTAGE} is set to + * 20% rather than using the JE default of 0%.

        + * + *

        In the main cache, critical eviction uses the same parameter as + * background eviction for determining how many bytes to evict at one + * time:

        + *
          + *
        • {@link EnvironmentConfig#EVICTOR_EVICT_BYTES}
        • + *
        + * + *

        Be careful when increasing this value, since this will cause longer + * operation latencies when critical eviction is occurring in the main + * cache.

        + * + *

        The corresponding parameter for the off-heap cache, {@code + * OFFHEAP_EVICT_BYTES}, works differently:

        + *
          + *
        • {@link EnvironmentConfig#OFFHEAP_EVICT_BYTES}
        • + *
        + * + *

        Unlike in the main cache, {@code OFFHEAP_EVICT_BYTES} defines the goal + * for background eviction to be below {@code MAX_OFF_HEAP_MEMORY}. The + * background evictor threads for the off-heap cache attempt to maintain the + * size of the off-heap cache at {@code MAX_OFF_HEAP_MEMORY - + * OFFHEAP_EVICT_BYTES}. If the off-heap cache size grows larger than {@code + * MAX_OFF_HEAP_MEMORY}, critical off-heap eviction will occur. The default + * value for {@code OFFHEAP_EVICT_BYTES} is fairly large to ensure that + * critical eviction does not occur. Be careful when lowering this value.

        + * + *

        This approach is intended to prevent the off-heap cache from exceeding + * its maximum size. If the maximum is exceeded, there is a danger that the + * JVM process will be killed by the OS. See {@link + * #getOffHeapAllocFailures()}.

        + * + *

        Cache Statistics: LRU List + * Contention

        + * + *

        Another common tuning issue involves thread contention on the cache LRU + * lists, although there is no stat to indicate such contention. Since each + * time a node is accessed it must be moved to the end of the LRU list, a + * single LRU list would cause contention among threads performing CRUD + * operations. By default there are 4 LRU lists for each cache. If contention + * is noticeable on internal Evictor.LRUList or OffHeapCache.LRUList methods, + * consider increasing the number of LRU lists:

        + *
          + *
        • {@link EnvironmentConfig#EVICTOR_N_LRU_LISTS}
        • + *
        • {@link EnvironmentConfig#OFFHEAP_N_LRU_LISTS}
        • + *
        + * + *

        However, note that increasing the number of LRU lists will decrease the + * accuracy of the LRU.
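+ *
+ * <p>A sketch of raising the list count at environment open time (the value
+ * shown is illustrative):</p>
+ *
+ * <pre>
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * // Trade a little LRU accuracy for less contention on the LRU lists.
+ * envConfig.setConfigParam(EnvironmentConfig.EVICTOR_N_LRU_LISTS, "8");
+ * </pre>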

        + * + *

        Cache Statistics: Debugging

        + * + *

        The following cache stats are unlikely to be needed for monitoring or + * tuning, but are sometimes useful for debugging and testing.

        + *
          + *
        • {@link #getDataBytes()}
        • + *
        • {@link #getAdminBytes()}
        • + *
        • {@link #getDataAdminBytes()}
        • + *
        • {@link #getNNodesTargeted()}
        • + *
        • {@link #getNNodesStripped()}
        • + *
        • {@link #getNNodesPutBack()}
        • + *
        • {@link #getNNodesMovedToDirtyLRU()}
        • + *
        • {@link #getNNodesSkipped()}
        • + *
        • {@link #getNRootNodesEvicted()}
        • + *
        • {@link #getNBINsFetchMissRatio()}
        • + *
        • {@link #getNINSparseTarget()}
        • + *
        • {@link #getNINNoTarget()}
        • + *
        • {@link #getMixedLRUSize()}
        • + *
        • {@link #getDirtyLRUSize()}
        • + *
        • {@link #getOffHeapAllocFailures()}
        • + *
        • {@link #getOffHeapAllocOverflows()}
        • + *
        • {@link #getOffHeapNodesStripped()}
        • + *
        • {@link #getOffHeapNodesSkipped()}
        • + *
        • {@link #getOffHeapLNsLoaded()}
        • + *
        • {@link #getOffHeapLNsStored()}
        • + *
        • {@link #getOffHeapBINsLoaded()}
        • + *
        • {@link #getOffHeapBINsStored()}
        • + *
        • {@link #getOffHeapTotalBlocks()}
        • + *
        • {@link #getOffHeapLRUSize()}
        • + *
        + * + *

        Likewise, the following cache configuration params are unlikely to be + * needed for tuning, but are sometimes useful for debugging and testing.

        + *
          + *
        • {@link EnvironmentConfig#ENV_DB_EVICTION}
        • + *
        • {@link EnvironmentConfig#TREE_MIN_MEMORY}
        • + *
        • {@link EnvironmentConfig#EVICTOR_FORCED_YIELD}
        • + *
        • {@link EnvironmentConfig#EVICTOR_ALLOW_BIN_DELTAS}
        • + *
        • {@link EnvironmentConfig#OFFHEAP_CHECKSUM}
        • + *
        + * + * + * + * @see Viewing + * Statistics with JConsole + */ +public class EnvironmentStats implements Serializable { + + /* + find/replace: + public static final StatDefinition\s+(\w+)\s*=\s*new StatDefinition\(\s*(".*"),\s*(".*")(,?\s*\w*\.?\w*)\); + public static final String $1_NAME =\n $2;\n public static final String $1_DESC =\n $3;\n public static final StatDefinition $1 =\n new StatDefinition(\n $1_NAME,\n $1_DESC$4); + */ + + private static final long serialVersionUID = 1734048134L; + + private StatGroup incompStats; + private StatGroup cacheStats; + private StatGroup offHeapStats; + private StatGroup ckptStats; + private StatGroup cleanerStats; + private StatGroup logStats; + private StatGroup lockStats; + private StatGroup envImplStats; + private StatGroup throughputStats; + + /** + * @hidden + * Internal use only. + */ + public EnvironmentStats() { + incompStats = new StatGroup(INCompStatDefinition.GROUP_NAME, + INCompStatDefinition.GROUP_DESC); + + cacheStats = new StatGroup(EvictorStatDefinition.GROUP_NAME, + EvictorStatDefinition.GROUP_DESC); + offHeapStats = new StatGroup(OffHeapStatDefinition.GROUP_NAME, + OffHeapStatDefinition.GROUP_DESC); + ckptStats = new StatGroup(CheckpointStatDefinition.GROUP_NAME, + CheckpointStatDefinition.GROUP_DESC); + cleanerStats = new StatGroup(CleanerStatDefinition.GROUP_NAME, + CleanerStatDefinition.GROUP_DESC); + logStats = new StatGroup(LogStatDefinition.GROUP_NAME, + LogStatDefinition.GROUP_DESC); + lockStats = new StatGroup(LockStatDefinition.GROUP_NAME, + LockStatDefinition.GROUP_DESC); + envImplStats = new StatGroup(DbiStatDefinition.ENV_GROUP_NAME, + DbiStatDefinition.ENV_GROUP_DESC); + throughputStats = + new StatGroup(DbiStatDefinition.THROUGHPUT_GROUP_NAME, + DbiStatDefinition.THROUGHPUT_GROUP_DESC); + } + + /** + * @hidden + * Internal use only. + */ + public List getStatGroups() { + return Arrays.asList( + logStats, cacheStats, offHeapStats, cleanerStats, incompStats, + ckptStats, envImplStats, lockStats, throughputStats); + } + + /** + * @hidden + * Internal use only. + */ + public Map getStatGroupsMap() { + final HashMap map = new HashMap<>(); + for (StatGroup group : getStatGroups()) { + map.put(group.getName(), group); + } + return map; + } + + /** + * @hidden + * Internal use only. + */ + public void setStatGroup(StatGroup sg) { + + if (sg.getName().equals(INCompStatDefinition.GROUP_NAME)) { + incompStats = sg; + } else if (sg.getName().equals(EvictorStatDefinition.GROUP_NAME)) { + cacheStats = sg; + } else if (sg.getName().equals(OffHeapStatDefinition.GROUP_NAME)) { + offHeapStats = sg; + } else if (sg.getName().equals(CheckpointStatDefinition.GROUP_NAME)) { + ckptStats = sg; + } else if (sg.getName().equals(CleanerStatDefinition.GROUP_NAME)) { + cleanerStats = sg; + } else if (sg.getName().equals(LogStatDefinition.GROUP_NAME)) { + logStats = sg; + } else if (sg.getName().equals(LockStatDefinition.GROUP_NAME)) { + lockStats = sg; + } else if (sg.getName().equals(DbiStatDefinition.ENV_GROUP_NAME)) { + envImplStats = sg; + } else if (sg.getName().equals( + DbiStatDefinition.THROUGHPUT_GROUP_NAME)) { + throughputStats = sg; + } else { + throw EnvironmentFailureException.unexpectedState + ("Invalid stat group name in setStatGroup " + + sg.getName()); + } + } + + /** + * @hidden + * Internal use only + * For JConsole plugin support. 
+ */ + public static String[] getStatGroupTitles() { + List groups = new EnvironmentStats().getStatGroups(); + final String[] titles = new String[groups.size()]; + for (int i = 0; i < titles.length; i += 1) { + titles[i] = groups.get(i).getName(); + } + return titles; + } + + /** + * @hidden + * Internal use only. + */ + public void setThroughputStats(StatGroup stats) { + throughputStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setINCompStats(StatGroup stats) { + incompStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setCkptStats(StatGroup stats) { + ckptStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setCleanerStats(StatGroup stats) { + cleanerStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setLogStats(StatGroup stats) { + logStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setMBAndEvictorStats(StatGroup clonedMBStats, + StatGroup clonedEvictorStats){ + cacheStats = clonedEvictorStats; + cacheStats.addAll(clonedMBStats); + } + + /** + * @hidden + * Internal use only. + */ + public void setOffHeapStats(StatGroup stats) { + offHeapStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setLockStats(StatGroup stats) { + lockStats = stats; + } + + /** + * @hidden + * Internal use only. + */ + public void setEnvStats(StatGroup stats) { + envImplStats = stats; + } + + /* INCompressor stats. */ + + /** + * The number of BINs encountered by the INCompressor that had cursors + * referring to them when the compressor ran. + */ + public long getCursorsBins() { + return incompStats.getLong(INCOMP_CURSORS_BINS); + } + + /** + * The time the Environment was created. + */ + public long getEnvironmentCreationTime() { + return envImplStats.getLong(ENV_CREATION_TIME); + } + + /** + * The number of BINs encountered by the INCompressor that had their + * database closed between the time they were put on the compressor queue + * and when the compressor ran. + */ + public long getDbClosedBins() { + return incompStats.getLong(INCOMP_DBCLOSED_BINS); + } + + /** + * The number of entries in the INCompressor queue when the getStats() + * call was made. + */ + public long getInCompQueueSize() { + return incompStats.getLong(INCOMP_QUEUE_SIZE); + } + + /** + * The number of BINs encountered by the INCompressor that were not + * actually empty when the compressor ran. + */ + public long getNonEmptyBins() { + return incompStats.getLong(INCOMP_NON_EMPTY_BINS); + } + + /** + * The number of BINs that were successfully processed by the IN + * Compressor. + */ + public long getProcessedBins() { + return incompStats.getLong(INCOMP_PROCESSED_BINS); + } + + /** + * The number of BINs encountered by the INCompressor that were split + * between the time they were put on the compressor queue and when the + * compressor ran. + */ + public long getSplitBins() { + return incompStats.getLong(INCOMP_SPLIT_BINS); + } + + /* Checkpointer stats. */ + + /** + * The Id of the last checkpoint. + */ + public long getLastCheckpointId() { + return ckptStats.getLong(CKPT_LAST_CKPTID); + } + + /** + * The total number of checkpoints run so far. + */ + public long getNCheckpoints() { + return ckptStats.getLong(CKPT_CHECKPOINTS); + } + + /** + * The accumulated number of full INs flushed to the log. + */ + public long getNFullINFlush() { + return ckptStats.getLong(CKPT_FULL_IN_FLUSH); + } + + /** + * The accumulated number of full BINs flushed to the log. 
+ */ + public long getNFullBINFlush() { + return ckptStats.getLong(CKPT_FULL_BIN_FLUSH); + } + + /** + * The accumulated number of Delta INs flushed to the log. + */ + public long getNDeltaINFlush() { + return ckptStats.getLong(CKPT_DELTA_IN_FLUSH); + } + + /** + * Byte length from last checkpoint start to the previous checkpoint start. + */ + public long getLastCheckpointInterval() { + return ckptStats.getLong(CKPT_LAST_CKPT_INTERVAL); + } + + /** + * The location in the log of the last checkpoint start. + */ + public long getLastCheckpointStart() { + return ckptStats.getLong(CKPT_LAST_CKPT_START); + } + + /** + * The location in the log of the last checkpoint end. + */ + public long getLastCheckpointEnd() { + return ckptStats.getLong(CKPT_LAST_CKPT_END); + } + + /* Cleaner stats. */ + + /** + * @deprecated in 7.0, always returns zero. Use {@link + * #getCurrentMinUtilization()} and {@link #getCurrentMaxUtilization()} to + * monitor cleaner behavior. + */ + public int getCleanerBacklog() { + return 0; + } + + /** + * @deprecated in 7.5, always returns zero. Use {@link + * #getProtectedLogSize()} and {@link #getProtectedLogSizeMap()} to monitor + * file protection. + */ + public int getFileDeletionBacklog() { + return 0; + }
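[Editor's sketch, not part of the imported source] The accessors below all read from a statistics snapshot. Assuming an already-open Environment named env, a snapshot is typically obtained as follows; StatsConfig.setClear(true) resets interval counters so the next snapshot covers only the time since this call:

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;
    import com.sleepycat.je.StatsConfig;

    class StatsPoller {
        // Take a snapshot, clearing per-interval counters afterwards.
        static void printCheckpointCount(Environment env) {
            StatsConfig config = new StatsConfig();
            config.setClear(true);
            EnvironmentStats stats = env.getStats(config);
            System.out.println("checkpoints: " + stats.getNCheckpoints());
        }
    }

StatsConfig.setFast(true) may be used instead when only cheaply-computed statistics are needed.
+ + /** + *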

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MIN_UTILIZATION_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MIN_UTILIZATION_NAME}

        + * + * The last known log minimum utilization as a percentage. This statistic + * provides a cheap way of checking the log utilization without having to + * run the DbSpace utility. + *

        + * The log utilization is the percentage of the total log size (all .jdb + * files) that is utilized or active. The remaining portion of the log + * is obsolete. The log cleaner is responsible for keeping the log + * utilization below the configured threshold, + * {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION}. + *

        + * This statistic is computed every time the log cleaner examines the + * utilization of the log, in order to determine whether cleaning is + * needed. The frequency can be configured using + * {@link EnvironmentConfig#CLEANER_BYTES_INTERVAL}. + *

        + * The obsolete portion of the log includes data that has expired at the + * time the statistic was last computed. An expiration histogram is stored + * for each file and used to compute the expired size. The minimum and + * maximum utilization are the lower and upper bounds of computed + * utilization, which may be different when some data has expired. See + * {@link #getNCleanerTwoPassRuns()} for more information. + *

        + * Note that the size of the utilized data in the log is always greater + * than the amount of user data (total size of keys and data). The active + * Btree internal nodes and other metadata are also included. + *

        + * + * @return the current minimum utilization, or -1 if the utilization has + * not been calculated for this environment since it was last opened. + * + * @see Cleaner Statistics + * @since 6.5 + */ + public int getCurrentMinUtilization() { + return cleanerStats.getInt(CLEANER_MIN_UTILIZATION); + } + + /** + *
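[Editor's sketch, using the imports from the earlier sketch] One way to act on the min/max utilization pair described above instead of running DbSpace; the 40% threshold and method name are illustrative only, and the default je.cleaner.minUtilization is 50:

    // Warn when even the optimistic (max) estimate is well below the
    // configured cleaner threshold; -1 means not yet computed.
    static void checkLogUtilization(Environment env) {
        EnvironmentStats stats = env.getStats(new StatsConfig().setFast(true));
        int min = stats.getCurrentMinUtilization();
        int max = stats.getCurrentMaxUtilization();
        if (min != -1 && max < 40) {
            System.err.println("cleaner falling behind: utilization "
                               + min + "-" + max + "%");
        }
    }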

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MAX_UTILIZATION_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MAX_UTILIZATION_NAME}

        + * + * The last known log maximum utilization as a percentage. This statistic + * provides a cheap way of checking the log utilization without having to + * run the DbSpace utility. + *

        + * The log utilization is the percentage of the total log size (all .jdb + * files) that is utilized or active. The remaining portion of the log + * is obsolete. The log cleaner is responsible for keeping the log + * utilization below the configured threshold, + * {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION}. + *

        + * This statistic is computed every time the log cleaner examines the + * utilization of the log, in order to determine whether cleaning is + * needed. The frequency can be configured using + * {@link EnvironmentConfig#CLEANER_BYTES_INTERVAL}. + *

        + * The obsolete portion of the log includes data that has expired at the + * time the statistic was last computed. An expiration histogram is stored + * for each file and used to compute the expired size. The minimum and + * maximum utilization are the lower and upper bounds of computed + * utilization, which may be different when some data has expired. See + * {@link #getNCleanerTwoPassRuns()} for more information. + *

        + * Note that the size of the utilized data in the log is always greater + * than the amount of user data (total size of keys and data). The active + * Btree internal nodes and other metadata are also included. + *

+ * + * @return the current maximum utilization, or -1 if the utilization has + * not been calculated for this environment since it was last opened. + * + * @see Cleaner Statistics + * @since 6.5 + */ + public int getCurrentMaxUtilization() { + return cleanerStats.getInt(CLEANER_MAX_UTILIZATION); + } + + /** + * @deprecated in JE 6.5, use {@link #getCurrentMinUtilization()} or + * {@link #getCurrentMaxUtilization()} instead. + */ + public int getLastKnownUtilization() { + return getCurrentMinUtilization(); + } + + /** + * @deprecated in JE 6.3. Adjustments are no longer needed because LN log + * sizes have been stored in the Btree since JE 6.0. + */ + public float getLNSizeCorrectionFactor() { + return 1; + } + + /** + * @deprecated in JE 5.0.56, use {@link #getLNSizeCorrectionFactor} instead. + */ + public float getCorrectedAvgLNSize() { + return Float.NaN; + } + + /** + * @deprecated in JE 5.0.56, use {@link #getCorrectedAvgLNSize} instead. + */ + public float getEstimatedAvgLNSize() { + return Float.NaN; + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RUNS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RUNS_NAME}

        + * + * Total number of cleaner runs, including {@link #getNCleanerTwoPassRuns() + * two-pass runs} but not including {@link #getNCleanerRevisalRuns() + * revisal runs}. The {@link #getCurrentMinUtilization() minimum} and + * {@link #getCurrentMaxUtilization() maximum} utilization values are used + * to drive cleaning. + * + * @see Cleaner Statistics + */ + public long getNCleanerRuns() { + return cleanerStats.getLong(CLEANER_RUNS); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TWO_PASS_RUNS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TWO_PASS_RUNS_NAME}

        + * + * Number of cleaner two-pass runs, which are a subset of the + * {@link #getNCleanerRuns() total cleaner runs}. The {@link + * #getCurrentMinUtilization() minimum} and {@link + * #getCurrentMaxUtilization() maximum} utilization values are used to + * drive cleaning. + *

        + * The obsolete portion of the log includes data that has expired. An + * expiration histogram is stored for each file and used to compute the + * expired size. The minimum and maximum utilization are the lower and + * upper bounds of computed utilization. They are different only when the + * TTL feature is used, and some data in the file has expired while other + * data has become obsolete for other reasons, such as record updates, + * record deletions or checkpoints. In this case the strictly obsolete size + * and the expired size may overlap because they are maintained separately. + *

        + * If they overlap completely then the minimum utilization is correct, + * while if there is no overlap then the maximum utilization is correct. + * Both utilization values trigger cleaning, but when there is significant + * overlap, the cleaner will perform two-pass cleaning. + *

        + * In the first pass of two-pass cleaning, the file is read to recompute + * obsolete and expired sizes, but the file is not cleaned. As a result of + * recomputing the expired sizes, the strictly obsolete and expired sizes + * will no longer overlap, and the minimum and maximum utilization will be + * equal. If the file should still be cleaned, based on the recomputed + * utilization, it is cleaned as usual, and in this case the number of + * two-pass runs (this statistic) is incremented. + *

        + * If the file should not be cleaned because its recomputed utilization is + * higher than expected, the file will not be cleaned. Instead, its + * recomputed expiration histogram, which has size information that now + * does not overlap with the strictly obsolete data, is stored for future + * use. By storing the revised histogram, the cleaner can select the most + * appropriate files for cleaning in the future. In this case the number of + * {@link #getNCleanerRevisalRuns() revisal runs} is incremented, and the + * number of {@link #getNCleanerRuns() total runs} is not incremented. + * + * @see Cleaner Statistics + * @since 6.5.0 + */ + public long getNCleanerTwoPassRuns() { + return cleanerStats.getLong(CLEANER_TWO_PASS_RUNS); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_REVISAL_RUNS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_REVISAL_RUNS_NAME}

        + * + * Number of cleaner runs that ended in revising expiration info, but not + * in any cleaning. + * + * @see #getNCleanerTwoPassRuns() + * @see Cleaner Statistics + * @since 6.5.0 + */ + public long getNCleanerRevisalRuns() { + return cleanerStats.getLong(CLEANER_REVISAL_RUNS); + } + + /** + * @deprecated in JE 6.3, always returns zero. + */ + public long getNCleanerProbeRuns() { + return 0; + } + + /** + *
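[Editor's sketch] The three run counters above relate as follows: total runs include two-pass runs, while revisal runs are counted separately. A minimal report, assuming a stats snapshot obtained as in the earlier sketch:

    // A high two-pass share indicates heavy overlap between expired
    // (TTL) data and data that is obsolete for other reasons.
    static void reportCleanerRuns(EnvironmentStats stats) {
        long runs = stats.getNCleanerRuns();           // includes two-pass
        long twoPass = stats.getNCleanerTwoPassRuns();
        long revisal = stats.getNCleanerRevisalRuns(); // not in runs
        System.out.printf("cleaner runs=%d twoPass=%d revisal=%d%n",
                          runs, twoPass, revisal);
    }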

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DELETIONS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DELETIONS_NAME}

        + * + * The number of cleaner file deletions this session. + * + * @see Cleaner Statistics + */ + public long getNCleanerDeletions() { + return cleanerStats.getLong(CLEANER_DELETIONS); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LN_QUEUE_SIZE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LN_QUEUE_SIZE_NAME}

        + * + * The number of LNs pending because they were locked and could not be + * migrated. + * + * @see Cleaner Statistics + */ + public int getPendingLNQueueSize() { + return cleanerStats.getInt(CLEANER_PENDING_LN_QUEUE_SIZE); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DISK_READS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DISK_READS_NAME}

        + * + * The number of disk reads performed by the cleaner. + * + * @see Cleaner Statistics + */ + public long getNCleanerDiskRead() { + return cleanerStats.getLong(CLEANER_DISK_READS); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ENTRIES_READ_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ENTRIES_READ_NAME}

        + * + * The accumulated number of log entries read by the cleaner. + * + * @see Cleaner Statistics + */ + public long getNCleanerEntriesRead() { + return cleanerStats.getLong(CLEANER_ENTRIES_READ); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_OBSOLETE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_OBSOLETE_NAME}

        + * + * The accumulated number of INs obsolete. + * + * @see Cleaner Statistics + */ + public long getNINsObsolete() { + return cleanerStats.getLong(CLEANER_INS_OBSOLETE); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_CLEANED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_CLEANED_NAME}

        + * + * The accumulated number of INs cleaned. + * + * @see Cleaner Statistics + */ + public long getNINsCleaned() { + return cleanerStats.getLong(CLEANER_INS_CLEANED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_DEAD_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_DEAD_NAME}

        + * + * The accumulated number of INs that were not found in the tree anymore + * (deleted). + * + * @see Cleaner Statistics + */ + public long getNINsDead() { + return cleanerStats.getLong(CLEANER_INS_DEAD); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_MIGRATED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_MIGRATED_NAME}

        + * + * The accumulated number of INs migrated. + * + * @see Cleaner Statistics + */ + public long getNINsMigrated() { + return cleanerStats.getLong(CLEANER_INS_MIGRATED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_OBSOLETE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_OBSOLETE_NAME}

        + * + * The accumulated number of BIN-deltas obsolete. + * + * @see Cleaner Statistics + */ + public long getNBINDeltasObsolete() { + return cleanerStats.getLong(CLEANER_BIN_DELTAS_OBSOLETE); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_CLEANED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_CLEANED_NAME}

        + * + * The accumulated number of BIN-deltas cleaned. + * + * @see Cleaner Statistics + */ + public long getNBINDeltasCleaned() { + return cleanerStats.getLong(CLEANER_BIN_DELTAS_CLEANED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_DEAD_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_DEAD_NAME}

        + * + * The accumulated number of BIN-deltas that were not found in the tree + * anymore (deleted). + * + * @see Cleaner Statistics + */ + public long getNBINDeltasDead() { + return cleanerStats.getLong(CLEANER_BIN_DELTAS_DEAD); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_MIGRATED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_MIGRATED_NAME}

        + * + * The accumulated number of BIN-deltas migrated. + * + * @see Cleaner Statistics + */ + public long getNBINDeltasMigrated() { + return cleanerStats.getLong(CLEANER_BIN_DELTAS_MIGRATED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_OBSOLETE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_OBSOLETE_NAME}

        + * + * The accumulated number of LNs obsolete. + * + * @see Cleaner Statistics + */ + public long getNLNsObsolete() { + return cleanerStats.getLong(CLEANER_LNS_OBSOLETE); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXPIRED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXPIRED_NAME}

+ * + * The accumulated number of obsolete LNs that were expired. Note that + * this does not include embedded LNs (those having a data size less than + * {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN}), because embedded LNs + * are always considered obsolete. + * + * @see Cleaner Statistics + */ + public long getNLNsExpired() { + return cleanerStats.getLong(CLEANER_LNS_EXPIRED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_CLEANED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_CLEANED_NAME}

        + * + * The accumulated number of LNs cleaned. + * + * @see Cleaner Statistics + */ + public long getNLNsCleaned() { + return cleanerStats.getLong(CLEANER_LNS_CLEANED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_DEAD_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_DEAD_NAME}

        + * + * The accumulated number of LNs that were not found in the tree anymore + * (deleted). + * + * @see Cleaner Statistics + */ + public long getNLNsDead() { + return cleanerStats.getLong(CLEANER_LNS_DEAD); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_LOCKED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_LOCKED_NAME}

        + * + * The accumulated number of LNs encountered that were locked. + * + * @see Cleaner Statistics + */ + public long getNLNsLocked() { + return cleanerStats.getLong(CLEANER_LNS_LOCKED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MIGRATED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MIGRATED_NAME}

        + * + * The accumulated number of LNs encountered that were migrated forward in + * the log by the cleaner. + * + * @see Cleaner Statistics + */ + public long getNLNsMigrated() { + return cleanerStats.getLong(CLEANER_LNS_MIGRATED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MARKED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MARKED_NAME}

+ * + * The accumulated number of LNs in temporary DBs that were dirtied by the + * cleaner and subsequently logged during checkpoint/eviction. + * + * @see Cleaner Statistics + */ + public long getNLNsMarked() { + return cleanerStats.getLong(CLEANER_LNS_MARKED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNQUEUE_HITS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNQUEUE_HITS_NAME}

        + * + * The accumulated number of LNs processed without a tree lookup. + * + * @see Cleaner Statistics + */ + public long getNLNQueueHits() { + return cleanerStats.getLong(CLEANER_LNQUEUE_HITS); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_PROCESSED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_PROCESSED_NAME}

        + * + * The accumulated number of LNs processed because they were previously + * locked. + * + * @see Cleaner Statistics + */ + public long getNPendingLNsProcessed() { + return cleanerStats.getLong(CLEANER_PENDING_LNS_PROCESSED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MARKED_LNS_PROCESSED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MARKED_LNS_PROCESSED_NAME}

        + * + * The accumulated number of LNs processed because they were previously + * marked for migration. + * + * @see Cleaner Statistics + */ + public long getNMarkedLNsProcessed() { + return cleanerStats.getLong(CLEANER_MARKED_LNS_PROCESSED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TO_BE_CLEANED_LNS_PROCESSED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TO_BE_CLEANED_LNS_PROCESSED_NAME}

        + * + * The accumulated number of LNs processed because they are soon to be + * cleaned. + * + * @see Cleaner Statistics + */ + public long getNToBeCleanedLNsProcessed() { + return cleanerStats.getLong(CLEANER_TO_BE_CLEANED_LNS_PROCESSED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_CLUSTER_LNS_PROCESSED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_CLUSTER_LNS_PROCESSED_NAME}

        + * + * The accumulated number of LNs processed because they qualify for + * clustering. + * + * @see Cleaner Statistics + */ + public long getNClusterLNsProcessed() { + return cleanerStats.getLong(CLEANER_CLUSTER_LNS_PROCESSED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_LOCKED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_LOCKED_NAME}

        + * + * The accumulated number of pending LNs that could not be locked for + * migration because of a long duration application lock. + * + * @see Cleaner Statistics + */ + public long getNPendingLNsLocked() { + return cleanerStats.getLong(CLEANER_PENDING_LNS_LOCKED); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_REPEAT_ITERATOR_READS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_REPEAT_ITERATOR_READS_NAME}

        + * + * The number of times we tried to read a log entry larger than the read + * buffer size and couldn't grow the log buffer to accommodate the large + * object. This happens during scans of the log during activities like + * environment open or log cleaning. Implies that the read chunk size + * controlled by je.log.iteratorReadSize is too small. + * + * @see Cleaner Statistics + */ + public long getNRepeatIteratorReads() { + return cleanerStats.getLong(CLEANER_REPEAT_ITERATOR_READS); + } + + /** + *
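[Editor's sketch] The javadoc above names je.log.iteratorReadSize as the remedy, which is exposed as EnvironmentConfig.LOG_ITERATOR_READ_SIZE. The 1 MB value here is illustrative, not a recommendation:

    import com.sleepycat.je.EnvironmentConfig;

    // Raise the log-scan read chunk size if nRepeatIteratorReads
    // grows steadily.
    static EnvironmentConfig withLargerIteratorReadSize() {
        EnvironmentConfig config = new EnvironmentConfig();
        config.setConfigParam(EnvironmentConfig.LOG_ITERATOR_READ_SIZE,
                              String.valueOf(1024 * 1024));
        return config;
    }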

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ACTIVE_LOG_SIZE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ACTIVE_LOG_SIZE_NAME}

        + * + *

        The {@link #getCurrentMinUtilization() log utilization} is the + * percentage of activeLogSize that is currently referenced or active.

        + * + * @see Cleaner Statistics + * @since 7.5 + */ + public long getActiveLogSize() { + return cleanerStats.getLong(CLEANER_ACTIVE_LOG_SIZE); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RESERVED_LOG_SIZE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RESERVED_LOG_SIZE_NAME}

        + * + *

        Deletion of reserved files may be postponed for several reasons. + * This occurs if an active file is protected (by a backup, for example), + * and then the file is cleaned and becomes a reserved file. See + * {@link #getProtectedLogSizeMap()} for more information. In a + * standalone JE environment, reserved files are normally deleted very + * soon after being cleaned.

        + * + *

        In an HA environment, reserved files are retained because they might + * be used for replication to electable nodes that have been offline + * for the {@link com.sleepycat.je.rep.ReplicationConfig#FEEDER_TIMEOUT} + * interval or longer, or to offline secondary nodes. The replication + * stream position of these nodes is unknown, so whether these files could + * be used to avoid a network restore, when bringing these nodes online, + * is also unknown. The files are retained just in case they can be used + * for such replication. Files are reserved for replication on both master + * and replicas, since a replica may become a master at a future time. + * Such files will be deleted (oldest file first) to make room for a + * write operation, if the write operation would have caused a disk limit + * to be violated.

        + * + *

        In NoSQL DB, this retention of reserved files has the additional + * benefit of supplying the replication stream to subscribers of the + * Stream API, when such subscribers need to replay the stream from an + * earlier point in time.

        + * + * @see Cleaner Statistics + * @since 7.5 + */ + public long getReservedLogSize() { + return cleanerStats.getLong(CLEANER_RESERVED_LOG_SIZE); + } + + /** + *
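[Editor's sketch] A minimal report placing the reserved size next to the related categories described in this section; the values come from one non-atomic snapshot, so they should be read as approximate:

    // Break the log down by the categories used in these javadocs.
    static void reportLogSizes(EnvironmentStats stats) {
        System.out.printf("total=%d active=%d reserved=%d protected=%d%n",
                          stats.getTotalLogSize(),
                          stats.getActiveLogSize(),
                          stats.getReservedLogSize(),
                          stats.getProtectedLogSize());
    }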

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_NAME}

        + * + *

        Reserved files are protected for reasons described by {@link + * #getProtectedLogSizeMap()}.

        + * + * @see Cleaner Statistics + * @since 7.5 + */ + public long getProtectedLogSize() { + return cleanerStats.getLong(CLEANER_PROTECTED_LOG_SIZE); + } + + /** + *

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_MAP_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_MAP_NAME}

        + * + *

        {@link #getReservedLogSize() Reserved} data files are temporarily + * {@link #getProtectedLogSize() protected} for a number of reasons. The + * keys in the protected log size map are the names of the protecting + * entities, and the values are the number of bytes protected by each + * entity. The type and format of the entity names are as follows:

        + * + *
        +     *    Backup-N
        +     *    DatabaseCount-N
        +     *    DiskOrderedCursor-N
        +     *    Syncup-N
        +     *    Feeder-N
        +     *    NetworkRestore-N
        +     * 
        + * + *

        Where:

        + *
          + *
• + * {@code Backup-N} represents a {@link DbBackup} in progress, + * i.e., for which {@link DbBackup#startBackup()} has been called + * and {@link DbBackup#endBackup()} has not yet been called. All + * active files are initially protected by the backup, but these + * are not reserved files and only appear in the map if they are + * cleaned and become reserved after the backup starts. Files + * are not protected if they have been copied and + * {@link DbBackup#removeFileProtection(String)} has been called. + * {@code N} is a sequentially assigned integer. + *
+ *
• + * {@code DatabaseCount-N} represents an outstanding call to + * {@link Database#count()}. + * All active files are initially protected by this method, but + * these are not reserved files and only appear in the map if + * they are cleaned and become reserved during the execution of + * {@code Database.count}. + * {@code N} is a sequentially assigned integer. + *
+ *
• + * {@code DiskOrderedCursor-N} represents a + * {@link DiskOrderedCursor} that has not yet been closed by + * {@link DiskOrderedCursor#close()}. + * All active files are initially protected when the cursor is + * opened, but these are not reserved files and only appear in + * the map if they are cleaned and become reserved while the + * cursor is open. + * {@code N} is a sequentially assigned integer. + *
+ *
• + * {@code Syncup-N} represents an in-progress negotiation between + * a master and replica node in an HA replication group to + * establish a replication stream. This is normally a very short + * negotiation and occurs when a replica joins the group or after + * an election is held. During syncup, all reserved files are + * protected. + * {@code N} is the node name of the other node involved in the + * syncup, i.e., if this node is a master then it is the name of + * the replica, and vice versa. + *
+ *
• + * {@code Feeder-N} represents an HA master node that is supplying + * the replication stream to a replica. Normally data in active + * files is being supplied and this data is not in the reserved + * or protected categories. But if the replica is lagging, data + * from reserved files may be supplied, and in that case will be + * protected and appear in the map. + * {@code N} is the node name of the replica receiving the + * replication stream. + *
+ *
• + * {@code NetworkRestore-N} represents an HA replica or master + * node that is supplying files to a node that is performing a + * {@link com.sleepycat.je.rep.NetworkRestore}. The files supplied + * are all active files plus the two most recently written + * reserved files. The two reserved files will appear in the map, + * as well as any of the active files that were cleaned and became + * reserved during the network restore. Files that have already + * been copied by the network restore are not protected. + * {@code N} is the name of the node performing the + * {@link com.sleepycat.je.rep.NetworkRestore}. + *
        + * + *

        When more than one entity is included in the map, in general the + * largest value points to the entity primarily responsible for + * preventing reclamation of disk space. Note that the values normally + * sum to more than {@link #getProtectedLogSize()}, since protection often + * overlaps.

        + * + *

        The string format of this stat consists of {@code name=size} pairs + * separated by semicolons, where name is the entity name described + * above and size is the number of protected bytes.

+ * + * @see Cleaner Statistics + * @since 7.5 + */ + public SortedMap<String, Long> getProtectedLogSizeMap() { + return cleanerStats.getMap(CLEANER_PROTECTED_LOG_SIZE_MAP); + }
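[Editor's sketch] Inspecting the map to find the entity retaining the most disk space, for example a DbBackup that was never ended; the method name is illustrative:

    import java.util.Map;
    import java.util.SortedMap;

    // Find the protecting entity (Backup-N, Feeder-N, ...) holding the
    // most protected bytes in this snapshot.
    static void reportBiggestProtector(EnvironmentStats stats) {
        SortedMap<String, Long> map = stats.getProtectedLogSizeMap();
        Map.Entry<String, Long> worst = null;
        for (Map.Entry<String, Long> e : map.entrySet()) {
            if (worst == null || e.getValue() > worst.getValue()) {
                worst = e;
            }
        }
        if (worst != null) {
            System.out.println(worst.getKey() + " protects "
                               + worst.getValue() + " bytes");
        }
    }
+ + /** + *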

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_AVAILABLE_LOG_SIZE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_AVAILABLE_LOG_SIZE_NAME}

        + * + *

        This is the amount that can be logged by write operations, and + * other JE activity such as checkpointing, without violating a disk + * limit. The files making up {@code reservedLogSize} can be deleted to + * make room for these write operations, so {@code availableLogSize} is + * the sum of the current disk free space and the reserved size that is not + * protected ({@code reservedLogSize} - {@code protectedLogSize}). The + * current disk free space is calculated using the disk volume's free + * space, {@link EnvironmentConfig#MAX_DISK} and {@link + * EnvironmentConfig#FREE_DISK}.

        + * + *

        Note that when a record is written, the number of bytes includes JE + * overheads for the record. Also, this causes Btree metadata to be + * written during checkpoints, and other metadata is also written by JE. + * So the space occupied on disk by a given set of records cannot be + * calculated by simply summing the key/data sizes.

        + * + *

        Also note that {@code availableLogSize} will be negative when a disk + * limit has been violated, representing the amount that needs to be freed + * before write operations are allowed.

        + * + * @see Cleaner Statistics + * @see EnvironmentConfig#MAX_DISK + * @see EnvironmentConfig#FREE_DISK + * @since 7.5 + */ + public long getAvailableLogSize() { + return cleanerStats.getLong(CLEANER_AVAILABLE_LOG_SIZE); + } + + /** + *
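[Editor's sketch] The admission check this stat enables; per the javadoc, the estimate should allow for JE per-record overhead, and a negative availableLogSize means a disk limit is already violated:

    // Gate a bulk load on the remaining log budget. A negative
    // availableLogSize also causes this check to refuse the load.
    static boolean roomForBulkLoad(EnvironmentStats stats,
                                   long estimatedBytes) {
        return stats.getAvailableLogSize() > estimatedBytes;
    }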

        {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TOTAL_LOG_SIZE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TOTAL_LOG_SIZE_NAME}

        + * + * @see Cleaner Statistics + */ + public long getTotalLogSize() { + return cleanerStats.getLong(CLEANER_TOTAL_LOG_SIZE); + } + + /* LogManager stats. */ + + /** + * The total number of requests for database objects which were not in + * memory. + */ + public long getNCacheMiss() { + return logStats.getAtomicLong(LBFP_MISS); + } + + /** + * The location of the next entry to be written to the log. + * + *

        Note that the log entries prior to this position may not yet have + * been flushed to disk. Flushing can be forced using a Sync or + * WriteNoSync commit, or a checkpoint.

+ */ + public long getEndOfLog() { + return logStats.getLong(LOGMGR_END_OF_LOG); + } + + /** + * The number of fsyncs issued through the group commit manager. A subset + * of nLogFsyncs. + */ + public long getNFSyncs() { + return logStats.getAtomicLong(FSYNCMGR_FSYNCS); + } + + /** + * The number of fsyncs requested through the group commit manager. + */ + public long getNFSyncRequests() { + return logStats.getLong(FSYNCMGR_FSYNC_REQUESTS); + } + + /** + * The number of fsync requests submitted to the group commit manager which + * timed out. + */ + public long getNFSyncTimeouts() { + return logStats.getLong(FSYNCMGR_TIMEOUTS); + } + + /** + * The total number of milliseconds used to perform fsyncs. + * + * @since 7.0, although the stat was output by {@link #toString} and + * appeared in the je.stat.csv file in earlier versions. + */ + public long getFSyncTime() { + return logStats.getLong(GRPCMGR_FSYNC_TIME); + } + + /** + * The maximum number of milliseconds used to perform a single fsync. + * + * @since 7.0 + */ + public long getFSyncMaxTime() { + return logStats.getLong(GRPCMGR_FSYNC_MAX_TIME); + } + + /** + * The total number of fsyncs of the JE log. This includes those fsyncs + * issued on behalf of transaction commits. + */ + public long getNLogFSyncs() { + return logStats.getLong(FILEMGR_LOG_FSYNCS); + } + + /** + * The number of log buffers currently instantiated. + */ + public int getNLogBuffers() { + return logStats.getInt(LBFP_LOG_BUFFERS); + } + + /** + * The number of disk reads which required repositioning the disk head + * more than 1MB from the previous file position. Reads in a different + * *.jdb log file than the last IO constitute a random read. + *

+ * This number is approximate and may differ from the actual number of + * random disk reads depending on the type of disks and file system, disk + * geometry, and file system cache size. + */ + public long getNRandomReads() { + return logStats.getLong(FILEMGR_RANDOM_READS); + } + + /** + * The number of bytes read which required repositioning the disk head + * more than 1MB from the previous file position. Reads in a different + * *.jdb log file than the last IO constitute a random read. + *

+ * This number is approximate and may vary depending on the type of disks and file + * system, disk geometry, and file system cache size. + */ + public long getNRandomReadBytes() { + return logStats.getLong(FILEMGR_RANDOM_READ_BYTES); + } + + /** + * The number of disk writes which required repositioning the disk head by + * more than 1MB from the previous file position. Writes to a different + * *.jdb log file (i.e. a file "flip") than the last IO constitute a random + * write. + *

+ * This number is approximate and may differ from the actual number of + * random disk writes depending on the type of disks and file system, disk + * geometry, and file system cache size. + */ + public long getNRandomWrites() { + return logStats.getLong(FILEMGR_RANDOM_WRITES); + } + + /** + * The number of bytes written which required repositioning the disk head + * more than 1MB from the previous file position. Writes in a different + * *.jdb log file than the last IO constitute a random write. + *

+ * This number is approximate and may vary depending on the type of disks and file + * system, disk geometry, and file system cache size. + */ + public long getNRandomWriteBytes() { + return logStats.getLong(FILEMGR_RANDOM_WRITE_BYTES); + } + + /** + * The number of disk reads which did not require repositioning the disk + * head more than 1MB from the previous file position. Reads in a + * different *.jdb log file than the last IO constitute a random read. + *

+ * This number is approximate and may differ from the actual number of + * sequential disk reads depending on the type of disks and file system, + * disk geometry, and file system cache size. + */ + public long getNSequentialReads() { + return logStats.getLong(FILEMGR_SEQUENTIAL_READS); + } + + /** + * The number of bytes read which did not require repositioning the disk + * head more than 1MB from the previous file position. Reads in a + * different *.jdb log file than the last IO constitute a random read. + *

+ * This number is approximate and may vary depending on the type of disks and file + * system, disk geometry, and file system cache size. + */ + public long getNSequentialReadBytes() { + return logStats.getLong(FILEMGR_SEQUENTIAL_READ_BYTES); + } + + /** + * The number of disk writes which did not require repositioning the disk + * head by more than 1MB from the previous file position. Writes to a + * different *.jdb log file (i.e. a file "flip") than the last IO + * constitute a random write. + *

+ * This number is approximate and may differ from the actual number of + * sequential disk writes depending on the type of disks and file system, + * disk geometry, and file system cache size. + */ + public long getNSequentialWrites() { + return logStats.getLong(FILEMGR_SEQUENTIAL_WRITES); + } + + /** + * The number of bytes written which did not require repositioning the + * disk head more than 1MB from the previous file position. Writes in a + * different *.jdb log file than the last IO constitute a random write. + *

+ * This number is approximate and may vary depending on the type of disks and file + * system, disk geometry, and file system cache size. + */ + public long getNSequentialWriteBytes() { + return logStats.getLong(FILEMGR_SEQUENTIAL_WRITE_BYTES); + } + + /** + * The number of bytes read to fulfill file read operations by reading out + * of the pending write queue. + */ + public long getNBytesReadFromWriteQueue() { + return logStats.getLong(FILEMGR_BYTES_READ_FROM_WRITEQUEUE); + } + + /** + * The number of bytes written from the pending write queue. + */ + public long getNBytesWrittenFromWriteQueue() { + return logStats.getLong(FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE); + } + + /** + * The number of file read operations which were fulfilled by reading out + * of the pending write queue. + */ + public long getNReadsFromWriteQueue() { + return logStats.getLong(FILEMGR_READS_FROM_WRITEQUEUE); + } + + /** + * The number of file write operations executed from the pending write + * queue. + */ + public long getNWritesFromWriteQueue() { + return logStats.getLong(FILEMGR_WRITES_FROM_WRITEQUEUE); + } + + /** + * The number of write operations which would overflow the Write Queue. + */ + public long getNWriteQueueOverflow() { + return logStats.getLong(FILEMGR_WRITEQUEUE_OVERFLOW); + } + + /** + * The number of write operations which would overflow the Write Queue + * and could not be queued. + */ + public long getNWriteQueueOverflowFailures() { + return logStats.getLong(FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES); + } + + /** + * The total memory currently consumed by log buffers, in bytes. If this + * environment uses the shared cache, this method returns only the amount + * used by this environment. + */ + public long getBufferBytes() { + return logStats.getLong(LBFP_BUFFER_BYTES); + } + + /** + * The number of requests for database objects not contained within the + * in memory data structures. + */ + public long getNNotResident() { + return logStats.getAtomicLong(LBFP_NOT_RESIDENT); + } + + /** + * The number of reads which had to be repeated when faulting in an object + * from disk because the read chunk size controlled by je.log.faultReadSize + * is too small. + */ + public long getNRepeatFaultReads() { + return logStats.getLong(LOGMGR_REPEAT_FAULT_READS); + } + + /** + * The number of writes which had to be completed using the temporary + * marshalling buffer because the fixed size log buffers specified by + * je.log.totalBufferBytes and je.log.numBuffers were not large enough. + */ + public long getNTempBufferWrites() { + return logStats.getLong(LOGMGR_TEMP_BUFFER_WRITES); + } + + /** + * The number of times a log file has been opened. + */ + public int getNFileOpens() { + return logStats.getInt(FILEMGR_FILE_OPENS); + } + + /** + * The number of files currently open in the file cache. + */ + public int getNOpenFiles() { + return logStats.getInt(FILEMGR_OPEN_FILES); + } + + /* Return Evictor stats. */ + + /** + * @deprecated The method returns 0 always. + */ + public long getRequiredEvictBytes() { + return 0; + } + + /** + * @deprecated This statistic has no meaning after the implementation + * of the new evictor in JE 6.0. The method returns 0 always. + */ + public long getNNodesScanned() { + return 0; + } + + /** + * @deprecated Use {@link #getNEvictionRuns()} instead. + */ + public long getNEvictPasses() { + return cacheStats.getLong(EVICTOR_EVICTION_RUNS); + } + + /** + * @deprecated use {@link #getNNodesTargeted()} instead.
+ */ + public long getNNodesSelected() { + return cacheStats.getLong(EVICTOR_NODES_TARGETED); + } + + /** + * @deprecated Use {@link #getNNodesEvicted()} instead. + */ + public long getNNodesExplicitlyEvicted() { + return cacheStats.getLong(EVICTOR_NODES_EVICTED); + } + + /** + * @deprecated Use {@link #getNNodesStripped()} instead. + */ + public long getNBINsStripped() { + return cacheStats.getLong(EVICTOR_NODES_STRIPPED); + } + + /** + * @deprecated Use {@link #getNNodesMutated()} instead. + */ + public long getNBINsMutated() { + return cacheStats.getLong(EVICTOR_NODES_MUTATED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_EVICTION_RUNS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_EVICTION_RUNS_NAME}

        + * + *

        When an evictor thread is awoken it performs eviction until + * {@link #getCacheTotalBytes()} is at least + * {@link EnvironmentConfig#EVICTOR_EVICT_BYTES} less than the + * {@link EnvironmentConfig#MAX_MEMORY_PERCENT total cache size}. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics + */ + public long getNEvictionRuns() { + return cacheStats.getLong(EVICTOR_EVICTION_RUNS); + } + + /** + *
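[Editor's sketch] The two parameters named above are set like any other JE config param; the values here (60% of heap for the cache, 10 MB evicted per batch) are illustrative only:

    // Tune the eviction targets referenced in the javadoc above.
    static EnvironmentConfig withEvictionTuning() {
        EnvironmentConfig config = new EnvironmentConfig();
        config.setConfigParam(EnvironmentConfig.MAX_MEMORY_PERCENT, "60");
        config.setConfigParam(EnvironmentConfig.EVICTOR_EVICT_BYTES,
                              String.valueOf(10L * 1024 * 1024));
        return config;
    }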

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_TARGETED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_TARGETED_NAME}

        + * + *

        An eviction target may actually be evicted, or skipped, or put back + * to the LRU, potentially after partial eviction (stripping) or + * BIN-delta mutation is done on it. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Debugging + */ + public long getNNodesTargeted() { + return cacheStats.getLong(EVICTOR_NODES_TARGETED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_EVICTED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_EVICTED_NAME}

        + * + *

        Does not include {@link #getNLNsEvicted() LN eviction} or + * {@link #getNNodesMutated() BIN-delta mutation}. + * Includes eviction of {@link #getNDirtyNodesEvicted() dirty nodes} and + * {@link #getNRootNodesEvicted() root nodes}. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Eviction + */ + public long getNNodesEvicted() { + return cacheStats.getLong(EVICTOR_NODES_EVICTED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_ROOT_NODES_EVICTED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_ROOT_NODES_EVICTED_NAME}

        + * + *

        The root node of a Database is only evicted after all other nodes in + * the Database, so this implies that the entire Database has fallen out of + * cache and is probably closed. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Debugging + */ + public long getNRootNodesEvicted() { + return cacheStats.getLong(EVICTOR_ROOT_NODES_EVICTED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_DIRTY_NODES_EVICTED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_DIRTY_NODES_EVICTED_NAME}

        + * + *

        When a dirty IN is evicted from main cache and no off-heap cache is + * configured, the IN must be logged. When an off-heap cache is configured, + * dirty INs can be moved from main cache to off-heap cache based on LRU, + * but INs are only logged when they are evicted from off-heap cache. + * Therefore, this stat is always zero when an off-heap cache is configured. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Eviction + */ + public long getNDirtyNodesEvicted() { + return cacheStats.getLong(EVICTOR_DIRTY_NODES_EVICTED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_LNS_EVICTED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_LNS_EVICTED_NAME}

        + * + *

        When a BIN is considered for eviction based on LRU, if the BIN + * contains resident LNs in main cache, it is stripped of the LNs rather + * than being evicted. This stat reflects LNs evicted in this manner, but + * not LNs evicted as a result of using {@link CacheMode#EVICT_LN}. Also + * note that {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded} LNs + * are evicted immediately and are not reflected in this stat value. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Eviction + */ + public long getNLNsEvicted() { + return cacheStats.getLong(EVICTOR_LNS_EVICTED); + } + + /** + *
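[Editor's sketch] For contrast with the LRU-driven stripping counted by this stat, an application can request per-operation LN eviction via CacheMode; the database name is hypothetical:

    import com.sleepycat.je.CacheMode;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseConfig;

    // LNs are evicted as each operation completes; per the note above,
    // such evictions are not counted in nLNsEvicted.
    static Database openScanHeavyDb(Environment env) {
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setCacheMode(CacheMode.EVICT_LN);
        return env.openDatabase(null, "scanHeavyDb", dbConfig);
    }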

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_STRIPPED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_STRIPPED_NAME}

        + * + *

        BINs are stripped in order to {@link #getNLNsEvicted() evict LNs}. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Debugging + */ + public long getNNodesStripped() { + return cacheStats.getLong(EVICTOR_NODES_STRIPPED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MUTATED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MUTATED_NAME}

        + * + *

        When a BIN is considered for eviction based on LRU, if the BIN + * can be mutated to a BIN-delta, it is mutated rather than being evicted. + * Note that when an off-heap cache is configured, this stat value will be + * zero because BIN mutation will take place only in the off-heap cache; + * see {@link #getOffHeapNodesMutated()}. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Eviction + */ + public long getNNodesMutated() { + return cacheStats.getLong(EVICTOR_NODES_MUTATED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_PUT_BACK_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_PUT_BACK_NAME}

        + * + *

        Reasons for putting back a target IN are:

        + *
          + *
• The IN was accessed by an operation while the evictor was + * processing it.
+ *
• To prevent the cache usage for Btree objects from falling below + * {@link EnvironmentConfig#TREE_MIN_MEMORY}.
+ *
        + * + *

        See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Debugging + */ + public long getNNodesPutBack() { + return cacheStats.getLong(EVICTOR_NODES_PUT_BACK); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MOVED_TO_PRI2_LRU_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MOVED_TO_PRI2_LRU_NAME}

        + * + *

When an off-heap cache is not configured, dirty nodes are evicted last + * from the main cache by moving them to a 2nd priority LRU list. When an + * off-heap cache is configured, level-2 INs that reference off-heap BINs are + * evicted last from the main cache, using the same approach. + * See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Debugging + */ + public long getNNodesMovedToDirtyLRU() { + return cacheStats.getLong(EVICTOR_NODES_MOVED_TO_PRI2_LRU); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_SKIPPED_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_SKIPPED_NAME}

        + * + *

        Reasons for skipping a target IN are:

        + *
          + *
• It has already been evicted by another thread.
+ *
• It cannot be evicted because concurrent activity added resident + * child nodes.
+ *
• It cannot be evicted because it is dirty and the environment is + * read-only.
+ *
        + *

        See {@link CacheMode} for a description of eviction.

        + * + * @see Cache Statistics: Debugging + */ + public long getNNodesSkipped() { + return cacheStats.getLong(EVICTOR_NODES_SKIPPED); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#THREAD_UNAVAILABLE_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#THREAD_UNAVAILABLE_NAME}

        + * + * @see Cache Statistics: Critical + * Eviction + */ + public long getNThreadUnavailable() { + return cacheStats.getAtomicLong(THREAD_UNAVAILABLE); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_SHARED_CACHE_ENVS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_SHARED_CACHE_ENVS_NAME}

        + * + *

        This method says nothing about whether this environment is using + * the shared cache or not.

        + * + */ + public int getNSharedCacheEnvironments() { + return cacheStats.getInt(EVICTOR_SHARED_CACHE_ENVS); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_NAME}

        + * + *

        Note that the number of LN fetches does not necessarily correspond + * to the number of records accessed, since some LNs may be + * {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}.

        + * + * @see Cache Statistics: Sizing + */ + public long getNLNsFetch() { + return cacheStats.getAtomicLong(LN_FETCH); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_NAME}

        + * + * @see Cache Statistics: Sizing + */ + public long getNBINsFetch() { + return cacheStats.getAtomicLong(BIN_FETCH); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_NAME}

        + * + * @see Cache Statistics: Sizing + */ + public long getNUpperINsFetch() { + return cacheStats.getAtomicLong(UPPER_IN_FETCH); + } + + /** + *

        {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_MISS_DESC}

        + * + *

        Group: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} + *
        Name: {@value + * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_MISS_NAME}

        + * + *

        Note that the number of LN fetches does not necessarily correspond + * to the number of records accessed, since some LNs may be + * {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}.

        + * + * @see Cache Statistics: Sizing + */ + public long getNLNsFetchMiss() { + return cacheStats.getAtomicLong(LN_FETCH_MISS); + } + + /** + *
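The two LN counters above support a simple main-cache miss-rate calculation. A minimal sketch, not part of this commit, assuming an EnvironmentStats snapshot obtained from Environment.getStats:

    // Fraction of LN fetch attempts that missed the main cache.
    static double lnFetchMissRate(EnvironmentStats stats) {
        long fetches = stats.getNLNsFetch();     // all LN fetch attempts
        long misses = stats.getNLNsFetchMiss();  // attempts that had to read the log
        return (fetches == 0) ? 0.0 : (double) misses / fetches;
    }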

+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_NAME}</p>
+     *
+     * <p>This is the portion of {@link #getNBINsFetch()} that resulted in a
+     * fetch miss. The fetch may be for a full BIN or BIN-delta
+     * ({@link #getNBINDeltasFetchMiss()}), depending on whether a BIN-delta
+     * currently exists (see {@link EnvironmentConfig#TREE_BIN_DELTA}).
+     * However, additional full BIN fetches occur when mutating a BIN-delta to
+     * a full BIN ({@link #getNFullBINsMiss()}) whenever this is necessary for
+     * completing an operation.</p>
+     *
+     * <p>Therefore, the total number of BIN fetch misses
+     * (including BIN-deltas) is:</p>
+     *
+     * <p>{@code nFullBINsMiss + nBINsFetchMiss}</p>
+     *
+     * <p>And the total number of full BIN (vs BIN-delta) fetch misses is:</p>
+     *
+     * <p>{@code nFullBINsMiss + nBINsFetchMiss - nBINDeltasFetchMiss}</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getNBINsFetchMiss() {
+        return cacheStats.getAtomicLong(BIN_FETCH_MISS);
+    }
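The two formulas in the javadoc above translate directly into code. A minimal sketch, not part of this commit, assuming an EnvironmentStats snapshot from Environment.getStats:

    // Total BIN fetch misses, including BIN-deltas: nFullBINsMiss + nBINsFetchMiss.
    static long totalBinFetchMisses(EnvironmentStats stats) {
        return stats.getNFullBINsMiss() + stats.getNBINsFetchMiss();
    }

    // Total full BIN (vs BIN-delta) fetch misses:
    // nFullBINsMiss + nBINsFetchMiss - nBINDeltasFetchMiss.
    static long fullBinFetchMisses(EnvironmentStats stats) {
        return stats.getNFullBINsMiss() + stats.getNBINsFetchMiss()
            - stats.getNBINDeltasFetchMiss();
    }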

+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_FETCH_MISS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_FETCH_MISS_NAME}</p>
+     *
+     * <p>This represents the portion of {@code nBINsFetchMiss()} that fetched
+     * BIN-deltas rather than full BINs. See {@link #getNBINsFetchMiss()}.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getNBINDeltasFetchMiss() {
+        return cacheStats.getAtomicLong(BIN_DELTA_FETCH_MISS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#FULL_BIN_MISS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#FULL_BIN_MISS_NAME}</p>
+     *
+     * <p>Note that this stat does not include full BIN misses that are
+     * not due to BIN-delta mutations. See {@link #getNBINsFetchMiss()}.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getNFullBINsMiss() {
+        return cacheStats.getAtomicLong(FULL_BIN_MISS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_MISS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_MISS_NAME}</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getNUpperINsFetchMiss() {
+        return cacheStats.getAtomicLong(UPPER_IN_FETCH_MISS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_RATIO_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_RATIO_NAME}</p>
+     *
+     * <p>This stat can be misleading because it does not include the number
+     * of full BIN fetch misses resulting from BIN-delta mutations
+     * ({@link #getNFullBINsMiss()}). It may be improved, or perhaps
+     * deprecated, in a future release.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public float getNBINsFetchMissRatio() {
+        return cacheStats.getFloat(BIN_FETCH_MISS_RATIO);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_BLIND_OPS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_BLIND_OPS_NAME}</p>
+     *
+     * <p>Note that this stat is misplaced. It should be in the
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_NAME} group
+     * and will probably be moved there in a future release.</p>
+     *
+     * @see Cache Statistics: Debugging
+     * @see EnvironmentConfig#TREE_BIN_DELTA
+     */
+    public long getNBINDeltaBlindOps() {
+        return cacheStats.getAtomicLong(BIN_DELTA_BLIND_OPS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_UPPER_INS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_UPPER_INS_NAME}</p>
+     *
+     * <p>When used on shared environment caches, zero is returned when
+     * {@link StatsConfig#setFast fast stats} are requested.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getNCachedUpperINs() {
+        return cacheStats.getLong(CACHED_UPPER_INS);
+    }
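Because these per-node counts are zero under fast stats on a shared cache, a caller would request non-fast ("slow") stats to populate them. A minimal sketch, not part of this commit, assuming an open Environment:

    // Request non-fast stats so shared-cache node counts are populated.
    static long cachedUpperINs(Environment env) {
        StatsConfig config = new StatsConfig();
        config.setFast(false); // fast stats would return zero here on a shared cache
        return env.getStats(config).getNCachedUpperINs();
    }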

+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BINS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BINS_NAME}</p>
+     *
+     * <p>When used on shared environment caches, zero is returned when
+     * {@link StatsConfig#setFast fast stats} are requested.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getNCachedBINs() {
+        return cacheStats.getLong(CACHED_BINS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BIN_DELTAS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BIN_DELTAS_NAME}</p>
+     *
+     * <p>When used on shared environment caches, zero is returned when
+     * {@link StatsConfig#setFast fast stats} are requested.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getNCachedBINDeltas() {
+        return cacheStats.getLong(CACHED_BIN_DELTAS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_SPARSE_TARGET_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_SPARSE_TARGET_NAME}</p>
+     *
+     * <p>Each IN contains an array of references to child INs or LNs. When
+     * there are between one and four children resident, the size of the array
+     * is reduced to four. This saves a significant amount of cache memory for
+     * BINs when {@link CacheMode#EVICT_LN} is used, because there are
+     * typically only a small number of LNs resident in main cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getNINSparseTarget() {
+        return cacheStats.getLong(CACHED_IN_SPARSE_TARGET);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_NO_TARGET_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_NO_TARGET_NAME}</p>
+     *
+     * <p>Each IN contains an array of references to child INs or LNs. When
+     * there are no children resident, no array is allocated. This saves a
+     * significant amount of cache memory for BINs when {@link
+     * CacheMode#EVICT_LN} is used, because there are typically only a small
+     * number of LNs resident in main cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getNINNoTarget() {
+        return cacheStats.getLong(CACHED_IN_NO_TARGET);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_COMPACT_KEY_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_COMPACT_KEY_NAME}</p>
+     *
+     * @see Cache Statistics: Size Optimizations
+     * @see EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH
+     */
+    public long getNINCompactKeyIN() {
+        return cacheStats.getLong(CACHED_IN_COMPACT_KEY);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#PRI2_LRU_SIZE_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#PRI2_LRU_SIZE_NAME}</p>
+     *
+     * @see Cache Statistics: Debugging
+     * @see #getNNodesMovedToDirtyLRU()
+     */
+    public long getDirtyLRUSize() {
+        return cacheStats.getLong(PRI2_LRU_SIZE);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#PRI1_LRU_SIZE_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#PRI1_LRU_SIZE_NAME}</p>
+     *
+     * @see Cache Statistics: Debugging
+     * @see #getNNodesMovedToDirtyLRU()
+     */
+    public long getMixedLRUSize() {
+        return cacheStats.getLong(PRI1_LRU_SIZE);
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBINsEvictedEvictorThread() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBINsEvictedManual() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBINsEvictedCritical() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBINsEvictedCacheMode() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBINsEvictedDaemon() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNUpperINsEvictedEvictorThread() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNUpperINsEvictedManual() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNUpperINsEvictedCritical() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNUpperINsEvictedCacheMode() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNUpperINsEvictedDaemon() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBatchesEvictorThread() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBatchesManual() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBatchesCacheMode() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBatchesCritical() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getNBatchesDaemon() {
+        return 0;
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_EVICTORTHREAD_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_EVICTORTHREAD_NAME}</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getNBytesEvictedEvictorThread() {
+        return cacheStats.getLong(
+            EvictionSource.EVICTORTHREAD.getNumBytesEvictedStatDef());
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_MANUAL_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_MANUAL_NAME}</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getNBytesEvictedManual() {
+        return cacheStats.getLong(
+            EvictionSource.MANUAL.getNumBytesEvictedStatDef());
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CACHEMODE_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CACHEMODE_NAME}</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getNBytesEvictedCacheMode() {
+        return cacheStats.getLong(
+            EvictionSource.CACHEMODE.getNumBytesEvictedStatDef());
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CRITICAL_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CRITICAL_NAME}</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getNBytesEvictedCritical() {
+        return cacheStats.getLong(
+            EvictionSource.CRITICAL.getNumBytesEvictedStatDef());
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_DAEMON_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_DAEMON_NAME}</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getNBytesEvictedDeamon() {
+        return cacheStats.getLong(
+            EvictionSource.DAEMON.getNumBytesEvictedStatDef());
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getAvgBatchEvictorThread() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getAvgBatchManual() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getAvgBatchCacheMode() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getAvgBatchCritical() {
+        return 0;
+    }
+
+    /** @deprecated This statistic has been removed. The method returns 0 always. */
+    public long getAvgBatchDaemon() {
+        return 0;
+    }
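The five getters above break evicted bytes down by eviction source; their sum gives total bytes evicted. A minimal sketch, not part of this commit (note the accessor name spelled getNBytesEvictedDeamon in this class):

    // Total bytes evicted across all eviction sources.
    static long totalBytesEvicted(EnvironmentStats stats) {
        return stats.getNBytesEvictedEvictorThread()
            + stats.getNBytesEvictedManual()
            + stats.getNBytesEvictedCacheMode()
            + stats.getNBytesEvictedCritical()
            + stats.getNBytesEvictedDeamon();
    }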

+    /* MemoryBudget stats. */
+
+    /**
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_SHARED_CACHE_TOTAL_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_SHARED_CACHE_TOTAL_BYTES_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, this method returns the
+     * total size of the shared cache, i.e., the sum of the {@link
+     * #getCacheTotalBytes()} for all environments that are sharing the cache.
+     * If this environment does not use the shared cache, this method returns
+     * zero.</p>
+     *
+     * <p>To get the configured maximum cache size, see {@link
+     * EnvironmentMutableConfig#getCacheSize}.</p>
+     */
+    public long getSharedCacheTotalBytes() {
+        return cacheStats.getLong(MB_SHARED_CACHE_TOTAL_BYTES);
+    }
+
+    /**
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_TOTAL_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_TOTAL_BYTES_NAME}</p>
+     *
+     * <p>This method returns the sum of {@link #getDataBytes}, {@link
+     * #getAdminBytes}, {@link #getLockBytes} and {@link #getBufferBytes}.</p>
+     *
+     * <p>If this environment uses the shared cache, this method returns only
+     * the amount used by this environment.</p>
+     *
+     * <p>To get the configured maximum cache size, see {@link
+     * EnvironmentMutableConfig#getCacheSize}.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getCacheTotalBytes() {
+        return cacheStats.getLong(MB_TOTAL_BYTES);
+    }
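The sum identity in the javadoc above can serve as a sanity check. A minimal sketch, not part of this commit; getBufferBytes is the log-buffer component documented elsewhere in this class:

    // Should equal getCacheTotalBytes(), per the javadoc above.
    static long cacheTotalFromComponents(EnvironmentStats stats) {
        return stats.getDataBytes() + stats.getAdminBytes()
            + stats.getLockBytes() + stats.getBufferBytes();
    }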

+    /**
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DATA_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DATA_BYTES_NAME}</p>
+     *
+     * <p>The value returned by this method includes the amount returned by
+     * {@link #getDataAdminBytes}.</p>
+     *
+     * <p>If this environment uses the shared cache, this method returns only
+     * the amount used by this environment.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getDataBytes() {
+        return cacheStats.getLong(MB_DATA_BYTES);
+    }
+
+    /**
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DATA_ADMIN_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DATA_ADMIN_BYTES_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, this method returns only
+     * the amount used by this environment.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getDataAdminBytes() {
+        return cacheStats.getLong(MB_DATA_ADMIN_BYTES);
+    }
+
+    /**
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DOS_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DOS_BYTES_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, this method returns only
+     * the amount used by this environment.</p>
+     *
+     * @see Cache Statistics: Unexpected Sizes
+     */
+    public long getDOSBytes() {
+        return cacheStats.getLong(MB_DOS_BYTES);
+    }
+
+    /**
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_ADMIN_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_ADMIN_BYTES_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, this method returns only
+     * the amount used by this environment.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getAdminBytes() {
+        return cacheStats.getLong(MB_ADMIN_BYTES);
+    }
+
+    /**
+     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_LOCK_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_LOCK_BYTES_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, this method returns only
+     * the amount used by this environment.</p>
+     *
+     * @see Cache Statistics: Unexpected Sizes
+     */
+    public long getLockBytes() {
+        return cacheStats.getLong(MB_LOCK_BYTES);
+    }
+
+    /**
+     * @deprecated Please use {@link #getDataBytes} to get the amount of cache
+     * used for data and use {@link #getAdminBytes}, {@link #getLockBytes} and
+     * {@link #getBufferBytes} to get other components of the total cache usage
+     * ({@link #getCacheTotalBytes}).
+     */
+    public long getCacheDataBytes() {
+        return getCacheTotalBytes() - getBufferBytes();
+    }
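Callers migrating away from the deprecated getCacheDataBytes can compute the same value from the supported getters, exactly as the deprecated body does. A minimal sketch, not part of this commit:

    // Legacy "cache data bytes": everything in the main cache except log buffers.
    static long legacyCacheDataBytes(EnvironmentStats stats) {
        return stats.getCacheTotalBytes() - stats.getBufferBytes();
    }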

+    /* OffHeapCache stats. */
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_FAILURE_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_FAILURE_NAME}</p>
+     *
+     * <p>Currently, with the default off-heap allocator, an allocation
+     * failure occurs only when OutOfMemoryError is thrown by {@code
+     * Unsafe.allocateMemory}. This might be considered a fatal error, since it
+     * means that no memory is available on the machine or VM. In practice,
+     * we have not seen this occur because Linux will automatically kill
+     * processes that are rapidly allocating memory when available memory is
+     * very low.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapAllocFailures() {
+        return cacheStats.getLong(OffHeapStatDefinition.ALLOC_FAILURE);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_OVERFLOW_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_OVERFLOW_NAME}</p>
+     *
+     * <p>Currently, with the default off-heap allocator, this never happens
+     * because the allocator will perform the allocation as long as any memory
+     * is available. Even so, the off-heap evictor normally prevents
+     * overflowing of the off-heap cache by freeing memory before it is
+     * needed.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapAllocOverflows() {
+        return cacheStats.getLong(OffHeapStatDefinition.ALLOC_OVERFLOW);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#THREAD_UNAVAILABLE_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#THREAD_UNAVAILABLE_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getOffHeapThreadUnavailable() {
+        return cacheStats.getLong(OffHeapStatDefinition.THREAD_UNAVAILABLE);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_TARGETED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_TARGETED_NAME}</p>
+     *
+     * <p>Nodes are selected as targets by the evictor based on LRU, always
+     * selecting from the cold end of the LRU list. First, non-dirty nodes and
+     * nodes referring to off-heap LNs are selected based on LRU. When there
+     * are no more such nodes then dirty nodes with no off-heap LNs are
+     * selected, based on LRU.</p>
+     *
+     * <p>An eviction target may actually be evicted, or skipped, or put
+     * back to the LRU, potentially after stripping child LNs or mutation to
+     * a BIN-delta.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getOffHeapNodesTargeted() {
+        return offHeapStats.getLong(OffHeapStatDefinition.NODES_TARGETED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CRITICAL_NODES_TARGETED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CRITICAL_NODES_TARGETED_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Critical Eviction
+     */
+    public long getOffHeapCriticalNodesTargeted() {
+        return cacheStats.getLong(
+            OffHeapStatDefinition.CRITICAL_NODES_TARGETED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_EVICTED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_EVICTED_NAME}</p>
+     *
+     * <p>An evicted BIN is completely removed from the off-heap cache and LRU
+     * list. If it is dirty, it must be logged. A BIN is evicted only if it has
+     * no off-heap child LNs and it cannot be mutated to a BIN-delta.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Eviction
+     */
+    public long getOffHeapNodesEvicted() {
+        return cacheStats.getLong(OffHeapStatDefinition.NODES_EVICTED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#DIRTY_NODES_EVICTED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#DIRTY_NODES_EVICTED_NAME}</p>
+     *
+     * <p>This stat value is a subset of {@link #getOffHeapNodesEvicted()}.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Eviction
+     */
+    public long getOffHeapDirtyNodesEvicted() {
+        return cacheStats.getLong(OffHeapStatDefinition.DIRTY_NODES_EVICTED);
+    }
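Since dirty evictions are documented above as a subset of all off-heap node evictions, the clean-eviction count falls out by subtraction. A minimal sketch, not part of this commit:

    // Off-heap BINs evicted without requiring a log write.
    static long offHeapCleanNodesEvicted(EnvironmentStats stats) {
        return stats.getOffHeapNodesEvicted()
            - stats.getOffHeapDirtyNodesEvicted();
    }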

+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_STRIPPED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_STRIPPED_NAME}</p>
+     *
+     * <p>When a BIN is stripped, all off-heap LNs that the BIN refers to are
+     * evicted. The {@link #getOffHeapLNsEvicted()} stat is incremented
+     * accordingly.</p>
+     *
+     * <p>A stripped BIN could be a BIN in main cache that is stripped of
+     * off-heap LNs, or a BIN that is off-heap and also refers to off-heap
+     * LNs. When a main cache BIN is stripped, it is removed from the
+     * off-heap LRU. When an off-heap BIN is stripped, it is either modified
+     * in place to remove the LN references (this is done when a small
+     * number of LNs are referenced and the wasted space is small), or is
+     * copied to a new, smaller off-heap block with no LN references.</p>
+     *
+     * <p>After stripping an off-heap BIN, it is moved to the hot end of the
+     * LRU list. Off-heap BINs are only mutated to BIN-deltas or evicted
+     * completely when they do not refer to any off-heap LNs. This gives
+     * BINs precedence over LNs in the cache.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapNodesStripped() {
+        return cacheStats.getLong(OffHeapStatDefinition.NODES_STRIPPED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_MUTATED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_MUTATED_NAME}</p>
+     *
+     * <p>Mutation to a BIN-delta is performed for full BINs that do not
+     * refer to any off-heap LNs and can be represented as BIN-deltas in
+     * cache and on disk (see {@link EnvironmentConfig#TREE_BIN_DELTA}).
+     * When a BIN is mutated, it is copied to a new, smaller off-heap
+     * block. After mutating an off-heap BIN, it is moved to the hot end of
+     * the LRU list.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Eviction
+     */
+    public long getOffHeapNodesMutated() {
+        return cacheStats.getLong(OffHeapStatDefinition.NODES_MUTATED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_SKIPPED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_SKIPPED_NAME}</p>
+     *
+     * <p>For example, a node will be skipped if it has been moved to the
+     * hot end of the LRU list by another thread, or more rarely, already
+     * processed by another evictor thread. This can occur because there is
+     * a short period of time where a targeted node has been removed from
+     * the LRU by the evictor thread, but not yet latched.</p>
+     *
+     * <p>The number of skipped nodes is normally very small, compared to the
+     * number of targeted nodes.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapNodesSkipped() {
+        return cacheStats.getLong(OffHeapStatDefinition.NODES_SKIPPED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_EVICTED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_EVICTED_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Eviction
+     */
+    public long getOffHeapLNsEvicted() {
+        return offHeapStats.getLong(OffHeapStatDefinition.LNS_EVICTED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_LOADED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_LOADED_NAME}</p>
+     *
+     * <p>LNs are loaded when requested by CRUD operations or other internal
+     * btree operations.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapLNsLoaded() {
+        return offHeapStats.getLong(OffHeapStatDefinition.LNS_LOADED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_STORED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_STORED_NAME}</p>
+     *
+     * <p>LNs are stored off-heap when they are evicted from the main cache.
+     * Note that when {@link CacheMode#EVICT_LN} is used, the LN resides in
+     * the main cache for a very short period since it is evicted after the
+     * CRUD operation is complete.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapLNsStored() {
+        return offHeapStats.getLong(OffHeapStatDefinition.LNS_STORED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_LOADED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_LOADED_NAME}</p>
+     *
+     * <p>BINs are loaded when needed by CRUD operations or other internal
+     * btree operations.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapBINsLoaded() {
+        return offHeapStats.getLong(OffHeapStatDefinition.BINS_LOADED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_STORED_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_STORED_NAME}</p>
+     *
+     * <p>BINs are stored off-heap when they are evicted from the main cache.
+     * Note that when {@link CacheMode#EVICT_BIN} is used, the BIN resides
+     * in the main cache for a very short period since it is evicted after
+     * the CRUD operation is complete.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapBINsStored() {
+        return offHeapStats.getLong(OffHeapStatDefinition.BINS_STORED);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_LNS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_LNS_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public int getOffHeapCachedLNs() {
+        return offHeapStats.getInt(OffHeapStatDefinition.CACHED_LNS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BINS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BINS_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public int getOffHeapCachedBINs() {
+        return offHeapStats.getInt(OffHeapStatDefinition.CACHED_BINS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BIN_DELTAS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BIN_DELTAS_NAME}</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public int getOffHeapCachedBINDeltas() {
+        return offHeapStats.getInt(OffHeapStatDefinition.CACHED_BIN_DELTAS);
+    }
+
+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BYTES_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BYTES_NAME}</p>
+     *
+     * <p>This includes the estimated overhead for off-heap memory blocks, as
+     * well as their contents.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * <p>To get the configured maximum off-heap cache size, see {@link
+     * EnvironmentMutableConfig#getOffHeapCacheSize()}.</p>
+     *
+     * @see Cache Statistics: Sizing
+     */
+    public long getOffHeapTotalBytes() {
+        return offHeapStats.getLong(OffHeapStatDefinition.TOTAL_BYTES);
+    }
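Comparing this stat with the configured maximum gives a utilization figure. A minimal sketch, not part of this commit, assuming an open Environment; getOffHeapCacheSize is the EnvironmentMutableConfig accessor referenced in the javadoc above:

    // Fraction of the configured off-heap cache currently in use.
    static double offHeapUtilization(Environment env) {
        long used = env.getStats(null).getOffHeapTotalBytes();
        long max = env.getMutableConfig().getOffHeapCacheSize();
        return (max == 0) ? 0.0 : (double) used / max;
    }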

+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BLOCKS_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BLOCKS_NAME}</p>
+     *
+     * <p>There is one block for each off-heap BIN and one for each off-heap
+     * LN. So the total number of blocks is the sum of
+     * {@link #getOffHeapCachedLNs} and {@link #getOffHeapCachedBINs}.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapTotalBlocks() {
+        return offHeapStats.getInt(OffHeapStatDefinition.TOTAL_BLOCKS);
+    }
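The block-count identity stated in the javadoc above can be checked directly. A minimal sketch, not part of this commit:

    // Should match getOffHeapTotalBlocks(): one block per off-heap LN or BIN.
    static long offHeapBlocksFromCounts(EnvironmentStats stats) {
        return (long) stats.getOffHeapCachedLNs()
            + stats.getOffHeapCachedBINs();
    }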

+    /**
+     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LRU_SIZE_DESC}
+     *
+     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
+     * <br/>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LRU_SIZE_NAME}</p>
+     *
+     * <p>The off-heap LRU list is stored in the Java heap. Each entry occupies
+     * 20 bytes of memory when compressed oops are used, or 24 bytes otherwise.
+     * This memory is not considered part of the JE main cache, and is not
+     * included in main cache statistics.</p>
+     *
+     * <p>There is one LRU entry for each off-heap BIN, and one for each BIN in
+     * main cache that refers to one or more off-heap LNs. The latter approach
+     * avoids an LRU entry per off-heap LN, which would use excessive amounts
+     * of space in the Java heap. Similarly, when an off-heap BIN refers to
+     * off-heap LNs, only one LRU entry (for the BIN) is used.</p>
+     *
+     * <p>If this environment uses the shared cache, the return value is the
+     * total for all environments that are sharing the cache.</p>
+     *
+     * @see Cache Statistics: Debugging
+     */
+    public long getOffHeapLRUSize() {
+        return offHeapStats.getInt(OffHeapStatDefinition.LRU_SIZE);
+    }
+
+    /* EnvironmentImpl stats. */
+
+    /**
+     * Returns the number of latch upgrades (relatches) required while
+     * operating on this Environment. Latch upgrades are required when an
+     * operation assumes that a shared (read) latch will be sufficient but
+     * later determines that an exclusive (write) latch will actually be
+     * required.
+     *
+     * @return number of latch upgrades (relatches) required.
+     */
+    public long getRelatchesRequired() {
+        return envImplStats.getLong(ENV_RELATCHES_REQUIRED);
+    }
+
+    /* TxnManager stats. */
+
+    /**
+     * Total lock owners in lock table. Only provided when {@link
+     * com.sleepycat.je.Environment#getStats Environment.getStats} is
+     * called in "slow" mode.
+     */
+    public int getNOwners() {
+        return lockStats.getInt(LOCK_OWNERS);
+    }
+
+    /**
+     * Total read locks currently held. Only provided when {@link
+     * com.sleepycat.je.Environment#getStats Environment.getStats} is
+     * called in "slow" mode.
+     */
+    public int getNReadLocks() {
+        return lockStats.getInt(LOCK_READ_LOCKS);
+    }
+
+    /**
+     * Total locks currently in lock table. Only provided when {@link
+     * com.sleepycat.je.Environment#getStats Environment.getStats} is
+     * called in "slow" mode.
+     */
+    public int getNTotalLocks() {
+        return lockStats.getInt(LOCK_TOTAL);
+    }
+
+    /**
+     * Total transactions waiting for locks. Only provided when {@link
+     * com.sleepycat.je.Environment#getStats Environment.getStats} is
+     * called in "slow" mode.
+     */
+    public int getNWaiters() {
+        return lockStats.getInt(LOCK_WAITERS);
+    }
+
+    /**
+     * Total write locks currently held. Only provided when {@link
+     * com.sleepycat.je.Environment#getStats Environment.getStats} is
+     * called in "slow" mode.
+     */
+    public int getNWriteLocks() {
+        return lockStats.getInt(LOCK_WRITE_LOCKS);
+    }
+
+    /**
+     * Total number of lock requests to date.
+     */
+    public long getNRequests() {
+        return lockStats.getLong(LOCK_REQUESTS);
+    }
+
+    /**
+     * Total number of lock waits to date.
+     */
+    public long getNWaits() {
+        return lockStats.getLong(LOCK_WAITS);
+    }
+
+    /**
+     * Number of acquires of lock table latch with no contention.
+     */
+    public int getNAcquiresNoWaiters() {
+        return lockStats.getInt(LATCH_NO_WAITERS);
+    }
+
+    /**
+     * Number of acquires of lock table latch when it was already owned
+     * by the caller.
+     */
+    public int getNAcquiresSelfOwned() {
+        return lockStats.getInt(LATCH_SELF_OWNED);
+    }
+
+    /**
+     * Number of acquires of lock table latch when it was already owned by
+     * another thread.
+     */
+    public int getNAcquiresWithContention() {
+        return lockStats.getInt(LATCH_CONTENTION);
+    }
+
+    /**
+     * Number of successful no-wait acquires of the lock table latch.
+     */
+    public int getNAcquiresNoWaitSuccessful() {
+        return lockStats.getInt(LATCH_NOWAIT_SUCCESS);
+    }
+
+    /**
+     * Number of unsuccessful no-wait acquires of the lock table latch.
+     */
+    public int getNAcquiresNoWaitUnSuccessful() {
+        return lockStats.getInt(LATCH_NOWAIT_UNSUCCESS);
+    }
+
+    /**
+     * Number of releases of the lock table latch.
+     */
+    public int getNReleases() {
+        return lockStats.getInt(LATCH_RELEASES);
+    }
+
+    /**
+     * The number of user (non-internal) Cursor and Database get operations
+     * performed in BIN deltas.
+     */
+    public long getNBinDeltaGetOps() {
+        return envImplStats.getAtomicLong(ENV_BIN_DELTA_GETS);
+    }
+
+    /**
+     * The number of user (non-internal) Cursor and Database insert operations
+     * performed in BIN deltas (these are insertions performed via the various
+     * put methods).
+     */
+    public long getNBinDeltaInsertOps() {
+        return envImplStats.getAtomicLong(ENV_BIN_DELTA_INSERTS);
+    }
+
+    /**
+     * The number of user (non-internal) Cursor and Database update operations
+     * performed in BIN deltas (these are updates performed via the various
+     * put methods).
+     */
+    public long getNBinDeltaUpdateOps() {
+        return envImplStats.getAtomicLong(ENV_BIN_DELTA_UPDATES);
+    }
+
+    /**
+     * The number of user (non-internal) Cursor and Database delete operations
+     * performed in BIN deltas.
+     */
+    public long getNBinDeltaDeleteOps() {
+        return envImplStats.getAtomicLong(ENV_BIN_DELTA_DELETES);
+    }
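The lock-table and latch getters above are only populated in "slow" stats mode; mapping that to StatsConfig.setFast(false) is an assumption based on the {@link StatsConfig#setFast} reference earlier in this class. A minimal sketch, not part of this commit:

    // Summarize lock-table state; requires non-fast ("slow") stats collection.
    static String lockTableSummary(Environment env) {
        StatsConfig config = new StatsConfig();
        config.setFast(false); // lock-table counts are gathered only in slow mode
        EnvironmentStats stats = env.getStats(config);
        return "owners=" + stats.getNOwners()
            + " readLocks=" + stats.getNReadLocks()
            + " writeLocks=" + stats.getNWriteLocks()
            + " totalLocks=" + stats.getNTotalLocks()
            + " waiters=" + stats.getNWaiters();
    }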

+    /**
+     * Number of successful primary DB key search operations.
+     *
+     * <p>This operation corresponds to one of the following API calls:</p>
+     * <ul>
+     *     <li>A successful {@link Cursor#get(DatabaseEntry, DatabaseEntry,
+     *     Get, ReadOptions) Cursor.get} or {@link
+     *     Database#get(Transaction, DatabaseEntry, DatabaseEntry, Get,
+     *     ReadOptions) Database.get} call with {@link Get#SEARCH}, {@link
+     *     Get#SEARCH_GTE}, {@link Get#SEARCH_BOTH}, or {@link
+     *     Get#SEARCH_BOTH_GTE}.</li>
+     *     <li>A successful {@link SecondaryCursor#get(DatabaseEntry,
+     *     DatabaseEntry, DatabaseEntry, Get, ReadOptions)
+     *     SecondaryCursor.get} or {@link
+     *     SecondaryDatabase#get(Transaction, DatabaseEntry, DatabaseEntry,
+     *     DatabaseEntry, Get, ReadOptions) SecondaryDatabase.get} call
+     *     when the primary data is requested (via the {@code data} param).
+     *     This call internally performs a key search operation in the
+     *     primary DB in order to return the data.</li>
+     * </ul>
+     */
+    public long getPriSearchOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_SEARCH);
+    }
+
+    /**
+     * Number of failed primary DB key search operations.
+     *
+     * <p>This operation corresponds to a call to {@link Cursor#get(DatabaseEntry,
+     * DatabaseEntry, Get, ReadOptions) Cursor.get} or {@link
+     * Database#get(Transaction, DatabaseEntry, DatabaseEntry, Get,
+     * ReadOptions) Database.get} with {@link Get#SEARCH}, {@link
+     * Get#SEARCH_GTE}, {@link Get#SEARCH_BOTH}, or {@link
+     * Get#SEARCH_BOTH_GTE}, when the specified key is not found in the DB.</p>
+     */
+    public long getPriSearchFailOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_SEARCH_FAIL);
+    }
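Together, the success and failure counters above give a key-search hit rate. A minimal sketch, not part of this commit:

    // Fraction of primary key searches that found the key.
    static double priSearchHitRate(EnvironmentStats stats) {
        long ok = stats.getPriSearchOps();
        long failed = stats.getPriSearchFailOps();
        long total = ok + failed;
        return (total == 0) ? 0.0 : (double) ok / total;
    }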

+    /**
+     * Number of successful secondary DB key search operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * SecondaryCursor#get(DatabaseEntry, DatabaseEntry, DatabaseEntry, Get,
+     * ReadOptions) SecondaryCursor.get} or {@link
+     * SecondaryDatabase#get(Transaction, DatabaseEntry, DatabaseEntry,
+     * DatabaseEntry, Get, ReadOptions) SecondaryDatabase.get} with
+     * {@link Get#SEARCH}, {@link Get#SEARCH_GTE}, {@link Get#SEARCH_BOTH}, or
+     * {@link Get#SEARCH_BOTH_GTE}.</p>
+     *
+     * <p>Note: Operations are currently counted as secondary DB (rather than
+     * primary DB) operations only if the DB has been opened by the application
+     * as a secondary DB. In particular the stats may be confusing on an HA
+     * replica node if a secondary DB has not been opened by the application on
+     * the replica.</p>
+     */
+    public long getSecSearchOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_SEC_SEARCH);
+    }
+
+    /**
+     * Number of failed secondary DB key search operations.
+     *
+     * <p>This operation corresponds to a call to {@link
+     * SecondaryCursor#get(DatabaseEntry, DatabaseEntry, DatabaseEntry, Get,
+     * ReadOptions) SecondaryCursor.get} or {@link
+     * SecondaryDatabase#get(Transaction, DatabaseEntry, DatabaseEntry,
+     * DatabaseEntry, Get, ReadOptions) SecondaryDatabase.get} with {@link
+     * Get#SEARCH}, {@link Get#SEARCH_GTE}, {@link Get#SEARCH_BOTH}, or {@link
+     * Get#SEARCH_BOTH_GTE}, when the specified key is not found in the DB.</p>
+     *
+     * <p>Note: Operations are currently counted as secondary DB (rather than
+     * primary DB) operations only if the DB has been opened by the application
+     * as a secondary DB. In particular the stats may be confusing on an HA
+     * replica node if a secondary DB has not been opened by the application on
+     * the replica.</p>
+     */
+    public long getSecSearchFailOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_SEC_SEARCH_FAIL);
+    }
+
+    /**
+     * Number of successful primary DB position operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * Cursor#get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) Cursor.get}
+     * or {@link Database#get(Transaction, DatabaseEntry, DatabaseEntry, Get,
+     * ReadOptions) Database.get} with {@link Get#FIRST}, {@link Get#LAST},
+     * {@link Get#NEXT}, {@link Get#NEXT_DUP}, {@link Get#NEXT_NO_DUP},
+     * {@link Get#PREV}, {@link Get#PREV_DUP} or {@link Get#PREV_NO_DUP}.</p>
+     */
+    public long getPriPositionOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_POSITION);
+    }
+
+    /**
+     * Number of successful secondary DB position operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * SecondaryCursor#get(DatabaseEntry, DatabaseEntry, DatabaseEntry, Get,
+     * ReadOptions) SecondaryCursor.get} or {@link
+     * SecondaryDatabase#get(Transaction, DatabaseEntry, DatabaseEntry,
+     * DatabaseEntry, Get, ReadOptions) SecondaryDatabase.get} with
+     * {@link Get#FIRST}, {@link Get#LAST}, {@link Get#NEXT},
+     * {@link Get#NEXT_DUP}, {@link Get#NEXT_NO_DUP}, {@link Get#PREV},
+     * {@link Get#PREV_DUP} or {@link Get#PREV_NO_DUP}.</p>
+     *
+     * <p>Note: Operations are currently counted as secondary DB (rather than
+     * primary DB) operations only if the DB has been opened by the application
+     * as a secondary DB. In particular the stats may be confusing on an HA
+     * replica node if a secondary DB has not been opened by the application on
+     * the replica.</p>
+     */
+    public long getSecPositionOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_SEC_POSITION);
+    }
+
+    /**
+     * Number of successful primary DB insertion operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) Cursor.put}
+     * or {@link Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put,
+     * WriteOptions) Database.put} in one of the following cases:</p>
+     * <ul>
+     *     <li>When {@link Put#NO_OVERWRITE} or {@link Put#NO_DUP_DATA} is
+     *     specified.</li>
+     *     <li>When {@link Put#OVERWRITE} is specified and the key was inserted
+     *     because it previously did not exist in the DB.</li>
+     * </ul>
+     */
+    public long getPriInsertOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_INSERT);
+    }
+
+    /**
+     * Number of failed primary DB insertion operations.
+     *
+     * <p>This operation corresponds to a call to {@link Cursor#put(DatabaseEntry,
+     * DatabaseEntry, Put, WriteOptions) Cursor.put} or {@link
+     * Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put,
+     * WriteOptions) Database.put} with {@link Put#NO_OVERWRITE} or {@link
+     * Put#NO_DUP_DATA}, when the key could not be inserted because it
+     * previously existed in the DB.</p>
+     */
+    public long getPriInsertFailOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_INSERT_FAIL);
+    }
+
+    /**
+     * Number of successful secondary DB insertion operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) Cursor.put}
+     * or {@link Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put,
+     * WriteOptions) Database.put}, for a primary DB with an associated
+     * secondary DB. A secondary record is inserted when inserting a primary
+     * record with a non-null secondary key, or when updating a primary record
+     * and the secondary key is changed to a non-null value that is
+     * different than the previously existing value.</p>
+     *
+     * <p>Note: Operations are currently counted as secondary DB (rather than
+     * primary DB) operations only if the DB has been opened by the application
+     * as a secondary DB. In particular the stats may be confusing on an HA
+     * replica node if a secondary DB has not been opened by the application on
+     * the replica.</p>
+     */
+    public long getSecInsertOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_SEC_INSERT);
+    }
+
+    /**
+     * Number of successful primary DB update operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) Cursor.put}
+     * or {@link Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put,
+     * WriteOptions) Database.put} in one of the following cases:</p>
+     * <ul>
+     *     <li>When {@link Put#OVERWRITE} is specified and the key previously
+     *     existed in the DB.</li>
+     *     <li>When calling {@code Cursor.put} with {@link Put#CURRENT}.</li>
+     * </ul>
+     */
+    public long getPriUpdateOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_UPDATE);
+    }
+
+    /**
+     * Number of successful secondary DB update operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) Cursor.put}
+     * or {@link Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put,
+     * WriteOptions) Database.put}, when a primary record is updated and its
+     * TTL is changed. The associated secondary records must also be updated to
+     * reflect the change in the TTL.</p>
+     *
+     * <p>Note: Operations are currently counted as secondary DB (rather than
+     * primary DB) operations only if the DB has been opened by the application
+     * as a secondary DB. In particular the stats may be confusing on an HA
+     * replica node if a secondary DB has not been opened by the application on
+     * the replica.</p>
+     */
+    public long getSecUpdateOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_SEC_UPDATE);
+    }
+
+    /**
+     * Number of successful primary DB deletion operations.
+     *
+     * <p>This operation corresponds to a successful call to {@link
+     * Cursor#delete() Cursor.delete}, {@link Database#delete(Transaction,
+     * DatabaseEntry, WriteOptions) Database.delete}, {@link
+     * SecondaryCursor#delete() SecondaryCursor.delete} or {@link
+     * SecondaryDatabase#delete(Transaction, DatabaseEntry, WriteOptions)
+     * SecondaryDatabase.delete}.</p>
+     */
+    public long getPriDeleteOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_DELETE);
+    }
+
+    /**
+     * Number of failed primary DB deletion operations.
+     *
+     * <p>This operation corresponds to a call to {@link
+     * Database#delete(Transaction, DatabaseEntry,
+     * WriteOptions) Database.delete} or {@link
+     * SecondaryDatabase#delete(Transaction, DatabaseEntry, WriteOptions)
+     * SecondaryDatabase.delete}, when the key could not be deleted because it
+     * did not previously exist in the DB.</p>
+     */
+    public long getPriDeleteFailOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_PRI_DELETE_FAIL);
+    }
+
+    /**
+     * Number of successful secondary DB deletion operations.
+     *
+     * <p>This operation corresponds to one of the following API calls:</p>
+     * <ul>
+     *     <li>A successful call to {@link Cursor#delete() Cursor.delete} or
+     *     {@link Database#delete(Transaction, DatabaseEntry,
+     *     WriteOptions) Database.delete}, that deletes a primary record
+     *     containing a non-null secondary key.</li>
+     *     <li>A successful call to {@link SecondaryCursor#delete()
+     *     SecondaryCursor.delete} or {@link
+     *     SecondaryDatabase#delete(Transaction, DatabaseEntry,
+     *     WriteOptions) SecondaryDatabase.delete}.</li>
+     *     <li>A successful call to {@link Cursor#put(DatabaseEntry,
+     *     DatabaseEntry, Put, WriteOptions) Cursor.put} or {@link
+     *     Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put,
+     *     WriteOptions) Database.put} that updates a primary record and
+     *     changes its previously non-null secondary key to null.</li>
+     * </ul>
+     *
+     * <p>Note: Operations are currently counted as secondary DB (rather than
+     * primary DB) operations only if the DB has been opened by the application
+     * as a secondary DB. In particular the stats may be confusing on an HA
+     * replica node if a secondary DB has not been opened by the application on
+     * the replica.</p>
+     */
+    public long getSecDeleteOps() {
+        return throughputStats.getAtomicLong(THROUGHPUT_SEC_DELETE);
+    }
+
+    /**
+     * Returns a String representation of the stats in the form of
+     * &lt;stat&gt;=&lt;value&gt;
+     */
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        for (StatGroup group : getStatGroups()) {
+            sb.append(group.toString());
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Returns a String representation of the stats which includes stats
+     * descriptions in addition to &lt;stat&gt;=&lt;value&gt;
+     */
+    public String toStringVerbose() {
+        StringBuilder sb = new StringBuilder();
+        for (StatGroup group : getStatGroups()) {
+            sb.append(group.toStringVerbose());
+        }
+        return sb.toString();
+    }
+
+    /**
+     * @hidden
+     * Internal use only.
+     * JConsole plugin support: Get tips for stats.
+     */
+    public Map<String, String> getTips() {
+        Map<String, String> tipsMap = new HashMap<String, String>();
+        for (StatGroup group : getStatGroups()) {
+            group.addToTipMap(tipsMap);
+        }
+        return tipsMap;
+    }
+}
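For quick inspection, the two String forms above can simply be printed. A minimal sketch, not part of this commit, assuming an open Environment:

    // Dump all stat groups; toStringVerbose adds the per-stat descriptions.
    static void dumpStats(Environment env) {
        EnvironmentStats stats = env.getStats(null); // null selects the default config
        System.out.println(stats.toString());        // <stat>=<value> lines
        System.out.println(stats.toStringVerbose()); // with descriptions
    }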

        + * If during close(), a badly behaved internal thread cannot be stopped, + * then the JVM process must be stopped and restarted. The close() method first + * attempts a soft shutdown of each thread. If that fails to stop the thread, + * it is interrupted. If that fails to stop the thread, because it never + * becomes interruptible, then {@code EnvironmentWedgedException} is thrown by + * close(), after performing as much of the normal shutdown process as + * possible. Before this exception is thrown, a full thread dump is logged, to + * aid in debugging. + *

        + * Note that prior to calling close(), if JE attempts to shut down an internal + * thread and it cannot be shut down, the Environment will be {@link + * Environment#isValid() invalidated}, also causing an {@code + * EnvironmentWedgedException} to be thrown. In this case (as in all other + * cases where an {@link EnvironmentFailureException} is thrown and the + * Environment is invalidated), the application should call {@link + * Environment#close()}. The close() method will throw {@code + * EnvironmentWedgedException} in this case, as described above. + *

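A minimal handling sketch, assuming an open {@code env} handle; exiting with status 1 so an external supervisor restarts the JVM is an illustrative policy, not part of JE:

    // Illustrative sketch: a wedged environment cannot safely be re-opened
    // in this process, so shut the whole process down.
    try {
        env.close();
    } catch (EnvironmentWedgedException e) {
        System.err.println("Environment wedged, restarting process: " + e);
        System.exit(1);
    }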
        + * If the application fails to restart the process when this exception is + * thrown, it is likely that re-opening the Environment will not be possible, + * or will result in unpredictable behavior. This is because the thread that + * stopped may be holding a resource that is needed by the newly opened + * Environment. + * + * @since 7.1 + */ +public class EnvironmentWedgedException extends EnvironmentFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public EnvironmentWedgedException(EnvironmentImpl envImpl, + String message) { + super(envImpl, EnvironmentFailureReason.WEDGED, message); + } + + /** + * For internal use only. + * @hidden + */ + private EnvironmentWedgedException(String message, + EnvironmentWedgedException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public EnvironmentWedgedException wrapSelf(String msg) { + return new EnvironmentWedgedException(msg, this); + } +} diff --git a/src/com/sleepycat/je/ExceptionEvent.java b/src/com/sleepycat/je/ExceptionEvent.java new file mode 100644 index 0000000..51d5cf8 --- /dev/null +++ b/src/com/sleepycat/je/ExceptionEvent.java @@ -0,0 +1,62 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * A class representing an exception event. Contains an exception and the name + * of the daemon thread that it was thrown from. + */ +public class ExceptionEvent { + + private Exception exception; + private String threadName; + + public ExceptionEvent(Exception exception, String threadName) { + this.exception = exception; + this.threadName = threadName; + } + + public ExceptionEvent(Exception exception) { + this.exception = exception; + this.threadName = Thread.currentThread().toString(); + } + + /** + * Returns the exception in the event. + */ + public Exception getException() { + return exception; + } + + /** + * Returns the name of the daemon thread that threw the exception. + */ + public String getThreadName() { + return threadName; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(""); + sb.append(LoggerUtils.getStackTrace(exception)); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/ExceptionListener.java b/src/com/sleepycat/je/ExceptionListener.java new file mode 100644 index 0000000..45f9395 --- /dev/null +++ b/src/com/sleepycat/je/ExceptionListener.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je; + +/** + * A callback to notify the application program when an exception occurs in a + * JE Daemon thread. + */ +public interface ExceptionListener { + + /** + * This method is called if an exception is seen in a JE Daemon thread. + * + * @param event the ExceptionEvent representing the exception that was + * thrown. + */ + void exceptionThrown(ExceptionEvent event); +} diff --git a/src/com/sleepycat/je/ForeignConstraintException.java b/src/com/sleepycat/je/ForeignConstraintException.java new file mode 100644 index 0000000..cb7ce21 --- /dev/null +++ b/src/com/sleepycat/je/ForeignConstraintException.java @@ -0,0 +1,73 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when an attempt to write a primary database record would insert a + * secondary record with a key that does not exist in a foreign key database, + * when the secondary key is configured as a foreign key. + * + *

        When using the base API ({@code com.sleepycat.je}), this can occur when a + * {@link SecondaryDatabase} is configured to be associated with a foreign key + * database (see {@link SecondaryConfig#setForeignKeyDatabase}).

        + * + *

        When using the DPL ({@code com.sleepycat.persist}), this can occur when a + * {@link com.sleepycat.persist.model.SecondaryKey} is defined with a {@link + * com.sleepycat.persist.model.SecondaryKey#relatedEntity}.

        + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.

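A sketch of typical handling, assuming {@code primaryDb}, {@code txn}, {@code key} and {@code data} are set up as described above:

    // Illustrative sketch: the write fails if the new secondary key is
    // absent from the foreign key database.
    try {
        primaryDb.put(txn, key, data);
        txn.commit();
    } catch (ForeignConstraintException e) {
        txn.abort();  // the Transaction handle was invalidated
    }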
        + * + * @see Special considerations + * for using Secondary Databases with and without Transactions + * + * @since 4.0 + */ +public class ForeignConstraintException extends SecondaryConstraintException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public ForeignConstraintException(Locker locker, + String message, + String secDbName, + DatabaseEntry secKey, + DatabaseEntry priKey, + long expirationTime) { + super(locker, message, secDbName, secKey, priKey, expirationTime); + } + + /** + * For internal use only. + * @hidden + */ + private ForeignConstraintException(String message, + ForeignConstraintException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new ForeignConstraintException(msg, this); + } +} diff --git a/src/com/sleepycat/je/ForeignKeyDeleteAction.java b/src/com/sleepycat/je/ForeignKeyDeleteAction.java new file mode 100644 index 0000000..15c3fc3 --- /dev/null +++ b/src/com/sleepycat/je/ForeignKeyDeleteAction.java @@ -0,0 +1,57 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * The action taken when a referenced record in the foreign key database is + * deleted. + * + *

        The delete action applies to a secondary database that is configured to + * have a foreign key integrity constraint. The delete action is specified by + * calling {@link SecondaryConfig#setForeignKeyDeleteAction}.

        + * + *

        When a record in the foreign key database is deleted, it is checked to + * see if it is referenced by any record in the associated secondary database. + * If the key is referenced, the delete action is applied. By default, the + * delete action is {@link #ABORT}.

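For example, the constraint and its delete action are configured together on the secondary database; a sketch in which {@code env}, {@code primaryDb}, {@code foreignDb} and {@code keyCreator} are assumed to exist:

    // Illustrative sketch: delete the referencing primary record when the
    // referenced foreign key record is deleted.
    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setAllowCreate(true);
    secConfig.setKeyCreator(keyCreator);          // assumed SecondaryKeyCreator
    secConfig.setForeignKeyDatabase(foreignDb);
    secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
    SecondaryDatabase secDb = env.openSecondaryDatabase(
        null, "byForeignKey", primaryDb, secConfig);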
        + * + * @see SecondaryConfig + */ +public enum ForeignKeyDeleteAction { + + /** + * When a referenced record in the foreign key database is deleted, abort + * the transaction by throwing a {@link DeleteConstraintException}. + */ + ABORT, + + /** + * When a referenced record in the foreign key database is deleted, delete + * the primary database record that references it. + */ + CASCADE, + + /** + * When a referenced record in the foreign key database is deleted, set the + * reference to null in the primary database record that references it, + * thereby deleting the secondary key. @see ForeignKeyNullifier @see + * ForeignMultiKeyNullifier + */ + NULLIFY; + + @Override + public String toString() { + return "ForeignKeyDeleteAction." + name(); + } +} diff --git a/src/com/sleepycat/je/ForeignKeyNullifier.java b/src/com/sleepycat/je/ForeignKeyNullifier.java new file mode 100644 index 0000000..50b70c5 --- /dev/null +++ b/src/com/sleepycat/je/ForeignKeyNullifier.java @@ -0,0 +1,61 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * The interface implemented for setting single-valued foreign keys to null. + * + *

        A key nullifier is used with a secondary database that is configured to + * have a foreign key integrity constraint and a delete action of {@link + * ForeignKeyDeleteAction#NULLIFY}. The key nullifier is specified by calling + * {@link SecondaryConfig#setForeignKeyNullifier}.

        + * + *

        When a referenced record in the foreign key database is deleted and the + * foreign key delete action is NULLIFY, the {@link + * ForeignKeyNullifier#nullifyForeignKey} method is called. This method sets + * the foreign key reference to null in the datum of the primary database. The + * primary database is then updated to contain the modified datum. The result + * is that the secondary key is deleted.

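An implementation sketch, assuming a hypothetical record layout in which the first four bytes of the datum hold the foreign key and all zeros mean "no reference"; a real application would use its own record bindings:

    class RefNullifier implements ForeignKeyNullifier {
        public boolean nullifyForeignKey(SecondaryDatabase secondary,
                                         DatabaseEntry data) {
            byte[] bytes = data.getData();
            boolean present = false;
            for (int i = 0; i < 4; i += 1) {
                present |= (bytes[i] != 0);
            }
            if (!present) {
                return false;       // key not present; no update needed
            }
            for (int i = 0; i < 4; i += 1) {
                bytes[i] = 0;       // clear the reference in place
            }
            data.setData(bytes);    // JE writes the modified datum back
            return true;
        }
    }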
        + * + * This interface may be used along with {@link SecondaryKeyCreator} for + * many-to-one and one-to-one relationships. It may not be used with + * {@link SecondaryMultiKeyCreator} because the secondary key is not passed as + * a parameter to the nullifyForeignKey method and this method would not know + * which key to nullify. When using {@link SecondaryMultiKeyCreator}, use + * {@link ForeignMultiKeyNullifier} instead. + */ +public interface ForeignKeyNullifier { + + /** + * Sets the foreign key reference to null in the datum of the primary + * database. + * + * @param secondary the database in which the foreign key integrity + * constraint is defined. This parameter is passed for informational + * purposes but is not commonly used. + * + * @param data the existing primary datum in which the foreign key + * reference should be set to null. This parameter should be updated by + * this method if it returns true. + * + * @return true if the datum was modified, or false to indicate that the + * key is not present. + * + * @throws DatabaseException if an error occurs attempting to clear the key + * reference. + */ + public boolean nullifyForeignKey(SecondaryDatabase secondary, + DatabaseEntry data) + throws DatabaseException; +} diff --git a/src/com/sleepycat/je/ForeignMultiKeyNullifier.java b/src/com/sleepycat/je/ForeignMultiKeyNullifier.java new file mode 100644 index 0000000..3a7bb4c --- /dev/null +++ b/src/com/sleepycat/je/ForeignMultiKeyNullifier.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * The interface implemented for setting multi-valued foreign keys to null. + * + *

        A key nullifier is used with a secondary database that is configured to + * have a foreign key integrity constraint and a delete action of {@link + * ForeignKeyDeleteAction#NULLIFY}. The key nullifier is specified by calling + * {@link SecondaryConfig#setForeignMultiKeyNullifier}.

        + * + *

        When a referenced record in the foreign key database is deleted and the + * foreign key delete action is NULLIFY, the {@link + * ForeignMultiKeyNullifier#nullifyForeignKey} method is called. This method + * sets the foreign key reference to null in the datum of the primary + * database. The primary database is then updated to contain the modified + * datum. The result is that the secondary key is deleted.

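An analogous sketch for the multi-key case, assuming a hypothetical datum that stores a packed sequence of 4-byte foreign keys; here {@code secKey} identifies which reference to remove:

    class MultiRefNullifier implements ForeignMultiKeyNullifier {
        public boolean nullifyForeignKey(SecondaryDatabase secondary,
                                         DatabaseEntry key,
                                         DatabaseEntry data,
                                         DatabaseEntry secKey) {
            byte[] datum = data.getData();
            byte[] target = secKey.getData();
            for (int off = 0; off + 4 <= datum.length; off += 4) {
                if (java.util.Arrays.equals(
                        java.util.Arrays.copyOfRange(datum, off, off + 4),
                        target)) {
                    byte[] updated = new byte[datum.length - 4];
                    System.arraycopy(datum, 0, updated, 0, off);
                    System.arraycopy(datum, off + 4, updated, off,
                                     datum.length - off - 4);
                    data.setData(updated);  // JE updates the primary record
                    return true;
                }
            }
            return false;                   // key not present
        }
    }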
        + * + * This interface may be used along with {@link SecondaryKeyCreator} or {@link + * SecondaryMultiKeyCreator} for many-to-many, one-to-many, many-to-one and + * one-to-one relationships. + */ +public interface ForeignMultiKeyNullifier { + + /** + * Sets the foreign key reference to null in the datum of the primary + * database. + * + * @param secondary the database in which the foreign key integrity + * constraint is defined. This parameter is passed for informational + * purposes but is not commonly used. + * + * @param key the existing primary key. This parameter is passed for + * informational purposes but is not commonly used. + * + * @param data the existing primary datum in which the foreign key + * reference should be set to null. This parameter should be updated by + * this method if it returns true. + * + * @param secKey the secondary key to be nullified. This parameter is + * needed for knowing which key to nullify when multiple keys are present, + * as when {@link SecondaryMultiKeyCreator} is used. + * + * @return true if the datum was modified, or false to indicate that the + * key is not present. + * + * @throws DatabaseException if an error occurs attempting to clear the key + * reference. + */ + public boolean nullifyForeignKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry secKey) + throws DatabaseException; +} diff --git a/src/com/sleepycat/je/ForwardCursor.java b/src/com/sleepycat/je/ForwardCursor.java new file mode 100644 index 0000000..9c883d7 --- /dev/null +++ b/src/com/sleepycat/je/ForwardCursor.java @@ -0,0 +1,165 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Closeable; + +/** + * The interface for forward-moving Cursor operations. Specific implementations + * may modify the documented behavior on each of these methods. + * @since 5.0 + */ +public interface ForwardCursor extends Closeable { + + /** + * Returns the Database handle associated with this ForwardCursor. + * + * @return The Database handle associated with this ForwardCursor. + */ + Database getDatabase(); + + /** + * Discards the cursor. + * + *

        The cursor handle may not be used again after this method has been + * called, regardless of the method's success or failure.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + void close(); + + /** + * Moves the cursor to a record according to the specified {@link Get} + * type. + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param getType is {@link Get#NEXT} or {@link Get#CURRENT}. + * interface. {@code Get.CURRENT} is permitted only if the cursor is + * initialized (positioned on a record). + * + * @param options the ReadOptions, or null to use default options. + * + * @return the OperationResult if the record requested is found, else null. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * the cursor is uninitialized (not positioned on a record) and this is not + * permitted (see above), or the non-transactional cursor was created in a + * different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null getType, a null input key/data parameter, + * an input key/data parameter with a null data array, a partial key/data + * input parameter, and specifying a {@link ReadOptions#getLockMode() + * lock mode} of READ_COMMITTED. + * + * @since 7.0 + */ + OperationResult get( + DatabaseEntry key, + DatabaseEntry data, + Get getType, + ReadOptions options); + + /** + * Returns the key/data pair to which the cursor refers. + * + *

        Calling this method is equivalent to calling {@link + * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with + * {@link Get#CURRENT}.

        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY + * OperationStatus.KEYEMPTY} if the key/pair at the cursor position has + * been deleted; otherwise, {@link + * com.sleepycat.je.OperationStatus#SUCCESS OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + OperationStatus getCurrent(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode); + + /** + * Moves the cursor to the next key/data pair and returns that pair. + * + *

        Calling this method is equivalent to calling {@link + * #get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)} with + * {@link Get#NEXT}.

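Both method styles support the same scan; a minimal sketch using the {@code Get}-based style, assuming an open {@code Database} handle named {@code db}:

    // Illustrative sketch: full forward scan over all records.
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    try (ForwardCursor cursor = db.openCursor(null, null)) {
        while (cursor.get(key, data, Get.NEXT, null) != null) {
            // process key and data here
        }
    }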
        + * + * @param key the key returned as + * output. + * + * @param data the data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + OperationStatus getNext(DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode); +} diff --git a/src/com/sleepycat/je/Get.java b/src/com/sleepycat/je/Get.java new file mode 100644 index 0000000..702eaf3 --- /dev/null +++ b/src/com/sleepycat/je/Get.java @@ -0,0 +1,298 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.GetMode; +import com.sleepycat.je.dbi.SearchMode; + +/** + * The operation type passed to "get" methods on databases and cursors. + */ +public enum Get { + + /** + * Searches using an exact match by key. + * + *

        Returns, or moves the cursor to, the record having a key exactly + * matching the given key parameter.

        + * + *

        If the database has duplicate keys, the record with the matching key + * and lowest data value (or the lowest primary key, for secondary + * databases) is selected.

        + * + *

        The operation does not succeed if no record matches.

        + */ + SEARCH(SearchMode.SET), + + /** + * Searches using an exact match by key and data (or pKey). + * + *

        Returns, or moves the cursor to, the record having a key exactly + * matching the given key parameter, and having a data value (or primary + * key) exactly matching the given data (or pKey) parameter. The data is + * matched for Database and Cursor operations, while the primary key is + * matched for SecondaryDatabase and SecondaryCursor operations.

        + * + *

        If the database has duplicate keys, the search is performed by key + * and data (or pKey) using the database Btree. If the database does + * not have duplicate keys, the search is performed by key alone using the + * Btree, and then the data (or primary key) of the matching record is + * simply compared to the data (pKey) parameter. In other words, using + * this operation has no performance advantage over {@link #SEARCH} when + * the database does not have duplicates.

        + * + *

        The operation does not succeed (null is returned) if no record + * matches.

        + */ + SEARCH_BOTH(SearchMode.BOTH), + + /** + * Searches using a GTE match by key. + * + *

        Returns, or moves the cursor to, the record with a key that is + * greater than or equal to (GTE) the given key parameter.

        + * + *

        If the database has duplicate keys, the record with the lowest data + * value (or the lowest primary key, for a secondary database) is selected + * among the duplicates with the matching key.

        + * + *

        The operation does not succeed (null is returned) if no record + * matches.

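A sketch of the usual range-scan idiom built on this operation, assuming an open {@code db} handle and a {@code startKeyBytes} array:

    // Illustrative sketch: position at the first key >= startKeyBytes,
    // then iterate forward to the end of the database.
    DatabaseEntry key = new DatabaseEntry(startKeyBytes);
    DatabaseEntry data = new DatabaseEntry();
    try (Cursor cursor = db.openCursor(null, null)) {
        OperationResult result = cursor.get(key, data, Get.SEARCH_GTE, null);
        while (result != null) {
            // process key and data, then advance
            result = cursor.get(key, data, Get.NEXT, null);
        }
    }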
        + */ + SEARCH_GTE(SearchMode.SET_RANGE), + + /** + * Searches using an exact match by key and a GTE match by data (or pKey). + * + *

        Returns, or moves the cursor to, the record with a key exactly + * matching the given key parameter, and having a data value (or primary + * key) that is greater than or equal to (GTE) the given data (or pKey) + * parameter. The data is matched for Database and Cursor operations, while + * the primary key is matched for SecondaryDatabase and SecondaryCursor + * operations.

        + * + *

        If the database does not have duplicate keys, the data (or pKey) is + * matched exactly and this operation is equivalent to {@link + * #SEARCH_BOTH}.

        + * + *

        The operation does not succeed (null is returned) if no record + * matches.

        + */ + SEARCH_BOTH_GTE(SearchMode.BOTH_RANGE), + + /** + * Accesses the current record. + * + *

        Accesses the record at the current cursor position. If the cursor is + * uninitialized (not positioned on a record), {@link + * IllegalStateException} is thrown.

        + * + *

        The operation does not succeed (null is returned) if the record at + * the current position has been deleted. This can occur in two cases: 1. + * If the record was deleted using this cursor and then accessed. 2. If the + * record was not locked by this cursor or transaction, and was deleted by + * another thread or transaction after this cursor was positioned on + * it.

        + */ + CURRENT(), + + /** + * Finds the first record in the database. + * + *

        Moves the cursor to the record in the database with the lowest valued + * key.

        + * + *

        If the database has duplicate keys, the record with the lowest data + * value (or the lowest primary key, for a secondary database) is selected + * among the duplicates for the lowest key.

        + * + *

        The operation does not succeed (null is returned) if the database is + * empty.

        + */ + FIRST(), + + /** + * Finds the last record in the database. + * + *

        Moves the cursor to the record in the database with the highest + * valued key.

        + * + *

        If the database has duplicate keys, the record with the highest data + * value (or the highest primary key, for a secondary database) is selected + * among the duplicates for the highest key.

        + * + *

        The operation does not succeed (null is returned) if the database is + * empty.

        + */ + LAST(), + + /** + * Moves to the next record. + * + *

        Moves the cursor to the record following the record at the current + * cursor position. If the cursor is uninitialized (not positioned on a + * record), moves to the first record and this operation is equivalent to + * {@link #FIRST}.

        + * + *

        If the database does not have duplicate keys, the following record is + * defined as the record with the next highest key. If the database does + * have duplicate keys, the following record is defined as the record with + * the same key and the next highest data value (or the next highest + * primary key, for a secondary database) among the duplicates for that + * key; or if there are no more records with the same key, the following + * record is the record with the next highest key and the lowest data value + * (or the lowest primary key, for a secondary database) among the + * duplicates for that key.

        + * + *

        The operation does not succeed (null is returned) if the record at + * the cursor position is the last record in the database.

        + */ + NEXT(GetMode.NEXT, true /*allowNexPrevUninitialized*/), + + /** + * Moves to the next record with the same key. + * + *

        Moves the cursor to the record following the record at the current + * cursor position and having the same key. If the cursor is uninitialized + * (not positioned on a record), {@link IllegalStateException} is + * thrown.

        + * + *

        If the database has duplicate keys, moves to the record with the same + * key and the next highest data value (or the next highest primary key, + * for a secondary database) among the duplicates for that key.

        + * + *

        The operation does not succeed (null is returned) if there are no + * following records with the same key. This is always the case when + * the database does not have duplicate keys.

        + */ + NEXT_DUP(GetMode.NEXT_DUP, false /*allowNexPrevUninitialized*/), + + /** + * Moves to the next record with a different key. + * + *

        Moves the cursor to the record following the record at the current + * cursor position and having the next highest key. If the cursor is + * uninitialized (not positioned on a record), moves to the first record + * and this operation is equivalent to {@link #FIRST}.

        + * + *

        If the database has duplicate keys, moves to the record with the next + * highest key and the lowest data value (or the lowest primary key, for a + * secondary database) among the duplicates for that key; this effectively + * skips over records having the same key and a higher data value (or a + * higher primary key, for a secondary database). If the database does not + * have duplicate keys, this operation is equivalent to {@link #NEXT}.

        + * + *

        The operation does not succeed (null is returned) if there are no + * following records with a different key.

        + */ + NEXT_NO_DUP(GetMode.NEXT_NODUP, true /*allowNexPrevUninitialized*/), + + /** + * Moves to the previous record. + * + *

        Moves the cursor to the record preceding the record at the current + * cursor position. If the cursor is uninitialized (not positioned on a + * record), moves to the last record and this operation is equivalent to + * {@link #LAST}.

        + * + *

        If the database does not have duplicate keys, the preceding record is + * defined as the record with the next lowest key. If the database does + * have duplicate keys, the preceding record is defined as the record with + * the same key and the next lowest data value (or the next lowest primary + * key, for a secondary database) among the duplicates for that key; or if + * there are no preceding records with the same key, the preceding record + * is the record with the next lowest key and the highest data value (or + * the highest primary key, for a secondary database) among the duplicates + * for that key.

        + * + *

        The operation does not succeed (null is returned) if the record at + * the cursor position is the first record in the database.

        + */ + PREV(GetMode.PREV, true /*allowNexPrevUninitialized*/), + + /** + * Moves to the previous record with the same key. + * + *

        Moves the cursor to the record preceding the record at the current + * cursor position and having the same key. If the cursor is uninitialized + * (not positioned on a record), {@link IllegalStateException} is + * thrown.

        + * + *

        If the database has duplicate keys, moves to the record with the same + * key and the next lowest data value (or the next lowest primary key, for + * a secondary database) among the duplicates for that key.

        + * + *

        The operation does not succeed (null is returned) if there are no + * preceding records with the same key. This is always the case when + * the database does not have duplicate keys.

        + */ + PREV_DUP(GetMode.PREV_DUP, false /*allowNexPrevUninitialized*/), + + /** + * Moves to the previous record with a different key. + * + *

        Moves the cursor to the record preceding the record at the current + * cursor position and having the next lowest key. If the cursor is + * uninitialized (not positioned on a record), moves to the last record + * and this operation is equivalent to {@link #LAST}.

        + * + *

        If the database has duplicate keys, moves to the record with the next + * lowest key and the highest data value (or the highest primary key, for a + * secondary database) among the duplicates for that key; this effectively + * skips over records having the same key and a lower data value (or a + * lower primary key, for a secondary database). If the database does not + * have duplicate keys, this operation is equivalent to {@link #PREV}.

        + * + *

        The operation does not succeed (null is returned) if there are no + * preceding records with a different key.

        + */ + PREV_NO_DUP(GetMode.PREV_NODUP, true /*allowNexPrevUninitialized*/); + + private final SearchMode searchMode; + private final GetMode getMode; + private final boolean allowNexPrevUninitialized; + + Get() { + this(null, null, false); + } + + Get(final SearchMode searchMode) { + this(searchMode, null, false); + } + + Get(final GetMode getMode, final boolean allowNexPrevUninitialized) { + this(null, getMode, allowNexPrevUninitialized); + } + + Get(final SearchMode searchMode, + final GetMode getMode, + final boolean allowNexPrevUninitialized) { + + this.searchMode = searchMode; + this.getMode = getMode; + this.allowNexPrevUninitialized = allowNexPrevUninitialized; + } + + SearchMode getSearchMode() { + return searchMode; + } + + GetMode getGetMode() { + return getMode; + } + + boolean getAllowNextPrevUninitialized() { + return allowNexPrevUninitialized; + } +} diff --git a/src/com/sleepycat/je/JEVersion.java b/src/com/sleepycat/je/JEVersion.java new file mode 100644 index 0000000..b8bcd29 --- /dev/null +++ b/src/com/sleepycat/je/JEVersion.java @@ -0,0 +1,166 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; +import java.util.StringTokenizer; + +/** + * Berkeley DB Java Edition version information. Versions consist of major, + * minor and patch numbers. + *

        + * There is one JEVersion object per running JVM and it may be accessed using + * the static field JEVersion.CURRENT_VERSION. + */ +public class JEVersion implements Comparable, Serializable { + + private static final long serialVersionUID = 1L; + + /** + * Release version. + */ + public static final JEVersion CURRENT_VERSION = + new JEVersion(7, 5, 11, null); + + private final int majorNum; + private final int minorNum; + private final int patchNum; + private final String name; + + private JEVersion(int majorNum, int minorNum, int patchNum, String name) { + this.majorNum = majorNum; + this.minorNum = minorNum; + this.patchNum = patchNum; + this.name = name; + } + + public JEVersion(String version) { + StringTokenizer st = new StringTokenizer(version, "."); + + majorNum = Integer.parseInt(st.nextToken()); + minorNum = Integer.parseInt(st.nextToken()); + patchNum = Integer.parseInt(st.nextToken()); + if (st.hasMoreTokens()) { + name = st.nextToken(); + } else { + name = null; + } + } + + @Override + public String toString() { + return getVersionString(); + } + + /** + * Major number of the release version. + * + * @return The major number of the release version. + */ + public int getMajor() { + return majorNum; + } + + /** + * Minor number of the release version. + * + * @return The minor number of the release version. + */ + public int getMinor() { + return minorNum; + } + + /** + * Patch number of the release version. + * + * @return The patch number of the release version. + */ + public int getPatch() { + return patchNum; + } + + /** + * The numeric version string, without the patch tag. + * + * @return The release version + */ + public String getNumericVersionString() { + StringBuilder version = new StringBuilder(); + version.append(majorNum).append("."); + version.append(minorNum).append("."); + version.append(patchNum); + return version.toString(); + } + + /** + * Release version, suitable for display. + * + * @return The release version, suitable for display. + */ + public String getVersionString() { + StringBuilder version = new StringBuilder(); + version.append(majorNum).append("."); + version.append(minorNum).append("."); + version.append(patchNum); + if (name != null) { + version.append("."); + version.append(name); + } + return version.toString(); + } + + /* + * Return -1 if the current version is earlier than the comparedVersion. + * Return 0 if the current version is the same as the comparedVersion. + * Return 1 if the current version is later than the comparedVersion. + */ + public int compareTo(JEVersion comparedVersion) { + int result = 0; + + if (majorNum == comparedVersion.getMajor()) { + if (minorNum == comparedVersion.getMinor()) { + if (patchNum > comparedVersion.getPatch()) { + result = 1; + } else if (patchNum < comparedVersion.getPatch()) { + result = -1; + } + } else if (minorNum > comparedVersion.getMinor()) { + result = 1; + } else { + result = -1; + } + } else if (majorNum > comparedVersion.getMajor()) { + result = 1; + } else { + result = -1; + } + + return result; + } + + /* + * If its type is JEVersion, and the version numbers are the same, + * then we consider these two versions equal. + */ + @Override + public boolean equals(Object o) { + return (o instanceof JEVersion) && (compareTo((JEVersion) o) == 0); + } + + /* Produce a unique hash code for JEVersion. 
*/ + @Override + public int hashCode() { + return majorNum * 1000 * 1000 + minorNum * 1000 + patchNum; + } +} diff --git a/src/com/sleepycat/je/JoinConfig.java b/src/com/sleepycat/je/JoinConfig.java new file mode 100644 index 0000000..c759f86 --- /dev/null +++ b/src/com/sleepycat/je/JoinConfig.java @@ -0,0 +1,126 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * The configuration properties of a JoinCursor. The join cursor + * configuration is specified when calling {@link Database#join Database.join}. + * + *

        To create a configuration object with default attributes:

        + * + *
        + *     JoinConfig config = new JoinConfig();
        + * 
        + * + *

        To set custom attributes:

        + * + *
        + *     JoinConfig config = new JoinConfig();
        + *     config.setNoSort(true);
        + * 
        + * + * @see Database#join Database.join + * @see JoinCursor + */ +public class JoinConfig implements Cloneable { + + /** + * Default configuration used if null is passed to {@link + * com.sleepycat.je.Database#join Database.join}. + */ + public static final JoinConfig DEFAULT = new JoinConfig(); + + private boolean noSort; + + /** + * Creates an instance with the system's default settings. + */ + public JoinConfig() { + } + + /** + * Specifies whether automatic sorting of the input cursors is disabled. + * + *

        Joined values are retrieved by doing a sequential iteration over the + * first cursor in the cursor array, and a nested iteration over each + * following cursor in the order they are specified in the array. This + * requires database traversals to search for the current datum in all the + * cursors after the first. For this reason, the best join performance + * normally results from sorting the cursors from the one that refers to + * the least number of data items to the one that refers to the + * most. Unless this method is called with true, Database.join + * does this sort on behalf of its caller using the {@link + * Cursor#countEstimate} method.

        + * + *

        If the data are structured so that cursors with many data items also + * share many common elements, higher performance will result from listing + * those cursors before cursors with fewer data items; that is, a sort + * order other than the default. Calling this method permits applications + * to perform join optimization prior to calling + * Database.join.

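A sketch of opting out of the automatic sort, assuming the caller has already ordered two open, positioned secondary cursors by expected selectivity (the cursor names are illustrative):

    // Illustrative sketch: the caller supplies its own cursor order.
    JoinConfig config = new JoinConfig();
    config.setNoSort(true);
    Cursor[] cursors = {manyItemsButCorrelated, fewItems};
    JoinCursor joinCursor = primaryDb.join(cursors, config);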
        + * + * @param noSort whether automatic sorting of the input cursors is + * disabled. + * + * @see Database#join Database.join + * + * @return this + */ + public JoinConfig setNoSort(boolean noSort) { + setNoSortVoid(noSort); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNoSortVoid(boolean noSort) { + this.noSort = noSort; + } + + /** + * Returns whether automatic sorting of the input cursors is disabled. + * + * @return whether automatic sorting of the input cursors is disabled. + * + * @see #setNoSort + */ + public boolean getNoSort() { + return noSort; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public JoinConfig clone() { + try { + return (JoinConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + return "noSort=" + noSort + + "\n"; + } +} diff --git a/src/com/sleepycat/je/JoinConfigBeanInfo.java b/src/com/sleepycat/je/JoinConfigBeanInfo.java new file mode 100644 index 0000000..25b0e49 --- /dev/null +++ b/src/com/sleepycat/je/JoinConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class JoinConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(JoinConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(JoinConfig.class); + } +} diff --git a/src/com/sleepycat/je/JoinCursor.java b/src/com/sleepycat/je/JoinCursor.java new file mode 100644 index 0000000..4a66074 --- /dev/null +++ b/src/com/sleepycat/je/JoinCursor.java @@ -0,0 +1,495 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Closeable; +import java.util.Arrays; +import java.util.Comparator; +import java.util.logging.Level; + +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.GetMode; +import com.sleepycat.je.dbi.SearchMode; + +/** + * A specialized join cursor for use in performing equality or natural joins on + * secondary indices. + * + *

        A join cursor is returned when calling {@link Database#join + * Database.join}.

        + * + *

        To open a join cursor using two secondary cursors:

        + * + *
        + *     Transaction txn = ...
        + *     Database primaryDb = ...
        + *     SecondaryDatabase secondaryDb1 = ...
        + *     SecondaryDatabase secondaryDb2 = ...
        + *     

        + *     SecondaryCursor cursor1 = null;
        + *     SecondaryCursor cursor2 = null;
        + *     JoinCursor joinCursor = null;
        + *     try {
        + *         DatabaseEntry key = new DatabaseEntry();
        + *         DatabaseEntry data = new DatabaseEntry();
        + *
        + *         cursor1 = secondaryDb1.openSecondaryCursor(txn, null);
        + *         cursor2 = secondaryDb2.openSecondaryCursor(txn, null);
        + *
        + *         key.setData(...); // initialize key for secondary index 1
        + *         OperationStatus status1 =
        + *             cursor1.getSearchKey(key, data, LockMode.DEFAULT);
        + *         key.setData(...); // initialize key for secondary index 2
        + *         OperationStatus status2 =
        + *             cursor2.getSearchKey(key, data, LockMode.DEFAULT);
        + *
        + *         if (status1 == OperationStatus.SUCCESS &&
        + *             status2 == OperationStatus.SUCCESS) {
        + *
        + *             SecondaryCursor[] cursors = {cursor1, cursor2};
        + *             joinCursor = primaryDb.join(cursors, null);
        + *
        + *             while (true) {
        + *                 OperationStatus joinStatus = joinCursor.getNext(key, data,
        + *                     LockMode.DEFAULT);
        + *                 if (joinStatus == OperationStatus.SUCCESS) {
        + *                     // Do something with the key and data.
        + *                 } else {
        + *                     break;
        + *                 }
        + *             }
        + *         }
        + *     } finally {
        + *         if (cursor1 != null) {
        + *             cursor1.close();
        + *         }
        + *         if (cursor2 != null) {
        + *             cursor2.close();
        + *         }
        + *         if (joinCursor != null) {
        + *             joinCursor.close();
        + *         }
        + *     }

        + * + *

        The join algorithm is described here so that its cost can be estimated and + * compared to other approaches for performing a query. Say that N cursors are + * provided for the join operation. According to the order they appear in the + * array the cursors are labeled C(1) through C(n), and the keys at each cursor + * position are labeled K(1) through K(n).

        + * + *
          + * + *
        + * <ol>
        + * <li>Using C(1), the join algorithm iterates sequentially through all records
        + * having K(1). This iteration is equivalent to a {@link Cursor#getNextDup
        + * Cursor.getNextDup} operation on the secondary index. The primary key of a
        + * candidate record is determined in this manner. The primary record itself is
        + * not retrieved and the primary database is not accessed.</li>
        + *
        + * <li>For each candidate primary key found in step 1, a Btree lookup is
        + * performed using C(2) through C(n), in that order. The Btree lookups are
        + * exact searches to determine whether the candidate record also contains
        + * secondary keys K(2) through K(n). The lookups are equivalent to a {@link
        + * Cursor#getSearchBoth Cursor.getSearchBoth} operation on the secondary index.
        + * The primary record itself is not retrieved and the primary database is not
        + * accessed.</li>
        + *
        + * <li>If any lookup in step 2 fails, the algorithm advances to the next
        + * candidate record using C(1). Lookups are performed in the order of the
        + * cursor array, and the algorithm proceeds to the next C(1) candidate key as
        + * soon as a single lookup fails.</li>
        + *
        + * <li>If all lookups in step 2 succeed, then the matching key and/or data is
        + * returned by the {@code getNext} method. If the {@link
        + * #getNext(DatabaseEntry,DatabaseEntry,LockMode)} method signature is used,
        + * then the primary database is read to obtain the record data, as if {@link
        + * Cursor#getSearchKey Cursor.getSearchKey} were called for the primary
        + * database. If the {@link #getNext(DatabaseEntry,LockMode)} method signature
        + * is used, then only the primary key is returned and the primary database is
        + * not accessed.</li>
        + *
        + * <li>The algorithm ends when C(1) has no more candidate records with K(1),
        + * and the {@code getNext} method will then return {@link
        + * com.sleepycat.je.OperationStatus#NOTFOUND OperationStatus.NOTFOUND}.</li>
        + * </ol>
        + */ +public class JoinCursor implements ForwardCursor, Closeable { + + private JoinConfig config; + private Database priDb; + private Cursor[] secCursors; + private DatabaseEntry[] cursorScratchEntries; + private DatabaseEntry scratchEntry; + private DatabaseEntry firstSecKey; + private boolean[] cursorFetchedFirst; + + /** + * Creates a join cursor without parameter checking. + */ + JoinCursor(final Database primaryDb, + final Cursor[] cursors, + final JoinConfig configParam) + throws DatabaseException { + + priDb = primaryDb; + config = (configParam != null) ? configParam.clone() + : JoinConfig.DEFAULT; + scratchEntry = new DatabaseEntry(); + firstSecKey = new DatabaseEntry(); + cursorScratchEntries = new DatabaseEntry[cursors.length]; + for (int i = 0; i < cursors.length; i += 1) { + cursorScratchEntries[i] = new DatabaseEntry(); + } + cursorFetchedFirst = new boolean[cursors.length]; + Cursor[] sortedCursors = new Cursor[cursors.length]; + System.arraycopy(cursors, 0, sortedCursors, 0, cursors.length); + + if (!config.getNoSort()) { + + /* + * Sort ascending by duplicate count. Collect counts before + * sorting so that countEstimate is called only once per cursor. + */ + final long[] counts = new long[cursors.length]; + for (int i = 0; i < cursors.length; i += 1) { + counts[i] = cursors[i].countEstimateInternal(); + assert counts[i] >= 0; + } + Arrays.sort(sortedCursors, new Comparator() { + public int compare(Cursor o1, Cursor o2) { + long count1 = -1; + long count2 = -1; + + /* + * Scan for objects in cursors not sortedCursors since + * sortedCursors is being sorted in place. + */ + for (int i = 0; i < cursors.length && + (count1 < 0 || count2 < 0); i += 1) { + if (cursors[i] == o1) { + count1 = counts[i]; + } else if (cursors[i] == o2) { + count2 = counts[i]; + } + } + assert count1 >= 0 && count2 >= 0; + long cmp = count1 - count2; + return (cmp < 0) ? (-1) : ((cmp > 0) ? 1 : 0); + } + }); + } + + /* + * Dup cursors last. If an error occurs before the constructor is + * complete, close them and ignore exceptions during close. + */ + try { + secCursors = new Cursor[cursors.length]; + for (int i = 0; i < cursors.length; i += 1) { + secCursors[i] = sortedCursors[i].dup(true); + } + } catch (DatabaseException e) { + close(e); /* will throw e */ + } + } + + /** + * Closes the cursors that have been opened by this join cursor. + * + *

        The cursors passed to {@link Database#join Database.join} are not + * closed by this method, and should be closed by the caller.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + public void close() + throws DatabaseException { + + if (priDb == null) { + return; + } + close(null); + } + + /** + * Close all cursors we own, throwing only the first exception that occurs. + * + * @param firstException an exception that has already occured, or null. + */ + private void close(DatabaseException firstException) + throws DatabaseException { + + priDb = null; + for (int i = 0; i < secCursors.length; i += 1) { + if (secCursors[i] != null) { + try { + secCursors[i].close(); + } catch (DatabaseException e) { + if (firstException == null) { + firstException = e; + } + } + secCursors[i] = null; + } + } + if (firstException != null) { + throw firstException; + } + } + + /** + * For unit testing. + */ + Cursor[] getSortedCursors() { + return secCursors; + } + + /** + * Returns the primary database handle associated with this cursor. + * + * @return the primary database handle associated with this cursor. + */ + public Database getDatabase() { + + return priDb; + } + + /** + * Returns this object's configuration. + * + * @return this object's configuration. + */ + public JoinConfig getConfig() { + + return config.clone(); + } + + /** + * Returns the next primary key and data resulting from the join operation. + * + * @param getType is {@link Get#NEXT}. + */ + @Override + public OperationResult get( + final DatabaseEntry key, + final DatabaseEntry data, + final Get getType, + final ReadOptions options) { + + if (getType != Get.NEXT) { + throw new IllegalArgumentException( + "Get type not allowed: " + getType); + } + + final LockMode lockMode = + (options != null) ? options.getLockMode() : null; + + final CacheMode cacheMode = + (options != null) ? options.getCacheMode() : null; + + try { + secCursors[0].checkEnv(); + secCursors[0].trace(Level.FINEST, getType.toString(), lockMode); + + return retrieveNext(key, data, lockMode, cacheMode); + + } catch (Error E) { + priDb.getEnv().invalidate(E); + throw E; + } + } + + /** + * This operation is not allowed on a join cursor. {@link + * UnsupportedOperationException} will always be thrown by this method. + */ + @Override + public OperationStatus getCurrent(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + throw new UnsupportedOperationException(); + } + + /** + * Returns the next primary key resulting from the join operation. + * + *

        An entry is returned by the join cursor for each primary key/data + * pair having all secondary key values that were specified using the array + * of secondary cursors passed to {@link Database#join Database.join}.

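A sketch of this key-only variant, which avoids reading the primary records; {@code joinCursor} is assumed to be an open JoinCursor:

    // Illustrative sketch: retrieve matching primary keys only.
    DatabaseEntry key = new DatabaseEntry();
    while (joinCursor.getNext(key, LockMode.DEFAULT) ==
           OperationStatus.SUCCESS) {
        // use the primary key; the primary database is not accessed
    }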
        + * + * @param key the key returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getNext(final DatabaseEntry key, + final LockMode lockMode) { + return getNext(key, null, lockMode); + } + + /** + * Returns the next primary key and data resulting from the join operation. + * + *

        An entry is returned by the join cursor for each primary key/data + * pair having all secondary key values that were specified using the array + * of secondary cursors passed to {@link Database#join Database.join}.

        + */ + public OperationStatus getNext(final DatabaseEntry key, + final DatabaseEntry data, + LockMode lockMode) { + + final OperationResult result = get( + key, data, Get.NEXT, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Internal version of getNext(), with an optional data param. + *

        + * Since duplicates are always sorted and duplicate-duplicates are not + * allowed, a natural join can be implemented by simply traversing through + * the duplicates of the first cursor to find candidate keys, and then + * looking for each candidate key in the duplicate set of the other + * cursors, without ever resetting a cursor to the beginning of the + * duplicate set. + *

        + * This only works when the same duplicate comparison method is used for + * all cursors. We don't check for that; we just assume the user won't + * violate that rule. + *

        + * A future optimization would be to add a SearchMode.BOTH_DUPS operation + * and use it instead of using SearchMode.BOTH. This would be the + * equivalent of the undocumented DB_GET_BOTHC operation used by DB core's + * join() implementation. + */ + private OperationResult retrieveNext(final DatabaseEntry keyParam, + final DatabaseEntry dataParam, + final LockMode lockMode, + final CacheMode cacheMode) { + boolean readUncommitted = + secCursors[0].isReadUncommittedMode(lockMode); + + outerLoop: while (true) { + + /* Process the first cursor to get a candidate key. */ + Cursor secCursor = secCursors[0]; + DatabaseEntry candidateKey = cursorScratchEntries[0]; + OperationResult result; + if (!cursorFetchedFirst[0]) { + /* Get first duplicate at initial cursor position. */ + result = secCursor.getCurrentInternal( + firstSecKey, candidateKey, lockMode, cacheMode); + if (readUncommitted && result == null) { + /* Deleted underneath read-uncommitted cursor; skip it. */ + cursorFetchedFirst[0] = true; + continue; + } + cursorFetchedFirst[0] = true; + } else { + /* Already initialized, move to the next candidate key. */ + result = secCursor.retrieveNext( + firstSecKey, candidateKey, lockMode, cacheMode, + GetMode.NEXT_DUP); + } + if (result == null) { + /* No more candidate keys. */ + return null; + } + + /* Process the second and following cursors. */ + for (int i = 1; i < secCursors.length; i += 1) { + secCursor = secCursors[i]; + DatabaseEntry secKey = cursorScratchEntries[i]; + if (!cursorFetchedFirst[i]) { + result = secCursor.getCurrentInternal( + secKey, scratchEntry, lockMode, cacheMode); + if (readUncommitted && + result == null) { + /* Deleted underneath read-uncommitted; skip it. */ + result = secCursor.retrieveNext( + secKey, scratchEntry, lockMode, cacheMode, + GetMode.NEXT_DUP); + if (result == null) { + /* All keys were deleted; no possible match. */ + return null; + } + } + cursorFetchedFirst[i] = true; + } + scratchEntry.setData(secKey.getData(), secKey.getOffset(), + secKey.getSize()); + result = secCursor.search( + scratchEntry, candidateKey, lockMode, cacheMode, + SearchMode.BOTH, true); + if (result == null) { + /* No match, get another candidate key. */ + continue outerLoop; + } + } + + /* The candidate key was found for all cursors. */ + if (dataParam != null) { + if (!secCursors[0].readPrimaryAfterGet( + priDb, firstSecKey, candidateKey, dataParam, lockMode, + readUncommitted, false /*lockPrimaryOnly*/, + false /*verifyPrimary*/, + secCursors[0].getCursorImpl().getLocker(), + secCursors[0].getDatabase(), null)) { + /* Deleted underneath read-uncommitted cursor; skip it. */ + continue; + } + + /* + * Copy primary info to all secondary cursors. The 0th cursor + * was updated above with the primary info. + */ + final CursorImpl firstSecCursor = secCursors[0].cursorImpl; + for (int i = 1; i < secCursors.length; i += 1) { + secCursors[i].cursorImpl.setPriInfo(firstSecCursor); + } + } + keyParam.setData(candidateKey.getData(), candidateKey.getOffset(), + candidateKey.getSize()); + return result; + } + } +} diff --git a/src/com/sleepycat/je/LockConflictException.java b/src/com/sleepycat/je/LockConflictException.java new file mode 100644 index 0000000..631513b --- /dev/null +++ b/src/com/sleepycat/je/LockConflictException.java @@ -0,0 +1,211 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * The common base class for all exceptions that result from record lock + * conflicts during read and write operations. + * + *

        This exception normally indicates that a transaction may be retried. + * Catching this exception, rather than its subclasses, is convenient and + * recommended for handling lock conflicts and performing transaction retries + * in a general purpose manner. See below for information on performing + * transaction retries.

        + * + *

        The exception carries two arrays of transaction ids, one of the owners and + * the other of the waiters, at the time of the lock conflict. This + * information may be used along with the {@link Transaction#getId Transaction + * ID} for diagnosing locking problems. See {@link #getOwnerTxnIds} and {@link + * #getWaiterTxnIds}.
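As an illustration — a minimal sketch, assuming an open Database handle db, a Transaction txn, DatabaseEntry key/data, and a java.util.logging.Logger named logger, none of which are part of this class — the arrays can be logged when a conflict is caught:

    try {
        db.get(txn, key, data, LockMode.DEFAULT);
    } catch (LockConflictException e) {
        // Record which transactions owned and waited on the lock, for diagnosis.
        logger.warning("Lock conflict: owners="
            + java.util.Arrays.toString(e.getOwnerTxnIds())
            + " waiters=" + java.util.Arrays.toString(e.getWaiterTxnIds()));
        throw e;
    }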

        + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.

        + * + *

        Performing Transaction Retries

        + * + *

        If a lock conflict occurs during a transaction, the transaction may be + * retried by performing the following steps. Some applications may also wish + * to sleep for a short interval before retrying, to give other concurrent + * transactions a chance to finish and release their locks.

        + *
          + *
1. Close all cursors opened under the transaction.
+ *
2. Abort the transaction.
+ *
3. Begin a new transaction and repeat the operations.
+ *
        + * + *

        To handle {@link LockConflictException} reliably for all types of JE + * applications including JE-HA applications, it is important to handle it when + * it is thrown by all {@link Database} and {@link Cursor} read and write + * operations.

        + * + *

The following example code illustrates the recommended approach. Note + * that the {@code Environment.beginTransaction} and {@code Transaction.commit} + * calls are intentionally inside the {@code try} block. When using JE-HA, this + * will make it easy to add a {@code catch} for other exceptions that can be + * resolved by retrying the transaction, such as consistency exceptions.

        + * + *
        + *  void doTransaction(final Environment env,
        + *                     final Database db1,
        + *                     final Database db2,
        + *                     final int maxTries)
+ *      throws DatabaseException, InterruptedException {
        + *
        + *      boolean success = false;
        + *      long sleepMillis = 0;
        + *      for (int i = 0; i < maxTries; i++) {
        + *          // Sleep before retrying.
        + *          if (sleepMillis != 0) {
        + *              Thread.sleep(sleepMillis);
        + *              sleepMillis = 0;
        + *          }
        + *          Transaction txn = null;
        + *          try {
        + *              txn = env.beginTransaction(null, null);
        + *              final Cursor cursor1 = db1.openCursor(txn, null);
        + *              try {
        + *                  final Cursor cursor2 = db2.openCursor(txn, null);
        + *                  try {
        + *                      // INSERT APP-SPECIFIC CODE HERE:
        + *                      // Perform read and write operations.
        + *                  } finally {
        + *                      cursor2.close();
        + *                  }
        + *              } finally {
        + *                  cursor1.close();
        + *              }
        + *              txn.commit();
        + *              success = true;
        + *              return;
        + *          } catch (LockConflictException e) {
        + *              sleepMillis = LOCK_CONFLICT_RETRY_SEC * 1000;
        + *              continue;
        + *          } finally {
        + *              if (!success) {
        + *                  if (txn != null) {
        + *                      txn.abort();
        + *                  }
        + *              }
        + *          }
        + *      }
        + *      // INSERT APP-SPECIFIC CODE HERE:
        + *      // Transaction failed, despite retries.
        + *      // Take some app-specific course of action.
        + *  }
        + * + *

        For more information on transactions and lock conflicts, see Writing Transactional Applications.

+ * + * @since 4.0 + */ +public abstract class LockConflictException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + private long[] ownerTxnIds; + private long[] waiterTxnIds; + private long timeoutMillis; + + /** + * For internal use only. + * @hidden + */ + LockConflictException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + protected LockConflictException(Locker locker, String message) { + super(locker, true /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + protected LockConflictException(Locker locker, + String message, + Throwable cause) { + super(locker, true /*abortOnly*/, message, cause); + } + + /** + * For internal use only. + * @hidden + */ + protected LockConflictException(String message, + OperationFailureException cause) { + super(message, cause); + } + + /** + * @hidden + * Internal use only. + */ + public void setOwnerTxnIds(long[] ownerTxnIds) { + this.ownerTxnIds = ownerTxnIds; + } + + /** + * Returns an array of longs containing transaction ids of owners at the + * time of the timeout. + * + * @return an array of longs containing transaction ids of owners at the + * time of the timeout. + */ + public long[] getOwnerTxnIds() { + return ownerTxnIds; + } + + /** + * @hidden + * Internal use only. + */ + public void setWaiterTxnIds(long[] waiterTxnIds) { + this.waiterTxnIds = waiterTxnIds; + } + + /** + * Returns an array of longs containing transaction ids of waiters at the + * time of the timeout. + * + * @return an array of longs containing transaction ids of waiters at the + * time of the timeout. + */ + public long[] getWaiterTxnIds() { + return waiterTxnIds; + } + + /** + * @hidden + * Internal use only. + */ + public void setTimeoutMillis(long timeoutMillis) { + this.timeoutMillis = timeoutMillis; + } + + /** + * @hidden + * Internal use only. + */ + public long getTimeoutMillis() { + return timeoutMillis; + } +} diff --git a/src/com/sleepycat/je/LockMode.java b/src/com/sleepycat/je/LockMode.java new file mode 100644 index 0000000..1de74a8 --- /dev/null +++ b/src/com/sleepycat/je/LockMode.java @@ -0,0 +1,344 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Record lock modes for read operations. Lock mode parameters may be specified + * for all operations that retrieve data. + * + *

        Locking Rules

        + * + *

        Together with {@link CursorConfig}, {@link TransactionConfig} and {@link + * EnvironmentConfig} settings, lock mode parameters determine how records are + * locked during read operations. Record locking is used to enforce the + * isolation modes that are configured. Record locking is summarized below for + * read and write operations. For more information on isolation levels and + * transactions, see Writing Transactional Applications.

        + * + *

        With one exception, a record lock is always acquired when a record is + * read or written, and a cursor will always hold the lock as long as it is + * positioned on the record. The exception is when {@link #READ_UNCOMMITTED} + * is specified, which allows a record to be read without any locking.

        + * + *

        Both read (shared) and write (exclusive) locks are used. Read locks are + * normally acquired on read ({@code get} method) operations and write locks on + * write ({@code put} method) operations. The only exception is that a write + * lock will be acquired on a read operation if {@link #RMW} is specified.

        + * + *

        Because read locks are shared, multiple accessors may read the same + * record. Because write locks are exclusive, if a record is written by one + * accessor it may not be read or written by another accessor. An accessor is + * either a transaction or a thread (for non-transactional operations).

        + * + *

        Whether additional locking is performed and how locks are released depend + * on whether the operation is transactional and other configuration + * settings.

        + * + *

        Transactional Locking

        + * + *

        Transactional operations include all write operations for a transactional + * database, and read operations when a non-null {@link Transaction} parameter + * is passed. When a null transaction parameter is passed for a write + * operation for a transactional database, an auto-commit transaction is + * automatically used.

        + * + *

        With transactions, read and write locks are normally held until the end + * of the transaction (commit or abort). Write locks are always held until the + * end of the transaction. However, if {@link #READ_COMMITTED} is configured, + * then read locks for cursor operations are only held during the operation and + * while the cursor is positioned on the record. The read lock is released + * when the cursor is moved to a different record or closed. When {@link + * #READ_COMMITTED} is used for a database (non-cursor) operation, the read + * lock is released before the method returns.

        + * + *

        When neither {@link #READ_UNCOMMITTED} nor {@link #READ_COMMITTED} is + * specified, read and write locking as described above provide Repeatable Read + * isolation, which is the default transactional isolation level. If + * Serializable isolation is configured, additional "next key" locking is + * performed to prevent "phantoms" -- records that are not visible at one point + * in a transaction but that become visible at a later point after being + * inserted by another transaction. Serializable isolation is configured via + * {@link TransactionConfig#setSerializableIsolation} or {@link + * EnvironmentConfig#setTxnSerializableIsolation}.
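As a sketch (assuming an open transactional Environment named env), Serializable isolation can be enabled for a single transaction:

    // Request "next key" locking to prevent phantoms for this transaction.
    TransactionConfig txnConfig = new TransactionConfig();
    txnConfig.setSerializableIsolation(true);
    Transaction txn = env.beginTransaction(null, txnConfig);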

        + * + *

        Non-Transactional Locking

        + * + *

        Non-transactional operations include all operations for a + * non-transactional database (including a Deferred Write database), and read + * operations for a transactional database when a null {@link Transaction} + * parameter is passed.

        + * + *

        For non-transactional operations, both read and write locks are only held + * while a cursor is positioned on the record, and are released when the cursor + * is moved to a different record or closed. For database (non-cursor) + * operations, the read or write lock is released before the method + * returns.

        + * + *

        This behavior is similar to {@link #READ_COMMITTED}, except that both + * read and write locks are released. Configuring {@link #READ_COMMITTED} for + * a non-transactional database cursor has no effect.

        + * + *

Because the current thread is the accessor (locker) for non-transactional + * operations, a single thread may have multiple cursors open without locking + * conflicts. Two non-transactional cursors in the same thread may access the + * same record via write or read operations without conflicts, and the changes + * made by one cursor will be visible to the other cursor.

        + * + *

        However, a non-transactional operation will conflict with a transactional + * operation for the same record even when performed in the same thread. When + * using a transaction in a particular thread for a particular database, to + * avoid conflicts you should use that transaction for all access to that + * database in that thread. In other words, to avoid conflicts always pass the + * transaction parameter, not null, for all operations. If you don't wish to + * hold the read lock for the duration of the transaction, specify {@link + * #READ_COMMITTED}.
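A minimal sketch of this advice, with env, db, key and data assumed to be the application's handles:

    Transaction txn = env.beginTransaction(null, null);
    // Use the same txn for every access to db in this thread; a read with a
    // null transaction here could block on txn's own write lock.
    db.put(txn, key, data);
    db.get(txn, key, data, LockMode.READ_COMMITTED); // read lock released early
    txn.commit();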

        + * + *

        Read Uncommitted (Dirty-Read)

        + * + *

        When {@link #READ_UNCOMMITTED} is configured, no locking is performed + * by a read operation. {@code READ_UNCOMMITTED} does not apply to write + * operations.

        + * + *

        {@code READ_UNCOMMITTED} is sometimes called dirty-read because records + * are visible to the caller in their current state in the Btree at the time of + * the read, even when that state is due to operations performed using a + * transaction that has not yet committed. In addition, because no lock is + * acquired by the dirty read operation, the record's state may change at any + * time, even while a cursor used to do the dirty-read is still positioned on + * the record.
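As a sketch, dirty-read is requested per operation by passing the lock mode (db is an assumed open Database handle):

    DatabaseEntry key = new DatabaseEntry("some-key".getBytes());
    DatabaseEntry data = new DatabaseEntry();
    // No lock is acquired; the result reflects the current Btree state and
    // may include uncommitted changes made by other transactions.
    OperationStatus status =
        db.get(null, key, data, LockMode.READ_UNCOMMITTED);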

        + * + *

        To illustrate this, let's say a record is read with dirty-read + * ({@code READ_UNCOMMITTED}) by calling {@link Cursor#getNext Cursor.getNext} + * with a cursor C, and changes to the record are also being made in another + * thread using transaction T. When a locking (non-dirty-read) call to {@link + * Cursor#getCurrent Cursor.getCurrent} is subsequently made to read the same + * record again with C at the current position, a result may be returned that + * is different than the result returned by the earlier call to {@code + * getNext}. For example: + *

          + *
• If the record is updated by T after the dirty-read {@code getNext} + * call, and T is committed, a subsequent call to {@code getCurrent} will + * return the data updated by T.
+ *
• If the record is updated by T before the dirty-read {@code getNext} + * call, the {@code getNext} will return the data updated by T. But if + * T is then aborted, a subsequent call to {@code getCurrent} will return + * the version of the data before it was updated by T.
+ *
• If the record was inserted by T before the dirty-read {@code + * getNext} call, the {@code getNext} call will return the inserted record. + * But if T is aborted, a subsequent call to {@code getCurrent} will return + * {@link OperationStatus#KEYEMPTY}.
+ *
• If the record is deleted by T after the dirty-read {@code getNext} + * call, and T is committed, a subsequent call to {@code getCurrent} will + * return {@link OperationStatus#KEYEMPTY}.
+ *
        + *

        + * + *

        Note that deleted records are handled specially in JE. Deleted records + * remain in the Btree until after the deleting transaction is committed, and + * they are removed from the Btree asynchronously (not immediately at commit + * time). When using {@code #READ_UNCOMMITTED}, any record encountered in the + * Btree that was previously deleted, whether or not the deleting transaction + * has been committed, will be ignored (skipped over) by the read operation. + * Of course, if the deleting transaction is aborted, the record will no longer + * be deleted. If the application is scanning records, for example, this means + * that such records may be skipped by the scan. If this behavior is not + * desirable, {@link #READ_UNCOMMITTED_ALL} may be used instead. This mode + * ensures that records deleted by a transaction that is later aborted will not + * be skipped by a read operation. This is accomplished in two different ways + * depending on the type of database and whether the record's data is requested + * by the operation. + *

          + *
1. If the DB is configured for duplicates or the record's data + * is not requested, then a record that has been deleted by an open + * transaction is returned by the read operation.
+ *
2. If the DB is not configured for duplicates and the record's data is + * requested, then the read operation must wait for the deleting + * transaction to close (commit or abort). After the transaction is + * closed, the record will be returned if it is actually not deleted and + * otherwise will be skipped.
+ *
        + * + *

        By "record data" we mean both the {@code data} parameter for a regular or + * primary DB, and the {@code pKey} parameter for a secondary DB. By "record + * data requested" we mean that all or part of the {@code DatabaseEntry} will + * be returned by the read operation. Unless explicitly not + * requested, the complete {@code DatabaseEntry} is returned. See + * Using Partial DatabaseEntry + * Parameters for more information.

        + * + *

        Because of this difference in behavior, although {@code + * #READ_UNCOMMITTED} is fully non-blocking, {@code #READ_UNCOMMITTED_ALL} is + * not (under the conditions described). As a result, when using {@code + * #READ_UNCOMMITTED_ALL} under these conditions, a {@link + * LockConflictException} will be thrown when blocking results in a deadlock or + * lock timeout.

        + * + *

        To summarize, callers that use {@code READ_UNCOMMITTED} or {@code + * #READ_UNCOMMITTED_ALL} should be prepared for the following behaviors. + *

          + *
• After a successful dirty-read operation, because no lock is acquired + * the record can be changed by another transaction, even when the cursor + * used to perform the dirty-read operation is still positioned on the + * record.
+ *
• After a successful dirty-read operation using a cursor C, say that + * another transaction T deletes the record, and T is committed. In this + * case, {@link OperationStatus#KEYEMPTY} will be returned by the following + * methods if they are called while C is still positioned on the deleted + * record: {@link Cursor#getCurrent Cursor.getCurrent}, {@link + * Cursor#putCurrent Cursor.putCurrent} and {@link Cursor#delete + * Cursor.delete}.
+ *
• When using {@code READ_UNCOMMITTED}, deleted records will be skipped + * even when the deleting transaction is still open. No blocking will occur + * and {@link LockConflictException} is never thrown when using this + * mode.
+ *
• When using {@code #READ_UNCOMMITTED_ALL}, deleted records will not + * be skipped even when the deleting transaction is open. If the DB is a + * duplicates DB or the record's data is not requested, the deleted record + * will be returned. If the DB is not a duplicates DB and the record's + * data is requested, blocking will occur until the deleting transaction is + * closed. In the latter case, {@link LockConflictException} will be thrown + * when this blocking results in a deadlock or a lock timeout.
+ *
        + *

        + */ +public enum LockMode { + + /** + * Uses the default lock mode and is equivalent to passing {@code null} for + * the lock mode parameter. + * + *

        The default lock mode is {@link #READ_UNCOMMITTED} when this lock + * mode is configured via {@link CursorConfig#setReadUncommitted} or {@link + * TransactionConfig#setReadUncommitted}, or when using a {@link + * DiskOrderedCursor}. The Read Uncommitted mode overrides any other + * configuration settings.

        + * + *

        Otherwise, the default lock mode is {@link #READ_COMMITTED} when this + * lock mode is configured via {@link CursorConfig#setReadCommitted} or + * {@link TransactionConfig#setReadCommitted}. The Read Committed mode + * overrides other configuration settings except for {@link + * #READ_UNCOMMITTED}.

        + * + *

        Otherwise, the default lock mode is to acquire read locks and release + * them according to the {@link LockMode default locking rules} for + * transactional and non-transactional operations.

        + */ + DEFAULT, + + /** + * Reads modified but not yet committed data. + * + *

        The Read Uncommitted mode is used if this lock mode is explicitly + * passed for the lock mode parameter, or if null or {@link #DEFAULT} is + * passed and Read Uncommitted is the default -- see {@link #DEFAULT} for + * details.

        + * + *

        Unlike {@link #READ_UNCOMMITTED_ALL}, deleted records will be skipped + * even when the deleting transaction is still open. No blocking will occur + * and {@link LockConflictException} is never thrown when using this + * mode.

        + * + *

        See the {@link LockMode locking rules} for information on how Read + * Uncommitted impacts transactional and non-transactional locking.

        + */ + READ_UNCOMMITTED, + + /** + * Reads modified but not yet committed data, ensuring that records are not + * skipped due to transaction aborts. + * + *

        The Read Uncommitted mode is used only when this lock mode is + * explicitly passed for the lock mode parameter.

        + * + *

        Unlike {@link #READ_UNCOMMITTED}, deleted records will not be skipped + * even when the deleting transaction is open. If the DB is a duplicates DB + * or the record's data is not requested, the deleted record will be + * returned. If the DB is not a duplicates DB and the record's data is + * requested, blocking will occur until the deleting transaction is closed. + * In the latter case, {@link LockConflictException} will be thrown when + * this blocking results in a deadlock or a lock timeout.

        + * + *

        See the {@link LockMode locking rules} for information on how Read + * Uncommitted impacts transactional and non-transactional locking.

        + */ + READ_UNCOMMITTED_ALL, + + /** + * Read committed isolation provides for cursor stability but not + * repeatable reads. Data items which have been previously read by this + * transaction may be deleted or modified by other transactions before the + * cursor is closed or the transaction completes. + * + *

        Note that this LockMode may only be passed to {@link Database} get + * methods, not to {@link Cursor} methods. To configure a cursor for Read + * Committed isolation, use {@link CursorConfig#setReadCommitted}.
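For example — a sketch assuming db and txn handles — a cursor gets Read Committed isolation through its configuration rather than a lock mode:

    CursorConfig cursorConfig = new CursorConfig();
    cursorConfig.setReadCommitted(true);
    Cursor cursor = db.openCursor(txn, cursorConfig);
    // Each read lock is released as the cursor moves off the record.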

        + * + *

        See the {@link LockMode locking rules} for information on how Read + * Committed impacts transactional and non-transactional locking.

        + * + * @see Cache + * Statistics: Unexpected Sizes + */ + READ_COMMITTED, + + /** + * Acquire write locks instead of read locks when doing the retrieval. + * + *

Because it causes a write lock to be acquired, specifying this lock + * mode as a {@link Cursor} or {@link Database} {@code get} (read) method + * parameter will override the Read Committed or Read Uncommitted isolation + * mode that is configured using {@link CursorConfig} or {@link + * TransactionConfig}. The write lock will be acquired and held until the end + * of the transaction. For non-transactional use, the write lock will be + * released when the cursor is moved to a new position or closed.

        + * + *

        Setting this flag can eliminate deadlock during a read-modify-write + * cycle by acquiring the write lock during the read part of the cycle so + * that another thread of control acquiring a read lock for the same item, + * in its own read-modify-write cycle, will not result in deadlock.
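A sketch of the read-modify-write cycle this supports, where env and db are assumed handles and increment() is a hypothetical helper:

    Transaction txn = env.beginTransaction(null, null);
    DatabaseEntry key = new DatabaseEntry("counter".getBytes());
    DatabaseEntry data = new DatabaseEntry();
    // Acquire the write lock during the read so a concurrent reader in its
    // own read-modify-write cycle cannot deadlock with this one.
    if (db.get(txn, key, data, LockMode.RMW) == OperationStatus.SUCCESS) {
        data.setData(increment(data.getData())); // hypothetical increment()
        db.put(txn, key, data);
    }
    txn.commit();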

        + */ + RMW; + + private final ReadOptions readOptions; + + LockMode() { + readOptions = new ReadOptions().setLockMode(this); + } + + /** + * Returns a ReadOptions with this LockMode property, and default values + * for all other properties. + * + *

        WARNING: Do not modify the returned object, since it is a singleton. + * + * @since 7.0 + */ + public ReadOptions toReadOptions() { + return readOptions; + } + + public String toString() { + return "LockMode." + name(); + } +} diff --git a/src/com/sleepycat/je/LockNotAvailableException.java b/src/com/sleepycat/je/LockNotAvailableException.java new file mode 100644 index 0000000..bbbf93d --- /dev/null +++ b/src/com/sleepycat/je/LockNotAvailableException.java @@ -0,0 +1,63 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when a non-blocking operation fails to get a lock. Non-blocking + * transactions are configured using {@link TransactionConfig#setNoWait}. + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.

        + * + *

        Normally, applications should catch the base class {@link + * LockConflictException} rather than catching one of its subclasses. All lock + * conflicts are typically handled in the same way, which is normally to abort + * and retry the transaction. See {@link LockConflictException} for more + * information.
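A sketch of a no-wait transaction, with env, db, key and data assumed:

    TransactionConfig txnConfig = new TransactionConfig();
    txnConfig.setNoWait(true);
    Transaction txn = env.beginTransaction(null, txnConfig);
    try {
        db.put(txn, key, data); // fails fast instead of blocking on a lock
        txn.commit();
    } catch (LockConflictException e) {
        // Catches LockNotAvailableException via the common base class.
        txn.abort();
    }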

        + * + * @since 4.0 + */ +public class LockNotAvailableException extends LockConflictException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public LockNotAvailableException(Locker locker, String message) { + /* Do not set abort-only for a no-wait lock failure. */ + super(message); + } + + /** + * For internal use only. + * @hidden + */ + private LockNotAvailableException(String message, + LockNotAvailableException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new LockNotAvailableException(msg, this); + } +} diff --git a/src/com/sleepycat/je/LockNotGrantedException.java b/src/com/sleepycat/je/LockNotGrantedException.java new file mode 100644 index 0000000..2143747 --- /dev/null +++ b/src/com/sleepycat/je/LockNotGrantedException.java @@ -0,0 +1,85 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when a non-blocking operation fails to get a lock, and {@link + * EnvironmentConfig#LOCK_OLD_LOCK_EXCEPTIONS} is set to true. Non-blocking + * transactions are configured using {@link TransactionConfig#setNoWait}. + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.

        + * + *

        For compatibility with JE 3.3 and earlier, {@link + * LockNotGrantedException} is thrown instead of {@link + * LockNotAvailableException} when {@link + * EnvironmentConfig#LOCK_OLD_LOCK_EXCEPTIONS} is set to true. This + * configuration parameter is false by default. See {@link + * EnvironmentConfig#LOCK_OLD_LOCK_EXCEPTIONS} for information on the changes + * that should be made to all applications that upgrade from JE 3.3 or + * earlier.

        + * + *

        Normally, applications should catch the base class {@link + * LockConflictException} rather than catching one of its subclasses. All lock + * conflicts are typically handled in the same way, which is normally to abort + * and retry the transaction. See {@link LockConflictException} for more + * information.

+ * + * @deprecated replaced by {@link LockNotAvailableException} + */ +public class LockNotGrantedException extends DeadlockException { + + private static final long serialVersionUID = 646414701L; + + /* + * LockNotGrantedException extends DeadlockException in order to support + * the approach that all applications need only handle + * DeadlockException. The idea is that we don't want an application to fail + * because a new type of exception is thrown when an operation is changed + * to non-blocking. + * + * Applications that care about LockNotGrantedExceptions can add another + * catch block to handle it, but otherwise they can be handled the same way + * as deadlocks. See SR [#10672] + */ + + /** + * For internal use only. + * @hidden + */ + public LockNotGrantedException(Locker locker, String message) { + /* Do not set abort-only for a no-wait lock failure. */ + super(message); + } + + /** + * For internal use only. + * @hidden + */ + private LockNotGrantedException(String message, + LockNotGrantedException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new LockNotGrantedException(msg, this); + } +} diff --git a/src/com/sleepycat/je/LockStats.java b/src/com/sleepycat/je/LockStats.java new file mode 100644 index 0000000..4234227 --- /dev/null +++ b/src/com/sleepycat/je/LockStats.java @@ -0,0 +1,200 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_CONTENTION; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_SUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_UNSUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NO_WAITERS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_RELEASES; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_SELF_OWNED; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_OWNERS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_REQUESTS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_TOTAL; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITERS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; + +import java.io.Serializable; + +import com.sleepycat.je.utilint.StatGroup; + +/** + * Lock statistics for a database environment. + * + *

        Note that some of the lock statistics may be expensive to obtain because + * the lock table is unavailable to other operations while the statistics are + * gathered. These expensive statistics are only provided if {@link + * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is + * called with a StatsConfig parameter that has been configured for "slow" + * stats. + * + * @deprecated as of 4.0.10, replaced by {@link + * Environment#getStats(StatsConfig)}.
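With the replacement API, the expensive statistics are requested by turning off the "fast" option — a sketch assuming an open Environment named env:

    StatsConfig statsConfig = new StatsConfig();
    statsConfig.setFast(false); // include stats that require the lock table
    EnvironmentStats stats = env.getStats(statsConfig);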

        + */ +public class LockStats implements Serializable { + + private static final long serialVersionUID = 172109534L; + + /* Basic stats on lock requests. */ + private final StatGroup basicStats; + + /* Stats on lock table latch access. */ + private final StatGroup latchStats; + + /* Stats on the types of locks in the lock table; expensive to collect. */ + private final StatGroup tableStats; + + /** + * @hidden + * For internal use only. Create a stat api instance to house lock related + * information. + */ + public LockStats(StatGroup basicStats, + StatGroup latchStats, + StatGroup tableStats) { + this.basicStats = basicStats; + this.latchStats = latchStats; + this.tableStats = tableStats; + } + + /** + * Total lock owners in lock table. Only provided when {@link + * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is + * called in "slow" mode. + */ + public int getNOwners() { + return tableStats.getInt(LOCK_OWNERS); + } + + /** + * Total read locks currently held. Only provided when {@link + * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is + * called in "slow" mode. + */ + public int getNReadLocks() { + return tableStats.getInt(LOCK_READ_LOCKS); + } + + /** + * Total locks currently in lock table. Only provided when {@link + * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is + * called in "slow" mode. + */ + public int getNTotalLocks() { + return tableStats.getInt(LOCK_TOTAL); + } + + /** + * Total transactions waiting for locks. Only provided when {@link + * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is + * called in "slow" mode. + */ + public int getNWaiters() { + return tableStats.getInt(LOCK_WAITERS); + } + + /** + * Total write locks currently held. Only provided when {@link + * com.sleepycat.je.Environment#getLockStats Environment.getLockStats} is + * called in "slow" mode. + */ + public int getNWriteLocks() { + return tableStats.getInt(LOCK_WRITE_LOCKS); + } + + /** + * Total number of lock requests to date. + */ + public long getNRequests() { + return basicStats.getLong(LOCK_REQUESTS); + } + + /** + * Total number of lock waits to date. + */ + public long getNWaits() { + return basicStats.getLong(LOCK_WAITS); + } + + /** + * Number of acquires of lock table latch with no contention. + */ + public int getNAcquiresNoWaiters() { + return tableStats.getInt(LATCH_NO_WAITERS); + } + + /** + * Number of acquires of lock table latch when it was already owned + * by the caller. + */ + public int getNAcquiresSelfOwned() { + return tableStats.getInt(LATCH_SELF_OWNED); + } + + /** + * Number of acquires of lock table latch when it was already owned by + * another thread. + */ + public int getNAcquiresWithContention() { + return tableStats.getInt(LATCH_CONTENTION); + } + + /** + * Number of successful no-wait acquires of the lock table latch. + */ + public int getNAcquiresNoWaitSuccessful() { + return tableStats.getInt(LATCH_NOWAIT_SUCCESS); + } + + /** + * Number of unsuccessful no-wait acquires of the lock table latch. + */ + public int getNAcquiresNoWaitUnSuccessful() { + return tableStats.getInt(LATCH_NOWAIT_UNSUCCESS); + } + + /** + * Number of releases of the lock table latch. + */ + public int getNReleases() { + return tableStats.getInt(LATCH_RELEASES); + } + + /** + * For convenience, LockTable.toString will display all stats in + * an easily readable format. 
+ */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(basicStats); + sb.append(latchStats); + sb.append(tableStats); + return sb.toString(); + } + + /** + * Like #toString, display all stats. Includes a description of each + * stat. + */ + public String toStringVerbose() { + StringBuilder sb = new StringBuilder(); + sb.append(basicStats.toStringVerbose()); + sb.append(latchStats.toStringVerbose()); + sb.append(tableStats.toStringVerbose()); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/LockTimeoutException.java b/src/com/sleepycat/je/LockTimeoutException.java new file mode 100644 index 0000000..4a573c0 --- /dev/null +++ b/src/com/sleepycat/je/LockTimeoutException.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when multiple threads are competing for a lock and the lock timeout + * interval is exceeded for the current operation. This is normally because + * another transaction or cursor holds a lock for longer than the timeout + * interval. It may also occur if the application fails to close a cursor, or + * fails to commit or abort a transaction, since any locks held by the cursor + * or transaction will be held indefinitely. + * + *

        This exception is not thrown if a deadlock is detected, even if the + * timeout elapses before the deadlock is broken. If a deadlock is detected, + * {@link DeadlockException} is always thrown instead.

        + * + *

        The lock timeout interval may be set using + * {@link EnvironmentConfig#setLockTimeout} or + * {@link Transaction#setLockTimeout}.
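A sketch of both settings, assuming the TimeUnit-based setters of this era of JE and a Transaction handle named txn:

    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(true);
    envConfig.setLockTimeout(1, java.util.concurrent.TimeUnit.SECONDS);
    // ...or, to lengthen the interval for one long-running transaction:
    txn.setLockTimeout(5, java.util.concurrent.TimeUnit.SECONDS);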

        + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.

        + * + *

        Normally, applications should catch the base class {@link + * LockConflictException} rather than catching one of its subclasses. All lock + * conflicts are typically handled in the same way, which is normally to abort + * and retry the transaction. See {@link LockConflictException} for more + * information.

        + * + * @since 4.0 + */ +public class LockTimeoutException extends LockConflictException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public LockTimeoutException(Locker locker, String message) { + super(locker, message); + } + + /** + * For internal use only. + * @hidden + */ + private LockTimeoutException(String message, + LockTimeoutException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new LockTimeoutException(msg, this); + } +} diff --git a/src/com/sleepycat/je/LogWriteException.java b/src/com/sleepycat/je/LogWriteException.java new file mode 100644 index 0000000..fbdd138 --- /dev/null +++ b/src/com/sleepycat/je/LogWriteException.java @@ -0,0 +1,80 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Thrown when an {@code IOException} or other failure occurs when writing to + * the JE log. This exception may be indicative of a full disk, although an + * {@code IOException} does not contain enough information to determine this + * definitively. + * + *

        This exception may be thrown as the result of any write operation, + * including record writes, checkpoints, etc.

        + * + *

        Existing {@link Environment} handles are invalidated as a result of this + * exception.

        + * + * @since 4.0 + */ +public class LogWriteException extends EnvironmentFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public LogWriteException(EnvironmentImpl envImpl, String message) { + super(envImpl, EnvironmentFailureReason.LOG_WRITE, message); + } + + /** + * For internal use only. + * @hidden + */ + public LogWriteException(EnvironmentImpl envImpl, Throwable t) { + super(envImpl, EnvironmentFailureReason.LOG_WRITE, t); + } + + /** + * For internal use only. + * @hidden + */ + public LogWriteException(EnvironmentImpl envImpl, + String message, + Throwable t) { + super(envImpl, EnvironmentFailureReason.LOG_WRITE, message, t); + } + + /** + * For internal use only. + * @hidden + */ + private LogWriteException(String message, + LogWriteException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public EnvironmentFailureException wrapSelf(String msg) { + return new LogWriteException(msg, this); + } +} diff --git a/src/com/sleepycat/je/OperationFailureException.java b/src/com/sleepycat/je/OperationFailureException.java new file mode 100644 index 0000000..d2c5734 --- /dev/null +++ b/src/com/sleepycat/je/OperationFailureException.java @@ -0,0 +1,240 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Indicates that a failure has occurred that impacts the current operation + * and/or transaction. For failures that impact the environment as a whole, + * see {@link EnvironmentFailureException}. For an overview of all exceptions + * thrown by JE, see {@link DatabaseException}. + * + *

        If an explicit transaction applies to a method which threw this + * exception, the exception may indicate that {@link Transaction#abort} must be + * called, depending on the nature of the failure. A transaction is applicable + * to a method call in two cases.

        + *
          + *
1. When an explicit (non-null) {@code Transaction} instance is specified. + * This applies when the {@code Transaction} is passed as a parameter to the + * method that throws the exception, or when the {@code Transaction} is passed + * to {@link Database#openCursor} and a {@code Cursor} method throws the + * exception. + *
2. When a per-thread {@code Transaction} applies to the method that throws + * the exception. Per-thread transactions apply when using {@link + * com.sleepycat.collections persistent collections} with {@link + * com.sleepycat.collections.CurrentTransaction} or {@link + * com.sleepycat.collections.TransactionRunner}, or when using XA transactions + * with {@link XAEnvironment}. + *
        + * + *

        When a transaction is applicable to a method call, the application should + * catch {@code OperationFailureException} and then call {@link + * Transaction#isValid}. If {@code false} is returned, all {@code Cursor} + * instances that were created with the transaction must be closed and then + * {@link Transaction#abort} must be called. Also note that {@link + * Transaction#isValid} may be called at any time, not just during exception + * handling.
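A sketch of that sequence, where txn and cursor are the application's assumed handles:

    try {
        cursor.delete();
    } catch (OperationFailureException e) {
        if (!txn.isValid()) {
            cursor.close(); // close every cursor opened under txn first
            txn.abort();
        }
        // Then choose a recovery action based on the exception subclass.
    }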

        + * + *

        The use of the {@link Transaction#isValid} method allows JE to determine + * dynamically whether the failure requires an abort or not, and allows for + * this determination to change in future releases. Over time, internal + * improvements to error handling may allow more error conditions to be handled + * without invalidating the {@code Transaction}.

        + * + *

        The specific handling that is necessary for an {@code + * OperationFailureException} depends on the specific subclass thrown. See the + * javadoc for each method for information on which methods throw which {@code + * OperationFailureException}s and why.

        + * + *

        If {@link Transaction#abort} is not called after an {@code + * OperationFailureException} invalidates the {@code Transaction}, all + * subsequent method calls using the {@code Transaction} will throw the same + * exception. This provides more than one opportunity to catch and handle the + * specific exception subclass that caused the failure.

        + * + *

        {@code OperationFailureException} is also thrown by methods where no + * transaction applies. In most cases the action required to handle the + * exception is the same as with a transaction, although of course no abort is + * necessary.

        + * + *

        However, please be aware that for some operations on a non-transactional + * {@code Database} or {@code EntityStore}, an {@code + * OperationFailureException} may cause data corruption. For example, see + * {@link SecondaryReferenceException}.

        + * + *

        There are two groups of operation failure subclasses worth noting since + * they apply to many methods: read operation failures and write operation + * failures. These are described below.

        + * + *

        Read Operation Failures

        + * + *

        Read operations are all those performed by the {@code get} family of + * methods, for example, {@link Database#get Database.get}, {@link + * Cursor#getNext Cursor.getNext}, {@link com.sleepycat.persist.EntityIndex#get + * EntityIndex.get}, {@link com.sleepycat.persist.EntityCursor#next + * EntityCursor.next}, {@link com.sleepycat.collections.StoredMap#get + * StoredMap.get}, and {@link ForwardCursor#getNext ForwardCursor.getNext}. + * These methods may cause the following operation + * failures.

        + * + *
          + *
• {@link OperationFailureException} is the superclass of all read + * operation failures.
  + *
  • {@link LockConflictException} is thrown if a lock conflict prevents + * the operation from completing. A read operation may be blocked by another + * locker (transaction or non-transactional cursor) that holds a write lock + * on the record.
    + *
    • {@link com.sleepycat.je.rep.LockPreemptedException} is a subclass + * of {@code LockConflictException} that is thrown in a replicated + * environment on the Replica node, when the Master node has changed a + * record that was previously locked by the reading transaction or + * cursor.
  + *
  • {@link SecondaryIntegrityException} is thrown if a primary-secondary + * relationship integrity problem is detected while reading a primary + * database record via a secondary index.
  + *
  • {@link com.sleepycat.je.rep.DatabasePreemptedException} is thrown in a + * replicated environment on the Replica node, when the Master node has + * truncated, removed or renamed the database.
  + *
  • Other {@link OperationFailureException} subclasses may be thrown if + * such an exception was thrown earlier and caused the transaction to be + * invalidated.
        + * + *

        Write Operation Failures

        + * + *

        Write operations are all those performed by the {@code put} and {@code + * delete} families of methods, for example, {@link Database#put Database.put}, + * {@link Cursor#delete Cursor.delete}, {@link + * com.sleepycat.persist.PrimaryIndex#put PrimaryIndex.put}, {@link + * com.sleepycat.persist.EntityCursor#delete EntityCursor.delete} and {@link + * com.sleepycat.collections.StoredMap#put StoredMap.put}. These methods may + * cause the following operation failures, although certain failures are only + * caused by {@code put} methods and others only by {@code delete} methods, as + * noted below.

        + * + *
          + *
• {@link OperationFailureException} is the superclass of all write + * operation failures.
  + *
  • {@link LockConflictException} is thrown if a lock conflict prevents + * the operation from completing. A write operation may be blocked by + * another locker (transaction or non-transactional cursor) that holds a read + * or write lock on the record.
  + *
  • {@link DiskLimitException} is thrown if a disk limit has been + * violated and this prevents the operation from completing.
  + *
  • {@link SecondaryConstraintException} is the superclass of all + * exceptions thrown when a write operation fails because of a secondary + * constraint.
    + *
    • {@link ForeignConstraintException} is thrown when an attempt to + * write a primary database record would insert a secondary record with a + * key that does not exist in a foreign key database, when the secondary + * key is configured as a foreign key. This exception is only thrown by + * {@code put} methods.
    + *
    • {@link UniqueConstraintException} is thrown when an attempt to write + * a primary database record would insert a secondary record with a + * duplicate key, for secondaries that represent one-to-one and one-to-many + * relationships. This exception is only thrown by {@code put} + * methods.
    + *
    • {@link DeleteConstraintException} is thrown when an attempt is made + * to delete a key from a foreign key database, when that key is referenced + * by a secondary database, and the secondary is configured to cause an + * abort in this situation. This exception is only thrown by {@code + * delete} methods.
  + *
  • {@link SecondaryIntegrityException} is thrown if a primary-secondary + * relationship integrity problem is detected while writing a record in a + * primary database that has one or more secondary indices.
  + *
  • {@link com.sleepycat.je.rep.DatabasePreemptedException} is thrown in a + * replicated environment on a Replica node, when the Master node has + * truncated, removed or renamed the database.
  + *
  • {@link com.sleepycat.je.rep.ReplicaWriteException} is always thrown in + * a replicated environment on a Replica node, since write operations are not + * allowed on a Replica.
  + *
  • Other {@link OperationFailureException} subclasses may be thrown if + * such an exception was thrown earlier and caused the transaction to be + * invalidated.
        + * + * @since 4.0 + */ +public abstract class OperationFailureException extends DatabaseException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public OperationFailureException(Locker locker, + boolean abortOnly, + String message, + Throwable cause) { + super(message, cause); + if (abortOnly) { + assert locker != null; + locker.setOnlyAbortable(this); + } + } + + /** + * For internal use only. + * @hidden + * Only for use by bind/collection/persist exception subclasses. + */ + public OperationFailureException(String message) { + this(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + * Only for use by wrapSelf methods. + */ + protected OperationFailureException(String message, + OperationFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + * Must be implemented by every concrete subclass to return an instance of + * its own class, constructed with the given msg and this exception as + * parameters, e.g.: return new MyClass(msg, this); + */ + public abstract OperationFailureException wrapSelf(String msg); +} diff --git a/src/com/sleepycat/je/OperationResult.java b/src/com/sleepycat/je/OperationResult.java new file mode 100644 index 0000000..53c6915 --- /dev/null +++ b/src/com/sleepycat/je/OperationResult.java @@ -0,0 +1,71 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * The result of an operation that successfully reads or writes a record. + *

        + * An OperationResult does not contain any failure information. Methods that + * perform unsuccessful reads or writes return null or throw an exception. Null + * is returned if the operation failed for commonly expected reasons, such as a + * read that fails because the key does not exist, or an insertion that fails + * because the key does exist. + *
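For example — a sketch with db, txn, key and data assumed, where the Get-based signature is the JE 7 form — the two styles compare as follows:

    // Status style: SUCCESS corresponds to a non-null OperationResult.
    OperationStatus status = db.get(txn, key, data, LockMode.DEFAULT);

    // Result style: null means the record was not found; no exception.
    OperationResult result =
        db.get(txn, key, data, Get.SEARCH, LockMode.DEFAULT.toReadOptions());
    if (result != null) {
        long expiresAt = result.getExpirationTime(); // 0 when there is no TTL
    }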

        + * Methods that return OperationResult can be compared to methods that return + * {@link OperationStatus} as follows: If {@link OperationStatus#SUCCESS} is + * returned by the latter methods, this is equivalent to returning a non-null + * OperationResult by the former methods. + * + * @since 7.0 + */ +public class OperationResult { + + private final long expirationTime; + private final boolean update; + + OperationResult(final long expirationTime, final boolean update) { + this.expirationTime = expirationTime; + this.update = update; + } + + /** + * Returns whether the operation was an update, for distinguishing inserts + * and updates performed by a {@link Put#OVERWRITE} operation. + * + * @return whether an existing record was updated by this operation. + */ + public boolean isUpdate() { + return update; + } + + /** + * Returns the expiration time of the record, in milliseconds, or zero + * if the record has no TTL and does not expire. + *

+ * For 'get' operations, this is the expiration time of the current record. + * For 'put' operations, this is the expiration time of the newly written + * record. For 'delete' operations, this is the expiration time of the + * record that was deleted. + *

+ * The return value will always be evenly divisible by the number of + * milliseconds in one hour. If {@code TimeUnit.DAYS} was specified + * when the record was written, the return value will also be evenly + * divisible by the number of milliseconds in one day. + * + * @return the expiration time in milliseconds, or zero. + */ + public long getExpirationTime() { + return expirationTime; + } +} diff --git a/src/com/sleepycat/je/OperationStatus.java b/src/com/sleepycat/je/OperationStatus.java new file mode 100644 index 0000000..ca9a044 --- /dev/null +++ b/src/com/sleepycat/je/OperationStatus.java @@ -0,0 +1,50 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Status values from database operations. + */ +public enum OperationStatus { + + /** + * The operation was successful. + */ + SUCCESS, + + /** + * The operation to insert data was configured to not allow overwrite and + * the key already exists in the database. + */ + KEYEXIST, + + /** + * The cursor operation was unsuccessful because the current record was + * deleted. This can only occur if a Cursor is positioned to an existing + * record, then the record is deleted, and then the getCurrent, putCurrent, + * or delete method is called. + */ + KEYEMPTY, + + /** + * The requested key/data pair was not found. + */ + NOTFOUND; + + /** {@inheritDoc} */ + @Override + public String toString() { + return "OperationStatus." + name(); + } +} diff --git a/src/com/sleepycat/je/PartialComparator.java b/src/com/sleepycat/je/PartialComparator.java new file mode 100644 index 0000000..0fd85cb --- /dev/null +++ b/src/com/sleepycat/je/PartialComparator.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * A tag interface used to mark a B-tree or duplicate comparator class as a + * partial comparator. + * + * Comparators are configured using + * {@link DatabaseConfig#setBtreeComparator(java.util.Comparator)} or + * {@link DatabaseConfig#setBtreeComparator(Class)}, and + * {@link DatabaseConfig#setDuplicateComparator(java.util.Comparator)} or + * {@link DatabaseConfig#setDuplicateComparator(Class)}. + *

        + * As described in the javadoc for these methods, a partial comparator is a + * comparator that allows for the keys of a database to be updated, but only + * if the updates are not significant with respect to uniqueness and ordering. + * Also described is the fact that comparators must be used with great caution, + * since a badly behaved comparator can cause B-tree corruption. + *

        + * Even greater caution is needed when using partial comparators, for several + * reasons. Partial comparators are normally used for performance reasons in + * certain situations, but the performance trade-offs are very subtle and + * difficult to understand. In addition, as of JE 6, this tag interface must + * be added to all partial comparator classes so that JE can correctly perform + * transaction aborts, while maintaining the last committed key or duplicate + * data values properly. Finally, for a database with duplicates + * configured, a partial comparator (implementing this tag interface) will + * disable optimizations in JE 6 that drastically reduce cleaner costs. + *

        + * For these reasons, we do not recommend using partial comparators, although + * they are supported in order to avoid breaking applications that used them + * prior to JE 6. Whenever possible, please avoid using partial comparators. + */ +public interface PartialComparator { +} diff --git a/src/com/sleepycat/je/PreloadConfig.java b/src/com/sleepycat/je/PreloadConfig.java new file mode 100644 index 0000000..929bdeb --- /dev/null +++ b/src/com/sleepycat/je/PreloadConfig.java @@ -0,0 +1,288 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Specifies the attributes of an application invoked preload operation. + */ +public class PreloadConfig implements Cloneable { + + private long maxBytes; + private long maxMillisecs; + private boolean loadLNs; + private ProgressListener progressListener; + private long lsnBatchSize = Long.MAX_VALUE; + private long internalMemoryLimit = Long.MAX_VALUE; + + /** + * Default configuration used if null is passed to {@link + * com.sleepycat.je.Database#preload Database.preload}. + */ + public PreloadConfig() { + } + + /** + * Configure the maximum number of bytes to preload. + * + *

        The default is 0 for this class.

        + * + * @param maxBytes If the maxBytes parameter is non-zero, a preload will + * stop when the cache contains this number of bytes. + * + * @return this + */ + public PreloadConfig setMaxBytes(final long maxBytes) { + setMaxBytesVoid(maxBytes); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxBytesVoid(final long maxBytes) { + this.maxBytes = maxBytes; + } + + /** + * Return the number of bytes in the cache to stop the preload at. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return The number of bytes in the cache to stop the preload at. + */ + public long getMaxBytes() { + return maxBytes; + } + + /** + * Configure the maximum number of milliseconds to execute preload. + * + *

        The default is 0 for this class.

        + * + * @param maxMillisecs If the maxMillisecs parameter is non-zero, a preload + * will stop when this amount of time has passed. + * + * @return this + */ + public PreloadConfig setMaxMillisecs(final long maxMillisecs) { + setMaxMillisecsVoid(maxMillisecs); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxMillisecsVoid(final long maxMillisecs) { + this.maxMillisecs = maxMillisecs; + } + + /** + * Return the number of millisecs to stop the preload after. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return The number of millisecs to stop the preload after. + */ + public long getMaxMillisecs() { + return maxMillisecs; + } + + /** + * Configure the preload load LNs option. + * + *

        The default is false for this class.

        + * + * @param loadLNs If set to true, the preload will load Leaf Nodes (LNs) + * containing the data values. + * + * @return this + */ + public PreloadConfig setLoadLNs(final boolean loadLNs) { + setLoadLNsVoid(loadLNs); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLoadLNsVoid(final boolean loadLNs) { + this.loadLNs = loadLNs; + } + + /** + * Return the configuration of the preload load LNs option. + * + * @return The configuration of the preload load LNs option. + */ + public boolean getLoadLNs() { + return loadLNs; + } + + /** + * Preload progress listeners report this phase value, along with a + * count of the number of times that the preload has fetched from disk. + */ + public static enum Phases { + /** + * Preload is in progress and resulted in a fetch from disk. + */ + PRELOAD }; + + /** + * Configure the preload operation to make periodic calls to a {@link + * ProgressListener} to provide feedback on preload progress. + * The ProgressListener.progress() method is called each time the preload + * must fetch a btree node or data record from disk. + *

        + * When using progress listeners, review the information at {@link + * ProgressListener#progress} to avoid any unintended disruption to + * replication stream syncup. + * + * @param progressListener The ProgressListener to callback during + * preload. + * + * @return this + */ + public PreloadConfig + setProgressListener(final ProgressListener<Phases> progressListener) { + setProgressListenerVoid(progressListener); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setProgressListenerVoid + (final ProgressListener<Phases> progressListener) { + this.progressListener = progressListener; + } + + /** + * Return the ProgressListener for this PreloadConfig. + * + * @return the ProgressListener for this PreloadConfig. + */ + public ProgressListener<Phases> getProgressListener() { + return progressListener; + } + + /** + * Set the maximum number of LSNs to gather and sort at any one time. The + * default is an unlimited number of LSNs. Setting this lower causes the + * preload to use less memory, but it sorts and processes LSNs more + * frequently thereby causing slower performance. Setting this higher will + * in general improve performance at the expense of memory. Each LSN uses + * 16 bytes of memory. + * + * @param lsnBatchSize the maximum number of LSNs to accumulate and sort + * per batch. + * + * @return this + */ + public PreloadConfig setLSNBatchSize(final long lsnBatchSize) { + setLSNBatchSizeVoid(lsnBatchSize); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLSNBatchSizeVoid(final long lsnBatchSize) { + this.lsnBatchSize = lsnBatchSize; + } + + /** + * Preload is implemented to optimize I/O cost by fetching the records of + * a Database in disk order, so that disk accesses are sequential rather + * than random. LSNs (log sequence numbers) are the disk addresses of + * database records. Setting this value causes the preload to process + * batches of LSNs rather than all in-memory LSNs at one time, + * which bounds the memory usage of + * the preload processing, at the expense of preload performance. + * + * @return the maximum number of LSNs to be sorted that this + * preload is configured for. + */ + public long getLSNBatchSize() { + return lsnBatchSize; + } + + /** + * Set the maximum amount of non-JE-cache memory that preload can use at + * one time. The default is an unlimited amount of memory. Setting this + * lower causes the preload to use less memory, but generally results in + * slower performance. Setting this higher will often improve performance + * at the expense of higher memory utilization. + * + * @param internalMemoryLimit the maximum number of non-JE-cache bytes to + * use. + * + * @return this + */ + public PreloadConfig + setInternalMemoryLimit(final long internalMemoryLimit) { + setInternalMemoryLimitVoid(internalMemoryLimit); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setInternalMemoryLimitVoid(final long internalMemoryLimit) { + this.internalMemoryLimit = internalMemoryLimit; + } + + /** + * Returns the maximum amount of non-JE-cache memory that preload can use at + * one time. + * + * @return the maximum amount of non-JE-cache memory that preload can use at + * one time. + */ + public long getInternalMemoryLimit() { + return internalMemoryLimit; + } + + /** + * Returns a copy of this configuration object.
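A rough sketch of how these preload options combine in practice; the open database handle db is assumed, and the specific limits are illustrative only:

    import com.sleepycat.je.*;

    public class PreloadExample {
        // Sketch: bound a preload by cache bytes, time, and internal memory,
        // then inspect the statistics it returns.
        static void preload(Database db) {
            PreloadConfig config = new PreloadConfig()
                .setMaxBytes(100L * 1024 * 1024)      // stop after 100 MB of cache
                .setMaxMillisecs(30_000)              // or after 30 seconds
                .setLoadLNs(true)                     // also load data records
                .setLSNBatchSize(1_000_000)           // sort at most 1M LSNs at a time
                .setInternalMemoryLimit(16L * 1024 * 1024);
            PreloadStats stats = db.preload(config);
            System.out.println("status=" + stats.getStatus()
                + " BINs=" + stats.getNBINsLoaded()
                + " LNs=" + stats.getNLNsLoaded());
        }
    }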
+ */ + @Override + public PreloadConfig clone() { + try { + return (PreloadConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + return "maxBytes=" + maxBytes + + "\nmaxMillisecs=" + maxMillisecs + + "\nloadLNs=" + loadLNs + + "\nlsnBatchSize=" + lsnBatchSize + + "\ninternalMemoryLimit=" + internalMemoryLimit + + "\n"; + } +} diff --git a/src/com/sleepycat/je/PreloadConfigBeanInfo.java b/src/com/sleepycat/je/PreloadConfigBeanInfo.java new file mode 100644 index 0000000..dc8029f --- /dev/null +++ b/src/com/sleepycat/je/PreloadConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class PreloadConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(PreloadConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(PreloadConfig.class); + } +} diff --git a/src/com/sleepycat/je/PreloadStats.java b/src/com/sleepycat/je/PreloadStats.java new file mode 100644 index 0000000..395daca --- /dev/null +++ b/src/com/sleepycat/je/PreloadStats.java @@ -0,0 +1,253 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; + +/** + * Statistics returned from {@link com.sleepycat.je.Database#preload + * Database.preload} or {@link com.sleepycat.je.Environment#preload}. + */ +public class PreloadStats implements Serializable { + + private static final long serialVersionUID = 2131949076L; + + /** + * The number of INs loaded during the preload() operation. + */ + private int nINsLoaded; + + /** + * The number of BINs loaded during the preload() operation. + */ + private int nBINsLoaded; + + /** + * The number of LNs loaded during the preload() operation. + */ + private int nLNsLoaded; + + /** + * The number of embeddedLNs encountered during the preload() operation. + */ + private int nEmbeddedLNs; + + /** + * The number of DINs loaded during the preload() operation. + */ + private int nDINsLoaded; + + /** + * The number of DBINs loaded during the preload() operation. 
+ */ + private int nDBINsLoaded; + + /** + * The number of DupCountLNs loaded during the preload() operation. + */ + private int nDupCountLNsLoaded; + + /** + * The number of times internal memory was exceeded. + */ + private int nCountMemoryExceeded; + + /** + * The status of the preload() operation. + */ + private PreloadStatus status; + + /** + * @hidden + * Internal use only. + */ + public PreloadStats() { + reset(); + } + + /** + * Resets all stats. + */ + private void reset() { + nEmbeddedLNs = 0; + nINsLoaded = 0; + nBINsLoaded = 0; + nLNsLoaded = 0; + nDINsLoaded = 0; + nDBINsLoaded = 0; + nDupCountLNsLoaded = 0; + nCountMemoryExceeded = 0; + status = PreloadStatus.SUCCESS; + } + + /** + * Returns the number of INs that were loaded into the cache during the + * preload() operation. + */ + public int getNINsLoaded() { + return nINsLoaded; + } + + /** + * Returns the number of BINs that were loaded into the cache during the + * preload() operation. + */ + public int getNBINsLoaded() { + return nBINsLoaded; + } + + /** + * Returns the number of LNs that were loaded into the cache during the + * preload() operation. + */ + public int getNLNsLoaded() { + return nLNsLoaded; + } + + /** + * Returns the number of embedded LNs encountered during the preload() + * operation. + */ + public int getNEmbeddedLNs() { + return nEmbeddedLNs; + } + + /** + * @deprecated returns zero for data written using JE 5.0 and later, but + * may return non-zero values when reading older data. + */ + public int getNDINsLoaded() { + return nDINsLoaded; + } + + /** + * @deprecated returns zero for data written using JE 5.0 and later, but + * may return non-zero values when reading older data. + */ + public int getNDBINsLoaded() { + return nDBINsLoaded; + } + + /** + * @deprecated returns zero for data written using JE 5.0 and later, but + * may return non-zero values when reading older data. + */ + public int getNDupCountLNsLoaded() { + return nDupCountLNsLoaded; + } + + /** + * Returns the count of the number of times that the internal memory budget + * specified by {@link + * com.sleepycat.je.PreloadConfig#setInternalMemoryLimit + * PreloadConfig.setInternalMemoryLimit()} was exceeded. + */ + public int getNCountMemoryExceeded() { + return nCountMemoryExceeded; + } + + /** + * Returns the PreloadStatus value for the preload() operation. + */ + public PreloadStatus getStatus() { + return status; + } + + /** + * @hidden + * Internal use only. + */ + public void incINsLoaded() { + this.nINsLoaded++; + } + + /** + * @hidden + * Internal use only. + */ + public void incBINsLoaded() { + this.nBINsLoaded++; + } + + /** + * @hidden + * Internal use only. + */ + public void incLNsLoaded() { + this.nLNsLoaded++; + } + + /** + * @hidden + * Internal use only. + */ + public void incEmbeddedLNs() { + this.nEmbeddedLNs++; + } + + /** + * @hidden + * Internal use only. + */ + public void incDINsLoaded() { + this.nDINsLoaded++; + } + + /** + * @hidden + * Internal use only. + */ + public void incDBINsLoaded() { + this.nDBINsLoaded++; + } + + /** + * @hidden + * Internal use only. + */ + public void incDupCountLNsLoaded() { + this.nDupCountLNsLoaded++; + } + + /** + * @hidden + * Internal use only. + */ + public void incMemoryExceeded() { + this.nCountMemoryExceeded++; + } + + /** + * @hidden + * Internal use only. 
+ */ + public void setStatus(PreloadStatus status) { + this.status = status; + } + + /** + * Returns a String representation of the stats in the form of + * <stat>=<value> + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("status=").append(status).append('\n'); + sb.append("nINsLoaded=").append(nINsLoaded).append('\n'); + sb.append("nBINsLoaded=").append(nBINsLoaded).append('\n'); + sb.append("nLNsLoaded=").append(nLNsLoaded).append('\n'); + + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/PreloadStatus.java b/src/com/sleepycat/je/PreloadStatus.java new file mode 100644 index 0000000..857b3a7 --- /dev/null +++ b/src/com/sleepycat/je/PreloadStatus.java @@ -0,0 +1,66 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; + +/** + * Describes the result of the {@link com.sleepycat.je.Database#preload + * Database.preload} operation. + */ +public class PreloadStatus implements Serializable { + + private static final long serialVersionUID = 903470137L; + + /* For toString. */ + private String statusName; + + /* Make the constructor public for serializability testing. */ + public PreloadStatus(String statusName) { + this.statusName = statusName; + } + + @Override + public String toString() { + return "PreloadStatus." + statusName; + } + + /** + * {@link com.sleepycat.je.Database#preload Database.preload} + * was successful. + */ + public static final PreloadStatus SUCCESS = + new PreloadStatus("SUCCESS"); + + /** + * {@link com.sleepycat.je.Database#preload Database.preload} + * filled maxBytes of the cache. + */ + public static final PreloadStatus FILLED_CACHE = + new PreloadStatus("FILLED_CACHE"); + + /** + * {@link com.sleepycat.je.Database#preload Database.preload} + * took more than maxMillisecs. + */ + public static final PreloadStatus EXCEEDED_TIME = + new PreloadStatus("EXCEEDED_TIME"); + + /** + * The user requested that preload stop during a call to + * ProgressListener.progress(). + */ + public static final PreloadStatus USER_HALT_REQUEST = + new PreloadStatus("USER_HALT_REQUEST"); +} diff --git a/src/com/sleepycat/je/ProgressListener.java b/src/com/sleepycat/je/ProgressListener.java new file mode 100644 index 0000000..0a05e7c --- /dev/null +++ b/src/com/sleepycat/je/ProgressListener.java @@ -0,0 +1,71 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * ProgressListener provides feedback to the application that progress is being + * made on a potentially long running or asynchronous JE operation. 
The + * listener itself is general and abstract, and more details about the meaning + * of the progress callback can be found by reading about the entry points + * where specific ProgressListeners can be specified. For example, see: + *

          + *
• {@link PreloadConfig#setProgressListener}, which accepts a ProgressListener<PreloadConfig.Phases>, and reports on Environment.preload() or Database.preload().
• {@link EnvironmentConfig#setRecoveryProgressListener}, which accepts a ProgressListener<RecoveryProgress>, and reports on environment startup.
• {@link com.sleepycat.je.rep.ReplicationConfig#setSyncupProgressListener}, which accepts a ProgressListener<SyncupProgress>, and reports on replication stream syncup.
        + * @since 5.0 + */ +public interface ProgressListener<T extends Enum<T>> { + + /** + * Called by BDB JE to indicate to the user that progress has been + * made on a potentially long running or asynchronous operation. + *

        + * This method should do the minimal amount of work, queuing any resource + * intensive operations for processing by another thread before returning + * to the caller, so that it does not unduly delay the target operation, + * which invokes this method. + *
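A minimal sketch of a listener that follows this advice by queuing events for a separate reporting thread; the queue-based design is illustrative, not prescribed by JE:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import com.sleepycat.je.ProgressListener;

    // Sketch: do minimal work in the callback and hand reporting
    // off to another thread that drains the queue.
    public class QueuedListener<T extends Enum<T>> implements ProgressListener<T> {

        private final BlockingQueue<String> events = new LinkedBlockingQueue<>();

        @Override
        public boolean progress(T phase, long n, long total) {
            // offer() never blocks; a reporting thread consumes the queue.
            events.offer(phase + " " + n + "/" + total);
            return true; // returning false would halt the operation
        }
    }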

        + * The application should also be aware that the method has the potential to + * disrupt the reported-upon operation. If progress() throws a + * RuntimeException, the operation for which the progress is being reported + * will be aborted and the exception propagated back to the original + * caller. Also, if progress() returns false, the operation will be + * halted. For recovery and syncup listeners, a false return value can + * invalidate and close the environment. + * + * @param phase an enum indicating the phase of the operation for + * which progress is being reported. + * @param n indicates the number of units that have been processed so far. + * If this does not apply, -1 is passed. + * @param total indicates the total number of units that will be processed + * if it is known by JE. If total is < 0, then the total number is + * unknown. When total == n, this indicates that processing of this + * operation is 100% complete, even if all previous calls to progress + * passed a negative value for total. + * + * @return true to continue the operation, false to stop it. + */ + public boolean progress(T phase, long n, long total); +} diff --git a/src/com/sleepycat/je/Put.java b/src/com/sleepycat/je/Put.java new file mode 100644 index 0000000..0a8f30d --- /dev/null +++ b/src/com/sleepycat/je/Put.java @@ -0,0 +1,108 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.PutMode; + +/** + * The operation type passed to "put" methods on databases and cursors. + */ +public enum Put { + + /** + * Inserts or updates a record depending on whether a matching record is + * already present. + * + *

        If the database does not have duplicate keys, a matching record is + * defined as one with the same key. The existing record's data will be + * replaced. In addition, if a custom key comparator is configured, and the + * key bytes are different but considered equal by the comparator, the key + * is replaced.

        + * + *

        If the database does have duplicate keys, a matching record is + * defined as one with the same key and data. As above, if a custom key + * comparator is configured, and the key bytes are different but considered + * equal by the comparator, the key is replaced. In addition, if a custom + * duplicate comparator is configured, and the data bytes are different but + * considered equal by the comparator, the data is replaced.

        + * + *

        The operation always succeeds (null is never returned).
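Because OVERWRITE never returns null, OperationResult.isUpdate() can distinguish the insert case from the update case. A short sketch, with hypothetical db, key, and data handles:

    import com.sleepycat.je.*;

    public class OverwriteExample {
        // Sketch: OVERWRITE always succeeds; isUpdate() tells us which
        // of the two outcomes (insert or update) actually happened.
        static void putAndReport(Database db, DatabaseEntry key, DatabaseEntry data) {
            OperationResult result =
                db.put(null, key, data, Put.OVERWRITE, new WriteOptions());
            System.out.println(result.isUpdate() ? "updated existing record"
                                                 : "inserted new record");
        }
    }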

        + */ + OVERWRITE(PutMode.OVERWRITE), + + /** + * Inserts a record if a record with a matching key is not already present. + * + *

        If the database has duplicate keys, a record is inserted only if + * there are no records with a matching key.

        + * + *

        The operation does not succeed (null is returned) when an existing + * record matches.

        + */ + NO_OVERWRITE(PutMode.NO_OVERWRITE), + + /** + * Inserts a record in a database with duplicate keys if a record with a + * matching key and data is not already present. + * + *

        This operation is not allowed for databases that do not have + * duplicate keys.

        + * + *

        The operation does not succeed (null is returned) when an existing + * record matches.

        + */ + NO_DUP_DATA(PutMode.NO_DUP_DATA), + + /** + * Updates the data of the record at the cursor position. + * + *

        If the database does not have duplicate keys, the existing record's + * data will be replaced.

        + * + *

        If the database does have duplicate keys, the existing data is + * replaced but it must be considered equal by the duplicate comparator. + * If the data is not considered equal, {@link DuplicateDataException} is + * thrown. Using the default comparator, the data is considered equal only if + * its bytes are equal. Therefore, changing the data is only possible if a + * custom duplicate comparator is configured.

        + * + *

        A partial data item may be + * specified to optimize for partial data update.

        + * + *

        This operation cannot be used to update the key of an existing record + * and in fact the key parameter must be null when calling generic put + * methods such as + * {@link Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put, + * WriteOptions)} and + * {@link Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)}.

        + * + *

        The operation does not succeed (null is returned) if the record at + * the current position has been deleted. This can occur in two cases: 1. + * If the record was deleted using this cursor and then accessed. 2. If the + * record was not locked by this cursor or transaction, and was deleted by + * another thread or transaction after this cursor was positioned on + * it.
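A sketch of a Put.CURRENT update, assuming an open cursor positioned on a record and that passing a null WriteOptions selects the defaults:

    import com.sleepycat.je.*;

    public class PutCurrentExample {
        // Sketch: update the data at the cursor position. The key must be
        // null for Put.CURRENT; a null result means the record at the
        // cursor was deleted in the meantime.
        static void updateCurrent(Cursor cursor, DatabaseEntry newData) {
            OperationResult result =
                cursor.put(null, newData, Put.CURRENT, null /* default options */);
            if (result == null) {
                System.out.println("record at cursor was deleted; nothing updated");
            }
        }
    }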

        + */ + CURRENT(PutMode.CURRENT); + + private final PutMode putMode; + + Put(final PutMode putMode) { + this.putMode = putMode; + } + + PutMode getPutMode() { + return putMode; + } +} diff --git a/src/com/sleepycat/je/ReadOptions.java b/src/com/sleepycat/je/ReadOptions.java new file mode 100644 index 0000000..e1b09b2 --- /dev/null +++ b/src/com/sleepycat/je/ReadOptions.java @@ -0,0 +1,97 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedException; + +import com.sleepycat.je.utilint.DatabaseUtil; + +/** + * Options for calling methods that read records. + * + * @since 7.0 + */ +public class ReadOptions implements Cloneable { + + private CacheMode cacheMode = null; + private LockMode lockMode = LockMode.DEFAULT; + + /** + * Constructs a ReadOptions object with default values for all properties. + */ + public ReadOptions() { + } + + @Override + public ReadOptions clone() { + try { + return (ReadOptions) super.clone(); + } catch (CloneNotSupportedException e) { + throw unexpectedException(e); + } + } + + /** + * Sets the {@code CacheMode} to be used for the operation. + *

        + * By default this property is null, meaning that the default specified + * using {@link Cursor#setCacheMode}, + * {@link DatabaseConfig#setCacheMode} or + * {@link EnvironmentConfig#setCacheMode} will be used. + * + * @param cacheMode is the {@code CacheMode} used for the operation, or + * null to use the Cursor, Database or Environment default. + * + * @return 'this'. + */ + public ReadOptions setCacheMode(final CacheMode cacheMode) { + this.cacheMode = cacheMode; + return this; + } + + /** + * Returns the {@code CacheMode} to be used for the operation, or null + * if the Cursor, Database or Environment default will be used. + * + * @see #setCacheMode(CacheMode) + */ + public CacheMode getCacheMode() { + return cacheMode; + } + + /** + * Sets the {@code LockMode} to be used for the operation. + *

        + * By default this property is {@link LockMode#DEFAULT}. + * + * @param lockMode the locking attributes. Specifying null or + * {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return 'this'. + */ + public ReadOptions setLockMode(final LockMode lockMode) { + DatabaseUtil.checkForNullParam(lockMode, "lockMode"); + this.lockMode = lockMode; + return this; + } + + /** + * Returns the {@code LockMode} to be used for the operation. + * + * @see #setLockMode(LockMode) + */ + public LockMode getLockMode() { + return lockMode; + } +} diff --git a/src/com/sleepycat/je/RecoveryProgress.java b/src/com/sleepycat/je/RecoveryProgress.java new file mode 100644 index 0000000..7314186 --- /dev/null +++ b/src/com/sleepycat/je/RecoveryProgress.java @@ -0,0 +1,126 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Describes the different phases of initialization that may + * be executed when an Environment is instantiated. Meant to be used in + * conjunction with a {@link ProgressListener} that is configured through + * {@link EnvironmentConfig#setRecoveryProgressListener} to monitor + * the cost of environment startup. + * @since 5.0 + */ +public enum RecoveryProgress { + /** + * Find the last valid entry in the database log. + */ + FIND_END_OF_LOG, + + /** + * Find the last complete checkpoint in the database log. + */ + FIND_LAST_CKPT, + + /** + * Read log entries that pertain to the database map, which is an + * internal index of all databases. + */ + READ_DBMAP_INFO, + + /** + * Redo log entries that pertain to the database map, which is an + * internal index of all databases. + */ + REDO_DBMAP_INFO, + + /** + * Rollback uncommitted database creation, deletion and truncations. + */ + UNDO_DBMAP_RECORDS, + + /** + * Redo committed database creation, deletion and truncations. + */ + REDO_DBMAP_RECORDS, + + /** + * Read log entries that pertain to the database indices. + */ + READ_DATA_INFO, + + /** + * Redo log entries that pertain to the database indices. + */ + REDO_DATA_INFO, + + /** + * Rollback uncommitted data operations, such as inserts, updates + * and deletes. + */ + UNDO_DATA_RECORDS, + + /** + * Repeat committed data operations, such as inserts, updates + * and deletes. + */ + REDO_DATA_RECORDS, + + /** + * Populate internal metadata which stores information about the + * utilization level of each log file, for efficient log cleaning. + */ + POPULATE_UTILIZATION_PROFILE, + + /** + * Populate internal metadata which stores information about the + * expiration time/data windows (histogram) of each log file, for + * efficient log cleaning. + * + * @since 6.5 + */ + POPULATE_EXPIRATION_PROFILE, + + /** + * Remove temporary databases created by the application that + * are no longer valid. + */ + REMOVE_TEMP_DBS, + + /** + * Perform a checkpoint to make all the work of this environment + * startup persistent, so it is not repeated in future startups. + */ + CKPT, + + /** + * Basic recovery is completed, and the environment is able to + * service operations.
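A sketch of wiring a recovery-phase listener, assuming ProgressListener qualifies as a functional interface (it declares a single method) and using an illustrative environment path:

    import java.io.File;
    import com.sleepycat.je.*;

    public class RecoveryListenerExample {
        // Sketch: log each recovery phase during environment startup.
        public static void main(String[] args) {
            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            config.setRecoveryProgressListener(
                (phase, n, total) -> {
                    System.out.println("recovery phase " + phase + ": " + n + "/" + total);
                    return true; // keep going; false would abort startup
                });
            Environment env = new Environment(new File("/tmp/je-env"), config);
            env.close();
        }
    }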
+ */ + RECOVERY_FINISHED, + + /** + * For replicated systems only: locate the master of the + * replication group by querying others in the group, and holding an + * election if necessary. + */ + FIND_MASTER, + + /** + * For replicated systems only: if a replica, process enough of the + * replication stream so that the environment fulfills the required + * consistency policy, as defined by parameters passed to the + * ReplicatedEnvironment constructor. + */ + BECOME_CONSISTENT + } \ No newline at end of file diff --git a/src/com/sleepycat/je/ReplicaConsistencyPolicy.java b/src/com/sleepycat/je/ReplicaConsistencyPolicy.java new file mode 100644 index 0000000..a881377 --- /dev/null +++ b/src/com/sleepycat/je/ReplicaConsistencyPolicy.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * The interface for Consistency policies used to provide consistency + * guarantees at a Replica. ReplicaConsistencyPolicies are only used by + * Berkeley DB JE High Availability. + *

        + * A transaction initiated at a Replica will wait in + * the {@link com.sleepycat.je.Environment#beginTransaction} method until the + * consistency policy is satisfied. + * Consistency policies are specified either at a per-transaction level through + * {@link TransactionConfig#setConsistencyPolicy} or as a replication-node-wide + * default through {@link + * com.sleepycat.je.rep.ReplicationConfig#setConsistencyPolicy}. + * + * @see Managing Consistency + */ +public interface ReplicaConsistencyPolicy { + + /** + * @hidden + * For internal use only. + * + * Ensures that the replica is within the constraints specified by this + * policy. If it isn't, the method waits until the constraint is satisfied + * by the replica. + * + * @param repInstance identifies the replicated environment that must meet + * this consistency requirement. + */ + public void ensureConsistency(EnvironmentImpl repInstance) + throws InterruptedException; + + /** + * Returns the name used to identify the policy. The name is used when + * constructing policy property values for use in je.properties files. + */ + public String getName(); + + /** + * The timeout associated with the consistency policy. If consistency + * cannot be established by the Replica within the timeout period, a {@link + * com.sleepycat.je.rep.ReplicaConsistencyException} is thrown by {@link + * com.sleepycat.je.Environment#beginTransaction}. + * + * @return the timeout associated with the policy + */ + public long getTimeout(TimeUnit unit); +} diff --git a/src/com/sleepycat/je/RunRecoveryException.java b/src/com/sleepycat/je/RunRecoveryException.java new file mode 100644 index 0000000..a00fee1 --- /dev/null +++ b/src/com/sleepycat/je/RunRecoveryException.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * This base class of {@link EnvironmentFailureException} is deprecated but + * exists for API backward compatibility. + * + *

        Prior to JE 4.0, {@code RunRecoveryException} was thrown to indicate that + * the JE environment is invalid and cannot safely continue. Applications + * catching {@code RunRecoveryException} prior to JE 4.0 were required to close + * and re-open the {@code Environment}.

        + * + *

        When using JE 4.0 or later, the application should catch {@link + * EnvironmentFailureException}. The application should then call {@link + * Environment#isValid} to determine whether the {@code Environment} must be + * closed and re-opened, or can continue operating without being closed. See + * {@link EnvironmentFailureException}.

        + * + * @deprecated replaced by {@link EnvironmentFailureException} and {@link + * Environment#isValid}. + */ +@Deprecated +public abstract class RunRecoveryException extends DatabaseException { + + private static final long serialVersionUID = 1913208269L; + + /** + * For internal use only. + * @hidden + */ + public RunRecoveryException(String message) { + super(message); + } + + /** + * For internal use only. + * @hidden + */ + public RunRecoveryException(String message, Throwable e) { + super(message, e); + } +} diff --git a/src/com/sleepycat/je/SecondaryAssociation.java b/src/com/sleepycat/je/SecondaryAssociation.java new file mode 100644 index 0000000..6c6129d --- /dev/null +++ b/src/com/sleepycat/je/SecondaryAssociation.java @@ -0,0 +1,288 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.Collection; + +/** + * @hidden + * For internal use only. + * + * Provides a way to create an association between primary and secondary + * databases that is not limited to a one-to-many association. + *

        + * By implementing this interface, a secondary database may be associated with + * one or more primary databases. For example, imagine an application that + * wishes to partition data from a single logical "table" into multiple primary + * databases to take advantage of the performance benefits of {@link + * Environment#removeDatabase} for a single partition, while at the same time + * having a single index database for the entire "table". For read operations + * via a secondary database, JE reads the primary key from the secondary + * database and then must read the primary record from the primary database. + * The mapping from a primary key to its primary database is performed by the + * user's implementation of the {@link #getPrimary} method. + *

        + * In addition, a primary database may be divided into subsets of keys where + * each subset may be efficiently indexed by zero or more secondary databases. + * This effectively allows multiple logical "tables" per primary database. + * When a primary record is written, the secondaries for its logical "table" + * are returned by the user's implementation of {@link #getSecondaries}. + * During a primary record write, because {@link #getSecondaries} is called to + * determine the secondary databases, only the key creators/extractors for + * those secondaries are invoked. For example, if you have a single primary + * database with 10 logical "tables", each of which has 5 distinct secondary + * databases, when a record is written only 5 key creators/extractors will be + * invoked, not 50. + *

        + * Configuring a SecondaryAssociation + *

        + * When primary and secondary databases are associated using a {@code + * SecondaryAssociation}, the databases must all be configured with the same + * {@code SecondaryAssociation} instance. A common error is to forget to + * configure the {@code SecondaryAssociation} on the primary database. + *

        + * A {@code SecondaryAssociation} is configured using {@link + * DatabaseConfig#setSecondaryAssociation} and {@link + * SecondaryConfig#setSecondaryAssociation} for a primary and secondary + * database, respectively. When calling {@link + * Environment#openSecondaryDatabase}, null must be passed for the + * primaryDatabase parameter when a {@code SecondaryAssociation} is configured. + *

        + * Note that when a {@code SecondaryAssociation} is configured, {@code true} + * may not be passed to the {@link SecondaryConfig#setAllowPopulate} method. + * Population of new secondary databases in an existing {@code + * SecondaryAssociation} is done differently, as described below. + *

        + * Adding and removing secondary associations + *

        + * The state information defining the association between primary and secondary + * databases is maintained by the application, and made available to JE by + * implementing the {@code SecondaryAssociation} interface. The application + * may add/remove databases to/from the association without any explicit + * coordination with JE. However, certain rules need to be followed to + * maintain data integrity. + *

        + * In the simplest case, there is a fixed (never changing) set of primary and + * secondary databases and they are added to the association at the time they + * are created, before writing or reading records. In this case, since reads + * and writes do not occur concurrently with changes to the association, no + * special rules are needed. For example: + *

          + *
1. Open the databases.
2. Add databases to the association.
3. Begin writing and reading.
        + *

        + * In other cases, primary and secondary databases may be added to an already + * established association, along with concurrent reads and writes. The rules + * for doing so are described below. + *

        + * Adding an empty (newly created) primary database is no more complex than in + * the simple case above, assuming that writes to the primary database do not + * proceed until after it has been added to the association. + *

          + *
1. Open the new primary database.
2. Add the primary database to the association such that {@link #getSecondaries} returns the appropriate list when passed a key in the new primary database, and {@link #getPrimary} returns the new database when passed a key it contains.
3. Begin writing to the new primary database.
        + *

        + * Using the procedure above for adding a primary database, records will be + * indexed in secondary databases as they are added to the primary database, + * and will be immediately available to secondary index queries. + *

        + * Alternatively, records can be added to the primary database without making + * them immediately available to secondary index queries by using the following + * procedure. Records will be indexed in secondary databases as they are added + * but will not be returned by secondary queries until after the last step. + *

          + *
1. Open the new primary database.
2. Modify the association such that {@link #getSecondaries} returns the appropriate list when passed a key in the new primary database, but {@link #getPrimary} returns null when passed a key it contains.
3. Write records in the new primary database.
4. Modify the association such that {@link #getPrimary} returns the new database when passed a key it contains.
        + *

        + * The following procedure should be used for removing an existing (non-empty) + * primary database from an association. + *

          + *
1. Remove the primary database from the association, such that {@link #getPrimary} returns null for all keys in the primary.
2. Disable read and write operations on the database and ensure that all in-progress operations have completed.
3. At this point the primary database may be closed and removed (e.g., with {@link Environment#removeDatabase}), if desired.
4. For each secondary database associated with the removed primary database, {@link SecondaryDatabase#deleteObsoletePrimaryKeys} should be called to process all secondary records.
5. At this point {@link #getPrimary} may throw an exception (rather than return null) when called for a primary key in the removed database, if desired.
        + *

        + * The following procedure should be used for adding a new (empty) secondary + * database to an association, and to populate the secondary incrementally + * from its associated primary database(s). + *

          + *
1. Open the secondary database and call {@link SecondaryDatabase#startIncrementalPopulation}. The secondary may not be used (yet) for read operations.
2. Add the secondary database to the association, such that the collection returned by {@link #getSecondaries} includes the new database, for all primary keys that should be indexed.
3. For each primary database associated with the new secondary database, {@link Database#populateSecondaries} should be called to process all primary records.
4. Call {@link SecondaryDatabase#endIncrementalPopulation}. The secondary database may now be used for read operations.
        + *

        + * The following procedure should be used for removing an existing (non-empty) + * secondary database from an association. + *

          + *
1. Remove the secondary database from the association, such that it is not included in the collection returned by {@link #getSecondaries}.
2. Disable read and write operations on the database and ensure that all in-progress operations have completed.
3. The secondary database may now be closed and removed, if desired.
        + *

        + * Other Implementation Requirements + *

        + * The implementation must use data structures for storing and accessing + * association information that provide happens-before semantics. In + * other words, when the association is changed, other threads calling the + * methods in this interface must see the changes as soon as the change is + * complete. + *

        + * The implementation should use non-blocking data structures to hold + * association information to avoid blocking on the methods in this interface, + * which may be frequently called from many threads. + *

        + * The simplest way to meet the above two requirements is to use concurrent + * structures such as those provided in the java.util.concurrent package, e.g., + * ConcurrentHashMap. + *
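A sketch of one possible implementation along these lines; the partitioning scheme (partitionOf) is hypothetical and application-specific:

    import java.util.Collection;
    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import com.sleepycat.je.*;

    // Sketch of a partitioned association backed by concurrent, non-blocking
    // structures, per the requirements above.
    public abstract class PartitionedAssociation implements SecondaryAssociation {

        private final Map<Integer, Database> primaries = new ConcurrentHashMap<>();
        private final Set<SecondaryDatabase> secondaries = ConcurrentHashMap.newKeySet();

        // Application-specific routing of a primary key to its partition.
        protected abstract int partitionOf(DatabaseEntry primaryKey);

        public void addPrimary(int partition, Database db) { primaries.put(partition, db); }
        public void addSecondary(SecondaryDatabase db) { secondaries.add(db); }

        @Override
        public boolean isEmpty() { return secondaries.isEmpty(); }

        @Override
        public Database getPrimary(DatabaseEntry primaryKey) {
            return primaries.get(partitionOf(primaryKey)); // null if partition removed
        }

        @Override
        public Collection<SecondaryDatabase> getSecondaries(DatabaseEntry primaryKey) {
            return secondaries;
        }
    }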

        + * For an example implementation, see: + * test/com/sleepycat/je/test/SecondaryAssociationTest.java + */ +public interface SecondaryAssociation { + + /* + * Implementation note on concurrent access and latching. + * + * When adding/removing primary/secondary DBs to an existing association, + * concurrency issues arise. This section explains how they are handled, + * assuming the rules described in the javadoc below are followed when a + * custom association is configured. + * + * There are two cases: + * + * 1) A custom association is NOT configured. An internal association is + * created that manages the one-many association between a primary and its + * secondaries. + * + * 2) A custom association IS configured. It is used internally. + * + * For case 1, no custom association, DB addition and removal take place in + * the Environment.openSecondaryDatabase and SecondaryDatabase.close + * methods, respectively. These methods, and the changes to the internal + * association, are protected by acquiring the secondary latch + * (EnvironmentImpl.getSecondaryAssociationLock) exclusively. All write + * operations that use the association -- puts and deletes -- acquire this + * latch shared. Therefore, write operations and changes to the + * association do not take place concurrently. Read operations via a + * secondary DB/cursor do not acquire this latch, so they can take place + * concurrently with changes to the association; however, read operations + * only call getPrimary and they expect it may return null at any time (the + * operation ignores the record in that case). It is impossible to close + * a primary while it is being read via a secondary, because secondaries + * must be closed before their associated primary. (And the application is + * responsible for finishing reads before closing any DB.) + * + * For case 2, a custom association, the secondary latch does not provide + * protection because modifications to the association take place in the + * application domain, not in the Environment.openSecondaryDatabase and + * SecondaryDatabase.close methods. Instead, protection is provided by the + * rules described in the javadoc here. Namely: + * + * Primary/secondary DBs are added to the association AFTER opening them + * (of course). The DBs will not be be accessed until they are added to + * the association. + * + * Primary/secondary DBs are removed from the association BEFORE closing + * them. The application is responsible for ensuring they are no longer + * accessed before they are closed. + * + * When a primary DB is added, its secondaries will be kept in sync as + * soon as writing begins, since it must be added to the association + * before writes are allowed. + * + * When a secondary DB is added, the incremental population procedure + * ensures that it is fully populated before being accessed. + */ + + /** + * Returns true if there are no secondary databases in the association. + * + * This method is used by JE to optimize for the case where a primary + * has no secondary databases. This allows a {@code SecondaryAssociation} + * to be configured on a primary database even when it has no associated + * secondaries, with no added overhead, and to allow for the possibility + * that secondaries will be added later. + *

        + * For example, when a primary database has no secondaries, no internal + * latching is performed by JE during writes. JE determines that no + * secondaries are present by calling {@code isEmpty}, prior to doing the + * write operation. + * + * @throws RuntimeException if an unexpected problem occurs. The exception + * will be thrown to the application, which should then take appropriate + * action. + */ + boolean isEmpty(); + + /** + * Returns the primary database for the given primary key. This method + * is called during read operations on secondary databases that are + * configured with this {@code SecondaryAssociation}. + * + * This method should return null when the primary database has been + * removed from the association, or when it should not be included in the + * results for secondary queries. In this case, the current operation will + * treat the secondary record as if it does not exist, i.e., it will be + * skipped over. + * + * @throws RuntimeException if an unexpected problem occurs. The exception + * will be thrown to the application, which should then take appropriate + * action. + */ + Database getPrimary(DatabaseEntry primaryKey); + + /** + * Returns the secondary databases associated with the given primary key. + * This method is called during write operations on primary databases that + * are configured with this {@code SecondaryAssociation}. + * + * @throws RuntimeException if an unexpected problem occurs. The exception + * will be thrown to the application, which should then take appropriate + * action. + */ + Collection<SecondaryDatabase> getSecondaries(DatabaseEntry primaryKey); +} diff --git a/src/com/sleepycat/je/SecondaryConfig.java b/src/com/sleepycat/je/SecondaryConfig.java new file mode 100644 index 0000000..415c30c --- /dev/null +++ b/src/com/sleepycat/je/SecondaryConfig.java @@ -0,0 +1,668 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.List; + +import com.sleepycat.je.trigger.Trigger; +import com.sleepycat.je.utilint.DatabaseUtil; + +/** + * The configuration properties of a SecondaryDatabase extend + * those of a primary Database. The secondary database + * configuration is specified when calling {@link + * Environment#openSecondaryDatabase Environment.openSecondaryDatabase}. + * + *

        To create a configuration object with default attributes:

        + * + *
        + *     SecondaryConfig config = new SecondaryConfig();
        + * 
        + * + *

        To set custom attributes:

        + * + *
        + *     SecondaryConfig config = new SecondaryConfig();
        + *     config.setAllowCreate(true);
        + *     config.setSortedDuplicates(true);
        + *     config.setKeyCreator(new MyKeyCreator());
        + * 
        + * + * @see Environment#openSecondaryDatabase Environment.openSecondaryDatabase + * @see SecondaryDatabase + */ +public class SecondaryConfig extends DatabaseConfig { + + /* + * For internal use, to allow null as a valid value for the config + * parameter. + */ + public static final SecondaryConfig DEFAULT = new SecondaryConfig(); + + private boolean allowPopulate; + private SecondaryKeyCreator keyCreator; + private SecondaryMultiKeyCreator multiKeyCreator; + private Database foreignKeyDatabase; + private ForeignKeyDeleteAction foreignKeyDeleteAction = + ForeignKeyDeleteAction.ABORT; + private ForeignKeyNullifier foreignKeyNullifier; + private ForeignMultiKeyNullifier foreignMultiKeyNullifier; + private boolean extractFromPrimaryKeyOnly; + private boolean immutableSecondaryKey; + + /** + * Creates an instance with the system's default settings. + */ + public SecondaryConfig() { + } + + /** + * Specifies the user-supplied object used for creating single-valued + * secondary keys. + * + *

        Unless the primary database is read-only, a key creator is required + * when opening a secondary database. Either a KeyCreator or + * MultiKeyCreator must be specified, but both may not be specified.

        + * + *


        WARNING: Key creator instances are shared by multiple + * threads and key creator methods are called without any special + * synchronization. Therefore, key creators must be thread safe. In + * general no shared state should be used and any caching of computed + * values must be done with proper synchronization.
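A sketch of a stateless, and therefore trivially thread-safe, key creator; the choice of indexing the first four data bytes is purely illustrative:

    import com.sleepycat.je.*;

    // Sketch: a key creator with no shared state, so concurrent calls
    // need no synchronization.
    public class FirstFourBytesKeyCreator implements SecondaryKeyCreator {

        @Override
        public boolean createSecondaryKey(SecondaryDatabase secondary,
                                          DatabaseEntry key,
                                          DatabaseEntry data,
                                          DatabaseEntry result) {
            byte[] bytes = data.getData();
            if (bytes == null || bytes.length < 4) {
                return false; // no secondary key for this record
            }
            byte[] secondaryKey = new byte[4];
            System.arraycopy(bytes, 0, secondaryKey, 0, 4);
            result.setData(secondaryKey);
            return true;
        }
    }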

        + * + * @param keyCreator the user-supplied object used for creating + * single-valued secondary keys. + * + * @return this + */ + public SecondaryConfig setKeyCreator(SecondaryKeyCreator keyCreator) { + setKeyCreatorVoid(keyCreator); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setKeyCreatorVoid(SecondaryKeyCreator keyCreator) { + this.keyCreator = keyCreator; + } + + /** + * Returns the user-supplied object used for creating single-valued + * secondary keys. + * + * @return the user-supplied object used for creating single-valued + * secondary keys. + * + * @see #setKeyCreator + */ + public SecondaryKeyCreator getKeyCreator() { + return keyCreator; + } + + /** + * Specifies the user-supplied object used for creating multi-valued + * secondary keys. + * + *

        Unless the primary database is read-only, a key creator is required + * when opening a secondary database. Either a KeyCreator or + * MultiKeyCreator must be specified, but both may not be specified.

        + * + *

        WARNING: Key creator instances are shared by multiple + * threads and key creator methods are called without any special + * synchronization. Therefore, key creators must be thread safe. In + * general no shared state should be used and any caching of computed + * values must be done with proper synchronization.

        + * + * @param multiKeyCreator the user-supplied object used for creating + * multi-valued secondary keys. + * + * @return this + */ + public SecondaryConfig + setMultiKeyCreator(SecondaryMultiKeyCreator multiKeyCreator) { + + setMultiKeyCreatorVoid(multiKeyCreator); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void + setMultiKeyCreatorVoid(SecondaryMultiKeyCreator multiKeyCreator) { + + this.multiKeyCreator = multiKeyCreator; + } + + /** + * Returns the user-supplied object used for creating multi-valued + * secondary keys. + * + * @return the user-supplied object used for creating multi-valued + * secondary keys. + * + * @see #setKeyCreator + */ + public SecondaryMultiKeyCreator getMultiKeyCreator() { + return multiKeyCreator; + } + + /** + * Specifies whether automatic population of the secondary is allowed. + * + *
+     * <p>If automatic population is allowed, when the secondary database is
+     * opened it is checked to see if it is empty.  If it is empty, the primary
+     * database is read in its entirety and keys are added to the secondary
+     * database using the information read from the primary.</p>
+     *
+     * <p>If this property is set to true and the database is transactional,
+     * the population of the secondary will be done within the explicit or
+     * auto-commit transaction that is used to open the database.</p>
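+     *
+     * <p>For example (a sketch; env, txn, primaryDb and the key creator are
+     * assumed to exist elsewhere):</p>
+     *
+     * <blockquote><pre>
+     * SecondaryConfig secConfig = new SecondaryConfig();
+     * secConfig.setAllowCreate(true);
+     * secConfig.setSortedDuplicates(true);
+     * secConfig.setKeyCreator(new WholeDataKeyCreator());
+     * // Build the index from existing primary records if it is empty when
+     * // first opened.
+     * secConfig.setAllowPopulate(true);
+     * SecondaryDatabase secDb =
+     *     env.openSecondaryDatabase(txn, "myIndex", primaryDb, secConfig);
+     * </pre></blockquote>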
        + * + * @param allowPopulate whether automatic population of the secondary is + * allowed. + * + * @return this + */ + public SecondaryConfig setAllowPopulate(boolean allowPopulate) { + setAllowPopulateVoid(allowPopulate); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setAllowPopulateVoid(boolean allowPopulate) { + this.allowPopulate = allowPopulate; + } + + /** + * Returns whether automatic population of the secondary is allowed. If + * {@link #setAllowPopulate} has not been called, this method returns + * false. + * + * @return whether automatic population of the secondary is allowed. + * + * @see #setAllowPopulate + */ + public boolean getAllowPopulate() { + return allowPopulate; + } + + /** + * Defines a foreign key integrity constraint for a given foreign key + * database. + * + *
+     * <p>If this property is non-null, a record must be present in the
+     * specified foreign database for every record in the secondary database,
+     * where the secondary key value is equal to the foreign database key
+     * value.  Whenever a record is to be added to the secondary database, the
+     * secondary key is used as a lookup key in the foreign database.  If the
+     * key is not found in the foreign database, a {@link
+     * ForeignConstraintException} is thrown.</p>
+     *
+     * <p>The foreign database must not have duplicates allowed.  If duplicates
+     * are allowed, an IllegalArgumentException will be thrown when the
+     * secondary database is opened.</p>
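+     *
+     * <p>A sketch of wiring up the constraint (the personDb handle is a
+     * hypothetical foreign database opened elsewhere):</p>
+     *
+     * <blockquote><pre>
+     * // Every secondary key must also exist as a key in personDb;
+     * // otherwise insertion throws ForeignConstraintException.
+     * secConfig.setForeignKeyDatabase(personDb);
+     * </pre></blockquote>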
        + * + * @param foreignKeyDatabase the database used to check the foreign key + * integrity constraint, or null if no foreign key constraint should be + * checked. + * + * @return this + */ + public SecondaryConfig setForeignKeyDatabase(Database foreignKeyDatabase) { + setForeignKeyDatabaseVoid(foreignKeyDatabase); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setForeignKeyDatabaseVoid(Database foreignKeyDatabase) { + this.foreignKeyDatabase = foreignKeyDatabase; + } + + /** + * Returns the database used to check the foreign key integrity constraint, + * or null if no foreign key constraint will be checked. + * + * @return the foreign key database, or null. + * + * @see #setForeignKeyDatabase + */ + public Database getForeignKeyDatabase() { + return foreignKeyDatabase; + } + + /** + * Specifies the action taken when a referenced record in the foreign key + * database is deleted. + * + *
+     * <p>This property is ignored if the foreign key database property is
+     * null.</p>
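+     *
+     * <p>For example (a sketch, continuing the configuration above):</p>
+     *
+     * <blockquote><pre>
+     * // Deleting a record in the foreign database also deletes the primary
+     * // records whose secondary keys reference it.
+     * secConfig.setForeignKeyDeleteAction(ForeignKeyDeleteAction.CASCADE);
+     * </pre></blockquote>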
        + * + * @param foreignKeyDeleteAction the action taken when a referenced record + * in the foreign key database is deleted. + * + * @see ForeignKeyDeleteAction @see #setForeignKeyDatabase + * + * @return this + */ + public SecondaryConfig setForeignKeyDeleteAction + (ForeignKeyDeleteAction foreignKeyDeleteAction) { + + setForeignKeyDeleteActionVoid(foreignKeyDeleteAction); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setForeignKeyDeleteActionVoid + (ForeignKeyDeleteAction foreignKeyDeleteAction) { + + DatabaseUtil.checkForNullParam(foreignKeyDeleteAction, + "foreignKeyDeleteAction"); + this.foreignKeyDeleteAction = foreignKeyDeleteAction; + } + + /** + * Returns the action taken when a referenced record in the foreign key + * database is deleted. + * + * @return the action taken when a referenced record in the foreign key + * database is deleted. + * + * @see #setForeignKeyDeleteAction + */ + public ForeignKeyDeleteAction getForeignKeyDeleteAction() { + return foreignKeyDeleteAction; + } + + /** + * Specifies the user-supplied object used for setting single-valued + * foreign keys to null. + * + *
+     * <p>This method may not be used along with {@link
+     * #setMultiKeyCreator}.  When using a multi-key creator, use {@link
+     * #setForeignMultiKeyNullifier} instead.</p>
+     *
+     * <p>If the foreign key database property is non-null and the foreign key
+     * delete action is NULLIFY, this property is required to be
+     * non-null; otherwise, this property is ignored.</p>
+     *
+     * <p>WARNING: Key nullifier instances are shared by multiple
+     * threads and key nullifier methods are called without any special
+     * synchronization.  Therefore, key nullifiers must be thread safe.  In
+     * general no shared state should be used and any caching of computed
+     * values must be done with proper synchronization.</p>
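+     *
+     * <p>A minimal sketch (the one-byte key layout is hypothetical):</p>
+     *
+     * <blockquote><pre>
+     * class ClearByteNullifier implements ForeignKeyNullifier {
+     *     public boolean nullifyForeignKey(SecondaryDatabase secondary,
+     *                                      DatabaseEntry data) {
+     *         // Suppose the foreign key is the byte at offset 0 of the
+     *         // primary data; overwrite it with a reserved "null" value.
+     *         data.getData()[data.getOffset()] = (byte) 0;
+     *         return true;  // true: write the modified data to the primary
+     *     }
+     * }
+     * </pre></blockquote>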
        + * + * @param foreignKeyNullifier the user-supplied object used for setting + * single-valued foreign keys to null. + * + * @see ForeignKeyNullifier @see ForeignKeyDeleteAction#NULLIFY @see + * #setForeignKeyDatabase + * + * @return this + */ + public SecondaryConfig + setForeignKeyNullifier(ForeignKeyNullifier foreignKeyNullifier) { + + setForeignKeyNullifierVoid(foreignKeyNullifier); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void + setForeignKeyNullifierVoid(ForeignKeyNullifier foreignKeyNullifier) { + + this.foreignKeyNullifier = foreignKeyNullifier; + } + + /** + * Returns the user-supplied object used for setting single-valued foreign + * keys to null. + * + * @return the user-supplied object used for setting single-valued foreign + * keys to null. + * + * @see #setForeignKeyNullifier + */ + public ForeignKeyNullifier getForeignKeyNullifier() { + return foreignKeyNullifier; + } + + /** + * Specifies the user-supplied object used for setting multi-valued foreign + * keys to null. + * + *
+     * <p>If the foreign key database property is non-null and the foreign key
+     * delete action is NULLIFY, this property is required to be
+     * non-null; otherwise, this property is ignored.</p>
+     *
+     * <p>WARNING: Key nullifier instances are shared by multiple
+     * threads and key nullifier methods are called without any special
+     * synchronization.  Therefore, key nullifiers must be thread safe.  In
+     * general no shared state should be used and any caching of computed
+     * values must be done with proper synchronization.</p>
        + * + * @param foreignMultiKeyNullifier the user-supplied object used for + * setting multi-valued foreign keys to null. + * + * @see ForeignMultiKeyNullifier @see ForeignKeyDeleteAction#NULLIFY @see + * #setForeignKeyDatabase + * + * @return this + */ + public SecondaryConfig setForeignMultiKeyNullifier + (ForeignMultiKeyNullifier foreignMultiKeyNullifier) { + + setForeignMultiKeyNullifierVoid(foreignMultiKeyNullifier); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setForeignMultiKeyNullifierVoid + (ForeignMultiKeyNullifier foreignMultiKeyNullifier) { + + this.foreignMultiKeyNullifier = foreignMultiKeyNullifier; + } + + /** + * Returns the user-supplied object used for setting multi-valued foreign + * keys to null. + * + * @return the user-supplied object used for setting multi-valued foreign + * keys to null. + * + * @see #setForeignMultiKeyNullifier + */ + public ForeignMultiKeyNullifier getForeignMultiKeyNullifier() { + return foreignMultiKeyNullifier; + } + + /** + * @hidden + * For internal use only. + * + * Specifies whether the key extractor/creator will only use the primary + * key. + * + *
+     * <p>Specifying that only the primary key is needed can be used to
+     * optimize primary database updates and deletions.  If a primary record is
+     * updated or deleted, and all associated secondaries have this property
+     * set to true, then the existing primary record will not be read.  This
+     * potentially saves an I/O.  When this property is not set to
+     * true (it is false by default), the existing primary record must be read
+     * if it is not already in cache, in order to pass the primary data to the
+     * key extractor/creator.</p>
+     *
+     * <p>Note that if this property is true, either null or a non-null value
+     * may be passed to the key extractor/creator for the primary data
+     * parameter.  The key extractor/creator is expected to ignore this
+     * parameter.</p>
        + * + * @param extractFromPrimaryKeyOnly whether the key extractor/creator will + * only use the primary key. + * + * @return this + */ + public SecondaryConfig + setExtractFromPrimaryKeyOnly(boolean extractFromPrimaryKeyOnly) { + + setExtractFromPrimaryKeyOnlyVoid(extractFromPrimaryKeyOnly); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void + setExtractFromPrimaryKeyOnlyVoid(boolean extractFromPrimaryKeyOnly) { + + this.extractFromPrimaryKeyOnly = extractFromPrimaryKeyOnly; + } + + /** + * @hidden + * For internal use only. + * + * Returns whether the key extractor/creator will only use the primary key. + * If {@link #setExtractFromPrimaryKeyOnly} has not been called, this + * method returns false. + * + * @return whether the key extractor/creator will only use the primary key. + * + * @see #setExtractFromPrimaryKeyOnly + */ + public boolean getExtractFromPrimaryKeyOnly() { + return extractFromPrimaryKeyOnly; + } + + /** + * Specifies whether the secondary key is immutable. + * + *
+     * <p>Specifying that a secondary key is immutable can be used to optimize
+     * updates when the secondary key in a primary record will never be changed
+     * after that primary record is inserted.  For immutable secondary keys, a
+     * best effort is made to avoid calling
+     * SecondaryKeyCreator.createSecondaryKey when a primary
+     * record is updated.  This optimization may reduce the overhead of an
+     * update operation significantly if the createSecondaryKey
+     * operation is expensive.</p>
+     *
+     * <p>Be sure to set this property to true only if the secondary key in the
+     * primary record is never changed.  If this rule is violated, the
+     * secondary index will become corrupted, that is, it will become out of
+     * sync with the primary.</p>
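+     *
+     * <p>For example (a sketch):</p>
+     *
+     * <blockquote><pre>
+     * // Safe only because, in this hypothetical schema, the indexed field
+     * // is written at insertion and never modified afterwards.
+     * secConfig.setImmutableSecondaryKey(true);
+     * </pre></blockquote>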
        + * + * @param immutableSecondaryKey whether the secondary key is immutable. + * + * @return this + */ + public SecondaryConfig + setImmutableSecondaryKey(boolean immutableSecondaryKey) { + + setImmutableSecondaryKeyVoid(immutableSecondaryKey); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setImmutableSecondaryKeyVoid(boolean immutableSecondaryKey) { + + this.immutableSecondaryKey = immutableSecondaryKey; + } + + /** + * Returns whether the secondary key is immutable. If {@link + * #setImmutableSecondaryKey} has not been called, this method returns + * false. + * + * @return whether the secondary key is immutable. + * + * @see #setImmutableSecondaryKey + */ + public boolean getImmutableSecondaryKey() { + return immutableSecondaryKey; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public SecondaryConfig clone() { + return (SecondaryConfig) super.clone(); + } + + /** + * For JCA Database handle caching. + * + * @throws IllegalArgumentException via JEConnection.openSecondaryDatabase. + */ + @Override + void validate(DatabaseConfig configArg) + throws DatabaseException { + + super.validate(configArg); + + if (configArg == null || + !(configArg instanceof SecondaryConfig)) { + throw new IllegalArgumentException + ("The SecondaryConfig argument is null."); + } + + SecondaryConfig config = (SecondaryConfig) configArg; + + boolean kcMatch = equalOrBothNull + (config.getKeyCreator(), keyCreator); + boolean mkcMatch = equalOrBothNull + (config.getMultiKeyCreator(), multiKeyCreator); + boolean fkdMatch = + (config.getForeignKeyDatabase() == foreignKeyDatabase); + boolean fkdaMatch = + (config.getForeignKeyDeleteAction() == foreignKeyDeleteAction); + boolean fknMatch = equalOrBothNull + (config.getForeignKeyNullifier(), foreignKeyNullifier); + boolean fmknMatch = equalOrBothNull + (config.getForeignMultiKeyNullifier(), foreignMultiKeyNullifier); + boolean imskMatch = + (config.getImmutableSecondaryKey() == immutableSecondaryKey); + if (kcMatch && + mkcMatch && + fkdMatch && + fkdaMatch && + fknMatch && + fmknMatch && + imskMatch) { + return; + } + + String message = + genSecondaryConfigMismatchMessage( + config, kcMatch, mkcMatch, fkdMatch, fkdaMatch, + fknMatch, fmknMatch, imskMatch); + throw new IllegalArgumentException(message); + } + + /** + * @hidden + * For internal use only. + */ + @Override + public DatabaseConfig setTriggers(List triggers) { + + if ((triggers == null) || (triggers.size() == 0)) { + return this; + } + + throw new IllegalArgumentException + ("Triggers may only be associated with a Primary database"); + } + + /** + * @hidden + * For internal use only. + */ + @Override + public DatabaseConfig setOverrideTriggers(@SuppressWarnings("unused") + boolean override) { + throw new IllegalArgumentException + ("Triggers may only be associated with a Primary database"); + } + + private boolean equalOrBothNull(Object o1, Object o2) { + return (o1 != null) ? 
        o1.equals(o2) : (o2 == null);
+    }
+
+    String genSecondaryConfigMismatchMessage(DatabaseConfig config,
+                                             boolean kcMatch,
+                                             boolean mkcMatch,
+                                             boolean fkdMatch,
+                                             boolean fkdaMatch,
+                                             boolean fknMatch,
+                                             boolean fmknMatch,
+                                             boolean imskMatch) {
+        StringBuilder ret = new StringBuilder
+            ("The following SecondaryConfig parameters for the\n" +
+             "cached Database do not match the parameters for the\n" +
+             "requested Database:\n");
+        if (!kcMatch) {
+            ret.append(" SecondaryKeyCreator\n");
+        }
+
+        if (!mkcMatch) {
+            ret.append(" SecondaryMultiKeyCreator\n");
+        }
+
+        if (!fkdMatch) {
+            ret.append(" ForeignKeyDatabase\n");
+        }
+
+        if (!fkdaMatch) {
+            ret.append(" ForeignKeyDeleteAction\n");
+        }
+
+        if (!fknMatch) {
+            ret.append(" ForeignKeyNullifier\n");
+        }
+
+        if (!fmknMatch) {
+            ret.append(" ForeignMultiKeyNullifier\n");
+        }
+
+        if (!imskMatch) {
+            ret.append(" ImmutableSecondaryKey\n");
+        }
+
+        return ret.toString();
+    }
+
+    /**
+     * Returns the values for each configuration attribute.
+     *
+     * @return the values for each configuration attribute.
+     */
+    @Override
+    public String toString() {
+        return "keyCreator=" + keyCreator +
+            "\nmultiKeyCreator=" + multiKeyCreator +
+            "\nallowPopulate=" + allowPopulate +
+            "\nforeignKeyDatabase=" + foreignKeyDatabase +
+            "\nforeignKeyDeleteAction=" + foreignKeyDeleteAction +
+            "\nforeignKeyNullifier=" + foreignKeyNullifier +
+            "\nforeignMultiKeyNullifier=" + foreignMultiKeyNullifier +
+            "\nimmutableSecondaryKey=" + immutableSecondaryKey +
+            "\n";
+    }
+}
diff --git a/src/com/sleepycat/je/SecondaryConfigBeanInfo.java b/src/com/sleepycat/je/SecondaryConfigBeanInfo.java
new file mode 100644
index 0000000..7c6e850
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryConfigBeanInfo.java
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import java.beans.BeanDescriptor;
+import java.beans.PropertyDescriptor;
+
+/**
+ * @hidden
+ * Getter/Setters for JavaBean based tools.
+ */
+public class SecondaryConfigBeanInfo extends DatabaseConfigBeanInfo {
+
+    @Override
+    public BeanDescriptor getBeanDescriptor() {
+        return getBdescriptor(SecondaryConfig.class);
+    }
+
+    @Override
+    public PropertyDescriptor[] getPropertyDescriptors() {
+        return getPdescriptor(SecondaryConfig.class);
+    }
+}
diff --git a/src/com/sleepycat/je/SecondaryConstraintException.java b/src/com/sleepycat/je/SecondaryConstraintException.java
new file mode 100644
index 0000000..bf4784a
--- /dev/null
+++ b/src/com/sleepycat/je/SecondaryConstraintException.java
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Base class for exceptions thrown when a write operation fails because of a + * secondary constraint. See subclasses for more information. + * + *
+ * <p>The {@link Transaction} handle is invalidated as a result of this
+ * exception.</p>
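+ *
+ * <p>One plausible way to handle it (a sketch, not the only valid
+ * pattern):</p>
+ *
+ * <blockquote><pre>
+ * try {
+ *     primaryDb.put(txn, key, data);
+ *     txn.commit();
+ * } catch (SecondaryConstraintException e) {
+ *     // The transaction is invalidated; abort and retry or report.
+ *     txn.abort();
+ * }
+ * </pre></blockquote>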
        + * + * @see Special considerations + * for using Secondary Databases with and without Transactions + * + * @since 4.0 + */ +public abstract class SecondaryConstraintException + extends SecondaryReferenceException { + + private static final long serialVersionUID = 1L; + + /** + * For internal use only. + * @hidden + */ + public SecondaryConstraintException(Locker locker, + String message, + String secDbName, + DatabaseEntry secKey, + DatabaseEntry priKey, + long expirationTime) { + super(locker, message, secDbName, secKey, priKey, expirationTime); + } + + /** + * For internal use only. + * @hidden + */ + SecondaryConstraintException(String message, + SecondaryReferenceException cause) { + super(message, cause); + } +} diff --git a/src/com/sleepycat/je/SecondaryCursor.java b/src/com/sleepycat/je/SecondaryCursor.java new file mode 100644 index 0000000..9138079 --- /dev/null +++ b/src/com/sleepycat/je/SecondaryCursor.java @@ -0,0 +1,1796 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.HashSet; +import java.util.Set; +import java.util.logging.Level; + +import com.sleepycat.je.dbi.GetMode; +import com.sleepycat.je.dbi.SearchMode; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.utilint.DatabaseUtil; + +/** + * A database cursor for a secondary database. Cursors are not thread safe and + * the application is responsible for coordinating any multithreaded access to + * a single cursor object. + * + *
+ * <p>Secondary cursors are returned by {@link SecondaryDatabase#openCursor
+ * SecondaryDatabase.openCursor} and {@link
+ * SecondaryDatabase#openSecondaryCursor
+ * SecondaryDatabase.openSecondaryCursor}.  The distinguishing characteristics
+ * of a secondary cursor are:</p>
+ *
+ * <ul>
+ * <li>Direct calls to put() methods on a secondary cursor
+ * are prohibited.</li>
+ *
+ * <li>The {@link #delete} method of a secondary cursor will delete the primary
+ * record as well as all its associated secondary records.</li>
+ *
+ * <li>Calls to all get methods will return the data from the associated
+ * primary database.</li>
+ *
+ * <li>Additional get method signatures are provided to return the primary key
+ * in an additional pKey parameter.</li>
+ *
+ * <li>Calls to {@link #dup} will return a {@link SecondaryCursor}.</li>
+ * </ul>
+ *
+ * <p>To obtain a secondary cursor with default attributes:</p>
+ *
+ * <blockquote><pre>
+ *     SecondaryCursor cursor = myDb.openSecondaryCursor(txn, null);
+ * </pre></blockquote>
+ *
+ * <p>To customize the attributes of a cursor, use a CursorConfig object.</p>
+ *
+ * <blockquote><pre>
+ *     CursorConfig config = new CursorConfig();
+ *     config.setReadUncommitted(true);
+ *     SecondaryCursor cursor = myDb.openSecondaryCursor(txn, config);
+ * </pre></blockquote>
        + */ +public class SecondaryCursor extends Cursor { + + private final SecondaryDatabase secondaryDb; + + /** + * Cursor constructor. Not public. To get a cursor, the user should call + * SecondaryDatabase.cursor(); + */ + SecondaryCursor(final SecondaryDatabase dbHandle, + final Transaction txn, + final CursorConfig cursorConfig) { + super(dbHandle, txn, cursorConfig); + secondaryDb = dbHandle; + } + + /** + * Cursor constructor. Not public. To get a cursor, the user should call + * SecondaryDatabase.cursor(); + */ + SecondaryCursor(final SecondaryDatabase dbHandle, + final Locker locker, + final CursorConfig cursorConfig) { + super(dbHandle, locker, cursorConfig); + secondaryDb = dbHandle; + } + + /** + * Copy constructor. + */ + private SecondaryCursor(final SecondaryCursor cursor, + final boolean samePosition) { + super(cursor, samePosition); + secondaryDb = cursor.secondaryDb; + } + + boolean isSecondaryCursor() { + return true; + } + + /** + * Returns the Database handle associated with this Cursor. + * + * @return The Database handle associated with this Cursor. + */ + @Override + public SecondaryDatabase getDatabase() { + return secondaryDb; + } + + /** + * Returns the primary {@link com.sleepycat.je.Database Database} + * associated with this cursor. + * + *
+     * <p>Calling this method is the equivalent of the following
+     * expression:</p>
+     *
+     * <blockquote><pre>
+     *         getDatabase().getPrimaryDatabase()
+     * </pre></blockquote>
        + * + * @return The primary {@link com.sleepycat.je.Database Database} + * associated with this cursor. + */ + + /* + * To be added when SecondaryAssociation is published: + * If a {@link SecondaryAssociation} is {@link + * SecondaryCursor#setSecondaryAssociation configured}, this method returns + * null. + */ + public Database getPrimaryDatabase() { + return secondaryDb.getPrimaryDatabase(); + } + + /** + * Returns a new SecondaryCursor for the same transaction as + * the original cursor. + * + * + */ + @Override + public SecondaryCursor dup(final boolean samePosition) { + checkOpenAndState(false); + return new SecondaryCursor(this, samePosition); + } + + /** + * Returns a new copy of the cursor as a SecondaryCursor. + * + *
+     * <p>Calling this method is the equivalent of calling {@link #dup} and
+     * casting the result to {@link SecondaryCursor}.</p>
+     *
+     * @see #dup
+     *
+     * @deprecated As of JE 4.0.13, replaced by {@link Cursor#dup}.
        + */ + public SecondaryCursor dupSecondary(final boolean samePosition) { + return dup(samePosition); + } + + /** + * Delete the record to which the cursor refers from the primary database + * and all secondary indices. + * + *
+     * <p>This method behaves as if {@link Database#delete(Transaction,
+     * DatabaseEntry, WriteOptions)} were called for the primary database,
+     * using the primary key associated with this cursor position.</p>
+     *
+     * <p>The cursor position is unchanged after a delete, and subsequent calls
+     * to cursor functions expecting the cursor to refer to an existing record
+     * will fail.</p>
+     *
+     * <p>WARNING: Unlike read operations using a SecondaryCursor, write
+     * operations like this one are deadlock-prone.</p>
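+     *
+     * <p>A typical search-then-delete sequence (a sketch; the cursor and
+     * keyBytes are assumed to exist elsewhere):</p>
+     *
+     * <blockquote><pre>
+     * DatabaseEntry secKey = new DatabaseEntry(keyBytes);
+     * DatabaseEntry pKey = new DatabaseEntry();
+     * DatabaseEntry data = new DatabaseEntry();
+     * if (cursor.getSearchKey(secKey, pKey, data, LockMode.RMW) ==
+     *         OperationStatus.SUCCESS) {
+     *     // Removes the primary record and all its secondary records.
+     *     cursor.delete();
+     * }
+     * </pre></blockquote>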
        + * + * + */ + @Override + public OperationResult delete(final WriteOptions options) { + + checkOpenAndState(true); + + trace(Level.FINEST, "SecondaryCursor.delete: ", null); + + final CacheMode cacheMode = + options != null ? options.getCacheMode() : null; + + /* Read the primary key (the data of a secondary). */ + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry pKey = new DatabaseEntry(); + + /* + * Currently we write-lock the secondary before deleting the primary, + * which reverses the normal locking order and is deadlock-prone. + * + * FUTURE: To avoid deadlocks we could use dirty-read-all here, and + * then perform a special delete-if-has-secondary-key operation on the + * primary. We must be careful not to delete the primary record if, + * after locking it, it does not reference this secondary key. + */ + final OperationResult secResult = + getCurrentInternal(key, pKey, LockMode.RMW, cacheMode); + + if (secResult == null) { + return null; + } + + final Locker locker = cursorImpl.getLocker(); + final Database primaryDb = secondaryDb.getPrimary(pKey); + + if (primaryDb == null) { + /* Primary was removed from the association. */ + deleteNoNotify(cacheMode, getDatabaseImpl().getRepContext()); + return secResult; + } + + /* Delete the primary and all secondaries (including this one). */ + final OperationResult priResult = + primaryDb.deleteInternal(locker, pKey, cacheMode); + + if (priResult != null) { + return priResult; + } + + /* The primary record may have expired after locking the secondary. */ + if (cursorImpl.isProbablyExpired()) { + return null; + } + + throw secondaryDb.secondaryRefersToMissingPrimaryKey( + locker, key, pKey, secResult.getExpirationTime()); + } + + /** + * Delete the record to which the cursor refers from the primary database + * and all secondary indices. + * + *
+     * <p>This method behaves as if {@link Database#delete(Transaction,
+     * DatabaseEntry, WriteOptions)} were called for the primary database,
+     * using the primary key associated with this cursor position.</p>
+     *
+     * <p>The cursor position is unchanged after a delete, and subsequent calls
+     * to cursor functions expecting the cursor to refer to an existing record
+     * will fail.</p>
+     *
+     * <p>Calling this method is equivalent to calling {@link
+     * #delete(WriteOptions)}.</p>
        + * + * + */ + @Override + public OperationStatus delete() { + final OperationResult result = delete(null); + return result == null ? + OperationStatus.KEYEMPTY : OperationStatus.SUCCESS; + } + + /** + * This operation is not allowed on a secondary cursor. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary cursor should be used instead. + */ + @Override + public OperationResult put( + DatabaseEntry key, + DatabaseEntry data, + Put putType, + WriteOptions options) { + + throw SecondaryDatabase.notAllowedException(); + } + + /** + * This operation is not allowed on a secondary cursor. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary cursor should be used instead. + */ + @Override + public OperationStatus put(final DatabaseEntry key, + final DatabaseEntry data) { + throw SecondaryDatabase.notAllowedException(); + } + + /** + * This operation is not allowed on a secondary cursor. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary cursor should be used instead. + */ + @Override + public OperationStatus putNoOverwrite(final DatabaseEntry key, + final DatabaseEntry data) { + throw SecondaryDatabase.notAllowedException(); + } + + /** + * This operation is not allowed on a secondary cursor. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary cursor should be used instead. + */ + @Override + public OperationStatus putNoDupData(final DatabaseEntry key, + final DatabaseEntry data) { + throw SecondaryDatabase.notAllowedException(); + } + + /** + * This operation is not allowed on a secondary cursor. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary cursor should be used instead. + */ + @Override + public OperationStatus putCurrent(final DatabaseEntry data) { + throw SecondaryDatabase.notAllowedException(); + } + + /** + * Moves the cursor to a record according to the specified {@link Get} + * type. + * + *
+     * <p>The difference between this method and the method it overrides in
+     * {@link Cursor} is that the key here is defined as the secondary
+     * record's key, and the data is defined as the primary record's data.
+     * In addition, two operations are not supported by this method:
+     * {@link Get#SEARCH_BOTH} and {@link Get#SEARCH_BOTH_GTE}.</p>
        + */ + @Override + public OperationResult get( + final DatabaseEntry key, + final DatabaseEntry data, + final Get getType, + final ReadOptions options) { + + return get(key, null, data, getType, options); + } + + /** + * Moves the cursor to a record according to the specified {@link Get} + * type. + * + *
+     * <p>If the operation succeeds, the record at the resulting cursor
+     * position will be locked according to the {@link
+     * ReadOptions#getLockMode() lock mode} specified, the key, primary key,
+     * and/or data will be returned via the (non-null) DatabaseEntry
+     * parameters, and a non-null OperationResult will be returned.  If the
+     * operation fails because the record requested is not found, null is
+     * returned.</p>
+     *
+     * <p>The following table lists each allowed operation and whether the key,
+     * pKey and data parameters are input
+     * or output parameters.  Also specified is whether the cursor must be
+     * initialized (positioned on a record) before calling this method.  See
+     * the individual {@link Get} operations for more information.</p>
+     *
+     * <table border="1">
+     * <tr><th>Get operation</th><th>Description</th>
+     *     <th>'key' parameter</th><th>'pKey' parameter</th>
+     *     <th>'data' parameter</th>
+     *     <th>Cursor position must be initialized?</th></tr>
+     * <tr><td>{@link Get#SEARCH}</td><td>Searches using an exact match by
+     *     key.</td><td>input</td><td>output</td><td>output</td><td>no</td></tr>
+     * <tr><td>{@link Get#SEARCH_BOTH}</td><td>Searches using an exact match by
+     *     key and pKey.</td><td>input</td><td>input</td><td>output</td><td>no</td></tr>
+     * <tr><td>{@link Get#SEARCH_GTE}</td><td>Searches using a GTE match by
+     *     key.</td><td>input/output</td><td>output</td><td>output</td><td>no</td></tr>
+     * <tr><td>{@link Get#SEARCH_BOTH_GTE}</td><td>Searches using an exact
+     *     match by key and a GTE match by
+     *     pKey.</td><td>input</td><td>input/output</td><td>output</td><td>no</td></tr>
+     * <tr><td>{@link Get#CURRENT}</td><td>Accesses the current
+     *     record.</td><td>output</td><td>output</td><td>output</td><td>yes</td></tr>
+     * <tr><td>{@link Get#FIRST}</td><td>Finds the first record in the
+     *     database.</td><td>output</td><td>output</td><td>output</td><td>no</td></tr>
+     * <tr><td>{@link Get#LAST}</td><td>Finds the last record in the
+     *     database.</td><td>output</td><td>output</td><td>output</td><td>no</td></tr>
+     * <tr><td>{@link Get#NEXT}</td><td>Moves to the next
+     *     record.</td><td>output</td><td>output</td><td>output</td><td>no**</td></tr>
+     * <tr><td>{@link Get#NEXT_DUP}</td><td>Moves to the next record with the
+     *     same key.</td><td>output</td><td>output</td><td>output</td><td>yes</td></tr>
+     * <tr><td>{@link Get#NEXT_NO_DUP}</td><td>Moves to the next record with a
+     *     different key.</td><td>output</td><td>output</td><td>output</td><td>no**</td></tr>
+     * <tr><td>{@link Get#PREV}</td><td>Moves to the previous
+     *     record.</td><td>output</td><td>output</td><td>output</td><td>no**</td></tr>
+     * <tr><td>{@link Get#PREV_DUP}</td><td>Moves to the previous record with
+     *     the same key.</td><td>output</td><td>output</td><td>output</td><td>yes</td></tr>
+     * <tr><td>{@link Get#PREV_NO_DUP}</td><td>Moves to the previous record
+     *     with a different key.</td><td>output</td><td>output</td><td>output</td><td>no**</td></tr>
+     * </table>
+     *
+     * <p>** - For these 'next' and 'previous' operations the cursor may be
+     * uninitialized, in which case the cursor will be moved to the first or
+     * last record, respectively.</p>
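+     *
+     * <p>For example, a SEARCH operation (a sketch; keyBytes is
+     * assumed):</p>
+     *
+     * <blockquote><pre>
+     * DatabaseEntry key = new DatabaseEntry(keyBytes);  // input
+     * DatabaseEntry pKey = new DatabaseEntry();         // output
+     * DatabaseEntry data = new DatabaseEntry();         // output
+     * OperationResult result =
+     *     cursor.get(key, pKey, data, Get.SEARCH, null);
+     * if (result != null) {
+     *     // pKey and data now refer to the matching primary record.
+     * }
+     * </pre></blockquote>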
        + * + * @param key the secondary key input or output parameter, depending on + * getType. + * + * @param pKey the primary key input or output parameter, depending on + * getType. + * + * @param data the primary data output parameter. + * + * @param getType the Get operation type. May not be null. + * + * @param options the ReadOptions, or null to use default options. + * + * @return the OperationResult if the record requested is found, else null. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * the cursor is uninitialized (not positioned on a record) and this is not + * permitted (see above), or the non-transactional cursor was created in a + * different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null getType, a null input key/pKey parameter, + * an input key/pKey parameter with a null data array, a partial key/pKey + * input parameter, and specifying a {@link ReadOptions#getLockMode() + * lock mode} of READ_COMMITTED. + * + * @since 7.0 + */ + public OperationResult get( + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final Get getType, + ReadOptions options) { + + try { + checkOpen(); + + if (options == null) { + options = DEFAULT_READ_OPTIONS; + } + + final LockMode lockMode = options.getLockMode(); + + trace( + Level.FINEST, "SecondaryCursor.get: ", String.valueOf(getType), + key, data, lockMode); + + return getInternal( + key, pKey, data, getType, options, lockMode); + + } catch (Error E) { + getDatabaseImpl().getEnv().invalidate(E); + throw E; + } + } + + /** + * Performs the get() operation except for state checking and tracing. + * + * The LockMode is passed because for Database operations it is sometimes + * different than ReadOptions.getLockMode. + * + * Allows passing a throughput stat index so it can be called for Database + * and SecondaryCursor operations. + */ + OperationResult getInternal( + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + Get getType, + final ReadOptions options, + final LockMode lockMode) { + + DatabaseUtil.checkForNullParam(getType, "getType"); + + if (data == null) { + data = NO_RETURN_DATA; + } + + final CacheMode cacheMode = options.getCacheMode(); + final SearchMode searchMode = getType.getSearchMode(); + + if (searchMode != null) { + checkState(false /*mustBeInitialized*/); + + DatabaseUtil.checkForNullDbt(key, "key", true); + DatabaseUtil.checkForPartial(key, "key"); + + if (searchMode.isDataSearch()) { + DatabaseUtil.checkForNullDbt(pKey, "pKey", true); + DatabaseUtil.checkForPartial(pKey, "pKey"); + } else { + if (pKey == null) { + pKey = new DatabaseEntry(); + } + } + + return search(key, pKey, data, lockMode, cacheMode, searchMode); + } + + if (key == null) { + key = NO_RETURN_DATA; + } + if (pKey == null) { + pKey = new DatabaseEntry(); + } + + GetMode getMode = getType.getGetMode(); + + if (getType.getAllowNextPrevUninitialized() && + cursorImpl.isNotInitialized()) { + + assert getMode != null; + getType = getMode.isForward() ? 
Get.FIRST : Get.LAST; + getMode = null; + } + + if (getMode != null) { + checkState(true /*mustBeInitialized*/); + + return retrieveNext( + key, pKey, data, lockMode, cacheMode, getMode, + getLockPrimaryOnly(lockMode, data)); + } + + if (getType == Get.CURRENT) { + checkState(true /*mustBeInitialized*/); + + return getCurrentInternal(key, pKey, data, lockMode, cacheMode); + } + + assert getType == Get.FIRST || getType == Get.LAST; + checkState(false /*mustBeInitialized*/); + + return position( + key, pKey, data, lockMode, cacheMode, getType == Get.FIRST, + getLockPrimaryOnly(lockMode, data)); + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getCurrent(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getCurrent(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Returns the key/data pair to which the cursor refers. + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#KEYEMPTY + * OperationStatus.KEYEMPTY} if the key/pair at the cursor position has + * been deleted; otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getCurrent(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.CURRENT, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.KEYEMPTY : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getFirst(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getFirst(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Move the cursor to the first key/data pair of the database, and return + * that pair. If the first key has duplicate values, the first data item + * in the set of duplicates is returned. 
+ * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getFirst(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.FIRST, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getLast(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getLast(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Move the cursor to the last key/data pair of the database, and return + * that pair. If the last key has duplicate values, the last data item in + * the set of duplicates is returned. + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. 
+ * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getLast(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.LAST, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getNext(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getNext(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Move the cursor to the next key/data pair and return that pair. If the + * matching key has duplicate values, the first data item in the set of + * duplicates is returned. + * + *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next key/data pair of the database, and that pair
+     * is returned.  In the presence of duplicate key values, the value of the
+     * key may not change.</p>
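+     *
+     * <p>For example, iterating over the whole index (a sketch):</p>
+     *
+     * <blockquote><pre>
+     * DatabaseEntry key = new DatabaseEntry();
+     * DatabaseEntry pKey = new DatabaseEntry();
+     * DatabaseEntry data = new DatabaseEntry();
+     * while (cursor.getNext(key, pKey, data, null) ==
+     *        OperationStatus.SUCCESS) {
+     *     // Process one secondary-key / primary-record pair.
+     * }
+     * </pre></blockquote>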
        + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getNext(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.NEXT, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getNextDup(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getNextDup(key, new DatabaseEntry(), data, lockMode); + } + + /** + * If the next key/data pair of the database is a duplicate data record for + * the current key/data pair, move the cursor to the next key/data pair of + * the database and return that pair. + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getNextDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.NEXT_DUP, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getNextNoDup(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getNextNoDup(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Move the cursor to the next non-duplicate key/data pair and return that + * pair. If the matching key has duplicate values, the first data item in + * the set of duplicates is returned. + * + *
+     * <p>If the cursor is not yet initialized, move the cursor to the first
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the next non-duplicate key of the database, and that
+     * key/data pair is returned.</p>
        + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getNextNoDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.NEXT_NO_DUP, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getPrev(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getPrev(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Move the cursor to the previous key/data pair and return that pair. If + * the matching key has duplicate values, the last data item in the set of + * duplicates is returned. + * + *
+     * <p>If the cursor is not yet initialized, move the cursor to the last
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the previous key/data pair of the database, and that
+     * pair is returned.  In the presence of duplicate key values, the value of
+     * the key may not change.</p>
        + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getPrev(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.PREV, DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getPrevDup(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getPrevDup(key, new DatabaseEntry(), data, lockMode); + } + + /** + * If the previous key/data pair of the database is a duplicate data record + * for the current key/data pair, move the cursor to the previous key/data + * pair of the database and return that pair. + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * @throws IllegalStateException if the cursor or database has been closed, + * or the cursor is uninitialized (not positioned on a record), or the + * non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getPrevDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.PREV_DUP, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getPrevNoDup(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getPrevNoDup(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Move the cursor to the previous non-duplicate key/data pair and return + * that pair. If the matching key has duplicate values, the last data item + * in the set of duplicates is returned. + * + *
+     * <p>If the cursor is not yet initialized, move the cursor to the last
+     * key/data pair of the database, and return that pair.  Otherwise, the
+     * cursor is moved to the previous non-duplicate key of the database, and
+     * that key/data pair is returned.</p>
        + * + * @param key the secondary key returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getPrevNoDup(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.PREV_NO_DUP, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * {@inheritDoc} + * + * The difference between this method and the method it overrides in + * {@link Cursor} is that the key here is defined as the secondary + * records's key, and the data is defined as the primary record's data. + */ + @Override + public OperationStatus getSearchKey(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return getSearchKey(key, new DatabaseEntry(), data, lockMode); + } + + /** + * Move the cursor to the given key of the database, and return the datum + * associated with the given key. If the matching key has duplicate + * values, the first data item in the set of duplicates is returned. + * + * @param key the secondary key used as input. It must be initialized with + * a non-null byte array by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ *
+ * @throws IllegalStateException if the cursor or database has been closed,
+ * or the non-transactional cursor was created in a different thread.
+ *
+ * @throws IllegalArgumentException if an invalid parameter is specified,
+ * for example, if a DatabaseEntry parameter is null or does not contain a
+ * required non-null byte array.
+ */
+ public OperationStatus getSearchKey(final DatabaseEntry key,
+                                     final DatabaseEntry pKey,
+                                     final DatabaseEntry data,
+                                     final LockMode lockMode) {
+     final OperationResult result = get(
+         key, pKey, data, Get.SEARCH, DbInternal.getReadOptions(lockMode));
+
+     return result == null ?
+         OperationStatus.NOTFOUND : OperationStatus.SUCCESS;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * The difference between this method and the method it overrides in
+ * {@link Cursor} is that the key here is defined as the secondary
+ * record's key, and the data is defined as the primary record's data.
+ */
+ @Override
+ public OperationStatus getSearchKeyRange(final DatabaseEntry key,
+                                          final DatabaseEntry data,
+                                          final LockMode lockMode) {
+     return getSearchKeyRange(key, new DatabaseEntry(), data, lockMode);
+ }
+
+ /**
+ * Move the cursor to the closest matching key of the database, and return
+ * the data item associated with the matching key. If the matching key has
+ * duplicate values, the first data item in the set of duplicates is
+ * returned.
+ *

+ * The returned key/data pair is for the smallest key greater than or
+ * equal to the specified key (as determined by the key comparison
+ * function), permitting partial key matches and range searches.
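+ *
+ * For example, a range scan could be started at a key prefix as in the
+ * following sketch (the cursor and the key value are illustrative
+ * assumptions, not part of this API):
+ *
+ *     DatabaseEntry secKey = new DatabaseEntry(
+ *         "b".getBytes(StandardCharsets.UTF_8));
+ *     DatabaseEntry pKey = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     if (cursor.getSearchKeyRange(secKey, pKey, data, null) ==
+ *             OperationStatus.SUCCESS) {
+ *         // secKey now holds the smallest secondary key >= "b".
+ *     }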

        + * + * @param key the secondary key used as input and returned as output. It + * must be initialized with a non-null byte array by the caller. + * + * @param pKey the primary key returned as output. Its byte array does not + * need to be initialized by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * A partial data item may be + * specified to optimize for key only or partial data retrieval. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getSearchKeyRange(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.SEARCH_GTE, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * This operation is not allowed with this method signature. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method with the pKey parameter should be + * used instead. + */ + @Override + public OperationStatus getSearchBoth(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + throw SecondaryDatabase.notAllowedException(); + } + + /** + * Move the cursor to the specified secondary and primary key, where both + * the primary and secondary key items must match. + * + * @param key the secondary key used as input. It must be initialized with + * a non-null byte array by the caller. + * + * @param pKey the primary key used as input. It must be initialized with + * a non-null byte array by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. 
+ * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getSearchBoth(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.SEARCH_BOTH, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * This operation is not allowed with this method signature. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method with the pKey parameter should be + * used instead. + */ + @Override + public OperationStatus getSearchBothRange(final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + throw SecondaryDatabase.notAllowedException(); + } + + /** + * Move the cursor to the specified secondary key and closest matching + * primary key of the database. + * + *

+ * In the case of any database supporting sorted duplicate sets, the
+ * returned key/data pair is for the smallest primary key greater than or
+ * equal to the specified primary key (as determined by the key comparison
+ * function), permitting partial matches and range searches in duplicate
+ * data sets.
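+ *
+ * For example, a duplicate set could be scanned starting at an
+ * approximate primary key, as in this sketch (the cursor and the saved
+ * key bytes are illustrative assumptions):
+ *
+ *     DatabaseEntry secKey = new DatabaseEntry(savedSecKeyBytes);
+ *     DatabaseEntry pKey = new DatabaseEntry(savedPriKeyBytes);
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     if (cursor.getSearchBothRange(secKey, pKey, data, null) ==
+ *             OperationStatus.SUCCESS) {
+ *         // pKey holds the smallest primary key greater than or equal
+ *         // to savedPriKeyBytes within the duplicate set for secKey.
+ *     }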

        + * + * @param key the secondary key used as input. It must be initialized with + * a non-null byte array by the caller. + * + * @param pKey the primary key used as input and returned as output. It + * must be initialized with a non-null byte array by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. {@link LockMode#READ_COMMITTED} is not allowed. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the cursor or database has been closed, + * or the non-transactional cursor was created in a different thread. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, if a DatabaseEntry parameter is null or does not contain a + * required non-null byte array. + */ + public OperationStatus getSearchBothRange(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode) { + final OperationResult result = get( + key, pKey, data, Get.SEARCH_BOTH_GTE, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Returns the current key and data. + * + * When a secondary key is found, but the primary cannot be read for one of + * the following reasons, this method returns KEYEMPTY. + * + * 1) lock mode is read-uncommitted and the primary record was deleted in + * the middle of the operation + * + * 2) the primary DB has been removed from the SecondaryAssocation + */ + private OperationResult getCurrentInternal(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode) { + final boolean lockPrimaryOnly = getLockPrimaryOnly(lockMode, data); + + final LockMode searchLockMode = + lockPrimaryOnly ? LockMode.READ_UNCOMMITTED_ALL : lockMode; + + final OperationResult result = getCurrentInternal( + key, pKey, searchLockMode, cacheMode); + + if (result == null) { + return null; + } + + return readPrimaryAfterGet( + key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), + lockPrimaryOnly, result); + } + + /** + * Calls search() and retrieves primary data. + * + * When the primary record cannot be read (see readPrimaryAfterGet), + * advance over the unavailable record, according to the search type. + */ + OperationResult search(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final SearchMode searchMode) { + + final boolean lockPrimaryOnly = getLockPrimaryOnly(lockMode, data); + + final LockMode searchLockMode = + lockPrimaryOnly ? 
LockMode.READ_UNCOMMITTED_ALL : lockMode; + + final OperationResult result1 = search( + key, pKey, searchLockMode, cacheMode, searchMode, true); + if (result1 == null) { + return null; + } + + final OperationResult result2 = readPrimaryAfterGet( + key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), + lockPrimaryOnly, result1); + + if (result2 != null) { + return result2; + } + + /* Advance over the unavailable record. */ + switch (searchMode) { + case BOTH: + /* Exact search on sec and pri key. */ + return null; + case SET: + case BOTH_RANGE: + /* Find exact sec key and next primary key. */ + return retrieveNext( + key, pKey, data, lockMode, cacheMode, GetMode.NEXT_DUP, + lockPrimaryOnly); + case SET_RANGE: + /* Find next sec key or primary key. */ + return retrieveNext( + key, pKey, data, lockMode, cacheMode, GetMode.NEXT, + lockPrimaryOnly); + default: + throw EnvironmentFailureException.unexpectedState(); + } + } + + /** + * Calls position() and retrieves primary data. + * + * When the primary record cannot be read (see readPrimaryAfterGet), + * advance over the unavailable record. + */ + private OperationResult position(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final boolean first, + final boolean lockPrimaryOnly) { + + final LockMode searchLockMode = + lockPrimaryOnly ? LockMode.READ_UNCOMMITTED_ALL : lockMode; + + final OperationResult result1 = + position(key, pKey, searchLockMode, cacheMode, first); + + if (result1 == null) { + return null; + } + + final OperationResult result2 = readPrimaryAfterGet( + key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), + lockPrimaryOnly, result1); + + if (result2 != null) { + return result2; + } + + /* Advance over the unavailable record. */ + return retrieveNext( + key, pKey, data, lockMode, cacheMode, + first ? GetMode.NEXT : GetMode.PREV, lockPrimaryOnly); + } + + /** + * Calls retrieveNext() and retrieves primary data. + * + * When the primary record cannot be read (see readPrimaryAfterGet), + * advance over the unavailable record. + */ + private OperationResult retrieveNext( + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode, + final CacheMode cacheMode, + final GetMode getMode, + final boolean lockPrimaryOnly) { + + final LockMode searchLockMode = + lockPrimaryOnly ? LockMode.READ_UNCOMMITTED_ALL : lockMode; + + while (true) { + final OperationResult result1 = retrieveNext( + key, pKey, searchLockMode, cacheMode, getMode); + + if (result1 == null) { + return null; + } + + final OperationResult result2 = readPrimaryAfterGet( + key, pKey, data, lockMode, + isReadUncommittedMode(searchLockMode), + lockPrimaryOnly, result1); + + if (result2 != null) { + return result2; + } + + /* Continue loop to advance over the unavailable record. */ + } + } + + /** + * Returns whether to use dirty-read for the secondary read and rely on + * the primary record lock alone. + * + * False is returned in the following cases, and true otherwise. + * + * + When the user specifies dirty-read, since there is no locking. + * + * + For serializable isolation because this would likely require other + * changes to the serializable algorithms. Currently we live with the + * fact that secondary access with serializable isolation is deadlock + * prone. + * + * + When the primary data is not requested we must lock the secondary + * because we do not read or lock the primary. 
+ */ + private boolean getLockPrimaryOnly(final LockMode lockMode, + final DatabaseEntry data) { + + final boolean dataRequested = + data != null && + (!data.getPartial() || data.getPartialLength() != 0); + + return dataRequested && + !isSerializableIsolation(lockMode) && + !isReadUncommittedMode(lockMode); + } + + /** + * Reads the primary record associated with a secondary record. + * + * An approach is used for secondary DB access that avoids deadlocks that + * would occur if locks were acquired on primary and secondary DBs in + * different orders for different operations. The primary DB lock must + * always be acquired first when doing a write op; for example, when + * deleting a primary record, we don't know what the secondary keys are + * until we read (and lock) the primary record. However, the natural way + * to read via a secondary DB would be to read (and lock) the secondary + * record first to obtain the primary key, and then read (and lock) the + * primary record. Because this would obtain locks in the reverse order as + * write ops, a different approach is used for secondary reads. + * + * In order to avoid deadlocks, for non-serializable isolation we change + * the natural lock order for reads -- we only lock the primary record and + * then check the secondary record's reference to primary record. The + * initial read of the secondary DB is performed without acquiring locks + * (dirty-read). The primary key is then used to read and lock the + * associated primary record. At this point only the primary record is + * locked. + * + * Then, the secondary reference is checked (see checkReferenceToPrimary in + * Cursor). Note that there is no need to lock the secondary before + * checking its reference to the primary, because during the check the + * secondary is protected from changes by the lock on the primary. If we + * discover that the secondary record has been deleted (for example, due to + * an update to the primary after the secondary dirty-read and before the + * primary locking read), the record will not be returned to the caller (it + * will be skipped) and we will advance to the next record according to the + * operation type. In this case the lock on the primary record is released. + * + * In addition, the READ_UNCOMMITTED_ALL mode is used for the dirty-read + * of the secondary DB. This ensures that we do not skip uncommitted + * deleted records. See LockMode.READ_UNCOMMITTED_ALL and + * Cursor.readPrimaryAfterGet for further details. + * + * For a secondary DB with dups, READ_UNCOMMITTED_ALL will return a deleted + * record for an open txn, and we'll discover the deletion when reading + * (and locking) the primary record. The primary lookup is wasted in that + * case, but this should be infrequent. For a secondary DB without dups, + * READ_UNCOMMITTED_ALL will block during the secondary read in this case + * (a deleted record for an open txn) in order to obtain the data (the + * primary key). + * + * @return null if the primary record has been deleted or updated (when + * using read-uncommitted), or the primary database has been removed from + * the association. Otherwise, returns the result that should be returned + * to the API caller, which may or may not be origResult (see below). 
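+ *
+ * In outline, the flow described above is (a simplified summary of this
+ * method and its callers, not additional behavior):
+ *
+ *     1. Read the secondary record without locking, using
+ *        READ_UNCOMMITTED_ALL, to obtain the primary key.
+ *     2. Read and lock the primary record using that primary key.
+ *     3. Check that the secondary record still refers to the primary;
+ *        if not, skip the record, release the primary lock, and advance
+ *        to the next record according to the operation type.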
+ */ + private OperationResult readPrimaryAfterGet( + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final LockMode lockMode, + final boolean secDirtyRead, + final boolean lockPrimaryOnly, + final OperationResult origResult) { + + final Database primaryDb = secondaryDb.getPrimary(pKey); + if (primaryDb == null) { + /* Primary was removed from the association. */ + return null; + } + + if (!readPrimaryAfterGet( + primaryDb, key, pKey, data, lockMode, secDirtyRead, + lockPrimaryOnly, false /*verifyPrimary*/, + cursorImpl.getLocker() /*locker*/, secondaryDb, null)) { + return null; + } + + if (!secDirtyRead) { + return origResult; + } + + /* + * The expiration time may have changed after the secondary dirty-read + * and before locking the primary. + */ + return DbInternal.makeResult(cursorImpl.getExpirationTime()); + } + + /** + * @see Cursor#checkForPrimaryUpdate + */ + @Override + boolean checkForPrimaryUpdate(final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data) { + + final SecondaryConfig conf = secondaryDb.getPrivateSecondaryConfig(); + boolean possibleIntegrityError = false; + + /* + * If the secondary key is immutable, or the key creators are + * null (the database is read only), then we can skip this + * check. + */ + if (conf.getImmutableSecondaryKey()) { + /* Do nothing. */ + } else if (conf.getKeyCreator() != null) { + + /* + * Check that the key we're using is equal to the key + * returned by the key creator. + */ + final DatabaseEntry secKey = new DatabaseEntry(); + if (!conf.getKeyCreator().createSecondaryKey + (secondaryDb, pKey, data, secKey) || + !secKey.equals(key)) { + possibleIntegrityError = true; + } + } else if (conf.getMultiKeyCreator() != null) { + + /* + * Check that the key we're using is in the set returned by + * the key creator. + */ + final Set results = new HashSet(); + conf.getMultiKeyCreator().createSecondaryKeys + (secondaryDb, pKey, data, results); + if (!results.contains(key)) { + possibleIntegrityError = true; + } + } + + return possibleIntegrityError; + } +} diff --git a/src/com/sleepycat/je/SecondaryDatabase.java b/src/com/sleepycat/je/SecondaryDatabase.java new file mode 100644 index 0000000..ac06bfb --- /dev/null +++ b/src/com/sleepycat/je/SecondaryDatabase.java @@ -0,0 +1,1799 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.logging.Level; + +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.ExpirationInfo; +import com.sleepycat.je.dbi.GetMode; +import com.sleepycat.je.dbi.PutMode; +import com.sleepycat.je.dbi.SearchMode; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.utilint.DatabaseUtil; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * A secondary database handle. + * + *

+ * Secondary databases are opened with {@link
+ * Environment#openSecondaryDatabase Environment.openSecondaryDatabase} and
+ * are always associated with a single primary database. The distinguishing
+ * characteristics of a secondary database are:

        + * + *
+ *
+ *   • Records are automatically added to a secondary database when records
+ *     are added, modified and deleted in the primary database. Direct
+ *     calls to put() methods on a secondary database are prohibited.
+ *
+ *   • The {@link #delete delete} method of a secondary database will
+ *     delete the primary record as well as all its associated secondary
+ *     records.
+ *
+ *   • Calls to all get() methods will return the data from the associated
+ *     primary database.
+ *
+ *   • Additional get() method signatures are provided to return the
+ *     primary key in an additional pKey parameter.
+ *
+ *   • Calls to {@link #openCursor openCursor} will return a {@link
+ *     SecondaryCursor}, which itself has get() methods that return the
+ *     data of the primary database and additional get() method signatures
+ *     for returning the primary key.
+ *

+ * Before opening or creating a secondary database you must implement the
+ * {@link SecondaryKeyCreator} or {@link SecondaryMultiKeyCreator}
+ * interface.

        + * + *

        For example, to create a secondary database that supports duplicates:

        + * + *
        + *     Database primaryDb; // The primary database must already be open.
        + *     SecondaryKeyCreator keyCreator; // Your key creator implementation.
        + *     SecondaryConfig secConfig = new SecondaryConfig();
        + *     secConfig.setAllowCreate(true);
        + *     secConfig.setSortedDuplicates(true);
        + *     secConfig.setKeyCreator(keyCreator);
        + *     SecondaryDatabase newDb = env.openSecondaryDatabase(transaction,
        + *                                                         "myDatabaseName",
        + *                                                         primaryDb,
+ *                                                         secConfig);
        + * 
        + * + *
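+ *
+ * A minimal key creator for the example above might look like the
+ * following sketch, assuming the first four bytes of the primary data
+ * form the secondary key (the record layout is an illustrative
+ * assumption):
+ *
+ *     SecondaryKeyCreator keyCreator = new SecondaryKeyCreator() {
+ *         public boolean createSecondaryKey(SecondaryDatabase secondary,
+ *                                           DatabaseEntry key,
+ *                                           DatabaseEntry data,
+ *                                           DatabaseEntry result) {
+ *             // Copy the leading bytes of the primary data into the
+ *             // secondary key entry; returning false would mean the
+ *             // record has no secondary key and is not indexed.
+ *             result.setData(data.getData(), 0, 4);
+ *             return true;
+ *         }
+ *     };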

+ * If a primary database is to be associated with one or more secondary
+ * databases, it may not be configured for duplicates.

        + * + *

+ * WARNING: The associations between primary and secondary databases are
+ * not stored persistently. Whenever a primary database is opened for
+ * write access by the application, the appropriate associated secondary
+ * databases should also be opened by the application. This is necessary
+ * to ensure data integrity when changes are made to the primary database.
+ * If the secondary database is not opened, it will not be updated when
+ * the primary is updated, and the references between the databases will
+ * become invalid. (Note that this warning does not apply when using the
+ * {@link com.sleepycat.persist DPL}, which does store secondary
+ * relationships persistently.)

        + * + *

+ * Special considerations for using Secondary Databases with and without
+ * Transactions

        + * + *

+ * Normally, during a primary database write operation (insert, update or
+ * delete), all associated secondary databases are also updated. However,
+ * when an exception occurs during the write operation, the updates may be
+ * incomplete. If the databases are transactional, this is handled by
+ * aborting the transaction to undo the incomplete operation. If an
+ * auto-commit transaction is used (null is passed for the transaction),
+ * the transaction will be aborted automatically. If an explicit
+ * transaction is used, it must be aborted by the application caller after
+ * the exception is caught, as shown in the sketch below.
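+ *
+ * For example, with an explicit transaction (a minimal sketch; the
+ * environment, database and entries are assumed to already exist):
+ *
+ *     Transaction txn = env.beginTransaction(null, null);
+ *     try {
+ *         primaryDb.put(txn, key, data);
+ *         txn.commit();
+ *         txn = null;
+ *     } finally {
+ *         if (txn != null) {
+ *             // The write failed; abort to undo any partial updates,
+ *             // including partial secondary updates.
+ *             txn.abort();
+ *         }
+ *     }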

        + * + *

+ * However, if the databases are non-transactional, integrity problems can
+ * result when an exception occurs during the write operation. Because the
+ * write operation is not made atomic by a transaction, references between
+ * the databases will become invalid if the operation is incomplete. This
+ * results in a {@link SecondaryIntegrityException} when attempting to
+ * access the databases later.

        + * + *

+ * A secondary integrity problem is persistent; it cannot be resolved by
+ * reopening the databases or the environment. The only way to resolve the
+ * problem is to restore the environment from a valid backup, or, if the
+ * integrity of the primary database is assumed, to remove and recreate
+ * all secondary databases.

        + * + *

+ * Therefore, secondary databases and indexes should always be used in
+ * conjunction with transactional databases and stores. Without
+ * transactions, it is the responsibility of the application to handle the
+ * results of the incomplete write operation or to take steps to prevent
+ * this situation from happening in the first place.

        + * + *

+ * The following exceptions may be thrown during a write operation, and
+ * may cause an integrity problem in the absence of transactions.

        + *
+ *
+ *   • {@link SecondaryConstraintException}, see its subclasses for more
+ *     information.
+ *
+ *   • {@link LockConflictException}, when more than one thread is
+ *     accessing the databases.
+ *
+ *   • {@link EnvironmentFailureException}, if an unexpected or system
+ *     failure occurs.
+ *
+ *   • There is always the possibility of an {@link Error} or an
+ *     unintended {@link RuntimeException}.
        + */ +public class SecondaryDatabase extends Database { + + /* For type-safe check against EMPTY_SET */ + private static final Set EMPTY_SET = + Collections.emptySet(); + + private final Database primaryDatabase; // May be null. + private SecondaryConfig secondaryConfig; + private volatile boolean isFullyPopulated = true; + + /** + * Creates a secondary database but does not open or fully initialize it. + * + * @throws IllegalArgumentException via Environment.openSecondaryDatabase. + */ + SecondaryDatabase(final Environment env, + final SecondaryConfig secConfig, + final Database primaryDatabase) { + + super(env); + this.primaryDatabase = primaryDatabase; + if (primaryDatabase == null) { + if (secConfig.getSecondaryAssociation() == null) { + throw new IllegalArgumentException( + "Exactly one must be non-null: " + + "PrimaryDatabase or SecondaryAssociation"); + } + if (secConfig.getAllowPopulate()) { + throw new IllegalArgumentException( + "AllowPopulate must be false when a SecondaryAssociation" + + " is configured"); + } + } else { + if (secConfig.getSecondaryAssociation() != null) { + throw new IllegalArgumentException( + "Exactly one must be non-null: " + + "PrimaryDatabase or SecondaryAssociation"); + } + primaryDatabase.checkOpen(); + if (primaryDatabase.configuration.getSortedDuplicates()) { + throw new IllegalArgumentException( + "Duplicates not allowed for a primary database: " + + primaryDatabase.getDebugName()); + } + if (env.getNonNullEnvImpl() != + primaryDatabase.getEnvironment().getNonNullEnvImpl()) { + throw new IllegalArgumentException( + "Primary and secondary databases must be in the same" + + " environment"); + } + if (!primaryDatabase.configuration.getReadOnly() && + secConfig.getKeyCreator() == null && + secConfig.getMultiKeyCreator() == null) { + throw new IllegalArgumentException( + "SecondaryConfig.getKeyCreator()/getMultiKeyCreator()" + + " may be null only if the primary database is read-only"); + } + } + if (secConfig.getKeyCreator() != null && + secConfig.getMultiKeyCreator() != null) { + throw new IllegalArgumentException( + "secConfig.getKeyCreator() and getMultiKeyCreator() may not" + + " both be non-null"); + } + if (secConfig.getForeignKeyNullifier() != null && + secConfig.getForeignMultiKeyNullifier() != null) { + throw new IllegalArgumentException( + "secConfig.getForeignKeyNullifier() and" + + " getForeignMultiKeyNullifier() may not both be non-null"); + } + if (secConfig.getForeignKeyDeleteAction() == + ForeignKeyDeleteAction.NULLIFY && + secConfig.getForeignKeyNullifier() == null && + secConfig.getForeignMultiKeyNullifier() == null) { + throw new IllegalArgumentException( + "ForeignKeyNullifier or ForeignMultiKeyNullifier must be" + + " non-null when ForeignKeyDeleteAction is NULLIFY"); + } + if (secConfig.getForeignKeyNullifier() != null && + secConfig.getMultiKeyCreator() != null) { + throw new IllegalArgumentException( + "ForeignKeyNullifier may not be used with" + + " SecondaryMultiKeyCreator -- use" + + " ForeignMultiKeyNullifier instead"); + } + if (secConfig.getForeignKeyDatabase() != null) { + Database foreignDb = secConfig.getForeignKeyDatabase(); + if (foreignDb.getDbImpl().getSortedDuplicates()) { + throw new IllegalArgumentException( + "Duplicates must not be allowed for a foreign key " + + " database: " + foreignDb.getDebugName()); + } + } + } + + /** + * Create a database, called by Environment + */ + @Override + DatabaseImpl initNew(final Environment env, + final Locker locker, + final String databaseName, + final DatabaseConfig 
dbConfig) { + + final DatabaseImpl dbImpl = + super.initNew(env, locker, databaseName, dbConfig); + init(locker); + return dbImpl; + } + + /** + * Open a database, called by Environment + * + * @throws IllegalArgumentException via Environment.openSecondaryDatabase. + */ + @Override + void initExisting(final Environment env, + final Locker locker, + final DatabaseImpl database, + final String databaseName, + final DatabaseConfig dbConfig) { + + /* Disallow one secondary associated with two different primaries. */ + if (primaryDatabase != null) { + Database otherPriDb = database.findPrimaryDatabase(); + if (otherPriDb != null && + otherPriDb.getDbImpl() != + primaryDatabase.getDbImpl()) { + throw new IllegalArgumentException( + "Secondary already associated with different primary: " + + otherPriDb.getDebugName()); + } + } + + super.initExisting(env, locker, database, databaseName, dbConfig); + init(locker); + } + + /** + * Adds secondary to primary's list, and populates the secondary if needed. + * + * @param locker should be the locker used to open the database. If a + * transactional locker, the population operations will occur in the same + * transaction; this may result in a large number of retained locks. If a + * non-transactional locker, the Cursor will create a ThreadLocker (even if + * a BasicLocker used for handle locking is passed), and locks will not be + * retained. + */ + private void init(final Locker locker) { + + trace(Level.FINEST, "SecondaryDatabase open"); + + getDbImpl().setKnownSecondary(); + secondaryConfig = (SecondaryConfig) configuration; + + Database foreignDb = secondaryConfig.getForeignKeyDatabase(); + if (foreignDb != null) { + foreignDb.foreignKeySecondaries.add(this); + } + + /* Populate secondary if requested and secondary is empty. */ + if (!secondaryConfig.getAllowPopulate()) { + return; + } + Cursor secCursor = null; + Cursor priCursor = null; + try { + secCursor = new Cursor(this, locker, null); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + OperationResult result = secCursor.position( + key, data, LockMode.DEFAULT, null, true); + + if (result != null) { + return; + } + + /* Is empty, so populate */ + priCursor = new Cursor(primaryDatabase, locker, null); + + result = priCursor.position( + key, data, LockMode.DEFAULT, null, true); + + while (result != null) { + + updateSecondary( + locker, secCursor, key /*priKey*/, + null /*oldData*/, data /*newData*/, + null /*cacheMode*/, + result.getExpirationTime(), + false /*expirationUpdated*/, + result.getExpirationTime()); + + result = priCursor.retrieveNext( + key, data, LockMode.DEFAULT, null, GetMode.NEXT); + } + } finally { + if (secCursor != null) { + secCursor.close(); + } + if (priCursor != null) { + priCursor.close(); + } + } + } + + @Override + SecondaryAssociation makeSecondaryAssociation() { + /* Only one is non-null: primaryDatabase, SecondaryAssociation. */ + if (primaryDatabase != null) { + primaryDatabase.simpleAssocSecondaries.add(this); + return primaryDatabase.secAssoc; + } + return configuration.getSecondaryAssociation(); + } + + /** + * Closes a secondary database and dis-associates it from its primary + * database. A secondary database should be closed before closing its + * associated primary database. + * + * {@inheritDoc} + * + * + */ + @Override + public synchronized void close() { + /* removeReferringAssociations will be called during close. 
*/ + super.close(); + } + + @Override + void removeReferringAssociations() { + super.removeReferringAssociations(); + if (primaryDatabase != null) { + primaryDatabase.simpleAssocSecondaries.remove(this); + } + if (secondaryConfig != null) { + final Database foreignDb = secondaryConfig.getForeignKeyDatabase(); + if (foreignDb != null) { + foreignDb.foreignKeySecondaries.remove(this); + } + } + } + + /** + * @hidden + * For internal use only. + * + * Enables incremental population of this secondary database, so that index + * population can occur incrementally, and concurrently with primary + * database writes. + *

+ * After calling this method (and before calling {@link
+ * #endIncrementalPopulation}), it is expected that the application will
+ * populate the secondary explicitly by calling {@link
+ * Database#populateSecondaries} to process all records for the primary
+ * database(s) associated with this secondary.

+ * The concurrent population mode supports concurrent indexing by ordinary
+ * writes to the primary database(s) and calls to {@link
+ * Database#populateSecondaries}. To provide this capability, some
+ * primary-secondary integrity checking is disabled. The integrity
+ * checking that is disabled is meant only to detect application bugs, and
+ * is not necessary for normal operations. Specifically, the disabled
+ * checks are:

+ *
+ *   • When a new secondary key is inserted, because a primary record is
+ *     inserted or updated, we normally check that a key mapped to the
+ *     primary record does not already exist in the secondary database.
+ *
+ *   • When an existing secondary key is deleted, because a primary record
+ *     is updated or deleted, we normally check that a key mapped to the
+ *     primary record does exist in the secondary database.
+ *
        + * Without these checks, one can think of the secondary indexing operations + * as being idempotent. Via the idempotent indexing operations, explicit + * population (via {@link Database#populateSecondaries}) and normal + * secondary population (via primary writes) collaborate to add and delete + * index records as needed. + */ + public void startIncrementalPopulation() { + isFullyPopulated = false; + } + + /** + * @hidden + * For internal use only. + * + * Disables incremental population of this secondary database, after this + * index has been fully populated. + *
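+ *
+ * The overall lifecycle is expected to be (a sketch; how population is
+ * batched and progress persisted is an application choice):
+ *
+ *     secDb.startIncrementalPopulation();
+ *     // Primary writes may proceed concurrently from here on.
+ *     // Explicitly populate the index from each associated primary,
+ *     // e.g. via Database.populateSecondaries(...), until complete.
+ *     secDb.endIncrementalPopulation();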

        + * After calling this method, this database may not be populated by calling + * {@link Database#populateSecondaries}, and all primary-secondary + * integrity checking for this secondary is enabled. + */ + public void endIncrementalPopulation() { + isFullyPopulated = true; + } + + /** + * @hidden + * For internal use only. + * + * @return true if {@link #startIncrementalPopulation} was called, and + * {@link #endIncrementalPopulation} was not subsequently called. + */ + public boolean isIncrementalPopulationEnabled() { + return !isFullyPopulated; + } + + /** + * @hidden + * For internal use only. + * + * Reads {@code batchSize} records starting at the given {@code key} and + * {@code data}, and deletes any secondary records having a primary key + * (the data of the secondary record) for which {@link + * SecondaryAssociation#getPrimary} returns null. The next key/data pair + * to be processed is returned in the {@code key} and {@code data} + * parameters so these can be passed in to process the next batch. + *

        + * It is the application's responsibility to save the key/data pair + * returned by this method, and then pass the saved key/data when the + * method is called again to process the next batch of records. The + * application may wish to save the key/data persistently in order to avoid + * restarting the processing from the beginning of the database after a + * crash. + * + * @param key contains the starting key for the batch of records to be + * processed when this method is called, and contains the next key to be + * processed when this method returns. If {@code key.getData() == null} + * when this method is called, the batch will begin with the first record + * in the database. + * + * @param data contains the starting data element (primary key) for the + * batch of records to be processed when this method is called, and + * contains the next data element to be processed when this method returns. + * If {@code key.getData() == null} when this method is called, the batch + * will begin with the first record in the database. + * + * @param batchSize is the maximum number of records to be read, and also + * the maximum number of deletions that will be included in a single + * transaction. + * + * @return true if more records may need to be processed, or false if + * processing is complete. + */ + public boolean deleteObsoletePrimaryKeys(final DatabaseEntry key, + final DatabaseEntry data, + final int batchSize) { + try { + checkEnv(); + DatabaseUtil.checkForNullDbt(key, "key", false); + if (batchSize <= 0) { + throw new IllegalArgumentException( + "batchSize must be positive"); + } + final DatabaseImpl dbImpl = checkOpen(); + trace(Level.FINEST, "deleteObsoletePrimaryKeys", null, key, + null, null); + + final Locker locker = LockerFactory.getWritableLocker( + envHandle, null, dbImpl.isInternalDb(), + isTransactional(), + dbImpl.isReplicated() /*autoTxnIsReplicated*/); + try { + final boolean result; + try (final Cursor cursor = new Cursor(this, locker, null)) { + result = deleteObsoletePrimaryKeysInternal( + cursor, locker, key, data, batchSize); + } + locker.operationEnd(true); + return result; + } catch (final Throwable e) { + locker.operationEnd(false); + throw e; + } + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * Use a scan to walk through the primary keys. If the primary key is + * obsolete (SecondaryAssociation.getPrimary returns null), delete the + * record. + */ + private boolean deleteObsoletePrimaryKeysInternal(final Cursor cursor, + final Locker locker, + final DatabaseEntry key, + final DatabaseEntry data, + final int batchSize) { + /* TODO: use dirty-read scan with mode to return deleted records. */ + final LockMode scanMode = LockMode.RMW; + OperationResult searchResult; + if (key.getData() == null) { + /* Start at first key. */ + searchResult = cursor.position(key, data, scanMode, null, true); + } else { + /* Resume at key/data pair last processed. 
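+ *
+ * A typical driver loop might look like the following sketch (the batch
+ * size and how key/data are saved between batches are application
+ * choices):
+ *
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     DatabaseEntry data = new DatabaseEntry();
+ *     // key.getData() == null, so processing starts at the first record.
+ *     while (secDb.deleteObsoletePrimaryKeys(key, data, 100)) {
+ *         // Optionally persist key/data here so processing can resume
+ *         // at this point after a crash.
+ *     }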
*/ + searchResult = cursor.search( + key, data, scanMode, null, SearchMode.BOTH_RANGE, false); + if (searchResult == null) { + searchResult = cursor.search( + key, data, scanMode, null, SearchMode.SET_RANGE, false); + } + } + int nProcessed = 0; + while (searchResult != null) { + if (nProcessed >= batchSize) { + return true; + } + nProcessed += 1; + if (secAssoc.getPrimary(data) == null) { + cursor.deleteNoNotify(null, getDbImpl().getRepContext()); + } + searchResult = cursor.retrieveNext( + key, data, scanMode, null, GetMode.NEXT); + } + return false; + } + + /** + * @hidden + * For internal use only. + */ + @Override + public void populateSecondaries(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) { + throw new UnsupportedOperationException("Not allowed on a secondary"); + } + + /** + * @hidden + * For internal use only. + */ + @Override + public void populateSecondaries(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final long expirationTime, + CacheMode cacheMode) { + throw new UnsupportedOperationException("Not allowed on a secondary"); + } + + /** + * Returns the primary database associated with this secondary database. + * + * @return the primary database associated with this secondary database. + */ + /* + * To be added when SecondaryAssociation is published: + * If a {@link SecondaryAssociation} is {@link + * SecondaryCursor#setSecondaryAssociation configured}, this method returns + * null. + */ + public Database getPrimaryDatabase() { + return primaryDatabase; + } + + /** + * Returns an empty list, since this database is itself a secondary + * database. + */ + @Override + public List getSecondaryDatabases() { + return Collections.emptyList(); + } + + /** + * Returns a copy of the secondary configuration of this database. + * + * @return a copy of the secondary configuration of this database. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @deprecated As of JE 4.0.13, replaced by {@link + * SecondaryDatabase#getConfig()}. + */ + public SecondaryConfig getSecondaryConfig() { + return getConfig(); + } + + /** + * Returns a copy of the secondary configuration of this database. + * + * @return a copy of the secondary configuration of this database. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + @Override + public SecondaryConfig getConfig() { + return (SecondaryConfig) super.getConfig(); + } + + /** + * Returns the secondary config without cloning, for internal use. + */ + SecondaryConfig getPrivateSecondaryConfig() { + return secondaryConfig; + } + + /** + * Obtain a cursor on a database, returning a + * SecondaryCursor. Calling this method is the equivalent of + * calling {@link #openCursor} and casting the result to {@link + * SecondaryCursor}. + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the database is non-transactional, null must be + * specified. For a transactional database, the transaction is optional + * for read-only access and required for read-write access. + * + * @param cursorConfig The cursor attributes. If null, default attributes + * are used. + * + * @return A secondary database cursor. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ * + * @deprecated As of JE 4.0.13, replaced by {@link + * SecondaryDatabase#openCursor}.

        + */ + public SecondaryCursor openSecondaryCursor( + final Transaction txn, + final CursorConfig cursorConfig) { + + return openCursor(txn, cursorConfig); + } + + /** + * Obtain a cursor on a database, returning a SecondaryCursor. + */ + @Override + public SecondaryCursor openCursor(final Transaction txn, + final CursorConfig cursorConfig) { + checkReadable(); + return (SecondaryCursor) super.openCursor(txn, cursorConfig); + } + + /** + * Overrides Database method. + */ + @Override + Cursor newDbcInstance(final Transaction txn, + final CursorConfig cursorConfig) { + return new SecondaryCursor(this, txn, cursorConfig); + } + + /** + * Deletes the record associated with the given secondary key. In the + * presence of duplicate keys, all primary records associated with the + * given secondary key will be deleted. + * + *

+ * When multiple primary records are deleted, the expiration time in the
+ * returned result is that of the last record deleted.

        + * + *

+ * When the primary records are deleted, their associated secondary
+ * records are deleted as if {@link Database#delete} were called. This
+ * includes, but is not limited to, the secondary record referenced by the
+ * given key.
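+ *
+ * For example (a minimal sketch; the transaction and secondary key are
+ * assumed to already exist):
+ *
+ *     OperationResult r = secDb.delete(txn, secKey, null);
+ *     if (r == null) {
+ *         // No record with that secondary key was found.
+ *     }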

        + * + * @param key the key used as + * input. + * + * + */ + @Override + public OperationResult delete(final Transaction txn, + final DatabaseEntry key, + final WriteOptions options) { + + checkEnv(); + final DatabaseImpl dbImpl = checkReadable(); + + trace(Level.FINEST, "SecondaryDatabase.delete", txn, key, null, null); + + final CacheMode cacheMode = + options != null ? options.getCacheMode() : null; + + final Locker locker = LockerFactory.getWritableLocker( + envHandle, + txn, + dbImpl.isInternalDb(), + isTransactional(), + dbImpl.isReplicated()); // autoTxnIsReplicated + + OperationResult commitResult = null; + try { + final LockMode lockMode = locker.isSerializableIsolation() ? + LockMode.RMW : + LockMode.READ_UNCOMMITTED_ALL; + + try (Cursor cursor = new Cursor(this, locker, null)) { + /* Read the primary key (the data of a secondary). */ + + final DatabaseEntry pKey = new DatabaseEntry(); + + OperationResult searchResult = cursor.search( + key, pKey, lockMode, cacheMode, SearchMode.SET, false); + + /* + * For each duplicate secondary key, delete the primary record + * and all its associated secondary records, including the one + * referenced by this secondary cursor. + */ + while (searchResult != null) { + final Database primaryDb = getPrimary(pKey); + + if (primaryDb == null) { + + /* Primary was removed from the association. */ + cursor.deleteNoNotify( + null, dbImpl.getRepContext()); + } else { + + commitResult = primaryDb.deleteInternal( + locker, pKey, cacheMode); + + if (commitResult == null) { + if (lockMode != LockMode.RMW) { + + /* + * The primary record was not found. The index + * may be either corrupt or the record was + * deleted between finding it in the secondary + * without locking and trying to delete it. If + * it was deleted or expired then just skip it. + */ + if (cursor.checkCurrent(LockMode.RMW, null) != + null) { + + /* There is a secondary index entry */ + throw secondaryRefersToMissingPrimaryKey( + locker, key, pKey, + searchResult.getExpirationTime()); + } + } else { + if (!cursor.cursorImpl.isProbablyExpired()) { + + /* There is a secondary index entry. */ + throw secondaryRefersToMissingPrimaryKey( + locker, key, pKey, + searchResult.getExpirationTime()); + } + } + } + } + + checkOpen(); + + searchResult = cursor.retrieveNext( + key, pKey, lockMode, null, GetMode.NEXT_DUP); + } + + if (commitResult == null) { + dbImpl.getEnv().incDeleteFailOps(dbImpl); + } + + return commitResult; + } + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } finally { + locker.operationEnd(commitResult != null); + } + } + + /** + * Deletes the record associated with the given secondary key. In the + * presence of duplicate keys, all primary records associated with the + * given secondary key will be deleted. + * + *

+ * When the primary records are deleted, their associated secondary
+ * records are deleted as if {@link Database#delete} were called. This
+ * includes, but is not limited to, the secondary record referenced by the
+ * given key.

        + * + *

+ * Calling this method is equivalent to calling {@link
+ * #delete(Transaction, DatabaseEntry, WriteOptions)}.

        + * + * @param key the key used as + * input. + * + * + */ + @Override + public OperationStatus delete(final Transaction txn, + final DatabaseEntry key) { + final OperationResult result = delete(txn, key, null); + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * Moves the cursor to a record according to the specified {@link Get} + * type. + * + *

+ * The difference between this method and the method it overrides in
+ * {@link Cursor} is that the key here is defined as the secondary
+ * record's key, and the data is defined as the primary record's data.

        + */ + @Override + public OperationResult get( + final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final Get getType, + ReadOptions options) { + + return get(txn, key, null, data, getType, options); + } + + /** + * Retrieves a record according to the specified {@link Get} type. + * + *

+ * If the operation succeeds, the record will be locked according to the
+ * {@link ReadOptions#getLockMode() lock mode} specified, the key, primary
+ * key and/or data will be returned via the (non-null) DatabaseEntry
+ * parameters, and a non-null OperationResult will be returned. If the
+ * operation fails because the record requested is not found, null is
+ * returned.

        + * + *

+ * The following table lists each allowed operation and whether the key,
+ * pKey and data parameters are input or output parameters. See the
+ * individual {@link Get} operations for more information.

        + * + *
+ *
+ *   Get operation             Description                      'key'   'pKey'   'data'
+ *   ----------------------    -----------------------------    -----   ------   ------
+ *   {@link Get#SEARCH}        Searches using an exact          input   output   output
+ *                             match by key.
+ *   {@link Get#SEARCH_BOTH}   Searches using an exact          input   input    output
+ *                             match by key and data.
        + * + * @param txn For a transactional database, an explicit transaction may be + * specified to transaction-protect the operation, or null may be specified + * to perform the operation without transaction protection. For a + * non-transactional database, null must be specified. + * + * @param key the secondary key input parameter. + * + * @param pKey the primary key input or output parameter, depending on + * getType. + * + * @param data the primary data output parameter. + * + * @param getType the Get operation type. May not be null. + * + * @param options the ReadOptions, or null to use default options. + * + * @return the OperationResult if the record requested is found, else null. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * This includes passing a null getType, a null input key/pKey parameter, + * an input key/pKey parameter with a null data array, a partial key/pKey + * input parameter, and specifying a {@link ReadOptions#getLockMode() + * lock mode} of READ_COMMITTED. + * + * @since 7.0 + */ + public OperationResult get( + final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + final Get getType, + ReadOptions options) { + + try { + checkEnv(); + checkReadable(); + + if (options == null) { + options = Cursor.DEFAULT_READ_OPTIONS; + } + + LockMode lockMode = options.getLockMode(); + + trace( + Level.FINEST, "SecondaryDatabase.get", String.valueOf(getType), + txn, key, null, lockMode); + + checkLockModeWithoutTxn(txn, lockMode); + + final CursorConfig cursorConfig; + + if (lockMode == LockMode.READ_COMMITTED) { + cursorConfig = READ_COMMITTED_CURSOR_CONFIG; + lockMode = null; + } else { + cursorConfig = DEFAULT_CURSOR_CONFIG; + } + + OperationResult result = null; + + final Locker locker = LockerFactory.getReadableLocker( + this, txn, cursorConfig.getReadCommitted()); + + try { + try (final SecondaryCursor cursor = + new SecondaryCursor(this, locker, cursorConfig)) { + + result = cursor.getInternal( + key, pKey, data, getType, options, lockMode); + } + } finally { + locker.operationEnd(result != null); + } + + return result; + + } catch (Error E) { + envHandle.invalidate(E); + throw E; + } + } + + /** + * @param key the secondary key used as input. It must be initialized with + * a non-null byte array by the caller. + * + * @param data the primary data returned as output. Its byte array does + * not need to be initialized by the caller. + * + * + */ + @Override + public OperationStatus get(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + return get(txn, key, new DatabaseEntry(), data, lockMode); + } + + /** + * Retrieves the key/data pair with the given key. If the matching key has + * duplicate values, the first data item in the set of duplicates is + * returned. Retrieval of duplicates requires the use of {@link Cursor} + * operations. + * + *

+ * Calling this method is equivalent to calling {@link
+ * #get(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, Get,
+ * ReadOptions)} with {@link Get#SEARCH}.
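+ *
+ * For example, the following two calls behave identically (a sketch; the
+ * entries are assumed to be initialized as described below):
+ *
+ *     OperationStatus s = secDb.get(txn, key, pKey, data, null);
+ *
+ *     OperationResult r = secDb.get(
+ *         txn, key, pKey, data, Get.SEARCH, null);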

        + * + * @param txn For a transactional database, an explicit transaction may be + * specified to transaction-protect the operation, or null may be specified + * to perform the operation without transaction protection. For a + * non-transactional database, null must be specified. + * + * @param key the secondary key used as + * input. + * + * @param pKey the primary key returned as + * output. + * + * @param data the primary data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus get(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) { + final OperationResult result = get( + txn, key, pKey, data, Get.SEARCH, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * This operation is not allowed with this method signature. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method with the pKey parameter should be + * used instead. + */ + @Override + public OperationStatus getSearchBoth(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final LockMode lockMode) { + throw notAllowedException(); + } + + /** + * Retrieves the key/data pair with the specified secondary and primary + * key, that is, both the primary and secondary key items must match. + * + *

+ * Calling this method is equivalent to calling {@link
+ * #get(Transaction, DatabaseEntry, DatabaseEntry, DatabaseEntry, Get,
+ * ReadOptions)} with {@link Get#SEARCH_BOTH}.
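+ *
+ * For example (a sketch; both key entries must be initialized by the
+ * caller as described below):
+ *
+ *     OperationStatus s =
+ *         secDb.getSearchBoth(txn, secKey, priKey, data, null);
+ *     // SUCCESS only if the pair (secKey, priKey) exists in the index.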

        + * + * @param txn For a transactional database, an explicit transaction may be + * specified to transaction-protect the operation, or null may be specified + * to perform the operation without transaction protection. For a + * non-transactional database, null must be specified. + * + * @param key the secondary key used as + * input. + * + * @param pKey the primary key used as + * input. + * + * @param data the primary data returned as + * output. + * + * @param lockMode the locking attributes; if null, default attributes are + * used. + * + * @return {@link com.sleepycat.je.OperationStatus#NOTFOUND + * OperationStatus.NOTFOUND} if no matching key/data pair is found; + * otherwise, {@link com.sleepycat.je.OperationStatus#SUCCESS + * OperationStatus.SUCCESS}. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the database has been closed. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public OperationStatus getSearchBoth(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry pKey, + final DatabaseEntry data, + LockMode lockMode) { + final OperationResult result = get( + txn, key, pKey, data, Get.SEARCH_BOTH, + DbInternal.getReadOptions(lockMode)); + + return result == null ? + OperationStatus.NOTFOUND : OperationStatus.SUCCESS; + } + + /** + * This operation is not allowed on a secondary database. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary database should be used instead. + */ + @Override + public OperationResult put( + final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data, + final Put putType, + final WriteOptions options) { + + throw notAllowedException(); + } + + /** + * This operation is not allowed on a secondary database. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary database should be used instead. + */ + @Override + public OperationStatus put(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) { + throw notAllowedException(); + } + + /** + * This operation is not allowed on a secondary database. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary database should be used instead. + */ + @Override + public OperationStatus putNoOverwrite(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) { + throw notAllowedException(); + } + + /** + * This operation is not allowed on a secondary database. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary database should be used instead. + */ + @Override + public OperationStatus putNoDupData(final Transaction txn, + final DatabaseEntry key, + final DatabaseEntry data) { + throw notAllowedException(); + } + + /** + * This operation is not allowed on a secondary database. {@link + * UnsupportedOperationException} will always be thrown by this method. + * The corresponding method on the primary database should be used instead. 
+ */ + @Override + public JoinCursor join(final Cursor[] cursors, final JoinConfig config) { + throw notAllowedException(); + } + + /** + * Updates a single secondary when a put() or delete() is performed on the + * primary. + *

        + * For an insert, newData will be non-null and oldData will be null. + *

        + * For an update, newData will be non-null and oldData will be non-null. + *

        + * For a delete, newData will be null and oldData may be null or non-null + * depending on whether its need by the key creator/extractor. + * + * @param locker the internal locker. + * + * @param cursor secondary cursor to use, or null if this method should + * open and close a cursor if one is needed. + * + * @param priKey the primary key. + * + * @param oldData the primary data before the change, or null if the record + * did not previously exist. + * + * @param newData the primary data after the change, or null if the record + * has been deleted. + * + * @param cacheMode CacheMode to use for accessing secondary. + * + * @param expirationTime expiration time of primary record, to be written + * to secondary records. + * + * @param expirationUpdated whether the primary was updated and the + * expiration time changed. + * + * @param oldExpirationTime the expiration time of the previous version of + * the primary record. When the primary was updated, this is the expiration + * time of the previous version. When the primary was not updated, this is + * the expiration time of the current or newly inserted primary record. + * + * @return the number of secondary records written (inserted, deleted or + * updated). + */ + int updateSecondary(final Locker locker, + Cursor cursor, + final DatabaseEntry priKey, + final DatabaseEntry oldData, + final DatabaseEntry newData, + final CacheMode cacheMode, + final long expirationTime, + final boolean expirationUpdated, + final long oldExpirationTime) { + + final boolean expirationInHours = + TTL.isSystemTimeInHours(expirationTime); + + final int expiration = + TTL.systemTimeToExpiration(expirationTime, expirationInHours); + + final SecondaryKeyCreator keyCreator = secondaryConfig.getKeyCreator(); + + final SecondaryMultiKeyCreator multiKeyCreator = + secondaryConfig.getMultiKeyCreator(); + + final boolean localCursor = (cursor == null); + + if (keyCreator != null) { + /* Each primary record may have a single secondary key. */ + assert multiKeyCreator == null; + + /* Get old and new secondary keys. */ + DatabaseEntry oldSecKey = null; + DatabaseEntry newSecKey = null; + + if (oldData != null || newData == null) { + oldSecKey = new DatabaseEntry(); + if (!keyCreator.createSecondaryKey(this, priKey, oldData, + oldSecKey)) { + oldSecKey = null; + } + } + + if (newData != null) { + newSecKey = new DatabaseEntry(); + if (!keyCreator.createSecondaryKey(this, priKey, newData, + newSecKey)) { + newSecKey = null; + } + } + + /* Delete the old key if it is no longer present. */ + final boolean doDelete = + oldSecKey != null && !oldSecKey.equals(newSecKey); + + /* Insert the new key if it was not present before. */ + final boolean doInsert = + newSecKey != null && !newSecKey.equals(oldSecKey); + + /* Update secondary if key did not change but expiration did. */ + final boolean doUpdate = + expirationUpdated && newSecKey != null && !doInsert; + + if (doDelete || doInsert || doUpdate) { + if (localCursor) { + cursor = new Cursor(this, locker, null); + } + try { + if (doDelete) { + deleteKey( + cursor, priKey, oldSecKey, cacheMode, + oldExpirationTime); + } + if (doInsert) { + insertKey( + locker, cursor, priKey, newSecKey, + cacheMode, expiration, expirationInHours, + oldExpirationTime); + } + if (doUpdate) { + updateExpiration( + cursor, priKey, oldSecKey, + cacheMode, expiration, expirationInHours, + oldExpirationTime); + } + } finally { + if (localCursor) { + cursor.close(); + } + } + } + return (doDelete ? 1 : 0) + + (doInsert ? 1 : 0) + + (doUpdate ? 
1 : 0); + } else { + /* Each primary record may have multiple secondary keys. */ + if (multiKeyCreator == null) { + throw new IllegalArgumentException( + "SecondaryConfig.getKeyCreator()/getMultiKeyCreator()" + + " may be null only if the primary database is read-only"); + } + + /* Get old and new secondary keys. */ + final Set oldKeys; + final Set newKeys; + + if (oldData == null && newData != null) { + oldKeys = EMPTY_SET; + } else { + oldKeys = new HashSet<>(); + multiKeyCreator.createSecondaryKeys( + this, priKey, oldData, oldKeys); + } + + if (newData == null) { + newKeys = EMPTY_SET; + } else { + newKeys = new HashSet<>(); + multiKeyCreator.createSecondaryKeys( + this, priKey, newData, newKeys); + } + + final Set toDelete; + final Set toInsert; + final Set toUpdate; + + /* Delete old keys that are no longer present. */ + if (oldKeys.isEmpty()) { + toDelete = EMPTY_SET; + } else { + toDelete = new HashSet<>(oldKeys); + toDelete.removeAll(newKeys); + } + + /* Insert new keys that were not present before. */ + if (newKeys.isEmpty()) { + toInsert = EMPTY_SET; + } else { + toInsert = new HashSet<>(newKeys); + toInsert.removeAll(oldKeys); + } + + /* Update secondary if key did not change but expiration did. */ + if (!expirationUpdated || newKeys.isEmpty()) { + toUpdate = EMPTY_SET; + } else { + toUpdate = new HashSet<>(newKeys); + toUpdate.retainAll(oldKeys); + } + + if (!toDelete.isEmpty() || + !toInsert.isEmpty() || + !toUpdate.isEmpty()) { + + if (localCursor) { + cursor = new Cursor(this, locker, null); + } + try { + if (!toDelete.isEmpty()) { + for (DatabaseEntry secKey : toDelete) { + deleteKey( + cursor, priKey, secKey, cacheMode, + oldExpirationTime); + } + } + if (!toInsert.isEmpty()) { + for (DatabaseEntry secKey : toInsert) { + insertKey( + locker, cursor, priKey, secKey, + cacheMode, expiration, expirationInHours, + oldExpirationTime); + } + } + if (!toUpdate.isEmpty()) { + for (DatabaseEntry secKey : toUpdate) { + updateExpiration( + cursor, priKey, secKey, + cacheMode, expiration, expirationInHours, + oldExpirationTime); + } + } + } finally { + if (localCursor) { + cursor.close(); + } + } + } + return toDelete.size() + toInsert.size() + toUpdate.size(); + } + } + + /** + * Deletes an old secondary key. + */ + private void deleteKey(final Cursor cursor, + final DatabaseEntry priKey, + final DatabaseEntry oldSecKey, + final CacheMode cacheMode, + final long oldExpirationTime) { + + final OperationResult result = cursor.search( + oldSecKey, priKey, LockMode.RMW, cacheMode, SearchMode.BOTH, + false); + + if (result != null) { + cursor.deleteInternal(getDbImpl().getRepContext(), cacheMode); + return; + } + + if (isFullyPopulated && + !getEnv().expiresWithin( + oldExpirationTime, getEnv().getTtlClockTolerance())) { + + throw new SecondaryIntegrityException( + this, cursor.getCursorImpl().getLocker(), + "Secondary is corrupt: the primary record contains a key " + + "that is not present in the secondary", + getDebugName(), oldSecKey, priKey, oldExpirationTime); + } + } + + /** + * Inserts a new secondary key. + */ + private void insertKey(final Locker locker, + final Cursor cursor, + final DatabaseEntry priKey, + final DatabaseEntry newSecKey, + final CacheMode cacheMode, + final int expiration, + final boolean expirationInHours, + final long oldExpirationTime) { + + /* Check for the existence of a foreign key. 
*/ + final Database foreignDb = + secondaryConfig.getForeignKeyDatabase(); + + if (foreignDb != null) { + + try (final Cursor foreignCursor = + new Cursor(foreignDb, locker, null)) { + + final DatabaseEntry tmpData = new DatabaseEntry(); + + final OperationResult result = foreignCursor.search( + newSecKey, tmpData, LockMode.DEFAULT, cacheMode, + SearchMode.SET, true); + + if (result == null) { + throw new ForeignConstraintException( + locker, + "Secondary " + getDebugName() + + " foreign key not allowed: it is not" + + " present in the foreign database " + + foreignDb.getDebugName(), getDebugName(), + newSecKey, priKey, oldExpirationTime); + } + } + } + + final ExpirationInfo expInfo = new ExpirationInfo( + expiration, expirationInHours, false /*updateExpiration*/); + + /* Insert the new key. */ + if (configuration.getSortedDuplicates()) { + + final OperationResult result = cursor.putInternal( + newSecKey, priKey, cacheMode, expInfo, PutMode.NO_DUP_DATA); + + if (result == null && isFullyPopulated) { + throw new SecondaryIntegrityException( + this, locker, "Secondary/primary record already present", + getDebugName(), newSecKey, priKey, oldExpirationTime); + } + } else { + final OperationResult result = cursor.putInternal( + newSecKey, priKey, cacheMode, expInfo, PutMode.NO_OVERWRITE); + + if (result == null && isFullyPopulated) { + throw new UniqueConstraintException( + locker, "Unique secondary key is already present", + getDebugName(), newSecKey, priKey, oldExpirationTime); + } + } + } + + /** + * Updates a new secondary key, which doesn't change the key or data but is + * needed to update the expiration time. + */ + private void updateExpiration(final Cursor cursor, + final DatabaseEntry priKey, + final DatabaseEntry secKey, + final CacheMode cacheMode, + final int expiration, + final boolean expirationInHours, + final long oldExpirationTime) { + + final PutMode putMode; + + final ExpirationInfo expInfo = new ExpirationInfo( + expiration, expirationInHours, true /*updateExpiration*/); + + final EnvironmentImpl envImpl = getEnv(); + + if (isFullyPopulated && + !envImpl.expiresWithin( + oldExpirationTime, envImpl.getTtlClockTolerance())) { + + final OperationResult result = cursor.search( + secKey, priKey, LockMode.RMW, cacheMode, + configuration.getSortedDuplicates() ? + SearchMode.BOTH : SearchMode.SET, false); + + if (result == null) { + throw new SecondaryIntegrityException( + this, cursor.getCursorImpl().getLocker(), + "Secondary is corrupt: the primary record contains a " + + "key that is not present in the secondary", + getDebugName(), secKey, priKey, oldExpirationTime); + } + + putMode = PutMode.CURRENT; + } else { + putMode = PutMode.OVERWRITE; + } + + cursor.putInternal(secKey, priKey, cacheMode, expInfo, putMode); + } + + /** + * Called when a record in the foreign database is deleted. + * + * @param secKey is the primary key of the foreign database, which is the + * secondary key (ordinary key) of this secondary database. + */ + void onForeignKeyDelete(final Locker locker, + final DatabaseEntry secKey, + final CacheMode cacheMode) { + + final ForeignKeyDeleteAction deleteAction = + secondaryConfig.getForeignKeyDeleteAction(); + + /* Use RMW if we're going to be deleting the secondary records. */ + final LockMode lockMode = + (deleteAction == ForeignKeyDeleteAction.ABORT) ? + LockMode.DEFAULT : + LockMode.RMW; + + /* + * Use the deleted foreign primary key to read the data of this + * database, which is the associated primary's key. 
+ */ + try (final Cursor cursor = new Cursor(this, locker, null)) { + + final DatabaseEntry priKey = new DatabaseEntry(); + + OperationResult secResult = cursor.search( + secKey, priKey, lockMode, cacheMode, SearchMode.SET, true); + + while (secResult != null) { + + if (deleteAction == ForeignKeyDeleteAction.ABORT) { + + /* + * ABORT - throw an exception to cause the user to abort + * the transaction. + */ + throw new DeleteConstraintException( + locker, "Secondary refers to a deleted foreign key", + getDebugName(), secKey, priKey, + secResult.getExpirationTime()); + + } else if (deleteAction == ForeignKeyDeleteAction.CASCADE) { + + /* + * CASCADE - delete the associated primary record. + */ + final Database primaryDb = getPrimary(priKey); + if (primaryDb != null) { + + final OperationResult priResult = + primaryDb.deleteInternal( + locker, priKey, cacheMode); + + if (priResult == null && + !cursor.cursorImpl.isProbablyExpired()) { + + throw secondaryRefersToMissingPrimaryKey( + locker, secKey, priKey, + secResult.getExpirationTime()); + } + } + + } else if (deleteAction == ForeignKeyDeleteAction.NULLIFY) { + + /* + * NULLIFY - set the secondary key to null in the + * associated primary record. + */ + final Database primaryDb = getPrimary(priKey); + if (primaryDb != null) { + + try (final Cursor priCursor = new Cursor( + primaryDb, locker, null)) { + + final DatabaseEntry data = new DatabaseEntry(); + + final OperationResult priResult = priCursor.search( + priKey, data, LockMode.RMW, cacheMode, + SearchMode.SET, true); + + if (priResult == null) { + if (!cursor.cursorImpl.isProbablyExpired()) { + throw secondaryRefersToMissingPrimaryKey( + locker, secKey, priKey, + secResult.getExpirationTime()); + } + continue; + } + + final ForeignMultiKeyNullifier multiNullifier = + secondaryConfig.getForeignMultiKeyNullifier(); + + if (multiNullifier != null) { + + if (multiNullifier.nullifyForeignKey( + this, priKey, data, secKey)) { + + priCursor.putCurrent(data); + } + } else { + final ForeignKeyNullifier nullifier = + secondaryConfig.getForeignKeyNullifier(); + + if (nullifier.nullifyForeignKey(this, data)) { + priCursor.putCurrent(data); + } + } + } + } + } else { + /* Should never occur. */ + throw EnvironmentFailureException.unexpectedState(); + } + + secResult = cursor.retrieveNext( + secKey, priKey, LockMode.DEFAULT, cacheMode, + GetMode.NEXT_DUP); + } + } + } + + /** + * If either ImmutableSecondaryKey or ExtractFromPrimaryKeyOnly is + * configured, an update cannot change a secondary key. + * ImmutableSecondaryKey is a guarantee from the user meaning just that, + * and ExtractFromPrimaryKeyOnly also implies the secondary key cannot + * change because it is derived from the primary key which is immutable + * (like any other key). + */ + boolean updateMayChangeSecondary() { + return !secondaryConfig.getImmutableSecondaryKey() && + !secondaryConfig.getExtractFromPrimaryKeyOnly(); + } + + /** + * When false is returned, this allows optimizing for the case where a + * primary update operation can update secondaries without reading the + * primary data. + */ + static boolean needOldDataForUpdate( + final Collection secondaries) { + + if (secondaries == null) { + return false; + } + for (final SecondaryDatabase secDb : secondaries) { + if (secDb.updateMayChangeSecondary()) { + return true; + } + } + return false; + } + + /** + * When false is returned, this allows optimizing for the case where a + * primary delete operation can update secondaries without reading the + * primary data. 
+ */ + static boolean needOldDataForDelete( + final Collection<SecondaryDatabase> secondaries) { + + if (secondaries == null) { + return false; + } + for (final SecondaryDatabase secDb : secondaries) { + if (!secDb.secondaryConfig.getExtractFromPrimaryKeyOnly()) { + return true; + } + } + return false; + } + + /* A secondary DB has no secondaries of its own, by definition. */ + @Override + boolean hasSecondaryOrForeignKeyAssociations() { + return false; + } + + /** + * Utility to call SecondaryAssociation.getPrimary. + * + * Handles exceptions and does an important debugging check that can't be + * done at database open time: ensures that the same SecondaryAssociation + * instance is used for all associated DBs. + *

        + * Returns null if getPrimary returns null, so the caller must handle this + * possibility. Null normally means that a secondary read operation can + * skip the record. + */ + Database getPrimary(DatabaseEntry priKey) { + final Database priDb; + try { + priDb = secAssoc.getPrimary(priKey); + } catch (RuntimeException e) { + throw EnvironmentFailureException.unexpectedException( + "Exception from SecondaryAssociation.getPrimary", e); + } + if (priDb == null) { + return null; + } + if (priDb.secAssoc != secAssoc) { + throw new IllegalArgumentException( + "Primary and secondary have different SecondaryAssociation " + + "instances. Remember to configure the SecondaryAssociation " + + "on the primary database."); + } + return priDb; + } + + private DatabaseImpl checkReadable() { + final DatabaseImpl dbImpl = checkOpen(); + if (!isFullyPopulated) { + throw new IllegalStateException( + "Incremental population is currently enabled."); + } + return dbImpl; + } + + static UnsupportedOperationException notAllowedException() { + + return new UnsupportedOperationException( + "Operation not allowed on a secondary"); + } + + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + void trace(final Level level, final String methodName) { + if (logger.isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(methodName); + sb.append(" name=").append(getDebugName()); + sb.append(" primary=").append(primaryDatabase.getDebugName()); + + LoggerUtils.logMsg( + logger, getEnv(), level, sb.toString()); + } + } +} diff --git a/src/com/sleepycat/je/SecondaryIntegrityException.java b/src/com/sleepycat/je/SecondaryIntegrityException.java new file mode 100644 index 0000000..6540043 --- /dev/null +++ b/src/com/sleepycat/je/SecondaryIntegrityException.java @@ -0,0 +1,97 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when an integrity problem is detected while accessing a secondary + * database, including access to secondaries while writing to a primary + * database. Secondary integrity problems are normally caused by the use of + * secondaries without transactions. + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception. In addition, the corrupt index (secondary database) is marked + * as corrupt in memory. All subsequent access to the index will throw + * {@code SecondaryIntegrityException}. To correct the problem, the + * application may perform a full restore (an HA {@link + * com.sleepycat.je.rep.NetworkRestore} or restore from backup) or rebuild + * the corrupt index.
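+ *
+ * One way to rebuild a corrupt index is sketched below. This is a
+ * non-authoritative example: {@code env}, {@code primaryDb}, the database
+ * name and {@code MyKeyCreator} are assumed application objects.
+ * <pre>
+ *     secDb.close();
+ *     env.removeDatabase(null, "personByEmployer"); // drop the corrupt index
+ *     SecondaryConfig secConfig = new SecondaryConfig();
+ *     secConfig.setAllowCreate(true);
+ *     secConfig.setAllowPopulate(true); // re-populate from the primary
+ *     secConfig.setKeyCreator(new MyKeyCreator());
+ *     secDb = env.openSecondaryDatabase(
+ *         null, "personByEmployer", primaryDb, secConfig);
+ * </pre>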

        + * + *

        Some possible causes of a secondary integrity exception are listed + * below. Note that only the first item -- the use of a non-transactional + * store -- is applicable when using the {@link com.sleepycat.persist DPL}. + * All other items below do not apply to the use of the DPL, because the DPL + * ensures that secondary databases are configured and managed correctly.

        + *
          + *
1. The use of non-transactional databases or stores can cause secondary + * corruption as described in Special considerations for using + * Secondary Databases with and without Transactions. Secondary databases + * and indexes should always be used in conjunction with transactional + * databases and stores.
+ *
2. Secondary corruption can be caused by an incorrectly implemented + * secondary key creator method, for example, one which uses mutable state + * information or is not properly synchronized. When the DPL is not used, the + * application is responsible for correctly implementing the key creator.
+ *
3. Secondary corruption can be caused by failing to open a secondary + * database before writing to the primary database, by writing to a secondary + * database directly using a {@link Database} handle, or by truncating or + * removing a primary database without also truncating or removing all secondary + * databases. When the DPL is not used, the application is responsible for + * managing associated databases correctly.

          + *
        + * + * @since 4.0 + */ +public class SecondaryIntegrityException extends SecondaryReferenceException { + private static final long serialVersionUID = 1L; + + /** + * For internal use only. + * @hidden + */ + public SecondaryIntegrityException(Database secDb, + Locker locker, + String message, + String secDbName, + DatabaseEntry secKey, + DatabaseEntry priKey, + long expirationTime) { + super(locker, message, secDbName, secKey, priKey, expirationTime); + if (secDb != null) { + secDb.setCorrupted(this); + } + } + + /** + * For internal use only. + * @hidden + */ + private SecondaryIntegrityException(String message, + SecondaryIntegrityException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new SecondaryIntegrityException(msg, this); + } +} diff --git a/src/com/sleepycat/je/SecondaryKeyCreator.java b/src/com/sleepycat/je/SecondaryKeyCreator.java new file mode 100644 index 0000000..62406a0 --- /dev/null +++ b/src/com/sleepycat/je/SecondaryKeyCreator.java @@ -0,0 +1,136 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * The interface implemented for extracting single-valued secondary keys from + * primary records. + * + *

        The key creator object is specified by calling {@link + * SecondaryConfig#setKeyCreator SecondaryConfig.setKeyCreator}. The secondary + * database configuration is specified when calling {@link + * Environment#openSecondaryDatabase Environment.openSecondaryDatabase}.

        + * + *

        For example:

        + * + *
        + *     class MyKeyCreator implements SecondaryKeyCreator {
        + *         public boolean createSecondaryKey(SecondaryDatabase secondary,
        + *                                             DatabaseEntry key,
        + *                                             DatabaseEntry data,
        + *                                             DatabaseEntry result) {
        + *             //
        + *             // DO HERE: Extract the secondary key from the primary key and
        + *             // data, and set the secondary key into the result parameter.
        + *             //
        + *             return true;
        + *         }
        + *     }
        + *     ...
        + *     SecondaryConfig secConfig = new SecondaryConfig();
        + *     secConfig.setKeyCreator(new MyKeyCreator());
        + *     // Now pass secConfig to Environment.openSecondaryDatabase
        + * 
        + * + *

        Use this interface when zero or one secondary key is present in a single + * primary record, in other words, for many-to-one and one-to-one + * relationships. When more than one secondary key may be present (for + * many-to-many and one-to-many relationships), use the {@link + * SecondaryMultiKeyCreator} interface instead. The table below summarizes how + * to create all four variations of relationships.

        + *
        + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Relationship | Interface | Duplicates | Example
One-to-one | {@link SecondaryKeyCreator} | No | A person record with a unique social security number key.
Many-to-one | {@link SecondaryKeyCreator} | Yes | A person record with a non-unique employer key.
One-to-many | {@link SecondaryMultiKeyCreator} | No | A person record with multiple unique email address keys.
Many-to-many | {@link SecondaryMultiKeyCreator} | Yes | A person record with multiple non-unique organization keys.
        + * + *
        + * + *

To configure a database for duplicates, pass true to {@link + * DatabaseConfig#setSortedDuplicates}.
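+ *
+ * A one-line sketch, reusing the {@code secConfig} object from the example
+ * above:
+ * <pre>
+ *     secConfig.setSortedDuplicates(true); // needed for many-to-* indexes
+ * </pre>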

        + * + *

        WARNING: Key creator instances are shared by multiple threads + * and key creator methods are called without any special synchronization. + * Therefore, key creators must be thread safe. In general no shared state + * should be used and any caching of computed values must be done with proper + * synchronization.

        + */ +public interface SecondaryKeyCreator { + + /** + * Creates a secondary key entry, given a primary key and data entry. + * + *

        A secondary key may be derived from the primary key, primary data, or + * a combination of the primary key and data. For secondary keys that are + * optional, the key creator method may return false and the key/data pair + * will not be indexed. To ensure the integrity of a secondary database + * the key creator method must always return the same result for a given + * set of input parameters.
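+ *
+ * For an optional secondary key, the creator returns false when the field is
+ * absent. A sketch (the {@code extractEmailBytes} helper is hypothetical):
+ * <pre>
+ *     public boolean createSecondaryKey(SecondaryDatabase secondary,
+ *                                       DatabaseEntry key,
+ *                                       DatabaseEntry data,
+ *                                       DatabaseEntry result) {
+ *         byte[] email = extractEmailBytes(data);
+ *         if (email == null) {
+ *             return false; // no email in this record; do not index it
+ *         }
+ *         result.setData(email);
+ *         return true;
+ *     }
+ * </pre>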

        + * + *

        A {@code RuntimeException} may be thrown by this method if an error + * occurs attempting to create the secondary key. This exception will be + * thrown by the API method currently in progress, for example, a {@link + * Database#put put} method. However, this will cause the write operation + * to be incomplete. When databases are not configured to be + * transactional, caution should be used to avoid integrity problems. See + * Special considerations for + * using Secondary Databases with and without Transactions.

        + * + * @param secondary the database to which the secondary key will be + * added. This parameter is passed for informational purposes but is not + * commonly used. This parameter is always non-null. + * + * @param key the primary key entry. This parameter must not be modified + * by this method. This parameter is always non-null. + * + * @param data the primary data entry. This parameter must not be modified + * by this method. If {@link SecondaryConfig#setExtractFromPrimaryKeyOnly} + * is configured as {@code true}, the {@code data} param may be either null + * or non-null, and the implementation is expected to ignore it; otherwise, + * this parameter is always non-null. + * + * @param result the secondary key created by this method. This parameter + * is always non-null. + * + * @return true if a key was created, or false to indicate that the key is + * not present. + */ + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result); +} diff --git a/src/com/sleepycat/je/SecondaryMultiKeyCreator.java b/src/com/sleepycat/je/SecondaryMultiKeyCreator.java new file mode 100644 index 0000000..93e40b9 --- /dev/null +++ b/src/com/sleepycat/je/SecondaryMultiKeyCreator.java @@ -0,0 +1,142 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.Set; + +/** + * The interface implemented for extracting multi-valued secondary keys from + * primary records. + * + *

        The key creator object is specified by calling {@link + * SecondaryConfig#setMultiKeyCreator SecondaryConfig.setMultiKeyCreator}. The + * secondary database configuration is specified when calling {@link + * Environment#openSecondaryDatabase Environment.openSecondaryDatabase}.

        + * + *

        For example:

        + * + *
        + *     class MyMultiKeyCreator implements SecondaryMultiKeyCreator {
        + *         public void createSecondaryKeys(SecondaryDatabase secondary,
        + *                                         DatabaseEntry key,
        + *                                         DatabaseEntry data,
        + *                                         Set<DatabaseEntry> results) {
        + *             //
        + *             // DO HERE: Extract the secondary keys from the primary key and
        + *             // data.  For each key extracted, create a DatabaseEntry and add
        + *             // it to the results set.
        + *             //
        + *         }
        + *     }
        + *     ...
        + *     SecondaryConfig secConfig = new SecondaryConfig();
        + *     secConfig.setMultiKeyCreator(new MyMultiKeyCreator());
        + *     // Now pass secConfig to Environment.openSecondaryDatabase
        + * 
        + * + *

        Use this interface when any number of secondary keys may be present in a + * single primary record, in other words, for many-to-many and one-to-many + * relationships. When only zero or one secondary key is present (for + * many-to-one and one-to-one relationships) you may use the {@link + * SecondaryKeyCreator} interface instead. The table below summarizes how to + * create all four variations of relationships.

        + *
        + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Relationship | Interface | Duplicates | Example
One-to-one | {@link SecondaryKeyCreator} | No | A person record with a unique social security number key.
Many-to-one | {@link SecondaryKeyCreator} | Yes | A person record with a non-unique employer key.
One-to-many | {@link SecondaryMultiKeyCreator} | No | A person record with multiple unique email address keys.
Many-to-many | {@link SecondaryMultiKeyCreator} | Yes | A person record with multiple non-unique organization keys.
        + * + *
        + * + *

To configure a database for duplicates, pass true to {@link + * DatabaseConfig#setSortedDuplicates}.

        + * + *

        Note that SecondaryMultiKeyCreator may also be used for + * single key secondaries (many-to-one and one-to-one); in this case, at most a + * single key is added to the results set. + * SecondaryMultiKeyCreator is only slightly less efficient than + * {@link SecondaryKeyCreator} in that two or three temporary sets must be + * created to hold the results. @see SecondaryConfig

        + * + *

        WARNING: Key creator instances are shared by multiple threads + * and key creator methods are called without any special synchronization. + * Therefore, key creators must be thread safe. In general no shared state + * should be used and any caching of computed values must be done with proper + * synchronization.

        + */ +public interface SecondaryMultiKeyCreator { + + /** + * Creates a secondary key entry, given a primary key and data entry. + * + *

        A secondary key may be derived from the primary key, primary data, or + * a combination of the primary key and data. Zero or more secondary keys + * may be derived from the primary record and returned in the results + * parameter. To ensure the integrity of a secondary database the key + * creator method must always return the same results for a given set of + * input parameters.
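+ *
+ * For instance, indexing every organization a person belongs to might look
+ * like this (a sketch; {@code extractOrganizations} is a hypothetical helper
+ * returning raw key bytes):
+ * <pre>
+ *     for (byte[] org : extractOrganizations(data)) {
+ *         results.add(new DatabaseEntry(org));
+ *     }
+ * </pre>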

        + * + *

        A {@code RuntimeException} may be thrown by this method if an error + * occurs attempting to create the secondary key. This exception will be + * thrown by the API method currently in progress, for example, a {@link + * Database#put put} method. However, this will cause the write operation + * to be incomplete. When databases are not configured to be + * transactional, caution should be used to avoid integrity problems. See + * Special considerations for + * using Secondary Databases with and without Transactions.

+ * + * @param secondary the database to which the secondary key will be + * added. This parameter is passed for informational purposes but is not + * commonly used. This parameter is always non-null. + * + * @param key the primary key entry. This parameter must not be modified + * by this method. This parameter is always non-null. + * + * @param data the primary data entry. This parameter must not be modified + * by this method. If {@link SecondaryConfig#setExtractFromPrimaryKeyOnly} + * is configured as {@code true}, the {@code data} param may be either null + * or non-null, and the implementation is expected to ignore it; otherwise, + * this parameter is always non-null. + * + * @param results the set to contain the secondary key DatabaseEntry + * objects created by this method. + */ + public void createSecondaryKeys(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + Set<DatabaseEntry> results); +} diff --git a/src/com/sleepycat/je/SecondaryReferenceException.java b/src/com/sleepycat/je/SecondaryReferenceException.java new file mode 100644 index 0000000..2f3a691 --- /dev/null +++ b/src/com/sleepycat/je/SecondaryReferenceException.java @@ -0,0 +1,120 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.txn.Locker; + +/** + * Base class for exceptions thrown when a read or write operation fails + * because of a secondary constraint or integrity problem. Provides accessors + * for getting further information about the database and keys involved in the + * failure. See subclasses for more information. + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.
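+ *
+ * A typical handling pattern (an illustrative sketch; {@code txn},
+ * {@code primaryDb}, {@code key} and {@code data} are the application's own
+ * objects):
+ * <pre>
+ *     try {
+ *         primaryDb.put(txn, key, data);
+ *     } catch (SecondaryReferenceException e) {
+ *         txn.abort(); // the transaction is invalid and cannot commit
+ *         // e.getSecondaryDatabaseName(), e.getSecondaryKey() and
+ *         // e.getPrimaryKey() identify the offending index entry.
+ *     }
+ * </pre>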

        + * + * @see Special considerations + * for using Secondary Databases with and without Transactions + * + * @since 4.0 + */ +public abstract class SecondaryReferenceException + extends OperationFailureException { + + private static final long serialVersionUID = 1; + + private final String secDbName; + private final DatabaseEntry secKey; + private final DatabaseEntry priKey; + private final long expirationTime; + + /** + * For internal use only. + * @hidden + */ + public SecondaryReferenceException(Locker locker, + String message, + String secDbName, + DatabaseEntry secKey, + DatabaseEntry priKey, + long expirationTime) { + super(locker, true /*abortOnly*/, message, null /*cause*/); + this.secDbName = secDbName; + this.secKey = secKey; + this.priKey = priKey; + this.expirationTime = expirationTime; + + String expirationTimeMsg = "expiration: "; + + if (expirationTime != 0) { + expirationTimeMsg += TTL.formatExpirationTime(expirationTime); + } else { + expirationTimeMsg += "none"; + } + + addErrorMessage(expirationTimeMsg); + + if (locker.getEnvironment().getExposeUserData()) { + addErrorMessage("secDbName=" + secDbName); + } + }; + + /** + * For internal use only. + * @hidden + */ + SecondaryReferenceException(String message, + SecondaryReferenceException cause) { + super(message, cause); + this.secDbName = cause.secDbName; + this.secKey = cause.secKey; + this.priKey = cause.priKey; + this.expirationTime = cause.expirationTime; + } + + /** + * Returns the name of the secondary database being accessed during the + * failure. + */ + public String getSecondaryDatabaseName() { + return secDbName; + } + + /** + * Returns the secondary key being accessed during the failure. Note that + * in some cases, the returned primary key can be null. + */ + public DatabaseEntry getSecondaryKey() { + return secKey; + } + + /** + * Returns the primary key being accessed during the failure. Note that + * in some cases, the returned primary key can be null. + */ + public DatabaseEntry getPrimaryKey() { + return priKey; + } + + /** + * Returns the expiration time of the record being accessed during the + * failure. + * + * @since 7.0 + */ + public long getExpirationTime() { + return expirationTime; + } +} diff --git a/src/com/sleepycat/je/Sequence.java b/src/com/sleepycat/je/Sequence.java new file mode 100644 index 0000000..695e668 --- /dev/null +++ b/src/com/sleepycat/je/Sequence.java @@ -0,0 +1,612 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je; + +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHED_GETS; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHE_LAST; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHE_SIZE; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHE_VALUE; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_GETS; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_RANGE_MAX; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_RANGE_MIN; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_STORED_VALUE; + +import java.io.Closeable; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.dbi.SequenceStatDefinition; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; + +/** + * A Sequence handle is used to manipulate a sequence record in a + * database. Sequence handles are opened using the {@link + * com.sleepycat.je.Database#openSequence Database.openSequence} method. + */ +public class Sequence implements Closeable { + + private static final byte FLAG_INCR = ((byte) 0x1); + private static final byte FLAG_WRAP = ((byte) 0x2); + private static final byte FLAG_OVER = ((byte) 0x4); + + /* Allocation size for the record data. */ + private static final int MAX_DATA_SIZE = 50; + + /* Version of the format for fields stored in the sequence record. */ + private static final byte CURRENT_VERSION = 1; + + /* A sequence is a unique record in a database. */ + private final Database db; + private final DatabaseEntry key; + + /* Persistent fields. */ + private boolean wrapAllowed; + private boolean increment; + private boolean overflow; + private long rangeMin; + private long rangeMax; + private long storedValue; + + /* Handle-specific fields. */ + private final int cacheSize; + private long cacheValue; + private long cacheLast; + private int nGets; + private int nCachedGets; + private TransactionConfig autoCommitConfig; + private final Logger logger; + + /* + * The cache holds the range of values [cacheValue, cacheLast], which is + * the same as [cacheValue, storedValue) at the time the record is written. + * At store time, cacheLast is set to one before (after) storedValue. + * + * storedValue may be used by other Sequence handles with separate caches. + * storedValue is always the next value to be returned by any handle that + * runs out of cached values. + */ + + /** + * Opens a sequence handle, adding the sequence record if appropriate. + * + * @throws IllegalArgumentException via Database.openSequence. + * + * @throws IllegalStateException via Database.openSequence. + */ + Sequence(Database db, + Transaction txn, + DatabaseEntry key, + SequenceConfig config) + throws SequenceNotFoundException, SequenceExistsException { + + if (db.getDbImpl().getSortedDuplicates()) { + throw new UnsupportedOperationException + ("Sequences not supported in databases configured for " + + "duplicates"); + } + + SequenceConfig useConfig = (config != null) ? 
+ config : SequenceConfig.DEFAULT; + + if (useConfig.getRangeMin() >= useConfig.getRangeMax()) { + throw new IllegalArgumentException + ("Minimum sequence value must be less than the maximum"); + } + + if (useConfig.getInitialValue() > useConfig.getRangeMax() || + useConfig.getInitialValue() < useConfig.getRangeMin()) { + throw new IllegalArgumentException + ("Initial sequence value is out of range"); + } + + if (useConfig.getRangeMin() > + useConfig.getRangeMax() - useConfig.getCacheSize()) { + throw new IllegalArgumentException + ("The cache size is larger than the sequence range"); + } + + if (useConfig.getAutoCommitNoSync()) { + autoCommitConfig = + DbInternal.getDefaultTxnConfig(db.getEnvironment()).clone(); + autoCommitConfig.overrideDurability(Durability.COMMIT_NO_SYNC); + } else { + /* Use the environment's default transaction config. */ + autoCommitConfig = null; + } + + this.db = db; + this.key = copyEntry(key); + logger = db.getEnvironment().getNonNullEnvImpl().getLogger(); + + /* Perform an auto-commit transaction to create the sequence. */ + Locker locker = null; + Cursor cursor = null; + OperationStatus status = OperationStatus.NOTFOUND; + try { + locker = LockerFactory.getReadableLocker( + db, txn, false /*readCommitedIsolation*/); + + cursor = new Cursor(db, locker, null); + + boolean sequenceExists = readData(cursor, null); + boolean isWritableLocker = !db.getConfig().getTransactional() || + (locker.isTransactional() && + !DbInternal.getNonNullEnvImpl(db.getEnvironment()). + isReplicated()); + + if (sequenceExists) { + if (useConfig.getAllowCreate() && + useConfig.getExclusiveCreate()) { + throw new SequenceExistsException + ("ExclusiveCreate=true and the sequence record " + + "already exists."); + } + } else { + if (useConfig.getAllowCreate()) { + if (!isWritableLocker) { + if (cursor != null) { + cursor.close(); + } + locker.operationEnd(OperationStatus.SUCCESS); + + locker = LockerFactory.getWritableLocker + (db.getEnvironment(), + txn, + db.getDbImpl().isInternalDb(), + db.isTransactional(), + db.getDbImpl().isReplicated(), + autoCommitConfig); + cursor = new Cursor(db, locker, null); + } + + /* Get the persistent fields from the config. */ + rangeMin = useConfig.getRangeMin(); + rangeMax = useConfig.getRangeMax(); + increment = !useConfig.getDecrement(); + wrapAllowed = useConfig.getWrap(); + storedValue = useConfig.getInitialValue(); + + /* + * To avoid dependence on SerializableIsolation, try + * putNoOverwrite first. If it fails, then try to get an + * existing record. + */ + status = cursor.putNoOverwrite(key, makeData()); + + if (!readData(cursor, null)) { + /* A retry loop should be performed here. */ + throw new IllegalStateException + ("Sequence record removed during openSequence."); + } + status = OperationStatus.SUCCESS; + } else { + throw new SequenceNotFoundException + ("AllowCreate=false and the sequence record " + + "does not exist."); + } + } + } finally { + if (cursor != null) { + cursor.close(); + } + if (locker != null) { + locker.operationEnd(status); + } + } + + /* + * cacheLast is initialized such that the cache will be considered + * empty the first time get() is called. + */ + cacheSize = useConfig.getCacheSize(); + cacheValue = storedValue; + cacheLast = increment ? (storedValue - 1) : (storedValue + 1); + } + + /** + * Closes a sequence. Any unused cached values are lost. + * + *

        The sequence handle may not be used again after this method has + * been called, regardless of the method's success or failure.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + public void close() + throws DatabaseException { + + /* Defined only for DB compatibility and possible future use. */ + } + + /** + * Returns the next available element in the sequence and changes the + * sequence value by delta. The value of delta + * must be greater than zero. If there are enough cached values in the + * sequence handle then they will be returned. Otherwise the next value + * will be fetched from the database and incremented (decremented) by + * enough to cover the delta and the next batch of cached + * values. + * + * This method is synchronized to protect updating of the cached value, + * since multiple threads may share a single handle. Multiple handles for + * the same database/key may be used to increase concurrency.

        + * + *

        The txn handle must be null if the sequence handle was + * opened with a non-zero cache size.

        + * + *

        For maximum concurrency, a non-zero cache size should be specified + * prior to opening the sequence handle, the txn handle should + * be null, and {@link + * com.sleepycat.je.SequenceConfig#setAutoCommitNoSync + * SequenceConfig.setAutoCommitNoSync} should be called to disable log + * flushes.
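+ *
+ * Putting these recommendations together (a sketch; {@code db} is an open,
+ * transactional {@link Database} and the sequence key is illustrative):
+ * <pre>
+ *     SequenceConfig seqConfig = new SequenceConfig();
+ *     seqConfig.setAllowCreate(true);
+ *     seqConfig.setCacheSize(1000);
+ *     seqConfig.setAutoCommitNoSync(true);
+ *     Sequence seq = db.openSequence(
+ *         null, new DatabaseEntry("personIdSeq".getBytes()), seqConfig);
+ *     long nextId = seq.get(null, 1); // txn must be null: non-zero cache
+ * </pre>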

        + * + * @param txn For a transactional database, an explicit transaction may be + * specified, or null may be specified to use auto-commit. For a + * non-transactional database, null must be specified. + * + * @param delta the amount by which to increment or decrement the sequence + * + * @return the next available element in the sequence + * + * @throws SequenceOverflowException if the end of the sequence is reached + * and wrapping is not configured. + * + * @throws SequenceIntegrityException if the sequence record has been + * deleted. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalArgumentException if the delta is less than or equal to + * zero, or larger than the size of the sequence's range. + */ + public synchronized long get(Transaction txn, int delta) + throws DatabaseException { + + /* Check parameters, being careful of overflow. */ + if (delta <= 0) { + throw new IllegalArgumentException + ("Sequence delta must be greater than zero"); + } + if (rangeMin > rangeMax - delta) { + throw new IllegalArgumentException + ("Sequence delta is larger than the range"); + } + + /* Status variables for tracing. */ + boolean cached = true; + boolean wrapped = false; + + /* + * Determine whether we have exceeded the cache. The cache size is + * always <= Integer.MAX_VALUE, so we don't have to worry about + * overflow here as long as we subtract the two long values first. + */ + if ((increment && delta > ((cacheLast - cacheValue) + 1)) || + (!increment && delta > ((cacheValue - cacheLast) + 1))) { + + cached = false; + + /* + * We need to allocate delta or cacheSize values, whichever is + * larger, by incrementing or decrementing the stored value by + * adjust. + */ + int adjust = (delta > cacheSize) ? delta : cacheSize; + + /* Perform an auto-commit transaction to update the sequence. */ + Locker locker = null; + Cursor cursor = null; + OperationStatus status = OperationStatus.NOTFOUND; + try { + locker = LockerFactory.getWritableLocker + (db.getEnvironment(), + txn, + db.getDbImpl().isInternalDb(), + db.isTransactional(), + db.getDbImpl().isReplicated(), + // autoTxnIsReplicated + autoCommitConfig); + + cursor = new Cursor(db, locker, null); + + /* Get the existing record. */ + readDataRequired(cursor, LockMode.RMW); + + /* If we would have wrapped when not allowed, overflow. */ + if (overflow) { + throw new SequenceOverflowException + ("Sequence overflow " + storedValue); + } + + /* + * Handle wrapping. The range size can be larger than a long + * can hold, so to avoid arithmetic overflow we use BigInteger + * arithmetic. Since we are going to write, the BigInteger + * overhead is acceptable. + */ + BigInteger availBig; + if (increment) { + /* Available amount: rangeMax - storedValue */ + availBig = BigInteger.valueOf(rangeMax). + subtract(BigInteger.valueOf(storedValue)); + } else { + /* Available amount: storedValue - rangeMin */ + availBig = BigInteger.valueOf(storedValue). + subtract(BigInteger.valueOf(rangeMin)); + } + + if (availBig.compareTo(BigInteger.valueOf(adjust)) < 0) { + /* If availBig < adjust then availBig fits in an int. */ + int availInt = (int) availBig.longValue(); + if (availInt < delta) { + if (wrapAllowed) { + /* Wrap to the opposite range end point. */ + storedValue = increment ? rangeMin : rangeMax; + wrapped = true; + } else { + /* Signal an overflow next time. 
*/ + overflow = true; + adjust = 0; + } + } else { + + /* + * If the delta fits in the cache available, don't wrap + * just to allocate the full cacheSize; instead, + * allocate as much as is available. + */ + adjust = availInt; + } + } + + /* Negate the adjustment for decrementing. */ + if (!increment) { + adjust = -adjust; + } + + /* Set the stored value one past the cached amount. */ + storedValue += adjust; + + /* Write the new stored value. */ + cursor.put(key, makeData()); + status = OperationStatus.SUCCESS; + } finally { + if (cursor != null) { + cursor.close(); + } + if (locker != null) { + locker.operationEnd(status); + } + } + + /* The cache now contains the range: [cacheValue, storedValue) */ + cacheValue = storedValue - adjust; + cacheLast = storedValue + (increment ? (-1) : 1); + } + + /* Return the current value and increment/decrement it by delta. */ + long retVal = cacheValue; + if (increment) { + cacheValue += delta; + } else { + cacheValue -= delta; + } + + /* Increment stats. */ + nGets += 1; + if (cached) { + nCachedGets += 1; + } + + /* Trace this method at the FINEST level. */ + if (logger.isLoggable(Level.FINEST)) { + LoggerUtils.finest(logger, + db.getEnvironment().getNonNullEnvImpl(), + "Sequence.get" + " value=" + retVal + + " cached=" + cached + " wrapped=" + wrapped); + } + + return retVal; + } + + /** + * Returns the Database handle associated with this sequence. + * + * @return The Database handle associated with this sequence. + */ + public Database getDatabase() { + return db; + } + + /** + * Returns the DatabaseEntry used to open this sequence. + * + * @return The DatabaseEntry used to open this sequence. + */ + public DatabaseEntry getKey() { + return copyEntry(key); + } + + /** + * Returns statistical information about the sequence. + * + *

        In the presence of multiple threads or processes accessing an active + * sequence, the information returned by this method may be + * out-of-date.

        + * + *

        The getStats method cannot be transaction-protected. For this reason, + * it should be called in a thread of control that has no open cursors or + * active transactions.
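+ *
+ * A brief sketch ({@code seq} is an open sequence handle):
+ * <pre>
+ *     SequenceStats stats = seq.getStats(null); // null selects the defaults
+ *     System.out.println(stats);
+ * </pre>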

        + * + * @param config The statistics returned; if null, default statistics are + * returned. + * + * @return Sequence statistics. + * + * @throws SequenceIntegrityException if the sequence record has been + * deleted. + */ + public SequenceStats getStats(StatsConfig config) + throws DatabaseException { + + if (config == null) { + config = StatsConfig.DEFAULT; + } + + if (!config.getFast()) { + + /* + * storedValue may have been updated by another handle since it + * was last read by this handle. Fetch the last written value. + * READ_UNCOMMITTED must be used to avoid lock conflicts. + */ + Cursor cursor = db.openCursor(null, null); + try { + readDataRequired(cursor, LockMode.READ_UNCOMMITTED); + } finally { + cursor.close(); + } + } + + StatGroup stats = new StatGroup(SequenceStatDefinition.GROUP_NAME, + SequenceStatDefinition.GROUP_DESC); + new IntStat(stats, SEQUENCE_GETS, nGets); + new IntStat(stats, SEQUENCE_CACHED_GETS, nCachedGets); + new IntStat(stats, SEQUENCE_CACHE_SIZE, cacheSize); + new LongStat(stats, SEQUENCE_STORED_VALUE, storedValue); + new LongStat(stats, SEQUENCE_CACHE_VALUE, cacheValue); + new LongStat(stats, SEQUENCE_CACHE_LAST, cacheLast); + new LongStat(stats, SEQUENCE_RANGE_MIN, rangeMin); + new LongStat(stats, SEQUENCE_RANGE_MAX, rangeMax); + + SequenceStats seqStats = new SequenceStats(stats); + + if (config.getClear()) { + nGets = 0; + nCachedGets = 0; + } + + return seqStats; + } + + /** + * Reads persistent fields from the sequence record. Throws an exception + * if the key is not present in the database. + */ + private void readDataRequired(Cursor cursor, LockMode lockMode) + throws DatabaseException { + + if (!readData(cursor, lockMode)) { + throw new SequenceIntegrityException + ("The sequence record has been deleted while it is open."); + } + } + + /** + * Reads persistent fields from the sequence record. Returns false if the + * key is not present in the database. + */ + private boolean readData(Cursor cursor, LockMode lockMode) + throws DatabaseException { + + /* Fetch the sequence record. */ + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = cursor.getSearchKey(key, data, lockMode); + if (status != OperationStatus.SUCCESS) { + return false; + } + ByteBuffer buf = ByteBuffer.wrap(data.getData()); + + /* Get the persistent fields from the record data. */ + byte version = buf.get(); + byte flags = buf.get(); + boolean unpacked = (version < 1); + rangeMin = LogUtils.readLong(buf, unpacked); + rangeMax = LogUtils.readLong(buf, unpacked); + storedValue = LogUtils.readLong(buf, unpacked); + + increment = (flags & FLAG_INCR) != 0; + wrapAllowed = (flags & FLAG_WRAP) != 0; + overflow = (flags & FLAG_OVER) != 0; + + return true; + } + + /** + * Makes a storable database entry from the persistent fields. + */ + private DatabaseEntry makeData() { + + byte[] data = new byte[MAX_DATA_SIZE]; + ByteBuffer buf = ByteBuffer.wrap(data); + + byte flags = 0; + if (increment) { + flags |= FLAG_INCR; + } + if (wrapAllowed) { + flags |= FLAG_WRAP; + } + if (overflow) { + flags |= FLAG_OVER; + } + + buf.put(CURRENT_VERSION); + buf.put(flags); + LogUtils.writePackedLong(buf, rangeMin); + LogUtils.writePackedLong(buf, rangeMax); + LogUtils.writePackedLong(buf, storedValue); + + return new DatabaseEntry(data, 0, buf.position()); + } + + /** + * Returns a deep copy of the given database entry. 
+ */ + private DatabaseEntry copyEntry(DatabaseEntry entry) { + + int len = entry.getSize(); + byte[] data; + if (len == 0) { + data = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } else { + data = new byte[len]; + System.arraycopy + (entry.getData(), entry.getOffset(), data, 0, data.length); + } + + return new DatabaseEntry(data); + } +} diff --git a/src/com/sleepycat/je/SequenceConfig.java b/src/com/sleepycat/je/SequenceConfig.java new file mode 100644 index 0000000..6161bbf --- /dev/null +++ b/src/com/sleepycat/je/SequenceConfig.java @@ -0,0 +1,459 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Specifies the attributes of a sequence. + */ +public class SequenceConfig implements Cloneable { + + /** + * Default configuration used if null is passed to methods that create a + * cursor. + */ + public static final SequenceConfig DEFAULT = new SequenceConfig(); + + /* Parameters */ + private int cacheSize = 0; + private long rangeMin = Long.MIN_VALUE; + private long rangeMax = Long.MAX_VALUE; + private long initialValue = 0L; + + /* Flags */ + private boolean allowCreate; + private boolean decrement; + private boolean exclusiveCreate; + private boolean autoCommitNoSync; + private boolean wrap; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public SequenceConfig() { + } + + /** + * Configures the {@link com.sleepycat.je.Database#openSequence + * Database.openSequence} method to create the sequence if it does not + * already exist. + * + *

        The default value is false.

        + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @param allowCreate If true, configure the {@link + * com.sleepycat.je.Database#openSequence Database.openSequence} method to + * create the sequence if it does not already exist. + * + * @return this + */ + public SequenceConfig setAllowCreate(boolean allowCreate) { + setAllowCreateVoid(allowCreate); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setAllowCreateVoid(boolean allowCreate) { + this.allowCreate = allowCreate; + } + + /** + * Returns true if the {@link com.sleepycat.je.Database#openSequence + * Database.openSequence} method is configured to create the sequence if it + * does not already exist. + * + *

        This method may be called at any time during the life of the + * application.
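As a concrete illustration of the open-or-create flow configured here, a minimal caller-side sketch (the Database handle db, the DatabaseEntry key, and the use of a null transaction are assumptions for the example; openSequence, get, and close are the API methods this flag affects):

    SequenceConfig conf = new SequenceConfig();
    conf.setAllowCreate(true);                // create the record if it is absent
    Sequence seq = db.openSequence(null /*txn*/, key, conf);
    long next = seq.get(null /*txn*/, 1);     // allocate a single value
    seq.close();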

+ * + * @return true if the {@link com.sleepycat.je.Database#openSequence + * Database.openSequence} method is configured to create the sequence if it + * does not already exist. + */ + public boolean getAllowCreate() { + return allowCreate; + } + + /** + * Configures the number of elements cached by a sequence handle. + * + *

        The default value is zero.

        + * + *

        This method may be called at any time during the life of the + * application.
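To make the caching trade-off concrete (a sketch under the same assumed db and key handles): with a cache of 100, the handle should write the sequence record roughly once per 100 allocations, at the cost of skipping any unused cached values if the handle is closed early.

    SequenceConfig conf = new SequenceConfig()
        .setAllowCreate(true)
        .setCacheSize(100);                   // reserve blocks of 100 values per handle
    Sequence seq = db.openSequence(null, key, conf);
    for (int i = 0; i < 100; i++) {
        seq.get(null, 1);                     // after the first call, gets are served from the cache
    }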

+ * + * @param cacheSize The number of elements cached by a sequence handle. + * May not be larger than the size of the range defined by {@link + * #setRange}. + * + * @return this + */ + public SequenceConfig setCacheSize(int cacheSize) { + setCacheSizeVoid(cacheSize); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setCacheSizeVoid(int cacheSize) { + this.cacheSize = cacheSize; + } + + /** + * Returns the number of elements cached by a sequence handle. + * + *

        This method may be called at any time during the life of the + * application.

+ * + * @return The number of elements cached by a sequence handle. + */ + public int getCacheSize() { + return cacheSize; + } + + /** + * Specifies that the sequence should be decremented. + * + *

        The default value is false.

        + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @param decrement If true, specify that the sequence should be + * decremented. + * + * @return this + */ + public SequenceConfig setDecrement(boolean decrement) { + setDecrementVoid(decrement); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDecrementVoid(boolean decrement) { + this.decrement = decrement; + } + + /** + * Returns true if the sequence is configured to decrement. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the sequence is configured to decrement. + */ + public boolean getDecrement() { + return decrement; + } + + /** + * Configures the {@link com.sleepycat.je.Database#openSequence + * Database.openSequence} method to fail if the database already exists. + * + *

        The default value is false.

        + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @param exclusiveCreate If true, configure the {@link + * com.sleepycat.je.Database#openSequence Database.openSequence} method to + * fail if the database already exists. + * + * @return this + */ + public SequenceConfig setExclusiveCreate(boolean exclusiveCreate) { + setExclusiveCreateVoid(exclusiveCreate); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setExclusiveCreateVoid(boolean exclusiveCreate) { + this.exclusiveCreate = exclusiveCreate; + } + + /** + * Returns true if the {@link com.sleepycat.je.Database#openSequence + * Database.openSequence} method is configured to fail if the database + * already exists. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the {@link com.sleepycat.je.Database#openSequence + * Database.openSequence} method is configured to fail if the database + * already exists. + */ + public boolean getExclusiveCreate() { + return exclusiveCreate; + } + + /** + * Sets the initial value for a sequence. + * + *

        The default initial value is zero.

        + * + *

        This call is only effective when the sequence is being created.
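For example, a countdown sequence can combine the initial value with decrement (a hedged sketch reusing the assumed db and key handles; the 1000-then-999 behavior is the expected result, not quoted from the source):

    SequenceConfig conf = new SequenceConfig()
        .setAllowCreate(true)
        .setDecrement(true)
        .setInitialValue(1000L);              // honored only when the record is created
    Sequence seq = db.openSequence(null, key, conf);
    long v = seq.get(null, 1);                // expected: 1000, then 999 on the next call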

        + * + *

        This method may be called at any time during the life of the + * application.

+ * + * @param initialValue The initial value for a sequence. Must be within + * the range minimum and maximum values, inclusive. + * + * @return this + */ + public SequenceConfig setInitialValue(long initialValue) { + setInitialValueVoid(initialValue); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setInitialValueVoid(long initialValue) { + this.initialValue = initialValue; + } + + /** + * Returns the initial value for a sequence. + * + *

        This method may be called at any time during the life of the + * application.

+ * + * @return The initial value for a sequence. + */ + public long getInitialValue() { + return initialValue; + } + + /** + * Configures auto-commit operations on the sequence to not flush the + * transaction log. + * + *

        The default value is false.

        + * + *

        This method may be called at any time during the life of the + * application.

+ * + * @param autoCommitNoSync If true, configure auto-commit operations on + * the sequence to not flush the transaction log. + * + * @return this + */ + public SequenceConfig setAutoCommitNoSync(boolean autoCommitNoSync) { + setAutoCommitNoSyncVoid(autoCommitNoSync); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setAutoCommitNoSyncVoid(boolean autoCommitNoSync) { + this.autoCommitNoSync = autoCommitNoSync; + } + + /** + * Returns true if the auto-commit operations on the sequence are configured + * to not flush the transaction log. + * + *

        This method may be called at any time during the life of the + * application.

+ * + * @return true if the auto-commit operations on the sequence are configured + * to not flush the transaction log. + */ + public boolean getAutoCommitNoSync() { + return autoCommitNoSync; + } + + /** + * Configures a sequence range. This call is only effective when the + * sequence is being created. + * + *

        The default minimum is {@code Long.MIN_VALUE} and the default maximum + * is {@code Long.MAX_VALUE}.
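A bounded range might be configured as follows (sketch with the same assumed handles; the initial value is set explicitly because the default of zero would fall outside this range):

    SequenceConfig conf = new SequenceConfig()
        .setAllowCreate(true)
        .setRange(1L, 1000000L)               // values stay within [1, 1000000]
        .setInitialValue(1L);
    Sequence seq = db.openSequence(null, key, conf);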

        + * + * @param min The minimum value for the sequence. Must be less than max. + * + * @param max The maximum value for the sequence. Must be greater than + * min. + * + * @return this + */ + public SequenceConfig setRange(long min, long max) { + setRangeMin(min); + setRangeMax(max); + return this; + } + + /** + * @hidden + * Configures a sequence range minimum value. This call is only effective + * when the sequence is being created. + * + *

The default minimum is {@code Long.MIN_VALUE}.

        + * + * @param min The minimum value for the sequence. Must be less than max. + * + * @return this + */ + public SequenceConfig setRangeMin(long min) { + setRangeMinVoid(min); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setRangeMinVoid(long min) { + this.rangeMin = min; + } + + /** + * @hidden + * Configures a sequence range maximum value. This call is only effective + * when the sequence is being created. + * + *

The default maximum is {@code Long.MAX_VALUE}.

        + * + * @param max The maximum value for the sequence. Must be greater than + * min. + * + * @return this + */ + public SequenceConfig setRangeMax(long max) { + setRangeMaxVoid(max); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setRangeMaxVoid(long max) { + this.rangeMax = max; + } + + /** + * Returns the minimum value for the sequence. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return The minimum value for the sequence. + */ + public long getRangeMin() { + return rangeMin; + } + + /** + * Returns the maximum value for the sequence. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return The maximum value for the sequence. + */ + public long getRangeMax() { + return rangeMax; + } + + /** + * Specifies that the sequence should wrap around when it is incremented + * (decremented) past the specified maximum (minimum) value. + * + *

        The default value is false.

        + * + *

        This method may be called at any time during the life of the + * application.
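Wrapping is easiest to see with a tiny range (sketch, same assumed handles): a three-value sequence should yield 1, 2, 3, 1, ... with wrap enabled, whereas without it the fourth get() would hit the overflow case described later in this import.

    SequenceConfig conf = new SequenceConfig()
        .setAllowCreate(true)
        .setRange(1L, 3L)
        .setInitialValue(1L)
        .setWrap(true);                       // cycle back to rangeMin at the top
    Sequence seq = db.openSequence(null, key, conf);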

        + * + * @param wrap If true, specify that the sequence should wrap around when + * it is incremented (decremented) past the specified maximum (minimum) + * value. + * + * @return this + */ + public SequenceConfig setWrap(boolean wrap) { + setWrapVoid(wrap); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setWrapVoid(boolean wrap) { + this.wrap = wrap; + } + + /** + * Returns true if the sequence will wrap around when it is incremented + * (decremented) past the specified maximum (minimum) value. + * + *

        This method may be called at any time during the life of the + * application.

        + * + * @return true if the sequence will wrap around when it is incremented + * (decremented) past the specified maximum (minimum) value. + */ + public boolean getWrap() { + return wrap; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public SequenceConfig clone() { + try { + return (SequenceConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + return "allowCreate=" + allowCreate + + "\ncacheSize=" + cacheSize + + "\ndecrement=" + decrement + + "\nexclusiveCreate=" + exclusiveCreate + + "\ninitialValue=" + initialValue + + "\nautoCommitNoSync=" + autoCommitNoSync + + "\nrangeMin=" + rangeMin + + "\nrangeMax=" + rangeMax + + "\nwrap=" + wrap + + "\n"; + } +} diff --git a/src/com/sleepycat/je/SequenceConfigBeanInfo.java b/src/com/sleepycat/je/SequenceConfigBeanInfo.java new file mode 100644 index 0000000..2332181 --- /dev/null +++ b/src/com/sleepycat/je/SequenceConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class SequenceConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(SequenceConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(SequenceConfig.class); + } +} diff --git a/src/com/sleepycat/je/SequenceExistsException.java b/src/com/sleepycat/je/SequenceExistsException.java new file mode 100644 index 0000000..6e689b1 --- /dev/null +++ b/src/com/sleepycat/je/SequenceExistsException.java @@ -0,0 +1,55 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown by {@link Database#openSequence Database.openSequence} if the + * sequence record already exists and the {@code SequenceConfig + * ExclusiveCreate} parameter is true. + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.

        + * + * @since 4.0 + */ +public class SequenceExistsException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public SequenceExistsException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private SequenceExistsException(String message, + SequenceExistsException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new SequenceExistsException(msg, this); + } +} diff --git a/src/com/sleepycat/je/SequenceIntegrityException.java b/src/com/sleepycat/je/SequenceIntegrityException.java new file mode 100644 index 0000000..6bea284 --- /dev/null +++ b/src/com/sleepycat/je/SequenceIntegrityException.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown by {@link Sequence#get Sequence.get} if the sequence record has been + * deleted. + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.

        + * + * @since 4.0 + */ +public class SequenceIntegrityException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public SequenceIntegrityException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private SequenceIntegrityException(String message, + SequenceIntegrityException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new SequenceIntegrityException(msg, this); + } +} diff --git a/src/com/sleepycat/je/SequenceNotFoundException.java b/src/com/sleepycat/je/SequenceNotFoundException.java new file mode 100644 index 0000000..cfac6ef --- /dev/null +++ b/src/com/sleepycat/je/SequenceNotFoundException.java @@ -0,0 +1,55 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown by {@link Database#openSequence Database.openSequence} if the + * sequence record does not exist and the {@code SequenceConfig AllowCreate} + * parameter is false. + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.
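The open-time failures pair up with the config flags; a hedged handling sketch (db and key are assumed handles, and the default SequenceConfig leaves allowCreate false):

    try {
        Sequence seq = db.openSequence(null, key, new SequenceConfig());
    } catch (SequenceNotFoundException e) {
        // the record is absent and creation was not requested
    }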

        + * + * @since 4.0 + */ +public class SequenceNotFoundException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public SequenceNotFoundException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private SequenceNotFoundException(String message, + SequenceNotFoundException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new SequenceNotFoundException(msg, this); + } +} diff --git a/src/com/sleepycat/je/SequenceOverflowException.java b/src/com/sleepycat/je/SequenceOverflowException.java new file mode 100644 index 0000000..c63739f --- /dev/null +++ b/src/com/sleepycat/je/SequenceOverflowException.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +/** + * Thrown by {@link Sequence#get Sequence.get} if the end of the sequence is + * reached and wrapping is not configured. + * + *

        The {@link Transaction} handle is not invalidated as a result of + * this exception.

        + * + * @since 4.0 + */ +public class SequenceOverflowException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public SequenceOverflowException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private SequenceOverflowException(String message, + SequenceOverflowException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new SequenceOverflowException(msg, this); + } +} diff --git a/src/com/sleepycat/je/SequenceStats.java b/src/com/sleepycat/je/SequenceStats.java new file mode 100644 index 0000000..244a2e6 --- /dev/null +++ b/src/com/sleepycat/je/SequenceStats.java @@ -0,0 +1,127 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.Serializable; + +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHED_GETS; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHE_LAST; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHE_SIZE; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_CACHE_VALUE; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_GETS; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_RANGE_MAX; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_RANGE_MIN; +import static com.sleepycat.je.dbi.SequenceStatDefinition.SEQUENCE_STORED_VALUE; + +import com.sleepycat.je.utilint.StatGroup; + +/** + * A SequenceStats object is used to return sequence statistics. + */ +public class SequenceStats implements Serializable { + private static final long serialVersionUID = 1L; + + private StatGroup stats; + + /** + * @hidden + * Internal use only. + */ + public SequenceStats(StatGroup stats) { + this.stats = stats; + } + + /** + * Returns the number of times that Sequence.get was called successfully. + * + * @return number of times that Sequence.get was called successfully. + */ + public int getNGets() { + return stats.getInt(SEQUENCE_GETS); + } + + /** + * Returns the number of times that Sequence.get was called and a cached + * value was returned. + * + * @return number of times that Sequence.get was called and a cached + * value was returned. + */ + public int getNCachedGets() { + return stats.getInt(SEQUENCE_CACHED_GETS); + } + + /** + * Returns the current value of the sequence in the database. + * + * @return current value of the sequence in the database. + */ + public long getCurrent() { + return stats.getLong(SEQUENCE_STORED_VALUE); + } + + /** + * Returns the current cached value of the sequence. + * + * @return current cached value of the sequence. + */ + public long getValue() { + return stats.getLong(SEQUENCE_CACHE_VALUE); + } + + /** + * Returns the last cached value of the sequence. + * + * @return last cached value of the sequence. 
+ */ + public long getLastValue() { + return stats.getLong(SEQUENCE_CACHE_LAST); + } + + /** + * Returns the minimum permitted value of the sequence. + * + * @return minimum permitted value of the sequence. + */ + public long getMin() { + return stats.getLong(SEQUENCE_RANGE_MIN); + } + + /** + * Returns the maximum permitted value of the sequence. + * + * @return maximum permitted value of the sequence. + */ + public long getMax() { + return stats.getLong(SEQUENCE_RANGE_MAX); + } + + /** + * Returns the number of values that will be cached in this handle. + * + * @return number of values that will be cached in this handle. + */ + public int getCacheSize() { + return stats.getInt(SEQUENCE_CACHE_SIZE); + } + + @Override + public String toString() { + return stats.toString(); + } + + public String toStringVerbose() { + return stats.toStringVerbose(); + } +} diff --git a/src/com/sleepycat/je/StatsConfig.java b/src/com/sleepycat/je/StatsConfig.java new file mode 100644 index 0000000..f75715c --- /dev/null +++ b/src/com/sleepycat/je/StatsConfig.java @@ -0,0 +1,197 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.PrintStream; + +/** + * Specifies the attributes of a statistics retrieval operation. + */ +public class StatsConfig implements Cloneable { + + /** + * A convenience instance embodying the default configuration. + */ + public static final StatsConfig DEFAULT = new StatsConfig(); + + /** + * A convenience instance for which setClear(true) has been called, and + * all other properties have default values. + */ + public static final StatsConfig CLEAR = new StatsConfig().setClear(true); + + private boolean fast = false; + private boolean clear = false; + private PrintStream showProgressStream = null; + private int showProgressInterval = 0; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public StatsConfig() { + } + + /** + * Configures the statistics operation to return only the values which do + * not incur some performance penalty. + * + *

        The default value is false.

        + * + *

        For example, skip stats that require a traversal of the database or + * in-memory tree, or which lock down the lock table for a period of + * time.
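Applied to the sequence statistics shown earlier in this import (sketch; seq is an assumed open Sequence handle): a fast request skips the record re-read, while the CLEAR convenience instance also zeroes the get counters after reporting them.

    SequenceStats quick = seq.getStats(new StatsConfig().setFast(true));
    SequenceStats full = seq.getStats(StatsConfig.CLEAR);   // resets nGets/nCachedGets
    System.out.println("stored=" + full.getCurrent() + " gets=" + full.getNGets());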

        + * + * @param fast If set to true, configure the statistics operation to return + * only the values which do not incur some performance penalty. + * + * @return this + */ + public StatsConfig setFast(boolean fast) { + setFastVoid(fast); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setFastVoid(boolean fast) { + this.fast = fast; + } + + /** + * Returns true if the statistics operation is configured to return only + * the values which do not require expensive actions. + * + * @return true if the statistics operation is configured to return only + * the values which do not require expensive actions. + */ + public boolean getFast() { + return fast; + } + + /** + * Configures the statistics operation to reset statistics after they are + * returned. The default value is false. + * + * @param clear If set to true, configure the statistics operation to + * reset statistics after they are returned. + * + * @return this + */ + public StatsConfig setClear(boolean clear) { + setClearVoid(clear); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setClearVoid(boolean clear) { + this.clear = clear; + } + + /** + * Returns true if the statistics operation is configured to reset + * statistics after they are returned. + * + * @return true if the statistics operation is configured to reset + * statistics after they are returned. + */ + public boolean getClear() { + return clear; + } + + /** + * Configures the statistics operation to display progress to the + * PrintStream argument. The accumulated statistics will be displayed + * every N records, where N is the value of showProgressInterval. + * + * @return this + */ + public StatsConfig setShowProgressStream(PrintStream showProgressStream) { + setShowProgressStreamVoid(showProgressStream); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setShowProgressStreamVoid(PrintStream showProgressStream) { + this.showProgressStream = showProgressStream; + } + + /** + * Returns the PrintStream on which the progress messages will be displayed + * during long running statistics gathering operations. + */ + public PrintStream getShowProgressStream() { + return showProgressStream; + } + + /** + * When the statistics operation is configured to display progress the + * showProgressInterval is the number of LNs between each progress report. + * + * @return this + */ + public StatsConfig setShowProgressInterval(int showProgressInterval) { + setShowProgressIntervalVoid(showProgressInterval); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setShowProgressIntervalVoid(int showProgressInterval) { + this.showProgressInterval = showProgressInterval; + } + + /** + * Returns the showProgressInterval value, if set. + */ + public int getShowProgressInterval() { + return showProgressInterval; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public StatsConfig clone() { + try { + return (StatsConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. 
+ */ + @Override + public String toString() { + return "fast=" + fast + + "\nclear=" + clear + + "\nshowProgressStream=" + showProgressStream + + "\nshowProgressInterval=" + showProgressInterval + + "\n"; + } +} diff --git a/src/com/sleepycat/je/StatsConfigBeanInfo.java b/src/com/sleepycat/je/StatsConfigBeanInfo.java new file mode 100644 index 0000000..079c394 --- /dev/null +++ b/src/com/sleepycat/je/StatsConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class StatsConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(StatsConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(StatsConfig.class); + } +} diff --git a/src/com/sleepycat/je/ThreadInterruptedException.java b/src/com/sleepycat/je/ThreadInterruptedException.java new file mode 100644 index 0000000..5f9be11 --- /dev/null +++ b/src/com/sleepycat/je/ThreadInterruptedException.java @@ -0,0 +1,107 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Thrown when {@code java.lang.InterruptedException} (a thread interrupt) or + * {@code java.nio.channels.ClosedChannelException} (which also results from a + * thread interrupt) occurs in any JE method. This occurs when the application, + * or perhaps a library or container that the application is using, calls + * {@link Thread#interrupt}. + * + *

        Calling {@code Thread.interrupt} is not recommended for an active JE + * thread if the goal is to stop the thread or do thread coordination. If you + * interrupt a thread that is executing a JE operation, the state of the + * environment will be undefined. That's because JE might have been in the + * middle of I/O activity when the operation was aborted midstream, and it + * becomes very difficult to detect and handle all possible outcomes.

        + * + *

        When JE detects the interrupt, it will mark the environment invalid and + * will throw a {@code ThreadInterruptedException}. This tells you that you + * must close the environment and re-open it before using it again. This is + * necessary, because if JE didn't throw {@code ThreadInterruptedException}, it + * is very likely that you would get some other exception that is less + * meaningful, or simply see corrupted data.

        + * + *

        Instead, applications should use other mechanisms like {@code + * Object.notify} and {@code wait} to coordinate threads. For example, use a + * {@code keepRunning} variable of some kind in each thread. Check this + * variable in your threads, and return from the thread when it is false. Set + * it to false when you want to stop the thread. If this thread is waiting to + * be woken up to do another unit of work, use {@code Object.notify} to wake it + * up. This is the recommended technique.
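The recommended pattern reads roughly as follows in code (an illustrative sketch only; Worker, shutdown, and doUnitOfWork are invented names):

    class Worker extends Thread {
        private volatile boolean keepRunning = true;

        @Override
        public void run() {
            while (keepRunning) {
                doUnitOfWork();               // JE operations run here, never interrupted
            }
        }

        void shutdown() {                     // called instead of Thread.interrupt()
            keepRunning = false;
        }

        private void doUnitOfWork() { /* JE reads and writes */ }
    }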

        + * + *

        However, if the use of {@code Thread.interrupt} is unavoidable, be sure + * to use it only when shutting down the environment. In this situation, + * the {@code ThreadInterruptedException} should be expected. Note that + * by shutting down the environment abnormally, recovery time will be longer + * when the environment is subsequently opened, because a final checkpoint was + * not performed.

        + * + *

        Existing {@link Environment} handles are invalidated as a result of this + * exception.

        + * + * @since 4.0 + */ +public class ThreadInterruptedException extends EnvironmentFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public ThreadInterruptedException(EnvironmentImpl env, Throwable t) { + super(env, EnvironmentFailureReason.THREAD_INTERRUPTED, t); + } + + /** + * For internal use only. + * @hidden + */ + public ThreadInterruptedException(EnvironmentImpl env, String message) { + super(env, EnvironmentFailureReason.THREAD_INTERRUPTED, message); + } + + /** + * For internal use only. + * @hidden + */ + public ThreadInterruptedException(EnvironmentImpl env, + String message, + Throwable t) { + super(env, EnvironmentFailureReason.THREAD_INTERRUPTED, message, t); + } + + /** + * For internal use only. + * @hidden + */ + private ThreadInterruptedException(String message, + ThreadInterruptedException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public EnvironmentFailureException wrapSelf(String msg) { + return new ThreadInterruptedException(msg, this); + } +} diff --git a/src/com/sleepycat/je/Transaction.java b/src/com/sleepycat/je/Transaction.java new file mode 100644 index 0000000..516b690 --- /dev/null +++ b/src/com/sleepycat/je/Transaction.java @@ -0,0 +1,949 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.PropUtil; + +/** + * The Transaction object is the handle for a transaction. Methods off the + * transaction handle are used to configure, abort and commit the transaction. + * Transaction handles are provided to other Berkeley DB methods in order to + * transactionally protect those operations. + * + *

        A single Transaction may be used to protect operations for any number of + * Databases in a given environment. However, a single Transaction may not be + * used for operations in more than one distinct environment.

        + * + *

Transaction handles are free-threaded; transaction handles may be used + * concurrently by multiple threads. Once the {@link Transaction#abort + * Transaction.abort} or {@link Transaction#commit Transaction.commit} method + * is called, the handle may not be accessed again, regardless of the success + * or failure of the method, with one exception: the {@code abort} method may + * be called any number of times to simplify error handling.

        + * + *

        To obtain a transaction with default attributes:

        + * + *
        + *     Transaction txn = myEnvironment.beginTransaction(null, null);
        + * 
        + * + *

        To customize the attributes of a transaction:

        + * + *
        + *     TransactionConfig config = new TransactionConfig();
        + *     config.setReadUncommitted(true);
        + *     Transaction txn = myEnvironment.beginTransaction(null, config);
        + * 
        + */ +public class Transaction { + + /** + * The current state of the transaction. + * + * @since 5.0.48 + */ + public enum State { + + /** + * The transaction has not been committed or aborted, and can be used + * for performing operations. This state is also indicated if {@link + * #isValid} returns true. For all other states, {@link #isValid} will + * return false. + */ + OPEN, + + /** + * An exception was thrown by the {@code commit} method due to an error + * that occurred while attempting to make the transaction durable. The + * transaction may or may not be locally durable, according to the + * {@link Durability#getLocalSync local SyncPolicy} requested. + *

        + * This is an unusual situation and is normally due to a system + * failure, storage device failure, disk full condition, thread + * interrupt, or a bug of some kind. When a transaction is in this + * state, the Environment will have been {@link Environment#isValid() + * invalidated} by the error. + *

        + * In a replicated environment, a transaction in this state is not + * transferred to replicas. If it turns out that the transaction is + * indeed durable, it will be transferred to replicas via normal + * replication mechanisms when the Environment is re-opened. + *

        + * When the {@code commit} method throws an exception and the + * transaction is in the {@code POSSIBLY_COMMITTED} state, some + * applications may wish to perform a data query to determine whether + * the transaction is durable or not. Note that in the event of a + * system level failure, the reads themselves may be unreliable, e.g. + * the data may be in the file system cache but not on disk. Other + * applications may wish to repeat the transaction unconditionally, + * after resolving the error condition, particularly when the set of + * operations in the transaction is designed to be idempotent. + */ + POSSIBLY_COMMITTED, + + /** + * The transaction has been committed and is locally durable according + * to the {@link Durability#getLocalSync local SyncPolicy} requested. + *

        + * Note that a transaction may be in this state even when an exception + * is thrown by the {@code commit} method. For example, in a + * replicated environment, an {@link + * com.sleepycat.je.rep.InsufficientAcksException} may be thrown after + * the transaction is committed locally. + */ + COMMITTED, + + /** + * The transaction has been invalidated by an exception and cannot be + * committed. See {@link OperationFailureException} for a description + * of how a transaction can become invalid. The application is + * responsible for aborting the transaction. + */ + MUST_ABORT, + + /** + * The transaction has been aborted. + */ + ABORTED, + } + + private Txn txn; + private final Environment env; + private final long id; + private String name; + + /* + * It's set upon a successful updating replicated commit and identifies the + * VLSN associated with the commit entry. + */ + private CommitToken commitToken = null; + + /* + * Is null until setTxnNull is called, and then it holds the state at the + * time the txn was closed. + */ + private State finalState = null; + + /* + * Commit and abort methods are synchronized to prevent them from running + * concurrently with operations using the transaction. See + * Cursor.getTxnSynchronizer. + */ + + /** + * For internal use. + * @hidden + * Creates a transaction. + */ + protected Transaction(Environment env, Txn txn) { + this.env = env; + this.txn = txn; + txn.setTransaction(this); + + /* + * Copy the id to this wrapper object so the id will be available + * after the transaction is closed and the txn field is nulled. + */ + this.id = txn.getId(); + } + + /** + * Cause an abnormal termination of the transaction. + * + *

        The log is played backward, and any necessary undo operations are + * done. Before Transaction.abort returns, any locks held by the + * transaction will have been released.

        + * + *

        In the case of nested transactions, aborting a parent transaction + * causes all children (unresolved or not) of the parent transaction to be + * aborted.

        + * + *

        All cursors opened within the transaction must be closed before the + * transaction is aborted.

        + * + *

        After this method has been called, regardless of its return, the + * {@link Transaction} handle may not be accessed again, with one + * exception: the {@code abort} method itself may be called any number of + * times to simplify error handling.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the environment has been closed, or + * cursors associated with the transaction are still open. + */ + public synchronized void abort() + throws DatabaseException { + + try { + + /* + * If the transaction is already closed, do nothing. Do not call + * checkOpen in order to support any number of calls to abort(). + */ + if (txn == null) { + return; + } + + /* + * Check env only after checking for closed txn, to mimic close() + * behavior for Cursors, etc, and avoid unnecessary exception + * handling. [#21264] + */ + checkEnv(); + + env.removeReferringHandle(this); + txn.abort(); + + /* Remove reference to internal txn, so we can reclaim memory. */ + setTxnNull(); + } catch (Error E) { + DbInternal.getNonNullEnvImpl(env).invalidate(E); + throw E; + } + } + + /** + * Return the transaction's unique ID. + * + * @return The transaction's unique ID. + */ + public long getId() { + return id; + } + + /** + * This method is intended for use with a replicated environment. + *

        + * It returns the commitToken associated with a successful replicated + * commit. A null value is returned if the txn was not associated with a + * replicated environment, or the txn did not result in any changes to the + * environment. This method should only be called after the transaction + * has finished. + *

        + * This method is typically used in conjunction with the + * CommitPointConsistencyPolicy. + * + * @return the token used to identify the replicated commit. Return null if + * the transaction has aborted, or has committed without making any + * updates. + * + * @throws IllegalStateException if the method is called before the + * transaction has committed or aborted. + * + * @see com.sleepycat.je.rep.CommitPointConsistencyPolicy + */ + public CommitToken getCommitToken() + throws IllegalStateException { + + if (txn == null) { + + /* + * The commit token is only legitimate after the transaction is + * closed. A null txn field means the transaction is closed. + */ + return commitToken; + } + + throw new IllegalStateException + ("This transaction is still in progress and a commit token " + + "is not available"); + } + + /** + * End the transaction. If the environment is configured for synchronous + * commit, the transaction will be committed synchronously to stable + * storage before the call returns. This means the transaction will + * exhibit all of the ACID (atomicity, consistency, isolation, and + * durability) properties. + * + *

        If the environment is not configured for synchronous commit, the + * commit will not necessarily have been committed to stable storage before + * the call returns. This means the transaction will exhibit the ACI + * (atomicity, consistency, and isolation) properties, but not D + * (durability); that is, database integrity will be maintained, but it is + * possible this transaction may be undone during recovery.

        + * + *

        All cursors opened within the transaction must be closed before the + * transaction is committed.

        + * + *

        If the method encounters an error, the transaction will have been aborted when the call + * returns.

        + * + *

        After this method has been called, regardless of its return, the + * {@link Transaction} handle may not be accessed again, with one + * exception: the {@code abort} method may be called any number of times + * to simplify error handling.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the master + * in a replicated environment could not contact a quorum of replicas as + * determined by the {@link ReplicaAckPolicy}. + * + * @throws com.sleepycat.je.rep.InsufficientAcksException if the master in + * a replicated environment did not receive enough replica acknowledgments, + * although the commit succeeded locally. + * + * @throws com.sleepycat.je.rep.ReplicaWriteException if a write operation + * was performed with this transaction, but this node is now a Replica. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed, or cursors associated with the transaction are still open. + */ + public synchronized void commit() + throws DatabaseException { + + try { + checkEnv(); + checkOpen(); + env.removeReferringHandle(this); + txn.commit(); + commitToken = txn.getCommitToken(); + /* Remove reference to internal txn, so we can reclaim memory. */ + setTxnNull(); + } catch (Error E) { + DbInternal.getNonNullEnvImpl(env).invalidate(E); + throw E; + } + } + + /** + * End the transaction using the specified durability requirements. This + * requirement overrides any default durability requirements associated + * with the environment. If the durability requirements cannot be satisfied, + * an exception is thrown to describe the problem. Please see + * {@link Durability} for specific exceptions that could result when the + * durability requirements cannot be satisfied. + * + *

        All cursors opened within the transaction must be closed before the + * transaction is committed.

        + * + *

        If the method encounters an error, the transaction will have been aborted when the call + * returns.

        + * + *

        After this method has been called, regardless of its return, the + * {@link Transaction} handle may not be accessed again, with one + * exception: the {@code abort} method may be called any number of times + * to simplify error handling.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.
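A hedged sketch of a commit with an explicit durability requirement (env is an assumed open Environment; the three-argument Durability constructor mirrors the COMMIT_SYNC definition, and the abort-in-finally idiom relies on abort() being callable repeatedly, as documented above):

    Transaction txn = env.beginTransaction(null, null);
    boolean committed = false;
    try {
        // ... transactional reads and writes ...
        txn.commit(new Durability(Durability.SyncPolicy.SYNC,
                                  Durability.SyncPolicy.NO_SYNC,
                                  Durability.ReplicaAckPolicy.SIMPLE_MAJORITY));
        committed = true;
    } finally {
        if (!committed) {
            txn.abort();                      // safe even if commit already closed the txn
        }
    }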

+ * + * @param durability the durability requirements for this transaction + * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the master + * in a replicated environment could not contact enough replicas to + * initiate the commit. + * + * @throws com.sleepycat.je.rep.InsufficientAcksException if the master in + * a replicated environment did not receive enough replica acknowledgments, + * although the commit succeeded locally. + * + * @throws com.sleepycat.je.rep.ReplicaWriteException if a write operation + * was performed with this transaction, but this node is now a Replica. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed, or cursors associated with the transaction are still open. + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public synchronized void commit(Durability durability) + throws DatabaseException { + + doCommit(durability, false /* explicitSync */); + } + + /** + * End the transaction, writing to stable storage and committing + * synchronously. This means the transaction will exhibit all of the ACID + * (atomicity, consistency, isolation, and durability) properties. + * + *

        This behavior is the default for database environments unless + * otherwise configured using the {@link + * com.sleepycat.je.EnvironmentConfig#setTxnNoSync + * EnvironmentConfig.setTxnNoSync} method. This behavior may also be set + * for a single transaction using the {@link + * com.sleepycat.je.Environment#beginTransaction + * Environment.beginTransaction} method. Any value specified to this + * method overrides both of those settings.

        + * + *

        All cursors opened within the transaction must be closed before the + * transaction is committed.

        + * + *

        If the method encounters an error, the transaction will have been aborted when the call + * returns.

        + * + *

        After this method has been called, regardless of its return, the + * {@link Transaction} handle may not be accessed again, with one + * exception: the {@code abort} method may be called any number of times + * to simplify error handling.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

+ * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the master + * in a replicated environment could not contact enough replicas to + * initiate the commit. + * + * @throws com.sleepycat.je.rep.InsufficientAcksException if the master in + * a replicated environment did not receive enough replica acknowledgments, + * although the commit succeeded locally. + * + * @throws com.sleepycat.je.rep.ReplicaWriteException if a write operation + * was performed with this transaction, but this node is now a Replica. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed, or cursors associated with the transaction are still open. + */ + public synchronized void commitSync() + throws DatabaseException { + + doCommit(Durability.COMMIT_SYNC, true /* explicitSync */); + } + + /** + * End the transaction, not writing to stable storage and not committing + * synchronously. This means the transaction will exhibit the ACI + * (atomicity, consistency, and isolation) properties, but not D + * (durability); that is, database integrity will be maintained, but it is + * possible this transaction may be undone during recovery. + * + *

        This behavior may be set for a database environment using the {@link + * com.sleepycat.je.EnvironmentConfig#setTxnNoSync + * EnvironmentConfig.setTxnNoSync} method or for a single transaction using + * the {@link com.sleepycat.je.Environment#beginTransaction + * Environment.beginTransaction} method. Any value specified to this + * method overrides both of those settings.

        + * + *

        All cursors opened within the transaction must be closed before the + * transaction is committed.

        + * + *

        If the method encounters an error, the transaction will have been aborted when the call + * returns.

        + * + *

        After this method has been called, regardless of its return, the + * {@link Transaction} handle may not be accessed again, with one + * exception: the {@code abort} method may be called any number of times + * to simplify error handling.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

+ * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the master + * in a replicated environment could not contact enough replicas to + * initiate the commit. + * + * @throws com.sleepycat.je.rep.InsufficientAcksException if the master in + * a replicated environment did not receive enough replica acknowledgments, + * although the commit succeeded locally. + * + * @throws com.sleepycat.je.rep.ReplicaWriteException if a write operation + * was performed with this transaction, but this node is now a Replica. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed, or cursors associated with the transaction are still open. + */ + public synchronized void commitNoSync() + throws DatabaseException { + + doCommit(Durability.COMMIT_NO_SYNC, true /* explicitSync */); + } + + /** + * End the transaction, writing to stable storage but not committing + * synchronously. This means the transaction will exhibit the ACI + * (atomicity, consistency, and isolation) properties, but not D + * (durability); that is, database integrity will be maintained, but it is + * possible this transaction may be undone during recovery. + * + *

        This behavior is the default for database environments unless + * otherwise configured using the {@link + * com.sleepycat.je.EnvironmentConfig#setTxnNoSync + * EnvironmentConfig.setTxnNoSync} method. This behavior may also be set + * for a single transaction using the {@link + * com.sleepycat.je.Environment#beginTransaction + * Environment.beginTransaction} method. Any value specified to this + * method overrides both of those settings.

        + * + *

        All cursors opened within the transaction must be closed before the + * transaction is committed.

        + * + *

        If the method encounters an error, the transaction will have been aborted when the call + * returns.

        + * + *

        After this method has been called, regardless of its return, the + * {@link Transaction} handle may not be accessed again, with one + * exception: the {@code abort} method may be called any number of times + * to simplify error handling.

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

+ * + * @throws com.sleepycat.je.rep.InsufficientReplicasException if the master + * in a replicated environment could not contact enough replicas to + * initiate the commit. + * + * @throws com.sleepycat.je.rep.InsufficientAcksException if the master in + * a replicated environment did not receive enough replica acknowledgments, + * although the commit succeeded locally. + * + * @throws com.sleepycat.je.rep.ReplicaWriteException if a write operation + * was performed with this transaction, but this node is now a Replica. + * + * @throws OperationFailureException if this exception occurred earlier and + * caused the transaction to be invalidated. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed, or cursors associated with the transaction are still open. + */ + public synchronized void commitWriteNoSync() + throws DatabaseException { + + doCommit(Durability.COMMIT_WRITE_NO_SYNC, true /* explicitSync */); + } + + /** + * For internal use. + * @hidden + */ + public boolean getPrepared() { + return txn.getPrepared(); + } + + /** + * Perform error checking and invoke the commit on Txn. + * + * @param durability the durability to use for the commit + * @param explicitSync true if the method was invoked from one of the + * sync-specific APIs, false if durability was used explicitly. This + * parameter exists solely to support mixed mode API usage checks. + * + * @throws IllegalArgumentException via commit(Durability) + */ + private void doCommit(Durability durability, boolean explicitSync) { + try { + checkEnv(); + checkOpen(); + env.removeReferringHandle(this); + if (explicitSync) { + /* A sync-specific API was invoked. */ + if (txn.getExplicitDurabilityConfigured()) { + throw new IllegalArgumentException + ("Mixed use of deprecated durability API for the " + + "transaction commit with the new durability API for" + + " TransactionConfig or MutableEnvironmentConfig"); + } + } else if (txn.getExplicitSyncConfigured()) { + /* Durability was explicitly configured for commit */ + throw new IllegalArgumentException + ("Mixed use of new durability API for the " + + "transaction commit with deprecated durability API for" + + " TransactionConfig or MutableEnvironmentConfig"); + } + txn.commit(durability); + commitToken = txn.getCommitToken(); + /* Remove reference to internal txn, so we can reclaim memory. */ + setTxnNull(); + } catch (Error E) { + DbInternal.getNonNullEnvImpl(env).invalidate(E); + throw E; + } + } + + /** + * Returns the timeout value for the transaction lifetime. + * + *

If {@link #setTxnTimeout(long,TimeUnit)} has not been called to configure the timeout, the environment configuration value ({@link EnvironmentConfig#TXN_TIMEOUT}) is returned.

        + * + * @param unit the {@code TimeUnit} of the returned value. May not be null. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed. + * + * @throws IllegalArgumentException if the unit is null. + * + * @since 4.0 + */ + public long getTxnTimeout(TimeUnit unit) + throws EnvironmentFailureException, + IllegalStateException, + IllegalArgumentException { + + checkEnv(); + checkOpen(); + return PropUtil.millisToDuration((int) txn.getTxnTimeout(), unit); + } + + /** + * Configures the timeout value for the transaction lifetime. + * + *

If the transaction runs longer than this time, an operation using the transaction may throw {@link TransactionTimeoutException}. The transaction timeout is checked when locking a record, as part of a read or write operation.

A value of zero (which is the default) disables timeouts for the transaction, meaning that no limit on the duration of the transaction is enforced. Note that the {@link #setLockTimeout(long, TimeUnit) lock timeout} is independent of the transaction timeout, and the lock timeout should not normally be set to zero.
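
For illustration, a hedged sketch of capping a transaction's lifetime (assuming {@code txn} was returned by Environment.beginTransaction):

    import java.util.concurrent.TimeUnit;

    txn.setTxnTimeout(5, TimeUnit.SECONDS);                 // cap the whole transaction
    long capMs = txn.getTxnTimeout(TimeUnit.MILLISECONDS);  // returns 5000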

        + * + * @param timeOut The timeout value for the transaction lifetime, or zero + * to disable transaction timeouts. + * + * @param unit the {@code TimeUnit} of the timeOut value. May be null only + * if timeOut is zero. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed. + * + * @throws IllegalArgumentException if timeOut or unit is invalid. + * + * @since 4.0 + */ + public void setTxnTimeout(long timeOut, TimeUnit unit) + throws IllegalArgumentException, DatabaseException { + + checkEnv(); + checkOpen(); + txn.setTxnTimeout(PropUtil.durationToMillis(timeOut, unit)); + } + + /** + * Configures the timeout value for the transaction lifetime, with the + * timeout value specified in microseconds. This method is equivalent to: + * + *
setTxnTimeout(timeOut, TimeUnit.MICROSECONDS);
        + * + * @deprecated as of 4.0, replaced by {@link #setTxnTimeout(long, + * TimeUnit)}. + */ + public void setTxnTimeout(long timeOut) + throws IllegalArgumentException, DatabaseException { + + setTxnTimeout(timeOut, TimeUnit.MICROSECONDS); + } + + /** + * Returns the lock request timeout value for the transaction. + * + *

If {@link #setLockTimeout(long,TimeUnit)} has not been called to configure the timeout, the environment configuration value ({@link EnvironmentConfig#LOCK_TIMEOUT}) is returned.

        + * + * @param unit the {@code TimeUnit} of the returned value. May not be null. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed. + * + * @throws IllegalArgumentException if the unit is null. + * + * @since 4.0 + */ + public long getLockTimeout(TimeUnit unit) + throws EnvironmentFailureException, + IllegalStateException, + IllegalArgumentException { + + checkEnv(); + checkOpen(); + return PropUtil.millisToDuration((int) txn.getLockTimeout(), unit); + } + + /** + * Configures the lock request timeout value for the transaction. This + * overrides the {@link EnvironmentConfig#setLockTimeout(long, TimeUnit) + * default lock timeout}. + * + *

A value of zero disables lock timeouts. This is not recommended, even when the application expects that deadlocks will not occur or will be easily resolved. A lock timeout is a fall-back that guards against unexpected "live lock", unresponsive threads, or application failure to close a cursor or to commit or abort a transaction.
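
A sketch of overriding the default lock timeout for one transaction (editorial illustration; {@code txn} is assumed to be an open Transaction):

    txn.setLockTimeout(500, TimeUnit.MILLISECONDS);  // per-lock wait cap
    // Passing zero would disable the fall-back entirely, which the
    // paragraph above advises against.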

        + * + * @param timeOut The lock timeout for all transactional and + * non-transactional operations, or zero to disable lock timeouts. + * + * @param unit the {@code TimeUnit} of the timeOut value. May be null only + * if timeOut is zero. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the transaction or environment has been + * closed. + * + * @throws IllegalArgumentException if timeOut or unit is invalid. + * + * @since 4.0 + */ + public void setLockTimeout(long timeOut, TimeUnit unit) + throws IllegalArgumentException, DatabaseException { + + checkEnv(); + checkOpen(); + txn.setLockTimeout(PropUtil.durationToMillis(timeOut, unit)); + } + + /** + * Configures the lock request timeout value for the transaction, with the + * timeout value specified in microseconds. This method is equivalent to: + * + *
setLockTimeout(timeOut, TimeUnit.MICROSECONDS);
        + * + * @deprecated as of 4.0, replaced by {@link #setLockTimeout(long, + * TimeUnit)}. + */ + public void setLockTimeout(long timeOut) + throws IllegalArgumentException, DatabaseException { + + setLockTimeout(timeOut, TimeUnit.MICROSECONDS); + } + + /** + * Set the user visible name for the transaction. + * + * @param name The user visible name for the transaction. + */ + public void setName(String name) { + this.name = name; + } + + /** + * Get the user visible name for the transaction. + * + * @return The user visible name for the transaction. + */ + public String getName() { + return name; + } + + /** + * For internal use. + * @hidden + */ + @Override + public int hashCode() { + return (int) id; + } + + /** + * For internal use. + * @hidden + */ + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + + if (!(o instanceof Transaction)) { + return false; + } + + if (((Transaction) o).id == id) { + return true; + } + + return false; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(""); + return sb.toString(); + } + + /** + * This method should only be called by the LockerFactory.getReadableLocker + * and getWritableLocker methods. The locker returned does not enforce the + * readCommitted isolation setting. + * + * @throws IllegalArgumentException via all API methods with a txn param + */ + Locker getLocker() + throws DatabaseException { + + if (txn == null) { + throw new IllegalArgumentException + ("Transaction " + id + + " has been closed and is no longer usable."); + } + return txn; + } + + /* + * Helpers + */ + + Txn getTxn() { + return txn; + } + + Environment getEnvironment() { + return env; + } + + /** + * @throws EnvironmentFailureException if the underlying environment is + * invalid, via all methods. + * + * @throws IllegalStateException via all methods. + */ + private void checkEnv() { + EnvironmentImpl envImpl = env.getNonNullEnvImpl(); + if (envImpl == null) { + throw new IllegalStateException + ("The environment has been closed. " + + "This transaction is no longer usable."); + } + envImpl.checkIfInvalid(); + } + + /** + * @throws IllegalStateException via all methods except abort. + */ + void checkOpen() { + if (txn == null || txn.isClosed()) { + throw new IllegalStateException("Transaction Id " + id + + " has been closed."); + } + } + + /** + * Returns whether this {@code Transaction} is open, which is equivalent + * to when {@link Transaction#getState} returns {@link + * Transaction.State#OPEN}. See {@link Transaction.State#OPEN} for more + * information. + * + *

When an {@link OperationFailureException}, or one of its subclasses, is caught, the {@code isValid} method may be called to determine whether the {@code Transaction} can continue to be used, or should be aborted.
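
A hedged sketch of that recovery check (the {@code readAndModify} helper is hypothetical, standing in for any sequence of operations using the transaction):

    try {
        readAndModify(txn);  // hypothetical application logic using txn
    } catch (OperationFailureException e) {
        if (!txn.isValid()) {
            txn.abort();     // the transaction was invalidated; abort it
        }
        // otherwise the operation failed but txn may still be usable
    }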

        + */ + public boolean isValid() { + return txn != null && + txn.isValid(); + } + + /** + * Remove reference to internal txn, so we can reclaim memory. Before + * setting it null, save the final State value, so we can return it from + * getState. + */ + private void setTxnNull() { + finalState = txn.getState(); + txn = null; + } + + /** + * Returns the current state of the transaction. + * + * @since 5.0.48 + */ + public State getState() { + if (txn != null) { + assert finalState == null; + return txn.getState(); + } else { + assert finalState != null; + return finalState; + } + } +} diff --git a/src/com/sleepycat/je/TransactionConfig.java b/src/com/sleepycat/je/TransactionConfig.java new file mode 100644 index 0000000..88e01e4 --- /dev/null +++ b/src/com/sleepycat/je/TransactionConfig.java @@ -0,0 +1,650 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Specifies the attributes of a database environment transaction. + */ +public class TransactionConfig implements Cloneable { + + /** + * Default configuration used if null is passed to methods that create a + * transaction. + */ + public static final TransactionConfig DEFAULT = new TransactionConfig(); + + private boolean sync = false; + private boolean noSync = false; + private boolean writeNoSync = false; + private Durability durability = null; + private ReplicaConsistencyPolicy consistencyPolicy; + private boolean noWait = false; + private boolean readUncommitted = false; + private boolean readCommitted = false; + private boolean serializableIsolation = false; + private boolean readOnly = false; + private boolean localWrite = false; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public TransactionConfig() { + } + + /** + * @hidden + * For internal use only. + * + * Maps the existing sync settings to the equivalent durability settings. + * Figure out what we should do on commit. TransactionConfig could be + * set with conflicting values; take the most stringent ones first. + * All environment level defaults were applied by the caller. + * + * ConfigSync ConfigWriteNoSync ConfigNoSync default + * 0 0 0 sync + * 0 0 1 nosync + * 0 1 0 write nosync + * 0 1 1 write nosync + * 1 0 0 sync + * 1 0 1 sync + * 1 1 0 sync + * 1 1 1 sync + * + * @return the equivalent durability + */ + public Durability getDurabilityFromSync(final EnvironmentImpl envImpl) { + if (sync) { + return Durability.COMMIT_SYNC; + } else if (writeNoSync) { + return Durability.COMMIT_WRITE_NO_SYNC; + } else if (noSync) { + return Durability.COMMIT_NO_SYNC; + } + + /* + * Replicated environments default to commitNoSync, while standalone + * default to commitSync. + */ + if (envImpl.isReplicated()) { + return Durability.COMMIT_NO_SYNC; + } else { + return Durability.COMMIT_SYNC; + } + } + + /** + * Configures the transaction to write and synchronously flush the log it + * when commits. + * + *

This behavior may be set for a database environment using the Environment.setMutableConfig method. Any value specified to this method overrides that setting.

The default is false for this class and true for the database environment.

If true is passed to both setSync and setNoSync, setSync will take precedence.
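
Rather than combining the sync flags, the same intent can be expressed through the durability API, which the commit path's mixed-mode check expects. A sketch (assuming an open Environment {@code env}):

    TransactionConfig tc = new TransactionConfig();
    tc.setDurability(Durability.COMMIT_SYNC);  // equivalent to setSync(true)
    Transaction txn = env.beginTransaction(null, tc);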

        + * + * @param sync If true, transactions exhibit all the ACID (atomicity, + * consistency, isolation, and durability) properties. + * + * @return this + */ + public TransactionConfig setSync(final boolean sync) { + setSyncVoid(sync); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSyncVoid(final boolean sync) { + checkMixedMode(sync, noSync, writeNoSync, durability); + this.sync = sync; + } + + /** + * Returns true if the transaction is configured to write and synchronously + * flush the log it when commits. + * + * @return true if the transaction is configured to write and synchronously + * flush the log it when commits. + */ + public boolean getSync() { + return sync; + } + + /** + * Configures the transaction to not write or synchronously flush the log + * it when commits. + * + *

This behavior may be set for a database environment using the Environment.setMutableConfig method. Any value specified to this method overrides that setting.

        The default is false for this class and the database environment.

        + * + * @param noSync If true, transactions exhibit the ACI (atomicity, + * consistency, and isolation) properties, but not D (durability); that is, + * database integrity will be maintained, but if the application or system + * fails, it is possible some number of the most recently committed + * transactions may be undone during recovery. The number of transactions + * at risk is governed by how many log updates can fit into the log buffer, + * how often the operating system flushes dirty buffers to disk, and how + * often the log is checkpointed. + * + * @deprecated replaced by {@link #setDurability} + * + * @return this + */ + public TransactionConfig setNoSync(final boolean noSync) { + setNoSyncVoid(noSync); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNoSyncVoid(final boolean noSync) { + checkMixedMode(sync, noSync, writeNoSync, durability); + this.noSync = noSync; + } + + /** + * Returns true if the transaction is configured to not write or + * synchronously flush the log it when commits. + * + * @return true if the transaction is configured to not write or + * synchronously flush the log it when commits. + * + * @deprecated replaced by {@link #getDurability} + */ + public boolean getNoSync() { + return noSync; + } + + /** + * Configures the transaction to write but not synchronously flush the log + * it when commits. + * + *

This behavior may be set for a database environment using the Environment.setMutableConfig method. Any value specified to this method overrides that setting.

        The default is false for this class and the database environment.

        + * + * @param writeNoSync If true, transactions exhibit the ACI (atomicity, + * consistency, and isolation) properties, but not D (durability); that is, + * database integrity will be maintained, but if the operating system + * fails, it is possible some number of the most recently committed + * transactions may be undone during recovery. The number of transactions + * at risk is governed by how often the operating system flushes dirty + * buffers to disk, and how often the log is checkpointed. + * + * @deprecated replaced by {@link #setDurability} + * + * @return this + */ + public TransactionConfig setWriteNoSync(final boolean writeNoSync) { + setWriteNoSyncVoid(writeNoSync); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setWriteNoSyncVoid(final boolean writeNoSync) { + checkMixedMode(sync, noSync, writeNoSync, durability); + this.writeNoSync = writeNoSync; + } + + /** + * Returns true if the transaction is configured to write but not + * synchronously flush the log it when commits. + * + * @return true if the transaction is configured to not write or + * synchronously flush the log it when commits. + * + * @deprecated replaced by {@link #getDurability} + */ + public boolean getWriteNoSync() { + return writeNoSync; + } + + /** + * Configures the durability associated with a transaction when it commits. + * Changes to durability are not reflected back to the "sync" booleans -- + * there isn't a one to one mapping. + * + * Note that you should not use both the durability and the XXXSync() apis + * on the same config object. + * + * @param durability the durability definition + * + * @return this + */ + public TransactionConfig setDurability(final Durability durability) { + setDurabilityVoid(durability); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDurabilityVoid(final Durability durability) { + checkMixedMode(sync, noSync, writeNoSync, durability); + this.durability = durability; + } + + /** + * Returns the durability associated with the configuration. + * + * If {@link #setDurability} has not been called, this method returns null. + * When no durability settings have been specified using the + * {@code TransactionConfig}, the default durability is applied to the + * {@link Transaction} by {@link Environment#beginTransaction} using + * {@link EnvironmentConfig} settings. + * + * @return the durability setting currently associated with this config. + */ + public Durability getDurability() { + return durability; + } + + /** + * Used internally to configure Durability, modifying the existing + * Durability or explicit sync configuration. This method is used to avoid + * a mixed mode exception, since the existing config may be in either mode. + */ + void overrideDurability(final Durability durability) { + sync = false; + noSync = false; + writeNoSync = false; + this.durability = durability; + } + + /** + * Associates a consistency policy with this configuration. + * + * @param consistencyPolicy the consistency definition + * + * @return this + */ + public TransactionConfig setConsistencyPolicy( + final ReplicaConsistencyPolicy consistencyPolicy) { + + setConsistencyPolicyVoid(consistencyPolicy); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. 
+ */ + public void setConsistencyPolicyVoid( + final ReplicaConsistencyPolicy consistencyPolicy) { + + this.consistencyPolicy = consistencyPolicy; + } + /** + * Returns the consistency policy associated with the configuration. + * + * @return the consistency policy currently associated with this config. + */ + public ReplicaConsistencyPolicy getConsistencyPolicy() { + return consistencyPolicy; + } + + /** + * Configures the transaction to not wait if a lock request cannot be + * immediately granted. + * + *

        The default is false for this class and the database environment.

        + * + * @param noWait If true, transactions will not wait if a lock request + * cannot be immediately granted, instead {@link + * com.sleepycat.je.LockNotAvailableException LockNotAvailableException} + * will be thrown. + * + * @return this + */ + public TransactionConfig setNoWait(final boolean noWait) { + setNoWaitVoid(noWait); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNoWaitVoid(final boolean noWait) { + this.noWait = noWait; + } + + /** + * Returns true if the transaction is configured to not wait if a lock + * request cannot be immediately granted. + * + * @return true if the transaction is configured to not wait if a lock + * request cannot be immediately granted. + */ + public boolean getNoWait() { + return noWait; + } + + /** + * Configures read operations performed by the transaction to return + * modified but not yet committed data. + * + * @param readUncommitted If true, configure read operations performed by + * the transaction to return modified but not yet committed data. + * + * @see LockMode#READ_UNCOMMITTED + * + * @return this + */ + public TransactionConfig setReadUncommitted( + final boolean readUncommitted) { + + setReadUncommittedVoid(readUncommitted); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReadUncommittedVoid(final boolean readUncommitted) { + this.readUncommitted = readUncommitted; + } + + /** + * Returns true if read operations performed by the transaction are + * configured to return modified but not yet committed data. + * + * @return true if read operations performed by the transaction are + * configured to return modified but not yet committed data. + * + * @see LockMode#READ_UNCOMMITTED + */ + public boolean getReadUncommitted() { + return readUncommitted; + } + + /** + * Configures the transaction for read committed isolation. + * + *

This ensures the stability of the current data item read by the cursor but permits data read by this transaction to be modified or deleted prior to the commit of the transaction.

        + * + * @param readCommitted If true, configure the transaction for read + * committed isolation. + * + * @see LockMode#READ_COMMITTED + * + * @return this + */ + public TransactionConfig setReadCommitted(final boolean readCommitted) { + setReadCommittedVoid(readCommitted); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReadCommittedVoid(final boolean readCommitted) { + this.readCommitted = readCommitted; + } + + /** + * Returns true if the transaction is configured for read committed + * isolation. + * + * @return true if the transaction is configured for read committed + * isolation. + * + * @see LockMode#READ_COMMITTED + */ + public boolean getReadCommitted() { + return readCommitted; + } + + /** + * Configures this transaction to have serializable (degree 3) isolation. + * By setting serializable isolation, phantoms will be prevented. + * + *

By default a transaction provides Repeatable Read isolation; {@link EnvironmentConfig#setTxnSerializableIsolation} may be called to override the default. If the environment is configured for serializable isolation, all transactions will be serializable regardless of whether this method is called; calling {@link #setSerializableIsolation} with a false parameter will not disable serializable isolation.
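
A sketch of opting a single transaction into serializable isolation (assuming an open Environment {@code env} whose default is not already serializable):

    TransactionConfig tc = new TransactionConfig().setSerializableIsolation(true);
    Transaction txn = env.beginTransaction(null, tc);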

        + * + * The default is false for this class and the database environment. + * + * @see LockMode + * + * @return this + */ + public TransactionConfig setSerializableIsolation( + final boolean serializableIsolation) { + + setSerializableIsolationVoid(serializableIsolation); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSerializableIsolationVoid( + final boolean serializableIsolation) { + + this.serializableIsolation = serializableIsolation; + } + + /** + * Returns true if the transaction has been explicitly configured to have + * serializable (degree 3) isolation. + * + * @return true if the transaction has been configured to have serializable + * isolation. + * + * @see LockMode + */ + public boolean getSerializableIsolation() { + return serializableIsolation; + } + + /** + * Configures this transaction to disallow write operations, regardless of + * whether writes are allowed for the {@link Environment} or the + * {@link Database}s that are accessed. + * + *

If a write operation is attempted using a read-only transaction, an {@code UnsupportedOperationException} will be thrown.

For a read-only transaction, the transaction's {@code Durability} is ignored, even when it is explicitly specified using {@link #setDurability(Durability)}.

In a {@link com.sleepycat.je.rep.ReplicatedEnvironment}, a read-only transaction implicitly uses {@link com.sleepycat.je.Durability.ReplicaAckPolicy#NONE}. A read-only transaction on a Master will thus not be held up, or throw {@link com.sleepycat.je.rep.InsufficientReplicasException}, if the Master is not in contact with a sufficient number of Replicas at the time the transaction is initiated.

        The default setting is false (writes are allowed).
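
A sketch of a read-only transaction (assuming an open Environment {@code env}; any write attempted through {@code txn} is then expected to throw UnsupportedOperationException):

    TransactionConfig tc = new TransactionConfig().setReadOnly(true);
    Transaction txn = env.beginTransaction(null, tc);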

        + * + * @return this + */ + public TransactionConfig setReadOnly(final boolean readOnly) { + setReadOnlyVoid(readOnly); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReadOnlyVoid(final boolean readOnly) { + if (localWrite && readOnly) { + throw new IllegalArgumentException( + "localWrite and readOnly may not both be true"); + } + this.readOnly = readOnly; + } + + /** + * Returns whether read-only is configured for this transaction. + */ + public boolean getReadOnly() { + return readOnly; + } + + /** + * Configures this transaction to allow writing to non-replicated + * {@link Database}s in a + * {@link com.sleepycat.je.rep.ReplicatedEnvironment}. + * + *

In a replicated environment, a given transaction may be used to write to either replicated databases or non-replicated databases, but not both. If a write operation to a replicated database is attempted when local-write is true, or to a non-replicated database when local-write is false, an {@code UnsupportedOperationException} will be thrown.

Note that for auto-commit transactions (when the {@code Transaction} parameter is null), the local-write setting is automatically set to correspond to whether the database is replicated. With auto-commit, local-write is always true for a non-replicated database, and always false for a replicated database.

In a replicated environment, a local-write transaction implicitly uses {@link com.sleepycat.je.Durability.ReplicaAckPolicy#NONE}. A local-write transaction on a Master will thus not be held up, or throw {@link com.sleepycat.je.rep.InsufficientReplicasException}, if the Master is not in contact with a sufficient number of Replicas at the time the transaction is initiated.

By default the local-write setting is false, meaning that the transaction may only write to replicated Databases in a replicated environment.
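
A sketch of a local-write transaction (assuming an open ReplicatedEnvironment {@code repEnv}, and assuming a {@code setLocalWrite} setter corresponding to the {@code localWrite} field above; that setter falls outside the excerpt shown in this diff):

    TransactionConfig tc = new TransactionConfig().setLocalWrite(true);
    Transaction txn = repEnv.beginTransaction(null, tc);
    // txn may now write only to non-replicated (local) databases.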

This configuration setting is ignored in a non-replicated Environment since no databases are replicated.

        + * + * @return this + * + * @see Special considerations + * for using Secondary Databases with and without Transactions + * + * @since 4.0 + */ +public class UniqueConstraintException extends SecondaryConstraintException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public UniqueConstraintException(Locker locker, + String message, + String secDbName, + DatabaseEntry secKey, + DatabaseEntry priKey, + long expirationTime) { + super(locker, message, secDbName, secKey, priKey, expirationTime); + } + + /** + * For internal use only. + * @hidden + */ + private UniqueConstraintException(String message, + UniqueConstraintException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new UniqueConstraintException(msg, this); + } +} diff --git a/src/com/sleepycat/je/VerifyConfig.java b/src/com/sleepycat/je/VerifyConfig.java new file mode 100644 index 0000000..84350be --- /dev/null +++ b/src/com/sleepycat/je/VerifyConfig.java @@ -0,0 +1,442 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.PrintStream; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.utilint.PropUtil; + +/** + * Specifies the attributes of a verification operation. + */ +public class VerifyConfig implements Cloneable { + + /* + * For internal use, to allow null as a valid value for the config + * parameter. + */ + public static final VerifyConfig DEFAULT = new VerifyConfig(); + + private boolean propagateExceptions = false; + private boolean aggressive = false; + private boolean printInfo = false; + private PrintStream showProgressStream = null; + private int showProgressInterval = 0; + private boolean verifySecondaries = true; + private boolean verifyDataRecords = false; + private boolean verifyObsoleteRecords = false; + private int batchSize = 1000; + private int batchDelayMs = 10; + + /** + * An instance created using the default constructor is initialized with + * the system's default settings. + */ + public VerifyConfig() { + } + + /** + * Configures {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} to propagate exceptions found during verification. + * + *

By default this is false and exception information is printed to System.out for notification but does not stop the verification activity, which continues for as long as possible.

        Note: Currently this method has no effect.

        + * + * @param propagate If set to true, configure {@link + * com.sleepycat.je.Environment#verify Environment.verify} and {@link + * com.sleepycat.je.Database#verify Database.verify} to propagate + * exceptions found during verification. + * + * @return this + */ + public VerifyConfig setPropagateExceptions(boolean propagate) { + setPropagateExceptionsVoid(propagate); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setPropagateExceptionsVoid(boolean propagate) { + propagateExceptions = propagate; + } + + /** + * Returns true if the {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} are configured to propagate exceptions found during + * verification. + * + *

This method may be called at any time during the life of the application.

        + * + * @return true if the {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} are configured to propagate exceptions found during + * verification. + */ + public boolean getPropagateExceptions() { + return propagateExceptions; + } + + /** + * Configures {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} to perform fine granularity consistency checking that + * includes verifying in memory constructs. + * + *

This level of checking should only be performed while the database environment is quiescent.

        By default this is false.


Note: Currently, enabling aggressive verification has no additional effect.

        + * + * @param aggressive If set to true, configure {@link + * com.sleepycat.je.Environment#verify Environment.verify} and {@link + * com.sleepycat.je.Database#verify Database.verify} to perform fine + * granularity consistency checking that includes verifying in memory + * constructs. + * + * @return this + */ + public VerifyConfig setAggressive(boolean aggressive) { + setAggressiveVoid(aggressive); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setAggressiveVoid(boolean aggressive) { + this.aggressive = aggressive; + } + + /** + * Returns true if the {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} are configured to perform fine granularity consistency + * checking that includes verifying in memory constructs. + * + *

This method may be called at any time during the life of the application.

        + * + * @return true if the {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} are configured to perform fine granularity consistency + * checking that includes verifying in memory constructs. + */ + public boolean getAggressive() { + return aggressive; + } + + /** + * Configures {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} to print basic verification information. + * + *

Information is printed to the {@link #getShowProgressStream()} if it is non-null, and otherwise to System.err.

        By default this is false.

        + * + * @param printInfo If set to true, configure {@link + * com.sleepycat.je.Environment#verify Environment.verify} and {@link + * com.sleepycat.je.Database#verify Database.verify} to print basic + * verification information. + * + * @return this + */ + public VerifyConfig setPrintInfo(boolean printInfo) { + setPrintInfoVoid(printInfo); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setPrintInfoVoid(boolean printInfo) { + this.printInfo = printInfo; + } + + /** + * Returns true if the {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} are configured to print basic verification information. + * + *

This method may be called at any time during the life of the application.

        + * + * @return true if the {@link com.sleepycat.je.Environment#verify + * Environment.verify} and {@link com.sleepycat.je.Database#verify + * Database.verify} are configured to print basic verification information. + */ + public boolean getPrintInfo() { + return printInfo; + } + + /** + * Configures the verify operation to display progress to the PrintStream + * argument. The accumulated statistics will be displayed every N records, + * where N is the value of showProgressInterval. + * + * @return this + */ + public VerifyConfig setShowProgressStream(PrintStream showProgressStream) { + setShowProgressStreamVoid(showProgressStream); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setShowProgressStreamVoid(PrintStream showProgressStream) { + this.showProgressStream = showProgressStream; + } + + /** + * Returns the PrintStream on which the progress messages will be displayed + * during long running verify operations. + */ + public PrintStream getShowProgressStream() { + return showProgressStream; + } + + /** + * When the verify operation is configured to display progress the + * showProgressInterval is the number of LNs between each progress report. + * + * @return this + */ + public VerifyConfig setShowProgressInterval(int showProgressInterval) { + setShowProgressIntervalVoid(showProgressInterval); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setShowProgressIntervalVoid(int showProgressInterval) { + this.showProgressInterval = showProgressInterval; + } + + /** + * Returns the showProgressInterval value, if set. + */ + public int getShowProgressInterval() { + return showProgressInterval; + } + + /** + * Configures verification to verify secondary database integrity. This is + * equivalent to verifying secondaries in the background Btree verifier, + * when {@link EnvironmentConfig#VERIFY_SECONDARIES} is set to true. + * + *

        By default this is true.

        + * + * @return this + */ + public VerifyConfig setVerifySecondaries(boolean verifySecondaries) { + setVerifySecondariesVoid(verifySecondaries); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setVerifySecondariesVoid(boolean verifySecondaries) { + this.verifySecondaries = verifySecondaries; + } + + /** + * Returns the verifySecondaries value. + */ + public boolean getVerifySecondaries() { + return verifySecondaries; + } + + /** + * Configures verification to read and verify the leaf node (LN) of a + * primary data record. This is equivalent to verifying data records in the + * background Btree verifier, when + * {@link EnvironmentConfig#VERIFY_DATA_RECORDS} is set to true. + * + *

        By default this is false.

        + * + * @return this + */ + public VerifyConfig setVerifyDataRecords(boolean verifyDataRecords) { + setVerifyDataRecordsVoid(verifyDataRecords); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setVerifyDataRecordsVoid(boolean verifyDataRecords) { + this.verifyDataRecords = verifyDataRecords; + } + + /** + * Returns the verifyDataRecords value. + */ + public boolean getVerifyDataRecords() { + return verifyDataRecords; + } + + /** + * @hidden + * Configures verification to verify the obsolete record metadata. This is + * equivalent to verifying obsolete metadata in the background Btree + * verifier, when {@link EnvironmentConfig#VERIFY_OBSOLETE_RECORDS} is set + * to true. + * + *

        By default this is false.

        + * + * @return this + */ + public VerifyConfig setVerifyObsoleteRecords( + boolean verifyObsoleteRecords) { + setVerifyObsoleteRecordsVoid(verifyObsoleteRecords); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setVerifyObsoleteRecordsVoid(boolean verifyObsoleteRecords) { + this.verifyObsoleteRecords = verifyObsoleteRecords; + } + + /** + * @hidden + * Returns the verifyObsoleteRecords value. + */ + public boolean getVerifyObsoleteRecords() { + return verifyObsoleteRecords; + } + + /** + * Configures the number of records verified per batch. In order to give + * database remove/truncate the opportunity to execute, records are + * verified in batches and there is a {@link #setBatchDelay delay} + * between batches. + * + *

        By default the batch size is 1000.


Note that when using the {@link EnvironmentConfig#ENV_RUN_VERIFIER background data verifier}, the batch size is {@link EnvironmentConfig#VERIFY_BTREE_BATCH_SIZE}.

        + * + * @return this + */ + public VerifyConfig setBatchSize(int batchSize) { + setBatchSizeVoid(batchSize); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setBatchSizeVoid(int batchSize) { + this.batchSize = batchSize; + } + + /** + * Returns the batchSize value. + */ + public int getBatchSize() { + return batchSize; + } + + /** + * Configures the delay between batches. In order to give database + * remove/truncate the opportunity to execute, records are verified in + * {@link #setBatchSize batches} and there is a delay between batches. + * + *

        By default the batch delay is 10 ms.


Note that when using the {@link EnvironmentConfig#ENV_RUN_VERIFIER background data verifier}, the batch delay is {@link EnvironmentConfig#VERIFY_BTREE_BATCH_DELAY}.
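
Putting the batch controls together, a hedged sketch of a verification pass (assuming an open Environment {@code env}, and assuming Environment.verify accepts a VerifyConfig plus a PrintStream; that method is outside the excerpt shown here):

    VerifyConfig vc = new VerifyConfig()
        .setPrintInfo(true)
        .setBatchSize(500)
        .setBatchDelay(5, TimeUnit.MILLISECONDS);
    boolean ok = env.verify(vc, System.out);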

        + * + * @param delay the delay between batches. + * + * @param unit the {@code TimeUnit} of the delay value. May be + * null only if delay is zero. + * + * @return this + */ + public VerifyConfig setBatchDelay(long delay, TimeUnit unit) { + setBatchDelayVoid(delay, unit); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setBatchDelayVoid(long delayDuration, TimeUnit unit) { + batchDelayMs = PropUtil.durationToMillis(delayDuration, unit); + } + + /** + * Returns the batch delay. + * + * @param unit the {@code TimeUnit} of the returned value. May not be null. + */ + public long getBatchDelay(TimeUnit unit) { + return PropUtil.millisToDuration(batchDelayMs, unit); + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public VerifyConfig clone() { + try { + return (VerifyConfig) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns the values for each configuration attribute. + * + * @return the values for each configuration attribute. + */ + @Override + public String toString() { + // TODO: add new properties here. + StringBuilder sb = new StringBuilder(); + sb.append("propagateExceptions=").append(propagateExceptions); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/VerifyConfigBeanInfo.java b/src/com/sleepycat/je/VerifyConfigBeanInfo.java new file mode 100644 index 0000000..cb40654 --- /dev/null +++ b/src/com/sleepycat/je/VerifyConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class VerifyConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(VerifyConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(VerifyConfig.class); + } +} diff --git a/src/com/sleepycat/je/VersionMismatchException.java b/src/com/sleepycat/je/VersionMismatchException.java new file mode 100644 index 0000000..0c3a0bc --- /dev/null +++ b/src/com/sleepycat/je/VersionMismatchException.java @@ -0,0 +1,59 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je; + +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Thrown by the Environment constructor when an environment cannot be + * opened because the version of the existing log is not compatible with the + * version of JE that is running. This occurs when a later version of JE was + * used to create the log. + * + *

Warning: This exception should be handled when more than one version of JE may be used to access an environment.

        + * + * @since 4.0 + */ +public class VersionMismatchException extends EnvironmentFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public VersionMismatchException(EnvironmentImpl envImpl, String message) { + super(envImpl, EnvironmentFailureReason.VERSION_MISMATCH, message); + } + + /** + * For internal use only. + * @hidden + */ + private VersionMismatchException(String message, + VersionMismatchException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public EnvironmentFailureException wrapSelf(String msg) { + return new VersionMismatchException(msg, this); + } +} diff --git a/src/com/sleepycat/je/WriteOptions.java b/src/com/sleepycat/je/WriteOptions.java new file mode 100644 index 0000000..5c8644e --- /dev/null +++ b/src/com/sleepycat/je/WriteOptions.java @@ -0,0 +1,491 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static com.sleepycat.je.EnvironmentFailureException.*; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.dbi.TTL; + +/** + * Options for calling methods that write (insert, update or delete) records. + * + *

        Time-To-Live


When performing a 'put' operation, a TTL may be specified using {@link #setTTL(int, TimeUnit)} or {@link #setTTL(int)}.

By default, the TTL property is zero, meaning there is no automatic expiration. A non-zero TTL may be specified to cause an inserted record to expire. The expiration time may also be changed for an existing record by updating the record and specifying a different TTL, including specifying zero to prevent the record from expiring. However, the TTL of an existing record is updated only if {@link #setUpdateTTL(boolean)} is explicitly set to true. When deleting a record, the TTL parameter is ignored.
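
A hedged sketch of that flow (assuming an open Database {@code db}, populated {@code key}/{@code data} entries, and the {@code Put.OVERWRITE} operation type from this package, which is not shown in this excerpt):

    WriteOptions wo = new WriteOptions().setTTL(30, TimeUnit.DAYS);
    OperationResult r = db.put(null, key, data, Put.OVERWRITE, wo);
    long expiresAt = r.getExpirationTime();  // 0 would mean no expiration

    // Extending the record's lifetime later requires setUpdateTTL(true).
    WriteOptions extend = new WriteOptions().setTTL(60).setUpdateTTL(true);
    db.put(null, key, data, Put.OVERWRITE, extend);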

Records expire on day or hour boundaries, depending on the {@code timeUnit} parameter. At the time of the write operation, the TTL parameter is used to compute the record's expiration time by first converting it from days (or hours) to milliseconds, and then adding it to the current system time. If the resulting expiration time is not evenly divisible by the number of milliseconds in one day (or hour), it is rounded up to the nearest day (or hour).

Passing TimeUnit.DAYS, rather than TimeUnit.HOURS, for the timeUnit parameter is recommended to minimize storage requirements (memory and disk). Because the expiration time is stored in the JE Btree internally, when using the TTL feature, the additional memory and disk space required for storing Btree internal nodes (INs) is twice as much when using TimeUnit.HOURS as when using TimeUnit.DAYS. Using TimeUnit.DAYS adds about 5% to the space needed for INs, while TimeUnit.HOURS adds about 10%.

Note that JE stores the expiration time of the record and not the original TTL value that was specified. The expiration time of a record is available when reading (or writing) records via {@link OperationResult#getExpirationTime()}.

        A summary of the behavior of expired records is as follows.

• Space for expired records will be purged in the background by the JE cleaner, and expired records will be filtered out of queries even if they have not been purged.

• Expired records are removed individually: there is no guarantee that records with the same expiration time will be removed simultaneously.

• Records with expiration times support repeatable-read semantics in most cases, but with some exceptions (described below).

A more detailed description is below, including some information on how expired records are handled internally.

• Expired records will be purged in order to reclaim disk space. This happens in the background over time, and there is no guarantee that the space for a record will be reclaimed at any particular time. Purging of expired records occurs during the normal JE cleaning process. The goals of the purging process are:

  1. to minimize the cost of purging;
  2. to keep disk utilization below the {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION} threshold, as usual, but taking into account expired data; and
  3. to reclaim expired data gradually and avoid spikes in cleaning on day and hour boundaries.

• Expired records that have not been purged will be filtered out of queries and will not be returned to the application. In a replicated environment, purging and filtering occur independently on each node. For queries to return consistent results on all nodes, the system clocks on all nodes must be synchronized.

• Repeatable-read semantics are supported for records that expire after being read. If a lock of any kind is held on a record and the record expires, when accessing it again using the same transaction or cursor, it will be accessed as if it is not expired. In other words, locking a record prevents it from expiring, from the viewpoint of that transaction or cursor. However, there are some caveats and exceptions to this rule:

  • A lock by one transaction or cursor will not prevent a record from being seen as expired when accessing it using a different transaction or cursor.

  • In the unlikely event that the system clock is changed, locking a record may not guarantee that the record's data has not been purged, if the data is not read at the time the record is locked. This is because the record's key and its data are purged independently. It is possible to lock a record without reading its data by passing null for the 'data' parameter. If a record is locked in this manner, and the data was previously purged because the system clock was changed, then one of the following may occur, even when using the same transaction or cursor that was used to lock the record:

    • If the record is read again with a non-null data parameter, the operation may fail (return null) because the data cannot be read.

    • If a partial update is attempted (passing a {@link DatabaseEntry#setPartial(int,int,boolean) partial} 'data' parameter), the operation may fail (return null) because the pre-existing data cannot be read.

• Even when multiple records have the same expiration time, JE does not provide a way for them to expire atomically, as could be done by explicitly deleting multiple records in a single transaction. This restriction is for performance reasons; if records could expire atomically, they could not be purged efficiently using the JE cleaning process. Instead, each record expires individually, as if each were deleted in a separate transaction. This means that even when a set of records is inserted or updated atomically, a query may return some but not all of the records, when any of the records expire at a time very close to the time of the query. This is because the system clock is checked for each record individually at the time it is read by the query, and because expired records may be purged by other threads.

• There are several special cases of the above rule that involve access to primary and secondary databases. Because a given primary record and its associated secondary records are normal records in most respects, this set of records does not expire atomically. For most read and write operations, JE treats the expiration of any record in this set as if all records have expired, and in these cases there is no special behavior to consider. For example:

  • As long as the primary and secondary databases are transactional, JE ensures that the expiration times of a given primary record and all its associated secondary records are the same.

  • When reading a primary record via a secondary key, JE first reads the secondary record and then the primary. If either record expires during this process, both records are treated as expired.

  • When updating or deleting a primary record, JE first reads the primary record to obtain the secondary keys and then deletes/updates/inserts the secondary records as needed. If a secondary record expires during this process, this will not cause a {@link SecondaryIntegrityException}, as would normally happen when an expected associated record is missing.

  • When a primary and/or secondary record expires after being read, with few exceptions, repeatable-read semantics are supported as described above, i.e., locks prevent expiration from the viewpoint of the locking transaction or cursor. Exceptions to this rule are described below.

However, there are several cases where such treatment by JE is not practical, and the user should be aware of special behavior when primary or secondary records expire. These are not common use cases, but it is important to be aware of them. In the cases described below, let us assume a primary database has two associated secondary databases, and a particular primary record with primary key X has two secondary records with keys A and B, one in each secondary database.

  • After a transaction or cursor reads and locks the primary record via primary key X, reading via primary key X again with the same transaction or cursor will also be successful even if the record has expired, i.e., repeatable-read is supported. However, if the record expires and the same transaction or cursor attempts to read via key A or B, the record will not be found. This is because the secondary records for keys A and B were not locked and they expire independently of the primary record.

  • Similarly, after a transaction or cursor reads and locks the primary record via secondary key A successfully, reading via key A again with the same transaction or cursor will also be successful even if the record has expired. Reading via primary key X will also be successful, even if the record has expired, because the primary record was locked. However, if the record expires and the same transaction or cursor attempts to read via key B, the record will not be found. This is because the secondary record for key B was not locked and it expires independently of the primary record and the secondary record for key A.

  • When reading via a secondary database, it is possible to read only the secondary key and primary key (which are both contained in the secondary record), but not the primary record, by passing null for the 'data' parameter. In this case the primary record is not locked. Therefore, if the record expires and the same transaction or cursor attempts to read the primary record (via any secondary key or the primary key), the record will not be found.

  • When a record expires, if its database serves as a {@link SecondaryConfig#setForeignKeyDatabase foreign key database}, the {@link SecondaryConfig#setForeignKeyDeleteAction foreign key delete action} will not be enforced. Therefore, setting a TTL for a record in a foreign key database is not recommended. The same is true when using the DPL and a foreign key database is specified using {@link com.sleepycat.persist.model.SecondaryKey#relatedEntity()}.

• When JE detects what may be an internal integrity error, it tries to determine whether an expired record, rather than a true integrity error, is the underlying cause. To prevent internal errors when small changes in the system clock time are made, if a record has expired within {@link EnvironmentConfig#ENV_TTL_CLOCK_TOLERANCE} (two hours, by default), JE treats the record as deleted and no exception is thrown.

When an integrity error does cause an exception to be thrown, the record's expiration time will be included in the exception message and this can help to diagnose the problem. This includes the following exceptions:

  • {@link SecondaryIntegrityException}
  • {@link EnvironmentFailureException} with LOG_FILE_NOT_FOUND in the message.

          + * + *

          In cases where the clock has been changed by more than one hour + * and integrity exceptions occur because of this, it may be possible + * to avoid the exceptions by setting the {@link + * EnvironmentConfig#ENV_TTL_CLOCK_TOLERANCE} configuration parameter + * to a larger value.

        • + *
        + * + *

        In order to use the TTL feature in a ReplicatedEnvironment, all nodes + * must be upgraded to JE 7.0 or later. If one or more nodes in a group + * uses an earlier version, an IllegalStateException will be thrown when + * attempting a put operation with a non-zero TTL. Also, once records with + * a non-zero TTL have been written, a node using an earlier version of JE + * may not join the group; if this is attempted, the node will fail during + * open with an EnvironmentFailureException.
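+ *
+ * <p>For illustration only (not part of the original Javadoc), a put
+ * operation with a 90-day TTL might look like the following sketch,
+ * where 'db', 'key' and 'data' are assumed to exist:
+ * <pre>{@code
+ * WriteOptions options = new WriteOptions().setTTL(90, TimeUnit.DAYS);
+ * OperationResult result =
+ *     db.put(null, key, data, Put.OVERWRITE, options);
+ * // result.getExpirationTime() returns the assigned expiration time.
+ * }</pre>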

        + * + * @since 7.0 + */ +public class WriteOptions implements Cloneable { + + private CacheMode cacheMode = null; + private int ttl = 0; + private TimeUnit ttlUnit = TimeUnit.DAYS; + private boolean updateTtl = false; + + /** + * Constructs a WriteOptions object with default values for all properties. + */ + public WriteOptions() { + } + + @Override + public WriteOptions clone() { + try { + return (WriteOptions) super.clone(); + } catch (CloneNotSupportedException e) { + throw unexpectedException(e); + } + } + + /** + * Sets the {@code CacheMode} to be used for the operation. + *

+ * By default this property is null, meaning that the default specified + * using {@link Cursor#setCacheMode}, + * {@link DatabaseConfig#setCacheMode} or + * {@link EnvironmentConfig#setCacheMode} will be used. + * + * @param cacheMode is the {@code CacheMode} used for the operation, or + * null to use the Cursor, Database or Environment default. + * + * @return 'this'. + */ + public WriteOptions setCacheMode(final CacheMode cacheMode) { + this.cacheMode = cacheMode; + return this; + } + + /** + * Returns the {@code CacheMode} to be used for the operation, or null + * if the Cursor, Database or Environment default will be used. + * + * @see #setCacheMode(CacheMode) + */ + public CacheMode getCacheMode() { + return cacheMode; + } + + /** + * Sets the Time-To-Live property for a 'put' operation, using + * {@code TimeUnit.DAYS} as the TTL unit. + * + * @param ttl the number of days after the current time on which + * the record will automatically expire, or zero for no automatic + * expiration. May not be negative. + * + * @return 'this'. + * + * @see Time-To-Live + */ + public WriteOptions setTTL(final int ttl) { + this.ttl = ttl; + this.ttlUnit = TimeUnit.DAYS; + return this; + } + + /** + * Sets the Time-To-Live property for a 'put' operation, using the given + * {@code TimeUnit}. + * + * @param ttl the number of days or hours after the current time on which + * the record will automatically expire, or zero for no automatic + * expiration. May not be negative. + * + * @param timeUnit is TimeUnit.DAYS or TimeUnit.HOURS. TimeUnit.DAYS is + * recommended to minimize storage requirements (memory and disk). + * + * @return 'this'. + * + * @see Time-To-Live + */ + public WriteOptions setTTL(final int ttl, final TimeUnit timeUnit) { + this.ttl = ttl; + this.ttlUnit = timeUnit; + return this; + } + + /** + * Returns the Time-To-Live property for a 'put' operation. + * + * @see #setTTL(int) + */ + public int getTTL() { + return ttl; + } + + /** + * Returns the Time-To-Live time unit for a 'put' operation. + * + * @see #setTTL(int, TimeUnit) + */ + public TimeUnit getTTLUnit() { + return ttlUnit; + } + + /** + * Sets the update-TTL property for a 'put' operation. + *

        + * If this property is true and the operation updates a record, the + * specified TTL will be used to assign a new expiration time for the + * record, or to clear the record's expiration time if the specified + * TTL is zero. + *

        + * If this parameter is false and the operation updates a record, the + * record's expiration time will not be changed. + *

        + * If the operation inserts a record, this parameter is ignored and the + * specified TTL is always applied. + *
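+ *
+ * <p>A sketch for illustration only (not part of the original Javadoc),
+ * assuming a record already stored under 'key':
+ * <pre>{@code
+ * // Updates the record but leaves its expiration time unchanged,
+ * // because updateTtl is false by default.
+ * db.put(null, key, newData, Put.OVERWRITE,
+ *        new WriteOptions().setTTL(30));
+ * // Updates the record and assigns a fresh 30-day expiration time.
+ * db.put(null, key, newData, Put.OVERWRITE,
+ *        new WriteOptions().setTTL(30).setUpdateTTL(true));
+ * }</pre>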

        + * By default, this property is false. + * + * @param updateTtl is whether to assign (or clear) the expiration time + * when updating an existing record. + * + * @return 'this'. + * + * @see Time-To-Live + */ + public WriteOptions setUpdateTTL(final boolean updateTtl) { + this.updateTtl = updateTtl; + return this; + } + + /** + * Returns the update-TTL property for a 'put' operation. + * + * @see #setUpdateTTL(boolean) + */ + public boolean getUpdateTTL() { + return updateTtl; + } + + /** + * A convenience method to set the TTL based on a given expiration time + * and the current system time. + *

        + * Given a desired expiration time and {@link TimeUnit} (DAYS or HOURS), + * sets the TTL to a value that will cause a record to expire at or after + * the given time, if the record is stored at the current time. The + * intended use case is to determine the TTL when writing a record and the + * desired expiration time, rather than the TTL, is known. + *

        + * This method determines the TTL by taking the difference between the + * current time and the given time, converting it from milliseconds to + * days (or hours), and rounding up if it is not evenly divisible by + * the number of milliseconds in one day (or hour). + *

        + * A special use case is when the expiration time was previously obtained + * from {@link OperationResult#getExpirationTime()}, for example, when + * performing an export followed by an import. To support this, null can be + * passed for the timeUnit parameter and the time unit will be determined + * as follows. + *

+ * <ul>
+ * <li>This method first converts the expiration time to a TTL in
+ * hours, as described above. If the expiration time was obtained by
+ * calling {@link OperationResult#getExpirationTime}, then it will be
+ * evenly divisible by the number of milliseconds in one hour and no
+ * rounding will occur.</li>
+ *
+ * <li>If the resulting TTL in hours is an even multiple of 24,
+ * {@code DAYS} is used; otherwise, {@code HOURS} is used. For example,
+ * when performing an import, if the original expiration time was
+ * specified in {@code DAYS}, and obtained by calling
+ * {@link OperationResult#getExpirationTime}, the unit derived by this
+ * method will also be {@code DAYS}.</li>
+ * </ul>
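+ *
+ * <p>A minimal export/import sketch, for illustration only (not part
+ * of the original Javadoc):
+ * <pre>{@code
+ * // Export: capture the expiration time along with each record.
+ * long expiration = result.getExpirationTime();
+ * // Import: pass null for the time unit so that DAYS or HOURS is
+ * // derived as described in the list above.
+ * db.put(null, key, data, Put.OVERWRITE,
+ *        new WriteOptions().setExpirationTime(expiration, null));
+ * }</pre>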

        + * Note that when a particular time unit is desired, null should not be + * passed for the timeUnit parameter. Normally {@link TimeUnit#DAYS} is + * recommended instead of {@link TimeUnit#HOURS}, to minimize storage + * requirements (memory and disk). When the desired unit is known, the unit + * should be passed explicitly. + * + * @param expirationTime is the desired expiration time in milliseconds + * (UTC), or zero for no automatic expiration. + * + * @param timeUnit is {@link TimeUnit#DAYS} or {@link TimeUnit#HOURS}, or + * null to derive the time unit as described above. + * + * @throws IllegalArgumentException if ttlUnits is not DAYS, HOURS or null. + * + * @see Time-To-Live + */ + public WriteOptions setExpirationTime(final long expirationTime, + TimeUnit timeUnit) { + if (expirationTime == 0) { + return setTTL(0, timeUnit); + } + + final boolean hours; + + if (timeUnit == TimeUnit.DAYS) { + hours = false; + } else if (timeUnit == TimeUnit.HOURS) { + hours = true; + } else if (timeUnit == null) { + hours = TTL.isSystemTimeInHours(expirationTime); + timeUnit = hours ? TimeUnit.HOURS : TimeUnit.DAYS; + } else { + throw new IllegalArgumentException( + "ttlUnits not allowed: " + timeUnit); + } + + setTTL( + TTL.systemTimeToExpiration( + expirationTime - TTL.currentSystemTime(), hours), + timeUnit); + + return this; + } +} diff --git a/src/com/sleepycat/je/XAEnvironment.java b/src/com/sleepycat/je/XAEnvironment.java new file mode 100644 index 0000000..8b26b03 --- /dev/null +++ b/src/com/sleepycat/je/XAEnvironment.java @@ -0,0 +1,412 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import java.io.File; +import javax.transaction.xa.XAException; +import javax.transaction.xa.XAResource; +import javax.transaction.xa.Xid; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.txn.PreparedTxn; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.TxnManager; + +/** + * An Environment that implements XAResource. If JE is used in an XA + * environment, this class should be used instead of Environment so that + * appropriate XA functions are available. + */ +public class XAEnvironment extends Environment implements XAResource { + + private static final boolean DEBUG = false; + + /** + * Create a database environment handle. + * + * @param envHome The database environment's home directory. + * + * @param configuration The database environment attributes. If null, + * default attributes are used. + */ + public XAEnvironment(File envHome, EnvironmentConfig configuration) + throws EnvironmentNotFoundException, EnvironmentLockedException { + + super(envHome, configuration); + } + + /** + * Used to get the Transaction object given an XA Xid. + * @hidden + * Internal use only. + */ + public Transaction getXATransaction(Xid xid) { + Txn ret = getXATransactionInternal(xid); + if (ret == null || + ret instanceof PreparedTxn) { + return null; + } + + /* Do we guarantee object identity for Transaction objects? 
*/ + return new Transaction(this, ret); + } + + private Txn getXATransactionInternal(Xid xid) { + return getNonNullEnvImpl().getTxnManager().getTxnFromXid(xid); + } + + /** + * Used to set the Transaction object for an XA Xid. Public for tests. + * + * @hidden + * Internal use only. + */ + public void setXATransaction(Xid xid, Transaction txn) { + getNonNullEnvImpl().getTxnManager().registerXATxn( + xid, txn.getTxn(), false); + } + + /* + * XAResource methods. + */ + + @Override + public void commit(Xid xid, boolean ignore /*onePhase*/) + throws XAException { + + if (DEBUG) { + System.out.println("*** commit called " + xid + "/" + ignore); + } + + if (xid == null) { + return; + } + + try { + checkOpen(); + Txn txn = getXATransactionInternal(xid); + if (txn == null) { + throw new XAException + ("No transaction found for " + xid + " during commit."); + } + removeReferringHandle(new Transaction(this, txn)); + if (txn.isOnlyAbortable()) { + throw new XAException(XAException.XA_RBROLLBACK); + } + txn.commit(xid); + } catch (DatabaseException DE) { + throwNewXAException(DE); + } + if (DEBUG) { + System.out.println("*** commit finished"); + } + } + + @Override + public void end(Xid xid, int flags) + throws XAException { + + if (DEBUG) { + System.out.println("*** end called " + xid + "/" + flags); + } + + /* flags - One of TMSUCCESS, TMFAIL, or TMSUSPEND. */ + + boolean tmFail = (flags & XAResource.TMFAIL) != 0; + boolean tmSuccess = (flags & XAResource.TMSUCCESS) != 0; + boolean tmSuspend = (flags & XAResource.TMSUSPEND) != 0; + if ((!tmFail && !tmSuccess && !tmSuspend) || + (tmFail && tmSuccess) || + ((tmFail || tmSuccess) && tmSuspend)) { + throw new XAException(XAException.XAER_INVAL); + } + + final EnvironmentImpl envImpl = getNonNullEnvImpl(); + + if (DEBUG) { + System.out.println + ("Transaction for " + Thread.currentThread() + " is " + + envImpl.getTxnManager().getTxnForThread()); + } + + Transaction transaction = envImpl.getTxnManager().unsetTxnForThread(); + if (transaction == null) { + transaction = getXATransaction(xid); + } + Txn txn = (transaction != null) ? transaction.getTxn() : null; + if (txn == null) { + throw new XAException(XAException.XAER_NOTA); + } + + if (tmFail) { + + /* + * Creating the XAFailureException will set the txn to abort-only. + * This exception stack trace will provide more "cause" information + * when it is wrapped in an exception that is thrown later, which + * occurs when an attempt is made to use the txn. + */ + new XAFailureException(txn); + } + + if (tmSuspend) { + if (txn.isSuspended()) { + throw new XAException(XAException.XAER_PROTO); + } + txn.setSuspended(true); + } + + /* For tmSuccess, do nothing further. 
*/ + } + + @Override + public void forget(Xid xid) + throws XAException { + + if (DEBUG) { + System.out.println("*** forget called"); + } + + throw new XAException(XAException.XAER_NOTA); + } + + @Override + public boolean isSameRM(XAResource rm) + throws XAException { + + if (DEBUG) { + System.out.println("*** isSameRM called"); + } + + EnvironmentImpl envImpl = null; + try { + envImpl = checkOpen(); + } catch (DatabaseException DE) { + throwNewXAException(DE); + } + + if (rm == null) { + return false; + } + + if (!(rm instanceof XAEnvironment)) { + return false; + } + + return envImpl == + DbInternal.getNonNullEnvImpl((XAEnvironment) rm); + } + + @Override + public int prepare(Xid xid) + throws XAException { + + if (DEBUG) { + System.out.println("*** prepare called"); + } + + try { + checkOpen(); + Transaction txn = getXATransaction(xid); + if (txn == null) { + throw new XAException + ("No transaction found for " + xid + " during prepare."); + } + int ret = txn.getTxn().prepare(xid); + + if (DEBUG) { + System.out.println("*** prepare returning " + ret); + } + + /* + * If this transaction was R/O, then there were no writes. We'll + * commit it here because the user doesn't need to (and isn't + * allowed to either). + */ + if (ret == XAResource.XA_RDONLY) { + commit(xid, true); + } + + return ret; + } catch (RuntimeException e) { + throwNewXAException(e); + } + return XAResource.XA_OK; // for compiler + } + + @Override + public Xid[] recover(int flags) + throws XAException { + + if (DEBUG) { + System.out.println("*** recover called"); + } + + /* flags - One of TMSTARTRSCAN, TMENDRSCAN, TMNOFLAGS. */ + + boolean tmStartRScan = (flags & XAResource.TMSTARTRSCAN) != 0; + boolean tmEndRScan = (flags & XAResource.TMENDRSCAN) != 0; + if ((tmStartRScan && tmEndRScan) || + (!tmStartRScan && !tmEndRScan && flags != TMNOFLAGS)) { + throw new XAException(XAException.XAER_INVAL); + } + + /* + * We don't have to actually do anything with STARTRSCAN or ENDRSCAN + * since we return the whole set of Xid's to be recovered on each call. + */ + try { + final EnvironmentImpl envImpl = checkOpen(); + + if (DEBUG) { + System.out.println("*** recover returning1"); + } + + return envImpl.getTxnManager().XARecover(); + } catch (DatabaseException DE) { + throwNewXAException(DE); + } + return null; // for compiler + } + + @Override + public void rollback(Xid xid) + throws XAException { + + if (DEBUG) { + System.out.println("*** rollback called"); + } + + try { + checkOpen(); + Txn txn = getXATransactionInternal(xid); + if (txn == null) { + throw new XAException + ("No transaction found for " + xid + " during rollback."); + } + removeReferringHandle(new Transaction(this, txn)); + txn.abort(xid); + } catch (DatabaseException DE) { + throwNewXAException(DE); + } + + if (DEBUG) { + System.out.println("*** rollback returning"); + } + } + + @Override + public int getTransactionTimeout() + throws XAException { + + try { + return (int) ((getConfig().getTxnTimeout() + 999999L) / 1000000L); + } catch (Exception DE) { + throwNewXAException(DE); + } + return 0; // for compiler + } + + @Override + public boolean setTransactionTimeout(int timeout) { + return false; + } + + @Override + public void start(Xid xid, int flags) + throws XAException { + + if (DEBUG) { + System.out.println("*** start called " + xid + "/" + flags); + } + + boolean tmJoin = (flags & XAResource.TMJOIN) != 0; + boolean tmResume = (flags & XAResource.TMRESUME) != 0; + + /* Check flags - only one of TMNOFLAGS, TMJOIN, or TMRESUME. 
*/ + if (xid == null || + (tmJoin && tmResume) || + (!tmJoin && + !tmResume && + flags != XAResource.TMNOFLAGS)) { + throw new XAException(XAException.XAER_INVAL); + } + + try { + Transaction txn = getXATransaction(xid); + TxnManager txnMgr = getNonNullEnvImpl().getTxnManager(); + + if (flags == XAResource.TMNOFLAGS) { + + /* + * If neither RESUME nor JOIN was set, make sure xid doesn't + * exist in allXATxns. Throw XAER_DUPID if it does. + */ + if (txn == null) { + if (DEBUG) { + System.out.println + ("Transaction for XID " + xid + " being created"); + } + + txn = beginTransaction(null, null); + setXATransaction(xid, txn); + + } else { + throw new XAException(XAException.XAER_DUPID); + } + } else if (tmJoin) { + if (txn == null) { + throw new XAException(XAException.XAER_NOTA); + } + + if (txnMgr.getTxnForThread() != null || + txn.getPrepared()) { + throw new XAException(XAException.XAER_PROTO); + } + } else if (tmResume) { + if (txn == null) { + throw new XAException(XAException.XAER_NOTA); + } + + if (!txn.getTxn().isSuspended()) { + throw new XAException(XAException.XAER_PROTO); + } + txn.getTxn().setSuspended(false); + } + + if (DEBUG) { + System.out.println + ("Setting Transaction for " + Thread.currentThread()); + } + txnMgr.setTxnForThread(txn); + } catch (DatabaseException DE) { + if (DEBUG) { + System.out.println("*** start exception"); + } + throwNewXAException(DE); + } + + if (DEBUG) { + System.out.println("*** start finished"); + } + } + + private void throwNewXAException(Exception E) + throws XAException { + + XAException ret = new XAException(E.toString()); + ret.initCause(E); + throw ret; + } +} diff --git a/src/com/sleepycat/je/XAFailureException.java b/src/com/sleepycat/je/XAFailureException.java new file mode 100644 index 0000000..dcc955e --- /dev/null +++ b/src/com/sleepycat/je/XAFailureException.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import com.sleepycat.je.txn.Locker; + +/** + * Thrown if an attempt is made to use a {@link Transaction} after it has been + * invalidated as the result of an XA failure. The invalidation occurs when + * {@code XAResource.end} is called by the resource manager with a {@code + * XAResource.TMFAIL} flag. + * + *

        The {@link Transaction} handle is invalidated as a result of this + * exception.
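+ *
+ * <p>For illustration only (not part of the original Javadoc), the
+ * sequence that produces this exception is roughly:
+ * <pre>{@code
+ * xaEnv.start(xid, XAResource.TMNOFLAGS);
+ * // ... the transaction manager detects a failure ...
+ * xaEnv.end(xid, XAResource.TMFAIL); // marks the txn abort-only
+ * // Any later use of the transaction throws an exception whose
+ * // cause is this XAFailureException.
+ * }</pre>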

        + * + * @since 4.0 + */ +public class XAFailureException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public XAFailureException(Locker locker) { + super(locker, true /*abortOnly*/, + "The TM_FAIL flag was passed to XAEnvironment.end().", + null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private XAFailureException(String message, + XAFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new XAFailureException(msg, this); + } +} diff --git a/src/com/sleepycat/je/cleaner/BaseLocalUtilizationTracker.java b/src/com/sleepycat/je/cleaner/BaseLocalUtilizationTracker.java new file mode 100644 index 0000000..5b86c90 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/BaseLocalUtilizationTracker.java @@ -0,0 +1,171 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.Iterator; +import java.util.Map; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Shared implementation for all local utilization trackers. Per-database + * utilization info is tracked in a local map rather than in the live + * DatabaseImpl objects. The transferToUtilizationTracker method is called to + * transfer per-file and per-database info to the (global) UtilizationTracker. + */ +abstract class BaseLocalUtilizationTracker extends BaseUtilizationTracker { + + /** + * Map of per-database utilization info. + * + * In LocalUtilizationTracker: + * IdentityHashMap of DatabaseImpl to DbFileSummaryMap + * + * In RecoveryUtilizationTracker: + * HashMap of DatabaseId to DbFileSummaryMap + */ + private Map dbMap; + + /** + * Creates a local tracker with a map keyed by DatabaseId or DatabaseImpl. + * + * When used by this class dbMap is an IdentityHashMap keyed by + * DatabaseImpl. When used by RecoveryUtilizationTracker dbMap is a HashMap + * keyed by DatabaseId. + */ + BaseLocalUtilizationTracker(EnvironmentImpl env, + Map dbMap) { + super(env, env.getCleaner()); + this.dbMap = dbMap; + } + + /** + * Returns the map of databases; for use by subclasses. + */ + Map getDatabaseMap() { + return dbMap; + } + + /** + * Transfers counts and offsets from this local tracker to the given + * (global) UtilizationTracker and to the live DatabaseImpl objects. + * + *

        When called after recovery has finished, must be called under the log + * write latch.

+ */ + public void transferToUtilizationTracker(UtilizationTracker tracker) + throws DatabaseException { + + /* Add file summary information, including obsolete offsets. */ + for (TrackedFileSummary localSummary : getTrackedFiles()) { + TrackedFileSummary fileSummary = + tracker.getFileSummary(localSummary.getFileNumber()); + fileSummary.addTrackedSummary(localSummary); + } + + /* Add DbFileSummary information. */ + Iterator dbMapKeys = dbMap.keySet().iterator(); + while (dbMapKeys.hasNext()) { + Object key = dbMapKeys.next(); + DatabaseImpl db = databaseKeyToDatabaseImpl(key); + /* If db is null, it was deleted. */ + DbFileSummaryMap fileMap = dbMap.get(key); + + if (db != null) { + Iterator<Map.Entry<Long, DbFileSummary>> fileEntries = + fileMap.entrySet().iterator(); + + while (fileEntries.hasNext()) { + Map.Entry<Long, DbFileSummary> fileEntry = + fileEntries.next(); + + Long fileNum = fileEntry.getKey(); + DbFileSummary dbFileSummary = + db.getDbFileSummary(fileNum, true /*willModify*/); + if (dbFileSummary != null) { + DbFileSummary localSummary = fileEntry.getValue(); + dbFileSummary.add(localSummary); + } + } + } + /* Ensure that DbTree.releaseDb is called. [#16329] */ + releaseDatabaseImpl(db); + /* This object is being discarded, subtract it from the budget. */ + fileMap.subtractFromMemoryBudget(); + } + }
*/ + DbFileSummaryMap fileMap = dbMap.remove(databaseKey); + if (fileMap != null) { + fileMap.subtractFromMemoryBudget(); + } + } +} diff --git a/src/com/sleepycat/je/cleaner/BaseUtilizationTracker.java b/src/com/sleepycat/je/cleaner/BaseUtilizationTracker.java new file mode 100644 index 0000000..34974ab --- /dev/null +++ b/src/com/sleepycat/je/cleaner/BaseUtilizationTracker.java @@ -0,0 +1,365 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Shared implementation for all utilization trackers. The base implementation + * keeps track of per-file utilization info only. Subclasses keep track of + * per-database info. + */ +public abstract class BaseUtilizationTracker { + + EnvironmentImpl env; + Cleaner cleaner; + private long activeFile; + + /** + * The tracked files are maintained in a volatile field Map that is "copied + * on write" whenever an element is added or removed. Add and remove are + * called only under the log write latch, but get and iteration may be + * performed at any time because the map is read-only. + */ + private volatile Map fileSummaries; + + BaseUtilizationTracker(EnvironmentImpl env, Cleaner cleaner) { + assert cleaner != null; + this.env = env; + this.cleaner = cleaner; + fileSummaries = new HashMap(); + activeFile = -1; + } + + public EnvironmentImpl getEnvironment() { + return env; + } + + public int getNumTFSs() { + return fileSummaries.size(); + } + + /** + * Returns a snapshot of the files being tracked as of the last time a + * log entry was added. The summary info returned is the delta since the + * last checkpoint, not the grand totals, and is approximate since it is + * changing in real time. This method may be called without holding the + * log write latch. + * + *

        If files are added or removed from the collection of tracked files in + * real time, the returned collection will not be changed since it is a + * snapshot. But the objects contained in the collection are live and will + * be updated in real time under the log write latch. The collection and + * the objects in the collection should not be modified by the caller.
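+ *
+ * <p>For illustration only (not part of the original Javadoc), the
+ * copy-on-write idiom that makes this latch-free read safe is, in
+ * sketch form:
+ * <pre>{@code
+ * // Writer side, under the log write latch: copy, modify, publish.
+ * Map<Long, TrackedFileSummary> newFiles = new HashMap<>(fileSummaries);
+ * newFiles.put(fileNumLong, file);
+ * fileSummaries = newFiles; // volatile write publishes the snapshot
+ * // Reader side, any thread: a single volatile read of the map.
+ * Collection<TrackedFileSummary> snapshot = fileSummaries.values();
+ * }</pre>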

        + */ + public Collection getTrackedFiles() { + return fileSummaries.values(); + } + + /** + * Returns one file from the snapshot of tracked files, or null if the + * given file number is not in the snapshot array. + * + * @see #getTrackedFiles + */ + public TrackedFileSummary getTrackedFile(long fileNum) { + + return fileSummaries.get(fileNum); + } + + /** + * Counts the addition of all new log entries including LNs. + * + *

        For the global tracker, must be called under the log write latch.

        + */ + final void countNew(long lsn, + Object databaseKey, + LogEntryType type, + int size) { + assert type != null; + /* Count in per-file and per-file-per-db summaries. */ + long fileNum = DbLsn.getFileNumber(lsn); + FileSummary fileSummary = getFileSummary(fileNum); + fileSummary.totalCount += 1; + fileSummary.totalSize += size; + boolean isLN = isLNType(type); + if (isLN && fileSummary.maxLNSize < size) { + fileSummary.maxLNSize = size; + } + if (trackObsoleteInfo(type)) { + assert databaseKey != null : + "No DB for lsn=" + DbLsn.getNoFormatString(lsn) + + " type: " + type; + DbFileSummary dbFileSummary = + getDbFileSummary(databaseKey, fileNum); + if (isLN) { + fileSummary.totalLNCount += 1; + fileSummary.totalLNSize += size; + if (dbFileSummary != null) { + dbFileSummary.totalLNCount += 1; + dbFileSummary.totalLNSize += size; + } + } else { + fileSummary.totalINCount += 1; + fileSummary.totalINSize += size; + if (dbFileSummary != null) { + dbFileSummary.totalINCount += 1; + dbFileSummary.totalINSize += size; + } + } + } + } + + /** + * Counts an obsolete node by incrementing the obsolete count and size. + * Tracks the LSN offset if trackOffset is true and the offset is non-zero. + * + *

        For the global tracker, must be called under the log write latch.

        + */ + final void countObsolete( + long lsn, + Object databaseKey, + LogEntryType type, + int size, + boolean countPerFile, + boolean countPerDb, + boolean trackOffset, + boolean checkDupOffsets) { + + assert trackObsoleteInfo(type); + + boolean isLN = isLNType(type); + long fileNum = DbLsn.getFileNumber(lsn); + + if (countPerFile) { + TrackedFileSummary fileSummary = getFileSummary(fileNum); + if (isLN) { + fileSummary.obsoleteLNCount += 1; + /* The size is optional when tracking obsolete LNs. */ + if (size > 0) { + fileSummary.obsoleteLNSize += size; + fileSummary.obsoleteLNSizeCounted += 1; + } + } else { + fileSummary.obsoleteINCount += 1; + /* The size is not allowed when tracking obsolete INs. */ + assert size == 0; + } + + if (trackOffset) { + long offset = DbLsn.getFileOffset(lsn); + if (offset != 0) { + fileSummary.trackObsolete(offset, checkDupOffsets); + } + } + } + + if (countPerDb) { + assert databaseKey != null : + "No DB for lsn=" + DbLsn.getNoFormatString(lsn) + + " type: " + type; + + DbFileSummary dbFileSummary = + getDbFileSummary(databaseKey, fileNum); + + if (dbFileSummary != null) { + if (isLN) { + dbFileSummary.obsoleteLNCount += 1; + /* The size is optional when tracking obsolete LNs. */ + if (size > 0) { + dbFileSummary.obsoleteLNSize += size; + dbFileSummary.obsoleteLNSizeCounted += 1; + } + } else { + dbFileSummary.obsoleteINCount += 1; + /* The size is not allowed when tracking obsolete INs. */ + assert size == 0; + } + } + } + } + + /** + * Counts all active LSNs in a database as obsolete in the per-file + * utilization summaries. This method is called during database + * remove/truncate or when replaying those operations during recovery. + * + *

        For the global tracker, must be called under the log write latch.
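+ *
+ * <p>A hypothetical worked example of the self-healing obsolete-size
+ * accounting performed in the method body (not part of the original
+ * Javadoc): if a DbFileSummary has totalLNCount=10, obsoleteLNCount=4
+ * and obsoleteLNSizeCounted=1, then deleting the database counts
+ * 10-4=6 newly obsolete LNs, and obsoleteLNSizeCounted grows by
+ * 6 + (4 - 1) = 9, so that all 10 LN sizes end up counted
+ * (1 previously + 9 now).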

        + * + * @param dbFileSummaries the map of Long file number to DbFileSummary for + * a database that is being deleted. + * + * @param mapLnLsn is the LSN of the MapLN when recovery is replaying the + * truncate/remove, or NULL_LSN when called outside of recovery; obsolete + * totals should only be counted when this LSN is prior to the LSN of the + * FileSummaryLN for the file being counted. + */ + public void countObsoleteDb( + DbFileSummaryMap dbFileSummaries, + long mapLnLsn) { + + for (Map.Entry entry : + dbFileSummaries.entrySet()) { + + Long fileNum = entry.getKey(); + + if (isFileUncounted(fileNum, mapLnLsn)) { + DbFileSummary dbFileSummary = entry.getValue(); + TrackedFileSummary fileSummary = + getFileSummary(fileNum.longValue()); + + /* + * Count as obsolete the currently active amounts in the + * database, which are the total amounts minus the previously + * counted obsolete amounts. + */ + int lnObsoleteCount = dbFileSummary.totalLNCount - + dbFileSummary.obsoleteLNCount; + int lnObsoleteSize = dbFileSummary.totalLNSize - + dbFileSummary.obsoleteLNSize; + int inObsoleteCount = dbFileSummary.totalINCount - + dbFileSummary.obsoleteINCount; + fileSummary.obsoleteLNCount += lnObsoleteCount; + fileSummary.obsoleteLNSize += lnObsoleteSize; + fileSummary.obsoleteINCount += inObsoleteCount; + + /* + * When a DB becomes obsolete, the size of all obsolete LNs can + * now be counted accurately because all LN bytes in the DB are + * now obsolete. The lnObsoleteSize value calculated above + * includes LNs that become obsolete now, plus those that + * became obsolete previously but whose size was not counted. + * The obsoleteLNSizeCounted field is updated accordingly + * below. In other words, DB obsolescence is self-healing with + * respect to obsolete LN sizes. [#19144] + */ + int lnObsoleteSizeCounted = lnObsoleteCount + + (dbFileSummary.obsoleteLNCount - + dbFileSummary.obsoleteLNSizeCounted); + fileSummary.obsoleteLNSizeCounted += lnObsoleteSizeCounted; + + /* + * Do not update the DbFileSummary. It will be flushed when + * the MapLN is deleted. If later replayed during recovery, we + * will call this method to update the per-file utilization. + */ + } + } + } + + /** + * Returns whether file summary information for the given LSN is not + * already counted. Outside of recovery, always returns true. For + * recovery, is overridden by RecoveryUtilizationTracker and returns + * whether the FileSummaryLN for the given file is prior to the given LSN. + * . + */ + boolean isFileUncounted(Long fileNum, long lsn) { + return true; + } + + /** + * Returns a DbFileSummary for the given database key and file number, + * adding an empty one if the file is not already being tracked. + * + *

        This method is implemented by subclasses which interpret the + * databaseKey as either the DatabaseImpl or a DatabaseId.

        + * + *

        For the global tracker, must be called under the log write latch.

        + * + * @return the summary, or null if the DB should not be tracked because + * the file has been deleted. + */ + abstract DbFileSummary getDbFileSummary(Object databaseKey, long fileNum); + + /** + * Returns a tracked file for the given file number, adding an empty one + * if the file is not already being tracked. + * + *

        For the global tracker, must be called under the log write latch.

        + */ + TrackedFileSummary getFileSummary(long fileNum) { + + if (activeFile < fileNum) { + activeFile = fileNum; + } + Long fileNumLong = Long.valueOf(fileNum); + TrackedFileSummary file = fileSummaries.get(fileNumLong); + if (file == null) { + /* Assign fileSummaries field after modifying the new map. */ + file = new TrackedFileSummary(this, fileNum, cleaner.trackDetail); + Map newFiles = + new HashMap(fileSummaries); + newFiles.put(fileNumLong, file); + fileSummaries = newFiles; + } + return file; + } + + /** + * Called after the FileSummaryLN is written to the log during checkpoint. + * + *

        We keep the active file summary in the tracked file map, but we + * remove older files to prevent unbounded growth of the map.

        + * + *

        Must be called under the log write latch.

        + */ + void resetFile(TrackedFileSummary fileSummary) { + + if (fileSummary.getFileNumber() < activeFile && + fileSummary.getAllowFlush()) { + /* Assign fileSummaries field after modifying the new map. */ + Map newFiles = + new HashMap(fileSummaries); + newFiles.remove(fileSummary.getFileNumber()); + fileSummaries = newFiles; + } + } + + /** + * Returns whether obsoleteness is tracked for the given type. Obsoleteness + * is tracked for node types and BIN-deltas. A null type is assumed to be + * an LN. + */ + public static boolean trackObsoleteInfo(LogEntryType type) { + return type == null || + type.isNodeType() || + type.equals(LogEntryType.LOG_BIN_DELTA) || + type.equals(LogEntryType.LOG_OLD_BIN_DELTA); + } + + /** + * Returns whether the given type is an LN; a null type is assumed to be an + * LN. + */ + public static boolean isLNType(LogEntryType type) { + return type == null || type.isLNType(); + } + + /** + * Update memory budgets when this tracker is closed and will never be + * accessed again. + */ + void close() { + for (TrackedFileSummary t: fileSummaries.values()) { + t.close(); + } + } +} diff --git a/src/com/sleepycat/je/cleaner/Cleaner.java b/src/com/sleepycat/je/cleaner/Cleaner.java new file mode 100644 index 0000000..b218072 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/Cleaner.java @@ -0,0 +1,1646 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_ACTIVE_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_AVAILABLE_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_CLEANED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_DEAD; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_MIGRATED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_OBSOLETE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_CLUSTER_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_DELETIONS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_DISK_READS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_ENTRIES_READ; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_CLEANED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_DEAD; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_MIGRATED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_OBSOLETE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNQUEUE_HITS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_CLEANED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_DEAD; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_EXPIRED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_LOCKED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_MARKED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_MIGRATED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_OBSOLETE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MARKED_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MAX_UTILIZATION; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MIN_UTILIZATION; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LNS_LOCKED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LN_QUEUE_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE_MAP; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_REPEAT_ITERATOR_READS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_RESERVED_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_REVISAL_RUNS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_RUNS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TOTAL_LOG_SIZE; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TO_BE_CLEANED_LNS_PROCESSED; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TWO_PASS_RUNS; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.GROUP_DESC; +import static com.sleepycat.je.cleaner.CleanerStatDefinition.GROUP_NAME; + +import java.io.File; +import java.io.IOException; +import java.text.NumberFormat; +import java.util.HashMap; +import java.util.Map; +import 
java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.cleaner.FileSelector.CheckpointStartCleanerState; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.FileSummaryLN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeLocation; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockManager; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.utilint.AtomicLongMapStat; +import com.sleepycat.je.utilint.DaemonRunner; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.FileStoreInfo; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.VLSN; + +/** + * The Cleaner is responsible for effectively garbage collecting the JE log. + * It selects the least utilized log file for cleaning (see FileSelector), + * reads through the log file (FileProcessor) and determines whether each entry + * is obsolete (no longer relevant) or active (referenced by the Btree). + * Entries that are active are migrated (copied) to the end of the log, and + * finally the cleaned file is deleted. + * + * The migration of active entries is a multi-step process that can be + * configured to operate in different ways. Eviction and checkpointing, as + * well as the cleaner threads (FileProcessor instances) are participants in + * this process. Migration may be immediate or lazy. + * + * Active INs are always migrated lazily, which means that they are marked + * dirty by the FileProcessor, and then logged later by an eviction or + * checkpoint. Active LNs are always migrated immediately by the FileProcessor + * by logging them. + * + * When the FileProcessor is finished with a file, all lazy migration for that + * file is normally completed by the end of the next checkpoint, if not sooner + * via eviction. The checkpoint/recovery mechanism will ensure that obsolete + * entries will not be referenced by the Btree. At the end of the checkpoint, + * it is therefore safe to delete the log file. + * + * There is one exception to the above paragraph. 
When attempting to migrate + * an LN, if the LN cannot be locked then we must retry the migration at a + * later time. Also, if a database removal is in progress, we consider all + * entries in the database obsolete but cannot delete the log file until + * database removal is complete. Such "pending" LNs and databases are queued + * and processed periodically during file processing and at the start of a + * checkpoint; see processPending(). In this case, we may have to wait for + * more than one checkpoint to occur before the log file can be deleted. See + * FileSelector and the use of the pendingLNs and pendingDBs collections. + */ +public class Cleaner implements DaemonRunner, EnvConfigObserver { + /* From cleaner */ + static final String CLEAN_IN = "CleanIN:"; + static final String CLEAN_LN = "CleanLN:"; + static final String CLEAN_PENDING_LN = "CleanPendingLN:"; + + private static final NumberFormat INT_FORMAT = + NumberFormat.getIntegerInstance(); + + /** + * The CacheMode to use for Btree searches. This is currently UNCHANGED + * because we update the generation of the BIN when we migrate an LN. + * In other other cases, it is not desirable to keep INs in cache. + */ + static final CacheMode UPDATE_GENERATION = CacheMode.UNCHANGED; + + /** + * Whether the cleaner should participate in critical eviction. Ideally + * the cleaner would not participate in eviction, since that would reduce + * the cost of cleaning. However, the cleaner can add large numbers of + * nodes to the cache. By not participating in eviction, other threads + * could be kept in a constant state of eviction and would effectively + * starve. Therefore, this setting is currently enabled. + */ + static final boolean DO_CRITICAL_EVICTION = true; + + private static final String DELETED_SUBDIR = "deleted"; + + /* Used to ensure that the cleaner is woken often enough. */ + private final static long MAX_CLEANER_BYTES_INTERVAL = 100L << 20;; + + /* 10GB is the lower threshold for adjusting MAX_DISK. */ + private final static long MAX_DISK_ADJUSTMENT_THRESHOLD = + 10L * 1024L * 1024L * 1024L; + + /* Used to disable processing of safe-to-delete files during testing. */ + private boolean fileDeletionEnabled = true; + + /* Used to limit manageDiskUsage calls to one thread at a time. */ + private final ReentrantLock manageDiskUsageLock = new ReentrantLock(); + + /* + * Cleaner stats. Updates to these counters occur in multiple threads, + * including FileProcessor threads, and are not synchronized. This could + * produce errors in counting, but avoids contention around stat updates. 
+ */ + private final StatGroup statGroup; + final LongStat nCleanerRuns; + final LongStat nTwoPassRuns; + final LongStat nRevisalRuns; + private final LongStat nCleanerDeletions; + final LongStat nINsObsolete; + final LongStat nINsCleaned; + final LongStat nINsDead; + final LongStat nINsMigrated; + final LongStat nBINDeltasObsolete; + final LongStat nBINDeltasCleaned; + final LongStat nBINDeltasDead; + final LongStat nBINDeltasMigrated; + final LongStat nLNsObsolete; + final LongStat nLNsExpired; + final LongStat nLNsCleaned; + final LongStat nLNsDead; + final LongStat nLNsLocked; + final LongStat nLNsMigrated; + final LongStat nLNsMarked; + final LongStat nLNQueueHits; + private final LongStat nPendingLNsProcessed; + private final LongStat nMarkedLNsProcessed; + private final LongStat nToBeCleanedLNsProcessed; + private final LongStat nClusterLNsProcessed; + private final LongStat nPendingLNsLocked; + final LongStat nEntriesRead; + final LongStat nDiskReads; + final LongStat nRepeatIteratorReads; + /* + * Log size stats. These are CUMMULATIVE and the stat objects are created + * by loadStats. They are accessed as a group while synchronized on + * statGroup to ensure the set of values is consistent/coherent. + */ + private FileProtector.LogSizeStats logSizeStats; + private long availableLogSize; + private long totalLogSize; + + /* + * Unlike availableLogSize, maxDiskOverage and freeDiskShortage are + * calculated based on actual disk usage, without subtracting the size of + * the reserved files. So these values may be GT zero even if + * availableLogSize is GTE zero. If maxDiskOverage or freeDiskShortage + * is GT zero, then manageDiskUsage will try to delete log files to + * avoid a violation. + */ + private long maxDiskOverage; + private long freeDiskShortage; + + /* Message summarizing current log size stats, with limit violations. */ + private String diskUsageMessage; + + /* + * If a disk usage limit is violated, this is diskUsageMessage; otherwise + * it is null. It is volatile so it can be checked cheaply during CRUD ops. + */ + private volatile String diskUsageViolationMessage; + + /* + * Used to prevent repeated logging about a disk limit violation. + * Protected by manageDiskUsageLock. + */ + private boolean loggedDiskLimitViolation; + + /* + * Configuration parameters. 
+ */ + long lockTimeout; + int readBufferSize; + int lookAheadCacheSize; + long nDeadlockRetries; + boolean expunge; + private boolean useDeletedDir; + int twoPassGap; + int twoPassThreshold; + boolean gradualExpiration; + long cleanerBytesInterval; + boolean trackDetail; + private boolean fetchObsoleteSize; + int dbCacheClearCount; + private final boolean rmwFixEnabled; + int minUtilization; + int minFileUtilization; + int minAge; + private long maxDiskLimit; + private long freeDiskLimit; + private long adjustedMaxDiskLimit; + + private final String name; + private final EnvironmentImpl env; + private final FileStoreInfo fileStoreInfo; + private final FileProtector fileProtector; + private final UtilizationProfile profile; + private final UtilizationTracker tracker; + private final ExpirationProfile expirationProfile; + private final UtilizationCalculator calculator; + private final FileSelector fileSelector; + private FileProcessor[] threads; + + private final Logger logger; + final AtomicLong totalRuns; + TestHook fileChosenHook; + + /** @see #processPending */ + private final AtomicBoolean processPendingReentrancyGuard = + new AtomicBoolean(false); + + /** @see #wakeupAfterWrite */ + private final AtomicLong bytesWrittenSinceActivation = new AtomicLong(0); + + public Cleaner(EnvironmentImpl env, String name) { + this.env = env; + this.name = name; + + /* Initialize the non-CUMULATIVE stats definitions. */ + statGroup = new StatGroup(GROUP_NAME, GROUP_DESC); + nCleanerRuns = new LongStat(statGroup, CLEANER_RUNS); + nTwoPassRuns = new LongStat(statGroup, CLEANER_TWO_PASS_RUNS); + nRevisalRuns = new LongStat(statGroup, CLEANER_REVISAL_RUNS); + nCleanerDeletions = new LongStat(statGroup, CLEANER_DELETIONS); + nINsObsolete = new LongStat(statGroup, CLEANER_INS_OBSOLETE); + nINsCleaned = new LongStat(statGroup, CLEANER_INS_CLEANED); + nINsDead = new LongStat(statGroup, CLEANER_INS_DEAD); + nINsMigrated = new LongStat(statGroup, CLEANER_INS_MIGRATED); + nBINDeltasObsolete = new LongStat(statGroup, CLEANER_BIN_DELTAS_OBSOLETE); + nBINDeltasCleaned = new LongStat(statGroup, CLEANER_BIN_DELTAS_CLEANED); + nBINDeltasDead = new LongStat(statGroup, CLEANER_BIN_DELTAS_DEAD); + nBINDeltasMigrated = new LongStat(statGroup, CLEANER_BIN_DELTAS_MIGRATED); + nLNsObsolete = new LongStat(statGroup, CLEANER_LNS_OBSOLETE); + nLNsExpired = new LongStat(statGroup, CLEANER_LNS_EXPIRED); + nLNsCleaned = new LongStat(statGroup, CLEANER_LNS_CLEANED); + nLNsDead = new LongStat(statGroup, CLEANER_LNS_DEAD); + nLNsLocked = new LongStat(statGroup, CLEANER_LNS_LOCKED); + nLNsMigrated = new LongStat(statGroup, CLEANER_LNS_MIGRATED); + nLNsMarked = new LongStat(statGroup, CLEANER_LNS_MARKED); + nLNQueueHits = new LongStat(statGroup, CLEANER_LNQUEUE_HITS); + nPendingLNsProcessed = + new LongStat(statGroup, CLEANER_PENDING_LNS_PROCESSED); + nMarkedLNsProcessed = + new LongStat(statGroup, CLEANER_MARKED_LNS_PROCESSED); + nToBeCleanedLNsProcessed = + new LongStat(statGroup, CLEANER_TO_BE_CLEANED_LNS_PROCESSED); + nClusterLNsProcessed = + new LongStat(statGroup, CLEANER_CLUSTER_LNS_PROCESSED); + nPendingLNsLocked = new LongStat(statGroup, CLEANER_PENDING_LNS_LOCKED); + nEntriesRead = new LongStat(statGroup, CLEANER_ENTRIES_READ); + nDiskReads = new LongStat(statGroup, CLEANER_DISK_READS); + nRepeatIteratorReads = + new LongStat(statGroup, CLEANER_REPEAT_ITERATOR_READS); + + logSizeStats = + new FileProtector.LogSizeStats(0, 0, 0, new HashMap<>()); + + if (env.isMemOnly()) { + fileStoreInfo = null; + } else { + try { + 
fileStoreInfo = FileStoreInfo.getInfo( + env.getEnvironmentHome().getAbsolutePath()); + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedException(env, e); + } + } + fileProtector = new FileProtector(env); + tracker = new UtilizationTracker(env, this); + profile = new UtilizationProfile(env, tracker); + expirationProfile = new ExpirationProfile(env); + calculator = new UtilizationCalculator(env, this); + fileSelector = new FileSelector(); + threads = new FileProcessor[0]; + logger = LoggerUtils.getLogger(getClass()); + totalRuns = new AtomicLong(0); + + /* + * The trackDetail property is immutable because of the complexity (if + * it were mutable) in determining whether to update the memory budget + * and perform eviction. + */ + trackDetail = env.getConfigManager().getBoolean + (EnvironmentParams.CLEANER_TRACK_DETAIL); + + rmwFixEnabled = env.getConfigManager().getBoolean + (EnvironmentParams.CLEANER_RMW_FIX); + + /* Initialize mutable properties and register for notifications. */ + setMutableProperties(env.getConfigManager()); + env.addConfigObserver(this); + } + + /** + * Process notifications of mutable property changes. + * + * @throws IllegalArgumentException via Environment ctor and + * setMutableConfig. + */ + public void envConfigUpdate(DbConfigManager cm, + EnvironmentMutableConfig ignore) { + + setMutableProperties(cm); + + /* A parameter that impacts cleaning may have changed. */ + wakeupActivate(); + } + + private void setMutableProperties(final DbConfigManager cm) { + + lockTimeout = cm.getDuration(EnvironmentParams.CLEANER_LOCK_TIMEOUT); + + readBufferSize = cm.getInt(EnvironmentParams.CLEANER_READ_SIZE); + if (readBufferSize <= 0) { + readBufferSize = + cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE); + } + + lookAheadCacheSize = + cm.getInt(EnvironmentParams.CLEANER_LOOK_AHEAD_CACHE_SIZE); + + nDeadlockRetries = cm.getInt(EnvironmentParams.CLEANER_DEADLOCK_RETRY); + + expunge = cm.getBoolean(EnvironmentParams.CLEANER_REMOVE); + + useDeletedDir = + cm.getBoolean(EnvironmentParams.CLEANER_USE_DELETED_DIR); + + twoPassGap = + cm.getInt(EnvironmentParams.CLEANER_TWO_PASS_GAP); + + twoPassThreshold = + cm.getInt(EnvironmentParams.CLEANER_TWO_PASS_THRESHOLD); + + if (twoPassThreshold == 0) { + twoPassThreshold = + cm.getInt(EnvironmentParams.CLEANER_MIN_UTILIZATION) - 5; + } + + gradualExpiration = + cm.getBoolean(EnvironmentParams.CLEANER_GRADUAL_EXPIRATION); + + dbCacheClearCount = + cm.getInt(EnvironmentParams.ENV_DB_CACHE_CLEAR_COUNT); + + int nThreads = cm.getInt(EnvironmentParams.CLEANER_THREADS); + assert nThreads > 0; + + if (nThreads != threads.length) { + + /* Shutdown threads when reducing their number. */ + for (int i = nThreads; i < threads.length; i += 1) { + if (threads[i] == null) { + continue; + } + threads[i].shutdown(); + threads[i] = null; + } + + /* Copy existing threads that are still used. */ + FileProcessor[] newThreads = new FileProcessor[nThreads]; + for (int i = 0; i < nThreads && i < threads.length; i += 1) { + newThreads[i] = threads[i]; + } + + /* Don't lose track of new threads if an exception occurs. */ + threads = newThreads; + + /* Start new threads when increasing their number. 
*/ + for (int i = 0; i < nThreads; i += 1) { + if (threads[i] != null) { + continue; + } + threads[i] = new FileProcessor( + name + '-' + (i + 1), + i == 0 /*firstThread*/, + env, this, profile, calculator, fileSelector); + } + } + + cleanerBytesInterval = cm.getLong( + EnvironmentParams.CLEANER_BYTES_INTERVAL); + + if (cleanerBytesInterval == 0) { + cleanerBytesInterval = + cm.getLong(EnvironmentParams.LOG_FILE_MAX) / 4; + + cleanerBytesInterval = Math.min( + cleanerBytesInterval, MAX_CLEANER_BYTES_INTERVAL); + } + + final int wakeupInterval = + cm.getDuration(EnvironmentParams.CLEANER_WAKEUP_INTERVAL); + + for (FileProcessor thread : threads) { + if (thread == null) { + continue; + } + thread.setWaitTime(wakeupInterval); + } + + fetchObsoleteSize = + cm.getBoolean(EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE); + + minAge = cm.getInt(EnvironmentParams.CLEANER_MIN_AGE); + minUtilization = cm.getInt(EnvironmentParams.CLEANER_MIN_UTILIZATION); + minFileUtilization = + cm.getInt(EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION); + + maxDiskLimit = cm.getLong(EnvironmentParams.MAX_DISK); + adjustedMaxDiskLimit = maxDiskLimit; + + if (env.isMemOnly()) { + /* Env home dir may not exist, can't query file system info. */ + freeDiskLimit = 0; + } else { + final int replayFreeDiskPct = env.getReplayFreeDiskPercent(); + if (replayFreeDiskPct == 0) { + /* No backward compatibility is needed. */ + freeDiskLimit = cm.getLong(EnvironmentParams.FREE_DISK); + } else { + /* Use replayFreeDiskPercent for backward compatibility. */ + if (cm.isSpecified(EnvironmentParams.FREE_DISK)) { + throw new IllegalArgumentException( + "Cannot specify both " + EnvironmentConfig.FREE_DISK + + " and je.rep.replayFreeDiskPercent."); + } + freeDiskLimit = + (getDiskTotalSpace() * replayFreeDiskPct) / 100; + } + + if (maxDiskLimit > MAX_DISK_ADJUSTMENT_THRESHOLD || + cm.isSpecified(EnvironmentParams.FREE_DISK) || + replayFreeDiskPct != 0) { + adjustedMaxDiskLimit -= freeDiskLimit; + } + } + } + + public FileProtector getFileProtector() { + return fileProtector; + } + + public UtilizationTracker getUtilizationTracker() { + return tracker; + } + + public UtilizationProfile getUtilizationProfile() { + return profile; + } + + UtilizationCalculator getUtilizationCalculator() { + return calculator; + } + + public ExpirationProfile getExpirationProfile() { + return expirationProfile; + } + + public FileSelector getFileSelector() { + return fileSelector; + } + + public boolean getFetchObsoleteSize(DatabaseImpl db) { + return fetchObsoleteSize && !db.isLNImmediatelyObsolete(); + } + + /** + * @see EnvironmentParams#CLEANER_RMW_FIX + * @see FileSummaryLN#postFetchInit + */ + public boolean isRMWFixEnabled() { + return rmwFixEnabled; + } + + /* For unit testing only. */ + void setFileChosenHook(TestHook hook) { + fileChosenHook = hook; + } + + /* + * Delegate the run/pause/wakeup/shutdown DaemonRunner operations. We + * always check for null to account for the possibility of exceptions + * during thread creation. Cleaner daemon can't ever be run if No Locking + * mode is enabled. + */ + public void runOrPause(boolean run) { + + if (env.isNoLocking()) { + return; + } + + for (FileProcessor processor : threads) { + if (processor == null) { + continue; + } + if (run) { + processor.activateOnWakeup(); + } + processor.runOrPause(run); + } + } + + /** + * If the number of bytes written since the last activation exceeds the + * cleaner's byte interval, wakeup the file processor threads in activate + * mode. 
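+ *
+ * <p>For example (hypothetical numbers, not part of the original
+ * Javadoc): with a byte interval of 25 MB, roughly every 25 MB of
+ * writes triggers one activation. The counter is read and reset
+ * without locking, so concurrent writers may occasionally produce an
+ * extra or slightly delayed wakeup; since wakeups are idempotent,
+ * this trade-off mirrors the unsynchronized stat updates described
+ * earlier in this class.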
+ */
+    public void wakeupAfterWrite(int writeSize) {
+
+        if (bytesWrittenSinceActivation.addAndGet(writeSize) >
+            cleanerBytesInterval) {
+
+            bytesWrittenSinceActivation.set(0);
+            wakeupActivate();
+        }
+    }
+
+    /**
+     * Wakes up the file processor threads in activate mode, meaning that
+     * FileProcessor.doClean will be called.
+     *
+     * @see FileProcessor#onWakeup()
+     */
+    public void wakeupActivate() {
+
+        for (FileProcessor thread : threads) {
+            if (thread == null) {
+                continue;
+            }
+            thread.activateOnWakeup();
+            thread.wakeup();
+        }
+    }
+
+    public void requestShutdown() {
+        for (FileProcessor thread : threads) {
+            if (thread == null) {
+                continue;
+            }
+            thread.requestShutdown();
+        }
+    }
+
+    public void shutdown() {
+        for (int i = 0; i < threads.length; i += 1) {
+            if (threads[i] == null) {
+                continue;
+            }
+            threads[i].shutdown();
+            threads[i] = null;
+        }
+    }
+
+    public int getNWakeupRequests() {
+        int count = 0;
+        for (FileProcessor thread : threads) {
+            if (thread == null) {
+                continue;
+            }
+            count += thread.getNWakeupRequests();
+        }
+        return count;
+    }
+
+    /**
+     * Cleans selected files and returns the number of files cleaned. This
+     * method is not invoked by a daemon thread; it is invoked
+     * programmatically.
+     *
+     * @param cleanMultipleFiles is true to clean until we're under budget,
+     * or false to clean at most one file.
+     *
+     * @param forceCleaning is true to clean even if we're not under the
+     * utilization threshold.
+     *
+     * @return the number of files cleaned, not including files cleaned
+     * unsuccessfully.
+     */
+    public int doClean(boolean cleanMultipleFiles, boolean forceCleaning) {
+
+        FileProcessor processor = createProcessor();
+
+        return processor.doClean
+            (false /*invokedFromDaemon*/, cleanMultipleFiles, forceCleaning);
+    }
+
+    public FileProcessor createProcessor() {
+        return new FileProcessor(
+            "", false, env, this, profile, calculator, fileSelector);
+    }
+
+    /**
+     * Load stats.
+     */
+    public StatGroup loadStats(StatsConfig config) {
+
+        final StatGroup stats = statGroup.cloneGroup(config.getClear());
+
+        /* Add all CUMULATIVE stats explicitly. */
+        new IntStat(
+            stats, CLEANER_MIN_UTILIZATION,
+            calculator.getCurrentMinUtilization());
+        new IntStat(
+            stats, CLEANER_MAX_UTILIZATION,
+            calculator.getCurrentMaxUtilization());
+        new IntStat(
+            stats, CLEANER_PENDING_LN_QUEUE_SIZE,
+            fileSelector.getPendingLNQueueSize());
+
+        /*
+         * Synchronize on statGroup while adding log size stats, to return a
+         * consistent set of values.
+         */
+        synchronized (statGroup) {
+            new LongStat(
+                stats, CLEANER_ACTIVE_LOG_SIZE,
+                logSizeStats.activeSize);
+            new LongStat(
+                stats, CLEANER_RESERVED_LOG_SIZE,
+                logSizeStats.reservedSize);
+            new LongStat(
+                stats, CLEANER_PROTECTED_LOG_SIZE,
+                logSizeStats.protectedSize);
+            new LongStat(
+                stats, CLEANER_AVAILABLE_LOG_SIZE,
+                availableLogSize);
+            new LongStat(
+                stats, CLEANER_TOTAL_LOG_SIZE,
+                totalLogSize);
+
+            final AtomicLongMapStat protectedSizeMap =
+                new AtomicLongMapStat(stats, CLEANER_PROTECTED_LOG_SIZE_MAP);
+
+            for (final Map.Entry<String, Long> entry :
+                 logSizeStats.protectedSizeMap.entrySet()) {
+
+                protectedSizeMap.
+                    createStat(entry.getKey()).
+                    set(entry.getValue());
+            }
+        }
+
+        return stats;
+    }
+
+    /**
+     * Enables or disables processing of safe-to-delete files, including
+     * the truncation of the VLSN index. Disabling this is needed for tests,
+     * when VLSNIndex changes should be prevented.
+ */ + public synchronized void enableFileDeletion(boolean enable) { + fileDeletionEnabled = enable; + } + + /** + * Updates log size stats and deletes unprotected reserved files in order + * to stay within disk limits. + * + * This method must be called frequently enough to maintain disk usage + * safely below the limits. For an HA env this is particularly important + * since we retain all reserved files until we approach the disk limits. + * For this, calling this method at least every CLEANER_BYTES_INTERVAL + * should suffice. + * + * It is also important to call this method based on a time interval + * when writing stops, to retry deletions when files are protected or the + * env is locked by read-only processes. For this, calling this method at + * least every CLEANER_WAKEUP_INTERVAL should suffice. + */ + public void manageDiskUsage() { + + /* Fail loudly if the environment is invalid. */ + env.checkIfInvalid(); + + if (env.isMemOnly() || env.mayNotWrite() || !fileDeletionEnabled) { + return; + } + + /* + * Only one thread at a time can truncate the VLSNIndex head, request + * the environment lock, and update stats. This is a periodic action, + * so probe the lock to avoid blocking other cleaner threads. + */ + if (!manageDiskUsageLock.tryLock()) { + return; + } + + try { + /* Periodically update the stats. */ + freshenLogSizeStats(); + + if (fileProtector.getNReservedFiles() > 0) { + + boolean freshenStats = false; + + if (env.isReplicated()) { + /* + * Reserved files are retained until we approach a disk + * limit. Determine how many bytes we need to reclaim by + * deleting reserved files. Add a reasonable value to stay + * safely below the limits between cleaner wakeups. Note + * that max(overage,shortage) may be negative, since + * overage and shortage may be negative. + */ + final long origBytesNeeded = + ((maxDiskLimit > 0) ? + Math.max(maxDiskOverage, freeDiskShortage) : + freeDiskShortage) + + Math.max( + 1L << 20, + 3 * cleanerBytesInterval); + + /* + * First try deleting files without truncating the + * VLSNIndex head. + */ + long bytesNeeded = origBytesNeeded; + if (bytesNeeded > 0) { + bytesNeeded = deleteUnprotectedFiles(bytesNeeded); + } + + /* + * If we still need space, try truncating the VLSNIndex + * and then deleting files. See FileProtector for details. + */ + if (bytesNeeded > 0 && + env.tryVlsnHeadTruncate(bytesNeeded)) { + + bytesNeeded = deleteUnprotectedFiles(bytesNeeded); + freshenStats = true; + } + + if (bytesNeeded < origBytesNeeded) { + freshenStats = true; + } + } else { + /* + * For a non-HA env, simply try to delete all the reserved + * files. + */ + final long bytesNeeded = + deleteUnprotectedFiles(Long.MAX_VALUE); + + if (bytesNeeded < Long.MAX_VALUE) { + freshenStats = true; + } + } + + /* + * Freshen the stats if any files are deleted, so write + * operations can occur ASAP if they were previously + * prohibited. Also freshen the stats if we truncated the + * VLSNIndex, so that the stats reflect the current factors + * gating file deletion. + */ + if (freshenStats) { + freshenLogSizeStats(); + } + } + + /* + * If there is still a violation, and we have not logged it since + * the violation status changed, then log it now. We do not expect + * the violation status to change frequently. 
+ */ + final String violation = diskUsageViolationMessage; + if (violation != null) { + if (!loggedDiskLimitViolation) { + LoggerUtils.logMsg(logger, env, Level.SEVERE, violation); + loggedDiskLimitViolation = true; + } + } else { + loggedDiskLimitViolation = false; + } + + } catch (EnvLockedException e) { + + LoggerUtils.logMsg( + logger, env, Level.SEVERE, + "Could not delete files due to read-only processes. " + + diskUsageMessage); + + } finally { + manageDiskUsageLock.unlock(); + } + } + + /** @see #deleteUnprotectedFiles */ + private static class EnvLockedException extends Exception {} + + /** + * Deletes unprotected reserved files in an attempt to free bytesNeeded. + * In a non-HA env, attempts to delete all reserved files, irrespective of + * bytesNeeded. + * + *
+ * <p>An exclusive environment lock is held while deleting the files to lock
+ * out read-only processes. The lock is held while deleting the reserved
+ * file records as well, but this is inexpensive and should not cause long
+ * delays for read-only processes.
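+ *
+ * <p>A minimal sketch of the caller's retry pattern, adapted from
+ * manageDiskUsage above (HA case):
+ * <pre>
+ *     long bytesNeeded = deleteUnprotectedFiles(origBytesNeeded);
+ *     // If still short on space, truncate the VLSNIndex head to
+ *     // unprotect more reserved files, then retry the deletions.
+ *     if (bytesNeeded &gt; 0 &amp;&amp; env.tryVlsnHeadTruncate(bytesNeeded)) {
+ *         bytesNeeded = deleteUnprotectedFiles(bytesNeeded);
+ *     }
+ * </pre>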
        + * + * @param bytesNeeded the amount of space we need to reclaim to stay within + * disk limits. + * + * @return number of bytes we could not reclaim due to protected files, or + * zero if we deleted files totaling bytesNeeded or more. + * + * @throws EnvLockedException if we can't get an exclusive environment + * lock because the env is locked by read-only processes, and therefore no + * files can be deleted. + */ + private long deleteUnprotectedFiles(long bytesNeeded) + throws EnvLockedException { + + final FileManager fileManager = env.getFileManager(); + final SortedSet deletedFiles = new TreeSet<>(); + + if (!fileManager.lockEnvironment(false, true)) { + throw new EnvLockedException(); + } + try { + long file = -1; + + while (bytesNeeded > 0 || !env.isReplicated()) { + + final Pair pair = + fileProtector.takeCondemnedFile(file + 1); + + if (pair == null) { + break; + } + + file = pair.first(); + final long size = pair.second(); + + if (!deleteFile(file)) { + /* Sometimes files cannot be deleted on Windows. */ + fileProtector.putBackCondemnedFile(file, size); + continue; + } + + bytesNeeded = Math.max(0, bytesNeeded - size); + profile.deleteReservedFileRecord(file); + nCleanerDeletions.increment(); + deletedFiles.add(file); + } + + } finally { + fileManager.releaseExclusiveLock(); + + if (!deletedFiles.isEmpty()) { + + final StringBuilder sb = new StringBuilder( + "Cleaner deleted files:"); + + for (final Long file : deletedFiles) { + sb.append(" 0x"); + sb.append(Long.toHexString(file)); + } + + LoggerUtils.traceAndLog( + logger, env, Level.INFO, sb.toString()); + } + } + + return bytesNeeded; + } + + /** + * Attempts to delete the file and returns whether it has been deleted. + */ + private boolean deleteFile(final Long file) { + final FileManager fileManager = env.getFileManager(); + + final String expungeLabel = expunge ? "delete" : "rename"; + final String expungedLabel = expungeLabel + "d"; + + try { + if (expunge) { + if (fileManager.deleteFile(file)) { + return true; + } + } else { + /* See EnvironmentConfig.CLEANER_EXPUNGE. */ + + final File newFile = fileManager.renameFile( + file, FileManager.DEL_SUFFIX, + useDeletedDir ? DELETED_SUBDIR : null); + + if (newFile != null) { + newFile.setLastModified(System.currentTimeMillis()); + return true; + } + } + } catch (IOException e) { + throw new EnvironmentFailureException( + env, EnvironmentFailureReason.LOG_WRITE, + "Unable to " + expungeLabel + " " + file, e); + } + + /* + * If the file is not valid (missing) then the file was previously + * deleted. This can occur on Windows when we retry deletion (see + * below). + */ + if (!fileManager.isFileValid(file)) { + return true; + } + + /* + * Log a message and return false to retry the deletion later. The + * deletion is known to fail on Windows, and probably this occurs whe + * the file was recently closed. + */ + LoggerUtils.traceAndLog( + logger, env, Level.WARNING, + "Log file 0x" + Long.toHexString(file) + " could not be " + + expungedLabel + ". The deletion will be retried later."); + + return false; + } + + /** + * Updates the cached set of log size stats, including maxDiskOverage, + * freeDiskShortage, diskUsageMessage and diskUsageViolationMessage. + * + * Normally this should only be called by manageDiskUsage while holding + * the manageDiskUsageLock. However, it may be called directly during + * (single threaded) recovery. 
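+ *
+ * <p>When a max disk limit is configured, the availability calculation
+ * performed by recalcLogSizeStats reduces to the following (see the worked
+ * examples in that method):
+ * <pre>
+ *     freeBytes = min(diskFreeSpace - freeDiskLimit,
+ *                     adjustedMaxDiskLimit - totalLogSize)
+ *     availableLogSize = freeBytes + reservedLogSize - protectedLogSize
+ * </pre>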
+ */ + public void freshenLogSizeStats() { + + recalcLogSizeStats( + fileProtector.getLogSizeStats(), getDiskFreeSpace()); + } + + /** + * Implementation of freshenLogSizeStats. Exposed for testing. + */ + void recalcLogSizeStats(final FileProtector.LogSizeStats stats, + final long diskFreeSpace) { + + /* + * Use locals for limits, since they may be changed by other threads. + */ + final long maxLimit = maxDiskLimit; + final long adjustedMax = adjustedMaxDiskLimit; + final long freeLimit = freeDiskLimit; + + /* + * Calculate overage/shortage and available size. Below are examples of + * availableLogBytes values where: + * + * totalLS=75 activeLS=50 reservedLS=25 protectedLS=5 + * + * freeDL maxDL diskFS freeB1 freeB2 availableLS + * 5 - 20 15 15 35 + * 25 - 5 -20 -20 0 + * 30 - 5 -25 -25 -5 + * 5 100 20 15 15 35 + * 25 100 20 -5 -5 15 + * 5 80 20 15 0 20 + * 25 80 20 -5 -20 0 + * 25 200 5 -20 -20 0 + * 25 75 20 -5 -25 -5 + * 50 80 90 40 -45 -25 + */ + final long freeBytes1 = diskFreeSpace - freeLimit; + final long freeShortage = 0 - freeBytes1; + final long totalSize = stats.activeSize + stats.reservedSize; + final long maxOverage; + final long freeBytes2; + + if (adjustedMax > 0) { + maxOverage = totalSize - adjustedMax; + freeBytes2 = Math.min(freeBytes1, adjustedMax - totalSize); + } else { + maxOverage = 0; + freeBytes2 = freeBytes1; + } + + final long availBytes = + freeBytes2 + stats.reservedSize - stats.protectedSize; + + final StringBuilder sb = new StringBuilder(); + + if (availBytes <= 0) { + sb.append("Disk usage is not within je.maxDisk or je.freeDisk "); + sb.append("limits and write operations are prohibited:"); + } else { + sb.append("Disk usage is currently within je.maxDisk and "); + sb.append("je.freeDisk limits:"); + } + + sb.append(" maxDiskLimit="); + sb.append(INT_FORMAT.format(maxLimit)); + sb.append(" freeDiskLimit="); + sb.append(INT_FORMAT.format(freeLimit)); + sb.append(" adjustedMaxDiskLimit="); + sb.append(INT_FORMAT.format(adjustedMax)); + sb.append(" maxDiskOverage="); + sb.append(INT_FORMAT.format(maxOverage)); + sb.append(" freeDiskShortage="); + sb.append(INT_FORMAT.format(freeShortage)); + sb.append(" diskFreeSpace="); + sb.append(INT_FORMAT.format(diskFreeSpace)); + sb.append(" availableLogSize="); + sb.append(INT_FORMAT.format(availBytes)); + sb.append(" totalLogSize="); + sb.append(INT_FORMAT.format(totalSize)); + sb.append(" activeLogSize="); + sb.append(INT_FORMAT.format(stats.activeSize)); + sb.append(" reservedLogSize="); + sb.append(INT_FORMAT.format(stats.reservedSize)); + sb.append(" protectedLogSize="); + sb.append(INT_FORMAT.format(stats.protectedSize)); + sb.append(" protectedLogSizeMap={"); + + for (final Map.Entry entry : + stats.protectedSizeMap.entrySet()) { + + sb.append(entry.getKey()).append(":"); + sb.append(INT_FORMAT.format(entry.getValue())); + } + + sb.append("}"); + + final String msg = sb.toString(); + + /* Synchronize on statGroup to maintain consistent set of stats. */ + synchronized (statGroup) { + maxDiskOverage = maxOverage; + freeDiskShortage = freeShortage; + diskUsageMessage = msg; + diskUsageViolationMessage = (availBytes <= 0) ? 
msg : null; + availableLogSize = availBytes; + totalLogSize = totalSize; + logSizeStats = stats; + } + } + + private long getDiskFreeSpace() { + try { + return fileStoreInfo.getUsableSpace(); + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedException(env, e); + } + } + + private long getDiskTotalSpace() { + try { + return fileStoreInfo.getTotalSpace(); + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedException(env, e); + } + } + + /** + * Returns a message describing disk space limits and usage, regardless of + * whether the limit is violated or not. If there is a limit violation, + * returns the same value as {@link #getDiskLimitViolation()}. Does not + * return null. + */ + public String getDiskLimitMessage() { + return diskUsageMessage; + } + + /** + * Uses cached disk usage info to determine whether disk space limits are + * currently violated. This method simply returns a volatile field. The + * cached information is updated frequently enough to prevent violating the + * limits by a large amount. + * + * @return a non-null message (appropriate for an exception) if a disk + * limit is currently violated, else null. + */ + public String getDiskLimitViolation() { + return diskUsageViolationMessage; + } + + long getMaxDiskOverage() { + return maxDiskOverage; + } + + long getFreeDiskShortage() { + return freeDiskShortage; + } + + /** + * Returns a copy of the cleaned and processed files at the time a + * checkpoint starts. + * + *
+ * <p>If non-null is returned, the checkpoint should flush an extra level,
+ * and addCheckpointedFiles() should be called when the checkpoint is
+ * complete.
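+ *
+ * <p>A minimal sketch of the expected calling sequence (the flush step is
+ * performed by the checkpointer and is elided here):
+ * <pre>
+ *     CheckpointStartCleanerState info =
+ *         cleaner.getFilesAtCheckpointStart();
+ *     // ... write the checkpoint, flushing an extra level ...
+ *     cleaner.updateFilesAtCheckpointEnd(info);
+ * </pre>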
        + */ + public CheckpointStartCleanerState getFilesAtCheckpointStart() { + + /* Pending LNs can prevent file deletion. */ + processPending(); + + return fileSelector.getFilesAtCheckpointStart(); + } + + /** + * When a checkpoint is complete, update the files that were returned at + * the beginning of the checkpoint. + */ + public void updateFilesAtCheckpointEnd(CheckpointStartCleanerState info) { + + /* Update cleaned file status and get newly reserved files. */ + final Map reservedFiles = + fileSelector.updateFilesAtCheckpointEnd(env, info); + + /* + * Insert reserved file db record and delete other (unnecessary) + * metadata for reserved files. + */ + profile.reserveFiles(reservedFiles); + + /* Try deleting files since file status may have changed. */ + manageDiskUsage(); + + /* + * Periodically process completed expiration trackers. This is done + * here in case cleaner threads are disabled. + */ + expirationProfile.processCompletedTrackers(); + } + + /** + * If any LNs or databases are pending, process them. This method should + * be called often enough to prevent the pending LN set from growing too + * large. + */ + void processPending() { + + /* + * This method is not synchronized because that would block cleaner + * and checkpointer threads unnecessarily. However, we do prevent + * reentrancy, for two reasons: + * 1. It is wasteful for two threads to process the same pending + * entries. + * 2. Many threads calling getDb may increase the liklihood of + * livelock. [#20816] + */ + if (!processPendingReentrancyGuard.compareAndSet(false, true)) { + return; + } + + try { + final DbTree dbMapTree = env.getDbTree(); + + final LockManager lockManager = + env.getTxnManager().getLockManager(); + + final Map pendingLNs = fileSelector.getPendingLNs(); + + if (pendingLNs != null) { + final TreeLocation location = new TreeLocation(); + + for (final Map.Entry entry : + pendingLNs.entrySet()) { + + if (!env.isValid()) { + return; + } + + if (diskUsageViolationMessage != null) { + break; /* We can't write. */ + } + + final long logLsn = entry.getKey(); + final LNInfo info = entry.getValue(); + + if (env.expiresWithin( + info.getExpirationTime(), + 0 - env.getTtlLnPurgeDelay())) { + + if (lockManager.isLockUncontended(logLsn)) { + fileSelector.removePendingLN(logLsn); + nLNsExpired.increment(); + nLNsObsolete.increment(); + } else { + nPendingLNsLocked.increment(); + } + continue; + } + + final DatabaseId dbId = info.getDbId(); + final DatabaseImpl db = dbMapTree.getDb(dbId, lockTimeout); + + try { + final byte[] key = info.getKey(); + + /* Evict before processing each entry. */ + if (DO_CRITICAL_EVICTION) { + env.daemonEviction(true /*backgroundIO*/); + } + + processPendingLN(logLsn, db, key, location); + + } finally { + dbMapTree.releaseDb(db); + } + } + } + + final DatabaseId[] pendingDBs = fileSelector.getPendingDBs(); + if (pendingDBs != null) { + for (final DatabaseId dbId : pendingDBs) { + if (!env.isValid()) { + return; + } + final DatabaseImpl db = dbMapTree.getDb(dbId, lockTimeout); + try { + if (db == null || db.isDeleteFinished()) { + fileSelector.removePendingDB(dbId); + } + } finally { + dbMapTree.releaseDb(db); + } + } + } + } finally { + processPendingReentrancyGuard.set(false); + } + } + + /** + * Processes a pending LN, getting the lock first to ensure that the + * overhead of retries is minimal. 
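+ *
+ * <p>Background: an LN that could not be locked while its file was being
+ * cleaned is registered as pending in the FileSelector, and its file
+ * cannot become safe-to-delete until removePendingLN is eventually called
+ * for it, either here or in processPending above.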
+ */ + private void processPendingLN( + final long logLsn, + final DatabaseImpl db, + final byte[] keyFromLog, + final TreeLocation location) { + + boolean parentFound; // We found the parent BIN. + boolean processedHere = true; // The LN was cleaned here. + boolean lockDenied = false; // The LN lock was denied. + boolean obsolete = false; // The LN is no longer in use. + boolean completed = false; // This method completed. + + BasicLocker locker = null; + BIN bin = null; + + try { + nPendingLNsProcessed.increment(); + + /* + * If the DB is gone, this LN is obsolete. If delete cleanup is in + * progress, put the DB into the DB pending set; this LN will be + * declared deleted after the delete cleanup is finished. + */ + if (db == null || db.isDeleted()) { + addPendingDB(db); + nLNsDead.increment(); + obsolete = true; + completed = true; + return; + } + + final Tree tree = db.getTree(); + assert tree != null; + + /* + * Get a non-blocking read lock on the original log LSN. If this + * fails, then the original LSN is still write-locked. We may have + * to lock again, if the LSN has changed in the BIN, but this + * initial check prevents a Btree lookup in some cases. + */ + locker = BasicLocker.createBasicLocker(env, false /*noWait*/); + + /* Don't allow this short-lived lock to be preempted/stolen. */ + locker.setPreemptable(false); + + final LockResult lockRet = locker.nonBlockingLock( + logLsn, LockType.READ, false /*jumpAheadOfWaiters*/, db); + + if (lockRet.getLockGrant() == LockGrantType.DENIED) { + /* Try again later. */ + nPendingLNsLocked.increment(); + lockDenied = true; + completed = true; + return; + } + + /* + * Search down to the bottom most level for the parent of this LN. + */ + parentFound = tree.getParentBINForChildLN( + location, keyFromLog, false /*splitsAllowed*/, + false /*blindDeltaOps*/, UPDATE_GENERATION); + + bin = location.bin; + final int index = location.index; + + if (!parentFound) { + nLNsDead.increment(); + obsolete = true; + completed = true; + return; + } + + /* Migrate an LN. */ + processedHere = false; + + lockDenied = + migratePendingLN(db, logLsn, bin.getLsn(index), bin, index); + + completed = true; + + } catch (RuntimeException e) { + e.printStackTrace(); + LoggerUtils.traceAndLogException( + env, "com.sleepycat.je.cleaner.Cleaner", + "processLN", "Exception thrown: ", e); + throw e; + } finally { + if (bin != null) { + bin.releaseLatch(); + } + + if (locker != null) { + locker.operationEnd(); + } + + /* BIN must not be latched when synchronizing on FileSelector. */ + if (completed && !lockDenied) { + fileSelector.removePendingLN(logLsn); + } + + /* + * If migratePendingLN was not called above, perform tracing in + * this method. + */ + if (processedHere) { + logFine(CLEAN_PENDING_LN, null /*node*/, DbLsn.NULL_LSN, + completed, obsolete, false /*migrated*/); + } + } + } + + /** + * Migrate a pending LN in the given BIN entry, if it is not obsolete. The + * BIN must be latched on entry and is left latched by this method. + * + * @return whether migration could not be completed because the LN lock was + * denied. + */ + private boolean migratePendingLN( + final DatabaseImpl db, + final long logLsn, + final long treeLsn, + final BIN bin, + final int index) { + + /* Status variables are used to generate debug tracing info. */ + boolean obsolete = false; // The LN is no longer in use. + boolean migrated = false; // The LN was in use and is migrated. + boolean completed = false; // This method completed. 
+ boolean clearTarget = false; // Node was non-resident when called. + + /* + * If wasCleaned is false we don't count statistics unless we migrate + * the LN. This avoids double counting. + */ + BasicLocker locker = null; + LN ln = null; + + try { + if (treeLsn == DbLsn.NULL_LSN) { + /* This node was never written, no need to migrate. */ + completed = true; + return false; + } + + /* If the record has been deleted, the logrec is obsolete */ + if (bin.isEntryKnownDeleted(index)) { + nLNsDead.increment(); + obsolete = true; + completed = true; + return false; + } + + /* + * Get a non-blocking read lock on the LN. A pending node is + * already locked, but the original pending LSN may have changed. + * We must lock the current LSN to guard against aborts. + */ + if (logLsn != treeLsn) { + + locker = BasicLocker.createBasicLocker(env, false /*noWait*/); + /* Don't allow this short-lived lock to be preempted/stolen. */ + locker.setPreemptable(false); + + final LockResult lockRet = locker.nonBlockingLock( + treeLsn, LockType.READ, false /*jumpAheadOfWaiters*/, db); + + if (lockRet.getLockGrant() == LockGrantType.DENIED) { + + /* + * LN is currently locked by another Locker, so we can't + * assume anything about the value of the LSN in the bin. + */ + nLNsLocked.increment(); + completed = true; + return true; + } else { + nLNsDead.increment(); + obsolete = true; + completed = true; + return false; + } + + } else if (bin.isEmbeddedLN(index)) { + throw EnvironmentFailureException.unexpectedState( + env, + "LN is embedded although its associated logrec (at " + + treeLsn + " does not have the embedded flag on"); + } + + /* + * Get the ln so that we can log it to its new position. + * Notice that the fetchLN() call below will return null if the + * slot is defunct and the LN has been purged by the cleaner. + */ + ln = (LN) bin.getTarget(index); + if (ln == null) { + ln = bin.fetchLN(index, CacheMode.EVICT_LN); + clearTarget = !db.getId().equals(DbTree.ID_DB_ID); + } + + /* Don't migrate defunct LNs. */ + if (ln == null || ln.isDeleted()) { + bin.setKnownDeletedAndEvictLN(index); + nLNsDead.increment(); + obsolete = true; + completed = true; + return false; + } + + /* + * Migrate the LN. + * + * Do not pass a locker, because there is no need to lock the new + * LSN, as done for user operations. Another locker cannot attempt + * to lock the new LSN until we're done, because we release the + * lock before we release the BIN latch. + */ + final LogItem logItem = ln.log( + env, db, null /*locker*/, null /*writeLockInfo*/, + false /*newEmbeddedLN*/, bin.getKey(index), + bin.getExpiration(index), bin.isExpirationInHours(), + false /*currEmbeddedLN*/, treeLsn, bin.getLastLoggedSize(index), + false /*isInsertion*/, true /*backgroundIO*/, + getMigrationRepContext(ln)); + + bin.updateEntry( + index, logItem.lsn, ln.getVLSNSequence(), + logItem.size); + + nLNsMigrated.increment(); + + /* Lock new LSN on behalf of existing lockers. */ + CursorImpl.lockAfterLsnChange( + db, treeLsn, logItem.lsn, null /*excludeLocker*/); + + migrated = true; + completed = true; + return false; + + } finally { + /* + * If the node was originally non-resident, evict it now so that we + * don't create more work for the evictor and reduce the cache + * memory available to the application. 
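+ *
+ * (clearTarget is true only when the LN was non-resident and was fetched
+ * by this method, and the DB is not the ID-mapping DB, so evicting here
+ * restores the BIN to its residency state prior to the call.)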
+ */ + if (clearTarget) { + bin.evictLN(index); + } + + if (locker != null) { + locker.operationEnd(); + } + + logFine( + CLEAN_PENDING_LN, ln, treeLsn, completed, obsolete, migrated); + } + } + + /** + * Returns the ReplicationContext to use for migrating the given LN. If + * VLSNs are preserved in this Environment then the VLSN is logically part + * of the data record, and LN.getVLSNSequence will return the VLSN, which + * should be included in the migrated LN. + */ + static ReplicationContext getMigrationRepContext(LN ln) { + long vlsnSeq = ln.getVLSNSequence(); + if (vlsnSeq <= 0) { + return ReplicationContext.NO_REPLICATE; + } + return new ReplicationContext(new VLSN(vlsnSeq), + false /*inReplicationStream*/); + } + + /** + * Adds the DB ID to the pending DB set if it is being deleted but deletion + * is not yet complete. + */ + void addPendingDB(DatabaseImpl db) { + if (db != null && db.isDeleted() && !db.isDeleteFinished()) { + DatabaseId id = db.getId(); + if (fileSelector.addPendingDB(id)) { + LoggerUtils.logMsg(logger, env, Level.FINE, + "CleanAddPendingDB " + id); + } + } + } + + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + void logFine(String action, + Node node, + long logLsn, + boolean completed, + boolean obsolete, + boolean dirtiedMigrated) { + + if (logger.isLoggable(Level.FINE)) { + StringBuilder sb = new StringBuilder(); + sb.append(action); + if (node instanceof IN) { + sb.append(" node="); + sb.append(((IN) node).getNodeId()); + } + sb.append(" logLsn="); + sb.append(DbLsn.getNoFormatString(logLsn)); + sb.append(" complete=").append(completed); + sb.append(" obsolete=").append(obsolete); + sb.append(" dirtiedOrMigrated=").append(dirtiedMigrated); + + LoggerUtils.logMsg(logger, env, Level.FINE, sb.toString()); + } + } + + /** + * Release resources and update memory budget. Should only be called + * when this environment is closed and will never be accessed again. + */ + public void close() { + profile.close(); + tracker.close(); + fileSelector.close(env.getMemoryBudget()); + } +} diff --git a/src/com/sleepycat/je/cleaner/CleanerStatDefinition.java b/src/com/sleepycat/je/cleaner/CleanerStatDefinition.java new file mode 100644 index 0000000..6078fe3 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/CleanerStatDefinition.java @@ -0,0 +1,391 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * Per-stat Metadata for JE cleaner statistics. 
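+ *
+ * <p>For illustration, each statistic pairs a public name and description
+ * with a StatDefinition constant that stat consumers can look up, e.g.:
+ * <pre>
+ *     StatDefinition def = CleanerStatDefinition.CLEANER_RUNS;
+ *     String name = def.getName();   // "nCleanerRuns"
+ * </pre>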
+ */ +public class CleanerStatDefinition { + + public static final String GROUP_NAME = "Cleaning"; + public static final String GROUP_DESC = + "Log cleaning involves garbage collection of data " + + "files in the append-only storage system."; + + public static final String CLEANER_RUNS_NAME = + "nCleanerRuns"; + public static final String CLEANER_RUNS_DESC = + "Number of cleaner runs, including two-pass runs."; + public static final StatDefinition CLEANER_RUNS = + new StatDefinition( + CLEANER_RUNS_NAME, + CLEANER_RUNS_DESC); + + public static final String CLEANER_TWO_PASS_RUNS_NAME = + "nTwoPassRuns"; + public static final String CLEANER_TWO_PASS_RUNS_DESC = + "Number of cleaner two-pass runs."; + public static final StatDefinition CLEANER_TWO_PASS_RUNS = + new StatDefinition( + CLEANER_TWO_PASS_RUNS_NAME, + CLEANER_TWO_PASS_RUNS_DESC); + + public static final String CLEANER_REVISAL_RUNS_NAME = + "nRevisalRuns"; + public static final String CLEANER_REVISAL_RUNS_DESC = + "Number of cleaner runs that ended in revising expiration info, but " + + "not in any cleaning."; + public static final StatDefinition CLEANER_REVISAL_RUNS = + new StatDefinition( + CLEANER_REVISAL_RUNS_NAME, + CLEANER_REVISAL_RUNS_DESC); + + public static final String CLEANER_DELETIONS_NAME = + "nCleanerDeletions"; + public static final String CLEANER_DELETIONS_DESC = + "Number of cleaner file deletions."; + public static final StatDefinition CLEANER_DELETIONS = + new StatDefinition( + CLEANER_DELETIONS_NAME, + CLEANER_DELETIONS_DESC); + + public static final String CLEANER_PENDING_LN_QUEUE_SIZE_NAME = + "pendingLNQueueSize"; + public static final String CLEANER_PENDING_LN_QUEUE_SIZE_DESC = + "Number of LNs pending because they were locked and could not be " + + "migrated."; + public static final StatDefinition CLEANER_PENDING_LN_QUEUE_SIZE = + new StatDefinition( + CLEANER_PENDING_LN_QUEUE_SIZE_NAME, + CLEANER_PENDING_LN_QUEUE_SIZE_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_INS_OBSOLETE_NAME = + "nINsObsolete"; + public static final String CLEANER_INS_OBSOLETE_DESC = + "Accumulated number of INs obsolete."; + public static final StatDefinition CLEANER_INS_OBSOLETE = + new StatDefinition( + CLEANER_INS_OBSOLETE_NAME, + CLEANER_INS_OBSOLETE_DESC); + + public static final String CLEANER_INS_CLEANED_NAME = + "nINsCleaned"; + public static final String CLEANER_INS_CLEANED_DESC = + "Accumulated number of INs cleaned."; + public static final StatDefinition CLEANER_INS_CLEANED = + new StatDefinition( + CLEANER_INS_CLEANED_NAME, + CLEANER_INS_CLEANED_DESC); + + public static final String CLEANER_INS_DEAD_NAME = + "nINsDead"; + public static final String CLEANER_INS_DEAD_DESC = + "Accumulated number of INs that were not found in the tree anymore " + + "(deleted)."; + public static final StatDefinition CLEANER_INS_DEAD = + new StatDefinition( + CLEANER_INS_DEAD_NAME, + CLEANER_INS_DEAD_DESC); + + public static final String CLEANER_INS_MIGRATED_NAME = + "nINsMigrated"; + public static final String CLEANER_INS_MIGRATED_DESC = + "Accumulated number of INs migrated."; + public static final StatDefinition CLEANER_INS_MIGRATED = + new StatDefinition( + CLEANER_INS_MIGRATED_NAME, + CLEANER_INS_MIGRATED_DESC); + + public static final String CLEANER_BIN_DELTAS_OBSOLETE_NAME = + "nBINDeltasObsolete"; + public static final String CLEANER_BIN_DELTAS_OBSOLETE_DESC = + "Accumulated number of BIN-deltas obsolete."; + public static final StatDefinition CLEANER_BIN_DELTAS_OBSOLETE = + new StatDefinition( + 
CLEANER_BIN_DELTAS_OBSOLETE_NAME, + CLEANER_BIN_DELTAS_OBSOLETE_DESC); + + public static final String CLEANER_BIN_DELTAS_CLEANED_NAME = + "nBINDeltasCleaned"; + public static final String CLEANER_BIN_DELTAS_CLEANED_DESC = + "Accumulated number of BIN-deltas cleaned."; + public static final StatDefinition CLEANER_BIN_DELTAS_CLEANED = + new StatDefinition( + CLEANER_BIN_DELTAS_CLEANED_NAME, + CLEANER_BIN_DELTAS_CLEANED_DESC); + + public static final String CLEANER_BIN_DELTAS_DEAD_NAME = + "nBINDeltasDead"; + public static final String CLEANER_BIN_DELTAS_DEAD_DESC = + "Accumulated number of BIN-deltas that were not found in the tree " + + "anymore (deleted)."; + public static final StatDefinition CLEANER_BIN_DELTAS_DEAD = + new StatDefinition( + CLEANER_BIN_DELTAS_DEAD_NAME, + CLEANER_BIN_DELTAS_DEAD_DESC); + + public static final String CLEANER_BIN_DELTAS_MIGRATED_NAME = + "nBINDeltasMigrated"; + public static final String CLEANER_BIN_DELTAS_MIGRATED_DESC = + "Accumulated number of BIN-deltas migrated."; + public static final StatDefinition CLEANER_BIN_DELTAS_MIGRATED = + new StatDefinition( + CLEANER_BIN_DELTAS_MIGRATED_NAME, + CLEANER_BIN_DELTAS_MIGRATED_DESC); + + public static final String CLEANER_LNS_OBSOLETE_NAME = + "nLNsObsolete"; + public static final String CLEANER_LNS_OBSOLETE_DESC = + "Accumulated number of LNs obsolete."; + public static final StatDefinition CLEANER_LNS_OBSOLETE = + new StatDefinition( + CLEANER_LNS_OBSOLETE_NAME, + CLEANER_LNS_OBSOLETE_DESC); + + public static final String CLEANER_LNS_EXPIRED_NAME = + "nLNsExpired"; + public static final String CLEANER_LNS_EXPIRED_DESC = + "Accumulated number of obsolete LNs that were expired."; + public static final StatDefinition CLEANER_LNS_EXPIRED = + new StatDefinition( + CLEANER_LNS_EXPIRED_NAME, + CLEANER_LNS_EXPIRED_DESC); + + public static final String CLEANER_LNS_CLEANED_NAME = + "nLNsCleaned"; + public static final String CLEANER_LNS_CLEANED_DESC = + "Accumulated number of LNs cleaned."; + public static final StatDefinition CLEANER_LNS_CLEANED = + new StatDefinition( + CLEANER_LNS_CLEANED_NAME, + CLEANER_LNS_CLEANED_DESC); + + public static final String CLEANER_LNS_DEAD_NAME = + "nLNsDead"; + public static final String CLEANER_LNS_DEAD_DESC = + "Accumulated number of LNs that were not found in the tree anymore " + + "(deleted)."; + public static final StatDefinition CLEANER_LNS_DEAD = + new StatDefinition( + CLEANER_LNS_DEAD_NAME, + CLEANER_LNS_DEAD_DESC); + + public static final String CLEANER_LNS_LOCKED_NAME = + "nLNsLocked"; + public static final String CLEANER_LNS_LOCKED_DESC = + "Accumulated number of LNs encountered that were locked."; + public static final StatDefinition CLEANER_LNS_LOCKED = + new StatDefinition( + CLEANER_LNS_LOCKED_NAME, + CLEANER_LNS_LOCKED_DESC); + + public static final String CLEANER_LNS_MIGRATED_NAME = + "nLNsMigrated"; + public static final String CLEANER_LNS_MIGRATED_DESC = + "Accumulated number of LNs that were migrated forward in the log by " + + "the cleaner."; + public static final StatDefinition CLEANER_LNS_MIGRATED = + new StatDefinition( + CLEANER_LNS_MIGRATED_NAME, + CLEANER_LNS_MIGRATED_DESC); + + public static final String CLEANER_LNS_MARKED_NAME = + "nLNsMarked"; + public static final String CLEANER_LNS_MARKED_DESC = + "Accumulated number of LNs in temporary DBs that were dirtied by the" + + " cleaner and subsequently logging during checkpoint/eviction."; + public static final StatDefinition CLEANER_LNS_MARKED = + new StatDefinition( + CLEANER_LNS_MARKED_NAME, + 
CLEANER_LNS_MARKED_DESC); + + public static final String CLEANER_LNQUEUE_HITS_NAME = + "nLNQueueHits"; + public static final String CLEANER_LNQUEUE_HITS_DESC = + "Accumulated number of LNs processed without a tree lookup."; + public static final StatDefinition CLEANER_LNQUEUE_HITS = + new StatDefinition( + CLEANER_LNQUEUE_HITS_NAME, + CLEANER_LNQUEUE_HITS_DESC); + + public static final String CLEANER_PENDING_LNS_PROCESSED_NAME = + "nPendingLNsProcessed"; + public static final String CLEANER_PENDING_LNS_PROCESSED_DESC = + "Accumulated number of LNs processed because they were previously " + + "locked."; + public static final StatDefinition CLEANER_PENDING_LNS_PROCESSED = + new StatDefinition( + CLEANER_PENDING_LNS_PROCESSED_NAME, + CLEANER_PENDING_LNS_PROCESSED_DESC); + + public static final String CLEANER_MARKED_LNS_PROCESSED_NAME = + "nMarkLNsProcessed"; + public static final String CLEANER_MARKED_LNS_PROCESSED_DESC = + "Accumulated number of LNs processed because they were previously " + + "marked for migration."; + public static final StatDefinition CLEANER_MARKED_LNS_PROCESSED = + new StatDefinition( + CLEANER_MARKED_LNS_PROCESSED_NAME, + CLEANER_MARKED_LNS_PROCESSED_DESC); + + public static final String CLEANER_TO_BE_CLEANED_LNS_PROCESSED_NAME = + "nToBeCleanedLNsProcessed"; + public static final String CLEANER_TO_BE_CLEANED_LNS_PROCESSED_DESC = + "Accumulated number of LNs processed because they are soon to be " + + "cleaned."; + public static final StatDefinition CLEANER_TO_BE_CLEANED_LNS_PROCESSED = + new StatDefinition( + CLEANER_TO_BE_CLEANED_LNS_PROCESSED_NAME, + CLEANER_TO_BE_CLEANED_LNS_PROCESSED_DESC); + + public static final String CLEANER_CLUSTER_LNS_PROCESSED_NAME = + "nClusterLNsProcessed"; + public static final String CLEANER_CLUSTER_LNS_PROCESSED_DESC = + "Accumulated number of LNs processed because they qualify for " + + "clustering."; + public static final StatDefinition CLEANER_CLUSTER_LNS_PROCESSED = + new StatDefinition( + CLEANER_CLUSTER_LNS_PROCESSED_NAME, + CLEANER_CLUSTER_LNS_PROCESSED_DESC); + + public static final String CLEANER_PENDING_LNS_LOCKED_NAME = + "nPendingLNsLocked"; + public static final String CLEANER_PENDING_LNS_LOCKED_DESC = + "Accumulated number of pending LNs that could not be locked for " + + "migration because of a long duration application lock."; + public static final StatDefinition CLEANER_PENDING_LNS_LOCKED = + new StatDefinition( + CLEANER_PENDING_LNS_LOCKED_NAME, + CLEANER_PENDING_LNS_LOCKED_DESC); + + public static final String CLEANER_ENTRIES_READ_NAME = + "nCleanerEntriesRead"; + public static final String CLEANER_ENTRIES_READ_DESC = + "Accumulated number of log entries read by the cleaner."; + public static final StatDefinition CLEANER_ENTRIES_READ = + new StatDefinition( + CLEANER_ENTRIES_READ_NAME, + CLEANER_ENTRIES_READ_DESC); + + public static final String CLEANER_DISK_READS_NAME = + "nCleanerDisksReads"; + public static final String CLEANER_DISK_READS_DESC = + "Number of disk reads by the cleaner."; + public static final StatDefinition CLEANER_DISK_READS = + new StatDefinition( + CLEANER_DISK_READS_NAME, + CLEANER_DISK_READS_DESC); + + public static final String CLEANER_REPEAT_ITERATOR_READS_NAME = + "nRepeatIteratorReads"; + public static final String CLEANER_REPEAT_ITERATOR_READS_DESC = + "Number of attempts to read a log entry larger than the read buffer " + + "size during which the log buffer couldn't be grown enough to " + + "accommodate the object."; + public static final StatDefinition CLEANER_REPEAT_ITERATOR_READS = + 
new StatDefinition( + CLEANER_REPEAT_ITERATOR_READS_NAME, + CLEANER_REPEAT_ITERATOR_READS_DESC); + + public static final String CLEANER_ACTIVE_LOG_SIZE_NAME = + "activeLogSize"; + public static final String CLEANER_ACTIVE_LOG_SIZE_DESC = + "Bytes used by all active data files: files required " + + "for basic JE operation."; + public static final StatDefinition CLEANER_ACTIVE_LOG_SIZE = + new StatDefinition( + CLEANER_ACTIVE_LOG_SIZE_NAME, + CLEANER_ACTIVE_LOG_SIZE_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_RESERVED_LOG_SIZE_NAME = + "reservedLogSize"; + public static final String CLEANER_RESERVED_LOG_SIZE_DESC = + "Bytes used by all reserved data files: files that have been" + + "cleaned and can be deleted if they are not protected."; + public static final StatDefinition CLEANER_RESERVED_LOG_SIZE = + new StatDefinition( + CLEANER_RESERVED_LOG_SIZE_NAME, + CLEANER_RESERVED_LOG_SIZE_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_PROTECTED_LOG_SIZE_NAME = + "protectedLogSize"; + public static final String CLEANER_PROTECTED_LOG_SIZE_DESC = + "Bytes used by all protected data files: the subset of reserved " + + "files that are temporarily protected and cannot be deleted."; + public static final StatDefinition CLEANER_PROTECTED_LOG_SIZE = + new StatDefinition( + CLEANER_PROTECTED_LOG_SIZE_NAME, + CLEANER_PROTECTED_LOG_SIZE_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_PROTECTED_LOG_SIZE_MAP_NAME = + "protectedLogSizeMap"; + public static final String CLEANER_PROTECTED_LOG_SIZE_MAP_DESC = + "A breakdown of protectedLogSize as a map of protecting " + + "entity name to protected size in bytes."; + public static final StatDefinition CLEANER_PROTECTED_LOG_SIZE_MAP = + new StatDefinition( + CLEANER_PROTECTED_LOG_SIZE_MAP_NAME, + CLEANER_PROTECTED_LOG_SIZE_MAP_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_AVAILABLE_LOG_SIZE_NAME = + "availableLogSize"; + public static final String CLEANER_AVAILABLE_LOG_SIZE_DESC = + "Bytes available for write operations when unprotected reserved " + + "files are deleted: " + + "free space + reservedLogSize - protectedLogSize."; + public static final StatDefinition CLEANER_AVAILABLE_LOG_SIZE = + new StatDefinition( + CLEANER_AVAILABLE_LOG_SIZE_NAME, + CLEANER_AVAILABLE_LOG_SIZE_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_TOTAL_LOG_SIZE_NAME = + "totalLogSize"; + public static final String CLEANER_TOTAL_LOG_SIZE_DESC = + "Total bytes used by data files on disk: " + + "activeLogSize + reservedLogSize."; + public static final StatDefinition CLEANER_TOTAL_LOG_SIZE = + new StatDefinition( + CLEANER_TOTAL_LOG_SIZE_NAME, + CLEANER_TOTAL_LOG_SIZE_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_MIN_UTILIZATION_NAME = + "minUtilization"; + public static final String CLEANER_MIN_UTILIZATION_DESC = + "The current minimum (lower bound) log utilization as a percentage."; + public static final StatDefinition CLEANER_MIN_UTILIZATION = + new StatDefinition( + CLEANER_MIN_UTILIZATION_NAME, + CLEANER_MIN_UTILIZATION_DESC, + StatType.CUMULATIVE); + + public static final String CLEANER_MAX_UTILIZATION_NAME = + "maxUtilization"; + public static final String CLEANER_MAX_UTILIZATION_DESC = + "The current maximum (upper bound) log utilization as a percentage."; + public static final StatDefinition CLEANER_MAX_UTILIZATION = + new StatDefinition( + CLEANER_MAX_UTILIZATION_NAME, + CLEANER_MAX_UTILIZATION_DESC, + StatType.CUMULATIVE); +} diff --git 
a/src/com/sleepycat/je/cleaner/DbFileSummary.java b/src/com/sleepycat/je/cleaner/DbFileSummary.java new file mode 100644 index 0000000..d3eac4f --- /dev/null +++ b/src/com/sleepycat/je/cleaner/DbFileSummary.java @@ -0,0 +1,159 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; + +/** + * Per-DB-per-file utilization counters. The DatabaseImpl stores a persistent + * map of file number to DbFileSummary. + */ +public class DbFileSummary implements Loggable, Cloneable { + + /* Persistent fields. */ + public int totalINCount; // Number of IN log entries + public int totalINSize; // Byte size of IN log entries + public int totalLNCount; // Number of LN log entries + public int totalLNSize; // Byte size of LN log entries + public int obsoleteINCount; // Number of obsolete IN log entries + public int obsoleteLNCount; // Number of obsolete LN log entries + public int obsoleteLNSize; // Byte size of obsolete LN log entries + public int obsoleteLNSizeCounted; // Number obsolete LNs with size counted + + /** + * Creates an empty summary. + */ + public DbFileSummary() { + } + + /** + * Add the totals of the given summary object to the totals of this object. + */ + public void add(DbFileSummary o) { + + totalINCount += o.totalINCount; + totalINSize += o.totalINSize; + totalLNCount += o.totalLNCount; + totalLNSize += o.totalLNSize; + obsoleteINCount += o.obsoleteINCount; + obsoleteLNCount += o.obsoleteLNCount; + obsoleteLNSize += o.obsoleteLNSize; + obsoleteLNSizeCounted += o.obsoleteLNSizeCounted; + } + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + return + LogUtils.getPackedIntLogSize(totalINCount) + + LogUtils.getPackedIntLogSize(totalINSize) + + LogUtils.getPackedIntLogSize(totalLNCount) + + LogUtils.getPackedIntLogSize(totalLNSize) + + LogUtils.getPackedIntLogSize(obsoleteINCount) + + LogUtils.getPackedIntLogSize(obsoleteLNCount) + + LogUtils.getPackedIntLogSize(obsoleteLNSize) + + LogUtils.getPackedIntLogSize(obsoleteLNSizeCounted); + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer buf) { + + LogUtils.writePackedInt(buf, totalINCount); + LogUtils.writePackedInt(buf, totalINSize); + LogUtils.writePackedInt(buf, totalLNCount); + LogUtils.writePackedInt(buf, totalLNSize); + LogUtils.writePackedInt(buf, obsoleteINCount); + LogUtils.writePackedInt(buf, obsoleteLNCount); + LogUtils.writePackedInt(buf, obsoleteLNSize); + LogUtils.writePackedInt(buf, obsoleteLNSizeCounted); + } + + /** + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer buf, int entryTypeVersion) { + + totalINCount = LogUtils.readPackedInt(buf); + totalINSize = LogUtils.readPackedInt(buf); + totalLNCount = LogUtils.readPackedInt(buf); + totalLNSize = LogUtils.readPackedInt(buf); + obsoleteINCount = LogUtils.readPackedInt(buf); + obsoleteLNCount = LogUtils.readPackedInt(buf); + obsoleteLNSize = LogUtils.readPackedInt(buf); + obsoleteLNSizeCounted = 
LogUtils.readPackedInt(buf); + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder buf, boolean verbose) { + + buf.append(""); + } + + /** + * Never called. + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } + + @Override + public DbFileSummary clone() { + try { + return (DbFileSummary) super.clone(); + } catch (CloneNotSupportedException e) { + /* Should never happen. */ + throw new IllegalStateException(e); + } + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + dumpLog(buf, true); + return buf.toString(); + } +} diff --git a/src/com/sleepycat/je/cleaner/DbFileSummaryMap.java b/src/com/sleepycat/je/cleaner/DbFileSummaryMap.java new file mode 100644 index 0000000..11b6126 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/DbFileSummaryMap.java @@ -0,0 +1,207 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.FileManager; + +public class DbFileSummaryMap { + + private final static int FILE_ENTRY_OVERHEAD = + MemoryBudget.HASHMAP_ENTRY_OVERHEAD + + MemoryBudget.LONG_OVERHEAD + + MemoryBudget.DBFILESUMMARY_OVERHEAD; + + private Map map; + private int memSize; + private MemoryBudget budget; + + /** + * Creates a map of Long file number to DbFileSummary. The init() method + * must be called after creating this object. + * + *
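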
+ * <p>Always counts this object and its contained objects in the memory
+ * budget. If countParentMapEntry is true, also counts a single HashMap
+ * entry that contains this object. This option allows all memory budget
+ * adjustments for LocalUtilizationTracker to be contained in this
+ * class.
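+ *
+ * <p>A minimal usage sketch (envImpl and fileNum are assumed to exist in
+ * the caller):
+ * <pre>
+ *     DbFileSummaryMap map = new DbFileSummaryMap(false);
+ *     map.init(envImpl);                  // begin memory budgeting
+ *     DbFileSummary summary = map.get(
+ *         fileNum, true /*adjustMemBudget*/,
+ *         false /*checkResurrected*/, null /*fileManager*/);
+ *     summary.totalLNCount += 1;          // caller holds log write latch
+ * </pre>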
        + */ + public DbFileSummaryMap(boolean countParentMapEntry) { + map = new HashMap(); + memSize = MemoryBudget.HASHMAP_OVERHEAD; + if (countParentMapEntry) { + memSize += MemoryBudget.HASHMAP_ENTRY_OVERHEAD; + } + } + + /** + * Starts memory budgeting. The map and its entries will be counted in + * the budget. When adding entries via the get() method prior to calling + * this method, the adjustMemBudget parameter must be false. After calling + * this method, the adjustMemBudget parameter must be true. + * + *
+ * <p>This method is separate from the constructor so that the map may be
+ * read from the log without having the EnvironmentImpl object
+ * available.
        + */ + public void init(EnvironmentImpl env) { + budget = env.getMemoryBudget(); + budget.updateTreeAdminMemoryUsage(memSize); + } + + /** + * Returns the DbFileSummary for the given file, allocating it if + * necessary. + * + *
+ * <p>Must be called under the log write latch.
        + * + * @param fileNum the file identifying the summary. + * + * @param adjustMemBudget see init(). + * + * @param checkResurrected is true if this method should check fileNum and + * return null if the file does not exist. When checkResurrected is false, + * the expensive call to File.exists will not be made. + * + * @param fileManager is used to check for resurrected files and may be + * null if checkResurrected is false. + */ + public DbFileSummary get(Long fileNum, + boolean adjustMemBudget, + boolean checkResurrected, + FileManager fileManager) { + + assert adjustMemBudget == (budget != null); + + /* + * Note that the call below to isFileValid (which calls File.exists) is + * only made if the file number is less than the last file in the log, + * and the file is not already present in the map. When the file is + * not the last file, we are recording obsoleteness and the file should + * already be in the map. So we only incur the overhead of File.exists + * when resurrecting a file, which should be pretty rare. + * + * The reliability of this approach is questionable. Earlier we had an + * assertion that double-checked this condition after adding a new map + * entry and the assertion sometimes fired, indicating that the file + * was deleted during the execution of this method. Luckily, we plan + * to remove per-DB utilization metadata completely in the future. + */ + DbFileSummary summary = map.get(fileNum); + if (summary == null) { + if (checkResurrected && + fileNum < fileManager.getCurrentFileNum() && + !fileManager.isFileValid(fileNum)) { + /* Will return null. */ + } else { + summary = new DbFileSummary(); + Object oldVal = map.put(fileNum, summary); + assert oldVal == null; + memSize += FILE_ENTRY_OVERHEAD; + if (adjustMemBudget) { + budget.updateTreeAdminMemoryUsage(FILE_ENTRY_OVERHEAD); + } + } + } + return summary; + } + + /** + * Removes the DbFileSummary for the given file. + * + *
+ * <p>Must be called under the log write latch.
        + */ + public boolean remove(Long fileNum) { + if (map.remove(fileNum) != null) { + budget.updateTreeAdminMemoryUsage(0 - FILE_ENTRY_OVERHEAD); + memSize -= FILE_ENTRY_OVERHEAD; + return true; + } else { + return false; + } + } + + /* + * Get this map's memory size. Usually it's built up over time and added to + * the global memory budget, but this is used to reinitialize the memory + * budget after recovery, when DbFileSummaryMaps may be cut adrift by the + * process of overlaying new portions of the btree. + */ + public long getMemorySize() { + return memSize; + } + + public void subtractFromMemoryBudget() { + /* May not have been initialized if it was read by a FileReader */ + if (budget != null) { + budget.updateTreeAdminMemoryUsage(0 - memSize); + memSize = 0; + } + } + + public Set> entrySet() { + return map.entrySet(); + } + + public boolean contains(Long fileNum) { + return map.containsKey(fileNum); + } + + public int size() { + return map.size(); + } + + public Map cloneMap() { + final Map clone = + new HashMap(map.size()); + final Iterator> i = + map.entrySet().iterator(); + while (i.hasNext()) { + final Map.Entry entry = i.next(); + final Long fileNum = entry.getKey(); + final DbFileSummary summary = entry.getValue(); + clone.put(fileNum, summary.clone()); + } + return clone; + } + + @Override + public String toString() { + return map.toString(); + } + + /** + * Removes entries for deleted files that were created by JE 3.3.74 and + * earlier. [#16610] + */ + public void repair(EnvironmentImpl env) { + Long[] existingFiles = env.getFileManager().getAllFileNumbers(); + Iterator iter = map.keySet().iterator(); + while (iter.hasNext()) { + Long fileNum = iter.next(); + if (Arrays.binarySearch(existingFiles, fileNum) < 0) { + iter.remove(); + budget.updateTreeAdminMemoryUsage(0 - FILE_ENTRY_OVERHEAD); + memSize -= FILE_ENTRY_OVERHEAD; + } + } + } +} diff --git a/src/com/sleepycat/je/cleaner/ExpirationProfile.java b/src/com/sleepycat/je/cleaner/ExpirationProfile.java new file mode 100644 index 0000000..287b9df --- /dev/null +++ b/src/com/sleepycat/je/cleaner/ExpirationProfile.java @@ -0,0 +1,566 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.bind.tuple.SortedPackedLongBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Get;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.ProgressListener;
+import com.sleepycat.je.Put;
+import com.sleepycat.je.RecoveryProgress;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbType;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.StartupTracker;
+import com.sleepycat.je.dbi.TTL;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.Pair;
+
+/**
+ * A cache of the histograms for all files, except for the last file. Also
+ * caches the number of bytes expired in the current interval.
+ *
+ * No memory budgeting is performed because the size of these data structures
+ * is small compared to the Btree they represent. The serialized form of the
+ * histogram is cached, which is a small number of bytes per file. If no data
+ * in a file expires, it will not have a cache entry.
+ *
+ * Possible future optimization: If there is contention on this data
+ * structure, the refresh method could create a read-only map containing the
+ * current number of expired bytes, for access by getExpiredBytes without
+ * synchronization.
+ */
+public class ExpirationProfile {
+
+    private static Pair<Integer, Integer> PAIR_OF_ZEROS = new Pair<>(0, 0);
+
+    private final EnvironmentImpl env;
+
+    /*
+     * The 'map' of file number to Histogram, protected by its own mutex.
+     *
+     * Note that if the map and completedTrackers mutexes are both held, they
+     * must be acquired in that order.
+     */
+    private final Map<Long, ExpInfo> map;
+
+    /*
+     * The expiration times in days and hours of the last refresh, i.e., the
+     * cached bytes in each Histogram are those that were expired on this
+     * day/hour. Both fields are protected by the 'map' mutex.
+     */
+    private int lastRefreshHour = -1;
+    private int lastRefreshDay = -1;
+
+    /*
+     * Whether any expiration times are in hours. If false, all intervals are
+     * in days. Protected by the 'map' mutex.
+     */
+    private boolean anyExpirationInHours;
+
+    /*
+     * Map of file number to completed tracker. Protected by its own mutex.
+     *
+     * Note that if the map and completedTrackers mutexes are both held, they
+     * must be acquired in that order.
+     */
+    private final Map<Long, ExpirationTracker> completedTrackers;
+
+    /*
+     * The expiration summary DB. Its key is the file number, a long. Its data
+     * is the serialized form of the histogram, or is empty (zero length) if
+     * the file has no expired data. The latter case includes files created
+     * before the TTL feature was added.
+     */
+    private DatabaseImpl db;
+
+    public ExpirationProfile(final EnvironmentImpl env) {
+        this.env = env;
+        map = new HashMap<>();
+        completedTrackers = new HashMap<>();
+    }
+
+    /** Makes a copy for use in utilities, etc. */
+    public ExpirationProfile(final ExpirationProfile other) {
+        env = other.env;
+        db = other.db;
+        synchronized (other.map) {
+            map = new HashMap<>(other.map);
+        }
+        completedTrackers = Collections.emptyMap();
+    }
+
+    /**
+     * Called at the end of recovery to open the expiration DB, and cache its
+     * records in the profile's map.
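+ *
+ * <p>For illustration, the summary DB record format read here (key is the
+ * packed file number; an empty data item means no expired data):
+ * <pre>
+ *     DatabaseEntry key = new DatabaseEntry();
+ *     SortedPackedLongBinding.longToEntry(fileNum, key);
+ *     // data.getData().length == 0  =>  file has no expired data
+ * </pre>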
+ * + * Also collects expiration info for any complete file having expiration + * info that was not written to the DB earlier, due to a crash for example. + * + * Also initializes the tracker for the current file in the log manager, + * reading/tracking the existing entries in that file. + */ + public void populateCache( + final StartupTracker.Counter counter, + final ProgressListener listener) { + + synchronized (map) { + + assert db == null; + assert completedTrackers.isEmpty(); + + db = env.getDbTree().openNonRepInternalDB(DbType.EXPIRATION); + + if (db == null) { + /* Read-only env with no expiration summary DB. */ + return; + } + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + /* Ordered array of file numbers. */ + final Long[] existingFiles = + env.getFileManager().getAllFileNumbers(); + + /* Parallel array to existingFiles. */ + final boolean[] filesHaveRecords = + new boolean[existingFiles.length]; + + /* + * For the last file we must always get its expiration info and + * then initialize the log manager's tracker. Note that its DB + * record, if any, is deleted below. + */ + final FileProcessor processor = env.getCleaner().createProcessor(); + final long lastFileNum = env.getFileManager().getCurrentFileNum(); + + if (!env.isReadOnly()) { + if (existingFiles.length > 0) { + + /* Flush to ensure the cleaner can read all entries. */ + env.flushLog(false /*fsync*/); + + final ExpirationTracker tracker = + processor.countExpiration(lastFileNum); + + env.getLogManager().initExpirationTracker(tracker); + } else { + env.getLogManager().initExpirationTracker( + new ExpirationTracker(0)); + } + } + + /* + * Populate map with existing records in the DB that correspond to + * existing files. Delete records in the DB that do not correspond + * to existing files, to clean-up past errors. Also delete the + * record for the last file, if it exists. + */ + final Locker locker = BasicLocker.createBasicLocker( + env, false /*noWait*/); + + try (final Cursor cursor = + DbInternal.makeCursor(db, locker, null)) { + + while (cursor.get(key, data, Get.NEXT, null) != null) { + + counter.incNumRead(); + + final long fileNum = + SortedPackedLongBinding.entryToLong(key); + + final int i = Arrays.binarySearch(existingFiles, fileNum); + + if (i >= 0 && existingFiles[i] < lastFileNum) { + + filesHaveRecords[i] = true; + + final byte[] serializedForm = data.getData(); + + if (serializedForm.length > 0) { + counter.incNumProcessed(); + map.put( + fileNum, + new ExpInfo(serializedForm, 0)); + } + } else if (!env.isReadOnly()) { + counter.incNumDeleted(); + cursor.delete(); + } + } + } finally { + locker.operationEnd(); + } + + /* + * If a record is missing for an existing file, use the cleaner to + * get the expiration info for the file, and then add a record to + * the DB and to the map. Note that the last file is not processed. 
+ */ + for (int i = 0; + i < existingFiles.length && existingFiles[i] < lastFileNum; + i += 1) { + + if (filesHaveRecords[i]) { + continue; + } + + final long fileNum = existingFiles[i]; + + counter.incNumAux(); + + if (listener != null) { + listener.progress( + RecoveryProgress.POPULATE_EXPIRATION_PROFILE, + 1, -1); + } + + final ExpirationTracker tracker = + processor.countExpiration(fileNum); + + putFile(tracker, 0); + + LoggerUtils.info( + env.getLogger(), env, + "Loaded missing expiration data from file 0x" + + Long.toHexString(fileNum)); + } + } + } + + /** + * Writes a record in the expiration summary DB for the given tracker, + * and (if there is any data with an expiration time) adds it to the map. + * + * Because this method and {@link #removeFile} perform Btree operations + * while synchronized on the 'map', it is important that an IN is not + * latched when calling these methods, which could cause a deadlock. Also, + * an IN latch should not be held while calling a method that synchronizes + * on {@link FileSelector}, since methods in this class are called while + * synchronized on FileSelector, and this could cause a 3-way deadlock + * [#25613]. + */ + void putFile(final ExpirationTracker tracker, final int expiredSize) { + + final long fileNum = tracker.getFileNum(); + final byte[] serializedForm = tracker.serialize(); + + synchronized (map) { + + if (db != null && !env.isReadOnly()) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + SortedPackedLongBinding.longToEntry(fileNum, key); + + data.setData(serializedForm); + + final Locker locker = BasicLocker.createBasicLocker( + env, false /*noWait*/); + + try (final Cursor cursor = + DbInternal.makeCursor(db, locker, null)) { + + cursor.put(key, data, Put.OVERWRITE, null); + + } finally { + locker.operationEnd(); + } + } + + if (serializedForm.length > 0) { + map.put( + fileNum, + new ExpInfo(serializedForm, expiredSize)); + } + } + } + + /** + * Remove entry for a file from the map and DB, when the file is deleted. + */ + void removeFile(final long fileNum) { + + if (db == null || env.isReadOnly()) { + return; + } + + synchronized (map) { + + map.remove(fileNum); + + final DatabaseEntry key = new DatabaseEntry(); + SortedPackedLongBinding.longToEntry(fileNum, key); + + final Locker locker = BasicLocker.createBasicLocker( + env, false /*noWait*/); + + try (final Cursor cursor = + DbInternal.makeCursor(db, locker, null)) { + + if (cursor.get( + key, null, Get.SEARCH, + LockMode.RMW.toReadOptions()) != null) { + + cursor.delete(null); + } + } finally { + locker.operationEnd(); + } + } + } + + /** + * Called after a file flip. The tracker is completed in the sense that the + * file is completely written, but there may be pending calls to + * {@link ExpirationTracker#track(int, boolean, int)} for some writing + * threads. This is because track is not called under the LWL. + */ + public void addCompletedTracker(final ExpirationTracker tracker) { + + if (db == null) { + return; + } + + final long fileNum = tracker.getFileNum(); + + synchronized (completedTrackers) { + assert !completedTrackers.containsKey(fileNum); + + completedTrackers.put(fileNum, tracker); + } + } + + /** + * Periodically, and when refreshing the profile, we process completed + * trackers that were added to the completedTrackers queue at the time of a + * file flip. If truly complete, they are added to the profile DB and map. 
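+     *
+     * Editor's sketch of the intended call sequence (illustrative, not from
+     * the original source; see addCompletedTracker above and putFile):
+     * <pre>{@code
+     * profile.addCompletedTracker(tracker);  // at the file flip
+     * // later, once tracker.hasPendingTrackCalls() is false:
+     * profile.processCompletedTrackers();    // persists it via putFile
+     * }</pre>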
+ */ + void processCompletedTrackers() { + + /* Only one thread at a time should process them. */ + synchronized (map) { + + /* + * Make a copy in order to process them without holding their + * mutex, to avoid blocking addCompletedTracker, which is in the + * main write path. + */ + final List trackers; + + synchronized (completedTrackers) { + trackers = new ArrayList<>(completedTrackers.values()); + } + + for (final ExpirationTracker tracker : trackers) { + + if (tracker.hasPendingTrackCalls()) { + /* Not quite completed. */ + continue; + } + + putFile(tracker, 0); + + synchronized (completedTrackers) { + completedTrackers.remove(tracker.getFileNum()); + } + } + } + } + + /** + * Updates the expired bytes in the expiration profile according to the + * data that has expired at the given time. Should be called periodically, + * and before calling {@link #getExpiredBytes}. + * + * Also processes any completed trackers by adding them to the DB and to + * the histogram map. + * + * This method only does any real work once per hour, on hour boundaries, + * since data expires on (at most) hour boundaries. + */ + public void refresh(final long time) { + + if (db == null) { + return; + } + + /* Synchronize to protect map and the lastRefreshXxx fields. */ + synchronized (map) { + + /* + * Get last hour boundary, rounding down to the closest hour. If + * an hour has not passed (and this is not the first time called), + * then return and expect that we'll try again later. + */ + final int hourLimit = + (int) (time / TTL.MILLIS_PER_HOUR); + + if (hourLimit == lastRefreshHour) { + return; + } + + final int dayLimit = hourLimit / 24; + final boolean newDayLimit = (dayLimit != lastRefreshDay); + + processCompletedTrackers(); + + lastRefreshHour = hourLimit; + lastRefreshDay = dayLimit; + anyExpirationInHours = false; + + for (final ExpInfo info : map.values()) { + if (ExpirationTracker.isExpirationInHours( + info.serializedForm)) { + anyExpirationInHours = true; + break; + } + } + + /* + * If all expiration times are on day boundaries, and we have not + * started a new day, there is nothing more to do. + */ + if (!newDayLimit && !anyExpirationInHours) { + return; + } + + /* + * Recalculate expired bytes for the current day/hour, saving the + * previous value. + */ + for (final ExpInfo info : map.values()) { + + info.previousExpiredBytes = info.currentExpiredBytes; + + info.currentExpiredBytes = ExpirationTracker.getExpiredBytes( + info.serializedForm, dayLimit, hourLimit); + } + } + } + + /** + * Returns the number of expired bytes for the given file. Uses the value + * calculated by the last call to {@link #refresh}. + */ + public int getExpiredBytes(final long fileNum) { + synchronized (map) { + final ExpInfo info = map.get(fileNum); + return (info != null) ? info.currentExpiredBytes : 0; + } + } + + /** + * Returns the number of expired bytes for the given file. Two values are + * returned: the total expired at the given time, and a potentially smaller + * amount that gradually expires over the current hour or day interval. + * Uses the values calculated by the last call to {@link #refresh}. + * + * The amount that gradually expires is the amount that expires in the + * current time interval, which is one day if all data in the profile + * expires on day boundaries, and otherwise is one hour. If this is the + * first interval for the file (after a restart or a revisal run), all + * expired bytes are considered expired in the current interval and expire + * gradually. 
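+     *
+     * Worked example (editor's illustration, hypothetical values): with hour
+     * intervals, previousExpiredBytes=400, currentExpiredBytes=1000, and 15
+     * minutes into the current hour, the gradual amount is
+     * 400 + (1000 - 400) * 15/60 = 550, and the pair {1000, 550} is returned.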
+ * + * @return pair of {allExpiredBytes, gradualExpiredBytes}. + */ + public Pair getExpiredBytes( + final long fileNum, + final long time) { + + synchronized (map) { + + final ExpInfo info = map.get(fileNum); + + if (info == null) { + return PAIR_OF_ZEROS; + } + + final int newlyExpiredBytes = + info.currentExpiredBytes - info.previousExpiredBytes; + + if (newlyExpiredBytes == 0) { + return new Pair<>( + info.currentExpiredBytes, info.currentExpiredBytes); + } + + final long intervalMs = anyExpirationInHours ? + TTL.MILLIS_PER_HOUR : TTL.MILLIS_PER_DAY; + + final long currentMs = time % intervalMs; + + final int gradualBytes = info.previousExpiredBytes + + ((int) ((newlyExpiredBytes * currentMs) / intervalMs)); + + return new Pair<>(info.currentExpiredBytes, gradualBytes); + } + } + + public String toString(final long fileNum) { + synchronized (map) { + final ExpInfo info = map.get(fileNum); + return (info != null) ? info.toString() : "NoExpInfo"; + } + } + + /** + * Contains cached information about expiration for a data file. + */ + private static class ExpInfo { + + /** + * Cached serialized form, use to recompute the current expired bytes + * for a given expiration time. + */ + final byte[] serializedForm; + + /** + * The number of expired bytes for the given file. This is the value + * calculated by the last call to {@link ExpirationProfile#refresh}. + */ + int currentExpiredBytes = 0; + + /** + * The number of bytes that expired prior to the current interval. + * Calculated by the previous refresh for which the interval changed. + */ + int previousExpiredBytes = 0; + + ExpInfo(final byte[] serializedForm, + final int currentExpiredBytes) { + + this.serializedForm = serializedForm; + this.currentExpiredBytes = currentExpiredBytes; + } + + @Override + public String toString() { + return "{ExpInfo currentBytes = " + currentExpiredBytes + + " " + ExpirationTracker.toString(serializedForm) + '}'; + } + } +} diff --git a/src/com/sleepycat/je/cleaner/ExpirationTracker.java b/src/com/sleepycat/je/cleaner/ExpirationTracker.java new file mode 100644 index 0000000..e42dd46 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/ExpirationTracker.java @@ -0,0 +1,335 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicInteger; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.Key; + +/** + * Tracks the expired bytes in each time window, i.e., a histogram. A separate + * ExpirationTracker instance is used for each tracked data file. + *
+ * <p>
+ * A copy-on-write approach is used to store the expiration-time-to-counter
+ * mapping, and AtomicIntegers are used for the counters. This avoids blocking
+ * when tracking information for the current end-of-log file. That way, the
+ * end-of-log tracker can be used by multiple threads without holding a global
+ * mutex. This tracker is maintained by the LogManager and a new tracker is
+ * created for each file, and then flushed to disk when starting a new file as
+ * a FileExpirationLN.
+ *
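+ * The update pattern is sketched below (editor's illustration; the actual
+ * logic is in the private track method of this class):
+ * <pre>{@code
+ * AtomicInteger c = map.get(expInHours);
+ * if (c == null) {
+ *     synchronized (this) {
+ *         c = map.get(expInHours);           // double-check under mutex
+ *         if (c == null) {
+ *             Map<Integer, AtomicInteger> copy = new HashMap<>(map);
+ *             copy.put(expInHours, c = new AtomicInteger(0));
+ *             map = copy;                    // publish via volatile field
+ *         }
+ *     }
+ * }
+ * c.addAndGet(size);
+ * }</pre>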
+ * <p>
+ * An ExpirationTracker instance is used to track expired data when performing
+ * the first pass of two-pass cleaning, although in that case it is only used
+ * by one thread, so the optimizations are irrelevant.
+ *
+ * <p>
        + * The {@link #serialize}} method is called to represent the histogram in a + * single byte array. This array is the record "data" in a FileExpirationLN. + * It is also stored in memory, in the UtilizationProfile, and used during + * cleaning to calculate the number of expired bytes per file. + */ +public class ExpirationTracker { + + private final long fileNum; + + /* Copy-on-write map of expiration time (in hours) to byte counter. */ + private volatile Map map = new HashMap<>(); + + /** + * We wait for pendingTrackCalls to go to zero before flushing the + * tracker to its database. + */ + private AtomicInteger pendingTrackCalls = new AtomicInteger(0); + + public ExpirationTracker(final long fileNum) { + this.fileNum = fileNum; + } + + public long getFileNum() { + return fileNum; + } + + /** + * Tracks expiration of a BIN or LN. + * + * @param entry is the LogEntry that was just logged. INs and LNs will be + * processed here, and must be protected by their parent latch. + * + * @param size byte size of logged entry. + */ + public void track(final LogEntry entry, final int size) { + + pendingTrackCalls.decrementAndGet(); + + final LogEntryType type = entry.getLogType(); + + if (type.isUserLNType()) { + + final LNLogEntry lnEntry = (LNLogEntry) entry; + final int expiration = lnEntry.getExpiration(); + + if (expiration == 0) { + return; + } + + track(expiration, lnEntry.isExpirationInHours(), size); + return; + } + + if (!type.equals(LogEntryType.LOG_BIN) && + !type.equals(LogEntryType.LOG_BIN_DELTA)){ + return; + } + + final INLogEntry inEntry = (INLogEntry) entry; + final BIN bin = inEntry.getBINWithExpiration(); + + if (bin == null) { + return; + } + + final boolean inHours = bin.isExpirationInHours(); + final int entrySize = size / bin.getNEntries(); + + for (int i = 0; i < bin.getNEntries(); i += 1) { + + final int expiration = bin.getExpiration(i); + + if (expiration == 0) { + continue; + } + + track(expiration, inHours, entrySize); + } + } + + /** + * Adds a single expiration value. + */ + private void track(int expiration, + final boolean expirationInHours, + final int size) { + + final Integer expInHours = + expirationInHours ? expiration : (24 * expiration); + + AtomicInteger counter = map.get(expInHours); + + /* + * The map is modified only while synchronized, which prevents two + * threads from adding the same entry or a reader thread from accessing + * the map while it is being modified. To guarantee this we must + * "install" the new map in the volatile field only after adding the + * new counter. + */ + if (counter == null) { + synchronized (this) { + /* + * Check again while synchronized, since another thread may + * have added it. This "double check" is safe because the 'map' + * field is volatile. + */ + counter = map.get(expInHours); + if (counter == null) { + final Map newMap = + new HashMap<>(map); + counter = new AtomicInteger(0); + newMap.put(expInHours, counter); + map = newMap; + } + } + } + + counter.addAndGet(size); + } + + /** + * Increment the number of calls to {@link #track(int, boolean, int)} + * that must be made before the tracked data can be flushed to its + * database. + */ + public void incrementPendingTrackCalls() { + pendingTrackCalls.incrementAndGet(); + } + + /** + * Returns whether to wait for outstanding calls to {@link + * #track(int, boolean, int)} before flushing the tracked data to its + * database. 
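+     *
+     * Editor's sketch of the intended writer-side protocol (hypothetical
+     * driver code; track decrements the counter that
+     * incrementPendingTrackCalls increments):
+     * <pre>{@code
+     * tracker.incrementPendingTrackCalls(); // before logging an entry
+     * // ... the entry is written to the log ...
+     * tracker.track(entry, size);           // after logging; decrements
+     * }</pre>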
+ */ + boolean hasPendingTrackCalls() { + return pendingTrackCalls.get() > 0; + } + + /** + * Computes the current expired bytes for the given time. + */ + public int getExpiredBytes(final long time) { + + final int expLimit = (int) (time / TTL.MILLIS_PER_HOUR); + + int expiredSize = 0; + + for (final Map.Entry entry : map.entrySet()) { + final int exp = entry.getKey(); + if (exp > expLimit) { + continue; + } + expiredSize += entry.getValue().get(); + } + + return expiredSize; + } + + @Override + public String toString() { + + final StringBuilder sb = new StringBuilder(); + sb.append("{ExpTracker file= ").append(fileNum); + + for (final Map.Entry entry : + new TreeMap<>(map).entrySet()) { + + final int exp = entry.getKey(); + sb.append(' ').append(TTL.formatExpiration(exp, true)); + sb.append('=').append(entry.getValue().get()); + } + + sb.append('}'); + return sb.toString(); + } + + public static String toString(final byte[] serializedForm) { + + final StringBuilder sb = new StringBuilder(); + sb.append("{ExpSerialized"); + + final TupleInput in = new TupleInput( + serializedForm, 1, serializedForm.length - 1); + + final boolean hours = isExpirationInHours(serializedForm); + int prevExp = 0; + + while (in.available() > 0) { + final int exp = in.readPackedInt() + prevExp; + final int size = in.readPackedInt(); + sb.append(' ').append(TTL.formatExpiration(exp, hours)); + sb.append('=').append(size); + prevExp = exp; + } + + sb.append('}'); + return sb.toString(); + } + + /** + * Computes the expired bytes for the given serialized histogram and + * expiration time. + */ + static int getExpiredBytes(final byte[] serializedForm, + final int dayLimit, + final int hourLimit) { + final int expLimit = + ExpirationTracker.isExpirationInHours(serializedForm) ? + hourLimit : dayLimit; + + final TupleInput in = new TupleInput( + serializedForm, 1, serializedForm.length - 1); + + int expiredSize = 0; + int prevExp = 0; + + while (in.available() > 0) { + final int exp = in.readPackedInt() + prevExp; + if (exp > expLimit) { + break; + } + expiredSize += in.readPackedInt(); + prevExp = exp; + } + + return expiredSize; + } + + /** + * Converts this object to a serialized form that is compact and can be + * used to quickly find the total bytes after a given time. Returns an + * empty array if no data in this file has an expiration time. + * + * The serialized form is a series of {interval,byteSize} pairs that is + * ordered by expiration time and run length encoded. The interval and + * byteSize are packed integers. The interval is the delta between the + * current and previous expiration value. All expiration values are in days + * if all values are on a day boundary; otherwise they are in hours. Days + * are used, when possible, to reduce the size of the delta, using less + * space due to the packed integer format. + */ + byte[] serialize() { + + final Map myMap = map; + + if (myMap.isEmpty()) { + return Key.EMPTY_KEY; + } + + final List expList = new ArrayList<>(myMap.size()); + expList.addAll(myMap.keySet()); + Collections.sort(expList); + + boolean hours = false; + for (int exp : expList) { + if (exp % 24 != 0) { + hours = true; + break; + } + } + + final TupleOutput out = new TupleOutput(); + out.write(hours ? 
1 : 0); + int prevExp = 0; + + for (int exp : expList) { + final AtomicInteger counter = myMap.get(exp); + if (!hours) { + exp /= 24; + } + out.writePackedInt(exp - prevExp); + out.writePackedInt(counter.get()); + prevExp = exp; + } + + return out.toByteArray(); + } + + /** + * Returns whether the given serialized form has expired values in hours. + * If false is returned, all values expired on day boundaries. + */ + static boolean isExpirationInHours(final byte[] serialized) { + return (serialized[0] == 1); + } +} diff --git a/src/com/sleepycat/je/cleaner/FileProcessor.java b/src/com/sleepycat/je/cleaner/FileProcessor.java new file mode 100644 index 0000000..25158f4 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/FileProcessor.java @@ -0,0 +1,1990 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.logging.Level; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DiskLimitException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.log.CleanerFileReader; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.tree.OldBINDelta; +import com.sleepycat.je.tree.SearchResult; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeLocation; +import com.sleepycat.je.tree.WithRootLatched; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockManager; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.utilint.DaemonThread; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.TestHookExecute; + +/** + * Reads all entries in a log file and either determines them to be obsolete or + * active. Active LNs are migrated immediately (by logging them). Active INs + * are marked for migration by setting the dirty flag. + * + * May be invoked explicitly by calling doClean, or woken up if used as a + * daemon thread. + */ +public class FileProcessor extends DaemonThread { + + /** + * The number of LN log entries after we process pending LNs. 
If we do
+     * this too seldom, the pending LN queue may grow large, and it isn't
+     * budgeted memory. If we process it too often, we will repeatedly request
+     * a non-blocking lock for the same locked node.
+     */
+    private static final int PROCESS_PENDING_EVERY_N_LNS = 100;
+
+    private final Cleaner cleaner;
+    private final FileSelector fileSelector;
+    private final UtilizationProfile profile;
+    private final UtilizationCalculator calculator;
+
+    /** @see #onWakeup() */
+    private volatile boolean activate = false;
+    private long lastWakeupLsn = 0;
+
+    /*
+     * The first thread (out of N cleaner threads) does certain housekeeping
+     * duties that don't need to be done by all threads.
+     */
+    private final boolean firstThread;
+
+    /* Log version for the target file. */
+    private int fileLogVersion;
+
+    /* Per-run counters. Reset before each file is processed. */
+
+    /*
+     * Number of full IN (BIN or UIN) logrecs that were known to be a priori
+     * obsolete and did not need any further processing (i.e., they did not
+     * need to be searched-for in the tree). These include logrecs whose
+     * offset is recorded in the FS DB, or whose DB has been deleted or is
+     * being deleted.
+     */
+    private int nINsObsoleteThisRun = 0;
+
+    /*
+     * Number of full IN (BIN or UIN) logrecs that were not known a priori to
+     * be obsolete, and as a result, needed further processing.
+     */
+    private int nINsCleanedThisRun = 0;
+
+    /*
+     * Number of full IN (BIN or UIN) logrecs that were found to be obsolete
+     * after having been looked up in the tree.
+     */
+    private int nINsDeadThisRun = 0;
+
+    /*
+     * Number of full IN (BIN or UIN) logrecs that were still active and were
+     * marked dirty so that they will be migrated during the next ckpt.
+     */
+    private int nINsMigratedThisRun = 0;
+
+    /*
+     * Number of BIN-delta logrecs that were known to be a priori
+     * obsolete and did not need any further processing (i.e., they did not
+     * need to be searched-for in the tree). These include logrecs whose
+     * offset is recorded in the FS DB, or whose DB has been deleted or is
+     * being deleted.
+     */
+    private int nBINDeltasObsoleteThisRun = 0;
+
+    /*
+     * Number of BIN-delta logrecs that were not known a priori to be
+     * obsolete, and as a result, needed further processing.
+     */
+    private int nBINDeltasCleanedThisRun = 0;
+
+    /*
+     * Number of BIN-delta logrecs that were found to be obsolete after having
+     * been looked up in the tree.
+     */
+    private int nBINDeltasDeadThisRun = 0;
+
+    /*
+     * Number of BIN-delta logrecs that were still active and were marked
+     * dirty so that they will be migrated during the next ckpt.
+     */
+    private int nBINDeltasMigratedThisRun = 0;
+
+    /*
+     * Number of LN logrecs that were known to be a priori obsolete and did
+     * not need any further processing (for example, they did not need to be
+     * searched-for in the tree). These include logrecs that are immediately
+     * obsolete, or whose offset is recorded in the FS DB, or whose DB has been
+     * deleted or is being deleted.
+     */
+    private int nLNsObsoleteThisRun = 0;
+
+    /*
+     * Number of LN logrecs that were expired.
+     */
+    private int nLNsExpiredThisRun = 0;
+
+    /*
+     * Number of LN logrecs that were not known a priori to be obsolete, and
+     * as a result, needed further processing. These include LNs that had to
+     * be searched-for in the tree as well as the nLNQueueHitsThisRun (see
+     * below).
+     */
+    private int nLNsCleanedThisRun = 0;
+
+    /*
+     * Number of LN logrecs that were processed without tree search. Let L1 and
+     * L2 be two LN logrecs and R1 and R2 be their associated records. We will
+     * avoid a tree search for L1 if L1 is in the to-be-processed cache when
+     * L2 is processed, R2 must be searched-for in the tree, R2 is found in a
+     * BIN B, and L1 is also pointed-to by a slot in B.
+     */
+    private int nLNQueueHitsThisRun = 0;
+
+    /*
+     * Number of LN logrecs that were found to be obsolete after having been
+     * processed further.
+     */
+    private int nLNsDeadThisRun = 0;
+
+    /*
+     * Number of LN logrecs whose LSN had to be locked in order to check their
+     * obsoleteness, and this non-blocking lock request was denied (and as a
+     * result, the logrec was placed in the "pending LNs" queue).
+     */
+    private int nLNsLockedThisRun = 0;
+
+    /*
+     * Number of LN logrecs that were still active and were migrated.
+     */
+    private int nLNsMigratedThisRun = 0;
+
+    /*
+     * This applies to temporary DBs only. It is the number of LN logrecs that
+     * were still active, but instead of migrating them, we attached the LN to
+     * the memory-resident tree and marked the LN as dirty.
+     */
+    private int nLNsMarkedThisRun = 0;
+
+    /*
+     * Number of log entries read during cleaning.
+     */
+    private int nEntriesReadThisRun;
+
+    private long nRepeatIteratorReadsThisRun;
+
+    FileProcessor(String name,
+                  boolean firstThread,
+                  EnvironmentImpl env,
+                  Cleaner cleaner,
+                  UtilizationProfile profile,
+                  UtilizationCalculator calculator,
+                  FileSelector fileSelector) {
+        super(0, name, env);
+        this.cleaner = cleaner;
+        this.fileSelector = fileSelector;
+        this.profile = profile;
+        this.calculator = calculator;
+        this.firstThread = firstThread;
+    }
+
+    /**
+     * Return the number of retries when a deadlock exception occurs.
+     */
+    @Override
+    protected long nDeadlockRetries() {
+        return cleaner.nDeadlockRetries;
+    }
+
+    void activateOnWakeup() {
+        activate = true;
+    }
+
+    /**
+     * The thread is woken, either by an explicit notify (a call to {@link
+     * Cleaner#wakeupActivate()}), or when the timed wakeup interval elapses.
+     *
+     * In the former case (a call to wakeupActivate), the 'activate' field will
+     * be true and the doClean method is called here. This happens when the
+     * number of bytes written exceeds the cleanerBytesInterval, a config
+     * change is made that could impact cleaning, etc.
+     *
+     * In the latter case (the wakeup interval elapsed), 'activate' will be
+     * false. In this case, when there has been no writing since the last
+     * wakeup, we perform cleaning and checkpointing, if needed to reclaim
+     * space. This handles the situation where writing stops, but cleaning
+     * or checkpointing or reserved file deletion may be needed. See {@link
+     * com.sleepycat.je.EnvironmentConfig#CLEANER_WAKEUP_INTERVAL}.
+     *
+     * In all cases, when a disk limit is in violation we always call the
+     * doClean method to ensure that {@link Cleaner#manageDiskUsage()} is
+     * called in this situation. This is important to free disk space whenever
+     * possible.
+     */
+    @Override
+    protected synchronized void onWakeup() {
+
+        if (!activate && cleaner.getDiskLimitViolation() == null) {
+            /*
+             * This is a timed wakeup and no disk limit is violated. We should
+             * only call doClean if writing has stopped.
+             */
+            final long nextLsn = envImpl.getFileManager().getNextLsn();
+            if (lastWakeupLsn != nextLsn) {
+                /*
+                 * If the last LSN in the log has changed since the last timed
+                 * wakeup, do nothing, because writing has not stopped. As long
+                 * as writing continues, we expect the cleaner and checkpointer
+                 * to be woken via their byte interval params.
+ */ + lastWakeupLsn = nextLsn; + return; + } + + /* + * There has been no writing since the last wakeup. Schedule a + * checkpoint, if needed to reclaim disk space for already cleaned + * files. Then fall through and activate (call doClean). + */ + envImpl.getCheckpointer().wakeupAfterNoWrites(); + } + + doClean( + true /*invokedFromDaemon*/, + true /*cleanMultipleFiles*/, + false /*forceCleaning*/); + + activate = false; + } + + /** + * Selects files to clean and cleans them. It returns the number of + * successfully cleaned files. May be called by the daemon thread or + * programatically. + * + * @param invokedFromDaemon currently has no effect. + * + * @param cleanMultipleFiles is true to clean until we're under budget, + * or false to clean at most one file. + * + * @param forceCleaning is true to clean even if we're not under the + * utilization threshold. + * + * @return the number of files cleaned, not including files cleaned + * unsuccessfully. + */ + synchronized int doClean( + boolean invokedFromDaemon, + boolean cleanMultipleFiles, + boolean forceCleaning) { + + if (envImpl.isClosed()) { + return 0; + } + + /* + * Get all file summaries including tracked files. Tracked files may + * be ready for cleaning if there is a large cache and many files have + * not yet been flushed and do not yet appear in the profile map. + */ + SortedMap fileSummaryMap = + profile.getFileSummaryMap(true /*includeTrackedFiles*/); + + /* Clean until no more files are selected. */ + final int nOriginalLogFiles = fileSummaryMap.size(); + int nFilesCleaned = 0; + + while (true) { + + /* Stop if the daemon is paused or the environment is closing. */ + if ((invokedFromDaemon && isPaused()) || envImpl.isClosing()) { + break; + } + + /* + * Manage disk usage (refresh stats and delete files) periodically. + * + * Do this before cleaning, to reduce the chance of filling the + * disk while cleaning and migrating/logging LNs. Also do it after + * cleaning (before deciding whether to clean another file), even + * if there are no more files to clean, to ensure space is freed + * after a long run. + */ + cleaner.manageDiskUsage(); + + /* + * Stop if we cannot write because of a disk limit violation. */ + try { + envImpl.checkDiskLimitViolation(); + } catch (DiskLimitException e) { + if (!invokedFromDaemon) { + throw e; + } + break; + } + + /* + * Process pending LNs periodically. Pending LNs can prevent file + * deletion. + */ + cleaner.processPending(); + + if (nFilesCleaned > 0) { + + /* If we should only clean one file, stop now. */ + if (!cleanMultipleFiles) { + break; + } + + /* Don't clean forever. */ + if (nFilesCleaned >= nOriginalLogFiles) { + break; + } + + /* Refresh file summary info for next file selection. */ + fileSummaryMap = + profile.getFileSummaryMap(true /*includeTrackedFiles*/); + } + + /* + * Select the next file for cleaning and update the Cleaner's + * read-only file collections. + */ + final Pair result = + fileSelector.selectFileForCleaning( + calculator, fileSummaryMap, forceCleaning); + + /* Stop if no file is selected for cleaning. 
*/ + if (result == null) { + break; + } + + final Long fileNum = result.first(); + final int requiredUtil = result.second(); + final boolean twoPass = (requiredUtil >= 0); + + boolean finished = false; + boolean fileDeleted = false; + final long fileNumValue = fileNum; + + final long runId = cleaner.totalRuns.incrementAndGet(); + final MemoryBudget budget = envImpl.getMemoryBudget(); + nFilesCleaned += 1; + + try { + TestHookExecute.doHookIfSet(cleaner.fileChosenHook); + + /* Perform 1st pass of 2-pass cleaning. */ + String passOneMsg = ""; + if (twoPass) { + + final FileSummary recalcSummary = new FileSummary(); + + final ExpirationTracker expTracker = + new ExpirationTracker(fileNumValue); + + processFile( + fileNum, recalcSummary, new INSummary(), expTracker); + + final int expiredSize = + expTracker.getExpiredBytes(TTL.currentSystemTime()); + + final int obsoleteSize = recalcSummary.getObsoleteSize(); + + final int recalcUtil = FileSummary.utilization( + obsoleteSize + expiredSize, recalcSummary.totalSize); + + passOneMsg = + " pass1RecalcObsolete=" + obsoleteSize + + " pass1RecalcExpired=" + expiredSize + + " pass1RecalcUtil=" + recalcUtil + + " pass1RequiredUtil=" + requiredUtil; + + if (recalcUtil > requiredUtil) { + + cleaner.nRevisalRuns.increment(); + + cleaner.getExpirationProfile().putFile( + expTracker, expiredSize); + + final String logMsg = "CleanerRevisalRun " + runId + + " on file 0x" + Long.toHexString(fileNumValue) + + " ends:" + passOneMsg; + + LoggerUtils.logMsg(logger, envImpl, Level.INFO, logMsg); + + fileSelector.removeFile(fileNum, budget); + + finished = true; + continue; + } + } + + resetPerRunCounters(); + cleaner.nCleanerRuns.increment(); + + if (twoPass) { + cleaner.nTwoPassRuns.increment(); + } + + /* Keep track of estimated and true utilization. */ + final FileSummary estimatedFileSummary = + fileSummaryMap.containsKey(fileNum) ? + fileSummaryMap.get(fileNum).clone() : null; + + final FileSummary recalculatedFileSummary = new FileSummary(); + final INSummary inSummary = new INSummary(); + + final String msgHeader = + (twoPass ? "CleanerTwoPassRun " : "CleanerRun ") + + runId + " on file 0x" + Long.toHexString(fileNumValue); + + final String beginMsg = msgHeader + " begins:"; + + /* Trace is unconditional for log-based debugging. */ + LoggerUtils.traceAndLog(logger, envImpl, Level.FINE, beginMsg); + + /* Process all log entries in the file. */ + if (!processFile( + fileNum, recalculatedFileSummary, inSummary, null)) { + return nFilesCleaned; + } + + /* File is fully processed, update stats. */ + accumulatePerRunCounters(); + finished = true; + + /* Trace is unconditional for log-based debugging. 
*/ + final String endMsg = msgHeader + " ends:" + + " invokedFromDaemon=" + invokedFromDaemon + + " finished=" + finished + + " fileDeleted=" + fileDeleted + + " nEntriesRead=" + nEntriesReadThisRun + + " nINsObsolete=" + nINsObsoleteThisRun + + " nINsCleaned=" + nINsCleanedThisRun + + " nINsDead=" + nINsDeadThisRun + + " nINsMigrated=" + nINsMigratedThisRun + + " nBINDeltasObsolete=" + nBINDeltasObsoleteThisRun + + " nBINDeltasCleaned=" + nBINDeltasCleanedThisRun + + " nBINDeltasDead=" + nBINDeltasDeadThisRun + + " nBINDeltasMigrated=" + nBINDeltasMigratedThisRun + + " nLNsObsolete=" + nLNsObsoleteThisRun + + " nLNsCleaned=" + nLNsCleanedThisRun + + " nLNsDead=" + nLNsDeadThisRun + + " nLNsExpired=" + nLNsExpiredThisRun + + " nLNsMigrated=" + nLNsMigratedThisRun + + " nLNsMarked=" + nLNsMarkedThisRun + + " nLNQueueHits=" + nLNQueueHitsThisRun + + " nLNsLocked=" + nLNsLockedThisRun; + + Trace.trace(envImpl, endMsg); + + /* Only construct INFO level message if needed. */ + if (logger.isLoggable(Level.INFO)) { + + final int estUtil = (estimatedFileSummary != null) ? + estimatedFileSummary.utilization() : -1; + + final int recalcUtil = + recalculatedFileSummary.utilization(); + + LoggerUtils.logMsg( + logger, envImpl, Level.INFO, + endMsg + + " inSummary=" + inSummary + + " estSummary=" + estimatedFileSummary + + " recalcSummary=" + recalculatedFileSummary + + " estimatedUtil=" + estUtil + + " recalcUtil=" + recalcUtil + + passOneMsg); + } + } catch (FileNotFoundException e) { + + /* + * File was deleted. Although it is possible that the file was + * deleted externally it is much more likely that the file was + * deleted normally after being cleaned earlier. This can + * occur when tracked obsolete information is collected and + * processed after the file has been cleaned and deleted. + * Since the file does not exist, ignore the error so that the + * cleaner will continue. Remove the file completely from the + * FileSelector, UtilizationProfile and ExpirationProfile so + * that we don't repeatedly attempt to process it. [#15528] + */ + fileDeleted = true; + profile.removeDeletedFile(fileNum); + cleaner.getExpirationProfile().removeFile(fileNum); + fileSelector.removeFile(fileNum, budget); + + LoggerUtils.logMsg( + logger, envImpl, Level.INFO, + "Missing file 0x" + Long.toHexString(fileNum) + + " ignored by cleaner"); + + } catch (IOException e) { + + LoggerUtils.traceAndLogException( + envImpl, "Cleaner", "doClean", "", e); + + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_INTEGRITY, e); + + } catch (DiskLimitException e) { + + LoggerUtils.logMsg( + logger, envImpl, Level.WARNING, + "Cleaning of file 0x" + Long.toHexString(fileNum) + + " aborted because of disk limit violation: " + e); + + if (!invokedFromDaemon) { + throw e; + } + + } catch (RuntimeException e) { + + LoggerUtils.traceAndLogException( + envImpl, "Cleaner", "doClean", "", e); + + throw e; + + } finally { + if (!finished && !fileDeleted) { + fileSelector.putBackFileForCleaning(fileNum); + } + } + } + + return nFilesCleaned; + } + + /** + * Calculates expired bytes without performing any migration or other side + * effects. The expired sizes will not overlap with obsolete data, because + * expired sizes are accumulated only for non-obsolete entries. + * + * @param fileNum file to read. + * + * @return the expiration tracker. 
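+     *
+     * Typical use (editor's sketch, mirroring pass 1 of two-pass cleaning
+     * and populateCache):
+     * <pre>{@code
+     * ExpirationTracker tracker = processor.countExpiration(fileNum);
+     * int expired = tracker.getExpiredBytes(TTL.currentSystemTime());
+     * }</pre>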
+     */
+    public ExpirationTracker countExpiration(long fileNum) {
+
+        final ExpirationTracker tracker = new ExpirationTracker(fileNum);
+
+        try {
+            final boolean result = processFile(
+                fileNum, new FileSummary(), new INSummary(), tracker);
+
+            assert result;
+
+        } catch (IOException e) {
+
+            LoggerUtils.traceAndLogException(
+                envImpl, "Cleaner", "countExpiration", "", e);
+
+            throw new EnvironmentFailureException(
+                envImpl, EnvironmentFailureReason.LOG_INTEGRITY, e);
+        }
+
+        return tracker;
+    }
+
+    /**
+     * Process all log entries in the given file.
+     *
+     * Note that we gather obsolete offsets at the beginning of the method and
+     * do not check for obsolete offsets of entries that become obsolete while
+     * the file is being processed. An entry in this file can become obsolete
+     * before we process it when normal application activity deletes or
+     * updates the entry. Also, large numbers of entries become obsolete as
+     * the result of LN migration while processing the file, but these are
+     * not detected either. Checking the TrackedFileSummary while processing
+     * the file would be expensive if it has many entries, because we perform
+     * a linear search in the TFS. There is a tradeoff between the cost of the
+     * TFS lookup and its benefit, which is to avoid a tree search if the
+     * entry is obsolete. Many more lookups for non-obsolete entries than
+     * obsolete entries will typically be done. Because of the high cost of
+     * the linear search, especially when processing large log files, we do
+     * not check the TFS. [#19626]
+     *
+     * @param fileNum the file being cleaned.
+     *
+     * @param fileSummary used to return the true utilization.
+     *
+     * @param inSummary used to return IN utilization info for debugging.
+     *
+     * @param expTracker if non-null, enables countOnly mode, in which
+     * expiration info is returned via this param, obsolete info returned via
+     * fileSummary does not include expired data, and no migration is
+     * performed, i.e., there are no side effects.
+     *
+     * @return false if we aborted file processing because the environment is
+     * being closed.
+     */
+    private boolean processFile(Long fileNum,
+                                FileSummary fileSummary,
+                                INSummary inSummary,
+                                ExpirationTracker expTracker)
+        throws IOException {
+
+        final boolean countOnly = (expTracker != null);
+        final LockManager lockManager = envImpl.getTxnManager().getLockManager();
+
+        /* Get the current obsolete offsets for this file. */
+        final PackedOffsets obsoleteOffsets =
+            profile.getObsoleteDetailPacked(fileNum, !countOnly /*logUpdate*/);
+
+        final PackedOffsets.Iterator obsoleteIter = obsoleteOffsets.iterator();
+        long nextObsolete = -1;
+
+        /* Copy to local variables because they are mutable properties. */
+        final int readBufferSize = cleaner.readBufferSize;
+        final int lookAheadCacheSize =
+            countOnly ? 0 : cleaner.lookAheadCacheSize;
+
+        /*
+         * Add the overhead of this method to the budget. Two read buffers are
+         * allocated by the file reader. The log size of the offsets happens to
+         * be the same as the memory overhead.
+         */
+        final int adjustMem = (2 * readBufferSize) +
+            obsoleteOffsets.getLogSize() +
+            lookAheadCacheSize;
+        final MemoryBudget budget = envImpl.getMemoryBudget();
+        budget.updateAdminMemoryUsage(adjustMem);
+
+        /* Evict after updating the budget. */
+        if (Cleaner.DO_CRITICAL_EVICTION) {
+            envImpl.daemonEviction(true /*backgroundIO*/);
+        }
+
+        /*
+         * We keep a look ahead cache of non-obsolete LNs. When we look up a
+         * BIN in processLN, we also process any other LNs in that BIN that are
+         * in the cache.
This can reduce the number of tree lookups. + */ + final LookAheadCache lookAheadCache = + countOnly ? null : new LookAheadCache(lookAheadCacheSize); + + /* + * For obsolete entries we must check for pending deleted DBs. To + * avoid the overhead of DbTree.getDb on every entry we keep a set of + * all DB IDs encountered and do the check once per DB at the end. + */ + final Set checkPendingDbSet = + countOnly ? null : new HashSet<>(); + + /* + * Use local caching to reduce DbTree.getDb overhead. Do not call + * releaseDb after getDb with the dbCache, since the entire dbCache + * will be released at the end of this method. + */ + final Map dbCache = new HashMap<>(); + final DbTree dbMapTree = envImpl.getDbTree(); + + /* + * Expired entries are counted obsolete so that this is reflected in + * total utilization. A separate tracker is used so it can be added in + * a single call under the log write latch. + */ + final LocalUtilizationTracker localTracker = + countOnly ? null : new LocalUtilizationTracker(envImpl); + + /* Keep track of all database IDs encountered. */ + final Set databases = new HashSet<>(); + + /* Create the file reader. */ + final CleanerFileReader reader = new CleanerFileReader( + envImpl, readBufferSize, DbLsn.makeLsn(fileNum, 0), fileNum, + fileSummary, inSummary, expTracker); + + /* Validate all entries before ever deleting a file. */ + reader.setAlwaysValidateChecksum(true); + + try { + final TreeLocation location = new TreeLocation(); + + int nProcessedLNs = 0; + int nProcessedEntries = 0; + + while (reader.readNextEntryAllowExceptions()) { + + nProcessedEntries += 1; + cleaner.nEntriesRead.increment(); + + int nReads = reader.getAndResetNReads(); + if (nReads > 0) { + cleaner.nDiskReads.add(nReads); + } + + long logLsn = reader.getLastLsn(); + long fileOffset = DbLsn.getFileOffset(logLsn); + boolean isLN = reader.isLN(); + boolean isIN = reader.isIN(); + boolean isBINDelta = reader.isBINDelta(); + boolean isOldBINDelta = reader.isOldBINDelta(); + boolean isDbTree = reader.isDbTree(); + boolean isObsolete = false; + long expirationTime = 0; + + /* Remember the version of the log file. */ + if (reader.isFileHeader()) { + fileLogVersion = reader.getFileHeader().getLogVersion(); + /* No expiration info exists before version 12. */ + if (countOnly && fileLogVersion < 12) { + return true; // TODO caller must abort also + } + } + + /* Stop if the daemon is shut down. */ + if (!countOnly && envImpl.isClosing()) { + return false; + } + + /* Exit loop if we can't write. */ + if (!countOnly) { + envImpl.checkDiskLimitViolation(); + } + + /* Update background reads. */ + if (nReads > 0) { + envImpl.updateBackgroundReads(nReads); + } + + /* Sleep if background read/write limit was exceeded. */ + envImpl.sleepAfterBackgroundIO(); + + /* Check for a known obsolete node. */ + while (nextObsolete < fileOffset && obsoleteIter.hasNext()) { + nextObsolete = obsoleteIter.next(); + } + if (nextObsolete == fileOffset) { + isObsolete = true; + } + + /* Check for the entry type next because it is very cheap. */ + if (!isObsolete && + !isLN && + !isIN && + !isBINDelta && + !isOldBINDelta && + !isDbTree) { + /* Consider all entries we do not process as obsolete. */ + isObsolete = true; + } + + /* + * Ignore deltas before log version 8. Before the change to + * place deltas in the Btree (in JE 5.0), all deltas were + * considered obsolete by the cleaner. 
Processing an old delta + * would be very wasteful (a Btree lookup, and possibly + * dirtying and flushing a BIN), and for duplicates databases + * could cause an exception due to the key format change. + * [#21405] + */ + if (!isObsolete && + isOldBINDelta && + fileLogVersion < 8) { + isObsolete = true; + } + + /* Maintain a set of all databases encountered. */ + final DatabaseId dbId = reader.getDatabaseId(); + if (dbId != null) { + databases.add(dbId); + } + + /* + * Get database. This is postponed until we need it, to reduce + * contention in DbTree.getDb. + */ + final DatabaseImpl db; + if (!isObsolete && dbId != null) { + + + /* + * Clear DB cache after dbCacheClearCount entries, to + * prevent starving other threads that need exclusive + * access to the MapLN (for example, DbTree.deleteMapLN). + * [#21015] + */ + if ((nProcessedEntries % cleaner.dbCacheClearCount) == 0) { + dbMapTree.releaseDbs(dbCache); + dbCache.clear(); + } + + db = dbMapTree.getDb(dbId, cleaner.lockTimeout, dbCache); + + /* + * If the DB is gone, this entry is obsolete. If delete + * cleanup is in progress, we will put the DB into the DB + * pending set further below. This entry will be declared + * deleted after the delete cleanup is finished. + */ + if (db == null || db.isDeleted()) { + isObsolete = true; + } + } else { + db = null; + } + + /* + * Also ignore INs in dup DBs before log version 8. These must + * be obsolete, just as DINs and DBINs must be obsolete (and + * are also ignored here) after dup DB conversion. Also, the + * old format IN key cannot be used for lookups. [#21405] + */ + if (!isObsolete && + isIN && + db.getSortedDuplicates() && + fileLogVersion < 8) { + isObsolete = true; + } + + if (!isObsolete && isLN) { + + final LNLogEntry lnEntry = reader.getLNLogEntry(); + + /* + * SR 14583: In JE 2.0 and later we can assume that all + * deleted LNs are obsolete. Either the delete committed + * and the BIN parent is marked with a pending deleted bit, + * or the delete rolled back, in which case there is no + * reference to this entry. JE 1.7.1 and earlier require a + * tree lookup because deleted LNs may still be reachable + * through their BIN parents. + */ + if (lnEntry.isDeleted() && fileLogVersion > 2) { + isObsolete = true; + } + + /* "Immediately obsolete" LNs can be discarded. */ + if (!isObsolete && + (db.isLNImmediatelyObsolete() || + lnEntry.isEmbeddedLN())) { + isObsolete = true; + } + + /* + * Check for expired LN. If locked, add to pending queue. + */ + if (!isObsolete && !countOnly) { + + expirationTime = TTL.expirationToSystemTime( + lnEntry.getExpiration(), + lnEntry.isExpirationInHours()); + + if (envImpl.expiresWithin( + expirationTime, + 0 - envImpl.getTtlLnPurgeDelay())) { + + if (!lockManager.isLockUncontended(logLsn)) { + fileSelector.addPendingLN( + logLsn, + new LNInfo(null /*LN*/, dbId, + lnEntry.getKey(), expirationTime)); + nLNsLockedThisRun++; + continue; + } + + isObsolete = true; + nLNsExpiredThisRun += 1; + + /* + * Inexact counting is used to avoid overhead of + * adding obsolete offset. + */ + localTracker.countObsoleteNodeInexact( + logLsn, null /*type*/, + reader.getLastEntrySize(), db); + } + } + } + + /* Skip known obsolete nodes. */ + if (isObsolete) { + /* Count obsolete stats. */ + if (!countOnly) { + if (isLN) { + nLNsObsoleteThisRun++; + } else if (isBINDelta || isOldBINDelta) { + nBINDeltasObsoleteThisRun++; + } else if (isIN) { + nINsObsoleteThisRun++; + } + } + /* Update the pending DB set for obsolete entries. 
*/ + if (checkPendingDbSet != null && dbId != null) { + checkPendingDbSet.add(dbId); + } + /* Count utilization for obsolete entry. */ + reader.countObsolete(); + continue; + } + + /* If not obsolete, count expired. */ + reader.countExpired(); + + /* Don't process further if we are only calculating. */ + if (countOnly) { + continue; + } + + /* Evict before processing each entry. */ + if (Cleaner.DO_CRITICAL_EVICTION) { + envImpl.daemonEviction(true /*backgroundIO*/); + } + + /* The entry is not known to be obsolete -- process it now. */ + assert lookAheadCache != null; + assert checkPendingDbSet != null; + + if (isLN) { + + final LNLogEntry lnEntry = reader.getLNLogEntry(); + lnEntry.postFetchInit(db); + + final LN targetLN = lnEntry.getLN(); + final byte[] key = lnEntry.getKey(); + + lookAheadCache.add( + DbLsn.getFileOffset(logLsn), + new LNInfo(targetLN, dbId, key, expirationTime)); + + if (lookAheadCache.isFull()) { + processLN(fileNum, location, lookAheadCache, dbCache, + checkPendingDbSet); + } + + /* + * Process pending LNs before proceeding in order to + * prevent the pending list from growing too large. + */ + nProcessedLNs += 1; + if (nProcessedLNs % PROCESS_PENDING_EVERY_N_LNS == 0) { + cleaner.processPending(); + } + + } else if (isIN) { + + final IN targetIN = reader.getIN(db); + targetIN.setDatabase(db); + + processIN(targetIN, db, logLsn); + + } else if (isOldBINDelta) { + + final OldBINDelta delta = reader.getOldBINDelta(); + processOldBINDelta(delta, db, logLsn); + + } else if (isBINDelta) { + + final BIN delta = reader.getBINDelta(); + processBINDelta(delta, db, logLsn); + + } else if (isDbTree) { + + envImpl.rewriteMapTreeRoot(logLsn); + } else { + assert false; + } + } + + /* Don't process further if we are only calculating. */ + if (countOnly) { + return true; + } + + /* Process remaining queued LNs. */ + while (!lookAheadCache.isEmpty()) { + if (Cleaner.DO_CRITICAL_EVICTION) { + envImpl.daemonEviction(true /*backgroundIO*/); + } + processLN(fileNum, location, lookAheadCache, dbCache, + checkPendingDbSet); + /* Sleep if background read/write limit was exceeded. */ + envImpl.sleepAfterBackgroundIO(); + } + + /* Update the pending DB set. */ + for (final DatabaseId pendingDbId : checkPendingDbSet) { + final DatabaseImpl db = dbMapTree.getDb + (pendingDbId, cleaner.lockTimeout, dbCache); + cleaner.addPendingDB(db); + } + + /* Update reader stats. */ + nEntriesReadThisRun = reader.getNumRead(); + nRepeatIteratorReadsThisRun = reader.getNRepeatIteratorReads(); + + envImpl.getUtilizationProfile().flushLocalTracker(localTracker); + + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e); + } finally { + /* Subtract the overhead of this method from the budget. */ + budget.updateAdminMemoryUsage(0 - adjustMem); + + /* Release all cached DBs. */ + dbMapTree.releaseDbs(dbCache); + } + + /* File is fully processed, update status information. */ + fileSelector.addCleanedFile( + fileNum, databases, reader.getFirstVLSN(), reader.getLastVLSN(), + budget); + + return true; + } + + /** + * Processes the first LN in the look ahead cache and removes it from the + * cache. While the BIN is latched, look through the BIN for other LNs in + * the cache; if any match, process them to avoid a tree search later. + */ + private void processLN( + final Long fileNum, + final TreeLocation location, + final LookAheadCache lookAheadCache, + final Map dbCache, + final Set checkPendingDbSet) { + + /* Get the first LN from the queue. 
*/ + final Long offset = lookAheadCache.nextOffset(); + final LNInfo info = lookAheadCache.remove(offset); + + final LN lnFromLog = info.getLN(); + final byte[] keyFromLog = info.getKey(); + final long logLsn = DbLsn.makeLsn(fileNum, offset); + + /* + * Refresh the DB before processing an LN, in case the DB has been + * deleted since it was added to the lookAheadCache. If the DB has + * been deleted, perform the housekeeping tasks for an obsolete LN. + * + * Normally the DB will already be present (and non-deleted) in the + * dbCache. But because of the lookAheadCache and the periodic clearing + * of the dbCache in processFile, it's possible for a DB to be deleted + * in between placing it in the lookAheadCache and the call to + * processLN. [#22202] + */ + final DatabaseId dbId = info.getDbId(); + + final DatabaseImpl db = envImpl.getDbTree().getDb( + dbId, cleaner.lockTimeout, dbCache); + + if (db == null || db.isDeleted()) { + nLNsObsoleteThisRun++; + if (checkPendingDbSet != null) { + checkPendingDbSet.add(dbId); + } + return; + } + + nLNsCleanedThisRun++; + + /* Status variables are used to generate debug tracing info. */ + boolean processedHere = true; // The LN was cleaned here. + boolean obsolete = false; // The LN is no longer in use. + boolean completed = false; // This method completed. + + BIN bin = null; + Map pendingLNs = null; + + try { + final Tree tree = db.getTree(); + assert tree != null; + + /* Find parent of this LN. */ + final boolean parentFound = tree.getParentBINForChildLN( + location, keyFromLog, false /*splitsAllowed*/, + false /*blindDeltaOps*/, Cleaner.UPDATE_GENERATION); + + bin = location.bin; + final int index = location.index; + + if (!parentFound) { + + nLNsDeadThisRun++; + obsolete = true; + completed = true; + return; + } + + /* + * Now we're at the BIN parent for this LN. If knownDeleted, LN is + * deleted and can be purged. + */ + if (bin.isEntryKnownDeleted(index)) { + nLNsDeadThisRun++; + obsolete = true; + completed = true; + return; + } + + /* Process this LN that was found in the tree. */ + processedHere = false; + + LNInfo pendingLN = + processFoundLN(info, logLsn, bin.getLsn(index), bin, index); + + if (pendingLN != null) { + pendingLNs = new HashMap<>(); + pendingLNs.put(logLsn, pendingLN); + } + + completed = true; + + /* + * For all other non-deleted LNs in this BIN, lookup their LSN + * in the LN queue and process any matches. + */ + for (int i = 0; i < bin.getNEntries(); i += 1) { + + final long binLsn = bin.getLsn(i); + + if (i != index && + !bin.isEntryKnownDeleted(i) && + !bin.isEntryPendingDeleted(i) && + DbLsn.getFileNumber(binLsn) == fileNum) { + + final Long myOffset = DbLsn.getFileOffset(binLsn); + final LNInfo myInfo = lookAheadCache.remove(myOffset); + + /* If the offset is in the cache, it's a match. */ + if (myInfo != null) { + nLNQueueHitsThisRun++; + nLNsCleanedThisRun++; + + pendingLN = + processFoundLN(myInfo, binLsn, binLsn, bin, i); + + if (pendingLN != null) { + if (pendingLNs == null) { + pendingLNs = new HashMap<>(); + } + pendingLNs.put(binLsn, pendingLN); + } + } + } + } + } finally { + if (bin != null) { + bin.releaseLatch(); + } + + /* BIN must not be latched when synchronizing on FileSelector. 
*/ + if (pendingLNs != null) { + for (Map.Entry entry : pendingLNs.entrySet()) { + fileSelector.addPendingLN( + entry.getKey(), entry.getValue()); + } + } + + if (processedHere) { + cleaner.logFine(Cleaner.CLEAN_LN, lnFromLog, logLsn, + completed, obsolete, false /*migrated*/); + } + } + } + + /** + * Processes an LN that was found in the tree. Lock the LN's LSN and + * then migrates the LN, if the LSN of the LN log entry is the active LSN + * in the tree. + * + * @param info identifies the LN log entry. + * + * @param logLsn is the LSN of the log entry. + * + * @param treeLsn is the LSN found in the tree. + * + * @param bin is the BIN found in the tree; is latched on method entry and + * exit. + * + * @param index is the BIN index found in the tree. + * + * @return a non-null LNInfo if it should be added to the pending LN list, + * after releasing the BIN latch. + */ + private LNInfo processFoundLN( + final LNInfo info, + final long logLsn, + final long treeLsn, + final BIN bin, + final int index) { + + final LN lnFromLog = info.getLN(); + final byte[] key = info.getKey(); + + final DatabaseImpl db = bin.getDatabase(); + final boolean isTemporary = db.isTemporary(); + + /* Status variables are used to generate debug tracing info. */ + boolean obsolete = false; // The LN is no longer in use. + boolean migrated = false; // The LN was in use and is migrated. + boolean completed = false; // This method completed. + + BasicLocker locker = null; + try { + final Tree tree = db.getTree(); + assert tree != null; + + /* + * Before migrating an LN, we must lock it and then check to see + * whether it is obsolete or active. + * + * 1. If the LSN in the tree and in the log are the same, we will + * attempt to migrate it. + * + * 2. If the LSN in the tree is < the LSN in the log, the log entry + * is obsolete, because this LN has been rolled back to a previous + * version by a txn that aborted. + * + * 3. If the LSN in the tree is > the LSN in the log, the log entry + * is obsolete, because the LN was advanced forward by some + * now-committed txn. + * + * 4. If the LSN in the tree is a null LSN, the log entry is + * obsolete. A slot can only have a null LSN if the record has + * never been written to disk in a deferred write database, and + * in that case the log entry must be for a past, deleted version + * of that record. + */ + if (lnFromLog.isDeleted() && + treeLsn == logLsn && + fileLogVersion <= 2) { + + /* + * SR 14583: After JE 2.0, deleted LNs are never found in the + * tree, since we can assume they're obsolete and correctly + * marked as such in the obsolete offset tracking. JE 1.7.1 and + * earlier did not use the pending deleted bit, so deleted LNs + * may still be reachable through their BIN parents. + */ + obsolete = true; + nLNsDeadThisRun++; + bin.setPendingDeleted(index); + completed = true; + return null; + } + + if (treeLsn == DbLsn.NULL_LSN) { + + /* + * Case 4: The LN in the tree is a never-written LN for a + * deferred-write db, so the LN in the file is obsolete. + */ + nLNsDeadThisRun++; + obsolete = true; + completed = true; + return null; + } + + if (treeLsn != logLsn && isTemporary) { + + /* + * Temporary databases are always non-transactional. If the + * tree and log LSNs are different then we know that the logLsn + * is obsolete. Even if the LN is locked, the tree cannot be + * restored to the logLsn because no abort is possible without + * a transaction. We should consider a similar optimization in + * the future for non-transactional durable databases. 
+ */
+ nLNsDeadThisRun++;
+ obsolete = true;
+ completed = true;
+ return null;
+ }
+
+ if (!isTemporary) {
+
+ /*
+ * Get a lock on the LN if we will migrate it now. (Temporary
+ * DB LNs are dirtied below and migrated later.)
+ *
+ * We can hold the latch on the BIN since we always attempt to
+ * acquire a non-blocking read lock.
+ */
+ locker = BasicLocker.createBasicLocker(envImpl, false /*noWait*/);
+ /* Don't allow this short-lived lock to be preempted/stolen. */
+ locker.setPreemptable(false);
+ final LockResult lockRet = locker.nonBlockingLock(
+ treeLsn, LockType.READ, false /*jumpAheadOfWaiters*/, db);
+
+ if (lockRet.getLockGrant() == LockGrantType.DENIED) {
+
+ /*
+ * LN is currently locked by another Locker, so we can't
+ * assume anything about the value of the LSN in the bin.
+ */
+ nLNsLockedThisRun++;
+ completed = true;
+
+ return new LNInfo(
+ null /*LN*/, db.getId(), key,
+ info.getExpirationTime());
+ }
+
+ if (treeLsn != logLsn) {
+ /* The LN is obsolete and can be purged. */
+ nLNsDeadThisRun++;
+ obsolete = true;
+ completed = true;
+ return null;
+ }
+ }
+
+ /*
+ * The LN must be migrated because it is not obsolete, the lock was
+ * not denied, and treeLsn==logLsn.
+ */
+ assert !obsolete;
+ assert treeLsn == logLsn;
+
+ if (bin.isEmbeddedLN(index)) {
+ throw EnvironmentFailureException.unexpectedState(
+ envImpl,
+ "LN is embedded although its associated logrec (at " +
+ logLsn + ") does not have the embedded flag on");
+ }
+
+ /*
+ * For active LNs in non-temporary DBs, migrate the LN now.
+ * In this case we acquired a lock on the LN above.
+ *
+ * If the LN is not resident, populate it using the LN we read
+ * from the log so it does not have to be fetched. We must
+ * call postFetchInit to initialize MapLNs that have not been
+ * fully initialized yet [#13191]. When explicitly migrating
+ * (for a non-temporary DB) we will evict the LN after logging.
+ *
+ * Note that we do not load LNs from the off-heap cache here
+ * because it's unnecessary. We have the current LN in hand
+ * (from the log) and the off-heap cache does not hold dirty
+ * LNs, so the LN in hand is identical to the off-heap LN.
+ *
+ * MapLNs must be logged by DbTree.modifyDbRoot (the Tree root
+ * latch must be held) [#23492]. Here we simply dirty it via
+ * setDirty, which ensures it will be logged during the next
+ * checkpoint. Delaying until the next checkpoint also allows
+ * for write absorption, since MapLNs are often logged every
+ * checkpoint due to utilization changes.
+ *
+ * For temporary databases, we wish to defer logging for as
+ * long as possible. Therefore, dirty the LN to ensure it is
+ * flushed before its parent is written. Because we do not
+ * attempt to lock temporary database LNs (see above) we know
+ * that if it is non-obsolete, the tree and log LSNs are equal.
+ * If the LN from the log was populated here, it will be left
+ * in place for logging at a later time.
+ *
+ * Also for temporary databases, make both the target LN and
+ * the BIN or IN parent dirty. Otherwise, when the BIN or IN is
+ * evicted in the future, it will be written to disk without
+ * flushing its dirty, migrated LNs. [#18227]
+ */
+ if (bin.getTarget(index) == null) {
+ lnFromLog.postFetchInit(db, logLsn);
+ /* Ensure keys are transactionally correct. 
[#15704] */ + bin.attachNode(index, lnFromLog, key /*lnSlotKey*/); + } + + if (db.getId().equals(DbTree.ID_DB_ID)) { + final MapLN targetLn = (MapLN) bin.getTarget(index); + assert targetLn != null; + targetLn.getDatabase().setDirty(); + + } else if (isTemporary) { + ((LN) bin.getTarget(index)).setDirty(); + bin.setDirty(true); + nLNsMarkedThisRun++; + + } else { + final LN targetLn = (LN) bin.getTarget(index); + assert targetLn != null; + + final LogItem logItem = targetLn.log( + envImpl, db, null /*locker*/, null /*writeLockInfo*/, + false /*newEmbeddedLN*/, bin.getKey(index), + bin.getExpiration(index), bin.isExpirationInHours(), + false /*newEmbeddedLN*/, logLsn, + bin.getLastLoggedSize(index), + false/*isInsertion*/, true /*backgroundIO*/, + Cleaner.getMigrationRepContext(targetLn)); + + bin.updateEntry( + index, logItem.lsn, targetLn.getVLSNSequence(), + logItem.size); + + /* Evict LN if we populated it with the log LN. */ + if (lnFromLog == targetLn) { + bin.evictLN(index); + } + + /* Lock new LSN on behalf of existing lockers. */ + CursorImpl.lockAfterLsnChange( + db, logLsn, logItem.lsn, locker /*excludeLocker*/); + + nLNsMigratedThisRun++; + } + + migrated = true; + completed = true; + return null; + + } finally { + if (locker != null) { + locker.operationEnd(); + } + + cleaner.logFine(Cleaner.CLEAN_LN, lnFromLog, logLsn, completed, + obsolete, migrated); + } + } + + /** + * If this OldBINDelta is still in use in the in-memory tree, dirty the + * associated BIN. The next checkpoint will log a new delta or a full + * version, which will make this delta obsolete. + * + * For OldBINDeltas, we do not optimize and must fetch the BIN if it is not + * resident. + */ + private void processOldBINDelta( + OldBINDelta deltaClone, + DatabaseImpl db, + long logLsn) { + + nBINDeltasCleanedThisRun++; + + /* + * Search Btree for the BIN associated with this delta. + */ + final byte[] searchKey = deltaClone.getSearchKey(); + + final BIN treeBin = db.getTree().search( + searchKey, Cleaner.UPDATE_GENERATION); + + if (treeBin == null) { + /* BIN for this delta is no longer in the tree. */ + nBINDeltasDeadThisRun++; + return; + } + + /* Tree BIN is non-null and latched. */ + try { + final long treeLsn = treeBin.getLastLoggedLsn(); + + if (treeLsn == DbLsn.NULL_LSN) { + /* Current version was never logged. */ + nBINDeltasDeadThisRun++; + return; + } + + final int cmp = DbLsn.compareTo(treeLsn, logLsn); + + if (cmp > 0) { + /* Log entry is obsolete. */ + nBINDeltasDeadThisRun++; + return; + } + + /* + * Log entry is same or newer than what's in the tree. Dirty the + * BIN and let the checkpoint write it out. There is no need to + * prohibit a delta when the BIN is next logged (as is done when + * migrating full INs) because logging a new delta will obsolete + * this delta. + */ + treeBin.setDirty(true); + nBINDeltasMigratedThisRun++; + + } finally { + treeBin.releaseLatch(); + } + } + + /** + * If this BIN-delta is still in use in the in-memory tree, dirty the + * associated BIN. The next checkpoint will log a new delta or a full + * version, which will make this delta obsolete. + * + * We optimize by placing the delta from the log into the tree when the + * BIN is not resident. + */ + private void processBINDelta( + BIN deltaClone, + DatabaseImpl db, + long logLsn) { + + nBINDeltasCleanedThisRun++; + + /* Search for the BIN's parent by level, to avoid fetching the BIN. 
*/ + deltaClone.setDatabase(db); + deltaClone.latch(CacheMode.UNCHANGED); + + final SearchResult result = db.getTree().getParentINForChildIN( + deltaClone, true /*useTargetLevel*/, + true /*doFetch*/, CacheMode.UNCHANGED); + + try { + if (!result.exactParentFound) { + /* BIN for this delta is no longer in the tree. */ + nBINDeltasDeadThisRun++; + return; + } + + final long treeLsn = result.parent.getLsn(result.index); + if (treeLsn == DbLsn.NULL_LSN) { + /* Current version was never logged. */ + nBINDeltasDeadThisRun++; + return; + } + + /* + * If cmp is > 0 then log entry is obsolete because it is older + * than the version in the tree. + * + * If cmp is < 0 then log entry is also obsolete, because the old + * parent slot was deleted and we're now looking at a completely + * different IN due to the by-level search above. + */ + final int cmp = DbLsn.compareTo(treeLsn, logLsn); + if (cmp != 0) { + /* Log entry is obsolete. */ + nBINDeltasDeadThisRun++; + return; + } + + /* + * Log entry is the version that's in the tree. Dirty the BIN and + * let the checkpoint write it out. There is no need to prohibit a + * delta when the BIN is next logged (as is done when migrating + * full BINs) because logging a new delta will obsolete this delta. + */ + BIN treeBin = (BIN) result.parent.loadIN( + result.index, CacheMode.UNCHANGED); + + if (treeBin == null) { + /* Place delta from log into tree to avoid fetching. */ + treeBin = deltaClone; + treeBin.latchNoUpdateLRU(db); + + treeBin.postFetchInit(db, logLsn); + + result.parent.attachNode( + result.index, treeBin, null /*lnSlotKey*/); + } else { + treeBin.latch(CacheMode.UNCHANGED); + } + + /* + * Compress to reclaim space for expired slots, including dirty + * slots. However, if treeBin is a BIN-delta, this does nothing. + */ + envImpl.lazyCompress(treeBin, true /*compressDirtySlots*/); + + treeBin.setDirty(true); + treeBin.releaseLatch(); + + nBINDeltasMigratedThisRun++; + + } finally { + if (result.parent != null) { + result.parent.releaseLatch(); + } + } + } + + /** + * If an IN is still in use in the in-memory tree, dirty it. The checkpoint + * invoked at the end of the cleaning run will end up rewriting it. + */ + private void processIN( + IN inClone, + DatabaseImpl db, + long logLsn) { + + boolean obsolete = false; + boolean dirtied = false; + boolean completed = false; + + try { + nINsCleanedThisRun++; + + Tree tree = db.getTree(); + assert tree != null; + + IN inInTree = findINInTree(tree, db, inClone, logLsn); + + if (inInTree == null) { + /* IN is no longer in the tree. Do nothing. */ + nINsDeadThisRun++; + obsolete = true; + } else { + + /* + * IN is still in the tree. Dirty it. Checkpoint or eviction + * will write it out. + * + * Prohibit the next delta, since the original version must be + * made obsolete. + * + * Compress to reclaim space for expired slots, including dirty + * slots. + */ + nINsMigratedThisRun++; + inInTree.setDirty(true); + inInTree.setProhibitNextDelta(true); + envImpl.lazyCompress(inInTree, true /*compressDirtySlots*/); + inInTree.releaseLatch(); + dirtied = true; + } + + completed = true; + } finally { + cleaner.logFine(Cleaner.CLEAN_IN, inClone, logLsn, completed, + obsolete, dirtied); + } + } + + /** + * Given a clone of an IN that has been taken out of the log, try to find + * it in the tree and verify that it is the current one in the log. + * Returns the node in the tree if it is found and it is current re: LSN's. 
+ * Otherwise returns null if the clone is not found in the tree or it's not + * the latest version. Caller is responsible for unlatching the returned + * IN. + */ + private IN findINInTree( + Tree tree, + DatabaseImpl db, + IN inClone, + long logLsn) { + + /* Check if inClone is the root. */ + if (inClone.isRoot()) { + IN rootIN = isRoot(tree, db, inClone, logLsn); + if (rootIN == null) { + + /* + * inClone is a root, but no longer in use. Return now, because + * a call to tree.getParentNode will return something + * unexpected since it will try to find a parent. + */ + return null; + } else { + return rootIN; + } + } + + /* It's not the root. Can we find it, and if so, is it current? */ + inClone.latch(Cleaner.UPDATE_GENERATION); + SearchResult result = null; + try { + result = tree.getParentINForChildIN( + inClone, true /*useTargetLevel*/, + true /*doFetch*/, Cleaner.UPDATE_GENERATION); + + if (!result.exactParentFound) { + return null; + } + + /* Note that treeLsn may be for a BIN-delta, see below. */ + IN parent = result.parent; + long treeLsn = parent.getLsn(result.index); + + /* + * The IN in the tree is a never-written IN for a DW db so the IN + * in the file is obsolete. [#15588] + */ + if (treeLsn == DbLsn.NULL_LSN) { + return null; + } + + /* + * If tree and log LSNs are equal, then we've found the exact IN we + * read from the log. We know the treeLsn is not for a BIN-delta, + * because it is equal to LSN of the IN (or BIN) we read from the + * log. To avoid a fetch, we can place the inClone in the tree if + * it is not already resident, or use the inClone to mutate the + * delta in the tree to a full BIN. + */ + if (treeLsn == logLsn) { + IN in = parent.loadIN(result.index, Cleaner.UPDATE_GENERATION); + + if (in != null) { + + in.latch(Cleaner.UPDATE_GENERATION); + + if (in.isBINDelta()) { + /* + * The BIN should be dirty here because the most + * recently written logrec for it is a full-version + * logrec. After that logrec was written, the BIN + * was dirtied again, and then mutated to a delta. + * So this delta should still be dirty. + */ + assert(in.getDirty()); + + /* + * Since we want to clean the inClone full version of + * the bin, we must mutate the cached delta to a full + * BIN so that the next logrec for this BIN can be a + * full-version logrec. + */ + final BIN bin = (BIN) in; + bin.mutateToFullBIN( + (BIN) inClone, false /*leaveFreeSlot*/); + } + } else { + in = inClone; + + /* + * Latch before calling postFetchInit and attachNode to + * make those operations atomic. Must use latchNoUpdateLRU + * before the node is attached. + */ + in.latchNoUpdateLRU(db); + in.postFetchInit(db, logLsn); + parent.attachNode(result.index, in, null /*lnSlotKey*/); + } + + return in; + } + + if (inClone.isUpperIN()) { + /* No need to deal with BIN-deltas. */ + return null; + } + + /* + * If the tree and log LSNs are unequal, then we must get the full + * version LSN in case the tree LSN is actually for a BIN-delta. + * The only way to do that is to fetch the IN in the tree; however, + * we only need the delta not the full BIN. + */ + final BIN bin = + (BIN) parent.fetchIN(result.index, Cleaner.UPDATE_GENERATION); + + treeLsn = bin.getLastFullLsn(); + + /* Now compare LSNs, since we know treeLsn is the full version. */ + final int compareVal = DbLsn.compareTo(treeLsn, logLsn); + + /* + * If cmp is > 0 then log entry is obsolete because it is older + * than the version in the tree. 
+ * + * If cmp is < 0 then log entry is also obsolete, because the old + * parent slot was deleted and we're now looking at a completely + * different IN due to the by-level search above. + */ + if (compareVal != 0) { + return null; + } + + /* + * Log entry is the full version associated with the BIN-delta + * that's in the tree. To avoid a fetch, we can use the inClone to + * mutate the delta in the tree to a full BIN. + */ + bin.latch(Cleaner.UPDATE_GENERATION); + if (bin.isBINDelta()) { + bin.mutateToFullBIN((BIN) inClone, false /*leaveFreeSlot*/); + } + + return bin; + + } finally { + if (result != null && result.exactParentFound) { + result.parent.releaseLatch(); + } + } + } + + /** + * Get the current root in the tree, or null if the inClone is not the + * current root. + */ + private static class RootDoWork implements WithRootLatched { + private final DatabaseImpl db; + private final IN inClone; + private final long logLsn; + + RootDoWork(DatabaseImpl db, IN inClone, long logLsn) { + this.db = db; + this.inClone = inClone; + this.logLsn = logLsn; + } + + public IN doWork(ChildReference root) { + + if (root == null || + (root.getLsn() == DbLsn.NULL_LSN) || // deferred write root + (((IN) root.fetchTarget(db, null)).getNodeId() != + inClone.getNodeId())) { + return null; + } + + /* + * A root LSN less than the log LSN must be an artifact of when we + * didn't properly propagate the logging of the rootIN up to the + * root ChildReference. We still do this for compatibility with + * old log versions but may be able to remove it in the future. + */ + if (DbLsn.compareTo(root.getLsn(), logLsn) <= 0) { + IN rootIN = (IN) root.fetchTarget(db, null); + rootIN.latch(Cleaner.UPDATE_GENERATION); + return rootIN; + } else { + return null; + } + } + } + + /** + * Check if the cloned IN is the same node as the root in tree. Return the + * real root if it is, null otherwise. If non-null is returned, the + * returned IN (the root) is latched -- caller is responsible for + * unlatching it. + */ + private IN isRoot(Tree tree, DatabaseImpl db, IN inClone, long lsn) { + RootDoWork rdw = new RootDoWork(db, inClone, lsn); + return tree.withRootLatchedShared(rdw); + } + + /** + * Reset per-run counters. + */ + private void resetPerRunCounters() { + nINsObsoleteThisRun = 0; + nINsCleanedThisRun = 0; + nINsDeadThisRun = 0; + nINsMigratedThisRun = 0; + nBINDeltasObsoleteThisRun = 0; + nBINDeltasCleanedThisRun = 0; + nBINDeltasDeadThisRun = 0; + nBINDeltasMigratedThisRun = 0; + nLNsObsoleteThisRun = 0; + nLNsExpiredThisRun = 0; + nLNsCleanedThisRun = 0; + nLNsDeadThisRun = 0; + nLNsMigratedThisRun = 0; + nLNsMarkedThisRun = 0; + nLNQueueHitsThisRun = 0; + nLNsLockedThisRun = 0; + nEntriesReadThisRun = 0; + nRepeatIteratorReadsThisRun = 0; + } + + /** + * Add per-run counters to total counters. 
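+ *
+ * Illustrative call pattern (doCleanFile is a hypothetical stand-in for
+ * the per-file entry point in this class):
+ *
+ *   resetPerRunCounters();
+ *   doCleanFile();               // increments the *ThisRun fields
+ *   accumulatePerRunCounters();  // folds them into the cleaner totals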
+ */ + private void accumulatePerRunCounters() { + cleaner.nINsObsolete.add(nINsObsoleteThisRun); + cleaner.nINsCleaned.add(nINsCleanedThisRun); + cleaner.nINsDead.add(nINsDeadThisRun); + cleaner.nINsMigrated.add(nINsMigratedThisRun); + cleaner.nBINDeltasObsolete.add(nBINDeltasObsoleteThisRun); + cleaner.nBINDeltasCleaned.add(nBINDeltasCleanedThisRun); + cleaner.nBINDeltasDead.add(nBINDeltasDeadThisRun); + cleaner.nBINDeltasMigrated.add(nBINDeltasMigratedThisRun); + cleaner.nLNsObsolete.add(nLNsObsoleteThisRun); + cleaner.nLNsExpired.add(nLNsExpiredThisRun); + cleaner.nLNsCleaned.add(nLNsCleanedThisRun); + cleaner.nLNsDead.add(nLNsDeadThisRun); + cleaner.nLNsMigrated.add(nLNsMigratedThisRun); + cleaner.nLNsMarked.add(nLNsMarkedThisRun); + cleaner.nLNQueueHits.add(nLNQueueHitsThisRun); + cleaner.nLNsLocked.add(nLNsLockedThisRun); + cleaner.nRepeatIteratorReads.add(nRepeatIteratorReadsThisRun); + } + + /** + * A cache of LNInfo by LSN offset. Used to hold a set of LNs that are + * to be processed. Keeps track of memory used, and when full (over + * budget) the next offset should be queried and removed. + */ + private static class LookAheadCache { + + private final SortedMap map; + private final int maxMem; + private int usedMem; + + LookAheadCache(int lookAheadCacheSize) { + map = new TreeMap<>(); + maxMem = lookAheadCacheSize; + usedMem = MemoryBudget.TREEMAP_OVERHEAD; + } + + boolean isEmpty() { + return map.isEmpty(); + } + + boolean isFull() { + return usedMem >= maxMem; + } + + Long nextOffset() { + return map.firstKey(); + } + + void add(Long lsnOffset, LNInfo info) { + map.put(lsnOffset, info); + usedMem += info.getMemorySize(); + usedMem += MemoryBudget.TREEMAP_ENTRY_OVERHEAD; + } + + LNInfo remove(Long offset) { + LNInfo info = map.remove(offset); + if (info != null) { + usedMem -= info.getMemorySize(); + usedMem -= MemoryBudget.TREEMAP_ENTRY_OVERHEAD; + } + return info; + } + } +} diff --git a/src/com/sleepycat/je/cleaner/FileProtector.java b/src/com/sleepycat/je/cleaner/FileProtector.java new file mode 100644 index 0000000..b98f1ec --- /dev/null +++ b/src/com/sleepycat/je/cleaner/FileProtector.java @@ -0,0 +1,893 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.TreeMap; +import java.util.TreeSet; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.VLSN; + +/** + * The FileProtector is primarily responsible for protecting files from being + * deleted due to log cleaning, when they are needed for other purposes. As + * such it is a gatekeeper for reading files. 
In addition it maintains certain
+ * metadata:
+ * - the size of each file, which is needed for calculating disk usage;
+ * - the first and last VLSNs in each reserved file, which are needed for
+ * maintaining the VLSNIndex;
+ * - the total size of active and reserved files, for disk usage statistics.
+ *
+ * All files are in three categories:
+ * + Active: readable by all. The minimum set of files needed to function.
+ * + Reserved: should be read only by feeders and low level utilities.
+ * They have been cleaned and will not become active again. They can be
+ * "condemned" and deleted if they are not being read.
+ * + Condemned: not readable and effectively invisible. They will be deleted
+ * ASAP. They will not become active or reserved again. A file is typically
+ * in the Condemned category for a very short time, while being deleted.
+ *
+ * Reserved files can be temporarily protected, i.e., prevented from being
+ * condemned and deleted. Only reserved files can be condemned and deleted. All
+ * active files are implicitly protected, but are also protected explicitly by
+ * consumers because they may become reserved while being consumed.
+ *
+ * Consumers of the File Protection Service
+ * ----------------------------------------
+ *
+ * + DiskOrderedScanner (DiskOrderedCursor and DatabaseCount)
+ * - Protects all currently active files. By currently active we mean active
+ * at the time they are protected. If they become reserved during the
+ * scan, they must continue to be protected.
+ * - Also protects any new files written during the scan.
+ *
+ * + DbBackup:
+ * - Protects-and-lists all currently active files, or a subset of the
+ * currently active files in the case of an incremental backup.
+ * - Provides API to remove files from protected set as they are copied, to
+ * allow file deletion.
+ *
+ * + NetworkRestore:
+ * - Protects-and-lists all currently active files using DbBackup.
+ * - Also protects the two newest reserved files at the time that the active
+ * files are protected.
+ * - Removes files from protected set as they are copied, to allow file
+ * deletion.
+ *
+ * + Syncup:
+ * - Protects all files (active and reserved) in an open-ended range starting
+ * with the file of the VLSNIndex range start. Barren files (with no
+ * replicable entries) are not protected.
+ *
+ * + Feeder:
+ * - Protects all files (active and reserved) in an open-ended range starting
+ * with the file of the current VLSN. Barren files (with no replicable
+ * entries) are not protected.
+ * - Advances lower bound of protected range as VLSN advances, to allow file
+ * deletion.
+ *
+ * + Cleaner:
+ * - Transforms active files into reserved files after cleaning them.
+ * - Condemns and deletes reserved files to honor disk limits. Truncates head
+ * of VLSNIndex when necessary to stay within disk thresholds.
+ *
+ * Syncup, Feeders and the VLSNIndex
+ * ---------------------------------
+ * During syncup, a ProtectedFileRange is used to protect files in the entire
+ * range of the VLSNIndex. Syncup must also prevent head truncation of the
+ * VLSNIndex itself because the file readers (used by both master and replica)
+ * use the VLSNIndex to position in the file at various times.
+ *
+ * A feeder file reader also protects all files from the current VLSN onward
+ * using a ProtectedFileRange. We rely on syncup to initialize the feeder's
+ * ProtectedFileRange safely, while the syncup ProtectedFileRange is in effect. 
+ * The feeder reader will advance the lower bound of its ProtectedFileRange as
+ * it reads forward to allow files to be deleted. It also uses the VLSNIndex to
+ * skip over gaps in the file, although it is unclear whether this is really
+ * necessary.
+ *
+ * Therefore the syncup and feeder ProtectedFileRanges are special in that
+ * they also prevent head truncation of the VLSNIndex.
+ *
+ * The cleaner truncates the head of the VLSNIndex to allow deletion of files
+ * when necessary to stay within disk usage limits. This truncation must not
+ * be allowed when a syncup is in progress, and must not be allowed to remove
+ * the portion of the VLSN range used by a feeder. This is enforced using a
+ * special ProtectedFileRange (vlsnIndexRange) that protects the entire
+ * VLSNIndex range. The vlsnIndexRange is advanced when necessary to delete
+ * files that it protects, but only if those files are not protected by syncup
+ * or feeders. See {@link #checkVLSNIndexTruncation}, {@link
+ * com.sleepycat.je.rep.vlsn.VLSNTracker#tryTruncateFromHead} and {@link
+ * com.sleepycat.je.rep.vlsn.VLSNTracker#protectRangeHead}.
+ *
+ * We take pains to avoid synchronizing on FileProtector while truncating the
+ * VLSNIndex head, which is a relatively expensive operation. (The
+ * FileProtector is meant to be used by multiple threads without a lot of
+ * blocking and should perform only a fairly small amount of work while
+ * synchronized.) The following approach is used to truncate the VLSNIndex head
+ * safely:
+ *
+ * -- To prevent disk usage limit violations, Cleaner.manageDiskUsage first
+ * tries to delete reserved files without truncating the VLSNIndex. If this
+ * is not sufficient, it then tries to truncate the VLSNIndex head. If the
+ * VLSNIndex head can be truncated, then it tries again to delete reserved
+ * files, since more files should then be unprotected.
+ *
+ * -- VLSNTracker synchronization is used to protect the VLSNIndex range. The
+ * vlsnIndexRange ProtectedFileRange is advanced only while synchronized on
+ * the VLSNTracker.
+ *
+ * -- VLSNTracker.tryTruncateFromHead (which is synchronized) calls
+ * FileProtector.checkVLSNIndexTruncation to determine where to truncate the
+ * index. Reserved files can be removed from the VLSNIndex range only if
+ * they are not protected by syncup and feeders.
+ *
+ * -- The VLSNTracker range is then truncated, and the vlsnIndexRange is
+ * advanced to allow file deletion, all while synchronized on the tracker.
+ *
+ * -- When a syncup starts, it adds a ProtectedFileRange with the same
+ * startFile as the vlsnIndexRange. This is done while synchronized on the
+ * VLSNTracker and it prevents the vlsnIndexRange from advancing during the
+ * syncup.
+ *
+ * -- When a syncup is successful, on the master the Feeder is initialized and
+ * it adds a ProtectedFileRange to protect the range of the VLSNIndex that
+ * it is reading. This is done BEFORE removing the ProtectedFileRange that
+ * was added at the start of the syncup. This guarantees that the files and
+ * VLSNIndex range used by the feeder will not be truncated/deleted.
+ *
+ * Note that the special vlsnIndexRange ProtectedFileRange is excluded from
+ * LogSizeStats to avoid confusion and because this ProtectedFileRange does not
+ * ultimately prevent VLSNIndex head truncation or file deletion.
+ *
+ * Barren Files
+ * ------------
+ * Files with no replicable entries do not need to be protected by syncup or
+ * feeders. See {@link #protectFileRange(String, long, boolean)}. 
Barren files + * may be created when cleaning is occurring but app writes are not, for + * example, when recovering from a cache size configuration error. In this + * situation it is important to delete the barren files to reclaim disk space. + * + * Such "barren" files are identified by having null begin/end VLSNs. The + * begin/end VLSNs for a file are part of the cleaner metadata that is + * collected when the cleaner processes a file. These VLSNs are for replicable + * entries only, not migrated entries that happen to contain a VLSN. + */ +public class FileProtector { + + /* Prefixes for ProtectedFileSet names. */ + public static final String BACKUP_NAME = "Backup"; + public static final String DATABASE_COUNT_NAME = "DatabaseCount"; + public static final String DISK_ORDERED_CURSOR_NAME = "DiskOrderedCursor"; + public static final String FEEDER_NAME = "Feeder"; + public static final String SYNCUP_NAME = "Syncup"; + public static final String VLSN_INDEX_NAME = "VLSNIndex"; + public static final String NETWORK_RESTORE_NAME = "NetworkRestore"; + + private static class ReservedFileInfo { + long size; + VLSN endVLSN; + + ReservedFileInfo(long size, VLSN endVLSN) { + this.size = size; + this.endVLSN = endVLSN; + } + } + + private final EnvironmentImpl envImpl; + + /* Access active files only via getActiveFiles. */ + private final NavigableMap activeFiles = new TreeMap<>(); + + private final NavigableMap reservedFiles = + new TreeMap<>(); + + private final NavigableMap condemnedFiles = new TreeMap<>(); + + private final Map protectedFileSets = + new HashMap<>(); + + /* Is null if the env is not replicated. */ + private ProtectedFileRange vlsnIndexRange; + + FileProtector(final EnvironmentImpl envImpl) { + this.envImpl = envImpl; + } + + private void addFileProtection(ProtectedFileSet pfs) { + + if (protectedFileSets.putIfAbsent(pfs.getName(), pfs) != null) { + + throw EnvironmentFailureException.unexpectedState( + "ProtectedFileSets already present name=" + pfs.getName()); + } + } + + /** + * Removes protection by the given ProtectedFileSet to allow files to be + * deleted. + */ + public synchronized void removeFileProtection(ProtectedFileSet pfs) { + + final ProtectedFileSet oldPfs = + protectedFileSets.remove(pfs.getName()); + + if (oldPfs == null) { + throw EnvironmentFailureException.unexpectedState( + "ProtectedFileSet not found name=" + pfs.getName()); + } + + if (oldPfs != pfs) { + throw EnvironmentFailureException.unexpectedState( + "ProtectedFileSet mismatch name=" + pfs.getName()); + } + } + + /** + * Calls {@link #protectFileRange(String, long, boolean)} passing false for + * protectVlsnIndex. + */ + public synchronized ProtectedFileRange protectFileRange( + final String name, + final long rangeStart) { + + return protectFileRange(name, rangeStart, false); + } + + /** + * Returns a ProtectedFileRange that protects files with numbers GTE a + * lower bound. The upper bound is open ended. The protectVlsnIndex param + * should be true for feeder/syncup file protection only. + * + * @param rangeStart is the first file to be protected in the range. + * + * @param protectVlsnIndex is whether to prevent the VLSNIndex head from + * advancing, which also implies that barren files (with no replicable + * entries) are not protected. 
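+ *
+ * An illustrative consumer lifecycle (fileProtector, startFile and the
+ * read loop are hypothetical):
+ *
+ *   ProtectedFileRange range =
+ *       fileProtector.protectFileRange("MyReader", startFile);
+ *   try {
+ *       for (long f = startFile; hasMoreFiles(f); f++) {
+ *           readFile(f);
+ *           range.advanceRange(f + 1); // allow deletion behind us
+ *       }
+ *   } finally {
+ *       fileProtector.removeFileProtection(range);
+ *   }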
+ */
+ public synchronized ProtectedFileRange protectFileRange(
+ final String name,
+ final long rangeStart,
+ final boolean protectVlsnIndex) {
+
+ final ProtectedFileRange fileRange =
+ new ProtectedFileRange(name, rangeStart, protectVlsnIndex);
+
+ addFileProtection(fileRange);
+ return fileRange;
+ }
+
+ /**
+ * Calls {@link #protectActiveFiles(String, int, boolean)} passing 0 for
+ * nReservedFiles and true for protectNewFiles.
+ */
+ public synchronized ProtectedActiveFileSet protectActiveFiles(
+ final String name) {
+
+ return protectActiveFiles(name, 0, true);
+ }
+
+ /**
+ * Returns a ProtectedActiveFileSet that protects all files currently
+ * active at the time of construction. These files are protected even if
+ * they later become reserved. Note that this does not include the last
+ * file at the time of construction. Additional files can also be
+ * protected -- see params.
+ *
+ * @param nReservedFiles if greater than zero, this number of the newest
+ * (highest numbered) reserved files are also protected.
+ *
+ * @param protectNewFiles if true, the last file and any new files created
+ * later are also protected.
+ */
+ public synchronized ProtectedActiveFileSet protectActiveFiles(
+ final String name,
+ final int nReservedFiles,
+ final boolean protectNewFiles) {
+
+ final NavigableMap<Long, Long> activeFiles = getActiveFiles();
+
+ final NavigableSet<Long> protectedFiles =
+ new TreeSet<>(activeFiles.keySet());
+
+ if (nReservedFiles > 0) {
+ int n = nReservedFiles;
+ for (Long file : reservedFiles.descendingKeySet()) {
+ protectedFiles.add(file);
+ n -= 1;
+ if (n <= 0) {
+ break;
+ }
+ }
+ }
+
+ final Long rangeStart = protectNewFiles ?
+ (protectedFiles.isEmpty() ? 0 : (protectedFiles.last() + 1)) :
+ null;
+
+ final ProtectedActiveFileSet pfs =
+ new ProtectedActiveFileSet(name, protectedFiles, rangeStart);
+
+ addFileProtection(pfs);
+ return pfs;
+ }
+
+ /**
+ * Get new file info lazily to prevent synchronization and work in the CRUD
+ * code path when a new file is added.
+ */
+ private synchronized NavigableMap<Long, Long> getActiveFiles() {
+
+ final FileManager fileManager = envImpl.getFileManager();
+
+ /*
+ * Add all existing files when the env is first opened (except for the
+ * last file -- see below). This is a relatively expensive but one-time
+ * initialization.
+ */
+ if (activeFiles.isEmpty()) {
+
+ final Long[] files = fileManager.getAllFileNumbers();
+
+ for (int i = 0; i < files.length - 1; i++) {
+ final long file = files[i];
+
+ final File fileObj =
+ new File(fileManager.getFullFileName(file));
+
+ activeFiles.put(file, fileObj.length());
+ }
+ }
+
+ /*
+ * Add new files that have appeared. This is very quick, because no
+ * synchronization is required to get the last file number. Do not
+ * add the last file, since its length may still be changing.
+ */
+ final long lastFile = DbLsn.getFileNumber(fileManager.getNextLsn());
+
+ final long firstNewFile = activeFiles.isEmpty() ?
+ 0 : activeFiles.lastKey() + 1;
+
+ for (long file = firstNewFile; file < lastFile; file += 1) {
+
+ final File fileObj =
+ new File(fileManager.getFullFileName(file));
+
+ /* New files should be active before being reserved and deleted. */
+ if (!fileObj.exists() && !envImpl.isMemOnly()) {
+ throw EnvironmentFailureException.unexpectedState(
+ "File 0x" + Long.toHexString(file) +
+ " lastFile=" + Long.toHexString(lastFile));
+ }
+
+ activeFiles.put(file, fileObj.length());
+ }
+
+ return activeFiles;
+ }
+
+ /**
+ * Moves a file from active status to reserved status. 
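+ *
+ * This is the hand-off from cleaning to disk reclamation; the overall
+ * lifecycle (illustrative) is:
+ *
+ *   active --reserveFile--> reserved --takeCondemnedFile--> condemned
+ *   --delete on disk--> gone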
+ */
+ synchronized void reserveFile(Long file, VLSN endVLSN) {
+
+ final NavigableMap<Long, Long> activeFiles = getActiveFiles();
+
+ final Long size = activeFiles.remove(file);
+
+ if (size == null) {
+ throw EnvironmentFailureException.unexpectedState(
+ "Only active files (not the last file) may be" +
+ " cleaned/reserved file=0x" + Long.toHexString(file) +
+ " exists=" + envImpl.getFileManager().isFileValid(file) +
+ " reserved=" + reservedFiles.containsKey(file) +
+ " nextLsn=" + DbLsn.getNoFormatString(
+ envImpl.getFileManager().getNextLsn()));
+ }
+
+ final ReservedFileInfo info = new ReservedFileInfo(size, endVLSN);
+ final ReservedFileInfo prevInfo = reservedFiles.put(file, info);
+ assert prevInfo == null;
+ }
+
+ /**
+ * Returns the number of active files, including the last file.
+ */
+ synchronized int getNActiveFiles() {
+
+ final NavigableMap<Long, Long> activeFiles = getActiveFiles();
+ int count = activeFiles.size();
+
+ if (activeFiles.isEmpty() ||
+ activeFiles.lastKey() <
+ envImpl.getFileManager().getCurrentFileNum()) {
+ count += 1;
+ }
+
+ return count;
+ }
+
+ /**
+ * Returns the number of reserved files.
+ */
+ synchronized int getNReservedFiles() {
+ return reservedFiles.size();
+ }
+
+ /**
+ * Returns a copy of the reserved files along with the total size.
+ */
+ public synchronized Pair<Long, NavigableSet<Long>> getReservedFileInfo() {
+ long size = 0;
+ for (final ReservedFileInfo info : reservedFiles.values()) {
+ size += info.size;
+ }
+ return new Pair<>(size, new TreeSet<>(reservedFiles.keySet()));
+ }
+
+ /**
+ * Returns whether the given file is active, including the last file,
+ * whether or not it has been created on disk yet. If false is returned,
+ * the file is reserved or deleted.
+ */
+ synchronized boolean isActiveOrNewFile(Long file) {
+
+ final NavigableMap<Long, Long> activeFiles = getActiveFiles();
+
+ return activeFiles.isEmpty() ||
+ file > activeFiles.lastKey() ||
+ activeFiles.containsKey(file);
+ }
+
+ /**
+ * Returns whether the given file is in the reserved file set.
+ */
+ synchronized boolean isReservedFile(Long file) {
+ return reservedFiles.containsKey(file);
+ }
+
+ /**
+ * Returns a previously condemned file or condemns the oldest unprotected
+ * reserved file and returns it. If the returned file cannot be deleted by
+ * the caller, {@link #putBackCondemnedFile} should be called so the file
+ * deletion can be retried later.
+ *
+ * @param fromFile the lowest file number to return. Used to iterate past
+ * reserved files that are protected.
+ *
+ * @return {file, size} pair or null if a condemned file is not available.
+ */
+ synchronized Pair<Long, Long> takeCondemnedFile(long fromFile) {
+
+ if (!condemnedFiles.isEmpty()) {
+ final Long file = condemnedFiles.firstKey();
+ final Long size = condemnedFiles.remove(file);
+ return new Pair<>(file, size);
+ }
+
+ if (reservedFiles.isEmpty()) {
+ return null;
+ }
+
+ fileLoop:
+ for (final Map.Entry<Long, ReservedFileInfo> entry :
+ reservedFiles.tailMap(fromFile).entrySet()) {
+
+ final Long file = entry.getKey();
+ final ReservedFileInfo info = entry.getValue();
+
+ for (final ProtectedFileSet pfs : protectedFileSets.values()) {
+ if (pfs.isProtected(file, info)) {
+ continue fileLoop;
+ }
+ }
+
+ reservedFiles.remove(file);
+ return new Pair<>(file, info.size);
+ }
+
+ return null;
+ }
+
+ /**
+ * Puts back a condemned file after a file returned by {@link
+ * #takeCondemnedFile} could not be deleted. 
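+ *
+ * An illustrative take/delete/put-back sequence (deleteFileOnDisk is a
+ * hypothetical helper):
+ *
+ *   final Pair<Long, Long> pair = fileProtector.takeCondemnedFile(0);
+ *   if (pair != null) {
+ *       if (!deleteFileOnDisk(pair.first())) {
+ *           // retry the deletion in a later pass
+ *           fileProtector.putBackCondemnedFile(pair.first(), pair.second());
+ *       }
+ *   }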
+ */ + synchronized void putBackCondemnedFile(Long file, Long size) { + final Long oldSize = condemnedFiles.put(file, size); + assert oldSize == null; + } + + static class LogSizeStats { + final long activeSize; + final long reservedSize; + final long protectedSize; + final Map protectedSizeMap; + + LogSizeStats(final long activeSize, + final long reservedSize, + final long protectedSize, + final Map protectedSizeMap) { + this.activeSize = activeSize; + this.reservedSize = reservedSize; + this.protectedSize = protectedSize; + this.protectedSizeMap = protectedSizeMap; + } + } + + /** + * Returns sizes occupied by active, reserved and protected files. + */ + synchronized LogSizeStats getLogSizeStats() { + + /* Calculate active size. */ + final NavigableMap activeFiles = getActiveFiles(); + long activeSize = 0; + + for (final long size : activeFiles.values()) { + activeSize += size; + } + + /* Add size of last file, which is not included in activeFiles. */ + final long lastFileNum = activeFiles.isEmpty() ? + 0 : activeFiles.lastKey() + 1; + + final File lastFile = new File( + envImpl.getFileManager().getFullFileName(lastFileNum)); + + if (lastFile.exists()) { + activeSize += lastFile.length(); + } + + /* Calculate reserved and protected sizes. */ + long reservedSize = 0; + long protectedSize = 0; + final Map protectedSizeMap = new HashMap<>(); + + for (final Map.Entry entry : + reservedFiles.entrySet()) { + + final Long file = entry.getKey(); + final ReservedFileInfo info = entry.getValue(); + reservedSize += info.size; + boolean isProtected = false; + + for (final ProtectedFileSet pfs : protectedFileSets.values()) { + + if (pfs == vlsnIndexRange || !pfs.isProtected(file, info)) { + continue; + } + + isProtected = true; + + protectedSizeMap.compute( + pfs.getName(), + (k, v) -> ((v != null) ? v : 0) + info.size); + } + + if (isProtected) { + protectedSize += info.size; + } + } + + return new LogSizeStats( + activeSize, reservedSize, protectedSize, protectedSizeMap); + } + + /** + * Sets the ProtectedFileRange that protects files in VLSNIndex range + * from being deleted. The range start is changed during VLSNIndex + * initialization and when the head of the index is truncated. It is + * changed while synchronized on VLSNTracker so that changes to the + * range and changes to the files it protects are made atomically. This + * is important for + * {@link com.sleepycat.je.rep.vlsn.VLSNTracker#protectRangeHead}. + */ + public void setVLSNIndexProtectedFileRange(ProtectedFileRange pfs) { + vlsnIndexRange = pfs; + } + + /** + * Determines whether the VLSNIndex ProtectedFileRange should be advanced + * to reclaim bytesNeeded. This is possible if one or more reserved files + * are not protected by syncup and feeders. The range of files to be + * truncated must be at the head of the ordered set of reserved files, and + * the highest numbered file must contain a VLSN so we know where to + * truncate the VLSNIndex. + * + * @param bytesNeeded the number of bytes we need to free. + * + * @param preserveVLSN is the boundary above which the VLSN range may not + * advance. The deleteEnd returned will be less than preserveVLSN. + * + * @return {deleteEnd, deleteFileNum} pair if the protected file range + * should be advanced, or null if advancing is not currently possible. + * -- deleteEnd is the last VLSN to be truncated. + * -- deleteFileNum the file having deleteEnd as its last VLSN. 
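+ *
+ * Illustrative use by a caller (the real logic lives in
+ * VLSNTracker.tryTruncateFromHead, see the class comment):
+ *
+ *   final Pair<VLSN, Long> result =
+ *       fileProtector.checkVLSNIndexTruncation(bytesNeeded, preserveVLSN);
+ *   if (result != null) {
+ *       // truncate the VLSNIndex through result.first(), advance the
+ *       // vlsnIndexRange past result.second(), then retry file deletion
+ *   }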
+ */
+ public synchronized Pair<VLSN, Long> checkVLSNIndexTruncation(
+ final long bytesNeeded,
+ final VLSN preserveVLSN) {
+
+ /*
+ * Determine how many reserved files we need to delete, and find the
+ * last file/VLSN in that set of files, which is the truncation point.
+ */
+ VLSN truncateVLSN = VLSN.NULL_VLSN;
+ long truncateFile = -1;
+ long deleteBytes = 0;
+
+ fileLoop:
+ for (final Map.Entry<Long, ReservedFileInfo> entry :
+ reservedFiles.entrySet()) {
+
+ final Long file = entry.getKey();
+ final ReservedFileInfo info = entry.getValue();
+
+ for (final ProtectedFileSet pfs : protectedFileSets.values()) {
+
+ if (pfs == vlsnIndexRange || !pfs.protectVlsnIndex) {
+ continue;
+ }
+
+ if (pfs.isProtected(file, info)) {
+ break fileLoop;
+ }
+ }
+
+ final VLSN lastVlsn = info.endVLSN;
+
+ if (!lastVlsn.isNull()) {
+ if (lastVlsn.compareTo(preserveVLSN) > 0) {
+ break;
+ }
+ truncateVLSN = lastVlsn;
+ truncateFile = file;
+ }
+
+ deleteBytes += info.size;
+
+ if (deleteBytes >= bytesNeeded) {
+ break;
+ }
+ }
+
+ return truncateVLSN.isNull() ? null :
+ new Pair<>(truncateVLSN, truncateFile);
+ }
+
+ /**
+ * A ProtectedFileSet is used to prevent a set of files from being deleted.
+ * Implementations must meet two special requirements:
+ *
+ * 1. After a ProtectedFileSet is added using {@link #addFileProtection},
+ * its set of protected files (the set for which {@link #isProtected}
+ * returns true) may only be changed by shrinking the set. Files may not be
+ * added to the set of protected files. (One exception is that newly
+ * created files are effectively added to a file set defined as an
+ * open-ended range.)
+ *
+ * 2. Shrinking the protected set can be done without synchronization on
+ * FileProtector. However, implementations should ensure that changes made
+ * in one thread are visible to all threads.
+ *
+ * The intention is to allow protecting a set of files that are to be
+ * processed in some way, and allow easily shrinking this set as the files
+ * are processed, so that the processed files may be deleted. Changes to
+ * the protected set should be visible to all threads so that periodic disk
+ * space reclamation tasks can delete unprotected files ASAP. {@link
+ * ProtectedFileRange} is a simple class that meets these requirements.
+ */
+ public static abstract class ProtectedFileSet {
+
+ private final String name;
+ private final boolean protectVlsnIndex;
+
+ private ProtectedFileSet(final String name,
+ final boolean protectVlsnIndex) {
+ this.name = name;
+ this.protectVlsnIndex = protectVlsnIndex;
+ }
+
+ /**
+ * Identifies the protecting entity, used in LogSizeStats. Must be
+ * unique across all file sets added to the FileProtector.
+ */
+ private String getName() {
+ return name;
+ }
+
+ /**
+ * Whether the given file is protected.
+ */
+ abstract boolean isProtected(Long file, ReservedFileInfo info);
+
+ @Override
+ public String toString() {
+ return "ProtectedFileSet:" + name;
+ }
+ }
+
+ /**
+ * A ProtectedFileSet created using {@link #protectFileRange}.
+ *
+ * Protection may be removed dynamically to allow file deletion using
+ * {@link #advanceRange}. The current lower bound can be obtained using
+ * {@link #getRangeStart()}. 
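+ *
+ * For example (illustrative): after advanceRange(0x25), isProtected
+ * returns false for every file below 0x25, so those files become
+ * deletable as soon as no other ProtectedFileSet covers them.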
+ */ + public static class ProtectedFileRange extends ProtectedFileSet { + + private volatile long rangeStart; + private final boolean protectBarrenFiles; + + ProtectedFileRange( + final String name, + final long rangeStart, + final boolean protectVlsnIndex) { + + super(name, protectVlsnIndex); + this.rangeStart = rangeStart; + protectBarrenFiles = !protectVlsnIndex; + } + + @Override + boolean isProtected(final Long file, + final ReservedFileInfo info) { + + return file >= rangeStart && + (protectBarrenFiles || !info.endVLSN.isNull()); + } + + /** + * Returns the current rangeStart. This method is not synchronized and + * rangeStart is volatile to allow checking this value without + * blocking. + */ + public long getRangeStart() { + return rangeStart; + } + + /** + * Moves the lower bound of the protected file range forward. Used to + * allow file deletion as protected files are processed. + */ + public synchronized void advanceRange(final long rangeStart) { + + if (rangeStart < this.rangeStart) { + throw EnvironmentFailureException.unexpectedState( + "Attempted to advance to a new rangeStart=0x" + + Long.toHexString(rangeStart) + + " that precedes the old rangeStart=0x" + + Long.toHexString(this.rangeStart)); + } + + this.rangeStart = rangeStart; + } + } + + /** + * A ProtectedFileSet created using {@link #protectActiveFiles}. + * + * Protection may be removed dynamically to allow file deletion using + * {@link #truncateHead(long)}, {@link #truncateTail(long)} and + * {@link #removeFile(Long)}. A copy of the currently protected files can + * be obtained using {@link #getProtectedFiles()}. + */ + public static class ProtectedActiveFileSet extends ProtectedFileSet { + + private NavigableSet protectedFiles; + private Long rangeStart; + + ProtectedActiveFileSet( + final String name, + final NavigableSet protectedFiles, + final Long rangeStart) { + + super(name, false /*protectVlsnIndex*/); + this.protectedFiles = protectedFiles; + this.rangeStart = rangeStart; + } + + @Override + synchronized boolean isProtected(final Long file, + final ReservedFileInfo info) { + + return (rangeStart != null && file >= rangeStart) || + protectedFiles.contains(file); + } + + /** + * Returns a copy of the currently protected files, not including any + * new files. + */ + public synchronized NavigableSet getProtectedFiles() { + return new TreeSet<>(protectedFiles); + } + + /** + * Removes protection for files GT lastProtectedFile. Protection of + * new files is not impacted. + */ + public synchronized void truncateTail(long lastProtectedFile) { + protectedFiles = protectedFiles.headSet(lastProtectedFile, true); + } + + /** + * Removes protection for files LT firstProtectedFile. Protection of + * new files is not impacted. + */ + public synchronized void truncateHead(long firstProtectedFile) { + protectedFiles = protectedFiles.tailSet(firstProtectedFile, true); + } + + /** + * Removes protection for a given file. + */ + public synchronized void removeFile(final Long file) { + + protectedFiles.remove(file); + + /* + * This only works if protected files are removed in sequence, but + * that's good enough -- new files will rarely need to be deleted. + */ + if (file.equals(rangeStart)) { + rangeStart += 1; + } + } + } + + /** + * For debugging. 
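+ * Intended to be invoked manually, e.g. from a test or a debugger, to
+ * cross-check the cached sizes against the files on disk; mismatches are
+ * printed to System.out rather than thrown.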
+ */
+ @SuppressWarnings("unused")
+ synchronized void verifyFileSizes() {
+ final FileManager fm = envImpl.getFileManager();
+ final Long[] numArray = fm.getAllFileNumbers();
+ final NavigableMap<Long, Long> activeFiles = getActiveFiles();
+ for (int i = 0; i < numArray.length - 1; i++) {
+ final Long n = numArray[i];
+ final long trueSize = new File(fm.getFullFileName(n)).length();
+ if (activeFiles.containsKey(n)) {
+ final long activeSize = activeFiles.get(n);
+ if (activeSize != trueSize) {
+ System.out.format(
+ "active file %,d size %,d but true size %,d %n",
+ n, activeSize, trueSize);
+ }
+ } else if (reservedFiles.containsKey(n)) {
+ final long reservedSize = reservedFiles.get(n).size;
+ if (reservedSize != trueSize) {
+ System.out.format(
+ "reserved file %,d size %,d but true size %,d %n",
+ n, reservedSize, trueSize);
+ }
+ } else {
+ System.out.format(
+ "true file %x size %,d missing in FileProtector%n",
+ n, trueSize);
+ }
+ }
+ }
+}
diff --git a/src/com/sleepycat/je/cleaner/FileSelector.java b/src/com/sleepycat/je/cleaner/FileSelector.java
new file mode 100644
index 0000000..e8d4c1a
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/FileSelector.java
@@ -0,0 +1,624 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.utilint.Pair;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * Keeps track of the status of files for which cleaning is in progress.
+ */
+public class FileSelector {
+
+ /**
+ * Each file for which cleaning is in progress has one of the following
+ * status values. File numbers migrate from one status to another, in
+ * the order declared below.
+ */
+ enum FileStatus {
+
+ /**
+ * A file's status is initially TO_BE_CLEANED when it is selected as
+ * part of a batch of files that, when deleted, will bring total
+ * utilization down to the minimum configured value. All files with
+ * this status will be cleaned in lowest-cost-to-clean order. For two
+ * files of equal cost to clean, the lower numbered (oldest) file is
+ * selected; this is why the fileInfoMap is sorted by key (file
+ * number).
+ */
+ TO_BE_CLEANED,
+
+ /**
+ * When a TO_BE_CLEANED file is selected for processing by
+ * FileProcessor, it is moved to the BEING_CLEANED status. This
+ * distinction is used to prevent a file from being processed by more
+ * than one thread.
+ */
+ BEING_CLEANED,
+
+ /**
+ * A file is moved to the CLEANED status when all its log entries have
+ * been read and processed. 
However, entries needing migration will be
+ * marked with the BIN entry MIGRATE flag, entries that could not be
+ * locked will be in the pending LN set, and the DBs that were pending
+ * deletion will be in the pending DB set.
+ */
+ CLEANED,
+
+ /**
+ * A file is moved to the CHECKPOINTED status at the end of a
+ * checkpoint if it was CLEANED at the beginning of the checkpoint.
+ * Because all dirty BINs are flushed during the checkpoints, no files
+ * in this set will have entries with the MIGRATE flag set. However,
+ * some entries may be in the pending LN set and some DBs may be in the
+ * pending DB set.
+ */
+ CHECKPOINTED,
+
+ /**
+ * A file is moved from the CHECKPOINTED status to the FULLY_PROCESSED
+ * status when the pending LN/DB sets become empty. Since a pending LN
+ * was not locked successfully, we don't know its original file. But
+ * we do know that when no pending LNs are present for any file, all
+ * log entries in CHECKPOINTED files are either obsolete or have been
+ * migrated. Note, however, that the parent BINs of the migrated
+ * entries may not have been logged yet.
+ *
+ * No special handling is required to coordinate syncing of deferred
+ * write databases for pending, deferred write LNs, because
+ * non-temporary deferred write DBs are always synced during
+ * checkpoints, and temporary deferred write DBs are not recovered.
+ * Note that although DW databases are non-txnal, their LNs may be
+ * pended because of lock collisions.
+ */
+ FULLY_PROCESSED,
+ }
+
+ /**
+ * Information about a file being cleaned.
+ */
+ static class FileInfo {
+ private FileStatus status;
+ private int requiredUtil = -1;
+
+ /* Per-file metadata. */
+ Set<DatabaseId> dbIds;
+ VLSN firstVlsn = VLSN.NULL_VLSN;
+ VLSN lastVlsn = VLSN.NULL_VLSN;
+
+ @Override
+ public String toString() {
+ return "status = " + status +
+ " dbIds = " + dbIds +
+ " firstVlsn = " + firstVlsn +
+ " lastVlsn = " + lastVlsn;
+ }
+ }
+
+ /**
+ * Information about files being cleaned, keyed by file number. The map is
+ * sorted by file number to clean older files before newer files.
+ */
+ private SortedMap<Long, FileInfo> fileInfoMap;
+
+ /**
+ * Pending LN info, keyed by original LSN. These are LNs that could not be
+ * locked, either during processing or during migration.
+ */
+ private Map<Long, LNInfo> pendingLNs;
+
+ /**
+ * For processed entries with DBs that are pending deletion, we consider
+ * them to be obsolete but we store their DatabaseIds in a set. Until the
+ * DB deletion is complete, we can't delete the log files containing those
+ * entries.
+ */
+ private Set<DatabaseId> pendingDBs;
+
+ /**
+ * If during a checkpoint there are no pending LNs or DBs added, we can
+ * move CLEANED files directly to reserved status at the end of the
+ * checkpoint. This is an optimization that allows deleting files more
+ * quickly when possible. In particular this impacts the checkpoint
+ * during environment close, since no user operations are active during
+ * that checkpoint; this optimization allows us to delete all cleaned
+ * files after the final checkpoint.
+ */
+ private boolean anyPendingDuringCheckpoint;
+
+ FileSelector() {
+ fileInfoMap = new TreeMap<>();
+ pendingLNs = new HashMap<>();
+ pendingDBs = new HashSet<>();
+ }
+
+ /**
+ * Returns the best file that qualifies for cleaning, or null if no file
+ * qualifies.
+ *
+ * @param forceCleaning is true to always select a file, even if its
+ * utilization is above the minimum utilization threshold. 
+ * + * @return {file number, required utilization for 2-pass cleaning}, + * or null if no file qualifies for cleaning. + */ + synchronized Pair selectFileForCleaning( + UtilizationCalculator calculator, + SortedMap fileSummaryMap, + boolean forceCleaning) { + + final Set toBeCleaned = getToBeCleanedFiles(); + + if (!toBeCleaned.isEmpty()) { + final Long fileNum = toBeCleaned.iterator().next(); + final FileInfo info = setStatus(fileNum, FileStatus.BEING_CLEANED); + return new Pair<>(fileNum, info.requiredUtil); + } + + final Pair result = calculator.getBestFile( + fileSummaryMap, forceCleaning); + + if (result == null) { + return null; + } + + final Long fileNum = result.first(); + final int requiredUtil = result.second(); + + assert !fileInfoMap.containsKey(fileNum); + + final FileInfo info = setStatus(fileNum, FileStatus.BEING_CLEANED); + info.requiredUtil = requiredUtil; + + return result; + } + + /** + * Returns the number of files having the given status. + */ + private synchronized int getNumberOfFiles(FileStatus status) { + int count = 0; + for (FileInfo info : fileInfoMap.values()) { + if (info.status == status) { + count += 1; + } + } + return count; + } + + /** + * Returns a sorted set of files having the given status. + */ + private synchronized NavigableSet getFiles(FileStatus status) { + final NavigableSet set = new TreeSet<>(); + for (Map.Entry entry : fileInfoMap.entrySet()) { + if (entry.getValue().status == status) { + set.add(entry.getKey()); + } + } + return set; + } + + /** + * Moves a file to a given status, adding the file to the fileInfoMap if + * necessary. + * + * This method must be called while synchronized. + */ + private FileInfo setStatus(Long fileNum, FileStatus newStatus) { + FileInfo info = fileInfoMap.get(fileNum); + if (info == null) { + info = new FileInfo(); + fileInfoMap.put(fileNum, info); + } + info.status = newStatus; + return info; + } + + /** + * Moves a collection of files to a given status, adding the files to the + * fileInfoMap if necessary. + * + * This method must be called while synchronized. + */ + private void setStatus(Collection files, FileStatus newStatus) { + for (Long fileNum : files) { + setStatus(fileNum, newStatus); + } + } + + /** + * Moves all files with oldStatus to newStatus. + * + * This method must be called while synchronized. + */ + private void setStatus(FileStatus oldStatus, FileStatus newStatus) { + for (FileInfo info : fileInfoMap.values()) { + if (info.status == oldStatus) { + info.status = newStatus; + } + } + } + + /** + * Asserts that a file has a given status. Should only be called under an + * assertion to avoid the overhead of the method call and synchronization. + * Always returns true to enable calling it under an assertion. + * + * This method must be called while synchronized. + */ + private boolean checkStatus(Long fileNum, FileStatus expectStatus) { + final FileInfo info = fileInfoMap.get(fileNum); + assert info != null : "Expected " + expectStatus + " but was missing"; + assert info.status == expectStatus : + "Expected " + expectStatus + " but was " + info.status; + return true; + } + + /** + * Calls checkStatus(Long, FileStatus) for a collection of files. + * + * This method must be called while synchronized. + */ + private boolean checkStatus(final Collection files, + final FileStatus expectStatus) { + for (Long fileNum : files) { + checkStatus(fileNum, expectStatus); + } + return true; + } + + /** + * Returns whether the file is in any stage of the cleaning process. 
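/* Editorial note: the following is a minimal, illustrative sketch (not JE code; all
   names are invented) of the status progression described above, using a TreeMap so
   that iteration visits lower-numbered (older) files first, as fileInfoMap does. */

import java.util.TreeMap;

public class ToyFileStatusTracker {

    enum Status { TO_BE_CLEANED, BEING_CLEANED, CLEANED, CHECKPOINTED, FULLY_PROCESSED }

    private final TreeMap<Long, Status> files = new TreeMap<>();

    synchronized void addFile(long fileNum) {
        files.put(fileNum, Status.TO_BE_CLEANED);
    }

    // Select the lowest-numbered TO_BE_CLEANED file and mark it BEING_CLEANED,
    // so no other thread will pick it up.
    synchronized Long selectForCleaning() {
        for (java.util.Map.Entry<Long, Status> e : files.entrySet()) {
            if (e.getValue() == Status.TO_BE_CLEANED) {
                e.setValue(Status.BEING_CLEANED);
                return e.getKey();
            }
        }
        return null;
    }

    synchronized void finishCleaning(long fileNum) {
        files.put(fileNum, Status.CLEANED);
    }

    public static void main(String[] args) {
        ToyFileStatusTracker t = new ToyFileStatusTracker();
        t.addFile(7);
        t.addFile(3);
        System.out.println(t.selectForCleaning()); // prints 3: oldest first
    }
}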
+ */ + private synchronized boolean isFileCleaningInProgress(Long fileNum) { + return fileInfoMap.containsKey(fileNum); + } + + synchronized int getRequiredUtil(Long fileNum) { + FileInfo info = fileInfoMap.get(fileNum); + return (info != null) ? info.requiredUtil : -1; + } + + /** + * Removes all references to a file. + */ + synchronized FileInfo removeFile(Long fileNum, MemoryBudget budget) { + FileInfo info = fileInfoMap.get(fileNum); + if (info == null) { + return null; + } + adjustMemoryBudget(budget, info.dbIds, null /*newDatabases*/); + fileInfoMap.remove(fileNum); + return info; + } + + /** + * When file cleaning is aborted, move the file back from BEING_CLEANED to + * TO_BE_CLEANED. + */ + synchronized void putBackFileForCleaning(Long fileNum) { + assert checkStatus(fileNum, FileStatus.BEING_CLEANED); + setStatus(fileNum, FileStatus.TO_BE_CLEANED); + } + + /** + * For unit testing. + */ + public synchronized void injectFileForCleaning(Long fileNum) { + if (!isFileCleaningInProgress(fileNum)) { + final FileInfo info = setStatus(fileNum, FileStatus.TO_BE_CLEANED); + info.requiredUtil = -1; + } + } + + /** + * When cleaning is complete, move the file from the BEING_CLEANED to + * CLEANED. + */ + synchronized void addCleanedFile(Long fileNum, + Set databases, + VLSN firstVlsn, + VLSN lastVlsn, + MemoryBudget budget) { + assert checkStatus(fileNum, FileStatus.BEING_CLEANED); + FileInfo info = setStatus(fileNum, FileStatus.CLEANED); + adjustMemoryBudget(budget, info.dbIds, databases); + info.dbIds = databases; + info.firstVlsn = firstVlsn; + info.lastVlsn = lastVlsn; + } + + /** + * Returns a read-only copy of TO_BE_CLEANED files that can be accessed + * without synchronization. + */ + synchronized Set getToBeCleanedFiles() { + return getFiles(FileStatus.TO_BE_CLEANED); + } + + /** + * Returns a copy of the CLEANED and FULLY_PROCESSED files at the time a + * checkpoint starts. + */ + synchronized CheckpointStartCleanerState getFilesAtCheckpointStart() { + + anyPendingDuringCheckpoint = + !pendingLNs.isEmpty() || + !pendingDBs.isEmpty(); + + return new CheckpointStartCleanerState( + getFiles(FileStatus.CLEANED), + getFiles(FileStatus.FULLY_PROCESSED)); + } + + /** + * Returns whether any files are cleaned or fully-processed, meaning that a + * checkpoint is needed before they can be deleted. + */ + public synchronized boolean isCheckpointNeeded() { + return getNumberOfFiles(FileStatus.CLEANED) > 0 || + getNumberOfFiles(FileStatus.FULLY_PROCESSED) > 0; + } + + /** + * When a checkpoint is complete, move the previously CLEANED and + * FULLY_PROCESSED files to the CHECKPOINTED and reserved status. + * Reserved files are removed from the FileSelector and their reserved + * status is maintained in FileProtector. + * + * @return map of {fileNum, FileInfo} for the files whose status was + * changed to reserved. 
+ */ + synchronized Map updateFilesAtCheckpointEnd( + final EnvironmentImpl env, + final CheckpointStartCleanerState info) { + + if (info.isEmpty()) { + return Collections.emptyMap(); + } + + final Map reservedFiles = new HashMap<>(); + final Set previouslyCleanedFiles = info.getCleanedFiles(); + final Set previouslyProcessedFiles = + info.getFullyProcessedFiles(); + + if (previouslyCleanedFiles != null) { + + assert checkStatus(previouslyCleanedFiles, FileStatus.CLEANED); + + if (anyPendingDuringCheckpoint) { + setStatus(previouslyCleanedFiles, FileStatus.CHECKPOINTED); + } else { + makeReservedFiles(env, previouslyCleanedFiles, reservedFiles); + } + } + + if (previouslyProcessedFiles != null) { + + assert checkStatus(previouslyProcessedFiles, + FileStatus.FULLY_PROCESSED); + + makeReservedFiles(env, previouslyProcessedFiles, reservedFiles); + } + + updateProcessedFiles(); + + return reservedFiles; + } + + private void makeReservedFiles( + final EnvironmentImpl env, + final Set safeToDeleteFiles, + final Map reservedFiles) { + + final FileProtector fileProtector = env.getFileProtector(); + final MemoryBudget memoryBudget = env.getMemoryBudget(); + + for (Long file : safeToDeleteFiles) { + final FileInfo info = removeFile(file, memoryBudget); + fileProtector.reserveFile(file, info.lastVlsn); + reservedFiles.put(file, info); + } + + env.getUtilizationProfile().removeFileSummaries(safeToDeleteFiles); + } + + /** + * Adds the given LN info to the pending LN set. + */ + synchronized boolean addPendingLN(final long logLsn, final LNInfo info) { + + anyPendingDuringCheckpoint = true; + return pendingLNs.put(logLsn, info) != null; + } + + /** + * Returns a map of LNInfo for LNs that could not be migrated in a prior + * cleaning attempt, or null if no LNs are pending. + */ + synchronized Map getPendingLNs() { + + if (pendingLNs.size() > 0) { + return new HashMap<>(pendingLNs); + } else { + return null; + } + } + + /** + * Removes the LN for the given LSN from the pending LN set. + */ + synchronized void removePendingLN(long originalLsn) { + + pendingLNs.remove(originalLsn); + updateProcessedFiles(); + } + + /** + * Returns number of LNs pending. + */ + synchronized int getPendingLNQueueSize() { + return pendingLNs.size(); + } + + /** + * Adds the given DatabaseId to the pending DB set. + */ + synchronized boolean addPendingDB(DatabaseId dbId) { + + boolean added = pendingDBs.add(dbId); + + anyPendingDuringCheckpoint = true; + return added; + } + + /** + * Returns an array of DatabaseIds for DBs that were pending deletion in a + * prior cleaning attempt, or null if no DBs are pending. + */ + synchronized DatabaseId[] getPendingDBs() { + + if (pendingDBs.size() > 0) { + DatabaseId[] dbs = new DatabaseId[pendingDBs.size()]; + pendingDBs.toArray(dbs); + return dbs; + } else { + return null; + } + } + + /** + * Removes the DatabaseId from the pending DB set. + */ + synchronized void removePendingDB(DatabaseId dbId) { + + pendingDBs.remove(dbId); + updateProcessedFiles(); + } + + /** + * Returns a copy of the in-progress files, or an empty set if there are + * none. + */ + public synchronized NavigableSet getInProgressFiles() { + return new TreeSet<>(fileInfoMap.keySet()); + } + + /** + * Update memory budgets when the environment is closed and will never be + * accessed again. 
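/* Editorial note: a simplified sketch (invented names, not JE's classes) of the
   checkpoint-gated promotion described above. CLEANED files snapshotted at checkpoint
   start become deletable (reserved) at checkpoint end only if no LN/DB became pending
   during the checkpoint; otherwise they wait in CHECKPOINTED. */

import java.util.HashSet;
import java.util.Set;

class CheckpointPromotionSketch {

    private final Set<Long> cleaned = new HashSet<>();
    private final Set<Long> checkpointed = new HashSet<>();
    private final Set<Long> reserved = new HashSet<>();
    private boolean anyPendingDuringCheckpoint;

    synchronized void fileCleaned(long fileNum) {
        cleaned.add(fileNum);
    }

    // Analogue of getFilesAtCheckpointStart: snapshot the CLEANED set and note
    // whether pending work already exists.
    synchronized Set<Long> checkpointStart(boolean pendingWorkExists) {
        anyPendingDuringCheckpoint = pendingWorkExists;
        return new HashSet<>(cleaned);
    }

    // Analogue of addPendingLN/addPendingDB arriving mid-checkpoint.
    synchronized void notePendingWork() {
        anyPendingDuringCheckpoint = true;
    }

    // Analogue of updateFilesAtCheckpointEnd.
    synchronized void checkpointEnd(Set<Long> snapshot) {
        for (Long f : snapshot) {
            cleaned.remove(f);
            if (anyPendingDuringCheckpoint) {
                checkpointed.add(f); // must wait for pending LNs/DBs to drain
            } else {
                reserved.add(f);     // safe to delete after this checkpoint
            }
        }
    }
}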
+ */ + synchronized void close(MemoryBudget budget) { + for (FileInfo info : fileInfoMap.values()) { + adjustMemoryBudget(budget, info.dbIds, null /*newDatabases*/); + } + } + + /** + * If there are no pending LNs or DBs outstanding, move the CHECKPOINTED + * files to FULLY_PROCESSED. The check for pending LNs/DBs and the copying + * of the CHECKPOINTED files must be done atomically in a synchronized + * block. All methods that call this method are synchronized. + */ + private void updateProcessedFiles() { + if (pendingLNs.isEmpty() && pendingDBs.isEmpty()) { + setStatus(FileStatus.CHECKPOINTED, FileStatus.FULLY_PROCESSED); + } + } + + /** + * Adjust the memory budget when an entry is added to or removed from the + * cleanedFilesDatabases map. + */ + private void adjustMemoryBudget(MemoryBudget budget, + Set oldDatabases, + Set newDatabases) { + long adjustMem = 0; + if (oldDatabases != null) { + adjustMem -= getCleanedFilesDatabaseEntrySize(oldDatabases); + } + if (newDatabases != null) { + adjustMem += getCleanedFilesDatabaseEntrySize(newDatabases); + } + budget.updateAdminMemoryUsage(adjustMem); + } + + /** + * Returns the size of a HashMap entry that contains the given set of + * DatabaseIds. We don't count the DatabaseId size because it is likely + * that it is also stored (and budgeted) in the DatabaseImpl. + */ + private long getCleanedFilesDatabaseEntrySize(Set databases) { + return MemoryBudget.HASHMAP_ENTRY_OVERHEAD + + MemoryBudget.HASHSET_OVERHEAD + + (databases.size() * MemoryBudget.HASHSET_ENTRY_OVERHEAD); + } + + /** + * Holds copy of all checkpoint-dependent cleaner state. + */ + public static class CheckpointStartCleanerState { + + /* A snapshot of the cleaner collections at the checkpoint start. */ + private Set cleanedFiles; + private Set fullyProcessedFiles; + + private CheckpointStartCleanerState(Set cleanedFiles, + Set fullyProcessedFiles) { + + /* + * Save snapshots of the collections of various files at the + * beginning of the checkpoint. + */ + this.cleanedFiles = cleanedFiles; + this.fullyProcessedFiles = fullyProcessedFiles; + } + + public boolean isEmpty() { + return ((cleanedFiles.size() == 0) && + (fullyProcessedFiles.size() == 0)); + } + + public Set getCleanedFiles() { + return cleanedFiles; + } + + public Set getFullyProcessedFiles() { + return fullyProcessedFiles; + } + } + + @Override + public synchronized String toString() { + return "files = " + fileInfoMap + + " pendingLNs = " + pendingLNs + + " pendingDBs = " + pendingDBs + + " anyPendingDuringCheckpoint = " + anyPendingDuringCheckpoint; + } +} diff --git a/src/com/sleepycat/je/cleaner/FileSummary.java b/src/com/sleepycat/je/cleaner/FileSummary.java new file mode 100644 index 0000000..73d0c28 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/FileSummary.java @@ -0,0 +1,452 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.entry.LNLogEntry; + +/** + * Per-file utilization counters. The UtilizationProfile stores a persistent + * map of file number to FileSummary. + */ +public class FileSummary implements Loggable, Cloneable { + + /* Persistent fields. */ + public int totalCount; // Total # of log entries + public int totalSize; // Total bytes in log file + public int totalINCount; // Number of IN log entries + public int totalINSize; // Byte size of IN log entries + public int totalLNCount; // Number of LN log entries + public int totalLNSize; // Byte size of LN log entries + public int maxLNSize; // Byte size of largest LN log entry + public int obsoleteINCount; // Number of obsolete IN log entries + public int obsoleteLNCount; // Number of obsolete LN log entries + public int obsoleteLNSize; // Byte size of obsolete LN log entries + public int obsoleteLNSizeCounted; // Number obsolete LNs with size counted + + /** + * Creates an empty summary. + */ + public FileSummary() { + } + + public FileSummary clone() { + try { + return (FileSummary) super.clone(); + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Returns whether this summary contains any non-zero totals. + */ + public boolean isEmpty() { + + return totalCount == 0 && + totalSize == 0 && + obsoleteINCount == 0 && + obsoleteLNCount == 0; + } + + /** + * Returns the approximate byte size of all obsolete LN entries, using the + * average LN size for LN sizes that were not counted. + */ + public int getObsoleteLNSize() { + + if (totalLNCount == 0) { + return 0; + } + + /* Normalize obsolete amounts to account for double-counting. */ + final int obsLNCount = Math.min(obsoleteLNCount, totalLNCount); + final int obsLNSize = Math.min(obsoleteLNSize, totalLNSize); + final int obsLNSizeCounted = Math.min(obsoleteLNSizeCounted, + obsLNCount); + + /* + * Use the tracked obsolete size for all entries for which the size was + * counted, plus the average size for all obsolete entries whose size + * was not counted. + */ + long obsSize = obsLNSize; + final int obsCountNotCounted = obsLNCount - obsLNSizeCounted; + if (obsCountNotCounted > 0) { + + /* + * When there are any obsolete LNs with sizes uncounted, we add an + * obsolete amount that is the product of the number of LNs + * uncounted and the average LN size. + */ + final float avgLNSizeNotCounted = getAvgObsoleteLNSizeNotCounted(); + if (!Float.isNaN(avgLNSizeNotCounted)) { + obsSize += (int) (obsCountNotCounted * avgLNSizeNotCounted); + } + } + + /* Don't return an impossibly large estimate. */ + return (obsSize > totalLNSize) ? totalLNSize : (int) obsSize; + } + + /** + * Returns the average size for LNs with sizes not counted, or NaN if + * there are no such LNs. + * + * In FileSummaryLN version 3 and greater the obsolete size is normally + * counted, but not in exceptional circumstances such as recovery. If it + * is not counted, obsoleteLNSizeCounted will be less than obsoleteLNCount. + * + * In log version 8 and greater, we don't count the size when the LN is not + * resident in cache during update/delete, and CLEANER_FETCH_OBSOLETE_SIZE + * is false (the default setting). + * + * We added maxLNSize in version 8 for use in estimating obsolete LN sizes. 
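/* Editorial note: a back-of-the-envelope sketch of the estimate described above. This
   is an assumed simplification of getObsoleteLNSize, not JE's exact arithmetic:
   obsolete bytes = bytes counted directly, plus (uncounted obsolete LN count * average
   size among LNs whose size was not counted), clamped to the total LN bytes. */

class ObsoleteLNSizeEstimateSketch {

    static long estimate(long totalLNCount, long totalLNSize,
                         long obsoleteLNCount, long obsoleteLNSize,
                         long obsoleteLNSizeCounted) {
        long uncounted = obsoleteLNCount - obsoleteLNSizeCounted;
        long estimate = obsoleteLNSize;
        long sizeNotCounted = totalLNSize - obsoleteLNSize;
        long countNotCounted = totalLNCount - obsoleteLNSizeCounted;
        if (uncounted > 0 && sizeNotCounted > 0 && countNotCounted > 0) {
            double avg = sizeNotCounted / (double) countNotCounted;
            estimate += (long) (uncounted * avg);
        }
        return Math.min(estimate, totalLNSize); // never exceed the total
    }

    public static void main(String[] args) {
        // 1000 LNs totalling 1 MB; 400 obsolete, of which only 100 had their
        // size (10 KB) counted: the estimate adds 300 * the average uncounted
        // LN size (990000 / 900 = 1100), giving 10000 + 330000 = 340000.
        System.out.println(estimate(1000, 1_000_000, 400, 10_000, 100));
    }
}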
+ * + * To compute the average LN size, we only consider the LNs (both obsolete + * and non-obsolete) for which the size has not been counted. This + * increases accuracy when counted and uncounted LN sizes are not uniform. + * An example is when large LNs are inserted and deleted. The size of the + * deleted LN log entry (which is small) is always counted, but the + * previous version (which has a large size) may not be counted. + */ + public float getAvgObsoleteLNSizeNotCounted() { + + /* Normalize obsolete amounts to account for double-counting. */ + final int obsLNCount = Math.min(obsoleteLNCount, totalLNCount); + final int obsLNSize = Math.min(obsoleteLNSize, totalLNSize); + final int obsLNSizeCounted = Math.min(obsoleteLNSizeCounted, + obsLNCount); + + final int obsCountNotCounted = obsLNCount - obsLNSizeCounted; + if (obsCountNotCounted <= 0) { + return Float.NaN; + } + + final int totalSizeNotCounted = totalLNSize - obsLNSize; + final int totalCountNotCounted = totalLNCount - obsLNSizeCounted; + + if (totalSizeNotCounted <= 0 || totalCountNotCounted <= 0) { + return Float.NaN; + } + + return totalSizeNotCounted / ((float) totalCountNotCounted); + } + + /** + * Returns the maximum possible obsolete LN size, using the maximum LN size + * for LN sizes that were not counted. + */ + public int getMaxObsoleteLNSize() { + + /* + * In log version 7 and earlier the maxLNSize is not available. It is + * safe to use getObsoleteLNSize in that case, because LSN locking was + * not used and the obsolete size was counted for updates and deletes. + */ + if (maxLNSize == 0) { + return getObsoleteLNSize(); + } + + if (totalLNCount == 0) { + return 0; + } + + /* Normalize obsolete amounts to account for double-counting. */ + final int obsLNCount = Math.min(obsoleteLNCount, totalLNCount); + final int obsLNSize = Math.min(obsoleteLNSize, totalLNSize); + final int obsLNSizeCounted = Math.min(obsoleteLNSizeCounted, + obsLNCount); + + /* + * Use the tracked obsolete size for all entries for which the size was + * counted, plus the maximum possible size for all obsolete entries + * whose size was not counted. + */ + long obsSize = obsLNSize; + final long obsCountNotCounted = obsLNCount - obsLNSizeCounted; + if (obsCountNotCounted > 0) { + + /* + * When there are any obsolete LNs with sizes uncounted, we add an + * obsolete amount that is the minimum of two values. Either value + * may be much higher than the true obsolete amount, but by taking + * their minimum we use a much more realistic obsolete amount. + * + * maxLNSizeNotCounted is the maximum obsolete not counted, based + * on the multiplying maxLNSize and the number of obsolete LNs not + * counted. + * + * maxObsSizeNotCounted is also an upper bound on the obsolete size + * not not counted. The (totalLNSize - obsLNSize) gives the amount + * non-obsolete plus the obsolete amount not counted. From this we + * subtract the minimum non-obsolete size, based on the minimum + * size of any LN. This leaves the maximum obsolete size not + * counted. + * + * Note that the mutiplication immediately below would overflow if + * type 'int' instead of 'long' were used for the operands. This + * was fixed in [#21106]. + */ + final long maxLNSizeNotCounted = obsCountNotCounted * maxLNSize; + + final long maxObsSizeNotCounted = totalLNSize - obsLNSize - + ((totalLNCount - obsLNCount) * LNLogEntry.MIN_LOG_SIZE); + + obsSize += Math.min(maxLNSizeNotCounted, maxObsSizeNotCounted); + } + + /* Don't return an impossibly large estimate. 
*/ + return (obsSize > totalLNSize) ? totalLNSize : (int) obsSize; + } + + /** + * Returns the approximate byte size of all obsolete IN entries. + */ + public int getObsoleteINSize() { + + if (totalINCount == 0) { + return 0; + } + + /* Normalize obsolete amounts to account for double-counting. */ + final int obsINCount = Math.min(obsoleteINCount, totalINCount); + + /* Use average IN size to compute total. */ + final float size = totalINSize; + final float avgSizePerIN = size / totalINCount; + return (int) (obsINCount * avgSizePerIN); + } + + /** + * Returns an estimate of the total bytes that are obsolete, using + * getObsoleteLNSize instead of getMaxObsoleteLNSize. + */ + public int getObsoleteSize() { + return calculateObsoleteSize(getObsoleteLNSize()); + } + + /** + * Returns an estimate of the total bytes that are obsolete, using + * getMaxObsoleteLNSize instead of getObsoleteLNSize. + */ + public int getMaxObsoleteSize() { + return calculateObsoleteSize(getMaxObsoleteLNSize()); + } + + private int calculateObsoleteSize(int lnObsoleteSize) { + if (totalSize <= 0) { + return 0; + } + /* Leftover (non-IN non-LN) space is considered obsolete. */ + final int leftoverSize = totalSize - (totalINSize + totalLNSize); + + int obsoleteSize = lnObsoleteSize + + getObsoleteINSize() + + leftoverSize; + + /* + * Don't report more obsolete bytes than the total. We may + * calculate more than the total because of (intentional) + * double-counting during recovery. + */ + if (obsoleteSize > totalSize) { + obsoleteSize = totalSize; + } + return obsoleteSize; + } + + /** + * Returns the total number of entries counted. This value is guaranteed + * to increase whenever the tracking information about a file changes. It + * is used a key discriminator for FileSummaryLN records. + */ + int getEntriesCounted() { + return totalCount + obsoleteLNCount + obsoleteINCount; + } + + /** + * Calculates utilization percentage using average LN sizes. + */ + public int utilization() { + return utilization(getObsoleteSize(), totalSize); + } + + /** + * Calculates a utilization percentage. + */ + public static int utilization(long obsoleteSize, long totalSize) { + if (totalSize == 0) { + return 0; + } + return Math.round(((100.0F * (totalSize - obsoleteSize)) / totalSize)); + } + + /** + * Reset all totals to zero. + */ + public void reset() { + + totalCount = 0; + totalSize = 0; + totalINCount = 0; + totalINSize = 0; + totalLNCount = 0; + totalLNSize = 0; + maxLNSize = 0; + obsoleteINCount = 0; + obsoleteLNCount = 0; + obsoleteLNSize = 0; + obsoleteLNSizeCounted = 0; + } + + /** + * Add the totals of the given summary object to the totals of this object. 
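/* Editorial note: the utilization formula above, extracted into a runnable worked
   example. The method body mirrors the static utilization(long, long) defined in this
   class; the surrounding class and main() are added for illustration. */

class UtilizationExample {

    static int utilization(long obsoleteSize, long totalSize) {
        if (totalSize == 0) {
            return 0;
        }
        return Math.round((100.0F * (totalSize - obsoleteSize)) / totalSize);
    }

    public static void main(String[] args) {
        System.out.println(utilization(3_000_000, 10_000_000)); // 70
        System.out.println(utilization(0, 0));                  // 0 for an empty file
    }
}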
+ */ + public void add(FileSummary o) { + + totalCount += o.totalCount; + totalSize += o.totalSize; + totalINCount += o.totalINCount; + totalINSize += o.totalINSize; + totalLNCount += o.totalLNCount; + totalLNSize += o.totalLNSize; + if (maxLNSize < o.maxLNSize) { + maxLNSize = o.maxLNSize; + } + obsoleteINCount += o.obsoleteINCount; + obsoleteLNCount += o.obsoleteLNCount; + obsoleteLNSize += o.obsoleteLNSize; + obsoleteLNSizeCounted += o.obsoleteLNSizeCounted; + } + + public int getLogSize() { + + return 11 * LogUtils.getIntLogSize(); + } + + public void writeToLog(ByteBuffer buf) { + + LogUtils.writeInt(buf, totalCount); + LogUtils.writeInt(buf, totalSize); + LogUtils.writeInt(buf, totalINCount); + LogUtils.writeInt(buf, totalINSize); + LogUtils.writeInt(buf, totalLNCount); + LogUtils.writeInt(buf, totalLNSize); + LogUtils.writeInt(buf, maxLNSize); + LogUtils.writeInt(buf, obsoleteINCount); + LogUtils.writeInt(buf, obsoleteLNCount); + LogUtils.writeInt(buf, obsoleteLNSize); + LogUtils.writeInt(buf, obsoleteLNSizeCounted); + } + + public void readFromLog(ByteBuffer buf, int entryVersion) { + + totalCount = LogUtils.readInt(buf); + totalSize = LogUtils.readInt(buf); + totalINCount = LogUtils.readInt(buf); + totalINSize = LogUtils.readInt(buf); + totalLNCount = LogUtils.readInt(buf); + totalLNSize = LogUtils.readInt(buf); + if (entryVersion >= 8) { + maxLNSize = LogUtils.readInt(buf); + } + obsoleteINCount = LogUtils.readInt(buf); + if (obsoleteINCount == -1) { + + /* + * If INs were not counted in an older log file written by 1.5.3 or + * earlier, consider all INs to be obsolete. This causes the file + * to be cleaned, and then IN counting will be accurate. + */ + obsoleteINCount = totalINCount; + } + obsoleteLNCount = LogUtils.readInt(buf); + + /* + * obsoleteLNSize and obsoleteLNSizeCounted were added in FileSummaryLN + * version 3. + */ + if (entryVersion >= 3) { + obsoleteLNSize = LogUtils.readInt(buf); + obsoleteLNSizeCounted = LogUtils.readInt(buf); + } else { + obsoleteLNSize = 0; + obsoleteLNSizeCounted = 0; + } + } + + public void dumpLog(StringBuilder buf, boolean verbose) { + + buf.append("

        "); + } + + /** + * Never called. + */ + public long getTransactionId() { + return 0; + } + + /** + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + dumpLog(buf, true); + return buf.toString(); + } +} diff --git a/src/com/sleepycat/je/cleaner/FilesToMigrate.java b/src/com/sleepycat/je/cleaner/FilesToMigrate.java new file mode 100644 index 0000000..bb9d66b --- /dev/null +++ b/src/com/sleepycat/je/cleaner/FilesToMigrate.java @@ -0,0 +1,251 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.SortedMap; +import java.util.StringTokenizer; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; + +/** + * Iterator over files that should be migrated by cleaning them, even if + * they don't need to be cleaned for other reasons. + * + * Files are migrated either because they are named in the + * CLEANER_FORCE_CLEAN_FILES parameter or their log version is prior to the + * CLEANER_UPGRADE_TO_LOG_VERSION parameter. + * + * An iterator is used rather than finding the entire set at startup to + * avoid opening a large number of files to examine their log version. For + * example, if all files are being migrated in a very large data set, this + * would involve opening a very large number of files in order to read + * their header. This could significantly delay application startup. + * + * Because we don't have the entire set at startup, we can't select the + * lowest utilization file from the set to clean next. Inteaad we iterate + * in file number order to increase the odds of cleaning lower utilization + * files first. + */ +class FilesToMigrate { + + private final EnvironmentImpl env; + + /** + * An array of pairs of file numbers, where each pair is a range of + * files to be force cleaned. Index i is the from value and i+1 is the + * to value, both inclusive. + */ + private long[] forceCleanFiles; + + /** Log version to upgrade to, or zero if none. */ + private int upgradeToVersion; + + /** Whether to continue checking the log version. */ + private boolean checkLogVersion; + + /** Whether hasNext() has prepared a valid nextFile. */ + private boolean nextAvailable; + + /** File to return; set by hasNext() and returned by next(). 
*/ + private long nextFile; + + FilesToMigrate(EnvironmentImpl env) { + this.env = env; + String forceCleanProp = env.getConfigManager().get + (EnvironmentParams.CLEANER_FORCE_CLEAN_FILES); + parseForceCleanFiles(forceCleanProp); + + upgradeToVersion = env.getConfigManager().getInt + (EnvironmentParams.CLEANER_UPGRADE_TO_LOG_VERSION); + if (upgradeToVersion == -1) { + upgradeToVersion = LogEntryType.LOG_VERSION; + } + + checkLogVersion = (upgradeToVersion != 0); + nextAvailable = false; + nextFile = -1; + } + + /** + * Returns whether there are more files to be migrated. Must be called + * while synchronized on the UtilizationProfile. + */ + boolean hasNext(SortedMap fileSummaryMap, + Set inProgressFiles) { + if (nextAvailable) { + /* hasNext() has returned true since the last next(). */ + return true; + } + long foundFile = -1; + for (long file : + fileSummaryMap.tailMap(nextFile + 1).keySet()) { + if (inProgressFiles.contains(file)) { + continue; + } + if (isForceCleanFile(file)) { + /* Found a file to force clean. */ + foundFile = file; + break; + } else if (checkLogVersion) { + try { + int logVersion = + env.getFileManager().getFileLogVersion(file); + if (logVersion < upgradeToVersion) { + /* Found a file to migrate. */ + foundFile = file; + break; + } else { + + /* + * All following files have a log version greater + * or equal to this one; stop checking. + */ + checkLogVersion = false; + } + } catch (RuntimeException e) { + /* Throw exception but allow iterator to continue. */ + nextFile = file; + throw e; + } + } + } + if (foundFile != -1) { + nextFile = foundFile; + nextAvailable = true; + return true; + } else { + return false; + } + } + + /** + * Returns the next file file to be migrated. Must be called while + * synchronized on the UtilizationProfile. + */ + long next(SortedMap fileSummaryMap, + Set inProgressFiles) + throws NoSuchElementException { + + if (hasNext(fileSummaryMap, inProgressFiles)) { + nextAvailable = false; + return nextFile; + } else { + throw new NoSuchElementException(); + } + } + + /** + * Returns whether the given file is in the forceCleanFiles set. + */ + private boolean isForceCleanFile(long file) { + + if (forceCleanFiles != null) { + for (int i = 0; i < forceCleanFiles.length; i += 2) { + long from = forceCleanFiles[i]; + long to = forceCleanFiles[i + 1]; + if (file >= from && file <= to) { + return true; + } + } + } + return false; + } + + /** + * Parses the je.cleaner.forceCleanFiles property value and initializes + * the forceCleanFiles field. + * + * @throws IllegalArgumentException via Environment ctor and + * setMutableConfig. + */ + private void parseForceCleanFiles(String propValue) + throws IllegalArgumentException { + + if (propValue == null || propValue.length() == 0) { + forceCleanFiles = null; + } else { + String errPrefix = "Error in " + + EnvironmentParams.CLEANER_FORCE_CLEAN_FILES.getName() + + "=" + propValue + ": "; + + StringTokenizer tokens = new StringTokenizer + (propValue, ",-", true /*returnDelims*/); + + /* Resulting list of Long file numbers. */ + List list = new ArrayList(); + + while (tokens.hasMoreTokens()) { + + /* Get "from" file number. */ + String fromStr = tokens.nextToken(); + long fromNum; + try { + fromNum = Long.parseLong(fromStr, 16); + } catch (NumberFormatException e) { + throw new IllegalArgumentException + (errPrefix + "Invalid hex file number: " + + fromStr); + } + + long toNum = -1; + if (tokens.hasMoreTokens()) { + + /* Get delimiter. 
*/ + String delim = tokens.nextToken(); + if (",".equals(delim)) { + toNum = fromNum; + } else if ("-".equals(delim)) { + + /* Get "to" file number." */ + if (tokens.hasMoreTokens()) { + String toStr = tokens.nextToken(); + try { + toNum = Long.parseLong(toStr, 16); + } catch (NumberFormatException e) { + throw new IllegalArgumentException + (errPrefix + + "Invalid hex file number: " + + toStr); + } + } else { + throw new IllegalArgumentException + (errPrefix + "Expected file number: " + + delim); + } + } else { + throw new IllegalArgumentException + (errPrefix + "Expected '-' or ',': " + delim); + } + } else { + toNum = fromNum; + } + + assert toNum != -1; + list.add(Long.valueOf(fromNum)); + list.add(Long.valueOf(toNum)); + } + + forceCleanFiles = new long[list.size()]; + for (int i = 0; i < forceCleanFiles.length; i += 1) { + forceCleanFiles[i] = list.get(i).longValue(); + } + } + } +} diff --git a/src/com/sleepycat/je/cleaner/INSummary.java b/src/com/sleepycat/je/cleaner/INSummary.java new file mode 100644 index 0000000..28521b6 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/INSummary.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +/** + * Used to trace the relative numbers of full INs and BIN-deltas that are + * obsolete vs active. May be used in the future for adjusting utilization. + */ +public class INSummary { + public int totalINCount; + public int totalINSize; + public int totalBINDeltaCount; + public int totalBINDeltaSize; + public int obsoleteINCount; + public int obsoleteINSize; + public int obsoleteBINDeltaCount; + public int obsoleteBINDeltaSize; + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + + buf.append(""); + + return buf.toString(); + } +} diff --git a/src/com/sleepycat/je/cleaner/LNInfo.java b/src/com/sleepycat/je/cleaner/LNInfo.java new file mode 100644 index 0000000..a0e91a7 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/LNInfo.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.tree.LN; + +/** + * The information necessary to lookup an LN. Used for pending LNs that are + * locked and must be migrated later, or cannot be migrated immediately during + * a split. Also used in a look ahead cache in FileProcessor. + * + * Is public for Sizeof only. 
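/* Editorial note: a simplified standalone rendering of the "from[-to][,from[-to]]..."
   hex range syntax accepted by parseForceCleanFiles above. This sketch uses
   String.split instead of StringTokenizer and omits the detailed error messages; it is
   illustrative, not JE code. */

import java.util.ArrayList;
import java.util.List;

class ForceCleanRangeParserSketch {

    // Returns pairs of {from, to} file numbers, both inclusive, as in the
    // forceCleanFiles array. E.g. "3,7-a" yields [3, 3, 7, 10].
    static long[] parse(String propValue) {
        List<Long> pairs = new ArrayList<>();
        for (String range : propValue.split(",")) {
            String[] ends = range.split("-", 2);
            long from = Long.parseLong(ends[0].trim(), 16);
            long to = (ends.length == 2) ? Long.parseLong(ends[1].trim(), 16) : from;
            pairs.add(from);
            pairs.add(to);
        }
        long[] result = new long[pairs.size()];
        for (int i = 0; i < result.length; i++) {
            result[i] = pairs.get(i);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(parse("3,7-a")));
    }
}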
+ */ +public final class LNInfo { + + private final LN ln; + private final DatabaseId dbId; + private final byte[] key; + private final long expirationTime; + + public LNInfo(final LN ln, + final DatabaseId dbId, + final byte[] key, + final long expirationTime) { + this.ln = ln; + this.dbId = dbId; + this.key = key; + this.expirationTime = expirationTime; + } + + LN getLN() { + return ln; + } + + DatabaseId getDbId() { + return dbId; + } + + byte[] getKey() { + return key; + } + + long getExpirationTime() { + return expirationTime; + } + + /** + * Note that the dbId is not counted because it is shared with the + * DatabaseImpl, where it is accounted for in the memory budget. + */ + int getMemorySize() { + int size = MemoryBudget.LN_INFO_OVERHEAD; + if (ln != null) { + size += ln.getMemorySizeIncludedByParent(); + } + if (key != null) { + size += MemoryBudget.byteArraySize(key.length); + } + return size; + } +} diff --git a/src/com/sleepycat/je/cleaner/LocalUtilizationTracker.java b/src/com/sleepycat/je/cleaner/LocalUtilizationTracker.java new file mode 100644 index 0000000..eeca469 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/LocalUtilizationTracker.java @@ -0,0 +1,121 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.IdentityHashMap; +import java.util.Set; + +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; + +/** + * Accumulates changes to the utilization profile locally in a single thread. + * + *

Per-database information is keyed by DatabaseImpl so that no tree lookup + * of a database is required (as when a DatabaseId is used). + + * The countNewLogEntry, countObsoleteNode and countObsoleteNodeInexact + * methods may be called without taking the log write latch. Totals and offsets + * are accumulated locally in this object only, not in DatabaseImpl + * objects. + + * When finished with this object, its information should be added to the + * Environment's UtilizationTracker and DatabaseImpl objects by calling + * transferToUtilizationTracker under the log write latch. This is done in the + * Checkpointer, Evictor and INCompressor by calling + * UtilizationProfile.flushLocalTracker, which calls + * LogManager.transferToUtilizationTracker, which calls + * BaseLocalUtilizationTracker.transferToUtilizationTracker.
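/* Editorial note: a minimal sketch (invented names, not JE's classes) of the
   accumulate-locally-then-transfer pattern described above: one thread fills a private
   map of per-file deltas with no locking, then merges it into a shared tracker under a
   lock (the analogue of the log write latch). */

import java.util.HashMap;
import java.util.Map;

class LocalDeltaTrackerSketch {

    // Private to one thread; no synchronization needed while accumulating.
    final Map<Long, Long> obsoleteBytesByFile = new HashMap<>();

    void countObsolete(long fileNum, int size) {
        obsoleteBytesByFile.merge(fileNum, (long) size, Long::sum);
    }

    // Called once, while holding the shared tracker's lock, to publish
    // everything accumulated so far.
    void transferTo(Map<Long, Long> sharedTracker) {
        for (Map.Entry<Long, Long> e : obsoleteBytesByFile.entrySet()) {
            sharedTracker.merge(e.getKey(), e.getValue(), Long::sum);
        }
        obsoleteBytesByFile.clear();
    }
}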
        + */ +public class LocalUtilizationTracker extends BaseLocalUtilizationTracker { + + public LocalUtilizationTracker(EnvironmentImpl env) { + super(env, new IdentityHashMap()); + } + + /** + * Counts the addition of all new log entries including LNs. + */ + public void countNewLogEntry(long lsn, + LogEntryType type, + int size, + DatabaseImpl db) { + countNew(lsn, db, type, size); + } + + /** + * Counts a node that has become obsolete and tracks the LSN offset, if + * non-zero, to avoid a lookup during cleaning. + * + *

A zero LSN offset is used as a special value when obsolete offset + * tracking is not desired. [#15365] The file header entry (at offset + * zero) is never counted as obsolete; it is assumed to be obsolete by the + * cleaner. + + * This method should only be called for LNs and INs (i.e., only for + * nodes). If type is null, we assume it is an LN.
        + */ + public void countObsoleteNode(long lsn, + LogEntryType type, + int size, + DatabaseImpl db) { + countObsolete + (lsn, db, type, size, + true, // countPerFile + true, // countPerDb + true, // trackOffset + true); // checkDupOffsets + } + + /** + * Counts as countObsoleteNode does, but since the LSN may be inexact, does + * not track the obsolete LSN offset. + * + *

This method should only be called for LNs and INs (i.e., only for + * nodes). If type is null, we assume it is an LN.
        + */ + public void countObsoleteNodeInexact(long lsn, + LogEntryType type, + int size, + DatabaseImpl db) { + countObsolete + (lsn, db, type, size, + true, // countPerFile + true, // countPerDb + false, // trackOffset + false); // checkDupOffsets + } + + public Set getTrackedDbs() { + return getDatabaseMap().keySet(); + } + + /** + * Returns the DatabaseImpl from the database key, which in this case is + * the DatabaseImpl. + */ + @Override + DatabaseImpl databaseKeyToDatabaseImpl(Object databaseKey) { + return (DatabaseImpl) databaseKey; + } + + /** + * Do nothing, since DbTree.getDb was not called by + * databaseKeyToDatabaseImpl. + */ + @Override + void releaseDatabaseImpl(DatabaseImpl db) { + } +} diff --git a/src/com/sleepycat/je/cleaner/OffsetList.java b/src/com/sleepycat/je/cleaner/OffsetList.java new file mode 100644 index 0000000..95f792a --- /dev/null +++ b/src/com/sleepycat/je/cleaner/OffsetList.java @@ -0,0 +1,221 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * List of LSN offsets as a linked list of segments. The reasons for using a + * list of this type and not a java.util.List are: + *
          + *
• Segments reduce memory overhead by storing long primitives rather than + * Long objects. Many longs per segment reduce link overhead. + * • Memory is only allocated for new segments, reducing the number of calls + * to update the memory budget. + * • This is an append-only list that supports a single appender thread and + * multiple unsynchronized reader threads. The caller is responsible for + * synchronizing such that only one thread calls add() at one time. The reader + * threads see data as it is changing but do not see inconsistent data (corrupt + * longs) and do not require synchronization for thread safety. + + * The algorithms here use traversal of the list segments rather than + * recursion to avoid using a lot of stack space.
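/* Editorial note: for orientation, a usage sketch of the class defined below,
   illustrative only and assuming the public API shown (add and toArray): a single
   writer appends while readers may concurrently snapshot a consistent prefix. */

import com.sleepycat.je.cleaner.OffsetList;

class OffsetListUsageSketch {

    static long[] snapshotExample() {
        OffsetList list = new OffsetList();
        for (long offset = 100; offset < 1000; offset += 100) {
            // Returns true each time SEGMENT_CAPACITY values fill a segment.
            list.add(offset, true /*checkDupOffsets*/);
        }
        // Readers may call toArray() concurrently with add(); they see a
        // consistent prefix without any synchronization.
        return list.toArray();
    }
}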
        + */ +public class OffsetList { + + static final int SEGMENT_CAPACITY = 100; + + /* + * Once the size of the list goes over this limit, we should not assert + * (self-check) by doing sequential searches though the list. Assertions + * can't be too expensive or they interfere with normal operation. + */ + static final int TOO_BIG_FOR_SELF_CHECK = 100; + + private Segment head; + private Segment tail; + private int size; + + public OffsetList() { + head = new Segment(); + tail = head; + } + + /** + * Adds the given value and returns whether a new segment was allocated. + */ + public boolean add(long value, boolean checkDupOffsets) { + + /* Each value added should be unique. */ + assert !checkDupOffsets || + size > TOO_BIG_FOR_SELF_CHECK || + !contains(value) : + LoggerUtils.getStackTrace + (new Exception("Dup Offset " + Long.toHexString(value))); + + /* + * Do not increment the size until the value is added so that reader + * threads do not try to read a value before it has been added. + */ + Segment oldTail = tail; + tail = tail.add(value); + size += 1; + return tail != oldTail; + } + + public int size() { + return size; + } + + /** + * Merges the given list and returns whether a segment was freed. + */ + boolean merge(OffsetList other) { + + boolean oneSegFreed = true; + Segment seg = other.head; + while (true) { + Segment next = seg.next(); + if (next != null) { + /* Insert a full segment at the beginning of the list. */ + seg.setNext(head); + head = seg; + seg = next; + } else { + /* Copy the last segment and discard it. */ + for (int i = 0; i < seg.size(); i += 1) { + if (add(seg.get(i), false)) { + /* The two partial segments did not fit into one. */ + oneSegFreed = false; + } + } + break; + } + } + return oneSegFreed; + } + + /** + * Returns an array of all values as longs. If a writer thread is + * appending to the list while this method is executing, some values may be + * missing from the returned array, but the operation is safe. + */ + public long[] toArray() { + + long[] a = new long[size]; + int next = 0; + + segments: for (Segment seg = head; seg != null; seg = seg.next()) { + for (int i = 0; i < seg.size(); i += 1) { + if (next >= a.length) { + break segments; + } + a[next] = seg.get(i); + next += 1; + } + } + + return a; + } + + /** + * Returns whether this list contains the given offset. + */ + boolean contains(long offset) { + + for (Segment seg = head; seg != null; seg = seg.next()) { + for (int i = 0; i < seg.size(); i += 1) { + if (seg.get(i) == offset) { + return true; + } + } + } + + return false; + } + + /** + * One segment of a OffsetList containing at most SEGMENT_CAPACITY values. + * public for Sizeof. + */ + public static class Segment { + + private int index; + private Segment next; + private final int[] values; + + /* public for Sizeof. */ + public Segment() { + values = new int[SEGMENT_CAPACITY]; + } + + /** + * Call this method on the tail. The new tail is returned, if + * allocating a new tail is necessary. + */ + Segment add(long value) { + if (index < values.length) { + + /* + * Increment index after adding the offset so that reader + * threads won't see a partial long value. + */ + values[index] = (int) value; + index += 1; + return this; + } else { + + /* + * Add the value to the new segment before assigning the next + * field so that reader threads can rely on more values being + * available whenever the next field is non-null. 
+ */ + Segment seg = new Segment(); + seg.values[0] = (int) value; + seg.index = 1; + next = seg; + return seg; + } + } + + /** + * Returns the value at the given index from this segment only. + */ + long get(int i) { + return ((long) values[i]) & 0xFFFFFFFF; + } + + /** + * Returns the next segment or null if this is the tail segment. + */ + Segment next() { + return next; + } + + /** + * Sets the next pointer during a merge. + */ + void setNext(Segment next) { + this.next = next; + } + + /** + * Returns the number of values in this segment. + */ + int size() { + return index; + } + } +} diff --git a/src/com/sleepycat/je/cleaner/PackedObsoleteInfo.java b/src/com/sleepycat/je/cleaner/PackedObsoleteInfo.java new file mode 100644 index 0000000..0808101 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/PackedObsoleteInfo.java @@ -0,0 +1,73 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.utilint.DbLsn; + +/** + * A sequence of obsolete info. + * + * To save memory, a TupleOutput is used to contain a sequence of {LSN-file, + * LSN-offset} tuples. Packed integers are used and memory is saved by not + * using an Object for each tuple, as would be needed in a Java collection. + * + * An OffsetList was not used because it does not use packed integers. + * PackedOffsets was not used because it depends on offsets being sorted in + * ascending order. + * + * Only obsolete IN LSNs are supported. LNs are not counted using this + * approach. + */ +public class PackedObsoleteInfo extends TupleOutput { + + public PackedObsoleteInfo() { + } + + public int getMemorySize() { + return MemoryBudget.tupleOutputSize(this); + } + + public void copyObsoleteInfo(final PackedObsoleteInfo other) { + writeFast(other.getBufferBytes(), + other.getBufferOffset(), + other.getBufferLength()); + } + + public void addObsoleteInfo(final long obsoleteLsn) { + + writePackedLong(DbLsn.getFileNumber(obsoleteLsn)); + writePackedLong(DbLsn.getFileOffset(obsoleteLsn)); + } + + public void countObsoleteInfo( + final UtilizationTracker tracker, + final DatabaseImpl nodeDb) { + + final TupleInput in = new TupleInput(this); + + while (in.available() > 0) { + final long fileNumber = in.readPackedLong(); + long fileOffset = in.readPackedLong(); + + tracker.countObsoleteNode( + DbLsn.makeLsn(fileNumber, fileOffset), + LogEntryType.LOG_IN, 0, nodeDb); + } + } +} diff --git a/src/com/sleepycat/je/cleaner/PackedOffsets.java b/src/com/sleepycat/je/cleaner/PackedOffsets.java new file mode 100644 index 0000000..1678546 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/PackedOffsets.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; + +/** + * Stores a sorted list of LSN offsets in a packed short representation. Each + * stored value is the difference between two consecutive offsets. The stored + * values are stored as one or more shorts where each short holds 0x7fff + * values. Shorts are in LSB order. The value is negated if more shorts for + * the same offset follow; this works because offsets are always positive + * values. + */ +public class PackedOffsets implements Loggable { + + private short[] data; + private int size; + + /** + * Creates an empty object. + */ + public PackedOffsets() { + + /* + * Verify assumption in FileSummaryLN that a new PackedOffsets instance + * has no extra extra memory that must be budgeted. + */ + assert getExtraMemorySize() == 0; + } + + /** + * Returns an iterator over all offsets. + */ + Iterator iterator() { + return new Iterator(); + } + + /** + * Packs the given offsets, replacing any offsets stored in this object. + */ + public void pack(long[] offsets) { + + /* Allocate a maximum sized new data array. */ + short[] newData = new short[offsets.length * 3]; + + /* Pack the sorted offsets. */ + Arrays.sort(offsets); + int dataIndex = 0; + long priorVal = 0; + for (int i = 0; i < offsets.length; i += 1) { + long val = offsets[i]; + dataIndex = append(newData, dataIndex, val - priorVal); + priorVal = val; + } + + /* Copy in the exact sized new data. */ + data = new short[dataIndex]; + System.arraycopy(newData, 0, data, 0, dataIndex); + size = offsets.length; + } + + /** + * Returns the unpacked offsets. + */ + long[] toArray() { + long[] offsets = new long[size]; + int index = 0; + Iterator iter = iterator(); + while (iter.hasNext()) { + offsets[index++] = iter.next(); + } + assert index == size; + return offsets; + } + + /** + * Copies the given value as a packed long to the array starting at the + * given index. Returns the index of the next position in the array. + */ + private int append(short[] to, int index, long val) { + + assert val >= 0; + + while (true) { + short s = (short) (val & 0x7fff); + val >>>= 15; + if (val > 0) { + to[index++] = (short) (-1 - s); + } else { + to[index++] = s; + break; + } + } + return index; + } + + /** + * An iterator over all offsets. + */ + class Iterator { + + private int index; + private long priorVal; + + private Iterator() { + } + + boolean hasNext() { + return data != null && index < data.length; + } + + long next() { + long val = priorVal; + for (int shift = 0;; shift += 15) { + long s = data[index++]; + if (s < 0) { + val += (-1 - s) << shift; + } else { + val += s << shift; + break; + } + } + priorVal = val; + return val; + } + } + + /** + * Return the extra memory used by this object when the pack() method has + * been called to allocate the data array. 
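/* Editorial note: a standalone sketch of the packing scheme described above. Sorted
   offsets are delta-encoded, and each delta is emitted as 15-bit chunks in LSB order,
   storing a chunk as (-1 - chunk) when more chunks follow. The append method mirrors
   the private append below; main() and the inline decode are added for illustration. */

class PackedShortEncodingSketch {

    static int append(short[] to, int index, long val) {
        while (true) {
            short s = (short) (val & 0x7fff);
            val >>>= 15;
            if (val > 0) {
                to[index++] = (short) (-1 - s); // negated: more chunks follow
            } else {
                to[index++] = s;                // final chunk
                break;
            }
        }
        return index;
    }

    public static void main(String[] args) {
        short[] data = new short[6];
        int used = append(data, 0, 40000L); // 40000 needs two 15-bit chunks
        System.out.println(used);           // 2
        // Decode: the first chunk is negative, so recover it as (-1 - chunk)
        // and add the next chunk shifted left 15 bits.
        long decoded = (-1 - data[0]) + ((long) data[1] << 15);
        System.out.println(decoded);        // 40000
    }
}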
+ */ + public int getExtraMemorySize() { + if (data != null) { + return MemoryBudget.shortArraySize(data.length); + } else { + return 0; + } + } + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + + int len = (data != null) ? data.length : 0; + return (LogUtils.getPackedIntLogSize(size) + + LogUtils.getPackedIntLogSize(len) + + (len * LogUtils.SHORT_BYTES)); + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer buf) { + + LogUtils.writePackedInt(buf, size); + if (data != null) { + LogUtils.writePackedInt(buf, data.length); + for (int i = 0; i < data.length; i += 1) { + LogUtils.writeShort(buf, data[i]); + } + } else { + LogUtils.writePackedInt(buf, 0); + } + } + + /** + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer buf, int entryVersion) { + + boolean unpacked = (entryVersion < 6); + size = LogUtils.readInt(buf, unpacked); + int len = LogUtils.readInt(buf, unpacked); + if (len > 0) { + data = new short[len]; + for (int i = 0; i < len; i += 1) { + data[i] = LogUtils.readShort(buf); + } + } + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder buf, boolean verbose) { + + if (size > 0) { + Iterator i = iterator(); + buf.append(""); + while (i.hasNext()) { + buf.append("0x"); + buf.append(Long.toHexString(i.next())); + buf.append(' '); + } + buf.append(""); + } else { + buf.append(""); + } + } + + /** + * Never called. + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return -1; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + dumpLog(buf, true); + return buf.toString(); + } +} diff --git a/src/com/sleepycat/je/cleaner/RecoveryUtilizationTracker.java b/src/com/sleepycat/je/cleaner/RecoveryUtilizationTracker.java new file mode 100644 index 0000000..813c9ff --- /dev/null +++ b/src/com/sleepycat/je/cleaner/RecoveryUtilizationTracker.java @@ -0,0 +1,200 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Accumulates changes to the utilization profile during recovery. + * + *

Per-database information is keyed by DatabaseId because the DatabaseImpl + * is not always available during recovery. In fact this is the only reason + * that a "local" tracker is used during recovery -- to avoid requiring that + * the DatabaseImpl is available, which is necessary to use the "global" + * UtilizationTracker. There is no requirement to accumulate totals locally, + * since recovery is single threaded. + + * When finished with this object, its information should be added to the + * Environment's UtilizationTracker and DatabaseImpl objects by calling + * transferToUtilizationTracker. This is done at the end of recovery, just + * prior to the checkpoint. It does not have to be done under the log write + * latch, since recovery is single threaded.
        + */ +public class RecoveryUtilizationTracker extends BaseLocalUtilizationTracker { + + /* File number -> LSN of FileSummaryLN */ + private final Map fileSummaryLsns; + + /* DatabaseId -> LSN of MapLN */ + private final Map databaseLsns; + + public RecoveryUtilizationTracker(EnvironmentImpl env) { + super(env, new HashMap()); + fileSummaryLsns = new HashMap(); + databaseLsns = new HashMap(); + } + + /** + * Saves the LSN of the last logged FileSummaryLN. + */ + public void saveLastLoggedFileSummaryLN(long fileNum, long lsn) { + fileSummaryLsns.put(Long.valueOf(fileNum), Long.valueOf(lsn)); + } + + /** + * Saves the LSN of the last logged MapLN. + */ + public void saveLastLoggedMapLN(DatabaseId dbId, long lsn) { + databaseLsns.put(dbId, Long.valueOf(lsn)); + } + + /** + * Counts the addition of all new log entries including LNs. + */ + public void countNewLogEntry( + long lsn, + LogEntryType type, + int size, + DatabaseId dbId) { + + countNew(lsn, dbId, type, size); + } + + /** + * Counts the LSN of a node obsolete unconditionally. + * + * Even when trackOffset is true, duplicate offsets are not checked (no + * assertion is fired) because recovery is known to count the same LSN + * offset twice in certain circumstances. + */ + public void countObsoleteUnconditional( + long lsn, + LogEntryType type, + int size, + DatabaseId dbId, + boolean trackOffset) { + + countObsolete( + lsn, dbId, type, size, + true, // countPerFile + true, // countPerDb + trackOffset, + false); // checkDupOffsets + } + + /** + * Counts the oldLsn of a node obsolete if it has not already been counted + * at the point of lsn in the log. + * + * Even when trackOffset is true, duplicate offsets are not checked (no + * assertion is fired) because recovery is known to count the same LSN + * offset twice in certain circumstances. + * + * @return whether the file was previously uncounted. + */ + public boolean countObsoleteIfUncounted( + long oldLsn, + long newLsn, + LogEntryType type, + int size, + DatabaseId dbId, + boolean trackOffset) { + + Long fileNum = Long.valueOf(DbLsn.getFileNumber(oldLsn)); + + boolean fileUncounted = isFileUncounted(fileNum, newLsn); + boolean dbUncounted = isDbUncounted(dbId, newLsn); + + countObsolete( + oldLsn, dbId, type, size, + fileUncounted, // countPerFile + dbUncounted, // countPerDb + trackOffset, + false); // checkDupOffsets + + return fileUncounted; + } + + /** + * Overrides this method for recovery and returns whether the most recently + * seen FileSummaryLN for the given file is prior to the given LSN. + */ + @Override + boolean isFileUncounted(Long fileNum, long lsn) { + + long fsLsn = DbLsn.longToLsn(fileSummaryLsns.get(fileNum)); + + int cmpFsLsnToNewLsn = (fsLsn != DbLsn.NULL_LSN ? + DbLsn.compareTo(fsLsn, lsn) : -1); + + return cmpFsLsnToNewLsn < 0; + } + + /** + * Returns whether the MapLN for the given database ID is prior to the + * given LSN. + */ + private boolean isDbUncounted(DatabaseId dbId, long lsn) { + long dbLsn = DbLsn.longToLsn(databaseLsns.get(dbId)); + int cmpDbLsnToLsn = (dbLsn != DbLsn.NULL_LSN) ? + DbLsn.compareTo(dbLsn, lsn) : -1; + return cmpDbLsnToLsn < 0; + } + + /** + * Clears all accmulated utilization info for the given file. + */ + public void resetFileInfo(long fileNum) { + TrackedFileSummary trackedSummary = getTrackedFile(fileNum); + if (trackedSummary != null) { + trackedSummary.reset(); + } + } + + /** + * Clears all accmulated utilization info for the given database. 
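/* Editorial note: a sketch (assumed simplification, invented names) of the
   "count only if uncounted" gate used by countObsoleteIfUncounted above: an obsolete
   count for a file is applied only when the last FileSummaryLN recorded for that file
   was logged before the LSN now being replayed, so the persisted summary cannot
   already include the change. JE compares real LSNs via DbLsn.compareTo; plain longs
   stand in for them here. */

import java.util.HashMap;
import java.util.Map;

class CountIfUncountedSketch {

    // fileNum -> LSN of the most recently seen FileSummaryLN (null = none).
    final Map<Long, Long> fileSummaryLsns = new HashMap<>();

    boolean isFileUncounted(long fileNum, long newLsn) {
        Long fsLsn = fileSummaryLsns.get(fileNum);
        return fsLsn == null || fsLsn < newLsn; // summary absent or stale
    }
}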
+ */ + public void resetDbInfo(DatabaseId dbId) { + removeDbFileSummaries(dbId); + } + + /** + * Returns the DatabaseImpl from the database key, which in this case is + * the DatabaseId. + */ + @Override + DatabaseImpl databaseKeyToDatabaseImpl(Object databaseKey) + throws DatabaseException { + + DatabaseId dbId = (DatabaseId) databaseKey; + return env.getDbTree().getDb(dbId); + } + + /** + * Must release the database, since DbTree.getDb was called by + * databaseKeyToDatabaseImpl. + */ + @Override + void releaseDatabaseImpl(DatabaseImpl db) { + env.getDbTree().releaseDb(db); + } +} diff --git a/src/com/sleepycat/je/cleaner/ReservedFileInfo.java b/src/com/sleepycat/je/cleaner/ReservedFileInfo.java new file mode 100644 index 0000000..367834d --- /dev/null +++ b/src/com/sleepycat/je/cleaner/ReservedFileInfo.java @@ -0,0 +1,75 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.utilint.VLSN; + +/** + */ +public class ReservedFileInfo { + public final VLSN firstVLSN; + public final VLSN lastVLSN; + public final Set dbIds; + + ReservedFileInfo(final VLSN firstVLSN, + final VLSN lastVLSN, + final Set dbIds) { + this.firstVLSN = firstVLSN; + this.lastVLSN = lastVLSN; + this.dbIds = dbIds; + } + + public static Long entryToKey(final DatabaseEntry entry) { + return LongBinding.entryToLong(entry); + } + + public static void keyToEntry(Long key, final DatabaseEntry entry) { + LongBinding.longToEntry(key, entry); + } + + public static ReservedFileInfo entryToObject(final DatabaseEntry entry) { + final TupleInput input = TupleBase.entryToInput(entry); + input.readByte(); /* Future flags. */ + final VLSN firstVLSN = new VLSN(input.readPackedLong()); + final VLSN lastVLSN = new VLSN(input.readPackedLong()); + final Set dbIds = new HashSet<>(); + final int nDbs = input.readPackedInt(); + for (int i = 0; i < nDbs; i += 1) { + dbIds.add(new DatabaseId(input.readPackedLong())); + } + return new ReservedFileInfo(firstVLSN, lastVLSN, dbIds); + } + + public static void objectToEntry(final ReservedFileInfo info, + final DatabaseEntry entry) { + final TupleOutput output = new TupleOutput(); + output.writeByte(0); /* Future flags. 
*/ + output.writePackedLong(info.firstVLSN.getSequence()); + output.writePackedLong(info.lastVLSN.getSequence()); + output.writePackedInt(info.dbIds.size()); + for (final DatabaseId id : info.dbIds) { + output.writePackedLong(id.getId()); + } + TupleBase.outputToEntry(output, entry); + } +} diff --git a/src/com/sleepycat/je/cleaner/TrackedFileSummary.java b/src/com/sleepycat/je/cleaner/TrackedFileSummary.java new file mode 100644 index 0000000..8cfde2c --- /dev/null +++ b/src/com/sleepycat/je/cleaner/TrackedFileSummary.java @@ -0,0 +1,201 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import com.sleepycat.je.dbi.MemoryBudget; + +/** + * Delta file summary info for a tracked file. Tracked files are managed by + * the UtilizationTracker. + * + *

        The methods in this class for reading obsolete offsets may be used by + * multiple threads without synchronization even while another thread is adding + * offsets. This is possible because elements are never deleted from the + * lists. The thread adding obsolete offsets does so under the log write + * latch to prevent multiple threads from adding concurrently.

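The lock-free read guarantee described above can be illustrated with a minimal, self-contained append-only list; this is a sketch of the general pattern, not JE's actual OffsetList.

    // Single writer appends (under the log write latch in JE); readers
    // may traverse concurrently without locks because links are only
    // ever added, never removed, and 'next' is published via volatile.
    final class AppendOnlyOffsets {
        static final class Node {
            final long offset;
            volatile Node next;
            Node(long offset) { this.offset = offset; }
        }

        private final Node head = new Node(-1);  // sentinel
        private Node tail = head;                // writer-only state

        void add(long offset) {                  // called by the one writer
            Node n = new Node(offset);
            tail.next = n;                       // volatile write publishes n
            tail = n;
        }

        boolean contains(long offset) {          // safe on any thread
            for (Node n = head.next; n != null; n = n.next) {
                if (n.offset == offset) {
                    return true;
                }
            }
            return false;
        }
    }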
        + */ +public class TrackedFileSummary extends FileSummary { + + private BaseUtilizationTracker tracker; + private long fileNum; + private OffsetList obsoleteOffsets; + private int memSize; + private boolean trackDetail; + private boolean allowFlush = true; + + /** + * Creates an empty tracked summary. + */ + TrackedFileSummary(BaseUtilizationTracker tracker, + long fileNum, + boolean trackDetail) { + this.tracker = tracker; + this.fileNum = fileNum; + this.trackDetail = trackDetail; + } + + /** + * Returns whether this summary is allowed or prohibited from being flushed + * or evicted during cleaning. By default, flushing is allowed. + */ + public boolean getAllowFlush() { + return allowFlush; + } + + /** + * Allows or prohibits this summary from being flushed or evicted during + * cleaning. By default, flushing is allowed. + */ + void setAllowFlush(boolean allowFlush) { + this.allowFlush = allowFlush; + } + + /** + * Returns the file number being tracked. + */ + public long getFileNumber() { + return fileNum; + } + + /** + * Return the total memory size for this object. We only bother to budget + * obsolete detail, not the overhead for this object, for two reasons: + * 1) The number of these objects is very small, and 2) unit tests disable + * detail tracking as a way to prevent budget adjustments here. + */ + int getMemorySize() { + return memSize; + } + + /** + * Overrides reset for a tracked file, and is called when a FileSummaryLN + * is written to the log. + * + *

        Must be called under the log write latch.

        + */ + @Override + public void reset() { + assert tracker != null; + + obsoleteOffsets = null; + + tracker.resetFile(this); + + if (memSize > 0) { + updateMemoryBudget(0 - memSize); + } + + super.reset(); + } + + /** + * Tracks the given offset as obsolete or non-obsolete. + * + *

        Must be called under the log write latch.

        + */ + void trackObsolete(long offset, boolean checkDupOffsets) { + + if (!trackDetail) { + return; + } + + int adjustMem = 0; + if (obsoleteOffsets == null) { + obsoleteOffsets = new OffsetList(); + adjustMem += MemoryBudget.TFS_LIST_INITIAL_OVERHEAD; + } + + if (obsoleteOffsets.add(offset, checkDupOffsets)) { + adjustMem += MemoryBudget.TFS_LIST_SEGMENT_OVERHEAD; + } + + if (adjustMem != 0) { + updateMemoryBudget(adjustMem); + } + } + + /** + * Adds the obsolete offsets as well as the totals of the given object. + */ + void addTrackedSummary(TrackedFileSummary other) { + + /* Add the totals. */ + add(other); + + /* + * Add the offsets and the memory used [#15505] by the other tracker. + * The memory budget has already been updated for the offsets to be + * added, so we only need to account for a possible difference of one + * segment when we merge them. + */ + memSize += other.memSize; + if (other.obsoleteOffsets != null) { + if (obsoleteOffsets != null) { + /* Merge the other offsets into our list. */ + if (obsoleteOffsets.merge(other.obsoleteOffsets)) { + /* There is one segment less as a result of the merge. */ + updateMemoryBudget + (- MemoryBudget.TFS_LIST_SEGMENT_OVERHEAD); + } + } else { + /* Adopt the other's offsets as our own. */ + obsoleteOffsets = other.obsoleteOffsets; + } + } + } + + /** + * Returns obsolete offsets as an array of longs, or null if none. + */ + public long[] getObsoleteOffsets() { + + if (obsoleteOffsets != null) { + return obsoleteOffsets.toArray(); + } else { + return null; + } + } + + /** + * Returns whether the given offset is present in the tracked offsets. + * This does not indicate whether the offset is obsolete in general, but + * only if it is known to be obsolete in this version of the tracked + * information. + */ + boolean containsObsoleteOffset(long offset) { + + if (obsoleteOffsets != null) { + return obsoleteOffsets.contains(offset); + } else { + return false; + } + } + + private void updateMemoryBudget(int delta) { + assert tracker != null; + memSize += delta; + tracker.env.getMemoryBudget().updateAdminMemoryUsage(delta); + } + + /** + * Update memory budgets when this tracker is closed and will never be + * accessed again. + */ + void close() { + assert tracker != null; + tracker.env.getMemoryBudget().updateAdminMemoryUsage(0-memSize); + tracker = null; + memSize = 0; + } +} diff --git a/src/com/sleepycat/je/cleaner/UtilizationCalculator.java b/src/com/sleepycat/je/cleaner/UtilizationCalculator.java new file mode 100644 index 0000000..7be352d --- /dev/null +++ b/src/com/sleepycat/je/cleaner/UtilizationCalculator.java @@ -0,0 +1,495 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.Pair; + +/** + * Contains methods for calculating utilization and for selecting files to + * clean. + * + * Note that we do clean files that are protected from deletion by HA/DataSync. + * If we did not clean them and a large number of files were to become + * unprotected at once, a large amount of log cleaning may suddenly be + * necessary. Cleaning the files avoids this. Better still would be to delete + * the metadata, but that would require writing a log entry to indicate the + * file is ready to be deleted, to avoid cleaning from scratch after a crash. + * [#16643] [#19221] + * + * Historical note: Prior to JE 6.0, LN utilization adjustments were needed + * because the LN last logged size was not stored in the BIN [#18633]. + * Originally in JE 5, the corrected average LN size was used to adjust + * utilization. This was changed later in JE 5 to a correction factor since + * different log files may have different average LN sizes [#21106]. Then in + * JE 6.0 the last logged size was added to the BIN, the default for + * {@link com.sleepycat.je.EnvironmentConfig#CLEANER_ADJUST_UTILIZATION} was + * changed to false and a warning was added that the feature will be removed in + * the future [#22275]. Finally in JE 6.3 the LN adjustment code and data in + * CheckpointEnd were removed and the parameter was deprecated [#24090]. + * + * Unlike with LNs, we do not store the last logged size of INs, so their + * obsolete size is computed as an average and this has the potential to cause + * over/under-cleaning. This problem is not known to occur, but if there are + * over/under-cleaning problems we should examine the recalculated info that is + * output as part of the CleanerRun INFO message. + * + * === Expired Data and Utilization === + * + * Per-file histograms are calculated by the {@link ExpirationTracker} and + * stored in an internal database and cache by the {@link ExpirationProfile}. + * The histograms are used to calculate the expired bytes for a file at a + * particular time. Since obsolete (not expired) data can overlap with expired + * data, upper and lower bounds for overall utilization are determined. When + * the lower bound is below minUtilization, cleaning occurs. + * + * The file that has the lowest average utilization (midway between its upper + * and lower bounds) is selected for cleaning. If the file's upper and lower + * bounds are not close together (or the same), and the upper bound is above a + * threshold, then two-pass cleaning is performed. See + * {@link EnvironmentParams#CLEANER_TWO_PASS_GAP} and + * {@link EnvironmentParams#CLEANER_TWO_PASS_THRESHOLD}. + * + * The first pass of two-pass cleaning reads the file but doesn't do any real + * cleaning (no side effects). If this pass finds that the true utilization of + * the file is above the threshold (the same threshold as above), then cleaning + * does not take place and instead the histogram is updated. 
In this case the + * obsolete and expired data (in the old histogram) overlap, and it is the + * overlap that caused utilization to be estimated incorrectly. The new/updated + * histogram is calculated such that there is no overlap, improving utilization + * accuracy. If the first pass finds that true utilization is below the + * threshold, then normal cleaning (pass two) occurs. Two-pass cleaning + * protects against "over cleaning". + * + * The use of the overall utilization lower bound to drive cleaning is + * considered sufficient to protect against "under cleaning". Therefore, a disk + * space threshold is unnecessary. + * + * Gradual expiration is used to prevent cleaning spikes on day or hour + * boundaries. For purposes of driving cleaning, the utilization lower bound is + * calculated by distributing the bytes that expired in the current day/hour + * period evenly over that day/hour. + */ +public class UtilizationCalculator implements EnvConfigObserver { + + private final EnvironmentImpl env; + private final Cleaner cleaner; + private final Logger logger; + private FilesToMigrate filesToMigrate; + private volatile int currentMinUtilization = -1; + private volatile int currentMaxUtilization = -1; + private volatile int predictedMinUtilization = -1; + private volatile int predictedMaxUtilization = -1; + + UtilizationCalculator(EnvironmentImpl env, Cleaner cleaner) { + this.env = env; + this.cleaner = cleaner; + logger = LoggerUtils.getLogger(getClass()); + filesToMigrate = new FilesToMigrate(env); + env.addConfigObserver(this); + } + + int getCurrentMinUtilization() { + return currentMinUtilization; + } + + int getCurrentMaxUtilization() { + return currentMaxUtilization; + } + + int getPredictedMinUtilization() { + return predictedMinUtilization; + } + + int getPredictedMaxUtilization() { + return predictedMaxUtilization; + } + + /** + * Returns the best file that qualifies for cleaning or probing, or null + * if no file qualifies. + * + * This method is called by FileSelector and synchronization order is: + * 1-FileSelector, 2-UtilizationCalculator, 3-ExpirationProfile. + * + * @param fileSummaryMap the map containing file summary info. + * + * @param forceCleaning is true to always select a file, even if its + * utilization is above the minimum utilization threshold. + * + * @return {file number, required utilization for 2-pass cleaning}, + * or null if no file qualifies for cleaning. + */ + synchronized Pair getBestFile( + final SortedMap fileSummaryMap, + final boolean forceCleaning) { + + /* Paranoia. There should always be 1 file. */ + if (fileSummaryMap.size() == 0) { + LoggerUtils.logMsg(logger, env, Level.SEVERE, + "Can't clean, map is empty."); + return null; + } + + final FileSelector fileSelector = cleaner.getFileSelector(); + final Set inProgressFiles = fileSelector.getInProgressFiles(); + + /* Refresh expiration info so it reflects the current time. */ + final ExpirationProfile expProfile = cleaner.getExpirationProfile(); + final long currentTime = TTL.currentSystemTime(); + expProfile.refresh(currentTime); + + /* + * Use local variables for mutable properties. Using values that are + * changing during a single file selection pass would not produce a + * well defined result. + * + * Note that age is a distance between files not a number of files, + * that is, deleted files are counted in the age. 
+ */ + final int totalThreshold = cleaner.minUtilization; + final int fileThreshold = cleaner.minFileUtilization; + final int twoPassThreshold = cleaner.twoPassThreshold; + final int twoPassGap = cleaner.twoPassGap; + final int minAge = cleaner.minAge; + final boolean gradualExpiration = cleaner.gradualExpiration; + final boolean expirationEnabled = env.isExpirationEnabled(); + + /* + * Cleaning must refrain from rearranging the portion of the log processed + * at recovery time. Do not clean a file greater than or equal to the first + * active file used in recovery, which is either the last log file or + * the file of the first active LSN in an active transaction, whichever + * is earlier. + * + * TxnManager.getFirstActiveLsn() (firstActiveTxnLsn below) is + * guaranteed to be earlier than or equal to the first active LSN of the + * checkpoint that will be performed before deleting the selected log + * file. By selecting a file prior to this point we ensure that we will + * not clean any entry that may be replayed by recovery. + * + * For example: + * 200 ckptA start, determines that ckpt's firstActiveLsn = 100 + * 400 ckptA end + * 600 ckptB start, determines that ckpt's firstActiveLsn = 300 + * 800 ckptB end + * + * Any cleaning that executes before ckptA start will be constrained + * to files <= lsn 100, because it will have checked the TxnManager. + * If cleaning executes after ckptA start, it may indeed clean after + * ckptA's firstActiveLsn, but the cleaning run will wait until ckptB end + * to delete files. + */ + long firstActiveFile = fileSummaryMap.lastKey(); + final long firstActiveTxnLsn = env.getTxnManager().getFirstActiveLsn(); + + if (firstActiveTxnLsn != DbLsn.NULL_LSN) { + + long firstActiveTxnFile = + DbLsn.getFileNumber(firstActiveTxnLsn); + + if (firstActiveFile > firstActiveTxnFile) { + firstActiveFile = firstActiveTxnFile; + } + } + + /* + * Note that minAge is at least one and may be configured to a higher + * value to prevent cleaning recently active files. + */ + final long lastFileToClean = firstActiveFile - minAge; + + /* + * Given the total, obsolete and expired bytes: + * + * min utilization: 100 * ((total - (obsolete + expired)) / total) + * max utilization: 100 * ((total - max(obsolete, expired)) / total) + * avg utilization: (min + max) / 2 + * + * Calculate min/max utilization in several categories: + * + * + Current total utilization, not counting the results of cleaning + * that has not yet occurred, and using the current expired sizes. + * + * + Predicted total utilization, estimating the results of cleaning + * yet to occur, and using a gradual expired size to prevent cleaning + * spikes after hour/day boundaries. + * + * + Utilization for the "best" file to use when cleaning normally. The + * file with the lowest avg utilization is selected. + * + * + Also determine the "best gradual" file with the lowest gradual + * max utilization. Note that when a file is selected due to the + * minFileUtilization threshold, gradual utilization must be used. + * Otherwise, cleaning due to this threshold would defeat gradual + * expiration in general.
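As a worked instance of the bounds above (illustrative helper, not JE code): with total=100, obsolete=30 and expired=20, min utilization is 50 (obsolete and expired assumed disjoint), max utilization is 70 (the smaller set assumed contained in the larger), and the average is 60.

    // Illustrative only; mirrors the min/max/avg formulas above.
    // Assumes total > 0 and obsolete, expired are within [0, total].
    static int[] utilizationBounds(int total, int obsolete, int expired) {
        int maxObsolete = Math.min(obsolete + expired, total); // no overlap
        int minObsolete = Math.max(obsolete, expired);         // full overlap
        int minUtil = 100 * (total - maxObsolete) / total;
        int maxUtil = 100 * (total - minObsolete) / total;
        return new int[] { minUtil, maxUtil, (minUtil + maxUtil) / 2 };
    }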
+ */ + Long bestFile = null; + int bestFileAvgUtil = 101; + int bestFileMinUtil = 0; + int bestFileMaxUtil = 0; + Long bestGradualFile = null; + int bestGradualFileMaxUtil = 101; + long currentTotalSize = 0; + long currentMinObsoleteSize = 0; + long currentMaxObsoleteSize = 0; + long predictedTotalSize = 0; + long predictedMinObsoleteSize = 0; + long predictedMaxObsoleteSize = 0; + + for (final Map.Entry entry : + fileSummaryMap.entrySet()) { + + final Long file = entry.getKey(); + final long fileNum = file; + + if (!env.getFileProtector().isActiveOrNewFile(file)) { + continue; + } + + final FileSummary summary = entry.getValue(); + + final int expiredSize; + final int expiredGradualSize; + + if (expirationEnabled) { + + final Pair expiredSizes = + expProfile.getExpiredBytes(fileNum, currentTime); + + expiredSize = Math.min( + expiredSizes.first(), summary.totalSize); + + expiredGradualSize = gradualExpiration ? + Math.min(expiredSizes.second(), summary.totalSize) : + expiredSize; + + } else { + expiredSize = 0; + expiredGradualSize = 0; + } + + /* + * If the file is safe-to-delete, it is entirely obsolete by + * definition. This is more accurate than using getObsoleteSize. + */ + final int obsoleteSize = summary.getObsoleteSize(); + + final int minObsoleteSize = Math.max( + obsoleteSize, expiredSize); + + final int maxObsoleteSize = Math.min( + obsoleteSize + expiredSize, + summary.totalSize); + + final int minGradualObsoleteSize = Math.max( + obsoleteSize, expiredGradualSize); + + final int maxGradualObsoleteSize = Math.min( + obsoleteSize + expiredGradualSize, + summary.totalSize); + + currentTotalSize += summary.totalSize; + currentMinObsoleteSize += minObsoleteSize; + currentMaxObsoleteSize += maxObsoleteSize; + + /* + * If the file has been cleaned or is being cleaned, assume the + * file's data will occupy only its currently utilized bytes, after + * cleaning and deletion, based on the min obsolete size. This is + * an intentionally overly optimistic prediction of the results of + * cleaning, and is used to prevent over-cleaning, especially over-cleaning + * due to a backlog created by inaccurate predictions. + */ + if (inProgressFiles.contains(file)) { + final int utilizedSize = summary.totalSize - minObsoleteSize; + predictedTotalSize += utilizedSize; + continue; + } + + predictedTotalSize += summary.totalSize; + predictedMinObsoleteSize += minGradualObsoleteSize; + predictedMaxObsoleteSize += maxGradualObsoleteSize; + + /* Skip files that are too young to be cleaned. */ + if (fileNum > lastFileToClean) { + continue; + } + + /* + * Pick the "best" file -- the one having the lowest avg + * utilization so far. + */ + final int thisMinUtil = FileSummary.utilization( + maxObsoleteSize, summary.totalSize); + + final int thisMaxUtil = FileSummary.utilization( + minObsoleteSize, summary.totalSize); + + final int thisAvgUtil = (thisMinUtil + thisMaxUtil) / 2; + + if (bestFile == null || thisAvgUtil < bestFileAvgUtil) { + bestFile = file; + bestFileAvgUtil = thisAvgUtil; + bestFileMinUtil = thisMinUtil; + bestFileMaxUtil = thisMaxUtil; + } + + /* + * Pick the "best gradual" file -- the one having the lowest max + * gradual utilization so far.
+ */ + final int thisGradualMaxUtil = FileSummary.utilization( + minGradualObsoleteSize, summary.totalSize); + + if (bestGradualFile == null || + thisGradualMaxUtil < bestGradualFileMaxUtil) { + + bestGradualFile = file; + bestGradualFileMaxUtil = thisGradualMaxUtil; + } + } + + final int currentMinUtil = FileSummary.utilization( + currentMaxObsoleteSize, currentTotalSize); + + final int currentMaxUtil = FileSummary.utilization( + currentMinObsoleteSize, currentTotalSize); + + final int predictedMinUtil = FileSummary.utilization( + predictedMaxObsoleteSize, predictedTotalSize); + + final int predictedMaxUtil = FileSummary.utilization( + predictedMinObsoleteSize, predictedTotalSize); + + currentMinUtilization = currentMinUtil; + currentMaxUtilization = currentMaxUtil; + predictedMinUtilization = predictedMinUtil; + predictedMaxUtilization = predictedMaxUtil; + + /* + * 1. If total min utilization is below the threshold, clean the + * "best" file, which is the one with the lowest avg utilization. + * + * 2. Else if the "best gradual" file has a max gradual utilization + * that is below the threshold for a single file, clean it. + * + * 3. Else if there are more files to migrate, clean the next file to + * be migrated. + * + * 4. Else if cleaning is forced for unit testing, clean the best file. + */ + final Long fileChosen; + final String reason; + + if (predictedMinUtil < totalThreshold) { + + fileChosen = bestFile; + reason = "predicted min util is below minUtilization"; + + } else if (bestGradualFileMaxUtil < fileThreshold) { + + fileChosen = bestGradualFile; + reason = "file has avg util below minFileUtilization"; + + } else if (filesToMigrate.hasNext(fileSummaryMap, inProgressFiles)) { + + fileChosen = filesToMigrate.next(fileSummaryMap, inProgressFiles); + reason = "there are more forceCleanFiles"; + + } else if (forceCleaning) { + + fileChosen = bestFile; + reason = "forced for testing"; + + } else { + fileChosen = null; + reason = "no file selected"; + } + + String bestFileMsg = ""; + String twoPassMsg = ""; + int pass1RequiredUtil = -1; + + if (fileChosen != null && fileChosen.equals(bestFile)) { + + bestFileMsg = + ", chose file with util min: " + bestFileMinUtil + + " max: " + bestFileMaxUtil + + " avg: " + bestFileAvgUtil; + + /* + * If the difference between the file's min and max utilization is + * at least twoPassGap, and its max utilization is more than + * twoPassThreshold, use two-pass cleaning. When using two-pass + * cleaning, skip the second pass if its recalculated utilization + * is higher than twoPassThreshold. In other words, if the benefit + * of cleaning is low, don't actually clean it. + */ + if (bestFileMaxUtil > twoPassThreshold && + bestFileMaxUtil - bestFileMinUtil >= twoPassGap) { + + pass1RequiredUtil = twoPassThreshold; + twoPassMsg = ", 2-pass cleaning"; + } + } + + final Level logLevel = (fileChosen != null) ? Level.INFO : Level.FINE; + + if (logger.isLoggable(logLevel)) { + + LoggerUtils.logMsg( + logger, env, logLevel, + "Clean file " + + ((fileChosen != null) ? + ("0x" + Long.toHexString(fileChosen)) : "none") + + ": " + reason + twoPassMsg + + ", current util min: " + currentMinUtil + + " max: " + currentMaxUtil + + ", predicted util min: " + predictedMinUtil + + " max: " + predictedMaxUtil + + bestFileMsg); + } + + return (fileChosen != null) ? + new Pair<>(fileChosen, pass1RequiredUtil) : + null; + } + + /** + * Processes notifications of mutable property changes.
+ * + * @throws IllegalArgumentException via FilesToMigrate ctor and + * parseForceCleanFiles. + */ + + public synchronized void envConfigUpdate(DbConfigManager cm, + EnvironmentMutableConfig ignore) { + + filesToMigrate = new FilesToMigrate(env); + } +} diff --git a/src/com/sleepycat/je/cleaner/UtilizationProfile.java b/src/com/sleepycat/je/cleaner/UtilizationProfile.java new file mode 100644 index 0000000..6788e69 --- /dev/null +++ b/src/com/sleepycat/je/cleaner/UtilizationProfile.java @@ -0,0 +1,1446 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Get; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.Put; +import com.sleepycat.je.ReadOptions; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.DbType; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.dbi.StartupTracker; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.recovery.RecoveryInfo; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.FileSummaryLN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeLocation; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * The UP tracks utilization summary information for all log files. + * + *

Unlike the UtilizationTracker, the UP is not accessed under the log write + * latch and is instead synchronized on itself to protect the cache. It is + * not accessed during the primary data access path, except when flushing + * (writing) file summary LNs. This occurs in the following cases: + *

+ *
1. The summary information is flushed at the end of a checkpoint. This + * allows tracking to occur in memory in between checkpoints and to be + * replayed during recovery (see the sketch after this list).
+ *
2. When committing the truncateDatabase and removeDatabase operations, the + * summary information is flushed because detail tracking for those operations + * is not replayed during recovery.
+ *
3. The evictor will ask the UtilizationTracker to flush the largest summary + * if the memory taken by the tracker exceeds its budget.
+ *
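A sketch of case 1 follows. flushFileUtilization is declared below in this class and getTrackedFiles is used elsewhere in this file; the checkpoint-end hook itself is hypothetical.

    // Hypothetical checkpoint-end hook (the hook name is invented):
    // flush one FileSummaryLN per file whose counts changed since the
    // last checkpoint, so per-operation counting stays in memory.
    void onCheckpointEnd(UtilizationProfile profile,
                         UtilizationTracker tracker)
        throws DatabaseException {
        profile.flushFileUtilization(tracker.getTrackedFiles());
    }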

        The cache is populated by the RecoveryManager just before performing the + * initial checkpoint. The UP must be open and populated in order to respond + * to requests to flush summaries and to evict tracked detail, even if the + * cleaner is disabled.

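The startup ordering implied above can be sketched as follows; populateCache and its boolean result are declared later in this file, while the surrounding recovery steps are placeholders.

    // Sketch of recovery-time wiring (surrounding calls are placeholders).
    boolean forceCkpt = profile.populateCache(
        counter, recoveryInfo, recoveryReservedFiles,
        recoveryReservedFileDbs);
    // The recovery checkpoint runs after population, so it can flush any
    // MapLN metadata dirtied by populateCache; a true result is taken
    // here to mean the checkpoint must not be skipped (an assumption).
    if (forceCkpt) {
        // ...ensure the recovery checkpoint is performed...
    }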
        + * + *

        WARNING: While synchronized on this object, eviction is not permitted. + * If it were, this could cause deadlocks because the order of locking would be + * the UP object and then the evictor. During normal eviction the order is to + * first lock the evictor and then the UP, when evicting tracked detail.

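The ordering rule can be pictured with two monitors; this is an illustration only, not JE code.

    // Thread A taking the profile monitor and then the evictor's lock,
    // while thread B holds the evictor's lock and tries to synchronize
    // on the profile, is a classic lock-order deadlock. The rule above
    // imposes one global order: evictor first, then the UP.
    void evictTrackedDetail(Object evictorLock, Object profileLock) {
        synchronized (evictorLock) {      // 1st: the evictor
            synchronized (profileLock) {  // 2nd: the UP
                // move tracked detail out of the UP cache
            }
        }
    }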
        + * + *

        The methods in this class synchronize to protect the cached summary + * information. Some methods also access the UP database. However, because + * eviction must not occur while synchronized, UP database access is not + * performed while synchronized except in one case: when inserting a new + * summary record. In that case we disallow eviction during the database + * operation.

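The discipline above amounts to a copy-then-I/O pattern; a generic sketch under that assumption, with writeSummaryRecord standing in for the actual database write.

    // Read the cache under the monitor, then touch the UP database only
    // after the monitor is released, so eviction remains possible. The
    // one exception noted above, inserting a new summary record, instead
    // disables eviction for the duration of the database operation.
    void flushOneSummary(Long fileNum) throws DatabaseException {
        final FileSummary copy;
        synchronized (this) {
            copy = fileSummaryMap.get(fileNum);   // cache access only
        }
        writeSummaryRecord(fileNum, copy);        // hypothetical db I/O
    }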
+ */ +public class UtilizationProfile { + + private final EnvironmentImpl env; + private final UtilizationTracker tracker; + private DatabaseImpl fileSummaryDb; + private DatabaseImpl reservedFilesDb; + private SortedMap fileSummaryMap; + private boolean cachePopulated; + private final Logger logger; + + /** + * Creates an empty UP. + */ + public UtilizationProfile(EnvironmentImpl env, + UtilizationTracker tracker) { + this.env = env; + this.tracker = tracker; + fileSummaryMap = new TreeMap<>(); + + logger = LoggerUtils.getLogger(getClass()); + } + + /** + * Gets the base summary from the cached map. Adds the tracked summary, if + * one exists, to the base summary. Sets all entries obsolete if the file + * is in the migrateFiles set. + */ + private synchronized FileSummary getFileSummary(Long file) { + + /* Get base summary. */ + FileSummary summary = fileSummaryMap.get(file); + + /* Add tracked summary */ + TrackedFileSummary trackedSummary = tracker.getTrackedFile(file); + if (trackedSummary != null) { + FileSummary totals = new FileSummary(); + if (summary != null) { + totals.add(summary); + } + totals.add(trackedSummary); + summary = totals; + } + + return summary; + } + + /** + * Counts the given locally tracked info as obsolete and then logs the file + * and database info. + */ + public void flushLocalTracker(LocalUtilizationTracker localTracker) + throws DatabaseException { + + /* Count tracked info under the log write latch. */ + env.getLogManager().transferToUtilizationTracker(localTracker); + + /* Write out the modified file and database info. */ + flushFileUtilization(localTracker.getTrackedFiles()); + flushDbUtilization(localTracker); + } + + /** + * Flushes a FileSummaryLN node for each given TrackedFileSummary. + */ + public void flushFileUtilization + (Collection activeFiles) + throws DatabaseException { + + /* Utilization flushing may be disabled for unit tests. */ + if (!DbInternal.getCheckpointUP + (env.getConfigManager().getEnvironmentConfig())) { + return; + } + + /* Write out the modified file summaries. */ + for (TrackedFileSummary activeFile : activeFiles) { + long fileNum = activeFile.getFileNumber(); + TrackedFileSummary tfs = tracker.getTrackedFile(fileNum); + if (tfs != null) { + flushFileSummary(tfs); + } + } + } + + /** + * Flushes a MapLN for each database that has dirty utilization in the given + * tracker. + */ + private void flushDbUtilization(LocalUtilizationTracker localTracker) + throws DatabaseException { + + /* Utilization flushing may be disabled for unit tests. */ + if (!DbInternal.getCheckpointUP + (env.getConfigManager().getEnvironmentConfig())) { + return; + } + + /* Write out the modified MapLNs. */ + for (Object o : localTracker.getTrackedDbs()) { + DatabaseImpl db = (DatabaseImpl) o; + if (!db.isDeleted() && db.isDirty()) { + env.getDbTree().modifyDbRoot(db); + } + } + } + + /** + * Returns a copy of the current file summary map, optionally including + * tracked summary information, for use by the DbSpace utility and by unit + * tests. The returned map's key is a Long file number and its value is a + * FileSummary. + */ + public synchronized SortedMap + getFileSummaryMap(boolean includeTrackedFiles) { + + assert cachePopulated; + + if (includeTrackedFiles) { + + /* + * Copy the fileSummaryMap to a new map, adding in the tracked + * summary information for each entry.
+ */ + TreeMap map = new TreeMap<>(); + for (Long file : fileSummaryMap.keySet()) { + FileSummary summary = getFileSummary(file); + map.put(file, summary); + } + + /* Add tracked files that are not in fileSummaryMap yet. */ + for (TrackedFileSummary summary : tracker.getTrackedFiles()) { + Long fileNum = summary.getFileNumber(); + if (!map.containsKey(fileNum)) { + map.put(fileNum, summary); + } + } + return map; + } else { + return new TreeMap<>(fileSummaryMap); + } + } + + /** + * Gets the size of the file. If the file does not exist in fileSummaryMap, + * then -1 is returned. + */ + public synchronized int getFileSize(Long file) { + + FileSummary summary = getFileSummary(file); + if (summary == null) { + return -1; + } else { + return summary.totalSize; + } + } + + /** + * Returns a simplified copy of the current file summary map, i.e., the + * value is only the total size of the file. + * + * Also, because we cannot be sure that the FileSummary for the current + * last file is complete, we remove the entry for the last file. + */ + public synchronized SortedMap getFileSizeSummaryMap() { + + TreeMap map = new TreeMap<>(); + + for (Long fileNum : fileSummaryMap.keySet()) { + int totalSize = getFileSize(fileNum); + map.put(fileNum, totalSize); + } + + /* Add tracked size, or create entry if not yet in the map. */ + for (TrackedFileSummary trackedSummary : tracker.getTrackedFiles()) { + Long fileNum = trackedSummary.getFileNumber(); + if (!map.containsKey(fileNum)) { + map.put(fileNum, trackedSummary.totalSize); + } + } + + /* Remove the last file. */ + if (!map.isEmpty()) { + map.remove(map.lastKey()); + } + + return map; + } + + /** + * Clears the cache of file summary info. The cache is not automatically + * repopulated, so this method should currently be called only by close. + */ + private synchronized void clearCache() { + + int memorySize = fileSummaryMap.size() * + MemoryBudget.UTILIZATION_PROFILE_ENTRY; + MemoryBudget mb = env.getMemoryBudget(); + mb.updateAdminMemoryUsage(0 - memorySize); + + fileSummaryMap = new TreeMap<>(); + cachePopulated = false; + } + + /** + * Updates the reserved file db and file summary db after removing a set of + * files from the FileSelector and changing their status to reserved in the + * FileProtector. + * 1. Insert reserved file record for each file. + * 2. Remove files from database (MapLN) metadata. + * 3. Delete file summary db records for each file. + * + * See populateCache for how a crash during these steps is handled. + */ + void reserveFiles(final Map reservedFiles) { + + final Set dbIds = new HashSet<>(); + + for (final Map.Entry entry : + reservedFiles.entrySet()) { + + final FileSelector.FileInfo fsInfo = entry.getValue(); + putReservedFileRecord(entry.getKey(), fsInfo); + dbIds.addAll(fsInfo.dbIds); + } + + final Set files = reservedFiles.keySet(); + + removeDbMetadata(files, dbIds); + + for (final Long file : files) { + deleteFileSummary(file); + } + } + + /** + * Stores a reserved file db record for a file that has been cleaned and + * is ready to be deleted.
+ */ + private void putReservedFileRecord( + final Long file, + final FileSelector.FileInfo fsInfo) { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + ReservedFileInfo.keyToEntry(file, keyEntry); + + final ReservedFileInfo rfInfo = new ReservedFileInfo( + fsInfo.firstVlsn, fsInfo.lastVlsn, fsInfo.dbIds); + + final DatabaseEntry dataEntry = new DatabaseEntry(); + ReservedFileInfo.objectToEntry(rfInfo, dataEntry); + + final Locker locker = + BasicLocker.createBasicLocker(env, false /*noWait*/); + + try { + try (Cursor cursor = DbInternal.makeCursor( + reservedFilesDb, locker, null, false /*retainNonTxnLocks*/)) { + + cursor.put(keyEntry, dataEntry, Put.OVERWRITE, null); + } + } finally { + locker.operationEnd(); + } + } + + /** + * Deletes a reserved file db record after the file has been deleted. + */ + void deleteReservedFileRecord(final Long file) { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + ReservedFileInfo.keyToEntry(file, keyEntry); + + final ReadOptions readOptions = + new ReadOptions().setLockMode(LockMode.RMW); + + final Locker locker = + BasicLocker.createBasicLocker(env, false /*noWait*/); + try { + try (Cursor cursor = + DbInternal.makeCursor(reservedFilesDb, locker, null)) { + + if (cursor.get( + keyEntry, null, Get.SEARCH, readOptions) != null) { + + cursor.delete(null); + } + } + } finally { + locker.operationEnd(); + } + } + + /** + * Removes a file from the MapLN utilization info, the utilization database + * and the profile, after it has been determined that the file does not + * exist. This is unusual, so the inefficiency of removeAndFlushDbMetadata + * is acceptable. + */ + void removeDeletedFile(Long fileNum) + throws DatabaseException { + + removeAndFlushDbMetadata(fileNum); + removeFileSummaries(Collections.singleton(fileNum)); + deleteFileSummary(fileNum); + } + + /** + * Remove newly reserved files from the utilization profile cache. This is + * called while synchronized on the FileSelector to prevent a window where + * the file could be selected for cleaning again. + * + * This method does not delete the utilization db records. + */ + synchronized void removeFileSummaries(final Set files) { + assert cachePopulated; + + for (final Long fileNum : files) { + FileSummary oldSummary = fileSummaryMap.remove(fileNum); + if (oldSummary != null) { + MemoryBudget mb = env.getMemoryBudget(); + mb.updateAdminMemoryUsage + (0 - MemoryBudget.UTILIZATION_PROFILE_ENTRY); + } + } + } + + /** + * Removes a set of files from the DatabaseImpl utilization info, dirtying + * the MapLNs in the process. The MapLNs will be flushed by the + * checkpointer. This method performs eviction and is not synchronized. + * + * For a given file, this method must be called after inserting its + * reserved db record. If there is a crash and recovery finds the reserved + * db record in the checkpointer interval, recovery will redo this action. + * + * This method is optimally called for multiple files at a time, since this + * updates the MapLNs more efficiently than if it were done one file at a + * time. 
+ */ + private boolean removeDbMetadata(final Collection fileNums, + final Set databases) { + + final LogManager logManager = env.getLogManager(); + final DbTree dbTree = env.getDbTree(); + boolean anyRemoved = false; + + for (final DatabaseId dbId : databases) { + final DatabaseImpl db = dbTree.getDb(dbId); + if (db == null) { + continue; + } + try { + anyRemoved |= logManager.removeDbFileSummaries(db, fileNums); + } finally { + dbTree.releaseDb(db); + } + } + + return anyRemoved; + } + + /** + * Like removeDbMetadata but operates on a single file and also flushes + * the updated MapLNs immediately. This is very inefficient and is meant + * only for infrequent cleanup. + */ + private void removeAndFlushDbMetadata(Long fileNum) + throws DatabaseException { + + final Collection fileNums = Collections.singleton(fileNum); + final LogManager logManager = env.getLogManager(); + final DbTree dbTree = env.getDbTree(); + + /* Only call logMapTreeRoot once for ID and NAME DBs. */ + DatabaseImpl idDatabase = dbTree.getDb(DbTree.ID_DB_ID); + DatabaseImpl nameDatabase = dbTree.getDb(DbTree.NAME_DB_ID); + boolean logRoot = false; + if (logManager.removeDbFileSummaries(idDatabase, fileNums)) { + logRoot = true; + } + if (logManager.removeDbFileSummaries(nameDatabase, fileNums)) { + logRoot = true; + } + if (logRoot) { + env.logMapTreeRoot(); + } + + /* + * Use LockType.NONE for traversing the ID DB so that a lock is not + * held when calling modifyDbRoot, which must release locks to + * handle deadlocks. + */ + CursorImpl.traverseDbWithCursor(idDatabase, + LockType.NONE, + true /*allowEviction*/, + new CursorImpl.WithCursor() { + public boolean withCursor(CursorImpl cursor, + DatabaseEntry key, + DatabaseEntry data) + throws DatabaseException { + + MapLN mapLN = + (MapLN) cursor.lockAndGetCurrentLN(LockType.NONE); + + if (mapLN != null) { + DatabaseImpl db = mapLN.getDatabase(); + if (logManager.removeDbFileSummaries(db, fileNums)) { + + /* + * Because we're using dirty-read, silently do + * nothing if the DB does not exist + * (mustExist=false). + */ + dbTree.modifyDbRoot + (db, DbLsn.NULL_LSN /*ifBeforeLsn*/, + false /*mustExist*/); + } + } + return true; + } + }); + } + + /** + * Deletes all FileSummaryLNs for the file. This method performs eviction + * and is not synchronized. + * + * For a given file, this method should be called after calling + * removeDbMetadata as described in {@link #populateCache}. + */ + private void deleteFileSummary(final Long fileNum) + throws DatabaseException { + + Locker locker = null; + CursorImpl cursor = null; + try { + locker = BasicLocker.createBasicLocker(env, false /*noWait*/); + cursor = new CursorImpl(fileSummaryDb, locker); + /* Perform eviction in unsynchronized methods. */ + cursor.setAllowEviction(true); + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + + /* Do not return data to avoid a fetch of the existing LN. */ + dataEntry.setPartial(0, 0, true); + + /* Search by file number. */ + OperationResult result = null; + if (getFirstFSLN( + cursor, fileNum, keyEntry, dataEntry, LockType.WRITE)) { + result = DbInternal.DEFAULT_RESULT; + } + + /* Delete all LNs for this file number. */ + while (result != null && + fileNum == + FileSummaryLN.getFileNumber(keyEntry.getData())) { + + /* Perform eviction once per operation. */ + env.daemonEviction(true /*backgroundIO*/); + + /* + * Eviction after deleting is not necessary since we did not + * fetch the LN. 
+ */ + cursor.deleteCurrentRecord(ReplicationContext.NO_REPLICATE); + + result = cursor.getNext( + keyEntry, dataEntry, LockType.WRITE, + false /*dirtyReadAll*/, true /*forward*/, + false /*isLatched*/, null /*rangeConstraint*/); + } + } finally { + if (cursor != null) { + cursor.close(); + } + if (locker != null) { + locker.operationEnd(); + } + } + + /* Explicitly remove the file from the tracker. */ + TrackedFileSummary tfs = tracker.getTrackedFile(fileNum); + if (tfs != null) { + env.getLogManager().removeTrackedFile(tfs); + } + } + + /** + * Updates and stores the FileSummary for a given tracked file, if flushing + * of the summary is allowed. + */ + void flushFileSummary(TrackedFileSummary tfs) + throws DatabaseException { + + if (tfs.getAllowFlush()) { + putFileSummary(tfs); + } + } + + /** + * Updates and stores the FileSummary for a given tracked file. This + * method is synchronized and may not perform eviction. + */ + private synchronized PackedOffsets putFileSummary(TrackedFileSummary tfs) + throws DatabaseException { + + if (env.isReadOnly()) { + throw EnvironmentFailureException.unexpectedState + ("Cannot write file summary in a read-only environment"); + } + + if (tfs.isEmpty()) { + return null; // no delta + } + + if (!cachePopulated) { + /* Db does not exist and this is a read-only environment. */ + return null; + } + + long fileNum = tfs.getFileNumber(); + Long fileNumLong = fileNum; + + /* Get existing file summary or create an empty one. */ + FileSummary summary = fileSummaryMap.get(fileNumLong); + if (summary == null) { + + /* + * An obsolete node may have been counted after its file was + * reserved or even deleted, for example, when compressing a BIN. + * Do not insert a new profile record if the file is reserved or + * if no corresponding log file exists. + */ + if (!env.getFileProtector().isActiveOrNewFile(fileNumLong)) { + + /* + * File was deleted by the cleaner. Remove it from the + * UtilizationTracker and return. Note that a file is normally + * removed from the tracker by FileSummaryLN.writeToLog method + * when it is called via insertFileSummary below. [#15512] + */ + env.getLogManager().removeTrackedFile(tfs); + return null; + } + + summary = new FileSummary(); + } + + /* + * The key discriminator is a sequence that must be increasing over the + * life of the file. We use the sum of all entries counted. We must + * add the tracked and current summaries here to calculate the key. + */ + FileSummary tmp = new FileSummary(); + tmp.add(summary); + tmp.add(tfs); + int sequence = tmp.getEntriesCounted(); + + /* Insert an LN with the existing and tracked summary info. */ + FileSummaryLN ln = new FileSummaryLN(summary); + ln.setTrackedSummary(tfs); + insertFileSummary(ln, fileNum, sequence); + + /* Cache the updated summary object. */ + summary = ln.getBaseSummary(); + + if (fileSummaryMap.put(fileNumLong, summary) == null) { + MemoryBudget mb = env.getMemoryBudget(); + mb.updateAdminMemoryUsage(MemoryBudget.UTILIZATION_PROFILE_ENTRY); + } + + return ln.getObsoleteOffsets(); + } + + /** + * Returns the stored/packed obsolete offsets for the given file. + * + * @param logUpdate if true, log any updates to the utilization profile. If + * false, only retrieve the new information. + */ + PackedOffsets getObsoleteDetailPacked(Long fileNum, boolean logUpdate) + throws DatabaseException { + + final PackedOffsets packedOffsets = new PackedOffsets(); + + /* Return if no detail is being tracked. 
*/ + if (!env.getCleaner().trackDetail) { + return packedOffsets; + } + + packedOffsets.pack(getObsoleteDetailInternal(fileNum, logUpdate)); + return packedOffsets; + + } + + /** + * Returns the sorted obsolete offsets for the given file. + */ + public long[] getObsoleteDetailSorted(Long fileNum) + throws DatabaseException { + + long[] sortedOffsets = new long[0]; + + /* Return if no detail is being tracked. */ + if (!env.getCleaner().trackDetail) { + return sortedOffsets; + } + + sortedOffsets = getObsoleteDetailInternal(fileNum, false); + Arrays.sort(sortedOffsets); + return sortedOffsets; + + } + + private long[] getObsoleteDetailInternal(Long fileNum, boolean logUpdate) + throws DatabaseException { + + assert cachePopulated; + + final long fileNumVal = fileNum; + final List list = new ArrayList<>(); + + /* + * Get a TrackedFileSummary that cannot be flushed (evicted) while we + * gather obsolete offsets. + */ + final TrackedFileSummary tfs = + env.getLogManager().getUnflushableTrackedSummary(fileNumVal); + try { + /* Read the summary db. */ + final Locker locker = + BasicLocker.createBasicLocker(env, false /*noWait*/); + final CursorImpl cursor = new CursorImpl(fileSummaryDb, locker); + try { + /* Perform eviction in unsynchronized methods. */ + cursor.setAllowEviction(true); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + /* Search by file number. */ + OperationResult result = null; + if (getFirstFSLN(cursor, fileNumVal, keyEntry, dataEntry, + LockType.NONE)) { + result = DbInternal.DEFAULT_RESULT; + } + + /* Read all LNs for this file number. */ + while (result != null) { + + /* Perform eviction once per operation. */ + env.daemonEviction(true /*backgroundIO*/); + + final FileSummaryLN ln = (FileSummaryLN) + cursor.lockAndGetCurrentLN(LockType.NONE); + + if (ln != null) { + /* Stop if the file number changes. */ + if (fileNumVal != + FileSummaryLN.getFileNumber(keyEntry.getData())) { + break; + } + + final PackedOffsets offsets = ln.getObsoleteOffsets(); + if (offsets != null) { + list.add(offsets.toArray()); + } + + /* Always evict after using a file summary LN. */ + cursor.evictLN(); + } + + result = cursor.getNext( + keyEntry, dataEntry, LockType.NONE, + false /*dirtyReadAll*/, true /*forward*/, + false /*isLatched*/, null /*rangeConstraint*/); + } + } finally { + cursor.close(); + locker.operationEnd(); + } + + /* + * Write out tracked detail, if any, and add its offsets to the + * list. + */ + if (!tfs.isEmpty()) { + if (logUpdate) { + final PackedOffsets offsets = putFileSummary(tfs); + if (offsets != null) { + list.add(offsets.toArray()); + } + } else { + final long[] offsetList = tfs.getObsoleteOffsets(); + if (offsetList != null) { + list.add(offsetList); + } + } + } + } finally { + /* Allow flushing of TFS when all offsets have been gathered. */ + tfs.setAllowFlush(true); + } + + /* Merge all offsets into a single array and pack the result. */ + int size = 0; + for (final long[] a : list) { + size += a.length; + } + final long[] offsets = new long[size]; + int index = 0; + for (long[] a : list) { + System.arraycopy(a, 0, offsets, index, a.length); + index += a.length; + } + assert index == offsets.length; + return offsets; + } + + /** + * Populate the profile for file selection. This method performs eviction + * and is not synchronized. It must be called before recovery is complete + * so that synchronization is unnecessary. 
It should be called before the + * recovery checkpoint so that the checkpoint can flush dirty metadata. + * + * After a file is cleaned, at checkpoint end it is moved to reserved + * status and these steps are taken to delete its metadata: + * 1. The file is cleaned and a checkpoint flushes the updated INs. + * 2. Utilization info in MapLNs referencing the file is updated in + * cache and the MapLNs are dirtied. At some point later, the dirty + * MapLNs are flushed by a normally scheduled checkpoint. This could + * happen after any of the steps below. + * 3. A record is inserted for the file in the reserved file db. + * 4. All file summary db records for the file are deleted. + * + * When the reserved file is deleted, these steps are taken: + * 5. The file itself is deleted. Note that because we do not flush the + * log before deleting the file, steps 3 and 4 may not be durable. + * 6. The file's record in the reserved file db is deleted. + * + * Data file deletion and file metadata deletion cannot be performed + * atomically. In fact, none of the steps are grouped atomically into + * transactions, and no-sync logging is used when writing to the file + * summary db and the reserved file db. A crash can occur at any point, and + * we must handle that in some way, either in recovery or here (in the + * populateCache method). + * + * - A crash prior to durable completion of step 3 will be handled + * naturally by cleaning the file again, since it is present in the file + * summary db but not in the reserved file db. + * + * - If step 3 completes durably but a crash occurs before the dirty + * MapLNs are flushed, this is indicated by the presence of a reserved + * file record in the recovery interval, and step 2 will be repeated + * here during recovery. + * + * - If step 3 completes durably but not step 4 or 5, then step 4 is + * completed here. + * + * - If steps 3 and 5 complete durably but not steps 4 and 6, then steps 4 + * and 6 are completed here. + * + * - If steps 4 and 5 complete durably but not step 6, then step 6 is + * completed here. + * + * Prior to log version 15, the reserved file db did not exist and reserved + * files were re-cleaned if they were not deleted before a crash. The old + * steps were: + * A. The file is cleaned and a checkpoint flushes the updated INs. + * B. If the file is reserved, this is tracked in memory but nothing is + * logged. + * + * When deleting a file: + * C. The file itself is deleted. + * D. MapLNs are updated and flushed. + * E. All file summary db records for the file are deleted. + * + * This is handled now as follows, which is the same as the old approach. + * + * - A crash prior to step C results in re-cleaning the file. + * + * - If step C completes but step E does not, steps D and E are performed + * here. MapLNs must be flushed immediately in this case, which is + * inefficient but hopefully very infrequent. + */ + public boolean populateCache( + final StartupTracker.Counter counter, + final RecoveryInfo recoveryInfo, + final Set recoveryReservedFiles, + final Set recoveryReservedFileDbs) + throws DatabaseException { + + assert !cachePopulated; + + /* Open the file summary db on first use. */ + if (!openFileSummaryDatabase()) { + /* Db does not exist and this is a read-only environment.
*/ + return false; + } + + final Long[] existingFiles = env.getFileManager().getAllFileNumbers(); + final Set reservedFileRecords = new HashSet<>(); + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final FileProtector fileProtector = env.getFileProtector(); + + /* + * For each reserved file record in the recovery interval, ensure that + * the MapLN metadata is updated. + */ + final boolean forceCheckpoint = removeDbMetadata( + recoveryReservedFiles, recoveryReservedFileDbs); + + /* + * Open the reserved file db on first use. This may return false in a + * read-only old-version env where the file summary db exists but the + * reserved file db does not. + */ + if (openReservedFilesDatabase()) { + + /* + * First read through the reserved file db and add reserved + * files to the file protector. It is possible that a file does + * not exist for a record in the db, and again we must clean up + * as described in method comments. + */ + final Locker locker = + BasicLocker.createBasicLocker(env, false /*noWait*/); + try { + final ReadOptions options = + new ReadOptions().setLockMode(LockMode.READ_UNCOMMITTED); + + try (final Cursor cursor = DbInternal.makeCursor( + reservedFilesDb, locker, null, + false /*retainNonTxnLocks*/)) { + + while (cursor.get( + keyEntry, dataEntry, Get.NEXT, options) != null) { + + counter.incNumRead(); + env.daemonEviction(false /*backgroundIO*/); + + final Long file = + ReservedFileInfo.entryToKey(keyEntry); + + reservedFileRecords.add(file); + + final ReservedFileInfo info = + ReservedFileInfo.entryToObject(dataEntry); + + if (Arrays.binarySearch(existingFiles, file) >= 0) { + + counter.incNumProcessed(); + fileProtector.reserveFile(file, info.lastVLSN); + + } else { + counter.incNumDeleted(); + + if (!info.lastVLSN.isNull()) { + recoveryInfo.lastMissingFileNumber = file; + recoveryInfo.lastMissingFileVLSN = + info.lastVLSN; + } + + if (env.isReadOnly()) { + continue; + } + + cursor.delete(); + } + } + } + } finally { + locker.operationEnd(); + } + } + + final int oldMemorySize = fileSummaryMap.size() * + MemoryBudget.UTILIZATION_PROFILE_ENTRY; + + /* + * Next read through the file summary db and populate the profile. + * As above, it is possible that a file does not exist for a record in + * the db, and again we must clean up as described in method comments. + */ + Locker locker = null; + CursorImpl cursor = null; + try { + locker = BasicLocker.createBasicLocker(env, false /*noWait*/); + + cursor = new CursorImpl( + fileSummaryDb, locker, false /*retainNonTxnLocks*/, + false /*isSecondaryCursor*/); + + /* Perform eviction in unsynchronized methods. */ + cursor.setAllowEviction(true); + + if (cursor.positionFirstOrLast(true)) { + + /* Retrieve the first record. */ + OperationResult result = cursor.lockAndGetCurrent( + keyEntry, dataEntry, LockType.NONE, + false /*dirtyReadAll*/, + true /*isLatched*/, true /*unlatch*/); + + if (result == null) { + /* The record we're pointing at may be deleted. */ + result = cursor.getNext( + keyEntry, dataEntry, LockType.NONE, + false /*dirtyReadAll*/, true /*forward*/, + false /*isLatched*/, null /*rangeConstraint*/); + } + + while (result != null) { + counter.incNumRead(); + + /* + * Perform eviction once per operation. Pass false for + * backgroundIO because this is done during recovery and + * there is no reason to sleep. 
+ */ + env.daemonEviction(false /*backgroundIO*/); + + final FileSummaryLN ln = (FileSummaryLN) + cursor.lockAndGetCurrentLN(LockType.NONE); + + if (ln == null) { + /* Advance past a cleaned record. */ + result = cursor.getNext( + keyEntry, dataEntry, LockType.NONE, + false /*dirtyReadAll*/, + true /*forward*/, false /*isLatched*/, + null /*rangeConstraint*/); + continue; + } + + final byte[] keyBytes = keyEntry.getData(); + final boolean isOldVersion = + FileSummaryLN.hasStringKey(keyBytes); + final long fileNum = FileSummaryLN.getFileNumber(keyBytes); + final Long fileNumLong = fileNum; + + if (!fileProtector.isReservedFile(fileNumLong) && + Arrays.binarySearch(existingFiles, fileNumLong) >= 0) { + + counter.incNumProcessed(); + + /* File is active, cache the FileSummaryLN. */ + final FileSummary summary = ln.getBaseSummary(); + fileSummaryMap.put(fileNumLong, summary); + + /* + * Update old version records to the new version. A + * zero sequence number is used to distinguish the + * converted records and to ensure that later records + * will have a greater sequence number. + */ + if (isOldVersion && !env.isReadOnly()) { + insertFileSummary(ln, fileNum, 0); + cursor.deleteCurrentRecord( + ReplicationContext.NO_REPLICATE); + } else { + /* Always evict after using a file summary LN. */ + cursor.evictLN(); + } + } else { + + /* + * File does not exist or is a reserved file. Remove + * the summary from the map and delete all + * FileSummaryLN records. If the file has a reserved + * file record (even if it was deleted above) then we + * can rely on the reserved file record mechanism to + * update MapLNs (per-db metadata); otherwise we must + * update the MapLNs here. + */ + counter.incNumDeleted(); + + fileSummaryMap.remove(fileNumLong); + + if (!env.isReadOnly()) { + if (!reservedFileRecords.contains(fileNumLong)) { + removeAndFlushDbMetadata(fileNumLong); + } + if (isOldVersion) { + cursor.deleteCurrentRecord( + ReplicationContext.NO_REPLICATE); + } else { + deleteFileSummary(fileNumLong); + } + } + + /* + * Do not evict after deleting since the compressor + * would have to fetch it again. + */ + } + + /* Go on to the next entry. */ + if (isOldVersion) { + + /* Advance past the single old version record. */ + result = cursor.getNext( + keyEntry, dataEntry, LockType.NONE, + false /*dirtyReadAll*/, + true /*forward*/, false /*isLatched*/, + null /*rangeConstraint*/); + } else { + + /* + * Skip over other records for this file by adding one + * to the file number and doing a range search. + */ + if (!getFirstFSLN + (cursor, + fileNum + 1, + keyEntry, dataEntry, + LockType.NONE)) { + result = null; + } + } + } + } + } finally { + if (cursor != null) { + /* positionFirstOrLast may leave BIN latched. */ + cursor.close(); + } + if (locker != null) { + locker.operationEnd(); + } + + final int newMemorySize = fileSummaryMap.size() * + MemoryBudget.UTILIZATION_PROFILE_ENTRY; + final MemoryBudget mb = env.getMemoryBudget(); + mb.updateAdminMemoryUsage(newMemorySize - oldMemorySize); + } + + cachePopulated = true; + return forceCheckpoint; + } + + /** + * Positions at the most recent LN for the given file number. 
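+ *
+ * <p>Illustrative sketch (not part of the original source): the FSLN key
+ * layout assumed by this range search is [fileNum, sequence], so a
+ * partial key built from the file number alone positions the cursor at
+ * the file's first record:
+ * <pre>
+ *     byte[] partial = FileSummaryLN.makePartialKey(fileNum);
+ *     keyEntry.setData(partial);
+ *     // searchRange below lands on the first FSLN whose key is
+ *     // greater than or equal to [fileNum, 0]
+ * </pre>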
+ */
+ private boolean getFirstFSLN(CursorImpl cursor,
+ long fileNum,
+ DatabaseEntry keyEntry,
+ DatabaseEntry dataEntry,
+ LockType lockType)
+ throws DatabaseException {
+
+ byte[] keyBytes = FileSummaryLN.makePartialKey(fileNum);
+ keyEntry.setData(keyBytes);
+
+ cursor.reset();
+
+ try {
+ int result = cursor.searchRange(keyEntry, null /*comparator*/);
+
+ if ((result & CursorImpl.FOUND) == 0) {
+ return false;
+ }
+
+ boolean exactKeyMatch = ((result & CursorImpl.EXACT_KEY) != 0);
+
+ if (exactKeyMatch &&
+ cursor.lockAndGetCurrent(
+ keyEntry, dataEntry, lockType, false /*dirtyReadAll*/,
+ true /*isLatched*/, false /*unlatch*/) != null) {
+ return true;
+ }
+ } finally {
+ cursor.releaseBIN();
+ }
+
+ /* Always evict after using a file summary LN. */
+ cursor.evictLN();
+
+ OperationResult result = cursor.getNext(
+ keyEntry, dataEntry, lockType, false /*dirtyReadAll*/,
+ true /*forward*/, false /*isLatched*/, null /*rangeConstraint*/);
+
+ return result != null;
+ }
+
+ /**
+ * If the reserved files db is already open, return true; otherwise attempt
+ * to open it. If the environment is read-only and the database doesn't
+ * exist, return false. If the environment is read-write the database will
+ * be created if it doesn't exist.
+ */
+ private boolean openReservedFilesDatabase()
+ throws DatabaseException {
+
+ if (reservedFilesDb != null) {
+ return true;
+ }
+
+ reservedFilesDb =
+ env.getDbTree().openNonRepInternalDB(DbType.RESERVED_FILES);
+
+ return (reservedFilesDb != null);
+ }
+
+ /**
+ * If the file summary db is already open, return true; otherwise attempt
+ * to open it. If the environment is read-only and the database doesn't
+ * exist, return false. If the environment is read-write the database will
+ * be created if it doesn't exist.
+ */
+ private boolean openFileSummaryDatabase()
+ throws DatabaseException {
+
+ if (fileSummaryDb != null) {
+ return true;
+ }
+
+ fileSummaryDb =
+ env.getDbTree().openNonRepInternalDB(DbType.UTILIZATION);
+
+ return (fileSummaryDb != null);
+ }
+
+ /**
+ * For unit testing.
+ */
+ DatabaseImpl getFileSummaryDb() {
+ return fileSummaryDb;
+ }
+
+ /**
+ * Insert the given LN with the given key values. This method is
+ * synchronized and may not perform eviction.
+ *
+ * Is non-private only for unit testing.
+ */
+ synchronized boolean insertFileSummary(
+ FileSummaryLN ln,
+ long fileNum,
+ int sequence)
+ throws DatabaseException {
+
+ byte[] keyBytes = FileSummaryLN.makeFullKey(fileNum, sequence);
+
+ Locker locker = null;
+ CursorImpl cursor = null;
+ try {
+ locker = BasicLocker.createBasicLocker(env, false /*noWait*/);
+ cursor = new CursorImpl(fileSummaryDb, locker);
+
+ /* Insert the LN. */
+ boolean inserted = cursor.insertRecord(
+ keyBytes, ln, false /*blindInsertion*/,
+ ReplicationContext.NO_REPLICATE);
+
+ if (!inserted) {
+ LoggerUtils.traceAndLog
+ (logger, env, Level.SEVERE,
+ "Cleaner duplicate key sequence file=0x" +
+ Long.toHexString(fileNum) + " sequence=0x" +
+ Long.toHexString(sequence));
+ return false;
+ }
+
+ /* Always evict after using a file summary LN. */
+ cursor.evictLN();
+ return true;
+ } finally {
+ if (cursor != null) {
+ cursor.close();
+ }
+ if (locker != null) {
+ locker.operationEnd();
+ }
+ }
+ }
+
+ /**
+ * Checks that all FSLN offsets are indeed obsolete. Assumes that the
+ * system is quiescent (does not lock LNs). This method is not synchronized
+ * (because it doesn't access fileSummaryMap) and eviction is allowed.
+ *
+ * @return true if no verification failures.
+ */
+ boolean verifyFileSummaryDatabase()
+ throws DatabaseException {
+
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+
+ openFileSummaryDatabase();
+ Locker locker = null;
+ CursorImpl cursor = null;
+ boolean ok = true;
+
+ try {
+ locker = BasicLocker.createBasicLocker(env, false /*noWait*/);
+ cursor = new CursorImpl(fileSummaryDb, locker);
+ cursor.setAllowEviction(true);
+
+ if (cursor.positionFirstOrLast(true)) {
+
+ OperationResult result = cursor.lockAndGetCurrent(
+ key, data, LockType.NONE, false /*dirtyReadAll*/,
+ true /*isLatched*/, true /*unlatch*/);
+
+ /* Iterate over all file summary LNs. */
+ while (result != null) {
+
+ /* Perform eviction once per operation. */
+ env.daemonEviction(true /*backgroundIO*/);
+
+ FileSummaryLN ln = (FileSummaryLN)
+ cursor.lockAndGetCurrentLN(LockType.NONE);
+
+ if (ln != null) {
+ long fileNumVal =
+ FileSummaryLN.getFileNumber(key.getData());
+ PackedOffsets offsets = ln.getObsoleteOffsets();
+
+ /*
+ * Check every offset in the FSLN to make sure it's
+ * truly obsolete.
+ */
+ if (offsets != null) {
+ long[] vals = offsets.toArray();
+ for (long val : vals) {
+ long lsn = DbLsn.makeLsn(fileNumVal, val);
+ if (!verifyLsnIsObsolete(lsn)) {
+ ok = false;
+ }
+ }
+ }
+
+ cursor.evictLN();
+ }
+
+ result = cursor.getNext(
+ key, data, LockType.NONE, false /*dirtyReadAll*/,
+ true /*forward*/, false /*isLatched*/,
+ null /*rangeConstraint*/);
+ }
+ }
+ } finally {
+ if (cursor != null) {
+ cursor.close();
+ }
+ if (locker != null) {
+ locker.operationEnd();
+ }
+ }
+
+ return ok;
+ }
+
+ /*
+ * Return true if the LN at this LSN is obsolete.
+ */
+ private boolean verifyLsnIsObsolete(long lsn)
+ throws DatabaseException {
+
+ /* Read the whole entry out of the log. */
+ Object o = env.getLogManager().getLogEntryHandleFileNotFound(lsn);
+ if (!(o instanceof LNLogEntry)) {
+ return true;
+ }
+ LNLogEntry entry = (LNLogEntry) o;
+
+ /* Find the owning database. */
+ DatabaseId dbId = entry.getDbId();
+ DatabaseImpl db = env.getDbTree().getDb(dbId);
+
+ /*
+ * Search down to the bottommost level for the parent of this LN.
+ */
+ BIN bin = null;
+ try {
+ /*
+ * The whole database is gone, so this LN is obsolete. No need
+ * to worry about delete cleanup; this is just verification and
+ * no cleaning is done.
+ */
+ if (db == null || db.isDeleted()) {
+ return true;
+ }
+
+ if (entry.isImmediatelyObsolete(db)) {
+ return true;
+ }
+
+ entry.postFetchInit(db);
+
+ Tree tree = db.getTree();
+ TreeLocation location = new TreeLocation();
+ boolean parentFound = tree.getParentBINForChildLN(
+ location, entry.getKey(), false /*splitsAllowed*/,
+ false /*blindDeltaOps*/, CacheMode.UNCHANGED);
+
+ bin = location.bin;
+ int index = location.index;
+
+ /* Is bin latched? */
+ if (!parentFound) {
+ return true;
+ }
+
+ /*
+ * Now we're at the BIN parent for this LN. If knownDeleted, LN is
+ * deleted and can be purged.
+ */
+ if (bin.isEntryKnownDeleted(index)) {
+ return true;
+ }
+
+ if (bin.getLsn(index) != lsn) {
+ return true;
+ }
+
+ /* This LSN is referenced in the tree, so it is not obsolete. */
+ System.err.println("lsn " + DbLsn.getNoFormatString(lsn) +
+ " was found in tree.");
+ return false;
+ } finally {
+ env.getDbTree().releaseDb(db);
+ if (bin != null) {
+ bin.releaseLatch();
+ }
+ }
+ }
+
+ /**
+ * Update memory budgets when this profile is closed and will never be
+ * accessed again.
+ */
+ void close() {
+ clearCache();
+ if (fileSummaryDb != null) {
+ fileSummaryDb.releaseTreeAdminMemory();
+ }
+ if (reservedFilesDb != null) {
+ reservedFilesDb.releaseTreeAdminMemory();
+ }
+ }
+}
diff --git a/src/com/sleepycat/je/cleaner/UtilizationTracker.java b/src/com/sleepycat/je/cleaner/UtilizationTracker.java
new file mode 100644
index 0000000..34b7588
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/UtilizationTracker.java
@@ -0,0 +1,230 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.cleaner;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.LogEntryType;
+
+/**
+ * Tracks changes to the utilization profile since the last checkpoint. This
+ * is the "global" tracker for an environment that tracks changes as they
+ * occur in live operations. Other "local" tracker classes are used to count
+ * utilization locally and then later transfer the information to this
+ * global tracker.
+ *
+ * <p>All changes to this object must occur under the log write latch. It is
+ * possible to read tracked info without holding the latch. This is done by
+ * the cleaner when selecting a file and by the checkpointer when determining
+ * what FileSummaryLNs need to be written. To read tracked info outside the
+ * log write latch, call getTrackedFile or getTrackedFiles.
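+ *
+ * <p>Minimal usage sketch (illustrative only, not in the original
+ * source), given a tracker reference, reading tracked info outside the
+ * log write latch as described above:
+ * <pre>
+ *     for (TrackedFileSummary tfs : tracker.getTrackedFiles()) {
+ *         int memBytes = tfs.getMemorySize(); // may lag live operations
+ *     }
+ * </pre>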
        + */ +public class UtilizationTracker extends BaseUtilizationTracker { + + /** + * Creates an empty tracker. The cleaner field of the environment object + * must be initialized before using this constructor. + */ + public UtilizationTracker(EnvironmentImpl env) { + super(env, env.getCleaner()); + } + + /** + * Constructor used by the cleaner constructor, prior to setting the + * cleaner field of the environment. + */ + UtilizationTracker(EnvironmentImpl env, Cleaner cleaner) { + super(env, cleaner); + } + + @Override + public EnvironmentImpl getEnvironment() { + return env; + } + + /** + * Evicts tracked detail if the budget for the tracker is exceeded. Evicts + * only one file summary LN at most to keep eviction batches small. + * Returns the number of bytes freed. + * + *
+ * <p>When flushFileSummary is called, the TrackedFileSummary is cleared
+ * via its reset method, which is called by FileSummaryLN.writeToLog. This
+ * is how memory is subtracted from the budget.
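+ *
+ * <p>Condensed Java-like sketch of the selection policy implemented
+ * below (flush and rememberLargest are hypothetical helper names):
+ * <pre>
+ *     for (TrackedFileSummary tfs : getTrackedFiles()) {
+ *         if (tfs.getMemorySize() >= ONE_MB) {
+ *             flush(tfs);               // large summaries: flush now
+ *         } else if (tfs.getAllowFlush()) {
+ *             rememberLargest(tfs);     // candidate if over budget
+ *         }
+ *     }
+ *     // if total tracked bytes exceed the tracker budget, flush the
+ *     // largest remembered candidate
+ * </pre>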
+ */
+ public long evictMemory()
+ throws DatabaseException {
+
+ /* If not tracking detail, there is nothing to evict. */
+ if (!cleaner.trackDetail) {
+ return 0;
+ }
+
+ /*
+ * Do not start eviction until after recovery, since the
+ * UtilizationProfile will not be initialized properly. UP
+ * initialization requires that all LNs have been replayed.
+ */
+ if (!env.isValid()) {
+ return 0;
+ }
+
+ /*
+ * In a read-only env, we cannot free memory by flushing a
+ * FileSummaryLN. Normally utilization information is not accumulated
+ * in a read-only env, but this may occur during recovery.
+ */
+ if (env.isReadOnly()) {
+ return 0;
+ }
+
+ MemoryBudget mb = env.getMemoryBudget();
+ long totalEvicted = 0;
+ long totalBytes = 0;
+ int largestBytes = 0;
+ TrackedFileSummary bestFile = null;
+ final int ONE_MB = 1024 * 1024;
+
+ for (TrackedFileSummary tfs : getTrackedFiles()) {
+ int mem = tfs.getMemorySize();
+ if (mem >= ONE_MB) {
+ env.getUtilizationProfile().flushFileSummary(tfs);
+ totalEvicted += mem;
+ continue;
+ }
+ totalBytes += mem;
+ if (mem > largestBytes && tfs.getAllowFlush()) {
+ largestBytes = mem;
+ bestFile = tfs;
+ }
+ }
+
+ if (bestFile != null && totalBytes > mb.getTrackerBudget()) {
+ env.getUtilizationProfile().flushFileSummary(bestFile);
+ totalEvicted += largestBytes;
+ }
+ return totalEvicted;
+ }
+
+ /**
+ * Counts the addition of all new log entries including LNs.
+ *
+ * <p>Must be called under the log write latch.
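+ *
+ * <p>Hypothetical call-site sketch (not in the original source); the
+ * caller is assumed to hold the log write latch when logging the entry:
+ * <pre>
+ *     tracker.countNewLogEntry(lsn, entryType, entrySize, databaseImpl);
+ * </pre>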
        + */ + public void countNewLogEntry(long lsn, + LogEntryType type, + int size, + DatabaseImpl db) { + countNew(lsn, db, type, size); + } + + /** + * Counts a node that has become obsolete and tracks the LSN offset, if + * non-zero, to avoid a lookup during cleaning. + * + *
+ * <p>A zero LSN offset is used as a special value when obsolete offset
+ * tracking is not desired. [#15365] The file header entry (at offset
+ * zero) is never counted as obsolete; it is assumed to be obsolete by the
+ * cleaner.
        + * + *
+ * <p>This method should only be called for LNs and INs (i.e., only for
+ * nodes). If type is null we assume it is an LN.
        + * + *
+ * <p>Must be called under the log write latch.
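+ *
+ * <p>Illustrative sketch (not in the original source): an LSN whose file
+ * offset is zero is counted as obsolete, but its offset is not tracked,
+ * per the special value described above:
+ * <pre>
+ *     long lsn = DbLsn.makeLsn(fileNum, 0); // offset not tracked
+ *     tracker.countObsoleteNode(lsn, entryType, entrySize, databaseImpl);
+ * </pre>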
        + */ + public void countObsoleteNode(long lsn, + LogEntryType type, + int size, + DatabaseImpl db) { + countObsolete + (lsn, db, type, size, + true, // countPerFile + true, // countPerDb + true, // trackOffset + true); // checkDupOffsets + } + + /** + * Counts as countObsoleteNode does, but since the LSN may be inexact, does + * not track the obsolete LSN offset. + * + *
+ * <p>This method should only be called for LNs and INs (i.e., only for
+ * nodes). If type is null we assume it is an LN.
        + * + *
+ * <p>Must be called under the log write latch.
        + */ + public void countObsoleteNodeInexact(long lsn, + LogEntryType type, + int size, + DatabaseImpl db) { + countObsolete + (lsn, db, type, size, + true, // countPerFile + true, // countPerDb + false, // trackOffset + false); // checkDupOffsets + } + + /** + * Counts as countObsoleteNode does, tracks the obsolete LSN offset, but + * does not fire an assert if the offset has already been counted. Use + * this method when the same LSN offset may be counted twice in certain + * circumstances. + * + *
+ * <p>This method should only be called for LNs and INs (i.e., only for
+ * nodes). If type is null we assume it is an LN.
        + * + *
+ * <p>Must be called under the log write latch.
        + */ + public void countObsoleteNodeDupsAllowed(long lsn, + LogEntryType type, + int size, + DatabaseImpl db) { + countObsolete + (lsn, db, type, size, + true, // countPerFile + true, // countPerDb + true, // trackOffset + false); // checkDupOffsets + } + + /** + * Returns a tracked summary for the given file which will not be flushed. + */ + public TrackedFileSummary getUnflushableTrackedSummary(long fileNum) { + TrackedFileSummary file = getFileSummary(fileNum); + file.setAllowFlush(false); + return file; + } + + /** + * Allocates DbFileSummary information in the DatabaseImpl, which is the + * database key. + * + *
+ * <p>Must be called under the log write latch, and the returned object
+ * may only be accessed under the log write latch.
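+ *
+ * <p>Sketch of a caller under the log write latch (illustrative only;
+ * the field update shown is hypothetical):
+ * <pre>
+ *     DbFileSummary s = tracker.getDbFileSummary(databaseImpl, fileNum);
+ *     if (s != null) {
+ *         s.totalLNCount += 1;
+ *     }
+ * </pre>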
+ *
+ * @return the summary, or null if the DB should not be tracked because
+ * the file has been deleted, or null if the databaseKey param is null.
+ */
+ DbFileSummary getDbFileSummary(Object databaseKey, long fileNum) {
+ DatabaseImpl db = (DatabaseImpl) databaseKey;
+ if (db != null) {
+ return db.getDbFileSummary(fileNum, true /*willModify*/);
+ } else {
+ return null;
+ }
+ }
+}
diff --git a/src/com/sleepycat/je/cleaner/VerifyUtils.java b/src/com/sleepycat/je/cleaner/VerifyUtils.java
new file mode 100644
index 0000000..e708f0e
--- /dev/null
+++ b/src/com/sleepycat/je/cleaner/VerifyUtils.java
@@ -0,0 +1,436 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentFailureReason;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.SortedLSNTreeWalker;
+import com.sleepycat.je.dbi.SortedLSNTreeWalker.TreeNodeProcessor;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.UtilizationFileReader;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.tree.Node;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Verify cleaner data structures.
+ */
+public class VerifyUtils {
+
+ private static final boolean DEBUG = false;
+
+ /**
+ * Compare the LSNs referenced by a given Database to the LSNs held
+ * in the utilization profile. Assumes that the database and
+ * environment are quiescent, and that there is no current cleaner
+ * activity.
+ */
+ public static void checkLsns(Database db)
+ throws DatabaseException {
+
+ checkLsns(DbInternal.getDbImpl(db), System.out);
+ }
+
+ /**
+ * Compare the LSNs referenced by a given Database to the LSNs held
+ * in the utilization profile. Assumes that the database and
+ * environment are quiescent, and that there is no current cleaner
+ * activity.
+ */
+ public static void checkLsns(DatabaseImpl dbImpl,
+ PrintStream out)
+ throws DatabaseException {
+
+ /* Get all the LSNs in the database. */
+ GatherLSNs gatherLsns = new GatherLSNs();
+ long rootLsn = dbImpl.getTree().getRootLsn();
+ List savedExceptions =
+ new ArrayList();
+
+ SortedLSNTreeWalker walker =
+ new SortedLSNTreeWalker(new DatabaseImpl[] { dbImpl },
+ false /*setDbState*/,
+ new long[] { rootLsn },
+ gatherLsns, savedExceptions, null);
+ walker.walk();
+
+ /* Print out any exceptions seen during the walk.
*/
+ if (savedExceptions.size() > 0) {
+ out.println(savedExceptions.size() +
+ " problems seen during tree walk for checkLsns");
+ Iterator iter = savedExceptions.iterator();
+ while (iter.hasNext()) {
+ out.println(" " + iter.next());
+ }
+ }
+
+ Set lsnsInTree = gatherLsns.getLsns();
+ if (rootLsn != DbLsn.NULL_LSN) {
+ lsnsInTree.add(rootLsn);
+ }
+
+ /* Get all the files used by this database. */
+ Iterator iter = lsnsInTree.iterator();
+ Set fileNums = new HashSet();
+
+ while (iter.hasNext()) {
+ long lsn = iter.next();
+ fileNums.add(DbLsn.getFileNumber(lsn));
+ }
+
+ /* Gather up the obsolete LSNs in these file summary LNs. */
+ iter = fileNums.iterator();
+ Set obsoleteLsns = new HashSet();
+ EnvironmentImpl envImpl = dbImpl.getEnv();
+ UtilizationProfile profile = envImpl.getUtilizationProfile();
+
+ while (iter.hasNext()) {
+ Long fileNum = iter.next();
+
+ PackedOffsets obsoleteOffsets =
+ profile.getObsoleteDetailPacked(fileNum, false /*logUpdate*/);
+ PackedOffsets.Iterator obsoleteIter = obsoleteOffsets.iterator();
+ while (obsoleteIter.hasNext()) {
+ long offset = obsoleteIter.next();
+ Long oneLsn = Long.valueOf(DbLsn.makeLsn(fileNum.longValue(),
+ offset));
+ obsoleteLsns.add(oneLsn);
+ if (DEBUG) {
+ out.println("Adding 0x" +
+ Long.toHexString(oneLsn.longValue()));
+ }
+ }
+ }
+
+ /* Check that none of the LSNs in the tree is in the UP. */
+ boolean error = false;
+ iter = lsnsInTree.iterator();
+ while (iter.hasNext()) {
+ Long lsn = iter.next();
+ if (obsoleteLsns.contains(lsn)) {
+ out.println("Obsolete LSN set contains valid LSN " +
+ DbLsn.getNoFormatString(lsn.longValue()));
+ error = true;
+ }
+ }
+
+ /*
+ * Check that none of the LSNs in the file summary LN is in the
+ * tree.
+ */
+ iter = obsoleteLsns.iterator();
+ while (iter.hasNext()) {
+ Long lsn = iter.next();
+ if (lsnsInTree.contains(lsn)) {
+ out.println("Tree contains obsolete LSN " +
+ DbLsn.getNoFormatString(lsn.longValue()));
+ error = true;
+ }
+ }
+
+ if (error) {
+ throw new EnvironmentFailureException
+ (envImpl, EnvironmentFailureReason.LOG_INTEGRITY,
+ "Lsn mismatch");
+ }
+
+ if (savedExceptions.size() > 0) {
+ throw new EnvironmentFailureException
+ (envImpl, EnvironmentFailureReason.LOG_INTEGRITY,
+ "Sorted LSN Walk problem");
+ }
+ }
+
+ private static class GatherLSNs implements TreeNodeProcessor {
+ private final Set lsns = new HashSet();
+
+ @Override
+ public void processLSN(long childLSN,
+ LogEntryType childType,
+ Node ignore,
+ byte[] ignore2,
+ int ignore3) {
+ if (childLSN != DbLsn.NULL_LSN) {
+ lsns.add(childLSN);
+ }
+ }
+
+ /* ignore */
+ @Override
+ public void processDirtyDeletedLN(long childLsn, LN ln, byte[] lnKey) {
+ }
+
+ public Set getLsns() {
+ return lsns;
+ }
+
+ @Override
+ public void noteMemoryExceeded() {
+ }
+ }
+
+ /**
+ * Compare utilization as calculated by UtilizationProfile to utilization
+ * as calculated by UtilizationFileReader. Also check that per-database
+ * and per-file utilization match.
+ *
+ * @throws EnvironmentFailureException if there are mismatches
+ */
+ public static void verifyUtilization(EnvironmentImpl envImpl,
+ boolean expectAccurateObsoleteLNCount,
+ boolean expectAccurateObsoleteLNSize,
+ boolean expectAccurateDbUtilization)
+ throws DatabaseException {
+
+ Map profileMap = envImpl.getCleaner()
+ .getUtilizationProfile()
+ .getFileSummaryMap(true);
+
+ /* Flush the log before reading. */
+ envImpl.getLogManager().flushNoSync();
+
+ /* Create per-file map of recalculated utilization info.
*/
+ Map recalcMap =
+ UtilizationFileReader.calcFileSummaryMap(envImpl);
+ /* Create per-file map derived from per-database utilization. */
+ Map dbDerivedMap = null;
+ if (expectAccurateDbUtilization) {
+ dbDerivedMap = calcDbDerivedUtilization(envImpl);
+ }
+
+ /*
+ * Loop through each file in the per-file profile, checking it against
+ * the recalculated map and database derived maps.
+ */
+ Iterator> i =
+ profileMap.entrySet().iterator();
+ while (i.hasNext()) {
+ Map.Entry entry = i.next();
+ Long file = entry.getKey();
+ String fileStr = file.toString();
+ FileSummary profileSummary = entry.getValue();
+ FileSummary recalcSummary = recalcMap.remove(file);
+ check(fileStr, recalcSummary != null);
+ /*
+ if (expectAccurateObsoleteLNCount &&
+ expectAccurateObsoleteLNSize &&
+ profileSummary.obsoleteLNSize !=
+ recalcSummary.getObsoleteLNSize()) {
+ System.out.println("file=" + file);
+ System.out.println("profile=" + profileSummary);
+ System.out.println("recalc=" + recalcSummary);
+ }
+ //*/
+ /*
+ if (expectAccurateObsoleteLNCount &&
+ profileSummary.obsoleteLNCount !=
+ recalcSummary.obsoleteLNCount) {
+ System.out.println("file=" + file);
+ System.out.println("profile=" + profileSummary);
+ System.out.println("recalc=" + recalcSummary);
+ }
+ //*/
+ /*
+ if (recalcSummary.totalCount !=
+ profileSummary.totalCount) {
+ System.out.println("file=" + file);
+ System.out.println("profile=" + profileSummary);
+ System.out.println("recalc=" + recalcSummary);
+ }
+ //*/
+ check(fileStr,
+ recalcSummary.totalCount == profileSummary.totalCount);
+ check(fileStr,
+ recalcSummary.totalSize == profileSummary.totalSize);
+ check(fileStr,
+ recalcSummary.totalINCount == profileSummary.totalINCount);
+ check(fileStr,
+ recalcSummary.totalINSize == profileSummary.totalINSize);
+ check(fileStr,
+ recalcSummary.totalLNCount == profileSummary.totalLNCount);
+ check(fileStr,
+ recalcSummary.totalLNSize == profileSummary.totalLNSize);
+
+ /*
+ * Currently we cannot verify obsolete INs because
+ * UtilizationFileReader does not count them accurately.
+ */
+ if (false) {
+ check(fileStr,
+ recalcSummary.obsoleteINCount ==
+ profileSummary.obsoleteINCount);
+ }
+
+ /*
+ * The obsolete LN count/size is inaccurate when a deleted LN is
+ * not counted properly by recovery because its parent INs were
+ * flushed and the obsolete LN was not found in the tree.
+ */
+ if (expectAccurateObsoleteLNCount) {
+ check(fileStr,
+ recalcSummary.obsoleteLNCount ==
+ profileSummary.obsoleteLNCount);
+
+ /*
+ * The obsolete LN size is inaccurate when a tree walk is
+ * performed for truncate/remove or an abortLsn is counted by
+ * recovery.
+ */
+ if (expectAccurateObsoleteLNSize) {
+ check(fileStr,
+ recalcSummary.getObsoleteLNSize() ==
+ profileSummary.obsoleteLNSize);
+ }
+ }
+
+ /*
+ * The per-database and per-file info normally match. It does not
+ * match, and expectAccurateDbUtilization should be passed as
+ * false, when we have truncated or removed a database, since that
+ * database's information is now gone.
+ */ + if (expectAccurateDbUtilization) { + DbFileSummary dbSummary = dbDerivedMap.remove(file); + if (dbSummary == null) { + dbSummary = new DbFileSummary(); + } + check(fileStr, + profileSummary.totalINCount == dbSummary.totalINCount); + check(fileStr, + profileSummary.totalLNCount == dbSummary.totalLNCount); + check(fileStr, + profileSummary.totalINSize == dbSummary.totalINSize); + check(fileStr, + profileSummary.totalLNSize == dbSummary.totalLNSize); + + /* + * Currently we cannot verify obsolete INs because + * UtilizationFileReader does not count them accurately. + */ + if (false) { + check(fileStr, + profileSummary.obsoleteINCount == + dbSummary.obsoleteINCount); + } + if (expectAccurateObsoleteLNCount) { + check(fileStr, + profileSummary.obsoleteLNCount == + dbSummary.obsoleteLNCount); + if (expectAccurateObsoleteLNSize) { + check(fileStr, + profileSummary.obsoleteLNSize == + dbSummary.obsoleteLNSize); + check(fileStr, + profileSummary.obsoleteLNSizeCounted == + dbSummary.obsoleteLNSizeCounted); + } + } + } + } + check(recalcMap.toString(), recalcMap.isEmpty()); + if (expectAccurateDbUtilization) { + check(dbDerivedMap.toString(), dbDerivedMap.isEmpty()); + } + } + + private static void check(String errorMessage, boolean checkIsTrue) { + if (!checkIsTrue) { + throw EnvironmentFailureException.unexpectedState(errorMessage); + } + } + + /** + * Adds up the per-file totals from the utilization information for each + * database to make a total per-file count. + * + * @return aggregation of per-file information. + */ + private static Map calcDbDerivedUtilization + (EnvironmentImpl envImpl) + throws DatabaseException { + + final Map grandTotalsMap = + new HashMap(); + + DbTree dbTree = envImpl.getDbTree(); + + /* Add in the special id and name database. */ + addDbDerivedTotals(dbTree.getDb(DbTree.ID_DB_ID), grandTotalsMap); + addDbDerivedTotals(dbTree.getDb(DbTree.NAME_DB_ID), grandTotalsMap); + + /* Walk through all the regular databases. */ + CursorImpl.traverseDbWithCursor(dbTree.getDb(DbTree.ID_DB_ID), + LockType.NONE, + true /*allowEviction*/, + new CursorImpl.WithCursor() { + @Override + public boolean withCursor(CursorImpl cursor, + DatabaseEntry key, + DatabaseEntry data) + throws DatabaseException { + + MapLN mapLN = (MapLN) + cursor.lockAndGetCurrentLN(LockType.NONE); + + addDbDerivedTotals(mapLN.getDatabase(), grandTotalsMap); + return true; + } + }); + return grandTotalsMap; + } + + /** + * Walk through the DbFileSummaryMap associated with a single database and + * aggregate all the per-file/per db information into a single per-file + * grandTotals map. + */ + private static void addDbDerivedTotals + (DatabaseImpl dbImpl, + Map grandTotalsMap) { + + Iterator> entries = + dbImpl.getDbFileSummaries().entrySet().iterator(); + + while (entries.hasNext()) { + Map.Entry entry = entries.next(); + Long fileNum = entry.getKey(); + DbFileSummary dbTotals = entry.getValue(); + DbFileSummary grandTotals = grandTotalsMap.get(fileNum); + if (grandTotals == null) { + grandTotals = new DbFileSummary(); + grandTotalsMap.put(fileNum, grandTotals); + } + grandTotals.add(dbTotals); + } + } +} diff --git a/src/com/sleepycat/je/cleaner/package-info.java b/src/com/sleepycat/je/cleaner/package-info.java new file mode 100644 index 0000000..2c2302e --- /dev/null +++ b/src/com/sleepycat/je/cleaner/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: disk garbage collection.
+ */
+package com.sleepycat.je.cleaner;
diff --git a/src/com/sleepycat/je/config/BooleanConfigParam.java b/src/com/sleepycat/je/config/BooleanConfigParam.java
new file mode 100644
index 0000000..459170a
--- /dev/null
+++ b/src/com/sleepycat/je/config/BooleanConfigParam.java
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with a boolean value.
+ */
+public class BooleanConfigParam extends ConfigParam {
+
+ private static final String DEBUG_NAME =
+ BooleanConfigParam.class.getName();
+
+ /**
+ * Create a boolean parameter with a default value.
+ * @param configName
+ * @param defaultValue
+ * @param mutable true if the parameter is mutable
+ * @param forReplication true if param is for replication
+ */
+ public BooleanConfigParam(String configName,
+ boolean defaultValue,
+ boolean mutable,
+ boolean forReplication) {
+ /* defaultValue must not be null. */
+ super(configName,
+ Boolean.valueOf(defaultValue).toString(),
+ mutable,
+ forReplication);
+ }
+
+ /**
+ * Make sure that value is a valid string for booleans.
+ */
+ @Override
+ public void validateValue(String value)
+ throws IllegalArgumentException {
+
+ if (!value.trim().equalsIgnoreCase(Boolean.FALSE.toString()) &&
+ !value.trim().equalsIgnoreCase(Boolean.TRUE.toString())) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ": " + value + " not valid boolean " + name);
+ }
+ }
+}
diff --git a/src/com/sleepycat/je/config/ConfigParam.java b/src/com/sleepycat/je/config/ConfigParam.java
new file mode 100644
index 0000000..f4cba68
--- /dev/null
+++ b/src/com/sleepycat/je/config/ConfigParam.java
@@ -0,0 +1,147 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.config;
+
+import com.sleepycat.je.EnvironmentFailureException;
+
+/**
+ * A ConfigParam embodies the metadata about a JE configuration parameter:
+ * the parameter name, default value, and a validation method.
+ *
+ * Validation can be done in the scope of this parameter, or as a function of
+ * other parameters.
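+ *
+ * <p>Illustrative sketch (not part of the original source) of declaring
+ * a simple string parameter; the constructor below registers the new
+ * param in EnvironmentParams.SUPPORTED_PARAMS:
+ * <pre>
+ *     // hypothetical parameter, for illustration only
+ *     ConfigParam example =
+ *         new ConfigParam("je.example.param", // configName
+ *                         "someDefault",      // configDefault
+ *                         false,              // mutable
+ *                         false);             // forReplication
+ * </pre>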
+ */
+public class ConfigParam {
+
+ protected String name;
+ private String defaultValue;
+ private boolean mutable;
+ private boolean forReplication;
+ private boolean isMultiValueParam;
+
+ /*
+ * Create a String parameter.
+ */
+ public ConfigParam(String configName,
+ String configDefault,
+ boolean mutable,
+ boolean forReplication)
+ throws IllegalArgumentException {
+
+ if (configName == null) {
+ name = null;
+ } else {
+
+ /*
+ * For Multi-Value params (i.e. those whose names end with ".#"),
+ * strip the .# off the end of the name before storing and flag it
+ * with isMultiValueParam=true.
+ */
+ int mvFlagIdx = configName.indexOf(".#");
+ if (mvFlagIdx < 0) {
+ name = configName;
+ isMultiValueParam = false;
+ } else {
+ name = configName.substring(0, mvFlagIdx);
+ isMultiValueParam = true;
+ }
+ }
+
+ defaultValue = configDefault;
+ this.mutable = mutable;
+ this.forReplication = forReplication;
+
+ /* Check that the name and default value are valid. */
+ validateName(name);
+ validateValue(configDefault);
+
+ /* Add it to the list of supported environment parameters. */
+ EnvironmentParams.addSupportedParam(this);
+ }
+
+ /*
+ * Return the parameter name of a multi-value parameter. e.g.
+ * "je.rep.remote.address.foo" => "je.rep.remote.address"
+ */
+ public static String multiValueParamName(String paramName) {
+ int mvParamIdx = paramName.lastIndexOf('.');
+ if (mvParamIdx < 0) {
+ return null;
+ }
+ return paramName.substring(0, mvParamIdx);
+ }
+
+ /*
+ * Return the label of a multi-value parameter. e.g.
+ * "je.rep.remote.address.foo" => foo.
+ */
+ public static String mvParamIndex(String paramName) {
+
+ int mvParamIdx = paramName.lastIndexOf('.');
+ return paramName.substring(mvParamIdx + 1);
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getDefault() {
+ return defaultValue;
+ }
+
+ public boolean isMutable() {
+ return mutable;
+ }
+
+ public boolean isForReplication() {
+ return forReplication;
+ }
+
+ public void setForReplication(boolean forReplication) {
+ this.forReplication = forReplication;
+ }
+
+ public boolean isMultiValueParam() {
+ return isMultiValueParam;
+ }
+
+ /**
+ * A param name can't be null or 0 length.
+ */
+ private void validateName(String name)
+ throws IllegalArgumentException {
+
+ if ((name == null) || (name.length() < 1)) {
+ throw EnvironmentFailureException.unexpectedState
+ ("A configuration parameter name can't be null or 0 length");
+ }
+ }
+
+ /**
+ * Validate the value. (No default validation for strings.)
+ * May be overridden for (e.g.) Multi-value params.
+ *
+ * @throws IllegalArgumentException via XxxConfig.setXxx methods and
+ * XxxConfig(Properties) ctor.
+ */
+ public void validateValue(String value)
+ throws IllegalArgumentException {
+
+ }
+
+ @Override
+ public String toString() {
+ return name;
+ }
+}
diff --git a/src/com/sleepycat/je/config/DurationConfigParam.java b/src/com/sleepycat/je/config/DurationConfigParam.java
new file mode 100644
index 0000000..c261340
--- /dev/null
+++ b/src/com/sleepycat/je/config/DurationConfigParam.java
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.config; + +import com.sleepycat.je.utilint.PropUtil; + +/** + * A JE configuration parameter with a duration integer value in milliseconds. + * The String format is described under Time Duration Properties in the + * EnvironmentConfig javadoc. + */ +public class DurationConfigParam extends ConfigParam { + + private static final String DEBUG_NAME = + DurationConfigParam.class.getName(); + + private String minString; + private int minMillis; + private String maxString; + private int maxMillis; + + public DurationConfigParam(String configName, + String minVal, + String maxVal, + String defaultValue, + boolean mutable, + boolean forReplication) { + super(configName, defaultValue, mutable, forReplication); + if (minVal != null) { + minString = minVal; + minMillis = PropUtil.parseDuration(minVal); + } + if (maxVal != null) { + maxString = maxVal; + maxMillis = PropUtil.parseDuration(maxVal); + } + } + + @Override + public void validateValue(String value) + throws IllegalArgumentException { + + final int millis; + try { + /* Parse for validation side-effects. */ + millis = PropUtil.parseDuration(value); + } catch (IllegalArgumentException e) { + /* Identify this property in the exception message. */ + throw new IllegalArgumentException + (DEBUG_NAME + ":" + + " param " + name + + " doesn't validate, " + + value + + " fails validation: " + e.getMessage()); + } + /* Check min/max. */ + if (minString != null) { + if (millis < minMillis) { + throw new IllegalArgumentException + (DEBUG_NAME + ":" + + " param " + name + + " doesn't validate, " + + value + + " is less than min of "+ + minString); + } + } + if (maxString != null) { + if (millis > maxMillis) { + throw new IllegalArgumentException + (DEBUG_NAME + ":" + + " param " + name + + " doesn't validate, " + + value + + " is greater than max of " + + maxString); + } + } + } +} diff --git a/src/com/sleepycat/je/config/EnvironmentParams.java b/src/com/sleepycat/je/config/EnvironmentParams.java new file mode 100644 index 0000000..f8d96e7 --- /dev/null +++ b/src/com/sleepycat/je/config/EnvironmentParams.java @@ -0,0 +1,1833 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.config; + +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Level; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; + +/** + */ +public class EnvironmentParams { + + /* The prefix for all JE replication parameters. 
*/ + public static final String REP_PARAM_PREFIX = "je.rep."; + + /* + * The map of supported environment parameters where the key is parameter + * name and the data is the configuration parameter object. Put first, + * before any declarations of ConfigParams. + */ + public final static Map SUPPORTED_PARAMS = + new HashMap(); + + /* + * Only environment parameters that are part of the public API are + * represented by String constants in EnvironmentConfig. + */ + public static final LongConfigParam MAX_MEMORY = + new LongConfigParam(EnvironmentConfig.MAX_MEMORY, + null, // min + null, // max + 0L, // default uses je.maxMemoryPercent + true, // mutable + false); // forReplication + + public static final IntConfigParam MAX_MEMORY_PERCENT = + new IntConfigParam(EnvironmentConfig.MAX_MEMORY_PERCENT, + 1, // min + 90, // max + 60, // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_SHARED_CACHE = + new BooleanConfigParam(EnvironmentConfig.SHARED_CACHE, + false, // default + false, // mutable + false); // forReplication + + public static final LongConfigParam MAX_DISK = + new LongConfigParam(EnvironmentConfig.MAX_DISK, + 0L, // min + null, // max + 0L, // default + true, // mutable + false); // forReplication + + public static final LongConfigParam FREE_DISK = + new LongConfigParam(EnvironmentConfig.FREE_DISK, + 0L, // min + null, // max + 5368709120L, // default + true, // mutable + false); // forReplication + + /** + * Used by utilities, not exposed in the API. + * + * If true, even when recovery is not run (see ENV_RECOVERY) by a utility, + * the btree and dup comparators will be instantiated. Set to true by + * utilities such as DbScavenger that need comparators in spite of not + * needing recovery. + */ + public static final BooleanConfigParam ENV_COMPARATORS_REQUIRED = + new BooleanConfigParam("je.env.comparatorsRequired", + false, // default + false, // mutable + false); // forReplication + + /** + * Used by utilities, not exposed in the API. + * + * If true, an environment is created with recovery and the related daemon + * threads are enabled. 
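+ *
+ * <p>Hypothetical sketch (not in the original source) of a utility
+ * disabling recovery through this hidden param:
+ * <pre>
+ *     EnvironmentConfig config = new EnvironmentConfig();
+ *     config.setConfigParam("je.env.recovery", "false");
+ * </pre>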
+ */ + public static final BooleanConfigParam ENV_RECOVERY = + new BooleanConfigParam("je.env.recovery", + true, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_RECOVERY_FORCE_CHECKPOINT = + new BooleanConfigParam(EnvironmentConfig.ENV_RECOVERY_FORCE_CHECKPOINT, + false, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_RECOVERY_FORCE_NEW_FILE = + new BooleanConfigParam(EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE, + false, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam + HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION = + new BooleanConfigParam( + EnvironmentConfig.HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION, + false, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_RUN_INCOMPRESSOR = + new BooleanConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + true, // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_RUN_EVICTOR = + new BooleanConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, + true, // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_DUP_CONVERT_PRELOAD_ALL = + new BooleanConfigParam(EnvironmentConfig.ENV_DUP_CONVERT_PRELOAD_ALL, + true, // default + false, // mutable + false); // forReplication + + /** + * @deprecated as of JE 4.1 + */ + private static final DurationConfigParam EVICTOR_WAKEUP_INTERVAL = + new DurationConfigParam("je.evictor.wakeupInterval", + "1 s", // min + "75 min", // max + "5 s", // default + false, // mutable + false); + + public static final IntConfigParam EVICTOR_CORE_THREADS = + new IntConfigParam(EnvironmentConfig.EVICTOR_CORE_THREADS, + 0, // min + Integer.MAX_VALUE, // max + 1, // default + true, // mutable + false); // forReplication + + public static final IntConfigParam EVICTOR_MAX_THREADS = + new IntConfigParam(EnvironmentConfig.EVICTOR_MAX_THREADS, + 1, // min + Integer.MAX_VALUE, // max + 10, // default + true, // mutable + false); // forReplication + + public static final DurationConfigParam EVICTOR_KEEP_ALIVE = + new DurationConfigParam(EnvironmentConfig.EVICTOR_KEEP_ALIVE, + "1 s", // min + "24 h", // max + "10 min", // default + true, // mutable + false); // forReplication + + /** + * The amount of time to wait for the eviction pool to terminate, in order + * to create a clean shutdown. An intentionally unadvertised parameter, of + * use mainly for unit test cleanup. + */ + public static final DurationConfigParam EVICTOR_TERMINATE_TIMEOUT = + new DurationConfigParam("je.env.terminateTimeout", + "1 ms", // min + "60 s", // max + "10 s", // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam EVICTOR_ALLOW_BIN_DELTAS = + new BooleanConfigParam(EnvironmentConfig.EVICTOR_ALLOW_BIN_DELTAS, + true, // default + false, // mutable + false); // forReplication + + /* + * Not exposed in the API because we expect that BIN mutation will + * always be beneficial. Intended only for debugging and testing. 
+ */ + public static final BooleanConfigParam EVICTOR_MUTATE_BINS = + new BooleanConfigParam("je.evictor.mutateBins", + true, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_RUN_CHECKPOINTER = + new BooleanConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + true, // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_RUN_CLEANER = + new BooleanConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + true, // default + true, // mutable + false); // forReplication + + public static final IntConfigParam ENV_BACKGROUND_READ_LIMIT = + new IntConfigParam(EnvironmentConfig.ENV_BACKGROUND_READ_LIMIT, + 0, // min + Integer.MAX_VALUE, // max + 0, // default + true, // mutable + false); // forReplication + + public static final IntConfigParam ENV_BACKGROUND_WRITE_LIMIT = + new IntConfigParam(EnvironmentConfig.ENV_BACKGROUND_WRITE_LIMIT, + 0, // min + Integer.MAX_VALUE, // max + 0, // default + true, // mutable + false); // forReplication + + public static final DurationConfigParam ENV_BACKGROUND_SLEEP_INTERVAL = + new DurationConfigParam( + EnvironmentConfig.ENV_BACKGROUND_SLEEP_INTERVAL, + "1 ms", // min + null, // max + "1 ms", // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_CHECK_LEAKS = + new BooleanConfigParam(EnvironmentConfig.ENV_CHECK_LEAKS, + true, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_FORCED_YIELD = + new BooleanConfigParam(EnvironmentConfig.ENV_FORCED_YIELD, + false, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_INIT_TXN = + new BooleanConfigParam(EnvironmentConfig.ENV_IS_TRANSACTIONAL, + false, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_INIT_LOCKING = + new BooleanConfigParam(EnvironmentConfig.ENV_IS_LOCKING, + true, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_RDONLY = + new BooleanConfigParam(EnvironmentConfig.ENV_READ_ONLY, + false, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_FAIR_LATCHES = + new BooleanConfigParam(EnvironmentConfig.ENV_FAIR_LATCHES, + false, // default + false, // mutable + false); // forReplication + + /** + * Not part of the public API. As of 3.3, is true by default. As of 6.0, + * it is no longer used (and latches are always shared when possible). + * The param is left in place just to avoid errors from config settings. 
+ */ + private static final BooleanConfigParam ENV_SHARED_LATCHES = + new BooleanConfigParam("je.env.sharedLatches", + true, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_SETUP_LOGGER = + new BooleanConfigParam("je.env.setupLogger", + false, // default + false, // mutable + false); // forReplication + + public static final DurationConfigParam ENV_LATCH_TIMEOUT = + new DurationConfigParam(EnvironmentConfig.ENV_LATCH_TIMEOUT, + "1 ms", // min + null, // max + "5 min", // default + false, // mutable + false); // forReplication + + public static final DurationConfigParam ENV_TTL_CLOCK_TOLERANCE = + new DurationConfigParam(EnvironmentConfig.ENV_TTL_CLOCK_TOLERANCE, + "1 ms", // min + null, // max + "2 h", // default + false, // mutable + false); // forReplication + + /** + * Hidden (for now) parameter to control the assumed maximum length that a + * lock may be held. It is used to determine when a record might expire + * during a transaction, so we can avoid extra locking or checking for + * locks when a record should not expire during the current transaction. + */ + public static final DurationConfigParam ENV_TTL_MAX_TXN_TIME = + new DurationConfigParam("je.env.ttlMaxTxnTime", + null, // min + null, // max + "24 h", // default + false, // mutable + false); // forReplication + + /** + * Hidden (for now) parameter to determine the amount added to the + * expirationTime of a record to determine when to purge it, in the + * cleaner. The goal is to ensure (disregarding clock changes) that when a + * record is locked, its LN will not be purged. We lock the record before + * fetching the LN, so delaying the purge of the LN a little should prevent + * purging a locked LN due to thread scheduling issues. + */ + public static final DurationConfigParam ENV_TTL_LN_PURGE_DELAY = + new DurationConfigParam("je.env.ttlLnPurgeDelay", + null, // min + null, // max + "5 s", // default + false, // mutable + false); // forReplication + + /** + * Hidden (for now) parameter to allow user key/data values to be included + * in exception messages, log messages, etc. For example, when this is set + * to true, the SecondaryReferenceException message will include the + * primary key and secondary key. 
+ */ + public static final BooleanConfigParam ENV_EXPOSE_USER_DATA = + new BooleanConfigParam("je.env.exposeUserData", + false, // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_DB_EVICTION = + new BooleanConfigParam(EnvironmentConfig.ENV_DB_EVICTION, + true, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam ADLER32_CHUNK_SIZE = + new IntConfigParam(EnvironmentConfig.ADLER32_CHUNK_SIZE, + 0, // min + 1 << 20, // max + 0, // default + true, // mutable + false); // forReplication + + /* + * Database Logs + */ + /* default: 2k * NUM_LOG_BUFFERS */ + public static final int MIN_LOG_BUFFER_SIZE = 2048; + public static final int NUM_LOG_BUFFERS_DEFAULT = 3; + public static final long LOG_MEM_SIZE_MIN = + NUM_LOG_BUFFERS_DEFAULT * MIN_LOG_BUFFER_SIZE; + public static final String LOG_MEM_SIZE_MIN_STRING = + Long.toString(LOG_MEM_SIZE_MIN); + + public static final LongConfigParam LOG_MEM_SIZE = + new LongConfigParam(EnvironmentConfig.LOG_TOTAL_BUFFER_BYTES, + LOG_MEM_SIZE_MIN, // min + null, // max + 0L, // by default computed + // from je.maxMemory + false, // mutable + false); // forReplication + + public static final IntConfigParam NUM_LOG_BUFFERS = + new IntConfigParam(EnvironmentConfig.LOG_NUM_BUFFERS, + 2, // min + null, // max + NUM_LOG_BUFFERS_DEFAULT, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam LOG_BUFFER_MAX_SIZE = + new IntConfigParam(EnvironmentConfig.LOG_BUFFER_SIZE, + 1 << 10, // min + null, // max + 1 << 20, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam LOG_FAULT_READ_SIZE = + new IntConfigParam(EnvironmentConfig.LOG_FAULT_READ_SIZE, + 32, // min + null, // max + 2048, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam LOG_ITERATOR_READ_SIZE = + new IntConfigParam(EnvironmentConfig.LOG_ITERATOR_READ_SIZE, + 128, // min + null, // max + 8192, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam LOG_ITERATOR_MAX_SIZE = + new IntConfigParam(EnvironmentConfig.LOG_ITERATOR_MAX_SIZE, + 128, // min + null, // max + 16777216, // default + false, // mutable + false); // forReplication + + public static final LongConfigParam LOG_FILE_MAX = + new LongConfigParam(EnvironmentConfig.LOG_FILE_MAX, + 1000000L, // min + 1073741824L, // max + 10000000L, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam LOG_N_DATA_DIRECTORIES = + new IntConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + 0, // min + 256, // max + 0, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam LOG_CHECKSUM_READ = + new BooleanConfigParam(EnvironmentConfig.LOG_CHECKSUM_READ, + true, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam LOG_VERIFY_CHECKSUMS = + new BooleanConfigParam(EnvironmentConfig.LOG_VERIFY_CHECKSUMS, + false, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam LOG_MEMORY_ONLY = + new BooleanConfigParam(EnvironmentConfig.LOG_MEM_ONLY, + false, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam LOG_FILE_CACHE_SIZE = + new IntConfigParam(EnvironmentConfig.LOG_FILE_CACHE_SIZE, + 3, // min + null, // max + 100, // default + false, // mutable + false); // forReplication + + 
/**
+ * This is experimental and pending performance tests. Javadoc and change
+ * log are commented out below, and can be used if we decide to use this.
+ */
+ public static final IntConfigParam LOG_FILE_WARM_UP_SIZE =
+ new IntConfigParam("je.log.fileWarmUpSize",
+ 0, // min
+ null, // max
+ 0, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * This is experimental and pending performance tests. Javadoc and change
+ * log are commented out below, and can be used if we decide to use this.
+ */
+ public static final IntConfigParam LOG_FILE_WARM_UP_BUF_SIZE =
+ new IntConfigParam("je.log.fileWarmUpReadSize",
+ 128, // min
+ null, // max
+ 10485760, // default
+ false, // mutable
+ false); // forReplication
+
+ /*
+ * Whether to detect unexpected log file deletion.
+ */
+ public static final BooleanConfigParam LOG_DETECT_FILE_DELETE =
+ new BooleanConfigParam(EnvironmentConfig.LOG_DETECT_FILE_DELETE,
+ true, // default
+ false, // mutable
+ false); // forReplication
+
+ /*
+ * The interval used to check for unexpected file deletions.
+ */
+ public static final DurationConfigParam LOG_DETECT_FILE_DELETE_INTERVAL =
+ new DurationConfigParam("je.log.detectFileDeleteInterval",
+ "1 ms", // min
+ null, // max
+ "1000 ms", // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * The size in MiB to be read sequentially at the end of the log in order
+ * to warm the file system cache.
+ *
+ * <p>
        + * Making use of sequential reads to warm the file system cache has the + * benefit of reducing random reads caused by CRUD operations, and thereby + * increasing throughput and latency for these operations. This is + * especially true during the initial period after opening an Environment, + * when CRUD operations must fetch Btree internal nodes from the file + * system in order to populate the JE cache. The fetches due to JE cache + * misses typically cause random reads. Often the Btree internal nodes that + * are needed appear close to the end of the log because they were written + * fairly recently by checkpoints, and this is why warming the cache with + * the data at the end of the log is often beneficial. + *
+ * <p>
        + * The warm-up occurs concurrently with recovery when an Environment is + * opened. It may finish before recovery finishes, or continue after + * recovery finishes when recovery is brief. In the latter case, the + * warm-up is concurrent with the application's CRUD operations. A + * dedicated thread is used for the warm-up, and this thread is destroyed + * when warm-up is complete. + *

        + * Recovery itself will perform at least a partial warm-up implicitly, + * since it reads the log (sequentially), and in fact it may read more than + * the configured warm-up size. The warm-up thread will only read the + * portion of the log not being read by recovery, and only when the warm-up + * size is larger than the size read by recovery (i.e., it reads the + * difference between these two sizes). + *
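+ *
+ * In other words (a minimal sketch; the method and names are illustrative,
+ * not JE source):
+ *
+ *   static long warmUpThreadBytes(long warmUpSize, long recoverySize) {
+ *       // Read only the portion not already covered by recovery's reads.
+ *       return Math.max(0, warmUpSize - recoverySize);
+ *   }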

        + * The size read by recovery is dependent on whether the Environment was + * previously closed cleanly (a crash did not occur and the application + * called Environment.close), and on the size of the last complete + * checkpoint. When the environment is closed cleanly with a small + * checkpoint, recovery will only read a small portion of the log, and in + * this case the additional reads performed by the warm-up thread can be + * very beneficial. + *

+ * If the warm-up size is larger than the amount of memory available to the
+ * file system cache, then the warm-up may be counterproductive, although
+ * the default warm-up size (1 GiB) was chosen to avoid this problem in
+ * most cases. (TODO: change this text, or change the default to 1024.)
+ * Applications are advised to change the warm-up size based on
+ * knowledge of the amount of physical memory on the machine and how much
+ * is expected to be available as file system cache. The warm-up may be
+ * disabled by setting the warm-up size to zero, although of course
+ * recovery will continue to do some amount of warm-up implicitly.
+ *

        + * The warm-up thread performs read operations using a single buffer and it + * reads as much as will fit in the buffer at a time. The size of the + * buffer, and therefore the maximum size of each read, is {@link + * #LOG_FILE_WARM_UP_READ_SIZE}. Files are read in the reverse of the order + * they were written. + * + *
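+ *
+ * A rough sketch of such a read loop (illustrative only, not the JE
+ * implementation; logFilesNewestFirst is a hypothetical helper):
+ *
+ *   ByteBuffer buf = ByteBuffer.allocate(readBufferSize);
+ *   for (File file : logFilesNewestFirst()) { // reverse of write order
+ *       try (FileChannel ch = FileChannel.open(
+ *               file.toPath(), StandardOpenOption.READ)) {
+ *           long pos = 0;
+ *           int n;
+ *           while ((n = ch.read(buf, pos)) > 0) { // sequential reads
+ *               pos += n;
+ *               buf.clear(); // one buffer, reused for every read
+ *           }
+ *       }
+ *   }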

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ *     <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ *     <td>0</td><td>0</td><td>-none-</td></tr>
+ * </table>

        + public static final String LOG_FILE_WARM_UP_SIZE = "je.log.fileWarmUpSize"; + */ + + /** + * The read buffer size for warming the file system cache; see {@link + * #LOG_FILE_WARM_UP_SIZE}. + * + * Because the warm-up can be concurrent with application CRUD operations, + * it is important that a large buffer size be used for reading the data + * files during the warm-up. That way, the warm-up is performed using + * sequential reads to a large degree, even though CRUD operations may + * cause some random I/O. Sequential reads are required to obtain the + * performance benefit of the warm-up. + *

        + * Note that this buffer is allocated outside of the JE cache, so the Java + * heap size must be set accordingly. + *

        + * The default value, 10 MiB, is designed to reduce random I/O to some + * degree. It should be made larger to perform the warm-up more quickly, + * especially if there are many application threads performing CRUD + * operations. In our tests, using a value of 100 MiB minimized the time to + * complete the warm-up while 20 threads performed CRUD operations. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+ *     <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td>
+ *     <td>10485760 (10 MiB)</td><td>128</td><td>-none-</td></tr>
+ * </table>
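+ *
+ * For illustration, if these experimental parameters are enabled, a
+ * je.properties file might contain entries like the following (a sketch
+ * using the hidden names defined earlier in this class; note that the two
+ * values use different units):
+ *
+ *   # Warm-up size, in MiB (here 1 GiB):
+ *   je.log.fileWarmUpSize=1024
+ *   # Warm-up read buffer size, in bytes (here 10 MiB, the default):
+ *   je.log.fileWarmUpReadSize=10485760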

        + public static final String LOG_FILE_WARM_UP_READ_SIZE = + "je.log.fileWarmUpReadSize"; + */ + + /* Future change log entry for above feature: (adjust for default value) +
      • + JE now warms the file system cache at startup by sequentially reading at least + 1 GiB (by default) at the end of the data log, even if this amount is not read + by recovery. +

+ Making use of sequential reads to warm the file system cache has the
+ benefit of reducing random reads caused by CRUD operations, and thereby
+ increasing throughput and reducing latency for these operations. This is
+ especially true during the initial period after opening an Environment,
+ when CRUD operations must fetch Btree internal nodes from the file
+ system in order to populate the JE cache. The fetches due to JE cache
+ misses typically cause random reads. Often the Btree internal nodes that
+ are needed appear close to the end of the log because they were written
+ fairly recently by checkpoints, and this is why warming the cache with
+ the data at the end of the log is often beneficial.
+

+ A new config param, EnvironmentConfig.LOG_FILE_WARM_UP_SIZE, can be modified to
+ change the size of the log read during warm-up, or to disable the warm-up. See
+ the javadoc for this parameter for details on the warm-up behavior. Another
+ new parameter, EnvironmentConfig.LOG_FILE_WARM_UP_READ_SIZE, provides control
+ over the buffer size for the warm-up. Applications running with very small
+ heaps or very little memory available to the file system cache should disable
+ the warm-up or reduce these param values from their default settings.
+

        + [#23893] (6.2.27) +


      • + */ + + public static final DurationConfigParam LOG_FSYNC_TIMEOUT = + new DurationConfigParam(EnvironmentConfig.LOG_FSYNC_TIMEOUT, + "10 ms", // min + null, // max + "500 ms", // default + false, // mutable + false); // forReplication + + public static final DurationConfigParam LOG_FSYNC_TIME_LIMIT = + new DurationConfigParam(EnvironmentConfig.LOG_FSYNC_TIME_LIMIT, + "0", // min + "30 s", // max + "5 s", // default + false, // mutable + false); // forReplication + + public static final DurationConfigParam LOG_GROUP_COMMIT_INTERVAL = + new DurationConfigParam(EnvironmentConfig.LOG_GROUP_COMMIT_INTERVAL, + "0 ns", // min + null, // max + "0 ns", // default + false, // mutable + false); // forReplication + + public static final IntConfigParam LOG_GROUP_COMMIT_THRESHOLD = + new IntConfigParam(EnvironmentConfig.LOG_GROUP_COMMIT_THRESHOLD, + 0, // min + null, // max + 0, // default + false, // mutable + false); // forReplication + + /** + * @see EnvironmentConfig#LOG_FLUSH_SYNC_INTERVAL + */ + public static final DurationConfigParam LOG_FLUSH_SYNC_INTERVAL = + new DurationConfigParam( + EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL, + "0", // min + null, // max + "20 s", // default + true, // mutable + false); // forReplication + + /** + * @see EnvironmentConfig#LOG_FLUSH_NO_SYNC_INTERVAL + */ + public static final DurationConfigParam LOG_FLUSH_NO_SYNC_INTERVAL = + new DurationConfigParam( + EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL, + "0", // min + null, // max + "5 s", // default + true, // mutable + false); // forReplication + + /** + * Deprecated but still supported for backward compatibility. + */ + public static final BooleanConfigParam OLD_REP_RUN_LOG_FLUSH_TASK = + new BooleanConfigParam( + EnvironmentParams.REP_PARAM_PREFIX + "runLogFlushTask", + true, // default + true, // mutable + true); // forReplication + + /** + * Deprecated but still supported for backward compatibility. 
+ */
+ public static final DurationConfigParam OLD_REP_LOG_FLUSH_TASK_INTERVAL =
+ new DurationConfigParam(
+ EnvironmentParams.REP_PARAM_PREFIX + "logFlushTaskInterval",
+ "1 s", // min
+ null, // max
+ "5 min", // default
+ true, // mutable
+ true); // forReplication
+
+ public static final BooleanConfigParam LOG_USE_ODSYNC =
+ new BooleanConfigParam(EnvironmentConfig.LOG_USE_ODSYNC,
+ false, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final BooleanConfigParam LOG_USE_NIO =
+ new BooleanConfigParam(EnvironmentConfig.LOG_USE_NIO,
+ false, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final BooleanConfigParam LOG_USE_WRITE_QUEUE =
+ new BooleanConfigParam(EnvironmentConfig.LOG_USE_WRITE_QUEUE,
+ true, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam LOG_WRITE_QUEUE_SIZE =
+ new IntConfigParam(EnvironmentConfig.LOG_WRITE_QUEUE_SIZE,
+ 1 << 12, // min (4KB)
+ 1 << 28, // max (256MB)
+ 1 << 20, // default (1MB)
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ */
+ private static final BooleanConfigParam LOG_DIRECT_NIO =
+ new BooleanConfigParam(EnvironmentConfig.LOG_DIRECT_NIO,
+ false, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ */
+ private static final LongConfigParam LOG_CHUNKED_NIO =
+ new LongConfigParam(EnvironmentConfig.LOG_CHUNKED_NIO,
+ 0L, // min
+ 1L << 26, // max (64M)
+ 0L, // default (no chunks)
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated As of 3.3, no longer used
+ *
+ * Optimize cleaner operation for temporary deferred write DBs.
+ */
+ public static final BooleanConfigParam LOG_DEFERREDWRITE_TEMP =
+ new BooleanConfigParam("je.deferredWrite.temp",
+ false, // default
+ false, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#ENV_RUN_VERIFIER
+ */
+ public static final BooleanConfigParam ENV_RUN_VERIFIER =
+ new BooleanConfigParam(EnvironmentConfig.ENV_RUN_VERIFIER,
+ true, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_SCHEDULE
+ */
+ public static final ConfigParam VERIFY_SCHEDULE =
+ new ConfigParam(EnvironmentConfig.VERIFY_SCHEDULE,
+ "0 0 * * *", // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * The maximum tardiness tolerated when deciding whether a delayed
+ * scheduled run of the verifier may still execute.
+ *

* Normally, the verifier runs at most once per scheduled interval. If the
+ * complete verification (log verification followed by Btree verification)
+ * takes longer than the scheduled interval, then the next verification
+ * will start at the next increment of the interval. For example, if the
+ * default schedule is used (once per day at midnight), and verification
+ * takes 25 hours, then verification will occur once every two
+ * days (48 hours), starting at midnight.
+ *

+ * But sometimes some degree of tardiness can be tolerated. For example,
+ * if the default schedule is used (once per day at midnight) and the
+ * verification takes 24 hours and 5 minutes, the remaining 23 hours and
+ * 55 minutes may be considered close enough to a full interval to allow
+ * the next scheduled run to proceed.
+ *
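+ *
+ * A minimal sketch of the resulting decision (names are illustrative,
+ * not JE source):
+ *
+ *   long tardinessMs = actualStartTimeMs - scheduledStartTimeMs;
+ *   boolean runNow = (tardinessMs <= maxTardinessMs); // otherwise skip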

+ * VERIFY_MAX_TARDINESS constrains this tolerated tardiness: if the
+ * tardiness caused by the current long-running verification exceeds
+ * VERIFY_MAX_TARDINESS, then the scheduled run is skipped.
+ */
+ public static final DurationConfigParam VERIFY_MAX_TARDINESS =
+ new DurationConfigParam("je.env.verifyMaxTardiness",
+ "1 s", // min
+ null, // max
+ "5 min", // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_LOG
+ */
+ public static final BooleanConfigParam VERIFY_LOG =
+ new BooleanConfigParam(EnvironmentConfig.VERIFY_LOG,
+ true, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_LOG_READ_DELAY
+ */
+ public static final DurationConfigParam VERIFY_LOG_READ_DELAY =
+ new DurationConfigParam(EnvironmentConfig.VERIFY_LOG_READ_DELAY,
+ "0 ms", // min
+ "10 s", // max
+ "100 ms", // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_BTREE
+ */
+ public static final BooleanConfigParam VERIFY_BTREE =
+ new BooleanConfigParam(EnvironmentConfig.VERIFY_BTREE,
+ true, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_SECONDARIES
+ */
+ public static final BooleanConfigParam VERIFY_SECONDARIES =
+ new BooleanConfigParam(EnvironmentConfig.VERIFY_SECONDARIES,
+ true, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_DATA_RECORDS
+ */
+ public static final BooleanConfigParam VERIFY_DATA_RECORDS =
+ new BooleanConfigParam(EnvironmentConfig.VERIFY_DATA_RECORDS,
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_OBSOLETE_RECORDS
+ */
+ public static final BooleanConfigParam VERIFY_OBSOLETE_RECORDS =
+ new BooleanConfigParam(EnvironmentConfig.VERIFY_OBSOLETE_RECORDS,
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_BTREE_BATCH_SIZE
+ */
+ public static final IntConfigParam VERIFY_BTREE_BATCH_SIZE =
+ new IntConfigParam(EnvironmentConfig.VERIFY_BTREE_BATCH_SIZE,
+ 1, // min
+ 10000, // max
+ 1000, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * @see EnvironmentConfig#VERIFY_BTREE_BATCH_DELAY
+ */
+ public static final DurationConfigParam VERIFY_BTREE_BATCH_DELAY =
+ new DurationConfigParam(EnvironmentConfig.VERIFY_BTREE_BATCH_DELAY,
+ "0 ms", // min
+ "10 s", // max
+ "10 ms", // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * Tree
+ */
+ public static final IntConfigParam NODE_MAX =
+ new IntConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES,
+ 4, // min
+ 32767, // max
+ 128, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam NODE_MAX_DUPTREE =
+ new IntConfigParam(EnvironmentConfig.NODE_DUP_TREE_MAX_ENTRIES,
+ 4, // min
+ 32767, // max
+ 128, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam TREE_MAX_EMBEDDED_LN =
+ new IntConfigParam(EnvironmentConfig.TREE_MAX_EMBEDDED_LN,
+ 0, // min
+ null, // max
+ 16, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated as of JE 6.0
+ */
+ private static final IntConfigParam BIN_MAX_DELTAS =
+ new IntConfigParam(EnvironmentConfig.TREE_MAX_DELTA,
+ 0, // min
+ 100, // max
+ 10, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam BIN_DELTA_PERCENT =
+ new
IntConfigParam(EnvironmentConfig.TREE_BIN_DELTA, + 0, // min + 75, // max + 25, // default + false, // mutable + false); // forReplication + + /* + * Whether blind insertions are allowed in BIN-deltas (it is also used to + * determine the max number of slots when a delta is created). + */ + public static final BooleanConfigParam BIN_DELTA_BLIND_OPS = + new BooleanConfigParam("je.tree.binDeltaBlindOps", + true, // default + false, // mutable + false); // forReplication + + /* + * Whether blind puts are allowed in BIN-deltas. Blind puts imply + * the storage of bloom filters in BIN-deltas. + */ + public static final BooleanConfigParam BIN_DELTA_BLIND_PUTS = + new BooleanConfigParam("je.tree.binDeltaBlindPuts", + true, // default + false, // mutable + false); // forReplication + + public static final LongConfigParam MIN_TREE_MEMORY = + new LongConfigParam(EnvironmentConfig.TREE_MIN_MEMORY, + 50L * 1024, // min + null, // max + 500L * 1024, // default + true, // mutable + false); // forReplication + + public static final IntConfigParam TREE_COMPACT_MAX_KEY_LENGTH = + new IntConfigParam(EnvironmentConfig.TREE_COMPACT_MAX_KEY_LENGTH, + 0, // min + 255, // max + 16, // default + false, // mutable + false); // forReplication + + /* + * IN Compressor + */ + public static final DurationConfigParam COMPRESSOR_WAKEUP_INTERVAL = + new DurationConfigParam(EnvironmentConfig.COMPRESSOR_WAKEUP_INTERVAL, + "1 s", // min + "75 min", // max + "5 s", // default + false, // mutable + false); // forReplication + + public static final IntConfigParam COMPRESSOR_RETRY = + new IntConfigParam(EnvironmentConfig.COMPRESSOR_DEADLOCK_RETRY, + 0, // min + Integer.MAX_VALUE, // max + 3, // default + false, // mutable + false); // forReplication + + public static final DurationConfigParam COMPRESSOR_LOCK_TIMEOUT = + new DurationConfigParam(EnvironmentConfig.COMPRESSOR_LOCK_TIMEOUT, + null, // min + "75 min", // max + "500 ms", // default + false, // mutable + false); // forReplication + + /* + * Evictor + */ + public static final LongConfigParam EVICTOR_EVICT_BYTES = + new LongConfigParam(EnvironmentConfig.EVICTOR_EVICT_BYTES, + 1024L, // min + null, // max + 524288L, // default + false, // mutable + false); // forReplication + + /** + * @deprecated As of 2.0, this is replaced by je.evictor.evictBytes + * + * When eviction happens, the evictor will push memory usage to this + * percentage of je.maxMemory. + */ + private static final IntConfigParam EVICTOR_USEMEM_FLOOR = + new IntConfigParam("je.evictor.useMemoryFloor", + 50, // min + 100, // max + 95, // default + false, // mutable + false); // forReplication + + /** + * @deprecated As of 1.7.2, this is replaced by je.evictor.nodesPerScan + * + * The evictor percentage of total nodes to scan per wakeup. + */ + private static final IntConfigParam EVICTOR_NODE_SCAN_PERCENTAGE = + new IntConfigParam("je.evictor.nodeScanPercentage", + 1, // min + 100, // max + 10, // default + false, // mutable + false); // forReplication + + /** + * @deprecated As of 1.7.2, 1 node is chosen per scan. + * + * The evictor percentage of scanned nodes to evict per wakeup. 
+ */ + private static final + IntConfigParam EVICTOR_EVICTION_BATCH_PERCENTAGE = + new IntConfigParam("je.evictor.evictionBatchPercentage", + 1, // min + 100, // max + 10, // default + false, // mutable + false); // forReplication + + /** + * @deprecated as of JE 6.0 + */ + private static final IntConfigParam EVICTOR_NODES_PER_SCAN = + new IntConfigParam(EnvironmentConfig.EVICTOR_NODES_PER_SCAN, + 1, // min + 1000, // max + 10, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam EVICTOR_CRITICAL_PERCENTAGE = + new IntConfigParam(EnvironmentConfig.EVICTOR_CRITICAL_PERCENTAGE, + 0, // min + 1000, // max + 0, // default + false, // mutable + false); // forReplication + + /** + * @deprecated as of JE 4.1 + */ + private static final IntConfigParam EVICTOR_RETRY = + new IntConfigParam(EnvironmentConfig.EVICTOR_DEADLOCK_RETRY, + 0, // min + Integer.MAX_VALUE, // max + 3, // default + false, // mutable + false); // forReplication + + /** + * @deprecated as of JE 6.0 + */ + private static final BooleanConfigParam EVICTOR_LRU_ONLY = + new BooleanConfigParam(EnvironmentConfig.EVICTOR_LRU_ONLY, + true, // default + false, // mutable + false); // forReplication + + /** + * If true (the default), use a 2-level LRU policy that aims to keep + * dirty BTree nodes in memory at the expense of potentially hotter + * clean nodes. Specifically, a node that is selected for eviction from + * level-1 will be moved to level-2 if it is dirty. Nodes in level-2 are + * considered for eviction only after all nodes in level-1 have been + * considered. Dirty nodes that are in level-2 are moved back to level-1 + * when they get cleaned. + *
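+ *
+ * A minimal sketch of the selection logic described above (illustrative
+ * only; the list operations and accessors are hypothetical, not JE source):
+ *
+ *   IN target = level1.removeColdest();
+ *   if (target.isDirty()) {
+ *       level2.addAsHottest(target); // defer eviction of the dirty node
+ *   } else {
+ *       evict(target);
+ *   }
+ *   // level2 is scanned only after level1 has been considered; when a
+ *   // level-2 node is logged (cleaned), it moves back to level1.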

        + * This parameter applies to the new evictor only. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>true</td></tr>
+ * </table>

        + */ + public static final BooleanConfigParam EVICTOR_USE_DIRTY_LRU = + new BooleanConfigParam("je.evictor.useDirtyLRU", + true, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam EVICTOR_N_LRU_LISTS = + new IntConfigParam(EnvironmentConfig.EVICTOR_N_LRU_LISTS, + 1, // min + 32, // max + 4, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam EVICTOR_FORCED_YIELD = + new BooleanConfigParam(EnvironmentConfig.EVICTOR_FORCED_YIELD, + false, // default + false, // mutable + false); // forReplication + + /* Off-heap cache. */ + + public static final LongConfigParam MAX_OFF_HEAP_MEMORY = + new LongConfigParam(EnvironmentConfig.MAX_OFF_HEAP_MEMORY, + 0L, // min + null, // max + 0L, // default + true, // mutable + false); // forReplication + + public static final LongConfigParam OFFHEAP_EVICT_BYTES = + new LongConfigParam(EnvironmentConfig.OFFHEAP_EVICT_BYTES, + 1024L, // min + null, // max + 50 * 1024 * 1024L, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam OFFHEAP_CHECKSUM = + new BooleanConfigParam(EnvironmentConfig.OFFHEAP_CHECKSUM, + false, // default + false, // mutable + false); // forReplication + + /** + */ + public static final BooleanConfigParam ENV_RUN_OFFHEAP_EVICTOR = + new BooleanConfigParam(EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, + true, // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam ENV_EXPIRATION_ENABLED = + new BooleanConfigParam(EnvironmentConfig.ENV_EXPIRATION_ENABLED, + true, // default + true, // mutable + false); // forReplication + + public static final IntConfigParam OFFHEAP_CORE_THREADS = + new IntConfigParam(EnvironmentConfig.OFFHEAP_CORE_THREADS, + 0, // min + Integer.MAX_VALUE, // max + 1, // default + true, // mutable + false); // forReplication + + public static final IntConfigParam OFFHEAP_MAX_THREADS = + new IntConfigParam(EnvironmentConfig.OFFHEAP_MAX_THREADS, + 1, // min + Integer.MAX_VALUE, // max + 3, // default + true, // mutable + false); // forReplication + + public static final DurationConfigParam OFFHEAP_KEEP_ALIVE = + new DurationConfigParam(EnvironmentConfig.OFFHEAP_KEEP_ALIVE, + "1 s", // min + "24 h", // max + "10 min", // default + true, // mutable + false); // forReplication + + public static final IntConfigParam OFFHEAP_N_LRU_LISTS = + new IntConfigParam(EnvironmentConfig.OFFHEAP_N_LRU_LISTS, + 1, // min + 32, // max + 4, // default + false, // mutable + false); // forReplication + + /* + * Checkpointer + */ + public static final LongConfigParam CHECKPOINTER_BYTES_INTERVAL = + new LongConfigParam(EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, + 0L, // min + Long.MAX_VALUE, // max + 20000000L, // default + false, // mutable + false); // forReplication + + public static final DurationConfigParam CHECKPOINTER_WAKEUP_INTERVAL = + new DurationConfigParam(EnvironmentConfig.CHECKPOINTER_WAKEUP_INTERVAL, + "1 s", // min + "75 min", // max + "0", // default + false, // mutable + false); // forReplication + + public static final IntConfigParam CHECKPOINTER_RETRY = + new IntConfigParam(EnvironmentConfig.CHECKPOINTER_DEADLOCK_RETRY, + 0, // min + Integer.MAX_VALUE, // max + 3, // default + false, // mutable + false); // forReplication + + public static final BooleanConfigParam CHECKPOINTER_HIGH_PRIORITY = + new BooleanConfigParam(EnvironmentConfig.CHECKPOINTER_HIGH_PRIORITY, + false, // default + true, // mutable + false);// forReplication + + /* + 
* Cleaner + */ + public static final IntConfigParam CLEANER_MIN_UTILIZATION = + new IntConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, + 0, // min + 90, // max + 50, // default + true, // mutable + false); // forReplication + + public static final IntConfigParam CLEANER_MIN_FILE_UTILIZATION = + new IntConfigParam(EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, + 0, // min + 50, // max + 5, // default + true, // mutable + false); // forReplication + + public static final LongConfigParam CLEANER_BYTES_INTERVAL = + new LongConfigParam(EnvironmentConfig.CLEANER_BYTES_INTERVAL, + 0L, // min + Long.MAX_VALUE, // max + 0L, // default + true, // mutable + false); // forReplication + + public static final DurationConfigParam CLEANER_WAKEUP_INTERVAL = + new DurationConfigParam(EnvironmentConfig.CLEANER_WAKEUP_INTERVAL, + "0", // min + "1 h", // max + "10 s", // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam CLEANER_FETCH_OBSOLETE_SIZE = + new BooleanConfigParam(EnvironmentConfig.CLEANER_FETCH_OBSOLETE_SIZE, + false, // default + true, // mutable + false);// forReplication + + /** + * @deprecated in JE 6.3. Adjustments are no longer needed because LN log + * sizes have been stored in the Btree since JE 6.0. + */ + private static final BooleanConfigParam CLEANER_ADJUST_UTILIZATION = + new BooleanConfigParam(EnvironmentConfig.CLEANER_ADJUST_UTILIZATION, + false, // default + true, // mutable + false);// forReplication + + public static final IntConfigParam CLEANER_DEADLOCK_RETRY = + new IntConfigParam(EnvironmentConfig.CLEANER_DEADLOCK_RETRY, + 0, // min + Integer.MAX_VALUE, // max + 3, // default + true, // mutable + false); // forReplication + + public static final DurationConfigParam CLEANER_LOCK_TIMEOUT = + new DurationConfigParam(EnvironmentConfig.CLEANER_LOCK_TIMEOUT, + "0", // min + "75 min", // max + "500 ms", // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam CLEANER_REMOVE = + new BooleanConfigParam(EnvironmentConfig.CLEANER_EXPUNGE, + true, // default + true, // mutable + false); // forReplication + + public static final BooleanConfigParam CLEANER_USE_DELETED_DIR = + new BooleanConfigParam(EnvironmentConfig.CLEANER_USE_DELETED_DIR, + false, // default + true, // mutable + false); // forReplication + + /** + * @deprecated As of 1.7.1, no longer used. + */ + private static final IntConfigParam CLEANER_MIN_FILES_TO_DELETE = + new IntConfigParam("je.cleaner.minFilesToDelete", + 1, // min + 1000000, // max + 5, // default + false, // mutable + false); // forReplication + + /** + * @deprecated As of 2.0, no longer used. + */ + private static final IntConfigParam CLEANER_RETRIES = + new IntConfigParam("je.cleaner.retries", + 0, // min + 1000, // max + 10, // default + false, // mutable + false); // forReplication + + /** + * @deprecated As of 2.0, no longer used. + */ + private static final IntConfigParam CLEANER_RESTART_RETRIES = + new IntConfigParam("je.cleaner.restartRetries", + 0, // min + 1000, // max + 5, // default + false, // mutable + false); // forReplication + + public static final IntConfigParam CLEANER_MIN_AGE = + new IntConfigParam(EnvironmentConfig.CLEANER_MIN_AGE, + 1, // min + 1000, // max + 2, // default + true, // mutable + false); // forReplication + + /** + * @deprecated in JE 6.3. 
+ */
+ private static final IntConfigParam CLEANER_CALC_RECENT_LN_SIZES =
+ new IntConfigParam("je.cleaner.calc.recentLNSizes",
+ 1, // min
+ 100, // max
+ 10, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated in JE 6.3.
+ */
+ private static final IntConfigParam CLEANER_CALC_MIN_UNCOUNTED_LNS =
+ new IntConfigParam("je.cleaner.calc.minUncountedLNs",
+ 0, // min
+ 1000000, // max
+ 1000, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated in JE 6.3.
+ */
+ private static final IntConfigParam CLEANER_CALC_INITIAL_ADJUSTMENTS =
+ new IntConfigParam("je.cleaner.calc.initialAdjustments",
+ 1, // min
+ 100, // max
+ 5, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated in JE 6.3.
+ */
+ private static final IntConfigParam CLEANER_CALC_MIN_PROBE_SKIP_FILES =
+ new IntConfigParam("je.cleaner.calc.minProbeSkipFiles",
+ 1, // min
+ 100, // max
+ 5, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated in JE 6.3.
+ */
+ private static final IntConfigParam CLEANER_CALC_MAX_PROBE_SKIP_FILES =
+ new IntConfigParam("je.cleaner.calc.maxProbeSkipFiles",
+ 1, // min
+ 100, // max
+ 20, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ * Retained here only to avoid errors in old je.properties files.
+ */
+ private static final BooleanConfigParam CLEANER_CLUSTER =
+ new BooleanConfigParam("je.cleaner.cluster",
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ * Retained here only to avoid errors in old je.properties files.
+ */
+ private static final BooleanConfigParam CLEANER_CLUSTER_ALL =
+ new BooleanConfigParam("je.cleaner.clusterAll",
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ * Retained here only to avoid errors in old je.properties files.
+ */
+ public static final IntConfigParam CLEANER_MAX_BATCH_FILES =
+ new IntConfigParam(EnvironmentConfig.CLEANER_MAX_BATCH_FILES,
+ 0, // min
+ 100000, // max
+ 0, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam CLEANER_READ_SIZE =
+ new IntConfigParam(EnvironmentConfig.CLEANER_READ_SIZE,
+ 128, // min
+ null, // max
+ 0, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * DiskOrderedScan
+ */
+ public static final DurationConfigParam DOS_PRODUCER_QUEUE_TIMEOUT =
+ new DurationConfigParam(EnvironmentConfig.DOS_PRODUCER_QUEUE_TIMEOUT,
+ "0", // min
+ "75 min", // max
+ "10 seconds", // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * Not part of public API.
+ *
+ * If true, the cleaner tracks and stores detailed information that is used
+ * to decrease the cost of cleaning.
+ */
+ public static final BooleanConfigParam CLEANER_TRACK_DETAIL =
+ new BooleanConfigParam("je.cleaner.trackDetail",
+ true, // default
+ false, // mutable
+ false); // forReplication
+
+ /**
+ * Not part of public API.
+ *
+ * If true (the default), data expires gradually over an hour or day time
+ * period, preventing spikes in cleaning after hour/day boundaries. This
+ * might be set to false for debugging.
+ */
+ public static final BooleanConfigParam CLEANER_GRADUAL_EXPIRATION =
+ new BooleanConfigParam("je.cleaner.gradualExpiration",
+ true, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * Not part of public API.
+ *
+ * Used to determine when to perform two-pass cleaning.
+ *
+ * @see #CLEANER_TWO_PASS_THRESHOLD
+ * @see EnvironmentStats#getNCleanerTwoPassRuns()
+ */
+ public static final IntConfigParam CLEANER_TWO_PASS_GAP =
+ new IntConfigParam("je.cleaner.twoPassGap",
+ 1, // min
+ 100, // max
+ 10, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * Not part of public API.
+ *
+ * Used to determine when to perform two-pass cleaning.
+ *
+ * Two-pass cleaning is used when:
+ * 1. the file's maximum utilization is greater than
+ * {@link #CLEANER_TWO_PASS_THRESHOLD}, and
+ * 2. the difference between the minimum and maximum utilization of a file
+ * is greater than or equal to
+ * {@link #CLEANER_TWO_PASS_GAP}.
+ *
+ * After pass one, pass two is performed only if the recalculated
+ * utilization is greater than or equal to
+ * {@link #CLEANER_TWO_PASS_THRESHOLD}.
+ *
+ * When this parameter is zero, the default, the value used is
+ * {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION} minus five.
+ *
+ * @see EnvironmentStats#getNCleanerTwoPassRuns()
+ */
+ public static final IntConfigParam CLEANER_TWO_PASS_THRESHOLD =
+ new IntConfigParam("je.cleaner.twoPassThreshold",
+ 0, // min
+ 100, // max
+ 0, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE =
+ new IntConfigParam(EnvironmentConfig.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE,
+ 1, // min
+ 90, // max
+ 2, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * Not part of public API, since it applies to a very old bug.
+ *
+ * If true, detail information is discarded that was added by earlier
+ * versions of JE (specifically 2.0.42 and 2.0.54) if it may be invalid.
+ * This may be set to false for increased performance when those versions
+ * of JE were used but LockMode.RMW was never used.
+ */
+ public static final BooleanConfigParam CLEANER_RMW_FIX =
+ new BooleanConfigParam("je.cleaner.rmwFix",
+ true, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final ConfigParam CLEANER_FORCE_CLEAN_FILES =
+ new ConfigParam(EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES,
+ "", // default
+ true, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam CLEANER_UPGRADE_TO_LOG_VERSION =
+ new IntConfigParam(EnvironmentConfig.CLEANER_UPGRADE_TO_LOG_VERSION,
+ -1, // min
+ null, // max
+ 0, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam CLEANER_THREADS =
+ new IntConfigParam(EnvironmentConfig.CLEANER_THREADS,
+ 1, // min
+ null, // max
+ 1, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam CLEANER_LOOK_AHEAD_CACHE_SIZE =
+ new IntConfigParam(EnvironmentConfig.CLEANER_LOOK_AHEAD_CACHE_SIZE,
+ 0, // min
+ null, // max
+ 8192, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ * Retained here only to avoid errors in old je.properties files.
+ */
+ private static final BooleanConfigParam
+ CLEANER_FOREGROUND_PROACTIVE_MIGRATION = new BooleanConfigParam(
+ EnvironmentConfig.CLEANER_FOREGROUND_PROACTIVE_MIGRATION,
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ * Retained here only to avoid errors in old je.properties files.
+ */
+ public static final BooleanConfigParam
+ CLEANER_BACKGROUND_PROACTIVE_MIGRATION = new BooleanConfigParam(
+ EnvironmentConfig.CLEANER_BACKGROUND_PROACTIVE_MIGRATION,
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /**
+ * @deprecated
+ * Retained here only to avoid errors in old je.properties files.
+ */
+ private static final BooleanConfigParam CLEANER_LAZY_MIGRATION =
+ new BooleanConfigParam(EnvironmentConfig.CLEANER_LAZY_MIGRATION,
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /* Processed entry count after which we clear the database cache. */
+ public static final IntConfigParam ENV_DB_CACHE_CLEAR_COUNT =
+ new IntConfigParam("je.env.dbCacheClearCount",
+ 1, // min
+ null, // max
+ 100, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * Transactions
+ */
+ public static final IntConfigParam N_LOCK_TABLES =
+ new IntConfigParam(EnvironmentConfig.LOCK_N_LOCK_TABLES,
+ 1, // min
+ 32767, // max
+ 1, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final DurationConfigParam LOCK_TIMEOUT =
+ new DurationConfigParam(EnvironmentConfig.LOCK_TIMEOUT,
+ null, // min
+ "75 min", // max
+ "500 ms", // default
+ false, // mutable
+ false); // forReplication
+
+ /* Made mutable to allow testing in DeadlockStress.java. */
+ public static final BooleanConfigParam LOCK_DEADLOCK_DETECT =
+ new BooleanConfigParam(EnvironmentConfig.LOCK_DEADLOCK_DETECT,
+ true, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final DurationConfigParam LOCK_DEADLOCK_DETECT_DELAY =
+ new DurationConfigParam(EnvironmentConfig.LOCK_DEADLOCK_DETECT_DELAY,
+ "0", // min
+ "75 min", // max
+ "0", // default
+ false, // mutable
+ false); // forReplication
+
+ public static final BooleanConfigParam LOCK_OLD_LOCK_EXCEPTIONS =
+ new BooleanConfigParam(EnvironmentConfig.LOCK_OLD_LOCK_EXCEPTIONS,
+ false, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final DurationConfigParam TXN_TIMEOUT =
+ new DurationConfigParam(EnvironmentConfig.TXN_TIMEOUT,
+ null, // min
+ "75 min", // max
+ "0", // default
+ false, // mutable
+ false); // forReplication
+
+ public static final BooleanConfigParam TXN_SERIALIZABLE_ISOLATION =
+ new BooleanConfigParam(EnvironmentConfig.TXN_SERIALIZABLE_ISOLATION,
+ false, // default
+ false, // mutable
+ false); // forReplication
+
+ public static final BooleanConfigParam TXN_DEADLOCK_STACK_TRACE =
+ new BooleanConfigParam(EnvironmentConfig.TXN_DEADLOCK_STACK_TRACE,
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final BooleanConfigParam TXN_DUMPLOCKS =
+ new BooleanConfigParam(EnvironmentConfig.TXN_DUMP_LOCKS,
+ false, // default
+ true, // mutable
+ false); // forReplication
+
+ /*
+ * If true, exceptions and critical cleaner and recovery event tracing
+ * are written into the .jdb files.
+ */
+ public static final BooleanConfigParam JE_LOGGING_DBLOG =
+ new BooleanConfigParam("je.env.logTrace",
+ true, // default
+ false, // mutable
+ false); // forReplication
+
+ /*
+ * The level for JE ConsoleHandler.
+ */
+ public static final ConfigParam JE_CONSOLE_LEVEL =
+ new ConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL,
+ "OFF", // default
+ true, // mutable
+ false) { // forReplication
+
+ @Override
+ public void validateValue(String level)
+ throws NullPointerException, IllegalArgumentException {
+
+ /* Parse the level. */
+ Level.parse(level);
+ }
+ };
+
+ /*
+ * The level for JE FileHandler.
+ */
+ public static final ConfigParam JE_FILE_LEVEL =
+ new ConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL,
+ "INFO", // default
+ true, // mutable
+ false) { // forReplication
+
+ @Override
+ public void validateValue(String level)
+ throws NullPointerException, IllegalArgumentException {
+
+ /* Parse the level. */
+ Level.parse(level);
+ }
+ };
+
+ /*
+ * The default below for JE_DURABILITY is currently null to avoid
+ * mixed-mode durability API exceptions. Once the "sync" API has been
+ * removed, we can provide a default such as "sync,sync,simple majority",
+ * which is compatible with the current standalone sync default behavior
+ * and is safe with respect to HA, though not the best performing setup.
+ */
+ public static final ConfigParam JE_DURABILITY =
+ new ConfigParam(EnvironmentConfig.TXN_DURABILITY,
+ null, // default
+ true, // mutable
+ false) { // forReplication
+
+ @Override
+ public void validateValue(String durabilityString)
+ throws IllegalArgumentException {
+ // Parse the string to determine whether it's valid
+ Durability.parse(durabilityString);
+ }
+ };
+
+ /**
+ * If environment startup exceeds this duration, startup statistics are
+ * logged and can be found in the je.info file.
+ */
+ public static final DurationConfigParam STARTUP_DUMP_THRESHOLD =
+ new DurationConfigParam(EnvironmentConfig.STARTUP_DUMP_THRESHOLD,
+ "0", // min
+ null, // max
+ "5 min", // default
+ false, // mutable
+ false); // forReplication
+
+ public static final BooleanConfigParam STATS_COLLECT =
+ new BooleanConfigParam(EnvironmentConfig.STATS_COLLECT,
+ true, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam STATS_FILE_ROW_COUNT =
+ new IntConfigParam(EnvironmentConfig.STATS_FILE_ROW_COUNT,
+ 2, // min
+ Integer.MAX_VALUE, // max
+ 1440, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final IntConfigParam STATS_MAX_FILES =
+ new IntConfigParam(EnvironmentConfig.STATS_MAX_FILES,
+ 1, // min
+ Integer.MAX_VALUE, // max
+ 10, // default
+ true, // mutable
+ false); // forReplication
+
+ public static final DurationConfigParam STATS_COLLECT_INTERVAL =
+ new DurationConfigParam(EnvironmentConfig.STATS_COLLECT_INTERVAL,
+ "1 s", // min
+ null, // max
+ "1 min", // default
+ true, // mutable
+ false); // forReplication
+
+ public static final ConfigParam STATS_FILE_DIRECTORY =
+ new ConfigParam(EnvironmentConfig.STATS_FILE_DIRECTORY,
+ "", // default
+ false, // mutable
+ false); // forReplication
+
+ /*
+ * Replication params are in com.sleepycat.je.rep.impl.RepParams
+ */
+
+ /*
+ * Add a configuration parameter to the set supported by an environment.
+ */
+ public static void addSupportedParam(ConfigParam param) {
+ SUPPORTED_PARAMS.put(param.getName(), param);
+ }
+}
diff --git a/src/com/sleepycat/je/config/IntConfigParam.java b/src/com/sleepycat/je/config/IntConfigParam.java
new file mode 100644
index 0000000..ea22053
--- /dev/null
+++ b/src/com/sleepycat/je/config/IntConfigParam.java
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with an integer value.
+ */
+public class IntConfigParam extends ConfigParam {
+
+ private static final String DEBUG_NAME = IntConfigParam.class.getName();
+
+ private Integer min;
+ private Integer max;
+
+ public IntConfigParam(String configName,
+ Integer minVal,
+ Integer maxVal,
+ Integer defaultValue,
+ boolean mutable,
+ boolean forReplication) {
+ /* defaultValue must not be null. */
+ super(configName, defaultValue.toString(), mutable, forReplication);
+ min = minVal;
+ max = maxVal;
+ }
+
+ /**
+ * Self validate. Check mins and maxes.
+ */
+ private void validate(Integer value)
+ throws IllegalArgumentException {
+
+ if (value != null) {
+ if (min != null) {
+ if (value.compareTo(min) < 0) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ":" +
+ " param " + name +
+ " doesn't validate, " +
+ value +
+ " is less than min of " +
+ min);
+ }
+ }
+ if (max != null) {
+ if (value.compareTo(max) > 0) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ":" +
+ " param " + name +
+ " doesn't validate, " +
+ value +
+ " is greater than max of " +
+ max);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void validateValue(String value)
+ throws IllegalArgumentException {
+
+ try {
+ validate(new Integer(value));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ": " + value + " not valid value for " + name);
+ }
+ }
+}
diff --git a/src/com/sleepycat/je/config/LongConfigParam.java b/src/com/sleepycat/je/config/LongConfigParam.java
new file mode 100644
index 0000000..4ca8822
--- /dev/null
+++ b/src/com/sleepycat/je/config/LongConfigParam.java
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with a long value.
+ */
+public class LongConfigParam extends ConfigParam {
+
+ private static final String DEBUG_NAME = LongConfigParam.class.getName();
+
+ private Long min;
+ private Long max;
+
+ public LongConfigParam(String configName,
+ Long minVal,
+ Long maxVal,
+ Long defaultValue,
+ boolean mutable,
+ boolean forReplication) {
+
+ /* defaultValue must not be null. */
+ super(configName, defaultValue.toString(), mutable, forReplication);
+ min = minVal;
+ max = maxVal;
+ }
+
+ /**
+ * Self validate.
Check mins and maxes.
+ */
+ private void validate(Long value)
+ throws IllegalArgumentException {
+
+ if (value != null) {
+ if (min != null) {
+ if (value.compareTo(min) < 0) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ":" +
+ " param " + name +
+ " doesn't validate, " +
+ value +
+ " is less than min of " +
+ min);
+ }
+ }
+ if (max != null) {
+ if (value.compareTo(max) > 0) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ":" +
+ " param " + name +
+ " doesn't validate, " +
+ value +
+ " is greater than max of " + max);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void validateValue(String value)
+ throws IllegalArgumentException {
+
+ try {
+ validate(new Long(value));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ": " + value + " not valid value for " + name);
+ }
+ }
+}
diff --git a/src/com/sleepycat/je/config/ShortConfigParam.java b/src/com/sleepycat/je/config/ShortConfigParam.java
new file mode 100644
index 0000000..304e623
--- /dev/null
+++ b/src/com/sleepycat/je/config/ShortConfigParam.java
@@ -0,0 +1,85 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.config;
+
+/**
+ * A JE configuration parameter with a short value.
+ */
+public class ShortConfigParam extends ConfigParam {
+
+ private static final String DEBUG_NAME =
+ ShortConfigParam.class.getName();
+
+ private Short min;
+ private Short max;
+
+ public ShortConfigParam(String configName,
+ Short minVal,
+ Short maxVal,
+ Short defaultValue,
+ boolean mutable,
+ boolean forReplication) {
+ /* defaultValue must not be null. */
+ super(configName, defaultValue.toString(), mutable, forReplication);
+
+ min = minVal;
+ max = maxVal;
+ }
+
+ /**
+ * Self validate. Check mins and maxes.
+ */
+ private void validate(Short value)
+ throws IllegalArgumentException {
+
+ if (value != null) {
+ if (min != null) {
+ if (value.compareTo(min) < 0) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ":" +
+ " param " + name +
+ " doesn't validate, " + value +
+ " is less than min of " + min);
+ }
+ }
+ if (max != null) {
+ if (value.compareTo(max) > 0) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ":" +
+ " param " + name +
+ " doesn't validate, " + value +
+ " is greater than max of " +
+ max);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void validateValue(String value)
+ throws IllegalArgumentException {
+
+ try {
+ validate(new Short(value));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException
+ (DEBUG_NAME + ": " + value +
+ " not valid value for " + name);
+ }
+ }
+
+ public Short getMin() {
+ return min;
+ }
+}
diff --git a/src/com/sleepycat/je/config/package-info.java b/src/com/sleepycat/je/config/package-info.java
new file mode 100644
index 0000000..2baa7ce
--- /dev/null
+++ b/src/com/sleepycat/je/config/package-info.java
@@ -0,0 +1,17 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: Environment configuration parameter support.
+ */
+package com.sleepycat.je.config;
\ No newline at end of file
diff --git a/src/com/sleepycat/je/dbi/BTreeStatDefinition.java b/src/com/sleepycat/je/dbi/BTreeStatDefinition.java
new file mode 100644
index 0000000..240b132
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/BTreeStatDefinition.java
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.utilint.StatDefinition;
+import com.sleepycat.je.utilint.StatDefinition.StatType;
+
+/**
+ * Per-stat Metadata for JE Btree statistics.
+ */
+public class BTreeStatDefinition {
+
+ public static final String GROUP_NAME = "BTree";
+ public static final String GROUP_DESC =
+ "Composition of btree, types and counts of nodes.";
+
+ public static final StatDefinition BTREE_BIN_COUNT =
+ new StatDefinition("binCount",
+ "Number of bottom internal nodes in " +
+ "the database's btree.",
+ StatType.CUMULATIVE);
+
+ public static final StatDefinition BTREE_DELETED_LN_COUNT =
+ new StatDefinition("deletedLNCount",
+ "Number of deleted leaf nodes in the database's " +
+ "btree.",
+ StatType.CUMULATIVE);
+
+ public static final StatDefinition BTREE_IN_COUNT =
+ new StatDefinition("inCount",
+ "Number of internal nodes in the database's btree.
" + + "BINs are not included.", + StatType.CUMULATIVE); + + public static final StatDefinition BTREE_LN_COUNT = + new StatDefinition("lnCount", + "Number of leaf nodes in the database's btree.", + StatType.CUMULATIVE); + + public static final StatDefinition BTREE_MAINTREE_MAXDEPTH = + new StatDefinition("mainTreeMaxDepth", + "Maximum depth of the in-memory tree.", + StatType.CUMULATIVE); + + public static final StatDefinition BTREE_INS_BYLEVEL = + new StatDefinition("insByLevel", + "Histogram of internal nodes by level.", + StatType.CUMULATIVE); + + public static final StatDefinition BTREE_BINS_BYLEVEL = + new StatDefinition("binsByLevel", + "Histogram of bottom internal nodes by level.", + StatType.CUMULATIVE); + + public static final StatDefinition BTREE_RELATCHES_REQUIRED = + new StatDefinition("relatchesRequired", + "Number of latch upgrades (relatches) required."); + + public static final StatDefinition BTREE_ROOT_SPLITS = + new StatDefinition("nRootSplits", + "Number of times the root was split."); + + public static final StatDefinition BTREE_BIN_ENTRIES_HISTOGRAM = + new StatDefinition("binEntriesHistogram", + "Histogram of bottom internal nodes fill " + + "percentage.", + StatType.CUMULATIVE); +} diff --git a/src/com/sleepycat/je/dbi/CompressedOopsDetector.java b/src/com/sleepycat/je/dbi/CompressedOopsDetector.java new file mode 100644 index 0000000..95d91a3 --- /dev/null +++ b/src/com/sleepycat/je/dbi/CompressedOopsDetector.java @@ -0,0 +1,96 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.lang.management.ManagementFactory; +import java.lang.reflect.Method; + +/** + * Uses com.sun.management (non-portable) APIs to detect whether compressed + * oops is actually in effect. Uses reflection so that isEnabled simply + * returns null if the com.sun.management classes are not available, rather + * than causing a class loading error during static initialization, which would + * prevent the process from running. For the IBM J9 environment, which doesn't + * support the MBean, checks the value of a system property for a known string. + */ +class CompressedOopsDetector { + private static final String HOTSPOT_BEAN_CLASS = + "com.sun.management.HotSpotDiagnosticMXBean"; + private static final String HOTSPOT_BEAN_NAME = + "com.sun.management:type=HotSpotDiagnostic"; + private static final String VMOPTION_CLASS = + "com.sun.management.VMOption"; + + /** + * For IBM J9, it appears that the best way to tell if compressed OOPs are + * in use is to see if the value of the java.vm.info system property + * contains this value. + */ + private static final String IBM_VM_INFO_COMPRESSED_OOPS_SUBSTRING = + "Compressed References"; + + /** + * @return TRUE or FALSE if the status of compressed oops is known, or null + * if it is unknown. 
+ */ + static Boolean isEnabled() { + try { + return isEnabledInternal(); + } catch (Throwable e) { + final String vendor = System.getProperty("java.vendor"); + if ((vendor != null) && vendor.startsWith("IBM")) { + final String info = System.getProperty("java.vm.info"); + if (info != null) { + return info.indexOf( + IBM_VM_INFO_COMPRESSED_OOPS_SUBSTRING) != -1; + } + } + return null; + } + } + + /* Throws exceptions rather than returning null. */ + private static Boolean isEnabledInternal() + throws Throwable { + + final Class hotspotMBeanClass = Class.forName(HOTSPOT_BEAN_CLASS); + final Object hotspotMBean = + ManagementFactory.newPlatformMXBeanProxy( + ManagementFactory.getPlatformMBeanServer(), + HOTSPOT_BEAN_NAME, hotspotMBeanClass); + + /* + * vmOption is an instance of com.sun.management.VMOption. + * HotSpotDiagnosticMXBean.getVMOption(String option) returns a + * VMOption, which has a "String getValue()" method. + */ + final Method getVMOption = + hotspotMBeanClass.getMethod("getVMOption", String.class); + final Object vmOption = + getVMOption.invoke(hotspotMBean, "UseCompressedOops"); + final Class vmOptionClass = Class.forName(VMOPTION_CLASS); + final Method getValue = vmOptionClass.getMethod("getValue"); + final String value = (String) getValue.invoke(vmOption); + return Boolean.valueOf(value); + } + + /* For manual testing. */ + public static void main(final String[] args) { + try { + System.out.println("isEnabled(): " + isEnabled()); + } catch (Throwable e) { + e.printStackTrace(); + } + } +} diff --git a/src/com/sleepycat/je/dbi/CursorImpl.java b/src/com/sleepycat/je/dbi/CursorImpl.java new file mode 100644 index 0000000..63fb6fe --- /dev/null +++ b/src/com/sleepycat/je/dbi/CursorImpl.java @@ -0,0 +1,4096 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Set; +import java.util.logging.Level; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DuplicateDataException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockNotAvailableException; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.BINBoundary; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.SearchResult; +import com.sleepycat.je.tree.StorageSize; +import com.sleepycat.je.tree.TrackingInfo; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeWalkerStatsAccumulator; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockInfo; +import com.sleepycat.je.txn.LockManager; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.txn.WriteLockInfo; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * A CursorImpl is the internal implementation of the cursor. + */ +public class CursorImpl implements Cloneable { + + private static final boolean DEBUG = false; + + private static final byte CURSOR_NOT_INITIALIZED = 1; + private static final byte CURSOR_INITIALIZED = 2; + private static final byte CURSOR_CLOSED = 3; + private static final String TRACE_DELETE = "Delete"; + private static final String TRACE_MOD = "Mod:"; + private static final String TRACE_INSERT = "Ins:"; + + public static final int FOUND = 0x1; + /* Exact match on the key portion. */ + public static final int EXACT_KEY = 0x2; + /* Record found is the last one in the dbImpl. */ + public static final int FOUND_LAST = 0x4; + + /* + * Allocate hashCode ids from this. [#13896] + */ + private static long lastAllocatedId = 0; + + /* + * Unique id that we can return as a hashCode to prevent calls to + * Object.hashCode(). [#13896] + */ + private final int thisId; + + /* The dbImpl behind the handle. */ + private final DatabaseImpl dbImpl; + + /* Owning transaction. */ + private Locker locker; + + private final boolean retainNonTxnLocks; + + private final boolean isSecondaryCursor; + + /* + * Cursor location in the dbImpl, represented by a BIN and an index + * in the BIN. The bin is null if not established, and the index is + * negative if not established. + */ + private volatile BIN bin; + private volatile int index; + + /* State of the cursor. See CURSOR_XXX above. 
*/ + private byte status; + + private CacheMode cacheMode; + private boolean allowEviction; + private BIN priorBIN; + + /* + * A cache of the record version for the operation at the current position. + * Is null if the cursor is uninitialized. For a secondary cursor, is the + * version of the primary record. + */ + private RecordVersion currentRecordVersion; + + /* + * A cache of the storage size for the operation at the cursor position. + * Both values are zero if the cursor is uninitialized. priStorageSize is + * non-zero only if Cursor.readPrimaryAfterGet was called. + */ + private int storageSize; + private int priStorageSize; + + /* Number of secondary records written by a primary put or delete. */ + private int nSecWrites; + + private ThreadLocal treeStatsAccumulatorTL; + + private TestHook testHook; + + /** + * Creates a cursor with retainNonTxnLocks=true, isSecondaryCursor=false. + * These are the standard settings for an internal cursor. + */ + public CursorImpl(DatabaseImpl database, Locker locker) { + this(database, locker, + true /*retainNonTxnLocks*/, + false /*isSecondaryCursor*/); + } + + /** + * Creates a cursor. + * + * A cursor always retains transactional locks when it is reset or closed. + * Non-transaction locks may be retained or not, depending on the + * retainNonTxnLocks parameter value. + * + * Normally a user-created non-transactional Cursor releases locks on reset + * and close, and a ThreadLocker is normally used. However, by passing + * true for retainNonTxnLocks a ThreadLocker can be made to retain locks; + * this capability is used by SecondaryCursor.readPrimaryAfterGet. + * + * For internal (non-user) cursors, a BasicLocker is often used and locks + * are retained. In these internal use cases the caller explicitly calls + * BasicLocker.operationEnd() after the cursor is closed, and + * retainNonTxnLocks is set to true to prevent the locks acquired by the + * BasicLocker from being released when the cursor is closed. + * + * BasicLocker is also used for NameLN operations while opening a Database + * handle. Database handle locks must be retained, even if the Database is + * opened non-transactionally. + * + * @param retainNonTxnLocks is true if non-transactional locks should be + * retained (not released automatically) when the cursor is reset or + * closed. + * + * @param isSecondaryCursor whether to treat this cursor as a secondary + * cursor, e.g., secondary records don't have record versions. + */ + public CursorImpl( + DatabaseImpl dbImpl, + Locker locker, + boolean retainNonTxnLocks, + boolean isSecondaryCursor) { + + thisId = (int) getNextCursorId(); + bin = null; + index = -1; + + this.retainNonTxnLocks = retainNonTxnLocks; + this.isSecondaryCursor = isSecondaryCursor; + + /* Associate this cursor with the dbImpl. */ + this.dbImpl = dbImpl; + this.locker = locker; + this.locker.registerCursor(this); + + /* + * This default value is used only when the CursorImpl is used directly + * (mainly for internal databases). When the CursorImpl is created by + * a Cursor, CursorImpl.setCacheMode will be called. + */ + this.cacheMode = CacheMode.DEFAULT; + + status = CURSOR_NOT_INITIALIZED; + + /* + * Do not perform eviction here because we may be synchronized on the + * Database instance. For example, this happens when we call + * Database.openCursor(). Also eviction may be disabled after the + * cursor is constructed. + */ + } + + /** + * Performs a shallow copy and returns the new cursor. 
+ * + * @param samePosition If true, this cursor's position is used for the new + * cursor, and addCursor is called on the new cursor to register it with + * the current BIN. If false, the new cursor will be uninitialized. + */ + public CursorImpl cloneCursor(final boolean samePosition) { + + assert assertCursorState( + false /*mustBeInitialized*/, false /*mustNotBeInitialized*/); + + CursorImpl ret = null; + try { + latchBIN(); + + ret = (CursorImpl) super.clone(); + + if (!retainNonTxnLocks) { + ret.locker = locker.newNonTxnLocker(); + } + + ret.locker.registerCursor(ret); + + if (samePosition) { + ret.addCursor(); + } else { + ret.clear(); + } + } catch (CloneNotSupportedException cannotOccur) { + return null; + } finally { + releaseBIN(); + } + + /* Perform eviction before and after each cursor operation. */ + criticalEviction(); + + return ret; + } + + /* + * Allocate a new hashCode id. Doesn't need to be synchronized since it's + * ok for two objects to have the same hashcode. + */ + private static long getNextCursorId() { + return ++lastAllocatedId; + } + + @Override + public int hashCode() { + return thisId; + } + + public Locker getLocker() { + return locker; + } + + /** + * Called when a cursor has been duplicated prior to being moved. The new + * locker is informed of the old locker, so that a preempted lock taken by + * the old locker can be ignored. [#16513] + * + * @param closingCursor the old cursor that will be closed if the new + * cursor is moved successfully. + */ + public void setClosingLocker(CursorImpl closingCursor) { + + /* + * If the two lockers are different, then the old locker will be closed + * when the operation is complete. This is currently the case only for + * ReadCommitted cursors and non-transactional cursors that do not + * retain locks. + */ + if (!retainNonTxnLocks && locker != closingCursor.locker) { + locker.setClosingLocker(closingCursor.locker); + } + } + + /** + * Called when a cursor move operation is complete. Clears the + * closingLocker so that a reference to the old closed locker is not held. + */ + public void clearClosingLocker() { + locker.setClosingLocker(null); + } + + public CacheMode getCacheMode() { + return cacheMode; + } + + /** + * Sets the effective cache mode to use for the next operation. The + * cacheMode field will never be set to null, and can be passed directly to + * latching methods. 
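+     *
+     * A minimal sketch of per-operation use (hypothetical caller; not part
+     * of this class):
+     * <pre>{@code
+     *   cursorImpl.setCacheMode(CacheMode.EVICT_LN);
+     *   // ...perform one operation; the LN is then evicted when this
+     *   // CursorImpl is subsequently reset or closed.
+     * }</pre>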
+ * + * @see #performCacheModeEviction + */ + public void setCacheMode(final CacheMode mode) { + cacheMode = mode; + } + + public void setTreeStatsAccumulator(TreeWalkerStatsAccumulator tSA) { + maybeInitTreeStatsAccumulator(); + treeStatsAccumulatorTL.set(tSA); + } + + private void maybeInitTreeStatsAccumulator() { + if (treeStatsAccumulatorTL == null) { + treeStatsAccumulatorTL = new ThreadLocal<>(); + } + } + + private TreeWalkerStatsAccumulator getTreeStatsAccumulator() { + if (EnvironmentImpl.getThreadLocalReferenceCount() > 0) { + maybeInitTreeStatsAccumulator(); + return treeStatsAccumulatorTL.get(); + } else { + return null; + } + } + + public void incrementLNCount() { + TreeWalkerStatsAccumulator treeStatsAccumulator = + getTreeStatsAccumulator(); + if (treeStatsAccumulator != null) { + treeStatsAccumulator.incrementLNCount(); + } + } + + public int getIndex() { + return index; + } + + public BIN getBIN() { + return bin; + } + + public void setIndex(int idx) { + index = idx; + } + + public void setOnFirstSlot() { + assert(bin.isLatchOwner()); + index = 0; + } + + public void setOnLastSlot() { + assert(bin.isLatchOwner()); + index = bin.getNEntries() - 1; + } + + public boolean isOnBIN(BIN bin) { + return this.bin == bin; + } + + public void assertBIN(BIN bin) { + assert this.bin == bin : + "nodeId=" + bin.getNodeId() + + " cursor=" + dumpToString(true); + } + + public boolean isOnSamePosition(CursorImpl other) { + return bin == other.bin && index == other.index; + } + + public void setBIN(BIN newBin) { + + /* + * Historical note. In the past we checked here that the cursor was + * removed for the prior BIN by calling BIN.containsCursor [#16280]. + * Because the containsCursor method takes a latch on the prior BIN, + * this causes a rare latch deadlock when newBin is latched (during an + * insert, for example), since this thread will latch two BINs in + * arbitrary order; so the assertion was removed [#21395]. + */ + bin = newBin; + } + + public void latchBIN() { + while (bin != null) { + BIN waitingOn = bin; + waitingOn.latch(cacheMode); + if (bin == waitingOn) { + return; + } + waitingOn.releaseLatch(); + } + } + + public void releaseBIN() { + if (bin != null) { + bin.releaseLatchIfOwner(); + } + } + + void addCursor(BIN bin) { + if (bin != null) { + assert bin.isLatchExclusiveOwner(); + bin.addCursor(this); + } + } + + /** + * Add to the current cursor. + */ + void addCursor() { + if (bin != null) { + addCursor(bin); + } + } + + /** + * Change cursor to point to the given BIN/index. If the new BIN is + * different, then old BIN must be unlatched and the new BIN must be + * latched. + */ + private void setPosition(BIN newBin, int newIndex) { + if (bin != newBin) { + if (bin != null) { + latchBIN(); + bin.removeCursor(this); + bin.releaseLatch(); + } + setBIN(newBin); + addCursor(); + } + setIndex(newIndex); + } + + /** + * Called for creating trace messages without any latching. + */ + public long getCurrentNodeId() { + final BIN b = bin; + return (b == null ? -1 : b.getNodeId()); + } + + public long getCurrentLsn() { + + assert(bin != null && bin.isLatchOwner()); + assert(index >= 0 && index < bin.getNEntries()); + + return bin.getLsn(index); + } + + public byte[] getCurrentKey() { + return getCurrentKey(false); + } + + /** + * Returns the key at the current position, regardless of whether the + * record is defunct. Does not lock. The key returned is not a copy and + * may not be returned directly to the user without copying it first. + * + * The cursor must be initialized. 
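+     *
+     * Illustrative use (hypothetical caller; the copy step follows the
+     * warning above about the returned array not being a copy):
+     * <pre>{@code
+     *   byte[] key = cursorImpl.getCurrentKey(); // latches/unlatches BIN
+     *   byte[] copy = Arrays.copyOf(key, key.length); // copy before use
+     * }</pre>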
+     *
+     * TODO:
+     * The returned byte array is normally, but not always, a copy, and then
+     * copied again into the user's DatabaseEntry. If this method always
+     * returns a copy, the extra copy into DatabaseEntry could be avoided.
+     */
+    public byte[] getCurrentKey(boolean isLatched) {
+
+        if (!isLatched) {
+            latchBIN();
+        }
+
+        try {
+            assert(bin != null);
+            assert(index >= 0 && index < bin.getNEntries());
+
+            return bin.getKey(index);
+        } finally {
+            if (!isLatched) {
+                releaseBIN();
+            }
+        }
+    }
+
+    public boolean isProbablyExpired() {
+        latchBIN();
+        try {
+            return bin.isProbablyExpired(index);
+        } finally {
+            releaseBIN();
+        }
+    }
+
+    public long getExpirationTime() {
+        latchBIN();
+        try {
+            return TTL.expirationToSystemTime(
+                bin.getExpiration(index), bin.isExpirationInHours());
+        } finally {
+            releaseBIN();
+        }
+    }
+
+    private void setInitialized() {
+        status = CURSOR_INITIALIZED;
+    }
+
+    /**
+     * @return true if this cursor is closed
+     */
+    public boolean isClosed() {
+        return (status == CURSOR_CLOSED);
+    }
+
+    /**
+     * @return true if this cursor is not initialized
+     */
+    public boolean isNotInitialized() {
+        return (status == CURSOR_NOT_INITIALIZED);
+    }
+
+    public boolean isInternalDbCursor() {
+        return dbImpl.isInternalDb();
+    }
+
+    public boolean hasDuplicates() {
+        return dbImpl.getSortedDuplicates();
+    }
+
+    /**
+     * For a non-sticky cursor, this method is called when the cursor is
+     * initialized and an advancing operation (next/prev/skip) is about to be
+     * performed. The cursor position is not reset as it would be if the
+     * operation were a search or an insertion, for example.
+     */
+    public void beforeNonStickyOp() {
+
+        /*
+         * When the cache mode dictates that we evict the LN or BIN, we evict
+         * the LN here before the cursor's position changes. We can assume that
+         * either the position will change or the cursor will be reset. The BIN
+         * is evicted later.
+         */
+        if (cacheMode != CacheMode.DEFAULT &&
+            cacheMode != CacheMode.KEEP_HOT) {
+
+            latchBIN();
+            try {
+                performCacheModeLNEviction();
+            } finally {
+                releaseBIN();
+            }
+        }
+
+        releaseNonTxnLocks();
+
+        criticalEviction();
+    }
+
+    /**
+     * For a non-sticky cursor, this method is called after a successful
+     * operation. The cursor position is not reset as it would be if the
+     * operation failed.
+     */
+    public void afterNonStickyOp() {
+
+        /*
+         * To implement BIN eviction for a non-sticky cursor we must save the
+         * prior BIN, and only evict it after the operation and only when the
+         * BIN changes. The prior BIN is evicted after the operation (in this
+         * method) and when the cursor is reset or closed.
+         */
+        performPriorBINEviction();
+
+        if (priorBIN == null) {
+            priorBIN = bin;
+        }
+
+        criticalEviction();
+    }
+
+    /**
+     * Reset a cursor to an uninitialized state, but unlike close(), allow it
+     * to be used further.
+     */
+    public void reset() {
+
+        /* Must remove cursor before evicting BIN and releasing locks. */
+        removeCursorAndPerformCacheEviction(null /*newCursor*/);
+
+        releaseNonTxnLocks();
+
+        /* Perform eviction before and after each cursor operation. */
+        criticalEviction();
+    }
+
+    private void clear() {
+        bin = null;
+        index = -1;
+        status = CURSOR_NOT_INITIALIZED;
+        currentRecordVersion = null;
+        storageSize = 0;
+        priStorageSize = 0;
+        nSecWrites = 0;
+        priorBIN = null;
+    }
+
+    private void releaseNonTxnLocks() {
+        if (!retainNonTxnLocks) {
+            locker.releaseNonTxnLocks();
+        }
+    }
+
+    public void close() {
+        close(null);
+    }
+
+    /**
+     * Close a cursor.
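+     *
+     * A hedged sketch of the common shutdown sequence for an internal
+     * cursor that uses a BasicLocker (illustrative only; see the
+     * constructor comments above):
+     * <pre>{@code
+     *   cursorImpl.close();
+     *   // For retainNonTxnLocks=true, the caller ends the locker itself:
+     *   locker.operationEnd();
+     * }</pre>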
+     *
+     * @param newCursor is another cursor that is kept open by the parent
+     * Cursor object, or null if no other cursor is kept open.
+     */
+    public void close(final CursorImpl newCursor) {
+
+        assert assertCursorState(
+            false /*mustBeInitialized*/, false /*mustNotBeInitialized*/);
+
+        /* Must remove cursor before evicting BIN and releasing locks. */
+        removeCursorAndPerformCacheEviction(newCursor);
+
+        locker.unRegisterCursor(this);
+
+        if (!retainNonTxnLocks) {
+            locker.nonTxnOperationEnd();
+        }
+
+        status = CURSOR_CLOSED;
+
+        /* Perform eviction before and after each cursor operation. */
+        criticalEviction();
+    }
+
+    private void removeCursorAndPerformCacheEviction(CursorImpl newCursor) {
+
+        performPriorBINEviction();
+
+        latchBIN();
+
+        if (bin == null) {
+            clear(); // ensure that state is uninitialized
+            return;
+        }
+
+        try {
+            /* Must remove cursor before evicting BIN. */
+            bin.removeCursor(this);
+            performCacheModeEviction(newCursor); // may release latch
+        } finally {
+            releaseBIN();
+            clear();
+        }
+    }
+
+    /**
+     * Performs cache mode-based eviction but for the prior BIN only. This is
+     * called after a successful operation using a non-sticky cursor. The prior
+     * BIN is evicted only if the BIN has changed.
+     */
+    private void performPriorBINEviction() {
+
+        if (priorBIN == null || priorBIN == bin) {
+            return;
+        }
+
+        /*
+         * This priorBIN should not be processed again, and setting it to null
+         * enables the setting of a new priorBIN.
+         */
+        BIN binToEvict = priorBIN;
+        priorBIN = null;
+
+        /* Short circuit modes that do not perform BIN eviction. */
+        if (cacheMode == CacheMode.DEFAULT ||
+            cacheMode == CacheMode.KEEP_HOT ||
+            cacheMode == CacheMode.EVICT_LN) {
+            return;
+        }
+
+        binToEvict.latch(CacheMode.UNCHANGED);
+        try {
+            performCacheModeBINEviction(binToEvict);
+        } finally {
+            binToEvict.releaseLatchIfOwner();
+        }
+    }
+
+    /**
+     * Disables or enables eviction during cursor operations. For example, a
+     * cursor used to implement eviction (e.g., in some UtilizationProfile and
+     * most DbTree and VLSNIndex methods) should not itself perform eviction,
+     * but eviction should be enabled for user cursors. Eviction is disabled
+     * by default.
+     */
+    public void setAllowEviction(boolean allowed) {
+        allowEviction = allowed;
+    }
+
+    public void criticalEviction() {
+
+        /*
+         * In addition to disabling critical eviction for internal cursors (see
+         * setAllowEviction above), we do not perform critical eviction when
+         * UNCHANGED, EVICT_BIN or MAKE_COLD is used and the BIN is not dirty.
+         * Operations using these modes for a non-dirty BIN generally do not
+         * add any net memory to the cache, so they shouldn't have to perform
+         * critical eviction or block while another thread performs eviction.
+         */
+        if (allowEviction &&
+            ((bin != null && bin.getDirty()) ||
+             (cacheMode != CacheMode.UNCHANGED &&
+              cacheMode != CacheMode.EVICT_BIN &&
+              cacheMode != CacheMode.MAKE_COLD))) {
+            dbImpl.getEnv().criticalEviction(false /*backgroundIO*/);
+        }
+    }
+
+    /**
+     * When multiple operations are performed, CacheMode-based eviction is
+     * performed for a given operation at the end of the next operation, which
+     * calls close() or reset() on the CursorImpl of the previous operation.
+     * Eviction for the last operation (including when only one operation is
+     * performed) also occurs during Cursor.close(), which calls
+     * CursorImpl.close().
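+     *
+     * Timeline sketch (added for clarity): operation N runs with mode M;
+     * when operation N+1 begins, the CursorImpl for operation N is closed
+     * or reset and M-based eviction happens at that point; eviction for the
+     * final operation happens in Cursor.close().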
+ * + * By default, the CacheMode returned by DatabaseImpl.getCacheMode is used, + * and the defaults specified by the user for the Database or Environment + * are applied. However, the default mode can be overridden by the user by + * calling Cursor.setCacheMode, and the mode may be changed prior to each + * operation, if desired. + * + * To implement a per-operation CacheMode, two CacheMode fields are + * maintained. Cursor.cacheMode is the mode to use for the next operation. + * CursorImpl.cacheMode is the mode that was used for the previous + * operation, and that is used for eviction when that CursorImpl is closed + * or reset. + * + * This method must be called with the BIN latched but may release it, + * namely when the BIN is evicted. + */ + private void performCacheModeEviction(final CursorImpl newCursor) { + + /* Short circuit modes that do not perform LN or BIN eviction. */ + if (cacheMode == CacheMode.DEFAULT || + cacheMode == CacheMode.KEEP_HOT) { + return; + } + + final boolean movedOffBin; + final boolean movedOffLn; + + if (newCursor != null) { + movedOffBin = (bin != newCursor.bin); + movedOffLn = (movedOffBin || index != newCursor.index); + } else { + movedOffBin = true; + movedOffLn = true; + } + + if (movedOffLn) { + performCacheModeLNEviction(); + } + + /* Short circuit modes that do not perform BIN eviction. */ + if (cacheMode == CacheMode.EVICT_LN) { + return; + } + + if (movedOffBin) { + performCacheModeBINEviction(bin); + } + } + + /** + * Performs the LN portion of CacheMode eviction. The BIN is latched on + * method entry and exit. Must be called only for CacheMode.EVICT_LN, + * EVICT_BIN, UNCHANGED and MAKE_COLD. + */ + private void performCacheModeLNEviction() { + switch (cacheMode) { + case EVICT_LN: + case EVICT_BIN: + evictLN(true /*isLatched*/, false /*ifFetchedCold*/); + break; + case UNCHANGED: + case MAKE_COLD: + evictLN(true /*isLatched*/, true /*ifFetchedCold*/); + break; + default: + assert false; + } + } + + /** + * Performs the BIN portion of CacheMode eviction. The BIN is latched on + * method entry, but may or may not be latched on exit. Must be called only + * for CacheMode.EVICT_BIN, UNCHANGED and MAKE_COLD. + */ + private void performCacheModeBINEviction(BIN binToEvict) { + switch (cacheMode) { + case EVICT_BIN: + evictBIN(binToEvict, CacheMode.EVICT_BIN); + break; + case UNCHANGED: + case MAKE_COLD: + if (binToEvict.getFetchedCold()) { + evictBIN(binToEvict, CacheMode.UNCHANGED); + } + break; + default: + assert false; + } + } + + /** + * Evict the given BIN. Must already be latched. The latch will be released + * inside the doCacheModeEvict() call. + */ + private void evictBIN(BIN binToEvict, CacheMode cacheMode) { + + dbImpl.getEnv().getEvictor().doCacheModeEvict(binToEvict, cacheMode); + } + + /** + * Evict the LN node at the cursor position. + */ + public void evictLN() { + evictLN(false /*isLatched*/, false /*ifFetchedCold*/); + } + + /** + * Evict the LN node at the cursor position. + */ + private void evictLN(boolean isLatched, boolean ifFetchedCold) { + try { + if (!isLatched) { + latchBIN(); + } + if (index >= 0) { + bin.evictLN(index, ifFetchedCold); + } + } finally { + if (!isLatched) { + releaseBIN(); + } + } + } + + private boolean shouldEmbedLN(byte[] data) { + + return data.length <= dbImpl.getEnv().getMaxEmbeddedLN() && + !dbImpl.getSortedDuplicates() && + !dbImpl.getDbType().isInternal(); + } + + /** + * Delete the item pointed to by the cursor. If the item is already + * defunct, return KEYEMPTY. Returns with nothing latched. 
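+     *
+     * Illustrative call (hypothetical; assumes a local, non-replicated
+     * context):
+     * <pre>{@code
+     *   OperationResult r =
+     *       cursorImpl.deleteCurrentRecord(ReplicationContext.NO_REPLICATE);
+     *   if (r == null) {
+     *       // The record was already defunct (deleted or expired).
+     *   }
+     * }</pre>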
+     */
+    public OperationResult deleteCurrentRecord(ReplicationContext repContext) {
+
+        assert assertCursorState(
+            true /*mustBeInitialized*/, false /*mustNotBeInitialized*/);
+
+        final EnvironmentImpl envImpl = dbImpl.getEnv();
+        final DbType dbType = dbImpl.getDbType();
+        final long currLsn;
+        final LogItem logItem;
+
+        boolean success = false;
+
+        latchBIN();
+
+        try {
+            /*
+             * Get a write lock. An uncontended lock is permitted because we
+             * will log a new LN before releasing the BIN latch.
+             */
+            final LockStanding lockStanding = lockLN(
+                LockType.WRITE, true /*allowUncontended*/, false /*noWait*/);
+
+            if (!lockStanding.recordExists()) {
+                revertLock(lockStanding);
+                success = true;
+                return null;
+            }
+
+            currLsn = lockStanding.lsn;
+            assert(currLsn != DbLsn.NULL_LSN);
+            final boolean currEmbeddedLN = bin.isEmbeddedLN(index);
+            final int currLoggedSize = bin.getLastLoggedSize(index);
+            final byte[] currKey = bin.getKey(index);
+
+            final int expiration = bin.getExpiration(index);
+            final boolean expirationInHours = bin.isExpirationInHours();
+
+            /*
+             * Must fetch LN if the LN is not embedded and any of the following
+             * are true:
+             * - CLEANER_FETCH_OBSOLETE_SIZE is configured and lastLoggedSize
+             *   is unknown
+             * - this database does not use the standard LN class and we
+             *   cannot call DbType.createDeletedLN further below
+             * For other cases, we are careful not to fetch, in order to avoid
+             * a random read during a delete operation.
+             */
+            LN ln;
+            if ((currLoggedSize == 0 &&
+                 !currEmbeddedLN &&
+                 envImpl.getCleaner().getFetchObsoleteSize(dbImpl)) ||
+                !dbType.mayCreateDeletedLN()) {
+
+                ln = bin.fetchLN(index, cacheMode);
+                if (ln == null) {
+                    /* An expired LN was purged. */
+                    revertLock(lockStanding);
+                    success = true;
+                    return null;
+                }
+            } else {
+                ln = bin.getLN(index, cacheMode);
+            }
+
+            /*
+             * Make the existing LN deleted, if cached; otherwise, create a
+             * new deleted LN (with ln.data == null), but do not attach it
+             * to the tree yet.
+             */
+            long oldLNMemSize = 0;
+            if (ln != null) {
+                oldLNMemSize = ln.getMemorySizeIncludedByParent();
+                ln.delete();
+            } else {
+                ln = dbType.createDeletedLN(envImpl);
+            }
+
+            /* Get a wli to log. */
+            final WriteLockInfo wli = lockStanding.prepareForUpdate(bin, index);
+
+            /* Log the deleted record version and lock its new LSN. */
+            logItem = ln.optionalLog(
+                envImpl, dbImpl, locker, wli,
+                currEmbeddedLN /*newEmbeddedLN*/, currKey /*newKey*/,
+                expiration, expirationInHours,
+                currEmbeddedLN, currLsn, currLoggedSize,
+                false/*isInsertion*/, repContext);
+
+            /*
+             * Now update the parent BIN to reference the logrec written
+             * above, set the PD flag on, and do the BIN memory counting.
+             */
+            bin.deleteRecord(
+                index, oldLNMemSize, logItem.lsn,
+                ln.getVLSNSequence(), logItem.size);
+
+            /*
+             * If the LN is not cached, we don't need to attach the LN to the
+             * tree, because as long as the PD flag is on, the record's data
+             * will never be accessed. But for DW DBs, we must attach the LN
+             * because no logrec was generated above, and as a result, the LN
+             * must be in the tree so that a logrec will be generated when
+             * a db.sync() occurs later (that logrec is needed for crash
+             * recovery, because BINs are not replayed during crash recovery).
+             *
+             * If the LN child is cached, it is desirable to evict it because
+             * as long as the PD flag is on, the record's data will never be
+             * accessed. But for DW DBs we should not evict the dirty LN since
+             * it will be logged unnecessarily.
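+             *
+             * In summary, the decision below (a truth table added for
+             * clarity; derived from the rules above):
+             *
+             *   LN cached?  deferred-write DB?  action
+             *   ----------  ------------------  ------------------------
+             *   no          yes                 attach the deleted LN
+             *   no          no                  leave it detached
+             *   yes         yes                 keep it (avoid relogging)
+             *   yes         no                  evict it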
+ */ + if (bin.getTarget(index) == null) { + if (dbImpl.isDeferredWriteMode()) { + bin.attachNode(index, ln, null /*lnKey*/); + } + } else { + if (!dbImpl.isDeferredWriteMode()) { + bin.evictLN(index); + } + } + + /* Cache record version/size for delete operation. */ + setCurrentVersion(ln.getVLSNSequence(), logItem.lsn); + setStorageSize(); + + locker.addDeleteInfo(bin); + success = true; + + trace(Level.FINER, TRACE_DELETE, bin, index, currLsn, logItem.lsn); + + return DbInternal.makeResult(expiration, expirationInHours); + + } finally { + + if (success && + !dbImpl.isInternalDb() && + bin != null && + bin.isBINDelta()) { + dbImpl.getEnv().incBinDeltaDeletes(); + } + + releaseBIN(); + } + } + + /** + * Modify the current record with the given data, and optionally replace + * the key. + * + * @param key The new key value for the BIN slot S to be updated. Cannot + * be partial. For a no-dups DB, it is null. For dups DBs it is a 2-part + * key combining the current primary key of slot S with the original, + * user-provided data. "key" (if not null) must compare equal to S.key + * (otherwise DuplicateDataException is thrown), but the 2 keys may not + * be identical if custom comparators are used. So, S.key will actually + * be replaced by "key". + * + * @param data The new data to (perhaps partially) replace the data of the + * LN associated with the BIN slot. For dups DBs it is EMPTY_DUPS_DATA. + * Note: for dups DBs the original, user-provided "data" must not be + * partial. + * + * @param returnOldData To receive the old LN data (before the update). + * It is needed only by DBs with indexes/triggers; will be null otherwise. + * + * @param returnNewData To receive the full data of the updated LN. + * It is needed only by DBs with indexes/triggers and only if "data" is + * partial; will be null otherwise. Note: "returnNewData" may be different + * than "data" only if "data" is partial. + * + * @return OperationResult, or null if an expired LN was purged and a + * partial 'data' param was supplied. + */ + public OperationResult updateCurrentRecord( + DatabaseEntry key, + DatabaseEntry data, + ExpirationInfo expInfo, + DatabaseEntry returnOldData, + DatabaseEntry returnNewData, + ReplicationContext repContext) { + + assert assertCursorState( + true /*mustBeInitialized*/, false /*mustNotBeInitialized*/); + + if (returnOldData != null) { + returnOldData.setData(null); + } + if (returnNewData != null) { + returnNewData.setData(null); + } + + final LockStanding lockStanding; + OperationResult result = null; + boolean success = false; + + latchBIN(); + + try { + /* Get a write lock. */ + lockStanding = lockLN( + LockType.WRITE, true /*allowUncontended*/, false /*noWait*/); + + if (!lockStanding.recordExists()) { + revertLock(lockStanding); + } else { + result = updateRecordInternal( + (key != null ? Key.makeKey(key) : null), data, + expInfo, returnOldData, returnNewData, + lockStanding, repContext); + } + + success = true; + return result; + + } finally { + + if (success && + !dbImpl.isInternalDb() && + bin != null && + bin.isBINDelta()) { + dbImpl.getEnv().incBinDeltaUpdates(); + } + + releaseBIN(); + } + } + + /** + * Insert the given record (key + LN) in the tree or return false if the + * key is already present. + * + * The cursor must initially be uninitialized. + * + * This method is called directly internally for putting tree map LNs + * and file summary LNs. 
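+     *
+     * A hedged internal-usage sketch (hypothetical values; a non-replicated
+     * context is assumed):
+     * <pre>{@code
+     *   boolean inserted = cursorImpl.insertRecord(
+     *       keyBytes, ln, false /*blindInsertion*/,
+     *       ReplicationContext.NO_REPLICATE);
+     *   // false means a non-defunct record with this key already exists.
+     * }</pre>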
+     */
+    public boolean insertRecord(
+        byte[] key,
+        LN ln,
+        boolean blindInsertion,
+        ReplicationContext repContext) {
+
+        assert assertCursorState(
+            false /*mustBeInitialized*/, true /*mustNotBeInitialized*/);
+        if (LatchSupport.TRACK_LATCHES) {
+            LatchSupport.expectBtreeLatchesHeld(0);
+        }
+
+        try {
+            final Pair<LockStanding, OperationResult> result =
+                insertRecordInternal(
+                    key, ln, null /*expirationInfo*/, blindInsertion,
+                    null /*returnNewData*/, repContext);
+
+            return result.second() != null;
+        } finally {
+            releaseBIN();
+        }
+    }
+
+    /**
+     * Insert or update a given record. The method searches for the record
+     * using its key. It will perform an update if the record is found,
+     * otherwise an insertion.
+     *
+     * The cursor must initially be uninitialized.
+     *
+     * Called by all the Cursor.putXXX() ops, except putCurrent().
+     *
+     * @param key The new key value for the BIN slot S to be inserted/updated.
+     * Cannot be partial. For dups DBs it is a 2-part key combining the
+     * original, user-provided key and data. In case of update, "key" must
+     * compare equal to S.key (otherwise DuplicateDataException is thrown),
+     * but the 2 keys may not be identical if custom comparators are used.
+     * So, S.key will actually be replaced by "key".
+     *
+     * @param data In case of update, the new data to (perhaps partially)
+     * replace the data of the LN associated with the BIN slot. For dups DBs
+     * it is EMPTY_DUPS_DATA. Note: for dups DBs the original, user-provided
+     * "data" must not be partial.
+     *
+     * @param ln is normally a new LN node that is created for insertion, and
+     * will be discarded if an update occurs. However, HA will pass an
+     * existing node.
+     *
+     * @param putMode OVERWRITE or NO_OVERWRITE
+     *
+     * @param returnOldData To receive, in case of update, the old LN data
+     * (before the update). It is needed only by DBs with indexes/triggers;
+     * will be null otherwise.
+     *
+     * @param returnNewData To receive the full data of the new or updated LN.
+     * It is needed only by DBs with indexes/triggers and only if "data" is
+     * partial; will be null otherwise. Note: "returnNewData" may be different
+     * than "data" only if "data" is partial.
+     *
+     * @return OperationResult where isUpdate() distinguishes insertions and
+     * updates. Is null only if an expired LN was purged and a partial 'data'
+     * param was supplied.
+     */
+    public OperationResult insertOrUpdateRecord(
+        final DatabaseEntry key,
+        final DatabaseEntry data,
+        final LN ln,
+        final ExpirationInfo expInfo,
+        final PutMode putMode,
+        final DatabaseEntry returnOldData,
+        final DatabaseEntry returnNewData,
+        final ReplicationContext repContext) {
+
+        assert key != null;
+        assert data != null;
+        assert ln != null;
+        assert putMode != null;
+        assert assertCursorState(
+            false /*mustBeInitialized*/, true /*mustNotBeInitialized*/);
+        if (LatchSupport.TRACK_LATCHES) {
+            LatchSupport.expectBtreeLatchesHeld(0);
+        }
+
+        if (putMode != PutMode.OVERWRITE &&
+            putMode != PutMode.NO_OVERWRITE) {
+            throw EnvironmentFailureException.unexpectedState(
+                putMode.toString());
+        }
+
+        boolean success = false;
+        boolean inserted = false;
+
+        byte[] keyCopy = Key.makeKey(key);
+
+        try {
+
+            /*
+             * Try to insert the key/data pair as a new record. Will succeed if
+             * the record does not exist in the DB already. Otherwise, the
+             * insertRecord() returns with the cursor registered on the slot
+             * whose key is equal to "key", with the LSN of that slot locked
+             * in WRITE mode, and with the containing BIN latched.
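+             *
+             * Flow sketch (added for clarity): try the insertion first; if
+             * the key already exists, either return null (NO_OVERWRITE) or
+             * fall through to the update path below (OVERWRITE).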
+             */
+            Pair<LockStanding, OperationResult> insertResult =
+                insertRecordInternal(
+                    keyCopy, ln, expInfo,
+                    false /*blindInsertion*/,
+                    returnNewData, repContext);
+
+            if (insertResult.second() != null) {
+                inserted = true;
+                success = true;
+                return insertResult.second();
+            }
+
+            /*
+             * There is a non-defunct slot whose key is == "key". So, this is
+             * going to be an update. Note: Cursor has been registered on the
+             * existing slot by insertRecord()
+             */
+            if (putMode == PutMode.NO_OVERWRITE) {
+                success = true;
+                return null;
+            }
+
+            /*
+             * Update the non-defunct record at the cursor position. We have
+             * optimized by preferring to take an uncontended lock. The
+             * lockStanding var is guaranteed to be non-null in this case.
+             * The BIN must remain latched when calling this method.
+             */
+            final OperationResult result = updateRecordInternal(
+                keyCopy, data, expInfo,
+                returnOldData, returnNewData,
+                insertResult.first(), repContext);
+
+            success = true;
+            return result;
+
+        } finally {
+
+            if (success &&
+                !dbImpl.isInternalDb() &&
+                bin != null &&
+                bin.isBINDelta()) {
+                if (inserted) {
+                    dbImpl.getEnv().incBinDeltaInserts();
+                } else {
+                    dbImpl.getEnv().incBinDeltaUpdates();
+                }
+            }
+
+            releaseBIN();
+        }
+    }
+
+    /**
+     * Try to insert the key/data pair as a new record. Will succeed if a
+     * non-defunct record does not exist already with the given key.
+     *
+     * The cursor must initially be uninitialized.
+     *
+     * On return, this.bin is latched.
+     *
+     * @return a non-null pair of LockStanding and OperationResult.
+     *
+     *   + LockStanding will be non-null if a slot with the given key already
+     *     exists, whether or not we reuse the slot for this record (i.e.,
+     *     whether or not the result is non-null). In other words, we always
+     *     lock the record in an existing slot for the given key.
+     *
+     *   + OperationResult will be non-null if we inserted a slot or reused a
+     *     slot having a defunct record, or null if the insertion failed
+     *     because a non-defunct record exists with the given key.
+     */
+    private Pair<LockStanding, OperationResult> insertRecordInternal(
+        final byte[] key,
+        final LN ln,
+        ExpirationInfo expInfo,
+        final boolean blindInsertion,
+        final DatabaseEntry returnNewData,
+        final ReplicationContext repContext) {
+
+        final EnvironmentImpl envImpl = dbImpl.getEnv();
+        final Tree tree = dbImpl.getTree();
+        WriteLockInfo wli;
+        LockStanding lockStanding = null;
+        final boolean isSlotReuse;
+        final long currLsn;
+
+        final boolean currEmbeddedLN;
+        final boolean newEmbeddedLN;
+        final byte[] data;
+
+        if (shouldEmbedLN(ln.getData())) {
+            data = ln.getData();
+            newEmbeddedLN = true;
+        } else {
+            newEmbeddedLN = false;
+            data = null;
+        }
+
+        if (expInfo == null) {
+            expInfo = ExpirationInfo.DEFAULT;
+        }
+
+        /*
+         * At this point, this cursor does not have a position so it cannot be
+         * registered with the BIN that will be used. This is good because it
+         * allows slot compression to occur before BIN splits (thus avoiding
+         * splits if compression finds and removes any defunct slots). However,
+         * if another cursor, including the one from which this was cloned, is
+         * registered with the BIN, then splits won't be allowed. This is a
+         * good reason to use non-sticky cursors for insertions, especially
+         * sequential insertions since they will often end up in the same BIN.
+         *
+         * Find and latch the BIN that should contain the "key". On return from
+         * the tree search, this.bin is latched, but "this" is still not
+         * registered.
+ */ + bin = tree.findBinForInsert(key, getCacheMode()); + + /* + * In the case where logging occurs before locking, allow lockers to + * reject the operation (e.g., if writing on a replica) and also + * prepare to undo in the (very unlikely) event that logging succeeds + * but locking fails. Call this method BEFORE slot insertion, in case + * it throws an exception which would leave the slot with a null LSN. + * + * For Txn, creates the writeInfo map (if not done already), and + * inserts dbImpl in the undoDatabases map. Noop for other + * non-HA lockers. + */ + locker.preLogWithoutLock(dbImpl); + + /* + * If the key exists already, insertEntry1() does not insert, but + * returns the index of the existing key. + * + * If bin is a delta and it does not contain the key, then: + * (a) if blindInsertion is false, insertEntry1() will mutate it to a + * full BIN and check again if the key exists or not. + * (b) if blindInsertion is true, insertEntry1() will not mutate the + * delta; it will just insert the key into the delta. This is OK, + * because blindInsertion will be true only if we know already that the + * key does not exist in the tree. + */ + int insertIndex = bin.insertEntry1( + ln, key, data, DbLsn.NULL_LSN, blindInsertion); + + if ((insertIndex & IN.INSERT_SUCCESS) == 0) { + /* + * Key exists. Insertion was not successful. Register the cursor on + * the existing slot. If the slot is defunct, the key does not + * really exist and the slot can be reused to do an insertion. + */ + isSlotReuse = true; + + setIndex(insertIndex); + addCursor(); + setInitialized(); + + /* + * Lock the LSN for the existing LN slot, and check defunct-ness. + * An uncontended lock request is permitted because we are holding + * the bin latch. If no locker holds a lock on the slot, then no + * lock is taken by this cursor either. + */ + lockStanding = lockLN( + LockType.WRITE, true /*allowUncontended*/, false /*noWait*/); + assert(lockStanding != null); + + if (lockStanding.recordExists()) { + return new Pair<>(lockStanding, null); + } + + /* + * The record in the current slot is defunct. Note: it may have + * been made defunct by this.locker itself. + */ + currLsn = lockStanding.lsn; + currEmbeddedLN = bin.isEmbeddedLN(index); + + /* + * Create a new WriteLockInfo or get an existing one for the LSN + * of the current slot, and set its abortLSN and abortKD fields, + * if needed, i.e, if it is not the current txn the one who created + * this LSN. The abortLSN and abortKD fields of the wli will be + * included in the new logrec. + */ + wli = lockStanding.prepareForUpdate(bin, index); + + } else { + /* + * Register the cursor at the slot that has been successfully + * inserted. + */ + isSlotReuse = false; + currEmbeddedLN = newEmbeddedLN; + currLsn = DbLsn.NULL_LSN; + + setIndex(insertIndex &= ~IN.INSERT_SUCCESS); + addCursor(); + setInitialized(); + + /* Create a new WriteLockInfo */ + wli = LockStanding.prepareForInsert(bin); + } + + /* + * Log the new LN and lock the LSN of the new logrec in WRITE mode. + * Note: in case of slot reuse, we pass NULL_LSN for the oldLsn param + * because the old defunct LN is counted obsolete by other means. 
+         */
+        LogItem logItem = null;
+        try {
+            logItem = ln.optionalLog(
+                envImpl, dbImpl, locker, wli,
+                newEmbeddedLN, key,
+                expInfo.expiration, expInfo.expirationInHours,
+                currEmbeddedLN, currLsn, 0/*currSize*/,
+                true/*isInsertion*/, repContext);
+        } finally {
+            if (logItem == null && !isSlotReuse) {
+                /*
+                 * Possible buffer overflow, out-of-memory, or I/O exception
+                 * during logging. The BIN entry will contain a NULL_LSN. To
+                 * prevent an exception during a future fetchLN() call, we
+                 * set the KD flag. We do not call BIN.deleteEntry because it
+                 * does not adjust cursors. We do not add this entry to the
+                 * compressor queue to avoid complexity (this situation is
+                 * rare).
+                 */
+                bin.setKnownDeletedAndEvictLN(index);
+            }
+        }
+
+        assert logItem != null;
+
+        if (lockStanding == null) {
+            /*
+             * No slot reuse; straight insertion. Update LSN in BIN slot.
+             * The LN is already in the slot.
+             */
+            bin.updateEntry(
+                index, logItem.lsn, ln.getVLSNSequence(),
+                logItem.size);
+
+            bin.setExpiration(
+                index, expInfo.expiration, expInfo.expirationInHours);
+
+            /*
+             * The following call accounts for extra marshaled memory, i.e.,
+             * memory that was added to the LN as a side-effect of logging it.
+             * This can happen for FileSummaryLN's only (it is a noop for
+             * other kinds of LNs).
+             *
+             * To avoid violating assertions (e.g., in IN.changeMemorySize), we
+             * must finish the memory adjustment while the BIN is still
+             * latched. [#20069]
+             *
+             * This special handling does not apply to slot reuse, because the
+             * updateEntry() version used in the slot reuse case will recalc
+             * the BIN memory from scratch, and as a result, will take into
+             * account the extra marshaled memory. [#20845]
+             */
+            if (bin.getTarget(index) == ln) {
+                ln.addExtraMarshaledMemorySize(bin);
+            }
+
+        } else {
+
+            /*
+             * Slot reuse. When reusing a slot, the key is replaced in the BIN
+             * slot. This ensures that the correct key value is used when the
+             * new key is non-identical to the key in the slot but is
+             * considered equal by the btree comparator.
+             */
+            bin.insertRecord(
+                index, ln, logItem.lsn, logItem.size, key, data,
+                expInfo.expiration, expInfo.expirationInHours);
+        }
+
+        if (returnNewData != null) {
+            returnNewData.setData(null);
+            ln.setEntry(returnNewData);
+        }
+
+        /* Cursor is positioned on new record. */
+        setInitialized();
+
+        /* Cache record version/size for insertion operation. */
+        setCurrentVersion(ln.getVLSNSequence(), bin.getLsn(index));
+        setStorageSize();
+
+        /*
+         * It is desirable to evict the LN in a duplicates DB because it will
+         * never be fetched again. But for deferred-write DBs we should not
+         * evict a dirty LN since it may be logged unnecessarily.
+         */
+        if (dbImpl.getSortedDuplicates() &&
+            !dbImpl.isDeferredWriteMode() &&
+            bin.getTarget(index) != null) {
+            bin.evictLN(index);
+        }
+
+        traceInsert(Level.FINER, bin, logItem.lsn, index);
+
+        return new Pair<>(
+            lockStanding,
+            DbInternal.makeResult(
+                expInfo.expiration, expInfo.expirationInHours));
+    }
+
+    /**
+     * Update the record where the cursor is currently positioned. The
+     * cursor is registered with this position, the associated bin is latched,
+     * the BIN slot is not defunct, and it has been locked in WRITE mode.
+     *
+     * @param returnOldData if non-null, will be filled in with the
+     * pre-existing record's data. However, if an expired LN was purged, it
+     * will not be filled in and the caller should expect this; see {@link
+     * Cursor#putNotify}.
+     *
+     * @return OperationResult, or null if an expired LN was purged and a
+     * partial 'data' param was supplied.
+     */
+    private OperationResult updateRecordInternal(
+        final byte[] key,
+        final DatabaseEntry data,
+        final ExpirationInfo expInfo,
+        final DatabaseEntry returnOldData,
+        final DatabaseEntry returnNewData,
+        final LockStanding lockStanding,
+        final ReplicationContext repContext) {
+
+        assert(lockStanding.recordExists());
+
+        final EnvironmentImpl envImpl = dbImpl.getEnv();
+        final DbType dbType = dbImpl.getDbType();
+
+        final long currLsn = lockStanding.lsn;
+        assert(currLsn != DbLsn.NULL_LSN);
+        final int currLoggedSize = bin.getLastLoggedSize(index);
+        final byte[] currKey = bin.getKey(index);
+        final byte[] currData;
+
+        final boolean currEmbeddedLN = bin.isEmbeddedLN(index);
+        final boolean newEmbeddedLN;
+
+        final LogItem logItem;
+
+        /*
+         * Must fetch LN if it is not embedded and any of the following
+         * are true:
+         * - returnOldData is non-null: data needs to be returned
+         * - data is a partial entry: needs to be resolved
+         * - CLEANER_FETCH_OBSOLETE_SIZE is configured and lastLoggedSize
+         *   is unknown
+         * - this database does not use the standard LN class and we
+         *   cannot call DbType.createUpdatedLN further below (this is
+         *   the case for NameLNs, MapLNs, and FileSummaryLNs).
+         * For other cases, we are careful not to fetch, in order to avoid
+         * a random read during an update operation.
+         */
+        LN ln;
+        if (returnOldData != null ||
+            data.getPartial() ||
+            (currLoggedSize == 0 &&
+             !currEmbeddedLN &&
+             envImpl.getCleaner().getFetchObsoleteSize(dbImpl)) ||
+            !dbType.mayCreateUpdatedLN()) {
+
+            if (currEmbeddedLN) {
+                currData = bin.getData(index);
+                ln = bin.getLN(index, cacheMode);
+            } else {
+                ln = bin.fetchLN(index, cacheMode);
+                currData = (ln != null ? ln.getData() : null);
+            }
+        } else {
+            ln = bin.getLN(index, cacheMode);
+            currData = (ln != null ? ln.getData() : null);
+        }
+
+        final byte[] newData;
+        if (data.getPartial()) {
+            if (currData == null) {
+                /* Expired LN was purged. Cannot use a partial entry. */
+                return null;
+            }
+            newData = LN.resolvePartialEntry(data, currData);
+        } else {
+            newData = LN.copyEntryData(data);
+        }
+
+        /*
+         * If the key is changed (according to the comparator), we assume
+         * it is actually the data that has changed for a duplicates DB.
+         */
+        if (key != null &&
+            Key.compareKeys(
+                currKey, key, dbImpl.getKeyComparator()) != 0) {
+            throw new DuplicateDataException(
+                "Can't replace a duplicate with new data that is not " +
+                "equal to the existing data according to the duplicate " +
+                "comparator.");
+        }
+
+        if (returnOldData != null && currData != null) {
+            returnOldData.setData(null);
+            LN.setEntry(returnOldData, currData);
+        }
+
+        newEmbeddedLN = shouldEmbedLN(newData);
+
+        /* Update the existing LN, if cached, else create new LN. */
+        final long oldLNMemSize;
+        if (ln != null) {
+            oldLNMemSize = ln.getMemorySizeIncludedByParent();
+            ln.modify(newData);
+        } else {
+            oldLNMemSize = 0;
+            ln = dbType.createUpdatedLN(envImpl, newData);
+        }
+
+        final int oldExpiration = bin.getExpiration(index);
+        final boolean oldExpirationInHours = bin.isExpirationInHours();
+
+        if (expInfo != null) {
+            expInfo.setOldExpirationTime(
+                TTL.expirationToSystemTime(
+                    oldExpiration, oldExpirationInHours));
+        }
+
+        final int expiration;
+        final boolean expirationInHours;
+
+        if (expInfo != null && expInfo.updateExpiration) {
+            if (expInfo.expiration != oldExpiration ||
+                expInfo.expirationInHours != oldExpirationInHours) {
+                expInfo.setExpirationUpdated(true);
+            }
+            expiration = expInfo.expiration;
+            expirationInHours = expInfo.expirationInHours;
+        } else {
+            expiration = oldExpiration;
+            expirationInHours = oldExpirationInHours;
+        }
+
+        /*
+         * Create a new WriteLockInfo or get an existing one for the LSN
+         * of the current slot, and set its abortLSN and abortKD fields,
+         * if needed, i.e., if the current txn is not the one that created
+         * this LSN. The abortLSN and abortKD fields of the wli will be
+         * included in the new logrec.
+         */
+        final WriteLockInfo wli = lockStanding.prepareForUpdate(bin, index);
+
+        /* Log the new record version and lock its new LSN. */
+        logItem = ln.optionalLog(
+            envImpl, dbImpl, locker, wli,
+            newEmbeddedLN, (key != null ? key : currKey),
+            expiration, expirationInHours,
+            currEmbeddedLN, currLsn, currLoggedSize,
+            false/*isInsertion*/, repContext);
+
+        /* Return a copy of resulting data, if requested. [#16932] */
+        if (returnNewData != null) {
+            returnNewData.setData(null);
+            ln.setEntry(returnNewData);
+        }
+
+        /*
+         * Update the parent BIN. Update the key, if changed. [#15704]
+         */
+        bin.updateRecord(
+            index, oldLNMemSize, logItem.lsn, ln.getVLSNSequence(),
+            logItem.size, key, (newEmbeddedLN ? newData : null),
+            expiration, expirationInHours);
+
+        /*
+         * If the LN child is not cached, attach it to the tree if the DB
+         * is a DW one or if the record is not embedded in the BIN. For
+         * DW DBs, we must attach the LN even if the record is embedded,
+         * because no logrec was generated above, and as a result, the LN
+         * must be in the tree so that a logrec will be generated when
+         * a db.sync() occurs later (that logrec is needed for crash
+         * recovery, because BINs are not replayed during crash recovery).
+         *
+         * If the LN child is cached, it is desirable to evict it if the
+         * record is embedded because it will never be fetched again.
+         * But for DW DBs we should not evict a dirty LN since it will
+         * be logged unnecessarily.
+         */
+        final boolean shouldCache =
+            (dbImpl.isDeferredWriteMode() ||
+             (!dbImpl.getSortedDuplicates() && !newEmbeddedLN));
+
+        if (bin.getTarget(index) == null) {
+            if (shouldCache) {
+                bin.attachNode(index, ln, null /*lnKey*/);
+            }
+        } else {
+            if (!shouldCache) {
+                bin.evictLN(index);
+            }
+        }
+
+        /* Cache record version/size for update operation. */
+        setCurrentVersion(ln.getVLSNSequence(), logItem.lsn);
+        setStorageSize();
+
+        trace(Level.FINER, TRACE_MOD, bin, index, currLsn, logItem.lsn);
+
+        return DbInternal.makeUpdateResult(expiration, expirationInHours);
+    }
+
+    /**
+     * Position the cursor at the first or last record of the dbImpl.
+     * It's okay if this record is defunct.
+     *
+     * The cursor must initially be uninitialized.
+     *
+     * Returns with the target BIN latched!
+     *
+     * @return true if a first or last position is found, false if the
+     * tree being searched is empty.
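+     *
+     * Illustrative scan-start sketch (hypothetical caller):
+     * <pre>{@code
+     *   if (cursorImpl.positionFirstOrLast(true /*first*/)) {
+     *       // The BIN is latched here, and the slot may still be defunct,
+     *       // so a caller typically follows with lockAndGetCurrent().
+     *   }
+     * }</pre>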
+     */
+    public boolean positionFirstOrLast(boolean first) {
+
+        assert assertCursorState(
+            false /*mustBeInitialized*/, true /*mustNotBeInitialized*/);
+
+        boolean found = false;
+
+        try {
+            if (first) {
+                bin = dbImpl.getTree().getFirstNode(cacheMode);
+            } else {
+                bin = dbImpl.getTree().getLastNode(cacheMode);
+            }
+
+            if (bin != null) {
+
+                TreeWalkerStatsAccumulator treeStatsAccumulator =
+                    getTreeStatsAccumulator();
+
+                if (bin.getNEntries() == 0) {
+
+                    /*
+                     * An IN was found. Even if it's empty, let Cursor
+                     * handle moving to the first non-defunct entry.
+                     */
+                    found = true;
+                    index = -1;
+                } else {
+                    index = (first ? 0 : (bin.getNEntries() - 1));
+
+                    if (treeStatsAccumulator != null &&
+                        !bin.isEntryKnownDeleted(index) &&
+                        !bin.isEntryPendingDeleted(index)) {
+                        treeStatsAccumulator.incrementLNCount();
+                    }
+
+                    /*
+                     * Even if the entry is defunct, just leave our
+                     * position here and return.
+                     */
+                    found = true;
+                }
+            }
+
+            addCursor(bin);
+            setInitialized();
+
+            return found;
+        } catch (final Throwable e) {
+            /* Release latch on error. */
+            releaseBIN();
+            throw e;
+        }
+    }
+
+    /**
+     * Position this cursor on the slot whose key is the max key less than or
+     * equal to the given search key.
+     *
+     * To be more precise, let K1 be the search key. The method positions the
+     * cursor on the BIN that should contain K1. If the BIN does contain K1,
+     * this.index is set to the containing slot. Otherwise, this.index is
+     * set to the right-most slot whose key is < K1, or to -1 if K1 is less
+     * than all keys in the BIN.
+     *
+     * The cursor must initially be uninitialized.
+     *
+     * The method returns with the BIN latched, unless an exception is raised.
+     *
+     * The method returns an integer that encodes the search outcome: If the
+     * FOUND bit is not set, the tree is completely empty (has no BINs). If
+     * the FOUND bit is set, the EXACT_KEY bit says whether K1 was found or
+     * not and the FOUND_LAST bit says whether the cursor is positioned to the
+     * very last slot of the BTree (note that this state can only be counted
+     * on as long as the BIN is latched).
+     *
+     * Even if the search returns an exact result, the record may be defunct.
+     * The caller must therefore check whether the cursor is positioned on a
+     * defunct record.
+     *
+     * This method does not lock the record. The caller is expected to call
+     * lockAndGetCurrent to perform locking.
+     */
+    public int searchRange(
+        DatabaseEntry searchKey,
+        Comparator<byte[]> comparator) {
+
+        assert assertCursorState(
+            false /*mustBeInitialized*/, true /*mustNotBeInitialized*/);
+
+        boolean foundSomething = false;
+        boolean foundExactKey = false;
+        boolean foundLast = false;
+        BINBoundary binBoundary = new BINBoundary();
+
+        try {
+            byte[] key = Key.makeKey(searchKey);
+
+            bin = dbImpl.getTree().search(
+                key, Tree.SearchType.NORMAL, binBoundary, cacheMode,
+                comparator);
+
+            if (bin != null) {
+
+                foundSomething = true;
+                if (bin.isBINDelta() && comparator != null) {
+
+                    /*
+                     * We must mutate a BIN delta if a non-null comparator is
+                     * used. Otherwise, if we positioned the cursor on the
+                     * delta using the non-null comparator, we would not be
+                     * able to adjust its position correctly later when the
+                     * delta gets mutated for some reason (because at that
+                     * later time, the comparator used here would not be
+                     * known).
+ */ + bin.mutateToFullBIN(false /*leaveFreeSlot*/); + } + + index = bin.findEntry( + key, true /*indicateIfExact*/, false/*exact*/, comparator); + + if (bin.isBINDelta() && + (index < 0 || + (index & IN.EXACT_MATCH) == 0 || + binBoundary.isLastBin)) { + + /* + * Note: if binBoundary.isLastBin, we must mutate the BIN + * in order to compute the foundLast flag below. + */ + bin.mutateToFullBIN(false /*leaveFreeSlot*/); + index = bin.findEntry(key, true, false, comparator); + } + + if (index >= 0) { + if ((index & IN.EXACT_MATCH) != 0) { + foundExactKey = true; + index &= ~IN.EXACT_MATCH; + } + + foundLast = (binBoundary.isLastBin && + index == bin.getNEntries() - 1); + } + + /* + * Must call addCursor after mutateToFullBIN() to avoid having + * to reposition "this" inside mutateToFullBIN(), which would + * be both unnecessary and wrong given that this.index could + * have the IN.EXACT_MATCH still on. + */ + addCursor(bin); + } + + setInitialized(); + + /* Return a multi-part status value */ + return ((foundSomething ? FOUND : 0) | + (foundExactKey ? EXACT_KEY : 0) | + (foundLast ? FOUND_LAST : 0)); + + } catch (final Throwable e) { + releaseBIN(); + throw e; + } + } + + public boolean searchExact(DatabaseEntry searchKey, LockType lockType) { + return searchExact(searchKey, lockType, false, false) != null; + } + + /** + * Position this cursor on the slot (if any) whose key matches the given + * search key. If no such slot is found or the slot does not hold a "valid" + * record, return null. Otherwise, lock the found record with the specified + * lock type (which may be NONE) and return the LockStanding obj that was + * created by the locking op. Whether the slot contains a "valid" record or + * not depends on the slot's KD/PD flags and the lockType and dirtyReadAll + * parameters. Four cases are considered; they are described in the + * lockLNAndCheckDefunct() method. + * + * The cursor must initially be uninitialized. + * + * The method returns with the BIN latched, unless an exception is raised. + * + * In all cases, the method registers the cursor with the BIN that contains + * or should contain the search key. + * + * @return the LockStanding for the found record, or null if no record was + * found. + */ + public LockStanding searchExact( + final DatabaseEntry searchKey, + final LockType lockType, + final boolean dirtyReadAll, + final boolean dataRequested) { + + assert assertCursorState( + false /*mustBeInitialized*/, true /*mustNotBeInitialized*/); + + LockStanding lockStanding = null; + + try { + byte[] key = Key.makeKey(searchKey); + + bin = dbImpl.getTree().search(key, cacheMode); + + if (bin != null) { + + index = bin.findEntry(key, false, true /*exact*/); + + if (index < 0 && bin.isBINDelta()) { + + if (bin.mayHaveKeyInFullBin(key)) { + bin.mutateToFullBIN(false /*leaveFreeSlot*/); + index = bin.findEntry(key, false, true /*exact*/); + } + } + + addCursor(bin); + + if (index >= 0) { + lockStanding = lockLNAndCheckDefunct( + lockType, dirtyReadAll, dataRequested); + } + } + + setInitialized(); + return lockStanding; + + } catch (final Throwable e) { + /* Release latch on error. */ + releaseBIN(); + throw e; + } + } + + /** + * Lock and copy current record into the key and data DatabaseEntry. + * When calling this method, this.bin should not be latched already. + * On return, this.bin is unlatched. 
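+     *
+     * Hedged usage sketch (hypothetical caller that has already positioned
+     * the cursor and released the BIN latch):
+     * <pre>{@code
+     *   OperationResult r = cursorImpl.lockAndGetCurrent(
+     *       foundKey, foundData, LockType.READ);
+     *   if (r == null) {
+     *       // The record is defunct or its LN was cleaned/expired.
+     *   }
+     * }</pre>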
+     */
+    public OperationResult lockAndGetCurrent(
+        DatabaseEntry foundKey,
+        DatabaseEntry foundData,
+        final LockType lockType) {
+
+        return lockAndGetCurrent(
+            foundKey, foundData, lockType, false, false, true);
+    }
+
+    /**
+     * Let S be the slot where this cursor is currently positioned. If S
+     * does not hold a "valid" record, return null. Otherwise, lock the
+     * record in S with the specified lock type (which may be NONE), copy its
+     * key and data into the key and data DatabaseEntries, and return a
+     * non-null OperationResult.
+     * Whether the slot contains a "valid" record or not depends on the slot's
+     * KD/PD flags, the lockType and dirtyReadAll parameters, and whether the
+     * record has expired. For details see {@link #lockLNAndCheckDefunct}.
+     *
+     * On entry, the isLatched param says whether this.bin is latched or not.
+     * On return, this.bin is unlatched if the unlatch param is true or an
+     * exception is thrown.
+     *
+     * @return OperationResult, or null if the LN has been cleaned and cannot
+     * be fetched.
+     */
+    public OperationResult lockAndGetCurrent(
+        DatabaseEntry foundKey,
+        DatabaseEntry foundData,
+        final LockType lockType,
+        final boolean dirtyReadAll,
+        final boolean isLatched,
+        final boolean unlatch) {
+
+        /* Used in the finally to indicate whether exception was raised. */
+        boolean success = false;
+
+        try {
+            assert assertCursorState(
+                true /*mustBeInitialized*/, false /*mustNotBeInitialized*/);
+
+            assert checkAlreadyLatched(isLatched) : dumpToString(true);
+
+            if (!isLatched) {
+                latchBIN();
+            }
+
+            assert(bin.getCursorSet().contains(this));
+
+            TreeWalkerStatsAccumulator treeStatsAccumulator =
+                getTreeStatsAccumulator();
+
+            /*
+             * If we encounter a deleted slot, opportunistically add the BIN
+             * to the compressor queue. We do not queue expired slots to avoid
+             * frequent compression, especially in the CRUD path; we rely
+             * instead on the evictor to perform expired slot compression.
+             */
+            if (index >= 0 &&
+                index < bin.getNEntries() &&
+                bin.isDeleted(index)) {
+                bin.queueSlotDeletion(index);
+            }
+
+            /*
+             * Check the KD flag in the BIN slot and make sure this isn't an
+             * empty BIN. The BIN could be empty by virtue of the compressor
+             * reducing the size of this BIN to 0 but not yet having removed
+             * it from the tree.
+             *
+             * The index may be negative if we're at an intermediate stage in
+             * a higher-level operation (e.g., the starting search for a range
+             * scan op), and we expect a higher-level method to do a next or
+             * prev operation after this returns KEYEMPTY. [#11700]
+             */
+            if (index < 0 ||
+                index >= bin.getNEntries() ||
+                bin.isEntryKnownDeleted(index)) {
+                /* Node is no longer present. */
+                if (treeStatsAccumulator != null) {
+                    treeStatsAccumulator.incrementDeletedLNCount();
+                }
+
+                success = true;
+                return null;
+            }
+
+            assert TestHookExecute.doHookIfSet(testHook);
+
+            final boolean dataRequested =
+                (foundData != null &&
+                 (!foundData.getPartial() ||
+                  foundData.getPartialLength() != 0));
+
+            if (lockLNAndCheckDefunct(
+                    lockType, dirtyReadAll, dataRequested) == null) {
+                if (treeStatsAccumulator != null) {
+                    treeStatsAccumulator.incrementDeletedLNCount();
+                }
+                success = true;
+                return null;
+            }
+
+            final OperationResult result = getCurrent(foundKey, foundData);
+
+            success = true;
+            return result;
+
+        } finally {
+            if (unlatch || !success) {
+                releaseBIN();
+            }
+        }
+    }
+
+    /**
+     * Let S be the slot where this cursor is currently positioned. The
+     * method locks S (i.e. its LSN), and depending on S's KD/PD flags and
+     * expired status, it returns either null or the LockStanding obj that was
+     * created by the locking op. The following 4 cases are considered. By
+     * "defunct" below we mean S is KD/PD or expired.
+     *
+     * 1. If S is not defunct, return the LockStanding obj. In this case, we
+     * know that S holds a valid (non-defunct) record.
+     *
+     * 2. If S is defunct, and the lock type is not NONE, return null. In this
+     * case, we know that the record that used to be in S is definitely defunct.
+     *
+     * 3. If S is defunct, the lock type is NONE, and dirtyReadAll is false,
+     * return null. This case corresponds to the READ_UNCOMMITTED LockMode.
+     * The record in S is defunct, but the deleting txn may be active still,
+     * and if it aborts later, the record will be restored. To avoid a
+     * potentially blocking lock, in READ_UNCOMMITTED mode we consider the
+     * record to be non-existing and return null.
+     *
+     * 4. If S is defunct, the lock type is NONE, and dirtyReadAll is true,
+     * lock the record in READ mode. This case corresponds to the
+     * READ_UNCOMMITTED_ALL LockMode, which requires that we do not skip
+     * "provisionally defunct" records. There are two sub-cases:
+     *
+     * 4a. If dataRequested is true, we wait until the deleting txn finishes.
+     *     In this case the READ lock is blocking. If after the lock is
+     *     granted S is still defunct, release the lock and return null.
+     *     Otherwise, release the lock and return the LockStanding obj.
+     *
+     * 4b. If dataRequested is false, then we check whether the deleting txn is
+     *     still open by requesting a non-blocking READ lock. If the lock is
+     *     granted then the writing txn is closed or this cursor's locker is
+     *     the writer, and we proceed as if the READ lock was granted in 4a.
+     *     If the lock is denied then the deleting txn is still open, and we
+     *     return the LockStanding obj so that the record is not skipped.
+     *
+     * The BIN must be latched on entry and is latched on exit.
+     *
+     * @param dirtyReadAll is true if using LockMode.READ_UNCOMMITTED_ALL.
+     *
+     * @param dataRequested is true if the read operation should return the
+     * record data, meaning that a blocking lock must be used for dirtyReadAll.
+     * Is ignored if dirtyReadAll is false. Is always false for a dup DB,
+     * since data is never requested for dup DB ops at the CursorImpl level.
+     */
+    private LockStanding lockLNAndCheckDefunct(
+        final LockType lockType,
+        final boolean dirtyReadAll,
+        final boolean dataRequested) {
+
+        assert !(dirtyReadAll && lockType != LockType.NONE);
+        assert !(dataRequested && dbImpl.getSortedDuplicates());
+
+        LockStanding standing = lockLN(lockType);
+
+        if (standing.recordExists()) {
+            return standing;
+        }
+
+        /* The slot is defunct. */
+
+        if (lockType != LockType.NONE) {
+            revertLock(standing);
+
+            /*
+             * The deletion was committed by another locker, or was
+             * performed by this locker.
+             */
+            return null;
+        }
+
+        /* We're using dirty-read. The lockLN above did not actually lock. */
+
+        if (!dirtyReadAll) {
+            /* READ_UNCOMMITTED -- skip defunct records without locking. */
+            return null;
+        }
+
+        /*
+         * READ_UNCOMMITTED_ALL -- get a read lock. Whether we can request a
+         * no-wait or a blocking lock depends on the dataRequested parameter.
+         *
+         * Although there is some redundant processing in the sense that lockLN
+         * is called more than once (above and below), this is not considered a
+         * performance issue because accessing defunct records is normally
+         * infrequent. Deleted slots are normally compressed away quickly.
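+         *
+         * Outcome summary for the call below (cases 4a/4b above, restated
+         * for clarity): if the lock is granted and the slot is still
+         * defunct, return null; if it is granted and the record was
+         * restored, return the standing; if it is denied (possible only in
+         * 4b), return the standing so the record is not skipped.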
+ */ + standing = lockLN( + LockType.READ, false /*allowUncontended*/, + !dataRequested /*noWait*/); + + if (standing.lockResult.getLockGrant() == LockGrantType.DENIED) { + + /* + * The no-wait lock request was denied, which means the data is not + * needed and the writing transaction is still open. The defunct + * record should not be skipped in this case, according to the + * definition of READ_UNCOMMITTED_ALL. + */ + assert !standing.recordExists(); + return standing; + } + + /* We have acquired a temporary read lock. */ + revertLock(standing); + + if (standing.recordExists()) { + /* + * Another txn aborted the deletion or expiration time change while + * we waited. + */ + return standing; + } + + /* + * The write was committed by another locker, or has been performed by + * this locker. + */ + return null; + } + + /** + * Copy current record into the key and data DatabaseEntry. + * + * @return OperationResult, or null if the LN has been cleaned and cannot + * be fetched. + */ + public OperationResult getCurrent( + final DatabaseEntry foundKey, + final DatabaseEntry foundData) { + + assert(bin.isLatchExclusiveOwner()); + assert(index >= 0 && index < bin.getNEntries()); + assert(!bin.isEntryKnownDeleted(index)); + + /* + * We don't need to fetch the LN if the user has not requested that we + * return the data, or if we know for sure that the LN is empty. + */ + final boolean isEmptyLN = dbImpl.isLNImmediatelyObsolete(); + final boolean isEmbeddedLN = bin.isEmbeddedLN(index); + + final boolean dataRequested = + (foundData != null && + (!foundData.getPartial() || foundData.getPartialLength() != 0)); + + final LN ln; + if (!isEmptyLN && !isEmbeddedLN && dataRequested) { + ln = bin.fetchLN(index, cacheMode); + if (ln == null) { + /* An expired LN was purged. */ + return null; + } + } else { + ln = null; + } + + /* Return the data. */ + if (dataRequested) { + + byte[] data; + + if (ln != null) { + data = ln.getData(); + } else if (isEmptyLN || bin.isNoDataLN(index)) { + data = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } else { + assert(isEmbeddedLN); + data = bin.getData(index); + } + + LN.setEntry(foundData, data); + } + + /* Return the key */ + if (foundKey != null) { + LN.setEntry(foundKey, bin.getKey(index)); + } + + /* Cache record version/size for fetch operation. */ + final long vlsn = (ln != null ? + ln.getVLSNSequence() : + bin.getVLSN(index, false /*allowFetch*/, cacheMode)); + + setCurrentVersion(vlsn, bin.getLsn(index)); + setStorageSize(); + + return DbInternal.makeResult( + bin.getExpiration(index), bin.isExpirationInHours()); + } + + public LN getCurrentLN(final boolean isLatched, final boolean unlatch) { + + /* Used in the finally to indicate whether exception was raised. */ + boolean success = false; + + try { + assert assertCursorState( + true /*mustBeInitialized*/, false /*mustNotBeInitialized*/); + assert checkAlreadyLatched(isLatched) : dumpToString(true); + + if (!isLatched) { + latchBIN(); + } + + assert(bin.getCursorSet().contains(this)); + assert(!bin.isEmbeddedLN(index)); + + LN ln = bin.fetchLN(index, cacheMode); + + success = true; + return ln; + } finally { + if (unlatch || !success) { + releaseBIN(); + } + } + } + + /** + * Retrieve the current LN. BIN is unlatched on entry and exit. 
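+ * <p>
+ * Illustrative call pattern only (a sketch; the receiver name and the
+ * choice of LockType.READ are for illustration):
+ * <pre>
+ *     LN ln = cursorImpl.lockAndGetCurrentLN(LockType.READ);
+ *     if (ln == null) {
+ *         // The slot was defunct (deleted or expired); callers
+ *         // typically advance the cursor and retry.
+ *     }
+ * </pre>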
+ */ + public LN lockAndGetCurrentLN(final LockType lockType) { + + try { + assert assertCursorState( + true /*mustBeInitialized*/, false /*mustNotBeInitialized*/); + assert checkAlreadyLatched(false) : dumpToString(true); + + latchBIN(); + + assert(bin.getCursorSet().contains(this)); + + LockStanding lockStanding = lockLN(lockType); + + if (!lockStanding.recordExists()) { + revertLock(lockStanding); + return null; + } + + assert(!bin.isEmbeddedLN(index)); + + return bin.fetchLN(index, cacheMode); + } finally { + releaseBIN(); + } + } + + /** + * Returns the VLSN and LSN for the record at the current position. Must + * be called when the cursor is positioned on a record. + * + * If this method is called on a secondary cursor, the version of the + * associated primary record is returned. In that case, the allowFetch + * parameter is ignored, and the version is available only if the primary + * record was retrieved (see setPriInfo). + * + * @param allowFetch is true to fetch the LN to get the VLSN, or false to + * return -1 for the VLSN if both the LN and VLSN are not cached. + * + * @throws IllegalStateException if the cursor is closed or uninitialized, + * or this is a secondary cursor and the version is not cached. + */ + public RecordVersion getCurrentVersion(boolean allowFetch) { + + /* Ensure cursor is open and initialized. */ + checkCursorState( + true /*mustBeInitialized*/, false /*mustNotBeInitialized*/); + + /* + * For a secondary cursor, the cached version is all we have. + * See setPriInfo. + */ + if (isSecondaryCursor) { + if (currentRecordVersion == null) { + throw new IllegalStateException( + "Record version is available via a SecondaryCursor only " + + "if the associated primary record was retrieved."); + } + return currentRecordVersion; + } + + /* + * Use cached version if available. Do not use cached version if it + * does not contain a VLSN, and VLSNs are preserved, and fetching is + * allowed; instead, try to fetch it below. + */ + if (currentRecordVersion != null) { + if ((currentRecordVersion.getVLSN() != + VLSN.NULL_VLSN_SEQUENCE) || + !allowFetch || + !dbImpl.getEnv().getPreserveVLSN()) { + + return currentRecordVersion; + } + } + + /* Get the VLSN from the BIN, create the version and cache it. */ + latchBIN(); + try { + setCurrentVersion( + bin.getVLSN(index, allowFetch, cacheMode), bin.getLsn(index)); + } finally { + releaseBIN(); + } + return currentRecordVersion; + } + + private void setCurrentVersion(long vlsn, long lsn) { + currentRecordVersion = new RecordVersion(vlsn, lsn); + } + + /** + * Returns the estimated disk storage size for the record at the current + * position. The size includes an estimation of the JE overhead for the + * record, in addition to the user key/data sizes. But it does not include + * obsolete overhead related to the record, i.e., space that could + * potentially be reclaimed by the cleaner. + * + *
+ * This method does not fetch the LN. Must be called when the
+ * cursor is positioned on a record.
+ *
+ * When called on a secondary cursor that was used to return the primary
+ * data, the size of the primary record is returned by this method.
+ * Otherwise the size of the record at this cursor position is
+ * returned.
        + * + * @return the estimated storage size, or zero when the size is unknown + * because a non-embedded LN is not resident and the LN was logged with a + * JE version prior to 6.0. + * + * @throws IllegalStateException if the cursor is closed or uninitialized. + * + * @see StorageSize + */ + public int getStorageSize() { + + assert assertCursorState( + true /*mustBeInitialized*/, false /*mustNotBeInitialized*/); + + return (priStorageSize > 0) ? priStorageSize : storageSize; + } + + private void setStorageSize() { + storageSize = StorageSize.getStorageSize(bin, index); + } + + /** + * When the primary record is read during a secondary operation, this + * method is called to copy the primary version and storage size here. + * This allows the secondary cursor API to return the version and size of + * the primary record. Note that a secondary record does not have a version + * of its own. + * + * @param sourceCursor contains the primary info, but may be a primary or + * secondary cursor. + */ + public void setPriInfo(final CursorImpl sourceCursor) { + currentRecordVersion = sourceCursor.currentRecordVersion; + priStorageSize = sourceCursor.storageSize; + } + + /** + * Returns the number of secondary records written by the last put/delete + * operation at the current cursor position. + * + * NOTE: this method does not work (returns 0) if primary deletions are + * performed via a secondary (SecondaryDatabase/SecondaryCursor.delete). + * + * @return number of writes, or zero if a put/delete operation was not + * performed. + */ + public int getNSecondaryWrites() { + return nSecWrites; + } + + public void setNSecondaryWrites(final int nWrites) { + nSecWrites = nWrites; + } + + /** + * Advance a cursor. Used so that verify can advance a cursor even in the + * face of an exception [12932]. + * @param key on return contains the key if available, or null. + * @param data on return contains the data if available, or null. + */ + public boolean advanceCursor(DatabaseEntry key, DatabaseEntry data) { + + BIN oldBin = bin; + int oldIndex = index; + + key.setData(null); + data.setData(null); + + try { + getNext( + key, data, LockType.NONE, false /*dirtyReadAll*/, + true /*forward*/, false /*isLatched*/, + null /*rangeConstraint*/); + } catch (DatabaseException ignored) { + /* Klockwork - ok */ + } + + /* + * If the position changed, regardless of an exception, then we believe + * that we have advanced the cursor. + */ + if (bin != oldBin || index != oldIndex) { + + /* + * Return the key and data from the BIN entries, if we were not + * able to read it above. + */ + if (key.getData() == null && bin != null && index > 0) { + LN.setEntry(key, bin.getKey(index)); + } + return true; + } else { + return false; + } + } + + /** + * Move the cursor forward and return the next "valid" record. Whether a + * slot contains a "valid" record or not depends on the slot's KD/PD flags + * and the lockType and dirtyReadAll parameters. Four cases are considered; + * they are described in the lockLNAndCheckDefunct() method. + * + * This will cross BIN boundaries. On return, no latches are held. If no + * exceptions, the cursor is registered with its new location. + * + * @param foundKey DatabaseEntry to use for returning key + * + * @param foundData DatabaseEntry to use for returning data + * + * @param forward if true, move forward, else move backwards + * + * @param isLatched if true, the bin that we're on is already + * latched. 
+ * + * @param rangeConstraint if non-null, is called to determine whether a key + * is out of range. + */ + public OperationResult getNext( + DatabaseEntry foundKey, + DatabaseEntry foundData, + LockType lockType, + boolean dirtyReadAll, + boolean forward, + boolean isLatched, + RangeConstraint rangeConstraint) { + + assert assertCursorState( + true /*mustBeInitialized*/, false /*mustNotBeInitialized*/); + + assert checkAlreadyLatched(isLatched) : dumpToString(true); + + OperationResult result = null; + BIN anchorBIN = null; + + try { + while (bin != null) { + + assert checkAlreadyLatched(isLatched) : dumpToString(true); + + if (!isLatched) { + latchBIN(); + isLatched = true; + } + + if (DEBUG) { + verifyCursor(bin); + } + + bin.mutateToFullBIN(false /*leaveFreeSlot*/); + + /* Is there anything left on this BIN? */ + if ((forward && ++index < bin.getNEntries()) || + (!forward && --index > -1)) { + + if (rangeConstraint != null && + !rangeConstraint.inBounds(bin.getKey(index))) { + + result = null; + releaseBIN(); + break; + } + + OperationResult ret = lockAndGetCurrent( + foundKey, foundData, lockType, dirtyReadAll, + true /*isLatched*/, false /*unlatch*/); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + + if (ret != null) { + incrementLNCount(); + releaseBIN(); + result = ret; + break; + } + } else { + /* + * Make sure that the current BIN will not be pruned away + * if it is or becomes empty after it gets unlatched by + * Tree.getNextBin() or Tree.getPrevBin(). The operation + * of these Tree methods relies on the current BIN not + * getting pruned. + */ + anchorBIN = bin; + anchorBIN.pin(); + bin.removeCursor(this); + bin = null; + + final Tree tree = dbImpl.getTree(); + + /* SR #12736 Try to prune away oldBin */ + assert TestHookExecute.doHookIfSet(testHook); + + if (forward) { + bin = tree.getNextBin(anchorBIN, cacheMode); + index = -1; + } else { + bin = tree.getPrevBin(anchorBIN, cacheMode); + if (bin != null) { + index = bin.getNEntries(); + } + } + isLatched = true; + + if (bin == null) { + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + result = null; + break; + } else { + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + + addCursor(); + anchorBIN.unpin(); + anchorBIN = null; + } + } + } + } finally { + if (anchorBIN != null) { + anchorBIN.unpin(); + } + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + + return result; + } + + /** + * Used to detect phantoms during "get next" operations with serializable + * isolation. If this method returns true, the caller should restart the + * operation from the prior position. + * + * Something may have been added to the original cursor (cursorImpl) while + * we were getting the next BIN. cursorImpl would have been adjusted + * properly but we would have skipped a BIN in the process. This can + * happen when all INs are unlatched in Tree.getNextBin. It can also + * happen without a split, simply due to inserted entries in the previous + * BIN. + * + * @return true if an unaccounted for insertion happened. + * + * TODO: + * Unfortunately, this method doesn't cover all cases where a phantom may + * have been inserted. Another case is described below. 
+ *
+ *                     IN-0
+ *             ----------------------
+ *             |  | 50 | 100 |      |
+ *             ----------------------
+ *              /    /      \      \
+ *             /                    \
+ *        /----                      ----\
+ *  IN-1 /                                \ IN-2
+ *  ----------------            ----------------
+ *  | 60 | 70 | 80 |            |    |    |    |
+ *  ----------------            ----------------
+ *    /    |     \               /
+ *   /     |      \             /
+ *  ----------------      -----------------
+ *  | 81 | 83 | 85 |      | 110 |    |    |
+ *  ----------------      -----------------
+ *       BIN-3                  BIN-4
+ *
+ * Initially, the tree looks as above and a cursor (C) is located on the
+ * last slot of BIN-3. For simplicity, assume no duplicates and no
+ * serializable isolation. Also assume that C is a sticky cursor.
+ *
+ * 1. Thread 1 calls C.getNext(), which calls retrieveNextAllowPhantoms(),
+ *    which duplicates C's cursorImpl, and calls dup.getNext().
+ *
+ *    dup.getNext() latches BIN-3, sets dup.binToBeRemoved to BIN-3 and
+ *    then calls Tree.getNextBin(BIN-3).
+ *
+ *    Tree.getNextBin(BIN-3) does the following:
+ *    - sets searchKey to 85
+ *    - calls Tree.getParentINForChildIN(BIN-3).
+ *    - Tree.getParentINForChildIN(BIN-3) unlatches BIN-3 and searches for
+ *      BIN-3's parent, thus reaching IN-1.
+ *    - IN-1.findEntry(85) sets "index" to 2,
+ *    - "index" is incremented,
+ *    - "moreEntriesThisIn" is set to false,
+ *    - "next" is set to IN-1,
+ *    - Tree.getParentINForChildIN(IN-1) is called and unlatches IN-1.
+ *
+ *    Assume at this point thread 1 loses the CPU.
+ *
+ * 2. Thread 2 inserts keys 90 and 95, causing a split of both BIN-3 and
+ *    IN-1. So the tree now looks like this:
+ *
+ *                        IN-0
+ *             ---------------------------
+ *             |  | 50 | 80 | 100 |      |
+ *             ---------------------------
+ *              /    /     |      \     \
+ *             /           |             \
+ *   /---------            |              ---------\
+ *  IN-1 /                 |                        \ IN-2
+ *       /               IN-5                        \
+ *  -----------       -----------           ----------------
+ *  | 60 | 70 |       | 80 | 90 |           |    |    |    |
+ *  -----------       -----------           ----------------
+ *    /    |            /     \               /
+ *   /     |           /       \             /
+ *  ----------------  -----------      -----------------
+ *  | 81 | 83 | 85 |  | 90 | 95 |      | 110 |    |    |
+ *  ----------------  -----------      -----------------
+ *       BIN-3           BIN-6               BIN-4
+ *
+ * Notice that C.cursorImpl still points to the last slot of BIN-3.
+ *
+ * 3. Thread 1 resumes:
+ *
+ *    - Tree.getParentINForChildIN(IN-1) reaches IN-0.
+ *    - IN-0.findEntry(85) sets "index" to 2,
+ *    - "index" is incremented,
+ *    - "nextIN" is set to IN-2, which is latched.
+ *    - Tree.searchSubTree(IN-2, LEFT) is called, and returns BIN-4.
+ *    - BIN-4 is the result of Tree.getNextBin(BIN-3), i.e., BIN-6 was
+ *      skipped.
+ *
+ *    Now we are back in dup.getNext():
+ *    - dup.bin is set to BIN-4, dup.index to -1, and dup is added to BIN-4
+ *    - the while loop repeats, dup.index is set to 0, the 1st slot of
+ *      BIN-4 is locked, and dup.getNext() returns SUCCESS.
+ *
+ *    Now we are back in C.retrieveNextAllowPhantoms():
+ *    - C.checkForInsertion() is called
+ *    - C.cursorImpl and dup are on different BINs, but the condition:
+ *      origBIN.getNEntries() - 1 > origCursor.getIndex()
+ *      is false, so C.checkForInsertion() returns false.
+ *
+ * The end result is that BIN-6 has been missed. This is not a "bug" for
+ * non-serializable isolation, but the above scenario applies to
+ * serializable isolation as well, and in that case, BIN-6 should really
+ * not be missed. This could be solved by re-implementing
+ * Tree.getNext/PrevBIN() to do a more "logical" kind of search.
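+ * <p>
+ * For reference, the forward-scan detection step in the method body is
+ * just (paraphrasing the code below):
+ * <pre>
+ *     for (int i = origCursor.getIndex() + 1;
+ *          i < origBIN.getNEntries();
+ *          i++) {
+ *         if (!origBIN.isDefunct(i)) {
+ *             return true; // a phantom insertion happened
+ *         }
+ *     }
+ * </pre>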
+ */ + public boolean checkForInsertion( + final GetMode getMode, + final CursorImpl dupCursor) { + + final CursorImpl origCursor = this; + boolean forward = getMode.isForward(); + boolean ret = false; + + if (origCursor.bin != dupCursor.bin) { + + /* + * We jumped to the next BIN during getNext(). + * + * Be sure to operate on the BIN returned by latchBIN, not a cached + * var [#21121]. + * + * Note that a cursor BIN can change after the check above, but + * that's not relevant; what we're trying to detect are BIN changes + * during the operation that has already completed. + * + * Note that we can call isDefunct without locking. If we see a + * non-committed defunct entry, we'll just iterate around in the + * caller. So a false positive is ok. + */ + origCursor.latchBIN(); + final BIN origBIN = origCursor.bin; + + origBIN.mutateToFullBIN(false /*leaveFreeSlot*/); + + try { + if (forward) { + if (origBIN.getNEntries() - 1 > origCursor.getIndex()) { + + /* + * We were adjusted to something other than the + * last entry so some insertion happened. + */ + for (int i = origCursor.getIndex() + 1; + i < origBIN.getNEntries(); + i++) { + if (!origBIN.isDefunct(i)) { + /* See comment above about locking. */ + ret = true; + break; + } + } + } + } else { + if (origCursor.getIndex() > 0) { + + /* + * We were adjusted to something other than the + * first entry so some insertion happened. + */ + for (int i = 0; i < origCursor.getIndex(); i++) { + if (!origBIN.isDefunct(i)) { + /* See comment above about locking. */ + ret = true; + break; + } + } + } + } + } finally { + origCursor.releaseBIN(); + } + return ret; + } + return false; + } + + /** + * Skips over entries until a boundary condition is satisfied, either + * because maxCount is reached or RangeConstraint.inBounds returns false. + * + * If a maxCount is passed, this allows advancing the cursor quickly by N + * entries. If a rangeConstraint is passed, this allows returning the + * entry count after advancing until the predicate returns false, e.g., the + * number of entries in a key range. In either case, the number of entries + * advanced is returned. + * + * Optimized to scan using level two of the tree when possible, to avoid + * calling getNextBin/getPrevBin for every BIN of the database. All BINs + * beneath a level two IN can be skipped quickly, with the level two parent + * IN latched, when all of its children BINs are resident and can be + * latched without waiting. When a child BIN is not resident or latching + * waits, we revert to the getNextBin/getPrevBin approach, to avoid keeping + * the parent IN latched for long time periods. + * + * Although this method positions the cursor on the last non-defunct entry + * seen (before the boundary condition is satisfied), because it does not + * lock the LN it is possible that it is made defunct by another thread + * after the BIN is unlatched. + * + * @param forward is true to skip forward, false to skip backward. + * + * @param maxCount is the maximum number of non-defunct entries to skip, + * and may be LTE zero if no maximum is enforced. + * + * @param rangeConstraint is a predicate that returns false at a position + * where advancement should stop, or null if no predicate is enforced. + * + * @return the number of non-defunct entries that were skipped. 
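+ * <p>
+ * Hypothetical usage sketch (the argument values are invented for
+ * illustration):
+ * <pre>
+ *     // Advance past up to 1000 live entries, with no key-range bound.
+ *     long skipped = cursorImpl.skip(
+ *         true /*forward*/, 1000 /*maxCount*/, null /*rangeConstraint*/);
+ * </pre>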
+ */ + public long skip( + boolean forward, + long maxCount, + RangeConstraint rangeConstraint) { + + final CursorImpl c = cloneCursor(true /*samePosition*/); + c.setCacheMode(CacheMode.UNCHANGED); + + try { + return c.skipInternal(forward, maxCount, rangeConstraint, this); + } catch (final Throwable e) { + /* + * Get more info on dbsim duplicate.conf failure when c.close below + * throws because the BIN latch is already held. It should have + * been released by skipInternal and therefore an unexpected + * exception must have been throw and the error handling must be + * incorrect. + */ + e.printStackTrace(System.out); + throw e; + } finally { + c.close(); + } + } + + /** + * Use this cursor to reference the current BIN in the traversal, to + * prevent the current BIN from being compressed away. But set the given + * finalPositionCursor (the 'user' cursor) position only at non-defunct + * entries, since it should be positioned on a valid entry when this method + * returns. + */ + private long skipInternal( + boolean forward, + long maxCount, + RangeConstraint rangeConstraint, + CursorImpl finalPositionCursor) { + + /* Start with the entry at the cursor position. */ + final Tree tree = dbImpl.getTree(); + + latchBIN(); + + IN parent = null; + BIN prevBin = null; + BIN curBin = bin; + int curIndex = getIndex(); + long count = 0; + boolean success = false; + + try { + while (true) { + curBin.mutateToFullBIN(false /*leaveFreeSlot*/); + + /* Skip entries in the current BIN. */ + count = skipEntries( + forward, maxCount, rangeConstraint, finalPositionCursor, + curBin, curIndex, count); + + if (count < 0) { + curBin.releaseLatch(); + success = true; + return (- count); + } + + /* + * Get the parent IN at level two. The BIN is unlatched by + * getParentINForChildIN. Before releasing the BIN latch, get + * the search key for the last entry. + */ + final byte[] idKey = + (curBin.getNEntries() == 0 ? + curBin.getIdentifierKey() : + (forward ? + curBin.getKey(curBin.getNEntries() - 1) : + curBin.getKey(0))); + + final SearchResult result = tree.getParentINForChildIN( + curBin, false, /*useTargetLevel*/ + true, /*doFetch*/ CacheMode.DEFAULT); + + parent = result.parent; + + if (!result.exactParentFound) { + throw EnvironmentFailureException.unexpectedState( + "Cannot get parent of BIN id=" + + curBin.getNodeId() + " key=" + + Arrays.toString(idKey)); + } + + /* + * Find and latch previous child BIN by matching idKey rather + * than using result.index, as in Tree.getNextIN (see comments + * there). + */ + int parentIndex = parent.findEntry(idKey, false, false); + + curBin = (BIN) parent.fetchIN(parentIndex, CacheMode.DEFAULT); + curBin.latch(); + + if (forward ? + (parentIndex < parent.getNEntries() - 1) : + (parentIndex > 0)) { + + /* + * There are more entries in the parent. Skip entries for + * child BINs that are resident and can be latched no-wait. + */ + final int incr = forward ? 1 : (-1); + + for (parentIndex += incr;; parentIndex += incr) { + + prevBin = curBin; + curBin = null; + + /* Break is no more entries in parent. */ + if ((forward ? + parentIndex >= parent.getNEntries() : + parentIndex < 0)) { + parent.releaseLatch(); + break; + } + + /* + * Latch next child BIN, if cached and unlatched. + * + * Note that although 2 BINs are latched here, this + * can't cause deadlocks because the 2nd latch is + * no-wait. 
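+ *
+ * If the no-wait latch fails, the code below releases the parent and
+ * falls back to getNextBin/getPrevBin, so the level-two parent is
+ * never held while waiting on a child.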
+ */ + curBin = (BIN) parent.getTarget(parentIndex); + + if (curBin == null || + !curBin.latchNoWait(CacheMode.DEFAULT)) { + parent.releaseLatch(); + break; + } + + /* Unlatch the prev BIN */ + prevBin.releaseLatch(); + prevBin = null; + + /* Position at new BIN to prevent compression. */ + setPosition(curBin, -1); + + curBin.mutateToFullBIN(false /*leaveFreeSlot*/); + + /* Skip entries in new child BIN. */ + count = skipEntries( + forward, maxCount, rangeConstraint, + finalPositionCursor, curBin, + forward ? (-1) : curBin.getNEntries(), count); + + if (count < 0) { + parent.releaseLatch(); + curBin.releaseLatch(); + success = true; + return (- count); + } + } + } else { + /* No more entries in the parent. */ + parent.releaseLatch(); + prevBin = curBin; + } + + /* + * Only the prevBin is still latched here. Move to the next + * BIN the "hard" way (i.e., via full tree searches). + */ + curBin = forward ? + tree.getNextBin(prevBin, CacheMode.DEFAULT) : + tree.getPrevBin(prevBin, CacheMode.DEFAULT); + + assert(!prevBin.isLatchOwner()); + + if (curBin == null) { + success = true; + return count; + } + + prevBin = null; + curIndex = forward ? (-1) : curBin.getNEntries(); + + /* Position at new BIN to prevent compression. */ + setPosition(curBin, -1); + } + } finally { + if (curBin != null && !success) { + curBin.releaseLatchIfOwner(); + } + if (prevBin != null && !success) { + prevBin.releaseLatchIfOwner(); + } + if (parent != null && !success) { + parent.releaseLatchIfOwner(); + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + } + } + + /** + * Skip entries in curBin from one past curIndex and onward. Returns + * non-negative count if skipping should continue, or negative count if + * bounds is exceeded. + */ + private long skipEntries( + boolean forward, + long maxCount, + RangeConstraint rangeConstraint, + CursorImpl finalPositionCursor, + BIN curBin, + int curIndex, + long count) { + + assert(!curBin.isBINDelta()); + + final int incr = forward ? 1 : (-1); + + for (int i = curIndex + incr;; i += incr) { + if (forward ? (i >= curBin.getNEntries()) : (i < 0)) { + break; + } + if (rangeConstraint != null && + !rangeConstraint.inBounds(curBin.getKey(i))) { + return (- count); + } + if (!curBin.isDefunct(i)) { + count += 1; + finalPositionCursor.setPosition(curBin, i); + if (maxCount > 0 && count >= maxCount) { + return (- count); + } + } + } + return count; + } + + /** + * Returns the stack of ancestor TrackingInfo for the BIN at the cursor, or + * null if a split occurs and the information returned would be + * inconsistent. + * + * Used by CountEstimator. + */ + public List getAncestorPath() { + + /* + * Search for parent of BIN, get TrackingInfo for ancestors. If the + * exact parent is not found, a split occurred and null is returned. + */ + final List trackingList = new ArrayList<>(); + + latchBIN(); + + final BIN origBin = bin; + final Tree tree = dbImpl.getTree(); + + final SearchResult result = tree.getParentINForChildIN( + origBin, false, /*useTargetLevel*/ + true /*doFetch*/, CacheMode.UNCHANGED, trackingList); + + if (!result.exactParentFound) { + /* Must have been a split. */ + return null; + } + + /* + * The parent was found and is now latched. If the child BIN does not + * match the cursor's BIN, then a split occurred and null is returned. + */ + final long binLsn; + try { + if (origBin != result.parent.getTarget(result.index) || + origBin != bin) { + /* Must have been a split. 
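+ * The child at result.index no longer matches this cursor's BIN, so
+ * any ancestor path assembled here would be inconsistent.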
*/ + return null; + } + + binLsn = result.parent.getLsn(result.index); + bin.latch(); + + } finally { + result.parent.releaseLatch(); + } + + /* + * The child BIN is now latched. Subtract defunct entries from BIN's + * total entries and adjust the index accordingly. Add TrackingInfo + * for child BIN. + */ + try { + int binEntries = bin.getNEntries(); + int binIndex = getIndex(); + + for (int i = bin.getNEntries() - 1; i >= 0; i -= 1) { + + if (bin.isDefunct(i)) { + binEntries -= 1; + if (i < binIndex) { + binIndex -= 1; + } + } + } + + final TrackingInfo info = new TrackingInfo( + binLsn, bin.getNodeId(), binEntries, binIndex); + + trackingList.add(info); + + return trackingList; + + } finally { + bin.releaseLatch(); + } + } + + /** + * Search for the next key following the given key, and acquire a range + * insert lock on it. If there are no more records following the given + * key, lock the special EOF node for the dbImpl. + */ + public void lockNextKeyForInsert(DatabaseEntry key) { + + DatabaseEntry tempKey = new DatabaseEntry( + key.getData(), key.getOffset(), key.getSize()); + + boolean lockedNextKey = false; + boolean latched = true; + + try { + while (true) { + + int searchResult = searchRange(tempKey, null /*comparator*/); + + if ((searchResult & FOUND) != 0 && + (searchResult & FOUND_LAST) == 0) { + + /* + * The search positioned "this" on the BIN that should + * contain K1 and this BIN is now latched. If the BIN does + * contain K1, this.index points to K1's slot. Otherwise, + * this.index points to the right-most slot whose key is + * < K1 (or this.index is -1 if K1 is < than all keys in + * the BIN). Furthermore, "this" is NOT positioned on the + * very last slot of the BTree. + * + * Call getNext() to advance "this" to the next *valid* + * (i.e., not defunct) slot and lock that slot in + * RANGE_INSERT mode. Normally, getNext() will move the + * cursor to the 1st slot with a key K2 > K1. However, it + * is possible that K2 <= K1 (see the comments in + * Cursor.searchRangeAdvanceAndCheckKey() about how this + * can happen. We handle this race condition by restarting + * the search. + */ + DatabaseEntry tempData = new DatabaseEntry(); + tempData.setPartial(0, 0, true); + + OperationResult result = getNext( + tempKey, tempData, LockType.RANGE_INSERT, + false, true, true, + null /*rangeConstraint*/); + + latched = false; + + if (result != null) { + + Comparator comparator = + dbImpl.getKeyComparator(); + + int c = Key.compareKeys(tempKey, key, comparator); + if (c <= 0) { + tempKey.setData( + key.getData(), key.getOffset(), key.getSize()); + continue; + } + + lockedNextKey = true; + } + } + + break; + } + } finally { + if (latched) { + releaseBIN(); + } + } + + /* Lock the EOF node if no next key was found. */ + if (!lockedNextKey) { + lockEof(LockType.RANGE_INSERT); + } + } + + /* + * Locking + */ + + /** + * Holds the result of a lockLN operation. A lock may not actually be + * held (getLockResult may return null) if an uncontended lock is allowed. + */ + public static class LockStanding { + + private long lsn; + private boolean defunct; + private LockResult lockResult; + + /** + * Returns true if the record is not deleted or expired. + */ + public boolean recordExists() { + return !defunct; + } + + /** + * Called by update and delete ops, after lockLN() and before logging + * the LN and updating the BIN. 
It returns a WriteLockInfo that is + * meant to be passed to the LN logging method, where its info will + * be included in the LN log entry and also copied into the new + * WriteLockInfo that will be created for the new LSN. + * + * If the locker is not transactional, or the current LSN has not been + * write-locked before by this locker, a new WriteLockInfo is created + * here and its abortLsn and abortKD fields are set. (note: even though + * lockLN() is called before prepareForUpdate(), it may not actually + * acquire a lock because of the uncontended optimization). + * + * Otherwise, a WriteLockInfo exists already. It may have been created + * by the lockLN() call during the current updating op, or a lockLN() + * call during an earlier updating op by the same txn. In the later + * case, the abortLsn and abortKD have been set already and should not + * be overwriten here. + */ + public WriteLockInfo prepareForUpdate(BIN bin, int idx) { + + DatabaseImpl db = bin.getDatabase(); + boolean abortKD = !recordExists(); + byte[] abortKey = null; + byte[] abortData = null; + long abortVLSN = VLSN.NULL_VLSN.getSequence(); + int abortExpiration = bin.getExpiration(idx); + boolean abortExpirationInHours = bin.isExpirationInHours(); + + if (bin.isEmbeddedLN(idx)) { + + abortData = bin.getData(idx); + + abortVLSN = bin.getVLSN( + idx, false/*allowFetch*/, null/*cacheMode*/); + + if (bin.getDatabase().allowsKeyUpdates()) { + abortKey = bin.getKey(idx); + } + } + + WriteLockInfo wri = (lockResult == null ? + null : + lockResult.getWriteLockInfo()); + if (wri == null) { + wri = new WriteLockInfo(); + wri.setAbortLsn(lsn); + wri.setAbortKnownDeleted(abortKD); + wri.setAbortKey(abortKey); + wri.setAbortData(abortData); + wri.setAbortVLSN(abortVLSN); + wri.setAbortExpiration(abortExpiration, abortExpirationInHours); + wri.setDb(db); + } else { + lockResult.setAbortInfo( + lsn, abortKD, abortKey, abortData, abortVLSN, + abortExpiration, abortExpirationInHours, db); + } + return wri; + } + + /** + * Creates WriteLockInfo that is appropriate for a newly inserted slot. + * The return value is meant to be passed to an LN logging method and + * copied into the WriteLockInfo for the new LSN. This method is + * static because lockLN is never called prior to logging an LN for a + * newly inserted slot. + */ + public static WriteLockInfo prepareForInsert(BIN bin) { + WriteLockInfo wri = new WriteLockInfo(); + wri.setDb(bin.getDatabase()); + return wri; + } + } + + /** Does not allow uncontended locks. See lockLN(LockType, boolean). */ + public LockStanding lockLN(LockType lockType) + throws LockConflictException { + + return lockLN(lockType, false /*allowUncontended*/, false /*noWait*/); + } + + /** + * Locks the LN at the cursor position. Attempts to use a non-blocking + * lock to avoid unlatching/relatching. + * + * Retries if necessary, to handle the case where the LSN is changed while + * the BIN is unlatched. Because it re-latches the BIN to check the LSN, + * this serializes access to the LSN for locking, guaranteeing that two + * lockers cannot obtain conflicting locks on the old and new LSNs. + * + * Preconditions: The BIN must be latched. + * + * Postconditions: The BIN is latched. + * + * LN Locking Rules + * ---------------- + * The lock ID for an LN is its LSN in the parent BIN slot. Because the + * LSN changes when logging the LN, only two methods of locking an LN may + * be used to support concurrent access: + * + * 1. This method may be called to lock the old LSN. 
For read operations, + * that is all that is necessary. For write operations, the new LSN must + * be locked after logging it, which is done by all the LN logging methods. + * Be sure to pass a non-null locker to the LN logging method to lock the + * LN, unless locking is not desired. + * + * 2. A non-blocking lock may be obtained on the old LSN (using + * Locker.nonBlockingLock rather than this method), as long as the lock is + * released before the BIN latch is released. In this case a null locker + * is passed to the LN logging method; locking the new LSN is unnecessary + * because no other thread can access the new LSN until the BIN latch is + * released. + * + * The first method is used for all user operations. The second method is + * used by the cleaner, when flushing dirty deferred-write LNs, and by + * certain btree operations. + * + * Uncontended Lock Optimization + * ----------------------------- + * The allowUncontended param is passed as true for update and delete + * operations as an optimization for the case where no lock on the old LSN + * is held by any locker. In this case we don't need to lock the old LSN + * at all, as long as we log the new LSN before releasing the BIN latch. + * + * 1. Latch BIN + * 2. Determine that no lock/waiter exists for oldLsn + * 3. Log LN and get lsn + * 4. Lock lsn + * 5. Update BIN + * 6. Release BIN latch + * + * The oldLsn is never locked, saving operations on the lock table. The + * assumption is that another locker will first have to latch the BIN to + * get oldLsn, before requesting a lock. + * + * A potential problem is that the other locker may release the BIN latch + * before requesting the lock. + * + * This Operation Another Operation + * -------------- ----------------- + * Latch BIN, get oldLsn, release BIN latch + * Step 1 and 2 + * Request lock for oldLsn, granted + * Step 3 and 4 + * + * Both operations now believe they have an exclusive lock, but they have + * locks on different LSNs. + * + * However, this problem is handled as long as the other lock is performed + * using a lockLN method in this class, which will release the lock and + * retry if the LSN changes while acquiring the lock. Because it + * re-latches the BIN to check the LSN, this will serialize access to the + * LSN for locking, guaranteeing that two conflicting locks cannot be + * granted on the old and new LSNs. + * + * Deferred-Write Locking + * ---------------------- + * When one of the LN optionalLog methods is called, a deferred-write LN is + * dirtied but not actually logged. In order to lock an LN that has been + * inserted but not yet assigned a true LSN, a transient LSNs is assigned. + * These LSNs serve to lock the LN but never appear in the log. See + * LN.assignTransientLsn. + * + * A deferred-write LN is logged when its parent BIN is logged, or when the + * LN is evicted. This will replace transient LSNs with durable LSNs. If + * a lock is held by a cursor on a deferred-write LN when it is logged, the + * same lock is acquired on the new LSN by the cursor. See + * lockAfterLsnChange. + * + * Cleaner Migration Locking + * ------------------------- + * The cleaner takes a non-blocking read lock on the old LSN before + * migrating/logging the LN, while holding the BIN latch. It does not take + * a lock on the new LSN, since it does not need to retain a lock after + * releasing the BIN latch. + * + * Because a read, not write, lock is taken, other read locks may be held + * during migration. 
After logging, the cleaner calls lockAfterLsnChange + * to lock the new LSN on behalf of other lockers. + * + * For more info on migration locking, see HandleLocker. + * + * Expired Record Locking + * ---------------------- + * To support repeatable-read semantics when a record expires after being + * locked, we must check whether a record was previously locked before + * attempting to lock it. If it was previously locked, then it is treated + * as not expired, even if its expiration time has passed. + * + * By was previously "locked" here we mean that any lock type is held, or + * shared with its owner, by this cursor's locker. Since a read lock will + * prevent modification of the expiration time, any lock type is adequate. + * A shared lock is considered adequate to account for the case where + * multiple lockers are used internally for a single virtual locker, as + * seen by the user. This is the case when using a read-committed locker or + * a thread-locker, for example. + * + * To avoid unnecessary added overhead, we do not check whether a record + * was previously locked except when expiration is imminent, which is + * defined as expiring within {@link + * EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. The ENV_TTL_MAX_TXN_TIME buffer + * is used because the expiration time may pass while waiting for a lock. + * + * Another case to account for is when the expiration time of the record + * changes while waiting for the lock. This can happen if the record is + * updated or an update is aborted. In this case we can assume that the + * was not previously locked, since that would have prevented the update. + * + * Note that when an uncontended lock applies, the expiration of the record + * with the current LSN cannot change. It is possible that the update or + * deletion requesting the uncontended lock will be aborted, and the LSN of + * an expired record will be reinstated in the BIN, but this does not + * create a special case. + * + * Historical Notes + * ---------------- + * In JE 4.1 and earlier, each LN had a node ID that was used for locking, + * rather than using the LSN. The node ID changed only if a deleted slot + * was reused. The node ID was stored in the LN, requiring that the LN be + * fetched when locking the LN. With LSN locking a fetch is not needed. + * + * When LN node IDs were used, deferred-write LNs were not assigned an LSN + * until they were actually logged. Deferred-write LNs were initially + * assigned a null LSN and transient LSNs were not needed. + * + * @param lockType the type of lock requested. + * + * @param allowUncontended is true to return immediately (no lock is taken) + * when no locker holds or waits for the lock. + * + * @param noWait is true to perform a no-wait lock request while keeping + * the BIN latched. The caller must check the lock result to see whether + * the lock was granted. + * + * @return all information about the lock; see LockStanding. + * + * @throws LockConflictException if the lsn is non-null, the lock is + * contended, and a lock could not be obtained by blocking. + */ + public LockStanding lockLN( + final LockType lockType, + final boolean allowUncontended, + final boolean noWait) + throws LockConflictException { + + final EnvironmentImpl envImpl = dbImpl.getEnv(); + + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + + final LockStanding standing = new LockStanding(); + standing.lsn = bin.getLsn(index); + + /* Check for a known-deleted null LSN. 
*/ + if (standing.lsn == DbLsn.NULL_LSN) { + assert bin.isEntryKnownDeleted(index); + standing.defunct = true; + return standing; + } + + /* + * We can avoid taking a lock if uncontended. However, we must + * call preLogWithoutLock to prevent logging on a replica, and as + * good measure to prepare for undo. + */ + if (allowUncontended && lockManager.isLockUncontended(standing.lsn)) { + assert verifyPendingDeleted(lockType); + locker.preLogWithoutLock(dbImpl); + standing.defunct = bin.isDefunct(index); + return standing; + } + + /* + * If wasLockedAndExpiresSoon is true, we will treat the record as not + * expired. If false, we will check for expiration after locking. + */ + boolean wasLockedAndExpiresSoon = false; + final int prevExpiration = bin.getExpiration(index); + final boolean prevExpirationInHours = bin.isExpirationInHours(); + + if (envImpl.expiresWithin( + prevExpiration, prevExpirationInHours, + dbImpl.getEnv().getTtlMaxTxnTime())) { + + if (lockManager.ownsOrSharesLock(locker, standing.lsn)) { + wasLockedAndExpiresSoon = true; + } + } + + /* + * Try a non-blocking lock first, to avoid unlatching. If the default + * is no-wait, use the standard lock method so + * LockNotAvailableException is thrown; there is no need to try a + * non-blocking lock twice. + * + * Even for dirty-read (LockType.NONE) we must call Locker.lock() since + * it checks the locker state and may throw LockPreemptedException. + */ + if (locker.getDefaultNoWait()) { + try { + standing.lockResult = locker.lock( + standing.lsn, lockType, true /*noWait*/, dbImpl); + + } catch (LockNotAvailableException e) { + releaseBIN(); + throw e; + + } catch (LockConflictException e) { + releaseBIN(); + throw EnvironmentFailureException.unexpectedException(e); + } + } else { + standing.lockResult = locker.nonBlockingLock( + standing.lsn, lockType, false /*jumpAheadOfWaiters*/, + dbImpl); + } + + if (standing.lockResult.getLockGrant() != LockGrantType.DENIED) { + + /* Lock was granted whiled latched, no need to check LSN. */ + assert verifyPendingDeleted(lockType); + + standing.defunct = wasLockedAndExpiresSoon ? + bin.isDeleted(index) : bin.isDefunct(index); + + return standing; + } + + if (noWait) { + /* We did not acquire the lock. */ + + standing.defunct = wasLockedAndExpiresSoon ? + bin.isDeleted(index) : bin.isDefunct(index); + + return standing; + } + + /* + * Unlatch, get a blocking lock, latch, and get the current LSN from + * the slot. If the LSN changes while unlatched, revert the lock and + * repeat. + */ + while (true) { + + /* Request a blocking lock. */ + releaseBIN(); + + standing.lockResult = locker.lock( + standing.lsn, lockType, false /*noWait*/, dbImpl); + + latchBIN(); + + /* Check current LSN after locking. */ + final long newLsn = bin.getLsn(index); + if (standing.lsn == newLsn) { + + /* + * If the expiration time changes while unlatched, then it + * could not have been previously locked. + */ + if (prevExpiration != bin.getExpiration(index) || + prevExpirationInHours != bin.isExpirationInHours()) { + wasLockedAndExpiresSoon = false; + } + + standing.defunct = wasLockedAndExpiresSoon ? + bin.isDeleted(index) : bin.isDefunct(index); + + assert verifyPendingDeleted(lockType); + return standing; + } + + /* The LSN changed, revert the lock and try again. */ + revertLock(standing); + standing.lsn = newLsn; + + /* Check for a known-deleted null LSN. 
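+ * (An LSN can be NULL_LSN only for a slot whose known-deleted flag is
+ * set, as asserted below, so the record is reported defunct without
+ * further locking.)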
*/ + if (newLsn == DbLsn.NULL_LSN) { + assert bin.isEntryKnownDeleted(index); + standing.defunct = true; + return standing; + } + } + } + + /** + * After logging a deferred-write LN during eviction/checkpoint or a + * migrated LN during cleaning, for every existing lock on the old LSN held + * by another locker, we must lock the new LSN on behalf of that locker. + * + * This is done while holding the BIN latch so that the new LSN does not + * change during the locking process. The BIN must be latched on entry and + * is left latched by this method. + * + * We release the lock on the oldLsn to prevent locks from accumulating + * over time on a HandleLocker, as the cleaner migrates LNs, because + * Database handle locks are legitimately very long-lived. It is important + * to first acquire all lsn locks and then release the oldLsn locks. + * Releasing an oldLsn lock might allow another locker to acquire it, and + * then acquiring another lsn lock may encounter a conflict. [#20617] + * + * @see com.sleepycat.je.txn.HandleLocker + * @see #lockLN + */ + public static void lockAfterLsnChange( + DatabaseImpl dbImpl, + long oldLsn, + long newLsn, + Locker excludeLocker) { + + final LockManager lockManager = + dbImpl.getEnv().getTxnManager().getLockManager(); + + final Set owners = lockManager.getOwners(oldLsn); + if (owners == null) { + return; + } + /* Acquire lsn locks. */ + for (LockInfo lockInfo : owners) { + final Locker locker = lockInfo.getLocker(); + if (locker != excludeLocker) { + locker.lockAfterLsnChange(oldLsn, newLsn, dbImpl); + } + } + /* Release oldLsn locks. */ + for (LockInfo lockInfo : owners) { + final Locker locker = lockInfo.getLocker(); + if (locker != excludeLocker && + locker.allowReleaseLockAfterLsnChange()) { + locker.releaseLock(oldLsn); + } + } + } + + /** + * For debugging. Verify that a BINs cursor set refers to the BIN. + */ + private void verifyCursor(BIN bin) { + + if (!bin.getCursorSet().contains(this)) { + throw new EnvironmentFailureException( + dbImpl.getEnv(), + EnvironmentFailureReason.UNEXPECTED_STATE, + "BIN cursorSet is inconsistent"); + } + } + + /** + * Calls checkCursorState and asserts false if an exception is thrown. + * Otherwise returns true, so it can be called under an assertion. + */ + private boolean assertCursorState( + boolean mustBeInitialized, + boolean mustNotBeInitialized) { + + try { + checkCursorState(mustBeInitialized, mustNotBeInitialized); + return true; + } catch (RuntimeException e) { + assert false : e.toString() + " " + dumpToString(true); + return false; // for compiler + } + } + + /** + * Check that the cursor is open and optionally if it is initialized or + * uninitialized. + * + * @throws IllegalStateException via all Cursor methods that call + * Cursor.checkState (all get and put methods, plus more). 
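+ * <p>
+ * Typical internal call, as used elsewhere in this class:
+ * <pre>
+ *     checkCursorState(
+ *         true /*mustBeInitialized*/, false /*mustNotBeInitialized*/);
+ * </pre>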
+ */ + public void checkCursorState( + boolean mustBeInitialized, + boolean mustNotBeInitialized) { + + switch (status) { + case CURSOR_NOT_INITIALIZED: + if (mustBeInitialized) { + throw new IllegalStateException("Cursor not initialized."); + } + break; + case CURSOR_INITIALIZED: + if (mustNotBeInitialized) { + throw EnvironmentFailureException.unexpectedState( + "Cursor is initialized."); + } + if (DEBUG) { + if (bin != null) { + verifyCursor(bin); + } + } + break; + case CURSOR_CLOSED: + throw new IllegalStateException("Cursor has been closed."); + default: + throw EnvironmentFailureException.unexpectedState( + "Unknown cursor status: " + status); + } + } + + /** + * Checks that LN deletedness matches KD/PD flag state, at least when the + * LN is resident. Should only be called under an assertion. + */ + private boolean verifyPendingDeleted(LockType lockType) { + + /* Cannot verify deletedness if LN is not locked. */ + if (lockType == LockType.NONE) { + return true; + } + + /* Cannot verify deletedness if cursor is not intialized. */ + if (bin == null || index < 0) { + return true; + } + + /* Cannot verify deletedness if LN is not resident. */ + final LN ln = (LN) bin.getTarget(index); + if (ln == null) { + return true; + } + + /* + * If the LN is deleted then KD or PD must be set. If the LN is not + * deleted then PD must not be set, but KD may or may not be set since + * it used for various purposes (see IN.java). + */ + final boolean kd = bin.isEntryKnownDeleted(index); + final boolean pd = bin.isEntryPendingDeleted(index); + final boolean lnDeleted = ln.isDeleted(); + assert ((lnDeleted && (kd || pd)) || (!lnDeleted && !pd)) : + "Deleted state mismatch LNDeleted = " + lnDeleted + + " PD = " + pd + " KD = " + kd; + return true; + } + + public void revertLock(LockStanding standing) { + + if (standing.lockResult != null) { + revertLock(standing.lsn, standing.lockResult); + standing.lockResult = null; + } + } + + /** + * Return this lock to its prior status. If the lock was just obtained, + * release it. If it was promoted, demote it. + */ + private void revertLock(long lsn, LockResult lockResult) { + + LockGrantType lockStatus = lockResult.getLockGrant(); + + if ((lockStatus == LockGrantType.NEW) || + (lockStatus == LockGrantType.WAIT_NEW)) { + locker.releaseLock(lsn); + } else if ((lockStatus == LockGrantType.PROMOTION) || + (lockStatus == LockGrantType.WAIT_PROMOTION)){ + locker.demoteLock(lsn); + } + } + + /** + * Locks the logical EOF node for the dbImpl. + */ + public void lockEof(LockType lockType) { + + locker.lock(dbImpl.getEofLsn(), lockType, + false /*noWait*/, dbImpl); + } + + /** + * @throws EnvironmentFailureException if the underlying environment is + * invalid. + */ + public void checkEnv() { + dbImpl.getEnv().checkIfInvalid(); + } + + /** + * Callback object for traverseDbWithCursor. + */ + public interface WithCursor { + + /** + * Called for each record in the dbImpl. + * @return true to continue or false to stop the enumeration. + */ + boolean withCursor(CursorImpl cursor, + DatabaseEntry key, + DatabaseEntry data); + } + + /** + * Enumerates all records in a dbImpl non-transactionally and calls + * the withCursor method for each record. Stops the enumeration if the + * callback returns false. + * + * @param db DatabaseImpl to traverse. + * + * @param lockType non-null LockType for reading records. 
+ * + * @param allowEviction should normally be true to evict when performing + * multiple operations, but may be false if eviction is disallowed in a + * particular context. + * + * @param withCursor callback object. + */ + public static void traverseDbWithCursor( + DatabaseImpl db, + LockType lockType, + boolean allowEviction, + WithCursor withCursor) { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Locker locker = null; + CursorImpl cursor = null; + + try { + EnvironmentImpl envImpl = db.getEnv(); + + locker = LockerFactory.getInternalReadOperationLocker(envImpl); + + cursor = new CursorImpl(db, locker); + cursor.setAllowEviction(allowEviction); + + if (cursor.positionFirstOrLast(true /*first*/)) { + + OperationResult result = cursor.lockAndGetCurrent( + key, data, lockType, false /*dirtyReadAll*/, + true /*isLatched*/, true /*unlatch*/); + + boolean done = false; + while (!done) { + + /* + * lockAndGetCurrent may have returned non-SUCCESS if the + * first record is defunct, but we can call getNext below + * to move forward. + */ + if (result != null) { + if (!withCursor.withCursor(cursor, key, data)) { + done = true; + } + } + + if (!done) { + result = cursor.getNext( + key, data, lockType, false /*dirtyReadAll*/, + true /*forward*/, false /*isLatched*/, + null /*rangeConstraint*/); + + if (result == null) { + done = true; + } + } + } + } + } finally { + if (cursor != null) { + cursor.releaseBIN(); + cursor.close(); + } + if (locker != null) { + locker.operationEnd(); + } + } + } + + /** + * Dump the cursor for debugging purposes. Dump the bin that the cursor + * refers to if verbose is true. + */ + public void dump(boolean verbose) { + System.out.println(dumpToString(verbose)); + } + + /** + * dump the cursor for debugging purposes. + */ + public void dump() { + System.out.println(dumpToString(true)); + } + + /* + * dumper + */ + private String statusToString(byte status) { + switch(status) { + case CURSOR_NOT_INITIALIZED: + return "CURSOR_NOT_INITIALIZED"; + case CURSOR_INITIALIZED: + return "CURSOR_INITIALIZED"; + case CURSOR_CLOSED: + return "CURSOR_CLOSED"; + default: + return "UNKNOWN (" + Byte.toString(status) + ")"; + } + } + + /* + * dumper + */ + public String dumpToString(boolean verbose) { + StringBuilder sb = new StringBuilder(); + + sb.append("\n"); + if (verbose) { + sb.append((bin == null) ? "" : bin.dumpString(2, true)); + } + sb.append("\n"); + + return sb.toString(); + } + + /* + * For unit tests + */ + public StatGroup getLockStats() { + return locker.collectStats(); + } + + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + private void trace( + Level level, + String changeType, + BIN theBin, + int lnIndex, + long oldLsn, + long newLsn) { + + EnvironmentImpl envImpl = dbImpl.getEnv(); + if (envImpl.getLogger().isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(changeType); + sb.append(" bin="); + sb.append(theBin.getNodeId()); + sb.append(" lnIdx="); + sb.append(lnIndex); + sb.append(" oldLnLsn="); + sb.append(DbLsn.getNoFormatString(oldLsn)); + sb.append(" newLnLsn="); + sb.append(DbLsn.getNoFormatString(newLsn)); + + LoggerUtils.logMsg + (envImpl.getLogger(), envImpl, level, sb.toString()); + } + } + + /** + * Send trace messages to the java.util.logger. 
Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + private void traceInsert( + Level level, + BIN insertingBin, + long lnLsn, + int index) { + + EnvironmentImpl envImpl = dbImpl.getEnv(); + if (envImpl.getLogger().isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(TRACE_INSERT); + sb.append(" bin="); + sb.append(insertingBin.getNodeId()); + sb.append(" lnLsn="); + sb.append(DbLsn.getNoFormatString(lnLsn)); + sb.append(" index="); + sb.append(index); + + LoggerUtils.logMsg(envImpl.getLogger(), envImpl, level, + sb.toString()); + } + } + + /* For unit testing only. */ + public void setTestHook(TestHook hook) { + testHook = hook; + } + + /* Check that the target bin is latched. For use in assertions. */ + private boolean checkAlreadyLatched(boolean isLatched) { + if (isLatched) { + if (bin != null) { + return bin.isLatchExclusiveOwner(); + } + } + return true; + } +} diff --git a/src/com/sleepycat/je/dbi/DatabaseId.java b/src/com/sleepycat/je/dbi/DatabaseId.java new file mode 100644 index 0000000..4c90b90 --- /dev/null +++ b/src/com/sleepycat/je/dbi/DatabaseId.java @@ -0,0 +1,174 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.log.BasicVersionedWriteLoggable; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.VersionedWriteLoggable; +import com.sleepycat.utilint.StringUtils; + +/** + * DatabaseImpl Ids are wrapped in a class so they can be logged. + */ +public class DatabaseId extends BasicVersionedWriteLoggable + implements Comparable { + + /** + * The log version of the most recent format change for this loggable. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + /** + * The unique id of this database. + */ + private long id; + + /** + * + */ + public DatabaseId(long id) { + this.id = id; + } + + /** + * Uninitialized database id, for logging. + */ + public DatabaseId() { + } + + /** + * @return id value + */ + public long getId() { + return id; + } + + /** + * @return id as bytes, for use as a key + */ + public byte[] getBytes() + throws DatabaseException { + + return StringUtils.toUTF8(toString()); + } + + /** + * Compare two DatabaseImpl Id's. 
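+ * <p>
+ * Equality depends on the numeric id alone; for example (sketch):
+ * <pre>
+ *     new DatabaseId(7).equals(new DatabaseId(7))  // true
+ *     new DatabaseId(7).equals(Long.valueOf(7))    // false: not a DatabaseId
+ * </pre>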
+ */ + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!(obj instanceof DatabaseId)) { + return false; + } + + return ((DatabaseId) obj).id == id; + } + + @Override + public int hashCode() { + return (int) id; + } + + @Override + public String toString() { + return Long.toString(id); + } + + /** + * see Comparable#compareTo + */ + @Override + public int compareTo(DatabaseId o) { + if (o == null) { + throw EnvironmentFailureException.unexpectedState("null arg"); + } + + if (id == o.id) { + return 0; + } else if (id > o.id) { + return 1; + } else { + return -1; + } + } + + /* + * Logging support. + */ + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + return Collections.emptyList(); + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return LogUtils.getPackedLongLogSize(id); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + LogUtils.writePackedLong(logBuffer, id); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + if (entryVersion < 6) { + id = LogUtils.readInt(itemBuffer); + } else { + id = LogUtils.readPackedLong(itemBuffer); + } + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + } + + @Override + public long getTransactionId() { + return 0; + } + + @Override + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof DatabaseId)) + return false; + + return id == ((DatabaseId) other).id; + } +} diff --git a/src/com/sleepycat/je/dbi/DatabaseImpl.java b/src/com/sleepycat/je/dbi/DatabaseImpl.java new file mode 100644 index 0000000..af8493a --- /dev/null +++ b/src/com/sleepycat/je/dbi/DatabaseImpl.java @@ -0,0 +1,2571 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import com.sleepycat.je.BinaryEqualityComparator; +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseComparator; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.PartialComparator; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.PreloadStats; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.cleaner.BaseUtilizationTracker; +import com.sleepycat.je.cleaner.DbFileSummary; +import com.sleepycat.je.cleaner.DbFileSummaryMap; +import com.sleepycat.je.cleaner.FileProtector; +import com.sleepycat.je.cleaner.LocalUtilizationTracker; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.SortedLSNTreeWalker.TreeNodeProcessor; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.log.DbOpReplicationContext; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.entry.DbOperationType; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeUtils; +import com.sleepycat.je.trigger.PersistentTrigger; +import com.sleepycat.je.trigger.Trigger; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.util.verify.BtreeVerifier; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.util.ClassResolver; + +/** + * The underlying object for a given database. + */ +public class DatabaseImpl implements Loggable, Cloneable { + + /* + * Delete processing states. See design note on database deletion and + * truncation + */ + private static final short NOT_DELETED = 1; + private static final short DELETED_CLEANUP_INLIST_HARVEST = 2; + private static final short DELETED_CLEANUP_LOG_HARVEST = 3; + private static final short DELETED = 4; + + /* + * Flag bits are the persistent representation of boolean properties + * for this database. The DUPS_ENABLED value is 1 for compatibility + * with earlier log entry versions where it was stored as a boolean. 
+ * + * Two bits are used to indicate whether this database is replicated or + * not. + * isReplicated = 0, notReplicated = 0 means replication status is + * unknown, because the db was created in a standalone environment. + * isReplicated = 1, notReplicated = 0 means the db is replicated. + * isReplicated = 0, notReplicated = 1 means the db is not replicated. + * isReplicated = 1, notReplicated = 1 is an illegal combination. + */ + private byte flags; + private static final byte DUPS_ENABLED = 0x1; // getSortedDuplicates() + private static final byte TEMPORARY_BIT = 0x2; // isTemporary() + private static final byte IS_REPLICATED_BIT = 0x4; // isReplicated() + private static final byte NOT_REPLICATED_BIT = 0x8;// notReplicated() + private static final byte PREFIXING_ENABLED = 0x10;// getKeyPrefixing() + private static final byte UTILIZATION_REPAIR_DONE = 0x20; + // getUtilizationRepairDone() + private static final byte DUPS_CONVERTED = 0x40; // getDupsConverted() + + private DatabaseId id; // unique id + private Tree tree; + private EnvironmentImpl envImpl; // Tree operations find the env this way + private boolean transactional; // All open handles are transactional + private boolean durableDeferredWrite; // Durable deferred write mode set + private volatile boolean dirty; // Utilization, root LSN, etc., changed + private Set<Database> referringHandles; // Set of open Database handles + private long eofLsn; // Logical EOF LSN for range locking + private volatile short deleteState; // one of four delete states. + private AtomicInteger useCount = new AtomicInteger(); + // If non-zero, eviction is prohibited + /* + * Tracks the number of write handle references to this impl. It's used + * to determine when the Trigger.open/close methods must be invoked. + */ + private final AtomicInteger writeCount = new AtomicInteger(); + + private DbFileSummaryMap dbFileSummaries; + + /** + * Log version when DB was created, or 0 if created prior to log version 6. + */ + private byte createdAtLogVersion; + + /** + * For unit testing, setting this field to true will force a walk of the + * tree to count utilization during truncate/remove, rather than using the + * per-database info. This is used to test the "old technique" for + * counting utilization, which is now used only if the database was created + * prior to log version 6. + */ + public static boolean forceTreeWalkForTruncateAndRemove; + + /* + * The user defined Btree and duplicate comparison functions, if specified. + */ + private Comparator<byte[]> btreeComparator = null; + private Comparator<byte[]> duplicateComparator = null; + private byte[] btreeComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + private byte[] duplicateComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + + private boolean btreeComparatorByClassName = false; + private boolean duplicateComparatorByClassName = false; + private boolean btreePartialComparator = false; + private boolean duplicatePartialComparator = false; + private boolean btreeBinaryEqualityComparator = true; + private boolean duplicateBinaryEqualityComparator = true; + + /* Key comparator uses the btree and dup comparators as needed. */ + private Comparator<byte[]> keyComparator = null; + + /* + * The user defined triggers associated with this database. + * + * The triggers reference value contains all known triggers, persistent and + * transient, or null if it has not yet been constructed, which is done + * lazily. It is constructed by unmarshalling the triggerBytes (persistent + * triggers) and adding them to the transientTriggers.
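/*
 * A sketch of the two-bit, three-state replication encoding documented
 * above: one bit for "known replicated", one for "known not replicated",
 * neither bit set meaning unknown, and both bits set being illegal.
 * ReplicationFlagsSketch is illustrative only, not JE API.
 */
class ReplicationFlagsSketch {
    private static final byte IS_REPLICATED = 0x4;
    private static final byte NOT_REPLICATED = 0x8;

    private byte flags;

    boolean isReplicated()    { return (flags & IS_REPLICATED) != 0; }
    boolean isNotReplicated() { return (flags & NOT_REPLICATED) != 0; }
    boolean isUnknown()       { return !isReplicated() && !isNotReplicated(); }

    void setReplicated()      { flags |= IS_REPLICATED; }
    void setNotReplicated()   { flags |= NOT_REPLICATED; }

    /* The invariant DatabaseImpl asserts: never both bits at once. */
    boolean invariantHolds()  { return !(isReplicated() && isNotReplicated()); }
}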
+ * + * transientTriggers is null if there are none, and never an empty list. + */ + private AtomicReference<List<Trigger>> triggers = + new AtomicReference<List<Trigger>>(null); + private List<Trigger> transientTriggers = null; + private byte[][] triggerBytes = null; + + /* + * Cache some configuration values. + */ + private int binDeltaPercent; + private int maxTreeEntriesPerNode; + + private String debugDatabaseName; + + /* Set to true when opened as a secondary DB. */ + private volatile boolean knownSecondary = false; + + /* For unit tests */ + private TestHook pendingDeletedHook; + + /* + * The DbType of this DatabaseImpl. It is determined lazily, so getDbType + * should always be called rather than referencing the field directly. + */ + private DbType dbType; + + private CacheMode cacheMode; + + /* + * For debugging -- this gives the ability to force all non-internal + * databases to use key prefixing. + * + * Note that doing + * ant -Dje.forceKeyPrefixing=true test + * does not work because ant does not pass the parameter down to JE. + */ + private static final boolean forceKeyPrefixing; + static { + String forceKeyPrefixingProp = + System.getProperty("je.forceKeyPrefixing"); + if ("true".equals(forceKeyPrefixingProp)) { + forceKeyPrefixing = true; + } else { + forceKeyPrefixing = false; + } + } + + /** + * Create a database object for a new database. + */ + public DatabaseImpl(Locker locker, + String dbName, + DatabaseId id, + EnvironmentImpl envImpl, + DatabaseConfig dbConfig) + throws DatabaseException { + + this.id = id; + this.envImpl = envImpl; + + setConfigProperties(locker, dbName, dbConfig, envImpl); + cacheMode = dbConfig.getCacheMode(); + + createdAtLogVersion = LogEntryType.LOG_VERSION; + + /* A new DB is implicitly converted to the new dups format. */ + if (getSortedDuplicates()) { + setDupsConverted(); + } + + /* + * New DB records do not need utilization repair. Set this before + * calling initWithEnvironment to avoid repair overhead. + */ + setUtilizationRepairDone(); + + commonInit(); + + initWithEnvironment(); + + /* + * The tree needs the env; make sure we assign it before + * allocating the tree. + */ + tree = new Tree(this); + + /* For error messages only. */ + debugDatabaseName = dbName; + } + + /** + * Create an empty database object for initialization from the log. Note + * that the rest of the initialization comes from readFromLog(), except + * for the debugDatabaseName, which is set by the caller. + */ + public DatabaseImpl() { + id = new DatabaseId(); + envImpl = null; + + tree = new Tree(); + + commonInit(); + + /* initWithEnvironment is called after reading and envImpl is set. */ + } + + /* Set the DatabaseConfig properties for a DatabaseImpl.
*/ + public void setConfigProperties(Locker locker, + String dbName, + DatabaseConfig dbConfig, + EnvironmentImpl envImpl) { + setBtreeComparator(dbConfig.getBtreeComparator(), + dbConfig.getBtreeComparatorByClassName()); + setDuplicateComparator(dbConfig.getDuplicateComparator(), + dbConfig.getDuplicateComparatorByClassName()); + + setTriggers(locker, dbName, dbConfig.getTriggers(), + true /*overridePersistentTriggers*/); + + if (dbConfig.getSortedDuplicates()) { + setSortedDuplicates(); + } + + if (dbConfig.getKeyPrefixing() || + forceKeyPrefixing) { + setKeyPrefixing(); + } else { + clearKeyPrefixing(); + } + + if (dbConfig.getTemporary()) { + setTemporary(); + } + + if (envImpl.isReplicated()) { + if (dbConfig.getReplicated()) { + setIsReplicatedBit(); + } else { + setNotReplicatedBit(); + } + } + + transactional = dbConfig.getTransactional(); + durableDeferredWrite = dbConfig.getDeferredWrite(); + maxTreeEntriesPerNode = dbConfig.getNodeMaxEntries(); + } + + private void commonInit() { + + deleteState = NOT_DELETED; + referringHandles = + Collections.synchronizedSet(new HashSet<Database>()); + dbFileSummaries = new DbFileSummaryMap + (false /* countParentMapEntry */); + } + + public void setDebugDatabaseName(String debugName) { + debugDatabaseName = debugName; + /* DbType may be wrong if name has not yet been set. */ + resetDbType(); + } + + /** + * Returns the DB name for debugging and error messages. This method + * should be called rather than getName to avoid accessing the db mapping + * tree in error situations. The name may not be transactionally correct, + * and may be unknown under certain circumstances (see + * DbTree.setDebugNameForDatabaseImpl), in which case a string containing + * the DB ID is returned. + */ + public String getDebugName() { + return (debugDatabaseName != null) ? debugDatabaseName : "dBId=" + id; + } + + /** + * Returns whether getDebugName returns a DB name rather than a DB ID. + */ + boolean isDebugNameAvailable() { + return (debugDatabaseName != null); + } + + /* + * Returns true if this DB has been opened as a secondary DB. Currently, + * secondary DB metadata is not persistent, so this is the best we can do. + */ + public boolean isKnownSecondary() { + return knownSecondary; + } + + public void setKnownSecondary() { + knownSecondary = true; + } + + /* For unit testing only. */ + public void setPendingDeletedHook(TestHook hook) { + pendingDeletedHook = hook; + } + + /** + * Initialize configuration settings when creating a new instance or after + * reading an instance from the log. The envImpl field must be set before + * calling this method. + */ + private void initWithEnvironment() { + /* The eof LSN must be unique for each database in memory. */ + eofLsn = envImpl.getNodeSequence().getNextTransientLsn(); + + assert !(replicatedBitSet() && notReplicatedBitSet()) : + "The replicated AND notReplicated bits should never be set "+ + " together"; + + /* + * We'd like to assert that neither replication bit is set if + * the environmentImpl is not replicated, but can't do that. + * EnvironmentImpl.isReplicated() is not yet initialized if this + * environment is undergoing recovery during replication setup.
+ + assert !((!envImpl.isReplicated() && + (replicatedBitSet() || notReplicatedBitSet()))) : + "Neither the replicated nor notReplicated bits should be set " + + " in a non-replicated environment" + + " replicatedBitSet=" + replicatedBitSet() + + " notRepBitSet=" + notReplicatedBitSet(); + */ + + DbConfigManager configMgr = envImpl.getConfigManager(); + + binDeltaPercent = + configMgr.getInt(EnvironmentParams.BIN_DELTA_PERCENT); + + /* + * If maxTreeEntriesPerNode is zero (for a newly created database), + * set it to the default config value. When we write the DatabaseImpl + * to the log, we'll store the default. That way, if the default + * changes, the fan out for existing databases won't change. + */ + if (maxTreeEntriesPerNode == 0) { + maxTreeEntriesPerNode = + configMgr.getInt(EnvironmentParams.NODE_MAX); + } + + /* Budgets memory for the utilization info. */ + dbFileSummaries.init(envImpl); + + /* + * Repair utilization info if necessary. The repair flag will not be + * set for MapLNs written by JE 3.3.74 and earlier, and will be set for + * all MapLNs written thereafter. Make the utilization dirty to force + * the MapLN to be flushed. Even if no repair is performed, we want to + * write the updated flag. [#16610] + */ + if (!getUtilizationRepairDone()) { + dbFileSummaries.repair(envImpl); + setDirty(); + setUtilizationRepairDone(); + } + + /* Don't instantiate if comparators are unnecessary (DbPrintLog). */ + if (!envImpl.getNoComparators()) { + + ComparatorReader reader = new ComparatorReader( + btreeComparatorBytes, "Btree", envImpl.getClassLoader()); + btreeComparator = reader.getComparator(); + btreeComparatorByClassName = reader.isClass(); + btreePartialComparator = + btreeComparator instanceof PartialComparator; + btreeBinaryEqualityComparator = + (btreeComparator == null || + btreeComparator instanceof BinaryEqualityComparator); + + reader = new ComparatorReader( + duplicateComparatorBytes, "Duplicate", + envImpl.getClassLoader()); + duplicateComparator = reader.getComparator(); + duplicateComparatorByClassName = reader.isClass(); + duplicatePartialComparator = + duplicateComparator instanceof PartialComparator; + duplicateBinaryEqualityComparator = + (duplicateComparator == null || + duplicateComparator instanceof BinaryEqualityComparator); + + /* Key comparator is derived from dup and btree comparators. */ + resetKeyComparator(); + } + } + + /** + * Create a clone of this database that can be used as the new empty + * database when truncating this database. setId and setTree must be + * called on the returned database. + */ + public DatabaseImpl cloneDatabase() { + DatabaseImpl newDb; + try { + newDb = (DatabaseImpl) super.clone(); + } catch (CloneNotSupportedException e) { + assert false : e; + return null; + } + + /* Re-initialize fields that should not be shared by the new DB. */ + newDb.id = null; + newDb.tree = null; + newDb.createdAtLogVersion = LogEntryType.LOG_VERSION; + newDb.dbFileSummaries = new DbFileSummaryMap + (false /*countParentMapEntry*/); + newDb.dbFileSummaries.init(envImpl); + newDb.useCount = new AtomicInteger(); + return newDb; + } + + /** + * @return the database tree. + */ + public Tree getTree() { + return tree; + } + + void setTree(Tree tree) { + this.tree = tree; + } + + /** + * @return the database id. + */ + public DatabaseId getId() { + return id; + } + + void setId(DatabaseId id) { + this.id = id; + } + + public long getEofLsn() { + return eofLsn; + } + + /** + * @return true if this database is transactional. 
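/*
 * initWithEnvironment above obtains each comparator from ComparatorReader,
 * which handles two storage modes: a stored class name (re-instantiated
 * through the environment's ClassLoader) or a serialized Comparator object.
 * A rough, self-contained sketch of that distinction; ComparatorModeSketch
 * is illustrative only, and JE's actual reader also resolves serialized
 * classes through the provided ClassLoader (see the ClassResolver import
 * above).
 */
class ComparatorModeSketch {
    @SuppressWarnings("unchecked")
    static java.util.Comparator<byte[]> read(byte[] stored,
                                             boolean byClassName,
                                             ClassLoader loader)
        throws Exception {

        if (stored == null || stored.length == 0) {
            return null; /* default byte-wise comparison applies */
        }
        if (byClassName) {
            String className =
                new String(stored, java.nio.charset.StandardCharsets.UTF_8);
            return (java.util.Comparator<byte[]>) Class
                .forName(className, true /*initialize*/, loader)
                .getDeclaredConstructor().newInstance();
        }
        /* Otherwise the bytes hold a serialized Comparator instance. */
        try (java.io.ObjectInputStream ois = new java.io.ObjectInputStream(
                 new java.io.ByteArrayInputStream(stored))) {
            return (java.util.Comparator<byte[]>) ois.readObject();
        }
    }
}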
+ */ + public boolean isTransactional() { + return transactional; + } + + /** + * Sets the transactional property for the first opened handle. + */ + public void setTransactional(boolean transactional) { + this.transactional = transactional; + } + + /** + * @return true if this database is temporary. + */ + public boolean isTemporary() { + return ((flags & TEMPORARY_BIT) != 0); + } + + public static boolean isTemporary(byte flagVal) { + return ((flagVal & TEMPORARY_BIT) != 0); + } + + public boolean isInternalDb() { + return getDbType().isInternal(); + } + + public DbType getDbType() { + if (dbType != null) { + return dbType; + } + resetDbType(); + return dbType; + } + + private void resetDbType() { + dbType = DbTree.typeForDbName(debugDatabaseName); + } + + private void setTemporary() { + flags |= TEMPORARY_BIT; + } + + /** + * @return true if this database was user configured for durable deferred + * write mode. + */ + public boolean isDurableDeferredWrite() { + return durableDeferredWrite; + } + + /** + * @return true if write operations are not logged immediately. This is + * true if the user configured a durable DW database or a temporary + * database. + */ + public boolean isDeferredWriteMode() { + return isDurableDeferredWrite() || isTemporary(); + } + + /** + * Sets the deferred write property for the first opened handle. + */ + public void setDeferredWrite(boolean durableDeferredWrite) { + this.durableDeferredWrite = durableDeferredWrite; + } + + /** + * @return true if duplicates are allowed in this database. + */ + public boolean getSortedDuplicates() { + return (flags & DUPS_ENABLED) != 0; + } + + public static boolean getSortedDuplicates(byte flagVal) { + return (flagVal & DUPS_ENABLED) != 0; + } + + public void setSortedDuplicates() { + flags |= DUPS_ENABLED; + } + + public boolean getDupsConverted() { + return (flags & DUPS_CONVERTED) != 0; + } + + public void setDupsConverted() { + flags |= DUPS_CONVERTED; + } + + /** + * Returns whether all LNs in this DB are "immediately obsolete", meaning + * two things: + * 1) They are counted obsolete when logged and can be ignored by the + * cleaner entirely. + * 2) As a consequence, they cannot be fetched by LSN, except under special + * circumstances where they are known to exist. + * + * Currently, this is synonymous with whether all LNs in this DB must have + * zero length data, and partial comparators are not used. Currently only + * duplicate DBs are known to have zero length LNs, since there is no way + * in the API to specify that LNs are immutable. In the future we will + * also support "immediately obsolete" LNs that are mutable and embedded + * in the BIN in other ways, e.g., tiny data may be stored with the key. + * + * Note that deleted LNs (the logged deletion, not the prior version) are + * always immediately obsolete also. See LNLogEntry.isImmediatelyObsolete. + */ + public boolean isLNImmediatelyObsolete() { + return getSortedDuplicates() && + !btreePartialComparator && + !duplicatePartialComparator; + } + + /** + * This method should be the only method used to obtain triggers after + * reading the MapLN from the log. It unmarshalls the triggers lazily + * here to avoid a call to getName() during recovery, when the DbTree is + * not yet instantiated. + */ + public List getTriggers() { + + /* When comparators are not needed, neither are triggers. */ + if (envImpl == null || envImpl.getNoComparators()) { + return null; + } + + /* If no transient or persistent triggers, return null. 
*/ + if (triggerBytes == null && transientTriggers == null) { + return null; + } + + /* Just return them, if already constructed. */ + List<Trigger> myTriggers = triggers.get(); + if (myTriggers != null) { + return myTriggers; + } + + /* + * Unmarshall triggers, add transient triggers, and update the + * reference atomically. If another thread unmarshalls and updates it + * first, use the value set by the other thread. This ensures that a + * single instance is always used. + */ + myTriggers = TriggerUtils.unmarshallTriggers(getName(), triggerBytes, + envImpl.getClassLoader()); + if (myTriggers == null) { + myTriggers = new LinkedList<Trigger>(); + } + if (transientTriggers != null) { + myTriggers.addAll(transientTriggers); + } + if (triggers.compareAndSet(null, myTriggers)) { + return myTriggers; + } + myTriggers = triggers.get(); + assert myTriggers != null; + return myTriggers; + } + + public boolean hasUserTriggers() { + return (triggerBytes != null) || (transientTriggers != null); + } + + /** + * @return true if key prefixing is enabled in this database. + */ + public boolean getKeyPrefixing() { + return (flags & PREFIXING_ENABLED) != 0; + } + + /** + * Returns true if the flagVal enables key prefixing; used to create + * ReplicatedDatabaseConfig after reading a NameLNLogEntry. + */ + public static boolean getKeyPrefixing(byte flagVal) { + return (flagVal & PREFIXING_ENABLED) != 0; + } + + public void setKeyPrefixing() { + flags |= PREFIXING_ENABLED; + } + + public void clearKeyPrefixing() { + if (forceKeyPrefixing) { + return; + } + flags &= ~PREFIXING_ENABLED; + } + + /** + * @return true if this database is replicated. Note that we only need to + * check the IS_REPLICATED_BIT, because we require that we never have both + * IS_REPLICATED and NOT_REPLICATED set at the same time. + */ + public boolean isReplicated() { + return replicatedBitSet(); + } + + /** + * @return true if this database's replication status is unknown, i.e., + * neither the replicated nor the notReplicated bit is set. + */ + public boolean unknownReplicated() { + return ((flags & IS_REPLICATED_BIT) == 0) && + ((flags & NOT_REPLICATED_BIT) == 0); + } + + private boolean replicatedBitSet() { + return (flags & IS_REPLICATED_BIT) != 0; + } + + public void setIsReplicatedBit() { + flags |= IS_REPLICATED_BIT; + } + + /** + * @return true if this database's not replicated bit is set. + */ + private boolean notReplicatedBitSet() { + return (flags & NOT_REPLICATED_BIT) != 0; + } + + private void setNotReplicatedBit() { + flags |= NOT_REPLICATED_BIT; + } + + /** + * Is public for unit testing. + */ + public boolean getUtilizationRepairDone() { + return (flags & UTILIZATION_REPAIR_DONE) != 0; + } + + private void setUtilizationRepairDone() { + flags |= UTILIZATION_REPAIR_DONE; + } + + /** + * Is public for unit testing. + */ + public void clearUtilizationRepairDone() { + flags &= ~UTILIZATION_REPAIR_DONE; + } + + public int getNodeMaxTreeEntries() { + return maxTreeEntriesPerNode; + } + + public void setNodeMaxTreeEntries(int newNodeMaxTreeEntries) { + maxTreeEntriesPerNode = newNodeMaxTreeEntries; + } + + /** + * Used to determine whether to throw ReplicaWriteException when a write to + * this database is attempted. For the most part, writes on a replica are + * not allowed to any replicated DB. However, an exception is the DB + * naming DB. The naming DB contains a mixture of LNs for replicated and + * non-replicated databases. Here, we allow all writes to the naming DB.
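/*
 * The getTriggers method above publishes the unmarshalled list with a
 * compareAndSet so that racing threads always end up sharing one instance.
 * The same publish-once idiom, reduced to a generic self-contained sketch
 * (LazySingletonSketch is illustrative only):
 */
class LazySingletonSketch<T> {
    private final java.util.concurrent.atomic.AtomicReference<T> ref =
        new java.util.concurrent.atomic.AtomicReference<>(null);

    T get(java.util.function.Supplier<T> factory) {
        T value = ref.get();
        if (value != null) {
            return value;          /* already constructed */
        }
        value = factory.get();     /* several threads may get here */
        if (ref.compareAndSet(null, value)) {
            return value;          /* we published first */
        }
        return ref.get();          /* another thread won; share its instance */
    }
}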
+ * DB naming operations for replicated databases on a replica, such as the + * creation of a replicated DB on a replica, are prohibited by DbTree + * methods (dbCreate, dbRemove, etc). [#20543] + */ + public boolean allowReplicaWrite() { + return !isReplicated() || getDbType() == DbType.NAME; + } + + /** + * Sets the default mode for this database (all handles). May be null to + * use Environment default. + */ + public void setCacheMode(CacheMode mode) { + cacheMode = mode; + } + + /** + * Returns the default cache mode for this database. If the database has a + * null cache mode and is not an internal database, the Environment default + * is returned. Null is never returned. CacheMode.DYNAMIC may be returned. + */ + public CacheMode getDefaultCacheMode() { + if (cacheMode != null) { + return cacheMode; + } + if (isInternalDb()) { + return CacheMode.DEFAULT; + } + return envImpl.getDefaultCacheMode(); + } + + /** + * Returns the tree memory size that should be added to MAPLN_OVERHEAD. + * + * This is a start at budgeting per-Database memory. For future reference, + * other things that could be budgeted are: + * - debugDatabaseName as it is set + * - Database handles as they are added/removed in referringHandles + */ + public int getAdditionalTreeMemorySize() { + + int val = 0; + + /* + * If the comparator object is non-null we double the size of the + * serialized form to account for the approximate size of the user's + * comparator object. This is only an approximation of course, and is + * not a very good one if we have serialized the class name, but we + * have no way to know the size of the user's object. + */ + if (btreeComparator != null) { + val += 2 * MemoryBudget.byteArraySize + (btreeComparatorBytes.length); + } + if (duplicateComparator != null) { + val += 2 * MemoryBudget.byteArraySize + (duplicateComparatorBytes.length); + } + + return val; + } + + /** + * Set the duplicate comparison function for this database. + * + * @return true if the comparator was actually changed + * + * @param comparator - The Duplicate Comparison function. + */ + public boolean setDuplicateComparator( + Comparator comparator, + boolean byClassName) + throws DatabaseException { + + final byte[] newBytes = + comparatorToBytes(comparator, byClassName, "Duplicate"); + + final boolean changed = + !Arrays.equals(newBytes, duplicateComparatorBytes) || + ((comparator instanceof PartialComparator) != + (duplicateComparator instanceof PartialComparator)) || + ((comparator instanceof BinaryEqualityComparator) != + (duplicateComparator instanceof BinaryEqualityComparator)); + + duplicateComparator = comparator; + duplicateComparatorBytes = newBytes; + duplicateComparatorByClassName = byClassName; + + duplicatePartialComparator = + duplicateComparator instanceof PartialComparator; + + duplicateBinaryEqualityComparator = + (duplicateComparator == null || + duplicateComparator instanceof BinaryEqualityComparator); + + if (changed) { + /* Key comparator is derived from dup and btree comparators. */ + resetKeyComparator(); + } + return changed; + } + + /** + * Sets the list of triggers associated with the database. + * + * @param dbName pass it in since it may not be available during database + * creation + * @param newTriggers the triggers to associate with the database + * @param overridePersistentTriggers whether to overwrite persistent + * triggers + * + * @return true if a {@link PersistentTrigger} was changed, and therefore + * may need to be stored. 
+ */ + public boolean setTriggers(Locker locker, + String dbName, + List newTriggers, + boolean overridePersistentTriggers) { + + if ((newTriggers != null) && (newTriggers.size() == 0)) { + newTriggers = null; + } + + /* Construct new persistent triggers. */ + final byte newTriggerBytes[][]; + final boolean persistentChange; + + if (overridePersistentTriggers) { + if (newTriggers == null) { + newTriggerBytes = null; + persistentChange = (this.triggerBytes != null); + } else { + /* Create the new trigger bytes. */ + int nTriggers = 0; + for (Trigger trigger : newTriggers) { + if (trigger instanceof PersistentTrigger) { + nTriggers += 1; + } + } + if (nTriggers == 0) { + newTriggerBytes = null; + persistentChange = (this.triggerBytes != null); + } else { + newTriggerBytes = new byte[nTriggers][]; + int i=0; + for (Trigger trigger : newTriggers) { + if (trigger instanceof PersistentTrigger) { + newTriggerBytes[i++] = objectToBytes + (trigger, "trigger " + trigger.getName()); + trigger.setDatabaseName(dbName); + } + } + persistentChange = + !Arrays.equals(triggerBytes, newTriggerBytes); + } + } + } else { + newTriggerBytes = triggerBytes; + persistentChange = false; + } + + /* Add transient triggers. */ + final List newTransientTriggers; + final boolean transientChange; + + if (newTriggers == null) { + newTransientTriggers = null; + transientChange = (transientTriggers != null); + } else { + newTransientTriggers = new LinkedList(); + final Map diffs = + new IdentityHashMap(); + for (Trigger trigger : newTriggers) { + if (!(trigger instanceof PersistentTrigger)) { + diffs.put(trigger, null); + newTransientTriggers.add(trigger); + trigger.setDatabaseName(dbName); + } + } + if (transientTriggers == null) { + transientChange = (newTransientTriggers.size() > 0); + } else if (transientTriggers.size() != + newTransientTriggers.size()) { + transientChange = true; + } else { + for (Trigger trigger : transientTriggers) { + diffs.remove(trigger); + } + transientChange = (diffs.size() > 0); + } + } + + if (persistentChange || transientChange) { + TriggerManager.invokeAddRemoveTriggers(locker, + getTriggers(), + newTriggers); + /* Don't change fields until after getTriggers() call above. */ + triggerBytes = newTriggerBytes; + transientTriggers = + ((newTransientTriggers != null) && + (newTransientTriggers.size() > 0)) ? + newTransientTriggers : + null; + this.triggers.set(newTriggers); + } + + return persistentChange; + } + + /** + * Called when a database is closed to clear all transient triggers and + * call their 'removeTrigger' methods. + */ + private void clearTransientTriggers() { + final List oldTriggers = getTriggers(); + if (oldTriggers == null) { + return; + } + final List newTriggers = new LinkedList(oldTriggers); + final Iterator iter = newTriggers.iterator(); + while (iter.hasNext()) { + final Trigger trigger = iter.next(); + if (!(trigger instanceof PersistentTrigger)) { + iter.remove(); + } + } + /* The dbName param can be null because it is not used. */ + setTriggers(null /*locker*/, null /*dbName*/, newTriggers, + false /*overridePersistentTriggers*/); + } + + /** + * Set the btree comparison function for this database. + * + * @return true if the comparator was actually changed + * + * @param comparator - The btree Comparison function. 
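/*
 * setTriggers above detects transient-trigger changes by loading the new
 * triggers into an IdentityHashMap and then removing each old trigger;
 * anything left over means the sets differ by object identity. The same
 * test as a compact, self-contained sketch (IdentityDiffSketch is
 * illustrative only):
 */
class IdentityDiffSketch {
    static <T> boolean sameByIdentity(java.util.List<T> oldList,
                                      java.util.List<T> newList) {
        if (oldList.size() != newList.size()) {
            return false;
        }
        java.util.Map<T, Object> diffs = new java.util.IdentityHashMap<>();
        for (T t : newList) {
            diffs.put(t, null);
        }
        for (T t : oldList) {
            diffs.remove(t);       /* removal is identity-based */
        }
        return diffs.isEmpty();    /* leftovers => a new object appeared */
    }
}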
+ */ + public boolean setBtreeComparator( + Comparator comparator, + boolean byClassName) + throws DatabaseException { + + final byte[] newBytes = + comparatorToBytes(comparator, byClassName, "Btree"); + + final boolean changed = + !Arrays.equals(newBytes, btreeComparatorBytes) || + ((btreeComparator instanceof PartialComparator) != + (comparator instanceof PartialComparator)) || + ((btreeComparator instanceof BinaryEqualityComparator) != + (comparator instanceof BinaryEqualityComparator)); + + btreeComparator = comparator; + btreeComparatorBytes = newBytes; + btreeComparatorByClassName = byClassName; + + btreePartialComparator = + btreeComparator instanceof PartialComparator; + + btreeBinaryEqualityComparator = + (btreeComparator == null || + btreeComparator instanceof BinaryEqualityComparator); + + if (changed) { + /* Key comparator is derived from dup and btree comparators. */ + resetKeyComparator(); + } + return changed; + } + + /** + * This comparator should not be used directly for comparisons. Use + * getKeyComparator instead. + * + * @return the btree Comparator object. + */ + public Comparator getBtreeComparator() { + return btreeComparator; + } + + /** + * This comparator should not be used directly for comparisons. Use + * getKeyComparator instead. + * + * @return the duplicate Comparator object. + */ + public Comparator getDuplicateComparator() { + return duplicateComparator; + } + + /** + * Key comparator is derived from the duplicate and btree comparator + */ + private void resetKeyComparator() { + + /* Initialize comparators. */ + if (btreeComparator instanceof DatabaseComparator) { + ((DatabaseComparator) btreeComparator).initialize + (envImpl.getClassLoader()); + } + if (duplicateComparator instanceof DatabaseComparator) { + ((DatabaseComparator) duplicateComparator).initialize + (envImpl.getClassLoader()); + } + + /* Create derived comparator for duplicate database. */ + if (getSortedDuplicates()) { + keyComparator = new DupKeyData.TwoPartKeyComparator + (btreeComparator, duplicateComparator); + } else { + keyComparator = btreeComparator; + } + } + + /** + * Should always be used when comparing keys for this database. + * + * For a duplicates database, the data is part two of the two-part database + * key. Therefore, the duplicates comparator and btree comparator are used + * for comparing keys. This synthetic comparator will call both of the + * other two user-defined comparators as necessary. + */ + public Comparator getKeyComparator() { + return keyComparator; + } + + /** + * @return whether Comparator is set by class name, not by serializable + * Comparator object. + */ + public boolean getBtreeComparatorByClass() { + return btreeComparatorByClassName; + } + + /** + * @return whether Comparator is set by class name, not by serializable + * Comparator object. + */ + public boolean getDuplicateComparatorByClass() { + return duplicateComparatorByClassName; + } + + /** + * @return whether Comparator implements PartialComparator. + */ + public boolean hasBtreePartialComparator() { + return btreePartialComparator; + } + + /** + * @return whether Comparator implements PartialComparator. + */ + public boolean hasDuplicatePartialComparator() { + return duplicatePartialComparator; + } + + public boolean allowsKeyUpdates() { + return (btreePartialComparator || duplicatePartialComparator); + } + + /** + * @return whether Comparator implements BinaryEqualityComparator. 
+ */ + public boolean hasBtreeBinaryEqualityComparator() { + return btreeBinaryEqualityComparator; + } + + /** + * @return whether Comparator implements BinaryEqualityComparator. + */ + public boolean hasDuplicateBinaryEqualityComparator() { + return duplicateBinaryEqualityComparator; + } + + /** + * Set the db environment after reading in the DatabaseImpl from the log. + */ + public void setEnvironmentImpl(EnvironmentImpl envImpl) { + this.envImpl = envImpl; + initWithEnvironment(); + tree.setDatabase(this); + } + + public EnvironmentImpl getEnv() { + return envImpl; + } + + /** + * Returns whether one or more handles are open. + */ + public boolean hasOpenHandles() { + return referringHandles.size() > 0; + } + + /** + * Add a referring handle + */ + public void addReferringHandle(Database db) { + referringHandles.add(db); + } + + /** + * Decrement the reference count. + */ + public void removeReferringHandle(Database db) { + referringHandles.remove(db); + } + + /** + * Returns a copy of the referring database handles. + */ + public Set getReferringHandles() { + HashSet copy = new HashSet(); + synchronized (referringHandles) { + copy.addAll(referringHandles); + } + return copy; + } + + /** + * Called after a handle onto this DB is closed. + */ + public void handleClosed(boolean doSyncDw, boolean deleteTempDb) + throws DatabaseException { + + if (referringHandles.isEmpty()) { + + /* + * Transient triggers are discarded when the last handle is + * closed. + */ + clearTransientTriggers(); + + /* + * Remove a temporary database with no handles open. + * + * We are not synchronized here in any way that would prevent + * another thread from opening a handle during this process, before + * the NameLN is locked. So we use noWait locking. If a lock is + * not granted, then another handle was opened and we cannot remove + * the database until later. + * + * We pass the database ID to dbRemove in order to remove the + * database only if the name matches the ID. This accounts for the + * remote possibility that the database is renamed or another + * database is created with the same name during this process, + * before the NameLN is locked. + * + * We can use a BasicLocker because temporary databases are always + * non-transactional. + */ + if (deleteTempDb && isTemporary()) { + Locker locker = + BasicLocker.createBasicLocker(envImpl, true /* noWait */); + boolean operationOk = false; + try { + envImpl.getDbTree().dbRemove(locker, getName(), getId()); + operationOk = true; + } catch (DbTree.NeedRepLockerException e) { + /* Should never happen; a temp db is never replicated. */ + throw EnvironmentFailureException.unexpectedException( + envImpl, e); + } catch (DatabaseNotFoundException e) { + /* Do nothing if DB was removed or renamed. */ + } catch (LockConflictException e) { + /* + * We will have to remove this database later. Note that + * we catch LockConflictException for simplicity but we + * expect either LockNotAvailableException or + * LockNotGrantedException. + */ + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } finally { + locker.operationEnd(operationOk); + } + } + + /* + * Sync a durable deferred write database with no handles open. If + * a handle is opened during this process, then the sync may be + * unnecessary but it will not cause a problem. + */ + if (doSyncDw && isDurableDeferredWrite()) { + sync(true); + } + } + } + + /** + * Figure out how much memory is used by the DbFileSummaryMap. 
Usually + * this number is built up over time by the DbFileSummaryMap itself and + * added to the memory budget, but in this case we need to reinitialize it + * after recovery, when DbFileSummaryMaps may be cut adrift by the process + * of overlaying new portions of the btree. + */ + public long getTreeAdminMemory() { + return dbFileSummaries.getMemorySize(); + } + + /** + * Update memory budgets when this databaseImpl is closed and will never be + * accessed again or when it is still open when its owning MapLN will be + * garbage collected, due to eviction or recovery. + */ + public void releaseTreeAdminMemory() { + + /* + * There's no need to account for INs which belong to this database, + * because those are closed by the EnvironmentImpl when clearing + * the INList. Do adjust memory budget for utilization info. + */ + dbFileSummaries.subtractFromMemoryBudget(); + } + + /** + * @return the referring handle count. + */ + int getReferringHandleCount() { + return referringHandles.size(); + } + + /** + * Increments the use count of this DB to prevent it from being evicted. + * Called by the DbTree.createDb/getDb methods that return a DatabaseImpl. + * Must be called while holding a lock on the MapLN. See isInUse. [#13415] + */ + void incrementUseCount() { + useCount.incrementAndGet(); + } + + /** + * Increments the write count and returns the updated value. + * @return updated write count + */ + public int noteWriteHandleOpen() { + return writeCount.incrementAndGet(); + } + + /** + * Decrements the write count and returns the updated value. + * @return updated write count + */ + public int noteWriteHandleClose() { + int count = writeCount.decrementAndGet(); + assert count >= 0; + return count; + } + + /** + * Decrements the use count of this DB, allowing it to be evicted if the + * use count reaches zero. Called via DbTree.releaseDb to release a + * DatabaseImpl that was returned by a DbTree.createDb/getDb method. See + * isInUse. [#13415] + */ + void decrementUseCount() { + assert useCount.get() > 0; + useCount.decrementAndGet(); + } + + /** + * Returns whether this DB is in use and cannot be evicted. Called by + * MapLN.isEvictable while holding a write-lock on the MapLN and a latch on + * its parent BIN. [#13415] + * + * When isInUse returns false (while holding a write-lock on the MapLN and + * a latch on the parent BIN), it guarantees that the database object + * is not in use and cannot be acquired by another thread (via + * DbTree.createDb/getDb) until both the MapLN lock and BIN latch are + * released. This guarantee is due to the fact that DbTree.createDb/getDb + * only increment the use count while holding a read-lock on the MapLN. + * Therefore, it is safe to evict the MapLN when isInUse returns false. + * + * When isInUse returns true, it is possible that another thread may + * decrement the use count at any time, since no locking or latching is + * performed when calling DbTree.releaseDb (which calls decrementUseCount). + * Therefore, it is not guaranteed that the MapLN is in use when isInUse + * returns true. A true result means: the DB may be in use, so it is not + * safe to evict it. + */ + public boolean isInUse() { + return (useCount.get() > 0); + } + + /** + * Checks whether a database is in use during a remove or truncate database + * operation. + */ + boolean isInUseDuringDbRemove() { + + /* + * The use count is at least one here, because remove/truncate has + * called getDb but releaseDb has not yet been called. 
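/*
 * A minimal sketch of the pin-count protocol around useCount above:
 * DbTree.getDb pins, DbTree.releaseDb unpins, and the evictor may only
 * evict while the count is observed at zero under the MapLN lock.
 * PinCountSketch and its method names are illustrative only.
 */
class PinCountSketch {
    private final java.util.concurrent.atomic.AtomicInteger useCount =
        new java.util.concurrent.atomic.AtomicInteger();

    void pin() {                   /* analogous to incrementUseCount */
        useCount.incrementAndGet();
    }

    void unpin() {                 /* analogous to decrementUseCount */
        int count = useCount.decrementAndGet();
        assert count >= 0;
    }

    boolean isEvictable() {        /* analogous to !isInUse() */
        return useCount.get() == 0;
    }
}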
Normally the + * database must be closed in order to remove or truncate it and + * referringHandles will be empty. But when the deprecated + * Database.truncate is called, the database is open and the use count + * includes the number of open handles. [#15805] + */ + return useCount.get() > 1 + referringHandles.size(); + } + + /** + * Flush all dirty nodes for this database to disk. + * + * @throws UnsupportedOperationException via Database.sync. + */ + public synchronized void sync(boolean flushLog) + throws DatabaseException { + + if (!isDurableDeferredWrite()) { + throw new UnsupportedOperationException + ("Database.sync() is only supported " + + "for deferred-write databases"); + } + + if (tree.rootExists()) { + envImpl.getCheckpointer().syncDatabase(envImpl, this, flushLog); + } + } + + /** + * For this secondary database return the primary that it is associated + * with, or null if not associated with any primary. Note that not all + * handles need be associated with a primary. + */ + public Database findPrimaryDatabase() { + synchronized (referringHandles) { + for (Database obj : referringHandles) { + if (obj instanceof SecondaryDatabase) { + return ((SecondaryDatabase) obj).getPrimaryDatabase(); + } + } + } + return null; + } + + public String getName() + throws DatabaseException { + + return envImpl.getDbTree().getDbName(id); + } + + /** + * Returns the DbFileSummary for the given file, allocates it if necessary + * and budgeted memory for any changes. + * + *

Must be called under the log write latch. + * + * @param willModify if true, the caller will modify the utilization info. + */ + public DbFileSummary getDbFileSummary(Long fileNum, boolean willModify) { + if (willModify) { + dirty = true; + } + assert dbFileSummaries != null; + + /* + * Pass true for checkResurrected to prevent memory/disk leaks caused + * by entries that could accumulate for deleted log files. + */ + return dbFileSummaries.get(fileNum, true /*adjustMemBudget*/, + true /*checkResurrected*/, + envImpl.getFileManager()); + } + + /** + * Removes the DbFileSummary for the given set of files. + * + *

Must be called under the log write latch. + * + * @return whether a DbFileSummary for any of the given files was present + * and was removed. + */ + public boolean removeDbFileSummaries(Collection<Long> fileNums) { + assert dbFileSummaries != null; + boolean removedAny = false; + + for (Long fileNum : fileNums) { + removedAny |= dbFileSummaries.remove(fileNum); + } + + if (removedAny) { + setDirty(); + } + + return removedAny; + } + + public Map<Long, DbFileSummary> cloneDbFileSummaries() { + return envImpl.getLogManager().cloneDbFileSummaries(this); + } + + /** Called under the log write latch, via cloneDbFileSummaries above. */ + public Map<Long, DbFileSummary> cloneDbFileSummariesInternal() { + return dbFileSummaries.cloneMap(); + } + + /** + * For unit testing. + */ + public DbFileSummaryMap getDbFileSummaries() { + return dbFileSummaries; + } + + /** + * Returns whether this database has new (unflushed) utilization info or + * the root LSN was modified after it was last logged. + */ + public boolean isDirty() { + return dirty; + } + + /** + * Sets dirty in order to force the MapLN to be flushed later. + * + * This flag is used when utilization is changed, the root LSN is changed, + * etc., in order to cause the MapLN to be flushed during the next + * checkpoint, or when utilization info is logged. + */ + public void setDirty() { + dirty = true; + } + + /** + * Returns whether this database's MapLN must be flushed during a + * checkpoint. + */ + public boolean isCheckpointNeeded() { + return !isDeleted() && (isDirty() || isTemporary()); + } + + /** + * @return true if this database is deleted. Delete cleanup may still be in + * progress. + */ + public boolean isDeleted() { + return !(deleteState == NOT_DELETED); + } + + /** + * @return true if this database is deleted and all cleanup is finished. + */ + public boolean isDeleteFinished() { + return (deleteState == DELETED); + } + + /** + * The delete cleanup is starting. Set this before releasing any + * write locks held for a db operation. + */ + public void startDeleteProcessing() { + assert (deleteState == NOT_DELETED); + + deleteState = DELETED_CLEANUP_INLIST_HARVEST; + } + + /** + * Should be called by the SortedLSNTreeWalker when it is finished with + * the INList. + */ + void finishedINListHarvest() { + assert (deleteState == DELETED_CLEANUP_INLIST_HARVEST); + + deleteState = DELETED_CLEANUP_LOG_HARVEST; + } + + /** + * Perform the entire two-step database deletion. This method is used at + * non-transactional operation end. When a transaction is used (see Txn), + * startDeleteProcessing is called at commit before releasing write locks + * and finishDeleteProcessing is called after releasing write locks. + */ + public void startAndFinishDelete() + throws DatabaseException { + + startDeleteProcessing(); + finishDeleteProcessing(); + } + + /** + * Release the INs for the deleted database, count all log entries for this + * database as obsolete, delete the MapLN, and set the state to DELETED.
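/*
 * The delete-processing fields and methods above form a one-way state
 * machine. A sketch of the legal transitions (an illustrative enum;
 * DatabaseImpl itself uses short constants):
 *
 *   NOT_DELETED
 *     -> DELETED_CLEANUP_INLIST_HARVEST   via startDeleteProcessing()
 *     -> DELETED_CLEANUP_LOG_HARVEST      via finishedINListHarvest()
 *     -> DELETED                          via finishDeleteProcessing()
 */
enum DeleteStateSketch {
    NOT_DELETED,
    DELETED_CLEANUP_INLIST_HARVEST,
    DELETED_CLEANUP_LOG_HARVEST,
    DELETED;

    DeleteStateSketch next() {
        switch (this) {
        case NOT_DELETED:
            return DELETED_CLEANUP_INLIST_HARVEST;
        case DELETED_CLEANUP_INLIST_HARVEST:
            return DELETED_CLEANUP_LOG_HARVEST;
        case DELETED_CLEANUP_LOG_HARVEST:
            return DELETED;
        default:
            throw new IllegalStateException("DELETED is terminal");
        }
    }
}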
+ * + * Used at transaction end or non-transactional operation end in these + * cases: + * - purge the deleted database after a commit of + * Environment.removeDatabase + * - purge the deleted database after a commit of + * Environment.truncateDatabase + * - purge the newly created database after an abort of + * Environment.truncateDatabase + * + * Note that the processing of the naming tree means the MapLN is never + * actually accessible from the current tree, but deleting the MapLN will + * do two things: + * (a) mark it properly obsolete + * (b) null out the database tree, leaving the INList the only + * reference to the INs. + */ + public void finishDeleteProcessing() + throws DatabaseException { + + assert TestHookExecute.doHookIfSet(pendingDeletedHook); + + try { + /* + * Delete MapLN before the walk. Get the root LSN before deleting + * the MapLN, as that will null out the root. + */ + long rootLsn = tree.getRootLsn(); + + /* + * Grab the in-cache root IN before we call deleteMapLN so that it + * gives us a starting point for the SortedLSNTreeWalk below. The + * on-disk version is obsolete at this point. + */ + IN rootIN = tree.getResidentRootIN(false); + envImpl.getDbTree().deleteMapLN(id); + + /* + * Ensure that the MapLN deletion is flushed to disk, so that + * utilization information is not lost if we crash past this point. + * Note that the Commit entry has already been flushed for the + * transaction of the DB removal/truncation operation, so we cannot + * rely on the flush of the Commit entry to flush the MapLN. + * [#18696] + */ + envImpl.getLogManager().flushSync(); + + if (createdAtLogVersion >= 6 && + !forceTreeWalkForTruncateAndRemove) { + + /* + * For databases created at log version 6 or after, the + * per-database utilization info is complete and can be counted + * as obsolete without walking the database. + * + * We do not need to flush modified file summaries because the + * obsolete amounts are logged along with the deleted MapLN and + * will be re-counted by recovery if necessary. + */ + envImpl.getLogManager().countObsoleteDb(this); + } else { + + /* + * For databases created prior to log version 6, the + * per-database utilization info is incomplete. Use the old + * method of counting utilization via SortedLSNTreeWalker. + * + * Use a local tracker that is accumulated under the log write + * latch when we're done counting. Start by recording the LSN + * of the root IN as obsolete. + */ + LocalUtilizationTracker localTracker = + new LocalUtilizationTracker(envImpl); + + if (rootLsn != DbLsn.NULL_LSN) { + localTracker.countObsoleteNodeInexact + (rootLsn, LogEntryType.LOG_IN, 0, this); + } + + /* Fetch LNs to count LN sizes only if so configured. */ + boolean fetchLNSize = + envImpl.getCleaner().getFetchObsoleteSize(this); + + /* Use the tree walker to visit every child LSN in the tree. */ + ObsoleteProcessor obsoleteProcessor = + new ObsoleteProcessor(this, localTracker); + + SortedLSNTreeWalker walker = new ObsoleteTreeWalker + (this, rootLsn, fetchLNSize, obsoleteProcessor, rootIN); + + /* + * At this point, it's possible for the evictor to find an IN + * for this database on the INList. It should be ignored. + */ + walker.walk(); + + /* + * Count obsolete nodes for a deleted database at transaction + * end time. Write out the modified file summaries for + * recovery. + */ + envImpl.getUtilizationProfile().flushLocalTracker + (localTracker); + } + + /* Remove all INs for this database from the INList. 
*/ + MemoryBudget mb = envImpl.getMemoryBudget(); + INList inList = envImpl.getInMemoryINs(); + long memoryChange = 0; + try { + Iterator iter = inList.iterator(); + while (iter.hasNext()) { + IN thisIN = iter.next(); + if (thisIN.getDatabase() == this) { + iter.remove(); + memoryChange += + (0 - thisIN.getBudgetedMemorySize()); + } + } + } finally { + mb.updateTreeMemoryUsage(memoryChange); + } + + } finally { + /* Adjust memory budget for utilization info. */ + dbFileSummaries.subtractFromMemoryBudget(); + + deleteState = DELETED; + /* releaseDb to balance getDb called by truncate/remove. */ + envImpl.getDbTree().releaseDb(this); + } + } + + /** + * Counts all active LSNs in a database as obsolete. + * + * @param mapLnLsn is the LSN of the MapLN when called via recovery, + * otherwise is NULL_LSN. + * + *

Must be called under the log write latch or during recovery.
        + */ + public void countObsoleteDb(BaseUtilizationTracker tracker, + long mapLnLsn) { + /* + * Even though the check for createdAtLogVersion and + * forceTreeWalkForTruncateAndRemove is made in finishDeleteProcessing + * before calling this method, we must repeat the check here because + * this method is also called by recovery. + */ + if (createdAtLogVersion >= 6 && !forceTreeWalkForTruncateAndRemove) { + tracker.countObsoleteDb(dbFileSummaries, mapLnLsn); + } + } + + private static class ObsoleteTreeWalker extends SortedLSNTreeWalker { + + private final IN rootIN; + + private ObsoleteTreeWalker(DatabaseImpl dbImpl, + long rootLsn, + boolean fetchLNSize, + TreeNodeProcessor callback, + IN rootIN) + throws DatabaseException { + + super(new DatabaseImpl[] { dbImpl }, + true, // set INList finish harvest + new long[] { rootLsn }, + callback, + null, /* savedException */ + null); /* exception predicate */ + + accumulateLNs = fetchLNSize; + this.rootIN = rootIN; + } + + @Override + IN getResidentRootIN(@SuppressWarnings("unused") + DatabaseImpl ignore) { + if (rootIN != null) { + rootIN.latchShared(); + } + return rootIN; + } + } + + /* Mark each LSN obsolete in the utilization tracker. */ + private static class ObsoleteProcessor implements TreeNodeProcessor { + + private final LocalUtilizationTracker localTracker; + private final DatabaseImpl db; + + ObsoleteProcessor(DatabaseImpl db, + LocalUtilizationTracker localTracker) { + this.db = db; + this.localTracker = localTracker; + } + + @Override + public void processLSN(long childLsn, + LogEntryType childType, + Node node, + byte[] lnKey, + int lastLoggedSize) { + assert childLsn != DbLsn.NULL_LSN; + + /* + * Count the LN log size if an LN node and key are available, i.e., + * we are certain this is an LN. [#15365] + */ + int size = 0; + if (lnKey != null && node instanceof LN) { + size = lastLoggedSize; + } + + localTracker.countObsoleteNodeInexact + (childLsn, childType, size, db); + } + + @Override + public void processDirtyDeletedLN(long childLsn, LN ln, + @SuppressWarnings("unused") + byte[] lnKey) { + assert ln != null; + + /* + * Do not count the size (pass zero) because the LN is dirty and + * the logged LN is not available. + */ + localTracker.countObsoleteNodeInexact + (childLsn, ln.getGenericLogType(), 0, db); + } + + @Override + public void noteMemoryExceeded() { + } + } + + public BtreeStats stat(StatsConfig config) + throws DatabaseException { + + if (tree == null) { + return new BtreeStats(); + } + + final BtreeStats stats; + + if (config.getFast()) { + stats = new BtreeStats(); + } else { + /* + * Use verify() to get stats. This is fairly inexpensive and the + * performance of this method is not critical. In the future we + * could optimize a little by disabling basic btree verification. + */ + final VerifyConfig verifyConfig = new VerifyConfig(); + + verifyConfig.setShowProgressInterval( + config.getShowProgressInterval()); + + verifyConfig.setShowProgressStream( + config.getShowProgressStream()); + + stats = verify(verifyConfig); + } + + tree.loadStats(config, stats); + + return stats; + } + + public BtreeStats verify(VerifyConfig config) + throws DatabaseException { + + if (tree == null) { + return new BtreeStats(); + } + + final BtreeVerifier verifier = new BtreeVerifier(envImpl); + verifier.setBtreeVerifyConfig(config); + + return verifier.verifyDatabase(getDebugName(), getId()); + } + + /** + * Preload the cache, using up to maxBytes bytes or maxMillsecs msec. 
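/*
 * The count(long memoryLimit) method below sizes its disk-ordered scan from
 * spare heap: with no caller-supplied limit it takes a tenth of the headroom
 * between the JVM max heap and the JE cache budget, and falls back to the
 * skip-based counter when that is under 1MB. The arithmetic as a worked,
 * self-contained sketch (numbers and names are examples only):
 */
class CountMemoryHeuristicSketch {
    /* Returns the scan budget, or -1 to mean "use the skip-based count". */
    static long chooseLimit(long runtimeMaxMemory,
                            long jeCacheMax,
                            long requestedLimit) {
        final long minMem = 1024 * 1024;            /* 1MB floor */
        long limit = requestedLimit;
        if (limit <= 0) {
            limit = (runtimeMaxMemory - jeCacheMax) / 10;
        }
        return (limit < minMem) ? -1 : limit;
    }

    public static void main(String[] args) {
        /* 4GB heap with a 1GB JE cache => (4G - 1G) / 10, about 307MB. */
        System.out.println(chooseLimit(4L << 30, 1L << 30, 0));
    }
}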
+ * + * @throws IllegalArgumentException via Database.preload + */ + public PreloadStats preload(PreloadConfig config) + throws DatabaseException { + + return envImpl.preload(new DatabaseImpl[] { this }, config); + } + + /** + * The processLSN() code for CountProcessor. + */ + private static class DOSCountCallback + implements DiskOrderedScanner.RecordProcessor { + + public long count = 0; + + @Override + public void process( + int dbIdx, + byte[] key, + byte[] data, + int expiration, + boolean expirationInHours) { + + assert(key == null); + assert(data == null); + ++count; + } + + @Override + public boolean canProcessWithoutBlocking(int nRecords) { + return true; + } + + @Override + public int getCapacity() { + return Integer.MAX_VALUE; + } + + @Override + public void checkShutdown() { + } + } + + /** + * Count entries in the database including dups, but don't dirty the cache. + */ + public long count(long memoryLimit) + throws DatabaseException { + + try { + MemoryBudget mb = envImpl.getMemoryBudget(); + + /* + * Must have at least 1MB of memory to be used by DOS (1MB is + * chosen rather arbitrarily). + */ + long minMem = 1024 * 1024; + + /* + * Use a heuristic to calculate the memory limit if none was + * provided by the user. This heuristic makes sure that the + * JE cache will not be affected, but otherwise, it is also + * rather arbitrary. + */ + if (memoryLimit <= 0) { + memoryLimit = + (JVMSystemUtils.getRuntimeMaxMemory() - mb.getMaxMemory()) / 10; + } + + if (memoryLimit < minMem) { + //System.out.println("Using skip-based Database.count()"); + return count(null, true, null, true); + } + + DOSCountCallback counter = new DOSCountCallback(); + + DatabaseImpl[] dbs = new DatabaseImpl[1]; + dbs[0] = this; + + DiskOrderedScanner scanner = new DiskOrderedScanner( + dbs, counter, true/*serialDBScan*/, + true/*binsOnly*/, true /*keyOnly*/, true/*countOnly*/, + Long.MAX_VALUE/*lsnBatchSize*/, memoryLimit, + false/*debug*/); + + scanner.scan( + FileProtector.DATABASE_COUNT_NAME, + envImpl.getNodeSequence().getNextDatabaseCountId()); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + + return counter.count; + + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } + } + + /** + * For future use as API method. Implementation is incomplete. + * + * Counts entries in a key range by positioning a cursor on the beginning + * key and skipping entries until the ending key is encountered. + */ + private long count( + DatabaseEntry beginKey, + boolean beginInclusive, + DatabaseEntry endKey, + boolean endInclusive) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry noData = new DatabaseEntry(); + noData.setPartial(0, 0, true); + + final Locker locker = BasicLocker.createBasicLocker(envImpl); + final LockMode lockMode = LockMode.READ_UNCOMMITTED; + + try { + final Cursor c = DbInternal.makeCursor( + this, locker, null /*cursorConfig*/); + + try { + /* Position cursor on beginning key. */ + if (beginKey != null) { + + key.setData(beginKey.getData(), beginKey.getOffset(), + beginKey.getSize()); + + if (c.getSearchKeyRange(key, noData, lockMode) != + OperationStatus.SUCCESS) { + return 0; + } + + if (!beginInclusive && key.equals(beginKey)) { + if (c.getNext(key, noData, lockMode) != + OperationStatus.SUCCESS) { + return 0; + } + } + } else { + if (c.getFirst(key, noData, lockMode) != + OperationStatus.SUCCESS) { + return 0; + } + } + + /* Create RangeConstraint for ending key.
*/ + RangeConstraint rangeConstraint = null; // INCOMPLETE + + /* Skip entries to get count. */ + return 1 + DbInternal.getCursorImpl(c).skip( + true /*forward*/, 0 /*maxCount*/, rangeConstraint); + + } finally { + c.close(); + } + } finally { + locker.operationEnd(true); + } + } + + /* + * Dumping + */ + public String dumpString(int nSpaces) { + StringBuilder sb = new StringBuilder(); + sb.append(TreeUtils.indent(nSpaces)); + sb.append(""); + if (dbFileSummaries != null) { + Iterator> entries = + dbFileSummaries.entrySet().iterator(); + while (entries.hasNext()) { + Map.Entry entry = entries.next(); + Long fileNum = entry.getKey(); + DbFileSummary summary = entry.getValue(); + sb.append(""); + sb.append(summary); + sb.append("/file>"); + } + } + sb.append(""); + return sb.toString(); + } + + /* + * Logging support + */ + + /** + * This log entry type is configured to perform marshaling (getLogSize and + * writeToLog) under the write log mutex. Otherwise, the size could change + * in between calls to these two methods as the result of utilizaton + * tracking. + * + * @see Loggable#getLogSize + */ + @Override + public int getLogSize() { + + int size = + id.getLogSize() + + tree.getLogSize() + + 1 + // flags, 1 byte + LogUtils.getByteArrayLogSize(btreeComparatorBytes) + + LogUtils.getByteArrayLogSize(duplicateComparatorBytes) + + LogUtils.getPackedIntLogSize(maxTreeEntriesPerNode) + + 1; // createdAtLogVersion + + size += LogUtils.getPackedIntLogSize(dbFileSummaries.size()); + Iterator> i = + dbFileSummaries.entrySet().iterator(); + while (i.hasNext()) { + Map.Entry entry = i.next(); + Long fileNum = entry.getKey(); + DbFileSummary summary = entry.getValue(); + size += + LogUtils.getPackedLongLogSize(fileNum.longValue()) + + summary.getLogSize(); + } + size += TriggerUtils.logSize(triggerBytes); + return size; + } + + /** + * @see Loggable#writeToLog + */ + @Override + public void writeToLog(ByteBuffer logBuffer) { + + id.writeToLog(logBuffer); + + tree.writeToLog(logBuffer); + + logBuffer.put(flags); + + LogUtils.writeByteArray(logBuffer, btreeComparatorBytes); + LogUtils.writeByteArray(logBuffer, duplicateComparatorBytes); + + LogUtils.writePackedInt(logBuffer, maxTreeEntriesPerNode); + + logBuffer.put(createdAtLogVersion); + + LogUtils.writePackedInt(logBuffer, dbFileSummaries.size()); + + Iterator> i = + dbFileSummaries.entrySet().iterator(); + + while (i.hasNext()) { + Map.Entry entry = i.next(); + Long fileNum = entry.getKey(); + DbFileSummary summary = entry.getValue(); + LogUtils.writePackedLong(logBuffer, fileNum.longValue()); + summary.writeToLog(logBuffer); + } + + TriggerUtils.writeTriggers(logBuffer, triggerBytes); + + dirty = false; + } + + /** + * @see Loggable#readFromLog + */ + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + + boolean version6OrLater = (entryVersion >= 6); + + id.readFromLog(itemBuffer, entryVersion); + tree.readFromLog(itemBuffer, entryVersion); + + /* + * Versions < 6 have the duplicatesAllowed boolean rather than a flags + * byte here, but we don't need a special case because the old boolean + * value is 1 and replacement flag value is 1. 
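+         * (Concretely: the old boolean was serialized as the byte 0x1 when
+         * dups were allowed, and DUPS_ENABLED is the 0x1 bit of the flags
+         * byte, so the same stored byte reads correctly either way.)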
+ */ + flags = itemBuffer.get(); + + if (forceKeyPrefixing) { + setKeyPrefixing(); + } + + if (entryVersion >= 2) { + btreeComparatorBytes = + LogUtils.readByteArray(itemBuffer, !version6OrLater); + duplicateComparatorBytes = + LogUtils.readByteArray(itemBuffer, !version6OrLater); + } else { + String btreeClassName = LogUtils.readString + (itemBuffer, !version6OrLater, entryVersion); + String dupClassName = LogUtils.readString + (itemBuffer, !version6OrLater, entryVersion); + if (btreeClassName.length() == 0) { + btreeComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } else { + btreeComparatorBytes = + objectToBytes(btreeClassName, "Btree"); + } + if (dupClassName.length() == 0) { + duplicateComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } else { + duplicateComparatorBytes = + objectToBytes(dupClassName, "Duplicate"); + } + } + + if (entryVersion >= 1) { + maxTreeEntriesPerNode = + LogUtils.readInt(itemBuffer, !version6OrLater); + if (entryVersion < 8) { + /* Discard maxDupTreeEntriesPerNode. */ + LogUtils.readInt(itemBuffer, !version6OrLater); + } + } + + if (version6OrLater) { + createdAtLogVersion = itemBuffer.get(); + int nFiles = LogUtils.readPackedInt(itemBuffer); + for (int i = 0; i < nFiles; i += 1) { + long fileNum = LogUtils.readPackedLong(itemBuffer); + DbFileSummary summary = dbFileSummaries.get + (Long.valueOf(fileNum), false /*adjustMemBudget*/, + false /*checkResurrected*/, null /*fileManager*/); + summary.readFromLog(itemBuffer, entryVersion); + } + } + + triggerBytes = (entryVersion < 8) ? + null : + TriggerUtils.readTriggers(itemBuffer, entryVersion); + /* Trigger list is unmarshalled lazily by getTriggers. */ + } + + /** + * @see Loggable#dumpLog + */ + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(" "); + id.dumpLog(sb, verbose); + tree.dumpLog(sb, verbose); + if (verbose && dbFileSummaries != null) { + Iterator> entries = + dbFileSummaries.entrySet().iterator(); + + while (entries.hasNext()) { + Map.Entry entry = entries.next(); + Long fileNum = entry.getKey(); + DbFileSummary summary = entry.getValue(); + sb.append(""); + sb.append(summary); + sb.append(""); + } + } + TriggerUtils.dumpTriggers(sb, triggerBytes, getTriggers()); + sb.append("
        "); + } + + static void dumpFlags(StringBuilder sb, + @SuppressWarnings("unused") boolean verbose, + byte flags) { + sb.append(" dupsort=\"").append((flags & DUPS_ENABLED) != 0); + sb.append("\" replicated=\"").append((flags & IS_REPLICATED_BIT) != 0); + sb.append("\" temp=\"").append((flags & TEMPORARY_BIT) + != 0).append("\" "); + } + + /** + * @see Loggable#getTransactionId + */ + @Override + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + @Override + public boolean logicalEquals(@SuppressWarnings("unused") Loggable other) { + return false; + } + + /** + * Used for log dumping. + */ + private static String + getComparatorClassName(Comparator comparator, + byte[] comparatorBytes) { + + if (comparator != null) { + return comparator.getClass().getName(); + } else if (comparatorBytes != null && + comparatorBytes.length > 0) { + + /* + * Output something for DbPrintLog when + * EnvironmentImpl.getNoComparators. + */ + return "byteLen: " + comparatorBytes.length; + } else { + return ""; + } + } + + /** + * Used both to read from the log and to validate a comparator when set in + * DatabaseConfig. + */ + public static Comparator + instantiateComparator(Class> + comparatorClass, + String comparatorType) { + if (comparatorClass == null) { + return null; + } + + try { + return comparatorClass.newInstance(); + } catch (Exception e) { + throw EnvironmentFailureException.unexpectedException + ("Exception while trying to load " + comparatorType + + " Comparator class.", e); + } + } + + /** + * Used to validate a comparator when set in DatabaseConfig. + */ + @SuppressWarnings("unchecked") + public Comparator + instantiateComparator(Comparator comparator, + String comparatorType) + throws DatabaseException { + + if (comparator == null) { + return null; + } + + return (Comparator) bytesToObject + (objectToBytes(comparator, comparatorType), comparatorType, + envImpl.getClassLoader()); + } + + /** + * Converts a comparator object to a serialized byte array, converting to + * a class name String object if byClassName is true. + * + * @throws EnvironmentFailureException if the object cannot be serialized. + */ + public static byte[] comparatorToBytes(Comparator comparator, + boolean byClassName, + String comparatorType) { + if (comparator == null) { + return LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } + + final Object obj = + byClassName ? comparator.getClass().getName() : comparator; + + return objectToBytes(obj, comparatorType); + } + + /** + * Converts an arbitrary object to a serialized byte array. Assumes that + * the object given is non-null. + */ + public static byte[] objectToBytes(Object obj, + String comparatorType) { + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(obj); + return baos.toByteArray(); + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedException + ("Exception while trying to store " + comparatorType, e); + } + } + + /** + * Converts an arbitrary serialized byte array to an object. Assumes that + * the byte array given is non-null and has a non-zero length. 
+     */
+    static Object bytesToObject(byte[] bytes,
+                                String comparatorType,
+                                ClassLoader loader) {
+        try {
+            ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
+            ClassResolver.Stream ois = new ClassResolver.Stream(bais, loader);
+            return ois.readObject();
+        } catch (Exception e) {
+            throw EnvironmentFailureException.unexpectedException
+                ("Exception while trying to load " + comparatorType, e);
+        }
+    }
+
+    public int compareEntries(DatabaseEntry entry1,
+                              DatabaseEntry entry2,
+                              boolean duplicates) {
+        return Key.compareKeys
+            (entry1.getData(), entry1.getOffset(), entry1.getSize(),
+             entry2.getData(), entry2.getOffset(), entry2.getSize(),
+             (duplicates ? duplicateComparator : btreeComparator));
+    }
+
+    /**
+     * Utility class for converting bytes to a Comparator or its Class.
+     */
+    static class ComparatorReader {
+
+        /*
+         * True if comparator type is Class,
+         * false if comparator type is Comparator.
+         */
+        private final boolean isClass;
+
+        /*
+         * Record the Class type for this Comparator,
+         * used by ReplicatedDatabaseConfig.
+         */
+        private final Class<? extends Comparator<byte[]>> comparatorClass;
+        private final Comparator<byte[]> comparator;
+
+        @SuppressWarnings("unchecked")
+        public ComparatorReader(byte[] comparatorBytes,
+                                String type,
+                                ClassLoader loader) {
+
+            /* No comparator. */
+            if (comparatorBytes.length == 0) {
+                comparatorClass = null;
+                comparator = null;
+                isClass = false;
+                return;
+            }
+
+            /* Deserialize String class name or Comparator instance. */
+            final Object obj = bytesToObject(comparatorBytes, type, loader);
+
+            /* Comparator is specified as a class name. */
+            if (obj instanceof String) {
+                final String className = (String) obj;
+                try {
+                    comparatorClass =
+                        (Class<? extends Comparator<byte[]>>)
+                        ClassResolver.resolveClass(className, loader);
+                } catch (ClassNotFoundException ee) {
+                    throw EnvironmentFailureException.
+                        unexpectedException(ee);
+                }
+                comparator = instantiateComparator(comparatorClass, type);
+                isClass = true;
+                return;
+            }
+
+            /* Comparator is specified as an instance. */
+            if (obj instanceof Comparator) {
+                comparatorClass = null;
+                comparator = (Comparator<byte[]>) obj;
+                isClass = false;
+                return;
+            }
+
+            /* Should never happen. */
+            throw EnvironmentFailureException.unexpectedState
+                ("Expected class name or Comparator instance, got: " +
+                 obj.getClass().getName());
+        }
+
+        public boolean isClass() {
+            return isClass;
+        }
+
+        public Class<? extends Comparator<byte[]>> getComparatorClass() {
+            return comparatorClass;
+        }
+
+        public Comparator<byte[]> getComparator() {
+            return comparator;
+        }
+    }
+
+    public int getBinDeltaPercent() {
+        return binDeltaPercent;
+    }
+
+    /**
+     * Return a ReplicationContext that indicates whether this operation
+     * should broadcast data records for this database as part of the
+     * replication stream.
+     */
+    public ReplicationContext getRepContext() {
+
+        /*
+         * It's sufficient to base the decision on what to return solely on
+         * the isReplicated() value. We're guaranteed that the environment is
+         * currently opened w/replication. That's because we refuse to open
+         * rep'ed environments in standalone mode and we couldn't have created
+         * this db w/replication specified in a standalone environment.
+         *
+         * We also don't have to check if this is a client or master. If this
+         * method is called, we're executing a write operation that was
+         * instigated by an API call on this node (as opposed to a write
+         * operation that was instigated by an incoming replication message).
+         * We enforce elsewhere that write operations are only conducted by
+         * the master.
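+         *
+         * For example, a put() on a replicated database, executed on the
+         * master via the public API, gets ReplicationContext.MASTER from
+         * this method, while a write to a local, non-replicated database
+         * gets ReplicationContext.NO_REPLICATE.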
+         *
+         * Writes provoked by incoming replication messages are executed
+         * through the putReplicatedLN and deleteReplicatedLN methods.
+         */
+        return isReplicated() ?
+            ReplicationContext.MASTER :
+            ReplicationContext.NO_REPLICATE;
+    }
+
+    /**
+     * Return a ReplicationContext that includes information on how to
+     * logically replicate database operations. This kind of replication
+     * context must be used for any API call that logs a NameLN representing
+     * a database operation. However, NameLNs that are logged for other
+     * reasons, such as cleaner migration, don't need this special
+     * replication context.
+     */
+    DbOpReplicationContext
+        getOperationRepContext(DbOperationType operationType,
+                               DatabaseId oldDbId) {
+
+        /*
+         * If this method is called, we're executing a write operation that
+         * was instigated by an API call on this node (as opposed to a write
+         * operation that was instigated by an incoming replication
+         * message). We enforce elsewhere that write operations are only
+         * conducted by the master.
+         */
+        DbOpReplicationContext context =
+            new DbOpReplicationContext(isReplicated(), operationType);
+
+        if (DbOperationType.isWriteConfigType(operationType)) {
+            assert(oldDbId == null);
+            context.setCreateConfig
+                (new ReplicatedDatabaseConfig(flags,
+                                              maxTreeEntriesPerNode,
+                                              btreeComparatorBytes,
+                                              duplicateComparatorBytes,
+                                              triggerBytes));
+        } else if (operationType == DbOperationType.TRUNCATE) {
+            assert(oldDbId != null);
+            context.setTruncateOldDbId(oldDbId);
+        }
+        return context;
+    }
+
+    /**
+     * Convenience overloading.
+     *
+     * @see #getOperationRepContext(DbOperationType, DatabaseId)
+     * @param operationType the type of database operation being performed
+     * @return the replication context for the operation
+     */
+    DbOpReplicationContext
+        getOperationRepContext(DbOperationType operationType) {
+
+        assert(operationType != DbOperationType.TRUNCATE);
+        return getOperationRepContext(operationType, null);
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DbConfigManager.java b/src/com/sleepycat/je/dbi/DbConfigManager.java
new file mode 100644
index 0000000..04e42c1
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DbConfigManager.java
@@ -0,0 +1,706 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.config.BooleanConfigParam;
+import com.sleepycat.je.config.ConfigParam;
+import com.sleepycat.je.config.DurationConfigParam;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.config.IntConfigParam;
+import com.sleepycat.je.config.LongConfigParam;
+import com.sleepycat.je.config.ShortConfigParam;
+import com.sleepycat.je.utilint.PropUtil;
+
+/**
+ * DbConfigManager holds the configuration parameters for an environment.
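+ *
+ * A typical read goes through the manager held by the EnvironmentImpl
+ * (illustrative sketch; ENV_RUN_CLEANER and LOCK_TIMEOUT are among the
+ * param constants defined in EnvironmentParams):
+ *
+ *   DbConfigManager cm = envImpl.getConfigManager();
+ *   boolean runCleaner = cm.getBoolean(EnvironmentParams.ENV_RUN_CLEANER);
+ *   int lockTimeoutMs = cm.getDuration(EnvironmentParams.LOCK_TIMEOUT);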
+ * + * In general, all configuration parameters are represented by a ConfigParam + * defined in com.sleepycat.je.config.EnvironmentParams and can be represented + * by a property described by the EnvironmentConfig String constants. + * Environment parameters have some interesting twists because there are some + * attributes that are scoped by handle, such as the commit durability + * (txnSync, txnNoSync, etc) parameters. + * + * DbConfigManager is instantiated first by the EnvironmentImpl, and is loaded + * with the base configuration parameters. If replication is enabled, + * additional properties are added when the ReplicatedEnvironment is + * instantiated. In order to keep replication code out of the base code, + * replication parameters are loaded by way of the addConfigurations method. + */ +public class DbConfigManager { + + /* + * The name of the JE properties file, to be found in the environment + * directory. + */ + private static final String PROPFILE_NAME = "je.properties"; + + /* + * All properties in effect for this JE instance, both environment + * and replication environment scoped, are stored in this Properties field. + */ + protected Properties props; + + /* + * Save a reference to the environment config to access debug properties + * that are fields in EnvironmentConfig, must be set before the + * environment is created, and are not represented as JE properties. + */ + private final EnvironmentConfig environmentConfig; + + public DbConfigManager(EnvironmentConfig config) { + + this.environmentConfig = config; + + if (config == null) { + props = new Properties(); + } else { + props = DbInternal.getProps(config); + } + } + + public EnvironmentConfig getEnvironmentConfig() { + return environmentConfig; + } + + /* + * Parameter Access + */ + + /** + * Returns whether this parameter is specified by the user's configuration. + * + * Can be used to determine whether to apply another param, if this param + * is not specified, for example, when this param is deprecated and another + * param takes its place. + * + * @return whether this parameter is specified. + */ + public synchronized boolean isSpecified(ConfigParam configParam) { + return props.containsKey(configParam.getName()); + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if param wasn't explicitly set + */ + public synchronized String get(ConfigParam configParam) { + return getConfigParam(props, configParam.getName()); + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if param wasn't explicitly set + */ + public synchronized String get(String configParamName) { + return getConfigParam(props, configParamName); + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if it wasn't explicitly set. + */ + public boolean getBoolean(BooleanConfigParam configParam) { + + /* See if it's specified. */ + String val = get(configParam); + return parseBoolean(val); + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if it wasn't explicitly set. + */ + public short getShort(ShortConfigParam configParam) { + + /* See if it's specified. 
*/ + String val = get(configParam); + short shortValue = 0; + if (val != null) { + try { + shortValue = Short.parseShort(val); + } catch (NumberFormatException e) { + + /* + * This should never happen if we put error checking into + * the loading of config values. + */ + assert false: e.getMessage(); + } + } + return shortValue; + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if it wasn't explicitly set. + */ + public int getInt(IntConfigParam configParam) { + + /* See if it's specified. */ + String val = get(configParam); + int intValue = 0; + if (val != null) { + try { + intValue = Integer.parseInt(val); + } catch (NumberFormatException e) { + + /* + * This should never happen if we put error checking into + * the loading of config values. + */ + assert false: e.getMessage(); + } + } + return intValue; + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if it wasn't explicitly set + */ + public long getLong(LongConfigParam configParam) { + + /* See if it's specified. */ + String val = get(configParam); + long longValue = 0; + if (val != null) { + try { + longValue = Long.parseLong(val); + } catch (NumberFormatException e) { + /* + * This should never happen if we put error checking + * into the loading of config values. + */ + assert false : e.getMessage(); + } + } + return longValue; + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if it wasn't explicitly set. + */ + public int getDuration(DurationConfigParam configParam) { + String val = get(configParam); + int millis = 0; + if (val != null) { + try { + millis = PropUtil.parseDuration(val); + } catch (IllegalArgumentException e) { + + /* + * This should never happen if we put error checking into + * the loading of config values. + */ + assert false: e.getMessage(); + } + } + return millis; + } + + /** + * Get this parameter from the environment wide configuration settings. + * + * @return default for param if it wasn't explicitly set. + */ + public long getDurationNS(DurationConfigParam configParam) { + String val = get(configParam); + long nanos = 0; + if (val != null) { + try { + nanos = PropUtil.parseDurationNS(val); + } catch (IllegalArgumentException e) { + + /* + * This should never happen if we put error checking into + * the loading of config values. + */ + assert false: e.getMessage(); + } + } + return nanos; + } + + /* + * Helper methods used by EnvironmentConfig and ReplicationConfig. + */ + + /** + * Validate a collection of configurations, checking that + * - the name and value are valid + * - a replication param is not being set through an EnvironmentConfig + * class, and a non-rep param is not set through a ReplicationConfig + * instance. + * + * This may happen at Environment start time, or when configurations have + * been mutated. The configurations have been collected from a file, or + * from a Properties object, and haven't gone through the usual validation + * path that occurs when XXXConfig.setConfigParam is called. + * + * SuppressWarnings is used here because Enumeration doesn't work well with + * Properties in Java 1.5 + * + * @throws IllegalArgumentException via XxxConfig(Properties) ctor. 
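+     * For example, setting a replication parameter such as je.rep.groupName
+     * through an EnvironmentConfig fails this way, since replication
+     * parameters may only be set through a ReplicationConfig.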
+ */ + @SuppressWarnings("unchecked") + public static void validateProperties(Properties props, + boolean isRepConfigInstance, + String configClassName) + throws IllegalArgumentException { + + /* Check that the properties have valid names and values. */ + Enumeration propNames = props.propertyNames(); + while (propNames.hasMoreElements()) { + String name = (String) propNames.nextElement(); + /* Is this a valid property name? */ + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(name); + + if (param == null) { + /* See if the parameter is an multi-value parameter. */ + String mvParamName = ConfigParam.multiValueParamName(name); + param = EnvironmentParams.SUPPORTED_PARAMS.get(mvParamName); + + if (param == null) { + + /* + * Remove the property only if: + * 1. The parameter name indicates it's a replication + * parameter + * 2. The Environment is being opened in standalone mode + * 3. The parameter is being initialized in the properties + * file + * See SR [#19080]. + */ + if (configClassName == null && !isRepConfigInstance && + name.contains(EnvironmentParams.REP_PARAM_PREFIX)) { + props.remove(name); + continue; + } + + throw new IllegalArgumentException + (name + + " is not a valid BDBJE environment configuration"); + } + } + + /* + * Only verify that the parameter is "for replication" if this is + * being validated on behalf of a FooConfig class, not a + * je.properties file. + */ + if (configClassName != null) { + /* We're validating a config instance, not a file. */ + if (isRepConfigInstance) { + if (!param.isForReplication()) { + throw new IllegalArgumentException + (name + + " is not a replication parameter and cannot " + + " be set through " + configClassName); + } + } else { + if (param.isForReplication()) { + throw new IllegalArgumentException + (name + + " is a replication parameter and cannot be set " + + " through " + configClassName); + } + } + } + + /* Is this a valid property value? */ + param.validateValue(props.getProperty(name)); + } + } + + /** + * Apply the configurations specified in the je.properties file to override + * the programmatically set configuration values held in the property bag. + * + * @throws IllegalArgumentException via XxxConfig(Properties) ctor. + */ + @SuppressWarnings("unchecked") + public static void applyFileConfig(File envHome, + Properties props, + boolean forReplication) + throws IllegalArgumentException { + + File paramFile = null; + try { + Properties fileProps = new Properties(); + if (envHome != null) { + if (envHome.isFile()) { + paramFile = envHome; + } else { + paramFile = new File(envHome, PROPFILE_NAME); + } + FileInputStream fis = new FileInputStream(paramFile); + fileProps.load(fis); + fis.close(); + } + + /* + * Validate the existing file. No config instance name is used + * because we're validating a je.properties file. + */ + validateProperties(fileProps, + false, + null); /* config instance name, don't use. */ + + /* Add them to the configuration object. */ + Iterator iter = fileProps.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry propPair = (Map.Entry) iter.next(); + String name = (String) propPair.getKey(); + String value = (String) propPair.getValue(); + setConfigParam(props, + name, + value, + false, /* don't need mutability, we're + initializing */ + false, /* value already validated when set in + config object */ + forReplication, + false); /* verifyForReplication */ + } + } catch (FileNotFoundException e) { + + /* + * Klockwork - ok + * Eat the exception, okay if the file doesn't exist. 
+ */ + } catch (IOException e) { + IllegalArgumentException e2 = new IllegalArgumentException + ("An error occurred when reading " + paramFile); + e2.initCause(e); + throw e2; + } + } + + /** + * Helper method for environment and replication configuration classes. + * Set a configuration parameter. Check that the name is valid. + * If specified, also check that the value is valid.Value checking + * may be disabled for unit testing. + * + * @param props Property bag held within the configuration object. + * + * @throws IllegalArgumentException via XxxConfig.setXxx methods and + * XxxConfig(Properties) ctor. + */ + public static void setConfigParam(Properties props, + String paramName, + String value, + boolean requireMutability, + boolean validateValue, + boolean forReplication, + boolean verifyForReplication) + throws IllegalArgumentException { + + boolean isMVParam = false; + + /* Is this a valid property name? */ + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + + if (param == null) { + /* See if the parameter is an multi-value parameter. */ + String mvParamName = ConfigParam.multiValueParamName(paramName); + param = EnvironmentParams.SUPPORTED_PARAMS.get(mvParamName); + if (param == null || + !param.isMultiValueParam()) { + throw new IllegalArgumentException + (paramName + + " is not a valid BDBJE environment parameter"); + } + isMVParam = true; + assert param.isMultiValueParam(); + } + + /* + * Only verify that the parameter is "for replication" if this is + * being validated on behalf of a FooConfig class, not a + * je.properties file. + */ + if (verifyForReplication) { + if (forReplication) { + if (!param.isForReplication()) { + throw new IllegalArgumentException + (paramName + + " is not a replication parameter."); + } + } else { + if (param.isForReplication()) { + throw new IllegalArgumentException + (paramName + + " is a replication parameter and cannot be " + + " set through this configuration class."); + } + } + } + + /* Is this a mutable property? */ + if (requireMutability && !param.isMutable()) { + throw new IllegalArgumentException + (paramName + + " is not a mutable BDBJE environment configuration"); + } + + if (isMVParam) { + setVal(props, param, paramName, value, validateValue); + } else { + setVal(props, param, value, validateValue); + } + } + + /** + * Helper method for environment and replication configuration classes. + * Get the configuration value for the specified parameter, checking + * that the parameter name is valid. + * + * @param props Property bag held within the configuration object. + * + * @throws IllegalArgumentException via XxxConfig.getConfigParam. + */ + public static String getConfigParam(Properties props, String paramName) + throws IllegalArgumentException { + + boolean isMVParam = false; + + /* Is this a valid property name? */ + ConfigParam param = EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + + if (param == null) { + + /* See if the parameter is an multi-value parameter. 
*/ + String mvParamName = ConfigParam.multiValueParamName(paramName); + param = EnvironmentParams.SUPPORTED_PARAMS.get(mvParamName); + if (param == null) { + throw new IllegalArgumentException + (paramName + + " is not a valid BDBJE environment configuration"); + } + isMVParam = true; + assert param.isMultiValueParam(); + } else if (param.isMultiValueParam()) { + throw new IllegalArgumentException + ("Use getMultiValueValues() to retrieve Multi-Value " + + "parameter values."); + } + + if (isMVParam) { + return DbConfigManager.getVal(props, param, paramName); + } + return DbConfigManager.getVal(props, param); + } + + /** + * Helper method for environment and replication configuration classes. + * Gets either the value stored in this configuration or the + * default value for this param. + */ + public static String getVal(Properties props, + ConfigParam param) { + String val = props.getProperty(param.getName()); + if (val == null) { + val = param.getDefault(); + } + return val; + } + + /** + * Helper method for environment and replication configuration classes. + * Gets either the value stored in this configuration or the + * default value for this param. + */ + public static String getVal(Properties props, + ConfigParam param, + String paramName) { + String val = props.getProperty(paramName); + if (val == null) { + val = param.getDefault(); + } + return val; + } + + /** + * Helper method for environment and replication configuration classes. + * Set and validate the value for the specified parameter. + */ + public static void setVal(Properties props, + ConfigParam param, + String val, + boolean validateValue) + throws IllegalArgumentException { + + if (validateValue) { + param.validateValue(val); + } + props.setProperty(param.getName(), val); + } + + /** + * Helper method for environment and replication configuration classes. + * Set and validate the value for the specified parameter. + */ + public static void setVal(Properties props, + ConfigParam param, + String paramName, + String val, + boolean validateValue) + throws IllegalArgumentException { + + if (validateValue) { + param.validateValue(val); + } + props.setProperty(paramName, val); + } + + /** + * Helper method for getting integer values. + */ + public static int getIntVal(Properties props, IntConfigParam param) { + String val = DbConfigManager.getVal(props, param); + if (val == null) { + throw EnvironmentFailureException.unexpectedState + ("No value for " + param.getName()); + } + try { + return Integer.parseInt(val); + } catch (NumberFormatException e) { + throw EnvironmentFailureException.unexpectedState + ("Bad value for " + param.getName()+ ": " + e.getMessage()); + } + } + + /** + * Helper method for setting integer values. + */ + public static void setIntVal(Properties props, + IntConfigParam param, + int val, + boolean validateValue) { + setVal(props, param, Integer.toString(val), validateValue); + } + + /** + * Helper method for getting long values. + */ + public static long getLongVal(Properties props, LongConfigParam param) { + String val = DbConfigManager.getVal(props, param); + if (val == null) { + throw EnvironmentFailureException.unexpectedState + ("No value for " + param.getName()); + } + try { + return Long.parseLong(val); + } catch (NumberFormatException e) { + throw EnvironmentFailureException.unexpectedState + ("Bad value for " + param.getName()+ ": " + e.getMessage()); + } + } + + /** + * Helper method for getting boolean values. 
+ */ + public static boolean getBooleanVal(Properties props, + BooleanConfigParam param) { + String val = DbConfigManager.getVal(props, param); + if (val == null) { + throw EnvironmentFailureException.unexpectedState + ("No value for " + param.getName()); + } + return parseBoolean(val); + } + + /** + * Helper method for setting boolean values. + */ + public static void setBooleanVal(Properties props, + BooleanConfigParam param, + boolean val, + boolean validateValue) { + setVal(props, param, Boolean.toString(val), validateValue); + } + + /** + * Helper method for getting duration values. + */ + public static long getDurationVal(Properties props, + DurationConfigParam param, + TimeUnit unit) { + if (unit == null) { + throw new IllegalArgumentException + ("TimeUnit argument may not be null"); + } + String val = DbConfigManager.getVal(props, param); + if (val == null) { + throw EnvironmentFailureException.unexpectedState + ("No value for " + param.getName()); + } + try { + return unit.convert(PropUtil.parseDuration(val), + TimeUnit.MILLISECONDS); + } catch (IllegalArgumentException e) { + throw EnvironmentFailureException.unexpectedState + ("Bad value for " + param.getName()+ ": " + e.getMessage()); + } + } + + /** + * Helper method for setting duration values. + */ + public static void setDurationVal(Properties props, + DurationConfigParam param, + long val, + TimeUnit unit, + boolean validateValue) { + setVal(props, param, PropUtil.formatDuration(val, unit), + validateValue); + } + + /** + * Ensures that leading and trailing whitespace is ignored when parsing a + * boolean. It is ignored by BooleanConfigParam.validateValue, so it must + * be ignored here also. [#22212] + */ + private static boolean parseBoolean(String val) { + if (val == null) { + return false; + } + return Boolean.parseBoolean(val.trim()); + } +} diff --git a/src/com/sleepycat/je/dbi/DbEnvPool.java b/src/com/sleepycat/je/dbi/DbEnvPool.java new file mode 100644 index 0000000..0816532 --- /dev/null +++ b/src/com/sleepycat/je/dbi/DbEnvPool.java @@ -0,0 +1,456 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; + +/** + * Singleton collection of environments. Responsible for environment open and + * close, supporting this from multiple threads by synchronizing on the pool. 
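+ *
+ * For example, two Environment handles opened on the same directory share a
+ * single EnvironmentImpl; the pool's open count keeps the underlying
+ * environment alive until the last handle is closed (illustrative sketch):
+ *
+ *   Environment e1 = new Environment(home, config);
+ *   Environment e2 = new Environment(home, config); // same EnvironmentImpl
+ *   e1.close();   // open count 2 -> 1, stays open
+ *   e2.close();   // open count 1 -> 0, environment really closes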
+ *
+ * To avoid multiple environment openings from being blocked by recovery in
+ * getEnvironment(), the EnvironmentImpl constructor is broken into two parts,
+ * with the second part (EnvironmentImpl.finishInit) doing the recovery.
+ *
+ * When synchronizing on two or more of the following objects, the
+ * synchronization order must be as follows. Synchronization is not performed
+ * in constructors, of course, because no other thread can access the object.
+ *
+ * Synchronization order: Environment, DbEnvPool, EnvironmentImpl, Evictor
+ */
+public class DbEnvPool {
+    /* Singleton instance. */
+    private static DbEnvPool pool = new DbEnvPool();
+
+    /*
+     * Collection of environment handles, mapped by canonical directory
+     * name->EnvironmentImpl object.
+     */
+    private final Map<String, EnvironmentImpl> envs;
+
+    /* Environments (subset of envs) that share the global cache. */
+    private final Set<EnvironmentImpl> sharedCacheEnvs;
+
+    /* Test hook, used during Environment creation. */
+    private TestHook<EnvironmentImpl> beforeFinishInitHook;
+
+    /**
+     * Enforce singleton behavior.
+     */
+    private DbEnvPool() {
+        envs = new HashMap<String, EnvironmentImpl>();
+        sharedCacheEnvs = new HashSet<EnvironmentImpl>();
+    }
+
+    /**
+     * Access the singleton instance.
+     */
+    public static DbEnvPool getInstance() {
+        return pool;
+    }
+
+    public void setBeforeFinishInitHook(TestHook<EnvironmentImpl> hook) {
+        beforeFinishInitHook = hook;
+    }
+
+    public synchronized int getNSharedCacheEnvironments() {
+        return sharedCacheEnvs.size();
+    }
+
+    private EnvironmentImpl getAnySharedCacheEnv() {
+        Iterator<EnvironmentImpl> iter = sharedCacheEnvs.iterator();
+        return iter.hasNext() ? iter.next() : null;
+    }
+
+    /**
+     * Find a single environment, used by Environment handles and by command
+     * line utilities.
+     *
+     * @return a non-null EnvironmentImpl.
+     */
+    public EnvironmentImpl getEnvironment(File envHome,
+                                          EnvironmentConfig config,
+                                          boolean checkImmutableParams,
+                                          RepConfigProxy repConfigProxy)
+        throws EnvironmentNotFoundException, EnvironmentLockedException {
+
+        String environmentKey = null;
+        EnvironmentImpl envImpl = null;
+        synchronized (this) {
+            environmentKey = getEnvironmentMapKey(envHome);
+            envImpl = envs.get(environmentKey);
+
+            if (envImpl != null) {
+
+                /*
+                 * If the envImpl instance returned is standalone, but users
+                 * are actually creating a replicated environment, throw an
+                 * UnsupportedOperationException. We needn't worry about the
+                 * read only property, since a replicated environment can't be
+                 * read only.
+                 */
+                if (!envImpl.isReplicated() && (repConfigProxy != null)) {
+                    throw new UnsupportedOperationException
+                        ("This environment was previously opened as a " +
+                         "standalone environment. It cannot be re-opened " +
+                         "for replication.");
+                }
+
+                /*
+                 * If the envImpl instance returned is replicated, but users
+                 * are actually creating a standalone environment and it is
+                 * not read only, throw an UnsupportedOperationException.
+                 */
+                if (envImpl.isReplicated() && (repConfigProxy == null) &&
+                    !config.getReadOnly()) {
+                    throw new UnsupportedOperationException
+                        ("This environment was previously opened for " +
+                         "replication. It cannot be re-opened in read/write " +
+                         "mode for standalone operation.");
+                }
+
+                /* Disallow re-opening while an Arbiter uses the directory. */
+                if (envImpl.isArbiter()) {
+                    throw new UnsupportedOperationException(
+                        "An Arbiter is currently using " +
+                        "this directory.
" + envHome.getAbsolutePath()); + } + + envImpl.checkIfInvalid(); + + if (checkImmutableParams) { + + /* + * If a non-null configuration parameter was passed to the + * Environment ctor and the underlying EnvironmentImpl + * already exists, check that the configuration parameters + * specified match those of the currently open environment. + * An exception is thrown if the check fails. + * + * Don't do this check if we create the environment here + * because the creation might modify the parameters, which + * would create a Catch-22 in terms of validation. For + * example, je.maxMemory will be overridden if the JVM's + * -mx flag is less than that setting, so the new resolved + * config parameters won't be the same as the passed + * in config. + */ + envImpl.checkImmutablePropsForEquality + (DbInternal.getProps(config)); + } + /* Successful, increment reference count */ + envImpl.incOpenCount(); + } else { + + /* + * If a shared cache is used, get another (any other, + * doesn't matter which) environment that is sharing the + * global cache. + */ + EnvironmentImpl sharedCacheEnv = config.getSharedCache() ? + getAnySharedCacheEnv() : null; + + /* + * Environment must be instantiated. If it can be created, + * the configuration must have allowCreate set. Note that + * the environment is added to the SharedEvictor before the + * EnvironmentImpl ctor returns, by + * RecoveryManager.buildTree. + */ + envImpl = + (repConfigProxy == null) ? + new EnvironmentImpl(envHome, config, sharedCacheEnv) : + loadRepImpl(envHome, config, sharedCacheEnv, + repConfigProxy); + assert config.getSharedCache() == envImpl.getSharedCache(); + + envImpl.incOpenCount(); + envs.put(environmentKey, envImpl); + addToSharedCacheEnvs(envImpl); + } + } + + /* + * An new EnvironmentImpl was created. Call finishInit outside the + * synchronized block to support concurrent recovery for more than one + * environment. + * + * Note that finishInit must be called even if an existing envImpl was + * found, because initialization (recovery) for that envImpl may not be + * complete. finishInit is synchronized and this ensures that recovery + * will be complete when it returns. + * + * If this environment finishInit() fails in any way, make sure it + * is removed from the envs map. If it isn't, it will block all + * future attempts to create the environment. + */ + TestHookExecute.doHookIfSet(beforeFinishInitHook, envImpl); + boolean success = false; + try { + if (envImpl.finishInit(config)) { + /* Initialization (recovery) was performed. */ + synchronized(this) { + finishAdditionOfSharedCacheEnv(envImpl); + } + } + success = true; + } finally { + if (!success) { + synchronized(this) { + envs.remove(environmentKey); + sharedCacheEnvs.remove(envImpl); + } + } + } + + return envImpl; + } + + /** + * Use reflection to create a RepImpl, to avoid introducing HA compilation + * dependencies to non-replication code. + */ + private EnvironmentImpl loadRepImpl(File envHome, + EnvironmentConfig config, + EnvironmentImpl sharedCacheEnv, + RepConfigProxy repConfigProxy) + throws DatabaseException { + + final String repClassName = "com.sleepycat.je.rep.impl.RepImpl"; + final String envImplName = "com.sleepycat.je.dbi.EnvironmentImpl"; + final String repProxy = "com.sleepycat.je.dbi.RepConfigProxy"; + try { + final Class repClass = Class.forName(repClassName); + return (EnvironmentImpl) + repClass.getConstructor(envHome.getClass(), + config.getClass(), + Class.forName(envImplName), + Class.forName(repProxy)). 
+                newInstance(envHome, config, sharedCacheEnv, repConfigProxy);
+        } catch (InvocationTargetException e) {
+            if (e.getCause() instanceof RuntimeException) {
+                /* Propagate runtime exceptions thrown by the ctor. */
+                throw (RuntimeException) e.getCause();
+            }
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (Exception e) {
+
+            /*
+             * This intentionally violates our guideline for not catching
+             * Exception in order to avoid many catches for all the checked
+             * exceptions thrown by Class.forName and Class.getConstructor. No
+             * other methods in the try block throw checked exceptions.
+             */
+            throw EnvironmentFailureException.unexpectedException(e);
+        }
+    }
+
+    /* Add this environment to the sharedCache environments list. */
+    private void addToSharedCacheEnvs(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        if (envImpl.getSharedCache()) {
+            if (sharedCacheEnvs.contains(envImpl)) {
+                throw EnvironmentFailureException.unexpectedState();
+            }
+            sharedCacheEnvs.add(envImpl);
+        }
+    }
+
+    /* Post-processing of SharedCacheEnv addition, after recovery is done. */
+    private void finishAdditionOfSharedCacheEnv(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        if (envImpl.getSharedCache()) {
+            if (!sharedCacheEnvs.contains(envImpl)) {
+                throw EnvironmentFailureException.unexpectedState();
+            }
+            assert envImpl.getEvictor().checkEnv(envImpl);
+            resetSharedCache(-1, envImpl);
+        }
+    }
+
+    /**
+     * Called by EnvironmentImpl.setMutableConfig to perform the
+     * setMutableConfig operation while synchronized on the DbEnvPool.
+     *
+     * In theory we shouldn't need to synchronize here when
+     * envImpl.getSharedCache() is false; however, we synchronize
+     * unconditionally to standardize the synchronization order and avoid
+     * accidental deadlocks.
+     */
+    synchronized void setMutableConfig(EnvironmentImpl envImpl,
+                                       EnvironmentMutableConfig mutableConfig)
+        throws DatabaseException {
+
+        envImpl.doSetMutableConfig(mutableConfig);
+        if (envImpl.getSharedCache()) {
+            resetSharedCache(envImpl.getMemoryBudget().getMaxMemory(),
+                             envImpl);
+        }
+    }
+
+    /**
+     * Called by EnvironmentImpl.close to perform the close operation while
+     * synchronized on the DbEnvPool.
+     *
+     * Synchronization on this DbEnvPool during the close is used to protect
+     * its data structures. Unfortunately, this means that a long checkpoint
+     * during a close will block other closes and opens. We may want to
+     * improve this in the future. However, at least there is a user
+     * workaround: perform a full checkpoint before closing the environment.
+     */
+    synchronized void closeEnvironment(EnvironmentImpl envImpl,
+                                       boolean doCheckpoint,
+                                       boolean isAbnormalClose) {
+        synchronized (envImpl) {
+            /* Hold the reference count stable. */
+            if (envImpl.decOpenCount()) {
+                try {
+                    envImpl.doClose(doCheckpoint, isAbnormalClose);
+                } finally {
+                    removeEnvironment(envImpl);
+                }
+            }
+        }
+    }
+
+    /**
+     * Called by EnvironmentImpl.closeAfterInvalid to perform the close
+     * operation while synchronized on the DbEnvPool.
+     */
+    synchronized void closeEnvironmentAfterInvalid(EnvironmentImpl envImpl)
+        throws DatabaseException {
+
+        try {
+            envImpl.doCloseAfterInvalid();
+        } finally {
+            removeEnvironment(envImpl);
+        }
+    }
+
+    /**
+     * Removes an EnvironmentImpl from the pool after it has been closed. This
+     * method is called while synchronized. Note that the environment was
+     * removed from the SharedEvictor by EnvironmentImpl.shutdownEvictor.
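+     * When the last environment sharing the global cache is removed, the
+     * shared Evictor and off-heap cache are shut down here as well;
+     * otherwise the remaining environments' budgets are recalculated via
+     * resetSharedCache.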
+ */ + private void removeEnvironment(EnvironmentImpl envImpl) { + + final String environmentKey = + getEnvironmentMapKey(envImpl.getEnvironmentHome()); + + final boolean found = envs.remove(environmentKey) != null; + + if (sharedCacheEnvs.remove(envImpl)) { + + assert found && envImpl.getSharedCache(); + assert !envImpl.getEvictor().checkEnv(envImpl); + + if (sharedCacheEnvs.isEmpty()) { + envImpl.getEvictor().shutdown(); + envImpl.getOffHeapCache().shutdown(); + } else { + envImpl.getMemoryBudget().subtractCacheUsage(); + resetSharedCache(-1, null); + } + } else { + assert !found || !envImpl.getSharedCache(); + } + + /* + * Latch notes may only be cleared when there is no possibility that + * any environment is open. + */ + if (envs.isEmpty()) { + LatchSupport.clear(); + } + } + + /** + * For unit testing only. + */ + public synchronized void clear() { + envs.clear(); + } + + public synchronized Collection getEnvImpls() { + return envs.values(); + } + + public synchronized boolean isOpen(final File home) { + return envs.containsKey(getEnvironmentMapKey(home)); + } + + /* Use the canonical path name for a normalized environment key. */ + String getEnvironmentMapKey(File file) + throws DatabaseException { + + try { + return file.getCanonicalPath(); + } catch (IOException e) { + /* No env is available, can't throw EnvironmentFailedException. */ + throw EnvironmentFailureException.unexpectedException(e); + } + } + + /** + * Resets the memory budget for all environments with a shared cache. + * + * @param newMaxMemory is the new total cache budget or is less than 0 if + * the total should remain unchanged. A total greater than zero is given + * when it has changed via setMutableConfig. + * + * @param skipEnv is an environment that should not be reset, or null. + * Non-null is passed when an environment has already been reset because + * it was just created or the target of setMutableConfig. + */ + private void resetSharedCache(long newMaxMemory, EnvironmentImpl skipEnv) + throws DatabaseException { + + for (EnvironmentImpl envImpl : sharedCacheEnvs) { + + /* + * To avoid spurious exceptions, don't reset invalid envs that have + * not yet been removed. They aren't usable, and we expect them + * to be closed and removed very soon. + */ + if (envImpl != skipEnv && envImpl.isValid()) { + envImpl.getMemoryBudget().reset(newMaxMemory, + false /*newEnv*/, + envImpl.getConfigManager()); + } + } + } +} diff --git a/src/com/sleepycat/je/dbi/DbEnvState.java b/src/com/sleepycat/je/dbi/DbEnvState.java new file mode 100644 index 0000000..7142a92 --- /dev/null +++ b/src/com/sleepycat/je/dbi/DbEnvState.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + + +/** + * DbEnvState implements a typesafe enumeration of environment states + * and does state change validation. + */ +class DbEnvState { + private static final boolean DEBUG = false; + + private String name; + + /* Valid environment states. 
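+     * For example, calling checkState(VALID_FOR_CLOSE, CLOSED) on the OPEN
+     * state succeeds, while calling it on CLOSED throws
+     * IllegalStateException, since CLOSED is not in VALID_FOR_CLOSE.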
*/ + public static final DbEnvState INIT = new DbEnvState("initialized"); + public static final DbEnvState OPEN = new DbEnvState("open"); + public static final DbEnvState CLOSED = new DbEnvState("closed"); + public static final DbEnvState INVALID = new DbEnvState("invalid"); + + /* Valid previous states, for state transition checking. */ + public static final DbEnvState[] VALID_FOR_CLOSE = {INIT, OPEN, INVALID}; + /* Not currently used: + public static final DbEnvState[] VALID_FOR_OPEN = {INIT, CLOSED}; + public static final DbEnvState[] VALID_FOR_REMOVE = {INIT, CLOSED}; + */ + + DbEnvState(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + + /* Check for valid state transitions. */ + void checkState(DbEnvState[] validPrevStates, DbEnvState newState) { + if (DEBUG) { + System.out.println("newState = " + newState + + " currentState = " + name); + } + boolean transitionOk = false; + for (int i = 0; i < validPrevStates.length; i++) { + if (this == validPrevStates[i]) { + transitionOk = true; + break; + } + } + if (!transitionOk) { + throw new IllegalStateException + ("Can't go from environment state " + toString() + + " to " + newState.toString()); + } + } +} diff --git a/src/com/sleepycat/je/dbi/DbTree.java b/src/com/sleepycat/je/dbi/DbTree.java new file mode 100644 index 0000000..1723e61 --- /dev/null +++ b/src/com/sleepycat/je/dbi/DbTree.java @@ -0,0 +1,2107 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import static com.sleepycat.je.log.entry.DbOperationType.CREATE; +import static com.sleepycat.je.log.entry.DbOperationType.REMOVE; +import static com.sleepycat.je.log.entry.DbOperationType.RENAME; +import static com.sleepycat.je.log.entry.DbOperationType.TRUNCATE; +import static com.sleepycat.je.log.entry.DbOperationType.UPDATE_CONFIG; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.log.DbOpReplicationContext; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeUtils; +import com.sleepycat.je.tree.WithRootLatched; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.HandleLocker; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.utilint.StringUtils; + +/** + * DbTree represents the database directory for this environment. DbTree is + * itself implemented through two databases. The nameDatabase maps + * databaseName-> an internal databaseId. The idDatabase maps + * databaseId->DatabaseImpl. + * + * For example, suppose we have two databases, foo and bar. We have the + * following structure: + * + * nameDatabase idDatabase + * IN IN + * | | + * BIN BIN + * +-------------+--------+ +---------------+--------+ + * . | | . | | + * NameLNs NameLN NameLN MapLNs for MapLN MapLN + * for internal key=bar key=foo internal dbs key=53 key=79 + * dbs data= data= data= data= + * dbId79 dbId53 DatabaseImpl DatabaseImpl + * | | + * Tree for foo Tree for bar + * | | + * root IN root IN + * + * Databases, Cursors, the cleaner, compressor, and other entities have + * references to DatabaseImpls. It's important that object identity is properly + * maintained, and that all constituents reference the same DatabaseImpl for + * the same db, lest they develop disparate views of the in-memory database; + * corruption would ensue. To ensure that, all entities must obtain their + * DatabaseImpl by going through the idDatabase. + * + * DDL type operations such as create, rename, remove and truncate get their + * transactional semantics by transactionally locking the NameLN appropriately. + * A read-lock on the NameLN, called a handle lock, is maintained for all DBs + * opened via the public API (openDatabase). This prevents them from being + * renamed or removed while open. See HandleLocker for details. 
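+ *
+ * The getDb/releaseDb pattern described below looks like this in a caller
+ * (illustrative sketch):
+ *
+ *   DatabaseImpl db = dbTree.getDb(dbId); // brief MapLN lock, useCount++
+ *   try {
+ *       // operate on db without holding the MapLN lock
+ *   } finally {
+ *       dbTree.releaseDb(db);             // useCount--
+ *   }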
+ *
+ * However, for internal database operations, no handle lock on the NameLN is
+ * acquired and MapLNs are locked with short-lived non-transactional Lockers.
+ * An entity that is trying to get a reference to the DatabaseImpl gets a
+ * short-lived read lock just for the fetch of the MapLN, and a DatabaseImpl
+ * usage count is incremented to prevent eviction; see getDb and releaseDb. A
+ * write lock on the MapLN is taken when the database is created, deleted, or
+ * when the MapLN is evicted. (see DatabaseImpl.isInUse())
+ *
+ * The nameDatabase operates pretty much as a regular application database in
+ * terms of eviction and recovery. The idDatabase requires special treatment
+ * for both eviction and recovery.
+ *
+ * The issues around eviction of the idDatabase center on the need to ensure
+ * that there are no other current references to the DatabaseImpl other than
+ * that held by the mapLN. The presence of a current reference would both make
+ * the DatabaseImpl not GC'able, and more importantly, would lead to object
+ * identity confusion later on. For example, if the MapLN is evicted while
+ * there is a current reference to its DatabaseImpl, and then refetched, there
+ * will be two in-memory versions of the DatabaseImpl. Since locks on the
+ * idDatabase are short lived, DatabaseImpl.useCount acts as a reference count
+ * of active current references. DatabaseImpl.useCount must be modified and
+ * read in conjunction with appropriate locking on the MapLN. See
+ * DatabaseImpl.isInUse() for details.
+ *
+ * This reference count checking is only needed when the entire MapLN is
+ * evicted. It's possible to evict only the root IN of the database in
+ * question, since that doesn't interfere with the DatabaseImpl object
+ * identity.
+ *
+ * Another dependency on usage counts was introduced to prevent MapLN deletion
+ * during cleaner and checkpointer operations that are processing entries for a
+ * DB. (Without usage counts, this problem would have occurred even if DB
+ * eviction were never implemented.) When the usage count is non-zero it
+ * prohibits deleteMapLN from running. The deleted state of the MapLN must not
+ * change during a reader operation (operation by a thread that has called
+ * getDb and not yet called releaseDb).
+ *
+ * Why not just hold a MapLN read lock during a reader operation?
+ * --------------------------------------------------------------
+ * Originally this was not done because of cleaner performance. We were afraid
+ * that either of the following solutions would not perform well:
+ * + If we get (and release) a MapLN lock for every entry in a log file, this
+ *   adds a lot of per-entry overhead.
+ * + If we hold the MapLN read lock for the duration of a log file cleaning
+ *   (the assumption is that many entries are for the same DB), then we block
+ *   checkpoints during that period, when they call modifyDbRoot.
+ * Therefore, the usage count is incremented once per DB encountered during log
+ * cleaning, and the count is decremented at the end. This caching approach is
+ * also used by the HA replayer. In both cases, we do not want to lock the
+ * MapLN every entry/operation, and we do not want to block checkpoints or
+ * other callers of modifyDbRoot. It is acceptable, however, to block DB
+ * naming operations.
+ *
+ * In addition we allow modifyDbRoot to run even when the usage count is
+ * non-zero, which would not be possible using a read-write locking strategy.
+ * I'm not sure why this was done originally, perhaps to avoid blocking.
+ *
+ * In addition, we allow modifyDbRoot to run even when the usage count is
+ * non-zero, which would not be possible using a read-write locking strategy.
+ * I'm not sure why this was done originally; perhaps it was to avoid blocking.
+ * But currently it is necessary to prevent a self-deadlock. All callers of
+ * modifyDbRoot first call getDb, which increments the usage count. So if
+ * modifyDbRoot were to check the usage count and retry if non-zero (like
+ * deleteMapLN), then it would loop forever.
+ *
+ * Why are the retry loops necessary in the DbTree methods?
+ * --------------------------------------------------------
+ * Three methods that access the MapLN perform retries (forever) when there is
+ * a lock conflict: getDb, modifyDbRoot and deleteMapLN. Initially the retry
+ * loops were added to compensate for certain slow operations; there may be
+ * alternative solutions to that problem (such as increasing the lock
+ * timeout). However, the deleteMapLN retry loop is necessary to avoid
+ * deleting it when the DB is in use by reader operations.
+ *
+ * Tendency to livelock
+ * --------------------
+ * Because MapLN locks are short lived, but a reader operation may hold a
+ * MapLN/DatabaseImpl for a longer period by incrementing the usage count,
+ * there is the possibility of livelock. One strategy for avoiding livelock is
+ * to avoid algorithms where multiple threads continuously call getDb and
+ * releaseDb, since this could prevent completion of deleteMapLN. [#20816]
+ */
+public class DbTree implements Loggable {
+
+    /* The id->DatabaseImpl tree is always id 0 */
+    public static final DatabaseId ID_DB_ID = new DatabaseId(0);
+    /* The name->id tree is always id 1 */
+    public static final DatabaseId NAME_DB_ID = new DatabaseId(1);
+
+    /** Map from internal DB name to type. */
+    private final static Map<String, DbType> INTERNAL_TYPES_BY_NAME;
+    static {
+        final EnumSet<DbType> set = EnumSet.allOf(DbType.class);
+        INTERNAL_TYPES_BY_NAME = new HashMap<String, DbType>(set.size());
+        for (DbType t : set) {
+            if (t.isInternal()) {
+                INTERNAL_TYPES_BY_NAME.put(t.getInternalName(), t);
+            }
+        }
+    }
+
+    /**
+     * Returns the DbType for a given DB name.
+     *
+     * Note that we allow dbName to be null, because it may be null when the
+     * 'debug database name' is not yet known to DatabaseImpl. This works
+     * because the debug name is always known immediately for internal DBs.
+     */
+    public static DbType typeForDbName(String dbName) {
+        final DbType t = INTERNAL_TYPES_BY_NAME.get(dbName);
+        if (t != null) {
+            return t;
+        }
+        return DbType.USER;
+    }
+
+    /*
+     * Database Ids:
+     * We need to ensure that local and replicated databases use different
+     * number spaces for their ids, so there can't be any possible conflicts.
+     * Local, non-replicated databases use positive values, replicated
+     * databases use negative values. Values -1 thru NEG_DB_ID_START are
+     * reserved for future special use.
+     */
+    public static final long NEG_DB_ID_START = -256L;
+    private final AtomicLong lastAllocatedLocalDbId;
+    private final AtomicLong lastAllocatedReplicatedDbId;
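+
+    /*
+     * For example (see the constructors below): ids 0 and 1 are taken by the
+     * id and name DBs themselves, so the first local DB is assigned id 2,
+     * while the first replicated DB is assigned id -257, since replicated
+     * allocation starts at NEG_DB_ID_START (-256) and counts down.
+     */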
+
+    private final DatabaseImpl idDatabase;   // map db ids -> databases
+    private final DatabaseImpl nameDatabase; // map names -> dbIds
+
+    /*
+     * The log version at the time the env was created. Is -1 if the initial
+     * version is unknown, which means it is prior to version 15 because this
+     * field was added in version 15. For environments created with log version
+     * 15 and greater, no log entries can have a version LT this field's value.
+     */
+    private int initialLogVersion;
+
+    /* The flags byte holds a variety of attributes. */
+    private byte flags;
+
+    /*
+     * The replicated bit is set for environments that are opened with
+     * replication. The behavior is as follows:
+     *
+     *  Env is      Env is     Persistent           Follow-on action
+     *  replicated  brand new  value of
+     *                         DbTree.isReplicated
+     *
+     *  0           1          n/a                  replicated bit = 0
+     *  0           0          0                    none
+     *  0           0          1                    true for r/o, false for r/w
+     *  1           1          n/a                  replicated bit = 1
+     *  1           0          0                    require config of all dbs
+     *  1           0          1                    none
+     */
+    private static final byte REPLICATED_BIT = 0x1;
+
+    /*
+     * The rep converted bit is set when an environment was originally created
+     * as a standalone (non-replicated) environment, and has been changed to a
+     * replicated environment.
+     *
+     * The behaviors are as follows:
+     *
+     *  Value of      Value of the    What happens      Can open      Can open
+     *  RepConfig.    DbTree          when we call      as r/o        as r/w
+     *  allowConvert  replicated bit  ReplicatedEnv()   Environment   Environment
+     *                                                  later on?     later on?
+     *
+     *  false         false           throw exception,  Yes, because  Yes, because
+     *                                complain that     env is not    env is not
+     *                                env is not        converted     converted
+     *                                replicated
+     *
+     *  true          false           do conversion     Yes, always   No, this is
+     *                                                  ok to open a  now a
+     *                                                  replicated    replicated
+     *                                                  env with r/o  env
+     *
+     *  Ignore        true, or        open as a
+     *  allowConvert  brand new       replicated env    Yes           No
+     *                Environment     the usual way
+     */
+    private static final byte REP_CONVERTED_BIT = 0x2;
+
+    /*
+     * The dups converted bit is set when we have successfully converted all
+     * dups databases after recovery, to indicate that we don't need to perform
+     * this conversion again for this environment. It is set initially for a
+     * brand new environment that uses the new dup database format.
+     */
+    private static final byte DUPS_CONVERTED_BIT = 0x4;
+
+    /*
+     * The preserve VLSN bit is set in a replicated environment only, and may
+     * never be changed after initial environment creation. See
+     * RepParams.PRESERVE_RECORD_VERSION.
+     */
+    private static final byte PRESERVE_VLSN_BIT = 0x8;
+
+    /**
+     * Number of LNs in the naming DB considered to be fairly small, and
+     * therefore to result in fairly fast execution of getDbName.
+     */
+    private static final long FAST_NAME_LOOKUP_MAX_LNS = 100;
+
+    private EnvironmentImpl envImpl;
+
+    /**
+     * Create a dbTree from the log.
+     */
+    public DbTree() {
+        this.envImpl = null;
+        idDatabase = new DatabaseImpl();
+        idDatabase.setDebugDatabaseName(DbType.ID.getInternalName());
+
+        /*
+         * The default is false, but just in case we ever turn it on globally
+         * for testing this forces it off.
+         */
+        idDatabase.clearKeyPrefixing();
+        nameDatabase = new DatabaseImpl();
+        nameDatabase.clearKeyPrefixing();
+        nameDatabase.setDebugDatabaseName(DbType.NAME.getInternalName());
+
+        /* These sequences are initialized by readFromLog. */
+        lastAllocatedLocalDbId = new AtomicLong();
+        lastAllocatedReplicatedDbId = new AtomicLong();
+
+        initialLogVersion = -1;
+    }
+
+    /**
+     * Create a new dbTree for a new environment.
+     */
+    public DbTree(EnvironmentImpl env,
+                  boolean replicationIntended,
+                  boolean preserveVLSN)
+        throws DatabaseException {
+
+        this.envImpl = env;
+
+        /*
+         * Sequences must be initialized before any databases are created. 0
+         * and 1 are reserved, so we start at 2. We've set -1 through
+         * NEG_DB_ID_START aside for future use.
+         */
+        lastAllocatedLocalDbId = new AtomicLong(1);
+        lastAllocatedReplicatedDbId = new AtomicLong(NEG_DB_ID_START);
+
+        /* The id database is local */
+        DatabaseConfig idConfig = new DatabaseConfig();
+        idConfig.setReplicated(false /* replicated */);
+
+        /*
+         * The default is false, but just in case we ever turn it on globally
+         * for testing this forces it off.
+ */ + idConfig.setKeyPrefixing(false); + idDatabase = new DatabaseImpl(null, + DbType.ID.getInternalName(), + new DatabaseId(0), + env, + idConfig); + /* Force a reset if enabled globally. */ + idDatabase.clearKeyPrefixing(); + + DatabaseConfig nameConfig = new DatabaseConfig(); + nameConfig.setKeyPrefixing(false); + nameDatabase = new DatabaseImpl(null, + DbType.NAME.getInternalName(), + new DatabaseId(1), + env, + nameConfig); + /* Force a reset if enabled globally. */ + nameDatabase.clearKeyPrefixing(); + + if (replicationIntended) { + setIsReplicated(); + } + + if (preserveVLSN) { + setPreserveVLSN(); + } + + /* New environments don't need dup conversion. */ + setDupsConverted(); + + initialLogVersion = LogEntryType.LOG_VERSION; + } + + /** + * The last allocated local and replicated db ids are used for ckpts. + */ + public long getLastLocalDbId() { + return lastAllocatedLocalDbId.get(); + } + + public long getLastReplicatedDbId() { + return lastAllocatedReplicatedDbId.get(); + } + + /** + * We get a new database id of the appropriate kind when creating a new + * database. + */ + private long getNextLocalDbId() { + return lastAllocatedLocalDbId.incrementAndGet(); + } + + private long getNextReplicatedDbId() { + return lastAllocatedReplicatedDbId.decrementAndGet(); + } + + /** + * Initialize the db ids, from recovery. + */ + public void setLastDbId(long lastReplicatedDbId, long lastLocalDbId) { + lastAllocatedReplicatedDbId.set(lastReplicatedDbId); + lastAllocatedLocalDbId.set(lastLocalDbId); + } + + /** + * @return true if this id is for a replicated db. + */ + private boolean isReplicatedId(long id) { + return id < NEG_DB_ID_START; + } + + /* + * Tracks the lowest replicated database id used during a replay of the + * replication stream, so that it's available as the starting point if this + * replica transitions to being the master. + */ + public void updateFromReplay(DatabaseId replayDbId) { + assert !envImpl.isMaster(); + + final long replayVal = replayDbId.getId(); + + if (replayVal > 0 && !envImpl.isRepConverted()) { + throw EnvironmentFailureException.unexpectedState + ("replay database id is unexpectedly positive " + replayDbId); + } + + if (replayVal < lastAllocatedReplicatedDbId.get()) { + lastAllocatedReplicatedDbId.set(replayVal); + } + } + + /** + * Initialize the db tree during recovery, after instantiating the tree + * from the log. + * a. set up references to the environment impl + * b. check for replication rules. + */ + void initExistingEnvironment(EnvironmentImpl eImpl) + throws DatabaseException { + + eImpl.checkRulesForExistingEnv(isReplicated(), getPreserveVLSN()); + this.envImpl = eImpl; + idDatabase.setEnvironmentImpl(eImpl); + nameDatabase.setEnvironmentImpl(eImpl); + } + + /** + * Creates a new database object given a database name. + * + * Increments the use count of the new DB to prevent it from being evicted. + * releaseDb should be called when the returned object is no longer used, + * to allow it to be evicted. See DatabaseImpl.isInUse. [#13415] + */ + public DatabaseImpl createDb(Locker locker, + String databaseName, + DatabaseConfig dbConfig, + HandleLocker handleLocker) + throws DatabaseException { + + return doCreateDb(locker, + databaseName, + dbConfig, + handleLocker, + null, // replicatedLN + null); // repContext, to be decided by new db + } + + /** + * Create a database for internal use. It may or may not be replicated. 
+     * Since DatabaseConfig.replicated is true by default, be sure to
+     * set it to false if this is an internal, non-replicated database.
+     */
+    public DatabaseImpl createInternalDb(Locker locker,
+                                         String databaseName,
+                                         DatabaseConfig dbConfig)
+        throws DatabaseException {
+
+        /* Force all internal databases to not use key prefixing. */
+        dbConfig.setKeyPrefixing(false);
+        DatabaseImpl ret =
+            doCreateDb(locker,
+                       databaseName,
+                       dbConfig,
+                       null,  // handleLocker
+                       null,  // replicatedLN
+                       null); // repContext, to be decided by new db
+        /* Force a reset if enabled globally. */
+        ret.clearKeyPrefixing();
+        return ret;
+    }
+
+    /**
+     * Create a replicated database on this client node.
+     */
+    public DatabaseImpl createReplicaDb(Locker locker,
+                                        String databaseName,
+                                        DatabaseConfig dbConfig,
+                                        NameLN replicatedLN,
+                                        ReplicationContext repContext)
+        throws DatabaseException {
+
+        return doCreateDb(locker,
+                          databaseName,
+                          dbConfig,
+                          null, // handleLocker
+                          replicatedLN,
+                          repContext);
+    }
+
+    /**
+     * Create a database.
+     *
+     * Increments the use count of the new DB to prevent it from being evicted.
+     * releaseDb should be called when the returned object is no longer used,
+     * to allow it to be evicted. See DatabaseImpl.isInUse. [#13415]
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     */
+    private DatabaseImpl doCreateDb(Locker nameLocker,
+                                    String databaseName,
+                                    DatabaseConfig dbConfig,
+                                    HandleLocker handleLocker,
+                                    NameLN replicatedLN,
+                                    ReplicationContext repContext)
+        throws DatabaseException {
+
+        /* Create a new database object. */
+        DatabaseId newId = null;
+        long allocatedLocalDbId = 0;
+        long allocatedRepDbId = 0;
+        if (replicatedLN != null) {
+
+            /*
+             * This database was created on a master node and is being
+             * propagated to this client node.
+             */
+            newId = replicatedLN.getId();
+        } else {
+
+            /*
+             * This database has been created locally, either because this is
+             * a non-replicated node or this is the replicated group master.
+             */
+            if (envImpl.isReplicated() &&
+                dbConfig.getReplicated()) {
+                newId = new DatabaseId(getNextReplicatedDbId());
+                allocatedRepDbId = newId.getId();
+            } else {
+                newId = new DatabaseId(getNextLocalDbId());
+                allocatedLocalDbId = newId.getId();
+            }
+        }
+
+        DatabaseImpl newDb = null;
+        CursorImpl idCursor = null;
+        CursorImpl nameCursor = null;
+        boolean operationOk = false;
+        Locker idDbLocker = null;
+        try {
+            newDb = new DatabaseImpl(nameLocker,
+                                     databaseName, newId, envImpl, dbConfig);
+
+            /* Get effective rep context and check for replica write. */
+            ReplicationContext useRepContext = repContext;
+            if (repContext == null) {
+                useRepContext = newDb.getOperationRepContext(CREATE);
+            }
+            checkReplicaWrite(nameLocker, useRepContext);
+
+            /* Insert it into name -> id db. */
+            nameCursor = new CursorImpl(nameDatabase, nameLocker);
+            LN nameLN = null;
+            if (replicatedLN != null) {
+                nameLN = replicatedLN;
+            } else {
+                nameLN = new NameLN(newId);
+            }
+
+            nameCursor.insertRecord(
+                StringUtils.toUTF8(databaseName), // key
+                nameLN, false /*blindInsertion*/, useRepContext);
+
+            /* Record handle lock. */
+            if (handleLocker != null) {
+                acquireHandleLock(nameCursor, handleLocker);
+            }
+
+            /* Insert it into id -> name db, in auto commit mode.
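+             * (A short-lived BasicLocker is used below because, as the
+             * comment in doTruncateDb notes, long-term locks must never be
+             * held on the MapLN.)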
*/ + idDbLocker = BasicLocker.createBasicLocker(envImpl); + idCursor = new CursorImpl(idDatabase, idDbLocker); + + idCursor.insertRecord( + newId.getBytes() /*key*/, new MapLN(newDb) /*ln*/, + false /*blindInsertion*/, ReplicationContext.NO_REPLICATE); + + /* Increment DB use count with lock held. */ + newDb.incrementUseCount(); + operationOk = true; + } finally { + if (idCursor != null) { + idCursor.close(); + } + + if (nameCursor != null) { + nameCursor.close(); + } + + if (idDbLocker != null) { + idDbLocker.operationEnd(operationOk); + } + + /* + * Undo the allocation of the database ID if DB creation fails. We + * use compareAndSet so that we don't undo the assignment of the ID + * by another concurrent operation, for example, truncation. + * + * Note that IDs are not conserved in doTruncateDb when a failure + * occurs. This inconsistency is historical and may or may not be + * the best approach. + * + * [#18642] + */ + if (!operationOk) { + if (allocatedRepDbId != 0) { + lastAllocatedReplicatedDbId.compareAndSet + (allocatedRepDbId, allocatedRepDbId + 1); + } + if (allocatedLocalDbId != 0) { + lastAllocatedLocalDbId.compareAndSet + (allocatedLocalDbId, allocatedLocalDbId - 1); + } + } + } + + return newDb; + } + + /** + * Opens (or creates if it does not exist) an internal, non-replicated DB. + * Returns null only if the DB does not exist and the env is read-only. + */ + public DatabaseImpl openNonRepInternalDB(final DbType dbType) { + + final String name = dbType.getInternalName(); + + final Locker autoTxn = Txn.createLocalAutoTxn( + envImpl, new TransactionConfig()); + + boolean operationOk = false; + try { + DatabaseImpl db = getDb(autoTxn, name, null, false); + + if (db == null) { + + if (envImpl.isReadOnly()) { + return null; + } + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReplicated(false); + + db = createInternalDb(autoTxn, name, dbConfig); + } + operationOk = true; + return db; + } finally { + autoTxn.operationEnd(operationOk); + } + } + + /** + * Called after locking a NameLN with nameCursor when opening a database. + * The NameLN may be locked for read or write, depending on whether the + * database existed when openDatabase was called. Here we additionally + * lock the NameLN for read on behalf of the handleLocker, which is kept + * by the Database handle. + * + * The lock must be acquired while the BIN is latched, so the locker will + * be updated if the LSN changes. There is no lock contention possible + * because the HandleLocker shares locks with the nameCursor locker, and + * jumpAheadOfWaiters=true is passed in case another locker is waiting on a + * write lock. + * + * If the lock is denied, checkPreempted is called on the nameCursor + * locker, in case the lock is denied because the nameCursor's lock was + * preempted. If so, DatabasePreemptedException will be thrown. 
+     *
+     * @see CursorImpl#lockLN
+     * @see HandleLocker
+     */
+    private void acquireHandleLock(CursorImpl nameCursor,
+                                   HandleLocker handleLocker) {
+        nameCursor.latchBIN();
+        try {
+            final long lsn = nameCursor.getCurrentLsn();
+
+            final LockResult lockResult = handleLocker.nonBlockingLock
+                (lsn, LockType.READ, true /*jumpAheadOfWaiters*/,
+                 nameDatabase);
+
+            if (lockResult.getLockGrant() == LockGrantType.DENIED) {
+                nameCursor.getLocker().checkPreempted(null);
+                throw EnvironmentFailureException.unexpectedState
+                    ("No contention is possible with HandleLocker: " +
+                     DbLsn.getNoFormatString(lsn));
+            }
+        } finally {
+            nameCursor.releaseBIN();
+        }
+    }
+
+    /**
+     * Check deferred write settings before writing the MapLN.
+     * @param db the database represented by this MapLN
+     */
+    public void optionalModifyDbRoot(DatabaseImpl db)
+        throws DatabaseException {
+
+        if (db.isDeferredWriteMode()) {
+            return;
+        }
+
+        modifyDbRoot(db);
+    }
+
+    /**
+     * Write the MapLN to disk.
+     * @param db the database represented by this MapLN
+     */
+    public void modifyDbRoot(DatabaseImpl db)
+        throws DatabaseException {
+
+        modifyDbRoot(db, DbLsn.NULL_LSN /*ifBeforeLsn*/, true /*mustExist*/);
+    }
+
+    /**
+     * Write a MapLN to the log in order to:
+     *  - propagate a root change
+     *  - save per-db utilization information
+     *  - save database config information.
+     * Any MapLN writes must be done through this method, in order to ensure
+     * that the root latch is taken, and updates to the rootIN are properly
+     * safeguarded. See MapLN.java for more detail.
+     *
+     * @param db the database whose root is held by this MapLN
+     *
+     * @param ifBeforeLsn if argument is not NULL_LSN, only do the write if
+     * this MapLN's current LSN is before ifBeforeLsn.
+     *
+     * @param mustExist if true, throw DatabaseException if the DB does not
+     * exist; if false, silently do nothing.
+     */
+    public void modifyDbRoot(
+        DatabaseImpl db,
+        long ifBeforeLsn,
+        boolean mustExist)
+        throws DatabaseException {
+
+        /*
+         * Do not write LNs in read-only env. This method is called when
+         * recovery causes a root split. [#21493]
+         */
+        if (envImpl.isReadOnly() && envImpl.isInInit()) {
+            return;
+        }
+
+        if (db.getId().equals(ID_DB_ID) ||
+            db.getId().equals(NAME_DB_ID)) {
+            envImpl.logMapTreeRoot();
+        } else {
+            DatabaseEntry keyDbt = new DatabaseEntry(db.getId().getBytes());
+
+            /*
+             * Retry indefinitely in the face of lock timeouts since the
+             * lock on the MapLN is only supposed to be held for short
+             * periods.
+             */
+            while (true) {
+                Locker idDbLocker = null;
+                CursorImpl cursor = null;
+                boolean operationOk = false;
+                try {
+                    idDbLocker = BasicLocker.createBasicLocker(envImpl);
+                    cursor = new CursorImpl(idDatabase, idDbLocker);
+
+                    boolean found = cursor.searchExact(keyDbt, LockType.WRITE);
+
+                    if (!found) {
+                        if (mustExist) {
+                            throw new EnvironmentFailureException(
+                                envImpl,
+                                EnvironmentFailureReason.LOG_INTEGRITY,
+                                "Can't find database ID: " + db.getId());
+                        }
+                        /* Do nothing silently. */
+                        break;
+                    }
+
+                    /* Check BIN LSN while latched. */
+                    if (ifBeforeLsn == DbLsn.NULL_LSN ||
+                        DbLsn.compareTo(
+                            cursor.getCurrentLsn(), ifBeforeLsn) < 0) {
+
+                        MapLN mapLN = (MapLN) cursor.getCurrentLN(
+                            true /*isLatched*/, true /*unlatch*/);
+
+                        assert mapLN != null; /* Should be locked. */
+
+                        /* Perform rewrite. */
+                        RewriteMapLN writeMapLN = new RewriteMapLN(cursor);
+                        mapLN.getDatabase().getTree().withRootLatchedExclusive(
+                            writeMapLN);
+
+                        operationOk = true;
+                    }
+                    break;
+                } catch (LockConflictException e) {
+                    /* Continue loop and retry.
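+                     * (A conflict here should be transient: per the retry
+                     * comment above, MapLN locks are held only for short
+                     * periods, so we loop and retry rather than fail.)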
*/ + } finally { + if (cursor != null) { + cursor.releaseBIN(); + cursor.close(); + } + if (idDbLocker != null) { + idDbLocker.operationEnd(operationOk); + } + } + } + } + } + + private static class RewriteMapLN implements WithRootLatched { + private final CursorImpl cursor; + + RewriteMapLN(CursorImpl cursor) { + this.cursor = cursor; + } + + public IN doWork(@SuppressWarnings("unused") ChildReference root) + throws DatabaseException { + + DatabaseEntry dataDbt = new DatabaseEntry(new byte[0]); + cursor.updateCurrentRecord( + null /*replaceKey*/, dataDbt, null /*expirationInfo*/, + null /*foundData*/, null /*returnNewData*/, + ReplicationContext.NO_REPLICATE); + return null; + } + } + + /** + * In other places (e.g., when write locking a record in ReadOnlyTxn) we + * allow writes to the naming DB on a replica, since we allow both + * replicated and non-replicated DBs and therefore some NameLNs are + * replicated and some are not. Below is the sole check to prevent a + * creation, removal, truncation, or configuration update of a replicated + * DB on a replica. It will throw ReplicaWriteException on a replica if + * this operation would assign a new VLSN. [#20543] + */ + private void checkReplicaWrite(Locker locker, + ReplicationContext repContext) { + if (repContext != null && repContext.mustGenerateVLSN()) { + locker.disallowReplicaWrite(); + } + } + + /** + * Used by lockNameLN to get the rep context, which is needed for calling + * checkReplicaWrite. + */ + interface GetRepContext { + ReplicationContext get(DatabaseImpl dbImpl); + } + + /** + * Thrown by lockNameLN when an incorrect locker was used via auto-commit. + * See Environment.DbNameOperation. A checked exception is used to ensure + * that it is always handled internally and never propagated to the app. + */ + public static class NeedRepLockerException extends Exception {} + + /** + * Helper for database operations. This method positions a cursor + * on the NameLN that represents this database and write locks it. + * + * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low + * level DbTree operation. [#15176] + * + * @throws IllegalStateException via + * Environment.remove/rename/truncateDatabase + */ + private NameLockResult lockNameLN(Locker locker, + String databaseName, + String action, + GetRepContext getRepContext) + throws DatabaseNotFoundException, NeedRepLockerException { + + /* + * We have to return both a cursor on the naming tree and a + * reference to the found DatabaseImpl. + */ + NameLockResult result = new NameLockResult(); + + /* Find the existing DatabaseImpl and establish a cursor. */ + result.dbImpl = getDb(locker, databaseName, null, true); + if (result.dbImpl == null) { + throw new DatabaseNotFoundException + ("Attempted to " + action + " non-existent database " + + databaseName); + } + + boolean success = false; + try { + /* Get effective rep context and check for replica write. */ + result.repContext = getRepContext.get(result.dbImpl); + checkReplicaWrite(locker, result.repContext); + + /* + * Check for an incorrect locker created via auto-commit. This + * check is made after we have the DatabaseImpl and can check + * whether it is replicated. See Environment.DbNameOperation. 
+             */
+            if (envImpl.isReplicated() &&
+                result.dbImpl.isReplicated() &&
+                locker.getTxnLocker() != null &&
+                locker.getTxnLocker().isAutoTxn() &&
+                !locker.isReplicated()) {
+                throw new NeedRepLockerException();
+            }
+
+            result.nameCursor = new CursorImpl(nameDatabase, locker);
+
+            /* Position the cursor at the specified NameLN. */
+            DatabaseEntry key =
+                new DatabaseEntry(StringUtils.toUTF8(databaseName));
+            /* See [#16210]. */
+            boolean found = result.nameCursor.searchExact(key, LockType.WRITE);
+
+            if (!found) {
+                throw new DatabaseNotFoundException(
+                    "Attempted to " + action + " non-existent database " +
+                    databaseName);
+            }
+
+            /* Call lockAndGetCurrentLN to write lock the nameLN. */
+            result.nameLN = (NameLN) result.nameCursor.getCurrentLN(
+                true /*isLatched*/, true /*unlatch*/);
+            assert result.nameLN != null; /* Should be locked. */
+
+            /*
+             * Check for open handles after we have the write lock and no other
+             * transactions can open a handle. After obtaining the write lock,
+             * other handles may be open only if (1) we preempted their locks,
+             * or (2) a handle was opened with the same transaction as used for
+             * this operation. For (1), we mark the handles as preempted to
+             * cause a DatabasePreemptedException the next time they are
+             * accessed. For (2), we throw IllegalStateException.
+             */
+            if (locker.getImportunate()) {
+                /* We preempted the lock of all open DB handles. [#17015] */
+                final String msg =
+                    "Database " + databaseName +
+                    " has been forcibly closed in order to apply a" +
+                    " replicated " + action + " operation. This Database" +
+                    " and all associated Cursors must be closed. All" +
+                    " associated Transactions must be aborted.";
+                for (Database db : result.dbImpl.getReferringHandles()) {
+                    DbInternal.setPreempted(db, databaseName, msg);
+                }
+            } else {
+                /* Disallow open handles for the same transaction. */
+                int handleCount = result.dbImpl.getReferringHandleCount();
+                if (handleCount > 0) {
+                    throw new IllegalStateException
+                        ("Can't " + action + " database " + databaseName +
+                         ", " + handleCount + " open Database handles exist");
+                }
+            }
+            success = true;
+        } finally {
+            if (!success) {
+                releaseDb(result.dbImpl);
+                if (result.nameCursor != null) {
+                    result.nameCursor.releaseBIN();
+                    result.nameCursor.close();
+                }
+            }
+        }
+
+        return result;
+    }
+
+    private static class NameLockResult {
+        CursorImpl nameCursor;
+        DatabaseImpl dbImpl;
+        NameLN nameLN;
+        ReplicationContext repContext;
+    }
+
+    /**
+     * Update the NameLN for the DatabaseImpl when the DatabaseConfig changes.
+     *
+     * The JE MapLN actually includes the DatabaseImpl information, but it is
+     * not transactional, so the DatabaseConfig information is stored in the
+     * NameLNLogEntry and replicated.
+     *
+     * So when the DatabaseConfig changes, we update the NameLN for the
+     * database, which logs a new NameLNLogEntry so that the rep stream will
+     * transfer it to the replicas and it will be replayed.
+     *
+     * @param locker the locker used to update the NameLN
+     * @param dbName the name of the database whose corresponding NameLN needs
+     * to be updated
+     * @param repContext information used while replaying a NameLNLogEntry on
+     * the replicas; it is null on the master
+     */
+    public void updateNameLN(Locker locker,
+                             String dbName,
+                             final DbOpReplicationContext repContext)
+        throws LockConflictException {
+
+        assert dbName != null;
+
+        /* Find and write lock the NameLN.
*/ + final NameLockResult result; + try { + result = lockNameLN + (locker, dbName, "updateConfig", new GetRepContext() { + + public ReplicationContext get(DatabaseImpl dbImpl) { + return (repContext != null) ? + repContext : + dbImpl.getOperationRepContext(UPDATE_CONFIG, null); + } + }); + } catch (NeedRepLockerException e) { + /* Should never happen; db is known when locker is created. */ + throw EnvironmentFailureException.unexpectedException(envImpl, e); + } + + final CursorImpl nameCursor = result.nameCursor; + final DatabaseImpl dbImpl = result.dbImpl; + final ReplicationContext useRepContext = result.repContext; + try { + + /* Log a NameLN. */ + DatabaseEntry dataDbt = new DatabaseEntry(new byte[0]); + nameCursor.updateCurrentRecord( + null /*replaceKey*/, dataDbt, null /*expirationInfo*/, + null /*foundData*/, null /*returnNewData*/, useRepContext); + } finally { + releaseDb(dbImpl); + nameCursor.releaseBIN(); + nameCursor.close(); + } + } + + /** + * Rename the database by creating a new NameLN and deleting the old one. + * + * @return the database handle of the impacted database + * + * @throws DatabaseNotFoundException if the operation fails because the + * given DB name is not found. + */ + private DatabaseImpl doRenameDb(Locker locker, + String databaseName, + String newName, + NameLN replicatedLN, + final DbOpReplicationContext repContext) + throws DatabaseNotFoundException, NeedRepLockerException { + + final NameLockResult result = lockNameLN + (locker, databaseName, "rename", new GetRepContext() { + + public ReplicationContext get(DatabaseImpl dbImpl) { + return (repContext != null) ? + repContext : + dbImpl.getOperationRepContext(RENAME); + } + }); + + final CursorImpl nameCursor = result.nameCursor; + final DatabaseImpl dbImpl = result.dbImpl; + final ReplicationContext useRepContext = result.repContext; + try { + + /* + * Rename simply deletes the one entry in the naming tree and + * replaces it with a new one. Remove the oldName->dbId entry and + * insert newName->dbId. + */ + nameCursor.deleteCurrentRecord(ReplicationContext.NO_REPLICATE); + final NameLN useLN = + (replicatedLN != null) ? + replicatedLN : + new NameLN(dbImpl.getId()); + /* + * Reset cursor to remove old BIN before calling insertRecord. + * [#16280] + */ + nameCursor.reset(); + + nameCursor.insertRecord( + StringUtils.toUTF8(newName), useLN, + false /*blindInsertion*/, useRepContext); + + dbImpl.setDebugDatabaseName(newName); + return dbImpl; + } finally { + releaseDb(dbImpl); + nameCursor.close(); + } + } + + /** + * Stand alone and Master invocations. + * + * @see #doRenameDb + */ + public DatabaseImpl dbRename(Locker locker, + String databaseName, + String newName) + throws DatabaseNotFoundException, NeedRepLockerException { + + return doRenameDb(locker, databaseName, newName, null, null); + } + + /** + * Replica invocations. + * + * @see #doRenameDb + */ + public DatabaseImpl renameReplicaDb(Locker locker, + String databaseName, + String newName, + NameLN replicatedLN, + DbOpReplicationContext repContext) + throws DatabaseNotFoundException { + + try { + return doRenameDb(locker, databaseName, newName, replicatedLN, + repContext); + } catch (NeedRepLockerException e) { + /* Should never happen; db is known when locker is created. */ + throw EnvironmentFailureException.unexpectedException(envImpl, e); + } + } + + /** + * Remove the database by deleting the nameLN. 
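+     *
+     * A typical caller-side sketch (via the public wrapper dbRemove defined
+     * below; the locker setup and the name "myDb" are assumed here):
+     *
+     *   dbTree.dbRemove(locker, "myDb", null); // null checkId
+     *   // The NameLN is deleted here; the MapLN and the tree itself are
+     *   // deleted by markDeleteAtTxnEnd when the transaction commits.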
+     *
+     * @return a handle to the removed database
+     *
+     * @throws DatabaseNotFoundException if the operation fails because the
+     * given DB name is not found, or the non-null checkId argument does not
+     * match the database identified by databaseName.
+     */
+    private DatabaseImpl doRemoveDb(Locker locker,
+                                    String databaseName,
+                                    DatabaseId checkId,
+                                    final DbOpReplicationContext repContext)
+        throws DatabaseNotFoundException, NeedRepLockerException {
+
+        CursorImpl nameCursor = null;
+
+        final NameLockResult result = lockNameLN
+            (locker, databaseName, "remove", new GetRepContext() {
+
+            public ReplicationContext get(DatabaseImpl dbImpl) {
+                return (repContext != null) ?
+                        repContext :
+                        dbImpl.getOperationRepContext(REMOVE);
+            }
+        });
+
+        final ReplicationContext useRepContext = result.repContext;
+        try {
+            nameCursor = result.nameCursor;
+            if (checkId != null && !checkId.equals(result.nameLN.getId())) {
+                throw new DatabaseNotFoundException
+                    ("ID mismatch: " + databaseName);
+            }
+
+            /*
+             * Delete the NameLN. There's no need to mark any Database
+             * handle invalid, because the handle must be closed when we
+             * take action and any further use of the handle will re-look
+             * up the database.
+             */
+            nameCursor.deleteCurrentRecord(useRepContext);
+
+            /*
+             * Schedule the database for final deletion during commit. This
+             * should be the last action taken, since this will take
+             * effect immediately for non-txnal lockers.
+             *
+             * Do not call releaseDb here on result.dbImpl, since that is
+             * taken care of by markDeleteAtTxnEnd.
+             */
+            locker.markDeleteAtTxnEnd(result.dbImpl, true);
+            return result.dbImpl;
+        } finally {
+            if (nameCursor != null) {
+                nameCursor.close();
+            }
+        }
+    }
+
+    /**
+     * Standalone and master invocations.
+     *
+     * @see #doRemoveDb
+     */
+    public DatabaseImpl dbRemove(Locker locker,
+                                 String databaseName,
+                                 DatabaseId checkId)
+        throws DatabaseNotFoundException, NeedRepLockerException {
+
+        return doRemoveDb(locker, databaseName, checkId, null);
+    }
+
+    /**
+     * Replica invocations.
+     *
+     * @see #doRemoveDb
+     */
+    public void removeReplicaDb(Locker locker,
+                                String databaseName,
+                                DatabaseId checkId,
+                                DbOpReplicationContext repContext)
+        throws DatabaseNotFoundException {
+
+        try {
+            doRemoveDb(locker, databaseName, checkId, repContext);
+        } catch (NeedRepLockerException e) {
+            /* Should never happen; db is known when locker is created. */
+            throw EnvironmentFailureException.unexpectedException(envImpl, e);
+        }
+    }
+
+    /**
+     * To truncate, remove the database named by databaseName and
+     * create a new database in its place.
+     *
+     * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low
+     * level DbTree operation. [#15176]
+     *
+     * @param returnCount if true, must return the count of records in the
+     * database, which can be an expensive option.
+     *
+     * @return the record count, oldDb and newDb packaged in a TruncateDbResult
+     *
+     * @throws DatabaseNotFoundException if the operation fails because the
+     * given DB name is not found.
+     */
+    public TruncateDbResult
+        doTruncateDb(Locker locker,
+                     String databaseName,
+                     boolean returnCount,
+                     NameLN replicatedLN,
+                     final DbOpReplicationContext repContext)
+        throws DatabaseNotFoundException, NeedRepLockerException {
+
+        assert((replicatedLN != null) ? (repContext != null) : true);
+
+        final NameLockResult result = lockNameLN
+            (locker, databaseName, "truncate", new GetRepContext() {
+
+            public ReplicationContext get(DatabaseImpl dbImpl) {
+                return (repContext != null) ?
+ repContext : + dbImpl.getOperationRepContext(TRUNCATE, dbImpl.getId()); + } + }); + + final CursorImpl nameCursor = result.nameCursor; + final ReplicationContext useRepContext = result.repContext; + try { + /* + * Make a new database with an empty tree. Make the nameLN refer to + * the id of the new database. If this database is replicated, the + * new one should also be replicated, and vice versa. + */ + DatabaseImpl oldDb = result.dbImpl; + final DatabaseId newId = + (replicatedLN != null) ? + replicatedLN.getId() : + new DatabaseId(isReplicatedId(oldDb.getId().getId()) ? + getNextReplicatedDbId() : + getNextLocalDbId()); + + DatabaseImpl newDb = oldDb.cloneDatabase(); + newDb.incrementUseCount(); + newDb.setId(newId); + newDb.setTree(new Tree(newDb)); + + /* + * Insert the new MapLN into the id tree. Do not use a transaction + * on the id database, because we can not hold long term locks on + * the mapLN. + */ + Locker idDbLocker = null; + CursorImpl idCursor = null; + boolean operationOk = false; + try { + idDbLocker = BasicLocker.createBasicLocker(envImpl); + idCursor = new CursorImpl(idDatabase, idDbLocker); + + idCursor.insertRecord( + newId.getBytes() /*key*/, new MapLN(newDb), + false /*blindInsertion*/, ReplicationContext.NO_REPLICATE); + + operationOk = true; + } finally { + if (idCursor != null) { + idCursor.close(); + } + + if (idDbLocker != null) { + idDbLocker.operationEnd(operationOk); + } + } + result.nameLN.setId(newDb.getId()); + + /* If required, count the number of records in the database. */ + final long recordCount = (returnCount ? oldDb.count(0) : 0); + + /* log the nameLN. */ + DatabaseEntry dataDbt = new DatabaseEntry(new byte[0]); + + nameCursor.updateCurrentRecord( + null /*replaceKey*/, dataDbt, null /*expirationInfo*/, + null /*foundData*/, null /*returnNewData*/, + useRepContext); + /* + * Marking the lockers should be the last action, since it + * takes effect immediately for non-txnal lockers. + * + * Do not call releaseDb here on oldDb or newDb, since that is + * taken care of by markDeleteAtTxnEnd. + */ + + /* Schedule old database for deletion if txn commits. */ + locker.markDeleteAtTxnEnd(oldDb, true); + + /* Schedule new database for deletion if txn aborts. */ + locker.markDeleteAtTxnEnd(newDb, false); + + return new TruncateDbResult(oldDb, newDb, recordCount); + } finally { + nameCursor.releaseBIN(); + nameCursor.close(); + } + } + + /* + * Effectively a struct used to return multiple values of interest. + */ + public static class TruncateDbResult { + public final DatabaseImpl oldDB; + public final DatabaseImpl newDb; + public final long recordCount; + + public TruncateDbResult(DatabaseImpl oldDB, + DatabaseImpl newDb, + long recordCount) { + this.oldDB = oldDB; + this.newDb = newDb; + this.recordCount = recordCount; + } + } + + /** + * @see #doTruncateDb + */ + public TruncateDbResult truncate(Locker locker, + String databaseName, + boolean returnCount) + throws DatabaseNotFoundException, NeedRepLockerException { + + return doTruncateDb(locker, databaseName, returnCount, null, null); + } + + /** + * @see #doTruncateDb + */ + public TruncateDbResult truncateReplicaDb(Locker locker, + String databaseName, + boolean returnCount, + NameLN replicatedLN, + DbOpReplicationContext repContext) + throws DatabaseNotFoundException { + + try { + return doTruncateDb(locker, databaseName, returnCount, + replicatedLN, repContext); + } catch (NeedRepLockerException e) { + /* Should never happen; db is known when locker is created. 
*/ + throw EnvironmentFailureException.unexpectedException(envImpl, e); + } + } + + /* + * Remove the mapLN that refers to this database. + * + * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low + * level DbTree operation. [#15176] + */ + void deleteMapLN(DatabaseId id) + throws DatabaseException { + + /* + * Retry indefinitely in the face of lock timeouts since the lock on + * the MapLN is only supposed to be held for short periods. + */ + boolean done = false; + while (!done) { + Locker idDbLocker = null; + CursorImpl idCursor = null; + boolean operationOk = false; + try { + idDbLocker = BasicLocker.createBasicLocker(envImpl); + idCursor = new CursorImpl(idDatabase, idDbLocker); + + boolean found = idCursor.searchExact( + new DatabaseEntry(id.getBytes()), LockType.WRITE); + + if (found) { + + /* + * If the database is in use by an internal JE operation + * (checkpointing, cleaning, etc), release the lock (done + * in the finally block) and retry. [#15805] + */ + MapLN mapLN = (MapLN) idCursor.getCurrentLN( + true, /*isLatched*/ true/*unlatch*/); + + assert mapLN != null; + DatabaseImpl dbImpl = mapLN.getDatabase(); + + if (!dbImpl.isInUseDuringDbRemove()) { + idCursor.deleteCurrentRecord( + ReplicationContext.NO_REPLICATE); + done = true; + } + } else { + /* MapLN does not exist. */ + done = true; + } + operationOk = true; + } catch (LockConflictException e) { + /* Continue loop and retry. */ + } finally { + if (idCursor != null) { + /* searchExact leaves BIN latched. */ + idCursor.releaseBIN(); + idCursor.close(); + } + if (idDbLocker != null) { + idDbLocker.operationEnd(operationOk); + } + } + } + } + + /** + * Get a database object given a database name. Increments the use count + * of the given DB to prevent it from being evicted. releaseDb should be + * called when the returned object is no longer used, to allow it to be + * evicted. See DatabaseImpl.isInUse. + * [#13415] + * + * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low + * level DbTree operation. [#15176] + * + * @param nameLocker is used to access the NameLN. As always, a NullTxn + * is used to access the MapLN. + * @param databaseName target database + * @return null if database doesn't exist + */ + public DatabaseImpl getDb(Locker nameLocker, + String databaseName, + HandleLocker handleLocker, + boolean writeLock) + throws DatabaseException { + + /* Use count is not incremented for idDatabase and nameDatabase. */ + if (databaseName.equals(DbType.ID.getInternalName())) { + return idDatabase; + } else if (databaseName.equals(DbType.NAME.getInternalName())) { + return nameDatabase; + } + + /* + * Search the nameDatabase tree for the NameLn for this name. + */ + CursorImpl nameCursor = null; + DatabaseId id = null; + + try { + nameCursor = new CursorImpl(nameDatabase, nameLocker); + DatabaseEntry keyDbt = + new DatabaseEntry(StringUtils.toUTF8(databaseName)); + + boolean found; + if (writeLock) { + found = nameCursor.searchExact(keyDbt, LockType.WRITE); + } else { + found = nameCursor.searchExact(keyDbt, LockType.READ); + } + + if (found) { + NameLN nameLN = (NameLN) nameCursor.getCurrentLN( + true, /*isLatched*/ true/*unlatch*/); + assert nameLN != null; /* Should be locked. */ + id = nameLN.getId(); + + /* Record handle lock. */ + if (handleLocker != null) { + acquireHandleLock(nameCursor, handleLocker); + } + } + } finally { + if (nameCursor != null) { + nameCursor.releaseBIN(); + nameCursor.close(); + } + } + + /* + * Now search the id tree. 
+ */ + if (id == null) { + return null; + } + return getDb(id, -1, databaseName); + } + + /** + * Get a database object based on an id only. Used by recovery, cleaning + * and other clients who have an id in hand, and don't have a resident + * node, to find the matching database for a given log entry. + */ + public DatabaseImpl getDb(DatabaseId dbId) + throws DatabaseException { + + return getDb(dbId, -1); + } + + /** + * Get a database object based on an id only. Specify the lock timeout to + * use, or -1 to use the default timeout. A timeout should normally only + * be specified by daemons with their own timeout configuration. public + * for unit tests. + */ + public DatabaseImpl getDb(DatabaseId dbId, long lockTimeout) + throws DatabaseException { + + return getDb(dbId, lockTimeout, (String) null); + } + + /** + * Get a database object based on an id only, caching the id-db mapping in + * the given map. + */ + public DatabaseImpl getDb(DatabaseId dbId, + long lockTimeout, + Map dbCache) + throws DatabaseException { + + if (dbCache.containsKey(dbId)) { + return dbCache.get(dbId); + } + DatabaseImpl db = getDb(dbId, lockTimeout, (String) null); + dbCache.put(dbId, db); + return db; + } + + /** + * Get a database object based on an id only. Specify the lock timeout to + * use, or -1 to use the default timeout. A timeout should normally only + * be specified by daemons with their own timeout configuration. public + * for unit tests. + * + * Increments the use count of the given DB to prevent it from being + * evicted. releaseDb should be called when the returned object is no + * longer used, to allow it to be evicted. See DatabaseImpl.isInUse. + * [#13415] + * + * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low + * level DbTree operation. [#15176] + */ + public DatabaseImpl getDb(DatabaseId dbId, + long lockTimeout, + String dbNameIfAvailable) + throws DatabaseException { + + if (dbId.equals(idDatabase.getId())) { + /* We're looking for the id database itself. */ + return idDatabase; + } else if (dbId.equals(nameDatabase.getId())) { + /* We're looking for the name database itself. */ + return nameDatabase; + } else { + /* Scan the tree for this db. */ + DatabaseImpl foundDbImpl = null; + + /* + * Retry indefinitely in the face of lock timeouts. Deadlocks may + * be due to conflicts with modifyDbRoot. + */ + while (true) { + Locker locker = null; + CursorImpl idCursor = null; + boolean operationOk = false; + try { + locker = BasicLocker.createBasicLocker(envImpl); + if (lockTimeout != -1) { + locker.setLockTimeout(lockTimeout); + } + idCursor = new CursorImpl(idDatabase, locker); + DatabaseEntry keyDbt = new DatabaseEntry(dbId.getBytes()); + + boolean found = idCursor.searchExact(keyDbt, LockType.READ); + + if (found) { + MapLN mapLN = (MapLN) idCursor.getCurrentLN( + true, /*isLatched*/ true /*unlatch*/); + assert mapLN != null; /* Should be locked. */ + foundDbImpl = mapLN.getDatabase(); + /* Increment DB use count with lock held. */ + foundDbImpl.incrementUseCount(); + } + operationOk = true; + break; + } catch (LockConflictException e) { + /* Continue loop and retry. */ + } finally { + if (idCursor != null) { + idCursor.releaseBIN(); + idCursor.close(); + } + if (locker != null) { + locker.operationEnd(operationOk); + } + } + } + + /* + * Set the debugging name in the databaseImpl. 
+ */ + setDebugNameForDatabaseImpl(foundDbImpl, dbNameIfAvailable); + + return foundDbImpl; + } + } + + /** + * Decrements the use count of the given DB, allowing it to be evicted if + * the use count reaches zero. Must be called to release a DatabaseImpl + * that was returned by a method in this class. See DatabaseImpl.isInUse. + * [#13415] + */ + public void releaseDb(DatabaseImpl db) { + /* Use count is not incremented for idDatabase and nameDatabase. */ + if (db != null && + db != idDatabase && + db != nameDatabase) { + db.decrementUseCount(); + } + } + + /** + * Calls releaseDb for all DBs in the given map of DatabaseId to + * DatabaseImpl. See getDb(DatabaseId, long, Map). [#13415] + */ + public void releaseDbs(Map dbCache) { + if (dbCache != null) { + for (DatabaseImpl databaseImpl : dbCache.values()) { + releaseDb(databaseImpl); + } + } + } + + /* + * We need to cache a database name in the dbImpl for later use in error + * messages, when it may be unsafe to walk the mapping tree. Finding a + * name by id is slow, so minimize the number of times we must set the + * debug name. The debug name will only be uninitialized when an existing + * databaseImpl is faulted in. + */ + private void setDebugNameForDatabaseImpl(DatabaseImpl dbImpl, + String dbName) + throws DatabaseException { + + if (dbImpl != null) { + if (dbName != null) { + /* If a name was provided, use that. */ + dbImpl.setDebugDatabaseName(dbName); + } else { + + /* + * Only worry about searching for a name if the name is + * uninitialized. Only search after recovery had finished + * setting up the tree. + * + * Only do name lookup if it will be fairly fast. Debugging + * info isn't important enough to cause long lookups during log + * cleaning, for example. [#21015] + */ + if (envImpl.isValid() && + !dbImpl.isDebugNameAvailable() && + getFastNameLookup()) { + dbImpl.setDebugDatabaseName(getDbName(dbImpl.getId())); + } + } + } + } + + /** + * Rebuild the IN list after recovery. + */ + public void rebuildINListMapDb() + throws DatabaseException { + + idDatabase.getTree().rebuildINList(); + } + + /** + * Returns true if the naming DB has a fairly small number of names, and + * therefore execution of getDbName will be fairly fast. + */ + private boolean getFastNameLookup() { + return nameDatabase.getTree().getMaxLNs() <= FAST_NAME_LOOKUP_MAX_LNS; + } + + /** + * Return the database name for a given db. Slow, must traverse. Called by + * Database.getName. + * + * Do not evict (do not call CursorImpl.setAllowEviction(true)) during low + * level DbTree operation. [#15176] + */ + public String getDbName(final DatabaseId id) + throws DatabaseException { + + if (id.equals(ID_DB_ID)) { + return DbType.ID.getInternalName(); + } else if (id.equals(NAME_DB_ID)) { + return DbType.NAME.getInternalName(); + } + + class Traversal implements CursorImpl.WithCursor { + String name = null; + + public boolean withCursor(CursorImpl cursor, + DatabaseEntry key, + @SuppressWarnings("unused") + DatabaseEntry data) + throws DatabaseException { + + NameLN nameLN = (NameLN) cursor.lockAndGetCurrentLN( + LockType.NONE); + + if (nameLN != null && nameLN.getId().equals(id)) { + name = StringUtils.fromUTF8(key.getData()); + return false; + } + return true; + } + } + + Traversal traversal = new Traversal(); + + CursorImpl.traverseDbWithCursor( + nameDatabase, LockType.NONE, false /*allowEviction*/, traversal); + + return traversal.name; + } + + /** + * @return a map of database ids to database names (Strings). 
+     */
+    public Map<DatabaseId, String> getDbNamesAndIds()
+        throws DatabaseException {
+
+        final Map<DatabaseId, String> nameMap =
+            new HashMap<DatabaseId, String>();
+
+        class Traversal implements CursorImpl.WithCursor {
+            public boolean withCursor(CursorImpl cursor,
+                                      DatabaseEntry key,
+                                      @SuppressWarnings("unused")
+                                      DatabaseEntry data)
+                throws DatabaseException {
+
+                NameLN nameLN = (NameLN) cursor.lockAndGetCurrentLN(
+                    LockType.NONE);
+                DatabaseId id = nameLN.getId();
+                nameMap.put(id, StringUtils.fromUTF8(key.getData()));
+                return true;
+            }
+        }
+        Traversal traversal = new Traversal();
+        CursorImpl.traverseDbWithCursor
+            (nameDatabase, LockType.NONE, false /*allowEviction*/, traversal);
+        return nameMap;
+    }
+
+    /**
+     * @return a list of database names held in the environment, as strings.
+     */
+    public List<String> getDbNames()
+        throws DatabaseException {
+
+        final List<String> nameList = new ArrayList<String>();
+
+        CursorImpl.traverseDbWithCursor(nameDatabase,
+                                        LockType.NONE,
+                                        true /*allowEviction*/,
+                                        new CursorImpl.WithCursor() {
+            public boolean withCursor(@SuppressWarnings("unused")
+                                      CursorImpl cursor,
+                                      DatabaseEntry key,
+                                      @SuppressWarnings("unused")
+                                      DatabaseEntry data)
+                throws DatabaseException {
+
+                String name = StringUtils.fromUTF8(key.getData());
+                if (!isReservedDbName(name)) {
+                    nameList.add(name);
+                }
+                return true;
+            }
+        });
+
+        return nameList;
+    }
+
+    /**
+     * Returns true if the name is a reserved JE database name.
+     */
+    public static boolean isReservedDbName(String name) {
+        return typeForDbName(name).isInternal();
+    }
+
+    /**
+     * @return the highest level node for this database.
+     */
+    public int getHighestLevel(DatabaseImpl dbImpl)
+        throws DatabaseException {
+
+        /* The highest level in the map side */
+        RootLevel getLevel = new RootLevel(dbImpl);
+        dbImpl.getTree().withRootLatchedShared(getLevel);
+        return getLevel.getRootLevel();
+    }
+
+    boolean isReplicated() {
+        return (flags & REPLICATED_BIT) != 0;
+    }
+
+    void setIsReplicated() {
+        flags |= REPLICATED_BIT;
+    }
+
+    /*
+     * Return true if this environment was converted from standalone to
+     * replicated.
+     */
+    boolean isRepConverted() {
+        return (flags & REP_CONVERTED_BIT) != 0;
+    }
+
+    void setIsRepConverted() {
+        flags |= REP_CONVERTED_BIT;
+    }
+
+    public DatabaseImpl getIdDatabaseImpl() {
+        return idDatabase;
+    }
+
+    public DatabaseImpl getNameDatabaseImpl() {
+        return nameDatabase;
+    }
+
+    boolean getDupsConverted() {
+        return (flags & DUPS_CONVERTED_BIT) != 0;
+    }
+
+    void setDupsConverted() {
+        flags |= DUPS_CONVERTED_BIT;
+    }
+
+    private boolean getPreserveVLSN() {
+        return (flags & PRESERVE_VLSN_BIT) != 0;
+    }
+
+    private void setPreserveVLSN() {
+        flags |= PRESERVE_VLSN_BIT;
+    }
+
+    /**
+     * Returns the initial log version at the time the env was created, or -1
+     * if the env was created prior to log version 15.
+     */
+    public int getInitialLogVersion() {
+        return initialLogVersion;
+    }
+
+    /**
+     * Release resources and update the memory budget. Should only be called
+     * when this dbtree is closed and will never be accessed again.
+     */
+    public void close() {
+        idDatabase.releaseTreeAdminMemory();
+        nameDatabase.releaseTreeAdminMemory();
+    }
+
+    public long getTreeAdminMemory() {
+        return idDatabase.getTreeAdminMemory() +
+               nameDatabase.getTreeAdminMemory();
+    }
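+
+    /*
+     * For reference, the flag bits above combine as a simple mask; e.g.
+     * flags == 0x5 (REPLICATED_BIT | DUPS_CONVERTED_BIT) describes a
+     * replicated environment whose dup DBs have been converted, that was
+     * not converted from a standalone env and does not preserve record
+     * versions.
+     */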
+
+    /*
+     * RootLevel lets us fetch the root IN within the root latch.
+     */
+    private static class RootLevel implements WithRootLatched {
+        private final DatabaseImpl db;
+        private int rootLevel;
+
+        RootLevel(DatabaseImpl db) {
+            this.db = db;
+            rootLevel = 0;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            if (root == null) {
+                return null;
+            }
+            IN rootIN = (IN) root.fetchTarget(db, null);
+            rootLevel = rootIN.getLevel();
+            return null;
+        }
+
+        int getRootLevel() {
+            return rootLevel;
+        }
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return
+            LogUtils.getLongLogSize() + // lastAllocatedLocalDbId
+            LogUtils.getLongLogSize() + // lastAllocatedReplicatedDbId
+            idDatabase.getLogSize() +
+            nameDatabase.getLogSize() +
+            1 + // 1 byte of flags
+            LogUtils.getPackedIntLogSize(initialLogVersion); // initialLogVersion
+    }
+
+    /**
+     * This log entry type is configured to perform marshaling (getLogSize and
+     * writeToLog) under the write log mutex. Otherwise, the size could change
+     * in between calls to these two methods as the result of utilization
+     * tracking.
+     *
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+
+        /*
+         * Long format, rather than packed long format, is used for the last
+         * allocated DB IDs. The IDs, and therefore their packed length, can
+         * change between the getLogSize and writeToLog calls. Since the root
+         * is infrequently logged, the simplest solution is to use fixed size
+         * values. [#18540]
+         */
+        LogUtils.writeLong(logBuffer, lastAllocatedLocalDbId.get());
+        LogUtils.writeLong(logBuffer, lastAllocatedReplicatedDbId.get());
+
+        idDatabase.writeToLog(logBuffer);
+        nameDatabase.writeToLog(logBuffer);
+        logBuffer.put(flags);
+        LogUtils.writePackedInt(logBuffer, initialLogVersion);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, int entryVersion) {
+
+        if (entryVersion >= 8) {
+            lastAllocatedLocalDbId.set(LogUtils.readLong(itemBuffer));
+            lastAllocatedReplicatedDbId.set(LogUtils.readLong(itemBuffer));
+        } else {
+            lastAllocatedLocalDbId.set(LogUtils.readInt(itemBuffer));
+            if (entryVersion >= 6) {
+                lastAllocatedReplicatedDbId.set(LogUtils.readInt(itemBuffer));
+            }
+        }
+
+        idDatabase.readFromLog(itemBuffer, entryVersion);   // id db
+        nameDatabase.readFromLog(itemBuffer, entryVersion); // name db
+
+        if (entryVersion >= 6) {
+            flags = itemBuffer.get();
+        } else {
+            flags = 0;
+        }
+
+        if (entryVersion >= 15) {
+            initialLogVersion = LogUtils.readPackedInt(itemBuffer);
+        } else {
+            initialLogVersion = -1;
+        }
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuilder sb, boolean verbose) {
+        sb.append("<dbtree>");
+        sb.append("<idDb>");
+        idDatabase.dumpLog(sb, verbose);
+        sb.append("</idDb><nameDb>");
+        nameDatabase.dumpLog(sb, verbose);
+        sb.append("</nameDb>");
+        sb.append("</dbtree>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
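+
+    /*
+     * For reference, the serialized form written by writeToLog above (for
+     * entries at log version 15 or greater) is:
+     *
+     *   8-byte long   lastAllocatedLocalDbId
+     *   8-byte long   lastAllocatedReplicatedDbId
+     *   idDatabase, then nameDatabase (DatabaseImpl format)
+     *   1 byte        flags
+     *   packed int    initialLogVersion
+     *
+     * readFromLog also accepts older layouts: int ids before version 8, no
+     * flags byte before version 6, and no initialLogVersion before 15.
+     */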
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always returns false; this item should never be compared.
+     */
+    public boolean logicalEquals(@SuppressWarnings("unused") Loggable other) {
+        return false;
+    }
+
+    /*
+     * For unit test support
+     */
+
+    String dumpString(int nSpaces) {
+        StringBuilder self = new StringBuilder();
+        self.append(TreeUtils.indent(nSpaces));
+        self.append("<dbtree>");
+        self.append('\n');
+        self.append(idDatabase.dumpString(nSpaces + 1));
+        self.append('\n');
+        self.append(nameDatabase.dumpString(nSpaces + 1));
+        self.append('\n');
+        self.append("</dbtree>");
+        return self.toString();
+    }
+
+    @Override
+    public String toString() {
+        return dumpString(0);
+    }
+
+    /**
+     * For debugging.
+     */
+    public void dump() {
+        idDatabase.getTree().dump();
+        nameDatabase.getTree().dump();
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/DbType.java b/src/com/sleepycat/je/dbi/DbType.java
new file mode 100644
index 0000000..e985b4f
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DbType.java
@@ -0,0 +1,170 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.tree.FileSummaryLN;
+import com.sleepycat.je.tree.LN;
+
+/**
+ * Classifies all databases as specific internal databases or user databases.
+ * This can be thought of as a substitute for having DatabaseImpl subclasses
+ * for different types of databases. It also identifies each internal database
+ * by name.
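+ *
+ * For example, DbTree.typeForDbName("_jeIdMap") returns ID, while any name
+ * that matches no internal DB (i.e. any application database name) maps to
+ * USER.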
+ */ +public enum DbType { + + ID("_jeIdMap") { + @Override + public boolean mayCreateDeletedLN() { + return false; + } + @Override + public LN createDeletedLN(EnvironmentImpl envImpl) { + throw EnvironmentFailureException.unexpectedState(); + } + @Override + public boolean mayCreateUpdatedLN() { + return false; + } + @Override + public LN createUpdatedLN(EnvironmentImpl envImpl, byte[] newData) { + throw EnvironmentFailureException.unexpectedState(); + } + }, + + NAME("_jeNameMap") { + @Override + public boolean mayCreateDeletedLN() { + return false; + } + @Override + public LN createDeletedLN(EnvironmentImpl envImpl) { + throw EnvironmentFailureException.unexpectedState(); + } + @Override + public boolean mayCreateUpdatedLN() { + return false; + } + @Override + public LN createUpdatedLN(EnvironmentImpl envImpl, byte[] newData) { + throw EnvironmentFailureException.unexpectedState(); + } + }, + + UTILIZATION("_jeUtilization") { + @Override + public LN createDeletedLN(EnvironmentImpl envImpl) { + return FileSummaryLN.makeDeletedLN(); + } + @Override + public boolean mayCreateUpdatedLN() { + return false; + } + @Override + public LN createUpdatedLN(EnvironmentImpl envImpl, byte[] newData) { + throw EnvironmentFailureException.unexpectedState(); + } + }, + + EXPIRATION("_jeExpiration"), + + REP_GROUP("_jeRepGroupDB"), + + VLSN_MAP("_jeVlsnMapDb"), + + SYNC("_jeSyncDb"), + + RESERVED_FILES("_jeReservedFilesDb") { + @Override + public LogEntryType getLogType() { + return LogEntryType.LOG_RESERVED_FILE_LN; + } + }, + + USER(null); + + private final String internalName; + + DbType(String internalName) { + this.internalName = internalName; + } + + /** + * Returns true if this is an internal DB, or false if it is a user DB. + */ + public boolean isInternal() { + return internalName != null; + } + + /** + * Returns the DB name for an internal DB type. + * + * @throws EnvironmentFailureException if this is not an internal DB type. + */ + public String getInternalName() { + if (internalName == null) { + throw EnvironmentFailureException.unexpectedState(); + } + return internalName; + } + + /** + * Returns true if createUpdatedLN may be called. + */ + public boolean mayCreateUpdatedLN() { + return true; + } + + /** + * Creates an updated LN for use in an optimization in + * CursorImpl.putCurrentAlreadyLatchedAndLocked. Without this method it + * would be necessary to fetch the existing LN and call LN.modify. + * + * Does NOT copy the byte array, so after calling this method the array is + * "owned" by the Btree and should not be modified. + * + * @throws EnvironmentFailureException if this is not allowed. + */ + public LN createUpdatedLN(EnvironmentImpl envImpl, byte[] newData) { + return LN.makeLN(envImpl, newData); + } + + /** + * Returns true if createDeletedLN may be called. + */ + public boolean mayCreateDeletedLN() { + return true; + } + + /** + * Creates a deleted LN for use in an optimization in CursorImpl.delete. + * Without this method it would be necessary to fetch the existing LN and + * call LN.delete. + * + * @throws EnvironmentFailureException if this is not allowed. + */ + public LN createDeletedLN(EnvironmentImpl envImpl) { + return LN.makeLN(envImpl, (byte[]) null); + } + + /** + * Returns the LogEntryType for LNs in this DB, or null if the usual user + * LN types should be used. 
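+     * For example, RESERVED_FILES overrides this method to return
+     * LogEntryType.LOG_RESERVED_FILE_LN.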
+ */ + public LogEntryType getLogType() { + return null; + } +} diff --git a/src/com/sleepycat/je/dbi/DbiStatDefinition.java b/src/com/sleepycat/je/dbi/DbiStatDefinition.java new file mode 100644 index 0000000..6c8a6f1 --- /dev/null +++ b/src/com/sleepycat/je/dbi/DbiStatDefinition.java @@ -0,0 +1,296 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * Per-stat Metadata for JE EnvironmentImpl and MemoryBudget statistics. + */ +public class DbiStatDefinition { + + public static final String MB_GROUP_NAME = "Cache Layout"; + public static final String MB_GROUP_DESC = + "Allocation of resources in the cache."; + + public static final String ENV_GROUP_NAME = "Environment"; + public static final String ENV_GROUP_DESC = + "Miscellaneous environment wide statistics."; + + public static final String THROUGHPUT_GROUP_NAME = "Op"; + public static final String THROUGHPUT_GROUP_DESC = + "Throughput statistics for JE calls."; + + /* The following stat definitions are used in MemoryBudget. */ + public static final String MB_SHARED_CACHE_TOTAL_BYTES_NAME = + "sharedCacheTotalBytes"; + public static final String MB_SHARED_CACHE_TOTAL_BYTES_DESC = + "Total amount of the shared JE main cache in use, in bytes."; + public static final StatDefinition MB_SHARED_CACHE_TOTAL_BYTES = + new StatDefinition( + MB_SHARED_CACHE_TOTAL_BYTES_NAME, + MB_SHARED_CACHE_TOTAL_BYTES_DESC, + StatType.CUMULATIVE); + + public static final String MB_TOTAL_BYTES_NAME = + "cacheTotalBytes"; + public static final String MB_TOTAL_BYTES_DESC = + "Total amount of JE main cache in use, in bytes."; + public static final StatDefinition MB_TOTAL_BYTES = + new StatDefinition( + MB_TOTAL_BYTES_NAME, + MB_TOTAL_BYTES_DESC, + StatType.CUMULATIVE); + + public static final String MB_DATA_BYTES_NAME = + "dataBytes"; + public static final String MB_DATA_BYTES_DESC = + "Amount of JE main cache used for holding data, keys and internal " + + "Btree nodes, in bytes."; + public static final StatDefinition MB_DATA_BYTES = + new StatDefinition( + MB_DATA_BYTES_NAME, + MB_DATA_BYTES_DESC, + StatType.CUMULATIVE); + + public static final String MB_DATA_ADMIN_BYTES_NAME = + "dataAdminBytes"; + public static final String MB_DATA_ADMIN_BYTES_DESC = + "Amount of JE main cache used for holding per-database cleaner " + + "utilization metadata, in bytes."; + public static final StatDefinition MB_DATA_ADMIN_BYTES = + new StatDefinition( + MB_DATA_ADMIN_BYTES_NAME, + MB_DATA_ADMIN_BYTES_DESC, + StatType.CUMULATIVE); + + public static final String MB_DOS_BYTES_NAME = + "DOSBytes"; + public static final String MB_DOS_BYTES_DESC = + "Amount of JE main cache consumed by disk-ordered cursor and " + + "Database.count operations, in bytes."; + public static final StatDefinition MB_DOS_BYTES = + new StatDefinition( + MB_DOS_BYTES_NAME, + MB_DOS_BYTES_DESC, + StatType.CUMULATIVE); + + public static final String MB_ADMIN_BYTES_NAME = + "adminBytes"; + public static final 
String MB_ADMIN_BYTES_DESC = + "Number of bytes of JE main cache used for cleaner and checkpointer " + + "metadata, in bytes."; + public static final StatDefinition MB_ADMIN_BYTES = + new StatDefinition( + MB_ADMIN_BYTES_NAME, + MB_ADMIN_BYTES_DESC, + StatType.CUMULATIVE); + + public static final String MB_LOCK_BYTES_NAME = + "lockBytes"; + public static final String MB_LOCK_BYTES_DESC = + "Number of bytes of JE cache used for holding locks and transactions," + + " in bytes."; + public static final StatDefinition MB_LOCK_BYTES = + new StatDefinition( + MB_LOCK_BYTES_NAME, + MB_LOCK_BYTES_DESC, + StatType.CUMULATIVE); + + /* The following stat definitions are used in EnvironmentImpl. */ + public static final String ENV_RELATCHES_REQUIRED_NAME = + "btreeRelatchesRequired"; + public static final String ENV_RELATCHES_REQUIRED_DESC = + "Returns the number of btree latch upgrades required while operating " + + "on this Environment. A measurement of contention."; + public static final StatDefinition ENV_RELATCHES_REQUIRED = + new StatDefinition( + ENV_RELATCHES_REQUIRED_NAME, + ENV_RELATCHES_REQUIRED_DESC); + + public static final String ENV_CREATION_TIME_NAME = + "environmentCreationTime"; + public static final String ENV_CREATION_TIME_DESC = + "Returns the time the Environment was created. "; + public static final StatDefinition ENV_CREATION_TIME = + new StatDefinition( + ENV_CREATION_TIME_NAME, + ENV_CREATION_TIME_DESC, + StatType.CUMULATIVE); + + public static final String ENV_BIN_DELTA_GETS_NAME = + "nBinDeltaGet"; + public static final String ENV_BIN_DELTA_GETS_DESC = + "The number of gets performed in BIN deltas"; + public static final StatDefinition ENV_BIN_DELTA_GETS = + new StatDefinition( + ENV_BIN_DELTA_GETS_NAME, + ENV_BIN_DELTA_GETS_DESC); + + public static final String ENV_BIN_DELTA_INSERTS_NAME = + "nBinDeltaInsert"; + public static final String ENV_BIN_DELTA_INSERTS_DESC = + "The number of insertions performed in BIN deltas"; + public static final StatDefinition ENV_BIN_DELTA_INSERTS = + new StatDefinition( + ENV_BIN_DELTA_INSERTS_NAME, + ENV_BIN_DELTA_INSERTS_DESC); + + public static final String ENV_BIN_DELTA_UPDATES_NAME = + "nBinDeltaUpdate"; + public static final String ENV_BIN_DELTA_UPDATES_DESC = + "The number of updates performed in BIN deltas"; + public static final StatDefinition ENV_BIN_DELTA_UPDATES = + new StatDefinition( + ENV_BIN_DELTA_UPDATES_NAME, + ENV_BIN_DELTA_UPDATES_DESC); + + public static final String ENV_BIN_DELTA_DELETES_NAME = + "nBinDeltaDelete"; + public static final String ENV_BIN_DELTA_DELETES_DESC = + "The number of deletions performed in BIN deltas"; + public static final StatDefinition ENV_BIN_DELTA_DELETES = + new StatDefinition( + ENV_BIN_DELTA_DELETES_NAME, + ENV_BIN_DELTA_DELETES_DESC); + + /* The following stat definitions are used for throughput. 
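+     *
+     * A sketch of how these definitions are typically consumed when
+     * building a stats group (this assumes the LongStat and StatGroup
+     * classes in com.sleepycat.je.utilint; the usage shown is
+     * illustrative, not code from this class):
+     *
+     *   StatGroup group = new StatGroup(THROUGHPUT_GROUP_NAME,
+     *                                   THROUGHPUT_GROUP_DESC);
+     *   LongStat priSearch = new LongStat(group, THROUGHPUT_PRI_SEARCH);
+     *   priSearch.increment();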
*/ + + public static final String THROUGHPUT_PRI_SEARCH_NAME = + "priSearch"; + public static final String THROUGHPUT_PRI_SEARCH_DESC = + "Number of successful primary DB key search operations."; + public static final StatDefinition THROUGHPUT_PRI_SEARCH = + new StatDefinition( + THROUGHPUT_PRI_SEARCH_NAME, + THROUGHPUT_PRI_SEARCH_DESC); + + public static final String THROUGHPUT_PRI_SEARCH_FAIL_NAME = + "priSearchFail"; + public static final String THROUGHPUT_PRI_SEARCH_FAIL_DESC = + "Number of failed primary DB key search operations."; + public static final StatDefinition THROUGHPUT_PRI_SEARCH_FAIL = + new StatDefinition( + THROUGHPUT_PRI_SEARCH_FAIL_NAME, + THROUGHPUT_PRI_SEARCH_FAIL_DESC); + + public static final String THROUGHPUT_SEC_SEARCH_NAME = + "secSearch"; + public static final String THROUGHPUT_SEC_SEARCH_DESC = + "Number of successful secondary DB key search operations."; + public static final StatDefinition THROUGHPUT_SEC_SEARCH = + new StatDefinition( + THROUGHPUT_SEC_SEARCH_NAME, + THROUGHPUT_SEC_SEARCH_DESC); + + public static final String THROUGHPUT_SEC_SEARCH_FAIL_NAME = + "secSearchFail"; + public static final String THROUGHPUT_SEC_SEARCH_FAIL_DESC = + "Number of failed secondary DB key search operations."; + public static final StatDefinition THROUGHPUT_SEC_SEARCH_FAIL = + new StatDefinition( + THROUGHPUT_SEC_SEARCH_FAIL_NAME, + THROUGHPUT_SEC_SEARCH_FAIL_DESC); + + public static final String THROUGHPUT_PRI_POSITION_NAME = + "priPosition"; + public static final String THROUGHPUT_PRI_POSITION_DESC = + "Number of successful primary DB position operations."; + public static final StatDefinition THROUGHPUT_PRI_POSITION = + new StatDefinition( + THROUGHPUT_PRI_POSITION_NAME, + THROUGHPUT_PRI_POSITION_DESC); + + public static final String THROUGHPUT_SEC_POSITION_NAME = + "secPosition"; + public static final String THROUGHPUT_SEC_POSITION_DESC = + "Number of successful secondary DB position operations."; + public static final StatDefinition THROUGHPUT_SEC_POSITION = + new StatDefinition( + THROUGHPUT_SEC_POSITION_NAME, + THROUGHPUT_SEC_POSITION_DESC); + + public static final String THROUGHPUT_PRI_INSERT_NAME = + "priInsert"; + public static final String THROUGHPUT_PRI_INSERT_DESC = + "Number of successful primary DB insertion operations."; + public static final StatDefinition THROUGHPUT_PRI_INSERT = + new StatDefinition( + THROUGHPUT_PRI_INSERT_NAME, + THROUGHPUT_PRI_INSERT_DESC); + + public static final String THROUGHPUT_PRI_INSERT_FAIL_NAME = + "priInsertFail"; + public static final String THROUGHPUT_PRI_INSERT_FAIL_DESC = + "Number of failed primary DB insertion operations."; + public static final StatDefinition THROUGHPUT_PRI_INSERT_FAIL = + new StatDefinition( + THROUGHPUT_PRI_INSERT_FAIL_NAME, + THROUGHPUT_PRI_INSERT_FAIL_DESC); + + public static final String THROUGHPUT_SEC_INSERT_NAME = + "secInsert"; + public static final String THROUGHPUT_SEC_INSERT_DESC = + "Number of successful secondary DB insertion operations."; + public static final StatDefinition THROUGHPUT_SEC_INSERT = + new StatDefinition( + THROUGHPUT_SEC_INSERT_NAME, + THROUGHPUT_SEC_INSERT_DESC); + + public static final String THROUGHPUT_PRI_UPDATE_NAME = + "priUpdate"; + public static final String THROUGHPUT_PRI_UPDATE_DESC = + "Number of successful primary DB update operations."; + public static final StatDefinition THROUGHPUT_PRI_UPDATE = + new StatDefinition( + THROUGHPUT_PRI_UPDATE_NAME, + THROUGHPUT_PRI_UPDATE_DESC); + + public static final String THROUGHPUT_SEC_UPDATE_NAME = + "secUpdate"; + public 
static final String THROUGHPUT_SEC_UPDATE_DESC =
+        "Number of successful secondary DB update operations.";
+    public static final StatDefinition THROUGHPUT_SEC_UPDATE =
+        new StatDefinition(
+            THROUGHPUT_SEC_UPDATE_NAME,
+            THROUGHPUT_SEC_UPDATE_DESC);
+
+    public static final String THROUGHPUT_PRI_DELETE_NAME =
+        "priDelete";
+    public static final String THROUGHPUT_PRI_DELETE_DESC =
+        "Number of successful primary DB deletion operations.";
+    public static final StatDefinition THROUGHPUT_PRI_DELETE =
+        new StatDefinition(
+            THROUGHPUT_PRI_DELETE_NAME,
+            THROUGHPUT_PRI_DELETE_DESC);
+
+    public static final String THROUGHPUT_PRI_DELETE_FAIL_NAME =
+        "priDeleteFail";
+    public static final String THROUGHPUT_PRI_DELETE_FAIL_DESC =
+        "Number of failed primary DB deletion operations.";
+    public static final StatDefinition THROUGHPUT_PRI_DELETE_FAIL =
+        new StatDefinition(
+            THROUGHPUT_PRI_DELETE_FAIL_NAME,
+            THROUGHPUT_PRI_DELETE_FAIL_DESC);
+
+    public static final String THROUGHPUT_SEC_DELETE_NAME =
+        "secDelete";
+    public static final String THROUGHPUT_SEC_DELETE_DESC =
+        "Number of successful secondary DB deletion operations.";
+    public static final StatDefinition THROUGHPUT_SEC_DELETE =
+        new StatDefinition(
+            THROUGHPUT_SEC_DELETE_NAME,
+            THROUGHPUT_SEC_DELETE_DESC);
+}
diff --git a/src/com/sleepycat/je/dbi/DiskOrderedCursorImpl.java b/src/com/sleepycat/je/dbi/DiskOrderedCursorImpl.java
new file mode 100644
index 0000000..50f6cbe
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/DiskOrderedCursorImpl.java
@@ -0,0 +1,456 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.dbi;
+
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DiskOrderedCursorConfig;
+import com.sleepycat.je.DiskOrderedCursorProducerException;
+import com.sleepycat.je.OperationResult;
+import com.sleepycat.je.ThreadInterruptedException;
+import com.sleepycat.je.cleaner.FileProtector;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.tree.LN;
+
+/**
+ * This class implements the DiskOrderedCursor. When an instance is
+ * constructed, a Producer Thread is created which runs a DiskOrderedScanner
+ * against the DiskOrderedCursor's Database. The callback for the
+ * DiskOrderedScanner takes the key/data byte arrays that are passed to it and
+ * then places those entries on a BlockingQueue that is shared between the
+ * Producer Thread and the application thread. When the application calls
+ * getNext(), it simply takes an entry off the queue and hands it to the
+ * caller. The entries on the queue are simple KeyAndData structs which hold
+ * byte arrays for the key and (optional) data. A special instance of
+ * KeyAndData is used to indicate that the cursor scan has finished.
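+ *
+ * In outline, the consumer side follows the classic poison-pill pattern
+ * (a sketch only; getNext() below adds polling, shutdown and error
+ * checks):
+ * <pre>
+ *    KeyAndData e = queue.take();
+ *    if (e == END_OF_QUEUE) {
+ *        return null; // the producer has finished the scan
+ *    }
+ *    // otherwise copy e's key/data into the caller's DatabaseEntry objects
+ * </pre>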
+ * + * The consistency guarantees are documented in the public javadoc for + * DiskOrderedCursor, and are based on the use of DiskOrderedScanner (see its + * javadoc for details). + * + * If the cleaner is operating concurrently with the DiskOrderedScanner, then + * it is possible for a file to be deleted and a not-yet-processed LSN (i.e. + * one which has not yet been returned to the user) might be pointing to that + * deleted file. Therefore, we must disable file deletion (but not cleaner + * operation) during the DOS. + */ +public class DiskOrderedCursorImpl { + + /* + * Simple struct to hold key and data byte arrays being passed through the + * queue. + */ + private static class KeyAndData { + + final int dbIdx; + final byte[] key; + final byte[] data; + + /* Negative value means "in hours", to save queue space. */ + final int expiration; + + /** + * Creates a marker instance, for END_OF_QUEUE. + */ + private KeyAndData() { + this.dbIdx = -1; + this.key = null; + this.data = null; + this.expiration = 0; + } + + private KeyAndData( + int dbIdx, + byte[] key, + byte[] data, + int expiration, + boolean expirationInHours) { + + this.dbIdx = dbIdx; + this.key = key; + this.data = data; + this.expiration = expirationInHours ? (- expiration) : expiration; + } + + private int getDbIdx() { + return dbIdx; + } + + private byte[] getKey() { + return key; + } + + private byte[] getData() { + return data; + } + + private long getExpirationTime() { + if (expiration == 0) { + return 0; + } + if (expiration < 0) { + return TTL.expirationToSystemTime(- expiration, true); + } + return TTL.expirationToSystemTime(expiration, false); + } + } + + /* + * The maximum number of entries that the BlockingQueue will store before + * blocking the producer thread. + */ + private int queueSize = 1000; + + /* Queue.offer() timeout in msec. */ + private int offerTimeout; + + private final boolean keysOnly; + + private final EnvironmentImpl env; + + private final Processor processor; + + private final DiskOrderedScanner scanner; + + private final Thread producer; + + private final BlockingQueue queue; + + /* The special KeyAndData which marks the end of the operation. */ + private final KeyAndData END_OF_QUEUE = new KeyAndData(); + + private final RuntimeException SHUTDOWN_REQUESTED_EXCEPTION = + new RuntimeException("Producer Thread shutdown requested"); + + /* DiskOrderedCursors are initialized as soon as they are created. 
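+     *
+     * That is, the constructor below starts the producer thread; a sketch
+     * of typical use is simply (dbImpls and config are obtained elsewhere):
+     *
+     *   DiskOrderedCursorImpl c = new DiskOrderedCursorImpl(dbImpls, config);
+     *   // the scan is already running on the producer thread at this point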
*/ + private boolean closed = false; + + private KeyAndData currentNode = null; + + public DiskOrderedCursorImpl( + final DatabaseImpl[] dbImpls, + final DiskOrderedCursorConfig config) { + + this.env = dbImpls[0].getEnv(); + + DbConfigManager configMgr = env.getConfigManager(); + + this.offerTimeout = configMgr.getDuration( + EnvironmentParams.DOS_PRODUCER_QUEUE_TIMEOUT); + + this.keysOnly = config.getKeysOnly(); + this.queueSize = config.getQueueSize(); + + if (keysOnly) { + for (int i = 0; i < dbImpls.length; ++i) { + if (queueSize < dbImpls[i].getNodeMaxTreeEntries()) { + queueSize = dbImpls[i].getNodeMaxTreeEntries(); + } + } + } + + this.processor = new Processor(); + + this.scanner = new DiskOrderedScanner( + dbImpls, processor, + config.getSerialDBScan(), + config.getBINsOnly(), keysOnly, config.getCountOnly(), + config.getLSNBatchSize(), config.getInternalMemoryLimit(), + config.getDebug()); + + this.queue = new ArrayBlockingQueue(queueSize); + + this.producer = new Thread() { + + public void run() { + try { + scanner.scan( + FileProtector.DISK_ORDERED_CURSOR_NAME, + env.getNodeSequence(). + getNextDiskOrderedCursorId()); + + processor.close(); + + } catch (Throwable T) { + if (T == SHUTDOWN_REQUESTED_EXCEPTION) { + /* Shutdown was requested. Don't rethrow. */ + processor.isClosed = true; + return; + } + + /* The exception is check by the getNext() method of + the consumer code. + */ + processor.setException(T); + + queue.offer(END_OF_QUEUE); + } + } + }; + + this.producer.setName("DiskOrderedCursor Producer Thread for " + + Thread.currentThread()); + this.producer.start(); + } + + private class Processor implements DiskOrderedScanner.RecordProcessor { + + /* + * A place to stash any exception caught by the producer thread so that + * it can be returned to the application. + */ + private Throwable exception; + + private volatile boolean shutdownNow; + + public boolean isClosed = false; // used for unit testing only + + @Override + public void process( + int dbIdx, + byte[] key, + byte[] data, + int expiration, + boolean expirationInHours) { + + checkShutdown(); + + try { + KeyAndData e = new KeyAndData( + dbIdx, key, data, expiration, expirationInHours); + + while (!queue.offer(e, offerTimeout, TimeUnit.MILLISECONDS)) { + checkShutdown(); + } + + } catch (InterruptedException IE) { + setException( + new ThreadInterruptedException(env, IE)); + setShutdown(); + } + } + + @Override + public boolean canProcessWithoutBlocking(int nRecords) { + return queue.remainingCapacity() >= nRecords; + } + + @Override + public int getCapacity() { + return queueSize; + } + + /* + * Called from the producer thread's run() method after there are + * no more records to scan. + */ + void close() { + + try { + if (!queue.offer(END_OF_QUEUE, offerTimeout, + TimeUnit.MILLISECONDS)) { + /* Cursor.close() called, but queue was not drained. */ + setException(SHUTDOWN_REQUESTED_EXCEPTION. + fillInStackTrace()); + setShutdown(); + } + + isClosed = true; + + } catch (InterruptedException IE) { + setException( + new ThreadInterruptedException(env, IE)); + setShutdown(); + } + } + + /* + * Called by producer code only. + */ + void setException(Throwable t) { + exception = t; + } + + /* + * Called by consumer thread's getNext() method. + */ + private Throwable getException() { + return exception; + } + + /* + * Called by by both producer and consumer code. + */ + private void setShutdown() { + shutdownNow = true; + } + + /* + * Called by producer code only. 
+ */ + @Override + public void checkShutdown() { + if (shutdownNow) { + throw SHUTDOWN_REQUESTED_EXCEPTION; + } + } + } + + /* + * For unit testing only + */ + public boolean isProcessorClosed() { + return processor.isClosed; + } + + public synchronized boolean isClosed() { + return closed; + } + + public synchronized void close() { + if (closed) { + return; + } + + /* Tell Producer Thread to die if it hasn't already. */ + processor.setShutdown(); + + closed = true; + } + + public void checkEnv() { + env.checkIfInvalid(); + } + + private OperationResult setData( + final DatabaseEntry foundKey, + final DatabaseEntry foundData) { + + if (foundKey != null) { + LN.setEntry(foundKey, currentNode.getKey()); + } + if (foundData != null) { + LN.setEntry(foundData, currentNode.getData()); + } + return DbInternal.makeResult(currentNode.getExpirationTime()); + } + + public synchronized OperationResult getCurrent( + final DatabaseEntry foundKey, + final DatabaseEntry foundData) { + + if (closed) { + throw new IllegalStateException("Not initialized"); + } + + if (currentNode == END_OF_QUEUE) { + return null; + } + + return setData(foundKey, foundData); + } + + public int getCurrDb() { + + if (closed) { + throw new IllegalStateException("Not initialized"); + } + + if (currentNode == END_OF_QUEUE) { + return -1; + } + + return currentNode.getDbIdx(); + } + + public synchronized OperationResult getNext( + final DatabaseEntry foundKey, + final DatabaseEntry foundData) { + + if (closed) { + throw new IllegalStateException("Not initialized"); + } + + /* + * If null was returned earlier, do not enter loop below to avoid a + * hang. [#21282] + */ + if (currentNode == END_OF_QUEUE) { + return null; + } + + try { + + /* + * Poll in a loop in case the producer thread throws an exception + * and can't put END_OF_QUEUE on the queue because of an + * InterruptedException. The presence of an exception is the last + * resort to make sure that getNext actually returns to the user. + */ + do { + currentNode = queue.poll(1, TimeUnit.SECONDS); + if (processor.getException() != null) { + break; + } + } while (currentNode == null); + + } catch (InterruptedException IE) { + currentNode = END_OF_QUEUE; + throw new ThreadInterruptedException(env, IE); + } + + if (processor.getException() != null) { + throw new DiskOrderedCursorProducerException( + "Producer Thread Failure", processor.getException()); + } + + if (currentNode == END_OF_QUEUE) { + return null; + } + + return setData(foundKey, foundData); + } + + /** + * For unit testing only + */ + int freeQueueSlots() { + return queue.remainingCapacity(); + } + + /* + * For unit testing only. + */ + long getNumLsns() { + return scanner.getNumLsns(); + } + + /* + * For unit testing only. + */ + DiskOrderedScanner getScanner() { + return scanner; + } + + /** + * For testing and other internal use. + */ + public int getNScannerIterations() { + return scanner.getNIterations(); + } +} diff --git a/src/com/sleepycat/je/dbi/DiskOrderedScanner.java b/src/com/sleepycat/je/dbi/DiskOrderedScanner.java new file mode 100644 index 0000000..732a171 --- /dev/null +++ b/src/com/sleepycat/je/dbi/DiskOrderedScanner.java @@ -0,0 +1,2324 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.lang.ref.WeakReference; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.Map; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.cleaner.DbFileSummary; +import com.sleepycat.je.cleaner.FileProtector; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.OldBINDelta; +import com.sleepycat.je.tree.SearchResult; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; + +/** + * Provides an enumeration of all key/data pairs in a database, striving to + * fetch in disk order. + * + * Unlike SortedLSNTreeWalker, for which the primary use case is preload, this + * class notifies the callback while holding a latch only if that can be done + * without blocking (e.g., when the callback can buffer the data without + * blocking). In other words, while holding a latch, the callback will not be + * notified if it might block. This is appropriate for the DOS + * (DiskOrderedCursor) use case, since the callback will block if the DOS queue + * is full, and the user's consumer thread may not empty the queue as quickly + * as it can be filled by the producer thread. If the callback were allowed to + * block while a latch is held, this would block other threads attempting to + * access the database, including JE internal threads, which would have a very + * detrimental impact. + * + * Algorithm + * ========= + * + * Terminology + * ----------- + * callback: object implementing the RecordProcessor interface + * process: invoking the callback with a key-data pair + * iteration: top level iteration consisting of phase I and II + * phase I: accumulate LSNs + * phase II: sort, fetch and process LSNs + * + * Phase I and II + * -------------- + * To avoid processing resident nodes (invoking the callback with a latch + * held), a non-recursive algorithm is used. Instead of recursively + * accumulating LSNs in a depth-first iteration of the tree (like the + * SortedLSNTreeWalker algorithm), level 2 INs are traversed in phase I and + * LSNs are accumulated for LNs or BINs (more on this below). When the memory + * or LSN batch size limit is exceeded, phase I ends and all tree latches are + * released. During phase II the previously accumulated LSNs are fetched and + * the callback is invoked for each key or key-data pair. 
Since no latches are
+ * held, it is permissible for the callback to block.
+ *
+ * One iteration of phase I and II processes some subset of the database.
+ * Since INs are traversed in tree order in phase I, this subset is described
+ * by a range of keys. When performing the next iteration, the IN traversal is
+ * restarted at the highest key that was processed by the previous iteration.
+ * The previous highest key is used to avoid duplication of entries, since some
+ * overlap between iterations may occur.
+ *
+ * LN and BIN modes
+ * ----------------
+ * As mentioned above, we accumulate LSNs for either LNs or BINs. The BIN
+ * accumulation mode provides an optimization for key-only traversals and for
+ * all traversals of duplicate DBs (in a dup DB, the data is included in the
+ * key). In these cases we never need to fetch the LN, so we can sort and
+ * fetch the BIN LSNs instead. This supports at least some types of traversals
+ * that are efficient even when not all BINs are in the JE cache.
+ *
+ * We must only accumulate LN or BIN LSNs, never both, and never the LSNs of
+ * other INs (above level 1). If we broke this rule, there would be no way to
+ * constrain memory usage in our non-recursive approach, since we could not
+ * easily predict in advance how much memory would be needed to fetch the
+ * nested nodes. Even if we were able to predict the memory needed, it would
+ * be of little advantage to sort and fetch a small number of higher level
+ * nodes, only to accumulate the LSNs of their descendants (which are much
+ * larger in number). The much smaller number of higher level nodes would
+ * likely be fetched via random IO in a large data set anyway.
+ *
+ * The above justification also applies to the algorithm we use in LN mode, in
+ * which we accumulate and fetch only LN LSNs. In this mode we always fetch
+ * BINs explicitly (not in LSN sorted order), if they are not resident, for the
+ * reasons stated above.
+ *
+ * Furthermore, in BIN mode we must account for BIN-deltas. Phase I must keep
+ * a copy of any BIN-deltas encountered in the cache, and phase II must make
+ * two passes over the accumulated LSNs: one pass to load the deltas and
+ * another to load the full BINs and merge the previously loaded deltas.
+ * Unfortunately we must budget memory for the deltas during phase I; since
+ * most BIN LSNs are for deltas, not full BINs, we assume that we will need to
+ * temporarily save a delta for each LSN. This two-pass approach differs from
+ * the recursive algorithm we rejected above in two respects, however: 1) we
+ * know in advance (roughly, anyway) how much memory we will need for both
+ * passes, and 2) the number of LSNs fetched in each pass is roughly the same.
+ *
+ * Data Lag
+ * --------
+ * In phase I, as an exception to what was said above, we sometimes process
+ * nodes that are resident in the Btree (in the JE cache) if this is possible
+ * without blocking. The primary intention of this is to provide more recent
+ * data to the callback. When accumulating BINs, if the BIN is dirty then
+ * fetching its LSN later means that some recently written LNs will not be
+ * included. Therefore, if the callback would not block, we process the keys
+ * in a dirty BIN during phase I. Likewise, when accumulating LNs in a
+ * deferred-write database, we process dirty LNs if the callback would not
+ * block.
When accumulating LN LSNs for a non-deferred-write database, we can + * go further and process all resident LNs, as long as the callback would not + * block, since we know that no LNs are dirty. + * + * In spite of our attempt to process resident nodes, we may not be able to + * process all of them if doing so would cause the callback to block. When we + * can't process a dirty, resident node, the information added (new, deleted or + * updated records) since the node was last flushed will not be visible to the + * callback. + * + * In other words, the data presented to the callback may lag back to the time + * of the last checkpoint. It cannot lag further back than the last + * checkpoint, because: 1) the scan doesn't accumulate LSNs any higher than the + * BIN level, and 2) checkpoints flush all dirty BINs. For a DOS, the user may + * decrease the likelihood of stale data by increasing the DOS queue size, + * decreasing the LSN batch size, decreasing the memory limit, or performing a + * checkpoint immediately before the start of the scan. Even so, it may be + * impossible to guarantee that all records written at the start of the scan + * are visible to the callback. + */ +public class DiskOrderedScanner { + + /** + * Interface implemented by the callback. + */ + interface RecordProcessor { + + /** + * Process a key-data pair, in user format (dup DB keys are already + * split into key and data). + * + * @param key always non-null. + * @param data is null only in keys-only mode. + */ + void process( + int dbIdx, + byte[] key, + byte[] data, + int expiration, + boolean expirationInHours); + + /** + * Returns whether process() can be called nRecords times, immediately + * after calling this method, without any possibility of blocking. + * For example, with DOS this method returns true if the DOS queue has + * nRecords or more remaining capacity. + */ + boolean canProcessWithoutBlocking(int nRecords); + + int getCapacity(); + + void checkShutdown(); + } + + /* + * + */ + private static class DBContext { + + final int dbIdx; + + final DatabaseImpl dbImpl; + + final Map dbFileSummaries; + + boolean done = false; + + byte[] prevEndingKey = null; + byte[] newEndingKey = null; + + long lastBinLsn = DbLsn.NULL_LSN; + boolean safeToUseCachedDelta = false; + + IN parent = null; + boolean parentIsLatched; + int pidx = 0; + long plsn; + byte[] pkey; + + boolean checkLevel2Keys = true; + + byte[] binKey; + boolean reuseBin = false; + + DBContext(int dbIdx, DatabaseImpl db) { + this.dbIdx = dbIdx; + dbImpl = db; + dbFileSummaries = dbImpl.cloneDbFileSummaries(); + } + } + + /* + * WeakBinRefs are used to reduce the memory consumption for BIN deltas + * that are found in the je cache during phase 1. In an older implementation + * of DOS all such deltas were copied locally and the copies retained + * until phase 2b, when they would be merged with their associated full + * bins. Obviously this could consume a lot of memory. + * + * In the current implementation, dirty deltas are still treated the old + * way, i.e. copied (see below for a justification). But for clean deltas, + * a WeakBinRef is created when, during phase 1, such a delta is found in + * the je cache (the WeakBinRef is created while the delta is latched). 
At
+ * creation time, the WeakBinRef stores (a) a WeakReference to the BIN obj,
+ * (b) the lsn found in the parent slot for this bin (the on-disk image
+ * pointed to by this lsn must be the same as the latched, in-memory image,
+ * given that we use WeakBinRefs for clean deltas only), (c) the lsn of
+ * the full bin associated with this bin delta (copied out from the delta
+ * itself), and (d) the average size of deltas for the database and log
+ * file that the delta belongs to.
+ *
+ * DiskOrderedScanner.binDeltas is an ArrayList of Object refs, pointing
+ * to either WeakBinRefs (for clean deltas) or BINs (for copies of dirty
+ * deltas). The list is built during phase 1 and is used during phase 2b
+ * to merge the referenced deltas with their associated full BINs. The
+ * list is first sorted by full-bin LSN so that the full bins are fetched
+ * in disk order.
+ *
+ * Shortly after a WeakBinRef is created, its associated bin gets unlatched
+ * and remains unlatched until the WeakBinRef is used in phase 2b. As a
+ * result, anything can happen to the bin in between. The bin may be evicted
+ * and then garbage-collected, or it may be converted to a full BIN, split,
+ * logged, and mutated to a delta again. If the BIN obj gets GC-ed,
+ * this.get() will be null when we process this WeakBinRef in phase 2b.
+ * In this case, to avoid doing one or two random I/Os in the middle of
+ * sequential I/Os, this.binLsn is saved in a set of "deferred LSNs"
+ * (see below) to be processed in a subsequent iteration. If the BIN obj
+ * is still accessible, we compare its current full-bin LSN with the one
+ * saved in this.fullBinLsn. If they are different, we again add
+ * this.binLsn to the deferred-LSNs set. Again this is done to avoid
+ * disturbing the sequential I/O (we could use the current full-bin LSN to
+ * merge the delta with the correct full bin, but this would require a
+ * random I/O), but also because the bin could have split, and there is no
+ * easy way to find the other bin(s) where slots from this bin were moved
+ * to. Note that detecting splits by comparing the 2 full-bin LSNs is a
+ * conservative approach that relies on splits being logged immediately.
+ *
+ * WeakReferences are used to ref cached clean deltas in order to avoid
+ * counting the size of the delta in the DOS budget or having to pin the
+ * bin for a long period of time. Of course, if this.binLsn needs to be
+ * deferred for the reasons mentioned above, then an average delta size will
+ * be added to the memory usage for the DOS as well as the je cache.
+ *
+ * What about dirty cached deltas? We could treat them the same as clean
+ * deltas. However, their unsaved updates could have happened before the
+ * start of the DOS, and we would lose those updates if, instead of
+ * processing the original bin delta, we had to fetch this.binLsn from
+ * disk. That would violate the DOS contract that the returned data are
+ * not older than the start of the DOS.
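+ *
+ * In outline, the validity check applied to a WeakBinRef in phase 2b is
+ * (a sketch; deferLsn stands in for the DeferredLsnsBatch bookkeeping
+ * below, it is not a real method):
+ *
+ *   BIN delta = weakRef.get();
+ *   if (delta == null || weakRef.fullBinLsn != delta.getLastFullLsn()) {
+ *       deferLsn(weakRef.binLsn); // re-fetch in a later iteration
+ *   } else {
+ *       // merge the delta with its full bin, fetched in LSN order
+ *   }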
+ */ + public static class WeakBinRef extends WeakReference { + + final long binLsn; + final long fullBinLsn; + final int memSize; + + /* For Sizeof util program */ + public WeakBinRef() { + super(null); + binLsn = 0; + fullBinLsn = 0; + memSize = 0; + } + + WeakBinRef( + DiskOrderedScanner scanner, + DBContext ctx, + long lsn, + BIN delta) { + + super(delta); + binLsn = lsn; + fullBinLsn = delta.getLastFullLsn(); + + assert(lsn != delta.getLastFullLsn()); + + if (lsn != delta.getLastFullLsn()) { + memSize = getDeltaMemSize(ctx, lsn); + } else { + memSize = 0; + } + } + } + + public static class OffHeapBinRef { + + final int ohBinId; + final long binLsn; + final long fullBinLsn; + final int memSize; + + /* For Sizeof util program */ + public OffHeapBinRef() { + ohBinId = 0; + binLsn = 0; + fullBinLsn = 0; + memSize = 0; + } + + OffHeapBinRef( + DiskOrderedScanner scanner, + DBContext ctx, + long binLsn, + long fullBinLsn, + int ohBinId) { + + this.ohBinId = ohBinId; + this.binLsn = binLsn; + this.fullBinLsn = fullBinLsn; + memSize = getDeltaMemSize(ctx, binLsn); + } + } + + /* + * A DeferredLsnsBatch obj stores a set of lsns whose processing cannot be + * done in the current iteration, and as a result, must be deferred until + * one of the subsequent iterations. + * + * The DeferredLsnsBatch obj also stores the total (approximate) memory + * that will be consumed when, during a subsequent phase 1a, for each bin + * pointed-to by these lsns, the bin is fetched from disk and, if it is a + * delta, stored in memory until it is merged with its associated full bin + * (which is done in phase 2b). + * + * Given that we cannot exceed the DOS memory budget during each iteration, + * no more LSNs are added to a DeferredLsnsBatch once its memoryUsage gets + * >= memoryLimit. Instead, a new DeferredLsnsBatch is created and added + * at the tail of the DiskOrderedScanner.deferredLsns queue. Only the batch + * at the head of the deferredLsns queue can be processed during each + * iteration. So, if we have N DeferredLsnsBatches in the queue, the next + * N iteration will process only deferred lsns (no phase 1 will be done + * during these N iterations). + */ + public static class DeferredLsnsBatch { + + /* + * Fixed overhead for adding an lsn to this batch + */ + final static int LSN_MEM_OVERHEAD = + (MemoryBudget.HASHSET_ENTRY_OVERHEAD + + MemoryBudget.LONG_OVERHEAD); + + final HashSet lsns; + long memoryUsage = 0; + + /* For Sizeof util program */ + public DeferredLsnsBatch() { + this.lsns = new HashSet<>(); + memoryUsage = 0; + } + + DeferredLsnsBatch(DiskOrderedScanner scanner) { + this.lsns = new HashSet<>(); + scanner.addGlobalMemory(SIZEOF_DeferredLsnsBatch); + memoryUsage += SIZEOF_DeferredLsnsBatch; + } + + void free(DiskOrderedScanner scanner) { + scanner.addGlobalMemory(-SIZEOF_DeferredLsnsBatch); + memoryUsage -= SIZEOF_DeferredLsnsBatch; + assert(memoryUsage == 0); + } + + boolean containsLsn(long lsn) { + return lsns.contains(lsn); + } + + /* + * Called during phase 2b, when a WeakBinRef or OffHeapBinRef has to + * be deferred. + */ + boolean addLsn(DiskOrderedScanner scanner, long lsn, int memSize) { + + this.lsns.add(lsn); + + int currentMemConsumption = LSN_MEM_OVERHEAD; + scanner.addGlobalMemory(currentMemConsumption); + + /* + * ref.memSize is the memory that will be needed during a + * subsequent phase 2a to store the delta fetched via ref.binLsn, + * if ref.binLsn is indeed pointing to a delta; 0 if ref.binLsn + * points to a full bin. 
+ */ + int futureMemConsumption = memSize; + + /* For storing ref.binLsn in the lsns array created in phase 2a. */ + futureMemConsumption += 8; + + /* + * For the DeferredDeltaRef created in phase 2a to reference the + * delta fetched via ref.binLsn. + */ + futureMemConsumption += SIZEOF_DeferredDeltaRef; + + memoryUsage += (currentMemConsumption + futureMemConsumption); + + return scanner.accLimitExceeded(memoryUsage, lsns.size()); + } + + /* + * Called during phase 2a after fetching the bin pointed-to by the + * given lsn. + */ + boolean removeLsn( + DiskOrderedScanner scanner, + long lsn, + int memSize) { + + boolean found = lsns.remove(lsn); + + if (found) { + + scanner.addGlobalMemory(-LSN_MEM_OVERHEAD); + + /* + * We don't really need to subtract from this.memoryUsage here. + * It is done only for sanity checking (the assertion in + * this.free()). + */ + int memDelta = LSN_MEM_OVERHEAD; + memDelta += memSize; + memDelta += 8; + memDelta += SIZEOF_DeferredDeltaRef; + memoryUsage -= memDelta; + } + + return found; + } + + void undoLsn( + DiskOrderedScanner scanner, + long lsn, + int memSize) { + + boolean found = lsns.remove(lsn); + assert(found); + + scanner.addGlobalMemory(-LSN_MEM_OVERHEAD); + + int memDelta = LSN_MEM_OVERHEAD; + memDelta += memSize; + memDelta += 8; + memDelta += SIZEOF_DeferredDeltaRef; + memoryUsage -= memDelta; + } + } + + /* + * Wrapper for a BIN ref. Used to distinguish whether the fetching of a + * bin delta during phase 1a was done via a deferred lsn or not. This info + * is needed during phase 2b: after the delta is merged with its associated + * full bin, we need to know whether it is a deferred bin, in which case + * its records will be processed without checking their keys against + * prevEndingKey. + */ + public static class DeferredDeltaRef { + + final BIN delta; + + /* For Sizeof util program */ + public DeferredDeltaRef() { + delta = null; + } + + DeferredDeltaRef(DiskOrderedScanner scanner, BIN delta) { + this.delta = delta; + scanner.addGlobalMemory(SIZEOF_DeferredDeltaRef); + } + + void free(DiskOrderedScanner scanner) { + scanner.addGlobalMemory(-SIZEOF_DeferredDeltaRef); + } + } + + private static final LogEntryType[] LN_ONLY = new LogEntryType[] { + LogEntryType.LOG_INS_LN /* Any LN type will do. 
*/ + }; + + private static final LogEntryType[] BIN_ONLY = new LogEntryType[] { + LogEntryType.LOG_BIN + }; + + private static final LogEntryType[] BIN_OR_DELTA = new LogEntryType[] { + LogEntryType.LOG_BIN, + LogEntryType.LOG_BIN_DELTA, + LogEntryType.LOG_OLD_BIN_DELTA, + }; + + private final static int SIZEOF_JAVA_REF = + MemoryBudget.OBJECT_ARRAY_ITEM_OVERHEAD; + + private final static int SIZEOF_WeakBinRef = + MemoryBudget.DOS_WEAK_BINREF_OVERHEAD; + + private final static int SIZEOF_OffHeapBinRef = + MemoryBudget.DOS_OFFHEAP_BINREF_OVERHEAD; + + private final static int SIZEOF_DeferredDeltaRef = + MemoryBudget.DOS_DEFERRED_DELTAREF_OVERHEAD; + + private final static int SIZEOF_DeferredLsnsBatch = + MemoryBudget.DOS_DEFERRED_LSN_BATCH_OVERHEAD; + + private final static int ACCUMULATED_MEM_LIMIT = 100000; // bytes + + private final static int SUSPENSION_INTERVAL = 50; // in milliseconds + + private final boolean scanSerial; + + private final boolean countOnly; + private final boolean keysOnly; + private final boolean binsOnly; + + private final long lsnBatchSize; + private final long memoryLimit; + + private final EnvironmentImpl env; + + private final RecordProcessor processor; + + private final int numDBs; + + private final DBContext[] dbs; + + private final Map dbid2dbidxMap; + + private final boolean dupDBs; + + private final ArrayList binDeltas; + + private final LSNAccumulator lsnAcc; + + private final LinkedList deferredLsns; + + private long localMemoryUsage = 0; + + private long globalMemoryUsage = 0; + + private long accumulatedMemDelta = 0; + + private long numLsns = 0; + + private volatile int nIterations; + + private TestHook testHook1; + + private TestHook evictionHook; + + private final boolean debug; + + DiskOrderedScanner( + DatabaseImpl[] dbImpls, + RecordProcessor processor, + boolean scanSerial, + boolean binsOnly, + boolean keysOnly, + boolean countOnly, + long lsnBatchSize, + long memoryLimit, + boolean dbg) { + + this.processor = processor; + + env = dbImpls[0].getEnv(); + + dupDBs = dbImpls[0].getSortedDuplicates(); + + this.scanSerial = scanSerial; + + this.countOnly = countOnly; + this.keysOnly = keysOnly || countOnly; + this.binsOnly = binsOnly || dupDBs || keysOnly || countOnly; + + this.lsnBatchSize = lsnBatchSize; + this.memoryLimit = memoryLimit; + + this.debug = dbg; + + numDBs = dbImpls.length; + + dbs = new DBContext[numDBs]; + + dbid2dbidxMap = new HashMap<>(numDBs); + + for (int i = 0; i < numDBs; ++i) { + + dbid2dbidxMap.put(dbImpls[i].getId(), i); + + dbs[i] = new DBContext(i, dbImpls[i]); + } + + lsnAcc = + new LSNAccumulator() { + @Override + void noteMemUsage(long increment) { + addLocalMemory(increment); + addGlobalMemory(increment); + } + }; + + if (this.binsOnly) { + binDeltas = new ArrayList<>(); + deferredLsns = new LinkedList<>(); + } else { + binDeltas = null; + deferredLsns = null; + } + } + + int getNIterations() { + return nIterations; + } + + /* + * For unit testing only. 
+ */ + long getNumLsns() { + return numLsns; + } + + private void addLocalMemory(long delta) { + localMemoryUsage += delta; + assert(localMemoryUsage >= 0); + } + + private void addGlobalMemory(long delta) { + + globalMemoryUsage += delta; + assert(globalMemoryUsage >= 0); + + accumulatedMemDelta += delta; + if (accumulatedMemDelta > ACCUMULATED_MEM_LIMIT || + accumulatedMemDelta < -ACCUMULATED_MEM_LIMIT) { + + env.getMemoryBudget().updateDOSMemoryUsage(accumulatedMemDelta); + accumulatedMemDelta = 0; + } + } + + + /** + * Returns whether phase I should terminate because the memory or LSN batch + * size limit has been exceeded. + * + * This method need not be called every LN processed; exceeding the + * limits by a reasonable amount should not cause problems, since the + * limits are very approximate measures anyway. It is acceptable to check + * for exceeded limits once per BIN, and this is currently how it is used. + */ + private boolean accLimitExceeded() { + return accLimitExceeded(localMemoryUsage, numLsns); + } + + private boolean accLimitExceeded(long mem, long nLsns) { + return (mem >= memoryLimit || nLsns > lsnBatchSize); + } + + /** + * Called to perform a disk-ordered scan. Returns only when the scan is + * complete; i.e, when all records in the database have been passed to the + * callback. + */ + void scan(final String protectedFilesNamePrefix, + final long protectedFilesId) { + + final FileProtector.ProtectedFileSet protectedFileSet = + env.getFileProtector().protectActiveFiles( + protectedFilesNamePrefix + "-" + protectedFilesId); + try { + if (scanSerial) { + scanSerial(); + } else { + scanInterleaved(); + } + + assert (globalMemoryUsage == MemoryBudget.TREEMAP_OVERHEAD) : + "MemoryUsage is wrong at DOS end: " + globalMemoryUsage; + + } finally { + env.getFileProtector().removeFileProtection(protectedFileSet); + final long budgeted = globalMemoryUsage - accumulatedMemDelta; + env.getMemoryBudget().updateDOSMemoryUsage(-budgeted); + } + } + + private void scanSerial() { + + int dbidx = 0; + boolean overBudget; + DBContext ctx = null; + + while (true) { + + /* + * Phase I. + */ + try { + /* + * Skip phase 1 if we have already exceeded the DOS budget + * due to delta lsns deferred from phase 2b. + */ + overBudget = accLimitExceeded(); + + while (dbidx < numDBs && !overBudget) { + + ctx = dbs[dbidx]; + + if (ctx.parent == null) { + getFirstIN(ctx, ctx.prevEndingKey); + } + + if (ctx.done) { + ++dbidx; + assert(ctx.parent == null); + continue; + } + + while (ctx.parent != null) { + + if (binsOnly) { + accumulateBINs(ctx); + } else { + accumulateLNs(ctx); + } + + if (accLimitExceeded()) { + overBudget = true; + break; + } + + processor.checkShutdown(); + + getNextIN(ctx); + + if (ctx.done) { + ++dbidx; + assert(ctx.parent == null); + } + } + } + + } finally { + if (ctx.parent != null && ctx.parentIsLatched) { + ctx.parent.releaseLatchIfOwner(); + } + } + + /* + * Phase II. + */ + if (binsOnly) { + fetchAndProcessBINs(); + } else { + fetchAndProcessLNs(); + } + + /* + * Check if DOS is done; if not prepare for next iteration + */ + if (dbidx >= numDBs && (!binsOnly || deferredLsns.isEmpty())) { + break; + } + + initNextIteration(); + } + } + + private void scanInterleaved() { + + boolean done; + boolean overBudget; + + while (true) { + + /* + * Phase I. + */ + try { + do { + done = true; + overBudget = false; + + /* + * Skip phase 1 if we have already exceeded the DOS budget + * due to delta lsns deferred from phase 2b. 
+ */ + if (accLimitExceeded()) { + overBudget = true; + break; + } + + for (int dbidx = 0; dbidx < numDBs; ++dbidx) { + + DBContext ctx = dbs[dbidx]; + + if (ctx.done) { + continue; + } + + if (ctx.parent == null) { + getFirstIN(ctx, ctx.prevEndingKey); + } else if (numDBs > 1) { + resumeParent(ctx); + } + + if (ctx.done) { + continue; + } + + done = false; + + if (binsOnly) { + accumulateBINs(ctx); + } else { + accumulateLNs(ctx); + } + + if (accLimitExceeded()) { + overBudget = true; + break; + } + + processor.checkShutdown(); + + if (ctx.pidx >= ctx.parent.getNEntries()) { + getNextIN(ctx); + if (ctx.done) { + continue; + } + } + + if (numDBs > 1) { + releaseParent(ctx); + } + } + } while (!done && !overBudget); + + } finally { + for (int dbidx = 0; dbidx < numDBs; ++dbidx) { + if (dbs[dbidx].parent != null && + dbs[dbidx].parentIsLatched) { + dbs[dbidx].parent.releaseLatchIfOwner(); + } + } + } + + if (debug) { + if (overBudget) { + System.out.println( + "Finished Phase 1." + nIterations + + " because DOS budget exceeded." + + " localMemoryUsage = " + localMemoryUsage + + " globalMemoryUsage = " + globalMemoryUsage); + } else { + System.out.println( + "Finished Phase 1." + nIterations + + " because no more records to scan." + + " localMemoryUsage = " + localMemoryUsage + + " globalMemoryUsage = " + globalMemoryUsage); + } + } + + TestHookExecute.doHookIfSet(evictionHook); + + /* + * Phase II. + */ + + if (binsOnly) { + fetchAndProcessBINs(); + } else { + fetchAndProcessLNs(); + } + + if (debug) { + System.out.println( + "Finished Phase 2." + nIterations + + " localMemoryUsage = " + localMemoryUsage + + " globalMemoryUsage = " + globalMemoryUsage); + } + + /* + * Check if DOS is done; if not prepare for next iteration. + */ + if (done && (!binsOnly || deferredLsns.isEmpty())) { + break; + } + + initNextIteration(); + } + + if (debug) { + System.out.println("Producer done in " + nIterations + + " iterations"); + } + } + + private void initNextIteration() { + + for (int i = 0; i < numDBs; ++i) { + + DBContext ctx = dbs[i]; + + ctx.parent = null; + ctx.parentIsLatched = false; + ctx.checkLevel2Keys = true; + + ctx.prevEndingKey = ctx.newEndingKey; + ctx.safeToUseCachedDelta = false; + + /* + * If this is a bin-only DOS and phase 1 was actually executed + * during the current iteration, see whether prevEndingKey must + * be moved forward. + */ + if (binsOnly && ctx.lastBinLsn != DbLsn.NULL_LSN) { + + for (DeferredLsnsBatch batch : deferredLsns) { + + if (batch.containsLsn(ctx.lastBinLsn)) { + + BIN bin = (BIN)fetchItem( + ctx.lastBinLsn, BIN_OR_DELTA); + + int memSize = 0; + + if (bin.isBINDelta(false)) { + bin = bin.reconstituteBIN(ctx.dbImpl); + memSize = getDeltaMemSize(ctx, ctx.lastBinLsn); + } + + processBINInternal(ctx, bin, false); + + batch.undoLsn(this, ctx.lastBinLsn, memSize); + + assert(Key.compareKeys(ctx.newEndingKey, + ctx.prevEndingKey, + ctx.dbImpl.getKeyComparator()) + >= 0); + + ctx.prevEndingKey = ctx.newEndingKey; + + if (debug) { + System.out.println( + "LSN " + ctx.lastBinLsn + + " for bin " + bin.getNodeId() + + " was the last bin lsn seen during Phase 1." + + nIterations + " and it got deferred during " + + "Phase " + "2." + nIterations + ". Moved " + + "prevEndingKey forward"); + } + + break; + } + } + } + + /* + * Set lastBinLsn to NULL_LSN as a way to indicate that phase 1 + * has not started yet. If phase 1 is not skipped during the next + * iteration, lastBinLsn will be set to a real lsn. 
+ */ + ctx.lastBinLsn = DbLsn.NULL_LSN; + } + + localMemoryUsage = lsnAcc.getMemoryUsage(); + numLsns = 0; + + if (binsOnly && !deferredLsns.isEmpty()) { + DeferredLsnsBatch batch = deferredLsns.getFirst(); + numLsns = batch.lsns.size(); + addLocalMemory(batch.memoryUsage); + } + + nIterations += 1; + } + + /** + * Implements guts of phase I in binsOnly mode. Accumulates BIN deltas and + * BIN LSNs for the children of the given level 2 IN parent, and processes + * resident BINs under certain conditions; see algorithm at top of file. + */ + private void accumulateBINs(DBContext ctx) { + + OffHeapCache ohCache = env.getOffHeapCache(); + + while (ctx.pidx < ctx.parent.getNEntries()) { + + /* Skip BINs that were processed on the previous iteration. */ + if (skipParentSlot(ctx)) { + ++ctx.pidx; + continue; + } + + /* + * A cached delta must be copied it if it may contain any keys <= + * prevEndingKey. Otherwise, if it is the last bin processed in + * the previous iteration, it will be processed again if we take + * a weak ref to the delta and this weak ref is then cleared and + * the delta lsn being deferred as a result. Because no key checking + * is done for deferred bins, processing the bin again would result + * in duplicate records being returned to the app. + */ + if (ctx.prevEndingKey == null || + (!ctx.safeToUseCachedDelta && + ctx.pidx > 0 && + Key.compareKeys(ctx.prevEndingKey, + ctx.parent.getKey(ctx.pidx), + ctx.dbImpl.getKeyComparator()) < 0)) { + + ctx.safeToUseCachedDelta = true; + } + + boolean waitForConsumer = false; + int binNEntries = 0; + long binLsn = ctx.parent.getLsn(ctx.pidx); + int ohBinId = ctx.parent.getOffHeapBINId(ctx.pidx); + boolean ohBinPri2 = ctx.parent.isOffHeapBINPri2(ctx.pidx); + + ctx.lastBinLsn = binLsn; + + BIN bin = (BIN)ctx.parent.getTarget(ctx.pidx); + + if (bin != null) { + bin.latch(CacheMode.UNCHANGED); + } + + try { + if (bin != null || ohBinId >= 0) { + + boolean isBinDelta; + OffHeapCache.BINInfo ohInfo = null; + + if (bin != null) { + isBinDelta = bin.isBINDelta(); + } else { + ohInfo = ohCache.getBINInfo(env, ohBinId); + isBinDelta = ohInfo.isBINDelta; + } + + if (isBinDelta) { + + if (bin != null) { + if (bin.getDirty() || !ctx.safeToUseCachedDelta) { + addDirtyDeltaRef(bin.cloneBINDelta()); + } else { + addCleanDeltaRef(ctx, binLsn, bin); + } + + } else { + if (ctx.parent.isOffHeapBINDirty(ctx.pidx) || + !ctx.safeToUseCachedDelta) { + + addDirtyDeltaRef( + ohCache.loadBIN(env, ohBinId)); + } else { + addCleanDeltaOffHeapRef( + ctx, binLsn, ohInfo.fullBINLsn, + ohBinId, ohBinPri2); + } + } + + ++ctx.pidx; + if (scanSerial) { + continue; + } else { + return; + } + } + + if (bin == null) { + bin = ohCache.loadBIN(env, ohBinId); + bin.latchNoUpdateLRU(ctx.dbImpl); + } + + binNEntries = bin.getNEntries(); + if (binNEntries == 0) { + ++ctx.pidx; + continue; + } + + /* + * Skip the bin if its last key is <= prevEndingKey. + * Normally, this should happen only if the bin is the last + * bin from the previous iteration and the 1st bin of the + * current iteration. + */ + if (ctx.prevEndingKey != null) { + int cmp = Key.compareKeys( + bin.getKey(binNEntries - 1), + ctx.prevEndingKey, + ctx.dbImpl.getKeyComparator()); + + if (cmp <= 0) { + ++ctx.pidx; + continue; + } + } + } + + /* + * If the BIN is not resident, accumulate this LSN for later + * processing during phase 2. During phase 2a, if the lsn + * points to a delta, that delta will be fetched from disk and + * stored until all lsns accumulated here have been fetched. 
+ * Then, in phase 2b the full BINs corresponding to the stored + * deltas will be read in lsn order. So, we must budget memory + * for these deltas. Since most lsns point to deltas, we assume + * that they all point to deltas. + */ + if (bin == null || processor.getCapacity() < binNEntries) { + + lsnAcc.add(binLsn); + ++numLsns; + + addLocalMemory(8 + getDeltaMemSize(ctx, binLsn)); + + if (debug) { + System.out.println( + "Phase 1." + nIterations + + ": accumulated bin lsn: " + binLsn); + } + + } else if (processor.canProcessWithoutBlocking(binNEntries)) { + + if (debug) { + System.out.println( + "Phase 1." + nIterations + + ": Processing bin: " + bin.getNodeId()); + } + + processBINInternal(ctx, bin, false); + + } else { + if (debug) { + System.out.println( + "Phase 1." + nIterations + + ": Producer must wait before it can process bin " + + bin.getNodeId()); + } + + waitForConsumer = true; + ctx.binKey = bin.getKey(0); + } + + } finally { + if (bin != null) { + bin.releaseLatch(); + } + } + + if (waitForConsumer) { + waitForConsumer(ctx, binNEntries); + } else { + ++ctx.pidx; + if (!scanSerial) { + return; + } + } + } /* parent slot iteration */ + } + + void addCleanDeltaRef(DBContext ctx, long binLsn, BIN bin) { + + binDeltas.add(new WeakBinRef(this, ctx, binLsn, bin)); + + bin.updateLRU(CacheMode.DEFAULT); + + /* + * For both the local and global memory, we count the size of the + * WeakBinRef obj, plus the ref to it in this.binDeltas. For + * the local memory we count 2 additional overheads: (a) another + * ref in the deltaArray allocated at the start of phase 2a and + * (b) we assume the ref will have to be deferred during phase 2b + * and add the memory taken to store a deferred lsn. + */ + addLocalMemory(SIZEOF_WeakBinRef + + 2 * SIZEOF_JAVA_REF + + DeferredLsnsBatch.LSN_MEM_OVERHEAD); + + addGlobalMemory(SIZEOF_WeakBinRef + SIZEOF_JAVA_REF); + + if (debug) { + System.out.println( + "Phase 1." + nIterations + + ": added weak bin ref for bin delta " + + bin.getNodeId() + " at LSN = " + binLsn); + } + } + + void addCleanDeltaOffHeapRef(DBContext ctx, + long binLsn, + long fullBINLsn, + int ohBinId, + boolean ohBinPri2) { + + binDeltas.add(new OffHeapBinRef( + this, ctx, binLsn, fullBINLsn, ohBinId)); + + env.getOffHeapCache().moveBack(ohBinId, ohBinPri2); + + /* + * For both the local and global memory, we count the size of the + * OffHeapBinRef obj, plus the ref to it in this.binDeltas. For + * the local memory we count 2 additional overheads: (a) another + * ref in the deltaArray allocated at the start of phase 2a and + * (b) we assume the ref will have to be deferred during phase 2b + * and add the memory taken to store a deferred lsn. + */ + addLocalMemory(SIZEOF_OffHeapBinRef + + 2 * SIZEOF_JAVA_REF + + DeferredLsnsBatch.LSN_MEM_OVERHEAD); + + addGlobalMemory(SIZEOF_OffHeapBinRef + SIZEOF_JAVA_REF); + + if (debug) { + System.out.println( + "Phase 1." + nIterations + + ": added off-heap bin ref for bin delta ID " + + ohBinId + " at LSN = " + binLsn); + } + } + + void addDirtyDeltaRef(BIN delta) { + + binDeltas.add(delta); + + /* + * For the local mem, we account for the copy of the delta plus 2 refs + * to it: one in this.dirtyBinDeltas, and another in the deltaArray + * allocated at the start of phase 2a. For the global memory, the 2nd + * ref will be counted when the deltaArray is actually allocated. 
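+         *
+         * For example, with illustrative numbers: a copied delta whose
+         * getInMemorySize() is 1024 adds 1024 + 2 * SIZEOF_JAVA_REF bytes
+         * to the local budget, but only 1024 + SIZEOF_JAVA_REF bytes to
+         * the global budget at this point.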
+ */ + addLocalMemory(delta.getInMemorySize() + 2 * SIZEOF_JAVA_REF); + + addGlobalMemory(delta.getInMemorySize() + SIZEOF_JAVA_REF); + + if (debug) { + System.out.println( + "Phase 1." + nIterations + + ": copied dirty or unsafe bin delta " + delta.getNodeId()); + } + } + + /** + * Implements guts of phase I in LNs-only mode (binsOnly is false). + * Accumulates LN LSNs for the BIN children of the given level 2 IN parent, + * and processes resident LNs under certain conditions; see algorithm at + * top of file. + */ + private void accumulateLNs(DBContext ctx) { + + OffHeapCache ohCache = env.getOffHeapCache(); + DatabaseImpl dbImpl = ctx.dbImpl; + + IN parent = ctx.parent; + assert(parent != null); + + BIN bin = null; + + ctx.reuseBin = false; + boolean waitForConsumer = false; + + while (ctx.pidx < parent.getNEntries()) { + + /* Skip BINs that were processed on the previous iteration. */ + if (skipParentSlot(ctx)) { + ++ctx.pidx; + continue; + } + + long plsn = parent.getLsn(ctx.pidx); + + /* + * Explicitly fetch the BIN if it is not resident, merging it with + * a delta if needed. Do not call currParent.fetchIN(i) or loadIN + * because we don't want the BIN to be attached to the in-memory + * tree. + */ + if (!ctx.reuseBin) { + bin = (BIN)parent.getTarget(ctx.pidx); + } + + if (bin == null) { + final Object item; + final int ohBinId = parent.getOffHeapBINId(ctx.pidx); + if (ohBinId >= 0) { + item = ohCache.loadBIN(env, ohBinId); + } else { + item = fetchItem(plsn, BIN_OR_DELTA); + } + + if (item instanceof BIN) { + bin = (BIN) item; + if (bin.isBINDelta(false)) { + bin = bin.reconstituteBIN(dbImpl); + } else { + bin.setDatabase(dbImpl); + } + } else { + final OldBINDelta delta = (OldBINDelta) item; + bin = (BIN) fetchItem(delta.getLastFullLsn(), BIN_ONLY); + delta.reconstituteBIN(dbImpl, bin); + } + + bin.latchNoUpdateLRU(dbImpl); + + } else { + + bin.latchNoUpdateLRU(); + + if (bin.isBINDelta()) { + final BIN fullBIN; + try { + fullBIN = bin.reconstituteBIN(dbImpl); + } finally { + bin.releaseLatch(); + } + bin = fullBIN; + bin.latchNoUpdateLRU(dbImpl); + } + } + + try { + int bidx = 0; + + if (waitForConsumer) { + waitForConsumer = false; + ctx.reuseBin = false; + + bidx = bin.findEntry(ctx.binKey, true, false); + + boolean exact = + (bidx >= 0 && ((bidx & IN.EXACT_MATCH) != 0)); + + if (exact) { + bidx = (bidx & ~IN.EXACT_MATCH); + } else { + ++bidx; + } + } + + boolean checkBinKeys = isBinProcessedBefore(ctx, bin); + + for (; bidx < bin.getNEntries(); ++bidx) { + + ctx.binKey = bin.getKey(bidx); + + if (skipSlot(ctx, bin, bidx, checkBinKeys)) { + continue; + } + + LN ln = (LN) bin.getTarget(bidx); + + /* + * Accumulate LSNs of non-resident, non-embedded LNs + */ + if (ln == null && !bin.isEmbeddedLN(bidx)) { + + ln = ohCache.loadLN(bin, bidx, CacheMode.UNCHANGED); + + if (ln == null) { + + if (!DbLsn.isTransientOrNull(bin.getLsn(bidx))) { + lsnAcc.add(bin.getLsn(bidx)); + ++numLsns; + addLocalMemory(8); + } + + continue; + } + } + + /* + * LN is resident or embedded. Process it now unless the + * queue is full. + */ + if (processor.canProcessWithoutBlocking(1)) { + + byte[] data = (ln != null ? + ln.getData() : bin.getData(bidx)); + + processRecord( + ctx, ctx.binKey, data, + bin.getExpiration(bidx), + bin.isExpirationInHours()); + continue; + } + + /* + * LN is resident or embedded but the queue is full. We + * will suspend the producer and try again after the queue + * has free slots. 
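+ *
+ * In outline, the suspension is handled by waitForConsumer, which
+ * pins and unlatches the parent and then loops as in this simplified
+ * sketch of the code further down in this file:
+ *
+ *   while (!processor.canProcessWithoutBlocking(free)) {
+ *       synchronized (processor) {
+ *           processor.wait(SUSPENSION_INTERVAL);
+ *           processor.checkShutdown();
+ *       }
+ *   }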
+ */ + waitForConsumer = true; + break; + } /* BIN slot iteration */ + } finally { + bin.releaseLatch(); + } + + if (waitForConsumer) { + waitForConsumer(ctx, 0); + parent = ctx.parent; + } else { + ++ctx.pidx; + if (!scanSerial) { + return; + } + } + } /* parent slot iteration */ + } + + boolean skipParentSlot(DBContext ctx) { + + if (ctx.checkLevel2Keys && + ctx.prevEndingKey != null && + ctx.pidx + 1 < ctx.parent.getNEntries() && + Key.compareKeys(ctx.prevEndingKey, + ctx.parent.getKey(ctx.pidx + 1), + ctx.dbImpl.getKeyComparator()) >= 0) { + return true; + } + + ctx.checkLevel2Keys = false; + return false; + } + + private void waitForConsumer(DBContext ctx, int binNEntries) { + + releaseParent(ctx); + + try { + int minFree = (binsOnly ? binNEntries : 1); + int free = Math.max(minFree, processor.getCapacity() / 5); + + while (!processor.canProcessWithoutBlocking(free)) { + synchronized (processor) { + processor.wait(SUSPENSION_INTERVAL); + processor.checkShutdown(); + } + } + } catch (InterruptedException IE) { + ctx.parent.unpin(); + throw new ThreadInterruptedException(ctx.dbImpl.getEnv(), IE); + } catch (Error E) { + ctx.parent.unpin(); + throw E; + } + + resumeParent(ctx); + } + + void releaseParent(DBContext ctx) { + + ctx.plsn = ctx.parent.getLsn(ctx.pidx); + ctx.pkey = ctx.parent.getKey(ctx.pidx); + + ctx.parent.pin(); + ctx.parent.releaseLatch(); + ctx.parentIsLatched = false; + } + + void resumeParent(DBContext ctx) { + + ctx.parent.latchShared(); + ctx.parentIsLatched = true; + + ctx.parent.unpin(); + + /* + * See "big" comment in IN.fetchINWithNoLatch() for an + * explanation of these conditions. + */ + if (ctx.pidx >= ctx.parent.getNEntries() || + ctx.plsn != ctx.parent.getLsn(ctx.pidx) || + (ctx.dbImpl.isDeferredWriteMode() && + ctx.parent.getTarget(ctx.pidx) != null)) { + + TestHookExecute.doHookIfSet(testHook1); + + ctx.pidx = ctx.parent.findEntry(ctx.pkey, false, true/*exact*/); + + /* + * If we cannot re-establish the position in currParent, we search + * the tree for the parent of the BIN that should contain binKey. + * + * Note: The last-slot key is always good, because we are basically + * searching for keys >= pkey. The 1st-slot key is never good + * because it may cover keys that are < pkey. + */ + if (ctx.pidx <= 0) { + ctx.parent.releaseLatch(); + ctx.parentIsLatched = false; + + TestHookExecute.doHookIfSet(testHook1); + + getFirstIN(ctx, ctx.binKey); + + /* + * We know for sure that binKey is contained in currParent, + * so the 1st and last lost slots are safe. + */ + ctx.pidx = ctx.parent.findEntry(ctx.binKey, false, false); + } + } else if (ctx.plsn == ctx.parent.getLsn(ctx.pidx)) { + ctx.reuseBin = true; + } + } + + /** + * Implements guts of phase II in binsOnly mode. + */ + private void fetchAndProcessBINs() { + + /* + * Phase 2a + */ + + /* + * Create and sort an array of LSNs out of the LSNs gathered in phase 1 + * of the current iteration (stored in the LSNAccumulator) and the LSNs + * deferred from previous iterations (stored in deferredLsns). Only the + * 1st batch of LSNs in deferredLsns is used here, because using more + * deferred LSNs (if any) would exceed the DOS budget. 
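+ *
+ * In outline, the merge below is (a simplified sketch of the code
+ * that follows):
+ *
+ *   long[] lsns = new long[nAccLsns + nDeferredLsns];
+ *   lsnAcc.getLSNs(lsns, 0);               // phase 1 LSNs
+ *   // append deferredBatch.lsns, the 1st deferred batch only
+ *   Arrays.sort(lsns);                     // fetch in log order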
+ */ + int nAccLsns = lsnAcc.getNTotalEntries(); + int nDeferredLsns = 0; + DeferredLsnsBatch deferredBatch = null; + DeferredLsnsBatch nextDeferredBatch = null; + + if (!deferredLsns.isEmpty()) { + deferredBatch = deferredLsns.removeFirst(); + nDeferredLsns = deferredBatch.lsns.size(); + } + + long[] lsns = new long[nAccLsns + nDeferredLsns]; + + addGlobalMemory(lsns.length * 8); + + lsnAcc.getLSNs(lsns, 0); // releases the mem held by the accumulator + + int nLsns = nAccLsns; + + if (deferredBatch != null) { + + for (Long lsn : deferredBatch.lsns) { + + if (debug) { + System.out.println( + "Phase 2." + nIterations + " Found deferred LSN: " + + lsn); + } + + lsns[nLsns] = lsn; + ++nLsns; + } + } + + if (debug) { + System.out.println( + "Phase 2." + nIterations + + " Num LSNs to read during phase 2a: " + lsns.length); + } + + Arrays.sort(lsns); + + /* + * Create an array of delta refs. This array will be sorted later by + * full BIN LSN in order to read the corresponding full bins in disk + * order and merge them with the deltas. We populate the array from + * 3 sources: (a) the WaekBinRefs collected for clean cached deltas + * during phase 1, (b) the copies of dirty cached deltas found during + * phase 1, and (c) by reading from the log the bins whose LSN is in + * the "lsns" array, and adding to the deltaArray each such bin that + * is indeed a delta. + * + * The size of the deltaArray is pre-computed based on the assumption + * that all the LSNs in "lsns" point to deltas and all the weak bin + * refs in this.cleanBinDeltas are still deltas. + * + * Note that the elements of the deltaArray may be type OldBINDelta, or + * BIN, or DeferredDeltaRef, or WeakBinRef. + */ + int nDeltas = binDeltas.size(); + + final Object[] deltaArray = new Object[nDeltas + lsns.length]; + + addGlobalMemory(deltaArray.length * SIZEOF_JAVA_REF); + + for (int i = 0; i < nDeltas; ++i) { + deltaArray[i] = binDeltas.get(i); + } + + binDeltas.clear(); + binDeltas.trimToSize(); + addGlobalMemory(-(nDeltas * SIZEOF_JAVA_REF)); + + for (long lsn : lsns) { + + boolean isDeferred; + + LogEntry logEntry = fetchEntry(lsn, BIN_OR_DELTA); + Object item = logEntry.getMainItem(); + + DatabaseId dbId = logEntry.getDbId(); + DBContext ctx = getDbCtx(dbId); + + /* + * For a delta, queue fetching of the full BIN and combine the full + * BIN with the delta when it is processed below. + */ + if (item instanceof OldBINDelta) { + OldBINDelta o = (OldBINDelta) item; + deltaArray[nDeltas] = item; + ++nDeltas; + addGlobalMemory(o.getMemorySize()); + continue; + } + + BIN bin = (BIN) item; + bin.setDatabase(ctx.dbImpl); + + if (bin.isBINDelta(false/*checkLatched*/)) { + + addGlobalMemory(bin.getInMemorySize()); + + int memSize = getDeltaMemSize(ctx, lsn); + isDeferred = (deferredBatch != null && + deferredBatch.removeLsn(this, lsn, memSize)); + + if (debug) { + System.out.println( + "Phase 2a." + nIterations + " Saving bin delta " + + bin.getNodeId() + " fetched via LSN " + lsn); + } + + deltaArray[nDeltas] = (isDeferred ? + new DeferredDeltaRef(this, bin) : + bin); + ++nDeltas; + continue; + } + + isDeferred = (deferredBatch != null && + deferredBatch.removeLsn(this, lsn, 0)); + + if (debug) { + System.out.println( + "Phase 2a." + nIterations + " Processing full bin " + + bin.getNodeId() + " fetched via LSN " + lsn); + } + + /* LSN was for a full BIN, so we can just process it. 
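+ * No delta merge is needed in this case; processBIN latches the
+ * BIN and hands each slot to processRecord via processBINInternal.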
*/ + processBIN(ctx, bin, isDeferred); + } + + addGlobalMemory(-(lsns.length * 8)); + + lsns = null; // allow GC + + if (deferredBatch != null) { + deferredBatch.free(this); + deferredBatch = null; // allow GC + } + + if (debug) { + System.out.println( + "Finished Phase 2a." + nIterations + + " localMemoryUsage = " + localMemoryUsage + + " globalMemoryUsage = " + globalMemoryUsage); + } + + if (nDeltas == 0) { + addGlobalMemory(-(deltaArray.length * SIZEOF_JAVA_REF)); + return; + } + + /* + * Phase 2b + */ + + /* Sort deltas by full BIN LSN. */ + Arrays.sort(deltaArray, 0, nDeltas, new Comparator() { + + public int compare(Object a, Object b) { + return DbLsn.compareTo(getLsn(a), getLsn(b)); + } + + private long getLsn(Object o) { + + if (o instanceof OldBINDelta) { + return ((OldBINDelta) o).getLastFullLsn(); + } else if (o instanceof BIN) { + return ((BIN) o).getLastFullLsn(); + } else if (o instanceof DeferredDeltaRef) { + return ((DeferredDeltaRef) o).delta.getLastFullLsn(); + } else if (o instanceof OffHeapBinRef) { + return ((OffHeapBinRef) o).fullBinLsn; + } else { + return ((WeakBinRef)o).fullBinLsn; + } + } + }); + + /* + * Fetch each full BIN and merge it with its corresponding delta, and + * process each resulting BIN. + */ + for (int i = 0; i < nDeltas; i += 1) { + + Object o = deltaArray[i]; + deltaArray[i] = null; // for GC + + if (o instanceof OldBINDelta) { + + OldBINDelta delta = (OldBINDelta) o; + + DBContext ctx = getDbCtx(delta.getDbId()); + + BIN bin = (BIN) fetchItem(delta.getLastFullLsn(), BIN_ONLY); + delta.reconstituteBIN(ctx.dbImpl, bin); + + processBINInternal(ctx, bin, false); + + addGlobalMemory(-delta.getMemorySize()); + + } else if (o instanceof BIN || o instanceof DeferredDeltaRef) { + + /* + * The bin may be (a) a delta copied in phase 1, or (b) a + * delta fetched via lsn during phase 2a; in this case the lsn + * may be one that was collected during phase 1 of the current + * iteration, or a deferred lsn from an earlier iteration. In + * all cases we don't need to latch the bin because it was + * fetched from disk and not attached to the in-memory tree. + */ + BIN delta; + BIN fullBin; + boolean isDeferred; + + if (o instanceof DeferredDeltaRef) { + delta = ((DeferredDeltaRef)o).delta; + isDeferred = true; + ((DeferredDeltaRef)o).free(this); + } else { + delta = (BIN)o; + isDeferred = false; + } + + assert(delta.isBINDelta(false)); + + DBContext ctx = getDbCtx(delta.getDatabaseId()); + + fullBin = delta.reconstituteBIN(ctx.dbImpl); + + processBINInternal(ctx, fullBin, isDeferred); + + addGlobalMemory(-delta.getInMemorySize()); + + } else if (o instanceof OffHeapBinRef){ + + OffHeapBinRef ref = (OffHeapBinRef)o; + + BIN bin = env.getOffHeapCache().loadBINIfLsnMatches( + env, ref.ohBinId, ref.binLsn); + + if (bin == null) { + nextDeferredBatch = addDeferredLsn( + nextDeferredBatch, ref.binLsn, ref.memSize); + + if (debug) { + System.out.println( + "Phase 2." 
+ nIterations + + ": Found stale OffHeapBinRef - " + + "Deferring LSN: " + ref.binLsn + + " delta mem: " + + (DeferredLsnsBatch.LSN_MEM_OVERHEAD + ref.memSize)); + } + + } else { + try { + DBContext ctx = getDbCtx(bin.getDatabaseId()); + + BIN fullBin; + + if (bin.isBINDelta()) { + fullBin = bin.reconstituteBIN(ctx.dbImpl); + } else { + fullBin = bin; + } + + processBINInternal(ctx, fullBin, false); + } finally { + bin.releaseLatch(); + } + } + + addGlobalMemory(-SIZEOF_OffHeapBinRef); + + } else { + assert(o instanceof WeakBinRef); + + WeakBinRef ref = (WeakBinRef)o; + BIN bin = ref.get(); + + if (bin == null) { + nextDeferredBatch = addDeferredLsn( + nextDeferredBatch, ref.binLsn, ref.memSize); + + if (debug) { + System.out.println( + "Phase 2." + nIterations + + ": Found cleared WeakBinRef - " + + "Deferring LSN: " + ref.binLsn + + " delta mem: " + + (DeferredLsnsBatch.LSN_MEM_OVERHEAD + ref.memSize)); + } + + } else { + DBContext ctx = getDbCtx(bin.getDatabaseId()); + + bin.latch(CacheMode.UNCHANGED); + + try { + if (bin.getLastFullLsn() != ref.fullBinLsn) { + nextDeferredBatch = addDeferredLsn( + nextDeferredBatch, ref.binLsn, ref.memSize); + + if (debug) { + System.out.println( + "Phase 2." + nIterations + + ": Found stale WeakBinRef - " + + "Deferring LSN: " + ref.binLsn); + } + + } else { + BIN fullBin; + + if (bin.isBINDelta()) { + fullBin = bin.reconstituteBIN(ctx.dbImpl); + } else { + fullBin = bin; + } + + processBINInternal(ctx, fullBin, false); + } + } finally { + bin.releaseLatch(); + } + } + + addGlobalMemory(-SIZEOF_WeakBinRef); + } + } + + addGlobalMemory(-(deltaArray.length * SIZEOF_JAVA_REF)); + } + + private DeferredLsnsBatch addDeferredLsn( + DeferredLsnsBatch batch, + long lsn, + int memSize) { + + if (batch == null) { + batch = new DeferredLsnsBatch(this); + deferredLsns.addLast(batch); + } + + if (batch.addLsn(this, lsn, memSize)) { + batch = new DeferredLsnsBatch(this); + deferredLsns.addLast(batch); + } + + return batch; + } + + /** + * Process a BIN during phase II in binsOnly mode. + * + * @param bin the exclusively latched BIN. + */ + private void processBIN(DBContext ctx, BIN bin, boolean isDeferred) { + + bin.latch(CacheMode.UNCHANGED); + + try { + processBINInternal(ctx, bin, isDeferred); + } finally { + bin.releaseLatch(); + } + } + + private void processBINInternal( + DBContext ctx, + BIN bin, + boolean isDeferred) { + + /* + if (!processedBINs.add(bin.getNodeId())) { + System.out.println("XXXXX bin " + bin.getNodeId() + + " has been processed before"); + } + */ + + boolean checkBinKeys = !isDeferred && isBinProcessedBefore(ctx, bin); + + for (int i = 0; i < bin.getNEntries(); i += 1) { + + ctx.binKey = bin.getKey(i); + + if (skipSlot(ctx, bin, i, checkBinKeys)) { + continue; + } + + /* Only the key is needed, as in accumulateBINs. */ + + byte[] key = ctx.binKey; + byte[] data = (keysOnly ? null : bin.getData(i)); + + processRecord( + ctx, key, data, + bin.getExpiration(i), bin.isExpirationInHours()); + } + } + + /** + * Implements guts of phase II in LNs-only mode (binsOnly is false). 
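+ *
+ * In outline (a simplified sketch of the method body):
+ *
+ *   long[] lsns = lsnAcc.getAndSortPendingLSNs();
+ *   for (long lsn : lsns) {
+ *       LNLogEntry entry = (LNLogEntry) fetchEntry(lsn, LN_ONLY);
+ *       if (!entry.getMainItem().isDeleted()) {
+ *           processRecord(...);   // key, data, expiration
+ *       }
+ *   }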
+ */ + private void fetchAndProcessLNs() { + + long[] lsns = lsnAcc.getAndSortPendingLSNs(); + + addGlobalMemory(lsns.length * 8); + + for (long lsn : lsns) { + + final LNLogEntry entry = + (LNLogEntry) fetchEntry(lsn, LN_ONLY); + + DBContext ctx = getDbCtx(entry.getDbId()); + + entry.postFetchInit(ctx.dbImpl); + + final LN ln = entry.getMainItem(); + if (ln.isDeleted()) { + continue; + } + + processRecord( + ctx, entry.getKey(), ln.getData(), + entry.getExpiration(), entry.isExpirationInHours()); + } + + addGlobalMemory(-(lsns.length * 8)); + } + + private DBContext getDbCtx(DatabaseId dbId) { + int dbIdx = dbid2dbidxMap.get(dbId); + return dbs[dbIdx]; + } + + /** + * Invokes the callback to process a single key-data pair. The parameters + * are in the format stored in the Btree, and are translated here to + * user-format for dup DBs. + */ + private void processRecord( + DBContext ctx, + byte[] treeKey, + byte[] treeData, + int expiration, + boolean expirationInHours) { + + assert treeKey != null; + + final byte[] key; + final byte[] data; + + if (dupDBs && !countOnly) { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = + (keysOnly ? null : new DatabaseEntry()); + + DupKeyData.split(treeKey, treeKey.length, keyEntry, dataEntry); + key = keyEntry.getData(); + + data = keysOnly ? null : dataEntry.getData(); + + } else { + key = (countOnly ? null : treeKey); + data = ((countOnly || keysOnly) ? null : treeData); + } + + processor.process(ctx.dbIdx, key, data, expiration, expirationInHours); + + /* Save the highest valued key for this iteration. */ + if (ctx.newEndingKey == null || + Key.compareKeys(ctx.newEndingKey, treeKey, + ctx.dbImpl.getKeyComparator()) < 0) { + ctx.newEndingKey = treeKey; + } + } + + /** + * Fetches a log entry for the given LSN and returns its main item. + * + * @param expectTypes is used to validate the type of the entry; an + * internal exception is thrown if the log entry does not have one of the + * given types. + */ + private Object fetchItem(long lsn, LogEntryType[] expectTypes) { + return fetchEntry(lsn, expectTypes).getMainItem(); + } + + /** + * Fetches a log entry for the given LSN and returns it. + * + * @param expectTypes is used to validate the type of the entry; an + * internal exception is thrown if the log entry does not have one of the + * given types. + */ + private LogEntry fetchEntry( + long lsn, + LogEntryType[] expectTypes) { + + final LogManager logManager = env.getLogManager(); + + final LogEntry entry = + logManager.getLogEntryHandleFileNotFound(lsn); + + final LogEntryType type = entry.getLogType(); + + for (LogEntryType expectType : expectTypes) { + if (expectType.isLNType()) { + if (type.isLNType()) { + return entry; + } + } else { + if (type.equals(expectType)) { + return entry; + } + } + } + + throw EnvironmentFailureException.unexpectedState( + "Expected: " + Arrays.toString(expectTypes) + + " but got: " + type + " LSN=" + DbLsn.getNoFormatString(lsn)); + } + + /** + * Calculates a rough estimate of the memory needed for a BIN-delta object. + */ + private static int getDeltaMemSize(DBContext ctx, long lsn) { + + long fileNum = DbLsn.getFileNumber(lsn); + + final DbFileSummary summary = ctx.dbFileSummaries.get(fileNum); + + /* + * If there are no deltas in this file, then the LSN must for a full + * BIN, and no memory is needed for the delta. + */ + if (summary == null) { + return 0; + } + + /* + * The cleaner counts deltas as INs in the DbFileSummary, and most + * are actually deltas, not INs. 
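+ * Consequently, the per-file average computed below mixes full BINs
+ * and deltas, so it is only intended as a rough budgeting figure.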
We double the average IN byte size + * in the file to very roughly approximate the memory for a + * deserialized BIN-delta object. + */ + final float avgINSize = + (((float) summary.totalINSize) / summary.totalINCount); + + return (int) (avgINSize * 2); + } + + + /* + * Return true if the given bin has been processed in a previous + * iteration, i.e., if the 1st key of the bin is <= prevEndingKey. + * If not (which should be the common case), we don't need to check + * the rest of the BIN keys against prevEndingKey. + * + * The result of this method is passed as the value of the checkBinKeys + * param of the skipSlot method below. + */ + private boolean isBinProcessedBefore(DBContext ctx, BIN bin) { + + if (ctx.prevEndingKey != null && bin.getNEntries() > 0) { + final byte[] firstKey = bin.getKey(0); + + if (Key.compareKeys(firstKey, ctx.prevEndingKey, + ctx.dbImpl.getKeyComparator()) <= 0) { + return true; + } + } + + return false; + } + + /** + * Returns whether to skip a BIN slot because its LN is deleted or expired, + * or its key has already been processed in a previous iteration. + */ + private boolean skipSlot( + DBContext ctx, + BIN bin, + int index, + boolean checkBinKeys) { + + if (bin.isDefunct(index)) { + return true; + } + + /* Skip a slot that was processed in a previous iteration. */ + return ctx.prevEndingKey != null && + checkBinKeys && + Key.compareKeys( + ctx.prevEndingKey, ctx.binKey, + ctx.dbImpl.getKeyComparator()) >= 0; + } + + /** + * Moves to the first level 2 IN in the database if searchKey is null + * (signifying the first iteration), or the level 2 IN containing + * searchKey if it is non-null. + * + * We take the liberty of fetching the BIN (first BIN or BIN for the + * searchKey), when it is not resident, although in an ideal world no + * BINs would be added to the cache. Since this only occurs once per + * iteration, it is considered to be acceptable. + */ + private void getFirstIN(DBContext ctx, byte[] searchKey) { + + /* + * Use a retry loop to account for the possibility that after getting + * the BIN we can't find its exact parent due to a split of some kind + * while the BIN is unlatched. + */ + final Tree tree = ctx.dbImpl.getTree(); + + for (int i = 0; i < 25; i += 1) { + + final BIN bin; + + if (searchKey == null) { + bin = tree.getFirstNode(CacheMode.UNCHANGED); + } else { + bin = tree.search(searchKey, CacheMode.UNCHANGED); + } + + if (bin == null) { + /* Empty database. */ + ctx.parent = null; + ctx.done = true; + return; + } + + /* + * Call getParentINForChildIN with 0 as exclusiveLevel so that + * the parent will be latched in shared mode. + */ + long targetId = bin.getNodeId(); + byte[] targetKey = bin.getIdentifierKey(); + + bin.releaseLatch(); + + ctx.parentIsLatched = true; + + final SearchResult result = tree.getParentINForChildIN( + targetId, targetKey, -1/*useTargetLevel*/, + 0/*exclusiveLevel*/, true/*requireExactMatch*/, + true/*doFetch*/, CacheMode.UNCHANGED, + null/*trackingList*/); + + final IN parent = result.parent; + + if (!result.exactParentFound) { + if (parent != null) { + parent.releaseLatch(); + } + ctx.parentIsLatched = false; + continue; /* Retry. */ + } + + ctx.parent = parent; + ctx.pidx = 0; + + if (ctx.parent == null) { + ctx.done = true; + } + + return; + } + + throw EnvironmentFailureException.unexpectedState( + "Unable to find BIN for key: " + + Arrays.toString(searchKey)); + } + + /** + * Moves to the next level 2 IN in the database. 
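+ *
+ * A hypothetical outline of the iteration this supports (the actual
+ * driver loop is elsewhere and may differ):
+ *
+ *   getFirstIN(ctx, null);        // or resume from a saved key
+ *   while (!ctx.done) {
+ *       // accumulate or process the slots of ctx.parent
+ *       getNextIN(ctx);
+ *   }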
+ */ + private void getNextIN(DBContext ctx) { + + ctx.parent = ctx.dbImpl.getTree().getNextIN( + ctx.parent, true /*forward*/, true/*latchShared*/, + CacheMode.UNCHANGED); + + ctx.pidx = 0; + + if (ctx.parent == null) { + ctx.done = true; + } + } + + + /* + * UnitTesting support + */ + + public void setTestHook1(TestHook hook) { + testHook1 = hook; + } + + public void setEvictionHook(TestHook hook) { + evictionHook = hook; + } + + public void evictBinRefs() { + + if (debug) { + System.out.println("DOS EVICTION HOOK"); + } + + for (Object o : binDeltas) { + if (o instanceof OffHeapBinRef) { + + OffHeapBinRef ohRef = (OffHeapBinRef) o; + + env.getOffHeapCache().evictBINIfLsnMatch( + env, ohRef.ohBinId, ohRef.binLsn); + + continue; + } + + if (!(o instanceof WeakBinRef)) { + continue; + } + + Evictor evictor = env.getEvictor(); + + WeakBinRef binRef = (WeakBinRef) o; + + BIN bin = binRef.get(); + + if (bin == null) { + continue; + } + + binRef.clear(); + + bin.latch(); + + if (!bin.getInListResident()) { + bin.releaseLatch(); + continue; + } + + long freedBytes = + evictor.doTestEvict(bin, Evictor.EvictionSource.MANUAL); + + /* + * Try another time; maybe the bin was just moved to the dirty LRU + */ + if (freedBytes == 0) { + bin.latch(); + evictor.doTestEvict(bin, Evictor.EvictionSource.MANUAL); + } + } + } +} diff --git a/src/com/sleepycat/je/dbi/DupKeyData.java b/src/com/sleepycat/je/dbi/DupKeyData.java new file mode 100644 index 0000000..1ecdfa6 --- /dev/null +++ b/src/com/sleepycat/je/dbi/DupKeyData.java @@ -0,0 +1,409 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.util.Comparator; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.util.PackedInteger; + +/** + * Utility methods for combining, splitting and comparing two-part key values + * for duplicates databases. + * + * At the Btree storage level, for the key/data pairs in a duplicates database, + * the data is always zero length and the key is a two-part key. For embedded + * records, the key and data parts are visible at the BTree level as well. In + * both cases, the 'key' parameter in the API is the first part of the key. + * The the 'data' parameter in the API is the second part of the key. + * + * The length of the first part is stored at the end of the combined key as a + * packed integer, so that the two parts can be split, combined, and compared + * separately. The length is stored at the end, rather than the start, to + * enable key prefixing for the first part, e.g., for Strings with different + * lengths but common prefixes. + */ +public class DupKeyData { + + public static final int PREFIX_ONLY = -1; + + /** + * Returns twoPartKey as: + * paramKey bytes, + * paramData bytes, + * reverse-packed len of paramKey bytes. + * + * The byte array in the resulting twoPartKey will be copied again by JE at + * a lower level. It would be nice if there were a way to give ownership + * of the array to JE, to avoid the extra copy. 
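+ *
+ * Hypothetical example: for key bytes {1, 2} and data bytes {9}, the
+ * combined array is {1, 2, 9, len}, where len is the reverse-packed
+ * encoding of the key length (2), written at the end of the array.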
+ */
+ public static DatabaseEntry combine(final DatabaseEntry paramKey,
+ final DatabaseEntry paramData) {
+ final byte[] buf = combine
+ (paramKey.getData(), paramKey.getOffset(), paramKey.getSize(),
+ paramData.getData(), paramData.getOffset(), paramData.getSize());
+ return new DatabaseEntry(buf);
+ }
+
+ public static byte[] combine(final byte[] key, final byte[] data) {
+ return combine(key, 0, key.length, data, 0, data.length);
+ }
+
+ public static byte[] combine(final byte[] key,
+ final int keyOff,
+ final int keySize,
+ final byte[] data,
+ final int dataOff,
+ final int dataSize) {
+ final int keySizeLen = PackedInteger.getWriteIntLength(keySize);
+ final byte[] buf = new byte[keySizeLen + keySize + dataSize];
+ System.arraycopy(key, keyOff, buf, 0, keySize);
+ System.arraycopy(data, dataOff, buf, keySize, dataSize);
+ final int nextOff =
+ PackedInteger.writeReverseInt(buf, keySize + dataSize, keySize);
+ assert nextOff == buf.length;
+ return buf;
+ }
+
+ /**
+ * Splits twoPartKey, previously set by combine, into original paramKey and
+ * paramData if they are non-null.
+ *
+ * The offset of the twoPartKey must be zero. This can be assumed because
+ * the entry is read from the database and JE always returns entries with a
+ * zero offset.
+ *
+ * This method copies the bytes into new arrays rather than using the
+ * DatabaseEntry offset and size to share the array, in keeping with the
+ * convention that JE always returns whole arrays. It would be nice to
+ * avoid the copy, but that might break user apps.
+ */
+ public static void split(final DatabaseEntry twoPartKey,
+ final DatabaseEntry paramKey,
+ final DatabaseEntry paramData) {
+ assert twoPartKey.getOffset() == 0;
+ split(twoPartKey.getData(), twoPartKey.getSize(), paramKey, paramData);
+ }
+
+ /**
+ * Same as the split method above, but with twoPartKey byte array and
+ * twoPartKeySize array size params.
+ */
+ public static void split(final byte[] twoPartKey,
+ final int twoPartKeySize,
+ final DatabaseEntry paramKey,
+ final DatabaseEntry paramData) {
+ final int keySize =
+ PackedInteger.readReverseInt(twoPartKey, twoPartKeySize - 1);
+ assert keySize != PREFIX_ONLY;
+
+ if (paramKey != null) {
+ final byte[] keyBuf = new byte[keySize];
+ System.arraycopy(twoPartKey, 0, keyBuf, 0, keySize);
+
+ if (keySize == 0 || paramKey.getPartial()) {
+ LN.setEntry(paramKey, keyBuf);
+ } else {
+ paramKey.setData(keyBuf, 0, keySize);
+ }
+ }
+
+ if (paramData != null) {
+ final int keySizeLen =
+ PackedInteger.getReadIntLength(twoPartKey, twoPartKeySize - 1);
+
+ final int dataSize = twoPartKeySize - keySize - keySizeLen;
+ final byte[] dataBuf = new byte[dataSize];
+ System.arraycopy(twoPartKey, keySize, dataBuf, 0, dataSize);
+
+ if (dataSize == 0 || paramData.getPartial()) {
+ LN.setEntry(paramData, dataBuf);
+ } else {
+ paramData.setData(dataBuf, 0, dataSize);
+ }
+ }
+ }
+
+ /**
+ * Splits twoPartKey and returns a two-part key entry containing the key
+ * portion of twoPartKey combined with newData.
+ */
+ public static byte[] replaceData(final byte[] twoPartKey,
+ final byte[] newData) {
+ final int origKeySize =
+ PackedInteger.readReverseInt(twoPartKey, twoPartKey.length - 1);
+ final int keySize = (origKeySize == PREFIX_ONLY) ?
+ (twoPartKey.length - 1) :
+ origKeySize;
+ return combine(twoPartKey, 0, keySize, newData, 0, newData.length);
+ }
+
+ /**
+ * Splits twoPartKey and returns a two-part key entry containing the key
+ * portion from twoPartKey, no data, and the special PREFIX_ONLY value for
+ * the key length.
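+ * (Continuing the hypothetical example given for combine: removeData
+ * on {1, 2, 9, len} returns the prefix key {1, 2, PREFIX_ONLY}.)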
When used for a search, this will compare as less than + * any other entry having the same first part, i.e., in the same duplicate + * set. + */ + public static DatabaseEntry removeData(final byte[] twoPartKey) { + final int keySize = + PackedInteger.readReverseInt(twoPartKey, twoPartKey.length - 1); + assert keySize != PREFIX_ONLY; + return new DatabaseEntry(makePrefixKey(twoPartKey, 0, keySize)); + } + + /** + * Returns a two-part key entry with the given key portion, no data, and + * the special PREFIX_ONLY value for the key length. When used for a + * search, this will compare as less than any other entry having the same + * first part, i.e., in the same duplicate set. + */ + public static byte[] makePrefixKey( + final byte[] key, + final int keyOff, + final int keySize) { + + final byte[] buf = new byte[keySize + 1]; + System.arraycopy(key, 0, buf, 0, keySize); + buf[keySize] = (byte) PREFIX_ONLY; + return buf; + } + + public static int getKeyLength(final byte[] buf, int off, int len) { + + assert(buf.length >= off + len); + + int keyLen = PackedInteger.readReverseInt(buf, off + len - 1); + assert(keyLen != PREFIX_ONLY); + assert(keyLen >= 0 && keyLen <= len); + + return keyLen; + } + + public static byte[] getKey(final byte[] buf, int off, int len) { + + assert(buf.length >= off + len); + + int keyLen = PackedInteger.readReverseInt(buf, off + len - 1); + assert(keyLen != PREFIX_ONLY); + assert(keyLen >= 0 && keyLen <= len); + + byte[] key = new byte[keyLen]; + System.arraycopy(buf, off, key, 0, keyLen); + + return key; + } + + public static byte[] getData(final byte[] buf, int off, int len) { + + assert(buf.length >= off + len); + + int keyLen = PackedInteger.readReverseInt(buf, off + len - 1); + assert(keyLen != PREFIX_ONLY); + assert(keyLen >= 0 && keyLen <= len); + + int keyLenSize = PackedInteger.getReadIntLength(buf, off + len - 1); + + int dataLen = len - keyLen - keyLenSize; + assert(dataLen > 0); + assert(keyLen + dataLen <= len); + + byte[] data = new byte[dataLen]; + System.arraycopy(buf, off + keyLen, data, 0, dataLen); + return data; + } + + /** + * Comparator that compares the combined key/data two-part key, calling the + * user-defined btree and duplicate comparator as needed. + */ + public static class TwoPartKeyComparator implements Comparator { + + private final Comparator btreeComparator; + private final Comparator duplicateComparator; + + public TwoPartKeyComparator(final Comparator btreeComparator, + final Comparator dupComparator) { + this.btreeComparator = btreeComparator; + this.duplicateComparator = dupComparator; + } + + public int compare(final byte[] twoPartKey1, + final byte[] twoPartKey2) { + + /* Compare key portion. */ + final int origKeySize1 = PackedInteger.readReverseInt + (twoPartKey1, twoPartKey1.length - 1); + + final int keySize1 = (origKeySize1 == PREFIX_ONLY) ? + (twoPartKey1.length - 1) : + origKeySize1; + + final int origKeySize2 = PackedInteger.readReverseInt + (twoPartKey2, twoPartKey2.length - 1); + + final int keySize2 = (origKeySize2 == PREFIX_ONLY) ? 
+ (twoPartKey2.length - 1) :
+ origKeySize2;
+
+ final int keyCmp;
+
+ if (btreeComparator == null) {
+ keyCmp = Key.compareUnsignedBytes(
+ twoPartKey1, 0, keySize1, twoPartKey2, 0, keySize2);
+ } else {
+ final byte[] key1 = new byte[keySize1];
+ final byte[] key2 = new byte[keySize2];
+ System.arraycopy(twoPartKey1, 0, key1, 0, keySize1);
+ System.arraycopy(twoPartKey2, 0, key2, 0, keySize2);
+ keyCmp = btreeComparator.compare(key1, key2);
+ }
+
+ if (keyCmp != 0) {
+ return keyCmp;
+ }
+
+ if (origKeySize1 == PREFIX_ONLY || origKeySize2 == PREFIX_ONLY) {
+ if (origKeySize1 == origKeySize2) {
+ return 0;
+ }
+ return (origKeySize1 == PREFIX_ONLY) ? -1 : 1;
+ }
+
+ /* Compare data portion. */
+ final int keySizeLen1 = PackedInteger.getReadIntLength
+ (twoPartKey1, twoPartKey1.length - 1);
+ final int keySizeLen2 = PackedInteger.getReadIntLength
+ (twoPartKey2, twoPartKey2.length - 1);
+
+ final int dataSize1 = twoPartKey1.length - keySize1 - keySizeLen1;
+ final int dataSize2 = twoPartKey2.length - keySize2 - keySizeLen2;
+
+ final int dataCmp;
+ if (duplicateComparator == null) {
+ dataCmp = Key.compareUnsignedBytes
+ (twoPartKey1, keySize1, dataSize1,
+ twoPartKey2, keySize2, dataSize2);
+ } else {
+ final byte[] data1 = new byte[dataSize1];
+ final byte[] data2 = new byte[dataSize2];
+ System.arraycopy(twoPartKey1, keySize1, data1, 0, dataSize1);
+ System.arraycopy(twoPartKey2, keySize2, data2, 0, dataSize2);
+ dataCmp = duplicateComparator.compare(data1, data2);
+ }
+ return dataCmp;
+ }
+ }
+
+ /**
+ * Used to perform the getNextNoDup operation.
+ *
+ * Compares the left parameter (the key parameter in a user-initiated
+ * search operation) as:
+ * - less than a right operand with a prefix which is less than the
+ * prefix of the left operand. This is standard.
+ * - greater than a right operand with a prefix which is greater than the
+ * prefix of the left operand. This is standard.
+ * - greater than a right operand with a prefix equal to the prefix of
+ * the left operand. This is non-standard.
+ *
+ * The last property causes the range search to find the first duplicate in
+ * the duplicate set following the duplicate set of the left operand.
+ */
+ public static class NextNoDupComparator implements Comparator {
+
+ private final Comparator btreeComparator;
+
+ public NextNoDupComparator(final Comparator btreeComparator) {
+ this.btreeComparator = btreeComparator;
+ }
+
+ public int compare(final byte[] twoPartKey1,
+ final byte[] twoPartKey2) {
+ final int cmp = compareMainKey(twoPartKey1, twoPartKey2,
+ btreeComparator);
+ return (cmp != 0) ? cmp : 1;
+ }
+ }
+
+ /**
+ * Used to perform the putNoOverwrite operation. Only used to find the
+ * insertion position in the BIN, after the standard comparator is used to
+ * find the correct BIN for insertion. Because it compares part-one only,
+ * it prevents insertion of a duplicate for the main key given.
+ */
+ public static class PutNoOverwriteComparator
+ implements Comparator {
+
+ private final Comparator btreeComparator;
+
+ public PutNoOverwriteComparator(final Comparator cmp) {
+ this.btreeComparator = cmp;
+ }
+
+ public int compare(final byte[] twoPartKey1,
+ final byte[] twoPartKey2) {
+ return compareMainKey(twoPartKey1, twoPartKey2, btreeComparator);
+ }
+ }
+
+ /**
+ * Compares the first part of the two keys.
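+ *
+ * For example (hypothetical values): for
+ * k1 = combine({1,2}, {5}) and k2 = combine({1,2}, {9}),
+ * compareMainKey(k1, k2, null) returns 0, because only the {1,2}
+ * key parts are compared; the data parts are ignored.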
+ */ + public static int compareMainKey( + final byte[] keyBytes1, + final byte[] keyBytes2, + final Comparator btreeComparator) { + + final int origKeySize2 = + PackedInteger.readReverseInt(keyBytes2, keyBytes2.length - 1); + final int keySize2 = (origKeySize2 == PREFIX_ONLY) ? + (keyBytes2.length - 1) : + origKeySize2; + return compareMainKey(keyBytes1, keyBytes2, 0, keySize2, + btreeComparator); + } + + /** + * Compares the first part of the two keys. + */ + public static int compareMainKey( + final byte[] keyBytes1, + final byte[] keyBytes2, + final int keyOff2, + final int keySize2, + final Comparator btreeComparator) { + + final int origKeySize1 = + PackedInteger.readReverseInt(keyBytes1, keyBytes1.length - 1); + final int keySize1 = (origKeySize1 == PREFIX_ONLY) ? + (keyBytes1.length - 1) : + origKeySize1; + final int keyCmp; + if (btreeComparator == null) { + keyCmp = Key.compareUnsignedBytes + (keyBytes1, 0, keySize1, + keyBytes2, keyOff2, keySize2); + } else { + final byte[] key1 = new byte[keySize1]; + final byte[] key2 = new byte[keySize2]; + System.arraycopy(keyBytes1, 0, key1, 0, keySize1); + System.arraycopy(keyBytes2, keyOff2, key2, 0, keySize2); + keyCmp = btreeComparator.compare(key1, key2); + } + return keyCmp; + } +} diff --git a/src/com/sleepycat/je/dbi/EnvConfigObserver.java b/src/com/sleepycat/je/dbi/EnvConfigObserver.java new file mode 100644 index 0000000..fb88c8a --- /dev/null +++ b/src/com/sleepycat/je/dbi/EnvConfigObserver.java @@ -0,0 +1,31 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentMutableConfig; + +/** + * Implemented by observers of mutable config changes. + */ +public interface EnvConfigObserver { + + /** + * Notifies the observer that one or more mutable properties have been + * changed. + */ + void envConfigUpdate(DbConfigManager configMgr, + EnvironmentMutableConfig newConfig) + throws DatabaseException; +} diff --git a/src/com/sleepycat/je/dbi/EnvironmentFailureReason.java b/src/com/sleepycat/je/dbi/EnvironmentFailureReason.java new file mode 100644 index 0000000..72f20aa --- /dev/null +++ b/src/com/sleepycat/je/dbi/EnvironmentFailureReason.java @@ -0,0 +1,185 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+ package com.sleepycat.je.dbi;
+
+ /**
+ * @see com.sleepycat.je.EnvironmentFailureException
+ */
+ public enum EnvironmentFailureReason {
+
+ ENV_LOCKED
+ (false /*invalidates*/,
+ "The je.lck file could not be locked."),
+ ENV_NOT_FOUND
+ (false /*invalidates*/,
+ "EnvironmentConfig.setAllowCreate is false so environment " +
+ "creation is not permitted, but there are no log files in the " +
+ "environment directory."),
+ FOUND_COMMITTED_TXN
+ (true /*invalidates*/,
+ "One committed transaction has been found after a corrupted " +
+ "log entry. The recovery process has been stopped, and the user " +
+ "may need to run DbTruncateLog to truncate the log. Some valid " +
+ "data may be lost if the log file is truncated for recovery."),
+ HANDSHAKE_ERROR
+ (true /*invalidates*/,
+ "Error during the handshake between two nodes. " +
+ "Some validity or compatibility check failed, " +
+ "preventing further communication between the nodes."),
+ HARD_RECOVERY
+ (true /*invalidates*/,
+ "Rolled back past transaction commit or abort. Must run recovery by" +
+ " re-opening Environment handles"),
+ JAVA_ERROR
+ (true /*invalidates*/,
+ "Java Error occurred, recovery may not be possible."),
+ LATCH_ALREADY_HELD
+ (false /*invalidates*/,
+ "Attempt to acquire a latch that is already held, " +
+ "may cause a hard deadlock."),
+ LATCH_NOT_HELD
+ (false /*invalidates*/,
+ "Attempt to release a latch that is not currently held, " +
+ "may indicate a thread safety problem."),
+ LISTENER_EXCEPTION
+ (true, /* invalidates. */
+ "An exception was thrown from an application supplied Listener."),
+ BTREE_CORRUPTION
+ (true /*invalidates*/,
+ "Btree corruption is detected, log is likely invalid."),
+ LOG_CHECKSUM
+ (true /*invalidates*/,
+ "Checksum invalid on read, log is likely invalid."),
+ LOG_FILE_NOT_FOUND
+ (true /*invalidates*/,
+ "Log file missing, log is likely invalid."),
+ LOG_UNEXPECTED_FILE_DELETION
+ (true /*invalidates*/,
+ "A log file was unexpectedly deleted, log is likely invalid."),
+ LOG_INCOMPLETE
+ (true /*invalidates*/,
+ "Transaction logging is incomplete, replica is invalid."),
+ LOG_INTEGRITY
+ (false /*invalidates*/,
+ "Log information is incorrect, problem is likely persistent."),
+ LOG_READ
+ (true /*invalidates*/,
+ "IOException on read, log is likely invalid."),
+ INSUFFICIENT_LOG
+ (true /*invalidates*/,
+ "Log files at this node are obsolete.",
+ false), // It's ok if the env doesn't exist at this point,
+ // since this can happen before recovery is complete
+ LOG_WRITE
+ (true /*invalidates*/,
+ "IOException on write, log is likely incomplete."),
+ MASTER_TO_REPLICA_TRANSITION
+ (true /*invalidates*/,
+ "This node was a master and must reinitialize internal state to " +
+ "become a replica. The application must close and reopen all " +
+ "Environment handles."),
+ MONITOR_REGISTRATION
+ (false /*invalidates*/,
+ "JMX JE monitor could not be registered."),
+ PROGRESS_LISTENER_HALT
+ (true /* invalidates */,
+ "A ProgressListener registered with this environment returned " +
+ "false from a call to ProgressListener.progress(), indicating that " +
+ "the environment should be closed"),
+ PROTOCOL_VERSION_MISMATCH
+ (true /*invalidates*/,
+ "Two communicating nodes could not agree on a common protocol " +
+ "version."),
+ ROLLBACK_PROHIBITED
+ (true /*invalidates*/,
+ "Node would like to roll back past committed transactions, but " +
+ "would exceed the limit specified by je.rep.txnRollbackLimit. 
" + + "Manual intervention required."), + SHUTDOWN_REQUESTED + (true /*invalidates*/, + "The Replica was shutdown via a remote shutdown request."), + TEST_INVALIDATE + (true /*invalidates*/, + "Test program invalidated the environment."), + THREAD_INTERRUPTED + (true /*invalidates*/, + "InterruptedException may cause incorrect internal state, " + + "unable to continue."), + UNCAUGHT_EXCEPTION + (true /*invalidates*/, + "Uncaught Exception in internal thread, unable to continue."), + UNEXPECTED_EXCEPTION + (false /*invalidates*/, + "Unexpected internal Exception, may have side effects."), + UNEXPECTED_EXCEPTION_FATAL + (true /*invalidates*/, + "Unexpected internal Exception, unable to continue."), + UNEXPECTED_STATE + (false /*invalidates*/, + "Unexpected internal state, may have side effects."), + UNEXPECTED_STATE_FATAL + (true /*invalidates*/, + "Unexpected internal state, unable to continue."), + VERSION_MISMATCH + (false /*invalidates*/, + "The existing log was written with a version of JE that is " + + "later than the running version of JE, the log cannot be read."), + WEDGED + (true /*invalidates*/, + "An internal thread could not be stopped. The current process must " + + "be shut down and restarted before re-opening the Environment. " + + "A full thread dump has been logged."); + + private final boolean invalidates; + private final String description; + + /* + * Generally, environment failure exceptions should be thrown after the + * environment has been created. One case where this is not true is when + * an exception can be thrown both during the recovery process, and during + * normal, post-recovery operations. In the former, we would like to throw + * the same exception, but it's okay if the environmentImpl is null, because + * we're still coming up. + */ + private final boolean envShouldExist; + + private EnvironmentFailureReason(boolean invalidates, String description) { + this(invalidates, description, true); + } + + private EnvironmentFailureReason(boolean invalidates, + String description, + boolean envShouldExist) { + this.invalidates = invalidates; + this.description = description; + this.envShouldExist = envShouldExist; + } + + public boolean invalidatesEnvironment() { + return invalidates; + } + + @Override + public String toString() { + return super.toString() + ": " + description; + } + + /** + * If true, we expect an environment to exist when this exception is + * thrown, and it's okay to assert for existence. + */ + public boolean envShouldExist() { + return envShouldExist; + } +} diff --git a/src/com/sleepycat/je/dbi/EnvironmentImpl.java b/src/com/sleepycat/je/dbi/EnvironmentImpl.java new file mode 100644 index 0000000..ee139f5 --- /dev/null +++ b/src/com/sleepycat/je/dbi/EnvironmentImpl.java @@ -0,0 +1,3702 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.dbi; + +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_DELETES; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_GETS; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_INSERTS; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_BIN_DELTA_UPDATES; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_CREATION_TIME; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_GROUP_DESC; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_GROUP_NAME; +import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_RELATCHES_REQUIRED; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_GROUP_DESC; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_GROUP_NAME; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_DELETE; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_DELETE_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_INSERT; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_INSERT_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_POSITION; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_SEARCH; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_SEARCH_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_UPDATE; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_DELETE; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_INSERT; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_POSITION; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_SEARCH; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_SEARCH_FAIL; +import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_UPDATE; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.Enumeration; +import java.util.List; +import java.util.Properties; +import java.util.SortedSet; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.logging.ConsoleHandler; +import java.util.logging.FileHandler; +import java.util.logging.Formatter; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DiskLimitException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.EnvironmentWedgedException; +import com.sleepycat.je.ExceptionListener; +import com.sleepycat.je.LockStats; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.PreloadStats; +import com.sleepycat.je.PreloadStatus; +import com.sleepycat.je.ProgressListener; +import 
com.sleepycat.je.RecoveryProgress; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.TransactionStats; +import com.sleepycat.je.TransactionStats.Active; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.VersionMismatchException; +import com.sleepycat.je.cleaner.Cleaner; +import com.sleepycat.je.cleaner.ExpirationProfile; +import com.sleepycat.je.cleaner.FileProtector; +import com.sleepycat.je.cleaner.UtilizationProfile; +import com.sleepycat.je.cleaner.UtilizationTracker; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.SortedLSNTreeWalker.TreeNodeProcessor; +import com.sleepycat.je.dbi.StartupTracker.Phase; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.incomp.INCompressor; +import com.sleepycat.je.latch.Latch; +import com.sleepycat.je.latch.LatchFactory; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogFlusher; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.Provisional; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.log.entry.TraceLogEntry; +import com.sleepycat.je.recovery.Checkpointer; +import com.sleepycat.je.recovery.RecoveryInfo; +import com.sleepycat.je.recovery.RecoveryManager; +import com.sleepycat.je.recovery.VLSNRecoveryProxy; +import com.sleepycat.je.statcap.EnvStatsLogger; +import com.sleepycat.je.statcap.StatCapture; +import com.sleepycat.je.statcap.StatCaptureDefinitions; +import com.sleepycat.je.statcap.StatManager; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.BINReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.dupConvert.DupConvert; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.LockUpgrade; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.ThreadLocker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.TxnManager; +import com.sleepycat.je.util.DbBackup; +import com.sleepycat.je.util.verify.BtreeVerifier; +import com.sleepycat.je.util.verify.DataVerifier; +import com.sleepycat.je.util.verify.VerifierUtils; +import com.sleepycat.je.utilint.AtomicLongStat; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.TracerFormatter; +import com.sleepycat.je.utilint.VLSN; + +/** + * Underlying Environment implementation. There is a single instance for any + * database environment opened by the application. + */ +public class EnvironmentImpl implements EnvConfigObserver { + + /* + * Set true and run unit tests for NO_LOCKING_MODE test. + * EnvironmentConfigTest.testInconsistentParams will fail. 
[#13788] + */ + private static final boolean TEST_NO_LOCKING_MODE = false; + + /* Attributes of the entire environment */ + private volatile DbEnvState envState; + private volatile boolean closing;// true if close has begun + private final File envHome; + private final AtomicInteger openCount = new AtomicInteger(0); + // count of open environment handles + private final AtomicInteger backupCount = new AtomicInteger(0); + // count of in-progress dbBackup + private boolean isTransactional; // true if env opened with DB_INIT_TRANS + private boolean isNoLocking; // true if env has no locking + private boolean isReadOnly; // true if env opened with the read only flag. + private boolean isMemOnly; // true if je.log.memOnly=true + private boolean sharedCache; // true if je.sharedCache=true + /* true if offset tracking should be used for deferred write dbs. */ + private boolean dbEviction; + private boolean useOffHeapChecksums; + private boolean expirationEnabled; + private boolean exposeUserData; + + private boolean allowBlindOps = false; + private boolean allowBlindPuts = false; + + private int maxEmbeddedLN = -1; + + private CacheMode cacheMode; + + /* Whether or not initialization succeeded. */ + private boolean initializedSuccessfully = false; + + /* + * Represents whether this environment needs to be converted from + * standalone to replicated. + */ + protected boolean needRepConvert = false; + + private MemoryBudget memoryBudget; + private static int adler32ChunkSize; + + /* Save so we don't have to look it up in the config manager frequently. */ + private long lockTimeout; + private long txnTimeout; + + /* Deadlock detection. */ + private boolean deadlockDetection; + private long deadlockDetectionDelay; + + /* Directory of databases */ + protected DbTree dbMapTree; + private long mapTreeRootLsn = DbLsn.NULL_LSN; + private final Latch mapTreeRootLatch; + + private final INList inMemoryINs; + + /* Services */ + protected DbConfigManager configManager; + private final List configObservers; + protected final Logger envLogger; + private final LogManager logManager; + private final LogFlusher logFlusher; + private final DataVerifier dataVerifier; + private final FileManager fileManager; + private final TxnManager txnManager; + protected final StatManager statManager; + + /* Daemons */ + private final Evictor evictor; + private final OffHeapCache offHeapCache; + private final INCompressor inCompressor; + private final Checkpointer checkpointer; + private final Cleaner cleaner; + private final StatCapture statCapture; + + /* Stats, debug information */ + protected final StartupTracker startupTracker; + + /* If true, call Thread.yield() at strategic points (stress test aid) */ + private static boolean forcedYield = false; + + /* + * Used by Database, SecondaryDatabase and Cursor to protect changes to + * secondary associations during operations that use the associations. A + * single latch for all databases is used to prevent deadlocks and to + * support associations involving multiple primary databases. + * + * A ReentrantReadWriteLock is used directly rather than via a SharedLatch. + * This is because reentrancy is required but not supported by SharedLatch. + */ + private final ReentrantReadWriteLock secondaryAssociationLock; + + /** + * The exception listener for this environment, if any has been specified. + */ + private ExceptionListener exceptionListener = null; + + /** + * The recovery progress listener for this environment, if any has been + * specified. 
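+ *
+ * Recovery reports its RecoveryProgress phases through this listener;
+ * if progress() returns false, opening the environment is halted (see
+ * PROGRESS_LISTENER_HALT in EnvironmentFailureReason).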
+ */ + private ProgressListener recoveryProgressListener = null; + + /** + * ClassLoader used to load user-supplied classes by name. + */ + private ClassLoader classLoader = null; + + /** + * Used for duplicate database conversion. + */ + private PreloadConfig dupConvertPreloadConfig = null; + + /* + * Configuration and tracking of background IO limits. Managed by the + * updateBackgroundReads, updateBackgroundWrites and sleepAfterBackgroundIO + * methods. The limits and the backlog are volatile because we check them + * outside the synchronized block. Other fields are updated and checked + * while synchronized on the tracking mutex object. The sleep mutex is + * used to block multiple background threads while sleeping. + */ + private volatile int backgroundSleepBacklog; + private volatile int backgroundReadLimit; + private volatile int backgroundWriteLimit; + private long backgroundSleepInterval; + private int backgroundReadCount; + private long backgroundWriteBytes; + private TestHook backgroundSleepHook; + private final Object backgroundTrackingMutex = new Object(); + private final Object backgroundSleepMutex = new Object(); + + /* + * ThreadLocal.get() is not cheap so we want to minimize calls to it. We + * only use ThreadLocals for the TreeStatsAccumulator which are only called + * in limited circumstances. Use this reference count to indicate that a + * thread has set a TreeStatsAccumulator. When it's done, it decrements + * the counter. It's static so that we don't have to pass around the + * EnvironmentImpl. + */ + private static int threadLocalReferenceCount = 0; + + /* Used to prevent multiple full thread dumps. */ + private boolean didFullThreadDump = false; + + /** + * DbPrintLog doesn't need btree and dup comparators to function properly + * don't require any instantiations. This flag, if true, indicates that + * we've been called from DbPrintLog or a similar utility. + */ + private boolean noComparators = false; + + /* + * A preallocated EnvironmentFailureException that is used in OOME and + * other java.lang.Error situations so that allocation does not need to be + * done in the OOME context. + */ + private final EnvironmentFailureException preallocatedEFE = + EnvironmentFailureException.makeJavaErrorWrapper(); + + /* + * If the env was invalidated (even if envState is now CLOSED) this + * contains the first EFE that invalidated it. This == preallocatedEFE when + * an Error caused the invalidation. Contains null if the env was not + * invalidated. + */ + private final AtomicReference + invalidatingEFE = new AtomicReference<>(); + + /* + * The first EWE that occurred, or null. If this EWE was the first EFE to + * invalidate the env, then wedgedEFE.get() == invalidatingEFE.get(). + */ + private final AtomicReference wedgedEFE = + new AtomicReference<>(); + + public static final boolean USE_JAVA5_ADLER32; + + private static final String DISABLE_JAVA_ADLER32_NAME = + "je.disable.java.adler32"; + + static { + USE_JAVA5_ADLER32 = + System.getProperty(DISABLE_JAVA_ADLER32_NAME) == null; + } + + /* + * JE MBeans. + * + * Note that MBeans are loaded dynamically in order to support platforms + * that do not include javax.management. TODO: Since Dalvik is no longer + * supported, we may want to remove this abstraction. + */ + + /* The property name of setting these two MBeans. */ + private static final String REGISTER_MONITOR = "JEMonitor"; + + /* The two MBeans registered or not. 
*/ + private volatile boolean isMBeanRegistered = false; + + /* + * Log handlers used in java.util.logging. Handlers are per-environment, + * and must not be static, because the output is tagged with an identifier + * that associates the information with that environment. Handlers should + * be closed to release resources when the environment is closed. + * + * Note that handlers are not statically attached to loggers. See + * LoggerUtils.java for information on how redirect loggers are used. + */ + private static final String INFO_FILES = "je.info"; + private static final int FILEHANDLER_LIMIT = 10000000; + private static final int FILEHANDLER_COUNT = 10; + private final ConsoleHandler consoleHandler; + private final FileHandler fileHandler; + + /* + * A Handler that was specified by the application through + * EnvironmentConfig + */ + private final Handler configuredHandler; + /* cache this value as a performance optimization. */ + private boolean dbLoggingDisabled; + + /* Formatter for java.util.logging. */ + protected final Formatter formatter; + + /* + * The internal environment handle that is passed to triggers invoked as a + * result of AutoTransactions where no environment handle is available, and + * in all cases of triggers involving replicated environments. + */ + protected Environment envInternal; + + /* + * Used to coordinate getting stats and shutting down the threads + * that provide the stats. The shutdown of the statistics capture + * thread will get statistics right before shutting down. The + * acquisition of stats must be done without synchronizing on the + * EnvironmentImpl to avoid a deadlock between the shutdown thread + * (has the EnvironmentImpl lock) and the stat capture thread calling + * getStats(). + */ + private final Object statSynchronizer = new Object(); + + /* Stat base key used for loadStats api */ + protected Integer statKey; + + private long creationTime; + + /** + * To support platforms that do not have any javax.management classes, we + * load JEMonitor dynamically to ensure that there are no explicit + * references to com.sleepycat.je.jmx.*. + */ + public static interface MBeanRegistrar { + public void doRegister(Environment env) + throws Exception; + + public void doUnregister() + throws Exception; + } + + private final ArrayList mBeanRegList = + new ArrayList(); + + /* NodeId sequence counters */ + private final NodeSequence nodeSequence; + + /* Stats */ + private final StatGroup envStats; + private LongStat relatchesRequired; + private final StatGroup thrputStats; + private final AtomicLongStat priSearchOps; + private final AtomicLongStat priSearchFailOps; + private final AtomicLongStat secSearchOps; + private final AtomicLongStat secSearchFailOps; + private final AtomicLongStat priPositionOps; + private final AtomicLongStat secPositionOps; + private final AtomicLongStat priInsertOps; + private final AtomicLongStat priInsertFailOps; + private final AtomicLongStat secInsertOps; + private final AtomicLongStat priUpdateOps; + private final AtomicLongStat secUpdateOps; + private final AtomicLongStat priDeleteOps; + private final AtomicLongStat priDeleteFailOps; + private final AtomicLongStat secDeleteOps; + private final AtomicLongStat binDeltaGets; + private final AtomicLongStat binDeltaInserts; + private final AtomicLongStat binDeltaUpdates; + private final AtomicLongStat binDeltaDeletes; + + private EnvStatsLogger envStatLogger = null; + + /* Refer to comment near declaration of these static LockUpgrades. 
*/ + static { + LockUpgrade.ILLEGAL.setUpgrade(null); + LockUpgrade.EXISTING.setUpgrade(null); + LockUpgrade.WRITE_PROMOTE.setUpgrade(LockType.WRITE); + LockUpgrade.RANGE_READ_IMMED.setUpgrade(LockType.RANGE_READ); + LockUpgrade.RANGE_WRITE_IMMED.setUpgrade(LockType.RANGE_WRITE); + LockUpgrade.RANGE_WRITE_PROMOTE.setUpgrade(LockType.RANGE_WRITE); + } + + /* May be null, see getOptionalNodeName. */ + private final String optionalNodeName; + + /* EnvironmentConfig.TREE_COMPACT_MAX_KEY_LENGTH. */ + private int compactMaxKeyLength; + + /* EnvironmentParams.ENV_LATCH_TIMEOUT. */ + private int latchTimeoutMs; + + /** {@link EnvironmentParams#ENV_TTL_CLOCK_TOLERANCE}. */ + private int ttlClockTolerance; + + /** {@link EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. */ + private int ttlMaxTxnTime; + + /** {@link EnvironmentParams#ENV_TTL_LN_PURGE_DELAY}. */ + private int ttlLnPurgeDelay; + + public EnvironmentImpl(File envHome, + EnvironmentConfig envConfig, + EnvironmentImpl sharedCacheEnv) + throws EnvironmentNotFoundException, EnvironmentLockedException { + + this(envHome, envConfig, sharedCacheEnv, null); + } + + /** + * Create a database environment to represent the data in envHome. + * dbHome. Properties from the je.properties file in that directory are + * used to initialize the system wide property bag. Properties passed to + * this method are used to influence the open itself. + * + * @param envHome absolute path of the database environment home directory + * @param envConfig is the configuration to be used. It's already had + * the je.properties file applied, and has been validated. + * @param sharedCacheEnv if non-null, is another environment that is + * sharing the cache with this environment; if null, this environment is + * not sharing the cache or is the first environment to share the cache. + * + * @throws DatabaseException on all other failures + * + * @throws IllegalArgumentException via Environment ctor. + */ + protected EnvironmentImpl(File envHome, + EnvironmentConfig envConfig, + EnvironmentImpl sharedCacheEnv, + RepConfigProxy repConfigProxy) + throws EnvironmentNotFoundException, EnvironmentLockedException { + + boolean success = false; + startupTracker = new StartupTracker(this); + startupTracker.start(Phase.TOTAL_ENV_OPEN); + + try { + this.envHome = envHome; + envState = DbEnvState.INIT; + mapTreeRootLatch = LatchFactory.createExclusiveLatch( + this, "MapTreeRoot", false /*collectStats*/); + + /* Do the stats definition. 
*/ + envStats = new StatGroup(ENV_GROUP_NAME, ENV_GROUP_DESC); + + relatchesRequired = + new LongStat(envStats, ENV_RELATCHES_REQUIRED); + + creationTime = System.currentTimeMillis(); + + binDeltaGets = + new AtomicLongStat(envStats, ENV_BIN_DELTA_GETS); + binDeltaInserts = + new AtomicLongStat(envStats, ENV_BIN_DELTA_INSERTS); + binDeltaUpdates = + new AtomicLongStat(envStats, ENV_BIN_DELTA_UPDATES); + binDeltaDeletes = + new AtomicLongStat(envStats, ENV_BIN_DELTA_DELETES); + + thrputStats = new StatGroup( + THROUGHPUT_GROUP_NAME, THROUGHPUT_GROUP_DESC); + + priSearchOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_SEARCH); + priSearchFailOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_SEARCH_FAIL); + secSearchOps = + new AtomicLongStat(thrputStats, THROUGHPUT_SEC_SEARCH); + secSearchFailOps = + new AtomicLongStat(thrputStats, THROUGHPUT_SEC_SEARCH_FAIL); + priPositionOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_POSITION); + secPositionOps = + new AtomicLongStat(thrputStats, THROUGHPUT_SEC_POSITION); + priInsertOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_INSERT); + priInsertFailOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_INSERT_FAIL); + secInsertOps = + new AtomicLongStat(thrputStats, THROUGHPUT_SEC_INSERT); + priUpdateOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_UPDATE); + secUpdateOps = + new AtomicLongStat(thrputStats, THROUGHPUT_SEC_UPDATE); + priDeleteOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_DELETE); + priDeleteFailOps = + new AtomicLongStat(thrputStats, THROUGHPUT_PRI_DELETE_FAIL); + secDeleteOps = + new AtomicLongStat(thrputStats, THROUGHPUT_SEC_DELETE); + + /* Set up configuration parameters */ + configManager = initConfigManager(envConfig, repConfigProxy); + configObservers = new ArrayList(); + addConfigObserver(this); + initConfigParams(envConfig, repConfigProxy); + + /* + * Create essential services that must exist before recovery. + */ + + /* + * Set up java.util.logging handlers and their environment specific + * formatters. These are used by the redirect handlers, rather + * than specific loggers. + */ + formatter = initFormatter(); + consoleHandler = + new com.sleepycat.je.util.ConsoleHandler(formatter, this); + fileHandler = initFileHandler(); + configuredHandler = envConfig.getLoggingHandler(); + envLogger = LoggerUtils.getLogger(getClass()); + + /* + * Decide on memory budgets based on environment config params and + * memory available to this process. + */ + memoryBudget = + new MemoryBudget(this, sharedCacheEnv, configManager); + + fileManager = new FileManager(this, envHome, isReadOnly); + + if (!envConfig.getAllowCreate() && !fileManager.filesExist() && + !configManager.getBoolean(EnvironmentParams.ENV_SETUP_LOGGER)) { + + throw new EnvironmentNotFoundException + (this, "Home directory: " + envHome); + } + + optionalNodeName = envConfig.getNodeName(); + + logManager = new LogManager(this, isReadOnly); + + inMemoryINs = new INList(this); + txnManager = new TxnManager(this); + statManager = createStatManager(); + + /* + * Daemons are always made here, but only started after recovery. + * We want them to exist so we can call them programatically even + * if the daemon thread is not started. + */ + if (sharedCacheEnv != null) { + /* The evictor and off-heap cache may be shared by multiple envs. 
*/ + assert sharedCache; + evictor = sharedCacheEnv.evictor; + offHeapCache = sharedCacheEnv.offHeapCache; + } else { + evictor = new Evictor(this); + offHeapCache = new OffHeapCache(this); + } + + checkpointer = new Checkpointer( + this, + Checkpointer.getWakeupPeriod(configManager), + Environment .CHECKPOINTER_NAME); + + inCompressor = new INCompressor( + this, + configManager.getDuration( + EnvironmentParams.COMPRESSOR_WAKEUP_INTERVAL), + Environment.INCOMP_NAME); + + cleaner = new Cleaner(this, Environment.CLEANER_NAME); + + statCapture = new StatCapture( + this, Environment.STATCAPTURE_NAME, + configManager.getDuration( + EnvironmentParams.STATS_COLLECT_INTERVAL), + envConfig.getCustomStats(), getStatCaptureProjections(), + statManager); + + logFlusher = new LogFlusher(this); + + dataVerifier = new DataVerifier(this); + + /* + * The node sequences are not initialized until after the DbTree is + * created below. + */ + nodeSequence = new NodeSequence(this); + + /* + * Instantiate a new, blank dbtree. If the environment already + * exists, recovery will recreate the dbMapTree from the log and + * overwrite this instance. + */ + dbMapTree = new DbTree(this, isReplicated(), getPreserveVLSN()); + + secondaryAssociationLock = + new ReentrantReadWriteLock(false /*fair*/); + + /* + * Allocate node sequences before recovery. We expressly wait to + * allocate it after the DbTree is created, because these sequences + * should not be used by the DbTree before recovery has + * run. Waiting until now to allocate them will make errors more + * evident, since there will be a NullPointerException. + */ + nodeSequence.initRealNodeId(); + + statKey = statManager.registerStatContext(); + if (!isReadOnly() && + !isMemOnly() && + configManager.getBoolean(EnvironmentParams.STATS_COLLECT)) { + envStatLogger = new EnvStatsLogger(this); + addConfigObserver(envStatLogger); + envStatLogger.log(); + } + success = true; + } finally { + if (!success) { + /* Release any environment locks if there was a problem. */ + clearFileManager(); + closeHandlers(); + } + } + } + + /** + * Create a config manager that holds the configuration properties that + * have been passed in. These properties are already validated, and have + * had the proper order of precedence applied; that is, the je.properties + * file has been applied. The configuration properties need to be available + * before the rest of environment creation proceeds. + * + * This method is overridden by replication environments. + * + * @param envConfig is the environment configuration to use + * @param repParams are the replication configurations to use. In this + * case, the Properties bag has been extracted from the configuration + * instance, to avoid crossing the compilation firewall. + */ + protected DbConfigManager initConfigManager(EnvironmentConfig envConfig, + RepConfigProxy repParams) { + return new DbConfigManager(envConfig); + } + + /** + * Init configuration params during environment creation. + * + * This method is overridden by RepImpl to get init params also. This + * allows certain rep params to be accessed from the EnvironmentImpl + * constructor using methods such as getPreserveVLSN. The overridden method + * calls this method first. 
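Each configuration field below is read once from the DbConfigManager and cached. On the application side the same parameters can be set either programmatically or via a je.properties file in the environment home, which takes precedence. A brief sketch using the "je.env.isTransactional" key that also appears in the error message below:

    import com.sleepycat.je.EnvironmentConfig;

    EnvironmentConfig config = new EnvironmentConfig();
    // String form of config.setTransactional(true). A line
    // "je.env.isTransactional=true" in <envHome>/je.properties overrides it.
    config.setConfigParam("je.env.isTransactional", "true");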
+ * @param repConfigProxy unused + */ + protected void initConfigParams(EnvironmentConfig envConfig, + RepConfigProxy repConfigProxy) { + + forcedYield = + configManager.getBoolean(EnvironmentParams.ENV_FORCED_YIELD); + isTransactional = + configManager.getBoolean(EnvironmentParams.ENV_INIT_TXN); + isNoLocking = !(configManager.getBoolean + (EnvironmentParams.ENV_INIT_LOCKING)); + if (isTransactional && isNoLocking) { + if (TEST_NO_LOCKING_MODE) { + isNoLocking = !isTransactional; + } else { + throw new IllegalArgumentException + ("Can't set 'je.env.isNoLocking' and " + + "'je.env.isTransactional';"); + } + } + + isReadOnly = configManager.getBoolean( + EnvironmentParams.ENV_RDONLY); + + isMemOnly = configManager.getBoolean( + EnvironmentParams.LOG_MEMORY_ONLY); + + dbEviction = configManager.getBoolean( + EnvironmentParams.ENV_DB_EVICTION); + + useOffHeapChecksums = configManager.getBoolean( + EnvironmentParams.OFFHEAP_CHECKSUM); + + adler32ChunkSize = configManager.getInt( + EnvironmentParams.ADLER32_CHUNK_SIZE); + + sharedCache = configManager.getBoolean( + EnvironmentParams.ENV_SHARED_CACHE); + + dbLoggingDisabled = !configManager.getBoolean( + EnvironmentParams.JE_LOGGING_DBLOG); + + compactMaxKeyLength = configManager.getInt( + EnvironmentParams.TREE_COMPACT_MAX_KEY_LENGTH); + + latchTimeoutMs = configManager.getDuration( + EnvironmentParams.ENV_LATCH_TIMEOUT); + + ttlClockTolerance = configManager.getDuration( + EnvironmentParams.ENV_TTL_CLOCK_TOLERANCE); + + ttlMaxTxnTime = configManager.getDuration( + EnvironmentParams.ENV_TTL_MAX_TXN_TIME); + + ttlLnPurgeDelay = configManager.getDuration( + EnvironmentParams.ENV_TTL_LN_PURGE_DELAY); + + allowBlindOps = configManager.getBoolean( + EnvironmentParams.BIN_DELTA_BLIND_OPS); + + allowBlindPuts = configManager.getBoolean( + EnvironmentParams.BIN_DELTA_BLIND_PUTS); + + maxEmbeddedLN = configManager.getInt( + EnvironmentParams.TREE_MAX_EMBEDDED_LN); + + deadlockDetection = configManager.getBoolean( + EnvironmentParams.LOCK_DEADLOCK_DETECT); + + deadlockDetectionDelay = configManager.getDuration( + EnvironmentParams.LOCK_DEADLOCK_DETECT_DELAY); + + recoveryProgressListener = envConfig.getRecoveryProgressListener(); + classLoader = envConfig.getClassLoader(); + dupConvertPreloadConfig = envConfig.getDupConvertPreloadConfig(); + } + + /** + * Initialize the environment, including running recovery, if it is not + * already initialized. + * + * Note that this method should be called even when opening additional + * handles for an already initialized environment. If initialization is + * still in progress then this method will block until it is finished. + * + * @return true if we are opening the first handle for this environment and + * recovery is run (when ENV_RECOVERY is configured to true); false if we + * are opening an additional handle and recovery is not run. + */ + public synchronized boolean finishInit(EnvironmentConfig envConfig) + throws DatabaseException { + + if (initializedSuccessfully) { + return false; + } + + boolean success = false; + try { + + /* + * Do not do recovery if this environment is for a utility that + * reads the log directly. + */ + final boolean doRecovery = + configManager.getBoolean(EnvironmentParams.ENV_RECOVERY); + if (doRecovery) { + + /* + * Run recovery. Note that debug logging to the database log + * is disabled until recovery is finished. 
+ */ + boolean recoverySuccess = false; + try { + RecoveryManager recoveryManager = + new RecoveryManager(this); + recoveryManager.recover(isReadOnly); + + postRecoveryConversion(); + recoverySuccess = true; + } finally { + try { + + /* + * Flush to get all exception tracing out to the log. + */ + logManager.flushSync(); + fileManager.clear(); + } catch (IOException e) { + /* Ignore second order exceptions. */ + if (recoverySuccess) { + throw new EnvironmentFailureException + (this, EnvironmentFailureReason.LOG_INTEGRITY, + e); + } + } catch (Exception e) { + if (recoverySuccess) { + throw EnvironmentFailureException. + unexpectedException(this, e); + } + } + } + } else { + isReadOnly = true; + + /* + * Normally when recovery is skipped, we don't need to + * instantiate comparators. But even without recovery, some + * utilities such as DbScavenger need comparators. + */ + if (!configManager.getBoolean + (EnvironmentParams.ENV_COMPARATORS_REQUIRED)) { + noComparators = true; + } + } + + /* + * Cache a few critical values. We keep our timeout in millis + * because Object.wait takes millis. + */ + lockTimeout = + configManager.getDuration(EnvironmentParams.LOCK_TIMEOUT); + txnTimeout = + configManager.getDuration(EnvironmentParams.TXN_TIMEOUT); + + /* + * Initialize the environment memory usage number. Must be called + * after recovery, because recovery determines the starting size of + * the in-memory tree. + */ + memoryBudget.initCacheMemoryUsage + (dbMapTree.getTreeAdminMemory()); + + /* + * Call config observer and start daemons last after everything + * else is initialized. Note that all config parameters, both + * mutable and non-mutable, needed by the memoryBudget have already + * been initialized when the configManager was instantiated. + */ + envConfigUpdate(configManager, envConfig); + + /* + * Mark initialized before creating the internal env, since + * otherwise a we'll recurse and attempt to create another + * EnvironmentImpl. + */ + initializedSuccessfully = true; + + if (doRecovery) { + + /* + * Perform dup database conversion after recovery and other + * initialization is complete, but before running daemons. + */ + convertDupDatabases(); + + /* Create internal env before SyncCleanerBarrier. */ + envInternal = createInternalEnvironment(); + } + + /* + * Mark as open before starting daemons. Note that this will allow + * background eviction threads to run, so it should not be done + * until we are ready for multi-threaded access. + */ + open(); + + runOrPauseDaemons(configManager); + success = true; + return true; + } finally { + if (!success) { + /* Release any environment locks if there was a problem. */ + clearFileManager(); + closeHandlers(); + } + + /* + * DbEnvPool.addEnvironment is called by RecoveryManager.buildTree + * during recovery above, to enable eviction during recovery. If + * we fail to create the environment, we must remove it. + */ + if (!success && sharedCache) { + evictor.removeSharedCacheEnv(this); + } + + startupTracker.stop(Phase.TOTAL_ENV_OPEN); + startupTracker.setProgress(RecoveryProgress.RECOVERY_FINISHED); + } + } + + /** + * Is overridden in RepImpl to create a ReplicatedEnvironment. + */ + protected Environment createInternalEnvironment() { + return new InternalEnvironment(getEnvironmentHome(), cloneConfig(), + this); + } + + /* + * JE MBean registration is performed during Environment creation so that + * the MBean has access to the Environment API which is not available from + * EnvironmentImpl. 
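Concretely, registration is gated on a bare system property: registerMBean below only tests that "JEMonitor" is defined, not its value. A sketch of turning the MBeans on, with a sleep merely to keep the JVM alive for a JMX client:

    import java.io.File;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;

    public class JmxDemo {
        public static void main(String[] args) throws InterruptedException {
            // Same effect as launching with -DJEMonitor=true. Must be set
            // before the Environment is created, because registration runs
            // during Environment construction.
            System.setProperty("JEMonitor", "true");

            EnvironmentConfig config = new EnvironmentConfig();
            config.setAllowCreate(true);
            Environment env = new Environment(new File("/tmp/je-env"), config);

            // com.sleepycat.je.jmx.JEMonitor and JEDiagnostics are now
            // registered; inspect them with jconsole or another JMX client.
            Thread.sleep(60_000);
            env.close();
        }
    }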
This precludes registering MBeans in + * EnvironmentImpl.finishInit. + */ + public synchronized void registerMBean(Environment env) + throws DatabaseException { + + if (!isMBeanRegistered) { + if (System.getProperty(REGISTER_MONITOR) != null) { + doRegisterMBean(getMonitorClassName(), env); + doRegisterMBean(getDiagnosticsClassName(), env); + } + isMBeanRegistered = true; + } + } + + protected String getMonitorClassName() { + return "com.sleepycat.je.jmx.JEMonitor"; + } + + protected String getDiagnosticsClassName() { + return "com.sleepycat.je.jmx.JEDiagnostics"; + } + + /* + * Returns the default consistency policy for this EnvironmentImpl. + * + * When a Txn is created directly for internal use, the default consistency + * is needed. For example, SyncDB uses this method. + * + * This method returns null for a standalone Environment, and returns the + * default consistency policy for a ReplicatedEnvironment. + */ + public ReplicaConsistencyPolicy getDefaultConsistencyPolicy() { + return null; + } + + /* + * Returns the end of the log. + * + * Returned value is a Lsn if it's a standalone Environment, otherwise it's + * a VLSN. + */ + public long getEndOfLog() { + return fileManager.getLastUsedLsn(); + } + + /* Get replication statistics. */ + public Collection getRepStatGroups(StatsConfig config, + Integer statkey) { + throw new UnsupportedOperationException + ("Standalone Environment doesn't support replication statistics."); + } + + public SortedSet getStatCaptureProjections() { + return new StatCaptureDefinitions().getStatisticProjections(); + } + + public StatManager createStatManager() { + return new StatManager(this); + } + + private void doRegisterMBean(String className, Environment env) + throws DatabaseException { + + try { + Class newClass = Class.forName(className); + MBeanRegistrar mBeanReg = (MBeanRegistrar) newClass.newInstance(); + mBeanReg.doRegister(env); + mBeanRegList.add(mBeanReg); + } catch (Exception e) { + throw new EnvironmentFailureException + (DbInternal.getNonNullEnvImpl(env), + EnvironmentFailureReason.MONITOR_REGISTRATION, e); + } + } + + private synchronized void unregisterMBean() + throws Exception { + + for (MBeanRegistrar mBeanReg : mBeanRegList) { + mBeanReg.doUnregister(); + } + } + + /* + * Release and close the FileManager when there are problems during the + * initialization of this EnvironmentImpl. An exception is already in + * flight when this method is called. + */ + private void clearFileManager() + throws DatabaseException { + + if (fileManager == null) { + return; + } + + try { + /* + * Clear again, in case an exception in logManager.flush() + * caused us to skip the earlier call to clear(). + */ + fileManager.clear(); + } catch (Throwable e) { + /* + * Klockwork - ok + * Eat it, we want to throw the original exception. + */ + } + + try { + fileManager.close(); + } catch (Throwable e) { + /* + * Klockwork - ok + * Eat it, we want to throw the original exception. + */ + } + } + + /** + * Respond to config updates. + */ + @Override + public void envConfigUpdate(DbConfigManager mgr, + EnvironmentMutableConfig newConfig) { + backgroundReadLimit = mgr.getInt + (EnvironmentParams.ENV_BACKGROUND_READ_LIMIT); + backgroundWriteLimit = mgr.getInt + (EnvironmentParams.ENV_BACKGROUND_WRITE_LIMIT); + backgroundSleepInterval = mgr.getDuration + (EnvironmentParams.ENV_BACKGROUND_SLEEP_INTERVAL); + + /* Reset logging levels if they're set in EnvironmentMutableConfig. 
*/ + if (newConfig.isConfigParamSet + (EnvironmentConfig.CONSOLE_LOGGING_LEVEL)) { + Level newConsoleHandlerLevel = + Level.parse(mgr.get(EnvironmentParams.JE_CONSOLE_LEVEL)); + consoleHandler.setLevel(newConsoleHandlerLevel); + } + + if (newConfig.isConfigParamSet + (EnvironmentConfig.FILE_LOGGING_LEVEL)) { + Level newFileHandlerLevel = + Level.parse(mgr.get(EnvironmentParams.JE_FILE_LEVEL)); + if (fileHandler != null) { + fileHandler.setLevel(newFileHandlerLevel); + } + } + + exceptionListener = newConfig.getExceptionListener(); + + cacheMode = newConfig.getCacheMode(); + + expirationEnabled = mgr.getBoolean( + EnvironmentParams.ENV_EXPIRATION_ENABLED); + + exposeUserData = mgr.getBoolean( + EnvironmentParams.ENV_EXPOSE_USER_DATA); + + if (mgr.getBoolean(EnvironmentParams.STATS_COLLECT)) { + if (envStatLogger == null && + !isReadOnly() && + !isMemOnly() ) { + envStatLogger = new EnvStatsLogger(this); + addConfigObserver(envStatLogger); + + /* + * Need to log env stats because stats were off and are now on. + * Since stats were off there was no event observer registered. + */ + envStatLogger.log(); + } + } else { + if (envStatLogger != null) { + removeConfigObserver(envStatLogger); + } + envStatLogger = null; + } + + /* + * Start daemons last, after all other parameters are set. Do not + * start the daemons during the EnvironmentImpl constructor's call + * (before open() has been called), to allow finishInit to run. + */ + if (isValid()) { + runOrPauseDaemons(mgr); + } + } + + /** + * Run or pause daemons, depending on config properties. + */ + private void runOrPauseDaemons(DbConfigManager mgr) { + + if (isReadOnly) { + return; + } + + inCompressor.runOrPause( + mgr.getBoolean(EnvironmentParams.ENV_RUN_INCOMPRESSOR)); + + cleaner.runOrPause( + mgr.getBoolean(EnvironmentParams.ENV_RUN_CLEANER) && + !isMemOnly); + + checkpointer.runOrPause( + mgr.getBoolean(EnvironmentParams.ENV_RUN_CHECKPOINTER)); + + statCapture.runOrPause( + mgr.getBoolean(EnvironmentParams.STATS_COLLECT)); + + logFlusher.configFlushTask(mgr); + + dataVerifier.configVerifyTask(mgr); + } + + /** + * Return the incompressor. In general, don't use this directly because + * it's easy to forget that the incompressor can be null at times (i.e + * during the shutdown procedure. Instead, wrap the functionality within + * this class, like lazyCompress. + */ + public INCompressor getINCompressor() { + return inCompressor; + } + + /** + * Returns the FileProtector. + */ + public FileProtector getFileProtector() { + return cleaner.getFileProtector(); + } + + /** + * Returns the UtilizationTracker. + */ + public UtilizationTracker getUtilizationTracker() { + return cleaner.getUtilizationTracker(); + } + + /** + * Returns the UtilizationProfile. + */ + public UtilizationProfile getUtilizationProfile() { + return cleaner.getUtilizationProfile(); + } + + /** + * Returns the ExpirationProfile. + */ + public ExpirationProfile getExpirationProfile() { + return cleaner.getExpirationProfile(); + } + + /** + * Returns the default cache mode for this environment. If the environment + * has a null cache mode, CacheMode.DEFAULT is returned. Null is never + * returned. + */ + public CacheMode getDefaultCacheMode() { + if (cacheMode != null) { + return cacheMode; + } + return CacheMode.DEFAULT; + } + + /** + * Returns EnvironmentConfig.TREE_COMPACT_MAX_KEY_LENGTH. + */ + public int getCompactMaxKeyLength() { + return compactMaxKeyLength; + } + + /** + * Returns EnvironmentConfig.ENV_LATCH_TIMEOUT. 
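The TTL accessors just below (getTtlClockTolerance and friends) back JE's record expiration feature. On the public side an expiration time is attached per record at write time; a sketch against the JE 7-style API, where db, key and data are assumed to be an open Database and populated DatabaseEntry objects:

    import java.util.concurrent.TimeUnit;
    import com.sleepycat.je.Put;
    import com.sleepycat.je.WriteOptions;

    // The record expires roughly 30 days after the write; expired records
    // are filtered from reads and their space is reclaimed by the cleaner.
    WriteOptions options = new WriteOptions().setTTL(30, TimeUnit.DAYS);
    db.put(null, key, data, Put.OVERWRITE, options);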
+ */ + public int getLatchTimeoutMs() { + return latchTimeoutMs; + } + + /** + * Returns {@link EnvironmentParams#ENV_TTL_CLOCK_TOLERANCE}. + */ + public int getTtlClockTolerance() { + return ttlClockTolerance; + } + + /** + * Returns {@link EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. + */ + public int getTtlMaxTxnTime() { + return ttlMaxTxnTime; + } + + /** + * Returns {@link EnvironmentParams#ENV_TTL_LN_PURGE_DELAY}. + */ + public int getTtlLnPurgeDelay() { + return ttlLnPurgeDelay; + } + + /** + * If a background read limit has been configured and that limit is + * exceeded when the cumulative total is incremented by the given number of + * reads, increment the sleep backlog to cause a sleep to occur. Called by + * background activities such as the cleaner after performing a file read + * operation. + * + * @see #sleepAfterBackgroundIO + */ + public void updateBackgroundReads(int nReads) { + + /* + * Make a copy of the volatile limit field since it could change + * between the time we check it and the time we use it below. + */ + int limit = backgroundReadLimit; + if (limit > 0) { + synchronized (backgroundTrackingMutex) { + backgroundReadCount += nReads; + if (backgroundReadCount >= limit) { + backgroundSleepBacklog += 1; + /* Remainder is rolled forward. */ + backgroundReadCount -= limit; + assert backgroundReadCount >= 0; + } + } + } + } + + /** + * If a background write limit has been configured and that limit is + * exceeded when the given amount written is added to the cumulative total, + * increment the sleep backlog to cause a sleep to occur. Called by + * background activities such as the checkpointer and evictor after + * performing a file write operation. + * + *
+ * <p>The number of writes is estimated by dividing the bytes written by + * the log buffer size. Since the log write buffer is shared by all + * writers, this is the best approximation possible.</p>
        + * + * @see #sleepAfterBackgroundIO + */ + public void updateBackgroundWrites(int writeSize, int logBufferSize) { + + /* + * Make a copy of the volatile limit field since it could change + * between the time we check it and the time we use it below. + */ + int limit = backgroundWriteLimit; + if (limit > 0) { + synchronized (backgroundTrackingMutex) { + backgroundWriteBytes += writeSize; + int writeCount = (int) (backgroundWriteBytes / logBufferSize); + if (writeCount >= limit) { + backgroundSleepBacklog += 1; + /* Remainder is rolled forward. */ + backgroundWriteBytes -= (limit * logBufferSize); + assert backgroundWriteBytes >= 0; + } + } + } + } + + /** + * If the sleep backlog is non-zero (set by updateBackgroundReads or + * updateBackgroundWrites), sleep for the configured interval and decrement + * the backlog. + * + *
+ * <p>If two threads call this method and the first call causes a sleep, + * the call by the second thread will block until the first thread's sleep + * interval is over. When the call by the second thread is unblocked, if + * another sleep is needed then the second thread will sleep again. In + * other words, when lots of sleeps are needed, background threads may + * back up. This is intended to give foreground threads a chance to "catch + * up" when background threads are doing a lot of IO.</p>
        + */ + public void sleepAfterBackgroundIO() { + if (backgroundSleepBacklog > 0) { + synchronized (backgroundSleepMutex) { + /* Sleep. Rethrow interrupts if they occur. */ + try { + /* FindBugs: OK that we're sleeping with a mutex held. */ + Thread.sleep(backgroundSleepInterval); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + /* Assert has intentional side effect for unit testing. */ + assert TestHookExecute.doHookIfSet(backgroundSleepHook); + } + synchronized (backgroundTrackingMutex) { + /* Decrement backlog last to make other threads wait. */ + if (backgroundSleepBacklog > 0) { + backgroundSleepBacklog -= 1; + } + } + } + } + + /* For unit testing only. */ + public void setBackgroundSleepHook(TestHook hook) { + backgroundSleepHook = hook; + } + + /** + * Logs the map tree root and saves the LSN. + */ + public void logMapTreeRoot() + throws DatabaseException { + + logMapTreeRoot(DbLsn.NULL_LSN); + } + + /** + * Logs the map tree root, but only if its current LSN is before the + * ifBeforeLsn parameter or ifBeforeLsn is NULL_LSN. + */ + public void logMapTreeRoot(long ifBeforeLsn) + throws DatabaseException { + + mapTreeRootLatch.acquireExclusive(); + try { + if (ifBeforeLsn == DbLsn.NULL_LSN || + DbLsn.compareTo(mapTreeRootLsn, ifBeforeLsn) < 0) { + + mapTreeRootLsn = logManager.log( + SingleItemEntry.create(LogEntryType.LOG_DBTREE, dbMapTree), + ReplicationContext.NO_REPLICATE); + } + } finally { + mapTreeRootLatch.release(); + } + } + + /** + * Force a rewrite of the map tree root if required. + */ + public void rewriteMapTreeRoot(long cleanerTargetLsn) + throws DatabaseException { + + mapTreeRootLatch.acquireExclusive(); + try { + if (DbLsn.compareTo(cleanerTargetLsn, mapTreeRootLsn) == 0) { + + /* + * The root entry targetted for cleaning is in use. Write a + * new copy. + */ + mapTreeRootLsn = logManager.log( + SingleItemEntry.create(LogEntryType.LOG_DBTREE, dbMapTree), + ReplicationContext.NO_REPLICATE); + } + } finally { + mapTreeRootLatch.release(); + } + } + + /** + * @return the mapping tree root LSN. + */ + public long getRootLsn() { + return mapTreeRootLsn; + } + + /** + * Set the mapping tree from the log. Called during recovery. + */ + public void readMapTreeFromLog(long rootLsn) + throws DatabaseException { + + if (dbMapTree != null) { + dbMapTree.close(); + } + dbMapTree = (DbTree) logManager.getEntryHandleFileNotFound(rootLsn); + + /* Set the dbMapTree to replicated when converted. */ + if (!dbMapTree.isReplicated() && getAllowRepConvert()) { + dbMapTree.setIsReplicated(); + dbMapTree.setIsRepConverted(); + needRepConvert = true; + } + + dbMapTree.initExistingEnvironment(this); + + /* Set the map tree root */ + mapTreeRootLatch.acquireExclusive(); + try { + mapTreeRootLsn = rootLsn; + } finally { + mapTreeRootLatch.release(); + } + } + + /** + * Tells the asynchronous IN compressor thread about a BIN with a deleted + * entry. + */ + public void addToCompressorQueue(BIN bin) { + inCompressor.addBinToQueue(bin); + } + + /** + * Tells the asynchronous IN compressor thread about a collections of + * BINReferences with deleted entries. 
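Both addToCompressorQueue variants feed the INCompressor daemon, which later prunes the deleted slots. Applications never call them directly, but they can control the daemon itself; a sketch using the standard public knob:

    import com.sleepycat.je.EnvironmentConfig;

    EnvironmentConfig config = new EnvironmentConfig();
    // Stop the background daemon; deleted slots then accumulate until
    // Environment.compress() drains the queue synchronously, which lands
    // in invokeCompressor() later in this class.
    config.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false");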
+ */ + public void addToCompressorQueue(Collection binRefs) { + inCompressor.addMultipleBinRefsToQueue(binRefs); + } + + public void lazyCompress(IN in) { + + if (!in.isBIN()) { + return; + } + + final BIN bin = (BIN) in; + + lazyCompress(bin, !bin.shouldLogDelta() /*compressDirtySlots*/); + } + + public void lazyCompress(IN in, boolean compressDirtySlots) { + inCompressor.lazyCompress(in, compressDirtySlots); + } + + /** + * Reset the logging level for specified loggers in a JE environment. + * + * @throws IllegalArgumentException via JEDiagnostics.OP_RESET_LOGGING + */ + public void resetLoggingLevel(String changedLoggerName, Level level) { + + /* + * Go through the loggers registered in the global log manager, and + * set the new level. If the specified logger name is not valid, throw + * an IllegalArgumentException. + */ + java.util.logging.LogManager loggerManager = + java.util.logging.LogManager.getLogManager(); + Enumeration loggers = loggerManager.getLoggerNames(); + boolean validName = false; + + while (loggers.hasMoreElements()) { + String loggerName = loggers.nextElement(); + Logger logger = loggerManager.getLogger(loggerName); + + if ("all".equals(changedLoggerName) || + loggerName.endsWith(changedLoggerName) || + loggerName.endsWith(changedLoggerName + + LoggerUtils.NO_ENV) || + loggerName.endsWith(changedLoggerName + + LoggerUtils.FIXED_PREFIX) || + loggerName.startsWith(changedLoggerName)) { + + logger.setLevel(level); + validName = true; + } + } + + if (!validName) { + throw new IllegalArgumentException + ("The logger name parameter: " + changedLoggerName + + " is invalid!"); + } + } + + /* Initialize the handler's formatter. */ + protected Formatter initFormatter() { + return new TracerFormatter(getName()); + } + + private FileHandler initFileHandler() + throws DatabaseException { + + /* + * Note that in JE 3.X and earlier, file logging encompassed both + * logging to a java.util.logging.FileHandler and our own JE log files + * and logging was disabled for read only and in-memory environments. + * Now that these two concepts are separated, file logging is supported + * for in-memory environments. File logging can be supported as long as + * there is a valid environment home. + */ + boolean setupLoggers = + configManager.getBoolean(EnvironmentParams.ENV_SETUP_LOGGER); + if ((envHome == null) || (!envHome.isDirectory()) || + (isReadOnly && !setupLoggers)) { + + /* + * Return null if no environment home directory(therefore no place + * to put file handler output files), or if the Environment is read + * only. + */ + return null; + } + + String handlerName = com.sleepycat.je.util.FileHandler.class.getName(); + String logFilePattern = envHome + "/" + INFO_FILES; + + /* Log with a rotating set of files, use append mode. */ + int limit = FILEHANDLER_LIMIT; + String logLimit = + LoggerUtils.getLoggerProperty(handlerName + ".limit"); + if (logLimit != null) { + limit = Integer.parseInt(logLimit); + } + + /* Limit the number of files. 
*/ + int count = FILEHANDLER_COUNT; + String logCount = + LoggerUtils.getLoggerProperty(handlerName + ".count"); + if (logCount != null) { + count = Integer.parseInt(logCount); + } + + try { + return new com.sleepycat.je.util.FileHandler(logFilePattern, + limit, + count, + formatter, + this); + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedException + ("Problem creating output files in: " + logFilePattern, e); + } + } + + public ConsoleHandler getConsoleHandler() { + return consoleHandler; + } + + public FileHandler getFileHandler() { + return fileHandler; + } + + public Handler getConfiguredHandler() { + return configuredHandler; + } + + public void closeHandlers() { + if (consoleHandler != null) { + consoleHandler.close(); + } + + if (fileHandler != null) { + fileHandler.close(); + } + } + + /** + * Not much to do, mark state. + */ + public void open() { + + assert invalidatingEFE.get() == null; + + envState = DbEnvState.OPEN; + } + + /** + * Invalidate the environment. Done when a fatal exception + * (EnvironmentFailureException) is thrown. + */ + public void invalidate(EnvironmentFailureException e) { + + invalidatingEFE.compareAndSet(null, e); + + /* + * Remember the wedged exception even if invalidatingEFE != null. + * The EWE takes priority over other exceptions during close(). + */ + if (e instanceof EnvironmentWedgedException) { + wedgedEFE.compareAndSet(null, (EnvironmentWedgedException) e); + } + + /* + * Set state to invalid *after* setting invalidatingEFE, to maintain + * invariant: + * if (envState == INVALID) then (invalidatingEFE.get() != null) + * + * It is safe to check and set without a mutex, because the state never + * transitions away from CLOSED. + */ + if (envState != DbEnvState.CLOSED) { + envState = DbEnvState.INVALID; + } + + requestShutdownDaemons(); + } + + public EnvironmentFailureException getInvalidatingException() { + return invalidatingEFE.get(); + } + + public AtomicReference + getInvalidatingExceptionReference() { + + return invalidatingEFE; + } + + /** + * Invalidate the environment when a Java Error is thrown. + */ + public void invalidate(Error e) { + + /* + * initCause() throws ISE if the cause is non-null. To prevent this + * from happening when two threads call this method, synchronize on the + * exception to make the check and set atomic. + */ + synchronized (preallocatedEFE) { + if (preallocatedEFE.getCause() == null) { + preallocatedEFE.initCause(e); + } + } + + invalidate(preallocatedEFE); + } + + /** + * Returns true if the environment is currently invalid or was invalidated + * and closed. + */ + public boolean wasInvalidated() { + return invalidatingEFE.get() != null; + } + + /** + * @return true if environment is fully open (not being constructed and not + * closed), and has not been invalidated by an EnvironmentFailureException. + */ + public boolean isValid() { + return (envState == DbEnvState.OPEN); + } + + /** + * @return true if environment is still in init + */ + public boolean isInInit() { + return (envState == DbEnvState.INIT); + } + + /** + * @return true if close has begun, although the state may still be open. + */ + public boolean isClosing() { + return closing; + } + + public boolean isClosed() { + return (envState == DbEnvState.CLOSED); + } + + /** + * When a EnvironmentFailureException occurs or the environment is closed, + * further writing can cause log corruption. 
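From the application's perspective, the INVALID state surfaces as an EnvironmentFailureException from the operation that tripped it and from every call thereafter (see checkIfInvalid below). A common handling pattern, sketched under the assumption that the underlying fault is transient:

    import java.io.File;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.EnvironmentFailureException;

    static Environment reopen(Environment env, File home,
                              EnvironmentConfig config) {
        if (env.isValid()) {
            return env;
        }
        try {
            env.close(); // frees files and threads; may rethrow the cause
        } catch (EnvironmentFailureException ignored) {
            // Expected when closing an invalid environment.
        }
        return new Environment(home, config); // re-runs recovery
    }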
+ */ + public boolean mayNotWrite() { + return (envState == DbEnvState.INVALID) || + (envState == DbEnvState.CLOSED); + } + + public void checkIfInvalid() + throws EnvironmentFailureException { + + if (envState != DbEnvState.INVALID) { + return; + } + + final EnvironmentFailureException efe = invalidatingEFE.get(); + assert efe != null; + + /* + * Set a flag in the exception so the exception message will be + * clear that this was an earlier exception. + */ + efe.setAlreadyThrown(true); + + if (efe == preallocatedEFE) { + efe.fillInStackTrace(); + /* Do not wrap to avoid allocations after an OOME. */ + throw efe; + } + + throw efe.wrapSelf("Environment must be closed, caused by: " + efe); + } + + public void checkOpen() + throws DatabaseException { + + /* + * Allow OPEN and INIT states, but not INVALID and CLOSED. + */ + checkIfInvalid(); + + /* + * The CLOSED state should not occur when the Environnment handle is + * closed, because its environmentImpl field is null, but we check + * anyway to be safe. + */ + if (envState == DbEnvState.CLOSED) { + throw new IllegalStateException + ("Attempt to use a Environment that has been closed."); + } + } + + /** + * Decrements the reference count and closes the environment when it + * reaches zero. A checkpoint is always performed when closing. + */ + public void close() + throws DatabaseException { + + /* Calls doClose while synchronized on DbEnvPool. */ + DbEnvPool.getInstance().closeEnvironment + (this, true /*doCheckpoint*/, false /*isAbnormalClose*/); + } + + /** + * Decrements the reference count and closes the environment when it + * reaches zero. A checkpoint when closing is optional. + */ + public void close(boolean doCheckpoint) + throws DatabaseException { + + /* Calls doClose while synchronized on DbEnvPool. */ + DbEnvPool.getInstance().closeEnvironment + (this, doCheckpoint, false /*isAbnormalClose*/); + } + + /** + * Used by error handling to forcibly close an environment, and by tests to + * close an environment to simulate a crash. Database handles do not have + * to be closed before calling this method. A checkpoint is not performed. + */ + public void abnormalClose() + throws DatabaseException { + + /* Discard the internal handle, for an abnormal close. */ + closeInternalEnvHandle(true); + + /* + * We are assuming that the environment will be cleared out of the + * environment pool, so it's safe to assert that the open count is + * zero. + */ + int openCount1 = getOpenCount(); + if (openCount1 > 1) { + throw EnvironmentFailureException.unexpectedState + (this, "Abnormal close assumes that the open count on " + + "this handle is 1, not " + openCount1); + } + + /* Calls doClose while synchronized on DbEnvPool. */ + DbEnvPool.getInstance().closeEnvironment + (this, false /*doCheckpoint*/, true /*isAbnormalClose*/); + } + + /** + * Closes the environment, optionally performing a checkpoint and checking + * for resource leaks. This method must be called while synchronized on + * DbEnvPool. + * + * @throws IllegalStateException if the environment is already closed. + * + * @throws EnvironmentFailureException if leaks or other problems are + * detected while closing. + */ + synchronized void doClose(boolean doCheckpoint, boolean isAbnormalClose) { + + /* Discard the internal handle. 
*/ + closeInternalEnvHandle(isAbnormalClose); + + StringWriter errorStringWriter = new StringWriter(); + PrintWriter errors = new PrintWriter(errorStringWriter); + DiskLimitException diskLimitEx = null; + + try { + Trace.traceLazily + (this, "Close of environment " + envHome + " started"); + LoggerUtils.fine(envLogger, + this, + "Close of environment " + envHome + " started"); + + envState.checkState(DbEnvState.VALID_FOR_CLOSE, + DbEnvState.CLOSED); + + try { + setupClose(errors); + } catch (Exception e) { + appendException(errors, e, "releasing resources"); + } + + /* + * If backups are in progress, warn the caller that it was a + * mistake to close the environment at this time. + */ + if (getBackupCount() > 0) { + errors.append("\nThere are backups in progress so the "); + errors.append("Environment should not have been closed."); + errors.println(); + } + + /* + * Begin shutdown of the deamons before checkpointing. Cleaning + * during the checkpoint is wasted and slows down the checkpoint. + */ + requestShutdownDaemons(); + + try { + unregisterMBean(); + } catch (Exception e) { + appendException(errors, e, "unregistering MBean"); + } + + /* Checkpoint to bound recovery time. */ + boolean checkpointHappened = false; + if (doCheckpoint && + !isReadOnly && + (envState != DbEnvState.INVALID) && + logManager.getLastLsnAtRecovery() != + fileManager.getLastUsedLsn()) { + + /* + * Force a checkpoint. Flush all the way to the root, i.e., + * minimize recovery time. + */ + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + ckptConfig.setMinimizeRecoveryTime(true); + try { + invokeCheckpoint(ckptConfig, "close"); + checkpointHappened = true; + } catch (DiskLimitException e) { + diskLimitEx = e; + } catch (Exception e) { + appendException(errors, e, "performing checkpoint"); + } + } + + try { + postCheckpointClose(checkpointHappened); + } catch (Exception e) { + appendException(errors, e, "after checkpoint"); + } + + LoggerUtils.fine(envLogger, + this, + "About to shutdown daemons for Env " + envHome); + shutdownDaemons(); + + /* Flush log. */ + if (!isAbnormalClose) { + try { + logManager.flushSync(); + } catch (Exception e) { + appendException(errors, e, "flushing log manager"); + } + } + + try { + fileManager.clear(); + } catch (Exception e) { + appendException(errors, e, "clearing file manager"); + } + + try { + fileManager.close(); + } catch (Exception e) { + appendException(errors, e, "closing file manager"); + } + + /* + * Close the memory budgets on these components before the + * INList is forcibly released and the treeAdmin budget is + * cleared. + */ + dbMapTree.close(); + cleaner.close(); + inMemoryINs.clear(); + + closeHandlers(); + + if (!isAbnormalClose && + (envState != DbEnvState.INVALID)) { + + try { + checkLeaks(); + } catch (Exception e) { + appendException(errors, e, "performing validity checks"); + } + } + } finally { + envState = DbEnvState.CLOSED; + + /* + * Last ditch effort to clean up so that tests can continue and + * re-open the Environment in the face of an Exception or even an + * Error. Note that this was also attempted above. [#21929] + */ + clearFileManager(); + closeHandlers(); + } + + /* + * Throwing the wedged exception is the first priority. This is done + * even for an abnormal close, since HA may have created threads. + */ + if (wedgedEFE.get() != null) { + throw wedgedEFE.get(); + } + + /* Don't whine again if we've already whined. 
*/ + if (errorStringWriter.getBuffer().length() > 0 && + invalidatingEFE.get() == null) { + throw EnvironmentFailureException.unexpectedState + (errorStringWriter.toString()); + } + + /* If no other errors, throw DiskLimitException. */ + if (diskLimitEx != null) { + throw diskLimitEx; + } + } + + protected void appendException(PrintWriter pw, + Exception e, + String doingWhat) { + pw.append("\nException " + doingWhat + ": "); + e.printStackTrace(pw); + pw.println(); + } + + /** + * Release any resources from a subclass that need to be released before + * close is called on regular environment components. + * @throws DatabaseException + */ + protected synchronized void setupClose(@SuppressWarnings("unused") + PrintWriter errors) + throws DatabaseException { + } + + /** + * Release any resources from a subclass that need to be released after + * the closing checkpoint. + * @param checkpointed if true, a checkpoint as issued before the close + * @throws DatabaseException + */ + protected synchronized void postCheckpointClose(boolean checkpointed) + throws DatabaseException { + } + + /** + * Called after recovery but before any other initialization. Is overridden + * by ReplImpl to convert user defined databases to replicated after doing + * recovery. + */ + protected void postRecoveryConversion() { + } + + /** + * Perform dup conversion after recovery and before running daemons. + */ + private void convertDupDatabases() { + if (dbMapTree.getDupsConverted()) { + return; + } + /* Convert dup dbs, set converted flag, flush mapping tree root. */ + final DupConvert dupConvert = new DupConvert(this, dbMapTree); + dupConvert.convertDatabases(); + dbMapTree.setDupsConverted(); + logMapTreeRoot(); + logManager.flushSync(); + } + + /* + * Clear as many resources as possible, even in the face of an environment + * that has received a fatal error, in order to support reopening the + * environment in the same JVM. + */ + public void closeAfterInvalid() + throws DatabaseException { + + /* Calls doCloseAfterInvalid while synchronized on DbEnvPool. */ + DbEnvPool.getInstance().closeEnvironmentAfterInvalid(this); + } + + /** + * This method must be called while synchronized on DbEnvPool. + */ + public synchronized void doCloseAfterInvalid() { + + try { + unregisterMBean(); + } catch (Exception e) { + /* Klockwork - ok */ + } + + shutdownDaemons(); + + try { + fileManager.clear(); + } catch (Throwable e) { + /* Klockwork - ok */ + } + + try { + fileManager.close(); + } catch (Throwable e) { + /* Klockwork - ok */ + } + + /* + * Release resources held by handlers, such as memory and file + * descriptors + */ + closeHandlers(); + + envState = DbEnvState.CLOSED; + + /* + * The wedged exception must be thrown even when the environment is + * invalid, since the app must restart the process. + */ + if (wedgedEFE.get() != null) { + throw wedgedEFE.get(); + } + } + + void incOpenCount() { + openCount.incrementAndGet(); + } + + /** + * Returns true if the environment should be closed. + */ + boolean decOpenCount() { + return (openCount.decrementAndGet() <= 0); + } + + /** + * Returns a count of open environment handles, not including the internal + * handle. + */ + private int getOpenCount() { + return openCount.get(); + } + + /** + * Returns the count of environment handles that were opened explicitly by + * the application. Because the internal environment handle is not included + * in the openCount, this method is currently equivalent to getOpenCount. 
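The backup counters just below bracket DbBackup instances between startBackup and endBackup, and doClose above warns when any are still open at close time. The intended pairing, with the actual file copying elided:

    import com.sleepycat.je.util.DbBackup;

    DbBackup backup = new DbBackup(env);
    backup.startBackup();                    // increments the backup count
    try {
        for (String file : backup.getLogFilesInBackupSet()) {
            // copy <envHome>/<file> to the backup destination ...
        }
    } finally {
        backup.endBackup();                  // decrements the backup count
    }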
+ * + * @return the count of open application handles + */ + protected int getAppOpenCount() { + return openCount.get(); + } + + void incBackupCount() { + backupCount.incrementAndGet(); + } + + void decBackupCount() { + backupCount.decrementAndGet(); + } + + /** + * Returns a count of the number of in-progress DbBackups. + */ + protected int getBackupCount() { + return backupCount.get(); + } + + public static int getThreadLocalReferenceCount() { + return threadLocalReferenceCount; + } + + public static synchronized void incThreadLocalReferenceCount() { + threadLocalReferenceCount++; + } + + public static synchronized void decThreadLocalReferenceCount() { + threadLocalReferenceCount--; + } + + public boolean getDidFullThreadDump() { + return didFullThreadDump; + } + + public void setDidFullThreadDump(boolean val) { + didFullThreadDump = val; + } + + public boolean getNoComparators() { + return noComparators; + } + + /** + * Debugging support. Check for leaked locks and transactions. + */ + private void checkLeaks() + throws DatabaseException { + + /* Only enabled if this check leak flag is true. */ + if (!configManager.getBoolean(EnvironmentParams.ENV_CHECK_LEAKS)) { + return; + } + + boolean clean = true; + StatsConfig statsConfig = new StatsConfig(); + + /* Fast stats will not return NTotalLocks below. */ + statsConfig.setFast(false); + + LockStats lockStat = lockStat(statsConfig); + if (lockStat.getNTotalLocks() != 0) { + clean = false; + System.err.println("Problem: " + lockStat.getNTotalLocks() + + " locks left"); + txnManager.getLockManager().dump(); + } + + TransactionStats txnStat = txnStat(statsConfig); + if (txnStat.getNActive() != 0) { + clean = false; + System.err.println("Problem: " + txnStat.getNActive() + + " txns left"); + TransactionStats.Active[] active = txnStat.getActiveTxns(); + if (active != null) { + for (Active element : active) { + System.err.println(element); + } + } + } + + if (LatchSupport.TRACK_LATCHES) { + if (LatchSupport.nBtreeLatchesHeld() > 0) { + clean = false; + System.err.println("Some latches held at env close."); + System.err.println(LatchSupport.btreeLatchesHeldToString()); + } + } + + long memoryUsage = memoryBudget.getVariableCacheUsage(); + if (memoryUsage != 0) { + clean = false; + System.err.println("Local Cache Usage = " + memoryUsage); + System.err.println(memoryBudget.loadStats()); + } + + boolean assertionsEnabled = false; + assert assertionsEnabled = true; // Intentional side effect. + if (!clean && assertionsEnabled) { + throw EnvironmentFailureException.unexpectedState + ("Lock, transaction, latch or memory " + + "left behind at environment close"); + } + } + + /** + * Invoke a checkpoint programmatically. Note that only one checkpoint may + * run at a time. + */ + public void invokeCheckpoint(CheckpointConfig config, + String invokingSource) { + checkpointer.doCheckpoint( + config, invokingSource, false /*invokedFromDaemon*/); + } + + /** + * Coordinates an eviction with an in-progress checkpoint and returns + * whether provisional logging is needed. + * + * @return the provisional status to use for logging the target. + */ + public Provisional coordinateWithCheckpoint( + final DatabaseImpl dbImpl, + final int targetLevel, + final IN parent) { + + return checkpointer.coordinateEvictionWithCheckpoint( + dbImpl, targetLevel, parent); + } + + /** + * Flush the log buffers and write to the log, and optionally fsync. 
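flushLog below and invokeCheckpoint above are reached through the public durability calls. A sketch of the spectrum, from cheapest to strongest, on an open Environment env; Environment.flushLog(boolean) is present in recent JE releases, so treat its availability as version-dependent:

    import com.sleepycat.je.CheckpointConfig;

    env.flushLog(false);   // push buffered log records to the OS, no fsync
    env.flushLog(true);    // also fsync, durable across an OS crash

    CheckpointConfig force = new CheckpointConfig();
    force.setForce(true);
    env.checkpoint(force); // lands in invokeCheckpoint(config, source) above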
+ * [#19111] + */ + public void flushLog(boolean fsync) { + if (fsync) { + logManager.flushSync(); + } else { + logManager.flushNoSync(); + } + } + + /** + * Flip the log to a new file, forcing an fsync. Return the LSN of the + * trace record in the new file. + */ + public long forceLogFileFlip() + throws DatabaseException { + + return logManager.logForceFlip( + new TraceLogEntry(new Trace("File Flip"))); + } + + /** + * Invoke a compress programmatically. Note that only one compress may run + * at a time. + */ + public void invokeCompressor() { + inCompressor.doCompress(); + } + + public void invokeEvictor() { + evictor.doManualEvict(); + offHeapCache.doManualEvict(); + } + + /** + * @throws UnsupportedOperationException if read-only or mem-only. + */ + public int invokeCleaner(boolean cleanMultipleFiles) { + + if (isReadOnly || isMemOnly) { + throw new UnsupportedOperationException + ("Log cleaning not allowed in a read-only or memory-only " + + "environment"); + } + + return cleaner.doClean( + cleanMultipleFiles, false /*forceCleaning*/); + } + + public void requestShutdownDaemons() { + + closing = true; + + inCompressor.requestShutdown(); + + /* + * Don't shutdown the shared cache evictor here. It is shutdown when + * the last shared cache environment is removed in DbEnvPool. + */ + if (!sharedCache) { + evictor.requestShutdown(); + offHeapCache.requestShutdown(); + } + + checkpointer.requestShutdown(); + cleaner.requestShutdown(); + statCapture.requestShutdown(); + logFlusher.requestShutdown(); + dataVerifier.requestShutdown(); + } + + /** + * Ask all daemon threads to shut down. + */ + public void shutdownDaemons() { + + /* Shutdown stats capture thread first so we can access stats. */ + statCapture.shutdown(); + + synchronized (statSynchronizer) { + + inCompressor.shutdown(); + + /* + * Cleaner has to be shutdown before checkpointer because former + * calls the latter. + */ + cleaner.shutdown(); + checkpointer.shutdown(); + + /* + * The evictors have to be shutdown last because the other daemons + * might create changes to the memory usage which result in a + * notify to eviction. The off-heap evictor is shutdown after the + * main evictor since main eviction moves data to off-heap, and not + * vice-versa. + */ + if (sharedCache) { + + /* + * Don't shutdown the SharedEvictor here. It is shutdown when + * the last shared cache environment is removed in DbEnvPool. + * Instead, remove this environment from the SharedEvictor's + * list so we won't try to evict from a closing/closed + * environment. Note that we do this after the final checkpoint + * so that eviction is possible during the checkpoint, and just + * before deconstructing the environment. Leave the evictor + * field intact so DbEnvPool can get it. + */ + evictor.removeSharedCacheEnv(this); + offHeapCache.clearCache(this); + } else { + evictor.shutdown(); + offHeapCache.shutdown(); + } + + logFlusher.shutdown(); + dataVerifier.shutdown(); + } + } + + public boolean isNoLocking() { + return isNoLocking; + } + + public boolean isTransactional() { + return isTransactional; + } + + public boolean isReadOnly() { + return isReadOnly; + } + + public boolean isMemOnly() { + return isMemOnly; + } + + /** + * Named "optional" because the nodeName property in EnvironmentConfig is + * optional may be null. {@link #getName()} should almost always be used + * instead for messages, exceptions, etc. 
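The node name handled below is purely a diagnostic label for a standalone environment. When it is set, makeDaemonThreadName tags each daemon thread with it, which keeps thread dumps readable when one JVM hosts several environments. Sketch:

    EnvironmentConfig config = new EnvironmentConfig();
    config.setNodeName("orders-env");
    // Daemon threads are then named e.g. "Checkpointer (orders-env)".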
+ */ + public String getOptionalNodeName() { + return optionalNodeName; + } + + public String makeDaemonThreadName(String daemonName) { + + if (optionalNodeName == null) { + return daemonName; + } + + return daemonName + " (" + optionalNodeName + ")"; + } + + /** + * Returns whether DB/MapLN eviction is enabled. + */ + public boolean getDbEviction() { + return dbEviction; + } + + public static int getAdler32ChunkSize() { + return adler32ChunkSize; + } + + public boolean getSharedCache() { + return sharedCache; + } + + public boolean allowBlindOps() { + return allowBlindOps; + } + + public boolean allowBlindPuts() { + return allowBlindPuts; + } + + public int getMaxEmbeddedLN() { + return maxEmbeddedLN; + } + + /** + * Transactional services. + */ + public Txn txnBegin(Transaction parent, TransactionConfig txnConfig) + throws DatabaseException { + + return txnManager.txnBegin(parent, txnConfig); + } + + /* Services. */ + public LogManager getLogManager() { + return logManager; + } + + public LogFlusher getLogFlusher() { + return logFlusher; + } + + public DataVerifier getDataVerifier() { + return dataVerifier; + } + + public FileManager getFileManager() { + return fileManager; + } + + public DbTree getDbTree() { + return dbMapTree; + } + + /** + * Returns the config manager for the current base configuration. + * + *
+ * The configuration can change, but changes are made by replacing the + * config manager object with a new one. To use a consistent set of + * properties, call this method once and query the returned manager + * repeatedly for each property, rather than getting the config manager via + * this method for each property individually.
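+ *
+ * <p>Sketch of the recommended pattern (illustrative):
+ * <pre>
+ *     DbConfigManager cm = envImpl.getConfigManager();
+ *     boolean checkLeaks =
+ *         cm.getBoolean(EnvironmentParams.ENV_CHECK_LEAKS);
+ *     // ...query cm for further properties; do not re-fetch the
+ *     // manager for each property
+ * </pre>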
+ */ + public DbConfigManager getConfigManager() { + return configManager; + } + + public NodeSequence getNodeSequence() { + return nodeSequence; + } + + /** + * Clones the current configuration. + */ + public EnvironmentConfig cloneConfig() { + return configManager.getEnvironmentConfig().clone(); + } + + /** + * Clones the current mutable configuration. + */ + public EnvironmentMutableConfig cloneMutableConfig() { + return DbInternal.cloneMutableConfig + (configManager.getEnvironmentConfig()); + } + + /** + * Throws an exception if an immutable property is changed. + */ + public void checkImmutablePropsForEquality(Properties handleConfigProps) + throws IllegalArgumentException { + + DbInternal.checkImmutablePropsForEquality + (configManager.getEnvironmentConfig(), handleConfigProps); + } + + /** + * Changes the mutable config properties that are present in the given + * config, and notifies all config observers. + */ + public void setMutableConfig(EnvironmentMutableConfig config) + throws DatabaseException { + + /* Calls doSetMutableConfig while synchronized on DbEnvPool. */ + DbEnvPool.getInstance().setMutableConfig(this, config); + } + + /** + * This method must be called while synchronized on DbEnvPool. + */ + synchronized void doSetMutableConfig(EnvironmentMutableConfig config) + throws DatabaseException { + + /* Clone the current config. */ + EnvironmentConfig newConfig = + configManager.getEnvironmentConfig().clone(); + + /* Copy in the mutable props. */ + DbInternal.copyMutablePropsTo(config, newConfig); + + /* + * Update the current config and notify observers. The config manager + * is replaced with a new instance that uses the new configuration. + * This avoids synchronization issues: other threads that have a + * reference to the old configuration object are not impacted. + * + * Notify listeners in reverse order of registration so that the + * environment listener is notified last and can start daemon threads + * after they are configured. + */ + configManager = resetConfigManager(newConfig); + for (int i = configObservers.size() - 1; i >= 0; i -= 1) { + EnvConfigObserver o = configObservers.get(i); + o.envConfigUpdate(configManager, newConfig); + } + } + + /** + * Make a new config manager that has all the properties needed. More + * complicated for subclasses. + */ + protected DbConfigManager resetConfigManager(EnvironmentConfig newConfig) { + return new DbConfigManager(newConfig); + } + + public ExceptionListener getExceptionListener() { + return exceptionListener; + } + + /** + * Adds an observer of mutable config changes. + */ + public synchronized void addConfigObserver(EnvConfigObserver o) { + configObservers.add(o); + } + + /** + * Removes an observer of mutable config changes. + */ + public synchronized void removeConfigObserver(EnvConfigObserver o) { + configObservers.remove(o); + } + + public INList getInMemoryINs() { + return inMemoryINs; + } + + public TxnManager getTxnManager() { + return txnManager; + } + + public Checkpointer getCheckpointer() { + return checkpointer; + } + + public Cleaner getCleaner() { + return cleaner; + } + + public MemoryBudget getMemoryBudget() { + return memoryBudget; + } + + /** + * Uses cached disk usage info to determine whether disk space limits are + * currently violated. This method simply returns a volatile field. The + * cached information is updated frequently enough to prevent violating the + * limits by a large amount.
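+ *
+ * <p>Polling sketch (illustrative); checkDiskLimitViolation() below is
+ * the throwing variant of the same check:
+ * <pre>
+ *     String msg = envImpl.getDiskLimitViolation();
+ *     if (msg != null) {
+ *         // refuse the write and surface msg to the application
+ *     }
+ * </pre>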
+ * + * @return a non-null message (appropriate for an exception) if a disk + * limit is currently violated, else null. + */ + public String getDiskLimitViolation() { + return cleaner.getDiskLimitViolation(); + } + + /** + * Uses cached disk usage info to determine whether disk space limits are + * currently violated. This method simply checks a volatile field. The + * cached information is updated frequently enough to prevent violating the + * limits by a large amount. + * + * @throws DiskLimitException if a disk limit is currently violated. + */ + public void checkDiskLimitViolation() throws DiskLimitException { + final String violation = cleaner.getDiskLimitViolation(); + if (violation != null) { + throw new DiskLimitException(null, violation); + } + } + + /** + * @return environment Logger, for use in debugging output. + */ + public Logger getLogger() { + return envLogger; + } + + public boolean isDbLoggingDisabled() { + return dbLoggingDisabled; + } + + /* + * Verification, must be run while system is quiescent. + */ + public void verify(VerifyConfig config) + throws DatabaseException { + + final BtreeVerifier verifier = new BtreeVerifier(this); + verifier.setBtreeVerifyConfig(config); + verifier.verifyAll(); + } + + public void verifyCursors() + throws DatabaseException { + + inCompressor.verifyCursors(); + } + + public boolean getExposeUserData() { + return exposeUserData; + } + + /* + * Statistics + */ + + /** + * Retrieve and return stat information. + */ + public EnvironmentStats loadStats(StatsConfig config) + throws DatabaseException { + return statManager.loadStats(config, statKey); + } + + /** + * Retrieve and return stat information. + */ + public EnvironmentStats loadStatsInternal(StatsConfig config) + throws DatabaseException { + + EnvironmentStats stats = new EnvironmentStats(); + + synchronized (statSynchronizer) { + stats.setINCompStats(inCompressor.loadStats(config)); + stats.setCkptStats(checkpointer.loadStats(config)); + stats.setCleanerStats(cleaner.loadStats(config)); + stats.setLogStats(logManager.loadStats(config)); + stats.setMBAndEvictorStats( + memoryBudget.loadStats(), evictor.loadStats(config)); + stats.setOffHeapStats(offHeapCache.loadStats(config)); + stats.setLockStats(txnManager.loadStats(config)); + stats.setEnvStats(loadEnvImplStats(config)); + stats.setThroughputStats( + thrputStats.cloneGroup(config.getClear())); + } + return stats; + } + + private StatGroup loadEnvImplStats(StatsConfig config) { + StatGroup ret = envStats.cloneGroup(config.getClear()); + LongStat ct = new LongStat(ret, ENV_CREATION_TIME); + ct.set(creationTime); + return ret; + } + + public void incSearchOps(final DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + if (dbImpl.isKnownSecondary()) { + secSearchOps.increment(); + } else { + priSearchOps.increment(); + } + } + + public void incSearchFailOps(final DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + if (dbImpl.isKnownSecondary()) { + secSearchFailOps.increment(); + } else { + priSearchFailOps.increment(); + } + } + + public void incPositionOps(final DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + if (dbImpl.isKnownSecondary()) { + secPositionOps.increment(); + } else { + priPositionOps.increment(); + } + } + + public void incInsertOps(final DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + if (dbImpl.isKnownSecondary()) { + secInsertOps.increment(); + } else { + priInsertOps.increment(); + } + } + + public void incInsertFailOps(final 
DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + if (!dbImpl.isKnownSecondary()) { + priInsertFailOps.increment(); + } + } + + public void incUpdateOps(final DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + if (dbImpl.isKnownSecondary()) { + secUpdateOps.increment(); + } else { + priUpdateOps.increment(); + } + } + + public void incDeleteOps(final DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + if (dbImpl.isKnownSecondary()) { + secDeleteOps.increment(); + } else { + priDeleteOps.increment(); + } + } + + public void incDeleteFailOps(final DatabaseImpl dbImpl) { + if (dbImpl.isInternalDb()) { + return; + } + /* Deletion failure always counted as primary DB deletion. */ + priDeleteFailOps.increment(); + } + + public void incRelatchesRequired() { + relatchesRequired.increment(); + } + + public void incBinDeltaGets() { + binDeltaGets.increment(); + } + + public void incBinDeltaInserts() { + binDeltaInserts.increment(); + } + + public void incBinDeltaUpdates() { + binDeltaUpdates.increment(); + } + + public void incBinDeltaDeletes() { + binDeltaDeletes.increment(); + } + + /** + * For replicated environments only; just return true for a standalone + * environment. + */ + public boolean addDbBackup(@SuppressWarnings("unused") DbBackup backup) { + incBackupCount(); + return true; + } + + /** + * For replicated environments only; do nothing for a standalone + * environment. + */ + public void removeDbBackup(@SuppressWarnings("unused") DbBackup backup) { + decBackupCount(); + } + + /** + * Retrieve lock statistics + */ + public synchronized LockStats lockStat(StatsConfig config) + throws DatabaseException { + + return txnManager.lockStat(config); + } + + /** + * Retrieve txn statistics + */ + public synchronized TransactionStats txnStat(StatsConfig config) { + return txnManager.txnStat(config); + } + + public int getINCompressorQueueSize() { + return inCompressor.getBinRefQueueSize(); + } + + public StartupTracker getStartupTracker() { + return startupTracker; + } + + /** + * Get the environment home directory. + */ + public File getEnvironmentHome() { + return envHome; + } + + public Environment getInternalEnvHandle() { + return envInternal; + } + + /** + * Closes the internally maintained environment handle. If the close is + * an abnormal close, it just does cleanup work instead of trying to close + * the internal environment handle which may result in further errors. + */ + private synchronized void closeInternalEnvHandle(boolean isAbnormalClose) { + + if (envInternal == null) { + return; + } + + if (isAbnormalClose) { + envInternal = null; + } else { + final Environment savedEnvInternal = envInternal; + /* Blocks recursions resulting from the close operation below */ + envInternal = null; + DbInternal.closeInternalHandle(savedEnvInternal); + } + } + + /** + * Get an environment name, for tagging onto logging and debug message. + * Useful for multiple environments in a JVM, or for HA. + */ + public String getName() { + if (optionalNodeName == null){ + return envHome.toString(); + } + return getOptionalNodeName(); + } + + public long getTxnTimeout() { + return txnTimeout; + } + + public long getLockTimeout() { + return lockTimeout; + } + + /* + * Only used for unit test com.sleepycat.je.test.SecondaryTest. 
+ */ + public void setLockTimeout(long timeout) { + lockTimeout = timeout; + } + + public boolean getDeadlockDetection() { + return deadlockDetection; + } + + public long getDeadlockDetectionDelay() { + return deadlockDetectionDelay; + } + + public long getReplayTxnTimeout() { + if (lockTimeout != 0) { + return lockTimeout; + } + /* It can't be disabled, so make it the minimum. */ + return 1; + } + + /** + * Returns the shared secondary association latch. + */ + public ReentrantReadWriteLock getSecondaryAssociationLock() { + return secondaryAssociationLock; + } + + /** + * @return null if no off-heap cache is configured. + */ + public OffHeapCache getOffHeapCache() { + return offHeapCache; + } + + public boolean useOffHeapChecksums() { + return useOffHeapChecksums; + } + + /** + * Returns {@link EnvironmentParams#ENV_EXPIRATION_ENABLED}. + */ + public boolean isExpirationEnabled() { + return expirationEnabled; + } + + /** + * Returns whether a given expiration time precedes the current system + * time, i.e., the expiration time has passed. + */ + public boolean isExpired(final int expiration, final boolean hours) { + return expirationEnabled && + TTL.isExpired(expiration, hours); + } + + /** + * Returns whether a given expiration time precedes the current system + * time, i.e., the expiration time has passed. + */ + public boolean isExpired(final long expirationTime) { + return expirationEnabled && + TTL.isExpired(expirationTime); + } + + /** + * Returns whether a given expiration time precedes the current system time + * plus withinMs, i.e., the expiration time will pass within withinMs, or + * earlier. If withinMs is negative, this is whether the expiration time + * passed withinMs ago, or earlier. + */ + public boolean expiresWithin(final int expiration, + final boolean hours, + final long withinMs) { + return expirationEnabled && + TTL.expiresWithin(expiration, hours, withinMs); + } + + /** + * Same as {@link #expiresWithin(int, boolean, long)} but with a single + * expirationTime param. + */ + public boolean expiresWithin(final long expirationTime, + final long withinMs) { + return expirationEnabled && + TTL.expiresWithin(expirationTime, withinMs); + } + + public Evictor getEvictor() { + return evictor; + } + + /** + * Wake up the eviction threads when the main cache is full or close to + * full. We do not wake up the off-heap evictor threads since the off-heap + * budget is maintained internally by the off-heap evictor. + */ + void alertEvictor() { + evictor.alert(); + } + + /** + * Performs critical eviction if necessary. Is called before and after + * each cursor operation. We prefer to have the application thread do as + * little eviction as possible, to reduce the impact on latency, so + * critical eviction has an explicit set of criteria for determining when + * this should run. + * + * WARNING: The action performed here should be as inexpensive as possible, + * since it will impact app operation latency. Unconditional + * synchronization must not be performed, since that would introduce a new + * synchronization point for all app threads. + * + * An overriding method must call super.criticalEviction. + * + * No latches are held or synchronization is in use when this method is + * called. + */ + public void criticalEviction(boolean backgroundIO) { + evictor.doCriticalEviction(backgroundIO); + offHeapCache.doCriticalEviction(backgroundIO); + } + + /** + * Do eviction if the memory budget is over. 
Called by JE daemon + * threads that do not have the same latency concerns as application + * threads. + */ + public void daemonEviction(boolean backgroundIO) { + evictor.doDaemonEviction(backgroundIO); + offHeapCache.doDaemonEviction(backgroundIO); + } + + /** + * Performs special eviction (eviction other than standard IN eviction) + * for this environment. This method is called once per eviction batch to + * give other components an opportunity to perform eviction. For a shared + * cache, it is called for only one environment (in rotation) per batch. + * + * An overriding method must call super.specialEviction and return the sum + * of the long value it returns and any additional amount of budgeted + * memory that is evicted. + * + * No latches are held when this method is called, but it is called while + * synchronized on the evictor. + * + * @return the number of bytes evicted from the JE cache. + */ + public long specialEviction() { + return cleaner.getUtilizationTracker().evictMemory(); + } + + /** + * For stress testing. Should only ever be called from an assert. + */ + public static boolean maybeForceYield() { + if (forcedYield) { + Thread.yield(); + } + return true; // so assert doesn't fire + } + + /** + * Return true if this environment is part of a replication group. + */ + public boolean isReplicated() { + return false; + } + + /** + * Return true if this environment is used as an Arbiter. + */ + public boolean isArbiter() { + return false; + } + + /** + * Returns true if the VLSN is preserved as the record version. Always + * false in a standalone environment. Overridden by RepImpl. + */ + public boolean getPreserveVLSN() { + return false; + } + + /** + * Returns true if the VLSN is both preserved and cached. Always false in + * a standalone environment. Overridden by RepImpl. + */ + public boolean getCacheVLSN() { + return false; + } + + /** + * True if ReplicationConfig set allowConvert as true. A standalone + * environment is prohibited from doing a conversion, so this returns + * false. + */ + public boolean getAllowRepConvert() { + return false; + } + + /** + * True if this environment is converted from non-replicated to + * replicated. + */ + public boolean isRepConverted() { + return dbMapTree.isRepConverted(); + } + + public boolean needRepConvert() { + return needRepConvert; + } + + /** + * Computes and assigns VLSNs as needed to this log item for a replicated + * log record. This method must be invoked under the LWL to ensure that the + * VLSNs it generates are correctly serialized with respect to their + * locations in the log. + * + * The method must be invoked before any calls are made to determine the + * log entry size, since some of the underlying values used to determine + * the size of the entry will only be finalized after this call has + * completed. + * + * This method is only invoked when the log is being written as the master, + * since the replica merely reuses the VLSN values computed by the master. + * + * Since this method is invoked under the LWL it must not block. + * + * @param entry the log entry, an in/out argument: the entry is + * modified with an updated DTVLSN for commit and abort log entries. + * + * @return a non-null VLSN for all replicated log items + */ + public VLSN assignVLSNs(LogEntry entry) { + /* NOP for non-replicated environment. */ + return null; + } + + public VLSNRecoveryProxy getVLSNProxy() { + return new NoopVLSNProxy(); + } + + public boolean isMaster() { + /* NOP for non-replicated environment.
*/ + return false; + } + + public void preRecoveryCheckpointInit(RecoveryInfo recoveryInfo) { + /* NOP for non-replicated environment. */ + } + + public void registerVLSN(LogItem logItem) { + /* NOP for non-replicated environment. */ + } + + /** + * Truncate the head of the VLSNIndex to allow file deletion, if possible. + */ + public boolean tryVlsnHeadTruncate(long bytesNeeded) { + /* NOP for non-replicated environment. */ + return false; + } + + /** + * Do any work that must be done before the checkpoint end is written, as + * part of the checkpoint process. + */ + public void preCheckpointEndFlush() { + /* NOP for non-replicated environment. */ + } + + /** + * For replicated environments only; only the overridden method should + * ever be called. + */ + public Txn createReplayTxn(long txnId) { + throw EnvironmentFailureException.unexpectedState + ("Should not be called on a non replicated environment"); + } + + /** + * For replicated environments only; only the overridden method should + * ever be called. + */ + public ThreadLocker createRepThreadLocker() { + throw EnvironmentFailureException.unexpectedState + ("Should not be called on a non replicated environment"); + } + + /** + * For replicated environments only; only the overridden method should + * ever be called. + */ + public Txn createRepUserTxn(TransactionConfig config) { + throw EnvironmentFailureException.unexpectedState + ("Should not be called on a non replicated environment"); + } + + /** + * For replicated environments only; only the overridden method should + * ever be called. + */ + public Txn createRepTxn(TransactionConfig config, + long mandatedId) { + throw EnvironmentFailureException.unexpectedState + ("Should not be called on a non replicated environment"); + } + + /** + * For replicated environments only; only the overridden method should + * ever be called. + */ + public OperationFailureException + createLockPreemptedException(Locker locker, Throwable cause) { + throw EnvironmentFailureException.unexpectedState + ("Should not be called on a non replicated environment"); + } + + /** + * For replicated environments only; only the overridden method should + * ever be called. + */ + public OperationFailureException + createDatabasePreemptedException(String msg, + String dbName, + Database db) { + throw EnvironmentFailureException.unexpectedState + ("Should not be called on a non replicated environment"); + } + + /** + * For replicated environments only; only the overridden method should + * ever be called. + */ + public OperationFailureException createLogOverwriteException(String msg) { + throw EnvironmentFailureException.unexpectedState + ("Should not be called on a non replicated environment"); + } + + /** + * Returns the deprecated HA REPLAY_FREE_DISK_PERCENT parameter, or zero + * if this is not an HA env. + */ + public int getReplayFreeDiskPercent() { + return 0; + } + + /** + * Check whether this environment can be opened on an existing environment + * directory. + * @param dbTreePreserveVLSN + * + * @throws UnsupportedOperationException via Environment ctor. + */ + public void checkRulesForExistingEnv(boolean dbTreeReplicatedBit, + boolean dbTreePreserveVLSN) + throws UnsupportedOperationException { + + /* + * We only permit standalone Environment construction on an existing + * environment when we are in read only mode, to support command + * line utilities. We prohibit read/write opening, because we don't + * want to chance corruption of the environment by writing non-VLSN + * tagged entries in.
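+ * In concrete terms: a standalone read-only open of a formerly
+ * replicated directory is allowed, while a read/write open falls into
+ * the check below and throws UnsupportedOperationException.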
+ */ + if (dbTreeReplicatedBit && (!isReadOnly())) { + throw new UnsupportedOperationException + ("This environment was previously opened for replication." + + " It cannot be re-opened in read/write mode for" + + " non-replicated operation."); + } + + /* + * Same as above but for the preserve VLSN param, which may only be + * used in a replicated environment. See this overridden method in + * RepImpl which checks that the param is never changed. + */ + if (getPreserveVLSN() && (!isReadOnly())) { + /* Cannot use RepParams constant in standalone code. */ + throw new IllegalArgumentException + (EnvironmentParams.REP_PARAM_PREFIX + + "preserveRecordVersion parameter may not be true in a" + + " read-write, non-replicated environment"); + } + } + + /** + * Ensure that the in-memory vlsn index encompasses all logged entries + * before it is flushed to disk. A No-Op for non-replicated systems. + * [#19754] + */ + public void awaitVLSNConsistency() { + /* Nothing to do in a non-replicated system. */ + } + + /** + * The VLSNRecoveryProxy is only needed for replicated environments. + */ + private class NoopVLSNProxy implements VLSNRecoveryProxy { + + @Override + public void trackMapping(long lsn, + LogEntryHeader currentEntryHeader, + LogEntry targetLogEntry) { + /* intentional no-op */ + } + } + + public AtomicLongStat getThroughputStat(StatDefinition def) { + return thrputStats.getAtomicLongStat(def); + } + + /** + * Private class to prevent use of the close() method by the application + * on an internal handle. + */ + private static class InternalEnvironment extends Environment { + + public InternalEnvironment(File envHome, + EnvironmentConfig configuration, + EnvironmentImpl envImpl) + throws EnvironmentNotFoundException, + EnvironmentLockedException, + VersionMismatchException, + DatabaseException, + IllegalArgumentException { + super(envHome, configuration, null /*repConfigProxy*/, envImpl); + } + + @Override + protected boolean isInternalHandle() { + return true; + } + + @Override + public synchronized void close() { + throw EnvironmentFailureException.unexpectedState + ("close() not permitted on an internal environment handle"); + } + } + + /** + * Preload exceptions, classes. + */ + + /** + * Undeclared exception used to throw through SortedLSNTreeWalker code + * when preload has either filled the user's max-bytes or max-time request. + */ + @SuppressWarnings("serial") + private static class HaltPreloadException extends RuntimeException { + + private final PreloadStatus status; + + HaltPreloadException(PreloadStatus status) { + super(status.toString()); + this.status = status; + } + + PreloadStatus getStatus() { + return status; + } + } + + private static final HaltPreloadException + TIME_EXCEEDED_PRELOAD_EXCEPTION = + new HaltPreloadException(PreloadStatus.EXCEEDED_TIME); + + private static final HaltPreloadException + MEMORY_EXCEEDED_PRELOAD_EXCEPTION = + new HaltPreloadException(PreloadStatus.FILLED_CACHE); + + private static final HaltPreloadException + USER_HALT_REQUEST_PRELOAD_EXCEPTION = + new HaltPreloadException(PreloadStatus.USER_HALT_REQUEST); + + public PreloadStats preload(final DatabaseImpl[] dbImpls, + final PreloadConfig config) + throws DatabaseException { + + try { + final long maxMillisecs = config.getMaxMillisecs(); + long targetTime = Long.MAX_VALUE; + if (maxMillisecs > 0) { + targetTime = System.currentTimeMillis() + maxMillisecs; + if (targetTime < 0) { + targetTime = Long.MAX_VALUE; + } + } + + /* + * Disable off-heap cache during preload.
It appears to cause + * Btree corruption. [#25594] + */ + boolean useOffHeapCache = false; + /* + if (offHeapCache.isEnabled()) { + useOffHeapCache = true; + for (final DatabaseImpl db : dbImpls) { + if (db.isDeferredWriteMode() || + db.getDbType().isInternal()) { + useOffHeapCache = false; + break; + } + } + } + */ + + long cacheBudget = memoryBudget.getMaxMemory(); + if (useOffHeapCache) { + cacheBudget += offHeapCache.getMaxMemory(); + } + + long maxBytes = config.getMaxBytes(); + if (maxBytes == 0) { + maxBytes = cacheBudget; + } else if (maxBytes > cacheBudget) { + throw new IllegalArgumentException + ("maxBytes parameter to preload() was " + + "specified as " + + maxBytes + + " bytes but the maximum total cache size is only " + + cacheBudget + " bytes."); + } + + /* + * Sort DatabaseImpls so that we always latch in a well-defined + * order to avoid potential deadlocks if multiple preloads happen + * to (accidentally) execute concurrently. + */ + Arrays.sort(dbImpls, new Comparator() { + @Override + public int compare(DatabaseImpl o1, DatabaseImpl o2) { + DatabaseId id1 = o1.getId(); + DatabaseId id2 = o2.getId(); + return id1.compareTo(id2); + } + }); + + PreloadStats pstats = new PreloadStats(); + + PreloadProcessor callback = new PreloadProcessor( + this, maxBytes, useOffHeapCache, targetTime, pstats, config); + + int nDbs = dbImpls.length; + long[] rootLsns = new long[nDbs]; + for (int i = 0; i < nDbs; i += 1) { + rootLsns[i] = dbImpls[i].getTree().getRootLsn(); + } + + SortedLSNTreeWalker walker = new PreloadLSNTreeWalker( + dbImpls, rootLsns, useOffHeapCache, callback, config); + + try { + walker.walk(); + callback.close(); + } catch (HaltPreloadException HPE) { + pstats.setStatus(HPE.getStatus()); + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + return pstats; + } catch (Error E) { + invalidate(E); + throw E; + } + } + + /** + * The processLSN() code for PreloadLSNTreeWalker. + */ + private static class PreloadProcessor implements TreeNodeProcessor { + + private final EnvironmentImpl envImpl; + private final long maxBytes; + private final boolean useOffHeapCache; + private final long targetTime; + private final PreloadStats stats; + private final boolean countLNs; + private final ProgressListener progressListener; + private long progressCounter = 0; + + PreloadProcessor(final EnvironmentImpl envImpl, + final long maxBytes, + final boolean useOffHeapCache, + final long targetTime, + final PreloadStats stats, + final PreloadConfig config) { + this.envImpl = envImpl; + this.maxBytes = maxBytes; + this.useOffHeapCache = useOffHeapCache; + this.targetTime = targetTime; + this.stats = stats; + this.countLNs = config.getLoadLNs(); + this.progressListener = config.getProgressListener(); + } + + /** + * Called for each LSN that the SortedLSNTreeWalker encounters. + */ + @Override + public void processLSN(long childLsn, + LogEntryType childType, + Node node, + @SuppressWarnings("unused") byte[] ignore2, + @SuppressWarnings("unused") int ignore3) { + + /* + * Check if we've exceeded either the max time or max bytes + * allowed for this preload() call. + */ + if (System.currentTimeMillis() > targetTime) { + throw TIME_EXCEEDED_PRELOAD_EXCEPTION; + } + + /* + * We don't worry about the memory usage being kept below the max + * by the evictor, since we keep the root INs latched. 
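+ * Instead, the explicit check below enforces the maxBytes limit
+ * directly, counting main-cache usage plus off-heap usage when the
+ * preload uses the off-heap cache.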
+ */ + long usedBytes = envImpl.memoryBudget.getCacheMemoryUsage(); + if (useOffHeapCache) { + usedBytes += envImpl.offHeapCache.getUsedMemory(); + } + + if (usedBytes > maxBytes) { + throw MEMORY_EXCEEDED_PRELOAD_EXCEPTION; + } + + if (progressListener != null) { + progressCounter += 1; + if (!progressListener.progress(PreloadConfig.Phases.PRELOAD, + progressCounter, -1)) { + throw USER_HALT_REQUEST_PRELOAD_EXCEPTION; + } + } + + /* Count entry types to return in the PreloadStats. */ + if (childLsn == DbLsn.NULL_LSN) { + stats.incEmbeddedLNs(); + } else if (childType.equals( + LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL) || + childType.equals(LogEntryType.LOG_DUPCOUNTLN)) { + stats.incDupCountLNsLoaded(); + } else if (childType.isLNType()) { + if (countLNs) { + stats.incLNsLoaded(); + } + } else if (childType.equals(LogEntryType.LOG_DBIN)) { + stats.incDBINsLoaded(); + } else if (childType.equals(LogEntryType.LOG_BIN)) { + stats.incBINsLoaded(); + if (!countLNs) { + BIN bin = (BIN) node; + for (int i = 0; i < bin.getNEntries(); i += 1) { + if (bin.isEmbeddedLN(i)) { + stats.incEmbeddedLNs(); + } + } + } + } else if (childType.equals(LogEntryType.LOG_DIN)) { + stats.incDINsLoaded(); + } else if (childType.equals(LogEntryType.LOG_IN)) { + stats.incINsLoaded(); + } + } + + @Override + public void processDirtyDeletedLN(@SuppressWarnings("unused") + long childLsn, + @SuppressWarnings("unused") + LN ln, + @SuppressWarnings("unused") + byte[] lnKey) { + } + + @Override + public void noteMemoryExceeded() { + stats.incMemoryExceeded(); + } + + public void close() { + /* Indicate that we're finished. */ + if (progressListener != null) { + progressListener.progress(PreloadConfig.Phases.PRELOAD, + progressCounter, progressCounter); + } + } + } + + /* + * An extension of SortedLSNTreeWalker that latches the root IN. + */ + private class PreloadLSNTreeWalker extends SortedLSNTreeWalker { + + PreloadLSNTreeWalker(DatabaseImpl[] dbs, + long[] rootLsns, + boolean useOffHeapCache, + TreeNodeProcessor callback, + PreloadConfig conf) + throws DatabaseException { + + super(dbs, + false /*setDbState*/, + rootLsns, + callback, + null, null); /* savedException, exception predicate */ + accumulateLNs = conf.getLoadLNs(); + preloadIntoOffHeapCache = useOffHeapCache; + setLSNBatchSize(conf.getLSNBatchSize()); + setInternalMemoryLimit(conf.getInternalMemoryLimit()); + } + + @Override + public void walk() + throws DatabaseException { + + int nDbs = dbImpls.length; + int nDbsLatched = 0; + try { + try { + for (int i = 0; i < nDbs; i += 1) { + DatabaseImpl dbImpl = dbImpls[i]; + dbImpl.getTree().latchRootLatchExclusive(); + nDbsLatched += 1; + } + } catch (Exception e) { + throw EnvironmentFailureException.unexpectedException + (EnvironmentImpl.this, + "Couldn't latch all DatabaseImpls during preload", e); + } + + walkInternal(); + } finally { + + /* + * Release latches in reverse acquisition order to avoid + * deadlocks with possible concurrent preload operations. + */ + for (int i = nDbsLatched - 1; i >= 0; i -= 1) { + DatabaseImpl dbImpl = dbImpls[i]; + dbImpl.getTree().releaseRootLatch(); + } + } + } + + /* + * Method to get the Root IN for this DatabaseImpl's tree. 
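+ * The root latch is already held by walk() above, so the root is
+ * fetched via getRootINRootAlreadyLatched with CacheMode.UNCHANGED
+ * rather than by taking the root latch again.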
+ */ + @Override + IN getRootIN(DatabaseImpl dbImpl, @SuppressWarnings("unused") long rootLsn) { + return dbImpl.getTree().getRootINRootAlreadyLatched( + CacheMode.UNCHANGED, false /*exclusive*/); + } + + @Override + protected boolean fetchAndInsertIntoTree() { + return true; + } + } + + public ProgressListener getRecoveryProgressListener() { + return recoveryProgressListener; + } + + public ClassLoader getClassLoader() { + return classLoader; + } + + public PreloadConfig getDupConvertPreloadConfig() { + return dupConvertPreloadConfig; + } + + /** + * Checks that writing records with a TTL is allowed. + * + * @throws IllegalStateException if any node in the group is at a version + * less than JE_TTL_VERSION. + */ + public void checkTTLAvailable() { + /* Do nothing when not overridden by RepImpl. */ + } + + /** + * Recovery encountered a RestoreRequired marker file, so recovery is + * halted and some intervention must be taken. + * + * @param restoreRequired getFailureType() is used to indicate how + * the environment can be healed. + */ + public void handleRestoreRequired(RestoreRequired restoreRequired) { + switch (restoreRequired.getFailureType()) { + case LOG_CHECKSUM: + throw new EnvironmentFailureException( + this, + EnvironmentFailureReason.LOG_CHECKSUM, + VerifierUtils.getRestoreRequiredMessage(restoreRequired)); + case BTREE_CORRUPTION: + throw new EnvironmentFailureException( + this, + EnvironmentFailureReason.BTREE_CORRUPTION, + VerifierUtils.getRestoreRequiredMessage(restoreRequired)); + default: + throw EnvironmentFailureException.unexpectedState( + this, restoreRequired.toString()); + } + } +} diff --git a/src/com/sleepycat/je/dbi/ExpirationInfo.java b/src/com/sleepycat/je/dbi/ExpirationInfo.java new file mode 100644 index 0000000..d9358da --- /dev/null +++ b/src/com/sleepycat/je/dbi/ExpirationInfo.java @@ -0,0 +1,82 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.WriteOptions; + +/** + * A struct for passing record expiration info to a 'put' operation, and + * returning the old expiration time plus whether it was updated/changed. + */ +public class ExpirationInfo { + + public static final ExpirationInfo DEFAULT = + new ExpirationInfo(0, false, false); + + public final int expiration; + public final boolean expirationInHours; + public final boolean updateExpiration; + private boolean expirationUpdated = false; + private long oldExpirationTime = 0; + + public ExpirationInfo( + final int expiration, + final boolean expirationInHours, + final boolean updateExpiration) { + + this.expiration = expiration; + this.expirationInHours = expirationInHours; + this.updateExpiration = updateExpiration; + } + + /** + * Creates an ExpirationInfo struct from the WriteOptions TTL params, for + * the current system time. + * + * @param options WriteOptions, may not be null. + * + * @return ExpirationInfo, or null if WriteOptions.getTTL is zero and + * WriteOptions.getUpdateTTL is false, meaning we will not add or update + * the TTL.
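+ *
+ * <p>Usage sketch (illustrative; assumes the public WriteOptions TTL
+ * setters):
+ * <pre>
+ *     WriteOptions opts = new WriteOptions();
+ *     opts.setTTL(30, TimeUnit.DAYS);
+ *     ExpirationInfo info = ExpirationInfo.getInfo(opts);  // non-null
+ *     ExpirationInfo none =
+ *         ExpirationInfo.getInfo(new WriteOptions());      // null
+ * </pre>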
+ */ + public static ExpirationInfo getInfo(final WriteOptions options) { + + if (options.getTTL() == 0 && !options.getUpdateTTL()) { + return null; + } + + return new ExpirationInfo( + TTL.ttlToExpiration(options.getTTL(), options.getTTLUnit()), + options.getTTLUnit() == TimeUnit.HOURS, + options.getUpdateTTL()); + } + + public void setExpirationUpdated(boolean val) { + expirationUpdated = val; + } + + public boolean getExpirationUpdated() { + return expirationUpdated; + } + + public void setOldExpirationTime(long val) { + oldExpirationTime = val; + } + + public long getOldExpirationTime() { + return oldExpirationTime; + } +} diff --git a/src/com/sleepycat/je/dbi/GetMode.java b/src/com/sleepycat/je/dbi/GetMode.java new file mode 100644 index 0000000..07f0396 --- /dev/null +++ b/src/com/sleepycat/je/dbi/GetMode.java @@ -0,0 +1,44 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +/** + * Internal class used to distinguish which variety of getXXX() that + * Cursor.retrieveNext should use. + */ +public enum GetMode { + NEXT("NEXT", true), + PREV("PREV", false), + NEXT_DUP("NEXT_DUP", true), + PREV_DUP("PREV_DUP", false), + NEXT_NODUP("NEXT_NODUP", true), + PREV_NODUP("PREV_NODUP", false); + + private String name; + private boolean forward; + + private GetMode(String name, boolean forward) { + this.name = name; + this.forward = forward; + } + + public final boolean isForward() { + return forward; + } + + @Override + public String toString() { + return name; + } +} diff --git a/src/com/sleepycat/je/dbi/INList.java b/src/com/sleepycat/je/dbi/INList.java new file mode 100644 index 0000000..efa34f8 --- /dev/null +++ b/src/com/sleepycat/je/dbi/INList.java @@ -0,0 +1,496 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.evictor.EvictorStatDefinition; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; + +/** + * The INList is a list of in-memory INs for a given environment. + * + * For an explanation of the 'enabled' mode, see RecoveryManager class + * comments. 
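+ *
+ * <p>Read-side sketch (illustrative): the list is iterable, and the
+ * iterator filters out INs removed after it was created:
+ * <pre>
+ *     for (IN in : envImpl.getInMemoryINs()) {
+ *         // process only INs still resident on the list
+ *     }
+ * </pre>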
+ */ + public class INList implements Iterable<IN> { + + private EnvironmentImpl envImpl; + private boolean enabled; + private volatile boolean recalcInProgress; + private volatile boolean recalcToggle; + private boolean recalcConsistent; + private AtomicLong recalcTotal; + + /** + * We use a Map of INs because there is no ConcurrentHashSet, only a + * ConcurrentHashMap. But this map is treated as a set of INs with the + * same object as the key and the value. + */ + private final ConcurrentMap<IN, IN> ins; + + /** + * Stats about the composition of the INList must be kept in this class + * rather than the evictor because a sharedEnvCache encompasses many + * INLists. Note that we are not keeping a true StatGroup instance or Stat + * fields here. That's because these values are "instantaneous", and don't + * need to obey the accumulate/clear semantics of stats. When stats are + * loaded, we'll just create a new stats group to pass back. + */ + private AtomicLong nCachedUpperINs; + private AtomicLong nCachedBINs; + private AtomicLong nCachedBINDeltas; + + INList(EnvironmentImpl envImpl) { + init(envImpl); + ins = new ConcurrentHashMap<IN, IN>(); + enabled = false; + } + + private void init(EnvironmentImpl environmentImpl) { + this.envImpl = environmentImpl; + recalcInProgress = false; + recalcToggle = false; + recalcConsistent = true; + recalcTotal = new AtomicLong(); + + nCachedUpperINs = new AtomicLong(); + nCachedBINs = new AtomicLong(); + nCachedBINDeltas = new AtomicLong(); + } + + /** + * All stats from the INList are instantaneous -- never need to be cleared. + */ + public StatGroup loadStats() { + StatGroup stats = new StatGroup(EvictorStatDefinition.GROUP_NAME, + EvictorStatDefinition.GROUP_DESC); + + long istat = nCachedUpperINs.get(); + long bstat = nCachedBINs.get(); + long bdstat = nCachedBINDeltas.get(); + new LongStat(stats, EvictorStatDefinition.CACHED_UPPER_INS, + istat); + new LongStat(stats, EvictorStatDefinition.CACHED_BINS, + bstat); + new LongStat(stats, EvictorStatDefinition.CACHED_BIN_DELTAS, + bdstat); + + // verifyPrint(istat, bstat); + + return stats; + } + + private void verifyPrint(long istat, long bstat) { + int numINs = 0; + int numBINs = 0; + + for (IN theIN : ins.keySet()) { + if (theIN instanceof BIN) { + numBINs++; + } else { + numINs++; + } + } + System.out.println("size=" + getSize() + " INcount=" + numINs + + " BINCount=" + numBINs + " INstat=" + istat + + " bstat=" + bstat); + } + + /* + * Ok to be imprecise. + */ + public int getSize() { + return ins.size(); + } + + public boolean contains(IN in) { + return ins.containsKey(in); + } + + /** + * Enable the INList during recovery. + */ + public void enable() { + assert ins.isEmpty(); + assert !enabled; + enabled = true; + } + + public boolean isEnabled() { + return enabled; + } + + /** + * An IN has just come into memory, add it to the list. + */ + public void add(IN in) { + /* Ignore additions until the INList is enabled. */ + if (!enabled) { + return; + } + + /* Be sure to check for BIN first, since it's a subclass of IN! */ + if (in.isBIN()) { + nCachedBINs.incrementAndGet(); + if (in.isBINDelta(false)) { + nCachedBINDeltas.incrementAndGet(); + } + } else { + nCachedUpperINs.incrementAndGet(); + } + + /* + * Use putIfAbsent to ensure that we never overwrite an IN, since this + * can cause Btree corruption. Throw a fatal EFE if the IN is already + * present to detect potential corruption bugs early.
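+ * (putIfAbsent is atomic, so two threads racing to add the same IN
+ * cannot both succeed; the pre-existing IN is reported in the
+ * exception below.)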
[#21686] + */ + IN oldValue = ins.putIfAbsent(in, in); + if (oldValue != null) { + throw EnvironmentFailureException.unexpectedState + (envImpl, + "Failed adding new IN node=" + in.getNodeId() + + " dbIdentity=" + System.identityHashCode(in.getDatabase()) + + " db=" + in.getDatabase().dumpString(0) + + "\nExisting IN node=" + oldValue.getNodeId() + + " dbIdentity=" + + System.identityHashCode(oldValue.getDatabase()) + + " db=" + oldValue.getDatabase().dumpString(0)); + } + + long size = in.getBudgetedMemorySize(); + memRecalcAdd(in, size); + envImpl.getMemoryBudget().updateTreeMemoryUsage(size); + in.setInListResident(true); + } + + /** + * An IN is being evicted. + */ + public void remove(IN in) { + if (!enabled) { + return; + } + + boolean removed = removeInternal(in); + assert removed; + + long delta = 0 - in.getBudgetedMemorySize(); + memRecalcRemove(in, delta); + envImpl.getMemoryBudget().updateTreeMemoryUsage(delta); + } + + /** + * Performs unconditional IN removal, but does not update memory usage. + * + * @return whether the IN was found in the map and removed. + */ + private boolean removeInternal(IN in) { + + /* Be sure to check for BIN first, since it's a subclass of IN! */ + if (in.isBIN()) { + nCachedBINs.decrementAndGet(); + if (in.isBINDelta(false/*checkLatched*/)) { + nCachedBINDeltas.decrementAndGet(); + } + } else { + nCachedUpperINs.decrementAndGet(); + } + + final Evictor evictor = envImpl.getEvictor(); + + boolean latchAcquired = false; + if (!in.isLatchOwner()) { + in.latch(CacheMode.UNCHANGED); + latchAcquired = true; + } + + try { + evictor.remove(in); + in.setInListResident(false); + envImpl.getOffHeapCache().removeINFromMain(in); + } finally { + if (latchAcquired) { + in.releaseLatch(); + } + } + + IN oldValue = ins.remove(in); + return oldValue != null; + } + + public void updateBINDeltaStat(int incr) { + nCachedBINDeltas.addAndGet(incr); + } + + /** + * Return an iterator over the main 'ins' set. Returned iterator may or + * may not show elements added or removed after the iterator is created. + * + * @return an iterator over the main 'ins' set. + */ + public Iterator<IN> iterator() { + return new Iter(); + } + + /** + * A direct Iterator on the INList may return INs that have been removed, + * since the underlying ConcurrentHashMap doesn't block changes to the list + * during the iteration. This Iterator implementation wraps a direct + * Iterator and returns only those INs that are on the INList. + * + * Note that this doesn't guarantee that an IN will not be removed from the + * INList after being returned by this iterator. But filtering out the INs + * already removed will avoid wasting effort in the evictor, checkpointer, + * and other places where INs are iterated and processed. + */ + private class Iter implements Iterator<IN> { + + private final Iterator<IN> baseIter; + private IN next; + private IN lastReturned; + + private Iter() { + baseIter = ins.keySet().iterator(); + } + + public boolean hasNext() { + if (next != null) { + return true; + } else { + return advance(); + } + } + + public IN next() { + if (next == null) { + if (!advance()) { + throw new NoSuchElementException(); + } + } + lastReturned = next; + next = null; + return lastReturned; + } + + private boolean advance() { + while (baseIter.hasNext()) { + IN in = baseIter.next(); + if (in.getInListResident()) { + next = in; + return true; + } + } + return false; + } + + /** + * Caller must update memory usage.
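+ * removeInternal() maintains the cached-IN counters and evictor state
+ * but, unlike INList.remove(), does not adjust the tree memory
+ * budget; the iterating caller must do that.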
+ */ + @Override + public void remove() { + if (lastReturned != null) { + removeInternal(lastReturned); + lastReturned = null; + } else { + throw EnvironmentFailureException.unexpectedState(); + } + } + } + + /** + * Clear the entire list at shutdown and release its portion of the memory + * budget. + */ + public void clear() { + + ins.clear(); + nCachedUpperINs.set(0); + nCachedBINs.set(0); + nCachedBINDeltas.set(0); + + MemoryBudget mb = envImpl.getMemoryBudget(); + mb.refreshTreeMemoryUsage(0); + mb.refreshTreeAdminMemoryUsage(0); + } + + public void dump() { + System.out.println("size=" + getSize()); + for (IN theIN : ins.keySet()) { + System.out.println("db=" + theIN.getDatabase().getId() + + " nid=: " + theIN.getNodeId() + "/" + + theIN.getLevel()); + } + } + + /* + * The following set of memRecalc methods allow an iteration over the + * INList to recalculate the tree memory budget. This is done during a + * checkpoint by the DirtyINMap class. + * + * We flip the INList toggle, recalcToggle, at the beginning of the recalc. + * At that point, if recalcConsistent is true, all IN toggles have the + * opposite value of recalcToggle. As we process INs we flip their + * toggles. We can tell whether we have already processed an IN by + * comparing its toggle to recalcToggle. If they are equal, we have + * already processed the IN. + * + * The scenarios below describe how the recalcTotal is updated for a + * particular IN. + * + * Scenario #1: IN size is unchanged during the iteration + * begin + * iterate -- add total IN size, mark processed + * end + * + * Scenario #2: IN size is updated during the iteration + * begin + * update -- do not add delta because IN is not yet processed + * iterate -- add total IN size, mark processed + * update -- do add delta because IN was already processed + * end + * + * Scenario #3: IN is added during the iteration but not iterated + * begin + * add -- add IN size, mark processed + * end + * + * Scenario #4: IN is added during the iteration and is iterated + * begin + * add -- add IN size, mark processed + * iterate -- do not add size because IN was already processed + * end + * + * Scenario #5: IN is removed during the iteration but not iterated + * begin + * remove -- do not add delta because IN is not yet processed + * end + * + * Scenario #6: IN is removed during the iteration and is iterated + * begin + * iterate -- add total IN size, mark processed + * remove -- add delta because IN was already processed + * end + * + * If recalcConsistent is false, the last attempted recalc was not + * completed. In that case the next reset pass will simply set the toggle + * in every IN so that they are consistent. The pass following that will + * then do a normal recalc. At the end of any pass, we only update the + * memory budget if the last recalc was consistent (or this is the first + * recalc), and the current recalc is completed. + * + * We do not synchronize when changing state variables. In memRecalcBegin + * and memRecalcEnd it is possible for an IN to be added or removed by + * another thread in the window between setting recalcInProgress and + * setting or getting the recalcTotal. In memRecalcUpdate a similar thing + * can happen in the window between checking the IN toggle and adding to + * recalcTotal, if memRecalcIterate is called by the checkpointer in that + * window. If this occurs, the reset total can be inaccurate by the amount + * that was changed in the window.
We have chosen to live with this + * possible inaccuracy rather than synchronize these methods. We would + * have to synchronize every time we add/remove INs and update the size of + * an IN, which could introduce a new point of contention. + */ + + /** + * We are starting the iteration of the INList. Flip the INList toggle + * and set the total amount to zero. + * + * After calling this method, memRecalcEnd must be called in a finally + * block. If it is not called, internal state will be invalid. + */ + public void memRecalcBegin() { + recalcTotal.set(0); + recalcInProgress = true; + recalcToggle = !recalcToggle; + } + + /** + * An IN was encountered during the iteration through the entire INList. + * Add its size to the recalc total if we have not already processed it, + * and mark it as processed. If it was already processed, memRecalcAdd + * must have been called for the IN when it was added to the INList during + * the iteration. + */ + public void memRecalcIterate(IN in) { + assert recalcInProgress; + if (recalcConsistent && + recalcToggle != in.getRecalcToggle()) { + long delta = in.resetAndGetMemorySize(); + recalcTotal.addAndGet(delta); + } + in.setRecalcToggle(recalcToggle); + } + + /** + * An IN is being added to the INList. Add its size to the recalc total + * and mark it as processed. It cannot have already been processed since + * it is a new IN. + */ + private void memRecalcAdd(IN in, long size) { + if (recalcInProgress && + recalcConsistent) { + recalcTotal.addAndGet(size); + } + in.setRecalcToggle(recalcToggle); + } + + /** + * An IN is being removed from the INList. Add the delta to the recalc + * total if it was already processed, and mark it as processed. If we have + * not yet processed it, it is not included in the total. + */ + private void memRecalcRemove(IN in, long delta) { + memRecalcUpdate(in, delta); // Remove and update are the same + } + + /** + * The size of an IN is changing. Add the delta to the recalc total if we + * have already processed the IN. If we have not yet processed it, its + * total size will be added by memRecalcIterate. + */ + public void memRecalcUpdate(IN in, long delta) { + if (recalcInProgress && + recalcConsistent && + recalcToggle == in.getRecalcToggle()) { + recalcTotal.addAndGet(delta); + } + } + + /** + * The reset operation is over. Only update the tree budget if the + * iteration was completed and the state was consistent prior to this reset + * operation. + */ + public void memRecalcEnd(boolean completed) { + assert recalcInProgress; + if (completed && + recalcConsistent) { + envImpl.getMemoryBudget().refreshTreeMemoryUsage + (recalcTotal.get()); + } + recalcInProgress = false; + recalcConsistent = completed; + } +} diff --git a/src/com/sleepycat/je/dbi/LSNAccumulator.java b/src/com/sleepycat/je/dbi/LSNAccumulator.java new file mode 100644 index 0000000..50a79ae --- /dev/null +++ b/src/com/sleepycat/je/dbi/LSNAccumulator.java @@ -0,0 +1,128 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.dbi; + +import java.util.Arrays; +import java.util.Map; +import java.util.TreeMap; + +import com.sleepycat.je.cleaner.OffsetList; +import com.sleepycat.je.utilint.DbLsn; + +/* + * The current set of LSNs of children which are not in-memory but are + * being accumulated, and will be subsequently sorted and processed. Once + * they have been accumulated, they will be sorted, fetched, and returned + * to the user. + * + * Represent this as a map from file number to OffsetList holding LSN + * offsets. + */ +abstract class LSNAccumulator { + /* File number -> OffsetList */ + private Map offsetsByFile; + private int nTotalEntries; + private long lsnAccMemoryUsage; + + LSNAccumulator() { + init(); + } + + private void init() { + incInternalMemoryUsage(-lsnAccMemoryUsage); + offsetsByFile = new TreeMap(); + nTotalEntries = 0; + incInternalMemoryUsage(MemoryBudget.TREEMAP_OVERHEAD); + } + + void clear() { + offsetsByFile.clear(); + nTotalEntries = 0; + incInternalMemoryUsage(-lsnAccMemoryUsage); + } + + boolean isEmpty() { + return nTotalEntries == 0; + } + + int getNTotalEntries() { + return nTotalEntries; + } + + long getMemoryUsage() { + return lsnAccMemoryUsage; + } + + abstract void noteMemUsage(long increment); + + private void incInternalMemoryUsage(long increment) { + lsnAccMemoryUsage += increment; + noteMemUsage(increment); + } + + void add(long lsn) { + long fileNumber = DbLsn.getFileNumber(lsn); + OffsetList offsetsForFile = offsetsByFile.get(fileNumber); + if (offsetsForFile == null) { + offsetsForFile = new OffsetList(); + offsetsByFile.put(fileNumber, offsetsForFile); + incInternalMemoryUsage(MemoryBudget.TFS_LIST_INITIAL_OVERHEAD); + incInternalMemoryUsage(MemoryBudget.TREEMAP_ENTRY_OVERHEAD); + } + + boolean newSegment = + offsetsForFile.add(DbLsn.getFileOffset(lsn), false); + if (newSegment) { + incInternalMemoryUsage(MemoryBudget.TFS_LIST_SEGMENT_OVERHEAD); + } + + nTotalEntries += 1; + } + + long[] getAndSortPendingLSNs() { + long[] currentLSNs = new long[nTotalEntries]; + int curIdx = 0; + + for (Map.Entry fileEntry : + offsetsByFile.entrySet()) { + + long fileNumber = fileEntry.getKey(); + + for (long fileOffset : fileEntry.getValue().toArray()) { + currentLSNs[curIdx] = DbLsn.makeLsn(fileNumber, fileOffset); + curIdx += 1; + } + } + + init(); + Arrays.sort(currentLSNs); + return currentLSNs; + } + + void getLSNs(long[] lsns, int nLsns) { + + for (Map.Entry fileEntry : + offsetsByFile.entrySet()) { + + long fileNumber = fileEntry.getKey(); + + for (long fileOffset : fileEntry.getValue().toArray()) { + lsns[nLsns] = DbLsn.makeLsn(fileNumber, fileOffset); + ++nLsns; + } + } + + init(); + } +} diff --git a/src/com/sleepycat/je/dbi/MemoryBudget.java b/src/com/sleepycat/je/dbi/MemoryBudget.java new file mode 100644 index 0000000..de8e1f8 --- /dev/null +++ b/src/com/sleepycat/je/dbi/MemoryBudget.java @@ -0,0 +1,1348 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_ADMIN_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DATA_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DATA_ADMIN_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DOS_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_GROUP_DESC; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_GROUP_NAME; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_LOCK_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_SHARED_CACHE_TOTAL_BYTES; +import static com.sleepycat.je.dbi.DbiStatDefinition.MB_TOTAL_BYTES; + +import java.lang.management.ManagementFactory; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; + +/** + * MemoryBudget calculates the available memory for JE and how to apportion + * it between cache and log buffers. It is meant to centralize all memory + * calculations. Objects that ask for memory budgets should get settings from + * this class, rather than using the configuration parameter values directly. + */ +public class MemoryBudget implements EnvConfigObserver { + + /* + * CLEANUP_DONE can be set to false for unit test debugging + * that is still in progress. When we do the final regression, + * this should be removed to be assured that it is never false. + */ + public static boolean CLEANUP_DONE = false; + + /* + * These DEBUG variables are public so unit tests can easily turn them + * on and off for different sections of code. + */ + public static boolean DEBUG_ADMIN = Boolean.getBoolean("memAdmin"); + public static boolean DEBUG_LOCK = Boolean.getBoolean("memLock"); + public static boolean DEBUG_TXN = Boolean.getBoolean("memTxn"); + public static boolean DEBUG_TREEADMIN = Boolean.getBoolean("memTreeAdmin"); + public static boolean DEBUG_TREE = Boolean.getBoolean("memTree"); + public static boolean DEBUG_DOS = Boolean.getBoolean("memDOS"); + + /* + * Object overheads. These are set statically with advance measurements. + * Java doesn't provide a way of assessing object size dynamically. These + * overheads will not be precise, but are close enough to let the system + * behave predictably. + * + * _32 values are the same on Windows and Solaris. + * _64 values are from Linux (were previously 1.5.0_05 on Solaris). + * _OOPS are on a 64b JVM with -XX:+UseCompressedOops + * + * The integer following the // below is the Sizeof argument used to + * compute the value. 
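Because these constants are frozen in a static initializer (shown further below), any override must be in place before the class loads. A sketch of how a test might pin the selection, using the je.forceJVMArch property that the initializer reads; the test-harness context is assumed:

    // Must run before MemoryBudget is first loaded, since the selection
    // happens in its static initializer.
    System.setProperty("je.forceJVMArch", "64");  // force the _64 values
    // The default selection reads the JVM's own report:
    String arch = System.getProperty("sun.arch.data.model"); // "32" or "64"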
+ */ + + // 7 + private final static int LONG_OVERHEAD_32 = 16; + private final static int LONG_OVERHEAD_64 = 24; + private final static int LONG_OVERHEAD_OOPS = 24; + + // 8 + private final static int ARRAY_OVERHEAD_32 = 16; + private final static int ARRAY_OVERHEAD_64 = 24; + private final static int ARRAY_OVERHEAD_OOPS = 16; + + // see byteArraySize + private final static int ARRAY_SIZE_INCLUDED_32 = 4; + private final static int ARRAY_SIZE_INCLUDED_64 = 0; + private final static int ARRAY_SIZE_INCLUDED_OOPS = 0; + + // 2 + private final static int OBJECT_OVERHEAD_32 = 8; + private final static int OBJECT_OVERHEAD_64 = 16; + private final static int OBJECT_OVERHEAD_OOPS = 16; + + // (4 - ARRAY_OVERHEAD) / 256 + // 32b: 4 is 1040 + // 64b: 4 is 2078 + // Oops: 4 is 1040 + private final static int OBJECT_ARRAY_ITEM_OVERHEAD_32 = 4; + private final static int OBJECT_ARRAY_ITEM_OVERHEAD_64 = 8; + private final static int OBJECT_ARRAY_ITEM_OVERHEAD_OOPS = 4; + + // 20 + private final static int HASHMAP_OVERHEAD_32 = 120; + private final static int HASHMAP_OVERHEAD_64 = 219; + private final static int HASHMAP_OVERHEAD_OOPS = 128; + + // 21 - OBJECT_OVERHEAD - HASHMAP_OVERHEAD + // 32b: 21 is 152 + // 64b: 21 is max(280,...,287) on Linux/Solaris 1.5/1.6 + // Oops: 21 is 176 + private final static int HASHMAP_ENTRY_OVERHEAD_32 = 24; + private final static int HASHMAP_ENTRY_OVERHEAD_64 = 52; + private final static int HASHMAP_ENTRY_OVERHEAD_OOPS = 32; + + // 22 + private final static int HASHSET_OVERHEAD_32 = 136; + private final static int HASHSET_OVERHEAD_64 = 240; + private final static int HASHSET_OVERHEAD_OOPS = 144; + + // 23 - OBJECT_OVERHEAD - HASHSET_OVERHEAD + // 32b: 23 is 168 + // 64b: 23 is max(304,...,311) on Linux/Solaris + // Oops: 23 is 192 + private final static int HASHSET_ENTRY_OVERHEAD_32 = 24; + private final static int HASHSET_ENTRY_OVERHEAD_64 = 55; + private final static int HASHSET_ENTRY_OVERHEAD_OOPS = 32; + + // HASHMAP_OVERHEAD * 2 + private final static int TWOHASHMAPS_OVERHEAD_32 = 240; + private final static int TWOHASHMAPS_OVERHEAD_64 = 438; + private final static int TWOHASHMAPS_OVERHEAD_OOPS = 256; + + // 34 + private final static int TREEMAP_OVERHEAD_32 = 48; + private final static int TREEMAP_OVERHEAD_64 = 80; + private final static int TREEMAP_OVERHEAD_OOPS = 48; + + // 35 - OBJECT_OVERHEAD - TREEMAP_OVERHEAD + // 32b: 35 is 88 + // 64b: 35 is 160 + // Oops: 35 is 104 + private final static int TREEMAP_ENTRY_OVERHEAD_32 = 32; + private final static int TREEMAP_ENTRY_OVERHEAD_64 = 64; + private final static int TREEMAP_ENTRY_OVERHEAD_OOPS = 40; + + // 36 + // 32b JDK 1.7 is 928 + private final static int MAPLN_OVERHEAD_32 = 920; + private final static int MAPLN_OVERHEAD_64 = 1624; + private final static int MAPLN_OVERHEAD_OOPS = 1016; + + // 9 + private final static int LN_OVERHEAD_32 = 16; + private final static int LN_OVERHEAD_64 = 32; + private final static int LN_OVERHEAD_OOPS = 24; + + // 17 minus 9 + // 32b: 17 is 24 + // 64b: 17 is 40 + // Oops: 17 is 32 + private final static int VERSIONEDLN_OVERHEAD_32 = 8; + private final static int VERSIONEDLN_OVERHEAD_64 = 8; + private final static int VERSIONEDLN_OVERHEAD_OOPS = 8; + + // No longer updated, as dups are no longer used except during conversion. 
+ private final static int DUPCOUNTLN_OVERHEAD_32 = 32; + private final static int DUPCOUNTLN_OVERHEAD_64 = 48; + private final static int DUPCOUNTLN_OVERHEAD_OOPS = 40; + + // 12 + private final static int BIN_FIXED_OVERHEAD_32 = 223; + private final static int BIN_FIXED_OVERHEAD_64 = 352; + private final static int BIN_FIXED_OVERHEAD_OOPS = 232; + + // 18 + private final static int BINDELTA_OVERHEAD_32 = 48; + private final static int BINDELTA_OVERHEAD_64 = 72; + private final static int BINDELTA_OVERHEAD_OOPS = 64; + + // 19 + private final static int DELTAINFO_OVERHEAD_32 = 24; + private final static int DELTAINFO_OVERHEAD_64 = 40; + private final static int DELTAINFO_OVERHEAD_OOPS = 32; + + // 47 + private final static int SPARSE_TARGET_ENTRY_OVERHEAD_32 = 72; + private final static int SPARSE_TARGET_ENTRY_OVERHEAD_64 = 120; + private final static int SPARSE_TARGET_ENTRY_OVERHEAD_OOPS = 80; + + // 48 + private final static int DEFAULT_TARGET_ENTRY_OVERHEAD_32 = 16; + private final static int DEFAULT_TARGET_ENTRY_OVERHEAD_64 = 24; + private final static int DEFAULT_TARGET_ENTRY_OVERHEAD_OOPS = 16; + + // 49 + private final static int MAX_KEY_SIZE_KEYVALS_OVERHEAD_32 = 16; + private final static int MAX_KEY_SIZE_KEYVALS_OVERHEAD_64 = 32; + private final static int MAX_KEY_SIZE_KEYVALS_OVERHEAD_OOPS = 24; + + // 50 + private final static int DEFAULT_KEYVALS_OVERHEAD_32 = 16; + private final static int DEFAULT_KEYVALS_OVERHEAD_64 = 24; + private final static int DEFAULT_KEYVALS_OVERHEAD_OOPS = 16; + + // 52 + private final static int DEFAULT_LONG_REP_OVERHEAD_32 = 16; + private final static int DEFAULT_LONG_REP_OVERHEAD_64 = 32; + private final static int DEFAULT_LONG_REP_OVERHEAD_OOPS = 24; + + // 59 + private final static int SPARSE_LONG_REP_OVERHEAD_32 = 24; + private final static int SPARSE_LONG_REP_OVERHEAD_64 = 40; + private final static int SPARSE_LONG_REP_OVERHEAD_OOPS = 24; + + // 54 + private final static int DIN_FIXED_OVERHEAD_32 = 120; + private final static int DIN_FIXED_OVERHEAD_64 = 176; + private final static int DIN_FIXED_OVERHEAD_OOPS = 120; + + // 53 + private final static int DBIN_FIXED_OVERHEAD_32 = 152; + private final static int DBIN_FIXED_OVERHEAD_64 = 232; + private final static int DBIN_FIXED_OVERHEAD_OOPS = 168; + + // 13 + private final static int IN_FIXED_OVERHEAD_32 = 248; + private final static int IN_FIXED_OVERHEAD_64 = 392; + private final static int IN_FIXED_OVERHEAD_OOPS = 256; + + // 6 + private final static int KEY_OVERHEAD_32 = 16; + private final static int KEY_OVERHEAD_64 = 24; + private final static int KEY_OVERHEAD_OOPS = 16; + + // 24 + private final static int LOCKIMPL_OVERHEAD_32 = 24; + private final static int LOCKIMPL_OVERHEAD_64 = 48; + private final static int LOCKIMPL_OVERHEAD_OOPS = 32; + + // 42 + private final static int THINLOCKIMPL_OVERHEAD_32 = 16; + private final static int THINLOCKIMPL_OVERHEAD_64 = 32; + private final static int THINLOCKIMPL_OVERHEAD_OOPS = 24; + + // 25 + private final static int LOCKINFO_OVERHEAD_32 = 16; + private final static int LOCKINFO_OVERHEAD_64 = 32; + private final static int LOCKINFO_OVERHEAD_OOPS = 24; + + // 37 + private final static int WRITE_LOCKINFO_OVERHEAD_32 = 48; + private final static int WRITE_LOCKINFO_OVERHEAD_64 = 72; + private final static int WRITE_LOCKINFO_OVERHEAD_OOPS = 56; + + /* + * Txn memory is the size for the Txn + a hashmap entry + * overhead for being part of the transaction table. 
+ */ + // 15 + private final static int TXN_OVERHEAD_32 = 224; + private final static int TXN_OVERHEAD_64 = 361; + private final static int TXN_OVERHEAD_OOPS = 240; + + // 26 + private final static int CHECKPOINT_REFERENCE_SIZE_32 = 40 + + HASHSET_ENTRY_OVERHEAD_32; + private final static int CHECKPOINT_REFERENCE_SIZE_64 = 56 + + HASHSET_ENTRY_OVERHEAD_64; + private final static int CHECKPOINT_REFERENCE_SIZE_OOPS = 48 + + HASHSET_ENTRY_OVERHEAD_OOPS; + + /* The per-log-file bytes used in UtilizationProfile. */ + // 29 / 10.0 (That is the number 10, not the Sizeof type 10) + // 32b: 29 is 1088 + // 64b: 29 is 1600 + // Oops: 29 is 1248 + private final static int UTILIZATION_PROFILE_ENTRY_32 = 109; + private final static int UTILIZATION_PROFILE_ENTRY_64 = 160; + private final static int UTILIZATION_PROFILE_ENTRY_OOPS = 125; + + // 38 + private final static int DBFILESUMMARY_OVERHEAD_32 = 40; + private final static int DBFILESUMMARY_OVERHEAD_64 = 48; + private final static int DBFILESUMMARY_OVERHEAD_OOPS = 48; + + /* Tracked File Summary overheads. */ + // 31 + private final static int TFS_LIST_INITIAL_OVERHEAD_32 = 464; + private final static int TFS_LIST_INITIAL_OVERHEAD_64 = 504; + private final static int TFS_LIST_INITIAL_OVERHEAD_OOPS = 464; + + // 30 + // 64b: 30 is max(464,464,464,465) on Linux/Solaris on 1.5/1.6 + private final static int TFS_LIST_SEGMENT_OVERHEAD_32 = 440; + private final static int TFS_LIST_SEGMENT_OVERHEAD_64 = 465; + private final static int TFS_LIST_SEGMENT_OVERHEAD_OOPS = 440; + + // 33 + private final static int LN_INFO_OVERHEAD_32 = 32; + private final static int LN_INFO_OVERHEAD_64 = 48; + private final static int LN_INFO_OVERHEAD_OOPS = 30; + + // 43 + private final static int FILESUMMARYLN_OVERHEAD_32 = 112; + private final static int FILESUMMARYLN_OVERHEAD_64 = 168; + private final static int FILESUMMARYLN_OVERHEAD_OOPS = 128; + + // 51 + private final static int INENTRY_OVERHEAD_32 = 16; + private final static int INENTRY_OVERHEAD_64 = 32; + private final static int INENTRY_OVERHEAD_OOPS= 24; + + // 46 + private final static int DELTAINENTRY_OVERHEAD_32 = 32; + private final static int DELTAINENTRY_OVERHEAD_64 = 48; + private final static int DELTAINENTRY_OVERHEAD_OOPS= 32; + + // 55 + private final static int DOS_WEAK_BINREF_OVERHEAD_32 = 48; + private final static int DOS_WEAK_BINREF_OVERHEAD_64 = 72; + private final static int DOS_WEAK_BINREF_OVERHEAD_OOPS= 48; + + // 56 + private final static int DOS_OFFHEAP_BINREF_OVERHEAD_32 = 32; + private final static int DOS_OFFHEAP_BINREF_OVERHEAD_64 = 40; + private final static int DOS_OFFHEAP_BINREF_OVERHEAD_OOPS = 40; + + // 57 + private final static int DOS_DEFERRED_LSN_BATCH_OVERHEAD_32 = 88; + private final static int DOS_DEFERRED_LSN_BATCH_OVERHEAD_64 = 128; + private final static int DOS_DEFERRED_LSN_BATCH_OVERHEAD_OOPS = 88; + + // 58 + private final static int DOS_DEFERRED_DELTAREF_OVERHEAD_32 = 16; + private final static int DOS_DEFERRED_DELTAREF_OVERHEAD_64 = 24; + private final static int DOS_DEFERRED_DELTAREF_OVERHEAD_OOPS= 16; + + // 27 minus zero length Object array + private final static int EMPTY_OBJ_ARRAY = objectArraySize(0); + private final static int ARRAYLIST_OVERHEAD_32 = 40 - EMPTY_OBJ_ARRAY; + private final static int ARRAYLIST_OVERHEAD_64 = 64 - EMPTY_OBJ_ARRAY; + private final static int ARRAYLIST_OVERHEAD_OOPS = 40 - EMPTY_OBJ_ARRAY; + + // 44 minus 45 + // 32b: 44 and 45 are 40 and 16, resp. + // 64b: 44 and 45 are 56 and 24, resp. + // Oops: 44 and 45 are 40 and 16, resp. 
+ private final static int TUPLE_OUTPUT_OVERHEAD_32 = 24; + private final static int TUPLE_OUTPUT_OVERHEAD_64 = 32; + private final static int TUPLE_OUTPUT_OVERHEAD_OOPS = 24; + + public final static int LONG_OVERHEAD; + public final static int ARRAY_OVERHEAD; + public final static int ARRAY_SIZE_INCLUDED; + public final static int OBJECT_OVERHEAD; + public final static int OBJECT_ARRAY_ITEM_OVERHEAD; + public final static int HASHMAP_OVERHEAD; + public final static int HASHMAP_ENTRY_OVERHEAD; + public final static int HASHSET_OVERHEAD; + public final static int HASHSET_ENTRY_OVERHEAD; + public final static int TWOHASHMAPS_OVERHEAD; + public final static int TREEMAP_OVERHEAD; + public final static int TREEMAP_ENTRY_OVERHEAD; + public final static int MAPLN_OVERHEAD; + public final static int LN_OVERHEAD; + public final static int VERSIONEDLN_OVERHEAD; + public final static int DUPCOUNTLN_OVERHEAD; + public final static int BIN_FIXED_OVERHEAD; + public final static int BINDELTA_OVERHEAD; + public final static int DELTAINFO_OVERHEAD; + public final static int SPARSE_TARGET_ENTRY_OVERHEAD; + public final static int DEFAULT_TARGET_ENTRY_OVERHEAD; + public final static int DEFAULT_KEYVALS_OVERHEAD; + public final static int MAX_KEY_SIZE_KEYVALS_OVERHEAD; + public final static int DEFAULT_LONG_REP_OVERHEAD; + public final static int SPARSE_LONG_REP_OVERHEAD; + public final static int DIN_FIXED_OVERHEAD; + public final static int DBIN_FIXED_OVERHEAD; + public final static int IN_FIXED_OVERHEAD; + public final static int KEY_OVERHEAD; + public final static int LOCKIMPL_OVERHEAD; + public final static int THINLOCKIMPL_OVERHEAD; + public final static int LOCKINFO_OVERHEAD; + public final static int WRITE_LOCKINFO_OVERHEAD; + public final static int TXN_OVERHEAD; + public final static int CHECKPOINT_REFERENCE_SIZE; + public final static int UTILIZATION_PROFILE_ENTRY; + public final static int DBFILESUMMARY_OVERHEAD; + public final static int TFS_LIST_INITIAL_OVERHEAD; + public final static int TFS_LIST_SEGMENT_OVERHEAD; + public final static int LN_INFO_OVERHEAD; + public final static int FILESUMMARYLN_OVERHEAD; + public final static int INENTRY_OVERHEAD; + public final static int DELTAINENTRY_OVERHEAD; + + public final static int DOS_WEAK_BINREF_OVERHEAD; + public final static int DOS_OFFHEAP_BINREF_OVERHEAD; + public final static int DOS_DEFERRED_LSN_BATCH_OVERHEAD; + public final static int DOS_DEFERRED_DELTAREF_OVERHEAD; + + public final static int ARRAYLIST_OVERHEAD; + public final static int TUPLE_OUTPUT_OVERHEAD; + + /* Primitive long array item size is the same on all platforms. 
*/ + public final static int PRIMITIVE_LONG_ARRAY_ITEM_OVERHEAD = 8; + + private final static String JVM_ARCH_PROPERTY = "sun.arch.data.model"; + private final static String FORCE_JVM_ARCH = "je.forceJVMArch"; + private static boolean COMPRESSED_OOPS_REQUESTED = false; + private static boolean COMPRESSED_OOPS_KNOWN = false; + private static boolean COMPRESSED_OOPS_KNOWN_ON = false; + + static { + boolean is64 = false; + String overrideArch = System.getProperty(FORCE_JVM_ARCH); + + try { + if (overrideArch == null) { + String arch = System.getProperty(JVM_ARCH_PROPERTY); + if (arch != null) { + is64 = Integer.parseInt(arch) == 64; + } + } else { + is64 = Integer.parseInt(overrideArch) == 64; + } + } catch (NumberFormatException NFE) { + NFE.printStackTrace(System.err); + } + + final Boolean checkCompressedOops = + CompressedOopsDetector.isEnabled(); + if (checkCompressedOops != null) { + COMPRESSED_OOPS_KNOWN = true; + COMPRESSED_OOPS_KNOWN_ON = checkCompressedOops; + } + + List<String> args = + ManagementFactory.getRuntimeMXBean().getInputArguments(); + for (String arg : args) { + if ("-XX:+UseCompressedOops".equals(arg)) { + COMPRESSED_OOPS_REQUESTED = true; + break; + } + } + + final boolean useCompressedOops = COMPRESSED_OOPS_KNOWN ? + COMPRESSED_OOPS_KNOWN_ON : + COMPRESSED_OOPS_REQUESTED; + + if (useCompressedOops) { + LONG_OVERHEAD = LONG_OVERHEAD_OOPS; + ARRAY_OVERHEAD = ARRAY_OVERHEAD_OOPS; + ARRAY_SIZE_INCLUDED = ARRAY_SIZE_INCLUDED_OOPS; + OBJECT_OVERHEAD = OBJECT_OVERHEAD_OOPS; + OBJECT_ARRAY_ITEM_OVERHEAD = OBJECT_ARRAY_ITEM_OVERHEAD_OOPS; + HASHMAP_ENTRY_OVERHEAD = HASHMAP_ENTRY_OVERHEAD_OOPS; + HASHSET_OVERHEAD = HASHSET_OVERHEAD_OOPS; + HASHSET_ENTRY_OVERHEAD = HASHSET_ENTRY_OVERHEAD_OOPS; + TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_OOPS; + MAPLN_OVERHEAD = MAPLN_OVERHEAD_OOPS; + BIN_FIXED_OVERHEAD = BIN_FIXED_OVERHEAD_OOPS; + BINDELTA_OVERHEAD = BINDELTA_OVERHEAD_OOPS; + DELTAINFO_OVERHEAD = DELTAINFO_OVERHEAD_OOPS; + SPARSE_TARGET_ENTRY_OVERHEAD = + SPARSE_TARGET_ENTRY_OVERHEAD_OOPS; + DEFAULT_TARGET_ENTRY_OVERHEAD = + DEFAULT_TARGET_ENTRY_OVERHEAD_OOPS; + DEFAULT_KEYVALS_OVERHEAD = DEFAULT_KEYVALS_OVERHEAD_OOPS; + MAX_KEY_SIZE_KEYVALS_OVERHEAD = + MAX_KEY_SIZE_KEYVALS_OVERHEAD_OOPS; + DEFAULT_LONG_REP_OVERHEAD = DEFAULT_LONG_REP_OVERHEAD_OOPS; + SPARSE_LONG_REP_OVERHEAD = SPARSE_LONG_REP_OVERHEAD_OOPS; + DIN_FIXED_OVERHEAD = DIN_FIXED_OVERHEAD_OOPS; + DBIN_FIXED_OVERHEAD = DBIN_FIXED_OVERHEAD_OOPS; + IN_FIXED_OVERHEAD = IN_FIXED_OVERHEAD_OOPS; + HASHMAP_OVERHEAD = HASHMAP_OVERHEAD_OOPS; + TWOHASHMAPS_OVERHEAD = TWOHASHMAPS_OVERHEAD_OOPS; + TREEMAP_ENTRY_OVERHEAD = TREEMAP_ENTRY_OVERHEAD_OOPS; + LN_OVERHEAD = LN_OVERHEAD_OOPS; + VERSIONEDLN_OVERHEAD = VERSIONEDLN_OVERHEAD_OOPS; + DUPCOUNTLN_OVERHEAD = DUPCOUNTLN_OVERHEAD_OOPS; + TXN_OVERHEAD = TXN_OVERHEAD_OOPS; + CHECKPOINT_REFERENCE_SIZE = CHECKPOINT_REFERENCE_SIZE_OOPS; + KEY_OVERHEAD = KEY_OVERHEAD_OOPS; + LOCKIMPL_OVERHEAD = LOCKIMPL_OVERHEAD_OOPS; + THINLOCKIMPL_OVERHEAD = THINLOCKIMPL_OVERHEAD_OOPS; + LOCKINFO_OVERHEAD = LOCKINFO_OVERHEAD_OOPS; + WRITE_LOCKINFO_OVERHEAD = WRITE_LOCKINFO_OVERHEAD_OOPS; + UTILIZATION_PROFILE_ENTRY = UTILIZATION_PROFILE_ENTRY_OOPS; + DBFILESUMMARY_OVERHEAD = DBFILESUMMARY_OVERHEAD_OOPS; + TFS_LIST_INITIAL_OVERHEAD = TFS_LIST_INITIAL_OVERHEAD_OOPS; + TFS_LIST_SEGMENT_OVERHEAD = TFS_LIST_SEGMENT_OVERHEAD_OOPS; + LN_INFO_OVERHEAD = LN_INFO_OVERHEAD_OOPS; + FILESUMMARYLN_OVERHEAD = FILESUMMARYLN_OVERHEAD_OOPS; + INENTRY_OVERHEAD = INENTRY_OVERHEAD_OOPS; + DELTAINENTRY_OVERHEAD =
DELTAINENTRY_OVERHEAD_OOPS; + ARRAYLIST_OVERHEAD = ARRAYLIST_OVERHEAD_OOPS; + TUPLE_OUTPUT_OVERHEAD = TUPLE_OUTPUT_OVERHEAD_OOPS; + DOS_WEAK_BINREF_OVERHEAD = DOS_WEAK_BINREF_OVERHEAD_OOPS; + DOS_OFFHEAP_BINREF_OVERHEAD = DOS_OFFHEAP_BINREF_OVERHEAD_OOPS; + DOS_DEFERRED_LSN_BATCH_OVERHEAD = + DOS_DEFERRED_LSN_BATCH_OVERHEAD_OOPS; + DOS_DEFERRED_DELTAREF_OVERHEAD = + DOS_DEFERRED_DELTAREF_OVERHEAD_OOPS; + } else if (is64) { + LONG_OVERHEAD = LONG_OVERHEAD_64; + ARRAY_OVERHEAD = ARRAY_OVERHEAD_64; + ARRAY_SIZE_INCLUDED = ARRAY_SIZE_INCLUDED_64; + OBJECT_OVERHEAD = OBJECT_OVERHEAD_64; + OBJECT_ARRAY_ITEM_OVERHEAD = OBJECT_ARRAY_ITEM_OVERHEAD_64; + HASHMAP_ENTRY_OVERHEAD = HASHMAP_ENTRY_OVERHEAD_64; + HASHSET_OVERHEAD = HASHSET_OVERHEAD_64; + HASHSET_ENTRY_OVERHEAD = HASHSET_ENTRY_OVERHEAD_64; + TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_64; + MAPLN_OVERHEAD = MAPLN_OVERHEAD_64; + BIN_FIXED_OVERHEAD = BIN_FIXED_OVERHEAD_64; + DIN_FIXED_OVERHEAD = DIN_FIXED_OVERHEAD_64; + DBIN_FIXED_OVERHEAD = DBIN_FIXED_OVERHEAD_64; + IN_FIXED_OVERHEAD = IN_FIXED_OVERHEAD_64; + HASHMAP_OVERHEAD = HASHMAP_OVERHEAD_64; + TWOHASHMAPS_OVERHEAD = TWOHASHMAPS_OVERHEAD_64; + BINDELTA_OVERHEAD = BINDELTA_OVERHEAD_64; + DELTAINFO_OVERHEAD = DELTAINFO_OVERHEAD_64; + SPARSE_TARGET_ENTRY_OVERHEAD = SPARSE_TARGET_ENTRY_OVERHEAD_64; + DEFAULT_TARGET_ENTRY_OVERHEAD = + DEFAULT_TARGET_ENTRY_OVERHEAD_64; + DEFAULT_KEYVALS_OVERHEAD = DEFAULT_KEYVALS_OVERHEAD_64; + MAX_KEY_SIZE_KEYVALS_OVERHEAD = + MAX_KEY_SIZE_KEYVALS_OVERHEAD_64; + DEFAULT_LONG_REP_OVERHEAD = DEFAULT_LONG_REP_OVERHEAD_64; + SPARSE_LONG_REP_OVERHEAD = SPARSE_LONG_REP_OVERHEAD_64; + TREEMAP_ENTRY_OVERHEAD = TREEMAP_ENTRY_OVERHEAD_64; + LN_OVERHEAD = LN_OVERHEAD_64; + VERSIONEDLN_OVERHEAD = VERSIONEDLN_OVERHEAD_64; + DUPCOUNTLN_OVERHEAD = DUPCOUNTLN_OVERHEAD_64; + TXN_OVERHEAD = TXN_OVERHEAD_64; + CHECKPOINT_REFERENCE_SIZE = CHECKPOINT_REFERENCE_SIZE_64; + KEY_OVERHEAD = KEY_OVERHEAD_64; + LOCKIMPL_OVERHEAD = LOCKIMPL_OVERHEAD_64; + THINLOCKIMPL_OVERHEAD = THINLOCKIMPL_OVERHEAD_64; + LOCKINFO_OVERHEAD = LOCKINFO_OVERHEAD_64; + WRITE_LOCKINFO_OVERHEAD = WRITE_LOCKINFO_OVERHEAD_64; + UTILIZATION_PROFILE_ENTRY = UTILIZATION_PROFILE_ENTRY_64; + DBFILESUMMARY_OVERHEAD = DBFILESUMMARY_OVERHEAD_64; + TFS_LIST_INITIAL_OVERHEAD = TFS_LIST_INITIAL_OVERHEAD_64; + TFS_LIST_SEGMENT_OVERHEAD = TFS_LIST_SEGMENT_OVERHEAD_64; + LN_INFO_OVERHEAD = LN_INFO_OVERHEAD_64; + FILESUMMARYLN_OVERHEAD = FILESUMMARYLN_OVERHEAD_64; + INENTRY_OVERHEAD = INENTRY_OVERHEAD_64; + DELTAINENTRY_OVERHEAD = DELTAINENTRY_OVERHEAD_64; + ARRAYLIST_OVERHEAD = ARRAYLIST_OVERHEAD_64; + TUPLE_OUTPUT_OVERHEAD = TUPLE_OUTPUT_OVERHEAD_64; + DOS_WEAK_BINREF_OVERHEAD = DOS_WEAK_BINREF_OVERHEAD_64; + DOS_OFFHEAP_BINREF_OVERHEAD = DOS_OFFHEAP_BINREF_OVERHEAD_64; + DOS_DEFERRED_LSN_BATCH_OVERHEAD = + DOS_DEFERRED_LSN_BATCH_OVERHEAD_64; + DOS_DEFERRED_DELTAREF_OVERHEAD = DOS_DEFERRED_DELTAREF_OVERHEAD_64; + } else { + LONG_OVERHEAD = LONG_OVERHEAD_32; + ARRAY_OVERHEAD = ARRAY_OVERHEAD_32; + ARRAY_SIZE_INCLUDED = ARRAY_SIZE_INCLUDED_32; + OBJECT_OVERHEAD = OBJECT_OVERHEAD_32; + OBJECT_ARRAY_ITEM_OVERHEAD = OBJECT_ARRAY_ITEM_OVERHEAD_32; + HASHMAP_OVERHEAD = HASHMAP_OVERHEAD_32; + HASHMAP_ENTRY_OVERHEAD = HASHMAP_ENTRY_OVERHEAD_32; + HASHSET_OVERHEAD = HASHSET_OVERHEAD_32; + HASHSET_ENTRY_OVERHEAD = HASHSET_ENTRY_OVERHEAD_32; + TWOHASHMAPS_OVERHEAD = TWOHASHMAPS_OVERHEAD_32; + TREEMAP_OVERHEAD = TREEMAP_OVERHEAD_32; + MAPLN_OVERHEAD = MAPLN_OVERHEAD_32; + TREEMAP_ENTRY_OVERHEAD = TREEMAP_ENTRY_OVERHEAD_32; + LN_OVERHEAD = 
LN_OVERHEAD_32; + VERSIONEDLN_OVERHEAD = VERSIONEDLN_OVERHEAD_32; + DUPCOUNTLN_OVERHEAD = DUPCOUNTLN_OVERHEAD_32; + BIN_FIXED_OVERHEAD = BIN_FIXED_OVERHEAD_32; + BINDELTA_OVERHEAD = BINDELTA_OVERHEAD_32; + DELTAINFO_OVERHEAD = DELTAINFO_OVERHEAD_32; + SPARSE_TARGET_ENTRY_OVERHEAD = SPARSE_TARGET_ENTRY_OVERHEAD_32; + DEFAULT_TARGET_ENTRY_OVERHEAD = + DEFAULT_TARGET_ENTRY_OVERHEAD_32; + DEFAULT_KEYVALS_OVERHEAD = DEFAULT_KEYVALS_OVERHEAD_32; + MAX_KEY_SIZE_KEYVALS_OVERHEAD = + MAX_KEY_SIZE_KEYVALS_OVERHEAD_32; + DEFAULT_LONG_REP_OVERHEAD = DEFAULT_LONG_REP_OVERHEAD_32; + SPARSE_LONG_REP_OVERHEAD = SPARSE_LONG_REP_OVERHEAD_32; + DIN_FIXED_OVERHEAD = DIN_FIXED_OVERHEAD_32; + DBIN_FIXED_OVERHEAD = DBIN_FIXED_OVERHEAD_32; + IN_FIXED_OVERHEAD = IN_FIXED_OVERHEAD_32; + TXN_OVERHEAD = TXN_OVERHEAD_32; + CHECKPOINT_REFERENCE_SIZE = CHECKPOINT_REFERENCE_SIZE_32; + KEY_OVERHEAD = KEY_OVERHEAD_32; + LOCKIMPL_OVERHEAD = LOCKIMPL_OVERHEAD_32; + THINLOCKIMPL_OVERHEAD = THINLOCKIMPL_OVERHEAD_32; + LOCKINFO_OVERHEAD = LOCKINFO_OVERHEAD_32; + WRITE_LOCKINFO_OVERHEAD = WRITE_LOCKINFO_OVERHEAD_32; + UTILIZATION_PROFILE_ENTRY = UTILIZATION_PROFILE_ENTRY_32; + DBFILESUMMARY_OVERHEAD = DBFILESUMMARY_OVERHEAD_32; + TFS_LIST_INITIAL_OVERHEAD = TFS_LIST_INITIAL_OVERHEAD_32; + TFS_LIST_SEGMENT_OVERHEAD = TFS_LIST_SEGMENT_OVERHEAD_32; + LN_INFO_OVERHEAD = LN_INFO_OVERHEAD_32; + FILESUMMARYLN_OVERHEAD = FILESUMMARYLN_OVERHEAD_32; + INENTRY_OVERHEAD = INENTRY_OVERHEAD_32; + DELTAINENTRY_OVERHEAD = DELTAINENTRY_OVERHEAD_32; + ARRAYLIST_OVERHEAD = ARRAYLIST_OVERHEAD_32; + TUPLE_OUTPUT_OVERHEAD = TUPLE_OUTPUT_OVERHEAD_32; + DOS_WEAK_BINREF_OVERHEAD = DOS_WEAK_BINREF_OVERHEAD_32; + DOS_OFFHEAP_BINREF_OVERHEAD = DOS_OFFHEAP_BINREF_OVERHEAD_32; + DOS_DEFERRED_LSN_BATCH_OVERHEAD = + DOS_DEFERRED_LSN_BATCH_OVERHEAD_32; + DOS_DEFERRED_DELTAREF_OVERHEAD = DOS_DEFERRED_DELTAREF_OVERHEAD_32; + } + } + + /* public for unit tests. */ + public final static long MIN_MAX_MEMORY_SIZE = 96 * 1024; + public final static String MIN_MAX_MEMORY_SIZE_STRING = + Long.toString(MIN_MAX_MEMORY_SIZE); + + /* This value prevents cache churn for apps with a high write rate. */ + @SuppressWarnings("unused") + private final static int DEFAULT_MIN_BTREE_CACHE_SIZE = 500 * 1024; + + private final static long N_64MB = (1 << 26); + + /* + * Note that this class contains long fields that are accessed by multiple + * threads. Access to these fields is synchronized when changing them but + * not when reading them to detect cache overflow or get stats. Although + * inaccuracies may occur when reading the values, correcting this is not + * worth the cost of synchronizing every time we access them. The worst + * that can happen is that we may invoke eviction unnecessarily. + */ + + /* + * Amount of memory cached for tree objects. + */ + private final AtomicLong treeMemoryUsage = new AtomicLong(0); + + /* + * Amount of memory cached for disk ordered scans. + */ + private final AtomicLong dosMemoryUsage = new AtomicLong(0); + + /* + * Amount of memory cached for txn usage. + */ + private final AtomicLong txnMemoryUsage = new AtomicLong(0); + + /* + * Amount of memory cached for log cleaning, dirty IN list, and other admin + * functions. + */ + private final AtomicLong adminMemoryUsage = new AtomicLong(0); + + /* + * Amount of memory cached for administrative structures that are + * sometimes housed within tree nodes.
Right now, that's + * DbFileSummaryMap, which is sometimes referenced by a MapLN by + * way of a DatabaseImpl, and sometimes is just referenced by + * a DatabaseImpl without a MapLN (the id and name databases.) + */ + private final AtomicLong treeAdminMemoryUsage = new AtomicLong(0); + + /* + * Amount of memory cached for locks. Protected by the + * LockManager.lockTableLatches[lockTableIndex]. + */ + private final AtomicLong lockMemoryUsage = new AtomicLong(0); + + /* + * Memory available to JE, based on je.maxMemory and the memory available + * to this process. + */ + private final Totals totals; + + /* Memory available to log buffers. */ + private long logBufferBudget; + + /* Maximum allowed use of the admin budget by the UtilizationTracker. */ + private long trackerBudget; + + /* Minimum to prevent cache churn. */ + private long minTreeMemoryUsage; + + private final EnvironmentImpl envImpl; + + MemoryBudget(EnvironmentImpl envImpl, + EnvironmentImpl sharedCacheEnv, + DbConfigManager configManager) + throws DatabaseException { + + this.envImpl = envImpl; + + /* Request notification of mutable property changes. */ + envImpl.addConfigObserver(this); + + /* Perform first time budget initialization. */ + long newMaxMemory; + if (envImpl.getSharedCache()) { + if (sharedCacheEnv != null) { + totals = sharedCacheEnv.getMemoryBudget().totals; + /* For a new environment, do not override existing budget. */ + newMaxMemory = -1; + } else { + totals = new SharedTotals(); + newMaxMemory = calcMaxMemory(configManager); + } + } else { + totals = new PrivateTotals(this); + newMaxMemory = calcMaxMemory(configManager); + } + reset(newMaxMemory, true /*newEnv*/, configManager); + + checkCompressedOops(); + } + + /** + * Logs a SEVERE message if compressed oops was specified but did not take + * effect. Must be called after the environment is initialized so the + * message makes it to the output file. + */ + private void checkCompressedOops() { + if (COMPRESSED_OOPS_REQUESTED && + COMPRESSED_OOPS_KNOWN && + !COMPRESSED_OOPS_KNOWN_ON) { + LoggerUtils.severe(envImpl.getLogger(), envImpl, + "-XX:+UseCompressedOops was specified but is not in effect," + + " probably because the heap size is too large for this JVM" + + " option on this platform. This is likely to cause an" + + " OutOfMemoryError!"); + } + } + + /** + * Respond to config updates. + */ + public void envConfigUpdate(DbConfigManager configManager, + EnvironmentMutableConfig ignore) + throws DatabaseException { + + /* Reinitialize the cache budget and the log buffer pool. */ + reset(calcMaxMemory(configManager), false /*newEnv*/, configManager); + } + + /** + * @throws IllegalArgumentException via Environment ctor and + * setMutableConfig. + */ + private long calcMaxMemory(DbConfigManager configManager) { + + /* + * Calculate the total memory allotted to JE. + * 1. If je.maxMemory is specified, use that. Check that it's not more + * than the JVM memory. + * 2. Otherwise, take je.maxMemoryPercent * JVM max memory. + */ + long newMaxMemory = + configManager.getLong(EnvironmentParams.MAX_MEMORY); + long jvmMemory = JVMSystemUtils.getRuntimeMaxMemory(); + + if (newMaxMemory != 0) { + /* Application specified a cache size number, validate it. */ + if (jvmMemory < newMaxMemory) { + throw new IllegalArgumentException + (EnvironmentParams.MAX_MEMORY.getName() + + " has a value of " + newMaxMemory + + " but the JVM is only configured for " + + jvmMemory + + ". 
Consider using je.maxMemoryPercent."); + } + if (newMaxMemory < MIN_MAX_MEMORY_SIZE) { + throw new IllegalArgumentException + (EnvironmentParams.MAX_MEMORY.getName() + + " is " + newMaxMemory + + " which is less than the minimum: " + + MIN_MAX_MEMORY_SIZE); + } + } else { + + /* + * When no explicit cache size is specified and the JVM memory size + * is unknown, assume a default sized (64 MB) heap. This produces + * a reasonable cache size when no heap size is known. + */ + if (jvmMemory == Long.MAX_VALUE) { + jvmMemory = N_64MB; + } + + /* Use the configured percentage of the JVM memory size. */ + int maxMemoryPercent = + configManager.getInt(EnvironmentParams.MAX_MEMORY_PERCENT); + newMaxMemory = (maxMemoryPercent * jvmMemory) / 100; + } + + return newMaxMemory; + } + + /** + * Initialize at construction time and when the cache is resized. + * + * @param newMaxMemory is the new total cache budget or is less than 0 if + * the total should remain unchanged. + * + * @param newEnv is true if this is the first time we are resetting the + * budget for a new environment. Note that a new environment has not yet + * been added to the set of shared cache environments. + */ + void reset(long newMaxMemory, + boolean newEnv, + DbConfigManager configManager) + throws DatabaseException { + + long oldLogBufferBudget = logBufferBudget; + + /* + * Update the new total cache budget. + */ + if (newMaxMemory < 0) { + newMaxMemory = getMaxMemory(); + } else { + totals.setMaxMemory(newMaxMemory); + } + + /* + * This environment's portion is adjusted for a shared cache. Further + * below we make buffer and tracker sizes a fixed percentage (7% and + * 2%, by default) of the total shared cache size. The math for this + * starts by dividing the total size by number of environments to get + * myCachePortion. Then we take 7% or 2% of myCachePortion to get each + * environment's portion. In other words, if there are 10 environments + * then each gets 7%/10 and 2%/10 of the total cache size, by default. + * + * Note that when we resize the shared cache, we resize the buffer + * pools and tracker budgets for all environments. Resizing the + * tracker budget has no overhead, but resizing the buffer pools causes + * new buffers to be allocated. If reallocation of the log buffers is + * not desirable, the user can configure a byte amount rather than a + * percentage. + */ + long myCachePortion; + if (envImpl.getSharedCache()) { + int nEnvs = DbEnvPool.getInstance().getNSharedCacheEnvironments(); + if (newEnv) { + nEnvs += 1; + } + myCachePortion = newMaxMemory / nEnvs; + } else { + myCachePortion = newMaxMemory; + } + + /* + * Calculate the memory budget for log buffering. If the LOG_MEM_SIZE + * parameter is not set, start by using 7% (1/16th) of the cache + * size. If it is set, use that explicit setting. + * + * No point in having more log buffers than the maximum size. If + * this starting point results in overly large log buffers, + * reduce the log buffer budget again. + */ + long newLogBufferBudget = + configManager.getLong(EnvironmentParams.LOG_MEM_SIZE); + if (newLogBufferBudget == 0) { + newLogBufferBudget = myCachePortion >> 4; + } else if (newLogBufferBudget > myCachePortion / 2) { + newLogBufferBudget = myCachePortion / 2; + } + + /* + * We have a first pass at the log buffer budget. See what + * size log buffers result. Don't let them be too big, it would + * be a waste. 
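To make the sizing above concrete, here is a worked sketch with assumed inputs (a 100 MB private cache, 3 buffers, and a 1 MB per-buffer cap; these numbers are illustrative, not necessarily JE's defaults):

    long myCachePortion = 100L * 1024 * 1024;        // 100 MB, private cache
    long logBufferBudget = myCachePortion >> 4;      // 1/16th = 6,553,600 bytes
    int numBuffers = 3;
    long startingBufferSize = logBufferBudget / numBuffers; // ~2.1 MB each
    long logBufferMaxSize = 1024 * 1024;             // assumed per-buffer cap
    if (startingBufferSize > logBufferMaxSize) {
        startingBufferSize = logBufferMaxSize;       // clamp each buffer to 1 MB
        logBufferBudget = numBuffers * startingBufferSize; // 3 MB total
    }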
+ */ + int numBuffers = + configManager.getInt(EnvironmentParams.NUM_LOG_BUFFERS); + long startingBufferSize = newLogBufferBudget / numBuffers; + int logBufferSize = + configManager.getInt(EnvironmentParams.LOG_BUFFER_MAX_SIZE); + if (startingBufferSize > logBufferSize) { + startingBufferSize = logBufferSize; + newLogBufferBudget = numBuffers * startingBufferSize; + } else if (startingBufferSize < + EnvironmentParams.MIN_LOG_BUFFER_SIZE) { + startingBufferSize = EnvironmentParams.MIN_LOG_BUFFER_SIZE; + newLogBufferBudget = numBuffers * startingBufferSize; + } + + long newCriticalThreshold = + (newMaxMemory * + envImpl.getConfigManager().getInt + (EnvironmentParams.EVICTOR_CRITICAL_PERCENTAGE))/100; + + long newTrackerBudget = + (myCachePortion * + envImpl.getConfigManager().getInt + (EnvironmentParams.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE))/100; + + long newMinTreeMemoryUsage = Math.min + (configManager.getLong(EnvironmentParams.MIN_TREE_MEMORY), + myCachePortion - newLogBufferBudget); + + /* + * If all has gone well, update the budget fields. Once the log buffer + * budget is determined, the remainder of the memory is left for tree + * nodes. + */ + logBufferBudget = newLogBufferBudget; + totals.setCriticalThreshold(newCriticalThreshold); + trackerBudget = newTrackerBudget; + minTreeMemoryUsage = newMinTreeMemoryUsage; + + /* The log buffer budget is counted in the cache usage. */ + totals.updateCacheUsage(logBufferBudget - oldLogBufferBudget); + + /* + * Only reset the log buffer pool if the log buffer has already been + * initialized (we're updating an existing budget) and the log buffer + * budget has changed (resetting it is expensive and may cause I/O). + */ + if (!newEnv && oldLogBufferBudget != logBufferBudget) { + envImpl.getLogManager().resetPool(configManager); + } + } + + /** + * Initialize the starting environment memory state. We really only need to + * recalibrate the tree and treeAdmin categories, since there are no locks + * and txns yet, and the items in the admin category are cleaner items and + * aren't affected by the recovery splicing process. + */ + void initCacheMemoryUsage(long dbTreeAdminMemory) { + long totalTree = 0; + long treeAdmin = 0; + for (IN in : envImpl.getInMemoryINs()) { + totalTree += in.getBudgetedMemorySize(); + treeAdmin += in.getTreeAdminMemorySize(); + } + refreshTreeMemoryUsage(totalTree); + refreshTreeAdminMemoryUsage(treeAdmin + dbTreeAdminMemory); + } + + /** + * Called by INList when clearing tree memory usage. + */ + void refreshTreeAdminMemoryUsage(long newSize) { + long oldSize = treeAdminMemoryUsage.getAndSet(newSize); + long diff = (newSize - oldSize); + + if (DEBUG_TREEADMIN) { + System.err.println("RESET = " + newSize); + } + if (totals.updateCacheUsage(diff)) { + envImpl.alertEvictor(); + } + } + + /** + * Called by INList when recalculating tree memory usage. + */ + void refreshTreeMemoryUsage(long newSize) { + long oldSize = treeMemoryUsage.getAndSet(newSize); + long diff = (newSize - oldSize); + + if (totals.updateCacheUsage(diff)) { + envImpl.alertEvictor(); + } + } + + /** + * Returns whether eviction of INList information is allowed. + * To prevent extreme cache churn, eviction of Btree information is + * prohibited unless the tree memory usage is above this minimum value. + */ + public boolean isTreeUsageAboveMinimum() { + return treeMemoryUsage.get() > minTreeMemoryUsage; + } + + /** + * For unit tests. 
+ */ + public long getMinTreeMemoryUsage() { + return minTreeMemoryUsage; + } + + /** + * Update the environment wide tree memory count, wake up the evictor if + * necessary. + * @param increment note that increment may be negative. + */ + public void updateTreeMemoryUsage(long increment) { + updateCounter(increment, treeMemoryUsage, "tree", DEBUG_TREE); + } + + /** + * Update the environment wide tree memory count, wake up the evictor if + * necessary. + * @param increment note that increment may be negative. + */ + public void updateDOSMemoryUsage(long increment) { + updateCounter(increment, dosMemoryUsage, "DOS", DEBUG_DOS); + } + + /** + * Update the environment wide txn memory count, wake up the evictor if + * necessary. + * @param increment note that increment may be negative. + */ + public void updateTxnMemoryUsage(long increment) { + updateCounter(increment, txnMemoryUsage, "txn", DEBUG_TXN); + } + + /** + * Update the environment wide admin memory count, wake up the evictor if + * necessary. + * @param increment note that increment may be negative. + */ + public void updateAdminMemoryUsage(long increment) { + updateCounter(increment, adminMemoryUsage, "admin", DEBUG_ADMIN); + } + + /** + * Update the treeAdmin memory count, wake up the evictor if necessary. + * @param increment note that increment may be negative. + */ + public void updateTreeAdminMemoryUsage(long increment) { + updateCounter(increment, treeAdminMemoryUsage, "treeAdmin", + DEBUG_TREEADMIN); + } + + private void updateCounter(long increment, + AtomicLong counter, + String debugName, + boolean debug) { + if (increment != 0) { + long newSize = counter.addAndGet(increment); + + assert (sizeNotNegative(newSize)) : + makeErrorMessage(debugName, newSize, increment); + + if (debug) { + if (increment > 0) { + System.err.println("INC-------- =" + increment + " " + + debugName + " " + newSize); + } else { + System.err.println("-------DEC=" + increment + " " + + debugName + " " + newSize); + } + } + + if (totals.updateCacheUsage(increment)) { + envImpl.alertEvictor(); + } + } + } + + private boolean sizeNotNegative(long newSize) { + + if (CLEANUP_DONE) { + return (newSize >= 0); + } + return true; + } + + public void updateLockMemoryUsage(long increment, int lockTableIndex) { + if (increment != 0) { + lockMemoryUsage.addAndGet(increment); + + assert lockMemoryUsage.get() >= 0: + makeErrorMessage("lockMem", + lockMemoryUsage.get(), + increment); + if (DEBUG_LOCK) { + if (increment > 0) { + System.err.println("INC-------- =" + increment + + " lock[" + + lockTableIndex + "] " + + lockMemoryUsage.get()); + } else { + System.err.println("-------DEC=" + increment + + " lock[" + lockTableIndex + "] " + + lockMemoryUsage.get()); + } + } + + if (totals.updateCacheUsage(increment)) { + envImpl.alertEvictor(); + } + } + } + + private String makeErrorMessage(String memoryType, + long total, + long increment) { + return memoryType + "=" + total + + " increment=" + increment + " " + + LoggerUtils.getStackTrace(new Throwable()); + } + + void subtractCacheUsage() { + totals.updateCacheUsage(0 - getLocalCacheUsage()); + } + + public long getLocalCacheUsage() { + return (logBufferBudget + + treeMemoryUsage.get() + + dosMemoryUsage.get() + + adminMemoryUsage.get() + + treeAdminMemoryUsage.get() + + getLockMemoryUsage()); + } + + long getVariableCacheUsage() { + return (treeMemoryUsage.get() + + dosMemoryUsage.get() + + adminMemoryUsage.get() + + treeAdminMemoryUsage.get() + + getLockMemoryUsage()); + } + + /** + * Public for unit testing. 
+ */ + public long getLockMemoryUsage() { + long accLockMemoryUsage = + txnMemoryUsage.get() + lockMemoryUsage.get(); + + return accLockMemoryUsage; + } + + /* + * The following 2 methods are shorthand for getTotals.getXxx(). + */ + + public long getCacheMemoryUsage() { + return totals.getCacheUsage(); + } + + public long getMaxMemory() { + return totals.getMaxMemory(); + } + + /** + * Used for unit testing. + */ + public long getTreeMemoryUsage() { + return treeMemoryUsage.get(); + } + + /** + * Used for unit testing. + */ + public long getDOSMemoryUsage() { + return dosMemoryUsage.get(); + } + + /** + * Used for unit testing. + */ + public long getAdminMemoryUsage() { + return adminMemoryUsage.get(); + } + + /* + * For unit testing + */ + public long getTreeAdminMemoryUsage() { + return treeAdminMemoryUsage.get(); + } + + public long getLogBufferBudget() { + return logBufferBudget; + } + + public long getTrackerBudget() { + return trackerBudget; + } + + public static int tupleOutputSize(TupleOutput o) { + return TUPLE_OUTPUT_OVERHEAD + + byteArraySize(o.getBufferBytes().length); + } + + /** + * Returns the memory size occupied by a byte array of a given length. All + * arrays (regardless of element type) have the same overhead for a zero + * length array. On 32b Java, there are 4 bytes included in that fixed + * overhead that can be used for the first N elements -- however many fit + * in 4 bytes. On 64b Java, there is no extra space included. In all + * cases, space is allocated in 8 byte chunks. + */ + public static int byteArraySize(int arrayLen) { + + /* + * ARRAY_OVERHEAD accounts for N bytes of data, which is 4 bytes on 32b + * Java and 0 bytes on 64b Java. Data larger than N bytes is allocated + * in 8 byte increments. + */ + int size = ARRAY_OVERHEAD; + if (arrayLen > ARRAY_SIZE_INCLUDED) { + size += ((arrayLen - ARRAY_SIZE_INCLUDED + 7) / 8) * 8; + } + + return size; + } + + public static int shortArraySize(int arrayLen) { + return byteArraySize(arrayLen * 2); + } + + public static int intArraySize(int arrayLen) { + return byteArraySize(arrayLen * 4); + } + + public static int longArraySize(int arrayLen) { + return byteArraySize(arrayLen * 8); + } + + public static int objectArraySize(int arrayLen) { + return byteArraySize(arrayLen * OBJECT_ARRAY_ITEM_OVERHEAD); + } + + StatGroup loadStats() { + StatGroup stats = new StatGroup(MB_GROUP_NAME, MB_GROUP_DESC); + new LongStat(stats, MB_SHARED_CACHE_TOTAL_BYTES, + totals.isSharedCache() ? totals.getCacheUsage() : 0); + new LongStat(stats, MB_TOTAL_BYTES, getLocalCacheUsage()); + new LongStat(stats, MB_DATA_BYTES, + treeMemoryUsage.get() + treeAdminMemoryUsage.get()); + new LongStat(stats, MB_DATA_ADMIN_BYTES, treeAdminMemoryUsage.get()); + new LongStat(stats, MB_DOS_BYTES, dosMemoryUsage.get()); + new LongStat(stats, MB_ADMIN_BYTES, adminMemoryUsage.get()); + new LongStat(stats, MB_LOCK_BYTES, getLockMemoryUsage()); + + return stats; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("treeUsage = ").append(treeMemoryUsage.get()); + sb.append("treeAdminUsage = ").append(treeAdminMemoryUsage.get()); + sb.append("dosUsage = ").append(dosMemoryUsage.get()); + sb.append("adminUsage = ").append(adminMemoryUsage.get()); + sb.append("txnUsage = ").append(txnMemoryUsage.get()); + sb.append("lockUsage = ").append(getLockMemoryUsage()); + return sb.toString(); + } + + public Totals getTotals() { + return totals; + } + + /** + * Common base class for shared and private totals. 
This abstraction + * allows most other classes to be unaware of whether we're using a + * SharedEvictor or PrivateEvictor. + */ + public abstract static class Totals { + + long maxMemory; + private long criticalThreshold; + + private Totals() { + maxMemory = 0; + } + + private void setMaxMemory(long maxMemory) { + this.maxMemory = maxMemory; + } + + public final long getMaxMemory() { + return maxMemory; + } + + private void setCriticalThreshold(long criticalThreshold) { + this.criticalThreshold = criticalThreshold; + } + + public final long getCriticalThreshold() { + return criticalThreshold; + } + + public abstract long getCacheUsage(); + abstract boolean updateCacheUsage(long increment); + abstract boolean isSharedCache(); + } + + /** + * Totals for a single environment's non-shared cache. Used when + * EnvironmentConfig.setSharedCache(false) and a PrivateEvictor are used. + */ + private static class PrivateTotals extends Totals { + + private final MemoryBudget parent; + + private PrivateTotals(MemoryBudget parent) { + this.parent = parent; + } + + @Override + public final long getCacheUsage() { + return parent.getLocalCacheUsage(); + } + + @Override + final boolean updateCacheUsage(long increment) { + return (parent.getLocalCacheUsage() > maxMemory); + } + + @Override + final boolean isSharedCache() { + return false; + } + } + + /** + * Totals for the multi-environment shared cache. Used when + * EnvironmentConfig.setSharedCache(true) and the SharedEvictor are used. + */ + private static class SharedTotals extends Totals { + + private final AtomicLong usage; + + private SharedTotals() { + usage = new AtomicLong(); + } + + @Override + public final long getCacheUsage() { + return usage.get(); + } + + @Override + final boolean updateCacheUsage(long increment) { + return (usage.addAndGet(increment) > maxMemory); + } + + @Override + final boolean isSharedCache() { + return true; + } + } +} diff --git a/src/com/sleepycat/je/dbi/NodeSequence.java b/src/com/sleepycat/je/dbi/NodeSequence.java new file mode 100644 index 0000000..d463be3 --- /dev/null +++ b/src/com/sleepycat/je/dbi/NodeSequence.java @@ -0,0 +1,151 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.utilint.DbLsn; + +/** + * NodeSequence encapsulates the generation and maintenance of a sequence for + * generating node IDs, transient LSNs and other misc sequences. + */ +public class NodeSequence { + + public static final int FIRST_LOCAL_NODE_ID = 1; + public static final int FIRST_REPLICATED_NODE_ID = -10; + + /* + * Node IDs: We need to ensure that local and replicated nodes use + * different number spaces for their ids, so there can't be any possible + * conflicts. Local, non replicated nodes use positive values starting + * with 1, replicated nodes use negative values starting with -10. + * + * Node ID values from 0 to -9 are reserved. 0 is not used and should be + * avoided.
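The two Totals flavors differ in where the running figure lives: PrivateTotals recomputes its single environment's usage on demand, while SharedTotals keeps one AtomicLong that every environment sharing the cache updates. A sketch of the shared arithmetic (setMaxMemory is private in the source; it is invoked directly here only to make the numbers concrete):

    SharedTotals totals = new SharedTotals();
    totals.setMaxMemory(10_000_000);
    boolean over1 = totals.updateCacheUsage(6_000_000); // false: 6 MB <= 10 MB
    boolean over2 = totals.updateCacheUsage(6_000_000); // true: 12 MB > 10 MB
    // A true return is the overflow signal callers use to alert the evictor.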
-1 is used to mean null or none, and should be used via the + * Node.NULL_NODE_ID constant. -2 through -9 are reserved for future use. + * + * The local and replicated node ID sequences are initialized by the first + * pass of recovery, after the log has been scanned for the latest used + * node ID. + */ + private AtomicLong lastAllocatedLocalNodeId = null; + private AtomicLong lastAllocatedReplicatedNodeId = null; + + /* + * Transient LSNs are used for not-yet-logged DeferredWrite records and + * for the EOF record used for Serializable isolation. Transient LSNs are + * used to provide unique locks, and are only used during the life of an + * environment, for non-persistent objects. + */ + private final AtomicLong lastAllocatedTransientLsnOffset = + new AtomicLong(0L); + + public final EnvironmentImpl envImpl; + + /* Transient sequences. */ + private final AtomicLong nextBackupId = new AtomicLong(0L); + private final AtomicLong nextDatabaseCountId = new AtomicLong(0L); + private final AtomicLong nextDiskOrderedCursorId = new AtomicLong(0L); + private final AtomicLong nextNetworkRestoreId = new AtomicLong(0L); + + public NodeSequence(EnvironmentImpl envImpl) { + this.envImpl = envImpl; + } + + /** + * Initialize the counters in these methods rather than a constructor + * so we can control the initialization more precisely. + */ + void initRealNodeId() { + lastAllocatedLocalNodeId = new AtomicLong(FIRST_LOCAL_NODE_ID - 1); + lastAllocatedReplicatedNodeId = + new AtomicLong(FIRST_REPLICATED_NODE_ID + 1); + } + + /** + * The last allocated local and replicated node IDs are used for ckpts. + */ + public long getLastLocalNodeId() { + return lastAllocatedLocalNodeId.get(); + } + + public long getLastReplicatedNodeId() { + return lastAllocatedReplicatedNodeId.get(); + } + + /** + * We get a new node ID of the appropriate kind when creating a new node. + */ + public long getNextLocalNodeId() { + return lastAllocatedLocalNodeId.incrementAndGet(); + } + + /* + public long getNextReplicatedNodeId() { + return lastAllocatedReplicatedNodeId.decrementAndGet(); + } + */ + + /** + * Initialize the node IDs, from recovery. + */ + public void setLastNodeId(long lastReplicatedNodeId, + long lastLocalNodeId) { + lastAllocatedReplicatedNodeId.set(lastReplicatedNodeId); + lastAllocatedLocalNodeId.set(lastLocalNodeId); + } + + /* + * Tracks the lowest replicated node ID used during a replay of the + * replication stream, so that it's available as the starting point if this + * replica transitions to being the master. + */ + public void updateFromReplay(long replayNodeId) { + assert !envImpl.isMaster(); + if (replayNodeId > 0 && !envImpl.isRepConverted()) { + throw EnvironmentFailureException.unexpectedState + ("replay node id is unexpectedly positive " + replayNodeId); + } + + if (replayNodeId < lastAllocatedReplicatedNodeId.get()) { + lastAllocatedReplicatedNodeId.set(replayNodeId); + } + } + + /** + * Assign the next available transient LSN. 
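The disjoint ID spaces described above can be seen directly; this sketch assumes package access (initRealNodeId is package-private) and an available envImpl:

    NodeSequence seq = new NodeSequence(envImpl);
    seq.initRealNodeId();
    long a = seq.getNextLocalNodeId();   // 1 (FIRST_LOCAL_NODE_ID)
    long b = seq.getNextLocalNodeId();   // 2
    // Replicated IDs would count down from FIRST_REPLICATED_NODE_ID:
    // -10, -11, -12, ... (see the commented-out decrementAndGet above).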
+ */ + public long getNextTransientLsn() { + return DbLsn.makeTransientLsn + (lastAllocatedTransientLsnOffset.getAndIncrement()); + } + + public long getNextBackupId() { + return nextBackupId.getAndIncrement(); + } + + public long getNextDatabaseCountId() { + return nextDatabaseCountId.getAndIncrement(); + } + + public long getNextDiskOrderedCursorId() { + return nextDiskOrderedCursorId.getAndIncrement(); + } + + public long getNextNetworkRestoreId() { + return nextNetworkRestoreId.getAndIncrement(); + } +} diff --git a/src/com/sleepycat/je/dbi/Operation.java b/src/com/sleepycat/je/dbi/Operation.java new file mode 100644 index 0000000..2155237 --- /dev/null +++ b/src/com/sleepycat/je/dbi/Operation.java @@ -0,0 +1,76 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.nio.ByteBuffer; + +/** + * An enumeration of different api call sources for replication, currently for + * debugging. This is also intended to support the future possibility of + * providing application level visibility into the replication operation + * stream. + */ +public class Operation { + + public static final Operation PUT = + new Operation((byte) 1, "PUT"); + public static final Operation NO_OVERWRITE = + new Operation((byte) 2, "NO_OVERWRITE"); + public static final Operation PLACEHOLDER = + new Operation((byte) 3, "PLACEHOLDER"); + + private static final Operation[] ALL_OPS = + {PUT, NO_OVERWRITE, PLACEHOLDER }; + + private static final byte MAX_OP = 3; + private static final byte MIN_OP = 1; + + private byte op; + private String name; + + public Operation() { + } + + private Operation(byte op, String name) { + this.op = op; + this.name = name; + } + + public int getContentSize() { + return 1; + } + + /** + * Serialize this object into the buffer. + * @param buffer is the destination buffer + */ + public void writeToBuffer(ByteBuffer buffer) { + buffer.put(op); + } + + public static Operation readFromBuffer(ByteBuffer buffer) { + byte opNum = buffer.get(); + if (opNum >= MIN_OP && + opNum <= MAX_OP) { + return ALL_OPS[opNum - 1]; + } else { + return new Operation(opNum, "UNKNOWN " + opNum); + } + } + + @Override + public String toString() { + return name; + } +} diff --git a/src/com/sleepycat/je/dbi/PutMode.java b/src/com/sleepycat/je/dbi/PutMode.java new file mode 100644 index 0000000..dcaa931 --- /dev/null +++ b/src/com/sleepycat/je/dbi/PutMode.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +/** + * Used to distinguish Cursor put operations. 
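Operation's wire format is the single opcode byte described above; a round trip looks like this (a java.nio.ByteBuffer import is assumed):

    ByteBuffer buf = ByteBuffer.allocate(Operation.PUT.getContentSize());
    Operation.PUT.writeToBuffer(buf);              // writes the byte 1
    buf.flip();
    Operation op = Operation.readFromBuffer(buf);  // returns the PUT constant
    // Opcodes outside [1..3] yield a new instance named "UNKNOWN <n>".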
+ */ +public enum PutMode { + + /** + * User operation: Cursor.putCurrent. Replace data at current position. + * Return KEYEMPTY if record at current position is deleted. + */ + CURRENT, + + /** + * User operation: Cursor.putNoDupData. Applies only to databases with + * duplicates. Insert key/data pair if it does not already exist; + * otherwise, return KEYEXIST. + */ + NO_DUP_DATA, + + /** + * User operation: Cursor.putNoOverwrite. Insert key/data pair if key + * does not already exist; otherwise, return KEYEXIST. + */ + NO_OVERWRITE, + + /** + * User operation: Cursor.put. Insert if key (for non-duplicates DBs) or + * key/data (for duplicates DBs) does not already exist; otherwise, + * overwrite key and data. + */ + OVERWRITE, +} diff --git a/src/com/sleepycat/je/dbi/RangeConstraint.java b/src/com/sleepycat/je/dbi/RangeConstraint.java new file mode 100644 index 0000000..6bfeab7 --- /dev/null +++ b/src/com/sleepycat/je/dbi/RangeConstraint.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +public interface RangeConstraint { + boolean inBounds(byte[] key); +} diff --git a/src/com/sleepycat/je/dbi/RangeRestartException.java b/src/com/sleepycat/je/dbi/RangeRestartException.java new file mode 100644 index 0000000..532d11b --- /dev/null +++ b/src/com/sleepycat/je/dbi/RangeRestartException.java @@ -0,0 +1,30 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import com.sleepycat.je.utilint.InternalException; + +/** + * Thrown by the LockManager when requesting a RANGE_READ or RANGE_WRITE + * lock, and a RANGE_INSERT lock is held or is waiting. This exception is + * caught by read operations and causes a restart of the operation. It should + * never be seen by the user. + */ +@SuppressWarnings("serial") +public class RangeRestartException extends InternalException { + + public RangeRestartException() { + super(); + } +} diff --git a/src/com/sleepycat/je/dbi/RecordVersion.java b/src/com/sleepycat/je/dbi/RecordVersion.java new file mode 100644 index 0000000..7fe9105 --- /dev/null +++ b/src/com/sleepycat/je/dbi/RecordVersion.java @@ -0,0 +1,37 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
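RangeConstraint gives callers a key-acceptance predicate. A minimal implementation sketch, accepting keys strictly below a fixed bound under unsigned-byte ordering (the bound value and comparison rule are illustrative assumptions, not taken from this import):

    RangeConstraint belowBound = new RangeConstraint() {
        private final byte[] bound = {(byte) 0x7f};
        @Override
        public boolean inBounds(byte[] key) {
            int n = Math.min(key.length, bound.length);
            for (int i = 0; i < n; i++) {
                int a = key[i] & 0xff;   // compare as unsigned bytes
                int b = bound[i] & 0xff;
                if (a != b) {
                    return a < b;
                }
            }
            return key.length < bound.length;  // shorter prefix sorts first
        }
    };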
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +/** + * Used to return the VLSN and LSN for a record. The VLSN is a unique version + * for a rep group, and the LSN is unique for a single node. + */ +public class RecordVersion { + + private final long vlsn; + private final long lsn; + + RecordVersion(long vlsn, long lsn) { + this.vlsn = vlsn; + this.lsn = lsn; + } + + public long getVLSN() { + return vlsn; + } + + public long getLSN() { + return lsn; + } +} diff --git a/src/com/sleepycat/je/dbi/RepConfigProxy.java b/src/com/sleepycat/je/dbi/RepConfigProxy.java new file mode 100644 index 0000000..ba74ac4 --- /dev/null +++ b/src/com/sleepycat/je/dbi/RepConfigProxy.java @@ -0,0 +1,23 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import com.sleepycat.je.ReplicaConsistencyPolicy; + +/** + * Used to pass a replication configuration instance through the non-HA code. + */ +public interface RepConfigProxy { + public ReplicaConsistencyPolicy getConsistencyPolicy(); +} diff --git a/src/com/sleepycat/je/dbi/ReplicatedDatabaseConfig.java b/src/com/sleepycat/je/dbi/ReplicatedDatabaseConfig.java new file mode 100644 index 0000000..cc8aef6 --- /dev/null +++ b/src/com/sleepycat/je/dbi/ReplicatedDatabaseConfig.java @@ -0,0 +1,222 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.log.BasicVersionedWriteLoggable; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.VersionedWriteLoggable; + +/** + * This class contains all fields of the database configuration which are + * persistent. This class is logged as part of a nameLN so that databases can + * be created on replica nodes with the correct configuration. + */ +public class ReplicatedDatabaseConfig extends BasicVersionedWriteLoggable { + + /** + * The log version of the most recent format change for this loggable. 
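+ * (Log version 8 dropped the obsolete maxDupTreeEntriesPerNode field
+ * and added trigger metadata; see the version checks in readFromLog.)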
+ * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + private byte flags; + private int maxTreeEntriesPerNode; + private byte[] btreeComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + private byte[] duplicateComparatorBytes = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + private byte[][] triggerBytes = null; + + /** For reading */ + public ReplicatedDatabaseConfig() { + } + + /** For writing */ + ReplicatedDatabaseConfig(byte flags, + int maxTreeEntriesPerNode, + byte[] btreeComparatorBytes, + byte[] duplicateComparatorBytes, + byte[][] triggerBytes) { + + this.flags = flags; + this.maxTreeEntriesPerNode = maxTreeEntriesPerNode; + + if (btreeComparatorBytes != null) { + this.btreeComparatorBytes = btreeComparatorBytes; + } + + if (duplicateComparatorBytes != null) { + this.duplicateComparatorBytes = duplicateComparatorBytes; + } + + if (triggerBytes != null) { + this.triggerBytes = triggerBytes; + } + } + + /** + * Create a database config for use on the replica which contains + * all the configuration options that were conveyed by way of this class. + */ + public DatabaseConfig getReplicaConfig(EnvironmentImpl envImpl) { + DatabaseConfig replicaConfig = new DatabaseConfig(); + replicaConfig.setTransactional(true); + replicaConfig.setSortedDuplicates + (DatabaseImpl.getSortedDuplicates(flags)); + + /* + * KeyPrefixing is set to true if dups are enabled, to account for the + * upgrade scenario where the Master has not yet been upgraded but the + * Replica has been. + */ + replicaConfig.setKeyPrefixing(DatabaseImpl.getKeyPrefixing(flags) || + DatabaseImpl.getSortedDuplicates(flags)); + replicaConfig.setTemporary(DatabaseImpl.isTemporary(flags)); + replicaConfig.setReplicated(true); + replicaConfig.setNodeMaxEntries(maxTreeEntriesPerNode); + + DatabaseImpl.ComparatorReader reader = + new DatabaseImpl.ComparatorReader(btreeComparatorBytes, + "Btree", + envImpl.getClassLoader()); + if (reader.isClass()) { + replicaConfig.setBtreeComparator(reader.getComparatorClass()); + } else { + replicaConfig.setBtreeComparator(reader.getComparator()); + } + + reader = new DatabaseImpl.ComparatorReader(duplicateComparatorBytes, + "Duplicate", + envImpl.getClassLoader()); + if (reader.isClass()) { + replicaConfig.setDuplicateComparator(reader.getComparatorClass()); + } else { + replicaConfig.setDuplicateComparator(reader.getComparator()); + } + + replicaConfig.setTriggers(TriggerUtils. 
+ unmarshallTriggers(null, triggerBytes, + envImpl.getClassLoader())); + + return replicaConfig; + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + return Collections.emptyList(); + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return 1 + // flags, 1 byte + LogUtils.getPackedIntLogSize(maxTreeEntriesPerNode) + + LogUtils.getByteArrayLogSize(btreeComparatorBytes) + + LogUtils.getByteArrayLogSize(duplicateComparatorBytes) + + TriggerUtils.logSize(triggerBytes); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + logBuffer.put(flags); + LogUtils.writePackedInt(logBuffer, maxTreeEntriesPerNode); + LogUtils.writeByteArray(logBuffer, btreeComparatorBytes); + LogUtils.writeByteArray(logBuffer, duplicateComparatorBytes); + TriggerUtils.writeTriggers(logBuffer, triggerBytes); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + + /* + * ReplicatedDatabaseConfigs didn't exist before version 6 so they are + * always packed. + */ + flags = itemBuffer.get(); + maxTreeEntriesPerNode = + LogUtils.readInt(itemBuffer, false/*unpacked*/); + if (entryVersion < 8) { + /* Discard maxDupTreeEntriesPerNode. */ + LogUtils.readInt(itemBuffer, false/*unpacked*/); + } + btreeComparatorBytes = + LogUtils.readByteArray(itemBuffer, false/*unpacked*/); + duplicateComparatorBytes = + LogUtils.readByteArray(itemBuffer, false/*unpacked*/); + triggerBytes = (entryVersion < 8) ? + null : + TriggerUtils.readTriggers(itemBuffer, entryVersion); + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + } + + @Override + public long getTransactionId() { + return 0; + } + + @Override + public boolean logicalEquals(Loggable other) { + if (!(other instanceof ReplicatedDatabaseConfig)) { + return false; + } + + ReplicatedDatabaseConfig otherConfig = + (ReplicatedDatabaseConfig) other; + + if (flags != otherConfig.flags) { + return false; + } + + if (maxTreeEntriesPerNode != + otherConfig.maxTreeEntriesPerNode) { + return false; + } + + if (!Arrays.equals(btreeComparatorBytes, + otherConfig.btreeComparatorBytes)) { + return false; + } + + if (!Arrays.equals(duplicateComparatorBytes, + otherConfig.duplicateComparatorBytes)) { + return false; + } + + return true; + } +} diff --git a/src/com/sleepycat/je/dbi/SearchMode.java b/src/com/sleepycat/je/dbi/SearchMode.java new file mode 100644 index 0000000..a4e1ddc --- /dev/null +++ b/src/com/sleepycat/je/dbi/SearchMode.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.dbi;
+
+public enum SearchMode {
+    SET(true, false, "SET"),
+    BOTH(true, true, "BOTH"),
+    SET_RANGE(false, false, "SET_RANGE"),
+    BOTH_RANGE(false, true, "BOTH_RANGE");
+
+    private final boolean exactSearch;
+    private final boolean dataSearch;
+    private final String name;
+
+    private SearchMode(boolean exactSearch,
+                       boolean dataSearch,
+                       String name) {
+        this.exactSearch = exactSearch;
+        this.dataSearch = dataSearch;
+        this.name = "SearchMode." + name;
+    }
+
+    /**
+     * Returns true when the key or key/data search is exact, i.e., for SET
+     * and BOTH.
+     */
+    public final boolean isExactSearch() {
+        return exactSearch;
+    }
+
+    /**
+     * Returns true when the data value is included in the search, i.e., for
+     * BOTH and BOTH_RANGE.
+     */
+    public final boolean isDataSearch() {
+        return dataSearch;
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/SequenceStatDefinition.java b/src/com/sleepycat/je/dbi/SequenceStatDefinition.java
new file mode 100644
index 0000000..87ca63e
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/SequenceStatDefinition.java
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.dbi;
+
+import com.sleepycat.je.utilint.StatDefinition;
+
+/**
+ * Per-stat Metadata for JE sequence statistics.
+ */
+public class SequenceStatDefinition {
+
+    public static final String GROUP_NAME = "Sequence";
+    public static final String GROUP_DESC = "Sequence statistics";
+
+    public static final StatDefinition SEQUENCE_GETS =
+        new StatDefinition("nGets",
+                           "Number of times that Sequence.get was called " +
+                           "successfully.");
+
+    public static final StatDefinition SEQUENCE_CACHED_GETS =
+        new StatDefinition("nCachedGets",
+                           "Number of times that Sequence.get was called " +
+                           "and a cached value was returned.");
+
+    public static final StatDefinition SEQUENCE_STORED_VALUE =
+        new StatDefinition("current",
+                           "The current value of the sequence in the " +
+                           "database.");
+
+    public static final StatDefinition SEQUENCE_CACHE_VALUE =
+        new StatDefinition("value",
+                           "The current cached value of the sequence.");
+
+    public static final StatDefinition SEQUENCE_CACHE_LAST =
+        new StatDefinition("lastValue",
+                           "The last cached value of the sequence.");
+
+    public static final StatDefinition SEQUENCE_RANGE_MIN =
+        new StatDefinition("min",
+                           "The minimum permitted value of the sequence.");
+
+    public static final StatDefinition SEQUENCE_RANGE_MAX =
+        new StatDefinition("max",
+                           "The maximum permitted value of the sequence.");
+
+    public static final StatDefinition SEQUENCE_CACHE_SIZE =
+        new StatDefinition("cacheSize",
+                           "The number of values that will be cached in " +
+                           "this handle.");
+}
diff --git a/src/com/sleepycat/je/dbi/SortedLSNTreeWalker.java b/src/com/sleepycat/je/dbi/SortedLSNTreeWalker.java
new file mode 100644
index 0000000..5c2f7bf
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/SortedLSNTreeWalker.java
@@ -0,0 +1,1205 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.io.FileNotFoundException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.WholeEntry; +import com.sleepycat.je.log.entry.BINDeltaLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.OldBINDeltaLogEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.OldBINDelta; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.SizeofMarker; + +/** + * SortedLSNTreeWalker uses ordered disk access rather than random access to + * iterate over a database tree. Faulting in data records by on-disk order can + * provide much improved performance over faulting in by key order, since the + * latter may require random access. SortedLSN walking does not obey cursor + * and locking constraints, and therefore can only be guaranteed consistent for + * a quiescent tree which is not being modified by user or daemon threads. + * + * The class walks over the tree using sorted LSN fetching for parts of the + * tree that are not in memory. It returns LSNs for each node in the tree, + * except the root IN, in an arbitrary order (i.e. not key + * order). The caller is responsible for getting the root IN's LSN explicitly. + *
+ * <p>
        + * A callback function specified in the constructor is executed for each LSN + * found. + *
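+ * <p>
+ * A minimal usage sketch (hypothetical caller; the TreeNodeProcessor
+ * interface and the constructor contract are defined below in this
+ * file):
+ * <pre>
+ *    TreeNodeProcessor processor = new TreeNodeProcessor() {
+ *        public void processLSN(long childLSN, LogEntryType childType,
+ *                               Node theNode, byte[] lnKey,
+ *                               int lastLoggedSize) {
+ *            // Examine one node per LSN, in arbitrary (disk) order.
+ *        }
+ *        public void processDirtyDeletedLN(long childLSN, LN ln,
+ *                                          byte[] lnKey) {
+ *            // Called only for dirty deferred-write LNs.
+ *        }
+ *        public void noteMemoryExceeded() {
+ *            // The internal memory limit was hit; the current batch
+ *            // of accumulated LSNs is about to be processed early.
+ *        }
+ *    };
+ *    SortedLSNTreeWalker walker = new SortedLSNTreeWalker(
+ *        new DatabaseImpl[] { dbImpl },  // databases to walk
+ *        false,                          // setDbState
+ *        new long[] { rootLsn },         // parallel root LSNs
+ *        processor,
+ *        null,                           // savedExceptions
+ *        null);                          // excPredicate
+ *    walker.walk();
+ * </pre>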
+ * <p>
        + * The walker works in two phases. The first phase is to gather and return all + * the resident INs using the roots that were specified when the SLTW was + * constructed. For each child of each root, if the child is resident it is + * passed to the callback method (processLSN). If the child was not in memory, + * it is added to a list of LSNs to read. When all of the in-memory INs have + * been passed to the callback for all LSNs collected, phase 1 is complete. + *
+ * <p>
        + * In phase 2, for each of the sorted LSNs, the target is fetched, the type + * determined, and the LSN and type passed to the callback method for + * processing. LSNs of the children of those nodes are retrieved and the + * process repeated until there are no more nodes to be fetched for this + * database's tree. LSNs are accumulated in batches in this phase so that + * memory consumption is not excessive. For instance, if batches were not used + * then the LSNs of all of the BINs would need to be held in memory. + */ +public class SortedLSNTreeWalker { + + /* + * The interface for calling back to the user with each LSN. + */ + public interface TreeNodeProcessor { + void processLSN(long childLSN, + LogEntryType childType, + Node theNode, + byte[] lnKey, + int lastLoggedSize) + throws FileNotFoundException; + + /* Used for processing dirty (unlogged) deferred write LNs. [#15365] */ + void processDirtyDeletedLN(long childLSN, LN ln, byte[] lnKey); + + /* Called when the internal memory limit is exceeded. */ + void noteMemoryExceeded(); + } + + /* + * Optionally passed to the SortedLSNTreeWalker to be called when an + * exception occurs. + */ + interface ExceptionPredicate { + /* Return true if the exception can be ignored. */ + boolean ignoreException(Exception e); + } + + final DatabaseImpl[] dbImpls; + protected final EnvironmentImpl envImpl; + + /* + * Save the root LSN at construction time, because the root may be + * nulled out before walk() executes. + */ + private final long[] rootLsns; + + /* + * Whether to call DatabaseImpl.finishedINListHarvest(). + */ + private final boolean setDbState; + + /* The limit on memory to be used for internal structures during SLTW. */ + private long internalMemoryLimit = Long.MAX_VALUE; + + /* The current memory usage by internal SLTW structures. */ + private long internalMemoryUsage; + + private final TreeNodeProcessor callback; + + /* + * If true, then walker should fetch LNs and pass them to the + * TreeNodeProcessor callback method. Even if true, dup LNs are not + * fetched because they are normally never used (see accumulateDupLNs). + */ + boolean accumulateLNs = false; + + boolean preloadIntoOffHeapCache = false; + + /* + * If true, fetch LNs in a dup DB. Since LNs in a dup DB are not used by + * cursor operations, fetching dup LNs should only be needed in very + * exceptional situations. Currently this field is never set to true. + */ + boolean accumulateDupLNs = false; + + /* + * If non-null, save any exceptions encountered while traversing nodes into + * this savedException list, in order to walk as much of the tree as + * possible. The caller of the tree walker will handle the exceptions. + */ + private final List savedExceptions; + + private final ExceptionPredicate excPredicate; + + /* + * The batch size of LSNs which will be sorted. + */ + private long lsnBatchSize = Long.MAX_VALUE; + + /* Holder for returning LN key from fetchLSN. */ + private final DatabaseEntry lnKeyEntry = new DatabaseEntry(); + + /* + * This map provides an LSN to IN/index. When an LSN is processed by the + * tree walker, the map is used to lookup the parent IN and child entry + * index of each LSN processed by the tree walker. Since fetchLSN is + * called with an arbitrary LSN, and since when we fetch (for preload) we + * need to setup the parent to refer to the node which we are prefetching, + * we need to have the parent in hand at the time of the call to fetchLSN. 
+ * This map allows us to keep a reference to that parent so that we can + * call fetchNode on that parent. + * + * It is also necessary to maintain this map for cases other than preload() + * so that during multi-db walks (i.e. multi db preload), we can associate + * an arbitrary LSN back to the parent IN and therefore connect a fetch'ed + * Node into the proper place in the tree. + * + * LSN -> INEntry + */ + /* struct to hold IN/entry-index pair. */ + public static class INEntry { + final IN in; + final int index; + + INEntry(IN in, int index) { + assert in != null; + assert in.getDatabase() != null; + this.in = in; + this.index = index; + } + + public INEntry(@SuppressWarnings("unused") SizeofMarker marker) { + this.in = null; + this.index = 0; + } + + Object getDelta() { + return null; + } + + long getDeltaLsn() { + return DbLsn.NULL_LSN; + } + + long getMemorySize() { + return MemoryBudget.HASHMAP_ENTRY_OVERHEAD + + MemoryBudget.INENTRY_OVERHEAD; + } + } + + /** + * Supplements INEntry with BIN-delta information. When a BIN-delta is + * encountered during the fetching process, we cannot immediately place it + * in the tree. Instead we queue a DeltaINEntry for fetching the full BIN, + * in LSN order as usual. When the full BIN is fetched, the DeltaINEntry + * is used to apply the delta and place the result in the tree. + */ + public static class DeltaINEntry extends INEntry { + private final Object delta; + private final long deltaLsn; + + DeltaINEntry(IN in, int index, Object delta, long deltaLsn) { + super(in, index); + assert (delta != null); + assert (deltaLsn != DbLsn.NULL_LSN); + this.delta = delta; + this.deltaLsn = deltaLsn; + } + + public DeltaINEntry(@SuppressWarnings("unused") SizeofMarker marker) { + super(marker); + this.delta = null; + this.deltaLsn = 0; + } + + @Override + Object getDelta() { + return delta; + } + + @Override + long getDeltaLsn() { + return deltaLsn; + } + + @Override + long getMemorySize() { + final long deltaSize; + if (delta instanceof OldBINDelta) { + deltaSize = ((OldBINDelta) delta).getMemorySize(); + } else { + deltaSize = ((BIN) delta).getInMemorySize(); + } + return deltaSize + + MemoryBudget.HASHMAP_ENTRY_OVERHEAD + + MemoryBudget.DELTAINENTRY_OVERHEAD; + } + } + + private final Map lsnINMap = new HashMap<>(); + + /* + * @param dbImpls an array of DatabaseImpls which should be walked over + * in disk order. This array must be parallel to the rootLsns array in + * that rootLsns[i] must be the root LSN for dbImpls[i]. + * + * @param setDbState if true, indicate when the INList harvest has + * completed for a particular DatabaseImpl. + * + * @param rootLsns is passed in addition to the dbImpls, because the + * root may be nulled out on the dbImpl before walk() is called. + * + * @param callback the callback instance + * + * @param savedExceptions a List of DatabaseExceptions encountered during + * the tree walk. + * + * @param excPredicate a predicate to determine whether a given exception + * should be ignored. + */ + public SortedLSNTreeWalker(DatabaseImpl[] dbImpls, + boolean setDbState, + long[] rootLsns, + TreeNodeProcessor callback, + List savedExceptions, + ExceptionPredicate excPredicate) { + + if (dbImpls == null || dbImpls.length < 1) { + throw EnvironmentFailureException.unexpectedState + ("DatabaseImpls array is null or 0-length for " + + "SortedLSNTreeWalker"); + } + + this.dbImpls = dbImpls; + this.envImpl = dbImpls[0].getEnv(); + /* Make sure all databases are from the same environment. 
*/ + for (DatabaseImpl di : dbImpls) { + EnvironmentImpl ei = di.getEnv(); + if (ei == null) { + throw EnvironmentFailureException.unexpectedState + ("environmentImpl is null for target db " + + di.getDebugName()); + } + + if (ei != this.envImpl) { + throw new IllegalArgumentException + ("Environment.preload() must be called with Databases " + + "which are all in the same Environment. (" + + di.getDebugName() + ")"); + } + } + + this.setDbState = setDbState; + this.rootLsns = rootLsns; + this.callback = callback; + this.savedExceptions = savedExceptions; + this.excPredicate = excPredicate; + } + + void setLSNBatchSize(long lsnBatchSize) { + this.lsnBatchSize = lsnBatchSize; + } + + void setInternalMemoryLimit(long internalMemoryLimit) { + this.internalMemoryLimit = internalMemoryLimit; + } + + private void incInternalMemoryUsage(long increment) { + internalMemoryUsage += increment; + } + + private LSNAccumulator createLSNAccumulator() { + return new LSNAccumulator() { + @Override + void noteMemUsage(long increment) { + incInternalMemoryUsage(increment); + } + }; + } + + /** + * Find all non-resident nodes, and execute the callback. The root IN's + * LSN is not returned to the callback. + */ + public void walk() { + walkInternal(); + } + + void walkInternal() { + + /* + * Phase 1: seed the SLTW with all of the roots of the DatabaseImpl[]. + * For each root, look for all in-memory child nodes and process them + * (i.e. invoke the callback on those LSNs). For child nodes which are + * not in-memory (i.e. they are LSNs only and no Node references), + * accumulate their LSNs to be later sorted and processed during phase + * 2. + */ + LSNAccumulator pendingLSNs = createLSNAccumulator(); + for (int i = 0; i < dbImpls.length; i += 1) { + processRootLSN(dbImpls[i], pendingLSNs, rootLsns[i]); + } + + /* + * Phase 2: Sort and process any LSNs we've gathered so far. For each + * LSN, fetch the target record and process it as in Phase 1 (i.e. + * in-memory children get passed to the callback, not in-memory children + * have their LSN accumulated for later sorting, fetching, and + * processing. + */ + processAccumulatedLSNs(pendingLSNs); + } + + /* + * Retrieve the root for the given DatabaseImpl and then process its + * children. + */ + private void processRootLSN(DatabaseImpl dbImpl, + LSNAccumulator pendingLSNs, + long rootLsn) { + IN root = getOrFetchRootIN(dbImpl, rootLsn); + if (root != null) { + try { + accumulateLSNs(root, pendingLSNs, null, -1); + } finally { + releaseRootIN(root); + } + } + + if (setDbState) { + dbImpl.finishedINListHarvest(); + } + } + + /* + * Traverse the in-memory tree rooted at "parent". For each visited node N + * call the callback method on N and put in pendingLSNs the LSNs of N's + * non-resident children. + * + * On entering this method, parent is latched and remains latched on exit. + */ + private void accumulateLSNs(final IN parent, + final LSNAccumulator pendingLSNs, + final IN ohBinParent, + final int ohBinIndex) { + + final DatabaseImpl db = parent.getDatabase(); + final boolean dups = db.getSortedDuplicates(); + + /* + * Without dups, all BINs contain only LN children. With dups, it + * depends on the dup format. Preload works with the old dup format + * and the new. + * + * In the new dup format (or after dup conversion), BINs contain only + * LNs and no DBINs exist. In the old dup format, DBINs contain only + * LN children, but BINs may contain a mix of LNs and DINs. 
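+         * Hence, with the old format, only a BIN that itself holds
+         * duplicates (a DBIN) is guaranteed to contain nothing but LNs,
+         * which is what the computation just below encodes.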
+ */ + final boolean allChildrenAreLNs; + if (!dups || db.getDupsConverted()) { + allChildrenAreLNs = parent.isBIN(); + } else { + allChildrenAreLNs = parent.isBIN() && parent.containsDuplicates(); + } + + /* + * If LNs are not needed, there is no need to accumulate the child LSNs + * when all children are LNs. + */ + final boolean accumulateChildren = + !allChildrenAreLNs || (dups ? accumulateDupLNs : accumulateLNs); + + final BIN parentBin = parent.isBIN() ? ((BIN) parent) : null; + final OffHeapCache ohCache = envImpl.getOffHeapCache(); + + /* + * Process all children, but only accumulate LSNs for children that are + * not in memory. + */ + for (int i = 0; i < parent.getNEntries(); i += 1) { + + final long lsn = parent.getLsn(i); + Node child = parent.getTarget(i); + final boolean childCached = child != null; + + final byte[] lnKey = + (allChildrenAreLNs || (childCached && child.isLN())) ? + parent.getKey(i) : null; + + if (parentBin != null && parentBin.isDefunct(i)) { + + /* Dirty LNs (deferred write) get special treatment. */ + processDirtyLN(child, lsn, lnKey); + /* continue; */ + + } else if (!childCached && + parentBin != null && + parentBin.getOffHeapLNId(i) != 0) { + + /* Embedded LNs are not stored off-heap */ + assert !parent.isEmbeddedLN(i); + + child = ohCache.loadLN(parentBin, i, CacheMode.UNCHANGED); + assert child != null; + + processChild( + lsn, child, lnKey, parent.getLastLoggedSize(i), + pendingLSNs, null, -1); + + } else if (!childCached && parent.getOffHeapBINId(i) >= 0) { + + child = ohCache.materializeBIN( + envImpl, ohCache.getBINBytes(parent, i)); + + final BIN bin = (BIN) child; + bin.latchNoUpdateLRU(db); + boolean isLatched = true; + + try { + if (bin.isBINDelta()) { + + /* Deltas not allowed with deferred-write. */ + assert (lsn != DbLsn.NULL_LSN); + + /* + * Storing an off-heap reference would use less memory, + * but we prefer to optimize in the future by + * re-implementing preload. + */ + final long fullLsn = bin.getLastFullLsn(); + assert fullLsn != DbLsn.NULL_LSN; + pendingLSNs.add(fullLsn); + addToLsnINMap(fullLsn, parent, i, bin, lsn); + + } else { + + bin.releaseLatch(); + isLatched = false; + + processChild( + lsn, bin, lnKey, parent.getLastLoggedSize(i), + pendingLSNs, parent, i); + } + } finally { + if (isLatched) { + bin.releaseLatch(); + } + } + + } else if (accumulateChildren && + !childCached && + lsn != DbLsn.NULL_LSN) { + + /* + * Child is not in cache. Put its LSN in the current batch of + * LSNs to be sorted and fetched in phase 2. But don't do + * this if the child is an embedded LN. + */ + if (!parent.isEmbeddedLN(i)) { + pendingLSNs.add(lsn); + if (ohBinParent != null) { + addToLsnINMap(lsn, ohBinParent, ohBinIndex); + } else { + addToLsnINMap(lsn, parent, i); + } + } else { + processChild( + DbLsn.NULL_LSN, null /*child*/, lnKey, + 0 /*lastLoggedSize*/, pendingLSNs, null, -1); + } + + } else if (childCached) { + + child.latchShared(); + boolean isLatched = true; + + try { + if (child.isBINDelta()) { + + /* Deltas not allowed with deferred-write. 
*/ + assert (lsn != DbLsn.NULL_LSN); + + final BIN delta = (BIN) child; + final long fullLsn = delta.getLastFullLsn(); + pendingLSNs.add(fullLsn); + addToLsnINMap(fullLsn, parent, i, delta, lsn); + + } else { + + child.releaseLatch(); + isLatched = false; + + processChild( + lsn, child, lnKey, parent.getLastLoggedSize(i), + pendingLSNs, null, -1); + } + } finally { + if (isLatched) { + child.releaseLatch(); + } + } + + } else { + /* + * We are here because the child was not cached and was not + * accumulated either (because it was an LN and LN accumulation + * is turned off or its LSN was NULL). + */ + processChild( + lsn, null /*child*/, lnKey, parent.getLastLoggedSize(i), + pendingLSNs, null, -1); + } + + /* + * If we've exceeded the batch size then process the current + * batch and start a new one. + */ + final boolean internalMemoryExceeded = + internalMemoryUsage > internalMemoryLimit; + + if (pendingLSNs.getNTotalEntries() > lsnBatchSize || + internalMemoryExceeded) { + if (internalMemoryExceeded) { + callback.noteMemoryExceeded(); + } + processAccumulatedLSNs(pendingLSNs); + pendingLSNs.clear(); + } + } + } + + private void processDirtyLN(Node node, long lsn, byte[] lnKey) { + if (node != null && node.isLN()) { + LN ln = (LN) node; + if (ln.isDirty()) { + callback.processDirtyDeletedLN(lsn, ln, lnKey); + } + } + } + + private void processChild( + final long lsn, + final Node child, + final byte[] lnKey, + final int lastLoggedSize, + final LSNAccumulator pendingLSNs, + final IN ohBinParent, + final int ohBinIndex) { + + final boolean childCached = (child != null); + + /* + * If the child is resident, use its log type, else it must be an LN. + */ + callProcessLSNHandleExceptions( + lsn, + (!childCached ? + LogEntryType.LOG_INS_LN /* Any LN type will do */ : + child.getGenericLogType()), + child, lnKey, lastLoggedSize); + + if (childCached && child.isIN()) { + final IN nodeAsIN = (IN) child; + try { + nodeAsIN.latch(CacheMode.UNCHANGED); + accumulateLSNs(nodeAsIN, pendingLSNs, ohBinParent, ohBinIndex); + } finally { + nodeAsIN.releaseLatch(); + } + } + } + + /* + * Process a batch of LSNs by sorting and fetching each of them. + */ + private void processAccumulatedLSNs(LSNAccumulator pendingLSNs) { + + while (!pendingLSNs.isEmpty()) { + final long[] currentLSNs = pendingLSNs.getAndSortPendingLSNs(); + pendingLSNs = createLSNAccumulator(); + for (long lsn : currentLSNs) { + fetchAndProcessLSN(lsn, pendingLSNs); + } + } + } + + /* + * Fetch the node at 'lsn' and callback to let the invoker process it. If + * it is an IN, accumulate LSNs for it. 
+ */ + private void fetchAndProcessLSN(long lsn, LSNAccumulator pendingLSNs) { + + lnKeyEntry.setData(null); + + final FetchResult result = fetchLSNHandleExceptions( + lsn, lnKeyEntry, pendingLSNs); + + if (result == null) { + return; + } + + final boolean isIN = result.node.isIN(); + final IN in; + if (isIN) { + in = (IN) result.node; + in.latch(CacheMode.UNCHANGED); + } else { + in = null; + } + + try { + callProcessLSNHandleExceptions( + lsn, result.node.getGenericLogType(), result.node, + lnKeyEntry.getData(), result.lastLoggedSize); + + if (isIN) { + accumulateLSNs( + in, pendingLSNs, result.ohBinParent, result.ohBinIndex); + } + } finally { + if (isIN) { + in.releaseLatch(); + } + } + } + + private FetchResult fetchLSNHandleExceptions( + long lsn, + DatabaseEntry lnKeyEntry, + LSNAccumulator pendingLSNs) { + + DatabaseException dbe = null; + + try { + return fetchLSN(lsn, lnKeyEntry, pendingLSNs); + + } catch (DatabaseException e) { + if (excPredicate == null || + !excPredicate.ignoreException(e)) { + dbe = e; + } + } + + if (dbe != null) { + if (savedExceptions != null) { + + /* + * This LSN fetch hit a failure. Do as much of the rest of + * the tree as possible. + */ + savedExceptions.add(dbe); + } else { + throw dbe; + } + } + + return null; + } + + private void callProcessLSNHandleExceptions(long childLSN, + LogEntryType childType, + Node theNode, + byte[] lnKey, + int lastLoggedSize) { + DatabaseException dbe = null; + + try { + callback.processLSN( + childLSN, childType, theNode, lnKey, lastLoggedSize); + + } catch (FileNotFoundException e) { + if (excPredicate == null || + !excPredicate.ignoreException(e)) { + dbe = new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } + + } catch (DatabaseException e) { + if (excPredicate == null || + !excPredicate.ignoreException(e)) { + dbe = e; + } + } + + if (dbe != null) { + if (savedExceptions != null) { + + /* + * This LSN fetch hit a failure. Do as much of the rest of + * the tree as possible. + */ + savedExceptions.add(dbe); + } else { + throw dbe; + } + } + } + + /** + * Returns the root IN, latched shared. Allows subclasses to override + * getResidentRootIN and/or getRootIN to modify behavior. + * getResidentRootIN is called first, + */ + private IN getOrFetchRootIN(DatabaseImpl dbImpl, long rootLsn) { + final IN root = getResidentRootIN(dbImpl); + if (root != null) { + return root; + } + if (rootLsn == DbLsn.NULL_LSN) { + return null; + } + return getRootIN(dbImpl, rootLsn); + } + + /** + * The default behavior fetches the rootIN from the log and latches it + * shared. Classes extending this may fetch (and latch) the root from the + * tree. + */ + IN getRootIN(DatabaseImpl dbImpl, long rootLsn) { + final IN root = (IN) + envImpl.getLogManager().getEntryHandleFileNotFound(rootLsn); + if (root == null) { + return null; + } + root.setDatabase(dbImpl); + root.latchShared(CacheMode.DEFAULT); + return root; + } + + /** + * The default behavior returns (and latches shared) the IN if it is + * resident in the Btree, or null otherwise. Classes extending this may + * return (and latch) a known IN object. + */ + IN getResidentRootIN(DatabaseImpl dbImpl) { + return dbImpl.getTree().getResidentRootIN(true /*latched*/); + } + + /** + * Release the latch. Overriding this method should not be necessary. + */ + private void releaseRootIN(IN root) { + root.releaseLatch(); + } + + /** + * Add an LSN-IN/index entry to the map. 
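+     * The entry allows fetchLSN, which receives only an arbitrary LSN,
+     * to find the parent IN and slot index again so the fetched node
+     * can be connected to the proper place in the tree.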
+ */ + private void addToLsnINMap(long lsn, IN in, int index) { + addEntryToLsnMap(lsn, new INEntry(in, index)); + } + + /** + * Add an LSN-IN/index entry, along with a delta and delta LSN, to the map. + */ + private void addToLsnINMap(long lsn, + IN in, + int index, + Object delta, + long deltaLsn) { + addEntryToLsnMap(lsn, new DeltaINEntry(in, index, delta, deltaLsn)); + } + + private void addEntryToLsnMap(long lsn, INEntry inEntry) { + if (lsnINMap.put(lsn, inEntry) == null) { + incInternalMemoryUsage(inEntry.getMemorySize()); + } + } + + private static class FetchResult { + final Node node; + final int lastLoggedSize; + final IN ohBinParent; + final int ohBinIndex; + + FetchResult(final Node node, + final int lastLoggedSize, + final IN ohBinParent, + final int ohBinIndex) { + this.node = node; + this.lastLoggedSize = lastLoggedSize; + this.ohBinParent = ohBinParent; + this.ohBinIndex = ohBinIndex; + } + } + + /* + * Process an LSN. Get & remove its INEntry from the map, then fetch the + * target at the INEntry's IN/index pair. This method will be called in + * sorted LSN order. + */ + private FetchResult fetchLSN( + long lsn, + DatabaseEntry lnKeyEntry, + LSNAccumulator pendingLSNs) { + + final LogManager logManager = envImpl.getLogManager(); + final OffHeapCache ohCache = envImpl.getOffHeapCache(); + + final INEntry inEntry = lsnINMap.remove(lsn); + assert (inEntry != null) : DbLsn.getNoFormatString(lsn); + + incInternalMemoryUsage(- inEntry.getMemorySize()); + + IN in = inEntry.in; + int index = inEntry.index; + + IN ohBinParent = null; + int ohBinIndex = -1; + + IN in1ToUnlatch = null; + IN in2ToUnlatch = null; + + if (!in.isLatchExclusiveOwner()) { + in.latch(); + in1ToUnlatch = in; + } + + final DatabaseImpl dbImpl = in.getDatabase(); + byte[] lnKey = null; + + Node residentNode = in.getTarget(index); + if (residentNode != null) { + residentNode.latch(); + } + + try { + /* + * When the indexed slot contains an off-heap BIN, the node to + * fetch is an LN within the off-heap BIN or the full BIN to merge + * with an off-heap BIN-delta. + */ + Object deltaObject = inEntry.getDelta(); + boolean isOffHeapBinInTree = in.getOffHeapBINId(index) >= 0; + boolean isLnInOffHeapBin = false; + + if (isOffHeapBinInTree && deltaObject == null) { + /* + * When fetching an LN within an off-heap BIN, materialize the + * parent BIN and set in/index to this true parent. + */ + isLnInOffHeapBin = true; + + final BIN ohBin = ohCache.materializeBIN( + envImpl, ohCache.getBINBytes(in, index)); + + int foundIndex = -1; + for (int i = 0; i < ohBin.getNEntries(); i += 1) { + if (ohBin.getLsn(i) == lsn) { + foundIndex = i; + break; + } + } + + if (foundIndex == -1) { + return null; // See note on concurrent activity below. + } + + ohBinParent = in; + ohBinIndex = index; + + in = ohBin; + index = foundIndex; + + in.latchNoUpdateLRU(dbImpl); + in2ToUnlatch = in; + } + + /* + * Concurrent activity (e.g., log cleaning) that was active before + * we took the root latch may have changed the state of a slot. + * Repeat check for LN deletion/expiration and check that the LSN + * has not changed. 
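+             * For example, the cleaner may have migrated the record
+             * since its LSN was accumulated; such a stale LSN is simply
+             * skipped by returning null below.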
+ */ + if (in.isBIN() && ((BIN) in).isDefunct(index)) { + return null; + } + + if (deltaObject == null) { + if (in.getLsn(index) != lsn) { + return null; + } + } else { + if (in.getLsn(index) != inEntry.getDeltaLsn()) { + return null; + } + } + + boolean mutateResidentDeltaToFullBIN = false; + + if (residentNode != null) { + /* + * If the resident node is not a delta then concurrent + * activity (e.g., log cleaning) must have loaded the node. + * Just return it and continue. + */ + if (!residentNode.isBINDelta()) { + if (residentNode.isLN()) { + lnKeyEntry.setData(in.getKey(index)); + } + return new FetchResult( + residentNode, in.getLastLoggedSize(index), null, -1); + } + + /* The resident node is a delta. */ + if (((BIN) residentNode).getLastFullLsn() != lsn) { + return null; // See note on concurrent activity above. + } + mutateResidentDeltaToFullBIN = true; + } + + /* Fetch log entry. */ + final WholeEntry wholeEntry; + try { + wholeEntry = logManager.getWholeLogEntry(lsn); + + } catch (FileNotFoundException e) { + final String msg = + (fetchAndInsertIntoTree() ? + "Preload failed" : + "SortedLSNTreeWalker failed") + + " dbId=" + dbImpl.getId() + + " isOffHeapBinInTree=" + isOffHeapBinInTree + + " isLnInOffHeapBin=" + isLnInOffHeapBin + + " deltaObject=" + (deltaObject != null) + + " residentNode=" + (residentNode != null); + + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + in.makeFetchErrorMsg(msg, lsn, index), e); + } + + final LogEntry entry = wholeEntry.getEntry(); + final int lastLoggedSize = wholeEntry.getHeader().getEntrySize(); + + /* + * For a BIN delta, queue fetching of the full BIN and combine the + * full BIN with the delta when it is processed later (see below). + * + * Note that for preload, this means that a BIN-delta is not placed + * in the tree when there is not enough memory for the full BIN. + * Ideally we should place the BIN-delta in the tree here. + */ + if (entry instanceof BINDeltaLogEntry) { + final BINDeltaLogEntry deltaEntry = (BINDeltaLogEntry) entry; + final long fullLsn = deltaEntry.getPrevFullLsn(); + final BIN delta = deltaEntry.getMainItem(); + pendingLSNs.add(fullLsn); + addToLsnINMap(fullLsn, in, index, delta, lsn); + return null; + } + + if (entry instanceof OldBINDeltaLogEntry) { + final OldBINDelta delta = (OldBINDelta) entry.getMainItem(); + final long fullLsn = delta.getLastFullLsn(); + pendingLSNs.add(fullLsn); + addToLsnINMap(fullLsn, in, index, delta, lsn); + return null; + } + + /* For an LNLogEntry, call postFetchInit and get the lnKey. */ + if (entry instanceof LNLogEntry) { + final LNLogEntry lnEntry = (LNLogEntry) entry; + lnEntry.postFetchInit(dbImpl); + lnKey = lnEntry.getKey(); + lnKeyEntry.setData(lnKey); + } + + /* Get the Node from the LogEntry. */ + final Node ret = (Node) entry.getResolvedItem(dbImpl); + + /* + * For an IN Node, set the database so it will be passed down to + * nested fetches. + */ + long lastLoggedLsn = lsn; + if (ret.isIN()) { + final IN retIn = (IN) ret; + retIn.setDatabase(dbImpl); + } + + /* + * If there is a delta, then this is the full BIN to which the + * delta must be applied. The delta LSN is the last logged LSN. 
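+             * Two cases are handled below: a delta already resident in
+             * cache (mutated in place when inserting into the tree) and
+             * a delta queued via the LSN map (reconstituted into the
+             * fetched full BIN).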
+ */ + if (mutateResidentDeltaToFullBIN) { + final BIN fullBIN = (BIN) ret; + BIN delta = (BIN) residentNode; + if (fetchAndInsertIntoTree()) { + delta.mutateToFullBIN(fullBIN, false /*leaveFreeSlot*/); + + return new FetchResult( + residentNode, lastLoggedSize, ohBinParent, ohBinIndex); + } else { + delta.reconstituteBIN( + dbImpl, fullBIN, false /*leaveFreeSlot*/); + + return new FetchResult( + ret, lastLoggedSize, ohBinParent, ohBinIndex); + } + } + + if (deltaObject != null) { + final BIN fullBIN = (BIN) ret; + + if (deltaObject instanceof OldBINDelta) { + final OldBINDelta delta = (OldBINDelta) deltaObject; + assert lsn == delta.getLastFullLsn(); + delta.reconstituteBIN(dbImpl, fullBIN); + lastLoggedLsn = inEntry.getDeltaLsn(); + } else { + final BIN delta = (BIN) deltaObject; + assert lsn == delta.getLastFullLsn(); + + delta.reconstituteBIN( + dbImpl, fullBIN, false /*leaveFreeSlot*/); + + lastLoggedLsn = inEntry.getDeltaLsn(); + } + } + + assert !ret.isBINDelta(false); + + /* + * When we store an off-heap BIN here, the caller must pass its + * parent/index to accumulateLSNs. + */ + IN retOhBinParent = null; + int retOhBinIndex = -1; + + /* During a preload, finally place the Node into the Tree. */ + if (fetchAndInsertIntoTree()) { + + /* Last logged size is not present before log version 9. */ + in.setLastLoggedSize(index, lastLoggedSize); + + /* + * We don't worry about the memory usage being kept below the + * max by the evictor, since we keep the root INs latched. + */ + final MemoryBudget memBudget = envImpl.getMemoryBudget(); + final boolean storeOffHeap = + preloadIntoOffHeapCache && + memBudget.getCacheMemoryUsage() > memBudget.getMaxMemory(); + + /* + * Note that UINs are always stored in the main cache even if + * it is full. The idea is that LNs and BINs should be evicted + * from main to make room. When the main cache fills with UINs, + * and an off-heap cache is also being filled, we currently + * allow the main cache to overflow. + */ + if (isOffHeapBinInTree || (storeOffHeap && !ret.isUpperIN())) { + if (ret.isLN()) { + /* + * Store LN off-heap. If an oh LN was added to an oh + * BIN we must re-store the oh BIN as well. This is + * inefficient but we don't know of a simple way to + * optimize. + */ + final BIN bin = (BIN) in; + final LN retLn = (LN) ret; + ohCache.storePreloadedLN(bin, index, retLn); + if (isOffHeapBinInTree) { + assert isLnInOffHeapBin; + ohCache.storePreloadedBIN( + bin, ohBinParent, ohBinIndex); + } + } else { + /* + * Store full BIN off-heap. Note that setLastLoggedLSN + * is normally called by postFetchInit or postLoadInit, + * but neither is used during preload so we must call + * setLastLoggedLsn here. + */ + assert !isLnInOffHeapBin; + final BIN retBin = (BIN) ret; + retBin.latchNoUpdateLRU(dbImpl); + retBin.setLastLoggedLsn(lsn); + try { + if (!ohCache.storePreloadedBIN( + retBin, in, index)) { + return null; // could not allocate memory + } + } finally { + retBin.releaseLatch(); + } + retOhBinParent = in; + retOhBinIndex = index; + } + } else { + /* Attach node to the Btree as in a normal operation. */ + if (ret.isIN()) { + final IN retIn = (IN) ret; + retIn.latchNoUpdateLRU(dbImpl); + ret.postFetchInit(dbImpl, lastLoggedLsn); + in.attachNode(index, ret, lnKey); + retIn.releaseLatch(); + } else { + ret.postFetchInit(dbImpl, lastLoggedLsn); + in.attachNode(index, ret, lnKey); + } + + /* BINs with resident LNs shouldn't be in the dirty LRU. 
*/ + if (in.isBIN()) { + final CacheMode mode = + in.getDatabase().getDefaultCacheMode(); + + if (mode != CacheMode.EVICT_LN) { + envImpl.getEvictor().moveToPri1LRU(in); + } + } + } + + /* + * Clear the fetched-cold flag set, since we want the preloaded + * data to be "hot". This is necessary because the node is not + * latched after being preloaded, as it normally would be after + * being attached. + */ + if (ret.isIN()) { + ((IN) ret).setFetchedCold(false); + } else if (ret.isLN()) { + ((LN) ret).setFetchedCold(false); + } + } + + return new FetchResult( + ret, lastLoggedSize, retOhBinParent, retOhBinIndex); + + } finally { + if (residentNode != null) { + residentNode.releaseLatch(); + } + if (in1ToUnlatch != null) { + in1ToUnlatch.releaseLatch(); + } + if (in2ToUnlatch != null) { + in2ToUnlatch.releaseLatch(); + } + } + } + + /* + * Overriden by subclasses if fetch of an LSN should result in insertion + * into tree rather than just instantiating the target. + */ + protected boolean fetchAndInsertIntoTree() { + return false; + } +} diff --git a/src/com/sleepycat/je/dbi/StartupTracker.java b/src/com/sleepycat/je/dbi/StartupTracker.java new file mode 100644 index 0000000..34993ee --- /dev/null +++ b/src/com/sleepycat/je/dbi/StartupTracker.java @@ -0,0 +1,620 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.EnumMap; +import java.util.Formatter; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.ProgressListener; +import com.sleepycat.je.RecoveryProgress; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.recovery.CheckpointEnd; +import com.sleepycat.je.recovery.RecoveryInfo; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Store and calculate elapsed time, counts, and other statistics about + * environment open. No synchronization is used, which generally works because + * with the exception of replication, environment startup is currently a + * serial, single threaded event. Replicated environments must be sure to + * record startup times only at thread safe points. + */ +public class StartupTracker { + + /* + * Statistics are kept about startup phases, defined below. Phases can + * be nested, so the child and root fields are used to express this + * relationship. For example: + * TotalEnvOpen + * TotalRecovery + * FindEndOfLog + * .. + * BuildTree + * ReadMapIN + * .. + * Ckpt + * TotalJoinGroup encompasses the following two phases. + * FindMaster + * BecomeConsistent + * Keep these enums in order of execution, so that the display is easier to + * comprehend. Of course, some phases subsume other phases, but in general, + * this enum order follows the order of execution. 
+ */ + public enum Phase { + TOTAL_ENV_OPEN("Environment Open"), + TOTAL_RECOVERY, + FIND_END_OF_LOG, + FIND_LAST_CKPT, + BUILD_TREE, + READ_MAP_INS, + REDO_MAP_INS, + UNDO_MAP_LNS, + REDO_MAP_LNS, + READ_INS, + REDO_INS, + UNDO_LNS, + REDO_LNS, + POPULATE_UP, + POPULATE_EP, + REMOVE_TEMP_DBS, + CKPT, + TOTAL_JOIN_GROUP("Replication Join Group"), + FIND_MASTER, + BECOME_CONSISTENT; + + private Phase[] children; + private Phase root; + private String reportLabel; + + private Phase() { + } + + private Phase(String reportLabel) { + this.reportLabel = reportLabel; + } + + static { + TOTAL_ENV_OPEN.children = new Phase[] + {TOTAL_RECOVERY}; + TOTAL_RECOVERY.children = new Phase[] + {FIND_END_OF_LOG, + FIND_LAST_CKPT, + BUILD_TREE, + POPULATE_UP, + POPULATE_EP, + REMOVE_TEMP_DBS, + CKPT}; + BUILD_TREE.children = new Phase[] + {READ_MAP_INS, + REDO_MAP_INS, + UNDO_MAP_LNS, + REDO_MAP_LNS, + READ_INS, + REDO_INS, + UNDO_LNS, + REDO_LNS}; + TOTAL_JOIN_GROUP.children = new Phase[] + {FIND_MASTER, + BECOME_CONSISTENT}; + + TOTAL_RECOVERY.root = TOTAL_ENV_OPEN; + FIND_END_OF_LOG.root = TOTAL_ENV_OPEN; + FIND_LAST_CKPT.root = TOTAL_ENV_OPEN; + BUILD_TREE.root = TOTAL_ENV_OPEN; + READ_MAP_INS.root = TOTAL_ENV_OPEN; + REDO_MAP_INS.root = TOTAL_ENV_OPEN; + UNDO_MAP_LNS.root = TOTAL_ENV_OPEN; + REDO_MAP_LNS.root = TOTAL_ENV_OPEN; + READ_INS.root = TOTAL_ENV_OPEN; + REDO_INS.root = TOTAL_ENV_OPEN; + UNDO_LNS.root = TOTAL_ENV_OPEN; + REDO_LNS.root = TOTAL_ENV_OPEN; + POPULATE_UP.root = TOTAL_ENV_OPEN; + POPULATE_EP.root = TOTAL_ENV_OPEN; + REMOVE_TEMP_DBS.root = TOTAL_ENV_OPEN; + CKPT.root = TOTAL_ENV_OPEN; + + FIND_MASTER.root = TOTAL_JOIN_GROUP; + BECOME_CONSISTENT.root = TOTAL_JOIN_GROUP; + } + } + + private final Map elapsed; + private final Map counters; + private final Map stats; + private final Logger logger; + private final EnvironmentImpl envImpl; + private RecoveryInfo info; + private long lastDumpMillis; + + public StartupTracker(EnvironmentImpl envImpl) { + + elapsed = new EnumMap(Phase.class); + counters = new EnumMap(Phase.class); + stats = new EnumMap(Phase.class); + for (Phase p : Phase.values()){ + elapsed.put(p, new Elapsed()); + } + + this.envImpl = envImpl; + + logger = LoggerUtils.getLogger(getClass()); + lastDumpMillis = System.currentTimeMillis(); + } + + public void setRecoveryInfo(RecoveryInfo rInfo) { + info = rInfo; + } + + /** + * Note that a particular phase is starting. + */ + public void start(Phase phase) { + String msg = "Starting " + phase; + if (info != null) { + msg += " " + info; + } + LoggerUtils.logMsg(logger, envImpl, Level.CONFIG, msg); + + elapsed.get(phase).start(); + Counter c = new Counter(); + counters.put(phase, c); + if (!phase.equals(Phase.TOTAL_ENV_OPEN)) { + + /* + * LogManager does not exist yet so we can't reference it. Anyway, + * cache misses are 0 to start with, so TOTAL_ENV_OPEN does not + * have to set the starting cache miss count. + */ + c.setCacheMissStart(envImpl.getLogManager().getNCacheMiss()); + } + } + + /** + * Note that a particular phase is ending. + */ + public void stop(Phase phase) { + Elapsed e = elapsed.get(phase); + e.end(); + Counter c = getCounter(phase); + c.setCacheMissEnd(envImpl.getLogManager().getNCacheMiss()); + + /* Log this phase to the je.info file. 
*/ + String msg = "Stopping " + phase; + if (info != null) { + msg += " " + info; + } + LoggerUtils.logMsg(logger, envImpl, Level.CONFIG, msg); + + /* + * Conditionally log the whole report to the je.info file, either + * because this family of phases has ended, or because this startup + * is taking a very long time. + * + * Take care to only check the value of dumpThreshold here, rather than + * setting it in the StartupTracker constructor, because StartupTracker + * is instantiated before the DbConfigManager and the + * STARTUP_DUMP_THRESHOLD param cannot be read. + */ + int dumpThreshold = envImpl.getConfigManager().getDuration + (EnvironmentParams.STARTUP_DUMP_THRESHOLD); + + /* We're at the end of a family of phases. */ + if (phase.root == null) { + if ((e.getEnd() - e.getStart()) > dumpThreshold) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintStream p = new PrintStream(baos); + displayStats(p, phase); + LoggerUtils.logMsg(logger, envImpl, Level.INFO, + baos.toString()); + return; + } + } + + /* + * It's not the ending phase, but this has been taking a very long + * time, so dump some information. + */ + if ((System.currentTimeMillis() - lastDumpMillis) > dumpThreshold) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintStream p = new PrintStream(baos); + displayInterim(p, phase); + LoggerUtils.logMsg(logger, envImpl, Level.INFO, baos.toString()); + } + } + + /** + * Record new progress states for any registered environment progress + * listener. + */ + public void setProgress(RecoveryProgress progress) { + ProgressListener progressListener = + envImpl.getRecoveryProgressListener(); + + if (progressListener == null) { + return; + } + if (!progressListener.progress(progress, -1, -1)) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.PROGRESS_LISTENER_HALT, + "EnvironmentConfig.recoveryProgressListener: "); + } + } + + /** + * Return the counter for this phase so we can update one of the detail + * values stored there. + */ + public Counter getCounter(Phase phase) { + return counters.get(phase); + } + + /** + * Save stats for a given phase. + */ + public void setStats(Phase phase, StatGroup sg) { + stats.put(phase, sg); + } + + /** + * Generate a description of the four recovery locations (firstActive, + * ckptStart, ckptend, end of Log) and the distance inbetween. + */ + private String displayRecoveryInterval() { + StringBuilder returnInfo = new StringBuilder(); + + CheckpointEnd cEnd = info.checkpointEnd; + if (cEnd != null) { + returnInfo.append("checkpointId = "); + returnInfo.append(cEnd.getId()); + if (cEnd.getInvoker() == null) { + returnInfo.append(" "); + } else { + returnInfo.append("[").append(cEnd.getInvoker()); + returnInfo.append("] "); + } + } + + long fileMax = + envImpl.getConfigManager().getLong(EnvironmentParams.LOG_FILE_MAX); + + long useStart = info.checkpointStartLsn == DbLsn.NULL_LSN ? + 0 : info.checkpointStartLsn; + long head = DbLsn.getNoCleaningDistance(useStart, + info.firstActiveLsn, + fileMax); + + long useEnd = info.checkpointEndLsn == DbLsn.NULL_LSN ? + 0 : info.checkpointEndLsn; + long ckpt = DbLsn.getNoCleaningDistance(useEnd, + info.checkpointStartLsn, + fileMax); + + long useLast = info.lastUsedLsn == DbLsn.NULL_LSN ? 
+ 0 : info.lastUsedLsn; + long tail = DbLsn.getNoCleaningDistance(useLast, + info.checkpointEndLsn, + fileMax); + returnInfo.append( + "firstActive[" + + DbLsn.getNoFormatString(info.firstActiveLsn) + + "], ckptStart[" + + DbLsn.getNoFormatString(info.checkpointStartLsn) + + "], ckptEnd[" + + DbLsn.getNoFormatString(info.checkpointEndLsn) + + "], lastUsed[" + + DbLsn.getNoFormatString(info.lastUsedLsn) + + "]\n"); + StringBuilder sb = new StringBuilder(); + Formatter f = new Formatter(sb); + f.format("%24s bytes = %,d\n%24s bytes = %,d\n%24s bytes = %,d", + "firstActive->ckptStart", head, + "ckptStart->ckptEnd", ckpt, + "ckptEnd->end bytes", tail); + + return returnInfo.toString() + "\nApproximate distances:\n" + + sb.toString(); + } + + private String displayTimestamp(Long time) { + StringBuilder sb = new StringBuilder(); + Formatter timestampFormatter = new Formatter(sb); + timestampFormatter.format("%tD,%tH:%tM:%tS:%tL", + time, time, time, time, time); + return sb.toString(); + } + + /** + * Display a phase and its children, showing elapsed time as a + * percentage of the phases' root. + */ + private void displayPhaseSubtree(PrintStream stream, + Phase parent, + Elapsed parentTime, + Elapsed rootElapsed) { + + String headerFormat = "%24s %% of total %s\n"; + String parentFormat = "%20s %3d %s\n"; + String dataFormat = "%24s %3d %s\n"; + String divider = " "+ + "-------------------------"; + + if (parent.children == null) { + return; + } + + if ((parentTime.getEnd() - parentTime.getStart()) ==0) { + return; + } + + stream.println("\n"); + stream.printf(headerFormat, " ", Elapsed.DISPLAY_COLUMNS); + stream.printf(parentFormat, parent, + parentTime.getPercentage(rootElapsed), parentTime); + stream.println(divider); + + for (Phase child : parent.children) { + Elapsed time = elapsed.get(child); + if (time.getStart() == 0) { + continue; + } + stream.printf(dataFormat, + child, + time.getPercentage(rootElapsed), + time); + } + } + + private void displayCounters(PrintStream stream, Phase root) { + String basicFormat = "%20s %s\n"; + boolean headerNotPrinted = true; + for (Map.Entry c : counters.entrySet()) { + Phase p = c.getKey(); + if (p.root != root) { + continue; + } + Counter counter = c.getValue(); + if (counter.isEmpty()) { + continue; + } + + if (headerNotPrinted) { + stream.println(); + stream.printf(basicFormat, " " , Counter.DISPLAY_COLUMNS); + headerNotPrinted = false; + } + stream.printf(basicFormat, c.getKey(), counter); + } + } + + /** + * Display all information that has been tracked for this family of + * phases. + */ + public void displayStats(PrintStream stream, Phase root ) { + lastDumpMillis = System.currentTimeMillis(); + Elapsed rootTime = elapsed.get(root); + + stream.println("\n=== " + root.reportLabel + " Report ==="); + stream.println("start = " + displayTimestamp(rootTime.getStart())); + stream.println("end = " + displayTimestamp(rootTime.getEnd())); + if (root == Phase.TOTAL_ENV_OPEN) { + stream.print(displayRecoveryInterval()); + } + + /* Elapsed time. 
*/
+        for (Map.Entry<Phase, Elapsed> x : elapsed.entrySet()) {
+            Phase p = x.getKey();
+            if (p.root == null) {
+                if (p != root) {
+                    continue;
+                }
+            } else if (p.root != root) {
+                continue;
+            }
+
+            displayPhaseSubtree(stream, x.getKey(), x.getValue(), rootTime);
+        }
+
+        /* Counters */
+        displayCounters(stream, root);
+
+        /* Stats */
+        for (Map.Entry<Phase, StatGroup> s : stats.entrySet()) {
+            Phase p = s.getKey();
+            if (p.root != root) {
+                continue;
+            }
+            stream.println(s.getKey() + " stats:");
+            stream.println(s.getValue());
+        }
+    }
+
+    /**
+     * Display all information available so far.
+     */
+    private void displayInterim(PrintStream stream, Phase phase) {
+        lastDumpMillis = System.currentTimeMillis();
+
+        stream.println("\n=== Interim " + phase + " Report ===");
+
+        stream.println(displayRecoveryInterval());
+
+        /* Elapsed time. */
+        boolean headerNotPrinted = true;
+        for (Map.Entry<Phase, Elapsed> x : elapsed.entrySet()) {
+            Phase p = x.getKey();
+            Elapsed e = x.getValue();
+            if (e.start == 0) {
+                continue;
+            }
+            if (headerNotPrinted) {
+                stream.println(" Elapsed(ms)");
+                headerNotPrinted = false;
+            }
+            stream.printf("%20s : %s\n", p, e);
+        }
+
+        /* Counters */
+        displayCounters(stream, phase.root);
+
+        /* Stats */
+        for (Map.Entry<Phase, StatGroup> s : stats.entrySet()) {
+            stream.println(s.getKey() + " stats:");
+            stream.println(s.getValue());
+        }
+    }
+
+    /** Measures elapsed time in millisecond granularity. */
+    private static class Elapsed {
+
+        /* For dumping elapsed values in a column */
+        static String DISPLAY_COLUMNS = " Elapsed(ms)";
+
+        private long start;
+        private long end;
+
+        public long getStart() {
+            return start;
+        }
+
+        public long getEnd() {
+            return end;
+        }
+
+        /* Mark the start of a phase. */
+        private void start() {
+            start = System.currentTimeMillis();
+        }
+
+        /* Mark the end of a phase. */
+        private void end() {
+            end = System.currentTimeMillis();
+        }
+
+        private int getPercentage(Elapsed rootTime) {
+            if (rootTime == null) {
+                return 0;
+            }
+
+            long rootTotal = rootTime.end - rootTime.start;
+            if (rootTotal <= 0) {
+                return 0;
+            }
+
+            if (end == 0) {
+                return 0;
+            }
+            return (int) (((float) (end - start) / rootTotal) * 100);
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder();
+            Formatter f = new Formatter(sb);
+            if (end != 0) {
+                f.format("%,13d", (end - start));
+            } else {
+                if (start != 0) {
+                    f.format("%13s %tD,%tH:%tM:%tS:%tL",
+                             "started at", start, start, start, start, start);
+                } else {
+                    f.format("%13s", "none");
+                }
+            }
+            return sb.toString();
+        }
+    }
+
+    /**
+     * Record the number of log entries processed during a given recovery
+     * phase.
+     */
+    public static class Counter {
+        private int numRead;
+        private int numProcessed;
+        private int numDeleted;
+        private int numAux;
+        private long numRepeatIteratorReads;
+        private long startCacheMiss;
+        private long endCacheMiss;
+
+        /* If nothing is set, don't print this one. */
+        private boolean isEmpty() {
+            return ((numRead == 0) &&
+                    (numProcessed == 0) &&
+                    (numDeleted == 0) &&
+                    (numAux == 0) &&
+                    (numRepeatIteratorReads == 0) &&
+                    ((endCacheMiss - startCacheMiss) == 0));
+        }
+
+        public void incNumRead() {
+            numRead++;
+        }
+
+        public void incNumProcessed() {
+            numProcessed++;
+        }
+
+        public void incNumDeleted() {
+            numDeleted++;
+        }
+
+        /**
+         * Keep track of auxiliary log entries processed during this pass.
+         * For example, LNs are the main target of the undoLN pass, but we
+         * also read aborts and commits.
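+         *
+         * Illustrative use during such a pass (editorial sketch; the
+         * Phase.UNDO_LNS constant is hypothetical here):
+         *
+         *   Counter c = tracker.getCounter(Phase.UNDO_LNS);
+         *   c.incNumRead();       // one per log entry examined
+         *   c.incNumProcessed();  // one per LN actually undone
+         *   c.incNumAux();        // one per commit/abort entry read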
+ */ + public void incNumAux() { + numAux++; + } + + public void setRepeatIteratorReads(long repeats) { + numRepeatIteratorReads = repeats; + } + + public void setCacheMissStart(long miss) { + startCacheMiss = miss; + } + + public void setCacheMissEnd(long miss) { + endCacheMiss = miss; + } + + static String DISPLAY_COLUMNS = +" nRead nProcessed nDeleted nAux nRepeatRd nCacheMiss"; + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + Formatter f = new Formatter(sb); + f.format("%,11d%,11d%,11d%,11d%,11d%,11d", + numRead, numProcessed, numDeleted, numAux, + numRepeatIteratorReads, (endCacheMiss - startCacheMiss)); + return sb.toString(); + } + + public int getNumProcessed() { + return numProcessed; + } + } +} diff --git a/src/com/sleepycat/je/dbi/TTL.java b/src/com/sleepycat/je/dbi/TTL.java new file mode 100644 index 0000000..0437f19 --- /dev/null +++ b/src/com/sleepycat/je/dbi/TTL.java @@ -0,0 +1,299 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.dbi; + +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.TimeZone; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.utilint.TestHook; + +/** + * Internal documentation and utility functions for the TTL feature. + * + * Repeatable-read + * ----------------- + * As described in {@link WriteOptions#setTTL}, repeatable-read is supported + * in simple cases by treating a record that expires after being locked as if + * it were not expired. This is implemented and documented in {@link + * CursorImpl#lockLN}. + * + * Unfortunately, we must check for whether a lock is already owned or shared + * by the locker before we attempt to lock the record. To optimize and avoid + * this extra overhead when it is unnecessary, we only do this when a record + * might expire during the transaction, according to the {@link + * EnvironmentParams#ENV_TTL_MAX_TXN_TIME} threshold. + * + * When a slot contains an expired record, {@link CursorImpl#lockLN} returns + * true in the LockStanding.defunct field, just as it does for deleted records. + * That way deleted records and expired records are filtered out of queries in + * the same way. + * + * Locking (read or write locks) also protects a record from being purged. The + * cleaner only considers an LN expired if its lock is uncontended, meaning + * that it could write-lock it. It places locked LNs on the pending LN queue. + * The compressor also only removes an expired slot if its lock is uncontended. + * + * However, if the clock was changed, purging may have occurred. Therefore, + * when an LN being fetched is in a cleaned file (LOG_FILE_NOT_FOUND), we treat + * it as a deleted record if it expires within {@link + * EnvironmentParams#ENV_TTL_CLOCK_TOLERANCE}. Records for which {@link + * IN#fetchLN} returns null must also be filtered out of queries. 
This can
+ * happen even after locking the record and determining that the slot is not
+ * expired.
+ *
+ * To prevent an LN from being purged while an operation is attempting to lock
+ * it, due to thread scheduling, we purge LNs only if they are already expired
+ * by at least {@link EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. This is done to
+ * compensate for the fact that the BIN is not latched by the cleaner when
+ * locking an expired LN, while all other LN locking does latch the BIN. This
+ * also means that, when calculating utilization of a .jdb file, we don't
+ * consider LNs expired until ENV_TTL_MAX_TXN_TIME after their expiration
+ * time.
+ *
+ * There are several special cases involving LNs discovered to be purged after
+ * locking the record. In the cases where the operation fails, the situation
+ * is documented in {@link WriteOptions#setTTL}.
+ *
+ * + For a read operation with a non-null 'data' param, if the LN was
+ *   previously locked but the data was not requested, and the LN is found to
+ *   be purged during the read, the operation fails (returns null).
+ *
+ * + For an update operation with a partial 'data' param, if the LN was
+ *   previously locked (but the data was not requested), and the LN is found
+ *   to be purged during the update, the operation fails (returns null).
+ *
+ * + For an update of a primary record with secondary keys, if the record is
+ *   locked and then we find the LN has been purged, we simply don't delete
+ *   any pre-existing secondary keys. This is OK because those secondary
+ *   records are also expired and will be purged naturally.
+ *
+ * Note that when the expiration time is reduced, including setting it to
+ * zero, no special handling is needed. The update operation itself will
+ * ensure that the expiration times in the BIN and LN are in sync, in the
+ * case of a single record, and that a primary record and its associated
+ * secondary records have expiration times that are in sync. Since expiration
+ * checking always occurs after locking, the updated expiration time will
+ * always be used.
+ *
+ * Secondaries
+ * -----------
+ * Locking also supports repeatable-read for secondaries, as long as the
+ * records being accessed were locked. To make this work when reading via a
+ * secondary, we must lock the secondary if it expires within
+ * {@link EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. Normally we don't lock the
+ * secondary at all in this case, and rely only on the primary record lock.
+ * This extra lock is taken after the primary lock, so locking order is not
+ * violated, i.e., this does not increase the potential for deadlocks.
+ *
+ * When reading via a secondary, if the secondary exists but the primary
+ * record expired (within {@link EnvironmentParams#ENV_TTL_CLOCK_TOLERANCE}),
+ * then we treat the record as deleted.
+ *
+ * When updating or deleting a primary record and its associated secondary
+ * records, we ignore integrity problems if the secondary record has expired
+ * (within {@link EnvironmentParams#ENV_TTL_CLOCK_TOLERANCE}). Specifically,
+ * we ignore the integrity error when: 1. we are deleting the secondary record
+ * and it does not exist; 2. we are updating a secondary record and it does
+ * not exist -- in this case we insert it.
+ */
+public class TTL {
+
+    public static final long MILLIS_PER_HOUR = 1000L * 60 * 60;
+    public static final long MILLIS_PER_DAY = MILLIS_PER_HOUR * 24;
+
+    /* Minimum JE version required for using TTL.
*/ + private static final JEVersion MIN_JE_VERSION = new JEVersion("6.5.0"); + + /* Set by tests to override MIN_JE_VERSION. */ + public static JEVersion TEST_MIN_JE_VERSION = null; + + private static TestHook timeTestHook = null; + + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + private static final SimpleDateFormat TIME_FORMAT = + new SimpleDateFormat("yyyy-MM-dd.HH"); + + static { + TIME_FORMAT.setTimeZone(UTC); + } + + public static JEVersion getMinJEVersion() { + if (TEST_MIN_JE_VERSION != null) { + return TEST_MIN_JE_VERSION; + } + return MIN_JE_VERSION; + } + + /** + * Sets a hook for simulating changes in the clock time that is used in TTL + * processing. + * + * If the hook is non-null, {@link TestHook#getHookValue()} returns the + * value used as the system clock time for all TTL processing. Other + * methods in the hook interface are not used. + *
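+     * For example, a unit test might pin the clock (editorial sketch;
+     * TestHookAdapter stands in for whatever stub implements the unused
+     * TestHook methods):
+     *
+     *   TTL.setTimeTestHook(new TestHookAdapter<Long>() {
+     *       @Override
+     *       public Long getHookValue() {
+     *           return 1500000000000L; // a fixed, hypothetical clock value
+     *       }
+     *   });
+     *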

        + * For unit testing, this might return a fixed time. For stress testing, + * this might return a time that advances more quickly than the real clock. + */ + public static void setTimeTestHook(TestHook hook) { + timeTestHook = hook; + } + + public static long currentSystemTime() { + + if (timeTestHook != null) { + return timeTestHook.getHookValue(); + } + + return System.currentTimeMillis(); + } + + /** + * Translates from expiration days or hours to a Java time in ms. + */ + public static long expirationToSystemTime(final int expiration, + final boolean hours) { + assert expiration >= 0; + + if (expiration == 0) { + return 0; + } + + return expiration * (hours ? MILLIS_PER_HOUR : MILLIS_PER_DAY); + } + + /** + * Translates from the user-supplied ttl parameters to the expiration value + * that we store internally. Validates the ttl parameters as a side effect. + */ + public static int ttlToExpiration(final int ttl, final TimeUnit ttlUnits) { + + if (ttl < 0) { + throw new IllegalArgumentException("Illegal ttl value: " + ttl); + } + + if (ttl == 0) { + return 0; + } + + final int currentTime; + + if (ttlUnits == TimeUnit.DAYS) { + + currentTime = (int) + ((currentSystemTime() + MILLIS_PER_DAY - 1) / + MILLIS_PER_DAY); + + } else if (ttlUnits == TimeUnit.HOURS) { + + currentTime = (int) + ((currentSystemTime() + MILLIS_PER_HOUR - 1) / + MILLIS_PER_HOUR); + + } else { + + throw new IllegalArgumentException( + "ttlUnits not allowed: " + ttlUnits); + } + + return currentTime + ttl; + } + + /** + * Returns whether the given time in millis, when converted to hours, + * rounding up, is not an even multiple of 24. + */ + public static boolean isSystemTimeInHours(final long systemMs) { + + final long hours = (systemMs + MILLIS_PER_HOUR - 1) / MILLIS_PER_HOUR; + + return hours % 24 != 0; + } + + /** + * Converts the user-supplied expirationTime parameter to an internal + * expiration time in days or hours. Assumes that the user parameter is + * evenly divisible by days or hours (call isSystemTimeInHours first). + */ + public static int systemTimeToExpiration(final long systemMs, + final boolean hours) { + return (int) (hours ? + ((systemMs + MILLIS_PER_HOUR - 1) / MILLIS_PER_HOUR) : + ((systemMs + MILLIS_PER_DAY - 1) / MILLIS_PER_DAY)); + } + + /** For logging and debugging output. */ + public static String formatExpiration(final int expiration, + final boolean hours) { + + return formatExpirationTime(expirationToSystemTime(expiration, hours)); + } + + /** For logging and debugging output. */ + public static String formatExpirationTime(final long time) { + + final Date date = new Date(time); + + synchronized (TIME_FORMAT) { + return TIME_FORMAT.format(date); + } + } + + /** + * Returns whether a given expiration time precedes the current system + * time, i.e., the expiration time has passed. + */ + public static boolean isExpired(final int expiration, + final boolean hours) { + return expiration != 0 && + currentSystemTime() > + expirationToSystemTime(expiration, hours); + } + + /** + * Returns whether a given expiration time precedes the current system + * time, i.e., the expiration time has passed. + */ + public static boolean isExpired(final long expirationTime) { + return expirationTime != 0 && + currentSystemTime() > expirationTime; + } + + /** + * Returns whether the given expiration time is LT the current system time + * plus withinMs. withinMs may be negative to check whether the expiration + * time is LT the current system time minus abs(withinMs). 
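+     *
+     * A minimal usage sketch tying these methods together (editorial;
+     * values are hypothetical):
+     *
+     *   int exp = TTL.ttlToExpiration(2, TimeUnit.DAYS); // ~2 days from now
+     *   boolean expired = TTL.isExpired(exp, false);     // hours=false: not yet
+     *   boolean soon = TTL.expiresWithin(
+     *       exp, false, 3 * TTL.MILLIS_PER_DAY);         // true: within 3 days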
+ */ + public static boolean expiresWithin(final int expiration, + final boolean hours, + final long withinMs) { + return expiration != 0 && + currentSystemTime() + withinMs > + expirationToSystemTime(expiration, hours); + } + + /** + * Same as {@link #expiresWithin(int, boolean, long)} but with a single + * expirationTime param. + */ + public static boolean expiresWithin(final long expirationTime, + final long withinMs) { + return expirationTime != 0 && + currentSystemTime() + withinMs > expirationTime; + } +} diff --git a/src/com/sleepycat/je/dbi/TriggerManager.java b/src/com/sleepycat/je/dbi/TriggerManager.java new file mode 100644 index 0000000..2680c06 --- /dev/null +++ b/src/com/sleepycat/je/dbi/TriggerManager.java @@ -0,0 +1,468 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.dbi; + +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.trigger.PersistentTrigger; +import com.sleepycat.je.trigger.TransactionTrigger; +import com.sleepycat.je.trigger.Trigger; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; + +/** + * Class that invokes the triggers associated with a database. It encapsulates + * the mechanics associated with actually invoking a trigger. + */ +public class TriggerManager { + + /** + * Invokes the trigger methods associated with the opening of a database. + */ + public static void runOpenTriggers(Locker locker, + Database db, + boolean isNew) { + + runOpenTriggers(locker, DbInternal.getDbImpl(db), isNew); + } + + /** + * Invokes the trigger methods associated with the opening of a database. + */ + public static void runOpenTriggers(Locker locker, + DatabaseImpl dbImpl, + final boolean isNew) { + + runTriggers(dbImpl, locker, new TriggerInvoker(isNew) { + + @Override + public void run(Transaction triggerTransaction, Trigger dbt) { + if (dbt instanceof PersistentTrigger) { + Environment env = + getOpenTriggerEnvironment(triggerTransaction); + ((PersistentTrigger)dbt).open(triggerTransaction, env, + isNew); + } + } + }); + } + + /** + * Returns the environment handle that will be passed in as an argument to + * a database open trigger. + * + * To ensure that an environment handle is always available, an internal + * handle is created and stored in the EnvironmentImpl. The lifetime of the + * internal handle (Environment or ReplicatedEnvironment) roughly aligns + * with the lifetime of the underlying EnvironmentImpl, or it's subtype + * RepImpl. + * + * For standalone environments, using explicit transactions, the + * environment handle that's passed as the argument is the one used to + * initiate the transaction. 
When using AutoTransactions to open a + * database, the environment argument to the trigger is the internal + * environment handle. + * + * For replicated environments, the argument to the trigger is the internal + * environment handle in all cases. This is done to make the behavior of + * the parameter deterministic and independent of the interaction of the + * application level database open operations with those initiated from the + * "replay" stream. + * + * @param transaction the transaction associated with the trigger + * + * @return the environment or null (if the environment is + * non-transactional) + */ + private static Environment + getOpenTriggerEnvironment(Transaction transaction) { + + if (transaction == null) { + return null; + } + + final EnvironmentImpl envImpl = + DbInternal.getTxn(transaction).getEnvironmentImpl(); + + /* + * Always return the same internal environment handle for replicated + * environments. + */ + if (envImpl.isReplicated()) { + return envImpl.getInternalEnvHandle(); + } + + /* + * Returns the environment handle associated with the transaction. It's + * the internal handle for auto transactions, and the application + * supplied handle used during transaction creation in all other cases. + */ + return DbInternal.getEnvironment(transaction); + } + + /** + * Invokes the trigger methods associated with the closing of a database. + * Note that this also results in the invocation of removeTrigger methods, + * for transient triggers. + */ + public static void runCloseTriggers(Locker locker, DatabaseImpl dbImpl) { + + runTriggers(dbImpl, locker, new TriggerInvoker(false) { + + @Override + public void run(@SuppressWarnings("unused") + Transaction triggerTransaction, Trigger dbt) { + if (dbt instanceof PersistentTrigger) { + ((PersistentTrigger)dbt).close(); + } + } + }); + } + + /** + * Invokes the trigger methods associated with the removal of a database. + * Note that this also results in the invocation of removeTrigger methods. + */ + public static void runRemoveTriggers(Locker locker, + DatabaseImpl dbImpl) { + + runTriggers(dbImpl, locker, new TriggerInvoker(true) { + + @Override + public void run(Transaction triggerTransaction, Trigger dbt) { + if (dbt instanceof PersistentTrigger) { + ((PersistentTrigger)dbt).remove(triggerTransaction); + } + } + }); + + runTriggers(dbImpl, locker, new TriggerInvoker(true) { + + @Override + public void run(Transaction triggerTransaction, Trigger dbt) { + if (dbt instanceof PersistentTrigger) { + ((PersistentTrigger)dbt).removeTrigger(triggerTransaction); + } + } + }); + } + + /** + * Invokes the trigger methods associated with the truncation of a + * database. + */ + public static void runTruncateTriggers(Locker locker, + final DatabaseImpl newDb) { + + runTriggers(newDb, locker, new TriggerInvoker(true) { + + @Override + public void run(Transaction triggerTransaction, + Trigger dbt) { + if (dbt instanceof PersistentTrigger) { + ((PersistentTrigger)dbt).truncate(triggerTransaction); + dbt.setDatabaseName(newDb.getName()); + } + } + }); + } + + /** + * Invokes the trigger methods associated with the renaming of a database. 
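+     *
+     * A minimal sketch of the callback being driven here (editorial; a real
+     * implementation must provide the remaining PersistentTrigger and
+     * Trigger methods as well):
+     *
+     *   class AuditTrigger implements PersistentTrigger {
+     *       public void rename(Transaction txn, String newName) {
+     *           // application bookkeeping; the invoker below then calls
+     *           // setDatabaseName(newName) on this trigger
+     *       }
+     *       // open/close/remove/truncate, put/delete, etc. elided
+     *   }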
+ */ + public static void runRenameTriggers(Locker locker, + DatabaseImpl dbImpl, + final String newName) { + + runTriggers(dbImpl, locker, new TriggerInvoker(true) { + + @Override + public void run(Transaction triggerTransaction, + Trigger dbt) { + + if (dbt instanceof PersistentTrigger) { + ((PersistentTrigger)dbt).rename(triggerTransaction, + newName); + dbt.setDatabaseName(newName); + } + } + }); + } + + /* Transaction level triggers. */ + + /** + * Invokes the trigger methods associated with the commit of a transaction. + * Trigger methods are only invoked if the txn was associated with a + * trigger invocation. + */ + public static void runCommitTriggers(Txn txn) { + + assert txn != null; + + final Set triggerDbs = txn.getTriggerDbs(); + + if (triggerDbs == null) { + return; + } + + for (DatabaseImpl dbImpl : triggerDbs) { + + runTriggers(dbImpl, txn, new TriggerInvoker(false) { + + @Override + public void run(Transaction triggerTransaction, + Trigger dbt) { + if (dbt instanceof TransactionTrigger) { + ((TransactionTrigger)dbt).commit(triggerTransaction); + } + } + }); + } + } + + /** + * Invokes the trigger methods associated with the abort of a transaction. + * Trigger methods are only invoked if the txn was associated with a + * trigger invocation. + */ + public static void runAbortTriggers(Txn txn) { + + assert txn != null; + + final Set triggerDbs = txn.getTriggerDbs(); + + if (triggerDbs == null) { + return; + } + + for (final DatabaseImpl dbImpl : triggerDbs) { + + runTriggers(dbImpl, txn, new TriggerInvoker(false) { + + @Override + public void run(Transaction triggerTransaction, Trigger dbt) { + + if (dbt instanceof TransactionTrigger) { + ((TransactionTrigger)dbt).abort(triggerTransaction); + if (!dbImpl.getName().equals(dbt.getDatabaseName())) { + dbt.setDatabaseName(dbImpl.getName()); + } + } + } + }); + } + } + + /** + * Invokes the trigger methods associated with a put operation. + */ + public static void runPutTriggers(Locker locker, + DatabaseImpl dbImpl, + final DatabaseEntry key, + final DatabaseEntry oldData, + final DatabaseEntry newData) { + assert key != null; + assert newData != null; + + runTriggers(dbImpl, locker, new TriggerInvoker(true) { + + @Override + public void run(Transaction triggerTransaction, Trigger dbt) { + + dbt.put(triggerTransaction, key, oldData, newData); + } + }); + } + + /** + * Invokes the trigger methods associated with a delete operation. + */ + public static void runDeleteTriggers(Locker locker, + DatabaseImpl dbImpl, + final DatabaseEntry key, + final DatabaseEntry oldData) { + assert key != null; + + runTriggers(dbImpl, locker, new TriggerInvoker(true) { + + @Override + public void run(Transaction triggerTransaction, Trigger dbt) { + + dbt.delete(triggerTransaction, key, oldData); + } + }); + } + + /** + * Generic method for invoking any trigger operation. It iterates over all + * the triggers associated with the database and if the trigger fails + * invalidates the environment. + * + * @param dbImpl the database associated with potential triggers + * + * @param locker provides the transaction associated with the operation + * + * @param invoker encapsulates the trigger invoker + */ + private static void runTriggers(final DatabaseImpl dbImpl, + final Locker locker, + TriggerInvoker invoker) { + + final List triggers = dbImpl.getTriggers(); + + if (triggers == null) { + return; + } + + Transaction triggerTransaction = + (locker instanceof Txn) ? 
((Txn)locker).getTransaction() : null;
+
+        try {
+            for (Trigger trigger : triggers) {
+                Trigger dbt = trigger;
+                invoker.run(triggerTransaction, dbt);
+            }
+        } catch (Exception e) {
+            final EnvironmentImpl env = dbImpl.getEnv();
+            throw EnvironmentFailureException.unexpectedException(env, e);
+        }
+
+        /*
+         * Note the use of a trigger for the database so that the appropriate
+         * commit/abort triggers can be run.
+         */
+        if (invoker.invokeTransactionTrigger()) {
+            DbInternal.getTxn(triggerTransaction).noteTriggerDb(dbImpl);
+        }
+    }
+
+    /**
+     * Utility class used to facilitate the dispatch to a trigger method.
+     */
+    private static abstract class TriggerInvoker {
+        /*
+         * Determines whether a subsequent transaction trigger should be
+         * invoked.
+         */
+        final boolean invokeTransactionTrigger;
+
+        public TriggerInvoker(boolean invokeTransactionTrigger) {
+            super();
+            this.invokeTransactionTrigger = invokeTransactionTrigger;
+        }
+
+        /* Runs the specific trigger method. */
+        abstract void run(Transaction triggerTransaction, Trigger dbt);
+
+        /*
+         * Determines whether the subsequent commit/abort trigger should be
+         * invoked.
+         */
+        boolean invokeTransactionTrigger() {
+            return invokeTransactionTrigger;
+        }
+    }
+
+    /**
+     * Invoke the triggers associated with the addition or removal of the
+     * trigger itself. They are typically invoked upon database open, or
+     * database removal.
+     *
+     * @param locker the locker associated with the trigger update operation
+     * @param oldTriggers the current list of triggers
+     * @param newTriggers the new list of triggers
+     */
+    public static void invokeAddRemoveTriggers(Locker locker,
+                                               List<Trigger> oldTriggers,
+                                               List<Trigger> newTriggers) {
+
+        Set<String> oldNames = new MapOver<String, Trigger>(oldTriggers) {
+            @Override
+            protected String fun(Trigger e) {
+                return e.getName();
+            }
+        }.run(new HashSet<String>());
+
+        Set<String> newNames = new MapOver<String, Trigger>(newTriggers) {
+            @Override
+            protected String fun(Trigger e) {
+                return e.getName();
+            }
+        }.run(new HashSet<String>());
+
+        Transaction txn = (locker instanceof Txn) ?
+            ((Txn)locker).getTransaction() : null;
+
+        /* First invoke removeTrigger */
+        if (oldTriggers != null) {
+            for (Trigger trigger : oldTriggers) {
+                if (!newNames.contains(trigger.getName())) {
+                    trigger.removeTrigger(txn);
+                }
+            }
+        }
+
+        /* Now invoke addTrigger */
+        if (newTriggers != null) {
+            for (Trigger trigger : newTriggers) {
+                if (!oldNames.contains(trigger.getName())) {
+                    trigger.addTrigger(txn);
+                }
+            }
+        }
+    }
+
+    /**
+     * Lisp-inspired Map function.
+     *
+     * @param <R> The result element type for the list being returned.
+     * @param <E> The type of the element being mapped over.
+     */
+    public static abstract class MapOver<R, E> {
+        final Collection<E> c;
+
+        public MapOver(Collection<E> c) {
+            this.c = c;
+        }
+
+        @SuppressWarnings("unchecked")
+        public <S extends Collection<R>> S run() {
+            Collection<R> l = new LinkedList<R>();
+            return (S) run(l);
+        }
+
+        public <S extends Collection<R>> S run(S l) {
+            if (c == null) {
+                return l;
+            }
+            for (E e : c) {
+                l.add(fun(e));
+            }
+            return l;
+        }
+
+        /* The function invoked for each element in the collection. */
+        protected abstract R fun(E e);
+    }
+}
diff --git a/src/com/sleepycat/je/dbi/TriggerUtils.java b/src/com/sleepycat/je/dbi/TriggerUtils.java
new file mode 100644
index 0000000..8ee6583
--- /dev/null
+++ b/src/com/sleepycat/je/dbi/TriggerUtils.java
@@ -0,0 +1,164 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.dbi;
+
+import java.nio.ByteBuffer;
+import java.util.LinkedList;
+import java.util.List;
+
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.trigger.Trigger;
+
+/**
+ * Utility functions used by the trigger implementation.
+ */
+public class TriggerUtils {
+
+    /**
+     * Determines the size in bytes used to represent the trigger in the log,
+     * that is, the size of the output generated by
+     * {@link #writeTriggers(ByteBuffer, byte[][])}.
+     *
+     * @param triggerBytes the triggers whose size is to be estimated.
+     *
+     * @return the size in bytes
+     */
+    static int logSize(byte[][] triggerBytes) {
+
+        if (triggerBytes == null) {
+            return LogUtils.getPackedIntLogSize(0);
+        }
+
+        /* Add up the individual trigger sizes */
+        int size = LogUtils.getPackedIntLogSize(triggerBytes.length);
+        for (byte[] trigger : triggerBytes) {
+            size += LogUtils.getByteArrayLogSize(trigger);
+        }
+        return size;
+    }
+
+    /**
+     * Writes the triggers out to the log buffer.
+     *
+     * @param logBuffer the buffer in which the bytes are assembled.
+     *
+     * @param triggerBytes the trigger bytes to be written.
+     */
+    static void writeTriggers(ByteBuffer logBuffer,
+                              byte[][] triggerBytes) {
+        if (triggerBytes == null) {
+            LogUtils.writePackedInt(logBuffer, 0);
+        } else {
+            /* Write out the triggers. */
+            LogUtils.writePackedInt(logBuffer, triggerBytes.length);
+            for (byte[] triggerByte : triggerBytes) {
+                LogUtils.writeByteArray(logBuffer, triggerByte);
+            }
+        }
+    }
+
+    /**
+     * Reads the triggers from a log buffer and returns them in their
+     * serialized byte array form.
+     *
+     * @param logBuffer the buffer from which to read the triggers.
+     * @param entryVersion the version associated with the current log entry
+     *
+     * @return the trigger bytes
+     */
+    static byte[][] readTriggers(ByteBuffer logBuffer,
+                                 int entryVersion) {
+
+        final int triggerCount = LogUtils.readPackedInt(logBuffer);
+        if (triggerCount == 0) {
+            return null;
+        }
+
+        byte[][] triggerBytes = new byte[triggerCount][];
+        for (int i = 0; i < triggerBytes.length; i++) {
+            triggerBytes[i] =
+                LogUtils.readByteArray(logBuffer, false /* unpacked */);
+        }
+        return triggerBytes;
+    }
+
+    /**
+     * Deserializes the trigger representation to yield the trigger object
+     * instance.
+     *
+     * @param dbName the name to be associated with the de-serialized triggers
+     *
+     * @param triggerBytes the serialized representation of the trigger
+     *
+     * @return the list of trigger instances
+     */
+    static LinkedList<Trigger> unmarshallTriggers(String dbName,
+                                                  byte[][] triggerBytes,
+                                                  ClassLoader loader) {
+
+        if (triggerBytes == null) {
+            return null;
+        }
+
+        final LinkedList<Trigger> triggers = new LinkedList<Trigger>();
+        for (int i = 0; i < triggerBytes.length; i++) {
+            final Trigger trigger =
+                (Trigger)DatabaseImpl.bytesToObject(triggerBytes[i],
+                                                    "trigger:" + i,
+                                                    loader);
+            trigger.setDatabaseName(dbName);
+            triggers.add(trigger);
+        }
+        return triggers;
+    }
+
+    /**
+     * Dumps an XML representation of the triggers into the StringBuilder. It
+     * gives preference to the instance representation if it's readily
+     * available.
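+     *
+     * For reference, triggerBytes here is the form produced by writeTriggers
+     * above, i.e., the serialized layout is:
+     *
+     *   packedInt(count) | byteArray(trigger 0) | byteArray(trigger 1) | ...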
+ * + * @param sb the string buffer that will contain the XML representation + * @param triggerBytes the bytes representing the trigger + * @param triggers the trigger instances corresponding to triggerBytes + */ + static void dumpTriggers(StringBuilder sb, + byte[][] triggerBytes, + List triggers) { + + if ((triggerBytes == null) || triggerBytes.length == 0) { + return; + } + + /* Use trigger instances if available, otherwise fallback to + * using the byte arrays. + */ + if ((triggers != null) && (triggers.size() != 0)) { + for (Trigger trigger : triggers) { + sb.append(""); + } + } else { + /* Use the byte array */ + for (int i=0; i < triggerBytes.length; i++) { + sb.append(""); + } + } + } +} diff --git a/src/com/sleepycat/je/dbi/TruncateResult.java b/src/com/sleepycat/je/dbi/TruncateResult.java new file mode 100644 index 0000000..9cc1971 --- /dev/null +++ b/src/com/sleepycat/je/dbi/TruncateResult.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +/** + * Holds the result of a database truncate operation. + */ +public class TruncateResult { + + private DatabaseImpl db; + private int count; + + TruncateResult(DatabaseImpl db, int count) { + this.db = db; + this.count = count; + } + + public DatabaseImpl getDatabase() { + return db; + } + + public int getRecordCount() { + return count; + } +} diff --git a/src/com/sleepycat/je/dbi/TxnStatDefinition.java b/src/com/sleepycat/je/dbi/TxnStatDefinition.java new file mode 100644 index 0000000..62c5ece --- /dev/null +++ b/src/com/sleepycat/je/dbi/TxnStatDefinition.java @@ -0,0 +1,58 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for JE transaction statistics. 
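+ *
+ * These definitions back the transaction stats exposed through the public
+ * API. A minimal read sketch (editorial; env is an open Environment handle):
+ *
+ *   TransactionStats ts = env.getTransactionStats(new StatsConfig());
+ *   long begins  = ts.getNBegins();   // corresponds to TXN_BEGINS
+ *   long commits = ts.getNCommits();  // corresponds to TXN_COMMITS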
+ */ +public class TxnStatDefinition { + + public static final StatDefinition TXN_ACTIVE = + new StatDefinition("nActive", + "Number of transactions that are currently " + + "active."); + + public static final StatDefinition TXN_BEGINS = + new StatDefinition("nBegins", + "Number of transactions that have begun."); + + public static final StatDefinition TXN_ABORTS = + new StatDefinition("nAborts", + "Number of transactions that have aborted."); + + public static final StatDefinition TXN_COMMITS = + new StatDefinition("nCommits", + "Number of transactions that have committed."); + + public static final StatDefinition TXN_XAABORTS = + new StatDefinition("nXAAborts", + "Number of XA transactions that have aborted."); + + public static final StatDefinition TXN_XAPREPARES = + new StatDefinition("nXAPrepares", + "Number of XA transactions that have been " + + "prepared."); + + public static final StatDefinition TXN_XACOMMITS = + new StatDefinition("nXACommits", + "Number of XA transactions that have committed."); + + public static final StatDefinition TXN_ACTIVE_TXNS = + new StatDefinition("activeTxns", + "Array of active transactions. Each element of " + + "the array is an object of type " + + "Transaction.Active."); +} diff --git a/src/com/sleepycat/je/dbi/VLSNProxy.java b/src/com/sleepycat/je/dbi/VLSNProxy.java new file mode 100644 index 0000000..a191a46 --- /dev/null +++ b/src/com/sleepycat/je/dbi/VLSNProxy.java @@ -0,0 +1,27 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.entry.LogEntry; + +/** + * The VLSNProxy is a handle for invoking VLSN tracking at recovery time. + */ +public interface VLSNProxy { + + public void trackMapping(long lsn, + LogEntryHeader currentEntryHeader, + LogEntry targetLogEntry); +} diff --git a/src/com/sleepycat/je/dbi/package-info.java b/src/com/sleepycat/je/dbi/package-info.java new file mode 100644 index 0000000..d482f3d --- /dev/null +++ b/src/com/sleepycat/je/dbi/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Underlying XxxImpl classes for Environment, Database and Cursor, + * plus other misc classes (originally, dbi meant "db internal interface"). 
+ */ +package com.sleepycat.je.dbi; \ No newline at end of file diff --git a/src/com/sleepycat/je/evictor/Arbiter.java b/src/com/sleepycat/je/evictor/Arbiter.java new file mode 100644 index 0000000..4fe8513 --- /dev/null +++ b/src/com/sleepycat/je/evictor/Arbiter.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.utilint.TestHook; + +/** + * The Arbiter determines whether eviction should occur, by consulting the + * memory budget. + */ +class Arbiter { + + private final MemoryBudget.Totals memBudgetTotals; + + /* Debugging and unit test support. */ + private TestHook runnableHook; + + /* je.evictor.evictBytes */ + private final long evictBytesSetting; + + Arbiter(EnvironmentImpl envImpl) { + + DbConfigManager configManager = envImpl.getConfigManager(); + + evictBytesSetting = configManager.getLong( + EnvironmentParams.EVICTOR_EVICT_BYTES); + + memBudgetTotals = envImpl.getMemoryBudget().getTotals(); + } + + /** + * Return true if the memory budget is overspent. + */ + boolean isOverBudget() { + + return memBudgetTotals.getCacheUsage() > + memBudgetTotals.getMaxMemory(); + } + + /** + * Do a check on whether synchronous eviction is needed. + * + * Note that this method is intentionally not synchronized in order to + * minimize overhead when checking for critical eviction. This method is + * called from application threads for every cursor operation. + */ + boolean needCriticalEviction() { + + final long over = memBudgetTotals.getCacheUsage() - + memBudgetTotals.getMaxMemory(); + + return (over > memBudgetTotals.getCriticalThreshold()); + } + + /** + * Do a check on whether the cache should still be subject to eviction. + * + * Note that this method is intentionally not synchronized in order to + * minimize overhead, because it's checked on every iteration of the + * evict batch loop. + */ + boolean stillNeedsEviction() { + + return (memBudgetTotals.getCacheUsage() + evictBytesSetting) > + memBudgetTotals.getMaxMemory(); + } + + /** + * Return non zero number of bytes if eviction should happen. Caps the + * number of bytes a single thread will try to evict. + */ + long getEvictionPledge() { + + long currentUsage = memBudgetTotals.getCacheUsage(); + long maxMem = memBudgetTotals.getMaxMemory(); + + long overBudget = currentUsage - maxMem; + boolean doRun = (overBudget > 0); + + long requiredEvictBytes = 0; + + /* If running, figure out how much to evict. */ + if (doRun) { + requiredEvictBytes = overBudget + evictBytesSetting; + /* Don't evict more than 50% of the cache. */ + if (currentUsage - requiredEvictBytes < maxMem / 2) { + requiredEvictBytes = overBudget + (maxMem / 2); + } + } + + /* Unit testing, force eviction. 
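+         *
+         * (Illustrative arithmetic for the normal path above: with
+         * maxMem = 100MB, currentUsage = 110MB and evictBytesSetting = 512KB,
+         * overBudget is 10MB and requiredEvictBytes is 10.5MB; the 50% cap
+         * does not apply, since 110MB - 10.5MB is well above maxMem / 2.)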
*/
+        if (runnableHook != null) {
+            doRun = runnableHook.getHookValue();
+            if (doRun) {
+                requiredEvictBytes = maxMem;
+            } else {
+                requiredEvictBytes = 0;
+            }
+        }
+        return requiredEvictBytes;
+    }
+
+    /* For unit testing only. */
+    void setRunnableHook(TestHook hook) {
+        runnableHook = hook;
+    }
+}
diff --git a/src/com/sleepycat/je/evictor/CHeapAllocator.java b/src/com/sleepycat/je/evictor/CHeapAllocator.java
new file mode 100644
index 0000000..66a40d1
--- /dev/null
+++ b/src/com/sleepycat/je/evictor/CHeapAllocator.java
@@ -0,0 +1,230 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.evictor;
+
+import java.lang.reflect.Field;
+import java.util.concurrent.atomic.AtomicLong;
+
+import sun.misc.Unsafe;
+
+/**
+ * The default implementation of the off-heap allocator.
+ *
+ * Uses the sun.misc.Unsafe class to call the native 'malloc' and 'free'
+ * functions to allocate memory from the 'C' runtime heap.
+ *
+ * This class should not be referenced symbolically by any other class.
+ * This is necessary to avoid a linkage error if JE is run on a JVM without
+ * the Unsafe class. The {@link OffHeapAllocatorFactory} loads this class by
+ * name, using reflection.
+ */
+class CHeapAllocator implements OffHeapAllocator {
+
+    /*
+     * We should probably always perform bounds checking, since going out of
+     * bounds is likely to crash the JVM.
+     */
+    private static final boolean CHECK_BOUNDS = true;
+
+    /* Number of bytes for storing the int block size. */
+    private static final int SIZE_BYTES = 4;
+
+    private final Unsafe unsafe;
+    private final AtomicLong usedBytes = new AtomicLong(0);
+
+    public CHeapAllocator() {
+
+        /*
+         * We cannot call Unsafe.getUnsafe because it throws
+         * SecurityException when called from a non-bootstrap class. Getting
+         * the static field (that would be returned by Unsafe.getUnsafe) is
+         * better than calling the Unsafe private constructor, since Unsafe
+         * is intended to have a singleton instance.
+         */
+        try {
+            final Field field = Unsafe.class.getDeclaredField("theUnsafe");
+            field.setAccessible(true);
+            unsafe = (Unsafe) field.get(null);
+        } catch (Throwable e) {
+            throw new UnsupportedOperationException(
+                "Unable to get Unsafe object", e);
+        }
+
+        if (unsafe == null) {
+            throw new UnsupportedOperationException(
+                "Unsafe singleton is null");
+        }
+
+        /*
+         * Check for seemingly obvious byte and int sizes, to ensure that the
+         * JVM isn't doing something strange.
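+         *
+         * (For reference, the block layout used by allocate/size/free below
+         * is a 4-byte size header followed by the payload. Illustrative
+         * arithmetic: allocate(100) mallocs 104 bytes, and usedBytes grows
+         * by addOverhead(104) = 104 + (104 % 8) + 16 = 120 bytes.)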
+ */ + if (Unsafe.ARRAY_BYTE_INDEX_SCALE != 1) { + throw new UnsupportedOperationException( + "Unexpected Unsafe.ARRAY_BYTE_INDEX_SCALE: " + + Unsafe.ARRAY_BYTE_INDEX_SCALE); + } + if (Unsafe.ARRAY_INT_INDEX_SCALE != SIZE_BYTES) { + throw new UnsupportedOperationException( + "Unexpected Unsafe.ARRAY_INT_INDEX_SCALE: " + + Unsafe.ARRAY_INT_INDEX_SCALE); + } + } + + @Override + public void setMaxBytes(long maxBytes) { + } + + @Override + public long getUsedBytes() { + + return usedBytes.get(); + } + + @Override + public long allocate(int size) { + + final int allocSize = size + SIZE_BYTES; + final long memId = unsafe.allocateMemory(allocSize); + + unsafe.putInt(memId, size); + unsafe.setMemory(memId + SIZE_BYTES, size, (byte) 0); + usedBytes.addAndGet(addOverhead(allocSize)); + + return memId; + } + + @Override + public int free(long memId) { + + final int totalSize = addOverhead(size(memId) + SIZE_BYTES); + unsafe.freeMemory(memId); + usedBytes.addAndGet(0 - totalSize); + return totalSize; + } + + private int addOverhead(int allocSize) { + + /* TODO: There is 70 bytes added overhead when using the IBM JDK. */ + + /* Blocks are aligned on 8 byte boundaries with a 16 byte header. */ + allocSize += (allocSize % 8) + 16; + + /* The minimum block size is 24 bytes. */ + return (allocSize < 24) ? 24 : allocSize; + } + + @Override + public int size(long memId) { + + return unsafe.getInt(memId); + } + + @Override + public int totalSize(long memId) { + + return addOverhead(size(memId) + SIZE_BYTES); + } + + @Override + public void copy(long memId, int memOff, byte[] buf, int bufOff, int len) { + + if (CHECK_BOUNDS) { + if (memId == 0) { + throw new NullPointerException("memId is 0"); + } + if (buf == null) { + throw new NullPointerException("buf is null"); + } + if (memOff < 0 || memOff + len > size(memId)) { + throw new IndexOutOfBoundsException( + "memOff=" + memOff + + " memSize=" + size(memId) + + " copyLen=" + len); + } + if (bufOff < 0 || bufOff + len > buf.length) { + throw new IndexOutOfBoundsException( + "bufOff=" + bufOff + + " bufSize=" + buf.length + + " copyLen=" + len); + } + } + + unsafe.copyMemory( + null, memId + SIZE_BYTES + memOff, + buf, Unsafe.ARRAY_BYTE_BASE_OFFSET + bufOff, + len); + } + + @Override + public void copy(byte[] buf, int bufOff, long memId, int memOff, int len) { + + if (CHECK_BOUNDS) { + if (memId == 0) { + throw new NullPointerException("memId is 0"); + } + if (buf == null) { + throw new NullPointerException("buf is null"); + } + if (memOff < 0 || memOff + len > size(memId)) { + throw new IndexOutOfBoundsException( + "memOff=" + memOff + + " memSize=" + size(memId) + + " copyLen=" + len); + } + if (bufOff < 0 || bufOff + len > buf.length) { + throw new IndexOutOfBoundsException( + "bufOff=" + bufOff + + " bufSize=" + buf.length + + " copyLen=" + len); + } + } + + unsafe.copyMemory( + buf, Unsafe.ARRAY_BYTE_BASE_OFFSET + bufOff, + null, memId + SIZE_BYTES + memOff, + len); + } + + @Override + public void copy(long fromMemId, + int fromMemOff, + long toMemId, + int toMemOff, + int len) { + + if (CHECK_BOUNDS) { + if (fromMemId == 0 || toMemId == 0) { + throw new NullPointerException("memId is 0"); + } + if (fromMemOff < 0 || fromMemOff + len > size(fromMemId)) { + throw new IndexOutOfBoundsException( + "memOff=" + fromMemOff + + " memSize=" + size(fromMemId) + + " copyLen=" + len); + } + if (toMemOff < 0 || toMemOff + len > size(toMemId)) { + throw new IndexOutOfBoundsException( + "memOff=" + toMemOff + + " memSize=" + size(toMemId) + + " copyLen=" + len); + } + } 
+ + unsafe.copyMemory( + null, fromMemId + SIZE_BYTES + fromMemOff, + null, toMemId + SIZE_BYTES + toMemOff, + len); + } +} diff --git a/src/com/sleepycat/je/evictor/DummyAllocator.java b/src/com/sleepycat/je/evictor/DummyAllocator.java new file mode 100644 index 0000000..a385ab4 --- /dev/null +++ b/src/com/sleepycat/je/evictor/DummyAllocator.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +/** + * Allocator that always fails to allocate. + */ +class DummyAllocator implements OffHeapAllocator { + + static final DummyAllocator INSTANCE = new DummyAllocator(); + + private DummyAllocator() { + } + + @Override + public void setMaxBytes(long maxBytes) { + } + + @Override + public long getUsedBytes() { + return 0; + } + + @Override + public long allocate(int size) { + return 0; + } + + @Override + public int free(long memId) { + return 0; + } + + @Override + public int size(long memId) { + return 0; + } + + @Override + public int totalSize(long memId) { + return 0; + } + + @Override + public void copy(long memId, int memOff, byte[] buf, int bufOff, int len) { + } + + @Override + public void copy(byte[] buf, int bufOff, long memId, int memOff, int len) { + } + + @Override + public void copy( + long fromMemId, int fromMemOff, long toMemId, int toMemOff, int len) { + } +} diff --git a/src/com/sleepycat/je/evictor/Evictor.java b/src/com/sleepycat/je/evictor/Evictor.java new file mode 100644 index 0000000..72e30de --- /dev/null +++ b/src/com/sleepycat/je/evictor/Evictor.java @@ -0,0 +1,3415 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.evictor; + +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_DELTA_BLIND_OPS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_DELTA_FETCH_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH_MISS_RATIO; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_COMPACT_KEY; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_NO_TARGET; +import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_SPARSE_TARGET; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_DIRTY_NODES_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_EVICTION_RUNS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_LNS_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_MOVED_TO_PRI2_LRU; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_MUTATED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_PUT_BACK; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_SKIPPED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_STRIPPED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_TARGETED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_ROOT_NODES_EVICTED; +import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_SHARED_CACHE_ENVS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.FULL_BIN_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.GROUP_DESC; +import static com.sleepycat.je.evictor.EvictorStatDefinition.GROUP_NAME; +import static com.sleepycat.je.evictor.EvictorStatDefinition.LN_FETCH; +import static com.sleepycat.je.evictor.EvictorStatDefinition.LN_FETCH_MISS; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_CACHEMODE_DESC; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_CACHEMODE_NAME; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_CRITICAL_DESC; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_CRITICAL_NAME; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_DAEMON_DESC; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_DAEMON_NAME; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_EVICTORTHREAD_DESC; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_EVICTORTHREAD_NAME; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_MANUAL_DESC; +import static com.sleepycat.je.evictor.EvictorStatDefinition.N_BYTES_EVICTED_MANUAL_NAME; +import static com.sleepycat.je.evictor.EvictorStatDefinition.PRI1_LRU_SIZE; +import static com.sleepycat.je.evictor.EvictorStatDefinition.PRI2_LRU_SIZE; +import static com.sleepycat.je.evictor.EvictorStatDefinition.THREAD_UNAVAILABLE; +import static com.sleepycat.je.evictor.EvictorStatDefinition.UPPER_IN_FETCH; +import static com.sleepycat.je.evictor.EvictorStatDefinition.UPPER_IN_FETCH_MISS; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import 
java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvConfigObserver;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.Provisional;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.ChildReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.SearchResult;
+import com.sleepycat.je.tree.Tree;
+import com.sleepycat.je.tree.WithRootLatched;
+import com.sleepycat.je.utilint.AtomicLongStat;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.FloatStat;
+import com.sleepycat.je.utilint.IntStat;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.StatDefinition;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.je.utilint.StoppableThreadFactory;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/**
+ *
+ * Overview
+ * --------
+ *
+ * The Evictor is responsible for managing the JE cache. The cache is
+ * actually a collection of in-memory btree nodes, implemented by the
+ * com.sleepycat.je.dbi.INList class. A subset of the nodes in the INList
+ * are candidates for eviction. This subset is tracked in one or more
+ * LRULists, which are maintained by the Evictor. When a node is evicted,
+ * it is detached from its containing BTree and then removed from the INList
+ * and from its containing LRUList. Once all references to an evicted node
+ * are removed, it can be GC'd by the JVM.
+ *
+ * The Evictor owns a pool of threads that are available to handle eviction
+ * tasks. The eviction pool is a standard java.util.concurrent thread pool,
+ * and can be mutably configured in terms of core threads, max threads, and
+ * keepalive times.
+ *
+ * Eviction is carried out by three types of threads:
+ * 1. An application thread, in the course of doing critical eviction.
+ * 2. Daemon threads, such as the cleaner or INCompressor, in the course of
+ *    doing their respective duties.
+ * 3. Eviction pool threads.
+ *
+ * Memory consumption is tracked by the MemoryBudget. The Arbiter, which is
+ * also owned by the Evictor, is used to query the MemoryBudget and determine
+ * whether eviction is actually needed, and if so, how many bytes should be
+ * evicted by an evicting thread.
+ *
+ * Multiple threads can do eviction concurrently. As a result, it's important
+ * that eviction is both thread safe and as parallel as possible.
+ * Memory thresholds are generally accounted for in an unsynchronized
+ * fashion, and are seen as advisory. The only point of true synchronization
+ * is around the selection of a node for eviction. The act of eviction itself
+ * can be done concurrently.
+ *
+ * The eviction method is not reentrant, and a simple concurrent hash map
+ * of threads is used to prevent recursive calls.
+ *
+ * Details on the implementation of the LRU-based eviction policy
+ * --------------------------------------------------------------
+ *
+ * ------------------
+ * Data structures
+ * ------------------
+ *
+ * An LRU eviction policy is approximated by one or more LRULists. An LRUList
+ * is a doubly linked list consisting of BTree nodes. If a node participates
+ * in an LRUList, then whenever it is accessed, it moves to the "back" of the
+ * list. When eviction is needed, the evictor evicts the nodes at the "front"
+ * of the LRULists.
+ *
+ * An LRUList is implemented as 2 IN references: a "front" ref pointing to the
+ * IN at the front of the list and a "back" ref, pointing to the IN at the back
+ * of the list. In addition, each IN has "nextLRUNode" and "prevLRUNode" refs
+ * for participating in an LRUList. This implementation works because an IN can
+ * belong to at most 1 LRUList at a time. Furthermore, it is the responsibility
+ * of the Evictor to know which LRUList a node belongs to at any given time
+ * (more on this below). As a result, each LRUList can assume that a node will
+ * either not be in any list at all, or will belong to "this" list. This way,
+ * membership of a node in an LRUList can be tested by just checking that
+ * either the nextLRUNode or prevLRUNode field of the node is non-null.
+ *
+ * The operations on an LRUList are:
+ *
+ * - addBack(IN) :
+ *   Insert an IN at the back of the list. Assert that the node does not
+ *   belong to an LRUList already.
+ *
+ * - addFront(IN) :
+ *   Insert an IN at the front of the list. Assert that the node does not
+ *   belong to an LRUList already.
+ *
+ * - moveBack(IN) :
+ *   Move an IN to the back of the list, if it is in the list already. Noop
+ *   if the node is not in the list.
+ *
+ * - moveFront(IN) :
+ *   Move an IN to the front of the list, if it is in the list already. Noop
+ *   if the node is not in the list.
+ *
+ * - removeFront() :
+ *   Remove the IN at the front of the list and return it to the caller.
+ *   Return null if the list is empty.
+ *
+ * - remove(IN) :
+ *   Remove the IN from the list, if it is there. Return true if the node was
+ *   in the list, false otherwise.
+ *
+ * - contains(IN):
+ *   Return true if the node is contained in the list, false otherwise.
+ *
+ * All of the above methods are synchronized on the LRUList object. This may
+ * create a synchronization bottleneck. To alleviate this, the Evictor uses
+ * multiple LRULists, which taken together comprise a logical LRU list, called
+ * an LRUSet. The number of LRULists per LRUSet (numLRULists) is fixed and
+ * determined by a config parameter (max of 64). The LRULists are stored in
+ * an array whose length is numLRULists.
+ *
+ * The Evictor actually maintains 2 LRUSets: priority-1 and priority-2.
+ * Within an LRUSet, the nodeId is used to place a node in an LRUList: a
+ * node with id N goes to the (N % numLRULists)-th list. In addition, each
+ * node has a flag (isInPri2LRU) to identify which LRUSet it belongs to.
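+ *
+ * To make that concrete, here is a minimal sketch (illustrative only, not
+ * verbatim from the methods below) of how a node is mapped to its LRUList
+ * and how membership is tested:
+ *
+ *   LRUList listOf(IN node, LRUList[] lruSet) {
+ *       // a node with id N always lives in the (N % numLRULists)-th list
+ *       return lruSet[(int) (node.getNodeId() % numLRULists)];
+ *   }
+ *
+ *   boolean isInLRU(IN node) {
+ *       // non-null link fields <=> the node is in "its" list
+ *       return node.getNextLRUNode() != null;
+ *   }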
+ * This way, the Evictor knows which LRUList a node should belong to, and
+ * accesses the appropriate LRUList instance when it needs to add/remove/move
+ * a node within the LRU.
+ *
+ * Access to the isInPri2LRU flag is synchronized via the SH/EX node latch.
+ *
+ * When there is no off-heap cache configured, the priority-1 LRU is the
+ * "mixed" one and the priority-2 LRU is the "dirty" one. When there is an
+ * off-heap cache configured, the priority-1 LRU is the "normal" one and the
+ * priority-2 LRU is the "level-2" one.
+ *
+ * Justification for the mixed and dirty LRUSets: We would like to keep dirty
+ * INs in memory as much as possible to achieve "write absorption". Ideally,
+ * dirty INs should be logged by the checkpointer only. So, we would like to
+ * have the option in the Evictor to choose a clean IN to evict over a dirty
+ * IN, even if the dirty IN is colder than the clean IN. In this mode, having
+ * a single LRUSet will not perform very well in the situation when most (or
+ * a lot) of the INs are dirty (because each time we get a dirty IN from an
+ * LRUList, we would have to put it back to the list and try another IN until
+ * we find a clean one, thus spending a lot of CPU time trying to select an
+ * eviction target).
+ *
+ * Justification for the normal and level-2 LRUSets: With an off-heap cache,
+ * if level-2 INs were not treated specially, the main cache evictor may run
+ * out of space and (according to LRU) evict a level 2 IN, even though the IN
+ * references off-heap BINs (which will also be evicted). The problem is that
+ * we really don't want to evict the off-heap BINs (or their LNs) when the
+ * off-heap cache is not full. Therefore we only evict level-2 INs with
+ * off-heap children when there are no other nodes that can be evicted. A
+ * level-2 IN is moved to the priority-2 LRUSet when it is encountered by the
+ * evictor in the priority-1 LRUSet.
+ *
+ * Within each LRUSet, picking an LRUList to evict from is done in a
+ * round-robin fashion. To this end, the Evictor maintains 2 int counters:
+ * nextPri1LRUList and nextPri2LRUList. To evict from the priority-1 LRUSet, an
+ * evicting thread picks the (nextPri1LRUList % numLRULists)-th list, and
+ * then increments nextPri1LRUList. Similarly, to evict from the priority-2
+ * LRUSet, an evicting thread picks the (nextPri2LRUList % numLRULists)-th
+ * list, and then increments nextPri2LRUList. This does not have to be done in
+ * a synchronized way.
+ *
+ * A new flag (called hasCachedChildren) is added to each IN to indicate
+ * whether the IN has cached children or not. This flag is used and maintained
+ * for upper INs (UINs) only. The need for this flag is explained below.
+ * Access to this flag is synchronized via the SH/EX node latch.
+ *
+ * ---------------------------------------------------------------------------
+ * LRUSet management: adding/removing/moving INs in/out of/within the LRUSets
+ * ---------------------------------------------------------------------------
+ *
+ * We don't want to track upper IN (UIN) nodes that have cached children.
+ * There are 2 reasons for this: (a) we cannot evict UINs with cached children
+ * (the children must be evicted first) and (b) UINs will normally have high
+ * access rate, and would add a lot of CPU overhead if they were tracked.
+ *
+ * The hasCachedChildren flag is used as a quick way to determine whether a
+ * UIN has cached children or not.
+ *
+ * Adding a node to the LRU.
+ * -------------------------
+ *
+ * An IN N is added in an LRUSet via one of the following Evictor methods:
+ * addBack(IN), addFront(IN), pri2AddBack(IN), or pri2AddFront(IN). The
+ * first 2 add the node to the priority-1 LRUSet and set its isInPri2LRU flag
+ * to false. The last 2 add the node to the priority-2 LRUSet and set its
+ * isInPri2LRU flag to true.
+ *
+ * Note: DINs and DBINs are never added to the LRU.
+ *
+ * A node N is added to the LRU in the following situations:
+ *
+ * 1. N is fetched into memory from the log. Evictor.addBack(N) is called
+ *    inside IN.postFetchInit() (just before N is connected to its parent).
+ *
+ * 2. N is a brand new node created during a split, and either N is a BIN or
+ *    N does not get any cached children from its split sibling.
+ *    Evictor.addFront(N) is called if N is a BIN and the cachemode is
+ *    MAKE_COLD or EVICT_BIN. Otherwise, Evictor.addBack(child) is called.
+ *
+ * 3. N is a UIN that is being split, and before the split it had cached
+ *    children, but all its cached children have now moved to its newly
+ *    created sibling. Evictor.addBack(N) is called in this case.
+ *
+ * 4. N is a UIN that loses its last cached child (either because the child is
+ *    evicted or it is deleted). Evictor.addBack(N) is called inside
+ *    IN.setTarget(), if the target is null, N is a UIN, N's hasCachedChildren
+ *    flag is true, and, after setting the target to null, N has no remaining
+ *    cached children.
+ *
+ * 5. N is the 1st BIN in a brand new tree. In this case, Evictor.addBack(N)
+ *    is called inside Tree.findBinForInsert().
+ *
+ * 6. N is a node visited during IN.rebuildINList() and N is either a BIN or
+ *    a UIN with no cached children.
+ *
+ * 7. An evicting thread T removes N from the LRU, but after T EX-latches N,
+ *    it determines that N is not evictable or should not be evicted, and
+ *    should be put back in the LRU. T puts N back to the LRU using one of
+ *    the above 4 methods (for details, read about the eviction processing
+ *    below), but ONLY IF (a) N is still in the INList, and (b) N is not in
+ *    the LRU already.
+ *
+ *    Case (b) can happen if N is a UIN and after T removed N from the LRU
+ *    but before T could latch N, another thread T1 added a child to N and
+ *    removed that child. Thus, by item 4 above, T1 adds N back to the LRU.
+ *    Furthermore, since N is now back in the LRU, case (a) can now happen
+ *    as well if another thread can evict N before T latches it.
+ *
+ * 8. When the checkpointer (or any other thread/operation) cleans a dirty IN,
+ *    it must move it from the priority-2 LRUSet (if there) to the priority-1
+ *    one. This is done via the Evictor.moveToPri1LRU(N) method: If the
+ *    isInPri2LRU flag of N is true, LRUList.remove(N) is called to remove
+ *    the node from the priority-2 LRUSet. If N was indeed in the priority-2
+ *    LRUSet (i.e., LRUList.remove() returns true), addBack(N) is called to
+ *    put it in the priority-1 LRUSet.
+ *
+ *    By moving N to the priority-1 LRUSet only after atomically removing it
+ *    from the priority-2 LRUSet and checking that it was indeed there, we
+ *    prevent N from being added into the LRU if N has been or would be
+ *    removed from the LRU by a concurrently running evicting thread.
+ *
+ * In cases 2, 3, 4, 5, 7, and 8 N is EX-latched. In case 1, the node is not
+ * latched, but it is inaccessible by any other threads because it is not
+ * connected to its parent yet and the parent is EX-latched (but N has already
+ * been inserted in the INList; can this create any problems ?????).
+ * In case 6 there is only one thread running. So, in all cases it's ok to
+ * set the isInPri2LRU flag of the node.
+ *
+ * Question: can a thread T try to add a node N, seen as a Java obj instance,
+ * into the LRU, while N is already there? I believe not, and LRUList addBack()
+ * and addFront() methods assert that this cannot happen. In cases 1, 2, and 5
+ * above N is a newly created node, so it cannot be in the LRU already. In
+ * cases 3 and 4, N is a UIN that has cached children, so it cannot be in the
+ * LRU. In case 6 there is only 1 thread. Finally, in cases 7 and 8, T checks
+ * that N is not in the LRU before attempting to add it (and the situation
+ * cannot change between this check and the insertion into the LRU because N
+ * is EX-latched).
+ *
+ * Question: can a thread T try to add a node N, seen as a logical entity
+ * represented by its nodeId, into the LRU, while N is already there?
+ * Specifically, (a) can two Java instances, N1 and N2, of the same node
+ * N exist in memory at the same time, and (b) while N1 is in the LRU, can
+ * a thread T try to add N2 in the LRU? The answer to (a) is "yes", and as
+ * far as I can think, the answer to (b) is "no", but there is no explicit
+ * check in the code for this. Consider the following sequence of events:
+ * Initially only N1 is in memory and in the LRU. An evicting thread T1
+ * removes N1 from the LRU, thread T2 adds N1 to the LRU, thread T3 removes
+ * N1 from the LRU and actually evicts it, thread T4 fetches N from the log,
+ * thus creating instance N2 and adding N2 to the LRU, thread T1 finally
+ * EX-latches N1 and has to decide what to do with it. The check in case
+ * 7a above makes sure that N1 will not go back to the LRU. In fact the
+ * same check makes sure that N1 will not be evicted (i.e., logged, if
+ * dirty). T1 will just skip N1, thus allowing it to be GCed.
+ *
+ * Removing a node from the LRU
+ * ----------------------------
+ *
+ * A node is removed from the LRU when it is selected as an eviction target
+ * by an evicting thread. The thread chooses an LRUList to evict from
+ * and calls removeFront() on it. The node is not latched when it is removed
+ * from the LRU in this case. The evicting thread is going to EX-latch the
+ * node shortly after the removal. But as explained earlier, between
+ * the removal and the latching, another thread may put the node back to the
+ * LRU, and as a result, another thread may also choose the same node for
+ * eviction. The node may also be detached from the BTree, or its database
+ * closed, or deleted.
+ *
+ * A node may also be removed from the LRU by a non-evicting thread. This
+ * is done via the Evictor.remove(IN) method. The method checks the node's
+ * isInPri2LRU flag to determine which LRUSet the node belongs to (if any)
+ * and then calls LRUList.remove(N). The node must be at least SH latched
+ * when the method is called. The method is a noop if the node is not in the
+ * LRU. The node may not belong to any LRUList, because it has been selected
+ * for eviction by another thread (and thus removed from LRU), but the
+ * evicting thread has not yet latched the node. There are 3 cases (listed
+ * below) where Evictor.remove(N) is called. In the first two cases
+ * Evictor.remove(N) is invoked from INList.removeInternal(N). This makes
+ * sure that N is removed from the LRU whenever it is removed from the
+ * INList (to guarantee that the nodes in the LRU are always a subset of
+ * the nodes in the INList).
+ *
+ * 1. When a tree branch containing N gets detached from its tree. In this
+ *    case, INList.remove(N) is invoked inside accountForSubtreeRemoval() or
+ *    accountForDeferredWriteSubtreeRemoval().
+ *
+ * 2. When the database containing N gets deleted or truncated. In this case,
+ *    INList.iter.remove() is called in DatabaseImpl.finishDeleteProcessing().
+ *
+ * 3. N is a UIN with no cached children (hasCachedChildren flag is false)
+ *    and a new child for N is fetched. The call to Evictor.remove(N) is
+ *    done inside IN.setTarget().
+ *
+ * Moving a node within the LRU
+ * ----------------------------
+ *
+ * A node N is moved within its containing LRUList (if any) via the Evictor
+ * moveBack(IN) and moveFront(IN) methods. The methods check the isInPri2LRU
+ * flag of the node to determine the LRUSet the node belongs to and then move
+ * the node to the back or to the front of the LRUList. The node will be at
+ * least SH latched when these methods are called. Normally, the IN will be
+ * in an LRUList. However, it may not belong to any LRUList, because it has
+ * been selected for eviction by another thread (and thus removed from LRU),
+ * but the evicting thread has not yet EX-latched the node. In this case,
+ * these methods are a noop. The methods are called in the following
+ * situations:
+ *
+ * 1. N is latched with cachemode DEFAULT, KEEP_HOT, or EVICT_LN and N is a
+ *    BIN or a UIN with no cached children (the hasCachedChildren flag is
+ *    used to check if the UIN has cached children, so we don't need to
+ *    iterate over all of the node's child entries). In this case,
+ *    Evictor.moveBack(N) is called.
+ *
+ * 2. N is latched with cachemode MAKE_COLD or EVICT_BIN and N is a BIN.
+ *    In this case, Evictor.moveFront(N) is called.
+ *
+ * -------------------
+ * Eviction Processing
+ * -------------------
+ *
+ * A thread can initiate eviction by invoking the Evictor.doEviction() method.
+ * This method implements an "eviction run". An eviction run consists of a
+ * number of "eviction passes", where each pass is given as input a maximum
+ * number of bytes to evict. An eviction pass is implemented by the
+ * Evictor.evictBatch() method.
+ *
+ * Inside Evictor.evictBatch(), an evicting thread T:
+ *
+ * 1. Picks the priority-1 LRUSet initially as the "current" LRUSet to be
+ *    processed,
+ *
+ * 2. Initializes the max number of nodes to be processed per LRUSet to the
+ *    current size of the priority-1 LRUSet,
+ *
+ * 3. Executes the following loop:
+ *
+ * 3.1. Picks a non-empty LRUList from the current LRUSet in a round-robin
+ *      fashion, as explained earlier, and invokes LRUList.removeFront() to
+ *      remove the node N at the front of the list. N becomes the current
+ *      eviction target.
+ *
+ * 3.2. If the DB that node N belongs to has been deleted or closed, skips
+ *      this node, i.e., leaves N outside the LRU and goes to 3.4.
+ *
+ * 3.3. Calls processTarget(N) (see below).
+ *
+ * 3.4. If the current LRUSet is the priority-1 one and the number of target
+ *      nodes processed reaches the max number allowed, the priority-2 LRUSet
+ *      becomes the current one, the max number of nodes to be processed per
+ *      LRUSet is set to the current size of the priority-2 LRUSet, and the
+ *      number of nodes processed is reset to 0.
+ *
+ * 3.5. Breaks the loop if the max number of bytes to evict during this pass
+ *      has been reached, or memConsumption is less than (maxMemory - M)
+ *      (where M is a config param), or the number of nodes that have been
+ *      processed in the current LRUSet reaches the max allowed.
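+ *
+ * As a rough sketch of one pass (illustrative only; dbIsDeletedOrClosed is
+ * a hypothetical helper, and the real loop below carries more state):
+ *
+ *   long evicted = 0;
+ *   int processed = 0;
+ *   while (evicted < maxEvictBytes && processed < maxNodes) {
+ *       LRUList list = lruSet[(nextLRUList++) % numLRULists]; // round-robin
+ *       IN target = list.removeFront();                       // step 3.1
+ *       processed++;
+ *       if (target == null || dbIsDeletedOrClosed(target)) {
+ *           continue;                                         // step 3.2
+ *       }
+ *       evicted += processTarget(target);                     // step 3.3
+ *   }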
+ * + * -------------------------- + * The processTarget() method + * -------------------------- + * + * This method is called after a node N has been selected for eviction (and as + * result, removed from the LRU). The method EX-latches N and determines + * whether it can/should really be evicted, and if not what is the appropriate + * action to be taken by the evicting thread. Before returning, the method + * unlatches N. Finally, it returns the number of bytes evicted (if any). + * + * If a decision is taken to evict N or mutate it to a BINDelta, N must first + * be unlatched and its parent must be searched within the tree. During this + * search, many things can happen to the unlatched N, and as a result, after + * the parent is found and the N is relatched, processTarget() calls itself + * recursively to re-consider all the possible actions for N. + * + * Let T be an evicting thread running processTarget() to determine what to do + * with a target node N. The following is the list of possible outcomes: + * + * 1. SKIP - Do nothing with N if: + * (a) N is in the LRU. This can happen if N is a UIN and while it is + * unlatched by T, other threads fetch one or more of N's children, + * but then all of N's children are removed again, thus causing N to + * be put back to the LRU. + * (b) N is not in the INList. Given than N can be put back to the LRU while + * it is unlatched by T, it can also be selected as an eviction target + * by another thread and actually be evicted. + * (c) N is a UIN with cached children. N could have acquired children + * after the evicting thread removed it from the LRU, but before the + * evicting thread could EX-latch it. + * (d) N is the root of the DB naming tree or the DBmapping tree. + * (e) N is dirty, but the DB is read-only. + * (f) N's environment used a shared cache and the environment has been + * closed or invalidated. + * (g) If a decision was taken to evict od mutate N, but the tree search + * (using N's keyId) to find N's parent, failed to find the parent, or + * N itself. This can happen if during the search, N was evicted by + * another thread, or a branch containing N was completely removed + * from the tree. + * + * 2. PUT BACK - Put N to the back of the LRUSet it last belonged to, if: + * (a) It is a BIN that was last accessed with KEEP_HOT cache mode. + * (b) N has an entry with a NULL LSN and a null target. + * + * 3. PARTIAL EVICT - perform partial eviction on N, if none of the cases + * listed above is true. Currently, partial eviction applies to BINs only + * and involves the eviction (stripping) of evictable LNs. If a cached LN + * is not evictable, the whole BIN is not evictable as well. Currently, + * only MapLNs may be non-evictable (see MapLN.isEvictable()). + * + * After partial eviction is performed the following outcomes are possible: + * + * 4. STRIPPED PUT BACK - Put N to the back of the LRUSet it last belonged to, + * if partial eviction did evict any bytes, and N is not a BIN in EVICT_BIN + * or MAKE_COLD cache mode. + * + * 5. PUT BACK - Put N to the back of the LRUSet it last belonged to, if + * no bytes were stripped, but partial eviction determined that N is not + * evictable. + * + * 6. MUTATE - Mutate N to a BINDelta, if none of the above apply and N is a + * BIN that can be mutated. + * + * 7. 
+ * 7. MOVE DIRTY TO PRI-2 LRU - Move N to the front of the priority-2 LRUSet,
+ *    if none of the above apply and N is a dirty node that last belonged to
+ *    the priority-1 LRUSet, and a dirty LRUSet is used (meaning that no
+ *    off-heap cache is configured).
+ *
+ * 8. MOVE LEVEL-2 TO PRI-2 LRU - Move N to the front of the priority-2
+ *    LRUSet, if none of the above apply and N is a level-2 node with
+ *    off-heap BINs that last belonged to the priority-1 LRUSet.
+ *
+ * 9. EVICT - Evict N if none of the above apply.
+ *
+ * -------
+ * TODO:
+ * -------
+ *
+ * 1. Decide what to do about assertions (keep, remove, convert to JE
+ *    exceptions, convert to DEBUG-only expensive checks).
+ *
+ */
+public class Evictor implements EnvConfigObserver {
+
+    /*
+     * If new eviction source enums are added, a new stat is created, and
+     * EnvironmentStats must be updated to add a getter method.
+     *
+     * CRITICAL eviction is called by operations executed by app or daemon
+     * threads which detect that the cache has reached its limits.
+     * CACHEMODE eviction is called by operations that use a specific
+     * Cursor.
+     * EVICTORTHREAD is the eviction pool.
+     * MANUAL is the call to Environment.evictMemory, called by recovery or
+     * application code.
+     */
+    public enum EvictionSource {
+        /* Using ordinal for array values! */
+        EVICTORTHREAD {
+            String getName() {
+                return N_BYTES_EVICTED_EVICTORTHREAD_NAME;
+            }
+            String getDesc() {
+                return N_BYTES_EVICTED_EVICTORTHREAD_DESC;
+            }
+        },
+        MANUAL {
+            String getName() {
+                return N_BYTES_EVICTED_MANUAL_NAME;
+            }
+            String getDesc() {
+                return N_BYTES_EVICTED_MANUAL_DESC;
+            }
+        },
+        CRITICAL {
+            String getName() {
+                return N_BYTES_EVICTED_CRITICAL_NAME;
+            }
+            String getDesc() {
+                return N_BYTES_EVICTED_CRITICAL_DESC;
+            }
+        },
+        CACHEMODE {
+            String getName() {
+                return N_BYTES_EVICTED_CACHEMODE_NAME;
+            }
+            String getDesc() {
+                return N_BYTES_EVICTED_CACHEMODE_DESC;
+            }
+        },
+        DAEMON {
+            String getName() {
+                return N_BYTES_EVICTED_DAEMON_NAME;
+            }
+            String getDesc() {
+                return N_BYTES_EVICTED_DAEMON_DESC;
+            }
+        };
+
+        abstract String getName();
+
+        abstract String getDesc();
+
+        public StatDefinition getNumBytesEvictedStatDef() {
+            return new StatDefinition(getName(), getDesc());
+        }
+    }
+
+    /*
+     * The purpose of EvictionDebugStats is to capture the stats of a single
+     * eviction run (i.e., an execution of the Evictor.doEviction() method by
+     * a single thread). An instance of EvictionDebugStats is created at the
+     * start of doEviction() and is passed around to the methods called from
+     * doEviction(). At the end of doEviction(), the EvictionDebugStats
+     * instance can be printed out (for debugging), or (TODO) the captured
+     * stats can be loaded to the global Evictor.stats.
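+     *
+     * A typical lifecycle, mirroring the debug-stats path in doEviction()
+     * further below (a sketch of existing usage, not a new API):
+     *
+     *   EvictionDebugStats stats = new EvictionDebugStats();
+     *   stats.reset();
+     *   stats.pri1Size = getPri1LRUSize();
+     *   stats.pri2Size = getPri2LRUSize();
+     *   // ... pass stats through the eviction passes ...
+     *   System.out.println(stats.toString());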
+     */
+    static class EvictionDebugStats {
+        boolean inPri1LRU;
+        boolean withParent;
+
+        long pri1Size;
+        long pri2Size;
+
+        int numSelectedPri1;
+        int numSelectedPri2;
+
+        int numPutBackPri1;
+        int numPutBackPri2;
+
+        int numBINsStripped1Pri1;
+        int numBINsStripped2Pri1;
+        int numBINsStripped1Pri2;
+        int numBINsStripped2Pri2;
+
+        int numBINsMutatedPri1;
+        int numBINsMutatedPri2;
+
+        int numUINsMoved1;
+        int numUINsMoved2;
+        int numBINsMoved1;
+        int numBINsMoved2;
+
+        int numUINsEvictedPri1;
+        int numUINsEvictedPri2;
+        int numBINsEvictedPri1;
+        int numBINsEvictedPri2;
+
+        void reset() {
+            inPri1LRU = true;
+            withParent = false;
+
+            pri1Size = 0;
+            pri2Size = 0;
+
+            numSelectedPri1 = 0;
+            numSelectedPri2 = 0;
+
+            numPutBackPri1 = 0;
+            numPutBackPri2 = 0;
+
+            numBINsStripped1Pri1 = 0;
+            numBINsStripped2Pri1 = 0;
+            numBINsStripped1Pri2 = 0;
+            numBINsStripped2Pri2 = 0;
+
+            numBINsMutatedPri1 = 0;
+            numBINsMutatedPri2 = 0;
+
+            numUINsMoved1 = 0;
+            numUINsMoved2 = 0;
+            numBINsMoved1 = 0;
+            numBINsMoved2 = 0;
+
+            numUINsEvictedPri1 = 0;
+            numUINsEvictedPri2 = 0;
+            numBINsEvictedPri1 = 0;
+            numBINsEvictedPri2 = 0;
+        }
+
+        void incNumSelected() {
+            if (inPri1LRU) {
+                numSelectedPri1++;
+            } else {
+                numSelectedPri2++;
+            }
+        }
+
+        void incNumPutBack() {
+            if (inPri1LRU) {
+                numPutBackPri1++;
+            } else {
+                numPutBackPri2++;
+            }
+        }
+
+        void incNumStripped() {
+            if (inPri1LRU) {
+                if (withParent) {
+                    numBINsStripped2Pri1++;
+                } else {
+                    numBINsStripped1Pri1++;
+                }
+            } else {
+                if (withParent) {
+                    numBINsStripped2Pri2++;
+                } else {
+                    numBINsStripped1Pri2++;
+                }
+            }
+        }
+
+        void incNumMutated() {
+            if (inPri1LRU) {
+                numBINsMutatedPri1++;
+            } else {
+                numBINsMutatedPri2++;
+            }
+        }
+
+        void incNumMoved(boolean isBIN) {
+            if (withParent) {
+                if (isBIN) {
+                    numBINsMoved2++;
+                } else {
+                    numUINsMoved2++;
+                }
+            } else {
+                if (isBIN) {
+                    numBINsMoved1++;
+                } else {
+                    numUINsMoved1++;
+                }
+            }
+        }
+
+        void incNumEvicted(boolean isBIN) {
+            if (inPri1LRU) {
+                if (isBIN) {
+                    numBINsEvictedPri1++;
+                } else {
+                    numUINsEvictedPri1++;
+                }
+            } else {
+                if (isBIN) {
+                    numBINsEvictedPri2++;
+                } else {
+                    numUINsEvictedPri2++;
+                }
+            }
+        }
+
+        public String toString() {
+            StringBuilder sb = new StringBuilder();
+
+            sb.append("Eviction stats PRI1: size = ");
+            sb.append(pri1Size);
+            sb.append("\n");
+            sb.append("selected = ");
+            sb.append(numSelectedPri1);
+            sb.append(" | ");
+            sb.append("put back = ");
+            sb.append(numPutBackPri1);
+            sb.append(" | ");
+            sb.append("stripped = ");
+            sb.append(numBINsStripped1Pri1);
+            sb.append("/");
+            sb.append(numBINsStripped2Pri1);
+            sb.append(" | ");
+            sb.append("mutated = ");
+            sb.append(numBINsMutatedPri1);
+            sb.append(" | ");
+            sb.append("moved = ");
+            sb.append(numBINsMoved1);
+            sb.append("/");
+            sb.append(numBINsMoved2);
+            sb.append(" - ");
+            sb.append(numUINsMoved1);
+            sb.append("/");
+            sb.append(numUINsMoved2);
+            sb.append(" | ");
+            sb.append("evicted = ");
+            sb.append(numBINsEvictedPri1);
+            sb.append(" - ");
+            sb.append(numUINsEvictedPri1);
+            sb.append("\n");
+
+            sb.append("Eviction stats PRI2: size = ");
+            sb.append(pri2Size);
+            sb.append("\n");
+            sb.append("selected = ");
+            sb.append(numSelectedPri2);
+            sb.append(" | ");
+            sb.append("put back = ");
+            sb.append(numPutBackPri2);
+            sb.append(" | ");
+            sb.append("stripped = ");
+            sb.append(numBINsStripped1Pri2);
+            sb.append("/");
+            sb.append(numBINsStripped2Pri2);
+            sb.append(" | ");
+            sb.append("mutated = ");
+            sb.append(numBINsMutatedPri2);
+            sb.append(" | ");
+            sb.append("evicted = ");
+            sb.append(numBINsEvictedPri2);
+            sb.append(" - ");
"); + sb.append(numUINsEvictedPri2); + sb.append("\n"); + + return sb.toString(); + } + } + + /* + * The purpose of LRUDebugStats is to capture stats on the current state + * of an LRUSet. This is done via a call to LRUEvictor.getPri1LRUStats(), + * or LRUEvictor.getPri2LRUStats(). For now at least, these methods are + * meant to be used for debugging and unit testing only. + */ + static class LRUDebugStats { + int size; + int dirtySize; + + int numBINs; + int numDirtyBINs; + + int numStrippedBINs; + int numDirtyStrippedBINs; + + void reset() { + size = 0; + dirtySize = 0; + numBINs = 0; + numDirtyBINs = 0; + numStrippedBINs = 0; + numDirtyStrippedBINs = 0; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("Clean/Dirty INs = "); + sb.append(size - dirtySize); + sb.append("/"); + sb.append(dirtySize); + + sb.append(" BINs = "); + sb.append(numBINs - numDirtyBINs); + sb.append("/"); + sb.append(numDirtyBINs); + + sb.append(" Stripped BINs = "); + sb.append(numStrippedBINs - numDirtyStrippedBINs); + sb.append("/"); + sb.append(numDirtyStrippedBINs); + + return sb.toString(); + } + } + + /* + * LRUList implementation + */ + static class LRUList { + + private static final boolean doExpensiveCheck = false; + + private final int id; + + private int size = 0; + + private IN front = null; + private IN back = null; + + LRUList(int id) { + this.id = id; + } + + synchronized void addBack(IN node) { + + /* Make sure node is not in any LRUlist already */ + if (node.getNextLRUNode() != null || + node.getPrevLRUNode() != null) { + + throw EnvironmentFailureException.unexpectedState( + node.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + node.getEnv().getName() + + "Attempting to add node " + node.getNodeId() + + " in the LRU, but node is already in the LRU."); + } + assert(!node.isDIN() && !node.isDBIN()); + + node.setNextLRUNode(node); + + if (back != null) { + node.setPrevLRUNode(back); + back.setNextLRUNode(node); + } else { + assert(front == null); + node.setPrevLRUNode(node); + } + + back = node; + + if (front == null) { + front = back; + } + + ++size; + } + + synchronized void addFront(IN node) { + + /* Make sure node is not in any LRUlist already */ + if (node.getNextLRUNode() != null || + node.getPrevLRUNode() != null) { + + throw EnvironmentFailureException.unexpectedState( + node.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + node.getEnv().getName() + + "Attempting to add node " + node.getNodeId() + + " in the LRU, but node is already in the LRU."); + } + assert(!node.isDIN() && !node.isDBIN()); + + node.setPrevLRUNode(node); + + if (front != null) { + node.setNextLRUNode(front); + front.setPrevLRUNode(node); + } else { + assert(back == null); + node.setNextLRUNode(node); + } + + front = node; + + if (back == null) { + back = front; + } + + ++size; + } + + synchronized void moveBack(IN node) { + + /* If the node is not in the list, don't do anything */ + if (node.getNextLRUNode() == null) { + assert(node.getPrevLRUNode() == null); + return; + } + + if (doExpensiveCheck && !contains2(node)) { + System.out.println("LRUList.moveBack(): list " + id + + "does not contain node " + + node.getNodeId() + + " Thread: " + + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + " isBIN: " + node.isBIN() + + " inPri2LRU: " + node.isInPri2LRU()); + assert(false); + } + + if (node.getNextLRUNode() == node) { + /* The node is aready at the back */ + 
+                assert(back == node);
+                assert(node.getPrevLRUNode().getNextLRUNode() == node);
+
+            } else {
+                assert(front != back);
+                assert(size > 1);
+
+                if (node.getPrevLRUNode() == node) {
+                    /* the node is at the front */
+                    assert(front == node);
+                    assert(node.getNextLRUNode().getPrevLRUNode() == node);
+
+                    front = node.getNextLRUNode();
+                    front.setPrevLRUNode(front);
+                } else {
+                    /* the node is in the "middle" */
+                    assert(front != node && back != node);
+                    assert(node.getPrevLRUNode().getNextLRUNode() == node);
+                    assert(node.getNextLRUNode().getPrevLRUNode() == node);
+
+                    node.getPrevLRUNode().setNextLRUNode(node.getNextLRUNode());
+                    node.getNextLRUNode().setPrevLRUNode(node.getPrevLRUNode());
+                }
+
+                node.setNextLRUNode(node);
+                node.setPrevLRUNode(back);
+
+                back.setNextLRUNode(node);
+                back = node;
+            }
+        }
+
+        synchronized void moveFront(IN node) {
+
+            /* If the node is not in the list, don't do anything */
+            if (node.getNextLRUNode() == null) {
+                assert(node.getPrevLRUNode() == null);
+                return;
+            }
+
+            if (doExpensiveCheck && !contains2(node)) {
+                System.out.println("LRUList.moveFront(): list " + id +
+                                   " does not contain node " +
+                                   node.getNodeId() +
+                                   " Thread: " +
+                                   Thread.currentThread().getId() + "-" +
+                                   Thread.currentThread().getName() +
+                                   " isBIN: " + node.isBIN() +
+                                   " inPri2LRU: " + node.isInPri2LRU());
+                assert(false);
+            }
+
+            if (node.getPrevLRUNode() == node) {
+                /* the node is already at the front */
+                assert(front == node);
+                assert(node.getNextLRUNode().getPrevLRUNode() == node);
+
+            } else {
+                assert(front != back);
+                assert(size > 1);
+
+                if (node.getNextLRUNode() == node) {
+                    /* the node is at the back */
+                    assert(back == node);
+                    assert(node.getPrevLRUNode().getNextLRUNode() == node);
+
+                    back = node.getPrevLRUNode();
+                    back.setNextLRUNode(back);
+                } else {
+                    /* the node is in the "middle" */
+                    assert(front != node && back != node);
+                    assert(node.getPrevLRUNode().getNextLRUNode() == node);
+                    assert(node.getNextLRUNode().getPrevLRUNode() == node);
+
+                    node.getPrevLRUNode().setNextLRUNode(node.getNextLRUNode());
+                    node.getNextLRUNode().setPrevLRUNode(node.getPrevLRUNode());
+                }
+
+                node.setPrevLRUNode(node);
+                node.setNextLRUNode(front);
+
+                front.setPrevLRUNode(node);
+                front = node;
+            }
+        }
+
+        synchronized IN removeFront() {
+            if (front == null) {
+                assert(back == null);
+                return null;
+            }
+
+            IN res = front;
+
+            if (front == back) {
+                assert(front.getNextLRUNode() == front);
+                assert(front.getPrevLRUNode() == front);
+                assert(size == 1);
+
+                front = null;
+                back = null;
+
+            } else {
+                assert(size > 1);
+
+                front = front.getNextLRUNode();
+                front.setPrevLRUNode(front);
+            }
+
+            res.setNextLRUNode(null);
+            res.setPrevLRUNode(null);
+            --size;
+
+            return res;
+        }
+
+        synchronized boolean remove(IN node) {
+
+            /* If the node is not in the list, don't do anything */
+            if (node.getNextLRUNode() == null) {
+                assert(node.getPrevLRUNode() == null);
+                return false;
+            }
+
+            assert(node.getPrevLRUNode() != null);
+
+            if (doExpensiveCheck && !contains2(node)) {
+                System.out.println("LRUList.remove(): list " + id +
+                                   " does not contain node " +
+                                   node.getNodeId() +
+                                   " Thread: " +
+                                   Thread.currentThread().getId() + "-" +
+                                   Thread.currentThread().getName() +
+                                   " isBIN: " + node.isBIN() +
+                                   " inPri2LRU: " + node.isInPri2LRU());
+                assert(false);
+            }
+
+            if (front == back) {
+                assert(size == 1);
+                assert(front == node);
+                assert(front.getNextLRUNode() == front);
+                assert(front.getPrevLRUNode() == front);
+
+                front = null;
+                back = null;
+
+            } else if (node.getPrevLRUNode() == node) {
+                /* node is at the front */
+                assert(front == node);
+                assert(node.getNextLRUNode().getPrevLRUNode() == node);
+
+                front = node.getNextLRUNode();
+                front.setPrevLRUNode(front);
+
+            } else if (node.getNextLRUNode() == node) {
+                /* the node is at the back */
+                assert(back == node);
+                assert(node.getPrevLRUNode().getNextLRUNode() == node);
+
+                back = node.getPrevLRUNode();
+                back.setNextLRUNode(back);
+            } else {
+                /* the node is in the "middle" */
+                assert(size > 2);
+                assert(front != back);
+                assert(front != node && back != node);
+                assert(node.getPrevLRUNode().getNextLRUNode() == node);
+                assert(node.getNextLRUNode().getPrevLRUNode() == node);
+
+                node.getPrevLRUNode().setNextLRUNode(node.getNextLRUNode());
+                node.getNextLRUNode().setPrevLRUNode(node.getPrevLRUNode());
+            }
+
+            node.setNextLRUNode(null);
+            node.setPrevLRUNode(null);
+            --size;
+
+            return true;
+        }
+
+        synchronized void removeINsForEnv(EnvironmentImpl env) {
+
+            if (front == null) {
+                assert(back == null);
+                return;
+            }
+
+            IN node = front;
+
+            while (true) {
+
+                IN nextNode = node.getNextLRUNode();
+                IN prevNode = node.getPrevLRUNode();
+
+                if (node.getDatabase().getEnv() == env) {
+
+                    node.setNextLRUNode(null);
+                    node.setPrevLRUNode(null);
+
+                    if (front == back) {
+                        assert(size == 1);
+                        assert(front == node);
+                        assert(nextNode == front);
+                        assert(prevNode == front);
+
+                        front = null;
+                        back = null;
+                        --size;
+                        break;
+
+                    } else if (prevNode == node) {
+                        /* node is at the front */
+                        assert(size > 1);
+                        assert(front == node);
+                        assert(nextNode.getPrevLRUNode() == node);
+
+                        front = nextNode;
+                        front.setPrevLRUNode(front);
+                        node = front;
+                        --size;
+
+                    } else if (nextNode == node) {
+                        /* the node is at the back */
+                        assert(size > 1);
+                        assert(back == node);
+                        assert(prevNode.getNextLRUNode() == node);
+
+                        back = prevNode;
+                        back.setNextLRUNode(back);
+                        --size;
+                        break;
+                    } else {
+                        /* the node is in the "middle" */
+                        assert(size > 2);
+                        assert(front != back);
+                        assert(front != node && back != node);
+                        assert(prevNode.getNextLRUNode() == node);
+                        assert(nextNode.getPrevLRUNode() == node);
+
+                        prevNode.setNextLRUNode(nextNode);
+                        nextNode.setPrevLRUNode(prevNode);
+                        node = nextNode;
+                        --size;
+                    }
+                } else if (nextNode == node) {
+                    break;
+                } else {
+                    node = nextNode;
+                }
+            }
+        }
+
+        synchronized boolean contains(IN node) {
+            return (node.getNextLRUNode() != null);
+        }
+
+        private boolean contains2(IN node) {
+
+            if (front == null) {
+                assert(back == null);
+                return false;
+            }
+
+            IN curr = front;
+
+            while (true) {
+                if (curr == node) {
+                    return true;
+                }
+
+                if (curr.getNextLRUNode() == curr) {
+                    break;
+                }
+
+                curr = curr.getNextLRUNode();
+            }
+
+            return false;
+        }
+
+        synchronized List<IN> copyList() {
+
+            if (front == null) {
+                assert(back == null);
+                return Collections.emptyList();
+            }
+
+            List<IN> list = new ArrayList<>();
+
+            IN curr = front;
+
+            while (true) {
+                list.add(curr);
+
+                if (curr.getNextLRUNode() == curr) {
+                    break;
+                }
+
+                curr = curr.getNextLRUNode();
+            }
+
+            return list;
+        }
+
+        int getSize() {
+            return size;
+        }
+
+        synchronized void getStats(EnvironmentImpl env, LRUDebugStats stats) {
+
+            if (front == null) {
+                assert(back == null);
+                return;
+            }
+
+            IN curr = front;
+
+            while (true) {
+                if (env == null || curr.getEnv() == env) {
+                    stats.size++;
+
+                    if (curr.getDirty()) {
+                        stats.dirtySize++;
+                    }
+
+                    if (curr.isBIN()) {
+                        stats.numBINs++;
+
+                        if (curr.getDirty()) {
+                            stats.numDirtyBINs++;
+                        }
+
+                        if (!curr.hasCachedChildren()) {
+                            stats.numStrippedBINs++;
+
+                            if (curr.getDirty()) {
+                                stats.numDirtyStrippedBINs++;
+                            }
+                        }
+                    }
+                }
+
+                if (curr.getNextLRUNode() == curr) {
+                    break;
+                }
+
+                curr = curr.getNextLRUNode();
+            }
+        }
+    }
+
+    /**
+     * EnvInfo stores info related to the environments that share this evictor.
+     */
+    private static class EnvInfo {
+        EnvironmentImpl env;
+        INList ins;
+    }
+
+    /* Prevent endless eviction loops under extreme resource constraints. */
+    private static final int MAX_BATCHES_PER_RUN = 100;
+
+    private static final boolean traceUINs = false;
+    private static final boolean traceBINs = false;
+    private static final Level traceLevel = Level.INFO;
+
+    /* LRU-TODO: remove */
+    private static final boolean collectEvictionDebugStats = false;
+
+    /**
+     * Number of LRULists per LRUSet. This is a configuration parameter.
+     *
+     * In general, using only one LRUList may create a synchronization
+     * bottleneck, because all LRUList methods are synchronized and are
+     * invoked with high frequency from multiple threads. To alleviate
+     * this bottleneck, we need the option to break a single LRUList
+     * into multiple ones comprising an "LRUSet" (even though this
+     * reduces the quality of the LRU approximation).
+     */
+    private final int numLRULists;
+
+    /*
+     * This is true when an off-heap cache is in use. If true, then the
+     * priority-2 LRUSet is always used for level 2 INs, and useDirtyLRUSet
+     * and mutateBins are both set to false.
+     */
+    private final boolean useOffHeapCache;
+
+    /**
+     * Whether to use the priority-2 LRUSet for dirty nodes or not.
+     *
+     * When useOffHeapCache is true, useDirtyLRUSet is always false. When
+     * useOffHeapCache is false, useDirtyLRUSet is set via a configuration
+     * parameter.
+     */
+    private final boolean useDirtyLRUSet;
+
+    /*
+     * Whether to allow deltas when logging a dirty BIN that is being evicted.
+     * This is a configuration parameter.
+     */
+    private final boolean allowBinDeltas;
+
+    /*
+     * Whether to mutate BINs to BIN deltas rather than evicting the full node.
+     *
+     * When useOffHeapCache is true, mutateBins is always false. When
+     * useOffHeapCache is false, mutateBins is set via a configuration
+     * parameter.
+     */
+    private final boolean mutateBins;
+
+    /*
+     * Access count after which we clear the DatabaseImpl cache.
+     * This is a configuration parameter.
+     */
+    private int dbCacheClearCount;
+
+    /*
+     * This is a configuration parameter. If true, eviction is done by a pool
+     * of evictor threads, as well as being done inline by application threads.
+     * Note: runEvictorThreads is needed as a distinct flag, rather than
+     * setting maxThreads to 0, because the ThreadPoolExecutor does not permit
+     * maxThreads to be 0.
+     */
+    private boolean runEvictorThreads;
+
+    /* This is a configuration parameter. */
+    private int terminateMillis;
+
+    /* The thread pool used to manage the background evictor threads. */
+    private final ThreadPoolExecutor evictionPool;
+
+    /* Flag to help shutdown launched eviction tasks. */
+    private final AtomicBoolean shutdownRequested = new AtomicBoolean(false);
+
+    private int maxPoolThreads;
+    private final AtomicInteger activePoolThreads = new AtomicInteger(0);
+
+    /*
+     * Whether this evictor (and the memory cache) is shared by multiple
+     * environments.
+     */
+    private final boolean isShared;
+
+    /*
+     * In case of multiple environments sharing a cache (and this Evictor),
+     * firstEnvImpl references the 1st EnvironmentImpl to be created with
+     * the shared cache.
+     */
+    private final EnvironmentImpl firstEnvImpl;
+
+    private final List<EnvInfo> envInfos;
+
+    /**
+     * This is used only when this evictor is shared by multiple envs.
+     * It "points" to the next env to perform "special eviction" in.
+     */
+    private int specialEvictionIndex = 0;
+
+    /*
+     * The Arbiter, also owned by the Evictor, queries the MemoryBudget to
+     * determine whether eviction is needed and how many bytes should be
+     * evicted (see the Overview above).
+     */
+    private final Arbiter arbiter;
+
+    /**
+     * With an off-heap cache configured:
+     * pri1LRUSet contains nodes of any type and level. A freshly cached node
+     * goes into this LRUSet. A level-2 node will go to the pri2LRUSet if it is
+     * selected for eviction from the pri1LRUSet and it contains off-heap BINs.
+     * A node will move from the pri2LRUSet to the pri1LRUSet when its last
+     * off-heap BIN is evicted from the off-heap cache.
+     *
+     * Without an off-heap cache configured:
+     * pri1LRUSet contains both clean and dirty nodes. A freshly cached node
+     * goes into this LRUSet. A dirty node will go to the pri2LRUSet if it is
+     * selected for eviction from the pri1LRUSet. A node will move from the
+     * pri2LRUSet to the pri1LRUSet when it gets logged (i.e., cleaned) by
+     * the checkpointer.
+     */
+    private final LRUList[] pri1LRUSet;
+    private final LRUList[] pri2LRUSet;
+
+    /**
+     * nextPri1LRUList is used to implement the traversal of the lists in
+     * the pri1LRUSet by one or more evicting threads. Such a thread will
+     * select for eviction the front node from the (nextPri1LRUList %
+     * numLRULists)-th list, and then increment nextPri1LRUList.
+     * nextPri2LRUList plays the same role for the priority-2 LRUSet.
+     */
+    private int nextPri1LRUList = 0;
+    private int nextPri2LRUList = 0;
+
+    /*
+     * The evictor is disabled during the 1st phase of recovery. The
+     * RecoveryManager enables the evictor after it finishes its 1st
+     * phase.
+     */
+    private boolean isEnabled = false;
+
+    /* Eviction calls cannot be recursive. */
+    private ReentrancyGuard reentrancyGuard;
+
+    private final Logger logger;
+
+    /*
+     * Stats
+     */
+    private final StatGroup stats;
+
+    /*
+     * Number of eviction tasks that were submitted to the background evictor
+     * pool, but were refused because all eviction threads were busy.
+     */
+    private final AtomicLongStat nThreadUnavailable;
+
+    /* Number of evictBatch() invocations. */
+    private final LongStat nEvictionRuns;
+
+    /*
+     * Number of nodes selected as eviction targets. An eviction target may
+     * actually be evicted, or skipped, or put back to the LRU, potentially
+     * after partial eviction or BIN-delta mutation is done on it.
+     */
+    private final LongStat nNodesTargeted;
+
+    /* Number of nodes evicted. */
+    private final LongStat nNodesEvicted;
+
+    /* Number of closed database root nodes evicted. */
+    private final LongStat nRootNodesEvicted;
+
+    /* Number of dirty nodes logged and evicted. */
+    private final LongStat nDirtyNodesEvicted;
+
+    /* Number of LNs evicted. */
+    private final LongStat nLNsEvicted;
+
+    /* Number of BINs stripped. */
+    private final LongStat nNodesStripped;
+
+    /* Number of BINs mutated to deltas. */
+    private final LongStat nNodesMutated;
+
+    /* Number of target nodes put back to the LRU w/o any other action taken */
+    private final LongStat nNodesPutBack;
+
+    /* Number of target nodes skipped. */
+    private final LongStat nNodesSkipped;
+
+    /* Number of target nodes moved to the priority-2 LRU */
+    private final LongStat nNodesMovedToPri2LRU;
+
+    /* Number of bytes evicted per eviction source. */
+    private final AtomicLongStat[] numBytesEvicted;
+
+    /*
+     * Tree related cache hit/miss stats. A subset of the cache misses recorded
+     * by the log manager, in that these only record tree node hits and misses.
+     * Recorded by IN.fetchIN and IN.fetchLN, but grouped with evictor stats.
+     * Use AtomicLongStat for multithreading safety.
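+     *
+     * (For orientation only, an assumption rather than code quoted from this
+     * class: a BIN fetch-miss ratio like binFetchMissRatio below would
+     * naturally be derived from the two counters, e.g.
+     *
+     *     float ratio = (nBINFetch == 0)
+     *         ? 0.0f
+     *         : ((float) nBINFetchMiss) / ((float) nBINFetch);
+     * )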
+ */ + private final AtomicLongStat nLNFetch; + + private final AtomicLongStat nLNFetchMiss; + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a UIN. + */ + private final AtomicLongStat nUpperINFetch; + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a UIN and that UIN was not already cached. + */ + private final AtomicLongStat nUpperINFetchMiss; + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a BIN. + */ + private final AtomicLongStat nBINFetch; + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a BIN and that BIN was not already cached. + */ + private final AtomicLongStat nBINFetchMiss; + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a BIN, that BIN was not already cached, and a BIN-delta was + * fetched from disk. + */ + private final AtomicLongStat nBINDeltaFetchMiss; + + private final FloatStat binFetchMissRatio; + + /* + * Number of calls to BIN.mutateToFullBIN() + */ + private final AtomicLongStat nFullBINMiss; + + /* + * Number of blind operations on BIN deltas + */ + private final AtomicLongStat nBinDeltaBlindOps; + + /* Stats for IN compact array representations currently in cache. */ + private final AtomicLong nINSparseTarget; + private final AtomicLong nINNoTarget; + private final AtomicLong nINCompactKey; + + /* Number of envs sharing the cache. */ + private final IntStat sharedCacheEnvs; + + /* Debugging and unit test support. */ + + /* + * Number of consecutive "no-eviction" events (i.e. when evictBatch() + * returns 0). It is incremented at each "no-eviction" event and reset + * to 0 when eviction does occur. It is used to determine whether to + * log a WARNING for a "no-eviction" event: only 1 warning is logged + * per sequence of consecutive "no-eviction" events (to avoid flooding + * the logger files). + */ + private int numNoEvictionEvents = 0; + + private TestHook preEvictINHook; + private TestHook evictProfile; + + public Evictor(EnvironmentImpl envImpl) + throws DatabaseException { + + isShared = envImpl.getSharedCache(); + + firstEnvImpl = envImpl; + + /* Do the stats definitions. 
+        stats = new StatGroup(GROUP_NAME, GROUP_DESC);
+
+        nEvictionRuns = new LongStat(stats, EVICTOR_EVICTION_RUNS);
+
+        nNodesTargeted = new LongStat(stats, EVICTOR_NODES_TARGETED);
+        nNodesEvicted = new LongStat(stats, EVICTOR_NODES_EVICTED);
+        nRootNodesEvicted = new LongStat(stats, EVICTOR_ROOT_NODES_EVICTED);
+        nDirtyNodesEvicted = new LongStat(stats, EVICTOR_DIRTY_NODES_EVICTED);
+        nLNsEvicted = new LongStat(stats, EVICTOR_LNS_EVICTED);
+        nNodesStripped = new LongStat(stats, EVICTOR_NODES_STRIPPED);
+        nNodesMutated = new LongStat(stats, EVICTOR_NODES_MUTATED);
+        nNodesPutBack = new LongStat(stats, EVICTOR_NODES_PUT_BACK);
+        nNodesSkipped = new LongStat(stats, EVICTOR_NODES_SKIPPED);
+        nNodesMovedToPri2LRU = new LongStat(
+            stats, EVICTOR_NODES_MOVED_TO_PRI2_LRU);
+
+        nLNFetch = new AtomicLongStat(stats, LN_FETCH);
+        nBINFetch = new AtomicLongStat(stats, BIN_FETCH);
+        nUpperINFetch = new AtomicLongStat(stats, UPPER_IN_FETCH);
+        nLNFetchMiss = new AtomicLongStat(stats, LN_FETCH_MISS);
+        nBINFetchMiss = new AtomicLongStat(stats, BIN_FETCH_MISS);
+        nBINDeltaFetchMiss = new AtomicLongStat(stats, BIN_DELTA_FETCH_MISS);
+        nUpperINFetchMiss = new AtomicLongStat(stats, UPPER_IN_FETCH_MISS);
+        nFullBINMiss = new AtomicLongStat(stats, FULL_BIN_MISS);
+        nBinDeltaBlindOps = new AtomicLongStat(stats, BIN_DELTA_BLIND_OPS);
+        binFetchMissRatio = new FloatStat(stats, BIN_FETCH_MISS_RATIO);
+
+        nThreadUnavailable = new AtomicLongStat(stats, THREAD_UNAVAILABLE);
+
+        nINSparseTarget = new AtomicLong(0);
+        nINNoTarget = new AtomicLong(0);
+        nINCompactKey = new AtomicLong(0);
+
+        sharedCacheEnvs = new IntStat(stats, EVICTOR_SHARED_CACHE_ENVS);
+
+        EnumSet<EvictionSource> allSources =
+            EnumSet.allOf(EvictionSource.class);
+
+        int numSources = allSources.size();
+
+        numBytesEvicted = new AtomicLongStat[numSources];
+
+        for (EvictionSource source : allSources) {
+
+            int index = source.ordinal();
+
+            numBytesEvicted[index] = new AtomicLongStat(
+                stats, source.getNumBytesEvictedStatDef());
+        }
+
+        arbiter = new Arbiter(firstEnvImpl);
+
+        logger = LoggerUtils.getLogger(getClass());
+        reentrancyGuard = new ReentrancyGuard(firstEnvImpl, logger);
+
+        DbConfigManager configManager = firstEnvImpl.getConfigManager();
+
+        int corePoolSize = configManager.getInt(
+            EnvironmentParams.EVICTOR_CORE_THREADS);
+        maxPoolThreads = configManager.getInt(
+            EnvironmentParams.EVICTOR_MAX_THREADS);
+        long keepAliveTime = configManager.getDuration(
+            EnvironmentParams.EVICTOR_KEEP_ALIVE);
+        terminateMillis = configManager.getDuration(
+            EnvironmentParams.EVICTOR_TERMINATE_TIMEOUT);
+        dbCacheClearCount = configManager.getInt(
+            EnvironmentParams.ENV_DB_CACHE_CLEAR_COUNT);
+        numLRULists = configManager.getInt(
+            EnvironmentParams.EVICTOR_N_LRU_LISTS);
+
+        pri1LRUSet = new LRUList[numLRULists];
+        pri2LRUSet = new LRUList[numLRULists];
+
+        for (int i = 0; i < numLRULists; ++i) {
+            pri1LRUSet[i] = new LRUList(i);
+            pri2LRUSet[i] = new LRUList(numLRULists + i);
+        }
+
+        if (isShared) {
+            envInfos = new ArrayList<>();
+        } else {
+            envInfos = null;
+        }
+
+        if (configManager.getLong(EnvironmentParams.MAX_OFF_HEAP_MEMORY) > 0) {
+            mutateBins = false;
+            useDirtyLRUSet = false;
+            useOffHeapCache = true;
+
+        } else {
+            mutateBins = configManager.getBoolean(
+                EnvironmentParams.EVICTOR_MUTATE_BINS);
+
+            useDirtyLRUSet = configManager.getBoolean(
+                EnvironmentParams.EVICTOR_USE_DIRTY_LRU);
+
+            useOffHeapCache = false;
+        }
+
+        RejectedExecutionHandler rejectHandler = new RejectEvictHandler(
+            nThreadUnavailable);
+
+        evictionPool = new ThreadPoolExecutor(
+            corePoolSize, maxPoolThreads, keepAliveTime,
+            TimeUnit.MILLISECONDS,
+            new ArrayBlockingQueue<Runnable>(1),
+            new StoppableThreadFactory(
+                isShared ? null : envImpl, "JEEvictor", logger),
+            rejectHandler);
+
+        allowBinDeltas = configManager.getBoolean(
+            EnvironmentParams.EVICTOR_ALLOW_BIN_DELTAS);
+
+        runEvictorThreads = configManager.getBoolean(
+            EnvironmentParams.ENV_RUN_EVICTOR);
+
+        /*
+         * Request notification of mutable property changes. Do this after all
+         * fields in the evictor have been initialized, in case this is called
+         * quite soon.
+         */
+        firstEnvImpl.addConfigObserver(this);
+    }
+
+    /**
+     * Respond to config updates.
+     */
+    @Override
+    public void envConfigUpdate(
+        DbConfigManager configManager,
+        EnvironmentMutableConfig ignore)
+        throws DatabaseException {
+
+        int corePoolSize = configManager.getInt(
+            EnvironmentParams.EVICTOR_CORE_THREADS);
+        maxPoolThreads = configManager.getInt(
+            EnvironmentParams.EVICTOR_MAX_THREADS);
+        long keepAliveTime = configManager.getDuration(
+            EnvironmentParams.EVICTOR_KEEP_ALIVE);
+        terminateMillis = configManager.getDuration(
+            EnvironmentParams.EVICTOR_TERMINATE_TIMEOUT);
+        dbCacheClearCount = configManager.getInt(
+            EnvironmentParams.ENV_DB_CACHE_CLEAR_COUNT);
+
+        evictionPool.setCorePoolSize(corePoolSize);
+        evictionPool.setMaximumPoolSize(maxPoolThreads);
+        evictionPool.setKeepAliveTime(keepAliveTime, TimeUnit.MILLISECONDS);
+
+        runEvictorThreads = configManager.getBoolean(
+            EnvironmentParams.ENV_RUN_EVICTOR);
+    }
+
+    public void setEnabled(boolean v) {
+        isEnabled = v;
+    }
+
+    public ThreadPoolExecutor getThreadPool() {
+        return evictionPool;
+    }
+
+    /**
+     * Request and wait for a shutdown of all running eviction tasks.
+     */
+    public void shutdown() {
+
+        /*
+         * Set the shutdown flag so that outstanding eviction tasks end
+         * early. The call to evictionPool.shutdown is a ThreadPoolExecutor
+         * call, and is an orderly shutdown that waits for any in-flight
+         * tasks to end.
+         */
+        shutdownRequested.set(true);
+        evictionPool.shutdown();
+
+        /*
+         * awaitTermination will wait for the timeout period, or will be
+         * interrupted, but we don't really care which it is. The evictor
+         * shouldn't be interrupted, but if it is, something urgent is
+         * happening.
+         */
+        boolean shutdownFinished = false;
+        try {
+            shutdownFinished =
+                evictionPool.awaitTermination(terminateMillis,
+                                              TimeUnit.MILLISECONDS);
+        } catch (InterruptedException e) {
+            /* We've been interrupted, just give up and end. */
+        } finally {
+            if (!shutdownFinished) {
+                evictionPool.shutdownNow();
+            }
+        }
+    }
+
+    public void requestShutdown() {
+        shutdownRequested.set(true);
+        evictionPool.shutdown();
+    }
+
+    public synchronized void addEnvironment(EnvironmentImpl env) {
+
+        if (isShared) {
+            int numEnvs = envInfos.size();
+            for (int i = 0; i < numEnvs; i += 1) {
+                EnvInfo info = envInfos.get(i);
+                if (info.env == env) {
+                    return;
+                }
+            }
+
+            EnvInfo info = new EnvInfo();
+            info.env = env;
+            info.ins = env.getInMemoryINs();
+            envInfos.add(info);
+        } else {
+            throw EnvironmentFailureException.unexpectedState();
+        }
+    }
+
+    public synchronized void removeSharedCacheEnv(EnvironmentImpl env) {
+        if (!isShared) {
+            throw EnvironmentFailureException.unexpectedState();
+        }
+
+        int numEnvs = envInfos.size();
+        for (int i = 0; i < numEnvs; i += 1) {
+            EnvInfo info = envInfos.get(i);
+
+            if (info.env == env) {
+
+                try {
+                    for (int j = 0; j < numLRULists; ++j) {
+                        pri1LRUSet[j].removeINsForEnv(env);
+                        pri2LRUSet[j].removeINsForEnv(env);
+                    }
+                } catch (AssertionError e) {
+                    System.out.println("YYYYYYYYYY " + e);
+                    e.printStackTrace(System.out);
+                    throw e;
+                }
+
+                envInfos.remove(i);
+                return;
+            }
+        }
+    }
+
+    public synchronized boolean checkEnv(EnvironmentImpl env) {
+        if (isShared) {
+            int numEnvs = envInfos.size();
+            for (int i = 0; i < numEnvs; i += 1) {
+                EnvInfo info = envInfos.get(i);
+                if (env == info.env) {
+                    return true;
+                }
+            }
+
+            return false;
+
+        } else {
+            throw EnvironmentFailureException.unexpectedState();
+        }
+    }
+
+    /**
+     * Add the node to the back of the priority-1 LRUSet. The node is either
+     * EX-latched already or is inaccessible from other threads.
+     */
+    public void addBack(IN node) {
+
+        if (isEnabled && node.getEnv().getInMemoryINs().isEnabled()) {
+
+            assert(node.getInListResident());
+
+            node.setInPri2LRU(false);
+            pri1LRUSet[(int)(node.getNodeId() % numLRULists)].addBack(node);
+        }
+    }
+
+    /**
+     * Add the node to the front of the priority-1 LRUSet. The node is either
+     * EX-latched already or is inaccessible from other threads.
+     */
+    public void addFront(IN node) {
+
+        if (isEnabled && node.getEnv().getInMemoryINs().isEnabled()) {
+
+            assert(node.getInListResident());
+
+            node.setInPri2LRU(false);
+            pri1LRUSet[(int)(node.getNodeId() % numLRULists)].addFront(node);
+        }
+    }
+
+    /*
+     * Add the node to the back of the priority-2 LRUSet.
+     */
+    private void pri2AddBack(IN node) {
+
+        assert(node.isLatchExclusiveOwner());
+        assert(node.getInListResident());
+
+        node.setInPri2LRU(true);
+        pri2LRUSet[(int)(node.getNodeId() % numLRULists)].addBack(node);
+    }
+
+    /*
+     * Add the node to the front of the priority-2 LRUSet.
+     */
+    private void pri2AddFront(IN node) {
+
+        assert(node.isLatchExclusiveOwner());
+        assert(node.getInListResident());
+
+        node.setInPri2LRU(true);
+        pri2LRUSet[(int)(node.getNodeId() % numLRULists)].addFront(node);
+    }
+
+    /**
+     * Move the node to the back of its containing LRUList, if any.
+     */
+    public void moveBack(IN node) {
+
+        assert(node.isLatchOwner());
+
+        if (node.isInPri2LRU()) {
+            pri2LRUSet[(int)(node.getNodeId() % numLRULists)].moveBack(node);
+        } else {
+            pri1LRUSet[(int)(node.getNodeId() % numLRULists)].moveBack(node);
+        }
+    }
+
+    /**
+     * Move the node to the front of its containing LRUList, if any.
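+     *
+     * Per the class comment above, this is invoked for BINs latched with
+     * cachemode MAKE_COLD or EVICT_BIN. A hypothetical call site (a sketch,
+     * not code from this file) would be:
+     *
+     *   bin.latchShared();
+     *   try {
+     *       evictor.moveFront(bin); // becomes the next eviction candidate
+     *   } finally {
+     *       bin.releaseLatch();
+     *   }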
+     */
+    public void moveFront(IN node) {
+
+        assert(node.isLatchOwner());
+
+        if (node.isInPri2LRU()) {
+            pri2LRUSet[(int)(node.getNodeId() % numLRULists)].moveFront(node);
+        } else {
+            pri1LRUSet[(int)(node.getNodeId() % numLRULists)].moveFront(node);
+        }
+    }
+
+    /**
+     * Remove a node from its current LRUList, if any.
+     */
+    public void remove(IN node) {
+
+        assert(node.isLatchOwner());
+
+        int listId = (int)(node.getNodeId() % numLRULists);
+
+        if (node.isInPri2LRU()) {
+            pri2LRUSet[listId].remove(node);
+        } else {
+            pri1LRUSet[listId].remove(node);
+        }
+    }
+
+    /**
+     * Move the node from the priority-2 LRUSet to the priority-1 LRUSet, if
+     * the node is indeed in the priority-2 LRUSet.
+     */
+    public void moveToPri1LRU(IN node) {
+
+        assert(node.isLatchExclusiveOwner());
+
+        if (!node.isInPri2LRU()) {
+            return;
+        }
+
+        int listId = (int)(node.getNodeId() % numLRULists);
+
+        if (pri2LRUSet[listId].remove(node)) {
+            assert(node.getInListResident());
+            node.setInPri2LRU(false);
+            pri1LRUSet[listId].addBack(node);
+        }
+    }
+
+    public boolean contains(IN node) {
+
+        assert(node.isLatchOwner());
+
+        int listId = (int)(node.getNodeId() % numLRULists);
+
+        if (node.isInPri2LRU()) {
+            return pri2LRUSet[listId].contains(node);
+        }
+        return pri1LRUSet[listId].contains(node);
+    }
+
+    public boolean getUseDirtyLRUSet() {
+        return useDirtyLRUSet;
+    }
+
+    long getPri1LRUSize() {
+        long size = 0;
+        for (int i = 0; i < numLRULists; ++i) {
+            size += pri1LRUSet[i].getSize();
+        }
+
+        return size;
+    }
+
+    long getPri2LRUSize() {
+        long size = 0;
+        for (int i = 0; i < numLRULists; ++i) {
+            size += pri2LRUSet[i].getSize();
+        }
+
+        return size;
+    }
+
+    void getPri1LRUStats(EnvironmentImpl env, LRUDebugStats stats) {
+        stats.reset();
+        for (int i = 0; i < numLRULists; ++i) {
+            pri1LRUSet[i].getStats(env, stats);
+        }
+    }
+
+    void getPri2LRUStats(EnvironmentImpl env, LRUDebugStats stats) {
+        stats.reset();
+        for (int i = 0; i < numLRULists; ++i) {
+            pri2LRUSet[i].getStats(env, stats);
+        }
+    }
+
+    /**
+     * This method is called from application threads for every cursor
+     * operation.
+     */
+    public void doCriticalEviction(boolean backgroundIO) {
+
+        if (arbiter.isOverBudget()) {
+
+            /*
+             * Any time there's excessive cache usage, let the thread pool know
+             * there's work to do.
+             */
+            alert();
+
+            /*
+             * Only do eviction if the memory budget overage fulfills the
+             * critical eviction requirements. We want to avoid having
+             * application threads do eviction.
+             */
+            if (arbiter.needCriticalEviction()) {
+                doEvict(EvictionSource.CRITICAL, backgroundIO);
+            }
+        }
+    }
+
+    /**
+     * This method is called from daemon threads for every operation.
+     */
+    public void doDaemonEviction(boolean backgroundIO) {
+
+        if (arbiter.isOverBudget()) {
+
+            /*
+             * Any time there's excessive cache usage, let the thread pool know
+             * there's work to do.
+             */
+            alert();
+
+            /*
+             * Only do eviction if the memory budget overage fulfills the
+             * critical eviction requirements. This allows evictor threads to
+             * take the burden of eviction whenever possible, rather than
+             * slowing other threads and risking a growing cleaner or
+             * compressor backlog.
+             */
+            if (arbiter.needCriticalEviction()) {
+                doEvict(EvictionSource.DAEMON, backgroundIO);
+            }
+        }
+    }
+
+    /*
+     * Eviction invoked by the API
+     */
+    public void doManualEvict()
+        throws DatabaseException {
+
+        doEvict(EvictionSource.MANUAL, true/*backgroundIO*/);
+    }
+
+    /**
+     * Evict a specific IN, used by tests.
+ */ + public long doTestEvict(IN target, EvictionSource source) { + return doEvictOneIN( + target, + source == EvictionSource.CACHEMODE ? CacheMode.EVICT_BIN : null, + source); + } + + /** + * Evict a specific IN, used by cache modes. + */ + public long doCacheModeEvict(IN target, CacheMode cacheMode) { + return doEvictOneIN(target, cacheMode, EvictionSource.CACHEMODE); + } + + private long doEvictOneIN(IN target, + CacheMode cacheMode, + EvictionSource source) { + assert(target.isBIN()); + assert(target.isLatchOwner()); + + /* + * If a dirty BIN is being evicted via a cache mode and an off-heap + * cache is not used, do not evict the node since it would be + * logged. When an off-heap cache is used, we can evict dirty nodes + * without logging them. + */ + if (source == EvictionSource.CACHEMODE && + target.getDirty() && + !useOffHeapCache) { + + try { + long evictedBytes = 0; + if (cacheMode == CacheMode.EVICT_BIN) { + evictedBytes = target.partialEviction(); + evictedBytes &= ~IN.NON_EVICTABLE_IN; + if (evictedBytes > 0) { + nNodesStripped.increment(); + numBytesEvicted[source.ordinal()].add(evictedBytes); + } + } + return evictedBytes; + } finally { + target.releaseLatch(); + } + } + + if (!reentrancyGuard.enter()) { + return 0; + } + + try { + remove(target); + + target.releaseLatch(); + + final long evictedBytes = processTarget( + null /* rootEvictor */, target, null /* parent */, + -1 /* entry index within parent */, + false /* backgroundIO */, source, null /* debug stats */); + + numBytesEvicted[source.ordinal()].add(evictedBytes); + + return evictedBytes; + + } finally { + reentrancyGuard.leave(); + } + } + + /** + * Let the eviction pool know there's work to do. + */ + public void alert() { + + if (!runEvictorThreads) { + return; + } + + /* + * For a private evictor/cache, we can prevent background eviction + * during recovery here. For a shared cache, we must do it on a + * per-target basis, in evictBatch(). + */ + if (!isShared && firstEnvImpl.isInInit()) { + return; + } + + /* + * This check is meant to avoid the lock taken by + * ArrayBlockingQueue.offer() when this is futile. The lock reduces + * concurrency because this method is called so frequently. + */ + if (activePoolThreads.get() >= maxPoolThreads) { + return; + } + + evictionPool.execute(new BackgroundEvictTask(this)); + } + + /** + * This is where the real work is done. + * Can execute concurrently, called by app threads or by background evictor + */ + void doEvict(EvictionSource source, boolean backgroundIO) + throws DatabaseException { + + if (!isEnabled) { + return; + } + + if (!reentrancyGuard.enter()) { + return; + } + + nEvictionRuns.increment(); + + try { + + /* + * Repeat as necessary to keep up with allocations. Stop if no + * progress is made, to prevent an infinite loop. + */ + boolean progress = true; + int nBatches = 0; + long bytesEvicted = 0; + + EvictionDebugStats evictionStats = null; + if (collectEvictionDebugStats) { + evictionStats = new EvictionDebugStats(); + evictionStats.reset(); + evictionStats.pri1Size = getPri1LRUSize(); + evictionStats.pri2Size = getPri2LRUSize(); + } + + while (progress && + nBatches < MAX_BATCHES_PER_RUN && + !shutdownRequested.get()) { + + /* + * Do eviction only if memory consumption is over budget. + * If so, try to evict (memoryConsumption + M - maxMemory) + * bytes, where M is a config param. 
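+ *
+ * A sketch of the pledge, with M standing for the evictBytes config
+ * param (the exact accounting lives in arbiter.getEvictionPledge()):
+ *
+ *   long overage = memoryConsumption + M - maxMemory;
+ *   maxEvictBytes = (overage > 0) ? overage : 0;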
+ */ + long maxEvictBytes = arbiter.getEvictionPledge(); + + if (maxEvictBytes == 0) { + break; + } + + bytesEvicted = evictBatch( + source, backgroundIO, maxEvictBytes, evictionStats); + + numBytesEvicted[source.ordinal()].add(bytesEvicted); + + if (bytesEvicted == 0) { + + if (arbiter.stillNeedsEviction() && + numNoEvictionEvents == 0 && + logger.isLoggable(Level.FINE)) { + ++numNoEvictionEvents; + LoggerUtils.fine( + logger, firstEnvImpl, + "Eviction pass failed to evict any bytes"); + } else { + ++numNoEvictionEvents; + } + + progress = false; + } else { + numNoEvictionEvents = 0; + } + + nBatches += 1; + } + + if (evictionStats != null) { + System.out.println(evictionStats.toString()); + } + + /* For debugging. */ + if (source == EvictionSource.EVICTORTHREAD) { + if (logger.isLoggable(Level.FINEST)) { + LoggerUtils.finest(logger, firstEnvImpl, + "Thread evicted " + bytesEvicted + + " bytes in " + nBatches + " batches"); + } + } + } finally { + reentrancyGuard.leave(); + } + } + + /** + * Not private because it is used in unit test. + */ + long evictBatch( + Evictor.EvictionSource source, + boolean bgIO, + long maxEvictBytes, + EvictionDebugStats evictionStats) + throws DatabaseException { + + long totalEvictedBytes = 0; + boolean inPri1LRUSet = true; + int numNodesScannedThisBatch = 0; + long maxNodesScannedThisBatch = getPri1LRUSize(); + maxNodesScannedThisBatch += numLRULists; + + assert TestHookExecute.doHookSetupIfSet(evictProfile); + + /* + * Perform special eviction,i.e., evict non-tree memory. + * + * TODO: special eviction is done serially. We may want to absolve + * application threads of that responsibility, to avoid blocking, and + * only have evictor threads do special eviction. + */ + synchronized (this) { + if (isShared) { + int numEnvs = envInfos.size(); + if (numEnvs > 0) { + if (specialEvictionIndex >= numEnvs) { + specialEvictionIndex = 0; + } + EnvInfo info = envInfos.get(specialEvictionIndex); + specialEvictionIndex++; + + totalEvictedBytes = info.env.specialEviction(); + } + } else { + totalEvictedBytes = firstEnvImpl.specialEviction(); + } + } + + /* Use local caching to reduce DbTree.getDb overhead. [#21330] */ + final DbCache dbCache = new DbCache(isShared, dbCacheClearCount); + final MemoryBudget memBudget = firstEnvImpl.getMemoryBudget(); + + try { + while (totalEvictedBytes < maxEvictBytes && + numNodesScannedThisBatch < maxNodesScannedThisBatch && + arbiter.stillNeedsEviction()) { + + if (!isShared && !memBudget.isTreeUsageAboveMinimum()) { + break; + } + + final IN target = getNextTarget(inPri1LRUSet); + + numNodesScannedThisBatch++; + + if (target != null) { + + nNodesTargeted.increment(); + + if (evictionStats != null) { + evictionStats.incNumSelected(); + } + + assert TestHookExecute.doHookIfSet(evictProfile, target); + + final DatabaseImpl targetDb = target.getDatabase(); + final EnvironmentImpl dbEnv = targetDb.getEnv(); + + /* + * Check to make sure the target's DB was not deleted or + * truncated after selecting the target. Furthermore, + * prevent the DB from being deleted while we're working + * with it (this is done by the dbCache.getDb() call). + * + * Also check that the refreshedDb is the same instance + * as the targetDb. If not, then the MapLN associated with + * targetDb was recently evicted (which can happen after + * all handles to the DB are closed). 
In this case, + * targetDb and its INs are orphaned and cannot be + * processed; they should simply be removed from the + * LRU [#21686] + */ + final DatabaseImpl refreshedDb = + dbCache.getDb(dbEnv, targetDb.getId()); + + if (refreshedDb != null && + !refreshedDb.isDeleted() && + refreshedDb == targetDb) { + + long evictedBytes = 0; + + if (target.isRoot()) { + RootEvictor rootEvictor = new RootEvictor(); + rootEvictor.target = target; + rootEvictor.backgroundIO = bgIO; + rootEvictor.source = source; + rootEvictor.stats = evictionStats; + + /* try to evict the root */ + targetDb.getTree().withRootLatchedExclusive( + rootEvictor); + + /* + * If the root IN was flushed, write the dirtied + * MapLN. + */ + if (rootEvictor.flushed) { + dbEnv.getDbTree().modifyDbRoot(targetDb); + } + + evictedBytes = rootEvictor.evictedBytes; + + } else { + evictedBytes = processTarget( + null, /* rootEvictor */ + target, null, /* parent */ + -1, /* parent entry index */ + bgIO, source, evictionStats); + } + + totalEvictedBytes += evictedBytes; + + } else { + /* + * We don't expect to find in the INList an IN whose + * database that has finished delete processing, + * because it should have been removed from the + * INList during post-delete cleanup. + */ + if (targetDb.isDeleteFinished() && + target.getInListResident()) { + final String inInfo = + " IN type=" + target.getLogType() + " id=" + + target.getNodeId() + " not expected on INList"; + final String errMsg = (refreshedDb == null) ? + inInfo : + ("Database " + refreshedDb.getDebugName() + + " id=" + refreshedDb.getId() + " rootLsn=" + + DbLsn.getNoFormatString + (refreshedDb.getTree().getRootLsn()) + + ' ' + inInfo); + + throw EnvironmentFailureException. + unexpectedState(errMsg); + } + } + } + + /* + * Move to the priority-2 LRUSet, if we are done processing the + * priority-1 LRUSet. + */ + if (numNodesScannedThisBatch >= maxNodesScannedThisBatch && + totalEvictedBytes < maxEvictBytes && + inPri1LRUSet) { + + numNodesScannedThisBatch = 0; + maxNodesScannedThisBatch = getPri2LRUSize(); + maxNodesScannedThisBatch += numLRULists; + inPri1LRUSet = false; + + if (evictionStats != null) { + evictionStats.inPri1LRU = false; + } + } + } + } finally { + dbCache.releaseDbs(firstEnvImpl); + } + + return totalEvictedBytes; + } + + /** + * Returns a copy of the LRU list, for tightly controlled testing. + * Requires that there is exactly one LRU list configured. 
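+ *
+ * Test-only usage sketch (the returned list is a copy, so iterating it
+ * does not touch the live LRU structures):
+ *
+ *   List<IN> lru = evictor.getPri1LRUList();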
+ */ + public List getPri1LRUList() { + assert pri1LRUSet.length == 1; + return pri1LRUSet[0].copyList(); + } + + private IN getNextTarget(boolean inPri1LRUSet) { + + if (inPri1LRUSet) { + int listId = Math.abs(nextPri1LRUList++) % numLRULists; + IN target = pri1LRUSet[listId].removeFront(); + + if (target != null && + ((traceUINs && target.isUpperIN()) || + (traceBINs && target.isBIN()))) { + LoggerUtils.envLogMsg( + traceLevel, target.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + target.getEnv().getName() + + " XXXX priority-1 Eviction target: " + + target.getNodeId()); + } + + return target; + } + + int listId = Math.abs(nextPri2LRUList++) % numLRULists; + IN target = pri2LRUSet[listId].removeFront(); + + if (target != null && + ((traceUINs && target.isUpperIN()) || + (traceBINs && target.isBIN()))) { + LoggerUtils.envLogMsg( + traceLevel, target.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + target.getEnv().getName() + + " XXXX Pri2 Eviction target: " + target.getNodeId()); + } + + return target; + } + + class RootEvictor implements WithRootLatched { + + IN target; + boolean backgroundIO; + EvictionSource source; + EvictionDebugStats stats = null; + + ChildReference rootRef; + boolean flushed = false; + long evictedBytes = 0; + + public IN doWork(ChildReference root) + throws DatabaseException { + + /* + * Do not call fetchTarget since this root or DB should be + * resident already if it is to be the target of eviction. If + * it is not present, it has been evicted by another thread and + * should not be fetched for two reasons: 1) this would be + * counterproductive, 2) to guard against bringing in a root + * for an evicted DB. + */ + IN rootIN = (IN) root.getTarget(); + if (rootIN == null) { + return null; + } + + rootRef = root; + + /* + * Latch the target and re-check that all conditions still hold. + * The latch on the target will be released by processTarget(). + */ + rootIN.latchNoUpdateLRU(); + + if (rootIN == target && rootIN.isRoot()) { + evictedBytes = processTarget( + this, null, /* target */ + null, /* parent */ + -1, /* entry index within parent */ + backgroundIO, source, stats); + } else { + rootIN.releaseLatch(); + } + + return null; + } + } + + /** + * Decide what to do with an eviction target and carry out the decision. + * Return the number of bytes evicted (if any). + * + * This method is called from evictBatch() after an IN has been selected + * for eviction. It EX-latches the IN and determines whether it can/should + * really be evicted, and if not what is the appropriate action to be + * taken by the evicting thread. + * + * If a decision is taken to evict the target or mutate it to a BINDelta, + * the target must first be unlatched and its parent must be searched + * within the tree. During this search, many things can happen to the + * unlatched target, and as a result, after the parent is found and the + * target is relatched, processTarget() calls itself recursively to + * re-consider all the possible actions on the target. 
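+ *
+ * Rough outline of the possible outcomes, as implemented below (a
+ * sketch, not a contract):
+ *
+ *   skip:        target was evicted/detached concurrently, or is the
+ *                root of the mapping/naming DB
+ *   putBack:     recovery in progress, tree usage at minimum, or pinned
+ *   strip:       partialEviction() freed bytes; node rejoins its LRU
+ *   mutate:      full BIN compacted to a BIN-delta, then put back
+ *   moveToPri2:  dirty node (or off-heap BIN children) gets a second
+ *                chance on the priority-2 LRUSet
+ *   evict:       otherwise detach from the parent, logging first if dirty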
+ */ + private long processTarget( + RootEvictor rootEvictor, + IN target, + IN parent, + int index, + boolean bgIO, + EvictionSource source, + EvictionDebugStats stats) + throws DatabaseException { + + boolean targetIsLatched = false; + boolean parentIsLatched = false; + long evictedBytes = 0; + + if (stats != null) { + stats.withParent = (parent != null || rootEvictor != null); + } + + try { + if (parent != null) { + assert(parent.isLatchExclusiveOwner()); + parentIsLatched = true; + + if (target != parent.getTarget(index)) { + skip(target, stats); + return 0; + } + + target.latchNoUpdateLRU(); + + } else if (rootEvictor != null) { + target = rootEvictor.target; + + } else { + target.latchNoUpdateLRU(); + } + + targetIsLatched = true; + + DatabaseImpl db = target.getDatabase(); + EnvironmentImpl dbEnv = db.getEnv(); + + if (!target.getInListResident() || contains(target)) { + /* + * The node was put back to the LRU, and then possibly evicted + * by other threads before this thread could latch it. + */ + skip(target, stats); + return 0; + } + + /* + * Normally, UINs that have cached children are not in the LRU, + * and as a result, cannot be selected for eviction. However, a + * childless UIN may be selected for eviction and then acquire + * cached children in the time after its removal from its LRUSet + * and before it is EX-latched by the evicting thread. + */ + if (target.isUpperIN() && target.hasCachedChildrenFlag()) { + assert(target.hasCachedChildren()); + skip(target, stats); + return 0; + } + + /* + * Disallow eviction of the mapping and naming DB roots, because + * their eviction and re-fetching is a special case that is not + * worth supporting. [#13415] + */ + if (target.isRoot()) { + DatabaseId dbId = db.getId(); + if (dbId.equals(DbTree.ID_DB_ID) || + dbId.equals(DbTree.NAME_DB_ID)) { + skip(target, stats); + return 0; + } + } + + /* + * For a shared cache, we must prevent background eviction during + * recovery here, on a per-target basis. For a private + * evictor/cache, we can do it in alert(). + */ + if (isShared && dbEnv.isInInit() && + source == EvictionSource.EVICTORTHREAD) { + putBack(target, stats, 0); + return 0; + } + + assert !(dbEnv.isInInit() && + source == EvictionSource.EVICTORTHREAD); + + if (isShared) { + + if (dbEnv.isClosed() || dbEnv.wasInvalidated()) { + skip(target, stats); + return 0; + } + + if (!dbEnv.getMemoryBudget().isTreeUsageAboveMinimum()) { + putBack(target, stats, 1); + return 0; + } + } + + if (target.isPinned()) { + putBack(target, stats, 2); + return 0; + } + + /* + * Attempt partial eviction. The partialEviction() method also + * determines whether the IN in evictable or not. For now, + * partialEviction() will consider a node to be non-evictable if + * it is a BIN that (a) has cursors registered on it, or (b) has + * a resident non-evictable LN, which can happen only for MapLNs + * (see MapLN.isEvictable()). + */ + evictedBytes = target.partialEviction(); + + boolean isEvictable = (evictedBytes & IN.NON_EVICTABLE_IN) == 0; + evictedBytes &= ~IN.NON_EVICTABLE_IN; + + /* + * If we could evict some bytes from this node, put it back in + * the LRU, unless it is a BIN being explicitly evicted via a cache + * mode, in which case we should evict it, if possible. + */ + if (evictedBytes > 0 && + (target.isUpperIN() || source != EvictionSource.CACHEMODE)) { + strippedPutBack(target, stats); + return evictedBytes; + } + + /* + * If the node is not evictable, put it back. 
+ * + * TODO: Logically this check should come after BIN mutation, not + * before, but currently this would have little or no impact. + */ + if (!isEvictable) { + putBack(target, stats, 5); + return evictedBytes; + } + + /* + * Give the node a second chance, if it is a full BIN that can be + * mutated to a BINDelta and it is not a BIN being explicitly + * evicted via a cache mode. + */ + if (target.isBIN() && + source != EvictionSource.CACHEMODE && + mutateBins && + ((BIN)target).canMutateToBINDelta()) { + + BIN bin = (BIN)target; + evictedBytes += bin.mutateToBINDelta(); + assert(evictedBytes > 0); + binDeltaPutBack(target, stats); + + return evictedBytes; + } + + /* + * Give the node a second chance, if it is dirty and is not in the + * priority-2 LRUSet already. + */ + if (useDirtyLRUSet && + target.getDirty() && + !target.isInPri2LRU()) { + + moveToPri2LRU(target, stats); + return evictedBytes; + } + + /* + * Give the node a second chance, if it has off-heap BIN children + * and is not in the priority-2 LRUSet already. + */ + if (useOffHeapCache && + target.hasOffHeapBINIds() && + !target.isInPri2LRU()) { + + moveToPri2LRU(target, stats); + return evictedBytes; + } + + /* + * Evict the node. To do so, we must find and latch the + * parent IN first, if we have not done this already. + */ + if (rootEvictor != null) { + evictedBytes += evictRoot(rootEvictor, bgIO, source, stats); + + } else if (parent != null) { + evictedBytes += evict(target, parent, index, bgIO, stats); + + } else { + assert TestHookExecute.doHookIfSet(preEvictINHook); + targetIsLatched = false; + evictedBytes += findParentAndRetry( + target, bgIO, source, stats); + } + + return evictedBytes; + + } finally { + if (targetIsLatched) { + target.releaseLatch(); + } + + if (parentIsLatched) { + parent.releaseLatch(); + } + } + } + + private void skip(IN target, EvictionDebugStats stats) { + + if ((traceUINs && target.isUpperIN()) || + (traceBINs && target.isBIN())) { + LoggerUtils.envLogMsg( + traceLevel, target.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + target.getEnv().getName() + + " XXXX SKIPPED Eviction Target: " + + target.getNodeId()); + } + + nNodesSkipped.increment(); + } + + private void putBack(IN target, EvictionDebugStats stats, int caller) { + + if ((traceUINs && target.isUpperIN()) || + (traceBINs && target.isBIN())) { + LoggerUtils.envLogMsg( + traceLevel, target.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + target.getEnv().getName() + + " XXXX PUT-BACK-" + caller + " Eviction Target: " + + target.getNodeId()); + } + + if (target.isInPri2LRU()) { + pri2AddBack(target); + } else { + addBack(target); + } + + if (stats != null) { + stats.incNumPutBack(); + } + + nNodesPutBack.increment(); + } + + private void strippedPutBack(IN target, EvictionDebugStats stats) { + + if ((traceUINs && target.isUpperIN()) || + (traceBINs && target.isBIN())) { + LoggerUtils.envLogMsg( + traceLevel, target.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + target.getEnv().getName() + + " XXXX STRIPPED Eviction Target: " + + target.getNodeId()); + } + + if (target.isInPri2LRU()) { + pri2AddBack(target); + } else { + addBack(target); + } + + if (stats != null) { + stats.incNumStripped(); + } + + nNodesStripped.increment(); + } + + private void binDeltaPutBack(IN target, EvictionDebugStats stats) { + + if ((traceUINs && target.isUpperIN()) || + (traceBINs && target.isBIN())) { + 
LoggerUtils.envLogMsg( + traceLevel, target.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + target.getEnv().getName() + + " XXXX MUTATED Eviction Target: " + + target.getNodeId()); + } + + if (target.isInPri2LRU()) { + pri2AddBack(target); + } else { + addBack(target); + } + + if (stats != null) { + stats.incNumMutated(); + } + + nNodesMutated.increment(); + } + + private void moveToPri2LRU(IN target, EvictionDebugStats stats) { + + if ((traceUINs && target.isUpperIN()) || + (traceBINs && target.isBIN())) { + LoggerUtils.envLogMsg( + traceLevel, target.getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + target.getEnv().getName() + + " XXXX MOVED-TO_PRI2 Eviction Target: " + + target.getNodeId()); + } + + if (stats != null) { + stats.incNumMoved(target.isBIN()); + } + + pri2AddFront(target); + + nNodesMovedToPri2LRU.increment(); + } + + private long findParentAndRetry( + IN target, + boolean backgroundIO, + EvictionSource source, + EvictionDebugStats stats) { + + Tree tree = target.getDatabase().getTree(); + + /* + * Pass false for doFetch to avoid fetching a full BIN when a + * delta is in cache. This also avoids a fetch when the node + * was evicted while unlatched, but that should be very rare. + */ + SearchResult result = tree.getParentINForChildIN( + target, false, /*useTargetLevel*/ + false, /*doFetch*/ CacheMode.UNCHANGED); + + if (result.exactParentFound) { + return processTarget(null, /* rootEvictor */ + target, + result.parent, + result.index, + backgroundIO, + source, + stats); + } + + /* + * The target has been detached from the tree and it should stay + * out of the LRU. It should not be in the INList, because whenever + * we detach a node we remove it from the INList, but in case we + * forgot to do this somewhere, we can just remove it here. + */ + assert(result.parent == null); + + target.latchNoUpdateLRU(); + + try { + if (target.getInListResident()) { + + firstEnvImpl.getInMemoryINs().remove(target); + + throw EnvironmentFailureException.unexpectedState( + "Node " + target.getNodeId() + + " has been detached from the in-memory tree," + + " but it is still in the INList. lastLogged=" + + DbLsn.getNoFormatString(target.getLastLoggedLsn())); + } + } finally { + target.releaseLatch(); + } + + return 0; + } + + private long evict( + IN target, + IN parent, + int index, + boolean backgroundIO, + EvictionDebugStats stats) { + + final DatabaseImpl db = target.getDatabase(); + final EnvironmentImpl dbEnv = db.getEnv(); + final OffHeapCache ohCache = dbEnv.getOffHeapCache(); + + //System.out.println("Evicting BIN " + target.getNodeId()); + + boolean storedOffHeap = false; + + if (useOffHeapCache && target.isBIN()) { + storedOffHeap = ohCache.storeEvictedBIN( + (BIN) target, parent, index); + } + + if (target.getNormalizedLevel() == 2) { + if (!ohCache.flushAndDiscardBINChildren(target, backgroundIO)) { + /* Could not log a dirty BIN. See below. */ + skip(target, stats); + return 0; + } + } + + boolean logged = false; + long loggedLsn = DbLsn.NULL_LSN; + + if (target.getDirty() && !storedOffHeap) { + /* + * Cannot evict dirty nodes in a read-only environment, or when a + * disk limit has been exceeded. We can assume that the cache will + * not overflow with dirty nodes because writes are prohibited. 
+ */ + if (dbEnv.isReadOnly() || dbEnv.getDiskLimitViolation() != null) { + skip(target, stats); + return 0; + } + + Provisional provisional = dbEnv.coordinateWithCheckpoint( + db, target.getLevel(), parent); + + loggedLsn = target.log( + allowBinDeltas, provisional, backgroundIO, parent); + + logged = true; + } + + long evictedBytes = target.getBudgetedMemorySize(); + + parent.detachNode(index, logged /*updateLsn*/, loggedLsn); + + nNodesEvicted.increment(); + + if (logged) { + nDirtyNodesEvicted.increment(); + } + + if (stats != null) { + stats.incNumEvicted(target.isBIN()); + } + + return evictedBytes; + } + + private long evictRoot( + RootEvictor rootEvictor, + boolean backgroundIO, + EvictionSource source, + EvictionDebugStats stats) { + + final ChildReference rootRef = rootEvictor.rootRef; + final IN target = (IN) rootRef.getTarget(); + final DatabaseImpl db = target.getDatabase(); + final EnvironmentImpl dbEnv = db.getEnv(); + final INList inList = dbEnv.getInMemoryINs(); + + if (target.getNormalizedLevel() == 2) { + if (!dbEnv.getOffHeapCache().flushAndDiscardBINChildren( + target, backgroundIO)) { + /* Could not log a dirty BIN. See below. */ + skip(target, stats); + return 0; + } + } + + if (target.getDirty()) { + /* + * Cannot evict dirty nodes in a read-only environment, or when a + * disk limit has been exceeded. We can assume that the cache will + * not overflow with dirty nodes because writes are prohibited. + */ + if (dbEnv.isReadOnly() || dbEnv.getDiskLimitViolation() != null) { + skip(target, stats); + return 0; + } + + Provisional provisional = dbEnv.coordinateWithCheckpoint( + db, target.getLevel(), null /*parent*/); + + long newLsn = target.log( + false /*allowDeltas*/, provisional, + backgroundIO, null /*parent*/); + + rootRef.setLsn(newLsn); + rootEvictor.flushed = true; + } + + inList.remove(target); + + long evictBytes = target.getBudgetedMemorySize(); + + rootRef.clearTarget(); + + nNodesEvicted.increment(); + nRootNodesEvicted.increment(); + + if (rootEvictor.flushed) { + nDirtyNodesEvicted.increment(); + } + + if (stats != null) { + stats.incNumEvicted(false); + } + + return evictBytes; + } + + /* For unit testing only. */ + public void setRunnableHook(TestHook hook) { + arbiter.setRunnableHook(hook); + } + + /* For unit testing only. */ + public void setPreEvictINHook(TestHook hook) { + preEvictINHook = hook; + } + + /* For unit testing only. */ + public void setEvictProfileHook(TestHook hook) { + evictProfile = hook; + } + + public StatGroup getStatsGroup() { + return stats; + } + + /** + * Load stats. + */ + public StatGroup loadStats(StatsConfig config) { + + if (isShared) { + sharedCacheEnvs.set(envInfos.size()); + } + + float binFetchMisses = (float)nBINFetchMiss.get(); + float binFetches = (float)nBINFetch.get(); + + binFetchMissRatio.set( + (binFetches > 0 ? (binFetchMisses / binFetches) : 0)); + + StatGroup copy = stats.cloneGroup(config.getClear()); + + /* + * These stats are not cleared. They represent the current state of + * the cache. 
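+ *
+ * They surface through the public stats API; a usage sketch:
+ *
+ *   StatsConfig config = new StatsConfig();
+ *   config.setClear(true); // clears counters, but not these instant stats
+ *   EnvironmentStats envStats = env.getStats(config);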
+ */ + new LongStat(copy, CACHED_IN_SPARSE_TARGET, nINSparseTarget.get()); + new LongStat(copy, CACHED_IN_NO_TARGET, nINNoTarget.get()); + new LongStat(copy, CACHED_IN_COMPACT_KEY, nINCompactKey.get()); + + new LongStat(copy, PRI1_LRU_SIZE, getPri1LRUSize()); + new LongStat(copy, PRI2_LRU_SIZE, getPri2LRUSize()); + + copy.addAll(getINListStats(config)); + + return copy; + } + + private StatGroup getINListStats(StatsConfig config) { + + if (isShared) { + + StatGroup totalINListStats = new StatGroup("temp", "temp"); + + if (config.getFast()) { + + /* + * This is a slow stat for shared envs, because of the need to + * synchronize. + */ + return totalINListStats; + } + + List copy = null; + synchronized(this) { + copy = new ArrayList(envInfos); + } + + for (EnvInfo ei: copy) { + totalINListStats.addAll(ei.env.getInMemoryINs().loadStats()); + } + + return totalINListStats; + } else { + return firstEnvImpl.getInMemoryINs().loadStats(); + } + } + + public void incNumLNsEvicted(long inc) { + nLNsEvicted.add(inc); + } + + /** + * Update the appropriate fetch stat, based on node type. + */ + public void incLNFetchStats(boolean isMiss) { + nLNFetch.increment(); + if (isMiss) { + nLNFetchMiss.increment(); + } + } + + public void incUINFetchStats(boolean isMiss) { + nUpperINFetch.increment(); + if (isMiss) { + nUpperINFetchMiss.increment(); + } + } + + public void incBINFetchStats(boolean isMiss, boolean isDelta) { + nBINFetch.increment(); + if (isMiss) { + nBINFetchMiss.increment(); + if (isDelta) { + nBINDeltaFetchMiss.increment(); + } + } + } + + public void incFullBINMissStats() { + nFullBINMiss.increment(); + } + + public void incBinDeltaBlindOps() { + nBinDeltaBlindOps.increment(); + } + + public AtomicLong getNINSparseTarget() { + return nINSparseTarget; + } + + public AtomicLong getNINNoTarget() { + return nINNoTarget; + } + + public AtomicLong getNINCompactKey() { + return nINCompactKey; + } + + + static class ReentrancyGuard { + private final ConcurrentHashMap activeThreads; + private final EnvironmentImpl envImpl; + private final Logger logger; + + ReentrancyGuard(EnvironmentImpl envImpl, Logger logger) { + this.envImpl = envImpl; + this.logger = logger; + activeThreads = new ConcurrentHashMap(); + } + + boolean enter() { + Thread thisThread = Thread.currentThread(); + if (activeThreads.containsKey(thisThread)) { + /* We don't really expect a reentrant call. */ + LoggerUtils.severe(logger, envImpl, + "reentrant call to eviction from " + + LoggerUtils.getStackTrace()); + + /* If running w/assertions, in testing mode, assert here. 
*/ + assert false: "reentrant call to eviction from " + + LoggerUtils.getStackTrace(); + return false; + } + + activeThreads.put(thisThread, thisThread); + return true; + } + + void leave() { + assert activeThreads.contains(Thread.currentThread()); + activeThreads.remove(Thread.currentThread()); + } + } + + static class BackgroundEvictTask implements Runnable { + + private final Evictor evictor; + private final boolean backgroundIO; + + BackgroundEvictTask(Evictor evictor) { + this.evictor = evictor; + this.backgroundIO = true; + } + + public void run() { + evictor.activePoolThreads.incrementAndGet(); + try { + evictor.doEvict(EvictionSource.EVICTORTHREAD, backgroundIO); + } finally { + evictor.activePoolThreads.decrementAndGet(); + } + } + } + + static class RejectEvictHandler implements RejectedExecutionHandler { + + private final AtomicLongStat threadUnavailableStat; + + RejectEvictHandler(AtomicLongStat threadUnavailableStat) { + this.threadUnavailableStat = threadUnavailableStat; + } + + public void rejectedExecution(Runnable r, + ThreadPoolExecutor executor) { + threadUnavailableStat.increment(); + } + } + + /** + * Caches DatabaseImpls to reduce DbTree.getDb overhead. + * + * SharedEvictor, unlike PrivateEvictor, must maintain a cache map for each + * EnvironmentImpl, since each cache map is logically associated with a + * single DbTree instance. + */ + static class DbCache { + + boolean shared = false; + + int nOperations = 0; + + int dbCacheClearCount = 0; + + final Map> envMap; + + final Map dbMap; + + DbCache(boolean shared, int dbCacheClearCount) { + + this.shared = shared; + this.dbCacheClearCount = dbCacheClearCount; + + if (shared) { + envMap = + new HashMap>(); + + dbMap = null; + } else { + dbMap = new HashMap(); + envMap = null; + } + } + + /** + * Calls DbTree.getDb for the given environment and database ID, and + * caches the result to optimize multiple calls for the same DB. + * + * @param env identifies which environment the dbId parameter + * belongs to. For PrivateEvictor, it is the same as the + * Evictor.firstEnvImpl field. + * + * @param dbId is the DB to get. + */ + DatabaseImpl getDb(EnvironmentImpl env, DatabaseId dbId) { + + Map map; + + if (shared) { + map = envMap.get(env); + if (map == null) { + map = new HashMap(); + envMap.put(env, map); + } + } else { + map = dbMap; + } + + /* + * Clear DB cache after dbCacheClearCount operations, to + * prevent starving other threads that need exclusive access to + * the MapLN (for example, DbTree.deleteMapLN). [#21015] + * + * Note that we clear the caches for all environments after + * dbCacheClearCount total operations, rather than after + * dbCacheClearCount operations for a single environment, + * because the total is a more accurate representation of + * elapsed time, during which other threads may be waiting for + * exclusive access to the MapLN. + */ + nOperations += 1; + if ((nOperations % dbCacheClearCount) == 0) { + releaseDbs(env); + } + + return env.getDbTree().getDb(dbId, -1, map); + } + + /** + * Calls DbTree.releaseDb for cached DBs, and clears the cache. 
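+ *
+ * Intended pairing with getDb(), as in evictBatch() above (sketch):
+ *
+ *   DbCache dbCache = new DbCache(isShared, dbCacheClearCount);
+ *   try {
+ *       DatabaseImpl db = dbCache.getDb(dbEnv, dbId);
+ *       // ... use db while it is protected from deletion ...
+ *   } finally {
+ *       dbCache.releaseDbs(firstEnvImpl);
+ *   }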
+ */ + void releaseDbs(EnvironmentImpl env) { + if (shared) { + for (Map.Entry> + entry : envMap.entrySet()) { + + final EnvironmentImpl sharingEnv = entry.getKey(); + final Map map = entry.getValue(); + + sharingEnv.getDbTree().releaseDbs(map); + map.clear(); + } + } else { + env.getDbTree().releaseDbs(dbMap); + dbMap.clear(); + } + } + } +} diff --git a/src/com/sleepycat/je/evictor/EvictorStatDefinition.java b/src/com/sleepycat/je/evictor/EvictorStatDefinition.java new file mode 100644 index 0000000..2b832f0 --- /dev/null +++ b/src/com/sleepycat/je/evictor/EvictorStatDefinition.java @@ -0,0 +1,406 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * Per-stat Metadata for JE evictor statistics. + */ +public class EvictorStatDefinition { + public static final String GROUP_NAME = "Cache"; + public static final String GROUP_DESC = + "The main cache resides in the Java heap and holds" + + " data, keys, Btree internal nodes, locks and JE metadata."; + + /** + * The StatDefinitions for N_BYTES_EVICTED_XXX stats are generated by + * {@link Evictor.EvictionSource}. + */ + public static final String N_BYTES_EVICTED_EVICTORTHREAD_NAME = + "nBytesEvictedEVICTORTHREAD"; + public static final String N_BYTES_EVICTED_EVICTORTHREAD_DESC = + "Number of bytes evicted by evictor pool threads."; + public static final String N_BYTES_EVICTED_MANUAL_NAME = + "nBytesEvictedMANUAL"; + public static final String N_BYTES_EVICTED_MANUAL_DESC = + "Number of bytes evicted by the Environment.evictMemory or during" + + " Environment startup."; + public static final String N_BYTES_EVICTED_CRITICAL_NAME = + "nBytesEvictedCRITICAL"; + public static final String N_BYTES_EVICTED_CRITICAL_DESC = + "Number of bytes evicted in the application thread because the cache" + + " is over budget."; + public static final String N_BYTES_EVICTED_CACHEMODE_NAME = + "nBytesEvictedCACHEMODE"; + public static final String N_BYTES_EVICTED_CACHEMODE_DESC = + "Number of bytes evicted by operations for which " + + "CacheMode.EVICT_BIN is specified."; + public static final String N_BYTES_EVICTED_DAEMON_NAME = + "nBytesEvictedDAEMON"; + public static final String N_BYTES_EVICTED_DAEMON_DESC = + "Number of bytes evicted by JE deamon threads."; + + public static final String EVICTOR_EVICTION_RUNS_NAME = + "nEvictionRuns"; + public static final String EVICTOR_EVICTION_RUNS_DESC = + "Number of times the background eviction thread is awoken."; + public static final StatDefinition EVICTOR_EVICTION_RUNS = + new StatDefinition( + EVICTOR_EVICTION_RUNS_NAME, + EVICTOR_EVICTION_RUNS_DESC); + + public static final String EVICTOR_NODES_TARGETED_NAME = + "nNodesTargeted"; + public static final String EVICTOR_NODES_TARGETED_DESC = + "Number of nodes (INs) selected as eviction targets."; + public static final StatDefinition EVICTOR_NODES_TARGETED = + new StatDefinition( + EVICTOR_NODES_TARGETED_NAME, + EVICTOR_NODES_TARGETED_DESC); + + public static 
final String EVICTOR_NODES_EVICTED_NAME = + "nNodesEvicted"; + public static final String EVICTOR_NODES_EVICTED_DESC = + "Number of target nodes (INs) evicted from the main cache."; + public static final StatDefinition EVICTOR_NODES_EVICTED = + new StatDefinition( + EVICTOR_NODES_EVICTED_NAME, + EVICTOR_NODES_EVICTED_DESC); + + public static final String EVICTOR_ROOT_NODES_EVICTED_NAME = + "nRootNodesEvicted"; + public static final String EVICTOR_ROOT_NODES_EVICTED_DESC = + "Number of database root nodes (INs) evicted."; + public static final StatDefinition EVICTOR_ROOT_NODES_EVICTED = + new StatDefinition( + EVICTOR_ROOT_NODES_EVICTED_NAME, + EVICTOR_ROOT_NODES_EVICTED_DESC); + + public static final String EVICTOR_DIRTY_NODES_EVICTED_NAME = + "nDirtyNodesEvicted"; + public static final String EVICTOR_DIRTY_NODES_EVICTED_DESC = + "Number of dirty target nodes logged and evicted."; + public static final StatDefinition EVICTOR_DIRTY_NODES_EVICTED = + new StatDefinition( + EVICTOR_DIRTY_NODES_EVICTED_NAME, + EVICTOR_DIRTY_NODES_EVICTED_DESC); + + public static final String EVICTOR_LNS_EVICTED_NAME = + "nLNsEvicted"; + public static final String EVICTOR_LNS_EVICTED_DESC = + "Number of LNs evicted as a result of LRU-based eviction (but not " + + "CacheMode.EVICT_LN)."; + public static final StatDefinition EVICTOR_LNS_EVICTED = + new StatDefinition( + EVICTOR_LNS_EVICTED_NAME, + EVICTOR_LNS_EVICTED_DESC); + + public static final String EVICTOR_NODES_STRIPPED_NAME = + "nNodesStripped"; + public static final String EVICTOR_NODES_STRIPPED_DESC = + "Number of target BINs whose child LNs were evicted (stripped)."; + public static final StatDefinition EVICTOR_NODES_STRIPPED = + new StatDefinition( + EVICTOR_NODES_STRIPPED_NAME, + EVICTOR_NODES_STRIPPED_DESC); + + public static final String EVICTOR_NODES_MUTATED_NAME = + "nNodesMutated"; + public static final String EVICTOR_NODES_MUTATED_DESC = + "Number of target BINs mutated to BIN-deltas."; + public static final StatDefinition EVICTOR_NODES_MUTATED = + new StatDefinition( + EVICTOR_NODES_MUTATED_NAME, + EVICTOR_NODES_MUTATED_DESC); + + public static final String EVICTOR_NODES_PUT_BACK_NAME = + "nNodesPutBack"; + public static final String EVICTOR_NODES_PUT_BACK_DESC = + "Number of target nodes (INs) moved to the cold end of the LRU " + + "list without any action taken on them."; + public static final StatDefinition EVICTOR_NODES_PUT_BACK = + new StatDefinition( + EVICTOR_NODES_PUT_BACK_NAME, + EVICTOR_NODES_PUT_BACK_DESC); + + public static final String EVICTOR_NODES_MOVED_TO_PRI2_LRU_NAME = + "nNodesMovedToDirtyLRU"; + public static final String EVICTOR_NODES_MOVED_TO_PRI2_LRU_DESC = + "Number of nodes (INs) moved from the mixed/priority-1 to the " + + "dirty/priority-2 LRU list."; + public static final StatDefinition EVICTOR_NODES_MOVED_TO_PRI2_LRU = + new StatDefinition( + EVICTOR_NODES_MOVED_TO_PRI2_LRU_NAME, + EVICTOR_NODES_MOVED_TO_PRI2_LRU_DESC); + + public static final String EVICTOR_NODES_SKIPPED_NAME = + "nNodesSkipped"; + public static final String EVICTOR_NODES_SKIPPED_DESC = + "Number of nodes (INs) that did not require any action."; + public static final StatDefinition EVICTOR_NODES_SKIPPED = + new StatDefinition( + EVICTOR_NODES_SKIPPED_NAME, + EVICTOR_NODES_SKIPPED_DESC); + + public static final String EVICTOR_SHARED_CACHE_ENVS_NAME = + "nSharedCacheEnvironments"; + public static final String EVICTOR_SHARED_CACHE_ENVS_DESC = + "Number of Environments sharing the main cache."; + public static final StatDefinition 
EVICTOR_SHARED_CACHE_ENVS = + new StatDefinition( + EVICTOR_SHARED_CACHE_ENVS_NAME, + EVICTOR_SHARED_CACHE_ENVS_DESC, + StatType.CUMULATIVE); + + public static final String LN_FETCH_NAME = + "nLNsFetch"; + public static final String LN_FETCH_DESC = + "Number of LNs (data records) requested by btree operations."; + public static final StatDefinition LN_FETCH = + new StatDefinition( + LN_FETCH_NAME, + LN_FETCH_DESC); + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a UIN. + */ + public static final String UPPER_IN_FETCH_NAME = + "nUpperINsFetch"; + public static final String UPPER_IN_FETCH_DESC = + "Number of Upper INs (non-bottom internal nodes) requested by btree " + + "operations."; + public static final StatDefinition UPPER_IN_FETCH = + new StatDefinition( + UPPER_IN_FETCH_NAME, + UPPER_IN_FETCH_DESC); + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a BIN. + */ + public static final String BIN_FETCH_NAME = + "nBINsFetch"; + public static final String BIN_FETCH_DESC = + "Number of BINs (bottom internal nodes) and BIN-deltas requested by " + + "btree operations."; + public static final StatDefinition BIN_FETCH = + new StatDefinition( + BIN_FETCH_NAME, + BIN_FETCH_DESC); + + public static final String LN_FETCH_MISS_NAME = + "nLNsFetchMiss"; + public static final String LN_FETCH_MISS_DESC = + "Number of LNs (data records) requested by btree operations that " + + "were not in main cache."; + public static final StatDefinition LN_FETCH_MISS = + new StatDefinition( + LN_FETCH_MISS_NAME, + LN_FETCH_MISS_DESC); + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a UIN and that UIN was not already cached. + */ + public static final String UPPER_IN_FETCH_MISS_NAME = + "nUpperINsFetchMiss"; + public static final String UPPER_IN_FETCH_MISS_DESC = + "Number of Upper INs (non-bottom internal nodes) requested by btree " + + "operations that were not in main cache."; + public static final StatDefinition UPPER_IN_FETCH_MISS = + new StatDefinition( + UPPER_IN_FETCH_MISS_NAME, + UPPER_IN_FETCH_MISS_DESC); + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a BIN and that BIN was not already cached. + */ + public static final String BIN_FETCH_MISS_NAME = + "nBINsFetchMiss"; + public static final String BIN_FETCH_MISS_DESC = + "Number of full BINs (bottom internal nodes) and BIN-deltas fetched " + + "to satisfy btree operations that were not in main cache."; + public static final StatDefinition BIN_FETCH_MISS = + new StatDefinition( + BIN_FETCH_MISS_NAME, + BIN_FETCH_MISS_DESC); + + /* + * BIN_FETCH_MISS / BIN_FETCH + */ + public static final String BIN_FETCH_MISS_RATIO_NAME = + "nBINsFetchMissRatio"; + public static final String BIN_FETCH_MISS_RATIO_DESC = + "The BIN fetch miss ratio (nBINsFetchMiss / nBINsFetch)"; + public static final StatDefinition BIN_FETCH_MISS_RATIO = + new StatDefinition( + BIN_FETCH_MISS_RATIO_NAME, + BIN_FETCH_MISS_RATIO_DESC, + StatType.CUMULATIVE); + + /* + * Number of times IN.fetchIN() or IN.fetchINWithNoLatch() was called + * to fetch a BIN, that BIN was not already cached, and a BIN-delta was + * fetched from disk. 
+ */ + public static final String BIN_DELTA_FETCH_MISS_NAME = + "nBINDeltasFetchMiss"; + public static final String BIN_DELTA_FETCH_MISS_DESC = + "Number of BIN-deltas (partial BINs) fetched to satisfy btree " + + "operations that were not in main cache."; + public static final StatDefinition BIN_DELTA_FETCH_MISS = + new StatDefinition( + BIN_DELTA_FETCH_MISS_NAME, + BIN_DELTA_FETCH_MISS_DESC); + + /* + * The number of operations performed blindly in BIN deltas + */ + public static final String BIN_DELTA_BLIND_OPS_NAME = + "nBinDeltaBlindOps"; + public static final String BIN_DELTA_BLIND_OPS_DESC = + "The number of operations performed blindly in BIN deltas"; + public static final StatDefinition BIN_DELTA_BLIND_OPS = + new StatDefinition( + BIN_DELTA_BLIND_OPS_NAME, + BIN_DELTA_BLIND_OPS_DESC); + + /* + * Number of calls to BIN.mutateToFullBIN() + */ + public static final String FULL_BIN_MISS_NAME = + "nFullBINsMiss"; + public static final String FULL_BIN_MISS_DESC = + "Number of times a BIN-delta had to be mutated to a full BIN (and as" + + " a result a full BIN had to be read in from the log)."; + public static final StatDefinition FULL_BIN_MISS = + new StatDefinition( + FULL_BIN_MISS_NAME, + FULL_BIN_MISS_DESC); + + /* + * The number of UINs in the memory-resident tree at the time the + * stats were collected. This is an INSTANT stat. + */ + public static final String CACHED_UPPER_INS_NAME = + "nCachedUpperINs"; + public static final String CACHED_UPPER_INS_DESC = + "Number of upper INs (non-bottom internal nodes) in main cache."; + public static final StatDefinition CACHED_UPPER_INS = + new StatDefinition( + CACHED_UPPER_INS_NAME, + CACHED_UPPER_INS_DESC, + StatType.CUMULATIVE); + + /* + * The number of BINs (full or deltas) in the memory-resident tree at the + * time the stats were collected. This is an INSTANT stat. + */ + public static final String CACHED_BINS_NAME = + "nCachedBINs"; + public static final String CACHED_BINS_DESC = + "Number of BINs (bottom internal nodes) and BIN-deltas in main cache."; + public static final StatDefinition CACHED_BINS = + new StatDefinition( + CACHED_BINS_NAME, + CACHED_BINS_DESC, + StatType.CUMULATIVE); + + /* + * The number of delta-BINs in the memory-resident tree at the time the + * stats were collected. This is an INSTANT stat. + */ + public static final String CACHED_BIN_DELTAS_NAME = + "nCachedBINDeltas"; + public static final String CACHED_BIN_DELTAS_DESC = + "Number of BIN-deltas (partial BINs) in main cache. This is a subset" + + " of the nCachedBINs value."; + public static final StatDefinition CACHED_BIN_DELTAS = + new StatDefinition( + CACHED_BIN_DELTAS_NAME, + CACHED_BIN_DELTAS_DESC, + StatType.CUMULATIVE); + + /* + * Number of eviction tasks that were submitted to the background evictor + * pool, but were refused because all eviction threads were busy. 
+ */ + public static final String THREAD_UNAVAILABLE_NAME = + "nThreadUnavailable"; + public static final String THREAD_UNAVAILABLE_DESC = + "Number of eviction tasks that were submitted to the background " + + "evictor pool, but were refused because all eviction threads " + + "were busy."; + public static final StatDefinition THREAD_UNAVAILABLE = + new StatDefinition( + THREAD_UNAVAILABLE_NAME, + THREAD_UNAVAILABLE_DESC); + + public static final String CACHED_IN_SPARSE_TARGET_NAME = + "nINSparseTarget"; + public static final String CACHED_IN_SPARSE_TARGET_DESC = + "Number of INs that use a compact sparse array representation to " + + "point to child nodes in the main cache."; + public static final StatDefinition CACHED_IN_SPARSE_TARGET = + new StatDefinition( + CACHED_IN_SPARSE_TARGET_NAME, + CACHED_IN_SPARSE_TARGET_DESC, + StatType.CUMULATIVE); + + public static final String CACHED_IN_NO_TARGET_NAME = + "nINNoTarget"; + public static final String CACHED_IN_NO_TARGET_DESC = + "Number of INs that use a compact representation when none of its " + + "child nodes are in the main cache."; + public static final StatDefinition CACHED_IN_NO_TARGET = + new StatDefinition( + CACHED_IN_NO_TARGET_NAME, + CACHED_IN_NO_TARGET_DESC, + StatType.CUMULATIVE); + + public static final String CACHED_IN_COMPACT_KEY_NAME = + "nINCompactKey"; + public static final String CACHED_IN_COMPACT_KEY_DESC = + "Number of INs that use a compact key representation to minimize the" + + " key object representation overhead."; + public static final StatDefinition CACHED_IN_COMPACT_KEY = + new StatDefinition( + CACHED_IN_COMPACT_KEY_NAME, + CACHED_IN_COMPACT_KEY_DESC, + StatType.CUMULATIVE); + + public static final String PRI1_LRU_SIZE_NAME = + "lruMixedSize"; + public static final String PRI1_LRU_SIZE_DESC = + "Number of INs in the mixed/priority-1 LRU "; + public static final StatDefinition PRI1_LRU_SIZE = + new StatDefinition( + PRI1_LRU_SIZE_NAME, + PRI1_LRU_SIZE_DESC, + StatType.CUMULATIVE); + + public static final String PRI2_LRU_SIZE_NAME = + "lruDirtySize"; + public static final String PRI2_LRU_SIZE_DESC = + "Number of INs in the dirty/priority-2 LRU "; + public static final StatDefinition PRI2_LRU_SIZE = + new StatDefinition( + PRI2_LRU_SIZE_NAME, + PRI2_LRU_SIZE_DESC, + StatType.CUMULATIVE); +} diff --git a/src/com/sleepycat/je/evictor/OffHeapAllocator.java b/src/com/sleepycat/je/evictor/OffHeapAllocator.java new file mode 100644 index 0000000..2e49d32 --- /dev/null +++ b/src/com/sleepycat/je/evictor/OffHeapAllocator.java @@ -0,0 +1,176 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +/** + * Implemented by off-heap memory allocators. + * + * The allocator is responsible for allocating and freeing a block of memory + * efficiently, maintaining the size of a block, identifying a block by a long + * integer value (ID), and looking up a memory ID efficiently (in constant + * time) in order to copy bytes, return the size, or free the block. + *
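+ * A minimal lifecycle sketch, using only the methods declared below
+ * (exception handling omitted):
+ *
+ *   byte[] buf = new byte[64];                  // bytes to cache
+ *   long id = allocator.allocate(buf.length);   // zero-filled block
+ *   allocator.copy(buf, 0, id, 0, buf.length);  // heap to off-heap
+ *   allocator.copy(id, 0, buf, 0, buf.length);  // off-heap to heap
+ *   int freed = allocator.free(id);             // size plus overhead
+ *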
        + * The allocator is also responsible for compacting memory when necessary to + * perform allocations efficiently. A special case is when the off-heap cache + * is reduced in size, and memory should be compacted to make memory available + * to the OS; the implementation should account for this case. + *
        + * Another responsibility of the allocator is to estimate the RAM usage for all + * blocks currently allocated, including overhead and space taken by + * fragmentation. It is recognized that this may only be a rough estimate in + * some implementations (the default allocator, for example). See + * {@link #getUsedBytes}. + *
        + * Note that with the default allocator, the size is not a built-in property + * of each block, and the {@link #size} method will be implemented by storing + * the size at the front of the block. The {@code size} method is included in + * the interface to allow for implementations where a block size property is + * naturally available. + *
        + * This interface requires that memory is copied in and out of the Java address + * space to make use of the off-heap cache. A future enhancement might involve + * adding a way to obtain a ByteBuffer for direct access to the off-heap memory + * block, to avoid the copy if this is possible in some implementations. In the + * default implementation, this is not practical without using non-public JVM + * internals and risking incompatibilities. + *
        + * All methods in the allocator must be thread safe, and contention among + * threads should be minimized. + *
+ * The memory blocks are not assumed to be fast-access RAM and in particular + * might be NVRAM. JE makes an effort to only copy memory to/from the block + * when necessary, and to use single larger copy operations rather than + * multiple smaller copy operations. + */ +public interface OffHeapAllocator { + + class OffHeapOverflowException extends Exception {} + + /** + * Sets the maximum size of the off-heap cache, to be used as a hint for + * the creation of implementation-specific data structures. + * + * The maximum cache size is the amount of RAM that the app would like to + * use for the off-heap cache, at the gross level of dividing up the RAM on + * a machine among processes, the off-heap cache, the file system cache, + * etc. Because there is overhead with any allocation scheme, fewer bytes + * will actually be available for memory blocks created with the {@link + * #allocate} method. In other words, JE will not assume that it can + * allocate blocks totaling the specified maximum size. See {@link + * #getUsedBytes}. + *
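+ * For example (a sketch):
+ *
+ *   allocator.setMaxBytes(2L * 1024 * 1024 * 1024); // app budget: 2GB
+ *   // usable block capacity will total somewhat less than 2GB
+ *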
+ * This method is always called once before any other method is called, and + * it may be called again if the off-heap cache is resized by the + * app. + * + * @see #allocate(int) + */ + void setMaxBytes(long maxBytes); + + /** + * Returns an estimate of the amount of RAM used by the cache, including + * the metadata and block overhead used by the implementation, as well as + * any free space needed for performing compaction. + * + * This method should not cause thread contention when called frequently + * from multiple threads. A volatile long field is the suggested + * implementation. + * + * @see #allocate(int) + */ + long getUsedBytes(); + + /** + * Allocates a block of a given size and returns its ID. + * + * The bytes of the memory block must be initialized to zero. + *
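+ * Sketch (exception handling omitted): a fresh block reads back as zeros
+ * until written:
+ *
+ *   long id = allocator.allocate(16);
+ *   byte[] b = new byte[16];
+ *   allocator.copy(id, 0, b, 0, 16); // b remains all zeros
+ *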
+ * Note that because the used cache size is only an estimate, and in fact + * the maximum size might not actually be available (due to memory use by + * other processes when using the default allocator, for example), the + * {@link #allocate} method may fail (throw an exception) even when the + * used size is less than the maximum bytes (the value passed to {@link + * #setMaxBytes}). JE handles this situation as follows. + *
+ * JE uses an internal off-heap cache size limit to determine when to + * perform eviction (which frees off-heap blocks). The limit is + * initialized to the value passed to {@link #setMaxBytes}. JE calls + * {@link #getUsedBytes()} to determine when to evict memory from the + * cache. If the used byte count grows very close to the limit or exceeds + * it, JE will perform off-heap cache eviction. JE will make a best effort + * not to call the {@link #allocate} method when the used size exceeds + * the limit. + *
        + * If an allocation failure occurs (i.e., this method throws an + * exception), JE adjusts the limit downward to account for the + * inaccuracies discussed above. When a RuntimeException is thrown, the + * limit is set to the used size; when an OutOfMemoryError is thrown, the + * limit is set to the used size minus the {@link + * com.sleepycat.je.EnvironmentConfig#OFFHEAP_EVICT_BYTES}. This adjustment + * should ensure that JE eviction occurs and prevent frequent allocation + * failures and associated exception handling. + * TODO: This never happens because Linux kills the process + *
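+ * A caller-side sketch of the failure contract described above:
+ *
+ *   long id = 0;
+ *   try {
+ *       id = allocator.allocate(size);
+ *   } catch (OffHeapOverflowException e) {
+ *       // over budget: JE lowers its limit and evicts off-heap nodes
+ *   }
+ *   // id == 0 simply means the object is not cached off-heap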
        + * + * @return non-zero memory ID, or zero when the memory cannot be allocated. + * + * @throws OffHeapOverflowException if the block cannot be allocated + * because the max size has been reached. The internal off-heap cache + * size limit will be set as described above. + * + * @throws OutOfMemoryError if the block cannot be allocated because no + * system memory is available. The internal off-heap cache size limit will + * be set as described above. In addition, a SEVERE message for the + * exception will be logged. + * + * @see #getUsedBytes + */ + long allocate(int size) + throws OutOfMemoryError, OffHeapOverflowException; + + /** + * Frees a block previously allocated and returns the size freed, including + * any overhead for the block that is now free. + */ + int free(long memId); + + /** + * Returns the size of an allocated block. + */ + int size(long memId); + + /** + * Returns the size of an allocated block plus any overhead for the block. + */ + int totalSize(long memId); + + /** + * Copies bytes from an allocated block to a Java byte array. + */ + void copy(long memId, int memOff, byte[] buf, int bufOff, int len); + + /** + * Copies bytes from a Java byte array to an allocated block. + */ + void copy(byte[] buf, int bufOff, long memId, int memOff, int len); + + /** + * Copies bytes from one allocated block to another. + */ + void copy(long fromMemId, + int fromMemOff, + long toMemId, + int toMemOff, + int len); +} diff --git a/src/com/sleepycat/je/evictor/OffHeapAllocatorFactory.java b/src/com/sleepycat/je/evictor/OffHeapAllocatorFactory.java new file mode 100644 index 0000000..4b9c7ab --- /dev/null +++ b/src/com/sleepycat/je/evictor/OffHeapAllocatorFactory.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +/** + * Used to create OffHeapAllocator instances. + */ +public class OffHeapAllocatorFactory { + + private OffHeapAllocator defaultAllocator; + + OffHeapAllocatorFactory() + throws ClassNotFoundException, IllegalAccessException, + InstantiationException { + + /* + * The CHeapAllocator class should not be referenced symbolically here + * or by any other other class. This is necessary to avoid a linkage + * error if JE is run on a JVM without the Unsafe class. Therefore we + * load CHeapAllocator and create an instance using reflection. + */ + final Class cls = + Class.forName("com.sleepycat.je.evictor.CHeapAllocator"); + + defaultAllocator = (OffHeapAllocator) cls.newInstance(); + } + + /** + * @return null if the default allocator is not available on this JVM, + * presumably because the Unsafe class is not available. + */ + public OffHeapAllocator getDefaultAllocator() { + return defaultAllocator; + } +} diff --git a/src/com/sleepycat/je/evictor/OffHeapCache.java b/src/com/sleepycat/je/evictor/OffHeapCache.java new file mode 100644 index 0000000..6ea85be --- /dev/null +++ b/src/com/sleepycat/je/evictor/OffHeapCache.java @@ -0,0 +1,3584 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.ALLOC_FAILURE; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.ALLOC_OVERFLOW; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.BINS_LOADED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.BINS_STORED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.CACHED_BINS; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.CACHED_BIN_DELTAS; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.CACHED_LNS; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.CRITICAL_NODES_TARGETED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.DIRTY_NODES_EVICTED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.GROUP_DESC; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.GROUP_NAME; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.LNS_EVICTED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.LNS_LOADED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.LNS_STORED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.LRU_SIZE; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.NODES_EVICTED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.NODES_MUTATED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.NODES_SKIPPED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.NODES_STRIPPED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.NODES_TARGETED; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.THREAD_UNAVAILABLE; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.TOTAL_BLOCKS; +import static com.sleepycat.je.evictor.OffHeapStatDefinition.TOTAL_BYTES; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.RejectedExecutionHandler; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.zip.Checksum; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.evictor.Evictor.EvictionSource; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Provisional; +import com.sleepycat.je.log.entry.BINDeltaLogEntry; +import 
com.sleepycat.je.log.entry.INLogEntry;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.utilint.Adler32;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.IntStat;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.Pair;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.je.utilint.StoppableThreadFactory;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * Off-heap cache and evictor.
+ *
+ * Overview
+ * --------
+ * When an LN or BIN is evicted from the main cache it is moved off-heap. The
+ * off-heap evictor (this class) will apply the same LRU algorithm and
+ * CacheMode logic that is used by the main evictor. When an off-heap cache is
+ * used, the main evictor will not place dirty INs on a priority 2 LRU list,
+ * and will not perform LN stripping or BIN delta mutation; instead, these
+ * actions become the responsibility of the off-heap evictor.
+ *
+ * UINs are not stored off-heap because the complexity this would add is not
+ * worth the benefits. An extremely large data set can be represented by the
+ * UINs that fit in a 10GB main cache, so the lack of off-heap UINs is not
+ * considered a deficiency.
+ *
+ * Movement of LNs and BINs between the two caches is performed as follows.
+ * Note that LNs and BINs are not moved off-heap if they are deleted or if
+ * they belong to an internal database. And of course, embedded and duplicate
+ * DB LNs are not stored separately off-heap.
+ *
+ * When an LN or a BIN is evicted from main, it is stored off-heap.
+ *
+ * If the off-heap memory block cannot be allocated, the object is not stored
+ * and no exception is thrown. This prevents allocation failures from causing
+ * CRUD operation failures. Stats about allocation failures are maintained and
+ * a SEVERE message is logged when the failure is because no more system
+ * memory is available. See the OffHeapAllocator interface for details.
+ *
+ * For an off-heap LN with a parent BIN in main cache, the LN's memory ID is
+ * maintained in the BIN. The BIN is assigned an off-heap LRU index so the
+ * off-heap evictor can perform off-heap LN stripping. In this case, the BIN
+ * is in both a main and off-heap LRU list. For now at least (TODO), because
+ * deferred write DBs are not supported, a priority-1 off-heap LRU list is
+ * used, since the LNs will never be logged.
+ *
+ * An off-heap BIN is assigned an off-heap LRU index, which is stored in its
+ * parent UIN slot in main cache. The slot also has an "off-heap dirty" flag
+ * that allows the checkpointer to discover dirty off-heap BINs, and a
+ * "priority 2" flag that indicates whether the BIN is in the priority 1 or 2
+ * LRU lists.
+ *
+ * When a BIN moves off-heap and the BIN currently has off-heap LNs, the
+ * references (memory IDs) of the off-heap LNs are stored with the serialized
+ * BIN. When the off-heap evictor processes the BIN, it will free the
+ * off-heap LNs and modify or replace the off-heap BIN so that it no longer
+ * references them. This is the equivalent of the LN stripping performed by
+ * the main cache evictor.
+ *
+ * An off-heap BIN will be mutated to a BIN-delta using the same rules used by
+ * the main evictor.
+ *
+ * The use of separate priority 1 and priority 2 LRU lists also copies the
+ * approach used in the main evictor (when no off-heap cache is configured).
+ * + * - Eviction of nodes on the priority 2 lists occurs only after emptying the + * priority 1 lists. + * + * - A BIN is moved from a priority 1 list to a priority 2 list when it is + * dirty, its LNs have been stripped and BIN-delta mutation has been + * attempted. + * + * - Therefore, only dirty BINs with no resident LNs, and which have been + * mutated to BIN-deltas (if possible), appear in the priority 2 lists. + * + * However, in the off-heap cache, all off-heap BINs appear in an LRU list, + * unlike the main cache where some INs do not appear because they have + * resident children and therefore are not evictable. + * + * Nodes in both caches at once + * ---------------------------- + * There are advantages and disadvantages to allowing duplication of a node + * (LN or BIN) in the off-heap and main cache. The advantage is when a node is + * loaded from off-heap into main, and we know (because CacheMode.EVICT_LN, + * EVICT_BIN or UNCHANGED is used) that when the operation is complete the node + * will be evicted from main and stored off-heap again. In this case it is more + * efficient to leave it off-heap and tolerate the duplication for the duration + * of the operation. However, the drawbacks of doing this are: + * + * 1. We cannot assume in code that a node is in only one cache at a time. When + * it appears in both caches, we must always use the object in the main + * cache, since the off-heap object may be stale. + * + * 2. If for some reason the node is NOT evicted from the main cache, we must + * remove it from off-heap. This can happen when the node is accessed with + * a different CacheMode (by the original thread or a different thread) + * prior to completing the operation. Removal from the off-heap cache + * should be done ASAP, so the duplication does not cause unnecessary + * eviction. + * + * 3. If the node in the main cache is modified, this invalidates the node in + * the off-heap cache and we must be sure not to use the off-heap version + * and to remove it ASAP. This is very complex for BINs in particular, + * because they can be modified in so many different ways. For LNs, on the + * other hand, the types of modifications are fairly limited. + * + * Because of the complexity issue in item 3, we do not allow duplication of + * BINs. We do allow duplication of LNs, and this is handled as follows: + * + * - freeRedundantLN is called when an LN is accessed via IN.fetchLN or getLN. + * If a CacheMode is used that will not evict the LN, the LN is removed + * from off-heap. + * + * - freeLN is called (via BIN.freeOffHeapLN) during any operation that will + * modify an LN. + * + * If for some reason these mechanisms fail to prevent unwanted duplication, + * eviction will eventually remove the redundant nodes. + * + * LRU data structures and concurrency control + * ------------------------------------------- + * LRU entries form linked lists. Like in the main cache, there are two sets of + * LRU lists for priority 1 and 2 BINs, and multiple lists in each set to + * reduce thread contention on the linked lists. + * + * LRU information is allocated using arrays to minimize per-entry object + * overhead. There is a single pool of allocated entries that are used for all + * LRULists. The entries are uniquely identified by an int ID. + * + * The arrays are allocated in Chunks and a Chunk is never de-allocated. This + * is for two reasons: + * + * - Chunks can be referenced without any locking (concurrency control is + * discussed below). 
+ *
+ * - Using Chunks avoids having to pre-allocate all LRU entries, while still
+ *   minimizing Object overhead (see CHUNK_SIZE).
+ *
+ * The 'chunks' array contains all allocated Chunks. In each Chunk there is an
+ * array for each field in an LRU entry.
+ *
+ * LRU entries are assigned sequential int IDs starting from zero. The chunk
+ * for a given entry ID is:
+ *     chunks[entry / CHUNK_SIZE]
+ * and the array index within the chunk is:
+ *     entry % CHUNK_SIZE
+ * (a worked example appears at the end of this comment).
+ *
+ * The chunks array can be read (indexed to obtain a Chunk object) without any
+ * locking because a copy-on-write technique is used. When a new Chunk must be
+ * allocated, the addRemoveEntryMutex protects the assignment of the chunks
+ * array. This mutex also protects the free entry list (the firstFreeListEntry
+ * field and the next/prev indexes of the entries on the free list). This
+ * mutex is global per Environment, but is not frequently locked -- only when
+ * an LRU entry is added or removed.
+ *
+ * The fields of an LRU entry -- the array slots -- are protected as follows.
+ *
+ * - The linked list fields -- prev and next slots -- are protected by the
+ *   LRUList mutex, for entries in an LRUList. For entries on the free list,
+ *   these are protected by the addRemoveEntryMutex.
+ *
+ * - Other fields -- owners and memIds slots, for example -- are protected by
+ *   the IN latch. The IN "owns" these fields for its associated LRU entry
+ *   (in the case of a BIN) or entries (in the case of an IN).
+ *
+ * Of course the IN latch also protects the fields in the IN related to the
+ * LRU entry: the BIN's lruIdx field, and the arrays of child LN memId
+ * (for a BIN) and child IN lruIdx (for a UIN).
+ *
+ * When multiple locks are taken, the order is:
+ *     IN latch, LRUList mutex
+ * -or-
+ *     IN latch, addRemoveEntryMutex
+ *
+ * The LRUList mutex and addRemoveEntryMutex are never both locked.
+ *
+ * An LRU entry is in a special state when it is removed from the LRU list and
+ * is being processed by the evictor. In this case the IN is latched, but
+ * there is a window after it is removed and before it is latched where
+ * anything can happen. Before processing, several checks are made to ensure
+ * that the entry still belongs to the IN, the IN has not been evicted, and
+ * the entry has not been put back on the LRUList. This last check requires
+ * synchronizing on the LRUList, so unfortunately we must synchronize twice on
+ * the LRU list: once to remove the entry, and again after latching the IN to
+ * ensure that it has not been put back on the LRUList by another thread.
+ *
+ * TODO:
+ * - Test allocation failures using an allocator that simulates random
+ *   failures. Currently, allocation failures never happen in the real world,
+ *   because Linux kills the process before memory is exhausted. But when we
+ *   allow alternate allocators, failures may occur if a memory pool is
+ *   filled.
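+ *
+ * Entry addressing example
+ * ------------------------
+ * A concrete instance of the arithmetic above: with CHUNK_SIZE =
+ * 100 * 1024 = 102,400, entry ID 250,000 resides in chunks[2], because
+ * 250,000 / 102,400 = 2, at array index 250,000 % 102,400 = 45,200.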
+ */
+public class OffHeapCache implements EnvConfigObserver {
+
+    private static final int VLSN_SIZE = 8;
+    private static final int CHECKSUM_SIZE = 4;
+    private static final int MAX_UNUSED_BIN_BYTES = 100;
+
+    private static final int BIN_FLAG_DELTA = 0x1;
+    private static final int BIN_FLAG_CAN_MUTATE = 0x2;
+    private static final int BIN_FLAG_PROHIBIT_NEXT_DELTA = 0x4;
+    private static final int BIN_FLAG_LOGGED_FULL_VERSION = 0x8;
+
+    private static final boolean DEBUG_DOUBLE_FREE = false;
+    private static final int DEBUG_FREE_BLOCKS_PER_MAP = 250000; // about 0.5 G
+
+    private static final boolean DEBUG_TRACE = false;
+    private static final boolean DEBUG_TRACE_STACK = false;
+    private static final boolean DEBUG_TRACE_AND_LOG = false;
+
+    /*
+     * Number of LRU entries to allocate at a time, i.e., per chunk.
+     * The goals are:
+     *
+     * 1. Create arrays large enough to make the object overhead
+     * insignificant. The int[] arrays, the smallest, are 400KB each, and
+     * their object overhead (16 bytes max) is tiny in comparison.
+     *
+     * 2. Create arrays less than 1MB in size to prevent GC issues.
+     * "Humongous" objects, which are expensive from a GC viewpoint, are 1MB
+     * or larger. The long[], the largest array, is 800KB with a 100K chunk
+     * size.
+     *
+     * 3. Create chunks small enough that we don't use a big percentage of a
+     * smallish heap to allocate one chunk. The chunk size is a little over
+     * 2MB, easily small enough to meet this requirement.
+     *
+     * 4. Create chunks large enough so that we don't frequently grow the
+     * chunk list, which requires holding the free list mutex. 100K entries
+     * per chunk is easily enough.
+     *
+     * 5. Create chunks small enough so that we don't hold the free list mutex
+     * for too long while adding all the entries in a new chunk to the free
+     * list. 100K may be too large in this respect, and it could be reduced if
+     * this is a noticeable issue. Even better, rather than add a new chunk's
+     * entries to the free list, treat those entries as a "free stack" and pop
+     * them off separately.
+     */
+    private static final int CHUNK_SIZE = 100 * 1024;
+
+    private static final long CHUNK_MEMORY_SIZE =
+        MemoryBudget.OBJECT_OVERHEAD +
+        16 + // For four array references -- accuracy is unimportant.
+        MemoryBudget.longArraySize(CHUNK_SIZE) +
+        MemoryBudget.objectArraySize(CHUNK_SIZE) +
+        MemoryBudget.intArraySize(CHUNK_SIZE) * 2;
+
+    /*
+     * Amount that tests should add to a minimal main cache configuration,
+     * when an off-heap cache is used.
+     *
+     * TODO: For now this is not budgeted.
+     */
+    public static final long MIN_MAIN_CACHE_OVERHEAD = 0;//CHUNK_MEMORY_SIZE;
+
+    private static class Chunk {
+
+        /*
+         * If the IN is a UIN, the memId is the block containing the BIN.
+         *
+         * If the IN is a BIN, the memId is currently unused. It may be used
+         * in the future for the off-heap full BIN for a BIN-delta in main.
+         */
+        final long[] memIds;
+
+        /*
+         * The IN that owns this entry.
+         * . Is null if the entry is not used, i.e., on the free list.
+         * . Is a UIN if the entry is for an off-heap BIN.
+         * . Is a BIN if the entry is for a BIN in the main cache.
+         */
+        final IN[] owners;
+
+        /*
+         * IDs of the prev/next entries in the LRU linked list. For entries on
+         * the free list, only the next entry is used (it is singly-linked).
+         *
+         * The prev and next entry ID are -1 to mean the end of the list.
+         * . If prev == -1, then entry ID == LRUList.back.
+         * . If next == -1, then entry ID == LRUList.front.
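+         *
+         * For example, after entries 5, 9 and 2 are added (in that order) to
+         * one LRUList, the list state is: back == 2 (hot end), front == 5
+         * (cold end), next[2] == 9, next[9] == 5, next[5] == -1, and
+         * prev[5] == 9, prev[9] == 2, prev[2] == -1.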
+         *
+         * If next == -2, the entry is not in an LRUList nor is it on the
+         * free list. When next == -2 and the owner is non-null, this means
+         * the entry has been removed from the LRU list to be processed by
+         * the evictor; the evictor may decide to add it back to an LRU list
+         * or place it on the free list.
+         *
+         * When an entry is on the free list, next is the next ID on the free
+         * list, and the owner is null.
+         */
+        final int[] prev;
+        final int[] next;
+
+        Chunk() {
+            memIds = new long[CHUNK_SIZE];
+            owners = new IN[CHUNK_SIZE];
+            prev = new int[CHUNK_SIZE];
+            next = new int[CHUNK_SIZE];
+        }
+    }
+
+    private class LRUList {
+
+        /*
+         * The front field is the entry ID of the cold end, and back is the
+         * ID of the hot end. Both fields are -1 if the list is empty. If
+         * there is only one entry, both fields have the same value.
+         */
+        private int front = -1;
+        private int back = -1;
+        private int size = 0;
+
+        void addBack(final int entry, final IN owner, final long memId) {
+
+            final Chunk chunk = chunks[entry / CHUNK_SIZE];
+            final int chunkIdx = entry % CHUNK_SIZE;
+
+            /*
+             * Must set owner before adding to LRU list, since an entry that
+             * is on the LRU list with a null owner would be considered a free
+             * entry (by other threads).
+             */
+            chunk.owners[chunkIdx] = owner;
+            chunk.memIds[chunkIdx] = memId;
+
+            synchronized (this) {
+                addBackInternal(entry, chunk, chunkIdx);
+            }
+        }
+
+        void addFront(final int entry) {
+
+            final Chunk chunk = chunks[entry / CHUNK_SIZE];
+            final int chunkIdx = entry % CHUNK_SIZE;
+
+            synchronized (this) {
+                addFrontInternal(entry, chunk, chunkIdx);
+            }
+        }
+
+        void moveBack(final int entry) {
+
+            final Chunk chunk = chunks[entry / CHUNK_SIZE];
+            final int chunkIdx = entry % CHUNK_SIZE;
+
+            synchronized (this) {
+
+                if (back == entry) {
+                    return;
+                }
+
+                removeInternal(entry, chunk, chunkIdx);
+                addBackInternal(entry, chunk, chunkIdx);
+            }
+        }
+
+        void moveFront(final int entry) {
+
+            final Chunk chunk = chunks[entry / CHUNK_SIZE];
+            final int chunkIdx = entry % CHUNK_SIZE;
+
+            synchronized (this) {
+
+                if (front == entry) {
+                    return;
+                }
+
+                removeInternal(entry, chunk, chunkIdx);
+                addFrontInternal(entry, chunk, chunkIdx);
+            }
+        }
+
+        int removeFront() {
+
+            synchronized (this) {
+
+                int entry = front;
+                if (entry < 0) {
+                    return -1;
+                }
+
+                final Chunk chunk = chunks[entry / CHUNK_SIZE];
+                final int chunkIdx = entry % CHUNK_SIZE;
+
+                removeInternal(entry, chunk, chunkIdx);
+
+                return entry;
+            }
+        }
+
+        void remove(final int entry) {
+
+            final Chunk chunk = chunks[entry / CHUNK_SIZE];
+            final int chunkIdx = entry % CHUNK_SIZE;
+
+            synchronized (this) {
+                removeInternal(entry, chunk, chunkIdx);
+            }
+        }
+
+        private void addBackInternal(final int entry,
+                                     final Chunk chunk,
+                                     final int chunkIdx) {
+
+            assert chunk.owners[chunkIdx] != null;
+            assert chunk.next[chunkIdx] == -2;
+
+            if (back < 0) {
+                assert back == -1;
+                assert front == -1;
+
+                chunk.prev[chunkIdx] = -1;
+                chunk.next[chunkIdx] = -1;
+
+                back = entry;
+                front = entry;
+            } else {
+                assert front >= 0;
+
+                final Chunk nextChunk = chunks[back / CHUNK_SIZE];
+                final int nextIdx = back % CHUNK_SIZE;
+
+                assert nextChunk.prev[nextIdx] < 0;
+
+                nextChunk.prev[nextIdx] = entry;
+
+                chunk.next[chunkIdx] = back;
+                chunk.prev[chunkIdx] = -1;
+
+                back = entry;
+            }
+
+            size += 1;
+        }
+
+        private void addFrontInternal(final int entry,
+                                      final Chunk chunk,
+                                      final int chunkIdx) {
+
+            assert chunk.owners[chunkIdx] != null;
+            assert chunk.next[chunkIdx] == -2;
+
+            if (front < 0) {
+                assert back == -1;
+                assert front == -1;
+
+                chunk.prev[chunkIdx] = -1;
+                chunk.next[chunkIdx] = -1;
+
+                front = entry;
+                back = entry;
+            } else {
+                assert back >= 0;
+
+                final Chunk prevChunk = chunks[front / CHUNK_SIZE];
+                final int prevIdx = front % CHUNK_SIZE;
+
+                assert prevChunk.next[prevIdx] < 0;
+
+                prevChunk.next[prevIdx] = entry;
+
+                chunk.prev[chunkIdx] = front;
+                chunk.next[chunkIdx] = -1;
+
+                front = entry;
+            }
+
+            size += 1;
+        }
+
+        private void removeInternal(final int entry,
+                                    final Chunk chunk,
+                                    final int chunkIdx) {
+
+            assert chunk.owners[chunkIdx] != null;
+
+            if (chunk.next[chunkIdx] == -2) {
+                return;
+            }
+
+            assert front >= 0;
+            assert back >= 0;
+
+            final int prev = chunk.prev[chunkIdx];
+            final int next = chunk.next[chunkIdx];
+
+            if (prev < 0) {
+                assert prev == -1;
+                assert back == entry;
+
+                back = next;
+            } else {
+                assert back != entry;
+
+                final Chunk prevChunk = chunks[prev / CHUNK_SIZE];
+                final int prevIdx = prev % CHUNK_SIZE;
+
+                assert prevChunk.next[prevIdx] == entry;
+
+                prevChunk.next[prevIdx] = next;
+            }
+
+            if (next < 0) {
+                assert next == -1;
+                assert front == entry;
+
+                front = prev;
+            } else {
+                assert front != entry;
+
+                final Chunk nextChunk = chunks[next / CHUNK_SIZE];
+                final int nextIdx = next % CHUNK_SIZE;
+
+                assert nextChunk.prev[nextIdx] == entry;
+
+                nextChunk.prev[nextIdx] = prev;
+            }
+
+            chunk.next[chunkIdx] = -2;
+
+            size -= 1;
+        }
+
+        boolean contains(final Chunk chunk, final int chunkIdx) {
+
+            synchronized (this) {
+                assert chunk.next[chunkIdx] >= -2;
+
+                return chunk.next[chunkIdx] != -2 &&
+                       chunk.owners[chunkIdx] != null;
+            }
+        }
+
+        int getSize() {
+            return size;
+        }
+    }
+
+    private final Logger logger;
+    private final OffHeapAllocator allocator;
+    private boolean runEvictorThreads;
+    private int maxPoolThreads;
+    private final AtomicInteger activePoolThreads = new AtomicInteger(0);
+    private final AtomicBoolean shutdownRequested = new AtomicBoolean(false);
+    private final ThreadPoolExecutor evictionPool;
+    private int terminateMillis;
+    private long maxMemory;
+    private long memoryLimit;
+    private final long evictBytes;
+    private volatile Map freedBlocks;
+    private volatile Map prevFreedBlocks;
+
+    private volatile Chunk[] chunks;
+    private int firstFreeListEntry = -1;
+    private final Object addRemoveEntryMutex = new Object();
+
+    private final int numLRULists;
+    private final LRUList[] pri1LRUSet;
+    private final LRUList[] pri2LRUSet;
+    private int nextPri1LRUList = 0;
+    private int nextPri2LRUList = 0;
+
+    private final AtomicLong nAllocFailure = new AtomicLong(0);
+    private final AtomicLong nAllocOverflow = new AtomicLong(0);
+    private final AtomicLong nThreadUnavailable = new AtomicLong(0);
+    private final AtomicLong nCriticalNodesTargeted = new AtomicLong(0);
+    private final AtomicLong nNodesTargeted = new AtomicLong(0);
+    private final AtomicLong nNodesEvicted = new AtomicLong(0);
+    private final AtomicLong nDirtyNodesEvicted = new AtomicLong(0);
+    private final AtomicLong nNodesStripped = new AtomicLong(0);
+    private final AtomicLong nNodesMutated = new AtomicLong(0);
+    private final AtomicLong nNodesSkipped = new AtomicLong(0);
+    private final AtomicLong nLNsEvicted = new AtomicLong(0);
+    private final AtomicLong nLNsLoaded = new AtomicLong(0);
+    private final AtomicLong nLNsStored = new AtomicLong(0);
+    private final AtomicLong nBINsLoaded = new AtomicLong(0);
+    private final AtomicLong nBINsStored = new AtomicLong(0);
+    private final AtomicInteger cachedLNs = new AtomicInteger(0);
+    private final AtomicInteger cachedBINs = new AtomicInteger(0);
+    private final AtomicInteger cachedBINDeltas = new AtomicInteger(0);
+    private final AtomicInteger totalBlocks = new AtomicInteger(0);
+    private final AtomicInteger lruSize = new AtomicInteger(0);
+
+    public OffHeapCache(final EnvironmentImpl envImpl) {
+
+        logger = LoggerUtils.getLogger(getClass());
+
+        final DbConfigManager configManager = envImpl.getConfigManager();
+
+        maxMemory = configManager.getLong(
+            EnvironmentParams.MAX_OFF_HEAP_MEMORY);
+
+        if (maxMemory == 0) {
+            allocator = DummyAllocator.INSTANCE;
+        } else {
+            try {
+                final OffHeapAllocatorFactory factory =
+                    new OffHeapAllocatorFactory();
+                allocator = factory.getDefaultAllocator();
+            } catch (Throwable e) {
+                // TODO: allow continuing without an off-heap cache?
+                throw new IllegalStateException(
+                    "Unable to create default allocator for off-heap cache", e);
+            }
+        }
+
+        evictBytes = configManager.getLong(
+            EnvironmentParams.OFFHEAP_EVICT_BYTES);
+
+        numLRULists = configManager.getInt(
+            EnvironmentParams.OFFHEAP_N_LRU_LISTS);
+
+        allocator.setMaxBytes(maxMemory);
+        memoryLimit = maxMemory;
+
+        pri1LRUSet = new LRUList[numLRULists];
+        pri2LRUSet = new LRUList[numLRULists];
+
+        for (int i = 0; i < numLRULists; i += 1) {
+            pri1LRUSet[i] = new LRUList();
+            pri2LRUSet[i] = new LRUList();
+        }
+
+        if (DEBUG_DOUBLE_FREE) {
+            freedBlocks = new ConcurrentHashMap<>();
+            prevFreedBlocks = new ConcurrentHashMap<>();
+        } else {
+            freedBlocks = null;
+            prevFreedBlocks = null;
+        }
+
+        terminateMillis = configManager.getDuration(
+            EnvironmentParams.EVICTOR_TERMINATE_TIMEOUT);
+
+        final int corePoolSize = configManager.getInt(
+            EnvironmentParams.OFFHEAP_CORE_THREADS);
+
+        maxPoolThreads = configManager.getInt(
+            EnvironmentParams.OFFHEAP_MAX_THREADS);
+
+        final long keepAliveTime = configManager.getDuration(
+            EnvironmentParams.OFFHEAP_KEEP_ALIVE);
+
+        final boolean isShared = envImpl.getSharedCache();
+
+        evictionPool = new ThreadPoolExecutor(
+            corePoolSize, maxPoolThreads,
+            keepAliveTime, TimeUnit.MILLISECONDS,
+            new ArrayBlockingQueue<Runnable>(1),
+            new StoppableThreadFactory(
+                isShared ? null : envImpl, "JEOffHeapEvictor", logger),
+            new RejectedExecutionHandler() {
+                @Override
+                public void rejectedExecution(
+                    Runnable r, ThreadPoolExecutor executor) {
+                    nThreadUnavailable.incrementAndGet();
+                }
+            });
+
+        runEvictorThreads = configManager.getBoolean(
+            EnvironmentParams.ENV_RUN_OFFHEAP_EVICTOR);
+
+        envImpl.addConfigObserver(this);
+    }
+
+    @Override
+    public void envConfigUpdate(
+        final DbConfigManager configManager,
+        final EnvironmentMutableConfig ignore) {
+
+        terminateMillis = configManager.getDuration(
+            EnvironmentParams.EVICTOR_TERMINATE_TIMEOUT);
+
+        final int corePoolSize = configManager.getInt(
+            EnvironmentParams.OFFHEAP_CORE_THREADS);
+
+        maxPoolThreads = configManager.getInt(
+            EnvironmentParams.OFFHEAP_MAX_THREADS);
+
+        final long keepAliveTime = configManager.getDuration(
+            EnvironmentParams.OFFHEAP_KEEP_ALIVE);
+
+        evictionPool.setCorePoolSize(corePoolSize);
+        evictionPool.setMaximumPoolSize(maxPoolThreads);
+        evictionPool.setKeepAliveTime(keepAliveTime, TimeUnit.MILLISECONDS);
+
+        runEvictorThreads = configManager.getBoolean(
+            EnvironmentParams.ENV_RUN_OFFHEAP_EVICTOR);
+
+        final long newMaxMemory = configManager.getLong(
+            EnvironmentParams.MAX_OFF_HEAP_MEMORY);
+
+        if ((newMaxMemory > 0) != (maxMemory > 0)) {
+            // TODO detect this error earlier?
+ throw new IllegalArgumentException( + "Cannot change off-heap cache size between zero and non-zero"); + } + + maxMemory = newMaxMemory; + allocator.setMaxBytes(newMaxMemory); + memoryLimit = newMaxMemory; + } + + public void requestShutdown() { + shutdownRequested.set(true); + evictionPool.shutdown(); + } + + public void shutdown() { + + shutdownRequested.set(true); + evictionPool.shutdown(); + + boolean shutdownFinished = false; + try { + shutdownFinished = evictionPool.awaitTermination( + terminateMillis, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + /* We've been interrupted, just give up and end. */ + } finally { + if (!shutdownFinished) { + evictionPool.shutdownNow(); + } + + clearCache(null); + +// envImpl.getMemoryBudget().updateAdminMemoryUsage( +// 0 - (chunks.length * CHUNK_MEMORY_SIZE)); + + chunks = null; + } + } + + public boolean isEnabled() { + return allocator != DummyAllocator.INSTANCE; + } + + public long clearCache(final EnvironmentImpl matchEnv) { + + /* + * Use local var because when matchEnv is non-null, other threads (for + * other envs in the shared pool) are running and may replace the + * array. However, all entries for matchEnv will remain in the local + * array. + */ + final Chunk[] myChunks = chunks; + + if (myChunks == null) { + return 0; + } + + long size = 0; + + for (Chunk chunk : myChunks) { + + for (int chunkIdx = 0; chunkIdx < CHUNK_SIZE; chunkIdx += 1) { + + final IN owner = chunk.owners[chunkIdx]; + if (owner == null) { + continue; + } + if (matchEnv != null && owner.getEnv() != matchEnv) { + continue; + } + + owner.latchNoUpdateLRU(); + try { + size += removeINFromMain(owner); + } finally { + owner.releaseLatch(); + } + } + } + + return size; + } + + public StatGroup loadStats(StatsConfig config) { + + StatGroup stats = new StatGroup(GROUP_NAME, GROUP_DESC); + + new LongStat(stats, ALLOC_FAILURE, nAllocFailure.get()); + new LongStat(stats, ALLOC_OVERFLOW, nAllocOverflow.get()); + new LongStat(stats, THREAD_UNAVAILABLE, nThreadUnavailable.get()); + new LongStat(stats, CRITICAL_NODES_TARGETED, nCriticalNodesTargeted.get()); + new LongStat(stats, NODES_TARGETED, nNodesTargeted.get()); + new LongStat(stats, NODES_EVICTED, nNodesEvicted.get()); + new LongStat(stats, DIRTY_NODES_EVICTED, nDirtyNodesEvicted.get()); + new LongStat(stats, NODES_STRIPPED, nNodesStripped.get()); + new LongStat(stats, NODES_MUTATED, nNodesMutated.get()); + new LongStat(stats, NODES_SKIPPED, nNodesSkipped.get()); + new LongStat(stats, LNS_EVICTED, nLNsEvicted.get()); + new LongStat(stats, LNS_LOADED, nLNsLoaded.get()); + new LongStat(stats, LNS_STORED, nLNsStored.get()); + new LongStat(stats, BINS_LOADED, nBINsLoaded.get()); + new LongStat(stats, BINS_STORED, nBINsStored.get()); + new IntStat(stats, CACHED_LNS, cachedLNs.get()); + new IntStat(stats, CACHED_BINS, cachedBINs.get()); + new IntStat(stats, CACHED_BIN_DELTAS, cachedBINDeltas.get()); + new LongStat(stats, TOTAL_BYTES, allocator.getUsedBytes()); + new IntStat(stats, TOTAL_BLOCKS, totalBlocks.get()); + new IntStat(stats, LRU_SIZE, lruSize.get()); + + if (config.getClear()) { + nAllocFailure.set(0); + nAllocOverflow.set(0); + nThreadUnavailable.set(0); + nCriticalNodesTargeted.set(0); + nNodesTargeted.set(0); + nNodesEvicted.set(0); + nDirtyNodesEvicted.set(0); + nNodesStripped.set(0); + nNodesMutated.set(0); + nNodesSkipped.set(0); + nLNsEvicted.set(0); + nLNsLoaded.set(0); + nLNsStored.set(0); + nBINsLoaded.set(0); + nBINsStored.set(0); + } + + return stats; + } + + public long getMaxMemory() { + return 
maxMemory; + } + + public long getUsedMemory() { + return allocator.getUsedBytes(); + } + + /** + * Forces allocation of the first chunk of entries. Used by tests that need + * to more precisely control cache behavior. + */ + public void preallocateLRUEntries() { + + if (chunks == null) { + freeEntry(allocateEntry()); + } + } + + public OffHeapAllocator getAllocator() { + return allocator; + } + + private void debug(final EnvironmentImpl envImpl, String msg) { + + assert DEBUG_TRACE; + + if (DEBUG_TRACE_STACK) { + msg += " " + LoggerUtils.getStackTrace(); + } + + if (DEBUG_TRACE_AND_LOG) { + LoggerUtils.traceAndLog(logger, envImpl, Level.INFO, msg); + } else { + LoggerUtils.logMsg(logger, envImpl, Level.INFO, msg); + } + } + + private int addBack(final boolean pri2, IN owner, long memId) { + + assert owner.isLatchExclusiveOwner(); + + final int entry = allocateEntry(); + + final int lruIdx = entry % numLRULists; + final LRUList lru = + pri2 ? pri2LRUSet[lruIdx] : pri1LRUSet[lruIdx]; + + lru.addBack(entry, owner, memId); + + return entry; + } + + public int moveBack(final int entry, final boolean pri2) { + + final int lruIdx = entry % numLRULists; + final LRUList lru = + pri2 ? pri2LRUSet[lruIdx] : pri1LRUSet[lruIdx]; + + lru.moveBack(entry); + + return entry; + } + + private int moveFront(final int entry, final boolean pri2) { + + final int lruIdx = entry % numLRULists; + final LRUList lru = + pri2 ? pri2LRUSet[lruIdx] : pri1LRUSet[lruIdx]; + + lru.moveFront(entry); + + return entry; + } + + private void remove(final int entry, final boolean pri2) { + + final int lruIdx = entry % numLRULists; + final LRUList lru = + pri2 ? pri2LRUSet[lruIdx] : pri1LRUSet[lruIdx]; + + lru.remove(entry); + freeEntry(entry); + } + + /** + * Takes an entry from the free list. If the free list is empty, allocates + * a new chunk and adds its entries to the free list. + */ + private int allocateEntry() { + + synchronized (addRemoveEntryMutex) { + + if (firstFreeListEntry >= 0) { + + final int entry = firstFreeListEntry; + final Chunk chunk = chunks[entry / CHUNK_SIZE]; + final int chunkIdx = entry % CHUNK_SIZE; + + firstFreeListEntry = chunk.next[chunkIdx]; + chunk.next[chunkIdx] = -2; + + lruSize.incrementAndGet(); + + return entry; + } + + final Chunk newChunk = new Chunk(); + final int[] next = newChunk.next; + final int nOldChunks = (chunks != null) ? chunks.length : 0; + + /* Entry 0 in the new chunk will be returned. */ + int nextFree = nOldChunks * CHUNK_SIZE; + final int entry = nextFree++; + next[0] = -2; + + /* Entry 1 is the tail of the free list. */ + next[1] = -1; + + /* + * Entry 2 and above are added to the free list. + * + * This loop needs to be as fast as possible, which is why we're + * using local vars for next and nextFree. + * + * In the loop, nextFree starts out as entry 1 (tail of free + * list) and ends up as the last free entry (head of free list). + */ + for (int i = 2; i < CHUNK_SIZE; i += 1) { + next[i] = nextFree++; + } + + /* The last entry is the head of the free list. */ + firstFreeListEntry = nextFree; + + final Chunk[] newChunks = new Chunk[nOldChunks + 1]; + if (nOldChunks > 0) { + System.arraycopy(chunks, 0, newChunks, 0, nOldChunks); + } + newChunks[nOldChunks] = newChunk; + + /* Assign to volatile chunks field as the very last step. */ + chunks = newChunks; + + lruSize.incrementAndGet(); + +// envImpl.getMemoryBudget().updateAdminMemoryUsage( +// CHUNK_MEMORY_SIZE); + + return entry; + } + } + + /** + * Removes the entry from its LRU and adds it to the free list. 
+     */
+    private void freeEntry(final int entry) {
+
+        final Chunk chunk = chunks[entry / CHUNK_SIZE];
+        final int chunkIdx = entry % CHUNK_SIZE;
+
+        synchronized (addRemoveEntryMutex) {
+
+            if (chunk.owners[chunkIdx] == null) {
+                return; // Already on free list
+            }
+
+            chunk.owners[chunkIdx] = null;
+            chunk.next[chunkIdx] = firstFreeListEntry;
+            firstFreeListEntry = entry;
+
+            lruSize.decrementAndGet();
+        }
+    }
+
+    public long getMemId(final int entry) {
+
+        final Chunk chunk = chunks[entry / CHUNK_SIZE];
+        final int chunkIdx = entry % CHUNK_SIZE;
+
+        return chunk.memIds[chunkIdx];
+    }
+
+    private IN getOwner(final int entry) {
+
+        final Chunk chunk = chunks[entry / CHUNK_SIZE];
+        final int chunkIdx = entry % CHUNK_SIZE;
+
+        return chunk.owners[chunkIdx];
+    }
+
+    public void setOwner(final int entry, final IN owner) {
+
+        assert owner.isLatchExclusiveOwner();
+
+        final Chunk chunk = chunks[entry / CHUNK_SIZE];
+        final int chunkIdx = entry % CHUNK_SIZE;
+
+        assert chunk.owners[chunkIdx] != null;
+        assert chunk.owners[chunkIdx].isLatchExclusiveOwner();
+
+        chunk.owners[chunkIdx] = owner;
+    }
+
+    private void setOwnerAndMemId(final int entry,
+                                  final IN owner,
+                                  final long memId) {
+
+        assert owner.isLatchExclusiveOwner();
+
+        final Chunk chunk = chunks[entry / CHUNK_SIZE];
+        final int chunkIdx = entry % CHUNK_SIZE;
+
+        assert chunk.owners[chunkIdx] != null;
+        assert chunk.owners[chunkIdx].isLatchExclusiveOwner();
+
+        chunk.owners[chunkIdx] = owner;
+        chunk.memIds[chunkIdx] = memId;
+    }
+
+    /**
+     * Called before eviction of an LN from main cache to provide an
+     * opportunity to store the LN off-heap.
+     */
+    public boolean storeEvictedLN(final BIN bin,
+                                  final int index,
+                                  final LN ln) {
+        assert !ln.isDirty();
+        assert bin.isLatchExclusiveOwner();
+        assert bin.getInListResident();
+
+        final DatabaseImpl dbImpl = bin.getDatabase();
+
+        long memId = bin.getOffHeapLNId(index);
+        if (memId != 0) {
+            assert bin.getOffHeapLruId() >= 0;
+
+            /*
+             * If already stored off-heap, make the entry hot when
+             * CacheMode.UNCHANGED does not apply (getFetchedCold is false).
+             */
+            if (!bin.getFetchedCold()) {
+                moveBack(bin.getOffHeapLruId(), false);
+            }
+
+            if (DEBUG_TRACE) {
+                debug(
+                    bin.getEnv(),
+                    "Evicted LN already stored LSN=" +
+                    DbLsn.getNoFormatString(bin.getLsn(index)));
+            }
+
+            return true;
+        }
+
+        /*
+         * Do not store off-heap:
+         * - When CacheMode.UNCHANGED applies (getFetchedCold is true). This
+         *   is when the node was originally fetched from disk into main.
+         * - Deleted LNs are no longer needed.
+         * - For embedded LNs and dup DBs, there is no separate LN.
+         * - Off-heap caching for internal DBs is not currently supported.
+         */
+        if (ln.getFetchedCold() ||
+            ln.isDeleted() ||
+            bin.isEmbeddedLN(index) ||
+            dbImpl.getSortedDuplicates() ||
+            dbImpl.isDeferredWriteMode() || // TODO remove
+            dbImpl.getDbType().isInternal()) {
+            return false;
+        }
+
+        memId = serializeLN(dbImpl.getEnv(), ln);
+        if (memId == 0) {
+            return false;
+        }
+
+        bin.setOffHeapLNId(index, memId);
+
+        /* Add to LRU at hot end, or make hot if already in LRU. */
+        int entry = bin.getOffHeapLruId();
+        if (entry < 0) {
+            entry = addBack(false, bin, 0);
+            bin.setOffHeapLruId(entry);
+        } else {
+            moveBack(entry, false);
+        }
+
+        if (DEBUG_TRACE) {
+            debug(
+                bin.getEnv(),
+                "Stored evicted LN LSN=" +
+                DbLsn.getNoFormatString(bin.getLsn(index)));
+        }
+
+        return true;
+    }
+
+    /**
+     * Called when an LN has been fetched from disk and should be stored
+     * off-heap.
+ */ + public boolean storePreloadedLN(final BIN bin, + final int index, + final LN ln) { + final DatabaseImpl dbImpl = bin.getDatabase(); + + assert !ln.isDirty(); + assert !ln.isDeleted(); + assert bin.isLatchExclusiveOwner(); + assert !bin.isEmbeddedLN(index); + assert bin.getTarget(index) == null; + assert !dbImpl.getSortedDuplicates(); + assert !dbImpl.isDeferredWriteMode(); // TODO remove + assert !dbImpl.getDbType().isInternal(); + + if (bin.getOffHeapLNId(index) != 0) { + assert bin.getInListResident(); + return true; + } + + final long memId = serializeLN(dbImpl.getEnv(), ln); + if (memId == 0) { + return false; + } + + bin.setOffHeapLNId(index, memId); + + if (!bin.getInListResident()) { + /* Preloading into a temporary BIN, not in the Btree. */ + return true; + } + + /* Add to LRU at hot end, or make hot if already in LRU. */ + int entry = bin.getOffHeapLruId(); + if (entry < 0) { + entry = addBack(false, bin, 0); + bin.setOffHeapLruId(entry); + } else { + moveBack(entry, false); + } + + return true; + } + + public boolean ensureOffHeapLNsInLRU(final BIN bin) { + + assert bin.isLatchExclusiveOwner(); + + if (bin.getOffHeapLruId() >= 0) { + return true; + } + + if (!bin.hasOffHeapLNs()) { + return false; + } + + final int entry = addBack(false, bin, 0); + bin.setOffHeapLruId(entry); + return true; + } + + public LN loadLN(final BIN bin, + final int index, + final CacheMode cacheMode) { + + assert bin.isLatchExclusiveOwner(); + + final long memId = bin.getOffHeapLNId(index); + if (memId == 0) { + return null; + } + + final LN ln = materializeLN(bin.getEnv(), memId); + + switch (cacheMode) { + case UNCHANGED: + case MAKE_COLD: + /* Will be evicted from main. Leave off-heap. */ + break; + case EVICT_LN: + case EVICT_BIN: + /* Will be evicted from main. Leave off-heap and make hot. */ + assert bin.getOffHeapLruId() >= 0; + moveBack(bin.getOffHeapLruId(), false); + break; + case DEFAULT: + case KEEP_HOT: + /* Will remain in main. Remove from off-heap. */ + bin.setOffHeapLNId(index, 0); + freeLN(memId); + break; + default: + assert false; + } + + if (DEBUG_TRACE) { + debug( + bin.getEnv(), + "Loaded LN LSN=" + + DbLsn.getNoFormatString(bin.getLsn(index))); + } + + return ln; + } + + public void freeRedundantLN(final BIN bin, + final int index, + final LN ln, + final CacheMode cacheMode) { + + assert bin.isLatchExclusiveOwner(); + + final long memId = bin.getOffHeapLNId(index); + if (memId == 0) { + return; + } + + switch (cacheMode) { + case UNCHANGED: + case MAKE_COLD: + if (ln.getFetchedCold()) { + /* Will be evicted from main. Leave off-heap. */ + return; + } + /* Will remain in main. Remove from off-heap. */ + break; + case EVICT_BIN: + case EVICT_LN: + /* Will be evicted from main. Leave off-heap. */ + return; + case DEFAULT: + case KEEP_HOT: + /* Will remain in main. Remove from off-heap. */ + break; + default: + assert false; + } + + bin.setOffHeapLNId(index, 0); + freeLN(memId); + } + + public long loadVLSN(final BIN bin, final int index) { + + if (!bin.getEnv().getCacheVLSN()) { + return VLSN.NULL_VLSN_SEQUENCE; + } + + final long memId = bin.getOffHeapLNId(index); + if (memId == 0) { + return VLSN.NULL_VLSN_SEQUENCE; + } + + return getLong(memId, 0, new byte[8]); + } + + public int freeLN(final BIN bin, final int index) { + + assert bin.isLatchExclusiveOwner(); + + final long memId = bin.getOffHeapLNId(index); + if (memId == 0) { + return 0; + } + + /* + * Since the LN was off-heap, set fetched-cold to false. 
Otherwise + * the fetched-cold flag will prevent the LN from being stored + * off-heap when it is evicted later. + */ + final LN ln = (LN) bin.getTarget(index); + if (ln != null) { + ln.setFetchedCold(false); + } + + bin.setOffHeapLNId(index, 0); + return freeLN(memId); + } + + private int freeLN(final long memId) { + + cachedLNs.decrementAndGet(); + return freeMemory(memId); + } + + private long serializeLN(final EnvironmentImpl envImpl, final LN ln) { + + final boolean useChecksums = envImpl.useOffHeapChecksums(); + final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0; + final int vlsnSize = envImpl.getCacheVLSN() ? VLSN_SIZE : 0; + final int lnDataOffset = vlsnSize + checksumSize; + + /* + * We make 3 calls to allocator.copy (one explicit and two via putLong + * and putInt) rather than just one because: + * - This avoids an extra copy and buffer allocation for the LN data. + * - The LN data is potentially large. + * - The checksum is normally off in production, so there is at most + * one extra allocator.copy for the VLSN. + */ + final byte[] data = ln.getData(); + assert data != null; + + final long memId = allocateMemory(envImpl, lnDataOffset + data.length); + if (memId == 0) { + return 0; + } + + final byte[] tempBuf = + (vlsnSize > 0 || useChecksums) ? new byte[8] : null; + + if (vlsnSize > 0) { + putLong(ln.getVLSNSequence(), memId, 0, tempBuf); + } + + if (useChecksums) { + final Checksum checksum = Adler32.makeChecksum(); + checksum.update(data, 0, data.length); + final int checksumValue = (int) checksum.getValue(); + putInt(checksumValue, memId, vlsnSize, tempBuf); + } + + allocator.copy(data, 0, memId, lnDataOffset, data.length); + + nLNsStored.incrementAndGet(); + cachedLNs.incrementAndGet(); + + return memId; + } + + private LN materializeLN(final EnvironmentImpl envImpl, + final long memId) { + + final boolean useChecksums = envImpl.useOffHeapChecksums(); + final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0; + final int vlsnSize = envImpl.getCacheVLSN() ? VLSN_SIZE : 0; + final int lnDataOffset = vlsnSize + checksumSize; + + final byte[] data = new byte[allocator.size(memId) - lnDataOffset]; + allocator.copy(memId, lnDataOffset, data, 0, data.length); + + final byte[] tempBuf = + (vlsnSize > 0 || useChecksums) ? new byte[8] : null; + + if (useChecksums) { + final int storedChecksum = getInt(memId, vlsnSize, tempBuf); + if (storedChecksum != 0) { + + final Checksum checksum = Adler32.makeChecksum(); + checksum.update(data, 0, data.length); + final int checksumValue = (int) checksum.getValue(); + + if (storedChecksum != checksumValue) { + throw unexpectedState( + envImpl, + "Off-heap cache checksum error. Expected " + + storedChecksum + " but got " + checksumValue); + } + } + } + + nLNsLoaded.incrementAndGet(); + + final LN ln = LN.makeLN(envImpl, data); + ln.clearDirty(); // New LNs are initially dirty. + + if (vlsnSize > 0) { + ln.setVLSNSequence(getLong(memId, 0, tempBuf)); + } + + return ln; + } + + /** + * Called before eviction of a BIN from main cache to provide an + * opportunity to store the BIN off-heap. + * + * removeINFromMain is called after this method by the main evictor. It + * is removeINFromMain that removes the main BIN's off-heap LRU entry, if + * it has one. The bin and parent latches are held across the calls to + * storeEvictedBIN and removeINFromMain. + * + * removeINFromMain will also free any off-heap LN IDs in the main BIN, + * and therefore this method must clear those IDs in the main BIN. 
When + * the BIN is stored off-heap by this method, the LN IDs will be stored + * along with the off-heap BIN. + */ + public boolean storeEvictedBIN(final BIN bin, + final IN parent, + final int index) { + + assert bin.isLatchExclusiveOwner(); + assert bin.getInListResident(); + assert parent.isLatchExclusiveOwner(); + assert parent.getInListResident(); + assert bin == parent.getTarget(index); + assert parent.getOffHeapBINId(index) < 0; + + final DatabaseImpl dbImpl = bin.getDatabase(); + + /* + * Do not store off-heap: + * - When CacheMode.UNCHANGED applies, the BIN was not loaded from + * off-heap, and the BIN is not dirty. + * - Off-heap caching for internal DBs is not currently supported. + */ + if ((bin.getFetchedCold() && + !bin.getFetchedColdOffHeap() && + !bin.getDirty()) || + dbImpl.isDeferredWriteMode() || // TODO remove + dbImpl.getDbType().isInternal()) { + return false; + } + + /* Serialize the BIN and add it to the off-heap LRU. */ + + final long memId = serializeBIN(bin, bin.isBINDelta()); + if (memId == 0) { + return false; + } + + /* + * Reuse LRU entry if one exists for the BIN, in order not to change + * the effective LRU position of its off-heap LNs. When off-heap LNs + * are present, we want to preserve the off-heap LRU position to allow + * the LNs to be stripped sooner. + */ + int entry = bin.getOffHeapLruId(); + if (entry >= 0) { + setOwnerAndMemId(entry, parent, memId); + bin.clearOffHeapLNIds(); + bin.setOffHeapLruId(-1); + } else { + entry = addBack(false /*pri2*/, parent, memId); + } + + parent.setOffHeapBINId(index, entry, false /*pri2*/, bin.getDirty()); + + if (DEBUG_TRACE) { + debug( + bin.getEnv(), + "Stored BIN LSN=" + + DbLsn.getNoFormatString(parent.getLsn(index)) + + " Node=" + bin.getNodeId() + + " dirty=" + bin.getDirty()); + } + + return true; + } + + /** + * Called when a BIN has been fetched from disk and should be stored + * off-heap. + */ + public boolean storePreloadedBIN(final BIN bin, + final IN parent, + final int index) { + + assert bin != null; + assert parent.isLatchExclusiveOwner(); + assert parent.getInListResident(); + assert parent.getTarget(index) == null; + + final DatabaseImpl dbImpl = bin.getDatabase(); + + assert !dbImpl.isDeferredWriteMode(); // TODO remove + assert !dbImpl.getDbType().isInternal(); + + /* Pass non-null 'bin' so that off-heap LNs are not freed. */ + freeBIN(bin, parent, index); + + final long memId = serializeBIN(bin, bin.isBINDelta()); + if (memId == 0) { + return false; + } + + final int entry = addBack(false /*pri2*/, parent, memId); + parent.setOffHeapBINId(index, entry, false /*pri2*/, bin.getDirty()); + + return true; + } + + /** + * Called before eviction of a level 2 IN from main cache. Any off-heap + * BIN children are first logged, if dirty, and then discarded. + * + * @return true if all BINs could be discarded, or false if a dirty BIN + * could not be logged due to a read-only env or disk limit violation. 
+ */ + boolean flushAndDiscardBINChildren(final IN in, + final boolean backgroundIO) { + assert in.isLatchExclusiveOwner(); + assert in.getInListResident(); + assert in.getNormalizedLevel() == 2; + + if (!in.hasOffHeapBINIds()) { + return true; + } + + boolean allDiscarded = true; + + for (int i = 0; i < in.getNEntries(); i += 1) { + + final int entry = in.getOffHeapBINId(i); + if (entry < 0) { + continue; + } + + if (flushAndDiscardBIN( + entry, in.isOffHeapBINPri2(i), in.isOffHeapBINDirty(i), + getMemId(entry), in, i, backgroundIO, true /*freeLNs*/) == 0) { + allDiscarded = false; + } + } + + return allDiscarded; + } + + /** + * Called: + * - after eviction of an IN from main cache, and in that case + * storeEvictedBIN was called and the eviction was completed. + * - when an IN is removed from the main cache for another reason, + * such as a reverse split or Database removal. + * - for all INs in an Environment being removed from the shared cache. + */ + public long removeINFromMain(final IN in) { + + assert in.isLatchExclusiveOwner(); + + final int level = in.getNormalizedLevel(); + + if (level > 2) { + return 0; + } + + if (level == 2) { + + if (!in.hasOffHeapBINIds()) { + return 0; + } + + long size = 0; + + for (int i = 0; i < in.getNEntries(); i += 1) { + + final BIN bin = (BIN) in.getTarget(i); + + if (bin != null) { + bin.latchNoUpdateLRU(); + } + try { + size += freeBIN(bin, in, i); + } finally { + if (bin != null) { + bin.releaseLatch(); + } + } + } + + return size; + } + + assert level == 1 && in.isBIN(); + + final BIN bin = (BIN) in; + + final int entry = bin.getOffHeapLruId(); + if (entry < 0) { + assert !bin.hasOffHeapLNs(); + return 0; + } + + long size = 0; + + if (bin.hasOffHeapLNs()) { + for (int i = 0; i < bin.getNEntries(); i += 1) { + size += freeLN(bin, i); + } + } + + bin.setOffHeapLruId(-1); + remove(entry, false); + return size; + } + + public BIN loadBIN(final EnvironmentImpl envImpl, final int entry) { + + assert entry >= 0; + + return materializeBIN(envImpl, getMemBytes(getMemId(entry))); + } + + /** + * Loads a BIN for the given entry, if its last logged LSN is the given + * LSN. Can be used to store an entry for a BIN (the off-heap BIN ID) + * without holding its parent IN latch, and later find out whether that + * entry still refers to the same BIN. If the BIN was split, the LSN will + * have changed and null is returned. If the BIN is no longer off-heap, or + * was moved off-heap and back on, null is also returned. + * + * If the BIN is redundantly resident in the main and off-heap caches, the + * main cache "live" version is returned. Otherwise the BIN is deserialized + * from the off-heap version and is not "live". When non-null is returned, + * the returned BIN is latched. 
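+     *
+     * <p>An illustrative call sequence (hypothetical; the variable names are
+     * not from this class), following the contract above:
+     * <pre>
+     *    // While the parent was latched, we recorded (entry, lsn).
+     *    final BIN bin = loadBINIfLsnMatches(envImpl, entry, lsn);
+     *    if (bin != null) {
+     *        try {
+     *            // bin is either the live main-cache BIN or a BIN
+     *            // deserialized from the off-heap bytes.
+     *        } finally {
+     *            bin.releaseLatch();
+     *        }
+     *    }
+     * </pre>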
+     */
+    public BIN loadBINIfLsnMatches(final EnvironmentImpl envImpl,
+                                   final int entry,
+                                   final long lsn) {
+
+        final Pair<IN, Integer> result =
+            findBINIfLsnMatches(envImpl, entry, lsn);
+
+        if (result == null) {
+            return null;
+        }
+
+        final IN in = result.first();
+        final int index = result.second();
+
+        try {
+            BIN bin = (BIN) in.getTarget(index);
+            if (bin != null) {
+                bin.latchNoUpdateLRU();
+                return bin;
+            }
+
+            final long memId = getMemId(entry);
+            bin = materializeBIN(envImpl, getMemBytes(memId));
+            bin.latchNoUpdateLRU(in.getDatabase());
+
+            return bin;
+
+        } finally {
+            in.releaseLatch();
+        }
+    }
+
+    public void evictBINIfLsnMatch(final EnvironmentImpl envImpl,
+                                   final int entry,
+                                   final long lsn) {
+
+        final Pair<IN, Integer> result =
+            findBINIfLsnMatches(envImpl, entry, lsn);
+
+        if (result == null) {
+            return;
+        }
+
+        final IN in = result.first();
+        final int index = result.second();
+
+        try {
+            assert in.getTarget(index) == null;
+            freeBIN(null, in, index);
+        } finally {
+            in.releaseLatch();
+        }
+    }
+
+    /**
+     * If non-null is returned, the returned IN will be EX latched.
+     */
+    private Pair<IN, Integer> findBINIfLsnMatches(
+        final EnvironmentImpl envImpl,
+        final int entry,
+        final long lsn) {
+
+        final Chunk chunk = chunks[entry / CHUNK_SIZE];
+        final int chunkIdx = entry % CHUNK_SIZE;
+        final IN in = chunk.owners[chunkIdx];
+
+        if (in == null) {
+            return null;
+        }
+
+        /*
+         * The validation process here is very similar to that in evictOne.
+         * See the comments in that method.
+         */
+        in.latchNoUpdateLRU();
+
+        if (in != chunk.owners[chunkIdx] ||
+            !in.getInListResident() ||
+            in.getEnv() != envImpl ||
+            in.isBIN()) {
+
+            in.releaseLatch();
+            return null;
+        }
+
+        int index = -1;
+        for (int i = 0; i < in.getNEntries(); i += 1) {
+            if (in.getOffHeapBINId(i) == entry) {
+                index = i;
+                break;
+            }
+        }
+
+        if (index < 0) {
+            in.releaseLatch();
+            return null;
+        }
+
+        if (in.getLsn(index) != lsn) {
+            in.releaseLatch();
+            return null;
+        }
+
+        return new Pair<>(in, index);
+    }
+
+    public byte[] getBINBytes(final IN parent, final int index) {
+
+        assert parent.isLatchOwner();
+
+        final int entry = parent.getOffHeapBINId(index);
+        if (entry < 0) {
+            return null;
+        }
+
+        assert parent == getOwner(entry);
+
+        return getMemBytes(getMemId(entry));
+    }
+
+    /**
+     * Called when a BIN's bytes were obtained holding a shared latch, and
+     * then the latch was released and acquired again. We need to determine
+     * whether the BIN was changed and moved off-heap again, while unlatched.
+     *
+     * Currently we just get the bytes again and compare.
+     *
+     * Possible optimization: Maintain a generation count in the serialized
+     * BIN, whose value comes from a global counter that is incremented
+     * whenever a BIN is serialized. But would the range of such a counter be
+     * large enough to guarantee that wrapping won't be a problem? Certainly
+     * the odds are low, but how can we guarantee it won't happen? Another
+     * approach is to maintain the counter in the BIN in main cache, so it is
+     * a per-BIN value.
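+     *
+     * A sketch of that per-BIN variant (not implemented here; the generation
+     * field is hypothetical): serializeBIN would reserve a generation slot in
+     * the serialized header, filled from a counter kept in the main-cache BIN
+     * and incremented each time that BIN is serialized; this method would
+     * then compare the two generation values rather than the full byte
+     * arrays. Wrapping would then require 2^32 serializations of a single BIN
+     * between the caller's two reads, rather than 2^32 serializations
+     * globally.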
+ */ + public boolean haveBINBytesChanged(final IN parent, + final int index, + final byte[] bytes) { + assert parent.isLatchOwner(); + + return !Arrays.equals(bytes, getBINBytes(parent, index)); + } + + public void postBINLoad(final IN parent, final int index, final BIN bin) { + + assert bin.isLatchExclusiveOwner(); + assert parent.isLatchExclusiveOwner(); + assert parent.getInListResident(); + assert parent.getTarget(index) == null; + + final int entry = parent.getOffHeapBINId(index); + assert entry >= 0; + assert parent == getOwner(entry); + + bin.setDirty(parent.isOffHeapBINDirty(index)); + + final long freed = freeBIN(bin, parent, index); + assert freed > 0; + + ensureOffHeapLNsInLRU(bin); + + if (DEBUG_TRACE) { + debug( + parent.getEnv(), + "Loaded BIN LSN=" + + DbLsn.getNoFormatString(parent.getLsn(index)) + + " Node=" + bin.getNodeId() + + " dirty=" + bin.getDirty()); + } + } + + public long freeBIN(final BIN bin, final IN parent, final int index) { + + assert parent.isLatchExclusiveOwner(); + + assert bin == null || bin.isLatchExclusiveOwner(); + + final int entry = parent.getOffHeapBINId(index); + + if (entry < 0) { + return 0; + } + + assert parent == getOwner(entry); + + final boolean pri2 = parent.isOffHeapBINPri2(index); + final long memId = getMemId(entry); + + parent.clearOffHeapBINId(index); + remove(entry, pri2); + + /* + * Only free the LNs referenced by the off-heap BIN if the BIN is not + * resident in main (bin == null). When the off-heap BIN is stale, its + * LN Ids are also stale. + */ + return freeBIN(parent.getEnv(), memId, bin == null /*freeLNs*/); + } + + private long freeBIN(final EnvironmentImpl envImpl, + final long memId, + final boolean freeLNs) { + + long size = 0; + final int flags; + + if (freeLNs) { + final ParsedBIN pb = parseBINBytes( + envImpl, getMemBytes(memId), + false /*partialBuf*/, true /*parseLNIds*/); + + if (pb.lnMemIds != null) { + for (final long lnMemId : pb.lnMemIds) { + if (lnMemId == 0) { + continue; + } + size += freeLN(lnMemId); + } + } + + flags = pb.flags; + } else { + final boolean useChecksums = envImpl.useOffHeapChecksums(); + final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0; + flags = getByte(memId, checksumSize, new byte[1]); + } + + cachedBINs.decrementAndGet(); + if ((flags & BIN_FLAG_DELTA) != 0) { + cachedBINDeltas.decrementAndGet(); + } + + return size + freeMemory(memId); + } + + long serializeBIN(final BIN bin, final boolean asDelta) { + + assert !bin.hasCachedChildren(); + assert !(bin.isBINDelta() && !asDelta); + + final EnvironmentImpl envImpl = bin.getEnv(); + final boolean useChecksums = envImpl.useOffHeapChecksums(); + final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0; + final boolean canMutate = !asDelta && bin.canMutateToBINDelta(); + + int flags = 0; + + if (asDelta) { + flags |= BIN_FLAG_DELTA; + } + if (canMutate) { + flags |= BIN_FLAG_CAN_MUTATE; + } + if (bin.getProhibitNextDelta()) { + flags |= BIN_FLAG_PROHIBIT_NEXT_DELTA; + } + + final short lnIdSize = getPackedLnMemIdSize(bin); + + /* + * If there are any LNs, then we should not be mutating from a full BIN + * to a BIN delta -- this isn't handled and should not happen. 
+         */
+        assert !(asDelta && !bin.isBINDelta() && lnIdSize != 0);
+
+        final int memSize =
+            checksumSize + 1 + 8 + 8 + 4 + 2 +
+            lnIdSize + bin.getLogSize(asDelta);
+
+        final long memId = allocateMemory(envImpl, memSize);
+
+        if (memId == 0) {
+            return 0;
+        }
+
+        final byte[] buf = new byte[memSize];
+        int bufOffset = checksumSize;
+
+        buf[bufOffset] = (byte) flags;
+        bufOffset += 1;
+
+        putLong(bin.getLastFullLsn(), buf, bufOffset);
+        bufOffset += 8;
+
+        putLong(bin.getLastDeltaLsn(), buf, bufOffset);
+        bufOffset += 8;
+
+        putInt(getMinExpiration(bin), buf, bufOffset);
+        bufOffset += 4;
+
+        putShort(lnIdSize, buf, bufOffset);
+        bufOffset += 2;
+
+        if (lnIdSize > 0) {
+            packLnMemIds(bin, buf, bufOffset);
+            bufOffset += lnIdSize;
+        }
+
+        final ByteBuffer byteBuf =
+            ByteBuffer.wrap(buf, bufOffset, buf.length - bufOffset);
+
+        bin.serialize(byteBuf, asDelta, false /*clearDirtyBits*/);
+
+        if (useChecksums) {
+            final Checksum checksum = Adler32.makeChecksum();
+            checksum.update(buf, checksumSize, buf.length - checksumSize);
+            final int checksumValue = (int) checksum.getValue();
+            putInt(checksumValue, memId, 0, buf);
+        }
+
+        allocator.copy(buf, 0, memId, 0, buf.length);
+
+        nBINsStored.incrementAndGet();
+        cachedBINs.incrementAndGet();
+        if (asDelta) {
+            cachedBINDeltas.incrementAndGet();
+        }
+
+        return memId;
+    }
+
+    public BIN materializeBIN(final EnvironmentImpl envImpl,
+                              final byte[] buf) {
+
+        final ParsedBIN pb = parseBINBytes(
+            envImpl, buf, false /*partialBuf*/, true /*parseLNIds*/);
+
+        final BIN bin = materializeBIN(pb, (pb.flags & BIN_FLAG_DELTA) != 0);
+
+        nBINsLoaded.incrementAndGet();
+
+        return bin;
+    }
+
+    private BIN materializeBIN(final ParsedBIN pb, final boolean asDelta) {
+
+        final BIN bin = new BIN();
+
+        bin.materialize(
+            pb.binBytes, LogEntryType.LOG_VERSION, asDelta /*deltasOnly*/,
+            (pb.flags & BIN_FLAG_LOGGED_FULL_VERSION) != 0 /*clearDirtyBits*/);
+
+        bin.setLastFullLsn(pb.lastFullLsn);
+        bin.setLastDeltaLsn(pb.lastDeltaLsn);
+
+        bin.setProhibitNextDelta(
+            (pb.flags & BIN_FLAG_PROHIBIT_NEXT_DELTA) != 0);
+
+        if (pb.lnMemIds != null) {
+            for (int i = 0; i < pb.lnMemIds.length; i += 1) {
+                final long lnMemId = pb.lnMemIds[i];
+                if (lnMemId == 0) {
+                    continue;
+                }
+                bin.setOffHeapLNId(i, lnMemId);
+            }
+        }
+
+        return bin;
+    }
+
+    public INLogEntry<BIN> createBINLogEntryForCheckpoint(final IN parent,
+                                                          final int index) {
+        final int entry = parent.getOffHeapBINId(index);
+
+        if (entry < 0 || !parent.isOffHeapBINDirty(index)) {
+            return null;
+        }
+
+        assert parent == getOwner(entry);
+
+        final long memId = getMemId(entry);
+
+        return createBINLogEntry(
+            memId, entry, parent, true /*preserveBINInCache*/);
+    }
+
+    public void postBINLog(final IN parent,
+                           final int index,
+                           final INLogEntry<BIN> logEntry,
+                           final long newLsn) {
+
+        assert parent.isLatchExclusiveOwner();
+        assert parent.getInListResident();
+
+        final EnvironmentImpl envImpl = parent.getEnv();
+        final boolean useChecksums = envImpl.useOffHeapChecksums();
+        final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0;
+
+        final boolean isDelta = logEntry.isBINDelta();
+        final int entry = parent.getOffHeapBINId(index);
+
+        assert entry >= 0;
+        assert parent.isOffHeapBINDirty(index);
+
+        final BIN bin =
+            logEntry.isPreSerialized() ? null : logEntry.getMainItem();
+
+        /*
+         * Update checksum, flags and last full/delta LSNs.
+         */
+        final long memId = getMemId(entry);
+        final byte[] buf = new byte[checksumSize + 1 + 8 + 8];
+        allocator.copy(memId, 0, buf, 0, buf.length);
+        int bufOffset = 0;
+
+        /* The checksum is now invalid. */
+        if (useChecksums) {
+            putInt(0, buf, 0);
+            bufOffset += checksumSize;
+        }
+
+        /* Update flags. */
+        int flags = buf[bufOffset];
+        if (!isDelta) {
+            flags |= BIN_FLAG_LOGGED_FULL_VERSION;
+        }
+        flags &= ~BIN_FLAG_PROHIBIT_NEXT_DELTA;
+        buf[bufOffset] = (byte) flags;
+        bufOffset += 1;
+
+        /* Update lastFullLsn. */
+        if (!isDelta) {
+            putLong(newLsn, buf, bufOffset);
+        }
+        bufOffset += 8;
+
+        /* Update lastDeltaLsn. */
+        putLong(isDelta ? newLsn : DbLsn.NULL_LSN, buf, bufOffset);
+        bufOffset += 8;
+        assert bufOffset == buf.length;
+
+        allocator.copy(buf, 0, memId, 0, buf.length);
+
+        /* Move from pri2 LRU list to back of pri1 LRU list. */
+        if (parent.isOffHeapBINPri2(index)) {
+            pri2LRUSet[entry % numLRULists].remove(entry);
+            moveBack(entry, false /*pri2*/);
+        }
+
+        parent.setOffHeapBINId(
+            index, entry, false /*pri2*/, false /*dirty*/);
+
+        if (bin != null) {
+            bin.releaseLatch();
+        }
+    }
+
+    private INLogEntry<BIN> createBINLogEntry(
+        final long memId,
+        final int entry,
+        final IN parent,
+        final boolean preserveBINInCache) {
+
+        final EnvironmentImpl envImpl = parent.getEnv();
+
+        final byte[] buf = getMemBytes(memId);
+
+        final ParsedBIN pb = parseBINBytes(
+            envImpl, buf, false /*partialBuf*/, false /*parseLNIds*/);
+
+        final boolean isDelta = (pb.flags & BIN_FLAG_DELTA) != 0;
+        final boolean canMutateToDelta = (pb.flags & BIN_FLAG_CAN_MUTATE) != 0;
+
+        /*
+         * If the BIN is a delta, we must log a delta. In that case we cannot
+         * compress expired slots, and we can log the pre-serialized BIN.
+         */
+        if (isDelta) {
+            return new BINDeltaLogEntry(
+                pb.binBytes, pb.lastFullLsn, pb.lastDeltaLsn,
+                LogEntryType.LOG_BIN_DELTA, parent);
+        }
+
+        /*
+         * For a full BIN, normally we log a delta iff it can be mutated to a
+         * delta. However, if any slots are expired, we attempt to compress
+         * them and then determine whether to log a delta.
+         *
+         * This mimics the logic in IN.logInternal, for BINs in main cache.
+         *
+         * TODO: Use materialized BIN in the log entry when any slot has an
+         * expiration time, even if none are currently expired. Such BINs must
+         * be materialized during logging for expiration tracking anyway by
+         * INLogEntry.getBINWithExpiration. Then the getBINWithExpiration and
+         * BIN.mayHaveExpirationValues methods will no longer be needed.
+         */
+        final boolean hasExpiredSlot =
+            envImpl.isExpired(pb.minExpiration, true /*hours*/);
+
+        /*
+         * If we do not need to log a full BIN as a delta, or to compress its
+         * expired slots, then we can log the pre-serialized BIN.
+         */
+        if (!hasExpiredSlot && !canMutateToDelta) {
+            return new INLogEntry<>(
+                pb.binBytes, pb.lastFullLsn, pb.lastDeltaLsn,
+                LogEntryType.LOG_BIN, parent);
+        }
+
+        /*
+         * We must materialize the full BIN in order to log it as a delta or
+         * to compress its expired slots.
+         */
+        final BIN bin = materializeBIN(pb, false /*asDelta*/);
+
+        /*
+         * Latch the BIN to avoid assertions during BIN.writeToLog. A side
+         * effect is setting the Database, which is also needed.
+         */
+        bin.latchNoUpdateLRU(parent.getDatabase());
+
+        final boolean logDelta;
+
+        if (hasExpiredSlot) {
+            final int origNSlots = bin.getNEntries();
+
+            /* Compress non-dirty slots before determining delta status.
*/ + bin.compress(false /*compressDirtySlots*/, null); + logDelta = bin.shouldLogDelta(); + + /* Also compress dirty slots, if we will not log a delta. */ + if (!logDelta) { + bin.compress(true /*compressDirtySlots*/, null); + } + + /* If we compressed an expired slot, re-serialize the BIN. */ + if (preserveBINInCache && origNSlots != bin.getNEntries()) { + final long newMemId = serializeBIN(bin, bin.isBINDelta()); + if (newMemId == 0) { + /* + * TODO: Is invalid if compressed slot had off-heap LN. + * Should discard off-heap BIN and install 'bin' in main. + */ + return new INLogEntry<>( + pb.binBytes, pb.lastFullLsn, pb.lastDeltaLsn, + LogEntryType.LOG_BIN, parent); + } + freeMemory(memId); + setOwnerAndMemId(entry, parent, newMemId); + } + } else { + /* hasExpiredSlot is false, therefore canMutateToDelta is true. */ + logDelta = true; + } + + return logDelta ? + (new BINDeltaLogEntry(bin)) : + (new INLogEntry<>(bin)); + } + + public static class BINInfo { + + public final boolean isBINDelta; + public final long fullBINLsn; + + private BINInfo(final ParsedBIN pb) { + isBINDelta = (pb.flags & BIN_FLAG_DELTA) != 0; + fullBINLsn = pb.lastFullLsn; + } + } + + public BINInfo getBINInfo(final EnvironmentImpl envImpl, final int entry) { + + assert entry >= 0; + + final boolean useChecksums = envImpl.useOffHeapChecksums(); + final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0; + + final long memId = getMemId(entry); + final byte[] buf = new byte[checksumSize + 1 + 8 + 8 + 4]; + allocator.copy(memId, 0, buf, 0, buf.length); + + final ParsedBIN pb = parseBINBytes( + envImpl, buf, true /*partialBuf*/, false /*parseLNIds*/); + + return new BINInfo(pb); + } + + long getINSize(final IN in) { + + if (in.isBIN()) { + final BIN bin = (BIN) in; + + if (!bin.hasOffHeapLNs()) { + return 0; + } + + long size = 0; + + for (int i = 0; i < in.getNEntries(); i += 1) { + final long memId = bin.getOffHeapLNId(i); + if (memId == 0) { + continue; + } + size += allocator.totalSize(memId); + } + + return size; + } + + if (in.getNormalizedLevel() != 2) { + return 0; + } + + if (!in.hasOffHeapBINIds()) { + return 0; + } + + final EnvironmentImpl envImpl = in.getEnv(); + long size = 0; + + for (int i = 0; i < in.getNEntries(); i += 1) { + + final int entry = in.getOffHeapBINId(i); + if (entry < 0) { + continue; + } + + final long memId = getMemId(entry); + size += allocator.totalSize(memId); + + if (in.getTarget(i) != null) { + /* Redundant BIN, do not count off-heap LNs here. 
 */
+                continue;
+            }
+
+            final ParsedBIN pb = parseBINBytes(
+                envImpl, getMemBytes(memId),
+                false /*partialBuf*/, true /*parseLNIds*/);
+
+            if (pb.lnMemIds == null) {
+                continue;
+            }
+
+            for (final long lnMemId : pb.lnMemIds) {
+                if (lnMemId == 0) {
+                    continue;
+                }
+                size += allocator.totalSize(lnMemId);
+            }
+        }
+
+        return size;
+    }
+
+    private static class ParsedBIN {
+        final int flags;
+        final long[] lnMemIds;
+        final long lastFullLsn;
+        final long lastDeltaLsn;
+        final int minExpiration;
+        final ByteBuffer binBytes;
+
+        ParsedBIN(final int flags,
+                  final long[] lnMemIds,
+                  final long lastFullLsn,
+                  final long lastDeltaLsn,
+                  final int minExpiration,
+                  final ByteBuffer binBytes) {
+
+            this.flags = flags;
+            this.lnMemIds = lnMemIds;
+            this.lastFullLsn = lastFullLsn;
+            this.lastDeltaLsn = lastDeltaLsn;
+            this.minExpiration = minExpiration;
+            this.binBytes = binBytes;
+        }
+    }
+
+    private ParsedBIN parseBINBytes(final EnvironmentImpl envImpl,
+                                    final byte[] buf,
+                                    final boolean partialBuf,
+                                    final boolean parseLNIds) {
+
+        assert !(partialBuf && parseLNIds);
+
+        final boolean useChecksums = envImpl.useOffHeapChecksums();
+        final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0;
+
+        if (useChecksums && !partialBuf) {
+            final int storedChecksum = getInt(buf, 0);
+            if (storedChecksum != 0) {
+
+                final Checksum checksum = Adler32.makeChecksum();
+                checksum.update(buf, checksumSize, buf.length - checksumSize);
+                final int checksumValue = (int) checksum.getValue();
+
+                if (storedChecksum != checksumValue) {
+                    throw unexpectedState(
+                        envImpl,
+                        "Off-heap cache checksum error. Expected " +
+                        storedChecksum + " but got " + checksumValue);
+                }
+            }
+        }
+
+        int bufOffset = checksumSize;
+
+        final int flags = buf[bufOffset];
+        bufOffset += 1;
+
+        final long lastFullLsn = getLong(buf, bufOffset);
+        bufOffset += 8;
+
+        final long lastDeltaLsn = getLong(buf, bufOffset);
+        bufOffset += 8;
+
+        final int minExpiration = getInt(buf, bufOffset);
+        bufOffset += 4;
+
+        if (partialBuf) {
+            return new ParsedBIN(
+                flags, null, lastFullLsn, lastDeltaLsn, minExpiration, null);
+        }
+
+        final short lnIdsSize = getShort(buf, bufOffset);
+        bufOffset += 2;
+
+        /* lnIdsSize was negated if LNs were stripped by eviction. */
+        final long[] lnMemIds;
+
+        if (lnIdsSize > 0 && parseLNIds) {
+            lnMemIds = unpackLnMemIds(buf, bufOffset, lnIdsSize);
+        } else {
+            lnMemIds = null;
+        }
+
+        bufOffset += Math.abs(lnIdsSize);
+
+        final ByteBuffer byteBuf =
+            ByteBuffer.wrap(buf, bufOffset, buf.length - bufOffset);
+
+        return new ParsedBIN(
+            flags, lnMemIds, lastFullLsn, lastDeltaLsn, minExpiration,
+            byteBuf);
+    }
+
+    /**
+     * Returns the minimum expiration time in hours, or zero if no slots
+     * have an expiration time.
+     */
+    private int getMinExpiration(final BIN bin) {
+
+        int minExpire = 0;
+
+        for (int i = 0; i < bin.getNEntries(); i += 1) {
+            int expire = bin.getExpiration(i);
+            if (expire == 0) {
+                continue;
+            }
+            if (minExpire > expire || minExpire == 0) {
+                minExpire = expire;
+            }
+        }
+
+        if (minExpire == 0) {
+            return 0;
+        }
+
+        return bin.isExpirationInHours() ? minExpire : (minExpire * 24);
+    }
+
+    /**
+     * Adds LN memIds to the buffer using an RLE approach to save space:
+     *
+     * - The memIds are packed in slot index order. All slots are represented.
+     * - A positive byte indicates the number of 8-byte memIds that follow.
+     * - A negative byte indicates the number of slots that have no memId.
+     * - When a run exceeds 127 slots, another run is added.
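+     *   For example, slots {id0, id1, empty, empty, empty, id5} pack as
+     *   the byte sequence [2][id0][id1][-3][1][id5], using
+     *   1 + 16 + 1 + 1 + 8 = 27 bytes to represent six slots.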
So there is no + * effective limit on number of slots, although we know the maximum will + * fit in a short integer. + */ + private static void packLnMemIds(final BIN bin, + final byte[] buf, + int off) { + int nOff = off; + off += 1; + byte n = 0; + + for (int i = 0; i < bin.getNEntries(); i += 1) { + + final long memId = bin.getOffHeapLNId(i); + + if (memId != 0) { + + if (n < 0 || n == 127) { + buf[nOff] = n; + nOff = off; + off += 1; + n = 0; + } + + putLong(memId, buf, off); + off += 8; + n += 1; + + } else { + + if (n > 0 || n == -127) { + buf[nOff] = n; + nOff = off; + off += 1; + n = 0; + } + + n -= 1; + } + } + + buf[nOff] = n; + } + + private static short getPackedLnMemIdSize(final BIN bin) { + + if (!bin.hasOffHeapLNs()) { + return 0; + } + + int off = 1; + byte n = 0; + + for (int i = 0; i < bin.getNEntries(); i += 1) { + + if (bin.getOffHeapLNId(i) != 0) { + + if (n < 0 || n == 127) { + off += 1; + n = 0; + } + + off += 8; + n += 1; + + } else { + + if (n > 0 || n == -127) { + off += 1; + n = 0; + } + + n -= 1; + } + } + + if (off > Short.MAX_VALUE) { + throw unexpectedState(); + } + + return (short) off; + } + + private static long[] unpackLnMemIds(final byte[] buf, + final int startOff, + final int len) { + assert len > 0; + + final int endOff = startOff + len; + int off = startOff; + int i = 0; + + while (off < endOff) { + + final int n = buf[off]; + off += 1; + + if (n > 0) { + off += n * 8; + i += n; + } else { + assert n < 0; + i -= n; + } + } + + final long[] ids = new long[i + 1]; + off = startOff; + i = 0; + + while (off < endOff) { + + int n = buf[off]; + off += 1; + + if (n > 0) { + while (n > 0) { + ids[i] = getLong(buf, off); + off += 8; + i += 1; + n -= 1; + } + } else { + assert n < 0; + i -= n; + } + } + + return ids; + } + + private long allocateMemory(final EnvironmentImpl envImpl, + final int size) { + + /* + * Only enable the off-heap cache after recovery. This ensures + * that off-heap memory is available to recovery as file system + * cache, which is important when performing multiple passes over + * the recovery interval. + */ + if (!envImpl.isValid()) { + return 0; + } + + long memId = 0; + + try { + memId = allocator.allocate(size); + totalBlocks.incrementAndGet(); + + if (DEBUG_DOUBLE_FREE) { + final Long key = memId; + freedBlocks.remove(key); + prevFreedBlocks.remove(key); + } + + } catch (OutOfMemoryError e) { + + LoggerUtils.envLogMsg( + Level.SEVERE, envImpl, + "OutOfMemoryError trying to allocate in the off-heap cache. " + + "Continuing, but more problems are likely. 
Allocator error: " + + e.getMessage()); + + nAllocFailure.incrementAndGet(); + + memoryLimit = allocator.getUsedBytes() - evictBytes; + + } catch (OffHeapAllocator.OffHeapOverflowException e) { + + nAllocOverflow.incrementAndGet(); + + memoryLimit = allocator.getUsedBytes(); + } + + if (needEviction()) { + wakeUpEvictionThreads(); + } + + return memId; + } + + private int freeMemory(final long memId) { + + if (DEBUG_DOUBLE_FREE) { + + final Long key = memId; + boolean added = false; + Exception e = null; + Map curr = freedBlocks; + Map prev = prevFreedBlocks; + + if (freedBlocks.size() >= DEBUG_FREE_BLOCKS_PER_MAP) { + + synchronized (this) { + + if (freedBlocks.size() >= DEBUG_FREE_BLOCKS_PER_MAP) { + + prevFreedBlocks = freedBlocks; + freedBlocks = new ConcurrentHashMap<>(); + + e = freedBlocks.put( + key, new Exception("Freed: " + memId)); + + added = true; + curr = freedBlocks; + prev = prevFreedBlocks; + } + } + } + + if (!added) { + e = curr.put(key, new Exception("Freed: " + memId)); + } + + if (e != null) { + new Exception( + "Double-freed: " + memId + "\n" + + LoggerUtils.getStackTrace(e)).printStackTrace(); + } + + if (curr != prev) { + e = prev.get(key); + if (e != null) { + new Exception( + "Double-freed: " + memId + "\n" + + LoggerUtils.getStackTrace(e)).printStackTrace(); + } + } + } + + totalBlocks.decrementAndGet(); + return allocator.free(memId); + } + + private byte[] getMemBytes(final long memId) { + + final byte[] bytes = new byte[allocator.size(memId)]; + allocator.copy(memId, 0, bytes, 0, bytes.length); + return bytes; + } + + private byte getByte(final long memId, + final int offset, + final byte[] tempBuf) { + allocator.copy(memId, offset, tempBuf, 0, 1); + return tempBuf[0]; + } + + private void putShort(final short val, + final long memId, + final int offset, + final byte[] tempBuf) { + putShort(val, tempBuf, 0); + allocator.copy(tempBuf, 0, memId, offset, 2); + } + + private short getShort(final long memId, + final int offset, + final byte[] tempBuf) { + allocator.copy(memId, offset, tempBuf, 0, 2); + return getShort(tempBuf, 0); + } + + private void putInt(final int val, + final long memId, + final int offset, + final byte[] tempBuf) { + putInt(val, tempBuf, 0); + allocator.copy(tempBuf, 0, memId, offset, 4); + } + + private int getInt(final long memId, + final int offset, + final byte[] tempBuf) { + allocator.copy(memId, offset, tempBuf, 0, 4); + return getInt(tempBuf, 0); + } + + private void putLong(final long val, + final long memId, + final int offset, + final byte[] tempBuf) { + putLong(val, tempBuf, 0); + allocator.copy(tempBuf, 0, memId, offset, 8); + } + + private long getLong(final long memId, + final int offset, + final byte[] tempBuf) { + allocator.copy(memId, offset, tempBuf, 0, 8); + return getLong(tempBuf, 0); + } + + private static void putShort(final short val, + final byte[] buf, + final int offset) { + buf[offset] = (byte) (val >> 8); + buf[offset + 1] = (byte) val; + } + + private static short getShort(final byte[] buf, + final int offset) { + return (short) + ((buf[offset] << 8) | + (buf[offset + 1] & 0xff)); + } + + private static void putInt(final int val, + final byte[] buf, + final int offset) { + buf[offset] = (byte) (val >> 24); + buf[offset + 1] = (byte) (val >> 16); + buf[offset + 2] = (byte) (val >> 8); + buf[offset + 3] = (byte) val; + } + + private static int getInt(final byte[] buf, + final int offset) { + return + ((buf[offset] << 24) | + ((buf[offset + 1] & 0xff) << 16) | + ((buf[offset + 2] & 0xff) << 8) | + (buf[offset + 3] & 
0xff)); + } + + private static void putLong(final long val, + final byte[] buf, + final int offset) { + buf[offset] = (byte) (val >> 56); + buf[offset + 1] = (byte) (val >> 48); + buf[offset + 2] = (byte) (val >> 40); + buf[offset + 3] = (byte) (val >> 32); + buf[offset + 4] = (byte) (val >> 24); + buf[offset + 5] = (byte) (val >> 16); + buf[offset + 6] = (byte) (val >> 8); + buf[offset + 7] = (byte) val; + } + + private static long getLong(final byte[] buf, + final int offset) { + return + (((long)buf[offset] << 56) | + (((long)buf[offset + 1] & 0xff) << 48) | + (((long)buf[offset + 2] & 0xff) << 40) | + (((long)buf[offset + 3] & 0xff) << 32) | + (((long)buf[offset + 4] & 0xff) << 24) | + (((long)buf[offset + 5] & 0xff) << 16) | + (((long)buf[offset + 6] & 0xff) << 8) | + ((long)buf[offset + 7] & 0xff)); + } + + public void doCriticalEviction(boolean backgroundIO) { + + if (needEviction()) { + wakeUpEvictionThreads(); + + if (needCriticalEviction()) { + evictBatch(EvictionSource.CRITICAL, backgroundIO); + } + } + } + + public void doDaemonEviction(boolean backgroundIO) { + + if (needEviction()) { + wakeUpEvictionThreads(); + + if (needCriticalEviction()) { + evictBatch(EvictionSource.DAEMON, backgroundIO); + } + } + } + + public void doManualEvict() { + + if (!isEnabled()) { + return; + } + + evictBatch(EvictionSource.MANUAL, true /*backgroundIO*/); + } + + private void wakeUpEvictionThreads() { + + if (!runEvictorThreads || !isEnabled()) { + return; + } + + /* + * This check is meant to avoid the lock taken by + * ArrayBlockingQueue.offer() when this is futile. The lock reduces + * concurrency because this method is called so frequently. + */ + if (activePoolThreads.get() >= maxPoolThreads) { + return; + } + + evictionPool.execute(new Runnable() { + @Override + public void run() { + activePoolThreads.incrementAndGet(); + try { + evictBatch( + EvictionSource.EVICTORTHREAD, true /*backgroundIO*/); + } finally { + activePoolThreads.decrementAndGet(); + } + } + }); + } + + private boolean needEviction() { + + if (!isEnabled()) { + return false; + } + + /* + * When off-heap cache size is set to zero after being non-zero, we + * perform eviction only until the cache becomes empty. + */ + if (maxMemory == 0) { + return allocator.getUsedBytes() >= 0; + } + + return allocator.getUsedBytes() + evictBytes >= memoryLimit; + } + + private boolean needCriticalEviction() { + + if (!isEnabled()) { + return false; + } + + /* + * When off-heap cache size is set to zero after being non-zero, we + * perform only non-critical eviction. 
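+     *
+     * For example, with an illustrative memoryLimit of 1 GB and
+     * evictBytes of 50 MB, needEviction() becomes true at
+     * usedBytes >= 950 MB, while this method returns true only at
+     * usedBytes >= 1 GB, so application threads are recruited only after
+     * the background evictors have fallen behind.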
+ */ + if (maxMemory == 0) { + return false; + } + + return allocator.getUsedBytes() >= memoryLimit; + } + + private int getLRUSize(final LRUList[] listSet) { + int size = 0; + for (final LRUList l : listSet) { + size += l.getSize(); + } + return size; + } + + private void evictBatch(final EvictionSource source, + final boolean backgroundIO) { + + final long maxBytesToEvict = + evictBytes + (allocator.getUsedBytes() - memoryLimit); + + long bytesEvicted = 0; + + boolean pri2 = false; + int maxLruEntries = getLRUSize(pri1LRUSet); + int nLruEntries = 0; + + while (bytesEvicted < maxBytesToEvict && + needEviction() && + !shutdownRequested.get()) { + + if (nLruEntries >= maxLruEntries) { + if (pri2) { + break; + } + pri2 = true; + maxLruEntries = getLRUSize(pri2LRUSet); + nLruEntries = 0; + } + + final LRUList lru; + + if (pri2) { + final int lruIdx = + Math.abs(nextPri2LRUList++) % numLRULists; + + lru = pri2LRUSet[lruIdx]; + + } else { + final int lruIdx = + Math.abs(nextPri1LRUList++) % numLRULists; + + lru = pri1LRUSet[lruIdx]; + } + + final int entry = lru.removeFront(); + nLruEntries += 1; + + if (entry < 0) { + continue; + } + + bytesEvicted += evictOne(source, backgroundIO, entry, lru, pri2); + } + } + + private long evictOne(final EvictionSource source, + final boolean backgroundIO, + final int entry, + final LRUList lru, + final boolean pri2) { + + nNodesTargeted.incrementAndGet(); + + if (source == EvictionSource.CRITICAL) { + nCriticalNodesTargeted.incrementAndGet(); + } + + final Chunk chunk = chunks[entry / CHUNK_SIZE]; + final int chunkIdx = entry % CHUNK_SIZE; + + /* + * Note that almost anything could have happened in other threads + * after removing the entry from the LRU and prior to latching the + * owner IN. We account for these possibilities below. + * + * When we decide to "skip" an entry, it is not added back to the LRU + * and it is not freed. The assumption is that another thread is + * processing the entry and will add it to the LRU or free it. + */ + final IN in = chunk.owners[chunkIdx]; + + /* + * If the IN is null, skip the entry. The IN may have been evicted. + */ + if (in == null) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + final EnvironmentImpl envImpl = in.getEnv(); + + in.latchNoUpdateLRU(); + try { + + /* + * If the owner has changed or the IN was evicted, skip it. + */ + if (in != chunk.owners[chunkIdx] || + !in.getInListResident()) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + /* + * If owner is a BIN, it is in the main cache but may have + * off-heap LNs. + */ + if (in.isBIN()) { + final BIN bin = (BIN) in; + + /* + * If entry is no longer associated with this BIN, skip it. + */ + if (bin.getOffHeapLruId() != entry) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + /* + * If the entry was added back to the LRU, skip it. This check + * requires synchronizing on the LRUList after latching the IN. + * We know we're checking the correct LRUList because an entry + * with a BIN owner can never be in the priority 2 LRU set. + */ + if (lru.contains(chunk, chunkIdx)) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + return stripLNsFromMainBIN(bin, entry, pri2); + } + + /* + * The owner has a child BIN that is off-heap. + */ + int index = -1; + for (int i = 0; i < in.getNEntries(); i += 1) { + if (in.getOffHeapBINId(i) == entry) { + index = i; + break; + } + } + + /* + * If entry is no longer associated with this IN, skip it. 
+ */ + if (index < 0) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + /* + * If the entry was moved between a pri1 and pri2 LRU list, skip + * it. This means that the LRUList from which we removed the entry + * is not the list it belongs to. + */ + if (pri2 != in.isOffHeapBINPri2(index)) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + /* + * If the entry was added back to the LRU, skip it. This check + * requires synchronizing on the LRUList after latching the IN, and + * it requires that we're using the correct LRU list (the check + * above). + */ + if (lru.contains(chunk, chunkIdx)) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + /* + * The BIN should never be resident in main. + */ + final BIN residentBIN = (BIN) in.getTarget(index); + if (residentBIN != null) { + throw EnvironmentFailureException.unexpectedState( + envImpl, "BIN is resident in both caches, id=" + + residentBIN.getNodeId()); + } + + final boolean useChecksums = envImpl.useOffHeapChecksums(); + final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0; + + final long memId = chunk.memIds[chunkIdx]; + final int flags = getByte(memId, checksumSize, new byte[1]); + final boolean dirty = in.isOffHeapBINDirty(index); + + /* + * First try stripping LNs. + */ + final long nLNBytesEvicted = stripLNs( + entry, pri2, dirty, memId, chunk, chunkIdx, in, index, + backgroundIO); + + if (nLNBytesEvicted > 0) { + return nLNBytesEvicted; + } + + /* + * Next try mutating a full BIN to a BIN-delta. + */ + if ((flags & BIN_FLAG_CAN_MUTATE) != 0) { + + final long nBytesEvicted = mutateToBINDelta( + envImpl, in.getDatabase(), entry, pri2, + chunk, chunkIdx); + + if (nBytesEvicted > 0) { + return nBytesEvicted; + } + } + + /* + * If it is in the pri1 list and is dirty with no resident LNs, + * move it to the pri2 list. We currently have no stat for this. + */ + if (!pri2 && dirty) { + moveBack(entry, true /*pri2*/); + in.setOffHeapBINId( + index, entry, true /*pri2*/, true /*dirty*/); + return 0; + } + + /* + * Log the BIN if it is dirty and finally just get rid of it. + */ + return flushAndDiscardBIN( + entry, pri2, dirty, memId, in, index, backgroundIO, + false /*freeLNs*/); + + } finally { + in.releaseLatch(); + } + } + + /** + * Strip off-heap LNs referenced by a main cache BIN. If there are any + * off-heap expired LNs, strip only them. Otherwise, strip all LNs. + */ + private long stripLNsFromMainBIN(final BIN bin, + final int entry, + final boolean pri2) { + /* + * Strip expired LNs first. + */ + int nEvicted = 0; + long nBytesEvicted = 0; + boolean anyNonExpired = false; + + for (int i = 0; i < bin.getNEntries(); i += 1) { + if (bin.getOffHeapLNId(i) == 0) { + continue; + } + if (bin.isExpired(i)) { + nBytesEvicted += freeLN(bin, i); + nEvicted += 1; + } else { + anyNonExpired = true; + } + } + + /* + * If any were expired, return. If any non-expired LNs remain, put back + * the entry on the LRU list, leaving the non-expired LNs resident. + * Also compress the BIN to free the expired slots in the main cache. + */ + if (nEvicted > 0) { + if (anyNonExpired) { + moveBack(entry, pri2); + } else { + bin.setOffHeapLruId(-1); + freeEntry(entry); + } + + bin.getEnv().lazyCompress(bin); + + nLNsEvicted.addAndGet(nEvicted); + nNodesStripped.incrementAndGet(); + return nBytesEvicted; + } + + /* + * No expired LNs are present. Strip the non-expired LNs. 
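+     * Note that because the expired-LN pass above returns as soon as it
+     * frees anything, a BIN holding both expired and live off-heap LNs
+     * gives up only the expired ones now; the live LNs survive until the
+     * entry cycles back to the front of the LRU.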
+         */
+        for (int i = 0; i < bin.getNEntries(); i += 1) {
+            final int lnBytes = freeLN(bin, i);
+            if (lnBytes == 0) {
+                continue;
+            }
+            nBytesEvicted += lnBytes;
+            nEvicted += 1;
+        }
+
+        if (nEvicted > 0) {
+            nLNsEvicted.addAndGet(nEvicted);
+            nNodesStripped.incrementAndGet();
+        } else {
+            nNodesSkipped.incrementAndGet();
+        }
+
+        /*
+         * No LNs are off-heap now, so remove the entry.
+         */
+        bin.setOffHeapLruId(-1);
+        freeEntry(entry);
+
+        return nBytesEvicted;
+    }
+
+    public long stripLNs(final IN parent, final int index) {
+
+        assert parent.isLatchExclusiveOwner();
+        assert parent.getInListResident();
+
+        final int entry = parent.getOffHeapBINId(index);
+        assert entry >= 0;
+
+        final Chunk chunk = chunks[entry / CHUNK_SIZE];
+        final int chunkIdx = entry % CHUNK_SIZE;
+
+        final boolean pri2 = parent.isOffHeapBINPri2(index);
+        final boolean dirty = parent.isOffHeapBINDirty(index);
+        final long memId = chunk.memIds[chunkIdx];
+
+        return stripLNs(
+            entry, pri2, dirty, memId, chunk, chunkIdx, parent, index, false);
+    }
+
+    private long stripLNs(final int entry,
+                          final boolean pri2,
+                          final boolean dirty,
+                          final long memId,
+                          final Chunk chunk,
+                          final int chunkIdx,
+                          final IN parent,
+                          final int index,
+                          final boolean backgroundIO) {
+
+        final EnvironmentImpl envImpl = parent.getEnv();
+        final boolean useChecksums = envImpl.useOffHeapChecksums();
+        final int checksumSize = useChecksums ? CHECKSUM_SIZE : 0;
+
+        /*
+         * Contents of headBuf:
+         * flags, fullLsn, deltaLsn, minExpiration, lnIdsSize.
+         * Note that headBuf does not contain the checksum.
+         * Contents of memId following headBuf fields: LN mem Ids, BIN.
+         */
+        final byte[] headBuf = new byte[1 + 8 + 8 + 4 + 2];
+        allocator.copy(memId, checksumSize, headBuf, 0, headBuf.length);
+        final int memHeadLen = checksumSize + headBuf.length;
+        final byte flags = headBuf[0];
+        int bufOffset = 1 + 8 + 8;
+        final int minExpiration = getInt(headBuf, bufOffset);
+        bufOffset += 4;
+        final short lnIdsSize = getShort(headBuf, bufOffset);
+        bufOffset += 2;
+        assert bufOffset == headBuf.length;
+
+        int nEvicted = 0;
+        long nBytesEvicted = 0;
+
+        /*
+         * If this is a full BIN and any slot is expired, then materialize the
+         * BIN, evict expired off-heap LNs, compress expired slots, and
+         * re-serialize the BIN.
+         */
+        if ((flags & BIN_FLAG_DELTA) == 0 &&
+            envImpl.isExpired(minExpiration, true /*hours*/)) {
+
+            final BIN bin = materializeBIN(envImpl, getMemBytes(memId));
+
+            bin.latchNoUpdateLRU(parent.getDatabase());
+            try {
+                for (int i = 0; i < bin.getNEntries(); i += 1) {
+                    if (bin.getOffHeapLNId(i) == 0 ||
+                        !bin.isExpired(i)) {
+                        continue;
+                    }
+                    nBytesEvicted += freeLN(bin, i);
+                    nEvicted += 1;
+                }
+
+                /*
+                 * TODO: Compression is expensive because we must re-serialize
+                 * the BIN. It may be more efficient to only proceed to
+                 * compression if no LNs were freed above, although we would
+                 * first need to clear the LN memIds.
+                 */
+                final int origNSlots = bin.getNEntries();
+
+                bin.compress(
+                    !bin.shouldLogDelta() /*compressDirtySlots*/, null);
+
+                /*
+                 * If we compressed any expired slots, re-serialize the BIN.
+                 * Also re-serialize in the rare case that an LN was freed but
+                 * no slot was compressed due to record locks; if we did not do
+                 * this, the invalid/freed LN memId would not be cleared.
+                 */
+                if (origNSlots != bin.getNEntries() || nEvicted > 0) {
+                    final long newMemId = serializeBIN(bin, bin.isBINDelta());
+                    if (newMemId == 0) {
+                        /*
+                         * When allocations are failing, freeing the BIN is the
+                         * simplest and most productive thing to do.
+ */ + nBytesEvicted += flushAndDiscardBIN( + entry, pri2, dirty, memId, parent, index, + backgroundIO, true /*freeLNs*/); + + return nBytesEvicted; + } + + nBytesEvicted += freeMemory(memId); + nBytesEvicted -= allocator.totalSize(newMemId); + chunk.memIds[chunkIdx] = newMemId; + } + } finally { + bin.releaseLatch(); + } + + /* Return if we freed any memory by LN eviction or compression. */ + if (nBytesEvicted > 0) { + nLNsEvicted.addAndGet(nEvicted); + nNodesStripped.incrementAndGet(); + moveBack(entry, pri2); + return nBytesEvicted; + } + } + + if (lnIdsSize <= 0) { + return 0; + } + + final byte[] lnBuf = new byte[lnIdsSize]; + allocator.copy(memId, memHeadLen, lnBuf, 0, lnBuf.length); + final long[] lnMemIds = unpackLnMemIds(lnBuf, 0, lnIdsSize); + + for (final long lnMemId : lnMemIds) { + if (lnMemId == 0) { + continue; + } + nBytesEvicted += freeLN(lnMemId); + nEvicted += 1; + } + + assert nEvicted > 0; + + if (lnIdsSize <= MAX_UNUSED_BIN_BYTES) { + /* + * When there are only a small number of LN memIds, we can tolerate + * the wasted space in the BIN so we just negate the size. + */ + final byte[] tempBuf = new byte[8]; + + putShort((short) (-lnIdsSize), memId, memHeadLen - 2, tempBuf); + + /* However, the checksum is now invalid. */ + if (useChecksums) { + putInt(0, memId, 0, tempBuf); + } + } else { + /* + * When there are many LN memIds, we reclaim the space they use by + * copying the BIN to a smaller block and freeing the old block. + */ + final int newSize = allocator.size(memId) - lnIdsSize; + final long newMemId = allocateMemory(envImpl, newSize); + + if (newMemId == 0) { + /* + * When allocations are failing, freeing the BIN is the + * simplest and most productive thing to do. + */ + nBytesEvicted += flushAndDiscardBIN( + entry, pri2, dirty, memId, parent, index, backgroundIO, + true /*freeLNs*/); + + return nBytesEvicted; + } + + nBytesEvicted -= allocator.totalSize(newMemId); + + /* + * Copy all parts of the old BIN to the new, except for the + * checksum, lnIdsSize and the LN memIds. We don't need to set + * the checksum or lnIdsSize to zero in the new block because it + * was zero-filled when it was allocated. Instead we omit these + * fields when copying. + * + * The first copy includes all headBuf fields except for the + * lnIdsSize at the end of the buffer. The second copy includes + * the serialized BIN alone. 
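+             *
+             * For example, with checksums enabled, checksumSize is 4 and
+             * memHeadLen is 4 + 23 = 27: the first copy places the 21
+             * header bytes at offsets 4..24 of the new block, the
+             * zero-filled allocation supplies the two lnIdsSize bytes at
+             * 25..26, and the second copy lands the serialized BIN at
+             * offset 27.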
+ */ + allocator.copy( + headBuf, 0, + newMemId, checksumSize, headBuf.length - 2); + + allocator.copy( + memId, memHeadLen + lnIdsSize, + newMemId, memHeadLen, newSize - memHeadLen); + + nBytesEvicted += freeMemory(memId); + chunk.memIds[chunkIdx] = newMemId; + } + + nLNsEvicted.addAndGet(nEvicted); + nNodesStripped.incrementAndGet(); + moveBack(entry, pri2); + return nBytesEvicted; + } + + public long mutateToBINDelta(final IN parent, + final int index) { + + assert parent.isLatchExclusiveOwner(); + assert parent.getInListResident(); + + final int entry = parent.getOffHeapBINId(index); + if (entry < 0) { + return 0; + } + + final Chunk chunk = chunks[entry / CHUNK_SIZE]; + final int chunkIdx = entry % CHUNK_SIZE; + + return mutateToBINDelta( + parent.getEnv(), parent.getDatabase(), entry, + parent.isOffHeapBINPri2(index), chunk, chunkIdx); + } + + private long mutateToBINDelta(final EnvironmentImpl envImpl, + final DatabaseImpl dbImpl, + final int entry, + final boolean pri2, + final Chunk chunk, + final int chunkIdx) { + + final long memId = chunk.memIds[chunkIdx]; + + final BIN bin = materializeBIN(envImpl, getMemBytes(memId)); + assert bin.getLastFullLsn() != DbLsn.NULL_LSN; + + final long newMemId; + bin.latchNoUpdateLRU(dbImpl); + try { + newMemId = serializeBIN(bin, true /*asDelta*/); + } finally { + bin.releaseLatch(); + } + + if (newMemId == 0) { + return 0; + } + + long nBytesEvicted = freeBIN(envImpl, memId, false /*freeLNs*/); + nBytesEvicted -= allocator.totalSize(newMemId); + chunk.memIds[chunkIdx] = newMemId; + + nNodesMutated.incrementAndGet(); + moveBack(entry, pri2); + return nBytesEvicted; + } + + /** + * Logs the BIN child if it is dirty, and then discards it. + * + * @return bytes freed, or zero if a dirty BIN could not be logged due to + * a read-only env or disk limit violation. + */ + private long flushAndDiscardBIN(final int entry, + final boolean pri2, + final boolean dirty, + final long memId, + final IN parent, + final int index, + final boolean backgroundIO, + final boolean freeLNs) { + + assert parent.isLatchExclusiveOwner(); + + final EnvironmentImpl envImpl = parent.getEnv(); + + if (DEBUG_TRACE) { + debug( + envImpl, + "Discard BIN LSN=" + + DbLsn.getNoFormatString(parent.getLsn(index)) + + " pri2=" + pri2 + " dirty=" + dirty); + } + + if (dirty) { + /* + * Cannot evict dirty nodes in a read-only environment, or when a + * disk limit has been exceeded. We can assume that the cache will + * not overflow with dirty nodes because writes are prohibited. 
+ */ + if (envImpl.isReadOnly() || + envImpl.getDiskLimitViolation() != null) { + nNodesSkipped.incrementAndGet(); + return 0; + } + + final INLogEntry logEntry = createBINLogEntry( + memId, entry, parent, false /*preserveBINInCache*/); + + final Provisional provisional = + envImpl.coordinateWithCheckpoint( + parent.getDatabase(), IN.BIN_LEVEL, parent); + + final long lsn = IN.logEntry( + logEntry, provisional, backgroundIO, parent); + + parent.updateEntry( + index, lsn, VLSN.NULL_VLSN_SEQUENCE, 0 /*lastLoggedSize*/); + + nDirtyNodesEvicted.incrementAndGet(); + + if (!logEntry.isPreSerialized()) { + logEntry.getMainItem().releaseLatch(); + } + } + + nNodesEvicted.incrementAndGet(); + parent.clearOffHeapBINId(index); + remove(entry, pri2); + return freeBIN(envImpl, memId, freeLNs); + } + + long testEvictMainBIN(final BIN bin) { + + final int entry = bin.getOffHeapLruId(); + assert entry >= 0; + + final LRUList lru = pri1LRUSet[entry % numLRULists]; + lru.remove(entry); + + return evictOne( + EvictionSource.MANUAL, false /*backgroundIO*/, entry, lru, + false /*pri2*/); + } + + long testEvictOffHeapBIN(final IN in, final int index) { + + final int entry = in.getOffHeapBINId(index); + assert entry >= 0; + + final boolean pri2 = in.isOffHeapBINPri2(index); + final int lruIdx = entry % numLRULists; + + final LRUList lru = + pri2 ? pri2LRUSet[lruIdx] : pri1LRUSet[lruIdx]; + + lru.remove(entry); + + return evictOne( + EvictionSource.MANUAL, false /*backgroundIO*/, entry, lru, pri2); + } +} diff --git a/src/com/sleepycat/je/evictor/OffHeapStatDefinition.java b/src/com/sleepycat/je/evictor/OffHeapStatDefinition.java new file mode 100644 index 0000000..1491a60 --- /dev/null +++ b/src/com/sleepycat/je/evictor/OffHeapStatDefinition.java @@ -0,0 +1,231 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * The off-heap stats were put in a separate group rather than being combined + * with the main cache evictor stats, simply because there were so many evictor + * stats already. 
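+ *
+ * As a sketch (the exact accessor is not part of this import and is
+ * assumed here), the group is surfaced through the usual stats plumbing:
+ *
+ *   StatGroup group = offHeapEvictor.loadStats(new StatsConfig());
+ *   System.out.println(group);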
+ */ +public class OffHeapStatDefinition { + public static final String GROUP_NAME = "OffHeap"; + public static final String GROUP_DESC = + "The optional off-heap cache resides outside the " + + "Java heap and serves as an overflow area for the main cache."; + + public static final String ALLOC_FAILURE_NAME = + "offHeapAllocFailure"; + public static final String ALLOC_FAILURE_DESC = + "Number of off-heap allocation failures due to lack of system memory."; + public static final StatDefinition ALLOC_FAILURE = + new StatDefinition( + ALLOC_FAILURE_NAME, + ALLOC_FAILURE_DESC); + + public static final String ALLOC_OVERFLOW_NAME = + "offHeapAllocOverflow"; + public static final String ALLOC_OVERFLOW_DESC = + "Number of off-heap allocation attempts that exceeded the cache size."; + public static final StatDefinition ALLOC_OVERFLOW = + new StatDefinition( + ALLOC_OVERFLOW_NAME, + ALLOC_OVERFLOW_DESC); + + public static final String THREAD_UNAVAILABLE_NAME = + "offHeapThreadUnavailable"; + public static final String THREAD_UNAVAILABLE_DESC = + "Number of eviction tasks that were submitted to the background " + + "off-heap evictor pool, but were refused because all eviction " + + "threads were busy."; + public static final StatDefinition THREAD_UNAVAILABLE = + new StatDefinition( + THREAD_UNAVAILABLE_NAME, + THREAD_UNAVAILABLE_DESC); + + public static final String NODES_TARGETED_NAME = + "offHeapNodesTargeted"; + public static final String NODES_TARGETED_DESC = + "Number of BINs selected as off-heap eviction targets."; + public static final StatDefinition NODES_TARGETED = + new StatDefinition( + NODES_TARGETED_NAME, + NODES_TARGETED_DESC); + + public static final String CRITICAL_NODES_TARGETED_NAME = + "offHeapCriticalNodesTargeted"; + public static final String CRITICAL_NODES_TARGETED_DESC = + "Number of nodes targeted in 'critical eviction' mode."; + public static final StatDefinition CRITICAL_NODES_TARGETED = + new StatDefinition( + CRITICAL_NODES_TARGETED_NAME, + CRITICAL_NODES_TARGETED_DESC); + + public static final String NODES_EVICTED_NAME = + "offHeapNodesEvicted"; + public static final String NODES_EVICTED_DESC = + "Number of target BINs (including BIN-deltas) evicted from the " + + "off-heap cache."; + public static final StatDefinition NODES_EVICTED = + new StatDefinition( + NODES_EVICTED_NAME, + NODES_EVICTED_DESC); + + public static final String DIRTY_NODES_EVICTED_NAME = + "offHeapDirtyNodesEvicted"; + public static final String DIRTY_NODES_EVICTED_DESC = + "Number of target BINs evicted from the off-heap cache that were " + + "dirty and therefore were logged."; + public static final StatDefinition DIRTY_NODES_EVICTED = + new StatDefinition( + DIRTY_NODES_EVICTED_NAME, + DIRTY_NODES_EVICTED_DESC); + + public static final String NODES_STRIPPED_NAME = + "offHeapNodesStripped"; + public static final String NODES_STRIPPED_DESC = + "Number of target BINs whose off-heap child LNs were evicted " + + "(stripped)."; + public static final StatDefinition NODES_STRIPPED = + new StatDefinition( + NODES_STRIPPED_NAME, + NODES_STRIPPED_DESC); + + public static final String NODES_MUTATED_NAME = + "offHeapNodesMutated"; + public static final String NODES_MUTATED_DESC = + "Number of off-heap target BINs mutated to BIN-deltas."; + public static final StatDefinition NODES_MUTATED = + new StatDefinition( + NODES_MUTATED_NAME, + NODES_MUTATED_DESC); + + public static final String NODES_SKIPPED_NAME = + "offHeapNodesSkipped"; + public static final String NODES_SKIPPED_DESC = + "Number of off-heap target BINs 
on which no action was taken."; + public static final StatDefinition NODES_SKIPPED = + new StatDefinition( + NODES_SKIPPED_NAME, + NODES_SKIPPED_DESC); + + public static final String LNS_EVICTED_NAME = + "offHeapLNsEvicted"; + public static final String LNS_EVICTED_DESC = + "Number of LNs evicted from the off-heap cache as a result of BIN " + + "stripping."; + public static final StatDefinition LNS_EVICTED = + new StatDefinition( + LNS_EVICTED_NAME, + LNS_EVICTED_DESC); + + public static final String LNS_LOADED_NAME = + "offHeapLNsLoaded"; + public static final String LNS_LOADED_DESC = + "Number of LNs loaded from the off-heap cache."; + public static final StatDefinition LNS_LOADED = + new StatDefinition( + LNS_LOADED_NAME, + LNS_LOADED_DESC); + + public static final String LNS_STORED_NAME = + "offHeapLNsStored"; + public static final String LNS_STORED_DESC = + "Number of LNs stored into the off-heap cache."; + public static final StatDefinition LNS_STORED = + new StatDefinition( + LNS_STORED_NAME, + LNS_STORED_DESC); + + public static final String BINS_LOADED_NAME = + "offHeapBINsLoaded"; + public static final String BINS_LOADED_DESC = + "Number of BINs loaded from the off-heap cache."; + public static final StatDefinition BINS_LOADED = + new StatDefinition( + BINS_LOADED_NAME, + BINS_LOADED_DESC); + + public static final String BINS_STORED_NAME = + "offHeapBINsStored"; + public static final String BINS_STORED_DESC = + "Number of BINs stored into the off-heap cache."; + public static final StatDefinition BINS_STORED = + new StatDefinition( + BINS_STORED_NAME, + BINS_STORED_DESC); + + public static final String CACHED_LNS_NAME = + "offHeapCachedLNs"; + public static final String CACHED_LNS_DESC = + "Number of LNs residing in the off-heap cache."; + public static final StatDefinition CACHED_LNS = + new StatDefinition( + CACHED_LNS_NAME, + CACHED_LNS_DESC, + StatType.CUMULATIVE); + + public static final String CACHED_BINS_NAME = + "offHeapCachedBINs"; + public static final String CACHED_BINS_DESC = + "Number of BINs (full BINs and BIN-deltas) residing in the off-heap " + + "cache."; + public static final StatDefinition CACHED_BINS = + new StatDefinition( + CACHED_BINS_NAME, + CACHED_BINS_DESC, + StatType.CUMULATIVE); + + public static final String CACHED_BIN_DELTAS_NAME = + "offHeapCachedBINDeltas"; + public static final String CACHED_BIN_DELTAS_DESC = + "Number of BIN-deltas residing in the off-heap cache."; + public static final StatDefinition CACHED_BIN_DELTAS = + new StatDefinition( + CACHED_BIN_DELTAS_NAME, + CACHED_BIN_DELTAS_DESC, + StatType.CUMULATIVE); + + public static final String TOTAL_BYTES_NAME = + "offHeapTotalBytes"; + public static final String TOTAL_BYTES_DESC = + "Total number of estimated bytes in off-heap cache."; + public static final StatDefinition TOTAL_BYTES = + new StatDefinition( + TOTAL_BYTES_NAME, + TOTAL_BYTES_DESC, + StatType.CUMULATIVE); + + public static final String TOTAL_BLOCKS_NAME = + "offHeapTotalBlocks"; + public static final String TOTAL_BLOCKS_DESC = + "Total number of memory blocks in off-heap cache."; + public static final StatDefinition TOTAL_BLOCKS = + new StatDefinition( + TOTAL_BLOCKS_NAME, + TOTAL_BLOCKS_DESC, + StatType.CUMULATIVE); + + public static final String LRU_SIZE_NAME = + "offHeapLruSize"; + public static final String LRU_SIZE_DESC = + "Number of LRU entries used for the off-heap cache."; + public static final StatDefinition LRU_SIZE = + new StatDefinition( + LRU_SIZE_NAME, + LRU_SIZE_DESC, + StatType.CUMULATIVE); +} diff --git 
a/src/com/sleepycat/je/evictor/package-info.java b/src/com/sleepycat/je/evictor/package-info.java new file mode 100644 index 0000000..21e84a2 --- /dev/null +++ b/src/com/sleepycat/je/evictor/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Evicts data from the main and off-heap caches when they overflow. + */ +package com.sleepycat.je.evictor; \ No newline at end of file diff --git a/src/com/sleepycat/je/incomp/INCompStatDefinition.java b/src/com/sleepycat/je/incomp/INCompStatDefinition.java new file mode 100644 index 0000000..ddaf591 --- /dev/null +++ b/src/com/sleepycat/je/incomp/INCompStatDefinition.java @@ -0,0 +1,87 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.incomp; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for JE INCompressor statistics. 
+ */
+public class INCompStatDefinition {
+    public static final String GROUP_NAME = "Node Compression";
+    public static final String GROUP_DESC =
+        "Deleted records are removed from Btree internal nodes " +
+        "asynchronously and nodes are deleted when they become empty.";
+
+    public static final String INCOMP_SPLIT_BINS_NAME =
+        "splitBins";
+    public static final String INCOMP_SPLIT_BINS_DESC =
+        "Number of BINs encountered by the INCompressor that were split " +
+        "between the time they were put on the compressor queue and when " +
+        "the compressor ran.";
+    public static final StatDefinition INCOMP_SPLIT_BINS =
+        new StatDefinition(
+            INCOMP_SPLIT_BINS_NAME,
+            INCOMP_SPLIT_BINS_DESC);
+
+    public static final String INCOMP_DBCLOSED_BINS_NAME =
+        "dbClosedBins";
+    public static final String INCOMP_DBCLOSED_BINS_DESC =
+        "Number of BINs encountered by the INCompressor that had their " +
+        "database closed between the time they were put on the compressor" +
+        " queue and when the compressor ran.";
+    public static final StatDefinition INCOMP_DBCLOSED_BINS =
+        new StatDefinition(
+            INCOMP_DBCLOSED_BINS_NAME,
+            INCOMP_DBCLOSED_BINS_DESC);
+
+    public static final String INCOMP_CURSORS_BINS_NAME =
+        "cursorsBins";
+    public static final String INCOMP_CURSORS_BINS_DESC =
+        "Number of BINs encountered by the INCompressor that had cursors " +
+        "referring to them when the compressor ran.";
+    public static final StatDefinition INCOMP_CURSORS_BINS =
+        new StatDefinition(
+            INCOMP_CURSORS_BINS_NAME,
+            INCOMP_CURSORS_BINS_DESC);
+
+    public static final String INCOMP_NON_EMPTY_BINS_NAME =
+        "nonEmptyBins";
+    public static final String INCOMP_NON_EMPTY_BINS_DESC =
+        "Number of BINs encountered by the INCompressor that were not " +
+        "actually empty when the compressor ran.";
+    public static final StatDefinition INCOMP_NON_EMPTY_BINS =
+        new StatDefinition(
+            INCOMP_NON_EMPTY_BINS_NAME,
+            INCOMP_NON_EMPTY_BINS_DESC);
+
+    public static final String INCOMP_PROCESSED_BINS_NAME =
+        "processedBins";
+    public static final String INCOMP_PROCESSED_BINS_DESC =
+        "Number of BINs that were successfully processed by the INCompressor.";
+    public static final StatDefinition INCOMP_PROCESSED_BINS =
+        new StatDefinition(
+            INCOMP_PROCESSED_BINS_NAME,
+            INCOMP_PROCESSED_BINS_DESC);
+
+    public static final String INCOMP_QUEUE_SIZE_NAME =
+        "inCompQueueSize";
+    public static final String INCOMP_QUEUE_SIZE_DESC =
+        "Number of entries in the INCompressor queue when the getStats() call" +
+        " was made.";
+    public static final StatDefinition INCOMP_QUEUE_SIZE =
+        new StatDefinition(
+            INCOMP_QUEUE_SIZE_NAME,
+            INCOMP_QUEUE_SIZE_DESC);
+}
diff --git a/src/com/sleepycat/je/incomp/INCompressor.java b/src/com/sleepycat/je/incomp/INCompressor.java
new file mode 100644
index 0000000..b8244ae
--- /dev/null
+++ b/src/com/sleepycat/je/incomp/INCompressor.java
@@ -0,0 +1,658 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.incomp; + +import static com.sleepycat.je.incomp.INCompStatDefinition.GROUP_DESC; +import static com.sleepycat.je.incomp.INCompStatDefinition.GROUP_NAME; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_CURSORS_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_DBCLOSED_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_NON_EMPTY_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_PROCESSED_BINS; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_QUEUE_SIZE; +import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_SPLIT_BINS; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.cleaner.LocalUtilizationTracker; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.BINReference; +import com.sleepycat.je.tree.CursorsExistException; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.NodeNotEmptyException; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.utilint.DaemonThread; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; + +/** + * JE compression consists of removing BIN slots for deleted and expired + * records, and pruning empty IN/BINs from the tree which is also called a + * reverse split. + * + * One of the reasons compression is treated specially is that slot compression + * cannot be performed inline as part of a delete operation. When we delete an + * LN, a cursor is always present on the LN. The API dictates that the cursor + * will remain positioned on the deleted record. In addition, if the deleting + * transaction aborts we must restore the slot and the possibility of a split + * during an abort is something we wish to avoid; for this reason, compression + * will not occur if the slot's LSN is locked. In principle, slot compression + * could be performed during transaction commit, but that would be expensive + * because a Btree lookup would be required, and this would negatively impact + * operation latency. For all these reasons, slot compression is performed + * after the delete operation is complete and committed, and not in the thread + * performing the operation or transaction commit. + * + * Compression is of two types: + * + * + "Queued compression" is carried out by the INCompressor daemon thread. + * Both slot compression and pruning are performed. + * + * + "Lazy compression" is carried out opportunistically at various times when + * compression is beneficial. + * + * The use of BIN-deltas has a big impact on slot compression because dirty + * slots cannot be compressed until we know that a full BIN will be logged + * next. If a dirty slot were compressed prior to logging a BIN-delta, the + * record of the compression would be lost and the slot would "reappear" when + * the BIN is reconstituted. 
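+ * For example, suppose slot k of a dirty BIN is deleted and compressed
+ * away, and a BIN-delta is logged next: the delta carries no record of
+ * the removal, so reconstituting the BIN from full version plus delta
+ * would resurrect slot k.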
+ * Normally we do not compress dirty slots when a delta would next be logged.
+ * However, there are times when we do compress dirty slots, and in that case
+ * the "prohibit next logged delta" flag is set on the BIN.
+ *
+ * Queued compression prior to logging a BIN-delta is also wasteful because the
+ * dequeued entry cannot be processed. Therefore, lazy compression is relied
+ * on when a BIN-delta will next be logged. Because BIN-deltas are logged
+ * more often than BINs, lazy compression is used for slot compression more
+ * often than queued compression.
+ *
+ * Lazy compression is used for compressing expired slots, in addition to
+ * deleted slots. This is done opportunistically as described above. Expired
+ * slots are normally not dirty, so they can often be compressed even when a
+ * BIN will be logged next as a BIN-delta. The same is true of deleted slots
+ * that are not dirty, although these occur infrequently.
+ *
+ * Since we don't lazy-compress BIN-deltas, how can we prevent their expired
+ * slots from using space for long time periods? Currently the expired space
+ * will be reclaimed only when the delta is mutated to a full BIN and then
+ * compressed, including when the full BIN is cleaned. This is the same as
+ * reclaiming space for deleted slots, so this is acceptable for now at least.
+ *
+ * You may wonder, since lazy compression is necessary, why use queued
+ * compression for slot compression at all? Queued compression is useful for
+ * the following reasons:
+ *
+ * + If a BIN-delta will not be logged next, queued compression will cause the
+ *   compression to occur sooner than with lazy compression.
+ *
+ * + When a cursor is on a BIN or a deleted entry is locked during lazy
+ *   compression, we cannot compress the slot. Queuing allows it to be
+ *   compressed sooner than if we waited for the next lazy compression.
+ *
+ * + The code to process a queue entry must do slot compression anyway, even if
+ *   we only want to prune the BIN. We have to account for the case where all
+ *   slots are deleted but not yet compressed. So the code to process the
+ *   queue entry could not be simplified even if we were to decide not to queue
+ *   entries for slot compression.
+ */
+public class INCompressor extends DaemonThread {
+    private static final boolean DEBUG = false;
+
+    private final long lockTimeout;
+
+    /* stats */
+    private StatGroup stats;
+    private LongStat splitBins;
+    private LongStat dbClosedBins;
+    private LongStat cursorsBins;
+    private LongStat nonEmptyBins;
+    private LongStat processedBins;
+    private LongStat compQueueSize;
+
+    /* per-run stats */
+    private int splitBinsThisRun = 0;
+    private int dbClosedBinsThisRun = 0;
+    private int cursorsBinsThisRun = 0;
+    private int nonEmptyBinsThisRun = 0;
+    private int processedBinsThisRun = 0;
+
+    /*
+     * The following stats are not kept per run, because they're set by
+     * multiple threads doing lazy compression. They are debugging aids; it
+     * didn't seem like a good idea to add synchronization to the general path.
+     */
+    private int lazyProcessed = 0;
+    private int wokenUp = 0;
+
+    /*
+     * Store logical references to BINs that have deleted entries and are
+     * candidates for compaction.
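+     * The map is keyed by BIN node id, so each BIN appears at most once
+     * no matter how many of its slots are deleted; see the
+     * addBin*ToQueue methods below.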
+     */
+    private Map<Long, BINReference> binRefQueue;
+    private final Object binRefQueueSync;
+
+    /* For unit tests */
+    private TestHook beforeFlushTrackerHook; // [#15528]
+
+    public INCompressor(EnvironmentImpl env, long waitTime, String name) {
+        super(waitTime, name, env);
+        lockTimeout = env.getConfigManager().getDuration
+            (EnvironmentParams.COMPRESSOR_LOCK_TIMEOUT);
+        binRefQueue = new HashMap<>();
+        binRefQueueSync = new Object();
+
+        /* Do the stats definitions. */
+        stats = new StatGroup(GROUP_NAME, GROUP_DESC);
+        splitBins = new LongStat(stats, INCOMP_SPLIT_BINS);
+        dbClosedBins = new LongStat(stats, INCOMP_DBCLOSED_BINS);
+        cursorsBins = new LongStat(stats, INCOMP_CURSORS_BINS);
+        nonEmptyBins = new LongStat(stats, INCOMP_NON_EMPTY_BINS);
+        processedBins = new LongStat(stats, INCOMP_PROCESSED_BINS);
+        compQueueSize = new LongStat(stats, INCOMP_QUEUE_SIZE);
+    }
+
+    /* For unit testing only. */
+    public void setBeforeFlushTrackerHook(TestHook hook) {
+        beforeFlushTrackerHook = hook;
+    }
+
+    public synchronized void verifyCursors()
+        throws DatabaseException {
+
+        /*
+         * Environment may have been closed. If so, then our job here is done.
+         */
+        if (envImpl.isClosed()) {
+            return;
+        }
+
+        /*
+         * Use a snapshot to verify the cursors. This way we don't have to
+         * hold a latch while verify takes locks.
+         */
+        final List<BINReference> queueSnapshot;
+        synchronized (binRefQueueSync) {
+            queueSnapshot = new ArrayList<>(binRefQueue.values());
+        }
+
+        /*
+         * Use local caching to reduce DbTree.getDb overhead. Do not call
+         * releaseDb after each getDb, since the entire dbCache will be
+         * released at the end.
+         */
+        final DbTree dbTree = envImpl.getDbTree();
+        final Map<DatabaseId, DatabaseImpl> dbCache = new HashMap<>();
+
+        try {
+            for (final BINReference binRef : queueSnapshot) {
+                final DatabaseImpl db = dbTree.getDb(
+                    binRef.getDatabaseId(), lockTimeout, dbCache);
+
+                final BIN bin = searchForBIN(db, binRef);
+                if (bin != null) {
+                    bin.verifyCursors();
+                    bin.releaseLatch();
+                }
+            }
+        } finally {
+            dbTree.releaseDbs(dbCache);
+        }
+    }
+
+    public int getBinRefQueueSize() {
+        synchronized (binRefQueueSync) {
+            return binRefQueue.size();
+        }
+    }
+
+    /*
+     * There are multiple flavors of the addBin*ToQueue methods. All allow
+     * the caller to specify whether the daemon should be notified. Currently
+     * no callers proactively notify, and we rely on lazy compression and
+     * the daemon's time-based wakeup to process the queue.
+     */
+
+    /**
+     * Adds the BIN to the queue if the BIN is not already in the queue.
+     */
+    public void addBinToQueue(BIN bin) {
+        synchronized (binRefQueueSync) {
+            addBinToQueueAlreadyLatched(bin);
+        }
+    }
+
+    /**
+     * Adds the BINReference to the queue if the BIN is not already in the
+     * queue.
+     */
+    private void addBinRefToQueue(BINReference binRef) {
+        synchronized (binRefQueueSync) {
+            addBinRefToQueueAlreadyLatched(binRef);
+        }
+    }
+
+    /**
+     * Adds an entire collection of BINReferences to the queue at once. Use
+     * this to avoid latching for each add.
+     */
+    public void addMultipleBinRefsToQueue(Collection<BINReference> binRefs) {
+        synchronized (binRefQueueSync) {
+            for (final BINReference binRef : binRefs) {
+                addBinRefToQueueAlreadyLatched(binRef);
+            }
+        }
+    }
+
+    /**
+     * Adds the BINReference with the latch held.
+     */
+    private void addBinRefToQueueAlreadyLatched(BINReference binRef) {
+
+        final Long node = binRef.getNodeId();
+
+        if (binRefQueue.containsKey(node)) {
+            return;
+        }
+
+        binRefQueue.put(node, binRef);
+    }
+
+    /**
+     * Adds the BIN with the latch held.
+     */
+    private void addBinToQueueAlreadyLatched(BIN bin) {
+
+        final Long node = bin.getNodeId();
+
+        if (binRefQueue.containsKey(node)) {
+            return;
+        }
+
+        binRefQueue.put(node, bin.createReference());
+    }
+
+    public boolean exists(long nodeId) {
+        synchronized (binRefQueueSync) {
+            return binRefQueue.containsKey(nodeId);
+        }
+    }
+
+    /**
+     * Return stats.
+     */
+    public StatGroup loadStats(StatsConfig config) {
+        compQueueSize.set((long) getBinRefQueueSize());
+
+        if (DEBUG) {
+            System.out.println("lazyProcessed = " + lazyProcessed);
+            System.out.println("wokenUp=" + wokenUp);
+        }
+
+        if (config.getClear()) {
+            lazyProcessed = 0;
+            wokenUp = 0;
+        }
+
+        return stats.cloneGroup(config.getClear());
+    }
+
+    /**
+     * Return the number of retries when a deadlock exception occurs.
+     */
+    @Override
+    protected long nDeadlockRetries() {
+        return envImpl.getConfigManager().getInt
+            (EnvironmentParams.COMPRESSOR_RETRY);
+    }
+
+    @Override
+    public synchronized void onWakeup()
+        throws DatabaseException {
+
+        if (envImpl.isClosing()) {
+            return;
+        }
+        wokenUp++;
+        doCompress();
+    }
+
+    /**
+     * The real work of doing a compress. This may be called by the compressor
+     * thread or programmatically.
+     */
+    public synchronized void doCompress()
+        throws DatabaseException {
+
+        /*
+         * Make a snapshot of the current work queue so the compressor thread
+         * can safely iterate over the queue. Note that this impacts lazy
+         * compression, because lazy compressors will not see BINReferences
+         * that have been moved to the snapshot.
+         */
+        final Map<Long, BINReference> queueSnapshot;
+        final int binQueueSize;
+        synchronized (binRefQueueSync) {
+            binQueueSize = binRefQueue.size();
+            if (binQueueSize <= 0) {
+                return;
+            }
+            queueSnapshot = binRefQueue;
+            binRefQueue = new HashMap<>();
+        }
+
+        /* There is work to be done. */
+        resetPerRunCounters();
+        LoggerUtils.fine(logger, envImpl,
+                         "InCompress.doCompress called, queue size: " +
+                         binQueueSize);
+        if (LatchSupport.TRACK_LATCHES) {
+            LatchSupport.expectBtreeLatchesHeld(0);
+        }
+
+        /*
+         * Compressed entries must be counted as obsoleted. A separate
+         * tracker is used to accumulate tracked obsolete info so it can be
+         * added in a single call under the log write latch. We log the
+         * info for deleted subtrees immediately because we don't process
+         * deleted IN entries during recovery; this reduces the chance of
+         * lost info.
+         */
+        final LocalUtilizationTracker localTracker =
+            new LocalUtilizationTracker(envImpl);
+
+        /* Use local caching to reduce DbTree.getDb overhead. */
+        final Map<DatabaseId, DatabaseImpl> dbCache = new HashMap<>();
+
+        final DbTree dbTree = envImpl.getDbTree();
+        final BINSearch binSearch = new BINSearch();
+
+        try {
+            for (final BINReference binRef : queueSnapshot.values()) {
+
+                if (envImpl.isClosed()) {
+                    return;
+                }
+
+                if (!findDBAndBIN(binSearch, binRef, dbTree, dbCache)) {
+
+                    /*
+                     * Either the db is closed, or the BIN doesn't exist.
+                     * Don't process this BINReference.
+                     */
+                    continue;
+                }
+
+                /* Compress deleted slots and prune if possible. */
+                compressBin(binSearch.db, binSearch.bin, binRef, localTracker);
+            }
+
+            /* SR [#11144]*/
+            assert TestHookExecute.doHookIfSet(beforeFlushTrackerHook);
+
+            /*
+             * Count obsolete nodes and write out modified file summaries
+             * for recovery. All latches must have been released.
+ */ + envImpl.getUtilizationProfile().flushLocalTracker(localTracker); + + } finally { + dbTree.releaseDbs(dbCache); + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + accumulatePerRunCounters(); + } + } + + /** + * Compresses a single BIN and then deletes the BIN if it is empty. + * + * @param bin is latched when this method is called, and unlatched when it + * returns. + */ + private void compressBin( + DatabaseImpl db, + BIN bin, + BINReference binRef, + LocalUtilizationTracker localTracker) { + + /* Safe to get identifier keys; bin is latched. */ + final byte[] idKey = bin.getIdentifierKey(); + boolean empty = (bin.getNEntries() == 0); + + try { + if (!empty) { + + /* + * Deltas in cache cannot be compressed. + * + * We strive not to add a slot to the queue when we will log a + * delta. However, it is possible that an entry is added, or + * that an entry is not cleared by lazy compression prior to + * logging a full BIN. Clean-up for such queue entries is + * here. + */ + if (bin.isBINDelta()) { + return; + } + + /* If there are cursors on the BIN, requeue and try later. */ + if (bin.nCursors() > 0) { + addBinRefToQueue(binRef); + cursorsBinsThisRun++; + return; + } + + /* + * If a delta should be logged, do not compress dirty slots, + * since this would prevent logging a delta. + */ + if (!bin.compress( + !bin.shouldLogDelta() /*compressDirtySlots*/, + localTracker)) { + + /* If compression is incomplete, requeue and try later. */ + addBinRefToQueue(binRef); + return; + } + + /* After compression the BIN may be empty. */ + empty = (bin.getNEntries() == 0); + } + } finally { + bin.releaseLatch(); + } + + /* After releasing the latch, prune the BIN if it is empty. */ + if (empty) { + pruneBIN(db, binRef, idKey); + } + } + + /** + * If the target BIN is empty, attempt to remove the empty branch of the + * tree. + */ + private void pruneBIN(DatabaseImpl dbImpl, + BINReference binRef, + byte[] idKey) { + + try { + final Tree tree = dbImpl.getTree(); + tree.delete(idKey); + processedBinsThisRun++; + } catch (NodeNotEmptyException NNEE) { + + /* + * Something was added to the node since the point when the + * deletion occurred; we can't prune, and we can throw away this + * BINReference. + */ + nonEmptyBinsThisRun++; + } catch (CursorsExistException e) { + /* If there are cursors in the way of the delete, retry later. */ + addBinRefToQueue(binRef); + cursorsBinsThisRun++; + } + } + + /** + * Search the tree for the BIN that corresponds to this BINReference. + * + * @param binRef the BINReference that indicates the bin we want. + * + * @return the BIN that corresponds to this BINReference. The + * node is latched upon return. Returns null if the BIN can't be found. + */ + public BIN searchForBIN(DatabaseImpl db, BINReference binRef) { + return db.getTree().search(binRef.getKey(), CacheMode.UNCHANGED); + } + + /** + * Reset per-run counters. + */ + private void resetPerRunCounters() { + splitBinsThisRun = 0; + dbClosedBinsThisRun = 0; + cursorsBinsThisRun = 0; + nonEmptyBinsThisRun = 0; + processedBinsThisRun = 0; + } + + private void accumulatePerRunCounters() { + splitBins.add(splitBinsThisRun); + dbClosedBins.add(dbClosedBinsThisRun); + cursorsBins.add(cursorsBinsThisRun); + nonEmptyBins.add(nonEmptyBinsThisRun); + processedBins.add(processedBinsThisRun); + } + + /** + * Lazily/opportunistically compress a full BIN. + * + * The target IN should be latched when we enter, and it will be remain + * latched. 
+ * + * If compression succeeds, does not prune empty BINs, but does queue them + * for pruning later. If compression fails because a record lock cannot be + * obtained, queues the BIN to retry later. + * + * Note that we do not bother to delete queue entries for the BIN if + * compression succeeds. Queue entries are normally removed quickly by the + * compressor. In the case where queue entries happen to exist when we do + * the final compression below, we rely on the compressor to clean them up + * later on when they are processed. + */ + public void lazyCompress(final IN in, final boolean compressDirtySlots) { + + assert in.isLatchOwner(); + + /* Only full BINs can be compressed. */ + if (!in.isBIN() || in.isBINDelta()) { + return; + } + + final BIN bin = (BIN) in; + + /* + * Cursors prohibit compression. We queue for later when there is + * anything that can be compressed. + */ + if (bin.nCursors() > 0) { + for (int i = 0; i < bin.getNEntries(); i += 1) { + if (bin.isDefunct(i)) { + addBinToQueue(bin); + break; + } + } + return; + } + + if (bin.compress(compressDirtySlots, null /*localTracker*/)) { + if (bin.getNEntries() == 0) { + /* The BIN is empty. Prune it later. */ + addBinToQueue(bin); + } + } else { + /* A record lock prevented slot removal. Try again later. */ + addBinToQueue(bin); + } + + lazyProcessed++; + } + + /* + * Find the db and bin for a BINReference. + * @return true if the db is open and the target bin is found. + */ + private boolean findDBAndBIN( + BINSearch binSearch, + BINReference binRef, + DbTree dbTree, + Map dbCache) + throws DatabaseException { + + /* + * Find the database. Do not call releaseDb after this getDb, since + * the entire dbCache will be released later. + */ + binSearch.db = dbTree.getDb( + binRef.getDatabaseId(), lockTimeout, dbCache); + + if (binSearch.db == null || binSearch.db.isDeleted()) { + /* The db was deleted. Ignore this BIN Ref. */ + dbClosedBinsThisRun++; + return false; + } + + /* Perform eviction before each operation. */ + envImpl.daemonEviction(true /*backgroundIO*/); + + /* Find the BIN. */ + binSearch.bin = searchForBIN(binSearch.db, binRef); + + if (binSearch.bin == null || + binSearch.bin.getNodeId() != binRef.getNodeId()) { + /* The BIN may have been split. */ + if (binSearch.bin != null) { + binSearch.bin.releaseLatch(); + } + splitBinsThisRun++; + return false; + } + + return true; + } + + /* Struct to return multiple values from findDBAndBIN. */ + private static class BINSearch { + public DatabaseImpl db; + public BIN bin; + } +} diff --git a/src/com/sleepycat/je/incomp/package-info.java b/src/com/sleepycat/je/incomp/package-info.java new file mode 100644 index 0000000..97761a8 --- /dev/null +++ b/src/com/sleepycat/je/incomp/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: IN compressor performs background deletion of defunct IN slots + * and deletes empty INs. 
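+ *
+ * <p>An illustrative sketch of the cycle (lazyCompress and doCompress are
+ * INCompressor methods; the compressor variable here is hypothetical):
+ * <pre>
+ *    // Opportunistic pass: compresses the BIN in place when possible,
+ *    // otherwise queues a BINReference for the daemon.
+ *    compressor.lazyCompress(bin, false);
+ *
+ *    // Daemon pass: drains a snapshot of the queue, compressing BINs
+ *    // and pruning those that became empty.
+ *    compressor.doCompress();
+ * </pre>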
+ */ +package com.sleepycat.je.incomp; \ No newline at end of file diff --git a/src/com/sleepycat/je/jca/ra/JEConnection.java b/src/com/sleepycat/je/jca/ra/JEConnection.java new file mode 100644 index 0000000..a89b53e --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JEConnection.java @@ -0,0 +1,107 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jca.ra; + +import java.io.Closeable; +import javax.resource.ResourceException; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.Transaction; + +/** + * A JEConnection provides access to JE services. See + * <JEHOME>/examples/jca/HOWTO-**.txt and + * <JEHOME>/examples/jca/simple/SimpleBean.java for more information on + * how to build the resource adaptor and use a JEConnection. + */ +public class JEConnection implements Closeable { + + private JEManagedConnection mc; + private JELocalTransaction txn; + + public JEConnection(JEManagedConnection mc) { + this.mc = mc; + } + + protected void setManagedConnection(JEManagedConnection mc, + JELocalTransaction lt) { + this.mc = mc; + if (txn == null) { + txn = lt; + } + } + + JELocalTransaction getLocalTransaction() { + return txn; + } + + void setLocalTransaction(JELocalTransaction txn) { + this.txn = txn; + } + + public Environment getEnvironment() { + return mc.getEnvironment(); + } + + public Database openDatabase(String name, DatabaseConfig config) + throws DatabaseException { + + return mc.openDatabase(name, config); + } + + public SecondaryDatabase openSecondaryDatabase(String name, + Database primaryDatabase, + SecondaryConfig config) + throws DatabaseException { + + return mc.openSecondaryDatabase(name, primaryDatabase, config); + } + + public void removeDatabase(String databaseName) + throws DatabaseException { + + mc.removeDatabase(databaseName); + } + + public long truncateDatabase(String databaseName, boolean returnCount) + throws DatabaseException { + + return mc.truncateDatabase(databaseName, returnCount); + } + + public Transaction getTransaction() + throws ResourceException { + + if (txn == null) { + return null; + } + + try { + return txn.getTransaction(); + } catch (DatabaseException DE) { + ResourceException ret = new ResourceException(DE.toString()); + ret.initCause(DE); + throw ret; + } + } + + public void close() { + mc.close(); + } +} diff --git a/src/com/sleepycat/je/jca/ra/JEConnectionFactory.java b/src/com/sleepycat/je/jca/ra/JEConnectionFactory.java new file mode 100644 index 0000000..2285956 --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JEConnectionFactory.java @@ -0,0 +1,48 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jca.ra; + +import java.io.Serializable; + +import javax.resource.Referenceable; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.TransactionConfig; + +/** + * An application may obtain a {@link JEConnection} in this manner: + *

+ * <pre>
+ *    InitialContext iniCtx = new InitialContext();
+ *    Context enc = (Context) iniCtx.lookup("java:comp/env");
+ *    Object ref = enc.lookup("ra/JEConnectionFactory");
+ *    JEConnectionFactory dcf = (JEConnectionFactory) ref;
+ *    JEConnection dc = dcf.getConnection(envDir, envConfig);
+ * </pre>
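+ *
+ * A transaction configuration may also be supplied via the three-argument
+ * overload below (an illustrative sketch, not from the original javadoc;
+ * envDir and envConfig are as above):
+ * <pre>
+ *    TransactionConfig txnConfig = new TransactionConfig();
+ *    txnConfig.setSerializableIsolation(true);
+ *    JEConnection dc2 = dcf.getConnection(envDir, envConfig, txnConfig);
+ * </pre>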
        + * + * See <JEHOME>/examples/jca/HOWTO-**.txt and + * <JEHOME>/examples/jca/simple/SimpleBean.java for more information + * on how to build the resource adapter and use a JEConnection. + */ +public interface JEConnectionFactory + extends Referenceable, Serializable { + + public JEConnection getConnection(String jeRootDir, + EnvironmentConfig envConfig) + throws JEException; + + public JEConnection getConnection(String jeRootDir, + EnvironmentConfig envConfig, + TransactionConfig transConfig) + throws JEException; +} diff --git a/src/com/sleepycat/je/jca/ra/JEConnectionFactoryImpl.java b/src/com/sleepycat/je/jca/ra/JEConnectionFactoryImpl.java new file mode 100644 index 0000000..17b831b --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JEConnectionFactoryImpl.java @@ -0,0 +1,76 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jca.ra; + +import java.io.File; + +import javax.naming.Reference; +import javax.resource.ResourceException; +import javax.resource.spi.ConnectionManager; +import javax.resource.spi.ManagedConnectionFactory; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.TransactionConfig; + +public class JEConnectionFactoryImpl implements JEConnectionFactory { + + private static final long serialVersionUID = 410682596L; + + /* + * These are not transient because SJSAS seems to need to serialize + * them when leaving them in JNDI. + */ + private final /* transient */ ConnectionManager manager; + private final /* transient */ ManagedConnectionFactory factory; + private Reference reference; + + /* Make the constructor public for serializability testing. */ + public JEConnectionFactoryImpl(ConnectionManager manager, + ManagedConnectionFactory factory) { + this.manager = manager; + this.factory = factory; + } + + public JEConnection getConnection(String jeRootDir, + EnvironmentConfig envConfig) + throws JEException { + + return getConnection(jeRootDir, envConfig, null); + } + + public JEConnection getConnection(String jeRootDir, + EnvironmentConfig envConfig, + TransactionConfig transConfig) + throws JEException { + + JEConnection dc = null; + JERequestInfo jeInfo = + new JERequestInfo(new File(jeRootDir), envConfig, transConfig); + try { + dc = (JEConnection) manager.allocateConnection(factory, jeInfo); + } catch (ResourceException e) { + throw new JEException("Unable to get Connection: " + e); + } + + return dc; + } + + public void setReference(Reference reference) { + this.reference = reference; + } + + public Reference getReference() { + return reference; + } +} diff --git a/src/com/sleepycat/je/jca/ra/JEConnectionMetaData.java b/src/com/sleepycat/je/jca/ra/JEConnectionMetaData.java new file mode 100644 index 0000000..dd16fe0 --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JEConnectionMetaData.java @@ -0,0 +1,40 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jca.ra; + +import javax.resource.spi.ManagedConnectionMetaData; + +public class JEConnectionMetaData + implements ManagedConnectionMetaData { + + public JEConnectionMetaData() { + } + + public String getEISProductName() { + return "Berkeley DB Java Edition JCA"; + } + + public String getEISProductVersion() { + return "2.0"; + } + + public int getMaxConnections() { + /* Make a je.* parameter? */ + return 100; + } + + public String getUserName() { + return null; + } +} diff --git a/src/com/sleepycat/je/jca/ra/JEException.java b/src/com/sleepycat/je/jca/ra/JEException.java new file mode 100644 index 0000000..199303c --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JEException.java @@ -0,0 +1,23 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jca.ra; + +public class JEException extends Exception { + + private static final long serialVersionUID = 329949514L; + + public JEException(String message) { + super(message); + } +} diff --git a/src/com/sleepycat/je/jca/ra/JELocalTransaction.java b/src/com/sleepycat/je/jca/ra/JELocalTransaction.java new file mode 100644 index 0000000..1e8a0aa --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JELocalTransaction.java @@ -0,0 +1,134 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.jca.ra; + +import javax.resource.ResourceException; +import javax.resource.spi.ConnectionEvent; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.XAEnvironment; + +public class JELocalTransaction + implements javax.resource.cci.LocalTransaction, + javax.resource.spi.LocalTransaction { + + private static boolean DEBUG = false; + + private transient XAEnvironment env; + private transient TransactionConfig transConfig; + private transient JEManagedConnection mgdConn; + + JELocalTransaction(XAEnvironment env, + TransactionConfig transConfig, + JEManagedConnection mgdConn) { + this.env = env; + this.transConfig = transConfig; + this.mgdConn = mgdConn; + } + + public Transaction getTransaction() + throws DatabaseException { + + return env.getThreadTransaction(); + } + + protected XAEnvironment getEnv() { + return env; + } + + private void checkEnv(String methodName) + throws ResourceException { + + if (env == null) { + throw new ResourceException("env is null in " + methodName); + } + } + + /* + * Methods for LocalTransaction. + */ + + public void begin() + throws ResourceException { + + checkEnv("begin"); + long id = -1; + try { + Transaction txn = env.beginTransaction(null, transConfig); + env.setThreadTransaction(txn); + id = txn.getId(); + } catch (DatabaseException DE) { + throw new ResourceException("During begin: " + DE.toString()); + } + + ConnectionEvent connEvent = new ConnectionEvent + (mgdConn, ConnectionEvent.LOCAL_TRANSACTION_STARTED); + connEvent.setConnectionHandle(mgdConn); + mgdConn.sendConnectionEvent(connEvent); + + if (DEBUG) { + System.out.println("JELocalTransaction.begin " + id); + } + } + + public void commit() + throws ResourceException { + + checkEnv("commit"); + try { + env.getThreadTransaction().commit(); + } catch (DatabaseException DE) { + ResourceException ret = new ResourceException(DE.toString()); + ret.initCause(DE); + throw ret; + } finally { + env.setThreadTransaction(null); + } + + ConnectionEvent connEvent = new ConnectionEvent + (mgdConn, ConnectionEvent.LOCAL_TRANSACTION_COMMITTED); + connEvent.setConnectionHandle(mgdConn); + mgdConn.sendConnectionEvent(connEvent); + + if (DEBUG) { + System.out.println("JELocalTransaction.commit"); + } + } + + public void rollback() + throws ResourceException { + + checkEnv("rollback"); + try { + env.getThreadTransaction().abort(); + } catch (DatabaseException DE) { + ResourceException ret = new ResourceException(DE.toString()); + ret.initCause(DE); + throw ret; + } finally { + env.setThreadTransaction(null); + } + + ConnectionEvent connEvent = new ConnectionEvent + (mgdConn, ConnectionEvent.LOCAL_TRANSACTION_ROLLEDBACK); + connEvent.setConnectionHandle(mgdConn); + mgdConn.sendConnectionEvent(connEvent); + + if (DEBUG) { + System.out.println("JELocalTransaction.rollback"); + } + } +} diff --git a/src/com/sleepycat/je/jca/ra/JEManagedConnection.java b/src/com/sleepycat/je/jca/ra/JEManagedConnection.java new file mode 100644 index 0000000..4b863e6 --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JEManagedConnection.java @@ -0,0 +1,315 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jca.ra; + +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import javax.resource.ResourceException; +import javax.resource.spi.ConnectionEvent; +import javax.resource.spi.ConnectionEventListener; +import javax.resource.spi.ConnectionRequestInfo; +import javax.resource.spi.LocalTransaction; +import javax.resource.spi.ManagedConnection; +import javax.resource.spi.ManagedConnectionMetaData; +import javax.security.auth.Subject; +import javax.transaction.xa.XAResource; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.XAEnvironment; + +public class JEManagedConnection implements ManagedConnection { + private final ArrayList listeners; + private JEConnection conn; + private XAEnvironment env; + private JELocalTransaction savedLT; + private TransactionConfig savedTransConfig; + private final Map rwDatabaseHandleCache; + private final Map roDatabaseHandleCache; + private final Map rwSecondaryDatabaseHandleCache; + private final Map roSecondaryDatabaseHandleCache; + + JEManagedConnection(Subject subject, JERequestInfo jeInfo) + throws ResourceException { + + try { + savedTransConfig = jeInfo.getTransactionConfig(); + this.env = new XAEnvironment(jeInfo.getJERootDir(), + jeInfo.getEnvConfig()); + } catch (DatabaseException DE) { + throw new ResourceException(DE.toString()); + } + listeners = new ArrayList(); + savedLT = null; + rwDatabaseHandleCache = new HashMap(); + roDatabaseHandleCache = new HashMap(); + rwSecondaryDatabaseHandleCache = new HashMap(); + roSecondaryDatabaseHandleCache = new HashMap(); + } + + public Object getConnection(Subject subject, + ConnectionRequestInfo connectionRequestInfo) { + if (conn == null) { + conn = new JEConnection(this); + } + return conn; + } + + protected XAEnvironment getEnvironment() { + return env; + } + + public LocalTransaction getLocalTransaction() { + + /* + * If there is no JEConnection associated with this ManagedConnection + * yet, then the ManagedConnection holds on to the JELocalTransaction. + * Once a JEConnection is associated (it may not ever happen), we hand + * off the JELocalTransaction to the JEConnection and forget about it + * in the ManagedConnection. 
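+         *
+         * An illustrative summary of the handoff (derived from the code
+         * below):
+         *   conn == null : create savedLT and return it;
+         *   conn != null : use conn's transaction if set, else savedLT,
+         *                  else a new one; give it to conn, clear savedLT.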
+ */ + if (conn == null) { + savedLT = new JELocalTransaction(env, savedTransConfig, this); + return savedLT; + } + + JELocalTransaction lt = conn.getLocalTransaction(); + if (lt == null) { + if (savedLT == null) { + lt = new JELocalTransaction(env, savedTransConfig, this); + } else { + lt = savedLT; + } + conn.setLocalTransaction(lt); + savedLT = null; + } + return lt; + } + + public XAResource getXAResource() { + return env; + } + + public void associateConnection(Object connection) { + conn = (JEConnection) connection; + conn.setManagedConnection(this, savedLT); + savedLT = null; + } + + public void addConnectionEventListener(ConnectionEventListener listener) { + listeners.add(listener); + } + + public void + removeConnectionEventListener(ConnectionEventListener listener) { + + listeners.remove(listener); + } + + public ManagedConnectionMetaData getMetaData() { + return new JEConnectionMetaData(); + } + + public void setLogWriter(PrintWriter out) { + } + + public PrintWriter getLogWriter() { + return null; + } + + protected void close() { + ConnectionEvent connEvent = + new ConnectionEvent(this, ConnectionEvent.CONNECTION_CLOSED); + connEvent.setConnectionHandle(conn); + sendConnectionEvent(connEvent); + } + + protected void sendConnectionEvent(ConnectionEvent connEvent) { + for (int i = listeners.size() - 1; i >= 0; i--) { + ConnectionEventListener listener = + listeners.get(i); + if (connEvent.getId() == ConnectionEvent.CONNECTION_CLOSED) { + listener.connectionClosed(connEvent); + } else if (connEvent.getId() == + ConnectionEvent.CONNECTION_ERROR_OCCURRED) { + listener.connectionErrorOccurred(connEvent); + } else if (connEvent.getId() == + ConnectionEvent.LOCAL_TRANSACTION_STARTED) { + listener.localTransactionStarted(connEvent); + } else if (connEvent.getId() == + ConnectionEvent.LOCAL_TRANSACTION_COMMITTED) { + listener.localTransactionCommitted(connEvent); + } else if (connEvent.getId() == + ConnectionEvent.LOCAL_TRANSACTION_ROLLEDBACK) { + listener.localTransactionRolledback(connEvent); + } + } + } + + public void destroy() + throws ResourceException { + + try { + cleanupDatabaseHandleCache(roDatabaseHandleCache); + cleanupDatabaseHandleCache(rwDatabaseHandleCache); + cleanupDatabaseHandleCache(roSecondaryDatabaseHandleCache); + cleanupDatabaseHandleCache(rwSecondaryDatabaseHandleCache); + env.close(); + } catch (DatabaseException DE) { + throw new ResourceException(DE.toString()); + } + } + + public void cleanup() { + } + + void removeDatabase(String dbName) + throws DatabaseException { + + removeDatabaseFromCache(roDatabaseHandleCache, dbName); + removeDatabaseFromCache(rwDatabaseHandleCache, dbName); + removeDatabaseFromCache(roSecondaryDatabaseHandleCache, dbName); + removeDatabaseFromCache(rwSecondaryDatabaseHandleCache, dbName); + env.removeDatabase(null, dbName); + } + + long truncateDatabase(String dbName, boolean returnCount) + throws DatabaseException { + + removeDatabaseFromCache(roDatabaseHandleCache, dbName); + removeDatabaseFromCache(rwDatabaseHandleCache, dbName); + removeDatabaseFromCache(roSecondaryDatabaseHandleCache, dbName); + removeDatabaseFromCache(rwSecondaryDatabaseHandleCache, dbName); + return env.truncateDatabase(null, dbName, returnCount); + } + + Database openDatabase(String dbName, DatabaseConfig config) + throws DatabaseException { + + if (config.getReadOnly()) { + synchronized (roDatabaseHandleCache) { + return openDatabaseInternal + (roDatabaseHandleCache, dbName, config); + } + } else { + synchronized (rwDatabaseHandleCache) { + return 
openDatabaseInternal + (rwDatabaseHandleCache, dbName, config); + } + } + } + + SecondaryDatabase openSecondaryDatabase(String dbName, + Database primaryDatabase, + SecondaryConfig config) + throws DatabaseException { + + if (config.getReadOnly()) { + synchronized (roSecondaryDatabaseHandleCache) { + return openSecondaryDatabaseInternal + (roSecondaryDatabaseHandleCache, dbName, + primaryDatabase, config); + } + } else { + synchronized (rwSecondaryDatabaseHandleCache) { + return openSecondaryDatabaseInternal + (rwSecondaryDatabaseHandleCache, dbName, + primaryDatabase, config); + } + } + } + + private Database + openDatabaseInternal(Map databaseHandleCache, + String dbName, + DatabaseConfig config) + throws DatabaseException { + + Database db; + if (config.getExclusiveCreate()) { + db = env.openDatabase(null, dbName, config); + databaseHandleCache.put(dbName, db); + } else { + db = databaseHandleCache.get(dbName); + if (db == null) { + db = env.openDatabase(null, dbName, config); + databaseHandleCache.put(dbName, db); + } else { + DbInternal.validate(config, db.getConfig()); + } + } + return db; + } + + private SecondaryDatabase + openSecondaryDatabaseInternal(Map databaseHandleCache, + String dbName, + Database primaryDatabase, + SecondaryConfig config) + throws DatabaseException { + + SecondaryDatabase db; + if (config.getExclusiveCreate()) { + db = env.openSecondaryDatabase(null, dbName, + primaryDatabase, config); + databaseHandleCache.put(dbName, db); + } else { + db = (SecondaryDatabase) databaseHandleCache.get(dbName); + if (db == null) { + db = env.openSecondaryDatabase(null, dbName, + primaryDatabase, config); + databaseHandleCache.put(dbName, db); + } else { + DbInternal.validate(config, db.getConfig()); + } + } + return db; + } + + private void removeDatabaseFromCache(Map cache, + String dbName) + throws DatabaseException { + + synchronized (cache) { + Database db = cache.get(dbName); + if (db == null) { + return; + } + db.close(); + cache.remove(dbName); + } + } + + private void cleanupDatabaseHandleCache(Map cache) + throws DatabaseException { + + synchronized (cache) { + Iterator iter = cache.values().iterator(); + + while (iter.hasNext()) { + Database db = iter.next(); + db.close(); + } + } + } +} diff --git a/src/com/sleepycat/je/jca/ra/JEManagedConnectionFactory.java b/src/com/sleepycat/je/jca/ra/JEManagedConnectionFactory.java new file mode 100644 index 0000000..691cc83 --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JEManagedConnectionFactory.java @@ -0,0 +1,106 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.jca.ra; + +import java.io.PrintWriter; +import java.io.Serializable; +import java.util.Iterator; +import java.util.Set; + +import javax.resource.ResourceException; +import javax.resource.spi.ConnectionManager; +import javax.resource.spi.ConnectionRequestInfo; +import javax.resource.spi.ManagedConnection; +import javax.resource.spi.ManagedConnectionFactory; +import javax.security.auth.Subject; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; + +public class JEManagedConnectionFactory + implements ManagedConnectionFactory, Serializable { + + private static final long serialVersionUID = 658705244L; + + public JEManagedConnectionFactory() { + } + + public Object createConnectionFactory(ConnectionManager cxManager) { + return new JEConnectionFactoryImpl(cxManager, this); + } + + public Object createConnectionFactory() { + throw EnvironmentFailureException.unexpectedState + ("must supply a connMgr"); + } + + public ManagedConnection + createManagedConnection(Subject subject, + ConnectionRequestInfo info) + throws ResourceException { + + JERequestInfo jeInfo = (JERequestInfo) info; + return new JEManagedConnection(subject, jeInfo); + } + + public ManagedConnection + matchManagedConnections(Set connectionSet, + Subject subject, + ConnectionRequestInfo info) { + JERequestInfo jeInfo = (JERequestInfo) info; + Iterator iter = connectionSet.iterator(); + while (iter.hasNext()) { + Object next = iter.next(); + if (next instanceof JEManagedConnection) { + JEManagedConnection mc = (JEManagedConnection) next; + EnvironmentImpl nextEnvImpl = + DbInternal.getNonNullEnvImpl(mc.getEnvironment()); + /* Do we need to match on more than root dir and r/o? */ + if (nextEnvImpl.getEnvironmentHome(). + equals(jeInfo.getJERootDir()) && + nextEnvImpl.isReadOnly() == + jeInfo.getEnvConfig().getReadOnly()) { + return mc; + } + } + } + return null; + } + + public void setLogWriter(PrintWriter out) { + } + + public PrintWriter getLogWriter() { + return null; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (obj instanceof JEManagedConnectionFactory) { + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/src/com/sleepycat/je/jca/ra/JERequestInfo.java b/src/com/sleepycat/je/jca/ra/JERequestInfo.java new file mode 100644 index 0000000..ca8e3df --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/JERequestInfo.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.jca.ra; + +import java.io.File; + +import javax.resource.spi.ConnectionRequestInfo; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.TransactionConfig; + +public class JERequestInfo implements ConnectionRequestInfo { + private File rootDir; + private EnvironmentConfig envConfig; + private TransactionConfig transConfig; + + public JERequestInfo(File rootDir, + EnvironmentConfig envConfig, + TransactionConfig transConfig) { + this.rootDir = rootDir; + this.envConfig = envConfig; + this.transConfig = transConfig; + } + + File getJERootDir() { + return rootDir; + } + + EnvironmentConfig getEnvConfig() { + return envConfig; + } + + TransactionConfig getTransactionConfig() { + return transConfig; + } + + public boolean equals(Object obj) { + JERequestInfo info = (JERequestInfo) obj; + return rootDir.equals(info.rootDir); + } + + public int hashCode() { + return rootDir.hashCode(); + } + + public String toString() { + return ""; + } +} diff --git a/src/com/sleepycat/je/jca/ra/package.html b/src/com/sleepycat/je/jca/ra/package.html new file mode 100644 index 0000000..ce5fc42 --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/package.html @@ -0,0 +1,42 @@ + + + + + + +Support for the Java Connector Architecture, which provides a standard +for connecting the J2EE platform to legacy enterprise information +systems (EIS), such as ERP systems, database systems, and legacy +applications not written in Java. + +

+<h2>Package Specification</h2>
+
+<p>
+Users who want to run JE within a J2EE Application Server can use the
+JCA Resource Adapter to connect to JE through a standard API. The JE
+Resource Adapter supports all three J2EE application server
+transaction types:
+</p>
+
+<ul>
+<li>No transaction.</li>
+<li>Local transactions.</li>
+<li>XA transactions.</li>
+</ul>
+
+<p>
+JCA also includes the Java Transaction API (JTA), which means that JE
+supports two-phase commit (XA). Therefore, JE can participate
+in distributed transactions managed by either a J2EE server or
+the application's direct use of the JTA API.
+</p>

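+<p>
+For example, application code can obtain a connection through JNDI (an
+illustrative sketch; the JNDI name and the envDir/envConfig variables are
+assumptions, mirroring the example in the JEConnectionFactory javadoc):
+</p>
+<pre>
+    InitialContext iniCtx = new InitialContext();
+    Context enc = (Context) iniCtx.lookup("java:comp/env");
+    JEConnectionFactory dcf =
+        (JEConnectionFactory) enc.lookup("ra/JEConnectionFactory");
+    JEConnection dc = dcf.getConnection(envDir, envConfig);
+    try {
+        Environment env = dc.getEnvironment();
+        // Use env to open databases, etc.
+    } finally {
+        dc.close();
+    }
+</pre>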
        + + diff --git a/src/com/sleepycat/je/jca/ra/ra.xml b/src/com/sleepycat/je/jca/ra/ra.xml new file mode 100644 index 0000000..136a326 --- /dev/null +++ b/src/com/sleepycat/je/jca/ra/ra.xml @@ -0,0 +1,56 @@ + + + + + Berkeley DB Java Edition JCA Adapter + Oracle + 1.0 + Database + 2.0 + + + Berkeley DB Java Edition; license may be required for redistribution. + + true + + + com.sleepycat.je.jca.ra.JEManagedConnectionFactory + + com.sleepycat.je.jca.ra.JEConnectionFactory + + com.sleepycat.je.jca.ra.JEConnectionFactoryImpl + + com.sleepycat.je.jca.ra.JEConnection + + com.sleepycat.je.jca.ra.JEConnectionImpl + + LocalTransaction + + + UserName + java.lang.String + + + + Password + java.lang.String + + + + BasicPassword + javax.resource.security.PasswordCredential + + true + + Read/Write access is required to the contents of + the JERootDir + permission java.io.FilePermission + "/tmp/je_store/*", "read,write"; + + + diff --git a/src/com/sleepycat/je/jmx/JEDiagnostics.java b/src/com/sleepycat/je/jmx/JEDiagnostics.java new file mode 100644 index 0000000..b1cf15e --- /dev/null +++ b/src/com/sleepycat/je/jmx/JEDiagnostics.java @@ -0,0 +1,318 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import java.util.ArrayList; +import java.util.logging.Level; + +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.AttributeNotFoundException; +import javax.management.DynamicMBean; +import javax.management.InvalidAttributeValueException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanParameterInfo; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + *

+ * JEDiagnostics is a debugging MBean for a non-replicated JE Environment.
+ * This is intended as a locus of field support functionality. While it may be
+ * used by the application developer, the primary use case is for a support
+ * situation. Currently much of this functionality is also available through
+ * the standard java.util.logging MBean.
+ * <p>
+ * It is a concrete MBean created by registering a JE Environment as an MBean
+ * through setting the JEDiagnostics system property. It only works on an
+ * active JE Environment, and one Environment can have only one JEDiagnostics
+ * instance. There are two attributes and one operation:
+ * <p>
+ * Attributes:
+ * <ul>
+ * <li>consoleHandlerLevel: sets the console handler level.
+ * <li>fileHandlerLevel: sets the file handler level.
+ * </ul>
+ * Operations:
+ * <ul>
+ * <li>resetLoggerLevel: sets the level for the specified logger in
+ * the LogManager.
+ * </ul>
+ * <p>
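+ * For example, a hedged sketch of driving these from a JMX client (the
+ * ObjectName pattern follows JEMBean.doRegister later in this import; the
+ * environment home path and target logger name are hypothetical):
+ * <pre>
+ *    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+ *    ObjectName name = new ObjectName(
+ *        "com.sleepycat.je.jmx:name=JEDiagnostics(/envHome)");
+ *    server.setAttribute(name,
+ *        new Attribute("consoleHandlerLevel", "FINE"));
+ *    server.invoke(name, "resetLoggerLevel",
+ *        new Object[] { "com.sleepycat.je", "FINE" },
+ *        new String[] { "java.lang.String", "java.lang.String" });
+ * </pre>
+ * <p>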
        + * We can use these attributes and operations to dynamically change the + * logging level for debugging purposes. + */ +public class JEDiagnostics extends JEMBean implements DynamicMBean { + + /* --------------------- Attributes -------------------------- */ + protected static final String CONSOLEHANDLER_LEVEL = "consoleHandlerLevel"; + protected static final String FILEHANDLER_LEVEL = "fileHandlerLevel"; + + /* ConsoleHandler. */ + protected static final MBeanAttributeInfo ATT_CONSOLEHANDLER_LEVEL = + new MBeanAttributeInfo + (CONSOLEHANDLER_LEVEL, "java.lang.String", "ConsoleHandler level.", + true, true, false); + + /* FileHandler. */ + protected static final MBeanAttributeInfo ATT_FILEHANDLER_LEVEL = + new MBeanAttributeInfo + (FILEHANDLER_LEVEL, "java.lang.String", "FileHandler level.", + true, true, false); + + /* --------------------- Operations -------------------------- */ + + /* Operation names */ + protected static final String OP_RESET_LOGGING = "resetLoggerLevel"; + + /* Set the parameters and operation info for resetting logger's level. */ + protected static final MBeanParameterInfo[] resetLoggingParams = { + new MBeanParameterInfo("Logger Name", "java.lang.String", + "Specify the target logger."), + new MBeanParameterInfo("Logging Level", "java.lang.String", + "The new logging level for the target logger.") + }; + + /* Reset logger's level operation. */ + protected static final MBeanOperationInfo OP_RESET_LOGGING_LEVEL = + new MBeanOperationInfo + (OP_RESET_LOGGING, + "Change the logging level for the specified logger.", + resetLoggingParams, "void", MBeanOperationInfo.UNKNOWN); + + protected JEDiagnostics(Environment env) { + super(env); + } + + public JEDiagnostics() { + super(); + } + + @Override + protected void initClassFields() { + currentClass = JEDiagnostics.class; + className = "JEDiagnostics"; + DESCRIPTION = "Logging Monitor on an open Environment."; + } + + /** + * @see DynamicMBean#getAttribute + */ + public Object getAttribute(String attributeName) + throws AttributeNotFoundException, + MBeanException { + + if (attributeName == null) { + throw new AttributeNotFoundException + ("Attribute name can't be null."); + } + + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + if (attributeName.equals(CONSOLEHANDLER_LEVEL)) { + return envImpl.getConsoleHandler().getLevel().toString(); + } else if (attributeName.equals(FILEHANDLER_LEVEL)) { + return envImpl.getFileHandler().getLevel().toString(); + } else { + throw new AttributeNotFoundException + ("Attributes " + attributeName + " is not valid."); + } + } catch (DatabaseException e) { + throw new MBeanException(new RuntimeException(e.getMessage())); + } + } + + /** + * @see DynamicMBean#setAttribute + */ + public void setAttribute(Attribute attribute) + throws AttributeNotFoundException, + InvalidAttributeValueException, + MBeanException { + + if (attribute == null) { + throw new AttributeNotFoundException("Attribute can't be null."); + } + + /* Sanity check parameters. 
*/ + String name = attribute.getName(); + Object value = attribute.getValue(); + + if (name == null) { + throw new AttributeNotFoundException + ("Attribute name can't be null."); + } + + if (value == null) { + throw new InvalidAttributeValueException + ("Attribute value for attribute " + name + " can't be null"); + } + + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Level level = Level.parse((String) value); + + if (name.equals(CONSOLEHANDLER_LEVEL)) { + envImpl.getConsoleHandler().setLevel(level); + } else if (name.equals(FILEHANDLER_LEVEL)) { + envImpl.getFileHandler().setLevel(level); + } else { + throw new AttributeNotFoundException + ("Attribute " + name + " is not valid."); + } + } catch (NullPointerException e) { + throw new InvalidAttributeValueException + ("Setting value for attribute " + name + + "is invalid because of " + e.getMessage()); + } catch (SecurityException e) { + throw new MBeanException(e, e.getMessage()); + } + } + + /* Parse and return the level represented by an object. */ + private Level getLevel(Object level) { + try { + return Level.parse((String) level); + } catch (NullPointerException e) { + throw new IllegalArgumentException + ("Can't use null for level value.", e); + } + } + + /** + * @see DynamicMBean#getAttributes + */ + public AttributeList getAttributes(String[] attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("Attributes can't be null"); + } + + AttributeList results = new AttributeList(); + + for (int i = 0; i < attributes.length; i++) { + try { + Object value = getAttribute(attributes[i]); + results.add(new Attribute(attributes[i], value)); + } catch (Exception e) { + e.printStackTrace(); + } + } + + return results; + } + + /** + * @see DynamicMBean#setAttributes + */ + public AttributeList setAttributes(AttributeList attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("Attribute list can't be null"); + } + + AttributeList results = new AttributeList(); + + for (int i = 0; i < attributes.size(); i++) { + Attribute attr = (Attribute) attributes.get(i); + try { + setAttribute(attr); + String name = attr.getName(); + Object newValue = getAttribute(name); + results.add(new Attribute(name, newValue)); + } catch (Exception e) { + e.printStackTrace(); + } + } + + return results; + } + + /** + * @see DynamicMBean#invoke + */ + public Object invoke(String actionName, + Object[] params, + String[] signature) + throws MBeanException { + + /* Sanity checking. */ + if (actionName == null) { + throw new IllegalArgumentException("ActionName can't be null."); + } + + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + if (actionName.equals(OP_RESET_LOGGING)) { + if (params == null || params.length != 2) { + return new IllegalArgumentException + ("Parameter is not valid"); + } + envImpl.resetLoggingLevel(((String) params[0]).trim(), + Level.parse((String) params[1])); + return null; + } + + return new IllegalArgumentException + ("ActionName: " + actionName + " is not valid."); + } catch (DatabaseException e) { + + /* + * Add the message for easiest deciphering of the problem. Since + * the original exception cannot be transferred, send the exception + * stack. 
+ */ + throw new MBeanException(new RuntimeException + (e.getMessage() + + LoggerUtils.getStackTrace(e))); + } catch (NullPointerException e) { + throw new MBeanException(e, e.getMessage()); + } + } + + @Override + protected void doRegisterMBean(Environment env) + throws Exception { + + server.registerMBean(new JEDiagnostics(env), jeName); + } + + @Override + protected MBeanAttributeInfo[] getAttributeList() { + ArrayList attrList = + new ArrayList(); + + attrList.add(ATT_CONSOLEHANDLER_LEVEL); + if (DbInternal.getNonNullEnvImpl(env).getFileHandler() != null) { + attrList.add(ATT_FILEHANDLER_LEVEL); + } + + return attrList.toArray(new MBeanAttributeInfo[attrList.size()]); + } + + @Override + protected void addOperations() { + operationList.add(OP_RESET_LOGGING_LEVEL); + } +} diff --git a/src/com/sleepycat/je/jmx/JEMBean.java b/src/com/sleepycat/je/jmx/JEMBean.java new file mode 100644 index 0000000..09958ce --- /dev/null +++ b/src/com/sleepycat/je/jmx/JEMBean.java @@ -0,0 +1,236 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import java.lang.management.ManagementFactory; +import java.lang.reflect.Constructor; +import java.util.ArrayList; + +import javax.management.DynamicMBean; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanConstructorInfo; +import javax.management.MBeanInfo; +import javax.management.MBeanNotificationInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanParameterInfo; +import javax.management.MBeanServer; +import javax.management.ObjectName; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.EnvironmentImpl.MBeanRegistrar; + +/* + * Base class for all JE concrete MBeans. + * + * It implements the MBeanRegistrar interface and defines the common part of + * those concrete MBeans. + * + * It defines the abstract methods which must be implemented in concrete + * MBeans. + * + * Subclasses of JEMBean must avoid passing JE exceptions across the network to + * the client side, since the client side will not have the proper JE exception + * class. For example, a method which does this: + * + * catch (DatabaseException databaseEx) { + * new MBeanException(databaseEx); + * } + * + * will result in a ClassNotFoundException on the client side when it receives + * the MBeanException, since it does not have DatabaseException. + */ +public abstract class JEMBean implements MBeanRegistrar { + + /* + * Parameters for getting JE database, environment stats, etc. + */ + public static final MBeanParameterInfo[] statParams = { + new MBeanParameterInfo("clear", "java.lang.Boolean", + "If true, reset statistics after reading."), + new MBeanParameterInfo("fast", "java.lang.Boolean", + "If true, only return statistics which do " + + "not require expensive computation.") + }; + + /* Concrete MBean's visible interface. */ + private MBeanInfo mbeanInfo; + + /* Fields used to register this concrete MBean. 
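+     * (doRegister below builds jeName as
+     * "com.sleepycat.je.jmx:name=" + className + "(" + noColonPathname + ")".)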
*/ + protected MBeanServer server; + protected ObjectName jeName; + + /* Name for this class. */ + protected String className; + + protected String DESCRIPTION; + + /* Class type for the MBean. */ + protected Class currentClass; + + /* Environment used in this MBean. */ + protected Environment env; + + protected ArrayList operationList = + new ArrayList(); + + protected JEMBean(Environment env) { + this.env = env; + initClassFields(); + resetMBeanInfo(); + } + + public JEMBean() { + initClassFields(); + } + + /* Initiate the class fields used in this MBean. */ + protected abstract void initClassFields(); + + /** + * Create the available management interface for this environment. The + * attributes and operations available vary according to environment + * configuration. + */ + protected void resetMBeanInfo() { + /* Generate the MBean description. */ + mbeanInfo = new MBeanInfo(currentClass.getName(), + DESCRIPTION, + getAttributeList(), + getConstructors(), + getOperationList(), + getNotificationInfo()); + } + + /** + * Get attribute metadata for this MBean. + * + * @return array of MBeanAttributeInfo objects describing the available + * attributes. + */ + protected abstract MBeanAttributeInfo[] getAttributeList(); + + /** + * Add MBean operations into the list. + */ + protected abstract void addOperations(); + + /** + * Get constructor metadata for this MBean. + * + * Since the process of getting constructors is the same for each concrete + * MBean, define it here to reduce coding work. + * + * @return array of MBeanConstructorInfo objects describing the constructor + * attributes. + */ + @SuppressWarnings("unchecked") + protected MBeanConstructorInfo[] getConstructors() { + + Constructor[] constructors = currentClass.getConstructors(); + MBeanConstructorInfo[] constructorInfo = + new MBeanConstructorInfo[constructors.length]; + for (int i = 0; i < constructors.length; i++) { + constructorInfo[i] = + new MBeanConstructorInfo(currentClass.getName(), + constructors[i]); + } + + return constructorInfo; + } + + /** + * Get operation metadata for this MBean. + * + * @return array of MBeanOperationInfo describing available operations. + */ + private MBeanOperationInfo[] getOperationList() { + addOperations(); + + return operationList.toArray + (new MBeanOperationInfo[operationList.size()]); + } + + /** + * Get notification metadata for this MBean. + * + * @return array of MBeanNotificationInfo describing notifications. + */ + protected MBeanNotificationInfo[] getNotificationInfo() { + return null; + } + + /** + * For EnvironmentImpl.MBeanRegistrar interface. + * + * Register this MBean with the MBeanServer. + */ + public void doRegister(Environment env) + throws Exception { + + server = ManagementFactory.getPlatformMBeanServer(); + + StringBuilder sb = new StringBuilder("com.sleepycat.je.jmx:name="); + sb.append(className).append("("); + String noColonPathname = + env.getHome().getPath().replaceAll(":", ""); + sb.append(noColonPathname).append(")"); + jeName = new ObjectName(sb.toString()); + doRegisterMBean(env); + } + + /* Register the MBean with the server. */ + protected abstract void doRegisterMBean(Environment env) + throws Exception; + + /** + * For EnvironmentImpl.MBeanRegistrar interface. + * + * Remove this MBean from the MBeanServer. + */ + public void doUnregister() + throws Exception { + + if (server != null) { + server.unregisterMBean(jeName); + } + } + + /** + * @see DynamicMBean#getMBeanInfo + * + * Implement the getMBeanInfo method of DynamicMBean. 
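+     * (Returns the cached mbeanInfo built by resetMBeanInfo; it is not
+     * recomputed on each call.)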
+ */ + public MBeanInfo getMBeanInfo() { + return mbeanInfo; + } + + /** + * Helper for creating a StatsConfig object to use as an operation + * parameter. + */ + protected StatsConfig getStatsConfig(Object[] params) { + StatsConfig statsConfig = new StatsConfig(); + if ((params != null) && (params.length > 0) && (params[0] != null)) { + Boolean clear = (Boolean) params[0]; + statsConfig.setClear(clear.booleanValue()); + } + if ((params != null) && (params.length > 1) && (params[1] != null)) { + Boolean fast = (Boolean) params[1]; + statsConfig.setFast(fast.booleanValue()); + } + + return statsConfig; + } +} diff --git a/src/com/sleepycat/je/jmx/JEMBeanHelper.java b/src/com/sleepycat/je/jmx/JEMBeanHelper.java new file mode 100644 index 0000000..dd686c1 --- /dev/null +++ b/src/com/sleepycat/je/jmx/JEMBeanHelper.java @@ -0,0 +1,750 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import javax.management.Attribute; +import javax.management.AttributeNotFoundException; +import javax.management.InvalidAttributeValueException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanNotificationInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanParameterInfo; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseExistsException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DatabaseStats; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.StatsConfig; + +/** + * @deprecated As of JE 4, JEMBeanHelper is deprecated in favor of the concrete + * MBeans available by default with a JE environment. These MBeans can be + * registered and enabled by the environment by setting the following JVM + * property: + * JEMonitor: + * This MBean provides general stats monitoring and access to basic + * environment level operations. + * + * JEMBeanHelper is a utility class for the MBean implementation which wants to + * add management of a JE environment to its capabilities. MBean + * implementations can contain a JEMBeanHelper instance to get MBean metadata + * for JE and to set attributes, get attributes, and invoke operations. + *

        + * com.sleepycat.je.jmx.JEMonitor and the example program + * jmx.JEApplicationMBean are two MBean implementations which provide support + * different application use cases. See those classes for examples of how to + * use JEMBeanHelper. + */ +public class JEMBeanHelper { + + /* + * A note to JE developers: all available JE attributes and operations are + * described in the following static info arrays. New management + * functionality can be added to the helper by adding to the appropriate + * set of static definitions. For example, if we want to add a new JE + * attribute called "foo", which is available for open environments, we + * need to define a new MBeanAttributeInfo in the OPEN_ATTR array. The + * helper then needs to provide an implementation in set/getAttribute. + */ + + /* Attribute names. */ + public static final String ATT_ENV_HOME = "environmentHome"; + public static final String ATT_OPEN = "isOpen"; + public static final String ATT_IS_READ_ONLY = "isReadOnly"; + public static final String ATT_IS_TRANSACTIONAL = "isTransactional"; + public static final String ATT_CACHE_SIZE = "cacheSize"; + public static final String ATT_CACHE_PERCENT = "cachePercent"; + public static final String ATT_LOCK_TIMEOUT = "lockTimeout"; + public static final String ATT_IS_SERIALIZABLE = "isSerializableIsolation"; + public static final String ATT_TXN_TIMEOUT = "transactionTimeout"; + public static final String ATT_SET_READ_ONLY = "openReadOnly"; + public static final String ATT_SET_TRANSACTIONAL = "openTransactional"; + public static final String ATT_SET_SERIALIZABLE = + "openSerializableIsolation"; + + /* COMMON_ATTR attributes are available for any environment. */ + private static final MBeanAttributeInfo[] COMMON_ATTR = { + + new MBeanAttributeInfo(ATT_ENV_HOME, + "java.lang.String", + "Environment home directory.", + true, // readable + false, // writable + false), // isIs + new MBeanAttributeInfo(ATT_OPEN, + "java.lang.Boolean", + "True if this environment is open.", + true, // readable + false, // writable + true) // isIs + }; + + /* OPEN_ATTR attributes are available for all open environments. */ + private static final MBeanAttributeInfo[] OPEN_ATTR = { + + new MBeanAttributeInfo(ATT_IS_READ_ONLY, + "java.lang.Boolean", + "True if this environment is read only.", + true, // readable + false, // writable + true), // isIs + new MBeanAttributeInfo(ATT_IS_TRANSACTIONAL, + "java.lang.Boolean", + "True if this environment supports transactions.", + true, // readable + false, // writable + true), // isIs + new MBeanAttributeInfo(ATT_CACHE_SIZE, + "java.lang.Long", + "Cache size, in bytes.", + true, // readable + true, // writable + false), // isIs + new MBeanAttributeInfo(ATT_CACHE_PERCENT, + "java.lang.Integer", + "By default, cache size is (cachePercent * " + + "JVM maximum memory. To change the cache size "+ + "using a percentage of the heap size, set " + + "the cache size to 0 and cachePercent to the "+ + "desired percentage value.", + true, // readable + true, // writable + false), // isIs + new MBeanAttributeInfo(ATT_LOCK_TIMEOUT, + "java.lang.Long", + "Lock timeout, in microseconds.", + true, // readable + false, // writable + false), // isIs + }; + + /* + * TRANSACTIONAL_ATTR attributes are available only for open, transactional + * environments. + */ + private static final MBeanAttributeInfo[] TRANSACTIONAL_ATTR = { + + new MBeanAttributeInfo(ATT_IS_SERIALIZABLE, + "java.lang.Boolean", + "True if this environment provides " + + "Serializable (degree 3) isolation. 
The " + + "default is RepeatableRead isolation.", + true, // readable + false, // writable + true), // isIs + new MBeanAttributeInfo(ATT_TXN_TIMEOUT, + "java.lang.Long", + "Transaction timeout, in seconds. A value " + + "of 0 means there is no timeout.", + true, // readable + false, // writable + false) // isIs + }; + + /* + * CREATE_ATTR attributes are available when the mbean is configured to + * support configuration and opening by the mbean. They express the + * configuration settings. + */ + private static final MBeanAttributeInfo[] CREATE_ATTR = { + + new MBeanAttributeInfo(ATT_SET_READ_ONLY, + "java.lang.Boolean", + "True if this environment should be opened " + + "in readonly mode.", + true, // readable + true, // writable + false), // isIs + new MBeanAttributeInfo(ATT_SET_TRANSACTIONAL, + "java.lang.Boolean", + "True if this environment should be opened " + + "in transactional mode.", + true, // readable + true, // writable + false), // isIs + new MBeanAttributeInfo(ATT_SET_SERIALIZABLE, + "java.lang.Boolean", + "True if this environment should be opened " + + "with serializableIsolation. The default is "+ + "false.", + true, // readable + true, // writable + false), // isIs + }; + + /* Operation names */ + static final String OP_CLEAN = "cleanLog"; + static final String OP_EVICT = "evictMemory"; + static final String OP_CHECKPOINT = "checkpoint"; + static final String OP_SYNC = "sync"; + static final String OP_ENV_STAT = "getEnvironmentStats"; + static final String OP_TXN_STAT = "getTxnStats"; + static final String OP_DB_NAMES = "getDatabaseNames"; + static final String OP_DB_STAT = "getDatabaseStats"; + + private static final MBeanOperationInfo OP_CLEAN_INFO = + new MBeanOperationInfo(OP_CLEAN, + "Remove obsolete environment log files. " + + "Zero or more log files will be cleaned as " + + "necessary to bring the disk space " + + "utilization of the environment above the " + + "configured minimum utilization threshold " + + "as determined by the setting " + + "je.cleaner.minUtilization. Returns the " + + "number of files cleaned, that will be " + + "deleted at the next qualifying checkpoint.", + new MBeanParameterInfo[0], // no params + "java.lang.Integer", + MBeanOperationInfo.UNKNOWN); + + private static final MBeanOperationInfo OP_EVICT_INFO = + new MBeanOperationInfo(OP_EVICT, + "Reduce cache usage to the threshold " + + "determined by the setting " + + "je.evictor.useMemoryFloor. ", + new MBeanParameterInfo[0], // no params + "void", + MBeanOperationInfo.UNKNOWN); + + /* parameter for checkpoint operation. */ + private static final MBeanParameterInfo[] checkpointParams = { + new MBeanParameterInfo("force", "java.lang.Boolean", + "If true, force a checkpoint even if " + + "there has been no activity since the last " + + "checkpoint. 
Returns true if a checkpoint " + + "executed.") + }; + + private static final MBeanOperationInfo OP_CHECKPOINT_INFO = + new MBeanOperationInfo(OP_CHECKPOINT, + "Checkpoint the environment.", + checkpointParams, + "void", + MBeanOperationInfo.UNKNOWN); + + private static final MBeanOperationInfo OP_SYNC_INFO = + new MBeanOperationInfo(OP_SYNC, + "Flush the environment to stable storage.", + new MBeanParameterInfo[0], // no params + "void", + MBeanOperationInfo.UNKNOWN); + + private static final MBeanParameterInfo[] statParams = { + new MBeanParameterInfo("clear", "java.lang.Boolean", + "If true, reset statistics after reading."), + new MBeanParameterInfo("fast", "java.lang.Boolean", + "If true, only return statistics which do " + + "not require expensive computation.") + }; + + private static final MBeanOperationInfo OP_ENV_STAT_INFO = + new MBeanOperationInfo(OP_ENV_STAT, + "Get environment statistics.", + statParams, + "java.lang.String", + MBeanOperationInfo.INFO); + + private static final MBeanOperationInfo OP_TXN_STAT_INFO = + new MBeanOperationInfo(OP_TXN_STAT, + "Get transactional statistics.", + statParams, + "java.lang.String", + MBeanOperationInfo.INFO); + + private static final MBeanOperationInfo OP_DB_NAMES_INFO = + new MBeanOperationInfo(OP_DB_NAMES, + "Get the names of databases in the environment.", + new MBeanParameterInfo[0], // no params + "java.util.ArrayList", + MBeanOperationInfo.INFO); + + private static final MBeanParameterInfo[] dbStatParams = { + new MBeanParameterInfo("clear", "java.lang.Boolean", + "If true, reset statistics after reading."), + new MBeanParameterInfo("fast", "java.lang.Boolean", + "If true, only return statistics which do " + + "not require expensive computation. " + + "Currently all database stats are not fast."), + new MBeanParameterInfo("databaseName", "java.lang.String", + "database name") + }; + + private static final MBeanOperationInfo OP_DB_STAT_INFO = + new MBeanOperationInfo(OP_DB_STAT, + "Get database statistics.", + dbStatParams, + "java.lang.String", + MBeanOperationInfo.INFO); + + /* target JE environment home directory. */ + private File environmentHome; + + /* + * If canConfigure is true, this helper will make environment configuration + * attributes available in the mbean metadata. Configuration attributes + * will be saved in the openConfig instance. + */ + private boolean canConfigure; + private EnvironmentConfig openConfig; + + /* true if the mbean metadata needs to be refreshed. */ + private boolean needReset; + + /* + * Save whether the environment was open the last time we fetched mbean + * attributes. Use to detect a change in environment status. + */ + private boolean envWasOpen; + + /** + * Instantiate a helper, specifying environment home and open capabilities. + * + * @param environmentHome home directory of the target JE environment. + * @param canConfigure If true, the helper will show environment + * configuration attributes. + */ + public JEMBeanHelper(File environmentHome, boolean canConfigure) { + + if (environmentHome == null) { + throw new IllegalArgumentException + ("Environment home cannot be null"); + } + this.environmentHome = environmentHome; + this.canConfigure = canConfigure; + if (canConfigure) { + openConfig = new EnvironmentConfig(); + } + } + + /** + * Return the target environment directory. + * + * @return the environment directory. 
+ */ + public File getEnvironmentHome() { + return environmentHome; + } + + /** + * If the helper was instantiated with canConfigure==true, it shows + * environment configuration attributes. Those attributes are returned + * within this EnvironmentConfig object for use in opening environments. + * + * @return EnvironmentConfig object which saves configuration attributes + * recorded through MBean attributes. + */ + public EnvironmentConfig getEnvironmentOpenConfig() { + return openConfig; + } + + /** + * Tell the MBean if the available set of functionality has changed. + * + * @return true if the MBean should regenerate its JE metadata. + */ + public synchronized boolean getNeedReset() { + return needReset; + } + + /** + * Get MBean attribute metadata for this environment. + * @param targetEnv The target JE environment. May be null if the + * environment is not open. + * @return list of MBeanAttributeInfo objects describing the available + * attributes. + */ + public List getAttributeList(Environment targetEnv) { + + /* Turn off reset because the mbean metadata is being refreshed. */ + setNeedReset(false); + + ArrayList attrList = + new ArrayList(); + + /* Add attributes for all JE environments. */ + for (int i = 0; i < COMMON_ATTR.length; i++) { + attrList.add(COMMON_ATTR[i]); + } + + if (targetEnv == null) { + if (canConfigure) { + /* Add attributes for configuring an environment. */ + for (int i = 0; i < CREATE_ATTR.length; i++) { + attrList.add(CREATE_ATTR[i]); + } + } + } else { + /* Add attributes for an open environment. */ + for (int i = 0; i < OPEN_ATTR.length; i++) { + attrList.add(OPEN_ATTR[i]); + } + + /* Add attributes for an open, transactional environment. */ + try { + EnvironmentConfig config = targetEnv.getConfig(); + if (config.getTransactional()) { + for (int i = 0; i < TRANSACTIONAL_ATTR.length; i++) { + attrList.add(TRANSACTIONAL_ATTR[i]); + } + } + } catch (DatabaseException ignore) { + /* ignore */ + } + } + + return attrList; + } + + /** + * Get an attribute value for the given environment. Check + * JEMBeanHelper.getNeedReset() after this call because the helper may + * detect that the environment has changed and that the MBean metadata + * should be reset. + * + * @param targetEnv The target JE environment. May be null if the + * environment is not open. + * @param attributeName attribute name. + * @return attribute value. + */ + public Object getAttribute(Environment targetEnv, String attributeName) + throws AttributeNotFoundException, + MBeanException { + + /* Sanity check. */ + if (attributeName == null) { + throw new AttributeNotFoundException + ("Attribute name cannot be null"); + } + + /* These attributes are available regardless of environment state. */ + try { + if (attributeName.equals(ATT_ENV_HOME)) { + return environmentHome.getCanonicalPath(); + } else if (attributeName.equals(ATT_OPEN)) { + boolean envIsOpen = (targetEnv != null); + resetIfOpenStateChanged(envIsOpen); + return new Boolean(envIsOpen); + } else if (attributeName.equals(ATT_SET_READ_ONLY)) { + return new Boolean(openConfig.getReadOnly()); + } else if (attributeName.equals(ATT_SET_TRANSACTIONAL)) { + return new Boolean(openConfig.getTransactional()); + } else if (attributeName.equals(ATT_SET_SERIALIZABLE)) { + return new Boolean(openConfig.getTxnSerializableIsolation()); + } else { + /* The rest are JE environment attributes. 
*/ + if (targetEnv != null) { + + EnvironmentConfig config = targetEnv.getConfig(); + + if (attributeName.equals(ATT_IS_READ_ONLY)) { + return new Boolean(config.getReadOnly()); + } else if (attributeName.equals(ATT_IS_TRANSACTIONAL)) { + return new Boolean(config.getTransactional()); + } else if (attributeName.equals(ATT_CACHE_SIZE)) { + return new Long(config.getCacheSize()); + } else if (attributeName.equals(ATT_CACHE_PERCENT)) { + return new Integer(config.getCachePercent()); + } else if (attributeName.equals(ATT_LOCK_TIMEOUT)) { + return new Long(config.getLockTimeout()); + } else if (attributeName.equals(ATT_IS_SERIALIZABLE)) { + return new + Boolean(config.getTxnSerializableIsolation()); + } else if (attributeName.equals(ATT_TXN_TIMEOUT)) { + return new Long(config.getTxnTimeout()); + } else { + throw new AttributeNotFoundException + ("attribute " + attributeName + " is not valid."); + } + } + return null; + } + } catch (Exception e) { + + /* + * Add both the message and the exception for easiest deciphering + * of the problem. Sometimes the original exception stacktrace gets + * hidden in server logs. + */ + throw new MBeanException(e, e.getMessage()); + } + } + + /** + * Set an attribute value for the given environment. + * + * @param targetEnv The target JE environment. May be null if the + * environment is not open. + * @param attribute name/value pair + */ + public void setAttribute(Environment targetEnv, Attribute attribute) + throws AttributeNotFoundException, + InvalidAttributeValueException { + + if (attribute == null) { + throw new AttributeNotFoundException("Attribute cannot be null"); + } + + /* Sanity check parameters. */ + String name = attribute.getName(); + Object value = attribute.getValue(); + + if (name == null) { + throw new AttributeNotFoundException + ("Attribute name cannot be null"); + } + + if (value == null) { + throw new InvalidAttributeValueException + ("Attribute value for attribute " + name + " cannot be null"); + } + + try { + if (name.equals(ATT_SET_READ_ONLY)) { + openConfig.setReadOnly(((Boolean) value).booleanValue()); + } else if (name.equals(ATT_SET_TRANSACTIONAL)) { + openConfig.setTransactional(((Boolean) value).booleanValue()); + } else if (name.equals(ATT_SET_SERIALIZABLE)) { + openConfig.setTxnSerializableIsolation + (((Boolean) value).booleanValue()); + } else { + /* Set the specified attribute if the environment is open. */ + if (targetEnv != null) { + + EnvironmentMutableConfig config = + targetEnv.getMutableConfig(); + + if (name.equals(ATT_CACHE_SIZE)) { + config.setCacheSize(((Long) value).longValue()); + targetEnv.setMutableConfig(config); + } else if (name.equals(ATT_CACHE_PERCENT)) { + config.setCachePercent(((Integer) value).intValue()); + targetEnv.setMutableConfig(config); + } else { + throw new AttributeNotFoundException + ("attribute " + name + " is not valid."); + } + } else { + throw new AttributeNotFoundException + ("attribute " + name + " is not valid."); + } + } + } catch (NumberFormatException e) { + throw new InvalidAttributeValueException("attribute name=" + name); + } catch (DatabaseException e) { + throw new InvalidAttributeValueException + ("attribute name=" + name + e.getMessage()); + } + } + + /********************************************************************/ + /* JE Operations */ + /********************************************************************/ + + /** + * Get mbean operation metadata for this environment. + * + * @param targetEnv The target JE environment. 
May be null if the + * environment is not open. + * @return List of MBeanOperationInfo describing available operations. + */ + public List getOperationList(Environment targetEnv) { + setNeedReset(false); + + List operationList = + new ArrayList(); + + if (targetEnv != null) { + + /* + * These operations are only available if the environment is open. + */ + operationList.add(OP_CLEAN_INFO); + operationList.add(OP_EVICT_INFO); + operationList.add(OP_ENV_STAT_INFO); + operationList.add(OP_DB_NAMES_INFO); + operationList.add(OP_DB_STAT_INFO); + + /* Add checkpoint only for transactional environments. */ + boolean isTransactional = false; + try { + EnvironmentConfig config = targetEnv.getConfig(); + isTransactional = config.getTransactional(); + } catch (DatabaseException e) { + /* Don't make any operations available. */ + return new ArrayList(); + } + + if (isTransactional) { + operationList.add(OP_CHECKPOINT_INFO); + operationList.add(OP_TXN_STAT_INFO); + } else { + operationList.add(OP_SYNC_INFO); + } + } + + return operationList; + } + + /** + * Invoke an operation for the given environment. + * + * @param targetEnv The target JE environment. May be null if the + * environment is not open. + * @param actionName operation name. + * @param params operation parameters. May be null. + * @param signature operation signature. May be null. + * @return the operation result + */ + public Object invoke(Environment targetEnv, + String actionName, + Object[] params, + String[] signature) + throws MBeanException { + + /* Sanity checking. */ + if (actionName == null) { + throw new IllegalArgumentException("actionName cannot be null"); + } + + try { + if (targetEnv != null) { + if (actionName.equals(OP_CLEAN)) { + int numFiles = targetEnv.cleanLog(); + return new Integer(numFiles); + } else if (actionName.equals(OP_EVICT)) { + targetEnv.evictMemory(); + return null; + } else if (actionName.equals(OP_CHECKPOINT)) { + CheckpointConfig config = new CheckpointConfig(); + if ((params != null) && (params.length > 0)) { + Boolean force = (Boolean) params[0]; + config.setForce(force.booleanValue()); + } + targetEnv.checkpoint(config); + return null; + } else if (actionName.equals(OP_SYNC)) { + targetEnv.sync(); + return null; + } else if (actionName.equals(OP_ENV_STAT)) { + return targetEnv.getStats + (getStatsConfig(params)).toString(); + } else if (actionName.equals(OP_TXN_STAT)) { + return targetEnv.getTransactionStats + (getStatsConfig(params)).toString(); + } else if (actionName.equals(OP_DB_NAMES)) { + return targetEnv.getDatabaseNames(); + } else if (actionName.equals(OP_DB_STAT)) { + DatabaseStats stats = getDatabaseStats(targetEnv, params); + return stats != null ? stats.toString() : null; + } + } + + return new IllegalArgumentException + ("actionName: " + actionName + " is not valid"); + } catch (Exception e) { + + /* + * Add both the message and the exception for easiest deciphering + * of the problem. Sometimes the original exception stacktrace gets + * hidden in server logs. + */ + throw new MBeanException(e, e.getMessage()); + } + } + + /** + * Helper for creating a StatsConfig object to use as an operation + * parameter. 
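 *
 * Editor's illustration (not part of the original source): an invoke
 * call such as
 *
 *   Object[] params = new Object[] { Boolean.TRUE, Boolean.FALSE };
 *   StatsConfig cfg = getStatsConfig(params);
 *
 * yields cfg.getClear() == true and cfg.getFast() == false; null or
 * missing entries simply leave the StatsConfig defaults in place.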
+ */
+    private StatsConfig getStatsConfig(Object[] params) {
+        StatsConfig statsConfig = new StatsConfig();
+        if ((params != null) && (params.length > 0) && (params[0] != null)) {
+            Boolean clear = (Boolean) params[0];
+            statsConfig.setClear(clear.booleanValue());
+        }
+        if ((params != null) && (params.length > 1) && (params[1] != null)) {
+            Boolean fast = (Boolean) params[1];
+            statsConfig.setFast(fast.booleanValue());
+        }
+        return statsConfig;
+    }
+
+    /**
+     * Helper to get statistics for a given database.
+     * @param params operation parameters
+     * @return DatabaseStats object
+     */
+    private DatabaseStats getDatabaseStats(Environment targetEnv,
+                                           Object[] params)
+        throws IllegalArgumentException,
+               DatabaseNotFoundException,
+               DatabaseException {
+
+        if ((params == null) || (params.length < 3)) {
+            return null;
+        }
+        String dbName = (String) params[2];
+
+        Database db = null;
+        try {
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setReadOnly(true);
+            DbInternal.setUseExistingConfig(dbConfig, true);
+            try {
+                db = targetEnv.openDatabase(null, dbName, dbConfig);
+            } catch (DatabaseExistsException e) {
+                /* Should never happen, ExclusiveCreate is false. */
+                throw EnvironmentFailureException.unexpectedException(e);
+            }
+            return db.getStats(getStatsConfig(params));
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+        }
+    }
+
+    /**
+     * No notifications are supported.
+     * @return null, since this helper defines no notifications.
+     */
+    public MBeanNotificationInfo[]
+        getNotificationInfo(Environment targetEnv) {
+        return null;
+    }
+
+    private synchronized void setNeedReset(boolean reset) {
+        needReset = reset;
+    }
+
+    private synchronized void resetIfOpenStateChanged(boolean isOpen) {
+        if (isOpen != envWasOpen) {
+            setNeedReset(true);
+            envWasOpen = isOpen;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/jmx/JEMonitor.java b/src/com/sleepycat/je/jmx/JEMonitor.java
new file mode 100644
index 0000000..0603b6d
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/JEMonitor.java
@@ -0,0 +1,544 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.jmx;
+
+import java.util.ArrayList;
+
+import javax.management.Attribute;
+import javax.management.AttributeList;
+import javax.management.AttributeNotFoundException;
+import javax.management.DynamicMBean;
+import javax.management.InvalidAttributeValueException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanOperationInfo;
+import javax.management.MBeanParameterInfo;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseStats;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.utilint.LoggerUtils;
+
+/**
+ *

        + * JEMonitor is a JMX MBean which makes statistics and basic administrative + * operations available. The MBean is registered and enabled when the system + * property JEMonitor is set. It only works on an active JE Environment, and + * an Environment can only register one instance of JEMonitor. + * + * @see Monitoring + * JE with JConsole and JMX + */ +public class JEMonitor extends JEMBean implements DynamicMBean { + + /* Attributes and operations' definition for JEMonitor concrete MBean. */ + + /* Attribute names. */ + public static final String ATT_ENV_HOME = "environmentHome"; + public static final String ATT_IS_READ_ONLY = "isReadOnly"; + public static final String ATT_IS_TRANSACTIONAL = "isTransactional"; + public static final String ATT_CACHE_SIZE = "cacheSize"; + public static final String ATT_CACHE_PERCENT = "cachePercent"; + public static final String ATT_LOCK_TIMEOUT = "lockTimeout"; + public static final String ATT_IS_SERIALIZABLE = "isSerializableIsolation"; + public static final String ATT_TXN_TIMEOUT = "transactionTimeout"; + + /* Attributes available for any Environments. */ + private static final MBeanAttributeInfo[] COMMON_ATTR = { + + new MBeanAttributeInfo + (ATT_ENV_HOME, "java.lang.String", "Environment home directory.", + true, false , false ) + }; + + /* Available attributes for an open Environment. */ + private static final MBeanAttributeInfo[] OPEN_ATTR = { + + new MBeanAttributeInfo + (ATT_IS_READ_ONLY, "java.lang.Boolean", + "true if this Environment is read only.", true, false, true), + + new MBeanAttributeInfo + (ATT_IS_TRANSACTIONAL, "java.lang.Boolean", + "true if this Environment supports transactions.", + true, false, true), + + new MBeanAttributeInfo + (ATT_CACHE_SIZE, "java.lang.Long", "Cache size, in bytes.", + true, true, false), + + new MBeanAttributeInfo + (ATT_CACHE_PERCENT, "java.lang.Integer", + "By default, cache size is (cachePercent * JVM maximum " + + "memory). To change the cache size using a percentage of the " + + "heap size, set the cache size to 0 and cachePercent to the " + + "desired percentage value.", + true, true, false), + + new MBeanAttributeInfo + (ATT_LOCK_TIMEOUT, "java.lang.Long", + "Lock timeout, in microseconds.", true, false, false) + }; + + /* Attributes available only for an open transactional Environment. */ + private static final MBeanAttributeInfo[] TRANSACTIONAL_ATTR = { + + new MBeanAttributeInfo + (ATT_IS_SERIALIZABLE, "java.lang.Boolean", + "true if this environment provides Serializable (degree 3) " + + "isolation. The default is RepeatableRead isolation.", + true, false, true), + + new MBeanAttributeInfo + (ATT_TXN_TIMEOUT, "java.lang.Long", + "Transaction timeout, in seconds. A value of 0 means there is " + + "no timeout.", true, false, false) + }; + + /* --------------------- Operations -------------------------- */ + + /* Operation names. 
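 *
 * Editor's illustration (not part of the original source): a remote JMX
 * client drives these operations through MBeanServerConnection.invoke,
 * e.g. (jeMonitorName being a hypothetical ObjectName found via a
 * queryNames lookup):
 *
 *   String stats = (String) connection.invoke(
 *       jeMonitorName, "getEnvironmentStats",
 *       new Object[] { Boolean.TRUE, Boolean.TRUE },    // clear, fast
 *       new String[] { "java.lang.Boolean", "java.lang.Boolean" });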
*/ + static final String OP_CLEAN = "cleanLog"; + static final String OP_EVICT = "evictMemory"; + static final String OP_CHECKPOINT = "checkpoint"; + static final String OP_SYNC = "sync"; + static final String OP_TXN_STAT = "getTxnStats"; + static final String OP_DB_NAMES = "getDatabaseNames"; + static final String OP_DB_STAT = "getDatabaseStats"; + static final String OP_ENV_CONFIG = "getEnvConfig"; + + /** + * @hidden + */ + public static final String OP_ENV_STAT = "getEnvironmentStats"; + + /** + * @hidden + */ + public static final String OP_GET_TIPS = "getTips"; + + private static final MBeanOperationInfo OP_CLEAN_INFO = + new MBeanOperationInfo + (OP_CLEAN, + "Remove obsolete environment log files. Zero or more log files " + + "will be cleaned as necessary to bring the disk space utilization " + + "of the environment above the configured minimum utilization " + + "threshold as determined by the setting je.cleaner.minUtilization. " + + "Returns the number of files cleaned. These will be deleted at the " + + "next qualifying checkpoint.", + new MBeanParameterInfo[0], "java.lang.Integer", + MBeanOperationInfo.UNKNOWN); + + private static final MBeanOperationInfo OP_EVICT_INFO = + new MBeanOperationInfo + (OP_EVICT, + "Reduce cache usage to the threshold determined by the setting " + + "je.evictor.useMemoryFloor. ", + new MBeanParameterInfo[0], "void", MBeanOperationInfo.UNKNOWN); + + /* Parameter for checkpoint operation. */ + private static final MBeanParameterInfo[] checkpointParams = { + new MBeanParameterInfo("force", "java.lang.Boolean", + "If true, force a checkpoint even if " + + "there has been no activity since the last " + + "checkpoint. Returns true if a checkpoint " + + "executed.") + }; + + private static final MBeanOperationInfo OP_CHECKPOINT_INFO = + new MBeanOperationInfo + (OP_CHECKPOINT, "Checkpoint the environment.", checkpointParams, + "void", MBeanOperationInfo.UNKNOWN); + + private static final MBeanOperationInfo OP_SYNC_INFO = + new MBeanOperationInfo + (OP_SYNC, "Flush the environment to stable storage.", + new MBeanParameterInfo[0], "void", MBeanOperationInfo.UNKNOWN); + + private static final MBeanOperationInfo OP_ENV_STAT_INFO = + new MBeanOperationInfo + (OP_ENV_STAT, "Get environment statistics.", + statParams, "java.lang.String", MBeanOperationInfo.INFO); + + private static final MBeanOperationInfo OP_TXN_STAT_INFO = + new MBeanOperationInfo + (OP_TXN_STAT, "Get transactional statistics.", + statParams, "java.lang.String", MBeanOperationInfo.INFO); + + private static final MBeanOperationInfo OP_DB_NAMES_INFO = + new MBeanOperationInfo + (OP_DB_NAMES, "Get the names of databases in the environment.", + new MBeanParameterInfo[0], "java.util.ArrayList", + MBeanOperationInfo.INFO); + + private static final MBeanParameterInfo[] dbStatParams = { + new MBeanParameterInfo("clear", "java.lang.Boolean", + "If true, reset statistics after reading."), + new MBeanParameterInfo("fast", "java.lang.Boolean", + "If true, only return statistics which do " + + "not require expensive computation. 
" + + "Currently all database stats are not fast."), + new MBeanParameterInfo("databaseName", "java.lang.String", + "database name") + }; + + private static final MBeanOperationInfo OP_DB_STAT_INFO = + new MBeanOperationInfo + (OP_DB_STAT, "Get database statistics.", + dbStatParams, "java.lang.String", MBeanOperationInfo.INFO); + + private static final MBeanOperationInfo OP_ENV_CONFIG_INFO = + new MBeanOperationInfo + (OP_ENV_CONFIG, "Get environment configuration.", + new MBeanParameterInfo[0], "java.lang.String", + MBeanOperationInfo.INFO); + + protected JEMonitor(Environment env) { + super(env); + } + + public JEMonitor() { + super(); + } + + @Override + protected void initClassFields() { + currentClass = JEMonitor.class; + className = "JEMonitor"; + DESCRIPTION = "Monitor an open Berkeley DB, Java Edition Environment."; + } + + /** + * @see DynamicMBean#getAttribute + */ + @SuppressWarnings("deprecation") + public Object getAttribute(String attributeName) + throws AttributeNotFoundException, + MBeanException { + + if (attributeName == null) { + throw new AttributeNotFoundException + ("Attribute name can't be null."); + } + + try { + EnvironmentConfig envConfig = env.getConfig(); + if (attributeName.equals(ATT_ENV_HOME)) { + return env.getHome().getCanonicalPath(); + } else if (attributeName.equals(ATT_IS_READ_ONLY)) { + return new Boolean(envConfig.getReadOnly()); + } else if (attributeName.equals(ATT_IS_TRANSACTIONAL)) { + return new Boolean(envConfig.getTransactional()); + } else if (attributeName.equals(ATT_CACHE_SIZE)) { + return new Long(envConfig.getCacheSize()); + } else if (attributeName.equals(ATT_CACHE_PERCENT)) { + return new Integer(envConfig.getCachePercent()); + } else if (attributeName.equals(ATT_LOCK_TIMEOUT)) { + return new Long(envConfig.getLockTimeout()); + } else if (attributeName.equals(ATT_IS_SERIALIZABLE)) { + return new + Boolean(envConfig.getTxnSerializableIsolation()); + } else if (attributeName.equals(ATT_TXN_TIMEOUT)) { + return new Long(envConfig.getTxnTimeout()); + } else { + throw new AttributeNotFoundException + ("Attribute " + attributeName + " is not valid."); + } + } catch (DatabaseException e) { + /* Do not pass JE exceptions to the mbean client. */ + throw new MBeanException(new RuntimeException(e.getMessage())); + } catch (Exception e) { + /* Ok to pass general Java exception to the mbean client. */ + throw new MBeanException(e, e.getMessage()); + } + } + + /** + * @see DynamicMBean#setAttribute + */ + public void setAttribute(Attribute attribute) + throws AttributeNotFoundException, + InvalidAttributeValueException, + MBeanException { + + if (attribute == null) { + throw new AttributeNotFoundException("Attribute can't be null."); + } + + /* Sanity check parameters. 
*/ + String name = attribute.getName(); + Object value = attribute.getValue(); + + if (name == null) { + throw new AttributeNotFoundException + ("Attribute name can't be null."); + } + + if (value == null) { + throw new InvalidAttributeValueException + ("Attribute value for attribute " + name + " can't be null"); + } + + try { + EnvironmentMutableConfig mutableConfig = env.getMutableConfig(); + + if (name.equals(ATT_CACHE_SIZE)) { + mutableConfig.setCacheSize(((Long) value).longValue()); + env.setMutableConfig(mutableConfig); + } else if (name.equals(ATT_CACHE_PERCENT)) { + mutableConfig.setCachePercent(((Integer) value).intValue()); + env.setMutableConfig(mutableConfig); + } else { + throw new AttributeNotFoundException + ("Attribute " + name + " is not valid."); + } + } catch (NumberFormatException e) { + throw new InvalidAttributeValueException + ("Attribute value for attribute " + name + " is not valid."); + } catch (DatabaseException e) { + throw new InvalidAttributeValueException + ("Setting value for attribute " + name + + "is invalid because of " + e.getMessage()); + } + } + + /** + * @see DynamicMBean#getAttributes + */ + public AttributeList getAttributes(String[] attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("Attributes can't be null"); + } + + /* Get each requested attribute. */ + AttributeList results = new AttributeList(); + + for (int i = 0; i < attributes.length; i++) { + try { + Object value = getAttribute(attributes[i]); + results.add(new Attribute(attributes[i], value)); + } catch (Exception e) { + e.printStackTrace(); + } + } + + return results; + } + + /** + * @see DynamicMBean#setAttributes + */ + public AttributeList setAttributes(AttributeList attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("Attribute list can't be null"); + } + + /* Set each attribute specified. */ + AttributeList results = new AttributeList(); + + for (int i = 0; i < attributes.size(); i++) { + Attribute attr = (Attribute) attributes.get(i); + try { + setAttribute(attr); + + /* + * Add the name and new value to the result list. Be sure to + * ask the MBean for the new value, rather than simply using + * attr.getValue(), because the new value may not be same if it + * is modified according to the JE implementation. + */ + String name = attr.getName(); + Object newValue = getAttribute(name); + results.add(new Attribute(name, newValue)); + } catch (Exception e) { + e.printStackTrace(); + } + } + + return results; + } + + /** + * @see DynamicMBean#invoke + */ + public Object invoke(String actionName, + Object[] params, + String[] signature) + throws MBeanException { + + /* Sanity checking. 
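 *
 * Editor's illustration (not part of the original source): forcing a
 * checkpoint from a JMX client looks like
 *
 *   connection.invoke(jeMonitorName, "checkpoint",
 *                     new Object[] { Boolean.TRUE },        // force
 *                     new String[] { "java.lang.Boolean" });
 *
 * with jeMonitorName a hypothetical ObjectName for this MBean.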
*/ + if (actionName == null) { + throw new IllegalArgumentException("ActionName can't be null."); + } + + try { + if (actionName.equals(OP_CLEAN)) { + int numFiles = env.cleanLog(); + return new Integer(numFiles); + } else if (actionName.equals(OP_EVICT)) { + env.evictMemory(); + return null; + } else if (actionName.equals(OP_CHECKPOINT)) { + CheckpointConfig ckptConfig = new CheckpointConfig(); + if ((params != null) && (params.length > 0)) { + Boolean force = (Boolean) params[0]; + ckptConfig.setForce(force.booleanValue()); + } + env.checkpoint(ckptConfig); + return null; + } else if (actionName.equals(OP_SYNC)) { + env.sync(); + return null; + } else if (actionName.equals(OP_ENV_STAT)) { + return env.getStats(getStatsConfig(params)).toString(); + } else if (actionName.equals(OP_TXN_STAT)) { + return env.getTransactionStats + (getStatsConfig(params)).toString(); + } else if (actionName.equals(OP_DB_NAMES)) { + return env.getDatabaseNames(); + } else if (actionName.equals(OP_DB_STAT)) { + DatabaseStats stats = getDatabaseStats(params); + return stats != null ? stats.toString() : null; + } else if (actionName.equals(OP_GET_TIPS)) { + return env.getStats + (getStatsConfig(new Object[] {false, true})).getTips(); + } else if (actionName.equals(OP_ENV_CONFIG)) { + return env.getConfig().toString(); + } + + return new IllegalArgumentException + ("ActionName: " + actionName + " is not valid."); + } catch (DatabaseException e) { + + /* + * Add the message for easiest deciphering of the problem. Since + * the original exception cannot be transferred, send the exception + * stack. + */ + throw new MBeanException(new RuntimeException + (e.getMessage() + + LoggerUtils.getStackTrace(e))); + } + } + + /** + * Helper to get statistics for a given database. + * + * @param params operation parameters + * @return DatabaseStats object + */ + private DatabaseStats getDatabaseStats(Object[] params) + throws IllegalArgumentException, + DatabaseException { + + if ((params == null) || (params.length < 3)) { + return null; + } + + String dbName = (String)params[2]; + + Database db = null; + try { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + DbInternal.setUseExistingConfig(dbConfig, true); + db = env.openDatabase(null, dbName, dbConfig); + + return db.getStats(getStatsConfig(params)); + } finally { + if (db != null) { + db.close(); + } + } + } + + @Override + protected void doRegisterMBean(Environment env) + throws Exception { + + server.registerMBean(new JEMonitor(env), jeName); + } + + @Override + protected MBeanAttributeInfo[] getAttributeList() { + ArrayList attrList = + new ArrayList(); + + if (env == null) { + return null; + } + + /* Add attributes for all JE Environments. */ + for (int i = 0; i < COMMON_ATTR.length; i++) { + attrList.add(COMMON_ATTR[i]); + } + + /* Add attributes for an open Environment. */ + for (int i = 0; i < OPEN_ATTR.length; i++) { + attrList.add(OPEN_ATTR[i]); + } + + /* Add attributes for an open, transactional Environment. 
*/ + try { + EnvironmentConfig config = env.getConfig(); + if (config.getTransactional()) { + for (int i = 0; i < TRANSACTIONAL_ATTR.length; i++) { + attrList.add(TRANSACTIONAL_ATTR[i]); + } + } + } catch (DatabaseException ignore) { + /* ignore */ + } + + return attrList.toArray(new MBeanAttributeInfo[attrList.size()]); + } + + @Override + protected void addOperations() { + if (env == null) { + return; + } + + operationList.add(OP_CLEAN_INFO); + operationList.add(OP_EVICT_INFO); + operationList.add(OP_ENV_STAT_INFO); + operationList.add(OP_DB_NAMES_INFO); + operationList.add(OP_DB_STAT_INFO); + operationList.add(OP_ENV_CONFIG_INFO); + + /* Add checkpoint only for transactional Environments. */ + try { + if (env.getConfig().getTransactional()) { + operationList.add(OP_CHECKPOINT_INFO); + operationList.add(OP_TXN_STAT_INFO); + } else { + operationList.add(OP_SYNC_INFO); + } + } catch (DatabaseException e) { + /* Don't make any operations available. */ + operationList = new ArrayList(); + return; + } + } +} diff --git a/src/com/sleepycat/je/jmx/package.html b/src/com/sleepycat/je/jmx/package.html new file mode 100644 index 0000000..519eacf --- /dev/null +++ b/src/com/sleepycat/je/jmx/package.html @@ -0,0 +1,23 @@ + + + + + + +Implementations of JMX MBeans for JE. + +
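Editor's note, illustration only (not part of the original package.html):
these MBeans are typically enabled by setting the JEMonitor system
property when the application JVM is launched, for example

    java -DJEMonitor=true -cp je.jar:myapp.jar MyApp

after which JConsole can attach to the process and browse the registered
JEMonitor MBean. The exact property value shown is an assumption; see the
JEMonitor class documentation above.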

        Package Specification

        +This package provides deployable JMX MBeans for JE. + + +@see Monitoring +JE with JConsole and JMX + + diff --git a/src/com/sleepycat/je/jmx/plugin/JEStats.java b/src/com/sleepycat/je/jmx/plugin/JEStats.java new file mode 100644 index 0000000..e2bd1a7 --- /dev/null +++ b/src/com/sleepycat/je/jmx/plugin/JEStats.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx.plugin; + +import java.util.HashMap; + +import javax.management.MBeanServerConnection; + +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.jmx.JEMonitor; + +public class JEStats extends Stats { + private static final long serialVersionUID = 2327923744424679603L; + + public JEStats(MBeanServerConnection connection) { + super(connection); + } + + @Override + protected void initVariables() { + statsTitles = EnvironmentStats.getStatGroupTitles(); + opName = JEMonitor.OP_ENV_STAT; + mBeanNamePrefix = JEStatsPlugin.mBeanNamePrefix; + } + + @SuppressWarnings("unchecked") + @Override + protected void generateTips() { + try { + tips = (HashMap) connection.invoke + (objName, JEMonitor.OP_GET_TIPS, + new Object[] {}, new String[] {}); + updateTips(); + } catch (Exception e) { + e.printStackTrace(); + } + } +} diff --git a/src/com/sleepycat/je/jmx/plugin/JEStatsPlugin.java b/src/com/sleepycat/je/jmx/plugin/JEStatsPlugin.java new file mode 100644 index 0000000..b8c8cd7 --- /dev/null +++ b/src/com/sleepycat/je/jmx/plugin/JEStatsPlugin.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx.plugin; + +import java.util.LinkedHashMap; + +import javax.management.ObjectName; +import javax.swing.JPanel; + +public class JEStatsPlugin extends StatsPlugin { + public static String mBeanNamePrefix = + "com.sleepycat.je.jmx:name=*JEMonitor(*"; + + @Override + protected void initTabs() { + if (tabs == null) { + tabs = new LinkedHashMap(); + try { + ObjectName name = new ObjectName(mBeanNamePrefix); + int count = getContext().getMBeanServerConnection(). 
+ queryNames(name, null).size(); + + if (count > 0) { + Stats status = + new JEStats(getContext().getMBeanServerConnection()); + tabs.put("JE Statistics", status); + stats.add(status); + } else { + tabs.put("JE Statistics", new JPanel()); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + } +} diff --git a/src/com/sleepycat/je/jmx/plugin/Stats.java b/src/com/sleepycat/je/jmx/plugin/Stats.java new file mode 100644 index 0000000..e29aaa3 --- /dev/null +++ b/src/com/sleepycat/je/jmx/plugin/Stats.java @@ -0,0 +1,1366 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx.plugin; + +import java.awt.BorderLayout; +import java.awt.Color; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.GridLayout; +import java.awt.LayoutManager; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.KeyEvent; +import java.awt.event.KeyListener; +import java.awt.event.MouseEvent; +import java.awt.event.MouseListener; +import java.awt.event.MouseMotionListener; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.StringTokenizer; +import java.util.Timer; +import java.util.TimerTask; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; + +import javax.management.MBeanServerConnection; +import javax.management.ObjectName; +import javax.swing.BorderFactory; +import javax.swing.JButton; +import javax.swing.JCheckBox; +import javax.swing.JCheckBoxMenuItem; +import javax.swing.JComboBox; +import javax.swing.JComponent; +import javax.swing.JFileChooser; +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JMenuItem; +import javax.swing.JOptionPane; +import javax.swing.JPanel; +import javax.swing.JPopupMenu; +import javax.swing.JScrollPane; +import javax.swing.JSplitPane; +import javax.swing.JTable; +import javax.swing.JTextField; +import javax.swing.ListSelectionModel; +import javax.swing.SwingWorker; +import javax.swing.ToolTipManager; +import javax.swing.filechooser.FileFilter; +import javax.swing.filechooser.FileNameExtensionFilter; +import javax.swing.table.AbstractTableModel; +import javax.swing.table.DefaultTableCellRenderer; + +import sun.tools.jconsole.PlotterPanel; +import sun.tools.jconsole.TimeComboBox; +import sun.tools.jconsole.Plotter.Unit; + +public abstract class Stats extends JPanel { + + private static final long serialVersionUID = 6041540234044035106L; + private static final String STATS_COLLECTOR = "JEMonitor Stats Collector"; + + private boolean hideZeroValue = false; + private boolean doLog = false; + private int mBeansNum; + private int selectedRow = -1; + + private long statsIntervalMillis = 10000; + + /* Collection variables used in this file. 
+ */
+    private Map<ObjectName, LogObject> logMap;
+    private Map<String, Boolean> shownStats;
+    private Map<ObjectName, Map<String, String>> valueStore;
+    private List<GraphFrame> frameList;
+    protected TreeMap<String, ObjectName> comboToObjects;
+
+    /*
+     * Collections used to save Stats and ObjectNames for showing in tab.
+     * These values are set by the statsCollector Timer thread, and may
+     * be concurrently read by other threads, such as the JConsole refresh
+     * thread.
+     */
+    private Map<ObjectName, Map<String, String>> savedStats;
+    private volatile List<ObjectName> savedObjectNames;
+
+    /* Combo box for selecting an MBean. */
+    private final int mBeanComboBoxLength = 50;
+    private JComboBox mBeansComboBox;
+    private ActionListener mBeanComboBoxListener;
+
+    /* Stats table settings. */
+    private JCheckBox hideZeroValueBox;
+    private JCheckBox cumulativeStatsBox;
+    private JTextField statsIntervalText;
+
+    /* Stats logging settings. */
+    private JButton saveLogButton;
+    private JButton startLogButton;
+    private JButton stopLogButton;
+    private SaveLogFileChooser fileChooser;
+
+    /* Stats table components. */
+    private JTable statsTable;
+    private StatsTableModel statsModel;
+    private JPopupMenu popup;
+    private JCheckBoxMenuItem logMenuItem;
+    private JMenuItem graphMenuItem;
+
+    /* MBean related parameters, customized in subclasses. */
+    private final Object[] envStatParams =
+        new Object[] {true /* setClear */, true /* setFast */ };
+    private final String[] signature =
+        new String[] {"java.lang.boolean", "java.lang.boolean" };
+    protected static MBeanServerConnection connection;
+    protected String[] statsTitles;
+    protected String opName;
+    protected String mBeanNamePrefix;
+    protected ObjectName objName;
+    protected Map<String, String> tips;
+
+    private Timer statsCollector;
+    private final TimerTask drawNewStats = new StatsCollectionTask(false);
+
+    public Stats(MBeanServerConnection connection) {
+        Stats.connection = connection;
+
+        setLayout(new FlowLayout());
+        ToolTipManager.sharedInstance().setDismissDelay(10000);
+
+        initVariables();
+
+        /* Initialize the containers. */
+        initContainers();
+
+        /* Create GUI components. */
+        initGUIs();
+
+        /* Start collecting stats. */
+        statsCollector = new Timer(STATS_COLLECTOR);
+        statsCollector.scheduleAtFixedRate(new StatsCollectionTask(),
+                                           0, statsIntervalMillis);
+
+        /* Draw the stats tab when the plugin is first started. */
+        drawNewStats.run();
+    }
+
+    public StatsTableModel getTModel() {
+        return statsModel;
+    }
+
+    protected abstract void initVariables();
+
+    protected abstract void generateTips();
+
+    private void initContainers() {
+        valueStore = new HashMap<ObjectName, Map<String, String>>();
+        logMap = new HashMap<ObjectName, LogObject>();
+        shownStats = new HashMap<String, Boolean>();
+        comboToObjects = new TreeMap<String, ObjectName>();
+        frameList = new ArrayList<GraphFrame>();
+
+        savedStats = new ConcurrentHashMap<ObjectName, Map<String, String>>();
+        savedObjectNames = new ArrayList<ObjectName>();
+
+        for (ObjectName name : getBeansNames()) {
+            Map<String, String> storeMap =
+                new LinkedHashMap<String, String>();
+            Map<String, String> map = generateStats(name);
+            for (Map.Entry<String, String> entry : map.entrySet()) {
+                String key = entry.getKey();
+                if (key.contains(":")) {
+                    storeMap.put(key.substring(0, key.indexOf(":")), "0");
+                } else {
+                    storeMap.put(key, "0");
+                }
+            }
+            valueStore.put(name, storeMap);
+            logMap.put(name, new LogObject());
+            comboToObjects.put(getMBeanComboBoxString(name), name);
+        }
+
+        for (int i = 0; i < statsTitles.length; i++) {
+            shownStats.put(statsTitles[i], true);
+        }
+
+        this.objName = getFirstObjectName();
+        this.mBeansNum = logMap.size();
+
+        generateTips();
+    }
+
+    private ObjectName getFirstObjectName() {
+        return comboToObjects.firstEntry().getValue();
+    }
+
+    /* Update the tips into a form suitable for display as HTML tooltips.
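 *
 * Editor's note: the <html> and <br> markup restored in the method body
 * below is an assumption; the angle-bracketed tags were evidently eaten
 * during HTML extraction of this diff. The intent is the standard Swing
 * idiom: wrap each tip in <html>...</html> and break it with <br> roughly
 * every 80 characters so tooltips render on multiple lines.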
+ */
+    protected void updateTips() {
+        for (Map.Entry<String, String> entry : tips.entrySet()) {
+            String value = entry.getValue();
+            String formatStr = new String();
+            boolean stop = false;
+            while (!stop) {
+                if (value.length() <= 80) {
+                    stop = true;
+                    formatStr += value;
+                } else {
+                    int endIndex = 79;
+                    String phrase = value.substring(0, endIndex);
+                    if (!phrase.endsWith(" ")) {
+                        endIndex = phrase.lastIndexOf(" ");
+                    }
+                    formatStr += value.substring(0, endIndex) + "<br>";
+                    value = value.substring(endIndex, value.length());
+                }
+            }
+            formatStr = "<html>" + formatStr + "</html>";
+            tips.put(entry.getKey(), formatStr);
+        }
+    }
+
+    private void initGUIs() {
+        /* Panel for selecting a specific environment. */
+        JPanel mBeanPanel = createMBeanPanel();
+        /* Panel for stat settings. */
+        JPanel setPanel = createSettingPanel();
+        /* Controls for logging stats to a file. */
+        JPanel logPanel = createLogPanel();
+        /* Display of stats values. */
+        JPanel statsPanel = createStatsPanel();
+
+        createMenu();
+
+        add(mBeanPanel);
+        add(setPanel);
+        add(logPanel);
+        add(statsPanel);
+    }
+
+    /* Create the MBean-selection part of the GUI. */
+    private JPanel createMBeanPanel() {
+        mBeansComboBox = new JComboBox();
+        mBeansComboBox.setPrototypeDisplayValue(generateString());
+        mBeanComboBoxListener = new BeanComboBoxListener();
+        initMBeanComboBox(getBeansNames());
+
+        return generatePanel(new JComponent[] {
+            new JLabel("Choose JE Environment:"), mBeansComboBox },
+            new FlowLayout(), "Choose JE MBean");
+    }
+
+    /* Build a fixed-width prototype string so the combo box is wide enough
+       even when MBean names are short. */
+    private String generateString() {
+        String length = new String();
+        for (int i = 0; i < mBeanComboBoxLength; i++) {
+            length += "X";
+        }
+
+        return length;
+    }
+
+    /* Initialize the contents of the mbean combobox. */
+    private void initMBeanComboBox(List<ObjectName> names) {
+        /* If the number of MBeans changes, update the map also. */
+        if (names.size() != comboToObjects.size()) {
+            comboToObjects = new TreeMap<String, ObjectName>();
+            for (ObjectName name : names) {
+                comboToObjects.put(getMBeanComboBoxString(name), name);
+            }
+        }
+
+        for (Map.Entry<String, ObjectName> entry :
+             comboToObjects.entrySet()) {
+            mBeansComboBox.addItem(entry.getKey());
+        }
+        mBeansComboBox.setSelectedIndex(0);
+        mBeansComboBox.addActionListener(mBeanComboBoxListener);
+    }
+
+    /* Return the MBean name to display in the mBeanComboBox. */
+    private String getMBeanComboBoxString(ObjectName name) {
+        String envHome = name.toString().substring
+            (name.toString().indexOf("(") + 1, name.toString().length() - 1);
+        if (envHome.length() > 40) {
+            envHome = envHome.substring(0, 19) + "..." +
+                envHome.substring(envHome.length() - 20, envHome.length());
+        }
+
+        return envHome;
+    }
+
+    /* Create the stats setting panel. */
+    private JPanel createSettingPanel() {
+        cumulativeStatsBox = new JCheckBox("Display cumulative stats", false);
+        cumulativeStatsBox.addActionListener(new ClearStatsBoxListener());
+
+        hideZeroValueBox = new JCheckBox("Hide zero values", false);
+        hideZeroValueBox.addActionListener(new HideZeroValueBoxListener());
+
+        statsIntervalText = new JTextField("10", 4);
+        statsIntervalText.addKeyListener(new StatsIntervalListener());
+
+        return generatePanel(new JComponent[] {
+            new JLabel("Collection interval (secs):"),
+            statsIntervalText,
+            cumulativeStatsBox,
+            hideZeroValueBox },
+            new FlowLayout(), "Settings");
+    }
+
+    /* Create the log setting panel. */
+    private JPanel createLogPanel() {
+        startLogButton =
+            createButton("Start Recording", new StartLogListener());
+        startLogButton.setEnabled(false);
+        stopLogButton = createButton("Stop Recording", new StopLogListener());
+        stopLogButton.setEnabled(false);
+        saveLogButton = createButton("Record Statistics To ...",
+                                     new SaveLogListener(this));
+
+        return generatePanel(new JComponent[] {
+            startLogButton, stopLogButton, saveLogButton },
+            new FlowLayout(), "Record Statistics");
+    }
+
+    /* Create the table panel. */
+    private JPanel createStatsPanel() {
+        /* Create the panel for choosing which stats types to display.
+ */
+        JPanel leftPanel = new JPanel();
+        leftPanel.setLayout(new GridLayout(15, 1));
+        leftPanel.add(new JLabel(" Stats to Display"));
+        for (int i = 0; i < statsTitles.length; i++) {
+            createCheckBox(statsTitles[i], leftPanel);
+        }
+        JScrollPane left = new JScrollPane(leftPanel);
+
+        /* Add JE stats table. */
+        statsModel = new StatsTableModel();
+        statsTable = new JTable(statsModel);
+        statsTable.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
+        statsTable.setDefaultRenderer(String.class, new StringRenderer());
+        statsTable.setIntercellSpacing(new Dimension(6, 3));
+        statsTable.setRowHeight(statsTable.getRowHeight() + 4);
+        statsTable.addMouseListener(new TableMouseListener());
+        statsTable.addMouseMotionListener(new TableMouseMotionListener());
+        JScrollPane right = new JScrollPane(statsTable);
+        right.setAlignmentX(Component.CENTER_ALIGNMENT);
+
+        JSplitPane splitPane =
+            new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, left, right);
+        splitPane.setOneTouchExpandable(true);
+        splitPane.setResizeWeight(0.3);
+
+        /* Add table to a panel and set the layout. */
+        return generatePanel(new JComponent[] { splitPane },
+                             new FlowLayout(), "JE Stats Table");
+    }
+
+    /* Create the pop-up menu for the stats table. */
+    private void createMenu() {
+        popup = new JPopupMenu();
+        logMenuItem = new JCheckBoxMenuItem("Log This Stat");
+        logMenuItem.addActionListener(new LogMenuListener());
+        popup.add(logMenuItem);
+        graphMenuItem = new JMenuItem("Graph This Stat");
+        graphMenuItem.addActionListener(new GraphMenuListener());
+        popup.add(graphMenuItem);
+    }
+
+    /* Create a CheckBox for choosing which stats to show. */
+    private void createCheckBox(String title, JPanel container) {
+        JCheckBox checkBox = new JCheckBox(title, true);
+        checkBox.addActionListener(new StatsTypeListener());
+        container.add(checkBox);
+    }
+
+    private JButton createButton(String text, ActionListener listener) {
+        JButton button = new JButton(text);
+        button.addActionListener(listener);
+
+        return button;
+    }
+
+    /*
+     * Add the provided components to a panel using the assigned layout,
+     * surrounded by a titled border.
+     */
+    private JPanel generatePanel(JComponent[] components,
+                                 LayoutManager layout,
+                                 String panelName) {
+        JPanel panel = new JPanel(layout);
+        for (JComponent component : components) {
+            panel.add(component);
+        }
+
+        /* If the panelName is "none", no border is added. */
+        if (!"none".equals(panelName)) {
+            panel.setBorder
+                (BorderFactory.createCompoundBorder
+                 (BorderFactory.createTitledBorder(panelName),
+                  BorderFactory.createEmptyBorder(0, 0, 0, 0)));
+        }
+
+        return panel;
+    }
+
+    /* Set the connection for this plugin. */
+    public void setConnection(MBeanServerConnection connection) {
+        Stats.connection = connection;
+    }
+
+    /* Get the connection to the MBeanServer. */
+    public static MBeanServerConnection getConnection() {
+        return connection;
+    }
+
+    /* Remove a GraphFrame from the list and release its resources. */
+    public void removeGraphFrame(ObjectName beanName,
+                                 String graphStats) {
+        CopyOnWriteArrayList<GraphFrame> list =
+            new CopyOnWriteArrayList<GraphFrame>(frameList);
+        for (GraphFrame frame : list) {
+            if (frame.getBeanName().equals(beanName) &&
+                frame.getStatsName().equals(graphStats)) {
+                frameList.remove(frame);
+                frame = null;
+            }
+        }
+    }
+
+    /* Get results for table stats. */
+    public synchronized List<Map.Entry<String, String>> getResultsList() {
+
+        if (savedObjectNames == null || savedObjectNames.size() == 0) {
+            return null;
+        }
+
+        /* Refresh the table if there is an MBean change.
+ */
+        repaintComboBox(savedObjectNames);
+
+        Map<String, String> displayStats = savedStats.get(objName);
+
+        if (displayStats == null || displayStats.size() == 0) {
+            return null;
+        }
+
+        /* Remove the stats we don't want to show in the table. */
+        removeUnShownTypeStats(displayStats);
+
+        /* Hide zero values if we choose to hide them. */
+        if (hideZeroValue) {
+            hideZeroValues(displayStats);
+        }
+
+        /* If every stat has been filtered out, show a placeholder row. */
+        if (displayStats.size() == 0) {
+            displayStats.put("No Stats", "");
+        }
+
+        List<Map.Entry<String, String>> list =
+            new ArrayList<Map.Entry<String, String>>(displayStats.entrySet());
+
+        return list;
+    }
+
+    /*
+     * Go through the stats map and remove the stats that belong to the
+     * types the user has chosen not to display.
+     */
+    private void removeUnShownTypeStats(Map<String, String> map) {
+        Object[] keys = map.keySet().toArray();
+        for (Map.Entry<String, Boolean> stats : shownStats.entrySet()) {
+
+            /* If the stats type is not chosen, remove it from the map. */
+            if (!stats.getValue()) {
+                for (Object key : keys) {
+                    if (key.toString().contains(stats.getKey())) {
+                        map.remove(key.toString());
+                    }
+                }
+            }
+        }
+        emptyArray(keys);
+        keys = null;
+    }
+
+    /* Empty the array to release the resources it requires. */
+    private static void emptyArray(Object[] array) {
+        for (int i = 0; i < array.length; i++) {
+            array[i] = null;
+        }
+    }
+
+    /*
+     * Hide zero values from the stats table. If every stat belonging to a
+     * type is zero, the whole type is removed from the table, including
+     * its title row (for example, the Compression stats group).
+     */
+    private void hideZeroValues(Map<String, String> map) {
+        Object[] keys = map.keySet().toArray();
+        for (Map.Entry<String, Boolean> entry : shownStats.entrySet()) {
+            boolean deleteAll = true;
+            for (Object key : keys) {
+                String value = map.get(key.toString());
+                /* Ensure we are operating on a non-deleted stat. */
+                if (key.toString().contains(entry.getKey()) && value != null) {
+                    if (!(value.equals(""))) {
+                        if (value.equals("0")) {
+                            map.remove(key.toString());
+                        } else {
+                            deleteAll = false;
+                        }
+                    }
+                }
+            }
+            if (deleteAll) {
+                map.remove(entry.getKey());
+            }
+        }
+        emptyArray(keys);
+        keys = null;
+    }
+
+    /* Get results of the invoked MBean operation. */
+    private synchronized Map<String, String> generateStats(ObjectName name) {
+        Map<String, String> map = new LinkedHashMap<String, String>();
+        try {
+            if (connection != null &&
+                connection.queryNames(name, null) != null) {
+                String status = (String)
+                    connection.invoke(name, opName, envStatParams, signature);
+                StringTokenizer st1 = new StringTokenizer(status, "\n");
+                String title = null;
+                while (st1.hasMoreTokens()) {
+                    String expression = st1.nextToken();
+                    if (expression != null) {
+                        if (expression.indexOf("=") < 0) {
+                            StringTokenizer st2 =
+                                new StringTokenizer(expression, ":");
+                            title = st2.nextToken();
+                            map.put(title, "");
+                        } else {
+                            StringTokenizer st2 =
+                                new StringTokenizer(expression, "=");
+                            String stats = " " + st2.nextToken();
+                            String value = st2.nextToken().trim();
+                            map.put(stats + ":" + title, value);
+                        }
+                    }
+                }
+            }
+        } catch (javax.management.InstanceNotFoundException e) {
+
+            /*
+             * If the connection is broken while it is trying to invoke,
+             * close and release all resources.
+             */
+            forceClose();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+        return map;
+    }
+
+    /* Stop and null the log and graph thread, close all the file writers. */
+    private void forceClose() {
+        setConnection(null);
+        for (Map.Entry<ObjectName, LogObject> item : logMap.entrySet()) {
+            item.getValue().closeFileWriter();
+        }
+    }
+
+    /* Return the JEMonitor MBean names in this application. */
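    /*
     * Editor's illustration (not part of the original source): the lookup
     * below amounts to a JMX pattern query such as
     *
     *   ObjectName pattern =
     *       new ObjectName("com.sleepycat.je.jmx:name=*JEMonitor(*");
     *   Set<ObjectName> found = connection.queryNames(pattern, null);
     *
     * where the pattern string is the mBeanNamePrefix supplied by the
     * concrete subclass (see JEStatsPlugin).
     */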
*/ + private synchronized ArrayList getBeansNames() { + if (connection == null) { + return null; + } + + ArrayList names = null; + try { + ObjectName name = new ObjectName(mBeanNamePrefix); + + if (connection.queryNames(name, null).size() != 0) { + names = new ArrayList + (connection.queryNames(name, null)); + } + } catch (java.rmi.RemoteException e) { + + /* + * Because the interval of plugin and the logging thread is not the + * same, sometimes logging thread would try to invoke a non-active + * connection and throws out RemoteException. We need to stop the + * thread and release the resources. + */ + forceClose(); + } catch (Exception e) { + e.printStackTrace(); + } + + return names; + } + + /* Repaint mbeans combobox if the number of MBeans changes. */ + private void repaintComboBox(List names) { + if (names == null) { + return; + } + + try { + if (names.size() != mBeansNum && names.size() != 0) { + mBeansComboBox.removeActionListener(mBeanComboBoxListener); + mBeansComboBox.removeAllItems(); + initMBeanComboBox(names); + + /* + * Remove it from the logging map, release the resources + * acquired by this MBean. + */ + for (Map.Entry entry : + logMap.entrySet()) { + boolean find = false; + for (ObjectName name : names) { + if (name.equals(entry.getKey())) { + find = true; + break; + } + } + if (!find) { + logMap.get(entry.getKey()).closeFileWriter(); + logMap.remove(entry.getKey()); + valueStore.remove(entry.getKey()); + break; + } + } + objName = getFirstObjectName(); + mBeansNum = names.size(); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + /* Write chosen stats to the log. */ + private void writeToLog(Map map, + ObjectName currentName) { + try { + if (map == null) + return; + + /* If the log name is not set, return. */ + LogObject item = logMap.get(currentName); + if (item.getLogName() == null) + return; + + StringBuilder buffer = new StringBuilder(); + getCSVOutput(buffer, map, false, currentName); + buffer.append("\n"); + item.writeLog(buffer.toString()); + } catch (Exception e) { + e.printStackTrace(); + } + } + + /* Make the stats output in CSV format. */ + private void getCSVOutput(StringBuilder buffer, + Map map, + boolean init, + ObjectName name) { + LogObject item = logMap.get(name); + + if (map == null || map.size() == 0) { + return; + } + + /* Before write to log, remove those unlog stats. */ + if (item != null) { + Object[] keys = map.keySet().toArray(); + for (String title : item.getTurnOff()) { + for (Object key : keys) { + if (key.toString().contains(title)) { + map.remove(key.toString()); + } + } + } + emptyArray(keys); + keys = null; + } + item = null; + + if (!init) { + buffer.append(System.currentTimeMillis()); + } else { + buffer.append("TIME"); + } + for (int i = 0; i < statsTitles.length; i++) { + map.remove(statsTitles[i]); + } + if (map.size() > 0) { + buffer.append(","); + int count = 1; + for (Map.Entry entry : map.entrySet()) { + String title = entry.getKey().substring + (0, entry.getKey().indexOf(":")).trim(); + if (!init) { + title = "\"" + entry.getValue() + "\""; + } + if (count < map.size()) + title = title + ","; + buffer.append(title); + count++; + } + } + } + + /* Return a new SwingWorker for UI update. */ + private SwingWorker newSwingWorker() { + ArrayList list = new ArrayList(); + list.add(this); + + return new StatsSwingWorker(list); + } + + /* Writing and graphing stats according to the specified interval. 
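 *
 * Editor's note (illustration, not part of the original source): when
 * recording is on, getCSVOutput above emits a header row and then one
 * row per sample, roughly of the form
 *
 *   TIME,statNameA,statNameB,...
 *   1623000000000,"42","0",...
 *
 * i.e. System.currentTimeMillis() followed by the quoted stat values;
 * the stat names shown here are placeholders.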
+     */
+    private void writeLogAndGraphing(ObjectName objectName,
+                                     Map<String, String> values) {
+        if ((values == null) || (values.size() == 0)) {
+            return;
+        }
+
+        /* Graph stats. */
+        if (frameList.size() > 0) {
+            for (GraphFrame frame : frameList) {
+                if (frame.getBeanName().equals(objectName)) {
+                    frame.writeData(values);
+                }
+            }
+        }
+
+        /* Write stats to the CSV file. */
+        if (doLog) {
+            writeToLog(values, objectName);
+        }
+    }
+
+    /* A utility class for recording the environment-related information. */
+    private class LogObject {
+        private String logName;
+        private FileWriter csvOutput = null;
+        private ArrayList<String> turnOffIndex = new ArrayList<String>();
+
+        public void setLogName(String logName) {
+            if (logName.contains(".csv")) {
+                this.logName = logName;
+            } else {
+                this.logName = logName + ".csv";
+            }
+        }
+
+        public String getLogName() {
+            return logName;
+        }
+
+        public void addTurnOff(String title) {
+            turnOffIndex.add(title);
+        }
+
+        public ArrayList<String> getTurnOff() {
+            return turnOffIndex;
+        }
+
+        /* Write the CSV file header. */
+        public void initCSVOutput(ObjectName objectName) {
+            if (logName != null) {
+                try {
+                    csvOutput = new FileWriter(new File(logName), true);
+                    StringBuilder buffer = new StringBuilder();
+                    Map<String, String> map = generateStats(objectName);
+                    getCSVOutput(buffer, map, true, objectName);
+                    csvOutput.append(buffer.toString() + "\n");
+                    csvOutput.flush();
+                } catch (IOException e) {
+                    e.printStackTrace();
+                }
+            }
+        }
+
+        /* Write stats to the log. */
+        public void writeLog(String stats) {
+            try {
+                if (csvOutput != null) {
+                    csvOutput.append(stats);
+                    csvOutput.flush();
+                }
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+
+        /* Close the file writer. */
+        public void closeFileWriter() {
+            try {
+                if (csvOutput != null)
+                    csvOutput.close();
+                csvOutput = null;
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    /* The table model for the stats table. */
+    public class StatsTableModel extends AbstractTableModel {
+        private static final long serialVersionUID = -2478788160419123718L;
+
+        private String[] columnNames = { "Stat Name", "Value" };
+
+        private List<Map.Entry<String, String>> list =
+            new ArrayList<Map.Entry<String, String>>();
+
+        public int getColumnCount() {
+            return columnNames.length;
+        }
+
+        public int getRowCount() {
+            return (list == null) ? 0 : list.size();
+        }
+
+        @Override
+        public String getColumnName(int col) {
+            return columnNames[col];
+        }
+
+        public Object getValueAt(int row, int col) {
+            Map.Entry<String, String> value = list.get(row);
+            switch (col) {
+                case 0 :
+                    /* Column 0 shows the stat name. */
+                    if (value.getKey().indexOf(":") < 0) {
+                        return value.getKey().trim();
+                    } else {
+                        return value.getKey().
+                            substring(0, value.getKey().indexOf(":"));
+                    }
+                case 1 :
+                    return value.getValue();
+                default:
+                    return null;
+            }
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public Class<?> getColumnClass(int c) {
+            return getValueAt(0, c).getClass();
+        }
+
+        public void setList(List<Map.Entry<String, String>> list) {
+            this.list = list;
+        }
+    }
+
+    /*
+     * If a stat's value has changed since last time, mark it in red.
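+     *
+     * A hypothetical sketch of how such a renderer is typically installed
+     * (the actual wiring lives elsewhere in this class):
+     *
+     *   statsTable.setDefaultRenderer(String.class, new StringRenderer());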
+     */
+    private class StringRenderer extends DefaultTableCellRenderer {
+        private static final long serialVersionUID = 480362177240428265L;
+
+        public StringRenderer() {
+            super();
+            setHorizontalAlignment(JLabel.LEFT);
+        }
+
+        @Override
+        public Component getTableCellRendererComponent(JTable table,
+                                                       Object value,
+                                                       boolean isSelected,
+                                                       boolean hasFocus,
+                                                       int row,
+                                                       int column) {
+            Component cell =
+                super.getTableCellRendererComponent(table, value, isSelected,
+                                                    hasFocus, row, column);
+
+            /*
+             * If the value is unchanged, the font color is black; if the
+             * value differs, change the color to red and replace the value
+             * in the valueStore with the new one.
+             */
+            if (column == 1) {
+                String newValue =
+                    valueStore.get(objName).get(table.getValueAt(row, 0));
+                if (newValue != null &&
+                    !newValue.equals(table.getValueAt(row, 1))) {
+
+                    valueStore.get(objName).
+                        put(table.getValueAt(row, 0).toString(),
+                            table.getValueAt(row, 1).toString().trim());
+                    cell.setForeground(Color.RED);
+                }
+            } else {
+                cell.setForeground(Color.BLACK);
+            }
+
+            return cell;
+        }
+    }
+
+    /* The file chooser for setting the log file. */
+    private class SaveLogFileChooser extends JFileChooser {
+        private static final long serialVersionUID = -3035086973026766211L;
+
+        public SaveLogFileChooser() {
+            setFileFilter(new FileNameExtensionFilter("CSV files", "csv"));
+        }
+
+        @Override
+        public void approveSelection() {
+            File file = getSelectedFile();
+            if (file != null) {
+                FileFilter filter = getFileFilter();
+                if (filter != null &&
+                    filter instanceof FileNameExtensionFilter) {
+                    String[] extensions =
+                        ((FileNameExtensionFilter) filter).getExtensions();
+
+                    /*
+                     * Check whether the file name already ends with one of
+                     * the filter's extensions; if not, append the first.
+                     */
+                    boolean goodExt = false;
+                    for (String extension : extensions) {
+                        if (file.getName().endsWith("." + extension)) {
+                            goodExt = true;
+                            break;
+                        }
+                    }
+
+                    if (!goodExt) {
+                        file = new File(file.getParent(), file.getName() +
+                                        "." + extensions[0]);
+                    }
+                }
+
+                if (file.exists()) {
+                    String okStr = "ok";
+                    String cancelStr = "cancel";
+                    int ret =
+                        JOptionPane.showOptionDialog(this,
+                            "File " + file.getName() + " already exists!",
+                            "Save File",
+                            JOptionPane.OK_CANCEL_OPTION,
+                            JOptionPane.WARNING_MESSAGE,
+                            null,
+                            new Object[] {okStr, cancelStr}, okStr);
+                    if (ret != JOptionPane.OK_OPTION) {
+                        return;
+                    }
+                }
+
+                setSelectedFile(file);
+            }
+            super.approveSelection();
+        }
+    }
+
+    /* The following classes are listeners for GUI events. */
+
+    /* Listener for the clear stats button. */
+    private class ClearStatsBoxListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            envStatParams[0] = !(cumulativeStatsBox.isSelected());
+            /* Repaint the tab. */
+            drawNewStats.run();
+        }
+    }
+
+    /* Listener for the display non-zero checkbox. */
+    private class HideZeroValueBoxListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            hideZeroValue = hideZeroValueBox.isSelected();
+            /* Repaint the tab. */
+            drawNewStats.run();
+        }
+    }
+
+    /* Listener for the start logging button. */
+    private class StartLogListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            /* Write the CSV file header for each MBean. */
+            for (Map.Entry<ObjectName, LogObject> entry : logMap.entrySet()) {
+                entry.getValue().initCSVOutput(entry.getKey());
+            }
+            enableComponent(new JComponent[] { saveLogButton,
+                                               startLogButton,
+                                               logMenuItem },
+                            false);
+            doLog = true;
+        }
+    }
+
+    /*
+     * Listener for the stop logging button.
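+     *
+     * A hypothetical sketch of the usual Swing wiring for this listener
+     * (the actual wiring lives elsewhere in this class):
+     *
+     *   stopLogButton.addActionListener(new StopLogListener());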
+     */
+    private class StopLogListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            enableComponent
+                (new JComponent[] { saveLogButton, logMenuItem }, true);
+            doLog = false;
+            startLogButton.setEnabled(false);
+            stopLogButton.setEnabled(false);
+            saveLogButton.setEnabled(true);
+        }
+    }
+
+    /* Enable or disable the provided components. */
+    private void enableComponent(JComponent[] components, boolean enabled) {
+        for (JComponent component : components) {
+            component.setEnabled(enabled);
+        }
+    }
+
+    /* Listener for the save log file button. */
+    private class SaveLogListener implements ActionListener {
+        JPanel shownPanel;
+
+        public SaveLogListener(JPanel shownPanel) {
+            this.shownPanel = shownPanel;
+        }
+
+        public void actionPerformed(ActionEvent e) {
+            if (fileChooser == null) {
+                fileChooser = new SaveLogFileChooser();
+            }
+            int ret = fileChooser.showSaveDialog(shownPanel);
+            if (ret == JFileChooser.APPROVE_OPTION) {
+                logMap.get(objName).setLogName
+                    (fileChooser.getSelectedFile().getAbsolutePath());
+                startLogButton.setEnabled(true);
+                stopLogButton.setEnabled(true);
+            }
+        }
+    }
+
+    /* Listener for the MBean-choosing ComboBox. */
+    private class BeanComboBoxListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            /* If the user chooses another environment, update the table. */
+            objName =
+                comboToObjects.get(mBeansComboBox.getSelectedItem());
+            /* Repaint the tab. */
+            drawNewStats.run();
+        }
+    }
+
+    /* Listener for the stats type checkboxes. */
+    private class StatsTypeListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            shownStats.put(((JCheckBox) e.getSource()).getText(),
+                           ((JCheckBox) e.getSource()).isSelected());
+            /* Repaint the tab. */
+            drawNewStats.run();
+        }
+    }
+
+    /* Listener for the log menu item on the JEStats table. */
+    private class LogMenuListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            LogObject currentBean = logMap.get(objName);
+
+            if (selectedRow > -1) {
+                String title =
+                    statsTable.getValueAt(selectedRow, 0).toString().trim();
+
+                if (!logMenuItem.getState() &&
+                    !currentBean.getTurnOff().contains(title)) {
+                    currentBean.addTurnOff(title);
+                }
+                if (logMenuItem.getState() &&
+                    currentBean.getTurnOff().contains(title)) {
+                    currentBean.getTurnOff().remove(title);
+                }
+            }
+        }
+    }
+
+    /* Listener for the graph menu item on the JEStats table. */
+    private class GraphMenuListener implements ActionListener {
+        public void actionPerformed(ActionEvent e) {
+            if (selectedRow > -1) {
+                String graphStats =
+                    statsTable.getValueAt(selectedRow, 0).toString().trim();
+                if (!isTitleEqual(graphStats)) {
+                    boolean initialized = false;
+                    for (GraphFrame frame : frameList) {
+                        if (frame.getBeanName().equals(objName) &&
+                            frame.getStatsName().equals(graphStats)) {
+                            initialized = true;
+                            break;
+                        }
+                    }
+                    if (!initialized) {
+                        frameList.add(new GraphFrame(objName, graphStats));
+                    }
+                }
+            }
+        }
+    }
+
+    /* Return whether the string equals one of the stats type titles. */
+    private boolean isTitleEqual(String title) {
+        boolean equal = false;
+        for (int i = 0; i < statsTitles.length; i++) {
+            if (statsTitles[i].equals(title)) {
+                equal = true;
+                break;
+            }
+        }
+
+        return equal;
+    }
+
+    /*
+     * These two classes are listeners for mouse actions on the Stats
+     * table.
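+     *
+     * A hypothetical sketch of how they would be attached (the actual
+     * wiring lives elsewhere in this class):
+     *
+     *   statsTable.addMouseListener(new TableMouseListener());
+     *   statsTable.addMouseMotionListener(new TableMouseMotionListener());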
*/ + private class TableMouseListener implements MouseListener { + public void mouseClicked(MouseEvent e) { + if (e.getButton() == MouseEvent.BUTTON3) { + int row = statsTable.rowAtPoint(e.getPoint()); + if (row >= 0) { + statsTable.setRowSelectionInterval(row, row); + final String statName = + statsTable.getValueAt(row, 0).toString().trim(); + if (logMap.get(objName).getTurnOff().contains(statName)) { + logMenuItem.setState(false); + } else { + logMenuItem.setState(true); + } + + /* Check which stats shouldn't be graphed. */ + try { + String statValue = getParsedValue + (statsTable.getValueAt(row, 1).toString().trim()); + + /* + * If the stats value can't be converted to a number, + * disable graphing. + */ + Long.parseLong(statValue); + graphMenuItem.setEnabled(true); + } catch (NumberFormatException exception) { + + /* If the value can't be converted to long, disable the + * graphing menu. + */ + graphMenuItem.setEnabled(false); + } + selectedRow = row; + } + popup.show(e.getComponent(), e.getX(), e.getY()); + } + } + + public void mouseEntered(MouseEvent e) { + } + + public void mouseExited(MouseEvent e) { + } + + public void mousePressed(MouseEvent e) { + } + + public void mouseReleased(MouseEvent e) { + } + } + + /* + * Parse the number format ***,***,*** to *********. + */ + private String getParsedValue(String value) { + if (value.indexOf(",") > 0) { + StringTokenizer st = new StringTokenizer(value, ","); + value = new String(); + while (st.hasMoreTokens()) { + value = value + st.nextToken(); + } + } + + return value; + } + + /* The MouseMotionListener to the table. */ + private class TableMouseMotionListener implements MouseMotionListener { + public void mouseDragged(MouseEvent e) { + } + + public void mouseMoved(MouseEvent e) { + int row = statsTable.rowAtPoint(e.getPoint()); + if (row >= 0) { + String stats = ((String) statsTable.getValueAt(row, 0)).trim(); + statsTable.setToolTipText(tips.get(stats)); + } + } + } + + /* KeyListener for setting graph interval text field. */ + private class StatsIntervalListener implements KeyListener { + public void keyPressed(KeyEvent e) { + if (e.getKeyCode() == KeyEvent.VK_ENTER) { + String newIntervalText = statsIntervalText.getText(); + try { + long statsIntervalSeconds = new Long(newIntervalText); + statsIntervalMillis = statsIntervalSeconds * 1000; + } catch (Exception exception) { + System.err.println("\"" + newIntervalText + + "\" is not a valid interval. " + + exception); + } + + statsCollector.cancel(); + statsCollector = new Timer(STATS_COLLECTOR); + statsCollector.scheduleAtFixedRate(new StatsCollectionTask(), + 0, + statsIntervalMillis); + } + } + + public void keyReleased(KeyEvent e) { + } + + public void keyTyped(KeyEvent e) { + } + } + + /** + * Frame for showing stats. + * + * Note: stats whose type is float are not supported currently. + */ + private class GraphFrame extends JFrame { + private static final long serialVersionUID = 8921577524698094123L; + + /* Set the color of line in graphing. */ + private final Color statsColor = Color.blue.darker(); + /* The panel doing the graphing work. */ + private PlotterPanel plotterPanel; + + private final ObjectName bean; + private final String stats; + + public GraphFrame(ObjectName beanName, String statsName) { + super(statsName.trim() + " for " + beanName.toString()); + bean = beanName; + stats = statsName.trim(); + setLayout(new BorderLayout(0, 0)); + setSize(800, 400); + + /* Add TimeComboBox and PlotterPanel to the frame. 
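+             *
+             * The TimeComboBox selects the plot's visible time range and
+             * the PlotterPanel renders the sampled series; both appear to
+             * be JConsole plotter support classes (assumed here from their
+             * use with Unit.NONE and getPlotter()).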
+             */
+            JPanel topPanel = new JPanel(new BorderLayout());
+            JPanel controlPanel =
+                new JPanel(new FlowLayout(FlowLayout.CENTER, 20, 5));
+            controlPanel.add(new JLabel("Time Range:"));
+            plotterPanel = new PlotterPanel(stats, Unit.NONE, false);
+            plotterPanel.getPlotter().createSequence
+                (stats, stats, statsColor, true);
+            TimeComboBox timeComboBox =
+                new TimeComboBox(plotterPanel.getPlotter());
+            controlPanel.add(timeComboBox);
+            topPanel.add(controlPanel, BorderLayout.CENTER);
+            add(topPanel, BorderLayout.NORTH);
+            add(plotterPanel, BorderLayout.CENTER);
+
+            /*
+             * When the close button is clicked, dispose the frame and, if
+             * the connection is alive, remove the frame from the frame
+             * list.
+             */
+            addWindowListener(new java.awt.event.WindowAdapter() {
+                @Override
+                public void windowClosing(java.awt.event.WindowEvent evt) {
+                    setVisible(false);
+                    dispose();
+                    if (JEStats.getConnection() != null) {
+                        removeGraphFrame(bean, stats);
+                    }
+                }
+            });
+            setVisible(true);
+        }
+
+        public ObjectName getBeanName() {
+            return bean;
+        }
+
+        public String getStatsName() {
+            return stats;
+        }
+
+        /*
+         * When using the PlotterPanel, we only need to write data into it.
+         */
+        public void writeData(Map<String, String> map) {
+            for (Map.Entry<String, String> entry : map.entrySet()) {
+                if (entry.getKey().indexOf(":") > 0) {
+                    String realKey = entry.getKey().substring
+                        (0, entry.getKey().indexOf(":")).trim();
+
+                    /*
+                     * Only the graphed stat is plotted. Some values contain
+                     * "," group separators, which must be stripped before
+                     * they can be parsed as longs.
+                     */
+                    if (stats.equals(realKey)) {
+                        String value = getParsedValue(entry.getValue());
+                        plotterPanel.getPlotter().addValues
+                            (System.currentTimeMillis(), Long.valueOf(value));
+                    }
+                }
+            }
+        }
+    }
+
+    private class StatsCollectionTask extends TimerTask {
+        private final boolean writeLogAndGraphing;
+
+        public StatsCollectionTask() {
+            this(true);
+        }
+
+        public StatsCollectionTask(boolean writeLogAndGraphing) {
+            this.writeLogAndGraphing = writeLogAndGraphing;
+        }
+
+        @Override
+        public void run() {
+            List<ObjectName> objectNames = getBeansNames();
+
+            if (objectNames == null || objectNames.size() == 0) {
+                return;
+            }
+
+            /*
+             * Reset the object names. Make sure to do this assignment
+             * atomically, because other threads may read savedObjectNames.
+             */
+            savedObjectNames = objectNames;
+
+            for (ObjectName objectName : objectNames) {
+                Map<String, String> statValues = generateStats(objectName);
+
+                /* Copy and save stats and ObjectNames for tab refreshing. */
+                Map<String, String> copiedStats =
+                    new LinkedHashMap<String, String>();
+                copiedStats.putAll(statValues);
+                savedStats.put(objectName, copiedStats);
+
+                /* Write the log and update graphing for this MBean. */
+                if (writeLogAndGraphing) {
+                    writeLogAndGraphing(objectName, statValues);
+                }
+            }
+            newSwingWorker().execute();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/jmx/plugin/StatsPlugin.java b/src/com/sleepycat/je/jmx/plugin/StatsPlugin.java
new file mode 100644
index 0000000..e82a76e
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/plugin/StatsPlugin.java
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.jmx.plugin;
+
+import java.beans.PropertyChangeEvent;
+import java.beans.PropertyChangeListener;
+import java.util.ArrayList;
+import java.util.Map;
+
+import javax.swing.JPanel;
+import javax.swing.SwingWorker;
+
+import com.sun.tools.jconsole.JConsolePlugin;
+import com.sun.tools.jconsole.JConsoleContext;
+import com.sun.tools.jconsole.JConsoleContext.ConnectionState;
+
+public abstract class StatsPlugin extends JConsolePlugin
+    implements PropertyChangeListener {
+
+    protected ArrayList<Stats> stats = new ArrayList<Stats>();
+    protected StatsSwingWorker worker;
+    protected Map<String, JPanel> tabs = null;
+    protected int mBeanCount = 0;
+
+    public StatsPlugin() {
+        /* Register as a listener. */
+        addContextPropertyChangeListener(this);
+    }
+
+    /*
+     * Returns the JEStats tabs to be added to JConsole.
+     */
+    @Override
+    public synchronized Map<String, JPanel> getTabs() {
+        initTabs();
+
+        return tabs;
+    }
+
+    protected abstract void initTabs();
+
+    /*
+     * Returns a SwingWorker which is responsible for updating the JEStats
+     * tab.
+     */
+    @Override
+    public SwingWorker<?, ?> newSwingWorker() {
+        if (stats.size() > 0) {
+            return new StatsSwingWorker(stats);
+        }
+        return null;
+    }
+
+    @Override
+    public void dispose() {
+    }
+
+    /*
+     * Property listener to reset the MBeanServerConnection at reconnection
+     * time.
+     */
+    public void propertyChange(PropertyChangeEvent ev) {
+        String prop = ev.getPropertyName();
+        if (prop == JConsoleContext.CONNECTION_STATE_PROPERTY) {
+            ConnectionState newState = (ConnectionState) ev.getNewValue();
+            if (newState == ConnectionState.CONNECTED && stats.size() != 0) {
+                for (Stats status : stats) {
+                    status.setConnection(
+                        getContext().getMBeanServerConnection());
+                }
+            } else if (newState == ConnectionState.DISCONNECTED &&
+                       stats.size() != 0) {
+                /* Release every Stats instance. */
+                while (!stats.isEmpty()) {
+                    Stats status = stats.remove(0);
+                    status.setConnection(null);
+                }
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/jmx/plugin/StatsSwingWorker.java b/src/com/sleepycat/je/jmx/plugin/StatsSwingWorker.java
new file mode 100644
index 0000000..1a2798e
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/plugin/StatsSwingWorker.java
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.jmx.plugin;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import javax.swing.SwingWorker;
+
+/*
+ * This class is responsible for updating the tabs in the JConsole plugin.
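+ *
+ * A sketch of the intended use: construct it with the Stats instances to
+ * refresh and call execute(). doInBackground() gathers each tab's rows off
+ * the event dispatch thread, and done() pushes them into the table models.
+ * (statsList below stands in for the plugin's list of Stats.)
+ *
+ *   new StatsSwingWorker(statsList).execute();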
+ */
+public class StatsSwingWorker extends
+    SwingWorker<List<List<Map.Entry<String, String>>>, Object> {
+
+    private final ArrayList<Stats> list;
+
+    public StatsSwingWorker(ArrayList<Stats> list) {
+        this.list = list;
+    }
+
+    @Override
+    public List<List<Map.Entry<String, String>>> doInBackground() {
+        ArrayList<List<Map.Entry<String, String>>> statsList =
+            new ArrayList<List<Map.Entry<String, String>>>();
+        for (Stats status: list) {
+            statsList.add(status.getResultsList());
+        }
+
+        return statsList;
+    }
+
+    @Override
+    protected void done() {
+        try {
+            if (get() != null) {
+                for (int i = 0; i < list.size(); i++) {
+                    list.get(i).getTModel().setList(get().get(i));
+                    list.get(i).getTModel().fireTableDataChanged();
+                }
+            }
+        } catch (InterruptedException e) {
+            /* Ignored: a fresh update will run at the next interval. */
+        } catch (ExecutionException e) {
+            /* Ignored: a fresh update will run at the next interval. */
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/jmx/plugin/package-info.java b/src/com/sleepycat/je/jmx/plugin/package-info.java
new file mode 100644
index 0000000..742118a
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/plugin/package-info.java
@@ -0,0 +1,17 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: JConsole plugin for viewing JE stats/config, rarely used.
+ */
+package com.sleepycat.je.jmx.plugin;
\ No newline at end of file
diff --git a/src/com/sleepycat/je/jmx/plugin/services/com.sun.tools.jconsole.JConsolePlugin b/src/com/sleepycat/je/jmx/plugin/services/com.sun.tools.jconsole.JConsolePlugin
new file mode 100644
index 0000000..bc26a88
--- /dev/null
+++ b/src/com/sleepycat/je/jmx/plugin/services/com.sun.tools.jconsole.JConsolePlugin
@@ -0,0 +1 @@
+com.sleepycat.je.jmx.plugin.JEStatsPlugin
diff --git a/src/com/sleepycat/je/latch/Latch.java b/src/com/sleepycat/je/latch/Latch.java
new file mode 100644
index 0000000..d2dba7b
--- /dev/null
+++ b/src/com/sleepycat/je/latch/Latch.java
@@ -0,0 +1,118 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.latch;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.utilint.StatGroup;
+
+/**
+ * Provides exclusive (mutex-like) latching. This is implemented with Java's
+ * ReentrantLock, which is extended for a few reasons:
+ * 1) To prevent reentrant use, since latches are not used reentrantly in JE.
+ *    This increases reliability by detecting accidental reentrant calls.
+ *    Checks for reentrancy are unconditional, i.e., checked in production.
+ * 2) To support instrumentation for debugging (see LatchSupport).
+ * 3) Automatic use of the configured latch timeout.
+ * 4) Built-in thread interrupt handling.
+ *
+ * Latches are expected to be held for short, defined periods of time.
No + * deadlock detection is provided so it is the caller's responsibility to + * sequence latch acquisition in an ordered fashion to avoid deadlocks. + */ +public interface Latch { + + /** + * Acquires a latch for exclusive/write access. + * + * @throws EnvironmentFailureException if the latch is already held by the + * calling thread. + */ + void acquireExclusive(); + + /** + * Acquires a latch for exclusive/write access, but do not block if it's + * not available. + * + * @return true if the latch was acquired, false if it is not available. + * + * @throws EnvironmentFailureException if the latch is already held by the + * calling thread. + */ + boolean acquireExclusiveNoWait(); + + /** + * Releases the latch. If there are other thread(s) waiting for the latch, + * they are woken up and granted the latch. + * + * @throws EnvironmentFailureException if the latch is not currently held. + */ + void release(); + + /** + * Releases the latch. If there are other thread(s) waiting for the latch, + * one is woken up and granted the latch. If the latch was not owned by + * the caller, just return. + */ + void releaseIfOwner(); + + /** + * Returns true if the current thread holds this latch. For an exclusive + * latch, is equivalent to calling {@link #isExclusiveOwner()}. + */ + boolean isOwner(); + + /** + * Returns true if the current thread holds this latch for exclusive/write + * access. + */ + boolean isExclusiveOwner(); + + /** + * Returns the thread that currently holds the latch for exclusive access. + */ + Thread getExclusiveOwner(); + + /** + * Returns an estimate of the number of threads waiting. + */ + int getNWaiters(); + + /** + * Returns a stats group with information about this latch. + * + * @throws EnvironmentFailureException if stats were not requested when the + * latch was created. See LatchFactory. + */ + StatGroup getStats(); + + /** + * Resets collected stat values to zero. + * + * @throws EnvironmentFailureException if stats were not requested when the + * latch was created. See LatchFactory. + */ + void clearStats(); + + /** + * Returns the latch name and exclusive owner info. + */ + @Override + String toString(); + + /** + * Returns the same information as {@link #toString()} plus all known debug + * info. + */ + String debugString(); +} diff --git a/src/com/sleepycat/je/latch/LatchContext.java b/src/com/sleepycat/je/latch/LatchContext.java new file mode 100644 index 0000000..9c6ebc8 --- /dev/null +++ b/src/com/sleepycat/je/latch/LatchContext.java @@ -0,0 +1,37 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.latch; + +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Provides information about a latch, to avoid requiring this information to + * be stored with every latch object. This is implemented by the IN class to + * reduce memory usage. LatchFactory provides a default implementation for + * cases where creating an extra object is not an issue. 
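+ *
+ * For illustration, a hypothetical hand-rolled implementation (LatchFactory
+ * builds an equivalent one internally from an EnvironmentImpl and a name):
+ *
+ *   LatchContext ctx = new LatchContext() {
+ *       public int getLatchTimeoutMs() { return 1000; }
+ *       public String getLatchName() { return "myLatch"; }
+ *       public LatchTable getLatchTable() {
+ *           return LatchSupport.otherLatchTable;
+ *       }
+ *       public EnvironmentImpl getEnvImplForFatalException() {
+ *           throw EnvironmentFailureException.unexpectedState();
+ *       }
+ *   };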
+ */ +public interface LatchContext { + + /** Returns EnvironmentParams.ENV_LATCH_TIMEOUT */ + int getLatchTimeoutMs(); + + /** Returns the latch name for debugging. */ + String getLatchName(); + + /** Returns LatchTable for debug/test tracking. */ + LatchTable getLatchTable(); + + /** Returns envImpl, or may throw another exception in unit tests. */ + EnvironmentImpl getEnvImplForFatalException(); +} diff --git a/src/com/sleepycat/je/latch/LatchFactory.java b/src/com/sleepycat/je/latch/LatchFactory.java new file mode 100644 index 0000000..c7ec4bc --- /dev/null +++ b/src/com/sleepycat/je/latch/LatchFactory.java @@ -0,0 +1,127 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.latch; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; + +public class LatchFactory { + + /** + * Creates a SharedLatch using a given LatchContext. + * + * @param exclusiveOnly indicates whether this latch can only be set + * exclusively (not shared). + */ + public static SharedLatch createSharedLatch(final LatchContext context, + final boolean exclusiveOnly) { + if (exclusiveOnly) { + return new LatchImpl(context); + } + return new SharedLatchImpl(false /*fair*/, context); + } + + /** + * Creates a SharedLatch, creating a LatchContext from the given name and + * envImpl. + * + * @param exclusiveOnly indicates whether this latch can only be set + * exclusively (not shared). + */ + public static SharedLatch createSharedLatch(final EnvironmentImpl envImpl, + final String name, + final boolean exclusiveOnly) { + if (exclusiveOnly) { + return new LatchImpl(createContext(envImpl, name)); + } + return new SharedLatchImpl( + false /*fair*/, createContext(envImpl, name)); + } + + /** + * Creates a Latch using a given LatchContext. + * + * @param collectStats is true to collect stats. If false, a smaller and + * faster implementation is used. + */ + public static Latch createExclusiveLatch(final LatchContext context, + final boolean collectStats) { + if (collectStats) { + return new LatchWithStatsImpl(context); + } + return new LatchImpl(context); + } + + /** + * Creates a Latch, creating a LatchContext from the given name and + * envImpl. + * + * @param collectStats is true to collect stats. If false, a smaller and + * faster implementation is used. 
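+     *
+     * For illustration, the typical acquire/release pattern with a latch
+     * from this factory (envImpl is a hypothetical open environment):
+     *
+     *   Latch latch =
+     *       LatchFactory.createExclusiveLatch(envImpl, "myLatch", false);
+     *   latch.acquireExclusive();
+     *   try {
+     *       // ... critical section ...
+     *   } finally {
+     *       latch.release();
+     *   }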
+ */ + public static Latch createExclusiveLatch(final EnvironmentImpl envImpl, + final String name, + final boolean collectStats) { + if (collectStats) { + return new LatchWithStatsImpl(createContext(envImpl, name)); + } + return new LatchImpl(createContext(envImpl, name)); + } + + private static LatchContext createContext(final EnvironmentImpl envImpl, + final String name) { + return new LatchContext() { + @Override + public int getLatchTimeoutMs() { + return envImpl.getLatchTimeoutMs(); + } + @Override + public String getLatchName() { + return name; + } + @Override + public LatchTable getLatchTable() { + return LatchSupport.otherLatchTable; + } + @Override + public EnvironmentImpl getEnvImplForFatalException() { + return envImpl; + } + }; + } + + /** + * Used for creating latches in tests, with having an EnvironmentImpl. + */ + public static LatchContext createTestLatchContext(final String name) { + return new LatchContext() { + @Override + public int getLatchTimeoutMs() { + return 1000; + } + @Override + public String getLatchName() { + return name; + } + @Override + public LatchTable getLatchTable() { + return LatchSupport.otherLatchTable; + } + @Override + public EnvironmentImpl getEnvImplForFatalException() { + throw EnvironmentFailureException.unexpectedState(); + } + }; + } +} diff --git a/src/com/sleepycat/je/latch/LatchImpl.java b/src/com/sleepycat/je/latch/LatchImpl.java new file mode 100644 index 0000000..f8e0c37 --- /dev/null +++ b/src/com/sleepycat/je/latch/LatchImpl.java @@ -0,0 +1,179 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.latch; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; + +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.StatGroup; + +/** + * An exclusive latch without stats. + * + * SharedLatch (not just Latch) is implemented to support exclusive-only BIN + * latches. 
+ */ +@SuppressWarnings("serial") +public class LatchImpl extends ReentrantLock implements SharedLatch { + + private final LatchContext context; + private OwnerInfo lastOwnerInfo; + + LatchImpl(final LatchContext context) { + this.context = context; + } + + String getName() { + return context.getLatchName(); + } + + @Override + public boolean isExclusiveOnly() { + return true; + } + + @Override + public void acquireExclusive() { + + if (isHeldByCurrentThread()) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch already held: " + debugString()); + } + + if (LatchSupport.INTERRUPTIBLE_WITH_TIMEOUT) { + try { + if (!tryLock( + context.getLatchTimeoutMs(), TimeUnit.MILLISECONDS)) { + throw LatchSupport.handleTimeout(this, context); + } + } catch (InterruptedException e) { + throw new ThreadInterruptedException( + context.getEnvImplForFatalException(), e); + } + } else { + lock(); + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackAcquire(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = new OwnerInfo(context); + } + assert EnvironmentImpl.maybeForceYield(); + } + + @Override + public boolean acquireExclusiveNoWait() { + + if (isHeldByCurrentThread()) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch already held: " + debugString()); + } + + if (!tryLock()) { + return false; + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackAcquire(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = new OwnerInfo(context); + } + assert EnvironmentImpl.maybeForceYield(); + return true; + } + + @Override + public void acquireShared() { + acquireExclusive(); + } + + @Override + public void release() { + if (!isHeldByCurrentThread()) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch not held: " + debugString()); + } + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackRelease(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = null; + } + unlock(); + } + + @Override + public void releaseIfOwner() { + if (!isHeldByCurrentThread()) { + return; + } + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackRelease(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = null; + } + unlock(); + } + + @Override + public boolean isOwner() { + return isHeldByCurrentThread(); + } + + @Override + public boolean isExclusiveOwner() { + return isHeldByCurrentThread(); + } + + @Override + public Thread getExclusiveOwner() { + return getOwner(); + } + + @Override + public int getNWaiters() { + return getQueueLength(); + } + + @Override + public StatGroup getStats() { + throw unexpectedState(); + } + + @Override + public void clearStats() { + throw unexpectedState(); + } + + @Override + public String toString() { + return LatchSupport.toString(this, context, lastOwnerInfo); + } + + @Override + public String debugString() { + return LatchSupport.debugString(this, context, lastOwnerInfo); + } +} diff --git a/src/com/sleepycat/je/latch/LatchStatDefinition.java b/src/com/sleepycat/je/latch/LatchStatDefinition.java new file mode 100644 index 0000000..f39b8b5 --- /dev/null +++ b/src/com/sleepycat/je/latch/LatchStatDefinition.java @@ -0,0 +1,81 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.latch; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for JE latch statistics. + */ +public class LatchStatDefinition { + + public static final String GROUP_NAME = "Latch"; + public static final String GROUP_DESC = "Latch characteristics"; + + public static final String LATCH_NO_WAITERS_NAME = + "nLatchAcquiresNoWaiters"; + public static final String LATCH_NO_WAITERS_DESC = + "Number of times the latch was acquired without contention."; + public static final StatDefinition LATCH_NO_WAITERS = + new StatDefinition( + LATCH_NO_WAITERS_NAME, + LATCH_NO_WAITERS_DESC); + + public static final String LATCH_SELF_OWNED_NAME = + "nLatchAcquiresSelfOwned"; + public static final String LATCH_SELF_OWNED_DESC = + "Number of times the latch was acquired it was already owned by the " + + "caller."; + public static final StatDefinition LATCH_SELF_OWNED = + new StatDefinition( + LATCH_SELF_OWNED_NAME, + LATCH_SELF_OWNED_DESC); + + public static final String LATCH_CONTENTION_NAME = + "nLatchAcquiresWithContention"; + public static final String LATCH_CONTENTION_DESC = + "Number of times the latch was acquired when it was already owned by " + + "another thread."; + public static final StatDefinition LATCH_CONTENTION = + new StatDefinition( + LATCH_CONTENTION_NAME, + LATCH_CONTENTION_DESC); + + public static final String LATCH_NOWAIT_SUCCESS_NAME = + "nLatchAcquiresNoWaitSuccessful"; + public static final String LATCH_NOWAIT_SUCCESS_DESC = + "Number of successful no-wait acquires of the lock table latch."; + public static final StatDefinition LATCH_NOWAIT_SUCCESS = + new StatDefinition( + LATCH_NOWAIT_SUCCESS_NAME, + LATCH_NOWAIT_SUCCESS_DESC); + + public static final String LATCH_NOWAIT_UNSUCCESS_NAME = + "nLatchAcquireNoWaitUnsuccessful"; + public static final String LATCH_NOWAIT_UNSUCCESS_DESC = + "Number of unsuccessful no-wait acquires of the lock table latch."; + public static final StatDefinition LATCH_NOWAIT_UNSUCCESS = + new StatDefinition( + LATCH_NOWAIT_UNSUCCESS_NAME, + LATCH_NOWAIT_UNSUCCESS_DESC); + + public static final String LATCH_RELEASES_NAME = + "nLatchReleases"; + public static final String LATCH_RELEASES_DESC = + "Number of latch releases."; + public static final StatDefinition LATCH_RELEASES = + new StatDefinition( + LATCH_RELEASES_NAME, + LATCH_RELEASES_DESC); +} diff --git a/src/com/sleepycat/je/latch/LatchSupport.java b/src/com/sleepycat/je/latch/LatchSupport.java new file mode 100644 index 0000000..8493e01 --- /dev/null +++ b/src/com/sleepycat/je/latch/LatchSupport.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.latch; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; + +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.DatabaseUtil; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Supports latch debugging. + * + * In JE test mode (when the JE_TEST system property is set), TRACK_LATCHES + * will be true and related debugging methods may be used to check the number + * of Btree latches held. + * + * CAPTURE_OWNER is also set to true if the system property + * JE_CAPTURE_LATCH_OWNER is defined to true. This will capture a stack trace + * when a latch is acquired exclusively, and the stack trace will be included + * in all error messages. Capturing the stack trace is expensive so this is + * off by default for unit testing. + */ +public class LatchSupport { + + public static final boolean TRACK_LATCHES = DatabaseUtil.TEST; + + static final boolean CAPTURE_OWNER = + Boolean.getBoolean("JE_CAPTURE_LATCH_OWNER"); + + /* + * Indicates whether to use tryLock() with a timeout, instead of a simple + * lock() that waits forever and is uninterruptible. We would like to + * always use timeouts and interruptible latches, but these are new + * features and this boolean allows reverting to the old behavior. + */ + static final boolean INTERRUPTIBLE_WITH_TIMEOUT = true; + + /* Used for Btree latches. */ + public final static LatchTable btreeLatchTable = + TRACK_LATCHES ? (new LatchTable()) : null; + + /* Used for all other latches. */ + public final static LatchTable otherLatchTable = + TRACK_LATCHES ? (new LatchTable()) : null; + + public static void expectBtreeLatchesHeld(final int expectNLatches) { + expectBtreeLatchesHeld(expectNLatches, ""); + } + + /* Used for SizeOf. */ + public static final LatchContext DUMMY_LATCH_CONTEXT = new LatchContext() { + @Override + public int getLatchTimeoutMs() { + return 0; + } + @Override + public String getLatchName() { + return null; + } + @Override + public LatchTable getLatchTable() { + return null; + } + @Override + public EnvironmentImpl getEnvImplForFatalException() { + return null; + } + }; + + public static void expectBtreeLatchesHeld(final int expectNLatches, + final String msg) { + final int nHeld = btreeLatchTable.nLatchesHeld(); + if (nHeld == expectNLatches) { + return; + } + throw unexpectedState(String.format( + "Expected %d Btree latches held but got %d. %s\nLatch table: %s\n", + expectNLatches, nHeld, msg, btreeLatchesHeldToString())); + } + + public static int nBtreeLatchesHeld() { + return btreeLatchTable.nLatchesHeld(); + } + + public static void dumpBtreeLatchesHeld() { + System.out.println(btreeLatchesHeldToString()); + } + + public static String btreeLatchesHeldToString() { + return btreeLatchTable.latchesHeldToString(); + } + + /** + * Should be called when closing the environment, so that residual latches + * don't impact another environment that is opened + */ + public static void clear() { + if (TRACK_LATCHES) { + btreeLatchTable.clear(); + otherLatchTable.clear(); + } + } + + /** + * Record debug info when a latch is acquired. + */ + static void trackAcquire(final Latch latch, final LatchContext context) { + + final LatchTable latchTable = context.getLatchTable(); + if (latchTable == null) { + return; + } + if (!latchTable.add(latch)) { + throw unexpectedState( + "Latch already held." 
+                latch.debugString());
+        }
+    }
+
+    /**
+     * Record debug info when a latch is released.
+     */
+    static void trackRelease(final Latch latch, final LatchContext context) {
+
+        final LatchTable latchTable = context.getLatchTable();
+        if (latchTable == null) {
+            return;
+        }
+        if (!latchTable.remove(latch)) {
+            throw unexpectedState(
+                "Latch not held." + latch.debugString());
+        }
+    }
+
+    static String toString(final Latch latch,
+                           final LatchContext context,
+                           final OwnerInfo lastOwnerInfo) {
+        final StringBuilder builder = new StringBuilder();
+        builder.append(context.getLatchName()).
+            append(" exclusiveOwner: ").
+            append(latch.getExclusiveOwner());
+        if (lastOwnerInfo != null) {
+            lastOwnerInfo.toString(builder);
+        }
+        return builder.toString();
+    }
+
+    static String debugString(final Latch latch,
+                              final LatchContext context,
+                              final OwnerInfo lastOwnerInfo) {
+
+        final StringBuilder builder = new StringBuilder(500);
+        builder.append(context.getLatchName());
+        builder.append(" currentThread: ");
+        builder.append(Thread.currentThread());
+        builder.append(" currentTime: ");
+        builder.append(System.currentTimeMillis());
+
+        if (TRACK_LATCHES) {
+            final LatchTable latchTable = context.getLatchTable();
+            if (latchTable != null) {
+                builder.append(" allLatchesHeld: (");
+                builder.append(latchTable.latchesHeldToString());
+                builder.append(")");
+            }
+        }
+
+        builder.append(" exclusiveOwner: ");
+        final Thread ownerThread = latch.getExclusiveOwner();
+        if (ownerThread != null) {
+            builder.append(ownerThread);
+            if (lastOwnerInfo != null) {
+                lastOwnerInfo.toString(builder);
+            }
+        } else {
+            builder.append("-none-");
+        }
+
+        return builder.toString();
+    }
+
+    static EnvironmentFailureException handleTimeout(
+        final Latch latch,
+        final LatchContext context) {
+
+        final EnvironmentImpl envImpl = context.getEnvImplForFatalException();
+        final Logger logger = envImpl.getLogger();
+        final String msg = latch.debugString();
+
+        LoggerUtils.logMsg(
+            logger, envImpl, Level.SEVERE,
+            "Thread dump follows for latch timeout: " + msg);
+
+        LoggerUtils.fullThreadDump(logger, envImpl, Level.SEVERE);
+
+        return unexpectedState(
+            envImpl, "Latch timeout. " + msg);
+    }
+}
diff --git a/src/com/sleepycat/je/latch/LatchTable.java b/src/com/sleepycat/je/latch/LatchTable.java
new file mode 100644
index 0000000..7771c43
--- /dev/null
+++ b/src/com/sleepycat/je/latch/LatchTable.java
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.latch;
+
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ * Table of latches by thread for debugging.
+ */
+public class LatchTable {
+
+    private ThreadLocal<Set<Object>> latchesByThread;
+
+    LatchTable() {
+        latchesByThread = new ThreadLocal<Set<Object>>();
+    }
+
+    /**
+     * Adds latch acquired by this thread.
+     * @return true if added, false if already present.
+ */ + boolean add(Object latch) { + Set threadLatches = latchesByThread.get(); + if (threadLatches == null) { + threadLatches = new HashSet(); + latchesByThread.set(threadLatches); + } + return threadLatches.add(latch); + } + + /** + * Removes latch acquired by this thread. + * @return true if removed, false if not present. + */ + boolean remove(Object latch) { + Set threadLatches = latchesByThread.get(); + if (threadLatches == null) { + return false; + } else { + return threadLatches.remove(latch); + } + } + + /** + * Returns the number of latches held by this thread. + */ + int nLatchesHeld() { + Set threadLatches = latchesByThread.get(); + if (threadLatches != null) { + return threadLatches.size(); + } else { + return 0; + } + } + + String latchesHeldToString() { + Set threadLatches = latchesByThread.get(); + StringBuilder sb = new StringBuilder(); + if (threadLatches != null) { + Iterator i = threadLatches.iterator(); + while (i.hasNext()) { + sb.append(i.next()).append('\n'); + } + } + return sb.toString(); + } + + void clear() { + latchesByThread = new ThreadLocal>(); + } +} diff --git a/src/com/sleepycat/je/latch/LatchWithStatsImpl.java b/src/com/sleepycat/je/latch/LatchWithStatsImpl.java new file mode 100644 index 0000000..f90177c --- /dev/null +++ b/src/com/sleepycat/je/latch/LatchWithStatsImpl.java @@ -0,0 +1,204 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.latch; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_CONTENTION; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_SUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_UNSUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NO_WAITERS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_RELEASES; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_SELF_OWNED; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; + +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.StatGroup; + +/** + * An exclusive latch with stats. 
+ */ +@SuppressWarnings("serial") +public class LatchWithStatsImpl extends ReentrantLock implements Latch { + + private final LatchContext context; + private OwnerInfo lastOwnerInfo; + private final StatGroup stats; + private final IntStat nAcquiresNoWaiters; + private final IntStat nAcquiresSelfOwned; + private final IntStat nAcquiresWithContention; + private final IntStat nAcquiresNoWaitSuccessful; + private final IntStat nAcquiresNoWaitUnsuccessful; + private final IntStat nReleases; + + LatchWithStatsImpl(final LatchContext context) { + this.context = context; + + stats = new StatGroup( + LatchStatDefinition.GROUP_NAME, + LatchStatDefinition.GROUP_DESC); + nAcquiresNoWaiters = new IntStat(stats, LATCH_NO_WAITERS); + nAcquiresSelfOwned = new IntStat(stats, LATCH_SELF_OWNED); + nAcquiresWithContention = new IntStat(stats, LATCH_CONTENTION); + nAcquiresNoWaitSuccessful = new IntStat(stats, LATCH_NOWAIT_SUCCESS); + nAcquiresNoWaitUnsuccessful = + new IntStat(stats, LATCH_NOWAIT_UNSUCCESS); + nReleases = new IntStat(stats, LATCH_RELEASES); + } + + String getName() { + return context.getLatchName(); + } + + @Override + public void acquireExclusive() { + + if (isHeldByCurrentThread()) { + nAcquiresSelfOwned.increment(); + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch already held: " + debugString()); + } + + if (isLocked()) { + nAcquiresWithContention.increment(); + } else { + nAcquiresNoWaiters.increment(); + } + + if (LatchSupport.INTERRUPTIBLE_WITH_TIMEOUT) { + try { + if (!tryLock( + context.getLatchTimeoutMs(), TimeUnit.MILLISECONDS)) { + throw LatchSupport.handleTimeout(this, context); + } + } catch (InterruptedException e) { + throw new ThreadInterruptedException( + context.getEnvImplForFatalException(), e); + } + } else { + lock(); + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackAcquire(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = new OwnerInfo(context); + } + assert EnvironmentImpl.maybeForceYield(); + } + + @Override + public boolean acquireExclusiveNoWait() { + + if (isHeldByCurrentThread()) { + nAcquiresSelfOwned.increment(); + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch already held: " + debugString()); + } + + if (!tryLock()) { + nAcquiresNoWaitUnsuccessful.increment(); + return false; + } + + nAcquiresNoWaitSuccessful.increment(); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackAcquire(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = new OwnerInfo(context); + } + assert EnvironmentImpl.maybeForceYield(); + return true; + } + + @Override + public void release() { + if (!isHeldByCurrentThread()) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch not held: " + debugString()); + } + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackRelease(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = null; + } + unlock(); + nReleases.increment(); + } + + @Override + public void releaseIfOwner() { + if (!isHeldByCurrentThread()) { + return; + } + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackRelease(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = null; + } + unlock(); + nReleases.increment(); + } + + @Override + public boolean isOwner() { + return isHeldByCurrentThread(); + } + + @Override + public boolean isExclusiveOwner() { + return isHeldByCurrentThread(); + } + + @Override + public Thread getExclusiveOwner() { + return getOwner(); + } + + @Override + public int 
getNWaiters() {
+        return getQueueLength();
+    }
+
+    @Override
+    public StatGroup getStats() {
+        return stats;
+    }
+
+    @Override
+    public void clearStats() {
+        stats.clear();
+    }
+
+    @Override
+    public String toString() {
+        return LatchSupport.toString(this, context, lastOwnerInfo);
+    }
+
+    @Override
+    public String debugString() {
+        return LatchSupport.debugString(this, context, lastOwnerInfo);
+    }
+}
diff --git a/src/com/sleepycat/je/latch/OwnerInfo.java b/src/com/sleepycat/je/latch/OwnerInfo.java
new file mode 100644
index 0000000..6125d8c
--- /dev/null
+++ b/src/com/sleepycat/je/latch/OwnerInfo.java
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.latch;
+
+import com.sleepycat.je.utilint.LoggerUtils;
+
+class OwnerInfo {
+
+    private final Thread thread;
+    private final long acquireTime;
+    private final Throwable acquireStack;
+
+    OwnerInfo(final LatchContext context) {
+        thread = Thread.currentThread();
+        acquireTime = System.currentTimeMillis();
+        acquireStack =
+            new Exception("Latch Acquired: " + context.getLatchName());
+    }
+
+    void toString(StringBuilder builder) {
+        builder.append(" captureThread: ");
+        builder.append(thread);
+        builder.append(" acquireTime: ");
+        builder.append(acquireTime);
+        if (acquireStack != null) {
+            builder.append("\n");
+            builder.append(LoggerUtils.getStackTrace(acquireStack));
+        } else {
+            builder.append(" -no stack-");
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/latch/SharedLatch.java b/src/com/sleepycat/je/latch/SharedLatch.java
new file mode 100644
index 0000000..7471a2c
--- /dev/null
+++ b/src/com/sleepycat/je/latch/SharedLatch.java
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.latch;
+
+/**
+ * Extends Latch to provide a reader-writer/shared-exclusive latch. This is
+ * implemented with Java's ReentrantReadWriteLock, which is extended for a
+ * few reasons (see Latch).
+ *
+ * This interface may also be implemented using an underlying exclusive
+ * latch. This is done so that a single interface can be used for all INs,
+ * even though BIN latches are exclusive-only. See the method javadoc for
+ * behavior in exclusive-only mode.
+ */
+public interface SharedLatch extends Latch {
+
+    /** Returns whether this latch is exclusive-only. */
+    boolean isExclusiveOnly();
+
+    /**
+     * Acquires a latch for shared/read access.
+     *
+     * In exclusive-only mode, calling this method is equivalent to calling
+     * {@link #acquireExclusive()}.
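+     *
+     * For illustration, the shared-read pattern (latch is a hypothetical
+     * SharedLatch from LatchFactory.createSharedLatch):
+     *
+     *   latch.acquireShared();
+     *   try {
+     *       // ... read under the latch ...
+     *   } finally {
+     *       latch.release();
+     *   }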
+ */ + void acquireShared(); +} diff --git a/src/com/sleepycat/je/latch/SharedLatchImpl.java b/src/com/sleepycat/je/latch/SharedLatchImpl.java new file mode 100644 index 0000000..792dad6 --- /dev/null +++ b/src/com/sleepycat/je/latch/SharedLatchImpl.java @@ -0,0 +1,194 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.latch; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.StatGroup; + +@SuppressWarnings("serial") +public class SharedLatchImpl extends ReentrantReadWriteLock + implements SharedLatch { + + private final LatchContext context; + private OwnerInfo lastOwnerInfo; + + SharedLatchImpl(final boolean fair, final LatchContext context) { + super(fair); + this.context = context; + } + + @Override + public boolean isExclusiveOnly() { + return false; + } + + @Override + public void acquireExclusive() { + doAcquireExclusive(false /*noWait*/); + } + + @Override + public boolean acquireExclusiveNoWait() { + return doAcquireExclusive(true /*noWait*/); + } + + private boolean doAcquireExclusive(final boolean noWait) { + if (isWriteLockedByCurrentThread() || (getReadHoldCount() > 0)) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch already held: " + debugString()); + } + + if (noWait) { + if (!writeLock().tryLock()) { + return false; + } + } else if (LatchSupport.INTERRUPTIBLE_WITH_TIMEOUT) { + try { + if (!writeLock().tryLock( + context.getLatchTimeoutMs(), TimeUnit.MILLISECONDS)) { + throw LatchSupport.handleTimeout(this, context); + } + } catch (InterruptedException e) { + throw new ThreadInterruptedException( + context.getEnvImplForFatalException(), e); + } + } else { + writeLock().lock(); + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackAcquire(this, context); + } + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = new OwnerInfo(context); + } + assert EnvironmentImpl.maybeForceYield(); + return true; + } + + @Override + public void acquireShared() { + if (isWriteLockedByCurrentThread()) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch already held exclusively: " + debugString()); + } + + if (getReadHoldCount() > 0) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch already held non-exclusively: " + debugString()); + } + + if (LatchSupport.INTERRUPTIBLE_WITH_TIMEOUT) { + try { + if (!readLock().tryLock( + context.getLatchTimeoutMs(), TimeUnit.MILLISECONDS)) { + throw LatchSupport.handleTimeout(this, context); + } + } catch (InterruptedException e) { + throw new ThreadInterruptedException( + context.getEnvImplForFatalException(), e); + } + } else { + readLock().lock(); + } + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackAcquire(this, context); + } + assert EnvironmentImpl.maybeForceYield(); + } + + @Override + public void release() { + 
doRelease(false /*ifOwner*/); + } + + @Override + public void releaseIfOwner() { + doRelease(true /*ifOwner*/); + } + + private void doRelease(final boolean ifOwner) { + if (getReadHoldCount() > 0) { + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackRelease(this, context); + } + readLock().unlock(); + return; + } + if (isWriteLockedByCurrentThread()) { + if (LatchSupport.CAPTURE_OWNER) { + lastOwnerInfo = null; + } + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.trackRelease(this, context); + } + writeLock().unlock(); + return; + } + if (!ifOwner) { + throw unexpectedState( + context.getEnvImplForFatalException(), + "Latch not held: " + debugString()); + } + } + + @Override + public Thread getExclusiveOwner() { + return getOwner(); + } + + @Override + public boolean isExclusiveOwner() { + return isWriteLockedByCurrentThread(); + } + + @Override + public boolean isOwner() { + return isWriteLockedByCurrentThread() || (getReadHoldCount() > 0); + } + + @Override + public int getNWaiters() { + return getQueueLength(); + } + + @Override + public StatGroup getStats() { + throw unexpectedState(); + } + + @Override + public void clearStats() { + throw unexpectedState(); + } + + @Override + public String toString() { + return LatchSupport.toString(this, context, lastOwnerInfo); + } + + @Override + public String debugString() { + return LatchSupport.debugString(this, context, lastOwnerInfo); + } +} diff --git a/src/com/sleepycat/je/latch/TimingLatch.java b/src/com/sleepycat/je/latch/TimingLatch.java new file mode 100644 index 0000000..927f0ea --- /dev/null +++ b/src/com/sleepycat/je/latch/TimingLatch.java @@ -0,0 +1,167 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.latch; + +import com.sleepycat.je.utilint.EventTrace; + +/** + * A subclass of Latch that may be used for debugging performance issues. This + * latch can be used in place of an exclusive latch or object mutex in order to + * see who is waiting for a latch acquisition, how long they're waiting, and + * who the previous holder was. It crudely writes to System.out, but this can + * easily be changed to a java.util.Log or EventTrace as desired. You can + * specify a threshold for the wait and previous holder time (nanos). + * + * Note that this class has not recently been used because it is not + * implemented for shared (Btree) latches. The next time it is used, it should + * be integrated with the LatchFactory. 
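+ *
+ * A construction sketch (illustrative only; assumes a LatchContext supplied
+ * by the caller, with both thresholds in nanos):
+ * <pre>
+ *     // debug=true: report waits over 100000 ns or prior holds over
+ *     // 200000 ns
+ *     TimingLatch latch = new TimingLatch(context, true, 100000, 200000);
+ * </pre>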
+ */ +public class TimingLatch extends LatchImpl { + private static final int WAIT_THRESHOLD_NANOS = 50000; + private static final int PREV_HOLD_THRESHOLD_NANOS = 50000; + + private long acquireTime; + private long releaseTime; + private Thread lastThread; + private final boolean debug; + private final int waitThreshold; + private final int holdThreshold; + + public TimingLatch(LatchContext context, boolean debug) { + super(context); + this.debug = debug; + this.waitThreshold = WAIT_THRESHOLD_NANOS; + this.holdThreshold = PREV_HOLD_THRESHOLD_NANOS; + } + + public TimingLatch(LatchContext context, + boolean debug, + int waitThreshold, + int holdThreshold) { + super(context); + this.debug = debug; + this.waitThreshold = waitThreshold; + this.holdThreshold = holdThreshold; + } + + public class AcquireRequestEvent extends EventTrace { + private long startTime; + private String name; + Thread us; + + public AcquireRequestEvent() { + super(); + startTime = System.nanoTime(); + name = getName(); + us = Thread.currentThread(); + } + + public String toString() { + StringBuilder sb = + new StringBuilder("AcquireRequestEvent for " + name + " "); + sb.append(us).append(" at "). + append(String.format("%,d", startTime)); + return sb.toString(); + } + } + + public class AcquireCompleteEvent extends EventTrace { + private long startTime; + private long waitTime; + private String name; + Thread us; + + public AcquireCompleteEvent(long startTime, long waitTime) { + super(); + this.startTime = startTime; + this.waitTime = waitTime; + name = getName(); + us = Thread.currentThread(); + } + + public String toString() { + StringBuilder sb = + new StringBuilder("AcquireCompleteEvent for " + name + " "); + sb.append(us).append(" at "). + append(String.format("%,d", startTime)). + append(" Took: ").append(String.format("%,d", waitTime)); + return sb.toString(); + } + } + + public class ReleaseEvent extends EventTrace { + private long startTime; + private String name; + Thread us; + + public ReleaseEvent(long time) { + super(); + startTime = time; + name = getName(); + us = Thread.currentThread(); + } + + public String toString() { + StringBuilder sb = + new StringBuilder("ReleaseEvent for " + name + " "); + sb.append(us).append(" at "). 
+ append(String.format("%,d", startTime)); + return sb.toString(); + } + } + + public void release() { + releaseTime = System.nanoTime(); + EventTrace.addEvent(new ReleaseEvent(releaseTime)); + super.release(); + } + + public void acquireExclusive() { + if (!debug) { + super.acquireExclusive(); + return; + } + + try { + EventTrace.addEvent(new AcquireRequestEvent()); + if (acquireExclusiveNoWait()) { + EventTrace.addEvent + (new AcquireCompleteEvent(System.nanoTime(), 0)); + return; + } + + long startWait = System.nanoTime(); + super.acquireExclusive(); + long endWait = System.nanoTime(); + long ourWaitTime = endWait - startWait; + EventTrace.addEvent + (new AcquireCompleteEvent(System.nanoTime(), ourWaitTime)); + long previousHoldTime = releaseTime - acquireTime; + if (previousHoldTime > holdThreshold || + ourWaitTime > waitThreshold) { + System.out.println + (String.format("%1tT %s waited %,d nanosec for %s\n" + + " Previous held by %s for %,d nanosec.", + System.currentTimeMillis(), + Thread.currentThread(), ourWaitTime, + getName(), lastThread, previousHoldTime)); + EventTrace.dumpEvents(System.out); + EventTrace.disableEvents = false; + } + } finally { + acquireTime = System.nanoTime(); + lastThread = Thread.currentThread(); + } + } +} diff --git a/src/com/sleepycat/je/latch/package-info.java b/src/com/sleepycat/je/latch/package-info.java new file mode 100644 index 0000000..fac04d5 --- /dev/null +++ b/src/com/sleepycat/je/latch/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Latches wrap Java ReentrantLock and ReentrantReadWriteLock and + * add restrictions and debugging support; used mainly for IN locking. + */ +package com.sleepycat.je.latch; \ No newline at end of file diff --git a/src/com/sleepycat/je/log/BasicVersionedWriteLoggable.java b/src/com/sleepycat/je/log/BasicVersionedWriteLoggable.java new file mode 100644 index 0000000..482a194 --- /dev/null +++ b/src/com/sleepycat/je/log/BasicVersionedWriteLoggable.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +/** + * A basic implementation of {@link VersionedWriteLoggable} that provides for + * writing in a single format by default. 
Starting with log version 9, as + * specified by {@link LogEntryType#LOG_VERSION_REPLICATE_OLDER}, loggable + * classes whose log format has changed since the previous log version will + * need to override the {@link VersionedWriteLoggable#getLastFormatChange}, + * {@link #getLogSize(int, boolean)} and {@link #writeToLog(ByteBuffer, + * int, boolean)} methods to support writing the entry in earlier log formats. + */ +public abstract class BasicVersionedWriteLoggable + implements VersionedWriteLoggable { + + /** + * Creates an instance of this class. + */ + public BasicVersionedWriteLoggable() { + } + + @Override + public int getLogSize() { + return getLogSize(LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer) { + writeToLog( + logBuffer, LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public boolean hasReplicationFormat() { + return false; + } + + @Override + public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer, + final int srcVersion, + final int destVersion) { + return false; + } +} diff --git a/src/com/sleepycat/je/log/CheckpointFileReader.java b/src/com/sleepycat/je/log/CheckpointFileReader.java new file mode 100644 index 0000000..25b9761 --- /dev/null +++ b/src/com/sleepycat/je/log/CheckpointFileReader.java @@ -0,0 +1,99 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * CheckpointFileReader searches for root and checkpoint entries. + */ +public class CheckpointFileReader extends FileReader { + /* Status about the last entry. */ + private boolean isDbTree; + private boolean isCheckpointEnd; + private boolean isCheckpointStart; + + /** + * Create this reader to start at a given LSN. + */ + public CheckpointFileReader(EnvironmentImpl env, + int readBufferSize, + boolean forward, + long startLsn, + long finishLsn, + long endOfFileLsn) + throws DatabaseException { + + super(env, readBufferSize, forward, startLsn, + null, endOfFileLsn, finishLsn); + } + + /** + * @return true if this is a targeted entry. + */ + @Override + protected boolean isTargetEntry() { + byte logEntryTypeNumber = currentEntryHeader.getType(); + boolean isTarget = false; + isDbTree = false; + isCheckpointEnd = false; + isCheckpointStart = false; + if (LogEntryType.LOG_CKPT_END.equalsType(logEntryTypeNumber)) { + isTarget = true; + isCheckpointEnd = true; + } else if (LogEntryType.LOG_CKPT_START.equalsType + (logEntryTypeNumber)) { + isTarget = true; + isCheckpointStart = true; + } else if (LogEntryType.LOG_DBTREE.equalsType + (logEntryTypeNumber)) { + isTarget = true; + isDbTree = true; + } + return isTarget; + } + + /** + * This reader instantiates the first object of a given log entry + */ + @Override + protected boolean processEntry(ByteBuffer entryBuffer) { + /* Don't need to read the entry, since we just use the LSN. 
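The caller gets that LSN via FileReader.getLastLsn, so the entry need not be parsed here.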
*/ + return true; + } + + /** + * @return true if last entry was a DbTree entry. + */ + public boolean isDbTree() { + return isDbTree; + } + + /** + * @return true if last entry was a checkpoint end entry. + */ + public boolean isCheckpointEnd() { + return isCheckpointEnd; + } + + /** + * @return true if last entry was a checkpoint start entry. + */ + public boolean isCheckpointStart() { + return isCheckpointStart; + } +} diff --git a/src/com/sleepycat/je/log/ChecksumException.java b/src/com/sleepycat/je/log/ChecksumException.java new file mode 100644 index 0000000..df8a142 --- /dev/null +++ b/src/com/sleepycat/je/log/ChecksumException.java @@ -0,0 +1,34 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +/** + * Indicates that a checksum validation failed. A checked exception is used so + * it can be caught and handled internally in some cases. When not handled + * internally, it is wrapped with an EnvironmentFailureException with + * EnvironmentFailureReason.LOG_CHECKSUM before being propagated through the + * public API. + */ +public class ChecksumException extends Exception { + + private static final long serialVersionUID = 1; + + public ChecksumException(String message) { + super(message); + } + + public ChecksumException(String message, Exception e) { + super(message, e); + } +} diff --git a/src/com/sleepycat/je/log/ChecksumValidator.java b/src/com/sleepycat/je/log/ChecksumValidator.java new file mode 100644 index 0000000..cad740f --- /dev/null +++ b/src/com/sleepycat/je/log/ChecksumValidator.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.zip.Checksum; + +import com.sleepycat.je.utilint.Adler32; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Checksum validator is used to check checksums on log entries. + */ +public class ChecksumValidator { + private static final boolean DEBUG = false; + + private Checksum cksum; + + public ChecksumValidator() { + cksum = Adler32.makeChecksum(); + } + + public void reset() { + cksum.reset(); + } + + /** + * Add this byte buffer to the checksum. Assume the byte buffer is already + * positioned at the data. 
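+     *
+     * A typical sequence (sketch; the expected checksum, length and LSN
+     * come from the log entry being validated):
+     * <pre>
+     *     validator.reset();
+     *     validator.update(entryBuffer, entrySize);
+     *     validator.validate(expectedChecksum, lsn);
+     * </pre>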
+ * @param buf target buffer + * @param length of data + */ + public void update(ByteBuffer buf, int length) + throws ChecksumException { + + if (buf == null) { + throw new ChecksumException( + "null buffer given to checksum validation, probably " + + " result of 0's in log file, len=" + length); + } + + int bufStart = buf.position(); + + if (DEBUG) { + System.out.println("bufStart = " + bufStart + + " length = " + length); + } + + update(buf.array(), bufStart + buf.arrayOffset(), length); + } + + public void update(byte[] buf, int offset, int length) { + cksum.update(buf, offset, length); + } + + void validate(long expectedChecksum, long lsn) + throws ChecksumException { + + if (expectedChecksum != cksum.getValue()) { + throw new ChecksumException + ("Location " + DbLsn.getNoFormatString(lsn) + + " expected " + expectedChecksum + " got " + cksum.getValue()); + } + } + + public void validate(long expectedChecksum, long fileNum, long fileOffset) + throws ChecksumException { + + if (expectedChecksum != cksum.getValue()) { + long problemLsn = DbLsn.makeLsn(fileNum, fileOffset); + + throw new ChecksumException + ("Location " + DbLsn.getNoFormatString(problemLsn) + + " expected " + expectedChecksum + " got " + + cksum.getValue()); + } + } +} diff --git a/src/com/sleepycat/je/log/CleanerFileReader.java b/src/com/sleepycat/je/log/CleanerFileReader.java new file mode 100644 index 0000000..ef001d1 --- /dev/null +++ b/src/com/sleepycat/je/log/CleanerFileReader.java @@ -0,0 +1,343 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.cleaner.BaseUtilizationTracker; +import com.sleepycat.je.cleaner.ExpirationTracker; +import com.sleepycat.je.cleaner.FileSummary; +import com.sleepycat.je.cleaner.INSummary; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.BINDeltaLogEntry; +import com.sleepycat.je.log.entry.OldBINDeltaLogEntry; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.OldBINDelta; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * CleanerFileReader scans log files for INs and LNs. 
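+ *
+ * The intended read-loop shape (a sketch; readNextEntry is the protocol
+ * inherited from FileReader, and dbImpl is the caller's DatabaseImpl):
+ * <pre>
+ *     while (reader.readNextEntry()) {
+ *         if (reader.isLN()) {
+ *             LNLogEntry entry = reader.getLNLogEntry();
+ *             // process leaf entry
+ *         } else if (reader.isIN()) {
+ *             IN in = reader.getIN(dbImpl);
+ *             // process IN
+ *         }
+ *     }
+ * </pre>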
+ */ +public class CleanerFileReader extends FileReader { + private static final byte IS_LN = 0; + private static final byte IS_IN = 1; + private static final byte IS_BIN_DELTA = 2; + private static final byte IS_OLD_BIN_DELTA = 3; + private static final byte IS_DBTREE = 4; + private static final byte IS_FILEHEADER = 5; + + private final Map targetEntryMap; + private LogEntry targetLogEntry; + private byte targetCategory; + private final FileSummary fileSummary; + private final INSummary inSummary; + private final ExpirationTracker expTracker; + + /** The first VLSN, or null if none has been found */ + private VLSN firstVLSN = null; + + private VLSN lastVLSN = VLSN.NULL_VLSN; + + /** + * Create this reader to start at a given LSN. + * @param env The relevant EnvironmentImpl. + * @param readBufferSize buffer size in bytes for reading in log. + * @param startLsn where to start in the log, or null for the beginning. + * @param fileNum single file number. + * @param fileSummary returns true utilization. + * @param inSummary returns IN utilization. + * @param expTracker returns expiration info, if non-null. + */ + public CleanerFileReader(EnvironmentImpl env, + int readBufferSize, + long startLsn, + Long fileNum, + FileSummary fileSummary, + INSummary inSummary, + ExpirationTracker expTracker) { + super(env, + readBufferSize, + true, // forward + startLsn, + fileNum, // single file number + DbLsn.NULL_LSN, // endOfFileLsn + DbLsn.NULL_LSN); // finishLsn + + this.fileSummary = fileSummary; + this.inSummary = inSummary; + this.expTracker = expTracker; + + targetEntryMap = new HashMap(); + + for (LogEntryType entryType : LogEntryType.getAllTypes()) { + if (entryType.isLNType()) { + addTargetType(IS_LN, entryType); + } + + /* + * Note that DBIN/DIN are not included because they are + * automatically considered obsolete. + */ + if (entryType.isINType()) { + addTargetType(IS_IN, entryType); + } + } + addTargetType(IS_BIN_DELTA, LogEntryType.LOG_BIN_DELTA); + addTargetType(IS_OLD_BIN_DELTA, LogEntryType.LOG_OLD_BIN_DELTA); + addTargetType(IS_DBTREE, LogEntryType.LOG_DBTREE); + addTargetType(IS_FILEHEADER, LogEntryType.LOG_FILE_HEADER); + } + + private void addTargetType(byte category, LogEntryType entryType) + throws DatabaseException { + + targetEntryMap.put(entryType, + new EntryInfo(entryType.getNewLogEntry(), + category)); + } + + /** + * Process the header to track the last VLSN and count true utilization. + * Then read the entry and return true if the LogEntryType is of interest. + * + * We don't override isTargetEntry so it always returns true and we can + * count utilization correctly here in processEntry. We call getLastLsn to + * count utilization and this is not allowed from isTargetEntry. + */ + @Override + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + final LogEntryType type = + LogEntryType.findType(currentEntryHeader.getType()); + final int size = getLastEntrySize(); + + /* Count true utilization for new log entries. 
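The file header entry itself is excluded from these counts.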
*/ + if (currentEntryHeader.getType() != + LogEntryType.LOG_FILE_HEADER.getTypeNum()) { + fileSummary.totalCount += 1; + fileSummary.totalSize += size; + if (BaseUtilizationTracker.trackObsoleteInfo(type)) { + if (BaseUtilizationTracker.isLNType(type)) { + fileSummary.totalLNCount += 1; + fileSummary.totalLNSize += size; + } else { + fileSummary.totalINCount += 1; + fileSummary.totalINSize += size; + if (type.isINType()) { + inSummary.totalINCount += 1; + inSummary.totalINSize += size; + } + if (type.equals(LogEntryType.LOG_BIN_DELTA) || + type.equals(LogEntryType.LOG_OLD_BIN_DELTA)) { + inSummary.totalBINDeltaCount += 1; + inSummary.totalBINDeltaSize += size; + } + } + } + } + + /* Invisible entries should not be processed further. */ + if (currentEntryHeader.isInvisible()) { + skipEntry(entryBuffer); + countObsolete(); + return false; + } + + /* Maintain first and last VLSN encountered. */ + if (currentEntryHeader.getReplicated()) { + final VLSN vlsn = currentEntryHeader.getVLSN(); + if (vlsn != null) { + + /* Use a null comparison in this inner loop, for speed */ + if (firstVLSN == null) { + firstVLSN = vlsn; + } + assert (vlsn.compareTo(lastVLSN) > 0) : + "vlsns out of order, last=" + lastVLSN + + " current=" + vlsn; + lastVLSN = vlsn; + } + } + + /* + * Call readEntry and return true if this is a LogEntryType of + * interest. + */ + final EntryInfo info = targetEntryMap.get(type); + if (info == null) { + skipEntry(entryBuffer); + countObsolete(); + return false; + } + targetCategory = info.targetCategory; + targetLogEntry = info.targetLogEntry; + targetLogEntry.readEntry(envImpl, currentEntryHeader, entryBuffer); + return true; + } + + /** + * Records the current log entry as obsolete in the FileSummary used to + * count true utilization. + */ + public void countObsolete() { + final LogEntryType type = + LogEntryType.findType(currentEntryHeader.getType()); + if (!BaseUtilizationTracker.trackObsoleteInfo(type)) { + return; + } + final int size = getLastEntrySize(); + if (BaseUtilizationTracker.isLNType(type)) { + fileSummary.obsoleteLNCount += 1; + fileSummary.obsoleteLNSize += size; + fileSummary.obsoleteLNSizeCounted += 1; + } else { + fileSummary.obsoleteINCount += 1; + if (type.isINType()) { + inSummary.obsoleteINCount += 1; + inSummary.obsoleteINSize += size; + } + if (type.equals(LogEntryType.LOG_BIN_DELTA) || + type.equals(LogEntryType.LOG_OLD_BIN_DELTA)) { + inSummary.obsoleteBINDeltaCount += 1; + inSummary.obsoleteBINDeltaSize += size; + } + } + } + + public void countExpired() { + if (expTracker != null) { + expTracker.track(targetLogEntry, getLastEntrySize()); + } + } + + /** + * @return true if the last entry was an IN. + */ + public boolean isIN() { + return (targetCategory == IS_IN); + } + + /** + * @return true if the last entry was a live BIN delta. + */ + public boolean isBINDelta() { + return (targetCategory == IS_BIN_DELTA); + } + + /** + * @return true if the last entry was an Old BIN-delta. + */ + public boolean isOldBINDelta() { + return (targetCategory == IS_OLD_BIN_DELTA); + } + + /** + * @return true if the last entry was a LN. + */ + public boolean isLN() { + return (targetCategory == IS_LN); + } + + /** + * @return true if the last entry was a DbTree entry. + */ + public boolean isDbTree() { + return (targetCategory == IS_DBTREE); + } + + public boolean isFileHeader() { + return (targetCategory == IS_FILEHEADER); + } + + /** + * Get the last LN log entry seen by the reader. 
Note that
+     * LNLogEntry.postFetchInit must be called before calling certain
+     * LNLogEntry methods.
+     */
+    public LNLogEntry getLNLogEntry() {
+        return (LNLogEntry) targetLogEntry;
+    }
+
+    /**
+     * Get the last entry seen by the reader as an IN.
+     */
+    public IN getIN(DatabaseImpl dbImpl) {
+        return ((INLogEntry) targetLogEntry).getIN(dbImpl);
+    }
+
+    public BIN getBINDelta() {
+        return ((BINDeltaLogEntry) targetLogEntry).getMainItem();
+    }
+
+    public OldBINDelta getOldBINDelta() {
+        return ((OldBINDeltaLogEntry) targetLogEntry).getMainItem();
+    }
+
+    public FileHeader getFileHeader() {
+        return (FileHeader) (targetLogEntry.getMainItem());
+    }
+
+    /**
+     * Get the last databaseId seen by the reader.
+     */
+    public DatabaseId getDatabaseId() {
+        if (targetCategory == IS_LN) {
+            return ((LNLogEntry) targetLogEntry).getDbId();
+        } else if ((targetCategory == IS_IN) ||
+                   (targetCategory == IS_BIN_DELTA)) {
+            return ((INLogEntry) targetLogEntry).getDbId();
+        } else if (targetCategory == IS_OLD_BIN_DELTA) {
+            return ((OldBINDeltaLogEntry) targetLogEntry).getDbId();
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Returns the first VLSN encountered, or NULL_VLSN if no entries were
+     * replicated.
+     */
+    public VLSN getFirstVLSN() {
+        return (firstVLSN != null) ? firstVLSN : VLSN.NULL_VLSN;
+    }
+
+    /**
+     * Returns the last VLSN encountered, or NULL_VLSN if no entries were
+     * replicated.
+     */
+    public VLSN getLastVLSN() {
+        return lastVLSN;
+    }
+
+    private static class EntryInfo {
+        public LogEntry targetLogEntry;
+        public byte targetCategory;
+
+        EntryInfo(LogEntry targetLogEntry, byte targetCategory) {
+            this.targetLogEntry = targetLogEntry;
+            this.targetCategory = targetCategory;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/log/DbOpReplicationContext.java b/src/com/sleepycat/je/log/DbOpReplicationContext.java
new file mode 100644
index 0000000..fd8c52d
--- /dev/null
+++ b/src/com/sleepycat/je/log/DbOpReplicationContext.java
@@ -0,0 +1,107 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log;
+
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.ReplicatedDatabaseConfig;
+import com.sleepycat.je.log.entry.DbOperationType;
+import com.sleepycat.je.log.entry.NameLNLogEntry;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * This subclass of ReplicationContext adds information specific to database
+ * operations to the replication context passed from operation-aware code down
+ * to the logging layer. It's a way to transport enough information through the
+ * NameLNLogEntry to logically replicate database operations.
+ */
+public class DbOpReplicationContext extends ReplicationContext {
+
+    /*
+     * Convenience static instance used when you know this database operation
+     * will not be replicated, either because it's executing on a
+     * non-replicated node or it's a local operation for a local database.
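+     *
+     * Sketch of intended use: pass NO_REPLICATE as the ReplicationContext
+     * argument when logging a NameLN for an operation that must not enter
+     * the replication stream.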
+ */ + public static DbOpReplicationContext NO_REPLICATE = + new DbOpReplicationContext(false, // inReplicationStream + DbOperationType.NONE); + + final private DbOperationType opType; + private ReplicatedDatabaseConfig createConfig = null; + private DatabaseId truncateOldDbId = null; + + /** + * Create a replication context for logging a database operation NameLN on + * the master. + */ + public DbOpReplicationContext(boolean inReplicationStream, + DbOperationType opType) { + super(inReplicationStream); + this.opType = opType; + } + + /** + * Create a repContext for executing a databaseOperation on the client. + */ + public DbOpReplicationContext(VLSN vlsn, + NameLNLogEntry nameLNEntry) { + + /* + * Initialize the context with the VLSN that was shipped with the + * replicated log entry. + */ + + super(vlsn); + opType = nameLNEntry.getOperationType(); + + if (DbOperationType.isWriteConfigType(opType)) { + createConfig = nameLNEntry.getReplicatedCreateConfig(); + } else if (opType == DbOperationType.TRUNCATE) { + truncateOldDbId = nameLNEntry.getTruncateOldDbId(); + } + } + + @Override + public DbOperationType getDbOperationType() { + return opType; + } + + public void setCreateConfig(ReplicatedDatabaseConfig createConfig) { + assert(DbOperationType.isWriteConfigType(opType)); + this.createConfig = createConfig; + } + + public ReplicatedDatabaseConfig getCreateConfig() { + assert(DbOperationType.isWriteConfigType(opType)); + return createConfig; + } + + public void setTruncateOldDbId(DatabaseId truncateOldDbId) { + assert(opType == DbOperationType.TRUNCATE); + this.truncateOldDbId = truncateOldDbId; + } + + public DatabaseId getTruncateOldDbId() { + assert(opType == DbOperationType.TRUNCATE); + return truncateOldDbId; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(super.toString()); + sb.append("opType=").append(opType); + sb.append("truncDbId=").append(truncateOldDbId); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/log/DumpFileReader.java b/src/com/sleepycat/je/log/DumpFileReader.java new file mode 100644 index 0000000..39515ac --- /dev/null +++ b/src/com/sleepycat/je/log/DumpFileReader.java @@ -0,0 +1,159 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.util.HashSet; +import java.util.Set; +import java.util.StringTokenizer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; + +/** + * The DumpFileReader prints every log entry to stdout. + */ +public abstract class DumpFileReader extends FileReader { + + /* A set of the entry type numbers that this DumpFileReader should dump. */ + private final Set targetEntryTypes; + + /* A set of the txn ids that this DumpFileReader should dump. */ + private final Set targetTxnIds; + + /* A set of the db ids that this DumpFileReader should dump. */ + private final Set targetDbIds; + + /* If true, dump the long version of the entry. 
*/ + protected final boolean verbose; + + /* If true, only dump entries that have a VLSN */ + private final boolean repEntriesOnly; + + /** + * Create this reader to start at a given LSN. + */ + public DumpFileReader(EnvironmentImpl env, + int readBufferSize, + long startLsn, + long finishLsn, + long endOfFileLsn, + String entryTypes, + String dbIds, + String txnIds, + boolean verbose, + boolean repEntriesOnly, + boolean forwards) + throws DatabaseException { + + super(env, + readBufferSize, + forwards, + startLsn, + null, // single file number + endOfFileLsn, // end of file lsn + finishLsn); // finish lsn + + /* If entry types is not null, record the set of target entry types. */ + targetEntryTypes = new HashSet<>(); + if (entryTypes != null) { + StringTokenizer tokenizer = new StringTokenizer(entryTypes, ","); + while (tokenizer.hasMoreTokens()) { + String typeString = tokenizer.nextToken(); + targetEntryTypes.add(Byte.valueOf(typeString.trim())); + } + } + /* If db ids is not null, record the set of target db ids. */ + targetDbIds = new HashSet<>(); + if (dbIds != null) { + StringTokenizer tokenizer = new StringTokenizer(dbIds, ","); + while (tokenizer.hasMoreTokens()) { + String dbIdString = tokenizer.nextToken(); + targetDbIds.add(Long.valueOf(dbIdString.trim())); + } + } + /* If txn ids is not null, record the set of target txn ids. */ + targetTxnIds = new HashSet<>(); + if (txnIds != null) { + StringTokenizer tokenizer = new StringTokenizer(txnIds, ","); + while (tokenizer.hasMoreTokens()) { + String txnIdString = tokenizer.nextToken(); + targetTxnIds.add(Long.valueOf(txnIdString.trim())); + } + } + this.verbose = verbose; + this.repEntriesOnly = repEntriesOnly; + } + + protected boolean needMatchEntry() { + return !targetTxnIds.isEmpty() || !targetDbIds.isEmpty(); + } + + protected boolean matchEntry(LogEntry entry) { + if (!targetTxnIds.isEmpty()) { + LogEntryType type = entry.getLogType(); + if (!type.isTransactional()) { + /* If -tx spec'd and not a transactional entry, don't dump. */ + return false; + } + if (!targetTxnIds.contains(entry.getTransactionId())) { + /* Not in the list of txn ids. */ + return false; + } + } + if (!targetDbIds.isEmpty()) { + DatabaseId dbId = entry.getDbId(); + if (dbId == null) { + /* If -db spec'd and not a db entry, don't dump. */ + return false; + } + if (!targetDbIds.contains(dbId.getId())) { + /* Not in the list of db ids. */ + return false; + } + } + + return true; + } + + /** + * @return true if this reader should process this entry, or just skip over + * it. + */ + @Override + protected boolean isTargetEntry() { + if (repEntriesOnly && !currentEntryHeader.getReplicated()) { + + /* + * Skip this entry; we only want replicated entries, and this + * one is not replicated. + */ + return false; + } + + if (targetEntryTypes.size() == 0) { + /* We want to dump all entry types. */ + return true; + } + return targetEntryTypes.contains + (Byte.valueOf(currentEntryHeader.getType())); + } + + /** + * @param ignore + */ + public void summarize(boolean ignore /*csvFile*/) { + } +} diff --git a/src/com/sleepycat/je/log/FSyncManager.java b/src/com/sleepycat/je/log/FSyncManager.java new file mode 100644 index 0000000..007e108 --- /dev/null +++ b/src/com/sleepycat/je/log/FSyncManager.java @@ -0,0 +1,524 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log;
+
+import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_FSYNCS;
+import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_FSYNC_REQUESTS;
+import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_TIMEOUTS;
+import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_N_GROUP_COMMIT_REQUESTS;
+import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_N_GROUP_COMMIT_WAITS;
+import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_N_LOG_INTERVAL_EXCEEDED;
+import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_N_LOG_MAX_GROUP_COMMIT;
+
+import java.util.concurrent.CountDownLatch;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.ThreadInterruptedException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.AtomicLongStat;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/*
+ * The FSyncManager ensures that only one file fsync is issued at a time, for
+ * performance optimization. The goal is to reduce the number of fsyncs issued
+ * by the system by issuing 1 fsync on behalf of a number of threads.
+ *
+ * For example, suppose these writes happen which all need to be fsynced to
+ * disk:
+ *
+ *    thread 1 writes a commit record
+ *    thread 2 writes a checkpoint
+ *    thread 3 writes a commit record
+ *    thread 4 writes a commit record
+ *    thread 5 writes a checkpoint
+ *
+ * Rather than executing 5 fsyncs, which all must happen synchronously, we hope
+ * to issue fewer. How many fewer depends on timing. Note that the writes
+ * themselves are serialized and are guaranteed to run in order.
+ *
+ * For example:
+ *    thread 1 wants to fsync first, no other fsync going on, will issue fsync
+ *    thread 2 waits
+ *    thread 3 waits
+ *    thread 4 waits
+ *     - before thread 5 comes, thread 1 finishes fsyncing and returns to
+ *    the caller. Now another fsync can be issued that will cover threads
+ *    2,3,4. One of those threads (2, 3, 4) issues the fsync, the others
+ *    block.
+ *    thread 5 wants to fsync, but sees one going on, so will wait.
+ *     - the fsync issued for 2,3,4 can't cover thread 5 because we're not sure
+ *    if thread 5's write finished before that fsync call. Thread 5 will have
+ *    to issue its own fsync.
+ *
+ * Target file
+ * -----------
+ * Note that when the buffer pool starts a new file, we fsync the previous file
+ * under the log write latch. Therefore, at any time we only have one target
+ * file to fsync, which is the current write buffer. We do this so that we
+ * don't have to coordinate between files. For example, suppose log files have
+ * 1000 bytes and a commit record is 10 bytes. An LSN of value 6/990 is in
+ * file 6 at offset 990.
+ *
+ *    thread 1: logWriteLatch.acquire()
+ *              write commit record to LSN 6/980
+ *              logWriteLatch.release()
+ *    thread 2: logWriteLatch.acquire()
+ *              write commit record to LSN 6/990
+ *              logWriteLatch.release()
+ *    thread 3: logWriteLatch.acquire()
+ *              gets 7/000 as the next LSN to use
+ *              see that we flipped to a new file, so call fsync on file 6
+ *              write commit record to LSN 7/000
+ *              logWriteLatch.release()
+ *
+ * Thread 3 will fsync file 6 within the log write latch. That way, at any
+ * time, any non-latched fsyncs should only fsync the latest file. If we
+ * didn't do this, there's a chance that thread 3 would fsync file 7 and
+ * return to its caller before threads 1 and 2 got an fsync for file 6. That
+ * wouldn't be correct, because thread 3's commit might depend on file 6.
+ *
+ * Note that the FileManager keeps a file descriptor that corresponds to the
+ * current end of file, and that is what we fsync.
+ */
+class FSyncManager {
+    private final EnvironmentImpl envImpl;
+    private final long timeout;
+
+    /* Use as the target for a synchronization block. */
+    private final Object mgrMutex;
+
+    private volatile boolean workInProgress;
+    private FSyncGroup nextFSyncWaiters;
+    private int numNextWaiters;
+    private long startNextWait;
+    /* Number of waiters that prevents a group commit wait */
+    private final int grpcThreshold;
+    private final long grpcInterval;
+    private final boolean grpWaitOn;
+
+    /* stats */
+    private final StatGroup stats;
+    private final LongStat nFSyncRequests;
+    private final AtomicLongStat nFSyncs;
+    private final LongStat nTimeouts;
+    private final LongStat nRequests;
+    private final LongStat nWaitersExceeded;
+    private final LongStat nTimeExceeded;
+    private final LongStat nWaits;
+
+    /* For unit tests. */
+    private TestHook flushHook;
+
+    FSyncManager(EnvironmentImpl envImpl) {
+        timeout = envImpl.getConfigManager().getDuration
+            (EnvironmentParams.LOG_FSYNC_TIMEOUT);
+        grpcInterval = envImpl.getConfigManager().getDurationNS(
+            EnvironmentParams.LOG_GROUP_COMMIT_INTERVAL);
+        grpcThreshold =
+            envImpl.getConfigManager().getInt(
+                EnvironmentParams.LOG_GROUP_COMMIT_THRESHOLD);
+        if (grpcInterval == 0 || grpcThreshold == 0) {
+            grpWaitOn = false;
+        } else {
+            grpWaitOn = true;
+        }
+
+        this.envImpl = envImpl;
+
+        mgrMutex = new Object();
+        workInProgress = false;
+        nextFSyncWaiters = new FSyncGroup(timeout, envImpl);
+
+        stats = new StatGroup(LogStatDefinition.FSYNCMGR_GROUP_NAME,
+                              LogStatDefinition.FSYNCMGR_GROUP_DESC);
+        nFSyncRequests = new LongStat(stats, FSYNCMGR_FSYNC_REQUESTS);
+        nFSyncs = new AtomicLongStat(stats, FSYNCMGR_FSYNCS);
+        nTimeouts = new LongStat(stats, FSYNCMGR_TIMEOUTS);
+        nRequests = new LongStat(stats, GRPCMGR_N_GROUP_COMMIT_REQUESTS);
+        nTimeExceeded =
+            new LongStat(stats, GRPCMGR_N_LOG_INTERVAL_EXCEEDED);
+        nWaitersExceeded =
+            new LongStat(stats, GRPCMGR_N_LOG_MAX_GROUP_COMMIT);
+        nWaits =
+            new LongStat(stats, GRPCMGR_N_GROUP_COMMIT_WAITS);
+        numNextWaiters = 0;
+    }
+
+    /**
+     * Request to flush the log buffer and optionally fsync to disk.
+     * This thread may or may not actually execute the flush/fsync,
+     * but will not return until a flush/fsync has been
+     * issued and executed on behalf of its write. There is a timeout period
+     * specified by EnvironmentParams.LOG_FSYNC_TIMEOUT that ensures that no
+     * thread gets stuck here indefinitely.
+     *
+     * When a thread comes in, it will find one of two things.
+     * 1. There is no work going on right now. This thread should go
+     *    ahead and become the group leader. The leader may wait and then
+     *    execute the flush/fsync.
+     * 2. There is work going on: wait for the next group commit.
+     *
+     * When work is going on, all those threads that come along are grouped
+     * together as the nextFsyncWaiters. When the current work is finished,
+     * one of those nextFsyncWaiters will be selected as a leader to issue the
+     * next flush/fsync. The other members of the group will merely wait until
+     * the flush/fsync done on their behalf is finished.
+     *
+     * When a thread finishes a flush/fsync, it has to:
+     * 1. wake up all the threads that were waiting in the group.
+     * 2. wake up one member of the next group of waiting threads (the
+     *    nextFsyncWaiters) so that thread can become the new leader
+     *    and issue the next flush/fsync call.
+     *
+     * If a non-leader member of the nextFsyncWaiters times out, it will issue
+     * its own flush/fsync anyway, in case something happened to the leader.
+     *
+     * @param fsyncRequired true if fsync is required
+     * @throws DatabaseException
+     */
+    void flushAndSync(boolean fsyncRequired)
+        throws DatabaseException {
+
+        long interval;
+        boolean doWork = false;
+        boolean isLeader = false;
+        boolean needToWait = false;
+        FSyncGroup inProgressGroup = null;
+        FSyncGroup myGroup = null;
+
+        synchronized (mgrMutex) {
+            nRequests.increment();
+            if (fsyncRequired) {
+                nFSyncRequests.increment();
+            }
+            myGroup = nextFSyncWaiters;
+            myGroup.setDoFsync(fsyncRequired);
+
+            /* Figure out if we're calling fsync or waiting. */
+            if (workInProgress) {
+                needToWait = true;
+                numNextWaiters++;
+                if (grpWaitOn && numNextWaiters == 1) {
+                    startNextWait = System.nanoTime();
+                }
+            } else {
+                isLeader = true;
+                doWork = true;
+                workInProgress = true;
+                if (grpWaitOn) {
+                    if (numNextWaiters < grpcThreshold) {
+                        interval = System.nanoTime() - startNextWait;
+                        if (interval < grpcInterval) {
+                            try {
+                                nWaits.increment();
+                                mgrMutex.wait(interval/1000000,
+                                              (int) interval%1000000);
+                            } catch (InterruptedException e) {
+                                throw new ThreadInterruptedException(
+                                    envImpl,
+                                    "Unexpected interrupt while " +
+                                    "waiting for write or fsync",
+                                    e);
+                            }
+                        }
+                        nTimeExceeded.increment();
+                    } else {
+                        nWaitersExceeded.increment();
+                    }
+                }
+                inProgressGroup = nextFSyncWaiters;
+                nextFSyncWaiters = new FSyncGroup(timeout, envImpl);
+                numNextWaiters = 0;
+            }
+        }
+
+        if (needToWait) {
+
+            /*
+             * Note that there's no problem if we miss the notify on this set
+             * of waiters. We can check state in the FSyncGroup before we begin
+             * to wait.
+             *
+             * All members of the group may return from their waitForEvent()
+             * call with the need to do a fsync, because of timeout. Only one
+             * will return as the leader.
+             */
+            int waitStatus = myGroup.waitForEvent();
+
+            if (waitStatus == FSyncGroup.DO_LEADER_FSYNC) {
+                synchronized (mgrMutex) {
+
+                    /*
+                     * Check if there's a fsync in progress; this might happen
+                     * even if you were designated the leader if a new thread
+                     * came in between the point when the old leader woke you
+                     * up and now. This new thread may have found that there
+                     * was no fsync in progress, and may have started a fsync.
+                     */
+                    if (workInProgress) {

+                        /*
+                         * Ensure that an fsync is done before returning by
+                         * forcing an fsync in this thread.
[#20717] + */ + doWork = true; + } else { + isLeader = true; + doWork = true; + workInProgress = true; + + if (grpWaitOn) { + if (numNextWaiters < grpcThreshold) { + interval = System.nanoTime() - startNextWait; + if (interval < grpcInterval) { + try { + nWaits.increment(); + mgrMutex.wait(interval/1000000, + (int) interval%1000000); + } catch (InterruptedException e) { + throw new ThreadInterruptedException( + envImpl, + "Unexpected interrupt while " + + "waiting for write or fsync", + e); + } + } + nTimeExceeded.increment(); + } else { + nWaitersExceeded.increment(); + } + } + inProgressGroup = myGroup; + nextFSyncWaiters = new FSyncGroup(timeout, envImpl); + numNextWaiters = 0; + } + } + } else if (waitStatus == FSyncGroup.DO_TIMEOUT_FSYNC) { + doWork = true; + synchronized (mgrMutex) { + nTimeouts.increment(); + } + } + } + + if (doWork) { + + /* + * There are 3 ways that this fsync gets called: + * + * 1. A thread calls sync and there is not a sync call already in + * progress. That thread executes fsync for itself only. Other + * threads requesting sync form a group of waiters. + * + * 2. A sync finishes and wakes up a group of waiters. The first + * waiter in the group to wake up becomes the leader. It executes + * sync for it's group of waiters. As above, other threads + * requesting sync form a new group of waiters. + * + * 3. If members of a group of waiters have timed out, they'll all + * just go and do their own sync for themselves. + */ + + /* flush the log buffer */ + if (myGroup.getDoFsync()) { + envImpl.getLogManager().flushBeforeSync(); + } else { + envImpl.getLogManager().flushNoSync(); + } + + TestHookExecute.doHookIfSet(flushHook); + + /* execute fsync */ + if (myGroup.getDoFsync()) { + executeFSync(); + nFSyncs.increment(); + } + + synchronized (mgrMutex) { + if (isLeader) { + + /* + * Wake up the group that requested the fsync before you + * started. They've piggybacked off your fsync. + */ + inProgressGroup.wakeupAll(); + + /* + * Wake up a single waiter, who will become the next + * leader. + */ + nextFSyncWaiters.wakeupOne(); + workInProgress = false; + } + } + } + } + + /* + * Stats. + */ + long getNFSyncRequests() { + return nFSyncRequests.get(); + } + + long getNFSyncs() { + return nFSyncs.get(); + } + + long getNTimeouts() { + return nTimeouts.get(); + } + + StatGroup loadStats(StatsConfig config) { + return stats.cloneGroup(config.getClear()); + } + + /** + * Put the fsync execution into this method so it can be overridden for + * testing purposes. + */ + protected void executeFSync() + throws DatabaseException { + + envImpl.getFileManager().syncLogEnd(); + } + + /* For unit testing only. */ + public void setFlushLogHook(TestHook hook) { + flushHook = hook; + } + + /* + * Embodies a group of threads waiting for a common fsync. Note that + * there's no collection here; group membership is merely that the threads + * are all waiting on the same monitor. 
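+     *
+     * A waiter leaves waitForEvent with one of three statuses:
+     * NO_FSYNC_NEEDED (a flush/fsync was done on its behalf),
+     * DO_LEADER_FSYNC (it has been promoted to leader and must do the
+     * work), or DO_TIMEOUT_FSYNC (its wait timed out and it must
+     * flush/fsync for itself).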
+ */ + static class FSyncGroup { + static int DO_TIMEOUT_FSYNC = 0; + static int DO_LEADER_FSYNC = 1; + static int NO_FSYNC_NEEDED = 2; + + private volatile boolean doFsync = false; + private volatile boolean workDone; + private final long fsyncTimeout; + private boolean leaderExists; + private final EnvironmentImpl envImpl; + + FSyncGroup(long fsyncTimeout, EnvironmentImpl envImpl) { + this.fsyncTimeout = fsyncTimeout; + workDone = false; + leaderExists = false; + this.envImpl = envImpl; + } + + synchronized boolean getLeader() { + if (workDone) { + return false; + } else { + if (leaderExists) { + return false; + } else { + leaderExists = true; + return true; + } + } + } + + /** + * Wait for either a turn to execute a fsync, or to find out that a + * fsync was done on your behalf. + * + * @return true if the fsync wasn't done, and this thread needs to + * execute a fsync when it wakes up. This may be true because it's the + * leader of its group, or because the wait timed out. + */ + synchronized int waitForEvent() + throws ThreadInterruptedException { + + int status = NO_FSYNC_NEEDED; + + if (!workDone) { + long startTime = System.currentTimeMillis(); + while (true) { + + try { + wait(fsyncTimeout); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(envImpl, + "Unexpected interrupt while waiting "+ + "for write or fsync", e); + } + + /* + * This thread was awoken either by a timeout, by a notify, + * or by an interrupt. Is the fsync done? + */ + if (workDone) { + /* The fsync we're waiting on is done, leave. */ + status = NO_FSYNC_NEEDED; + break; + } else { + + /* + * The fsync is not done -- were we woken up to become + * the leader? + */ + if (!leaderExists) { + leaderExists = true; + status = DO_LEADER_FSYNC; + break; + } else { + + /* + * We're just a waiter. See if we're timed out or + * have more to wait. + */ + long now = System.currentTimeMillis(); + if ((now - startTime) > fsyncTimeout) { + /* we timed out. */ + status = DO_TIMEOUT_FSYNC; + break; + } + } + } + } + } + + return status; + } + + synchronized void setDoFsync(boolean doSync) { + this.doFsync |= doSync; + } + + synchronized boolean getDoFsync() { + return doFsync; + } + + synchronized void wakeupAll() { + workDone = true; + notifyAll(); + } + + synchronized void wakeupOne() { + /* FindBugs whines here. */ + notify(); + } + } +} diff --git a/src/com/sleepycat/je/log/FileCacheWarmer.java b/src/com/sleepycat/je/log/FileCacheWarmer.java new file mode 100644 index 0000000..bc0ba65 --- /dev/null +++ b/src/com/sleepycat/je/log/FileCacheWarmer.java @@ -0,0 +1,168 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.RandomAccessFile; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Warm-up the file system cache during startup, for some portion of the log + * that is not being read by recovery. 
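+ *
+ * The warmer reads backward from the recovery start position, one buffer at
+ * a time, moving to earlier files as needed, until the configured warm-up
+ * size (less the portion recovery itself will read) is covered, the start
+ * of the log is reached, or the thread is stopped.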
+ *
+ * See EnvironmentConfig.LOG_FILE_WARM_UP_SIZE (until we publish it, this is
+ * in EnvironmentParams).
+ */
+public class FileCacheWarmer extends Thread {
+
+    private final EnvironmentImpl envImpl;
+    private final long recoveryStartLsn;
+    private final long endOfLogLsn;
+    private final int warmUpSize;
+    private final int bufSize;
+    private volatile boolean stop;
+
+    FileCacheWarmer(final EnvironmentImpl envImpl,
+                    final long recoveryStartLsn,
+                    final long endOfLogLsn,
+                    final int warmUpSize,
+                    final int bufSize) {
+        this.envImpl = envImpl;
+        this.recoveryStartLsn = recoveryStartLsn;
+        this.endOfLogLsn = endOfLogLsn;
+        this.warmUpSize = warmUpSize;
+        this.bufSize = bufSize;
+        stop = false;
+    }
+
+    /**
+     * Stops this thread. At most one read will occur after calling this
+     * method, and then the thread will exit.
+     */
+    void shutdown() {
+        stop = true;
+    }
+
+    @Override
+    public void run() {
+        try {
+            doRun();
+        } catch (Throwable e) {
+
+            /*
+             * Log the error as SEVERE but do not invalidate the environment
+             * since it is perfectly usable.
+             */
+            LoggerUtils.traceAndLogException(
+                envImpl, FileCacheWarmer.class.getName(), "run",
+                "Unable to warm file system cache due to exception", e);
+
+        } finally {
+            /* Ensure that this thread can be GC'd after it stops. */
+            envImpl.getFileManager().clearFileCacheWarmer();
+        }
+    }
+
+    private void doRun()
+        throws Throwable {
+
+        final FileManager fm = envImpl.getFileManager();
+
+        final long ONE_MB = 1L << 20;
+
+        long remaining = (warmUpSize * ONE_MB) -
+            DbLsn.getTrueDistance(recoveryStartLsn, endOfLogLsn, fm);
+
+        if (remaining <= 0) {
+            return;
+        }
+
+        // System.out.println("FileCacheWarmer start " + remaining);
+
+        final byte[] buf = new byte[bufSize];
+
+        long fileNum = DbLsn.getFileNumber(recoveryStartLsn);
+        long fileOff = DbLsn.getFileOffset(recoveryStartLsn);
+
+        String filePath = fm.getFullFileName(fileNum);
+        File file = new File(filePath);
+        RandomAccessFile raf = null;
+
+        try {
+            raf = new RandomAccessFile(file, "r");
+
+            while (!stop && remaining > 0) {
+
+                if (fileOff <= 0) {
+                    raf.close();
+                    raf = null;
+
+                    while (!stop) {
+                        final Long nextFileNum = fm.getFollowingFileNum(
+                            fileNum, false /*forward*/);
+
+                        if (nextFileNum == null) {
+                            return;
+                        }
+
+                        fileNum = nextFileNum;
+                        filePath = fm.getFullFileName(fileNum);
+                        file = new File(filePath);
+                        try {
+                            raf = new RandomAccessFile(file, "r");
+                        } catch (FileNotFoundException e) {
+                            continue;
+                        }
+                        fileOff = raf.length();
+                        break;
+                    }
+                }
+
+                final long pos = Math.max(0L, fileOff - bufSize);
+                raf.seek(pos);
+
+                final int bytes = (int) (fileOff - pos);
+                final int read = raf.read(buf, 0, bytes);
+
+                if (read != bytes) {
+                    throw new IllegalStateException(
+                        "Requested " + bytes + " bytes but read " + read);
+                }
+
+                remaining -= bytes;
+                fileOff = pos;
+            }
+
+            raf.close();
+            raf = null;
+
+        } finally {
+
+            // System.out.println(
+            //     "FileCacheWarmer finish " + remaining + " " + stop);
+
+            if (raf != null) {
+                try {
+                    raf.close();
+                } catch (Exception e) {
+                    /* Ignore this. Another exception is in flight. */
+                }
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/log/FileDeletionDetector.java b/src/com/sleepycat/je/log/FileDeletionDetector.java
new file mode 100644
index 0000000..3e89832
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileDeletionDetector.java
@@ -0,0 +1,322 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log;
+
+import static java.nio.file.StandardWatchEventKinds.ENTRY_DELETE;
+import static java.nio.file.StandardWatchEventKinds.OVERFLOW;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileSystems;
+import java.nio.file.Path;
+import java.nio.file.WatchEvent;
+import java.nio.file.WatchKey;
+import java.nio.file.WatchService;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentFailureReason;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.StoppableThread;
+import com.sun.nio.file.SensitivityWatchEventModifier;
+
+class FileDeletionDetector {
+
+    private final EnvironmentImpl envImpl;
+
+    /*
+     * Stores the names of files deleted by JE. Used to detect unexpected
+     * file deletion, e.g. a log file wrongly deleted by the user.
+     */
+    private final Set<String> filesDeletedByJE;
+
+    /* WatchService to monitor directory change. */
+    private final WatchService fileDeletionWatcher;
+
+    /* Timer to periodically detect unexpected log file deletion. */
+    private final Timer fileDeletionTimer;
+
+    /* TimerTask to be executed by Timer to detect log file deletion. */
+    private final FileDeleteDetectTask fileDeletionTask;
+
+    /*
+     * Used to check whether an "mv" action has happened.
+     *
+     * If we register "folder" with the WatchService and then execute "mv
+     * folder folder.new", the WatchService cannot detect this action. So we
+     * check whether the corresponding dirs still exist to determine whether
+     * an "mv" action has happened.
+     */
+    private Map<WatchKey, File> fileDeletionWatchKeys;
+
+    FileDeletionDetector(final File dbEnvHome,
+                         final File[] dbEnvDataDirs,
+                         final EnvironmentImpl envImpl) {
+        this.envImpl = envImpl;
+
+        filesDeletedByJE = new HashSet<>();
+        fileDeletionWatchKeys = new HashMap<>();
+
+        /*
+         * Create the WatchService which monitors the root env
+         * home or the sub-directories, such as data00N.
+         */
+        try {
+            fileDeletionWatcher =
+                FileSystems.getDefault().newWatchService();
+
+            /*
+             * Register the root env home or the sub-directories.
+             *
+             * If sub-directories exist, then only the sub-directories
+             * contain .jdb files, so we register only the sub-directories.
+             * Otherwise, we register only the root env home.
+             *
+             * Here, we do not use Files.walkFileTree(Path, FileVisitor).
+             * This is because that method checks every entry (file or
+             * directory) under the root path, and the following scenario
+             * may happen (see com.sleepycat.je.MultiProcessWriteTest.
+             * testMultiEnvWrite):
+             * 1. One thread is closing the env.
+             * 2. A second thread wants to register the env home dir.
+             * 3. In the second thread, Files.walkFileTree() finds
+             *    je.info.0.lck, which is actually in use by the first
+             *    thread.
+             * 4. When Files.walkFileTree() then tries to check the
+             *    attributes of je.info.0.lck, the file cannot be found
+             *    because the first thread has closed the env.
+             *
+             * In short, Files.walkFileTree may touch unrelated files and
+             * cause unexpected exceptions.
+             *
+             * On AIX, deletion detection has a delay of about 4 seconds,
+             * i.e. there is a 4-second delay between a file deletion and
+             * its detection. We use SensitivityWatchEventModifier.HIGH to
+             * resolve this.
+             *
+             * In addition, when a directory that contains files is deleted
+             * directly on AIX, the WatchService only detects the
+             * directory-deletion event and misses the file-deletion events.
+             * So on AIX, if a directory is deleted unexpectedly, the
+             * current method cannot handle it.
+             */
+            if (dbEnvDataDirs != null) {
+                for (File f : dbEnvDataDirs) {
+                    final WatchKey key = f.toPath().register(
+                        fileDeletionWatcher,
+                        new WatchEvent.Kind[]{ENTRY_DELETE},
+                        SensitivityWatchEventModifier.HIGH);
+                    fileDeletionWatchKeys.put(key, f);
+                }
+            } else {
+                final WatchKey key = dbEnvHome.toPath().register(
+                    fileDeletionWatcher,
+                    new WatchEvent.Kind[]{ENTRY_DELETE},
+                    SensitivityWatchEventModifier.HIGH);
+                fileDeletionWatchKeys.put(key, dbEnvHome);
+            }
+        } catch (IOException ie) {
+            throw new EnvironmentFailureException(
+                envImpl,
+                EnvironmentFailureReason.UNEXPECTED_EXCEPTION,
+                "Cannot register " + dbEnvHome.toString() +
+                " or its sub-directories with the WatchService.",
+                ie);
+        }
+
+        DbConfigManager configManager = envImpl.getConfigManager();
+        final int interval = configManager.getDuration(
+            EnvironmentParams.LOG_DETECT_FILE_DELETE_INTERVAL);
+        /* Periodically detect unexpected log file deletion. */
+        fileDeletionTimer = new Timer(
+            envImpl.makeDaemonThreadName(
+                Environment.FILE_DELETION_DETECTOR_NAME),
+            true);
+        fileDeletionTask = new FileDeleteDetectTask();
+        fileDeletionTimer.schedule(fileDeletionTask, 0, interval);
+    }
+
+    private class FileDeleteDetectTask extends TimerTask {
+        public void run() {
+            boolean success = false;
+            try {
+                processLogFileDeleteEvents();
+                success = true;
+            } catch (EnvironmentFailureException e) {
+                /*
+                 * A log file was deleted unexpectedly. We have already
+                 * invalidated the environment. Here we just close the
+                 * Timer, TimerTask and the WatchService. Do this in the
+                 * finally block.
+                 */
+            } catch (Exception e) {
+                /*
+                 * It is possible that processLogFileDeleteEvents is doing
+                 * something with the WatchService when FileManager.close
+                 * calls WatchService.close. Then processLogFileDeleteEvents
+                 * may throw an exception, such as IOException or
+                 * java.nio.file.ClosedWatchServiceException. In this
+                 * situation, we just ignore the exception and close the
+                 * Timer, TimerTask and the WatchService in the finally
+                 * block. The close may be redundant, but for these close
+                 * methods, the second and subsequent calls have no effect.
+                 *
+                 * If the exception is not caused by the WatchService.close
+                 * in FileManager.close, then something has gone wrong. We
+                 * handle the exception in a way similar to
+                 * StoppableThread.handleUncaughtException.
+                 *
+                 * !envImpl.isClosing indicates that the FileManager is not
+                 * closing. requestShutdownDaemons sets envImpl.isClosing to
+                 * true, and requestShutdownDaemons happens before
+                 * FileManager.close.
+                 *
+                 * envImpl.isValid indicates that the current env is valid.
+                 * If the env is invalid, some other code has already
+                 * invalidated it, so here we do not need to invalidate it
+                 * again.
+                 */
+                if (envImpl.isValid() && !envImpl.isClosing()) {
+                    handleUnexpectedThrowable(Thread.currentThread(), e);
+                }
+            } catch (Error e) {
+                /* Something has gone badly wrong. */
+                handleUnexpectedThrowable(Thread.currentThread(), e);
+            } finally {
+                if (!success) {
+                    try {
+                        close();
+                    } catch (IOException ie) {
+                        handleUnexpectedThrowable(
+                            Thread.currentThread(), ie);
+                    }
+                }
+            }
+        }
+    }
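+    /*
+     * For reference, the detection machinery above reduces to this minimal
+     * java.nio.file pattern (a sketch only, not JE code; "dir" stands in
+     * for the env home Path or a data sub-directory Path):
+     *
+     *   WatchService ws = FileSystems.getDefault().newWatchService();
+     *   dir.register(ws, new WatchEvent.Kind[] {ENTRY_DELETE},
+     *                SensitivityWatchEventModifier.HIGH);
+     *   WatchKey key;
+     *   while ((key = ws.poll()) != null) {
+     *       for (WatchEvent<?> ev : key.pollEvents()) {
+     *           if (ev.kind() != OVERFLOW) {
+     *               // ev.context() is the deleted file's relative Path
+     *           }
+     *       }
+     *       if (!key.reset()) {
+     *           break; // key cancelled, dir inaccessible, or ws closed
+     *       }
+     *   }
+     */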
+    private void handleUnexpectedThrowable(Thread t, Throwable e) {
+        StoppableThread.handleUncaughtException(
+            envImpl.getLogger(), envImpl, t, e);
+    }
+
+    /*
+     * Detect unexpected log file deletion.
+     */
+    private void processLogFileDeleteEvents() throws Exception {
+
+        /*
+         * We may register multiple directories with the WatchService, so
+         * there may be multiple WatchKeys for this WatchService. File
+         * deletions may happen in several of these directories
+         * simultaneously, i.e. more than one WatchKey may have pending
+         * events. We should handle them all.
+         */
+        while (true) {
+            final WatchKey key = fileDeletionWatcher.poll();
+            if (key == null) {
+                /*
+                 * If no event is detected, we check whether the directories
+                 * corresponding to the WatchKeys still exist.
+                 */
+                for (final File file : fileDeletionWatchKeys.values()) {
+                    if (!file.exists()) {
+                        final String dir = file.getCanonicalPath();
+                        throw new IOException(
+                            "Directory " + dir + " does not exist now. " +
+                            "Something abnormal may happen.");
+                    }
+                }
+                break;
+            }
+
+            for (final WatchEvent<?> event : key.pollEvents()) {
+                final WatchEvent.Kind<?> kind = event.kind();
+                if (kind == OVERFLOW) {
+                    continue;
+                }
+
+                /* Get the file name from the context. */
+                final WatchEvent<?> ev = cast(event);
+                final String fileName = ev.context().toString();
+                if (fileName.endsWith(FileManager.JE_SUFFIX)) {
+                    synchronized (filesDeletedByJE) {
+                        if (!filesDeletedByJE.contains(fileName)) {
+                            /* TimerTask.run will handle this exception. */
+                            throw new EnvironmentFailureException(
+                                envImpl,
+                                EnvironmentFailureReason.
+                                    LOG_UNEXPECTED_FILE_DELETION,
+                                "Log file " + fileName +
+                                " was deleted unexpectedly.");
+                        }
+                        filesDeletedByJE.remove(fileName);
+                    }
+                }
+            }
+
+            if (!key.reset()) {
+                /*
+                 * If key.reset returns false, the key is no longer valid.
+                 * A key becomes invalid when one of the following events
+                 * occurs:
+                 * 1. The process explicitly cancels the key by using the
+                 *    cancel method. -- The JE code never does this.
+                 * 2. The directory becomes inaccessible. -- This indicates
+                 *    an abnormal situation.
+                 * 3. The watch service is closed. -- The close may be
+                 *    expected, e.g. caused by WatchService.close. The
+                 *    exception caused by this can be handled in
+                 *    FileDeleteDetectTask.run.
+                 */
+                final String dir =
+                    fileDeletionWatchKeys.get(key).getCanonicalPath();
+                throw new IOException(
+                    "Watch Key corresponding to " + dir + " returned " +
+                    "false when reset. 
Something abnormal may happen."); + } + } + } + + void addDeletedFile(String fileName) { + synchronized (filesDeletedByJE) { + filesDeletedByJE.add(fileName); + } + } + + public void close() throws IOException { + fileDeletionTask.cancel(); + fileDeletionTimer.cancel(); + synchronized(fileDeletionWatcher) { + fileDeletionWatcher.close(); + } + } + + @SuppressWarnings("unchecked") + WatchEvent cast(WatchEvent event) { + return (WatchEvent)event; + } +} diff --git a/src/com/sleepycat/je/log/FileHandle.java b/src/com/sleepycat/je/log/FileHandle.java new file mode 100644 index 0000000..60d5d90 --- /dev/null +++ b/src/com/sleepycat/je/log/FileHandle.java @@ -0,0 +1,97 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.io.IOException; +import java.io.RandomAccessFile; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.latch.Latch; +import com.sleepycat.je.latch.LatchFactory; + +/** + * A FileHandle embodies a File and its accompanying latch. + */ +public class FileHandle { + private RandomAccessFile file; + private Latch fileLatch; + private int logVersion; + private long fileNum; + + /** + * Creates a new handle but does not initialize it. The init method must + * be called before using the handle to access the file. + */ + FileHandle(EnvironmentImpl envImpl, long fileNum, String label) { + fileLatch = LatchFactory.createExclusiveLatch( + envImpl, "file_" + label + "_fileHandle", false /*collectStats*/); + this.fileNum = fileNum; + } + + /** + * Initializes the handle after opening the file and reading the header. + */ + void init(RandomAccessFile file, int logVersion) { + assert this.file == null; + this.file = file; + this.logVersion = logVersion; + } + + RandomAccessFile getFile() { + return file; + } + + long getFileNum() { + return fileNum; + } + + public int getLogVersion() { + return logVersion; + } + + boolean isOldHeaderVersion() { + return logVersion < LogEntryType.LOG_VERSION; + } + + void latch() + throws DatabaseException { + + fileLatch.acquireExclusive(); + } + + boolean latchNoWait() + throws DatabaseException { + + return fileLatch.acquireExclusiveNoWait(); + } + + public void release() + throws DatabaseException { + + fileLatch.release(); + } + + void close() + throws IOException { + + if (file != null) { + try { + file.close(); + } finally { + file = null; + } + } + } +} diff --git a/src/com/sleepycat/je/log/FileHandleSource.java b/src/com/sleepycat/je/log/FileHandleSource.java new file mode 100644 index 0000000..2e1bc9d --- /dev/null +++ b/src/com/sleepycat/je/log/FileHandleSource.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import com.sleepycat.je.DatabaseException; + +/** + * FileHandleSource is a file source built on top of a cached file handle. + */ +class FileHandleSource extends FileSource { + + private FileHandle fileHandle; + + FileHandleSource(FileHandle fileHandle, + int readBufferSize, + FileManager fileManager) { + super(fileHandle.getFile(), readBufferSize, fileManager, + fileHandle.getFileNum(), fileHandle.getLogVersion()); + this.fileHandle = fileHandle; + } + + /** + * @see LogSource#release + */ + @Override + public void release() + throws DatabaseException { + + fileHandle.release(); + } + + public int getLogVersion() { + return fileHandle.getLogVersion(); + } +} diff --git a/src/com/sleepycat/je/log/FileHeader.java b/src/com/sleepycat/je/log/FileHeader.java new file mode 100644 index 0000000..20fc44f --- /dev/null +++ b/src/com/sleepycat/je/log/FileHeader.java @@ -0,0 +1,188 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.Calendar; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.VersionMismatchException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.Timestamp; + +/** + * A FileHeader embodies the header information at the beginning of each log + * file. + */ +public class FileHeader implements Loggable { + + /* + * fileNum is the number of file, starting at 0. An unsigned int, so stored + * in a long in memory, but in 4 bytes on disk + */ + private long fileNum; + private long lastEntryInPrevFileOffset; + private Timestamp time; + private int logVersion; + + FileHeader(long fileNum, long lastEntryInPrevFileOffset) { + this.fileNum = fileNum; + this.lastEntryInPrevFileOffset = lastEntryInPrevFileOffset; + Calendar now = Calendar.getInstance(); + time = new Timestamp(now.getTimeInMillis()); + logVersion = LogEntryType.LOG_VERSION; + } + + /** + * For logging only. + */ + public FileHeader() { + } + + public int getLogVersion() { + return logVersion; + } + + /** + * @return file header log version. 
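+     *
+     * For reference, the on-disk header payload handled by this class is
+     * fixed-size (see entrySize below); the byte arithmetic, using the
+     * LogUtils size constants:
+     *
+     *   time                       8 bytes (LogUtils.LONG_BYTES)
+     *   file number                4 bytes (LogUtils.UNSIGNED_INT_BYTES)
+     *   lastEntryInPrevFileOffset  8 bytes (LogUtils.LONG_BYTES)
+     *   logVersion                 4 bytes (LogUtils.INT_BYTES)
+     *                             --------
+     *   entrySize()               24 bytes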
+     */
+    int validate(EnvironmentImpl envImpl,
+                 String fileName,
+                 long expectedFileNum)
+        throws DatabaseException {
+
+        if (logVersion > LogEntryType.LOG_VERSION) {
+            throw new VersionMismatchException
+                (envImpl,
+                 "Expected log version " + LogEntryType.LOG_VERSION +
+                 " or earlier but found " + logVersion);
+        }
+
+        if (fileNum != expectedFileNum) {
+            throw new EnvironmentFailureException
+                (envImpl, EnvironmentFailureReason.LOG_INTEGRITY,
+                 "Wrong filenum in header for file " +
+                 fileName + " expected " +
+                 expectedFileNum + " got " + fileNum);
+        }
+
+        return logVersion;
+    }
+
+    /**
+     * @return the offset of the last entry in the previous file.
+     */
+    long getLastEntryInPrevFileOffset() {
+        return lastEntryInPrevFileOffset;
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * A header is always a known size.
+     */
+    public static int entrySize() {
+        return
+            LogUtils.LONG_BYTES +         // time
+            LogUtils.UNSIGNED_INT_BYTES + // file number
+            LogUtils.LONG_BYTES +         // lastEntryInPrevFileOffset
+            LogUtils.INT_BYTES;           // logVersion
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     * @return number of bytes used to store this object
+     */
+    public int getLogSize() {
+        return entrySize();
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     * Serialize this object into the buffer. Update cksum with all
+     * the bytes used by this object.
+     * @param logBuffer is the destination buffer
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeLong(logBuffer, time.getTime());
+        LogUtils.writeUnsignedInt(logBuffer, fileNum);
+        LogUtils.writeLong(logBuffer, lastEntryInPrevFileOffset);
+        LogUtils.writeInt(logBuffer, logVersion);
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     * Initialize this object from the data in logBuffer.
+     * @param logBuffer the source buffer
+     */
+    public void readFromLog(ByteBuffer logBuffer, int unusableEntryVersion) {
+
+        /* Timestamp is always unpacked. */
+        time = LogUtils.readTimestamp(logBuffer, true/*unpacked*/);
+        fileNum = LogUtils.readUnsignedInt(logBuffer);
+        lastEntryInPrevFileOffset = LogUtils.readLong(logBuffer);
+        logVersion = LogUtils.readInt(logBuffer);
+
+        /*
+         * The log version is unknown until reading it. If there are
+         * version-specific fields in this entry, they must follow the log
+         * version and use it, not the unusableEntryVersion param, for
+         * conditionals.
+         */
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     * @param sb destination string buffer
+     * @param verbose if true, dump the full, verbose version
+     */
+    public void dumpLog(StringBuilder sb, boolean verbose) {
+        sb.append("<FileHeader num=\"0x");
+        sb.append(Long.toHexString(fileNum));
+        sb.append("\" lastEntryInPrevFileOffset=\"0x");
+        sb.append(Long.toHexString(lastEntryInPrevFileOffset));
+        sb.append("\" logVersion=\"0x");
+        sb.append(Integer.toHexString(logVersion));
+        sb.append("\" time=\"").append(time);
+        sb.append("\"/>");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always return false, this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+
+    /**
+     * Print in XML format.
+     */
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        dumpLog(sb, true);
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/log/FileManager.java b/src/com/sleepycat/je/log/FileManager.java
new file mode 100644
index 0000000..bc8c7f3
--- /dev/null
+++ b/src/com/sleepycat/je/log/FileManager.java
@@ -0,0 +1,3321 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_BYTES_READ_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_FILE_OPENS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_LOG_FSYNCS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_OPEN_FILES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_READS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_READ_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_WRITES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_WRITE_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_READS_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_READS; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_READ_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_WRITES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_WRITE_BYTES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES; +import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITES_FROM_WRITEQUEUE; +import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_FSYNC_MAX_TIME; +import static com.sleepycat.je.log.LogStatDefinition.GRPCMGR_FSYNC_TIME; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import java.nio.channels.OverlappingFileLockException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.locks.ReentrantLock; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.LogWriteException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.FileHeaderEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.HexFormatter; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LoggerUtils; +import 
com.sleepycat.je.utilint.LongMaxZeroStat; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.RelatchRequiredException; +import com.sleepycat.je.utilint.StatGroup; + +/** + * The FileManager presents the abstraction of one contiguous file. It doles + * out LSNs. + */ +public class FileManager { + + public enum FileMode { + READ_MODE("r", false), + READWRITE_MODE("rw", true), + READWRITE_ODSYNC_MODE("rwd", true), + READWRITE_OSYNC_MODE("rws", true); + + private String fileModeValue; + private boolean isWritable; + + private FileMode(String fileModeValue, boolean isWritable) { + this.fileModeValue = fileModeValue; + this.isWritable = isWritable; + } + + public String getModeValue() { + return fileModeValue; + } + + public boolean isWritable() { + return isWritable; + } + } + + private static final boolean DEBUG = false; + + /* + * The number of writes that have been performed. + * + * public so that unit tests can diddle them. + */ + public static long WRITE_COUNT = 0; + + /* + * The write count value where we should stop or throw. + */ + public static long STOP_ON_WRITE_COUNT = Long.MAX_VALUE; + + /* + * If we're throwing, then throw on write #'s WRITE_COUNT through + * WRITE_COUNT + N_BAD_WRITES - 1 (inclusive). + */ + public static long N_BAD_WRITES = Long.MAX_VALUE; + + /* + * If true, then throw an IOException on write #'s WRITE_COUNT through + * WRITE_COUNT + N_BAD_WRITES - 1 (inclusive). + */ + public static boolean THROW_ON_WRITE = false; + + public static final String JE_SUFFIX = ".jdb"; // regular log files + public static final String DEL_SUFFIX = ".del"; // cleaned files + public static final String BAD_SUFFIX = ".bad"; // corrupt files + private static final String LOCK_FILE = "je.lck";// lock file + static final String[] DEL_SUFFIXES = { DEL_SUFFIX }; + static final String[] JE_SUFFIXES = { JE_SUFFIX }; + private static final String[] JE_AND_DEL_SUFFIXES = + { JE_SUFFIX, DEL_SUFFIX }; + + /* + * The suffix used to denote a file that is in the process of being + * transferred during a network backup. The file may not have been + * completely transferred, or its digest verified. + */ + public static final String TMP_SUFFIX = ".tmp"; + + /* + * The suffix used to rename files out of the way, if they are being + * retained during a backup. Note that the suffix is used in conjunction + * with a backup number as described in NetworkBackup + */ + public static final String BUP_SUFFIX = ".bup"; + + /* May be set to false to speed unit tests. */ + private boolean syncAtFileEnd = true; + + private final EnvironmentImpl envImpl; + private final long maxFileSize; + private final File dbEnvHome; + private final File[] dbEnvDataDirs; + + /* True if .del files should be included in the list of log files. */ + private boolean includeDeletedFiles = false; + + /* File cache */ + private final FileCache fileCache; + + private FileCacheWarmer fileCacheWarmer; + + /* The channel and lock for the je.lck file. */ + private RandomAccessFile lockFile; + private FileChannel channel; + private FileLock envLock; + private FileLock exclLock; + + /* True if all files should be opened readonly. 
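+     *
+     * (Aside on the FileMode enum above: its mode strings are the standard
+     * java.io.RandomAccessFile modes -- "r" read-only, "rw" read-write,
+     * "rwd" additionally syncs file content on every write (O_DSYNC-like),
+     * and "rws" syncs content and metadata. A usage sketch with a
+     * hypothetical path:
+     *
+     *   RandomAccessFile raf = new RandomAccessFile(
+     *       "/env/00000000.jdb",
+     *       FileMode.READWRITE_ODSYNC_MODE.getModeValue());  // "rwd"
+     * )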
*/ + private final boolean readOnly; + + /* Handles onto log position */ + private volatile long currentFileNum; // number of the current file + private volatile long nextAvailableLsn; // the next LSN available + private volatile long lastUsedLsn; // last LSN used in the current file + private boolean forceNewFile; // Force new file on next write + + /* endOfLog is used for writes and fsyncs to the end of the log. */ + private final LogEndFileDescriptor endOfLog; + + /* + * When we bump the LSNs over to a new file, we must remember the last LSN + * of the previous file so we can set the prevOffset field of the file + * header appropriately. We have to save it in a map because there's a time + * lag between when we know what the last LSN is and when we actually do + * the file write, because LSN bumping is done before we get a write + * buffer. This map is keyed by file num->last LSN. + */ + private final Map perFileLastUsedLsn; + + /* + * True if we should use the Write Queue. This queue is enabled by default + * and contains any write() operations which were attempted but would have + * blocked because an fsync() or another write() was in progress at the + * time. The operations on the Write Queue are later executed by the next + * operation that is able to grab the fsync latch. File systems like ext3 + * need this queue in order to achieve reasonable throughput since it + * acquires an exclusive mutex on the inode during any IO operation + * (seek/read/write/fsync). OS's like Windows and Solaris do not since + * they are able to handle concurrent IO operations on a single file. + */ + private final boolean useWriteQueue; + + /* The starting size of the Write Queue. */ + private final int writeQueueSize; + + /* + * Use O_DSYNC to open JE log files. + */ + private final boolean useODSYNC; + + /* public for unit tests. */ + public boolean VERIFY_CHECKSUMS = false; + + /** {@link EnvironmentParams#LOG_FSYNC_TIME_LIMIT}. */ + private final int fSyncTimeLimit; + + /* + * Non-0 means to use envHome/data001 through envHome/data00N for the + * environment directories, where N is nDataDirs. Distribute *.jdb files + * through dataNNN directories round-robin. + */ + private final int nDataDirs; + + /* + * Last file to which any IO was done. + */ + long lastFileNumberTouched = -1; + + /* + * Current file offset of lastFile. + */ + long lastFileTouchedOffset = 0; + + /* + * For IO stats, this is a measure of what is "close enough" to constitute + * a sequential IO vs a random IO. 1MB for now. Generally a seek within a + * few tracks of the current disk track is "fast" and only requires a + * single rotational latency. + */ + private static final long ADJACENT_TRACK_SEEK_DELTA = 1 << 20; + + /* + * Used to detect unexpected file deletion. 
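+     *
+     * (Aside on ADJACENT_TRACK_SEEK_DELTA above -- a sketch of the
+     * sequential-vs-random classification it supports, assuming the
+     * lastFileNumberTouched/lastFileTouchedOffset fields are current:
+     *
+     *   boolean sequential =
+     *       fileNum == lastFileNumberTouched &&
+     *       Math.abs(offset - lastFileTouchedOffset) <
+     *           ADJACENT_TRACK_SEEK_DELTA;      // 1 << 20, i.e. 1MB
+     * )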
+ */ + private final FileDeletionDetector fdd; + + /* + * Stats + */ + final StatGroup stats; + final LongStat nRandomReads; + final LongStat nRandomWrites; + final LongStat nSequentialReads; + final LongStat nSequentialWrites; + final LongStat nRandomReadBytes; + final LongStat nRandomWriteBytes; + final LongStat nSequentialReadBytes; + final LongStat nSequentialWriteBytes; + final IntStat nFileOpens; + final IntStat nOpenFiles; + final LongStat nBytesReadFromWriteQueue; + final LongStat nBytesWrittenFromWriteQueue; + final LongStat nReadsFromWriteQueue; + final LongStat nWritesFromWriteQueue; + final LongStat nWriteQueueOverflow; + final LongStat nWriteQueueOverflowFailures; + /* all fsyncs, includes those issued for group commit */ + final LongStat nLogFSyncs; + final LongStat nFSyncTime; + final LongMaxZeroStat nFSyncMaxTime; + + /** + * Set up the file cache and initialize the file manager to point to the + * beginning of the log. + * + * @param dbEnvHome environment home directory + * + * @throws IllegalArgumentException via Environment ctor + * + * @throws EnvironmentLockedException via Environment ctor + */ + public FileManager(EnvironmentImpl envImpl, + File dbEnvHome, + boolean readOnly) + throws EnvironmentLockedException { + + this.envImpl = envImpl; + this.dbEnvHome = dbEnvHome; + this.readOnly = readOnly; + + boolean success = false; + + stats = new StatGroup(LogStatDefinition.FILEMGR_GROUP_NAME, + LogStatDefinition.FILEMGR_GROUP_DESC); + nRandomReads = new LongStat(stats, FILEMGR_RANDOM_READS); + nRandomWrites = new LongStat(stats, FILEMGR_RANDOM_WRITES); + nSequentialReads = new LongStat(stats, FILEMGR_SEQUENTIAL_READS); + nSequentialWrites = new LongStat(stats, FILEMGR_SEQUENTIAL_WRITES); + nRandomReadBytes = new LongStat(stats, FILEMGR_RANDOM_READ_BYTES); + nRandomWriteBytes = new LongStat(stats, FILEMGR_RANDOM_WRITE_BYTES); + nSequentialReadBytes = + new LongStat(stats, FILEMGR_SEQUENTIAL_READ_BYTES); + nSequentialWriteBytes = + new LongStat(stats, FILEMGR_SEQUENTIAL_WRITE_BYTES); + nFileOpens = new IntStat(stats, FILEMGR_FILE_OPENS); + nOpenFiles = new IntStat(stats, FILEMGR_OPEN_FILES); + nBytesReadFromWriteQueue = + new LongStat(stats, FILEMGR_BYTES_READ_FROM_WRITEQUEUE); + nBytesWrittenFromWriteQueue = + new LongStat(stats, FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE); + nReadsFromWriteQueue = + new LongStat(stats, FILEMGR_READS_FROM_WRITEQUEUE); + nWritesFromWriteQueue = + new LongStat(stats, FILEMGR_WRITES_FROM_WRITEQUEUE); + nWriteQueueOverflow = new LongStat(stats, FILEMGR_WRITEQUEUE_OVERFLOW); + nWriteQueueOverflowFailures = + new LongStat(stats, FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES); + nLogFSyncs = new LongStat(stats, FILEMGR_LOG_FSYNCS); + nFSyncTime = new LongStat(stats, GRPCMGR_FSYNC_TIME); + nFSyncMaxTime = new LongMaxZeroStat(stats, GRPCMGR_FSYNC_MAX_TIME); + + try { + /* Read configurations. 
*/ + DbConfigManager configManager = envImpl.getConfigManager(); + maxFileSize = + configManager.getLong(EnvironmentParams.LOG_FILE_MAX); + + useWriteQueue = configManager.getBoolean( + EnvironmentParams.LOG_USE_WRITE_QUEUE); + + writeQueueSize = configManager.getInt( + EnvironmentParams.LOG_WRITE_QUEUE_SIZE); + + useODSYNC = configManager.getBoolean( + EnvironmentParams.LOG_USE_ODSYNC); + + VERIFY_CHECKSUMS = configManager.getBoolean( + EnvironmentParams.LOG_VERIFY_CHECKSUMS); + + fSyncTimeLimit = configManager.getDuration( + EnvironmentParams.LOG_FSYNC_TIME_LIMIT); + + nDataDirs = configManager.getInt( + EnvironmentParams.LOG_N_DATA_DIRECTORIES); + + if (nDataDirs != 0) { + dbEnvDataDirs = gatherDataDirs(); + } else { + checkNoDataDirs(); + dbEnvDataDirs = null; + } + + if (!envImpl.isMemOnly()) { + if (!dbEnvHome.exists()) { + throw new IllegalArgumentException + ("Environment home " + dbEnvHome + " doesn't exist"); + } + + /* + * If this is an arbiter take an exclusive lock. + */ + boolean isReadOnly = envImpl.isArbiter() ? false : readOnly; + if (!lockEnvironment(isReadOnly, false)) { + throw new EnvironmentLockedException + (envImpl, + "The environment cannot be locked for " + + (isReadOnly ? "shared" : "single writer") + + " access."); + } + } + + /* Cache of files. */ + fileCache = new FileCache(configManager); + + /* Start out as if no log existed. */ + currentFileNum = 0L; + nextAvailableLsn = + DbLsn.makeLsn(currentFileNum, firstLogEntryOffset()); + lastUsedLsn = DbLsn.NULL_LSN; + perFileLastUsedLsn = + Collections.synchronizedMap(new HashMap()); + endOfLog = new LogEndFileDescriptor(); + forceNewFile = false; + + final String stopOnWriteCountName = "je.debug.stopOnWriteCount"; + final String stopOnWriteCountProp = + System.getProperty(stopOnWriteCountName); + if (stopOnWriteCountProp != null) { + try { + STOP_ON_WRITE_COUNT = Long.parseLong(stopOnWriteCountProp); + } catch (NumberFormatException e) { + throw new IllegalArgumentException + ("Could not parse: " + stopOnWriteCountName, e); + } + } + + final String stopOnWriteActionName = "je.debug.stopOnWriteAction"; + final String stopOnWriteActionProp = + System.getProperty(stopOnWriteActionName); + if (stopOnWriteActionProp != null) { + if (stopOnWriteActionProp.compareToIgnoreCase("throw") == 0) { + THROW_ON_WRITE = true; + } else if (stopOnWriteActionProp. + compareToIgnoreCase("stop") == 0) { + THROW_ON_WRITE = false; + } else { + throw new IllegalArgumentException + ("Unknown value for: " + stopOnWriteActionName + + stopOnWriteActionProp); + } + } + + final Boolean logFileDeleteDetect = configManager.getBoolean( + EnvironmentParams.LOG_DETECT_FILE_DELETE); + if (!envImpl.isMemOnly() && logFileDeleteDetect) { + fdd = new FileDeletionDetector( + dbEnvHome, dbEnvDataDirs, envImpl); + } else { + fdd = null; + } + + success = true; + } finally { + if (!success) { + try { + close(); + } catch (IOException e) { + /* + * Klockwork - ok + * Eat it, we want to throw the original exception. + */ + } + } + } + } + + /** + * Set the file manager's "end of log". + * + * @param nextAvailableLsn LSN to be used for the next log entry + * @param lastUsedLsn last LSN to have a valid entry, may be null + * @param prevOffset value to use for the prevOffset of the next entry. + * If the beginning of the file, this is 0. 
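+     *
+     * (A sketch of how these LSN arguments decompose, via the DbLsn helpers
+     * used throughout this class; the values are hypothetical:
+     *
+     *   long lsn = DbLsn.makeLsn(3L, 0x1000L);   // file 3, offset 0x1000
+     *   DbLsn.getFileNumber(lsn);                // 3
+     *   DbLsn.getFileOffset(lsn);                // 0x1000
+     * )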
+ */ + public void setLastPosition(long nextAvailableLsn, + long lastUsedLsn, + long prevOffset) { + this.lastUsedLsn = lastUsedLsn; + perFileLastUsedLsn.put(DbLsn.getFileNumber(lastUsedLsn), lastUsedLsn); + this.nextAvailableLsn = nextAvailableLsn; + currentFileNum = DbLsn.getFileNumber(this.nextAvailableLsn); + } + + /** + * May be used to disable sync at file end to speed unit tests. + * Must only be used for unit testing, since log corruption may result. + */ + public void setSyncAtFileEnd(boolean sync) { + syncAtFileEnd = sync; + } + + /* + * File management + */ + + /** + * public for cleaner. + * + * @return the number of the first file in this environment. + */ + public Long getFirstFileNum() { + return getFileNum(true); + } + + public boolean getReadOnly() { + return readOnly; + } + + /** + * @return the number of the last file in this environment. + */ + public Long getLastFileNum() { + return getFileNum(false); + } + + /** + * Returns the highest (current) file number. Note that this is + * unsynchronized, so if it is called outside the log write latch it is + * only valid as an approximation. + */ + public long getCurrentFileNum() { + return currentFileNum; + } + + /** + * For unit tests. + */ + boolean getUseWriteQueue() { + return useWriteQueue; + } + + /** + * For assertions that check whether a file is valid or has been deleted + * via log cleaning. + */ + public boolean isFileValid(long fileNum) { + + /* + * If the file is the current file, it may be buffered and not yet + * created. If the env is memory-only, we will never create or delete + * log files. + */ + if (fileNum == currentFileNum || envImpl.isMemOnly()) { + return true; + } + + /* Check for file existence. */ + String fileName = getFullFileName(fileNum, FileManager.JE_SUFFIX); + File file = new File(fileName); + return file.exists(); + } + + public void setIncludeDeletedFiles(boolean includeDeletedFiles) { + this.includeDeletedFiles = includeDeletedFiles; + } + + /** + * Get all JE file numbers. + * @return an array of all JE file numbers. + */ + public Long[] getAllFileNumbers() { + /* Get all the names in sorted order. */ + String[] names = listFileNames(JE_SUFFIXES); + Long[] nums = new Long[names.length]; + for (int i = 0; i < nums.length; i += 1) { + String name = names[i]; + long num = nums[i] = getNumFromName(name); + if (nDataDirs != 0) { + int dbEnvDataDirsIdx = getDataDirIndexFromName(name) - 1; + if (dbEnvDataDirsIdx != (num % nDataDirs)) { + throw EnvironmentFailureException.unexpectedState + ("Found file " + name + " but it should have been in " + + "data directory " + (dbEnvDataDirsIdx + 1) + + ". Perhaps it was moved or restored incorrectly?"); + } + } + } + return nums; + } + + /** + * Get the next file number before/after currentFileNum. + * @param curFile the file we're at right now. Note that + * it may not exist, if it's been cleaned and renamed. + * @param forward if true, we want the next larger file, if false + * we want the previous file + * @return null if there is no following file, or if filenum doesn't exist + */ + public Long getFollowingFileNum(long curFile, boolean forward) { + + /* + * First try the next/prev file number without listing all files. This + * efficiently supports an important use case: reading files during + * recovery, where there are no gaps due to log cleaning. If there is a + * gap due to log cleaning, fall through and get a list of all files. 
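+     *
+     * (Aside on the multi-directory check in getAllFileNumbers above: *.jdb
+     * files are spread round-robin across the data directories, so with a
+     * hypothetical nDataDirs == 3:
+     *
+     *   00000000.jdb -> data001    (0 % 3 == 0)
+     *   00000001.jdb -> data002    (1 % 3 == 1)
+     *   00000002.jdb -> data003    (2 % 3 == 2)
+     *   00000003.jdb -> data001    (3 % 3 == 0)
+     * )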
+ */ + final long tryFile; + if (forward) { + if (curFile == Long.MAX_VALUE) { + return null; + } + tryFile = curFile + 1; + } else { + if (curFile <= 0) { + return null; + } + tryFile = curFile - 1; + } + + String tryName = getFullFileName(tryFile, JE_SUFFIX); + if ((new File(tryName)).isFile()) { + return tryFile; + } + + /* Get all the names in sorted order. */ + String[] names = listFileNames(JE_SUFFIXES); + + /* Search for the current file. */ + String searchName = getFileName(curFile, JE_SUFFIX); + int foundIdx = Arrays.binarySearch(names, searchName, stringComparator); + + boolean foundTarget = false; + if (foundIdx >= 0) { + if (forward) { + foundIdx++; + } else { + foundIdx--; + } + } else { + + /* + * currentFileNum not found (might have been cleaned). FoundIdx + * will be (-insertionPoint - 1). + */ + foundIdx = Math.abs(foundIdx + 1); + if (!forward) { + foundIdx--; + } + } + + /* The current fileNum is found, return the next or prev file. */ + if (forward && (foundIdx < names.length)) { + foundTarget = true; + } else if (!forward && (foundIdx > -1)) { + foundTarget = true; + } + + if (foundTarget) { + return getNumFromName(names[foundIdx]); + } + return null; + } + + /** + * @return true if there are any files at all. + */ + public boolean filesExist() { + String[] names = listFileNames(JE_SUFFIXES); + return (names.length != 0); + } + + /** + * Get the first or last file number in the set of JE files. + * + * @param first if true, get the first file, else get the last file + * @return the file number or null if no files exist + */ + private Long getFileNum(boolean first) { + String[] names = listFileNames(JE_SUFFIXES); + if (names.length == 0) { + return null; + } + int index = 0; + if (!first) { + index = names.length - 1; + } + return getNumFromName(names[index]); + } + + /** + * Get the data dir index from a file name. + * + * @return index into dbEnvDataDirs of this fileName's data directory. + * -1 if multiple data directories are not being used. + */ + private int getDataDirIndexFromName(String fileName) { + if (nDataDirs == 0) { + return -1; + } + + int dataDirEnd = fileName.lastIndexOf(File.separator); + String dataDir = fileName.substring(0, dataDirEnd); + return Integer.valueOf + (Integer.parseInt(dataDir.substring("data".length()))); + } + + /** + * Get the file number from a file name. + * + * @param fileName the file name + * @return the file number + */ + public Long getNumFromName(String fileName) { + String name = fileName; + if (nDataDirs != 0) { + name = name.substring(name.lastIndexOf(File.separator) + 1); + } + String fileNumber = name.substring(0, name.indexOf(".")); + return Long.valueOf(Long.parseLong(fileNumber, 16)); + } + + /** + * Find JE files. Return names sorted in ascending fashion. + * @param suffixes which type of file we're looking for + * @return array of file names + * + * Used by unit tests so package protection. + */ + String[] listFileNames(String[] suffixes) { + JEFileFilter fileFilter = new JEFileFilter(suffixes); + return listFileNamesInternal(fileFilter); + } + + /** + * Find .jdb files which are >= the minimimum file number and + * <= the maximum file number. + * Return names sorted in ascending fashion. 
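+     *
+     * (File names encode the file number as eight hex digits plus a suffix;
+     * a sketch of the round trip, with a hypothetical number:
+     *
+     *   String name = FileManager.getFileName(255L);  // "000000ff.jdb"
+     *   long num = Long.parseLong(name.substring(0, 8), 16);  // 255
+     * )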
+     *
+     * @return array of file names
+     */
+    public String[] listFileNames(long minFileNumber, long maxFileNumber) {
+        JEFileFilter fileFilter =
+            new JEFileFilter(JE_SUFFIXES, minFileNumber, maxFileNumber);
+        return listFileNamesInternal(fileFilter);
+    }
+
+    private static Comparator<File> fileComparator =
+        new Comparator<File>() {
+
+        private String getFileNum(File file) {
+            String fname = file.toString();
+            return fname.substring(fname.indexOf(File.separator) + 1);
+        }
+
+        public int compare(File o1, File o2) {
+            /*
+             * Compare by the name after the directory separator rather than
+             * by full path, so that files spread across dataNNN directories
+             * sort by file number.
+             */
+            String fnum1 = getFileNum(o1);
+            String fnum2 = getFileNum(o2);
+            return fnum1.compareTo(fnum2);
+        }
+    };
+
+    private static Comparator<String> stringComparator =
+        new Comparator<String>() {
+
+        private String getFileNum(String fname) {
+            return fname.substring(fname.indexOf(File.separator) + 1);
+        }
+
+        public int compare(String o1, String o2) {
+            String fnum1 = getFileNum(o1);
+            String fnum2 = getFileNum(o2);
+            return fnum1.compareTo(fnum2);
+        }
+    };
+
+    /**
+     * Find JE files, flavor for unit test support.
+     *
+     * @param suffixes which type of file we're looking for
+     * @return array of file names
+     */
+    public static String[] listFiles(File envDirFile,
+                                     String[] suffixes,
+                                     boolean envMultiSubDir) {
+        String[] names = envDirFile.list(new JEFileFilter(suffixes));
+
+        /* list() returns null if the directory is missing or unreadable. */
+        if (names == null) {
+            names = new String[0];
+        }
+
+        ArrayList<String> subFileNames = new ArrayList<>();
+        if (envMultiSubDir) {
+            for (File file : envDirFile.listFiles()) {
+                if (file.isDirectory() && file.getName().startsWith("data")) {
+                    File[] subFiles =
+                        file.listFiles(new JEFileFilter(suffixes));
+                    for (File subFile : subFiles) {
+                        subFileNames.add(file.getName() +
+                                         File.separator + subFile.getName());
+                    }
+                }
+            }
+
+            String[] totalFileNames =
+                new String[names.length + subFileNames.size()];
+            for (int i = 0; i < totalFileNames.length; i++) {
+                if (i < names.length) {
+                    totalFileNames[i] = names[i];
+                } else {
+                    totalFileNames[i] = subFileNames.get(i - names.length);
+                }
+            }
+            names = totalFileNames;
+        }
+
+        Arrays.sort(names, stringComparator);
+
+        return names;
+    }
+
+    public File[] listJDBFiles() {
+        if (nDataDirs == 0) {
+            return listJDBFilesInternalSingleDir(
+                new JEFileFilter(JE_SUFFIXES));
+        } else {
+            return listJDBFilesInternalMultiDir(
+                new JEFileFilter(JE_SUFFIXES));
+        }
+    }
+
+    public File[] listJDBFilesInternalSingleDir(JEFileFilter fileFilter) {
+        File[] files = dbEnvHome.listFiles(fileFilter);
+        if (files != null) {
+            Arrays.sort(files);
+        } else {
+            files = new File[0];
+        }
+
+        return files;
+    }
+
+    public File[] listJDBFilesInternalMultiDir(JEFileFilter fileFilter) {
+        File[][] files = new File[nDataDirs][];
+        int nTotalFiles = 0;
+        int i = 0;
+        for (File envDir : dbEnvDataDirs) {
+            files[i] = envDir.listFiles(fileFilter);
+            nTotalFiles += files[i].length;
+            i++;
+        }
+
+        if (nTotalFiles == 0) {
+            return new File[0];
+        }
+
+        File[] ret = new File[nTotalFiles];
+        i = 0;
+        for (File[] envFiles : files) {
+            for (File envFile : envFiles) {
+                ret[i++] = envFile;
+            }
+        }
+
+        Arrays.sort(ret, fileComparator);
+        return ret;
+    }
+
+    private String[] listFileNamesInternal(JEFileFilter fileFilter) {
+        if (nDataDirs == 0) {
+            return listFileNamesInternalSingleDir(fileFilter);
+        } else {
+            return listFileNamesInternalMultiDirs(fileFilter);
+        }
+    }
+
+    private String[] listFileNamesInternalSingleDir(JEFileFilter fileFilter) {
+        String[] fileNames = dbEnvHome.list(fileFilter);
+        if (fileNames != null) {
+            Arrays.sort(fileNames);
+        } else {
+            fileNames = new String[0];
+        }
+        return fileNames;
+    }
+
+    private String[]
listFileNamesInternalMultiDirs(JEFileFilter filter) { + String[][] files = new String[nDataDirs][]; + int nTotalFiles = 0; + int i = 0; + for (File envDir : dbEnvDataDirs) { + files[i] = envDir.list(filter); + + String envDirName = envDir.toString(); + String dataDirName = envDirName. + substring(envDirName.lastIndexOf(File.separator) + 1); + + for (int j = 0; j < files[i].length; j += 1) { + files[i][j] = dataDirName + File.separator + files[i][j]; + } + + nTotalFiles += files[i].length; + i++; + } + + if (nTotalFiles == 0) { + return new String[0]; + } + + String[] ret = new String[nTotalFiles]; + i = 0; + for (String[] envFiles : files) { + for (String envFile : envFiles) { + ret[i++] = envFile; + } + } + + Arrays.sort(ret, stringComparator); + return ret; + } + + private void checkNoDataDirs() { + String[] dataDirNames = + dbEnvHome.list(new FilenameFilter() { + public boolean accept(File dir, String name) { + /* We'll validate the subdirNum later. */ + return name != null && + name.length() == "dataNNN".length() && + name.startsWith("data"); + } + } + ); + if (dataDirNames != null && dataDirNames.length != 0) { + throw EnvironmentFailureException.unexpectedState + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName() + + " was not set and expected to find no" + + " data directories, but found " + + dataDirNames.length + " data directories instead."); + } + } + + public File[] gatherDataDirs() { + String[] dataDirNames = + dbEnvHome.list(new FilenameFilter() { + public boolean accept(File dir, String name) { + /* We'll validate the subdirNum later. */ + return name != null && + name.length() == "dataNNN".length() && + name.startsWith("data"); + } + } + ); + if (dataDirNames != null) { + Arrays.sort(dataDirNames); + } else { + dataDirNames = new String[0]; + } + + if (dataDirNames.length != nDataDirs) { + throw EnvironmentFailureException.unexpectedState + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName() + + " was set and expected to find " + nDataDirs + + " data directories, but found " + + dataDirNames.length + " instead."); + } + + int ddNum = 1; + File[] dataDirs = new File[nDataDirs]; + for (String fn : dataDirNames) { + String subdirNumStr = fn.substring(4); + try { + int subdirNum = Integer.parseInt(subdirNumStr); + if (subdirNum != ddNum) { + throw EnvironmentFailureException.unexpectedState + ("Expected to find data subdir: data" + + paddedDirNum(ddNum) + + " but found data" + + subdirNumStr + " instead."); + + } + + File dataDir = new File(dbEnvHome, fn); + if (!dataDir.exists()) { + throw EnvironmentFailureException.unexpectedState + ("Data dir: " + dataDir + " doesn't exist."); + } + if (!dataDir.isDirectory()) { + throw EnvironmentFailureException.unexpectedState + ("Data dir: " + dataDir + " is not a directory."); + } + dataDirs[ddNum - 1] = dataDir; + } catch (NumberFormatException E) { + throw EnvironmentFailureException.unexpectedState + ("Illegal data subdir: data" + subdirNumStr); + } + ddNum++; + } + return dataDirs; + } + + private String paddedDirNum(int dirNum) { + String paddedStr = "000" + dirNum; + int len = paddedStr.length(); + return paddedStr.substring(len - 3); + } + + /** + * @return the full file name and path for the nth JE file. 
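+     *
+     * (Worked example for paddedDirNum above: dirNum 7 gives "000" + 7 =
+     * "0007", and keeping the last three characters yields "007", i.e. the
+     * data007 sub-directory.)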
+ */ + String[] getFullFileNames(long fileNum) { + if (includeDeletedFiles) { + int nSuffixes = JE_AND_DEL_SUFFIXES.length; + String[] ret = new String[nSuffixes]; + for (int i = 0; i < nSuffixes; i++) { + ret[i] = getFullFileName(fileNum, JE_AND_DEL_SUFFIXES[i]); + } + return ret; + } + return new String[] { getFullFileName(fileNum, JE_SUFFIX) }; + } + + private File getDataDir(long fileNum) { + return (nDataDirs == 0) ? + dbEnvHome : + dbEnvDataDirs[((int) (fileNum % nDataDirs))]; + } + + public String getFullFileName(long fileNum) { + return getFullFileName(fileNum, JE_SUFFIX); + } + + /** + * @return the full file name and path for this file name. + */ + public String getFullFileName(long fileNum, String suffix) { + File dbEnvDataDir = getDataDir(fileNum); + return dbEnvDataDir + File.separator + getFileName(fileNum, suffix); + } + + /** + * @return the file name relative to the env home directory. + */ + public String getPartialFileName(long fileNum) { + String name = getFileName(fileNum, JE_SUFFIX); + if (nDataDirs == 0) { + return name; + } + File dataDir = getDataDir(fileNum); + return dataDir.getName() + File.separator + name; + } + + /* + * Return the full file name of a specified log file name, including the + * sub directories names if needed. + */ + public String getFullFileName(String fileName) { + final int suffixStartPos = fileName.indexOf("."); + String suffix = fileName.substring(suffixStartPos, fileName.length()); + assert suffix != null; + String fileNum = fileName.substring(0, suffixStartPos); + + return getFullFileName + (Long.valueOf(Long.parseLong(fileNum, 16)), suffix); + } + + /** + * @return the file name for the nth file. + */ + public static String getFileName(long fileNum, String suffix) { + return (getFileNumberString(fileNum) + suffix); + } + + /** @return the file name for the nth log (*.jdb) file. */ + public static String getFileName(long fileNum) { + return getFileName(fileNum, JE_SUFFIX); + } + + /** + * HexFormatter generates a 0 padded string starting with 0x. We want + * the right most 8 digits, so start at 10. + */ + private static String getFileNumberString(long fileNum) { + return HexFormatter.formatLong(fileNum).substring(10); + } + + /** + * @return true if successful, false if File.renameTo returns false, which + * can occur on Windows if the file was recently closed. + */ + public boolean renameFile(final long fileNum, final String newSuffix) + throws IOException, DatabaseException { + + return renameFile(fileNum, newSuffix, null) != null; + } + + /** + * Rename this file to NNNNNNNN.suffix. If that file already exists, try + * NNNNNNNN.suffix.1, etc. Used for deleting files or moving corrupt files + * aside. + * + * @param fileNum the file we want to move + * + * @param newSuffix the new file suffix + * + * @param subDir the data directory sub-directory to rename the file into. + * The subDir must already exist. May be null to leave the file in its + * current data directory. + * + * @return renamed File if successful, or null if File.renameTo returns + * false, which can occur on Windows if the file was recently closed. + */ + public File renameFile(final long fileNum, + final String newSuffix, + final String subDir) + throws IOException { + + final File oldDir = getDataDir(fileNum); + final String oldName = getFileName(fileNum); + final File oldFile = new File(oldDir, oldName); + + final File newDir = + (subDir != null) ? 
(new File(oldDir, subDir)) : oldDir; + + final String newName = getFileName(fileNum, newSuffix); + + String generation = ""; + int repeatNum = 0; + + while (true) { + final File newFile = new File(newDir, newName + generation); + + if (newFile.exists()) { + repeatNum++; + generation = "." + repeatNum; + continue; + } + + /* + * If CLEANER_EXPUNGE is false, then the cleaner will rename + * the .jdb file. The rename action will first delete the + * old file and then create the new file. So we should also + * record the file rename action here. + */ + if (fdd != null) { + if (oldName.endsWith(FileManager.JE_SUFFIX)) { + fdd.addDeletedFile(oldName); + } + } + + clearFileCache(fileNum); + + final boolean success = oldFile.renameTo(newFile); + return success ? newFile : null; + } + } + + /** + * Delete log file NNNNNNNN. + * + * @param fileNum the file we want to move + * + * @return true if successful, false if File.delete returns false, which + * can occur on Windows if the file was recently closed. + */ + public boolean deleteFile(final long fileNum) + throws IOException, DatabaseException { + + final String fileName = getFullFileNames(fileNum)[0]; + + /* + * Add files deleted by JE to filesDeletedByJE in fdd, which aims to + * check whether a deleted file is deleted by JE or by users wrongly. + * + * The file name gotten from WatchKey is the relative file name, + * so we should also get the relative file name here. + */ + if (fdd != null) { + if (fileName.endsWith(FileManager.JE_SUFFIX)) { + final int index = fileName.lastIndexOf(File.separator) + 1; + final String relativeFileName = fileName.substring(index); + fdd.addDeletedFile(relativeFileName); + } + } + + clearFileCache(fileNum); + final File file = new File(fileName); + return file.delete(); + } + + /** + * Returns the log version for the given file. + */ + public int getFileLogVersion(long fileNum) + throws DatabaseException { + + try { + FileHandle handle = getFileHandle(fileNum); + int logVersion = handle.getLogVersion(); + handle.release(); + return logVersion; + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e); + } + } + + /** + * Return a read only file handle that corresponds to this file number. + * Retrieve it from the cache or open it anew and validate the file header. + * This method takes a latch on this file, so that the file descriptor will + * be held in the cache as long as it's in use. When the user is done with + * the file, the latch must be released. + * + * @param fileNum which file + * @return the file handle for the existing or newly created file + */ + public FileHandle getFileHandle(long fileNum) + throws FileNotFoundException, ChecksumException, DatabaseException { + + /* Check the file cache for this file. */ + Long fileId = Long.valueOf(fileNum); + FileHandle fileHandle = null; + + /** + * Loop until we get an open FileHandle. + */ + try { + while (true) { + + /* + * The file cache is intentionally not latched here so that + * it's not a bottleneck in the fast path. We check that the + * file handle that we get back is really still open after we + * latch it down below. + */ + fileHandle = fileCache.get(fileId); + + /* + * If the file isn't in the cache, latch the cache and check + * again. Under the latch, if the file is not in the cache we + * add it to the cache but do not open the file yet. 
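+                 *
+                 * (Aside on renameFile above: the generation loop probes
+                 * NNNNNNNN.del, then NNNNNNNN.del.1, NNNNNNNN.del.2, and so
+                 * on until an unused name is found; a hypothetical call:
+                 *
+                 *   fileManager.renameFile(0x1aL, FileManager.DEL_SUFFIX);
+                 *   // 0000001a.jdb -> 0000001a.del (or .del.1 if taken)
+                 * )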
We latch + * the handle here, and open the file further below after + * releasing the cache latch. This prevents blocking other + * threads that are opening other files while we open this + * file. The latch on the handle blocks other threads waiting + * to open the same file, which is necessary. + */ + boolean newHandle = false; + if (fileHandle == null) { + synchronized (fileCache) { + fileHandle = fileCache.get(fileId); + if (fileHandle == null) { + newHandle = true; + fileHandle = addFileHandle(fileId); + } + } + } + + if (newHandle) { + + /* + * Open the file with the fileHandle latched. It was + * latched by addFileHandle above. + */ + boolean success = false; + try { + openFileHandle(fileHandle, FileMode.READ_MODE, + null /*existingHandle*/); + success = true; + } finally { + if (!success) { + /* An exception is in flight -- clean up. */ + fileHandle.release(); + clearFileCache(fileNum); + } + } + } else { + + /* + * The handle was found in the cache. Latch the fileHandle + * before checking getFile below and returning. + */ + if (!fileHandle.latchNoWait()) { + + /* + * But the handle was latched. Rather than wait, let's + * just make a new transient handle. It doesn't need + * to be latched, but it does need to be closed. + */ + final FileHandle existingHandle = fileHandle; + fileHandle = new FileHandle( + envImpl, fileId, getFileNumberString(fileId)) { + @Override + public void release() + throws DatabaseException { + + try { + close(); + } catch (IOException E) { + // Ignore + } + } + }; + + openFileHandle(fileHandle, FileMode.READ_MODE, + existingHandle); + } + } + + /* + * We may have obtained this file handle outside the file cache + * latch, so we have to test that the handle is still valid. + * If it's not, then loop back and try again. + */ + if (fileHandle.getFile() == null) { + fileHandle.release(); + } else { + break; + } + } + } catch (FileNotFoundException e) { + /* Handle at higher levels. */ + throw e; + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_READ, e); + } + + return fileHandle; + } + + /** + * Creates a new FileHandle and adds it to the cache, but does not open + * the file. + * @return the latched FileHandle. + */ + private FileHandle addFileHandle(Long fileNum) + throws IOException, DatabaseException { + + FileHandle fileHandle = + new FileHandle(envImpl, fileNum, getFileNumberString(fileNum)); + fileCache.add(fileNum, fileHandle); + fileHandle.latch(); + return fileHandle; + } + + private FileMode getAppropriateReadWriteMode() { + if (useODSYNC) { + return FileMode.READWRITE_ODSYNC_MODE; + } + return FileMode.READWRITE_MODE; + } + + /** + * Creates a new handle and opens it. Does not add the handle to the + * cache. + */ + private FileHandle makeFileHandle(long fileNum, FileMode mode) + throws FileNotFoundException, ChecksumException { + + FileHandle fileHandle = + new FileHandle(envImpl, fileNum, getFileNumberString(fileNum)); + openFileHandle(fileHandle, mode, null /*existingHandle*/); + return fileHandle; + } + + /** + * Opens the file for the given handle and initializes it. + * + * @param existingHandle is an already open handle for the same file or + * null. If non-null it is used to avoid the cost of reading the file + * header. 
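+     *
+     * (The caller getFileHandle uses a check/latch/re-check pattern around
+     * the file cache; reduced to a sketch:
+     *
+     *   FileHandle h = fileCache.get(fileId);     // unlatched fast path
+     *   if (h == null) {
+     *       synchronized (fileCache) {
+     *           h = fileCache.get(fileId);        // re-check under the lock
+     *           if (h == null) {
+     *               h = addFileHandle(fileId);    // latched, not yet open
+     *           }
+     *       }
+     *   }
+     *   // open the file outside the cache lock, then verify h.getFile()
+     * )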
+ */ + private void openFileHandle(FileHandle fileHandle, + FileMode mode, + FileHandle existingHandle) + throws FileNotFoundException, ChecksumException { + + nFileOpens.increment(); + long fileNum = fileHandle.getFileNum(); + String[] fileNames = getFullFileNames(fileNum); + RandomAccessFile newFile = null; + String fileName = null; + boolean success = false; + try { + + /* + * Open the file. Note that we are going to try a few names to open + * this file -- we'll try for N.jdb, and if that doesn't exist and + * we're configured to look for all types, we'll look for N.del. + */ + FileNotFoundException FNFE = null; + for (String fileName2 : fileNames) { + fileName = fileName2; + try { + newFile = fileFactory.createFile(dbEnvHome, fileName, + mode.getModeValue()); + break; + } catch (FileNotFoundException e) { + /* Save the first exception thrown. */ + if (FNFE == null) { + FNFE = e; + } + } + } + + /* + * If we didn't find the file or couldn't create it, rethrow the + * exception. + */ + if (newFile == null) { + assert FNFE != null; + throw FNFE; + } + + /* + * If there is an existing open handle, there is no need to read or + * validate the header. Note that the log version is zero if the + * existing handle is not fully initialized. + */ + if (existingHandle != null) { + final int logVersion = existingHandle.getLogVersion(); + if (logVersion > 0) { + fileHandle.init(newFile, logVersion); + success = true; + return; + } + } + + int logVersion = LogEntryType.LOG_VERSION; + + if (newFile.length() == 0) { + + /* + * If the file is empty, reinitialize it if we can. If not, + * send the file handle back up; the calling code will deal + * with the fact that there's nothing there. + */ + if (mode.isWritable()) { + /* An empty file, write a header. */ + long lastLsn = DbLsn.longToLsn(perFileLastUsedLsn.remove + (Long.valueOf(fileNum - 1))); + long headerPrevOffset = 0; + if (lastLsn != DbLsn.NULL_LSN) { + headerPrevOffset = DbLsn.getFileOffset(lastLsn); + } + if ((headerPrevOffset == 0) && + (fileNum > 1) && + syncAtFileEnd) { + /* Get more info if this happens again. [#20732] */ + throw EnvironmentFailureException.unexpectedState + (envImpl, + "Zero prevOffset fileNum=0x" + + Long.toHexString(fileNum) + + " lastLsn=" + DbLsn.getNoFormatString(lastLsn) + + " perFileLastUsedLsn=" + perFileLastUsedLsn + + " fileLen=" + newFile.length()); + } + FileHeader fileHeader = + new FileHeader(fileNum, headerPrevOffset); + writeFileHeader(newFile, fileName, fileHeader, fileNum); + } + } else { + /* A non-empty file, check the header */ + logVersion = + readAndValidateFileHeader(newFile, fileName, fileNum); + } + fileHandle.init(newFile, logVersion); + success = true; + } catch (FileNotFoundException e) { + /* Handle at higher levels. */ + throw e; + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_READ, + "Couldn't open file " + fileName, e); + } catch (DatabaseException e) { + + /* + * Let this exception go as a checksum exception, so it sets the + * run recovery state correctly. + */ + closeFileInErrorCase(newFile); + e.addErrorMessage("Couldn't open file " + fileName); + throw e; + } finally { + if (!success) { + closeFileInErrorCase(newFile); + } + } + } + + /** + * Close this file and eat any exceptions. Used in catch clauses. + */ + private void closeFileInErrorCase(RandomAccessFile file) { + try { + if (file != null) { + file.close(); + } + } catch (Exception e) { + } + } + + /** + * Read the given JE log file and validate the header. 
+ * + * @throws DatabaseException if the file header isn't valid + * + * @return file header log version. + */ + private int readAndValidateFileHeader(RandomAccessFile file, + String fileName, + long fileNum) + throws ChecksumException, DatabaseException { + + /* + * Read the file header from this file. It's always the first log + * entry. + * + * The special UNKNOWN_FILE_HEADER_VERSION value is passed for reading + * the entry header. The actual log version is read as part of the + * FileHeader entry. [#16939] + */ + LogManager logManager = envImpl.getLogManager(); + LogEntry headerEntry = logManager.getLogEntryAllowChecksumException + (DbLsn.makeLsn(fileNum, 0), file, + LogEntryType.UNKNOWN_FILE_HEADER_VERSION); + FileHeader header = (FileHeader) headerEntry.getMainItem(); + return header.validate(envImpl, fileName, fileNum); + } + + /** + * Write a proper file header to the given file. + */ + private void writeFileHeader(RandomAccessFile file, + String fileName, + FileHeader header, + long fileNum) + throws DatabaseException { + + /* Fail loudly if the environment is invalid. */ + envImpl.checkIfInvalid(); + + /* + * Fail silent if the environment is not open. + */ + if (envImpl.mayNotWrite()) { + return; + } + + /* Write file header into this buffer in the usual log entry format. */ + LogEntry headerLogEntry = + new FileHeaderEntry(LogEntryType.LOG_FILE_HEADER, header); + ByteBuffer headerBuf = envImpl.getLogManager(). + putIntoBuffer(headerLogEntry, + 0); // prevLogEntryOffset + + /* Write the buffer into the channel. */ + int bytesWritten; + try { + if (LOGWRITE_EXCEPTION_TESTING) { + generateLogWriteException(file, headerBuf, 0, fileNum); + } + + /* + * Always flush header so that file.length() will be non-zero when + * this method returns and two threads won't attempt to create the + * header. [#20732] + */ + bytesWritten = writeToFile(file, headerBuf, 0, fileNum, + true /*flushRequired*/); + + } catch (ClosedChannelException e) { + + /* + * The channel should never be closed. It may be closed because + * of an interrupt received by another thread. See SR [#10463] + */ + throw new ThreadInterruptedException + (envImpl, "Channel closed, may be due to thread interrupt", e); + } catch (IOException e) { + /* Possibly an out of disk exception. */ + throw new LogWriteException(envImpl, e); + } + + if (bytesWritten != headerLogEntry.getSize() + + LogEntryHeader.MIN_HEADER_SIZE) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + "File " + fileName + + " was created with an incomplete header. Only " + + bytesWritten + " bytes were written."); + } + } + + /** + * @return the prevOffset field stored in the file header. + */ + long getFileHeaderPrevOffset(long fileNum) + throws ChecksumException, DatabaseException { + + try { + LogEntry headerEntry = + envImpl.getLogManager().getLogEntryAllowChecksumException + (DbLsn.makeLsn(fileNum, 0)); + FileHeader header = (FileHeader) headerEntry.getMainItem(); + return header.getLastEntryInPrevFileOffset(); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } + } + + /* + * Support for writing new log entries + */ + + /** + * Returns whether we should flip files to log an entry of 'size' bytes. + */ + boolean shouldFlipFile(long size) { + return forceNewFile || + (DbLsn.getFileOffset(nextAvailableLsn) + size) > maxFileSize; + } + + /** + * Calculates LSN of next entry to be logged. 
+ */ + long calculateNextLsn(boolean flippedFile) { + return flippedFile ? + DbLsn.makeLsn( + currentFileNum + 1, + FileManager.firstLogEntryOffset()) : + nextAvailableLsn; + } + + /** + * Advance LSN position after determining the LSN of an entry to be logged. + * + *

+ * <p>When flippedFile is true, this method must be called after flushing
+ * the prior file. We guarantee that certain volatile LSN fields
+ * (currentFileNum, nextAvailableLsn, lastUsedLsn) are not updated until
+ * after flushing the prior file.</p>
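+ *
+ * <p>Sketch of the expected calling sequence for shouldFlipFile,
+ * calculateNextLsn and advanceLsn, assumed to run under the log write
+ * latch:</p>
+ * <pre>
+ *   boolean flip = shouldFlipFile(size);
+ *   long lsn = calculateNextLsn(flip);
+ *   // if flip is true, flush the prior file before advancing
+ *   long prevOffset = advanceLsn(lsn, size, flip);
+ * </pre>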

        + * + * @param currentLsn value returned by {@link #calculateNextLsn} + * + * @param size value passed to {@link #shouldFlipFile} + * + * @param flippedFile value returned by {@link #shouldFlipFile}. + * + * @return the file offset of the previous LSN that was used. Needed for + * constructing the header of the log entry for currentLsn. If the previous + * LSN was in the previous file, or this is the very first LSN of the env, + * zero is returned. + */ + long advanceLsn(long currentLsn, long size, boolean flippedFile) { + + final long prevOffset; + + if (flippedFile) { + assert DbLsn.getFileNumber(currentLsn) == currentFileNum + 1; + assert DbLsn.getFileOffset(currentLsn) == firstLogEntryOffset(); + + perFileLastUsedLsn.put(currentFileNum, lastUsedLsn); + currentFileNum += 1; + prevOffset = 0; + } else { + assert DbLsn.getFileNumber(currentLsn) == currentFileNum; + + prevOffset = (lastUsedLsn == DbLsn.NULL_LSN) ? + 0 : DbLsn.getFileOffset(lastUsedLsn); + } + + forceNewFile = false; + lastUsedLsn = currentLsn; + + nextAvailableLsn = DbLsn.makeLsn( + currentFileNum, + DbLsn.getFileOffset(currentLsn) + size); + + return prevOffset; + } + + /** + * Write out a log buffer to the file. + * @param fullBuffer buffer to write + * @param flushWriteQueue true if this write can not be queued on the + * Write Queue. + */ + void writeLogBuffer(LogBuffer fullBuffer, boolean flushWriteQueue) + throws DatabaseException { + + /* Fail loudly if the environment is invalid. */ + envImpl.checkIfInvalid(); + + /* + * Fail silent if the environment is not open. + */ + if (envImpl.mayNotWrite()) { + return; + } + + /* Use the LSN to figure out what file to write this buffer to. */ + long firstLsn = fullBuffer.getFirstLsn(); + + /* + * Is there anything in this write buffer? We could have been called by + * the environment shutdown, and nothing is actually in the buffer. + */ + if (firstLsn != DbLsn.NULL_LSN) { + + RandomAccessFile file = + endOfLog.getWritableFile(DbLsn.getFileNumber(firstLsn), true); + ByteBuffer data = fullBuffer.getDataBuffer(); + + try { + + /* + * Check that we do not overwrite unless the file only contains + * a header [#11915] [#12616]. + */ + assert fullBuffer.getRewriteAllowed() || + (DbLsn.getFileOffset(firstLsn) >= file.length() || + file.length() == firstLogEntryOffset()) : + "FileManager would overwrite non-empty file 0x" + + Long.toHexString(DbLsn.getFileNumber(firstLsn)) + + " lsnOffset=0x" + + Long.toHexString(DbLsn.getFileOffset(firstLsn)) + + " fileLength=0x" + + Long.toHexString(file.length()); + + if (LOGWRITE_EXCEPTION_TESTING) { + generateLogWriteException + (file, data, DbLsn.getFileOffset(firstLsn), + DbLsn.getFileNumber(firstLsn)); + } + writeToFile(file, data, DbLsn.getFileOffset(firstLsn), + DbLsn.getFileNumber(firstLsn), + flushWriteQueue); + } catch (ClosedChannelException e) { + + /* + * The file should never be closed. It may be closed because + * of an interrupt received by another thread. See SR [#10463]. + */ + throw new ThreadInterruptedException + (envImpl, "File closed, may be due to thread interrupt", + e); + } catch (IOException e) { + throw new LogWriteException(envImpl, e); + } + + assert EnvironmentImpl.maybeForceYield(); + } + } + + /** + * Write a buffer to a file at a given offset. 
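+ *
+ * <p>For statistics, a write is classified as sequential when it lands
+ * within ADJACENT_TRACK_SEEK_DELTA bytes of the last touched position
+ * in the same file, and as random otherwise (mirrors the check in the
+ * method body):</p>
+ * <pre>
+ *   boolean sequential =
+ *       lastFileNumberTouched == fileNum &&
+ *       Math.abs(destOffset - lastFileTouchedOffset) <
+ *           ADJACENT_TRACK_SEEK_DELTA;
+ * </pre>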
+ */ + private int writeToFile(RandomAccessFile file, + ByteBuffer data, + long destOffset, + long fileNum, + boolean flushWriteQueue) + throws IOException, DatabaseException { + + int totalBytesWritten = 0; + + bumpWriteCount("write"); + + int pos = data.position(); + int size = data.limit() - pos; + + if (lastFileNumberTouched == fileNum && + (Math.abs(destOffset - lastFileTouchedOffset) < + ADJACENT_TRACK_SEEK_DELTA)) { + nSequentialWrites.increment(); + nSequentialWriteBytes.add(size); + } else { + nRandomWrites.increment(); + nRandomWriteBytes.add(size); + } + + if (VERIFY_CHECKSUMS) { + verifyChecksums(data, destOffset, "pre-write"); + } + + /* + * Perform a RandomAccessFile write and update the buffer position. + * ByteBuffer.array() is safe to use since all non-direct ByteBuffers + * have a backing array. + * + * Synchronization on the file object is needed because two threads may + * call seek() on the same file object. + * + * If the Write Queue is enabled, attempt to get the fsync latch. If + * we can't get it, then an fsync or write is in progress and we'd + * block anyway. In that case, queue the write operation. + */ + boolean fsyncLatchAcquired = + endOfLog.fsyncFileSynchronizer.tryLock(); + boolean enqueueSuccess = false; + if (!fsyncLatchAcquired && + useWriteQueue && + !flushWriteQueue) { + enqueueSuccess = + endOfLog.enqueueWrite(fileNum, data.array(), destOffset, + pos + data.arrayOffset(), size); + } + + if (!enqueueSuccess) { + if (!fsyncLatchAcquired) { + endOfLog.fsyncFileSynchronizer.lock(); + } + try { + if (useWriteQueue) { + endOfLog.dequeuePendingWrites1(); + } + + synchronized (file) { + + file.seek(destOffset); + file.write(data.array(), pos + data.arrayOffset(), size); + + if (VERIFY_CHECKSUMS) { + file.seek(destOffset); + file.read( + data.array(), pos + data.arrayOffset(), size); + verifyChecksums(data, destOffset, "post-write"); + } + } + } finally { + endOfLog.fsyncFileSynchronizer.unlock(); + } + } + data.position(pos + size); + totalBytesWritten = size; + + lastFileNumberTouched = fileNum; + lastFileTouchedOffset = destOffset + size; + return totalBytesWritten; + } + + private void bumpWriteCount(final String debugMsg) + throws IOException { + + if (DEBUG) { + System.out.println("Write: " + WRITE_COUNT + " " + debugMsg); + } + + if (++WRITE_COUNT >= STOP_ON_WRITE_COUNT && + WRITE_COUNT < (STOP_ON_WRITE_COUNT + N_BAD_WRITES)) { + if (THROW_ON_WRITE) { + throw new IOException + ("IOException generated for testing: " + WRITE_COUNT + + " " + debugMsg); + } + Runtime.getRuntime().halt(0xff); + } + } + + /** + * Read a buffer from a file at a given offset. We know that the desired + * data exists in this file. There's no need to incur extra costs + * such as checks of the file length, nor to return status as to whether + * this file contains the data. + */ + void readFromFile(RandomAccessFile file, + ByteBuffer readBuffer, + long offset, + long fileNo) + throws DatabaseException { + readFromFile(file, readBuffer, offset, fileNo, + true /* dataKnownToBeInFile */); + } + + /** + * Read a buffer from a file at a given offset. + * + * @return true if the read buffer is filled, false, if there is nothing + * left in the file to read + */ + boolean readFromFile(RandomAccessFile file, + ByteBuffer readBuffer, + long offset, + long fileNo, + boolean dataKnownToBeInFile) + throws DatabaseException { + + /* + * All IOExceptions on read turn into EnvironmentFailureExceptions + * [#15768]. 
+ */ + try { + + /* + * Check if there's a pending write(s) in the write queue for this + * fileNo/offset and if so, use it to fulfill this read request. + */ + if (useWriteQueue && + endOfLog.checkWriteCache(readBuffer, offset, fileNo)) { + return true; + } + + /* + * Nothing queued, all data for this file must be in the file. + * Note that there's no synchronization between the check of the + * write queue above, and this check of file length. It's possible + * that a newly written log entry could show up between the + * statements, and enter the write queue just after we finish the + * check. + * + * Because of this, callers of this method must abide by one of + * three conditions: + * 1. They guarantee that the attempt to read a chunk of new data + * comes after the new data has been logged by the LogManager. + * 2. The files are quiescent when the read is going on. + * 3. The caller is sure the data is in this file. + * + * The replication feeder reader abides by (1) while all other file + * readers abide by (2). Callers which are fetching specific log + * entries fall under (3). + */ + boolean readThisFile = true; + if (!dataKnownToBeInFile) { + + /* + * Callers who are not sure whether the desired data is in this + * file or the next incur the cost of a check of file.length(), + * which is a system call. + */ + readThisFile = (offset < file.length()); + } + + if (readThisFile) { + readFromFileInternal(file, readBuffer, offset, fileNo); + return true; + } + + return false; + } catch (ClosedChannelException e) { + + /* + * The channel should never be closed. It may be closed because + * of an interrupt received by another thread. See SR [#10463] + */ + throw new ThreadInterruptedException + (envImpl, "Channel closed, may be due to thread interrupt", e); + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_READ, e); + } + } + + private void readFromFileInternal(RandomAccessFile file, + ByteBuffer readBuffer, + long offset, + long fileNum) + throws IOException { + + /* + * Perform a RandomAccessFile read and update the buffer position. + * ByteBuffer.array() is safe to use since all non-direct ByteBuffers + * have a backing array. Synchronization on the file object is needed + * because two threads may call seek() on the same file object. 
+ */ + synchronized (file) { + int pos = readBuffer.position(); + int size = readBuffer.limit() - pos; + + if (lastFileNumberTouched == fileNum && + (Math.abs(offset - lastFileTouchedOffset) < + ADJACENT_TRACK_SEEK_DELTA)) { + nSequentialReads.increment(); + nSequentialReadBytes.add(size); + } else { + nRandomReads.increment(); + nRandomReadBytes.add(size); + } + + file.seek(offset); + + int bytesRead = file.read(readBuffer.array(), + pos + readBuffer.arrayOffset(), + size); + if (bytesRead > 0) { + readBuffer.position(pos + bytesRead); + } + + lastFileNumberTouched = fileNum; + lastFileTouchedOffset = offset + bytesRead; + } + } + + private void printLogBuffer(ByteBuffer entryBuffer, long lsn) { + + int curPos = entryBuffer.position(); + + while (entryBuffer.remaining() > 0) { + + int recStartPos = entryBuffer.position(); + + LogEntryHeader header = null; + + try { + header = new LogEntryHeader( + entryBuffer, LogEntryType.LOG_VERSION, lsn); + } catch (ChecksumException e) { + System.err.println("ChecksumException in printLogBuffer " + e); + break; + } + + LogEntryType recType = LogEntryType.findType(header.getType()); + int recSize = header.getSize() + header.getItemSize(); + + System.out.println( + "LOGREC " + recType.toStringNoVersion() + + " at LSN " + DbLsn.toString(lsn) + + " , log buffer offset " + recStartPos); + + lsn += recSize; + + entryBuffer.position(recStartPos + recSize); + } + + entryBuffer.position(curPos); + } + + private void verifyChecksums(ByteBuffer entryBuffer, + long lsn, + String comment) { + int curPos = entryBuffer.position(); + try { + while (entryBuffer.remaining() > 0) { + int recStartPos = entryBuffer.position(); + /* Write buffer contains current log version entries. */ + LogEntryHeader header = new LogEntryHeader( + entryBuffer, LogEntryType.LOG_VERSION, lsn); + verifyChecksum(entryBuffer, header, lsn, comment); + entryBuffer.position(recStartPos + header.getSize() + + header.getItemSize()); + } + } catch (ChecksumException e) { + System.err.println("ChecksumException: (" + comment + ") " + e); + System.err.println("start stack trace"); + e.printStackTrace(System.err); + System.err.println("end stack trace"); + } + entryBuffer.position(curPos); + } + + private void verifyChecksum(ByteBuffer entryBuffer, + LogEntryHeader header, + long lsn, + String comment) + throws ChecksumException { + + ChecksumValidator validator = null; + /* Add header to checksum bytes */ + validator = new ChecksumValidator(); + int headerSizeMinusChecksum = header.getSizeMinusChecksum(); + int itemStart = entryBuffer.position(); + entryBuffer.position(itemStart - headerSizeMinusChecksum); + validator.update(entryBuffer, headerSizeMinusChecksum); + entryBuffer.position(itemStart); + + /* + * Now that we know the size, read the rest of the entry if the first + * read didn't get enough. + */ + int itemSize = header.getItemSize(); + if (entryBuffer.remaining() < itemSize) { + System.err.println("Couldn't verify checksum (" + comment + ")"); + return; + } + + /* + * Do entry validation. Run checksum before checking the entry + * type, it will be the more encompassing error. + */ + validator.update(entryBuffer, itemSize); + validator.validate(header.getChecksum(), lsn); + } + + /** + * FSync the end of the log. + */ + void syncLogEnd() + throws DatabaseException { + + try { + endOfLog.force(); + } catch (IOException e) { + throw new LogWriteException + (envImpl, "IOException during fsync", e); + } + } + + /** + * Sync the end of the log, close off this log file. 
Should only be called + * under the log write latch. + */ + void syncLogEndAndFinishFile() + throws DatabaseException, IOException { + + if (syncAtFileEnd) { + syncLogEnd(); + } + endOfLog.close(); + } + + /** + * Returns whether anything is in the write queue. + */ + public boolean hasQueuedWrites() { + return endOfLog.hasQueuedWrites(); + } + + /** + * For unit testing only. + */ + public void testWriteQueueLock() { + endOfLog.fsyncFileSynchronizer.lock(); + } + + /** + * For unit testing only. + */ + public void testWriteQueueUnlock() { + endOfLog.fsyncFileSynchronizer.unlock(); + } + + public void startFileCacheWarmer(final long recoveryStartLsn){ + assert fileCacheWarmer == null; + + final DbConfigManager cm = envImpl.getConfigManager(); + + final int warmUpSize = cm.getInt( + EnvironmentParams.LOG_FILE_WARM_UP_SIZE); + + if (warmUpSize == 0) { + return; + } + + final int bufSize = cm.getInt( + EnvironmentParams.LOG_FILE_WARM_UP_BUF_SIZE); + + fileCacheWarmer = new FileCacheWarmer( + envImpl, recoveryStartLsn, lastUsedLsn, warmUpSize, bufSize); + + fileCacheWarmer.start(); + } + + private void stopFileCacheWarmer(){ + + /* + * Use fcw local var because fileCacheWarmer can be set to null by + * other threads calling clearFileCacheWarmer, namely the cache warmer + * thread. + */ + final FileCacheWarmer fcw = fileCacheWarmer; + + if (fcw == null) { + return; + } + + fcw.shutdown(); + + clearFileCacheWarmer(); + } + + /* Allow cache warmer thread to be GC'd. */ + void clearFileCacheWarmer() { + fileCacheWarmer = null; + } + + /** + * Close all file handles and empty the cache. + */ + public void clear() + throws IOException, DatabaseException { + + synchronized (fileCache) { + fileCache.clear(); + } + + endOfLog.close(); + } + + /** + * Clear the file lock. + */ + public void close() + throws IOException { + + stopFileCacheWarmer(); + + if (envLock != null) { + envLock.release(); + envLock = null; + } + + if (exclLock != null) { + exclLock.release(); + exclLock = null; + } + + if (channel != null) { + channel.close(); + channel = null; + } + + if (lockFile != null) { + lockFile.close(); + lockFile = null; + } + + if (fdd != null) { + fdd.close(); + } + } + + /** + * Lock the environment. Return true if the lock was acquired. If + * exclusive is false, then this implements a single writer, multiple + * reader lock. If exclusive is true, then implement an exclusive lock. + * + * There is a lock file and there are two regions of the lock file: byte 0, + * and byte 1. Byte 0 is the exclusive writer process area of the lock + * file. If an environment is opened for write, then it attempts to take + * an exclusive write lock on byte 0. Byte 1 is the shared reader process + * area of the lock file. If an environment is opened for read-only, then + * it attempts to take a shared lock on byte 1. This is how we implement + * single writer, multi reader semantics. + * + * The cleaner, each time it is invoked, attempts to take an exclusive lock + * on byte 1. The owning process already either has an exclusive lock on + * byte 0, or a shared lock on byte 1. This will necessarily conflict with + * any shared locks on byte 1, even if it's in the same process and there + * are no other holders of that shared lock. So if there is only one + * read-only process, it will have byte 1 for shared access, and the + * cleaner can not run in it because it will attempt to get an exclusive + * lock on byte 1 (which is already locked for shared access by itself). 
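+ *
+ * <p>In terms of FileChannel calls, the scheme is (as in the method
+ * body below):</p>
+ * <pre>
+ *   envLock  = channel.tryLock(0, 1, false); // writer: excl, byte 0
+ *   envLock  = channel.tryLock(1, 1, true);  // reader: shared, byte 1
+ *   exclLock = channel.tryLock(1, 1, false); // cleaner: excl, byte 1
+ * </pre>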
+ * If a write process comes along and tries to run the cleaner, it will + * attempt to get an exclusive lock on byte 1. If there are no other + * reader processes (with shared locks on byte 1), and no other writers + * (which are running cleaners on with exclusive locks on byte 1), then the + * cleaner will run. + */ + public boolean lockEnvironment(boolean rdOnly, boolean exclusive) { + try { + if (checkEnvHomePermissions(rdOnly)) { + return true; + } + + if (lockFile == null) { + lockFile = + new RandomAccessFile + (new File(dbEnvHome, LOCK_FILE), + FileMode.READWRITE_MODE.getModeValue()); + } + + channel = lockFile.getChannel(); + + try { + if (exclusive) { + + /* + * To lock exclusive, must have exclusive on + * shared reader area (byte 1). + */ + exclLock = channel.tryLock(1, 1, false); + if (exclLock == null) { + return false; + } + return true; + } + if (rdOnly) { + envLock = channel.tryLock(1, 1, true); + } else { + envLock = channel.tryLock(0, 1, false); + } + if (envLock == null) { + return false; + } + return true; + } catch (OverlappingFileLockException e) { + return false; + } + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_INTEGRITY, e); + } + } + + public void releaseExclusiveLock() + throws DatabaseException { + + try { + if (exclLock != null) { + exclLock.release(); + } + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_INTEGRITY, e); + } + } + + /** + * Ensure that if the environment home dir is on readonly media or in a + * readonly directory that the environment has been opened for readonly + * access. + * + * @return true if the environment home dir is readonly. + * + * @throws IllegalArgumentException via Environment ctor + */ + public boolean checkEnvHomePermissions(boolean rdOnly) + throws DatabaseException { + + if (nDataDirs == 0) { + return checkEnvHomePermissionsSingleEnvDir(dbEnvHome, rdOnly); + } else { + return checkEnvHomePermissionsMultiEnvDir(rdOnly); + } + } + + private boolean checkEnvHomePermissionsSingleEnvDir(File dbEnvHome, + boolean rdOnly) + throws DatabaseException { + + boolean envDirIsReadOnly = !dbEnvHome.canWrite(); + if (envDirIsReadOnly && !rdOnly) { + + /* + * Use the absolute path in the exception message, to + * make a mis-specified relative path problem more obvious. + */ + throw new IllegalArgumentException + ("The Environment directory " + + dbEnvHome.getAbsolutePath() + + " is not writable, but the " + + "Environment was opened for read-write access."); + } + + return envDirIsReadOnly; + } + + private boolean checkEnvHomePermissionsMultiEnvDir(boolean rdOnly) + throws DatabaseException { + + for (File dbEnvDir : dbEnvDataDirs) { + if (!checkEnvHomePermissionsSingleEnvDir(dbEnvDir, rdOnly)) { + return false; + } + } + + return true; + } + + /** + * Truncate a log at this position. Used by recovery to a timestamp + * utilities and by recovery to set the end-of-log position, see + * LastFileReader.setEndOfFile(). + * + *

+ * <p>This method forces a new log file to be written next, if the last
+ * file (the file truncated to) has an old version in its header. This
+ * ensures that when the log is opened by an old version of JE, a version
+ * incompatibility will be detected. [#11243]</p>

        + */ + public void truncateSingleFile(long fileNum, long offset) + throws IOException, DatabaseException { + + try { + FileHandle handle = + makeFileHandle(fileNum, getAppropriateReadWriteMode()); + RandomAccessFile file = handle.getFile(); + + try { + file.getChannel().truncate(offset); + } finally { + file.close(); + } + + if (handle.isOldHeaderVersion()) { + forceNewFile = true; + } + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e); + } + } + + /* + * Truncate all log entries after a specified log entry, the position of + * that entry is specified by the fileNum and offset, we do this to avoid + * the log file gap. Used by replication hard recovery and the + * DbTruncateLog utility, see SR [#19463]. + */ + public void truncateLog(long fileNum, long offset) + throws IOException, DatabaseException { + + /* + * Truncate the log files following by this log file in descending + * order to avoid the log entry gap, see SR [#19463]. + */ + for (long i = getLastFileNum(); i >= fileNum; i--) { + /* Do nothing if this file doesn't exist. */ + if (!isFileValid(i)) { + continue; + } + + /* + * If this is the file that truncation starts, invoke + * truncateSingleFile. If the offset is 0, which means the + * FileHeader is also deleted, delete the whole file to avoid a log + * file gap. + */ + if (i == fileNum) { + truncateSingleFile(fileNum, offset); + if (offset != 0) { + continue; + } + } + + boolean deleted = deleteFile(i); + assert deleted : "File " + getFullFileName(i, JE_SUFFIX) + + " not deleted during truncateLog"; + } + } + + /** + * Mark the specified log entries as invisible and obsolete. The entries + * are written here, but are fsync'ed later. If there is any problem or + * exception during the setting, the method will throw an + * EnvironmentFailureException. + * + * These changes are made directly to the file, but recently logged log + * entries may also be resident in the log buffers. The caller must take + * care to call LogManager.flush() before this method, to ensure that all + * entries are on disk. + * + * In addition, we must ensure that after this step, the affected log + * entries will only be read via a FileReader, and will not be faulted in + * by the LogManager. Entries may be present in the log and in the log + * buffers, but only the on disk version is modified by this method. The + * LogManager can read directly from the log buffers and may read the + * incorrect, non-invisible version of the log entry, rather than the + * invisible version from the file. This should not be an issue, because + * invisible log entries should be detached from the in-memory tree before + * they are made invisible. + * + * @param fileNum target file. + * @param lsns The list of LSNs to make invisible, must be sorted in + * ascending order. + */ + public void makeInvisible(long fileNum, List lsns) { + if (lsns.size() == 0) { + return; + } + + /* Open this file. */ + FileHandle handle = null; + try { + + /* + * Note that we are getting a new, non-cached file handle for + * specific use by this method. 
+ */ + handle = makeFileHandle(fileNum, getAppropriateReadWriteMode()); + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, + "Opening file " + fileNum + " for invisible marking ", e); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + "Opening file " + fileNum + " for invisible marking ", e); + } + RandomAccessFile file = handle.getFile(); + + /* Set the invisible bit for each entry. */ + try { + for (Long lsn : lsns) { + if (DbLsn.getFileNumber(lsn) != fileNum) { + + /* + * This failure will not invalidate the environment right + * away. But since it causes replication syncup to fail, + * the environment will shutdown, which is the effect we + * want. + */ + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.UNEXPECTED_STATE, + "LSN of " + DbLsn.getNoFormatString(lsn) + + " did not match file number" + fileNum); + } + + int entryFlagsOffset = (int) + (DbLsn.getFileOffset(lsn) + LogEntryHeader.FLAGS_OFFSET); + file.seek(entryFlagsOffset); + byte flags = file.readByte(); + byte newFlags = LogEntryHeader.makeInvisible(flags); + file.seek(entryFlagsOffset); + file.writeByte(newFlags); + } + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_WRITE, + "Flipping invisibility in file " + fileNum, e); + } finally { + + /* + * Just close the file. Fsyncs will be done later on, in the hope + * that the OS has already synced asynchronously. + */ + try { + file.close(); + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_WRITE, + "Closing after invisibility cloaking: file " + fileNum, e); + } + } + } + + /** + * Fsync this set of log files. Used for replication syncup rollback. + */ + public void force(Set fileNums) { + for (long fileNum : fileNums) { + RandomAccessFile file = null; + try { + FileHandle handle = + makeFileHandle(fileNum, getAppropriateReadWriteMode()); + file = handle.getFile(); + file.getChannel().force(false); + nLogFSyncs.increment(); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + "Invisible fsyncing file " + fileNum, e); + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, + "Invisible fsyncing file " + fileNum, e); + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_WRITE, + "Invisible fsyncing file " + fileNum, e); + } finally { + if (file != null) { + try { + file.close(); + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_WRITE, + "Invisible fsyncing file " + fileNum, e); + } + } + } + } + } + + /** + * Set the flag that causes a new file to be written before the next write. + */ + public void forceNewLogFile() { + forceNewFile = true; + } + + /** + * Return the offset of the first log entry after the file header. + * + * @return the size in bytes of the file header log entry. + */ + public static int firstLogEntryOffset() { + return FileHeader.entrySize() + LogEntryHeader.MIN_HEADER_SIZE; + } + + /** + * Return the next available LSN in the log. Note that this is + * unsynchronized, so if it is called outside the log write latch it is + * only valid as an approximation. 
+ */ + public long getNextLsn() { + return nextAvailableLsn; + } + + /** + * Return the last allocated LSN in the log. Note that this is + * unsynchronized, so if it is called outside the log write latch it is + * only valid as an approximation. + */ + public long getLastUsedLsn() { + return lastUsedLsn; + } + + StatGroup loadStats(StatsConfig config) { + nOpenFiles.set(fileCache.size()); + StatGroup copyStats = stats.cloneGroup(config.getClear()); + + return copyStats; + } + + /* + * Unit test support + */ + + /* + * @return ids of files in cache + */ + Set getCacheKeys() { + return fileCache.getCacheKeys(); + } + + /** + * Clear a file out of the file cache regardless of mode type. + */ + private void clearFileCache(long fileNum) + throws IOException, DatabaseException { + + synchronized (fileCache) { + fileCache.remove(fileNum); + } + } + + /* + * The file cache keeps N RandomAccessFile objects cached for file + * access. The cache consists of two parts: a Hashtable that doesn't + * require extra synchronization, for the most common access, and a linked + * list of files to support cache administration. Looking up a file from + * the hash table doesn't require extra latching, but adding or deleting a + * file does. + */ + private static class FileCache { + private final Map fileMap; // Long->file + private final List fileList; // list of file numbers + private final int fileCacheSize; + + FileCache(DbConfigManager configManager) { + + /* + * A fileMap maps the file number to FileHandles (RandomAccessFile, + * latch). The fileList is a list of Longs to determine which files + * to eject out of the file cache if it's too small. + */ + fileMap = new Hashtable(); + fileList = new LinkedList(); + fileCacheSize = + configManager.getInt(EnvironmentParams.LOG_FILE_CACHE_SIZE); + } + + private FileHandle get(Long fileId) { + return fileMap.get(fileId); + } + + private void add(Long fileId, FileHandle fileHandle) + throws IOException, DatabaseException { + + /* + * Does the cache have any room or do we have to evict? Hunt down + * the file list for an unused file. Note that the file cache might + * actually grow past the prescribed size if there is nothing + * evictable. Should we try to shrink the file cache? Presently if + * it grows, it doesn't shrink. + */ + if (fileList.size() >= fileCacheSize) { + Iterator iter = fileList.iterator(); + while (iter.hasNext()) { + Long evictId = iter.next(); + FileHandle evictTarget = fileMap.get(evictId); + + /* + * Try to latch. If latchNoWait returns false, then another + * thread owns this latch. Note that a thread that's trying + * to get a new file handle should never already own the + * latch on another file handle, because these latches are + * meant to be short lived and only held over the i/o out + * of the file. + */ + if (evictTarget.latchNoWait()) { + try { + fileMap.remove(evictId); + iter.remove(); + evictTarget.close(); + } finally { + evictTarget.release(); + } + break; + } + } + } + + /* + * We've done our best to evict. Add the file the the cache now + * whether or not we did evict. + */ + fileList.add(fileId); + fileMap.put(fileId, fileHandle); + } + + /** + * Take any file handles corresponding to this file name out of the + * cache. A file handle could be there twice, in rd only and in r/w + * mode. 
+ */ + private void remove(long fileNum) + throws IOException, DatabaseException { + + Iterator iter = fileList.iterator(); + while (iter.hasNext()) { + Long evictId = iter.next(); + if (evictId.longValue() == fileNum) { + FileHandle evictTarget = fileMap.get(evictId); + try { + evictTarget.latch(); + fileMap.remove(evictId); + iter.remove(); + evictTarget.close(); + } finally { + evictTarget.release(); + } + } + } + } + + private void clear() + throws IOException, DatabaseException { + + Iterator iter = fileMap.values().iterator(); + while (iter.hasNext()) { + FileHandle fileHandle = iter.next(); + try { + fileHandle.latch(); + fileHandle.close(); + iter.remove(); + } finally { + fileHandle.release(); + } + } + fileMap.clear(); + fileList.clear(); + } + + private Set getCacheKeys() { + return fileMap.keySet(); + } + + private int size() { + return fileMap.size(); + } + } + + /** + * The LogEndFileDescriptor is used to write and fsync the end of the log. + * Because the JE log is append only, there is only one logical R/W file + * descriptor for the whole environment. This class actually implements two + * RandomAccessFile instances, one for writing and one for fsyncing, so the + * two types of operations don't block each other. + * + * The write file descriptor is considered the master. Manipulation of + * this class is done under the log write latch. Here's an explanation of + * why the log write latch is sufficient to safeguard all operations. + * + * There are two types of callers who may use this file descriptor: the + * thread that is currently writing to the end of the log and any threads + * that are fsyncing on behalf of the FSyncManager. + * + * The writing thread appends data to the file and fsyncs the file when we + * flip over to a new log file. The file is only instantiated at the point + * that it must do so -- which is either when the first fsync is required + * by JE or when the log file is full and we flip files. Therefore, the + * writing thread has two actions that change this descriptor -- we + * initialize the file descriptor for the given log file at the first write + * to the file, and we close the file descriptor when the log file is full. + * Therefore is a period when there is no log descriptor -- when we have + * not yet written a log buffer into a given log file. + * + * The fsyncing threads ask for the log end file descriptor asynchronously, + * but will never modify it. These threads may arrive at the point when + * the file descriptor is null, and therefore skip their fysnc, but that is + * fine because it means a writing thread already flipped that target file + * and has moved on to the next file. + * + * Time Activity + * 10 thread 1 writes log entry A into file 0x0, issues fsync + * outside of log write latch, yields the processor + * 20 thread 2 writes log entry B, piggybacks off thread 1 + * 30 thread 3 writes log entry C, but no room left in that file, + * so it flips the log, and fsyncs file 0x0, all under the log + * write latch. It nulls out endOfLogRWFile, moves onto file + * 0x1, but doesn't create the file yet. + * 40 thread 1 finally comes along, but endOfLogRWFile is null-- + * no need to fsync in that case, 0x0 got fsynced. + * + * If a write is attempted and an fsync is already in progress, then the + * information pertaining to the data to be written (data, offset, length) + * is saved away in the "queuedWrites" array. When the fsync completes, + * the queuedWrites buffer is emptied. 
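+ *
+ * <p>Queued bytes are appended to a single array, so byte i of
+ * queuedWrites belongs at disk offset (qwStartingOffset + i) of file
+ * qwFileNum; only consecutive writes to one file can be queued, a
+ * sketch of the invariant enforced in enqueueWrite1:</p>
+ * <pre>
+ *   // the next write must start exactly where the queue ends
+ *   assert destOffset == qwStartingOffset + queuedWritesPosition;
+ * </pre>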
This ensures that writes continue + * to execute on file systems which block all IO calls during an fsync() + * call (e.g. ext3). + */ + class LogEndFileDescriptor { + private RandomAccessFile endOfLogRWFile = null; + private RandomAccessFile endOfLogSyncFile = null; + private final ReentrantLock fsyncFileSynchronizer = new ReentrantLock(); + + /* + * Holds all data for writes which have been queued due to their + * being blocked by an fsync when the original write was attempted. + * The next thread to execute an fsync or write will execute any + * queued writes in this buffer. + * Latch order is fsyncFileSynchronizer, followed by the queuedWrites + * mutex [ synchronized (queuedWrites) {} ]. + * + * Default protection for unit tests. + */ + private final byte[] queuedWrites = + useWriteQueue ? new byte[writeQueueSize] : null; + + /* Current position in the queuedWrites array. */ + private int queuedWritesPosition = 0; + + /* The starting offset on disk of the first byte in queuedWrites. */ + private long qwStartingOffset; + + /* The file number that the queuedWrites are destined for. */ + private long qwFileNum = -1; + + /* For unit tests. */ + void setQueueFileNum(final long qwFileNum) { + this.qwFileNum = qwFileNum; + } + + /* + * Check if fileNo/offset is present in queuedWrites, and if so, fill + * readBuffer with those bytes. We theorize that this is needed + * because HA will be reading at the very end of the log and those + * writes, if enqueued, may no longer be in LogBuffers in the + * LogBufferPool. This might happen in the case of lots of concurrent + * non-synchronous writes (with synchronous commits) which become + * enqueued in the queuedWrites cache, but cycle out of the LBP. In + * general, using synchronous commits with HA is a bad idea. + * + * Default protection for unit tests. + * @return true if more data was available. If so, the read buffer + * will be filled up. + */ + /* private */ + boolean checkWriteCache(final ByteBuffer readBuffer, + final long requestedOffset, + final long fileNum) { + + int pos = readBuffer.position(); + int targetBufSize = readBuffer.limit() - pos; + synchronized (queuedWrites) { + if (qwFileNum != fileNum) { + return false; + } + + if (queuedWritesPosition == 0) { + return false; + } + + if (requestedOffset < qwStartingOffset || + (qwStartingOffset + queuedWritesPosition) <= + requestedOffset) { + return false; + } + + /* We have the bytes available. */ + int nBytesToCopy = (int) + (queuedWritesPosition - + (requestedOffset - qwStartingOffset)); + nBytesToCopy = Math.min(nBytesToCopy, targetBufSize); + readBuffer.put(queuedWrites, + (int) (requestedOffset - qwStartingOffset), + nBytesToCopy); + nBytesReadFromWriteQueue.add(nBytesToCopy); + nReadsFromWriteQueue.increment(); + return true; + } + } + + /* + * Enqueue a blocked write call for later execution by the next thread + * to do either an fsync or write call. fsyncFileSynchronizer is not + * held when this is called. + * + * Default protection for unit tests. + */ + /* private */ + boolean enqueueWrite(final long fileNum, + final byte[] data, + final long destOffset, + final int arrayOffset, + final int size) + throws DatabaseException { + + assert !fsyncFileSynchronizer.isHeldByCurrentThread(); + + for (int i = 0; i < 2; i++) { + try { + enqueueWrite1(fileNum, data, destOffset, + arrayOffset, size); + return true; + } catch (RelatchRequiredException RE) { + dequeuePendingWrites(); + } + } + + /* Give up after two tries. 
*/ + nWriteQueueOverflowFailures.increment(); + return false; + } + + private void enqueueWrite1(final long fileNum, + final byte[] data, + final long destOffset, + final int arrayOffset, + final int size) + throws RelatchRequiredException, DatabaseException { + + /* + * The queuedWrites queue only ever holds writes for a single file. + * + * This check is safe because qwFileNum can only ever change inside + * enqueueWrite which can only ever be called while the Log Write + * Latch is held. + * + * NOTE: We believe the commented out second condition is safe + * to add to the code if we ever see contention with this call to + * dequeuePendingWrites against an fsync. Here is the reasoning: + * + * queuedWritesPosition is changed in two places: (1) enqueueWrite1 + * where it is incremented, and (2) dequeuePendingWrites1 where it + * is zeroed. Both of these places are proected by the queuedWrites + * mutex. The zero'ing (2) will only make the dequeue unnecessary + * so the extra commented out check below is safe since it will + * only result in eliminating an unnecessary dequeuePendingWrites + * call. + */ + if (qwFileNum < fileNum /* && queuedWritesPosition > 0 */) { + dequeuePendingWrites(); + qwFileNum = fileNum; + } + + synchronized (queuedWrites) { + boolean overflow = + (writeQueueSize - queuedWritesPosition) < size; + if (overflow) { + nWriteQueueOverflow.increment(); + + /* + * Since we can't write this "write call" into the + * ByteBuffer without overflowing, we will try to dequeue + * all current writes in the buffer. But that requires + * holding the fsyncFileSynchronizer latch first which + * would be latching out of order relative to the + * queuedWrites mutex. + */ + throw RelatchRequiredException.relatchRequiredException; + } + + assert qwFileNum == fileNum; + int curPos = queuedWritesPosition; + if (curPos == 0) { + + /* + * This is the first entry in queue. Set qwStartingOffset. + */ + qwStartingOffset = destOffset; + } + + if (curPos + qwStartingOffset != destOffset) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + "non-consecutive writes queued. " + + "qwPos=" + queuedWritesPosition + + " write destOffset=" + destOffset); + } + + System.arraycopy(data, arrayOffset, + queuedWrites, queuedWritesPosition, + size); + queuedWritesPosition += size; + } + } + + /** + * Returns whether anything is in the write queue. + */ + boolean hasQueuedWrites() { + return queuedWritesPosition > 0; + } + + /* + * Execute pending writes. Assumes fsyncFileSynchronizer is not held. + */ + private void dequeuePendingWrites() + throws DatabaseException { + + assert !fsyncFileSynchronizer.isHeldByCurrentThread(); + + fsyncFileSynchronizer.lock(); + try { + dequeuePendingWrites1(); + } finally { + fsyncFileSynchronizer.unlock(); + } + } + + /* + * Execute pending writes. Assumes fsyncFileSynchronizer is held. + */ + private void dequeuePendingWrites1() + throws DatabaseException { + + assert fsyncFileSynchronizer.isHeldByCurrentThread(); + + try { + synchronized (queuedWrites) { + /* Nothing to see here. Move along. 
*/ + if (queuedWritesPosition == 0) { + return; + } + + RandomAccessFile file = getWritableFile(qwFileNum, false); + synchronized (file) { + file.seek(qwStartingOffset); + file.write(queuedWrites, 0, queuedWritesPosition); + nBytesWrittenFromWriteQueue.add(queuedWritesPosition); + nWritesFromWriteQueue.increment(); + if (VERIFY_CHECKSUMS) { + file.seek(qwStartingOffset); + file.read(queuedWrites, 0, queuedWritesPosition); + ByteBuffer bb = + ByteBuffer.allocate(queuedWritesPosition); + bb.put(queuedWrites, 0, queuedWritesPosition); + bb.position(0); + verifyChecksums + (bb, qwStartingOffset, "post-write"); + } + } + + /* We flushed the queue. Reset the buffer. */ + queuedWritesPosition = 0; + } + } catch (IOException e) { + throw new LogWriteException + (envImpl, "IOException during fsync", e); + } + } + + /** + * getWritableFile must be called under the log write latch. + * + * Typically, endOfLogRWFile is not null. Hence the + * fsyncFileSynchronizer does not need to be locked (which would + * block the write queue from operating. + */ + private RandomAccessFile getWritableFile(final long fileNumber, + final boolean doLock) { + try { + if (endOfLogRWFile == null) { + + /* + * We need to make a file descriptor for the end of the + * log. This is guaranteed to be called under the log + * write latch. + * + * Protect both the RWFile and SyncFile under this lock, + * to avoid a race for creating the file and writing the + * header. [#20732] + */ + if (doLock) { + fsyncFileSynchronizer.lock(); + } + try { + endOfLogRWFile = + makeFileHandle(fileNumber, + getAppropriateReadWriteMode()). + getFile(); + endOfLogSyncFile = + makeFileHandle(fileNumber, + getAppropriateReadWriteMode()). + getFile(); + } finally { + if (doLock) { + fsyncFileSynchronizer.unlock(); + } + } + } + + return endOfLogRWFile; + } catch (Exception e) { + + /* + * If we can't get a write channel, we need to invalidate the + * environment. + */ + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_INTEGRITY, e); + } + } + + /** + * FSync the log file that makes up the end of the log. + */ + private void force() + throws DatabaseException, IOException { + + /* + * Get a local copy of the end of the log file descriptor, it could + * change. No need to latch, no harm done if we get an old file + * descriptor, because we forcibly fsync under the log write latch + * when we switch files. + * + * If there is no current end file descriptor, we know that the log + * file has flipped to a new file since the fsync was issued. + */ + fsyncFileSynchronizer.lock(); + try { + + /* Flush any queued writes. */ + if (useWriteQueue) { + dequeuePendingWrites1(); + } + + RandomAccessFile file = endOfLogSyncFile; + if (file != null) { + bumpWriteCount("fsync"); + FileChannel ch = file.getChannel(); + + long start = System.currentTimeMillis(); + try { + ch.force(false); + } catch (ClosedChannelException e) { + + /* + * The channel should never be closed. It may be closed + * because of an interrupt received by another thread. + * See SR [#10463]. 
+ */ + throw new ThreadInterruptedException + (envImpl, + "Channel closed, may be due to thread interrupt", + e); + } + final long fSyncMs = System.currentTimeMillis() - start; + + nLogFSyncs.increment(); + nFSyncTime.add(fSyncMs); + + if (nFSyncMaxTime.setMax(fSyncMs) && + fSyncTimeLimit != 0 && + fSyncMs > fSyncTimeLimit) { + + LoggerUtils.warning( + envImpl.getLogger(), envImpl, + String.format( + "FSync time of %d ms exceeds limit (%d ms)", + fSyncMs, fSyncTimeLimit)); + } + + assert EnvironmentImpl.maybeForceYield(); + } + + /* Flush any writes which were queued while fsync'ing. */ + if (useWriteQueue) { + dequeuePendingWrites1(); + } + } finally { + fsyncFileSynchronizer.unlock(); + } + } + + /** + * Close the end of the log file descriptor. Use atomic assignment to + * ensure that we won't force and close on the same descriptor. + */ + void close() + throws IOException { + + /* + * Protect both the RWFile and SyncFile under this lock out of + * paranoia, although we don't expect two threads to call close + * concurrently. [#20732] + */ + fsyncFileSynchronizer.lock(); + try { + IOException firstException = null; + if (endOfLogRWFile != null) { + RandomAccessFile file = endOfLogRWFile; + + /* + * Null out so that other threads know endOfLogRWFile is no + * longer available. + */ + endOfLogRWFile = null; + try { + file.close(); + } catch (IOException e) { + /* Save this exception, so we can try second close. */ + firstException = e; + } + } + if (endOfLogSyncFile != null) { + RandomAccessFile file = endOfLogSyncFile; + + /* + * Null out so that other threads know endOfLogSyncFile is + * no longer available. + */ + endOfLogSyncFile = null; + file.close(); + } + + if (firstException != null) { + throw firstException; + } + } finally { + fsyncFileSynchronizer.unlock(); + } + } + } + + /* + * Generate IOExceptions for testing. + */ + + /* Testing switch. public so others can read the value. */ + public static final boolean LOGWRITE_EXCEPTION_TESTING; + private static String RRET_PROPERTY_NAME = "je.logwrite.exception.testing"; + + static { + LOGWRITE_EXCEPTION_TESTING = + (System.getProperty(RRET_PROPERTY_NAME) != null); + } + + /* Max write counter value. */ + private static final int LOGWRITE_EXCEPTION_MAX = 100; + /* Current write counter value. */ + private int logWriteExceptionCounter = 0; + /* Whether an exception has been thrown. */ + private boolean logWriteExceptionThrown = false; + /* Random number generator. */ + private Random logWriteExceptionRandom = null; + + private void generateLogWriteException(RandomAccessFile file, + ByteBuffer data, + long destOffset, + long fileNum) + throws DatabaseException, IOException { + + if (logWriteExceptionThrown) { + (new Exception("Write after LogWriteException")). + printStackTrace(); + } + logWriteExceptionCounter += 1; + if (logWriteExceptionCounter >= LOGWRITE_EXCEPTION_MAX) { + logWriteExceptionCounter = 0; + } + if (logWriteExceptionRandom == null) { + logWriteExceptionRandom = new Random(System.currentTimeMillis()); + } + if (logWriteExceptionCounter == + logWriteExceptionRandom.nextInt(LOGWRITE_EXCEPTION_MAX)) { + int len = logWriteExceptionRandom.nextInt(data.remaining()); + if (len > 0) { + byte[] a = new byte[len]; + data.get(a, 0, len); + ByteBuffer buf = ByteBuffer.wrap(a); + writeToFile(file, buf, destOffset, fileNum, + false /*flushRequired*/); + } + logWriteExceptionThrown = true; + throw new IOException("Randomly generated for testing"); + } + } + + /** + * The factory interface for creating RandomAccessFiles. 
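+ *
+ * <p>A hypothetical test sketch (not part of JE) that swaps in a
+ * custom factory through the public fileFactory field:</p>
+ * <pre>
+ *   FileManager.fileFactory = new FileManager.FileFactory() {
+ *       public RandomAccessFile createFile(File envHome,
+ *                                          String fullName,
+ *                                          String mode)
+ *           throws FileNotFoundException {
+ *           // return a DefaultRandomAccessFile subclass that
+ *           // overrides methods to inject faults
+ *           return new DefaultRandomAccessFile(fullName, mode);
+ *       }
+ *   };
+ * </pre>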
For production + * use, the default factory is always used and a DefaultRandomAccessFile is + * always created. For testing, the factory can be overridden to return a + * subclass of DefaultRandomAccessFile that overrides methods and injects + * faults, for example. + */ + public interface FileFactory { + + /** + * @param envHome can be used to distinguish environments in a test + * program that opens multiple environments. Not for production use. + * + * @param fullName the full file name to be passed to the + * RandomAccessFile constructor. + * + * @param mode the file mode to be passed to the RandomAccessFile + * constructor. + */ + RandomAccessFile createFile(File envHome, String fullName, String mode) + throws FileNotFoundException; + } + + /** + * The RandomAccessFile for production use. Tests that override the + * default FileFactory should return a RandomAccessFile that subclasses + * this class to inherit workarounds such as the overridden length method. + */ + public static class DefaultRandomAccessFile extends RandomAccessFile { + + public DefaultRandomAccessFile(String fullName, String mode) + throws FileNotFoundException { + + super(fullName, mode); + } + + /** + * RandomAccessFile.length() is not thread safe and side-effects the + * file pointer if interrupted in the middle. It is synchronized here + * to work around that problem. + */ + @Override + public synchronized long length() + throws IOException { + + return super.length(); + } + } + + /** + * The factory instance used to create RandomAccessFiles. This field is + * intentionally public and non-static so it may be set by tests. See + * FileFactory. + */ + public static FileFactory fileFactory = new FileFactory() { + + public RandomAccessFile createFile(File envHome, + String fullName, + String mode) + throws FileNotFoundException { + + return new DefaultRandomAccessFile(fullName, mode); + } + }; +} diff --git a/src/com/sleepycat/je/log/FileReader.java b/src/com/sleepycat/je/log/FileReader.java new file mode 100644 index 0000000..9935a6b --- /dev/null +++ b/src/com/sleepycat/je/log/FileReader.java @@ -0,0 +1,1274 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.io.FileNotFoundException; +import java.nio.ByteBuffer; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * A FileReader is an abstract class that traverses the log files, reading-in + * chunks of the file at a time. The class provides a iterator interface, via + * its readNextEntry() method. 
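+ *
+ * <p>A typical iteration sketch (hypothetical use of a concrete
+ * subclass):</p>
+ * <pre>
+ *   FileReader reader = ...; // some concrete subclass
+ *   while (reader.readNextEntry()) {
+ *       // examine the current logrec via the subclass's accessors
+ *   }
+ * </pre>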
Concrete subclasses must (a) provide the public + * methods that allow their users to examine the contents of the logrec that + * the iterator is currently positioned at, and (b) implement the (non-public) + * isTarget() and processEntry() methods that may filter-out logrecs that do + * not need to be seen by the caller of readNextEntry(), or perform specific + * actions before each logrec is "returned" to the caller of readNextEntry(). + */ +public abstract class FileReader { + + protected final EnvironmentImpl envImpl; + + protected final FileManager fileManager; + + /* + * The ReadWindow is a data buffer that acts as a sliding window view + * of the log. It is positioned against the log and filled up with data. + */ + protected final ReadWindow window; + + /* + * For piecing together a log entry that is read from multiple read buffer + * calls. + */ + private ByteBuffer saveBuffer; + + private final boolean singleFile;// if true, do not read across files + + /* + * true if at end of the log. + * TODO: assess whether this is redundant with the EOFException, and + * could be streamlined. + */ + protected boolean eof; + + /* if true, we're reading forward; otherwise backwards */ + protected final boolean forward; + + /* num entries we've seen */ + private int nRead; + + /* The log entry header for the entry that was just read. */ + protected LogEntryHeader currentEntryHeader; + + /* + * The log entry before the current entry. In general, + * currentEntryPrevOffset is the same as + * currentEntryHeader.getPrevOffset(), but it's initialized and used before + * a header is read. Only used for backward scanning. + */ + protected long currentEntryPrevOffset; + + /* + * nextEntryOffset is used to set the currentEntryOffset after we've read + * an entry. Only used for forward scanning. + */ + protected long currentEntryOffset; + protected long nextEntryOffset; + + protected long startLsn; // We start reading from this LSN. + private final long finishLsn; // If going backwards, read up to this LSN. + + /* For checking checksum on the read. */ + protected ChecksumValidator cksumValidator; + + private boolean doChecksumOnRead; // Validate checksums + private boolean alwaysValidateChecksum; // Validate for all entry types + + protected final Logger logger; + + /** + * A FileReader just needs to know what size chunks to read in. + * @param endOfFileLsn indicates the end of the log file + */ + public FileReader(EnvironmentImpl envImpl, + int readBufferSize, + boolean forward, + long startLsn, + Long singleFileNumber, + long endOfFileLsn, + long finishLsn) + throws DatabaseException { + + this.envImpl = envImpl; + this.fileManager = envImpl.getFileManager(); + this.singleFile = (singleFileNumber != null); + this.forward = forward; + + this.doChecksumOnRead = envImpl.getLogManager().getChecksumOnRead(); + if (this.doChecksumOnRead) { + cksumValidator = new ChecksumValidator(); + } + + window = makeWindow(readBufferSize); + saveBuffer = ByteBuffer.allocate(readBufferSize); + + /* stats */ + nRead = 0; + + /* Determine the starting position. */ + this.startLsn = startLsn; + this.finishLsn = finishLsn; + + logger = envImpl.getLogger(); + + initStartingPosition(endOfFileLsn, singleFileNumber); + } + + /** + * May be overridden by other FileReaders. 
+ * @throws DatabaseException + */ + protected ReadWindow makeWindow(int readBufferSize) + throws DatabaseException { + + return new ReadWindow(readBufferSize, envImpl); + } + + /** + * Helper for determining the starting position and opening up a file at + * the desired location. + */ + protected void initStartingPosition(long endOfFileLsn, + Long ignoreSingleFileNumber) { + eof = false; + if (forward) { + + /* + * Start off at the startLsn. If that's null, start at the + * beginning of the log. If there are no log files, set eof. + */ + if (startLsn != DbLsn.NULL_LSN) { + window.initAtFileStart(startLsn); + } else { + Long firstNum = fileManager.getFirstFileNum(); + if (firstNum == null) { + eof = true; + } else { + window.initAtFileStart(DbLsn.makeLsn(firstNum, 0)); + } + } + + /* + * After we read the first entry, the currentEntry will point here. + */ + nextEntryOffset = window.getEndOffset(); + } else { + + /* + * Make the read buffer look like it's positioned off the end of + * the file. Initialize the first LSN we want to read. When + * traversing the log backwards, we always start at the very end. + */ + assert startLsn != DbLsn.NULL_LSN; + window.initAtFileStart(endOfFileLsn); + + /* + * currentEntryPrevOffset points to the entry we want to start out + * reading when going backwards. If it's 0, the entry we want to + * read is in a different file. + */ + if (DbLsn.getFileNumber(startLsn) == + DbLsn.getFileNumber(endOfFileLsn)) { + currentEntryPrevOffset = DbLsn.getFileOffset(startLsn); + } else { + currentEntryPrevOffset = 0; + } + currentEntryOffset = DbLsn.getFileOffset(endOfFileLsn); + } + } + + /** + * Whether to always validate the checksum, even for non-target entries. + */ + public void setAlwaysValidateChecksum(boolean validate) { + alwaysValidateChecksum = validate; + } + + /** + * @return the number of entries processed by this reader. + */ + public int getNumRead() { + return nRead; + } + + public long getNRepeatIteratorReads() { + return window.getNRepeatIteratorReads(); + } + + /** + * Get LSN of the last entry read. + */ + public long getLastLsn() { + return DbLsn.makeLsn(window.currentFileNum(), currentEntryOffset); + } + + /** + * Returns the total size (including header) of the last entry read. + */ + public int getLastEntrySize() { + return currentEntryHeader.getEntrySize(); + } + + /** + * Scans the log files until either it has reached the end of the log or + * has hit an invalid portion. + * + * @return true if an element has been read, false at end-of-log. + * + * @throws EnvironmentFailureException if a ChecksumException, + * FileNotFoundException, or another internal problem occurs. + */ + public boolean readNextEntry() { + try { + return readNextEntryAllowExceptions(); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } catch (ChecksumException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e); + } + } + + /** + * Variant of readNextEntry that throws FileNotFoundException and + * ChecksumException, rather than wrapping them in an + * EnvironmentFailureException and invalidating the enviornment. This + * allows users of this class (see cleaner.FileProcessor), and subclasses + * that override readNextEntry (see ScavengerFileReader and + * LastFileReader), to handle these exceptions specially. 
+ */ + public final boolean readNextEntryAllowExceptions() + throws FileNotFoundException, ChecksumException { + + boolean foundEntry = false; + long savedCurrentEntryOffset = currentEntryOffset; + long savedNextEntryOffset = nextEntryOffset; + + try { + while ((!eof) && (!foundEntry)) { + + /* Read the invariant portion of the next header. */ + getLogEntryInReadBuffer(); + ByteBuffer dataBuffer = + readData(LogEntryHeader.MIN_HEADER_SIZE, + true); // collectData + + readBasicHeader(dataBuffer); + + boolean isTarget; + boolean isChecksumTarget; + boolean collectData; + + if (currentEntryHeader.isVariableLength()) { + + /* + * For all variable length entries, init the checksum with + * the invariant portion of the header, before we know whether + * the entry is a target for this reader. This has + * to be done before we read the variable portion of the + * header, because readData() only guarantees that it + * returns a dataBuffer that contains the next bytes that + * are needed, and has no guarantee that it holds any bytes + * that were previously read. The act of calling + * readData() to obtain the optional portion may reset the + * dataBuffer, and nudge the invariant part of the header + * out of the buffer returned by readData(). + */ + startChecksum(dataBuffer); + + int optionalPortionLen = + currentEntryHeader.getVariablePortionSize(); + + /* Load the optional part of the header into a buffer. */ + dataBuffer = readData(optionalPortionLen, true); + + /* + * Add to checksum while the buffer is positioned at + * the start of the new bytes. + */ + addToChecksum(dataBuffer, optionalPortionLen); + + /* Now read the optional bytes. */ + currentEntryHeader.readVariablePortion(dataBuffer); + } + + /* + * We've read the header of the next logrec. Move up our + * offsets if we're moving forward. If we're moving + * backwards, we set our offset before we read the header, + * because we knew where the entry started. + */ + if (forward) { + currentEntryOffset = nextEntryOffset; + nextEntryOffset += + currentEntryHeader.getSize() + // header size + currentEntryHeader.getItemSize(); // item size + } + + try { + isTarget = isTargetEntry(); + + isChecksumTarget = (isTarget || alwaysValidateChecksum); + + if (!currentEntryHeader.isVariableLength()) { + startChecksum(dataBuffer, isChecksumTarget); + } + + collectData = + (isChecksumTarget && doChecksumOnRead) || isTarget; + + /* + * Read in the body of the next entry. Note that even if + * this isn't a targeted entry, we have to move the buffer + * position along. + */ + dataBuffer = readData(currentEntryHeader.getItemSize(), + collectData); + } catch (Throwable e) { + if (forward) { + currentEntryOffset = savedCurrentEntryOffset; + nextEntryOffset = savedNextEntryOffset; + } + throw e; + } + + /* Validate the log entry checksum. */ + validateChecksum(dataBuffer, isChecksumTarget); + + if (isTarget) { + + /* + * For a target entry, call the subclass reader's + * processEntry method to do whatever we need with the + * entry. It returns true if this entry is one that should + * be returned. Note that some entries, although targeted + * and read, are not returned. + */ + if (processEntry(dataBuffer)) { + foundEntry = true; + nRead++; + } + } else if (collectData) { + + /* + * For a non-target entry that was validated, the buffer is + * positioned at the start of the entry; skip over it. + */ + skipEntry(dataBuffer); + } + } + } catch (EOFException e) { + eof = true; + } catch (DatabaseException e) { + eof = true; + /* Report on error.
*/ + reportProblem(e); + throw e; + } + + return foundEntry; + } + + /** + * May be called by processEntry when it determines that the entry does not + * need to be read/de-serialized. + */ + protected void skipEntry(ByteBuffer entryBuffer) { + entryBuffer.position( + entryBuffer.position() + + currentEntryHeader.getItemSize()); + } + + private void reportProblem(Exception e) { + + StringBuilder sb = new StringBuilder(); + sb.append("Halted log file reading at file 0x"). + append(Long.toHexString(window.currentFileNum())). + append(" offset 0x"). + append(Long.toHexString(nextEntryOffset)). + append(" offset(decimal)="). + append(nextEntryOffset). + append(" prev=0x"). + append(Long.toHexString(currentEntryPrevOffset)); + + if (currentEntryHeader != null) { + LogEntryType problemType = + LogEntryType.findType(currentEntryHeader.getType()); + sb.append(":\nentry="). + append(problemType). + append(" (type="). + append(currentEntryHeader.getType()). + append(",version="). + append(currentEntryHeader.getVersion()). + append(")\nprev=0x"). + append(Long.toHexString(currentEntryPrevOffset)). + append("\nsize="). + append(currentEntryHeader.getItemSize()). + append("\nNext entry should be at 0x"). + append(Long.toHexString(nextEntryOffset + + currentEntryHeader.getSize() + + currentEntryHeader.getItemSize())); + } + + LoggerUtils.traceAndLogException + (envImpl, "FileReader", "readNextEntry", sb.toString(), e); + } + + /** + * Make sure that the start of the target log entry is in the read buffer. + */ + private void getLogEntryInReadBuffer() + throws ChecksumException, + EOFException, + FileNotFoundException, + DatabaseException { + + /* + * If we're going forward, because we read every byte sequentially, + * we're always sure the read buffer is positioned at the right spot. + * If we go backwards, we need to jump the buffer position. These + * methods may be overridden by subclasses. + */ + if (forward) { + setForwardPosition(); + } else { + setBackwardPosition(); + } + } + + /** + * Ensure that the next target is in the window. The default behavior is + * that the next target is the following entry, so we can assume that + * it's in the window. All we have to do is to check if we've gone past + * the specified end point. + * @throws DatabaseException + * @throws FileNotFoundException + * @throws ChecksumException + */ + protected void setForwardPosition() + throws EOFException, + DatabaseException, + ChecksumException, + FileNotFoundException { + + if (finishLsn != DbLsn.NULL_LSN) { + /* The next log entry has passed the end LSN. */ + long nextLsn = DbLsn.makeLsn(window.currentFileNum(), + nextEntryOffset); + if (DbLsn.compareTo(nextLsn, finishLsn) >= 0) { + throw new EOFException(); + } + } + } + + /** + * Ensure that the next target is in the window. The default behavior is + * that the next target is the next previous entry. + * @throws DatabaseException + */ + protected void setBackwardPosition() + throws ChecksumException, + FileNotFoundException, + EOFException, + DatabaseException { + + /* + * currentEntryPrevOffset is the offset of the entry before the current + * entry. currentEntryOffset is the offset of the entry we just read + * (or the end of the file if we're starting out). + */ + if ((currentEntryPrevOffset != 0) && + window.containsOffset(currentEntryPrevOffset)) { + + /* The next log entry has passed the start LSN.
*/ + long nextLsn = DbLsn.makeLsn(window.currentFileNum(), + currentEntryPrevOffset); + if (finishLsn != DbLsn.NULL_LSN) { + if (DbLsn.compareTo(nextLsn, finishLsn) == -1) { + throw new EOFException("finish=" + + DbLsn.getNoFormatString(finishLsn) + + "next=" + + DbLsn.getNoFormatString(nextLsn)); + } + } + + /* This log entry starts in this buffer, just reposition. */ + window.positionBuffer(currentEntryPrevOffset); + } else { + + /* + * The start of the log entry is not in this read buffer so + * we must fill the buffer again. + * + * 1) The target log entry is in a different file from the + * current window's file. Move the window to the previous + * file and start the read from the target LSN. + * + * 2) The target log entry is the same file but the log entry + * is larger than the read chunk size. Start the next read + * buffer from the target LSN. It's going to take multiple + * reads to get the log entry, and we might as well get as + * much as possible. + * + * 3) In the same file, and the log entry fits within one + * read buffer. Try to position the next buffer chunk so the + * target entry is held within the buffer, all the way at the + * end. That way, since we're reading backwards, there will be + * more buffered data available for following reads. + */ + long nextFile; + long nextWindowStart; + long nextTarget; + + if (currentEntryPrevOffset == 0) { + /* Case 1: Go to another file. */ + currentEntryPrevOffset = fileManager.getFileHeaderPrevOffset + (window.currentFileNum()); + + Long prevFileNum = + fileManager.getFollowingFileNum(window.currentFileNum(), + false); + if (prevFileNum == null) { + throw new EOFException("No file following " + + window.currentFileNum()); + } + + /* + * Check finishLSN before proceeding, in case we should stop + * the search before attempting to set the file reader to a + * position in the previous file. In [#22407] we threw a + * spurious EFE complaining that we cannot read backwards over + * a cleaned file because the previous file had been cleaned + * away. + */ + if (finishLsn != DbLsn.NULL_LSN && + prevFileNum < DbLsn.getFileNumber(finishLsn)) { + throw new EOFException( + "finish=" + DbLsn.getNoFormatString(finishLsn) + + " nextFile=0x" + Long.toHexString(prevFileNum)); + } + + if (window.currentFileNum() - prevFileNum.longValue() != 1) { + handleGapInBackwardsScan(prevFileNum); + } + + nextFile = prevFileNum; + nextWindowStart = currentEntryPrevOffset; + nextTarget = currentEntryPrevOffset; + } else if ((currentEntryOffset - currentEntryPrevOffset) > + window.capacity()) { + + /* + * Case 2: The entry is in the same file, but is bigger + * than one buffer. Position it at the front of the buffer. + */ + nextFile = window.currentFileNum(); + nextWindowStart = currentEntryPrevOffset; + nextTarget = currentEntryPrevOffset; + } else { + + /* + * Case 3: In same file, but not in this buffer. The target + * entry will fit in one buffer. + */ + nextFile = window.currentFileNum(); + long newPosition = currentEntryOffset - + window.capacity(); + nextWindowStart = (newPosition < 0) ? 0 : newPosition; + nextTarget = currentEntryPrevOffset; + } + + /* The next log entry has passed the start LSN. 
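+ * (That is, in a backward scan this check stops the reader with an + * EOFException once the candidate LSN falls before finishLsn.)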
*/ + long nextLsn = DbLsn.makeLsn(nextFile, + currentEntryPrevOffset); + if (finishLsn != DbLsn.NULL_LSN) { + if (DbLsn.compareTo(nextLsn, finishLsn) == -1) { + throw new EOFException("finish=" + + DbLsn.getNoFormatString(finishLsn) + + " next=" + + DbLsn.getNoFormatString(nextLsn)); + } + } + + window.slideAndFill + (nextFile, nextWindowStart, nextTarget, forward); + } + + /* The current entry will start at this offset. */ + currentEntryOffset = currentEntryPrevOffset; + } + + /** + * Read the basic log entry header, leaving the buffer mark at the + * beginning of the checksummed header data. + */ + private void readBasicHeader(ByteBuffer dataBuffer) + throws ChecksumException, DatabaseException { + + /* Read the header for this entry. */ + currentEntryHeader = new LogEntryHeader( + dataBuffer, window.logVersion, window.getCurrentLsn()); + + /* + * currentEntryPrevOffset is a separate field, and is not obtained + * directly from the currentEntryHeader, because it is initialized and + * used before any log entry was read. + */ + currentEntryPrevOffset = currentEntryHeader.getPrevOffset(); + } + + /** + * Reset the checksum validator and add the new header bytes. Assumes that + * the data buffer is positioned just past the end of the invariant + * portion of the log entry header. + * @throws DatabaseException + */ + private void startChecksum(ByteBuffer dataBuffer) + throws ChecksumException { + + startChecksum(dataBuffer, true /* isChecksumTarget */); + } + + private void startChecksum(ByteBuffer dataBuffer, + boolean isChecksumTarget) + throws ChecksumException { + + if (!doChecksumOnRead) { + return; + } + + if (!isChecksumTarget) { + return; + } + + /* Clear out any previous data. */ + cksumValidator.reset(); + + int originalPosition = dataBuffer.position(); + if (currentEntryHeader.isInvisible()) { + + /* + * Turn off invisibility so that the checksum will succeed. When + * entries are made invisible, the checksum is not adjusted. Note + * that the dataBuffer can be left with the invisible bit + * transformed, because the header has already been initialized, + * and this data will never be read again. + */ + LogEntryHeader.turnOffInvisible(dataBuffer, originalPosition - + LogEntryHeader.MIN_HEADER_SIZE); + } + + /* Position the buffer at the start of the data, after the checksum. */ + int headerSizeMinusChecksum = + currentEntryHeader.getInvariantSizeMinusChecksum(); + int entryTypeStart = originalPosition - headerSizeMinusChecksum; + dataBuffer.position(entryTypeStart); + + /* Load the validator with the header bytes. */ + cksumValidator.update(dataBuffer, headerSizeMinusChecksum); + + /* Move the data buffer back to the original position. */ + dataBuffer.position(originalPosition); + } + + private void addToChecksum(ByteBuffer dataBuffer, int length) + throws ChecksumException { + + if (!doChecksumOnRead) { + return; + } + + cksumValidator.update(dataBuffer, length); + } + + /** + * Add the entry bytes to the checksum and check the value. This method + * must be called with the buffer positioned at the start of the entry. + */ + private void validateChecksum(ByteBuffer dataBuffer, + boolean isChecksumTarget) + throws ChecksumException { + + if (!doChecksumOnRead) { + return; + } + + if (!isChecksumTarget) { + return; + } + + cksumValidator.update(dataBuffer, currentEntryHeader.getItemSize()); + cksumValidator.validate(currentEntryHeader.getChecksum(), + window.currentFileNum(), + currentEntryOffset); + } + + /** + * Try to read a specified number of bytes.
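+ * If the requested range spans the end of the current window, the bytes + * are accumulated piecewise in saveBuffer across successive window fills + * (see copyToSaveBuffer); otherwise the window's own buffer is returned + * directly.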
+ * @param amountToRead is the number of bytes we need + * @param collectData is true if we need to actually look at the data. + * If false, we know we're skipping this entry, and all we need to + * do is to count until we get to the right spot. + * @return a byte buffer positioned at the head of the desired portion, + * or null if we reached eof. + */ + private ByteBuffer readData(int amountToRead, boolean collectData) + throws ChecksumException, + EOFException, + FileNotFoundException, + DatabaseException { + + int alreadyRead = 0; + ByteBuffer completeBuffer = null; + saveBuffer.clear(); + + while ((alreadyRead < amountToRead) && !eof) { + + int bytesNeeded = amountToRead - alreadyRead; + if (window.hasRemaining()) { + + /* There's data in the window, process it. */ + if (collectData) { + + /* + * Save data in a buffer for processing. + */ + if ((alreadyRead > 0) || + (window.remaining() < bytesNeeded)) { + + /* We need to piece an entry together. */ + copyToSaveBuffer(bytesNeeded); + alreadyRead = saveBuffer.position(); + completeBuffer = saveBuffer; + } else { + + /* A complete entry is available in this buffer. */ + completeBuffer = window.getBuffer(); + alreadyRead = amountToRead; + } + } else { + + /* + * We're not processing the data, so there's no need to + * save it; just move the buffer positions. + */ + int positionIncrement = + (window.remaining() > bytesNeeded) ? + bytesNeeded : window.remaining(); + + alreadyRead += positionIncrement; + window.incrementBufferPosition(positionIncrement); + completeBuffer = window.getBuffer(); + } + } else { + + /* + * Look for more data. + */ + if (window.fillNext(singleFile, bytesNeeded)) { + /* This call to fillNext slid the window to a new file. */ + nextEntryOffset = 0; + } + } + } + + /* Flip the save buffer just in case we've been accumulating in it. */ + saveBuffer.flip(); + + return completeBuffer; + } + + /* Try to skip over a specified number of bytes. */ + public void skipData(int amountToSkip) + throws ChecksumException, + EOFException, + FileNotFoundException, + DatabaseException { + + try { + readData(amountToSkip, false); + } catch (DatabaseException e) { + reportProblem(e); + throw e; + } + } + + /** + * Copy the required number of bytes into the save buffer. + */ + private void copyToSaveBuffer(int bytesNeeded) { + /* How much can we get from this current read buffer? */ + int bytesFromThisBuffer; + + if (bytesNeeded <= window.remaining()) { + bytesFromThisBuffer = bytesNeeded; + } else { + bytesFromThisBuffer = window.remaining(); + } + + /* Gather it all into this save buffer. */ + ByteBuffer temp; + + /* Make sure the save buffer is big enough. */ + if (saveBuffer.capacity() - saveBuffer.position() < + bytesFromThisBuffer) { + /* Grow the save buffer. */ + temp = ByteBuffer.allocate(saveBuffer.capacity() + + bytesFromThisBuffer); + saveBuffer.flip(); + temp.put(saveBuffer); + saveBuffer = temp; + } + + /* + * Bulk copy only the required section from the read buffer into the + * save buffer. We need from readBuffer.position() to + * readBuffer.position() + bytesFromThisBuffer. + */ + temp = window.getBuffer().slice(); + temp.limit(bytesFromThisBuffer); + saveBuffer.put(temp); + window.incrementBufferPosition(bytesFromThisBuffer); + } + + /** + * Returns the number of reads since the last time this method was called. + */ + public int getAndResetNReads() { + return window.getAndResetNReads(); + } + + /** + * This method is called by readNextEntry() after the header of the current + * logrec has been de-serialized, but not the body.
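+ * (As an illustrative sketch only, not from the original source: a + * subclass interested solely in commit logrecs might implement it as + * return LogEntryType.LOG_TXN_COMMIT.equals( + * LogEntryType.findType(currentEntryHeader.getType())); + * reusing the findType() lookup seen elsewhere in this class.)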
Based on header info + * only, it may perform some actions and then decide whether the rest of + * the logrec should be de-serialized or just skipped. + * + * @return true if this reader should process the current logrec further, + * via the processEntry() method. A logrec must be passed to processEntry + * if the full logrec (not just the header) must be de-serialized for + * further processing. Return false if no further processing is needed, + * in which case the current logrec will be skipped (i.e., not returned + * to the caller of readNextEntry()). + * + * @throws DatabaseException from subclasses. + */ + protected boolean isTargetEntry() + throws DatabaseException { + + return true; + } + + /** + * Each file reader implements this method to process the entry data. + * + * @param entryBuffer A ByteBuffer that holds the logrec data and is + * positioned at the start of the logrec body (i.e., just after the + * logrec header). + * + * @return true if this entry should be returned to the caller of + * readNextEntry(). + */ + protected abstract boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException; + + /** + * Never seen by user, used to indicate that the file reader should stop. + */ + @SuppressWarnings("serial") + public static class EOFException extends Exception { + public EOFException() { + super(); + } + + /* + * @param message The message is used to hold debugging + * information. + */ + public EOFException(String message) { + super(message); + } + } + + /** + * @return true if the current entry is part of the replication stream. + */ + public boolean entryIsReplicated() { + + if (currentEntryHeader == null) { + throw EnvironmentFailureException.unexpectedState + ("entryIsReplicated should not be used before reader is " + + "initialized"); + } + return currentEntryHeader.getReplicated(); + } + + /** + * Called when a backwards scan encounters a gap in the file number + * sequence, e.g. because an intervening file was cleaned and deleted. + * The default behavior is to fail with an invalidating exception. + */ + protected void handleGapInBackwardsScan(long prevFileNum) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_INTEGRITY, + "Cannot read backward over cleaned file" + + " from 0x" + Long.toHexString(window.currentFileNum()) + + " to 0x" + Long.toHexString(prevFileNum)); + } + + /** + * A ReadWindow provides a swath of data read from the JE log. + */ + protected static class ReadWindow { + + /* + * fileNum, startOffset and endOffset indicate how the read buffer maps + * to the JE log. For example, if the read buffer size is 200 and the + * read buffer was filled from file 9, starting at byte 100, then: + * fileNum = 9 + * startOffset = 100 + * endOffset = 300 + * Note that the end point is not inclusive; endOffset is > the + * readBuffer's end. + */ + private long fileNum; // file number we're pointing to + private int logVersion; // log version for fileNum/readBuffer + protected long startOffset; // file offset that maps to buf start + protected long endOffset; // file offset that maps to buf end + protected ByteBuffer readBuffer; // buffer for reading from the file + + /* read buffer can't grow larger than this */ + private final int maxReadBufferSize; + + protected final EnvironmentImpl envImpl; + protected final FileManager fileManager; + + /* + * The number of times we've tried to read in a log entry that was too + * large for the read buffer. + */ + private long nRepeatIteratorReads; + + /* Number of reads since the last time getAndResetNReads was called.
*/ + private int nReadOperations; + + protected ReadWindow(int readBufferSize, EnvironmentImpl envImpl) { + DbConfigManager configManager = envImpl.getConfigManager(); + maxReadBufferSize = + configManager.getInt(EnvironmentParams.LOG_ITERATOR_MAX_SIZE); + this.envImpl = envImpl; + fileManager = envImpl.getFileManager(); + + readBuffer = ByteBuffer.allocate(readBufferSize); + readBuffer.flip(); + } + + /* + * Position this window at this LSN, but leave it empty; it has no data + * yet. + */ + public void initAtFileStart(long startLsn) { + setFileNum(DbLsn.getFileNumber(startLsn), + LogEntryType.UNKNOWN_FILE_HEADER_VERSION); + startOffset = DbLsn.getFileOffset(startLsn); + endOffset = startOffset; + } + + public long getEndOffset() { + return endOffset; + } + + /** + * Ensure that whenever we change the fileNum, the logVersion is also + * updated. The fileNum and logVersion fields should be kept private. + */ + protected void setFileNum(final long fileNum, final int logVersion) { + this.fileNum = fileNum; + this.logVersion = logVersion; + } + + public long currentFileNum() { + return fileNum; + } + + /* Return true if this offset is contained within the readBuffer. */ + boolean containsOffset(long targetOffset) { + return (targetOffset >= startOffset) && + (targetOffset < endOffset); + } + + /* Return true if this lsn is contained within the readBuffer. */ + public boolean containsLsn(long targetFileNumber, long targetOffset) { + return ((fileNum == targetFileNumber) && + containsOffset(targetOffset)); + } + + /* Position the readBuffer to the targetOffset. */ + public void positionBuffer(long targetOffset) { + + assert containsOffset(targetOffset) : this + " doesn't contain " + + DbLsn.getNoFormatString(targetOffset); + + readBuffer.position((int) (targetOffset - startOffset)); + } + + /* Move the readBuffer position up by the given increment. */ + void incrementBufferPosition(int increment) { + int currentPosition = readBuffer.position(); + readBuffer.position(currentPosition + increment); + } + + /* + * Reposition to the specified file, and fill starting at + * startOffset. Position the window's buffer to point at the log entry + * indicated by targetOffset. + */ + public void slideAndFill(long windowfileNum, + long windowStartOffset, + long targetOffset, + boolean forward) + throws ChecksumException, + FileNotFoundException, + DatabaseException { + + FileHandle fileHandle = fileManager.getFileHandle(windowfileNum); + try { + startOffset = windowStartOffset; + setFileNum(windowfileNum, fileHandle.getLogVersion()); + boolean foundData = fillFromFile(fileHandle, targetOffset); + + /* + * When reading backwards, we need to guarantee there is no log + * gap; throw an EnvironmentFailureException if one exists. + */ + if (!foundData && !forward) { + throw EnvironmentFailureException.unexpectedState + ("Detected a log file gap when reading backwards. " + + "Target position = " + DbLsn.getNoFormatString + (DbLsn.makeLsn(windowfileNum, targetOffset)) + + " starting position = " + DbLsn.getNoFormatString + (DbLsn.makeLsn(windowfileNum, windowStartOffset)) + + " end position = " + DbLsn.getNoFormatString + (DbLsn.makeLsn(windowfileNum, endOffset))); + } + } finally { + fileHandle.release(); + } + } + + /** + * Fill up the read buffer with more data, moving along to the + * following file (next largest number) if needed. + * @return true if the fill moved us to a new file.
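+ * @throws EOFException if the current file is exhausted and either + * singleFile is true or no following file exists.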
+ */ + protected boolean fillNext(boolean singleFile, int bytesNeeded) + throws ChecksumException, + FileNotFoundException, + EOFException, + DatabaseException { + + adjustReadBufferSize(bytesNeeded); + + FileHandle fileHandle = null; + try { + /* Get a file handle to read in more log. */ + fileHandle = fileManager.getFileHandle(fileNum); + + /* + * Check to see if we've come to the end of the file. If so, + * get the next file. + */ + startOffset = endOffset; + if (fillFromFile(fileHandle, startOffset)) { + /* + * Successfully filled the read buffer, but didn't move to + * a new file. + */ + return false; + } + + /* This file is done -- can we read in the next file? */ + if (singleFile) { + throw new EOFException("Single file only"); + } + + Long nextFile = + fileManager.getFollowingFileNum(fileNum, + true /* forward */); + + if (nextFile == null) { + throw new EOFException(); + } + + fileHandle.release(); + fileHandle = null; + fileHandle = fileManager.getFileHandle(nextFile); + setFileNum(nextFile, fileHandle.getLogVersion()); + startOffset = 0; + fillFromFile(fileHandle, 0); + return true; + } finally { + if (fileHandle != null) { + fileHandle.release(); + } + } + } + + /* + * Assume that the window is properly positioned. Try to fill the read + * buffer with data from this file handle, starting at the location + * indicated by the starting offset field. If this file contains more + * data, return true. If this file doesn't contain more data, return + * false. + * + * In all cases, leave the read buffer pointing at the target + * offset and in a state that's ready to support reads, even if there + * is nothing in the buffer. Note that the target offset may not + * be the same as the starting offset. + * @return true if more data was read, false if not. + */ + protected boolean fillFromFile(FileHandle fileHandle, + long targetOffset) + throws DatabaseException { + + boolean foundData = false; + readBuffer.clear(); + if (fileManager.readFromFile(fileHandle.getFile(), + readBuffer, + startOffset, + fileHandle.getFileNum(), + false /* dataKnownToBeInFile */)) { + foundData = true; + nReadOperations += 1; + /* + * Ensure that fileNum and logVersion are in sync. setFileNum + * handles changes in the file number. But we must also update + * the logVersion here to handle the first read after we + * initialize fileNum and logVersion is unknown. + */ + logVersion = fileHandle.getLogVersion(); + } + + /* + * In all cases, set up the read buffer for valid reading. If the + * buffer has no data, it will be positioned at the beginning, and + * will be able to correctly return the fact that there is no data + * present. + */ + + endOffset = startOffset + readBuffer.position(); + readBuffer.flip(); + readBuffer.position((int) (targetOffset - startOffset)); + return foundData; + } + + /** + * Change the read buffer size if we start hitting large log entries so + * we don't get into an expensive cycle of multiple reads and piecing + * together of log entries. + */ + protected void adjustReadBufferSize(int amountToRead) { + + int readBufferSize = readBuffer.capacity(); + + /* + * We need to read something larger than the current buffer + * size. + */ + if (amountToRead > readBufferSize) { + + /* We're not at the max yet. */ + if (readBufferSize < maxReadBufferSize) { + + /* + * Make the buffer the minimum of amountToRead (rounded up + * to a multiple of 1K) and maxReadBufferSize. + */ + if (amountToRead < maxReadBufferSize) { + readBufferSize = amountToRead; + /* Make it a multiple of 1K.
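+ * For example (illustrative arithmetic): amountToRead = 2600 leaves + * remainder = 552, so readBufferSize becomes 2600 + (1024 - 552) = 3072.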
*/ + int remainder = readBufferSize % 1024; + readBufferSize += 1024 - remainder; + readBufferSize = Math.min(readBufferSize, + maxReadBufferSize); + } else { + readBufferSize = maxReadBufferSize; + } + readBuffer = ByteBuffer.allocate(readBufferSize); + } + + if (amountToRead > readBuffer.capacity()) { + nRepeatIteratorReads++; + } + } + } + + int capacity() { + return readBuffer.capacity(); + } + + int remaining() { + return readBuffer.remaining(); + } + + boolean hasRemaining() { + return readBuffer.hasRemaining(); + } + + ByteBuffer getBuffer() { + return readBuffer; + } + + /** + * Returns the number of reads since the last time this method was + * called. + */ + int getAndResetNReads() { + int tmp = nReadOperations; + nReadOperations = 0; + return tmp; + } + + long getNRepeatIteratorReads() { + return nRepeatIteratorReads; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + long start = DbLsn.makeLsn(fileNum, startOffset); + long end = DbLsn.makeLsn(fileNum, endOffset); + sb.append("window covers "); + sb.append(DbLsn.getNoFormatString(start)).append(" to "); + sb.append(DbLsn.getNoFormatString(end)); + sb.append(" positioned at "); + long target = getCurrentLsn(); + sb.append(DbLsn.getNoFormatString(target)); + return sb.toString(); + } + + long getCurrentLsn() { + return DbLsn.makeLsn(fileNum, startOffset + readBuffer.position()); + } + } +} diff --git a/src/com/sleepycat/je/log/FileSource.java b/src/com/sleepycat/je/log/FileSource.java new file mode 100644 index 0000000..f4495e7 --- /dev/null +++ b/src/com/sleepycat/je/log/FileSource.java @@ -0,0 +1,87 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * FileSource is used as a channel to a log file when faulting in objects + * from the log. + */ +class FileSource implements LogSource { + + private final RandomAccessFile file; + private final int readBufferSize; + private final FileManager fileManager; + private final long fileNum; + private final int logVersion; + + FileSource(RandomAccessFile file, + int readBufferSize, + FileManager fileManager, + long fileNum, + int logVersion) { + this.file = file; + this.readBufferSize = readBufferSize; + this.fileManager = fileManager; + this.fileNum = fileNum; + this.logVersion = logVersion; + } + + /** + * @throws DatabaseException in subclasses. + * @see LogSource#release + */ + public void release() + throws DatabaseException { + } + + /** + * @see LogSource#getBytes + */ + public ByteBuffer getBytes(long fileOffset) + throws DatabaseException { + + return getBytes(fileOffset, readBufferSize); + } + + /** + * @see LogSource#getBytes + */ + public ByteBuffer getBytes(long fileOffset, int numBytes) + throws DatabaseException { + + /* Fill up buffer from file. 
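+ * The destination buffer is allocated at exactly numBytes, filled from + * the file at fileOffset, and flipped below so the caller reads from + * position 0.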
*/ + ByteBuffer destBuf = ByteBuffer.allocate(numBytes); + fileManager.readFromFile(file, destBuf, fileOffset, fileNum); + + assert EnvironmentImpl.maybeForceYield(); + + destBuf.flip(); + return destBuf; + } + + public int getLogVersion() { + return logVersion; + } + + @Override + public String toString() { + return "[FileSource file=0x" + Long.toHexString(fileNum) + "]"; + } +} diff --git a/src/com/sleepycat/je/log/INFileReader.java b/src/com/sleepycat/je/log/INFileReader.java new file mode 100644 index 0000000..1e537d7 --- /dev/null +++ b/src/com/sleepycat/je/log/INFileReader.java @@ -0,0 +1,614 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.cleaner.RecoveryUtilizationTracker; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.INContainingEntry; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.recovery.VLSNRecoveryProxy; +import com.sleepycat.je.tree.FileSummaryLN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.utilint.DbLsn; + +/** + * INFileReader supports recovery by scanning log files during the IN rebuild + * pass. It looks for internal nodes (all types), segregated by whether they + * belong to the main tree or the duplicate trees. + * + *

+ * This file reader can also be run in tracking mode to keep track of the + * maximum node ID, database ID and txn ID seen so those sequences can be + * updated properly at recovery. In this mode it also performs utilization + * counting. It is only run once in tracking mode per recovery, in the first + * phase of recovery.

        + */ +public class INFileReader extends FileReader { + + /* Information about the last entry seen. */ + private boolean lastEntryWasDelete; + private boolean lastEntryWasDupDelete; + private LogEntryType fromLogType; + private boolean isProvisional; + + /* + * targetEntryMap maps DbLogEntryTypes to log entries. We use this + * collection to find the right LogEntry instance to read in the current + * entry. + */ + private Map targetEntryMap; + private LogEntry targetLogEntry; + + /* Set of non-target log entry types for ID tracking. */ + private Set idTrackingSet; + /* Cache of non-target log entries for ID tracking. */ + private Map idTrackingMap; + + private boolean trackIds; + private long minReplicatedNodeId; + private long maxNodeId; + private long minReplicatedDbId; + private long maxDbId; + private long minReplicatedTxnId; + private long maxTxnId; + private long ckptEnd; + + /* Used for utilization tracking. */ + private long partialCkptStart; + private RecoveryUtilizationTracker tracker; + + /* Used for replication. */ + private VLSNRecoveryProxy vlsnProxy; + + /** DBs that may violate the rule for upgrading to log version 8. */ + private Set logVersion8UpgradeDbs; + private AtomicBoolean logVersion8UpgradeDeltas; + + /** + * Create this reader to start at a given LSN. + */ + public INFileReader(EnvironmentImpl env, + int readBufferSize, + long startLsn, + long finishLsn, + boolean trackIds, + long partialCkptStart, + long ckptEnd, + RecoveryUtilizationTracker tracker) { + this(env, readBufferSize, startLsn, finishLsn, trackIds, + partialCkptStart, ckptEnd, tracker, + null /*logVersion8UpgradeDbs*/, + null /*logVersion8UpgradeDeltas*/); + } + + /** + * Create with logVersion8UpgradeDbs and logVersion8UpgradeDeltas params. + */ + public INFileReader(EnvironmentImpl env, + int readBufferSize, + long startLsn, + long finishLsn, + boolean trackIds, + long partialCkptStart, + long ckptEnd, + RecoveryUtilizationTracker tracker, + Set logVersion8UpgradeDbs, + AtomicBoolean logVersion8UpgradeDeltas) + throws DatabaseException { + + super(env, readBufferSize, true, startLsn, null, + DbLsn.NULL_LSN, finishLsn); + + this.trackIds = trackIds; + this.ckptEnd = ckptEnd; + targetEntryMap = new HashMap(); + + if (trackIds) { + maxNodeId = 0; + maxDbId = 0; + maxTxnId = 0; + minReplicatedNodeId = 0; + minReplicatedDbId = DbTree.NEG_DB_ID_START; + minReplicatedTxnId = 0; + this.tracker = tracker; + this.partialCkptStart = partialCkptStart; + + idTrackingSet = new HashSet(); + idTrackingMap = new HashMap(); + + /* + * Need all nodes for tracking: + * - Need all INs for node ID tracking. + * - Need all LNs for obsolete tracking. + * - Need txnal LNs for txn ID tracking. + * - Need FileSummaryLN for obsolete tracking. + * - Need MapLN for obsolete and DB ID tracking. + * - Need BIN-delta for obsolete tracking. + */ + for (LogEntryType entryType : LogEntryType.getAllTypes()) { + if (entryType.isNodeType()) { + idTrackingSet.add(entryType); + } + } + idTrackingSet.add(LogEntryType.LOG_BIN_DELTA); + idTrackingSet.add(LogEntryType.LOG_OLD_BIN_DELTA); + + /* For tracking VLSNs. */ + vlsnProxy = envImpl.getVLSNProxy(); + idTrackingSet.add(LogEntryType.LOG_ROLLBACK_START); + + /* For checking for log version 8 upgrade errors. */ + this.logVersion8UpgradeDbs = logVersion8UpgradeDbs; + this.logVersion8UpgradeDeltas = logVersion8UpgradeDeltas; + } + } + + /** + * Configure this reader to target this kind of entry. 
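+ * (For example -- an illustrative sketch, not prescribed usage -- a + * recovery pass might register: + * reader.addTargetType(LogEntryType.LOG_IN); + * reader.addTargetType(LogEntryType.LOG_BIN); + * Callers choose whatever entry types they need to see.)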
+ */ + public void addTargetType(LogEntryType entryType) + throws DatabaseException { + + targetEntryMap.put(entryType, entryType.getNewLogEntry()); + } + + /* + * Utilization Tracking + * -------------------- + * This class counts all new log entries and obsolete INs. Obsolete LNs, + * on the other hand, are counted by RecoveryManager undo/redo. + * + * Utilization counting is done in the first recovery pass where IDs are + * tracked (trackIds=true). Processing is split between isTargetEntry + * and processEntry as follows. + * + * isTargetEntry counts only new non-node entries; this can be done very + * efficiently using only the LSN and entry type, without reading and + * unmarshalling the entry. + * + * processEntry counts new node entries and obsolete INs. + * + * processEntry also resets (sets all counters to zero and clears obsolete + * offsets) the tracked summary for a file or database when a FileSummaryLN + * or MapLN is encountered. This clears the totals that have accumulated + * during this recovery pass for entries prior to that point. We only want + * to count utilization for entries after that point. + * + * In addition, when processEntry encounters a FileSummaryLN or MapLN, its + * LSN is recorded in the tracker. This information is used during IN and + * LN utilization counting. For each file, knowing the LSN of the last + * logged FileSummaryLN for that file allows the undo/redo code to know + * whether to perform obsolete counting. If the LSN of the FileSummaryLN is + * less than (to the left of) the LN's LSN, obsolete counting should be + * performed. If it is greater, obsolete counting is already included in + * the logged FileSummaryLN and should not be repeated to prevent double + * counting. The same thing is true of counting per-database utilization + * relative to the LSN of the last logged MapLN. + */ + + /** + * If we're tracking node, database and txn IDs, we want to see all node + * log entries. If not, we only want to see IN entries. + */ + @Override + protected boolean isTargetEntry() + throws DatabaseException { + + lastEntryWasDelete = false; + lastEntryWasDupDelete = false; + targetLogEntry = null; + isProvisional = currentEntryHeader.getProvisional().isProvisional + (getLastLsn(), ckptEnd); + + /* Get the log entry type instance we need to read the entry. */ + fromLogType = LogEntryType.findType(currentEntryHeader.getType()); + LogEntry possibleTarget = targetEntryMap.get(fromLogType); + + /* Always select a non-provisional target entry. */ + if (!isProvisional) { + targetLogEntry = possibleTarget; + } + + /* Recognize IN deletion. */ + if (LogEntryType.LOG_IN_DELETE_INFO.equals(fromLogType)) { + lastEntryWasDelete = true; + } + if (LogEntryType.LOG_IN_DUPDELETE_INFO.equals(fromLogType)) { + lastEntryWasDupDelete = true; + } + + /* If we're not tracking IDs, select only the targeted entry. */ + if (!trackIds) { + return (targetLogEntry != null); + } + + /* + * Count all non-node non-delta entries except for the file header as + * new. UtilizationTracker does not count the file header. Node/delta + * entries will be counted in processEntry. Null is passed for the + * database ID; it is only needed for node entries.
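+ * The size counted is the full on-disk logrec size, header size plus + * item size, as passed to countNewLogEntry() below.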
+ */ + if (!fromLogType.isNodeType() && + !fromLogType.equals(LogEntryType.LOG_BIN_DELTA) && + !fromLogType.equals(LogEntryType.LOG_OLD_BIN_DELTA) && + !LogEntryType.LOG_FILE_HEADER.equals(fromLogType)) { + tracker.countNewLogEntry(getLastLsn(), + fromLogType, + currentEntryHeader.getSize() + + currentEntryHeader.getItemSize(), + null); // DatabaseId + } + + /* + * When we encounter a DbTree log entry, reset the tracked summary for + * the ID and Name mapping DBs. This clears what we accumulated + * previously for these databases during this recovery pass. Save the + * LSN for these databases for use by undo/redo. + */ + if (LogEntryType.LOG_DBTREE.equals(fromLogType)) { + tracker.saveLastLoggedMapLN(DbTree.ID_DB_ID, getLastLsn()); + tracker.saveLastLoggedMapLN(DbTree.NAME_DB_ID, getLastLsn()); + tracker.resetDbInfo(DbTree.ID_DB_ID); + tracker.resetDbInfo(DbTree.NAME_DB_ID); + } + + /* Track VLSNs in the log entry header of all replicated entries. */ + if (currentEntryHeader.getReplicated()) { + vlsnProxy.trackMapping(getLastLsn(), + currentEntryHeader, + null /*targetLogEntry*/); + } + + /* Return true if this logrec should be passed on to processEntry. */ + return (targetLogEntry != null || + idTrackingSet.contains(fromLogType)); + } + + /** + * This reader returns non-provisional INs and IN delete entries. + * In tracking mode, it may also scan log entries that aren't returned: + * - to set the sequences for txn, node, and database ID; + * - to update utilization and obsolete offset information; + * - for VLSN mappings for recovery. + */ + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + boolean useEntry = false; + + /* Read targeted entry. */ + if (targetLogEntry != null) { + targetLogEntry.readEntry(envImpl, currentEntryHeader, entryBuffer); + useEntry = true; + } + + /* If we're not tracking IDs, we're done. */ + if (!trackIds) { + return useEntry; + } + + /* Read non-target entry. */ + if (targetLogEntry == null) { + assert idTrackingSet.contains(fromLogType); + + targetLogEntry = idTrackingMap.get(fromLogType); + if (targetLogEntry == null) { + targetLogEntry = fromLogType.getNewLogEntry(); + idTrackingMap.put(fromLogType, targetLogEntry); + } + + targetLogEntry.readEntry(envImpl, currentEntryHeader, entryBuffer); + } + + /* + * Count node and delta entries as new. Non-node/delta entries are + * counted in isTargetEntry. + */ + if (fromLogType.isNodeType() || + fromLogType.equals(LogEntryType.LOG_BIN_DELTA) || + fromLogType.equals(LogEntryType.LOG_OLD_BIN_DELTA)) { + tracker.countNewLogEntry(getLastLsn(), fromLogType, + currentEntryHeader.getSize() + + currentEntryHeader.getItemSize(), + targetLogEntry.getDbId()); + } + + /* Track VLSNs in RollbackStart. */ + if (fromLogType.equals(LogEntryType.LOG_ROLLBACK_START)) { + vlsnProxy.trackMapping(getLastLsn(), + currentEntryHeader, + targetLogEntry); + } + + /* Process LN types. */ + if (fromLogType.isLNType()) { + + LNLogEntry lnEntry = (LNLogEntry) targetLogEntry; + + /* + * When a MapLN is encountered, reset the tracked info for that + * DB. This clears what we have accumulated so far for the DB + * during this recovery pass. This is important to eliminate + * potential double counting of obsolete logrecs done earlier + * in this recovery pass. + * + * Also, save the LSN of the MapLN for use in utilization counting + * during LN undo/redo.
+ */ + if (fromLogType.equals(LogEntryType.LOG_MAPLN)) { + + MapLN mapLN = (MapLN) lnEntry.getMainItem(); + DatabaseId dbId = mapLN.getDatabase().getId(); + + /* Track latest DB ID. */ + long dbIdVal = dbId.getId(); + maxDbId = (dbIdVal > maxDbId ? dbIdVal : maxDbId); + minReplicatedDbId = (dbIdVal < minReplicatedDbId ? + dbIdVal : minReplicatedDbId); + + tracker.resetDbInfo(dbId); + + tracker.saveLastLoggedMapLN(dbId, getLastLsn()); + } + + /* Track latest txn ID. */ + if (fromLogType.isTransactional()) { + long txnId = lnEntry.getTxnId().longValue(); + maxTxnId = (txnId > maxTxnId ? txnId : maxTxnId); + minReplicatedTxnId = (txnId < minReplicatedTxnId ? + txnId : minReplicatedTxnId); + } + + /* + * When a FSLN is encountered, reset the tracked summary info for + * that file. This clears what we have accumulated so far for the + * file during this recovery pass. This is important to eliminate + * potential double counting of obsolete logrecs done earlier + * in this recovery pass. + * + * Also, save the LSN of the FSLN for use in utilization counting + * during LN undo/redo. + */ + if (LogEntryType.LOG_FILESUMMARYLN.equals(fromLogType)) { + + lnEntry.postFetchInit(false /*isDupDb*/); + + long fileNum = FileSummaryLN.getFileNumber(lnEntry.getKey()); + + tracker.resetFileInfo(fileNum); + + tracker.saveLastLoggedFileSummaryLN(fileNum, getLastLsn()); + + /* + * Do not cache the file summary in the UtilizationProfile here, + * since it may be for a deleted log file. [#10395] + */ + } + } + + /* Process IN types. */ + if (fromLogType.isINType()) { + INLogEntry inEntry = (INLogEntry) targetLogEntry; + + /* Keep track of the largest node ID seen. */ + long nodeId = inEntry.getNodeId(); + assert (nodeId != Node.NULL_NODE_ID); + + maxNodeId = (nodeId > maxNodeId ? nodeId : maxNodeId); + + minReplicatedNodeId = + (nodeId < minReplicatedNodeId ? nodeId : minReplicatedNodeId); + } + + /* Process INContainingEntry types. */ + if (fromLogType.isINType() || + fromLogType.equals(LogEntryType.LOG_OLD_BIN_DELTA)) { + + INContainingEntry inEntry = (INContainingEntry) targetLogEntry; + DatabaseId dbId = inEntry.getDbId(); + + long newLsn = getLastLsn(); + + /* + * Count the previous version of this IN as obsolete. If lsn + * (i.e. the current version) is non-provisional, then oldLsn is + * indeed obsolete. However, if lsn is provisional, oldLsn is + * obsolete only if an ancestor of this IN has been logged non- + * provisionally later in the log, and unless lsn < CKPT_END + * we cannot know if this is true or not. For this reason, we + * conservatively assume that oldLsn is indeed obsolete, but we + * use inexact counting, so that the oldLsn value will not be + * recorded by the tracker. (Another reason is that earlier log + * versions did not have a full LSN in oldLsn; they had only the + * file number.) + * + * Notice also that there may be an FSLN logrec after lsn that + * includes oldLsn as an obsolete IN. If so, we are double counting + * here, but the double counting will go away when we later process + * that FSLN and as a result wipe out the utilization info we have + * collected so far about the log file containing oldLsn.
+ */ + long oldLsn = inEntry.getPrevFullLsn(); + + if (oldLsn != DbLsn.NULL_LSN && !inEntry.isBINDelta()) { + tracker.countObsoleteUnconditional( + oldLsn, fromLogType, 0, dbId, false /*countExact*/); + } + + oldLsn = inEntry.getPrevDeltaLsn(); + + if (oldLsn != DbLsn.NULL_LSN) { + tracker.countObsoleteUnconditional( + oldLsn, fromLogType, 0, dbId, false /*countExact*/); + } + + /* + * Count the current IN version as obsolete if lsn is + * provisional and is after partialCkptStart. In this case, the + * crash occurred during a ckpt and it is the crash event itself + * that may make the current logrec obsolete. Again, whether + * the lsn is indeed obsolete or not depends on whether an + * ancestor of this IN has been logged non-provisionally later + * in the log. At this point we cannot know if this is true or + * not. For this reason, we conservatively assume that lsn + * is indeed obsolete, but we use inexact counting, so that the + * lsn value will not be recorded by the tracker. As explained + * above, we may be double counting here, but this will be fixed + * if we later find an FSLN for the same log file as lsn. + * + * We are too conservative here in assuming that lsn is + * obsolete. This is because of the "grouping" behaviour of the + * checkpointer. Specifically: + * + * Most of the provisional IN logrecs in this region of the log + * (i.e., after the start of an incomplete ckpt) are for BINs + * logged by the checkpointer (eviction during a ckpt logs dirty + * BINs and UINs provisionally as well). The checkpointer logs + * all the dirty BIN siblings and then logs their parent non- + * provisionally, before logging any other BINs. So, not taking + * eviction into account, there can be at most 128 BIN logrecs + * after the partialCkptStart that are truly obsolete. + * + * Note that older versions of the checkpointer did not group + * together the logging of sibling BINs and their parent. + * Without this grouping, the assumption made here that most + * provisional logrecs after partialCkptStart are obsolete is + * much more accurate. + * + * A potential solution: instead of counting these logrecs as + * obsolete here, save their LSNs on-the-side inside the tracker. + * When, later, a UIN N is replayed and attached to the tree, + * remove from the saved LSN set any LSNs that appear in the slots + * of N. After all REDO-INs passes are done, count as obsolete any + * LSNs remaining in the saved set. + */ + if (isProvisional && + partialCkptStart != DbLsn.NULL_LSN && + DbLsn.compareTo(partialCkptStart, newLsn) < 0) { + tracker.countObsoleteUnconditional( + newLsn, fromLogType, 0, inEntry.getDbId(), + false /*countExact*/); + } + } + + /* + * Add candidate DB IDs and note deltas for possible log version 8 + * upgrade violations. + */ + if (currentEntryHeader.getVersion() < 8) { + if (logVersion8UpgradeDbs != null && + fromLogType.isNodeType()) { + logVersion8UpgradeDbs.add(targetLogEntry.getDbId()); + } + if (logVersion8UpgradeDeltas != null && + (fromLogType.equals(LogEntryType.LOG_OLD_BIN_DELTA) || + fromLogType.equals(LogEntryType.LOG_OLD_DUP_BIN_DELTA))) { + logVersion8UpgradeDeltas.set(true); + } + } + + /* Return true if this is a targeted entry. */ + return useEntry; + } + + /** + * Get the last IN seen by the reader. + */ + public IN getIN(DatabaseImpl dbImpl) + throws DatabaseException { + + return ((INContainingEntry) targetLogEntry).getIN(dbImpl); + } + + /** + * Get the last databaseId seen by the reader.
+ */ + public DatabaseId getDatabaseId() { + return ((INContainingEntry) targetLogEntry).getDbId(); + } + + /** + * Get the maximum node ID seen by the reader. + */ + public long getMaxNodeId() { + return maxNodeId; + } + + public long getMinReplicatedNodeId() { + return minReplicatedNodeId; + } + + /** + * Get the maximum DB ID seen by the reader. + */ + public long getMaxDbId() { + return maxDbId; + } + + public long getMinReplicatedDbId() { + return minReplicatedDbId; + } + + /** + * Get the maximum txn ID seen by the reader. + */ + public long getMaxTxnId() { + return maxTxnId; + } + + public long getMinReplicatedTxnId() { + return minReplicatedTxnId; + } + + /** + * @return true if the last entry was a delete info entry. + */ + public boolean isDeleteInfo() { + return lastEntryWasDelete; + } + + /** + * @return true if the last entry was a dup delete info entry. + */ + public boolean isDupDeleteInfo() { + return lastEntryWasDupDelete; + } + + /** + * @return true if the last entry was a BIN-delta. + */ + public boolean isBINDelta() { + return + targetLogEntry.getLogType().equals(LogEntryType.LOG_BIN_DELTA) || + targetLogEntry.getLogType().equals(LogEntryType.LOG_OLD_BIN_DELTA); + } + + public VLSNRecoveryProxy getVLSNProxy() { + return vlsnProxy; + } +} diff --git a/src/com/sleepycat/je/log/JEFileFilter.java b/src/com/sleepycat/je/log/JEFileFilter.java new file mode 100644 index 0000000..4838a53 --- /dev/null +++ b/src/com/sleepycat/je/log/JEFileFilter.java @@ -0,0 +1,106 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.io.File; +import java.io.FilenameFilter; +import java.util.StringTokenizer; + +/** + * JEFileFilters are used for listing JE files. + */ +class JEFileFilter implements FilenameFilter { + String[] suffix; + long minFileNumber = 0; + long maxFileNumber = -1; + + JEFileFilter(String[] suffix) { + this.suffix = suffix; + } + + /** + * @param maxFileNumber this filter will only return + * files whose numbers are <= maxFileNumber. + */ + JEFileFilter(String[] suffix, long maxFileNumber) { + this.suffix = suffix; + this.maxFileNumber = maxFileNumber; + } + + /** + * @param minFileNumber this filter will only return files that are >= + * minFileNumber. + * @param maxFileNumber this filter will only return + * files whose numbers are <= maxFileNumber. + */ + JEFileFilter(String[] suffix, long minFileNumber, long maxFileNumber) { + this.suffix = suffix; + this.minFileNumber = minFileNumber; + this.maxFileNumber = maxFileNumber; + } + + private boolean matches(String fileSuffix) { + for (int i = 0; i < suffix.length; i++) { + if (fileSuffix.equalsIgnoreCase(suffix[i])) { + return true; + } + } + return false; + } + + /** + * A JE file has to be of the format nnnnnnnn.suffix. + */ + public boolean accept(File dir, String name) { + boolean ok = false; + StringTokenizer tokenizer = new StringTokenizer(name, "."); + /* There should be two or three parts.
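+ * (e.g. "00000001.jdb" splits into "00000001" and "jdb"; a trailing + * numeric version token, when present, is the third part.)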
*/ + int nTokens = tokenizer.countTokens(); + if (nTokens == 2 || nTokens == 3) { + boolean hasVersion = (nTokens == 3); + String fileNumber = tokenizer.nextToken(); + String fileSuffix = "." + tokenizer.nextToken(); + String fileVersion = (hasVersion ? tokenizer.nextToken() : null); + + /* Check the length and the suffix. */ + if ((fileNumber.length() == 8) && + matches(fileSuffix)) { + //(fileSuffix.equalsIgnoreCase(suffix))) { + + /* The first part should be a number. */ + try { + long fileNum = Long.parseLong(fileNumber, 16); + if (fileNum < minFileNumber) { + ok = false; + } else if ((fileNum <= maxFileNumber) || + (maxFileNumber == -1)) { + ok = true; + } + } catch (NumberFormatException e) { + ok = false; + } + if (hasVersion) { + /* + * A malformed version token vetoes the match; a valid + * one leaves the range check's result in place. + */ + try { + Integer.parseInt(fileVersion); + } catch (NumberFormatException e) { + ok = false; + } + } + } + } + + return ok; + } +} diff --git a/src/com/sleepycat/je/log/LNFileReader.java b/src/com/sleepycat/je/log/LNFileReader.java new file mode 100644 index 0000000..9ac2c18 --- /dev/null +++ b/src/com/sleepycat/je/log/LNFileReader.java @@ -0,0 +1,258 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import javax.transaction.xa.Xid; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.NameLNLogEntry; +import com.sleepycat.je.txn.RollbackEnd; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.txn.TxnAbort; +import com.sleepycat.je.txn.TxnCommit; +import com.sleepycat.je.txn.TxnPrepare; + +/** + * LNFileReader scans log files for LNs. Also, if it's going backwards for the + * undo phase in recovery, it reads transaction commit entries. + */ +public class LNFileReader extends FileReader { + + /* + * targetEntryMap maps DbLogEntryTypes to log entries. We use this + * collection to find the right LogEntry instance to read in the current + * entry. + */ + protected Map targetEntryMap; + protected LogEntry targetLogEntry; + + private long ckptEnd; + + /** + * Create this reader to start at a given LSN. + * @param env The relevant EnvironmentImpl + * @param readBufferSize buffer size in bytes for reading in the log + * @param startLsn where to start in the log + * @param redo If true, we're going to go forward from + * the start LSN to the end of the log. If false, we're going + * backwards from the end of the log to the start LSN. + * @param finishLsn the last LSN to read in the log. May be NULL_LSN if we + * want to read to the end of the log. + * @param endOfFileLsn the virtual LSN that marks the end of the log. (The + * one off the end of the log). Only used if we're reading backwards. + * Different from the startLsn because the startLsn tells us where the + * beginning of the start entry is, but not the length/end of the start + * entry.
May be null if we're going forward.
+     */
+    public LNFileReader(EnvironmentImpl env,
+                        int readBufferSize,
+                        long startLsn,
+                        boolean redo,
+                        long endOfFileLsn,
+                        long finishLsn,
+                        Long singleFileNum,
+                        long ckptEnd)
+        throws DatabaseException {
+
+        super(env, readBufferSize, redo /*forward*/, startLsn,
+              singleFileNum, endOfFileLsn, finishLsn);
+
+        this.ckptEnd = ckptEnd;
+        targetEntryMap = new HashMap();
+    }
+
+    public void addTargetType(LogEntryType entryType)
+        throws DatabaseException {
+
+        targetEntryMap.put(entryType, entryType.getNewLogEntry());
+    }
+
+    /**
+     * @return true if this is a transactional LN or Locker Commit entry.
+     */
+    @Override
+    protected boolean isTargetEntry() {
+
+        if (currentEntryHeader.getProvisional().isProvisional
+            (getLastLsn(), ckptEnd)) {
+            /* Skip provisional entries. */
+            targetLogEntry = null;
+        } else {
+            LogEntryType fromLogType =
+                new LogEntryType(currentEntryHeader.getType());
+
+            /* Is it a target entry? */
+            targetLogEntry = targetEntryMap.get(fromLogType);
+        }
+        return (targetLogEntry != null);
+    }
+
+    /**
+     * This reader instantiates an LN and key for every LN entry.
+     */
+    @Override
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        targetLogEntry.readEntry(envImpl, currentEntryHeader, entryBuffer);
+        return true;
+    }
+
+    /**
+     * @return true if the last entry was an LN.
+     */
+    public boolean isLN() {
+        return (targetLogEntry instanceof LNLogEntry);
+    }
+
+    /**
+     * Get the last LN log entry seen by the reader. Note that
+     * LNLogEntry.postFetchInit must be called before calling certain
+     * LNLogEntry methods.
+     */
+    public LNLogEntry getLNLogEntry() {
+        return (LNLogEntry) targetLogEntry;
+    }
+
+    /**
+     * Returns a NameLNLogEntry if the LN is a NameLN, or null otherwise.
+     */
+    public NameLNLogEntry getNameLNLogEntry() {
+        return (targetLogEntry instanceof NameLNLogEntry) ?
+            ((NameLNLogEntry) targetLogEntry) :
+            null;
+    }
+
+    /**
+     * Get the last databaseId seen by the reader.
+     */
+    public DatabaseId getDatabaseId() {
+        return targetLogEntry.getDbId();
+    }
+
+    /**
+     * @return the transaction id of the current entry.
+     */
+    public Long getTxnId() {
+        return ((LNLogEntry) targetLogEntry).getTxnId();
+    }
+
+    /*
+     * @return true if the last entry was a TxnPrepare record.
+     */
+    public boolean isPrepare() {
+        return (targetLogEntry.getMainItem() instanceof TxnPrepare);
+    }
+
+    /**
+     * Get the last txn prepare id seen by the reader.
+     */
+    public long getTxnPrepareId() {
+        return ((TxnPrepare) targetLogEntry.getMainItem()).getId();
+    }
+
+    /**
+     * Get the last txn prepare Xid seen by the reader.
+     */
+    public Xid getTxnPrepareXid() {
+        return ((TxnPrepare) targetLogEntry.getMainItem()).getXid();
+    }
+
+    /*
+     * @return true if the last entry was a TxnCommit record.
+     */
+    public boolean isCommit() {
+        return (targetLogEntry.getMainItem() instanceof TxnCommit);
+    }
+
+    /*
+     * @return true if the last entry was a RollbackStart record.
+     */
+    public boolean isRollbackStart() {
+        return (targetLogEntry.getMainItem() instanceof RollbackStart);
+    }
+
+    /*
+     * @return true if the last entry was a RollbackEnd record.
+     */
+    public boolean isRollbackEnd() {
+        return (targetLogEntry.getMainItem() instanceof RollbackEnd);
+    }
+
+    public Object getMainItem() {
+        return targetLogEntry.getMainItem();
+    }
+
+    /*
+     * For error message.
+     */
+    public String dumpCurrentHeader() {
+        return currentEntryHeader.toString();
+    }
+
+    /*
+     * @return true if the last entry was a TxnAbort record.
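+     *
+     * Like the other predicates in this class, this is meaningful only
+     * after readNextEntry() has returned true. A typical undo-phase scan,
+     * sketched here with hypothetical setup values, drives the reader
+     * roughly as follows:
+     *
+     *   LNFileReader reader =
+     *       new LNFileReader(envImpl, bufSize, startLsn,
+     *                        false,              // redo=false: scan backward
+     *                        endOfFileLsn, finishLsn, null, ckptEnd);
+     *   reader.addTargetType(LogEntryType.LOG_TXN_COMMIT);
+     *   reader.addTargetType(LogEntryType.LOG_TXN_ABORT);
+     *   while (reader.readNextEntry()) {
+     *       if (reader.isCommit()) {
+     *           long txnId = reader.getTxnCommitId();  // undo uses this
+     *       } else if (reader.isAbort()) {
+     *           long txnId = reader.getTxnAbortId();
+     *       }
+     *   }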
+ */ + public boolean isAbort() { + return (targetLogEntry.getMainItem() instanceof TxnAbort); + } + + /** + * Get the last txn abort id seen by the reader. + */ + public long getTxnAbortId() { + return ((TxnAbort) targetLogEntry.getMainItem()).getId(); + } + + /** + * Get the last txn commit id seen by the reader. + */ + public long getTxnCommitId() { + return ((TxnCommit) targetLogEntry.getMainItem()).getId(); + } + + /** + * Get last abort LSN seen by the reader (may be null). + */ + public long getAbortLsn() { + return ((LNLogEntry) targetLogEntry).getAbortLsn(); + } + + /** + * Get last abort known deleted seen by the reader. + */ + public boolean getAbortKnownDeleted() { + return ((LNLogEntry) targetLogEntry).getAbortKnownDeleted(); + } + + public boolean isInvisible() { + return currentEntryHeader.isInvisible(); + } + + /** + * Return the VLSN if this entry is in replicated stream. + */ + public long getVLSN() { + assert entryIsReplicated(); + return currentEntryHeader.getVLSN().getSequence(); + } +} diff --git a/src/com/sleepycat/je/log/LastFileReader.java b/src/com/sleepycat/je/log/LastFileReader.java new file mode 100644 index 0000000..ad5542f --- /dev/null +++ b/src/com/sleepycat/je/log/LastFileReader.java @@ -0,0 +1,435 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * LastFileReader traverses the last log file, doing checksums and looking for + * the end of the log. Different log types can be registered with it and it + * will remember the last occurrence of targeted entry types. + */ +public class LastFileReader extends FileReader { + + /* Log entry types to track. */ + private Set trackableEntries; + + private long nextUnprovenOffset; + private long lastValidOffset; + private LogEntryType entryType; + + /* + * Last LSN seen for tracked types. Key = LogEntryType, data is the offset + * (Long). + */ + private Map lastOffsetSeen; + + /* + * A marker log entry used to indicate that the log is physically or + * semantically corrupt and can't be recovered. A restore of some form must + * take place. + */ + private RestoreRequired restoreRequired; + + /** + * This file reader is always positioned at the last file. + * + * If no valid files exist (and invalid files do not contain data and can + * be moved away), we will not throw an exception. 
We will return false + * from the first call (all calls) to readNextEntry. + * + * @throws DatabaseException if the last file contains data and is invalid. + */ + public LastFileReader(EnvironmentImpl envImpl, + int readBufferSize) + throws DatabaseException { + + super(envImpl, readBufferSize, true, DbLsn.NULL_LSN, Long.valueOf(-1), + DbLsn.NULL_LSN, DbLsn.NULL_LSN); + + try { + startAtLastGoodFile(null /*singleFileNum*/); + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e); + } + + trackableEntries = new HashSet(); + lastOffsetSeen = new HashMap(); + + lastValidOffset = 0; + nextUnprovenOffset = nextEntryOffset; + } + + /** + * Ctor which allows passing in the file number we want to read to the end + * of. This is used by the ScavengerFileReader when it encounters a bad + * log record in the middle of a file. + * + * @throws ChecksumException rather than wrapping it, to allow + * ScavengerFileReader to handle it specially -- we should not invalidate + * the environment with EnvironmentFailureException. + */ + LastFileReader(EnvironmentImpl envImpl, + int readBufferSize, + Long specificFileNumber) + throws ChecksumException, DatabaseException { + + super(envImpl, readBufferSize, true, DbLsn.NULL_LSN, + specificFileNumber, DbLsn.NULL_LSN, DbLsn.NULL_LSN); + + startAtLastGoodFile(specificFileNumber); + + trackableEntries = new HashSet(); + lastOffsetSeen = new HashMap(); + + lastValidOffset = 0; + nextUnprovenOffset = nextEntryOffset; + } + + /** + * Initialize starting position to the last file with a complete header + * with a valid checksum. + */ + private void startAtLastGoodFile(Long singleFileNum) + throws ChecksumException { + + eof = false; + window.initAtFileStart(DbLsn.makeLsn(0, 0)); + + /* + * Start at what seems like the last file. If it doesn't exist, we're + * done. + */ + Long lastNum = ((singleFileNum != null) && + (singleFileNum.longValue() >= 0)) ? + singleFileNum : + fileManager.getLastFileNum(); + FileHandle fileHandle = null; + + long fileLen = 0; + while ((fileHandle == null) && !eof) { + if (lastNum == null) { + eof = true; + } else { + try { + try { + window.initAtFileStart(DbLsn.makeLsn(lastNum, 0)); + fileHandle = fileManager.getFileHandle(lastNum); + + /* + * Check the size of this file. If it opened + * successfully but only held a header or is 0 length, + * backup to the next "last" file unless this is the + * only file in the log. Note that an incomplete header + * will end up throwing a checksum exception, but a 0 + * length file will open successfully in read only + * mode. + */ + fileLen = fileHandle.getFile().length(); + if (fileLen <= FileManager.firstLogEntryOffset()) { + lastNum = fileManager.getFollowingFileNum + (lastNum, false); + if (lastNum != null) { + fileHandle.release(); + fileHandle = null; + } + } + } catch (DatabaseException e) { + lastNum = attemptToMoveBadFile(e); + fileHandle = null; + } catch (ChecksumException e) { + lastNum = attemptToMoveBadFile(e); + fileHandle = null; + } finally { + if (fileHandle != null) { + fileHandle.release(); + } + } + } catch (IOException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_READ, e); + } + } + } + + nextEntryOffset = 0; + } + + /** + * Something is wrong with this file. If there is no data in this file (the + * header is <= the file header size) then move this last file aside and + * search the next "last" file. 
If the last file does have data in it, + * return null and throw an exception back to the application, since we're + * not sure what to do now. + * + * @param cause is a DatabaseException or ChecksumException. + */ + private Long attemptToMoveBadFile(Exception cause) + throws IOException, ChecksumException, DatabaseException { + + String fileName = + fileManager.getFullFileNames(window.currentFileNum())[0]; + File problemFile = new File(fileName); + + if (problemFile.length() <= FileManager.firstLogEntryOffset()) { + fileManager.clear(); // close all existing files + /* Move this file aside. */ + Long lastNum = fileManager.getFollowingFileNum + (window.currentFileNum(), false); + if (!fileManager.renameFile(window.currentFileNum(), + FileManager.BAD_SUFFIX)) { + throw EnvironmentFailureException.unexpectedState + ("Could not rename file: 0x" + + Long.toHexString(window.currentFileNum())); + } + + return lastNum; + } + /* There's data in this file, throw up to the app. */ + if (cause instanceof DatabaseException) { + throw (DatabaseException) cause; + } + if (cause instanceof ChecksumException) { + throw (ChecksumException) cause; + } + throw EnvironmentFailureException.unexpectedException(cause); + } + + public void setEndOfFile() + throws IOException, DatabaseException { + + fileManager.truncateSingleFile + (window.currentFileNum(), nextUnprovenOffset); + } + + /** + * @return The LSN to be used for the next log entry. + */ + public long getEndOfLog() { + return DbLsn.makeLsn(window.currentFileNum(), nextUnprovenOffset); + } + + public long getLastValidLsn() { + return DbLsn.makeLsn(window.currentFileNum(), lastValidOffset); + } + + public long getPrevOffset() { + return lastValidOffset; + } + + public LogEntryType getEntryType() { + return entryType; + } + + /** + * Tell the reader that we are interested in these kind of entries. + */ + public void setTargetType(LogEntryType type) { + trackableEntries.add(type); + } + + /** + * @return The last LSN seen in the log for this kind of entry, or null. + */ + public long getLastSeen(LogEntryType type) { + Long typeNumber = lastOffsetSeen.get(type); + if (typeNumber != null) { + return DbLsn.makeLsn(window.currentFileNum(), + typeNumber.longValue()); + } else { + return DbLsn.NULL_LSN; + } + } + + /** + * Validate the checksum on each entry, see if we should remember the LSN + * of this entry. + */ + protected boolean processEntry(ByteBuffer entryBuffer) { + + /* + * If we're supposed to remember this LSN, record it. Although LSN + * recording is currently done for test purposes only, still do get a + * valid logEntyType, so it can be used for reading in a log entry + * further in this method. + */ + entryType = LogEntryType.findType(currentEntryHeader.getType()); + if (trackableEntries.contains(entryType)) { + lastOffsetSeen.put(entryType, Long.valueOf(currentEntryOffset)); + } + + /* + * If this is a RestoreRequired entry, read it and + * deserialize. Otherwise, skip over the data, we're not doing anything + * with it. + */ + if (entryType.equals(LogEntryType.LOG_RESTORE_REQUIRED)) { + LogEntry logEntry = entryType.getSharedLogEntry(); + logEntry.readEntry(envImpl, currentEntryHeader, entryBuffer); + restoreRequired = (RestoreRequired) logEntry.getMainItem(); + } else { + entryBuffer.position(entryBuffer.position() + + currentEntryHeader.getItemSize()); + } + + return true; + } + + /** + * readNextEntry will stop at a bad entry. + * @return true if an element has been read. 
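+     *
+     * A minimal sketch of the intended scan (hypothetical caller; in
+     * practice recovery drives this loop):
+     *
+     *   LastFileReader reader = new LastFileReader(envImpl, bufSize);
+     *   while (reader.readNextEntry()) {
+     *       // advances until the first invalid entry or the end of the log
+     *   }
+     *   long endOfLog = reader.getEndOfLog();  // LSN for the next write
+     *   reader.setEndOfFile();                 // truncate unproven bytes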
+ */ + @Override + public boolean readNextEntry() { + + boolean foundEntry = false; + + try { + + /* + * At this point, + * currentEntryOffset is the entry we just read. + * nextEntryOffset is the entry we're about to read. + * currentEntryPrevOffset is 2 entries ago. + * Note that readNextEntry() moves all the offset pointers up. + */ + + foundEntry = super.readNextEntryAllowExceptions(); + + /* + * Note that initStartingPosition() makes sure that the file header + * entry is valid. So by the time we get to this method, we know + * we're at a file with a valid file header entry. + */ + lastValidOffset = currentEntryOffset; + nextUnprovenOffset = nextEntryOffset; + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } catch (ChecksumException e) { + LoggerUtils.fine + (logger, envImpl, + "Found checksum exception while searching for end of log. " + + "Last valid entry is at " + DbLsn.toString + (DbLsn.makeLsn(window.currentFileNum(), lastValidOffset)) + + " Bad entry is at " + + DbLsn.makeLsn(window.currentFileNum(), nextUnprovenOffset)); + + DbConfigManager configManager = envImpl.getConfigManager(); + boolean findCommitTxn = + configManager.getBoolean + (EnvironmentParams.HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION); + + /* Find the committed transactions at the rest the log file. */ + if (findCommitTxn) { + boolean committedTxnFound = findCommittedTxn(); + /* If we have found a committed txn. */ + if (committedTxnFound) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.FOUND_COMMITTED_TXN, + "Found committed txn after the corruption point"); + } + } + } + return foundEntry; + } + + /* + * [#18307] Find the committed transaction after the corrupted log entry + * log file. + * + * Suppose we have LSN 1000, and the log entry there has a checksum + * exception. + * Case 1. if the header at LSN 1000 is bad, findCommittedTxn() will not be + * called, and just truncate the log. + * Note: This case seems to not be handled. It seems we do call + * findCommittedTxn when the header is bad. Perhaps this comment + * is outdated. + * Case 2. if the header at LSN 1000 says the log entry size is N, we + * skip N bytes, which may let us go to the next log entry. If the + * next log entry also has a checksum exception (because size N is + * wrong, or because it really has a checksum problem), return + * false, and truncate the log. + * Case 3. if we manage to read past LSN 1000, but then hit a second + * checksum exception, return false and truncate the log at the + * first exception. + * Case 4. if we manage to read past LSN 1000, and do not see any checksum + * exceptions, and do not see any commits, return false and + * truncate the log. + * Case 5. if we manage to read past LSN 1000, and do not see any checksum + * exceptions, but do see a txn commit, return true and throw + * EnvironmentFailureException. + */ + private boolean findCommittedTxn() { + try { + + /* + * First we skip over the bad log entry, according to the item size + * we get from the log entry's header. + */ + skipData(currentEntryHeader.getItemSize()); + + /* + * Begin searching for the committed txn from the next log entry to + * the end of the log file. + */ + while (super.readNextEntryAllowExceptions()) { + /* Case 5. 
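A commit is present past the corruption point, so the log cannot simply be truncated without losing a durable transaction. 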
*/ + if (LogEntryType.LOG_TXN_COMMIT.equals(entryType)) { + return true; + } + } + } catch (EOFException e) { + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } catch (ChecksumException e) { + /* Case 2 and 3. */ + LoggerUtils.fine + (logger, envImpl, + "Found checksum exception while searching for end of log. " + + "Last valid entry is at " + DbLsn.toString + (DbLsn.makeLsn(window.currentFileNum(), lastValidOffset)) + + " Bad entry is at " + + DbLsn.makeLsn(window.currentFileNum(), nextUnprovenOffset)); + } + + /* Case 4. */ + return false; + } + + public RestoreRequired getRestoreRequired() { + return restoreRequired; + } +} diff --git a/src/com/sleepycat/je/log/LogBuffer.java b/src/com/sleepycat/je/log/LogBuffer.java new file mode 100644 index 0000000..45e9af3 --- /dev/null +++ b/src/com/sleepycat/je/log/LogBuffer.java @@ -0,0 +1,389 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.LockSupport; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.latch.Latch; +import com.sleepycat.je.latch.LatchFactory; +import com.sleepycat.je.utilint.DbLsn; + +/** + * LogBuffers hold outgoing, newly written log entries. + * Space is allocated via the allocate() method that + * returns a LogBufferSegment object. The LogBuffer.writePinCount + * is incremented each time space is allocated. Once the + * caller copies data into the log buffer, the + * pin count is decremented via the free() method. + * Readers of a log buffer wait until the pin count + * is zero. + * + * The pin count is incremented under the readLatch. The + * pin count is decremented without holding the latch. + * Holding the readLatch will prevent the pin count from + * being incremented. + * + * Apart from the pin count, access to the buffer is protected by the + * readLatch and the LWL: + * - Write access requires holding both the LWL and the readLatch. + * - Read access requires holding either the LWL or the readLatch. + * + * Of course, for buffers outside the buffer pool, or in the process of being + * constructed, these rules do not apply and no latching is necessary. + * + * TODO: + * Although the above statement about latching reflects the current + * implementation, it would be better if we can remove the reliance on the LWL + * and protect all access to the buffer using the readLatch. To do this, the + * callers of getFirstLsn and hasRoom will have to acquire the readLatch. + * + * @see LogBufferPool + */ +public class LogBuffer implements LogSource { + + private static final String DEBUG_NAME = LogBuffer.class.getName(); + + /* Storage */ + private final ByteBuffer buffer; + + /* Information about what log entries are held here. 
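firstLsn and lastLsn bracket the contiguous range of entries in this buffer; both are NULL_LSN while the buffer is empty. 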
*/ + private long firstLsn; + private long lastLsn; + + /* + * The read latch protects all modifications to the buffer, and protects + * read access to the buffer when the LWL is not held. Decrementing the pin + * count is the only exception, and this can be done with no latching. + */ + private Latch readLatch; + + /* + * Buffer may be rewritten because an IOException previously occurred. + */ + private boolean rewriteAllowed; + + private AtomicInteger writePinCount = new AtomicInteger(); + private byte[] data; + private EnvironmentImpl env; + + LogBuffer(int capacity, EnvironmentImpl env) + throws DatabaseException { + + data = new byte[capacity]; + buffer = ByteBuffer.wrap(data); + readLatch = LatchFactory.createExclusiveLatch( + env, DEBUG_NAME, false /*collectStats*/); + this.env = env; + reinit(); + } + + /* + * Used by LogManager for the case when we have a temporary buffer in hand + * and no LogBuffers in the LogBufferPool are large enough to hold the + * current entry being written. We just wrap the temporary ByteBuffer + * in a LogBuffer and pass it to FileManager. [#12674]. + */ + LogBuffer(ByteBuffer buffer, long firstLsn) { + this.buffer = buffer; + this.firstLsn = firstLsn; + this.lastLsn = firstLsn; + rewriteAllowed = false; + } + + /** + * The LWL and buffer pool latch must be held. + */ + void reinit() + throws DatabaseException { + + readLatch.acquireExclusive(); + buffer.clear(); + firstLsn = DbLsn.NULL_LSN; + lastLsn = DbLsn.NULL_LSN; + rewriteAllowed = false; + writePinCount.set(0); + readLatch.release(); + } + + /* + * Write support + */ + + /** + * Return first LSN held in this buffer. + * + * The LWL or readLatch must be held. + */ + public long getFirstLsn() { + return firstLsn; + } + + /** + * Register the LSN for a buffer segment that has been allocated in this + * buffer. + * + * The LWL and readLatch must be held. + */ + void registerLsn(long lsn) { + assert readLatch.isExclusiveOwner(); + + if (lastLsn != DbLsn.NULL_LSN) { + assert (DbLsn.compareTo(lsn, lastLsn) > 0): + "lsn=" + lsn + " lastlsn=" + lastLsn; + } + + lastLsn = lsn; + + if (firstLsn == DbLsn.NULL_LSN) { + firstLsn = lsn; + } + } + + /** + * Check capacity of buffer. + * + * The LWL or readLatch must be held. + * + * @return true if this buffer can hold this many more bytes. + */ + boolean hasRoom(int numBytes) { + return (numBytes <= (buffer.capacity() - buffer.position())); + } + + /** + * Returns the buffer for read access (although some tests may write to the + * buffer). + * + * The LWL or readLatch must be held. + * + * @return the actual data buffer. + */ + public ByteBuffer getDataBuffer() { + return buffer; + } + + /** + * The LWL or readLatch must be held. + * + * @return capacity in bytes + */ + int getCapacity() { + return buffer.capacity(); + } + + /* + * Read support + */ + + /** + * Support for reading out of a still-in-memory log. Can be used to + * determine if a log entry with a given LSN is contained in this buffer, + * or whether an arbitrary LSN location is present in the buffer. + * + * No latches need be held. The buffer is latched for read if true is + * returned. + * + * This method must wait until the buffer's pin count goes to zero. When + * writing is active and this is the currentWriteBuffer, it may have to + * wait until the buffer is full. + * + * @return true if this buffer holds the data at this LSN location. If true + * is returned, the buffer will be latched for read. Returns false if LSN + * is not here, and releases the read latch. 
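+     *
+     * A minimal sketch of the read protocol this implies (hypothetical
+     * caller in this package):
+     *
+     *   if (logBuffer.containsLsn(lsn)) {  // latched for read on success
+     *       try {
+     *           ByteBuffer b =
+     *               logBuffer.getBytes(DbLsn.getFileOffset(lsn));
+     *           // deserialize the entry starting at b.position()
+     *       } finally {
+     *           logBuffer.release();       // drop the read latch
+     *       }
+     *   }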
+ */ + boolean containsLsn(long lsn) { + assert lsn != DbLsn.NULL_LSN; + + /* + * Latch before we look at the LSNs. We do not have to wait + * for zero to check the LSN field but need to have the count + * zero for a reader to read the buffer. + */ + waitForZeroAndLatch(); + boolean found = false; + + if ((firstLsn != DbLsn.NULL_LSN) && + (DbLsn.getFileNumber(firstLsn) == DbLsn.getFileNumber(lsn))) { + + final long fileOffset = DbLsn.getFileOffset(lsn); + final int contentSize; + if (buffer.position() == 0) { + /* Buffer was flipped for reading. */ + contentSize = buffer.limit(); + } else { + /* Buffer is still being written into. */ + contentSize = buffer.position(); + } + final long firstLsnOffset = DbLsn.getFileOffset(firstLsn); + final long lastContentOffset = firstLsnOffset + contentSize; + + if ((firstLsnOffset <= fileOffset) && + (lastContentOffset > fileOffset)) { + found = true; + } + } + + if (found) { + return true; + } else { + readLatch.release(); + return false; + } + } + + /** + * Acquires the readLatch, providing exclusive access to the buffer. + * When modifying the buffer, both the LWL and buffer latch must be held. + * + * Note that containsLsn() acquires the latch for reading. + * + * Call release() to release the latch. + * + * TODO: + * It would be possible to use a shared buffer latch to allow concurrent + * access by multiple readers. The access rules for would then be: + * - Write access requires holding both the LWL and the buffer latch EX. + * - Read access requires holding either the LWL or the buffer latch SH. + * Note that LogBufferPool.bumpCurrent calls latchForWrite, but it may + * actually only need read access. + */ + public void latchForWrite() + throws DatabaseException { + + readLatch.acquireExclusive(); + } + + /* + * LogSource support + */ + + /** + * Releases the readLatch. + * + * @see LogSource#release + */ + public void release() { + readLatch.releaseIfOwner(); + } + + boolean getRewriteAllowed() { + return rewriteAllowed; + } + + void setRewriteAllowed() { + rewriteAllowed = true; + } + + /** + * Allocate a segment out of the buffer. + * + * The LWL and readLatch must be held. + * + * @param size of buffer to allocate + * + * @return null if not enough room, otherwise a + * LogBufferSegment for the data. + */ + public LogBufferSegment allocate(int size) { + assert readLatch.isExclusiveOwner(); + + if (hasRoom(size)) { + ByteBuffer buf = + ByteBuffer.wrap(data, buffer.position(), size); + buffer.position(buffer.position() + size); + writePinCount.incrementAndGet(); + return new LogBufferSegment(this, buf); + } + return null; + } + + /** + * Called with the buffer not latched. + */ + public void free() { + writePinCount.decrementAndGet(); + } + + /** + * Acquire the buffer latched and with the buffer pin count equal to zero. + */ + public void waitForZeroAndLatch() { + boolean done = false; + while (!done) { + if (writePinCount.get() > 0) { + LockSupport.parkNanos(this, 100); + /* + * This may be overkill to check if a thread was + * interrupted. There should be no interrupt of the + * thread pinning and unpinning the buffer. + */ + if (Thread.interrupted()) { + throw new ThreadInterruptedException( + env, "Interrupt during read operation"); + } + } else { + readLatch.acquireExclusive(); + if (writePinCount.get() == 0) { + done = true; + } else { + readLatch.release(); + } + } + } + } + + /** + * Make a copy of this buffer (doesn't copy data, only buffer state) + * and position it to read the requested data. 
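+     * The duplicate shares the underlying byte array but has independent
+     * position and limit, so the caller can consume the copy without
+     * disturbing this buffer's own state.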
+     *
+     * The LWL or readLatch must be held.
+     *
+     * @see LogSource#getBytes
+     */
+    public ByteBuffer getBytes(long fileOffset) {
+        ByteBuffer copy = buffer.duplicate();
+        copy.position((int) (fileOffset - DbLsn.getFileOffset(firstLsn)));
+        return copy;
+    }
+
+    /**
+     * Same as getBytes(long fileOffset) since the buffer should always hold
+     * a whole entry.
+     *
+     * The LWL or readLatch must be held.
+     *
+     * @see LogSource#getBytes
+     */
+    public ByteBuffer getBytes(long fileOffset, int numBytes) {
+        return getBytes(fileOffset);
+    }
+
+    /**
+     * Entries in write buffers are always the current version.
+     */
+    public int getLogVersion() {
+        return LogEntryType.LOG_VERSION;
+    }
+
+    @Override
+    public String toString() {
+        return
+            "[LogBuffer firstLsn=" + DbLsn.getNoFormatString(firstLsn) + "]";
+    }
+}
diff --git a/src/com/sleepycat/je/log/LogBufferPool.java b/src/com/sleepycat/je/log/LogBufferPool.java
new file mode 100644
index 0000000..b88a703
--- /dev/null
+++ b/src/com/sleepycat/je/log/LogBufferPool.java
@@ -0,0 +1,660 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log;
+
+import static com.sleepycat.je.log.LogStatDefinition.LBFP_BUFFER_BYTES;
+import static com.sleepycat.je.log.LogStatDefinition.LBFP_LOG_BUFFERS;
+import static com.sleepycat.je.log.LogStatDefinition.LBFP_MISS;
+import static com.sleepycat.je.log.LogStatDefinition.LBFP_NOT_RESIDENT;
+import static com.sleepycat.je.log.LogStatDefinition.LBFP_NO_FREE_BUFFER;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.LinkedList;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.latch.Latch;
+import com.sleepycat.je.latch.LatchFactory;
+import com.sleepycat.je.utilint.AtomicLongStat;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.IntStat;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.StatGroup;
+
+/**
+ * LogBufferPool manages a circular pool of LogBuffers. The
+ * currentWriteBuffer is the buffer that is currently used to add data.
+ * When the buffer is full, the next (adjacent) buffer is made available
+ * for writing. The buffer pool has a dirty list of buffers. A buffer
+ * becomes a member of the dirty list when the currentWriteBuffer is moved
+ * to another buffer. Buffers are removed from the dirty list when they are
+ * written. The dirtyStart/dirtyEnd variables indicate the list of dirty
+ * buffers. A value of -1 for either variable indicates that there are no
+ * dirty buffers. These variables are synchronized via the
+ * LogBufferPool.bufferPoolLatch. The LogManager.logWriteLatch (aka LWL)
+ * is used to serialize access to the currentWriteBuffer, so that entries
+ * are added in write/LSN order.
+ *
+ * A buffer used for writing is accessed by the getWriteBuffer method.
+ * This method must be called while holding the LWL. The buffer returned
+ * is not latched.
+ *
+ * A LogBuffer has a pin count (LogBuffer.writePinCount) associated with
+ * it. The pin count is incremented when space is allocated in the buffer.
+ * The allocation of space is serialized under the LWL. Threads will add
+ * data to the buffer by latching the buffer, but without holding the LWL.
+ * After the data is added, the pin count is decremented. A buffer cannot be
+ * used for reading unless the pin count is zero. Note that the increment of
+ * the pin count is done with the buffer latched, while the decrement does
+ * not latch the buffer.
+ *
+ * Read access to a log buffer is allowed only if the buffer is latched
+ * and the pin count is zero. A thread that attempts to access a log
+ * buffer for reading will latch it and check the pin count. If the pin
+ * count is not zero, the latch is released and the process is retried.
+ * The thread attempting to access the log buffer for reading may be
+ * delayed; the worst case is when the reader has to wait until the buffer
+ * is filled (at which point the pin count will be zero).
+ *
+ * @see LogBuffer
+ */
+class LogBufferPool {
+    private static final String DEBUG_NAME = LogBufferPool.class.getName();
+
+    private EnvironmentImpl envImpl = null;
+    private int nLogBuffers;
+    private int logBufferSize;      // size of each log buffer
+    private LinkedList<LogBuffer> bufferPool;
+
+    /*
+     * The dirty start/end are the indexes of the first/last dirty buffers.
+     * These dirty buffers do not include the current write buffer.
+     * These fields are changed under the buffer pool latch.
+     */
+    private int dirtyStart = -1;
+    private int dirtyEnd = -1;
+
+    /*
+     * Buffer that holds the current log end. All writes go to this buffer.
+     * The members are protected by the LogManager.logWriteLatch.
+     */
+    private LogBuffer currentWriteBuffer;
+    private int currentWriteBufferIndex;
+
+    private final FileManager fileManager;
+
+    /* Stats */
+    private final StatGroup stats;
+    private final AtomicLongStat nNotResident; // instantiated from an LSN
+    private final AtomicLongStat nCacheMiss;   // retrieved from disk
+    private final IntStat logBuffers;
+    private final LongStat nBufferBytes;
+
+    /*
+     * Number of times that the current write pointer could not be
+     * incremented because there was no non-dirty buffer.
+     */
+    private final LongStat nNoFreeBuffer;
+
+    private final boolean runInMemory;
+
+    /*
+     * bufferPoolLatch synchronizes access and changes to the buffer pool.
+     * Related latches are the log write latch in LogManager and the read
+     * latches in each log buffer. The log write latch is always taken before
+     * the bufferPoolLatch. The bufferPoolLatch is always taken before any
+     * logBuffer read latch. When faulting in an object from the log, the
+     * order of latching is:
+     *    bufferPoolLatch.acquire()
+     *    LogBuffer read latch acquire()
+     *    bufferPoolLatch.release()
+     *    LogBuffer read latch release()
+     * bufferPoolLatch is also used to protect assignment to the
+     * currentWriteBuffer field.
+     */
+    private final Latch bufferPoolLatch;
+
+    /*
+     * A minimum LSN property for the pool that can be checked without
+     * latching, to reduce contention by readers. An LSN less than
+     * minBufferLsn is guaranteed not to be in the pool. An LSN greater or
+     * equal to minBufferLsn may or may not be in the pool, and latching is
+     * necessary to determine this. Initializing minBufferLsn to zero ensures
+     * that we will latch and check the pool until it is initialized with a
+     * valid LSN. 
+ * [#19642] + */ + private volatile long minBufferLsn = 0; + + LogBufferPool(FileManager fileManager, + EnvironmentImpl envImpl) + throws DatabaseException { + + this.fileManager = fileManager; + this.envImpl = envImpl; + bufferPoolLatch = LatchFactory.createExclusiveLatch( + envImpl, DEBUG_NAME + "_FullLatch", true /*collectStats*/); + + /* Configure the pool. */ + DbConfigManager configManager = envImpl.getConfigManager(); + runInMemory = envImpl.isMemOnly(); + reset(configManager); + + /* Current buffer is the active buffer that writes go into. */ + currentWriteBuffer = bufferPool.getFirst(); + currentWriteBufferIndex = 0; + + stats = new StatGroup(LogStatDefinition.LBF_GROUP_NAME, + LogStatDefinition.LBF_GROUP_DESC); + nNotResident = new AtomicLongStat(stats, LBFP_NOT_RESIDENT); + nCacheMiss = new AtomicLongStat(stats, LBFP_MISS); + logBuffers = new IntStat(stats, LBFP_LOG_BUFFERS); + nBufferBytes = new LongStat(stats, LBFP_BUFFER_BYTES); + nNoFreeBuffer = new LongStat(stats, LBFP_NO_FREE_BUFFER); + } + + final int getLogBufferSize() { + return logBufferSize; + } + + /** + * Initialize the pool at construction time and when the cache is resized. + * This method is called after the memory budget has been calculated. + * + * The LWL must be held when the buffer pool is not being constructed. + */ + void reset(DbConfigManager configManager) + throws DatabaseException { + + /* + * When running in memory, we can't clear the existing pool and + * changing the buffer size is not very useful, so just return. + */ + if (runInMemory && bufferPool != null) { + return; + } + + /* + * Write the currentWriteBuffer to the file and reset + * currentWriteBuffer. + */ + if (currentWriteBuffer != null) { + bumpAndWriteDirty(0, true); + } + + /* + * Based on the log budget, figure the number and size of + * log buffers to use. + */ + int numBuffers = + configManager.getInt(EnvironmentParams.NUM_LOG_BUFFERS); + long logBufferBudget = envImpl.getMemoryBudget().getLogBufferBudget(); + + long logFileSize = + configManager.getLong(EnvironmentParams.LOG_FILE_MAX); + /* Buffers must be int sized. */ + int newBufferSize = (int) logBufferBudget / numBuffers; + /* Limit log buffer size to size of a log file. */ + newBufferSize = Math.min(newBufferSize, (int) logFileSize); + /* list of buffers that are available for log writing */ + LinkedList newPool = new LinkedList(); + + /* + * If we're running in memory only, don't pre-allocate all the buffers. + * This case only occurs when called from the constructor. + */ + if (runInMemory) { + numBuffers = 1; + } + + for (int i = 0; i < numBuffers; i++) { + newPool.add(new LogBuffer(newBufferSize, envImpl)); + } + + /* + * The following applies when this method is called to reset the pool + * when an existing pool is in use: + * - The old pool will no longer be referenced. + * - Buffers being read in the old pool will be no longer referenced + * after the read operation is complete. + * - The currentWriteBuffer field is not changed here; it will be no + * longer referenced after it is written to the file and a new + * currentWriteBuffer is assigned. + * - The logBufferSize can be changed now because it is only used for + * allocating new buffers; it is not used as the size of the + * currentWriteBuffer. + */ + bufferPoolLatch.acquireExclusive(); + bufferPool = newPool; + nLogBuffers = numBuffers; + logBufferSize = newBufferSize; + /* Current buffer is the active buffer that writes go into. 
*/ + currentWriteBuffer = bufferPool.getFirst(); + currentWriteBufferIndex = 0; + bufferPoolLatch.release(); + } + + /** + * Get a log buffer for writing an entry of sizeNeeded bytes. + * + * If sizeNeeded will fit in currentWriteBuffer, currentWriteBuffer is + * returned without requiring any flushing. The caller can allocate the + * entry in the buffer returned. + * + * If sizeNeeded won't fit in currentWriteBuffer, but is LTE the LogBuffer + * capacity, we bump the buffer to get an empty currentWriteBuffer. If + * there are no free write buffers, then all dirty buffers must be flushed. + * The caller can allocate the entry in the buffer returned. + * + * If sizeNeeded is greater than the LogBuffer capacity, flush all dirty + * buffers and return an empty (but too small) currentWriteBuffer. The + * caller must then write the entry to the file directly. + * + * The LWL must be held. + * + * @param sizeNeeded size of the entry to be written. + * + * @param flippedFile if true, always flush all dirty buffers and get an + * empty currentWriteBuffer, and fsync/finish the log file. + * + * @return currentWriteBuffer, which may or may not have enough room for + * sizeNeeded. + */ + LogBuffer getWriteBuffer(int sizeNeeded, boolean flippedFile) + throws IOException, DatabaseException { + + /* + * We need a new log buffer either because this log buffer is full, or + * the LSN has marched along to the next file. Each log buffer only + * holds entries that belong to a single file. If we've flipped over + * into the next file, we'll need to get a new log buffer even if the + * current one has room. + */ + if (flippedFile) { + + /* + * Write the dirty buffers to the file and get an empty + * currentWriteBuffer. + * + * TODO: Why pass true for flushWriteQueue before doing an fsync? + */ + bumpAndWriteDirty(sizeNeeded, true /*flushWriteQueue*/); + + /* Now that the buffers have been written, fsync. */ + if (!runInMemory) { + fileManager.syncLogEndAndFinishFile(); + } + } else if (!currentWriteBuffer.hasRoom(sizeNeeded)) { + + /* + * Try to bump the current write buffer since there + * was not enough space in the current write buffer. + */ + + if (!bumpCurrent(sizeNeeded) || + !currentWriteBuffer.hasRoom(sizeNeeded) ) { + + /* + * We could not bump because there was no free + * buffer, or the item is larger than the buffer size. + * Write the dirties to free a buffer up, or to flush + * in preparation for writing a temporary buffer. + */ + bumpAndWriteDirty(sizeNeeded, false /*flushWriteQueue*/); + } + } + + return currentWriteBuffer; + } + + /** + * Bump current write buffer and write the dirty buffers. + * + * The LWL must be held to ensure that, if there are no free buffers, we + * can write the dirty buffers and have a free one, which is required to + * bump the current write buffer. + * + * @param sizeNeeded used only if running in memory. Size of the log buffer + * buffer that is needed. + * + * @param flushWriteQueue true if data is written to log, otherwise data + * may be placed on the write queue. + */ + void bumpAndWriteDirty(int sizeNeeded, boolean flushWriteQueue) { + + /* + * Write the currentWriteBuffer to the file and reset + * currentWriteBuffer. + */ + if (!bumpCurrent(sizeNeeded)) { + + /* + * Could not bump the current write buffer; no clean buffers. + * Write the current dirty buffers so we can bump. + */ + writeDirty(flushWriteQueue); + + if (bumpCurrent(sizeNeeded)) { + + /* + * Since we have the log write latch we should be + * able to bump the current buffer. 
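+                 * The writeDirty() call above emptied the dirty list, so
+                 * this bump must succeed, and the writeDirty() below then
+                 * flushes the buffer that was just bumped.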
+                 */
+                writeDirty(flushWriteQueue);
+            } else {
+                /* should not ever get here */
+                throw EnvironmentFailureException.unexpectedState(
+                    envImpl, "No free log buffers.");
+            }
+
+        } else {
+
+            /*
+             * Since we have the log write latch we should be
+             * able to bump the current buffer.
+             */
+            writeDirty(flushWriteQueue);
+        }
+    }
+
+    /**
+     * Returns the next buffer slot number from the input buffer slot number.
+     * The slots form a circular buffer.
+     *
+     * The bufferPoolLatch must be held.
+     *
+     * @return the next slot number after the given slotNumber.
+     */
+    private int getNextSlot(int slotNumber) {
+        assert bufferPoolLatch.isExclusiveOwner();
+        return (slotNumber < (bufferPool.size() - 1)) ? ++slotNumber : 0;
+    }
+
+    /**
+     * Writes the dirty log buffers.
+     *
+     * No latches need be held.
+     *
+     * Note that if no buffers are dirty, nothing will be written, even when
+     * data is buffered in the write queue and flushWriteQueue is true.
+     * If we were to allow a condition where 1) the write queue is non-empty,
+     * 2) there are no dirty log buffers, and 3) the current write buffer is
+     * empty, then a flushNoSync at that time won't flush the write queue.
+     * This should never happen because: a) flushNoSync leaves the write queue
+     * empty, b) LogManager.serialLogWork leaves the current write buffer
+     * non-empty, and c) writeDirty(false) doesn't change the state of the
+     * current write buffer. TODO: Confirm this with a more detailed analysis.
+     *
+     * @param flushWriteQueue if true, the data is written to the file;
+     * otherwise the data may be placed on the FileManager WriteQueue.
+     */
+    void writeDirty(boolean flushWriteQueue) {
+        bufferPoolLatch.acquireExclusive();
+        try {
+            if (dirtyStart < 0) {
+                return;
+            }
+            boolean process = true;
+            do {
+                LogBuffer lb = bufferPool.get(dirtyStart);
+                lb.waitForZeroAndLatch();
+                try {
+                    writeBufferToFile(lb, flushWriteQueue);
+                } finally {
+                    lb.release();
+                }
+                if (dirtyStart == dirtyEnd) {
+                    process = false;
+                } else {
+                    dirtyStart = getNextSlot(dirtyStart);
+                }
+            } while (process);
+            dirtyStart = -1;
+            dirtyEnd = -1;
+        } finally {
+            bufferPoolLatch.releaseIfOwner();
+        }
+    }
+
+    /**
+     * Writes a log buffer.
+     *
+     * The LWL or buffer latch must be held.
+     *
+     * @param latchedBuffer buffer to write
+     *
+     * @param flushWriteQueue if true, the data is written to the file;
+     * otherwise the data may be placed on the FileManager WriteQueue.
+     */
+    private void writeBufferToFile(LogBuffer latchedBuffer,
+                                   boolean flushWriteQueue) {
+
+        if (runInMemory) {
+            return;
+        }
+
+        /*
+         * Check for an invalid env while the buffer is latched and before
+         * writing. This is necessary to prevent writing a buffer corruption
+         * that was detected during a read from the buffer. The read will
+         * invalidate the env while holding the buffer latch.
+         */
+        envImpl.checkIfInvalid();
+
+        try {
+            ByteBuffer currentByteBuffer = latchedBuffer.getDataBuffer();
+            int savePosition = currentByteBuffer.position();
+            int saveLimit = currentByteBuffer.limit();
+            currentByteBuffer.flip();
+
+            /*
+             * If we're configured for writing (not a memory-only situation),
+             * write this buffer to disk and find a new buffer to use.
+             */
+            try {
+                fileManager.writeLogBuffer(latchedBuffer, flushWriteQueue);
+            } catch (Throwable t) {
+                currentByteBuffer.position(savePosition);
+                currentByteBuffer.limit(saveLimit);
+
+                /*
+                 * Exceptions thrown during logging are expected to be
+                 * fatal. Ensure that the environment is invalidated
+                 * when a non-fatal exception is unexpectedly thrown.
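+                 * The cases below: an EnvironmentFailureException is
+                 * rethrown (wrapped only if the environment has not been
+                 * invalidated yet), an Error invalidates the environment
+                 * directly, and any other Throwable is wrapped so that
+                 * invalidation happens as a side effect of the wrapping.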
+                 */
+                if (t instanceof EnvironmentFailureException) {
+
+                    /*
+                     * If we've already invalidated the environment,
+                     * re-throw so as not to excessively wrap the
+                     * exception.
+                     */
+                    if (!envImpl.isValid()) {
+                        throw (EnvironmentFailureException) t;
+                    }
+                    /* Otherwise, invalidate the environment. */
+                    throw EnvironmentFailureException.unexpectedException(
+                        envImpl, (EnvironmentFailureException) t);
+                } else if (t instanceof Error) {
+                    envImpl.invalidate((Error) t);
+                    throw (Error) t;
+                } else if (t instanceof Exception) {
+                    throw EnvironmentFailureException.unexpectedException(
+                        envImpl, (Exception) t);
+                } else {
+                    throw EnvironmentFailureException.unexpectedException(
+                        envImpl, t.getMessage(), null);
+                }
+            }
+        } finally {
+            latchedBuffer.release();
+        }
+    }
+
+    /**
+     * Move the current write buffer to the next. Will not bump the current
+     * write buffer if the buffer is empty.
+     *
+     * The LWL must be held.
+     *
+     * @param sizeNeeded used only if running in memory. Size of the log
+     * buffer that is needed.
+     *
+     * @return false when the buffer needs flushing, but there are no free
+     * buffers. Returns true when running in memory, when the buffer is
+     * empty, or when the buffer is non-empty and is bumped.
+     */
+    boolean bumpCurrent(int sizeNeeded) {
+
+        /* We're done with the buffer, flip to make it readable. */
+        bufferPoolLatch.acquireExclusive();
+        currentWriteBuffer.latchForWrite();
+
+        LogBuffer latchedBuffer = currentWriteBuffer;
+        try {
+
+            /*
+             * Is there anything in this write buffer?
+             */
+            if (currentWriteBuffer.getFirstLsn() == DbLsn.NULL_LSN) {
+                return true;
+            }
+
+            if (runInMemory) {
+                int bufferSize =
+                    ((logBufferSize > sizeNeeded) ?
+                     logBufferSize : sizeNeeded);
+                /* We're supposed to run in-memory, allocate another buffer. */
+                currentWriteBuffer = new LogBuffer(bufferSize, envImpl);
+                bufferPool.add(currentWriteBuffer);
+                currentWriteBufferIndex = bufferPool.size() - 1;
+                return true;
+            }
+
+            if (dirtyStart < 0) {
+                dirtyStart = currentWriteBufferIndex;
+            } else {
+                /* Check to see if there is an undirty buffer to use. */
+                if (getNextSlot(currentWriteBufferIndex) == dirtyStart) {
+                    nNoFreeBuffer.increment();
+                    return false;
+                }
+            }
+
+            dirtyEnd = currentWriteBufferIndex;
+            currentWriteBufferIndex = getNextSlot(currentWriteBufferIndex);
+            LogBuffer nextToUse = bufferPool.get(currentWriteBufferIndex);
+            LogBuffer newInitialBuffer =
+                bufferPool.get(getNextSlot(currentWriteBufferIndex));
+            nextToUse.reinit();
+
+            /* Assign currentWriteBuffer with the latch held. */
+            currentWriteBuffer = nextToUse;
+
+            /* Paranoia: do this after the transition to the new buffer. */
+            updateMinBufferLsn(newInitialBuffer);
+            return true;
+        } finally {
+            latchedBuffer.release();
+            bufferPoolLatch.releaseIfOwner();
+        }
+    }
+
+    /**
+     * Set minBufferLsn to the start of the new initial buffer. The update
+     * occurs only after cycling once through the buffers in the pool. This
+     * is a simple implementation, and waiting until we've filled the buffer
+     * pool to initialize it is sufficient for reducing read contention in
+     * getReadBufferByLsn. [#19642]
+     *
+     * The LWL must be held.
+     */
+    private void updateMinBufferLsn(final LogBuffer newInitialBuffer) {
+        final long newMinLsn = newInitialBuffer.getFirstLsn();
+        if (newMinLsn != DbLsn.NULL_LSN) {
+            minBufferLsn = newMinLsn;
+        }
+    }
+
+    /**
+     * Find a buffer that contains the given LSN location.
+     *
+     * No latches need be held.
+     *
+     * @return the buffer that contains the given LSN location, latched and
+     * ready to read, or null.
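+     *
+     * A minimal sketch of a fetch-by-LSN path (hypothetical caller in this
+     * package):
+     *
+     *   LogBuffer buf = pool.getReadBufferByLsn(lsn);
+     *   if (buf == null) {
+     *       // miss: read the entry from the file via FileManager instead
+     *   } else {
+     *       try {
+     *           ByteBuffer b = buf.getBytes(DbLsn.getFileOffset(lsn));
+     *           // deserialize the entry from b
+     *       } finally {
+     *           buf.release();
+     *       }
+     *   }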
+ */ + LogBuffer getReadBufferByLsn(long lsn) + throws DatabaseException { + + nNotResident.increment(); + + /* Avoid latching if the LSN is known not to be in the pool. */ + if (DbLsn.compareTo(lsn, minBufferLsn) < 0) { + nCacheMiss.increment(); + return null; + } + + /* Latch and check the buffer pool. */ + bufferPoolLatch.acquireExclusive(); + try { + /* + * TODO: Check currentWriteBuffer last, because we will have to + * wait for its pin count to go to zero, which may require waiting + * until it is full. + */ + for (LogBuffer l : bufferPool) { + if (l.containsLsn(lsn)) { + return l; + } + } + + nCacheMiss.increment(); + return null; + } finally { + bufferPoolLatch.releaseIfOwner(); + } + } + + StatGroup loadStats(StatsConfig config) + throws DatabaseException { + + /* Also return buffer pool memory usage */ + logBuffers.set(nLogBuffers); + nBufferBytes.set((long) nLogBuffers * logBufferSize); + + return stats.cloneGroup(config.getClear()); + } + + /** + * Return the current nCacheMiss statistic in a lightweight fashion, + * without perturbing other statistics or requiring synchronization. + */ + public long getNCacheMiss() { + return nCacheMiss.get(); + } + + /** + * For unit testing. + */ + public StatGroup getBufferPoolLatchStats() { + return bufferPoolLatch.getStats(); + } +} diff --git a/src/com/sleepycat/je/log/LogBufferSegment.java b/src/com/sleepycat/je/log/LogBufferSegment.java new file mode 100644 index 0000000..97a83a5 --- /dev/null +++ b/src/com/sleepycat/je/log/LogBufferSegment.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +/** + * LogBufferSegment is used by a writer to access + * a portion of a LogBuffer. + * + */ +class LogBufferSegment { + private final LogBuffer logBuffer; + private final ByteBuffer data; + + public LogBufferSegment(LogBuffer lb, ByteBuffer bb) { + logBuffer = lb; + data = bb; + } + + /** + * Copies the data into the underlying LogBuffer + * and decrements the LogBuffer pin count. + * @param dataToCopy data to copy into the underlying + * LogBuffer. + */ + public void put(ByteBuffer dataToCopy) { + + /* + * The acquisition of the log buffer latch is + * done to guarantee the java happens-before + * semantic. There is no other reason to take the + * latch here. + */ + logBuffer.latchForWrite(); + data.put(dataToCopy); + logBuffer.release(); + logBuffer.free(); + } +} diff --git a/src/com/sleepycat/je/log/LogEntryHeader.java b/src/com/sleepycat/je/log/LogEntryHeader.java new file mode 100644 index 0000000..ca4b72a --- /dev/null +++ b/src/com/sleepycat/je/log/LogEntryHeader.java @@ -0,0 +1,629 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log;
+
+import java.nio.ByteBuffer;
+import java.util.zip.Checksum;
+
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.utilint.Adler32;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * A LogEntryHeader embodies the header information at the beginning of each
+ * log entry.
+ */
+public class LogEntryHeader {
+
+    /**
+     * Persistent fields. Layout on disk is
+     * (invariant) checksum - 4 bytes
+     * (invariant) entry type - 1 byte
+     * (invariant) entry flags - 1 byte
+     * (invariant) offset of previous log entry - 4 bytes
+     * (invariant) item size (not counting header size) - 4 bytes
+     * (optional) vlsn - 8 bytes
+     *
+     * Flags:
+     * The provisional bit can be set for any log type in the log. It's an
+     * indication to recovery that the entry shouldn't be processed when
+     * rebuilding the tree. See com.sleepycat.je.log.Provisional.java for
+     * the reasons why it's set.
+     *
+     * The replicated bit is set when this particular log entry is
+     * part of the replication stream and contains a VLSN in the header.
+     *
+     * The invisible bit is set when this log entry has been rolled back as
+     * part of replication syncup. The ensuing log entry has not been
+     * checksum-corrected, and to read it, the invisible bit must be cloaked.
+     *
+     * The VLSN_PRESENT bit is set when a VLSN is present for log version 8+,
+     * and is set when the replicated bit is *not* set in the case of a
+     * cleaner migrated LN. Prior to version 8, the replicated bit alone
+     * indicates that a VLSN is present. For all versions, if the replicated
+     * bit is set then a VLSN is always present. [#19476]
+     *
+     *                      first version of       migrated LN
+     *                      a replicated LN
+     *                      ----------------       -----------
+     * log version 7-       replicated = true      replicated = false
+     * (JE 4.1 and          vlsn present = false   vlsn present = false
+     *  earlier)            vlsn exists in header  no vlsn in header
+     *
+     * log version 8+       replicated = true      replicated = false
+     *  preserve record     vlsn present = true    vlsn present = false
+     *  version = false     vlsn exists in header  no vlsn in header
+     *
+     * log version 8+       replicated = true      replicated = false
+     *  preserve record     vlsn present = true    vlsn present = true
+     *  version = true      vlsn exists in header  vlsn exists in header
+     */
+
+    /* The invariant size of the log entry header. */
+    public static final int MIN_HEADER_SIZE = 14;
+
+    /* Only used for tests and asserts. */
+    public static final int MAX_HEADER_SIZE = MIN_HEADER_SIZE + VLSN.LOG_SIZE;
+
+    public static final int CHECKSUM_BYTES = 4;
+
+    static final int ENTRYTYPE_OFFSET = 4;
+    static final int FLAGS_OFFSET = 5;
+    private static final int PREV_OFFSET = 6;
+    private static final int ITEMSIZE_OFFSET = 10;
+    public static final int VLSN_OFFSET = MIN_HEADER_SIZE;
+
+    /*
+     * Flags defined in the entry header.
+     *
+     * WARNING: Flags may not be defined or used in the entry header of the
+     * FileHeader. All flags defined here may only be used in log entries
+     * other than the FileHeader. 
[#16939] + */ + private static final byte PROVISIONAL_ALWAYS_MASK = (byte) 0x80; + private static final byte PROVISIONAL_BEFORE_CKPT_END_MASK = (byte) 0x40; + private static final byte REPLICATED_MASK = (byte) 0x20; + private static final byte INVISIBLE = (byte) 0x10; + private static final byte IGNORE_INVISIBLE = ~INVISIBLE; + private static final byte VLSN_PRESENT = (byte) 0x08; + /* Flags stored in version byte for logVersion 6 and below.*/ + private static final byte VERSION_6_FLAGS = + PROVISIONAL_ALWAYS_MASK | + PROVISIONAL_BEFORE_CKPT_END_MASK | + REPLICATED_MASK; + private static final byte IGNORE_VERSION_6_FLAGS = ~VERSION_6_FLAGS; + + private static final byte FILE_HEADER_TYPE_NUM = + LogEntryType.LOG_FILE_HEADER.getTypeNum(); + + private long checksumVal; // stored in 4 bytes as an unsigned int + private final byte entryType; + private long prevOffset; + private final int itemSize; + private VLSN vlsn; + + /* + * Prior to log version 6, a type-specific version was stored in each + * entry, and was packed together with the flags in a single byte. + * + * For version 6, we changed to use a global version (not type specific), + * but it was stored in each entry, packed with the flags as in earlier + * versions, as well as being stored redundantly in the FileHeader. The + * entry header and file header versions are always the same for all + * entries in a file. We flip the log file to guarantee this, when running + * for the first time with an upgraded JE with a new log version. + * + * For version 7 and above, the version is stored only in the FileHeader, + * freeing the space formerly taken by the version in each entry for use + * by flag bits. The version is not stored in each entry; however, the + * version is still maintained in this in-memory object for two reasons: + * + * 1. When reading log files prior to version 6, each entry potentially has + * a different version. + * 2. Convenience of access to the version when processing log entries. + * + * [#16939] + */ + private int entryVersion; + + /* Version flag fields */ + private Provisional provisional; + private boolean replicated; + private boolean invisible; + private boolean vlsnPresent; + + /** + * For reading a log entry. + * + * @param entryBuffer the buffer containing at least the first + * MIN_HEADER_SIZE bytes of the entry header. + * + * @param logVersion is the log version of the file that contains the given + * buffer, and is obtained from the file header. Note that for the file + * header entry itself, UNKNOWN_FILE_HEADER_VERSION may be passed. + * + * @param lsn is the LSN of the entry, for exception reporting. + */ + public LogEntryHeader(ByteBuffer entryBuffer, int logVersion, long lsn) + throws ChecksumException { + + assert logVersion == LogEntryType.UNKNOWN_FILE_HEADER_VERSION || + (logVersion >= LogEntryType.FIRST_LOG_VERSION && + logVersion <= LogEntryType.LOG_VERSION) : logVersion; + + checksumVal = LogUtils.readUnsignedInt(entryBuffer); + entryType = entryBuffer.get(); + if (!LogEntryType.isValidType(entryType)) { + throw new ChecksumException( + "Invalid log entry type: " + entryType + + " lsn=" + DbLsn.getNoFormatString(lsn) + + " bufPosition=" + entryBuffer.position() + + " bufRemaining=" + entryBuffer.remaining()); + } + + if (entryType == FILE_HEADER_TYPE_NUM) { + /* Actual version will be set by setFileHeaderVersion. */ + entryVersion = LogEntryType.UNKNOWN_FILE_HEADER_VERSION; + /* Discard flags byte: none are allowed for the file header. 
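The byte is always written as zero for this entry type, so skipping it loses nothing. 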
*/ + entryBuffer.get(); + initFlags(0); + } else { + if (logVersion == LogEntryType.UNKNOWN_FILE_HEADER_VERSION ) { + /* + * If we are reading a log header the type should be + * FILE_HEADER_TYPE_NUM. + */ + throw new ChecksumException( + "Wrong entry type for header: " + entryType + + " lsn=" + DbLsn.getNoFormatString(lsn) + + " bufPosition=" + entryBuffer.position() + + " bufRemaining=" + entryBuffer.remaining()); + } else if (logVersion <= 6) { + /* Before version 7, flags and version were packed together. */ + entryVersion = entryBuffer.get(); + initFlags(entryVersion & VERSION_6_FLAGS); + entryVersion &= IGNORE_VERSION_6_FLAGS; + /* For log version 6, the entry version should always match. */ + assert (logVersion == 6) ? (entryVersion == 6) : true; + } else { + /* For log version 7+, only flags are stored in the entry. */ + entryVersion = logVersion; + initFlags(entryBuffer.get()); + } + } + prevOffset = LogUtils.readUnsignedInt(entryBuffer); + itemSize = LogUtils.readInt(entryBuffer); + if (itemSize < 0) { + throw new ChecksumException( + "Invalid log entry size: " + itemSize + + " lsn=" + DbLsn.getNoFormatString(lsn) + + " bufPosition=" + entryBuffer.position() + + " bufRemaining=" + entryBuffer.remaining()); + } + } + + /** + * For writing a log header. public for unit tests. + */ + public LogEntryHeader(LogEntry entry, + Provisional provisional, + ReplicationContext repContext) { + + LogEntryType logEntryType = entry.getLogType(); + entryType = logEntryType.getTypeNum(); + entryVersion = LogEntryType.LOG_VERSION; + this.itemSize = entry.getSize(); + this.provisional = provisional; + + assert (!((!logEntryType.isReplicationPossible()) && + repContext.inReplicationStream())) : + logEntryType + " should never be replicated."; + + if (logEntryType.isReplicationPossible()) { + this.replicated = repContext.inReplicationStream(); + } else { + this.replicated = false; + } + invisible = false; + + /* + * If we about to write a new replicated entry, the VLSN will be null + * and mustGenerateVLSN will return true. For a cleaner migrated LN + * that was replicated, the VLSN will be non-null and mustGenerateVLSN + * will return false. [#19476] + */ + vlsnPresent = repContext.getClientVLSN() != null || + repContext.mustGenerateVLSN(); + } + + /** + * For reading a replication message. The node-specific parts of the header + * are not needed. + */ + public LogEntryHeader(byte entryType, + int entryVersion, + int itemSize, + VLSN vlsn) { + + assert ((vlsn != null) && !vlsn.isNull()) : + "vlsn = " + vlsn; + + this.entryType = entryType; + this.entryVersion = entryVersion; + this.itemSize = itemSize; + this.vlsn = vlsn; + replicated = true; + vlsnPresent = true; + provisional = Provisional.NO; + } + + private void initFlags(int entryFlags) { + if ((entryFlags & PROVISIONAL_ALWAYS_MASK) != 0) { + provisional = Provisional.YES; + } else if ((entryFlags & PROVISIONAL_BEFORE_CKPT_END_MASK) != 0) { + provisional = Provisional.BEFORE_CKPT_END; + } else { + provisional = Provisional.NO; + } + replicated = ((entryFlags & REPLICATED_MASK) != 0); + invisible = ((entryFlags & INVISIBLE) != 0); + vlsnPresent = ((entryFlags & VLSN_PRESENT) != 0) || replicated; + } + + /** + * Called to set the version for a file header entry after reading the + * version from the item data. See FileHeaderEntry.readEntry. 
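A minimal sketch of how the single flags byte maps to header state, mirroring initFlags above. The mask values are copied from the constants defined earlier in this class; the class and method names are illustrative only.

// Decompose one flags byte the way initFlags does.
final class FlagsSketch {
    static final byte PROVISIONAL_ALWAYS = (byte) 0x80;
    static final byte PROVISIONAL_BEFORE_CKPT_END = (byte) 0x40;
    static final byte REPLICATED = (byte) 0x20;
    static final byte INVISIBLE = (byte) 0x10;
    static final byte VLSN_PRESENT = (byte) 0x08;

    static String describe(byte flags) {
        String prov = (flags & PROVISIONAL_ALWAYS) != 0 ? "YES"
            : (flags & PROVISIONAL_BEFORE_CKPT_END) != 0 ? "BEFORE_CKPT_END"
            : "NO";
        boolean replicated = (flags & REPLICATED) != 0;
        /* A VLSN is present if its flag is set or the entry is replicated. */
        boolean vlsnPresent = (flags & VLSN_PRESENT) != 0 || replicated;
        return "prov=" + prov + " replicated=" + replicated +
            " invisible=" + ((flags & INVISIBLE) != 0) +
            " vlsnPresent=" + vlsnPresent;
    }
}

For example, describe((byte) 0x28) reports a replicated entry with a VLSN present, which is the normal shape of a replicated LN written at log version 8 or later.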
+     * [#16939]
+     */
+    public void setFileHeaderVersion(final int logVersion) {
+        entryVersion = logVersion;
+    }
+
+    public long getChecksum() {
+        return checksumVal;
+    }
+
+    public byte getType() {
+        return entryType;
+    }
+
+    public int getVersion() {
+        return entryVersion;
+    }
+
+    public long getPrevOffset() {
+        return prevOffset;
+    }
+
+    public int getItemSize() {
+        return itemSize;
+    }
+
+    public int getEntrySize() {
+        return getSize() + getItemSize();
+    }
+
+    public VLSN getVLSN() {
+        return vlsn;
+    }
+
+    public boolean getReplicated() {
+        return replicated;
+    }
+
+    public Provisional getProvisional() {
+        return provisional;
+    }
+
+    public boolean isInvisible() {
+        return invisible;
+    }
+
+    public int getVariablePortionSize() {
+        return VLSN.LOG_SIZE;
+    }
+
+    /**
+     * @return number of bytes used to store this header
+     */
+    public int getSize() {
+        if (vlsnPresent) {
+            return MIN_HEADER_SIZE + VLSN.LOG_SIZE;
+        }
+        return MIN_HEADER_SIZE;
+    }
+
+    /**
+     * @return the number of bytes used to store the header, excepting
+     * the checksum field.
+     */
+    int getSizeMinusChecksum() {
+        return getSize() - CHECKSUM_BYTES;
+    }
+
+    /**
+     * @return the number of invariant bytes used to store the header,
+     * excepting the checksum field.
+     */
+    int getInvariantSizeMinusChecksum() {
+        return MIN_HEADER_SIZE - CHECKSUM_BYTES;
+    }
+
+    /**
+     * Assumes this is called directly after the constructor, and that the
+     * entryBuffer is positioned right before the VLSN.
+     */
+    public void readVariablePortion(ByteBuffer entryBuffer) {
+        if (vlsnPresent) {
+            vlsn = new VLSN();
+            vlsn.readFromLog(entryBuffer, entryVersion);
+        }
+    }
+
+    /**
+     * Serialize this object into the buffer and leave the buffer positioned
+     * in the right place to write the following item. The checksum,
+     * prevEntry, and vlsn values will be filled in later on.
+     *
+     * public for unit tests.
+     */
+    public void writeToLog(ByteBuffer entryBuffer) {
+
+        /* Skip over the checksumVal, proceed to the entry type. */
+        entryBuffer.position(ENTRYTYPE_OFFSET);
+        entryBuffer.put(entryType);
+
+        /* Flags */
+        byte flags = 0;
+        if (provisional == Provisional.YES) {
+            flags |= PROVISIONAL_ALWAYS_MASK;
+        } else if (provisional == Provisional.BEFORE_CKPT_END) {
+            flags |= PROVISIONAL_BEFORE_CKPT_END_MASK;
+        }
+        if (replicated) {
+            flags |= REPLICATED_MASK;
+        }
+        if (vlsnPresent) {
+            flags |= VLSN_PRESENT;
+        }
+        entryBuffer.put(flags);
+
+        /*
+         * Leave room for the prev offset, which must be added under
+         * the log write latch. Proceed to write the item size.
+         */
+        entryBuffer.position(ITEMSIZE_OFFSET);
+        LogUtils.writeInt(entryBuffer, itemSize);
+
+        /*
+         * Leave room for a VLSN if needed, which must also be generated
+         * under the log write latch.
+         */
+        if (vlsnPresent) {
+            entryBuffer.position(entryBuffer.position() + VLSN.LOG_SIZE);
+        }
+    }
+
+    /**
+     * Add those parts of the header that must be calculated later to the
+     * entryBuffer, and also assign the fields in this class.
+     * That's
+     * - the prev offset, which must be done within the log write latch to
+     *   be sure what that lsn is
+     * - the VLSN, for the same reason
+     * - the checksumVal, which must be added last, after all other
+     *   fields are marshalled.
+     * (public for unit tests)
+     */
+    public ByteBuffer addPostMarshallingInfo(ByteBuffer entryBuffer,
+                                             long lastOffset,
+                                             VLSN vlsn) {
+
+        /* Add the prev pointer. */
+        prevOffset = lastOffset;
+        entryBuffer.position(PREV_OFFSET);
+        LogUtils.writeUnsignedInt(entryBuffer, prevOffset);
+
+        if (vlsn != null) {
+            this.vlsn = vlsn;
+            entryBuffer.position(VLSN_OFFSET);
+            vlsn.writeToLog(entryBuffer);
+        }
+
+        /*
+         * Now calculate the checksumVal and write it into the buffer. Be
+         * sure to set the field in this instance, for use later when
+         * printing or debugging the header.
+         */
+        Checksum checksum = Adler32.makeChecksum();
+        checksum.update(entryBuffer.array(),
+                        entryBuffer.arrayOffset() + CHECKSUM_BYTES,
+                        entryBuffer.limit() - CHECKSUM_BYTES);
+        entryBuffer.position(0);
+        checksumVal = checksum.getValue();
+        LogUtils.writeUnsignedInt(entryBuffer, checksumVal);
+
+        /* Leave this buffer ready for copying into another buffer. */
+        entryBuffer.position(0);
+
+        return entryBuffer;
+    }
+
+    /**
+     * @param sb destination string buffer
+     * @param verbose if true, dump the full, verbose version
+     */
+    public void dumpLog(StringBuilder sb, boolean verbose) {
+        sb.append("<hdr ");
+        dumpLogNoTag(sb, verbose);
+        sb.append("\"/>");
+    }
+
+    /**
+     * Dump the header without enclosing <hdr> tags. Used for DbPrintLog, to
+     * make the header attributes in the <entry> tag, for a more compact
+     * rendering.
+     * @param sb destination string buffer
+     * @param verbose if true, dump the full, verbose version
+     */
+    void dumpLogNoTag(StringBuilder sb, boolean verbose) {
+        LogEntryType lastEntryType = LogEntryType.findType(entryType);
+
+        sb.append("type=\"").append(lastEntryType.toStringNoVersion()).
+            append("/").append(entryVersion);
+        if (provisional != Provisional.NO) {
+            sb.append("\" prov=\"");
+            sb.append(provisional);
+        }
+
+        if (vlsn != null) {
+            sb.append("\" ");
+            vlsn.dumpLog(sb, verbose);
+        } else {
+            sb.append("\"");
+        }
+
+        if (getReplicated()) {
+            sb.append(" isReplicated=\"1\"");
+        }
+
+        if (isInvisible()) {
+            sb.append(" isInvisible=\"1\"");
+        }
+
+        sb.append(" prev=\"0x").append(Long.toHexString(prevOffset));
+        if (verbose) {
+            sb.append("\" size=\"").append(itemSize);
+            sb.append("\" cksum=\"").append(checksumVal);
+        }
+    }
+
+    /**
+     * For use in the special case where commits are transformed to aborts
+     * because of i/o errors during a logBuffer flush. See [11271].
+     * Assumes that the entryBuffer is positioned at the start of the item.
+     * Returns with the entryBuffer positioned to the end of the log entry.
+     */
+    void convertCommitToAbort(ByteBuffer entryBuffer) {
+        assert (entryType == LogEntryType.LOG_TXN_COMMIT.getTypeNum());
+
+        /* Remember the start of the entry item. */
+        int itemStart = entryBuffer.position();
+
+        /* Back up to where the type is stored and change the type. */
+        int entryTypePosition =
+            itemStart - (getSize() - ENTRYTYPE_OFFSET);
+        entryBuffer.position(entryTypePosition);
+        entryBuffer.put(LogEntryType.LOG_TXN_ABORT.getTypeNum());
+
+        /*
+         * Recalculate the checksum. This byte buffer could be large,
+         * so don't just turn the whole buffer into an array to pass
+         * into the checksum object.
+         */
+        Checksum checksum = Adler32.makeChecksum();
+        int checksumSize = itemSize + (getSize() - CHECKSUM_BYTES);
+        checksum.update(entryBuffer.array(),
+                        entryTypePosition + entryBuffer.arrayOffset(),
+                        checksumSize);
+        entryBuffer.position(itemStart - getSize());
+        checksumVal = checksum.getValue();
+        LogUtils.writeUnsignedInt(entryBuffer, checksumVal);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        dumpLog(sb, true /* verbose */);
+        return sb.toString();
+    }
+
+    /*
+     * Dump only the parts of the header that apply for replicated entries.
+     */
+    public void dumpRep(StringBuilder sb) {
+
+        LogEntryType lastEntryType = LogEntryType.findType(entryType);
+
+        sb.append(lastEntryType.toStringNoVersion()).
+            append("/").append(entryVersion);
+
+        if (vlsn != null) {
+            sb.append(" vlsn=" ).append(vlsn);
+        } else {
+            sb.append("\"");
+        }
+
+        if (getReplicated()) {
+            sb.append(" isReplicated=\"1\"");
+        }
+
+        if (isInvisible()) {
+            sb.append(" isInvisible=\"1\"");
+        }
+    }
+
+    /**
+     * @return true if two log headers are logically the same. This check
+     * will ignore the log version.
+     *
+     * Used by replication.
+     */
+    public boolean logicalEqualsIgnoreVersion(LogEntryHeader other) {
+
+        /*
+         * Note that item size is not part of the logical equality, because
+         * on-disk compression can make itemSize vary if the entry has VLSNs
+         * that were packed differently.
+ */ + return ((getVLSN().equals(other.getVLSN())) && + (getReplicated() == other.getReplicated()) && + (isInvisible() == other.isInvisible()) && + (LogEntryType.compareTypeAndVersion(getVersion(), getType(), + other.getVersion(), + other.getType()))); + } + + /** + * May be called after reading MIN_HEADER_SIZE bytes to determine + * whether more bytes (getVariablePortionSize) should be read. + */ + public boolean isVariableLength() { + /* Currently only entries with VLSNs are variable length. */ + return vlsnPresent; + } + + /** + * Set the invisible bit in the given log entry flags. + */ + static byte makeInvisible(byte flags) { + return flags |= INVISIBLE; + } + + /** + * Turn off the invisible bit in the byte buffer which backs this log entry + * header. + * @param logHeaderStartPosition the byte position of the start of the log + * entry header. + */ + public static void turnOffInvisible(ByteBuffer buffer, + int logHeaderStartPosition) { + + int flagsPosition = logHeaderStartPosition + FLAGS_OFFSET; + byte flags = buffer.get(flagsPosition); + flags &= IGNORE_INVISIBLE; + buffer.put(flagsPosition, flags); + } +} diff --git a/src/com/sleepycat/je/log/LogEntryType.java b/src/com/sleepycat/je/log/LogEntryType.java new file mode 100644 index 0000000..b380e3d --- /dev/null +++ b/src/com/sleepycat/je/log/LogEntryType.java @@ -0,0 +1,1055 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.log.entry.AbortLogEntry; +import com.sleepycat.je.log.entry.BINDeltaLogEntry; +import com.sleepycat.je.log.entry.CommitLogEntry; +import com.sleepycat.je.log.entry.DeletedDupLNLogEntry; +import com.sleepycat.je.log.entry.FileHeaderEntry; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.MatchpointLogEntry; +import com.sleepycat.je.log.entry.NameLNLogEntry; +import com.sleepycat.je.log.entry.OldBINDeltaLogEntry; +import com.sleepycat.je.log.entry.ReplicableLogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.log.entry.TraceLogEntry; + +/** + * LogEntryType is an enumeration of all log entry types. + * + *
+ * <p>Log entries are versioned. When changing the persistent form of a log
+ * entry in any way that is incompatible with prior releases, make sure the
+ * LogEntry instance is capable of reading in older versions from the log and
+ * be sure to increment LOG_VERSION. The LogEntry.readEntry and
+ * Loggable.readFromLog methods should check the actual version of the entry.
+ * If it is less than LOG_VERSION, the old version should be converted to the
+ * current version.
+ *
+ * <p>Prior to LOG_VERSION 6, each log entry type had a separate version
+ * number that was incremented only when that log version changed. From
+ * LOG_VERSION 6 onward, all types use the same version, the LOG_VERSION
+ * constant. For versions prior to 6, the readEntry and readFromLog methods
+ * will be checking the old per-type version. There is no overlap between the
+ * old per-type versions and the LOG_VERSION values, because the per-type
+ * values are all below 6. [#15365]
+ *
+ * <p>The LogEntry instance must be sure that older versions are converted in
+ * memory into a correct instance of the newest version, so that when the
+ * LogEntry object is written again as the result of migration or eviction,
+ * the resulting new log entry conforms to the requirements of the new
+ * version. If context objects are required for data conversion, the
+ * conversion can be done in the Node.postFetchInit method.
+ *
+ * <p>Starting with LOG_VERSION 9, log entries that can be included in the
+ * replication stream must be able to write themselves in the format for the
+ * immediately previous log version, to allow replication during an upgrade
+ * when the master has been upgraded and a replica has not. Starting with
+ * LOG_VERSION 8, log entries that support replication must implement {@link
+ * ReplicableLogEntry}. When changes are made to replicable log entries for
+ * LOG_VERSION 9 and later, those entries need to support writing in the
+ * previous version's format.
        + */ +public class LogEntryType { + + /** + * Version of the file header, which identifies the version of all entries + * in that file. + * + * Changes to log entries for each version are: + * + * Version 3 + * --------- + * [12328] Add main and dupe tree fanout values for DatabaseImpl. + * [12557] Add IN LSN array compression. + * [11597] Add a change to FileSummaryLNs: obsolete offset tracking was + * added and multiple records are stored for a single file rather than a + * single record. Each record contains the offsets that were tracked since + * the last record was written. + * [11597] Add the full obsolete LSN in LNLogEntry. + * + * Version 4 + * --------- + * [#14422] Bump MapLN version from 1 to 2. Instead of a String for the + * comparator class name, store either a serialized string or Comparator. + * + * Version 5 + * --------- + * [#15195] FileSummaryLN version 3. Add FileSummary.obsoleteLNSize and + * obsoleteLNSizeCounted fields. + * + * Version 6 (in JE 3.3.X) + * --------- + * [#15365] From this point onward, all log entries have the same version, + * LOG_VERSION, rather than using per-type versions. + * [#15365] DatabaseImpl stores a map of DbFileSummaries. + * + * [#13467] Convert duplicatesAllowed boolean to DUPS_ALLOWED_BIT flag in + * DatabaseImpl. Add REPLICATED_BIT flag to DatabaseImpl. + * [#13467] Add REPLICATED_BIT to DbTree. + * [#13467] Add ReplicatedDatabaseConfig to NameLN_TX to support + * replication of database operations. + * + * [#15581] Add lastAllocateReplicatedDbId to DbTree + * [#16083] Add replication master node ID to txn commit/abort + * + * Version 7 (in JE 4.0) + * --------------------- + * Add the invisible bit in the entry header version field. + * Add the RollbackStart log entry type + * Add the RollbackEnd log entry type + * Add the Matchpoint log entry type. + * + * Version 8 (in JE 5.0) + * --------------------- + * Made provisions for storing Triggers in a DatabaseImpl. + * + * Database IDs enlarged from int or packed int, to long or packed long + * (note that packed int and packed long are compatible). [#18540] + * + * Add new log entry types for LN delete, insert and update. [#18055] + * + * Apply optimization to omit key size for some internal LNs, in addition + * to user LNs. [#18055] + * + * LN no longer has node ID. [#18633] + * + * Add FileSummary.maxLNSize. [#18633] + * + * VLSN is optionally maintained in LogEntryHeader for cleaner migrated LNs + * and a new VLSN_PRESENT entry header flag is used to signify the presence + * of the VLSN. PRESERVE_VLSN_BIT was added to DbTree to correspond to the + * je.env.preserveRecordVersion environment config param. [#19476] + * + * Dup tree representation changed to use two-part keys. Deprecated: DIN, + * DBIN, DupCountLN, INDeleteInfo, INDupDeleteInfo. Removed + * DatabaseImpl.maxDupTreeEntriesPerNode. [#19165] + * + * Version 9 (in JE 6.0) + * --------------------- + * See comment above about ReplicableLogEntry. + * + * BIN-deltas are now represented as BINs using the new BINDeltaLogEntry + * (log entry type NewBINDelta). + * + * Version 10 (in JE 6.2) + * ---------------------- + * Each BIN-delta stores the total and max number of entries in the + * previous full version of the same BIN. A BIN-delta may also store a + * bloom filter for the keys in the full BIN. + * + * Version 11 (in JE 6.3) + * ---------------------- + * LN log records have additional info to handle embedded records. See + * LNLogEntry for details. Also, BIN log records include the VLSNs of + * embedded records. 
+ * + * Added LOG_IMMUTABLE_FILE entry type. + * + * Version 12 (in JE 7.0) + * ---------------------- + * Added expiration info to LNs and BIN slots. The LN's expiration time is + * replicated, so this changes the replication format. Also added the "have + * abort LSN" flag, to avoid writing a byte for a null LSN, and moved the + * flags to the front to support the new forReplication format. + * + * For ReplicableLogEntry and VersionedWriteLoggable, added the + * forReplication parameter to write methods to support a replication + * format variation. This allows omitting some fields from an entry when + * they are not needed by replication. + * + * LNLogEntry in forReplication mode no longer includes the abortLSN or the + * abortKnownDeleted flag. This is a format change because these were + * used by Replay in earlier versions. + * + * Txn and VersionedWriteTxnEnd (Commit and Abort) in forReplication mode + * now always includes a null prevLsn field, which only occupies one byte. + * However, this is not strictly a format change, because this field has + * never been used by Replay. + * + * Version 13 (in JE 7.1) + * ---------------------- + * + * Added dtvlsn field to LOG_TXN_END entry to support efficient persistent + * tracking of the DTVLN (Durable Transaction VLSN). + * + * Version 14 (in JE 7.3) + * ---------------------- + * + * Added LOG_RESTORE_REQUIRED to indicate that the environment's log is no + * longer consistent, and some curative action must happen before the + * environment can be recovered. + * + * Version 15 (in JE 7.5) + * ---------------------- + * + * Fixed a bug in mutation of a BIN-delta to a full BIN where the + * identifierKey was not set correctly. The identifierKey can only be + * checked by the BtreeVerifier when the log was initially created using + * log version 15 or greater. Added new field DbTree.initialLogVersion + * to support this. + * + * The GlobalCBVLSN is no longer updated in the rep group DB when all + * nodes in a rep group have been updated to 7.5 or later. The network + * restore protocol no longer relies on the GlobalCBVLSN. + * + * The _jeReservedFilesDb internal DB (DbType.RESERVED_FILES) was added. + * Uses a new LN log entry type: LOG_RESERVED_FILE_LN. Contains metadata + * used to manage reserved files. + */ + public static final int LOG_VERSION = 15; + + /** + * The latest log version for which the replicated log format of any + * replicable log entry class changed. Replication uses this value to + * determine if the latest version of the replication stream can be + * understood by an earlier software version. This field is needed to + * account for cases where log entry format changes only apply to + * non-replicable entries, or only to the local, not replicated, form of + * replicable entries, the as was the case for log versions 9, 10, and 11. + */ + public static final int LOG_VERSION_HIGHEST_REPLICABLE = 13; + + public static final int FIRST_LOG_VERSION = 1; + + /** + * The earliest log version for which replicable log entries support + * writing themselves in older versions, to support replication to + * older nodes during upgrades. + */ + public static final int LOG_VERSION_REPLICATE_OLDER = 9; + + /* + * The log version that added expiration info to LNs and BIN slots for JE + * 7.0. + */ + public static final int LOG_VERSION_EXPIRE_INFO = 12; + + /* + * The log version that introduced the dtvlsn field in commit log entries. 
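A hedged sketch of how readers typically consume per-feature version gates like the constants defined here: the writer's log version, taken from the file header or entry header, decides which optional fields are present in an entry. The helper names below are illustrative, not JE API.

// Illustrative version gates keyed to the constants defined in this class.
final class VersionGateSketch {
    static final int LOG_VERSION_EXPIRE_INFO = 12;
    static final int LOG_VERSION_DURABLE_VLSN = 13;

    /* Expiration info appears in LNs and BIN slots from version 12 on. */
    static boolean hasExpirationInfo(int entryVersion) {
        return entryVersion >= LOG_VERSION_EXPIRE_INFO;
    }

    /* Commit entries carry a dtvlsn field from version 13 on. */
    static boolean hasDurableVlsn(int entryVersion) {
        return entryVersion >= LOG_VERSION_DURABLE_VLSN;
    }
}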
+ */ + public static final int LOG_VERSION_DURABLE_VLSN = 13; + + /** + * Should be used for reading the entry header of the file header, since + * the actual version is not known until the FileHeader item is read. + */ + public static final int UNKNOWN_FILE_HEADER_VERSION = -1; + + /* + * Collection of log entry type classes, used to read the log. Note that + * this must be declared before any instances of LogEntryType, since the + * constructor uses this map. Each statically defined LogEntryType should + * register itself with this collection. + */ + private static final int MAX_TYPE_NUM = 40; + private static LogEntryType[] LOG_TYPES = new LogEntryType[MAX_TYPE_NUM]; + + /* + * Enumeration of log entry types. The log entry type represents the 2 byte + * field that starts every log entry. The top byte is the log type, the + * bottom byte holds the version value, provisional bit, replicated bit, + * and invisible bit. + * + * Log type(8 bits) + * Provisional(2 bits) Replicated(1 bit) Invisible(1 bit) Version(5 bits) + * + * The top byte (log type) identifies the type and can be used to lookup + * the LogEntryType object, while the bottom byte has information about the + * entry (instance) of this type. The bottom byte is effectively entry + * header information that is common to all types and is managed by methods + * in LogEntryHeader. See LogEntryHeader.java + */ + + /* Node types */ + + /* + * Deprecated transactional LN entry type, use LOG_DEL_LN_TRANSACTIONAL, + * LOG_INS_LN_TRANSACTIONAL, LOG_UPD_LN_TRANSACTIONAL instead according to + * the operation type. + */ + public static final LogEntryType LOG_OLD_LN_TRANSACTIONAL = + createReplicableLogEntryType( + (byte) 1, "LN_TX", + LNLogEntry.create(com.sleepycat.je.tree.LN.class), + Txnal.TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, + NodeType.LN_USER); + + /* + * Deprecated LN entry type, use LOG_DEL_LN, LOG_INS_LN, LOG_UPD_LN instead + * according to the operation type. + */ + public static final LogEntryType LOG_OLD_LN = + createReplicableLogEntryType( + (byte) 2, "LN", + LNLogEntry.create(com.sleepycat.je.tree.LN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, + NodeType.LN_USER); + + /* + * Never used + */ + public static final LogEntryType LOG_MAPLN_TRANSACTIONAL = + new LogEntryType + ((byte) 3, "MapLN_TX", + LNLogEntry.create(com.sleepycat.je.tree.MapLN.class), + Txnal.TXNAL, + Marshall.INSIDE_LATCH, /* Logging changes DB utilization. */ + NodeType.LN_INTERNAL); + + public static final LogEntryType LOG_MAPLN = + new LogEntryType + ((byte) 4, "MapLN", + LNLogEntry.create(com.sleepycat.je.tree.MapLN.class), + Txnal.NON_TXNAL, + Marshall.INSIDE_LATCH, /* Logging changes DB utilization. */ + NodeType.LN_INTERNAL); + + public static final LogEntryType LOG_NAMELN_TRANSACTIONAL = + createReplicableLogEntryType( + (byte) 5, "NameLN_TX", + new NameLNLogEntry(), + Txnal.TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, + NodeType.LN_INTERNAL); + + public static final LogEntryType LOG_NAMELN = + createReplicableLogEntryType( + (byte) 6, "NameLN", + new NameLNLogEntry(), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, + NodeType.LN_INTERNAL); + + /* Obsolete in version 8, only used by some log readers. 
*/ + public static final LogEntryType LOG_DEL_DUPLN_TRANSACTIONAL = + createReplicableLogEntryType( + (byte) 7, "DelDupLN_TX", + new DeletedDupLNLogEntry(), + Txnal.TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, + NodeType.LN_USER); + + /* Obsolete in version 8, only used by some log readers. */ + public static final LogEntryType LOG_DEL_DUPLN = + createReplicableLogEntryType( + (byte) 8, "DelDupLN", + new DeletedDupLNLogEntry(), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, + NodeType.LN_USER); + + /* Obsolete in version 8, only used by DupConvert and some log readers. */ + public static final LogEntryType LOG_DUPCOUNTLN_TRANSACTIONAL = + new LogEntryType + ((byte) 9, "DupCountLN_TX", + LNLogEntry.create(com.sleepycat.je.tree.dupConvert.DupCountLN.class), + Txnal.TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.OLD_DUP); + + /* Obsolete in version 8, only used by DupConvert and some log readers. */ + public static final LogEntryType LOG_DUPCOUNTLN = + new LogEntryType + ((byte) 10, "DupCountLN", + LNLogEntry.create(com.sleepycat.je.tree.dupConvert.DupCountLN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.OLD_DUP); + + public static final LogEntryType LOG_FILESUMMARYLN = + new LogEntryType + ((byte) 11, "FileSummaryLN", + LNLogEntry.create(com.sleepycat.je.tree.FileSummaryLN.class), + Txnal.NON_TXNAL, + Marshall.INSIDE_LATCH, /* Logging changes file utilization. */ + NodeType.LN_INTERNAL); + + public static final LogEntryType LOG_IN = + new LogEntryType + ((byte) 12, "IN", + INLogEntry.create(com.sleepycat.je.tree.IN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.IN); + + public static final LogEntryType LOG_BIN = + new LogEntryType + ((byte) 13, "BIN", + INLogEntry.create(com.sleepycat.je.tree.BIN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.IN); + + /* Obsolete in version 8, only used by DupConvert and some log readers. */ + public static final LogEntryType LOG_DIN = + new LogEntryType + ((byte) 14, "DIN", + INLogEntry.create(com.sleepycat.je.tree.dupConvert.DIN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.OLD_DUP); + + /* Obsolete in version 8, only used by DupConvert and some log readers. */ + public static final LogEntryType LOG_DBIN = + new LogEntryType + ((byte) 15, "DBIN", + INLogEntry.create(com.sleepycat.je.tree.dupConvert.DBIN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.OLD_DUP); + + /* + * The root entry of the DbTree, it saves the root information for name + * and id database. + */ + public static final LogEntryType LOG_DBTREE = + new LogEntryType + ((byte) 16, "DbTree", + SingleItemEntry.create( + com.sleepycat.je.dbi.DbTree.class), + Txnal.NON_TXNAL, + Marshall.INSIDE_LATCH, /* Logging changes DB utilization. 
*/ + NodeType.NONE); + + /* Transactional entries */ + public static final LogEntryType LOG_TXN_COMMIT = + createReplicableLogEntryType( + (byte) 17, "Commit", + new CommitLogEntry(), + Txnal.TXNAL, + Marshall.INSIDE_LATCH, /* To ensure DTVLSN is in sync with VLSN */ + Replicable.REPLICABLE_MATCH, + NodeType.NONE); + + public static final LogEntryType LOG_TXN_ABORT = + createReplicableLogEntryType( + (byte) 18, "Abort", + new AbortLogEntry(), + Txnal.TXNAL, + Marshall.INSIDE_LATCH, /* To ensure DTVLSN is in sync with VLSN */ + Replicable.REPLICABLE_MATCH, + NodeType.NONE); + + public static final LogEntryType LOG_CKPT_START = + new LogEntryType + ((byte) 19, "CkptStart", + SingleItemEntry.create( + com.sleepycat.je.recovery.CheckpointStart.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + public static final LogEntryType LOG_CKPT_END = + new LogEntryType + ((byte) 20, "CkptEnd", + SingleItemEntry.create( + com.sleepycat.je.recovery.CheckpointEnd.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + /* Obsolete in version 8, only used by some log readers. */ + public static final LogEntryType LOG_IN_DELETE_INFO = + new LogEntryType + ((byte) 21, "INDelete", + SingleItemEntry.create( + com.sleepycat.je.tree.dupConvert.INDeleteInfo.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + /* Obsolete in version 9, replaced by "live" LOG_BIN_DELTA. */ + public static final LogEntryType LOG_OLD_BIN_DELTA = + new LogEntryType + ((byte) 22, "BINDelta", + new OldBINDeltaLogEntry + (com.sleepycat.je.tree.OldBINDelta.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + /* Obsolete in version 8, only used by some log readers. */ + public static final LogEntryType LOG_OLD_DUP_BIN_DELTA = + new LogEntryType + ((byte) 23, "DupBINDelta", + new OldBINDeltaLogEntry + (com.sleepycat.je.tree.OldBINDelta.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + /* Although this is replicable, it is never replicated except in tests. */ + public static final LogEntryType LOG_TRACE = + createReplicableLogEntryType( + (byte) 24, "Trace", + new TraceLogEntry(), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, + NodeType.NONE); + + /* File header */ + public static final LogEntryType LOG_FILE_HEADER = + new LogEntryType + ((byte) 25, "FileHeader", + new FileHeaderEntry + (com.sleepycat.je.log.FileHeader.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + /* Obsolete in version 8, only used by some log readers. 
*/ + public static final LogEntryType LOG_IN_DUPDELETE_INFO = + new LogEntryType + ((byte) 26, "INDupDelete", + SingleItemEntry.create( + com.sleepycat.je.tree.dupConvert.INDupDeleteInfo.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + public static final LogEntryType LOG_TXN_PREPARE = + new LogEntryType + ((byte) 27, "Prepare", + SingleItemEntry.create( + com.sleepycat.je.txn.TxnPrepare.class), + Txnal.TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + public static final LogEntryType LOG_ROLLBACK_START = + new LogEntryType + ((byte) 28, "RollbackStart", + SingleItemEntry.create( + com.sleepycat.je.txn.RollbackStart.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + public static final LogEntryType LOG_ROLLBACK_END = + new LogEntryType + ((byte) 29, "RollbackEnd", + SingleItemEntry.create( + com.sleepycat.je.txn.RollbackEnd.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + public static final LogEntryType LOG_MATCHPOINT = + createReplicableLogEntryType( + (byte) 30, "Matchpoint", + new MatchpointLogEntry(), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_MATCH, + NodeType.NONE); + + public static final LogEntryType LOG_DEL_LN_TRANSACTIONAL = + new UserLNLogEntryType((byte) 31, "DEL_LN_TX", Txnal.TXNAL); + + public static final LogEntryType LOG_DEL_LN = + new UserLNLogEntryType((byte) 32, "DEL_LN", Txnal.NON_TXNAL); + + public static final LogEntryType LOG_INS_LN_TRANSACTIONAL = + new UserLNLogEntryType((byte) 33, "INS_LN_TX", Txnal.TXNAL); + + public static final LogEntryType LOG_INS_LN = + new UserLNLogEntryType((byte) 34, "INS_LN", Txnal.NON_TXNAL); + + public static final LogEntryType LOG_UPD_LN_TRANSACTIONAL = + new UserLNLogEntryType((byte) 35, "UPD_LN_TX", Txnal.TXNAL); + + public static final LogEntryType LOG_UPD_LN = + new UserLNLogEntryType((byte) 36, "UPD_LN", Txnal.NON_TXNAL); + + public static final LogEntryType LOG_BIN_DELTA = + new LogEntryType( + (byte) 37, "NewBINDelta", + new BINDeltaLogEntry(com.sleepycat.je.tree.BIN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.IN); + + public static final LogEntryType LOG_IMMUTABLE_FILE = + new LogEntryType( + (byte) 38, "ImmutableFile", + SingleItemEntry.create( + com.sleepycat.je.log.entry.EmptyLogEntry.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + public static final LogEntryType LOG_RESTORE_REQUIRED = + new LogEntryType( + (byte) 39, "RestoreRequired", + SingleItemEntry.create( + com.sleepycat.je.log.entry.RestoreRequired.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.NONE); + + public static final LogEntryType LOG_RESERVED_FILE_LN = + new LogEntryType + ((byte) 40, "ReservedFileLN", + LNLogEntry.create(com.sleepycat.je.tree.LN.class), + Txnal.NON_TXNAL, + Marshall.OUTSIDE_LATCH, + NodeType.LN_INTERNAL); + + /*** If you add new types, be sure to update MAX_TYPE_NUM at the top.***/ + + /* Persistent fields */ + private final byte typeNum; // persistent value for this entry type + + /* Transient fields */ + private final String displayName; + private final LogEntry logEntry; + + /* + * Attributes + */ + + /* Whether the log entry holds a transactional information. */ + private Txnal isTransactional; + + /* + * Does this log entry be marshalled outside or inside the log write + * latch. + */ + private Marshall marshallBehavior; + + /* Can this log entry be put in the replication stream? 
*/ + private Replicable replicationPossible; + + private NodeType nodeType; + + /* + * Constructors + */ + + /** + * For base class support. + */ + + /* + * This constructor only used when the LogEntryType is being used as a key + * for a map. No log types can be defined outside this package. + */ + LogEntryType(byte typeNum) { + this.typeNum = typeNum; + displayName = null; + logEntry = null; + } + + /** + * Used to create a map key for reporting that is not a real type. + */ + LogEntryType(byte typeNum, String displayName) { + assert typeNum > MAX_TYPE_NUM; + this.typeNum = typeNum; + this.displayName = displayName; + logEntry = null; + } + + /** + * Create a non-replicable log type. + * + * @param isTransactional whether this type of log entry holds data + * involved in a transaction. For example, transaction commit and LN data + * records are transactional, but INs are not. + * @param marshallBehavior whether this type of log entry may be serialized + * outside the log write latch. This is true of the majority of + * types. Certain types like the FileSummaryLN rely on the log write latch + * to enforce serial semantics. + */ + private LogEntryType(final byte typeNum, + final String displayName, + final LogEntry logEntry, + final Txnal isTransactional, + final Marshall marshallBehavior, + final NodeType nodeType) { + + this(typeNum, displayName, logEntry, isTransactional, marshallBehavior, + Replicable.LOCAL, nodeType); + } + + /** + * Create a replicable log type. + * + * @param isTransactional whether this type of log entry holds data + * involved in a transaction + * @param marshallBehavior whether this type of log entry may be serialized + * outside the log write latch + * @param replicationPossible whether this type of log entry can be shared + * with a replication group + */ + private static LogEntryType createReplicableLogEntryType( + final byte typeNum, + final String displayName, + final ReplicableLogEntry logEntry, + final Txnal isTransactional, + final Marshall marshallBehavior, + final Replicable replicationPossible, + final NodeType nodeType) { + + return new LogEntryType(typeNum, displayName, logEntry, + isTransactional, marshallBehavior, + replicationPossible, nodeType); + } + + /** + * Internal constructor for all log types. Don't create instances using + * this directly, to improve error checking. + */ + private LogEntryType(final byte typeNum, + final String displayName, + final LogEntry logEntry, + final Txnal isTransactional, + final Marshall marshallBehavior, + final Replicable replicationPossible, + final NodeType nodeType) { + + this.typeNum = typeNum; + this.displayName = displayName; + this.logEntry = logEntry; + this.isTransactional = isTransactional; + this.marshallBehavior = marshallBehavior; + this.replicationPossible = replicationPossible; + this.nodeType = nodeType; + logEntry.setLogType(this); + LOG_TYPES[typeNum - 1] = this; + + assert logEntry != null && replicationPossible != null; + assert !replicationPossible.isReplicable() || + logEntry instanceof ReplicableLogEntry + : "Replicable log types must have replicable log entries"; + } + + /** + * @return the static version of this type + */ + public static LogEntryType findType(byte typeNum) { + if (typeNum <= 0 || typeNum > MAX_TYPE_NUM) { + return null; + } + return LOG_TYPES[typeNum - 1]; + } + + /** + * Get a copy of all types for unit testing. 
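A minimal sketch of the self-registering type table used by the constructor and findType above: each statically constructed type slots itself into a fixed array indexed by typeNum - 1, so a lookup by the single persistent type byte is an O(1) array access. The class name is illustrative only.

// Self-registering type table, mirroring LOG_TYPES and findType.
final class TypeRegistrySketch {
    static final int MAX_TYPE_NUM = 40;
    private static final TypeRegistrySketch[] TYPES =
        new TypeRegistrySketch[MAX_TYPE_NUM];

    private final byte typeNum;

    private TypeRegistrySketch(byte typeNum) {
        this.typeNum = typeNum;
        TYPES[typeNum - 1] = this;  // register at construction time
    }

    /* Returns null for anything outside [1, MAX_TYPE_NUM]. */
    static TypeRegistrySketch findType(byte typeNum) {
        return (typeNum <= 0 || typeNum > MAX_TYPE_NUM)
            ? null : TYPES[typeNum - 1];
    }
}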
+ */ + public static Set getAllTypes() { + HashSet ret = new HashSet(); + + for (int i = 0; i < MAX_TYPE_NUM; i++) { + ret.add(LOG_TYPES[i]); + } + return ret; + } + + /** + * @return the log entry type owned by the shared, static version + */ + public LogEntry getSharedLogEntry() { + return logEntry; + } + + /** + * @return a clone of the log entry type for a given log type. + */ + public LogEntry getNewLogEntry() + throws DatabaseException { + + return logEntry.clone(); + } + + public byte getTypeNum() { + return typeNum; + } + + /** + * @return true if type number is valid. + */ + static boolean isValidType(byte typeNum) { + return typeNum > 0 && typeNum <= MAX_TYPE_NUM; + } + + public String toStringNoVersion() { + return displayName; + } + + @Override + public String toString() { + return displayName; + } + + /** + * Check for equality without making a new object. + */ + public boolean equalsType(byte type) { + return (this.typeNum == type); + } + + /* + * Override Object.equals. Ignore provisional bit when checking for + * equality. + */ + @Override + public boolean equals(Object obj) { + /* Same instance? */ + if (this == obj) { + return true; + } + + /* Is it the right type of object? */ + if (!(obj instanceof LogEntryType)) { + return false; + } + + return typeNum == ((LogEntryType) obj).typeNum; + } + + /** + * This is used as a hash key. + */ + @Override + public int hashCode() { + return typeNum; + } + + static enum Txnal { + TXNAL(true), + NON_TXNAL(false); + + private final boolean isTxnal; + + Txnal(boolean isTxnal) { + this.isTxnal = isTxnal; + } + + boolean isTransactional() { + return isTxnal; + } + } + + /** + * Return true if this log entry has transactional information in it, + * like a commit or abort record, or a transactional LN. + */ + public boolean isTransactional() { + return isTransactional.isTransactional(); + } + + static enum Marshall { + OUTSIDE_LATCH(true), + INSIDE_LATCH(false); + + private final boolean marshallOutsideLatch; + + Marshall(boolean marshallOutsideLatch) { + this.marshallOutsideLatch = marshallOutsideLatch; + } + + boolean marshallOutsideLatch() { + return marshallOutsideLatch; + } + } + + /** + * Return true if this log entry should be marshalled into a buffer outside + * the log write latch. Currently, the FileSummaryLN and MapLN (which + * contains DbFileSummary objects) and the commit and abort log entries + * (due to their DTVLSN fields) need to be logged inside the log write + * latch. + */ + public boolean marshallOutsideLatch() { + return marshallBehavior.marshallOutsideLatch(); + } + + /** + * Return true if the type of this LogEntryType is equivalent to typeB. + * Version is used as a factor in the comparison when new log entry types + * are introduced in one release, which supercede existing types. + */ + public boolean compareTypeAndVersion(int versionA, + int versionB, + final byte typeB) { + return typeNum == typeB; + } + + /* + * Indicates whether this type of log entry is shared in a replicated + * environment or not, and whether it can be used as a replication + * matchpoint. 
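A worked example of the type-and-version equivalence rule implemented by compareTypeAndVersion above: a non-transactional INS_LN written by a newer node matches an entry of the deprecated LN type written before log version 8, because LN is the pre-version-8 equivalent of the new user LN types. The example class name is illustrative; the API calls are the public ones defined in this file.

import com.sleepycat.je.log.LogEntryType;

public class TypeEquivalenceExample {
    public static void main(String[] args) {
        boolean same = LogEntryType.compareTypeAndVersion(
            9, LogEntryType.LOG_INS_LN.getTypeNum(),   // newer entry
            7, LogEntryType.LOG_OLD_LN.getTypeNum());  // deprecated old type
        System.out.println(same);  // prints "true"
    }
}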
+ */ + static enum Replicable { + REPLICABLE_MATCH(true, true), + REPLICABLE_NO_MATCH(true, false), + LOCAL(false, false); + + private final boolean isReplicable; + private final boolean isMatchable; + + Replicable(boolean isReplicable, boolean isMatchable) { + this.isReplicable = isReplicable; + this.isMatchable = isMatchable; + } + + boolean isReplicable() { + return isReplicable; + } + + boolean isMatchable() { + return isMatchable; + } + } + + /** + * Return true if this type of log entry can be part of the replication + * stream. For example, INs can never be replicated, while LNs are + * replicated only if their owning database is replicated. + */ + public boolean isReplicationPossible() { + return replicationPossible.isReplicable(); + } + + /** + * Return true if this type of log entry can serve as the synchronization + * matchpoint for the replication stream. That generally means that this + * log entry contains an replication node ID. + */ + public boolean isSyncPoint() { + return replicationPossible.isMatchable(); + } + + /** + * Return true if this type of log entry can serve as the synchronization + * matchpoint for the replication stream. + */ + public static boolean isSyncPoint(byte entryType) { + return findType(entryType).isSyncPoint(); + } + + /* Type of Btree node. */ + static enum NodeType { + + /* Not a Btree node. */ + NONE, + + /* Internal node. Does not include old-format DIN/DBIN. */ + IN, + + /* DIN/DBIN/DupCountLN in old-format duplicates database. */ + OLD_DUP, + + /* LNs representing records in internal databases. */ + LN_INTERNAL, + + /* LNs representing ordinary user records. */ + LN_USER; + } + + public boolean isNodeType() { + return nodeType != NodeType.NONE; + } + + public boolean isUserLNType() { + return nodeType == NodeType.LN_USER; + } + + public boolean isLNType() { + return nodeType == NodeType.LN_INTERNAL || isUserLNType(); + } + + public boolean isINType() { + return nodeType == NodeType.IN; + } + + public boolean isOldDupType() { + return nodeType == NodeType.OLD_DUP; + } + + /** + * Return true if the two types are equal. Handles the situation where new + * log entry types were introduced in one release, that are actually + * equivalent to old, deprecated types. + */ + public static boolean compareTypeAndVersion(int versionA, + byte typeA, + int versionB, + byte typeB) { + LogEntryType entryA = findType(typeA); + return entryA.compareTypeAndVersion(versionA, versionB, typeB); + } + + private static class UserLNLogEntryType extends LogEntryType { + public UserLNLogEntryType(byte typeNum, + String displayName, + Txnal txnal) { + super(typeNum, displayName, + LNLogEntry.create(com.sleepycat.je.tree.LN.class), + txnal, Marshall.OUTSIDE_LATCH, + Replicable.REPLICABLE_NO_MATCH, NodeType.LN_USER); + } + + @Override + public boolean compareTypeAndVersion(int versionA, + int versionB, + byte typeB) { + /* If the other entry is newer, the types should match. */ + if (versionA <= versionB && getTypeNum() == typeB) { + return true; + } + + /* + * If the other entry is older, the type might be an old, + * deprecated, equivalent type. + */ + if ((versionA > versionB) && isEquivalentOldType(typeB)) { + return true; + } + + /* + * In this case, the other entry's version is older or newer but it + * doesn't matter -- it's completely the wrong type. 
+ */ + return false; + } + + private boolean isEquivalentOldType(byte typeB) { + if ((!isTransactional() && (typeB == LOG_OLD_LN.getTypeNum())) || + (isTransactional() && + (typeB == LOG_OLD_LN_TRANSACTIONAL.getTypeNum()))) { + return true; + } + + return false; + } + } +} diff --git a/src/com/sleepycat/je/log/LogFlusher.java b/src/com/sleepycat/je/log/LogFlusher.java new file mode 100644 index 0000000..b6f8cc5 --- /dev/null +++ b/src/com/sleepycat/je/log/LogFlusher.java @@ -0,0 +1,247 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static com.sleepycat.je.config.EnvironmentParams.LOG_FLUSH_NO_SYNC_INTERVAL; +import static com.sleepycat.je.config.EnvironmentParams.LOG_FLUSH_SYNC_INTERVAL; +import static com.sleepycat.je.config.EnvironmentParams.OLD_REP_LOG_FLUSH_TASK_INTERVAL; +import static com.sleepycat.je.config.EnvironmentParams.OLD_REP_RUN_LOG_FLUSH_TASK; + +import java.util.Timer; +import java.util.TimerTask; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.StoppableThread; + +/** + * Flush the log buffers (and write queue) periodically to disk and to the file + * system, as specified by + * {@link com.sleepycat.je.EnvironmentConfig#LOG_FLUSH_SYNC_INTERVAL} and + * {@link com.sleepycat.je.EnvironmentConfig#LOG_FLUSH_NO_SYNC_INTERVAL}. + * + * Currently flushing occurs if any transactions were committed during the + * interval. In the future we may want to flush if there were no writes or + * fynscs in the interval, to allow specifying an even smaller interval for + * NO_SYNC flushing. This would mean that the wakeup interval should be the + * config interval divided by 2. + */ +public class LogFlusher { + private final EnvironmentImpl envImpl; + private final Timer timer; + private int flushSyncInterval; + private int flushNoSyncInterval; + private FlushTask flushSyncTask; + private FlushTask flushNoSyncTask; + + private boolean shutdownRequest = false; + + public LogFlusher(EnvironmentImpl envImpl) { + + this.envImpl = envImpl; + + this.timer = new Timer( + envImpl.makeDaemonThreadName(Environment.LOG_FLUSHER_NAME), + true /*isDaemon*/); + } + + /** + * Applies the new configuration, then cancels and reschedules the flush + * tasks as needed. + * + * @throws IllegalArgumentException if an illegal combination of old and + * new flush params were specified. 
+ */ + public void configFlushTask(DbConfigManager configMgr) { + + if (!updateConfig(configMgr)) { + return; + } + + synchronized (this) { + if (!shutdownRequest) { + cancel(); + + if (flushSyncInterval > 0) { + flushSyncTask = new FlushTask(envImpl, true /*fsync*/); + + timer.schedule( + flushSyncTask, flushSyncInterval, flushSyncInterval); + } + + if (flushNoSyncInterval > 0) { + flushNoSyncTask = new FlushTask(envImpl, false /*fsync*/); + + timer.schedule( + flushNoSyncTask, flushNoSyncInterval, + flushNoSyncInterval); + } + } + } + } + + private void cancel() { + if (flushSyncTask != null) { + flushSyncTask.cancel(); + flushSyncTask = null; + } + if (flushNoSyncTask != null) { + flushNoSyncTask.cancel(); + flushNoSyncTask = null; + } + } + + public void requestShutdown() { + shutdown(); + } + + public void shutdown() { + synchronized (this) { + shutdownRequest = true; + cancel(); + timer.cancel(); + } + } + + /** + * Applies the new configuration and returns whether it changed. + * + * @throws IllegalArgumentException if an illegal combination of old and + * new flush params were specified. + */ + private boolean updateConfig(DbConfigManager configMgr) { + + int newSyncInternal; + int newNoSyncInterval; + + /* + * If specified and set to false (which is not the default), the + * deprecated OLD_REP_RUN_LOG_FLUSH_TASK overrides other settings. + */ + if (configMgr.isSpecified(OLD_REP_RUN_LOG_FLUSH_TASK) && + !configMgr.getBoolean(OLD_REP_RUN_LOG_FLUSH_TASK)) { + + if (configMgr.isSpecified(LOG_FLUSH_SYNC_INTERVAL) || + configMgr.isSpecified(LOG_FLUSH_NO_SYNC_INTERVAL)) { + + throw new IllegalArgumentException( + "When " + OLD_REP_RUN_LOG_FLUSH_TASK.getName() + + " is set to false, " + LOG_FLUSH_SYNC_INTERVAL + + " and " + LOG_FLUSH_NO_SYNC_INTERVAL + + " must not be specified."); + } + + newSyncInternal = 0; + newNoSyncInterval = 0; + + } else { + + /* + * If specified, the deprecated OLD_REP_LOG_FLUSH_TASK_INTERVAL + * overrides LOG_FLUSH_SYNC_INTERVAL. 
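A standalone sketch of the scheduling pattern configFlushTask uses: a daemon java.util.Timer runs a TimerTask at a fixed interval, and reconfiguration cancels the old task before scheduling a new one, with an interval of zero disabling the task. The names are illustrative, not JE API.

import java.util.Timer;
import java.util.TimerTask;

final class PeriodicFlushSketch {
    private final Timer timer = new Timer("flush-sketch", true /*daemon*/);
    private TimerTask task;

    synchronized void reschedule(long intervalMs, Runnable flush) {
        if (task != null) {
            task.cancel();        // mirrors LogFlusher.cancel()
        }
        if (intervalMs > 0) {     // an interval of 0 disables the task
            task = new TimerTask() {
                @Override public void run() { flush.run(); }
            };
            timer.schedule(task, intervalMs, intervalMs);
        }
    }
}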
+ */ + if (configMgr.isSpecified(OLD_REP_LOG_FLUSH_TASK_INTERVAL)) { + + if (configMgr.isSpecified(LOG_FLUSH_SYNC_INTERVAL)) { + + throw new IllegalArgumentException( + "Both " + OLD_REP_LOG_FLUSH_TASK_INTERVAL.getName() + + " and " + LOG_FLUSH_SYNC_INTERVAL + + " must not be specified."); + } + + newSyncInternal = + configMgr.getDuration(OLD_REP_LOG_FLUSH_TASK_INTERVAL); + } else { + newSyncInternal = + configMgr.getDuration(LOG_FLUSH_SYNC_INTERVAL); + } + + newNoSyncInterval = + configMgr.getDuration(LOG_FLUSH_NO_SYNC_INTERVAL); + } + + if (newSyncInternal == flushSyncInterval && + newNoSyncInterval == flushNoSyncInterval) { + return false; + } + + flushSyncInterval = newSyncInternal; + flushNoSyncInterval = newNoSyncInterval; + return true; + } + + int getFlushSyncInterval() { + return flushSyncInterval; + } + + int getFlushNoSyncInterval() { + return flushNoSyncInterval; + } + + FlushTask getFlushSyncTask() { + return flushSyncTask; + } + + FlushTask getFlushNoSyncTask() { + return flushNoSyncTask; + } + + static class FlushTask extends TimerTask { + private final EnvironmentImpl envImpl; + private final boolean fsync; + private long lastNCommits; + private volatile int flushCount; + + FlushTask(EnvironmentImpl envImpl, boolean fsync) { + this.envImpl = envImpl; + this.fsync = fsync; + this.lastNCommits = envImpl.getTxnManager().getNTotalCommits(); + } + + int getFlushCount() { + return flushCount; + } + + @Override + public void run() { + try { + final long newNCommits = + envImpl.getTxnManager().getNTotalCommits(); + + /* Do nothing if there have been no new commits. */ + if (newNCommits <= lastNCommits) { + return; + } + + if (fsync) { + envImpl.getLogManager().flushSync(); + } else { + envImpl.getLogManager().flushNoSync(); + } + + lastNCommits = newNCommits; + flushCount++; + + } catch (Throwable e) { + if (envImpl.isValid()) { + StoppableThread.handleUncaughtException( + envImpl.getLogger(), envImpl, Thread.currentThread(), + e); + } + } + } + } +} diff --git a/src/com/sleepycat/je/log/LogItem.java b/src/com/sleepycat/je/log/LogItem.java new file mode 100644 index 0000000..3581389 --- /dev/null +++ b/src/com/sleepycat/je/log/LogItem.java @@ -0,0 +1,56 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.entry.ReplicableLogEntry; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Values returned when a item is logged. + * + * This class is used as a simple struct for returning multiple values, and + * does not need getters and setters. + */ +public class LogItem { + + /** + * LSN of the new log entry. Is NULL_LSN if a BIN-delta is logged. If + * not NULL_LSN for a tree node, is typically used to update the slot in + * the parent IN. + */ + public long lsn = DbLsn.NULL_LSN; + + /** + * Size of the new log entry. Is used to update the LN slot in the BIN. + */ + public int size = 0; + + /** + * The header of the new log entry. Used by HA to do VLSN tracking and + * implement a tip cache. 
+ */ + public LogEntryHeader header = null; + + /** + * The bytes of new log entry. Used by HA to implement a tip cache. + */ + public ByteBuffer buffer = null; + + /** + * Used for saving the materialized form of the buffer in LogItemCache. + */ + public volatile ReplicableLogEntry cachedEntry = null; +} diff --git a/src/com/sleepycat/je/log/LogManager.java b/src/com/sleepycat/je/log/LogManager.java new file mode 100644 index 0000000..824ff7b --- /dev/null +++ b/src/com/sleepycat/je/log/LogManager.java @@ -0,0 +1,1548 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static com.sleepycat.je.log.LogStatDefinition.GROUP_DESC; +import static com.sleepycat.je.log.LogStatDefinition.GROUP_NAME; +import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_END_OF_LOG; +import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_REPEAT_FAULT_READS; +import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_TEMP_BUFFER_WRITES; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.cleaner.DbFileSummary; +import com.sleepycat.je.cleaner.ExpirationTracker; +import com.sleepycat.je.cleaner.LocalUtilizationTracker; +import com.sleepycat.je.cleaner.TrackedFileSummary; +import com.sleepycat.je.cleaner.UtilizationTracker; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.txn.WriteLockInfo; +import com.sleepycat.je.util.verify.VerifierUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LSNStat; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * The LogManager supports reading and writing to the JE log. + * The writing of data to the log is serialized via the logWriteMutex. + * Typically space is allocated under the LWL. The client computes + * the checksum and copies the data into the log buffer (not holding + * the LWL). + */ +public class LogManager { + + /* No-op loggable object. 
*/ + private static final String DEBUG_NAME = LogManager.class.getName(); + + private final LogBufferPool logBufferPool; // log buffers + private final Object logWriteMutex; // synchronizes log writes + private final boolean doChecksumOnRead; // if true, do checksum on read + private final FileManager fileManager; // access to files + private final FSyncManager grpManager; + private final EnvironmentImpl envImpl; + private final boolean readOnly; + + /* How many bytes to read when faulting in. */ + private final int readBufferSize; + + /* The last LSN in the log during recovery. */ + private long lastLsnAtRecovery = DbLsn.NULL_LSN; + + /* Stats */ + private final StatGroup stats; + + /* + * Number of times we have to repeat a read when we fault in an object + * because the initial read was too small. + */ + private final LongStat nRepeatFaultReads; + + /* + * Number of times we have to use the temporary marshalling buffer to + * write to the log. + */ + private final LongStat nTempBufferWrites; + + /* The location of the next entry to be written to the log. */ + private final LSNStat endOfLog; + + /* + * Used to determine if we switched log buffers. For + * NOSYNC durability, if we switched log buffers, + * the thread will write the previous dirty buffers. + */ + private LogBuffer prevLogBuffer = null; + + /* For unit tests */ + private TestHook readHook; // used for generating exceptions on log reads + + /* For unit tests. */ + private TestHook delayVLSNRegisterHook; + private TestHook flushHook; + + /* A queue to hold log entries which are to be logged lazily. */ + private final Queue lazyLogQueue = + new ConcurrentLinkedQueue(); + + /* + * Used for tracking the current file. Is null if no tracking should occur. + * Read/write of this field is protected by the LWL, but the tracking + * actually occurs outside the LWL. + */ + private ExpirationTracker expirationTracker = null; + + /* + * An entry in the lazyLogQueue. A struct to hold the entry and repContext. + */ + private static class LazyQueueEntry { + private final LogEntry entry; + private final ReplicationContext repContext; + + private LazyQueueEntry(LogEntry entry, ReplicationContext repContext) { + this.entry = entry; + this.repContext = repContext; + } + } + + /** + * There is a single log manager per database environment. + */ + public LogManager(EnvironmentImpl envImpl, + boolean readOnly) + throws DatabaseException { + + /* Set up log buffers. */ + this.envImpl = envImpl; + this.fileManager = envImpl.getFileManager(); + this.grpManager = new FSyncManager(this.envImpl); + DbConfigManager configManager = envImpl.getConfigManager(); + this.readOnly = readOnly; + logBufferPool = new LogBufferPool(fileManager, envImpl); + + /* See if we're configured to do a checksum when reading in objects. */ + doChecksumOnRead = + configManager.getBoolean(EnvironmentParams.LOG_CHECKSUM_READ); + + logWriteMutex = new Object(); + readBufferSize = + configManager.getInt(EnvironmentParams.LOG_FAULT_READ_SIZE); + + /* Do the stats definitions. 
*/ + stats = new StatGroup(GROUP_NAME, GROUP_DESC); + nRepeatFaultReads = new LongStat(stats, LOGMGR_REPEAT_FAULT_READS); + nTempBufferWrites = new LongStat(stats, LOGMGR_TEMP_BUFFER_WRITES); + endOfLog = new LSNStat(stats, LOGMGR_END_OF_LOG); + } + + public boolean getChecksumOnRead() { + return doChecksumOnRead; + } + + public long getLastLsnAtRecovery() { + return lastLsnAtRecovery; + } + + public void setLastLsnAtRecovery(long lastLsnAtRecovery) { + this.lastLsnAtRecovery = lastLsnAtRecovery; + } + + /** + * Called at the end of recovery to begin expiration tracking using the + * given tracker. During recovery we are single threaded, so we can set + * the field without taking the LWL. + */ + public void initExpirationTracker(final ExpirationTracker tracker) { + expirationTracker = tracker; + } + + /** + * Reset the pool when the cache is resized. This method is called after + * the memory budget has been calculated. + */ + public void resetPool(DbConfigManager configManager) + throws DatabaseException { + synchronized (logWriteMutex) { + logBufferPool.reset(configManager); + } + } + + /* + * Writing to the log + */ + + /** + * Log this single object and force a write of the log files. + * @param entry object to be logged + * @param fsyncRequired if true, log files should also be fsynced. + * @return LSN of the new log entry + */ + public long logForceFlush(LogEntry entry, + boolean fsyncRequired, + ReplicationContext repContext) + throws DatabaseException { + + return log(entry, + Provisional.NO, + true, // flush required + fsyncRequired, + false, // forceNewLogFile + repContext); // repContext + } + + /** + * Log this single object and force a flip of the log files. + * @param entry object to be logged + * @return LSN of the new log entry + */ + public long logForceFlip(LogEntry entry) + throws DatabaseException { + + return log(entry, + Provisional.NO, + true, // flush required + false, // fsync required + true, // forceNewLogFile + ReplicationContext.NO_REPLICATE); + } + + /** + * Write a log entry. + * @param entry object to be logged + * @return LSN of the new log entry + */ + public long log(LogEntry entry, ReplicationContext repContext) + throws DatabaseException { + + return log(entry, + Provisional.NO, + false, // flush required + false, // fsync required + false, // forceNewLogFile + repContext); + } + + /** + * Write a log entry lazily. + * @param entry object to be logged + */ + public void logLazily(LogEntry entry, ReplicationContext repContext) { + + lazyLogQueue.add(new LazyQueueEntry(entry, repContext)); + } + + /** + * Translates individual log params to LogItem and LogContext fields. + */ + private long log(final LogEntry entry, + final Provisional provisional, + final boolean flushRequired, + final boolean fsyncRequired, + final boolean forceNewLogFile, + final ReplicationContext repContext) + throws DatabaseException { + + final LogParams params = new LogParams(); + + params.entry = entry; + params.provisional = provisional; + params.repContext = repContext; + params.flushRequired = flushRequired; + params.fsyncRequired = fsyncRequired; + params.forceNewLogFile = forceNewLogFile; + + final LogItem item = log(params); + + return item.lsn; + } + + /** + * Log an item, first logging any items on the lazyLogQueue, and finally + * flushing and sync'ing (if requested). 
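The logForceFlush, logForceFlip and log convenience methods above all funnel their boolean flags into a LogParams object through the private log() translator. Below is a minimal, self-contained sketch of that parameter-object pattern; Params, Item and the method names are simplified stand-ins for illustration, not the JE classes themselves.

    final class LogCallSketch {
        static final class Params {
            Object entry;
            boolean flushRequired, fsyncRequired, forceNewLogFile;
        }
        static final class Item {
            long lsn;
        }

        // Mirrors the private log(...) overload: each boolean argument
        // becomes one field of the parameter object.
        static long log(Object entry, boolean flush, boolean fsync,
                        boolean forceNewFile) {
            Params p = new Params();
            p.entry = entry;
            p.flushRequired = flush;
            p.fsyncRequired = fsync;
            p.forceNewLogFile = forceNewFile;
            Item item = logInternal(p);
            return item.lsn;
        }

        static Item logInternal(Params p) {
            return new Item();  // placeholder for the real write path
        }
    }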
+ */ + public LogItem log(LogParams params) + throws DatabaseException { + + final LogItem item = new LogItem(); + + /* + * In a read-only env we return NULL_LSN (the default value for + * LogItem.lsn) for all entries. We allow this to proceed, rather + * than throwing an exception, to support logging INs for splits that + * occur during recovery, for one reason. Logging LNs in a read-only + * env is not allowed, and this is checked in the LN class. + */ + if (readOnly) { + return item; + } + + try { + /* Flush any pending lazy entries. */ + for (LazyQueueEntry lqe = lazyLogQueue.poll(); + lqe != null; + lqe = lazyLogQueue.poll()) { + + LogParams lqeParams = new LogParams(); + lqeParams.entry = lqe.entry; + lqeParams.provisional = Provisional.NO; + lqeParams.repContext = lqe.repContext; + + logItem(new LogItem(), lqeParams); + } + + final LogEntry logEntry = params.entry; + + /* + * If possible, marshall this entry outside the log write latch to + * allow greater concurrency by shortening the write critical + * section. Note that the header may only be created during + * marshalling because it calls entry.getSize(). + */ + if (logEntry.getLogType().marshallOutsideLatch()) { + + item.header = new LogEntryHeader( + logEntry, params.provisional, params.repContext); + + item.buffer = marshallIntoBuffer(item.header, logEntry); + } + + logItem(item, params); + + if (params.fsyncRequired || params.flushRequired) { + + /* Flush log buffers and write queue, and optionally fsync. */ + grpManager.flushAndSync(params.fsyncRequired); + + } else if (params.switchedLogBuffer) { + /* + * The operation does not require writing to the log file, but + * since we switched log buffers, this thread will write the + * previously dirty log buffers (not this thread's log entry + * though). This is done for NOSYNC durability so those types + * of transactions won't fill all the log buffers thus forcing + * to have to write the buffers under the log write latch. + */ + logBufferPool.writeDirty(false /*flushWriteQueue*/); + } + + TestHookExecute.doHookIfSet(flushHook); + + /* + * We've logged this log entry from the replication stream. Let the + * Replicator know, so this node can create a VLSN->LSN mapping. Do + * this before the ckpt so we have a better chance of writing this + * mapping to disk. + */ + if (params.repContext.inReplicationStream()) { + + assert (item.header.getVLSN() != null) : + "Unexpected null vlsn: " + item.header + " " + + params.repContext; + + /* Block the VLSN registration, used by unit tests. */ + TestHookExecute.doHookIfSet(delayVLSNRegisterHook); + + envImpl.registerVLSN(item); + } + + } catch (EnvironmentFailureException e) { + + /* + * Final checks are below for unexpected exceptions during the + * critical write path. Most should be caught by + * serialLogInternal, but the catches here account for other + * exceptions above. Note that Errors must be caught here as well + * as Exceptions. [#21929] + * + * If we've already invalidated the environment, rethrow so as not + * to excessively wrap the exception. + */ + if (!envImpl.isValid()) { + throw e; + } + throw EnvironmentFailureException.unexpectedException(envImpl, e); + + } catch (Exception e) { + throw EnvironmentFailureException.unexpectedException(envImpl, e); + + } catch (Error e) { + envImpl.invalidate(e); + throw e; + } + + /* + * Periodically, as a function of how much data is written, ask the + * checkpointer or the cleaner to wake up. 
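The marshall-outside-latch optimization described above shortens the serialized critical section by doing the expensive serialization with no lock held. A generic sketch of the same pattern follows, with hypothetical names and none of JE's header, checksum or VLSN machinery.

    import java.nio.ByteBuffer;

    // Serialize outside the mutex; only reserve-and-copy is serialized.
    final class OutsideLatchWriter {
        private final Object logWriteMutex = new Object();
        private final ByteBuffer log = ByteBuffer.allocate(1 << 20);

        long append(byte[] payload) {
            // Expensive marshalling happens with no lock held.
            ByteBuffer marshalled = ByteBuffer.allocate(payload.length);
            marshalled.put(payload);
            marshalled.flip();

            // The critical section shrinks to a position bump and a copy.
            synchronized (logWriteMutex) {
                long offset = log.position();
                log.put(marshalled);
                return offset;
            }
        }
    }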
+ */ + envImpl.getCheckpointer().wakeupAfterWrite(); + envImpl.getCleaner().wakeupAfterWrite(item.size); + + /* Update background writes. */ + if (params.backgroundIO) { + envImpl.updateBackgroundWrites( + item.size, logBufferPool.getLogBufferSize()); + } + + return item; + } + + private void logItem(final LogItem item, final LogParams params) + throws IOException, DatabaseException { + + final UtilizationTracker tracker = envImpl.getUtilizationTracker(); + + final LogWriteInfo lwi = serialLog(item, params, tracker); + + if (lwi != null) { + + /* + * Add checksum, prev offset, and VLSN to the entry. + * Copy data into the log buffer. + */ + item.buffer = item.header.addPostMarshallingInfo( + item.buffer, lwi.fileOffset, lwi.vlsn); + + lwi.lbs.put(item.buffer); + } + + /* Update obsolete info under the LWL. */ + updateObsolete(params, tracker); + + /* Expiration tracking is protected by the Btree latch, not the LWL. */ + if (params.expirationTrackerToUse != null) { + params.expirationTrackerToUse.track(params.entry, item.size); + } + + /* Queue flushing of expiration tracker after a file flip. */ + if (params.expirationTrackerCompleted != null) { + envImpl.getExpirationProfile().addCompletedTracker( + params.expirationTrackerCompleted); + } + } + + /** + * This method handles exceptions to be certain that the Environment is + * invalidated when any exception occurs in the critical write path, and it + * checks for an invalid environment to be sure that no subsequent write is + * allowed. [#21929] + * + * Invalidation is necessary because a logging operation does not ensure + * that the internal state -- correspondence of LSN pointer, log buffer + * position and file position, and the integrity of the VLSN index [#20919] + * -- is maintained correctly when an exception occurs. Allowing a + * subsequent write can cause log corruption. + */ + private LogWriteInfo serialLog( + final LogItem item, + final LogParams params, + final UtilizationTracker tracker) + throws IOException { + + synchronized (logWriteMutex) { + + /* Do not attempt to write with an invalid environment. */ + envImpl.checkIfInvalid(); + + try { + return serialLogWork(item, params, tracker); + + } catch (EnvironmentFailureException e) { + /* + * If we've already invalidated the environment, rethrow so + * as not to excessively wrap the exception. + */ + if (!envImpl.isValid()) { + throw e; + } + + /* Otherwise, invalidate the environment. */ + throw EnvironmentFailureException.unexpectedException( + envImpl, e); + + } catch (Exception e) { + throw EnvironmentFailureException.unexpectedException( + envImpl, e); + + } catch (Error e) { + /* Errors must be caught here as well as Exceptions. [#21929] */ + envImpl.invalidate(e); + throw e; + } + } + } + + /** + * This method is used as part of writing data to the log. It is called + * under the LogWriteLatch. + * Either the data is written into the LogBuffer or space is allocated in + * the LogBuffer. The LogWriteInfo object is used to save information about + * the space allocated in the LogBuffer. The caller uses the object to + * copy data into the underlying LogBuffer. A null return value + * indicates that the item was written directly to the log file. This + * occurs when the data item is too big to fit into an empty LogBuffer. + * + * @param params log params. + * @param tracker utilization tracker. + * @return a LogWriteInfo object used to access allocated LogBuffer space. + * If null, the data was written directly to the log file.
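serialLog's exception handling above follows an invalidate-on-failure discipline: any unexpected Exception or Error inside the critical write section poisons the environment so that no later write can run against possibly corrupt state. A minimal stand-alone sketch of that discipline, with a boolean flag standing in for envImpl.invalidate:

    // Invalidate-on-failure: after any unexpected failure inside the
    // critical section, refuse all subsequent writes.
    final class CriticalWriteSection {
        private final Object mutex = new Object();
        private volatile boolean valid = true;

        void write(Runnable work) {
            synchronized (mutex) {
                if (!valid) {
                    throw new IllegalStateException("environment invalidated");
                }
                try {
                    work.run();
                } catch (RuntimeException | Error e) {
                    valid = false;  // stand-in for envImpl.invalidate(e)
                    throw e;
                }
            }
        }
    }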
+ * @throws IOException + */ + private LogWriteInfo serialLogWork( + final LogItem item, + final LogParams params, + final UtilizationTracker tracker) + throws IOException { + + /* + * Do obsolete tracking before marshalling a FileSummaryLN into the + * log buffer so that a FileSummaryLN counts itself. + * countObsoleteNode must be called before computing the entry + * size, since it can change the size of a FileSummaryLN entry that + * we're logging. + */ + final LogEntryType entryType = params.entry.getLogType(); + + if (!DbLsn.isTransientOrNull(params.oldLsn)) { + if (params.obsoleteDupsAllowed) { + tracker.countObsoleteNodeDupsAllowed( + params.oldLsn, entryType, params.oldSize, params.nodeDb); + } else { + tracker.countObsoleteNode( + params.oldLsn, entryType, params.oldSize, params.nodeDb); + } + } + + /* Count auxOldLsn for the same database; no specified size. */ + if (!DbLsn.isTransientOrNull(params.auxOldLsn)) { + if (params.obsoleteDupsAllowed) { + tracker.countObsoleteNodeDupsAllowed( + params.auxOldLsn, entryType, 0, params.nodeDb); + } else { + tracker.countObsoleteNode( + params.auxOldLsn, entryType, 0, params.nodeDb); + } + } + + /* + * Compute the VLSNs and modify the DTVLSN in commit/abort entries + * before the entry is marshalled or its size is required. At this + * point we are committed to writing a log entry with the + * computed VLSN. + */ + final VLSN vlsn; + + if (params.repContext.getClientVLSN() != null || + params.repContext.mustGenerateVLSN()) { + + if (params.repContext.mustGenerateVLSN()) { + vlsn = envImpl.assignVLSNs(params.entry); + } else { + vlsn = params.repContext.getClientVLSN(); + } + } else { + vlsn = null; + } + + /* + * If an entry must be protected within the log write latch for + * marshalling, take care to also calculate its size in the + * protected section. Note that we have to get the size *before* + * marshalling so that the currentLsn and size are correct for + * utilization tracking. + */ + final boolean marshallOutsideLatch = (item.buffer != null); + final int entrySize; + + if (marshallOutsideLatch) { + entrySize = item.buffer.limit(); + assert item.header != null; + } else { + assert item.header == null; + item.header = new LogEntryHeader( + params.entry, params.provisional, params.repContext); + entrySize = item.header.getEntrySize(); + } + + /* + * Get the next free slot in the log, under the log write latch. + */ + if (params.forceNewLogFile) { + fileManager.forceNewLogFile(); + } + + final boolean flippedFile = fileManager.shouldFlipFile(entrySize); + final long currentLsn = fileManager.calculateNextLsn(flippedFile); + + /* + * TODO: Count file header, since it is not logged via LogManager. + * Some tests (e.g., INUtilizationTest) will need to be adjusted. + * + final int fileHeaderSize = FileManager.firstLogEntryOffset(); + if (DbLsn.getFileOffset(currentLsn) == fileHeaderSize) { + final long fileNum = DbLsn.getFileNumber(currentLsn); + + tracker.countNewLogEntry( + DbLsn.makeLsn(fileNum, 0), LogEntryType.LOG_FILE_HEADER, + fileHeaderSize, null); + } + */ + + /* + * countNewLogEntry and countObsoleteNodeInexact cannot change + * a FileSummaryLN size, so they are safe to call after + * getSizeForWrite. + */ + tracker.countNewLogEntry( + currentLsn, entryType, entrySize, params.nodeDb); + + /* + * LN deletions and dup DB LNs are obsolete immediately. Inexact + * counting is used to save resources because the cleaner knows + * that all such LNs are obsolete.
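The currentLsn computed above is a single long that packs a file number and a file offset together. A sketch of that packing, assuming the conventional split of a 32-bit file number in the high half and a 32-bit offset in the low half (see DbLsn for the real implementation):

    // Assumed layout: file number in the high 32 bits, offset in the low 32.
    final class LsnSketch {
        static long makeLsn(long fileNumber, long fileOffset) {
            return (fileNumber << 32) | (fileOffset & 0xFFFFFFFFL);
        }
        static long getFileNumber(long lsn) {
            return lsn >>> 32;
        }
        static long getFileOffset(long lsn) {
            return lsn & 0xFFFFFFFFL;
        }
    }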
+ */ + if (params.entry.isImmediatelyObsolete(params.nodeDb)) { + tracker.countObsoleteNodeInexact( + currentLsn, entryType, entrySize, params.nodeDb); + } + + /* + * This entry must be marshalled within the log write latch. + */ + if (!marshallOutsideLatch) { + assert item.buffer == null; + item.buffer = marshallIntoBuffer(item.header, params.entry); + } + + /* Sanity check */ + if (entrySize != item.buffer.limit()) { + throw EnvironmentFailureException.unexpectedState( + "Logged entry entrySize= " + entrySize + + " but marshalledSize=" + item.buffer.limit() + + " type=" + entryType + " currentLsn=" + + DbLsn.getNoFormatString(currentLsn)); + } + + /* + * Ask for a log buffer suitable for holding this new entry. If + * entrySize is larger than the LogBuffer capacity, this will flush + * all dirty buffers and return the next empty (but too small) buffer. + * The returned buffer is not latched. + */ + final LogBuffer lastLogBuffer = + logBufferPool.getWriteBuffer(entrySize, flippedFile); + + /* + * Bump the LSN values, which gives us a valid previous pointer, + * which is part of the log entry header. This must be done: + * - before logging the currentLsn. + * - after calling getWriteBuffer, to flush the prior file when + * flippedFile is true. + */ + final long prevOffset = fileManager.advanceLsn( + currentLsn, entrySize, flippedFile); + + if (lastLogBuffer != prevLogBuffer) { + params.switchedLogBuffer = true; + } + prevLogBuffer = lastLogBuffer; + + final LogBufferSegment useBuffer; + + lastLogBuffer.latchForWrite(); + try { + useBuffer = lastLogBuffer.allocate(entrySize); + + if (useBuffer != null) { + /* Register the lsn while holding the buffer latch. */ + lastLogBuffer.registerLsn(currentLsn); + } else { + /* + * The item buffer is larger than the LogBuffer capacity, so + * write the item buffer to the file directly. Note that + * getWriteBuffer has flushed all dirty buffers. + * + * First add checksum, prev offset, and VLSN to the entry. + */ + item.buffer = item.header.addPostMarshallingInfo( + item.buffer, prevOffset, vlsn); + + final boolean flushWriteQueue = + params.flushRequired && !params.fsyncRequired; + + fileManager.writeLogBuffer( + new LogBuffer(item.buffer, currentLsn), + flushWriteQueue); + + assert lastLogBuffer.getDataBuffer().position() == 0; + + /* Leave a clue that the buffer size needs to be increased. */ + nTempBufferWrites.increment(); + } + } finally { + lastLogBuffer.release(); + } + + /* + * If the txn is not null, the first entry is an LN. Update the txn + * with info about the latest LSN. Note that this has to happen + * within the log write latch. + */ + params.entry.postLogWork(item.header, currentLsn, vlsn); + + item.lsn = currentLsn; + item.size = entrySize; + + /* If the expirationTracker field is null, no tracking should occur. */ + if (expirationTracker != null) { + /* + * When logging to a new file, also flip the expirationTracker + * under the LWL and return expirationTrackerCompleted so it will + * be queued for flushing. + */ + final long newFile = DbLsn.getFileNumber(item.lsn); + if (flippedFile && newFile != expirationTracker.getFileNum()) { + params.expirationTrackerCompleted = expirationTracker; + expirationTracker = new ExpirationTracker(newFile); + } + /* + * Increment the pending calls under the LWL, so we can determine + * when we're finished. + */ + expirationTracker.incrementPendingTrackCalls(); + params.expirationTrackerToUse = expirationTracker; + } + + return (useBuffer == null ? 
+ null : new LogWriteInfo(useBuffer, vlsn, prevOffset)); + } + + /** + * Serialize a loggable object into this buffer. + */ + private ByteBuffer marshallIntoBuffer(LogEntryHeader header, + LogEntry entry) { + int entrySize = header.getSize() + header.getItemSize(); + + ByteBuffer destBuffer = ByteBuffer.allocate(entrySize); + header.writeToLog(destBuffer); + + /* Put the entry in. */ + entry.writeEntry(destBuffer); + + /* Set the limit so it can be used as the size of the entry. */ + destBuffer.flip(); + + return destBuffer; + } + + /** + * Serialize a log entry into this buffer with proper entry header. Return + * it ready for a copy. + */ + ByteBuffer putIntoBuffer(LogEntry entry, + long prevLogEntryOffset) { + LogEntryHeader header = new LogEntryHeader + (entry, Provisional.NO, ReplicationContext.NO_REPLICATE); + + /* + * Currently this method is only used for serializing the FileHeader. + * Assert that we do not need the Txn mutex in case this method is used + * in the future for other log entries. See LN.log. [#17204] + */ + assert !entry.getLogType().isTransactional(); + + ByteBuffer destBuffer = marshallIntoBuffer(header, entry); + + return header.addPostMarshallingInfo(destBuffer, + prevLogEntryOffset, + null); + } + + /* + * Reading from the log. + */ + + /** + * Instantiate all the objects in the log entry at this LSN. + */ + public LogEntry getLogEntry(long lsn) + throws FileNotFoundException { + + return getLogEntry(lsn, 0, false /*invisibleReadAllowed*/). + getEntry(); + } + + public WholeEntry getWholeLogEntry(long lsn) + throws FileNotFoundException { + + return getLogEntry(lsn, 0, false /*invisibleReadAllowed*/); + } + + /** + * Instantiate all the objects in the log entry at this LSN. Allow the + * fetch of invisible log entries if we are in recovery. + */ + public WholeEntry getLogEntryAllowInvisibleAtRecovery(long lsn, int size) + throws FileNotFoundException { + + return getLogEntry( + lsn, size, envImpl.isInInit() /*invisibleReadAllowed*/); + } + + /** + * Instantiate all the objects in the log entry at this LSN. The entry + * may be marked invisible. + */ + public WholeEntry getLogEntryAllowInvisible(long lsn) + throws FileNotFoundException { + + return getLogEntry(lsn, 0, true); + } + + /** + * Instantiate all the objects in the log entry at this LSN. + * @param lsn location of entry in log. + * @param invisibleReadAllowed true if it's expected that the target log + * entry might be invisible. Correct the known-to-be-bad checksum before + * proceeding. + * @return log entry that embodies all the objects in the log entry. + */ + private WholeEntry getLogEntry( + long lsn, + int lastLoggedSize, + boolean invisibleReadAllowed) + throws FileNotFoundException { + + /* Fail loudly if the environment is invalid. */ + envImpl.checkIfInvalid(); + + LogSource logSource = null; + try { + + /* + * Get a log source for the log entry which provides an abstraction + * that hides whether the entry is in a buffer or on disk. Will + * register as a reader for the buffer or the file, which will take + * a latch if necessary. Latch is released in finally block. + */ + logSource = getLogSource(lsn); + + try { + return getLogEntryFromLogSource( + lsn, lastLoggedSize, logSource, invisibleReadAllowed); + + } catch (ChecksumException ce) { + + /* + * When using a FileSource, a checksum error indicates a + * persistent corruption. An EFE with LOG_CHECKSUM is created + * in the catch below and EFE.isCorrupted will return true. 
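marshallIntoBuffer above writes the header and then the entry into one ByteBuffer and flips it, so that limit() doubles as the entry size. A self-contained sketch of that shape over plain byte arrays:

    import java.nio.ByteBuffer;

    // Header first, then the item, then flip: limit() is the entry size.
    final class MarshallSketch {
        static ByteBuffer marshall(byte[] header, byte[] item) {
            ByteBuffer dest = ByteBuffer.allocate(header.length + item.length);
            dest.put(header);
            dest.put(item);
            dest.flip();  // position = 0, limit = total entry size
            return dest;
        }
    }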
+ */ + if (!(logSource instanceof LogBuffer)) { + assert logSource instanceof FileSource; + throw ce; + } + + /* + * When using a LogBuffer source, we must try to read the entry + * from disk to see if the corruption is persistent. + */ + final LogBuffer logBuffer = (LogBuffer) logSource; + FileHandle fileHandle = null; + long fileLength = -1; + try { + fileHandle = + fileManager.getFileHandle(DbLsn.getFileNumber(lsn)); + fileLength = fileHandle.getFile().length(); + } catch (IOException ioe) { + /* FileNotFound or another IOException was thrown. */ + } + + /* + * If the file does not exist (FileNotFoundException is thrown + * above) or the firstLsn in the buffer does not appear in the + * file (the buffer was not flushed), then the corruption is + * not persistent and we throw a EFE for which isCorrupted + * will return false. + */ + if (fileHandle == null || + fileLength <= + DbLsn.getFileOffset(logBuffer.getFirstLsn())) { + + throw EnvironmentFailureException.unexpectedException( + envImpl, + "Corruption detected in log buffer, " + + "but was not written to disk.", + ce); + } + + /* + * The log entry should have been written to the file. Try + * getting the log entry from the FileSource. If a + * ChecksumException is thrown, the corruption is persistent + * and an EFE with LOG_CHECKSUM is thrown below. + */ + final FileSource fileSource = new FileHandleSource( + fileHandle, readBufferSize, fileManager); + try { + return getLogEntryFromLogSource( + lsn, lastLoggedSize, fileSource, + invisibleReadAllowed); + } finally { + fileSource.release(); + } + } + + } catch (ChecksumException e) { + /* + * WARNING: EFE with LOG_CHECKSUM indicates a persistent corruption + * and therefore LogSource.release must not be called until after + * invalidating the environment (in the finally below). The buffer + * latch prevents the corrupt buffer from being logged by another + * thread. + */ + throw VerifierUtils.createMarkerFileFromException( + RestoreRequired.FailureType.LOG_CHECKSUM, + e, + envImpl, + EnvironmentFailureReason.LOG_CHECKSUM); + + } catch (Error e) { + envImpl.invalidate(e); + throw e; + + } finally { + if (logSource != null) { + logSource.release(); + } + } + } + + public LogEntry getLogEntryHandleFileNotFound(long lsn) + throws DatabaseException { + + try { + return getLogEntry(lsn); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } + } + + public WholeEntry getWholeLogEntryHandleFileNotFound(long lsn) + throws DatabaseException { + + try { + return getWholeLogEntry(lsn); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } + } + + /** + * Throws ChecksumException rather than translating it to + * EnvironmentFailureException and invalidating the environment. Used + * instead of getLogEntry when a ChecksumException is handled specially. + */ + LogEntry getLogEntryAllowChecksumException(long lsn) + throws ChecksumException, FileNotFoundException, DatabaseException { + + final LogSource logSource = getLogSource(lsn); + + try { + return getLogEntryFromLogSource( + lsn, 0, logSource, false /*invisibleReadAllowed*/). 
+ getEntry(); + } finally { + logSource.release(); + } + } + + LogEntry getLogEntryAllowChecksumException(long lsn, + RandomAccessFile file, + int logVersion) + throws ChecksumException, DatabaseException { + + final LogSource logSource = new FileSource( + file, readBufferSize, fileManager, DbLsn.getFileNumber(lsn), + logVersion); + + try { + return getLogEntryFromLogSource( + lsn, 0, logSource, + false /*invisibleReadAllowed*/). + getEntry(); + } finally { + logSource.release(); + } + } + + /** + * Gets log entry from the given source; the caller is responsible for + * calling logSource.release and handling ChecksumException. + * + * Is non-private for unit testing. + * + * @param lsn location of entry in log + * @param lastLoggedSize is the entry size if known, or zero if unknown. + * @param invisibleReadAllowed if true, we will permit the read of invisible + * log entries, and we will adjust the invisible bit so that the checksum + * will validate + * @return log entry that embodies all the objects in the log entry + */ + WholeEntry getLogEntryFromLogSource(long lsn, + int lastLoggedSize, + LogSource logSource, + boolean invisibleReadAllowed) + throws ChecksumException, DatabaseException { + + /* + * Read the log entry header into a byte buffer. If the + * lastLoggedSize is available (non-zero), we can use it to avoid a + * repeat-read further below. Otherwise we use the configured + * LOG_FAULT_READ_SIZE, and a repeat-read may occur if the log + * entry is larger than the buffer. + * + * Even when lastLoggedSize is non-zero, we do not assume that it + * is always accurate, because this is not currently guaranteed + * in corner cases such as transaction aborts. We do the initial + * read with lastLoggedSize. If lastLoggedSize is larger than the + * actual size, we will simply read more bytes than needed. If + * lastLoggedSize is smaller than the actual size, we will do a + * repeat-read further below. + */ + long fileOffset = DbLsn.getFileOffset(lsn); + + ByteBuffer entryBuffer = (lastLoggedSize > 0) ? + logSource.getBytes(fileOffset, lastLoggedSize) : + logSource.getBytes(fileOffset); + + if (entryBuffer.remaining() < LogEntryHeader.MIN_HEADER_SIZE) { + throw new ChecksumException( + "Incomplete log entry header in " + logSource + + " needed=" + LogEntryHeader.MIN_HEADER_SIZE + + " remaining=" + entryBuffer.remaining() + + " lsn=" + DbLsn.getNoFormatString(lsn)); + } + + /* Read the fixed length portion of the header. */ + LogEntryHeader header = new LogEntryHeader( + entryBuffer, logSource.getLogVersion(), lsn); + + /* Read the variable length portion of the header. */ + if (header.isVariableLength()) { + if (entryBuffer.remaining() < + header.getVariablePortionSize()) { + throw new ChecksumException( + "Incomplete log entry header in " + logSource + + " needed=" + header.getVariablePortionSize() + + " remaining=" + entryBuffer.remaining() + + " lsn=" + DbLsn.getNoFormatString(lsn)); + } + header.readVariablePortion(entryBuffer); + } + + ChecksumValidator validator = null; + if (doChecksumOnRead) { + int itemStart = entryBuffer.position(); + + /* + * We're about to read an invisible log entry, which has knowingly + * been left on disk with a bad checksum. Flip the invisible bit in + * the backing byte buffer now, so the checksum will be valid. The + * LogEntryHeader object itself still has the invisible bit set, + * which is useful for debugging. 
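The lastLoggedSize logic described in getLogEntryFromLogSource is a read-with-hint-then-repeat pattern, and the repeated read is what the nRepeatFaultReads counter records. A simplified sketch, assuming the true entry size has already been parsed from the header:

    import java.io.IOException;
    import java.io.RandomAccessFile;

    // Read with the size hint; repeat with the true size on a short read.
    final class FaultReadSketch {
        static byte[] read(RandomAccessFile file, long offset,
                           int sizeHint, int actualSize, int defaultReadSize)
            throws IOException {

            int firstLen = Math.min(
                (sizeHint > 0) ? sizeHint : defaultReadSize, actualSize);
            byte[] buf = new byte[firstLen];
            file.seek(offset);
            file.readFully(buf);

            if (firstLen < actualSize) {
                // The hint (or default) was too small; re-read with the
                // true size. This is the repeat that nRepeatFaultReads counts.
                buf = new byte[actualSize];
                file.seek(offset);
                file.readFully(buf);
            }
            return buf;
        }
    }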
+ */ + if (header.isInvisible()) { + LogEntryHeader.turnOffInvisible + (entryBuffer, itemStart - header.getSize()); + } + + /* Add header to checksum bytes */ + validator = new ChecksumValidator(); + int headerSizeMinusChecksum = header.getSizeMinusChecksum(); + entryBuffer.position(itemStart - + headerSizeMinusChecksum); + validator.update(entryBuffer, headerSizeMinusChecksum); + entryBuffer.position(itemStart); + } + + /* + * Now that we know the size, read the rest of the entry if the first + * read didn't get enough. + */ + int itemSize = header.getItemSize(); + if (entryBuffer.remaining() < itemSize) { + entryBuffer = logSource.getBytes( + fileOffset + header.getSize(), itemSize); + if (entryBuffer.remaining() < itemSize) { + throw new ChecksumException( + "Incomplete log entry item in " + logSource + + " needed=" + itemSize + + " remaining=" + entryBuffer.remaining() + + " lsn=" + DbLsn.getNoFormatString(lsn)); + } + nRepeatFaultReads.increment(); + } + + /* + * Do entry validation. Run checksum before checking the entry type, it + * will be the more encompassing error. + */ + if (doChecksumOnRead) { + /* Check the checksum first. */ + validator.update(entryBuffer, itemSize); + validator.validate(header.getChecksum(), lsn); + } + + /* + * If invisibleReadAllowed == false, we should not be fetching an + * invisible log entry. + */ + if (header.isInvisible() && !invisibleReadAllowed) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + "Read invisible log entry at " + + DbLsn.getNoFormatString(lsn) + " " + header); + } + + assert LogEntryType.isValidType(header.getType()): + "Read non-valid log entry type: " + header.getType(); + + /* Read the entry. */ + LogEntry logEntry = + LogEntryType.findType(header.getType()).getNewLogEntry(); + logEntry.readEntry(envImpl, header, entryBuffer); + + /* For testing only; generate a read io exception. */ + if (readHook != null) { + try { + readHook.doIOHook(); + } catch (IOException e) { + /* Simulate what the FileManager would do. */ + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_READ, e); + } + } + + return new WholeEntry(header, logEntry); + } + + /** + * Fault in the first object in the log entry log entry at this LSN. + * @param lsn location of object in log + * @return the object in the log + */ + public Object getEntry(long lsn) + throws FileNotFoundException, DatabaseException { + + LogEntry entry = getLogEntry(lsn); + return entry.getMainItem(); + } + + public Object getEntryHandleFileNotFound(long lsn) { + LogEntry entry = getLogEntryHandleFileNotFound(lsn); + return entry.getMainItem(); + } + + /** + * Find the LSN, whether in a file or still in the log buffers. + * Is public for unit testing. + */ + public LogSource getLogSource(long lsn) + throws FileNotFoundException, ChecksumException, DatabaseException { + + /* + * First look in log to see if this LSN is still in memory. + */ + LogBuffer logBuffer = logBufferPool.getReadBufferByLsn(lsn); + + if (logBuffer == null) { + try { + /* Not in the in-memory log -- read it off disk. */ + long fileNum = DbLsn.getFileNumber(lsn); + return new FileHandleSource + (fileManager.getFileHandle(fileNum), + readBufferSize, fileManager); + } catch (DatabaseException e) { + /* Add LSN to exception message. 
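The read-side checksum above covers the header bytes after the checksum field plus the item bytes. JE's ChecksumValidator is Adler32-based; the following is a simplified sketch of the validation step, not the real class:

    import java.util.zip.Adler32;

    final class ChecksumSketch {
        // The stored checksum covers the header bytes that follow the
        // checksum field, plus the item bytes.
        static void validate(byte[] headerMinusChecksum, byte[] item,
                             long storedChecksum) {
            Adler32 adler = new Adler32();
            adler.update(headerMinusChecksum, 0, headerMinusChecksum.length);
            adler.update(item, 0, item.length);
            if (adler.getValue() != storedChecksum) {
                throw new IllegalStateException(
                    "checksum mismatch: expected " + storedChecksum +
                    " computed " + adler.getValue());
            }
        }
    }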
*/ + e.addErrorMessage("lsn= " + DbLsn.getNoFormatString(lsn)); + throw e; + } + } + return logBuffer; + } + + /** + * Reads a log entry using a FileSource, and returns null (rather than + * throwing a ChecksumException) if the entry exists in a log buffer that + * was not flushed. + * + * Used to check whether an in-memory corruption is persistent. + * + * @return WholeEntry null means that this lsn does not exist in the file + */ + public WholeEntry getLogEntryDirectFromFile(long lsn) + throws ChecksumException { + + final LogSource logSource; + try { + logSource = getLogSource(lsn); + } catch (FileNotFoundException fnfe) { + return null; + } + + final FileSource fileSource; + + if (logSource instanceof LogBuffer) { + + final FileHandle fileHandle; + + try { + final LogBuffer logBuffer = (LogBuffer) logSource; + final long fileLength; + try { + fileHandle = + fileManager.getFileHandle(DbLsn.getFileNumber(lsn)); + fileLength = fileHandle.getFile().length(); + } catch (IOException ioe) { + /* FileNotFound or another IOException was thrown. */ + return null; + } + + /* + * If the file does not exist (FileNotFoundException is thrown + * above) or the firstLsn in the buffer does not appear in the + * file (the buffer was not flushed), then the corruption is + * not persistent and later we will throw a EFE for which + * isCorrupted will return false. + */ + if (fileLength <= + DbLsn.getFileOffset(logBuffer.getFirstLsn())) { + return null; + } + } finally { + logSource.release(); + } + + /* + * The log entry should have been written to the file. Try + * getting the log entry from the FileSource. If the log entry is + * incomplete, ChecksumException is thrown below and later we will + * throw an EFE with LOG_CHECKSUM. + */ + fileSource = new FileHandleSource( + fileHandle, readBufferSize, fileManager); + } else { + fileSource = (FileSource) logSource; + } + + try { + return getLogEntryFromLogSource( + lsn, 0, fileSource, false /*invisibleReadAllowed*/); + } finally { + fileSource.release(); + } + } + + /** + * Return a log buffer locked for reading, or null if no log buffer + * holds this LSN location. + */ + public LogBuffer getReadBufferByLsn(long lsn) { + + assert DbLsn.getFileOffset(lsn) != 0 : + "Read of lsn " + DbLsn.getNoFormatString(lsn) + + " is illegal because file header entry is not in the log buffer"; + + return logBufferPool.getReadBufferByLsn(lsn); + } + + /** + * Flush all log entries, fsync the log file. + */ + public void flushSync() + throws DatabaseException { + + if (readOnly) { + return; + } + + /* The write queue is flushed by syncLogEnd. */ + flushInternal(false /*flushWriteQueue*/); + fileManager.syncLogEnd(); + } + + /** + * Flush all log entries and write to the log but do not fsync. + */ + public void flushNoSync() + throws DatabaseException { + + if (readOnly) { + return; + } + + flushInternal(true /*flushWriteQueue*/); + } + + /** + * Flush log buffers, but do not flush the write queue. This is used only + * by FsyncManager, just prior to an fsync. When FsyncManager performs the + * fsync, the write queue will be flushed by FileManager.fsyncLogEnd. + */ + void flushBeforeSync() + throws DatabaseException { + + if (readOnly) { + return; + } + + flushInternal(false /*flushWriteQueue*/); + } + + /** + * Flush the dirty log buffers, and optionally the write queue as well. 
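flushNoSync and flushSync above map naturally onto write-durability levels: one hands the log buffers to the operating system, the other also forces them to stable storage. A sketch of that mapping; the Durability enum and LogFlusher interface here are illustrative stand-ins, not the JE API:

    final class FlushPolicySketch {
        enum Durability { NO_SYNC, WRITE_NO_SYNC, SYNC }

        interface LogFlusher {
            void flushNoSync();  // write buffers to the OS, no fsync
            void flushSync();    // write buffers and fsync
        }

        static void onCommit(LogFlusher log, Durability d) {
            switch (d) {
            case WRITE_NO_SYNC:
                log.flushNoSync();  // survives a process crash
                break;
            case SYNC:
                log.flushSync();    // survives an OS/machine crash
                break;
            default:
                break;              // NO_SYNC: stays in the log buffers
            }
        }
    }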
+ * + * Flushing logically means flushing all write buffers to the file system, + * so flushWriteQueue should be false only when this method is called just + * before an fsync (FileManager.syncLogEnd will flush the write queue). + */ + private void flushInternal(boolean flushWriteQueue) + throws DatabaseException { + + assert !readOnly; + + /* + * If we cannot bump the current buffer because there are no + * free buffers, the only recourse is to write all buffers + * under the LWL. + */ + synchronized (logWriteMutex) { + if (!logBufferPool.bumpCurrent(0)) { + logBufferPool.bumpAndWriteDirty(0, flushWriteQueue); + return; + } + } + + /* + * We bumped the current buffer but did not write any buffers above. + * Write the dirty buffers now. Hopefully this is the common case. + */ + logBufferPool.writeDirty(flushWriteQueue); + } + + public StatGroup loadStats(StatsConfig config) + throws DatabaseException { + + endOfLog.set(fileManager.getLastUsedLsn()); + + StatGroup copyStats = stats.cloneGroup(config.getClear()); + copyStats.addAll(logBufferPool.loadStats(config)); + copyStats.addAll(fileManager.loadStats(config)); + copyStats.addAll(grpManager.loadStats(config)); + + return copyStats; + } + + /** + * Return the current number of cache misses in a lightweight fashion, + * without incurring the cost of loading all the stats, and without + * clearing any stats. + */ + public long getNCacheMiss() { + return logBufferPool.getNCacheMiss(); + } + + /** + * For unit testing. + */ + public StatGroup getBufferPoolLatchStats() { + return logBufferPool.getBufferPoolLatchStats(); + } + + /** + * Returns a tracked summary for the given file which will not be flushed. + */ + public TrackedFileSummary getUnflushableTrackedSummary(long file) { + synchronized (logWriteMutex) { + return envImpl.getUtilizationTracker(). + getUnflushableTrackedSummary(file); + } + } + + /** + * Removes the tracked summary for the given file. + */ + public void removeTrackedFile(TrackedFileSummary tfs) { + synchronized (logWriteMutex) { + tfs.reset(); + } + } + + private void updateObsolete( + LogParams params, + UtilizationTracker tracker) { + + if (params.packedObsoleteInfo == null && + params.obsoleteWriteLockInfo == null) { + return; + } + + synchronized (logWriteMutex) { + + /* Count other obsolete info under the log write latch. */ + if (params.packedObsoleteInfo != null) { + params.packedObsoleteInfo.countObsoleteInfo( + tracker, params.nodeDb); + } + + if (params.obsoleteWriteLockInfo != null) { + for (WriteLockInfo info : params.obsoleteWriteLockInfo) { + tracker.countObsoleteNode(info.getAbortLsn(), + null /*type*/, + info.getAbortLogSize(), + info.getDb()); + } + } + } + } + + /** + * Count node as obsolete under the log write latch. This is done here + * because the log write latch is managed here, and all utilization + * counting must be performed under the log write latch. + */ + public void countObsoleteNode(long lsn, + LogEntryType type, + int size, + DatabaseImpl nodeDb, + boolean countExact) { + synchronized (logWriteMutex) { + UtilizationTracker tracker = envImpl.getUtilizationTracker(); + if (countExact) { + tracker.countObsoleteNode(lsn, type, size, nodeDb); + } else { + tracker.countObsoleteNodeInexact(lsn, type, size, nodeDb); + } + } + } + + /** + * A flavor of countObsoleteNode which does not fire an assert if the + * offset has already been counted. Called through the LogManager so that + * this incidence of all utilization counting can be performed under the + * log write latch. 
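The counting methods in this region all share one convention: every utilization update happens while holding the same mutex that serializes log writes, so counts and writes can never interleave. A minimal sketch of that convention, with a stand-in tracker interface:

    // All utilization counting holds the same mutex as log writes.
    final class CountingSketch {
        interface Tracker {
            void countObsolete(long lsn, int size);
        }

        private final Object logWriteMutex = new Object();
        private final Tracker tracker;

        CountingSketch(Tracker tracker) {
            this.tracker = tracker;
        }

        void countObsoleteNode(long lsn, int size) {
            synchronized (logWriteMutex) {  // same latch as writes
                tracker.countObsolete(lsn, size);
            }
        }
    }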
+ */ + public void countObsoleteNodeDupsAllowed(long lsn, + LogEntryType type, + int size, + DatabaseImpl nodeDb) { + synchronized (logWriteMutex) { + UtilizationTracker tracker = envImpl.getUtilizationTracker(); + tracker.countObsoleteNodeDupsAllowed(lsn, type, size, nodeDb); + } + } + + /** + * @see LocalUtilizationTracker#transferToUtilizationTracker + */ + public void transferToUtilizationTracker(LocalUtilizationTracker + localTracker) + throws DatabaseException { + synchronized (logWriteMutex) { + UtilizationTracker tracker = envImpl.getUtilizationTracker(); + localTracker.transferToUtilizationTracker(tracker); + } + } + + /** + * @see DatabaseImpl#countObsoleteDb + */ + public void countObsoleteDb(DatabaseImpl db) { + synchronized (logWriteMutex) { + db.countObsoleteDb(envImpl.getUtilizationTracker(), + DbLsn.NULL_LSN /*mapLnLsn*/); + } + } + + public boolean removeDbFileSummaries(DatabaseImpl db, + Collection fileNums) { + synchronized (logWriteMutex) { + return db.removeDbFileSummaries(fileNums); + } + } + + /** + * @see DatabaseImpl#cloneDbFileSummaries + */ + public Map cloneDbFileSummaries(DatabaseImpl db) { + synchronized (logWriteMutex) { + return db.cloneDbFileSummariesInternal(); + } + } + + /* For unit testing only. */ + public void setReadHook(TestHook hook) { + readHook = hook; + } + + /* For unit testing only. */ + public void setDelayVLSNRegisterHook(TestHook hook) { + delayVLSNRegisterHook = hook; + } + + /* For unit testing only. */ + public void setFlushLogHook(TestHook hook) { + flushHook = hook; + grpManager.setFlushLogHook(hook); + } + + private class LogWriteInfo { + final LogBufferSegment lbs; + final VLSN vlsn; + final long fileOffset; + + LogWriteInfo(final LogBufferSegment bs, + final VLSN vlsn, + final long fileOffset) { + lbs = bs; + this.vlsn = vlsn; + this.fileOffset = fileOffset; + } + } +} diff --git a/src/com/sleepycat/je/log/LogParams.java b/src/com/sleepycat/je/log/LogParams.java new file mode 100644 index 0000000..4ca97f5 --- /dev/null +++ b/src/com/sleepycat/je/log/LogParams.java @@ -0,0 +1,137 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.util.Collection; + +import com.sleepycat.je.cleaner.ExpirationTracker; +import com.sleepycat.je.cleaner.PackedObsoleteInfo; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.txn.WriteLockInfo; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Parameters passed when an item is logged. + * + * This class is used as a simple struct for passing multiple params, and does + * not need getters and setters. + */ +public class LogParams { + + /** + * Database of the node(s), or null if entry is not a node. Used for per- + * database utilization tracking. + */ + public DatabaseImpl nodeDb = null; + + /** + * Whether the log buffer(s) must be written to the file system. + */ + public boolean flushRequired = false; + + /** + * Whether a new log file must be created for containing the logged + * item(s). 
+ */ + public boolean forceNewLogFile = false; + + /** + * Whether an fsync must be performed after writing the item(s) to the log. + */ + public boolean fsyncRequired = false; + + /** + * Whether the write should be counted as background IO when throttling of + * background IO is configured. + */ + public boolean backgroundIO = false; + + /** + * Set of obsolete LSNs which are counted when logging a commit entry. + * This information includes the DatabaseImpl for each LSN, and the nodeDb + * field does not apply. + */ + public Collection<WriteLockInfo> obsoleteWriteLockInfo = null; + + /** + * Sequence of packed obsolete info which is counted when logging a + * non-provisional IN. This information is for a single database, the + * nodeDb. The nodeDb is passed as a parameter to countObsoleteNode when + * adding this information to the global tracker. + */ + public PackedObsoleteInfo packedObsoleteInfo = null; + + /** + * Whether it is possible that the previous version of this log + * entry is already marked obsolete. In general, the latest version + * of any IN or LN is alive, so that logging a new version requires making + * the last version obsolete. Utilization tracking generally asserts + * that this last version is not already obsolete. + * + * When partial rollbacks are used, some of the original intermediate + * versions may have been pruned away, leaving a current previous version + * that was already marked obsolete. For example, a transaction might have + * done: + * + * LNA (version 1) + * LNA (version 2) + * -- now version 1 is obsolete + * -- if we do a partial rollback to version 1, version 2 is removed + * -- we start retransmitting + * LNA (version 2) + * + * When we log this LNA (version 2), the previous LNA (version 1) is + * already obsolete. obsoleteDupsAllowed supports this case. + */ + public boolean obsoleteDupsAllowed = false; + + /** + * Object to be marshalled and logged. + */ + public LogEntry entry = null; + + /** + * The previous version of the node to be counted as obsolete, or NULL_LSN + * if the entry is not a node or has no old LSN. + */ + public long oldLsn = DbLsn.NULL_LSN; + + /** + * For LNs, oldSize should be set along with oldLsn before logging. It + * should normally be obtained by calling BIN.getLastLoggedSize. + */ + public int oldSize = 0; + + /** + * Another LSN to be counted as obsolete in the LogParams.nodeDb database, + * or NULL_LSN. Used for obsolete BIN-deltas. + */ + public long auxOldLsn = DbLsn.NULL_LSN; + + /** + * Whether the logged entry should be processed during recovery. + */ + public Provisional provisional = null; + + /** + * Whether the logged entry should be replicated. + */ + public ReplicationContext repContext = null; + + /* Fields used internally by the log method. */ + boolean switchedLogBuffer = false; + ExpirationTracker expirationTrackerToUse = null; + ExpirationTracker expirationTrackerCompleted = null; +} diff --git a/src/com/sleepycat/je/log/LogSource.java b/src/com/sleepycat/je/log/LogSource.java new file mode 100644 index 0000000..096e4f8 --- /dev/null +++ b/src/com/sleepycat/je/log/LogSource.java @@ -0,0 +1,48 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; + +/** + * A class that implements LogSource can return portions of the log. + * Is public for unit testing. + */ +public interface LogSource { + + /** + * We're done with this log source. + */ + void release() throws DatabaseException; + + /** + * Fill the destination byte array with bytes. The offset indicates the + * absolute log file position. + */ + ByteBuffer getBytes(long fileOffset) throws DatabaseException; + + /** + * Fill the destination byte array with the requested number of bytes. The + * offset indicates the absolute position in the log file. + */ + ByteBuffer getBytes(long fileOffset, int numBytes) + throws DatabaseException; + + /** + * Returns the log version of the log entries from this source. + */ + int getLogVersion(); +} diff --git a/src/com/sleepycat/je/log/LogStatDefinition.java b/src/com/sleepycat/je/log/LogStatDefinition.java new file mode 100644 index 0000000..900535a --- /dev/null +++ b/src/com/sleepycat/je/log/LogStatDefinition.java @@ -0,0 +1,388 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * Per-stat Metadata for JE FileManager, FSyncManager, LogManager and + * LogBufferPool statistics. + */ +public class LogStatDefinition { + + /* Group definition for all log statistics. */ + public static final String GROUP_NAME = "I/O"; + public static final String GROUP_DESC = + "The I/O portion of the append-only storage system includes " + + "access to data files and caching of file handles."; + + /* Group definition for LogBufferPool statistics. */ + public static final String LBF_GROUP_NAME = "LogBufferPool"; + public static final String LBF_GROUP_DESC = "LogBufferPool statistics"; + + /* Group definition for FileManager statistics. */ + public static final String FILEMGR_GROUP_NAME = "FileManager"; + public static final String FILEMGR_GROUP_DESC = "FileManager statistics"; + + /* Group definition for FSyncManager statistics. */ + public static final String FSYNCMGR_GROUP_NAME = "FSyncManager"; + public static final String FSYNCMGR_GROUP_DESC = "FSyncManager statistics"; + + /* Group definition for GrpCommitManager statistics. */ + public static final String GRPCOMMITMGR_GROUP_NAME = "GrpCommitManager"; + public static final String GRPCOMMITMGR_GROUP_DESC = + "GrpCommitManager statistics"; + + /* The following stat definitions are used in FileManager. 
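To make the LogSource contract above concrete, here is a trivial hypothetical implementation over an in-memory byte array. The real sources are LogBuffer and FileSource; this one exists only for illustration:

    import java.nio.ByteBuffer;

    // Hypothetical in-memory source; real ones are LogBuffer/FileSource.
    final class ArrayLogSource {
        private final byte[] data;
        private final int logVersion;

        ArrayLogSource(byte[] data, int logVersion) {
            this.data = data;
            this.logVersion = logVersion;
        }

        ByteBuffer getBytes(long fileOffset) {
            return getBytes(fileOffset, data.length - (int) fileOffset);
        }

        ByteBuffer getBytes(long fileOffset, int numBytes) {
            return ByteBuffer.wrap(data, (int) fileOffset, numBytes).slice();
        }

        int getLogVersion() {
            return logVersion;
        }

        void release() {
            // Nothing to latch or unpin in this in-memory sketch.
        }
    }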
*/ + public static final String FILEMGR_RANDOM_READS_NAME = + "nRandomReads"; + public static final String FILEMGR_RANDOM_READS_DESC = + "Number of disk reads which required repositioning the disk head " + + "more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_RANDOM_READS = + new StatDefinition( + FILEMGR_RANDOM_READS_NAME, + FILEMGR_RANDOM_READS_DESC); + + public static final String FILEMGR_RANDOM_WRITES_NAME = + "nRandomWrites"; + public static final String FILEMGR_RANDOM_WRITES_DESC = + "Number of disk writes which required repositioning the disk head by" + + " more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_RANDOM_WRITES = + new StatDefinition( + FILEMGR_RANDOM_WRITES_NAME, + FILEMGR_RANDOM_WRITES_DESC); + + public static final String FILEMGR_SEQUENTIAL_READS_NAME = + "nSequentialReads"; + public static final String FILEMGR_SEQUENTIAL_READS_DESC = + "Number of disk reads which did not require repositioning the disk " + + "head more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_SEQUENTIAL_READS = + new StatDefinition( + FILEMGR_SEQUENTIAL_READS_NAME, + FILEMGR_SEQUENTIAL_READS_DESC); + + public static final String FILEMGR_SEQUENTIAL_WRITES_NAME = + "nSequentialWrites"; + public static final String FILEMGR_SEQUENTIAL_WRITES_DESC = + "Number of disk writes which did not require repositioning the disk " + + "head by more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_SEQUENTIAL_WRITES = + new StatDefinition( + FILEMGR_SEQUENTIAL_WRITES_NAME, + FILEMGR_SEQUENTIAL_WRITES_DESC); + + public static final String FILEMGR_RANDOM_READ_BYTES_NAME = + "nRandomReadBytes"; + public static final String FILEMGR_RANDOM_READ_BYTES_DESC = + "Number of bytes read which required repositioning the disk head " + + "more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_RANDOM_READ_BYTES = + new StatDefinition( + FILEMGR_RANDOM_READ_BYTES_NAME, + FILEMGR_RANDOM_READ_BYTES_DESC); + + public static final String FILEMGR_RANDOM_WRITE_BYTES_NAME = + "nRandomWriteBytes"; + public static final String FILEMGR_RANDOM_WRITE_BYTES_DESC = + "Number of bytes written which required repositioning the disk head " + + "more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_RANDOM_WRITE_BYTES = + new StatDefinition( + FILEMGR_RANDOM_WRITE_BYTES_NAME, + FILEMGR_RANDOM_WRITE_BYTES_DESC); + + public static final String FILEMGR_SEQUENTIAL_READ_BYTES_NAME = + "nSequentialReadBytes"; + public static final String FILEMGR_SEQUENTIAL_READ_BYTES_DESC = + "Number of bytes read which did not require repositioning the disk " + + "head more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_SEQUENTIAL_READ_BYTES = + new StatDefinition( + FILEMGR_SEQUENTIAL_READ_BYTES_NAME, + FILEMGR_SEQUENTIAL_READ_BYTES_DESC); + + public static final String FILEMGR_SEQUENTIAL_WRITE_BYTES_NAME = + "nSequentialWriteBytes"; + public static final String FILEMGR_SEQUENTIAL_WRITE_BYTES_DESC = + "Number of bytes written which did not require repositioning the " + + "disk head more than 1MB from the previous file position."; + public static final StatDefinition FILEMGR_SEQUENTIAL_WRITE_BYTES = + new StatDefinition( + FILEMGR_SEQUENTIAL_WRITE_BYTES_NAME, + FILEMGR_SEQUENTIAL_WRITE_BYTES_DESC); + + public static final String FILEMGR_FILE_OPENS_NAME = + "nFileOpens"; + public
static final String FILEMGR_FILE_OPENS_DESC = + "Number of times a log file has been opened."; + public static final StatDefinition FILEMGR_FILE_OPENS = + new StatDefinition( + FILEMGR_FILE_OPENS_NAME, + FILEMGR_FILE_OPENS_DESC); + + public static final String FILEMGR_OPEN_FILES_NAME = + "nOpenFiles"; + public static final String FILEMGR_OPEN_FILES_DESC = + "Number of files currently open in the file cache."; + public static final StatDefinition FILEMGR_OPEN_FILES = + new StatDefinition( + FILEMGR_OPEN_FILES_NAME, + FILEMGR_OPEN_FILES_DESC, + StatType.CUMULATIVE); + + public static final String FILEMGR_BYTES_READ_FROM_WRITEQUEUE_NAME = + "nBytesReadFromWriteQueue"; + public static final String FILEMGR_BYTES_READ_FROM_WRITEQUEUE_DESC = + "Number of bytes read to fulfill file read operations by reading out " + + "of the pending write queue."; + public static final StatDefinition FILEMGR_BYTES_READ_FROM_WRITEQUEUE = + new StatDefinition( + FILEMGR_BYTES_READ_FROM_WRITEQUEUE_NAME, + FILEMGR_BYTES_READ_FROM_WRITEQUEUE_DESC); + + public static final String FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE_NAME = + "nBytesWrittenFromWriteQueue"; + public static final String FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE_DESC = + "Number of bytes written from the pending write queue."; + public static final StatDefinition FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE = + new StatDefinition( + FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE_NAME, + FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE_DESC); + + public static final String FILEMGR_READS_FROM_WRITEQUEUE_NAME = + "nReadsFromWriteQueue"; + public static final String FILEMGR_READS_FROM_WRITEQUEUE_DESC = + "Number of file read operations which were fulfilled by reading out " + + "of the pending write queue."; + public static final StatDefinition FILEMGR_READS_FROM_WRITEQUEUE = + new StatDefinition( + FILEMGR_READS_FROM_WRITEQUEUE_NAME, + FILEMGR_READS_FROM_WRITEQUEUE_DESC); + + public static final String FILEMGR_WRITES_FROM_WRITEQUEUE_NAME = + "nWritesFromWriteQueue"; + public static final String FILEMGR_WRITES_FROM_WRITEQUEUE_DESC = + "Number of file write operations executed from the pending write " + + "queue."; + public static final StatDefinition FILEMGR_WRITES_FROM_WRITEQUEUE = + new StatDefinition( + FILEMGR_WRITES_FROM_WRITEQUEUE_NAME, + FILEMGR_WRITES_FROM_WRITEQUEUE_DESC); + + public static final String FILEMGR_WRITEQUEUE_OVERFLOW_NAME = + "nWriteQueueOverflow"; + public static final String FILEMGR_WRITEQUEUE_OVERFLOW_DESC = + "Number of write operations which would overflow the Write Queue."; + public static final StatDefinition FILEMGR_WRITEQUEUE_OVERFLOW = + new StatDefinition( + FILEMGR_WRITEQUEUE_OVERFLOW_NAME, + FILEMGR_WRITEQUEUE_OVERFLOW_DESC); + + public static final String FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES_NAME = + "nWriteQueueOverflowFailures"; + public static final String FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES_DESC = + "Number of write operations which would overflow the Write Queue and " + + "could not be queued."; + public static final StatDefinition FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES = + new StatDefinition( + FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES_NAME, + FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES_DESC); + + /* The following stat definitions are used in FSyncManager. */ + public static final String FSYNCMGR_FSYNCS_NAME = + "nFSyncs"; + public static final String FSYNCMGR_FSYNCS_DESC = + "Number of fsyncs issued through the group commit manager for actions" + + " such as transaction commits and checkpoints. 
A subset of " + "nLogFSyncs."; + public static final StatDefinition FSYNCMGR_FSYNCS = + new StatDefinition( + FSYNCMGR_FSYNCS_NAME, + FSYNCMGR_FSYNCS_DESC); + + public static final String FSYNCMGR_FSYNC_REQUESTS_NAME = + "nFSyncRequests"; + public static final String FSYNCMGR_FSYNC_REQUESTS_DESC = + "Number of fsyncs requested through the group commit manager for " + + "actions such as transaction commits and checkpoints."; + public static final StatDefinition FSYNCMGR_FSYNC_REQUESTS = + new StatDefinition( + FSYNCMGR_FSYNC_REQUESTS_NAME, + FSYNCMGR_FSYNC_REQUESTS_DESC); + + public static final String FSYNCMGR_TIMEOUTS_NAME = + "nGrpCommitTimeouts"; + public static final String FSYNCMGR_TIMEOUTS_DESC = + "Number of requests submitted to the group commit manager for actions" + + " such as transaction commits and checkpoints which timed out."; + public static final StatDefinition FSYNCMGR_TIMEOUTS = + new StatDefinition( + FSYNCMGR_TIMEOUTS_NAME, + FSYNCMGR_TIMEOUTS_DESC); + + public static final String FILEMGR_LOG_FSYNCS_NAME = + "nLogFSyncs"; + public static final String FILEMGR_LOG_FSYNCS_DESC = + "Total number of fsyncs of the JE log. This includes those fsyncs " + + "recorded under the nFSyncs stat."; + public static final StatDefinition FILEMGR_LOG_FSYNCS = + new StatDefinition( + FILEMGR_LOG_FSYNCS_NAME, + FILEMGR_LOG_FSYNCS_DESC); + + /* The following stat definitions are used in GrpCommitManager. */ + public static final String GRPCMGR_FSYNC_TIME_NAME = + "nFSyncTime"; + public static final String GRPCMGR_FSYNC_TIME_DESC = + "Total fsync time in ms."; + public static final StatDefinition GRPCMGR_FSYNC_TIME = + new StatDefinition( + GRPCMGR_FSYNC_TIME_NAME, + GRPCMGR_FSYNC_TIME_DESC); + + public static final String GRPCMGR_FSYNC_MAX_TIME_NAME = + "nFSyncMaxTime"; + public static final String GRPCMGR_FSYNC_MAX_TIME_DESC = + "Maximum fsync time in ms."; + public static final StatDefinition GRPCMGR_FSYNC_MAX_TIME = + new StatDefinition( + GRPCMGR_FSYNC_MAX_TIME_NAME, + GRPCMGR_FSYNC_MAX_TIME_DESC); + + public static final String GRPCMGR_N_GROUP_COMMIT_REQUESTS_NAME = + "nGroupCommitRequests"; + public static final String GRPCMGR_N_GROUP_COMMIT_REQUESTS_DESC = + "Number of group commit requests."; + public static final StatDefinition GRPCMGR_N_GROUP_COMMIT_REQUESTS = + new StatDefinition( + GRPCMGR_N_GROUP_COMMIT_REQUESTS_NAME, + GRPCMGR_N_GROUP_COMMIT_REQUESTS_DESC); + + public static final String GRPCMGR_N_GROUP_COMMIT_WAITS_NAME = + "nGroupCommitWaits"; + public static final String GRPCMGR_N_GROUP_COMMIT_WAITS_DESC = + "Number of group commit leader waits."; + public static final StatDefinition GRPCMGR_N_GROUP_COMMIT_WAITS = + new StatDefinition( + GRPCMGR_N_GROUP_COMMIT_WAITS_NAME, + GRPCMGR_N_GROUP_COMMIT_WAITS_DESC); + + public static final String GRPCMGR_N_LOG_MAX_GROUP_COMMIT_NAME = + "nLogMaxGroupCommitThreshold"; + public static final String GRPCMGR_N_LOG_MAX_GROUP_COMMIT_DESC = + "Number of group commits that were initiated due to the group commit " + + "size threshold being exceeded."; + public static final StatDefinition GRPCMGR_N_LOG_MAX_GROUP_COMMIT = + new StatDefinition( + GRPCMGR_N_LOG_MAX_GROUP_COMMIT_NAME, + GRPCMGR_N_LOG_MAX_GROUP_COMMIT_DESC); + + public static final String GRPCMGR_N_LOG_INTERVAL_EXCEEDED_NAME = + "nLogIntervalExceeded"; + public static final String GRPCMGR_N_LOG_INTERVAL_EXCEEDED_DESC = + "Number of group commits that were initiated due to the group commit " + + "time interval being exceeded."; + public static final StatDefinition
GRPCMGR_N_LOG_INTERVAL_EXCEEDED = + new StatDefinition( + GRPCMGR_N_LOG_INTERVAL_EXCEEDED_NAME, + GRPCMGR_N_LOG_INTERVAL_EXCEEDED_DESC); + + /* The following stat definitions are used in LogManager. */ + public static final String LOGMGR_REPEAT_FAULT_READS_NAME = + "nRepeatFaultReads"; + public static final String LOGMGR_REPEAT_FAULT_READS_DESC = + "Number of reads which had to be repeated when faulting in an object " + + "from disk because the read chunk size controlled by je.log" + + ".faultReadSize is too small."; + public static final StatDefinition LOGMGR_REPEAT_FAULT_READS = + new StatDefinition( + LOGMGR_REPEAT_FAULT_READS_NAME, + LOGMGR_REPEAT_FAULT_READS_DESC); + + public static final String LOGMGR_TEMP_BUFFER_WRITES_NAME = + "nTempBufferWrites"; + public static final String LOGMGR_TEMP_BUFFER_WRITES_DESC = + "Number of writes which had to be completed using the temporary " + + "marshalling buffer because the fixed size log buffers specified " + + "by je.log.totalBufferBytes and je.log.numBuffers were not large " + + "enough."; + public static final StatDefinition LOGMGR_TEMP_BUFFER_WRITES = + new StatDefinition( + LOGMGR_TEMP_BUFFER_WRITES_NAME, + LOGMGR_TEMP_BUFFER_WRITES_DESC); + + public static final String LOGMGR_END_OF_LOG_NAME = + "endOfLog"; + public static final String LOGMGR_END_OF_LOG_DESC = + "The location of the next entry to be written to the log."; + public static final StatDefinition LOGMGR_END_OF_LOG = + new StatDefinition( + LOGMGR_END_OF_LOG_NAME, + LOGMGR_END_OF_LOG_DESC, + StatType.CUMULATIVE); + + public static final String LBFP_NO_FREE_BUFFER_NAME = + "nNoFreeBuffer"; + public static final String LBFP_NO_FREE_BUFFER_DESC = + "Number of requests to get a free buffer that force a log write."; + public static final StatDefinition LBFP_NO_FREE_BUFFER = + new StatDefinition( + LBFP_NO_FREE_BUFFER_NAME, + LBFP_NO_FREE_BUFFER_DESC); + + /* The following stat definitions are used in LogBufferPool. 
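(Note that nNoFreeBuffer, defined just above, is also a LogBufferPool stat.)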
*/ + public static final String LBFP_NOT_RESIDENT_NAME = + "nNotResident"; + public static final String LBFP_NOT_RESIDENT_DESC = + "Number of request for database objects not contained within the in " + + "memory data structure."; + public static final StatDefinition LBFP_NOT_RESIDENT = + new StatDefinition( + LBFP_NOT_RESIDENT_NAME, + LBFP_NOT_RESIDENT_DESC); + + public static final String LBFP_MISS_NAME = + "nCacheMiss"; + public static final String LBFP_MISS_DESC = + "Total number of requests for database objects which were not in " + + "memory."; + public static final StatDefinition LBFP_MISS = + new StatDefinition( + LBFP_MISS_NAME, + LBFP_MISS_DESC); + + public static final String LBFP_LOG_BUFFERS_NAME = + "nLogBuffers"; + public static final String LBFP_LOG_BUFFERS_DESC = + "Number of log buffers currently instantiated."; + public static final StatDefinition LBFP_LOG_BUFFERS = + new StatDefinition( + LBFP_LOG_BUFFERS_NAME, + LBFP_LOG_BUFFERS_DESC, + StatType.CUMULATIVE); + + public static final String LBFP_BUFFER_BYTES_NAME = + "bufferBytes"; + public static final String LBFP_BUFFER_BYTES_DESC = + "Total memory currently consumed by log buffers, in bytes."; + public static final StatDefinition LBFP_BUFFER_BYTES = + new StatDefinition( + LBFP_BUFFER_BYTES_NAME, + LBFP_BUFFER_BYTES_DESC, + StatType.CUMULATIVE); +} diff --git a/src/com/sleepycat/je/log/LogUtils.java b/src/com/sleepycat/je/log/LogUtils.java new file mode 100644 index 0000000..35580e1 --- /dev/null +++ b/src/com/sleepycat/je/log/LogUtils.java @@ -0,0 +1,641 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +import javax.transaction.xa.Xid; + +import com.sleepycat.je.utilint.Timestamp; +import com.sleepycat.util.PackedInteger; +import com.sleepycat.utilint.StringUtils; + +/** + * This class holds convenience methods for marshalling internal JE data to and + * from the log. + */ +public class LogUtils { + /* Storage sizes for int, long in log. */ + public static final int SHORT_BYTES = 2; + public static final int INT_BYTES = 4; + public static final int LONG_BYTES = 8; + public static final int UNSIGNED_INT_BYTES = 4; + + private static final boolean DEBUG = false; + + /* + * We can return the same byte[] for 0 length arrays. + */ + public static final byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0]; + + /* + * The je.logCharset system property can be specified when running + * DbPrintLog to work around the charset issue in JE 5.0 and earlier (see + * [#15296] below). For example, because of this issue, on a z/OS system + * the trace messages and other internal strings (such as the checkpoint + * invoker) are stored in the log in EBCDIC encoding. The following system + * property allows such strings to be viewed correctly in the DbPrintLog + * output: + * -Dje.logCharset=IBM1047 + * + * WARNING: Do not specify this property when running an application that + * writes to the log. 
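+ * (As an illustrative, hypothetical command line for such a read-only
+ * dump -- the environment home path is a placeholder:
+ * java -Dje.logCharset=IBM1047 com.sleepycat.je.util.DbPrintLog -h <envHome>.)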
It is only for reading a log (such as with + * DbPrintLog) that was written with a non-ANSI-based default charset. + */ + private static Charset logCharset = null; + static { + final String charsetName = System.getProperty("je.logCharset"); + if (charsetName != null && charsetName.length() > 0) { + try { + logCharset = Charset.forName(charsetName); + } catch (RuntimeException e) { + e.printStackTrace(); + } + } + } + + /** + * Marshall a long into the next 4 bytes in this buffer. Necessary when the + * long is used to hold an unsigned int. + */ + public static void writeUnsignedInt(ByteBuffer buf, long value) { + buf.put((byte) (value >>> 0)); + buf.put((byte) (value >>> 8)); + buf.put((byte) (value >>> 16)); + buf.put((byte) (value >>> 24)); + } + + /** + * Unmarshall the next four bytes which hold an unsigned int into a long. + */ + public static long readUnsignedInt(ByteBuffer buf) { + long ret = (buf.get() & 0xFFL) << 0; + ret += (buf.get() & 0xFFL) << 8; + ret += (buf.get() & 0xFFL) << 16; + ret += (buf.get() & 0xFFL) << 24; + return ret; + } + + /* + * Marshall objects. + */ + + /** + * Write a short into the log. + */ + public static void writeShort(ByteBuffer logBuf, short i) { + byte b = (byte) ((i >> 0) & 0xff); + logBuf.put(b); + b = (byte) ((i >> 8) & 0xff); + logBuf.put(b); + } + + /** + * Read a short from the log. + */ + public static short readShort(ByteBuffer logBuf) { + return (short) (((logBuf.get() & 0xFF) << 0) + + ((logBuf.get() & 0xFF) << 8)); + } + + /** + * Read an int from the log in either packed or unpacked format. + */ + public static int readInt(ByteBuffer logBuf, boolean unpacked) { + if (unpacked) { + return readInt(logBuf); + } else { + return readPackedInt(logBuf); + } + } + + /** + * Write an int into the log. + */ + public static void writeInt(ByteBuffer logBuf, int i) { + byte b = (byte) ((i >> 0) & 0xff); + logBuf.put(b); + b = (byte) ((i >> 8) & 0xff); + logBuf.put(b); + b = (byte) ((i >> 16) & 0xff); + logBuf.put(b); + b = (byte) ((i >> 24) & 0xff); + logBuf.put(b); + } + + /** + * Read a int from the log. + */ + public static int readInt(ByteBuffer logBuf) { + int ret = (logBuf.get() & 0xFF) << 0; + ret += (logBuf.get() & 0xFF) << 8; + ret += (logBuf.get() & 0xFF) << 16; + ret += (logBuf.get() & 0xFF) << 24; + return ret; + } + + /** + * @return log storage size for an int. + */ + public static int getIntLogSize() { + return INT_BYTES; + } + + /** + * Write a packed int into the log. + */ + public static void writePackedInt(ByteBuffer logBuf, int i) { + int off = logBuf.arrayOffset(); + int newPos = + PackedInteger.writeInt(logBuf.array(), + logBuf.position() + off, i); + logBuf.position(newPos - off); + } + + /** + * Read a packed int from the log. + */ + public static int readPackedInt(ByteBuffer logBuf) { + byte a[] = logBuf.array(); + int oldPos = logBuf.position(); + int off = logBuf.arrayOffset() + oldPos; + int len = PackedInteger.getReadIntLength(a, off); + int val = PackedInteger.readInt(a, off); + logBuf.position(oldPos + len); + return val; + } + + /** + * @return log storage size for a packed int. + */ + public static int getPackedIntLogSize(int i) { + return PackedInteger.getWriteIntLength(i); + } + + /** + * Write an int into the log in MSB order. Used for ordered keys. 
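+ * As a concrete illustration: the value 0x01020304 is stored as the byte
+ * sequence 01 02 03 04, so comparing two serialized keys byte-by-byte
+ * (unsigned) gives the same order as an unsigned comparison of the
+ * original values, which is why this format, rather than the
+ * little-endian writeInt format above, is used for ordered keys.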
+ */ + public static void writeIntMSB(ByteBuffer logBuf, int i) { + byte b = (byte) ((i >> 24) & 0xff); + logBuf.put(b); + b = (byte) ((i >> 16) & 0xff); + logBuf.put(b); + b = (byte) ((i >> 8) & 0xff); + logBuf.put(b); + b = (byte) ((i >> 0) & 0xff); + logBuf.put(b); + } + + /** + * Read a int from the log in MSB order. Used for ordered keys. + */ + public static int readIntMSB(ByteBuffer logBuf) { + int ret = (logBuf.get() & 0xFF) << 24; + ret += (logBuf.get() & 0xFF) << 16; + ret += (logBuf.get() & 0xFF) << 8; + ret += (logBuf.get() & 0xFF) << 0; + return ret; + } + + /** + * Write a long into the log. + */ + public static void writeLong(ByteBuffer logBuf, long l) { + byte b =(byte) (l >>> 0); + logBuf.put(b); + b =(byte) (l >>> 8); + logBuf.put(b); + b =(byte) (l >>> 16); + logBuf.put(b); + b =(byte) (l >>> 24); + logBuf.put(b); + b =(byte) (l >>> 32); + logBuf.put(b); + b =(byte) (l >>> 40); + logBuf.put(b); + b =(byte) (l >>> 48); + logBuf.put(b); + b =(byte) (l >>> 56); + logBuf.put(b); + } + + /** + * Read an int from the log in either packed or unpacked format. + */ + public static long readLong(ByteBuffer logBuf, boolean unpacked) { + if (unpacked) { + return readLong(logBuf); + } else { + return readPackedLong(logBuf); + } + } + + /** + * Read a long from the log. + */ + public static long readLong(ByteBuffer logBuf) { + long ret = (logBuf.get() & 0xFFL) << 0; + ret += (logBuf.get() & 0xFFL) << 8; + ret += (logBuf.get() & 0xFFL) << 16; + ret += (logBuf.get() & 0xFFL) << 24; + ret += (logBuf.get() & 0xFFL) << 32; + ret += (logBuf.get() & 0xFFL) << 40; + ret += (logBuf.get() & 0xFFL) << 48; + ret += (logBuf.get() & 0xFFL) << 56; + return ret; + } + + /** + * @return log storage size for a long. + */ + public static int getLongLogSize() { + return LONG_BYTES; + } + + /** + * Write a packed long into the log. + */ + public static void writePackedLong(ByteBuffer logBuf, long l) { + int off = logBuf.arrayOffset(); + int newPos = + PackedInteger.writeLong(logBuf.array(), + logBuf.position() + off, l); + logBuf.position(newPos - off); + } + + /** + * Read a packed long from the log. + */ + public static long readPackedLong(ByteBuffer logBuf) { + byte a[] = logBuf.array(); + int oldPos = logBuf.position(); + int off = logBuf.arrayOffset() + oldPos; + int len = PackedInteger.getReadLongLength(a, off); + long val = PackedInteger.readLong(a, off); + logBuf.position(oldPos + len); + return val; + } + + /** + * @return log storage size for a packed long. + */ + public static int getPackedLongLogSize(long l) { + return PackedInteger.getWriteLongLength(l); + } + + /** + * Write a byte array into the log. The size is stored first as an integer. + */ + public static void writeByteArray(ByteBuffer logBuf, byte[] b) { + + if (b == null) { + writePackedInt(logBuf, -1); + return; + } + + /* Write the length. */ + writePackedInt(logBuf, b.length); + + /* Add the data itself. */ + logBuf.put(b); // data + } + + /** + * Read a byte array from the log. The size is stored first as an integer. 
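+ * A null array is written as a packed length of -1, and a zero length is
+ * read back as the shared ZERO_LENGTH_BYTE_ARRAY instance. As a minimal
+ * worked example (assuming the packed format stores small lengths in a
+ * single byte), writeByteArray(buf, new byte[]{7}) occupies two bytes:
+ * the packed length 1 followed by the data byte 0x07.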
+ */ + public static byte[] readByteArray(ByteBuffer logBuf, boolean unpacked) { + int size = readInt(logBuf, unpacked); + if (DEBUG) { + System.out.println("pos = " + logBuf.position() + + " byteArray is " + size + " on read"); + } + + if (size < 0) { + return null; + } + + if (size == 0) { + return ZERO_LENGTH_BYTE_ARRAY; + } + + byte[] b = new byte[size]; + logBuf.get(b); // read it out + return b; + } + + /** + * @return log storage size for a byteArray + */ + public static int getByteArrayLogSize(byte[] b) { + if (b == null) { + return LogUtils.getPackedIntLogSize(-1); + } else { + int len = b.length; + return LogUtils.getPackedIntLogSize(len) + len; + } + } + + /** + * Write a byte array into the log. No size is stored. + */ + public static void writeBytesNoLength(ByteBuffer logBuf, byte[] b) { + + /* Add the data itself. */ + logBuf.put(b); + } + + /** + * Read a byte array from the log. The size is not stored. + */ + public static byte[] readBytesNoLength(ByteBuffer logBuf, int size) { + if (DEBUG) { + System.out.println("pos = " + logBuf.position() + + " byteArray is " + size + " on read"); + } + + if (size == 0) { + return ZERO_LENGTH_BYTE_ARRAY; + } + + byte[] b = new byte[size]; + logBuf.get(b); // read it out + return b; + } + + /** + * Write a string into the log. The size is stored first as an integer. + */ + public static void writeString(ByteBuffer logBuf, + String stringVal) { + writeByteArray(logBuf, StringUtils.toUTF8(stringVal)); + } + + /** + * Read a string from the log. The size is stored first as an integer. + */ + public static String readString(ByteBuffer logBuf, + boolean unpacked, + int entryVersion) { + final byte[] bytes = readByteArray(logBuf, unpacked); + + /* + * Use logCharset only prior to version 9, since in version 9 + * UTF8 is always used. See logCharset for details. + */ + if (entryVersion >= 9) { + return StringUtils.fromUTF8(bytes); + } + if (logCharset != null) { + return new String(bytes, logCharset); + } + return new String(bytes); + } + + /** + * @return log storage size for a string + */ + public static int getStringLogSize(String s) { + return getByteArrayLogSize(StringUtils.toUTF8(s)); + } + + /** + * Write a timestamp into the log. + */ + public static void writeTimestamp(ByteBuffer logBuf, Timestamp time) { + writePackedLong(logBuf, time.getTime()); + } + + /** + * Read a timestamp from the log. + */ + public static Timestamp readTimestamp(ByteBuffer logBuf, + boolean unpacked) { + long millis = readLong(logBuf, unpacked); + return new Timestamp(millis); + } + + /** + * @return log storage size for a timestamp + */ + public static int getTimestampLogSize(Timestamp time) { + return PackedInteger.getWriteLongLength(time.getTime()); + } + + /** + * Write a boolean into the log. + */ + public static void writeBoolean(ByteBuffer logBuf, boolean bool) { + byte val = bool ? (byte) 1 : (byte) 0; + logBuf.put(val); + } + + /** + * Read a boolean from the log. + */ + public static boolean readBoolean(ByteBuffer logBuf) { + byte val = logBuf.get(); + return (val == (byte) 1) ? true : false; + } + + /** + * @return log storage size for a boolean. + */ + public static int getBooleanLogSize() { + return 1; + } + + /* + * Dumping support. + */ + public static boolean dumpBoolean(ByteBuffer itemBuffer, + StringBuilder sb, + String tag) { + sb.append("<"); + sb.append(tag); + sb.append(" exists = \""); + boolean exists = readBoolean(itemBuffer); + sb.append(exists); + if (exists) { + sb.append("\">"); + } else { + /* Close off the tag, we're done. 
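The intent is well-formed XML: a true value leaves the element open so the caller can append nested content, while a false value is emitted as a self-closing element.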
*/ + sb.append("\"/>"); + } + return exists; + } + + /** + * The byte[]'s in Xid's are known to be 255 or less in length. So instead + * of using read/writeByteArray(), we can save 6 bytes per record by making + * the byte[] length be 1 byte instead of 4. + */ + public static int getXidSize(Xid xid) { + byte[] gid = xid.getGlobalTransactionId(); + byte[] bqual = xid.getBranchQualifier(); + return + INT_BYTES + // FormatId + 1 + // gxid length byte + 1 + // bqual length byte + (gid == null ? 0 : gid.length) + // gid bytes + (bqual == null ? 0 : bqual.length); // bqual bytes + } + + /* + * Xid.gid[] and bqual[] can't be longer than 64 bytes so we can get away + * with writing the length in one byte, rather than 4. + */ + public static void writeXid(ByteBuffer logBuf, Xid xid) { + byte[] gid = xid.getGlobalTransactionId(); + byte[] bqual = xid.getBranchQualifier(); + + writeInt(logBuf, xid.getFormatId()); + + if (gid == null) { + logBuf.put((byte) -1); + } else { + logBuf.put((byte) (gid.length)); + logBuf.put(gid); + } + + if (bqual == null) { + logBuf.put((byte) -1); + } else { + logBuf.put((byte) (bqual.length)); + logBuf.put(bqual); + } + } + + /* + * Xid.gid[] and bqual[] can't be longer than 64 bytes so we can get away + * with writing the length in one byte, rather than 4. + */ + public static Xid readXid(ByteBuffer logBuf) { + int formatId = readInt(logBuf); + + int gidLen = logBuf.get(); + byte[] gid = null; + if (gidLen >= 0) { + gid = new byte[gidLen]; + logBuf.get(gid); + } + + int bqualLen = logBuf.get(); + byte[] bqual = null; + if (bqualLen >= 0) { + bqual = new byte[bqualLen]; + logBuf.get(bqual); + } + + return new XidImpl(formatId, gid, bqual); + } + + public static class XidImpl implements Xid { + private int formatId; + private byte[] gid; + private byte[] bqual; + + /* public for unit tests. */ + public XidImpl(int formatId, byte[] gid, byte[] bqual) { + this.formatId = formatId; + this.gid = gid; + this.bqual = bqual; + } + + public int getFormatId() { + return formatId; + } + + public byte[] getGlobalTransactionId() { + return gid; + } + + public byte[] getBranchQualifier() { + return bqual; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof XidImpl)) { + return false; + } + + XidImpl xid = (XidImpl) o; + if (xid.getFormatId() != formatId) { + return false; + } + if (compareByteArrays(xid.getGlobalTransactionId(), gid) && + compareByteArrays(xid.getBranchQualifier(), bqual)) { + return true; + } + + return false; + } + + @Override + public int hashCode() { + int code = formatId; + if (gid != null) { + for (int i = 0; i < gid.length; i++) { + code += gid[i]; + } + } + if (bqual != null) { + for (int i = 0; i < bqual.length; i++) { + code += bqual[i]; + } + } + return code; + } + + private boolean compareByteArrays(byte[] b1, byte[] b2) { + if (b1 == null || + b2 == null) { + return b1 == b2; + } + + if (b1.length != b2.length) { + return false; + } + + for (int i = 0; i < b1.length; i++) { + if (b1[i] != b2[i]) { + return false; + } + } + + return true; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(""); + return sb.toString(); + } + } +} diff --git a/src/com/sleepycat/je/log/Loggable.java b/src/com/sleepycat/je/log/Loggable.java new file mode 100644 index 0000000..673825d --- /dev/null +++ b/src/com/sleepycat/je/log/Loggable.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +/** + * A class that implements Loggable knows how to read and write itself into + * a ByteBuffer in a format suitable for the JE log or JE replication + * messages. + * + *
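+ * As a minimal illustrative sketch (the class and field names are
+ * hypothetical, not part of JE), an implementation typically delegates
+ * its serialization to LogUtils:
+ *
+ *   class CounterLoggable implements Loggable {
+ *       private long count;
+ *       public int getLogSize() {
+ *           return LogUtils.getPackedLongLogSize(count);
+ *       }
+ *       public void writeToLog(ByteBuffer logBuffer) {
+ *           LogUtils.writePackedLong(logBuffer, count);
+ *       }
+ *       public void readFromLog(ByteBuffer itemBuffer, int entryVersion) {
+ *           count = LogUtils.readPackedLong(itemBuffer);
+ *       }
+ *       public void dumpLog(StringBuilder sb, boolean verbose) {
+ *           sb.append(count);
+ *       }
+ *       public long getTransactionId() {
+ *           return 0;
+ *       }
+ *       public boolean logicalEquals(Loggable other) {
+ *           return other instanceof CounterLoggable &&
+ *               ((CounterLoggable) other).count == count;
+ *       }
+ *   }
+ *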

        Classes that implement {@code Loggable} and are included in replication + * data should implement {@code VersionedWriteLoggable}. + */ +public interface Loggable { + + /* + * Writing to a byte buffer + */ + + /** + * @return number of bytes used to store this object. + */ + public int getLogSize(); + + /** + * Serialize this object into the buffer. + * @param logBuffer is the destination buffer + */ + public void writeToLog(ByteBuffer logBuffer); + + /* + * Reading from a byte buffer + */ + + /** + * Initialize this object from the data in itemBuf. + * @param itemBuffer the source buffer + * @param entryVersion the log version of the data + */ + public void readFromLog(ByteBuffer itemBuffer, int entryVersion); + + /** + * Write the object into the string buffer for log dumping. Each object + * should be dumped without indentation or new lines and should be valid + * XML. + * @param sb destination string buffer + * @param verbose if true, dump the full, verbose version + */ + public void dumpLog(StringBuilder sb, boolean verbose); + + /** + * @return the transaction id embedded within this loggable object. Objects + * that have no transaction id should return 0. + */ + public long getTransactionId(); + + /** + * @return true if these two loggable items are logically the same. + * Used for replication testing. + */ + public boolean logicalEquals(Loggable other); +} diff --git a/src/com/sleepycat/je/log/PrintFileReader.java b/src/com/sleepycat/je/log/PrintFileReader.java new file mode 100644 index 0000000..2962a78 --- /dev/null +++ b/src/com/sleepycat/je/log/PrintFileReader.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; + +/** + * The PrintFileReader prints out the target log entries. + */ +public class PrintFileReader extends DumpFileReader { + + /** + * Create this reader to start at a given LSN. + */ + public PrintFileReader(EnvironmentImpl env, + int readBufferSize, + long startLsn, + long finishLsn, + long endOfFileLsn, + String entryTypes, + String dbIds, + String txnIds, + boolean verbose, + boolean repEntriesOnly, + boolean forwards) + throws DatabaseException { + + super(env, + readBufferSize, + startLsn, + finishLsn, + endOfFileLsn, + entryTypes, + dbIds, + txnIds, + verbose, + repEntriesOnly, + forwards); + } + + /** + * This reader prints the log entry item. + */ + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + /* Figure out what kind of log entry this is */ + LogEntryType type = + LogEntryType.findType(currentEntryHeader.getType()); + + /* Read the entry. */ + LogEntry entry = type.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + + /* Match according to command line args. */ + if (!matchEntry(entry)) { + return true; + } + + /* Dump it. 
*/ + StringBuilder sb = new StringBuilder(); + sb.append(""); + entry.dumpEntry(sb, verbose); + sb.append(""); + System.out.println(sb.toString()); + + return true; + } +} diff --git a/src/com/sleepycat/je/log/Provisional.java b/src/com/sleepycat/je/log/Provisional.java new file mode 100644 index 0000000..903f630 --- /dev/null +++ b/src/com/sleepycat/je/log/Provisional.java @@ -0,0 +1,298 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import com.sleepycat.je.utilint.DbLsn; + +/** + * Specifies whether to log an entry provisionally. + * + * Provisional log entries: + * + * What are provisional log entries? + * + * Provisional log entries are those tagged with the provisional attribute + * in the log entry header. The provisional attribute can be applied to any + * type of log entry, and is implemented in + * com.sleepycat.je.log.LogEntryHeader as two stolen bits in the 8 bit + * version field. + * + * When is the provisional attribute used? + * + * The provisional attribute is used only during recovery. It very simply + * indicates that recovery will ignore and skip over this log entry. + * + * When is the provisional attribute set? + * + * The provisional attribute started out as a way to create atomicity among + * different log entries. Child pointers in the JE Btree are physical LSNs, + * so each Btree node's children must be logged before it in the log. On + * the other hand, one fundamental assumption of the JE log is that each + * Btree node found in the log can be replayed and placed in the in-memory + * tree. To do so, each Btree node must have a parent node that will house + * it. The grouping of multiple log entries into one atomic group is often + * used to fulfiil this requirement. + * + * * Atomic log entries: + * + * + When a btree is first created, we bootstrap tree creation by + * logging the first BIN provisionally, then creating a parent IN + * which is the Btree root IN, which points to this first BIN. + * + * + When we split a Btree node, we create a new IN, which is the + * sibling of the split node. We log the old sibling and the new + * sibling provisionally, and then log the parent, so that any + * crashes in the middle of this triumvirate which result in the + * failure to log the parent will skip over the orphaned siblings. + * + * + Splitting the Btree root is just a special case of a split. + * + * + Creating a duplicate subtree to hang in the middle of a btree is + * just a special case of a split and btree first creation. + * + * * Entries not meant to be recovered + * + * Temp DBs are not meant to be recovered and we log their LN + * and IN nodes in a very lax fashion, purely as a way of evicting + * them out of the cache temporarily. There is no guarantee that a + * consistent set has been logged to disk. We skip over them for both + * recovery performance and the "each-node-must-have-a-parent" rule. 
+ * + * * Durable deferred-write entries + * + * Deferred-write INs are logged provisionally for the same reasons + * as for temp DBs (above): for recovery performance and the + * "each-node-must-have-a-parent" rule. + * + * Deferred-write LNs are logged non-provisionally to support + * obsolete LSN counting. It would be nice to log them provisionally + * for recovery performance and to allow LN deletion without logging; + * however, that is not currently practical if obsolete counting is + * to be supported. See [#16864]. + * + * * Checkpoint performance + * + * When we flush a series of nodes, it's a waste to replay nodes + * which are referenced by higher levels. For example, if we + * checkpoint this btree: + * + * INA -> INb -> BINc (dirty)-> LNd + * + * we log them in this order: + * + * BINc + * INb + * + * And there's no point to replaying BINc, because it's referenced by + * INb. We skip over BINc, which we do by logging it provisionally. + * + * In addition, BEFORE_CKPT_END is used to improve cleaner + * performance by keeping utilization information up-to-date during + * the checkpoint. See below for details. + * + * * Log cleaning - removing references to deleted files. + * + * When we delete a file for log cleaning we guarantee that no active log + * entries refer to any log entry in the deleted file. Suppose our + * checkpoint looks like this: + * + * 5/100 LNa + * 5/200 Ckpt start + * 5/300 INs + * ... + * 5/500 Ckpt end + * ... + * 5/800 last entry in log + * + * Because we do not delete a file until the Ckpt end after processing + * (cleaning) it, nothing from 5/500 to 5/800 can refer to a file deleted + * due to the Ckpt end in 5/500. + * + * BEFORE_CKPT_END is motivated in part (see below for a complete + * description) by the fact that while log entries between 5/100 + * (first active lsn) and 5/500 (ckpt end) will not in of themselves + * contain a LSN for a cleaned, deleted file, the act of processing them + * during recovery could require fetching a node from a deleted file. For + * example, the IN at 5/300 could have an in-memory parent which has a + * reference to an older, cleaned version of that IN. Treating the span + * between 5/200 and 5/500 as provisional is both optimal, because only + * the high level INs need to be processed, and required, in order not to + * fetch from a cleaned file. + * + * The correctness issue is described in [#16037] comment 151, where we + * attempted to log non-provisionally below maxFlushLevel. It is + * repeated below. + * + * IN-A + * \ + * IN-B + * \ + * IN-C + * \ + * BIN-D + * + * 1/100 CkptStart + * 1/200 BIN-D provisional + * 1/300 IN-C non-provisional + * 2/100 IN-B non-provisional + * 2/200 IN-A non-provisional + * 2/300 MapLN refers to IN-A + * 2/400 CkptEnd + * 5/100 cleaner processes file 1 + * BIN-D and IN-C are dirty + * 5/200 CkptStart + * 5/300 BIN-D provisional + * 5/400 IN-C non-provisional + * 5/500 IN-B non-provisional (must log one extra level) + * IN-A is not logged + * MapLN still refers to IN-A at 2/200 + * 5/600 CkptEnd + * file 1 is deleted + * 6/100 Start recovery + * + * Note that only the bottom level BINs are logged provisionally because + * we're logging level 2 and up non-provisionally in this experiment. + * + * Recovery replays IN-C at 5/400 because it is non-provisional. + * + * When it does the tree lookup (getParentINForChildIN) it uses the root + * IN-A at 2/200. This search fetches IN-B at 2/100 and then fails + * fetching IN-C at 1/300 because file 1 has been deleted. 
+ * + * In reality we log provisionally below maxFlushLevel, so that IN-C at + * 5/400 is not replayed. IN-B at 5/500 is at the maxFlushLevel and is + * non-provisional and is replayed. The search succeeds because nothing + * in file 1 needs to be fetched to find the parent. + * + * TODO: Could we instead replay INs in reverse order? + * Then IN-B at 5/500 would be replayed first. Unfortunately this would + * probably break something else. For example, the utilization tracking + * replay for INs currently depends on reading forward. This is worth + * exploring, however, since reducing logging during checkpoints would be + * extremely beneficial. + * + * Provisional.BEFORE_CKPT_END + * --------------------------- + * This property was added to solve a specific problem that occurs in earlier + * versions of JE: When a long checkpoint runs, the BINs are not counted + * obsolete until after the entire BIN level has been logged. Specifically, + * they are counted obsolete when their ancestor is logged non-provisionally. + * Most INs logged by a checkpoint are BINs. This means that during a very + * long checkpoint, cleaning of the files containing those old BINs is delayed, + * and more importantly the calculated utilization is much higher than it + * actually is. The correction in utilization does not occur until the end of + * the checkpoint, when the higher level INs are logged. This manifests as a + * lull in cleaning during the checkpoint, because calculated utilization is + * artificially high, and a spike in cleaning at the end of the checkpoint. In + * some cases, the cleaner cannot recover from the backlog that is created by + * the spike. + * + * The provisional property effects obsolete counting as follows: + * + * + If an IN is logged with Provisional.YES, the old version of the IN is not + * counted obsolete immediately. Instead, the offset of the old version of + * the IN is added to a list in its parent IN. The offsets migrate upward + * in the tree in this manner until an ancestor IN is logged + * non-provisionally. + * + * + If an IN is logged with Provisional.NO or BEFORE_CKPT_END, the old + * version of the IN is counted obsolete immediately (and offsets + * accumulated from provisional child INs are counted). This means + * that the obsolete offset is added to the UtilizationTracker, and may be + * flushed in a FileSummaryLN any time after that. At the latest, it is + * flushed at the end of the checkpoint. + * + * Because subtree logging is now used for checkpoints and the parent IN of + * each logged sub-tree is logged with BEFORE_CKPT_END, the prior version of + * all INs in the sub-tree will be counted obsolete at that time. This keeps + * the calculated utilization accurate throughout the checkpoint, and prevents + * the large per-checkpoint lull and spike in log cleaning. + * + * For the intermediate levels, Provisional.BEFORE_CKPT_END must be used rather + * than Provisional.NO, which is reserved for the highest level only. During + * recovery, the Provisional values are treated as follows (this is from the + * Provisional javadoc): + * + NO: The entry is non-provisional and is always processed by recovery. + * + YES: The entry is provisional and is never processed by recovery. + * + BEFORE_CKPT_END: The entry is provisional (not processed by recovery) if + * it occurs before the CkptEnd in the recovery interval, or is + * non-provisional (is processed) if it occurs after CkptEnd. 
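+ * As a small worked example (LSNs hypothetical): if recovery finds a
+ * CkptEnd at LSN 5/600, a BEFORE_CKPT_END entry logged at 5/300 is
+ * skipped, one logged at 5/700 is replayed, and if no CkptEnd exists in
+ * the recovery interval, every BEFORE_CKPT_END entry is replayed.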
+ * + * The key to BEFORE_CKPT_END is that it is treated as provisional if a CkptEnd + * is logged, i.e., if we do not crash before completing the checkpoint. + * Because the checkpoint completed, we may have deleted log files that + * would be necessary to replay the IN. So we cannot safely replay it. + * + * Note the difference in the treatment of BEFORE_CKPT_END for obsolete + * counting and recovery: + * + For obsolete counting, BEFORE_CKPT_END is treated as non-provisional. + * + For recovery, when the IN occurs before CkptEnd, BEFORE_CKPT_END is + * treated as provisional. + * This difference is the reason for the existence of BEFORE_CKPT_END. + * + * TODO: Improvement to tracking of obsolete data. + * When we checkpoint INs, why can't we always count the previous version + * obsolete immediately, irrespective of whether it is logged provisionally? + * The previous version file can't be deleted until after a complete + * checkpoint. If we do not complete the next checkpoint, recovery will + * replay the INs logged with BEFORE_CKPT_END. So the previous version will be + * obsolete. This would avoid storing a list of obsolete child LSNs in each + * parent IN, and would make the utilization summary more up-to-date. The + * motivation for sub-tree logging was to keep the utilization info up-to-date, + * so we may be able to remove sub-tree logging as well. Additionally, I think + * we can remove BEFORE_CKPT_END and log provisionally (YES) instead, because + * recovery will replay the actions that dirtied the INs, and the ckpt at the + * end of recovery will flush the dirty nodes, making the previous version + * obsolete; however, this would duplicate the provisional INs, so perhaps it + * is best to continue to use BEFORE_CKPT_END. + */ +public enum Provisional { + + /** + * The entry is non-provisional and is always processed by recovery. + */ + NO, + + /** + * The entry is provisional and is never processed by recovery. + */ + YES, + + /** + * The entry is provisional (not processed by recovery) if it occurs before + * the CkptEnd in the recovery interval, or is non-provisional (is + * processed) if it occurs after CkptEnd. + */ + BEFORE_CKPT_END; + + /** + * Determines whether a given log entry should be processed during + * recovery. + */ + public boolean isProvisional(long logEntryLsn, long ckptEndLsn) { + assert logEntryLsn != DbLsn.NULL_LSN; + switch (this) { + case NO: + return false; + case YES: + return true; + case BEFORE_CKPT_END: + return ckptEndLsn != DbLsn.NULL_LSN && + DbLsn.compareTo(logEntryLsn, ckptEndLsn) < 0; + default: + assert false; + return false; + } + } +} diff --git a/src/com/sleepycat/je/log/ReplicationContext.java b/src/com/sleepycat/je/log/ReplicationContext.java new file mode 100644 index 0000000..c63644b --- /dev/null +++ b/src/com/sleepycat/je/log/ReplicationContext.java @@ -0,0 +1,138 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import com.sleepycat.je.log.entry.DbOperationType; +import com.sleepycat.je.utilint.VLSN; + +/** + * ReplicationContext provides context about high-level operations so that the + * logging level can determine which replication related actions are required + * for a given Loggable item. + * + * Those lower level actions are: + * - does a log entry need to be logged with a VLSN generated by this + * (master) node? + * - does the log entry need to be logged with the VLSN which accompanied a + * replication message? + * - do we need to wait for PERM acks after logging an entry? + * - do we need to record the client VLSN that was just written to the log? + * + * ReplicationContext subclasses may hold additional information about the + * logical operation which instigated logging, so that this can be added + * to the log entry. + * + * All LogEntryType(s) have a "replicationPossible" attribute. For example, + * INs will never be replicated, but LN_TX's may or may not be replicated, + * depending on whether the owning database is replicated. + * + * If a LogEntryType will never be replicated, it should be logged with + * the static ReplicationContext.NO_REPLICATE instance. + * If replication is possible, the replication context may be: + * - one allocated for this operation, as the result of client apply + * - the static instance MASTER, if this node is the replication master + * - the static instance NO_REPLICATE, if this is a local operation + * + */ +public class ReplicationContext { + + /* + * Convenience static instance used when you know this operation is + * executing on a replication master node. + */ + public static final ReplicationContext MASTER = + new ReplicationContext(true /* inReplicationStream */); + + /* + * Convenience static instance used when you know this operation will not + * be replicated, either because it's executing on a non-replicated node, + * it's a local operation for a local database, it's a read only operation, + * or because this loggable item is the type that is never replicated. + */ + public static final ReplicationContext NO_REPLICATE = + new ReplicationContext(false /* inReplicationStream */); + + /* + * If true, this Loggable item is part of the replication stream, and + * needs to be logged with a VLSN. + */ + private final boolean inReplicationStream; + + /* + * The VLSN value passed in from a replication message directed at + * this replication client. + */ + private final VLSN clientVLSN; + + protected ReplicationContext(boolean inReplicationStream) { + this.inReplicationStream = inReplicationStream; + clientVLSN = null; + } + + /** + * Used to pass the VLSN held in an arriving message down to the logging + * levels. + */ + public ReplicationContext(VLSN clientVLSN) { + this.inReplicationStream = true; + this.clientVLSN = clientVLSN; + } + + /** + * Used to pass the VLSN held in a migrated LN down to the logging levels. + */ + public ReplicationContext(VLSN clientVLSN, boolean inReplicationStream) { + this.clientVLSN = clientVLSN; + this.inReplicationStream = inReplicationStream; + } + + /** + * @return the VLSN that arrived in the replication message which + * instigated this Loggable item. 
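+ * On a master this is null; summarizing the constructor cases above:
+ *   MASTER:                        inReplicationStream=true,  clientVLSN=null
+ *   NO_REPLICATE:                  inReplicationStream=false, clientVLSN=null
+ *   new ReplicationContext(vlsn):  inReplicationStream=true,  clientVLSN=vlsn
+ * so among these cases mustGenerateVLSN() returns true only for MASTER.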
+ */ + public VLSN getClientVLSN() { + return clientVLSN; + } + + /** + * @return true if this loggable item is part of the replication stream + */ + public boolean inReplicationStream() { + return inReplicationStream; + } + + /** + * @return true if this node is the master, and should + * generate a VLSN for this log entry + */ + public boolean mustGenerateVLSN() { + return (inReplicationStream && (clientVLSN == null)); + } + + /** + * @return the type of database operation in progress. For the default + * case, we return DbOperationType.NONE. + */ + public DbOperationType getDbOperationType() { + return DbOperationType.NONE; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("inRepStream=").append(inReplicationStream); + sb.append(" clientVLSN=").append(clientVLSN); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/log/RestoreMarker.java b/src/com/sleepycat/je/log/RestoreMarker.java new file mode 100644 index 0000000..2393a57 --- /dev/null +++ b/src/com/sleepycat/je/log/RestoreMarker.java @@ -0,0 +1,156 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.log; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.Properties; + +import com.sleepycat.je.log.entry.FileHeaderEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.log.entry.SingleItemEntry; + +/** + * A RestoreMarker is a file that indicates that a normal recovery is not + * possible, because the log files are physically or semantically inconsistent + * in some way. + * + * One example is an interrupted, incomplete network restore. The network + * restore copies log files from a source to destination node. If it's halted, + * while the destination node may contain files that are readable, + * checksum-able and seem correct, the set as a whole may not have a complete + * and coherent copy of the log. In such a case, recovery should not be run on + * this environment's log until some action is taken to make it consistent. For + * a network restore, this curative action is to restart the copy process, and + * to complete the copy of a set of logs from some node to this node. The + * network restore creates a marker file just before it does any form of change + * to the log that would make the log inconsistent. + * + * The restore marker file is named .jdb, and holds a normal log file + * header and a RestoreRequired log entry. The RestoreRequired entry indicates + * the type of error of the initial cause, and information needed to repair the + * environment. The mechanism depends on the fact that the very first step of + * recovery is to read backwards from the last file in the + * environment. Recovery will start at this file, and will fail when it reads + * the RestoreRequired log entry, throwing an exception that contains + * prescriptive information for how the environment can be repaired. 
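+ * A minimal usage sketch (the failure type and property key here are
+ * illustrative assumptions, not a prescribed protocol):
+ *
+ *   RestoreMarker marker = new RestoreMarker(fileManager, logManager);
+ *   Properties props = new Properties();
+ *   props.setProperty("restore.hint", "retry network restore");
+ *   marker.createMarkerFile(
+ *       RestoreRequired.FailureType.NETWORK_RESTORE, props);
+ *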
+ * + * Note that createMarkerFile() is idempotent and can be safely called multiple + * times. It's done that way to make it simpler for the caller. + * + * The handler that repairs the log must also delete the marker file so future + * recoveries can succeed. + */ +public class RestoreMarker { + + /** + * Internal exception used to distinguish marker file creation from other + * IOErrors. + */ + public static class FileCreationException extends Exception { + FileCreationException(String msg, Throwable cause) { + super(msg, cause); + } + } + + private final LogManager logManager; + private final File lastFile; + + public RestoreMarker(FileManager fileManager, + LogManager logManager) { + + this.logManager = logManager; + String lastFileName = fileManager.getFullFileName(Integer.MAX_VALUE); + lastFile = new File(lastFileName); + } + + public static String getMarkerFileName() { + return FileManager.getFileName(Integer.MAX_VALUE); + } + + /** + * Remove the marker file. Use FileManager.delete so this file works with + * the FileDeletionDetector. + * @throws IOException if the file won't delete. + */ + public void removeMarkerFile(FileManager fileManager) + throws IOException { + + if (lastFile.exists()) { + fileManager.deleteFile(Integer.MAX_VALUE); + } + } + + /** + * Create the restore marker file. + * + * The method may be called repeatedly, but will not re-create the marker + * file if there's already a non-zero length file. + * + * @param failureType the failure type that should be recorded in the + * RestoreRequired log entry. + * @param props will be serialized to store information about how to handle + * the failure type. + * @throws FileCreationException if the marker file can't be created. + */ + public void createMarkerFile(RestoreRequired.FailureType failureType, + Properties props) + throws FileCreationException { + + /* Don't overwrite the file if it already exists. */ + if (lastFile.exists() && lastFile.length() > 0) { + return; + } + + try { + lastFile.createNewFile(); + + /* + * The file will have two log entries: + * - a manufactured file header. Note that the file header usually + * has a previous offset that points to the previous log entry. In + * this case, it's set to 0 because we will never scan backwards + * from this file. + * - a RestoreRequired log entry + */ + FileHeader header = new FileHeader(Integer.MAX_VALUE, 0); + LogEntry headerLogEntry = + new FileHeaderEntry(LogEntryType.LOG_FILE_HEADER, header); + ByteBuffer buf1 = logManager.putIntoBuffer(headerLogEntry, + 0); // prevLogEntryOffset + + RestoreRequired rr = new RestoreRequired(failureType, props); + + LogEntry marker = + SingleItemEntry.create(LogEntryType.LOG_RESTORE_REQUIRED, rr); + ByteBuffer buf2 = logManager.putIntoBuffer(marker, 0); + + try (FileOutputStream stream = new FileOutputStream(lastFile); + FileChannel channel = stream.getChannel()) { + channel.write(buf1); + channel.write(buf2); + } catch (IOException e) { + /* the stream and channel will be closed */ + throw e; + } + } catch (IOException ioe) { + throw new FileCreationException( + "Marker file creation failed for: " + failureType + + " " + ioe.toString(), ioe); + } + } +} diff --git a/src/com/sleepycat/je/log/ScavengerFileReader.java b/src/com/sleepycat/je/log/ScavengerFileReader.java new file mode 100644 index 0000000..c3351ac --- /dev/null +++ b/src/com/sleepycat/je/log/ScavengerFileReader.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.io.FileNotFoundException; +import java.nio.ByteBuffer; +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.utilint.DbLsn; + +/** + * A ScavengerFileReader reads the log backwards. If it encounters a checksum + * error, it goes to the start of that log file and reads forward until it + * encounters a checksum error. It then continues the reading backwards in the + * log. + * + * The caller may set "dumpCorruptedBounds" to true if information about the + * start and finish of the corrupted portion should be displayed on stderr. + * + * The caller is expected to implement processEntryCallback. This method is + * called once for each entry that the ScavengerFileReader finds in the log. + */ +abstract public class ScavengerFileReader extends FileReader { + + /* A Set of the entry type numbers that this FileReader should dump. */ + private Set targetEntryTypes; + + private int readBufferSize; + + /* True if reader should write corrupted boundaries to System.err. */ + private boolean dumpCorruptedBounds; + + /** + * Create this reader to start at a given LSN. + */ + public ScavengerFileReader(EnvironmentImpl env, + int readBufferSize, + long startLsn, + long finishLsn, + long endOfFileLsn) + throws DatabaseException { + + super(env, + readBufferSize, + false, + startLsn, + null, // single file number + endOfFileLsn, + finishLsn); + + this.readBufferSize = readBufferSize; + + /* + * Indicate that a checksum error should not shutdown the whole + * environment. + */ + targetEntryTypes = new HashSet(); + dumpCorruptedBounds = false; + } + + /** + * Set to true if corrupted boundaries should be dumped to stderr. + */ + public void setDumpCorruptedBounds(boolean dumpCorruptedBounds) { + this.dumpCorruptedBounds = dumpCorruptedBounds; + } + + /** + * Tell the reader that we are interested in these kind of entries. + */ + public void setTargetType(LogEntryType type) { + targetEntryTypes.add(Byte.valueOf(type.getTypeNum())); + } + + /* + * For each entry that is selected, just call processEntryCallback. + */ + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + LogEntryType lastEntryType = + LogEntryType.findType(currentEntryHeader.getType()); + LogEntry entry = lastEntryType.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + processEntryCallback(entry, lastEntryType); + return true; + } + + /* + * Method overriden by the caller. Each entry of the types selected + * is passed to this method. + */ + abstract protected void processEntryCallback(LogEntry entry, + LogEntryType entryType) + throws DatabaseException; + + /* + * Read the next entry. If a checksum exception is encountered, attempt + * to find the other side of the corrupted area and try to re-read this + * file. 
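In outline: the LSN of the entry whose checksum failed is computed from the current file number and the saved entry offset, resyncReader() locates the last good record before the corrupted span, and the backwards scan then resumes from there.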
+ */ + @Override + public boolean readNextEntry() { + long saveCurrentEntryOffset = currentEntryOffset; + try { + return super.readNextEntryAllowExceptions(); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } catch (ChecksumException e) { + resyncReader(DbLsn.makeLsn(window.currentFileNum(), + saveCurrentEntryOffset), + dumpCorruptedBounds); + return super.readNextEntry(); + } + } + + /* + * A checksum error has been encountered. Go to the start of this log file + * and read forward until the lower side of the corrupted area has been + * found. + */ + + /** + * TBW + */ + @Override + protected void handleGapInBackwardsScan(long prevFileNum) { + if (!resyncReader(DbLsn.makeLsn(prevFileNum, DbLsn.MAX_FILE_OFFSET), + false)) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_INTEGRITY, + "Cannot read backward over cleaned file" + + " from " + window.currentFileNum() + + " to " + prevFileNum); + } + } + + protected boolean resyncReader(long nextGoodRecordPostCorruption, + boolean showCorruptedBounds) + throws DatabaseException { + + LastFileReader reader = null; + long tryReadBufferFileNum = + DbLsn.getFileNumber(nextGoodRecordPostCorruption); + + while (tryReadBufferFileNum >= 0) { + try { + reader = + new LastFileReader(envImpl, readBufferSize, + Long.valueOf(tryReadBufferFileNum)); + break; + } catch (ChecksumException e) { + + /* + * We encountered a problem opening this file so skip to an + * earlier file. + */ + tryReadBufferFileNum--; + continue; + } + } + + boolean switchedFiles = tryReadBufferFileNum != + DbLsn.getFileNumber(nextGoodRecordPostCorruption); + + if (!switchedFiles) { + + /* + * Read forward until a checksum fails. This reader will not throw + * an exception if a checksum error is hit -- it will just return + * false. + */ + while (reader.readNextEntry()) { + } + } + + long lastUsedLsn = reader.getLastValidLsn(); + long nextAvailableLsn = reader.getEndOfLog(); + if (showCorruptedBounds) { + System.err.println("A checksum error was found in the log."); + System.err.println + ("Corruption begins at LSN:\n " + + DbLsn.toString(nextAvailableLsn)); + System.err.println + ("Last known good record before corruption is at LSN:\n " + + DbLsn.toString(lastUsedLsn)); + System.err.println + ("Next known good record after corruption is at LSN:\n " + + DbLsn.toString(nextGoodRecordPostCorruption)); + } + + startLsn = lastUsedLsn; + initStartingPosition(nextAvailableLsn, null); + if (switchedFiles) { + currentEntryPrevOffset = 0; + } + /* Indicate resync is permitted so don't throw exception. */ + return true; + } + + /** + * @return true if this reader should process this entry, or just skip + * over it. + */ + @Override + protected boolean isTargetEntry() { + if (currentEntryHeader.isInvisible()) { + + /* + * This log entry is supposed to be effectivly truncated, so we + * know this data is not alive. + */ + return false; + } + + if (targetEntryTypes.size() == 0) { + /* We want to dump all entry types. */ + return true; + } else { + return targetEntryTypes.contains + (Byte.valueOf(currentEntryHeader.getType())); + } + } +} diff --git a/src/com/sleepycat/je/log/SearchFileReader.java b/src/com/sleepycat/je/log/SearchFileReader.java new file mode 100644 index 0000000..4112e20 --- /dev/null +++ b/src/com/sleepycat/je/log/SearchFileReader.java @@ -0,0 +1,73 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.utilint.DbLsn; + +/** + * SearchFileReader searches for a given entry type. + */ +public class SearchFileReader extends FileReader { + + private LogEntryType targetType; + private LogEntry logEntry; + + /** + * Create this reader to start at a given LSN. + */ + public SearchFileReader(EnvironmentImpl env, + int readBufferSize, + boolean forward, + long startLsn, + long endOfFileLsn, + LogEntryType targetType) + throws DatabaseException { + + super(env, readBufferSize, forward, startLsn, null, + endOfFileLsn, DbLsn.NULL_LSN); + + this.targetType = targetType; + logEntry = targetType.getNewLogEntry(); + } + + /** + * @return true if this is a targeted entry. + */ + @Override + protected boolean isTargetEntry() { + return (targetType.equalsType(currentEntryHeader.getType())); + } + + /** + * This reader instantiates the first object of a given log entry. + */ + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + logEntry.readEntry(envImpl, currentEntryHeader, entryBuffer); + return true; + } + + /** + * @return the last object read. + */ + public Object getLastObject() { + return logEntry.getMainItem(); + } +} diff --git a/src/com/sleepycat/je/log/StatsFileReader.java b/src/com/sleepycat/je/log/StatsFileReader.java new file mode 100644 index 0000000..1c41430 --- /dev/null +++ b/src/com/sleepycat/je/log/StatsFileReader.java @@ -0,0 +1,610 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.text.NumberFormat; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.Map; +import java.util.TreeMap; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.utilint.DbLsn; + +/** + * The StatsFileReader generates stats about the log entries read, such as the + * count of each type of entry, the number of bytes, and the minimum and + * maximum log entry sizes. + */ +public class StatsFileReader extends DumpFileReader { + + private final Map entryInfoMap; + private long totalLogBytes; + private long totalCount; + + /* Keep stats on log composition in terms of ckpt intervals. 
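Each CheckpointCounter covers the span up to one CKPT_END entry; processEntry starts a new counter whenever a checkpoint end record is read, so the list describes consecutive checkpoint intervals.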
*/ + private final ArrayList ckptList; + private CheckpointCounter ckptCounter; + private long firstLsnRead; + + private long realTotalKeyCount = 0; + private long realTotalKeyBytes = 0; + private long realMinKeyBytes = 0; + private long realMaxKeyBytes = 0; + private long realTotalDataCount = 0; + private long realTotalDataBytes = 0; + private long realMinDataBytes = 0; + private long realMaxDataBytes = 0; + + /** + * Create this reader to start at a given LSN. + */ + public StatsFileReader(EnvironmentImpl envImpl, + int readBufferSize, + long startLsn, + long finishLsn, + long endOfFileLsn, + String entryTypes, + String dbIds, + String txnIds, + boolean verbose, + boolean repEntriesOnly, + boolean forwards) + throws DatabaseException { + + super(envImpl, readBufferSize, startLsn, finishLsn, endOfFileLsn, + entryTypes, dbIds, txnIds, verbose, repEntriesOnly, forwards); + entryInfoMap = new TreeMap<>(new LogEntryTypeComparator()); + + totalLogBytes = 0; + totalCount = 0; + + ckptCounter = new CheckpointCounter(); + ckptList = new ArrayList(); + if (verbose) { + ckptList.add(ckptCounter); + } + } + + /** + * This reader collects stats about the log entry. + */ + @Override + protected boolean processEntry(ByteBuffer entryBuffer) { + byte currentType = currentEntryHeader.getType(); + LogEntryType type = LogEntryType.findType(currentType); + LogEntry entry = null; + + if (needMatchEntry()) { + entry = type.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + + if (!matchEntry(entry)) { + return true; + } + } + + int itemSize = currentEntryHeader.getItemSize(); + int headerSize = currentEntryHeader.getSize(); + + /* + * Record various stats based on the entry header. + * + * Get the info object for it, if this is the first time it's seen, + * create an info object and insert it. + */ + EntryInfo info = entryInfoMap.get(type); + if (info == null) { + info = new EntryInfo(); + entryInfoMap.put(type, info); + } + + /* Update counts. */ + info.count++; + totalCount++; + if (currentEntryHeader.getProvisional() == Provisional.YES) { + info.provisionalCount++; + } + int size = itemSize + headerSize; + info.totalBytes += size; + info.headerBytes += headerSize; + totalLogBytes += size; + + if ((info.minBytes == 0) || (info.minBytes > size)) { + info.minBytes = size; + } + if (info.maxBytes < size) { + info.maxBytes = size; + } + + if (verbose) { + if (firstLsnRead == DbLsn.NULL_LSN) { + firstLsnRead = getLastLsn(); + } + + if (currentType == LogEntryType.LOG_CKPT_END.getTypeNum()) { + /* Start counting a new interval. */ + ckptCounter.endCkptLsn = getLastLsn(); + ckptCounter = new CheckpointCounter(); + ckptList.add(ckptCounter); + } else { + ckptCounter.increment(this, currentType); + } + } + + if (type.isUserLNType()) { + /* Read the entry into the ByteBuffer. */ + if (entry == null) { + entry = type.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + } + LNLogEntry lnEntry = (LNLogEntry) entry; + + /* + * The getUnconvertedXxx methods are used because we don't have a + * DatabaseImpl for calling LNLogEntry.postFetchInit, and we can + * tolerate statistics that use the old duplicates format. 
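+             * Note that these key/data stats are accumulated for user LN
+             * entries only, and that deleted LNs contribute no data bytes.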
+ */ + int keyLen = lnEntry.getUnconvertedKeyLength(); + + realTotalKeyBytes += keyLen; + realTotalKeyCount += 1; + + if ((realMinKeyBytes == 0) || (realMinKeyBytes > keyLen)) { + realMinKeyBytes = keyLen; + } + if (realMaxKeyBytes < keyLen) { + realMaxKeyBytes = keyLen; + } + + if (!entry.isDeleted()) { + int dataLen = lnEntry.getUnconvertedDataLength(); + + realTotalDataBytes += dataLen; + realTotalDataCount += 1; + + if ((realMinDataBytes == 0) || (realMinDataBytes > dataLen)) { + realMinDataBytes = dataLen; + } + if (realMaxDataBytes < dataLen) { + realMaxDataBytes = dataLen; + } + } + } + + /* + * If we have not read the entry, skip over it. + */ + if (entry == null) { + int nextEntryPosition = entryBuffer.position() + itemSize; + entryBuffer.position(nextEntryPosition); + } + return true; + } + + @Override + public void summarize(boolean csvFormat) { + if (csvFormat) { + summarizeCSV(); + } else { + summarizeText(); + } + } + + class CheckpointInfoTextFormatter { + private NumberFormat form; + + CheckpointInfoTextFormatter() { + } + + CheckpointInfoTextFormatter(NumberFormat form) { + this.form = form; + } + + String format(String value) { + return pad(value); + } + + String format(int value) { + return pad(form.format(value)); + } + + String format(long value) { + return pad(form.format(value)); + } + } + + class CheckpointInfoCSVFormatter + extends CheckpointInfoTextFormatter { + + CheckpointInfoCSVFormatter() { + } + + @Override + String format(String value) { + return value + ","; + } + + @Override + String format(int value) { + return value + ","; + } + + @Override + String format(long value) { + return value + ","; + } + } + + private void summarizeCSV() { + Iterator> iter = + entryInfoMap.entrySet().iterator(); + + NumberFormat form = NumberFormat.getIntegerInstance(); + NumberFormat percentForm = NumberFormat.getInstance(); + percentForm.setMaximumFractionDigits(1); + System.out.println + ("type,total count,provisional count,total bytes," + + "min bytes,max bytes,avg bytes,entries as % of log"); + + while (iter.hasNext()) { + Map.Entry m = iter.next(); + EntryInfo info = m.getValue(); + StringBuilder sb = new StringBuilder(); + LogEntryType entryType = m.getKey(); + sb.append(entryType.toString()).append(','); + sb.append(info.count).append(','); + sb.append(info.provisionalCount).append(','); + sb.append(info.totalBytes).append(','); + sb.append(info.minBytes).append(','); + sb.append(info.maxBytes).append(','); + sb.append(info.totalBytes / info.count).append(','); + double entryPercent = + ((double) (info.totalBytes * 100) / totalLogBytes); + sb.append(entryPercent); + System.out.println(sb.toString()); + } + + /* Print special line for key/data */ + StringBuilder sb = new StringBuilder(); + sb.append("key bytes,"); + sb.append(realTotalKeyCount).append(','); + sb.append(","); + sb.append(realTotalKeyBytes).append(','); + sb.append(realMinKeyBytes).append(','); + sb.append(realMaxKeyBytes).append(','); + sb.append(realTotalKeyBytes / realTotalKeyCount).append(','); + sb.append(((double) (realTotalKeyBytes * 100) / + totalLogBytes)); + System.out.println(sb.toString()); + + sb = new StringBuilder(); + sb.append("data bytes,"); + sb.append(realTotalDataCount).append(','); + sb.append(","); + sb.append(realTotalDataBytes).append(','); + sb.append(realMinDataBytes).append(','); + sb.append(realMaxDataBytes).append(','); + sb.append(realTotalDataBytes / realTotalDataCount).append(','); + sb.append((double) (realTotalDataBytes * 100) / + totalLogBytes); + 
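+
+        /*
+         * Unlike summarizeText, these key/data averages divide by
+         * realTotalKeyCount and realTotalDataCount without a zero guard, so
+         * they assume the portion of the log read contains at least one
+         * user LN.
+         */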
System.out.println(sb.toString()); + + System.out.println("\nTotal bytes in portion of log read: " + + form.format(totalLogBytes)); + System.out.println("Total number of entries: " + + form.format(totalCount)); + + if (verbose) { + summarizeCheckpointInfo(new CheckpointInfoCSVFormatter()); + } + } + + private void summarizeText() { + System.out.println("Log statistics:"); + Iterator> iter = + entryInfoMap.entrySet().iterator(); + + NumberFormat form = NumberFormat.getIntegerInstance(); + NumberFormat percentForm = NumberFormat.getInstance(); + percentForm.setMaximumFractionDigits(1); + System.out.println(pad("type") + + pad("total") + + pad("provisional") + + pad("total") + + pad("min") + + pad("max") + + pad("avg") + + pad("entries")); + + System.out.println(pad("") + + pad("count") + + pad("count") + + pad("bytes") + + pad("bytes") + + pad("bytes") + + pad("bytes") + + pad("as % of log")); + + while (iter.hasNext()) { + Map.Entry m = iter.next(); + EntryInfo info = m.getValue(); + StringBuilder sb = new StringBuilder(); + LogEntryType entryType = m.getKey(); + sb.append(pad(entryType.toString())); + sb.append(pad(form.format(info.count))); + sb.append(pad(form.format(info.provisionalCount))); + sb.append(pad(form.format(info.totalBytes))); + sb.append(pad(form.format(info.minBytes))); + sb.append(pad(form.format(info.maxBytes))); + sb.append(pad(form.format(info.totalBytes / info.count))); + double entryPercent = + ((double) (info.totalBytes * 100) / totalLogBytes); + sb.append(pad(percentForm.format(entryPercent))); + System.out.println(sb.toString()); + } + + /* Print special line for key/data */ + StringBuilder sb = new StringBuilder(); + sb.append(pad("key bytes")); + sb.append(pad(form.format(realTotalKeyCount))); + sb.append(pad("")); + sb.append(pad(form.format(realTotalKeyBytes))); + sb.append(pad(form.format(realMinKeyBytes))); + sb.append(pad(form.format(realMaxKeyBytes))); + long keySize = (realTotalKeyCount == 0) ? 0 : + (realTotalKeyBytes / realTotalKeyCount); + double keyPct = (totalLogBytes == 0) ? 0 : + (((double) (realTotalKeyBytes * 100)) / totalLogBytes); + sb.append(pad(form.format(keySize))); + String realSize = "(" + percentForm.format(keyPct) + ")"; + sb.append(pad(realSize)); + System.out.println(sb.toString()); + + sb = new StringBuilder(); + sb.append(pad("data bytes")); + sb.append(pad(form.format(realTotalDataCount))); + sb.append(pad("")); + sb.append(pad(form.format(realTotalDataBytes))); + sb.append(pad(form.format(realMinDataBytes))); + sb.append(pad(form.format(realMaxDataBytes))); + long dataSize = (realTotalDataCount == 0) ? 0 : + (realTotalDataBytes / realTotalDataCount); + double dataPct = (totalLogBytes == 0) ? 
0 : + (((double) (realTotalDataBytes * 100))) / totalLogBytes; + sb.append(pad(form.format(dataSize))); + realSize = "(" + percentForm.format(dataPct) + ")"; + sb.append(pad(realSize)); + System.out.println(sb.toString()); + + System.out.println("\nTotal bytes in portion of log read: " + + form.format(totalLogBytes)); + System.out.println("Total number of entries: " + + form.format(totalCount)); + + if (verbose) { + summarizeCheckpointInfo(new CheckpointInfoTextFormatter(form)); + } + } + + private String pad(String result) { + int spaces = 20 - result.length(); + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < spaces; i++) { + sb.append(" "); + } + sb.append(result); + return sb.toString(); + } + + private void summarizeCheckpointInfo(CheckpointInfoTextFormatter f) { + System.out.println("\nPer checkpoint interval info:"); + + /* + * Print out checkpoint interval info. + * If the log looks like this: + * + * start of log + * ckpt1 start + * ckpt1 end + * ckpt2 start + * ckpt2 end + * end of log + * + * There are 3 ckpt intervals + * start of log->ckpt1 end + * ckpt1 end -> ckpt2 end + * ckpt2 end -> end of log + */ + System.out.println + (f.format("lnTxn") + + f.format("ln") + + f.format("mapLNTxn") + + f.format("mapLN") + + f.format("end to end") + // ckpt n-1 end -> ckpt n end + f.format("end to start") +// ckpt n-1 end -> ckpt n start + f.format("start to end") +// ckpt n start -> ckpt n end + f.format("maxLNReplay") + + f.format("ckptEnd")); + + long logFileMax = + envImpl.getConfigManager().getLong(EnvironmentParams.LOG_FILE_MAX); + + Iterator iter = ckptList.iterator(); + CheckpointCounter prevCounter = null; + while (iter.hasNext()) { + CheckpointCounter c = iter.next(); + StringBuilder sb = new StringBuilder(); + + /* Entry type counts. */ + int maxTxnLNs = c.preStartLNTxnCount + c.postStartLNTxnCount; + sb.append(f.format(maxTxnLNs)); + int maxLNs = c.preStartLNCount + c.postStartLNCount; + sb.append(f.format(maxLNs)); + sb.append(f.format(c.preStartMapLNTxnCount + + c.postStartMapLNTxnCount)); + sb.append(f.format(c.preStartMapLNCount + + c.postStartMapLNCount)); + + /* Checkpoint interval distance. */ + long end = (c.endCkptLsn == DbLsn.NULL_LSN) ? + getLastLsn() : + c.endCkptLsn; + long endToEndDistance = 0; + + FileManager fileMgr = envImpl.getFileManager(); + if (prevCounter == null) { + endToEndDistance = DbLsn.getWithCleaningDistance( + end, firstLsnRead, logFileMax, fileMgr); + } else { + endToEndDistance = DbLsn.getWithCleaningDistance( + end, prevCounter.endCkptLsn, logFileMax, fileMgr); + } + sb.append(f.format(endToEndDistance)); + + /* + * Interval between last checkpoint end and this checkpoint start. + */ + long start = (c.startCkptLsn == DbLsn.NULL_LSN) ? getLastLsn() : + c.startCkptLsn; + long endToStartDistance = 0; + + if (prevCounter == null) { + endToStartDistance = DbLsn.getWithCleaningDistance( + start, firstLsnRead, logFileMax, fileMgr); + } else { + endToStartDistance = DbLsn.getWithCleaningDistance( + start, prevCounter.endCkptLsn, logFileMax, fileMgr); + } + sb.append(f.format(endToStartDistance)); + + /* + * Interval between ckpt start and ckpt end. 
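+             * Reported as zero unless both the checkpoint start and end LSNs
+             * were seen in the portion of the log that was read.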
+ */ + long startToEndDistance = 0; + if ((c.startCkptLsn != DbLsn.NULL_LSN) && + (c.endCkptLsn != DbLsn.NULL_LSN)) { + startToEndDistance = DbLsn.getWithCleaningDistance( + c.endCkptLsn, c.startCkptLsn, logFileMax, fileMgr); + } + sb.append(f.format(startToEndDistance)); + + /* + * The maximum number of LNs to replay includes the portion of LNs + * from checkpoint start to checkpoint end of the previous + * interval. + */ + int maxReplay = maxLNs + maxTxnLNs; + if (prevCounter != null) { + maxReplay += prevCounter.postStartLNTxnCount; + maxReplay += prevCounter.postStartLNCount; + } + sb.append(f.format(maxReplay)); + + if (c.endCkptLsn == DbLsn.NULL_LSN) { + sb.append(" ").append(DbLsn.getNoFormatString(getLastLsn())); + } else { + sb.append(" ").append(DbLsn.getNoFormatString(c.endCkptLsn)); + } + + System.out.println(sb.toString()); + prevCounter = c; + } + } + + static class EntryInfo { + public int count; + public int provisionalCount; + public long totalBytes; + public int headerBytes; + public int minBytes; + public int maxBytes; + + EntryInfo() { + count = 0; + provisionalCount = 0; + totalBytes = 0; + headerBytes = 0; + minBytes = 0; + maxBytes = 0; + } + } + + static class LogEntryTypeComparator implements Comparator { + public int compare(LogEntryType o1, LogEntryType o2) { + if (o1 == null) { + return -1; + } + + if (o2 == null) { + return 1; + } + + Byte t1 = Byte.valueOf(o1.getTypeNum()); + Byte t2 = Byte.valueOf(o2.getTypeNum()); + return t1.compareTo(t2); + } + } + + /* + * Accumulate the count of items from checkpoint end->checkpoint end. + */ + static class CheckpointCounter { + public long startCkptLsn = DbLsn.NULL_LSN; + public long endCkptLsn = DbLsn.NULL_LSN; + public int preStartLNTxnCount; + public int preStartLNCount; + public int preStartMapLNTxnCount; + public int preStartMapLNCount; + public int postStartLNTxnCount; + public int postStartLNCount; + public int postStartMapLNTxnCount; + public int postStartMapLNCount; + + public void increment(FileReader reader, byte currentEntryTypeNum) { + LogEntryType entryType = + LogEntryType.findType(currentEntryTypeNum); + + if (entryType == LogEntryType.LOG_CKPT_START) { + startCkptLsn = reader.getLastLsn(); + } else if (entryType.isUserLNType()) { + if (entryType.isTransactional()) { + if (startCkptLsn == DbLsn.NULL_LSN) { + preStartLNTxnCount++; + } else { + postStartLNTxnCount++; + } + } else { + if (startCkptLsn == DbLsn.NULL_LSN) { + preStartLNCount++; + } else { + postStartLNCount++; + } + } + } else if (entryType == LogEntryType.LOG_MAPLN) { + if (startCkptLsn == DbLsn.NULL_LSN) { + preStartMapLNCount++; + } else { + postStartMapLNCount++; + } + } + } + } +} diff --git a/src/com/sleepycat/je/log/Trace.java b/src/com/sleepycat/je/log/Trace.java new file mode 100644 index 0000000..a00b1a7 --- /dev/null +++ b/src/com/sleepycat/je/log/Trace.java @@ -0,0 +1,196 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.Calendar; +import java.util.Collection; +import java.util.Collections; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.TraceLogEntry; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Timestamp; + +/** + * Trace logs event tracing messages into .jdb files. Only critical messages + * that should always be included in a log should use this functionality. + */ +public class Trace extends BasicVersionedWriteLoggable { + + /** + * The log version of the most recent format change for this loggable. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + /* Contents of a debug message. */ + private Timestamp time; + private String msg; + + /** Create a new debug record. */ + public Trace(String msg) { + this.time = getCurrentTimestamp(); + this.msg = msg; + } + + /** Create a trace record that will be filled in from the log. */ + public Trace() { + } + + /** + * @return message part of trace record. + */ + public String getMessage() { + return msg; + } + + /* Generate a timestamp for the current time. */ + private Timestamp getCurrentTimestamp() { + Calendar cal = Calendar.getInstance(); + + return new Timestamp(cal.getTime().getTime()); + } + + /* Check to see if this Environment supports writing. */ + private static boolean isWritePermitted(EnvironmentImpl envImpl) { + if (envImpl == null || + envImpl.isReadOnly() || + envImpl.mayNotWrite() || + envImpl.isDbLoggingDisabled()) { + return false; + } + + return true; + } + + /** Convenience method to create a log entry containing this trace msg. */ + public static void trace(EnvironmentImpl envImpl, String message) { + trace(envImpl, new Trace(message)); + } + + /** Trace a trace object, unit tests only. */ + public static long trace(EnvironmentImpl envImpl, Trace traceMsg) { + if (isWritePermitted(envImpl)) { + return envImpl.getLogManager().log( + new TraceLogEntry(traceMsg), + ReplicationContext.NO_REPLICATE); + } + + return DbLsn.NULL_LSN; + } + + /** + * Convenience method to create a log entry (lazily) containing this trace + * msg. Lazy tracing is used when tracing is desired, but the .jdb files + * are not initialized. 
+ */ + public static void traceLazily(EnvironmentImpl envImpl, + String message) { + if (isWritePermitted(envImpl)) { + envImpl.getLogManager().logLazily( + new TraceLogEntry(new Trace(message)), + ReplicationContext.NO_REPLICATE); + } + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + return Collections.emptyList(); + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return (LogUtils.getTimestampLogSize(time) + + LogUtils.getStringLogSize(msg)); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + LogUtils.writeTimestamp(logBuffer, time); + LogUtils.writeString(logBuffer, msg); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + boolean unpacked = (entryVersion < 6); + time = LogUtils.readTimestamp(itemBuffer, unpacked); + msg = LogUtils.readString(itemBuffer, unpacked, entryVersion); + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + sb.append(""); + sb.append(""); + } + + @Override + public long getTransactionId() { + return 0; + } + + @Override + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof Trace)) + return false; + + return msg.equals(((Trace) other).msg); + } + + @Override + public String toString() { + return (time + "/" + msg); + } + + /** + * Just in case it's ever used as a hash key. + */ + @Override + public int hashCode() { + return toString().hashCode(); + } + + @Override + public boolean equals(Object obj) { + /* Same instance? */ + if (this == obj) { + return true; + } + + /* Is it another Trace? */ + if (!(obj instanceof Trace)) { + return false; + } + + /* + * We could compare all the fields individually, but since they're all + * placed in our toString() method, we can just compare the String + * version of each offer. + */ + return (toString().equals(obj.toString())); + } +} diff --git a/src/com/sleepycat/je/log/UtilizationFileReader.java b/src/com/sleepycat/je/log/UtilizationFileReader.java new file mode 100644 index 0000000..22f7f66 --- /dev/null +++ b/src/com/sleepycat/je/log/UtilizationFileReader.java @@ -0,0 +1,349 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.cleaner.FileSummary; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.BINDeltaLogEntry; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.OldBINDeltaLogEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.SearchResult; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeLocation; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Summarizes the utilized and unutilized portion of each log file by examining + * each log entry. Does not use the Cleaner UtilizationProfile information in + * order to provide a second measure against which to evaluation the + * UtilizationProfile accuracy. + * + * Limitations + * =========== + * BIN-deltas are all considered obsolete, as an implementation short cut and + * for efficiency. 90% (by default) of deltas are obsolete anyway, and it + * would be expensive to fetch the parent BIN to find the lookup key. + * + * Assumes that any currently open transactions will be committed. For + * example, if a deletion or update has been performed but not yet committed, + * the old record will be considered obsolete. Perhaps this behavior could be + * changed in the future by attempting to lock a record (non-blocking) and + * considering a locked record to be non-obsolete; this might make it match + * live utilization counting more closely. + * + * Accesses the Btree, using JE cache memory if necessary and contending with + * other accessors, to check whether an entry is active. + * + * Historical note: This implementation, which uses the Btree to determine + * whether a node is active, replaced an earlier implementation that attempted + * to duplicate the Btree in memory and read the entire log. This older + * implementation had inaccuracies and was less efficient. With the new + * implementation it is also possible to calculation utilization for a range of + * LSNs, reading only that portion of the log. [#22208] + */ +public class UtilizationFileReader extends FileReader { + + /* Long file -> FileSummary */ + private final Map summaries; + + /* Cache of DB ID -> DatabaseImpl for reading live databases. */ + private final Map dbCache; + private final DbTree dbTree; + + private UtilizationFileReader(EnvironmentImpl envImpl, + int readBufferSize, + long startLsn, + long finishLsn) + throws DatabaseException { + + super(envImpl, + readBufferSize, + true, // read forward + startLsn, + null, // single file number + DbLsn.NULL_LSN, // end of file LSN + finishLsn); + + summaries = new HashMap(); + dbCache = new HashMap(); + dbTree = envImpl.getDbTree(); + } + + @Override + protected boolean isTargetEntry() { + + /* + * UtilizationTracker is supposed to mimic the UtilizationProfile. + * Accordingly it does not count the file header or invisible log + * entries because those entries are not covered by the U.P. 
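+         * (Invisible entries are entries that have been effectively
+         * truncated, for example by a replication rollback, and are no
+         * longer live data.)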
+ */ + return ((currentEntryHeader.getType() != + LogEntryType.LOG_FILE_HEADER.getTypeNum()) && + !currentEntryHeader.isInvisible()); + } + + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + final LogEntryType lastEntryType = + LogEntryType.findType(currentEntryHeader.getType()); + final LogEntry entry = lastEntryType.getNewLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + + ExtendedFileSummary summary = + (ExtendedFileSummary) summaries.get(window.currentFileNum()); + if (summary == null) { + summary = new ExtendedFileSummary(); + summaries.put(window.currentFileNum(), summary); + } + + final int size = getLastEntrySize(); + + summary.totalCount += 1; + summary.totalSize += size; + + if (entry instanceof LNLogEntry) { + final LNLogEntry lnEntry = (LNLogEntry) entry; + final DatabaseImpl dbImpl = getActiveDb(lnEntry.getDbId()); + final boolean isActive = (dbImpl != null) && + !lnEntry.isImmediatelyObsolete(dbImpl) && + isLNActive(lnEntry, dbImpl); + applyLN(summary, size, isActive); + } else if (entry instanceof BINDeltaLogEntry || + entry instanceof OldBINDeltaLogEntry) { + /* Count Delta as IN. */ + summary.totalINCount += 1; + summary.totalINSize += size; + /* Most deltas are obsolete, so count them all obsolete. */ + summary.obsoleteINCount += 1; + summary.recalcObsoleteINSize += size; + } else if (entry instanceof INLogEntry) { + final INLogEntry inEntry = (INLogEntry) entry; + final DatabaseImpl dbImpl = getActiveDb(inEntry.getDbId()); + final boolean isActive = dbImpl != null && + isINActive(inEntry, dbImpl); + applyIN(summary, size, isActive); + } + + return true; + } + + private DatabaseImpl getActiveDb(DatabaseId dbId) { + final DatabaseImpl dbImpl = + dbTree.getDb(dbId, -1 /*timeout*/, dbCache); + if (dbImpl == null) { + return null; + } + if (dbImpl.isDeleteFinished()) { + return null; + } + return dbImpl; + } + + /** + * Mimics lookup in com.sleepycat.je.cleaner.FileProcessor.processLN. + */ + private boolean isLNActive(LNLogEntry lnEntry, DatabaseImpl dbImpl) { + lnEntry.postFetchInit(dbImpl); + final byte[] key = lnEntry.getKey(); + final Tree tree = dbImpl.getTree(); + final TreeLocation location = new TreeLocation(); + + final boolean parentFound = tree.getParentBINForChildLN( + location, key, false /*splitsAllowed*/, + false /*blindDeltaOps*/, CacheMode.DEFAULT); + + final BIN bin = location.bin; + + try { + if (!parentFound || bin.isEntryKnownDeleted(location.index)) { + return false; + } + final int index = location.index; + final long treeLsn = bin.getLsn(index); + if (treeLsn == DbLsn.NULL_LSN) { + return false; + } + final long logLsn = getLastLsn(); + return treeLsn == logLsn; + + } finally { + if (bin != null) { + bin.releaseLatch(); + } + } + } + + /** + * Mimics lookup in com.sleepycat.je.cleaner.FileProcessor.processIN. 
+ */ + private boolean isINActive(INLogEntry inEntry, DatabaseImpl dbImpl) { + + final long logLsn = getLastLsn(); + final IN logIn = inEntry.getIN(dbImpl); + logIn.setDatabase(dbImpl); + final Tree tree = dbImpl.getTree(); + if (logIn.isRoot()) { + return logLsn == tree.getRootLsn(); + } + + logIn.latch(CacheMode.DEFAULT); + + final SearchResult result = tree.getParentINForChildIN( + logIn, true, /*useTargetLevel*/ + true, /*doFetch*/ CacheMode.DEFAULT); + + if (!result.exactParentFound) { + return false; + } + try { + long treeLsn = result.parent.getLsn(result.index); + + if (treeLsn == DbLsn.NULL_LSN) { + return false; + } + + if (treeLsn == logLsn) { + return true; + } + + if (!logIn.isBIN()) { + return false; + } + + /* The treeLsn may refer to a BIN-delta. */ + final IN treeIn = + result.parent.fetchIN(result.index, CacheMode.DEFAULT); + + treeLsn = treeIn.getLastFullLsn(); + + return treeLsn == logLsn; + } finally { + result.parent.releaseLatch(); + } + } + + private void applyLN(ExtendedFileSummary summary, + int size, + boolean isActive) { + summary.totalLNCount += 1; + summary.totalLNSize += size; + if (!isActive) { + summary.obsoleteLNCount += 1; + summary.recalcObsoleteLNSize += size; + } + } + + private void applyIN(ExtendedFileSummary summary, + int size, + boolean isActive) { + summary.totalINCount += 1; + summary.totalINSize += size; + if (!isActive) { + summary.obsoleteINCount += 1; + summary.recalcObsoleteINSize += size; + } + } + + private void cleanUp() { + dbTree.releaseDbs(dbCache); + } + + /** + * Creates a UtilizationReader, reads the log, and returns the resulting + * Map of Long file number to FileSummary. + */ + public static Map + calcFileSummaryMap(EnvironmentImpl envImpl) { + return calcFileSummaryMap(envImpl, DbLsn.NULL_LSN, DbLsn.NULL_LSN); + } + + public static Map + calcFileSummaryMap(EnvironmentImpl envImpl, + long startLsn, + long finishLsn) { + + final int readBufferSize = envImpl.getConfigManager().getInt + (EnvironmentParams.LOG_ITERATOR_READ_SIZE); + + final UtilizationFileReader reader = new UtilizationFileReader + (envImpl, readBufferSize, startLsn, finishLsn); + try { + while (reader.readNextEntry()) { + /* All the work is done in processEntry. */ + } + return reader.summaries; + } finally { + reader.cleanUp(); + } + } + + private static class ExtendedFileSummary extends FileSummary { + private int recalcObsoleteINSize; + private int recalcObsoleteLNSize; + + /** + * Overrides the LN size calculation to return the recalculated number + * of obsolete LN bytes. + */ + @Override + public int getObsoleteLNSize() { + return recalcObsoleteLNSize; + } + + /** + * Overrides the IN size calculation to return the recalculated number + * of obsolete IN bytes. + */ + @Override + public int getObsoleteINSize() { + return recalcObsoleteINSize; + } + + /** + * Overrides to add the extended data fields. + */ + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(super.toString()); + buf.append(""); + return buf.toString(); + } + } + + private static class NodeInfo { + ExtendedFileSummary summary; + int size; + long dbId; + } +} diff --git a/src/com/sleepycat/je/log/VLSNDistributionReader.java b/src/com/sleepycat/je/log/VLSNDistributionReader.java new file mode 100644 index 0000000..1dce983 --- /dev/null +++ b/src/com/sleepycat/je/log/VLSNDistributionReader.java @@ -0,0 +1,178 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.VLSN; + +/** + * This is a debugging utility which implements the unadvertised DbPrintLog -vd + * option, which displays VLSN distribution in a log. Here's a sample of the + * output. This is used to analyze log cleaner barrier behavior. + * + * ... 3 files + * file 0xb6 numRepRecords = 9 firstVLSN = 1,093,392 lastVLSN = 1,093,400 + * file 0xb7 numRepRecords = 4 firstVLSN = 1,093,401 lastVLSN = 1,093,404 + * ... 3 files + * file 0xbb numRepRecords = 1 firstVLSN = 1,093,405 lastVLSN = 1,093,405 + * file 0xbc numRepRecords = 1 firstVLSN = 1,093,406 lastVLSN = 1,093,406 + * ... 1 files + * file 0xbe numRepRecords = 1 firstVLSN = 1,093,407 lastVLSN = 1,093,407 + * file 0xbf numRepRecords = 2 firstVLSN = 1,093,408 lastVLSN = 1,093,409 + * file 0xc0 numRepRecords = 7 firstVLSN = 1,093,410 lastVLSN = 1,093,416 + * ... 0 files at end + * First file: 0x0 + * Last file: 0xc0 + */ +public class VLSNDistributionReader extends DumpFileReader { + + private final Map countByFile; + private PerFileInfo info; + private final Long[] allFileNums; + private int fileNumIndex; + + /** + * Create this reader to start at a given LSN. + */ + public VLSNDistributionReader(EnvironmentImpl envImpl, + int readBufferSize, + long startLsn, + long finishLsn, + long endOfFileLsn, + boolean verbose, + boolean forwards) + throws DatabaseException { + + super(envImpl, readBufferSize, startLsn, finishLsn, endOfFileLsn, + null /* all entryTypes */, + null /* all dbIds */, + null /* all txnIds */, + verbose, + true, /*repEntriesOnly*/ + forwards); + countByFile = new HashMap(); + allFileNums = fileManager.getAllFileNumbers(); + fileNumIndex = 0; + } + + /** + * Count the number of vlsns in the file, along with the first and last + * vlsn. Display this when the log reader moves to a new file. . + */ + @Override + protected boolean processEntry(ByteBuffer entryBuffer) { + VLSN currentVLSN = currentEntryHeader.getVLSN(); + long currentFile = window.currentFileNum(); + + if (info == null) { + info = new PerFileInfo(currentFile); + countByFile.put(currentFile, info); + } else if (!info.isFileSame(currentFile)) { + /* + * We've flipped to a new file. We'd like to print the number + * of files between the one targeted by this info to give a sense + * for how many are inbetween. For example, if the log has file + * 4, 5, 6, and only 6 has a vlsn, we should print + * ... 2 files + * file 0x6: ... + */ + info.display(); + + /* Set up a new file. */ + info = new PerFileInfo(currentFile); + countByFile.put(currentFile, info); + } + + info.increment(currentVLSN); + + int nextEntryPosition = + entryBuffer.position() + currentEntryHeader.getItemSize(); + entryBuffer.position(nextEntryPosition); + + return true; + } + + @Override + public void summarize(boolean csvFormat) { + if (info != null) { + info.display(); + } + + System.err.println( "... 
" + + (allFileNums.length - fileNumIndex) + + " files at end"); + + System.err.println("First file: 0x" + + Long.toHexString(fileManager.getFirstFileNum())); + System.err.println("Last file: 0x" + + Long.toHexString(fileManager.getLastFileNum())); + } + + /** + * Tracks per-file statistics. + */ + private class PerFileInfo { + private final long fileNum; + private VLSN firstVLSNInFile; + private VLSN lastVLSNInFile; + private int count; + + PerFileInfo(long fileNum) { + this.fileNum = fileNum; + } + + public boolean isFileSame(long currentFile) { + return fileNum == currentFile; + } + + void increment(VLSN currentVLSN) { + count++; + if (firstVLSNInFile == null) { + firstVLSNInFile = currentVLSN; + } + lastVLSNInFile = currentVLSN; + } + + @Override + public String toString() { + return "file 0x" + Long.toHexString(fileNum) + + " numRepRecords = " + count + + " firstVLSN = " + firstVLSNInFile + + " lastVLSN = " + lastVLSNInFile; + } + + void display() { + int inbetweenCount = 0; + while (fileNumIndex < allFileNums.length) { + long whichFile = allFileNums[fileNumIndex]; + + if (whichFile > fileNum) { + break; + } + fileNumIndex++; + inbetweenCount++; + } + + if (inbetweenCount > 1) { + System.err.println("... " + (inbetweenCount -1) + " files"); + } + System.err.println(this); + } + } +} diff --git a/src/com/sleepycat/je/log/VersionedWriteLoggable.java b/src/com/sleepycat/je/log/VersionedWriteLoggable.java new file mode 100644 index 0000000..78bcb7b --- /dev/null +++ b/src/com/sleepycat/je/log/VersionedWriteLoggable.java @@ -0,0 +1,105 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; +import java.util.Collection; + +import com.sleepycat.je.log.entry.ReplicableLogEntry; + +/** + * A sub-interface of {@link Loggable} implemented by classes that can write + * themselves to a byte buffer in an earlier log format, for use by instances + * of {@link ReplicableLogEntry} that need to support an earlier log format + * during replication. See [#22336]. + * + *
+ * <p>Classes that implement {@code Loggable} should implement this interface
+ * if they are included in replication data.
+ *
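+ * <p>A minimal illustrative sketch (version number hypothetical) of the
+ * pattern that implementing classes follow:
+ * <pre>
+ *     // Log version of this class's most recent format change.
+ *     private static final int LAST_FORMAT_CHANGE = 8;
+ *
+ *     public int getLastFormatChange() {
+ *         return LAST_FORMAT_CHANGE;
+ *     }
+ * </pre>
+ *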
+ * <p>
        Implementing classes should document the version of the class's most + * recent format change. Log entry classes that contain {@code + * VersionedWriteLoggable} items can use that information to determine if they + * can copy the log contents for an entry directly or if they need to convert + * them in order to be compatible with a particular log version. + */ +public interface VersionedWriteLoggable extends Loggable { + + /** + * Returns the log version of the most recent format change for this + * loggable item. + * + * @return the log version of the most recent format change + * + * @see ReplicableLogEntry#getLastFormatChange() + */ + int getLastFormatChange(); + + /** + * @see ReplicableLogEntry#getEmbeddedLoggables() + */ + Collection getEmbeddedLoggables(); + + /** + * Returns the number of bytes needed to store this object in the format + * for the specified log version. Earlier log versions only need to be + * supported for log entries with format changes made in {@link + * LogEntryType#LOG_VERSION_REPLICATE_OLDER} or greater. + * + * @param logVersion the log version + * @param forReplication whether the entry will be sent over the wire, + * and not written to the log. + * @return the number of bytes to store this object for the log version + */ + int getLogSize(int logVersion, boolean forReplication); + + /** + * Serializes this object into the specified buffer in the format for the + * specified log version. Earlier log versions only need to be + * supported for log entries with format changes made in {@link + * LogEntryType#LOG_VERSION_REPLICATE_OLDER} or greater. + * + * @param logBuffer the destination buffer + * @param logVersion the log version + * @param forReplication whether the entry will be sent over the wire, + * and not written to the log. + */ + void writeToLog(ByteBuffer logBuffer, + int logVersion, + boolean forReplication); + + /** + * Returns whether this format has a variant that is optimized for + * replication. + */ + boolean hasReplicationFormat(); + + /** + * Returns whether it is worthwhile to materialize and then re-serialize a + * log entry in a format optimized for replication. Implementations should + * attempt to check efficiently, without instantiating the log entry + * object. Some implementations will simply return false. + * + *
+     * <p>WARNING: The logBuffer position must not be changed by this method.
+     *
+     * <p>
        WARNING: The shared LogEntry object is used for calling this method, + * and this method must not change any of the fields in the object. + * + * @param logBuffer contains the entry that would be re-serialized. + * @param srcVersion the log version of entry in logBuffer. + * @param destVersion the version that would be used for re-serialization. + */ + boolean isReplicationFormatWorthwhile(ByteBuffer logBuffer, + int srcVersion, + int destVersion); +} diff --git a/src/com/sleepycat/je/log/WholeEntry.java b/src/com/sleepycat/je/log/WholeEntry.java new file mode 100644 index 0000000..bbd0f3e --- /dev/null +++ b/src/com/sleepycat/je/log/WholeEntry.java @@ -0,0 +1,38 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import com.sleepycat.je.log.entry.LogEntry; + +/** + * This class packages the log entry header and the log entry "contents" + * together for the use of components that need information from both parts. + */ +public class WholeEntry { + private final LogEntryHeader header; + private final LogEntry entry; + + WholeEntry(LogEntryHeader header, LogEntry entry) { + this.header = header; + this.entry = entry; + } + + public LogEntryHeader getHeader() { + return header; + } + + public LogEntry getEntry() { + return entry; + } +} diff --git a/src/com/sleepycat/je/log/entry/AbortLogEntry.java b/src/com/sleepycat/je/log/entry/AbortLogEntry.java new file mode 100644 index 0000000..6e786d6 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/AbortLogEntry.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.txn.TxnAbort; + +/** + * Log entry for a transaction abort. + */ +public class AbortLogEntry extends SingleItemReplicableEntry { + + /** + * The log version number of the most recent change for this log entry, + * including any changes to the format of the underlying {@link TxnAbort} + * object. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 13; + + /** Construct a log entry for reading a {@link TxnAbort} object. */ + public AbortLogEntry() { + super(TxnAbort.class); + } + + /** Construct a log entry for writing a {@link TxnAbort} object. 
*/ + public AbortLogEntry(final TxnAbort abort) { + super(LogEntryType.LOG_TXN_ABORT, abort); + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } +} diff --git a/src/com/sleepycat/je/log/entry/BINDeltaLogEntry.java b/src/com/sleepycat/je/log/entry/BINDeltaLogEntry.java new file mode 100644 index 0000000..760fd17 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/BINDeltaLogEntry.java @@ -0,0 +1,59 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; + +/** + * Holds a partial BIN that serves as a live BIN delta. + * + * A live delta (unlike a the obsolete OldBINDelta, which is contained in an + * OldBINDeltaLogEntry) may appear in the Btree to serve as an incomplete BIN. + */ +public class BINDeltaLogEntry extends INLogEntry { + + public BINDeltaLogEntry(Class logClass) { + super(logClass); + } + + /** + * When constructing an entry for writing to the log, use LOG_BIN_DELTA. + */ + public BINDeltaLogEntry(BIN bin) { + super(bin, true /*isBINDelta*/); + } + + /** + * Used to write a pre-serialized log entry. + */ + public BINDeltaLogEntry(final ByteBuffer bytes, + final long lastFullLsn, + final long lastDeltaLsn, + final LogEntryType logEntryType, + final IN parent) { + super(bytes, lastFullLsn, lastDeltaLsn, logEntryType, parent); + } + + /* + * Whether this LogEntry reads/writes a BIN-Delta logrec. + */ + @Override + public boolean isBINDelta() { + return true; + } +} diff --git a/src/com/sleepycat/je/log/entry/BaseEntry.java b/src/com/sleepycat/je/log/entry/BaseEntry.java new file mode 100644 index 0000000..deb63ba --- /dev/null +++ b/src/com/sleepycat/je/log/entry/BaseEntry.java @@ -0,0 +1,177 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.VLSN; + +/** + * A Log entry allows you to read, write and dump a database log entry. Each + * entry may be made up of one or more loggable items. + * + * The log entry on disk consists of + * a. a log header defined by LogManager + * b. 
a VLSN, if this entry type requires it, and replication is on. + * c. the specific contents of the log entry. + * + * This class encompasses (b and c). + * + * @param the type of the loggable items in this entry + */ +abstract class BaseEntry implements LogEntry { + + /* + * These fields are transient and are not persisted to the log + */ + + /* + * Constructor used to create log entries when reading. + */ + private final Constructor noArgsConstructor; + + /* + * Attributes of the entry type may be used to conditionalizing the reading + * and writing of the entry. + */ + LogEntryType entryType; + + /** + * Constructor to read an entry. The logEntryType must be set later, + * through setLogType(). + * + * @param logClass the class for the contained loggable item or items + */ + BaseEntry(Class logClass) { + noArgsConstructor = getNoArgsConstructor(logClass); + } + + static Constructor getNoArgsConstructor( + final Class logClass) { + try { + return logClass.getConstructor((Class[]) null); + } catch (SecurityException | NoSuchMethodException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + + /** + * @return a new instance of the class used to create the log entry. + */ + T newInstanceOfType() { + return newInstanceOfType(noArgsConstructor); + } + + static T newInstanceOfType( + final Constructor noArgsConstructor) { + try { + return noArgsConstructor.newInstance((Object[]) null); + } catch (IllegalAccessException | InstantiationException | + IllegalArgumentException | InvocationTargetException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + + /** + * Constructor to write an entry. + */ + BaseEntry() { + noArgsConstructor = null; + } + + /** + * Returns the class of the contained loggable item or items, or null if + * the instance was created to write an entry. + * + * @return the loggable class or null + */ + public Class getLogClass() { + return (noArgsConstructor != null) ? + noArgsConstructor.getDeclaringClass() : + null; + } + + /** + * Inform a BaseEntry instance of its corresponding LogEntryType. + */ + @Override + public void setLogType(LogEntryType entryType) { + this.entryType = entryType; + } + + @Override + public LogEntryType getLogType() { + return entryType; + } + + /** + * By default, this log entry is complete and does not require fetching + * additional entries. This method is overridden by BINDeltaLogEntry. + */ + @Override + public Object getResolvedItem(DatabaseImpl dbImpl) { + return getMainItem(); + } + + @Override + public boolean isImmediatelyObsolete(DatabaseImpl dbImpl) { + return false; + } + + @Override + public boolean isDeleted() { + return false; + } + + /** + * Do any processing we need to do after logging, while under the logging + * latch. + * @throws DatabaseException from subclasses. + */ + @Override + public void postLogWork(@SuppressWarnings("unused") LogEntryHeader header, + @SuppressWarnings("unused") long justLoggedLsn, + @SuppressWarnings("unused") VLSN vlsn) { + + /* by default, do nothing. 
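+         * Subclasses may override this, for example to record the
+         * just-logged LSN or VLSN in the in-memory item.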
*/ + } + + public void postFetchInit(@SuppressWarnings("unused") + DatabaseImpl dbImpl) { + } + + @Override + public LogEntry clone() { + + try { + return (LogEntry) super.clone(); + } catch (CloneNotSupportedException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + dumpEntry(sb, true); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/log/entry/BaseReplicableEntry.java b/src/com/sleepycat/je/log/entry/BaseReplicableEntry.java new file mode 100644 index 0000000..9b4f38e --- /dev/null +++ b/src/com/sleepycat/je/log/entry/BaseReplicableEntry.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.VersionedWriteLoggable; + +/** + * A basic implementation of a replicable log entry that provides for writing + * in a single format by default. Starting with log version 9, as specified by + * {@link LogEntryType#LOG_VERSION_REPLICATE_OLDER}, entry classes whose log + * format has changed since the previous log version will need to override the + * {@link ReplicableLogEntry#getLastFormatChange}, {@link #getSize(int, + * boolean)} and {@link #writeEntry(ByteBuffer, int, boolean)} methods to + * support writing the entry in the previous log format. + * + * @param the type of the loggable items in this entry + */ +abstract class BaseReplicableEntry + extends BaseEntry + implements ReplicableLogEntry { + + /** + * Creates an instance of this class for reading a log entry. + * + * @param logClass the class of the contained loggable item or items + * @see BaseEntry#BaseEntry(Class) + */ + BaseReplicableEntry(final Class logClass) { + super(logClass); + } + + /** + * Creates an instance of this class for writing a log entry. + */ + BaseReplicableEntry() { + } + + @Override + public void writeEntry(final ByteBuffer destBuffer) { + writeEntry( + destBuffer, LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public int getSize() { + return getSize(LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public boolean hasReplicationFormat() { + return false; + } + + @Override + public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer, + final int srcVersion, + final int destVersion) { + return false; + } +} diff --git a/src/com/sleepycat/je/log/entry/CommitLogEntry.java b/src/com/sleepycat/je/log/entry/CommitLogEntry.java new file mode 100644 index 0000000..4c07150 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/CommitLogEntry.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.txn.TxnCommit; + +/** + * Log entry for a transaction commit. + */ +public class CommitLogEntry extends SingleItemReplicableEntry { + + /** + * The log version number of the most recent change for this log entry, + * including any changes to the format of the underlying {@link TxnCommit} + * object. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 13; + + /** Construct a log entry for reading a {@link TxnCommit} object. */ + public CommitLogEntry() { + super(TxnCommit.class); + } + + /** Construct a log entry for writing a {@link TxnCommit} object. */ + public CommitLogEntry(final TxnCommit commit) { + super(LogEntryType.LOG_TXN_COMMIT, commit); + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } +} diff --git a/src/com/sleepycat/je/log/entry/DbOperationType.java b/src/com/sleepycat/je/log/entry/DbOperationType.java new file mode 100644 index 0000000..31d1ff1 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/DbOperationType.java @@ -0,0 +1,153 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.VersionedWriteLoggable; + +/** + * DbOperationType is a persistent enum used in NameLNLogEntries. It supports + * replication of database operations by documenting the type of api operation + * which instigated the logging of a NameLN. + */ +public enum DbOperationType implements VersionedWriteLoggable { + + NONE((byte) 0), + CREATE((byte) 1), + REMOVE((byte) 2), + TRUNCATE((byte) 3), + RENAME((byte) 4), + UPDATE_CONFIG((byte) 5); + + /** + * The log version of the most recent format change for this loggable. 
+ * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + private byte value; + + private DbOperationType(byte value) { + this.value = value; + } + + public static DbOperationType readTypeFromLog(final ByteBuffer entryBuffer, + @SuppressWarnings("unused") + int entryVersion) { + byte opVal = entryBuffer.get(); + switch (opVal) { + case 1: + return CREATE; + + case 2: + return REMOVE; + + case 3: + return TRUNCATE; + + case 4: + return RENAME; + + case 5: + return UPDATE_CONFIG; + + case 0: + default: + return NONE; + + } + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + return Collections.emptyList(); + } + + @Override + public int getLogSize() { + return getLogSize(LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer) { + writeToLog( + logBuffer, LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return 1; + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + logBuffer.put(value); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + value = itemBuffer.get(); + } + + @Override + public boolean hasReplicationFormat() { + return false; + } + + @Override + public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer, + final int srcVersion, + final int destVersion) { + return false; + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + } + + @Override + public long getTransactionId() { + return 0; + } + + @Override + public boolean logicalEquals(Loggable other) { + if (!(other instanceof DbOperationType)) + return false; + + return value == ((DbOperationType) other).value; + } + + /** + * Return true if this database operation type needs to write + * DatabaseConfig. + */ + public static boolean isWriteConfigType(DbOperationType opType) { + return (opType == CREATE || opType == UPDATE_CONFIG); + } +} diff --git a/src/com/sleepycat/je/log/entry/DeletedDupLNLogEntry.java b/src/com/sleepycat/je/log/entry/DeletedDupLNLogEntry.java new file mode 100644 index 0000000..f82dfd9 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/DeletedDupLNLogEntry.java @@ -0,0 +1,110 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DupKeyData; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; + +/** + * DupDeletedLNEntry encapsulates a deleted dupe LN entry. 
+ * This contains all the regular transactional LN log entry fields and an
+ * extra key, which is the nulled-out data field of the LN (which becomes
+ * the key in the duplicate tree).
+ *
+ * WARNING: Obsolete in version 8, only used by some log readers.
+ *
+ * TODO Move to dupConvert package, after testing is complete.
+ */
+public class DeletedDupLNLogEntry extends LNLogEntry<LN> {
+
+    /*
+     * Deleted duplicate LNs must log an extra key in their log entries,
+     * because the data field that is the "key" in a dup tree has been
+     * nulled out because the LN is deleted.
+     */
+    private byte[] dataAsKey;
+
+    /**
+     * Constructor to read an entry.
+     */
+    public DeletedDupLNLogEntry() {
+        super(com.sleepycat.je.tree.LN.class);
+    }
+
+    @Override
+    byte[] combineDupKeyData() {
+        return DupKeyData.combine(getKey(), dataAsKey);
+    }
+
+    /**
+     * Extends its superclass to read in the extra dup key.
+     */
+    @Override
+    public void readEntry(EnvironmentImpl envImpl,
+                          LogEntryHeader header,
+                          ByteBuffer entryBuffer) {
+
+        readBaseLNEntry(envImpl, header, entryBuffer,
+                        false /*keyIsLastSerializedField*/);
+
+        /* Key */
+        int logVersion = header.getVersion();
+        dataAsKey = LogUtils.readByteArray(entryBuffer, (logVersion < 6));
+    }
+
+    /**
+     * Extends its superclass to dump out the extra key.
+     */
+    @Override
+    public StringBuilder dumpEntry(StringBuilder sb, boolean verbose) {
+        super.dumpEntry(sb, verbose);
+        sb.append(Key.dumpString(dataAsKey, 0));
+        return sb;
+    }
+
+    /*
+     * Writing support
+     */
+
+    @Override
+    public boolean hasReplicationFormat() {
+        return false;
+    }
+
+    @Override
+    public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer,
+                                                 final int srcVersion,
+                                                 final int destVersion) {
+        return false;
+    }
+
+    @Override
+    public int getSize(final int logVersion, final boolean forReplication) {
+        throw EnvironmentFailureException.unexpectedState();
+    }
+
+    @Override
+    public void writeEntry(final ByteBuffer destBuffer,
+                           final int logVersion,
+                           final boolean forReplication) {
+        throw EnvironmentFailureException.unexpectedState();
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/EmptyLogEntry.java b/src/com/sleepycat/je/log/entry/EmptyLogEntry.java
new file mode 100644
index 0000000..85cd1ee
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/EmptyLogEntry.java
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.log.Loggable;
+
+/**
+ * Contains no information, implying that the LogEntryType is the only
+ * information needed.
+ * <p>
        + * A single byte is actually written, but this is only to satisfy non-null + * buffer dependencies in ChecksumValidator and file readers. + */ +public class EmptyLogEntry implements Loggable { + + public EmptyLogEntry() { + } + + public int getLogSize() { + return 1; + } + + public void writeToLog(ByteBuffer logBuffer) { + logBuffer.put((byte) 42); + } + + public void readFromLog(ByteBuffer logBuffer, int entryVersion) { + logBuffer.get(); + } + + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + } + + /** + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } + + public long getTransactionId() { + return 0; + } +} diff --git a/src/com/sleepycat/je/log/entry/FileHeaderEntry.java b/src/com/sleepycat/je/log/entry/FileHeaderEntry.java new file mode 100644 index 0000000..0127ac8 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/FileHeaderEntry.java @@ -0,0 +1,55 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileHeader; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; + +/** + * Contains a FileHeader entry. + */ +public class FileHeaderEntry extends SingleItemEntry { + + /** + * Construct a log entry for reading. + */ + public FileHeaderEntry(Class logClass) { + super(logClass); + } + + /** + * Construct a log entry for writing. + */ + public FileHeaderEntry(LogEntryType entryType, FileHeader item) { + super(entryType, item); + } + + /** + * For a file header, the version is not available until after reading the + * item. Set the version in the entry header so it can be used by + * FileReaders, etc. [#16939] + */ + @Override + public void readEntry(EnvironmentImpl envImpl, + LogEntryHeader header, + ByteBuffer entryBuffer) { + super.readEntry(envImpl, header, entryBuffer); + FileHeader entry = (FileHeader) getMainItem(); + header.setFileHeaderVersion(entry.getLogVersion()); + } +} diff --git a/src/com/sleepycat/je/log/entry/INContainingEntry.java b/src/com/sleepycat/je/log/entry/INContainingEntry.java new file mode 100644 index 0000000..b830647 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/INContainingEntry.java @@ -0,0 +1,61 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log.entry; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.tree.IN; + +/** + * An INContainingEntry is a log entry that contains internal nodes. + */ +public interface INContainingEntry { + + /** + * Currently used by recovery only. For an OldBINDeltaEntry it merges + * the delta with the last full BIN and returns the new full BIN. For + * a new BINDeltaLogEntry, it just returns the delta. And for an + * INLogEntry it just returns the (full) IN. + */ + public IN getIN(DatabaseImpl dbImpl) + throws DatabaseException; + + /* + * A quick way to check whether this LogEntry reads/writes a BIN-Delta + * logrec. + */ + public boolean isBINDelta(); + + /** + * @return the database id held within this log entry. + */ + public DatabaseId getDbId(); + + /** + * @return the LSN of the prior full version of this node, or NULL_LSN if + * no prior full version. Used for counting the prior version as obsolete. + * If the offset of the LSN is zero, only the file number is known because + * we read a version 1 log entry. + */ + public long getPrevFullLsn(); + + /** + * @return the LSN of the prior delta version of this node, or NULL_LSN if + * the prior version is a full version. Used for counting the prior + * version as obsolete. If the offset of the LSN is zero, only the file + * number is known because we read a version 1 log entry. + */ + public long getPrevDeltaLsn(); +} diff --git a/src/com/sleepycat/je/log/entry/INLogEntry.java b/src/com/sleepycat/je/log/entry/INLogEntry.java new file mode 100644 index 0000000..7197b8b --- /dev/null +++ b/src/com/sleepycat/je/log/entry/INLogEntry.java @@ -0,0 +1,394 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.utilint.DbLsn; + +/** + * - INLogEntry is used to read/write full-version IN logrecs. + * + * - BINDeltaLogEntry subclasses INLogEntry and is used to read/write + * BIN-delta logrecs for log versions 9 or later. + * + * - OldBINDeltaLogEntry is used to read/write BIN-delta logrecs for + * log versions earlier than 9. OldBINDeltaLogEntry is not a subclass + * of INLogEntry. + * + * On disk, a full IN logrec contains: + * + *
+ * <pre>
        + * (3 <= version < 6)
        + *        IN
        + *        database id
        + *        prevFullLsn  -- in version 2
        + *
        + * (6 <= version < 8)
        + *        database id
        + *        prevFullLsn
        + *        IN
        + *
        + * (8 <= version)
        + *        database id
        + *        prevFullLsn
        + *        prevDeltaLsn
        + *        IN
+ * </pre>
+ *
+ * On disk, a BIN-delta logrec written via the BINDeltaLogEntry contains:
+ *
+ * <pre>
        + * (version == 9)
        + *        database id
        + *        prevFullLsn  -- always NULL
        + *        prevDeltaLsn
        + *        BIN (dirty slots only)
        + *        prevFullLsn
        + *
        + * (version >= 10)
        + *        database id
        + *        prevFullLsn
        + *        prevDeltaLsn
        + *        BIN (dirty slots only and including the new fullBinNEntries and
        + *             fullBinMaxEntries fields) 
+ * </pre>
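+ *
+ * The current-version write path below serializes exactly this order; a
+ * condensed view of writeEntry:
+ * <pre>
+ *   dbId.writeToLog(destBuffer);
+ *   LogUtils.writePackedLong(destBuffer, prevFullLsn);
+ *   LogUtils.writePackedLong(destBuffer, prevDeltaLsn);
+ *   in.writeToLog(destBuffer, isBINDelta());   // full IN or dirty slots only
+ * </pre>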
        + * + */ +public class INLogEntry extends BaseEntry + implements LogEntry, INContainingEntry { + + /* + * Persistent fields in an IN entry. + */ + + private DatabaseId dbId; + + /* + * this.in may be a (a) UIN, (b) full BIN, or (c) BIN delta. + * In case (a), "this" is a INLogEntry + * In case (c), "this" is a BINDeltaLogEntry instance. + * In case (b), "this" may be either a INLogEntry or a BINDeltaLogEntry + * instance. It will be a BINDeltaLogEntry instance, if "this" is used + * to log a full in-memory BIN as a BIN-delta. + */ + private T in; + + /** + * If non-null, used to write a pre-serialized log entry. In this case the + * 'in' field is null. + */ + private ByteBuffer inBytes; + + /* + * The lsn of the previous full-version logrec for the same IN. + * + * See comment above about the evolution of this field. + */ + private long prevFullLsn; + + /* + * If this is a BIN logrec and the previous logrec for the same BIN was + * a BIN-delta, prevDeltaLsn is the lsn of that previous logrec. Otherwise, + * prevDeltaLsn is NULL. + * + * See comment above about the evolution of this field. + */ + private long prevDeltaLsn; + + /** + * Construct a log entry for reading. + */ + public static INLogEntry create(final Class INClass) { + return new INLogEntry(INClass); + } + + INLogEntry(Class INClass) { + super(INClass); + } + + /** + * Construct an INLogEntry for writing to the log. + */ + public INLogEntry(T in) { + this(in, false /*isBINDelta*/); + } + + /* + * Used by both INLogEntry and BINDeltaLogEntry for writing to the log. + */ + INLogEntry(T in, boolean isBINDelta) { + + setLogType(isBINDelta ? LogEntryType.LOG_BIN_DELTA : in.getLogType()); + + dbId = in.getDatabase().getId(); + + this.in = in; + inBytes = null; + + prevFullLsn = in.getLastFullLsn(); + prevDeltaLsn = in.getLastDeltaLsn(); + } + + /** + * Used to write a pre-serialized log entry. + */ + public INLogEntry(final ByteBuffer bytes, + final long lastFullLsn, + final long lastDeltaLsn, + final LogEntryType logEntryType, + final IN parent) { + + setLogType(logEntryType); + + dbId = parent.getDatabase().getId(); + + in = null; + inBytes = bytes; + + prevFullLsn = lastFullLsn; + prevDeltaLsn = lastDeltaLsn; + } + + /* + * Whether this LogEntry reads/writes a BIN-Delta logrec. + * Overriden by the BINDeltaLogEntry subclass. + */ + @Override + public boolean isBINDelta() { + return false; + } + + @Override + public DatabaseId getDbId() { + return dbId; + } + + @Override + public long getPrevFullLsn() { + return prevFullLsn; + } + + @Override + public long getPrevDeltaLsn() { + return prevDeltaLsn; + } + + @Override + public T getMainItem() { + assert inBytes == null; + + return in; + } + + @Override + public IN getIN(DatabaseImpl dbImpl) { + assert inBytes == null; + + return in; + } + + public long getNodeId() { + assert inBytes == null; + + return in.getNodeId(); + } + + public boolean isPreSerialized() { + return inBytes != null; + } + + /** + * Returns the main item BIN if it has any slots with expiration times. + * Must only be called if this entry's type is BIN or BIN_DELTA. + * + * This method is called for expiration tracking because getMainItem and + * getIN cannot be called on an INLogEntry logging parameter, since it may + * be in pre-serialize form when it appears in the off-heap cache. 
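+     * <p>
+     * For the pre-serialized case, the implementation below materializes a
+     * scratch BIN from the buffer without consuming it:
+     * <pre>
+     *   inBytes.mark();
+     *   readMainItem((T) bin, inBytes, LogEntryType.LOG_VERSION);
+     *   inBytes.reset();  // buffer stays intact for a later writeEntry
+     * </pre>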
+ */ + public BIN getBINWithExpiration() { + + if (inBytes != null) { + final BIN bin = new BIN(); + if (!bin.mayHaveExpirationValues( + inBytes, LogEntryType.LOG_VERSION)) { + return null; + } + inBytes.mark(); + readMainItem((T) bin, inBytes, LogEntryType.LOG_VERSION); + inBytes.reset(); + return bin.hasExpirationValues() ? bin : null; + } + + assert in.isBIN(); + final BIN bin = (BIN) in; + return bin.hasExpirationValues() ? bin : null; + } + + /* + * Read support + */ + + @Override + public void readEntry( + EnvironmentImpl envImpl, + LogEntryHeader header, + ByteBuffer entryBuffer) { + + assert inBytes == null; + + int logVersion = header.getVersion(); + boolean version6OrLater = (logVersion >= 6); + + if (logVersion < 2) { + throw unexpectedState( + "Attempt to read from log file with version " + + logVersion + ", which is not supported any more"); + } + + if (version6OrLater) { + dbId = new DatabaseId(); + dbId.readFromLog(entryBuffer, logVersion); + + prevFullLsn = LogUtils.readLong(entryBuffer, false/*unpacked*/); + if (logVersion >= 8) { + prevDeltaLsn = LogUtils.readPackedLong(entryBuffer); + } else { + prevDeltaLsn = DbLsn.NULL_LSN; + } + } + + /* Read IN. */ + in = newInstanceOfType(); + readMainItem(in, entryBuffer, logVersion); + + if (!version6OrLater) { + dbId = new DatabaseId(); + dbId.readFromLog(entryBuffer, logVersion); + + prevFullLsn = LogUtils.readLong(entryBuffer, true/*unpacked*/); + prevDeltaLsn = DbLsn.NULL_LSN; + } + } + + private void readMainItem(T in, ByteBuffer entryBuffer, int logVersion) { + + if (isBINDelta()) { + assert(logVersion >= 9); + + in.readFromLog( + entryBuffer, logVersion, true /*deltasOnly*/); + + if (logVersion == 9) { + prevFullLsn = LogUtils.readPackedLong(entryBuffer); + } + + in.setLastFullLsn(prevFullLsn); + + } else { + in.readFromLog(entryBuffer, logVersion); + } + } + + /* + * Writing support + */ + @Override + public int getSize() { + + final int inSize; + + if (inBytes != null) { + inSize = inBytes.remaining(); + } else { + inSize = in.getLogSize(isBINDelta()); + } + + return (inSize + + dbId.getLogSize() + + LogUtils.getPackedLongLogSize(prevFullLsn) + + LogUtils.getPackedLongLogSize(prevDeltaLsn)); + } + + @Override + public void writeEntry(ByteBuffer destBuffer) { + + dbId.writeToLog(destBuffer); + + LogUtils.writePackedLong(destBuffer, prevFullLsn); + LogUtils.writePackedLong(destBuffer, prevDeltaLsn); + + if (inBytes != null) { + final int pos = inBytes.position(); + destBuffer.put(inBytes); + inBytes.position(pos); + } else { + in.writeToLog(destBuffer, isBINDelta()); + } + } + + @Override + public long getTransactionId() { + return 0; + } + + /** + * INs from two different environments are never considered equal, + * because they have lsns that are environment-specific. + */ + @Override + public boolean logicalEquals(@SuppressWarnings("unused") LogEntry other) { + return false; + } + + @Override + public StringBuilder dumpEntry(StringBuilder sb, boolean verbose) { + + dbId.dumpLog(sb, verbose); + + if (inBytes != null) { + sb.append(""); + } else { + in.dumpLog(sb, verbose); + } + + if (prevFullLsn != DbLsn.NULL_LSN) { + sb.append(""); + sb.append(DbLsn.getNoFormatString(prevFullLsn)); + sb.append(""); + } + if (prevDeltaLsn != DbLsn.NULL_LSN) { + sb.append(""); + sb.append(DbLsn.getNoFormatString(prevDeltaLsn)); + sb.append(""); + } + return sb; + } + + /** Never replicated. 
*/ + public void dumpRep(@SuppressWarnings("unused") StringBuilder sb) { + } +} diff --git a/src/com/sleepycat/je/log/entry/LNLogEntry.java b/src/com/sleepycat/je/log/entry/LNLogEntry.java new file mode 100644 index 0000000..f1a6f23 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/LNLogEntry.java @@ -0,0 +1,1122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; + +import java.lang.reflect.Constructor; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DupKeyData; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.VersionedWriteLoggable; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.VersionedLN; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * An LNLogEntry is the in-memory image of an LN logrec describing a write op + * (insertion, update, or deletion) performed by a locker T on a record R. + * T always locks R in exclusive (WRITE or WRITE_RANGE) mode before performing + * any write ops on it, and it retains its exclusive lock on R until it + * terminates (commits or aborts). (Non-transactional lockers can be viewed as + * "simple" transactions that perform at most one write op, and then + * immediately commit). 
+ * + * On disk, an LN logrec contains : + * + * 1 <= version <= 5 + * + * LN data + * databaseid + * key + * abortLsn -- if transactional + * abortKnownDeleted -- if transactional + * txn id -- if transactional + * prev LSN of same txn -- if transactional + * + * 6 <= versions <= 10 : + * + * databaseid + * abortLsn -- if transactional + * abortKnownDeleted -- if transactional + * txn id -- if transactional + * prev LSN of same txn -- if transactional + * data + * key + * + * 11 == version : + * + * databaseid + * abortLsn -- if transactional + * 1-byte flags + * abortKnownDeleted + * embeddedLN + * haveAbortKey + * haveAbortData + * haveAbortVLSN + * txn id -- if transactional + * prev LSN of same txn -- if transactional + * abort key -- if haveAbortKey + * abort data -- if haveAbortData + * abort vlsn -- if haveAbortVLSN + * data + * key + * + * In forReplication mode, these flags and fields are omitted: + * embeddedLN, haveAbortKey, haveAbortData, haveAbortVLSN, + * abort key, abort data, abort vlsn + * + * 12 <= version : + * + * 1-byte flags + * abortKnownDeleted + * embeddedLN + * haveAbortKey + * haveAbortData + * haveAbortVLSN + * haveAbortLSN + * haveAbortExpiration + * haveExpiration + * databaseid + * abortLsn -- if transactional and haveAbortLSN + * txn id -- if transactional + * prev LSN of same txn -- if transactional + * abort key -- if haveAbortKey + * abort data -- if haveAbortData + * abort vlsn -- if haveAbortVLSN + * abort expiration -- if haveAbortExpiration + * expiration -- if haveExpiration + * data + * key + * + * In forReplication mode, these flags and fields are omitted: + * abortKnownDeleted, embeddedLN, haveAbortKey, haveAbortData, + * haveAbortVLSN, abort key, abort data, abort vlsn + * + * NOTE: LNLogEntry is sub-classed by NameLNLogEntry, which adds some extra + * fields after the record key. + */ +public class LNLogEntry extends BaseReplicableEntry { + + private static final byte ABORT_KD_MASK = 0x1; + private static final byte EMBEDDED_LN_MASK = 0x2; + private static final byte HAVE_ABORT_KEY_MASK = 0x4; + private static final byte HAVE_ABORT_DATA_MASK = 0x8; + private static final byte HAVE_ABORT_VLSN_MASK = 0x10; + private static final byte HAVE_ABORT_LSN_MASK = 0x20; + private static final byte HAVE_ABORT_EXPIRATION_MASK = 0x40; + private static final byte HAVE_EXPIRATION_MASK = (byte) 0x80; + + /** + * Used for computing the minimum log space used by an LNLogEntry. + */ + public static final int MIN_LOG_SIZE = 1 + // Flags + 1 + // DatabaseId + 1 + // LN with zero-length data + LogEntryHeader.MIN_HEADER_SIZE; + + /** + * The log version when the most recent format change for this entry was + * made (including any changes to the format of the underlying LN and other + * loggables). + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 12; + + /* + * Persistent fields. + */ + + /* + * The id of the DB containing the record. + */ + private DatabaseId dbId; + + /* + * The Txn performing the write op. It is null for non-transactional DBs. + * On disk we store only the txn id and the LSN of the previous logrec + * (if any) generated by this txn. + */ + private Txn txn; + + /* + * The LSN of the record's "abort" version, i.e., the version to revert to + * if this logrec must be undone as a result of a txn abort. It is set to + * the most recent version before the record was locked by the locker T + * associated with this logrec. Because T locks R before it writes it, the + * abort version is always a committed version. 
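+     *
+     * For example, if txn T updates record R whose last committed version
+     * was logged at LSN 100, the logrec for T's update carries
+     * abortLsn = 100, and undoing the logrec reverts R to the version at
+     * LSN 100.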
+ * + * It is null for non-transactional lockers, because such lockers never + * abort. + */ + private long abortLsn = DbLsn.NULL_LSN; + + /* + * Whether the record's abort version was a deleted version or not. + */ + private boolean abortKnownDeleted; + + /* + * The key of the record's abort version, if haveAbortKey is true; + * null otherwise. + */ + private byte[] abortKey = null; + + /* + * The data portion of the record's abort version, if haveAbortData is + * true; null otherwise. + */ + private byte[] abortData = null; + + /* + * The VLSN of the record's abort version, if haveAbortVLSN is true; + * NULL_VLSN otherwise. + */ + private long abortVLSN = VLSN.NULL_VLSN_SEQUENCE; + + /* Abort expiration time in days or hours. */ + private int abortExpiration = 0; + private boolean abortExpirationInHours = false; + + /* + * True if the logrec stores an abort LSN, which is the case only if + * (a) this is a transactional logrec (b) the abort LSN is non-null. + */ + private boolean haveAbortLSN; + + /* + * True if the logrec stores an abort key, which is the case only if + * (a) this is a transactional logrec, (b) the record's abort version + * was embedded in the BIN, and (c) the DB allows key updates. + */ + private boolean haveAbortKey; + + /* + * True if the logrec stores abort data, which is the case only if + * (a) this is a transactional logrec and (b) the record's abort + * version was embedded in the BIN. + */ + private boolean haveAbortData; + + /* + * True if the logrec stores an abort VLSN, which is the case only if + * (a) this is a transactional logrec (b) the record's abort version + * was embedded in the BIN, and (c) VLSN caching is enabled. + */ + private boolean haveAbortVLSN; + + /* + * True if the logrec stores an abort expiration, which is the case only if + * (a) this is a transactional logrec (b) the record's abort version has a + * non-zero expiration. + */ + private boolean haveAbortExpiration; + + /* + * True if the logrec stores a non-zero expiration. + */ + private boolean haveExpiration; + + /* + * Whether, after the write op described by this logrec, the record is + * embedded in the BIN or not. + */ + private boolean embeddedLN; + + /* + * The LN storing the record's data, after the write op described by this + * logrec. The ln has a null data value if the write op is a deletion. For + * replicated DBs, the ln contains the record's VLSN as well. + */ + private LN ln; + + /* + * The value of the record's key, after the write op described by this + * logrec. + */ + private byte[] key; + + /* Expiration time in days or hours. */ + private int expiration; + private boolean expirationInHours; + + /* + * Transient fields. + */ + + /* Transient field for duplicates conversion and user key/data methods. */ + enum DupStatus { UNKNOWN, NEED_CONVERSION, DUP_DB, NOT_DUP_DB } + private DupStatus dupStatus; + + /* For construction of VersionedLN, when VLSN is preserved. */ + private final Constructor versionedLNConstructor; + + /** + * Creates an instance to read an entry. + * + * @param the type of the contained LN + * @param cls the class of the contained LN + * @return the log entry + */ + public static LNLogEntry create(final Class cls) { + return new LNLogEntry<>(cls); + } + + /* Constructor to read an entry. */ + LNLogEntry(final Class cls) { + super(cls); + if (cls == LN.class) { + versionedLNConstructor = getNoArgsConstructor(VersionedLN.class); + } else { + versionedLNConstructor = null; + } + } + + /* Constructor to write an entry. 
*/ + public LNLogEntry( + LogEntryType entryType, + DatabaseId dbId, + Txn txn, + long abortLsn, + boolean abortKD, + byte[] abortKey, + byte[] abortData, + long abortVLSN, + int abortExpiration, + boolean abortExpirationInHours, + byte[] key, + T ln, + boolean embeddedLN, + int expiration, + boolean expirationInHours) { + + setLogType(entryType); + this.dbId = dbId; + this.txn = txn; + this.abortLsn = abortLsn; + this.abortKnownDeleted = abortKD; + this.abortKey = abortKey; + this.abortData = abortData; + this.abortVLSN = abortVLSN; + this.abortExpiration = abortExpiration; + this.abortExpirationInHours = abortExpirationInHours; + + this.haveAbortLSN = (abortLsn != DbLsn.NULL_LSN); + this.haveAbortKey = (abortKey != null); + this.haveAbortData = (abortData != null); + this.haveAbortVLSN = !VLSN.isNull(abortVLSN); + this.haveAbortExpiration = (abortExpiration != 0); + this.haveExpiration = (expiration != 0); + + this.embeddedLN = embeddedLN; + this.key = key; + this.ln = ln; + this.expiration = expiration; + this.expirationInHours = expirationInHours; + + versionedLNConstructor = null; + + /* A txn should only be provided for transactional entry types. */ + assert(entryType.isTransactional() == (txn != null)); + } + + private void reset() { + dbId = null; + txn = null; + abortLsn = DbLsn.NULL_LSN; + abortKnownDeleted = false; + abortKey = null; + abortData = null; + abortVLSN = VLSN.NULL_VLSN_SEQUENCE; + abortExpiration = 0; + abortExpirationInHours = false; + + haveAbortLSN = false; + haveAbortKey = false; + haveAbortData = false; + haveAbortVLSN = false; + haveAbortExpiration = false; + haveExpiration = false; + + embeddedLN = false; + key = null; + ln = null; + expiration = 0; + expirationInHours = false; + + dupStatus = null; + } + + @Override + public void readEntry( + EnvironmentImpl envImpl, + LogEntryHeader header, + ByteBuffer entryBuffer) { + + /* Subclasses must call readBaseLNEntry. */ + assert getClass() == LNLogEntry.class; + + /* + * Prior to version 8, the optimization to omit the key size was + * mistakenly not applied to internal LN types such as FileSummaryLN + * and MapLN, and was only applied to user LN types. The optimization + * should be applicable whenever LNLogEntry is not subclassed to add + * additional fields. [#18055] + */ + final boolean keyIsLastSerializedField = + header.getVersion() >= 8 || entryType.isUserLNType(); + + readBaseLNEntry(envImpl, header, entryBuffer, + keyIsLastSerializedField); + } + + /** + * Method shared by LNLogEntry subclasses. + * + * @param keyIsLastSerializedField specifies whether the key length can be + * omitted because the key is the last field. This should be false when + * an LNLogEntry subclass adds fields to the serialized format. + */ + final void readBaseLNEntry( + EnvironmentImpl envImpl, + LogEntryHeader header, + ByteBuffer entryBuffer, + boolean keyIsLastSerializedField) { + + reset(); + + int logVersion = header.getVersion(); + boolean unpacked = (logVersion < 6); + int recStartPosition = entryBuffer.position(); + + if (logVersion >= 12) { + setFlags(entryBuffer.get()); + } + + /* + * For log version 6 and above we store the key last so that we can + * avoid storing the key size. Instead, we derive it from the LN size + * and the total entry size. The DatabaseId is also packed. + */ + if (logVersion < 6) { + /* LN is first for log versions prior to 6. */ + ln = newLNInstance(envImpl); + ln.readFromLog(entryBuffer, logVersion); + } + + /* DatabaseImpl Id. 
*/ + dbId = new DatabaseId(); + dbId.readFromLog(entryBuffer, logVersion); + + /* Key. */ + if (logVersion < 6) { + key = LogUtils.readByteArray(entryBuffer, true/*unpacked*/); + } + + if (entryType.isTransactional()) { + + /* + * AbortLsn. If it was a marker LSN that was used to fill in a + * create, mark it null. + */ + if (haveAbortLSN || logVersion < 12) { + abortLsn = LogUtils.readLong(entryBuffer, unpacked); + if (DbLsn.getFileNumber(abortLsn) == + DbLsn.getFileNumber(DbLsn.NULL_LSN)) { + abortLsn = DbLsn.NULL_LSN; + } + } + + if (logVersion < 12) { + setFlags(entryBuffer.get()); + haveAbortLSN = (abortLsn != DbLsn.NULL_LSN); + } + + /* txn id and prev LSN by same txn. */ + txn = new Txn(); + txn.readFromLog(entryBuffer, logVersion); + + } else if (logVersion == 11) { + setFlags(entryBuffer.get()); + } + + if (logVersion >= 11) { + if (haveAbortKey) { + abortKey = LogUtils.readByteArray(entryBuffer, false); + } + if (haveAbortData) { + abortData = LogUtils.readByteArray(entryBuffer, false); + } + if (haveAbortVLSN) { + abortVLSN = LogUtils.readPackedLong(entryBuffer); + } + } + + if (logVersion >= 12) { + if (haveAbortExpiration) { + abortExpiration = LogUtils.readPackedInt(entryBuffer); + if (abortExpiration < 0) { + abortExpiration = (- abortExpiration); + abortExpirationInHours = true; + } + } + if (haveExpiration) { + expiration = LogUtils.readPackedInt(entryBuffer); + if (expiration < 0) { + expiration = (- expiration); + expirationInHours = true; + } + } + } + + if (logVersion >= 6) { + + ln = newLNInstance(envImpl); + ln.readFromLog(entryBuffer, logVersion); + + int keySize; + if (keyIsLastSerializedField) { + int bytesWritten = entryBuffer.position() - recStartPosition; + keySize = header.getItemSize() - bytesWritten; + } else { + keySize = LogUtils.readPackedInt(entryBuffer); + } + key = LogUtils.readBytesNoLength(entryBuffer, keySize); + } + + /* Save transient fields after read. */ + + if (header.getVLSN() != null) { + ln.setVLSNSequence(header.getVLSN().getSequence()); + } + + /* Dup conversion will be done by postFetchInit. */ + dupStatus = + (logVersion < 8) ? DupStatus.NEED_CONVERSION : DupStatus.UNKNOWN; + } + + private void setFlags(final byte flags) { + embeddedLN = ((flags & EMBEDDED_LN_MASK) != 0); + abortKnownDeleted = ((flags & ABORT_KD_MASK) != 0); + haveAbortLSN = ((flags & HAVE_ABORT_LSN_MASK) != 0); + haveAbortKey = ((flags & HAVE_ABORT_KEY_MASK) != 0); + haveAbortData = ((flags & HAVE_ABORT_DATA_MASK) != 0); + haveAbortVLSN = ((flags & HAVE_ABORT_VLSN_MASK) != 0); + haveAbortExpiration = ((flags & HAVE_ABORT_EXPIRATION_MASK) != 0); + haveExpiration = ((flags & HAVE_EXPIRATION_MASK) != 0); + } + + @Override + public boolean hasReplicationFormat() { + return true; + } + + @Override + public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer, + final int srcVersion, + final int destVersion) { + + /* The replication format is optimized only in versions >= 11. */ + if (destVersion < 11) { + return false; + } + + /* + * It is too much trouble to parse versions older than 12, because the + * flags are not at the front in older versions. + */ + if (srcVersion < 12) { + return false; + } + + final byte flags = logBuffer.get(0); + + /* + * If we have an abort key or data, assume that the savings is + * substantial enough to be worthwhile. + * + * The abort key is unusual and implies that data is hidden in the key + * using a partial comparator, so we assume it is probably large, + * relative to the total size. 
+ * + * If there is abort data, it may be small, however, because the + * presence of abort data implies that this is an update or deletion, + * there will also be an abort LSN and an abort VLSN (with HA). Plus, + * abort data is likely to be around the same size as the non-abort + * data, and keys are normally smallish, meaning that the abort data is + * largish relative to the total record size. So we assume the savings + * are substantial enough. + */ + return (flags & + (HAVE_ABORT_KEY_MASK | HAVE_ABORT_DATA_MASK)) != 0; + } + + /** + * newLNInstance usually returns exactly the type of LN of the type that + * was contained in in the log. For example, if a LNLogEntry holds a MapLN, + * newLNInstance will return that MapLN. There is one extra possibility for + * vanilla (data record) LNs. In that case, this method may either return a + * LN or a generated type, the VersionedLN, which adds the vlsn information + * from the log header to the LN object. + */ + LN newLNInstance(EnvironmentImpl envImpl) { + if (versionedLNConstructor != null && envImpl.getPreserveVLSN()) { + return newInstanceOfType(versionedLNConstructor); + } + return newInstanceOfType(); + } + + @Override + public StringBuilder dumpEntry(StringBuilder sb, boolean verbose) { + + dbId.dumpLog(sb, verbose); + + ln.dumpKey(sb, key); + ln.dumpLog(sb, verbose); + + sb.append(""); + + if (haveExpiration) { + sb.append(""); + } + + if (entryType.isTransactional()) { + + txn.dumpLog(sb, verbose); + + sb.append(""); + + sb.append(""); + + if (haveAbortKey) { + sb.append(Key.dumpString(abortKey, "abortKey", 0)); + } + if (haveAbortData) { + sb.append(Key.dumpString(abortData, "abortData", 0)); + } + if (haveAbortVLSN) { + sb.append(""); + } + if (haveAbortExpiration) { + sb.append(""); + } + } + + return sb; + } + + @Override + public void dumpRep(StringBuilder sb) { + if (entryType.isTransactional()) { + sb.append(" txn=").append(txn.getId()); + } + } + + @Override + public LN getMainItem() { + return ln; + } + + @Override + public long getTransactionId() { + if (entryType.isTransactional()) { + return txn.getId(); + } + return 0; + } + + /* + * Writing support. + */ + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + return Arrays.asList(new LN(), new DatabaseId(), new Txn()); + } + + @Override + public int getSize(final int logVersion, final boolean forReplication) { + + assert getClass() == LNLogEntry.class; + + return getBaseLNEntrySize( + logVersion, true /*keyIsLastSerializedField*/, forReplication); + } + + /** + * Method shared by LNLogEntry subclasses. + * + * @param keyIsLastSerializedField specifies whether the key length can be + * omitted because the key is the last field. This should be false when + * an LNLogEntry subclass adds fields to the serialized format. 
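+     * <p>
+     * The saving works because the reader can recover the key length from
+     * the entry size when the key is last; see readBaseLNEntry:
+     * <pre>
+     *   int bytesWritten = entryBuffer.position() - recStartPosition;
+     *   keySize = header.getItemSize() - bytesWritten;
+     * </pre>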
+ */ + final int getBaseLNEntrySize( + final int logVersion, + final boolean keyIsLastSerializedField, + final boolean forReplication) { + + int size = ln.getLogSize(logVersion, forReplication) + + dbId.getLogSize(logVersion, forReplication) + + key.length; + + if (!keyIsLastSerializedField) { + size += LogUtils.getPackedIntLogSize(key.length); + } + + if (entryType.isTransactional() || logVersion >= 11) { + size += 1; // flags + } + + if (entryType.isTransactional()) { + if (logVersion < 12 || (haveAbortLSN && !forReplication)) { + size += LogUtils.getPackedLongLogSize(abortLsn); + } + size += txn.getLogSize(logVersion, forReplication); + } + + if (!forReplication) { + if (logVersion >= 11 ) { + if (haveAbortKey) { + size += LogUtils.getByteArrayLogSize(abortKey); + } + if (haveAbortData) { + size += LogUtils.getByteArrayLogSize(abortData); + } + if (haveAbortVLSN) { + size += LogUtils.getPackedLongLogSize(abortVLSN); + } + } + if (haveAbortExpiration) { + size += LogUtils.getPackedIntLogSize( + abortExpirationInHours ? + (- abortExpiration) : abortExpiration); + } + } + + if (logVersion >= 12) { + if (haveExpiration) { + size += LogUtils.getPackedIntLogSize( + expirationInHours ? (- expiration) : expiration); + } + } + + return size; + } + + @Override + public void writeEntry(final ByteBuffer destBuffer, + final int logVersion, + final boolean forReplication) { + + /* Subclasses must call writeBaseLNEntry. */ + assert getClass() == LNLogEntry.class; + + writeBaseLNEntry( + destBuffer, logVersion, true /*keyIsLastSerializedField*/, + forReplication); + } + + /** + * Method shared by LNLogEntry subclasses. + * + * @param keyIsLastSerializedField specifies whether the key length can be + * omitted because the key is the last field. This should be false when + * an LNLogEntry subclass adds fields to the serialized format. 
+ */ + final void writeBaseLNEntry( + final ByteBuffer destBuffer, + final int logVersion, + final boolean keyIsLastSerializedField, + final boolean forReplication) { + + byte flags = 0; + + if (entryType.isTransactional() && + (logVersion < 12 || !forReplication)) { + + if (abortKnownDeleted) { + flags |= ABORT_KD_MASK; + } + if (haveAbortLSN) { + flags |= HAVE_ABORT_LSN_MASK; + } + } + + if (!forReplication) { + if (logVersion >= 11) { + if (embeddedLN) { + flags |= EMBEDDED_LN_MASK; + } + if (haveAbortKey) { + flags |= HAVE_ABORT_KEY_MASK; + } + if (haveAbortData) { + flags |= HAVE_ABORT_DATA_MASK; + } + if (haveAbortVLSN) { + flags |= HAVE_ABORT_VLSN_MASK; + } + } + if (logVersion >= 12) { + if (haveAbortExpiration) { + flags |= HAVE_ABORT_EXPIRATION_MASK; + } + } + } + + if (logVersion >= 12) { + if (haveExpiration) { + flags |= HAVE_EXPIRATION_MASK; + } + destBuffer.put(flags); + } + + dbId.writeToLog(destBuffer, logVersion, forReplication); + + if (entryType.isTransactional()) { + + if (logVersion < 12 || (haveAbortLSN && !forReplication)) { + LogUtils.writePackedLong(destBuffer, abortLsn); + } + + if (logVersion < 12) { + destBuffer.put(flags); + } + + txn.writeToLog(destBuffer, logVersion, forReplication); + + } else if (logVersion == 11) { + destBuffer.put(flags); + } + + if (!forReplication) { + if (logVersion >= 11) { + if (haveAbortKey) { + LogUtils.writeByteArray(destBuffer, abortKey); + } + if (haveAbortData) { + LogUtils.writeByteArray(destBuffer, abortData); + } + if (haveAbortVLSN) { + LogUtils.writePackedLong(destBuffer, abortVLSN); + } + } + if (logVersion >= 12) { + if (haveAbortExpiration) { + LogUtils.writePackedInt( + destBuffer, + abortExpirationInHours ? + (-abortExpiration) : abortExpiration); + } + } + } + + if (logVersion >= 12) { + if (haveExpiration) { + LogUtils.writePackedInt( + destBuffer, + expirationInHours ? (-expiration) : expiration); + } + } + + ln.writeToLog(destBuffer, logVersion, forReplication); + + if (!keyIsLastSerializedField) { + LogUtils.writePackedInt(destBuffer, key.length); + } + LogUtils.writeBytesNoLength(destBuffer, key); + } + + @Override + public boolean isImmediatelyObsolete(DatabaseImpl dbImpl) { + return (ln.isDeleted() || + embeddedLN || + dbImpl.isLNImmediatelyObsolete()); + } + + @Override + public boolean isDeleted() { + return ln.isDeleted(); + } + + /** + * For LN entries, we need to record the latest LSN for that node with the + * owning transaction, within the protection of the log latch. This is a + * callback for the log manager to do that recording. + */ + @Override + public void postLogWork( + LogEntryHeader header, + long justLoggedLsn, + VLSN vlsn) { + + if (entryType.isTransactional()) { + txn.addLogInfo(justLoggedLsn); + } + + /* Save transient fields after write. */ + if (vlsn != null) { + ln.setVLSNSequence(vlsn.getSequence()); + } + } + + @Override + public void postFetchInit(DatabaseImpl dbImpl) { + postFetchInit(dbImpl.getSortedDuplicates()); + } + + /** + * Converts the key/data for old format LNs in a duplicates DB. + * + * This method MUST be called before calling any of the following methods: + * getLN + * getKey + * getUserKeyData + * + * TODO: + * This method is not called by the HA feeder when materializing entries. + * This is OK because entries with log version 7 and below are never + * materialized. But we may want to rename this method to make it clear + * that it only is, and only must be, called for the log versions < 8. 
+ */ + public void postFetchInit(boolean isDupDb) { + + final boolean needConversion = + (dupStatus == DupStatus.NEED_CONVERSION); + + dupStatus = isDupDb ? DupStatus.DUP_DB : DupStatus.NOT_DUP_DB; + + /* Do not convert more than once. */ + if (!needConversion) { + return; + } + + /* Nothing to convert for non-duplicates DB. */ + if (dupStatus == DupStatus.NOT_DUP_DB) { + return; + } + + key = combineDupKeyData(); + } + + /** + * Combine old key and old LN's data into a new key, and set the LN's data + * to empty. + */ + byte[] combineDupKeyData() { + assert !ln.isDeleted(); // DeletedLNLogEntry overrides this method. + return DupKeyData.combine(key, ln.setEmpty()); + } + + /** + * Translates two-part keys in duplicate DBs back to the original user + * operation params. postFetchInit must be called before calling this + * method. + */ + public void getUserKeyData( + DatabaseEntry keyParam, + DatabaseEntry dataParam) { + + requireKnownDupStatus(); + + if (dupStatus == DupStatus.DUP_DB) { + DupKeyData.split(new DatabaseEntry(key), keyParam, dataParam); + } else { + if (keyParam != null) { + keyParam.setData(key); + } + if (dataParam != null) { + dataParam.setData(ln.getData()); + } + } + } + + /* + * Accessors. + */ + public boolean isEmbeddedLN() { + return embeddedLN; + } + + public LN getLN() { + requireKnownDupStatus(); + return ln; + } + + public byte[] getKey() { + requireKnownDupStatus(); + return key; + } + + public byte[] getData() { + return ln.getData(); + } + + public byte[] getEmbeddedData() { + + if (!isEmbeddedLN()) { + return null; + } + + if (ln.isDeleted()) { + return Key.EMPTY_KEY; + } + + return ln.getData(); + } + + public int getExpiration() { + return expiration; + } + + public boolean isExpirationInHours() { + return expirationInHours; + } + + private void requireKnownDupStatus() { + if (dupStatus != DupStatus.DUP_DB && + dupStatus != DupStatus.NOT_DUP_DB) { + throw unexpectedState( + "postFetchInit was not called"); + } + } + + /** + * This method is only used when the converted length is not needed, for + * example by StatsFileReader. + */ + public int getUnconvertedDataLength() { + return ln.getData().length; + } + + /** + * This method is only used when the converted length is not needed, for + * example by StatsFileReader. 
+ */ + public int getUnconvertedKeyLength() { + return key.length; + } + + @Override + public DatabaseId getDbId() { + return dbId; + } + + public long getAbortLsn() { + return abortLsn; + } + + public boolean getAbortKnownDeleted() { + return abortKnownDeleted; + } + + public byte[] getAbortKey() { + return abortKey; + } + + public byte[] getAbortData() { + return abortData; + } + + public long getAbortVLSN() { + return abortVLSN; + } + + public int getAbortExpiration() { + return abortExpiration; + } + + public boolean isAbortExpirationInHours() { + return abortExpirationInHours; + } + + public Long getTxnId() { + if (entryType.isTransactional()) { + return txn.getId(); + } + return null; + } + + public Txn getUserTxn() { + if (entryType.isTransactional()) { + return txn; + } + return null; + } + + @Override + public boolean logicalEquals(LogEntry other) { + if (!(other instanceof LNLogEntry)) { + return false; + } + + LNLogEntry otherEntry = (LNLogEntry) other; + + if (!dbId.logicalEquals(otherEntry.dbId)) { + return false; + } + + if (txn != null) { + if (!txn.logicalEquals(otherEntry.txn)) { + return false; + } + } else { + if (otherEntry.txn != null) { + return false; + } + } + + if (!Arrays.equals(key, otherEntry.key)) { + return false; + } + + if (!ln.logicalEquals(otherEntry.ln)) { + return false; + } + + return true; + } +} diff --git a/src/com/sleepycat/je/log/entry/LogEntry.java b/src/com/sleepycat/je/log/entry/LogEntry.java new file mode 100644 index 0000000..1ac1c3e --- /dev/null +++ b/src/com/sleepycat/je/log/entry/LogEntry.java @@ -0,0 +1,132 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.utilint.VLSN; + +/** + * A Log entry allows you to read, write and dump a database log entry. Each + * entry may be made up of one or more loggable items. + * + *
+ * <p>
+ * The log entry on disk consists of a log header defined by LogManager and
+ * the specific contents of the log entry.
+ *
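+ * <p>
+ * A rough sketch of the read path implied by this interface, assuming the
+ * reader has already parsed the header, and assuming LogEntryType exposes a
+ * factory such as getNewLogEntry (envImpl, header, entryBuffer and dbImpl
+ * come from the surrounding reader plumbing):
+ * <pre>
+ *   LogEntry entry = entryType.getNewLogEntry();
+ *   entry.readEntry(envImpl, header, entryBuffer);
+ *   Object item = entry.getResolvedItem(dbImpl);
+ * </pre>
+ *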
+ * <p>
        Log entries that support replication are required to implement {@link + * ReplicableLogEntry}. + */ +public interface LogEntry extends Cloneable { + + /** + * Inform a LogEntry instance of its corresponding LogEntryType. + */ + public void setLogType(LogEntryType entryType); + + /** + * @return the type of log entry + */ + public LogEntryType getLogType(); + + /** + * Read in a log entry. + */ + public void readEntry(EnvironmentImpl envImpl, + LogEntryHeader header, + ByteBuffer entryBuffer); + + /** + * Print out the contents of an entry. + */ + public StringBuilder dumpEntry(StringBuilder sb, boolean verbose); + + /** + * @return the first item of the log entry + */ + public Object getMainItem(); + + /** + * Construct a complete item from a item entry, fetching additional log + * entries as needed to ensure that a usable main object is available. + * + * For an OldBINDeltaLogEntry, fetches the full BIN and merges the delta + * information. This is necessary to return a Node main object. + * However, for the new BINDeltaLogEntry, the full BIN is not fetched, + * since the partial BIN (the delta) is usable as a Node. + */ + public Object getResolvedItem(DatabaseImpl dbImpl); + + /** + * @return the ID of the database containing this entry, or null if this + * entry type is not part of a database. + */ + public DatabaseId getDbId(); + + /** + * @return return the transaction id if this log entry is transactional, + * 0 otherwise. + */ + public long getTransactionId(); + + /** + * @return size of byte buffer needed to store this entry. + */ + public int getSize(); + + /** + * Serialize this object into the buffer. + * @param logBuffer is the destination buffer + */ + public void writeEntry(ByteBuffer logBuffer); + + /** + * Returns true if this item should be counted as obsolete when logged. + */ + public boolean isImmediatelyObsolete(DatabaseImpl dbImpl); + + /** + * Returns whether this is a deleted LN. + */ + public boolean isDeleted(); + + /** + * Do any processing we need to do after logging, while under the logging + * latch. + */ + public void postLogWork(LogEntryHeader header, + long justLoggedLsn, + VLSN vlsn); + + /** + * @return a shallow clone. + */ + public LogEntry clone(); + + /** + * @return true if these two log entries are logically the same. + * Used for replication. + */ + public boolean logicalEquals(LogEntry other); + + /** + * Dump the contents of the log entry that are interesting for + * replication. + */ + public void dumpRep(StringBuilder sb); +} diff --git a/src/com/sleepycat/je/log/entry/MatchpointLogEntry.java b/src/com/sleepycat/je/log/entry/MatchpointLogEntry.java new file mode 100644 index 0000000..32a15f5 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/MatchpointLogEntry.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.utilint.Matchpoint; + +/** + * Log entry for a matchpoint object. 
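+ * <p>
+ * Like CommitLogEntry, this is a thin SingleItemReplicableEntry wrapper; a
+ * minimal write-side sketch (construction of the matchpoint is assumed):
+ * <pre>
+ *   MatchpointLogEntry entry = new MatchpointLogEntry(matchpoint);
+ *   // entry.getLogType() == LogEntryType.LOG_MATCHPOINT
+ * </pre>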
+ */ +public class MatchpointLogEntry extends SingleItemReplicableEntry { + + /** + * The log version number of the most recent change for this log entry, + * including any changes to the format of the underlying {@link Matchpoint} + * object. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + /** Construct a log entry for reading a {@link Matchpoint} object. */ + public MatchpointLogEntry() { + super(Matchpoint.class); + } + + /** Construct a log entry for writing a {@link Matchpoint} object. */ + public MatchpointLogEntry(final Matchpoint matchpoint) { + super(LogEntryType.LOG_MATCHPOINT, matchpoint); + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } +} diff --git a/src/com/sleepycat/je/log/entry/NameLNLogEntry.java b/src/com/sleepycat/je/log/entry/NameLNLogEntry.java new file mode 100644 index 0000000..6a9ec3d --- /dev/null +++ b/src/com/sleepycat/je/log/entry/NameLNLogEntry.java @@ -0,0 +1,298 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.ReplicatedDatabaseConfig; +import com.sleepycat.je.log.DbOpReplicationContext; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.VersionedWriteLoggable; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.VLSN; + +/** + * NameLNLogEntry contains all the regular LNLogEntry fields and additional + * information about the database operation which instigated the logging of + * this NameLN. This additional information is used to support replication of + * database operations in a replication group. + * + * Database operations pose a special problem for replication because unlike + * data record put and get calls, they can result in multiple log entries that + * are not all members of a single transaction. Create and truncate are the + * problem operations because they end up logging new MapLNs, and our + * implementation does not treat MapLNs as transactional. Database operations + * challenge two replication assumptions: (a) that all logical operations can + * be repeated on the client node based on the contents of a single log entry, + * and (b) that non-txnal log entries like MapLNs need not be replicated. + * + * Specifically, here's what is logged for database operations. + * + * create: + * + * 1. new NameLN_TX + * 2. new MapLN, which has the database config info. + * 3. txn commit of autocommit or user txn. + * + * rename: + * + * 1. deleted NameLN_TX + * 2. new NameLN_TX + * 3. txn commit from autocommit or user txn + * + * truncate: + * + * 1. new MapLN w/new id + * 2. 
modify the existing NameLN with new id (old database is deleted by + * usual commit-time processing) + * 3. txn commit from autocommit or user txn + * + * delete + * + * 1. deleted NameLN_TX (old database gets deleted by usual commit-time + * processing) + * 2. txn commit from autocommit or user txn + * + * Extra information is needed for create and truncate, which both log + * information within the MapLN. Rename and delete only log NameLNs, so they + * can be replicated on the client using the normal replication messages. The + * extra fields which follow the usual LNLogEntry fields are: + * + * operationType - the type of database operation. In a single node system, + * this is local information implicit in the code path. + * databaseConfig (optional) - For creates, database configuration info + * databaseId (optional)- For truncates, the old db id, so we know which + * MapLN to delete. + */ +public class NameLNLogEntry extends LNLogEntry { + + /** + * The log version of the most recent format change for this entry, + * including the superclass and any changes to the format of referenced + * loggables. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 12; + + /* + * operationType, truncateOldDbId and replicatedCreateConfig are + * logged as part of the entry. + */ + private DbOperationType operationType; + private DatabaseId truncateOldDbId; + private ReplicatedDatabaseConfig replicatedCreateConfig; + + /** + * Constructor to read an entry. + */ + public NameLNLogEntry() { + super(com.sleepycat.je.tree.NameLN.class); + } + + /** + * Constructor to write this entry. + */ + public NameLNLogEntry( + LogEntryType entryType, + DatabaseId dbId, + Txn txn, + long abortLsn, + boolean abortKD, + byte[] key, + NameLN nameLN, + ReplicationContext repContext) { + + super( + entryType, dbId, txn, + abortLsn, abortKD, + null/*abortKey*/, null/*abortData*/, + VLSN.NULL_VLSN_SEQUENCE/*abortVLSN*/, + 0 /*abortExpiration*/, false /*abortExpirationInHours*/, + key, nameLN, false/*newEmbeddedLN*/, + 0 /*expiration*/, false /*expirationInHours*/); + + ReplicationContext operationContext = repContext; + + operationType = repContext.getDbOperationType(); + if (DbOperationType.isWriteConfigType(operationType)) { + replicatedCreateConfig = + ((DbOpReplicationContext) operationContext).getCreateConfig(); + } + + if (operationType == DbOperationType.TRUNCATE) { + truncateOldDbId = + ((DbOpReplicationContext) operationContext).getTruncateOldDbId(); + } + } + + /** + * Extends its super class to read in database operation information. + */ + @Override + public void readEntry(EnvironmentImpl envImpl, + LogEntryHeader header, + ByteBuffer entryBuffer) { + + readBaseLNEntry(envImpl, header, entryBuffer, + false /*keyIsLastSerializedField*/); + + /* + * The NameLNLogEntry was introduced in version 6. Before, a LNLogEntry + * was used for NameLNs, and there is no extra information in the log + * entry. 
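+         *
+         * So a version >= 6 logrec decodes, in order:
+         *
+         *   operationType            -- always present
+         *   replicatedCreateConfig   -- only for CREATE/UPDATE_CONFIG
+         *   truncateOldDbId          -- only for TRUNCATE
+         *
+         * while an older logrec falls back to DbOperationType.NONE.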
+ */ + int version = header.getVersion(); + if (version >= 6) { + operationType = DbOperationType.readTypeFromLog(entryBuffer, + version); + if (DbOperationType.isWriteConfigType(operationType)) { + replicatedCreateConfig = new ReplicatedDatabaseConfig(); + replicatedCreateConfig.readFromLog(entryBuffer, version); + } + + if (operationType == DbOperationType.TRUNCATE) { + truncateOldDbId = new DatabaseId(); + truncateOldDbId.readFromLog(entryBuffer, version); + } + } else { + operationType = DbOperationType.NONE; + } + } + + /** + * Extends its super class to dump database operation information. + */ + @Override + public StringBuilder dumpEntry(StringBuilder sb, boolean verbose) { + + super.dumpEntry(sb, verbose); + + operationType.dumpLog(sb, verbose); + if (replicatedCreateConfig != null ) { + replicatedCreateConfig.dumpLog(sb, verbose); + } + if (truncateOldDbId != null) { + truncateOldDbId.dumpLog(sb, verbose); + } + + return sb; + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + final Collection list = + new ArrayList<>(super.getEmbeddedLoggables()); + list.addAll(Arrays.asList( + new NameLN(), DbOperationType.NONE, + new ReplicatedDatabaseConfig())); + return list; + } + + @Override + public int getSize(final int logVersion, final boolean forReplication) { + + int size = getBaseLNEntrySize( + logVersion, false /*keyIsLastSerializedField*/, + forReplication); + + size += operationType.getLogSize(logVersion, forReplication); + + if (DbOperationType.isWriteConfigType(operationType)) { + size += replicatedCreateConfig.getLogSize( + logVersion, forReplication); + } + + if (operationType == DbOperationType.TRUNCATE) { + size += truncateOldDbId.getLogSize(logVersion, forReplication); + } + return size; + } + + @Override + public void writeEntry(final ByteBuffer destBuffer, + final int logVersion, + final boolean forReplication) { + + writeBaseLNEntry( + destBuffer, logVersion, + false /*keyIsLastSerializedField*/, forReplication); + + operationType.writeToLog(destBuffer, logVersion, forReplication); + + if (DbOperationType.isWriteConfigType(operationType)) { + replicatedCreateConfig.writeToLog( + destBuffer, logVersion, forReplication); + } + + if (operationType == DbOperationType.TRUNCATE) { + truncateOldDbId.writeToLog(destBuffer, logVersion, forReplication); + } + } + + @Override + public boolean logicalEquals(LogEntry other) { + + if (!super.logicalEquals(other)) + return false; + + NameLNLogEntry otherEntry = (NameLNLogEntry) other; + if (!operationType.logicalEquals(otherEntry.operationType)) { + return false; + } + + if ((truncateOldDbId != null) && + (!truncateOldDbId.logicalEquals(otherEntry.truncateOldDbId))) { + return false; + } + + if (replicatedCreateConfig != null) { + if (!replicatedCreateConfig.logicalEquals + (otherEntry.replicatedCreateConfig)) + return false; + } + return true; + } + + public DbOperationType getOperationType() { + return operationType; + } + + public ReplicatedDatabaseConfig getReplicatedCreateConfig() { + return replicatedCreateConfig; + } + + public DatabaseId getTruncateOldDbId() { + return truncateOldDbId; + } + + @Override + public void dumpRep(StringBuilder sb) { + super.dumpRep(sb); + sb.append(" dbop=").append(operationType); + } +} diff --git a/src/com/sleepycat/je/log/entry/OldBINDeltaLogEntry.java b/src/com/sleepycat/je/log/entry/OldBINDeltaLogEntry.java new file mode 100644 index 0000000..3c127c1 --- /dev/null +++ 
b/src/com/sleepycat/je/log/entry/OldBINDeltaLogEntry.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.tree.OldBINDelta; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Before log version 9, this was used to hold a OldBINDelta that can be combined + * with a BIN when fetched from the log; see getResolvedItem. This class was + * replaced by BINDeltaLogEntry in log version 9, which can be used to + * create a live (but incomplete) BIN in the Btree. + */ +public class OldBINDeltaLogEntry extends SingleItemEntry + implements INContainingEntry { + + public OldBINDeltaLogEntry(Class logClass) { + super(logClass); + } + + /* + * Whether this LogEntry reads/writes a BIN-Delta logrec. + */ + @Override + public boolean isBINDelta() { + return true; + } + + /** + * Resolve a BIN-delta item by fetching the full BIN and merging the delta. + */ + @Override + public Object getResolvedItem(DatabaseImpl dbImpl) { + return getIN(dbImpl); + } + + @Override + public IN getIN(DatabaseImpl dbImpl) { + OldBINDelta delta = getMainItem(); + return delta.reconstituteBIN(dbImpl); + } + + @Override + public DatabaseId getDbId() { + OldBINDelta delta = getMainItem(); + return delta.getDbId(); + } + + @Override + public long getPrevFullLsn() { + OldBINDelta delta = getMainItem(); + return delta.getLastFullLsn(); + } + + @Override + public long getPrevDeltaLsn() { + OldBINDelta delta = getMainItem(); + return delta.getPrevDeltaLsn(); + } +} diff --git a/src/com/sleepycat/je/log/entry/ReplicableLogEntry.java b/src/com/sleepycat/je/log/entry/ReplicableLogEntry.java new file mode 100644 index 0000000..8bd0780 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/ReplicableLogEntry.java @@ -0,0 +1,139 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; +import java.util.Collection; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.VersionedWriteLoggable; + +/** + * A sub-interface of {@link LogEntry} that must be implemented by all log + * entries that can be replicated. Replicable log entries are all those + * entries for which the associated {@link LogEntryType}'s {@link + * LogEntryType#isReplicationPossible} method returns {@code true}. These are + * the log entries that can be included in the replication stream distributed + * from feeders to replicas during replication. See [#22336]. + * + *

+ * <p>Starting with the release using log version 9, as specified by {@link
+ * LogEntryType#LOG_VERSION_REPLICATE_OLDER}, all replicable log entries
+ * need to support writing themselves in earlier log formats, to support
+ * replication during an upgrade when the master is upgraded before the
+ * replicas. Any loggable objects that they reference should also implement
+ * {@link com.sleepycat.je.log.VersionedWriteLoggable} for the same reason.
+ *

+ * <p>The {@link #getLastFormatChange} method identifies the log version for
+ * which the entry's log format has most recently changed. This information is
+ * used to determine if the current log format is compatible with a
+ * non-upgraded replica.
+ *
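+ * <p>For example, a sender deciding whether a not-yet-upgraded replica can
+ * parse an entry's current format could reason as follows. This is only an
+ * illustrative sketch; the method and variable names are not part of this
+ * API:
+ * <pre>
+ *    boolean replicaCanParseCurrentFormat(ReplicableLogEntry entry,
+ *                                         int replicaLogVersion) {
+ *        // Compatible only if the entry's format has not changed since
+ *        // the log version that the replica understands.
+ *        return entry.getLastFormatChange() &lt;= replicaLogVersion;
+ *    }
+ * </pre>
+ *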

+ * <p>The {@link #getSize(int, boolean)} method overloading is used when
+ * creating the buffer that will be used to transmit the log entry data in the
+ * earlier format.
+ *

+ * <p>The {@link #writeEntry(ByteBuffer, int, boolean)} method overloading is
+ * used to convert the in-memory format of the log entry into the log data in
+ * the earlier format.
+ *
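+ * <p>Taken together, the two overloadings might be used like this when
+ * materializing an entry for an older replica (an illustrative sketch only;
+ * {@code entry} and {@code destVersion} are assumed to be supplied by the
+ * caller):
+ * <pre>
+ *    // true means the output is for the replication stream, not the log
+ *    int size = entry.getSize(destVersion, true);
+ *    ByteBuffer buf = ByteBuffer.allocate(size);
+ *    entry.writeEntry(buf, destVersion, true);
+ *    buf.flip();   // ready to transmit
+ * </pre>
+ *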

+ * <p>To simplify the implementation of writing log entries in multiple log
+ * version formats, a log entry that needs to be written in a previous format
+ * will first be read into its in-memory format in the current version, and
+ * then written from there to the previous format.
+ */
+public interface ReplicableLogEntry extends LogEntry {
+
+    /**
+     * Returns the log version of the most recent format change for this log
+     * entry.
+     *
+     * @return the log version of the most recent format change
+     */
+    int getLastFormatChange();
+
+    /**
+     * Returns all possible {@link VersionedWriteLoggable} objects that may be
+     * embedded in the binary data of this log entry.
+     *

+     * <p>This is used by tests to ensure that for each X:Y pair, where X is a
+     * ReplicableLogEntry and Y is a VersionedWriteLoggable, and X embeds Y
+     * either directly or indirectly, X.getLastFormatChange is greater than or
+     * equal to Y.getLastFormatChange.
+     *
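+     * <p>A sketch of the check such a test might perform (illustrative only;
+     * {@code x} is some ReplicableLogEntry under test):
+     * <pre>
+     *    for (VersionedWriteLoggable y : x.getEmbeddedLoggables()) {
+     *        assert x.getLastFormatChange() &gt;= y.getLastFormatChange();
+     *    }
+     * </pre>
+     *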

+     * <p>Each ReplicableLogEntry and VersionedWriteLoggable class typically
+     * has a LAST_FORMAT_CHANGE constant that is returned by its
+     * getLastFormatChange method. When bumping this constant for an object X
+     * embedded by a log entry Y, Y.LAST_FORMAT_CHANGE should also be set to
+     * the maximum of its current value and X.LAST_FORMAT_CHANGE, so that the
+     * invariant above continues to hold.
+     *

+     * <p>Enforcing this rule in a general way is made possible by the
+     * getEmbeddedLoggables method of each ReplicableLogEntry and
+     * VersionedWriteLoggable. Note that this method is not intended to be
+     * called outside of tests.
+     */
+    Collection<VersionedWriteLoggable> getEmbeddedLoggables();
+
+    /**
+     * Returns the number of bytes needed to store this entry in the format
+     * for the specified log version. Earlier log versions only need to be
+     * supported for log entries with format changes made in {@link
+     * LogEntryType#LOG_VERSION_REPLICATE_OLDER} or greater.
+     *
+     * @param logVersion the log version
+     * @param forReplication whether the entry will be sent over the wire,
+     * and not written to the log.
+     * @return the number of bytes to store this entry for the log version
+     */
+    int getSize(int logVersion, boolean forReplication);
+
+    /**
+     * Serializes this object into the specified buffer in the format for
+     * the specified log version. Earlier log versions only need to be
+     * supported for log entries with format changes made in {@link
+     * LogEntryType#LOG_VERSION_REPLICATE_OLDER} or greater.
+     *
+     * @param logBuffer the destination buffer
+     * @param logVersion the log version
+     * @param forReplication whether the entry will be sent over the wire,
+     * and not written to the log.
+     */
+    void writeEntry(ByteBuffer logBuffer,
+                    int logVersion,
+                    boolean forReplication);
+
+    /**
+     * Returns whether this format has a variant that is optimized for
+     * replication.
+     */
+    boolean hasReplicationFormat();
+
+    /**
+     * Returns whether it is worthwhile to materialize and then re-serialize a
+     * log entry in a format optimized for replication. Implementations should
+     * attempt to check efficiently, without instantiating the log entry
+     * object. Some implementations will simply return false.
+     *

+     * <p>WARNING: The logBuffer position must not be changed by this method.
+     *
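+     * <p>One way an implementation can honor this is to inspect a duplicate,
+     * which shares the buffer's content but has an independent position and
+     * limit (a sketch, not a requirement of this interface):
+     * <pre>
+     *    ByteBuffer view = logBuffer.duplicate();
+     *    // read only from view; logBuffer's position stays untouched
+     * </pre>
+     *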

        WARNING: The shared LogEntry object is used for calling this method, + * and this method must not change any of the fields in the object. + * + * @param logBuffer contains the entry that would be re-serialized. + * @param srcVersion the log version of entry in logBuffer. + * @param destVersion the version that would be used for re-serialization. + */ + boolean isReplicationFormatWorthwhile(ByteBuffer logBuffer, + int srcVersion, + int destVersion); +} diff --git a/src/com/sleepycat/je/log/entry/RestoreRequired.java b/src/com/sleepycat/je/log/entry/RestoreRequired.java new file mode 100644 index 0000000..424b955 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/RestoreRequired.java @@ -0,0 +1,134 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.log.entry; + +import java.io.IOException; +import java.io.StringReader; +import java.io.StringWriter; +import java.nio.ByteBuffer; +import java.util.Properties; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.Timestamp; + +/** + * This log entry is used to indicate that the environment's log files are not + * recoverable and that some sort of curative action should happen first. It's + * a general purpose mechanism that can be used for many types of errors. + */ +public class RestoreRequired implements Loggable { + + /* The failure type is used to decide on the course of action. */ + public enum FailureType {NETWORK_RESTORE, LOG_CHECKSUM, BTREE_CORRUPTION}; + + private FailureType failureType; + + /* For debugging, information */ + private Timestamp time; + + /* + * PropVals is a general purpose, serialized property list, to hold + * whatever each failure type needs, in order to fix the environment. + */ + private String propVals; + + public RestoreRequired(FailureType failureType, + Properties props) throws IOException { + this.failureType = failureType; + time = new Timestamp(System.currentTimeMillis()); + StringWriter sw = new StringWriter(); + props.store(sw, null); + propVals = sw.toString(); + } + + public RestoreRequired() { + } + + public FailureType getFailureType() { + return failureType; + } + + public Properties getProperties() { + Properties p = new Properties(); + StringReader reader = new StringReader(propVals); + try { + p.load(reader); + } catch (IOException e) { + /* This should never occur since there is no real IO. 
*/ + throw EnvironmentFailureException.unexpectedException(e); + } + return p; + } + + @Override + public int getLogSize() { + return LogUtils.getStringLogSize(failureType.name()) + + LogUtils.getTimestampLogSize(time) + + LogUtils.getStringLogSize(propVals); + } + + @Override + public void writeToLog(ByteBuffer logBuffer) { + LogUtils.writeString(logBuffer, failureType.name()); + LogUtils.writeTimestamp(logBuffer, time); + LogUtils.writeString(logBuffer, propVals); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + String typeName = LogUtils.readString(itemBuffer, false, entryVersion); + failureType = FailureType.valueOf(FailureType.class, typeName); + time = LogUtils.readTimestamp(itemBuffer, false); + propVals = LogUtils.readString(itemBuffer, false, entryVersion); + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + } + + @Override + public long getTransactionId() { + return 0; + } + + @Override + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof RestoreRequired)) { + return false; + } + + RestoreRequired otherEntry = (RestoreRequired) other; + if (!time.equals(otherEntry.time)) { + return false; + } + + if (!propVals.equals(otherEntry.propVals)) { + return false; + } + return true; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + dumpLog(sb, true); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/log/entry/SingleItemEntry.java b/src/com/sleepycat/je/log/entry/SingleItemEntry.java new file mode 100644 index 0000000..768f443 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/SingleItemEntry.java @@ -0,0 +1,127 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log.entry; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Loggable; + +/** + * This class embodies log entries that have a single loggable item. + * On disk, an entry contains: + *

+ * <pre>
+ *     the Loggable item
+ * </pre>
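+ *
+ * <p>A sketch of creating and serializing such an entry. The entry type and
+ * item here are arbitrary examples, and LOG_CKPT_START is assumed to be the
+ * LogEntryType constant for checkpoint-start entries:
+ * <pre>
+ *    SingleItemEntry&lt;CheckpointStart&gt; entry = SingleItemEntry.create(
+ *        LogEntryType.LOG_CKPT_START, new CheckpointStart(1, "example"));
+ *    ByteBuffer buf = ByteBuffer.allocate(entry.getSize());
+ *    entry.writeEntry(buf);
+ * </pre>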
+ *
+ * @param <T> the type of the Loggable item
+ */
+public class SingleItemEntry<T extends Loggable> extends BaseEntry<T>
+    implements LogEntry {
+
+    /*
+     * Persistent fields in a SingleItemEntry.
+     */
+    private T item;
+
+    /**
+     * Construct a log entry for reading.
+     */
+    public static <T extends Loggable> SingleItemEntry<T> create(
+        final Class<T> logClass) {
+
+        return new SingleItemEntry<T>(logClass);
+    }
+
+    /**
+     * Construct a log entry for reading.
+     */
+    SingleItemEntry(final Class<T> logClass) {
+        super(logClass);
+    }
+
+    /**
+     * Construct a log entry for writing.
+     */
+    public static <T extends Loggable> SingleItemEntry<T> create(
+        final LogEntryType entryType, final T item) {
+
+        return new SingleItemEntry<T>(entryType, item);
+    }
+
+    /**
+     * Construct a log entry for writing.
+     */
+    public SingleItemEntry(final LogEntryType entryType, final T item) {
+        setLogType(entryType);
+        this.item = item;
+    }
+
+    @Override
+    public void readEntry(EnvironmentImpl envImpl,
+                          LogEntryHeader header,
+                          ByteBuffer entryBuffer) {
+
+        item = newInstanceOfType();
+        item.readFromLog(entryBuffer, header.getVersion());
+    }
+
+    @Override
+    public StringBuilder dumpEntry(final StringBuilder sb,
+                                   final boolean verbose) {
+        item.dumpLog(sb, verbose);
+        return sb;
+    }
+
+    @Override
+    public void dumpRep(@SuppressWarnings("unused") StringBuilder sb) {
+    }
+
+    @Override
+    public T getMainItem() {
+        return item;
+    }
+
+    @Override
+    public long getTransactionId() {
+        return item.getTransactionId();
+    }
+
+    @Override
+    public DatabaseId getDbId() {
+        return null;
+    }
+
+    /*
+     * Writing support
+     */
+
+    @Override
+    public int getSize() {
+        return item.getLogSize();
+    }
+
+    @Override
+    public void writeEntry(final ByteBuffer destBuffer) {
+        item.writeToLog(destBuffer);
+    }
+
+    @Override
+    public boolean logicalEquals(final LogEntry other) {
+        return item.logicalEquals((Loggable) other.getMainItem());
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/SingleItemReplicableEntry.java b/src/com/sleepycat/je/log/entry/SingleItemReplicableEntry.java
new file mode 100644
index 0000000..56cb0d1
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/SingleItemReplicableEntry.java
@@ -0,0 +1,86 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Collections;
+
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.VersionedWriteLoggable;
+
+/**
+ * A basic implementation of a replicable log entry that has a single loggable
+ * item and provides for writing in a single format by default. Starting with
+ * log version 9, entry classes whose log format has changed since the previous
+ * log version will need to override the {@link #getSize(int, boolean)} and
+ * {@link #writeEntry(ByteBuffer, int, boolean)} methods to support writing the
+ * entry in earlier log formats.
+ *
+ * @param <T> the type of the loggable items in this entry
+ */
+abstract class SingleItemReplicableEntry<T extends VersionedWriteLoggable>
+    extends SingleItemEntry<T> implements ReplicableLogEntry {
+
+    /**
+     * Creates an instance of this class for reading a log entry.
+     *
+     * @param logClass the class of the contained loggable item
+     */
+    SingleItemReplicableEntry(final Class<T> logClass) {
+        super(logClass);
+    }
+
+    /**
+     * Creates an instance of this class for writing a log entry.
+     *
+     * @param entryType the associated log entry type
+     * @param item the contained loggable item
+     */
+    SingleItemReplicableEntry(final LogEntryType entryType, final T item) {
+        super(entryType, item);
+    }
+
+    @Override
+    public Collection<VersionedWriteLoggable> getEmbeddedLoggables() {
+        /* The cast is needed due to quirks of Java generics. */
+        return Collections.singleton(
+            (VersionedWriteLoggable) newInstanceOfType());
+    }
+
+    @Override
+    public int getSize(final int logVersion, final boolean forReplication) {
+        return getMainItem().getLogSize(logVersion, forReplication);
+    }
+
+    @Override
+    public void writeEntry(final ByteBuffer logBuffer,
+                           final int logVersion,
+                           final boolean forReplication) {
+        getMainItem().writeToLog(logBuffer, logVersion, forReplication);
+    }
+
+    @Override
+    public boolean hasReplicationFormat() {
+        return getMainItem().hasReplicationFormat();
+    }
+
+    @Override
+    public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer,
+                                                 final int srcVersion,
+                                                 final int destVersion) {
+        return newInstanceOfType().isReplicationFormatWorthwhile(
+            logBuffer, srcVersion, destVersion);
+    }
+}
diff --git a/src/com/sleepycat/je/log/entry/TraceLogEntry.java b/src/com/sleepycat/je/log/entry/TraceLogEntry.java
new file mode 100644
index 0000000..a4c541e
--- /dev/null
+++ b/src/com/sleepycat/je/log/entry/TraceLogEntry.java
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.log.entry;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.log.Trace;
+
+/**
+ * Log entry for a trace object.
+ */
+public class TraceLogEntry extends SingleItemReplicableEntry<Trace> {
+
+    /**
+     * The log version number of the most recent change for this log entry,
+     * including any changes to the format of the underlying {@link Trace}
+     * object.
+     *
+     * @see #getLastFormatChange
+     */
+    private static final int LAST_FORMAT_CHANGE = 8;
+
+    /**
+     * If non-null, write this object when asked to write in the log format
+     * prior to the last changed version, for testing.
+     */
+    private static volatile Loggable testPriorItem = null;
+
+    /** Construct a log entry for reading a {@link Trace} object. */
+    public TraceLogEntry() {
+        super(Trace.class);
+    }
+
+    /** Construct a log entry for writing a {@link Trace} object. */
+    public TraceLogEntry(final Trace trace) {
+        super(LogEntryType.LOG_TRACE, trace);
+    }
+
+    /**
+     * Specify an object to write instead of the enclosed item when asked to
+     * write this entry in the log format prior to the last changed version,
+     * for testing.
+     */
+    public static void setTestPriorItem(final Loggable priorItem) {
+        testPriorItem = priorItem;
+    }
+
+    @Override
+    public int getLastFormatChange() {
+        return LAST_FORMAT_CHANGE;
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * <p>This implementation provides additional behavior for testing.
+     */
+    @Override
+    public int getSize(final int logVersion, final boolean forReplication) {
+        if (testPriorItem != null && logVersion == LAST_FORMAT_CHANGE - 1) {
+            return testPriorItem.getLogSize();
+        }
+        return super.getSize(logVersion, forReplication);
+    }
+
+    /**
+     * {@inheritDoc}
+     *

        This implementation provides additional behavior for testing. + */ + @Override + public void writeEntry(final ByteBuffer destBuffer, + final int logVersion, + final boolean forReplication) { + if (testPriorItem != null && logVersion == LAST_FORMAT_CHANGE - 1) { + testPriorItem.writeToLog(destBuffer); + return; + } + super.writeEntry(destBuffer, logVersion, forReplication); + } +} diff --git a/src/com/sleepycat/je/log/entry/package-info.java b/src/com/sleepycat/je/log/entry/package-info.java new file mode 100644 index 0000000..412bb86 --- /dev/null +++ b/src/com/sleepycat/je/log/entry/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Classes for serializing/materializing log entries. + */ +package com.sleepycat.je.log.entry; \ No newline at end of file diff --git a/src/com/sleepycat/je/log/package-info.java b/src/com/sleepycat/je/log/package-info.java new file mode 100644 index 0000000..d07bc23 --- /dev/null +++ b/src/com/sleepycat/je/log/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Low level data storage including log entry sequential + * logging/writing, random reading/fetching, and sequential reading. + */ +package com.sleepycat.je.log; diff --git a/src/com/sleepycat/je/package.html b/src/com/sleepycat/je/package.html new file mode 100644 index 0000000..8fc27f2 --- /dev/null +++ b/src/com/sleepycat/je/package.html @@ -0,0 +1,47 @@ + + + + + + +Foundation for creating environments, databases and transactions; provides +cursor based data access. + +

+<h3>Package Specification</h3>

+<p>
+This package constitutes the base public API for Berkeley DB, Java
+Edition. The classes here are used to create database
+objects, and insert and retrieve data.

+<p>
+This package provides a key/data pair model of a database
+record. Databases and database cursors are the key objects used to
+access data. An alternative collections based API is available through
+com.sleepycat.collections.

+<p>
+The Environment class embodies the database environment and is the starting
+point for the application. Databases and transaction objects are
+created through the Environment class.
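+<p>
+A minimal sketch of that startup sequence (the home directory and database
+name here are placeholders; exception handling is omitted):
+<pre>
+    EnvironmentConfig envConfig = new EnvironmentConfig();
+    envConfig.setAllowCreate(true);
+    Environment env = new Environment(new File("envHome"), envConfig);
+
+    DatabaseConfig dbConfig = new DatabaseConfig();
+    dbConfig.setAllowCreate(true);
+    Database db = env.openDatabase(null, "myDatabase", dbConfig);
+</pre>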

+<p>
+Data can be inserted and retrieved directly through the Database
+object, or through a Cursor obtained from the Database. A database record
+consists of a key/data pair, where key and data are each individually
+represented by a DatabaseEntry object. Classes in com.sleepycat.bind
+provide optional support for mapping a Java object to a DatabaseEntry.
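+<p>
+Continuing the sketch above, a record can be written and read back as
+follows (illustrative only; error handling is omitted):
+<pre>
+    DatabaseEntry key = new DatabaseEntry("someKey".getBytes("UTF-8"));
+    DatabaseEntry data = new DatabaseEntry("someData".getBytes("UTF-8"));
+    db.put(null, key, data);
+
+    DatabaseEntry found = new DatabaseEntry();
+    if (db.get(null, key, found, LockMode.DEFAULT) ==
+        OperationStatus.SUCCESS) {
+        String value = new String(found.getData(), "UTF-8");
+    }
+</pre>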

        +Configuration classes are used to specify the attributes of particular +operations. For example the attributes of a database environment are +specified in the EnvironmentConfig class. An instance of that class is +required for Environment construction. Likewise, the attributes of a +database are described in DatabaseConfig, which is a parameter to the +Environment.openDatabase() method. + +@see [Getting Started Guide] + + diff --git a/src/com/sleepycat/je/recovery/CheckpointEnd.java b/src/com/sleepycat/je/recovery/CheckpointEnd.java new file mode 100644 index 0000000..1381306 --- /dev/null +++ b/src/com/sleepycat/je/recovery/CheckpointEnd.java @@ -0,0 +1,347 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import java.nio.ByteBuffer; +import java.util.Calendar; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Timestamp; + +/** + * CheckpointEnd encapsulates the information needed by a checkpoint end log + * entry. + */ +public class CheckpointEnd implements Loggable { + + private static final byte ROOT_LSN_MASK = (byte) 0x1; + private static final byte CLEANED_FILES_MASK = (byte) 0x2; + + /* + * invoker is just a way to tag each checkpoint in the log for easier log + * based debugging. It will tell us whether the checkpoint was invoked by + * recovery, the daemon, the api, or the cleaner. + */ + private String invoker; + + private Timestamp endTime; + private long checkpointStartLsn; + private boolean rootLsnExists; + private long rootLsn; + private long firstActiveLsn; + private long lastLocalNodeId; + private long lastReplicatedNodeId; + private long lastLocalDbId; + private long lastReplicatedDbId; + private long lastLocalTxnId; + private long lastReplicatedTxnId; + private long id; + + /* + * True if there were cleaned files to delete after this checkpoint. + * Used to govern HA recovery truncation. Defaults to true. 
+ */ + private boolean cleanedFilesToDelete; + + public CheckpointEnd(String invoker, + long checkpointStartLsn, + long rootLsn, + long firstActiveLsn, + long lastLocalNodeId, + long lastReplicatedNodeId, + long lastLocalDbId, + long lastReplicatedDbId, + long lastLocalTxnId, + long lastReplicatedTxnId, + long id, + boolean cleanedFilesToDelete) { + if (invoker == null) { + this.invoker = ""; + } else { + this.invoker = invoker; + } + + Calendar cal = Calendar.getInstance(); + this.endTime = new Timestamp(cal.getTime().getTime()); + this.checkpointStartLsn = checkpointStartLsn; + this.rootLsn = rootLsn; + if (rootLsn == DbLsn.NULL_LSN) { + rootLsnExists = false; + } else { + rootLsnExists = true; + } + if (firstActiveLsn == DbLsn.NULL_LSN) { + this.firstActiveLsn = checkpointStartLsn; + } else { + this.firstActiveLsn = firstActiveLsn; + } + this.lastLocalNodeId = lastLocalNodeId; + this.lastReplicatedNodeId = lastReplicatedNodeId; + this.lastLocalDbId = lastLocalDbId; + this.lastReplicatedDbId = lastReplicatedDbId; + this.lastLocalTxnId = lastLocalTxnId; + this.lastReplicatedTxnId = lastReplicatedTxnId; + this.id = id; + this.cleanedFilesToDelete = cleanedFilesToDelete; + } + + /* For logging only */ + public CheckpointEnd() { + checkpointStartLsn = DbLsn.NULL_LSN; + rootLsn = DbLsn.NULL_LSN; + firstActiveLsn = DbLsn.NULL_LSN; + } + + public String getInvoker() { + return invoker; + } + + /* + * Logging support for writing to the log + */ + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + int size = + LogUtils.getStringLogSize(invoker) + // invoker + LogUtils.getTimestampLogSize(endTime) + // endTime + LogUtils.getPackedLongLogSize(checkpointStartLsn) + + 1 + // flags: rootLsnExists, cleanedFilesToDelete + LogUtils.getPackedLongLogSize(firstActiveLsn) + + LogUtils.getPackedLongLogSize(lastLocalNodeId) + + LogUtils.getPackedLongLogSize(lastReplicatedNodeId) + + LogUtils.getPackedLongLogSize(lastLocalDbId) + + LogUtils.getPackedLongLogSize(lastReplicatedDbId) + + LogUtils.getPackedLongLogSize(lastLocalTxnId) + + LogUtils.getPackedLongLogSize(lastReplicatedTxnId) + + LogUtils.getPackedLongLogSize(id); + + if (rootLsnExists) { + size += LogUtils.getPackedLongLogSize(rootLsn); + } + return size; + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer logBuffer) { + LogUtils.writeString(logBuffer, invoker); + LogUtils.writeTimestamp(logBuffer, endTime); + LogUtils.writePackedLong(logBuffer, checkpointStartLsn); + + byte flags = 0; + if (rootLsnExists) { + flags |= ROOT_LSN_MASK; + } + + if (cleanedFilesToDelete) { + flags |= CLEANED_FILES_MASK; + } + + logBuffer.put(flags); + + if (rootLsnExists) { + LogUtils.writePackedLong(logBuffer, rootLsn); + } + LogUtils.writePackedLong(logBuffer, firstActiveLsn); + + LogUtils.writePackedLong(logBuffer, lastLocalNodeId); + LogUtils.writePackedLong(logBuffer, lastReplicatedNodeId); + + LogUtils.writePackedLong(logBuffer, lastLocalDbId); + LogUtils.writePackedLong(logBuffer, lastReplicatedDbId); + + LogUtils.writePackedLong(logBuffer, lastLocalTxnId); + LogUtils.writePackedLong(logBuffer, lastReplicatedTxnId); + + LogUtils.writePackedLong(logBuffer, id); + } + + /** + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer logBuffer, int entryVersion) { + boolean version6OrLater = (entryVersion >= 6); + invoker = LogUtils.readString(logBuffer, !version6OrLater, + entryVersion); + endTime = LogUtils.readTimestamp(logBuffer, !version6OrLater); + checkpointStartLsn = LogUtils.readLong(logBuffer, 
!version6OrLater); + byte flags = logBuffer.get(); + rootLsnExists = (flags & ROOT_LSN_MASK) != 0; + + if (rootLsnExists) { + rootLsn = LogUtils.readLong(logBuffer, !version6OrLater); + } + + if (entryVersion >= 7) { + cleanedFilesToDelete = ((flags & CLEANED_FILES_MASK) != 0); + } else { + cleanedFilesToDelete = true; + } + + firstActiveLsn = LogUtils.readLong(logBuffer, !version6OrLater); + + lastLocalNodeId = LogUtils.readLong(logBuffer, !version6OrLater); + if (version6OrLater) { + lastReplicatedNodeId = LogUtils.readPackedLong(logBuffer); + } + + if (version6OrLater) { + lastLocalDbId = LogUtils.readPackedLong(logBuffer); + lastReplicatedDbId = LogUtils.readPackedLong(logBuffer); + } else { + lastLocalDbId = LogUtils.readInt(logBuffer); + } + + lastLocalTxnId = LogUtils.readLong(logBuffer, !version6OrLater); + if (version6OrLater) { + lastReplicatedTxnId = LogUtils.readPackedLong(logBuffer); + } + + id = LogUtils.readLong(logBuffer, !version6OrLater); + + if (entryVersion >= 8 && entryVersion <= 10) { + /* Read defunct CleanerLogSummary. */ + LogUtils.readPackedLong(logBuffer); + LogUtils.readPackedInt(logBuffer); + final int nAvgLNSizes = LogUtils.readPackedInt(logBuffer); + for (int i = 0; i < nAvgLNSizes; i += 1) { + LogUtils.readPackedInt(logBuffer); + LogUtils.readPackedInt(logBuffer); + LogUtils.readPackedInt(logBuffer); + LogUtils.readPackedInt(logBuffer); + } + } + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + sb.append(""); + sb.append(DbLsn.toString(checkpointStartLsn)); + sb.append(""); + + if (rootLsnExists) { + sb.append(""); + sb.append(DbLsn.toString(rootLsn)); + sb.append(""); + } + sb.append(""); + sb.append(DbLsn.toString(firstActiveLsn)); + sb.append(""); + + sb.append(""); + } + + /** + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("time=").append(endTime); + sb.append(" lastLocalNodeId=").append(lastLocalNodeId); + sb.append(" lastReplicatedNodeId=").append(lastReplicatedNodeId); + sb.append(" lastLocalDbId=").append(lastLocalDbId); + sb.append(" lastReplicatedDbId=").append(lastReplicatedDbId); + sb.append(" lastLocalTxnId=").append(lastLocalTxnId); + sb.append(" lastReplicatedTxnId=").append(lastReplicatedTxnId); + sb.append(" id=").append(id); + sb.append(" rootExists=").append(rootLsnExists); + sb.append(" ckptStartLsn=").append + (DbLsn.getNoFormatString(checkpointStartLsn)); + if (rootLsnExists) { + sb.append(" root=").append(DbLsn.getNoFormatString(rootLsn)); + } + sb.append(" firstActive="). 
+ append(DbLsn.getNoFormatString(firstActiveLsn)); + return sb.toString(); + } + + /* + * Accessors + */ + long getCheckpointStartLsn() { + return checkpointStartLsn; + } + + long getRootLsn() { + return rootLsn; + } + + long getFirstActiveLsn() { + return firstActiveLsn; + } + + long getLastLocalNodeId() { + return lastLocalNodeId; + } + + long getLastReplicatedNodeId() { + return lastReplicatedNodeId; + } + + long getLastLocalDbId() { + return lastLocalDbId; + } + + long getLastReplicatedDbId() { + return lastReplicatedDbId; + } + + long getLastLocalTxnId() { + return lastLocalTxnId; + } + + long getLastReplicatedTxnId() { + return lastReplicatedTxnId; + } + + public long getId() { + return id; + } + + public boolean getCleanedFilesToDelete() { + return cleanedFilesToDelete; + } +} diff --git a/src/com/sleepycat/je/recovery/CheckpointStart.java b/src/com/sleepycat/je/recovery/CheckpointStart.java new file mode 100644 index 0000000..ab52093 --- /dev/null +++ b/src/com/sleepycat/je/recovery/CheckpointStart.java @@ -0,0 +1,110 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import java.nio.ByteBuffer; +import java.util.Calendar; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.Timestamp; + +/** + * CheckpointStart creates a log entry that marks the beginning of a + * checkpoint. + */ +public class CheckpointStart implements Loggable { + + private Timestamp startTime; + private long id; + + /* + * invoker is just a way to tag each checkpoint in the log for easier log + * based debugging. It will tell us whether the checkpoint was invoked by + * recovery, the daemon, the api, or the cleaner. + */ + private String invoker; + + public CheckpointStart(long id, String invoker) { + Calendar cal = Calendar.getInstance(); + this.startTime = new Timestamp(cal.getTime().getTime()); + this.id = id; + if (invoker == null) { + this.invoker = ""; + } else { + this.invoker = invoker; + } + } + + /* For logging only. */ + public CheckpointStart() { + } + + /* + * Logging support for writing. 
+ */ + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + return LogUtils.getTimestampLogSize(startTime) + + LogUtils.getPackedLongLogSize(id) + + LogUtils.getStringLogSize(invoker); + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer logBuffer) { + LogUtils.writeTimestamp(logBuffer, startTime); + LogUtils.writePackedLong(logBuffer, id); + LogUtils.writeString(logBuffer, invoker); + } + + /** + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer logBuffer, int entryVersion) { + boolean unpacked = (entryVersion < 6); + startTime = LogUtils.readTimestamp(logBuffer, unpacked); + id = LogUtils.readLong(logBuffer, unpacked); + invoker = LogUtils.readString(logBuffer, unpacked, entryVersion); + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + } + + /** + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } +} diff --git a/src/com/sleepycat/je/recovery/CheckpointStatDefinition.java b/src/com/sleepycat/je/recovery/CheckpointStatDefinition.java new file mode 100644 index 0000000..85c39e4 --- /dev/null +++ b/src/com/sleepycat/je/recovery/CheckpointStatDefinition.java @@ -0,0 +1,104 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * Per-stat Metadata for JE checkpointer statistics. 
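+ *
+ * <p>Applications observe these values through the public stats API; for
+ * example (a sketch, assuming the standard EnvironmentStats accessors):
+ * <pre>
+ *    EnvironmentStats stats = env.getStats(null);
+ *    long checkpoints = stats.getNCheckpoints();
+ * </pre>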
+ */ +public class CheckpointStatDefinition { + public static final String GROUP_NAME = "Checkpoints"; + public static final String GROUP_DESC = + "Dirty Btree internal nodes are written to the data " + + "log periodically to bound recovery time."; + + public static final String CKPT_CHECKPOINTS_NAME = + "nCheckpoints"; + public static final String CKPT_CHECKPOINTS_DESC = + "Total number of checkpoints run so far."; + public static final StatDefinition CKPT_CHECKPOINTS = + new StatDefinition( + CKPT_CHECKPOINTS_NAME, + CKPT_CHECKPOINTS_DESC); + + public static final String CKPT_LAST_CKPTID_NAME = + "lastCheckpointId"; + public static final String CKPT_LAST_CKPTID_DESC = + "Id of the last checkpoint."; + public static final StatDefinition CKPT_LAST_CKPTID = + new StatDefinition( + CKPT_LAST_CKPTID_NAME, + CKPT_LAST_CKPTID_DESC, + StatType.CUMULATIVE); + + public static final String CKPT_FULL_IN_FLUSH_NAME = + "nFullINFlush"; + public static final String CKPT_FULL_IN_FLUSH_DESC = + "Accumulated number of full INs flushed to the log."; + public static final StatDefinition CKPT_FULL_IN_FLUSH = + new StatDefinition( + CKPT_FULL_IN_FLUSH_NAME, + CKPT_FULL_IN_FLUSH_DESC); + + public static final String CKPT_FULL_BIN_FLUSH_NAME = + "nFullBINFlush"; + public static final String CKPT_FULL_BIN_FLUSH_DESC = + "Accumulated number of full BINs flushed to the log."; + public static final StatDefinition CKPT_FULL_BIN_FLUSH = + new StatDefinition( + CKPT_FULL_BIN_FLUSH_NAME, + CKPT_FULL_BIN_FLUSH_DESC); + + public static final String CKPT_DELTA_IN_FLUSH_NAME = + "nDeltaINFlush"; + public static final String CKPT_DELTA_IN_FLUSH_DESC = + "Accumulated number of Delta INs flushed to the log."; + public static final StatDefinition CKPT_DELTA_IN_FLUSH = + new StatDefinition( + CKPT_DELTA_IN_FLUSH_NAME, + CKPT_DELTA_IN_FLUSH_DESC); + + public static final String CKPT_LAST_CKPT_INTERVAL_NAME = + "lastCheckpointInterval"; + public static final String CKPT_LAST_CKPT_INTERVAL_DESC = + "Byte length from last checkpoint start to the previous checkpoint " + + "start."; + public static final StatDefinition CKPT_LAST_CKPT_INTERVAL = + new StatDefinition( + CKPT_LAST_CKPT_INTERVAL_NAME, + CKPT_LAST_CKPT_INTERVAL_DESC, + StatType.CUMULATIVE); + + public static final String CKPT_LAST_CKPT_START_NAME = + "lastCheckpointStart"; + public static final String CKPT_LAST_CKPT_START_DESC = + "Location in the log of the last checkpoint start."; + public static final StatDefinition CKPT_LAST_CKPT_START = + new StatDefinition( + CKPT_LAST_CKPT_START_NAME, + CKPT_LAST_CKPT_START_DESC, + StatType.CUMULATIVE); + + public static final String CKPT_LAST_CKPT_END_NAME = + "lastCheckpointEnd"; + public static final String CKPT_LAST_CKPT_END_DESC = + "Location in the log of the last checkpoint end."; + public static final StatDefinition CKPT_LAST_CKPT_END = + new StatDefinition( + CKPT_LAST_CKPT_END_NAME, + CKPT_LAST_CKPT_END_DESC, + StatType.CUMULATIVE); +} diff --git a/src/com/sleepycat/je/recovery/Checkpointer.java b/src/com/sleepycat/je/recovery/Checkpointer.java new file mode 100644 index 0000000..f0a2f05 --- /dev/null +++ b/src/com/sleepycat/je/recovery/Checkpointer.java @@ -0,0 +1,1660 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_CHECKPOINTS; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_DELTA_IN_FLUSH; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_FULL_BIN_FLUSH; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_FULL_IN_FLUSH; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPTID; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_END; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_INTERVAL; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_START; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.GROUP_DESC; +import static com.sleepycat.je.recovery.CheckpointStatDefinition.GROUP_NAME; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Level; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DiskLimitException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.cleaner.Cleaner; +import com.sleepycat.je.cleaner.FileSelector.CheckpointStartCleanerState; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.Provisional; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.SearchResult; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.WithRootLatched; +import com.sleepycat.je.utilint.DaemonThread; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LSNStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * The Checkpointer looks through the tree for internal nodes that must be + * flushed to the log. Checkpoint flushes must be done in ascending order from + * the bottom of the tree up. + * + * Checkpoint and IN Logging Rules + * ------------------------------- + * The checkpoint must log, and make accessible via non-provisional ancestors, + * all INs that are dirty at CkptStart. 
If we crash and recover from that + * CkptStart onward, any IN that became dirty (before the crash) after the + * CkptStart must become dirty again as the result of replaying the action that + * caused it to originally become dirty. + * + * Therefore, when an IN is dirtied at some point in the checkpoint interval, + * but is not logged by the checkpoint, the log entry representing the action + * that dirtied the IN must follow either the CkptStart or the FirstActiveLSN + * that is recorded in the CkptEnd entry. The FirstActiveLSN is less than or + * equal to the CkptStart LSN. Recovery will process LNs between the + * FirstActiveLSN and the end of the log. Other entries are only processed + * from the CkptStart forward. And provisional entries are not processed. + * + * Example: Non-transactional LN logging. We take two actions: 1) log the LN + * and then 2) dirty the parent BIN. What if the LN is logged before CkptStart + * and the BIN is dirtied after CkptStart? How do we avoid breaking the rules? + * The answer is that we log the LN while holding the latch on the parent BIN, + * and we don't release the latch until after we dirty the BIN. The + * construction of the checkpoint dirty map requires latching the BIN. Since + * the LN was logged before CkptStart, the BIN will be dirtied before the + * checkpointer latches it during dirty map construction. So the BIN will + * always be included in the dirty map and logged by the checkpoint. + * + * Example: Abort. We take two actions: 1) log the abort and then 2) undo the + * changes, which modifies (dirties) the BIN parents of the undone LNs. There + * is nothing to prevent logging CkptStart in between these two actions, so how + * do we avoid breaking the rules? The answer is that we do not unregister the + * transaction until after the undo phase. So although the BINs may be dirtied + * by the undo after CkptStart is logged, the FirstActiveLSN will be prior to + * CkptStart. Therefore, we will process the Abort and replay the action that + * modifies the BINs. + * + * Exception: Lazy migration. The log cleaner will make an IN dirty without + * logging an action that makes it dirty. This is an exception to the general + * rule that actions should be logged when they cause dirtiness. The reasons + * this is safe are: + * 1. The IN contents are not modified, so there is no information lost if the + * IN is never logged, or is logged provisionally and no ancestor is logged + * non-provisionally. + * 2. If the IN is logged non-provisionally, this will have the side effect of + * recording the old LSN as being obsolete. However, the general rules for + * checkpointing and recovery will ensure that the new version is used in + * the Btree. The new version will either be replayed by recovery or + * referenced in the active Btree via a non-provisional ancestor. + * + * Checkpoint Algorithm TODO update this + * -------------------- + * The final checkpointDirtyMap field is used to hold (in addition to the dirty + * INs) the state of the checkpoint and highest flush levels. Access to this + * object is synchronized so that eviction and checkpointing can access it + * concurrently. When a checkpoint is not active, the state is CkptState.NONE + * and the dirty map is empty. When a checkpoint runs, we do this: + * + * 1. Get set of files from cleaner that can be deleted after this checkpoint. + * 2. Set checkpointDirtyMap state to DIRTY_MAP_INCOMPLETE, meaning that dirty + * map construction is in progress. + * 3. Log CkptStart + * 4. 
Construct dirty map, organized by Btree level, from dirty INs in INList. + * The highest flush levels are calculated during dirty map construction. + * Set checkpointDirtyMap state to DIRTY_MAP_COMPLETE. + * 5. Flush INs in dirty map. + * + First, flush the bottom two levels a sub-tree at a time, where a + * sub-tree is one IN at level two and all its BIN children. Higher + * levels (above level two) are logged strictly by level, not using + * subtrees. + * o If je.checkpointer.highPriority=false, we log one IN at a + * time, whether or not the IN is logged as part of a subtree, + * and do a Btree search for the parent of each IN. + * o If je.checkpointer.highPriority=true, for the bottom two + * levels we log each sub-tree in a single call to the + * LogManager with the parent IN latched, and we only do one + * Btree search for each level two IN. Higher levels are logged + * one IN at a time as with highPriority=false. + * + The Provisional property is set as follows, depending on the level + * of the IN: + * o level is max flush level: Provisional.NO + * o level is bottom level: Provisional.YES + * o Otherwise (middle levels): Provisional.BEFORE_CKPT_END + * 6. Flush VLSNIndex cache to make VLSNIndex recoverable. + * 7. Flush UtilizationTracker (write FileSummaryLNs) to persist all + * tracked obsolete offsets and utilization summary info, to make this info + * recoverable. + * 8. Log CkptEnd + * 9. Delete cleaned files from step 1. + * 10. Set checkpointDirtyMap state to NONE. + * + * Per-DB Highest Flush Level + * -------------------------- + * As mentioned above, when the dirty map is constructed we also determine the + * highest flush level for each database. This is the maximum Btree level at + * which a dirty node exists in the DB. + * + * When logging a node below the maxFlushLevel, we add the parent to the dirty + * map. It may or may not have been added when the dirty map was constructed. + * The idea is to flush all ancestors of all nodes in the dirty map, up to and + * including the maxFlushLevel, even if those ancestors were not dirty when the + * dirty map was constructed. + * + * This is done to avoid orphaning a dirty node as shown in this example. + * + * IN-A (root level=4) + * / \ + * (d) IN-B IN-C (maxFlushLevel=3) + * \ + * (d) IN-D + * + * IN-C is not dirty (d) when the dirty map is constructed, but it will be + * logged because its child (IN-D) is dirty, and it is not above maxFlushLevel. + * + * If IN-C were not logged, and there were a crash after the checkpoint, the + * changes to IN-D would be lost. IN-D would not be replayed by recovery + * because it is logged provisionally, and it would not be accessible via its + * parent. This is because only nodes at maxFlushLevel are logged + * non-provisionally. The actions that led to the changes in IN-D may not be + * replayed either, because they may appear before the firstActiveLsn + * associated with the checkpoint. + * + * When log files are to be deleted at the end of the checkpoint (after being + * processed by the log cleaner), the maxFlushLevel is increased by one. + * This is to ensure that LSNs in deleted files will not be fetched during + * recovery. Such files are in the FileSelector.CLEANED state, which means + * they have been processed by the cleaner since the last checkpoint. + * + * TODO: Document circumstances and motivation for the extra flush level. 
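+ *
+ * To summarize, the provisional status that the checkpoint uses when logging
+ * an IN follows the rules of step 5 above. In pseudocode (illustrative only,
+ * not an actual method of this class):
+ *
+ *    Provisional statusFor(int level, int bottomLevel, int maxFlushLevel) {
+ *        if (level >= maxFlushLevel) return Provisional.NO;
+ *        if (level == bottomLevel)   return Provisional.YES;
+ *        return Provisional.BEFORE_CKPT_END;  // middle levels
+ *    }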
+ * + * Lastly, for Database.sync or a checkpoint with MinimizeRecoveryTime + * configured, we will flush all the way to the root rather than using the + * maxFlushLevel computed as described above. + * + * Provisional.BEFORE_CKPT_END + * --------------------------- + * See Provisional.java for a description of the relationship between the + * checkpoint algorithm above and the BEFORE_CKPT_END property. + * + * Coordination of Eviction and Checkpointing + * ------------------------------------------ + * Eviction can proceed concurrently with all phases of a checkpoint, and + * eviction may take place concurrently in multiple threads. This concurrency + * is crucial to avoid blocking application threads that perform eviction and + * to reduce the amount of eviction required in application threads. + * + * Eviction calls Checkpointer.coordinateEvictionWithCheckpoint, which calls + * DirtyINMap.coordinateEvictionWithCheckpoint, just before logging an IN. + * coordinateEvictionWithCheckpoint returns whether the IN should be logged + * provisionally (Provisional.YES) or non-provisionally (Provisional.NO). + * + * Other coordination necessary depends on the state of the checkpoint: + * + NONE: No additional action. + * o return Provisional.NO + * + DIRTY_MAP_INCOMPLETE: The parent IN is added to the dirty map, exactly + * as if it were encountered as dirty in the INList during dirty map + * construction. + * o IN is root: return Provisional.NO + * o IN is not root: return Provisional.YES + * + DIRTY_MAP_COMPLETE: + * o IN level GTE highest flush level: return Provisional.NO + * o IN level LT highest flush level: return Provisional.YES + * + * In general this is designed so that eviction will use the same provisional + * value that would be used by the checkpoint, as if the checkpoint itself were + * logging the IN. However, there are several conditions where this is not + * exactly the case. + * + * 1. Eviction may log an IN with Provisional.YES when the IN was not dirty at + * the time of dirty map creation, if it became dirty afterwards. In this + * case, the checkpointer would not have logged the IN at all. This is safe + * because the actions that made that IN dirty are logged in the recovery + * period. + * 2. Eviction may log an IN with Provisional.YES after the checkpoint has + * logged it, if it becomes dirty again. In this case the IN is logged + * twice, which would not have been done by the checkpoint alone. This is + * safe because the actions that made that IN dirty are logged in the + * recovery period. + * 3. An intermediate level IN (not bottom most and not the highest flush + * level) will be logged by the checkpoint with Provisional.BEFORE_CKPT_END + * but will be logged by eviction with Provisional.YES. See below for why + * this is safe. + * 4. Between checkpoint step 8 (log CkptEnd) and 10 (set checkpointDirtyMap + * state to NONE), eviction may log an IN with Provisional.YES, although a + * checkpoint is not strictly active during this interval. See below for + * why this is safe. + * + * It is safe for eviction to log an IN as Provisional.YES for the last two + * special cases, because this does not cause incorrect recovery behavior. For + * recovery to work properly, it is only necessary that: + * + * + Provisional.NO is used for INs at the max flush level during an active + * checkpoint. 
+ * + Provisional.YES or BEFORE_CKPT_END is used for INs below the max flush + * level, to avoid replaying an IN during recovery that may depend on a file + * deleted as the result of the checkpoint. + * + * You may ask why we don't use Provisional.YES for eviction when a checkpoint + * is not active. There are two reason, both related to performance: + * + * 1. This would be wasteful when an IN is evicted in between checkpoints, and + * that portion of the log is processed by recovery later, in the event of a + * crash. The evicted INs would be ignored by recovery, but the actions + * that caused them to be dirty would be replayed and the INs would be + * logged again redundantly. + * 2. Logging a IN provisionally will not count the old LSN as obsolete + * immediately, so cleaner utilization will be inaccurate until the a + * non-provisional parent is logged, typically by the next checkpoint. It + * is always important to keep the cleaner from stalling and spiking, to + * keep latency and throughput as level as possible. + * + * Therefore, it is safe to log with Provisional.YES in between checkpoints, + * but not desirable. + * + * Although we don't do this, it would be safe and optimal to evict with + * BEFORE_CKPT_END in between checkpoints, because it would be treated by + * recovery as if it were Provisional.NO. This is because the interval between + * checkpoints is only processed by recovery if it follows the last CkptEnd, + * and BEFORE_CKPT_END is treated as Provisional.NO if the IN follows the last + * CkptEnd. + * + * However, it would not be safe to evict an IN with BEFORE_CKPT_END during a + * checkpoint, when logging of the IN's ancestors does not occur according to + * the rules of the checkpoint. If this were done, then if the checkpoint + * completes and is used during a subsequent recovery, an obsolete offset for + * the old version of the IN will mistakenly be recorded. Below are two cases + * where BEFORE_CKPT_END is used correctly and one showing how it could be used + * incorrectly. + * + * 1. Correct use of BEFORE_CKPT_END when the checkpoint does not complete. + * + * 050 BIN-A + * 060 IN-B parent of BIN-A + * 100 CkptStart + * 200 BIN-A logged with BEFORE_CKPT_END + * 300 FileSummaryLN with obsolete offset for BIN-A at 050 + * Crash and recover + * + * Recovery will process BIN-A at 200 (it will be considered + * non-provisional) because there is no following CkptEnd. It is + * therefore correct that BIN-A at 050 is obsolete. + * + * 2. Correct use of BEFORE_CKPT_END when the checkpoint does complete. + * + * 050 BIN-A + * 060 IN-B parent of BIN-A + * 100 CkptStart + * 200 BIN-A logged with BEFORE_CKPT_END + * 300 FileSummaryLN with obsolete offset for BIN-A at 050 + * 400 IN-B parent of BIN-A, non-provisional + * 500 CkptEnd + * Crash and recover + * + * Recovery will not process BIN-A at 200 (it will be considered + * provisional) because there is a following CkptEnd, but it will + * process its parent IN-B at 400, and therefore the BIN-A at 200 will be + * active in the tree. It is therefore correct that BIN-A at 050 is + * obsolete. + * + * 3. Incorrect use of BEFORE_CKPT_END when the checkpoint does complete. 
+ * + * 050 BIN-A + * 060 IN-B parent of BIN-A + * 100 CkptStart + * 200 BIN-A logged with BEFORE_CKPT_END + * 300 FileSummaryLN with obsolete offset for BIN-A at 050 + * 400 CkptEnd + * Crash and recover + * + * Recovery will not process BIN-A at 200 (it will be considered + * provisional) because there is a following CkptEnd, but no parent + * IN-B is logged, and therefore the IN-B at 060 and BIN-A at 050 will be + * active in the tree. It is therefore incorrect that BIN-A at 050 is + * obsolete. + * + * This last case is what caused the LFNF in SR [#19422], when BEFORE_CKPT_END + * was mistakenly used for logging evicted BINs via CacheMode.EVICT_BIN. + * During the checkpoint, we evict BIN-A and log it with BEFORE_CKPT_END, yet + * neither it nor its parent are part of the checkpoint. After being counted + * obsolete, we crash and recover. Then the file containing the BIN (BIN-A at + * 050 above) is cleaned and deleted. During cleaning, it is not migrated + * because an obsolete offset was previously recorded. The LFNF occurs when + * trying to access this BIN during a user operation. + * + * CacheMode.EVICT_BIN + * ------------------- + * Unlike in JE 4.0 where EVICT_BIN was first introduced, in JE 4.1 and later + * we do not use special rules when an IN is evicted. Since concurrent + * eviction and checkpointing are supported in JE 4.1, the above rules apply to + * EVICT_BIN as well as all other types of eviction. + */ +public class Checkpointer extends DaemonThread implements EnvConfigObserver { + + /** + * For unit testing only. Called before we flush the max level. This + * field is static because it is called from the static flushIN method. + */ + private static TestHook maxFlushLevelHook = null; + + private static TestHook beforeFlushHook = null; + + static TestHook examineINForCheckpointHook = null; + + /* Checkpoint sequence, initialized at recovery. */ + private long checkpointId; + + /* + * How much the log should grow between checkpoints. If 0, we're using time + * based checkpointing. + */ + private final long logSizeBytesInterval; + private final long logFileMax; + private final long timeInterval; + private long lastCheckpointMillis; + private volatile boolean wakeupAfterNoWrites; + + /* Configured to true to minimize checkpoint duration. */ + private boolean highPriority; + + private long nCheckpoints; + private long lastCheckpointStart; + private long lastCheckpointEnd; + private long lastCheckpointInterval; + private final FlushStats flushStats; + + /** + * The DirtyINMap for checkpointing is created once and is reset after each + * checkpoint is complete. Access to this object is synchronized so that + * eviction and checkpointing can access it concurrently. + */ + private final DirtyINMap checkpointDirtyMap; + + public Checkpointer(EnvironmentImpl envImpl, + long waitTime, + String name) { + super(waitTime, name, envImpl); + logSizeBytesInterval = + envImpl.getConfigManager().getLong + (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL); + logFileMax = + envImpl.getConfigManager().getLong(EnvironmentParams.LOG_FILE_MAX); + timeInterval = waitTime; + lastCheckpointMillis = 0; + + nCheckpoints = 0; + flushStats = new FlushStats(); + + checkpointDirtyMap = new DirtyINMap(envImpl); + + /* Initialize mutable properties and register for notifications. */ + envConfigUpdate(envImpl.getConfigManager(), null); + envImpl.addConfigObserver(this); + } + + /** + * Process notifications of mutable property changes. 
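+     *
+     * For example, the high-priority flag can be changed at run time; a
+     * sketch of an application doing so (assuming the public name of
+     * EnvironmentParams.CHECKPOINTER_HIGH_PRIORITY is
+     * "je.checkpointer.highPriority"):
+     * <pre>
+     *    EnvironmentMutableConfig mc = env.getMutableConfig();
+     *    mc.setConfigParam("je.checkpointer.highPriority", "true");
+     *    env.setMutableConfig(mc); // notifies registered config observers
+     * </pre>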
+     */
+    @Override
+    public void envConfigUpdate(DbConfigManager cm,
+                                EnvironmentMutableConfig ignore) {
+        highPriority = cm.getBoolean
+            (EnvironmentParams.CHECKPOINTER_HIGH_PRIORITY);
+    }
+
+    /**
+     * Initializes the checkpoint intervals when no checkpoint is performed
+     * while opening the environment.
+     */
+    void initIntervals(long lastCheckpointStart,
+                       long lastCheckpointEnd,
+                       long lastCheckpointMillis) {
+        this.lastCheckpointStart = lastCheckpointStart;
+        this.lastCheckpointEnd = lastCheckpointEnd;
+        this.lastCheckpointMillis = lastCheckpointMillis;
+    }
+
+    /**
+     * Coordinates an eviction with an in-progress checkpoint and returns
+     * whether provisional logging is needed.
+     *
+     * @return the provisional status to use for logging the target.
+     */
+    public Provisional coordinateEvictionWithCheckpoint(
+        final DatabaseImpl db,
+        final int targetLevel,
+        final IN parent) {
+
+        return checkpointDirtyMap.
+            coordinateEvictionWithCheckpoint(db, targetLevel, parent);
+    }
+
+    /**
+     * Coordinates a split with an in-progress checkpoint.
+     *
+     * @param newSibling the sibling IN created by the split.
+     */
+    public void coordinateSplitWithCheckpoint(final IN newSibling) {
+        checkpointDirtyMap.coordinateSplitWithCheckpoint(newSibling);
+    }
+
+    /**
+     * Figure out the wakeup period. Supplied through this static method
+     * because we need to pass the wakeup period to the superclass and need
+     * to do the calculation outside this constructor.
+     *
+     * @throws IllegalArgumentException via Environment ctor and
+     * setMutableConfig.
+     */
+    public static long getWakeupPeriod(DbConfigManager configManager)
+        throws IllegalArgumentException {
+
+        long wakeupPeriod = configManager.getDuration
+            (EnvironmentParams.CHECKPOINTER_WAKEUP_INTERVAL);
+        long bytePeriod = configManager.getLong
+            (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL);
+
+        /* Checkpointing period must be set either by time or by log size. */
+        if ((wakeupPeriod == 0) && (bytePeriod == 0)) {
+            throw new IllegalArgumentException
+                (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL.getName() +
+                 " and " +
+                 EnvironmentParams.CHECKPOINTER_WAKEUP_INTERVAL.getName() +
+                 " cannot both be 0. ");
+        }
+
+        /*
+         * Checkpointing by log size takes precedence over the time-based
+         * period.
+         */
+        if (bytePeriod == 0) {
+            return wakeupPeriod;
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * Set checkpoint id -- can only be done after recovery.
+     */
+    synchronized void setCheckpointId(long lastCheckpointId) {
+        checkpointId = lastCheckpointId;
+    }
+
+    /**
+     * Load stats.
+     */
+    @SuppressWarnings("unused")
+    public StatGroup loadStats(StatsConfig config) {
+        StatGroup stats = new StatGroup(GROUP_NAME, GROUP_DESC);
+        new LongStat(stats, CKPT_LAST_CKPTID, checkpointId);
+        new LongStat(stats, CKPT_CHECKPOINTS, nCheckpoints);
+        new LongStat(stats, CKPT_LAST_CKPT_INTERVAL, lastCheckpointInterval);
+        new LSNStat(stats, CKPT_LAST_CKPT_START, lastCheckpointStart);
+        new LSNStat(stats, CKPT_LAST_CKPT_END, lastCheckpointEnd);
+        new LongStat(stats, CKPT_FULL_IN_FLUSH, flushStats.nFullINFlush);
+        new LongStat(stats, CKPT_FULL_BIN_FLUSH, flushStats.nFullBINFlush);
+        new LongStat(stats, CKPT_DELTA_IN_FLUSH, flushStats.nDeltaINFlush);
+
+        if (config.getClear()) {
+            nCheckpoints = 0;
+            flushStats.nFullINFlush = 0;
+            flushStats.nFullBINFlush = 0;
+            flushStats.nDeltaINFlush = 0;
+        }
+
+        return stats;
+    }
+
+    /**
+     * Return the number of retries when a deadlock exception occurs.
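+     *
+     * The value is read from EnvironmentParams.CHECKPOINTER_RETRY (assumed
+     * to be exposed as the "je.checkpointer.deadlockRetry" config param)
+     * and is used by the DaemonThread retry loop when a checkpoint run is
+     * aborted by a deadlock.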
+ */ + @Override + protected long nDeadlockRetries() { + return envImpl.getConfigManager().getInt + (EnvironmentParams.CHECKPOINTER_RETRY); + } + + /** + * Called whenever the DaemonThread wakes up from a sleep. + */ + @Override + protected void onWakeup() { + + if (envImpl.isClosing()) { + return; + } + + doCheckpoint( + CheckpointConfig.DEFAULT, "daemon", true /*invokedFromDaemon*/); + + wakeupAfterNoWrites = false; + } + + /** + * Wakes up the checkpointer if a checkpoint log interval is configured and + * the number of bytes written since the last checkpoint exceeds the size + * of the interval. + */ + public void wakeupAfterWrite() { + + if ((logSizeBytesInterval != 0) && !isRunning()) { + + long nextLsn = envImpl.getFileManager().getNextLsn(); + + if (DbLsn.getNoCleaningDistance( + nextLsn, lastCheckpointStart, logFileMax) >= + logSizeBytesInterval) { + + wakeup(); + } + } + } + + /** + * Wakes up the checkpointer if a checkpoint is needed to reclaim disk + * space for already cleaned files. This method is called after an idle + * period with no writes. + */ + public void wakeupAfterNoWrites() { + + if (!isRunning() && needCheckpointForCleanedFiles()) { + wakeupAfterNoWrites = true; + wakeup(); + } + } + + private boolean needCheckpointForCleanedFiles() { + return envImpl.getCleaner().getFileSelector().isCheckpointNeeded(); + } + + /** + * Determine whether a checkpoint should be run. + */ + private boolean isRunnable(CheckpointConfig config) { + /* Figure out if we're using log size or time to determine interval.*/ + long useBytesInterval = 0; + long useTimeInterval = 0; + long nextLsn = DbLsn.NULL_LSN; + boolean runnable = false; + try { + if (config.getForce()) { + runnable = true; + return true; + } + + if (wakeupAfterNoWrites && needCheckpointForCleanedFiles()) { + runnable = true; + return true; + } + + if (config.getKBytes() != 0) { + useBytesInterval = config.getKBytes() << 10; + + } else if (config.getMinutes() != 0) { + /* Convert to millis. */ + useTimeInterval = config.getMinutes() * 60 * 1000; + + } else if (logSizeBytesInterval != 0) { + useBytesInterval = logSizeBytesInterval; + + } else { + useTimeInterval = timeInterval; + } + + /* + * If our checkpoint interval is defined by log size, check on how + * much log has grown since the last checkpoint. + */ + if (useBytesInterval != 0) { + nextLsn = envImpl.getFileManager().getNextLsn(); + + if (DbLsn.getNoCleaningDistance( + nextLsn, lastCheckpointStart, logFileMax) >= + useBytesInterval) { + + runnable = true; + } + + } else if (useTimeInterval != 0) { + + /* + * Our checkpoint is determined by time. If enough time has + * passed and some log data has been written, do a checkpoint. + */ + final long lastUsedLsn = + envImpl.getFileManager().getLastUsedLsn(); + + if (((System.currentTimeMillis() - lastCheckpointMillis) >= + useTimeInterval) && + (DbLsn.compareTo(lastUsedLsn, lastCheckpointEnd) != 0)) { + + runnable = true; + } + } + return runnable; + + } finally { + if (logger.isLoggable(Level.FINEST)) { + final StringBuilder sb = new StringBuilder(); + sb.append("size interval=").append(useBytesInterval); + if (nextLsn != DbLsn.NULL_LSN) { + sb.append(" nextLsn="). 
+                        append(DbLsn.getNoFormatString(nextLsn));
+                }
+                if (lastCheckpointEnd != DbLsn.NULL_LSN) {
+                    sb.append(" lastCkpt=");
+                    sb.append(DbLsn.getNoFormatString(lastCheckpointEnd));
+                }
+                sb.append(" time interval=").append(useTimeInterval);
+                sb.append(" force=").append(config.getForce());
+                sb.append(" runnable=").append(runnable);
+
+                LoggerUtils.finest(logger, envImpl, sb.toString());
+            }
+        }
+    }
+
+    /**
+     * The real work to do a checkpoint. This may be called by the checkpoint
+     * thread when waking up, or it may be invoked programmatically through
+     * the API.
+     *
+     * @param invokingSource a debug aid, to indicate who invoked this
+     * checkpoint. (i.e., recovery, the checkpointer daemon, the cleaner,
+     * programmatically)
+     */
+    public synchronized void doCheckpoint(CheckpointConfig config,
+                                          String invokingSource,
+                                          boolean invokedFromDaemon) {
+        if (envImpl.isReadOnly()) {
+            return;
+        }
+
+        if (!isRunnable(config)) {
+            return;
+        }
+
+        /* Stop if we cannot write because of a disk limit violation. */
+        try {
+            envImpl.checkDiskLimitViolation();
+        } catch (DiskLimitException e) {
+            if (!invokedFromDaemon) {
+                throw e;
+            }
+            return;
+        }
+
+        /*
+         * If minimizing recovery time is desired, then flush all the way to
+         * the top of the dbtree instead of stopping at the highest level
+         * last modified, so that only the root INs are processed by
+         * recovery.
+         */
+        final boolean flushAll = config.getMinimizeRecoveryTime();
+
+        /*
+         * If there are cleaned files to be deleted, flush an extra level to
+         * write out the parents of cleaned nodes. This ensures that no node
+         * will contain the LSN of a cleaned file.
+         *
+         * Note that we don't currently distinguish between files in the
+         * CLEANED and FULLY_PROCESSED states. For a FULLY_PROCESSED file, a
+         * pending LN may have been processed since the prior checkpoint.
+         * However, the BIN containing the LSN of the LN is guaranteed to be
+         * logged, so there is no need to increment maxFlushLevel. So we
+         * could optimize in the future and only set flushExtraLevel when
+         * some files are CLEANED (i.e., do not set flushExtraLevel when all
+         * files are FULLY_PROCESSED or cleanerState.isEmpty()).
+         */
+        final Cleaner cleaner = envImpl.getCleaner();
+
+        final CheckpointStartCleanerState cleanerState =
+            cleaner.getFilesAtCheckpointStart();
+
+        final boolean flushExtraLevel = !cleanerState.isEmpty();
+
+        lastCheckpointMillis = System.currentTimeMillis();
+        flushStats.resetPerRunCounters();
+
+        /* Get the next checkpoint id. */
+        checkpointId++;
+        nCheckpoints++;
+
+        boolean success = false;
+        boolean traced = false;
+
+        final LogManager logManager = envImpl.getLogManager();
+
+        /*
+         * Set the checkpoint state so that concurrent eviction can be
+         * coordinated.
+         */
+        checkpointDirtyMap.beginCheckpoint(flushAll, flushExtraLevel);
+
+        try {
+            /* Log the checkpoint start. */
+            final SingleItemEntry<CheckpointStart> startEntry =
+                SingleItemEntry.create(
+                    LogEntryType.LOG_CKPT_START,
+                    new CheckpointStart(checkpointId, invokingSource));
+
+            final long checkpointStart =
+                logManager.log(startEntry, ReplicationContext.NO_REPLICATE);
+
+            /*
+             * Note the first active LSN point. The definition of
+             * firstActiveLsn is that all log entries for active transactions
+             * are equal to or after that LSN. This is the starting point for
+             * replaying LNs during recovery and will be stored in the
+             * CkptEnd entry.
+             *
+             * Use the checkpointStart as the firstActiveLsn if
+             * firstActiveLsn is null, meaning that no txns are active.
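+             * (Thus when no txns are active, recovery's replay of LNs will
+             * begin at this CkptStart entry itself.)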
+ * + * The current value must be retrieved from TxnManager after + * logging CkptStart. If it were instead retrieved before logging + * CkptStart, the following failure could occur. [#20270] + * + * ... getFirstActiveLsn returns NULL_LSN, will use 200 CkptStart + * 100 LN-A in Txn-1 + * 200 CkptStart + * 300 BIN-B refers to 100 LN-A + * 400 CkptEnd + * ... Crash and recover. Recovery does not undo 100 LN-A. + * ... Txn-1 is uncommitted, yet 100 LN-A takes effect. + */ + long firstActiveLsn = envImpl.getTxnManager().getFirstActiveLsn(); + if (firstActiveLsn == DbLsn.NULL_LSN) { + firstActiveLsn = checkpointStart; + } + + /* + * In a replicated system, the checkpointer will be flushing out + * the VLSNIndex, which is HA metadata. Check that the in-memory + * version encompasses all metadata up to the point of the + * CheckpointStart record. This is no-op for non-replicated + * systems. [#19754] + */ + envImpl.awaitVLSNConsistency(); + + /* Find the set of dirty INs that must be logged. */ + checkpointDirtyMap.selectDirtyINsForCheckpoint(); + + /* Call hook after dirty map creation and before flushing. */ + TestHookExecute.doHookIfSet(beforeFlushHook); + + /* Flush IN nodes. */ + flushDirtyNodes( + envImpl, checkpointDirtyMap, checkpointStart, highPriority, + flushStats); + + if (DirtyINMap.DIRTY_SET_DEBUG_TRACE) { + LoggerUtils.logMsg( + envImpl.getLogger(), envImpl, Level.INFO, + "Ckpt flushed" + + " nFullINFlushThisRun = " + + flushStats.nFullINFlushThisRun + + " nFullBINFlushThisRun = " + + flushStats.nFullBINFlushThisRun + + " nDeltaINFlushThisRun = " + + flushStats.nDeltaINFlushThisRun); + + } + + /* + * Flush MapLNs if not already done by flushDirtyNodes. Only flush + * a database if it has not already been flushed since checkpoint + * start. Lastly, flush the DB mapping tree root. + */ + checkpointDirtyMap.flushMapLNs(checkpointStart); + checkpointDirtyMap.flushRoot(checkpointStart); + + /* + * Flush replication information if necessary so that the VLSNIndex + * cache is flushed and is recoverable. + */ + envImpl.preCheckpointEndFlush(); + + /* + * Flush utilization info AFTER flushing IN nodes to reduce the + * inaccuracies caused by the sequence FileSummaryLN-LN-BIN. + */ + envImpl.getUtilizationProfile().flushFileUtilization + (envImpl.getUtilizationTracker().getTrackedFiles()); + + final DbTree dbTree = envImpl.getDbTree(); + final boolean willDeleteFiles = !cleanerState.isEmpty(); + + final CheckpointEnd ckptEnd = new CheckpointEnd( + invokingSource, checkpointStart, envImpl.getRootLsn(), + firstActiveLsn, + envImpl.getNodeSequence().getLastLocalNodeId(), + envImpl.getNodeSequence().getLastReplicatedNodeId(), + dbTree.getLastLocalDbId(), dbTree.getLastReplicatedDbId(), + envImpl.getTxnManager().getLastLocalTxnId(), + envImpl.getTxnManager().getLastReplicatedTxnId(), + checkpointId, willDeleteFiles); + + final SingleItemEntry endEntry = + SingleItemEntry.create(LogEntryType.LOG_CKPT_END, ckptEnd); + + /* + * Log checkpoint end and update state kept about the last + * checkpoint location. Send a trace message *before* the + * checkpoint end log entry. This is done so that the normal trace + * message doesn't affect the time-based isRunnable() calculation, + * which only issues a checkpoint if a log record has been written + * since the last checkpoint. 
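+             * (The trace message can itself be written to the log, so if it
+             * were logged after CkptEnd, lastUsedLsn would always differ
+             * from lastCheckpointEnd and the time-based isRunnable test
+             * would always see new data.)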
+ */ + trace(envImpl, invokingSource, true); + traced = true; + + lastCheckpointInterval = DbLsn.getNoCleaningDistance( + checkpointStart, lastCheckpointStart, logFileMax); + + /* + * We must flush and fsync to ensure that cleaned files are not + * referenced. This also ensures that this checkpoint is not wasted + * if we crash. + */ + lastCheckpointEnd = logManager.logForceFlush( + endEntry, true /*fsyncRequired*/, + ReplicationContext.NO_REPLICATE); + + lastCheckpointStart = checkpointStart; + + success = true; + cleaner.updateFilesAtCheckpointEnd(cleanerState); + + } catch (DiskLimitException e) { + + LoggerUtils.logMsg( + envImpl.getLogger(), envImpl, Level.WARNING, + "Ckpt id=" + checkpointId + " success=" + success + + " aborted because of disk limit violation: " + e); + + if (!invokedFromDaemon) { + throw e; + } + + } catch (DatabaseException e) { + LoggerUtils.traceAndLogException(envImpl, "Checkpointer", + "doCheckpoint", "checkpointId=" + + checkpointId, e); + throw e; + } finally { + + /* + * Reset the checkpoint state so evictor activity knows there's no + * further requirement for provisional logging. SR 11163. + */ + checkpointDirtyMap.reset(); + + if (!traced) { + trace(envImpl, invokingSource, success); + } + } + } + + private void trace(EnvironmentImpl envImpl, + String invokingSource, + boolean success ) { + + final StringBuilder sb = new StringBuilder(); + sb.append("Checkpoint ").append(checkpointId); + sb.append(": source=" ).append(invokingSource); + sb.append(" success=").append(success); + sb.append(" nFullINFlushThisRun="); + sb.append(flushStats.nFullINFlushThisRun); + sb.append(" nDeltaINFlushThisRun="); + sb.append(flushStats.nDeltaINFlushThisRun); + LoggerUtils.logMsg(logger, envImpl, Level.CONFIG, sb.toString()); + } + + /** + * Flush a given database to disk. Like checkpoint, log from the bottom + * up so that parents properly represent their children. + */ + public void syncDatabase(EnvironmentImpl envImpl, + DatabaseImpl dbImpl, + boolean flushLog) { + if (envImpl.isReadOnly()) { + return; + } + + envImpl.checkDiskLimitViolation(); + + final DirtyINMap dirtyMap = new DirtyINMap(envImpl); + final FlushStats fstats = new FlushStats(); + + try { + /* Find the dirty set. */ + dirtyMap.selectDirtyINsForDbSync(dbImpl); + + if (dirtyMap.getNumEntries() > 0) { + /* Write all dirtyINs out.*/ + flushDirtyNodes( + envImpl, dirtyMap, DbLsn.NULL_LSN /*ckptStart*/, + false /*highPriority*/, fstats); + + /* Make changes durable. [#15254] */ + if (flushLog) { + envImpl.getLogManager().flushSync(); + } + } + } catch (DiskLimitException e) { + throw e; + } catch (DatabaseException e) { + LoggerUtils.traceAndLogException + (envImpl, "Checkpointer", "syncDatabase", + "of " + dbImpl.getDebugName(), e); + throw e; + } finally { + dirtyMap.reset(); + } + } + + /* For unit testing only. */ + public static void setMaxFlushLevelHook(TestHook hook) { + maxFlushLevelHook = hook; + } + + /* For unit testing only. */ + public static void setBeforeFlushHook(TestHook hook) { + beforeFlushHook = hook; + } + + /** + * Flush the nodes in order, from the lowest level to highest level. As a + * flush dirties its parent, add it to the dirty map, thereby cascading the + * writes up the tree. If flushAll wasn't specified, we need only cascade + * up to the highest level set at the start of checkpointing. + * + * Note that all but the top level INs are logged provisionally. 
That's + * because we don't need to process lower INs during recovery because the + * higher INs will end up pointing at them. + */ + private static void flushDirtyNodes(EnvironmentImpl envImpl, + DirtyINMap dirtyMap, + long checkpointStart, + boolean highPriority, + FlushStats fstats) { + + final DbTree dbTree = envImpl.getDbTree(); + final Map dbCache = new HashMap<>(); + + try { + while (dirtyMap.getNumLevels() > 0) { + + /* + * Work on one level's worth of nodes in ascending level order. + */ + final Integer currentLevel = dirtyMap.getLowestLevelSet(); + final int currentLevelVal = currentLevel; + + /* + * Flush MapLNs just prior to flushing the first level of the + * mapping tree. Only flush a database if it has not already + * been flushed since checkpoint start. + */ + if (currentLevelVal == IN.DBMAP_LEVEL) { + dirtyMap.flushMapLNs(checkpointStart); + } + + /* Flush the nodes at the current level. */ + while (true) { + final CheckpointReference targetRef = + dirtyMap.removeNextNode(currentLevel); + + if (targetRef == null) { + break; + } + + envImpl.checkDiskLimitViolation(); + + /* + * Check to make sure the DB was not deleted after putting + * it in the dirty map, and prevent the DB from being + * deleted while we're working with it. + */ + final DatabaseImpl db = dbTree.getDb( + targetRef.dbId, -1 /*lockTimeout*/, dbCache); + + if (db != null && !db.isDeleted()) { + + /* Flush if we're below maxFlushLevel. */ + final int maxFlushLevel = + dirtyMap.getHighestFlushLevel(db); + + if (currentLevelVal <= maxFlushLevel) { + + flushIN( + db, targetRef, dirtyMap, maxFlushLevel, + highPriority, fstats, true /*allowLogSubtree*/); + + /* + * Sleep if background read/write limit was + * exceeded. + */ + envImpl.sleepAfterBackgroundIO(); + } + } + + /* + * If the environment was invalidated by other activity, + * get out of this loop, and re-throw the invalidating + * exception to indicate that the checkpoint did not + * succeed. + */ + envImpl.checkIfInvalid(); + } + + /* We're done with this level. */ + dirtyMap.removeLevel(currentLevel); + } + } finally { + dbTree.releaseDbs(dbCache); + } + + /* + * Do not flush FileSummaryLNs/MapLNs (do not call + * UtilizationProfile.flushLocalTracker) here because that flushing is + * already done by the checkpoint. + */ + } + + /** + * Flush the target IN. + * + * Where applicable, also attempt to flush the subtree that houses this + * target, which means we flush the siblings of this target to promote + * better cleaning throughput. The problem lies in the fact that + * provisionally logged nodes are not available for log cleaning until + * their parent is logged non-provisionally. On the other hand, we want to + * log nodes in provisional mode as much as possible, both for recovery + * performance, and for correctness to avoid fetches against cleaned log + * files. (See [#16037].) These conflicting goals are reconciled by + * flushing nodes in subtree grouping, because writing the non-provisional + * parent of a set of provisionally written nodes frees the cleaner to work + * on that set of provisional nodes as soon as possible. 
For example, if a + * tree consists of: + * + * INa + * +------+-------+ + * INb INc + * +-----+----+ +-----+ + * BINd BINe BINf BINg BINh + * + * It is more efficient for cleaning throughput to log in this order: + * BINd, BINe, BINf, INb, BINg, BINh, INc, INa + * rather than: + * BINd, BINe, BINf, BINg, BINh, INb, INc, INa + * + * Suppose the subtree in question is INb->{BINd, BINe, BINf} + * + * Suppose we see BINd in the dirty map first, before BINe and BINf. + * - flushIN(BINd) is called + * - we fetch and latch its parent, INb + * + * If this is a high priority checkpoint, we'll hold the INb latch across + * the time it takes to flush all three children. In flushIN(BINd), we + * walk through INb, create a local map of all the siblings that can be + * found in the dirty map, and then call logSiblings with that local map. + * Then we'll write out INb. + * + * If high priority is false, we will not hold the INb latch across + * multiple IOs. Instead, we + * - write BINd out, using logSiblings + * - while still holding the INb latch, we create a list of dirty siblings + * - release the INb latch + * - call flushIN() recursively on each entry in the local sibling map, + * which will result in a search and write of each sibling. These + * recursive calls to flushIN are called with the allowLogSubtree + * parameter of false to halt the recursion and prevent a repeat of the + * sibling examination. + * - write INb + */ + private static void flushIN(final DatabaseImpl db, + final CheckpointReference targetRef, + final DirtyINMap dirtyMap, + final int maxFlushLevel, + final boolean highPriority, + final FlushStats fstats, + final boolean allowLogSubtree) { + + final EnvironmentImpl envImpl = db.getEnv(); + final Tree tree = db.getTree(); + final int targetLevel = targetRef.nodeLevel; + + /* Call test hook when we reach the max level. */ + assert (targetLevel < maxFlushLevel) || + TestHookExecute.doHookIfSet(maxFlushLevelHook); + + if (targetRef.isRoot) { + + final RootFlusher flusher = + new RootFlusher(db, targetRef.nodeId); + + tree.withRootLatchedExclusive(flusher); + + /* + * Update the tree's owner, whether it's the env root or the + * db-mapping tree. + */ + if (flusher.getFlushed()) { + DbTree dbTree = envImpl.getDbTree(); + dbTree.modifyDbRoot(db); + fstats.nFullINFlushThisRun++; + fstats.nFullINFlush++; + } + + /* + * If this target isn't the root anymore, we'll have to handle it + * like a regular node. + */ + if (flusher.stillRoot()) { + return; + } + } + + /* + * The following applies to two cases: + * (1) the target was not ever the root + * (2) the target was the root, when the checkpoint dirty set was + * assembled but is not the root now. + */ + final SearchResult result = tree.getParentINForChildIN( + -1 /*nodeId*/, targetRef.treeKey, + targetRef.nodeLevel /*targetLevel*/, + targetRef.nodeLevel + 1 /*exclusiveLevel*/, + false /*requireExactMatch*/, false /*doFetch*/, + CacheMode.UNCHANGED, null /*trackingList*/); + + /* + * If no possible parent is found, the compressor may have deleted + * this item before we got to processing it. (Although it seems this + * cannot currently happen since we never delete the root node.) + */ + if (result.parent == null) { + return; + } + + final IN parent = result.parent; + final int index = result.index; + final int parentLevel = parent.getLevel(); + final CheckpointReference parentRef; + + /* List of siblings to log after releasing the parent latch. 
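+         * Non-null only when the parent latch must not be held across the
+         * sibling IOs, i.e., when highPriority is false or the DB is
+         * deferred-write; see the assignment below.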
*/ + final List logSiblingsSeparately; + + try { + /* + * If bottomLevelTarget is true, the parent IN contains bottom + * level BINs. The masking is used to normalize the level for + * ordinary DBs and the mapping tree DB. + */ + final boolean bottomLevelTarget = + ((parentLevel & IN.LEVEL_MASK) == 2); + + /* + * INs at the max flush level are always non-provisional and + * INs at the bottom level (when this is not also the max flush + * level) are always provisional. In between INs are + * provisional BEFORE_CKPT_END (see Provisional). + */ + final Provisional provisional; + if (targetLevel >= maxFlushLevel) { + provisional = Provisional.NO; + } else if (bottomLevelTarget) { + provisional = Provisional.YES; + } else { + provisional = Provisional.BEFORE_CKPT_END; + } + + /* + * If we didn't reach the target level, a child wasn't resident + * and there is nothing to log at this level. To be on the safe + * side, we'll put the parent into the dirty set to be logged when + * that level is processed. + * + * Only do this if the parent we found is at a higher level than + * the child. This ensures that the non-exact search does not + * find a sibling rather than a parent. [#11555] + */ + if (!result.exactParentFound) { + if (parentLevel > targetLevel) { + dirtyMap.addIN( + parent, -1 /*index*/, + false /*updateFlushLevels*/, + true /*updateMemoryBudget*/); + } + return; + } + + /* + * We found the parent. Add it unconditionally to the dirty map. We + * must make sure that every IN that was selected for the + * checkpointer's dirty IN set at the beginning of checkpoint is + * written into the log and can be properly accessed from + * ancestors. Eviction or a split may have written out a member of + * this dirty set before the checkpointer got to it. See [#10249]. + */ + assert parentLevel == targetLevel + 1; + + dirtyMap.addIN( + parent, -1 /*index*/, + false /*updateFlushLevels*/, + true /*updateMemoryBudget*/); + + /* + * Determine whether our search found the IN identified by either + * targetRef.nodeId or targetRef.lsn. If there is not a match, then + * the node was deleted, logged or split since creating the + * reference. + * + * For a non-DW DB, targetRef.lsn will be not null and we match on + * it. If the LSN has changed then of course the node was logged, + * and possibly split, and we will not log this target here. + * + * For a DW DB we also match on LSN if it is non-null. If the LSN + * is null then the reference was created for a never-logged IN and + * targetRef.nodeId >= 0. In that case we match on the nodeId. If + * the LSN or nodeId doesn't match, there must have been a split, + * and we will not log this target here. However, because splits + * are not logged for DW, this is not sufficient to cause both + * siblings that were part of split to be logged, when one node was + * added to the dirty map. We account for this when the parent is + * logged by calling logDirtyChildren. This approach relies on the + * fact that a split will dirty the parent. + * + * TODO: + * Why not always call logDirtyIN for a DW IN, whether or not the + * LSN or nodeId matches? logDirtyChildren is going to log it + * anyway if it is dirty. + */ + if (targetRef.lsn != DbLsn.NULL_LSN) { + + if (targetRef.lsn != parent.getLsn(index)) { + return; + } + } else { + assert targetRef.nodeId >= 0; + assert db.isDeferredWriteMode(); + + final IN target = (IN) parent.getTarget(index); + + if (target == null || + targetRef.nodeId != target.getNodeId()) { + return; + } + } + + /* Log the target, if dirty. 
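+             * (If the child turns out to be clean, logDirtyIN does nothing
+             * for the child, but the parent must still be logged; see the
+             * logDirtyIN javadoc.)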
*/ + logDirtyIN(envImpl, parent, index, provisional, fstats); + + /* + * We will log a sub-tree when the target is at the bottom level + * and this is not a recursive call to flushIN during sub-tree + * logging. Return if we are only logging the target node here. + */ + if (!bottomLevelTarget || !allowLogSubtree) { + return; + } + + /* + * Log sub-tree siblings with the latch held when highPriority + * is configured and this is not a DW DB. For a DW DB, dirty LNs + * are logged for each BIN. If we were to log a DW sub-tree with + * the parent latch held, the amount of logging may cause the latch + * to be held for too long a period. + */ + if (highPriority && !db.isDurableDeferredWrite()) { + logSiblingsSeparately = null; + } else { + logSiblingsSeparately = new ArrayList<>(); + } + + for (int i = 0; i < parent.getNEntries(); i += 1) { + + if (i == index) { + continue; + } + + final IN child = (IN) parent.getTarget(i); + final long childId = (child != null) ? child.getNodeId() : -1; + final long childLsn = parent.getLsn(i); + + final CheckpointReference childRef = + dirtyMap.removeNode(targetLevel, childLsn, childId); + + if (childRef == null) { + continue; + } + + if (logSiblingsSeparately != null) { + logSiblingsSeparately.add(childRef); + } else { + logDirtyIN(envImpl, parent, i, provisional, fstats); + } + } + + /* Get parentRef before releasing the latch. */ + if (parentLevel <= maxFlushLevel) { + parentRef = dirtyMap.removeNode( + parentLevel, parent.getLastLoggedLsn(), + parent.getNodeId()); + } else { + parentRef = null; + } + } finally { + parent.releaseLatch(); + } + + /* + * If highPriority is false, we don't hold the latch while logging + * the bottom level siblings. We log them here with flushIN, + * performing a separate search for each one, after releasing the + * parent latch above. + */ + if (logSiblingsSeparately != null) { + for (final CheckpointReference childRef : logSiblingsSeparately) { + flushIN( + db, childRef, dirtyMap, maxFlushLevel, highPriority, + fstats, false /*allowLogSubtree*/); + } + } + + /* + * Log the sub-tree parent, which will be logged non-provisionally, + * in order to update cleaner utilization. This must be done with + * flushIN after releasing the parent latch above, since we must search + * and acquire the grandparent latch. + */ + if (parentRef != null) { + flushIN( + db, parentRef, dirtyMap, maxFlushLevel, highPriority, fstats, + false /*allowLogSubtree*/); + } + } + + /** + * Note that if this method is called, the parent must also be logged. This + * is true even if this method finds that the child is not dirty. In that + * case the child has already been flushed (e.g., by eviction) and the + * parent must be logged according to the rule for max flush level. + */ + private static void logDirtyIN( + final EnvironmentImpl envImpl, + final IN parent, + final int index, + final Provisional provisional, + final FlushStats fstats) { + + final IN child = (IN) parent.getTarget(index); + final long newLsn; + final boolean isBIN; + final boolean isDelta; + + if (child != null) { + child.latch(CacheMode.UNCHANGED); + try { + if (!child.getDirty()) { + return; + } + + if (child.getDatabase().isDurableDeferredWrite()) { + + /* + * Find dirty descendants to avoid logging nodes with + * never-logged children. See [#13936] and + * IN.logDirtyChildren for description of the case. + * + * Note that we must log both dirty and never-logged + * descendants to be sure to have a consistent view of + * the split. 
If we didn't, we could end up with the
+                     * post-split version of a new sibling and the
+                     * pre-split version of a split sibling in the log,
+                     * which could result in a recovery where descendants
+                     * are incorrectly duplicated, because they are in both
+                     * the pre-split version of the split sibling and the
+                     * post-split version of the new sibling.
+                     */
+                    child.logDirtyChildren();
+                }
+
+                newLsn = child.log(
+                    true /*allowDeltas*/, provisional,
+                    true /*backgroundIO*/, parent);
+
+                assert (newLsn != DbLsn.NULL_LSN);
+
+                isBIN = child.isBIN();
+                isDelta = (newLsn == child.getLastDeltaLsn());
+            } finally {
+                child.releaseLatch();
+            }
+        } else {
+            final OffHeapCache ohCache = envImpl.getOffHeapCache();
+
+            final INLogEntry<BIN> logEntry =
+                ohCache.createBINLogEntryForCheckpoint(parent, index);
+
+            if (logEntry == null) {
+                return;
+            }
+
+            isBIN = true;
+            isDelta = logEntry.isBINDelta();
+
+            newLsn = IN.logEntry(
+                logEntry, provisional, true /*backgroundIO*/, parent);
+
+            ohCache.postBINLog(parent, index, logEntry, newLsn);
+        }
+
+        parent.updateEntry(index, newLsn, VLSN.NULL_VLSN_SEQUENCE, 0);
+
+        if (isDelta) {
+            fstats.nDeltaINFlushThisRun++;
+            fstats.nDeltaINFlush++;
+        } else {
+            fstats.nFullINFlushThisRun++;
+            fstats.nFullINFlush++;
+            if (isBIN) {
+                fstats.nFullBINFlush++;
+                fstats.nFullBINFlushThisRun++;
+            }
+        }
+    }
+
+    /*
+     * RootFlusher lets us write out the root IN within the root latch.
+     */
+    private static class RootFlusher implements WithRootLatched {
+        private final DatabaseImpl db;
+        private boolean flushed;
+        private boolean stillRoot;
+        private final long targetNodeId;
+
+        RootFlusher(final DatabaseImpl db,
+                    final long targetNodeId) {
+            this.db = db;
+            flushed = false;
+            this.targetNodeId = targetNodeId;
+            stillRoot = false;
+        }
+
+        /**
+         * Flush the rootIN if dirty.
+         */
+        @Override
+        public IN doWork(ChildReference root) {
+
+            if (root == null) {
+                return null;
+            }
+
+            IN rootIN = (IN) root.fetchTarget(db, null);
+            rootIN.latch(CacheMode.UNCHANGED);
+            try {
+                if (rootIN.getNodeId() == targetNodeId) {
+
+                    /*
+                     * Find dirty descendants to avoid logging nodes with
+                     * never-logged children. See [#13936]
+                     */
+                    if (rootIN.getDatabase().isDurableDeferredWrite()) {
+                        rootIN.logDirtyChildren();
+                    }
+
+                    /*
+                     * stillRoot handles the situation where the root was
+                     * split after it was placed in the checkpointer's dirty
+                     * set.
+                     */
+                    stillRoot = true;
+
+                    if (rootIN.getDirty()) {
+                        long newLsn = rootIN.log();
+                        root.setLsn(newLsn);
+                        flushed = true;
+                    }
+                }
+            } finally {
+                rootIN.releaseLatch();
+            }
+            return null;
+        }
+
+        boolean getFlushed() {
+            return flushed;
+        }
+
+        boolean stillRoot() {
+            return stillRoot;
+        }
+    }
+
+    /*
+     * CheckpointReferences are used to identify nodes that must be flushed
+     * as part of the checkpoint. We don't keep an actual reference to the
+     * node because that prevents nodes from being GC'ed during checkpoint.
+     *
+     * Using a checkpointReference introduces a window between the point when
+     * the checkpoint dirty set is created and when the node is flushed. Some
+     * of the fields saved in the reference are immutable: db, nodeId. The
+     * others are not and we have to handle potential change:
+     *
+     * isRoot: it's possible for isRoot to go from true->false, but not
+     *         false->true. True->false is handled by the flushIN method
+     *         by finding the root and checking if it is the target.
+     * treeKey: This can change only in the event of a split.
If it does, there + * is the chance that the checkpointer will find the wrong node to + * flush, but that's okay because the split guarantees flushing to + * the root, so the target will be properly logged within the + * checkpoint period. + * + * The class and ctor are public for the Sizeof program. + */ + public static class CheckpointReference { + final DatabaseId dbId; + final long nodeId; + final int nodeLevel; + final boolean isRoot; + final byte[] treeKey; + final long lsn; + + CheckpointReference(final DatabaseId dbId, + final long nodeId, + final int nodeLevel, + final boolean isRoot, + final byte[] treeKey, + final long lsn) { + this.dbId = dbId; + this.nodeId = nodeId; + this.nodeLevel = nodeLevel; + this.isRoot = isRoot; + this.treeKey = treeKey; + this.lsn = lsn; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof CheckpointReference)) { + return false; + } + + CheckpointReference other = (CheckpointReference) o; + return nodeId == other.nodeId; + } + + @Override + public int hashCode() { + return (int) nodeId; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("db=").append(dbId); + sb.append(" nodeId=").append(nodeId); + return sb.toString(); + } + } + + /** + * A struct to hold log flushing stats for checkpoint and database sync. + */ + public static class FlushStats { + + public long nFullINFlush; + public long nFullBINFlush; + public long nDeltaINFlush; + long nFullINFlushThisRun; + long nFullBINFlushThisRun; + long nDeltaINFlushThisRun; + + /* For future addition to stats: + private int nAlreadyEvictedThisRun; + */ + + /* Reset per-run counters. */ + void resetPerRunCounters() { + nFullINFlushThisRun = 0; + nFullBINFlushThisRun = 0; + nDeltaINFlushThisRun = 0; + /* nAlreadyEvictedThisRun = 0; -- for future */ + } + } +} diff --git a/src/com/sleepycat/je/recovery/DirtyINMap.java b/src/com/sleepycat/je/recovery/DirtyINMap.java new file mode 100644 index 0000000..eb1c4fb --- /dev/null +++ b/src/com/sleepycat/je/recovery/DirtyINMap.java @@ -0,0 +1,836 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.logging.Level;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.INList;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.log.Provisional;
+import com.sleepycat.je.recovery.Checkpointer.CheckpointReference;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.tree.MapLN;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.Pair;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/**
+ * Manages the by-level map of checkpoint references that are to be flushed by
+ * a checkpoint or Database.sync, the MapLNs to be flushed, the highest level
+ * by database to be flushed, and the state of the checkpoint.
+ *
+ * A single instance of this class is used for checkpoints and has the same
+ * lifetime as the checkpointer and environment. An instance per Database.sync
+ * is created as needed. Only one checkpoint can occur at a time, but multiple
+ * syncs may occur concurrently with each other and with the checkpoint.
+ *
+ * The methods in this class are synchronized to protect internal state from
+ * concurrent access by the checkpointer and eviction, and to coordinate state
+ * changes between the two. Eviction must participate in the checkpoint so
+ * that INs cascade up properly; see coordinateEvictionWithCheckpoint.
+ *
+ * When INs are latched along with synchronization on a DirtyINMap, the order
+ * must be: 1) IN latches and 2) synchronize on DirtyINMap. For example,
+ * the evictor latches the parent and child IN before calling the synchronized
+ * method coordinateEvictionWithCheckpoint, and selectDirtyINsForCheckpoint
+ * latches the IN before calling the synchronized method selectForCheckpoint.
+ */
+class DirtyINMap {
+
+    static final boolean DIRTY_SET_DEBUG_TRACE = false;
+
+    private final EnvironmentImpl envImpl;
+    private final SortedMap<Integer,
+                            Pair<Map<Long, CheckpointReference>,
+                                 Map<Long, CheckpointReference>>> levelMap;
+    private int numEntries;
+    private final Set<DatabaseId> mapLNsToFlush;
+    private final Map<DatabaseImpl, Integer> highestFlushLevels;
+
+    enum CkptState {
+        /** No checkpoint in progress, or is used for Database.sync. */
+        NONE,
+        /** Checkpoint started but dirty map is not yet complete. */
+        DIRTY_MAP_INCOMPLETE,
+        /** Checkpoint in progress and dirty map is complete. */
+        DIRTY_MAP_COMPLETE,
+    };
+
+    private CkptState ckptState;
+    private boolean ckptFlushAll;
+    private boolean ckptFlushExtraLevel;
+
+    DirtyINMap(EnvironmentImpl envImpl) {
+        this.envImpl = envImpl;
+        levelMap = new TreeMap<>();
+        numEntries = 0;
+        mapLNsToFlush = new HashSet<>();
+        highestFlushLevels = new IdentityHashMap<>();
+        ckptState = CkptState.NONE;
+    }
+
+    /**
+     * Coordinates an eviction with an in-progress checkpoint and returns
+     * whether or not provisional logging is needed.
+     *
+     * @return the provisional status to use for logging the target.
+     */
+    synchronized Provisional coordinateEvictionWithCheckpoint(
+        final DatabaseImpl db,
+        final int targetLevel,
+        final IN parent) {
+
+        /*
+         * If the checkpoint is in-progress and has not finished dirty map
+         * construction, we must add the parent to the dirty map.
That way the + * dirtiness and logging will cascade up in the same way as if the + * target were not evicted, and instead were encountered during dirty + * map construction. We don't want the evictor's actions to introduce + * an IN in the log that has not cascaded up properly. + * + * Note that we add the parent even if it is not dirty here. It will + * become dirty after the target child is logged, but that hasn't + * happened yet. + * + * We do not add the parent if it is null, which is the case when the + * root is being evicted. + */ + if (ckptState == CkptState.DIRTY_MAP_INCOMPLETE && + parent != null) { + + /* Add latched parent IN to dirty map. */ + selectForCheckpoint(parent, -1 /*index*/); + + /* Save dirty/temp DBs for later. */ + saveMapLNsToFlush(parent); + } + + /* + * The evictor has to log provisionally in three cases: + * + * 1 - The eviction target is part of a deferred write database. + */ + if (db.isDeferredWriteMode()) { + return Provisional.YES; + } + + /* + * 2 - The checkpoint is in-progress and has not finished dirty map + * construction, and the target is not the root. The parent IN has + * been added to the dirty map, so we know the child IN is at a + * level below the max flush level. + */ + if (ckptState == CkptState.DIRTY_MAP_INCOMPLETE && + parent != null) { + return Provisional.YES; + } + + /* + * 3 - The checkpoint is in-progress and has finished dirty map + * construction, and is at a level above the eviction target. + */ + if (ckptState == CkptState.DIRTY_MAP_COMPLETE && + targetLevel < getHighestFlushLevel(db)) { + return Provisional.YES; + } + + /* Otherwise, log non-provisionally. */ + return Provisional.NO; + } + + /** + * Coordinates a split with an in-progress checkpoint. + * + * TODO: + * Is it necessary to perform MapLN flushing for nodes logged by a split + * (and not just the new sibling)? + * + * @param newSibling the sibling IN created by the split. + */ + void coordinateSplitWithCheckpoint(final IN newSibling) { + + assert newSibling.isLatchExclusiveOwner(); + + /* + * If the checkpoint is in-progress and has not finished dirty map + * construction, we must add the BIN children of the new sibling to the + * dirty map. The new sibling will be added to the INList but it may or + * may not be seen by the in-progress INList iteration, and we must + * ensure that its dirty BIN children are logged by the checkpoint. + * + * Note that we cannot synchronize on 'this' before calling + * selectDirtyBINChildrenForCheckpoint, since it latches BIN children. + * IN latching must come before synchronization on 'this'. Eventually + * after latching the IN, selectForCheckpoint is called , which is + * synchronized and checks for ckptState == DIRTY_MAP_INCOMPLETE. + */ + selectDirtyBINChildrenForCheckpoint(newSibling); + } + + /** + * Must be called before starting a checkpoint, and must not be called for + * Database.sync. Updates memory budget and sets checkpoint state. + */ + synchronized void beginCheckpoint(boolean flushAll, + boolean flushExtraLevel) { + assert levelMap.isEmpty(); + assert mapLNsToFlush.isEmpty(); + assert highestFlushLevels.isEmpty(); + assert numEntries == 0; + assert ckptState == CkptState.NONE; + ckptState = CkptState.DIRTY_MAP_INCOMPLETE; + ckptFlushAll = flushAll; + ckptFlushExtraLevel = flushExtraLevel; + } + + /** + * Must be called after a checkpoint or Database.sync is complete. Updates + * memory budget and clears checkpoint state. 
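+     *
+     * (Called from the finally blocks of Checkpointer.doCheckpoint and
+     * Checkpointer.syncDatabase.)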
+     */
+    synchronized void reset() {
+        removeCostFromMemoryBudget();
+        levelMap.clear();
+        mapLNsToFlush.clear();
+        highestFlushLevels.clear();
+        numEntries = 0;
+        ckptState = CkptState.NONE;
+    }
+
+    /**
+     * Scan the INList for all dirty INs, excluding temp DB INs. Save them in
+     * a tree-level ordered map for level ordered flushing.
+     *
+     * Take this opportunity to recalculate the memory budget tree usage.
+     *
+     * This method itself is not synchronized to allow concurrent eviction.
+     * Synchronization is performed on a per-IN basis to protect the data
+     * structures here, and eviction can occur in between INs.
+     */
+    void selectDirtyINsForCheckpoint() {
+        assert ckptState == CkptState.DIRTY_MAP_INCOMPLETE;
+
+        /*
+         * Opportunistically recalculate the INList memory budget while
+         * traversing the entire INList.
+         */
+        final INList inMemINs = envImpl.getInMemoryINs();
+        inMemINs.memRecalcBegin();
+
+        boolean completed = false;
+        try {
+            for (IN in : inMemINs) {
+                in.latchShared(CacheMode.UNCHANGED);
+                try {
+                    if (!in.getInListResident()) {
+                        continue;
+                    }
+
+                    inMemINs.memRecalcIterate(in);
+
+                    /* Add dirty UIN to dirty map. */
+                    if (in.getDirty() && !in.isBIN()) {
+                        selectForCheckpoint(in, -1 /*index*/);
+                    }
+
+                    /* Add dirty level 2 children to dirty map. */
+                    selectDirtyBINChildrenForCheckpoint(in);
+
+                    /* Save dirty/temp DBs for later. */
+                    saveMapLNsToFlush(in);
+                } finally {
+                    in.releaseLatch();
+                }
+
+                /* Call test hook after releasing latch. */
+                TestHookExecute.doHookIfSet(
+                    Checkpointer.examineINForCheckpointHook, in);
+            }
+            completed = true;
+        } finally {
+            inMemINs.memRecalcEnd(completed);
+        }
+
+        /*
+         * Finish filling out the highestFlushLevels map. For each entry in
+         * highestFlushLevels that has a null level Integer value (set by
+         * selectForCheckpoint), we call DbTree.getHighestLevel and replace
+         * the null level. We must call DbTree.getHighestLevel, which latches
+         * the root, only when not synchronized, to avoid breaking the
+         * synchronization rules described in the class comment. This must be
+         * done in several steps to follow the synchronization rules, yet
+         * protect the highestFlushLevels using synchronization.
+         */
+        final Map<DatabaseImpl, Integer> maxFlushDbs = new HashMap<>();
+
+        /* Copy entries with a null level. */
+        synchronized (this) {
+            for (DatabaseImpl db : highestFlushLevels.keySet()) {
+
+                if (highestFlushLevels.get(db) == null) {
+                    maxFlushDbs.put(db, null);
+                }
+            }
+        }
+
+        /* Call getHighestLevel without synchronization. */
+        final DbTree dbTree = envImpl.getDbTree();
+
+        for (Map.Entry<DatabaseImpl, Integer> entry :
+             maxFlushDbs.entrySet()) {
+
+            entry.setValue(dbTree.getHighestLevel(entry.getKey()));
+        }
+
+        /* Fill in levels in highestFlushLevels. */
+        synchronized (this) {
+
+            for (Map.Entry<DatabaseImpl, Integer> entry :
+                 maxFlushDbs.entrySet()) {
+
+                highestFlushLevels.put(entry.getKey(), entry.getValue());
+            }
+        }
+
+        /* Complete this phase of the checkpoint. */
+        synchronized (this) {
+            addCostToMemoryBudget();
+            ckptState = CkptState.DIRTY_MAP_COMPLETE;
+        }
+
+        if (DIRTY_SET_DEBUG_TRACE) {
+            traceDirtySet();
+        }
+    }
+
+    /**
+     * Add the IN to the dirty map if dirty map construction is in progress
+     * and the IN is not in a temp DB. If added, the highest flush level map
+     * is also updated.
+     */
+    synchronized void selectForCheckpoint(final IN in, final int index) {
+
+        /*
+         * Must check state while synchronized. The state may not be
+         * DIRTY_MAP_INCOMPLETE when called from eviction or a split.
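+         * In those cases the method must simply return without adding the
+         * IN, making the call a no-op.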
+         */
+        if (ckptState != CkptState.DIRTY_MAP_INCOMPLETE) {
+            return;
+        }
+
+        final DatabaseImpl db = in.getDatabase();
+
+        if (db.isTemporary()) {
+            return;
+        }
+
+        addIN(in, index,
+              true /*updateFlushLevels*/,
+              false /*updateMemoryBudget*/);
+    }
+
+    /**
+     * Adds the dirty child BINs of the 'in' if dirty map construction is in
+     * progress and the IN is not in a temp DB.
+     *
+     * Main cache resident BINs are added when their parent is encountered in
+     * the INList iteration, rather than when the BIN is encountered in the
+     * iteration. This is because a BIN can transition between main and
+     * off-heap caches during the construction of the dirty map. When a BIN
+     * is loaded from off-heap and added to the main cache, it is added to
+     * the INList at that time, and such a BIN may not be encountered in the
+     * iteration. (ConcurrentHashMap iteration only guarantees that nodes
+     * will be encountered if they are present when the iterator is created.)
+     * So if we relied on encountering BINs in the iteration, some might be
+     * missed.
+     *
+     * Note that this method is not synchronized because it latches the BIN
+     * children. IN latching must come before synchronizing on 'this'. The
+     * selectForCheckpoint method, which is called after latching the BIN, is
+     * synchronized.
+     */
+    private void selectDirtyBINChildrenForCheckpoint(final IN in) {
+
+        if (in.getNormalizedLevel() != 2) {
+            return;
+        }
+
+        for (int i = 0; i < in.getNEntries(); i += 1) {
+
+            final IN bin = (IN) in.getTarget(i);
+
+            if (bin != null) {
+
+                /* When called via split a child may already be latched. */
+                final boolean latchBinHere = !bin.isLatchOwner();
+
+                if (latchBinHere) {
+                    bin.latchShared(CacheMode.UNCHANGED);
+                }
+
+                try {
+                    if (bin.getDirty()) {
+                        selectForCheckpoint(bin, -1);
+                    }
+                } finally {
+                    if (latchBinHere) {
+                        bin.releaseLatch();
+                    }
+                }
+            } else {
+                if (in.isOffHeapBINDirty(i)) {
+                    selectForCheckpoint(in, i);
+                }
+            }
+        }
+    }
+
+    private void updateFlushLevels(Integer level,
+                                   final DatabaseImpl db,
+                                   final boolean isBIN,
+                                   final boolean isRoot) {
+
+        /*
+         * IN was added to the dirty map. Update the highest level seen
+         * for the database. Use one level higher when ckptFlushExtraLevel
+         * is set. When ckptFlushAll is set, use the maximum level for the
+         * database. Durable deferred-write databases must be synced, so
+         * also use the maximum level.
+         *
+         * Always flush at least one level above the bottom-most BIN level
+         * so that the BIN level is logged provisionally and the expense of
+         * processing BINs during recovery is avoided.
+         */
+        if (ckptFlushAll || db.isDurableDeferredWrite()) {
+            if (!highestFlushLevels.containsKey(db)) {
+
+                /*
+                 * Null is used as an indicator that getHighestLevel should
+                 * be called in selectDirtyINsForCheckpoint, when not
+                 * synchronized.
+                 */
+                highestFlushLevels.put(db, null);
+            }
+        } else {
+            if ((ckptFlushExtraLevel || isBIN) && !isRoot) {
+                /* Next level up in the same tree. */
+                level += 1;
+            }
+
+            final Integer highestLevelSeen = highestFlushLevels.get(db);
+
+            if (highestLevelSeen == null || level > highestLevelSeen) {
+                highestFlushLevels.put(db, level);
+            }
+        }
+    }
+
+    /**
+     * Scan the INList for all dirty INs for a given database. Arrange them
+     * in a level-sorted map for level ordered flushing.
+     *
+     * This method is not synchronized to allow concurrent eviction.
+     * Coordination between eviction and Database.sync is not required.
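+     *
+     * The typical calling sequence, as in Checkpointer.syncDatabase (a
+     * sketch with error handling omitted):
+     * <pre>
+     *    DirtyINMap dirtyMap = new DirtyINMap(envImpl);
+     *    try {
+     *        dirtyMap.selectDirtyINsForDbSync(dbImpl);
+     *        if (dirtyMap.getNumEntries() > 0) {
+     *            flushDirtyNodes(envImpl, dirtyMap, DbLsn.NULL_LSN,
+     *                            false, fstats);
+     *        }
+     *    } finally {
+     *        dirtyMap.reset();
+     *    }
+     * </pre>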
+ */ + void selectDirtyINsForDbSync(DatabaseImpl dbImpl) { + + assert ckptState == CkptState.NONE; + + final DatabaseId dbId = dbImpl.getId(); + + for (IN in : envImpl.getInMemoryINs()) { + if (in.getDatabaseId().equals(dbId)) { + in.latch(CacheMode.UNCHANGED); + try { + if (in.getInListResident() && in.getDirty()) { + addIN( + in, -1 /*index*/, + false /*updateFlushLevels*/, + false /*updateMemoryBudget*/); + } + } finally { + in.releaseLatch(); + } + } + } + + /* + * Create a single entry map that forces all levels of this DB to + * be flushed. + */ + highestFlushLevels.put( + dbImpl, envImpl.getDbTree().getHighestLevel(dbImpl)); + + /* Add the dirty map to the memory budget. */ + addCostToMemoryBudget(); + } + + synchronized int getHighestFlushLevel(DatabaseImpl db) { + + assert ckptState != CkptState.DIRTY_MAP_INCOMPLETE; + + /* + * This method is only called while flushing dirty nodes for a + * checkpoint or Database.sync, not for an eviction, so an entry for + * this database should normally exist. However, if the DB root (and + * DatabaseImpl) have been evicted since the highestFlushLevels was + * constructed, the new DatabaseImpl instance will not be present in + * the map. In this case, we do not need to checkpoint the IN and + * eviction should be non-provisional. + */ + Integer val = highestFlushLevels.get(db); + return (val != null) ? val : IN.MIN_LEVEL; + } + + synchronized int getNumLevels() { + return levelMap.size(); + } + + private synchronized void addCostToMemoryBudget() { + final MemoryBudget mb = envImpl.getMemoryBudget(); + final long cost = + ((long) numEntries) * MemoryBudget.CHECKPOINT_REFERENCE_SIZE; + mb.updateAdminMemoryUsage(cost); + } + + private synchronized void removeCostFromMemoryBudget() { + final MemoryBudget mb = envImpl.getMemoryBudget(); + final long cost = + ((long) numEntries) * MemoryBudget.CHECKPOINT_REFERENCE_SIZE; + mb.updateAdminMemoryUsage(0 - cost); + } + + /** + * Add a node unconditionally to the dirty map. + * + * @param in is the IN to add, or the parent of an off-heap IN to add when + * index >= 0. + * + * @param index is the index of the off-heap child to add, or -1 to add the + * 'in' itself. + * + * @param updateMemoryBudget if true then update the memory budget as the + * map is changed; if false then addCostToMemoryBudget must be called + * later. + */ + synchronized void addIN(final IN in, + final int index, + final boolean updateFlushLevels, + final boolean updateMemoryBudget) { + final Integer level; + final long lsn; + final long nodeId; + final boolean isRoot; + final byte[] idKey; + final boolean isBin; + + if (index >= 0) { + level = in.getLevel() - 1; + lsn = in.getLsn(index); + nodeId = -1; + isRoot = false; + idKey = in.getKey(index); + isBin = true; + } else { + level = in.getLevel(); + lsn = in.getLastLoggedLsn(); + nodeId = in.getNodeId(); + isRoot = in.isRoot(); + idKey = in.getIdentifierKey(); + isBin = in.isBIN(); + } + + final Map lsnMap; + final Map nodeMap; + + Pair, + Map> pairOfMaps = levelMap.get(level); + + if (pairOfMaps != null) { + lsnMap = pairOfMaps.first(); + nodeMap = pairOfMaps.second(); + } else { + /* + * We use TreeMap rather than HashMap because HashMap.iterator() is + * a slow way of getting the first element (see removeNextNode). 
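+             * TreeMap also makes the order in which removeNextNode consumes
+             * entries within a level deterministic, since they are returned
+             * in key order.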
+             */
+            lsnMap = new TreeMap<>();
+            nodeMap = new TreeMap<>();
+            pairOfMaps = new Pair<>(lsnMap, nodeMap);
+            levelMap.put(level, pairOfMaps);
+        }
+
+        final DatabaseImpl db = in.getDatabase();
+
+        final CheckpointReference ref = new CheckpointReference(
+            db.getId(), nodeId, level, isRoot, idKey, lsn);
+
+        final boolean added;
+
+        if (lsn != DbLsn.NULL_LSN) {
+            added = lsnMap.put(lsn, ref) == null;
+        } else {
+            assert nodeId >= 0;
+            assert db.isDeferredWriteMode();
+            added = nodeMap.put(nodeId, ref) == null;
+        }
+
+        if (!added) {
+            return;
+        }
+
+        numEntries++;
+
+        if (updateFlushLevels) {
+            updateFlushLevels(level, db, isBin, isRoot);
+        }
+
+        if (updateMemoryBudget) {
+            final MemoryBudget mb = envImpl.getMemoryBudget();
+            mb.updateAdminMemoryUsage(MemoryBudget.CHECKPOINT_REFERENCE_SIZE);
+        }
+    }
+
+    /**
+     * Get the lowest level currently stored in the map.
+     */
+    synchronized Integer getLowestLevelSet() {
+        return levelMap.firstKey();
+    }
+
+    /**
+     * Removes the set corresponding to the given level.
+     */
+    synchronized void removeLevel(Integer level) {
+        levelMap.remove(level);
+    }
+
+    synchronized CheckpointReference removeNode(final int level,
+                                                final long lsn,
+                                                final long nodeId) {
+
+        final Pair<Map<Long, CheckpointReference>,
+                   Map<Long, CheckpointReference>> pairOfMaps =
+            levelMap.get(level);
+
+        if (pairOfMaps == null) {
+            return null;
+        }
+
+        final Map<Long, CheckpointReference> lsnMap = pairOfMaps.first();
+        final Map<Long, CheckpointReference> nodeMap = pairOfMaps.second();
+
+        if (lsn != DbLsn.NULL_LSN) {
+            final CheckpointReference ref = lsnMap.remove(lsn);
+            if (ref != null) {
+                return ref;
+            }
+        }
+
+        if (nodeId >= 0) {
+            final CheckpointReference ref = nodeMap.remove(nodeId);
+            if (ref != null) {
+                return ref;
+            }
+        }
+
+        return null;
+    }
+
+    synchronized CheckpointReference removeNextNode(Integer level) {
+
+        final Pair<Map<Long, CheckpointReference>,
+                   Map<Long, CheckpointReference>> pairOfMaps =
+            levelMap.get(level);
+
+        if (pairOfMaps == null) {
+            return null;
+        }
+
+        final Map<Long, CheckpointReference> map;
+
+        if (!pairOfMaps.first().isEmpty()) {
+            map = pairOfMaps.first();
+        } else if (!pairOfMaps.second().isEmpty()) {
+            map = pairOfMaps.second();
+        } else {
+            return null;
+        }
+
+        final Iterator<Map.Entry<Long, CheckpointReference>> iter =
+            map.entrySet().iterator();
+
+        assert iter.hasNext();
+        final CheckpointReference ref = iter.next().getValue();
+        iter.remove();
+        return ref;
+    }
+
+    /**
+     * If the given IN is a BIN for the ID mapping database, saves all
+     * dirty/temp MapLNs contained in it.
+     */
+    private synchronized void saveMapLNsToFlush(IN in) {
+
+        if (in.isBIN() &&
+            in.getDatabase().getId().equals(DbTree.ID_DB_ID)) {
+
+            for (int i = 0; i < in.getNEntries(); i += 1) {
+                final MapLN ln = (MapLN) in.getTarget(i);
+
+                if (ln != null && ln.getDatabase().isCheckpointNeeded()) {
+                    mapLNsToFlush.add(ln.getDatabase().getId());
+                }
+            }
+        }
+    }
+
+    /**
+     * Flushes all saved dirty/temp MapLNs and clears the saved set.
+     *
+     * If dirty, a MapLN must be flushed at each checkpoint to record
+     * updated utilization info in the checkpoint interval. If it is a
+     * temporary DB, the MapLN must be flushed because all temp DBs must be
+     * encountered by recovery so they can be removed if they were not closed
+     * (and removed) by the user.
+     *
+     * This method is not synchronized because it takes the Btree root latch,
+     * and we must never latch something in the Btree after synchronizing on
+     * DirtyINMap; see class comments. Special synchronization is performed
+     * for accessing internal state; see below.
+     *
+     * @param checkpointStart start LSN of the checkpoint in progress. To
+     * reduce unnecessary logging, the MapLN is only flushed if it has not
+     * been written since that LSN.
+     */
+    void flushMapLNs(long checkpointStart) {
+
+        /*
+         * This method is called only while flushing dirty nodes for a
+         * checkpoint or Database.sync, not for an eviction, and mapLNsToFlush
+         * is not changed during the flushing phase. So we don't strictly need
+         * to synchronize while accessing mapLNsToFlush. However, for
+         * consistency and extra safety we always synchronize while accessing
+         * internal state.
+         */
+        final Set<DatabaseId> mapLNsCopy;
+
+        synchronized (this) {
+            assert ckptState != CkptState.DIRTY_MAP_INCOMPLETE;
+
+            if (mapLNsToFlush.isEmpty()) {
+                mapLNsCopy = null;
+            } else {
+                mapLNsCopy = new HashSet<>(mapLNsToFlush);
+                mapLNsToFlush.clear();
+            }
+        }
+
+        if (mapLNsCopy != null) {
+            final DbTree dbTree = envImpl.getDbTree();
+
+            for (DatabaseId dbId : mapLNsCopy) {
+                envImpl.checkDiskLimitViolation();
+                final DatabaseImpl db = dbTree.getDb(dbId);
+                try {
+                    if (db != null &&
+                        !db.isDeleted() &&
+                        db.isCheckpointNeeded()) {
+
+                        dbTree.modifyDbRoot(
+                            db, checkpointStart /*ifBeforeLsn*/,
+                            true /*mustExist*/);
+                    }
+                } finally {
+                    dbTree.releaseDb(db);
+                }
+            }
+        }
+    }
+
+    /**
+     * Flushes the DB mapping tree root at the end of the checkpoint, if
+     * either mapping DB is dirty and the root was not flushed previously
+     * during the checkpoint.
+     *
+     * This method is not synchronized because it does not access internal
+     * state. Also, it takes the DbTree root latch and although this latch
+     * should never be held by eviction, for consistency we should not latch
+     * something related to the Btree after synchronizing on DirtyINMap; see
+     * class comments.
+     *
+     * @param checkpointStart start LSN of the checkpoint in progress. To
+     * reduce unnecessary logging, the root is only flushed if it has not
+     * been written since that LSN.
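+     *
+     * For example, with hypothetical LSNs: if the root was last written at
+     * LSN 0x12/0x500 and checkpointStart is 0x13/0x100, the root is flushed;
+     * if it was last written at 0x13/0x200, i.e. after checkpointStart, it
+     * is not.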
+     */
+    void flushRoot(long checkpointStart) {
+
+        final DbTree dbTree = envImpl.getDbTree();
+
+        if (dbTree.getDb(DbTree.ID_DB_ID).isCheckpointNeeded() ||
+            dbTree.getDb(DbTree.NAME_DB_ID).isCheckpointNeeded()) {
+
+            envImpl.logMapTreeRoot(checkpointStart);
+        }
+    }
+
+    synchronized int getNumEntries() {
+        return numEntries;
+    }
+
+    private void traceDirtySet() {
+        assert DIRTY_SET_DEBUG_TRACE;
+
+        final StringBuilder sb = new StringBuilder();
+        sb.append("Ckpt dirty set");
+
+        for (final Integer level : levelMap.keySet()) {
+
+            final Pair<Map<Long, CheckpointReference>,
+                       Map<Long, CheckpointReference>> pairOfMaps =
+                levelMap.get(level);
+
+            final Map<Long, CheckpointReference> lsnMap =
+                pairOfMaps.first();
+
+            final Map<Long, CheckpointReference> nodeMap =
+                pairOfMaps.second();
+
+            sb.append("\nlevel = 0x").append(Integer.toHexString(level));
+            sb.append(" lsnMap = ").append(lsnMap.size());
+            sb.append(" nodeMap = ").append(nodeMap.size());
+        }
+
+        sb.append("\ndbId:highestFlushLevel");
+
+        for (final DatabaseImpl db : highestFlushLevels.keySet()) {
+            sb.append(' ').append(db.getId()).append(':');
+            sb.append(highestFlushLevels.get(db) & IN.LEVEL_MASK);
+        }
+
+        LoggerUtils.logMsg(
+            envImpl.getLogger(), envImpl, Level.INFO, sb.toString());
+    }
+}
diff --git a/src/com/sleepycat/je/recovery/RecoveryInfo.java b/src/com/sleepycat/je/recovery/RecoveryInfo.java
new file mode 100644
index 0000000..fd91cac
--- /dev/null
+++ b/src/com/sleepycat/je/recovery/RecoveryInfo.java
@@ -0,0 +1,98 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * RecoveryInfo keeps information about recovery processing.
+ */
+public class RecoveryInfo {
+
+    /* Locations found during recovery. */
+    public long lastUsedLsn = DbLsn.NULL_LSN;      // location of last entry
+    /* EOF, location of first unused spot. */
+    public long nextAvailableLsn = DbLsn.NULL_LSN;
+    public long firstActiveLsn = DbLsn.NULL_LSN;
+    public long checkpointStartLsn = DbLsn.NULL_LSN;
+    public long checkpointEndLsn = DbLsn.NULL_LSN;
+    public long useRootLsn = DbLsn.NULL_LSN;
+
+    /*
+     * Represents the first CkptStart following the CkptEnd. It is a CkptStart
+     * with no CkptEnd, and is used for counting provisional INs obsolete.
+     */
+    public long partialCheckpointStartLsn = DbLsn.NULL_LSN;
+
+    /* Checkpoint record used for this recovery. */
+    public CheckpointEnd checkpointEnd;
+
+    /* Ids */
+    public long useMinReplicatedNodeId;
+    public long useMaxNodeId;
+    public long useMinReplicatedDbId;
+    public long useMaxDbId;
+    public long useMinReplicatedTxnId;
+    public long useMaxTxnId;
+
+    /* VLSN mappings seen during recovery processing, for replication. */
+    public VLSNRecoveryProxy vlsnProxy;
+
+    /*
+     * The last reserved file that did not exist at recovery time, and its
+     * last VLSN. The file could be missing because it was not included in a
+     * network restore, or deleted by DbDeleteReservedFiles, or even manually
+     * deleted when the environment was closed.
The VLSN index must be truncated after + * this file before being used. + */ + public long lastMissingFileNumber = -1; + public VLSN lastMissingFileVLSN = VLSN.NULL_VLSN; + + /** + * ReplayTxns that are resurrected during recovery processing, for + * replication. Txnid -> replayTxn + */ + public final Map replayTxns = new HashMap(); + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Recovery Info "); + appendLsn(sb, " firstActive=", firstActiveLsn); + appendLsn(sb, " ckptStart=", checkpointStartLsn); + appendLsn(sb, " ckptEnd=", checkpointEndLsn); + appendLsn(sb, " lastUsed=", lastUsedLsn); + appendLsn(sb, " nextAvail=", nextAvailableLsn); + appendLsn(sb, " useRoot=", useRootLsn); + sb.append(checkpointEnd); + sb.append(" useMinReplicatedNodeId=").append(useMinReplicatedNodeId); + sb.append(" useMaxNodeId=").append(useMaxNodeId); + sb.append(" useMinReplicatedDbId=").append(useMinReplicatedDbId); + sb.append(" useMaxDbId=").append(useMaxDbId); + sb.append(" useMinReplicatedTxnId=").append(useMinReplicatedTxnId); + sb.append(" useMaxTxnId=").append(useMaxTxnId); + return sb.toString(); + } + + private void appendLsn(StringBuilder sb, String name, long lsn) { + if (lsn != DbLsn.NULL_LSN) { + sb.append(name).append(DbLsn.getNoFormatString(lsn)); + } + } +} diff --git a/src/com/sleepycat/je/recovery/RecoveryManager.java b/src/com/sleepycat/je/recovery/RecoveryManager.java new file mode 100644 index 0000000..0372ac6 --- /dev/null +++ b/src/com/sleepycat/je/recovery/RecoveryManager.java @@ -0,0 +1,3324 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DiskLimitException; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.RecoveryProgress; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.cleaner.RecoveryUtilizationTracker; +import com.sleepycat.je.cleaner.ReservedFileInfo; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.StartupTracker; +import com.sleepycat.je.dbi.StartupTracker.Counter; +import com.sleepycat.je.dbi.StartupTracker.Phase; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.log.CheckpointFileReader; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.INFileReader; +import com.sleepycat.je.log.LNFileReader; +import com.sleepycat.je.log.LastFileReader; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.NameLNLogEntry; +import com.sleepycat.je.recovery.RollbackTracker.Scanner; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.SearchResult; +import com.sleepycat.je.tree.TrackingInfo; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeLocation; +import com.sleepycat.je.tree.WithRootLatched; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.PreparedTxn; +import com.sleepycat.je.txn.RollbackEnd; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.TxnChain.RevertInfo; +import com.sleepycat.je.txn.WriteLockInfo; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Performs recovery when an Environment is opened. + * + * TODO: Need a description of the recovery algorithm here. For some related + * information, see the Checkpointer class comments. + * + * Recovery, the INList and Eviction + * ================================= + * There are two major steps in recovery: 1) recover the mapping database and + * the INs for all other databases, 2) recover the LNs for the other databases. 
+ * In the buildTree method, step 1 comes before the call to buildINList and
+ * step 2 comes after that. The INList is not maintained in step 1.
+ *
+ * The INList is not maintained in step 1 because there is no benefit -- we
+ * cannot evict anyway as explained below -- and there are potential drawbacks
+ * to maintaining it: added complexity and decreased performance. The
+ * drawbacks are described in more detail further below.
+ *
+ * Even if the INList were maintained in step 1, eviction could not be enabled
+ * until step 2, because logging is not allowed until all the INs are in
+ * place. In principle we could evict non-dirty nodes in step 1, but since
+ * recovery is dirtying the tree as it goes, there would be little or nothing
+ * that is non-dirty and could be evicted.
+ *
+ * Therefore, the INList has an 'enabled' mode that is initially false (in
+ * step 1) and is set to true by buildINList, just before step 2. The
+ * mechanism for adding nodes to the INList is skipped when it is disabled.
+ * In addition to enabling it, buildINList populates it from the contents of
+ * the Btrees that were constructed in step 1. In step 2, eviction is invoked
+ * explicitly by calling EnvironmentImpl.invokeEvictor often during recovery.
+ * This is important since the background evictor thread is not yet running.
+ *
+ * An externally visible limitation created by this situation is that the
+ * nodes placed in the Btree during step 1 must all fit in memory, since no
+ * eviction is performed. So memory is a limiting factor in how large a
+ * recovery can be performed. Since eviction is allowed in step 2, and step 2
+ * is where the bulk of the recovery is normally performed, this limitation
+ * of step 1 hasn't been a critical problem.
+ *
+ * Maintaining the INList
+ * ----------------------
+ * In this section we consider the impact of maintaining the INList in step 1,
+ * if this were done in a future release. It is being considered for a future
+ * release so we can rely on the INList to reference INs by node ID in the
+ * in-memory representation of an IN (see the Big Memory SR [#22292]).
+ *
+ * To maintain the INList in step 1, when a branch of a tree (a parent IN) is
+ * spliced in, the previous branch (all of the previous node's children) would
+ * have to be removed from the INList. Doing this incorrectly could cause an
+ * OOME, and it may also have a performance impact.
+ *
+ * The performance impact of removing the previous branch from the INList is
+ * difficult to estimate. In the normal case (recovery after normal shutdown),
+ * very few nodes will be replaced because normally only nodes at the max
+ * flush level are replayed, and the slots they are placed into will be empty
+ * (not resident). Here is a description of a worst-case scenario, which is
+ * when there is a crash near the end of a checkpoint:
+ *
+ *  + The last checkpoint is large, includes all nodes in the tree, is mostly
+ *    complete, but was not finished (no CkptEnd). The middle INs (above BIN
+ *    and below the max flush level) must be replayed (see Checkpointer and
+ *    Provisional.BEFORE_CKPT_END).
+ *
+ *  + For these middle INs, the INs at each level are placed in the tree and
+ *    replace any IN present in the slot. For the bottom-most level of middle
+ *    INs (level 2), these don't replace a node (the slot will be empty
+ *    because BINs are never replayed).
But for the middle INs in all levels above + * that, they replace a node that was fetched earlier; it was fetched + * because it is the parent of a node at a lower level that was replayed. + * + * + In the worst case, all INs from level 3 to R-1, where R is the root + * level, would be replayed and replace a node. However, it seems the + * replaced node would not have resident children in the scenario described, + * so the cost of removing it from the INList does not seem excessive. + * + * + Here's an example illustrating this scenario. The BINs and their parents + * (as a sub-tree) are logged first, followed by all dirty INs at the next + * level, etc. + * + * 0050 CkptStart + * 0100 BIN level 1 + * 0200 BIN level 1 + * ... + * 1000 IN level 2, parent of 0100, 0200, etc. + * 1100 BIN level 1 + * 1200 BIN level 1 + * ... + * 2000 IN level 2, parent of 1100, 1200, etc. + * ... + * 7000 IN level 2, last level 2 IN logged + * 8000 IN level 3, parent of 1000, 2000, etc. + * ... + * 9000 IN level 4, parent of 8000, etc. + * ... + * + * 9000 level 4 + * / + * ----8000---- level 3 + * / / \ + * 1000 2000 ...... level 2 + * + * BINs not shown + * + * Only the root (if it happens to be logged right before the crash) is + * non-provisional. We'll assume in this example that the root was not + * logged. Level 2 through R-1 are logged as Provisional.BEFORE_CKPT_END, + * and treated as non-provisional (replayed) by recovery because there is no + * CkptEnd. + * + * When 1000 (and all other nodes at level 2) is replayed, it is placed into + * an empty slot. + * + * When 8000 (and all other INs at level 3 and higher, below the root) is + * replayed, it will replace a resident node that was fetched and placed in + * the slot when replaying its children. The replaced node is one not + * shown, and assumed to have been logged sometime prior to this checkpoint. + * The replaced node will have all the level 2 nodes that were replayed + * earlier (1000, 2000, etc.) as its resident children, and these are the + * nodes that would have to be removed from the INList, if recovery were + * changed to place INs on the INList in step 1. + * + * So if the INs were placed on the INList, in this worst case scenario, all + * INs from level 3 to R-1 will be replayed, and all their immediate + * children would need to be removed from the INList. Grandchildren would + * not be resident. In other words, all nodes at level 2 and above (except + * the root) would be removed from the INList and replaced by a node being + * replayed. + * + * When there is a normal shutdown, we don't know of scenarios that would cause + * this sort of INList thrashing. So perhaps maintaining the INList in step 1 + * could be justified, if the additional recovery cost after a crash is + * acceptable. + * + * Or, a potential solution for the worst case scenario above might be to place + * the resident child nodes in the new parent, rather than discarding them and + * removing them from the INList. This would have the benefit of populating + * the cache and not wasting the work done to read and replay these nodes. + * OTOH, it may cause OOME if too much of the tree is loaded in step 1. 
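+ *
+ * In outline, the sequence described above is (a simplified sketch of the
+ * recover/buildTree flow, not the exact call graph):
+ *
+ *   buildINs(mappingTree)    step 1: replay mapping-tree INs
+ *   undoLNs(txn records)     collect committed/aborted txns, undo MapLNs
+ *   redoLNs(MapLNs)          mapping tree complete
+ *   buildINs(other DBs)      step 1: replay INs of all other DBs
+ *   buildINList()            enable the INList; eviction now allowed
+ *   undoLNs(LNs)             step 2: undo LNs of aborted txns
+ *   redoLNs(LNs)             step 2: replay committed and non-txnal LNs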
+ */ +public class RecoveryManager { + private static final String TRACE_LN_REDO = "LNRedo:"; + private static final String TRACE_LN_UNDO = "LNUndo"; + private static final String TRACE_IN_REPLACE = "INRecover:"; + private static final String TRACE_ROOT_REPLACE = "RootRecover:"; + + private final EnvironmentImpl envImpl; + private final int readBufferSize; + private final RecoveryInfo info; // stat info + /* Committed txn ID to Commit LSN */ + private final Map committedTxnIds; + private final Set abortedTxnIds; // aborted txns + private final Map preparedTxns; // txnid -> prepared Txn + + /* + * A set of lsns for log entries that will be resurrected is kept so that + * we can correctly redo utilization. See redoUtilization() + */ + private final Set resurrectedLsns; + + /* dbs for which we have to build the in memory IN list. */ + private final Set inListBuildDbIds; + + private final Set tempDbIds; // temp DBs to be removed + + private final Set expectDeletedMapLNs; + + /* + * Reserved file db records in the recovery interval are tracked in order + * to redo MapLN updates at the end of recovery. + */ + private final Set reservedFiles; + private final Set reservedFileDbs; + + /* Handles rollback periods created by HA syncup. */ + private final RollbackTracker rollbackTracker; + + private final RecoveryUtilizationTracker tracker; + private final StartupTracker startupTracker; + private final Logger logger; + + /* DBs that may violate the rule for upgrading to log version 8. */ + private final Set logVersion8UpgradeDbs; + + /* Whether deltas violate the rule for upgrading to log version 8. */ + private final AtomicBoolean logVersion8UpgradeDeltas; + + /* Used to recalc disk usage to prevent eviction from violating limits. */ + private int nOpsSinceDiskLimitRecalc = 0; + + /** + * Make a recovery manager + */ + public RecoveryManager(EnvironmentImpl env) + throws DatabaseException { + + this.envImpl = env; + DbConfigManager cm = env.getConfigManager(); + readBufferSize = + cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE); + committedTxnIds = new HashMap<>(); + abortedTxnIds = new HashSet<>(); + preparedTxns = new HashMap<>(); + resurrectedLsns = new HashSet<>(); + inListBuildDbIds = new HashSet<>(); + tempDbIds = new HashSet<>(); + expectDeletedMapLNs = new HashSet<>(); + reservedFiles = new HashSet<>(); + reservedFileDbs = new HashSet<>(); + tracker = new RecoveryUtilizationTracker(env); + logger = LoggerUtils.getLogger(getClass()); + rollbackTracker = new RollbackTracker(envImpl); + info = new RecoveryInfo(); + logVersion8UpgradeDbs = new HashSet<>(); + logVersion8UpgradeDeltas = new AtomicBoolean(false); + + startupTracker = envImpl.getStartupTracker(); + startupTracker.setRecoveryInfo(info); + } + + /** + * Look for an existing log and use it to create an in memory structure for + * accessing existing databases. The file manager and logging system are + * only available after recovery. + * @return RecoveryInfo statistics about the recovery process. + */ + public RecoveryInfo recover(boolean readOnly) + throws DatabaseException { + + startupTracker.start(Phase.TOTAL_RECOVERY); + try { + FileManager fileManager = envImpl.getFileManager(); + DbConfigManager configManager = envImpl.getConfigManager(); + boolean forceCheckpoint; + + /* + * After a restore from backup we must flip the file on the first + * write. The lastFileInBackup must be immutable. 
[#22834] + */ + if (configManager.getBoolean( + EnvironmentParams.ENV_RECOVERY_FORCE_NEW_FILE)) { + fileManager.forceNewLogFile(); + /* Must write something to create new file.*/ + forceCheckpoint = true; + } else { + forceCheckpoint = configManager.getBoolean( + EnvironmentParams.ENV_RECOVERY_FORCE_CHECKPOINT); + } + + if (fileManager.filesExist()) { + + /* + * Check whether log files are correctly located in the sub + * directories. + */ + fileManager.getAllFileNumbers(); + + /* + * Establish the location of the end of the log. Log this + * information to the java.util.logging logger, but delay + * tracing this information in the .jdb file, because the + * logging system is not yet initialized. Because of that, be + * sure to use lazy logging, and do not use + * LoggerUtils.logAndTrace(). + */ + findEndOfLog(readOnly); + + String endOfLogMsg = "Recovery underway, valid end of log = " + + DbLsn.getNoFormatString(info.nextAvailableLsn); + + Trace.traceLazily(envImpl, endOfLogMsg); + + /* + * Establish the location of the root, the last checkpoint, and + * the first active LSN by finding the last checkpoint. + */ + findLastCheckpoint(); + + envImpl.getLogManager().setLastLsnAtRecovery + (fileManager.getLastUsedLsn()); + + /* Read in the root. */ + envImpl.readMapTreeFromLog(info.useRootLsn); + + /* Build the in memory tree from the log. */ + buildTree(); + } else { + + /* + * Nothing more to be done. Enable publishing of debug log + * messages to the database log. + */ + LoggerUtils.logMsg + (logger, envImpl, Level.CONFIG, "Recovery w/no files."); + + /* Enable the INList and log the root of the mapping tree. */ + envImpl.getInMemoryINs().enable(); + envImpl.getEvictor().setEnabled(true); + /* Do not write LNs in a read-only environment. */ + if (!readOnly) { + envImpl.logMapTreeRoot(); + } + + /* Add shared cache environment when buildTree is not used. */ + if (envImpl.getSharedCache()) { + envImpl.getEvictor().addEnvironment(envImpl); + } + + /* + * Always force a checkpoint during creation. + */ + forceCheckpoint = true; + } + + int ptSize = preparedTxns.size(); + if (ptSize > 0) { + boolean singular = (ptSize == 1); + LoggerUtils.logMsg(logger, envImpl, Level.INFO, + "There " + (singular ? "is " : "are ") + + ptSize + " prepared but unfinished " + + (singular ? "txn." : "txns.")); + + /* + * We don't need this set any more since these are all + * registered with the TxnManager now. + */ + preparedTxns.clear(); + } + + final EnvironmentConfig envConfig = + envImpl.getConfigManager().getEnvironmentConfig(); + + /* Use of cleaner DBs may be disabled for unittests. */ + if (DbInternal.getCreateUP(envConfig)) { + + /* + * Open the file summary DB and populate the cache before the + * first checkpoint so that the checkpoint may flush file + * summary information. + */ + startupTracker.start(Phase.POPULATE_UP); + + startupTracker.setProgress( + RecoveryProgress.POPULATE_UTILIZATION_PROFILE); + + forceCheckpoint |= + envImpl.getUtilizationProfile().populateCache( + startupTracker.getCounter(Phase.POPULATE_UP), + info, reservedFiles, reservedFileDbs); + + startupTracker.stop(Phase.POPULATE_UP); + } + if (DbInternal.getCreateEP(envConfig)) { + /* + * Open the file expiration DB, populate the expiration + * profile, and initialize the current expiration tracker. 
+ */ + startupTracker.start(Phase.POPULATE_EP); + + startupTracker.setProgress( + RecoveryProgress.POPULATE_EXPIRATION_PROFILE); + + envImpl.getExpirationProfile().populateCache( + startupTracker.getCounter(Phase.POPULATE_EP), + envImpl.getRecoveryProgressListener()); + + startupTracker.stop(Phase.POPULATE_EP); + } + + /* Transfer recovery utilization info to the global tracker. */ + tracker.transferToUtilizationTracker( + envImpl.getUtilizationTracker()); + + /* + * After utilization info is complete and prior to the checkpoint, + * remove all temporary databases encountered during recovery. + */ + removeTempDbs(); + + /* + * For truncate/remove NameLNs with no corresponding deleted MapLN + * found, delete the MapLNs now. + */ + deleteMapLNs(); + + /* + * Execute any replication initialization that has to happen before + * the checkpoint. + */ + envImpl.preRecoveryCheckpointInit(info); + + /* + * At this point, we've recovered (or there were no log files at + * all). Write a checkpoint into the log. + */ + if (!readOnly && + ((envImpl.getLogManager().getLastLsnAtRecovery() != + info.checkpointEndLsn) || + forceCheckpoint)) { + + CheckpointConfig config = new CheckpointConfig(); + config.setForce(true); + config.setMinimizeRecoveryTime(true); + + startupTracker.setProgress(RecoveryProgress.CKPT); + startupTracker.start(Phase.CKPT); + try { + envImpl.invokeCheckpoint(config, "recovery"); + } catch (DiskLimitException e) { + LoggerUtils.logMsg(logger, envImpl, Level.WARNING, + "Recovery checkpoint failed due to disk limit" + + " violation but environment can still service reads: " + + e); + } + startupTracker.setStats + (Phase.CKPT, + envImpl.getCheckpointer().loadStats(StatsConfig.DEFAULT)); + startupTracker.stop(Phase.CKPT); + } else { + /* Initialize intervals when there is no initial checkpoint. */ + envImpl.getCheckpointer().initIntervals + (info.checkpointStartLsn, info.checkpointEndLsn, + System.currentTimeMillis()); + } + } catch (IOException e) { + LoggerUtils.traceAndLogException(envImpl, "RecoveryManager", + "recover", "Couldn't recover", e); + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_READ, e); + } finally { + startupTracker.stop(Phase.TOTAL_RECOVERY); + } + + return info; + } + + /** + * Find the end of the log, initialize the FileManager. While we're + * perusing the log, return the last checkpoint LSN if we happen to see it. + */ + private void findEndOfLog(boolean readOnly) + throws IOException, DatabaseException { + + startupTracker.start(Phase.FIND_END_OF_LOG); + startupTracker.setProgress(RecoveryProgress.FIND_END_OF_LOG); + Counter counter = startupTracker.getCounter(Phase.FIND_END_OF_LOG); + + LastFileReader reader = new LastFileReader(envImpl, readBufferSize); + + /* + * Tell the reader to iterate through the log file until we hit the end + * of the log or an invalid entry. Remember the last seen CkptEnd, and + * the first CkptStart with no following CkptEnd. 
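+         *
+         * For example, if the log tail reads ... CkptStart(A) ...
+         * CkptEnd(A) ... CkptStart(B) ... end-of-log (hypothetical entries
+         * A and B, for illustration), the loop below leaves checkpointEndLsn
+         * at CkptEnd(A) and partialCheckpointStartLsn at CkptStart(B).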
+         */
+        while (reader.readNextEntry()) {
+            counter.incNumRead();
+            counter.incNumProcessed();
+
+            LogEntryType type = reader.getEntryType();
+
+            if (LogEntryType.LOG_CKPT_END.equals(type)) {
+                info.checkpointEndLsn = reader.getLastLsn();
+                info.partialCheckpointStartLsn = DbLsn.NULL_LSN;
+            } else if (LogEntryType.LOG_CKPT_START.equals(type)) {
+                if (info.partialCheckpointStartLsn == DbLsn.NULL_LSN) {
+                    info.partialCheckpointStartLsn = reader.getLastLsn();
+                }
+            } else if (LogEntryType.LOG_DBTREE.equals(type)) {
+                info.useRootLsn = reader.getLastLsn();
+            } else if (LogEntryType.LOG_IMMUTABLE_FILE.equals(type)) {
+                envImpl.getFileManager().forceNewLogFile();
+            } else if (LogEntryType.LOG_RESTORE_REQUIRED.equals(type)) {
+                /*
+                 * This log entry is a marker that indicates that the log is
+                 * considered corrupt in some way, and recovery should not
+                 * proceed. Some external action has to happen to obtain new
+                 * log files that are coherent and can be recovered.
+                 */
+                envImpl.handleRestoreRequired(reader.getRestoreRequired());
+            }
+        }
+
+        /*
+         * The last valid LSN should point to the start of the last valid log
+         * entry, while the end of the log should point to the first byte of
+         * blank space, so these two should not be the same.
+         */
+        assert (reader.getLastValidLsn() != reader.getEndOfLog()):
+            "lastUsed=" + DbLsn.getNoFormatString(reader.getLastValidLsn()) +
+            " end=" + DbLsn.getNoFormatString(reader.getEndOfLog());
+
+        /* Now truncate if necessary. */
+        if (!readOnly) {
+            reader.setEndOfFile();
+        }
+
+        /* Tell the fileManager where the end of the log is. */
+        info.lastUsedLsn = reader.getLastValidLsn();
+        info.nextAvailableLsn = reader.getEndOfLog();
+        counter.setRepeatIteratorReads(reader.getNRepeatIteratorReads());
+        envImpl.getFileManager().setLastPosition(info.nextAvailableLsn,
+                                                 info.lastUsedLsn,
+                                                 reader.getPrevOffset());
+        startupTracker.stop(Phase.FIND_END_OF_LOG);
+    }
+
+    /**
+     * Find the last checkpoint and establish the firstActiveLsn point,
+     * checkpoint start, and checkpoint end.
+     */
+    private void findLastCheckpoint()
+        throws IOException, DatabaseException {
+
+        startupTracker.start(Phase.FIND_LAST_CKPT);
+        startupTracker.setProgress(RecoveryProgress.FIND_LAST_CKPT);
+        Counter counter = startupTracker.getCounter(Phase.FIND_LAST_CKPT);
+
+        /*
+         * The checkpointLsn might already have been found when establishing
+         * the end of the log. If it was found, then partialCheckpointStartLsn
+         * was also found. If it was not found, search backwards for it now
+         * and also set partialCheckpointStartLsn.
+         */
+        if (info.checkpointEndLsn == DbLsn.NULL_LSN) {
+
+            /*
+             * Search backwards through the log for a checkpoint end entry
+             * and a root entry.
+             */
+            CheckpointFileReader searcher =
+                new CheckpointFileReader(envImpl, readBufferSize, false,
+                                         info.lastUsedLsn, DbLsn.NULL_LSN,
+                                         info.nextAvailableLsn);
+
+            while (searcher.readNextEntry()) {
+                counter.incNumRead();
+                counter.incNumProcessed();
+
+                /*
+                 * Continue iterating until we find a checkpoint end entry.
+                 * While we're at it, remember the last root seen in case we
+                 * don't find a checkpoint end entry.
+                 */
+                if (searcher.isCheckpointEnd()) {
+
+                    /*
+                     * We're done, the checkpoint end will tell us where the
+                     * root is.
+                     */
+                    info.checkpointEndLsn = searcher.getLastLsn();
+                    break;
+                } else if (searcher.isCheckpointStart()) {
+
+                    /*
+                     * Remember the first CkptStart following the CkptEnd.
+                     */
+                    info.partialCheckpointStartLsn = searcher.getLastLsn();
+
+                } else if (searcher.isDbTree()) {
+
+                    /*
+                     * Save the last root that was found in the log in case
+                     * we don't see a checkpoint.
+                     */
+                    if (info.useRootLsn == DbLsn.NULL_LSN) {
+                        info.useRootLsn = searcher.getLastLsn();
+                    }
+                }
+            }
+            counter.setRepeatIteratorReads(
+                searcher.getNRepeatIteratorReads());
+        }
+
+        /*
+         * If we haven't found a checkpoint, we'll have to recover without
+         * one. At a minimum, we must have found a root.
+         */
+        if (info.checkpointEndLsn == DbLsn.NULL_LSN) {
+            info.checkpointStartLsn = DbLsn.NULL_LSN;
+            info.firstActiveLsn = DbLsn.NULL_LSN;
+        } else {
+            /* Read in the checkpoint entry. */
+            CheckpointEnd checkpointEnd = (CheckpointEnd)
+                (envImpl.getLogManager().getEntry(info.checkpointEndLsn));
+            info.checkpointEnd = checkpointEnd;
+            info.checkpointStartLsn = checkpointEnd.getCheckpointStartLsn();
+            info.firstActiveLsn = checkpointEnd.getFirstActiveLsn();
+
+            /*
+             * Use the last checkpoint root only if there is no later root.
+             * The latest root has the correct per-DB utilization info.
+             */
+            if (checkpointEnd.getRootLsn() != DbLsn.NULL_LSN &&
+                info.useRootLsn == DbLsn.NULL_LSN) {
+                info.useRootLsn = checkpointEnd.getRootLsn();
+            }
+
+            /* Init the checkpointer's id sequence. */
+            envImpl.getCheckpointer().setCheckpointId(checkpointEnd.getId());
+        }
+
+        /*
+         * Let the rollback tracker know where the checkpoint start is.
+         * Rollback periods before the checkpoint start do not need to be
+         * processed.
+         */
+        rollbackTracker.setCheckpointStart(info.checkpointStartLsn);
+
+        startupTracker.stop(Phase.FIND_LAST_CKPT);
+
+        if (info.useRootLsn == DbLsn.NULL_LSN) {
+            throw new EnvironmentFailureException
+                (envImpl,
+                 EnvironmentFailureReason.LOG_INTEGRITY,
+                 "This environment's log file has no root. Since the root " +
+                 "is the first entry written into a log at environment " +
+                 "creation, this should only happen if the initial creation " +
+                 "of the environment was never checkpointed or synced. " +
+                 "Please move aside the existing log files to allow the " +
+                 "creation of a new environment");
+        }
+    }
+
+    /**
+     * Should be called when performing operations that may add to the cache,
+     * but only after all INs are in place and buildINList has been called.
+     */
+    private void invokeEvictor() {
+
+        /*
+         * To prevent eviction from violating disk limits we must periodically
+         * freshen the log size stats. (Since the cleaner isn't running.)
+         */
+        nOpsSinceDiskLimitRecalc += 1;
+        if (nOpsSinceDiskLimitRecalc == 1000) {
+            envImpl.getCleaner().freshenLogSizeStats();
+            nOpsSinceDiskLimitRecalc = 0;
+        }
+
+        envImpl.invokeEvictor();
+    }
+
+    /**
+     * Use the log to recreate an in-memory tree.
+     */
+    private void buildTree()
+        throws DatabaseException {
+
+        startupTracker.start(Phase.BUILD_TREE);
+
+        try {
+
+            /*
+             * Read all map database INs, find the largest node ID before any
+             * possibility of splits, and find the largest txn ID before any
+             * need for a root update (which would use an AutoTxn).
+             */
+            buildINs(true /*mappingTree*/,
+                     Phase.READ_MAP_INS,
+                     Phase.REDO_MAP_INS,
+                     RecoveryProgress.READ_DBMAP_INFO,
+                     RecoveryProgress.REDO_DBMAP_INFO);
+
+            /*
+             * Undo all aborted map LNs. Read and remember all committed,
+             * prepared, and replicated transaction ids, to prepare for the
+             * redo phases.
+             */
+            startupTracker.start(Phase.UNDO_MAP_LNS);
+            startupTracker.setProgress(RecoveryProgress.UNDO_DBMAP_RECORDS);
+
+            Set<LogEntryType> mapLNSet = new HashSet<>();
+            mapLNSet.add(LogEntryType.LOG_TXN_COMMIT);
+            mapLNSet.add(LogEntryType.LOG_TXN_ABORT);
+            mapLNSet.add(LogEntryType.LOG_TXN_PREPARE);
+            mapLNSet.add(LogEntryType.LOG_ROLLBACK_START);
+            mapLNSet.add(LogEntryType.LOG_ROLLBACK_END);
+
+            undoLNs(mapLNSet, true /*firstUndoPass*/,
+                    startupTracker.getCounter(Phase.UNDO_MAP_LNS));
+
+            startupTracker.stop(Phase.UNDO_MAP_LNS);
+
+            /*
+             * Start the file cache warmer after we have read the log from
+             * firstActiveLsn forward. From here forward, recovery should be
+             * reading from the file system cache, so another reading thread
+             * should not cause disk head movement.
+             */
+            envImpl.getFileManager().startFileCacheWarmer(
+                info.firstActiveLsn);
+
+            /*
+             * Replay all MapLNs; the mapping tree is in place now. Use the
+             * sets of committed, replicated and prepared txns found during
+             * the undo pass.
+             */
+            startupTracker.start(Phase.REDO_MAP_LNS);
+            startupTracker.setProgress(RecoveryProgress.REDO_DBMAP_RECORDS);
+
+            mapLNSet.clear();
+            mapLNSet.add(LogEntryType.LOG_MAPLN);
+
+            redoLNs(mapLNSet, startupTracker.getCounter(Phase.REDO_MAP_LNS));
+
+            startupTracker.stop(Phase.REDO_MAP_LNS);
+
+            /*
+             * When the mapping DB is complete, check for log version 8
+             * upgrade violations. Will throw an exception if there is a
+             * violation.
+             */
+            checkLogVersion8UpgradeViolations();
+
+            /*
+             * Reconstruct the internal nodes for the main level trees.
+             */
+            buildINs(false /*mappingTree*/,
+                     Phase.READ_INS,
+                     Phase.REDO_INS,
+                     RecoveryProgress.READ_DATA_INFO,
+                     RecoveryProgress.REDO_DATA_INFO);
+
+            /*
+             * Build the in-memory IN list. Now that the INs are complete we
+             * can add the environment to the evictor (for a shared cache) and
+             * invoke the evictor. The evictor will also be invoked during the
+             * undo and redo passes.
+             */
+            buildINList();
+            if (envImpl.getSharedCache()) {
+                envImpl.getEvictor().addEnvironment(envImpl);
+            }
+            invokeEvictor();
+
+            /*
+             * Undo aborted LNs. No need to include TxnAbort, TxnCommit,
+             * TxnPrepare, RollbackStart and RollbackEnd records again, since
+             * those were scanned during the undo of all aborted MapLNs.
+             */
+            startupTracker.start(Phase.UNDO_LNS);
+            startupTracker.setProgress(RecoveryProgress.UNDO_DATA_RECORDS);
+
+            Set<LogEntryType> lnSet = new HashSet<>();
+            for (LogEntryType entryType : LogEntryType.getAllTypes()) {
+                if (entryType.isLNType() && entryType.isTransactional() &&
+                    !entryType.equals(LogEntryType.LOG_MAPLN_TRANSACTIONAL)) {
+                    lnSet.add(entryType);
+                }
+            }
+
+            undoLNs(lnSet, false /*firstUndoPass*/,
+                    startupTracker.getCounter(Phase.UNDO_LNS));
+
+            startupTracker.stop(Phase.UNDO_LNS);
+
+            /* Replay LNs. Also read non-transactional LNs. */
+            startupTracker.start(Phase.REDO_LNS);
+            startupTracker.setProgress(RecoveryProgress.REDO_DATA_RECORDS);
+
+            for (LogEntryType entryType : LogEntryType.getAllTypes()) {
+                if (entryType.isLNType() && !entryType.isTransactional() &&
+                    !entryType.equals(LogEntryType.LOG_MAPLN)) {
+                    lnSet.add(entryType);
+                }
+            }
+
+            redoLNs(lnSet, startupTracker.getCounter(Phase.REDO_LNS));
+
+            startupTracker.stop(Phase.REDO_LNS);
+
+            rollbackTracker.recoveryEndFsyncInvisible();
+        } finally {
+            startupTracker.stop(Phase.BUILD_TREE);
+        }
+    }
+
+    /**
+     * Perform two passes for the INs of a given level. Root INs must be
+     * processed first to account for splits/compressions that were done
+     * during/after a checkpoint [#14424] [#24663].
+     *
+     * Splits and compression require logging up to the root of the tree, to
+     * ensure that all INs are properly returned to the correct position at
+     * recovery. In other words, splits and compression ensure that the
+     * creation and deletion of all nodes is promptly logged.
+     *
+     * However, checkpoints are not propagated to the top of the tree, in
+     * order to conserve on logging. Because of that, a great-aunt situation
+     * can occur, where an ancestor of a given node can be logged without
+     * referring to the latest on-disk position of the node, because that
+     * ancestor was part of a split or compression.
+     *
+     * Take this scenario:
+     *
+     *            Root-A
+     *           /      \
+     *        IN-B      IN-C
+     *        /        / | \
+     *     BIN-D
+     *     /
+     *  LN-E
+     *
+     * 1) LN-E is logged, BIN-D is dirtied
+     * 2) BIN-D is logged during a checkpoint, IN-B is dirtied
+     * 3) IN-C is split and Root-A is logged
+     * 4) We recover using Root-A and the BIN-D logged at (2) is lost
+     *
+     * At (3) when Root-A is logged, it points to an IN-B on disk that does
+     * not point to the most recent BIN-D.
+     *
+     * At (4) when we recover, although we will process the BIN-D logged at
+     * (2) and splice it into the tree, the Root-A logged at (3) is processed
+     * last and overrides the entire subtree containing BIN-D.
+     *
+     * This could be addressed by always logging to the root at every
+     * checkpoint. Barring that, we address it by replaying the root INs
+     * first, and then all non-root INs.
+     *
+     * It is important that no IN is replayed that would cause a fetch of an
+     * older IN version which has been replaced by a newer version in the
+     * checkpoint interval. If the newer version were logged as the result of
+     * log cleaning, and we attempt to fetch the older version, this would
+     * cause a LOG_FILE_NOT_FOUND exception. The replay of the root INs in
+     * the first pass is safe, because it won't cause a fetch [#24663]. The
+     * replay of INs in the second pass is safe because only nodes at
+     * maxFlushLevel were logged non-provisionally and only these nodes are
+     * replayed.
+     *
+     * @param mappingTree if true, we're building the mapping tree
+     */
+    private void buildINs(
+        boolean mappingTree,
+        StartupTracker.Phase phaseA,
+        StartupTracker.Phase phaseB,
+        RecoveryProgress progressA,
+        RecoveryProgress progressB)
+        throws DatabaseException {
+
+        /*
+         * Pass a: Replay root INs.
+         */
+        startupTracker.start(phaseA);
+        startupTracker.setProgress(progressA);
+
+        if (mappingTree) {
+            readRootINsAndTrackIds(startupTracker.getCounter(phaseA));
+        } else {
+            readRootINs(startupTracker.getCounter(phaseA));
+        }
+        startupTracker.stop(phaseA);
+
+        /*
+         * Pass b: Replay non-root INs.
+         */
+        startupTracker.start(phaseB);
+        startupTracker.setProgress(progressB);
+
+        readNonRootINs(mappingTree, startupTracker.getCounter(phaseB));
+
+        startupTracker.stop(phaseB);
+    }
+
+    /*
+     * Read root INs in the mapping tree DB and place them in the in-memory
+     * tree.
+     *
+     * Also peruse all pertinent log entries in order to update our knowledge
+     * of the last used database, transaction and node ids, and to track the
+     * utilization profile and VLSN->LSN mappings.
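+     *
+     * For instance, the node ID sequence ends up initialized with
+     * max(reader.getMaxNodeId(), ckptEnd.getLastLocalNodeId()), as computed
+     * in the body below; DB IDs and txn IDs follow the same pattern.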
+     */
+    private void readRootINsAndTrackIds(
+        StartupTracker.Counter counter)
+        throws DatabaseException {
+
+        INFileReader reader = new INFileReader(
+            envImpl, readBufferSize,
+            info.checkpointStartLsn,        // start lsn
+            info.nextAvailableLsn,          // finish lsn
+            true,                           // track ids
+            info.partialCheckpointStartLsn, // partialCkptStart
+            info.checkpointEndLsn,          // ckptEnd
+            tracker,
+            logVersion8UpgradeDbs,
+            logVersion8UpgradeDeltas);
+
+        reader.addTargetType(LogEntryType.LOG_IN);
+
+        /* Validate all entries in at least one full recovery pass. */
+        reader.setAlwaysValidateChecksum(true);
+
+        try {
+            DbTree dbMapTree = envImpl.getDbTree();
+
+            /* Process every IN and BIN in the mapping tree. */
+            while (reader.readNextEntry()) {
+
+                counter.incNumRead();
+
+                DatabaseId dbId = reader.getDatabaseId();
+
+                if (!dbId.equals(DbTree.ID_DB_ID)) {
+                    continue;
+                }
+
+                DatabaseImpl db = dbMapTree.getDb(dbId);
+
+                assert db != null; // mapping DB is always available
+
+                try {
+                    if (!reader.getIN(db).isRoot()) {
+                        continue;
+                    }
+
+                    replayOneIN(reader, db);
+
+                    counter.incNumProcessed();
+
+                } finally {
+                    dbMapTree.releaseDb(db);
+                }
+            }
+
+            counter.setRepeatIteratorReads(reader.getNRepeatIteratorReads());
+
+            /*
+             * Update the node ID, database ID, and txn ID sequences. Use the
+             * maximum of the IDs seen by the reader and the IDs stored in
+             * the checkpoint.
+             */
+            info.useMinReplicatedNodeId = reader.getMinReplicatedNodeId();
+            info.useMaxNodeId = reader.getMaxNodeId();
+
+            info.useMinReplicatedDbId = reader.getMinReplicatedDbId();
+            info.useMaxDbId = reader.getMaxDbId();
+
+            info.useMinReplicatedTxnId = reader.getMinReplicatedTxnId();
+            info.useMaxTxnId = reader.getMaxTxnId();
+
+            if (info.checkpointEnd != null) {
+                CheckpointEnd ckptEnd = info.checkpointEnd;
+
+                if (info.useMinReplicatedNodeId >
+                    ckptEnd.getLastReplicatedNodeId()) {
+                    info.useMinReplicatedNodeId =
+                        ckptEnd.getLastReplicatedNodeId();
+                }
+                if (info.useMaxNodeId < ckptEnd.getLastLocalNodeId()) {
+                    info.useMaxNodeId = ckptEnd.getLastLocalNodeId();
+                }
+
+                if (info.useMinReplicatedDbId >
+                    ckptEnd.getLastReplicatedDbId()) {
+                    info.useMinReplicatedDbId =
+                        ckptEnd.getLastReplicatedDbId();
+                }
+                if (info.useMaxDbId < ckptEnd.getLastLocalDbId()) {
+                    info.useMaxDbId = ckptEnd.getLastLocalDbId();
+                }
+
+                if (info.useMinReplicatedTxnId >
+                    ckptEnd.getLastReplicatedTxnId()) {
+                    info.useMinReplicatedTxnId =
+                        ckptEnd.getLastReplicatedTxnId();
+                }
+                if (info.useMaxTxnId < ckptEnd.getLastLocalTxnId()) {
+                    info.useMaxTxnId = ckptEnd.getLastLocalTxnId();
+                }
+            }
+
+            envImpl.getNodeSequence().
+                setLastNodeId(info.useMinReplicatedNodeId, info.useMaxNodeId);
+            envImpl.getDbTree().setLastDbId(info.useMinReplicatedDbId,
+                                            info.useMaxDbId);
+            envImpl.getTxnManager().setLastTxnId(info.useMinReplicatedTxnId,
+                                                 info.useMaxTxnId);
+
+            info.vlsnProxy = reader.getVLSNProxy();
+        } catch (Exception e) {
+            traceAndThrowException(reader.getLastLsn(), "readMapIns", e);
+        }
+    }
+
+    /**
+     * Read root INs for DBs other than the mapping tree, and process.
+     */
+    private void readRootINs(
+        StartupTracker.Counter counter)
+        throws DatabaseException {
+
+        /* Don't need to track IDs. */
+        INFileReader reader = new INFileReader(
+            envImpl, readBufferSize,
+            info.checkpointStartLsn,        // start lsn
+            info.nextAvailableLsn,          // finish lsn
+            false,                          // track ids
+            info.partialCheckpointStartLsn, // partialCkptStart
+            info.checkpointEndLsn,          // ckptEnd
+            null);                          // tracker
+
+        reader.addTargetType(LogEntryType.LOG_IN);
+
+        try {
+
+            /*
+             * Read all non-provisional INs, and process if they don't belong
+             * to the mapping tree.
+             */
+            DbTree dbMapTree = envImpl.getDbTree();
+
+            while (reader.readNextEntry()) {
+
+                counter.incNumRead();
+
+                DatabaseId dbId = reader.getDatabaseId();
+
+                if (dbId.equals(DbTree.ID_DB_ID)) {
+                    continue;
+                }
+
+                DatabaseImpl db = dbMapTree.getDb(dbId);
+
+                if (db == null) {
+                    /* This db has been deleted, ignore the entry. */
+                    counter.incNumDeleted();
+                    continue;
+                }
+
+                try {
+                    if (!reader.getIN(db).isRoot()) {
+                        continue;
+                    }
+
+                    replayOneIN(reader, db);
+
+                    counter.incNumProcessed();
+
+                } finally {
+                    dbMapTree.releaseDb(db);
+                }
+            }
+
+            counter.setRepeatIteratorReads(reader.getNRepeatIteratorReads());
+        } catch (Exception e) {
+            traceAndThrowException(reader.getLastLsn(), "readNonMapIns", e);
+        }
+    }
+
+    /**
+     * Read non-root INs and process.
+     */
+    private void readNonRootINs(
+        boolean mappingTree,
+        StartupTracker.Counter counter)
+        throws DatabaseException {
+
+        /* Don't need to track IDs. */
+        INFileReader reader = new INFileReader(
+            envImpl, readBufferSize,
+            info.checkpointStartLsn,        // start lsn
+            info.nextAvailableLsn,          // finish lsn
+            false,                          // track ids
+            info.partialCheckpointStartLsn, // partialCkptStart
+            info.checkpointEndLsn,          // ckptEnd
+            null);                          // tracker
+
+        reader.addTargetType(LogEntryType.LOG_IN);
+        reader.addTargetType(LogEntryType.LOG_BIN);
+        reader.addTargetType(LogEntryType.LOG_BIN_DELTA);
+        reader.addTargetType(LogEntryType.LOG_OLD_BIN_DELTA);
+
+        try {
+
+            /* Read all non-provisional INs that are in the repeat set. */
+            DbTree dbMapTree = envImpl.getDbTree();
+
+            while (reader.readNextEntry()) {
+
+                counter.incNumRead();
+
+                DatabaseId dbId = reader.getDatabaseId();
+
+                if (mappingTree != dbId.equals(DbTree.ID_DB_ID)) {
+                    continue;
+                }
+
+                DatabaseImpl db = dbMapTree.getDb(dbId);
+
+                if (db == null) {
+                    /* This db has been deleted, ignore the entry. */
+                    counter.incNumDeleted();
+                    continue;
+                }
+
+                try {
+                    if (reader.getIN(db).isRoot()) {
+                        continue;
+                    }
+
+                    replayOneIN(reader, db);
+
+                    counter.incNumProcessed();
+
+                } finally {
+                    dbMapTree.releaseDb(db);
+                }
+            }
+
+            counter.setRepeatIteratorReads(reader.getNRepeatIteratorReads());
+        } catch (Exception e) {
+            traceAndThrowException(reader.getLastLsn(), "readNonMapIns", e);
+        }
+    }
+
+    /**
+     * Get an IN from the reader, set its database, and fit it into the tree.
+     */
+    private void replayOneIN(
+        INFileReader reader,
+        DatabaseImpl db)
+        throws DatabaseException {
+
+        /*
+         * The last entry is a node; replay it. Now, we should really call
+         * IN.postFetchInit, but we want to do something different from the
+         * faulting-in-a-node path, because we don't want to put the IN on
+         * the in-memory list, and we don't want to search the db map tree,
+         * so we have an IN.postRecoveryInit.
+         */
+        final long logLsn = reader.getLastLsn();
+        final IN in = reader.getIN(db);
+        in.postRecoveryInit(db, logLsn);
+        in.latch();
+
+        recoverIN(db, in, logLsn);
+
+        /*
+         * Add any db that we encounter INs for, because they will be part of
+         * the in-memory tree and therefore should be included in the INList
+         * build.
+         */
+        inListBuildDbIds.add(db.getId());
+    }
+
+    /**
+     * Recover an internal node.
+     *
+     * inFromLog should be latched upon entering this method and it will
+     * not be latched upon exiting.
+     *
+     * @param inFromLog - the new node to put in the tree. The identifier key
+     * and node ID are used to find the existing version of the node.
+     * @param logLsn - the location of the log entry in the log.
+     */
+    private void recoverIN(DatabaseImpl db, IN inFromLog, long logLsn)
+        throws DatabaseException {
+
+        List<TrackingInfo> trackingList = null;
+        try {
+
+            /*
+             * We must know a priori if this node is the root. We can't infer
+             * that status from a search of the existing tree, because
+             * splitting the root is done by putting a node above the old
+             * root. A search downward would incorrectly place the new root
+             * below the existing tree.
+             */
+            if (inFromLog.isRoot()) {
+                recoverRootIN(db, inFromLog, logLsn);
+
+            } else {
+
+                /*
+                 * Look for a parent. The call to getParentNode unlatches the
+                 * node. Then place inFromLog in the tree if appropriate.
+                 */
+                trackingList = new ArrayList<>();
+                recoverChildIN(db, inFromLog, logLsn, trackingList);
+            }
+        } catch (EnvironmentFailureException e) {
+            /* Pass through untouched. */
+            throw e;
+        } catch (Exception e) {
+            String trace = printTrackList(trackingList);
+            LoggerUtils.traceAndLogException(
+                db.getEnv(), "RecoveryManager", "recoverIN",
+                " lsnFromLog: " + DbLsn.getNoFormatString(logLsn) +
+                " " + trace, e);
+
+            throw new EnvironmentFailureException(
+                envImpl, EnvironmentFailureReason.LOG_INTEGRITY,
+                "lsnFromLog=" + DbLsn.getNoFormatString(logLsn), e);
+        } finally {
+            if (LatchSupport.TRACK_LATCHES) {
+                LatchSupport.expectBtreeLatchesHeld(
+                    0, "LSN = " + DbLsn.toString(logLsn) +
+                    " inFromLog = " + inFromLog.getNodeId());
+            }
+        }
+    }
+
+    /**
+     * If the root of this tree is null, use this IN from the log as a root.
+     * Note that we should really also check the LSN of the MapLN, because
+     * perhaps the root is null because it's been deleted. However, the
+     * replay of all the LNs will end up adjusting the tree correctly.
+     *
+     * If there is a root, check if this IN is at a different LSN and if so,
+     * replace it.
+     */
+    private void recoverRootIN(DatabaseImpl db, IN inFromLog, long lsn)
+        throws DatabaseException {
+
+        boolean success = true;
+        Tree tree = db.getTree();
+        RootUpdater rootUpdater = new RootUpdater(tree, inFromLog, lsn);
+        try {
+            /* Run the root updater while the root latch is held. */
+            tree.withRootLatchedExclusive(rootUpdater);
+
+            /* Update the MapLN if necessary. */
+            if (rootUpdater.updateDone()) {
+
+                /*
+                 * Dirty the database to call DbTree.modifyDbRoot later during
+                 * the checkpoint. We should not log a DatabaseImpl until its
+                 * utilization info is correct.
+                 */
+                db.setDirty();
+            }
+        } catch (Exception e) {
+            success = false;
+            throw new EnvironmentFailureException(
+                envImpl, EnvironmentFailureReason.LOG_INTEGRITY,
+                "lsnFromLog=" + DbLsn.getNoFormatString(lsn), e);
+        } finally {
+            if (rootUpdater.getInFromLogIsLatched()) {
+                inFromLog.releaseLatch();
+            }
+
+            trace(logger,
+                  db, TRACE_ROOT_REPLACE, success, inFromLog,
+                  lsn,
+                  null,
+                  true,
+                  rootUpdater.getReplaced(),
+                  rootUpdater.getInserted(),
+                  rootUpdater.getOriginalLsn(),
+                  DbLsn.NULL_LSN,
+                  -1);
+        }
+    }
+
+    /*
+     * RootUpdater lets us replace the tree root within the tree root latch.
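+     *
+     * It is applied as tree.withRootLatchedExclusive(rootUpdater) in
+     * recoverRootIN above, so the compare-and-replace of the root reference
+     * happens entirely while the root latch is held.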
+ */ + private static class RootUpdater implements WithRootLatched { + private final Tree tree; + private final IN inFromLog; + private long lsn = DbLsn.NULL_LSN; + private boolean inserted = false; + private boolean replaced = false; + private long originalLsn = DbLsn.NULL_LSN; + private boolean inFromLogIsLatched = true; + + RootUpdater(Tree tree, IN inFromLog, long lsn) { + this.tree = tree; + this.inFromLog = inFromLog; + this.lsn = lsn; + } + + boolean getInFromLogIsLatched() { + return inFromLogIsLatched; + } + + public IN doWork(ChildReference root) + throws DatabaseException { + + ChildReference newRoot = + tree.makeRootChildReference(inFromLog, new byte[0], lsn); + inFromLog.releaseLatch(); + inFromLogIsLatched = false; + + if (root == null) { + tree.setRoot(newRoot, false); + inserted = true; + } else { + originalLsn = root.getLsn(); // for debugLog + + /* + * The current in-memory root IN is older than the root IN from + * the log. + */ + if (DbLsn.compareTo(originalLsn, lsn) < 0) { + tree.setRoot(newRoot, false); + replaced = true; + } + } + return null; + } + + boolean updateDone() { + return inserted || replaced; + } + + boolean getInserted() { + return inserted; + } + + boolean getReplaced() { + return replaced; + } + + long getOriginalLsn() { + return originalLsn; + } + } + + /** + * Recovers a non-root IN. See algorithm below. + * + * Note that this method never inserts a slot for an IN, it only replaces + * the node in a slot under certain conditions. Insertion of slots is + * unnecessary because splits are logged all the way to the root, and + * therefore inserted slots are always visible via the parent node. In + * fact, it is critical that splits are not allowed during this phase of + * recovery, because that might require splits and logging is not allowed + * until the INs are all in place. + */ + private void recoverChildIN( + DatabaseImpl db, + IN inFromLog, + long logLsn, + List trackingList) + throws DatabaseException { + + boolean replaced = false; + long treeLsn = DbLsn.NULL_LSN; + boolean finished = false; + SearchResult result = new SearchResult(); + + try { + long targetNodeId = inFromLog.getNodeId(); + byte[] targetKey = inFromLog.getIdentifierKey(); + int exclusiveLevel = inFromLog.getLevel() + 1; + + inFromLog.releaseLatch(); + + result = db.getTree().getParentINForChildIN( + targetNodeId, targetKey, -1, /*targetLevel*/ + exclusiveLevel, true /*requireExactMatch*/, true, /*doFetch*/ + CacheMode.UNCHANGED, trackingList); + + /* + * Does inFromLog exist in this parent? + * + * 1. IN is not in the current tree. Skip this IN; it's represented + * by a parent that's later in the log or it has been deleted. + * This works because splits and IN deleteions are logged + * immediately when they occur all the way up to the root. + * 2. physical match: (LSNs same) this LSN is already in place, + * do nothing. + * 3. logical match: another version of this IN is in place. + * Replace child with inFromLog if inFromLog's LSN is greater. + */ + if (result.parent == null) { + finished = true; + return; // case 1, + } + + IN parent = result.parent; + int idx = result.index; + + assert(result.exactParentFound); + assert(result.index >= 0); + assert(targetNodeId == ((IN)parent.getTarget(idx)).getNodeId()); + + /* Get the key that will locate inFromLog in this parent. */ + if (parent.getLsn(idx) == logLsn) { + /* case 2: do nothing */ + } else { + + /* Not an exact physical match, now need to look at child. 
+                treeLsn = parent.getLsn(idx);
+
+                /* case 3: It's a logical match, replace. */
+                if (DbLsn.compareTo(treeLsn, logLsn) < 0) {
+
+                    /*
+                     * It's a logical match, replace. Put the child
+                     * node reference into the parent, as well as the
+                     * true LSN of the IN or BIN-delta.
+                     */
+                    parent.recoverIN(
+                        idx, inFromLog, logLsn, 0 /*lastLoggedSize*/);
+
+                    replaced = true;
+                }
+            }
+
+            finished = true;
+
+        } finally {
+            if (result.parent != null) {
+                result.parent.releaseLatch();
+            }
+
+            trace(logger, db,
+                  TRACE_IN_REPLACE, finished, inFromLog,
+                  logLsn, result.parent,
+                  result.exactParentFound, replaced, false /*inserted*/,
+                  treeLsn, DbLsn.NULL_LSN, result.index);
+        }
+    }
+
+    /**
+     * Undo all LNs that belong to aborted transactions. These are LNs in the
+     * log that
+     * (1) don't belong to a committed txn AND
+     * (2) aren't part of a prepared txn AND
+     * (3) shouldn't be resurrected as part of a replication ReplayTxn.
+     *
+     * LNs that are part of a rollback period do need to be undone, but in
+     * a different way from the other LNs. They are rolled back and take a
+     * different path.
+     *
+     * To find these LNs, walk the log backwards, using log entry commit
+     * records to create a collection of committed txns. If we see a log entry
+     * that doesn't fit the criteria above, undo it.
+     *
+     * @param firstUndoPass is true if this is the first time that undoLNs is
+     * called. This is a little awkward, but is done to explicitly indicate to
+     * the rollback tracker that this is the tracker's construction phase.
+     * During this first pass, RollbackStart and RollbackEnd are in the target
+     * log types, and the rollback period map is created.
+     * We thought that it was better to be explicit than to rely on checking
+     * the logTypes parameter to see if RollbackStart/RollbackEnd is included.
+     */
+    private void undoLNs(
+        Set logTypes,
+        boolean firstUndoPass,
+        StartupTracker.Counter counter)
+        throws DatabaseException {
+
+        long firstActiveLsn = info.firstActiveLsn;
+        long lastUsedLsn = info.lastUsedLsn;
+        long endOfFileLsn = info.nextAvailableLsn;
+
+        /* Set up a reader to pick up target log entries from the log. */
+        LNFileReader reader = new LNFileReader(
+            envImpl, readBufferSize, lastUsedLsn,
+            false, endOfFileLsn, firstActiveLsn, null,
+            info.checkpointEndLsn);
+
+        for (LogEntryType lt: logTypes) {
+            reader.addTargetType(lt);
+        }
+
+        DbTree dbMapTree = envImpl.getDbTree();
+
+        /*
+         * See RollbackTracker.java for details on replication rollback
+         * periods. Standalone recovery must handle replication rollback at
+         * recovery, because we might be opening a replicated environment in a
+         * read-only, non-replicated way for use by a command line utility.
+         * Even though the utility will not write invisible bits, it will need
+         * to ensure that all btree nodes are in the proper state, and reflect
+         * any rollback related changes.
+         *
+         * Note that when opening a read-only environment, because we cannot
+         * write invisible bits, we may end up redo'ing LNs in rolled back txns
+         * that should be marked invisible. This is very unlikely, but should
+         * be fixed at some point by using the LSNs collected by
+         * RollbackTracker to determine whether a log entry should be treated
+         * as invisible by redo. See [#23708].
+         *
+         * The rollbackScanner is a sort of cursor that acts with the known
+         * state of the rollback period detection.
+         *
+         * We let the tracker know if it is the first pass or not, in order
+         * to support some internal tracker assertions.
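+         *
+         * As a condensed sketch, the loop below classifies each target
+         * entry roughly as follows (order matters; no new APIs assumed):
+         *
+         *   in a rollback period        -> rollbackScanner.rollback(...)
+         *   committed txn               -> skip (handled by redo)
+         *   prepared txn                -> resurrect, skip undo
+         *   replicated uncommitted txn  -> create ReplayTxn, skip undo
+         *   otherwise                   -> undoUncommittedLN(...)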
+         */
+        rollbackTracker.setFirstPass(firstUndoPass);
+        final Scanner rollbackScanner = rollbackTracker.getScanner();
+
+        try {
+
+            /*
+             * Iterate over the target LNs and commit records, constructing the
+             * tree.
+             */
+            while (reader.readNextEntry()) {
+                counter.incNumRead();
+                if (reader.isLN()) {
+
+                    /* Get the txnId from the log entry. */
+                    Long txnId = reader.getTxnId();
+
+                    /* Skip past this, no need to undo non-txnal LNs. */
+                    if (txnId == null) {
+                        continue;
+                    }
+
+                    if (rollbackScanner.positionAndCheck(reader.getLastLsn(),
+                                                         txnId)) {
+                        /*
+                         * If an LN is in the rollback period and was part of a
+                         * rollback, let the rollback scanner decide how it
+                         * should be handled. This does not include LNs that
+                         * were explicitly aborted.
+                         */
+                        rollbackScanner.rollback(txnId, reader, tracker);
+                        continue;
+                    }
+
+                    /* This LN is part of a committed txn. */
+                    if (committedTxnIds.containsKey(txnId)) {
+                        continue;
+                    }
+
+                    /* This LN is part of a prepared txn. */
+                    if (preparedTxns.get(txnId) != null) {
+                        resurrectedLsns.add(reader.getLastLsn());
+                        continue;
+                    }
+
+                    /*
+                     * This LN is part of an uncommitted, unaborted
+                     * replicated txn.
+                     */
+                    if (isReplicatedUncommittedLN(reader, txnId)) {
+                        createReplayTxn(txnId);
+                        resurrectedLsns.add(reader.getLastLsn());
+                        continue;
+                    }
+
+                    undoUncommittedLN(reader, dbMapTree);
+                    counter.incNumProcessed();
+
+                } else if (reader.isPrepare()) {
+                    handlePrepare(reader);
+                    counter.incNumAux();
+
+                } else if (reader.isAbort()) {
+                    /* The entry just read is an abort record. */
+                    abortedTxnIds.add(reader.getTxnAbortId());
+                    counter.incNumAux();
+
+                } else if (reader.isCommit()) {
+
+                    /*
+                     * Sanity check that the commit does not interfere with the
+                     * rollback period. Since the reader includes commits only
+                     * on the first pass, the cost of the check is confined to
+                     * that pass, and is very low if there is no rollback
+                     * period.
+                     */
+                    rollbackTracker.checkCommit(reader.getLastLsn(),
+                                                reader.getTxnCommitId());
+
+                    committedTxnIds.put(reader.getTxnCommitId(),
+                                        reader.getLastLsn());
+                    counter.incNumAux();
+
+                } else if (reader.isRollbackStart()) {
+                    rollbackTracker.register(
+                        (RollbackStart) reader.getMainItem(),
+                        reader.getLastLsn());
+                    counter.incNumAux();
+
+                } else if (reader.isRollbackEnd()) {
+                    rollbackTracker.register(
+                        (RollbackEnd) reader.getMainItem(),
+                        reader.getLastLsn());
+                    counter.incNumAux();
+
+                } else {
+                    throw EnvironmentFailureException.unexpectedState(
+                        envImpl,
+                        "LNreader should not have picked up type " +
+                        reader.dumpCurrentHeader());
+                }
+            } /* while */
+            counter.setRepeatIteratorReads(reader.getNRepeatIteratorReads());
+            rollbackTracker.singlePassSetInvisible();
+
+        } catch (RuntimeException e) {
+            traceAndThrowException(reader.getLastLsn(), "undoLNs", e);
+        }
+    }
+
+    /**
+     * Uncommitted, unaborted LNs that belong to a replicated txn are
+     * resurrected rather than undone. This means that the LN is also
+     * replicated.
+     */
+    private boolean isReplicatedUncommittedLN(LNFileReader reader, Long txnId) {
+
+        /*
+         * This only applies if the environment is replicated AND the entry is
+         * in a replicated txn. If a replicated environment is opened by a
+         * read-only command line utility, it will be opened in a
+         * non-replicated way, and we don't want to resurrect the txn and
+         * acquire write locks.
+         */
+        if (!envImpl.isReplicated()) {
+            return false;
+        }
+
+        if (abortedTxnIds.contains(txnId)) {
+            return false;
+        }
+
+        if (reader.entryIsReplicated()) {
+            return true;
+        }
+
+        return false;
+    }
+
+    /**
+     * When recovering a replicated environment, all uncommitted, replicated
+     * transactions are resurrected much the same way as a prepared
+     * transaction. If the node turns out to be a new master, by definition
+     * those txns won't resume, and the code path for new master setup will
+     * abort these transactions. If the node is a replica, the transactions
+     * will either resume or abort depending on whether the originating master
+     * is alive or not.
+     */
+    private void createReplayTxn(long txnId)
+        throws DatabaseException {
+
+        /*
+         * If we didn't see this transaction yet, create a ReplayTxn
+         * to use in the later redo stage, when we redo and resurrect
+         * this transaction.
+         */
+        if (info.replayTxns.get(txnId) == null) {
+            info.replayTxns.put(txnId, envImpl.createReplayTxn(txnId));
+        }
+    }
+
+    /**
+     * The entry just read is a prepare record. Set up a PrepareTxn that will
+     * exempt any of its uncommitted LNs from undo. Instead, uncommitted LNs
+     * that belong to a PrepareTxn are redone.
+     */
+    private void handlePrepare(LNFileReader reader)
+        throws DatabaseException {
+
+        long prepareId = reader.getTxnPrepareId();
+        Long prepareIdL = prepareId;
+        if (!committedTxnIds.containsKey(prepareIdL) &&
+            !abortedTxnIds.contains(prepareIdL)) {
+            TransactionConfig txnConf = new TransactionConfig();
+            PreparedTxn preparedTxn = PreparedTxn.createPreparedTxn
+                (envImpl, txnConf, prepareId);
+
+            /*
+             * There should be no lock conflicts during recovery, but just in
+             * case there are, we set the lock timeout to 0.
+             */
+            preparedTxn.setLockTimeout(0);
+            preparedTxns.put(prepareIdL, preparedTxn);
+            preparedTxn.setPrepared(true);
+            envImpl.getTxnManager().registerXATxn
+                (reader.getTxnPrepareXid(), preparedTxn, true);
+            LoggerUtils.logMsg(logger, envImpl, Level.INFO,
+                               "Found unfinished prepare record: id: " +
+                               reader.getTxnPrepareId() +
+                               " Xid: " + reader.getTxnPrepareXid());
+        }
+    }
+
+    /**
+     * Found an uncommitted LN, set up the work to undo the LN.
+     */
+    private void undoUncommittedLN(LNFileReader reader, DbTree dbMapTree)
+        throws DatabaseException {
+
+        /* Invoke the evictor to reduce memory consumption. */
+        invokeEvictor();
+
+        DatabaseId dbId = reader.getDatabaseId();
+        DatabaseImpl db = dbMapTree.getDb(dbId);
+
+        /* Database may be null if it's been deleted. */
+        if (db == null) {
+            return;
+        }
+
+        LNLogEntry lnEntry = reader.getLNLogEntry();
+        lnEntry.postFetchInit(db);
+
+        LN ln = lnEntry.getLN();
+        TreeLocation location = new TreeLocation();
+        long logLsn = reader.getLastLsn();
+
+        try {
+
+            ln.postFetchInit(db, logLsn);
+
+            recoveryUndo(location, db, lnEntry, logLsn);
+
+            /* Undo utilization info. */
+            undoUtilizationInfo(lnEntry, db, logLsn, reader.getLastEntrySize());
+
+            /*
+             * Add any db that we encounter LNs for because they'll be
+             * part of the in-memory tree and therefore should be included
+             * in the INList build.
+             */
+            inListBuildDbIds.add(dbId);
+
+            /*
+             * For temporary DBs that are encountered as MapLNs, add them
+             * to the set of databases to be removed.
+             */
+            if (ln instanceof MapLN) {
+                MapLN mapLN = (MapLN) ln;
+                if (mapLN.getDatabase().isTemporary()) {
+                    tempDbIds.add(mapLN.getDatabase().getId());
+                }
+            }
+        } finally {
+            dbMapTree.releaseDb(db);
+        }
+    }
+
+    /**
+     * Undo the changes to this node. Here are the rules that govern the
+     * action taken.
+     *
        +     *
        +     * found LN in  | abortLsn is | logLsn ==       | action taken
        +     *    tree      | null        | LSN in tree     | by undo
        +     * -------------+-------------+-----------------+-----------------------
        +     *      Y       |     N       |      Y          | replace w/abort LSN
        +     * -------------+-------------+-----------------+-----------------------
        +     *      Y       |     Y       |      Y          | remove from tree
        +     * -------------+-------------+-----------------+-----------------------
        +     *      Y       |     N/A     |      N          | no action
        +     * -------------+-------------+-----------------+-----------------------
        +     *      N       |     N/A     |    N/A          | no action (*)
        +     * (*) If this key is not present in the tree, this record doesn't
        +     * reflect the IN state of the tree and this log entry is not applicable.
        +     *
        +     * 
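        +     * As an illustrative restatement of the table (a sketch only, not the
        +     * exact control flow of the undo() method below):
        +     *
        +     *   if (found && logLsn == treeLsn) {
        +     *       if (abortLsn == NULL_LSN) {
        +     *           // remove from tree (queue the slot for deletion)
        +     *       } else {
        +     *           // replace the slot contents with the abort LSN version
        +     *       }
        +     *   } // otherwise: no action
        +     *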
+     * @param location holds state about the search in the tree. Passed
+     * in from the recovery manager to reduce object creation overhead.
+     * @param logLsn is the LSN from the just-read log entry.
+     *
+     * Undo can take place for regular recovery, for aborts, and for recovery
+     * rollback processing. Each flavor has some slight differences, which are
+     * factored out below.
+     */
+    private void recoveryUndo(
+        TreeLocation location,
+        DatabaseImpl db,
+        LNLogEntry lnEntry,
+        long logLsn) {
+
+        undo(logger, Level.FINE, location, db, lnEntry, logLsn,
+             lnEntry.getAbortLsn(), lnEntry.getAbortKnownDeleted(),
+             false/*revertPD*/,
+             lnEntry.getAbortKey(), lnEntry.getAbortData(),
+             lnEntry.getAbortVLSN(),
+             lnEntry.getAbortExpiration(), lnEntry.isAbortExpirationInHours());
+    }
+
+    public static void abortUndo(
+        Logger logger,
+        Level traceLevel,
+        TreeLocation location,
+        DatabaseImpl db,
+        LNLogEntry lnEntry,
+        long logLsn) {
+
+        undo(logger, traceLevel, location, db, lnEntry, logLsn,
+             lnEntry.getAbortLsn(), lnEntry.getAbortKnownDeleted(),
+             false/*revertPD*/,
+             lnEntry.getAbortKey(), lnEntry.getAbortData(),
+             lnEntry.getAbortVLSN(),
+             lnEntry.getAbortExpiration(), lnEntry.isAbortExpirationInHours());
+    }
+
+    public static void rollbackUndo(
+        Logger logger,
+        Level traceLevel,
+        TreeLocation location,
+        DatabaseImpl db,
+        LNLogEntry lnEntry,
+        long undoLsn,
+        RevertInfo revertTo) {
+
+        undo(logger, traceLevel, location,
+             db, lnEntry, undoLsn,
+             revertTo.revertLsn, revertTo.revertKD, revertTo.revertPD,
+             revertTo.revertKey, revertTo.revertData, revertTo.revertVLSN,
+             revertTo.revertExpiration, revertTo.revertExpirationInHours);
+    }
+
+    private static void undo(
+        Logger logger,
+        Level traceLevel,
+        TreeLocation location,
+        DatabaseImpl db,
+        LNLogEntry lnEntry,
+        long logLsn,
+        long revertLsn,
+        boolean revertKD,
+        boolean revertPD,
+        byte[] revertKey,
+        byte[] revertData,
+        long revertVLSN,
+        int revertExpiration,
+        boolean revertExpirationInHours)
+        throws DatabaseException {
+
+        boolean found = false;
+        boolean replaced = false;
+        boolean success = false;
+
+        try {
+
+            /* Find the BIN which is the parent of this LN. */
+            location.reset();
+
+            found = db.getTree().getParentBINForChildLN(
+                location, lnEntry.getKey(), false /*splitsAllowed*/,
+                false /*blindDeltaOps*/, CacheMode.DEFAULT);
+
+            if (found) {
+
+                /* This LN is in the tree. See if it needs replacing. */
+                BIN bin = location.bin;
+                int slotIdx = location.index;
+                long slotLsn = location.childLsn;
+
+                if (slotLsn == DbLsn.NULL_LSN) {
+
+                    /*
+                     * Slots can exist and have a NULL_LSN as the result of an
+                     * undo of an insertion that was done without slot reuse.
+                     *
+                     * We must be sure not to compare the NULL_LSN against the
+                     * valid LSN, lest we get an NPE. [#17427] [#17578]
+                     *
+                     * To be really sure, check that the location is truly
+                     * empty, just as an assertion safety check.
+                     */
+                    if (!(bin.isEntryKnownDeleted(slotIdx) ||
+                          bin.isEntryPendingDeleted(slotIdx))) {
+                        throw EnvironmentFailureException.unexpectedState(
+                            location + " has a NULL_LSN but the " +
+                            "slot is not empty. 
KD=" + + bin.isEntryKnownDeleted(slotIdx) + + " PD=" + + bin.isEntryPendingDeleted(slotIdx)); + } + + bin.queueSlotDeletion(slotIdx); + success = true; + return; + } + + boolean updateEntry = DbLsn.compareTo(logLsn, slotLsn) == 0; + + if (updateEntry) { + + int revertLogrecSize = 0; + if (revertLsn != DbLsn.NULL_LSN && + !bin.isEmbeddedLN(slotIdx) && + revertData == null) { + revertLogrecSize = fetchLNSize(db, 0, revertLsn); + } + + bin.recoverRecord( + slotIdx, revertLsn, revertKD, revertPD, + revertKey, revertData, revertVLSN, revertLogrecSize, + revertExpiration, revertExpirationInHours); + + replaced = true; + } + + /* + * Because we undo before we redo, the record may not be in + * the tree, even if it were in the tree when the write op + * reflected by the current logrec was performed. + } else { + assert(revertLsn == DbLsn.NULL_LSN || revertKD); + } + */ + } + + success = true; + + } finally { + + if (location.bin != null) { + location.bin.releaseLatch(); + } + + trace(logger, traceLevel, db, TRACE_LN_UNDO, success, + lnEntry.getLN(), logLsn, location.bin, found, replaced, + false, location.childLsn, revertLsn, location.index); + } + } + + /** + * Redo all LNs that should be revived. That means + * - all committed LNs + * - all prepared LNs + * - all uncommitted, replicated LNs on a replicated node. + */ + private void redoLNs( + Set lnTypes, + StartupTracker.Counter counter) + throws DatabaseException { + + long endOfFileLsn = info.nextAvailableLsn; + long firstActiveLsn = info.firstActiveLsn; + + /* + * Set up a reader to pick up target log entries from the log. + * + * There are 2 RedoLNs passes. + * + * The 1st pass applies to the DbIdMap BTree only. Only LOG_MAPLN + * logrecs are returned by the reader during this pass. All such + * logrecs are eligible for redo and will be processed further here. + * + * The 2nd pass applies to all other DB BTrees. The logrecs returned by + * the reader during this pass are all user-LN logrecs (transactional + * or not) plus LOG_NAMELN, LOG_NAMELN_TRANSACTIONAL, and + * LOG_FILESUMMARYLN. During this pass, for most logrecs we should only + * redo them if they start after the ckpt start LSN. However, there are + * two categories of logrecs that are not committed, but still need + * to be redone. These are logrecs that belong to 2PC txns that have + * been prepared, but still not committed) and logrecs that belong to + * replicated, uncommitted txns. These logrecs still need to be + * processed and can live in the log between the firstActive LSN and + * the checkpointStart LSN, so we start the LNFileReader at the First + * Active LSN. + */ + LNFileReader reader = new LNFileReader( + envImpl, readBufferSize, firstActiveLsn, + true/*forward*/, DbLsn.NULL_LSN, endOfFileLsn, null, + info.checkpointEndLsn); + + for (LogEntryType lt : lnTypes) { + reader.addTargetType(lt); + } + + DbTree dbMapTree = envImpl.getDbTree(); + TreeLocation location = new TreeLocation(); + + try { + + /* + * Iterate over the target LNs and construct in-memory tree. + */ + while (reader.readNextEntry()) { + + counter.incNumRead(); + + RedoEligible eligible = eligibleForRedo(reader); + + if (!eligible.isEligible) { + continue; + } + + /* + * We're doing a redo. Invoke the evictor in this loop to + * reduce memory consumption. + */ + invokeEvictor(); + + DatabaseId dbId = reader.getDatabaseId(); + DatabaseImpl db = dbMapTree.getDb(dbId); + + long logrecLsn = reader.getLastLsn(); + + /* + * Database may be null if it's been deleted. Only redo for + * existing databases. 
+ */ + if (db == null) { + counter.incNumDeleted(); + + tracker.countObsoleteIfUncounted( + logrecLsn, logrecLsn, null, + reader.getLastEntrySize(), dbId, + false/*trackOffset*/); + + continue; + } + + try { + LNLogEntry logrec = reader.getLNLogEntry(); + logrec.postFetchInit(db); + + counter.incNumProcessed(); + + redoOneLN( + reader, logrec, logrecLsn, dbId, db, eligible, location); + } finally { + dbMapTree.releaseDb(db); + } + } + + counter.setRepeatIteratorReads(reader.getNRepeatIteratorReads()); + + } catch (Exception e) { + traceAndThrowException(reader.getLastLsn(), "redoLns", e); + } + } + + /** + * These categories of LNs are redone: + * - LNs from committed txns between the ckpt start and end of log + * - non-transactional LNs between the ckpt start and end of log + * - LNs from prepared txns between the first active LSN and end of log + * - LNs from replicated, uncommitted, unaborted txns between the first + * active LSN and end of log that are NOT invisible. + * + * LNs that are in a rollback part of the log are invisible and will not be + * redone. + */ + private RedoEligible eligibleForRedo(LNFileReader reader) { + + if (!reader.isLN()) { + return RedoEligible.NOT; + } + + if (reader.isInvisible()) { + return RedoEligible.NOT; + } + + /* + * afterCheckpointStart indicates that we're processing log entries + * after the checkpoint start LSN. We only process prepared or + * replicated resurrected txns before checkpoint start. After + * checkpoint start, we evaluate everything. If there is no + * checkpoint, the beginning of the log is really the checkpoint start, + * and all LNs should be evaluated. + */ + boolean afterCheckpointStart = + info.checkpointStartLsn == DbLsn.NULL_LSN || + DbLsn.compareTo(reader.getLastLsn(), info.checkpointStartLsn) >= 0; + + /* + * A transaction will be either prepared OR replayed OR will be a + * regular committed transaction. A transaction would never be in more + * than one of these sets. + */ + Long txnId = reader.getTxnId(); + Txn preparedTxn = preparedTxns.get(txnId); + Txn replayTxn = info.replayTxns.get(txnId); + + if (preparedTxn != null) { + return new RedoEligible(preparedTxn); + } else if (replayTxn != null) { + return new RedoEligible(replayTxn); + } else { + if (afterCheckpointStart) { + if (txnId == null) { + /* A non-txnal LN after ckpt start is always redone. */ + return RedoEligible.ELIGIBLE_NON_TXNAL; + } + Long commitLongLsn = committedTxnIds.get(txnId); + if (commitLongLsn != null) { + /* A committed LN after ckpt start is always redone. */ + return new RedoEligible(commitLongLsn); + } + } + } + return RedoEligible.NOT; + } + + /* Packages eligibility info. */ + private static class RedoEligible{ + final boolean isEligible; + final Txn resurrectTxn; // either a prepared or a replay txn + final long commitLsn; + + static RedoEligible NOT = new RedoEligible(false); + static RedoEligible ELIGIBLE_NON_TXNAL = new RedoEligible(true); + + /* Used for eligible prepared and replicated, resurrected txns. */ + RedoEligible(Txn resurrectTxn) { + this.isEligible = true; + this.resurrectTxn = resurrectTxn; + this.commitLsn = DbLsn.NULL_LSN; + } + + /* Used for eligible, committed LNs. 
*/ + RedoEligible(long commitLsn) { + this.isEligible = true; + this.resurrectTxn = null; + this.commitLsn = commitLsn; + } + + RedoEligible(boolean eligible) { + this.isEligible = eligible; + this.resurrectTxn = null; + this.commitLsn = DbLsn.NULL_LSN; + } + + boolean isNonTransactional() { + return isEligible && + commitLsn == DbLsn.NULL_LSN && + resurrectTxn == null; + } + + boolean isCommitted() { + return commitLsn != DbLsn.NULL_LSN || isNonTransactional(); + } + } + + /* + * Redo the LN and utilization info. LNs from prepared and replay txns are + * "resurrected" and also need to re-establish its write locks and undo + * information. + */ + private void redoOneLN( + LNFileReader reader, + LNLogEntry logrec, + long logrecLsn, + DatabaseId dbId, + DatabaseImpl db, + RedoEligible eligible, + TreeLocation location) + throws DatabaseException { + + int logrecSize = reader.getLastEntrySize(); + LN ln = logrec.getLN(); + + ln.postFetchInit(db, logrecLsn); + + if (eligible.resurrectTxn != null) { + + /* + * This is a prepared or replay txn, so we need to reacquire the + * write lock as well as redoing the operation, in order to end up + * with an active txn. + */ + relock(eligible.resurrectTxn, logrecLsn, logrec, db); + } + + long treeLsn = redo( + db, location, logrec, logrecLsn, logrecSize, eligible); + + /* + * Add any db that we encounter LN's for because they'll be part of the + * in-memory tree and therefore should be included in the INList build. + */ + inListBuildDbIds.add(dbId); + + /* + * Further processing of MapLNs: + * - For temporary DBs that are encountered as MapLNs, add them to the + * set of databases to be removed. + * - For deleted MapLNs (truncated or removed DBs), redo utilization + * counting by counting the entire database as obsolete. + */ + if (ln instanceof MapLN) { + MapLN mapLN = (MapLN) ln; + + if (mapLN.getDatabase().isTemporary()) { + tempDbIds.add(mapLN.getDatabase().getId()); + } + + if (mapLN.isDeleted()) { + mapLN.getDatabase().countObsoleteDb(tracker, logrecLsn); + } + } + + /* + * For committed truncate/remove NameLNs, we expect a deleted MapLN + * after it. Maintain expectDeletedMapLNs to contain all DB IDs for + * which a deleted MapLN is not found. [#20816] + *

        + * Note that we must use the new NameLNLogEntry operationType to + * identify truncate and remove ops, and to distinguish them from a + * rename (which also deletes the old NameLN) [#21537]. + */ + if (eligible.resurrectTxn == null) { + NameLNLogEntry nameLNEntry = reader.getNameLNLogEntry(); + if (nameLNEntry != null) { + switch (nameLNEntry.getOperationType()) { + case REMOVE: + assert nameLNEntry.isDeleted(); + NameLN nameLN = (NameLN) nameLNEntry.getLN(); + expectDeletedMapLNs.add(nameLN.getId()); + break; + case TRUNCATE: + DatabaseId truncateId = + nameLNEntry.getTruncateOldDbId(); + assert truncateId != null; + expectDeletedMapLNs.add(truncateId); + break; + default: + break; + } + } + } + + boolean treeLsnIsImmediatelyObsolete = db.isLNImmediatelyObsolete(); + + if (!treeLsnIsImmediatelyObsolete && treeLsn != DbLsn.NULL_LSN) { + treeLsnIsImmediatelyObsolete = + (location.isEmbedded || location.isKD); + } + + redoUtilizationInfo( + logrec, reader.getLastEntrySize(), logrecLsn, + treeLsn, treeLsnIsImmediatelyObsolete, location.childLoggedSize, + eligible.commitLsn, eligible.isCommitted(), + db); + + trackReservedFileRecords(logrec); + } + + private void trackReservedFileRecords(LNLogEntry logrec) { + + if (!LogEntryType.LOG_RESERVED_FILE_LN.equals(logrec.getLogType()) || + logrec.isDeleted()) { + return; + } + + Long file = ReservedFileInfo.entryToKey( + new DatabaseEntry(logrec.getKey())); + + ReservedFileInfo info = ReservedFileInfo.entryToObject( + new DatabaseEntry(logrec.getData())); + + reservedFiles.add(file); + reservedFileDbs.addAll(info.dbIds); + } + + /* + * Reacquire the write lock for the given LN, so we can end up with an + * active txn. + */ + private void relock( + Txn txn, + long logLsn, + LNLogEntry logrec, + DatabaseImpl db) + throws DatabaseException { + + txn.addLogInfo(logLsn); + + /* + * We're reconstructing an unfinished transaction. We know that there + * was a write lock on this LN since it exists in the log under this + * txnId. + */ + final LockResult result = txn.nonBlockingLock( + logLsn, LockType.WRITE, false /*jumpAheadOfWaiters*/, db); + + if (result.getLockGrant() == LockGrantType.DENIED) { + throw EnvironmentFailureException.unexpectedState( + "Resurrected lock denied txn=" + txn.getId() + + " logLsn=" + DbLsn.getNoFormatString(logLsn) + + " abortLsn=" + DbLsn.getNoFormatString(logrec.getAbortLsn())); + } + + /* + * Set abortLsn and database for utilization tracking. We don't know + * lastLoggedSize, so a default will be used for utilization counting. + * This should not be common. + */ + result.setAbortInfo( + logrec.getAbortLsn(), logrec.getAbortKnownDeleted(), + logrec.getAbortKey(), logrec.getAbortData(), logrec.getAbortVLSN(), + logrec.getAbortExpiration(), logrec.isAbortExpirationInHours(), + db); + + final WriteLockInfo wli = result.getWriteLockInfo(); + + if (wli == null) { + throw EnvironmentFailureException.unexpectedState( + "Resurrected lock has no write info txn=" + txn.getId() + + " logLsn=" + DbLsn.getNoFormatString(logLsn) + + " abortLsn=" + DbLsn.getNoFormatString(logrec.getAbortLsn())); + } + + wli.setAbortLogSize(0 /*lastLoggedSize*/); + } + + /** + * Redo a committed LN for recovery. + * + * Let R and T be the record and locker associated with the current logrec + * L. Let TL be the logrec pointed to by the BIN slot for R (if any) just + * before the call to the redo() method on R. Let TT be the locker that + * wrote TL. 
+ * + * R slot found | L vs TL | L is | action + * in tree | | deletion | + * --------------+-------- +----------+------------------------ + * Y | L <= TL | | no action + * --------------+---------+----------+------------------------ + * Y | L > TL | N | replace w/log LSN + * --------------+---------+----------+------------------------ + * Y | L > TL | Y | replace w/log LSN, put + * | | | on compressor queue + * --------------+---------+----------+------------------------ + * N | n/a | N | insert into tree + * --------------+---------+----------+------------------------ + * N | n/a | Y | no action + * --------------+---------+----------+------------------------ + * + * @param location Used to return to the caller info about the R slot found + * in the tree (if any) before the slot is updated by this method. It is + * allocated once by the caller and reset by this method; this way the + * overhead of object creation is avoided. + * + * @param logrec the LN logrec L that is being redone. + * + * @param logrecLsn the LSN of L. + * + * @param logrecSize the on-disk size of L. + * + * @return the LSN found in the tree, or NULL_LSN if the tree did not + * contain any slot for the record. + */ + private long redo( + DatabaseImpl db, + TreeLocation location, + LNLogEntry logrec, + long logrecLsn, + int logrecSize, + RedoEligible eligible) + throws DatabaseException { + + boolean found; + boolean foundNotKD = false; + boolean replaced = false; + boolean inserted = false; + boolean success = false; + + DbConfigManager configManager = db.getEnv().getConfigManager(); + + LogEntryType logrecType = logrec.getLogType(); + LN logrecLN = logrec.getLN(); + long logrecVLSN = logrecLN.getVLSNSequence(); + boolean isDeletion = logrecLN.isDeleted(); + byte[] logrecKey = logrec.getKey(); + byte[] logrecData = logrec.getEmbeddedData(); + long abortLsn = logrec.getAbortLsn(); + boolean abortKD = logrec.getAbortKnownDeleted(); + int expiration = logrec.getExpiration(); + boolean expirationInHours = logrec.isExpirationInHours(); + + long treeLsn = DbLsn.NULL_LSN; + + /* + * Let RL be the logrec being replayed here. Let R and T be + * the record and the txn associated with RL. + * + * We say that RL is replayed "blindly" if the search for + * R's key in the tree lands on a BIN-delta, this delta does + * not contain R's key, and we don't mutate the delta to a + * full BIN to check if R is indeed in the tree or not; + * instead we just insert R in the delta. + * + * RL can be applied blindly only if RL is a "pure" insertion, + * i.e. RL is an insertion and R did not exist prior to T. + * + * A non-pure insertion (where R existed before T, it was + * deleted by T, and then reinserted by T) cannot be applied + * blindly, because if it were, it would generate a logrec + * with abortLSN == NULL, and if T were aborted, undoing the + * logrec with the NULL abortLSN would cause the loss of the + * pre-T version of R. So, to replay a non-pure insertion, + * we must check if a slot for R exists in the tree already, + * and if so, generate a new logrec with an abortLSN pointing + * to the pre-T version of R. + * + * Updates and deletes cannot be replayed blindly either, + * because we wouldn't be able to generate logrecs with the + * correct abortLsn, nor count the previous version of R as + * obsolete. + * + * The condition (abortLsn == DbLsn.NULL_LSN || abortKD) + * guarantees that LN is a pure insertion. 
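+         *
+         * Condensed form of the predicate computed just below (a sketch,
+         * using shorthand names rather than the real identifiers):
+         *
+         *   blind = blindOpsConfigured && committed &&
+         *           (immediatelyObsoleteDB ||
+         *            (pureInsertion && logrec is LOG_INS_LN[_TRANSACTIONAL]))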
+         */
+        boolean blindInsertions =
+            (configManager.getBoolean(
+                EnvironmentParams.BIN_DELTA_BLIND_OPS) &&
+             eligible.isCommitted() &&
+             (db.isLNImmediatelyObsolete() ||
+              ((abortLsn == DbLsn.NULL_LSN || abortKD) &&
+               (logrecType.equals(LogEntryType.LOG_INS_LN_TRANSACTIONAL) ||
+                logrecType.equals(LogEntryType.LOG_INS_LN)))));
+
+        try {
+
+            /*
+             * Find the BIN which is the parent of this LN.
+             */
+            location.reset();
+
+            found = db.getTree().getParentBINForChildLN(
+                location, logrecKey, true /*splitsAllowed*/,
+                blindInsertions /*blindDeltaOps*/, CacheMode.DEFAULT);
+
+            if (!found && (location.bin == null)) {
+
+                /*
+                 * There is no possible parent for this LN. This tree was
+                 * probably compressed away.
+                 */
+                success = true;
+                return DbLsn.NULL_LSN;
+            }
+
+            BIN bin = location.bin;
+            int index = location.index;
+
+            foundNotKD = found && !bin.isEntryKnownDeleted(index);
+
+            if (foundNotKD) {
+
+                treeLsn = location.childLsn;
+
+                int lsnCmp = DbLsn.compareTo(logrecLsn, treeLsn);
+
+                /*
+                 * TL <= L
+                 */
+                if (lsnCmp >= 0) {
+
+                    /*
+                     * If L is a deletion, make sure the KD and PD flags in the
+                     * slot are set correctly. Specifically, if T is committed,
+                     * set KD and clear PD (if set); otherwise set PD (we know
+                     * KD is not set here).
+                     */
+                    boolean redoKD = false;
+                    boolean redoPD = false;
+                    if (isDeletion) {
+                        if (eligible.resurrectTxn != null) {
+                            redoPD = true;
+                        } else {
+                            redoKD = true;
+                        }
+                    }
+
+                    /*
+                     * If TL < L apply L, i.e., replace the TL version with the
+                     * newer L version. Do not attach the LN as a resident LN
+                     * would consume too much memory.
+                     *
+                     * If TL == L we only need to take action if L is a
+                     * committed deletion; in this case, we want to set the KD
+                     * and clear the PD flag.
+                     */
+                    if (lsnCmp > 0) {
+                        bin.recoverRecord(
+                            index, logrecLsn, redoKD, redoPD,
+                            logrecKey, logrecData, logrecVLSN, logrecSize,
+                            expiration, expirationInHours);
+
+                        replaced = true;
+
+                    } else if (isDeletion) {
+                        if (redoKD) {
+                            if (!bin.isEntryKnownDeleted(index)) {
+                                bin.setKnownDeleted(index);
+                            }
+                        } else {
+                            assert(bin.isEntryPendingDeleted(index));
+                            assert(!bin.isEntryKnownDeleted(index));
+                        }
+                    }
+
+                    /*
+                     * If L is a deletion, put its slot on the compressor
+                     * queue. Even when there is a resurrectTxn, it will
+                     * not contain delete info ????.
+                     */
+                    if (isDeletion) {
+                        bin.queueSlotDeletion(index);
+                    }
+                }
+
+            } else if (found) {
+
+                treeLsn = bin.getLsn(index);
+
+                /*
+                 * There is a KD slot with the record's key. If the current
+                 * logrec is not a deletion, insert the record in the existing
+                 * slot.
+                 */
+                if (!isDeletion) {
+
+                    if (treeLsn == DbLsn.NULL_LSN ||
+                        DbLsn.compareTo(logrecLsn, treeLsn) > 0) {
+                        bin.recoverRecord(
+                            index, logrecLsn, false/*KD*/, false/*PD*/,
+                            logrecKey, logrecData, logrecVLSN, logrecSize,
+                            expiration, expirationInHours);
+
+                        inserted = true;
+                    }
+                } else {
+                    bin.queueSlotDeletion(index);
+                    /*
+                     * logrecLsn cannot be > treeLsn, because the record must
+                     * have been re-inserted between treeLsn and logrecLsn,
+                     * in which case, the slot could not have been KD. 
+                     */
+                    assert(treeLsn == DbLsn.NULL_LSN ||
+                           DbLsn.compareTo(logrecLsn, treeLsn) <= 0);
+                }
+
+            } else if (bin.isBINDelta()) {
+
+                assert(blindInsertions);
+
+                index = bin.insertEntry1(
+                    null/*ln*/, logrecKey, logrecData, logrecLsn,
+                    true/*blindInsertion*/);
+
+                assert((index & IN.INSERT_SUCCESS) != 0);
+
+                inserted = true;
+                index &= ~IN.INSERT_SUCCESS;
+                location.index = index;
+
+                bin.setLastLoggedSize(index, logrecSize);
+                bin.setExpiration(index, expiration, expirationInHours);
+
+                if (bin.isEmbeddedLN(index)) {
+                    bin.setCachedVLSN(index, logrecVLSN);
+                }
+
+                /*
+                 * If the current logrec is a deletion, set the KD flag to
+                 * prevent fetching a cleaned LN (we know that the logrec is
+                 * committed).
+                 */
+                if (isDeletion) {
+                    assert(eligible.resurrectTxn == null);
+                    bin.setKnownDeleted(index);
+                }
+
+            } else {
+
+                /*
+                 * This LN's key is not in the tree. If the current logrec is
+                 * not a deletion, insert the LN into the tree.
+                 */
+                if (!isDeletion) {
+
+                    index = bin.insertEntry1(
+                        null, logrecKey, logrecData, logrecLsn,
+                        false/*blindInsertion*/);
+
+                    assert((index & IN.INSERT_SUCCESS) != 0);
+
+                    inserted = true;
+                    index &= ~IN.INSERT_SUCCESS;
+                    location.index = index;
+
+                    bin.setLastLoggedSize(index, logrecSize);
+                    bin.setExpiration(index, expiration, expirationInHours);
+
+                    if (bin.isEmbeddedLN(index)) {
+                        bin.setCachedVLSN(index, logrecVLSN);
+                    }
+                }
+            }
+
+            /*
+             * We're about to cast away this instantiated LN. It may have
+             * registered for some portion of the memory budget, so free
+             * that now. Specifically, this would be true for the
+             * DbFileSummaryMap in a MapLN.
+             */
+            logrecLN.releaseMemoryBudget();
+
+            success = true;
+            return treeLsn;
+
+        } finally {
+            if (location.bin != null) {
+                location.bin.releaseLatch();
+            }
+
+            trace(logger, db,
+                  TRACE_LN_REDO, success, logrecLN,
+                  logrecLsn, location.bin, foundNotKD,
+                  replaced, inserted,
+                  location.childLsn, DbLsn.NULL_LSN, location.index);
+        }
+    }
+
+    /**
+     * Update utilization info during redo.
+     *
+     * Let R and T be the record and txn associated with the current logrec L.
+     * Let TL be the logrec pointed to by the BIN slot for R (if any) just
+     * before the call to the redo() method on R. Let TT be the txn that wrote
+     * TL. Let AL be the logrec whose LSN is stored as the abortLSN in L (if
+     * L.abortLsn != NULL).
+     *
+     * This method considers whether L, TL, or AL should be counted as
+     * obsolete, and if so, it does the counting.
+     *
+     * @param logrec The deserialized logrec L that is being processed.
+     *
+     * @param logrecSize The on-disk size of L.
+     *
+     * @param logrecLsn The LSN of L.
+     *
+     * @param treeLsn The LSN of TL. Will be NULL_LSN if there was no R slot
+     * in the BTree, or the R slot was a KD slot with a NULL_LSN.
+     *
+     * @param treeLsnIsImmediatelyObsolete True if (a) the DB is a dups DB
+     * with all immediately obsolete LNs, or (b) treeLsn is NULL_LSN, or (c)
+     * TL is an embedded logrec (as indicated by the embedded flag in the
+     * slot).
+     *
+     * @param treeLNLoggedSize The on-disk size of TL.
+     *
+     * @param commitLsn The commitLSN of T, if T is a Txn that did commit.
+     *
+     * @param isCommitted True if T is non-transactional or a Txn that did
+     * commit.
+     *
+     * @param db The DatabaseImpl obj for the DB containing R.
+     *
+     * There are cases where we do not count the previous version of an LN as
+     * obsolete when that obsolete LN occurs prior to the recovery interval.
+     * This happens when a later version of the LN is current in the tree
+     * because its parent IN has been flushed non-provisionally after it. 
The + * old version of the LN is not in the tree so we never encounter it during + * recovery and cannot count it as obsolete. For example: + * + * 100 LN-A + * checkpoint occurred (ckpt begin, flush, ckpt end) + * 200 LN-A + * 300 BIN parent of 200 + * 400 IN parent of 300, non-provisional via a sync + * + * no utilization info is flushed + * no checkpoint + * crash and recover + * + * 200 is the current LN-A in the tree. When we redo 200 we do not count + * anything as obsolete because the log and tree LSNs are equal. 100 is + * never counted obsolete because it is not in the recovery interval. + * + * The same thing occurs when a deleted LN is replayed and the old version + * is not found in the tree because it was compressed and the IN was + * flushed non-provisionally. + * + * In these cases we may be able to count the abortLsn as obsolete but that + * would not work for non-transactional entries. + */ + private void redoUtilizationInfo( + LNLogEntry logrec, + int logrecSize, + long logrecLsn, + long treeLsn, + boolean treeLsnIsImmediatelyObsolete, + int treeLNLoggedSize, + long commitLsn, + boolean isCommitted, + DatabaseImpl db) { + + /* + * If the logrec is "immediately obsolete", L was counted as obsolete + * during normal processing and it should be counted here only if its + * obsoleteness was not made durable before the crash, i.e., if it is + * after the latest FSLN for the containing log file. No need to record + * L's LSN in the tracker, because the cleaner already knows that all + * immediately-obsolete logrecs are obsolete. + */ + if (logrec.isImmediatelyObsolete(db)) { + tracker.countObsoleteIfUncounted( + logrecLsn, logrecLsn, null, logrecSize, db.getId(), + false /*trackOffset*/); + } + + /* + * Nothing more to be done of immediatelly obsolete DBs. If the treeLsn + * or the abortLsn are before the ckpt start, then they are already + * counted as obsolete because utilization info is flushed just before + * ckpt end. And if they are after the ckpt start, then they have been + * processed earlier in this RedoLNs pass and as a result counted by + * the countObsoleteIfUncounted() call above. + */ + if (db.isLNImmediatelyObsolete()) { + return; + } + + /* Determine whether to count the treeLsn or the logrecLsn obsolete. */ + if (treeLsn != DbLsn.NULL_LSN) { + + final int cmpLogLsnToTreeLsn = DbLsn.compareTo(logrecLsn, treeLsn); + + if (cmpLogLsnToTreeLsn < 0) { + + /* + * L < TL. + * + * In normal standalone recovery, if L < TL, we can assume + * that TL belongs to a committed txn. This is because the + * undo phase happened first, and any uncommitted lsns would + * have been removed from the tree. But for replicated and + * prepared txns, this assumption is not true; such txns + * may be committed or aborted later on. So, TL may belong to + * a later, uncommitted txn. [#17710] [#17022] + * + * L may be obsolete. It is obsolete iff: + * + * 1. it is immediately obsolete, or + * 2. TT is committed (we can check this by checking whether + * TL is not among the resurrected LSNs), or + * 3. L is not the last logrec on R by T. + * + * L is not obsolete if TT is not committed and L is the last + * logrec on R by T. These conditions, together with the fact + * that L < TL imply that T != TT and L is the abortLsn of TT. + * + * We have already handled case 1 above. We cannot check for + * case 3, so we will conservatively assume L is indeed + * obsolete but record its LSN in the tracker only if we know + * for sure that it is obsolete, ie, if TT is committed. 
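+                 *
+                 * Illustrative timeline for this case (hypothetical LSNs):
+                 *
+                 *   100  L   (T writes R)
+                 *   200  TL  (TT overwrites R)
+                 *   300  TT commits
+                 *
+                 * Here L=100 is assumed obsolete, and TL=200 is passed as
+                 * the "lsn" param to countObsoleteIfUncounted().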
+ * + * Notice also that if L is indeed obsolete, we cannot be + * sure which logrec caused L to become obsolete during + * normal processing. So, here we conservatively assume + * that it was TL that made L obsolete, and pass TL as the + * "lsn" param to countObsoleteIfUncounted(). This will + * result in double counting if (a) another logrec L', + * with L < L' < TL, made L obsolete, an FSLN for L's + * logfile was logged after L' and before TL, and no other + * FSLN for the that logfile was logged after TL. + */ + if (!logrec.isImmediatelyObsolete(db)) { + tracker.countObsoleteIfUncounted( + logrecLsn, treeLsn, null, + fetchLNSize(db, logrecSize, logrecLsn), db.getId(), + !resurrectedLsns.contains(treeLsn) /*trackOffset*/); + } + + } else if (cmpLogLsnToTreeLsn > 0) { + + /* + * TL < L. + * + * Basically, the above discussion for the L < TL case applies + * here as well, with the roles of L and TL reversed. + * + * Notice that in this case, there cannot be another R logrec + * L' such that TL < L' < L. To see why, assume L' exists and + * consider these 2 cases: (a) L' > ckpt_start. Then L' was + * replayed earlier during the current RedoLNs pass, so at this + * time, TL must be L'. (b) L' < ckpt_start. Then L' <= TL, + * because the ckpt writes all dirty BINs to the log and + * RedoINs is done before RedoLNs. + * + * The above observation implies that either T == TT or TL is + * the abortLSN of L. If TL == AL, then it is T's commit logrec + * that made TL obsolete during normal processing. However, here + * we pass L as the lsn param of countObsoleteIfUncounted(). + * As explained below this can result in missing counting for + * a real obsolete logrec. + * + * If TL is immediatelly obsolete, it has been counted already, + * for the same reason described above in the case of + * db.isLNImmediatelyObsolete(). So, to avoid double counting, + * we don't attempt to count it here again. + */ + if (!treeLsnIsImmediatelyObsolete) { + tracker.countObsoleteIfUncounted( + treeLsn, logrecLsn, null, + fetchLNSize(db, treeLNLoggedSize, treeLsn), db.getId(), + isCommitted/*trackOffset*/); + } + } + } + + /* + * The abortLSN is obsolete iff T is a committed txn. In fact, it is + * the commit logrec of T that makes abortLSN obsolete. So, we pass + * commitLSN as the "lsn" param to the countObsoleteIfUncounted() + * call below. However, to avoid excessive double-counting, we don't + * always call countObsoleteIfUncounted(AL, T-cmt). In relatively + * rare scenarios, this can result in failing to count AL as obsolete. + * Consider the following cases: + * + * TL < L + * ------- + * + * If TL < L we don't call countObsoleteIfUncounted() on AL, because + * in most cases this has been done already during the current RedoLNs + * pass or AL was counted durably as obsolete during normal processing. + * The reasoning is as follows. As explained above, if TL < L, one of + * the following is true: + * + * (a) TL == AL. + * + * TL/AL --- TT-cmt --- L --- (FSLN)? --- T-cmt + * + * countObsoleteIfUncounted(TL, L) was called above. If FSLN + * exists, this call did not count TL. However, FSLN does not + * contain TL, because it is T-cmt that recorded TL as obsolete. + * Therefore, assuming no other FSLN exists after T-cmt, we miss + * counting TL by not calling countObsoleteIfUncounted(TL, T-cmt). + * + * However, in most cases, there won't be any FSLN between L and + * T-cmt, or there will be another FLSN after T-cmt. As a result, + * the countObsoleteIfUncounted(TL, L) call suffices. 
Therefore, + * we prefer to occasionally miss an abortLSN than doing too much + * double counting. + * + * (b1) T == TT and TL < ckpt_start. In + * + * AL --- TL --- ckpt-start --- L --- T-cmt --- (FSLN)? + * + * TL and L have the same abortLSN and the same commitLSN. TL is + * not processed during this RedoLNs pass, so unless an FSLN was + * logged during normal processing after T-cmt, we miss counting + * AL. Notice that an FSLN will exist if T-cmt occurs before + * ckpt-end, which is the more common case. + * + * (b2) T == TT and TL > ckpt_start + * + * ckpt-start --- TL --- L --- (FSLN-1)? --- T-cmt --- (FSLN)? + * + * TL and L have the same abortLSN and the same commitLSN. + * Furthermore, TL was processed during the current RedoLNs pass. + * We assume that what ever was done about AL during the + * processing of TL was correct and we don't repeat it. + * + * L < TL + * ------- + * + * (a) ckpt_start --- AL --- L --- T-cmt --- (FSLN)? --- TL --- (FSLN)? + * + * AL was processed earlier in this RedoLNs pass. When it was + * processed, it was < TL, so countObsoleteIfUncounted(AL, TL) + * was called. There is no reason to count again. + * + * (b) ckpt_start --- AL --- L --- TL --- (FSLN))? --- T-cmt --- (FSLN)? + * + * AL was processed earlier in this RedoLNs pass. When it was + * processed, it was < TL, so countObsoleteIfUncounted(AL, TL) + * was called. To avoid double counting, we will not call + * countObsoleteIfUncounted(AL, t-cmt). However, in this case + * we will fail counting AL as obsolete if there is an FSLN + * between TL and T-cmt and no FSLN after T-cmt. + * + * (c) AL --- ckpt_start --- L --- TL + * + * In this case we call countObsoleteIfUncounted(AL, t-cmt) + * + * L == TL + * ------- + * + * (a) ckpt_start --- AL --- L/TL --- (FSLN)? --- T-cmt --- (FSLN)? + * + * Same as L < TL, case (b). + * + * (c) AL --- ckpt_start --- L --- TL + * + * Same as L < TL, case (c). + * + * As usual, we should not count abortLSN as obsolete here if it is + * an immediatelly obsolete logrec (i.e. if abortKD == true or + * abortData != null). + */ + + long abortLsn = logrec.getAbortLsn(); + boolean abortKD = logrec.getAbortKnownDeleted(); + + if (commitLsn != DbLsn.NULL_LSN && + abortLsn != DbLsn.NULL_LSN && + !abortKD && + logrec.getAbortData() == null) { + + if (treeLsn == DbLsn.NULL_LSN || + (DbLsn.compareTo(logrecLsn, treeLsn) <= 0 && + DbLsn.compareTo(abortLsn, info.checkpointStartLsn) < 0)) { + tracker.countObsoleteIfUncounted( + abortLsn, commitLsn, null, 0, db.getId(), + true/*trackOffset*/); + } + } + } + + /** + * Update utilization info during recovery undo (not abort undo). + * + * Let R and T be the record and txn associated with the current logrec L. + * L is for sure obsolete. It may or may have been counted as such already. + * Consider the following cases: + * + * 1. L is an immediately obsolete logrec. Then, L was counted as obsolete + * during normal processing and it should be counted here only if its + * obsoleteness was not made durable before the crash, i.e., if it is + * after the latest FSLN for the containing log file. No need to record + * L's LSN in the tracker, because L is immediately obsolete. + * + * 2. L is not an immediately obsolete logrec. + * + * 2.1. L is the last logrec for R by T. In this case, L was not counted + * as obsolete during normal processing. L is made obsolete here by the + * fact that it is undone. + * + * 2.2 L is not the last logrec for R by T. 
In this case, L was counted + * as obsolete during normal processing it should be counted here only + * if its obsoleteness was not made durable before the crash. + * + * Unfortunately, we cannot differentiate between cases 2.1 and 2.2, + * so the code below calls tracker.countObsoleteUnconditional() for + * both of those cases, which can result to some double counting in + * case 2.2. However, it is not very common for a txn to update the + * same record multiple times, so this should not be a big issue. + */ + private void undoUtilizationInfo( + LNLogEntry logrec, + DatabaseImpl db, + long logrecLsn, + int logrecSize) { + + if (logrec.isImmediatelyObsolete(db)) { + tracker.countObsoleteIfUncounted( + logrecLsn, logrecLsn, null/*logEntryType*/, + logrecSize, db.getId(), false /*trackOffset*/); + } else { + tracker.countObsoleteUnconditional( + logrecLsn, null/*logEntryType*/, + logrecSize, db.getId(), true /*trackOffset*/); + } + } + + /** + * Fetches the LN to get its size only if necessary and so configured. + */ + private static int fetchLNSize(DatabaseImpl db, int size, long lsn) + throws DatabaseException { + + if (size != 0) { + return size; + } + final EnvironmentImpl envImpl = db.getEnv(); + if (!envImpl.getCleaner().getFetchObsoleteSize(db)) { + return 0; + } + try { + final LogEntryHeader header = + envImpl.getLogManager().getWholeLogEntry(lsn).getHeader(); + return header.getEntrySize(); + } catch (FileNotFoundException e) { + /* Ignore errors if the file was cleaned. */ + } + return 0; + } + + /** + * Build the in memory inList with INs that have been made resident by the + * recovery process. + */ + private void buildINList() + throws DatabaseException { + + envImpl.getInMemoryINs().enable(); // enable INList + envImpl.getEvictor().setEnabled(true); + envImpl.getDbTree().rebuildINListMapDb(); // scan map db + + /* For all the dbs that we read in recovery, scan for resident INs. */ + for (DatabaseId dbId : inListBuildDbIds) { + /* We already did the map tree, don't do it again. */ + if (!dbId.equals(DbTree.ID_DB_ID)) { + DatabaseImpl db = envImpl.getDbTree().getDb(dbId); + try { + if (db != null) { + /* Temp DBs will be removed, skip build. */ + if (!db.isTemporary()) { + db.getTree().rebuildINList(); + } + } + } finally { + envImpl.getDbTree().releaseDb(db); + } + } + } + } + + /** + * Remove all temporary databases that were encountered as MapLNs during + * recovery undo/redo. A temp DB needs to be removed when it is not closed + * (closing a temp DB removes it) prior to a crash. We ensure that the + * MapLN for every open temp DBs is logged each checkpoint interval. + */ + private void removeTempDbs() + throws DatabaseException { + + startupTracker.start(Phase.REMOVE_TEMP_DBS); + startupTracker.setProgress(RecoveryProgress.REMOVE_TEMP_DBS); + Counter counter = startupTracker.getCounter(Phase.REMOVE_TEMP_DBS); + + DbTree dbMapTree = envImpl.getDbTree(); + BasicLocker locker = + BasicLocker.createBasicLocker(envImpl, false /*noWait*/); + boolean operationOk = false; + try { + for (DatabaseId tempDbId : tempDbIds) { + counter.incNumRead(); + DatabaseImpl db = dbMapTree.getDb(tempDbId); + dbMapTree.releaseDb(db); // Decrement use count. + if (db != null) { + assert db.isTemporary(); + if (!db.isDeleted()) { + try { + counter.incNumProcessed(); + envImpl.getDbTree().dbRemove(locker, + db.getName(), + db.getId()); + } catch (DbTree.NeedRepLockerException e) { + /* Should never happen; db is never replicated. */ + throw EnvironmentFailureException. 
+ unexpectedException(envImpl, e); + } catch (DatabaseNotFoundException e) { + throw EnvironmentFailureException. + unexpectedException(e); + } + } else { + counter.incNumDeleted(); + } + } + } + operationOk = true; + } catch (Error E) { + envImpl.invalidate(E); + throw E; + } finally { + locker.operationEnd(operationOk); + startupTracker.stop(Phase.REMOVE_TEMP_DBS); + + } + } + + /** + * For committed truncate/remove NameLNs with no corresponding deleted + * MapLN found, delete the MapLNs now. MapLNs are deleted by + * truncate/remove operations after logging the Commit, so it is possible + * that a crash occurs after logging the Commit of the NameLN, and before + * deleting the MapLN. [#20816] + */ + private void deleteMapLNs() { + for (final DatabaseId id : expectDeletedMapLNs) { + final DatabaseImpl dbImpl = envImpl.getDbTree().getDb(id); + if (dbImpl == null) { + continue; + } + + /* + * Delete the MapLN, count the DB obsolete, set the deleted + * state, and call releaseDb. + */ + dbImpl.finishDeleteProcessing(); + } + } + + /* + * Throws an EnvironmentFailureException if there is any Node entry that + * meets these qualifications: + * 1. It is in the recovery interval. + * 2. it belongs to a duplicate DB. + * 3. Its log version is less than 8. + */ + private void checkLogVersion8UpgradeViolations() + throws EnvironmentFailureException { + + /* + * Previously during the initial INFileReader pass (for ID tracking) we + * collected a set of database IDs for every Node log entry in the + * recovery interval that has a log version less than 8. Now that the + * DbTree is complete we can check to see if any of these are in a + * duplicates DB. + */ + boolean v8DupNodes = false; + for (DatabaseId dbId : logVersion8UpgradeDbs) { + final DbTree dbTree = envImpl.getDbTree(); + final DatabaseImpl db = dbTree.getDb(dbId); + try { + + /* + * If DB is null (deleted in the recovery interval), no + * conversion is needed because all entries for the DB will be + * discarded. [#22203] + */ + if (db == null) { + continue; + } + + if (db.getSortedDuplicates()) { + v8DupNodes = true; + break; + } + } finally { + dbTree.releaseDb(db); + } + } + + boolean v8Deltas = logVersion8UpgradeDeltas.get(); + + if (v8DupNodes || v8Deltas) { + final String illegalEntries = v8DupNodes ? + "JE 4.1 duplicate DB entries" : + "JE 4.1 BINDeltas"; + throw EnvironmentFailureException.unexpectedState + (illegalEntries + " were found in the recovery interval. " + + "Before upgrading to JE 5.0, the following utility " + + "must be run using JE 4.1 (4.1.20 or later): " + + (envImpl.isReplicated() ? + "DbRepPreUpgrade_4_1 " : "DbPreUpgrade_4_1 ") + + ". See the change log."); + } + } + + /** + * Dump a tracking list into a string. + */ + private String printTrackList(List trackingList) { + if (trackingList != null) { + StringBuilder sb = new StringBuilder(); + Iterator iter = trackingList.iterator(); + sb.append("Trace list:"); + sb.append('\n'); + while (iter.hasNext()) { + sb.append(iter.next()); + sb.append('\n'); + } + return sb.toString(); + } + return null; + } + + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. This is used to + * construct verbose trace messages for individual log entry processing. 
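+     *
+     * An illustrative message (field order taken from the code below; the
+     * debugType string and values shown are made up for this example):
+     *
+     *   "LN_UNDO success=true lsn=0x12/0x3ab found=true replaced=true
+     *    inserted=false index=4"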
+ */ + private static void trace(Logger logger, + DatabaseImpl database, + String debugType, + boolean success, + Node node, + long logLsn, + IN parent, + boolean found, + boolean replaced, + boolean inserted, + long replacedLsn, + long abortLsn, + int index) { + trace(logger, Level.FINE, database, debugType, success, node, logLsn, + parent, found, replaced, inserted, replacedLsn, abortLsn, index); + } + + private static void trace(Logger logger, + Level level, + DatabaseImpl database, + String debugType, + boolean success, + Node node, + long logLsn, + IN parent, + boolean found, + boolean replaced, + boolean inserted, + long replacedLsn, + long abortLsn, + int index) { + Level useLevel = level; + if (!success) { + useLevel = Level.SEVERE; + } + if (logger.isLoggable(useLevel)) { + StringBuilder sb = new StringBuilder(); + sb.append(debugType); + sb.append(" success=").append(success); + if (node instanceof IN) { + sb.append(" node="); + sb.append(((IN) node).getNodeId()); + } + sb.append(" lsn="); + sb.append(DbLsn.getNoFormatString(logLsn)); + if (parent != null) { + sb.append(" parent=").append(parent.getNodeId()); + } + sb.append(" found="); + sb.append(found); + sb.append(" replaced="); + sb.append(replaced); + sb.append(" inserted="); + sb.append(inserted); + if (replacedLsn != DbLsn.NULL_LSN) { + sb.append(" replacedLsn="); + sb.append(DbLsn.getNoFormatString(replacedLsn)); + } + if (abortLsn != DbLsn.NULL_LSN) { + sb.append(" abortLsn="); + sb.append(DbLsn.getNoFormatString(abortLsn)); + } + sb.append(" index=").append(index); + if (useLevel.equals(Level.SEVERE)) { + LoggerUtils.traceAndLog( + logger, database.getEnv(), useLevel, sb.toString()); + } else { + LoggerUtils.logMsg( + logger, database.getEnv(), useLevel, sb.toString()); + } + } + } + + private void traceAndThrowException(long badLsn, + String method, + Exception originalException) + throws DatabaseException { + + String badLsnString = DbLsn.getNoFormatString(badLsn); + LoggerUtils.traceAndLogException(envImpl, "RecoveryManager", method, + "last LSN = " + badLsnString, + originalException); + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + "last LSN=" + badLsnString, originalException); + } +} diff --git a/src/com/sleepycat/je/recovery/RollbackTracker.java b/src/com/sleepycat/je/recovery/RollbackTracker.java new file mode 100644 index 0000000..1e2dec8 --- /dev/null +++ b/src/com/sleepycat/je/recovery/RollbackTracker.java @@ -0,0 +1,1063 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.logging.Level;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.cleaner.RecoveryUtilizationTracker;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentFailureReason;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LNFileReader;
+import com.sleepycat.je.tree.TreeLocation;
+import com.sleepycat.je.txn.RollbackEnd;
+import com.sleepycat.je.txn.RollbackStart;
+import com.sleepycat.je.txn.TxnChain;
+import com.sleepycat.je.txn.TxnManager;
+import com.sleepycat.je.txn.UndoReader;
+import com.sleepycat.je.txn.TxnChain.RevertInfo;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * RollbackTracker is used to detect rollback periods in the log that are the
+ * result of HA replica syncups. These rollback periods affect how LNs should
+ * be processed at recovery. Rollbacks differ from aborts in that a rollback
+ * returns an LN to its previous version, whether intra or inter-txnal, while
+ * an abort always returns an LN to its pre-txn version.
+ *
+ * What is a Rollback Period?
+ * --------------------------
+ * The rollback represents the logical truncation of the log. Any
+ * transactional LNs in that rollback period should be undone, even if they
+ * are ultimately part of a committed transaction. See the wiki page on Syncup
+ * Recovery for the full design. See
+ * com.sleepycat.je.rep.impl.node.Replay.rollback for the steps taken at the
+ * time of the rollback.
+ *
+ * A RollbackStart record is logged at the start of any rollback, and a
+ * RollbackEnd is logged at the completion of a rollback. RollbackStarts refer
+ * to a matchpoint, and the area between the matchpoint and the RollbackStart
+ * is the rollback period. The RollbackTracker peruses RollbackStarts and
+ * RollbackEnds and generates a map of the rollback periods.
+ *
+ * RollbackStarts and their starting Matchpoints can be nested or can be
+ * distinct, but several invariants are in place and can be enforced. For
+ * example:
+ *
+ * LSN
+ * ---
+ * 100  txnA commit
+ *
+ * 200  txnB abort
+ * 250  LN for txnC
+ * 300  txnC abort
+ * ..
+ * 400  RollbackStart A (starting matchpoint = 200)
+ * 500  RollbackEnd A
+ * ...
+ * 600  RollbackStart B (starting matchpoint = 200)
+ * 700  RollbackStart C (starting matchpoint = 100)
+ * 800  RollbackEnd C
+ * 900  txnD abort
+ * 1000 RollbackStart D (starting matchpoint = 900)
+ *
+ * This log creates four rollback periods:
+ * 1) LSN 100 -> 700 (defined by RollbackStart C). This has two rollback
+ *    periods nested within.
+ * 2) LSN 200 -> 400 (defined by RollbackStart A), nested within B
+ * 3) LSN 200 -> 600 (defined by RollbackStart B), nested within C
+ * 4) LSN 900 -> 1000 (defined by RollbackStart D)
+ *
+ * - There can be no commits or aborts within a rollback period, because we
+ * shouldn't have executed a soft recovery that undid a commit or abort in
+ * the rollback period.
+ *
+ * - There can be no LN_TXs between a RollbackStart and its matching
+ * RollbackEnd (there should be no LN write operations happening during the
+ * syncup). However, there might be INs written by a checkpoint, and eviction.
+ *
+ * - The recovery period should never see a RollbackEnd without its matching
+ * RollbackStart record, though it is possible to see a RollbackStart that has
+ * no RollbackEnd.
+ *
+ * - There can never be any overlapping or intersection of periods, because a
+ * rollback period is supposed to be like a truncation of the log. Since that
+ * log is "gone", a subsequent rollback shouldn't find a matchpoint inside
+ * another rollback period.
+ *
+ * - A child period must be wholly contained between the parent's matchpoint
+ * and RollbackStart. This is simply due to the way rollbacks occur. A parent
+ * rollback has a Matchpoint <= the child's Matchpoint or it wouldn't be
+ * nested. The parent's RollbackStart > the child's RollbackEnd, since the
+ * parent occurs after the child in time.
+ *
+ * The RollbackTracker keeps a list of all the rollback periods. Some are
+ * distinct, some are nested.
+ *
+ * Recovery processing and rollback periods
+ * ----------------------------------------
+ * The actions taken at a rollback may not have been made persistent to the
+ * log, so at recovery, we literally mimic and replay these two steps: (a)
+ * make sure invisible log entries have their invisible bit on and (b) make
+ * sure all INs reflect the correct LNs. All use of the rollback periods and
+ * tracker takes place on the backwards scans. The RollbackStart and
+ * RollbackEnd entries are read during the first recovery undo pass. When a
+ * rollback period is found, a transaction chain is constructed for each
+ * transaction that was active in the period, to support a repeat of the
+ * actions taken originally.
+ *
+ * The first undo pass, for the mapping tree, has to construct a map of
+ * recovery periods. Since the mapping tree only has MapLNs, and we never
+ * write any txnal MapLNs, that first pass does not encounter any txnal LNs.
+ * The next two undo passes consult the rollback period map to determine if an
+ * LN needs to be rolled back, or just treated like other LNs.
+ *
+ * Rollback periods that precede the checkpoint start can be ignored, because
+ * we can be assured that all the INs and LNs modified by that rollback were
+ * made persistent by the checkpoint. Ignoring such periods is required, and
+ * is not just an optimization, because it guarantees that we will not need to
+ * create a transaction chain that needs to traverse the log beyond the first
+ * active lsn. A rollback period precedes the checkpoint if its RollbackEnd is
+ * before the checkpoint start.
+ *
+ * When a rollback period overlaps CkptStart and we recover, we are guaranteed
+ * that the undo passes will process all LNs in the rollback period, because
+ * they are >= the firstActiveLsn of the checkpoint.
+ *
+ * The firstActiveLsn for the checkpoint will be <= the LSN of the first LN of
+ * any transaction that is being rolled back at the time of CkptStart, since
+ * these transactions were still active at that time.
+ *
+ * No file containing a transaction rolled back in the recovery interval, or a
+ * file containing the abortLSN of such a transaction, will be deleted by the
+ * cleaner. An active transaction prevents cleaning of its first logged entry
+ * and beyond. The LN of the abortLSN will be locked, which prevents it from
+ * being cleaned.
+ *
+ * All the work lies on the undo side. Recovery redo only needs to ignore
+ * invisible log entries, because we know that the undo pass applied the
+ * invisible bit where needed.
Note that the undo pass must be sure to write + * the invisible bits after the pass, before redo attempts to read the log. + * + * Each rollback LN_TX belongs to a single rollback period. When periods are + * nested, the LN_TX belongs to the closest rollback period that encompasses + * it. + * Using the example above, + * a LN at lsn 350 belongs to rollback period A + * a LN at lsn 550 belongs to rollback period B + * a LN at lsn 650 belongs to rollback period C + * It uses its rollback period's txn chain to find its previous version. + */ +public class RollbackTracker { + + private final EnvironmentImpl envImpl; + private long checkpointStart; + + /* for assertions. */ + private boolean firstUndoPass; + + /* + * List of lsns that were made invisible and need to be fsynced, from + * this recovery. + * + * singlePassLsns are collected for a single recovery pass. After that + * pass, the lsns must be written to the log, so that the next redo + * recovery pass properly skips over invisible lsns, but it need not + * do a fsync. After each pass, the file numbers involved are added to + * recoveryFilesToSync. + * + * After recovery is finished, all file that have re-flipped invisible bits + * are fsynced. Hopefully, the OS may have fsynced some, and waiting until + * the end to fsync will be an optimization. + */ + private final Set recoveryFilesToSync; + private List singlePassInvisibleLsns; + + /* + * Used only for the first construction pass. This is the rollback + * period that we have just found. + */ + private RollbackPeriod underConstructionPeriod; + + /* Top level list of rollback periods */ + private final List periodList; + + RollbackTracker(EnvironmentImpl envImpl) { + this.envImpl = envImpl; + periodList = new ArrayList(); + checkpointStart = DbLsn.NULL_LSN; + recoveryFilesToSync = new HashSet(); + singlePassInvisibleLsns = new ArrayList(); + } + + /** + * Construction Pass: A RollbackEnd is seen, make new period. + */ + void register(RollbackEnd rollbackEnd, long rollbackEndLSN) { + assertFirstPass(rollbackEndLSN); + + if ((underConstructionPeriod != null) && + (underConstructionPeriod.makeNestedPeriod(rollbackEnd, + rollbackEndLSN))) { + return; + } + + underConstructionPeriod = new RollbackPeriod(this, + rollbackEnd, + rollbackEndLSN); + periodList.add(underConstructionPeriod); + } + + /** + * Construction Pass: A RollbackStart is seen. Might be the matching + * one for the current period, or it might be a new period. + */ + void register(RollbackStart rollbackStart, long rollbackStartLSN) { + assertFirstPass(rollbackStartLSN); + + /* There's no rollback period going on, start a new one. */ + if ((underConstructionPeriod != null) && + (underConstructionPeriod.makeNestedPeriod(rollbackStart, + rollbackStartLSN))) { + return; + } + + underConstructionPeriod = new RollbackPeriod(this, + rollbackStart, + rollbackStartLSN); + periodList.add(underConstructionPeriod); + } + + /** + * A TxnCommit showed up on the construction pass. If it's a replicated + * txn, check if it's in a valid place. It should not be within the + * rollback period. + * + * Omit commits for internal, non-replicated transactions from this check. 
+ */ + void checkCommit(long commitLSN, long txnId) { + assertFirstPass(commitLSN); + + if (!TxnManager.isReplicatedTxn(txnId)) { + return; + } + + if (underConstructionPeriod == null) { + return; + } + + if (underConstructionPeriod.contains(commitLSN)) { + underConstructionPeriod.fail("Commit at " + + DbLsn.getNoFormatString(commitLSN) + + " is within rollback period."); + } + } + + /* + * Set the checkpoint start before we begin marking rollback periods, so we + * know that we can ignore periods that are before the checkpoint start. + */ + void setCheckpointStart(long lsn) { + checkpointStart = lsn; + } + + long getCheckpointStart() { + return checkpointStart; + } + + EnvironmentImpl getEnvImpl() { + return envImpl; + } + + /* For unit tests */ + List getPeriodList() { + return periodList; + } + + void setFirstPass(boolean firstUndoPass) { + this.firstUndoPass = firstUndoPass; + } + + /** + * A Scanner is a cursor over the tracker's rollback periods. + */ + Scanner getScanner() { + if (firstUndoPass) { + + /* + * The RollbackTracker is being built, and we need a special + * scanner that can use the rollback period map while it is in an + * incomplete state. This is only needed for JE log versions that + * use MapLN_TXNAL, which are 2.0 and earlier. + */ + return new UnderConstructionScanner(); + } + + return new BackwardScanner(); + } + + /** + * Flip the invisible bit for each lsn in rollbackLsns. Collect the + * corresponding unique set of file numbers and add them to fileNums. + */ + private static void setInvisible(EnvironmentImpl envImpl, + List rollbackLsns, + Set filesToFsync) { + if (rollbackLsns.size() == 0) { + return; + } + + /* + * Sort so that the entries are made invisible in disk order for better + * efficiency. + */ + FileManager fileManager = envImpl.getFileManager(); + + Collections.sort(rollbackLsns); + List perFileLsns = new ArrayList(); + long currentFileNum = -1; + + for (Long lsn : rollbackLsns) { + + /* See if we have moved to a new file. */ + if (DbLsn.getFileNumber(lsn) != currentFileNum) { + /* + * We've moved on to a new file. Make the previous set of + * lsns invisible. + */ + fileManager.makeInvisible(currentFileNum, perFileLsns); + + currentFileNum = DbLsn.getFileNumber(lsn); + filesToFsync.add(currentFileNum); + + /* make a new set to house the lsns for the next file. */ + perFileLsns = new ArrayList(); + } + perFileLsns.add(lsn); + } + + /* Take care of the last set. */ + fileManager.makeInvisible(currentFileNum, perFileLsns); + } + + /* + * Flip the invisible bit for the rollback set of lsns, in lsn order. + * Fsync the set of files represented in this collection of lsns. Used by + * syncup rollback. + */ + public static void makeInvisible(EnvironmentImpl targetEnvImpl, + List rollbackLsns) { + + Set fsyncFiles = new HashSet(); + setInvisible(targetEnvImpl, rollbackLsns, fsyncFiles); + targetEnvImpl.getFileManager().force(fsyncFiles); + } + + /** + * At the end of a recovery pass, write out all invisible bits, save + * a set of file numbers to fsync, and reinitialize the per-pass list + * for the next round. + */ + void singlePassSetInvisible() { + if (envImpl.isReadOnly()) { + return; + } + + setInvisible(envImpl, + singlePassInvisibleLsns, + recoveryFilesToSync); + singlePassInvisibleLsns = new ArrayList(); + } + + void recoveryEndFsyncInvisible() { + if (envImpl.isReadOnly()) { + return; + } + + envImpl.getFileManager().force(recoveryFilesToSync); + } + + /** + * Count an LN obsolete that is being made invisble by rollback. 
+ * + * Use inexact counting. Since invisible entries are not processed by the + * cleaner, recording the obsolete offset would be a waste of resources. + * Since we don't count offsets, we don't need to worry about duplicate + * offsets. + * + * Some entries may be double counted if they were previously counted + * obsolete, for example, when multiple versions of an LN were logged. + * This is tolerated for an exceptional situation like rollback. + */ + private void countObsolete(long undoLsn, + UndoReader undo, + RecoveryUtilizationTracker tracker) { + tracker.countObsoleteUnconditional + (undoLsn, + null /*type*/, + undo.logEntrySize, + undo.db.getId(), + false /*countExact*/); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for (RollbackPeriod period : periodList) { + sb.append(period).append("\n"); + } + return sb.toString(); + } + + private void assertFirstPass(long logLSN) { + if (!firstUndoPass) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.UNEXPECTED_STATE, + "Saw entry at " + DbLsn.getNoFormatString(logLSN) + + "Should only be building the tracker on the first pass"); + } + } + + /** + * A Scanner is to process LNs during a recovery pass. It determines + * whether this log entry is within the rollback period, and should be + * accordingly undone or ignored. It serves as a sort of cursor or iterator + * that works with the rollback tracker. + */ + abstract class Scanner { + + /* + * The target period is the one which houses the LNs that will be + * rolled back. + */ + RollbackPeriod target; + + /** + * Return true if this transactional log entry is something that should + * be rolled back in this rollback period. The Scanner's position can + * be changed by this call. Update the target field if necessary. + */ + abstract boolean positionAndCheck(long lsn, long txnId); + + /** + * Rollback the filereader's current LN_TX. This assumes that the the + * caller has ascertained that the LN is contained within this rollback + * period. + */ + public void rollback(Long txnId, + LNFileReader reader, + RecoveryUtilizationTracker tracker) { + + /* + * If this period is before the checkpoint start, we need not + * repeat the partial rollback. + */ + if (target.beforeCheckpointStart()) { + return; + } + + long undoLsn = reader.getLastLsn(); + + DbTree dbTree = envImpl.getDbTree(); + + UndoReader undo = UndoReader.createForRecovery(reader, dbTree); + + if (undo == null) { + /* Database of LN has been deleted. [#22052] */ + return; + } + + /* Get the TxnChain for this log entry. */ + TxnChain chain = target.getChain(txnId, undoLsn, envImpl); + + try { + RevertInfo revertTo = chain.pop(); + + /* + * When we undo this log entry, we've logically truncated it + * from the log. Remove it from the btree and mark it obsolete. + */ + RecoveryManager.rollbackUndo( + envImpl.getLogger(), Level.FINER, new TreeLocation(), + undo.db, undo.logEntry, undoLsn, revertTo); + + if (!target.hasRollbackEnd()) { + + /* + * We're not positive that the fsync of the invisible log + * entries happened. Make it invisible again. 
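+                     * (The lsn is queued on singlePassInvisibleLsns and is
+                     * written out at the end of this recovery pass; see
+                     * singlePassSetInvisible.)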
+ */ + if (!reader.isInvisible()) { + singlePassInvisibleLsns.add(undoLsn); + } + } + } finally { + dbTree.releaseDb(undo.db); + } + + countObsolete(undoLsn, undo, tracker); + } + + /* For unit tests */ + boolean needsRollback() { + if (target == null) { + return false; + } + + return !target.beforeCheckpointStart(); + } + } + + class UnderConstructionScanner extends Scanner { + + @Override + public boolean positionAndCheck(long lsn, long txnId) { + + if (underConstructionPeriod == null) { + return false; + } + + assert underConstructionPeriod.notInRollbackStartAndEnd + (lsn, txnId) : + underConstructionPeriod.bracketFailure(lsn); + + target = underConstructionPeriod.getScannerTarget(lsn); + if ((target != null) && (target.containsLN(lsn, txnId))) { + return true; + } + + return false; + } + } + + /** + * In a backward scanner, the currentPeriod field is always pointing to the + * period that contains this lsn. If the lsn is not in a period, the + * currentPeriod is the period that is just before this lsn. If there is no + * period before this lsn, the currentPeriod field is null. + */ + class BackwardScanner extends Scanner { + private final Iterator iter; + + /* + * The current period is the period where the scanner is currently + * posed. It is one of the top level periods in the scanner. When + * rollback periods are nested, currentPeriod may not equal target. + */ + private RollbackPeriod currentPeriod; + + BackwardScanner() { + this.iter = periodList.iterator(); + if (iter.hasNext()) { + currentPeriod = iter.next(); + currentPeriod.initChildIter(); + } else { + currentPeriod = null; + } + } + + @Override + public boolean positionAndCheck(long lsn, long txnId) { + if (currentPeriod == null) { + return false; + } + + if (currentPeriod.follows(lsn)) { + + /* + * We've passed out of the currentPeriod. Look for a new one + * that might cover this lsn. + */ + if (iter.hasNext()) { + currentPeriod = iter.next(); + currentPeriod.initChildIter(); + } else { + currentPeriod = null; + return false; + } + } + + assert currentPeriod.notInRollbackStartAndEnd(lsn, txnId) : + currentPeriod.bracketFailure(lsn); + + if (currentPeriod.contains(lsn)) { + + /* + * Make the stack of periods point to the one that contains + * this lsn, or which precedes this lsn. + */ + currentPeriod.positionChildren(lsn); + + /* + * See if any period contains this lsn. There might not be a + * target if the lsn was aborted or committed already at the + * time of rollback. + */ + target = currentPeriod.findTarget(lsn, txnId); + return (target != null); + } + + return false; + } + } + + /** + * A RollbackPeriod describes a section of the log that is logically + * truncated. + */ + static class RollbackPeriod { + private final RollbackTracker tracker; + private final long matchpointLSN; // start of period + private final long rollbackStartLSN; // end of period + private final long rollbackEndLSN; // for debugging and sanity checks + + /* + * lsn of the checkpoint start, to determine if this rollback period + * needs to be used. + */ + private final boolean beforeCheckpointStart; + + /* + * The transactions that were rolled back for this rollback period, + * which were logged in the RollbackStart entry. + */ + private Set activeTxnIds; + + /* + * The txn chain constructed to support rollback to an earlier version. + */ + private final Map txnChainMap; + + /* Nested rollbacks. 
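+         * Children are added during the backwards construction pass, so
+         * the list runs from later to earlier LSNs.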
*/ + private final List children; + private RollbackPeriod currentChild = null; + private Iterator childIter; + + RollbackPeriod(RollbackTracker tracker, + RollbackEnd rollbackEnd, + long rollbackEndLSN) { + this(tracker, + rollbackEnd.getMatchpoint(), + rollbackEnd.getRollbackStart(), + rollbackEndLSN, + tracker.getCheckpointStart(), + null); // activeTxnIds + } + + RollbackPeriod(RollbackTracker tracker, + RollbackStart rollbackStart, + long rollbackStartLSN) { + this(tracker, + rollbackStart.getMatchpoint(), + rollbackStartLSN, + DbLsn.NULL_LSN, // rollbackendLSN; + tracker.getCheckpointStart(), + rollbackStart.getActiveTxnIds()); + } + + /* For unit testing only. */ + RollbackPeriod(long matchpointLSN, + long rollbackStartLSN, + long rollbackEndLSN, + long checkpointStart) { + this(null, matchpointLSN, rollbackStartLSN, rollbackEndLSN, + checkpointStart, null /*activeTxnIds*/); + } + + + private RollbackPeriod(RollbackTracker tracker, + long matchpointLSN, + long rollbackStartLSN, + long rollbackEndLSN, + long checkpointStart, + Set activeTxnIds) { + this.tracker = tracker; + this.matchpointLSN = matchpointLSN; + this.rollbackStartLSN = rollbackStartLSN; + this.rollbackEndLSN = rollbackEndLSN; + this.beforeCheckpointStart = calcBeforeCheckpoint(checkpointStart); + txnChainMap = new HashMap(); + children = new ArrayList(); + this.activeTxnIds = activeTxnIds; + } + + private boolean calcBeforeCheckpoint(long checkpointStart) { + return ((checkpointStart != DbLsn.NULL_LSN) && + (rollbackEndLSN != DbLsn.NULL_LSN) && + (DbLsn.compareTo(rollbackEndLSN, checkpointStart) < 0)); + } + + /** + * A new RollbackEnd has been seen. + * + * @return true if the RollbackEnd belongs to a period nested within + * the current period. Return false if the RollbackEnd belongs to new, + * distinct, different period, and the current period is closed. + */ + boolean makeNestedPeriod(RollbackEnd foundRBEnd, long foundLSN) { + RollbackPeriod target = getNewPeriodTarget(foundRBEnd, foundLSN); + if (target != null) { + target.makeChild(foundRBEnd, foundLSN); + return true; + } + return false; + } + + /** + * A new RollbackStart has been seen. + * + * @return true if the RollbackStart belongs to a period nested within + * the current period, or if it is the current period. Return false if + * the RollbackStart belongs to new, distinct, different period, and + * this current period is closed. + */ + boolean makeNestedPeriod(RollbackStart foundRBStart, long foundLSN) { + RollbackPeriod target = getNewPeriodTarget(foundRBStart, foundLSN); + if (target != null) { + if (target.isMatchingRollbackStart(foundLSN)) { + assert target.activeTxnIds == null; + target.activeTxnIds = foundRBStart.getActiveTxnIds(); + } else { + target.makeChild(foundRBStart, foundLSN); + } + + /* + * Retrun true to let the caller know that it doesn't have to + * make a new Rollback period. Either the RBStart did not + * initiate a new period, or we made a nested child. + */ + return true; + } + /* This period is closed. */ + return false; + } + + private boolean contained(RollbackEnd foundRBEnd, long foundLSN) { + + /* + * This RollbackEnd must either + * 1 - precede this period, in which case this period is closed, or + * 2 - be wholly contained within this period. + */ + + /* case 1 */ + if (DbLsn.compareTo(foundLSN, matchpointLSN) < 0) { + /* The found rollback end precedes this period. */ + return false; + } + + if (DbLsn.compareTo(foundLSN, rollbackStartLSN) >= 0) { + fail("Should not be two RollbackEnds in a row. 
" + + "New RollbackEnd at " + + DbLsn.getNoFormatString(foundLSN) + + " " + foundRBEnd); + } + + /* + * Check for compliance to the rule that this RollbackEnd does not + * intersect this rollback period. + */ + if (!((DbLsn.compareTo(foundRBEnd.getMatchpoint(), + matchpointLSN) >= 0) && + (DbLsn.compareTo(foundRBEnd.getRollbackStart(), + rollbackStartLSN) < 0))) { + fail("RollbackEnd intersects current rollback period " + + foundRBEnd + " at " + DbLsn.getNoFormatString(foundLSN)); + } + + /* case 2 */ + return true; + } + + /** + * @return true if the current rollback period is still open + */ + private boolean contained(RollbackStart foundRBStart, long foundLSN) { + + /* + * This RollbackStart must: + * 1 - precede the current period, indicating the end of this + * period. + * 2 - is the rolblack start that belongs to this period. + * 3 - be wholly contained within this period. + */ + + /* case 1 */ + if (DbLsn.compareTo(foundLSN, matchpointLSN) < 0) { + /* The found rollback start precedes this period. */ + return false; + } + + if (isMatchingRollbackStart(foundLSN)) { + return true; + } + + /* Check for compliance with case 3. */ + if (!((DbLsn.compareTo(foundRBStart.getMatchpoint(), + matchpointLSN) >= 0) && + (DbLsn.compareTo(foundLSN, rollbackStartLSN) < 0))) { + fail("RollbackStart intersects current rollback period " + + foundRBStart + " at " + + DbLsn.getNoFormatString(foundLSN)); + } + + /* case 3. */ + return true; + } + + /** + * @return true if this RollbackStart entry is the one that is the + * RollbackStart for this open period. + */ + private boolean isMatchingRollbackStart(long foundLSN) { + return (DbLsn.compareTo(foundLSN, rollbackStartLSN) == 0); + } + + private void makeChild(RollbackEnd foundRBEnd, long foundLSN) { + currentChild = new RollbackPeriod(tracker, + foundRBEnd, + foundLSN); + children.add(currentChild); + } + + private void makeChild(RollbackStart foundRBStart, long foundLSN) { + currentChild = new RollbackPeriod(tracker, + foundRBStart, + foundLSN); + children.add(currentChild); + } + + /** + * Return the period that should own this foundRBEnd. That may be + * either a nested period, or this period. + */ + RollbackPeriod getNewPeriodTarget(RollbackEnd foundRBEnd, + long foundLSN) { + if (currentChild != null) { + final RollbackPeriod target = + currentChild.getNewPeriodTarget(foundRBEnd, foundLSN); + if (target != null) { + return target; + } + } + + if (contained(foundRBEnd, foundLSN)) { + return this; + } + return null; + } + + /** + * Return the period that should own this foundRBStart. That may be + * either a nested period, or this period. 
+ */ + RollbackPeriod getNewPeriodTarget(RollbackStart foundRBStart, + long foundLSN) { + if (currentChild != null) { + final RollbackPeriod target = + currentChild.getNewPeriodTarget(foundRBStart, foundLSN); + if (target != null) { + return target; + } + } + + if (contained(foundRBStart, foundLSN)) { + return this; + } + return null; + } + + RollbackPeriod getScannerTarget(long lsn) { + if (currentChild != null) { + RollbackPeriod target = currentChild.getScannerTarget(lsn); + if (target != null) { + return target; + } + } + + if (DbLsn.compareTo(lsn, matchpointLSN) > 0) { + return this; + } + return null; + } + + void initChildIter() { + childIter = children.iterator(); + if (childIter.hasNext()) { + currentChild = childIter.next(); + currentChild.initChildIter(); + } else { + currentChild = null; + } + } + + void fail(String errorMessage) { + throw new EnvironmentFailureException + (tracker.getEnvImpl(), + EnvironmentFailureReason.LOG_INTEGRITY, + errorMessage + "\ntracker contents=" + tracker); + } + + /** + * This log entry belongs to this rollback period if it lies between + * the matchpoint and the RollbackStart. We don't use RollbackEnd, + * because there may not be a RollbackEnd. Also, by definition, + * anything whose rollback fate is define by this period must have + * happened before the RollbackStart. + */ + boolean contains(long lsn) { + return (DbLsn.compareTo(matchpointLSN, lsn) < 0) && + (DbLsn.compareTo(rollbackStartLSN, lsn) > 0); + } + + boolean containsLN(long lsn, long txnId) { + return contains(lsn) && activeTxnIds.contains(txnId); + } + + void positionChildren(long lsn) { + if (currentChild == null) + return; + + if (currentChild.follows(lsn)) { + if (childIter.hasNext()) { + currentChild = childIter.next(); + currentChild.initChildIter(); + } else { + currentChild = null; + return; + } + } + + currentChild.positionChildren(lsn); + } + + RollbackPeriod findTarget(long lsn, long txnId) { + + if (currentChild != null) { + final RollbackPeriod candidate = + currentChild.findTarget(lsn, txnId); + if (candidate != null) { + return candidate; + } + } + + if (containsLN(lsn, txnId)) { + return this; + } + return null; + } + + /** + * There should not be any txnal LNs between a rollback start and + * rollback end log entry. + */ + boolean notInRollbackStartAndEnd(long lsn, long txnId) { + if (!TxnManager.isReplicatedTxn(txnId)) { + /* Don't bother checking a non-replicated txn. */ + return true; + } + + if (rollbackEndLSN == DbLsn.NULL_LSN) + return true; + + return (!((DbLsn.compareTo(rollbackStartLSN, lsn) < 0) && + (DbLsn.compareTo(rollbackEndLSN, lsn) > 0))); + } + + String bracketFailure(long lsn) { + return lsn + " [" + DbLsn.getNoFormatString(lsn) + + "] should not be within rollbackStart " + rollbackStartLSN + + " [" + DbLsn.getNoFormatString(rollbackStartLSN) + + "] and rollbackEnd " + rollbackEndLSN + " [" + + DbLsn.getNoFormatString(rollbackEndLSN) + "]"; + } + + /** + * @return true if this rollback period is after, and does not contain + * the lsn. + */ + boolean follows(long lsn) { + return DbLsn.compareTo(matchpointLSN, lsn) > 0; + } + + /** + * @return true if this rollback period is before, and does not contain + * the lsn. 
+ */ + boolean precedes(long lsn) { + return DbLsn.compareTo(rollbackStartLSN, lsn) < 0; + } + + TxnChain getChain(long txnId, long undoLsn, EnvironmentImpl envImpl) { + TxnChain chain = txnChainMap.get(txnId); + if (chain == null) { + chain = new TxnChain(undoLsn, + txnId, + matchpointLSN, + envImpl); + txnChainMap.put(txnId, chain); + } + return chain; + } + + boolean hasRollbackEnd() { + return rollbackEndLSN != DbLsn.NULL_LSN; + } + + @Override + public String toString() { + return "matchpoint=" + matchpointLSN + " [" + + DbLsn.getNoFormatString(matchpointLSN) + + "] rollbackStart=" + rollbackStartLSN + " [" + + DbLsn.getNoFormatString(rollbackStartLSN) + + "] rollbackEnd=" + rollbackEndLSN + " [" + + DbLsn.getNoFormatString(rollbackEndLSN) + "]"; + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof RollbackPeriod)) { + return false; + } + + RollbackPeriod otherPeriod = (RollbackPeriod) other; + return ((matchpointLSN == otherPeriod.matchpointLSN) && + (rollbackStartLSN == otherPeriod.rollbackStartLSN) && + (rollbackEndLSN == otherPeriod.rollbackEndLSN)); + } + + boolean beforeCheckpointStart() { + return beforeCheckpointStart; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = (prime + (int) matchpointLSN); + result = (result * prime)+ (int) rollbackStartLSN; + result = (result * prime) + (int) rollbackEndLSN; + return result; + } + + } +} diff --git a/src/com/sleepycat/je/recovery/VLSNRecoveryProxy.java b/src/com/sleepycat/je/recovery/VLSNRecoveryProxy.java new file mode 100644 index 0000000..bbfa921 --- /dev/null +++ b/src/com/sleepycat/je/recovery/VLSNRecoveryProxy.java @@ -0,0 +1,28 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.entry.LogEntry; + +/** + * The VLSNRecoveryProxy is a handle for invoking VLSN tracking at recovery + * time. + */ +public interface VLSNRecoveryProxy { + + public void trackMapping(long lsn, + LogEntryHeader currentEntryHeader, + LogEntry logEntry); +} diff --git a/src/com/sleepycat/je/recovery/package-info.java b/src/com/sleepycat/je/recovery/package-info.java new file mode 100644 index 0000000..67cf1da --- /dev/null +++ b/src/com/sleepycat/je/recovery/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Performs recovery/startup processing during Environment open, and + * checkpoints to bound recovery time. 
+ */ +package com.sleepycat.je.recovery; \ No newline at end of file diff --git a/src/com/sleepycat/je/rep/AppStateMonitor.java b/src/com/sleepycat/je/rep/AppStateMonitor.java new file mode 100644 index 0000000..e62cf6b --- /dev/null +++ b/src/com/sleepycat/je/rep/AppStateMonitor.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +/** + * A mechanism for adding application specific information when asynchronously + * tracking the state of a running JE HA application. + *

+ * <p>
+ * {@link NodeState} provides information about the current state of a member
+ * of the replication group. The application can obtain NodeState via {@link
+ * com.sleepycat.je.rep.util.ReplicationGroupAdmin#getNodeState} or {@link
+ * com.sleepycat.je.rep.util.DbPing#getNodeState}. A NodeState contains mostly
+ * JE-centric information, such as whether the node is a master or replica.
+ * However, it may be important to add in some application specific
+ * information to enable the best use of the status.
+ * <p>
+ * For example, an application may want to direct operations to specific nodes
+ * based on whether the node is available. The fields in {@link NodeState}
+ * will tell the application whether the node is up and available in a JE HA
+ * sense, but the application may also need information about an application
+ * level resource, which would affect the load balancing decision. The
+ * AppStateMonitor is a way for the application to inject this kind of
+ * application specific information into the replicated node status.
+ * <p>
+ * The AppStateMonitor is registered with the replicated environment using
+ * {@link ReplicatedEnvironment#registerAppStateMonitor(AppStateMonitor)}.
+ * There is at most one AppStateMonitor associated with the actual environment
+ * (not an {@link com.sleepycat.je.Environment} handle) at any given time. JE
+ * HA calls {@link AppStateMonitor#getAppState} when it is assembling status
+ * information for a given node.
+ * <p>
+ * After registration, the application can obtain this application specific
+ * information along with other JE HA status information when it obtains a
+ * {@link NodeState}, through {@link NodeState#getAppState}.
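+ * <p>
+ * A minimal sketch of an implementation (the load metric and payload encoding
+ * here are purely hypothetical; applications define their own):
+ * <pre>{@code
+ *  class LoadAppStateMonitor implements AppStateMonitor {
+ *      public byte[] getAppState() {
+ *          // currentRequestLoad() is a hypothetical application metric.
+ *          return Integer.toString(currentRequestLoad()).getBytes();
+ *      }
+ *  }
+ * }</pre>
+ * <p>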
+ * {@link AppStateMonitor#getAppState()} returns a byte array whose length
+ * should be greater than zero. An IllegalStateException will be thrown if the
+ * returned byte array has zero length. Users are responsible for serializing
+ * and deserializing the desired information into this byte array.
+ *
+ * @since 5.0
+ */
+public interface AppStateMonitor {
+
+    /**
+     * Return a byte array which holds information about the application's
+     * state. The application is responsible for serializing and
+     * deserializing this information.
+     * <p>
        + * Note the returned byte array's length should be larger than 0. + * + * @return the application state + */ + public byte[] getAppState(); +} diff --git a/src/com/sleepycat/je/rep/CommitPointConsistencyPolicy.java b/src/com/sleepycat/je/rep/CommitPointConsistencyPolicy.java new file mode 100644 index 0000000..4a19c34 --- /dev/null +++ b/src/com/sleepycat/je/rep/CommitPointConsistencyPolicy.java @@ -0,0 +1,226 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.utilint.PropUtil; + +/** + * A consistency policy which ensures that the environment on a Replica node is + * at least as current as denoted by the specified {@link CommitToken}. This + * token represents a point in the serialized transaction schedule created by + * the master. In other words, this token is like a bookmark, representing a + * particular transaction commit in the replication stream. The Replica ensures + * that the commit identified by the {@link CommitToken} has been executed on + * this node before allowing the application's {@link + * com.sleepycat.je.Environment#beginTransaction(com.sleepycat.je.Transaction, + * com.sleepycat.je.TransactionConfig) Environment.beginTransaction()} + * operation on the Replica to proceed. + *

+ * <p>
+ * For example, suppose the application is a web application where a
+ * replicated group is implemented within a load balanced web server group.
+ * Each request to the web server consists of an update operation followed by
+ * read operations (say, from the same client). The read operations naturally
+ * expect to see the data from the updates executed by the same request.
+ * However, the read operations might have been routed to a node that did not
+ * execute the update.
+ * <p>
+ * In such a case, the update request would generate a {@link CommitToken},
+ * which would be resubmitted by the browser along with subsequent read
+ * requests. The read request could be directed at any one of the available
+ * web servers by a load balancer. The node which executes the read request
+ * would create a CommitPointConsistencyPolicy with that {@link CommitToken}
+ * and use it at transaction begin. If the environment at the web server was
+ * already current (with respect to the commit token), it could immediately
+ * execute the transaction and satisfy the request. If not, the "transaction
+ * begin" would stall until the Replica replay had caught up and the change
+ * was available at that web server.
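+ * <p>
+ * A sketch of that flow, assuming the update transaction's token is obtained
+ * via {@code Transaction.getCommitToken()} (handle names are illustrative):
+ * <pre>{@code
+ *  CommitToken token = updateTxn.getCommitToken();
+ *  // ... later, on the node serving the read request ...
+ *  TransactionConfig config = new TransactionConfig();
+ *  config.setConsistencyPolicy(
+ *      new CommitPointConsistencyPolicy(token, 10, TimeUnit.SECONDS));
+ *  Transaction readTxn = env.beginTransaction(null, config);
+ * }</pre>
+ * <p>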
        + * Consistency policies are specified at either a per-transaction level through + * {@link com.sleepycat.je.TransactionConfig#setConsistencyPolicy} or as an + * replication node wide default through {@link + * com.sleepycat.je.rep.ReplicationConfig#setConsistencyPolicy} + * + * @see com.sleepycat.je.CommitToken + * @see Managing Consistency + */ +public class CommitPointConsistencyPolicy implements ReplicaConsistencyPolicy { + + /** + * The name:{@value} associated with this policy. The name can be used when + * constructing policy property values for use in je.properties files. + */ + public static final String NAME = "CommitPointConsistencyPolicy"; + + /* + * Identifies the commit of interest in a serialized transaction schedule. + */ + private final CommitToken commitToken; + + /* + * Amount of time (in milliseconds) to wait for consistency to be + * reached. + */ + private final int timeout; + + /** + * Defines how current a Replica needs to be in terms of a specific + * transaction that was committed on the Master. A transaction on the + * Replica that uses this consistency policy is allowed to start only + * after the transaction identified by the commitToken has + * been committed on the Replica. The {@link + * com.sleepycat.je.Environment#beginTransaction( + * com.sleepycat.je.Transaction, com.sleepycat.je.TransactionConfig) + * Environment.beginTransaction()} will wait for at most + * timeout for the Replica to catch up. If the Replica has + * not caught up in this period, the beginTransaction() + * method will throw a {@link ReplicaConsistencyException}. + * + * @param commitToken the token identifying the transaction + * + * @param timeout the maximum amount of time that the transaction start + * will wait to allow the Replica to catch up. + * + * @param timeoutUnit the {@code TimeUnit} for the timeout parameter. + * + * @throws IllegalArgumentException if the commitToken or timeoutUnit is + * null. + */ + public CommitPointConsistencyPolicy(CommitToken commitToken, + long timeout, + TimeUnit timeoutUnit) { + if (commitToken == null) { + throw new IllegalArgumentException("commitToken must not be null"); + } + this.commitToken = commitToken; + this.timeout = PropUtil.durationToMillis(timeout, timeoutUnit); + } + + /** + * Returns the name:{@value #NAME}, associated with this policy. + * @see #NAME + */ + @Override + public String getName() { + return NAME; + } + + /** + * @hidden + * For internal use only. + * Ensures that the replica has replayed the replication stream to the + * point identified by the commit token or past it. If it has not, the + * method waits until the constraint is satisfied, or the timeout period + * has expired, whichever event takes place first. + */ + @Override + public void ensureConsistency(EnvironmentImpl envImpl) + throws InterruptedException, + ReplicaConsistencyException { + + /* + * Cast is done to preserve replication/non replication code + * boundaries. + */ + RepImpl repImpl = (RepImpl) envImpl; + if (!commitToken.getRepenvUUID().equals + (repImpl.getRepNode().getUUID())) { + throw new IllegalArgumentException + ("Replication environment mismatch. " + + "The UUID associated with the commit token is: " + + commitToken.getRepenvUUID() + + " but this replica environment has the UUID: " + + repImpl.getRepNode().getUUID()); + } + Replica replica = repImpl.getRepNode().replica(); + replica.getConsistencyTracker().awaitVLSN + (commitToken.getVLSN(), this); + } + + /** + * Return the CommitToken used to create this consistency + * policy. 
+ * @return the CommitToken used to create this consistency + * policy. + */ + public CommitToken getCommitToken() { + return commitToken; + } + + /** + * Return the timeout specified when creating this consistency policy. + * + * @param unit the {@code TimeUnit} of the returned value. + * + * @return the timeout specified when creating this consistency policy + */ + @Override + public long getTimeout(TimeUnit unit) { + return PropUtil.millisToDuration(timeout, unit); + } + + /** + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((commitToken == null) ? 0 : commitToken.hashCode()); + result = prime * result + timeout; + return result; + } + + /** + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof CommitPointConsistencyPolicy)) { + return false; + } + CommitPointConsistencyPolicy other = + (CommitPointConsistencyPolicy) obj; + if (commitToken == null) { + if (other.commitToken != null) { + return false; + } + } else if (!commitToken.equals(other.commitToken)) { + return false; + } + if (timeout != other.timeout) { + return false; + } + return true; + } + + @Override + public String toString() { + return getName() + " commitToken=" + commitToken; + } +} diff --git a/src/com/sleepycat/je/rep/DatabasePreemptedException.java b/src/com/sleepycat/je/rep/DatabasePreemptedException.java new file mode 100644 index 0000000..6d6f69b --- /dev/null +++ b/src/com/sleepycat/je/rep/DatabasePreemptedException.java @@ -0,0 +1,115 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.Database; +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown when attempting to use a Database handle that was forcibly closed by + * replication. This exception only occurs in a replicated environment and + * normally only occurs on a Replica node. In the case of a DPL schema upgrade + * where an entity class or secondary key is renamed, it may also occur on a + * Master node, as described below. + * + *

+ * <p>
+ * This exception occurs when accessing a database or store and one of the
+ * following methods was recently executed on the master node and then
+ * replayed on a replica node:
+ * {@link com.sleepycat.je.Environment#truncateDatabase truncateDatabase},
+ * {@link com.sleepycat.je.Environment#removeDatabase removeDatabase} and
+ * {@link com.sleepycat.je.Environment#renameDatabase renameDatabase}.
+ * <p>
+ * When using the {@link com.sleepycat.persist DPL}, this occurs only in two
+ * circumstances:
+ * <ol>
+ * <li>This exception is thrown on a Replica node when the {@link
+ * com.sleepycat.persist.EntityStore#truncateClass truncateClass} method has
+ * been called on the Master node.</li>
+ * <li>This exception is thrown on a Replica or Master node when an entity
+ * class or secondary key has been renamed and the application has been
+ * upgraded. See Upgrading a Replication Group.</li>
+ * </ol>
+ * <p>
+ * When this exception occurs, the application must close any open cursors
+ * and abort any open transactions that are using the database or store, and
+ * then close the database or store handle. If the application wishes, it may
+ * then reopen the database (if it still exists) or store.
+ * <p>
+ * Some applications may wish to coordinate the Master and Replica sites to
+ * prevent a Replica from accessing a database that is being truncated,
+ * removed or renamed, and thereby prevent this exception. Such coordination
+ * is not directly supported by JE. The DatabasePreemptedException is provided
+ * to allow an application to handle database truncation, removal and renaming
+ * without such coordination between nodes.
+ * <p>
+ * The {@link com.sleepycat.je.Transaction} handle is not invalidated as a
+ * result of this exception.
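+ * <p>
+ * A sketch of the handling sequence described above (handle names are
+ * illustrative, and error handling is elided):
+ * <pre>{@code
+ *  try {
+ *      cursor.getNext(key, data, null);
+ *  } catch (DatabasePreemptedException e) {
+ *      cursor.close();   // close all cursors using the database
+ *      txn.abort();      // abort all transactions using the database
+ *      db.close();       // close the preempted handle
+ *      // Reopen if the database still exists.
+ *      db = env.openDatabase(null, e.getDatabaseName(), dbConfig);
+ *  }
+ * }</pre>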
        + * + * @since 4.0 + */ +public class DatabasePreemptedException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + private final String dbName; + private final Database dbHandle; + + /** + * For internal use only. + * @hidden + */ + public DatabasePreemptedException(final String message, + final String dbName, + final Database dbHandle) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + this.dbName = dbName; + this.dbHandle = dbHandle; + } + + /** + * For internal use only. + * @hidden + */ + private DatabasePreemptedException(String message, + DatabasePreemptedException cause) { + super(message, cause); + dbName = cause.dbName; + dbHandle = cause.dbHandle; + } + + /** + * Returns the database handle that was forcibly closed. + */ + public Database getDatabase() { + return dbHandle; + } + + /** + * Returns the name of the database that was forcibly closed. + */ + public String getDatabaseName() { + return dbName; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DatabasePreemptedException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/GroupShutdownException.java b/src/com/sleepycat/je/rep/GroupShutdownException.java new file mode 100644 index 0000000..632066e --- /dev/null +++ b/src/com/sleepycat/je/rep/GroupShutdownException.java @@ -0,0 +1,123 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; + +/** + * Thrown when an attempt is made to access an environment that was + * shutdown by the Master as a result of a call to + * {@link ReplicatedEnvironment#shutdownGroup(long, TimeUnit)}. + */ +public class GroupShutdownException extends EnvironmentFailureException { + private static final long serialVersionUID = 1; + + /* The time that the shutdown was initiated on the master. */ + private final long shutdownTimeMs; + + /* The master node that initiated the shutdown. */ + private final String masterNodeName; + + /* The VLSN at the time of shutdown */ + private final VLSN shutdownVLSN; + + /** + * For internal use only. + * @hidden + */ + public GroupShutdownException(Logger logger, + RepNode repNode, + long shutdownTimeMs) { + super(repNode.getRepImpl(), + EnvironmentFailureReason.SHUTDOWN_REQUESTED, + String.format("Master:%s, initiated shutdown at %1tc.", + repNode.getMasterStatus().getNodeMasterNameId(). 
+ getName(), + shutdownTimeMs)); + + shutdownVLSN = repNode.getVLSNIndex().getRange().getLast(); + masterNodeName = + repNode.getMasterStatus().getNodeMasterNameId().getName(); + this.shutdownTimeMs = shutdownTimeMs; + + LoggerUtils.warning(logger, repNode.getRepImpl(), + "Explicit shutdown request from Master:" + + masterNodeName); + } + + /** + * For internal use only. + * @hidden + */ + public GroupShutdownException(Logger logger, + RepImpl repImpl, + String masterNodeName, + VLSN shutdownVLSN, + long shutdownTimeMs) { + super(repImpl, + EnvironmentFailureReason.SHUTDOWN_REQUESTED, + String.format("Master:%s, initiated shutdown at %1tc.", + masterNodeName, + shutdownTimeMs)); + + this.shutdownVLSN = shutdownVLSN; + this.masterNodeName = masterNodeName; + this.shutdownTimeMs = shutdownTimeMs; + + LoggerUtils.warning(logger, repImpl, + "Explicit shutdown request from Master:" + + masterNodeName); + + } + + /** + * For internal use only. + * @hidden + */ + private GroupShutdownException(String message, + GroupShutdownException shutdownException) { + super(message, shutdownException); + shutdownVLSN = shutdownException.shutdownVLSN; + shutdownTimeMs = shutdownException.shutdownTimeMs; + masterNodeName = shutdownException.masterNodeName; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public GroupShutdownException wrapSelf(String msg) { + return new GroupShutdownException(msg, this); + } + + /** + * For internal use only. + * + * Returns the shutdownVLSN, if it was available, at the time of the + * exception + * + * @hidden + */ + public VLSN getShutdownVLSN() { + return shutdownVLSN; + } +} diff --git a/src/com/sleepycat/je/rep/InsufficientAcksException.java b/src/com/sleepycat/je/rep/InsufficientAcksException.java new file mode 100644 index 0000000..c1e9819 --- /dev/null +++ b/src/com/sleepycat/je/rep/InsufficientAcksException.java @@ -0,0 +1,156 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.rep.txn.MasterTxn; + +/** + *

+ * This exception is thrown at the time of a commit in a Master, if the Master
+ * could not obtain transaction commit acknowledgments from its Replicas in
+ * accordance with the {@link ReplicaAckPolicy} currently in effect and within
+ * the requested timeout interval. This exception will never be thrown when
+ * the {@code ReplicaAckPolicy} of {@link ReplicaAckPolicy#NONE NONE} is in
+ * effect.
+ * <p>
+ * Note that an {@link InsufficientAcksException} means the transaction has
+ * already committed at the master. The transaction may also have been
+ * committed at one or more Replicas, but the lack of replica acknowledgments
+ * means that the number of replicas that committed could not be established.
+ * If the transaction was in fact committed by less than a simple majority of
+ * the nodes, it could result in a {@link RollbackException} when the node
+ * subsequently attempts to rejoin the group as a Replica.
+ * <p>
+ * The application can handle the exception and choose to respond in a number
+ * of ways. For example, it can:
+ * <ul>
+ * <li>do nothing, assuming that the transaction will eventually propagate to
+ * enough replicas to become durable,</li>
+ * <li>retry the operation in a new transaction, which may succeed or fail
+ * depending on whether the underlying problems have been resolved,</li>
+ * <li>retry using a larger timeout interval and return to the original
+ * timeout interval at a later time,</li>
+ * <li>fall back temporarily to a read-only mode,</li>
+ * <li>increase the durability of the transaction on the Master by ensuring
+ * that the changes are flushed to the operating system's buffers or to the
+ * disk, or</li>
+ * <li>give up and report an error at a higher level, perhaps to allow an
+ * administrator to check the underlying cause of the failure.</li>
+ * </ul>
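+ * <p>
+ * A sketch of handling the exception, here choosing to log and continue
+ * (names are illustrative; remember that the transaction has already
+ * committed on the Master):
+ * <pre>{@code
+ *  try {
+ *      txn.commit(durability);
+ *  } catch (InsufficientAcksException e) {
+ *      // The changes are durable on the Master; e.g. log and continue,
+ *      // assuming the transaction will eventually propagate to replicas.
+ *      logger.warning("Commit lacked " + e.acksPending() + " acks");
+ *  }
+ * }</pre>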
        + * + * @see Durability + */ +public class InsufficientAcksException extends OperationFailureException { + private static final long serialVersionUID = 1; + + private final int acksPending; + private final int acksRequired; + private final int ackTimeoutMs; + private final String feederState; + + /** + * @hidden + * Creates a InsufficientAcksException. + * + * @param acksPending the number of missing acknowledgments + * @param ackTimeoutMs the current acknowledgment timeout value in + * milliseconds + */ + public InsufficientAcksException(MasterTxn txn, + int acksPending, + int ackTimeoutMs, + String feederState) { + super(null, false /*abortOnly*/, + "Transaction: " + txn.getId() + + " VLSN: " + txn.getCommitVLSN() + + ", initiated at: " + String.format("%1tT. ", txn.getStartMs()) + + " Insufficient acks for policy:" + + txn.getCommitDurability().getReplicaAck() + ". " + + "Need replica acks: " + txn.getRequiredAckCount() + ". " + + "Missing replica acks: " + acksPending + ". " + + "Timeout: " + ackTimeoutMs + "ms. " + + "FeederState=" + feederState, + null /*cause*/); + assert(acksPending <= txn.getRequiredAckCount()); + this.acksPending = acksPending; + this.acksRequired = txn.getRequiredAckCount(); + this.ackTimeoutMs = ackTimeoutMs; + this.feederState = feederState; + } + + /** + * For testing only. + * @hidden + */ + public InsufficientAcksException(String message) { + super(message); + this.acksPending = 0; + this.acksRequired = 0; + this.ackTimeoutMs = 0; + this.feederState = "Test feeder state"; + } + + /** + * For internal use only. + * @hidden + */ + private InsufficientAcksException(String message, + InsufficientAcksException cause) { + super(message, cause); + this.acksPending = cause.acksPending; + this.acksRequired = cause.acksRequired; + this.ackTimeoutMs = cause.ackTimeoutMs; + this.feederState = cause.feederState; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new InsufficientAcksException(msg, this); + } + + /** + * It returns the number of Replicas that did not respond with an + * acknowledgment within the Replica commit timeout period. + * + * @return the number of missing acknowledgments + */ + public int acksPending() { + return acksPending; + } + + /** + * It returns the number of acknowledgments required by the commit policy. + * + * @return the number of acknowledgments required + */ + public int acksRequired() { + return acksRequired; + } + + /** + * Returns the acknowledgment timeout that was in effect at the time of the + * exception. + * + * @return the acknowledgment timeout in milliseconds + */ + public int ackTimeout() { + return ackTimeoutMs; + } +} diff --git a/src/com/sleepycat/je/rep/InsufficientLogException.java b/src/com/sleepycat/je/rep/InsufficientLogException.java new file mode 100644 index 0000000..d97e007 --- /dev/null +++ b/src/com/sleepycat/je/rep/InsufficientLogException.java @@ -0,0 +1,429 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.utilint.VLSN; + +/** + * This exception indicates that the log files constituting the Environment are + * insufficient and cannot be used as the basis for continuing with the + * replication stream provided by the current master. + * + * <p>This exception is typically thrown by the ReplicatedEnvironment constructor + * when a node has been down for a long period of time and is being started up + * again. It may also be thrown when a brand new node attempts to become a + * member of the group and it does not have a sufficiently current set of log + * files. If the group experiences sustained network connectivity problems, + * this exception may also be thrown by an active Replica that has been unable + * to stay in touch with the members of its group for an extended period of + * time.</p> + * + * <p>In the typical case, the application handles the exception by invoking + * {@link NetworkRestore#execute} to obtain the log files it needs from one of + * the members of the replication group. After the log files are obtained, the + * node recreates its environment handle and resumes participation as an active + * member of the group.</p> + * + * @see NetworkRestore + */ +public class InsufficientLogException extends RestartRequiredException { + private static final long serialVersionUID = 1; + + /* + * These properties store information to serialize the ILE and save it in a + * RestoreRequired log entry. + * + * The properties named P_* describe a log provider. An ILE may have one or + * more log providers, and a more structured format like JSON would make it + * easier to describe an array of items. Since we're currently constrained + * to using a property list, we follow the convention of suffixing each + * provider property with a number. For example, if there are two + * providers, P_NUMPROVIDERS=2, and there would be P_NODENAME0, + * P_NODENAME1, etc. + */ + private static final String P_NUMPROVIDERS = "P_NUMPROVIDERS"; + private static final String P_NODENAME = "P_NODENAME"; + private static final String P_NODETYPE = "P_NODETYPE"; + private static final String P_HOSTNAME = "P_HOSTNAME"; + private static final String P_PORT = "P_PORT"; + + /* + * Properties needed to create an environment handle for the node which + * needs new logs, and is the target for a backup. + */ + private static final String GROUP_NAME = "GROUP_NAME"; + private static final String NODE_NAME = "NODE_NAME"; + private static final String HOSTNAME = "HOSTNAME"; + private static final String PORT = "PORT"; + private static final String ENV_DIR = "ENV_DIR"; + private static final String REFRESH_VLSN = "REFRESH_VLSN"; + + /* + * A handle to the replication environment which is the target for the + * network restore. May be null. If repImpl was created by the ILE, the + * caller has the responsibility for closing the environment. Note that a + * RepImpl rather than RepNode is used as the environment handle. RepNodes + * are only available for nodes which are a member of a group. In some + * cases, an ILE is used by a node which is detached, and is not currently + * connected to its group. + */ + private transient RepImpl repImpl; + private boolean openedByILE; + + /* Attributes used by the network restore, in serialized format */ + private final transient Properties props; + + /* + * No longer used as of JE 7.5, but the field is retained for serializable + * compatibility. + */ + private final VLSN refreshVLSN; + + /* + * Candidate nodes for a log file refresh. Note that this field is only + * used by a thread that is synchronously processing the caught exception, + * which is safely after the instance has been initialized. + */ + private final Set<ReplicationNode> logProviders; + + /** + * For KVS test use only. + * @hidden + */ + @SuppressWarnings("unused") // KVS-only + public InsufficientLogException(String message) { + super(null, EnvironmentFailureReason.UNEXPECTED_STATE, message); + this.repImpl = null; + this.refreshVLSN = null; + this.logProviders = null; + this.props = null; + } + + /** + * @hidden + * + * Creates an instance of the exception and packages up the information + * needed by NetworkRestore.
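The indexed-property convention described in the comment above (a P_NUMPROVIDERS count plus P_NODENAME0, P_NODENAME1, ... per-item keys) can be sketched standalone. This is an illustration of the convention only, not JE code:

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

public class IndexedPropsSketch {
    /* Writes a list under a count key plus numbered per-item keys. */
    static Properties write(List<String> nodeNames) {
        Properties p = new Properties();
        p.setProperty("P_NUMPROVIDERS", Integer.toString(nodeNames.size()));
        for (int i = 0; i < nodeNames.size(); i++) {
            p.setProperty("P_NODENAME" + i, nodeNames.get(i));
        }
        return p;
    }

    /* Reads the list back, using the count key to bound the loop. */
    static List<String> read(Properties p) {
        int n = Integer.parseInt(p.getProperty("P_NUMPROVIDERS"));
        List<String> names = new ArrayList<>(n);
        for (int i = 0; i < n; i++) {
            names.add(p.getProperty("P_NODENAME" + i));
        }
        return names;
    }
}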
+ */ + public InsufficientLogException(RepNode repNode, + Set<ReplicationNode> logProviders) { + super(repNode.getRepImpl(), EnvironmentFailureReason.INSUFFICIENT_LOG); + this.repImpl = repNode.getRepImpl(); + this.refreshVLSN = VLSN.NULL_VLSN; + this.logProviders = logProviders; + props = initProperties(repNode.getGroup().getName()); + } + + private Properties initProperties(String groupName) { + Properties p = new Properties(); + + /* + * These apply to the destination node which is experiencing the + * insufficient logs. + */ + p.setProperty(GROUP_NAME, groupName); + p.setProperty(NODE_NAME, repImpl.getNameIdPair().getName()); + p.setProperty(HOSTNAME, repImpl.getHostName()); + p.setProperty(PORT, Integer.toString(repImpl.getPort())); + p.setProperty(ENV_DIR, repImpl.getEnvironmentHome().getPath()); + p.setProperty(REFRESH_VLSN, Long.toString(refreshVLSN.getSequence())); + + /* + * There is a set of nodes which might act as the source for the + * network restore. Since we can't store arrays, append an index + * to the property name for each log provider. + */ + p.setProperty(P_NUMPROVIDERS, Integer.toString(logProviders.size())); + int i = 0; + for (ReplicationNode rn: logProviders) { + p.setProperty(P_NODENAME + i, rn.getName()); + p.setProperty(P_HOSTNAME + i, rn.getHostName()); + p.setProperty(P_PORT + i, Integer.toString(rn.getPort())); + p.setProperty(P_NODETYPE + i, rn.getType().name()); + i++; + } + return p; + } + + /** + * @hidden + * + * Creates an instance of the exception and packages up the information + * needed by the Subscription API. The target is not a replication node, so the + * repImpl field is a shell which represents the subscription target. + */ + public InsufficientLogException(RepImpl repImpl, VLSN refreshVLSN) { + super(repImpl, EnvironmentFailureReason.INSUFFICIENT_LOG); + this.repImpl = repImpl; + this.refreshVLSN = refreshVLSN; + /* + * No log providers in this use case, but initialize the set for + * initProperties, and for robustness. + */ + this.logProviders = new HashSet<>(); + props = initProperties("NO_GROUP"); + } + + /** + * @hidden + * + * Creates an instance of the exception when a LOG_RESTORE_REQUIRED was + * found at recovery, and network restore must be initiated before the + * recovery can succeed. The flow is: + * 0. A network restore is underway. It writes a marker file, essentially + * serializing this ILE as a property list, and writing it into the log. + * Something then interrupts this network restore. The process dies, + * so all knowledge of the interrupted network restore is lost in + * memory, but the marker file acts as a persistent breadcrumb. + * 1. Since knowledge of the network restore was lost, the application tries + * to open and recover the target node. The LOG_RESTORE_REQUIRED entry + * is seen, which means that recovery can't continue. + * 2. Within recovery, a new ILE is created using information from the + * LOG_RESTORE_REQUIRED. It's thrown, which ends recovery. + * 3. The caller realizes that a network restore has to be carried out + * before this environment can be recovered. It uses the ILE instance + * created in step 2 to start a new invocation of network restore. + * 4. Network restore starts. Since network restore needs a RepImpl, one + * is instantiated using info from the ILE. + * 5. When the network restore succeeds, it removes the marker file. + * + * @param helperHosts extra helper hosts are derived from those used + * to open the environment.
+ */ + public InsufficientLogException(Properties properties, + String helperHosts) { + super(null, EnvironmentFailureReason.INSUFFICIENT_LOG); + + /* + * Don't initialize the repImpl until it's needed. If we try to do + * so in step 2, we'll be in a loop, trying to recover an environment + * from within recovery. + */ + String vlsnVal = properties.getProperty(REFRESH_VLSN); + this.refreshVLSN = new VLSN(Long.parseLong(vlsnVal)); + this.logProviders = new HashSet<>(); + + if (helperHosts != null) { + for (String hostPortPair : helperHosts.split(",")) { + final String hpp = hostPortPair.trim(); + if (hpp.length() > 0) { + LogFileSource source = + new LogFileSource("NoName", // not important + NodeType.ELECTABLE.name(), + HostPortPair.getHostname(hpp), + HostPortPair.getPort(hpp)); + logProviders.add(source); + } + } + } + this.props = properties; + } + + private void initRepImpl() { + /* + * Set up log providers. Since we can't use something that supports array + * types like JSON, and must use a property list, the provider properties + * are named P_NODENAME0, P_HOSTNAME0, etc. + */ + int numLogProviders = + Integer.parseInt(props.getProperty(P_NUMPROVIDERS)); + for (int i = 0; i < numLogProviders; i++) { + String name = props.getProperty(P_NODENAME + i); + String nodeType = props.getProperty(P_NODETYPE + i); + String hostname = props.getProperty(P_HOSTNAME + i); + int port = Integer.parseInt(props.getProperty(P_PORT + i)); + logProviders.add(new LogFileSource(name, nodeType, hostname, port)); + } + + /* + * Create a new, read-only, internal environment handle for use + * by the network backup. + */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setReadOnly(true); + envConfig.setConfigParam(EnvironmentParams.ENV_RECOVERY.getName(), + "false"); + envConfig.setTransactional(true); + + String hostname = props.getProperty(HOSTNAME); + int portVal = Integer.parseInt(props.getProperty(PORT)); + ReplicationConfig repConfig = + new ReplicationConfig(props.getProperty(GROUP_NAME), + props.getProperty(NODE_NAME), + HostPortPair.getString(hostname, portVal)); + + repConfig.setConfigParam(RepParams.NETWORKBACKUP_USE.getName(), "true"); + ReplicationNetworkConfig defaultNetConfig = + ReplicationNetworkConfig.createDefault(); + repConfig.setRepNetConfig(defaultNetConfig); + + File envDir = new File(props.getProperty(ENV_DIR)); + ReplicatedEnvironment restoreEnv = + RepInternal.createInternalEnvHandle(envDir, + repConfig, + envConfig); + this.repImpl = RepInternal.getRepImpl(restoreEnv); + openedByILE = true; + } + + /** + * For internal use only. + * @hidden + */ + private InsufficientLogException(String message, + InsufficientLogException cause) { + super(message, cause); + this.repImpl = cause.repImpl; + this.openedByILE = cause.openedByILE; + this.refreshVLSN = cause.refreshVLSN; + this.logProviders = cause.logProviders; + this.props = cause.props; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public EnvironmentFailureException wrapSelf(String msg) { + return new InsufficientLogException(msg, this); + } + + /** + * Returns the members of the replication group that can serve as candidate + * log providers to supply the logs needed by this node. + * + * @return a list of members that can provide logs + */ + public Set<ReplicationNode> getLogProviders() { + return logProviders; + } + + /** + * For internal use only. + * @hidden + * + * Returns the replication node whose log files need to be refreshed.
+ */ + public RepImpl getRepImpl() { + if (repImpl == null) { + initRepImpl(); + } + + return repImpl; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(super.toString()); + sb.append("refreshVLSN=").append(refreshVLSN); + sb.append(" logProviders=").append(logProviders); + sb.append(" repImpl=").append(repImpl); + sb.append(" props=").append(props); + return sb.toString(); + } + + /** + * For internal use only. + * @hidden + */ + public Properties getProperties() { + return props; + } + + /* + * A standin for a ReplicationNode, so NetworkRestore can establish a + * protocol connection with the source of the log files. + */ + private class LogFileSource implements ReplicationNode { + + private final String name; + private final NodeType type; + private final String hostname; + private final int port; + + LogFileSource(String name, + String nodeTypeName, + String hostname, + int port) { + this.name = name; + this.type = NodeType.valueOf(nodeTypeName); + this.hostname = hostname; + this.port = port; + } + + + @Override + public String getName() { + return name; + } + + @Override + public NodeType getType() { + return type; + } + + @Override + public InetSocketAddress getSocketAddress() { + return new InetSocketAddress(hostname, port); + } + + @Override + public String getHostName() { + return hostname; + } + + @Override + public int getPort() { + return port; + } + } + + /** + * Called when network restore is complete to close the env if it was + * opened via this exception. + * + * Also sets the repImpl field to null, to avoid OOME when opening the + * restored environment. This is important whether or not the env was + * opened via this exception. [#26305] + */ + void releaseRepImpl() { + if (repImpl == null) { + return; + } + + try { + if (openedByILE) { + repImpl.close(); + } + } finally { + repImpl = null; + } + } +} diff --git a/src/com/sleepycat/je/rep/InsufficientReplicasException.java b/src/com/sleepycat/je/rep/InsufficientReplicasException.java new file mode 100644 index 0000000..e09a909 --- /dev/null +++ b/src/com/sleepycat/je/rep/InsufficientReplicasException.java @@ -0,0 +1,131 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.Set; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.txn.Locker; + +/** + * Thrown by {@link Environment#beginTransaction} and {@link + * Transaction#commit} when these operations are initiated at a Master which is + * not in contact with a quorum of Replicas as determined by the {@link + * ReplicaAckPolicy} that is in effect for the operation. 
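A minimal sketch of one way an application might respond to the exception documented above: retry with a weaker acknowledgment policy. The environment handle and the specific durability settings are illustrative assumptions, not guidance from this patch:

import com.sleepycat.je.Durability;
import com.sleepycat.je.Transaction;
import com.sleepycat.je.TransactionConfig;
import com.sleepycat.je.rep.InsufficientReplicasException;
import com.sleepycat.je.rep.ReplicatedEnvironment;

public class QuorumFallback {
    static Transaction beginWithFallback(ReplicatedEnvironment env) {
        TransactionConfig config = new TransactionConfig();
        try {
            return env.beginTransaction(null, config);
        } catch (InsufficientReplicasException e) {
            /* Too few replicas for the default policy: fall back to a
               master-only ack policy, trading durability for availability. */
            config.setDurability(
                new Durability(Durability.SyncPolicy.SYNC,
                               Durability.SyncPolicy.NO_SYNC,
                               Durability.ReplicaAckPolicy.NONE));
            return env.beginTransaction(null, config);
        }
    }
}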
+ */ +public class InsufficientReplicasException extends OperationFailureException { + private static final long serialVersionUID = 1; + + private final ReplicaAckPolicy commitPolicy; + private final int requiredAckCount; + private final Set<String> availableReplicas; + + /** + * Creates a Commit exception. + * + * @param ackPolicy the ack policy that could not be implemented + * @param requiredAckCount the replica acks required to satisfy the policy + * @param availableReplicas the set of available Replicas + */ + public InsufficientReplicasException(Locker locker, + ReplicaAckPolicy ackPolicy, + int requiredAckCount, + Set<String> availableReplicas) { + super(locker, true /*abortOnly*/, + makeMsg(ackPolicy, requiredAckCount, availableReplicas), + null /*cause*/); + this.commitPolicy = ackPolicy; + this.requiredAckCount = requiredAckCount; + this.availableReplicas = availableReplicas; + } + + /** + * For internal use only. + * @hidden + */ + private InsufficientReplicasException(String message, + InsufficientReplicasException + cause) { + super(message, cause); + this.commitPolicy = cause.commitPolicy; + this.requiredAckCount = cause.requiredAckCount; + this.availableReplicas = cause.availableReplicas; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new InsufficientReplicasException(msg, this); + } + + /** + * Returns the Replica ack policy that was in effect for the transaction. + * + * @return the Replica ack policy + */ + public ReplicaAckPolicy getCommitPolicy() { + return commitPolicy; + } + + /** + * Returns the number of nodes (including the master) that were + * required to be active in order to satisfy the Replica ack + * policy. + * + * @return the required number of nodes + */ + public int getRequiredNodeCount() { + return requiredAckCount + 1; + } + + /** + * Returns the set of Replicas that were in contact with the master at the + * time of the commit operation. + * + * @return a set of Replica node names + */ + public Set<String> getAvailableReplicas() { + return availableReplicas; + } + + private static String makeMsg(ReplicaAckPolicy commitPolicy, + int requiredAckCount, + Set<String> availableReplicas) { + + String errorPrefix = "Commit policy: " + commitPolicy.name() + + " required " + requiredAckCount + " replica" + + (requiredAckCount > 1 ? "s. " : ". "); + + switch (availableReplicas.size()) { + case 0: + return errorPrefix + "But none were active with this master."; + + case 1: + return errorPrefix + "Only replica: " + availableReplicas + + " was available."; + + default: + return errorPrefix + "Only the following " + + availableReplicas.size() + + " replicas listed here were available: " + + availableReplicas; + } + } +} diff --git a/src/com/sleepycat/je/rep/LockPreemptedException.java b/src/com/sleepycat/je/rep/LockPreemptedException.java new file mode 100644 index 0000000..51d76b7 --- /dev/null +++ b/src/com/sleepycat/je/rep/LockPreemptedException.java @@ -0,0 +1,69 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.txn.Locker; + +/** + * Thrown when a lock has been "stolen", or preempted, from a transaction in a + * replicated environment. + * + * <p>The {@link com.sleepycat.je.Transaction} handle is invalidated as a + * result of this exception.</p> + * + * <p>Locks may be preempted in a JE HA environment on a Replica system when + * the HA write operation needs a lock that an application reader transaction + * or cursor holds. This exception is thrown by a reader transaction or cursor + * method that is called after a lock has been preempted.</p> + * + * <p>Normally, applications should catch the base class {@link + * LockConflictException} rather than catching one of its subclasses. All lock + * conflicts are typically handled in the same way, which is normally to abort + * and retry the transaction. See {@link LockConflictException} for more + * information.</p>
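A sketch of the abort-and-retry pattern recommended above. The retry bound and the Consumer-based operation body are illustrative assumptions:

import java.util.function.Consumer;

import com.sleepycat.je.Environment;
import com.sleepycat.je.LockConflictException;
import com.sleepycat.je.Transaction;

public class RetryOnConflict {
    static final int MAX_RETRIES = 3; // bound: illustrative

    static void runWithRetries(Environment env, Consumer<Transaction> work) {
        for (int attempt = 0; ; attempt++) {
            Transaction txn = env.beginTransaction(null, null);
            try {
                work.accept(txn); // reads/writes performed under txn
                txn.commit();
                return;
            } catch (LockConflictException e) {
                txn.abort();      // release locks before retrying
                if (attempt >= MAX_RETRIES) {
                    throw e;
                }
            }
        }
    }
}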
+ * + * @since 4.0 + */ +public class LockPreemptedException extends LockConflictException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public LockPreemptedException(Locker locker, Throwable cause) { + super(locker, "Lock was preempted by a replication stream replay " + + "write operation", cause); + } + + /** + * For internal use only. + * @hidden + */ + private LockPreemptedException(String message, + LockPreemptedException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public LockPreemptedException wrapSelf(String msg) { + return new LockPreemptedException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/LogFileRewriteListener.java b/src/com/sleepycat/je/rep/LogFileRewriteListener.java new file mode 100644 index 0000000..94a1bf8 --- /dev/null +++ b/src/com/sleepycat/je/rep/LogFileRewriteListener.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.io.File; +import java.util.Set; + +/** + * @hidden + * A notification callback interface to warn the user that JE is about to + * modify previously written log files as part of sync-up rollback. + * + * @see RollbackException + */ +public interface LogFileRewriteListener { + + /** + * @hidden + * Notifies the user that JE is about to modify previously written log + * files. + * + * @param files the log files that will be modified. + */ + public void rewriteLogFiles(Set<File> files); +} diff --git a/src/com/sleepycat/je/rep/LogOverwriteException.java b/src/com/sleepycat/je/rep/LogOverwriteException.java new file mode 100644 index 0000000..b8b75e2 --- /dev/null +++ b/src/com/sleepycat/je/rep/LogOverwriteException.java @@ -0,0 +1,68 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown when one or more log files are modified (overwritten) as the result + * of a replication operation. This occurs when a replication operation must + * change existing data in a log file in order to synchronize with other nodes + * in a replication group. Any previously copied log files may be invalid and + * should be discarded. + * + * <p>This exception is thrown by {@link + * com.sleepycat.je.util.DbBackup}. Backups and similar operations that copy + * log files should discard any copied files when this exception occurs, and + * may retry the operation at a later time. The time interval during which + * backups are not possible will be fairly short (less than a minute).</p> + * + * <p>Note that this exception is never thrown in a standalone (non-replicated) + * environment.</p> + * + * <p>The {@link com.sleepycat.je.Transaction} handle is not + * invalidated as a result of this exception.</p>
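A hedged sketch of the backup guidance above: discard copied files and retry later when this exception occurs. DbBackup usage is simplified, and the copy step is a placeholder:

import com.sleepycat.je.Environment;
import com.sleepycat.je.rep.LogOverwriteException;
import com.sleepycat.je.util.DbBackup;

public class BackupWithRetry {
    static void backup(Environment env) throws InterruptedException {
        while (true) {
            DbBackup backupHelper = new DbBackup(env);
            backupHelper.startBackup();
            try {
                String[] files = backupHelper.getLogFilesInBackupSet();
                /* ... copy each file in "files" to the backup location ... */
                return; // backup complete
            } catch (LogOverwriteException e) {
                /* Copied files may now be invalid: discard them and retry;
                   the unavailable window is typically under a minute. */
            } finally {
                backupHelper.endBackup();
            }
            Thread.sleep(60 * 1000); // wait before retrying
        }
    }
}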
+ * + * @since 4.0 + */ +public class LogOverwriteException extends OperationFailureException { + + private static final long serialVersionUID = 19238344223L; + + /** + * For internal use only. + * @hidden + */ + public LogOverwriteException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private LogOverwriteException(String message, + LogOverwriteException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String message) { + return new LogOverwriteException(message, this); + } +} diff --git a/src/com/sleepycat/je/rep/MasterReplicaTransitionException.java b/src/com/sleepycat/je/rep/MasterReplicaTransitionException.java new file mode 100644 index 0000000..fad6810 --- /dev/null +++ b/src/com/sleepycat/je/rep/MasterReplicaTransitionException.java @@ -0,0 +1,65 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * In the past, MasterReplicaTransitionException was sometimes thrown in JE + * replication systems when an environment that was a master transitioned + * to the replica state. In some cases, the environment had to reinitialize + * internal state to become a replica, and the application was required to + * close and reopen its environment handle, thereby + * properly reinitializing the node. + *
        + * As of JE 5.0.88, the environment can transition from master to replica + * without requiring an environment close and re-open. + * @deprecated as of JE 5.0.88 because the environment no longer needs to + * restart when transitioning from master to replica. + */ +@Deprecated +public class MasterReplicaTransitionException + extends RestartRequiredException { + + private static final long serialVersionUID = 1; + + /* Maintain for unit testing in SerializeUtils.java */ + public MasterReplicaTransitionException(EnvironmentImpl envImpl, + Exception cause) { + super(envImpl, + EnvironmentFailureReason.MASTER_TO_REPLICA_TRANSITION, + cause); + } + + /** + * @hidden + * For internal use only. + */ + private MasterReplicaTransitionException + (String message, + MasterReplicaTransitionException cause) { + super(message, cause); + } + + /** + * @hidden + * For internal use only. + */ + @Override + public EnvironmentFailureException wrapSelf(String msg) { + return new MasterReplicaTransitionException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/MasterStateException.java b/src/com/sleepycat/je/rep/MasterStateException.java new file mode 100644 index 0000000..7b2a660 --- /dev/null +++ b/src/com/sleepycat/je/rep/MasterStateException.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + + +/** + * This exception indicates that the application attempted an operation that is + * not permitted when it is in the {@link ReplicatedEnvironment.State#MASTER} + * state. + */ +public class MasterStateException extends StateChangeException { + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public MasterStateException(StateChangeEvent stateChangeEvent) { + super(null, stateChangeEvent); + } + + /** + * For internal use only. + * @hidden + */ + public MasterStateException(String message) { + super(message, null); + } + + private MasterStateException(String message, + MasterStateException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public MasterStateException wrapSelf(String msg) { + return new MasterStateException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/MasterTransferFailureException.java b/src/com/sleepycat/je/rep/MasterTransferFailureException.java new file mode 100644 index 0000000..34cbe03 --- /dev/null +++ b/src/com/sleepycat/je/rep/MasterTransferFailureException.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown by {@link ReplicatedEnvironment#transferMaster} if a Master Transfer + * operation cannot be completed within the allotted time. + */ +public class MasterTransferFailureException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public MasterTransferFailureException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private MasterTransferFailureException + (String message, MasterTransferFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public MasterTransferFailureException wrapSelf(String message) { + return new MasterTransferFailureException(message, this); + } +} diff --git a/src/com/sleepycat/je/rep/MemberActiveException.java b/src/com/sleepycat/je/rep/MemberActiveException.java new file mode 100644 index 0000000..3f059c9 --- /dev/null +++ b/src/com/sleepycat/je/rep/MemberActiveException.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.OperationFailureException; + +/** + * @hidden internal, for use in disaster recovery [#23447] + * + * Thrown when an operation is performed on an active replication group member + * but it requires that the member not be active. + */ +public class MemberActiveException extends OperationFailureException { + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public MemberActiveException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private MemberActiveException(String message, + MemberActiveException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new MemberActiveException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/MemberNotFoundException.java b/src/com/sleepycat/je/rep/MemberNotFoundException.java new file mode 100644 index 0000000..495baac --- /dev/null +++ b/src/com/sleepycat/je/rep/MemberNotFoundException.java @@ -0,0 +1,50 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
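Relating to MasterTransferFailureException above, a sketch of initiating a master transfer and tolerating a timeout. The replica name, timeout, and handling choice are illustrative assumptions:

import java.util.Collections;
import java.util.concurrent.TimeUnit;

import com.sleepycat.je.rep.MasterTransferFailureException;
import com.sleepycat.je.rep.ReplicatedEnvironment;

public class TransferMasterSketch {
    static String transfer(ReplicatedEnvironment env, String replicaName) {
        try {
            /* Returns the name of the new master on success. */
            return env.transferMaster(Collections.singleton(replicaName),
                                      30, TimeUnit.SECONDS);
        } catch (MasterTransferFailureException e) {
            /* No candidate caught up within the allotted time; this node
               remains master and the operation may simply be retried later. */
            return null;
        }
    }
}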
+ */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown when an operation requires a replication group member and that member + * is not present in the replication group. + */ +public class MemberNotFoundException extends OperationFailureException { + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public MemberNotFoundException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + /** + * For internal use only. + * @hidden + */ + private MemberNotFoundException(String message, + MemberNotFoundException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new MemberNotFoundException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/NetworkRestore.java b/src/com/sleepycat/je/rep/NetworkRestore.java new file mode 100644 index 0000000..4e29b39 --- /dev/null +++ b/src/com/sleepycat/je/rep/NetworkRestore.java @@ -0,0 +1,457 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.io.File; +import java.io.IOException; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.log.RestoreMarker; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.networkRestore.NetworkBackup; +import com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStats; +import com.sleepycat.je.rep.impl.networkRestore.NetworkBackup.RejectedServerException; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.VLSN; + +/** + * Obtains log files for a Replica from other members of the replication + * group. A Replica may need to do so if it has been offline for some time, and + * has fallen behind in its execution of the replication stream. + *
+ * <p>During that time, the connected nodes may have reduced their log files by + * deleting files after doing log cleaning. When this node rejoins the group, + * it is possible that the current Master's log files do not go back far enough + * to adequately sync up this node. In that case, the node can use a {@code + * NetworkRestore} object to copy the log files from one of the nodes in the + * group. The system tries to avoid deleting log files that either would be + * needed for replication by current nodes or where replication would be more + * efficient than network restore.</p> + * + * <p>A Replica discovers the need for a NetworkRestore operation when a call to + * {@code ReplicatedEnvironment()} fails with an {@link + * InsufficientLogException}.</p> + * + * <p>A call to {@code NetworkRestore.execute()} will copy the required log + * files from a member of the group that owns the files and seems to be the + * least busy. For example:</p> + * <pre>
        + *  try {
        + *     node = new ReplicatedEnvironment(envDir, envConfig, repConfig);
        + * } catch (InsufficientLogException insufficientLogEx) {
        + *
        + *     NetworkRestore restore = new NetworkRestore();
        + *     NetworkRestoreConfig config = new NetworkRestoreConfig();
        + *     config.setRetainLogFiles(false); // delete obsolete log files.
        + *
        + *     // Use the members returned by insufficientLogEx.getLogProviders() to
        + *     // select the desired subset of members and pass the resulting list
        + *     // as the argument to config.setLogProviders(), if the default selection
        + *     // of providers is not suitable.
        + *
        + *     restore.execute(insufficientLogEx, config);
        + *
        + *     // retry
        + *     node = new ReplicatedEnvironment(envDir, envConfig, repConfig);
        + * }
+ * </pre>
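A standalone sketch (not JE code) of the server-selection rule spelled out in the "Algorithm" comment of init() below: compute minVLSN as the highest rangeEnd minus maxLag, reject servers below that cutoff, and try the remainder least-loaded first:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ProviderSelectionSketch {
    static class Candidate {
        final String name; final long rangeEnd; final int load;
        Candidate(String name, long rangeEnd, int load) {
            this.name = name; this.rangeEnd = rangeEnd; this.load = load;
        }
    }

    static List<Candidate> order(List<Candidate> servers, long maxLag) {
        long maxEnd = 0;
        for (Candidate c : servers) {
            maxEnd = Math.max(maxEnd, c.rangeEnd);
        }
        final long minVLSN = Math.max(0, maxEnd - maxLag); // lag cutoff
        List<Candidate> result = new ArrayList<>();
        for (Candidate c : servers) {
            if (c.rangeEnd >= minVLSN) {   // reject lagging servers
                result.add(c);
            }
        }
        result.sort(Comparator.comparingInt(c -> c.load)); // least busy first
        return result;
    }
}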
+ * @see Restoring Log Files + */ +public class NetworkRestore { + + /* The node that needs to be restored. */ + private RepImpl repImpl; + + /* The server's VLSN range end must be GT this vlsn. */ + private VLSN minVLSN; + + /* See 'Algorithm'. */ + private int maxLag; + + /* + * The log provider actually used to obtain the log files. It must be one + * of the members from the logProviders list. + */ + private ReplicationNode logProvider; + + /* The current backup attempt. */ + private volatile NetworkBackup backup; + + private Logger logger; + + /* For unit tests only */ + private TestHook<File> interruptHook; + + /** + * Creates an instance of NetworkRestore suitable for restoring the logs at + * this node. After the logs are restored, the node can create a new + * {@link ReplicatedEnvironment} and join the group. + */ + public NetworkRestore() { + } + + /** + * Initializes this instance for an impending execute() operation. + * + * Algorithm + * ========= + * If we simply choose the server with the highest maxVLSN, we would + * always choose the master, which is typically the server with the + * highest load. If we choose based on load alone, we may choose a + * lagging replica, and this may result in syncup failing later on the + * restored server. The compromise solution involves maxLag. We only + * select servers that are less than maxLag behind the master (the server with the + * highest VLSN range end) to increase the chances of syncup working + * later, and among the non-lagging servers we choose the lowest load. + * + * 1. Collect list of servers and get their load/rangeEnd using the + * first part of the restore protocol. For each server, its load is its + * number of feeders and rangeEnd is the upper end of its VLSN range. + * Remove unresponsive servers from the list. + * + * 2. At the beginning of each round, if the server list is empty, give up. + * + * 3. Sort list by load. Let minVLSN be max(all rangeEnds) minus maxLag. + * + * 4. Attempt to perform restore in list order, refreshing each server's + * load/rangeEnd as we go. Reject any server with a refreshed rangeEnd + * that is LT minVLSN or a refreshed load that is GT its prior known + * load. Remove unresponsive servers from the list. + * + * 5. If the restore is incomplete, goto 2 and do another round. + * + * Note that between rounds the load and minVLSN of each server can + * change, which is why servers are not removed from the list unless + * they are unresponsive. The idea is to choose the best server based on + * the information we collected in the last round, but reject servers + * with new load or rangeEnd values that invalidate the earlier decision, + * and always get fresh load/rangeEnd values for each server. + * + * @param logException the exception packaging the information driving the + * restore operation. + * @param config may contain an explicit list of members.
+ * @return the list of candidate Server instances + * @throws IllegalArgumentException if the configured log providers are + * invalid + */ + private List<Server> init(InsufficientLogException logException, + NetworkRestoreConfig config) + throws IllegalArgumentException { + + repImpl = logException.getRepImpl(); + + logger = LoggerUtils.getLogger(getClass()); + + maxLag = repImpl.getConfigManager().getInt( + RepParams.NETWORKBACKUP_MAX_LAG); + + List<ReplicationNode> logProviders; + + if ((config.getLogProviders() != null) && + (config.getLogProviders().size() > 0)) { + final Set<String> memberNames = new HashSet<>(); + for (ReplicationNode node : logException.getLogProviders()) { + memberNames.add(node.getName()); + } + for (ReplicationNode node : config.getLogProviders()) { + if (!memberNames.contains(node.getName())) { + throw new IllegalArgumentException + ("Node:" + node.getName() + + " is not a suitable member for NetworkRestore." + + " It's not a member of logException." + + "getLogProviders(): " + + Arrays.toString(memberNames.toArray())); + } + } + logProviders = config.getLogProviders(); + } else { + logProviders = new LinkedList<>(logException.getLogProviders()); + } + + LoggerUtils.info(logger, repImpl, "Started network restore"); + + List<Server> serverList = new LinkedList<>(); + + /* + * Set minVLSN and loadThreshold such that all attempts in the initial + * round will produce RejectedServerException. Real values will be used + * as servers are contacted and added to the list for the next round. + * No initial sort is needed because all servers have the same load. + */ + VLSN maxVLSN = new VLSN(Long.MAX_VALUE); + int loadThreshold = -1; + + for (ReplicationNode node : logProviders) { + serverList.add(new Server(node, maxVLSN, loadThreshold)); + } + + minVLSN = maxVLSN; + + return serverList; + } + + /** + * Sorts the refreshed server list by load and computes minVLSN. + */ + private void resetServerList(List<Server> serverList) { + + if (serverList.isEmpty()) { + return; + } + + /* Natural comparator sorts by Server.load. */ + Collections.sort(serverList); + + /* Get server with max VLSN range end. */ + Server maxVlsnServer = Collections.max(serverList, + Comparator.comparingLong(s -> s.rangeEnd.getSequence())); + + /* Subtract lag and ensure that result is GTE 0. */ + minVLSN = new VLSN( + Math.max(0, maxVlsnServer.rangeEnd.getSequence() - maxLag)); + } + + /** + * Restores the log files from one of the members of the replication group. + *
+ * <p>If {@code config.getLogProviders()} returns null, or an empty list, + * it uses the member that is least busy as the provider of the log files. + * Otherwise it selects a member from the list, choosing the first member + * that's available, to provide the log files. If the members in this list + * are not present in {@code logException.getLogProviders()}, it will + * result in an {@code IllegalArgumentException} being thrown. + * Exception handlers for {@code InsufficientLogException} will + * typically use {@link InsufficientLogException#getLogProviders()} as the + * starting point to compute an appropriate list, with which to set up + * the config argument.</p> + * + * <p>
Log files that are currently at the node will be retained if they are + * part of a consistent set of log files. Obsolete log files are either + * deleted, or are renamed based on the configuration of + * config.getRetainLogFiles().</p> + * + * @param logException the exception thrown by {@code + * ReplicatedEnvironment()} that necessitated this log refresh operation + * + * @param config configures the execution of the network restore operation + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalArgumentException if the config is invalid + * + * @see NetworkRestoreConfig + */ + public synchronized void execute(InsufficientLogException logException, + NetworkRestoreConfig config) + throws EnvironmentFailureException, + IllegalArgumentException { + + try { + List<Server> serverList = init(logException, config); + boolean firstRound = true; + /* + * Loop trying busier servers. It sorts the servers by the number + * of active feeders at each server and contacts each one in turn, + * trying increasingly busy servers until it finds a suitable one + * that will service its request for log files. The same server may + * be contacted multiple times, since it may become busier between + * the time it was first contacted and a subsequent attempt. + */ + while (!serverList.isEmpty()) { + final List<Server> newServerList = new LinkedList<>(); + File envHome = repImpl.getEnvironmentHome(); + + for (Server server : serverList) { + InetSocketAddress serverSocket = + server.node.getSocketAddress(); + if (serverSocket.equals(repImpl.getSocket())) { + /* Cannot restore from yourself. */ + continue; + } + LoggerUtils.info(logger, repImpl, + "Network restore candidate server: " + + server.node); + logProvider = server.node; + final long startTime = System.currentTimeMillis(); + try { + backup = new NetworkBackup + (serverSocket, + config.getReceiveBufferSize(), + envHome, + repImpl.getNameIdPair(), + config.getRetainLogFiles(), + server.load, + minVLSN, + repImpl, + repImpl.getFileManager(), + repImpl.getLogManager(), + repImpl.getChannelFactory(), + logException.getProperties()); + + backup.setInterruptHook(interruptHook); + backup.execute(); + LoggerUtils.info(logger, repImpl, String.format( + "Network restore completed from: %s. " + + "Elapsed time: %,d s.", + server.node, + ((System.currentTimeMillis() - startTime) / + 1000))); + return; + } catch (RestoreMarker.FileCreationException e) { + throw + EnvironmentFailureException.unexpectedException(e); + } catch (DatabaseException e) { + /* Likely a malfunctioning server. */ + LoggerUtils.warning(logger, repImpl, + "Backup failed from node: " + + server.node + "\n" + + e.getMessage()); + } catch (ConnectException e) { + /* Move on if the network connection is troublesome. */ + LoggerUtils.info(logger, repImpl, + "Backup server node: " + server.node + + " is not available: " + + e.getMessage()); + + } catch (IOException | ServiceConnectFailedException e) { + /* Move on if the network connection is troublesome. */ + LoggerUtils.warning(logger, repImpl, + "Backup failed from node: " + + server.node + "\n" + + e.getMessage()); + } catch (RejectedServerException e) { + /* + * This is for one of two reasons: + * 1. This is the initial round and we expect this + * exception for every server. We should not log a + * message. Add server to the new list now that we + * have its true rangeEnd and load. + * 2.
The server got busier or is lagging since the + * prior round, based on its refreshed rangeEnd and + * load. Add server to the list in case it qualifies + * in subsequent rounds. + */ + if (!firstRound) { + LoggerUtils.info(logger, repImpl, e.getMessage()); + } + + newServerList.add( + new Server(server.node, e.getRangeLast(), + e.getActiveServers())); + + } catch (IllegalArgumentException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + serverList = newServerList; /* New list for the next round. */ + resetServerList(serverList); + firstRound = false; + } + throw EnvironmentFailureException.unexpectedState + ("Tried and failed with every node"); + } finally { + logException.releaseRepImpl(); + } + } + + /** + * @hidden + * + * for testing use only + */ + public NetworkBackup getBackup() { + return backup; + } + + /** + * @hidden + * + * for testing use only + * + * Returns the member that was used to provide the log files. + */ + public ReplicationNode getLogProvider() { + return logProvider; + } + + /** + * @hidden + * + * Returns the network backup statistics for the current network restore + * attempt, or {@code null} if a network backup is not currently underway. + * + * @return the statistics or {@code null} + */ + public NetworkBackupStats getNetworkBackupStats() { + final NetworkBackup currentBackup = backup; + return (currentBackup != null) ? currentBackup.getStats() : null; + } + + /** + * A convenience class to help aggregate server attributes that may be + * relevant to ordering the servers in terms of their suitability. + */ + private static class Server implements Comparable<Server> { + private final ReplicationNode node; + private final VLSN rangeEnd; + private final int load; + + public Server(ReplicationNode node, VLSN rangeEnd, int load) { + this.node = node; + this.rangeEnd = rangeEnd; + this.load = load; + } + + /** + * This method is used in the sort to prioritize servers. + */ + @Override + public int compareTo(Server o) { + return load - o.load; + } + + @Override + public String toString() { + return node.getName(); + } + } + + /** + * @hidden + * For unit testing + * + * @param hook + */ + public void setInterruptHook(TestHook<File> hook) { + interruptHook = hook; + } +} diff --git a/src/com/sleepycat/je/rep/NetworkRestoreConfig.java b/src/com/sleepycat/je/rep/NetworkRestoreConfig.java new file mode 100644 index 0000000..b5c36a6 --- /dev/null +++ b/src/com/sleepycat/je/rep/NetworkRestoreConfig.java @@ -0,0 +1,160 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.List; + +/** + * NetworkRestoreConfig defines the configuration parameters used to configure + * a NetworkRestore operation. + * + * @see NetworkRestore + */ +public class NetworkRestoreConfig { + /** + * Determines whether obsolete log files must be renamed or deleted. + */ + private boolean retainLogFiles = true; + + /** + * The size of the network restore client socket's receive buffer.
+ */ + private int receiveBufferSize = 0x200000; /* 2 MB */ + + /** + * List (in priority order) of the data nodes, either ELECTABLE or + * SECONDARY members, that should be contacted for the log files. + */ + private List<ReplicationNode> logProviders; + + /** + * Returns a boolean indicating whether existing log files should be + * retained or deleted. + * + * @return true if log files must be retained + */ + public boolean getRetainLogFiles() { + return retainLogFiles; + } + + /** + * If true retains obsolete log files, by renaming them instead of deleting + * them. The default is "true". + * + * <p>A renamed file has its .jdb suffix replaced by + * .bup and an additional monotonically increasing + * numeric suffix. All files that were renamed as part of the same + * NetworkRestore attempt will have the same numeric suffix.</p> + * + * <p>For example, if files 00000001.jdb and 00000002.jdb were rendered + * obsolete, and 4 was the highest suffix in use for this environment when + * the operation was initiated, then the files would be renamed as + * 00000001.bup.5 and 00000002.bup.5.</p> + * + * @param retainLogFiles if true retains obsolete log files + * + * @return this + */ + public NetworkRestoreConfig setRetainLogFiles(boolean retainLogFiles) { + setRetainLogFilesVoid(retainLogFiles); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setRetainLogFilesVoid(boolean retainLogFiles) { + this.retainLogFiles = retainLogFiles; + } + + /** + * Returns the size of the receive buffer associated with the socket used + * to transfer files during the NetworkRestore operation. + */ + public int getReceiveBufferSize() { + return receiveBufferSize; + } + + /** + * Sets the size of the receive buffer associated with the socket used to + * transfer files during the NetworkRestore operation. + *
+ * <p>Note that if the size specified is larger than the operating system + * constrained maximum, it will be limited to this maximum value. For + * example, on Linux you may need to set the kernel parameter + * net.core.rmem_max using the command sysctl -w + * net.core.rmem_max=1048576 to increase the operating system imposed + * limit.</p> + *
+ * @param receiveBufferSize the size of the receive buffer. If it's zero, + * the operating system default value is used. + * + * @return this + */ + public NetworkRestoreConfig setReceiveBufferSize(int receiveBufferSize) { + if (receiveBufferSize < 0) { + throw new IllegalArgumentException("receiveBufferSize:" + + receiveBufferSize + + " is negative."); + } + this.receiveBufferSize = receiveBufferSize; + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReceiveBufferSizeVoid(int receiveBufferSize) { + setReceiveBufferSize(receiveBufferSize); + } + + /** + * Returns the candidate list of data nodes, either ELECTABLE or SECONDARY + * members, that may be used to obtain log files. + * + * @return the list of data nodes in priority order, or null + */ + public List<ReplicationNode> getLogProviders() { + return logProviders; + } + + /** + * Sets the prioritized list of data nodes, either ELECTABLE or SECONDARY + * members, used to select a node from which to obtain log files for the + * NetworkRestore operation. If a list is supplied, NetworkRestore will + * only use nodes from this list, trying each one in order. + * + *
+ * <p>The default value is null. If a null value is configured for + * NetworkRestore, it will choose the least busy data node with a current + * set of logs as the provider of log files. + * + * @param providers the list of data nodes in priority order, or null + * + * @return this + */ + public NetworkRestoreConfig + setLogProviders(List<ReplicationNode> providers) { + + setLogProvidersVoid(providers); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLogProvidersVoid(List<ReplicationNode> providers) { + logProviders = providers; + } +} diff --git a/src/com/sleepycat/je/rep/NetworkRestoreConfigBeanInfo.java b/src/com/sleepycat/je/rep/NetworkRestoreConfigBeanInfo.java new file mode 100644 index 0000000..b22023a --- /dev/null +++ b/src/com/sleepycat/je/rep/NetworkRestoreConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +import com.sleepycat.util.ConfigBeanInfoBase; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class NetworkRestoreConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(NetworkRestoreConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(NetworkRestoreConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/NoConsistencyRequiredPolicy.java b/src/com/sleepycat/je/rep/NoConsistencyRequiredPolicy.java new file mode 100644 index 0000000..0471ad0 --- /dev/null +++ b/src/com/sleepycat/je/rep/NoConsistencyRequiredPolicy.java @@ -0,0 +1,104 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * A consistency policy that lets a transaction on a replica using this policy + * proceed regardless of the state of the Replica relative to the Master. It + * can also be used to access a database when a replication node is in a + * DETACHED state. + *
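A small usage sketch for the NoConsistencyRequiredPolicy class introduced above, using the per-transaction configuration route described in the javadoc that follows. The open ReplicatedEnvironment handle named env is an assumption:

import com.sleepycat.je.Transaction;
import com.sleepycat.je.TransactionConfig;
import com.sleepycat.je.rep.NoConsistencyRequiredPolicy;
import com.sleepycat.je.rep.ReplicatedEnvironment;

public class NoConsistencyRead {
    static Transaction beginUnconstrained(ReplicatedEnvironment env) {
        TransactionConfig config = new TransactionConfig();
        /* Proceed regardless of how far this replica lags the master. */
        config.setConsistencyPolicy(
            NoConsistencyRequiredPolicy.NO_CONSISTENCY);
        return env.beginTransaction(null, config);
    }
}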
+ * <p>Consistency policies are specified either at a per-transaction level through + * {@link com.sleepycat.je.TransactionConfig#setConsistencyPolicy} or as a + * replication node-wide default through {@link + * com.sleepycat.je.rep.ReplicationConfig#setConsistencyPolicy}.</p> + * + * @see Managing Consistency + */ +public class NoConsistencyRequiredPolicy implements ReplicaConsistencyPolicy { + + /** + * The name:{@value} associated with this policy. The name can be used when + * constructing policy property values for use in je.properties files. + */ + public static final String NAME = "NoConsistencyRequiredPolicy"; + + /** + * Convenience instance. + */ + public final static NoConsistencyRequiredPolicy NO_CONSISTENCY = + new NoConsistencyRequiredPolicy(); + + /** + * Create a NoConsistencyRequiredPolicy. + */ + public NoConsistencyRequiredPolicy() { + } + + /** + * Returns the name:{@value #NAME}, associated with this policy. + * @see #NAME + */ + @Override + public String getName() { + return NAME; + } + + @Override + @SuppressWarnings("unused") + public void ensureConsistency(EnvironmentImpl repInstance) { + /* Nothing to check. */ + return; + } + + /** + */ + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result; + return result; + } + + /** + */ + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof NoConsistencyRequiredPolicy)) { + return false; + } + return true; + } + + /** + * Always returns 0, no timeout is needed for this policy. + */ + @Override + public long getTimeout(@SuppressWarnings("unused") TimeUnit unit) { + return 0; + } +} diff --git a/src/com/sleepycat/je/rep/NodeState.java b/src/com/sleepycat/je/rep/NodeState.java new file mode 100644 index 0000000..30d9637 --- /dev/null +++ b/src/com/sleepycat/je/rep/NodeState.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; + +/** + * The current state of a replication node and the application this node is + * running in. + *
<p>
+ * This includes the following information:
+ * <ul>
+ * <li>the replication {@link ReplicatedEnvironment.State state} of this
+ * node</li>
+ * <li>the name of the current master, as known by this node</li>
+ * <li>the time when this node joined the replication group</li>
+ * <li>the latest transaction end (abort or commit) VLSN on this node</li>
+ * <li>the transaction end (abort or commit) VLSN on the master known by this
+ * node. The difference between transaction end VLSNs on the master versus on
+ * this node gives an indication of how current this node's data is. The gap
+ * in VLSN values indicates the number of replication records that must be
+ * processed by this node, to be caught up to the master.</li>
+ * <li>the number of feeders running on this node</li>
+ * <li>the system load average for the last minute</li>
+ * <li>The appState field is a byte array meant to hold information generated by
+ * the JE HA application, as provided by a registered
+ * {@link com.sleepycat.je.rep.AppStateMonitor}. Users are responsible for
+ * serializing and deserializing information for this field.</li>
+ * </ul>
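Illustration (not part of this commit): a small sketch against the getters defined in the class below; how the NodeState instance is obtained (for example, from an administrative utility) is assumed.

import com.sleepycat.je.rep.NodeState;

class NodeStateSketch {
    /*
     * Rough replica lag in VLSN units: the number of replication records
     * this node must still process to catch up to the master it knows of.
     */
    static long vlsnLag(NodeState state) {
        return state.getKnownMasterTxnEndVLSN()
             - state.getCurrentTxnEndVLSN();
    }
}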
        + * @since 5.0 + */ +public class NodeState { + + /* The name of the node requested. */ + private final String nodeName; + + /* The name of the group which this node joins. */ + private final String groupName; + + /* The current state of the node. */ + private final State currentState; + + /* The name of the current master in the group. */ + private final String masterName; + + /* The JEVersion this node runs on. */ + private final JEVersion jeVersion; + + /* The time when this node last joined the group. */ + private final long joinTime; + + /* The current transaction end VLSN on this node. */ + private final long currentTxnEndVLSN; + + /* The master transaction end VLSN known by this node. */ + private final long masterTxnEndVLSN; + + /* The number of active feeders that running on this node. */ + private final int activeFeeders; + + /* The current log version of this node. */ + private final int logVersion; + + /* The current application state. */ + private final byte[] appState; + + /* The system load average for the last minute. */ + private final double systemLoad; + + /** + * @hidden + * Internal use only. + */ + public NodeState(String nodeName, + String groupName, + State currentState, + String masterName, + JEVersion jeVersion, + long joinTime, + long currentTxnEndVLSN, + long masterTxnEndVLSN, + int activeFeeders, + int logVersion, + byte[] appState, + double systemLoad) { + this.nodeName = nodeName; + this.groupName = groupName; + this.currentState = currentState; + this.masterName = masterName; + this.jeVersion = jeVersion; + this.joinTime = joinTime; + this.currentTxnEndVLSN = currentTxnEndVLSN; + this.masterTxnEndVLSN = masterTxnEndVLSN; + this.activeFeeders = activeFeeders; + this.logVersion = logVersion; + this.appState = appState; + this.systemLoad = systemLoad; + } + + /** + * Returns the name of the node whose state is requested. + * + * @return the name of the node. + */ + public String getNodeName() { + return nodeName; + } + + /** + * Returns the name of the group which the node joins. + * + * @return name of the group which the node joins + */ + public String getGroupName() { + return groupName; + } + + /** + * Returns the replication {@link ReplicatedEnvironment.State state} of + * this node. + * + * @return the replication state of this node. + */ + public State getNodeState() { + return currentState; + } + + /** + * Returns the name of the current + * {@link State#MASTER master} known by this node. + * + * @return the name of the current master + */ + public String getMasterName() { + return masterName; + } + + /** + * Returns the current JEVersion that this node runs on. + * + * @return the current JEVersion used by this node. + */ + public JEVersion getJEVersion() { + return jeVersion; + } + + /** + * Returns the time when this node joins the replication group. + * + * @return the time when this node joins the group + */ + public long getJoinTime() { + return joinTime; + } + + /** + * Returns the latest transaction end VLSN on this replication node. + * + * @return the commit VLSN on this node + */ + public long getCurrentTxnEndVLSN() { + return currentTxnEndVLSN; + } + + /** + * Returns the transaction end VLSN on the master known by this node. + * + * @return the known commit VLSN on master + */ + public long getKnownMasterTxnEndVLSN() { + return masterTxnEndVLSN; + } + + /** + * Returns the number of current active Feeders running on this node. 
+ * + * @return the number of running Feeders on the node + */ + public int getActiveFeeders() { + return activeFeeders; + } + + /** + * Returns the log version of this node. + * + * @return the log version of this node. + */ + public int getLogVersion() { + return logVersion; + } + + /** + * Returns the application state which is obtained via + * {@link AppStateMonitor#getAppState}. + * + * @return the application state + */ + public byte[] getAppState() { + return appState; + } + + /** + * Returns the system load average for the last minute. + * + * @return the system average load, -1.0 if the node is running on jdk5 or + * exceptions thrown while getting this information. + */ + public double getSystemLoad() { + return systemLoad; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Current state of node: " + nodeName + + " from group: " + groupName + "\n"); + sb.append(" Current state: " + currentState + "\n"); + sb.append(" Current master: " + masterName + "\n"); + sb.append(" Current JE version: " + jeVersion + "\n"); + sb.append(" Current log version: " + logVersion + "\n"); + sb.append(" Current transaction end (abort or commit) VLSN: " + + currentTxnEndVLSN + "\n"); + sb.append(" Current master transaction end (abort or commit) VLSN: " + + masterTxnEndVLSN + "\n"); + sb.append(" Current active feeders on node: " + activeFeeders + "\n"); + sb.append(" Current system load average: " + systemLoad + "\n"); + + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/rep/NodeType.java b/src/com/sleepycat/je/rep/NodeType.java new file mode 100644 index 0000000..2ceb4ad --- /dev/null +++ b/src/com/sleepycat/je/rep/NodeType.java @@ -0,0 +1,188 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +/** + * The different types of nodes that can be in a replication group. + */ +public enum NodeType { + + /** + * A node that passively listens for the results of elections, but does not + * participate in them. It does not have a replicated environment + * associated with it. + * @see com.sleepycat.je.rep.monitor.Monitor + */ + MONITOR { + @Override + public boolean isMonitor() { + return true; + } + }, + + /** + * A full fledged member of the replication group with an associated + * replicated environment that can serve as both a Master and a Replica. + */ + ELECTABLE { + @Override + public boolean isElectable() { + return true; + } + @Override + public boolean isDataNode() { + return true; + } + }, + + /** + * A member of the replication group with an associated replicated + * environment that serves as a Replica but does not participate in + * elections or durability decisions. Secondary nodes are only remembered + * by the group while they maintain contact with the Master. + * + *
<p>
+ * You can use SECONDARY nodes to:
+ * <ul>
+ * <li>Provide a copy of the data available at a distant location</li>
+ * <li>Maintain an extra copy of the data to increase redundancy</li>
+ * <li>Change the number of replicas to adjust to dynamically changing read
+ * loads</li>
+ * </ul>
        + * + * @since 6.0 + */ + SECONDARY { + @Override + public boolean isSecondary() { + return true; + } + @Override + public boolean isDataNode() { + return true; + } + @Override + public boolean hasTransientId() { + return true; + } + }, + + ARBITER { + @Override + public boolean isArbiter() { + return true; + } + @Override + public boolean isElectable() { + return true; + } + }, + + /** + * @hidden + * For internal use only. + * + * A node that receives replication data, but does not participate in + * elections or durability decisions, and is not considered a data node + * and cannot be depended on to maintain a copy of the data. + * + * @since 7.2 + */ + EXTERNAL { + @Override + public boolean isExternal() { + return true; + } + @Override + public boolean hasTransientId() { + return true; + } + }; + + /** + * Returns whether this is the {@link #MONITOR} type. + * + * @return whether this is {@code MONITOR} + * @since 6.0 + */ + public boolean isMonitor() { + return false; + } + + /** + * Returns whether this is the {@link #ELECTABLE} type. + * + * @return whether this is {@code ELECTABLE} + * @since 6.0 + */ + public boolean isElectable() { + return false; + } + + /** + * Returns whether this is the {@link #SECONDARY} type. + * + * @return whether this is {@code SECONDARY} + * @since 6.0 + */ + public boolean isSecondary() { + return false; + } + + /** + * Returns whether this type represents a data node, either {@link + * #ELECTABLE} or {@link #SECONDARY}. + * + * @return whether this represents a data node + * @since 6.0 + */ + public boolean isDataNode() { + return false; + } + + /** + * Returns whether this is the {@link #ARBITER} type. + * + * @return whether this is {@code ARBITER} + * @since 6.0 + */ + public boolean isArbiter() { + return false; + } + + /** + * @hidden + * For internal use only. + * + * Returns whether this is the {@link #EXTERNAL} type. + * + * @return whether this is {@code EXTERNAL} + * @since 7.2 + */ + public boolean isExternal() { + return false; + } + + /** + * @hidden + * For internal use only + * + * Returns whether this node has a transient node ID. New transient node + * IDs are assigned each time the node connects to the feeder. + * + * @return whether this node has a transient node ID + * @since 7.2 + */ + public boolean hasTransientId() { + return false; + } +} diff --git a/src/com/sleepycat/je/rep/QuorumPolicy.java b/src/com/sleepycat/je/rep/QuorumPolicy.java new file mode 100644 index 0000000..5ad8a9a --- /dev/null +++ b/src/com/sleepycat/je/rep/QuorumPolicy.java @@ -0,0 +1,63 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.EnvironmentFailureException; + +/** + * The quorum policy determine the number of nodes that must participate to + * pick the winner of an election, and therefore the master of the group. + * The default quorum policy during the lifetime of the group is + * QuorumPolicy.SIMPLE_MAJORITY. 
The only time that the application needs to + * specify a specific quorum policy is at node startup time, by passing one + * to the {@link ReplicatedEnvironment} constructor. + * + *
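Illustration (not part of this commit): passing QuorumPolicy.ALL to the five-argument ReplicatedEnvironment constructor declared later in this commit; a null consistency policy selects the default, and the config objects are assumed to be prepared by the caller. For comparison, SIMPLE_MAJORITY.quorumSize(5) is 3, while ALL.quorumSize(5) is 5.

import java.io.File;
import com.sleepycat.je.EnvironmentConfig;
import com.sleepycat.je.rep.QuorumPolicy;
import com.sleepycat.je.rep.ReplicatedEnvironment;
import com.sleepycat.je.rep.ReplicationConfig;

class InitialElectionSketch {
    static ReplicatedEnvironment openWithFullVote(File envHome,
                                                  ReplicationConfig repConfig,
                                                  EnvironmentConfig envConfig) {
        /* Wait for every electable node to vote in the first election. */
        return new ReplicatedEnvironment(envHome, repConfig, envConfig,
                                         null /*consistencyPolicy*/,
                                         QuorumPolicy.ALL);
    }
}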
<p>
Note that {@link NodeType#SECONDARY} nodes are not counted as part of + * master election quorums. + */ +public enum QuorumPolicy { + + /** + * All participants are required to vote. + */ + ALL, + + /** + * A simple majority of participants is required to vote. + */ + SIMPLE_MAJORITY; + + /** + * Returns the minimum number of nodes needed to meet the quorum policy. + * + * @param groupSize the number of election participants in the replication + * group + * + * @return the number of nodes that are needed for a quorum for a group + * with {@code groupSize} number of election participants + */ + public int quorumSize(int groupSize) { + switch (this) { + case ALL: + return groupSize; + + case SIMPLE_MAJORITY: + return (groupSize / 2 + 1); + + default: + throw EnvironmentFailureException.unexpectedState + ("Unknown quorum:" + this); + } + } +} diff --git a/src/com/sleepycat/je/rep/RepInternal.java b/src/com/sleepycat/je/rep/RepInternal.java new file mode 100644 index 0000000..ff297da --- /dev/null +++ b/src/com/sleepycat/je/rep/RepInternal.java @@ -0,0 +1,139 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.io.File; +import java.util.Properties; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; + +/** + * @hidden + * For internal use only. It serves to shelter methods that must be public to + * be used by other BDBJE packages but that are not part of the public api + * available to applications. + */ +public class RepInternal { + + /** + * Proxy to ReplicatedEnvironment.getMaybeNullRepImpl. + * + * This method does not check whether the env is valid. + * + * WARNING: This method will be phased out over time and normally + * getNonNullRepImpl should be called instead. + * + * @return the underlying RepImpl, or null if the env has been + * closed. + */ + public static RepImpl getRepImpl(ReplicatedEnvironment rep) { + return rep.getMaybeNullRepImpl(); + } + + /** + * Proxy to ReplicatedEnvironment.getNonNullEnvImpl + * + * This method is called to access the underlying RepImpl when an env is + * expected to be open, to guard against NPE when the env has been closed. + * + * This method does not check whether the env is valid. + * + * @return the non-null, underlying RepImpl. + * + * @throws IllegalStateException if the env has been closed.
+ */ + public static RepImpl getNonNullRepImpl(ReplicatedEnvironment rep) { + return rep.getNonNullRepImpl(); + } + + public static RepGroupImpl getRepGroupImpl(ReplicationGroup group) { + return group.getRepGroupImpl(); + } + + public static ReplicationConfig + makeReplicationConfig(Properties props, boolean validateParams) + throws IllegalArgumentException { + + return new ReplicationConfig(props, validateParams); + } + + public static int getNodeId(ReplicatedEnvironment rep) { + return getNonNullRepImpl(rep).getNodeId(); + } + + /* + * Create an environment handle but do not join the group as part of the + * creation of this handle. This operation is only really meaningful in + * the absence of existing handles that had already been used to join the + * group. + */ + public static ReplicatedEnvironment + createDetachedEnv(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig) + throws DatabaseException { + + return new ReplicatedEnvironment(envHome, repConfig, envConfig, + null, + QuorumPolicy.SIMPLE_MAJORITY, + false, + null); + } + + /* + * Create an environment handle but do not join the group as part of the + * creation of this handle. + */ + public static ReplicatedEnvironment + createInternalEnvHandle(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig) + throws DatabaseException { + + return new ReplicatedEnvironment(envHome, repConfig, envConfig, + null, + null, + false, + null); + } + + /** + * Proxy to ReplicationMutableConfig.validateParams. + */ + public static void disableParameterValidation + (ReplicationMutableConfig config) { + config.setOverrideValidateParams(false); + } + + public static + ReplicatedEnvironmentStats makeReplicatedEnvironmentStats + (RepImpl repImpl, StatsConfig config) { + + return new ReplicatedEnvironmentStats(repImpl, config); + } + + + public static void setAllowConvert(final ReplicationConfig repConfig, + final boolean allowConvert) { + repConfig.setAllowConvert(allowConvert); + } + + public static boolean getAllowConvert(final ReplicationConfig repConfig) { + return repConfig.getAllowConvert(); + } +} diff --git a/src/com/sleepycat/je/rep/RepStatManager.java b/src/com/sleepycat/je/rep/RepStatManager.java new file mode 100644 index 0000000..026451b --- /dev/null +++ b/src/com/sleepycat/je/rep/RepStatManager.java @@ -0,0 +1,105 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.Map; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.utilint.StatCaptureRepDefinitions; +import com.sleepycat.je.statcap.StatManager; +import com.sleepycat.je.utilint.StatGroup; + +/** + * @hidden + * For internal use only. 
+ */ +public class RepStatManager extends StatManager { + + private final UpdateMinMax updateRepMinMaxStat = + new UpdateMinMax(StatCaptureRepDefinitions.minStats, + StatCaptureRepDefinitions.maxStats); + + public RepStatManager(RepImpl env) { + super(env); + } + + public synchronized ReplicatedEnvironmentStats getRepStats( + StatsConfig config, + Integer contextKey) { + + StatContext sc = statContextMap.get(contextKey); + if (sc == null) { + throw EnvironmentFailureException.unexpectedState( + "Internal error stat context is not registered"); + } + ReplicatedEnvironmentStats rstat = + ((RepImpl)env).getStatsInternal(config); + if (rstat == null) { + return null; + } + Map<String, StatGroup> cur = rstat.getStatGroupsMap(); + Map<String, StatGroup> base = sc.getRepBase(); + + ReplicatedEnvironmentStats intervalStats; + if (base != null) { + intervalStats = computeRepIntervalStats(cur, base); + } else { + intervalStats = rstat; + } + + if (config.getClear()) { + + for (StatContext context : statContextMap.values()) { + if (context.getRepBase() != null) { + updateRepMinMaxStat.updateBase(context.getRepBase(), cur); + } + } + + for (StatContext context : statContextMap.values()) { + if (context == sc) { + context.setRepBase(null); + } else { + if (context.getRepBase() == null) { + context.setRepBase(cloneAndNegate(cur)); + } else { + // reset base + context.setRepBase( + computeRepIntervalStats( + context.getRepBase(),cur).getStatGroupsMap()); + } + } + } + } + + return intervalStats; + } + + private ReplicatedEnvironmentStats computeRepIntervalStats( + Map<String, StatGroup> current, + Map<String, StatGroup> base) { + + ReplicatedEnvironmentStats envStats = new ReplicatedEnvironmentStats(); + for (StatGroup cg : current.values()) { + if (base != null) { + StatGroup bg = base.get(cg.getName()); + envStats.setStatGroup(cg.computeInterval(bg)); + } else { + envStats.setStatGroup(cg.cloneGroup(false)); + } + } + return envStats; + } +} diff --git a/src/com/sleepycat/je/rep/ReplicaConsistencyException.java b/src/com/sleepycat/je/rep/ReplicaConsistencyException.java new file mode 100644 index 0000000..f6e920e --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicaConsistencyException.java @@ -0,0 +1,119 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.ReplicaConsistencyPolicy; + +/** + * This exception is thrown by a Replica to indicate it could not meet the + * consistency requirements as defined by the + * ReplicaConsistencyPolicy in effect for the transaction, within + * the allowed timeout period. + *
<p>
+ * A Replica will typically keep current with its Master. However, network + * problems, or excessive load on the Master or Replica, may prevent the Replica + * from keeping up, and the Replica may fall further behind than is permitted by + * its consistency policy. If the Replica cannot catch up in the time defined + * by its ReplicaConsistencyPolicy, it will throw this exception + * from the {@link com.sleepycat.je.Environment#beginTransaction + * Environment.beginTransaction} method, thus preventing the transaction from + * accessing data that does not meet its consistency requirements. + *
<p>
        + * If this exception is encountered frequently, it indicates that the + * consistency policy requirements are too strict and cannot be met routinely + * given the load being placed on the system and the hardware resources that + * are available to service the load. The exception may also indicate that + * there is a network related issue that is preventing the Replica from + * communicating with the master and keeping up with the replication stream. + *
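Illustration (not part of this commit): one hedged way to retry on this exception, as the next paragraph suggests; the retry count and the one-second backoff are arbitrary choices.

import com.sleepycat.je.Environment;
import com.sleepycat.je.Transaction;
import com.sleepycat.je.TransactionConfig;
import com.sleepycat.je.rep.ReplicaConsistencyException;

class ConsistencyRetrySketch {
    static Transaction beginWithRetries(Environment env,
                                        TransactionConfig txnConfig,
                                        int maxRetries)
        throws InterruptedException {

        for (int i = 0; i < maxRetries; i++) {
            try {
                return env.beginTransaction(null /*parent*/, txnConfig);
            } catch (ReplicaConsistencyException e) {
                /* Give the replica a chance to catch up, then retry. */
                Thread.sleep(1000);
            }
        }
        throw new IllegalStateException("Replica did not catch up in time");
    }
}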
<p>
        + * The application can choose to retry the transaction, until the underlying + * system problem has been resolved. Or it can try relaxing the consistency + * constraints, or choose the {@link NoConsistencyRequiredPolicy} so that the + * constraints can be satisfied more easily. + * For example, in a {@link two node + * replication group}, if the primary goes down, the application may want + * the secondary node to continue to service read requests, and will lower the + * consistency requirement on that node in order to maintain read availability. + * + * @see ReplicaConsistencyPolicy + * @see Managing Consistency + */ +public class ReplicaConsistencyException extends OperationFailureException { + private static final long serialVersionUID = 1; + + final ReplicaConsistencyPolicy consistencyPolicy; + + /** + * @hidden + * For internal use only. + */ + public ReplicaConsistencyException(ReplicaConsistencyPolicy + consistencyPolicy, + String rnName, + boolean unknownMaster) { + /* No need to set abortOnly, beginTransaction will fail. */ + super(null /*locker*/, false /*abortOnly*/, + "Unable to achieve consistency at rep node:" + rnName + + ", despite waiting for " + + consistencyPolicy.getTimeout(TimeUnit.MILLISECONDS) + " ms." + + (unknownMaster ? + " The node is not currently in contact with a master." : + ""), + null /*cause*/); + this.consistencyPolicy = consistencyPolicy; + } + + public ReplicaConsistencyException(String message, + ReplicaConsistencyPolicy + consistencyPolicy) { + /* No need to set abortOnly, beginTransaction will fail. */ + super(null /*locker*/, false /*abortOnly*/, + message, + null /*cause*/); + this.consistencyPolicy = consistencyPolicy; + } + + /** + * For internal use only. + * @hidden + */ + private ReplicaConsistencyException(String message, + ReplicaConsistencyException cause) { + super(message, cause); + this.consistencyPolicy = cause.consistencyPolicy; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new ReplicaConsistencyException(msg, this); + } + + /** + * Returns the Replica consistency policy that could not be satisfied. + * + * @return the Replica consistency policy + */ + public ReplicaConsistencyPolicy getConsistencyPolicy() { + return consistencyPolicy; + } +} diff --git a/src/com/sleepycat/je/rep/ReplicaStateException.java b/src/com/sleepycat/je/rep/ReplicaStateException.java new file mode 100644 index 0000000..7580955 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicaStateException.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + + +/** + * This exception indicates that the application attempted an operation that is + * not permitted when it is in the {@link ReplicatedEnvironment.State#REPLICA} + * state. + */ +public class ReplicaStateException extends StateChangeException { + private static final long serialVersionUID = 1; + + /** + * For internal use only. 
+ * @hidden + */ + public ReplicaStateException(String message) { + super(message, null); + } + + private ReplicaStateException(String message, + ReplicaStateException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public ReplicaStateException wrapSelf(String msg) { + return new ReplicaStateException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicaWriteException.java b/src/com/sleepycat/je/rep/ReplicaWriteException.java new file mode 100644 index 0000000..e640b9a --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicaWriteException.java @@ -0,0 +1,57 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.txn.Locker; + +/** + * This exception indicates that an update operation or transaction commit + * or abort was attempted while in the + * {@link ReplicatedEnvironment.State#REPLICA} state. The transaction is marked + * as being invalid. + *
<p>
        + * The exception is the result of either an error in the application logic or + * the result of a transition of the node from Master to Replica while a + * transaction was in progress. + *
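Illustration (not part of this commit): a sketch of the abort-and-redirect pattern described here; how the update is forwarded to the master is left to the application and merely indicated by the rethrow.

import com.sleepycat.je.Transaction;
import com.sleepycat.je.rep.ReplicaWriteException;

class ReplicaWriteSketch {
    static void runUpdate(Transaction txn, Runnable update) {
        try {
            update.run();
            txn.commit();
        } catch (ReplicaWriteException e) {
            /* The transaction is invalid once this is thrown; abort it
             * and let the caller redirect the operation to the master. */
            txn.abort();
            throw e;
        }
    }
}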
<p>
        + * The application must abort the current transaction and redirect all + * subsequent update operations to the Master. + */ +public class ReplicaWriteException extends StateChangeException { + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * @hidden + */ + public ReplicaWriteException(Locker locker, + StateChangeEvent stateChangeEvent) { + super(locker, stateChangeEvent); + } + + private ReplicaWriteException(String message, + ReplicaWriteException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new ReplicaWriteException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicatedEnvironment.java b/src/com/sleepycat/je/rep/ReplicatedEnvironment.java new file mode 100644 index 0000000..e2f5807 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicatedEnvironment.java @@ -0,0 +1,1350 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.io.File; +import java.io.PrintStream; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.VersionMismatchException; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.RepConfigProxy; +import com.sleepycat.je.dbi.StartupTracker.Phase; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.utilint.DatabaseUtil; + +/** + * A replicated database environment that is a node in a replication + * group. Please read the {@link Berkeley DB JE High + * Availability Overview} for an introduction to basic concepts and key + * terminology. + *
<p>
        + * Berkeley DB JE High Availability (JE HA) is a replicated, embedded database + * management system which provides fast, reliable, and scalable data + * management. JE HA enables replication of an environment across a Replication + * Group. A ReplicatedEnvironment is a single node in the replication group. + *
<p>
+ * ReplicatedEnvironment extends {@link Environment}. All database operations + * are executed in the same fashion in both replicated and non-replicated + * applications, using {@link Environment} methods. A ReplicatedEnvironment + * must be transactional, and all replicated databases created in the + * replicated environment must be transactional as well. However, + * non-replicated databases may also be used. + *
<p>
        + * ReplicatedEnvironment handles are analogous to {@link Environment} + * handles. A replicated environment handle is a ReplicatedEnvironment + * instance; multiple ReplicatedEnvironment instances may be created for the + * same physical directory. In other words, more than one ReplicatedEnvironment + * handle may be open at a time for a given environment. + *
<p>
+ * <p>
        + * A ReplicatedEnvironment joins its replication group when it is instantiated. + * When the constructor returns, the node will have established contact with + * the other members of the group and will be ready to service operations. The + * {@link life + * cycle} overview is useful for understanding replication group creation. + *
<p>
        + * The membership of a replication group is dynamically defined. The group + * comes into being when ReplicatedEnvironments that are configured as members + * of a group are created and discover each other. ReplicatedEnvironments are + * identified by a group name, a node name, and a hostname:port + * value. Membership information for electable and monitor nodes is stored in + * an internal, replicated database available to electable and secondary nodes. + *
<p>
        + * To start a node and join a group, instantiate a ReplicatedEnvironment. The + * very first instantiation of a node differs slightly from all future + * instantiations. A brand new, empty node does not yet have access to the + * membership database, so it must discover the group with the aid of a + * helper node, which is a fellow member. If this is the very first node of the + * entire group, there is no available helper. Instead, the helper host address + * to use is the node's own address. The example below takes the simple + * approach of creating a replication group by starting up a node that will act + * as the first master, though it is not necessary to follow this order. + * {@link + * Configuring Replicated Environments} describes group startup in greater + * detail. + *
<p>
        + * To create the master node in a brand new group, instantiate a + * ReplicatedEnvironment this way: + *
<pre>
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * envConfig.setAllowCreate(true);
+ * envConfig.setTransactional(true);
+ *
+ * // Identify the node
+ * ReplicationConfig repConfig = new ReplicationConfig();
+ * repConfig.setGroupName("PlanetaryRepGroup");
+ * repConfig.setNodeName("Mercury");
+ * repConfig.setNodeHostPort("mercury.acme.com:5001");
+ *
+ * // This is the first node, so its helper is itself
+ * repConfig.setHelperHosts("mercury.acme.com:5001");
+ *
+ * ReplicatedEnvironment repEnv =
+ *     new ReplicatedEnvironment(envHome, repConfig, envConfig);
+ * </pre>
+ * <p>
        + * To create a new node when there are other existing group members, + * set a helper address which points to an existing node in the group. A simple + * way to bring up a new group is to "chain" the new nodes by having the + * helpers reference a previously created node. + *
<pre>
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * envConfig.setAllowCreate(true);
+ * envConfig.setTransactional(true);
+ *
+ * // Identify the node
+ * ReplicationConfig repConfig =
+ *     new ReplicationConfig("PlanetaryRepGroup",
+ *                           "Jupiter",
+ *                           "jupiter.acme.com:5002");
+ *
+ * // Use the node at mercury.acme.com:5001 as a helper to find the rest
+ * // of the group.
+ * repConfig.setHelperHosts("mercury.acme.com:5001");
+ *
+ * ReplicatedEnvironment repEnv =
+ *     new ReplicatedEnvironment(envHome, repConfig, envConfig);
+ * </pre>
+ * <p>
+ * In these examples, node Mercury was configured as its own helper, and + * became the first master. The next nodes were configured to use Mercury as + * their helper, and became replicas. It is also possible to start these in + * reverse order, bringing Mercury up last. In that case, the earlier nodes + * will block until a helper is awake and can service their requests for group + * metadata. + *
<p>
        + * Creating a ReplicatedEnvironment for an existing environment requires + * less configuration. The call + * to {@code EnvironmentConfig.setAllowCreate()} is eliminated to guard + * against the unintentional creation of a new environment. Also, there is no + * need to set a helper host address, because the environment exists and has + * access to the shared, persistent membership information. + *
<pre>
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * envConfig.setTransactional(true);
+ * ReplicationConfig repConfig =
+ *     new ReplicationConfig("PlanetaryRepGroup",
+ *                           "Mercury",
+ *                           "mercury.acme.com:5001");
+ *
+ * ReplicatedEnvironment repEnv =
+ *     new ReplicatedEnvironment(envHome, repConfig, envConfig);
+ * </pre>
+ * <p>
        + * {@literal See} {@link com.sleepycat.je.rep.util.ReplicationGroupAdmin + * ReplicationGroupAdmin} for information on how to remove nodes from the + * replication group. + * + *
<p>
+ * ReplicatedEnvironment properties can be set via the {@literal + * <environment home>/}je.properties file, just like {@link Environment} + * properties. They follow the same property value precedence rules. + * + *
<p>
+ * A replicated environment directory can only be accessed by a read/write + * ReplicatedEnvironment handle or a read only {@link Environment} handle. In + * the current release, there is an additional restriction that a read only + * {@link Environment} is only permitted when the directory is not also + * accessed from a different process by a read/write ReplicatedEnvironment. If + * a read/write ReplicatedEnvironment and a read only {@link Environment} from + * two different processes concurrently access an environment directory, there + * is the small possibility that the read only {@link Environment} may see + * exceptions thrown about an inconsistent log if the ReplicatedEnvironment + * executes certain kinds of failover. There is no problem if the {@link + * Environment} and ReplicatedEnvironment are in the same process, or are not + * concurrent. + *
<p>
        + * JE HA prohibits opening a replicated environment directory with a read/write + * {@link Environment} handle, because from the group's perspective, + * unreplicated updates to a single node would cause data inconsistency. To + * use an existing, non-replicated environment to bootstrap a replication + * group, use {@link com.sleepycat.je.rep.util.DbEnableReplication} to do a one + * time conversion of the directory. + *
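Illustration (not part of this commit): the one-time conversion sketched with com.sleepycat.je.rep.util.DbEnableReplication, whose constructor mirrors the group/node/host-port identification used above; the specific argument values are assumptions.

import java.io.File;
import com.sleepycat.je.rep.util.DbEnableReplication;

class ConvertSketch {
    /* Convert a standalone environment so it can bootstrap a group. */
    static void convert(File envHome) {
        DbEnableReplication converter =
            new DbEnableReplication(envHome,
                                    "PlanetaryRepGroup",
                                    "Mercury",
                                    "mercury.acme.com:5001");
        converter.convert();
    }
}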
<p>
        + * All other database objects, such as {@link com.sleepycat.je.Database} or + * {@link com.sleepycat.je.Cursor} (when using the Base API) or {@link + * com.sleepycat.persist.EntityStore} or {@link + * com.sleepycat.persist.PrimaryIndex} (when using the Direct Persistence + * Layer) should be created, used and closed before calling {@link + * ReplicatedEnvironment#close}. + * + *
<p>
        Replicated environments can be created with node type {@link + * NodeType#ELECTABLE} or {@link NodeType#SECONDARY}. ELECTABLE nodes can be + * masters or replicas, and participate in both master elections and commit + * durability decisions. + * + *
<p>
        SECONDARY nodes can only be replicas, not masters, and do not participate + * in either elections or durability decisions. SECONDARY nodes can be used to + * increase the available number of read replicas without changing the election + * or durability quorum of the group, and without requiring communication with + * the secondaries during master elections or transaction commits. As a result, + * SECONDARY nodes are a good choice for nodes that are connected to the other + * nodes in the group by high latency network connections, for example over + * long distance networks. SECONDARY nodes maintain replication streams with + * the replication group master to update the data contents of their + * environment. + * + *
<p>
+ * You can use SECONDARY nodes to:
+ * <ul>
+ * <li>Provide a copy of the data available at a distant location</li>
+ * <li>Maintain an extra copy of the data to increase redundancy</li>
+ * <li>Change the number of replicas to adjust to dynamically changing read
+ * loads</li>
+ * </ul>
+ * <p>
        Membership information for SECONDARY nodes is not stored persistently, so + * their membership is only known to the master, and only while the nodes + * remain connected to the master. Because a SECONDARY node cannot become a + * master, it will not act as master even if it is the first node created for + * the group. + * + *
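Illustration (not part of this commit): opening a node as a SECONDARY; the node name, ports, and the availability of ReplicationConfig.setNodeType are assumptions based on the standard JE HA API.

import java.io.File;
import com.sleepycat.je.EnvironmentConfig;
import com.sleepycat.je.rep.NodeType;
import com.sleepycat.je.rep.ReplicatedEnvironment;
import com.sleepycat.je.rep.ReplicationConfig;

class SecondarySketch {
    static ReplicatedEnvironment openSecondary(File envHome) {
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(true);
        envConfig.setTransactional(true);

        ReplicationConfig repConfig =
            new ReplicationConfig("PlanetaryRepGroup",
                                  "Pluto",
                                  "pluto.acme.com:5010");
        /* A secondary must reach an existing member to join the group. */
        repConfig.setHelperHosts("mercury.acme.com:5001");
        repConfig.setNodeType(NodeType.SECONDARY);

        return new ReplicatedEnvironment(envHome, repConfig, envConfig);
    }
}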
<p>
+ * <b>Non-replicated Databases in a Replicated
+ * Environment</b>
+ * <p>
        + * + * A database or entity store in a replicated environment is replicated by + * default, but may be explicitly configured as non-replicated using + * {@link com.sleepycat.je.DatabaseConfig#setReplicated} or + * {@link com.sleepycat.persist.StoreConfig#setReplicated}. Such + * non-replicated databases may be transactional or non-transactional + * (including deferred-write and temporary). The special considerations for + * using non-replicated databases in a replicated environment are described + * below. + *
<p>
+ * The data in a non-replicated database is not guaranteed to be persistent,
+ * for two reasons.
+ * <ul>
+ * <li>
+ * When a hard recovery occurs as part of an election, some data at the end of
+ * the transaction log may be lost. For a replicated database this data is
+ * automatically recovered from other members of the group, but for a
+ * non-replicated database it is not.
+ * </li>
+ * <li>
+ * When a node's contents are replaced via network restore or by otherwise
+ * copying the transaction log from another node, all previously existing
+ * non-replicated databases on that node are destroyed, and the non-replicated
+ * databases from the source node are copied along with the replicated
+ * data. The non-replicated databases copied from the source node will be in
+ * whatever state they were in at the time of the copy.
+ * </li>
+ * </ul>
+ * <p>
+ * Therefore, non-replicated databases are intended to be used primarily for + * persistent caching and other non-critical local storage. The application + * is responsible for maintaining the state of the database and handling data + * loss after one of the events described above. + *
<p>
        + * To perform write operations on a non-replicated database, special + * considerations are necessary for user-supplied transactions. Namely, the + * transaction must be configured for + * {@link com.sleepycat.je.TransactionConfig#setLocalWrite(boolean) + * local-write}. A given transaction may be used to write to either replicated + * databases or non-replicated databases, but not both. + *
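Illustration (not part of this commit): configuring a user-supplied local-write transaction as described above, assuming an open Environment handle.

import com.sleepycat.je.Environment;
import com.sleepycat.je.Transaction;
import com.sleepycat.je.TransactionConfig;

class LocalWriteSketch {
    /* A transaction that may write non-replicated databases only. */
    static Transaction beginLocalWrite(Environment env) {
        TransactionConfig txnConfig = new TransactionConfig();
        txnConfig.setLocalWrite(true);
        return env.beginTransaction(null /*parent*/, txnConfig);
    }
}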
<p>
        + * For auto-commit transactions (when the Transaction parameter is null), the + * local-write setting is automatically set to correspond to whether the + * database is replicated. With auto-commit, local-write is always true for a + * non-replicated database, and always false for a replicated database. + *
<p>
        + * A local-write transaction automatically uses + * {@link com.sleepycat.je.Durability.ReplicaAckPolicy#NONE}. + * A local-write transaction on a Master will thus not be held up, or + * throw {@link com.sleepycat.je.rep.InsufficientReplicasException}, if the + * Master is not in contact with a sufficient number of Replicas at the + * time the transaction is initiated. + *
<p>
        + * For read operations, a single transaction may be used to read any + * combination of replicated and non-replicated databases. If only read + * operations are performed, it is normally desirable to configure a user + * supplied transaction as + * {@link com.sleepycat.je.TransactionConfig#setReadOnly(boolean) read-only}. + * Like a local-write transaction, a read-only transaction automatically uses + * {@link com.sleepycat.je.Durability.ReplicaAckPolicy#NONE}. + *
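Illustration (not part of this commit): a read-only transaction that also disables consistency checks, anticipating the point made in the next paragraph; the open Environment handle is assumed.

import com.sleepycat.je.Environment;
import com.sleepycat.je.Transaction;
import com.sleepycat.je.TransactionConfig;
import com.sleepycat.je.rep.NoConsistencyRequiredPolicy;

class ReadOnlyTxnSketch {
    static Transaction beginReadOnly(Environment env) {
        TransactionConfig txnConfig = new TransactionConfig();
        txnConfig.setReadOnly(true);
        txnConfig.setConsistencyPolicy(
            NoConsistencyRequiredPolicy.NO_CONSISTENCY);
        return env.beginTransaction(null /*parent*/, txnConfig);
    }
}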
<p>
        + * For user-supplied transactions, note that even when accessing only + * non-replicated databases, group consistency checks are performed by + * default. In this case it is normally desirable to disable consistency + * checks by calling + * {@link com.sleepycat.je.TransactionConfig#setConsistencyPolicy} with + * {@link NoConsistencyRequiredPolicy#NO_CONSISTENCY}. This allows the + * non-replicated databases to be accessed regardless of the state of the other + * members of the group and the network connections to them. When auto-commit + * is used (when the Transaction parameter is null) with a non-replicated + * database, consistency checks are automatically disabled. + * + * @see Environment + * @see Replication First Steps + * @since 4.0 + */ +public class ReplicatedEnvironment extends Environment { + + /* + * The canonical RepImpl associated with the environment directory, + * accessed by different handles. + * + * The repEnvironmentImpl field is set to null during close to avoid OOME. + * It should normally only be accessed via the checkOpen (which calls + * Environment.checkOpen) and getNonNullRepImpl methods. During close, while + * synchronized, it is safe to access it directly. + */ + private volatile RepImpl repEnvironmentImpl; + + /* The unique name and id associated with the node. */ + private final NameIdPair nameIdPair; + + /* + * The replication configuration that has been used to create this + * handle. This is derived from the original configuration argument, after + * cloning a copy to keep it distinct from the user's instance, applying + * je.properties settings, and validating against the underlying node. + */ + private ReplicationConfig handleRepConfig; + + /** + * Creates a replicated environment handle and starts participating in the + * replication group as either a Master or a Replica. The node's state is + * determined when it joins the group, and mastership is not preconfigured. + * If the group has no current master and the node has the default node + * type of {@link NodeType#ELECTABLE}, then creation of a handle will + * trigger an election to determine whether this node will participate as a + * Master or a Replica. + *
<p>
        + * If the node participates as a Master, the constructor will return after + * a sufficient number of Replicas, in accordance with the + * {@code initialElectionPolicy} argument, have established contact with + * the Master. + *
<p>
        + * If the node participates as a Replica, it will become consistent in + * accordance with the {@code consistencyPolicy} argument before returning + * from the constructor. + *
<p>
        + * If an election cannot be concluded in the time period defined by {@link + * ReplicationConfig#ENV_SETUP_TIMEOUT}, by default it will throw an {@code + * UnknownMasterException}. This behavior can be overridden via the {@link + * ReplicationConfig#ENV_UNKNOWN_STATE_TIMEOUT} to permit the creation of + * the handle in the {@link State#UNKNOWN} state. A handle in UNKNOWN state + * can be used to service read operations with an appropriately relaxed + * consistency policy. Note that these timeouts do not apply when opening + * an environment for the very first time. In the first time case, if the + * node is not the only group member, or if it is a SECONDARY node, the + * constructor will wait indefinitely until it can contact an existing + * group member. + *
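Illustration (not part of this commit): permitting the handle to open in the UNKNOWN state; the five-second value is an arbitrary assumption and the string follows JE's usual time-duration syntax.

import com.sleepycat.je.rep.ReplicationConfig;

class UnknownStateSketch {
    static void permitUnknownState(ReplicationConfig repConfig) {
        /* Open in the UNKNOWN state after 5 s instead of failing with
         * UnknownMasterException at the setup timeout. */
        repConfig.setConfigParam(
            ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "5 s");
    }
}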
<p>
        + * A brand new node will always join an existing group as a Replica, unless + * it is the very first electable node that is creating the group. In that + * case it joins as the Master of the newly formed singleton group. A brand + * new node must always specify one or more active helper nodes via the + * {@link ReplicationConfig#setHelperHosts(String)} method, or via the + * <environment home>/je.properties file. If this is the + * very first member of a nascent group, it must specify just itself as the + * helper. + *
<p>
+ * There are special considerations to keep in mind when a replication + * group is started and elections are first held to determine a master. The + * default {@link com.sleepycat.je.rep.QuorumPolicy#SIMPLE_MAJORITY} calls + * for a simple majority vote. If the group members were previously created + * and populated, the default election policy may result in the election of + * a master that may not have the most up to date copy of the environment. + * This could happen if the best qualified node is slow to start up; it's + * possible that by the time it's ready to participate in an election, the + * election has already completed with a simple majority. + *
<p>
        + * To avoid this possibility, the method has a parameter + * initialElectionPolicy, which can be used to specify + * {@link com.sleepycat.je.rep.QuorumPolicy#ALL}, which will cause the + * elections to wait until all electable nodes can vote. By ensuring that + * all the nodes can vote, the best possible node is chosen to be the + * master at group startup. + *
<p>
        + * Note that it is the application's responsibility to ensure that all + * electable nodes coordinate their choice of initialElectionPolicy so that + * the very first elections held when a group is brought up use the same + * value for this parameter. This parameter is only used for the first + * election. After the first election has been held and the group is + * functioning, subsequent elections do not require participation of all + * the nodes. A simple majority is sufficient to elect the node with the + * most up to date environment as the master. + *
<p>
        + * + * @param envHome The environment's home directory. + * + * @param repConfig replication configurations. If null, the default + * replication configurations are used. + * + * @param envConfig environment configurations for this node. If null, the + * default environment configurations are used. + * + * @param consistencyPolicy the consistencyPolicy used by the Replica at + * startup to make its environment current with respect to the master. This + * differs from the consistency policy specified + * {@link ReplicationConfig#setConsistencyPolicy} because it is used only + * at construction, when the node joins the group for the first time. The + * consistency policy set in {@link ReplicationConfig} is used any time a + * policy is used after node startup, such as at transaction begins. + * + * @param initialElectionPolicy the policy to use when holding the initial + * election. + * + * @throws RestartRequiredException if some type of corrective action is + * required. The subclasses of this exception provide further details. + * + * @throws ReplicaConsistencyException if it is a Replica and cannot + * satisfy the specified consistency policy within the consistency timeout + * period + * + * @throws UnknownMasterException if the + * {@link ReplicationConfig#ENV_UNKNOWN_STATE_TIMEOUT} has a zero value and + * the node cannot join the group in the time period specified by the + * {@link ReplicationConfig#ENV_SETUP_TIMEOUT} property. The node may be + * unable to join the group because the Master could not be determined due + * to a lack of sufficient nodes as required by the election policy, or + * because a master was present but lacked a + * {@link QuorumPolicy#SIMPLE_MAJORITY} needed to update the environment + * with information about this node, if it's a new node and is joining the + * group for the first time. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws EnvironmentLockedException when an environment cannot be opened + * for write access because another process has the same environment open + * for write access. Warning: This exception should be + * handled when an environment is opened by more than one process. + * + * @throws VersionMismatchException when the existing log is not compatible + * with the version of JE that is running. This occurs when a later version + * of JE was used to create the log. Warning: This + * exception should be handled when more than one version of JE may be used + * to access an environment. + * + * @throws UnsupportedOperationException if the environment exists and has + * not been enabled for replication. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code EnvironmentConfig} parameter. + */ + public ReplicatedEnvironment(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig, + ReplicaConsistencyPolicy consistencyPolicy, + QuorumPolicy initialElectionPolicy) + throws EnvironmentNotFoundException, + EnvironmentLockedException, + InsufficientLogException, + ReplicaConsistencyException, + IllegalArgumentException { + + this(envHome, + repConfig, + envConfig, + consistencyPolicy, + initialElectionPolicy, + true /*joinGroup*/, + null /*envImplParam*/); + } + + /** + * A convenience constructor that defaults the replica consistency policy + * and the initial election policy to be used. + * + *
<p>
        + * The default replica consistency policy results in the replica being + * consistent with the master as of the time the handle was created. + *
<p>
        + * + *
<p>
+ * The default initial election policy is + * {@link QuorumPolicy#SIMPLE_MAJORITY}. + *
<p>
        + * + * @throws RestartRequiredException if some type of corrective action is + * required. The subclasses of this exception provide further details. + * + * @throws ReplicaConsistencyException if it is a Replica and and cannot be + * made consistent within the timeout specified by + * {@link ReplicationConfig#ENV_CONSISTENCY_TIMEOUT} + * + * @throws UnknownMasterException if the + * {@link ReplicationConfig#ENV_UNKNOWN_STATE_TIMEOUT} has a zero value and + * the node cannot join the group in the time period specified by the + * {@link ReplicationConfig#ENV_SETUP_TIMEOUT} property. The node may be + * unable to join the group because the Master could not be determined due + * to a lack of sufficient nodes as required by the election policy, or + * because a master was present but lacked a + * {@link QuorumPolicy#SIMPLE_MAJORITY} needed to update the environment + * with information about this node, if it's a new node and is joining the + * group for the first time. + * + * @throws EnvironmentLockedException when an environment cannot be opened + * for write access because another process has the same environment open + * for write access. Warning: This exception should be + * handled when an environment is opened by more than one process. + * + * @throws VersionMismatchException when the existing log is not compatible + * with the version of JE that is running. This occurs when a later version + * of JE was used to create the log. Warning: This + * exception should be handled when more than one version of JE may be used + * to access an environment. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws UnsupportedOperationException if the environment exists and has + * not been enabled for replication. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code EnvironmentConfig} parameter. + * + * @see #ReplicatedEnvironment(File, ReplicationConfig, EnvironmentConfig, + * ReplicaConsistencyPolicy, QuorumPolicy) + */ + public ReplicatedEnvironment(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig) + throws EnvironmentNotFoundException, + EnvironmentLockedException, + ReplicaConsistencyException, + InsufficientLogException, + RollbackException, + IllegalArgumentException { + + this(envHome, repConfig, envConfig, null /*consistencyPolicy*/, + QuorumPolicy.SIMPLE_MAJORITY); + } + + /* + * Joins the replication group as part of the creation of a handle. + */ + private void joinGroup(RepImpl repImpl, + ReplicaConsistencyPolicy consistencyPolicy, + QuorumPolicy initialElectionPolicy) + throws DatabaseException { + + /* Just return if we don't want to join the group. */ + if (dontJoinGroup()) { + return; + } + + State state = null; + try { + state = + repImpl.joinGroup(consistencyPolicy, initialElectionPolicy); + } finally { + if (state == null) { + + /* + * Something bad happened, close the environment down with + * minimal activity. The environment may not actually be + * invalidated, but the constructor did not succeed, so it's + * logically invalid. We don't go to the effort of invalidating + * the environment, to avoid masking the original problem. Use + * abnormalClose() because it will remove the + * environment from the environment pool. + */ + repImpl.abnormalClose(); + } + } + } + + /* Return true if this node won't join the group. 
*/ + private boolean dontJoinGroup() { + return new Boolean(getRepConfig().getConfigParam + (RepParams.DONT_JOIN_REP_GROUP.getName())); + } + + /** + * For internal use only. + * @hidden + * + * Note that repImpl.joinGroup is a synchronized + * method, and therefore protected against multiple concurrent attempts to + * create a handle. + * + * @param envImplParam is non-null only when used by EnvironmentIml to + * create an InternalEnvironment. + */ + protected ReplicatedEnvironment(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig, + ReplicaConsistencyPolicy consistencyPolicy, + QuorumPolicy initialElectionPolicy, + boolean joinGroup, + RepImpl envImplParam) + throws EnvironmentNotFoundException, + EnvironmentLockedException, + ReplicaConsistencyException, + IllegalArgumentException { + + super(envHome, envConfig, repConfig, envImplParam); + + repEnvironmentImpl = (RepImpl) DbInternal.getNonNullEnvImpl(this); + nameIdPair = repEnvironmentImpl.getNameIdPair(); + + /* + * Ensure that the DataChannelFactory configuration is usable + * and initialize logging state. + */ + repEnvironmentImpl.initializeChannelFactory(); + + if (joinGroup) { + + try { + joinGroup( + repEnvironmentImpl, consistencyPolicy, + initialElectionPolicy); + + } catch (RollbackException e) { + + /* + * Syncup failed, a hard recovery is needed. Throwing the + * RollbackException closed the RepImpl and the EnvironmentImpl + * Redo the creation of RepImpl and retry the join once. If the + * second joinGroup fails, let the exception throw out to the + * user. + * + * Clear references to the old envImpl/repImpl, to prevent OOME + * during recovery when we retry below. + */ + DbInternal.clearEnvImpl(this); + repEnvironmentImpl = null; + + repEnvironmentImpl = (RepImpl) makeEnvironmentImpl( + envHome, envConfig, repConfig); + + /* + * Ensure that the DataChannelFactory configuration is usable + * and initialize logging state. + */ + repEnvironmentImpl.initializeChannelFactory(); + + joinGroup( + repEnvironmentImpl, consistencyPolicy, + initialElectionPolicy); + + repEnvironmentImpl.setHardRecoveryInfo(e); + } + + /* + * Fire a JoinGroupEvent only when the ReplicatedEnvironment is + * successfully created for the first time. + */ + if (repEnvironmentImpl.getRepNode() != null) { + repEnvironmentImpl.getRepNode(). + getMonitorEventManager().notifyJoinGroup(); + } + } else { + /* For testing only */ + if (repEnvironmentImpl.getRepNode() != null) { + throw EnvironmentFailureException.unexpectedState + ("An earlier handle creation had resulted in the node" + + "joining the group"); + } + } + } + + /** + * @hidden + * For internal use only. + * + * Validate and resolve replication configuration params, and extract a + * ReplicationConfig with those params for passing into environment + * creation. Note that a copy of the ReplicationConfig argument is created + * to insulate the application from changes made by the replication + * implementation and vice versa. + */ + @Override + protected RepConfigProxy setupRepConfig(File envHome, + RepConfigProxy repConfigProxy, + EnvironmentConfig envConfig) { + + /** + * If the user specified a null object, use the default. Apply the + * je.properties file to the replication config object. + */ + ReplicationConfig repConfig = (ReplicationConfig) repConfigProxy; + ReplicationConfig baseConfig = + (repConfig == null) ? 
ReplicationConfig.DEFAULT : repConfig;
+ ReplicationConfig useConfig = baseConfig.clone();
+
+ if (envConfig.getReadOnly()) {
+
+ /*
+ * Read-only replicated environments are not usually permitted,
+ * since an RN should be able to assume master identity
+ * at any moment. ReadOnly is only supported if the node is an
+ * arbiter, subscriber or network backup.
+ *
+ * TBW: the arbiter, subscriber, and a network backup all need a
+ * replicated environment handle that has pieces of the env
+ * infrastructure, like info logging, service dispatching, log file
+ * management. The use of XXX_USE parameters really selects
+ * those infrastructure pieces in an implicit way. It would be nice
+ * to have a way for users to specify which services they need more
+ * explicitly. To do so, we probably need to do a bit of
+ * refactoring of the env handle to call out those components.
+ */
+ boolean arbUse = useConfig.getConfigParam(
+ RepParams.ARBITER_USE.getName()).equals("true");
+ boolean subUse = useConfig.getConfigParam(
+ RepParams.SUBSCRIBER_USE.getName()).equals("true");
+ boolean networkBackupUse = useConfig.getConfigParam(
+ RepParams.NETWORKBACKUP_USE.getName()).equals("true");
+
+ if (!arbUse && !subUse && !networkBackupUse) {
+ throw new IllegalArgumentException("A replicated environment " +
+ "may not be opened read-only");
+ }
+ }
+ DbConfigManager.applyFileConfig(envHome,
+ useConfig.getProps(),
+ true); /* forReplication */
+ useConfig.propagateRepNetProps();
+ this.handleRepConfig = useConfig;
+ return handleRepConfig;
+ }
+
+ /**
+ * Returns the unique name used to identify this replicated environment.
+ * @see ReplicationConfig#setNodeName
+ *
+ * @return the node name
+ */
+ public String getNodeName() {
+ return nameIdPair.getName();
+ }
+
+ /**
+ * Returns the current state of the node associated with this replication
+ * environment. See {@link State} for a description of node states.
+ *

+ * If the caller's intent is to track the state of the node,
+ * {@link StateChangeListener} may be a more convenient and efficient
+ * approach, rather than using getState() directly.
+ *
+ * @return the current replication state associated with this node
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws IllegalStateException if this handle or the underlying
+ * environment has already been closed.
+ */
+ public State getState()
+ throws DatabaseException {
+
+ final RepImpl repImpl = checkOpen();
+
+ try {
+ return repImpl.getState();
+ } catch (Error E) {
+ repImpl.invalidate(E);
+ throw E;
+ }
+ }
+
+ /**
+ * Returns a description of the replication group as known by this node.
+ * The replicated group metadata is stored in a replicated database and
+ * updates are propagated by the current master node to all replicas. If
+ * this node is not the master, it is possible for its description of the
+ * group to be out of date, and it will not include information about
+ * SECONDARY nodes.
+ *
+ * @return the group description
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws IllegalStateException if this handle or the underlying
+ * environment has already been closed.
+ */
+ /*
+ * TODO: EXTERNAL is hidden for now. The doc needs to be updated to
+ * include EXTERNAL when it becomes public.
+ */
+ public ReplicationGroup getGroup()
+ throws DatabaseException {
+
+ final RepImpl repImpl = checkOpen();
+
+ try {
+ return new ReplicationGroup(repImpl.getRepNode().getGroup());
+ } catch (Error E) {
+ repImpl.invalidate(E);
+ throw E;
+ }
+ }
+
+ /**
+ * Close this ReplicatedEnvironment and release any resources used by the
+ * handle.
+ *
+ *

+ * When the last handle is closed, allocated resources are freed, and
+ * daemon threads are stopped, even if they are performing work. The node
+ * ceases participation in the replication group. If the node is currently
+ * the master, the rest of the group will hold an election. If a quorum of
+ * nodes can participate in the election, a new master will be chosen.
+ *

+ * The ReplicatedEnvironment should not be closed while any other handle
+ * that refers to it is still open. For example, the
+ * ReplicatedEnvironment should not be closed while there are open Database
+ * instances, or while transactions in the environment have not yet
+ * committed or aborted. Specifically, this includes {@link
+ * com.sleepycat.je.Database Database}, {@link com.sleepycat.je.Cursor
+ * Cursor} and {@link com.sleepycat.je.Transaction Transaction} handles.
+ *
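+ * <p>
+ * A minimal sketch of the recommended close order; the handle names are
+ * hypothetical:
+ * <pre>
+ *     cursor.close();   // close cursors first
+ *     txn.commit();     // resolve outstanding transactions
+ *     db.close();       // then database handles
+ *     repEnv.close();   // finally the environment handle
+ * </pre>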

        + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + */ + @Override + synchronized public void close() + throws DatabaseException { + + try { + super.close(); + } catch (DatabaseException e) { + /* Add this node's address to the exception message for clarity. */ + e.addErrorMessage("Problem closing handle " + nameIdPair); + throw e; + } catch (Exception e) { + /* Add this node's address to the exception message for clarity. */ + throw new EnvironmentFailureException( + repEnvironmentImpl, + EnvironmentFailureReason.UNEXPECTED_EXCEPTION, + "Problem closing handle " + nameIdPair, e); + } finally { + repEnvironmentImpl = null; + } + } + + /** + * Sets the listener used to receive asynchronous replication node state + * change events. Note that there is one listener per replication node, not + * one per handle. Invoking this method replaces the previous Listener. + * + * Invoking this method typically results in an immediate callback to the + * application via the {@link StateChangeListener#stateChange} method, so + * that the application is made aware of the existing state of the + * node at the time StateChangeListener is first established. + * + * @param listener the state change listener. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has already been closed. + */ + public void setStateChangeListener(StateChangeListener listener) + throws DatabaseException { + + final RepImpl repImpl = checkOpen(); + + try { + repImpl.setChangeListener(listener); + } catch (Error E) { + repImpl.invalidate(E); + throw E; + } + } + + /** + * Returns the listener used to receive asynchronous replication node state + * change events. A StateChangeListener provides the replication + * application with an asynchronous mechanism for tracking the {@link + * ReplicatedEnvironment.State State} of the replicated environment. + *

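+ * <p>
+ * A hedged sketch of installing a listener via
+ * {@link #setStateChangeListener}; the class name is hypothetical:
+ * <pre>
+ *     class LoggingListener implements StateChangeListener {
+ *         public void stateChange(StateChangeEvent event) {
+ *             // called whenever the node changes state
+ *             System.out.println("Node state: " + event.getState());
+ *         }
+ *     }
+ *     repEnv.setStateChangeListener(new LoggingListener());
+ * </pre>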
        + * Note that there is one listener per replication node, not one per + * ReplicatedEnvironment handle. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has already been closed. + */ + public StateChangeListener getStateChangeListener() + throws DatabaseException { + + final RepImpl repImpl = checkOpen(); + + try { + return repImpl.getChangeListener(); + } catch (Error E) { + repImpl.invalidate(E); + throw E; + } + } + + /** + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has already been closed. + */ + public void setRepMutableConfig(ReplicationMutableConfig mutableConfig) + throws DatabaseException { + + final RepImpl repImpl = checkOpen(); + + DatabaseUtil.checkForNullParam(mutableConfig, "mutableConfig"); + + try { + repImpl.setRepMutableConfig(mutableConfig); + } catch (Error E) { + repImpl.invalidate(E); + throw E; + } + } + + /** + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has already been closed. + */ + public ReplicationMutableConfig getRepMutableConfig() + throws DatabaseException { + + final RepImpl repImpl = checkOpen(); + + try { + final ReplicationMutableConfig config = + repImpl.cloneRepMutableConfig(); + config.fillInEnvironmentGeneratedProps(repImpl); + return config; + } catch (Error E) { + repImpl.invalidate(E); + throw E; + } + } + + /** + * Return the replication configuration that has been used to create this + * handle. This is derived from the original configuration argument, after + * cloning a copy to keep it distinct from the user's instance, applying + * je.properties settings, and validating against the underlying + * node. + * + * @return this handle's configuration. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has already been closed. + */ + public ReplicationConfig getRepConfig() + throws DatabaseException { + + checkOpen(); + + return handleRepConfig; + } + + /** + * Returns statistics associated with this environment. See {@link + * ReplicatedEnvironmentStats} for the kind of information available. + * + * @param config is used to specify attributes such as whether the stats + * should be cleared, whether the complete set of stats should be obtained, + * etc. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if this handle or the underlying + * environment has already been closed. + */ + public ReplicatedEnvironmentStats getRepStats(StatsConfig config) + throws DatabaseException { + + final RepImpl repImpl = checkOpen(); + + if (config == null) { + config = StatsConfig.DEFAULT; + } + + return repImpl.getStats(config); + } + + /* + * Returns the non-null, underlying RepImpl. For internal access only. + * Intentionally non-public; non package access must use the RepInternal + * proxy. + * + * This method is used to access the repEnvironmentImpl field, to guard + * against NPE when the environment has been closed. 
+ * + * This method does not check whether the env is valid. For API method + * calls, checkOpen is called at API entry points to check validity. The + * validity of the env should also be checked before critical operations + * (e.g., disk writes), after idle periods, and periodically during time + * consuming operations. + * + * @throws IllegalStateException if the env has been closed. + */ + RepImpl getNonNullRepImpl() { + + final RepImpl repImpl = repEnvironmentImpl; + + if (repImpl == null) { + throw new IllegalStateException("Environment is closed."); + } + + return repImpl; + } + + /** + * Returns the underlying RepImpl, or null if the env has been closed. + * + * WARNING: This method will be phased out over time and normally + * getNonNullRepImpl should be called instead. + */ + RepImpl getMaybeNullRepImpl() { + return repEnvironmentImpl; + } + + /** + * @throws EnvironmentFailureException if the underlying environment is + * invalid. + * @throws IllegalStateException if the environment is not open. + */ + private RepImpl checkOpen() { + + DbInternal.checkOpen(this); + + /* + * Will throw ISE if the environment becomes closed or invalid after + * the above check. + */ + return getNonNullRepImpl(); + } + + /** + * Print a detailed report about the costs of different phases of + * environment startup. This report is by default logged to the je.info + * file if startup takes longer than je.env.startupThreshold. + */ + @Override + public void printStartupInfo(PrintStream out) { + + super.printStartupInfo(out); + + getNonNullRepImpl().getStartupTracker().displayStats( + out, Phase.TOTAL_JOIN_GROUP); + } + + /** + * The replication node state determines the operations that the + * application can perform against its replicated environment. + * The method {@link #getState} returns the current state. + *

+ * When the first handle to a {@link ReplicatedEnvironment} is instantiated
+ * and the node is brought up, the node usually establishes
+ * MASTER or REPLICA state before returning from
+ * the constructor. However, these states are actually preceded by the
+ * UNKNOWN state, which may be visible if the application has
+ * configured a suitable {@link
+ * ReplicationConfig#ENV_UNKNOWN_STATE_TIMEOUT}.
+ *

        + * As the various remote nodes in the group become unavailable and + * elections are held, the local node may change between + * MASTER and REPLICA states, always with a + * (usually brief) transition through UNKNOWN state. + *

        + * When the last handle to the environment is closed, the node transitions + * to the DETACHED state. + *
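+ * <p>
+ * A minimal sketch of reacting to the current state; the routing logic is
+ * hypothetical:
+ * <pre>
+ *     ReplicatedEnvironment.State state = repEnv.getState();
+ *     if (state.isMaster()) {
+ *         // this node may service write requests
+ *     } else if (state.isReplica()) {
+ *         // service reads here; forward writes to the master
+ *     }
+ * </pre>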

        + * The state transitions visible to the application can be summarized by + * the regular expression: + *

        + * [ MASTER | REPLICA | UNKNOWN ]+ DETACHED + *
+ * with the caveat that redundant "transitions" (MASTER to
+ * MASTER, REPLICA to REPLICA, etc.)
+ * never occur.
+ */
+ public static enum State {
+
+ /**
+ * The node is not associated with the group. Its handle has been
+ * closed. No operations can be performed on the environment when it is
+ * in this state.
+ */
+ DETACHED,
+
+ /**
+ * The node is not currently in contact with the master, but is actively
+ * trying to establish contact with, or decide upon, a master. While in
+ * this state the node is restricted to performing just read operations
+ * on its environment. In a functioning group, this state is
+ * transitory.
+ */
+ UNKNOWN,
+
+ /**
+ * The node is the unique master of the group and can both read and
+ * write to its environment. When the node transitions to the MASTER
+ * state, the application running on the node must make provisions to
+ * start processing application-level write requests in addition to
+ * read requests.
+ */
+ MASTER,
+
+ /**
+ * The node is a replica that is being updated by the master. It is
+ * restricted to reading its environment. When the node
+ * transitions to this state, the application running on the node must
+ * make provisions to ensure that it does not write to the
+ * environment. It must arrange for all write requests to be routed to
+ * the master.
+ */
+ REPLICA;
+
+ /**
+ * @return true if the node is a Master when in this state
+ */
+ final public boolean isMaster() {
+ return this == MASTER;
+ }
+
+ /**
+ * @return true if the node is a Replica when in this state
+ */
+ final public boolean isReplica() {
+ return this == REPLICA;
+ }
+
+ /**
+ * @return true if the node is disconnected from the replication
+ * group when in this state.
+ */
+ final public boolean isDetached() {
+ return this == DETACHED;
+ }
+
+ /**
+ * @return true if the node's state is unknown, and it is attempting
+ * to transition to Master or Replica.
+ */
+ final public boolean isUnknown() {
+ return this == UNKNOWN;
+ }
+
+ /**
+ * @return true if the node is currently participating in the group as
+ * a Replica or a Master
+ */
+ final public boolean isActive() {
+ return (this == MASTER) || (this == REPLICA);
+ }
+ }
+
+ /**
+ * Closes this handle and shuts down the Replication Group by forcing all
+ * active Replicas to exit.
+ *

        + * This method must be invoked on the node that's currently the Master + * after all other outstanding handles have been closed. + *

+ * The Master waits for all active Replicas to catch up so that they have a
+ * current set of logs, and then shuts them down. The Master will wait for
+ * a maximum of replicaShutdownTimeout for a Replica to catch
+ * up. If a Replica has not caught up in this time period, the Master will
+ * force that Replica to shut down before it is completely caught up. A
+ * negative or zero replicaShutdownTimeout value will result in an
+ * immediate shutdown without waiting for lagging Replicas to catch up.
+ * Nodes that are currently inactive cannot be contacted by the Master; as
+ * a consequence, their state is not impacted by the shutdown.
+ *
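+ * <p>
+ * A minimal invocation sketch, on the Master, after all other local
+ * handles have been closed; the timeout value is illustrative:
+ * <pre>
+ *     masterEnv.shutdownGroup(60, TimeUnit.SECONDS);
+ * </pre>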

+ * The shutdown operation will close this handle on the Master node. The
+ * environments on Replica nodes will be invalidated, and attempts to use
+ * those handles will result in a {@link GroupShutdownException} being
+ * thrown. The application is responsible for closing the remaining handles
+ * on the Replica nodes.
+ *
+ * @param replicaShutdownTimeout the maximum amount of time the Master
+ * waits for a Replica to shut down.
+ *
+ * @param unit the time unit associated with the
+ * replicaShutdownTimeout
+ *
+ * @throws IllegalStateException if the method is invoked on a node that's
+ * not currently the Master, or there are other open handles to this
+ * Environment.
+ */
+ public synchronized void shutdownGroup(long replicaShutdownTimeout,
+ TimeUnit unit)
+ throws IllegalStateException {
+
+ final RepImpl repImpl = checkOpen();
+
+ /*
+ * Hold repImpl stable across the setup and close. Note that close()
+ * synchronizes on DbEnvPool, and synchronization order must be
+ * DbEnvPool before repImpl/envImpl.
+ */
+ synchronized (DbEnvPool.getInstance()) {
+ synchronized (repImpl) {
+ repImpl.shutdownGroupSetup(
+ unit.toMillis(replicaShutdownTimeout));
+ close();
+ }
+ }
+ }
+
+ /**
+ * Registers an {@link AppStateMonitor} that is queried for the application
+ * state in which this {@link ReplicatedEnvironment} is running. Note that
+ * there is only one AppStateMonitor per replication node, not one
+ * per handle. Invoking this method replaces the previous
+ * AppStateMonitor.
+ *

+ * After registration, the application state can be returned by invoking
+ * {@link com.sleepycat.je.rep.util.ReplicationGroupAdmin#getNodeState}.
+ *
+ * @param appStateMonitor the user-implemented AppStateMonitor
+ *
+ * @throws IllegalStateException if this handle or the underlying
+ * environment has already been closed.
+ */
+ public void registerAppStateMonitor(AppStateMonitor appStateMonitor)
+ throws IllegalStateException {
+
+ final RepImpl repImpl = checkOpen();
+
+ repImpl.getRepNode().registerAppStateMonitor(appStateMonitor);
+ }
+
+ /**
+ * Transfers the current master state from this node to one of the
+ * electable replicas supplied in the argument list. The replica that is
+ * actually chosen to be the new master is the one with which the Master
+ * Transfer can be completed most rapidly. The transfer operation ensures
+ * that all changes at this node are available at the new master upon
+ * conclusion of the operation.
+ *

+ * The following sequence of steps is used to accomplish the transfer
+ * (a usage sketch follows the list):
+ *

          + *
        1. The master first waits for at least one replica, from + * amongst the supplied {@code Set} of candidate replicas, to + * become reasonably current. It may have to wait for at least + * one of the replicas to establish a feeder, if none of them are + * currently connected to the master. "Reasonably current" means + * that the replica is close enough to the end of the transaction + * stream that it has managed to acknowledge a transaction within + * the time that the commit thread is still awaiting + * acknowledgments. If the candidate replicas are working + * through a long backlog after having been disconnected, this can + * take some time, so the timeout value should be chosen to allow + * for this possibility. + * + *
        2. The master blocks new transactions from being committed or + * aborted. + * + *
        3. The master now waits for one of the candidate replicas to + * become fully current (completely caught up with the end of the + * log on the master). The first replica that becomes current is + * the one that is chosen to become the new master. This second + * wait period is expected to be brief, since it only has to wait + * until transactions that were committed in the interval between + * step 1) and step 2) have been acknowledged by a replica. + * + *
4. The master sends messages to all other nodes announcing the chosen
+ * replica as the new master. This node will eventually become a replica,
+ * and any subsequent attempt to commit or abort existing transactions, or
+ * to do write operations, will result in a {@code ReplicaWriteException}.
+
+ *
        5. The current master releases the transactions that were blocked in + * step 2) allowing them to proceed. The released transactions will fail + * with {@code ReplicaWriteException} since the environment has become a + * replica. + *
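+ * <p>
+ * A minimal usage sketch; the node names and timeout are hypothetical:
+ * <pre>
+ *     Set&lt;String&gt; candidates = new HashSet&lt;String&gt;();
+ *     candidates.add("node2");
+ *     candidates.add("node3");
+ *     // blocks until the transfer completes or the timeout elapses
+ *     String newMaster =
+ *         masterEnv.transferMaster(candidates, 30, TimeUnit.SECONDS);
+ * </pre>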
        + * + * @param replicas the set of replicas to be considered when choosing the + * new master. The method returns immediately if this node is a member of + * the set. + * @param timeout the amount of time to allow for the transfer to be + * accomplished. A {@code MasterTransferFailureException} is thrown if the + * transfer is not accomplished within this timeout period. + * @param timeUnit the time unit associated with the timeout + * + * @return the name of the replica that was chosen to be the new master + * from amongst the set of supplied replicas + * + * @throws MasterTransferFailureException if the master transfer operation + * fails + * @throws IllegalArgumentException if any of the named replicas is not a + * member of the replication group or is not of type + * {@link NodeType#ELECTABLE} + * @throws IllegalStateException if this node is not currently the master, + * or this handle or the underlying environment has already been closed. + */ + public String transferMaster(Set replicas, + int timeout, + TimeUnit timeUnit) { + return transferMaster(replicas, timeout, timeUnit, false); + } + + /** + * Transfers the current master state from this node to one of the replicas + * supplied in the argument list. + * + * @param force true if this request should supersede and cancel any + * currently pending Master Transfer operation + * + * @see #transferMaster(Set, int, TimeUnit) + */ + public String transferMaster(Set replicas, + int timeout, + TimeUnit timeUnit, + boolean force) { + final RepImpl repImpl = checkOpen(); + + if (timeUnit == null || timeout <= 0) { + throw new IllegalArgumentException("Invalid timeout"); + } + + return repImpl.transferMaster(replicas, + timeUnit.toMillis(timeout), + force); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicatedEnvironmentStats.java b/src/com/sleepycat/je/rep/ReplicatedEnvironmentStats.java new file mode 100644 index 0000000..86ecfbe --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicatedEnvironmentStats.java @@ -0,0 +1,946 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_FEEDERS_CREATED; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_FEEDERS_SHUTDOWN; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_MAX_REPLICA_LAG; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_MAX_REPLICA_LAG_NAME; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_DELAY_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_LAST_COMMIT_TIMESTAMP_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_LAST_COMMIT_VLSN_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_VLSN_LAG_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_VLSN_RATE_MAP; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.LATEST_COMMIT_LAG_MS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.MAX_COMMIT_PROCESSING_NANOS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.MIN_COMMIT_PROCESSING_NANOS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_ABORTS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMITS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_ACKS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_NO_SYNCS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_SYNCS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_WRITE_NO_SYNCS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_ELAPSED_TXN_TIME; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMITS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMIT_MAX_EXCEEDED; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMIT_TIMEOUTS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMIT_TXNS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_LNS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_NAME_LNS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.TOTAL_COMMIT_LAG_MS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.TOTAL_COMMIT_PROCESSING_NANOS; +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_LAG_CONSISTENCY_WAITS; +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_LAG_CONSISTENCY_WAIT_MS; +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_VLSN_CONSISTENCY_WAITS; +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_VLSN_CONSISTENCY_WAIT_MS; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.ACK_WAIT_MS; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.LAST_COMMIT_TIMESTAMP; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.LAST_COMMIT_VLSN; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.TOTAL_TXN_MS; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.TXNS_ACKED; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.TXNS_NOT_ACKED; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.VLSN_RATE; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.BYTES_READ_RATE; +import static 
com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.BYTES_WRITE_RATE; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.MESSAGE_READ_RATE; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.MESSAGE_WRITE_RATE; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_BYTES_READ; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_BYTES_WRITTEN; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_ENTRIES_WRITTEN_OLD_VERSION; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_BATCHED; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_READ; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_WRITTEN; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGE_BATCHES; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_READ_NANOS; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_WRITE_NANOS; +import static com.sleepycat.je.utilint.CollectionUtils.emptySortedMap; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.FeederManager; +import com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.impl.node.ReplayStatDefinition; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.rep.impl.node.ReplicaStatDefinition; +import com.sleepycat.je.rep.stream.FeederTxnStatDefinition; +import com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition; +import com.sleepycat.je.rep.vlsn.VLSNIndexStatDefinition; +import com.sleepycat.je.utilint.AtomicLongMapStat; +import com.sleepycat.je.utilint.IntegralLongAvgStat; +import com.sleepycat.je.utilint.LongAvgRateMapStat; +import com.sleepycat.je.utilint.LongAvgRateStat; +import com.sleepycat.je.utilint.LongDiffMapStat; +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Statistics for a replicated environment. + *

        + * The statistics are logically grouped into four categories. Viewing the + * statistics through {@link ReplicatedEnvironmentStats#toString()} displays + * the values in these categories, as does viewing the stats through the {@link + * RepJEMonitor + * mbean}. Viewing the stats with {@link + * ReplicatedEnvironmentStats#toStringVerbose()} will provide more detailed + * descriptions of the stats and stat categories. + *

        + * The current categories are: + *

          + *
+ * • FeederManager: A feeder supplies the replication stream between a
+ * master and a replica. The current number of feeders gives a sense of the
+ * connectivity of the replication group.
+ *
+ * • BinaryProtocol: These statistics center on the network traffic
+ * engendered by the replication stream, and provide a sense of the network
+ * bandwidth seen by the replication group.
+ *
+ * • Replay: The act of receiving and applying the replication stream at
+ * the Replica node is called Replay. These stats give a sense of how much
+ * load the replica node is experiencing when processing the traffic from
+ * the replication group.
+ *
+ * • ConsistencyTracker: The tracker is invoked when consistency policies
+ * are used at a replica node. This provides a measure of delays experienced
+ * by read requests at a replica, in order to conform with the consistency
+ * specified by the application.
+ *
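+ * <p>
+ * A minimal retrieval sketch; the environment handle is assumed to already
+ * exist:
+ * <pre>
+ *     ReplicatedEnvironmentStats stats =
+ *         repEnv.getRepStats(StatsConfig.DEFAULT);
+ *     // verbose form includes per-stat descriptions
+ *     System.out.println(stats.toStringVerbose());
+ * </pre>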
        + * + * @see Viewing + * Statistics with JConsole + */ +public class ReplicatedEnvironmentStats implements Serializable { + private static final long serialVersionUID = 1L; + + /** + * The "impossible" return value used by stats accessors to indicate the + * statistic is not available in this instance of + * ReplicatedEnvironmentStats, because it represents an earlier + * de-serialized instance in which this statistic was unavailable. + */ + private static final int VALUE_UNAVAILABLE = -1; + + private StatGroup feederManagerStats; + private StatGroup feederTxnStats; + private StatGroup replayStats; + private StatGroup trackerStats; + private StatGroup protocolStats; + private StatGroup vlsnIndexStats; + + private final Map tipsMap = new HashMap(); + + ReplicatedEnvironmentStats(RepImpl repImpl, StatsConfig config) { + final RepNode repNode = repImpl.getRepNode(); + final FeederManager feederManager = repNode.feederManager(); + + feederManagerStats = feederManager.getFeederManagerStats(config); + feederTxnStats = repNode.getFeederTxns().getStats(config); + + final Replica replica = repNode.getReplica(); + replayStats = replica.getReplayStats(config); + trackerStats = replica.getTrackerStats(config); + protocolStats = feederManager.getProtocolStats(config); + vlsnIndexStats = repImpl.getVLSNIndex().getStats(config); + + protocolStats.addAll(replica.getProtocolStats(config)); + addMessageRateStats(); + addBytesRateStats(); + } + + /** + * @hidden + * Internal use only. + */ + public ReplicatedEnvironmentStats() { + } + + /** + * @hidden + * Internal use only. + */ + public Collection getStatGroups() { + return (feederTxnStats != null) ? + Arrays.asList(feederManagerStats, + feederTxnStats, + replayStats, + trackerStats, + protocolStats, + vlsnIndexStats) : + Arrays.asList(feederManagerStats, + replayStats, + trackerStats, + protocolStats, + vlsnIndexStats); + } + + /** + * @hidden + * Internal use only. + */ + public Map getStatGroupsMap() { + HashMap statmap = new HashMap(); + statmap.put(feederManagerStats.getName(), feederManagerStats); + statmap.put(replayStats.getName(), replayStats); + statmap.put(trackerStats.getName(), trackerStats); + statmap.put(protocolStats.getName(), protocolStats); + statmap.put(vlsnIndexStats.getName(), vlsnIndexStats); + if (feederTxnStats != null) { + statmap.put(feederTxnStats.getName(), feederTxnStats); + } + return statmap; + } + + /** + * @hidden + * Internal use only. + */ + public void setStatGroup(StatGroup sg) { + + if (sg.getName().equals(FeederManagerStatDefinition.GROUP_NAME)) { + feederManagerStats = sg; + } else if (sg.getName().equals(ReplayStatDefinition.GROUP_NAME)) { + replayStats = sg; + } else if (sg.getName().equals(ReplicaStatDefinition.GROUP_NAME)) { + trackerStats = sg; + } else if (sg.getName().equals + (BinaryProtocolStatDefinition.GROUP_NAME)) { + protocolStats = sg; + } else if (sg.getName().equals(VLSNIndexStatDefinition.GROUP_NAME)) { + vlsnIndexStats = sg; + } else if (sg.getName().equals(FeederTxnStatDefinition.GROUP_NAME)) { + feederTxnStats = sg; + } else { + throw EnvironmentFailureException.unexpectedState + ("Internal error stat context is not registered"); + } + } + + /** + * @hidden + * Internal use only + * + * For JConsole plugin support. 
+ */ + public static String[] getStatGroupTitles() { + return new String[] { + FeederManagerStatDefinition.GROUP_NAME, + FeederTxnStatDefinition.GROUP_NAME, + BinaryProtocolStatDefinition.GROUP_NAME, + ReplayStatDefinition.GROUP_NAME, + ReplicaStatDefinition.GROUP_NAME, + VLSNIndexStatDefinition.GROUP_NAME}; + } + + private void addMessageRateStats() { + long numerator; + long denominator; + + numerator = (protocolStats.getLongStat(N_MESSAGES_READ) == null) ? + 0 : protocolStats.getLongStat(N_MESSAGES_READ).get(); + denominator = (protocolStats.getLongStat(N_READ_NANOS) == null) ? + 0 : protocolStats.getLongStat(N_READ_NANOS).get(); + @SuppressWarnings("unused") + IntegralLongAvgStat msgReadRate = + new IntegralLongAvgStat + (protocolStats, + MESSAGE_READ_RATE, + numerator, + denominator, + 1000000000); + + numerator = (protocolStats.getLongStat(N_MESSAGES_WRITTEN) == null) ? + 0 : protocolStats.getLongStat(N_MESSAGES_WRITTEN).get(); + denominator = (protocolStats.getLongStat(N_WRITE_NANOS) == null) ? + 0 : protocolStats.getLongStat(N_WRITE_NANOS).get(); + @SuppressWarnings("unused") + IntegralLongAvgStat msgWriteRate = + new IntegralLongAvgStat + (protocolStats, + MESSAGE_WRITE_RATE, + numerator, + denominator, + 1000000000); + } + + private void addBytesRateStats() { + long numerator; + long denominator; + + numerator = (protocolStats.getLongStat(N_BYTES_READ) == null) ? + 0 : protocolStats.getLongStat(N_BYTES_READ).get(); + denominator = (protocolStats.getLongStat(N_READ_NANOS) == null) ? + 0 : protocolStats.getLongStat(N_READ_NANOS).get(); + @SuppressWarnings("unused") + IntegralLongAvgStat bytesReadRate = + new IntegralLongAvgStat + (protocolStats, + BYTES_READ_RATE, + numerator, + denominator, + 1000000000); + + numerator = (protocolStats.getLongStat(N_BYTES_WRITTEN) == null) ? + 0 : protocolStats.getLongStat(N_BYTES_WRITTEN).get(); + denominator = (protocolStats.getLongStat(N_WRITE_NANOS) == null) ? + 0 : protocolStats.getLongStat(N_WRITE_NANOS).get(); + @SuppressWarnings("unused") + IntegralLongAvgStat bytesWriteRate = + new IntegralLongAvgStat + (protocolStats, + BYTES_WRITE_RATE, + numerator, + denominator, + 1000000000); + } + + /* Feeder Stats. */ + + /** + * The number of Feeder threads since this node was started. A Master + * supplies the Replication Stream to a Replica via a Feeder thread. The + * Feeder thread is created when a Replica connects to the node and is + * shutdown when the connection is terminated. + */ + public int getNFeedersCreated() { + return feederManagerStats.getInt(N_FEEDERS_CREATED); + } + + /** + * The number of Feeder threads that were shut down, either because this + * node, or the Replica terminated the connection. + * + * @see #getNFeedersCreated() + */ + public int getNFeedersShutdown() { + return feederManagerStats.getInt(N_FEEDERS_SHUTDOWN); + } + + /** + * The lag (in VLSNs) associated with the replica that's farthest behind in + * replaying the replication stream. + */ + public long getNMaxReplicaLag() { + + /* TODO: Implement using REPLICA_VLSN_LAG_MAP */ + return feederManagerStats.getLong(N_MAX_REPLICA_LAG); + } + + /** + * The name of the replica that's farthest behind in replaying the + * replication stream. 
+ */ + public String getNMaxReplicaLagName() { + + /* TODO: Implement using REPLICA_VLSN_LAG_MAP */ + return feederManagerStats.getString(N_MAX_REPLICA_LAG_NAME); + } + + /** + * Returns a map from replica node name to the delay, in milliseconds, + * between when a transaction was committed on the master and when the + * master learned that the transaction was processed on the replica, if + * known. Returns an empty map if this node is not the master. + * + * @since 6.3.0 + */ + public SortedMap getReplicaDelayMap() { + final LongDiffMapStat stat = + (LongDiffMapStat) feederManagerStats.getStat(REPLICA_DELAY_MAP); + if (stat == null) { + return emptySortedMap(); + } + return stat.getMap(); + } + + /** + * Returns a map from replica node name to the commit timestamp of the last + * committed transaction that was processed on the replica, if known. + * Returns an empty map if this node is not the master. + * + * @since 6.3.0 + */ + public SortedMap getReplicaLastCommitTimestampMap() { + final AtomicLongMapStat stat = (AtomicLongMapStat) + feederManagerStats.getStat(REPLICA_LAST_COMMIT_TIMESTAMP_MAP); + if (stat == null) { + return emptySortedMap(); + } + return stat.getMap(); + } + + /** + * Returns a map from replica node name to the VLSN of the last committed + * transaction that was processed on the replica, if known. Returns an + * empty map if this node is not the master. + * + * @since 6.3.0 + */ + public SortedMap getReplicaLastCommitVLSNMap() { + final AtomicLongMapStat stat = (AtomicLongMapStat) + feederManagerStats.getStat(REPLICA_LAST_COMMIT_VLSN_MAP); + if (stat == null) { + return emptySortedMap(); + } + return stat.getMap(); + } + + /** + * Returns a map from replica node name to the lag, in VLSNs, between the + * replication state of the replica and the master, if known. Returns an + * empty map if this node is not the master. + * + * @since 6.3.0 + */ + public SortedMap getReplicaVLSNLagMap() { + final LongDiffMapStat stat = + (LongDiffMapStat) feederManagerStats.getStat(REPLICA_VLSN_LAG_MAP); + if (stat == null) { + return emptySortedMap(); + } + return stat.getMap(); + } + + /** + * Returns a map from replica node name to a moving average of the rate, in + * VLSNs per minute, that the replica is processing replication data, if + * known. Returns an empty map if this node is not the master. + * + * @since 6.3.0 + */ + public SortedMap getReplicaVLSNRateMap() { + final LongAvgRateMapStat stat = (LongAvgRateMapStat) + feederManagerStats.getStat(REPLICA_VLSN_RATE_MAP); + if (stat == null) { + return emptySortedMap(); + } + return stat.getMap(); + } + + /* Master transaction commit acknowledgment statistics. */ + + /** + * The number of transactions that were successfully acknowledged based + * upon the {@link ReplicaAckPolicy} policy associated with the + * transaction commit. + */ + public long getNTxnsAcked() { + return (feederTxnStats == null) ? + VALUE_UNAVAILABLE : + feederTxnStats.getAtomicLong(TXNS_ACKED); + } + + /** + * The number of transactions that were not acknowledged as required by the + * {@link ReplicaAckPolicy} policy associated with the transaction commit. + * These transactions resulted in {@link InsufficientReplicasException} or + * {@link InsufficientAcksException}. + */ + public long getNTxnsNotAcked() { + return (feederTxnStats == null) ? + VALUE_UNAVAILABLE : + feederTxnStats.getAtomicLong(TXNS_NOT_ACKED); + } + + /** + * The total time in milliseconds spent in replicated transactions. 
This
+ * represents the time from the start of the transaction until its
+ * successful commit and acknowledgment. It includes the time spent
+ * waiting for transaction commit acknowledgments, as determined by
+ * {@link #getAckWaitMs()}.
+ */
+ public long getTotalTxnMs() {
+ return (feederTxnStats == null) ?
+ VALUE_UNAVAILABLE :
+ feederTxnStats.getAtomicLong(TOTAL_TXN_MS);
+ }
+
+ /**
+ * The total time in milliseconds that the master spent waiting for the
+ * {@link ReplicaAckPolicy} to be satisfied during successful transaction
+ * commits.
+ *
+ * @see #getTotalTxnMs()
+ */
+ public long getAckWaitMs() {
+ return (feederTxnStats == null) ?
+ VALUE_UNAVAILABLE :
+ feederTxnStats.getAtomicLong(ACK_WAIT_MS);
+ }
+
+ /**
+ * The VLSN of the last committed transaction on the master, or 0 if not
+ * known or this node is not the master.
+ *
+ * @since 6.3.0
+ */
+ public long getLastCommitVLSN() {
+ return (feederTxnStats == null) ?
+ VALUE_UNAVAILABLE :
+ feederTxnStats.getAtomicLong(LAST_COMMIT_VLSN);
+ }
+
+ /**
+ * The commit timestamp of the last committed transaction on the master, or
+ * 0 if not known or this node is not the master.
+ *
+ * @since 6.3.0
+ */
+ public long getLastCommitTimestamp() {
+ return (feederTxnStats == null) ?
+ VALUE_UNAVAILABLE :
+ feederTxnStats.getAtomicLong(LAST_COMMIT_TIMESTAMP);
+ }
+
+ /**
+ * A moving average of the rate replication data is being generated by the
+ * master, in VLSNs per minute, or 0 if not known or this node is not the
+ * master.
+ *
+ * @since 6.3.0
+ */
+ public long getVLSNRate() {
+ if (feederTxnStats == null) {
+ return VALUE_UNAVAILABLE;
+ }
+ final LongAvgRateStat stat =
+ (LongAvgRateStat) feederTxnStats.getStat(VLSN_RATE);
+ return (stat != null) ? stat.get() : 0;
+ }
+
+ /* Replay Stats. */
+
+ /**
+ * The number of commit log records that were replayed by this node when
+ * it was a Replica. There is one commit record for each actual
+ * commit on the Master.
+ */
+ public long getNReplayCommits() {
+ return replayStats.getLong(N_COMMITS);
+ }
+
+ /**
+ * The number of commit log records that needed to be acknowledged to the
+ * Master by this node when it was a Replica. The rate of change of this
+ * statistic will show a strong correlation with that of the
+ * NReplayCommits statistic, if the Durability
+ * policy used by transactions on the master calls for transaction commit
+ * acknowledgments and the Replica is current with respect to the Master.
+ */
+ public long getNReplayCommitAcks() {
+ return replayStats.getLong(N_COMMIT_ACKS);
+ }
+
+ /**
+ * The number of commitSync() calls executed when satisfying transaction
+ * commit acknowledgment requests from the Master.
+ */
+ public long getNReplayCommitSyncs() {
+ return replayStats.getLong(N_COMMIT_SYNCS);
+ }
+
+ /**
+ * The number of commitNoSync() calls executed when satisfying transaction
+ * commit acknowledgment requests from the Master.
+ */
+ public long getNReplayCommitNoSyncs() {
+ return replayStats.getLong(N_COMMIT_NO_SYNCS);
+ }
+
+ /**
+ * The number of commitWriteNoSync() calls executed when satisfying
+ * transaction commit acknowledgment requests from the Master.
+ */
+ public long getNReplayCommitWriteNoSyncs() {
+ return replayStats.getLong(N_COMMIT_WRITE_NO_SYNCS);
+ }
+
+ /**
+ * The number of abort records which were replayed while the node was in
+ * the Replica state.
+ */
+ public long getNReplayAborts() {
+ return replayStats.getLong(N_ABORTS);
+ }
+
+ /**
+ * The number of NameLN records which were replayed while the node was in
+ * the Replica state.
+ */
+ public long getNReplayNameLNs() {
+ return replayStats.getLong(N_NAME_LNS);
+ }
+
+ /**
+ * The number of data records (creation, update, deletion) which were
+ * replayed while the node was in the Replica state.
+ */
+ public long getNReplayLNs() {
+ return replayStats.getLong(N_LNS);
+ }
+
+ /**
+ * The total elapsed time in milliseconds spent replaying committed and
+ * aborted transactions.
+ */
+ public long getReplayElapsedTxnTime() {
+ return replayStats.getLong(N_ELAPSED_TXN_TIME);
+ }
+
+ /**
+ * The number of group commits that were initiated due to the
+ * {@link ReplicationConfig#REPLICA_GROUP_COMMIT_INTERVAL group timeout
+ * interval} being exceeded.
+ *
+ * @since 5.0.76
+ */
+ public long getNReplayGroupCommitTimeouts() {
+ return replayStats.getLong(N_GROUP_COMMIT_TIMEOUTS);
+ }
+
+ /**
+ * The number of group commits that were initiated due to the
+ * {@link ReplicationConfig#REPLICA_MAX_GROUP_COMMIT max group size} being
+ * exceeded.
+ *
+ * @since 5.0.76
+ */
+ public long getNReplayGroupCommitMaxExceeded() {
+ return replayStats.getLong(N_GROUP_COMMIT_MAX_EXCEEDED);
+ }
+
+ /**
+ * The number of replay transaction commits that were part of a group
+ * commit operation.
+ *
+ * @since 5.0.76
+ */
+ public long getNReplayGroupCommitTxns() {
+ return replayStats.getLong(N_GROUP_COMMIT_TXNS);
+ }
+
+ /**
+ * The number of group commit operations.
+ *
+ * @since 5.0.76
+ */
+ public long getNReplayGroupCommits() {
+ return replayStats.getLong(N_GROUP_COMMITS);
+ }
+
+ /**
+ * The minimum time taken to replay a transaction commit operation.
+ */
+ public long getReplayMinCommitProcessingNanos() {
+ return replayStats.getLong(MIN_COMMIT_PROCESSING_NANOS);
+ }
+
+ /**
+ * The maximum time taken to replay a transaction commit operation.
+ */
+ public long getReplayMaxCommitProcessingNanos() {
+ return replayStats.getLong(MAX_COMMIT_PROCESSING_NANOS);
+ }
+
+ /**
+ * The total time spent to replay all commit operations.
+ */
+ public long getReplayTotalCommitProcessingNanos() {
+ return replayStats.getLong(TOTAL_COMMIT_PROCESSING_NANOS);
+ }
+
+ /**
+ * @hidden
+ * TODO: Make visible after experimenting with this new stat
+ *
+ * The sum of time periods, measured in milliseconds, between when update
+ * operations commit on the master and then subsequently commit on the
+ * replica. Divide this value by the total number of commit operations,
+ * available by calling {@link #getNReplayCommits}, to find the average
+ * commit lag for a single operation.
+ *

Note that each lag is computed on the replica by comparing the time
+ * of the master commit, as measured by the master, and the time on the
+ * replica when it commits locally. As a result, the return value will be
+ * affected by any clock skew between the master and the replica.

Note that the lag is computed on the replica by comparing the time of
+ * the master commit, as measured by the master, and the time on the
+ * replica when it commits locally. As a result, the return value will be
+ * affected by any clock skew between the master and the replica.

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the sum total of all Feeder related + * network activity, as well as Replica network activity. + */ + public long getNProtocolBytesRead() { + return protocolStats.getLong(N_BYTES_READ); + } + + /** + * The number of Replication Stream messages read over the network. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the sum total of all Feeder related + * network activity, as well as Replica network activity. + */ + public long getNProtocolMessagesRead() { + return protocolStats.getLong(N_MESSAGES_READ); + } + + /** + * The number of Replication Stream bytes written over the network. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the sum total of all Feeder related + * network activity, as well as Replica network activity. + */ + public long getNProtocolBytesWritten() { + return protocolStats.getLong(N_BYTES_WRITTEN); + } + + /** + * The number of Replication Stream messages that were written as part + * of a message batch instead of being written individually. + * + * It represents a subset of the messages returned by + * {@link #getNProtocolMessagesWritten()} + * + * @see #getNProtocolMessageBatches + * + * @since 6.2.7 + */ + public long getNProtocolMessagesBatched() { + return protocolStats.getLong(N_MESSAGES_BATCHED); + } + + /** + * The number of Replication Stream message batches written to the network. + * + * @see #getNProtocolMessagesBatched + * + * @since 6.2.7 + */ + public long getNProtocolMessageBatches() { + return protocolStats.getLong(N_MESSAGE_BATCHES); + } + + /** + * The total number of Replication Stream messages written over the + * network. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the sum total of all Feeder related + * network activity, as well as Replica network activity. + */ + public long getNProtocolMessagesWritten() { + return protocolStats.getLong(N_MESSAGES_WRITTEN); + } + + /** + * The number of nanoseconds spent reading from the network channel. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the sum total of all Feeder related + * network activity, as well as Replica network activity. + */ + public long getProtocolReadNanos() { + return protocolStats.getLong(N_READ_NANOS); + } + + /** + * The number of nanoseconds spent writing to the network channel. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the sum total of all Feeder related + * network activity, as well as Replica network activity. + */ + public long getProtocolWriteNanos() { + return protocolStats.getLong(N_WRITE_NANOS); + } + + /** + * Incoming replication message throughput, in terms of messages received + * from the replication network channels per second. + *

        If the node has served as both a Replica and Master since + * it was first started, the number represents the message reading rate + * over all Feeder related network activity, as well as Replica network + * activity. + */ + public long getProtocolMessageReadRate() { + IntegralLongAvgStat rstat = + protocolStats.getIntegralLongAvgStat(MESSAGE_READ_RATE); + return (rstat != null) ? rstat.get().longValue() : 0; + } + + /** + * Outgoing message throughput, in terms of message written to the + * replication network channels per second. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the message writing rate over all Feeder + * related network activity, as well as Replica network activity. + */ + public long getProtocolMessageWriteRate() { + IntegralLongAvgStat rstat = + protocolStats.getIntegralLongAvgStat(MESSAGE_WRITE_RATE); + return (rstat != null) ? rstat.get().longValue() : 0; + } + + /** + * Bytes read throughput, in terms of bytes received from the replication + * network channels per second. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the bytes reading rate over all Feeder + * related network activity, as well as Replica network activity. + */ + public long getProtocolBytesReadRate() { + IntegralLongAvgStat rstat = + protocolStats.getIntegralLongAvgStat(BYTES_READ_RATE); + return (rstat != null) ? rstat.get().longValue() : 0; + } + + /** + * Bytes written throughput, in terms of bytes written to the replication + * network channels per second. + *

        + * If the node has served as both a Replica and Master since it was first + * started, the number represents the bytes writing rate over all Feeder + * related network activity, as well as Replica network activity. + */ + public long getProtocolBytesWriteRate() { + IntegralLongAvgStat rstat = + protocolStats.getIntegralLongAvgStat(BYTES_WRITE_RATE); + return (rstat != null) ? rstat.get().longValue() : 0; + } + + /** + * Returns the number of messages containing log entries that were written + * to the replication stream using the previous log format to support + * replication to a replica running an earlier version during an upgrade. + */ + public long getNProtocolEntriesWrittenOldVersion() { + return protocolStats.getLong(N_ENTRIES_WRITTEN_OLD_VERSION); + } + + /* ConsistencyTracker Stats. */ + + /** + * The number of times a Replica held back a + * {@link Environment#beginTransaction(Transaction,TransactionConfig)} + * operation to satisfy the {@link TimeConsistencyPolicy}. + */ + public long getTrackerLagConsistencyWaits() { + return trackerStats.getLong(N_LAG_CONSISTENCY_WAITS); + } + + /** + * The total time (in msec) for which a Replica held back a + * {@link Environment#beginTransaction(Transaction,TransactionConfig)} + * operation to satisfy the {@link TimeConsistencyPolicy}. + */ + public long getTrackerLagConsistencyWaitMs() { + return trackerStats.getLong(N_LAG_CONSISTENCY_WAIT_MS); + } + + /** + * The number of times a Replica held back a + * {@link Environment#beginTransaction(Transaction,TransactionConfig)} + * operation to satisfy the {@link CommitPointConsistencyPolicy}. + */ + public long getTrackerVLSNConsistencyWaits() { + return trackerStats.getLong(N_VLSN_CONSISTENCY_WAITS); + } + + /** + * The total time (in msec) for which a Replica held back a + * {@link Environment#beginTransaction(Transaction,TransactionConfig)} + * operation to satisfy the {@link CommitPointConsistencyPolicy}. + */ + public long getTrackerVLSNConsistencyWaitMs() { + return trackerStats.getLong(N_VLSN_CONSISTENCY_WAIT_MS); + } + + /** + * Returns a string representation of the statistics. + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + for (StatGroup group : getStatGroups()) { + sb.append(group.toString()); + } + + return sb.toString(); + } + + public String toStringVerbose() { + StringBuilder sb = new StringBuilder(); + + for (StatGroup group : getStatGroups()) { + sb.append(group.toStringVerbose()); + } + + return sb.toString(); + } + + public Map getTips() { + /* Add FeederManager stats definition. */ + + for (StatGroup group : getStatGroups()) { + tipsMap.put(group.getName(), group.getDescription()); + for (StatDefinition def : group.getStats().keySet()) { + tipsMap.put(def.getName(), def.getDescription()); + } + } + + return tipsMap; + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationBasicConfig.java b/src/com/sleepycat/je/rep/ReplicationBasicConfig.java new file mode 100644 index 0000000..b756807 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationBasicConfig.java @@ -0,0 +1,83 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * @hidden SSL deferred
+ * Specifies the parameters for unencrypted communication within a
+ * replicated environment. The parameters contained here are immutable.
+ */
+public class ReplicationBasicConfig extends ReplicationNetworkConfig {
+
+ private static final long serialVersionUID = 1L;
+
+ /* The set of Replication properties specific to this class */
+ private static Set repBasicProperties;
+ static {
+ repBasicProperties = new HashSet();
+ /* Nail the set down */
+ repBasicProperties = Collections.unmodifiableSet(repBasicProperties);
+ }
+
+ /**
+ * Constructs a ReplicationBasicConfig initialized with the system
+ * default settings.
+ */
+ public ReplicationBasicConfig() {
+ }
+
+ /**
+ * Constructs a ReplicationBasicConfig initialized with the
+ * provided properties.
+ * @param properties a set of properties with which to initialize the
+ * instance properties
+ */
+ public ReplicationBasicConfig(Properties properties) {
+ super(properties);
+ }
+
+ /**
+ * Get the channel type setting for the replication service.
+ * This configuration specifies a "basic" channel type.
+ *
+ * @return the channel type
+ */
+ @Override
+ public String getChannelType() {
+ return "basic";
+ }
+
+ /**
+ * Returns a copy of this configuration object.
+ */
+ @Override
+ public ReplicationBasicConfig clone() {
+ return (ReplicationBasicConfig) super.clone();
+ }
+
+ /**
+ * @hidden
+ * Enumerates the subset of configuration properties that are intended to
+ * control network access.
+ */
+ static Set getRepBasicPropertySet() {
+
+ return repBasicProperties;
+ }
+}
diff --git a/src/com/sleepycat/je/rep/ReplicationBasicConfigBeanInfo.java b/src/com/sleepycat/je/rep/ReplicationBasicConfigBeanInfo.java
new file mode 100644
index 0000000..88b9c67
--- /dev/null
+++ b/src/com/sleepycat/je/rep/ReplicationBasicConfigBeanInfo.java
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import com.sleepycat.util.ConfigBeanInfoBase;
+
+import java.beans.BeanDescriptor;
+import java.beans.PropertyDescriptor;
+
+/**
+ * @hidden
+ * Getter/Setters for JavaBean based tools.
+ */ +public class ReplicationBasicConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(ReplicationBasicConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(ReplicationBasicConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationConfig.java b/src/com/sleepycat/je/rep/ReplicationConfig.java new file mode 100644 index 0000000..459c188 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationConfig.java @@ -0,0 +1,1939 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.net.InetSocketAddress; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ProgressListener; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.RepConfigProxy; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.stream.FeederFilter; +import com.sleepycat.je.rep.subscription.StreamAuthenticator; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.rep.utilint.RepUtils; + +/** + * Specifies the immutable attributes of a replicated environment. + *

        + * To change the default settings for a replicated environment, an application + * creates a configuration object, customizes settings and uses it for {@link + * ReplicatedEnvironment} construction. The set methods of this class validate + * the configuration values when the method is invoked. An + * IllegalArgumentException is thrown if the value is not valid for that + * attribute. + *

        + * Note that ReplicationConfig only describes those attributes which must be + * set at {@code ReplicatedEnvironment} construction time, while its superclass + * {@link ReplicationMutableConfig} describes attributes that may be modified + * during the life of the replication group. + *

        + * ReplicationConfig follows precedence rules similar to those of + * {@link EnvironmentConfig}. + *

+ * <ol>
+ * <li>Configuration parameters specified in {@literal /je.properties} take
+ * first precedence.</li>
+ * <li>Configuration parameters set in the ReplicationConfig object used at
+ * {@code ReplicatedEnvironment} construction are next.</li>
+ * <li>Any configuration parameters not set by the application are set to
+ * system defaults, described along with the parameter name String constants
+ * in this class.</li>
+ * </ol>
+ *
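+ * <p>
+ * A minimal construction sketch; the group, node, and host names below are
+ * illustrative, and {@code envHome} and {@code envConfig} are assumed to be
+ * prepared as for a standalone Environment:
+ * <pre>
+ *  ReplicationConfig repConfig =
+ *      new ReplicationConfig("MyGroup", "node1", "node1.example.com:5001");
+ *  repConfig.setHelperHosts("node2.example.com:5001");
+ *  ReplicatedEnvironment repEnv =
+ *      new ReplicatedEnvironment(envHome, repConfig, envConfig);
+ * </pre>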

        + * After a {@code ReplicatedEnvironment} has been constructed, its mutable + * properties may be changed using {@code + * ReplicatedEnvironment#setMutableConfig}. See {@code + * ReplicationMutableConfig} for a list of mutable properties; all other + * properties are immutable. Whether a property is mutable or immutable is + * also described along with the parameter name String constants in this class. + * + *

        Getting the Current ReplicatedEnvironment Properties

        + * + * To get the current "live" properties of a replicated environment after + * constructing it or changing its properties, you must call {@link + * ReplicatedEnvironment#getRepConfig} or {@link + * ReplicatedEnvironment#getRepMutableConfig}. The original ReplicationConfig + * or ReplicationMutableConfig object used to set the properties is not kept up + * to date as properties are changed, and does not reflect property validation + * or properties that are computed. + */ +public class ReplicationConfig extends ReplicationMutableConfig + implements RepConfigProxy { + + private static final long serialVersionUID = 1L; + + /* + * Note: all replicated parameters should start with + * EnvironmentParams.REP_PARAMS_PREFIX, which is "je.rep.", + * see SR [#19080]. + */ + + /** + * The name for the replication group. + * The name should consist of letters, digits, and/or hyphen ("-"), + * underscore ("_"), or period ("."). + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>"DefaultGroup"</td></tr>
+ * </table>

        + * @see ReplicationConfig#setGroupName + * @see ReplicationConfig#getGroupName + */ + public static final String GROUP_NAME = + EnvironmentParams.REP_PARAM_PREFIX + "groupName"; + + /** + * The node name uniquely identifies this node within the replication + * group. + * The name should consist of letters, digits, and/or hyphen ("-"), + * underscore ("_"), or period ("."). + * + *

        Note that the node name is immutable. Normally the host name should + * not be used as the node name, unless you intend to reuse the host + * name when a machine fails and is replaced, or the node is upgraded to + * new hardware.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>"DefaultRepNodeName"</td></tr>
+ * </table>

        + * @see ReplicationConfig#setNodeName + * @see ReplicationConfig#getNodeName + */ + public static final String NODE_NAME = + EnvironmentParams.REP_PARAM_PREFIX + "nodeName"; + + /** + * The type of this node. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>{@link NodeType}</td><td>No</td><td>ELECTABLE</td></tr>
+ * </table>

        + * @see ReplicationConfig#setNodeType + * @see ReplicationConfig#getNodeType + */ + public static final String NODE_TYPE = + EnvironmentParams.REP_PARAM_PREFIX + "nodeType"; + + /** + * The string identifying one or more helper host and port pairs in + * this format: + *
        +     * hostname[:port][,hostname[:port]]*
        +     * 
        + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

        + * @see ReplicationConfig#setHelperHosts + * @see ReplicationConfig#getHelperHosts + * @deprecated replaced by {@link ReplicationMutableConfig#HELPER_HOSTS}. + */ + @Deprecated + public static final String HELPER_HOSTS = + EnvironmentParams.REP_PARAM_PREFIX + "helperHosts"; + + /** + * The default port used for replication. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td><td>5001</td><td>1024</td><td>Short.MAX_VALUE</td></tr>
+ * </table>

        + */ + public static final String DEFAULT_PORT = + EnvironmentParams.REP_PARAM_PREFIX + "defaultPort"; + + /** + * Names the hostname and port associated with this node in the + * replication group, e.g. je.rep.nodeHostPort=foo.com:5001. + *

        + * The hostname is defaulted to "localhost" to make it easy to prototype + * and to execute the examples, but the user should be very sure to set a + * specific hostname before starting nodes on multiple machines. The value + * of je.rep.nodeHostPort is saved persistently in replication group + * metadata and is expected to be a unique address, and a value of + * "localhost" in the replication metadata will cause severe communication + * confusion. + *

        + * The port portion of the host value is optional. If it's not specified, + * the value of "je.rep.defaultPort" is used. + *
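+ * <p>
+ * A sketch using the typed setter (the host name is illustrative):
+ * <pre>
+ *  repConfig.setNodeHostPort("node1.example.com:5001");
+ * </pre>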

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>"localhost"</td></tr>
+ * </table>

        + * @see ReplicationConfig#setNodeHostPort + * @see ReplicationConfig#getNodeHostPort + */ + public static final String NODE_HOST_PORT = + EnvironmentParams.REP_PARAM_PREFIX + "nodeHostPort"; + + /** + * When this configuration parameter is set to true, it binds the HA socket + * to INADDR_ANY, so that HA services are available on all network + * interfaces. The default value (false) results in the HA socket being + * bound to the specific interface specified by the {@link #NODE_HOST_PORT} + * configuration. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>false</td></tr>
+ * </table>
        + *

        + */ + public static final String BIND_INADDR_ANY = + EnvironmentParams.REP_PARAM_PREFIX + "bindInaddrAny"; + + /** + * The default consistency policy used by a replica. This value is used + * when no {@link com.sleepycat.je.TransactionConfig#setConsistencyPolicy + * transaction consistency policy} is specified, including when a null + * {@code Transaction} parameter is used for a read operation. + *

        + * Only two + * policies are meaningful as properties denoting environment level default + * policies: {@link NoConsistencyRequiredPolicy} and + * {@link TimeConsistencyPolicy}. They + * can be specified as: + *

+ *   NoConsistencyRequiredPolicy
+ * or
+ *   {@code TimeConsistencyPolicy(<lag>,<timeout>)}
+ * where {@code <lag>} and {@code <timeout>} are Time Duration
+ * Properties.

        + * For example, a time based consistency policy with a lag of one second + * and a timeout of one hour is denoted by the string: + * {@code TimeConsistencyPolicy(1 s,1 h)} + *
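+ * <p>
+ * The same default can also be set programmatically; a sketch using the
+ * typed setter (the one-second lag and one-hour timeout are examples):
+ * <pre>
+ *  repConfig.setConsistencyPolicy(
+ *      new TimeConsistencyPolicy(1, TimeUnit.SECONDS, 1, TimeUnit.HOURS));
+ * </pre>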

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>"TimeConsistencyPolicy(1 s,1 h)"</td></tr>
+ * </table>

        + * + * @see ReplicationConfig#setConsistencyPolicy + * @see ReplicationConfig#getConsistencyPolicy + * @see com.sleepycat.je.TransactionConfig#setConsistencyPolicy + * @see com.sleepycat.je.TransactionConfig#getConsistencyPolicy + * @see Time Duration + * Properties + */ + public static final String CONSISTENCY_POLICY = + EnvironmentParams.REP_PARAM_PREFIX + "consistencyPolicy"; + + /** + * @deprecated and no longer used as of JE 7.5. Reserved files are now + * retained based on available disk space -- see + * {@link EnvironmentConfig#MAX_DISK} and + * {@link EnvironmentConfig#FREE_DISK} should be used instead. + * However, this param is still used when some, but not all, nodes in a + * group have been upgraded to 7.5 or later. + */ + public static final String REP_STREAM_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "repStreamTimeout"; + + /** + * @deprecated and no longer used as of JE 7.5. Reserved files are now + * retained based on available disk space -- see + * {@link EnvironmentConfig#MAX_DISK} and + * {@link EnvironmentConfig#FREE_DISK} should be used instead. + */ + public static final String REPLAY_COST_PERCENT = + EnvironmentParams.REP_PARAM_PREFIX + "replayCostPercent"; + + /** + * @deprecated and no longer needed as of JE 7.5. Reserved files are now + * retained based on available disk space -- see + * {@link EnvironmentConfig#MAX_DISK} and + * {@link EnvironmentConfig#FREE_DISK} should be used instead. + * However, this param is still used when it has been specified and + * is non-zero, and FREE_DISK has not been specified. In this case, + * REPLAY_FREE_DISK_PERCENT overrides the FREE_DISK default value. If + * both REPLAY_FREE_DISK_PERCENT and FREE_DISK are specified, an + * IllegalArgumentException is thrown. + */ + public static final String REPLAY_FREE_DISK_PERCENT = + EnvironmentParams.REP_PARAM_PREFIX + "replayFreeDiskPercent"; + + /** + * The maximum amount of time for a replay transaction to wait for a lock. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>500 ms</td><td>1 ms</td><td>75 min</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + */ + public static final String REPLAY_TXN_LOCK_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "replayTxnLockTimeout"; + + /** + * The maximum number of most recently used database handles that + * are kept open during the replay of the replication stream. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Int</td><td>Yes</td><td>10</td><td>1</td><td>-none-</td></tr>
+ * </table>

        + * + * @deprecated replaced by {@link ReplicationMutableConfig#REPLAY_MAX_OPEN_DB_HANDLES}. + */ + @Deprecated + public static final String REPLAY_MAX_OPEN_DB_HANDLES = + EnvironmentParams.REP_PARAM_PREFIX + "replayMaxOpenDbHandles"; + + /** + * The maximum amount of time that an inactive database handle is kept open + * during a replay of the replication stream. Handles that are inactive for + * more than this time period are automatically closed. Note that this does + * not impact any handles that may have been opened by the application. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>Yes</td><td>30 sec</td><td>1 sec</td><td>-none-</td></tr>
+ * </table>

        + * + * @see Time Duration + * Properties + * + * @deprecated replaced by {@link ReplicationMutableConfig#REPLAY_DB_HANDLE_TIMEOUT}. + */ + @Deprecated + public static final String REPLAY_DB_HANDLE_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "replayOpenHandleTimeout"; + + /** + * The amount of time to wait for a Replica to become consistent with the + * Master, when a ReplicatedEnvironment handle is created and + * no ConsistencyPolicy is specified. If the Replica does not + * become consistent within this period, a + * ReplicaConsistencyException is thrown by the + * ReplicatedEnvironment constructor. + *

        + * If an explicit ConsistencyPolicy is specified via a + * constructor argument, then the timeout defined by the + * ConsistencyPolicy argument is used instead of this default. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>5 min</td><td>10 ms</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @see Time Duration + * Properties + */ + public static final String ENV_CONSISTENCY_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "envConsistencyTimeout"; + + /** + * The amount of time that the + * {@link com.sleepycat.je.Transaction#commit(com.sleepycat.je.Durability)} + * on the Master will wait for a sufficient number of acknowledgments from + * electable Replicas. If the Master does not receive a sufficient number of + * acknowledgments within this timeout period, the commit() + * will throw {@link InsufficientAcksException}. In the special case of a + * two node group, if this node is the designated Primary, + * the Primary will be activated, and the + * commit() will proceed normally instead of throwing an + * exception. + *
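+ * <p>
+ * A sketch using the typed setter on an existing {@code ReplicationConfig}
+ * (the five-second value is an example, not a recommendation):
+ * <pre>
+ *  repConfig.setReplicaAckTimeout(5, TimeUnit.SECONDS);
+ * </pre>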

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>5 s</td><td>10 ms</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @see Time Duration + * Properties + * @see ReplicationMutableConfig#DESIGNATED_PRIMARY + */ + public static final String REPLICA_ACK_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "replicaAckTimeout"; + + /** + * @hidden + * + * The amount of time that the + * {@link com.sleepycat.je.Transaction#commit(com.sleepycat.je.Durability)} + * on the Master will wait for acknowledgments from an Arbiter. This wait + * occurs after waiting for the REPLICA_ACK_TIMEOUT period and not + * receiving the required number of acknowledgments. + * If the Master does not receive a sufficient number of acknowledgments + * within this timeout period, the commit() + * will throw {@link InsufficientAcksException}. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>2 s</td><td>10 ms</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @see Time Duration + * Properties + */ + public static final String ARBITER_ACK_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "arbiterAckTimeout"; + + /** + * The amount of time that a + * {@link ReplicatedEnvironment#beginTransaction(com.sleepycat.je.Transaction, com.sleepycat.je.TransactionConfig)} + * on the Master will wait for a sufficient number of electable Replicas, + * as determined by the default Durability policy, to contact + * the Master. If the timeout period expires before a sufficient number of + * Replicas contact the Master, the + * {@link ReplicatedEnvironment#beginTransaction(com.sleepycat.je.Transaction, com.sleepycat.je.TransactionConfig)} + * will throw {@link InsufficientReplicasException}. In the special case of + * a two node group, if this node is the designated Primary, + * the Primary will be activated, and the + * beginTransaction() will proceed normally instead of + * throwing an exception. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>10 s</td><td>10 ms</td><td>-none-</td></tr>
+ * </table>
        + *

+ *
+ * @see Time Duration
+ * Properties
+ * @see ReplicationMutableConfig#DESIGNATED_PRIMARY
+ */
+    public static final String INSUFFICIENT_REPLICAS_TIMEOUT =
+        EnvironmentParams.REP_PARAM_PREFIX + "insufficientReplicasTimeout";
+
+    /**
+     * The maximum message size which will be accepted by a node (to prevent
+     * DOS attacks). While the default shown here is 0, it is dynamically
+     * calculated when the node is created and is set to half of the
+     * environment cache size. The cache size is mutable, but changing the
+     * cache size at run time (after environment initialization) will not
+     * change the value of this parameter. If a value other than cache size /
+     * 2 is desired, this non-mutable parameter should be specified at
+     * initialization time.
+     *
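+ * <p>
+ * For illustration, a sketch pinning the limit to 100MB instead of half the
+ * cache size (the value is an example, not a recommendation):
+ * <pre>
+ *  repConfig.setConfigParam(ReplicationConfig.MAX_MESSAGE_SIZE,
+ *                           String.valueOf(100L * 1024 * 1024));
+ * </pre>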

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Long</td><td>No</td><td>half of cache size</td><td>256KB</td><td>Long.MAX_VALUE</td></tr>
+ * </table>

        + */ + public static final String MAX_MESSAGE_SIZE = + EnvironmentParams.REP_PARAM_PREFIX + "maxMessageSize"; + + /** + * Sets the maximum acceptable clock skew between this Replica and its + * Feeder, which is the node that is the source of its replication stream. + * This value is checked whenever a Replica establishes a connection to its + * replication stream source. The connection is abandoned if the clock skew + * is larger than this value. + * + *
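+ * <p>
+ * A sketch using the typed setter (the two-second value is an example):
+ * <pre>
+ *  repConfig.setMaxClockDelta(2, TimeUnit.SECONDS);
+ * </pre>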

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>2 s</td><td>0 s</td><td>1 min</td></tr>
+ * </table>

        + * + * @see ReplicationConfig#setMaxClockDelta + * @see ReplicationConfig#getMaxClockDelta + * @see Time Duration + * Properties + */ + public static final String MAX_CLOCK_DELTA = + EnvironmentParams.REP_PARAM_PREFIX + "maxClockDelta"; + + /** + * The number of times an unsuccessful election will be retried by a + * designated Primary in a two node group before it is + * activated and becomes the Master. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td><td>2</td><td>0</td><td>Integer.MAX_VALUE</td></tr>
+ * </table>

        + * + * @see ReplicationMutableConfig#DESIGNATED_PRIMARY + */ + public static final String ELECTIONS_PRIMARY_RETRIES = + EnvironmentParams.REP_PARAM_PREFIX + "electionsPrimaryRetries"; + + /** + * The time interval between rebroadcasts of election results by the master + * node to all nodes not currently connected to it. These rebroadcasts help + * ensure that a replication group is fully restored after a network + * partition, by permitting nodes on either side of the resolved partition + * to catch up with the latest election results. + *

+ * A network partition may, in some circumstances, result in a node
+ * continuing to think it is the master, even though it is on the side of
+ * the partition containing a minority of electable nodes, and the side
+ * with the majority has elected a new master. Rebroadcasting election
+ * results on a periodic basis ensures that the obsolete master is brought
+ * up to date after the network partition has been resolved. As a result of
+ * the update, the environment at the obsolete master will transition into
+ * a replica state.
+ *

        + * Decreasing the period will result in more frequent broadcasts and thus a + * faster return to normal operations after a network partition has been + * resolved. + * + *
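+ * <p>
+ * For illustration (the thirty-second period is an example):
+ * <pre>
+ *  repConfig.setConfigParam(
+ *      ReplicationConfig.ELECTIONS_REBROADCAST_PERIOD, "30 s");
+ * </pre>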

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td><td>1 min</td><td>1 s</td><td>none</td></tr>
+ * </table>
        + *

        + */ + public static final String ELECTIONS_REBROADCAST_PERIOD = + EnvironmentParams.REP_PARAM_PREFIX + "electionsRebroadcastPeriod"; + + /** + * In rare cases, a node may need to rollback committed transactions in + * order to rejoin a replication group. This parameter limits the number of + * durable transactions that may be rolled back. Durable transactions are + * transactions that were successfully committed with a durability + * requiring acknowledgments from at least a simple majority of nodes. If + * the number of durable committed transactions targeted for rollback + * exceeds this parameter, a {@link RollbackProhibitedException} will be + * thrown. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td><td>10</td><td>0</td><td>Integer.MAX_VALUE</td></tr>
+ * </table>
        + *

        + * + * @see RollbackProhibitedException + */ + public static final String TXN_ROLLBACK_LIMIT = + EnvironmentParams.REP_PARAM_PREFIX + "txnRollbackLimit"; + + /** + * In rare cases, a node may need to rollback committed transactions in + * order to rejoin a replication group. If this parameter is set to true + * and a rollback is necessary to rejoin the group, a {@link + * RollbackProhibitedException} will be thrown. + * + *

        Unlike setting {@link #TXN_ROLLBACK_LIMIT} to zero, setting this + * parameter to true disables the rollback without regard to whether the + * transactions to roll back are considered durable.

        + * + *

        Setting {@code TXN_ROLLBACK_DISABLED} to true should not be + * necessary for most applications. Its intended purpose is for the rare + * application that needs manual control over rollback of all transactions, + * including transactions that are not considered to be durable.

        + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>False</td></tr>
+ * </table>
        + */ + public static final String TXN_ROLLBACK_DISABLED = + EnvironmentParams.REP_PARAM_PREFIX + "txnRollbackDisabled"; + + /** + * A heartbeat is exchanged between the feeder and replica to ensure they + * are alive. This is the timeout associated with the heartbeat on the + * feeder side of the connection. + *

        + * Reducing this value enables the master to discover failed Replicas, and + * recycle feeder connections, faster. However, it increases the chances of + * false timeouts, if the network is experiencing transient problems, or + * the Java GC is responsible for long pauses. In the latter case, it's + * generally better to tune the GC to avoid such pauses. + * + *
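+ * <p>
+ * For illustration, in a je.properties file (the value is an example, not a
+ * recommendation):
+ * <pre>
+ *  je.rep.feederTimeout=10 s
+ * </pre>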

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>30 s</td><td>2 s</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @see Time Duration + * Properties + * @since 4.0.100 + */ + public static final String FEEDER_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "feederTimeout"; + + /** + * A heartbeat is exchanged between the feeder and replica to ensure they + * are alive. This is the timeout associated with the heartbeat on the + * replica side of the connection. + *

        + * Reducing the value means that a master failure will be discovered more + * promptly in some circumstances and the overall time needed to failover + * to a new master will be reduced. However, it increases the chances of + * false timeouts, if the network is experiencing transient problems, or + * the Java GC is responsible for long pauses. In the latter case, it's + * generally better to tune the GC to avoid such pauses. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>30 s</td><td>2 s</td><td>-none-</td></tr>
+ * </table>
        + *

+ *
+ * @see Time Duration
+ * Properties
+ * @since 4.0.100
+ */
+    public static final String REPLICA_TIMEOUT =
+        EnvironmentParams.REP_PARAM_PREFIX + "replicaTimeout";
+
+    /**
+     * The size of the TCP receive buffer associated with the socket used
+     * by the replica to transfer the replication stream.
+     *

+ * Larger values help handle incoming network traffic even when the replica
+ * has been paused for a garbage collection. The parameter default value of
+ * 1 MB should be sufficient in most environments. Consider
+ * increasing the value if network monitoring shows packet loss, or if your
+ * JE environment contains large data values. Note that if the size
+ * specified is larger than the operating system constrained maximum, it
+ * will be limited to this maximum value. For example, on Linux you may
+ * need to raise the operating system imposed limit by setting the kernel
+ * parameter net.core.rmem_max, e.g. with the command:
+ * sysctl -w net.core.rmem_max=1048576
+ *

        + * A parameter value of zero will result in the use of operating system + * specified default socket buffer size. + * + *
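+ * <p>
+ * A sketch doubling the default buffer (the value is illustrative):
+ * <pre>
+ *  repConfig.setConfigParam(
+ *      ReplicationConfig.REPLICA_RECEIVE_BUFFER_SIZE, "2097152");
+ * </pre>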

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td><td>1048576</td><td>0</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @since 5.0.37 + */ + public static final String REPLICA_RECEIVE_BUFFER_SIZE = + EnvironmentParams.REP_PARAM_PREFIX + "replicaReceiveBufferSize"; + + /** + * The maximum number of transactions that can be grouped to amortize the + * cost of an fsync when a transaction commits with SyncPolicy#SYNC on the + * Replica. A value of zero effectively turns off the group commit + * optimization. + *

        + * Specifying larger values can result in more transactions being grouped + * together decreasing average commit times. + *

        + * An fsync is issued if the size of the transaction group reaches the + * maximum within the time period specified by + * {@link #REPLICA_GROUP_COMMIT_INTERVAL}. + *

        + * The {@link + * ReplicatedEnvironmentStats#getNReplayGroupCommitMaxExceeded()} + * statistic may be used to tune this parameter. Large values indicate that + * commit throughput could be improved by increasing the current value. + *
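+ * <p>
+ * For illustration (the value 400 is an example, not a recommendation):
+ * <pre>
+ *  repConfig.setConfigParam(
+ *      ReplicationConfig.REPLICA_MAX_GROUP_COMMIT, "400");
+ * </pre>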

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td><td>200</td><td>0</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @since 5.0.76 + * @see #REPLICA_GROUP_COMMIT_INTERVAL + */ + public static final String REPLICA_MAX_GROUP_COMMIT = + EnvironmentParams.REP_PARAM_PREFIX + "replicaMaxGroupCommit"; + + /** + * The time interval during which transactions may be grouped to amortize + * the cost of fsync when a transaction commits with SyncPolicy#SYNC on the + * Replica. This parameter is only meaningful if the + * {@link #REPLICA_MAX_GROUP_COMMIT group commit size} is greater than one. + *

        + * The first (as ordered by transaction serialization) transaction in a + * transaction group may be delayed by at most this amount. Subsequent + * transactions in the group will have smaller delays since they are later + * in the serialization order. + *

        + * The {@link ReplicatedEnvironmentStats#getNReplayGroupCommitTimeouts()} + * statistic may be used to tune this parameter. Large numbers of timeouts + * in conjunction with large numbers of group commits ( + * {@link ReplicatedEnvironmentStats#getNReplayGroupCommits()}) indicate + * that commit throughput could be improved by increasing the time + * interval. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>{@link Duration}</td><td>No</td><td>3 ms</td><td>0</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @since 5.0.76 + * @see #REPLICA_MAX_GROUP_COMMIT + */ + public static final String REPLICA_GROUP_COMMIT_INTERVAL = + EnvironmentParams.REP_PARAM_PREFIX + "replicaGroupCommitInterval"; + + /** + * The maximum amount of time for the internal housekeeping, like + * elections, syncup with the master, etc. to be accomplished when opening + * a new handle to an environment. + *

        + * This timeout does not encompass the time spent making the node + * consistent with the master, if it is a Replica. The timeout associated + * with making a replica consistent is normally determined by the + * {@link #ENV_CONSISTENCY_TIMEOUT} parameter but can be overridden by the + * timeout associated with the ReplicaConsistencyPolicy if a + * consistencyPolicy argument was supplied to the handle + * constructor. + *

        + * Note that the default value (10 hours) is a long time to allow for cases + * where elections may take a long time when other nodes are not available. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>10 h</td><td>-none-</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + * @see Time Duration + * Properties + */ + public static final String ENV_SETUP_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "envSetupTimeout"; + + /** + * @hidden + * @deprecated + * + * For internal use only. + * + * When set to true, it permits opening of a + * ReplicatedEnvironment handle in the {@link + * ReplicatedEnvironment.State#UNKNOWN} state, if a Master could not be + * determined within the timeout specified by {@link + * ReplicationConfig#ENV_SETUP_TIMEOUT}. If it's false, an + * UnknownMasterException exception is thrown upon expiration of + * the timeout. + *

        + * A ReplicatedEnvironment handle in the {@link + * ReplicatedEnvironment.State#UNKNOWN} state can only be used to initiate + * read operations with an appropriately relaxed {@link + * NoConsistencyRequiredPolicy}; write operations will fail with a + * ReplicaWriteException. The handle will transition to + * a Master or Replica state when it can contact + * a sufficient number of other nodes in the replication group. + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>False</td></tr>
+ * </table>
        + */ + @Deprecated + public static final String ALLOW_UNKNOWN_STATE_ENV_OPEN = + EnvironmentParams.REP_PARAM_PREFIX + "allowUnknownStateEnvOpen"; + + /** + * Permits opening of a ReplicatedEnvironment handle in the + * {@link ReplicatedEnvironment.State#UNKNOWN} state, if a Master cannot be + * determined within this timeout period. For the timeout to be meaningful + * it must be less than {@link #ENV_SETUP_TIMEOUT}. This parameter is + * ignored when creating a replicated environment for the first time. + *

        + * A ReplicatedEnvironment handle in the + * {@link ReplicatedEnvironment.State#UNKNOWN} state can only be used to + * initiate read operations with an appropriately relaxed, e.g. + * {@link NoConsistencyRequiredPolicy}; write operations will fail with a + * {@link ReplicaWriteException}. The handle will transition to a + * {@code Master} or {@code Replica} state when it can contact a + * sufficient number of other nodes in the replication group. + *

        + * If the parameter is set to zero, and an election cannot be concluded + * within the timeout defined by {@link #ENV_SETUP_TIMEOUT}, the + * ReplicatedEnvironment constructor will throw {@link + * UnknownMasterException}. + *
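+ * <p>
+ * For illustration (the five-second timeout is an example):
+ * <pre>
+ *  repConfig.setConfigParam(
+ *      ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "5 s");
+ * </pre>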

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Duration</td><td>No</td><td>0</td><td>-none-</td><td>ENV_SETUP_TIMEOUT</td></tr>
+ * </table>
+ *
+ * @since 5.0.33
+ */
+    public static final String ENV_UNKNOWN_STATE_TIMEOUT =
+        EnvironmentParams.REP_PARAM_PREFIX + "envUnknownStateTimeout";
+
+    /**
+     * When set to true, which is currently the default, the
+     * replication network protocol will use the JVM platform default charset
+     * (text encoding) for node names and host names. This is incorrect, in
+     * that it requires that the JVM for all nodes in a replication group have
+     * the same default charset.
+     *

        + * When this parameter is set to false, the UTF-8 charset is + * always used in the replication protocol. In other words, the JVM + * default charset has no impact on the replication protocol. + *

        + * An application is not impacted by this issue, and does not need + * to set this parameter, if it has the following characteristics. + *

+ * <ul>
+ * <li>The default charset on all JVMs is UTF-8 or ASCII, or</li>
+ * <li>all node names and host names contain only ASCII characters, and
+ * the default charset on all JVMs is a superset of ASCII.</li>
+ * </ul>
+ *

        + * In JE 5.1, the default value for this parameter will be changed to + * false. In preparation for this, impacted applications should explicitly + * set the parameter to false at the next available opportunity. For + * applications not yet deployed, this should be done now. For deployed + * applications, a hot upgrade may not be performed when changing the + * parameter. Instead, a cold upgrade must be performed: all nodes must + * be stopped and upgraded before bringing them up again. In other words, + * for impacted applications the value of this configuration parameter must + * be the same for all running nodes in a replication group. + *

        + * Note that the default charset issue applies only to the replication + * network protocol and not to stored data of any kind. + *

        + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>True</td></tr>
+ * </table>
+ */
+    public static final String PROTOCOL_OLD_STRING_ENCODING =
+        EnvironmentParams.REP_PARAM_PREFIX + "protocolOldStringEncoding";
+
+    /**
+     * @hidden
+     *
+     * For internal use only.
+     *
+     * When set to true, the ReplicatedEnvironment will
+     * be used by the Arbiter.
+     *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>Boolean</td><td>No</td><td>False</td></tr>
+ * </table>
+ */
+    public static final String ARBITER_USE =
+        EnvironmentParams.REP_PARAM_PREFIX + "arbiterUse";
+
+    /**
+     * The size of the queue used to hold commit records that the Feeder
+     * uses to request acknowledgment from an Arbiter.
+     *

+ * The Feeder attempts to put each commit record on the queue. If the record
+ * cannot be queued within a certain amount of time, the transaction will
+ * fail due to insufficient acks.
+ *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+ * <tr><td>{@value}</td><td>Integer</td><td>No</td><td>4096</td><td>0</td><td>-none-</td></tr>
+ * </table>
        + *

        + * + */ + public static final String ARBITER_OUTPUT_QUEUE_SIZE = + EnvironmentParams.REP_PARAM_PREFIX + "arbiterOutputQueueSize"; + + /** + * @hidden + * For internal use, to allow null as a valid value for the config + * parameter. + */ + public static final ReplicationConfig DEFAULT = + new ReplicationConfig(); + + /* Support conversion of a non-replicated environment to replicated. */ + private boolean allowConvert = false; + + /* + * The ReplicationNetworkConfig portion of the overall replication + * configuration. + * + * This field should be typed as ReplicationNetworkConfig, but because this + * class is serializable and field is not transient, javadoc wants to + * describe it as part of the javadoc output, but the fact that the type is + * hidden causes javadoc to encounter a NullPointerException. As a + * temporary measure, this class is typed as Object in order to avoid + * the javadoc error. When ReplicationNetworkConfig becomes public, the + * type of the field should be changed to ReplicationNetworkConfig. This + * will not cause any problems with cross-release serialization as long as + * we take care that only ReplicationNetworkConfig instances are assigned + * to this field. + */ + private Object repNetConfig = ReplicationNetworkConfig.createDefault(); + + /* A ProgressListener for tracking this node's syncups. */ + private transient + ProgressListener syncupProgressListener; + + private transient LogFileRewriteListener logRewriteListener; + + private transient FeederFilter feederFilter; + + private transient StreamAuthenticator authenticator; + + /** + * Creates a ReplicationConfig initialized with the system default + * settings. Defaults are documented with the string constants in this + * class. + */ + public ReplicationConfig() { + super(); + } + + /** + * Creates a ReplicationConfig initialized with the system default + * settings and the specified group name, node name, and hostname/port + * values. + * + *

        Note that the node name is immutable. Normally the host name should + * not be used as the node name, unless you intend to reuse the host + * name when a machine fails and is replaced, or the node is upgraded to + * new hardware.

        + * + * @param groupName the name for the replication group + * @param nodeName the name for this node + * @param hostPort the hostname and port for this node + * + * @see #setGroupName + * @see #setNodeName + */ + public ReplicationConfig(String groupName, + String nodeName, + String hostPort) { + super(); + setGroupName(groupName); + setNodeName(nodeName); + setNodeHostPort(hostPort); + } + + /** + * Creates a ReplicationConfig which includes the properties specified in + * the properties parameter. + * + * @param properties Supported properties are described as the string + * constants in this class. + * + * @throws IllegalArgumentException If any properties read from the + * properties parameter are invalid. + */ + public ReplicationConfig(Properties properties) + throws IllegalArgumentException { + + super(properties, true /* validateParams */); + propagateRepNetProps(); + } + + /** + * Internal use only, from RepConfigManager. + */ + ReplicationConfig(Properties properties, boolean validateParams) + throws IllegalArgumentException { + + super(properties, validateParams); + propagateRepNetProps(); + } + + /** + * Gets the name associated with the replication group. + * + * @return the name of this replication group. + */ + public String getGroupName() { + return DbConfigManager.getVal(props, RepParams.GROUP_NAME); + } + + /** + * Sets the name for the replication group. + *

        + * The name should consist of letters, digits, and/or hyphen ("-"), + * underscore ("_"), or period ("."). + * + * @param groupName the string representing the name + * + * @return this + * + * @throws IllegalArgumentException If the string name is not valid + */ + public ReplicationConfig setGroupName(String groupName) + throws IllegalArgumentException { + + setGroupNameVoid(groupName); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setGroupNameVoid(String groupName) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, RepParams.GROUP_NAME, groupName, + validateParams); + } + + /** + * For internal use only. + * + * Returns a boolean that specifies if we need to convert the existing logs + * to replicated format. + * + * @return true if we want to convert the existing logs to replicated + * format + */ + boolean getAllowConvert() { + return allowConvert; + } + + /** + * For internal use only. + * + * If set to true, this environment should be converted to replicated + * format. + * + * @param allowConvert if true, this environment should be converted to + * replicated format. + */ + void setAllowConvert(boolean allowConvert) { + this.allowConvert = allowConvert; + } + + /** + * Returns the unique name associated with this node. + * + * @return the node name + */ + public String getNodeName() { + return DbConfigManager.getVal(props, RepParams.NODE_NAME); + } + + /** + * Sets the name to be associated with this node. It must be unique within + * the group. When the node is instantiated and joins the replication + * group, a check is done to ensure that the name is unique, and a {@link + * RestartRequiredException} is thrown if it is not. + *

        + * The name should consist of letters, digits, and/or hyphen ("-"), + * underscore ("_"), or period ("."). + * + *

        Note that the node name is immutable. Normally the host name should + * not be used as the node name, unless you intend to reuse the host + * name when a machine fails and is replaced, or the node is upgraded to + * new hardware.

        + * + * @param nodeName the node name for this replicated environment. + * + * @return this + * @throws IllegalArgumentException If the name is not valid + */ + public ReplicationConfig setNodeName(String nodeName) + throws IllegalArgumentException { + + setNodeNameVoid(nodeName); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeNameVoid(String nodeName) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, RepParams.NODE_NAME, nodeName, + validateParams); + } + + /** + * Returns the {@link NodeType} of this node. + * + * @return the node type + */ + public NodeType getNodeType() { + return RepParams.NODE_TYPE.getEnumerator + (DbConfigManager.getVal(props, RepParams.NODE_TYPE)); + } + + /** + * Sets the type of this node. + * + * @param nodeType the node type + * + * @return this + */ + public ReplicationConfig setNodeType(NodeType nodeType){ + setNodeTypeVoid(nodeType); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeTypeVoid(NodeType nodeType){ + DbConfigManager.setVal + (props, RepParams.NODE_TYPE, nodeType.name(), validateParams); + } + + /** + * Returns the hostname and port associated with this node. The hostname + * and port combination are denoted by a string of the form: + *
        +     *   hostname:port
        +     * 
        + * @return the hostname and port string. + * + * @see ReplicationConfig#NODE_HOST_PORT + */ + public String getNodeHostPort() { + return DbConfigManager.getVal(props, RepParams.NODE_HOST_PORT); + } + + /** + * Sets the hostname and port associated with this node. The hostname + * and port combination are denoted by a string of the form: + *
        +     *  hostname[:port]
        +     * 
        + * The port must be outside the range of "Well Known Ports" + * (zero through 1023). + * + * @param hostPort the string containing the hostname and port as above. + * + * @return this + * + * @see ReplicationConfig#NODE_HOST_PORT + */ + public ReplicationConfig setNodeHostPort(String hostPort) { + setNodeHostPortVoid(hostPort); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeHostPortVoid(String hostPort) { + DbConfigManager.setVal(props, RepParams.NODE_HOST_PORT, hostPort, + validateParams); + } + + /** + * Returns the configured replica timeout value. + * + * @return the timeout in milliseconds + */ + public long getReplicaAckTimeout(TimeUnit unit) { + return DbConfigManager.getDurationVal + (props, RepParams.REPLICA_ACK_TIMEOUT, unit); + } + + /** + * Set the replica commit timeout. + * + * @param replicaAckTimeout time in milliseconds + * + * @return this + */ + public ReplicationConfig setReplicaAckTimeout(long replicaAckTimeout, + TimeUnit unit) { + setReplicaAckTimeoutVoid(replicaAckTimeout, unit); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReplicaAckTimeoutVoid(long replicaAckTimeout, + TimeUnit unit) { + DbConfigManager.setDurationVal + (props, RepParams.REPLICA_ACK_TIMEOUT, replicaAckTimeout, unit, + validateParams); + } + + /** + * Returns the maximum acceptable clock skew between this Replica and its + * Feeder, which is the node that is the source of its replication stream. + * + * @return the max permissible clock skew + */ + public long getMaxClockDelta(TimeUnit unit) { + return DbConfigManager.getDurationVal(props, RepParams.MAX_CLOCK_DELTA, + unit); + } + + /** + * Sets the maximum acceptable clock skew between this Replica and its + * Feeder, which is the node that is the source of its replication + * stream. This value is checked whenever a Replica establishes a + * connection to its replication stream source. The connection is abandoned + * if the clock skew is larger than this value. + * + * @param maxClockDelta the maximum acceptable clock skew + * + * @return this + * + * @throws IllegalArgumentException if the value is not a positive integer + */ + public ReplicationConfig setMaxClockDelta(long maxClockDelta, + TimeUnit unit) + throws IllegalArgumentException { + + setMaxClockDeltaVoid(maxClockDelta, unit); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxClockDeltaVoid(long maxClockDelta, TimeUnit unit) + throws IllegalArgumentException { + + DbConfigManager.setDurationVal(props, RepParams.MAX_CLOCK_DELTA, + maxClockDelta, unit, validateParams); + } + + /** + * Sets the consistency policy to be associated with the configuration. + * This policy acts as the default policy used to govern the consistency + * requirements when starting new transactions. See the {@link overview on + * consistency in replicated systems} for more background. + *

        + * @param policy the consistency policy to be set for this config. + * + * @return this + */ + public ReplicationConfig + setConsistencyPolicy(ReplicaConsistencyPolicy policy) { + + setConsistencyPolicyVoid(policy); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setConsistencyPolicyVoid(ReplicaConsistencyPolicy policy) { + + DbConfigManager.setVal(props, + RepParams.CONSISTENCY_POLICY, + RepUtils.getPropertyString(policy), + validateParams); + } + + /** + * Returns the default consistency policy associated with the + * configuration. + *

        + * If the user does not set the default consistency policy through {@link + * ReplicationConfig#setConsistencyPolicy}, the system will use the policy + * defined by {@link ReplicationConfig#CONSISTENCY_POLICY}. + * + * @return the consistency policy currently associated with this config. + */ + @Override + public ReplicaConsistencyPolicy getConsistencyPolicy() { + String propertyValue = + DbConfigManager.getVal(props, + RepParams.CONSISTENCY_POLICY); + return RepUtils.getReplicaConsistencyPolicy(propertyValue); + } + + @Override + public ReplicationConfig setConfigParam(String paramName, String value) + throws IllegalArgumentException { + + if (ReplicationNetworkConfig.getRepNetPropertySet(). + contains(paramName)) { + getRepNetConfig().setConfigParam(paramName, value); + } else { + DbConfigManager.setConfigParam(props, + paramName, + value, + false, /* require mutability. */ + validateParams, + true, /* forReplication */ + true); /* verifyForReplication */ + } + return this; + } + + /** + * @hidden SSL deferred + * Get the replication service net configuration associated with + * this ReplicationConfig. + */ + public ReplicationNetworkConfig getRepNetConfig() { + return (ReplicationNetworkConfig) repNetConfig; + } + + /** + * @hidden SSL deferred + * Set the replication service net configuration associated with + * this ReplicationConfig. + * + * @param netConfig the new ReplicationNetworkConfig to be associated + * with this ReplicationConfig. This must not be null. + * + * @throws IllegalArgumentException if the netConfig is null + */ + public ReplicationConfig setRepNetConfig( + ReplicationNetworkConfig netConfig) { + + setRepNetConfigVoid(netConfig); + return this; + } + + /** + * @hidden + * For bean editors + */ + public void setRepNetConfigVoid(ReplicationNetworkConfig netConfig) { + if (netConfig == null) { + throw new IllegalArgumentException("netConfig may not be null"); + } + repNetConfig = netConfig; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public ReplicationConfig clone() { + try { + ReplicationConfig copy = (ReplicationConfig) super.clone(); + copy.setRepNetConfig(getRepNetConfig().clone()); + return copy; + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * @hidden + * For use by this class and by ReplicatedEnvironment.setupRepConfig() + * Moves any properties that belong to ReplicationNetworkConfig to + * repNetConfig. + * This is intended to be called after a bulk property load. + */ + void propagateRepNetProps() { + + /* If there is no current RepNetConfig, simply adopt the new config. */ + final ReplicationNetworkConfig rnConfig = getRepNetConfig(); + if (rnConfig == null) { + setRepNetConfig(ReplicationNetworkConfig.create(props)); + return; + } + + /* + * Construct a new properties set that includes both the properties + * that we hold directly, plus any properties stored on an existing + * repNetConfig object. Our properties will override those on + * repNetConfig. + */ + final Properties combProps = new Properties(rnConfig.getProps()); + for (String propName : props.stringPropertyNames()) { + combProps.setProperty(propName, props.getProperty(propName)); + } + + /* + * Create a new ReplicationNetworkConfig instance based on the combined + * properties. 
+ */
+        ReplicationNetworkConfig newRepNetConfig =
+            ReplicationNetworkConfig.create(combProps);
+
+        /*
+         * If the type of the config object did not change, there may be
+         * non-property fields that should be retained from the original,
+         * so use the original object and just change the properties.
+         */
+        if (newRepNetConfig.getClass() == repNetConfig.getClass()) {
+            rnConfig.applyRepNetProperties(combProps);
+        } else {
+            setRepNetConfig(newRepNetConfig);
+        }
+    }
+
+    /**
+     * @hidden
+     *
+     * For internal use only: Internal convenience method.
+     *
+     * Returns the set of sockets associated with helper nodes. This method
+     * should only be used when the configuration object is known to have an
+     * authoritative value for the helper hosts values. In a replication node,
+     * the je.properties file may override the values in this configuration
+     * object.
+     *
+     * @return the set of helper sockets, returns an empty set if there are no
+     * helpers.
+     */
+    public Set<InetSocketAddress> getHelperSockets() {
+        return HostPortPair.getSockets(getHelperHosts());
+    }
+
+    /**
+     * @hidden
+     * Internal convenience method for returning a replication socket.
+     *
+     * This method should only be used when the configuration object is known
+     * to have an authoritative value for its socket value. In a replication
+     * node, the je.properties file may override the values in this
+     * configuration object.
+     */
+    public InetSocketAddress getNodeSocketAddress() {
+
+        return new InetSocketAddress(getNodeHostname(), getNodePort());
+    }
+
+    /**
+     * Returns the hostname component of the nodeHost property.
+     *
+     * @return the hostname string
+     */
+    public String getNodeHostname() {
+        String hostAndPort =
+            DbConfigManager.getVal(props, RepParams.NODE_HOST_PORT);
+        int colonToken = hostAndPort.indexOf(":");
+
+        return (colonToken >= 0) ?
+            hostAndPort.substring(0, colonToken) : hostAndPort;
+    }
+
+    /**
+     * Returns the port component of the nodeHost property.
+     *
+     * @return the port number
+     */
+    public int getNodePort() {
+        String hostAndPort =
+            DbConfigManager.getVal(props, RepParams.NODE_HOST_PORT);
+        int colonToken = hostAndPort.indexOf(":");
+
+        String portString = (colonToken >= 0) ?
+            hostAndPort.substring(colonToken + 1) :
+            DbConfigManager.getVal(props, RepParams.DEFAULT_PORT);
+
+        return Integer.parseInt(portString);
+    }
+
+    /**
+     * Configure the environment to make periodic calls to a {@link
+     * ProgressListener} to provide feedback on replication stream sync-up.
+     * The ProgressListener.progress() method is called at different stages of
+     * the syncup process. See {@link SyncupProgress} for information about
+     * those stages.
+     *

+ * When using progress listeners, review the information at {@link
+ * ProgressListener#progress} to avoid any unintended disruption to
+ * replication stream syncup.
+ * @param progressListener The ProgressListener to callback during
+ * environment instantiation (syncup).
+ * @see Replication Group Life Cycle
+ * @since 5.0
+ */
+    public ReplicationConfig setSyncupProgressListener
+        (final ProgressListener<SyncupProgress> progressListener) {
+        setSyncupProgressListenerVoid(progressListener);
+        return this;
+    }
+
+    /**
+     * @hidden
+     * The void return setter for use by Bean editors.
+     */
+    public void setSyncupProgressListenerVoid
+        (final ProgressListener<SyncupProgress> progressListener) {
+        this.syncupProgressListener = progressListener;
+    }
+
+    /**
+     * Return the ProgressListener to be used at this environment startup.
+     */
+    public ProgressListener<SyncupProgress> getSyncupProgressListener() {
+        return syncupProgressListener;
+    }
+
+    /**
+     * @hidden
+     * Installs a callback to be notified when JE is about to modify previously
+     * written log files.
+     */
+    public ReplicationConfig setLogFileRewriteListener
+        (final LogFileRewriteListener listener) {
+        setLogFileRewriteListenerVoid(listener);
+        return this;
+    }
+
+    /**
+     * @hidden
+     * The void return setter for use by Bean editors.
+     */
+    public void setLogFileRewriteListenerVoid(final LogFileRewriteListener l) {
+        logRewriteListener = l;
+    }
+
+    /** @hidden */
+    public LogFileRewriteListener getLogFileRewriteListener() {
+        return logRewriteListener;
+    }
+
+    /**
+     * @hidden
+     *
+     * Configures a filter object that is transmitted to the remote Feeder as
+     * part of the replica feeder syncup. The remote feeder then invokes this
+     * filter on each record before it sends the record to the replica. The
+     * filter can be used to filter out replication records at the feeder
+     * itself, thus eliminating the feeder to replica transmission overhead
+     * for records in which it has no interest.
+     *
+     */
+    public ReplicationConfig setFeederFilter(final FeederFilter feederFilter) {
+        setFeederFilterVoid(feederFilter);
+        return this;
+    }
+
+    /** @hidden */
+    public void setFeederFilterVoid(final FeederFilter feederFilter) {
+        this.feederFilter = feederFilter;
+    }
+
+    /** @hidden */
+    public FeederFilter getFeederFilter() {
+        return feederFilter;
+    }
+
+    /**
+     * @hidden
+     *
+     * Sets the feeder authenticator.
+     *
+     * @param authenticator the feeder authenticator
+     */
+    public ReplicationConfig setAuthenticator(
+        final StreamAuthenticator authenticator) {
+
+        setAuthenticatorVoid(authenticator);
+        return this;
+    }
+
+    /**
+     * @hidden
+     *
+     * Sets the feeder authenticator.
+     *
+     * @param authenticator feeder authenticator
+     */
+    public void setAuthenticatorVoid(final StreamAuthenticator authenticator) {
+        this.authenticator = authenticator;
+    }
+
+    /**
+     * @hidden
+     *
+     * Returns the feeder authenticator.
+     *
+     * @return feeder authenticator, null if no feeder authenticator is
+     * available.
+     */
+    public StreamAuthenticator getAuthenticator() {
+        return authenticator;
+    }
+
+    /**
+     * @hidden
+     * For internal use only.
+     *
+     * Performs the checks needed to ensure that this is a valid replicated
+     * environment configuration. This method must only be invoked after all
+     * the appropriate fields are set.
+ */ + public void verify() throws IllegalArgumentException { + if ((getGroupName() == null) || "".equals(getGroupName())) { + throw new IllegalArgumentException("Missing group name"); + } + + if ((getNodeName() == null) || "".equals(getNodeName())){ + throw new IllegalArgumentException("Missing node name"); + } + + if ((getNodeHostPort() == null) || "".equals(getNodeHostPort())) { + throw new IllegalArgumentException("Missing node host"); + } + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationConfigBeanInfo.java b/src/com/sleepycat/je/rep/ReplicationConfigBeanInfo.java new file mode 100644 index 0000000..75d7c55 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationConfigBeanInfo.java @@ -0,0 +1,35 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class ReplicationConfigBeanInfo + extends ReplicationMutableConfigBeanInfo { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(ReplicationConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(ReplicationConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationGroup.java b/src/com/sleepycat/je/rep/ReplicationGroup.java new file mode 100644 index 0000000..444abce --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationGroup.java @@ -0,0 +1,202 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.je.rep.impl.RepGroupImpl; + +/** + * An administrative view of the collection of nodes that form the replication + * group. Can be obtained from a {@link ReplicatedEnvironment} or a {@link + * com.sleepycat.je.rep.util.ReplicationGroupAdmin}. + */ +public class ReplicationGroup { + + /* All methods delegate to the group implementation. */ + final RepGroupImpl repGroupImpl; + + /** + * @hidden + * For internal use only + * Used to wrap the actual group object implementation. + */ + public ReplicationGroup(RepGroupImpl repGroupImpl) { + this.repGroupImpl = repGroupImpl; + } + + /** + * Returns the name associated with the group. + * + * @return the name of the replication group. + */ + public String getName() { + return repGroupImpl.getName(); + } + + /** + * Returns the set of all nodes in the group. The return value includes + * ELECTABLE, MONITOR, and SECONDARY nodes. + * + *
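+ * <p>
+ * A sketch of enumerating the group, assuming {@code repEnv} is an open
+ * ReplicatedEnvironment handle:
+ * <pre>
+ *  ReplicationGroup group = repEnv.getGroup();
+ *  for (ReplicationNode node : group.getNodes()) {
+ *      System.out.println(node.getName() + " is of type " + node.getType());
+ *  }
+ * </pre>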

Note that SECONDARY nodes will only be included in the result when + * this method is called for a replicated environment that is the master. + * + * @return the set of all nodes + * @see NodeType + */ + /* + * TODO: EXTERNAL is hidden for now. The doc needs to be updated to include + * EXTERNAL when it becomes public. + */ + public Set<ReplicationNode> getNodes() { + final Set<ReplicationNode> result = new HashSet<>(); + repGroupImpl.includeMembers(null, result); + return result; + } + + /** + * Returns the subset of nodes in the group with replicated environments + * that participate in elections and can become masters, ignoring node + * priority. The return value includes ELECTABLE nodes, and excludes + * MONITOR and SECONDARY nodes. + * + * @return the set of electable nodes + * @see NodeType + */ + /* + * TODO: EXTERNAL is hidden for now. The doc needs to be updated to include + * EXTERNAL when it becomes public. + */ + public Set<ReplicationNode> getElectableNodes() { + final Set<ReplicationNode> result = new HashSet<>(); + repGroupImpl.includeElectableMembers(result); + return result; + } + + /** + * Returns the subset of nodes in the group with replicated environments + * that do not participate in elections and cannot become masters. The + * return value includes SECONDARY nodes, and excludes ELECTABLE and + * MONITOR nodes. + * + *

Note that SECONDARY nodes will only be returned when this method is + * called for a replicated environment that is the master. + * + * @return the set of secondary nodes + * @see NodeType + * @since 6.0 + */ + /* + * TODO: EXTERNAL is hidden for now. The doc needs to be updated to include + * EXTERNAL when it becomes public. + */ + public Set<ReplicationNode> getSecondaryNodes() { + final Set<ReplicationNode> result = new HashSet<>(); + repGroupImpl.includeSecondaryMembers(result); + return result; + } + + /** + * Returns the subset of nodes in the group that monitor group membership + * but do not maintain replicated environments. The return value includes + * MONITOR nodes, but excludes ELECTABLE and SECONDARY nodes. + * + * @return the set of monitor nodes + * @see NodeType + */ + /* + * TODO: EXTERNAL is hidden for now. The doc needs to be updated to include + * EXTERNAL when it becomes public. + */ + public Set<ReplicationNode> getMonitorNodes() { + final Set<ReplicationNode> result = new HashSet<>(); + repGroupImpl.includeMonitorMembers(result); + return result; + } + + /** + * Returns the subset of nodes in the group that store replication data. + * The return value includes all ELECTABLE and SECONDARY nodes, but + * excludes MONITOR nodes. + * + *

Note that SECONDARY nodes will only be included in the result when + * this method is called for a replicated environment that is the master. + * + * @return the set of data nodes + * @see NodeType + * @since 6.0 + */ + /* + * TODO: EXTERNAL is hidden for now. The doc needs to be updated to include + * EXTERNAL when it becomes public. + */ + public Set<ReplicationNode> getDataNodes() { + final Set<ReplicationNode> result = new HashSet<>(); + repGroupImpl.includeDataMembers(result); + return result; + } + + /** + * Returns the subset of nodes in the group that participate in elections + * but do not have a copy of the data and cannot become masters. + * The return value includes ARBITER nodes. + * + * @return the set of arbiter nodes + * @see NodeType + */ + public Set<ReplicationNode> getArbiterNodes() { + final Set<ReplicationNode> result = new HashSet<>(); + repGroupImpl.includeArbiterMembers(result); + return result; + } + + /** + * Get administrative information about a node by its node name. + * + *
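Taken together, the accessors above allow a quick survey of group membership. A hedged sketch, assuming an already-open ReplicatedEnvironment handle; its getGroup() method is the usual way to obtain a ReplicationGroup.

    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.ReplicationGroup;
    import com.sleepycat.je.rep.ReplicationNode;

    final class GroupSurvey {
        static void printMembership(ReplicatedEnvironment repEnv) {
            ReplicationGroup group = repEnv.getGroup();
            System.out.println("Group: " + group.getName());
            // Electable nodes vote in elections and may become master.
            for (ReplicationNode node : group.getElectableNodes()) {
                System.out.println("  electable: " + node.getName()
                                   + " @ " + node.getSocketAddress());
            }
            // Monitor nodes track membership but hold no replicated data.
            for (ReplicationNode node : group.getMonitorNodes()) {
                System.out.println("  monitor:   " + node.getName());
            }
        }
    }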

Note that SECONDARY nodes will only be returned when this method is + * called for a replicated environment that is the master. + * + * @param nodeName the node name to be used in the lookup + * + * @return an administrative view of the node associated with nodeName, or + * null if there is no such node currently in the group + */ + /* + * TODO: EXTERNAL is hidden for now. The doc needs to be updated to include + * EXTERNAL when it becomes public. + */ + public ReplicationNode getMember(String nodeName) { + return repGroupImpl.getMember(nodeName); + } + + /** + * @hidden + * Internal use only. + * + * Returns the underlying group implementation object. + */ + public RepGroupImpl getRepGroupImpl() { + return repGroupImpl; + } + + /** + * Returns a formatted version of the information held in a + * ReplicationGroup. + */ + @Override + public String toString() { + return repGroupImpl.toString(); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationMutableConfig.java b/src/com/sleepycat/je/rep/ReplicationMutableConfig.java new file mode 100644 index 0000000..1a85884 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationMutableConfig.java @@ -0,0 +1,722 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.io.Serializable; +import java.util.Enumeration; +import java.util.Properties; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; + +/** + * Specifies the attributes that may be changed after a {@link + * ReplicatedEnvironment} has been created. {@code ReplicationMutableConfig} is + * a parameter to {@link ReplicatedEnvironment#setMutableConfig} and is + * returned by {@link ReplicatedEnvironment#getMutableConfig}. + */ +public class ReplicationMutableConfig implements Cloneable, Serializable { + private static final long serialVersionUID = 1L; + + /* + * Note: all replicated parameters should start with + * EnvironmentParams.REP_PARAMS_PREFIX, which is "je.rep.", + * see SR [#19080]. + */ + + /** + * A boolean flag; if set to true, an Arbiter may acknowledge a transaction + * when a replication node is not available. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   Boolean   Yes       True

+ */ + public static final String ALLOW_ARBITER_ACK = + EnvironmentParams.REP_PARAM_PREFIX + "allowArbiterAck"; + + /** + * Identifies the Primary node in a two node group. See the discussion of + * issues when configuring two node groups. + *

+ * Name       Type      Mutable   Default
+ * {@value}   Boolean   Yes       False

        + */ + public static final String DESIGNATED_PRIMARY = + EnvironmentParams.REP_PARAM_PREFIX + "designatedPrimary"; + + /** + * An escape mechanism to modify the way in which the number of electable + * nodes, and consequently the quorum requirements for elections and commit + * acknowledgments, is calculated. The override is accomplished by + * specifying the quorum size via this mutable configuration parameter. + *

        + * When this parameter is set to a non-zero value at a member node, the + * member will use this value as the electable group size, instead of using + * the metadata stored in the RepGroup database for its quorum + * calculations. This parameter's value should be set to the number of + * electable nodes known to be available. The default value is zero, which + * indicates normal operation with the electable group size being + * calculated from the metadata. + *

+ * Please keep in mind that this is an escape mechanism, intended only for + * use in exceptional circumstances and to be used with care. With the + * override in effect, JE HA is no longer maintaining quorum requirements + * automatically, so there is the possibility that the supposedly + * unavailable nodes could elect their own Master, which would result in + * diverging sets of changes being made to the same environment by multiple + * Masters. It is essential to ensure that the problematic nodes are in + * fact down before making this temporary configuration change. + * + * See the discussion in "Appendix: Managing a Failure of the Majority". + *
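To make the escape mechanism concrete (the parameter table follows below): if three nodes of a five-node group are verified to be down, the two survivors can be told to treat themselves as the entire electable group. A sketch under stated assumptions, with repEnv an already-open ReplicatedEnvironment handle and getRepMutableConfig()/setRepMutableConfig() the replication analogues of the standard mutable-config accessors:

    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.ReplicationMutableConfig;

    final class QuorumOverrideSketch {
        static void shrinkQuorum(ReplicatedEnvironment repEnv) {
            // Two of five electable nodes remain; let them form a quorum.
            ReplicationMutableConfig mc = repEnv.getRepMutableConfig();
            mc.setElectableGroupSizeOverride(2);
            repEnv.setRepMutableConfig(mc);
        }

        static void restoreQuorum(ReplicatedEnvironment repEnv) {
            // Zero restores the normal, metadata-based calculation.
            ReplicationMutableConfig mc = repEnv.getRepMutableConfig();
            mc.setElectableGroupSizeOverride(0);
            repEnv.setRepMutableConfig(mc);
        }
    }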

+ * Name       Type      Mutable   Default
+ * {@value}   Integer   Yes       0

        + * + * @see QuorumPolicy + * @see com.sleepycat.je.Durability.ReplicaAckPolicy + */ + public static final String ELECTABLE_GROUP_SIZE_OVERRIDE = + EnvironmentParams.REP_PARAM_PREFIX + "electableGroupSizeOverride"; + + /** + * The election priority associated with this node. The election algorithm + * for choosing a new master will pick the participating node that has the + * most current set of log files. When there is a tie, the election + * priority is used as a tie-breaker to select amongst these nodes. + *

+ * A priority of zero is used to ensure that this node is never elected + * master, even if it has the most up-to-date log files. Note that the node + * still votes for a Master and participates in quorum requirements. Please + * use this option with care, since it means that some node with less + * current log files could be elected master. As a result, this node would + * be forced to roll back committed data and must be prepared to handle any + * {@link RollbackException} exceptions that might be thrown. + * + *

+ * Name       Type      Mutable   Default   Minimum   Maximum
+ * {@value}   Integer   Yes       1         0         Integer.MAX_VALUE
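A short sketch of the zero-priority pattern described above, under the same assumptions as the earlier examples (repEnv is an open ReplicatedEnvironment handle):

    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.ReplicationMutableConfig;

    final class NeverMasterSketch {
        static void demoteToVoterOnly(ReplicatedEnvironment repEnv) {
            // The node keeps voting and counts toward quorum,
            // but can never itself be elected master.
            ReplicationMutableConfig mc = repEnv.getRepMutableConfig();
            mc.setNodePriority(0);
            repEnv.setRepMutableConfig(mc);
        }
    }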

+ * + * @see RollbackException + */ + public static final String NODE_PRIORITY = + EnvironmentParams.REP_PARAM_PREFIX + "node.priority"; + + /** + * If true, JE HA (replication) will flush all committed transactions to + * disk at the specified time interval. This is of interest because the + * default durability for replicated transactions is {@link + * Durability#COMMIT_NO_SYNC}. The default for this behavior is true. + *

        + * When using {@link Durability#COMMIT_NO_SYNC}, continued activity will + * naturally cause the steady flush of committed transactions, but a pause + * in activity may cause the latest commits to stay in memory. In such a + * case, it is unlikely but possible that all members of the replication + * group have these last transactions in memory and that no members have + * persisted them to disk. A catastrophic failure of all nodes in the + * replication group would cause a loss of these transactions, in this + * unlikely scenario. This background flush task will reduce such a + * possibility. + *

+ * Note that enabling this feature when using {@link + * Durability#COMMIT_NO_SYNC} does not constitute a guarantee that + * updates made by a transaction are persisted. For an explicit guarantee, + * transactions should use {@link Durability#COMMIT_SYNC} or {@link + * Durability#COMMIT_WRITE_NO_SYNC}. These more stringent, persistent + * Durability options can be set at the environment or per-transaction + * scope. Using one of these Durability settings for a given transaction + * will also flush all commits that occurred earlier in time. + *
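For the explicit guarantee mentioned above, a single transaction can be committed with a more stringent durability, which also flushes every commit that occurred earlier in time. A minimal sketch (the writes themselves are elided; repEnv is an assumed open ReplicatedEnvironment handle):

    import com.sleepycat.je.Durability;
    import com.sleepycat.je.Transaction;
    import com.sleepycat.je.TransactionConfig;
    import com.sleepycat.je.rep.ReplicatedEnvironment;

    final class DurableCommitSketch {
        static void commitDurably(ReplicatedEnvironment repEnv) {
            TransactionConfig txnConfig = new TransactionConfig();
            txnConfig.setDurability(Durability.COMMIT_SYNC);
            Transaction txn = repEnv.beginTransaction(null, txnConfig);
            // ... perform writes under txn ...
            txn.commit();   // flushes this commit and all earlier ones
        }
    }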

+ * Name       Type      Mutable   Default
+ * {@value}   Boolean   No        true

+ * + * @deprecated as of 7.2. Log flushing can be disabled by setting {@link + * EnvironmentConfig#LOG_FLUSH_SYNC_INTERVAL} and {@link + * EnvironmentConfig#LOG_FLUSH_NO_SYNC_INTERVAL} to zero. For compatibility + * with earlier releases, if this parameter is specified as false, no log + * flushing will be performed; in this case, {@link + * EnvironmentConfig#LOG_FLUSH_SYNC_INTERVAL} and {@link + * EnvironmentConfig#LOG_FLUSH_NO_SYNC_INTERVAL} may not also be specified. + */ + public static final String RUN_LOG_FLUSH_TASK = + EnvironmentParams.REP_PARAM_PREFIX + "runLogFlushTask"; + + /** + * The interval at which JE HA will flush the log buffer. + *

+ * Name       Type               Mutable   Default   Minimum   Maximum
+ * {@value}   {@link Duration}   Yes       5 min     1 s       -none-

        + * + * @see Time Duration + * Properties + * + * @deprecated as of 7.2. Replaced by {@link + * EnvironmentConfig#LOG_FLUSH_SYNC_INTERVAL}. For compatibility with + * earlier releases, if this parameter is specified its value will be used + * as the flush sync interval; in this case, {@link + * EnvironmentConfig#LOG_FLUSH_SYNC_INTERVAL} may not also be specified. + */ + public static final String LOG_FLUSH_TASK_INTERVAL = + EnvironmentParams.REP_PARAM_PREFIX + "logFlushTaskInterval"; + + /** + * The maximum number of most recently used database handles that + * are kept open during the replay of the replication stream. + * + *

+ * Name       Type      Mutable   Default   Minimum   Maximum
+ * {@value}   Int       Yes       10        1         -none-

        + * + * @since 5.0.38 + */ + public static final String REPLAY_MAX_OPEN_DB_HANDLES = + EnvironmentParams.REP_PARAM_PREFIX + "replayMaxOpenDbHandles"; + + /** + * The string identifying one or more helper host and port pairs in + * this format: + *
        +     * hostname[:port][,hostname[:port]]*
        +     * 
        + *
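For example, with hypothetical host names, the following sets two helpers, the second using the default port (via the setHelperHosts method defined later in this class; repEnv is an assumed open ReplicatedEnvironment handle):

    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.ReplicationMutableConfig;

    final class HelperHostsSketch {
        static void setHelpers(ReplicatedEnvironment repEnv) {
            ReplicationMutableConfig mc = repEnv.getRepMutableConfig();
            // Hypothetical hosts; the second entry uses the default port.
            mc.setHelperHosts("helper1.example.com:5001,helper2.example.com");
            repEnv.setRepMutableConfig(mc);
        }
    }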

+ * Name       Type      Mutable   Default
+ * {@value}   String    Yes       ""

        + * @see ReplicationMutableConfig#setHelperHosts + * @see ReplicationMutableConfig#getHelperHosts + */ + public static final String HELPER_HOSTS = + EnvironmentParams.REP_PARAM_PREFIX + "helperHosts"; + + /** + * The maximum amount of time that an inactive database handle is kept open + * during a replay of the replication stream. Handles that are inactive for + * more than this time period are automatically closed. Note that this does + * not impact any handles that may have been opened by the application. + * + *

+ * Name       Type               Mutable   Default   Minimum   Maximum
+ * {@value}   {@link Duration}   No        30 sec    1 sec     -none-

        + * + * @see Time Duration + * Properties + * + * @since 5.0.38 + */ + public static final String REPLAY_DB_HANDLE_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "replayOpenHandleTimeout"; + + /** + * @hidden + * + * For internal use only. + * + * The timeout specifies the amount of time that the + * {@link com.sleepycat.je.rep.util.ReplicationGroupAdmin#transferMaster + * ReplicationGroupAdmin.transferMastership} command can use to + * have the specified replica catch up with the original master. + *

        + * If the replica has not successfully caught up with the original + * master, the call to {@link + * com.sleepycat.je.rep.util.ReplicationGroupAdmin#transferMaster + * ReplicationGroupAdmin.transferMastership} will throw an exception. + *
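Because this is a hidden, string-valued time-duration parameter, it would normally be adjusted through the generic setConfigParam path rather than a dedicated setter. A hedged sketch; the two-minute value is illustrative only, and the "2 min" syntax assumes the JE time-duration property format referenced above:

    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.ReplicationMutableConfig;

    final class CatchupTimeoutSketch {
        static void extendCatchup(ReplicatedEnvironment repEnv) {
            ReplicationMutableConfig mc = repEnv.getRepMutableConfig();
            mc.setConfigParam(
                ReplicationMutableConfig.CATCHUP_MASTER_TIMEOUT, "2 min");
            repEnv.setRepMutableConfig(mc);
        }
    }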

+ * Name       Type               Mutable   Default   Minimum   Maximum
+ * {@value}   {@link Duration}   Yes       100 s     1 s       -none-

        + * + * @see Time Duration + * Properties + */ + public static final String CATCHUP_MASTER_TIMEOUT = + EnvironmentParams.REP_PARAM_PREFIX + "catchupMasterTimeout"; + + static { + + /* + * Force loading when a ReplicationConfig is used with strings and + * an environment has not been created. + */ + @SuppressWarnings("unused") + ConfigParam forceLoad = RepParams.GROUP_NAME; + } + + /** + * @hidden + * Storage for replication related properties. + */ + protected Properties props; + + /* For unit testing only: only ever set false when testing. */ + transient boolean validateParams = true; + + /** + * Create a ReplicationMutableConfig initialized with the system + * default settings. Parameter defaults are documented with the string + * constants in this class. + */ + public ReplicationMutableConfig() { + props = new Properties(); + } + + /** + * Used by ReplicationConfig to support construction from a property file. + * @param properties Hold replication related properties + */ + ReplicationMutableConfig(Properties properties, boolean validateParams) + throws IllegalArgumentException { + + this.validateParams = validateParams; + validateProperties(properties); + /* For safety, copy the passed in properties. */ + props = new Properties(); + props.putAll(properties); + } + + /** + * Fills in the properties calculated by the environment to the given + * config object. + */ + void fillInEnvironmentGeneratedProps(RepImpl repImpl) { + props.put(RepParams.DESIGNATED_PRIMARY.getName(), + Boolean.toString(repImpl.isDesignatedPrimary())); + props.put(RepParams.NODE_PRIORITY.getName(), + Integer.toString(getNodePriority())); + } + + /** + * @hidden + * For internal use only + */ + public void copyMutablePropsTo(ReplicationMutableConfig toConfig) { + + Properties toProps = toConfig.props; + Enumeration propNames = props.propertyNames(); + while (propNames.hasMoreElements()) { + String paramName = (String) propNames.nextElement(); + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + assert param != null; + if (param.isForReplication() && + param.isMutable()) { + String newVal = props.getProperty(paramName); + toProps.setProperty(paramName, newVal); + } + } + } + + /** + * If {@code isPrimary} is true, designate this node as a Primary. This + * setting only takes effect for electable nodes. The application must + * ensure that exactly one electable node is designated to be a Primary at + * any given time. Primary node configuration is only a concern when the + * group has two electable nodes, and there cannot be a simple + * majority. See the overview on configuring two + * node groups. + * + * @param isPrimary true if this node is to be made the Primary + * + * @return this + */ + public ReplicationMutableConfig setDesignatedPrimary(boolean isPrimary) { + setDesignatedPrimaryVoid(isPrimary); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDesignatedPrimaryVoid(boolean isPrimary) { + DbConfigManager.setBooleanVal(props, RepParams.DESIGNATED_PRIMARY, + isPrimary, validateParams); + } + + /** + * Determines whether this node is the currently designated Primary. See + * the overview on {@link issues around + * two node groups} + * @return true if this node is a Primary, false otherwise. + */ + public boolean getDesignatedPrimary() { + return DbConfigManager.getBooleanVal(props, + RepParams.DESIGNATED_PRIMARY); + } + + /** + * Returns the value associated with the override. 
A value of zero means + * that the number of electable nodes is determined as usual, that is, from + * the contents of the group metadata. + * + * @return the number of electable nodes as specified by the override + * + * @see #ELECTABLE_GROUP_SIZE_OVERRIDE + */ + public int getElectableGroupSizeOverride() { + return DbConfigManager. + getIntVal(props, RepParams.ELECTABLE_GROUP_SIZE_OVERRIDE); + } + + /** + * Sets the size used to determine the number of electable nodes. + * + * @param override the number of electable nodes. A value of zero means + * that the number of electable nodes is determined as usual, that is, from + * the contents of the group metadata. + * + * @return this + * + * @see #ELECTABLE_GROUP_SIZE_OVERRIDE + */ + public ReplicationMutableConfig + setElectableGroupSizeOverride(int override) { + + setElectableGroupSizeOverrideVoid(override); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setElectableGroupSizeOverrideVoid(int override) { + + DbConfigManager. + setIntVal(props, RepParams.ELECTABLE_GROUP_SIZE_OVERRIDE, override, + validateParams); + } + + /** + * Returns the election priority associated with the node. + * + * @return the priority for this node + * + * @see #NODE_PRIORITY + */ + public int getNodePriority() { + return DbConfigManager.getIntVal(props, RepParams.NODE_PRIORITY); + } + + /** + * Sets the election priority for the node. The algorithm for choosing a + * new master will pick the participating node that has the most current + * set of log files. When there is a tie, the priority is used as a + * tie-breaker to select amongst these nodes. + *

+ * A priority of zero is used to ensure that a node is never elected + * master, even if it has the most current set of files. Please use this + * option with caution, since it means that a node with less current log + * files could be elected master, potentially forcing this node to roll + * back data that had been committed. + * + * @param priority the priority to be associated with the node. It must be + * zero or a positive number. + * + * @see #NODE_PRIORITY + */ + public ReplicationMutableConfig setNodePriority(int priority) { + setNodePriorityVoid(priority); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodePriorityVoid(int priority) { + DbConfigManager.setIntVal(props, RepParams.NODE_PRIORITY, priority, + validateParams); + } + + /** + * Returns the string identifying one or more helper host and port pairs in + * this format:

        +     * hostname[:port][,hostname[:port]]*
        +     * 
+ * The port may be omitted if it is the default port. + * + * @return the string representing the host port pairs + * + */ + public String getHelperHosts() { + return DbConfigManager.getVal(props, RepParams.HELPER_HOSTS); + } + + /** + * Identifies one or more helper nodes by their host and port pairs in this + * format:
        +     * hostname[:port][,hostname[:port]]*
        +     * 
+ * If the port is omitted, the default port defined by XXX is used. + * + * @param hostsAndPorts the string representing the host and port pairs. + * + * @return this + */ + public ReplicationMutableConfig setHelperHosts(String hostsAndPorts) { + setHelperHostsVoid(hostsAndPorts); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setHelperHostsVoid(String hostsAndPorts) { + DbConfigManager.setVal + (props, RepParams.HELPER_HOSTS, hostsAndPorts, validateParams); + } + + /** + * Set this configuration parameter with this value. Values are validated + * before setting the parameter. + * + * @param paramName the configuration parameter name, one of the String + * constants in this class + * @param value the configuration value. + * + * @return this + * + * @throws IllegalArgumentException if the paramName or value is invalid. + */ + public ReplicationMutableConfig setConfigParam(String paramName, + String value) + throws IllegalArgumentException { + + DbConfigManager.setConfigParam(props, + paramName, + value, + true, /* require mutability. */ + validateParams, + true, /* forReplication */ + true); /* verifyForReplication */ + return this; + } + + /** + * Returns the value for this parameter. + * @param paramName a valid configuration parameter, one of the String + * constants in this class. + * @return the configuration value. + * + * @throws IllegalArgumentException if the paramName is invalid. + */ + public String getConfigParam(String paramName) + throws IllegalArgumentException { + + return DbConfigManager.getConfigParam(props, paramName); + } + + /** + * Validates a property bag passed in at construction time. + */ + void validateProperties(Properties checkProps) + throws IllegalArgumentException { + + /* Check that the properties have valid names and values */ + Enumeration<?> propNames = checkProps.propertyNames(); + while (propNames.hasMoreElements()) { + String name = (String) propNames.nextElement(); + /* Is this a valid property name? */ + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(name); + if (param == null) { + throw new IllegalArgumentException + (name + " is not a valid JE environment configuration"); + } + /* Is this a valid property value? */ + if (validateParams) { + param.validateValue(checkProps.getProperty(name)); + } + } + } + + /** + * @hidden + * For internal use only. + * Access the internal property bag, used during startup. + */ + public Properties getProps() { + return props; + } + + /** + * List the configuration parameters and values that have been set + * in this configuration object. + */ + @Override + public String toString() { + return props.toString(); + } + + /** + * For unit testing only + */ + void setOverrideValidateParams(boolean validateParams) { + this.validateParams = validateParams; + } + + /** + * @hidden + * For testing only + */ + public boolean getValidateParams() { + return validateParams; + } + + /** + * @hidden + * For internal use only. + * Overrides Object.clone() to clone all properties, used by this class and + * ReplicationConfig. 
+ */ + @Override + protected Object clone() + throws CloneNotSupportedException { + + ReplicationMutableConfig copy = + (ReplicationMutableConfig) super.clone(); + copy.props = (Properties) props.clone(); + return copy; + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationMutableConfigBeanInfo.java b/src/com/sleepycat/je/rep/ReplicationMutableConfigBeanInfo.java new file mode 100644 index 0000000..5700e9e --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationMutableConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +import com.sleepycat.util.ConfigBeanInfoBase; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class ReplicationMutableConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(ReplicationMutableConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(ReplicationMutableConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationNetworkConfig.java b/src/com/sleepycat/je/rep/ReplicationNetworkConfig.java new file mode 100644 index 0000000..9983913 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationNetworkConfig.java @@ -0,0 +1,611 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.Serializable; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.InstanceParams; +import com.sleepycat.je.rep.utilint.RepUtils; + +/** + * @hidden SSL deferred + * This is the root class for specifying the parameters that control + * replication network communication within a replicated environment. The + * parameters contained here are immutable. + *

        + * To change the default settings for a replicated environment, an application + * creates a configuration object, customizes settings and uses it for {@link + * ReplicatedEnvironment} construction. Except as noted, the set methods of + * this class perform only minimal validation of configuration values when the + * method is called, and value checking is deferred until the time a + * DataChannel factory is constructed. An IllegalArgumentException is thrown + * if the value is not valid for that attribute. + *

        + * ReplicationNetworkConfig follows precedence rules similar to those of + * {@link EnvironmentConfig}. + *

+ *   1. Configuration parameters specified in {@literal <environmentHome>/je.properties} take first precedence.
+ *   2. Configuration parameters set in the ReplicationNetworkConfig object used at {@code ReplicatedEnvironment} construction are next.
+ *   3. Any configuration parameters not set by the application are set to system defaults, described along with the parameter name String constants in this class.
+ *
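A sketch of the middle rung of this precedence ladder, using the create(Properties) factory defined later in this class; the property value shown is simply the documented default:

    import java.util.Properties;
    import com.sleepycat.je.rep.ReplicationNetworkConfig;

    final class NetConfigSketch {
        static ReplicationNetworkConfig fromProperties() {
            Properties netProps = new Properties();
            netProps.setProperty("je.rep.channelType", "basic");
            // create() returns a ReplicationBasicConfig for "basic"
            // and a ReplicationSSLConfig for "ssl".
            return ReplicationNetworkConfig.create(netProps);
        }
    }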

        + * + */ +public abstract class ReplicationNetworkConfig + implements Cloneable, Serializable { + + private static final long serialVersionUID = 1L; + + /* + * Note: all replicated parameters should start with + * EnvironmentParams.REP_PARAMS_PREFIX, which is "je.rep.", + * see SR [#19080]. + */ + + /* + * The following is currently undocumented: + * The channelType property may also take the value: + * custom + * custom indicates that the channel implementation is to be + * provided by the application. This can be done through the use of the + * combination of two configuration parameters + *

        +     *   {@link #CHANNEL_FACTORY_CLASS je.rep.channelFactoryClass}
        +     *   {@link #CHANNEL_FACTORY_PARAMS je.rep.channelFactoryParams}
        +     * 
+ */ + + /** + * Configures the type of communication channel to use. This property is + * not directly settable. It can be specified in a property file or + * property set passed to a create() method, or a direct instantiation of + * a class derived from this class may be used. When set through one of + * the create() methods or when read through the getChannelType() method, + * the valid values for this parameter are: + *
+ *   • basic
+ *   • ssl
+ * + * basic is the standard implementation, which uses ordinary, + * unencrypted communication, and is represented by the + * {@link ReplicationBasicConfig} class. + *

        + * ssl indicates that SSL is to be used for service + * communication. When using SSL, an instance of + * {@link ReplicationSSLConfig} must be used. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        "basic"

        + */ + public static final String CHANNEL_TYPE = + EnvironmentParams.REP_PARAM_PREFIX + "channelType"; + + /** + * @hidden + * A string identifying a class to instantiate as the data channel + * factory. Typical product use does not require this configuration + * parameter, but this allows a custom data channel factory to be supplied. + * If supplied, it must be a fully qualified Java class name for a class + * that implements the {@link DataChannelFactory} interface and + * provides a public constructor with an argument list of the form + * ( {@link InstanceParams} ) + *

+ * Note: The class instantiated from this parameter must + * be of the same configuration type as indicated by channelType(). + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

+ */ + public static final String CHANNEL_FACTORY_CLASS = + EnvironmentParams.REP_PARAM_PREFIX + "channelFactoryClass"; + + /** + * @hidden + * A string providing factory-specific data channel configuration + * parameters. The encoding of parameters within the string is determined + * by the specific factory class implementation. As examples, it may + * choose to join multiple strings with a delimiter character or may + * allow binary data to be hex-encoded. + * + * Note: This parameter is ignored unless + * {@link #CHANNEL_FACTORY_CLASS je.rep.channelFactoryClass} is set. + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

        + */ + public static final String CHANNEL_FACTORY_PARAMS = + EnvironmentParams.REP_PARAM_PREFIX + "channelFactoryParams"; + + /** + * A string providing a logging context identification string. This string + * is incorporated into log messages in order to help associate messages + * with the configuration context. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

+ */ + public static final String CHANNEL_LOG_NAME = + EnvironmentParams.REP_PARAM_PREFIX + "channelLogName"; + + /* The set of Replication properties specific to this class */ + private static Set<String> repNetLocalProperties; + static { + repNetLocalProperties = new HashSet<String>(); + repNetLocalProperties.add(CHANNEL_TYPE); + repNetLocalProperties.add(CHANNEL_LOG_NAME); + repNetLocalProperties.add(CHANNEL_FACTORY_CLASS); + repNetLocalProperties.add(CHANNEL_FACTORY_PARAMS); + /* Nail the set down */ + repNetLocalProperties = + Collections.unmodifiableSet(repNetLocalProperties); + } + + /* + * The set of Replication properties for this class and derived classes. + * It is created later, on demand, to deal with class loading ordering. + */ + private static Set<String> repNetAllProperties; + + static { + + /* + * Force loading when a ReplicationNetworkConfig is used and an + * environment has not been created. + */ + @SuppressWarnings("unused") + final ConfigParam forceLoad = RepParams.CHANNEL_TYPE; + } + + /* The properties for this configuration */ + protected final Properties props; + protected final boolean validateParams = true; + + /** + * Creates a ReplicationNetworkConfig which includes the properties + * specified in the named properties file. + * + * @param propFile a File from which the configuration properties will + * be read. + * + * @return an instance of a class derived from ReplicationNetworkConfig + * as indicated by the channelType property. + * + * @throws FileNotFoundException If the property file cannot be found + * @throws IllegalArgumentException If any properties read from the + * properties parameter are invalid. + */ + public static ReplicationNetworkConfig create(File propFile) + throws IllegalArgumentException, FileNotFoundException { + + return create(readProperties(propFile)); + } + + /** + * Creates a ReplicationNetworkConfig which includes the properties + * specified in the properties parameter. + * + * @param properties Supported properties are described as the string + * constants in this class. + * + * @return an instance of a class derived from ReplicationNetworkConfig + * as indicated by the channelType property. + * + * @throws IllegalArgumentException If any properties read from the + * properties parameter are invalid. + */ + public static ReplicationNetworkConfig create(Properties properties) + throws IllegalArgumentException { + + final String channelType = + DbConfigManager.getVal(properties, RepParams.CHANNEL_TYPE); + + if ("basic".equals(channelType)) { + return new ReplicationBasicConfig(properties); + } + if ("ssl".equals(channelType)) { + return new ReplicationSSLConfig(properties); + } + throw new IllegalArgumentException( + "Unknown channel type: " + channelType); + } + + /** + * Creates a default ReplicationNetworkConfig instance. + * + * @return an instance of a class derived from ReplicationNetworkConfig + * as indicated by the channelType property default. + */ + public static ReplicationNetworkConfig createDefault() { + + return new ReplicationBasicConfig(); + } + + /** + * Constructs a basic ReplicationNetworkConfig initialized with the system + * default settings. Defaults are documented with the string constants in + * this class. + */ + public ReplicationNetworkConfig() { + props = new Properties(); + } + + /** + * Constructs a basic ReplicationNetworkConfig initialized with the + * provided properties. 
+ * @param properties a set of properties with which to initialize the + * instance properties. + */ + public ReplicationNetworkConfig(Properties properties) { + props = new Properties(); + applyRepNetProperties(properties); + } + + /** + * Get the channel type setting for the replication service. + * + * @return the channel type + */ + public abstract String getChannelType(); + + /** + * Get the channel logging name setting for the replication service. + * + * @return the channel logging name + */ + public String getLogName() { + return DbConfigManager.getVal(props, RepParams.CHANNEL_LOG_NAME); + } + + /** + * Sets the channel logging name to be used for replication service access. + * + * @param logName the channel logging name to be used. + * + * @return this + * + * @throws IllegalArgumentException If the value of logName is invalid. + */ + public ReplicationNetworkConfig setLogName(String logName) + throws IllegalArgumentException { + + setLogNameVoid(logName); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setLogNameVoid(String logName) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, RepParams.CHANNEL_LOG_NAME, logName, + validateParams); + } + + /** + * @hidden + * Returns the name of the DataChannel factory class to be used for + * creating new DataChannel instances. + * + * @return the DataChannelFactory class name, if configured + */ + public String getChannelFactoryClass() { + + return DbConfigManager.getVal( + props, RepParams.CHANNEL_FACTORY_CLASS); + } + + /** + * @hidden + * Sets the name of the DataChannelFactory class to be instantiated for + * creation of new DataChannel instances. + * + * @param factoryClass the class name to use + * + * @return this + */ + public ReplicationNetworkConfig setChannelFactoryClass( + String factoryClass) { + + setChannelFactoryClassVoid(factoryClass); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setChannelFactoryClassVoid(String factoryClass) { + + DbConfigManager.setVal(props, RepParams.CHANNEL_FACTORY_CLASS, + factoryClass, validateParams); + } + + /** + * @hidden + * Returns the DataChannelFactory class parameters to be used when + * instantiating the DataChannelFactoryClass. + * + * @return the parameters argument, if configured + */ + public String getChannelFactoryParams() { + + return DbConfigManager.getVal(props, RepParams.CHANNEL_FACTORY_PARAMS); + } + + /** + * @hidden + * Sets the DataChannelFactory parameters to be passed when instantiating + * the DataChannelFactoryClass. + * + * @param factoryParams a string encoding any parameters to be passed to + * the class constructor. + * + * @return this + */ + public ReplicationNetworkConfig setChannelFactoryParams( + String factoryParams) { + + setChannelFactoryParamsVoid(factoryParams); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setChannelFactoryParamsVoid(String factoryParams) { + + DbConfigManager.setVal(props, RepParams.CHANNEL_FACTORY_PARAMS, + factoryParams, validateParams); + } + + /** + * Set this configuration parameter with this value. Values are validated + * before setting the parameter. + * + * @param paramName the configuration parameter name, one of the String + * constants in this class + * @param value the configuration value. 
+ * + * @return this + * + * @throws IllegalArgumentException if the paramName or value is invalid, or + * if paramName is not a parameter that applies to ReplicationNetworkConfig. + */ + public ReplicationNetworkConfig setConfigParam( + String paramName, String value) + throws IllegalArgumentException { + + if (isValidConfigParam(paramName)) { + setConfigParam(props, paramName, value, validateParams); + } + return this; + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public ReplicationNetworkConfig clone() { + try { + ReplicationNetworkConfig copy = + (ReplicationNetworkConfig) super.clone(); + return copy; + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * @hidden + * Enumerate the subset of configuration properties that are intended to + * control network access that are specific to this class. + */ + static Set getRepNetLocalPropertySet() { + + return repNetLocalProperties; + } + + /** + * @hidden + * Enumerate the subset of configuration properties that are intended to + * control network access. + */ + public static Set getRepNetPropertySet() { + + synchronized(repNetLocalProperties) { + if (repNetAllProperties == null) { + Set props = new HashSet(); + props.addAll(repNetLocalProperties); + props.addAll(ReplicationBasicConfig.getRepBasicPropertySet()); + props.addAll(ReplicationSSLConfig.getRepSSLPropertySet()); + /* Nail the set down */ + repNetAllProperties = Collections.unmodifiableSet(props); + } + return repNetAllProperties; + } + } + + /** + * @hidden + * Ensure that the parameters for this and all known derived classes are + * registered. This is called by testing code to ensure that parameter + * registration happens when non-standard API access is used. + */ + public static void registerParams() { + /* Call for side effect */ + getRepNetPropertySet(); + } + + + /** + * @hidden + * Apply the configurations specified in sourceProps to our properties. + * + * @throws IllegalArgumentException if any of the contained property + * entries have invalid names or invalid values + */ + void applyRepNetProperties(Properties sourceProps) + throws IllegalArgumentException { + + for (Map.Entry propPair : sourceProps.entrySet()) { + final String name = (String) propPair.getKey(); + if (!isValidConfigParam(name)) { + continue; + } + final String value = (String) propPair.getValue(); + setConfigParam(props, name, value, + true /* validateParams */); + } + } + + /** + * @hidden + * Apply the configurations specified in sourceProps to updateProps, + * effectively copying the replication service access properties from the + * sourceProps hash. Only the source properties that are applicable to + * a ReplicationNetworkConfig are used. 
+ * + * @throws IllegalArgumentException if any of the contained property + * entries have invalid values + */ + public static void applyRepNetProperties(Properties sourceProps, + Properties updateProps) + throws IllegalArgumentException { + + final Set repNetProps = getRepNetPropertySet(); + for (Map.Entry propPair : sourceProps.entrySet()) { + final String name = (String) propPair.getKey(); + if (!repNetProps.contains(name)) { + continue; + } + final String value = (String) propPair.getValue(); + setConfigParam(updateProps, name, value, + true /* validateParams */); + } + } + + /** + * @hidden + * For access by ReplicationConfig and testing only + */ + Properties getProps() { + Properties returnProps = new Properties(props); + returnProps.setProperty(CHANNEL_TYPE, getChannelType()); + return returnProps; + } + + /** + * Set this configuration parameter with this value in the specified + * Properties object, which is assumed to represent the properties + * that are applicable to this class. Values are validated + * before setting the parameter. + * + * @param props the Properties object to update + * @param paramName the configuration parameter name, one of the String + * constants in this class + * @param value the configuration value. + * + * @throws IllegalArgumentException if the paramName or value is invalid + */ + private static void setConfigParam( + Properties props, String paramName, String value, + boolean validateParams) + throws IllegalArgumentException { + + DbConfigManager.setConfigParam(props, + paramName, + value, + false, /* require mutability. */ + validateParams, + true, /* forReplication */ + false); /* verifyForReplication */ + } + + /** + * Checks whether the named parameter is valid for this configuration type. + * @param paramName the configuration parameter name, one of the String + * constants in this class + * @return true if the named parameter is a valid parameter name + */ + protected boolean isValidConfigParam(String paramName) { + return repNetLocalProperties.contains(paramName); + } + + /** + * Read a properties file into a Properties object. + * @param propFile a file containing property settings + * @return a Properties object containing the property settings + * @throws FileNotFoundException if the file does not exist + */ + private static Properties readProperties(File propFile) + throws FileNotFoundException { + + if (!propFile.exists()) { + throw new FileNotFoundException( + "The properties file " + propFile + " does not exist."); + } + + Properties props = new Properties(); + RepUtils.populateNetProps(props, propFile); + return props; + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationNetworkConfigBeanInfo.java b/src/com/sleepycat/je/rep/ReplicationNetworkConfigBeanInfo.java new file mode 100644 index 0000000..9d5785f --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationNetworkConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. + */ +public class ReplicationNetworkConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(ReplicationNetworkConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(ReplicationNetworkConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationNode.java b/src/com/sleepycat/je/rep/ReplicationNode.java new file mode 100644 index 0000000..edcad38 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationNode.java @@ -0,0 +1,57 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import java.net.InetSocketAddress; + +/** + * An administrative view of a node in a replication group. + */ +public interface ReplicationNode { + + /** + * Returns the unique name associated with the node. + * + * @return the name of the node + */ + String getName(); + + /** + * Returns the type associated with the node. + * + * @return the node type + */ + NodeType getType(); + + /** + * The socket address used by other nodes in the replication group to + * communicate with this node. + * + * @return the socket address + */ + InetSocketAddress getSocketAddress(); + + /** + * Returns the host name associated with the node. + * + * @return the host name of the node + */ + String getHostName(); + + /** + * Returns the port number associated with the node. + * + * @return the port number of the node + */ + int getPort(); +} diff --git a/src/com/sleepycat/je/rep/ReplicationSSLConfig.java b/src/com/sleepycat/je/rep/ReplicationSSLConfig.java new file mode 100644 index 0000000..647b329 --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationSSLConfig.java @@ -0,0 +1,1232 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + + +import java.util.Collections; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.net.InstanceParams; +import com.sleepycat.je.rep.net.PasswordSource; +import com.sleepycat.je.rep.net.SSLAuthenticator; + +/** + * @hidden SSL deferred + * Specifies the parameters that control replication network communication + * within a replicated environment using SSL. The parameters contained here are + * immutable. + *

        + * To change the default settings for a replicated environment, an application + * creates a configuration object, customizes settings and uses it for {@link + * ReplicatedEnvironment} construction. Except as noted, the set methods of + * this class perform only minimal validation of configuration values when the + * method is called, and value checking is deferred until the time a + * DataChannel factory is constructed. An IllegalArgumentException is thrown + * if the value is not valid for that attribute. + *

+ * ReplicationSSLConfig follows precedence rules similar to those of + * {@link EnvironmentConfig}. + *

+ *   1. Configuration parameters specified in {@literal <environmentHome>/je.properties} take first precedence.
+ *   2. Configuration parameters set in the ReplicationSSLConfig object used at {@code ReplicatedEnvironment} construction are next.
+ *   3. Any configuration parameters not set by the application are set to system defaults, described along with the parameter name String constants in this class.
+ *

        + * + */ +public class ReplicationSSLConfig extends ReplicationNetworkConfig { + + private static final long serialVersionUID = 1L; + + /* + * Note: all replicated parameters should start with + * EnvironmentParams.REP_PARAMS_PREFIX, which is "je.rep.", + * see SR [#19080]. + */ + + /** + * Configures the type of communication channel to use. Valid values + * for this parameter are: + *

+ *   • ssl
        + * + * ssl indicates that SSL is to be used for service + * communication. Using SSL normally provides both encryption and + * authentication. This option supports numerous associated configuration + * parameters. It requires, at a minimum, that a Java keystore and + * associated keystore password be supplied. The keystore password can be + * supplied using multiple methods, considered in the following order: + * + * {@link #setSSLKeyStorePasswordSource} + * {@link #SSL_KEYSTORE_PASSWORD_CLASS je.rep.ssl.keyStorePasswordClass} + * {@link #setSSLKeyStorePassword} + * {@link #SSL_KEYSTORE_PASSWORD je.rep.ssl.keyStorePassword} + * The javax.net.ssl.keyStorePassword system property + * + * The properties supported by the supplied SSL channel factory are: + *
        +     *   {@link #SSL_KEYSTORE_FILE je.rep.ssl.keyStoreFile}
        +     *   {@link #SSL_KEYSTORE_PASSWORD_CLASS je.rep.ssl.keyStorePasswordClass}
        +     *   {@link #SSL_KEYSTORE_PASSWORD_PARAMS je.rep.ssl.keyStorePasswordParams}
        +     *   {@link #SSL_KEYSTORE_PASSWORD je.rep.ssl.keyStorePassword}
        +     *   {@link #SSL_KEYSTORE_TYPE je.rep.ssl.keyStoreType}
        +     *   {@link #SSL_CLIENT_KEY_ALIAS je.rep.ssl.clientKeyAlias}
        +     *   {@link #SSL_SERVER_KEY_ALIAS je.rep.ssl.serverKeyAlias}
        +     *   {@link #SSL_TRUSTSTORE_FILE je.rep.ssl.trustStoreFile}
        +     *   {@link #SSL_TRUSTSTORE_TYPE je.rep.ssl.trustStoreType}
        +     *   {@link #SSL_CIPHER_SUITES je.rep.ssl.cipherSuites}
        +     *   {@link #SSL_PROTOCOLS je.rep.ssl.protocols}
        +     *   {@link #SSL_AUTHENTICATOR je.rep.ssl.authenticator}
        +     *   {@link #SSL_AUTHENTICATOR_CLASS je.rep.ssl.authenticatorClass}
        +     *   {@link #SSL_AUTHENTICATOR_PARAMS je.rep.ssl.authenticatorParams}
        +     *   {@link #SSL_HOST_VERIFIER je.rep.ssl.hostVerifier}
        +     *   {@link #SSL_HOST_VERIFIER_CLASS je.rep.ssl.hostVerifierClass}
        +     *   {@link #SSL_HOST_VERIFIER_PARAMS je.rep.ssl.hostVerifierParams}
        +     * 
+ */ + + /** + * The path to the Java keystore file for SSL data channel factories. + * The specified path must be absolute. + * If this parameter is not set or has an empty value, the Java system + * property javax.net.ssl.keyStore is used. + *
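A hedged sketch of supplying the keystore file parameter above together with the companion password parameter defined just below. The path and password literals are placeholders only; in practice a PasswordSource or the password-class parameter avoids embedding a literal password. The no-argument ReplicationSSLConfig constructor and string-based setConfigParam acceptance of these names are assumptions of this sketch:

    import com.sleepycat.je.rep.ReplicationSSLConfig;

    final class SslKeystoreSketch {
        static ReplicationSSLConfig build() {
            ReplicationSSLConfig ssl = new ReplicationSSLConfig();
            ssl.setConfigParam(ReplicationSSLConfig.SSL_KEYSTORE_FILE,
                               "/abs/path/keystore.jks");  // placeholder path
            ssl.setConfigParam(ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD,
                               "example-password");        // placeholder only
            return ssl;
        }
    }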

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

+ */ + public static final String SSL_KEYSTORE_FILE = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.keyStoreFile"; + + /** + * The password for accessing the Java keystore file for SSL data channel + * factories. If this parameter is not set or has an empty value, the Java + * system property javax.net.ssl.keyStorePassword is used. + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

        + */ + public static final String SSL_KEYSTORE_PASSWORD = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.keyStorePassword"; + + /** + * A class that will be instantiated in order to retrieve a password that + * allows access to the keystore file. The class must implement the + * com.sleepycat.je.rep.net.PasswordSource interface. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

        + */ + public static final String SSL_KEYSTORE_PASSWORD_CLASS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.keyStorePasswordClass"; + + /** + * A string encoding the parameters for configuring the password class. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

        + */ + public static final String SSL_KEYSTORE_PASSWORD_PARAMS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.keyStorePasswordParams"; + + /** + * The type of the Java keystore file. This is used to determine what + * keystore implementation should be used to manipulate the named + * keystore file. If set to a non-empty value, the value must be a valid + * keystore type for the Java environment. If this parameter is not set to + * a non-empty value, the default Java keystore type is assumed. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

        + */ + public static final String SSL_KEYSTORE_TYPE = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.keyStoreType"; + + /** + * The alias name of the preferred key for use by the service dispatcher + * acting in SSL server mode. When not set to a non-empty value and the + * keystore contains multiple key options, the key selection algorithm is + * unspecified. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

        + */ + public static final String SSL_SERVER_KEY_ALIAS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.serverKeyAlias"; + + /** + * The alias name of the preferred key for use by a client connecting + * to the service dispatcher. When not set to a non-empty value and the + * keystore contains multiple key options, the key selection algorithm is + * unspecified. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

        + */ + public static final String SSL_CLIENT_KEY_ALIAS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.clientKeyAlias"; + + /** + * The path to the Java truststore file for SSL data channel factories. + * The specified path must be absolute. + * If this parameter is not set to a non-empty value, the Java system + * property javax.net.ssl.trustStore is used. + * + *

+ * Name       Type      Mutable   Default
+ * {@value}   String    No        ""

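/*
 * Illustration (a sketch, not part of the original source): pointing a node at
 * an absolute truststore path; if left unset, javax.net.ssl.trustStore applies,
 * as documented above. The path is hypothetical.
 */
import com.sleepycat.je.rep.ReplicationSSLConfig;

public class TrustStoreSketch {
    public static void main(String[] args) {
        ReplicationSSLConfig config = new ReplicationSSLConfig();
        config.setSSLTrustStore("/etc/je/trust.store");
        config.setSSLTrustStoreType("JKS"); // the documented default
    }
}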
        + */ + public static final String SSL_TRUSTSTORE_FILE = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.trustStoreFile"; + + /** + * The type of the Java truststore file. This is used to determine what + * keystore implementation should be used to manipulate the named + * keystore file. If set to a non-empty value, the value must be a valid + * keystore type for the Java environment. If this parameter is not set to + * a non-empty value, the default Java keystore type is assumed. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>"JKS"</td></tr>
+ * </table>

        + */ + public static final String SSL_TRUSTSTORE_TYPE = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.trustStoreType"; + + /** + * The list of SSL cipher suites that are acceptable for SSL data channel + * factories. The cipher suite list must be in comma-delimited form. + * If this parameter is not set to a non-empty value, the Java default + * set of enabled cipher suites is allowed. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

+ */ + public static final String SSL_CIPHER_SUITES = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.cipherSuites"; + + /** + * The list of SSL protocols that are acceptable for SSL data channel + * factories. The protocol list must be in comma-delimited form. + * If not specified, the default protocol selected is TLSv1. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

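/*
 * Illustration (a sketch, not part of the original source): both lists are
 * plain comma-delimited strings. The suite and protocol names below are
 * examples and must be valid for the JVM in use.
 */
import com.sleepycat.je.rep.ReplicationSSLConfig;

public class SuitesAndProtocolsSketch {
    public static void main(String[] args) {
        ReplicationSSLConfig config = new ReplicationSSLConfig();
        config.setSSLCipherSuites(
            "TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA");
        config.setSSLProtocols("TLSv1.2,TLSv1.1");
    }
}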
+ */ + public static final String SSL_PROTOCOLS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.protocols"; + + /** + * The specification for an SSL authenticator. + * The authenticator can be configured in one of the following ways: + *
+ * <ul>
+ * <li>mirror</li>
+ * <li>dnmatch(&lt;Regular Expression&gt;)</li>
+ * </ul>
+ * + * The mirror option causes the authenticator to check that the + * Distinguished Name (DN) in the certificate of the incoming client + * connection matches the DN of the certificate that this server presents + * when connecting as a client to another server. + * + * The dnmatch() option causes the authenticator to check that + * the DN in the certificate of the incoming client connection matches the + * regular expression provided in the dnmatch() specification. + * + * Do not configure both the SSL authenticator and the SSL authenticator + * class, or an exception will be thrown during DataChannelFactory + * instantiation. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

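/*
 * Illustration (a sketch, not part of the original source): the two
 * authenticator forms described above. Configure one of them, never both the
 * authenticator and the authenticator class. The DN pattern is hypothetical.
 */
import com.sleepycat.je.rep.ReplicationSSLConfig;

public class AuthenticatorSpecSketch {
    public static void main(String[] args) {
        ReplicationSSLConfig config = new ReplicationSSLConfig();
        config.setSSLAuthenticator("mirror");
        // or, to match client DNs against a regular expression:
        config.setSSLAuthenticator("dnmatch(CN=jenode.*,O=Example)");
    }
}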
        + */ + public static final String SSL_AUTHENTICATOR = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.authenticator"; + + /** + * The string identifying a class to be instantiated to check whether + * incoming client SSL connections are to be trusted. If specified, the + * string must be a fully qualified Java class name for a class that + * implements the {@link SSLAuthenticator} + * interface and provides a public constructor with an argument list of + * the form + * ( {@link InstanceParams} ). + *

        + * Do not configure both the SSL authenticator and the SSL authenticator + * class, or an exception will be thrown during DataChannelFactory + * instantiation. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

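/*
 * Illustration (a skeleton, not part of the original source): a custom
 * authenticator for the parameter above. It assumes SSLAuthenticator declares
 * boolean isTrusted(SSLSession), which is not shown in this diff.
 */
import javax.net.ssl.SSLSession;
import com.sleepycat.je.rep.net.InstanceParams;
import com.sleepycat.je.rep.net.SSLAuthenticator;

public class PermissiveAuthenticator implements SSLAuthenticator {
    public PermissiveAuthenticator(InstanceParams params) {
        // ssl.authenticatorParams would carry any configuration string.
    }

    @Override
    public boolean isTrusted(SSLSession sslSession) {
        // A real implementation would examine the peer certificate chain.
        return true;
    }
}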
        + */ + public static final String SSL_AUTHENTICATOR_CLASS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.authenticatorClass"; + + /** + * A string encoding the parameters for configuring the authenticator class. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

        + */ + public static final String SSL_AUTHENTICATOR_PARAMS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.authenticatorParams"; + + /** + * The configuration to be used for verifying the certificate of + * a server when a connection is made. + * + * The verifier can be configured in one of the following ways: + *
+ * <ul>
+ * <li>hostname</li>
+ * <li>mirror</li>
+ * <li>dnmatch(&lt;Regular Expression&gt;)</li>
+ * </ul>
        + *

+ * The hostname option causes the verifier to check that the + * Distinguished Name (DN) or one of the Subject Alternative Names in the + * certificate presented by the server contains the hostname that + * was the target of the connection attempt. This assumes that server + * certificates are unique per server. + *

+ * The mirror option causes the verifier to check that the + * Distinguished Name (DN) in the certificate of the server matches the DN + * of the certificate that this server presents to incoming client + * connections. This assumes that all servers have equivalent certificates. + *

        + * The dnmatch() option causes the verifier to check that + * the DN in the certificate of the server matches the regular expression + * string provided in the dnmatch() specification. + *

        + * Do not configure both the SSL host verifier and the SSL host verifier + * class, or an exception will be thrown during DataChannelFactory + * instantiation. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

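/*
 * Illustration (a sketch, not part of the original source): "hostname" suits
 * deployments with one certificate per server, while "mirror" suits a single
 * certificate shared by every node, as described above.
 */
import com.sleepycat.je.rep.ReplicationSSLConfig;

public class HostVerifierSpecSketch {
    public static void main(String[] args) {
        ReplicationSSLConfig config = new ReplicationSSLConfig();
        config.setSSLHostVerifier("hostname");
    }
}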
        + */ + public static final String SSL_HOST_VERIFIER = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.hostVerifier"; + + /** + * The class to be instantiated to check whether the target host of a + * connection initiated by a client is to be trusted. If specified, the + * string must be a fully qualified Java class name for a class that + * implements the javax.net.ssl.HostnameVerifier interface + * and provides a public constructor with an argument list of the form + * ({@link InstanceParams}). + *

        + * Do not configure both the SSL host verifier and the SSL host verifier + * class, or an exception will be thrown during DataChannelFactory + * instantiation. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

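/*
 * Illustration (a skeleton, not part of the original source): a host verifier
 * for the parameter above, built on the standard javax.net.ssl interface. The
 * suffix check and the InstanceParams handling are hypothetical.
 */
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSession;
import com.sleepycat.je.rep.net.InstanceParams;

public class SuffixHostVerifier implements HostnameVerifier {
    private final String suffix = ".example.com"; // real code would read params

    public SuffixHostVerifier(InstanceParams params) {
    }

    @Override
    public boolean verify(String hostname, SSLSession session) {
        return hostname.endsWith(suffix);
    }
}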
        + */ + public static final String SSL_HOST_VERIFIER_CLASS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.hostVerifierClass"; + + /** + * A string encoding the parameters for configuring the host verifier + * class, if needed. + * + *

+ * <table border="1">
+ * <tr><td>Name</td><td>Type</td><td>Mutable</td><td>Default</td></tr>
+ * <tr><td>{@value}</td><td>String</td><td>No</td><td>""</td></tr>
+ * </table>

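/*
 * Illustration (a sketch, not part of the original source): attaching a
 * completed SSL configuration to a replicated node. The
 * ReplicationConfig#setRepNetConfig call is assumed here; it does not appear
 * in this diff. Paths, password, and node addresses are hypothetical.
 */
import com.sleepycat.je.rep.ReplicationConfig;
import com.sleepycat.je.rep.ReplicationSSLConfig;

public class WireSslSketch {
    public static void main(String[] args) {
        ReplicationSSLConfig sslConfig = new ReplicationSSLConfig();
        sslConfig.setSSLKeyStore("/etc/je/keys.store");
        sslConfig.setSSLKeyStorePassword("changeit");
        sslConfig.setSSLTrustStore("/etc/je/trust.store");

        ReplicationConfig repConfig =
            new ReplicationConfig("myGroup", "node1", "host1:5001");
        repConfig.setRepNetConfig(sslConfig);
    }
}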
        + */ + public static final String SSL_HOST_VERIFIER_PARAMS = + EnvironmentParams.REP_PARAM_PREFIX + "ssl.hostVerifierParams"; + + /* The set of Replication properties specific to this class */ + private static Set repSSLProperties; + static { + repSSLProperties = new HashSet(); + repSSLProperties.add(SSL_KEYSTORE_FILE); + repSSLProperties.add(SSL_KEYSTORE_PASSWORD); + repSSLProperties.add(SSL_KEYSTORE_PASSWORD_CLASS); + repSSLProperties.add(SSL_KEYSTORE_PASSWORD_PARAMS); + repSSLProperties.add(SSL_KEYSTORE_TYPE); + repSSLProperties.add(SSL_SERVER_KEY_ALIAS); + repSSLProperties.add(SSL_CLIENT_KEY_ALIAS); + repSSLProperties.add(SSL_TRUSTSTORE_FILE); + repSSLProperties.add(SSL_TRUSTSTORE_TYPE); + repSSLProperties.add(SSL_CIPHER_SUITES); + repSSLProperties.add(SSL_PROTOCOLS); + repSSLProperties.add(SSL_AUTHENTICATOR); + repSSLProperties.add(SSL_AUTHENTICATOR_CLASS); + repSSLProperties.add(SSL_AUTHENTICATOR_PARAMS); + repSSLProperties.add(SSL_HOST_VERIFIER); + repSSLProperties.add(SSL_HOST_VERIFIER_CLASS); + repSSLProperties.add(SSL_HOST_VERIFIER_PARAMS); + /* Nail the set down */ + repSSLProperties = Collections.unmodifiableSet(repSSLProperties); + } + + static { + + /* + * Force loading when a ReplicationNetworkConfig is used and an + * environment has not been created. + */ + @SuppressWarnings("unused") + final ConfigParam forceLoad = RepParams.CHANNEL_TYPE; + } + + /* The possibly null password source for keystore access */ + private transient PasswordSource sslKeyStorePasswordSource; + + /** + * Constructs a ReplicationSSLConfig initialized with the system default + * settings. Defaults are documented with the string constants in this + * class. + */ + public ReplicationSSLConfig() { + } + + /** + * Creates an ReplicationSSLConfig which includes the properties + * specified in the properties parameter. + * + * @param properties Supported properties are described as the string + * constants in this class. + * + * @throws IllegalArgumentException If any properties read from the + * properties parameter are invalid. + */ + public ReplicationSSLConfig(Properties properties) + throws IllegalArgumentException { + + super(properties); + } + + /** + * Get the channel type setting for the replication service. + * + * @return the channel type + */ + @Override + public String getChannelType() { + return "ssl"; + } + + /** + * Returns the name of the Java KeyStore file to be used for SSL key pair + * retrieval. + * + * @return the KeyStore file name + */ + public String getSSLKeyStore() { + return DbConfigManager.getVal(props, RepParams.SSL_KEYSTORE_FILE); + } + + /** + * Sets the name of the Java KeyStore file to be used when creating + * SSL connections. + * + * @param filename the KeyStore filename + * + * @return this + */ + public ReplicationNetworkConfig setSSLKeyStore(String filename) { + + setSSLKeyStoreVoid(filename); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLKeyStoreVoid(String filename) { + + DbConfigManager.setVal(props, RepParams.SSL_KEYSTORE_FILE, filename, + validateParams); + } + + /** + * Returns the type of the Java Keystore file to be used for SSL key pair + * retrieval. + * + * @return the KeyStore type + */ + public String getSSLKeyStoreType() { + return DbConfigManager.getVal(props, RepParams.SSL_KEYSTORE_TYPE); + } + + /** + * Sets the type of the Java KeyStore file to be used when creating + * SSL connections. 
+ * + * @param keyStoreType the Keystore type + * + * @return this + */ + public ReplicationNetworkConfig setSSLKeyStoreType(String keyStoreType) { + + setSSLKeyStoreTypeVoid(keyStoreType); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLKeyStoreTypeVoid(String keyStoreType) { + + DbConfigManager.setVal(props, RepParams.SSL_KEYSTORE_TYPE, + keyStoreType, validateParams); + } + + /** + * Returns the password for the Java KeyStore file to be used for SSL key + * pair retrieval. + * + * @return the KeyStore password + */ + public String getSSLKeyStorePassword() { + return DbConfigManager.getVal(props, RepParams.SSL_KEYSTORE_PASSWORD); + } + + /** + * Sets the password for the Java KeyStore file to be used when creating + * SSL connections. + * + * @param password the KeyStore password + * + * @return this + */ + public ReplicationNetworkConfig setSSLKeyStorePassword(String password) { + + setSSLKeyStorePasswordVoid(password); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLKeyStorePasswordVoid(String password) { + + DbConfigManager.setVal(props, RepParams.SSL_KEYSTORE_PASSWORD, password, + validateParams); + } + + /** + * Returns the name of a class that should be instantiated to retrieve the + * password for the Java KeyStore file. + * + * @return the KeyStore password source class name + */ + public String getSSLKeyStorePasswordClass() { + + return DbConfigManager.getVal(props, + RepParams.SSL_KEYSTORE_PASSWORD_CLASS); + } + + /** + * Sets the name of a class that should be instantiated to retrieve the + * password for the Java KeyStore file. + * + * @param className the name of the class + * + * @return this + */ + public ReplicationNetworkConfig setSSLKeyStorePasswordClass( + String className) { + + setSSLKeyStorePasswordClassVoid(className); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLKeyStorePasswordClassVoid(String className) { + + DbConfigManager.setVal(props, RepParams.SSL_KEYSTORE_PASSWORD_CLASS, + className, validateParams); + } + + /** + * Returns a string to be used in the constructor for a keystore password + * source instance. + * + * @return the parameter values + */ + public String getSSLKeyStorePasswordParams() { + + return DbConfigManager.getVal(props, + RepParams.SSL_KEYSTORE_PASSWORD_PARAMS); + } + + /** + * Sets the string to be used in the constructor for a keystore password + * source instance. + * + * @param params a string that is to be passed to the constructor + * + * @return this + */ + public ReplicationNetworkConfig setSSLKeyStorePasswordParams( + String params) { + + setSSLKeyStorePasswordParamsVoid(params); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLKeyStorePasswordParamsVoid(String params) { + + DbConfigManager.setVal(props, RepParams.SSL_KEYSTORE_PASSWORD_PARAMS, + params, validateParams); + } + + /** + * Returns the Java KeyStore alias associated with the key that should be + * used to accept incoming SSL connections. + * + * @return the KeyStore alias + */ + public String getSSLServerKeyAlias() { + return DbConfigManager.getVal(props, RepParams.SSL_SERVER_KEY_ALIAS); + } + + /** + * Sets the alias associated with the key in the Java KeyStore file to be + * used when accepting incoming SSL connections. 
+ * + * @param alias the KeyStore alias + * + * @return this + */ + public ReplicationNetworkConfig setSSLServerKeyAlias(String alias) { + + setSSLServerKeyAliasVoid(alias); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLServerKeyAliasVoid(String alias) { + + DbConfigManager.setVal(props, RepParams.SSL_SERVER_KEY_ALIAS, alias, + validateParams); + } + + /** + * Returns the Java KeyStore alias associated with the key that should be + * used when initiating SSL connections . + * + * @return the KeyStore alias + */ + public String getSSLClientKeyAlias() { + return DbConfigManager.getVal(props, RepParams.SSL_CLIENT_KEY_ALIAS); + } + + /** + * Sets the alias associated with the key in the Java KeyStore file to be + * used when initiating SSL connections. + * + * @param alias the KeyStore alias + * + * @return this + */ + public ReplicationNetworkConfig setSSLClientKeyAlias(String alias) { + + setSSLClientKeyAliasVoid(alias); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLClientKeyAliasVoid(String alias) { + + DbConfigManager.setVal(props, RepParams.SSL_CLIENT_KEY_ALIAS, alias, + validateParams); + } + + /** + * Returns the name of the Java TrustStore file to be used for SSL + * certificate validation. + * + * @return the TrustStore file name + */ + public String getSSLTrustStore() { + return DbConfigManager.getVal(props, RepParams.SSL_TRUSTSTORE_FILE); + } + + /** + * Sets the name of the Java TrustStore file to be used when validating + * SSL certificates. + * + * @param filename the TrustStore filename + * + * @return this + */ + public ReplicationNetworkConfig setSSLTrustStore(String filename) { + + setSSLTrustStoreVoid(filename); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLTrustStoreVoid(String filename) { + + DbConfigManager.setVal(props, RepParams.SSL_TRUSTSTORE_FILE, filename, + validateParams); + } + + /** + * Returns the type of the Java Truststore file to be used for SSL key pair + * retrieval. + * + * @return the Truststore type + */ + public String getSSLTrustStoreType() { + return DbConfigManager.getVal(props, RepParams.SSL_TRUSTSTORE_TYPE); + } + + /** + * Sets the type of the Java Truststore file to be used when creating + * SSL connections. + * + * @param trustStoreType the Truststore type + * + * @return this + */ + public ReplicationNetworkConfig setSSLTrustStoreType(String trustStoreType) { + + setSSLTrustStoreTypeVoid(trustStoreType); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLTrustStoreTypeVoid(String trustStoreType) { + + DbConfigManager.setVal(props, RepParams.SSL_TRUSTSTORE_TYPE, + trustStoreType, validateParams); + } + + /** + * Returns the list of SSL cipher suites that are acceptable + * + * @return the list of SSL cipher suites in comma-delimited form + */ + public String getSSLCipherSuites() { + return DbConfigManager.getVal(props, RepParams.SSL_CIPHER_SUITES); + } + + /** + * Sets the list of SSL cipher suites that are acceptable + * + * @param cipherSuites a comma-delimited list of SSL cipher suites + * + * @return this + */ + public ReplicationNetworkConfig setSSLCipherSuites(String cipherSuites) { + + setSSLCipherSuitesVoid(cipherSuites); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. 
+ */ + public void setSSLCipherSuitesVoid(String cipherSuites) { + + DbConfigManager.setVal(props, RepParams.SSL_CIPHER_SUITES, cipherSuites, + validateParams); + } + + /** + * Returns the list of SSL protocols that are acceptable + * + * @return the list of SSL protocols in comma-delimited form + */ + public String getSSLProtocols() { + return DbConfigManager.getVal(props, RepParams.SSL_PROTOCOLS); + } + + /** + * Sets the list of SSL protocols that are acceptable + * + * @param protocols a comma-delimited list of SSL protocols + * + * @return this + */ + public ReplicationNetworkConfig setSSLProtocols(String protocols) { + + setSSLProtocolsVoid(protocols); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLProtocolsVoid(String protocols) { + + DbConfigManager.setVal(props, RepParams.SSL_PROTOCOLS, protocols, + validateParams); + } + + /** + * Returns the SSLAuthenticator configuration to be used for authenticating + * incoming client connections. + * + * @return the authentication configuration, if configured + */ + public String getSSLAuthenticator() { + + return DbConfigManager.getVal(props, RepParams.SSL_AUTHENTICATOR); + } + + /** + * Sets the authenticator configuration to be used for authenticating + * incoming client connections. + * + * See {@link #SSL_AUTHENTICATOR} for a complete description of this + * parameter. + * + * @param authenticator the authentication configuration to use + * + * @return this + * + * @throws IllegalArgumentException if the authenticator specification + * is not syntactically valid + */ + public ReplicationNetworkConfig setSSLAuthenticator(String authenticator) + throws IllegalArgumentException { + + setSSLAuthenticatorVoid(authenticator); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLAuthenticatorVoid(String authenticator) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, RepParams.SSL_AUTHENTICATOR, + authenticator, validateParams); + } + + /** + * Returns the SSLAuthenticator factory class to be used for creating + * new Authenticator instances + * + * @return the SSLAuthenticator factory class name, if configured + */ + public String getSSLAuthenticatorClass() { + + return DbConfigManager.getVal( + props, RepParams.SSL_AUTHENTICATOR_CLASS); + } + + /** + * Sets the authenticator class to be instantiated for creation of + * new SSL Authenticator instances. + * + * @param authenticatorClass the class name to use + * + * @return this + */ + public ReplicationNetworkConfig setSSLAuthenticatorClass( + String authenticatorClass) { + + setSSLAuthenticatorClassVoid(authenticatorClass); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLAuthenticatorClassVoid(String authenticatorClass) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, RepParams.SSL_AUTHENTICATOR_CLASS, + authenticatorClass, validateParams); + } + + /** + * Returns the SSLAuthenticator parameters to be used for creating + * new Authenticator instances + * + * @return the SSLAuthenticator factory params name, if configured + */ + public String getSSLAuthenticatorParams() { + + return DbConfigManager.getVal( + props, RepParams.SSL_AUTHENTICATOR_PARAMS); + } + + /** + * Sets the Authenticator parameters to be passed to the + * SSL server Authenticator class when instantiated. 
+ * + * @param authenticatorParams the parameter value to use + * + * @return this + */ + public ReplicationNetworkConfig setSSLAuthenticatorParams( + String authenticatorParams) { + + setSSLAuthenticatorParamsVoid(authenticatorParams); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLAuthenticatorParamsVoid(String authenticatorParams) { + + DbConfigManager.setVal(props, RepParams.SSL_AUTHENTICATOR_PARAMS, + authenticatorParams, validateParams); + } + + /** + * Returns the HostnameVerifier factory class to be used for creating + * new host verifier instances for client-mode operation + * + * @return the HostnameVerifier factory class name, if configured + */ + public String getSSLHostVerifier() { + + return DbConfigManager.getVal( + props, RepParams.SSL_HOST_VERIFIER); + } + + /** + * Sets the configuration to be used for verifying the certificate of + * a server when a connection is made. + * + * See {@link #SSL_HOST_VERIFIER} for a complete description of this + * parameter. + * + * @param hostVerifier the verifier configuration to use + * + * @return this + * + * @throws IllegalArgumentException if the authenticator specification + * is not syntactically valid + */ + public ReplicationNetworkConfig setSSLHostVerifier(String hostVerifier) + throws IllegalArgumentException { + + setSSLHostVerifierVoid(hostVerifier); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLHostVerifierVoid(String hostVerifier) + throws IllegalArgumentException { + + DbConfigManager.setVal(props, RepParams.SSL_HOST_VERIFIER, + hostVerifier, validateParams); + } + + /** + * Returns the HostnameVerifier factory class to be used for creating + * new host verifier instances for client-mode operation + * + * @return the HostnameVerifier factory class name, if configured + */ + public String getSSLHostVerifierClass() { + + return DbConfigManager.getVal( + props, RepParams.SSL_HOST_VERIFIER_CLASS); + } + + /** + * Sets the host verifier class to be instantiated for creation of + * new SSL host verifier instances. + * + * @param hostVerifierClass the class name to use + * + * @return this + */ + public ReplicationNetworkConfig setSSLHostVerifierClass( + String hostVerifierClass) { + + setSSLHostVerifierClassVoid(hostVerifierClass); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setSSLHostVerifierClassVoid(String hostVerifierClass) { + + DbConfigManager.setVal(props, RepParams.SSL_HOST_VERIFIER_CLASS, + hostVerifierClass, validateParams); + } + + /** + * Returns the SSLHostVerifier parameters to be used for creating + * new host verifier instances for operation in client mode, if needed. + * + * @return the SSLHostVerifier factory params name, if configured + */ + public String getSSLHostVerifierParams() { + + return DbConfigManager.getVal( + props, RepParams.SSL_HOST_VERIFIER_PARAMS); + } + + /** + * Sets the host verifier parameters to be passed to the SSL host verifier + * class when instantiated. + * + * @param hostVerifierParams the parameter value to use + * + * @return this + */ + public ReplicationNetworkConfig setSSLHostVerifierParams( + String hostVerifierParams) { + + setSSLHostVerifierParamsVoid(hostVerifierParams); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. 
+ */ + public void setSSLHostVerifierParamsVoid(String hostVerifierParams) { + + DbConfigManager.setVal(props, RepParams.SSL_HOST_VERIFIER_PARAMS, + hostVerifierParams, validateParams); + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public ReplicationSSLConfig clone() { + return (ReplicationSSLConfig) super.clone(); + } + + /** + * Gets the password source provided for KeyStore access by the SSL + * implementation. + */ + public PasswordSource getSSLKeyStorePasswordSource() { + return sslKeyStorePasswordSource; + } + + /** + * Sets the password source for KeyStore access by the SSL implementation. + * If not set to a non-empty value, the SSL implementation uses the + * {@link #SSL_KEYSTORE_PASSWORD je.rep.ssl.keyStorePassword} + * property instead. + * This setting is not included in the serialized representation. + */ + public ReplicationNetworkConfig + setSSLKeyStorePasswordSource(PasswordSource passwordSource) { + + setSSLKeyStorePasswordSourceVoid(passwordSource); + return this; + } + + /** + * @hidden + * For bean editors. + */ + public void setSSLKeyStorePasswordSourceVoid( + PasswordSource passwordSource) { + sslKeyStorePasswordSource = passwordSource; + } + + /** + * @hidden + * Enumerate the subset of configuration properties that are intended to + * control network access. + */ + static Set getRepSSLPropertySet() { + + return repSSLProperties; + } + + /** + * Checks whether the named parameter is valid for this configuration type. + * @param paramName the configuration parameter name, one of the String + * constants in this class + * @return true if the named parameter is a valid parameter name + */ + protected boolean isValidConfigParam(String paramName) { + if (repSSLProperties.contains(paramName)) { + return true; + } + return super.isValidConfigParam(paramName); + } + + static { + + /* + * Force loading when a ReplicationNetworkConfig is used and an + * environment has not been created. + */ + @SuppressWarnings("unused") + final ConfigParam forceLoad = RepParams.CHANNEL_TYPE; + } + +} diff --git a/src/com/sleepycat/je/rep/ReplicationSSLConfigBeanInfo.java b/src/com/sleepycat/je/rep/ReplicationSSLConfigBeanInfo.java new file mode 100644 index 0000000..c9f3acf --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationSSLConfigBeanInfo.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +/** + * @hidden + * Getter/Setters for JavaBean based tools. 
+ */ +public class ReplicationSSLConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(ReplicationSSLConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(ReplicationSSLConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/ReplicationSecurityException.java b/src/com/sleepycat/je/rep/ReplicationSecurityException.java new file mode 100644 index 0000000..09ed82a --- /dev/null +++ b/src/com/sleepycat/je/rep/ReplicationSecurityException.java @@ -0,0 +1,62 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.DatabaseException; + +/** + * Exception that is thrown when a security failure has occurred which + * may terminate the current replication stream. When it is raised, the + * replication stream consumer is no longer eligible to consume the stream. + *

+ * + * This exception covers the following security failures during streaming: + *

          + *
• The user attempted to contact the feeder of a secure store without + * authenticating; in this case the exception is raised while the client + * performs the service handshake with the server; + + *
• There was a problem authenticating the stream client during streaming, + * for example because the token provided by the client is no longer valid; + + *
• The stream client attempted an operation that it was not authorized to + * perform, for example streaming data from a table that it is not eligible + * to read. + + *
        + * @hidden For internal use only + */ +public class ReplicationSecurityException extends DatabaseException { + + private static final long serialVersionUID = 1; + + /* consumer of replication stream */ + private final String consumer; + + public ReplicationSecurityException(String msg, + String consumer, + Throwable cause) { + super(msg, cause); + this.consumer = consumer; + } + + /** + * Gets the replication stream consumer name + * + * @return the replication stream consumer name + */ + public String getConsumer() { + return consumer; + } +} diff --git a/src/com/sleepycat/je/rep/RestartRequiredException.java b/src/com/sleepycat/je/rep/RestartRequiredException.java new file mode 100644 index 0000000..9e1dd6b --- /dev/null +++ b/src/com/sleepycat/je/rep/RestartRequiredException.java @@ -0,0 +1,79 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * RestartRequiredException serves as the base class for all exceptions which + * makes it impossible for HA to proceed without some form of corrective action + * on the part of the user, followed by a restart of the application. The + * corrective action may involve an increase in resources used by the + * application, a JE configurations change, discarding cached state, etc. The + * error message details the nature of the problem. + */ +public abstract class RestartRequiredException + extends EnvironmentFailureException { + + /* + * Classes that extend RestartRequiredException should be aware that their + * constructors should not be seen as atomic. If the failure reason + * mandates it, the environment may be invalidated by the super class + * constructor, EnvironmentFailureException. At invalidation time, the + * exception is saved within the environment as the precipitating failure, + * and may be seen and used by other threads, and the sub class instance + * may be seen before construction is complete. The subclass should take + * care if it has any fields that are initialized in the constructor, after + * the call to super(). + * + * Any overloadings of getMessage() should also assume that they may be + * called asynchronously before the subclass is fully initialized. + */ + + private static final long serialVersionUID = 1; + + public RestartRequiredException(EnvironmentImpl envImpl, + EnvironmentFailureReason reason) { + super(envImpl, reason); + } + + public RestartRequiredException(EnvironmentImpl envImpl, + EnvironmentFailureReason reason, + Exception cause) { + super(envImpl, reason, cause); + } + + public RestartRequiredException(EnvironmentImpl envImpl, + EnvironmentFailureReason reason, + String msg) { + super(envImpl, reason, msg); + } + + /** + * For internal use only. + */ + protected RestartRequiredException(String message, + RestartRequiredException cause) { + super(message, cause); + } + + /** + * For internal use only. 
+ * @hidden + */ + @Override + public abstract EnvironmentFailureException wrapSelf(String msg) ; +} diff --git a/src/com/sleepycat/je/rep/RollbackException.java b/src/com/sleepycat/je/rep/RollbackException.java new file mode 100644 index 0000000..9342785 --- /dev/null +++ b/src/com/sleepycat/je/rep/RollbackException.java @@ -0,0 +1,148 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static com.sleepycat.je.dbi.EnvironmentFailureReason.HARD_RECOVERY; + +import com.sleepycat.je.Database; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.stream.MatchpointSearchResults; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * This asynchronous exception indicates that a new master has been selected, + * this Replica's log is ahead of the current Master, + * and in this case, the Replica was unable to rollback without a + * recovery. As a consequence, it is possible that one or more of the most + * recently committed transactions may need to be rolled back, before the + * Replica can synchronize its state with that of the current + * Master. Note that any CommitTokens obtained before restarting + * this Replica shouldn't be used after {@link RollbackException} + * is thrown because the token may no longer exist on the current + * Master node, due to failover processing. + *

        + * Existing {@link ReplicatedEnvironment}, and consequently {@link Database} + * handles, are invalidated as a result of this exception. The application must + * close all old handles and create new handles before it can proceed. The + * actual rollback of any recently committed transactions is done when the + * application re-instantiates and thereby reopens the {@link + * ReplicatedEnvironment}. The application is responsible for discarding and + * recreating any transient state that may be associated with the committed + * transactions that were rolled back. {@link #getEarliestTransactionId} and + * {@link #getEarliestTransactionCommitTime} provide information to help + * determine which transactions might be rolled back. Note that it is possible + * that no committed transactions have been rolled back and that the + * application need do no adjustments, in which case + * {@link #getEarliestTransactionCommitTime} will return null. + *

        + * This exception should be encountered relatively infrequently in practice, + * since the election mechanism favors nodes with the most advanced log when + * deciding upon a master. The exception, due to its nature, can only be + * encountered when the node is in the Replica state, or the node + * is trying to transition to the Replica state. + *

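/*
 * Illustration (a sketch, not part of the original source): the recovery
 * pattern described above. Where the exception surfaces depends on when
 * sync-up runs; this sketch assumes it is raised while opening the
 * environment.
 */
import java.io.File;
import com.sleepycat.je.EnvironmentConfig;
import com.sleepycat.je.rep.ReplicatedEnvironment;
import com.sleepycat.je.rep.ReplicationConfig;
import com.sleepycat.je.rep.RollbackException;

public class ReopenAfterRollbackSketch {
    static ReplicatedEnvironment open(File envHome,
                                      ReplicationConfig repConfig,
                                      EnvironmentConfig envConfig) {
        try {
            return new ReplicatedEnvironment(envHome, repConfig, envConfig);
        } catch (RollbackException rbe) {
            Long commitTime = rbe.getEarliestTransactionCommitTime();
            if (commitTime != null) {
                // Transactions committed at or after this time may have been
                // rolled back; discard dependent transient state here.
            }
            // The rollback happens on reopen, so a second attempt follows.
            return new ReplicatedEnvironment(envHome, repConfig, envConfig);
        }
    }
}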
        + * Use of weak durability requirements like + * {@link com.sleepycat.je.Durability.ReplicaAckPolicy#NONE} or a + * {@link com.sleepycat.je.rep.ReplicationMutableConfig#NODE_PRIORITY} of zero + * increases the likelihood of this exception. + * @see RollbackProhibitedException + */ +public class RollbackException extends RestartRequiredException { + private static final long serialVersionUID = 1; + + /* Testing support only */ + private final MatchpointSearchResults searchResults; + /** + * For internal use only. + * @hidden + */ + public RollbackException(RepImpl repImpl, + VLSN matchpointVLSN, + MatchpointSearchResults searchResults) { + super(repImpl, HARD_RECOVERY, makeMessage(repImpl.getName(), + searchResults, + matchpointVLSN)); + + this.searchResults = searchResults; + + } + + private static String makeMessage + (final String nodeName, + final MatchpointSearchResults searchResults, + final VLSN matchpointVLSN) { + + final long matchpointLSN = searchResults.getMatchpointLSN(); + return "Node " + nodeName + + " must rollback" + searchResults.getRollbackMsg() + + " in order to rejoin the replication group. All existing " + + "ReplicatedEnvironment handles must be closed and " + + "reinstantiated. Log files were truncated to file 0x" + + DbLsn.getFileNumber(matchpointLSN) + ", offset 0x" + + DbLsn.getFileOffset(matchpointLSN) + ", vlsn " + + matchpointVLSN; + } + + /** + * Return the time in milliseconds of the earliest transaction commit that + * has been rolled back. May return null if no commits have been rolled + * back. + */ + public Long getEarliestTransactionCommitTime() { + if (searchResults == null) { + return null; + } + + if (searchResults.getEarliestPassedTxn() == null) { + return null; + } + + return searchResults.getEarliestPassedTxn().time.getTime(); + } + + /** + * Return the id of the earliest transaction commit that has been + * rolled back. 0 is returned if no commits have been rolled back. + */ + public long getEarliestTransactionId() { + if (searchResults == null) { + return 0; + } + + if (searchResults.getEarliestPassedTxn() == null) { + return 0; + } + + return searchResults.getEarliestPassedTxn().id; + } + + /** + * For internal use only. + * @hidden + */ + public RollbackException(String message, RollbackException cause) { + super(message + " " + cause.getMessage(), cause); + searchResults = cause.searchResults; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public RollbackException wrapSelf(String msg) { + return new RollbackException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/RollbackProhibitedException.java b/src/com/sleepycat/je/rep/RollbackProhibitedException.java new file mode 100644 index 0000000..b33a207 --- /dev/null +++ b/src/com/sleepycat/je/rep/RollbackProhibitedException.java @@ -0,0 +1,184 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.stream.MatchpointSearchResults; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * This exception may be thrown by a Replica during the {@link + * replication stream sync-up} phase of startup. It indicates that a syncup + * cannot proceed without undoing a number of committed transactions that + * exceeds the limit defined by {@link ReplicationConfig#TXN_ROLLBACK_LIMIT}. + *

        + * It is rare for committed transactions to be rolled back during a + * sync-up. One way this can happen is if a replication group has been + * executing with a {@link com.sleepycat.je.Durability} policy that specifies a + * {@link com.sleepycat.je.Durability.ReplicaAckPolicy ReplicaAckPolicy} of + * NONE. + *

        + * When ReplicaAckPolicy.NONE is specified, transactions can commit on the + * master without receiving any acknowledgments from replica nodes. Using that + * policy, it is possible that if the master node crashes at a given time, and + * the group fails over and continues on with a new master, the old master's + * environment will have transactions on disk that were never replicated and + * received by other nodes. When this old master comes back up and rejoins the + * group as a replica, it will have committed transactions that need to be + * rolled back. + *

        + * If the number of committed transactions to be rolled back is less than or + * equal to the limit specified by {@link + * ReplicationConfig#TXN_ROLLBACK_LIMIT}, JE will automatically truncate the + * environment log to remove the unreplicated transactions, and will throw a + * {@link RollbackException}. The application only needs to reinstantiate the + * ReplicatedEnvironment and proceed on. If the limit specified by {@link + * ReplicationConfig#TXN_ROLLBACK_LIMIT} is exceeded, the application will + * receive a RollbackProhibitedException to indicate that manual intervention + * is required. + *

        + * The RollbackProhibitedException lets the user interject application specific + * processing before the log is truncated. The exception message and getter + * methods indicate the number of transactions that must be rolled back, and + * the time and id of the earliest targeted transaction, and the user can use + * this information to make any desired application adjustments. The + * application may then manually truncate the log using {@link + * com.sleepycat.je.util.DbTruncateLog}. + *

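/*
 * Illustration (a sketch, not part of the original source): feeding the
 * truncation getters defined below into DbTruncateLog. The
 * truncateLog(File, long, long) signature is assumed; only the class name
 * appears in this diff.
 */
import java.io.File;
import com.sleepycat.je.rep.RollbackProhibitedException;
import com.sleepycat.je.util.DbTruncateLog;

public class ManualTruncateSketch {
    static void truncate(RollbackProhibitedException rpe, File envHome)
        throws Exception {
        new DbTruncateLog().truncateLog(envHome,
                                        rpe.getTruncationFileNumber(),
                                        rpe.getTruncationFileOffset());
    }
}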
        + * Note that any CommitTokens obtained before restarting this + * Replica shouldn't be used after + * {@link RollbackProhibitedException} is thrown because the token may no + * longer exist on the current Master node. + */ +public class RollbackProhibitedException extends RestartRequiredException { + private static final long serialVersionUID = 1L; + + /* + * searchResults is only used by threads that catch the exception, + * so the field is sure to be initialized. + */ + private final MatchpointSearchResults searchResults; + + /** + * For internal use only. + * @hidden + */ + public RollbackProhibitedException(RepImpl repImpl, + int rollbackTxnLimit, + boolean rollbackDisabled, + VLSN matchpointVLSN, + MatchpointSearchResults searchResults) { + + super(repImpl, EnvironmentFailureReason.ROLLBACK_PROHIBITED, + makeMessage(repImpl.getName(), searchResults, matchpointVLSN, + rollbackTxnLimit, rollbackDisabled)); + this.searchResults = searchResults; + } + + private static String makeMessage(String nodeName, + MatchpointSearchResults searchResults, + VLSN matchpointVLSN, + int rollbackTxnLimit, + boolean rollbackDisabled) { + long matchpointLSN = searchResults.getMatchpointLSN(); + long fileNumber = DbLsn.getFileNumber(matchpointLSN); + long fileOffset = DbLsn.getFileOffset(matchpointLSN); + StringBuilder str = new StringBuilder(); + + str.append("Node ").append(nodeName). + append(" must rollback ").append(searchResults.getRollbackMsg()). + append(" in order to rejoin the replication group, but "); + + if (rollbackDisabled) { + str.append("rollbacks are disabled because "). + append("je.rep.txnRollbackDisabled has been set to true. "). + append("Either set je.rep.txnRollbackDisabled to false to "). + append("permit automatic rollback,"); + + } else { + str.append(" the transaction rollback limit of "). + append(rollbackTxnLimit).append(" prohibits this. "). + append("Either increase je.rep.txnRollbackLimit to a "). + append("value larger than ").append(rollbackTxnLimit). + append(" to permit automatic rollback,"); + } + + str.append(" or manually remove the problematic transactions. "). + append("To do manual removal, truncate the log to file "). + append(FileManager.getFileName(fileNumber)). + append(", offset 0x").append(Long.toHexString(fileOffset)). + append(", vlsn ").append(matchpointVLSN). + append(" using the directions in "). + append("com.sleepycat.je.util.DbTruncateLog."); + + return str.toString(); + } + + /** + * For internal use only. + * @hidden + */ + public RollbackProhibitedException(String message, + RollbackProhibitedException cause) { + super(message + " " + cause.getMessage(), cause); + this.searchResults = cause.searchResults; + } + + /** + * For internal use only. + * @hidden + */ + @Override + public RollbackProhibitedException wrapSelf(String msg) { + return new RollbackProhibitedException(msg, this); + } + + /* + * The JE log must be truncated to this file in order for this node to + * rejoin the group. + */ + public long getTruncationFileNumber() { + return DbLsn.getFileNumber(searchResults.getMatchpointLSN()); + } + + /** + * The JE log must be truncated to this offset in the specified + * file in order for this node to rejoin the group. 
+ */ + public long getTruncationFileOffset() { + return DbLsn.getFileOffset(searchResults.getMatchpointLSN()); + } + + /** + * Return the time in milliseconds of the earliest transaction commit that + * will be rolled back if the log is truncated to the location specified by + * {@link #getTruncationFileNumber} and {@link #getTruncationFileOffset} + */ + public Long getEarliestTransactionCommitTime() { + return searchResults.getEarliestPassedTxn().time.getTime(); + } + + /** + * Return the id of the earliest transaction commit that will be + * rolled back if the log is truncated to the location specified by + * {@link #getTruncationFileNumber} and {@link #getTruncationFileOffset} + */ + public long getEarliestTransactionId() { + return searchResults.getEarliestPassedTxn().id; + } +} diff --git a/src/com/sleepycat/je/rep/StateChangeEvent.java b/src/com/sleepycat/je/rep/StateChangeEvent.java new file mode 100644 index 0000000..78d86fe --- /dev/null +++ b/src/com/sleepycat/je/rep/StateChangeEvent.java @@ -0,0 +1,94 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import java.io.Serializable; + +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.node.NameIdPair; + +/** + * Communicates the {@link ReplicatedEnvironment.State state} change at a node + * to the StateChangeListener. There is a distinct instance of this event + * representing each state change at a node. + *

        + * Each event instance may have zero or more state change related exceptions + * associated with it. The exceptions are of type {@link StateChangeException}. + * {@link StateChangeException} has a method called {@link + * StateChangeException#getEvent()} that can be used to associate an event with + * an exception. + * @see StateChangeListener + */ +public class StateChangeEvent implements Serializable { + private static final long serialVersionUID = 1L; + + final private ReplicatedEnvironment.State state; + final private NameIdPair masterNameId; + + /* Records the time associated with the event. */ + final private long eventTime = System.currentTimeMillis(); + + /** + * @hidden + * For internal use only. + * Creates a StateChangeEvent identifying the new state and the new master + * if there is a master in the new state. + * + * @param state the new state + * @param masterNameId the new master or NULL if there isn't + * one. + */ + public StateChangeEvent(State state, NameIdPair masterNameId) { + assert((masterNameId.getId() == NameIdPair.NULL_NODE_ID) || + ((state == State.MASTER) || (state == State.REPLICA))): + "state=" + state + " masterId=" + masterNameId.getId(); + this.state = state; + this.masterNameId = masterNameId; + } + + /** + * Returns the state that the node has transitioned to. + * + * @return the new State resulting from this event + */ + public ReplicatedEnvironment.State getState() { + return state; + } + + /** + * Returns the time (in nano second units) the event occurred, as reported + * by {@link System#nanoTime} + * + * @return the time the event occurred, in nanoseconds + */ + public long getEventTime() { + return eventTime; + } + + /** + * Returns the node name identifying the master at the time of the event. + * + * @return the master node name + * + * @throws IllegalStateException if the node is in the + * DETACHED or UNKNOWN state. + */ + public String getMasterNodeName() + throws IllegalStateException { + if ((state == State.MASTER) || (state == State.REPLICA)) { + return masterNameId.getName(); + } + throw new IllegalStateException("No current master in state: " + + state); + } +} diff --git a/src/com/sleepycat/je/rep/StateChangeException.java b/src/com/sleepycat/je/rep/StateChangeException.java new file mode 100644 index 0000000..87c3597 --- /dev/null +++ b/src/com/sleepycat/je/rep/StateChangeException.java @@ -0,0 +1,85 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import java.util.Date; + +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.txn.Locker; + +/** + * Provides a synchronous mechanism for informing an application about a change + * in the state of the replication node. StateChangeException is an abstract + * class, with subtypes for each type of Transition. + *

        + * A single state change can result in multiple state change exceptions (one + * per thread operating against the environment). Each exception is associated + * with the event that provoked the exception. The application can use this + * association to ensure that each such event is processed just once. + */ +public abstract class StateChangeException extends OperationFailureException { + private static final long serialVersionUID = 1; + + /* Null if the event is not available. */ + private final StateChangeEvent stateChangeEvent; + + /** + * For internal use only. + * @hidden + */ + protected StateChangeException(Locker locker, + StateChangeEvent stateChangeEvent) { + super(locker, (locker != null), + makeMessage(locker, stateChangeEvent), null); + this.stateChangeEvent = stateChangeEvent; + } + + /** + * Used when no state change event is available + */ + protected StateChangeException(String message, Exception reason) { + super(null, false, message, reason); + this.stateChangeEvent = null; + } + + /** + * Returns the event that resulted in this exception. + * + * @return the state change event + */ + public StateChangeEvent getEvent() { + return stateChangeEvent; + } + + private static String makeMessage(Locker locker, StateChangeEvent event) { + long lockerId = (locker == null) ? 0 : locker.getId(); + return (event != null) ? + ("Problem closing transaction " + lockerId + + ". The current state is:" + event.getState() + "." + + " The node transitioned to this state at:" + + new Date(event.getEventTime())) : + "Node state inconsistent with operation"; + } + + /** + * For internal use only. + * @hidden + * Only for use by wrapSelf methods. + */ + protected StateChangeException(String message, + StateChangeException cause) { + super(message, cause); + stateChangeEvent = + (cause != null) ? cause.stateChangeEvent : null; + } +} diff --git a/src/com/sleepycat/je/rep/StateChangeListener.java b/src/com/sleepycat/je/rep/StateChangeListener.java new file mode 100644 index 0000000..cfeabb3 --- /dev/null +++ b/src/com/sleepycat/je/rep/StateChangeListener.java @@ -0,0 +1,55 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +/** + * An asynchronous mechanism for tracking the {@link + * ReplicatedEnvironment.State State} of the replicated environment and + * choosing how to route database operations. {@code State} determines which + * operations are currently permitted on the node. For example, only the {@link + * ReplicatedEnvironment.State#MASTER MASTER} node can execute write + * operations. + *

+ * The Listener is registered with the replicated environment using {@link + * ReplicatedEnvironment#setStateChangeListener(StateChangeListener)}. There + * is at most one Listener associated with the actual environment (not an + * {@link com.sleepycat.je.Environment} handle) at any given instant in time. + *

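/*
 * Illustration (a sketch, not part of the original source): a minimal listener
 * registered via ReplicatedEnvironment#setStateChangeListener. It follows the
 * guidance below by doing almost no work in the callback itself.
 */
import com.sleepycat.je.rep.StateChangeEvent;
import com.sleepycat.je.rep.StateChangeListener;

public class LoggingStateChangeListener implements StateChangeListener {
    @Override
    public void stateChange(StateChangeEvent event) {
        switch (event.getState()) {
        case MASTER:
        case REPLICA:
            System.out.println("Master is now " + event.getMasterNodeName());
            break;
        default:
            System.out.println("Node state: " + event.getState());
        }
    }
}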
+ * See the examples for information on different approaches toward routing + * database operations and an example of using the StateChangeListener. + * @see Managing + * Write Requests at a Replica + */ +public interface StateChangeListener { + + /** + * The notification method. It is initially invoked when the {@code + * StateChangeListener} is first associated with the {@code + * ReplicatedEnvironment} via the {@link + * ReplicatedEnvironment#setStateChangeListener(StateChangeListener)} + * method and subsequently each time there is a state change. + *

        + * This method should do the minimal amount of work, queuing any resource + * intensive operations for processing by another thread before returning + * to the caller, so that it does not unduly delay the other housekeeping + * operations performed by the internal thread which invokes this method. + *

        + * @param stateChangeEvent the new state change event + * @throws RuntimeException Any uncaught exceptions will result in the + * shutdown of the ReplicatedEnvironment. + */ + public void stateChange(StateChangeEvent stateChangeEvent) + throws RuntimeException; +} diff --git a/src/com/sleepycat/je/rep/SyncupProgress.java b/src/com/sleepycat/je/rep/SyncupProgress.java new file mode 100644 index 0000000..3939fe8 --- /dev/null +++ b/src/com/sleepycat/je/rep/SyncupProgress.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +/** + * Describes the different phases of replication stream syncup that are + * executed when a replica starts working with a new replication group master. + * Meant to be used in conjunction with a + * {@link com.sleepycat.je.ProgressListener} that is configured through + * {@link ReplicationConfig#setSyncupProgressListener}, to monitor the + * occurrence and cost of replica sync-ups. + * @see Replication Group Life Cycle + * @since 5.0 + */ +public enum SyncupProgress { + + /** + * Syncup is starting up. The replica and feeder are searching for the + * most recent common shared point in the replication stream. + */ + FIND_MATCHPOINT, + + /** + * A matchpoint has been found, and the replica is determining whether it + * has to rollback any uncommitted replicated records applied from the + * previous master. + */ + CHECK_FOR_ROLLBACK, + + /** + * The replica is rolling back uncommitted replicated records applied from + * the previous master. + */ + DO_ROLLBACK, + + /** Replication stream syncup has ended. */ + END +} diff --git a/src/com/sleepycat/je/rep/TimeConsistencyPolicy.java b/src/com/sleepycat/je/rep/TimeConsistencyPolicy.java new file mode 100644 index 0000000..a30d026 --- /dev/null +++ b/src/com/sleepycat/je/rep/TimeConsistencyPolicy.java @@ -0,0 +1,176 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.utilint.PropUtil; + +/** + * A consistency policy which describes the amount of time the Replica is + * allowed to lag the Master. The application can use this policy to ensure + * that this node sees all transactions that were committed on the Master + * before the lag interval. + *

+ * Effective use of this policy requires that the clocks on the Master and
+ * Replica are synchronized by using a protocol like NTP.
+ *

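A minimal sketch of building the policy described here and attaching it to a single transaction's configuration; the two-second lag and ten-second wait are illustrative values:

    import java.util.concurrent.TimeUnit;

    import com.sleepycat.je.TransactionConfig;
    import com.sleepycat.je.rep.TimeConsistencyPolicy;

    public class LagBoundedTxn {
        /* Returns a TransactionConfig that will not start a transaction
           until the replica is within 2 seconds of the master, waiting up
           to 10 seconds for that condition to hold. */
        public static TransactionConfig config() {
            TimeConsistencyPolicy policy = new TimeConsistencyPolicy(
                2, TimeUnit.SECONDS,    /* permissible lag */
                10, TimeUnit.SECONDS);  /* wait for consistency */
            TransactionConfig txnConfig = new TransactionConfig();
            txnConfig.setConsistencyPolicy(policy);
            return txnConfig;
        }
    }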
+ * Consistency policies are specified at either a per-transaction level
+ * through {@link com.sleepycat.je.TransactionConfig#setConsistencyPolicy}
+ * or as a replication-node-wide default through
+ * {@link com.sleepycat.je.rep.ReplicationConfig#setConsistencyPolicy}.
+ *
+ * @see Managing Consistency
+ */
+public class TimeConsistencyPolicy implements ReplicaConsistencyPolicy {
+
+    /**
+     * The name:{@value} associated with this policy. The name can be used
+     * when constructing policy property values for use in je.properties
+     * files.
+     */
+    public static final String NAME = "TimeConsistencyPolicy";
+
+    private final int permissibleLag;
+
+    /* Amount of time to wait (in ms) for the consistency to be reached. */
+    private final int timeout;
+
+    /**
+     * Specifies the amount of time by which the Replica is allowed to lag
+     * the master when initiating a transaction. The Replica ensures that
+     * all transactions that were committed on the Master before this lag
+     * interval are available at the Replica before allowing a transaction
+     * to proceed with Environment.beginTransaction.
+     *
+     * Effective use of this policy requires that the clocks on the Master
+     * and Replica are synchronized by using a protocol like NTP.
+     *
+     * @param permissibleLag the time interval by which the Replica may be
+     * out of date with respect to the Master when a transaction is
+     * initiated on the Replica.
+     *
+     * @param permissibleLagUnit the {@code TimeUnit} for the permissibleLag
+     * parameter.
+     *
+     * @param timeout the amount of time to wait for the consistency to be
+     * reached.
+     *
+     * @param timeoutUnit the {@code TimeUnit} for the timeout parameter.
+     *
+     * @throws IllegalArgumentException if the permissibleLagUnit or
+     * timeoutUnit is null.
+     */
+    public TimeConsistencyPolicy(long permissibleLag,
+                                 TimeUnit permissibleLagUnit,
+                                 long timeout,
+                                 TimeUnit timeoutUnit) {
+        this.permissibleLag = PropUtil.durationToMillis(permissibleLag,
+                                                        permissibleLagUnit);
+        this.timeout = PropUtil.durationToMillis(timeout, timeoutUnit);
+    }
+
+    /**
+     * Returns the name:{@value #NAME}, associated with this policy.
+     * @see #NAME
+     */
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    /**
+     * Returns the allowed time lag associated with this policy.
+     *
+     * @param unit the {@code TimeUnit} of the returned value.
+     *
+     * @return the permissible lag time in the specified unit.
+     */
+    public long getPermissibleLag(TimeUnit unit) {
+        return PropUtil.millisToDuration(permissibleLag, unit);
+    }
+
+    /**
+     * Returns the consistency timeout associated with this policy.
+     *
+     * @param unit the {@code TimeUnit} of the returned value.
+     *
+     * @return the consistency timeout in the specified unit.
+     */
+    @Override
+    public long getTimeout(TimeUnit unit) {
+        return PropUtil.millisToDuration(timeout, unit);
+    }
+
+    /**
+     * @hidden
+     * For internal use only.
+     * Ensures that the replica has replayed the replication stream to the
+     * point identified by the lag period. If it has not, the method waits
+     * until the constraint is satisfied by the replica.
+     */
+    @Override
+    public void ensureConsistency(EnvironmentImpl replicatorImpl)
+        throws InterruptedException,
+               ReplicaConsistencyException {
+
+        /*
+         * Cast is done to preserve replication/non-replication code
+         * boundaries.
+ */ + RepImpl repImpl = (RepImpl) replicatorImpl; + Replica replica = repImpl.getRepNode().replica(); + replica.getConsistencyTracker().lagAwait(this); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + permissibleLag; + result = prime * result + timeout; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TimeConsistencyPolicy other = + (TimeConsistencyPolicy) obj; + if (permissibleLag != other.permissibleLag) { + return false; + } + if (timeout != other.timeout) { + return false; + } + return true; + } + + @Override + public String toString(){ + return getName() + " permissibleLag=" + permissibleLag; + } +} diff --git a/src/com/sleepycat/je/rep/UnknownMasterException.java b/src/com/sleepycat/je/rep/UnknownMasterException.java new file mode 100644 index 0000000..cb98160 --- /dev/null +++ b/src/com/sleepycat/je/rep/UnknownMasterException.java @@ -0,0 +1,68 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.txn.Locker; + +/** + * Indicates that the underlying operation requires communication with a + * Master, but that a Master was not available. + *

+ * This exception typically indicates a system-level problem. It could
+ * indicate, for example, that a sufficient number of nodes are not
+ * available to hold an election and elect a Master, or that this node was
+ * having problems with the network and was unable to communicate with
+ * other nodes.
+ *

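As the next paragraph notes, the application can retry the operation until the underlying problem is addressed. A minimal sketch of such a loop, not part of this commit; the attempt bound and back-off are illustrative:

    import com.sleepycat.je.Transaction;
    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.UnknownMasterException;

    public class RetryOnUnknownMaster {
        /* Retry beginTransaction a bounded number of times while no
           Master is known, sleeping between attempts. */
        public static Transaction beginWithRetry(ReplicatedEnvironment env)
            throws InterruptedException {
            for (int attempt = 1; ; attempt++) {
                try {
                    return env.beginTransaction(null, null);
                } catch (UnknownMasterException ume) {
                    if (attempt >= 5) {
                        throw ume;      /* Give up; surface the problem. */
                    }
                    Thread.sleep(1000); /* Allow an election to complete. */
                }
            }
        }
    }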
        + * The application can choose to retry the operation, potentially logging the + * problem, until the underlying system level problem has been addressed. + */ +public class UnknownMasterException extends StateChangeException { + private static final long serialVersionUID = 1; + + public UnknownMasterException(Locker locker, + StateChangeEvent stateChangeEvent) { + super(locker, stateChangeEvent); + } + + /** + * Used when the inability to determine a master is not related to a + * state change. + */ + public UnknownMasterException(String message) { + super(message, null); + } + + /** + * Used when the inability to determine a master is not related to a + * state change but some inability to communicate with a node identified + * as a master. The reason contains further explanation. + */ + public UnknownMasterException(String message, Exception reason) { + super(message, reason); + } + + private UnknownMasterException(String message, + UnknownMasterException cause) { + super(message, cause); + } + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new UnknownMasterException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/arbiter/Arbiter.java b/src/com/sleepycat/je/rep/arbiter/Arbiter.java new file mode 100644 index 0000000..b72c966 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/Arbiter.java @@ -0,0 +1,273 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.arbiter; + +import java.io.File; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.Properties; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.arbiter.impl.ArbiterImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.utilint.DatabaseUtil; + +/** + * Provides a mechanism to allow write availability for the Replication + * group even when the number of replication nodes is less than majority. + * The main use of an Arbiter is when the replication group consists of + * two nodes. The addition of an Arbiter to the replication group + * allows for one node to fail and provide write availability with ACK + * durability of SIMPLE_MAJORITY. The Arbiter acknowledges the transaction, + * but does not retain a copy of the data. The Arbiter persists a + * small amount of state to insure that only the Replication nodes that + * contain the Arbiter acknowledged transactions may become a Master. + *

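For context, the SIMPLE_MAJORITY acknowledgment that the Arbiter can help satisfy is expressed through a Durability; a sketch with illustrative sync policies:

    import com.sleepycat.je.Durability;
    import com.sleepycat.je.Durability.ReplicaAckPolicy;
    import com.sleepycat.je.Durability.SyncPolicy;

    public class ArbiterFriendlyDurability {
        /* In a two-node group plus an Arbiter, SIMPLE_MAJORITY (2 of 3)
           can still be met by the surviving node and the Arbiter's ack. */
        public static Durability simpleMajority() {
            return new Durability(SyncPolicy.SYNC,     /* master */
                                  SyncPolicy.NO_SYNC,  /* replicas */
                                  ReplicaAckPolicy.SIMPLE_MAJORITY);
        }
    }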
        + * The Arbiter node participates in elections and may acknowledge transaction + * commits. + *

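A minimal sketch of standing up an Arbiter using the configuration methods that appear later in this commit; the group name, node name, host:port pairs, and home directory are illustrative:

    import com.sleepycat.je.rep.arbiter.Arbiter;
    import com.sleepycat.je.rep.arbiter.ArbiterConfig;

    public class StartArbiter {
        public static Arbiter start() {
            ArbiterConfig config = new ArbiterConfig();
            config.setArbiterHome("/var/je/arbiter"); /* must already exist */
            config.setGroupName("myGroup");
            config.setNodeName("arbiter1");
            config.setNodeHostPort("arbhost:5001");
            config.setHelperHosts("node1:5000,node2:5000");
            /* Blocks until a connection to the current master is made
               (or an unknown-state timeout, if configured, expires). */
            return new Arbiter(config);
        }
    }

Once running, Arbiter.getStats(null) and Arbiter.shutdown() cover monitoring and teardown.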
        + * The Arbiter state is as follows: + * UNKNOWN [ UNKNOWN | REPLICA]+ DETACHED + */ +public class Arbiter { + + private ArbiterImpl ai; + private final ReplicatedEnvironment repEnv; + private final ArbiterConfig ac; + + private final String ARB_CONFIG = "ArbiterConfig"; + private final String ARB_HOME = "ArbiterHome"; + + /** + * An Arbiter used in elections and transaction acknowledgments. + * This method returns when a connection to the current master + * replication node is made. The Arbiter.shutdown() method is + * used to shutdown the threads that run as part of the Arbiter. + * + * @param arbiterConfig Configuration parameters for the Arbiter. + * + * @throws EnvironmentNotFoundException if the environment does not exist + * + * @throws EnvironmentLockedException when an environment cannot be opened + * because another Arbiter has the environment open. + * + * @throws DatabaseException problem establishing connection to the master. + * + * @throws IllegalArgumentException if an invalid parameter is specified, + * for example, an invalid {@code ArbiterConfig} parameter. + */ + public Arbiter(ArbiterConfig arbiterConfig) + throws EnvironmentNotFoundException, + EnvironmentLockedException, + DatabaseException, + IllegalArgumentException { + + ac = arbiterConfig.clone(); + verifyParameters(ac); + File envHome = new File(ac.getArbiterHome()); + if (!envHome.exists()) { + throw new IllegalArgumentException( + "The specified environment directory " + + envHome.getAbsolutePath() + + " does not exist."); + } + Properties allProps = ac.getProps(); + EnvironmentConfig envConfig = + new EnvironmentConfig(getEnvProps(allProps)); + envConfig.setReadOnly(true); + envConfig.setTransactional(true); + envConfig.setConfigParam( + EnvironmentParams.ENV_RECOVERY.getName(), "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_SETUP_LOGGER.getName(), "true"); + envConfig.setConfigParam( + EnvironmentParams.LOG_USE_WRITE_QUEUE.getName(), "false"); + envConfig.setConfigParam( + EnvironmentParams.LOG_WRITE_QUEUE_SIZE.getName(), "4096"); + if (ac.getLoggingHandler() != null) { + envConfig.setLoggingHandler(ac.getLoggingHandler()); + } + + ReplicationConfig repConfig = + new ReplicationConfig(getRepEnvProps(allProps)); + repConfig.setConfigParam(RepParams.ARBITER_USE.getName(), "true"); + repConfig.setRepNetConfig(ac.getRepNetConfig()); + + repEnv = RepInternal.createInternalEnvHandle(envHome, + repConfig, + envConfig); + try { + ai = new ArbiterImpl( + envHome, RepInternal.getNonNullRepImpl(repEnv)); + ai.runArbiter(); + } catch (Throwable t) { + shutdown(); + throw t; + } + } + + /** + * Returns the Arbiter mutable attributes. + * + * @return Arbiter attributes. + */ + public ArbiterMutableConfig getArbiterMutableConfig() { + return ac.getArbiterMutableConfig(); + } + + /** + * Sets the Arbiter mutable attributes. + * + * @param config Arbiter attributes. + * @throws DatabaseException + */ + public void setArbiterMutableConfig(ArbiterMutableConfig config) + throws DatabaseException { + ReplicationMutableConfig rmc = repEnv.getRepMutableConfig(); + Properties newProps = config.getProps(); + copyMutablePropsTo(newProps, rmc); + repEnv.setRepMutableConfig(rmc); + ai.refreshHelperHosts(); + + EnvironmentMutableConfig emc = repEnv.getMutableConfig(); + copyMutablePropsTo(newProps, emc); + repEnv.setMutableConfig(emc); + } + + /** + * Gets the Arbiter state. + */ + public ReplicatedEnvironment.State getState() { + return ai.getArbState(); + } + /** + * Gets the Arbiter statistics. 
+     *
+     * @param config The general statistics attributes. If null, default
+     * attributes are used.
+     *
+     * @return Arbiter statistics.
+     * @throws DatabaseException
+     */
+    public ArbiterStats getStats(StatsConfig config)
+        throws DatabaseException {
+        if (ai == null) {
+            return null;
+        }
+
+        StatsConfig useConfig =
+            (config == null) ? StatsConfig.DEFAULT : config;
+
+        return new ArbiterStats(ai.loadStats(useConfig));
+    }
+
+    /**
+     * Shuts down the Arbiter.
+     * Threads are stopped and resources are released.
+     * @throws DatabaseException
+     */
+    public void shutdown()
+        throws DatabaseException {
+        if (ai != null) {
+            ai.shutdown();
+            try {
+                ai.join();
+            } catch (InterruptedException ignore) {
+            }
+        }
+        if (repEnv != null) {
+            repEnv.close();
+        }
+    }
+
+    private void verifyParameters(ArbiterConfig ac)
+        throws IllegalArgumentException {
+        DatabaseUtil.checkForNullParam(ac, ARB_CONFIG);
+        DatabaseUtil.checkForNullParam(ac.getArbiterHome(), ARB_HOME);
+        DatabaseUtil.checkForNullParam(ac.getGroupName(), ReplicationConfig.GROUP_NAME);
+        DatabaseUtil.checkForNullParam(ac.getNodeHostPort(), ReplicationConfig.NODE_HOST_PORT);
+        DatabaseUtil.checkForNullParam(ac.getHelperHosts(), ReplicationMutableConfig.HELPER_HOSTS);
+    }
+
+    private Properties getEnvProps(Properties props) {
+        Properties envProps = new Properties();
+        Iterator<Entry<Object, Object>> iter = props.entrySet().iterator();
+        while (iter.hasNext()) {
+            Entry<Object, Object> m = iter.next();
+            String key = (String)m.getKey();
+            if (!key.startsWith(EnvironmentParams.REP_PARAM_PREFIX)) {
+                envProps.put(key, m.getValue());
+            }
+        }
+        return envProps;
+    }
+
+    private Properties getRepEnvProps(Properties props) {
+        Properties repEnvProps = new Properties();
+        Iterator<Entry<Object, Object>> iter = props.entrySet().iterator();
+        while (iter.hasNext()) {
+            Entry<Object, Object> m = iter.next();
+            String key = (String)m.getKey();
+            if (key.startsWith(EnvironmentParams.REP_PARAM_PREFIX)) {
+                repEnvProps.put(key, m.getValue());
+            }
+        }
+        return repEnvProps;
+    }
+
+    private void copyMutablePropsTo(Properties from,
+                                    ReplicationMutableConfig toConfig) {
+
+        Enumeration<?> propNames = from.propertyNames();
+        while (propNames.hasMoreElements()) {
+            String paramName = (String) propNames.nextElement();
+            ConfigParam param =
+                EnvironmentParams.SUPPORTED_PARAMS.get(paramName);
+
+            if (param != null && param.isForReplication() &&
+                param.isMutable()) {
+                toConfig.setConfigParam(paramName, from.getProperty(paramName));
+            }
+        }
+    }
+
+    private void copyMutablePropsTo(Properties from,
+                                    EnvironmentMutableConfig toConfig) {
+
+        Enumeration<?> propNames = from.propertyNames();
+        while (propNames.hasMoreElements()) {
+            String paramName = (String) propNames.nextElement();
+            ConfigParam param =
+                EnvironmentParams.SUPPORTED_PARAMS.get(paramName);
+
+            if (param != null && !param.isForReplication() &&
+                param.isMutable()) {
+                toConfig.setConfigParam(paramName, from.getProperty(paramName));
+            }
+        }
+    }
+
+}
diff --git a/src/com/sleepycat/je/rep/arbiter/ArbiterConfig.java b/src/com/sleepycat/je/rep/arbiter/ArbiterConfig.java
new file mode 100644
index 0000000..f77869b
--- /dev/null
+++ b/src/com/sleepycat/je/rep/arbiter/ArbiterConfig.java
@@ -0,0 +1,495 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbiter; + +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Handler; + +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.impl.RepParams; + +/** + * The configuration parameters for an {@link Arbiter}. + * + * @see Arbiter#Arbiter(ArbiterConfig) + */ +public class ArbiterConfig extends ArbiterMutableConfig implements Cloneable { + + private String arbiterHome; + private ReplicationNetworkConfig repNetConfig; + private Handler loggingHandler; + + /** + * Arbiter configuration. + */ + public ArbiterConfig() { + super(); + repNetConfig = ReplicationNetworkConfig.createDefault(); + } + + /** + * Arbiter configuration. + * @param props to initialize configuration object. + */ + public ArbiterConfig(Properties props) { + super(props); + repNetConfig = ReplicationNetworkConfig.createDefault(); + } + + /** + * Gets the Arbiter home directory. + * + * @return Path of the Arbiter home directory. + */ + public String getArbiterHome() { + return arbiterHome; + } + + /** + * Sets the Arbiter Home directory + * + * @param arbiterHome Path of the Arbiter home directory. + */ + public void setArbiterHome(String arbiterHome) { + this.arbiterHome = arbiterHome; + } + + /** + * Sets the name to be associated with this Arbiter. It must + * be unique within the group. When the Arbiter is + * instantiated and joins the replication group, a check is done to ensure + * that the name is unique, and a + * {@link com.sleepycat.je.rep.RestartRequiredException} is thrown if it is + * not. + * + * @param nodeName the name of this arbiter. + */ + public ArbiterConfig setNodeName(String nodeName) + throws IllegalArgumentException { + DbConfigManager.setVal( + props, RepParams.NODE_NAME, nodeName, validateParams); + return this; + } + + /** + * Returns the unique name associated with this Arbiter. + * + * @return the Arbiter name + */ + public String getNodeName() { + return DbConfigManager.getVal(props, RepParams.NODE_NAME); + } + + /** + * Sets the name for the replication group. The name must be made up of + * just alpha numeric characters and must not be zero length. + * + * @param groupName the alpha numeric string representing the name. + * + * @throws IllegalArgumentException if the string name is not valid. + */ + public ArbiterConfig setGroupName(String groupName) + throws IllegalArgumentException { + + DbConfigManager.setVal( + props, RepParams.GROUP_NAME, groupName, validateParams); + return this; + } + + /** + * Gets the name associated with the replication group. + * + * @return the name of this replication group. + */ + public String getGroupName() { + return DbConfigManager.getVal(props, RepParams.GROUP_NAME); + } + + /** + * Sets the hostname and port associated with this arbiter. The hostname + * and port combination are denoted by a string of the form: + *

        +     *  hostname[:port]
        +     * 
        + * The port must be outside the range of "Well Known Ports" + * (zero through 1023). + * + * @param hostPort the string containing the hostname and port as above. + */ + public ArbiterConfig setNodeHostPort(String hostPort) { + DbConfigManager.setVal( + props, RepParams.NODE_HOST_PORT, hostPort, validateParams); + return this; + } + + /** + * Returns the hostname and port associated with this node. The hostname + * and port combination are denoted by a string of the form: + *
        +     *  hostname:port
        +     * 
+     *
+     * @return the hostname and port string of this Arbiter.
+     */
+    public String getNodeHostPort() {
+        return DbConfigManager.getVal(props, RepParams.NODE_HOST_PORT);
+    }
+
+    /**
+     * Time to wait for the discovery of the Master during the instantiation
+     * of the Arbiter. If no Master is found within the timeout period,
+     * the Arbiter constructor returns with the Arbiter in the UNKNOWN state.
+     *
+     * @param timeout The unknown state timeout. A value of 0 turns off
+     * Unknown state timeouts. The creation of the Arbiter will wait until
+     * a Master is found.
+     *
+     * @param unit the {@code TimeUnit} of the timeout value. May be null only
+     * if timeout is zero.
+     *
+     * @return this
+     *
+     * @throws IllegalArgumentException If the value of timeout is negative
+     */
+    public ArbiterConfig setUnknownStateTimeout(long timeout, TimeUnit unit)
+        throws IllegalArgumentException {
+        DbConfigManager.setDurationVal(
+            props, RepParams.ENV_UNKNOWN_STATE_TIMEOUT,
+            timeout, unit, validateParams);
+        return this;
+    }
+
+    /**
+     * Returns the Unknown state timeout.
+     *
+     * A value of 0 means Unknown state timeouts are not configured.
+     *
+     * @param unit the {@code TimeUnit} of the returned value. May not be null.
+     *
+     * @return The unknown state timeout.
+     */
+    public long getUnknownStateTimeout(TimeUnit unit) {
+        return DbConfigManager.getDurationVal(
+            props, RepParams.ENV_UNKNOWN_STATE_TIMEOUT, unit);
+    }
+
+    /**
+     * Sets the heartbeat interval.
+     * @param millis Interval in milliseconds.
+     * @return this
+     */
+    public ArbiterConfig setHeartbeatInterval(int millis) {
+        DbConfigManager.setIntVal(
+            props, RepParams.HEARTBEAT_INTERVAL, millis, validateParams);
+        return this;
+    }
+
+    /**
+     * Gets the heartbeat interval in milliseconds.
+     * @return Heartbeat interval.
+     */
+    public int getHeartbeatInterval() {
+        return DbConfigManager.getIntVal(props, RepParams.HEARTBEAT_INTERVAL);
+    }
+
+    /**
+     * @hidden
+     * The size of the message queue the Arbiter uses to read messages;
+     * also used to size the output message queue for responses to the
+     * master.
+     *
+     * @param val size of the queue
+     * @return this
+     */
+    public ArbiterConfig setMessageQueueSize(int val) {
+        DbConfigManager.setIntVal(
+            props,
+            RepParams.REPLICA_MESSAGE_QUEUE_SIZE, val, validateParams);
+        return this;
+    }
+
+    /**
+     * @hidden
+     * Internal parameter that enables use of the group ack message.
+     *
+     * @param val Boolean value.
+     * @return this
+     */
+    public ArbiterConfig setEnableGroupAcks(boolean val) {
+        DbConfigManager.setBooleanVal(
+            props, RepParams.ENABLE_GROUP_ACKS, val, validateParams);
+        return this;
+    }
+
+    /**
+     * @hidden
+     * Gets the boolean controlling the use of the group ack message.
+     * @return boolean
+     */
+    public boolean getEnableGroupAcks() {
+        return DbConfigManager.getBooleanVal(
+            props, RepParams.ENABLE_GROUP_ACKS);
+    }
+
+    /**
+     * @hidden
+     * Gets the size of the message queue.
+     * @return size of the message queue.
+     */
+    public int getMessageQueueSize() {
+        return DbConfigManager.getIntVal(
+            props, RepParams.REPLICA_MESSAGE_QUEUE_SIZE);
+    }
+
+    /**
+     * @hidden
+     * The interval used when checking an inactive connection to the
+     * master.
+     *
+     * @param timeout Timeout value
+     * @param unit time unit
+     * @return this
+     * @throws IllegalArgumentException
+     */
+    public ArbiterConfig setChannelTimeout(long timeout, TimeUnit unit)
+        throws IllegalArgumentException {
+        DbConfigManager.setDurationVal(
+            props, RepParams.REPLICA_TIMEOUT,
+            timeout, unit, validateParams);
+        return this;
+    }
+
+    /**
+     * @hidden
+     * Gets the timeout value.
+     * @param unit TimeUnit
+     * @return timeout value.
+     */
+    public long getChannelTimeout(TimeUnit unit) {
+        return DbConfigManager.getDurationVal(
+            props, RepParams.REPLICA_TIMEOUT, unit);
+    }
+
+    /**
+     * @hidden
+     * The timeout used when waiting for the initial heartbeat
+     * when establishing a connection.
+     * @param timeout Maximum time to wait.
+     * @param unit TimeUnit
+     * @return this
+     * @throws IllegalArgumentException
+     */
+    public ArbiterConfig setPreHeartbeatTimeout(long timeout, TimeUnit unit)
+        throws IllegalArgumentException {
+        DbConfigManager.setDurationVal(
+            props, RepParams.PRE_HEARTBEAT_TIMEOUT,
+            timeout, unit, validateParams);
+        return this;
+    }
+
+    /**
+     * @hidden
+     * The pre heartbeat timeout value.
+     * @param unit TimeUnit
+     * @return timeout
+     */
+    public long getPreHeartbeatTimeout(TimeUnit unit) {
+        return DbConfigManager.getDurationVal(
+            props, RepParams.PRE_HEARTBEAT_TIMEOUT, unit);
+    }
+
+    /**
+     * @hidden
+     * The heartbeat timeout.
+ * + * @param timeout Timeout value + * @param unit time unit + * @return this + * @throws IllegalArgumentException + */ + public ArbiterConfig setFeederTimeout(long timeout, TimeUnit unit) + throws IllegalArgumentException { + DbConfigManager.setDurationVal( + props, RepParams.FEEDER_TIMEOUT, + timeout, unit, validateParams); + return this; + } + + /** + * @hidden + * Gets the timeout value. + * @param unit TimeUnit + * @return timeout value. + */ + public long getFeederTimeout(TimeUnit unit) { + return DbConfigManager.getDurationVal( + props, RepParams.FEEDER_TIMEOUT, unit); + } + + /** + * @hidden + * The size of the the TCP receive buffer associated with the socket used + * by the Arbiter to communicate to the master. + * @param val size of the buffer + * @return this + */ + public ArbiterConfig setReceiveBufferSize(int val) { + DbConfigManager.setIntVal( + props, + RepParams.REPLICA_RECEIVE_BUFFER_SIZE, val, validateParams); + return this; + } + + /** + * @hidden + * Returns the receive buffer size. + * @return buffer size. + */ + public int getReceiveBufferSize() { + return DbConfigManager.getIntVal( + props, RepParams.REPLICA_RECEIVE_BUFFER_SIZE); + } + + /** + * @hidden + * The socket timeout value used by an Arbiter when it opens a new + * connection to establish a stream with a feeder. + * @param timeout maximum time to wait + * @param unit TimeUnit + * @return this + * @throws IllegalArgumentException + */ + public ArbiterConfig setStreamOpenTimeout(long timeout, TimeUnit unit) + throws IllegalArgumentException { + DbConfigManager.setDurationVal( + props, RepParams.REPSTREAM_OPEN_TIMEOUT, + timeout, unit, validateParams); + return this; + } + + /** + * @hidden + * Returns the socket timeout value. + * @param unit TimeUnit + * @return Timeout value. + */ + public long getStreamOpenTimeout(TimeUnit unit) { + return DbConfigManager.getDurationVal( + props, RepParams.REPSTREAM_OPEN_TIMEOUT, unit); + } + + /** + * @hidden + * Get the replication service net configuration associated with + * this MonitorConfig. + */ + public ReplicationNetworkConfig getRepNetConfig() { + return repNetConfig; + } + + /** + * @hidden + * Set the replication service net configuration associated with + * this MonitorConfig. + * + * @param netConfig the new ReplicationNetworkConfig to be associated + * with this MonitorConfig. This must not be null. + * + * @throws IllegalArgumentException if the netConfig is null + */ + public ArbiterConfig setRepNetConfig( + ReplicationNetworkConfig netConfig) { + + setRepNetConfigVoid(netConfig); + return this; + } + + /** + * Documentation inherited from ArbiterMutableConfig.setConfigParam. 
+ */ + @Override + public ArbiterConfig setConfigParam(String paramName, String value) + throws IllegalArgumentException { + + boolean forReplication = false; + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + if (param != null) { + forReplication = param.isForReplication(); + } + DbConfigManager.setConfigParam(props, + paramName, + value, + false, /* requireMutablity */ + validateParams, + forReplication, /* forReplication */ + true /* verifyForReplication */); + return this; + } + + /** + * @hidden + * For bean editors + */ + public void setRepNetConfigVoid(ReplicationNetworkConfig netConfig) { + if (netConfig == null) { + throw new IllegalArgumentException("netConfig may not be null"); + } + repNetConfig = netConfig; + } + + ArbiterMutableConfig getArbiterMutableConfig() { + return super.copy(); + } + + /** + */ + public ArbiterConfig clone() { + ArbiterConfig retval = (ArbiterConfig)super.clone(); + retval.repNetConfig = repNetConfig.clone(); + retval.arbiterHome = this.arbiterHome; + return retval; + } + + /** + */ + public ArbiterConfig setLoggingHandler(Handler handler) { + loggingHandler = handler; + return this; + } + + /** + * Returns the custom java.util.logging.Handler specified by the + * application. + */ + public Handler getLoggingHandler() { + return loggingHandler; + } + + /** + * Display configuration values. + */ + @Override + public String toString() { + return ("arbiterHome=" + arbiterHome + "\n" + + "repNetConfig=" + repNetConfig + "\n" + + super.toString()); + } +} diff --git a/src/com/sleepycat/je/rep/arbiter/ArbiterMutableConfig.java b/src/com/sleepycat/je/rep/arbiter/ArbiterMutableConfig.java new file mode 100644 index 0000000..1c87be0 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/ArbiterMutableConfig.java @@ -0,0 +1,228 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbiter; + +import java.util.Properties; +import java.util.logging.Level; + +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.impl.RepParams; + +/** + * The mutable configuration parameters for an {@link Arbiter}. + * + * @see Arbiter#setArbiterMutableConfig(ArbiterMutableConfig) + */ +public class ArbiterMutableConfig implements Cloneable { + + Properties props; + + boolean validateParams = true; + + ArbiterMutableConfig() { + props = new Properties(); + } + + ArbiterMutableConfig(Properties properties) { + props = (Properties)properties.clone(); + } + + /** + * Identify one or more helpers nodes by their host and port pairs in this + * format: + *
        +     * hostname[:port][,hostname[:port]]*
        +     * 
        + * + * @param helperHosts the string representing the host and port pairs. + */ + public ArbiterMutableConfig setHelperHosts(String helperHosts) { + DbConfigManager.setVal( + props, RepParams.HELPER_HOSTS, helperHosts, validateParams); + return this; + } + + /** + * Returns the string identifying one or more helper host and port pairs in + * this format: + *
        +     * hostname[:port][,hostname[:port]]*
        +     * 
        + * + * @return the string representing the host port pairs. + */ + public String getHelperHosts() { + return DbConfigManager.getVal(props, RepParams.HELPER_HOSTS); + } + + /** + * Trace messages equal and above this level will be logged to the je.info + * file, which is in the Arbiter home directory. Value should + * be one of the predefined java.util.logging.Level values. + *

+     *
+     * Name: com.sleepycat.je.util.FileHandler.level
+     * Type: String; Mutable: No; Default: "INFO"
+     *

        + * @see Chapter 12. Logging + * + * @param val value of the logging level. + * @return ArbiterConfig. + */ + public ArbiterMutableConfig setFileLoggingLevel(String val) { + Level.parse(val); + DbConfigManager.setVal( + props, EnvironmentParams.JE_FILE_LEVEL, val, false); + return this; + } + + /** + * Gets the file logging level. + * @return logging level + */ + public String getFileLoggingLevel() { + return DbConfigManager.getVal(props, EnvironmentParams.JE_FILE_LEVEL); + } + + /** + * Trace messages equal and above this level will be logged to the + * console. Value should be one of the predefined + * java.util.logging.Level values. + * + *

+     *
+     * Name: com.sleepycat.je.util.ConsoleHandler.level
+     * Type: String; Mutable: No; Default: "OFF"
+     *

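A short sketch applying the two logging levels described above; the chosen levels are illustrative:

    import com.sleepycat.je.rep.arbiter.ArbiterConfig;

    public class ArbiterLogging {
        public static ArbiterConfig verboseFileQuietConsole(ArbiterConfig c) {
            c.setFileLoggingLevel("FINE");   /* je.info in the Arbiter home */
            c.setConsoleLoggingLevel("OFF"); /* silence the console */
            return c;
        }
    }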
        + * @see Chapter 12. Logging + * + * @param val Logging level. + * @return this. + */ + public ArbiterMutableConfig setConsoleLoggingLevel(String val) { + Level.parse(val); + DbConfigManager.setVal( + props, EnvironmentParams.JE_CONSOLE_LEVEL, val, false); + return this; + } + + /** + * Gets the console logging level. + * @return logging level + */ + public String getConsoleLoggingLevel() { + return DbConfigManager.getVal(props, EnvironmentParams.JE_CONSOLE_LEVEL); + } + + /** + * @hidden + * Set this configuration parameter. First validate the value specified for + * the configuration parameter; if it is valid, the value is set in the + * configuration. Hidden could be used to set parameters internally. + * + * @param paramName the configuration parameter name, one of the String + * constants in this class + * + * @param value The configuration value + * + * @return this + * + * @throws IllegalArgumentException if the paramName or value is invalid. + */ + public ArbiterMutableConfig setConfigParam(String paramName, + String value) + throws IllegalArgumentException { + + boolean forReplication = false; + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(paramName); + if (param != null) { + forReplication = param.isForReplication(); + } + + DbConfigManager.setConfigParam(props, + paramName, + value, + true, /* require mutability. */ + true, + forReplication, /* forReplication */ + true /* verifyForReplication */); + return this; + } + + /** + * @hidden + * Returns the value for this configuration parameter. + * + * @param paramName a valid configuration parameter, one of the String + * constants in this class. + * @return the configuration value. + * @throws IllegalArgumentException if the paramName is invalid. + */ + public String getConfigParam(String paramName) + throws IllegalArgumentException { + + return DbConfigManager.getConfigParam(props, paramName); + } + + protected ArbiterMutableConfig copy() { + return new ArbiterMutableConfig(props); + } + + /** + * @hidden + * For internal use only. + */ + public boolean isConfigParamSet(String paramName) { + return props.containsKey(paramName); + } + + public ArbiterMutableConfig clone() { + try { + ArbiterMutableConfig copy = + (ArbiterMutableConfig) super.clone(); + copy.props = (Properties) props.clone(); + return copy; + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + public Properties getProps() { + return (Properties) props.clone(); + } + + /** + * Display configuration values. + */ + @Override + public String toString() { + return (props.toString() + "\n"); + } + +} diff --git a/src/com/sleepycat/je/rep/arbiter/ArbiterStats.java b/src/com/sleepycat/je/rep/arbiter/ArbiterStats.java new file mode 100644 index 0000000..88c60bd --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/ArbiterStats.java @@ -0,0 +1,114 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.arbiter; + +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_DTVLSN; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_MASTER; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_ACKS; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_FSYNCS; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_REPLAY_QUEUE_OVERFLOW; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_WRITES; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_STATE; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_VLSN; + +import java.io.Serializable; + +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Statistics for an {@link Arbiter}. + * + * @see Arbiter#getStats(StatsConfig) + */ +public class ArbiterStats implements Serializable { + + private static final long serialVersionUID = 1734048134L; + + private final StatGroup arbStats; + + /** + * @hidden + * Internal use only. + */ + ArbiterStats(StatGroup arbGrp) { + if (arbGrp != null) { + arbStats = arbGrp; + } else { + arbStats = new StatGroup(ArbiterStatDefinition.GROUP_NAME, + ArbiterStatDefinition.GROUP_DESC); + } + } + + /** + * The number of attempts to queue a response when + * the queue was full. + */ + public long getReplayQueueOverflow() { + return arbStats.getLong(ARB_N_REPLAY_QUEUE_OVERFLOW); + } + + /** + * The number of transactions that has been + * acknowledged. + */ + public long getAcks() { + return arbStats.getLong(ARB_N_ACKS); + } + + /** + * The current master node. + */ + public String getMaster() { + return arbStats.getString(ARB_MASTER); + } + + /** + * The ReplicatedEnvironment.State of the node. + */ + public String getState() { + return arbStats.getString(ARB_STATE); + } + + /** + * The highest commit VLSN that has been + * acknowledged. + */ + public long getVLSN() { + return arbStats.getLong(ARB_VLSN); + } + + /** + * The highest commit DTVLSN that has been + * acknowledged. + */ + public long getDTVLSN() { + return arbStats.getLong(ARB_DTVLSN); + } + + /** + * The number of file writes. + */ + public long getWrites() { + return arbStats.getLong(ARB_N_WRITES); + } + + /** + * The number of file fsyncs. + */ + public long getFSyncs() { + return arbStats.getLong(ARB_N_FSYNCS); + } +} + diff --git a/src/com/sleepycat/je/rep/arbiter/impl/ArbBinaryStateService.java b/src/com/sleepycat/je/rep/arbiter/impl/ArbBinaryStateService.java new file mode 100644 index 0000000..3a000e8 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/impl/ArbBinaryStateService.java @@ -0,0 +1,151 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.arbiter.impl;
+
+import java.io.IOException;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol;
+import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol.BinaryNodeStateRequest;
+import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol.BinaryNodeStateResponse;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingService;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.JVMSystemUtils;
+
+/**
+ * The service registered by an Arbiter to answer node state requests.
+ *
+ * This BinaryNodeStateService was introduced to support the new
+ * BinaryStateProtocol; it is used by the "Ping" command.
+ */
+public class ArbBinaryStateService extends ExecutingService {
+
+    private final ArbiterImpl arbImpl;
+    private final ServiceDispatcher dispatcher;
+    private final Logger logger;
+
+    /* Identifies the Node State querying Service. */
+    public static final String SERVICE_NAME = "BinaryNodeState";
+
+    public ArbBinaryStateService(ServiceDispatcher dispatcher,
+                                 ArbiterImpl arbImpl) {
+        super(SERVICE_NAME, dispatcher);
+        this.arbImpl = arbImpl;
+        this.dispatcher = dispatcher;
+        this.logger = LoggerUtils.getLogger(getClass());
+
+        dispatcher.register(this);
+    }
+
+    public void shutdown() {
+        dispatcher.cancel(SERVICE_NAME);
+    }
+
+    @Override
+    public Runnable getRunnable(DataChannel dataChannel) {
+        return new NodeStateServiceRunnable(dataChannel);
+    }
+
+    class NodeStateServiceRunnable implements Runnable {
+        private final DataChannel channel;
+
+        NodeStateServiceRunnable(DataChannel channel) {
+            this.channel = channel;
+        }
+
+        /* Create the NodeState for the request. */
+        private BinaryNodeStateResponse createResponse(
+            BinaryNodeStateProtocol protocol) {
+
+            long joinTime = arbImpl.getJoinGroupTime();
+            long vlsnValue = (arbImpl.getArbiterVLSNTracker().get() == null ?
+                0L : arbImpl.getArbiterVLSNTracker().get().getSequence());
+
+            return protocol.new BinaryNodeStateResponse(
+                arbImpl.getNameIdPair().getName(),
+                arbImpl.getGroupName(),
+                arbImpl.getMasterStatus().getNodeMasterNameId().getName(),
+                JEVersion.CURRENT_VERSION, joinTime,
+                arbImpl.getNodeState(),
+                vlsnValue, vlsnValue,
+                0, LogEntryType.LOG_VERSION,
+                null, JVMSystemUtils.getSystemLoad());
+        }
+
+        @Override
+        public void run() {
+            BinaryNodeStateProtocol protocol = null;
+
+            try {
+                protocol = new BinaryNodeStateProtocol(NameIdPair.NOCHECK,
+                                                       arbImpl.getRepImpl());
+                try {
+                    channel.getSocketChannel().configureBlocking(true);
+
+                    BinaryNodeStateRequest msg =
+                        protocol.read(channel, BinaryNodeStateRequest.class);
+
+                    /*
+                     * Respond with a protocol error if the group name
+                     * doesn't match.
+                     */
+                    final String groupName = msg.getGroupName();
+                    if (!arbImpl.getGroupName().equals(groupName) ||
+                        !arbImpl.getNameIdPair().getName().
+                         equals(msg.getNodeName())) {
+                        throw new ProtocolException("Sending the request to" +
+                            " a wrong group or a wrong node.");
+                    }
+
+                    /* Write the response to the requesting node.
*/ + BinaryNodeStateResponse response = + createResponse(protocol); + protocol.write(response, channel); + LoggerUtils.finest(logger, arbImpl.getRepImpl(), + "Deal with a node state request successfully."); + } catch (ProtocolException e) { + LoggerUtils.info(logger, arbImpl.getRepImpl(), + "Get a ProtocolException with message: " + + LoggerUtils.exceptionTypeAndMsg(e) + + " while dealing with a node state request."); + protocol.write + (protocol.new ProtocolError(e.getMessage()), channel); + } catch (Exception e) { + LoggerUtils.info(logger, arbImpl.getRepImpl(), + "Unexpected exception: " + + LoggerUtils.exceptionTypeAndMsg(e)); + protocol.write + (protocol.new ProtocolError(e.getMessage()), channel); + } finally { + if (channel.isOpen()) { + channel.close(); + } + } + } catch (IOException e) { + + /* + * Channel has already been closed, or the close itself + * failed. + */ + } + } + } +} diff --git a/src/com/sleepycat/je/rep/arbiter/impl/ArbiterAcker.java b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterAcker.java new file mode 100644 index 0000000..84a0c3c --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterAcker.java @@ -0,0 +1,884 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbiter.impl; + +import static com.sleepycat.je.log.LogEntryType.LOG_TXN_COMMIT; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_MASTER; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_ACKS; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_REPLAY_QUEUE_OVERFLOW; + +import java.io.IOException; +import java.net.ConnectException; +import java.nio.channels.ClosedByInterruptException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.FeederManager; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.ReplicaOutputThread; +import com.sleepycat.je.rep.impl.node.ReplicaOutputThreadBase; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.stream.BaseProtocol.ShutdownRequest; +import com.sleepycat.je.rep.stream.InputWireRecord; +import com.sleepycat.je.rep.stream.MasterStatus.MasterSyncException; +import 
com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.stream.ReplicaFeederHandshake; +import com.sleepycat.je.rep.stream.ReplicaFeederHandshakeConfig; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.BinaryProtocol.MessageOp; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.utilint.NamedChannelWithTimeout; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.RepUtils.Clock; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.Response; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.txn.TxnCommit; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.StringStat; +import com.sleepycat.je.utilint.VLSN; + +/** + * The ArbiterAcker is used to acknowledge transactions. A feeder + * connection is established with the current master. Commit and Heartbeat + * messages are sent by the master. The ArbiterAcker responds and persistently + * tracks the high VLSN of the commit messages that it acknowledges. + * + * There are configuration parameters that are used. + * RepParams.REPLICA_MESSAGE_QUEUE_SIZE used for the replay queue size and + * in the computation of the output + * queue size. + * RepParams.REPLICA_TIMEOUT used for the Arbiter feeder channel timeout. + * RepParams.PRE_HEARTBEAT_TIMEOUT used for the Arbiter feeder channel timeout + * before the first heartbeat is sent. + * RepParams.REPLICA_RECEIVE_BUFFER_SIZE used for the datachannel buffer size. + * RepParams.REPSTREAM_OPEN_TIMEOUT used for the datachannel open timeout. + * RepParams.MAX_CLOCK_DELTA - used for ReplicaFeederHandshake maximum clock + * delta. + * RepParams.HEARTBEAT_INTERVAL heartbeat interval in millis. + * RepParams.ENABLE_GROUP_ACKS enables output thread ack grouping. + * + * The main Arbiter thread reads messages from the feeder channel and queues + * the message on the request queue. The request thread reads entries + * from the request queue. The request thread may queue an entry on the + * output queue. The ArbiterOutputThread reads from the output queue and + * writes to the network channel. + * read from network -> ArbiterAcker main thread -> requestQueue + * requestQueue -> RequestThread -> outputQueue + * outputQueue -> ArbiterOutputThread -> writes to network + */ +class ArbiterAcker { + + /* + * Defines the possible types of exits that can be requested from the + * RequestThread. + */ + private enum RequestExitType { + IMMEDIATE, /* An immediate exit; ignore queued requests. */ + SOFT /* Process pending requests in queue, then exit */ + } + /* Number of times to retry on a network connection failure. */ + private static final int NETWORK_RETRIES = 2 ; + + /* + * Service unavailable retries. These are typically the result of service + * request being made before the node is ready to provide them. For + * example, the feeder service is only available after a node has + * transitioned to becoming the master. + */ + private static final int SERVICE_UNAVAILABLE_RETRIES = 10; + + /* + * The number of ms to wait between above retries, allowing time for the + * master to assume its role, and start listening on its port. 
+ */ + private static final int CONNECT_RETRY_SLEEP_MS = 1000; + + /* The queue poll interval, 1 second */ + private final static long QUEUE_POLL_INTERVAL_NS = 1000000000l; + + /* The exception that provoked the ArbiterAcker exit. */ + private Exception shutdownException = null; + + private final RepImpl repImpl; + private final Logger logger; + private NamedChannelWithTimeout arbiterFeederChannel; + private final Clock clock; + private Protocol protocol; + private final ArbiterImpl arbiterImpl; + + private final BlockingQueue outputQueue; + + /* + * The message queue used for communications between the network read + * thread and the request thread. + */ + private final BlockingQueue requestQueue; + + private ArbiterOutputThread arbiterOutputThread; + private RequestThread requestThread; + + /* + * The last commit entry acknowledged. + */ + private volatile VLSN lastReplayedVLSN = null; + + /* The in-memory DTVLSN maintained by the Arbiter. */ + private long dtvlsn = VLSN.NULL_VLSN_SEQUENCE; + + /* Statistics */ + private final StatGroup stats; + + /* + * The number of times a message entry could not be inserted into + * the queue within the poll period and had to be retried. + */ + private final LongStat nReplayQueueOverflow; + /* Number of transactions acknowledged */ + private final LongStat nAcks; + /* Current or last master that was connected */ + private final StringStat masterStat; + + /* + * The maximum number of entries pulled out of the request queue that + * are grouped together. There is at most one write to the data file for + * this group. + */ + private final int N_MAX_GROUP_XACT = 100; + private final List groupMessages = new ArrayList(); + private final List groupXact = new ArrayList(); + private final long FSYNC_INTERVAL = 1000; + private long lastFSyncTime; + + ArbiterAcker(ArbiterImpl arbiterImpl, + RepImpl repImpl) { + this.arbiterImpl = arbiterImpl; + this.repImpl = repImpl; + logger = repImpl.getLogger(); + + clock = new Clock(RepImpl.getClockSkewMs()); + /* Set up the request queue. */ + final int requestQueueSize = repImpl.getConfigManager(). + getInt(RepParams.REPLICA_MESSAGE_QUEUE_SIZE); + + requestQueue = new ArrayBlockingQueue(requestQueueSize); + + /* + * The factor of 2 below is somewhat arbitrary. It should be > 1 X so + * that the RequestThread can completely process the buffered + * messages in the face of a network drop and 2X to allow for + * additional headroom and minimize the chances that the operation + * might be blocked due to the limited queue length. 
+ */ + final int outputQueueSize = 2 * + repImpl.getConfigManager().getInt( + RepParams.REPLICA_MESSAGE_QUEUE_SIZE); + outputQueue = new ArrayBlockingQueue(outputQueueSize); + + stats = new StatGroup(ArbiterStatDefinition.GROUP_NAME, + ArbiterStatDefinition.GROUP_DESC); + nReplayQueueOverflow = + new LongStat(stats, ARB_N_REPLAY_QUEUE_OVERFLOW); + nAcks = new LongStat(stats, ARB_N_ACKS); + masterStat = new StringStat(stats, ARB_MASTER); + } + + private void initializeConnection() + throws ConnectRetryException, + IOException { + createArbiterFeederChannel(); + arbiterImpl.refreshCachedGroup(); + ReplicaFeederHandshake handshake = + new ReplicaFeederHandshake(new RepFeederHandshakeConfig()); + protocol = handshake.execute(); + + arbiterImpl.refreshCachedGroup(); + + /* read heartbeat and respond */ + protocol.read(arbiterFeederChannel.getChannel(), + Protocol.Heartbeat.class); + queueAck(ReplicaOutputThread.HEARTBEAT_ACK); + + /* decrement latch to indicate we are connected */ + arbiterImpl.getReadyLatch().countDown(); + arbiterImpl.notifyJoinGroup(); + } + + /** + * The core Arbiter control loop. The loop exits when it + * encounters one of the following possible conditions: + * + * 1) The connection to the master can no longer be maintained, due to + * connectivity issues, or because the master has explicitly shutdown its + * connections due to an election. + * + * 2) The node becomes aware of a change in master, that is, assertSync() + * fails. + * + * 3) The loop is interrupted, which is interpreted as a request to + * shutdown the Arbiter node as a whole. + * + * 4) It fails to establish its node information in the master as it + * attempts to join the replication group for the first time. + * + * Normal exit from this run loop results in the Arbiter node retrying + * finding the group master. + * A thrown exception, on the other hand, results in the Arbiter + * node as a whole terminating its operation and no longer participating in + * the replication group, that is, it enters the DETACHED state. + * + * @throws InterruptedException + * @throws DatabaseException if the environment cannot be closed/for a + * re-init + * @throws GroupShutdownException + */ + void runArbiterAckLoop() + throws InterruptedException, + DatabaseException, + GroupShutdownException { + + Class retryExceptionClass = null; + int retryCount = 0; + try { + + while (true) { + try { + runArbiterAckLoopInternal(); + /* Normal exit */ + break; + } catch (RetryException e) { + if (!arbiterImpl.getMasterStatus().inSync()) { + LoggerUtils.fine(logger, repImpl, + "Retry terminated, out of sync."); + break; + } + if ((e.getClass() == retryExceptionClass) || + (e.retries == 0)) { + if (++retryCount >= e.retries) { + /* Exit replica retry elections */ + LoggerUtils.info + (logger, repImpl, + "Failed to recover from exception: " + + e.getMessage() + ", despite " + e.retries + + " retries.\n" + + LoggerUtils.getStackTrace(e)); + break; + } + } else { + retryCount = 0; + retryExceptionClass = e.getClass(); + } + LoggerUtils.fine(logger, repImpl, "Retry #: " + + retryCount + "/" + e.retries + + " Will retry Arbiter loop after " + + e.retrySleepMs + "ms. "); + Thread.sleep(e.retrySleepMs); + if (!arbiterImpl.getMasterStatus().inSync()) { + break; + } + } + } + } finally { + arbiterImpl.resetReadyLatch(shutdownException); + } + /* Exit use elections to try a different master. 
*/ + } + + void shutdown() { + if (requestThread != null) { + try { + requestThread.shutdownThread(logger); + } catch (Exception e) { + /* Ignore so shutdown can continue */ + LoggerUtils.info(logger, repImpl, + "Request thread error shutting down." + e); + } + } + if (arbiterOutputThread != null) { + arbiterOutputThread.shutdownThread(logger); + try { + arbiterOutputThread.join(); + } catch(InterruptedException e) { + /* Ignore we will clean up via killing IO channel anyway. */ + } + } + RepUtils.shutdownChannel(arbiterFeederChannel); + } + + private void runArbiterAckLoopInternal() + throws InterruptedException, + RetryException { + + shutdownException = null; + LoggerUtils.info(logger, repImpl, + "Arbiter loop started with master: " + + arbiterImpl.getMasterStatus().getNodeMasterNameId()); + try { + initializeConnection(); + arbiterImpl.setState(ReplicatedEnvironment.State.REPLICA); + doRunArbiterLoopInternalWork(); + arbiterImpl.setState(ReplicatedEnvironment.State.UNKNOWN); + } catch (ClosedByInterruptException closedByInterruptException) { + if (arbiterImpl.isShutdown()) { + LoggerUtils.info(logger, repImpl, + "Arbiter loop interrupted for shutdown."); + return; + } + LoggerUtils.warning(logger, repImpl, + "Arbiter loop unexpected interrupt."); + throw new InterruptedException + (closedByInterruptException.getMessage()); + } catch (IOException e) { + + /* + * Master may have changed with the master shutting down its + * connection as a result. Normal course of events, log it and + * return to the outer node level loop. + */ + LoggerUtils.fine(logger, repImpl, + "Arbiter IO exception: " + e.getMessage() + + "\n" + LoggerUtils.getStackTrace(e)); + } catch (RetryException e) { + /* Propagate it outwards. Node does not need to shutdown. */ + throw e; + } catch (GroupShutdownException e) { + shutdownException = e; + throw e; + } catch (RuntimeException e) { + shutdownException = e; + LoggerUtils.severe(logger, repImpl, + "Arbiter unexpected exception " + e + + " " + LoggerUtils.getStackTrace(e)); + throw e; + } catch (MasterSyncException e) { + /* expected change in masters from an election. */ + LoggerUtils.fine(logger, repImpl, e.getMessage()); + } catch (Exception e) { + shutdownException = e; + LoggerUtils.severe(logger, repImpl, + "Arbiter unexpected exception " + e + + " " + LoggerUtils.getStackTrace(e)); + throw EnvironmentFailureException.unexpectedException(e); + } finally { + loopExitCleanup(); + } + } + + protected void doRunArbiterLoopInternalWork() + throws Exception { + + final int timeoutMs = repImpl.getConfigManager(). + getDuration(RepParams.REPLICA_TIMEOUT); + arbiterFeederChannel.setTimeoutMs(timeoutMs); + + requestQueue.clear(); + outputQueue.clear(); + + arbiterOutputThread = + new ArbiterOutputThread(repImpl, + outputQueue, + protocol, + arbiterFeederChannel.getChannel(), + arbiterImpl.getArbiterVLSNTracker()); + arbiterOutputThread.start(); + + requestThread = new RequestThread(); + requestThread.start(); + + long maxPending = 0; + + try { + while (true) { + Message message = protocol.read(arbiterFeederChannel); + + if (arbiterImpl.isShutdownOrInvalid() || (message == null)) { + return; + } + + while (!requestQueue. + offer(message, + QUEUE_POLL_INTERVAL_NS, + TimeUnit.NANOSECONDS)) { + /* Offer timed out. 
*/ + if (!requestThread.isAlive()) { + return; + } + /* Retry the offer */ + nReplayQueueOverflow.increment(); + } + + final int pending = requestQueue.size(); + if (pending > maxPending) { + maxPending = pending; + LoggerUtils.fine(logger, repImpl, + "Max pending request log items:" + + maxPending); + } + } + } catch (IOException ioe) { + + /* + * Make sure messages in the queue are processed. Ensure, in + * particular, that shutdown requests are processed and not ignored + * due to the IOEException resulting from a closed connection. + */ + requestThread.exitRequest = RequestExitType.SOFT; + } finally { + + if (requestThread.exitRequest == RequestExitType.SOFT) { + + /* + * Drain all queued messages, exceptions may be generated + * in the process. They logically precede IO exceptions. + */ + requestThread.join(); + } + + try { + + if (requestThread.exception != null) { + /* request thread is dead or exiting. */ + throw requestThread.exception; + } + + if (arbiterOutputThread.getException() != null) { + throw arbiterOutputThread.getException(); + } + } finally { + + /* Ensure thread has exited in all circumstances */ + requestThread.exitRequest = RequestExitType.IMMEDIATE; + requestThread.join(); + arbiterOutputThread.shutdownThread(logger); + } + } + } + + StatGroup loadStats(StatsConfig config) + throws DatabaseException { + masterStat.set( + arbiterImpl.getMasterStatus().getNodeMasterNameId().toString()); + StatGroup copyStats = stats.cloneGroup(config.getClear()); + return copyStats; + } + + /** + * Performs the cleanup actions upon exit from the internal arbiter loop. + * + */ + private void loopExitCleanup() { + + if (shutdownException != null) { + if (shutdownException instanceof RetryException) { + LoggerUtils.fine(logger, repImpl, + "Retrying connection to feeder. Message: " + + shutdownException.getMessage()); + } else if (shutdownException instanceof GroupShutdownException) { + LoggerUtils.info(logger, repImpl, + "Exiting inner Arbiter loop." + + " Master requested shutdown."); + } else { + LoggerUtils.warning + (logger, repImpl, + "Exiting inner Arbiter loop with exception " + + shutdownException + "\n" + + LoggerUtils.getStackTrace(shutdownException)); + } + } else { + LoggerUtils.fine(logger, repImpl, "Exiting inner Arbiter loop." ); + } + + shutdown(); + } + + /** + * Returns a channel used by the Arbiter to connect to the Feeder. The + * socket is configured with a read timeout that's a multiple of the + * heartbeat interval to help detect, or initiate a change in master. + * + * @throws IOException + * @throws ConnectRetryException + */ + private void createArbiterFeederChannel() + throws IOException, ConnectRetryException { + + DataChannel dataChannel = null; + + final DbConfigManager configManager = repImpl.getConfigManager(); + final int timeoutMs = configManager. + getDuration(RepParams.PRE_HEARTBEAT_TIMEOUT); + + final int receiveBufferSize = + configManager.getInt(RepParams.REPLICA_RECEIVE_BUFFER_SIZE); + + try { + final int openTimeout = configManager. + getDuration(RepParams.REPSTREAM_OPEN_TIMEOUT); + + /* + * Note that soTimeout is not set since it's a blocking channel and + * setSoTimeout has no effect on a blocking nio channel. + * + * Push responses out rapidly, they are small (heart beat or commit + * response) and need timely delivery to the master. + * (tcpNoDelay = true) + */ + + final ConnectOptions connectOpts = new ConnectOptions(). + setTcpNoDelay(true). + setReceiveBufferSize(receiveBufferSize). + setOpenTimeout(openTimeout). 
+ setBlocking(true); + + dataChannel = + repImpl.getChannelFactory(). + connect(arbiterImpl.getMasterStatus().getNodeMaster(), + connectOpts); + + arbiterFeederChannel = + new NamedChannelWithTimeout(repImpl, + logger, + arbiterImpl.getChannelTimeoutTask(), + dataChannel, + timeoutMs); + + ServiceDispatcher.doServiceHandshake + (dataChannel, FeederManager.FEEDER_SERVICE); + } catch (ConnectException e) { + + /* + * A network problem, or the node went down between the time we + * learned it was the master and we tried to connect. + */ + throw new ConnectRetryException(e.getMessage(), + NETWORK_RETRIES, + CONNECT_RETRY_SLEEP_MS); + } catch (ServiceConnectFailedException e) { + + /* + * The feeder may not have established the Feeder Service + * as yet. For example, the transition to the master may not have + * been completed. Wait longer. + */ + if (e.getResponse() == Response.UNKNOWN_SERVICE) { + throw new ConnectRetryException(e.getMessage(), + SERVICE_UNAVAILABLE_RETRIES, + CONNECT_RETRY_SLEEP_MS); + } + throw EnvironmentFailureException.unexpectedException(e); + } + } + + /** + * Process a heartbeat message. It queues a response and updates + * the consistency tracker with the information in the heartbeat. + * + * @param xid + * @throws IOException + */ + private void queueAck(Long xid) + throws IOException { + try { + outputQueue.put(xid); + } catch (InterruptedException ie) { + + /* + * Have the higher levels treat it like an IOE and + * exit the thread. + */ + throw new IOException("Ack I/O interrupted", ie); + } + } + + /** + * Process the shutdown message from the master and return the + * GroupShutdownException that must be thrown to exit the Replica loop. + * + * @return the GroupShutdownException + */ + private GroupShutdownException processShutdown(ShutdownRequest shutdown) + throws IOException { + + /* + * Acknowledge the shutdown message right away, since the checkpoint + * operation can take a long time to complete. Long enough to exceed + * the feeder timeout on the master. The master only needs to know that + * the replica has received the message. + */ + queueAck(ReplicaOutputThreadBase.SHUTDOWN_ACK); + + /* + * Turn off network timeouts on the replica, since we don't want the + * replica to timeout the connection. The connection itself is no + * longer used past this point and will be reclaimed as part of normal + * replica exit cleanup. + */ + arbiterFeederChannel.setTimeoutMs(Integer.MAX_VALUE); + final String masterHostName = + arbiterImpl.getMasterStatus().getGroupMaster().getHostName(); + return new GroupShutdownException( + logger, + repImpl, + masterHostName, + arbiterImpl.getArbiterVLSNTracker().get(), + shutdown.getShutdownTimeMs()); + } + + @SuppressWarnings("serial") + static abstract class RetryException extends Exception { + final int retries; + final int retrySleepMs; + + RetryException(String message, + int retries, + int retrySleepMs) { + super(message); + this.retries = retries; + this.retrySleepMs = retrySleepMs; + } + + @Override + public String getMessage() { + return "Failed after retries: " + retries + + " with retry interval: " + retrySleepMs + "ms."; + } + } + + /** + * Apply the operation represented by this log entry on this Arbiter node. 
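+     * More precisely: the method takes one already-read message, drains
+     * whatever else is queued (up to N_MAX_GROUP_XACT entries), records the
+     * highest VLSN/DTVLSN once for the whole batch, and then queues one ack
+     * per commit. A minimal, JDK-only sketch of that batching shape (an
+     * editorial illustration, not the JE implementation; the queue and the
+     * ack() helper are hypothetical):
+     * <pre>{@code
+     * BlockingQueue<Long> requests = new ArrayBlockingQueue<>(100);
+     * List<Long> batch = new ArrayList<>();
+     * batch.add(requests.take());     // block for the first item
+     * requests.drainTo(batch, 99);    // then drain whatever is pending
+     * // ... one write/fsync covering the whole batch ...
+     * for (Long txnId : batch) {
+     *     ack(txnId);                 // hypothetical per-txn ack
+     * }
+     * }</pre>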
+     */
+    private Message replayEntries(Message firstMessage) throws IOException {
+        boolean doSync = false;
+        long highVLSN = 0;
+        Message shutdownMessage = null;
+        groupXact.clear();
+        groupMessages.clear();
+        groupMessages.add(firstMessage);
+        requestQueue.drainTo(groupMessages, N_MAX_GROUP_XACT);
+        for (int i = 0; i < groupMessages.size(); i++) {
+            final Message message = groupMessages.get(i);
+
+            final MessageOp messageOp = message.getOp();
+
+            if (messageOp == Protocol.SHUTDOWN_REQUEST) {
+                shutdownMessage = message;
+            } else if (messageOp == Protocol.HEARTBEAT) {
+                groupXact.add(ReplicaOutputThreadBase.HEARTBEAT_ACK);
+            } else {
+                InputWireRecord wireRecord =
+                    ((Protocol.Entry) message).getWireRecord();
+                final byte entryType = wireRecord.getEntryType();
+                lastReplayedVLSN = wireRecord.getVLSN();
+
+                if (LOG_TXN_COMMIT.equalsType(entryType)) {
+                    Protocol.Commit commitEntry = (Protocol.Commit) message;
+                    if (commitEntry.getReplicaSyncPolicy() ==
+                        SyncPolicy.SYNC) {
+                        doSync = true;
+                    }
+
+                    LogEntry logEntry = wireRecord.getLogEntry();
+                    if (lastReplayedVLSN.getSequence() > highVLSN) {
+                        highVLSN = lastReplayedVLSN.getSequence();
+                    }
+                    final TxnCommit masterCommit =
+                        (TxnCommit) logEntry.getMainItem();
+
+                    long nextDTVLSN = masterCommit.getDTVLSN();
+                    if (nextDTVLSN == VLSN.UNINITIALIZED_VLSN_SEQUENCE) {
+                        /* Pre-DTVLSN log commit record. */
+                        nextDTVLSN = wireRecord.getVLSN().getSequence();
+                    }
+                    /*
+                     * The Arbiter, unlike Replicas, does not receive commits
+                     * in ascending VLSN order, so discard lower DTVLSNs.
+                     */
+                    dtvlsn = nextDTVLSN > dtvlsn ? nextDTVLSN : dtvlsn;
+                    groupXact.add(logEntry.getTransactionId());
+                    nAcks.increment();
+                    if (logger.isLoggable(Level.FINEST)) {
+                        LoggerUtils.finest(logger, repImpl,
+                                           "Arbiter ack commit record " +
+                                           wireRecord);
+                    }
+                } else {
+                    String errMsg = "Illegal message type received by " +
+                        "Arbiter. [" + wireRecord + "]";
+                    throw new IllegalStateException(errMsg);
+                }
+            }
+        }
+
+        if (doSync ||
+            (lastFSyncTime + FSYNC_INTERVAL) <= System.currentTimeMillis()) {
+            doSync = true;
+            lastFSyncTime = System.currentTimeMillis();
+        }
+
+        arbiterImpl.getArbiterVLSNTracker().write(new VLSN(highVLSN),
+                                                  new VLSN(dtvlsn),
+                                                  doSync);
+        for (int i = 0; i < groupXact.size(); i++) {
+            queueAck(groupXact.get(i));
+        }
+        return shutdownMessage;
+    }
+
+    @SuppressWarnings("serial")
+    static class ConnectRetryException extends RetryException {
+
+        ConnectRetryException(String message,
+                              int retries,
+                              int retrySleepMs) {
+            super(message, retries, retrySleepMs);
+        }
+    }
+
+    class RequestThread extends StoppableThread {
+
+        private volatile Exception exception;
+
+        /*
+         * Set asynchronously when a shutdown is being requested.
+         */
+        volatile RequestExitType exitRequest = null;
+
+        /* The queue poll interval, 1 second */
+        private final static long REQUEST_QUEUE_POLL_INTERVAL_NS = 1000000000L;
+
+        protected RequestThread() {
+            super(repImpl, "RequestThread");
+        }
+
+        @Override
+        protected int initiateSoftShutdown() {
+            /* Use immediate, since the stream will continue to be read. */
+            exitRequest = RequestExitType.IMMEDIATE;
+            return 0;
+        }
+
+        @Override
+        public void run() {
+
+            LoggerUtils.fine(logger, repImpl,
+                             "Request thread started. 
Message queue size:" + + requestQueue.remainingCapacity()); + try { + while (true) { + final Message message = + requestQueue.poll(REQUEST_QUEUE_POLL_INTERVAL_NS, + TimeUnit.NANOSECONDS); + + if ((exitRequest == RequestExitType.IMMEDIATE) || + ((exitRequest == RequestExitType.SOFT) && + (message == null)) || + arbiterImpl.isShutdownOrInvalid()) { + return; + } + arbiterImpl.getMasterStatus().assertSync(); + if (message == null) { + /* Timeout on poll. */ + continue; + } + Message shutdownMessage = replayEntries(message); + if (shutdownMessage != null) { + throw processShutdown( + (ShutdownRequest) shutdownMessage); + } + } + } catch (Exception e) { + exception = e; + + /* + * Bring it to the attention of the main thread by freeing + * up the "offer" wait right away. + */ + requestQueue.clear(); + + /* + * Get the attention of the main arbiter thread in case it's + * waiting in a read on the socket channel. + */ + LoggerUtils.fine(logger, repImpl, + "closing arbiterFeederChannel = " + + arbiterFeederChannel); + RepUtils.shutdownChannel(arbiterFeederChannel); + + LoggerUtils.info(logger, repImpl, + "ArbiterAcker thread exiting with exception:" + + e.getMessage()); + } + } + + @Override + protected Logger getLogger() { + return logger; + } + } + + private class RepFeederHandshakeConfig + implements ReplicaFeederHandshakeConfig { + + @Override + public RepImpl getRepImpl() { + return repImpl; + } + + @Override + public NameIdPair getNameIdPair() { + return arbiterImpl.getNameIdPair(); + } + + @Override + public Clock getClock() { + return clock; + } + + @Override + public NodeType getNodeType() { + return NodeType.ARBITER; + } + + @Override + public RepGroupImpl getGroup() { + return arbiterImpl.getGroup(); + } + + @Override + public NamedChannel getNamedChannel() { + return arbiterFeederChannel; + } + } +} diff --git a/src/com/sleepycat/je/rep/arbiter/impl/ArbiterImpl.java b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterImpl.java new file mode 100644 index 0000000..ee7a1e8 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterImpl.java @@ -0,0 +1,862 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.arbiter.impl; + +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_STATE; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.HashSet; +import java.util.Set; +import java.util.Timer; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.elections.Acceptor; +import com.sleepycat.je.rep.elections.Elections; +import com.sleepycat.je.rep.elections.ElectionsConfig; +import com.sleepycat.je.rep.elections.Learner; +import com.sleepycat.je.rep.elections.MasterValue; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol.BinaryNodeStateResponse; +import com.sleepycat.je.rep.impl.BinaryNodeStateService; +import com.sleepycat.je.rep.impl.NodeStateService; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.ChannelTimeoutTask; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.stream.MasterStatus; +import com.sleepycat.je.rep.util.ReplicationGroupAdmin; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.RepUtils.ExceptionAwareCountDownLatch; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.StringStat; + +/** + * The implementation of the Arbiter. The Arbiter is a participant in + * elections and may acknowledge transaction commits. + *
+ * <p>
        + * The Arbiter persists the Arbiter's replication group node identifier + * and the highest commit VLSN that has been acknowledged. + * Currently the Feeder sends commit acknowledgment requests + * to the Arbiter if the replication factor is two and the other + * Rep node is not available (RepImpl.preLogCommitHook). + *
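+ * <p>
+ * One way to see why this helps (an editorial illustration; the actual
+ * quorum rule is governed by the configured durability, not by this
+ * sketch): counting the Arbiter as an acker turns a two-node group into
+ * an effective group of three, so the master plus any one other acker
+ * can satisfy a simple majority.
+ * <pre>{@code
+ * // hypothetical simple-majority arithmetic
+ * static int majority(int groupSize) {
+ *     return groupSize / 2 + 1;
+ * }
+ * // majority(2) == 2: both data nodes must be up to commit.
+ * // majority(3) == 2: the master plus the replica *or* the Arbiter.
+ * }</pre>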
+ * <p>
+ * The VLSN is used in response to an election promise request.
+ * The priority of the response is lower than that of
+ * a RepNode. This allows the RepNode to be selected when the VLSNs are
+ * equal. An Arbiter may not "win" an election. If the Arbiter's VLSN
+ * is the highest in the election, the election result is ignored. A
+ * NULL node id in a promise response identifies the response as
+ * coming from an Arbiter.
+ *
+ * <p>
+ * Two pieces of information are persisted by the
+ * Arbiter.
+ * The replication group node identifier is persisted because
+ * this information normally lives in the group database, of which the
+ * Arbiter does not keep a copy.
+ * The other is the highest VLSN of an Arbiter-ACKed commit.
+ *
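+ * <p>
+ * For illustration, the on-disk layout maintained by ArbiterVLSNTracker
+ * (below) can be read with nothing but the JDK; the offsets mirror
+ * VERSION_OFFSET, NODEID_OFFSET and DATA_OFFSET in that class. This is
+ * an editorial sketch, not a supported tool, and arbHome is a
+ * placeholder for the Arbiter home directory:
+ * <pre>{@code
+ * try (RandomAccessFile raf =
+ *          new RandomAccessFile(new File(arbHome, "00000000.adb"), "r")) {
+ *     raf.seek(0);                    // VERSION_OFFSET
+ *     int version = raf.readInt();
+ *     raf.seek(Integer.SIZE);         // NODEID_OFFSET
+ *     int nodeId = raf.readInt();
+ *     raf.seek(2 * Integer.SIZE);     // DATA_OFFSET
+ *     long ackedVlsn = raf.readLong();
+ *     long dtvlsn = raf.readLong();
+ * }
+ * }</pre>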
+ * <p>
        + * In the future, the algorithm could be changed to request commit + * acknowledgments when the replication factor is greater than + * two. This would allow for better write availability when + * the replication factor is an even number. + */ + +public class ArbiterImpl extends StoppableThread { + + private final static String DATA_FILE_NAME = "00000000.adb"; + + /* + * Amount of times to sleep between retries when a new node tries to locate + * a master. + */ + private static final int MASTER_QUERY_INTERVAL = 1000; + + private ServiceDispatcher serviceDispatcher; + private DataChannelFactory channelFactory; + private MasterStatus masterStatus; + private MasterChangeListener changeListener; + private Acceptor.SuggestionGenerator suggestionGenerator; + private ReplicationGroupAdmin repGroupAdmin; + private RepGroupImpl cachedRepGroupImpl; + private Timer timer; + private ChannelTimeoutTask channelTimeoutTask; + private ArbiterVLSNTracker arbiterVLSNTracker; + private ArbiterNodeStateService nodeStateService; + private ArbBinaryStateService binaryStateService; + + /* The Arbiter's logger. */ + private Logger logger; + private Formatter formatter; + private final RepImpl repImpl; + NameIdPair nameIdPair; + private Elections elections; + private final File arbiterHome; + private String groupName; + private ArbiterAcker arbiterAcker; + private MonitorEventManager monitorEventManager; + + /* + * Determines whether the Arbiter has been shutdown. Usually this is held + * within the StoppableThread, but the Feeder's two child threads have + * their shutdown coordinated by the parent Feeder. + */ + private final AtomicBoolean shutdown = new AtomicBoolean(false); + + /* + * The latch used to wait for the ArbiterAcker to establish + * a connection with the Master. + */ + private volatile ExceptionAwareCountDownLatch readyLatch = null; + private AtomicReference currentState; + private long joinGroupTime; + private Set helperSockets; + + /** + * The Arbiter implementation. + * Uses the following replication parameters: + * RepParams.GROUP_NAME The replication group name. + * RepParams.ENV_UNKNOWN_STATE_TIMEOUT Timeout used for being in + * the unknown state. + * RepParams.NODE_HOST_PORT The host name and port associated with this + * node. + * + * @param arbiterHome - Arbiter home directory. + * @param repImpl - RepImpl + * + * @throws EnvironmentNotFoundException + * @throws EnvironmentLockedException + * @throws DatabaseException + */ + public ArbiterImpl(File arbiterHome, RepImpl repImpl) + throws EnvironmentNotFoundException, + EnvironmentLockedException, + DatabaseException { + super(repImpl, "ArbiterNode " + repImpl.getNameIdPair()); + this.repImpl = repImpl; + this.arbiterHome = arbiterHome; + try { + initialize(); + } catch (IOException ioe) { + throw EnvironmentFailureException.unexpectedException( + repImpl, "Problem attempting to join on " + getSocket(), ioe); + } + } + + public StatGroup loadStats(StatsConfig config) { + StatGroup arbStat; + if (arbiterAcker == null) { + arbStat = new StatGroup(ArbiterStatDefinition.GROUP_NAME, + ArbiterStatDefinition.GROUP_DESC); + } else { + arbStat = arbiterAcker.loadStats(config); + } + StringStat state = new StringStat(arbStat, ARB_STATE); + state.set(currentState.toString()); + + StatGroup trackerStats = + arbiterVLSNTracker == null ? 
ArbiterVLSNTracker.loadEmptyStats() : + arbiterVLSNTracker.loadStats(config); + /* Add the tracker stats */ + arbStat.addAll(trackerStats); + + return arbStat; + } + + private void initialize() throws IOException { + nameIdPair = repImpl.getNameIdPair(); + currentState = new AtomicReference + (ReplicatedEnvironment.State.UNKNOWN); + + logger = LoggerUtils.getLogger(getClass()); + formatter = new ReplicationFormatter(nameIdPair); + readyLatch = new ExceptionAwareCountDownLatch(repImpl, 1); + + channelFactory = repImpl.getChannelFactory(); + + serviceDispatcher = + new ServiceDispatcher(getSocket(), repImpl, + channelFactory); + serviceDispatcher.start(); + + masterStatus = new MasterStatus(nameIdPair); + changeListener = new MasterChangeListener(); + File dataFile = + new File(arbiterHome.getAbsolutePath() + + File.separator + DATA_FILE_NAME); + arbiterVLSNTracker = new ArbiterVLSNTracker(dataFile); + suggestionGenerator = new MasterSuggestionGenerator(); + + if (arbiterVLSNTracker.getCachedNodeId() != NameIdPair.NULL_NODE_ID) { + nameIdPair.update( + new NameIdPair(nameIdPair.getName(), + arbiterVLSNTracker.getCachedNodeId())); + } + groupName = repImpl.getConfigManager().get(RepParams.GROUP_NAME); + helperSockets = repImpl.getHelperSockets(); + monitorEventManager = new MonitorEventManager(this); + } + + public void runArbiter() { + + elections = new Elections(new ArbElectionsConfig(), + changeListener, + suggestionGenerator); + + elections.startLearner(); + elections.startAcceptor(); + + repGroupAdmin = + new ReplicationGroupAdmin( + groupName, + helperSockets, + channelFactory); + timer = new Timer(true); + channelTimeoutTask = new ChannelTimeoutTask(timer); + + utilityServicesStart(); + + start(); + + int timeout = + repImpl.getConfigManager().getDuration( + RepParams.ENV_UNKNOWN_STATE_TIMEOUT); + if (timeout == 0) { + timeout = Integer.MAX_VALUE; + } + + try { + /* + * Wait for ArbiterAcker to establish connection to master if there + * is one, or timeout and return, if we could not find one in the + * ENV_UNKNOWN_STATE_TIMEOUT period. + */ + getReadyLatch().awaitOrException(timeout, + TimeUnit.MILLISECONDS); + LoggerUtils.fine(logger, repImpl, + "Arbiter started in " + currentState + " state."); + } catch (InterruptedException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + + @Override + public void run() { + /* Set to indicate an error-initiated shutdown. 
*/ + Error repNodeError = null; + try { + while (!isShutdownOrInvalid()) { + queryGroupForMembership(); + masterStatus.sync(); + arbiterAcker = new ArbiterAcker(this, repImpl); + arbiterAcker.runArbiterAckLoop(); + } + } catch (InterruptedException e) { + LoggerUtils.fine(logger, repImpl, + "Arbiter main thread interrupted - " + + " forced shutdown."); + } catch (GroupShutdownException e) { + saveShutdownException(e); + LoggerUtils.fine(logger, repImpl, + "Arbiter main thread sees group shutdown - " + e); + } catch (InsufficientLogException e) { + saveShutdownException(e); + } catch (RuntimeException e) { + LoggerUtils.fine(logger, repImpl, + "Arbiter main thread sees runtime ex - " + e); + saveShutdownException(e); + throw e; + } catch (Error e) { + LoggerUtils.fine(logger, repImpl, e + + " incurred during arbiter loop"); + repNodeError = e; + repImpl.invalidate(e); + } finally { + LoggerUtils.info(logger, repImpl, + "Arbiter main thread shutting down."); + + if (repNodeError != null) { + LoggerUtils.info(logger, repImpl, + "Node state at shutdown:\n"+ + repImpl.dumpState()); + throw repNodeError; + } + Throwable exception = getSavedShutdownException(); + + if (exception == null) { + LoggerUtils.fine(logger, repImpl, + "Node state at shutdown:\n"+ + repImpl.dumpState()); + } else { + LoggerUtils.info(logger, repImpl, + "Arbiter shutdown exception:\n" + + exception.getMessage() + + repImpl.dumpState()); + } + + try { + shutdown(); + } catch (DatabaseException e) { + RepUtils.chainExceptionCause(e, exception); + LoggerUtils.severe(logger, repImpl, + "Unexpected exception during shutdown" + + e); + throw e; + } + setState(ReplicatedEnvironment.State.DETACHED); + cleanup(); + } + } + + public ReplicatedEnvironment.State getArbState() { + return currentState.get(); + } + + /* Get the shut down reason for this node. */ + private LeaveReason getLeaveReason() { + LeaveReason reason = null; + + Exception exception = getSavedShutdownException(); + if (exception == null) { + reason = LeaveReason.NORMAL_SHUTDOWN; + } else if (exception instanceof GroupShutdownException) { + reason = LeaveReason.MASTER_SHUTDOWN_GROUP; + } else { + reason = LeaveReason.ABNORMAL_TERMINATION; + } + + return reason; + } + + /* Get the current master name if it exists. */ + String getMasterName() { + if (masterStatus.getGroupMasterNameId().getId() == + NameIdPair.NULL_NODE_ID) { + return null; + } + + return masterStatus.getGroupMasterNameId().getName(); + } + + String getNodeName() { + return nameIdPair.getName(); + } + + RepGroupImpl getGroup() { + return cachedRepGroupImpl; + } + + public Elections getElections() { + return elections; + } + + public void setState(ReplicatedEnvironment.State state) { + currentState.set(state); + repImpl.getNodeState().changeAndNotify(state, NameIdPair.NULL); + } + + private void utilityServicesStart() { + /* Register the node state querying service. 
*/ + nodeStateService = new ArbiterNodeStateService(serviceDispatcher, this); + serviceDispatcher.register(nodeStateService); + + binaryStateService = + new ArbBinaryStateService(serviceDispatcher, this); + } + + private void utilityServicesShutdown() { + + if (binaryStateService != null) { + try { + binaryStateService.shutdown(); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, + "Error shutting down binaryStateService " + + e.getMessage()); + } + } + + if (nodeStateService != null) { + try { + serviceDispatcher.cancel(NodeStateService.SERVICE_NAME); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, + "Error canceling serviceDispatch " + + e.getMessage()); + } + } + } + + public void shutdown() { + boolean changed = shutdown.compareAndSet(false, true); + if (!changed) { + return; + } + + try { + monitorEventManager.notifyLeaveGroup(getLeaveReason()); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, + "Error shutting down monitor event manager " + + e.getMessage()); + } + + utilityServicesShutdown(); + + if (arbiterAcker != null) { + try { + arbiterAcker.shutdown(); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, + "Error shutting down ArbiterAcker " + + e.getMessage()); + } + } + + if (elections != null) { + try { + elections.shutdown(); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, + "Error shutting down elections " + + e.getMessage()); + } + } + if (serviceDispatcher != null) { + serviceDispatcher.shutdown(); + } + LoggerUtils.info(logger, repImpl, + nameIdPair + " shutdown completed."); + masterStatus.setGroupMaster(null, 0, NameIdPair.NULL); + readyLatch.releaseAwait(getSavedShutdownException()); + arbiterVLSNTracker.close(); + /* Cancel the TimerTasks. */ + channelTimeoutTask.cancel(); + timer.cancel(); + } + + ReplicatedEnvironment.State getNodeState() { + return currentState.get(); + } + + String getGroupName() { + return groupName; + } + + RepImpl getRepImpl() { + return repImpl; + } + + public void refreshHelperHosts() { + final Set helpers = + new HashSet(repImpl.getHelperSockets()); + if (cachedRepGroupImpl != null) { + helpers.addAll(cachedRepGroupImpl.getAllHelperSockets()); + } + helperSockets = helpers; + if (repGroupAdmin != null) { + repGroupAdmin.setHelperSockets(helperSockets); + } + } + + RepGroupImpl refreshCachedGroup() + throws DatabaseException { + RepGroupImpl repGroupImpl; + repGroupImpl = repGroupAdmin.getGroup().getRepGroupImpl(); + elections.updateRepGroupOnly(repGroupImpl); + if (nameIdPair.hasNullId()) { + RepNodeImpl n = repGroupImpl.getMember(nameIdPair.getName()); + if (n != null) { + nameIdPair.update(n.getNameIdPair()); + arbiterVLSNTracker.writeNodeId(n.getNameIdPair().getId()); + } + } + final Set helpers = + new HashSet(repImpl.getHelperSockets()); + helpers.addAll(repGroupImpl.getAllHelperSockets()); + helperSockets = helpers; + cachedRepGroupImpl = repGroupImpl; + return cachedRepGroupImpl; + } + + void updateNameIdPair(NameIdPair other) { + nameIdPair.update(other); + } + + void notifyJoinGroup() { + this.joinGroupTime = System.currentTimeMillis(); + monitorEventManager.notifyJoinGroup(); + } + + long getJoinGroupTime() { + return joinGroupTime; + } + + /** + * Communicates with existing nodes in the group in order figure out + * who is the master. + * In the case where the local node does not appear to be in + * the (local copy of the) GroupDB, typically because the node is starting + * up with an empty env directory. 
It could be that this is a new node
+ * (one that has never been part of the group), or a pre-existing
+ * group member that has lost its env dir contents and needs to be
+ * restored via a Network Restore operation.
+ *
+ * <p>
+ * We query the designated helpers for the master's identity. The
+ * helpers are the nodes identified via this node's configuration.
+ *
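+ * <p>
+ * The loop below has this shape (an editorial sketch with hypothetical
+ * names; the real code asks the elections Learner to query all helpers
+ * at once):
+ * <pre>{@code
+ * while (!shutdown && master == null) {
+ *     master = queryHelpersForMaster(helperSockets); // hypothetical probe
+ *     if (master == null) {
+ *         Thread.sleep(1000);   // MASTER_QUERY_INTERVAL
+ *     }
+ * }
+ * }</pre>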
+ * <p>
        + * Returns normally when the master is found. + * + * @throws InterruptedException if the current thread is interrupted, + * typically due to a shutdown + */ + private void queryGroupForMembership() + throws InterruptedException { + + checkLoopbackAddresses(); + + if (helperSockets.isEmpty()) { + throw EnvironmentFailureException.unexpectedState + ("Need a helper to add a new node into the group"); + } + + NameIdPair groupMasterNameId; + while (true) { + elections.getLearner().queryForMaster(helperSockets); + groupMasterNameId = masterStatus.getGroupMasterNameId(); + if (!groupMasterNameId.hasNullId()) { + /* A new, or pre-query, group master. */ + if (nameIdPair.hasNullId() && + groupMasterNameId.getName().equals(nameIdPair.getName())) { + + /* + * Residual obsolete information in replicas, ignore it. + * Can't be master if we don't know our own id, but some + * other node does! This state means that the node was a + * master in the recent past, but has had its environment + * deleted since that time. + */ + try { + Thread.sleep(MASTER_QUERY_INTERVAL); + } catch (InterruptedException e) { + throw EnvironmentFailureException. + unexpectedException(e); + } + continue; + } + if (checkGroupMasterIsAlive(groupMasterNameId)) { + /* Use the current group master if it's alive. */ + break; + } + } + if (isShutdownOrInvalid()) { + throw new InterruptedException("Arbiter node shutting down."); + } + Thread.sleep(MASTER_QUERY_INTERVAL); + } + LoggerUtils.fine(logger, repImpl, "New node " + nameIdPair.getName() + + " located master: " + groupMasterNameId); + } + + ArbiterVLSNTracker getArbiterVLSNTracker() { + return arbiterVLSNTracker; + } + + /** + * Returns true if the node has been shutdown or if the underlying + * environment has been invalidated. It's used as the basis for exiting + * the FeederManager or the Replica. + */ + boolean isShutdownOrInvalid() { + if (isShutdown()) { + return true; + } + if (repImpl.wasInvalidated()) { + saveShutdownException(repImpl.getInvalidatingException()); + return true; + } + return false; + } + + private void checkLoopbackAddresses() { + + final InetAddress myAddress = getSocket().getAddress(); + final boolean isLoopback = myAddress.isLoopbackAddress(); + + for (InetSocketAddress socketAddress : helperSockets) { + final InetAddress nodeAddress = socketAddress.getAddress(); + + if (nodeAddress.isLoopbackAddress() == isLoopback) { + continue; + } + String message = getSocket() + + " the address associated with this node, " + + (isLoopback? "is " : "is not ") + "a loopback address." + + " It conflicts with an existing use, by a different node " + + " of the address:" + + socketAddress + + (!isLoopback ? " which is a loopback address." : + " which is not a loopback address.") + + " Such mixing of addresses within a group is not allowed, " + + "since the nodes will not be able to communicate with " + + "each other."; + throw new IllegalArgumentException(message); + } + } + + /** + * Check that the master found by querying other group nodes is indeed + * alive and that we are not dealing with obsolete cached information. + * + * @return true if the master node could be contacted and was truly alive + * + * TODO: handle protocol version mismatch here and in DbPing, also + * consolidate code so that a single copy is shared. 
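+     *
+     * The probe below bounds both the connect and the read with 5-second
+     * timeouts so a dead master cannot stall the query loop. A plain-JDK
+     * sketch of the same shape (editorial illustration only; host and
+     * port are placeholders):
+     * <pre>{@code
+     * try (Socket s = new Socket()) {
+     *     s.connect(new InetSocketAddress(host, port), 5000);
+     *     s.setSoTimeout(5000);
+     *     // ... handshake, request the node state, read the response ...
+     *     return true;
+     * } catch (IOException e) {
+     *     return false;
+     * }
+     * }</pre>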
+ */ + private boolean checkGroupMasterIsAlive(NameIdPair groupMasterNameId) { + + DataChannel channel = null; + + try { + final InetSocketAddress masterSocket = + masterStatus.getGroupMaster(); + + final BinaryNodeStateProtocol protocol = + new BinaryNodeStateProtocol(NameIdPair.NOCHECK, null); + + /* Build the connection. Set the parameter connectTimeout.*/ + channel = repImpl.getChannelFactory(). + connect(masterSocket, + new ConnectOptions(). + setTcpNoDelay(true). + setOpenTimeout(5000). + setReadTimeout(5000)); + ServiceDispatcher.doServiceHandshake + (channel, BinaryNodeStateService.SERVICE_NAME); + /* Send a NodeState request to the node. */ + protocol.write( + protocol.new BinaryNodeStateRequest( + groupMasterNameId.getName(), + repImpl.getConfigManager().get(RepParams.GROUP_NAME)), + channel); + /* Get the response and return the NodeState. */ + BinaryNodeStateResponse response = + protocol.read(channel, BinaryNodeStateResponse.class); + + ReplicatedEnvironment.State state = response.getNodeState(); + return (state != null) && state.isMaster(); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, + "Queried master:" + groupMasterNameId + + " unavailable. Reason:" + e); + return false; + } finally { + if (channel != null) { + try { + channel.close(); + } catch (IOException ioe) { + /* Ignore it */ + } + } + } + } + + private InetSocketAddress getSocket() { + return new InetSocketAddress(getHostName(), getPort()); + } + + NameIdPair getNameIdPair() { + return nameIdPair; + } + + MasterStatus getMasterStatus() { + return masterStatus; + } + + ChannelTimeoutTask getChannelTimeoutTask() { + return channelTimeoutTask; + } + + @Override + public boolean isShutdown() { + return shutdown.get(); + } + @Override + public Logger getLogger() { + return logger; + } + + public ExceptionAwareCountDownLatch getReadyLatch() { + return readyLatch; + } + + public void resetReadyLatch(Exception exception) { + ExceptionAwareCountDownLatch old = readyLatch; + readyLatch = new ExceptionAwareCountDownLatch(repImpl, 1); + if (old.getCount() != 0) { + /* releasing latch in some error situation. */ + old.releaseAwait(exception); + } + } + + /** + * Returns the hostname associated with this node. + * + * @return the hostname + */ + public String getHostName() { + String hostAndPort = + repImpl.getConfigManager().get(RepParams.NODE_HOST_PORT); + int colonToken = hostAndPort.indexOf(":"); + return (colonToken >= 0) ? + hostAndPort.substring(0, colonToken) : + hostAndPort; + } + + /** + * Returns the port used by the replication node. + * + * @return the port number + */ + public int getPort() { + + String hostAndPort = + repImpl.getConfigManager().get(RepParams.NODE_HOST_PORT); + int colonToken = hostAndPort.indexOf(":"); + + return (colonToken >= 0) ? + Integer.parseInt(hostAndPort.substring(colonToken + 1)) : + Integer.parseInt(RepParams.DEFAULT_PORT.getDefault()); + } + + /** + * The Listener used to learn about new Masters + */ + private class MasterChangeListener implements Learner.Listener { + /* The current learned value. */ + private MasterValue currentValue = null; + + /** + * Implements the Listener protocol. + */ + @Override + public void notify(Proposal proposal, Value value) { + /* We have a winning new proposal, is it truly different? 
*/ + if (value.equals(currentValue)) { + return; + } + currentValue = (MasterValue) value; + try { + String currentMasterName = currentValue.getNodeName(); + LoggerUtils.logMsg(logger, formatter, Level.FINE, + "Arbiter notified of new Master: " + + currentMasterName); + masterStatus.setGroupMaster + (currentValue.getHostName(), + currentValue.getPort(), + currentValue.getNameId()); + } catch (Exception e) { + LoggerUtils.logMsg + (logger, formatter, Level.SEVERE, + "Arbiter change event processing exception: " + + e.getMessage()); + } + } + } + + private class ArbElectionsConfig implements ElectionsConfig { + + @Override + public String getGroupName() { + return groupName; + } + + @Override + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + @Override + public ServiceDispatcher getServiceDispatcher() { + return serviceDispatcher; + } + + @Override + public int getElectionPriority() { + return Integer.MIN_VALUE; + } + + @Override + public int getLogVersion() { + return LogEntryType.LOG_VERSION; + } + + @Override + public RepImpl getRepImpl() { + return repImpl; + } + + @Override + public RepNode getRepNode() { + return null; + } + } + + private class MasterSuggestionGenerator + implements Acceptor.SuggestionGenerator { + @Override + public Value get(Proposal proposal) { + return new MasterValue(null, + getPort(), + NameIdPair.NULL); + } + + @Override + public Ranking getRanking(Proposal proposal) { + return new Ranking(arbiterVLSNTracker.getDTVLSN().getSequence(), + arbiterVLSNTracker.get().getSequence()); + + } + } +} diff --git a/src/com/sleepycat/je/rep/arbiter/impl/ArbiterNodeStateService.java b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterNodeStateService.java new file mode 100644 index 0000000..aba9320 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterNodeStateService.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbiter.impl; + +import java.io.IOException; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.impl.NodeStateProtocol; +import com.sleepycat.je.rep.impl.NodeStateProtocol.NodeStateRequest; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingService; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingRunnable; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * The service registered by an Arbiter to answer the state request from + * another node. + */ +public class ArbiterNodeStateService extends ExecutingService { + + private final NodeStateProtocol protocol; + private final Logger logger; + private final ArbiterImpl arbImpl; + + /* Identifies the Node State querying Service. 
*/
+    public static final String SERVICE_NAME = "NodeState";
+
+    public ArbiterNodeStateService(ServiceDispatcher dispatcher,
+                                   ArbiterImpl arbImpl) {
+        super(SERVICE_NAME, dispatcher);
+        this.arbImpl = arbImpl;
+        protocol = new NodeStateProtocol(
+            arbImpl.getGroupName(),
+            arbImpl.getNameIdPair(),
+            arbImpl.getRepImpl(),
+            dispatcher.getChannelFactory());
+        logger = LoggerUtils.getLogger(getClass());
+    }
+
+    /**
+     * Process a node state querying request.
+     */
+    public ResponseMessage process(NodeStateRequest stateRequest) {
+        return protocol.new
+            NodeStateResponse(
+                arbImpl.getNameIdPair().getName(),
+                arbImpl.getMasterStatus().getNodeMasterNameId().getName(),
+                arbImpl.getJoinGroupTime(),
+                arbImpl.getNodeState());
+    }
+
+    @Override
+    public Runnable getRunnable(DataChannel dataChannel) {
+        return new NodeStateServiceRunnable(dataChannel, protocol);
+    }
+
+    class NodeStateServiceRunnable extends ExecutingRunnable {
+        NodeStateServiceRunnable(DataChannel dataChannel,
+                                 NodeStateProtocol protocol) {
+            super(dataChannel, protocol, true);
+        }
+
+        @Override
+        protected ResponseMessage getResponse(RequestMessage request)
+            throws IOException {
+
+            return protocol.process(ArbiterNodeStateService.this, request);
+        }
+
+        @Override
+        protected void logMessage(String message) {
+            LoggerUtils.warning(logger, arbImpl.getRepImpl(), message);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/rep/arbiter/impl/ArbiterOutputThread.java b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterOutputThread.java
new file mode 100644
index 0000000..6cd71b7
--- /dev/null
+++ b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterOutputThread.java
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.arbiter.impl;
+
+import java.io.IOException;
+import java.util.concurrent.BlockingQueue;
+
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.node.ReplicaOutputThreadBase;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.stream.Protocol;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * The ArbiterOutputThread reads transaction identifiers
+ * from the outputQueue and writes an acknowledgment
+ * response to the network channel. It is also used
+ * to write responses for heartbeat messages.
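+ * <p>
+ * The consumer side of the queue has this basic shape (an editorial
+ * sketch, not the base-class implementation; EOF_MARKER and writeAck
+ * are hypothetical):
+ * <pre>{@code
+ * while (true) {
+ *     Long txnId = outputQueue.take();   // block for the next ack
+ *     if (txnId.equals(EOF_MARKER)) {
+ *         break;                         // shutdown requested
+ *     }
+ *     writeAck(txnId);                   // single small network write
+ * }
+ * }</pre>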
+ */
+public class ArbiterOutputThread extends ReplicaOutputThreadBase {
+    private final ArbiterVLSNTracker vlsnTracker;
+
+    public ArbiterOutputThread(RepImpl repImpl,
+                               BlockingQueue<Long> outputQueue,
+                               Protocol protocol,
+                               DataChannel replicaFeederChannel,
+                               ArbiterVLSNTracker vlsnTracker) {
+        super(repImpl, outputQueue, protocol, replicaFeederChannel);
+        this.vlsnTracker = vlsnTracker;
+    }
+
+    public void writeHeartbeat(Long txnId) throws IOException {
+        VLSN vlsn = vlsnTracker.get();
+        protocol.write(protocol.new HeartbeatResponse
+                       (VLSN.NULL_VLSN,
+                        vlsn),
+                       replicaFeederChannel);
+    }
+
+    @Override
+    public void writeReauthentication() throws IOException {
+    }
+}
diff --git a/src/com/sleepycat/je/rep/arbiter/impl/ArbiterStatDefinition.java b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterStatDefinition.java
new file mode 100644
index 0000000..9e13756
--- /dev/null
+++ b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterStatDefinition.java
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.arbiter.impl;
+
+import com.sleepycat.je.utilint.StatDefinition;
+import com.sleepycat.je.utilint.StatDefinition.StatType;
+
+public class ArbiterStatDefinition {
+
+    public static final String GROUP_NAME = "Arbiter";
+    public static final String GROUP_DESC =
+        "Arbiter statistics";
+
+    public static final String ARBIO_GROUP_NAME = "ArbFileIO";
+    public static final String ARBIO_GROUP_DESC =
+        "Arbiter file I/O statistics";
+
+    public static final StatDefinition ARB_N_FSYNCS =
+        new StatDefinition(
+            "nFSyncs",
+            "The number of fsyncs.");
+
+    public static final StatDefinition ARB_N_WRITES =
+        new StatDefinition(
+            "nWrites",
+            "The number of file writes.");
+
+    public static final StatDefinition ARB_N_REPLAY_QUEUE_OVERFLOW =
+        new StatDefinition(
+            "nReplayQueueOverflow",
+            "The number of times replay queue failed to insert " +
+            "because it was full.");
+
+    public static final StatDefinition ARB_N_ACKS =
+        new StatDefinition(
+            "nAcks",
+            "The number of transactions acknowledged.");
+
+    public static final StatDefinition ARB_MASTER =
+        new StatDefinition(
+            "master",
+            "The current or last Master Replication Node the Arbiter accessed.",
+            StatType.CUMULATIVE);
+
+    public static final StatDefinition ARB_STATE =
+        new StatDefinition(
+            "state",
+            "The current state of the Arbiter.",
+            StatType.CUMULATIVE);
+
+    public static final StatDefinition ARB_VLSN =
+        new StatDefinition(
+            "vlsn",
+            "The highest VLSN that was acknowledged by the Arbiter.",
+            StatType.CUMULATIVE);
+
+    public static final StatDefinition ARB_DTVLSN =
+        new StatDefinition(
+            "dtvlsn",
+            "The highest DTVLSN that was acknowledged by the Arbiter.",
+            StatType.CUMULATIVE);
+}
diff --git a/src/com/sleepycat/je/rep/arbiter/impl/ArbiterVLSNTracker.java b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterVLSNTracker.java
new file mode 100644
index 0000000..31dbbd1
--- /dev/null
+++ b/src/com/sleepycat/je/rep/arbiter/impl/ArbiterVLSNTracker.java
@@ -0,0 +1,255 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbiter.impl; + +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_FSYNCS; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_N_WRITES; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_VLSN; +import static com.sleepycat.je.rep.arbiter.impl.ArbiterStatDefinition.ARB_DTVLSN; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; + +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; + +/** + * This class is used to maintain two pieces of persistent state. The + * replication group node identifier of the Arbiter and a VLSN value that + * represents the highest commit record VLSN the Arbiter has acknowledged. + */ +class ArbiterVLSNTracker { + private static final int VERSION = 1; + + private RandomAccessFile raf; + private final File dataFile; + private VLSN currentVLSN = VLSN.NULL_VLSN; + private volatile VLSN dtvlsn = VLSN.NULL_VLSN; + private final int VERSION_OFFSET = 0; + private final int NODEID_OFFSET = Integer.SIZE + VERSION_OFFSET; + private final int DATA_OFFSET = Integer.SIZE + NODEID_OFFSET; + private int nodeId = NameIdPair.NULL_NODE_ID; + private final StatGroup stats; + private final LongStat nWrites; + private final LongStat nFSyncs; + private final LongStat vlsnStat; + private final LongStat dtVlsnStat; + + ArbiterVLSNTracker(File file) { + dataFile = file; + boolean fileExists = dataFile.exists(); + + stats = new StatGroup(ArbiterStatDefinition.ARBIO_GROUP_NAME, + ArbiterStatDefinition.ARBIO_GROUP_DESC); + nFSyncs = new LongStat(stats, ARB_N_FSYNCS); + nWrites = new LongStat(stats, ARB_N_WRITES); + vlsnStat = new LongStat(stats, ARB_VLSN); + dtVlsnStat = new LongStat(stats, ARB_DTVLSN); + try { + raf = new RandomAccessFile(dataFile, "rw"); + if (fileExists) { + final int readVersion = readVersion(); + if (readVersion > VERSION) { + throw new RuntimeException( + "Arbiter data file does not have a supported " + + "version field " + + dataFile.getAbsolutePath()); + } + nodeId = readNodeId(); + if (raf.length() > DATA_OFFSET) { + raf.seek(DATA_OFFSET); + currentVLSN = new VLSN(raf.readLong()); + dtvlsn = new VLSN(raf.readLong()); + } + } else { + writeVersion(VERSION); + writeNodeIdInternal(nodeId); + } + } catch (IOException e) { + throw new RuntimeException( + "Unable to read the Arbiter data file " + + dataFile.getAbsolutePath()); + } + catch (Exception e) { + throw new RuntimeException( + "Unable to open the Arbiter data file " + + dataFile.getAbsolutePath() + " exception " + e.getMessage()); + } + } + + public StatGroup loadStats(StatsConfig config) { + vlsnStat.set(get().getSequence()); + dtVlsnStat.set(getDTVLSN().getSequence()); + return stats.cloneGroup(config.getClear()); + } + + public synchronized void writeNodeId(int id) { + if (nodeId == id) { + return; + } + writeNodeIdInternal(id); + } + + public synchronized int getCachedNodeId() { + return 
nodeId;
+    }
+
+    private void writeNodeIdInternal(int id) {
+        if (raf == null) {
+            throw new RuntimeException(
+                "Internal error: Unable to write the Arbiter data file " +
+                "because the file is not open. " +
+                dataFile.getAbsolutePath());
+        }
+        try {
+            raf.seek(NODEID_OFFSET);
+            raf.writeInt(id);
+            nWrites.increment();
+            doFSync();
+        } catch (IOException e) {
+            throw new RuntimeException(
+                "Unable to write the Arbiter data file " +
+                dataFile.getAbsolutePath());
+        }
+    }
+
+    private int readNodeId() {
+        if (raf == null) {
+            throw new RuntimeException(
+                "Internal error: Unable to read the Arbiter data file " +
+                "because the file is not open. " +
+                dataFile.getAbsolutePath());
+        }
+        try {
+            raf.seek(NODEID_OFFSET);
+            return raf.readInt();
+        } catch (IOException e) {
+            throw new RuntimeException(
+                "Unable to read the Arbiter data file " +
+                dataFile.getAbsolutePath());
+        }
+    }
+
+    public synchronized void writeVersion(int id) {
+        if (raf == null) {
+            throw new RuntimeException(
+                "Internal error: Unable to write the Arbiter data file " +
+                "because the file is not open. " +
+                dataFile.getAbsolutePath());
+        }
+        try {
+            raf.seek(VERSION_OFFSET);
+            raf.writeInt(id);
+            nWrites.increment();
+            doFSync();
+        } catch (IOException e) {
+            throw new RuntimeException(
+                "Unable to write the Arbiter data file " +
+                dataFile.getAbsolutePath());
+        }
+    }
+
+    private int readVersion() {
+        if (raf == null) {
+            throw new RuntimeException(
+                "Internal error: Unable to read the Arbiter data file " +
+                "because the file is not open. " +
+                dataFile.getAbsolutePath());
+        }
+        try {
+            raf.seek(VERSION_OFFSET);
+            return raf.readInt();
+        } catch (IOException e) {
+            throw new RuntimeException(
+                "Unable to read the Arbiter data file " +
+                dataFile.getAbsolutePath());
+        }
+    }
+
+    public synchronized void write(VLSN nextCurrentVLSN,
+                                   VLSN nextDTVLSN,
+                                   boolean doFSync) {
+        if (raf == null) {
+            throw new RuntimeException(
+                "Internal error: Unable to write the Arbiter data file " +
+                "because the file is not open. "
+                + dataFile.getAbsolutePath());
+        }
+        if (nextCurrentVLSN.compareTo(currentVLSN) > 0) {
+            this.currentVLSN = nextCurrentVLSN;
+            this.dtvlsn = nextDTVLSN;
+            try {
+                raf.seek(DATA_OFFSET);
+                raf.writeLong(nextCurrentVLSN.getSequence());
+                raf.writeLong(nextDTVLSN.getSequence());
+                nWrites.add(2);
+                if (doFSync) {
+                    doFSync();
+                }
+            } catch (IOException e) {
+                throw new RuntimeException(
+                    "Unable to write the Arbiter data file " +
+                    dataFile.getAbsolutePath());
+            }
+        }
+    }
+
+    public synchronized void close() {
+        if (raf != null) {
+            try {
+                doFSync();
+                raf.close();
+            } catch (IOException ignore) {
+            } finally {
+                raf = null;
+            }
+        }
+    }
+
+    public VLSN get() {
+        return currentVLSN;
+    }
+
+    public VLSN getDTVLSN() {
+        return dtvlsn;
+    }
+
+    public static StatGroup loadEmptyStats() {
+        StatGroup tmpStats =
+            new StatGroup(ArbiterStatDefinition.ARBIO_GROUP_NAME,
+                          ArbiterStatDefinition.ARBIO_GROUP_DESC);
+        new LongStat(tmpStats, ARB_N_FSYNCS);
+        new LongStat(tmpStats, ARB_N_WRITES);
+        new LongStat(tmpStats, ARB_VLSN);
+        new LongStat(tmpStats, ARB_DTVLSN);
+        return tmpStats;
+    }
+
+    private void doFSync() throws IOException {
+        if (raf == null) {
+            return;
+        }
+        raf.getFD().sync();
+        nFSyncs.increment();
+    }
+}
diff --git a/src/com/sleepycat/je/rep/arbiter/impl/MonitorEventManager.java b/src/com/sleepycat/je/rep/arbiter/impl/MonitorEventManager.java
new file mode 100644
index 0000000..761e48f
--- /dev/null
+++ b/src/com/sleepycat/je/rep/arbiter/impl/MonitorEventManager.java
@@ -0,0 +1,150 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.arbiter.impl;
+
+import java.net.InetSocketAddress;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.rep.elections.Utils;
+import com.sleepycat.je.rep.elections.Utils.FutureTrackingCompService;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange;
+import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason;
+import com.sleepycat.je.rep.monitor.MonitorService;
+import com.sleepycat.je.rep.monitor.Protocol.JoinGroup;
+import com.sleepycat.je.rep.monitor.Protocol.LeaveGroup;
+import com.sleepycat.je.utilint.LoggerUtils;
+
+/**
+ * The class for firing MonitorChangeEvents.
+ *
+ * Each time a MonitorChangeEvent fires, this class refreshes the group
+ * information so that it can send messages to the current set of monitors.
+ */
+public class MonitorEventManager {
+
+    /* The time when this node joins the group, 0 if it hasn't joined yet. */
+    private long joinTime = 0L;
+
+    ArbiterImpl arbImpl;
+
+    public MonitorEventManager(ArbiterImpl arbImpl) {
+        this.arbImpl = arbImpl;
+    }
+
+    /* Return the time when JoinGroupEvent for this Arbiter fires.
*/ + public long getJoinTime() { + return joinTime; + } + + /* Disable the LeaveGroupEvent because the node is abnormally closed. */ + public void disableLeaveGroupEvent() { + joinTime = 0L; + } + + /** + * Fire a JoinGroupEvent. + */ + public void notifyJoinGroup() + throws DatabaseException { + + if (joinTime > 0) { + /* Already notified. */ + return; + } + + RepGroupImpl repGroup = arbImpl.getGroup(); + if (repGroup == null) { + return; + } + + joinTime = System.currentTimeMillis(); + JoinGroup joinEvent = + getProtocol(repGroup).new JoinGroup(arbImpl.getNodeName(), + arbImpl.getMasterName(), + joinTime); + refreshMonitors(repGroup, joinEvent); + } + + /** + * Fire a LeaveGroupEvent and wait for responses. + */ + public void notifyLeaveGroup(LeaveReason reason) + throws DatabaseException, InterruptedException { + + if (joinTime == 0) { + /* No join event, therefore no matching leave event. */ + return; + } + + RepGroupImpl repGroup = arbImpl.getGroup(); + if (repGroup == null) { + return; + } + LeaveGroup leaveEvent = + getProtocol(repGroup).new LeaveGroup(arbImpl.getNodeName(), + arbImpl.getMasterName(), + reason, + joinTime, + System.currentTimeMillis()); + final FutureTrackingCompService compService = + refreshMonitors(repGroup, leaveEvent); + + /* Wait for the futures to be evaluated. */ + for (final Future f : compService.getFutures()) { + try { + + /* + * Ignore the result. Wait 10 seconds for the evaluation of + * the future before giving up. + */ + f.get(10, TimeUnit.SECONDS); + } catch (ExecutionException e) { + /* Ignore the exception. */ + } catch (TimeoutException e) { + /* Continue after time out. */ + } + } + } + + /* Create a monitor protocol. */ + private com.sleepycat.je.rep.monitor.Protocol + getProtocol(RepGroupImpl repGroup) { + + return new com.sleepycat.je.rep.monitor.Protocol + (repGroup.getName(), NameIdPair.NOCHECK, null, + arbImpl.getRepImpl().getChannelFactory()); + } + + /* Refresh all the monitors with specified message. */ + private FutureTrackingCompService + refreshMonitors(RepGroupImpl repGroup, + RequestMessage requestMessage) { + Set monitors = repGroup.getAllMonitorSockets(); + LoggerUtils.fine(arbImpl.getLogger(), arbImpl.getRepImpl(), + "Refreshed " + monitors.size() + " monitors."); + /* Broadcast and forget. */ + return Utils.broadcastMessage(monitors, + MonitorService.SERVICE_NAME, + requestMessage, + arbImpl.getElections().getThreadPool()); + } +} diff --git a/src/com/sleepycat/je/rep/arbiter/impl/package-info.java b/src/com/sleepycat/je/rep/arbiter/impl/package-info.java new file mode 100644 index 0000000..43491f8 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/impl/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Implementation classes for the arbiter node. 
+ */ +package com.sleepycat.je.rep.arbiter.impl; \ No newline at end of file diff --git a/src/com/sleepycat/je/rep/arbiter/package.html b/src/com/sleepycat/je/rep/arbiter/package.html new file mode 100644 index 0000000..83862b0 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbiter/package.html @@ -0,0 +1,6 @@ + + +Provides a mechanism to allow write availability for the Replication +group even when the number of replication nodes is less than majority. + + diff --git a/src/com/sleepycat/je/rep/arbitration/Arbiter.java b/src/com/sleepycat/je/rep/arbitration/Arbiter.java new file mode 100644 index 0000000..88310f6 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbitration/Arbiter.java @@ -0,0 +1,170 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbitration; + +import java.util.logging.Logger; + +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * The locus for management of this node's active arbitration state, and of the + * mechanisms available to this node for doing arbitration. + *

+ * A node is in active arbitration state if it:
+ * <ul>
+ * <li>is the master</li>
+ * <li>is lacking the required durability quorum</li>
+ * <li>is maintaining its authoritative master status and its ability to
+ * commit writes through the good graces of an ArbiterProvider.</li>
+ * </ul>
+ * <p>
        + * The Arbiter detects which arbitration options are available in the JE HA + * group. + */ +public class Arbiter { + + /** + * True if this node is in active arbitration. + */ + private volatile boolean active; + + private final ArbiterProvider provider; + + private final RepImpl repImpl; + private final Logger logger; + + /** + * Examine environment configuration and rep group membership to figure out + * which arbitration options are in operation for this HA group. + */ + public Arbiter(RepImpl repImpl) { + this.repImpl = repImpl; + provider = new DesignatedPrimaryProvider(repImpl); + logger = LoggerUtils.getLogger(this.getClass()); + } + + /** + * The replication node knows that it has lost its durability quorum, and + * wants to try to enter active arbitration mode. + * @return true if the node successfully transitions to active arbitration, + * or was already in active arbitration. + */ + public synchronized boolean activateArbitration() { + if (provider.attemptActivation()) { + active = true; + } else { + active = false; + } + return active; + } + + /** + * The node has determined that it need not be in active arbitration. + * End the active arbitration state. If the node was not in active + * arbitration, do nothing. + */ + public void endArbitration() { + synchronized(this) { + if (!active) { + return; + } + + provider.endArbitration(); + active = false; + } + + LoggerUtils.info(logger, repImpl, "Arbitration is inactivated"); + } + + /** + * Return true if it's possible that this node can switch into active + * arbitration. The criteria for activation depend on the type of + * arbitration enabled for this node. + *
+ * <p>
        + * For example, if designated primary arbitration is used, then it's only + * possible to move into active arbitration if the Designated Primary + * configuration parameter is set for this node. If LWT Node arbitration is + * used, then this node must have a valid connection to the arbiter node. + */ + public boolean activationPossible() { + return provider.activationPossible(); + } + + /** + * Return true if this node is in active arbitration, and if arbitration + * should take precedence over the election quorum policy. + */ + public boolean isApplicable(QuorumPolicy quorumPolicy) { + return active && (quorumPolicy.equals(QuorumPolicy.SIMPLE_MAJORITY)); + } + + /** + * Return true if this node is in active arbitration, and if arbitration + * should take precedence over the durability quorum policy. + */ + public boolean isApplicable(ReplicaAckPolicy ackPolicy) { + return active && (ackPolicy.equals(ReplicaAckPolicy.SIMPLE_MAJORITY)); + } + + /** + * Return the arbitration-influenced election quorum size. Arbitration + * may reduce the value that would usually be indicated by the quorum + * policy. + */ + public int getElectionQuorumSize(QuorumPolicy quorumPolicy) { + return provider.getElectionQuorumSize(quorumPolicy); + } + + /** + * Return the arbitration-influenced durability quorum size. Arbitration + * may reduce the value that would usually be indicated by the ack policy. + */ + public int getAckCount(ReplicaAckPolicy ackPolicy) { + return provider.getAckCount(ackPolicy); + } + + /** + * The replication configuration was changed. Check the new configuration + * to see it impacts active arbitration state or makes more arbitration + * mechanisms available. For example, if we are in active arbitration using + * designated primary arbitration, a change to the node's designated + * primary configuration parameter may affect whether this node can stay in + * active arbitration. + */ + public synchronized void + processConfigChange (ReplicationMutableConfig newConfig) { + + if (!active) { + return; + } + + if (provider.shouldEndArbitration(newConfig)) { + endArbitration(); + } + } + + /** + * Return true if this node is in active arbitration. + */ + public synchronized boolean isActive() { + return active; + } +} + + + diff --git a/src/com/sleepycat/je/rep/arbitration/ArbiterProvider.java b/src/com/sleepycat/je/rep/arbitration/ArbiterProvider.java new file mode 100644 index 0000000..8eda66a --- /dev/null +++ b/src/com/sleepycat/je/rep/arbitration/ArbiterProvider.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbitration; + +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.ReplicationMutableConfig; + +/** + * Provides access to arbitration services provided by different arbitration + * mechanisms. + */ +public interface ArbiterProvider { + + /** + * Return true if the pre-requisites are in place to permit this node to + * enter active arbitration. 
Different provider implementations have + * different criteria. For example, the DesignatedPrimaryProvider requires + * that a node's designated primary configuration parameter is true. + */ + public boolean activationPossible(); + + /** + * Return true if this node has successfully entered active arbitration + * state. + */ + public boolean attemptActivation(); + + /** + * End active arbitration. + */ + public void endArbitration(); + + /** + * Return the election quorum size that is dictated by arbitration, for + * this quorum policy. The arbiter provider has the leeway to decide that + * the quorum policy takes precedence, and that arbitration does not + * reduce the election quorum size. + */ + public int getElectionQuorumSize(QuorumPolicy quorumPolicy); + + /** + * Return the durability quorum size that is dictated by arbitration, for + * this replica ack policy. The arbiter provider has the leeway to decide + * that the ack policy takes precedence, and that arbitration does not + * reduce the durabilty quorum size. + */ + public int getAckCount(ReplicaAckPolicy ackPolicy); + + /** + * Return true if the environment configuration parameters specified in + * newConfig indicate that this node is not qualified to remain in active + * arbitration + */ + public boolean shouldEndArbitration(ReplicationMutableConfig newConfig); +} \ No newline at end of file diff --git a/src/com/sleepycat/je/rep/arbitration/DesignatedPrimaryProvider.java b/src/com/sleepycat/je/rep/arbitration/DesignatedPrimaryProvider.java new file mode 100644 index 0000000..a94b896 --- /dev/null +++ b/src/com/sleepycat/je/rep/arbitration/DesignatedPrimaryProvider.java @@ -0,0 +1,129 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.arbitration; + +import java.util.logging.Logger; + +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Designated Primary arbitration relies on the configuration parameter + * je.rep.designatedPrimary. This form of arbitration is only effective when + * the electable group size is 2. When one of the two electable nodes goes + * down, the remaining node is permitted to win elections, retain authoritative + * mastership, and commit transactions without any participation from its dead + * sibling, if and only if it has been configured as designated primary. + *
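The class comment above states the activation rule for this provider. Reduced to a standalone predicate it looks like the sketch below; the names are illustrative, and the real check also tolerates a null RepImpl for unit tests.

    /* The designated-primary activation rule in isolation. */
    final class DesignatedPrimaryRule {
        static boolean activationPossible(boolean isDesignatedPrimary,
                                          int electableGroupSize) {
            /* Only a configured primary in a two-node group may arbitrate. */
            return isDesignatedPrimary && electableGroupSize == 2;
        }
    }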
+ * <p>
        + * The user is responsible for ensuring that only one node at any time is + * annointed as the designated primary. There is some sanity checking that + * designated primary is only set by one node by master/replica syncups. The + * parameter is mutable. + */ +public class DesignatedPrimaryProvider implements ArbiterProvider { + + private final RepImpl repImpl; + + private final Logger logger; + + DesignatedPrimaryProvider(RepImpl repImpl) { + this.repImpl = repImpl; + logger = LoggerUtils.getLogger(getClass()); + } + + /** + * Try to activate this node as a Primary, if it has been configured as + * such and if the group size is two. This method is invoked when an + * operation falls short of quorum requirements and is ready to trade + * durability for availability. More specifically it's invoked when an + * election fails, or there is an insufficient number of replicas during + * a begin transaction or a transaction commit. + * + * Active arbitration ends when the Non-Primary contacts it. + * + * @return true if the primary was activated. + */ + @Override + public boolean attemptActivation() { + if (checkDesignatedPrimary()) { + LoggerUtils.info(logger, repImpl, + "Primary activated; quorum is one."); + return true; + } + + LoggerUtils.fine(logger, repImpl, + "Attempted unsuccessfully to activate designated " + + "primary"); + return false; + } + + /** + * Return true if this node is in a 2-node group and is configured as the + * Designated Primary, and is therefore capable of entering active + * arbitration. + */ + @Override + public boolean activationPossible() { + return checkDesignatedPrimary(); + } + + /** + * No cleannup is needed when exiting designated primary arbitration. + */ + @Override + public void endArbitration() { + /* nothing to do. */ + } + + /** + * Check the electable group size and the designated primary configuration + * to see if this node has the option of becoming the primary. + */ + private boolean checkDesignatedPrimary() { + return (repImpl != null) && /* repImpl can be null in unit tests */ + repImpl.isDesignatedPrimary() && + repImpl.getRepNode().getGroup().getElectableGroupSize() == 2; + } + + /** + * When operating under designated primary arbitration, the election quorum + * is 1 for a group with electable group size of 2, + */ + @Override + public int getElectionQuorumSize(QuorumPolicy quorumPolicy) { + return 1; + } + + /** + * Always returns 0, no replica acks are needed when acting under + * designated primary arbitration. + * TODO: is this still true with non voting nodes? + */ + @Override + public int getAckCount(ReplicaAckPolicy ackPolicy) { + return 0; + } + + /** + * Return true if this node is no longer configured as the designated + * primary under the new configuration. + */ + @Override + public boolean shouldEndArbitration(ReplicationMutableConfig newConfig) { + return (!newConfig.getDesignatedPrimary()); + } +} diff --git a/src/com/sleepycat/je/rep/arbitration/package-info.java b/src/com/sleepycat/je/rep/arbitration/package-info.java new file mode 100644 index 0000000..a18733e --- /dev/null +++ b/src/com/sleepycat/je/rep/arbitration/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Manages the designated primary of a two-node group -- this is + * NOT the {@link com.sleepycat.je.rep.arbiter arbiter node}. + */ +package com.sleepycat.je.rep.arbitration; \ No newline at end of file diff --git a/src/com/sleepycat/je/rep/elections/Acceptor.java b/src/com/sleepycat/je/rep/elections/Acceptor.java new file mode 100644 index 0000000..4f58a95 --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/Acceptor.java @@ -0,0 +1,314 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintWriter; +import java.nio.channels.Channels; +import java.util.logging.Level; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.elections.Acceptor.SuggestionGenerator.Ranking; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Protocol.Accept; +import com.sleepycat.je.rep.elections.Protocol.Propose; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.impl.TextProtocol.InvalidMessageException; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Plays the role of Acceptor in the consensus algorithm. It runs in its + * own thread listening for and responding to messages sent by Proposers. + */ +public class Acceptor extends ElectionAgentThread { + + /* + * The currently promised proposal. Proposals below this one are rejected. + */ + private Proposal promisedProposal = null; + + private Value acceptedValue = null; + + /* Used to return suggestions in response to Propose requests. */ + private final SuggestionGenerator suggestionGenerator; + + /* Identifies the Acceptor Service. */ + public static final String SERVICE_NAME = "Acceptor"; + + private final ElectionsConfig config; + + /** + * Creates an Acceptor + */ + public Acceptor(Protocol protocol, + ElectionsConfig config, + SuggestionGenerator suggestionGenerator) { + + super(config.getRepImpl(), protocol, + "Acceptor Thread " + config.getNameIdPair().getName()); + this.config = config; + + this.suggestionGenerator = suggestionGenerator; + } + + /** + * The Acceptor thread body. 
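The run() body that follows is a classic line-oriented service loop: take a connection, read one request line, dispatch on the operation, and write one response line back. A stripped-down sketch of the same shape over plain sockets; all names are illustrative and the protocol details are elided.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.io.PrintWriter;
    import java.net.ServerSocket;
    import java.net.Socket;

    public class TextRequestLoop {
        public static void serve(ServerSocket server) throws IOException {
            while (true) {
                try (Socket s = server.accept();
                     BufferedReader in = new BufferedReader(
                         new InputStreamReader(s.getInputStream()));
                     PrintWriter out =
                         new PrintWriter(s.getOutputStream(), true)) {
                    String request = in.readLine();
                    if (request == null) {
                        continue; /* EOF: peer closed without a request. */
                    }
                    out.println(handle(request));
                }
            }
        }

        private static String handle(String request) {
            return "OK " + request; /* stand-in for parse and dispatch */
        }
    }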
+ */ + @Override + public void run() { + final ServiceDispatcher serviceDispatcher = + config.getServiceDispatcher(); + serviceDispatcher.register(SERVICE_NAME, channelQueue); + LoggerUtils.logMsg + (logger, envImpl, formatter, Level.FINE, "Acceptor started"); + DataChannel channel = null; + try { + while (true) { + channel = serviceDispatcher.takeChannel + (SERVICE_NAME, true /* block */, + protocol.getReadTimeout()); + + if (channel == null) { + /* A soft shutdown. */ + return; + } + + BufferedReader in = null; + PrintWriter out = null; + try { + in = new BufferedReader( + new InputStreamReader( + Channels.newInputStream(channel))); + out = new PrintWriter( + Channels.newOutputStream(channel), true); + String requestLine = in.readLine(); + if (requestLine == null) { + LoggerUtils.logMsg(logger, envImpl, + formatter, Level.FINE, + "Acceptor: EOF on request"); + continue; + } + RequestMessage requestMessage = null; + try { + requestMessage = protocol.parseRequest(requestLine); + } catch (InvalidMessageException ime) { + protocol.processIME(channel, ime); + continue; + } + ResponseMessage responseMessage = null; + if (requestMessage.getOp() == protocol.PROPOSE) { + responseMessage = process((Propose) requestMessage); + } else if (requestMessage.getOp() == protocol.ACCEPT) { + responseMessage = process((Accept) requestMessage); + } else if (requestMessage.getOp() == protocol.SHUTDOWN) { + break; + } else { + LoggerUtils.logMsg(logger, envImpl, + formatter, Level.SEVERE, + "Unrecognized request: " + + requestLine); + continue; + } + + /* + * The request message may be of an earlier version. If so, + * this node transparently read the older version. JE only + * throws out InvalidMesageException when the version of + * the request message is newer than the current protocol. + * To avoid sending a repsonse that the requester cannot + * understand, we send a response in the same version as + * that of the original request message. + */ + responseMessage.setSendVersion + (requestMessage.getSendVersion()); + out.println(responseMessage.wireFormat()); + } catch (IOException e) { + LoggerUtils.logMsg + (logger, envImpl, formatter, Level.WARNING, + "IO error on socket: " + e.getMessage()); + continue; + } finally { + Utils.cleanup(logger, envImpl, formatter, channel, in, out); + cleanup(); + } + } + } catch (InterruptedException e) { + if (isShutdown()) { + /* Treat it like a shutdown, exit the thread. */ + return; + } + LoggerUtils.logMsg(logger, envImpl, formatter, Level.WARNING, + "Acceptor unexpected interrupted"); + throw EnvironmentFailureException.unexpectedException(e); + } finally { + serviceDispatcher.cancel(SERVICE_NAME); + cleanup(); + } + } + + /** + * Responds to a Propose request. + * + * @param propose the request proposal + * + * @return the response: a Promise if the request was accepted, a Reject + * otherwise. 
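The process(Propose) method that follows implements the standard Paxos promise rule: reject any proposal ranked below the one already promised, otherwise record and promise the new proposal. The rule in isolation, as a sketch; Proposal is assumed Comparable, as in the surrounding code.

    /* Minimal sketch of the promise rule; not JE's actual types. */
    class PromiseRule<P extends Comparable<P>> {
        private P promised; // highest proposal promised so far

        /* Returns true for a Promise, false for a Reject. */
        synchronized boolean onPropose(P proposal) {
            if (promised != null && promised.compareTo(proposal) > 0) {
                return false; // stale proposal: reject
            }
            promised = proposal; // promise not to accept anything lower
            return true;
        }
    }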
+ */ + ResponseMessage process(Propose propose) { + + if ((promisedProposal != null) && + (promisedProposal.compareTo(propose.getProposal()) > 0)) { + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE, + "Reject Propose: " + propose.getProposal() + + " Promised proposal: " + promisedProposal); + return protocol.new Reject(promisedProposal); + } + + promisedProposal = propose.getProposal(); + final Value suggestedValue = suggestionGenerator.get(promisedProposal); + final Ranking suggestionRanking = + suggestionGenerator.getRanking(promisedProposal); + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE, + "Promised: " + promisedProposal + + " Suggested Value: " + suggestedValue + + " Suggestion Ranking: " + suggestionRanking); + return protocol.new Promise + (promisedProposal, + acceptedValue, + suggestedValue, + suggestionRanking, + config.getElectionPriority(), + config.getLogVersion(), + JEVersion.CURRENT_VERSION); + } + + /** + * Responds to Accept request + * + * @param accept the request + * @return an Accepted or Reject response as appropriate. + */ + ResponseMessage process(Accept accept) { + if ((promisedProposal != null) && + (promisedProposal.compareTo(accept.getProposal()) != 0)) { + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE, + "Reject Accept: " + accept.getProposal() + + " Promised proposal: " + promisedProposal); + return protocol.new Reject(promisedProposal); + } + acceptedValue = accept.getValue(); + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE, + "Promised: " + promisedProposal + " Accepted: " + + accept.getProposal() + " Value: " + acceptedValue); + return protocol.new Accepted(accept.getProposal(), acceptedValue); + } + + public interface SuggestionGenerator { + + /** + * Used to generate a suggested value for use by a Proposer. It's a + * hint. The proposal argument may be used to freeze values like the + * VLSN number from advancing (if they were used in the ranking) until + * an election has completed. + * + * @param proposal the Proposal for which the value is being suggested. + * + * @return the suggested value. + */ + abstract Value get(Proposal proposal); + + /** + * The importance associated with the above suggestion. Acceptors have + * to agree on a common system for ranking importance so that the + * relative importance of different suggestions can be meaningfully + * compared. + * + * @param the proposal associated with the ranking + * + * @return the importance of the suggestion as a number + */ + abstract Ranking getRanking(Proposal proposal); + + /** + * A description of the ranking used when comparing Promises to pick a + * Master. + */ + class Ranking implements Comparable { + /* The major component of the ranking. */ + final long major; + + /* The minor component. 
*/ + final long minor; + + static Ranking UNINITIALIZED = + new Ranking(Long.MIN_VALUE, Long.MIN_VALUE); + + public Ranking(long major, long minor) { + this.major = major; + this.minor = minor; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (int) (major ^ (major >>> 32)); + result = prime * result + (int) (minor ^ (minor >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Ranking other = (Ranking) obj; + if (major != other.major) { + return false; + } + if (minor != other.minor) { + return false; + } + return true; + } + + @Override + public String toString() { + return "Ranking major:" + major + " minor:" + minor; + } + + @Override + public int compareTo(Ranking o) { + int result = Long.compare(major, o.major); + if (result != 0) { + return result; + } + return Long.compare(minor, o.minor); + } + } + } +} diff --git a/src/com/sleepycat/je/rep/elections/ElectionAgentThread.java b/src/com/sleepycat/je/rep/elections/ElectionAgentThread.java new file mode 100644 index 0000000..b4b104e --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/ElectionAgentThread.java @@ -0,0 +1,104 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.logging.Formatter; +import java.util.logging.Logger; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StoppableThread; + +/** + * ElectionAgentThread is the base class for the election agent threads + * underlying the Acceptor and Learner agents. + */ +public class ElectionAgentThread extends StoppableThread { + + /* The instance of the protocol bound to a specific Value and Proposal */ + protected final Protocol protocol; + + protected final Logger logger; + + /* + * Used when the unit test AcceptorTest creates a RepNode without a RepIml + * instance. + */ + protected final Formatter formatter; + + /* + * The queue into which the ServiceDispatcher queues socket channels for + * new Feeder instances. + */ + protected final BlockingQueue channelQueue = + new LinkedBlockingQueue(); + + protected ElectionAgentThread(RepNode repNode, + Protocol protocol, + String threadName) { + super((repNode == null ? null : repNode.getRepImpl()), threadName); + this.protocol = protocol; + + logger = (envImpl != null) ? 
+ LoggerUtils.getLogger(getClass()) : + LoggerUtils.getLoggerFormatterNeeded(getClass()); + + formatter = new ReplicationFormatter(protocol.getNameIdPair()); + } + + protected ElectionAgentThread(EnvironmentImpl envImpl, + Protocol protocol, + String threadName) { + super(envImpl, threadName); + this.protocol = protocol; + + logger = (envImpl != null) ? + LoggerUtils.getLogger(getClass()) : + LoggerUtils.getLoggerFormatterNeeded(getClass()); + + formatter = new ReplicationFormatter(protocol.getNameIdPair()); + } + + @Override + protected Logger getLogger() { + return logger; + } + + /** + * Shuts down the Agent. + * @throws InterruptedException + */ + public void shutdown() + throws InterruptedException{ + + if (shutdownDone(logger)) { + return; + } + shutdownThread(logger); + } + + @Override + protected int initiateSoftShutdown() { + channelQueue.clear(); + /* Add special entry so that the channelQueue.poll operation exits. */ + channelQueue.add(RepUtils.CHANNEL_EOF_MARKER); + return 0; + } +} diff --git a/src/com/sleepycat/je/rep/elections/Elections.java b/src/com/sleepycat/je/rep/elections/Elections.java new file mode 100644 index 0000000..c65caa0 --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/Elections.java @@ -0,0 +1,997 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import static com.sleepycat.je.rep.impl.RepParams.ELECTIONS_REBROADCAST_PERIOD; + +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Date; +import java.util.HashSet; +import java.util.Set; +import java.util.TimerTask; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.IntConfigParam; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.elections.Proposer.MaxRetriesException; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.elections.Utils.FutureTrackingCompService; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.node.ElectionQuorum; +import com.sleepycat.je.rep.impl.node.FeederManager; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; 
+import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.StoppableThreadFactory; + +/** + * Represents the environment in which elections are run on behalf of a node. + * There is exactly one instance of an Elections for each node. Elections are + * initiated via this class. + * + * One of the primary goals of this interface is to keep Elections as free + * standing as possible, so that we can change how elections are held, or + * aspects of the election infrastructure with minimal impact on replication. + * For example, elections currently used tcp for communication of election + * messages but may want to switch over to udp. Such a change should be + * confined to just the Elections module. Other changes might include changes + * to the strategy used to suggest Values and the weight associated with a + * suggested Value. + * + * The following are the principal points of interaction between Elections and + * Replication: + * + * 1) The initiation of elections via the initiateElections() method. + * + * 2) The suggestion of nodes as masters and the ranking of the + * suggestion. This is done via the Acceptor.SuggestionGenerator interface. An + * instance of this interface is supplied when the Elections class is + * instantiated. Note that the implementation must also initiate a freeze of + * VLSNs to ensure that the ranking does not change as the election + * progresses. The VLSN can make progress when the node is informed via its + * Listener that an election with a higher Proposal number (than the one in the + * Propose request) has finished. + * + * 3) Obtaining the result of an election initiated in step 1. This is done via + * the Learner.Listener interface. An instance of this class is supplied when + * the Election class is first instantiated. + * + */ + +public class Elections { + + /* Describes all nodes of the group. */ + private RepGroupImpl repGroup; + + /* + * A unique identifier for this election agent. It's used by all the + * agents that comprise Elections. + */ + private final NameIdPair nameIdPair; + + /* + * A repNode is kept for error propagation if this election belongs to a + * replicated environment. Elections are dependent on the RepNode to track + * the number of members currently in a group and to deal with changing + * quorum requirements when a node is acting as a Primary. + * Note that repNode may be null if the creator of this Elections object + * does not initiate an election and if the node can never be a master. + * The Arbiter uses it this way. + */ + private final RepNode repNode; + + private final ElectionsConfig config; + + private final RepImpl envImpl; + + /* + * Shutdown can only be executed once. The shutdown field protects against + * multiple invocations. + */ + private final AtomicBoolean shutdown = new AtomicBoolean(false); + + /* The three agents involved in the elections run by this class. */ + private Proposer proposer; + private Acceptor acceptor; + private Learner learner; + + /* The thread pool used to manage the threads used by the Proposer. */ + private final ExecutorService pool; + + /* Components of the agents. */ + final private Acceptor.SuggestionGenerator suggestionGenerator; + final private Learner.Listener listener; + + /* + * The protocol used to run the elections. 
All three agents use this + * instance of the protocol + */ + private final Protocol protocol; + + /* + * The thread used to run the proposer during the current election. It's + * volatile to ensure that shutdown can perform an unsynchronized access + * to the iv even if an election is in progress. + */ + private volatile ElectionThread electionThread = null; + + /* The listener used to indicate completion of an election. */ + private ElectionListener electionListener = null; + + /** + * The timer task that re-broadcasts election results from a master. It's + * null in unit tests. + */ + private final RebroadcastTask rebroadcastTask; + + /* The number of elections that were held. */ + private int nElections = 0; + + private final Logger logger; + private final Formatter formatter; + + /** + * Creates an instance of Elections. There should be just one instance per + * node. Note that the creation does not result in the election threads + * being started, that is, the instance does not participate in elections. + * This call is typically followed up with a call to startLearner that lets + * it both learn about and supply elections results, and, if applicable, by + * a subsequent call to participate to let it vote in elections. + * The RepNode parameter is null when the Elections object is used by + * the Arbiter. The Arbiter is a Learner and Acceptor. It will never + * initiate an election (the RepNode must be non-null) and never + * become Master. + * + * @param config elections configuration + * @param listener the Listener invoked when results are available + * @param suggestionGenerator used by the Acceptor + */ + public Elections(ElectionsConfig config, + Learner.Listener listener, + Acceptor.SuggestionGenerator suggestionGenerator) { + + envImpl = config.getRepImpl(); + this.repNode = config.getRepNode(); + this.config = config; + this.nameIdPair = config.getNameIdPair(); + DataChannelFactory channelFactory; + + if (repNode != null && repNode.getRepImpl() != null) { + logger = LoggerUtils.getLogger(getClass()); + final DbConfigManager configManager = envImpl.getConfigManager(); + int rebroadcastPeriod = configManager. + getDuration(ELECTIONS_REBROADCAST_PERIOD); + rebroadcastTask = new RebroadcastTask(rebroadcastPeriod); + } else { + logger = LoggerUtils.getLoggerFormatterNeeded(getClass()); + rebroadcastTask = null; + } + channelFactory = config.getServiceDispatcher().getChannelFactory(); + formatter = new ReplicationFormatter(nameIdPair); + + protocol = new Protocol(TimebasedProposalGenerator.getParser(), + MasterValue.getParser(), + config.getGroupName(), + nameIdPair, + config.getRepImpl(), + channelFactory); + this.suggestionGenerator = suggestionGenerator; + this.listener = listener; + + pool = Executors.newCachedThreadPool + (new StoppableThreadFactory("JE Elections Factory " + nameIdPair, + logger)); + } + + /* The thread pool used to allocate threads used during elections. */ + public ExecutorService getThreadPool() { + return pool; + } + + public ServiceDispatcher getServiceDispatcher() { + return config.getServiceDispatcher(); + } + + public ElectionQuorum getElectionQuorum() { + return repNode.getElectionQuorum(); + } + + public RepNode getRepNode() { + return repNode; + } + + public Logger getLogger() { + return logger; + } + + /* Get repImpl for Proposer to set up loggers. */ + public RepImpl getRepImpl() { + return config.getRepImpl(); + } + + /** + * Starts a Learner agent. 
Note that the underlying Protocol instance it + * uses must have a current picture of the replication group otherwise it + * will reject messages from nodes that it does not think are currently + * part of the replication group. + */ + public void startLearner() { + // repNode used for thread name but can be null here + learner = new Learner(config.getRepImpl(), + protocol, + config.getServiceDispatcher()); + learner.start(); + learner.addListener(listener); + electionListener = new ElectionListener(); + learner.addListener(electionListener); + if (rebroadcastTask != null) { + repNode.getTimer().schedule(rebroadcastTask, + rebroadcastTask.getPeriod(), + rebroadcastTask.getPeriod()); + } + } + + /** + * Permits the Election agent to start participating in elections held + * by the replication group, or initiate elections on behalf of this node. + * Participation in elections is initiated only after a node has current + * information about group membership. + */ + public void participate() { + proposer = new RankingProposer(this, nameIdPair); + startAcceptor(); + } + + public void startAcceptor() { + acceptor = new Acceptor(protocol, config, suggestionGenerator); + acceptor.start(); + } + + /** + * Returns the Acceptor associated with this node. + * @return the Acceptor + */ + public Acceptor getAcceptor() { + return acceptor; + } + + /** + * Returns a current set of acceptor sockets. + */ + public Set getAcceptorSockets() { + if (repGroup == null) { + throw EnvironmentFailureException.unexpectedState + ("No rep group was configured"); + } + return repGroup.getAllAcceptorSockets(); + } + + public Protocol getProtocol() { + return protocol; + } + + /** + * Returns the Learner associated with this node + * @return the Learner + */ + public Learner getLearner() { + return learner; + } + + /** + * The number of elections that have been held. Used for testing. + * + * @return total elections initiated by this node. + */ + public int getElectionCount() { + return nElections; + } + + /** + * Initiates an election. Note that this may just be one of many possible + * elections that are in progress in a replication group. The method does + * not wait for this election to complete, but instead returns as soon as + * any election result (including one initiated by some other Proposer) + * becomes available via the Learner. + * + * A proposal submitted as part of this election may lose out to other + * concurrent elections, or there may not be a sufficient number of + * Acceptor agents active or reachable to reach a quorum. In such cases, + * the election will not produce a result. That is, there will be no + * notification to the Learners. Note that only one election can be + * initiated at a time at a node If a new election is initiated while one + * is already in progress, then the method will wait until it completes + * before starting a new one. + * + * The results of this and any other elections that may have been initiated + * concurrently by other nodes are made known to the Learner agents. Note + * that this method does not return a result, since the concurrent arrival + * of results could invalidate the result even before its returned. + * + * @param newGroup the definition of the group to be used for this election + * @param quorumPolicy the policy to be used to reach a quorum. 
+ * @param maxRetries the max number of times a proposal may be retried + * @throws InterruptedException + */ + public synchronized void initiateElection(RepGroupImpl newGroup, + QuorumPolicy quorumPolicy, + int maxRetries) + throws InterruptedException { + + updateRepGroup(newGroup); + long startTime = System.currentTimeMillis(); + nElections++; + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Election initiated; election #" + nElections); + if (electionThread != null) { + /* + * The factor of four used below to arrive at a timeout value is a + * heuristic: A factor of two to cover any pending message exchange + * and another factor of two as a grace period. We really don't + * expect to hit this timeout in the absence of networking issues, + * hence the thread dump to understand the reason in case there's + * some bug. + */ + final int waitMs = protocol.getReadTimeout() * 4; + // A past election request, wait until the election has quiesced + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Election in progress. Waiting ... for " + + waitMs + "ms"); + + electionThread.join(waitMs); + if (electionThread.isAlive()) { + /* Dump out threads for future analysis if it did not quit. */ + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Election did not finish as expected." + + " resorting to shutdown"); + LoggerUtils.fullThreadDump(logger, envImpl, Level.INFO); + electionThread.shutdown(); + } + + final Exception exception = + electionThread.getSavedShutdownException(); + if (exception != null) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.UNEXPECTED_EXCEPTION, + exception); + } + } + + CountDownLatch countDownLatch = null; + synchronized (electionListener) { + // Watch for any election results from this point forward + countDownLatch = electionListener.setLatch(); + } + + RetryPredicate retryPredicate = + new RetryPredicate(repNode, maxRetries, countDownLatch); + electionThread = new ElectionThread(quorumPolicy, retryPredicate, + envImpl, + (envImpl == null) ? null : + envImpl.getName()); + + electionThread.start(); + try { + /* Wait until we hear of some "new" election result */ + countDownLatch.await(); + if (retryPredicate.pendingRetries <= 0) { + /* Ran out of retries -- a test situation */ + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Retry count exhausted: " + + retryPredicate.maxRetries); + } + + /* + * Note that the election thread continues to run past this point + * and may be active upon re-entry + */ + } catch (InterruptedException e) { + LoggerUtils.logMsg(logger, envImpl, formatter, Level.WARNING, + "Election initiation interrupted"); + shutdown(); + throw e; + } + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Election finished. Elapsed time: " + + (System.currentTimeMillis() - startTime) + "ms"); + } + + /** + * The standard method for requesting and election, we normally want to run + * elections until we hear of an election result. Once initiated, elections + * run until there is a successful conclusion, that is, a new master has + * been elected. Since a successful conclusion requires the participation + * of at least a simple majority, this may take a while if a sufficient + * number of nodes are not available. + * + * The above method is used mainly for testing. 
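The method above parks the initiating thread on a CountDownLatch that the Learner listener trips as soon as any election result is heard, whether or not it came from this node's own proposal. The handshake reduced to its essentials; this is a sketch, not JE code.

    import java.util.concurrent.CountDownLatch;

    public class ResultLatchDemo {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch resultLatch = new CountDownLatch(1);

            /* Stand-in for the Learner.Listener notify() callback. */
            new Thread(() -> {
                /* ... election result arrives from some node ... */
                resultLatch.countDown(); // wake the waiting initiator
            }).start();

            resultLatch.await(); // the initiateElection-style wait
            System.out.println("heard an election result");
        }
    }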
+ * + * @throws InterruptedException + * + * @see #initiateElection + */ + public synchronized void initiateElection(RepGroupImpl newGroup, + QuorumPolicy quorumPolicy) + throws InterruptedException { + + initiateElection(newGroup, quorumPolicy, Integer.MAX_VALUE); + } + + /** + * Updates elections notion of the rep group, so that acceptors are aware + * of the current state of the group, even in the absence of an election + * conducted by the node itself. + * + * This method should be invoked each time a node becomes aware of a group + * membership change. + * + * @param newRepGroup defines the new group + */ + public void updateRepGroup(RepGroupImpl newRepGroup) { + repGroup = newRepGroup; + protocol.updateNodeIds(newRepGroup.getAllElectionMemberIds()); + } + + /** + * Updates elections notion of the rep group, so that acceptors are aware + * of the current state of the group, even in the absence of an election + * conducted by the node itself. However this method does not update the + * members in the protocol so checks are not made for the member id. + * + * This method should be invoked each time a node becomes aware of a group + * membership change. + * + * @param newRepGroup defines the new group + */ + public void updateRepGroupOnly(RepGroupImpl newRepGroup) { + repGroup = newRepGroup; + } + + /** + * Predicate to determine whether an election is currently in progress. + */ + public synchronized boolean electionInProgress() { + return (electionThread != null) && electionThread.isAlive(); + } + + /** + * Statistics used during testing. + */ + public synchronized StatGroup getStats() { + if (electionInProgress()) { + throw EnvironmentFailureException.unexpectedState + ("Election in progress"); + } + return electionThread.getStats(); + } + + /** + * For INTERNAL TESTING ONLY. Ensures that the initiated election has + * reached a conclusion that can be tested. + * + * @throws InterruptedException + */ + public synchronized void waitForElection() + throws InterruptedException { + + assert(electionThread != null); + electionThread.join(); + } + + /** + * Shutdown all acceptor and learner agents by broadcasting a Shutdown + * message. It waits until reachable agents have acknowledged the message + * and the local learner and acceptor threads have exited. + * + * This is method is intended for use during testing only. + * + * @throws InterruptedException + */ + public void shutdownAcceptorsLearners + (Set acceptorSockets, + Set learnerSockets) + throws InterruptedException { + + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Elections being shutdown"); + FutureTrackingCompService compService = + Utils.broadcastMessage(acceptorSockets, + Acceptor.SERVICE_NAME, + protocol.new Shutdown(), + pool); + /* The 60 seconds is just a reasonable timeout for use in tests */ + Utils.checkFutures(compService, 60, TimeUnit.SECONDS, + logger, envImpl, formatter); + compService = Utils.broadcastMessage(learnerSockets, + Learner.SERVICE_NAME, + protocol.new Shutdown(), + pool); + Utils.checkFutures(compService, 60, TimeUnit.SECONDS, + logger, envImpl, formatter); + if (learner != null) { + learner.join(); + } + if (acceptor != null) { + acceptor.join(); + } + } + + /** + * Shuts down just the election support at this node. That is the Acceptor, + * and Learner associated with this Elections as well as any pending + * election running in its thread is terminated. 
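The shutdown() method that follows uses AtomicBoolean.compareAndSet to make teardown idempotent: however many threads call it, the teardown sequence runs exactly once. The guard in isolation; the class name is illustrative.

    import java.util.concurrent.atomic.AtomicBoolean;

    public class OnceOnlyShutdown {
        private final AtomicBoolean shutdown = new AtomicBoolean(false);

        public void shutdown() {
            if (!shutdown.compareAndSet(false, true)) {
                return; // another caller already won the race
            }
            /* ... stop acceptor, learner, election thread, pool ... */
        }

        public boolean isShutdown() {
            return shutdown.get();
        }
    }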
+ * + * @throws InterruptedException + */ + public void shutdown() throws InterruptedException { + if (!shutdown.compareAndSet(false, true)) { + return; + } + + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Elections shutdown initiated"); + if (acceptor != null) { + acceptor.shutdown(); + } + + if (learner != null) { + learner.shutdown(); + } + + if (electionThread != null) { + electionThread.shutdown(); + } + + if (proposer != null) { + proposer.shutdown(); + } + + if (rebroadcastTask != null) { + rebroadcastTask.cancel(); + } + pool.shutdown(); + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Elections shutdown completed"); + } + + public boolean isShutdown() { + return shutdown.get(); + } + + /** + * Used to short-circuit Proposal retries if a new election has completed + * since the time this election was initiated. + */ + static class ElectionListener implements Learner.Listener { + + /* + * The election latch that is shared by the RetryPredicate. It's + * counted down either when some election result becomes available or + * when elections that are in progress need to be shutdown. + */ + private CountDownLatch electionLatch = null; + + ElectionListener() { + this.electionLatch = null; + } + + /** + * Returns a new latch to be associated with the RetryPredicate. + */ + public synchronized CountDownLatch setLatch() { + electionLatch = new CountDownLatch(1); + return electionLatch; + } + + /** + * Used during shutdown only + * + * @return the latch on which elections wait + */ + public CountDownLatch getElectionLatch() { + return electionLatch; + } + + /** + * The Listener protocol announcing election results. + */ + @Override + public synchronized void notify(Proposal proposal, Value value) { + // Free up the retry predicate if its waiting + if (electionLatch != null) { + electionLatch.countDown(); + } + } + } + + /** + * Implements the retry policy + */ + static class RetryPredicate implements Proposer.RetryPredicate { + private final RepNode repNode; + private final int maxRetries; + private int pendingRetries; + /* The latch that is activated by the Listener. */ + private final CountDownLatch electionLatch; + + /* + * The number of time to retry an election before trying to activate + * the primary. + */ + private final int primaryRetries; + + private static final int BACKOFF_SLEEP_MIN = 1; + private static final int BACKOFF_SLEEP_MAX = 32; + + private int backoffSleepInterval = BACKOFF_SLEEP_MIN; + + RetryPredicate(RepNode repNode, + int maxRetries, + CountDownLatch electionLatch) { + this.repNode = repNode; + this.maxRetries = maxRetries; + pendingRetries = maxRetries; + this.electionLatch = electionLatch; + final RepImpl repImpl = repNode.getRepImpl(); + final IntConfigParam retriesParam = + RepParams.ELECTIONS_PRIMARY_RETRIES; + primaryRetries = (repImpl != null) ? + repImpl.getConfigManager().getInt(retriesParam) : + Integer.parseInt(retriesParam.getDefault()); + } + + /** + * Returns the time to backoff before a retry. The backoff is + * non-linear. 
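The backoffWaitTime method below computes a capped exponential backoff: the interval doubles on every call, up to 32 seconds, and is returned in milliseconds. The same arithmetic as a runnable sketch; note that the first returned wait is 2 seconds, because the interval doubles before it is returned.

    public class Backoff {
        private static final int MIN_S = 1;
        private static final int MAX_S = 32;
        private int intervalS = MIN_S;

        int nextWaitMs() {
            intervalS = Math.min(MAX_S, intervalS * 2);
            return intervalS * 1000;
        }

        public static void main(String[] args) {
            Backoff b = new Backoff();
            /* Prints 2000, 4000, 8000, 16000, 32000, 32000, 32000. */
            for (int i = 0; i < 7; i++) {
                System.out.println(b.nextWaitMs());
            }
        }
    }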
+ * + * @return the time to backoff in ms + */ + private int backoffWaitTime() { + backoffSleepInterval = + Math.min(BACKOFF_SLEEP_MAX, backoffSleepInterval * 2); + return backoffSleepInterval * 1000; + } + + /** + * Implements the protocol + */ + @Override + public boolean retry() throws InterruptedException { + if ((maxRetries - pendingRetries) >= primaryRetries) { + if ((repNode != null) && + repNode.getArbiter().activateArbitration()) { + pendingRetries = maxRetries; + return true; + } + } + if (pendingRetries-- <= 0) { + /* Free up the main election thread */ + electionLatch.countDown(); + return false; + } + + electionLatch.await(backoffWaitTime(), TimeUnit.MILLISECONDS); + if (electionLatch.getCount() == 0) { + /* An election completed, we can quit issuing proposals */ + return false; + } + /* Timed out and did not hear any election results. */ + return true; + } + + /** + * The number of times a retry was attempted + */ + @Override + public int retries() { + return (maxRetries-pendingRetries); + } + } + + /** + * The thread that actually runs an election. The thread exits either after + * it has successfully had its proposal accepted and after it has informed + * all learners, or if it gives up after some number of retries. + */ + private class ElectionThread extends StoppableThread { + + final private QuorumPolicy quorumPolicy; + + /* Non-null on termination if a proposal was issued and accepted. */ + Proposer.WinningProposal winningProposal; + + /* Non-null at termination if no proposal was accepted. */ + MaxRetriesException maxRetriesException; + + final private RetryPredicate retryPredicate; + + private ElectionThread(QuorumPolicy quorumPolicy, + RetryPredicate retryPredicate, + EnvironmentImpl envImpl, + String envName) { + super(envImpl, "ElectionThread_" + envName); + this.quorumPolicy = quorumPolicy; + this.retryPredicate = retryPredicate; + } + + /** + * Carries out an election and informs learners of the results. Any + * uncaught exception will invalidate the environment if this is + * being executed on behalf of a replicated node. + */ + @Override + public void run() { + try { + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Started election thread " + new Date()); + winningProposal = + proposer.issueProposal(quorumPolicy, retryPredicate); + + /* + * TODO: Consider adding an optimization to inform SECONDARY + * nodes of election results, but continuing to only wait for + * the completion of notifications to ELECTABLE nodes. That + * change would increase the chance that SECONDARY nodes have + * up-to-date information about the master, but would avoid + * adding sensitivity to potentially longer network delays in + * communicating with secondary nodes. + */ + Learner.informLearners(repGroup.getAllLearnerSockets(), + winningProposal, + protocol, + pool, + logger, + config.getRepImpl(), + null); + } catch (MaxRetriesException mre) { + maxRetriesException = mre; + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Exiting election after " + + retryPredicate.retries() + " retries"); + return; + } catch (InterruptedException e) { + pool.shutdownNow(); + LoggerUtils.logMsg(logger, envImpl, formatter, Level.INFO, + "Election thread interrupted"); + } catch (Exception e) { + saveShutdownException(e); + } finally { + cleanup(); + LoggerUtils.logMsg + (logger, envImpl, formatter, Level.INFO, + "Election thread exited. Group master: " + + ((repNode != null) ? 
+ repNode.getMasterStatus().getGroupMasterNameId() : + Integer.MAX_VALUE)); + } + } + + public void shutdown() { + + if (shutdownDone(logger)) { + return; + } + shutdownThread(logger); + } + + @Override + protected int initiateSoftShutdown() { + + final CountDownLatch electionLatch = + electionListener.getElectionLatch(); + + if (electionLatch != null) { + + /* + * Unblock any initiated elections waiting for a result as + * well as this thread. + */ + electionLatch.countDown(); + } + + /* + * Wait roughly for the time it would take for a read to timeout. + * since the delay in testing the latch is probably related to + * some slow network event + */ + return protocol.getReadTimeout(); + } + + /** + * Statistics from the election. Should only be invoked after the run() + * method has exited. + * + * @return statistics generated by the proposer + */ + StatGroup getStats() { + return (winningProposal != null) ? + winningProposal.proposerStats : + maxRetriesException.proposerStats; + } + + /** + * @see StoppableThread#getLogger + */ + @Override + protected Logger getLogger() { + return logger; + } + } + + /** + * Used to propagate the results of an election to any monitors. Note that + * monitors are informed of results redundantly, both from the node that + * concludes the election and via this re-propagation. The use of multiple + * network paths increases the likelihood that the result will reach the + * monitor via some functioning network path. + * + * The method returns immediately after queuing the operation in the + * thread pool. + */ + public void asyncInformMonitors(Proposal proposal, Value value) { + final Set monitorSockets = + repGroup.getAllMonitorSockets(); + if (monitorSockets.size() == 0) { + return; + } + LoggerUtils.logMsg + (logger, envImpl, formatter, Level.INFO, + String.format("Propagating election results to %d monitors\n", + monitorSockets.size())); + pool.execute(new InformLearners + (monitorSockets, + new Proposer.WinningProposal(proposal, value, null))); + } + + /** + * Provides the underpinnings of the async mechanism used to deliver + * election results to the monitors. + */ + private class InformLearners implements Runnable { + final Set learners; + final Proposer.WinningProposal winningProposal; + + InformLearners(Set learners, + Proposer.WinningProposal winningProposal) { + this.learners = learners; + this.winningProposal = winningProposal; + } + + @Override + public void run() { + Learner.informLearners(learners, + winningProposal, + protocol, + pool, + logger, + config.getRepImpl(), + null); + } + } + + /** + * Task to re-inform learners of election results by re-broadcasting the + * results of an election from the master. This re-broadcast is intended to + * help in network partition situations. See [#20220] for details. + */ + private class RebroadcastTask extends TimerTask { + + /* Lock to ensure that async executions don't overlap. */ + private final ReentrantLock lock = new ReentrantLock(); + private int acquireFailCount = 0; + private final int periodMs; + + public RebroadcastTask(int periodMs) { + this.periodMs = periodMs; + } + + public int getPeriod() { + return periodMs; + } + + /** + * If the node is a master, it broadcasts election results to nodes + * that are not currently connected to it via feeders. + * + * It's worth noting that since this is a timer task method it must be + * be lightweight. So the actual broadcast is done in an asynchronous + * method using a thread from the election thread pool. 
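The pattern just described, reduced to a sketch: a TimerTask that stays lightweight by skipping a cycle when the previous one is still running, and that hands the actual broadcast to a pool thread. Names are illustrative.

    import java.util.Timer;
    import java.util.TimerTask;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.locks.ReentrantLock;

    public class PeriodicRebroadcast {
        private final ReentrantLock lock = new ReentrantLock();
        private final ExecutorService pool = Executors.newCachedThreadPool();

        void schedule(Timer timer, long periodMs) {
            timer.schedule(new TimerTask() {
                @Override
                public void run() {
                    if (!lock.tryLock()) {
                        return; /* previous cycle still active: skip */
                    }
                    try {
                        /* Queue the heavy work; the timer thread
                           returns immediately. */
                        pool.execute(() -> { /* broadcast results here */ });
                    } finally {
                        lock.unlock();
                    }
                }
            }, periodMs, periodMs);
        }
    }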
+ */ + @Override + public void run() { + try { + if (!lock.tryLock()) { + if ((++acquireFailCount % 100) == 0) { + LoggerUtils.logMsg(logger, envImpl, formatter, + Level.WARNING, + "Failed to acquire lock after " + + acquireFailCount + " retries"); + + } + return; + } + acquireFailCount = 0; + if (!repNode.getMasterStatus().isGroupMaster()) { + return; + } + + /* + * Re-informing when the node is a master is just an + * optimization, it does not impact correctness. Further + * minimize network traffic by trying just the nodes that are + * currently disconnected. + */ + final FeederManager feederManager = repNode.feederManager(); + final Set active = feederManager.activeReplicas(); + active.add(repNode.getNodeName()); + + final Set learners = + new HashSet<>(); + for (final RepNodeImpl rn : repGroup.getAllLearnerMembers()) { + if (!active.contains(rn.getName())) { + learners.add(rn.getSocketAddress()); + } + } + + if (learners.size() == 0) { + return; + } + + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE, + "informing learners:" + + Arrays.toString(learners.toArray()) + + " active: " + + Arrays.toString(active.toArray())); + + pool.execute(new Runnable() { + @Override + public void run() { + learner.reinformLearners(learners, pool); + } + }); + } catch (Exception e) { + LoggerUtils.logMsg(logger, envImpl, formatter, Level.SEVERE, + "Unexpected exception:" + e.getMessage()); + } finally { + if (lock.isHeldByCurrentThread()) { + lock.unlock(); + } + } + } + } +} diff --git a/src/com/sleepycat/je/rep/elections/ElectionsConfig.java b/src/com/sleepycat/je/rep/elections/ElectionsConfig.java new file mode 100644 index 0000000..d2b51ca --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/ElectionsConfig.java @@ -0,0 +1,66 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; + +public interface ElectionsConfig { + + /** + * Gets the replication group name. + * @return group name + */ + public String getGroupName(); + + /** + * Gets the nodes NameIdPair. + * @return NameIdPair + */ + public NameIdPair getNameIdPair(); + + /** + * Gets the ServiceDispatcher. + * @return ServiceDispatcher + */ + public ServiceDispatcher getServiceDispatcher(); + + /** + * Gets the election priority. + * @return election priority + */ + public int getElectionPriority(); + + /** + * Gets the JE log version. + * @return log version + */ + public int getLogVersion(); + + /** + * Gets the RepImpl. + * @return RepImpl + */ + public RepImpl getRepImpl(); + + /** + * Get the RepNode. May be null if the Elections + * object is not used for the initiation of + * an election. 
+ * @return RepNode + */ + public RepNode getRepNode(); +} diff --git a/src/com/sleepycat/je/rep/elections/Learner.java b/src/com/sleepycat/je/rep/elections/Learner.java new file mode 100644 index 0000000..8a4a77e --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/Learner.java @@ -0,0 +1,568 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintWriter; +import java.net.InetSocketAddress; +import java.nio.channels.Channels; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Proposer.WinningProposal; +import com.sleepycat.je.rep.elections.Protocol.MasterQueryResponse; +import com.sleepycat.je.rep.elections.Protocol.Result; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.elections.Utils.FutureTrackingCompService; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.TextProtocol.InvalidMessageException; +import com.sleepycat.je.rep.impl.TextProtocol.MessageError; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.MessageOp; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.StoppableThreadFactory; + +/** + * The Learner agent. It runs in its own dedicated thread, listening for + * messages announcing the results of elections and, in turn, invoking + * Listeners within the process to propagate the result. It also listens for + * requests asking for elections results, and provides static methods for + * requesting those results. + */ +public class Learner extends ElectionAgentThread { + + /* The service dispatcher used by the Learner */ + private final ServiceDispatcher serviceDispatcher; + + /* The listeners interested in Election outcomes. */ + private final List listeners = new LinkedList<>(); + + /* The latest winning proposal and value propagated to Listeners. */ + private Proposal currentProposal = null; + private Value currentValue = null; + + /* Identifies the Learner Service. 
*/ + public static final String SERVICE_NAME = "Learner"; + + /** + * Creates an instance of a Learner which will listen for election results + * to propagate to local listeners, and for requests asking for election + * results. + * + *
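+     * <p>A minimal usage sketch (it assumes a configured Protocol and
+     * ServiceDispatcher are already at hand):
+     * <pre>{@code
+     * Learner learner = new Learner(protocol, serviceDispatcher);
+     * learner.addListener(new Learner.Listener() {
+     *     public void notify(Proposal proposal, Value value) {
+     *         System.out.println("Election winner: " + value);
+     *     }
+     * });
+     * learner.start(); // runs the Learner loop in its own thread
+     * }</pre>
+     *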
+     * Note that this constructor does not take a repNode as an argument, so
+     * that it can be used as the basis for the standalone Monitor.
+     *
+     * @param protocol the protocol used for message exchange
+     * @param serviceDispatcher the service dispatcher used by the agent
+     */
+    public Learner(Protocol protocol,
+                   ServiceDispatcher serviceDispatcher) {
+        this(null, protocol, serviceDispatcher);
+    }
+
+    public Learner(RepImpl repImpl,
+                   Protocol protocol,
+                   ServiceDispatcher serviceDispatcher) {
+        super(repImpl, protocol,
+              "Learner Thread " + protocol.getNameIdPair().getName());
+        this.serviceDispatcher = serviceDispatcher;
+
+        /* Add a listener for logging. */
+        addListener(new Listener() {
+            @Override
+            public void notify(Proposal proposal, Value value) {
+                LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE,
+                                   "Learner notified. Proposal:" +
+                                   proposal + " Value: " + value);
+            }
+        });
+    }
+
+    /**
+     * Adds a Listener to the existing set of listeners, so that it can be
+     * informed of the outcome of election results.
+     *
+     * @param listener the new listener to be added
+     */
+    public void addListener(Listener listener) {
+        synchronized (listeners) {
+            if (!listeners.contains(listener)) {
+                listeners.add(listener);
+            }
+        }
+    }
+
+    /**
+     * Removes a Listener from the existing set of listeners.
+     *
+     * @param listener the listener to be removed.
+     */
+    void removeListener(Listener listener) {
+        synchronized (listeners) {
+            listeners.remove(listener);
+        }
+    }
+
+    /**
+     * Processes a result message.
+     *
+     * @param proposal the winning proposal
+     * @param value the winning value
+     */
+    public synchronized void processResult(Proposal proposal, Value value) {
+        if ((currentProposal != null) &&
+            (proposal.compareTo(currentProposal) < 0)) {
+            LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE,
+                               "Ignoring obsolete winner: " + proposal);
+            return;
+        }
+        currentProposal = proposal;
+        currentValue = value;
+
+        /* We have a new winning proposal and value, inform the listeners */
+        synchronized (listeners) {
+            for (Listener listener : listeners) {
+                try {
+                    listener.notify(currentProposal, currentValue);
+                } catch (Exception e) {
+                    /* Report the exception and keep going. */
+                    LoggerUtils.logMsg
+                        (logger, envImpl, formatter, Level.SEVERE,
+                         "Exception in Learner Listener: " + e.getMessage());
+                }
+            }
+        }
+    }
+
+    /**
+     * The main Learner loop. It accepts requests and propagates them to its
+     * Listeners, if the proposal isn't out of date.
+     */
+    @Override
+    public void run() {
+        serviceDispatcher.register(SERVICE_NAME, channelQueue);
+        LoggerUtils.logMsg
+            (logger, envImpl, formatter, Level.FINE, "Learner started");
+        DataChannel channel = null;
+        try {
+            while (true) {
+                channel = serviceDispatcher.takeChannel
+                    (SERVICE_NAME,
+                     true /* blocking socket */,
+                     protocol.getReadTimeout());
+
+                if (channel == null) {
+                    /* A soft shutdown.
*/ + return; + } + + BufferedReader in = null; + PrintWriter out = null; + try { + in = new BufferedReader + (new InputStreamReader( + Channels.newInputStream(channel))); + final String requestLine = in.readLine(); + if (requestLine == null) { + continue; + } + final RequestMessage requestMessage; + try { + requestMessage = protocol.parseRequest(requestLine); + } catch (InvalidMessageException ime) { + protocol.processIME(channel, ime); + continue; + } + + final MessageOp op = requestMessage.getOp(); + LoggerUtils.logMsg(logger, envImpl, formatter, + Level.FINEST, + "learner request: " + op + + " sender: " + + requestMessage.getSenderId()); + if (op == protocol.RESULT) { + Result result = (Result) requestMessage; + processResult(result.getProposal(), result.getValue()); + } else if (op == protocol.MASTER_QUERY) { + processMasterQuery(channel, requestMessage); + } else if (op == protocol.SHUTDOWN) { + LoggerUtils.logMsg + (logger, envImpl, formatter, Level.FINE, + "Learner thread exiting"); + break; + } else { + final String message = + "Malformed request: '" + requestLine + "'" + + " Unexpected op:" + op; + final InvalidMessageException ime = + new InvalidMessageException(MessageError.BAD_FORMAT, + message); + protocol.processIME(channel, ime); + continue; + } + } catch (IOException e) { + LoggerUtils.logMsg + (logger, envImpl, formatter, Level.INFO, + "IO exception: " + e.getMessage()); + } catch (Exception e) { + throw EnvironmentFailureException.unexpectedException(e); + } finally { + Utils.cleanup(logger, envImpl, formatter, channel, in, out); + } + } + } catch (InterruptedException e) { + if (isShutdown()) { + /* Treat it like a shutdown, exit the thread. */ + return; + } + LoggerUtils.logMsg(logger, envImpl, formatter, Level.WARNING, + "Learner unexpected interrupted"); + throw EnvironmentFailureException.unexpectedException(e); + } finally { + serviceDispatcher.cancel(SERVICE_NAME); + cleanup(); + } + } + + /** + * Responds to a query for the current master. A response is only + * generated if the node is currently in the Master or Replica state to + * ensure that the information is reasonably current. + */ + synchronized private void processMasterQuery(DataChannel channel, + RequestMessage requestMessage) + { + if ((currentProposal == null) || (currentValue == null)) { + /* Don't have any election results to share. */ + return; + } + + if ((envImpl == null) || !((RepImpl) envImpl).getState().isActive()) { + /* Knowledge of master is potentially obsolete */ + return; + } + + PrintWriter out = null; + try { + out = new PrintWriter(Channels.newOutputStream(channel), true); + final MasterQueryResponse responseMessage = protocol.new + MasterQueryResponse(currentProposal, currentValue); + + /* + * The request message may be of an earlier version. If so, this + * node transparently read the older version. JE only throws out + * InvalidMessageException when the version of the request message + * is newer than the current protocol. To avoid sending a response + * that the requester cannot understand, we send a response in the + * same version as that of the original request message. + */ + responseMessage.setSendVersion(requestMessage.getSendVersion()); + out.println(responseMessage.wireFormat()); + } finally { + if (out != null) { + out.close(); + } + } + } + + /** + * Queries other learners, in parallel, to determine whether they know of + * an existing master in the group. 
If one is found, the result is + * processed via {@link #processResult} as though it were an election + * result that was sent to the Learner, resulting in the node transitioning + * to the master or replica state as appropriate. + *
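+     * <p>An illustrative call (the helper addresses here are made up; in
+     * practice they come from the group's configuration):
+     * <pre>{@code
+     * Set<InetSocketAddress> helpers = new HashSet<>();
+     * helpers.add(new InetSocketAddress("node1.example.com", 5001));
+     * helpers.add(new InetSocketAddress("node2.example.com", 5001));
+     * learner.queryForMaster(helpers); // results flow to processResult()
+     * }</pre>
+     *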
        + * Note that this node itself is not allowed to become a master as a result + * of such a query. It must only do so via an election. + * + * @param learnerSockets the sockets associated with learners at other + * nodes. The nodes are queried on these sockets. + */ + public void queryForMaster(Set learnerSockets) { + if (learnerSockets.size() <= 0) { + return; + } + int threadPoolSize = Math.min(learnerSockets.size(), 10); + final ExecutorService pool = + Executors.newFixedThreadPool + (threadPoolSize, new StoppableThreadFactory("JE Learner", + logger)); + try { + RequestMessage masterQuery = protocol.new MasterQuery(); + FutureTrackingCompService compService = + Utils.broadcastMessage(learnerSockets, + Learner.SERVICE_NAME, + masterQuery, + pool); + /* + * 2 * read timeout below to roughly cover the max time for a + * message exchange. + */ + new Utils.WithFutureExceptionHandler + (compService, 2 * protocol.getReadTimeout(), TimeUnit.MILLISECONDS, + logger, (RepImpl)envImpl, formatter) { + + @Override + protected void processResponse(MessageExchange me) { + + if (me.getResponseMessage().getOp() == + protocol.MASTER_QUERY_RESPONSE){ + MasterQueryResponse accept = + (MasterQueryResponse) me.getResponseMessage(); + MasterValue masterValue = + (MasterValue) accept.getValue(); + if ((masterValue != null) && + masterValue.getNameId(). + equals(protocol.getNameIdPair())) { + + /* + * Should not transition to master as a result + * of a query it risks imposing a hard recovery + * on the replicas. + */ + return; + } + processResult(accept.getProposal(), masterValue); + } + } + + @Override + protected boolean isShutdown() { + return Learner.this.isShutdown(); + } + }.execute(); + } finally { + pool.shutdownNow(); + } + } + + /** + * Returns the socket address for the current master, or null if one + * could not be determined from the available set of learners. This API + * is suitable for tools which need to contact the master for a specific + * service, e.g. to delete a replication node, or to add a monitor. This + * method could be used in principle to establish other types of nodes as + * well via a tool, but that is currently done by the handshake process. 
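+     * <p>For example, a tool might locate the master as follows (variable
+     * names are illustrative):
+     * <pre>{@code
+     * MasterValue master = Learner.findMaster(protocol, helperSockets,
+     *                                         logger, repImpl, formatter);
+     * InetSocketAddress masterAddress =
+     *     new InetSocketAddress(master.getHostName(), master.getPort());
+     * }</pre>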
+ * + * @param protocol the protocol to be used when determining the master + * + * @param learnerSockets the learner to be queried for the master + * @param logger for log messages + * @return the MasterValue identifying the master + * @throws UnknownMasterException if no master could be established + */ + static public MasterValue findMaster + (final Protocol protocol, + Set learnerSockets, + final Logger logger, + final RepImpl repImpl, + final Formatter formatter ) + throws UnknownMasterException { + + if (learnerSockets.size() <= 0) { + return null; + } + int threadPoolSize = Math.min(learnerSockets.size(), 10); + final ExecutorService pool = + Executors.newFixedThreadPool(threadPoolSize); + try { + FutureTrackingCompService compService = + Utils.broadcastMessage(learnerSockets, + Learner.SERVICE_NAME, + protocol.new MasterQuery(), + pool); + + final List results = new LinkedList<>(); + new Utils.WithFutureExceptionHandler + (compService, 2 * protocol.getReadTimeout(), TimeUnit.MILLISECONDS, + logger, repImpl, formatter) { + + @Override + protected void processResponse(MessageExchange me) { + + final ResponseMessage response = me.getResponseMessage(); + + if (response.getOp() == + protocol.MASTER_QUERY_RESPONSE){ + results.add((MasterQueryResponse)response); + } else { + LoggerUtils.logMsg(logger, repImpl, formatter, + Level.WARNING, + "Unexpected MasterQuery response:" + + response.wireFormat()); + } + } + + @Override + protected boolean isShutdown() { + return (repImpl != null) && !repImpl.isValid(); + } + + }.execute(); + + MasterQueryResponse bestResponse = null; + for (MasterQueryResponse result : results) { + if ((bestResponse == null) || + (result.getProposal(). + compareTo(bestResponse.getProposal()) > 0)) { + bestResponse = result; + } + } + if (bestResponse == null) { + throw new UnknownMasterException + ("Could not determine master from helpers at:" + + learnerSockets.toString()); + } + return(MasterValue) bestResponse.getValue(); + } finally { + pool.shutdownNow(); + } + } + + /** + * A method to re-broadcast this Learner's notion of the master. This + * re-broadcast is done primarily to inform an obsolete master that it's no + * longer the current master. Obsolete master situations arise in network + * partition scenarios, where a current master is not able to participate + * in an election, nor is it informed about the results. The re-broadcast + * is the mechanism for rectifying such a situation. When the obsolete + * master receives the new results after the network partition has been + * fixed, it will revert to being a replica. + * + * @param learners the learners that must be informed + * @param threadPool the pool used to dispatch broadcast requests in + * in parallel + */ + public void reinformLearners(Set learners, + ExecutorService threadPool) { + + Proposer.WinningProposal winningProposal; + synchronized (this) { + if ((currentProposal == null) || (currentValue == null)) { + return; + } + winningProposal = + new WinningProposal(currentProposal, currentValue, null); + } + + final RepImpl repImpl = (RepImpl)envImpl; + if (repImpl == null) { + return; + } + informLearners(learners, winningProposal, protocol, threadPool, + logger, repImpl, formatter); + } + + /** + * A utility method used to broadcast the results of an election to + * Listeners. + * + * @param learners that need to be informed. 
+ * @param winningProposal the result that needs to be propagated + * @param protocol to be used for communication + * @param threadPool used to supply threads for the broadcast + */ + public static void informLearners(Set learners, + Proposer.WinningProposal winningProposal, + Protocol protocol, + ExecutorService threadPool, + final Logger logger, + final RepImpl repImpl, + final Formatter formatter) { + + if ((learners == null) || (learners.size() == 0)) { + throw EnvironmentFailureException.unexpectedState + ("There must be at least one learner"); + } + + LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE, + "Informing " + learners.size() + " learners."); + FutureTrackingCompService compService = + Utils.broadcastMessage(learners, + Learner.SERVICE_NAME, + protocol.new Result + (winningProposal.proposal, + winningProposal.chosenValue), + threadPool); + + /* Consume the futures. */ + + /* Atomic to provide incrementable "final" to nested method. */ + final AtomicInteger count = new AtomicInteger(0); + + new Utils.WithFutureExceptionHandler + (compService, 2 * protocol.getReadTimeout(), TimeUnit.MILLISECONDS, + logger, repImpl, formatter) { + + @Override + protected void processResponse(MessageExchange me) { + /* Do nothing, just consume the futures. */ + count.incrementAndGet(); + } + + @Override + protected void processNullResponse(MessageExchange me) { + if (me.getException() == null) { + count.incrementAndGet(); + } + } + + @Override + protected boolean isShutdown() { + return (repImpl != null) && !repImpl.isValid(); + } + + }.execute(); + LoggerUtils.logMsg + (logger, repImpl, formatter, Level.FINE, + "Informed learners: " + count.get()); + } + + /** + * @see StoppableThread#getLogger + */ + @Override + protected Logger getLogger() { + return logger; + } + + /* + * Notifies the listener that a new proposal has been accepted. Note that + * the value may be unchanged. The proposals may be out of sequence, it's + * up to the listener to deal with it appropriately. + */ + public static interface Listener { + void notify(Proposal proposal, Value value); + } +} diff --git a/src/com/sleepycat/je/rep/elections/MasterValue.java b/src/com/sleepycat/je/rep/elections/MasterValue.java new file mode 100644 index 0000000..4dad2e6 --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/MasterValue.java @@ -0,0 +1,125 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.elections; + +import com.sleepycat.je.rep.elections.Protocol.StringValue; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.elections.Protocol.ValueParser; +import com.sleepycat.je.rep.impl.node.NameIdPair; + +/** + * Class represents the identity of a Master as a subclass of a StringValue + * that can be used during the election protocol. 
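+ *
+ * <p>A round-trip sketch (host, port and NameIdPair are made-up values):
+ * <pre>{@code
+ * MasterValue mv = new MasterValue("node1.example.com", 5001,
+ *                                  new NameIdPair("node1", 1));
+ * Value parsed = MasterValue.getParser().parse(mv.wireFormat());
+ * // parsed is expected to equal mv
+ * }</pre>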
+ */ +public class MasterValue extends StringValue { + private static final String DELIMITER = "$$$"; + private static final String DELIMITER_REGEXP = "\\$\\$\\$"; + + private final String hostname; + private final int port; + private final NameIdPair nameIdPair; + + public MasterValue(String masterHostname, + int masterPort, + NameIdPair masterNameIdPair) { + super(masterHostname + DELIMITER + + masterPort + DELIMITER + + masterNameIdPair.getName() + DELIMITER + + masterNameIdPair.getId()); + this.hostname = masterHostname; + this.port = masterPort; + this.nameIdPair = masterNameIdPair; + } + + public MasterValue(String valueString) { + super(valueString); + String[] tokens = valueString.split(DELIMITER_REGEXP); + hostname = tokens[0]; + port = Integer.parseInt(tokens[1]); + nameIdPair = new NameIdPair(tokens[2], Integer.parseInt(tokens[3])); + } + + public String getHostName() { + return hostname; + } + + public int getPort() { + return port; + } + + public String getNodeName() { + return nameIdPair.getName(); + } + + public NameIdPair getNameId() { + return nameIdPair; + } + + /** + * Returns a parser that can convert a wire format value into a MasterValue + * + * @return the value parser + */ + static public ValueParser getParser() { + return masterValueParser; + } + + private static final ValueParser masterValueParser = new ValueParser() { + + @Override + public Value parse(String wireFormat) { + return ((wireFormat == null) || "".equals(wireFormat)) ? + null : + new MasterValue(wireFormat); + } + }; + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + + ((hostname == null) ? 0 : hostname.hashCode()); + result = prime * result + nameIdPair.hashCode(); + result = prime * result + port; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (!(obj instanceof MasterValue)) { + return false; + } + final MasterValue other = (MasterValue) obj; + if (hostname == null) { + if (other.hostname != null) { + return false; + } + } else if (!hostname.equals(other.hostname)) { + return false; + } + if (!nameIdPair.equals(other.nameIdPair)) { + return false; + } + if (port != other.port) { + return false; + } + return true; + } +} diff --git a/src/com/sleepycat/je/rep/elections/Proposer.java b/src/com/sleepycat/je/rep/elections/Proposer.java new file mode 100644 index 0000000..7cf712e --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/Proposer.java @@ -0,0 +1,546 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.elections; + +import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE1_ARBITER; +import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE1_HIGHER_PROPOSAL; +import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE1_NO_NON_ZERO_PRIO; +import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE1_NO_QUORUM; +import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE2_HIGHER_PROPOSAL; +import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE2_NO_QUORUM; +import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PROMISE_COUNT; + +import java.net.InetSocketAddress; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.elections.Protocol.Accept; +import com.sleepycat.je.rep.elections.Protocol.Promise; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.elections.Utils.FutureTrackingCompService; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.MessageOp; +import com.sleepycat.je.rep.impl.TextProtocol.ProtocolError; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Plays the role of a Proposer in the consensus algorithm. + * + * Note that the Proposer also plays the role of a distinguished learner and + * informs all other listeners about the acceptance of a proposal. + */ +public abstract class Proposer { + + /* The elections instance that is running this Learner agent. */ + protected final Elections elections; + + /* The statistics from one call of issueProposal. */ + private final StatGroup statistics; + private final IntStat phase1NoQuorum; + protected final IntStat phase1NoNonZeroPrio; + protected final IntStat phase1Arbiter; + private final IntStat phase1HigherProposal; + private final IntStat phase2NoQuorum; + private final IntStat phase2HigherProposal; + private final IntStat promiseCount; + + private volatile boolean shutdown; + + protected final Logger logger; + protected final Formatter formatter; + + /** + * Initializes a proposer with the set of acceptors identified by the + * sockets on which they accept proposals. 
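+     *
+     * <p>Concrete subclasses supply the proposal-numbering and value-choice
+     * policy; for example, the ranking-based proposer defined in this
+     * package:
+     * <pre>{@code
+     * Proposer proposer = new RankingProposer(elections, nameIdPair);
+     * }</pre>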
+ */ + public Proposer(Elections elections, NameIdPair nameIdPair) { + this.elections = elections; + + if (elections.getRepImpl() != null) { + logger = LoggerUtils.getLogger(getClass()); + } else { + logger = LoggerUtils.getLoggerFormatterNeeded(getClass()); + } + formatter = new ReplicationFormatter(nameIdPair); + + statistics = new StatGroup(ProposerStatDefinition.GROUP_NAME, + ProposerStatDefinition.GROUP_DESC); + phase1NoQuorum = new IntStat(statistics, PHASE1_NO_QUORUM); + phase1NoNonZeroPrio = new IntStat(statistics, PHASE1_NO_NON_ZERO_PRIO); + phase1Arbiter = new IntStat(statistics, PHASE1_ARBITER); + phase1HigherProposal = new IntStat(statistics, PHASE1_HIGHER_PROPOSAL); + phase2NoQuorum = new IntStat(statistics, PHASE2_NO_QUORUM); + phase2HigherProposal = new IntStat(statistics, PHASE2_HIGHER_PROPOSAL); + promiseCount = new IntStat(statistics, PROMISE_COUNT); + } + + /** + * Shut down the proposer and reclaim its resources. + */ + public void shutdown() { + shutdown = true; + } + + /** + * Returns the current proposer statistics. + */ + public StatGroup getProposerStats() { + return statistics; + } + + /** + * Predicate to determine whether we have a quorum based upon the quorum + * policy. Note that the case of two attendees is special-cased when a + * SIMPLE_MAJORITY is called for, to return success with just one yes vote. + * + * @param quorumPolicy the policy to be used for the quorum. + * @param votes the number of yes votes. + * @return true if the number of votes satisfies the quorum policy. + */ + private boolean haveQuorum(QuorumPolicy quorumPolicy, int votes) { + return elections.getElectionQuorum(). + haveQuorum(quorumPolicy, votes); + } + + /** + * Implements phase 1. It sends our Propose requests to all acceptors and + * tallies up the results. + * + * @param proposal the new unique proposal. + * + * @return the results from running Phase1, or null if phase 1 did not get + * a promise from the majority of acceptors. + * + * @throws HigherNumberedProposal + */ + private Phase1Result phase1(QuorumPolicy quorumPolicy, Proposal proposal) + throws HigherNumberedProposal { + + LoggerUtils.logMsg(logger, elections.getRepImpl(), formatter, + Level.FINE, "Phase 1 proposal: " + proposal); + + /* Broadcast Propose requests. */ + final Set acceptorSockets = + elections.getAcceptorSockets(); + FutureTrackingCompService compService = + Utils.broadcastMessage + (acceptorSockets, + Acceptor.SERVICE_NAME, + elections.getProtocol().new Propose(proposal), + elections.getThreadPool()); + + Phase1Result result = tallyPhase1Results(proposal, compService); + if (haveQuorum(quorumPolicy, result.promisories.size())) { + return result; + } + phase1NoQuorum.increment(); + + return null; + } + + /** + * Tally the results from Phase 1. Terminate the tally if we see any + * rejection messages which indicate our proposal has been obsoleted by a + * more recent one. It also tracks the highest numbered proposal and the + * associated value that was accepted by the Acceptors. 
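+     *
+     * <p>As a worked example: in a five-node group a SIMPLE_MAJORITY quorum
+     * needs three promises, so a tally of two promises cannot produce a
+     * Phase 1 result, while a single REJECT response ends the round at once.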
+ * + * @param currentProposal the proposal for this round + * @param compService the futures resulting from Prepare messages + * + * @return results Summarized results from Phase 1 + * @throws HigherNumberedProposal + */ + private Phase1Result + tallyPhase1Results(Proposal currentProposal, + final FutureTrackingCompService + compService) + throws HigherNumberedProposal { + + final Phase1Result result = new Phase1Result(); + + new Utils.WithFutureExceptionHandler + (compService, 2 * elections.getProtocol().getReadTimeout(), + TimeUnit.MILLISECONDS, logger, elections.getRepImpl(), null) { + + @Override + protected void processResponse(MessageExchange me) { + + final MessageOp op = me.getResponseMessage().getOp(); + if (op == elections.getProtocol().REJECT) { + + /* + * The acceptor has already received a higher numbered + * Prepare request. Abandon this round as an optimization. + */ + compService.cancelFutures(true); + phase1HigherProposal.increment(); + throw new HigherNumberedProposal(me.target); + } else if (op == elections.getProtocol().PROMISE) { + result.promisories.add(me.target); + result.promisoryMessages.add(me); + Promise accept = (Promise) me.getResponseMessage(); + Proposal acceptProposal = accept.getHighestProposal(); + if ((result.acceptProposal == null) || + ((acceptProposal != null) && + (acceptProposal.compareTo(result.acceptProposal) > + 0))) { + result.acceptProposal = acceptProposal; + result.acceptedValue = accept.getAcceptedValue(); + } + } else if (op == elections.getProtocol().PROTOCOL_ERROR){ + ProtocolError protocolError = + (ProtocolError) me.getResponseMessage(); + /* Possible protocol version, group, or config skew. */ + LoggerUtils.logMsg(logger, elections.getRepImpl(), + formatter, Level.WARNING, + "Proposer phase 1 protocol error: " + + protocolError.getMessage()); + return; + } else { + LoggerUtils.logMsg(logger, elections.getRepImpl(), + formatter, Level.WARNING, + "Proposer phase 1 unknown" + + " response: " + op); + return; + } + } + + @Override + protected boolean isShutdown() { + return shutdown; + } + }.execute(); + + if ((result.promisories.size() > 0) && + (result.acceptProposal == null)) { + result.acceptProposal = currentProposal; + } + promiseCount.set(result.promisories.size()); + + return result; + } + + /** + * Runs Phase 2 for the proposal and value. + * + * @param proposal the proposal resulting from Phase1 + * @param phase2Value the value that we would like to be chosen. + * @param promisories from Phase1 + * @return a summary of the result or null if proposal was not accepted + * @throws HigherNumberedProposal + */ + private Phase2Result phase2(QuorumPolicy quorumPolicy, + Proposal proposal, + Value phase2Value, + Set promisories) + throws HigherNumberedProposal { + + LoggerUtils.logMsg + (logger, elections.getRepImpl(), formatter, Level.FINE, + "Phase 2 proposal: " + proposal + " Value: " + phase2Value); + + /* Broadcast Accept requests to promisories. */ + final Accept accept = + elections.getProtocol().new Accept(proposal, phase2Value); + FutureTrackingCompService compService = + Utils.broadcastMessage(promisories, Acceptor.SERVICE_NAME, + accept, elections.getThreadPool()); + Phase2Result result = tallyPhase2Results(quorumPolicy, compService); + + if (haveQuorum(quorumPolicy, result.accepts.size())) { + return result; + } + + phase2NoQuorum.increment(); + return null; + } + + /** + * Tallies the results from Phase 2. 
+ * + * @param compService the futures from phase2 + * @return the Phase2Result + * @throws HigherNumberedProposal if a higher numbered proposal was + * encountered + */ + private Phase2Result + tallyPhase2Results( + final QuorumPolicy quorumPolicy, + final FutureTrackingCompService compService) + throws HigherNumberedProposal { + + final Phase2Result result = new Phase2Result(); + try { + new Utils.WithFutureExceptionHandler + (compService, 2 * elections.getProtocol().getReadTimeout(), + TimeUnit.MILLISECONDS, logger, elections.getRepImpl(), null) { + + @Override + protected void processResponse(MessageExchange me) { + final ResponseMessage responseMessage = + me.getResponseMessage(); + + final Protocol protocol = elections.getProtocol(); + final MessageOp op = responseMessage.getOp(); + if (op == protocol.REJECT) { + compService.cancelFutures(true); + phase2HigherProposal.increment(); + throw new HigherNumberedProposal(me.target); + } else if (op == protocol.ACCEPTED) { + result.accepts.add(me.target); + if (haveQuorum(quorumPolicy, + result.accepts.size())) { + compService.cancelFutures(true); + throw new HaveQuorum(); + } + } else if (op == protocol.PROTOCOL_ERROR) { + final Protocol.ProtocolError errorMessage = + (Protocol.ProtocolError)responseMessage; + LoggerUtils.logMsg(logger, elections.getRepImpl(), + formatter, Level.WARNING, + "Proposer phase 2 " + + "protocol error:" + + errorMessage.getMessage()); + } else { + LoggerUtils.logMsg(logger, elections.getRepImpl(), + formatter, Level.WARNING, + "Proposer Phase 2 " + + "unknown response: " + op); + } + } + + @Override + protected boolean isShutdown() { + return shutdown; + } + }.execute(); + + } catch (HaveQuorum hq) { + /* Terminated phase2 early from an early quorum. */ + } + return result; + } + + /** + * Selects the value to be used during Phase2, which if it succeeds will + * become the "chosen value". + * + * @param exchanges the message exchanges from Phase 1 + * + * @return the candidate value for Phase2, or null, if no suitable value + * was found during phase 1 + */ + protected abstract Value choosePhase2Value(Set exchanges); + + /** + * Creates the next unique Proposal to be used + * + * @return the Proposal + */ + protected abstract Proposal nextProposal(); + + /** + * Runs an election using the consensus algorithm. An election can be used + * either to determine an existing consensus value, or to force an election + * if the current value is not suitable. + * + * @param quorumPolicy the policy used to determine whether we have a + * quorum. + * + * @param retryPredicate the interface which determines whether a retry + * should be attempted. + * + * @return the WinningProposal encapsulating the proposal and chosen value + * + * @throws MaxRetriesException when a winning proposal could not be + * established. 
+ * @throws InterruptedException + */ + public WinningProposal issueProposal(QuorumPolicy quorumPolicy, + RetryPredicate retryPredicate) + throws MaxRetriesException, InterruptedException { + + statistics.clear(); + + while (retryPredicate.retry()) { + try { + final Proposal proposal = nextProposal(); + final Phase1Result result1 = phase1(quorumPolicy, proposal); + if (result1 == null) { + continue; + } + + final Value phase2Value = + choosePhase2Value(result1.promisoryMessages); + if (phase2Value == null) { + continue; + } + + final Phase2Result result2 = + phase2(quorumPolicy, proposal, phase2Value, + result1.promisories); + if (result2 == null) { + continue; + } + + LoggerUtils.logMsg(logger, elections.getRepImpl(), + formatter, Level.INFO, + "Winning proposal: " + proposal + + " Value: " + phase2Value); + return new WinningProposal(proposal, phase2Value, statistics); + } catch (HigherNumberedProposal eip) { + continue; + } + } + throw new MaxRetriesException(statistics); + } + + /* Local classes */ + + /** + * Summarizes Phase 1 results + */ + private static class Phase1Result { + + /* + * Subset of acceptors who have promised not to accept lower numbered + * proposals. + */ + Set promisories = new HashSet<>(); + Set promisoryMessages = + new HashSet<>(); + + /* + * The highest accepted proposal that was received from the + * promisories. It can be null. + */ + Proposal acceptProposal; + + /* + * The value associated with the proposal. It's non-null if + * acceptProposal is not null. + */ + @SuppressWarnings("unused") + Value acceptedValue; + } + + /** + * Summarizes Phase 2 results. + */ + private static class Phase2Result { + + /* Subset of promisories who accepted the current proposal. */ + Set accepts = new HashSet<>(); + } + + /** + * Summarizes the results of the election + */ + public static class WinningProposal { + final Proposal proposal; + final Value chosenValue; + final StatGroup proposerStats; + + public WinningProposal(Proposal proposal, + Value value, + StatGroup proposerStats) { + this.proposal = proposal; + this.chosenValue = value; + this.proposerStats = proposerStats; + } + } + + /** + * Exception to indicate that a more recent election is in progress. + */ + @SuppressWarnings("serial") + private static class HigherNumberedProposal extends RuntimeException { + /* The acceptor that had a more recent proposal. */ + final InetSocketAddress acceptor; + HigherNumberedProposal(InetSocketAddress acceptor) { + this.acceptor = acceptor; + } + + @Override + public String getMessage() { + return "More recent proposal at: " + acceptor.getHostName(); + } + } + + /** + * Exception thrown when a Proposal issue fails because the retry limit has + * been exceeded. + */ + @SuppressWarnings("serial") + public static class MaxRetriesException extends Exception { + final StatGroup proposerStats; + + MaxRetriesException(StatGroup proposerStats) { + this.proposerStats = proposerStats; + } + } + + /** + * Exception to indicate we have an early Quorum. + */ + @SuppressWarnings("serial") + private static class HaveQuorum extends RuntimeException { + HaveQuorum() {} + } + + /** + * The Interface defining a Proposal. + */ + public interface Proposal extends Comparable { + + /** + * The string representation of the proposal. It must not contain + * the Protocol.SEPARATOR character. 
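+         *
+         * <p>A minimal conforming implementation might wrap a long (a toy
+         * sketch; real proposals come from TimebasedProposalGenerator):
+         * <pre>{@code
+         * class LongProposal implements Proposal {
+         *     private final long n;
+         *     LongProposal(long n) { this.n = n; }
+         *     public String wireFormat() { return Long.toString(n); }
+         *     public int compareTo(Proposal o) {
+         *         return Long.compare(n, ((LongProposal) o).n);
+         *     }
+         * }
+         * }</pre>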
+         *
+         * @return the String representation of the Proposal
+         */
+        String wireFormat();
+    }
+
+    public interface ProposalParser {
+
+        /**
+         * Converts the wire format back into a Proposal
+         *
+         * @param wireFormat String representation of a Proposal
+         *
+         * @return the de-serialized Proposal
+         */
+        Proposal parse(String wireFormat);
+    }
+
+    /**
+     * Interface to determine whether the Proposer should retry with a new
+     * Proposal or abandon the effort.
+     */
+    public interface RetryPredicate {
+        boolean retry() throws InterruptedException;
+        int retries();
+    }
+}
diff --git a/src/com/sleepycat/je/rep/elections/ProposerStatDefinition.java b/src/com/sleepycat/je/rep/elections/ProposerStatDefinition.java
new file mode 100644
index 0000000..bf14bb8
--- /dev/null
+++ b/src/com/sleepycat/je/rep/elections/ProposerStatDefinition.java
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.elections;
+
+import com.sleepycat.je.utilint.StatDefinition;
+
+/**
+ * Per-statistic metadata for the Proposer statistics.
+ */
+public class ProposerStatDefinition {
+
+    public static final String GROUP_NAME = "Election Proposer";
+    public static final String GROUP_DESC =
+        "Proposals are the first stage of a replication group election.";
+
+    public static StatDefinition PHASE1_ARBITER =
+        new StatDefinition
+        ("phase1Arbiter",
+         "Number of times Phase 1 ended due to Arbiter " +
+         "having highest VLSN.");
+
+    public static StatDefinition PHASE1_NO_QUORUM =
+        new StatDefinition
+        ("phase1NoQuorum",
+         "Number of times Phase 1 ended with insufficient votes for a " +
+         "quorum.");
+
+    public static StatDefinition PHASE1_NO_NON_ZERO_PRIO =
+        new StatDefinition
+        ("phase1NoNonZeroPrio",
+         "Number of times Phase 1 ended due to the absence of " +
+         "participating electable nodes with non-zero priority.");
+
+    public static StatDefinition PHASE1_HIGHER_PROPOSAL =
+        new StatDefinition
+        ("phase1HigherProposal",
+         "Number of times Phase 1 was terminated because one of the " +
+         "Acceptor agents already had a higher numbered proposal.");
+
+    public static StatDefinition PHASE2_NO_QUORUM =
+        new StatDefinition
+        ("phase2NoQuorum",
+         "Number of times Phase 2 ended with insufficient votes for a " +
+         "quorum.");
+
+    public static StatDefinition PHASE2_HIGHER_PROPOSAL =
+        new StatDefinition
+        ("phase2HigherProposal",
+         "Number of times Phase 2 was terminated because one of the " +
+         "Acceptor agents already had a higher numbered proposal.");
+
+    public static StatDefinition PROMISE_COUNT =
+        new StatDefinition
+        ("promiseCount",
+         "Number of promises made by Acceptors in phase 1.");
+}
diff --git a/src/com/sleepycat/je/rep/elections/Protocol.java b/src/com/sleepycat/je/rep/elections/Protocol.java
new file mode 100644
index 0000000..2fa7be9
--- /dev/null
+++ b/src/com/sleepycat/je/rep/elections/Protocol.java
@@ -0,0 +1,760 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.elections;
+
+import static com.sleepycat.je.rep.impl.RepParams.ELECTIONS_OPEN_TIMEOUT;
+import static com.sleepycat.je.rep.impl.RepParams.ELECTIONS_READ_TIMEOUT;
+
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.rep.elections.Acceptor.SuggestionGenerator.Ranking;
+import com.sleepycat.je.rep.elections.Proposer.Proposal;
+import com.sleepycat.je.rep.elections.Proposer.ProposalParser;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.TextProtocol;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+
+/**
+ * Defines the request/response messages used in the implementation of
+ * elections.
+ *
+ * From Proposer to Acceptor:
+ *     Propose -> Promise | Reject
+ *     Accept -> Accepted | Reject
+ *
+ * From Proposer initiator to Learners:
+ *     Result -> none
+ *
+ * The following exchange is not part of the elections process, but is used
+ * by the Monitor to query a Learner for the latest election result it's
+ * aware of, when the Monitor first starts up. It is also used by nodes and
+ * utilities that are attempting to find the master.
+ *
+ * From Monitor to Learner
+ *     MasterQuery -> MasterQueryResponse | None
+ */
+public class Protocol extends TextProtocol {
+
+    /*
+     * Protocol version string. Format: <major version>.<minor version>
+     * It's used to ensure compatibility across versions.
+     */
+    private static final String VERSION = "2.0";
+
+    /* An instance of ProposalParser used to de-serialize proposals */
+    private final ProposalParser proposalParser;
+
+    /* An instance of ValueParser used to de-serialize values */
+    private final ValueParser valueParser;
+
+    /* Request Operations */
+    public final MessageOp PROPOSE;
+    public final MessageOp ACCEPT;
+    public final MessageOp RESULT;
+    public final MessageOp MASTER_QUERY;
+    public final MessageOp SHUTDOWN;
+
+    /* Response operations */
+    public final MessageOp REJECT;
+    public final MessageOp PROMISE;
+    public final MessageOp ACCEPTED;
+    public final MessageOp MASTER_QUERY_RESPONSE;
+
+    /**
+     * Creates an instance of the Protocol.
+     *
+     * @param proposalParser parses a string into a Proposal object.
+     * @param valueParser parses a string into a Value object.
+     * @param groupName the name of the group running the election process.
+     * @param nameIdPair a unique identifier for this election participant.
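+     *
+     * <p>A construction sketch; the proposal parser must match the Proposal
+     * implementation in use, and is assumed to exist here:
+     * <pre>{@code
+     * Protocol protocol = new Protocol(proposalParser,
+     *                                  MasterValue.getParser(),
+     *                                  groupName, nameIdPair,
+     *                                  repImpl, channelFactory);
+     * }</pre>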
+ */ + public Protocol(ProposalParser proposalParser, + ValueParser valueParser, + String groupName, + NameIdPair nameIdPair, + RepImpl repImpl, + DataChannelFactory channelFactory) { + + /* Request operations */ + super(VERSION, groupName, nameIdPair, repImpl, channelFactory); + + PROPOSE = new MessageOp("P", Propose.class); + ACCEPT = new MessageOp("A", Accept.class); + RESULT = new MessageOp("RE", Result.class); + MASTER_QUERY = new MessageOp("MQ", MasterQuery.class); + SHUTDOWN = new MessageOp("X", Shutdown.class ); + + REJECT = new MessageOp("R", Reject.class); + PROMISE = new MessageOp("PR", Promise.class); + ACCEPTED = new MessageOp("AD", Accepted.class); + MASTER_QUERY_RESPONSE = + new MessageOp("MQR", MasterQueryResponse.class); + + initializeMessageOps(new MessageOp[] { + PROPOSE, + ACCEPT, + RESULT, + MASTER_QUERY, + SHUTDOWN, + + REJECT, + PROMISE, + ACCEPTED, + MASTER_QUERY_RESPONSE, + }); + this.proposalParser = proposalParser; + this.valueParser = valueParser; + + setTimeouts(repImpl, ELECTIONS_OPEN_TIMEOUT, ELECTIONS_READ_TIMEOUT); + } + + /** + * Promise response message. It's sent in response to a Propose message. + * + * Note that the "minor" part of the suggestion ranking is always tagged on + * to the end of the promise request payload. Older pre 7.1.3 nodes will + * ignore extra tokens at the end, since they do not know about the minor + * component of the ranking. This node will use it if it's present and + * otherwise use a value of zero for the minor (VLSN) component. + * + * So when comparing rankings across old and new nodes, we are effectively + * comparing a Ranking(VLSN, Long.MIN_VALUE) with a Ranking(DTVLSN, VLSN), + * resulting in suboptimal election results (from a dtvlsn perspective) + * while an upgrade is in progress, that is, it will tend to favor the + * older node. But this inaccuracy will vanish once all nodes have been + * upgraded. + */ + public class Promise extends ResponseMessage { + private Proposal highestProposal = null; + private Value acceptedValue = null; + private Value suggestion = null; + + /** + * The major and minor components of the Ranking represent the DTVLSN + * and the latest VLSN respectively. + */ + private final Ranking suggestionRanking; + private final int priority; + private int logVersion; + private JEVersion jeVersion; + + public Promise(Proposal highestProposal, + Value value, + Value suggestion, + Ranking suggestionRanking, + int priority, + int logVersion, + JEVersion jeVersion) { + this.highestProposal = highestProposal; + this.acceptedValue = value; + this.suggestion = suggestion; + this.suggestionRanking = suggestionRanking ; + this.priority = priority; + this.logVersion = logVersion; + this.jeVersion = jeVersion; + } + + public Promise(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + highestProposal = proposalParser.parse(nextPayloadToken()); + acceptedValue = valueParser.parse(nextPayloadToken()); + suggestion = valueParser.parse(nextPayloadToken()); + String weight = nextPayloadToken(); + long majorRanking = + "".equals(weight) ? 
+ Ranking.UNINITIALIZED.major : + Long.parseLong(weight); + long minorRanking = Ranking.UNINITIALIZED.major; + priority = Integer.parseInt(nextPayloadToken()); + if (getMajorVersionNumber(sendVersion) > 1) { + logVersion = Integer.parseInt(nextPayloadToken()); + jeVersion = new JEVersion(nextPayloadToken()); + if (hasMoreTokens()) { + /* + * The tie breaker is appended to the end by newer versions + * of JE nodes >= version 7.1.3 + */ + minorRanking = Long.parseLong(nextPayloadToken()); + } + } + suggestionRanking = new Ranking(majorRanking, minorRanking); + } + + @Override + public MessageOp getOp() { + return PROMISE; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + getOuterType().hashCode(); + result = prime * result + + ((acceptedValue == null) ? 0 : acceptedValue.hashCode()); + result = prime + * result + + ((highestProposal == null) ? 0 + : highestProposal.hashCode()); + result = prime * result + priority; + result = prime * result + + ((suggestion == null) ? 0 : suggestion.hashCode()); + result = prime * result + suggestionRanking.hashCode(); + + if (getMajorVersionNumber(sendVersion) > 1) { + result += prime* result + logVersion + jeVersion.hashCode(); + } + + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!super.equals(obj)) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + Promise other = (Promise) obj; + if (!getOuterType().equals(other.getOuterType())) { + return false; + } + + if (acceptedValue == null) { + if (other.acceptedValue != null) { + return false; + } + } else if (!acceptedValue.equals(other.acceptedValue)) { + return false; + } + + if (highestProposal == null) { + if (other.highestProposal != null) { + return false; + } + } else if (!highestProposal.equals(other.highestProposal)) { + return false; + } + + if (priority != other.priority) { + return false; + } + + if (getMajorVersionNumber(sendVersion) > 1) { + if (logVersion != other.logVersion) { + return false; + } + + if (jeVersion.compareTo(other.jeVersion) != 0) { + return false; + } + } + + if (suggestion == null) { + if (other.suggestion != null) { + return false; + } + } else if (!suggestion.equals(other.suggestion)) { + return false; + } + + if (!suggestionRanking.equals(other.suggestionRanking)) { + return false; + } + + return true; + } + + @Override + public String wireFormat() { + String line = + wireFormatPrefix() + + SEPARATOR + + ((highestProposal != null) ? + highestProposal.wireFormat() : + "") + + SEPARATOR + + ((acceptedValue != null) ? acceptedValue.wireFormat() : "") + + SEPARATOR + + ((suggestion != null) ? suggestion.wireFormat() : "") + + SEPARATOR + + ((suggestionRanking.major == Long.MIN_VALUE) ? 
+ "" : + Long.toString(suggestionRanking.major)) + + SEPARATOR + + priority; + + if (getMajorVersionNumber(sendVersion) > 1) { + line += SEPARATOR + logVersion + + SEPARATOR + jeVersion.toString() + + SEPARATOR + Long.toString(suggestionRanking.minor); + } + + return line; + } + + Proposal getHighestProposal() { + return highestProposal; + } + + Value getAcceptedValue() { + return acceptedValue; + } + + Value getSuggestion() { + return suggestion; + } + + Ranking getSuggestionRanking() { + return suggestionRanking; + } + + int getPriority() { + return priority; + } + + int getLogVersion() { + return logVersion; + } + + JEVersion getJEVersion() { + return jeVersion; + } + + private Protocol getOuterType() { + return Protocol.this; + } + } + + /** + * Response to a successful Accept message. + */ + public class Accepted extends ResponseMessage { + private final Proposal proposal; + private final Value value; + + Accepted(Proposal proposal, Value value) { + assert(proposal!= null); + assert(value != null); + this.proposal = proposal; + this.value = value; + } + + public Accepted(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + proposal = proposalParser.parse(nextPayloadToken()); + value = valueParser.parse(nextPayloadToken()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + + ((proposal == null) ? 0 : proposal.hashCode()); + result = prime * result + ((value == null) ? 0 : value.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (!(obj instanceof Accepted)) { + return false; + } + final Accepted other = (Accepted) obj; + if (proposal == null) { + if (other.proposal != null) { + return false; + } + } else if (!proposal.equals(other.proposal)) { + return false; + } + if (value == null) { + if (other.value != null) { + return false; + } + } else if (!value.equals(other.value)) { + return false; + } + return true; + } + + @Override + public MessageOp getOp() { + return ACCEPTED; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + proposal.wireFormat() + + SEPARATOR + value.wireFormat(); + } + + public Value getValue() { + return value; + } + + public Proposal getProposal() { + return proposal; + } + } + + /** + * The response to a Master Query request. It simply repackages the + * Accepted response. + */ + public class MasterQueryResponse extends Accepted { + + MasterQueryResponse(Proposal proposal, Value value) { + super(proposal, value); + } + + public MasterQueryResponse(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + } + @Override + public MessageOp getOp() { + return MASTER_QUERY_RESPONSE; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + } + + /** + * Reject response to a message. 
+ */ + public class Reject extends ResponseMessage { + private final Proposal higherProposal; + + Reject(Proposal higherProposal) { + this.higherProposal = higherProposal; + } + + public Reject(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + higherProposal = proposalParser.parse(nextPayloadToken()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + + ((higherProposal == null) ? 0 : higherProposal.hashCode()); + + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (!(obj instanceof Reject)) { + return false; + } + final Reject other = (Reject) obj; + if (higherProposal == null) { + if (other.higherProposal != null) { + return false; + } + } else if (!higherProposal.equals(other.higherProposal)) { + return false; + } + return true; + } + + @Override + public MessageOp getOp() { + return REJECT; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + higherProposal.wireFormat(); + } + + Proposal getHigherProposal() { + return higherProposal; + } + } + + /** + * Propose request used in Phase 1 of Paxos + */ + public class Propose extends RequestMessage { + protected final Proposal proposal; + + Propose(Proposal proposal) { + this.proposal = proposal; + } + + public Propose(String requestLine, String[] tokens) + throws InvalidMessageException { + + super(requestLine, tokens); + proposal = proposalParser.parse(nextPayloadToken()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + + ((proposal == null) ? 0 : proposal.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (!(obj instanceof Propose)) { + return false; + } + final Propose other = (Propose) obj; + if (proposal == null) { + if (other.proposal != null) { + return false; + } + } else if (!proposal.equals(other.proposal)) { + return false; + } + return true; + } + + @Override + public MessageOp getOp() { + return PROPOSE; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + proposal.wireFormat(); + } + + Proposal getProposal() { + return proposal; + } + } + + public class Shutdown extends RequestMessage { + + public Shutdown() {} + + public Shutdown(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + } + + @Override + public MessageOp getOp() { + return SHUTDOWN; + } + + @Override + public String wireFormat() { + return wireFormatPrefix(); + } + + } + + /** + * Accept request issued in Phase 2 of paxos. + */ + public class Accept extends Propose { + private final Value value; + + Accept(Proposal proposal, Value value) { + super(proposal); + this.value = value; + } + + public Accept(String requestLine, String[] tokens) + throws InvalidMessageException { + + super(requestLine, tokens); + value = valueParser.parse(nextPayloadToken()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((value == null) ? 
0 : value.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (!(obj instanceof Accept)) { + return false; + } + final Accept other = (Accept) obj; + if (value == null) { + if (other.value != null) { + return false; + } + } else if (!value.equals(other.value)) { + return false; + } + return true; + } + + @Override + public MessageOp getOp() { + return ACCEPT; + } + + @Override + public String wireFormat() { + return super.wireFormat() + SEPARATOR + value.wireFormat(); + } + + Value getValue() { + return value; + } + } + + /** + * Used to inform Learners of a "chosen value". + */ + public class Result extends Accept { + + Result(Proposal proposal, Value value) { + super(proposal, value); + } + + public Result(String requestLine, String[] tokens) + throws InvalidMessageException { + super(requestLine, tokens); + } + + @Override + public MessageOp getOp() { + return RESULT; + } + } + + /** + * Used to query the Learner for a current master + */ + public class MasterQuery extends RequestMessage { + + public MasterQuery() {} + + public MasterQuery(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + } + + @Override + public MessageOp getOp() { + return MASTER_QUERY; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix(); + } + + @Override + public String toString() { + return getOp() + " " + getMessagePrefix() + " " + wireFormat(); + } + } + + /* Represents a Value in Paxos. */ + public interface Value extends WireFormatable { + } + + public interface ValueParser { + /** + * Converts the wire format back into a Value + * + * @param wireFormat String representation of a Value + * + * + * @return the de-serialized Value + * + */ + abstract Value parse(String wireFormat); + } + + /** + * A String based value implementation used as the "default" Value + */ + public static class StringValue extends StringFormatable implements Value { + + StringValue() { + super(null); + } + + public StringValue(String s) { + super(s); + } + + @Override + public String toString() { + return "Value:" + s; + } + + public String getString() { + return s; + } + } +} diff --git a/src/com/sleepycat/je/rep/elections/RankingProposer.java b/src/com/sleepycat/je/rep/elections/RankingProposer.java new file mode 100644 index 0000000..1c7c000 --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/RankingProposer.java @@ -0,0 +1,283 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.elections; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.logging.Level; + +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.elections.Acceptor.SuggestionGenerator.Ranking; +import com.sleepycat.je.rep.elections.Protocol.Promise; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Extends the base proposer to choose a phase 2 value based on a suggestion's + * relative ranking. + */ +public class RankingProposer extends Proposer { + + /** + * If non-zero, use the specified log version as the one that supports + * writing replication data in the previous format, to control whether the + * skipPromiseDueToVersion method should always return false, for testing. + */ + static volatile int testLogVersionReplicatePrevious = 0; + + public RankingProposer(Elections elections, + NameIdPair nameIdPair) { + super(elections, nameIdPair); + } + + /** + * Chooses a Value based on the relative ranking of all Promise responses. + * The one with the highest ranking is chosen. Zero priority responses are + * never chosen. In the case of a tie, priority is used to resolve it. If + * priority is insufficient the socket address is used to order the choice + * so that a consistent result is obtained across the set irrespective of + * the iteration order over the set. + */ + @Override + protected Value choosePhase2Value(Set exchanges) { + Ranking maxRanking = + new Ranking(Long.MIN_VALUE, Long.MIN_VALUE); + int maxPriority = Integer.MIN_VALUE; + String maxTarget = null; + int zeroPrioNodes = 0; + Ranking arbRanking = null; + int nonArbCount = 0; + + /* Check log versions in this group. */ + VersionCalculator calculator = + new VersionCalculator(elections, exchanges); + + Value acceptorValue = null; + for (MessageExchange me : exchanges) { + if (me.getResponseMessage().getOp() != + elections.getProtocol().PROMISE) { + continue; + } + final Promise p = (Promise) me.getResponseMessage(); + if (p.getPriority() == 0) { + zeroPrioNodes++; + continue; + } + + if (calculator.skipPromiseDueToVersion(p.getLogVersion())) { + continue; + } + + /* NULL name/ID pair means value came from arbiter */ + final Value suggestion = p.getSuggestion(); + final boolean isArb = (suggestion instanceof MasterValue) && + ((MasterValue) suggestion).getNameId().equals(NameIdPair.NULL); + + /* + * Ignore arbiter if there are replies from multiple non-arbiters. + * Arbiters should only matter for RF=2, and should be ignored if + * both non-arbiter nodes replied, since they have all of the + * existing data and should be allowed to proceed even if they have + * lost data the arbiter remembers. [#25311] + */ + if (!isArb) { + nonArbCount++; + } else { + arbRanking = p.getSuggestionRanking(); + continue; + } + + final int compareTo = + p.getSuggestionRanking().compareTo(maxRanking); + if (compareTo < 0) { + continue; + } + + /* Use priority as a tie breaker. */ + if (compareTo == 0) { + if (p.getPriority() < maxPriority) { + continue; + } + + /* + * Use socket address to choose in case of a tie, so we + * always have a consistent ordering. 
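+ *
+ * For example, if two promises tie on both ranking and priority and
+ * arrive from hypothetical targets "node1:5001" and "node2:5001",
+ * the comparison below keeps "node2:5001", the lexicographically
+ * larger address, regardless of the order in which the exchange
+ * set is iterated.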
+ */ + if ((p.getPriority() == maxPriority) && + ((maxTarget != null) && + (me.target.toString().compareTo(maxTarget) <= 0))) { + continue; + } + } + + acceptorValue = p.getSuggestion(); + maxRanking = p.getSuggestionRanking(); + maxPriority = p.getPriority(); + maxTarget = me.target.toString(); + } + + if ((acceptorValue == null) && (zeroPrioNodes > 0)) { + LoggerUtils.logMsg(logger, elections.getRepImpl(), + formatter, Level.INFO, + "No positive election priority node responded."+ + " Zero election priority node count:" + + zeroPrioNodes); + phase1NoNonZeroPrio.increment(); + } else if (acceptorValue != null && arbRanking != null && nonArbCount <= 1) { + /* Check if we have an arbiter response. */ + if (maxRanking.compareTo(arbRanking) < 0) { + phase1Arbiter.increment(); + acceptorValue = null; + } + } + return acceptorValue; + } + + /** + * Returns a proposal number. Note that the proposal numbers must increase + * over time, even across restarts of the proposer process. + * @return a 24 character string representing the proposal number + */ + @Override + public synchronized Proposal nextProposal() { + return proposalGenerator.nextProposal(); + } + + private final TimebasedProposalGenerator proposalGenerator = + new TimebasedProposalGenerator(); + + /* Adds versioning information as a factor for elections. */ + private static class VersionCalculator { + private final Set exchanges; + private final Elections elections; + /* The majority log version in this group. */ + private int majorityVersion = Integer.MIN_VALUE; + /* The lowest log version in this group. */ + private int lowestVersion = Integer.MAX_VALUE; + /* True if there is only one log version in this group. */ + private boolean singleVersion = false; + + public VersionCalculator(Elections elections, + Set exchanges) { + this.exchanges = exchanges; + this.elections = elections; + calculate(); + } + + private void calculate() { + + /* + * Calculate the lowest log version and the total nodes that take + * part in the election, save all the log version information to + * calculate the majority log version. + */ + Map logFormats = new HashMap(); + for (MessageExchange me : exchanges) { + if (me.getResponseMessage().getOp() != + elections.getProtocol().PROMISE) { + continue; + } + + Promise p = (Promise) me.getResponseMessage(); + + if (p.getLogVersion() < lowestVersion) { + lowestVersion = p.getLogVersion(); + } + + if (!logFormats.containsKey(p.getLogVersion())) { + logFormats.put(p.getLogVersion(), 1); + } else { + logFormats.put(p.getLogVersion(), + logFormats.get(p.getLogVersion()) + 1); + } + } + + /* + * If there is only log version in the whole group, return and do + * nothing. + */ + if (logFormats.size() == 1) { + singleVersion = true; + return; + } + + /* + * If the RepNode is null, just return, so that the nodes with the + * smallest log version can always be elected as master. + */ + if (elections.getRepNode() == null) { + return; + } + + /* Calculate the majority log version. */ + int electableNodeCount = + elections.getRepNode().getGroup().getElectableGroupSize(); + for (Map.Entry entry : logFormats.entrySet()) { + if (entry.getValue() > (electableNodeCount / 2)) { + majorityVersion = entry.getKey(); + break; + } + } + } + + /** + * For JE 5 and earlier versions, if there are multiple log versions in + * a replication group, only elect the nodes with the lowest log + * version or the nodes with the majority log version to be the master. 
+ * This behavior is required because, in those JE versions, the master + * can only supply replication data in the current log format, so the + * master must be chosen from nodes running the earlier version during + * an upgrade until a majority of the nodes have been upgraded. This + * restriction no longer applies as of log version 9 in JE 6 -- see + * [#22336]. + * + *
+ * Returns true if the election will ignore the promise because there
+ * are multiple log versions in the group, all log versions correspond
+ * to JE 5 and earlier versions, and the log version of the replica
+ * satisfies one of the following rules:
+ *
+ * 1. If there is no majority log version in the group, and the log
+ *    version of this replica is not the lowest log version.
+ * 2. If there exists a majority log version, and the log version of
+ *    this replica is neither the lowest log version nor the majority
+ *    log version.
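+ *
+ * For example (hypothetical pre-JE 6 log versions): given promises
+ * at versions {7, 7, 8}, version 7 is both the lowest and the
+ * majority, so the version-8 promise is skipped under rule 2; given
+ * versions {6, 7, 8} there is no majority, so only the version-6
+ * promise is considered under rule 1.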
        + */ + boolean skipPromiseDueToVersion(int logVersion) { + if (singleVersion) { + return false; + } + int logVersionReplicatePrevious = testLogVersionReplicatePrevious; + if (logVersionReplicatePrevious == 0) { + logVersionReplicatePrevious = + LogEntryType.LOG_VERSION_REPLICATE_OLDER; + } + if (lowestVersion >= logVersionReplicatePrevious - 1) { + return false; + } + if ((majorityVersion == Integer.MIN_VALUE && + logVersion != lowestVersion) || + (majorityVersion != Integer.MIN_VALUE && + logVersion != lowestVersion && + logVersion != majorityVersion)) { + return true; + } + + return false; + } + } +} diff --git a/src/com/sleepycat/je/rep/elections/TimebasedProposalGenerator.java b/src/com/sleepycat/je/rep/elections/TimebasedProposalGenerator.java new file mode 100644 index 0000000..462c27b --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/TimebasedProposalGenerator.java @@ -0,0 +1,268 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import java.math.BigInteger; +import java.net.InetAddress; +import java.net.NetworkInterface; +import java.net.SocketException; +import java.net.UnknownHostException; +import java.security.SecureRandom; +import java.util.Enumeration; +import java.util.concurrent.atomic.AtomicInteger; + +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Proposer.ProposalParser; + +/** + * Generates a unique sequence of ascending proposal numbers that is unique + * across all machines. + * + * Each proposal number is built as the catenation of the following components: + * + * ms time (8 bytes) | machineId (16 bytes) | locally unique Id (4 bytes) + * + * The ms time supplies the increasing number and the IP address is a number + * unique across machines. + * + * The machineId is generated as described below. + * + * The locally unique Id is used to allow for multiple unique proposal + * generators in a single process. + */ +public class TimebasedProposalGenerator { + + /* + * A number that is unique for all instances of the TimeBasedGenerator on + * this machine. + */ + private final int locallyUniqueId; + private static final AtomicInteger uniqueIdGenerator = new AtomicInteger(1); + + /* + * Tracks the time (in ms) used to generate the previous proposal + * preventing the creation of duplicate proposals. Synchronize on this + * instance when accessing this field. + */ + private long prevProposalTime = System.currentTimeMillis(); + + /* + * A unique ID for this JVM, using a hex representation of the IP address + * XOR'ed with a random value. If the IP address cannot be determined, + * a secure random number is generated and used instead. The risk of + * collision is very low since the number of machines in a replication + * group is typically small, in the 10s at most. + */ + private static final String machineId; + + /* Width of each field in the Proposal number in hex characters. */ + final static int TIME_WIDTH = 16; + + /* Allow for 16 byte ipv6 addresses. 
*/ + final static int ADDRESS_WIDTH =32; + final static int UID_WIDTH = 8; + + /* + * Initialize machineId, do it just once to minimize latencies in the face + * of misbehaving networks that slow down calls to getLocalHost() + */ + static { + + InetAddress localHost; + try { + localHost = java.net.InetAddress.getLocalHost(); + } catch (UnknownHostException e) { + /* + * Likely a misconfigured machine if it could not determine + * localhost. + */ + localHost = null; + } + byte[] localAddress = null; + if (localHost != null) { + localAddress = localHost.getAddress(); + + if (localHost.isLoopbackAddress()) { + /* Linux platforms return a loopback address, examine the + * interfaces individually for a suitable address. + */ + localAddress = null; + try { + for (Enumeration interfaces = + NetworkInterface.getNetworkInterfaces(); + interfaces.hasMoreElements();) { + for (Enumeration addresses = + interfaces.nextElement().getInetAddresses(); + addresses.hasMoreElements();) { + InetAddress ia = addresses.nextElement(); + if (! (ia.isLoopbackAddress() || + ia.isAnyLocalAddress() || + ia.isMulticastAddress())) { + /* Found one, any one of these will do. */ + localAddress = ia.getAddress(); + break; + } + } + } + } catch (SocketException e) { + /* Could not get the network interfaces, give up */ + } + } + } + + if (localAddress != null) { + /* + * Convert the address to a positive integer, XOR it with a + * random value of the right size, and format in hex + */ + final BigInteger addrVal = new BigInteger(1, localAddress); + final BigInteger randVal = + new BigInteger(ADDRESS_WIDTH * 4, new SecureRandom()); + machineId = String.format("%0" + ADDRESS_WIDTH + "x", + addrVal.xor(randVal)); + } else { + /* + * If the localAddress is null, this host is likely disconnected, + * or localHost is misconfigured, fall back to using just a secure + * random number. + */ + final BigInteger randVal = + new BigInteger(ADDRESS_WIDTH * 4, new SecureRandom()); + machineId = String.format("%0" + ADDRESS_WIDTH + "x", randVal); + } + } + + /** + * Creates an instance with an application-specified locally (machine wide) + * unique id, e.g. a port number, or a combination of a pid and some other + * number. + * + * @param locallyUniqueId the machine wide unique id + */ + TimebasedProposalGenerator(int locallyUniqueId) { + this.locallyUniqueId = locallyUniqueId; + } + + /** + * Constructor defaulting the unique id so it's merely unique within the + * process. + */ + public TimebasedProposalGenerator() { + this(uniqueIdGenerator.getAndIncrement()); + } + + /** + * Returns the next Proposal greater than all previous proposals returned + * on this machine. + * + * @return the next unique proposal + */ + public Proposal nextProposal() { + long proposalTime = System.currentTimeMillis(); + synchronized (this) { + if (proposalTime <= prevProposalTime) { + /* Proposals are moving faster than the clock. */ + proposalTime = ++prevProposalTime; + } + prevProposalTime = proposalTime; + } + return new StringProposal(String.format("%016x%s%08x", proposalTime, + machineId, locallyUniqueId)); + } + + /** + * Returns the parser used to convert wire representations into Proposal + * instances. + * + * @return a ProposalParser + */ + public static ProposalParser getParser() { + return StringProposal.getParser(); + } + + /** + * Implements the Proposal interface for a string based proposal. The + * string is a hex representation of the Proposal. 
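+ *
+ * For example, nextProposal() above produces a 56 character hex
+ * string: TIME_WIDTH (16) characters of millisecond time, followed
+ * by ADDRESS_WIDTH (32) characters of machineId and UID_WIDTH (8)
+ * characters of locally unique id, so plain string comparison
+ * orders proposals by generation time first.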
+ */ + private static class StringProposal implements Proposal { + private final String proposal; + + /* The canonical proposal parser. */ + private static ProposalParser theParser = new ProposalParser() { + @Override + public Proposal parse(String wireFormat) { + return ((wireFormat == null) || ("".equals(wireFormat))) ? + null : + new StringProposal(wireFormat); + } + }; + + StringProposal(String proposal) { + assert (proposal != null); + this.proposal = proposal; + } + + @Override + public String wireFormat() { + return proposal; + } + + @Override + public int compareTo(Proposal otherProposal) { + return this.proposal.compareTo + (((StringProposal) otherProposal).proposal); + } + + @Override + public String toString() { + return "Proposal(" + + proposal.substring(0, TIME_WIDTH) + + ":" + + proposal.substring(TIME_WIDTH, TIME_WIDTH + ADDRESS_WIDTH) + + ":" + proposal.substring(TIME_WIDTH + ADDRESS_WIDTH) + + ")"; + } + + private static ProposalParser getParser() { + return theParser; + } + + @Override + public int hashCode() { + return ((proposal == null) ? 0 : proposal.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof StringProposal)) { + return false; + } + final StringProposal other = (StringProposal) obj; + if (proposal == null) { + if (other.proposal != null) { + return false; + } + } else if (!proposal.equals(other.proposal)) { + return false; + } + return true; + } + } +} diff --git a/src/com/sleepycat/je/rep/elections/Utils.java b/src/com/sleepycat/je/rep/elections/Utils.java new file mode 100644 index 0000000..27cbe88 --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/Utils.java @@ -0,0 +1,436 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.elections; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.PrintWriter; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.utilint.LoggerUtils; + +public class Utils { + + /** + * Cleans up the socket and its related streams after a request/response + * cycle. + * + * @param channel the channel to be closed + * @param in the request stream to be closed + * @param out the response stream to be closed + */ + static public void cleanup(Logger logger, + EnvironmentImpl envImpl, + Formatter formatter, + DataChannel channel, + BufferedReader in, + PrintWriter out) { + if (in != null) { + try { + in.close(); + } catch (IOException e) { + /* Ignore it, it's only cleanup. */ + } + } + if (out != null) { + out.close(); + } + if (channel != null) { + try { + channel.close(); + } catch (IOException e) { + /* Log it and continue. */ + LoggerUtils.logMsg + (logger, envImpl, formatter, Level.FINE, + "Channel exception on close: " + e.getMessage()); + } + } + } + + /** + * @hidden + * Utility to broadcast a request to set of targets. + * + * @param targets of the broadcast + * @param requestMessage to be broadcast + * @param threadPool used to issue message in parallel + * + * @return the CompletionService representing the futures generated by the + * broadcast + */ + public static FutureTrackingCompService + broadcastMessage(Set targets, + String serviceName, + RequestMessage requestMessage, + ExecutorService threadPool) { + + final FutureTrackingCompService compService = + new FutureTrackingCompService<>(threadPool); + + for (InetSocketAddress socketAddress : targets) { + MessageExchange me = requestMessage.getProtocol() + .new MessageExchange(socketAddress, serviceName, requestMessage); + try { + compService.submit(me, me); + } catch (RejectedExecutionException ree) { + if (threadPool.isTerminated()) { + /* + * The thread pool has been shutdown asynchronously as + * part of a general elections shutdown. Discard submitted + * and running tasks. + */ + compService.cancelFutures(true); + return compService; + } + + /* + * Unexpected, rethrow so it can be reported at a higher + * level. 
+ */ + throw ree; + } + } + return compService; + } + + /** + * Utility to wait for completion of futures in unit tests + * + * @param compService the futures to wait for + * @param logger used to report any error messages + */ + public static void checkFutures( + FutureTrackingCompService compService, + long futureTimeout, + TimeUnit unit, + Logger logger, + final RepImpl envImpl, + Formatter formatter) { + + new WithFutureExceptionHandler + (compService, futureTimeout, unit, logger, envImpl, formatter) { + + @Override + protected void processResponse (MessageExchange result) { + /* Do nothing, merely waiting for a response */ + } + + @Override + protected boolean isShutdown() { + return (envImpl != null) && !envImpl.isValid(); + } + }.execute(); + } + + /** + * @hidden + * + * A utility wrapper to handle all exceptions from futures in a consistent + * way. The above method illustrates its intended usage pattern + */ + public static abstract + class WithFutureExceptionHandler { + + private final FutureTrackingCompService completionService; + private final long completionTimeout; + private final TimeUnit unit; + + private final Logger logger; + private final RepImpl envImpl; + private final Formatter formatter; + + /** + * Generate a handler for processing future results. + * + * @param compService the CompletionService representing the futures + * + * @param completionTimeout the timeout indicating how long to wait for + * all the results. If running the tasks involves I/O, especially + * network I/O, the timeout should be sufficient to ensure that it + * allows for the associated latency. The timeout assumes that all the + * tasks are run in parallel, so it represents the max estimated task + * completion time associated with the tasks in the set. + * + * @param unit the units associated with the above timeout + */ + public WithFutureExceptionHandler( + FutureTrackingCompService compService, + long completionTimeout, + TimeUnit unit, + Logger logger, + RepImpl envImpl, + Formatter formatter) { + + super(); + + this.completionService = compService; + this.completionTimeout = completionTimeout; + this.unit = unit; + this.logger = logger; + this.envImpl = envImpl; + this.formatter = formatter; + } + + /** + * The method represents the result processing code being wrapped upon + * a success response message being received. + * + * @param result the non null result, with a non null response message + */ + protected abstract void processResponse(T result); + + /** + * The counterpart to processResponse. It's invoked when there was no + * response to a message. The exception, if present, details the reason + * for the failure; some protocols may choose not to require a + * response, that is, both the response and result.exception could be + * null. The default method simply logs the event. + * + * Note that Timeouts don't come down this path; they result in tasks + * being cancelled with the timeout being logged. + * + * @param result the non null result, with a null response message + */ + void processNullResponse(T result) { + LoggerUtils.logMsg(logger, envImpl, + formatter, Level.FINE, + "No response from: " + result.target + + " request" + result.getRequestMessage() + + " reason: " + result.exception); + return; + } + + /** + * Determines whether the initiating operation was shutdown. + * + * @return true if the futures should not be processed but should be + * cancelled instead. 
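+ *
+ * A typical override, sketched from checkFutures above, is simply:
+ * return (envImpl != null) && !envImpl.isValid();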
+ */ + protected abstract boolean isShutdown(); + + /** + * Processes futures in the order in which they complete, as + * determined by the completion service, to minimize unnecessary + * waiting. + */ + public final void execute() { + final long limitTimeMs = + System.currentTimeMillis() + unit.toMillis(completionTimeout); + try { + for (int count = completionService.getFutures().size(); + count > 0; count--) { + final long pollTimeoutMs = + limitTimeMs - System.currentTimeMillis(); + if (pollTimeoutMs <= 0) { + /* Timed out. */ + LoggerUtils. + logMsg(logger, envImpl, formatter, Level.INFO, + "Election messages timed out after " + + unit.toMillis(completionTimeout) + "ms."); + return; + } + + /* Wait for the next task that is ready. */ + final Future f = + completionService.poll(pollTimeoutMs, + TimeUnit.MILLISECONDS); + + if (f == null) { + /* Timed out. */ + LoggerUtils. + logMsg(logger, envImpl, formatter, Level.INFO, + "Election messages timed out after " + + unit.toMillis(completionTimeout) + "ms."); + return; + } + + if (isShutdown()) { + LoggerUtils. + logMsg(logger, envImpl, formatter, Level.INFO, + "Election messages terminated." + + " Environment being shutdown." ); + /* Simply exit. */ + return; + } + + if (f.isCancelled()) { + continue; + } + + final long futureTimeoutMs = + limitTimeMs - System.currentTimeMillis(); + if (futureTimeoutMs <= 0) { + return; + } + + assert(f.isDone()); + executeInternal(f); + } + + } catch (InterruptedException e) { + + if (envImpl != null) { + final RepNode rn = envImpl.getRepNode(); + if ((rn != null) && rn.isShutdown()) { + + /* + * Interrupt for shutdown, it's likely part of a + * "hard" stoppable thread shutdown, ignore it. + */ + LoggerUtils. + logMsg(logger, envImpl, formatter, Level.INFO, + "Election operation interrupted." + + " Environment being shutdown." ); + /* Simply exit. */ + return; + } + } + + throw EnvironmentFailureException.unexpectedException(e); + } finally { + /* + * Clean up all tasks, in case we exited before they were all + * done. + */ + completionService.cancelFutures(true); + } + } + + /** + * The method represents the future process code being wrapped. It + * processes generic future-specific exceptions, where the impact of + * the fault can be localized to the future. + * + * @param future the "done" future + */ + private final void executeInternal(Future future) + throws InterruptedException { + + try { + /* We don't expect to wait, since the future is "done". */ + assert future.isDone(); + T result = future.get(); + if (result.getResponseMessage() == null) { + processNullResponse(result); + } else { + processResponse(result); + } + } catch (CancellationException ce) { + /* Ignore. 
*/ + } catch (ExecutionException e) { + /* Get the true cause, unwrap the intermediate wrappers */ + Exception cause = (Exception)e.getCause(); + while (cause instanceof RuntimeException) { + Throwable t = ((RuntimeException)cause).getCause(); + if ((t != null) && (t instanceof Exception)) { + cause = (Exception)t; + } else { + break; + } + } + if ((cause instanceof ConnectException) || + (cause instanceof SocketException) || + (cause instanceof SocketTimeoutException) || + (cause instanceof ServiceConnectFailedException)){ + // Network exceptions are expected, log it and keep moving + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINE, + "Election connection failure " + + cause.getMessage()); + return; + } + /* Unanticipated exception, higher level will handle it */ + throw EnvironmentFailureException.unexpectedException(e); + } + } + } + + /** + * @hidden + * + * A subclass of ExecutorCompletionService that tracks submitted tasks, so + * that futures associated with tasks can be cancelled in a modular way, + * without the need for maintaining distinct state about the futures that + * were created. + */ + public static class FutureTrackingCompService extends + ExecutorCompletionService { + + /* The list of futures resulting from submitted tasks. */ + private final List> futures = new LinkedList<>(); + + public FutureTrackingCompService(Executor executor) { + super(executor); + } + + public List> getFutures() { + return futures; + } + + /** + * Wrapper around submit() method to track futures resulting from + * submitted tasks. + */ + @Override + public Future submit(Runnable task, V result) { + final Future f = super.submit(task, result); + futures.add(f); + return f; + } + + /** + * Wrapper around submit() method to track futures resulting from + * submitted tasks. + */ + @Override + public Future submit(Callable task) { + final Future f = super.submit(task); + futures.add(f); + return f; + } + + public void cancelFutures(boolean mayInterruptIfRunning) { + for (Future f : futures) { + if (!f.isCancelled()) { + f.cancel(mayInterruptIfRunning); + } + } + } + } +} diff --git a/src/com/sleepycat/je/rep/elections/package-info.java b/src/com/sleepycat/je/rep/elections/package-info.java new file mode 100644 index 0000000..124eaac --- /dev/null +++ b/src/com/sleepycat/je/rep/elections/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Elections of the master node among the nodes in a group. + */ +package com.sleepycat.je.rep.elections; diff --git a/src/com/sleepycat/je/rep/impl/BinaryNodeStateProtocol.java b/src/com/sleepycat/je/rep/impl/BinaryNodeStateProtocol.java new file mode 100644 index 0000000..c0a297f --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/BinaryNodeStateProtocol.java @@ -0,0 +1,269 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.NodeState; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.utilint.BinaryProtocol; + +/** + * Defines the protocol used in support of node state querying. + * + * Because this protocol has to transfer byte array between two nodes, so + * instead of using the former NodeStateProtocol, we introduce this new + * protocol which inherits from BinaryProtocol. + * + * Note: once we support active version update, we can use one protocol only. + * + * The message request sequence: + * NODE_STATE_REQ -> NODE_STATE_RESP + */ +public class BinaryNodeStateProtocol extends BinaryProtocol { + + public static final int VERSION = 1; + + /* The messages defined by this class. */ + public final static MessageOp BIN_NODE_STATE_REQ = + new MessageOp((short) 1, BinaryNodeStateRequest.class); + public final static MessageOp BIN_NODE_STATE_RESP = + new MessageOp((short) 2, BinaryNodeStateResponse.class); + + public BinaryNodeStateProtocol(NameIdPair nameIdPair, + RepImpl repImpl) { + + super(nameIdPair, VERSION, VERSION, repImpl); + + this.initializeMessageOps(new MessageOp[] { + BIN_NODE_STATE_REQ, + BIN_NODE_STATE_RESP + }); + } + + /* Message request the state of the specified node. */ + public class BinaryNodeStateRequest extends SimpleMessage { + /* The name of the node whose status is requested. */ + private final String nodeName; + /* The name of the group the node belongs to. */ + private final String groupName; + + public BinaryNodeStateRequest(String nodeName, String groupName) { + super(); + this.nodeName = nodeName; + this.groupName = groupName; + } + + public BinaryNodeStateRequest(ByteBuffer buffer) { + super(); + nodeName = getString(buffer); + groupName = getString(buffer); + } + + public String getNodeName() { + return nodeName; + } + + public String getGroupName() { + return groupName; + } + + @Override + public MessageOp getOp() { + return BIN_NODE_STATE_REQ; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(nodeName, groupName); + } + } + + /* Message return state of specified node. */ + public class BinaryNodeStateResponse extends SimpleMessage { + /* The name of the node which requests the status. */ + private final String nodeName; + /* The name of the group which this node joins. */ + private final String groupName; + /* The name of the current master in the group. */ + private final String masterName; + /* The JEVersion that this node runs. */ + private final JEVersion jeVersion; + /* Time when this node joins the group. */ + private final long joinTime; + /* The current state of this node. */ + private final State nodeState; + /* The last commit VLSN on this node. */ + private final long commitVLSN; + /* The last master commit VLSN known by this node. */ + private final long masterCommitVLSN; + /* The number of running feeders on this node. 
*/ + private final int activeFeeders; + /* The log version of this node. */ + private final int logVersion; + /* The state of the JE application, specified by users themselves. */ + private final byte[] appState; + + /* + * The system load of the node, it is serialized and deserialized in + * String format. + */ + private final double systemLoad; + + public BinaryNodeStateResponse(String nodeName, + String groupName, + String masterName, + JEVersion jeVersion, + long joinTime, + State nodeState, + long commitVLSN, + long masterCommitVLSN, + int activeFeeders, + int logVersion, + byte[] appState, + double systemLoad) { + super(); + this.nodeName = nodeName; + this.groupName = groupName; + /* + * Master could be unknown, but must be non-null for + * serialization. + */ + this.masterName = (masterName == null) ? "" : masterName; + this.jeVersion = jeVersion; + this.joinTime = joinTime; + this.nodeState = nodeState; + this.commitVLSN = commitVLSN; + this.masterCommitVLSN = masterCommitVLSN; + this.activeFeeders = activeFeeders; + this.logVersion = logVersion; + this.appState = appState; + this.systemLoad = systemLoad; + } + + public BinaryNodeStateResponse(ByteBuffer buffer) { + super(); + nodeName = getString(buffer); + groupName = getString(buffer); + masterName = getString(buffer); + jeVersion = new JEVersion(getString(buffer)); + joinTime = LogUtils.readLong(buffer); + nodeState = getEnum(State.class, buffer); + commitVLSN = LogUtils.readLong(buffer); + masterCommitVLSN = LogUtils.readLong(buffer); + activeFeeders = LogUtils.readInt(buffer); + logVersion = LogUtils.readInt(buffer); + appState = getByteArray(buffer); + systemLoad = getDouble(buffer); + } + + public String getNodeName() { + return nodeName; + } + + public String getGroupName() { + return groupName; + } + + public String getMasterName() { + return masterName; + } + + public JEVersion getJEVersion() { + return jeVersion; + } + + public long getJoinTime() { + return joinTime; + } + + public State getNodeState() { + return nodeState; + } + + public long getCommitVLSN() { + return commitVLSN; + } + + public long getKnownMasterCommitVLSN() { + return masterCommitVLSN; + } + + public int getActiveFeeders() { + return activeFeeders; + } + + public int getLogVersion() { + return logVersion; + } + + public byte[] getAppState() { + if (appState.length == 0) { + return null; + } + + return appState; + } + + public double getSystemLoad() { + return systemLoad; + } + + @Override + public MessageOp getOp() { + return BIN_NODE_STATE_RESP; + } + + @Override + public ByteBuffer wireFormat() { + + /* + * If the appState is null, make a new zero byte array, since + * writing null byte array would cause a NPE, and a meaningful + * application state shouldn't be a zero length byte array. + */ + byte[] realAppState = (appState == null ? new byte[0] : appState); + return wireFormat(nodeName, + groupName, + masterName, + jeVersion.toString(), + joinTime, + nodeState, + commitVLSN, + masterCommitVLSN, + activeFeeders, + logVersion, + realAppState, + systemLoad); + } + + /* Convert the response to the NodeState. 
*/ + public NodeState convertToNodeState() { + return new NodeState(nodeName, + groupName, + nodeState, + masterName, + jeVersion, + joinTime, + commitVLSN, + masterCommitVLSN, + activeFeeders, + logVersion, + getAppState(), + systemLoad); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/BinaryNodeStateService.java b/src/com/sleepycat/je/rep/impl/BinaryNodeStateService.java new file mode 100644 index 0000000..71e2a43 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/BinaryNodeStateService.java @@ -0,0 +1,151 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import java.io.IOException; +import java.util.logging.Logger; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol.BinaryNodeStateRequest; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol.BinaryNodeStateResponse; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingService; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.JVMSystemUtils; + +/** + * The service registered by a RepNode to answer the state request. + * + * To support the new BinaryStateProtocol, we introduce this new + * BinaryNodeStateService, it's used by "Ping" command. + * + * Note: we can merge the two NodeState services together once we support + * acitve version updates. + */ +public class BinaryNodeStateService extends ExecutingService { + + private final RepNode repNode; + private final ServiceDispatcher dispatcher; + private final Logger logger; + + /* Identifies the Node State querying Service. */ + public static final String SERVICE_NAME = "BinaryNodeState"; + + public BinaryNodeStateService(ServiceDispatcher dispatcher, + RepNode repNode) { + super(SERVICE_NAME, dispatcher); + this.repNode = repNode; + this.dispatcher = dispatcher; + this.logger = LoggerUtils.getLogger(getClass()); + + dispatcher.register(this); + } + + public void shutdown() { + dispatcher.cancel(SERVICE_NAME); + } + + @Override + public Runnable getRunnable(DataChannel dataChannel) { + return new NodeStateServiceRunnable(dataChannel); + } + + class NodeStateServiceRunnable implements Runnable { + private DataChannel channel; + + NodeStateServiceRunnable(DataChannel channel) { + this.channel = channel; + } + + /* Create the NodeState for the request. */ + private BinaryNodeStateResponse createResponse + (BinaryNodeStateProtocol protocol) { + + long joinTime = repNode.getMonitorEventManager().getJoinTime(); + long txnEndVLSN = (repNode.getCurrentTxnEndVLSN() == null ? 
+ 0L : repNode.getCurrentTxnEndVLSN().getSequence()); + long masterTxnEndVLSN = repNode.replica().getMasterTxnEndVLSN(); + int activeFeeders = repNode.feederManager().activeReplicaCount(); + + return protocol.new BinaryNodeStateResponse + (repNode.getNodeName(), repNode.getGroup().getName(), + repNode.getMasterName(), JEVersion.CURRENT_VERSION, joinTime, + repNode.getRepImpl().getState(), txnEndVLSN, masterTxnEndVLSN, + activeFeeders, LogEntryType.LOG_VERSION, + repNode.getAppState(), JVMSystemUtils.getSystemLoad()); + } + + @Override + public void run() { + BinaryNodeStateProtocol protocol = null; + + try { + protocol = new BinaryNodeStateProtocol(NameIdPair.NOCHECK, + repNode.getRepImpl()); + try { + channel.getSocketChannel().configureBlocking(true); + + BinaryNodeStateRequest msg = + protocol.read(channel, BinaryNodeStateRequest.class); + + /* + * Response a protocol error if the group name doesn't + * match. + */ + final String groupName = msg.getGroupName(); + if (!repNode.getGroup().getName().equals(groupName) || + !repNode.getNodeName().equals(msg.getNodeName())) { + throw new ProtocolException("Sending the request to" + + " a wrong group or a wrong node."); + } + + /* Write the response the requested node. */ + BinaryNodeStateResponse response = + createResponse(protocol); + protocol.write(response, channel); + LoggerUtils.finest(logger, repNode.getRepImpl(), + "Deal with a node state request successfully."); + } catch (ProtocolException e) { + LoggerUtils.info(logger, repNode.getRepImpl(), + "Get a ProtocolException with message: " + + LoggerUtils.exceptionTypeAndMsg(e) + + " while dealing with a node state request."); + protocol.write + (protocol.new ProtocolError(e.getMessage()), channel); + } catch (Exception e) { + LoggerUtils.info(logger, repNode.getRepImpl(), + "Unexpected exception: " + + LoggerUtils.exceptionTypeAndMsg(e)); + protocol.write + (protocol.new ProtocolError(e.getMessage()), channel); + } finally { + if (channel.isOpen()) { + channel.close(); + } + } + } catch (IOException e) { + + /* + * Channel has already been closed, or the close itself + * failed. + */ + } + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/EnumConfigParam.java b/src/com/sleepycat/je/rep/impl/EnumConfigParam.java new file mode 100644 index 0000000..66d10bb --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/EnumConfigParam.java @@ -0,0 +1,59 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl; + +import com.sleepycat.je.config.ConfigParam; + +/** + * A JE configuration parameter with an enumerated value + */ +public class EnumConfigParam> extends ConfigParam { + + /* The class denoting the enum type */ + private final Class enumClass; + + public EnumConfigParam(String configName, + Enum defaultValue, + boolean mutable, + boolean forReplication, + Class enumClass) { + super(configName, defaultValue.name(), mutable, forReplication); + this.enumClass = enumClass; + } + + /** + * Returns the enumerator associated with the name + * + * @param enumName the string naming the enumerator + * + * @return the enumerator + */ + public T getEnumerator(String enumName) { + return Enum.valueOf(enumClass, enumName); + } + + @Override + public void validateValue(String value) + throws IllegalArgumentException { + + /* + * If validateValue() is called by through the ConfigParam + * constructor, enumVal is not assigned yet, so we guard against + * that happening. + */ + if (enumClass != null) { + Enum.valueOf(enumClass, value); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/GroupService.java b/src/com/sleepycat/je/rep/impl/GroupService.java new file mode 100644 index 0000000..e4e600c --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/GroupService.java @@ -0,0 +1,359 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl; + +import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME; + +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.channels.Channels; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import java.util.StringTokenizer; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.MasterStateException; +import com.sleepycat.je.rep.MasterTransferFailureException; +import com.sleepycat.je.rep.MemberActiveException; +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.ReplicaStateException; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepGroupProtocol.DeleteMember; +import com.sleepycat.je.rep.impl.RepGroupProtocol.EnsureNode; +import com.sleepycat.je.rep.impl.RepGroupProtocol.FailReason; +import com.sleepycat.je.rep.impl.RepGroupProtocol.GroupRequest; +import com.sleepycat.je.rep.impl.RepGroupProtocol.RemoveMember; +import com.sleepycat.je.rep.impl.RepGroupProtocol.TransferMaster; +import com.sleepycat.je.rep.impl.RepGroupProtocol.UpdateAddress; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingService; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingRunnable; +import com.sleepycat.je.utilint.LoggerUtils; + +public class GroupService extends ExecutingService { + + /* The replication node */ + final RepNode repNode; + final RepGroupProtocol protocol; + + /** + * List of channels for in-flight requests. + * The channel is in this collection while the request is being processed, + * and must be removed before sending any response. + * + * @see #cancel + * @see #unregisterChannel + */ + private final Collection activeChannels = + new ArrayList(); + + private final Logger logger; + + /* Identifies the Group Service. 
*/ + public static final String SERVICE_NAME = "Group"; + + public GroupService(ServiceDispatcher dispatcher, RepNode repNode) { + super(SERVICE_NAME, dispatcher); + this.repNode = repNode; + + final DbConfigManager configManager = + repNode.getRepImpl().getConfigManager(); + String groupName = configManager.get(GROUP_NAME); + protocol = + new RepGroupProtocol(groupName, + repNode.getNameIdPair(), + repNode.getRepImpl(), + repNode.getRepImpl().getChannelFactory()); + logger = LoggerUtils.getLogger(getClass()); + } + + @Override + protected void cancel() { + Collection channels; + synchronized (this) { + channels = new ArrayList(activeChannels); + activeChannels.clear(); + } + if (!channels.isEmpty()) { + LoggerUtils.warning + (logger, repNode.getRepImpl(), + "In-flight GroupService request(s) canceled: node shutdown"); + } + for (DataChannel channel : channels) { + try { + PrintWriter out = + new PrintWriter(Channels.newOutputStream(channel), true); + ResponseMessage rm = + protocol.new Fail(FailReason.DEFAULT, "shutting down"); + out.println(rm.wireFormat()); + } finally { + if (channel.isOpen()) { + try { + channel.close(); + } + catch (IOException e) { + LoggerUtils.warning + (logger, repNode.getRepImpl(), + "IO error on channel close: " + e.getMessage()); + } + } + } + } + } + + /* Dynamically invoked process methods */ + + /** + * Wraps the replication group as currently cached on this node in + * a Response message and returns it. + */ + @SuppressWarnings("unused") + public ResponseMessage process(GroupRequest groupRequest) { + RepGroupImpl group = repNode.getGroup(); + if (group == null) { + return protocol.new Fail(groupRequest, FailReason.DEFAULT, + "no group info yet"); + } + return protocol.new GroupResponse(groupRequest, group); + } + + /** + * Ensures that the Monitor node, as described in the request, is a member + * of the group. + * + * @param ensureNode the request message describing the monitor node + * + * @return EnsureOK message if the monitor node is already part of the rep + * group, or was just made a part of the replication group. It returns a + * Fail message if it could not be made part of the group. The message + * associated with the response provides further details. + */ + public ResponseMessage process(EnsureNode ensureNode) { + RepNodeImpl node = ensureNode.getNode(); + try { + ensureMaster(); + repNode.getRepGroupDB().ensureMember(node); + RepNodeImpl enode = + repNode.getGroup().getMember(node.getName()); + return protocol.new EnsureOK(ensureNode, enode.getNameIdPair()); + } catch (ReplicaStateException e) { + return protocol.new Fail(ensureNode, FailReason.IS_REPLICA, + e.getMessage()); + } catch (DatabaseException e) { + return protocol.new Fail(ensureNode, FailReason.DEFAULT, + e.getMessage()); + } + } + + /** + * Removes a current member from the group. + * + * @param removeMember the request identifying the member to be removed. + * + * @return OK message if the member was removed from the group. 
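+ * Otherwise a Fail message is returned, for example with reason
+ * MEMBER_NOT_FOUND for an unknown node name, or IS_MASTER when the
+ * named node is the current master.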
+ */ + public ResponseMessage process(RemoveMember removeMember) { + final String nodeName = removeMember.getNodeName(); + try { + ensureMaster(); + repNode.removeMember(nodeName); + return protocol.new OK(removeMember); + } catch (MemberNotFoundException e) { + return protocol.new Fail(removeMember, FailReason.MEMBER_NOT_FOUND, + e.getMessage()); + } catch (MasterStateException e) { + return protocol.new Fail(removeMember, FailReason.IS_MASTER, + e.getMessage()); + } catch (ReplicaStateException e) { + return protocol.new Fail(removeMember, FailReason.IS_REPLICA, + e.getMessage()); + } catch (DatabaseException e) { + return protocol.new Fail(removeMember, FailReason.DEFAULT, + e.getMessage()); + } + } + + /** + * Deletes a current member from the group, which marks the node as removed + * and deletes its entry from the rep group DB. + * + * @param deleteMember the request identifying the member to be deleted + * + * @return OK message if the member was deleted from the group + */ + public ResponseMessage process(DeleteMember deleteMember) { + final String nodeName = deleteMember.getNodeName(); + try { + ensureMaster(); + repNode.removeMember(nodeName, true); + return protocol.new OK(deleteMember); + } catch (MemberNotFoundException e) { + return protocol.new Fail(deleteMember, FailReason.MEMBER_NOT_FOUND, + e.getMessage()); + } catch (MasterStateException e) { + return protocol.new Fail(deleteMember, FailReason.IS_MASTER, + e.getMessage()); + } catch (ReplicaStateException e) { + return protocol.new Fail(deleteMember, FailReason.IS_REPLICA, + e.getMessage()); + } catch (MemberActiveException e) { + return protocol.new Fail(deleteMember, FailReason.MEMBER_ACTIVE, + e.getMessage()); + } catch (DatabaseException e) { + return protocol.new Fail(deleteMember, FailReason.DEFAULT, + e.getMessage()); + } + } + + /** + * Update the network address for a dead replica. + * + * @param updateAddress the request identifying the new network address for + * the node. + * + * @return OK message if the address is successfully updated. + */ + public ResponseMessage process(UpdateAddress updateAddress) { + try { + ensureMaster(); + repNode.updateAddress(updateAddress.getNodeName(), + updateAddress.getNewHostName(), + updateAddress.getNewPort()); + return protocol.new OK(updateAddress); + } catch (MemberNotFoundException e) { + return protocol.new Fail( + updateAddress, FailReason.MEMBER_NOT_FOUND, e.getMessage()); + } catch (MasterStateException e) { + return protocol.new Fail(updateAddress, FailReason.IS_MASTER, + e.getMessage()); + } catch (ReplicaStateException e) { + return protocol.new Fail(updateAddress, FailReason.IS_REPLICA, + e.getMessage()); + } catch (DatabaseException e) { + return protocol.new Fail(updateAddress, FailReason.DEFAULT, + e.getMessage()); + } + } + + /** + * Transfer the master role from the current master to one of the specified + * replicas. 
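+ * For example, a request whose node-name list is "node2,node3"
+ * (hypothetical names) asks the current master to choose its
+ * successor from those two replicas; the list is split on commas
+ * by parseNodeList below.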
+ * + * @param transferMaster the request identifying nodes to be considered for + * the role of new master + * @return null + */ + public ResponseMessage process(TransferMaster transferMaster) { + try { + ensureMaster(); + final String nodeList = transferMaster.getNodeNameList(); + final Set replicas = parseNodeList(nodeList); + final long timeout = transferMaster.getTimeout(); + final boolean force = transferMaster.getForceFlag(); + String winner = repNode.transferMaster(replicas, timeout, force); + return protocol.new TransferOK(transferMaster, winner); + } catch (ReplicaStateException e) { + return protocol.new Fail(transferMaster, FailReason.IS_REPLICA, + e.getMessage()); + } catch (MasterTransferFailureException e) { + return protocol.new Fail(transferMaster, FailReason.TRANSFER_FAIL, + e.getMessage()); + } catch (DatabaseException e) { + return protocol.new Fail(transferMaster, FailReason.DEFAULT, + e.getMessage()); + } catch (IllegalArgumentException e) { + return protocol.new Fail(transferMaster, FailReason.DEFAULT, + e.toString()); + } catch (IllegalStateException e) { + return protocol.new Fail(transferMaster, FailReason.DEFAULT, + e.toString()); + } + } + + private Set parseNodeList(String list) { + Set set = new HashSet(); + StringTokenizer st = new StringTokenizer(list, ","); + while (st.hasMoreTokens()) { + set.add(st.nextToken()); + } + return set; + } + + private void ensureMaster() throws ReplicaStateException { + if (!repNode.isMaster()) { + throw new ReplicaStateException + ("GroupService operation can only be performed at master"); + } + } + + synchronized private void registerChannel(DataChannel dc) { + activeChannels.add(dc); + } + + /** + * Removes the given {@code DataChannel} from our list of active channels. + *
+ *
        + * Before sending any response on the channel, this method must be invoked + * to claim ownership of it. + * This avoids a potential race between the request processing thread in + * the normal case, and a thread calling {@code cancel()} at env shutdown + * time. + * + * @return true, if the channel is still active (usual case); false + * otherwise, presumably because the service was shut down. + */ + synchronized private boolean unregisterChannel(DataChannel dc) { + return activeChannels.remove(dc); + } + + @Override + public Runnable getRunnable(DataChannel dataChannel) { + return new GroupServiceRunnable(dataChannel, protocol); + } + + class GroupServiceRunnable extends ExecutingRunnable { + GroupServiceRunnable(DataChannel dataChannel, + RepGroupProtocol protocol) { + super(dataChannel, protocol, true); + registerChannel(dataChannel); + } + + @Override + protected ResponseMessage getResponse(RequestMessage request) + throws IOException { + + ResponseMessage rm = protocol.process(GroupService.this, request); + + /* + * If the channel has already been closed, before we got a chance to + * produce the response, then just discard the tardy response and + * return null. + */ + return unregisterChannel(channel) ? rm : null; + } + + @Override + protected void logMessage(String message) { + LoggerUtils.warning(logger, repNode.getRepImpl(), message); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/MinJEVersionUnsupportedException.java b/src/com/sleepycat/je/rep/impl/MinJEVersionUnsupportedException.java new file mode 100644 index 0000000..09c6c26 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/MinJEVersionUnsupportedException.java @@ -0,0 +1,62 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import com.sleepycat.je.JEVersion; + +/** + * Thrown when a conflict is detected between a minimum JE version requirement + * and the JE version of a particular node. + */ +public class MinJEVersionUnsupportedException extends Exception { + private static final long serialVersionUID = 1; + + /** The minimum JE version. */ + public final JEVersion minVersion; + + /** The name of the node where the requested version is not supported. */ + public final String nodeName; + + /** The node version, or null if not known. */ + public final JEVersion nodeVersion; + + /** + * Creates an instance of this class. 
+ * + * @param minVersion the minimum JE version + * @param nodeName the name of the node where the version is not supported + * @param nodeVersion the node version, or {@code null} if not known + */ + public MinJEVersionUnsupportedException(final JEVersion minVersion, + final String nodeName, + final JEVersion nodeVersion) { + if (minVersion == null) { + throw new NullPointerException("The minVersion must not be null"); + } + if (nodeName == null) { + throw new NullPointerException("The nodeName must not be null"); + } + this.minVersion = minVersion; + this.nodeName = nodeName; + this.nodeVersion = nodeVersion; + } + + @Override + public String getMessage() { + return "Version is not supported:" + + " minVersion: " + minVersion + + ", nodeName: " + nodeName + + ", nodeVersion: " + nodeVersion; + } +} diff --git a/src/com/sleepycat/je/rep/impl/NodeStateProtocol.java b/src/com/sleepycat/je/rep/impl/NodeStateProtocol.java new file mode 100644 index 0000000..aa77de0 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/NodeStateProtocol.java @@ -0,0 +1,149 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl; + +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannelFactory; + +/** + * Defines the protocol used in support of node state querying. + * + * The message request sequence: + * NODE_STATE_REQ -> NODE_STATE_RESP + */ +public class NodeStateProtocol extends TextProtocol { + + public static final String VERSION = "1.0"; + + /* The messages defined by this class. */ + public final MessageOp NODE_STATE_REQ = + new MessageOp("STATEREQ", NodeStateRequest.class); + public final MessageOp NODE_STATE_RESP = + new MessageOp("STATERESP", NodeStateResponse.class); + + public NodeStateProtocol(String groupName, + NameIdPair nameIdPair, + RepImpl repImpl, + DataChannelFactory channelFactory) { + + super(VERSION, groupName, nameIdPair, repImpl, channelFactory); + + this.initializeMessageOps(new MessageOp[] { + NODE_STATE_REQ, + NODE_STATE_RESP + }); + + setTimeouts(repImpl, + RepParams.REP_GROUP_OPEN_TIMEOUT, + RepParams.REP_GROUP_READ_TIMEOUT); + } + + /* Message request the state of the specified node. */ + public class NodeStateRequest extends RequestMessage { + private final String nodeName; + + public NodeStateRequest(String nodeName) { + this.nodeName = nodeName; + } + + public NodeStateRequest(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + nodeName = nextPayloadToken(); + } + + public String getNodeName() { + return nodeName; + } + + @Override + public MessageOp getOp() { + return NODE_STATE_REQ; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + nodeName; + } + } + + /* Message return state of specified node. 
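+ * Schematically, and assuming TextProtocol's SEPARATOR token is the
+ * '|' character, the payload portion of a serialized response would
+ * look like:
+ *
+ *   node1|master1|1621234567890|MASTER
+ *
+ * i.e. node name, master name, join time in milliseconds, node state.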
*/ + public class NodeStateResponse extends ResponseMessage { + private final String nodeName; + private final String masterName; + private final long joinTime; + private final State nodeState; + + public NodeStateResponse(String nodeName, + String masterName, + long joinTime, + State nodeState) { + this.nodeName = nodeName; + this.masterName = masterName; + this.joinTime = joinTime; + this.nodeState = nodeState; + } + + public NodeStateResponse(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + nodeName = nextPayloadToken(); + masterName = nextPayloadToken(); + joinTime = Long.parseLong(nextPayloadToken()); + nodeState = State.valueOf(nextPayloadToken()); + } + + public String getNodeName() { + return nodeName; + } + + public String getMasterName() { + return masterName; + } + + public long getJoinTime() { + return joinTime; + } + + public State getNodeState() { + return nodeState; + } + + @Override + public MessageOp getOp() { + return NODE_STATE_RESP; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + + nodeName + SEPARATOR + + masterName + SEPARATOR + + Long.toString(joinTime) + SEPARATOR + + nodeState.toString(); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/NodeStateService.java b/src/com/sleepycat/je/rep/impl/NodeStateService.java new file mode 100644 index 0000000..7b64b3c --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/NodeStateService.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import java.io.IOException; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.impl.NodeStateProtocol.NodeStateRequest; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingService; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingRunnable; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * The service registered by a RepNode to answer the state request from + * another node. It can also be extended to be used by "Ping" command. + */ +public class NodeStateService extends ExecutingService { + + private final RepNode repNode; + private final NodeStateProtocol protocol; + private final Logger logger; + + /* Identifies the Node State querying Service. 
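+ * As a sketch of how the protocol above is driven (the group name,
+ * NameIdPair and channel factory come from the node's own environment):
+ *
+ *   NodeStateProtocol proto = new NodeStateProtocol(
+ *       groupName, nameIdPair, repImpl, channelFactory);
+ *   TextProtocol.RequestMessage req = proto.new NodeStateRequest("node1");
+ *   String wire = req.wireFormat();  // text line sent to this service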
*/ + public static final String SERVICE_NAME = "NodeState"; + + public NodeStateService(ServiceDispatcher dispatcher, RepNode repNode) { + super(SERVICE_NAME, dispatcher); + this.repNode = repNode; + + String groupName = + repNode.getRepImpl().cloneRepConfig().getGroupName(); + protocol = new NodeStateProtocol + (groupName, repNode.getNameIdPair(), repNode.getRepImpl(), + dispatcher.getChannelFactory()); + logger = LoggerUtils.getLogger(getClass()); + } + + /** + * Process a node state querying request. + */ + @SuppressWarnings("unused") + public ResponseMessage process(NodeStateRequest stateRequest) { + long joinTime = repNode.getMonitorEventManager().getJoinTime(); + return protocol.new NodeStateResponse(repNode.getNodeName(), + repNode.getMasterName(), + joinTime, + repNode.getRepImpl().getState()); + } + + @Override + public Runnable getRunnable(DataChannel dataChannel) { + return new NodeStateServiceRunnable(dataChannel, protocol); + } + + class NodeStateServiceRunnable extends ExecutingRunnable { + NodeStateServiceRunnable(DataChannel dataChannel, + NodeStateProtocol protocol) { + super(dataChannel, protocol, true); + } + + @Override + protected ResponseMessage getResponse(RequestMessage request) + throws IOException { + + return protocol.process(NodeStateService.this, request); + } + + @Override + protected void logMessage(String message) { + LoggerUtils.warning(logger, repNode.getRepImpl(), message); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/PointConsistencyPolicy.java b/src/com/sleepycat/je/rep/impl/PointConsistencyPolicy.java new file mode 100644 index 0000000..8228c4f --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/PointConsistencyPolicy.java @@ -0,0 +1,131 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.utilint.PropUtil; +import com.sleepycat.je.utilint.VLSN; + +/** + * This is used to ensure that the Replica has finished replaying or proceeded + * past the vlsn specified by the policy. It's like the externally visible + * CommitPointConsistencyPolicy, except that the latter restricts consistency + * points to commit vlsns, whereas this policy lets you sync at uncommitted log + * entries. + */ +public class PointConsistencyPolicy + implements ReplicaConsistencyPolicy { + + /** + * The name:{@value} associated with this policy. The name can be used when + * constructing policy property values for use in je.properties files. + */ + public static final String NAME = "PointConsistencyPolicy"; + + private final VLSN targetVLSN; + + /* + * Amount of time (in milliseconds) to wait for consistency to be + * reached. 
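+ * For example, a policy that waits at most ten seconds for this
+ * replica to replay up to VLSN 1000 can be built with the
+ * constructors below:
+ *
+ *   PointConsistencyPolicy policy =
+ *       new PointConsistencyPolicy(new VLSN(1000), 10, TimeUnit.SECONDS);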
+ */ + private final int timeout; + + public PointConsistencyPolicy(VLSN targetVLSN) { + this(targetVLSN, Integer.MAX_VALUE, TimeUnit.MILLISECONDS); + } + + public PointConsistencyPolicy(VLSN targetVLSN, + long timeout, + TimeUnit timeoutUnit) { + this.targetVLSN = targetVLSN; + this.timeout = PropUtil.durationToMillis(timeout, timeoutUnit); + } + + /** + * Returns the name:{@value #NAME}, associated with this policy. + * @see #NAME + */ + @Override + public String getName() { + return NAME; + } + + /** + * Ensures that the replica has replayed the replication stream to the + * point identified by the commit token. If it isn't the method waits until + * the constraint is satisfied by the replica. + */ + @Override + public void ensureConsistency(EnvironmentImpl replicatorImpl) + throws InterruptedException, + ReplicaConsistencyException, + DatabaseException { + + /* + * Cast is done to preserve replication/non replication code + * boundaries. + */ + RepImpl repImpl = (RepImpl) replicatorImpl; + Replica replica = repImpl.getRepNode().replica(); + replica.getConsistencyTracker().awaitVLSN(targetVLSN.getSequence(), + this); + } + + @Override + public long getTimeout(TimeUnit unit) { + return PropUtil.millisToDuration(timeout, unit); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((targetVLSN == null) ? 0 : targetVLSN.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PointConsistencyPolicy other = (PointConsistencyPolicy) obj; + if (targetVLSN == null) { + if (other.targetVLSN != null) { + return false; + } + } else if (!targetVLSN.equals(other.targetVLSN)) { + return false; + } + return true; + } + + @Override + public String toString() { + return getName() + " targetVLSN=" + targetVLSN; + } +} diff --git a/src/com/sleepycat/je/rep/impl/RepConfigManager.java b/src/com/sleepycat/je/rep/impl/RepConfigManager.java new file mode 100644 index 0000000..2d6a6b9 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepConfigManager.java @@ -0,0 +1,112 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import java.util.Enumeration; +import java.util.Properties; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.RepConfigProxy; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicationConfig; + +public class RepConfigManager extends DbConfigManager { + + /** + * If the replication configuration object was set with overridden + * validation, we need to continue to override when we generate a new + * configuration. 
+ */ + private final boolean validateParams; + + public RepConfigManager(EnvironmentConfig envConfig, + RepConfigProxy repConfigProxy) { + super(envConfig); + checkEnvConfig(envConfig); + ReplicationConfig repConfig = (ReplicationConfig) repConfigProxy; + props.putAll(repConfig.getProps()); + repConfig.verify(); + this.validateParams = repConfig.getValidateParams(); + } + + /** + * Verifies that the environment config is suitable for a replicated + * environment. + * + * @param envConfig the environment config being checked. + * + * @throws IllegalArgumentException via ReplicatedEnvironment ctor. + */ + private static void checkEnvConfig(EnvironmentConfig envConfig) + throws IllegalArgumentException { + + if (!envConfig.getTransactional()) { + throw new IllegalArgumentException + ("A replicated environment must be transactional"); + } + String logMemOnly = envConfig.getConfigParam + (EnvironmentParams.LOG_MEMORY_ONLY.getName()); + if (Boolean.parseBoolean(logMemOnly)) { + throw new IllegalArgumentException + ("A replicated environment must not log to memory"); + } + } + + /** + * Create a new ReplicationConfig for use in creating Replicator handles. + * Be sure to only pick out replication related properties. + */ + public ReplicationConfig makeReplicationConfig() { + + /* + * TODO: the code would be nicer if we replaced the properties bag with + * a class that knows how to iterate over the param values, associating + * the ConfigParam object with it. As a future task, this is similar to + * code in DbConfigManager, and ought to be refactored out. We can also + * strip out the multivalued param support. + */ + Properties repProperties = new Properties(); + + /* Check that the properties have valid names and values. */ + Enumeration propNames = props.propertyNames(); + while (propNames.hasMoreElements()) { + String name = (String) propNames.nextElement(); + /* Is this a valid property name? */ + ConfigParam param = + EnvironmentParams.SUPPORTED_PARAMS.get(name); + + if (param == null) { + /* See if the parameter is an multi-value parameter. */ + String mvParamName = ConfigParam.multiValueParamName(name); + param = EnvironmentParams.SUPPORTED_PARAMS.get(mvParamName); + if (param == null) { + throw EnvironmentFailureException.unexpectedState + (name + + " is not a valid BDBJE environment configuration"); + } + } + + if (param.isForReplication()) { + repProperties.setProperty(name, props.getProperty(name)); + } + } + + return RepInternal.makeReplicationConfig + (repProperties, validateParams); + } +} diff --git a/src/com/sleepycat/je/rep/impl/RepEnvConfigObserver.java b/src/com/sleepycat/je/rep/impl/RepEnvConfigObserver.java new file mode 100644 index 0000000..8261e0a --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepEnvConfigObserver.java @@ -0,0 +1,31 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.rep.ReplicationMutableConfig; + +/** + * Implemented by observers of mutable rep config changes. 
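+ *
+ * <p>A minimal implementation is just a callback, for example:
+ * <pre>
+ *   RepEnvConfigObserver observer = new RepEnvConfigObserver() {
+ *       {@literal @}Override
+ *       public void repEnvConfigUpdate(RepConfigManager configMgr,
+ *                                      ReplicationMutableConfig newConfig) {
+ *           // Re-read whatever mutable parameters this component caches.
+ *       }
+ *   };
+ * </pre>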
+ */ +public interface RepEnvConfigObserver { + + /** + * Notifies the observer that one or more mutable rep properties have been + * changed. + */ + void repEnvConfigUpdate(RepConfigManager configMgr, + ReplicationMutableConfig newConfig) + throws DatabaseException; +} diff --git a/src/com/sleepycat/je/rep/impl/RepGroupDB.java b/src/com/sleepycat/je/rep/impl/RepGroupDB.java new file mode 100644 index 0000000..5d8f3a9 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepGroupDB.java @@ -0,0 +1,1521 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import static com.sleepycat.je.rep.NoConsistencyRequiredPolicy.NO_CONSISTENCY; +import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME; +import static com.sleepycat.je.rep.impl.RepParams.NODE_NAME; +import static com.sleepycat.je.rep.impl.RepParams.RESET_REP_GROUP_RETAIN_UUID; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; + +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.DbType; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.node.cbvlsn.CleanerBarrierState; +import com.sleepycat.je.rep.impl.node.Feeder; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.txn.ReadonlyTxn; +import com.sleepycat.je.rep.util.DbResetRepGroup; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; + +/** + * This class is used to encapsulate all access to the rep group data that is + * 
present in every replicated JE environment. The rep group data exists + * primarily to support dynamic group membership. Both read and update access + * must be done through the APIs provided by this class. + * + * The database is simply a representation of the RepGroup. Each entry in the + * database represents a node in RepGroup; the key is the String node name, and + * the data is the serialized ReplicationNode. There is a special entry keyed + * by GROUP_KEY that holds the contents of the RepGroup (excluding the nodes) + * itself. + * + * The database may be modified concurrently by multiple transactions as a + * master processes requests to update it. It may also be accessed by multiple + * overlapping transactions as a Replica replays the rep stream. These updates + * need to be interleaved with operations like getGroup() that create copies of + * the RepGroup instance. To avoid deadlocks, entries in the database are + * accessed in order of ascending key. GROUP_KEY in particular is associated + * with the lowest key value so that it's locked first implicitly as part of + * any iteration and any other modifications to the database must first lock it + * before making changes to the group itself. + * + * An instance of this class is created as part of a replication node and is + * retained for the entire lifetime of that node. + */ +public class RepGroupDB { + + private final RepImpl repImpl; + + /* A convenient, cached empty group. */ + public final RepGroupImpl emptyGroup; + + private final Logger logger; + + /* The key used to store group-wide information in the database. It must + * be the lowest key in the database, so that it's locked first during + * database iteration. + */ + public final static String GROUP_KEY = "$$GROUP_KEY$$"; + public final static DatabaseEntry groupKeyEntry = new DatabaseEntry(); + + /* Initialize the entry. */ + static { + StringBinding.stringToEntry(GROUP_KEY, groupKeyEntry); + } + + private final static HashMap lockMap = + new HashMap(); + + /* The fixed DB ID associated with the internal rep group database. */ + public final static long DB_ID = DbTree.NEG_DB_ID_START - 1; + + /* + * Number of times to retry for ACKs on the master before returning to + * to the Replica, which will then again retry on some periodic basis. + */ + private final static int QUORUM_ACK_RETRIES = 5; + + /* Convenience Durability and Config constants. */ + private final static Durability QUORUM_ACK_DURABILITY = + new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.SIMPLE_MAJORITY); + + private final static TransactionConfig QUORUM_ACK = + new TransactionConfig(); + + private final static TransactionConfig NO_ACK = new TransactionConfig(); + + /* + * TODO: Change this when we support true read only transactions. + */ + final static TransactionConfig READ_ONLY = NO_ACK; + + private final static Durability NO_ACK_DURABILITY = + new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.NONE); + + private final static Durability NO_ACK_NO_SYNC_DURABILITY = + new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + + + static { + /* Initialize config constants. */ + QUORUM_ACK.setDurability(QUORUM_ACK_DURABILITY); + NO_ACK.setDurability(NO_ACK_DURABILITY); + } + + /** + * Create an instance. Note that the database handle is not initialized at + * this time, since the state of the node master/replica is not known + * at the time the replication node (and consequently this instance) is + * created. 
+ * @throws DatabaseException + */ + public RepGroupDB(RepImpl repImpl) + throws DatabaseException { + + this.repImpl = repImpl; + + DbConfigManager configManager = repImpl.getConfigManager(); + emptyGroup = new RepGroupImpl(configManager.get(GROUP_NAME), + repImpl.getCurrentJEVersion()); + logger = LoggerUtils.getLogger(getClass()); + } + + /** + * Returns all the members that are currently part of the replication + * group, using NO_CONSISTENCY. This method can read the database directly, + * and can be used when the replicated environment is detached and the + * RepNode is null. It's for the latter reason that the method reads + * uncommitted data. In detached mode, there may be transactions on the + * database that were in progress when the node was last shutdown. These + * transactions may have locks which will not be released until after the + * node is re-attached and the replication stream is resumed. Using + * uncommitted reads avoids use of locks in this circumstance. It's safe to + * read these records, since the database will eventually be updated with + * these changes. + * + * @return the group object + * @throws DatabaseException if the object could not be obtained + */ + public static RepGroupImpl getGroup(RepImpl rImpl, + String groupName) + throws DatabaseException { + + /* Get persistent nodes from the database */ + DatabaseImpl dbImpl = null; + boolean foundDbImpl = false; + try { + dbImpl = rImpl.getGroupDb(); + foundDbImpl = true; + } catch (DatabaseNotFoundException e) { + } + final RepGroupImpl group; + if (!foundDbImpl) { + + /* Creates a temporary placeholder group for use until the real + * definition comes over the replication stream as part of the + * replicated group database. + */ + group = new RepGroupImpl(groupName, true, + rImpl.getCurrentJEVersion()); + + } else { + final TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setDurability(READ_ONLY.getDurability()); + txnConfig.setConsistencyPolicy(NO_CONSISTENCY); + txnConfig.setReadUncommitted(true); + + Txn txn = null; + try { + txn = new ReadonlyTxn(rImpl, txnConfig); + group = fetchGroup(groupName, dbImpl, txn); + + /* + * Correct summary info since we are reading uncommitted data + */ + group.makeConsistent(); + txn.commit(); + txn = null; + } finally { + if (txn != null) { + txn.abort(); + } + } + } + + /* Get nodes w/ transient id from their feeders */ + final RepNode repNode = rImpl.getRepNode(); + if (repNode != null) { + for (final Feeder feeder : + repNode.feederManager().activeReplicasMap().values()) { + final RepNodeImpl node = feeder.getReplicaNode(); + /* RepNodeImpl may be null in a test with a dummy feeder. */ + if (node != null && node.getType().hasTransientId()) { + group.addTransientIdNode(node); + } + } + } + + return group; + } + + public RepGroupImpl getGroup() + throws DatabaseException { + + return getGroup(repImpl, + repImpl.getConfigManager().get(GROUP_NAME)); + } + + /** + * Sets the minimum JE version required for nodes to join the replication + * group and refreshes the group object cached in the rep group. Throws a + * {@link MinJEVersionUnsupportedException} if the requested version is not + * supported by current nodes. + * + *
+ * <p>
        If this method returns successfully, nodes that are running a JE + * version older than the one specified will not be permitted to join the + * replication group in the future. Use this method to implement features + * that require all group members to meet a minimum version requirement. + * + *
+ * <p>
        The update attempts to obtain acknowledgments from a simple majority, + * to make sure that future masters agree that the update has taken place, + * but does not require this. + * + * @param newMinJEVersion the new minimum JE version + * @throws DatabaseException if an error occurs when accessing the + * replication group database + * @throws MinJEVersionUnsupportedException if the requested version is not + * supported + */ + public void setMinJEVersion(final JEVersion newMinJEVersion) + throws DatabaseException, MinJEVersionUnsupportedException { + + final DatabaseImpl groupDbImpl; + try { + groupDbImpl = repImpl.getGroupDb(); + } catch (DatabaseNotFoundException e) { + /* Should never happen. */ + throw EnvironmentFailureException.unexpectedException(e); + } + MasterTxn txn = + new MasterTxn(repImpl, QUORUM_ACK, repImpl.getNameIdPair()); + try { + RepGroupImpl repGroup = + fetchGroupObject(txn, groupDbImpl, LockMode.RMW); + repGroup = fetchGroup(repGroup.getName(), groupDbImpl, txn); + repGroup.setMinJEVersion(newMinJEVersion); + saveGroupObject(txn, repGroup, groupDbImpl); + txn.commit(QUORUM_ACK_DURABILITY); + txn = null; + LoggerUtils.info(logger, repImpl, + "Updated minimum JE group version to " + newMinJEVersion); + } catch (InsufficientAcksException e) { + + /* + * Didn't receive acknowledgments from a simple majority. OK to + * proceed, since this operation will be repeated if the change is + * lost. + */ + LoggerUtils.info(logger, repImpl, + "Proceeding without enough acks, did not update minimum JE " + + "group version to " + newMinJEVersion); + } finally { + if (txn != null) { + txn.abort(); + } + } + repImpl.getRepNode().refreshCachedGroup(); + } + + /** + * All rep group db access uses cursors with eviction disabled. + */ + static private Cursor makeCursor(DatabaseImpl dbImpl, + Txn txn, + CursorConfig cursorConfig) { + Cursor cursor = DbInternal.makeCursor(dbImpl, + txn, + cursorConfig); + DbInternal.getCursorImpl(cursor).setAllowEviction(false); + return cursor; + } + + /** + * Returns a representation of the nodes of the group stored in the + * database, using the txn and handles that were passed in. + */ + private static RepGroupImpl fetchGroup(String groupName, + DatabaseImpl dbImpl, + Txn txn) + throws DatabaseException { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry value = new DatabaseEntry(); + NodeBinding nodeBinding = null; + final GroupBinding groupBinding = new GroupBinding(); + + RepGroupImpl group = null; + Map nodes = + new HashMap(); + final CursorConfig cursorConfig = new CursorConfig(); + cursorConfig.setReadCommitted(true); + + Cursor mcursor = null; + + try { + mcursor = makeCursor(dbImpl, txn, cursorConfig); + while (mcursor.getNext(keyEntry, value, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + + final String key = StringBinding.entryToString(keyEntry); + + if (GROUP_KEY.equals(key)) { + group = groupBinding.entryToObject(value); + if (!group.getName().equals(groupName)) { + throw EnvironmentFailureException.unexpectedState + ("The argument: " + groupName + + " does not match the expected group name: " + + group.getName()); + } + + /* + * The group entry should always be first, so we can use it + * to provide the group version for reading node entries. 
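+ * (Keys compare as unsigned UTF-8 bytes, and '$' (0x24) sorts before
+ * the letters and digits that node names normally start with, so
+ * "$$GROUP_KEY$$" is reliably the first entry this scan returns.)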
+ */ + nodeBinding = new NodeBinding(group.getFormatVersion()); + } else { + if (nodeBinding == null) { + throw new IllegalStateException( + "Found node binding before group binding"); + } + final RepNodeImpl node = nodeBinding.entryToObject(value); + nodes.put(node.getNameIdPair().getId(), node); + } + } + if (group == null) { + throw EnvironmentFailureException.unexpectedState + ("Group key: " + GROUP_KEY + " is missing"); + } + group.setNodes(nodes); + return group; + } finally { + if (mcursor != null) { + mcursor.close(); + } + } + } + + /** + * Ensures that information about this node, the current master, is in the + * member database. If it isn't, enter it into the database. If the + * database does not exist, create it as well. + * + *
+ * <p>
        Note that this overloading is only used by a node that is the master. + * + * @throws DatabaseException + */ + public void addFirstNode() + throws DatabaseException { + + DbConfigManager configManager = repImpl.getConfigManager(); + String groupName = configManager.get(GROUP_NAME); + String nodeName = configManager.get(NODE_NAME); + + DatabaseImpl groupDbImpl = repImpl.createGroupDb(); + + /* setup the group information as data. */ + RepGroupImpl repGroup = + new RepGroupImpl(groupName, repImpl.getCurrentJEVersion()); + GroupBinding groupBinding = + new GroupBinding(repGroup.getFormatVersion()); + DatabaseEntry groupEntry = new DatabaseEntry(); + groupBinding.objectToEntry(repGroup, groupEntry); + + /* Create the common group entry. */ + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setDurability(NO_ACK.getDurability()); + txnConfig.setConsistencyPolicy(NO_CONSISTENCY); + Txn txn = null; + Cursor cursor = null; + try { + txn = new MasterTxn(repImpl, + txnConfig, + repImpl.getNameIdPair()); + + cursor = makeCursor(groupDbImpl, txn, CursorConfig.DEFAULT); + OperationStatus status = cursor.put(groupKeyEntry, groupEntry); + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + ("Couldn't write first group entry " + status); + } + cursor.close(); + cursor = null; + txn.commit(); + txn = null; + } finally { + if (cursor != null) { + cursor.close(); + } + + if (txn != null) { + txn.abort(); + } + } + + ensureMember(new RepNodeImpl(nodeName, + repImpl.getHostName(), + repImpl.getPort(), + repImpl.getCurrentJEVersion())); + } + + /** + * Ensures that the membership info for the replica is in the database. A + * call to this method is initiated by the master as part of the + * feeder/replica handshake, where the replica provides membership + * information as part of the handshake protocol. The membership database + * must already exist, with the master in it, when this method is invoked. + * + *
+ * <p>
        This method should not be called for nodes with transient IDs. + * + * @param membershipInfo provided by the replica + * + * @throws InsufficientReplicasException upon failure of 2p member update + * @throws InsufficientAcksException upon failure of 2p member update + * @throws DatabaseException when the membership info could not be entered + * into the membership database. + */ + public void ensureMember(Protocol.NodeGroupInfo membershipInfo) + throws DatabaseException { + + ensureMember(new RepNodeImpl(membershipInfo)); + } + + void ensureMember(RepNodeImpl ensureNode) + throws DatabaseException { + + if (ensureNode.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Attempt to call ensureMember on " + ensureNode.getType() + + " node: " + ensureNode); + } + DatabaseImpl groupDbImpl; + try { + groupDbImpl = repImpl.getGroupDb(); + } catch (DatabaseNotFoundException e) { + /* Should never happen. */ + throw EnvironmentFailureException.unexpectedException(e); + } + + DatabaseEntry nodeNameKey = new DatabaseEntry(); + StringBinding.stringToEntry(ensureNode.getName(), nodeNameKey); + + DatabaseEntry value = new DatabaseEntry(); + NodeBinding mib = null; + + Txn txn = null; + Cursor cursor = null; + try { + txn = new ReadonlyTxn(repImpl, NO_ACK); + + /* + * Fetch the group so we know the group format version. Read the + * group before reading the node entry in each case to avoid the + * potential of deadlocks caused by reversing the order of lock + * acquisition. + */ + final RepGroupImpl repGroup = + fetchGroupObject(txn, groupDbImpl, LockMode.DEFAULT); + mib = new NodeBinding(repGroup.getFormatVersion()); + + CursorConfig config = new CursorConfig(); + config.setReadCommitted(true); + cursor = makeCursor(groupDbImpl, txn, config); + + OperationStatus status = + cursor.getSearchKey(nodeNameKey, value, null); + if (status == OperationStatus.SUCCESS) { + /* Let's see if the entry needs updating. */ + RepNodeImpl miInDb = mib.entryToObject(value); + if (miInDb.equivalent(ensureNode)) { + if (miInDb.isQuorumAck()) { + /* Present, matched and acknowledged. */ + return; + } + ensureNode.getNameIdPair().update(miInDb.getNameIdPair()); + /* Not acknowledged, retry the update. */ + } else { + /* Present but not equivalent. */ + LoggerUtils.warning(logger, repImpl, + "Incompatible node descriptions. " + + "Membership database definition: " + + miInDb.toString() + + " Transient definition: " + + ensureNode.toString()); + if (ensureNode.getType() != miInDb.getType()) { + throw EnvironmentFailureException.unexpectedState( + "Conflicting node types for node " + + ensureNode.getName() + + ": expected " + ensureNode.getType() + + ", found " + miInDb.getType()); + } + throw EnvironmentFailureException.unexpectedState( + "Incompatible node descriptions for node: " + + ensureNode.getName() + ", node ID: " + + ensureNode.getNodeId()); + } + LoggerUtils.info(logger, repImpl, + "Present but not ack'd node: " + + ensureNode.getNodeId() + + " ack status: " + miInDb.isQuorumAck()); + } + cursor.close(); + cursor = null; + txn.commit(); + txn = null; + } finally { + if (cursor != null) { + cursor.close(); + } + + if (txn != null) { + txn.abort(); + } + + } + createMember(ensureNode); + + /* Refresh group and Fire an ADD GroupChangeEvent. 
*/ + refreshGroupAndNotifyGroupChange + (ensureNode.getName(), GroupChangeType.ADD); + } + + private void refreshGroupAndNotifyGroupChange(String nodeName, + GroupChangeType opType) { + repImpl.getRepNode().refreshCachedGroup(); + repImpl.getRepNode().getMonitorEventManager().notifyGroupChange + (nodeName, opType); + } + + /** + * Removes a node from the replication group by marking the node's entry in + * the rep group db as removed, and optionally deleting the entry. + * + *
+ * <p>
        This method should not be called for nodes with transient IDs. + */ + public void removeMember(final RepNodeImpl removeNode, + final boolean delete) { + LoggerUtils.info(logger, repImpl, + (delete ? "Deleting node: " : "Removing node: ") + + removeNode.getName()); + + if (removeNode.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Attempt to call removeMember on a node with type " + + removeNode.getType() + ": " + removeNode); + } + + TwoPhaseUpdate twoPhaseUpdate = new TwoPhaseUpdate(removeNode, true) { + + @Override + void phase1Body() { + final RepGroupImpl repGroup = + fetchGroupObject(txn, groupDbImpl, LockMode.RMW); + int changeVersion = repGroup.incrementChangeVersion(); + saveGroupObject(txn, repGroup, groupDbImpl); + node.setChangeVersion(changeVersion); + node.setRemoved(true); + saveNodeObject(txn, node, groupDbImpl, repGroup); + } + /** Override phase 2 to delete the node entry if delete is true. */ + @Override + void phase2Body() { + if (!delete) { + super.phase2Body(); + return; + } + final DatabaseEntry nodeNameKey = new DatabaseEntry(); + StringBinding.stringToEntry(removeNode.getName(), nodeNameKey); + final Cursor cursor = + makeCursor(groupDbImpl, txn, CursorConfig.DEFAULT); + try { + final OperationStatus status = cursor.getSearchKey( + nodeNameKey, new DatabaseEntry(), LockMode.RMW); + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState( + "Node ID: " + removeNode.getNameIdPair() + + " not present in group db"); + } + cursor.delete(); + } finally { + cursor.close(); + } + } + }; + + twoPhaseUpdate.execute(); + + /* Refresh group and fire a REMOVE GroupChangeEvent. */ + refreshGroupAndNotifyGroupChange + (removeNode.getName(), GroupChangeType.REMOVE); + + LoggerUtils.info(logger, repImpl, + "Successfully deleted node: " + removeNode.getName()); + } + + /* Add a new rep node into the RepGroupDB. */ + private void createMember(final RepNodeImpl node) + throws InsufficientReplicasException, + InsufficientAcksException, + DatabaseException { + + LoggerUtils.fine + (logger, repImpl, "Adding node: " + node.getNameIdPair()); + + twoPhaseMemberUpdate(node, true); + + LoggerUtils.info(logger, repImpl, + "Successfully added node:" + node.getNameIdPair() + + " HostPort = " + node.getHostName() + ": " + + node.getPort() + " [" + node.getType() + "]"); + } + + /* + * Update a current rep node information in the RepGroupDB. + * + *
+ * <p>
        This method should not be called for nodes with transient IDs. + * + * @param node the new node information + * @param quorumAck whether to require acknowledgments from a quorum + * + * @throws InsufficientReplicasException upon failure of 2p member update + * @throws InsufficientAcksException upon failure of 2p member update + */ + public void updateMember(final RepNodeImpl node, final boolean quorumAck) + throws DatabaseException { + + if (node.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Attempt to call updateMember on a node of type " + + node.getType() + ": " + node); + } + + LoggerUtils.fine(logger, repImpl, "Updating node: " + node); + + twoPhaseMemberUpdate(node, quorumAck); + + // TODO: clean up the Monitor interface. There are several aspects of + // that interface that need fixing; but in particular it ought to have + // a way to inform listeners that a node has moved to a new network + // address. Once that's done, the following should be replaced by a + // full refreshGroupAndNotifyGroupChange(). And actually that + // operation should be done closer to where we know the GroupDB has + // been changed. In particular, if the GroupDB update suffers an IAE, + // the exception blows by the following, even though the database + // actually does now contain the updated value. + // + repImpl.getRepNode().refreshCachedGroup(); + + LoggerUtils.info(logger, repImpl, + "Successfully updated node: " + node.getNameIdPair() + + " Hostport = " + node.getHostName() + ": " + + node.getPort() + " [" + node.getType() + "]"); + } + + /** + * Implements the two phase update of membership information. + * + * In the first phase the master repeatedly tries to commit the "put" + * operation until it gets a Quorum of acks, ensuring that the operation + * has been made durable. Nodes that obtain this entry will start using it + * in elections. However, the node itself will not participate in elections + * until it has successfully completed phase 2. + * + * In the second phase, the entry for the member is updated to note + * that a quorum of acks was received. + * + * Failure leaves the database with the member info absent, or + * present but without the update to quorumAcks indicating that a + * quorum has acknowledged the change. + * + * @param node the member info for the node. + * @param quorumAck whether to require acknowledgments from a quorum + * + * @throws InsufficientReplicasException upon failure of 2p member update + * @throws InsufficientAcksException upon failure of 2p member update + * @throws DatabaseException upon failure. 
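+ *
+ * <p>In outline, the control flow implemented by the TwoPhaseUpdate
+ * below (a sketch of this class's own logic, not additional API):
+ * <pre>
+ *   // Phase 1: make the entry durable on a quorum, retrying a few times.
+ *   for (int i = 0; i &lt; QUORUM_ACK_RETRIES; i++) {
+ *       txn = new MasterTxn(repImpl, QUORUM_ACK, nameIdPair);
+ *       phase1Body();                       // write group + node entries
+ *       txn.commit(QUORUM_ACK_DURABILITY);  // insufficient acks? retry
+ *       break;                              // durable on a quorum
+ *   }
+ *   // Phase 2: record that a quorum acknowledged phase 1.
+ *   txn = new MasterTxn(repImpl, NO_ACK, nameIdPair);
+ *   node.setQuorumAck(true);
+ *   saveNodeObject(txn, node, ...);         // phase2Body()
+ *   txn.commit();
+ * </pre>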
+ */ + private void twoPhaseMemberUpdate(final RepNodeImpl node, + final boolean quorumAck) + throws DatabaseException { + + TwoPhaseUpdate twoPhaseUpdate = new TwoPhaseUpdate(node, quorumAck) { + int saveOrigId = node.getNameIdPair().getId(); + + @Override + void phase1Body() { + RepGroupImpl repGroup = + fetchGroupObject(txn, groupDbImpl, LockMode.RMW); + repGroup = fetchGroup(repGroup.getName(), groupDbImpl, txn); + int changeVersion = repGroup.incrementChangeVersion(); + if (node.getNameIdPair().hasNullId()) { + node.getNameIdPair().setId(repGroup.getNextNodeId()); + } + repGroup.checkForConflicts(node); + saveGroupObject(txn, repGroup, groupDbImpl); + node.setChangeVersion(changeVersion); + final RepNodeImpl existingNode = + repGroup.getNode(node.getName()); + if ((existingNode != null) && (node.getJEVersion() == null)) { + node.updateJEVersion(existingNode.getJEVersion()); + } + saveNodeObject(txn, node, groupDbImpl, repGroup); + } + + @Override + void deadlockHandler() { + node.getNameIdPair().setId(saveOrigId, false); + } + + @Override + void insufficientReplicasHandler() { + node.getNameIdPair().setId(saveOrigId, false); + } + }; + + twoPhaseUpdate.execute(); + } + + /** + * This method is not used when the CBVLSN is defunct -- see GlobalCBVLSN. + * This method was not moved to GlobalCBVLSN to avoid modularity problems. + * + * Updates the database entry associated with the node with the new local + * CBVLSN, if it can do so without encountering lock contention, and unless + * the node is a secondary, arbiter, or external node. Also updates the + * rep node's transient group information about the global CBVLSN. If it + * encounters contention, it returns false, and the caller must retry at + * some later point in time. + * + * Note that changes to the local CBVLSN do not update the group version + * number since they do not impact group membership. + * + * @param nameIdPair identifies the node being updated + * @param newCBVLSN the new local CBVLSN to be associated with the node. + * @param nodeType the node type of the RepNode + * @return true if the update succeeded. + * @throws DatabaseException + */ + public boolean updateLocalCBVLSN(final NameIdPair nameIdPair, + final VLSN newCBVLSN, + final NodeType nodeType) + throws DatabaseException { + + DatabaseImpl groupDbImpl = null; + try { + groupDbImpl = repImpl.probeGroupDb(); + } catch (DatabaseException e) { + /* Contention on the groupDbImpl, try later. */ + return false; + } + + if (groupDbImpl == null) { + /* Contention on the groupDbImpl, try later. */ + return false; + } + + DatabaseEntry nodeNameKey = new DatabaseEntry(); + StringBinding.stringToEntry(nameIdPair.getName(), nodeNameKey); + DatabaseEntry value = new DatabaseEntry(); + final CleanerBarrierState barrierState = + new CleanerBarrierState(newCBVLSN, System.currentTimeMillis()); + Txn txn = null; + Cursor cursor = null; + boolean ok = false; + try { + + /* + * No database update for secondary, arbiter, or external nodes, + * but set ok to true so that the rep node's group information is + * updated. + */ + if (nodeType.isSecondary() || nodeType.isArbiter() || + nodeType.isExternal()) { + ok = true; + return true; + } + + final TransactionConfig txnConfig = new TransactionConfig(); + + txnConfig.setDurability(NO_ACK_NO_SYNC_DURABILITY); + /* + * Don't wait for locks. It's ok to miss an update because we could + * not acquire the lock, since the operation will be retried later + * by a subsequent heartbeat message. 
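+ * With setNoWait(true), a lock that cannot be granted immediately
+ * fails with LockNotAvailableException (a LockConflictException
+ * subclass) instead of blocking; the update is then simply attempted
+ * again on a later heartbeat.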
+ */ + txnConfig.setNoWait(true); + txn = new MasterTxn(repImpl, + txnConfig, + repImpl.getNameIdPair()); + + /* Read the group first to avoid deadlocks */ + final RepGroupImpl repGroup = + fetchGroupObject(txn, groupDbImpl, LockMode.DEFAULT); + cursor = makeCursor(groupDbImpl, txn, CursorConfig.DEFAULT); + + OperationStatus status = + cursor.getSearchKey(nodeNameKey, value, LockMode.RMW); + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + ("Node ID: " + nameIdPair + " not present in group db"); + } + + /* Let's see if the entry needs updating. */ + final NodeBinding nodeBinding = + new NodeBinding(repGroup.getFormatVersion()); + final RepNodeImpl node = nodeBinding.entryToObject(value); + final VLSN lastCBVLSN = node.getBarrierState().getLastCBVLSN(); + if (lastCBVLSN.equals(newCBVLSN)) { + ok = true; + return true; + } + + node.setBarrierState(barrierState); + nodeBinding.objectToEntry(node, value); + status = cursor.putCurrent(value); + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + ("Node ID: " + nameIdPair + + " stored localCBVLSN could not be updated. Status: " + + status); + } + LoggerUtils.fine(logger, repImpl, + "Local CBVLSN updated to " + newCBVLSN + + " for node " + nameIdPair); + ok = true; + } finally { + if (cursor != null) { + cursor.close(); + } + + if (txn != null) { + if (ok) { + txn.commit(NO_ACK_NO_SYNC_DURABILITY); + } else { + txn.abort(); + } + txn = null; + } + if (ok) { + /* RepNode may be null during shutdown. [#17424] */ + RepNode repNode = repImpl.getRepNode(); + if (repNode != null) { + repNode.updateGroupInfo(nameIdPair, barrierState); + } + } + } + + return true; + } + + /* + * Returns just the de-serialized special rep group object from the + * database, using the specified lock mode. + */ + private RepGroupImpl fetchGroupObject(final Txn txn, + final DatabaseImpl groupDbImpl, + final LockMode lockMode) + throws DatabaseException { + + RepGroupDB.GroupBinding groupBinding = new RepGroupDB.GroupBinding(); + DatabaseEntry groupEntry = new DatabaseEntry(); + + Cursor cursor = null; + try { + cursor = makeCursor(groupDbImpl, txn, CursorConfig.DEFAULT); + + final OperationStatus status = + cursor.getSearchKey(groupKeyEntry, groupEntry, lockMode); + + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + ("Group entry key: " + GROUP_KEY + + " missing from group database"); + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + + return groupBinding.entryToObject(groupEntry); + } + + /* + * Saves the rep group in the database. + */ + private void saveGroupObject(Txn txn, + RepGroupImpl repGroup, + DatabaseImpl groupDbImpl) + throws DatabaseException { + + final GroupBinding groupBinding = + new GroupBinding(repGroup.getFormatVersion()); + DatabaseEntry groupEntry = new DatabaseEntry(); + groupBinding.objectToEntry(repGroup, groupEntry); + + Cursor cursor = null; + try { + cursor = makeCursor(groupDbImpl, txn, CursorConfig.DEFAULT); + + OperationStatus status = cursor.put(groupKeyEntry, groupEntry); + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + ("Group entry save failed"); + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + } + + /* + * Save a ReplicationNode in the database, using the format version + * specified by the group. 
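+ * The binding round trip is symmetric; schematically:
+ *
+ *   NodeBinding binding = new NodeBinding(repGroup.getFormatVersion());
+ *   DatabaseEntry entry = new DatabaseEntry();
+ *   binding.objectToEntry(node, entry);               // serialize
+ *   RepNodeImpl copy = binding.entryToObject(entry);  // deserialize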
+ */ + private void saveNodeObject(Txn txn, + RepNodeImpl node, + DatabaseImpl groupDbImpl, + RepGroupImpl repGroup) + throws DatabaseException { + + assert !node.getType().hasTransientId(); + + DatabaseEntry nodeNameKey = new DatabaseEntry(); + StringBinding.stringToEntry(node.getName(), nodeNameKey); + + final NodeBinding nodeBinding = + new NodeBinding(repGroup.getFormatVersion()); + DatabaseEntry memberInfoEntry = new DatabaseEntry(); + nodeBinding.objectToEntry(node, memberInfoEntry); + + Cursor cursor = null; + try { + cursor = makeCursor(groupDbImpl, txn, CursorConfig.DEFAULT); + + OperationStatus status = cursor.put(nodeNameKey, memberInfoEntry); + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + ("Group entry save failed"); + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + } + + /** + * Converts a numeric version string to a JEVersion, returning null for an + * empty string. + */ + static JEVersion parseJEVersion(final String versionString) { + return versionString.isEmpty() ? + null : + new JEVersion(versionString); + } + + /** + * Converts a JEVersion to a numeric version string, returning an empty + * string for null. + */ + static String jeVersionString(final JEVersion jeVersion) { + return (jeVersion == null) ? + "" : + jeVersion.getNumericVersionString(); + } + + /** + * RepGroupImpl version 3: Add the minJEVersion field + */ + public static class GroupBinding extends TupleBinding { + + /** + * The rep group format version to use for writing, or -1 for reading. + */ + private final int writeFormatVersion; + + /** Create an instance for reading. */ + public GroupBinding() { + writeFormatVersion = -1; + } + + /** + * Create an instance for writing using the specified group format + * version. + */ + GroupBinding(final int writeFormatVersion) { + if (writeFormatVersion < 0) { + throw new IllegalArgumentException( + "writeFormatVersion must be non-negative: " + + writeFormatVersion); + } + this.writeFormatVersion = writeFormatVersion; + } + + @Override + public RepGroupImpl entryToObject(TupleInput input) { + if (writeFormatVersion >= 0) { + throw new IllegalStateException( + "GroupBinding not created for read"); + } + final String name = input.readString(); + final UUID uuid = new UUID(input.readLong(), input.readLong()); + final int formatVersion = input.readInt(); + return new RepGroupImpl( + name, + uuid, + formatVersion, + input.readInt(), + input.readInt(), + ((formatVersion < RepGroupImpl.FORMAT_VERSION_3) ? + RepGroupImpl.MIN_FORMAT_VERSION_JE_VERSION : + parseJEVersion(input.readString()))); + } + + @Override + public void objectToEntry(RepGroupImpl group, TupleOutput output) { + if (writeFormatVersion < 0) { + throw new IllegalStateException( + "GroupBinding not created for write"); + } + output.writeString(group.getName()); + output.writeLong(group.getUUID().getMostSignificantBits()); + output.writeLong(group.getUUID().getLeastSignificantBits()); + output.writeInt(writeFormatVersion); + output.writeInt(group.getChangeVersion()); + output.writeInt(group.getNodeIdSequence()); + if (writeFormatVersion >= RepGroupImpl.FORMAT_VERSION_3) { + output.writeString(jeVersionString(group.getMinJEVersion())); + } + } + } + + /** + * Supports the serialization/deserialization of node info into and out of + * the database. 
Nodes are always saved using the current group format + * version, and the node's format version is checked on reading to make + * sure it is not newer than the current group format version, although + * they could have an older format version if they have not been saved + * recently. + * + *
+ * <p>
        Prior to RepGroupImpl version 3, the second field was always the + * ordinal value of the node type, which was either 0 or 1. Starting with + * version 3, values greater than 1 are treated as the rep group version of + * the format used to write the node binding, with the node type following + * in the next field, and the jeVersion field added at the end. + */ + public static class NodeBinding extends TupleBinding { + + /** The approximate maximum size of the serialized form. */ + static final int APPROX_MAX_SIZE = + 40 + /* node name (guess) */ + 4 + /* node ID */ + 1 + /* group version */ + 1 + /* NodeType */ + 1 + /* quorumAck */ + 1 + /* isRemoved */ + 40 + /* hostName (guess) */ + 4 + /* port */ + 8 + /* lastCBVLSN */ + 8 + /* barrierTime */ + 4 + /* changeVersion */ + 10; /* jeVersion (approx) */ + + /** The maximum node type value for version 2. */ + private static final int V2_MAX_NODE_TYPE = 1; + + /** The group format version to use for reading or writing. */ + private final int groupFormatVersion; + + /** + * Create an instance for reading or writing using the specified group + * format version. + */ + public NodeBinding(final int groupFormatVersion) { + this.groupFormatVersion = groupFormatVersion; + } + + @Override + public RepNodeImpl entryToObject(final TupleInput input) { + final NameIdPair nameIdPair = NameIdPair.deserialize(input); + final int versionOrNodeType = input.readByte(); + final boolean v2 = (versionOrNodeType <= V2_MAX_NODE_TYPE); + if (!v2 && (versionOrNodeType > groupFormatVersion)) { + throw new IllegalStateException( + "Node entry version " + versionOrNodeType + " for node " + + nameIdPair.getId() + + " is illegal because it is newer than group version " + + groupFormatVersion); + } + final int nodeTypeNum = v2 ? versionOrNodeType : input.readByte(); + return new RepNodeImpl( + nameIdPair, + NodeType.values()[nodeTypeNum], + input.readBoolean(), + input.readBoolean(), + input.readString(), + input.readInt(), + new CleanerBarrierState(new VLSN(input.readLong()), + input.readLong()), + input.readInt(), + v2 ? null : parseJEVersion(input.readString())); + } + + /** + * Returns whether the node can be serialized using the specified group + * format version. 
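+ *
+ * <p>A caller can guard serialization with this check, along the
+ * lines of:
+ * <pre>
+ *   if (NodeBinding.supportsObjectToEntry(node, groupFormatVersion)) {
+ *       new NodeBinding(groupFormatVersion).objectToEntry(node, entry);
+ *   }
+ * </pre>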
+ */ + public static boolean supportsObjectToEntry( + final RepNodeImpl node, + final int groupFormatVersion) { + + /* Version 2 supports a limited set of node types */ + return ((groupFormatVersion > RepGroupImpl.FORMAT_VERSION_2) || + (node.getType().compareTo(NodeType.ELECTABLE) <= 0)); + } + + @Override + public void objectToEntry(final RepNodeImpl mi, + final TupleOutput output) { + if (!supportsObjectToEntry(mi, groupFormatVersion)) { + throw new IllegalArgumentException( + "Node type " + mi.getType() + + " is not supported for group version " + + groupFormatVersion); + } + final boolean v2 = + (groupFormatVersion <= RepGroupImpl.FORMAT_VERSION_2); + final CleanerBarrierState syncState = mi.getBarrierState(); + mi.getNameIdPair().serialize(output); + if (!v2) { + output.writeByte(groupFormatVersion); + } + output.writeByte(mi.getType().ordinal()); + output.writeBoolean(mi.isQuorumAck()); + output.writeBoolean(mi.isRemoved()); + output.writeString(mi.getHostName()); + output.writeInt(mi.getPort()); + output.writeLong(syncState.getLastCBVLSN().getSequence()); + output.writeLong(syncState.getBarrierTime()); + output.writeInt(mi.getChangeVersion()); + if (!v2) { + output.writeString(jeVersionString(mi.getJEVersion())); + } + } + } + + /** + * Implements two phase updates for membership changes to the group + * database. It compartmentalizes the retry operations and exception + * handling so that it's independent of the core logic. + */ + private abstract class TwoPhaseUpdate { + + final RepNodeImpl node; + final boolean quorumAck; + final DatabaseImpl groupDbImpl; + + protected Txn txn; + private DatabaseException phase1Exception = null; + + TwoPhaseUpdate(final RepNodeImpl node, final boolean quorumAck) { + this.node = node; + this.quorumAck = quorumAck; + try { + groupDbImpl = repImpl.getGroupDb(); + } catch (DatabaseNotFoundException e) { + /* Should never happen. */ + throw EnvironmentFailureException.unexpectedException(e); + } + } + + /* Phase1 exception handlers for phase1Body-specific cleanup */ + void insufficientReplicasHandler() {} + + void deadlockHandler() {} + + /* The changes to be made in phase1 */ + abstract void phase1Body(); + + /* The changes to be made in phase2. */ + void phase2Body() { + node.setQuorumAck(true); + final RepGroupImpl repGroup = + fetchGroupObject(txn, groupDbImpl, LockMode.DEFAULT); + saveNodeObject(txn, node, groupDbImpl, repGroup); + } + + private void phase1() + throws DatabaseException { + + for (int i = 0; i < QUORUM_ACK_RETRIES; i++ ) { + txn = null; + try { + txn = new MasterTxn(repImpl, + quorumAck ? QUORUM_ACK : NO_ACK, + repImpl.getNameIdPair()); + phase1Body(); + txn.commit( + quorumAck ? QUORUM_ACK_DURABILITY : NO_ACK_DURABILITY); + txn = null; + return; + } catch (InsufficientReplicasException e) { + phase1Exception = e; + insufficientReplicasHandler(); + /* Commit was aborted. */ + LoggerUtils.warning(logger, repImpl, + "Phase 1 retry; for node: " + + node.getName() + + " insufficient active replicas: " + + e.getMessage()); + continue; + } catch (InsufficientAcksException e) { + phase1Exception = e; + /* Local commit completed but did not get enough acks. */ + LoggerUtils.warning(logger, repImpl, + "Phase 1 retry; for node: " + + node.getName() + + " insufficient acks: " + + e.getMessage()); + continue; + } catch (LockConflictException e) { + /* Likely a timeout, can't distinguish between them. 
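+ * LockConflictException is the common superclass JE uses for both
+ * deadlock and lock-timeout failures, so this handler treats the two
+ * uniformly and retries phase 1.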
*/ + phase1Exception = e; + deadlockHandler(); + LoggerUtils.warning(logger, repImpl, + "Phase 1 retry; for node: " + + node.getName() + + " deadlock exception: " + + e.getMessage()); + continue; + } catch (DatabaseException e) { + LoggerUtils.severe(logger, repImpl, + "Phase 1 failed unexpectedly: " + + e.getMessage()); + if (txn != null) { + txn.abort(); + } + throw e; + } finally { + if (txn != null) { + txn.abort(); + } + } + } + LoggerUtils.warning(logger, + repImpl, + "Phase 1 failed: " + + phase1Exception.getMessage()); + throw phase1Exception; + } + + private void phase2() { + try { + txn = new MasterTxn(repImpl, NO_ACK, repImpl.getNameIdPair()); + phase2Body(); + txn.commit(); + txn = null; + } catch (DatabaseException e) { + LoggerUtils.severe(logger, repImpl, + "Unexpected failure in Phase 2: " + + e.getMessage()); + throw e; + } finally { + if (txn != null) { + txn.abort(); + } + } + } + + void execute() { + Object lock; + synchronized(lockMap) { + lock = lockMap.get(node.getName()); + if (lock == null) { + lock = new Object(); + lockMap.put(node.getName(), lock); + } + } + synchronized(lock) { + phase1(); + /* Only executed if phase 1 succeeds. */ + phase2(); + } + } + } + + /** + * An internal API used to obtain group information by opening a stand + * alone environment handle and reading the RepGroupDB. Used for debugging + * and utilities. + * + * @param envDir the directory containing the environment log files + * + * @return the group as currently defined by the environment + */ + public static RepGroupImpl getGroup(final File envDir) { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setReadOnly(true); + envConfig.setTransactional(true); + envConfig.setAllowCreate(false); + Environment env = new Environment(envDir, envConfig); + Transaction txn = null; + Database db = null; + try { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(false); + txn = env.beginTransaction(null, null); + db = env.openDatabase(txn, DbType.REP_GROUP.getInternalName(), + dbConfig); + + DatabaseEntry groupEntry = new DatabaseEntry(); + OperationStatus status = db.get( + txn, groupKeyEntry, groupEntry, LockMode.READ_COMMITTED); + if (status != OperationStatus.SUCCESS) { + throw new IllegalStateException + ("Group entry not found " + status); + } + GroupBinding groupBinding = new GroupBinding(); + RepGroupImpl group = groupBinding.entryToObject(groupEntry); + + group = fetchGroup(group.getName(), + DbInternal.getDbImpl(db), + DbInternal.getTxn(txn)); + txn.commit(); + txn = null; + return group; + } finally { + if (txn != null) { + txn.abort(); + } + if (db != null) { + db.close(); + } + env.close(); + } + } + + /** + * Deletes all the current members from the rep group database and creates + * a new group, with just the member supplied via the configuration. This + * method exists to support the utility {@link DbResetRepGroup} + *

        + * The changes proceed in three steps: + * + * 1) Determine the node id sequence number. This is to ensure that rep + * node ids are not reused. Old rep node ids are present in the logs as + * commit records. + * + * 2) A new group object, with the node id sequence number determined + * in step 1), is created and all existing nodes are deleted. + * + * 3) The first node is added to the rep group. + * + * @param lastOldVLSN the VLSN used to associate the new barrier wrt this + * node. + */ + public void reinitFirstNode(VLSN lastOldVLSN) { + + DbConfigManager configManager = repImpl.getConfigManager(); + String groupName = configManager.get(GROUP_NAME); + String nodeName = configManager.get(NODE_NAME); + String hostPortPair = configManager.get(RepParams.NODE_HOST_PORT); + String hostname = HostPortPair.getHostname(hostPortPair); + int port = HostPortPair.getPort(hostPortPair); + final boolean retainUUID = + configManager.getBoolean(RESET_REP_GROUP_RETAIN_UUID); + + final DatabaseImpl dbImpl = repImpl.getGroupDb(); + + /* + * Retrieve the previous rep group object, so we can use its node + * sequence id. + */ + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setDurability(NO_ACK.getDurability()); + txnConfig.setConsistencyPolicy(NO_CONSISTENCY); + + NameIdPair nameIdPair = repImpl.getRepNode().getNameIdPair(); + nameIdPair.revertToNull(); /* read transaction, so null id is ok. */ + + /* Now delete old nodes and the group, and establish a new group */ + Txn txn = new MasterTxn(repImpl, txnConfig, nameIdPair); + RepGroupImpl prevRepGroup = + fetchGroupObject(txn, dbImpl, LockMode.RMW); + txn.commit(); + + final int nodeIdSequenceStart = prevRepGroup.getNodeIdSequence(); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry value = new DatabaseEntry(); + + /* + * We have the "predicted" real node id, so set it and it will be used + * in the commit lns that will be written in future. + */ + final int firstNodeId = nodeIdSequenceStart + 1; + nameIdPair.setId(firstNodeId); + + RepNodeImpl firstNode = new RepNodeImpl( + nodeName, hostname, port, repImpl.getCurrentJEVersion()); + final CleanerBarrierState barrierState = + new CleanerBarrierState(lastOldVLSN, System.currentTimeMillis()); + firstNode.setBarrierState(barrierState); + + txn = new MasterTxn(repImpl, txnConfig, nameIdPair); + + final CursorConfig cursorConfig = new CursorConfig(); + cursorConfig.setReadCommitted(true); + Cursor mcursor = makeCursor(dbImpl, txn, cursorConfig); + + while (mcursor.getNext(keyEntry, value, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + final String key = StringBinding.entryToString(keyEntry); + + if (GROUP_KEY.equals(key)) { + final RepGroupImpl repGroup; + if (retainUUID) { + repGroup = new GroupBinding().entryToObject(value); + repGroup.incrementChangeVersion(); + } else { + repGroup = new RepGroupImpl( + groupName, repImpl.getCurrentJEVersion()); + } + GroupBinding groupBinding = + new GroupBinding(repGroup.getFormatVersion()); + repGroup.setNodeIdSequence(nodeIdSequenceStart); + DatabaseEntry groupEntry = new DatabaseEntry(); + groupBinding.objectToEntry(repGroup, groupEntry); + OperationStatus status = mcursor.putCurrent(groupEntry); + if (!OperationStatus.SUCCESS.equals(status)) { + throw new IllegalStateException("Unexpected state:" + + status); + } + } else { + LoggerUtils.info(logger, repImpl, "Removing node: " + key); + mcursor.delete(); + } + } + mcursor.close(); + txn.commit(); + + /* Now add the first node of the new group. 
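+         * ensureMember() writes the node to the rep group database; the
+         * check that follows verifies that it was assigned the node ID
+         * predicted from the saved sequence above.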
*/ + ensureMember(firstNode); + if (firstNodeId != firstNode.getNodeId()) { + throw new IllegalStateException("Expected nodeid:" + firstNodeId + + " but found:" + + firstNode.getNodeId()); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/RepGroupImpl.java b/src/com/sleepycat/je/rep/impl/RepGroupImpl.java new file mode 100644 index 0000000..9e4ac09 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepGroupImpl.java @@ -0,0 +1,1441 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import java.net.InetSocketAddress; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.impl.RepGroupDB.NodeBinding; + +/** + * Represents a snapshot of the Replication Group as a whole. Note that + * membership associated with a group is dynamic and its constituents can + * change at any time. It's useful to keep in mind that due to the distributed + * nature of the Replication Group all the nodes in a replication group may not + * have the same consistent picture of the replication group at a single point + * in time, but will converge to become consistent eventually. + */ +public class RepGroupImpl { + + /** The latest supported format version. */ + public static final int MAX_FORMAT_VERSION = 3; + + /** + * Format version introduced in JE 6.0.1 that records a node's most recent + * JE version, and the minimum JE version required to join the group. + */ + public static final int FORMAT_VERSION_3 = 3; + + /** + * The latest format version that is compatible with JE 6.0.0 and earlier + * versions. + */ + public static final int FORMAT_VERSION_2 = 2; + + /** The initial format version for newly created RepGroupImpl instances. */ + public static final int INITIAL_FORMAT_VERSION = 3; + + /** The oldest supported format version. */ + static final int MIN_FORMAT_VERSION = 2; + + /** The first JE version that supports FORMAT_VERSION_3. */ + public static final JEVersion FORMAT_VERSION_3_JE_VERSION = + new JEVersion("6.0.1"); + + /** + * The first JE version that supports the oldest supported format version. + */ + public static final JEVersion MIN_FORMAT_VERSION_JE_VERSION = + new JEVersion("5.0.0"); + + /** The initial change version. */ + private final static int CHANGE_VERSION_START = 0; + + /* + * The special UUID associated with a group, when the group UUID is unknown + * because a node is still in the process of joining the group. This value + * cannot be created by UUID.randomUUID + */ + private final static UUID UNKNOWN_UUID = new UUID(0, 0); + + /** + * The maximum number of nodes with transient ID that can join the group at + * the same time time. 
This number of transient id node IDs will be + * reserved at the top of the node ID range. + */ + public static final int MAX_NODES_WITH_TRANSIENT_ID = 1024; + + /** The first node ID for persistent nodes. */ + private static final int NODE_SEQUENCE_START = 0; + + /** The maximum node ID for persistent nodes. */ + private static final int NODE_SEQUENCE_MAX = + Integer.MAX_VALUE - MAX_NODES_WITH_TRANSIENT_ID; + + /** Returns true if the node is electable. */ + private static final Predicate ELECTABLE_PREDICATE = new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return n.getType().isElectable(); + } + }; + + /** Returns true if the node is a monitor. */ + private static final Predicate MONITOR_PREDICATE = new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return n.getType().isMonitor(); + } + }; + + /** Returns true if the node is secondary. */ + private static final Predicate SECONDARY_PREDICATE = new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return n.getType().isSecondary(); + } + }; + + /** Returns true if the node is external. */ + private static final Predicate EXTERNAL_PREDICATE = new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return n.getType().isExternal(); + } + }; + + /** Returns true if the node can return acks but is not an Arbiter. */ + private static final Predicate ACK_PREDICATE = new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return n.getType().isElectable() && !n.getType().isArbiter(); + } + }; + + /** Returns true if the node is an arbiter. */ + private static final Predicate ARBITER_PREDICATE = new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return n.getType().isArbiter(); + } + }; + + /* The name of the Replication Group. */ + private final String groupName; + + /* + * The universally unique UUID associated with the replicated environment. + */ + private UUID uuid; + + /* + * The version number associated with this group's format in the database. + */ + private volatile int formatVersion; + + /* + * Tracks the change version level. It's updated with every change to the + * member set in the membership database. + */ + private int changeVersion = 0; + + /* + * The most recently assigned node ID for persistent nodes. Node IDs for + * persistent nodes are never reused. + */ + private int nodeIdSequence; + + /* + * The following maps represent the set of nodes in the group indexed in + * two different ways: by user-defined node name and by internal id. Note + * that both maps contain nodes that are no longer members of the group. + * + * All access to nodesById and nodesByName should be synchronized on + * nodesById, to avoid ConcurrentModificationException and to provide + * consistent results for both maps. + */ + + /* All the nodes that form the replication group, indexed by Id. */ + private final Map nodesById = + new HashMap(); + + /* + * All the nodes that form the replication group, indexed by node name. + * This map is used exclusively for efficient lookups by name. The map + * nodesById does all the heavy lifting. + */ + private final Map nodesByName = + new HashMap(); + + /** The minimum JE version required for nodes to join the group. */ + private volatile JEVersion minJEVersion = MIN_FORMAT_VERSION_JE_VERSION; + + /** + * Constructor to create a new empty repGroup, typically as part of + * environment initialization. 
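+     * <p>
+     * For example (hypothetical usage), passing a null
+     * {@code currentJEVersion} selects the newest supported format, so
+     * {@code new RepGroupImpl("g", null)} creates a group using
+     * {@code MAX_FORMAT_VERSION}.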
+ * + * @param groupName the group name + * @param currentJEVersion if non-null, override the current JE version, + * for testing + */ + public RepGroupImpl(String groupName, JEVersion currentJEVersion) { + this(groupName, false, currentJEVersion); + } + + /** + * Constructor to create a group and specify if the group's UUID should be + * unknown or generated randomly. + */ + public RepGroupImpl(String groupName, + boolean unknownUUID, + JEVersion currentJEVersion) { + this(groupName, + unknownUUID ? UNKNOWN_UUID : UUID.randomUUID(), + getCurrentFormatVersion(currentJEVersion)); + } + + /** Get the current format version, supporting a test override. */ + private static int getCurrentFormatVersion( + final JEVersion currentJEVersion) { + + return (currentJEVersion == null) ? + MAX_FORMAT_VERSION : + getMaxFormatVersion(currentJEVersion); + } + + /** + * Constructor to create a group and specify the group's UUID and format + * version. + */ + public RepGroupImpl(String groupName, UUID uuid, int formatVersion) { + this(groupName, + uuid, + formatVersion, + CHANGE_VERSION_START, + NODE_SEQUENCE_START, + ((formatVersion < FORMAT_VERSION_3) ? + MIN_FORMAT_VERSION_JE_VERSION : + FORMAT_VERSION_3_JE_VERSION)); + } + + /** + * Constructor used to recreate an existing RepGroup, typically as part of + * a deserialization operation. + * + * @param groupName + * @param uuid + * @param formatVersion + * @param changeVersion + * @param minJEVersion + */ + public RepGroupImpl(String groupName, + UUID uuid, + int formatVersion, + int changeVersion, + int nodeIdSequence, + JEVersion minJEVersion) { + this.groupName = groupName; + this.uuid = uuid; + this.formatVersion = formatVersion; + this.changeVersion = changeVersion; + setNodeIdSequence(nodeIdSequence); + this.minJEVersion = minJEVersion; + + if (formatVersion < MIN_FORMAT_VERSION || + formatVersion > MAX_FORMAT_VERSION) { + throw new IllegalStateException( + "Expected membership database format version between: " + + MIN_FORMAT_VERSION + " and " + MAX_FORMAT_VERSION + + ", encountered unsupported version: " + formatVersion); + } + if (minJEVersion == null) { + throw new IllegalArgumentException( + "The minJEVersion must not be null"); + } + } + + /* + * Returns true if the UUID has not as yet been established at this node. + * This is the case when a knew node first joins a group, and it has not + * as yet replicated the group database via the replication stream. + */ + public boolean hasUnknownUUID() { + return UNKNOWN_UUID.equals(uuid); + } + + /** + * Predicate to help determine whether the UUID is the canonical unknown + * UUID. + */ + public static boolean isUnknownUUID(UUID uuid) { + return UNKNOWN_UUID.equals(uuid); + } + + /** + * Sets the UUID. The UUID can only be set if it's currently unknown. + */ + public void setUUID(UUID uuid) { + if (!hasUnknownUUID()) { + throw EnvironmentFailureException.unexpectedState + ("Expected placeholder UUID, not " + uuid); + } + this.uuid = uuid; + } + + /** + * Removes a member transiently from the rep group by marking it as removed + * and optionally deleting it from the by-name and by-ID maps. This action + * is usually a precursor to making the change persistent on disk. 
+ * + * @param nodeName identifies the node being removed + * + * @param delete whether to delete the node from the maps + * + * @return the node that was removed + * + * @throws EnvironmentFailureException if the node is not part of the group + * or is a node with a transient ID + */ + public RepNodeImpl removeMember(final String nodeName, + final boolean delete) { + final RepNodeImpl node = getMember(nodeName); + if (node == null) { + throw EnvironmentFailureException.unexpectedState + ("Node:" + nodeName + " is not a member of the group."); + } + if (node.getType().hasTransientId()) { + throw EnvironmentFailureException.unexpectedState( + "Cannot remove node with transient id: " + nodeName); + } + if (delete) { + synchronized (nodesById) { + nodesById.remove(node.getNodeId()); + nodesByName.remove(nodeName); + } + } + node.setRemoved(true); + return node; + } + + /** + * Checks for whether a new or changed node definition is in conflict with + * other members of the group. In particular, checks that the specified + * node does not use the same socket address as another member. + *
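+     * For example, a new node configured as host1:5001 conflicts with any
+     * existing member already registered at host1:5001, even if the two
+     * have different node names.
+     *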

        + * This check must be done when adding a new member to the group, or + * changing the network address of an existing member, and must be done + * with the rep group entry in the database locked for write to prevent + * race conditions. + * + * @param node the new node that is being checked for conflicts + * @throws NodeConflictException if there is a conflict + */ + public void checkForConflicts(RepNodeImpl node) + throws DatabaseException, NodeConflictException { + + for (RepNodeImpl n : getAllMembers(null)) { + if (n.getNameIdPair().equals(node.getNameIdPair())) { + continue; + } + if (n.getSocketAddress().equals(node.getSocketAddress())) { + throw new NodeConflictException + ("New or moved node:" + node.getName() + + ", is configured with the socket address: " + + node.getSocketAddress() + + ". It conflicts with the socket already " + + "used by the member: " + n.getName()); + } + } + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + changeVersion; + result = prime * result + + ((groupName == null) ? 0 : groupName.hashCode()); + synchronized (nodesById) { + result = prime * result + nodesById.hashCode(); + } + /* Don't bother with nodesByName */ + result = prime * result + + ((uuid == null) ? 0 : uuid.hashCode()); + result = prime * result + formatVersion; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof RepGroupImpl)) { + return false; + } + RepGroupImpl other = (RepGroupImpl) obj; + if (changeVersion != other.changeVersion) { + return false; + } + if (groupName == null) { + if (other.groupName != null) { + return false; + } + } else if (!groupName.equals(other.groupName)) { + return false; + } + /* Don't bother with nodesByName, since nodesById equality covers it */ + if (uuid == null) { + if (other.uuid != null) { + return false; + } + } else if (!uuid.equals(other.uuid)) { + return false; + } + if (formatVersion != other.formatVersion) { + return false; + } + if (!minJEVersion.equals(other.minJEVersion)) { + return false; + } + + /* + * Do this last, since it is expensive because of its need to avoid + * concurrency conflicts. + */ + final Map otherNodesById; + synchronized (other.nodesById) { + otherNodesById = + new HashMap(other.nodesById); + } + synchronized (nodesById) { + if (!nodesById.equals(otherNodesById)) { + return false; + } + } + + return true; + } + + /** + * Sets the nodes associated with the Rep group. Note that both nodesById + * and nodesByIndex are initialized. + */ + public void setNodes(final Map nodes) { + + synchronized (nodesById) { + + /* Remove nodes with persistent id */ + for (final Iterator iter = + nodesById.values().iterator(); + iter.hasNext(); ) { + final RepNodeImpl node = iter.next(); + if (!node.getType().hasTransientId()) { + iter.remove(); + nodesByName.remove(node.getName()); + } + } + + /* Add specified nodes */ + if (nodes != null) { + for (final RepNodeImpl node : nodes.values()) { + final RepNodeImpl prevById = + nodesById.put(node.getNodeId(), node); + final RepNodeImpl prevByName = + nodesByName.put(node.getName(), node); + + /* + * Also remove entries for any previous nodes if the + * mapping between names and IDs was changed. 
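+                     * (for example, a node kept its ID but was renamed,
+                     * or a name was reassigned to a different node ID)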
+ */ + if ((prevById != null) && + !node.getName().equals(prevById.getName())) { + nodesByName.remove(prevById.getName()); + } + if ((prevByName != null) && + node.getNodeId() != prevByName.getNodeId()) { + nodesById.remove(prevByName.getNodeId()); + } + } + } + + assert new HashSet<>(nodesById.values()).equals( + new HashSet<>(nodesByName.values())) + : "Node maps indexed by ID and name differ: " + + "IDs: " + nodesById + ", Names: " + nodesByName; + } + } + + /** + * Add a node with transient id. The caller should already have assigned + * the node an ID and checked that the replication group supports secondary + * nodes. + * + * @param node the node with transient id + * @throws IllegalStateException if the store does not currently support + * secondary nodes + * @throws NodeConflictException if the node conflicts with an existing + * persistent node + */ + public void addTransientIdNode(final RepNodeImpl node) { + if (!node.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Attempt to call addTransientIdNode on a node without " + + "transient id: " + node); + } + if (node.getNameIdPair().hasNullId()) { + throw new IllegalArgumentException( + "Attempt to call addTransientIdNode on node without ID: " + + node); + } + + synchronized (nodesById) { + final RepNodeImpl prevById = nodesById.get(node.getNodeId()); + assert (prevById == null) || prevById.getType().hasTransientId() + : "Same node ID for nodes with transient and persistent ID: " + + node + ", " + prevById; + final RepNodeImpl prevByName = nodesByName.get(node.getName()); + if ((prevByName != null) && + !prevByName.getType().hasTransientId()) { + throw new NodeConflictException( + "New node with transient ID " + node.getName() + + " conflicts with an existing node with persistent ID" + + " with the same name: " + prevByName); + } + final RepNodeImpl prevById2 = + nodesById.put(node.getNodeId(), node); + assert prevById == prevById2; + final RepNodeImpl prevByName2 = + nodesByName.put(node.getName(), node); + assert prevByName == prevByName2; + if ((prevById != null) && + !node.getName().equals(prevById.getName())) { + nodesByName.remove(prevById.getName()); + } + if ((prevByName != null) && + (node.getNodeId() != prevByName.getNodeId())) { + nodesById.remove(prevByName.getNodeId()); + } + + assert new HashSet<>(nodesById.values()).equals( + new HashSet<>(nodesByName.values())) + : "Node maps indexed by ID and name differ: " + + "IDs: " + nodesById + ", Names: " + nodesByName; + } + } + + /** + * Remove a node with transient id, which should have an assigned ID + * + * @param node the node with a transient ID + */ + public void removeTransientNode(final RepNodeImpl node) { + if (!node.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Attempt to call removeTransientNode on a" + + " node without transient ID: " + node); + } + if (node.getNameIdPair().hasNullId()) { + throw new IllegalArgumentException( + "Attempt to call removeTransientNode on a node with no ID: " + + node); + } + synchronized (nodesById) { + nodesById.remove(node.getNodeId()); + nodesByName.remove(node.getName()); + } + } + + /** + * returns the unique UUID associated with the replicated environment. + * + * @return the UUID + */ + public UUID getUUID() { + return uuid; + } + + /** + * Returns the version of the format (the schema) in use by this group + * instance in the database. 
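+     * <p>
+     * A value of {@code FORMAT_VERSION_2}, for example, indicates that the
+     * stored bindings omit the per-node JE version information that was
+     * introduced in format version 3.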
+ * + * @return the format version + */ + public int getFormatVersion() { + return formatVersion; + } + + /** + * Returns the highest format version supported by the specified JE + * version. + * + * @param jeVersion the JE version + * @return the highest format version supported by that JE version + */ + public static int getMaxFormatVersion(final JEVersion jeVersion) { + if (jeVersion.compareTo(FORMAT_VERSION_3_JE_VERSION) < 0) { + return FORMAT_VERSION_2; + } + return FORMAT_VERSION_3; + } + + /** + * Returns the version of the instance as represented by changes to the + * members constituting the group. + * + * @return the object change version + */ + public int getChangeVersion() { + return changeVersion; + } + + /** + * Increments the object change version. It must be called with the group + * entry locked in the group database. + * + * @return the incremented change version + */ + public int incrementChangeVersion() { + return ++changeVersion; + } + + /** + * Returns the current highest node ID currently in use by the group. + * + * @return the highest node ID in use + */ + public int getNodeIdSequence() { + return nodeIdSequence; + } + + /** + * Set the node id sequence. This is only done in unusual circumstances, + * e.g. when a replication group is being reset in an existing replicated + * environment and we want to ensure that the internal node ids are not + * reused in the logs. + */ + public void setNodeIdSequence(int nodeIdSequence) { + if (nodeIdSequence < 0 || nodeIdSequence > NODE_SEQUENCE_MAX) { + throw new IllegalArgumentException( + "Bad nodeIdSequence: " + nodeIdSequence); + } + this.nodeIdSequence = nodeIdSequence; + } + + /** + * Increments the node ID sequence and returns it. + * + * @return the next node ID for use in a new node + */ + public int getNextNodeId() { + if (nodeIdSequence >= NODE_SEQUENCE_MAX) { + throw new IllegalStateException("Reached maximum node ID"); + } + return ++nodeIdSequence; + } + + /** + * Returns the node ID that is associated with the very first node in the + * replication group. + */ + public static int getFirstNodeId() { + return NODE_SEQUENCE_START + 1; + } + + /** + * Returns the minimum JE version that a node must be running in order to + * join the group. + */ + public JEVersion getMinJEVersion() { + return minJEVersion; + } + + /** + * Sets the minimum JE version that a node must be running in order to join + * the replication group. The group object should have had its nodes + * fetched using the {@link RepGroupDB#fetchGroup} method and should be + * stored to the group database after making this change. Throws a {@link + * MinJEVersionUnsupportedException} if the requested version is not + * supported. Updates the group format version as needed to match the JE + * version. Has no effect if the current minimum value is already as high + * or higher than the requested one. 
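+     * <p>
+     * For example, raising the minimum to a JE version at or above
+     * {@code FORMAT_VERSION_3_JE_VERSION} (6.0.1) also moves the group to
+     * format version 3, the first version that records per-node JE version
+     * information.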
+     *
+     * @param newMinJEVersion the new minimum JE version
+     * @throws MinJEVersionUnsupportedException if the requested version is not
+     * supported by the group's electable nodes
+     */
+    public void setMinJEVersion(final JEVersion newMinJEVersion)
+        throws MinJEVersionUnsupportedException {
+
+        if (newMinJEVersion == null) {
+            throw new IllegalArgumentException(
+                "The newMinJEVersion argument must not be null");
+        }
+        if (newMinJEVersion.compareTo(minJEVersion) <= 0) {
+            return;
+        }
+        final int newFormatVersion = getMaxFormatVersion(newMinJEVersion);
+
+        /* Minimum JE version is not stored before format version 3 */
+        if (newFormatVersion < FORMAT_VERSION_3) {
+            return;
+        }
+
+        for (final RepNodeImpl node : getElectableMembers()) {
+            final JEVersion nodeJEVersion = node.getJEVersion();
+            if ((nodeJEVersion != null) &&
+                nodeJEVersion.compareTo(newMinJEVersion) < 0) {
+                throw new MinJEVersionUnsupportedException(
+                    newMinJEVersion, node.getName(), nodeJEVersion);
+            }
+        }
+        minJEVersion = newMinJEVersion;
+        formatVersion = newFormatVersion;
+    }
+
+    /**
+     * Used to ensure that the ReplicationGroup value is consistent after it
+     * has been fetched via a readUncommitted access to the rep group database.
+     * It does so by ensuring that the summarized values match the nodes that
+     * were actually read.
+     */
+    public void makeConsistent() {
+        synchronized (nodesById) {
+            if (nodesById.isEmpty()) {
+                return;
+            }
+            int computedNodeId = NODE_SEQUENCE_START - 1;
+            int computedChangeVersion = -1;
+            for (RepNodeImpl mi : nodesById.values()) {
+                /* Get the highest node ID */
+                if (computedNodeId < mi.getNodeId()) {
+                    computedNodeId = mi.getNodeId();
+                }
+                /* Get the highest change version. */
+                if (computedChangeVersion < mi.getChangeVersion()) {
+                    computedChangeVersion = mi.getChangeVersion();
+                }
+            }
+            setNodeIdSequence(computedNodeId);
+            changeVersion = computedChangeVersion;
+        }
+    }
+
+    /*
+     * Serialization
+     */
+
+    /**
+     * Serializes an object by converting its TupleBinding byte based
+     * representation into the hex characters denoting the bytes.
+     *
+     * @param <T> the type of the object being serialized
+     * @param binding the tuple binding used to convert it into its byte form
+     * @param object the object being serialized
+     * @return the hex string containing the serialized hex form of the object
+     */
+    static <T> String objectToHex(TupleBinding<T> binding, T object) {
+        StringBuilder buffer = new StringBuilder();
+        TupleOutput tuple = new TupleOutput(new byte[100]);
+        binding.objectToEntry(object, tuple);
+        byte[] bytes = tuple.getBufferBytes();
+        int size = tuple.getBufferLength();
+
+        for (int i = 0; i < size; i++) {
+            int lowNibble = (bytes[i] & 0xf);
+            int highNibble = ((bytes[i] >> 4) & 0xf);
+            buffer.append(Character.forDigit(lowNibble, 16));
+            buffer.append(Character.forDigit(highNibble, 16));
+        }
+        return buffer.toString();
+    }
+
+    /**
+     * Returns a serialized character based form of the group suitable for use
+     * in subclasses of SimpleProtocol. The serialized form is a multi-token
+     * string. The first token represents the RepGroup object itself with each
+     * subsequent token representing a node in the group. Tokens are separated
+     * by '|', the protocol separator character. The number of tokens is thus
+     * equal to the number of nodes in the group + 1. Each token is itself a
+     * hex character based representation of the binding used to serialize a
+     * RepGroup and store it into the database.
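+     * <p>
+     * Note that {@code objectToHex} emits the low nibble of each byte
+     * first, so a byte value of 0x2a is rendered as "a2"; hexToObject
+     * applies the inverse transformation.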
+     *
+     * @param groupFormatVersion the group format version
+     * @return the string encoded as above
+     */
+    public String serializeHex(final int groupFormatVersion) {
+        final RepGroupDB.GroupBinding groupBinding =
+            new RepGroupDB.GroupBinding(groupFormatVersion);
+        StringBuilder buffer = new StringBuilder();
+        buffer.append(objectToHex(groupBinding, this));
+        synchronized (nodesById) {
+            for (RepNodeImpl mi : nodesById.values()) {
+
+                /*
+                 * Only include nodes that can be serialized with the specified
+                 * format version
+                 */
+                if (NodeBinding.supportsObjectToEntry(
+                        mi, groupFormatVersion)) {
+                    buffer.append(TextProtocol.SEPARATOR);
+                    buffer.append(serializeHex(mi, groupFormatVersion));
+                }
+            }
+        }
+        return buffer.toString();
+    }
+
+    /**
+     * Returns the serialized form of the node as a sequence of hex characters
+     * suitable for use by the text based protocols.
+     *
+     * @param node the node to be serialized.
+     * @param formatVersion the group format version
+     * @return the string containing the serialized form of the node
+     */
+    public static String serializeHex(final RepNodeImpl node,
+                                      final int formatVersion) {
+        final NodeBinding nodeBinding = new NodeBinding(formatVersion);
+        return objectToHex(nodeBinding, node);
+    }
+
+    /**
+     * Serialize the node into its byte representation.
+     *
+     * @param node the node to be serialized
+     * @param formatVersion the group format version
+     * @return the serialized byte array
+     */
+    public static byte[] serializeBytes(final RepNodeImpl node,
+                                        final int formatVersion) {
+
+        final NodeBinding binding = new NodeBinding(formatVersion);
+        final TupleOutput tuple =
+            new TupleOutput(new byte[NodeBinding.APPROX_MAX_SIZE]);
+        binding.objectToEntry(node, tuple);
+        return tuple.getBufferBytes();
+    }
+
+    /**
+     * Deserializes the object serialized by {@link #serializeHex}.
+     *
+     * @param hex the string containing the serialized form of the node
+     * @param formatVersion the group format version
+     *
+     * @return the de-serialized object
+     */
+    public static RepNodeImpl hexDeserializeNode(final String hex,
+                                                 final int formatVersion) {
+        final NodeBinding nodeBinding = new NodeBinding(formatVersion);
+        return hexToObject(nodeBinding, hex);
+    }
+
+    /**
+     * Deserialize the node from its byte representation.
+     *
+     * @param bytes the byte representation of the node.
+     * @param formatVersion the group format version
+     *
+     * @return the deserialized object
+     */
+    public static RepNodeImpl deserializeNode(final byte[] bytes,
+                                              final int formatVersion) {
+        final NodeBinding binding = new NodeBinding(formatVersion);
+        TupleInput tuple = new TupleInput(bytes);
+        return binding.entryToObject(tuple);
+    }
+
+    /**
+     * Carries out the two step de-serialization from hex string into a byte
+     * buffer and subsequently into its object representation.
+     *
+     * @return the object representation
+     */
+    private static <T> T hexToObject(TupleBinding<T> binding, String hex) {
+        byte buffer[] = new byte[(hex.length() / 2)];
+        for (int i = 0; i < hex.length(); i += 2) {
+            int value = Character.digit(hex.charAt(i), 16);
+            value |= Character.digit(hex.charAt(i + 1), 16) << 4;
+            buffer[i >> 1] = (byte) value;
+        }
+        TupleInput tuple = new TupleInput(buffer);
+        return binding.entryToObject(tuple);
+    }
+
+    /**
+     * De-serializes an array of tokens into a Rep group object and its nodes.
+     * The token at {@code start} represents the group object and each
+     * subsequent token represents a node in the group.
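+     * <p>
+     * A hypothetical caller that has split a response line on the '|'
+     * protocol separator might invoke:
+     * <pre>
+     *    RepGroupImpl group = RepGroupImpl.deserializeHex(tokens, start);
+     * </pre>
+     * where the tokens from {@code start} onward were produced by
+     * {@link #serializeHex}.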
+ * + * @param tokens the array representing the group and its nodes + * @param start the position in the array at which to start the + * de-serialization. + * + * @return the de-serialized RepGroup + */ + static public RepGroupImpl deserializeHex(String[] tokens, int start) { + final RepGroupDB.GroupBinding groupBinding = + new RepGroupDB.GroupBinding(); + RepGroupImpl group = hexToObject(groupBinding, tokens[start++]); + Map nodeMap = + new HashMap(); + while (start < tokens.length) { + RepNodeImpl n = + hexDeserializeNode(tokens[start++], group.getFormatVersion()); + RepNodeImpl old = nodeMap.put(n.getNameIdPair().getId(), n); + assert(old == null); + } + group.setNodes(nodeMap); + return group; + } + + /* + * Accessing nodes and groups of nodes + */ + + /** + * Returns the node IDs for all nodes that are currently members of the + * group and that act as proposers, acceptors, or distinguished learners. + * Returns IDs for all ELECTABLE and MONITOR nodes that are not removed, + * even if they are not acknowledged, but not for SECONDARY or EXTERNAL + * nodes. + */ + public Set getAllElectionMemberIds() { + Set ret = new HashSet(); + synchronized (nodesById) { + for (RepNodeImpl mi : nodesById.values()) { + if (!mi.isRemoved() && !mi.getType().hasTransientId()) { + ret.add(mi.getNodeId()); + } + } + } + return ret; + } + + /** + * Returns all nodes that are currently members of the group. Returns all + * ELECTABLE and MONITOR nodes that are acknowledged and not removed, + * and SECONDARY and EXTERNAL nodes. If the predicate is + * not null, only includes members that satisfy the predicate. + */ + public Set getAllMembers(final Predicate p) { + final Set result = new HashSet(); + includeMembers(p, result); + return result; + } + + /** + * Adds all nodes that are currently members of the group to the specified + * set. Adds all ELECTABLE and MONITOR nodes that are not removed, even if + * they are not acknowledged, and SECONDARY and EXTERNAL nodes. If the + * predicate is not null, only adds members that satisfy the predicate. + */ + public void includeAllMembers(final Predicate p, + final Set set) { + synchronized (nodesById) { + for (RepNodeImpl mi : nodesById.values()) { + if (!mi.isRemoved() && ((p == null) || p.include(mi))) { + set.add(mi); + } + } + } + } + + /** + * Counts the number of nodes that are currently members of the group. + * Counts all ELECTABLE and MONITOR nodes that are not removed, even if + * they are not acknowledged, and SECONDARY and EXTERNAL nodes. If the + * predicate is not null, only counts members that satisfy the predicate. + */ + public int countAllMembers(final Predicate p) { + int count = 0; + synchronized (nodesById) { + for (final RepNodeImpl mi : nodesById.values()) { + if (!mi.isRemoved() && ((p == null) || p.include(mi))) { + count++; + } + } + } + return count; + } + + /** + * Adds nodes that are currently members of the group to the specified set. + * Adds ELECTABLE and MONITOR node that are not removed and are + * acknowledged, and SECONDARY and EXTERNAL nodes. If the predicate is not + * null, only adds members that satisfy the predicate. + */ + public void includeMembers(final Predicate p, + final Set set) { + synchronized (nodesById) { + for (RepNodeImpl n : nodesById.values()) { + if (!n.isRemoved() && + n.isQuorumAck() && + ((p == null) || p.include(n))) { + set.add(n); + } + } + } + } + + /** + * Gets the node that is currently a member of the group that has the given + * socket address. 
Returns ELECTABLE and MONITOR nodes that are not + * removed, even if it is not acknowledged, and SECONDARY and EXTERNAL + * nodes. + * + * @return the desired node, or null if there is no such node, including + * if it was removed + */ + public RepNodeImpl getMember(InetSocketAddress socket) { + synchronized (nodesById) { + for (RepNodeImpl n : nodesById.values()) { + if (socket.equals(n.getSocketAddress()) && !n.isRemoved()) { + return n; + } + } + } + return null; + } + + /** + * Returns nodes that are removed from the group. Returns ELECTABLE and + * MONITOR nodes that are removed and are acknowledged, but not SECONDARY + * or EXTERNAL nodes, which are not remembered when they are removed. + */ + public Set getRemovedNodes() { + Set ret = new HashSet(); + synchronized (nodesById) { + for (RepNodeImpl mi : nodesById.values()) { + if (mi.isRemoved() && mi.isQuorumAck()) { + ret.add(mi); + } + } + } + return ret; + } + + /** A predicate for specifying which replication nodes to include. */ + abstract static class Predicate { + abstract boolean include(RepNodeImpl n); + } + + /** + * Returns all electable nodes that are currently members of the group. + * Returns all ELECTABLE nodes that are not removed, even if they are not + * acknowledged, but not MONITOR, SECONDARY, or EXTERNAL nodes. + */ + public Set getAllElectableMembers() { + return getAllMembers(ELECTABLE_PREDICATE); + } + + /** + * Returns electable nodes that are currently members of the group. + * Returns ELECTABLE nodes that are not removed and are acknowledged, but + * not MONITOR, SECONDARY, or EXTERNAL nodes. + */ + public Set getElectableMembers() { + final Set result = new HashSet(); + includeElectableMembers(result); + return result; + } + + /** + * Adds the electable nodes that are currently members of the group to the + * specified set. Adds ELECTABLE nodes that are not removed and are + * acknowledged, but not MONITOR, SECONDARY, or EXTERNAL nodes. + */ + public void includeElectableMembers(final Set set) { + includeAllMembers( + new Predicate() { + @Override + boolean include(RepNodeImpl n) { + return n.getType().isElectable() && n.isQuorumAck(); + } + }, + set); + } + + /** + * Returns the nodes that are currently members of the group that store + * replication data. Returns ELECTABLE nodes that are not removed and are + * acknowledged, and SECONDARY nodes, but not MONITOR or EXTERNAL nodes. + */ + public Set getDataMembers() { + final Set result = new HashSet(); + includeDataMembers(result); + return result; + } + + /** + * Adds the nodes that are currently members of the group that store + * replication data to the specified set. Adds ELECTABLE nodes that are + * not removed and are acknowledged, and SECONDARY nodes, but not MONITOR + * or EXTERNAL nodes. + */ + public void includeDataMembers(final Set set) { + includeAllMembers( + new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return n.getType().isDataNode() && n.isQuorumAck(); + } + }, + set); + } + + /** + * Returns the monitor nodes that are currently members of the group. + * Returns MONITOR nodes that are not removed and are acknowledged, but not + * ELECTABLE, SECONDARY, or EXTERNAL nodes. + * + * @return the set of monitor nodes + */ + public Set getMonitorMembers() { + final Set result = new HashSet(); + includeMonitorMembers(result); + return result; + } + + /** + * Adds the monitor nodes that are currently members of the group to the + * specified set. 
Adds MONITOR nodes that are not removed and are + * acknowledged, but not ELECTABLE, SECONDARY, or EXTERNAL nodes. + */ + public void includeMonitorMembers(final Set set) { + includeMembers(MONITOR_PREDICATE, set); + } + + /** + * Returns all the nodes that are currently members of the group that act + * as distinguished learners to receive election results. Returns all + * ELECTABLE and MONITOR that are not removed, even if they are not + * acknowledged, but not SECONDARY or EXTERNAL nodes. + */ + public Set getAllLearnerMembers() { + final Set result = new HashSet(); + includeAllMembers( + new Predicate() { + @Override + boolean include(final RepNodeImpl n) { + return (n.getType().isElectable() || + n.getType().isMonitor()); + } + }, + result); + return result; + } + + /** + * Returns the secondary nodes that are currently members of the group. + * + * Returns SECONDARY nodes, but not ELECTABLE, MONITOR, or EXTERNAL nodes. + */ + public Set getSecondaryMembers() { + final Set result = new HashSet<>(); + includeSecondaryMembers(result); + return result; + } + + /** + * Returns the external nodes that are currently members of the group. + * + * Returns EXTERNAL nodes, but not ELECTABLE, MONITOR, or SECONDARY nodes. + */ + public Set getExternalMembers() { + final Set result = new HashSet<>(); + includeExternalMembers(result); + return result; + } + + /** + * Adds the secondary nodes that are currently members of the group to the + * specified set. Adds SECONDARY nodes, but not ELECTABLE, MONITOR, or + * EXTERNAL nodes. + */ + public void includeSecondaryMembers(final Set set) { + includeAllMembers(SECONDARY_PREDICATE, set); + } + + /** + * Adds the external nodes. Adds EXTERNAL nodes, but not ELECTABLE, + * MONITOR, or SECONDARY nodes. + */ + public void includeExternalMembers(final Set set) { + includeAllMembers(EXTERNAL_PREDICATE, set); + } + + /** + * Returns the arbiter nodes that are currently members of the group. + * Returns ARBITER nodes. + */ + public Set getArbiterMembers() { + final Set result = new HashSet(); + includeArbiterMembers(result); + return result; + } + + /** + * Adds the arbiter nodes that are currently members of the group to the + * specified set. Adds ARBITER nodes. + */ + public void includeArbiterMembers(final Set set) { + includeMembers(ARBITER_PREDICATE, set); + } + + /** + * Returns the socket addresses for all nodes that are currently members of + * the group. Returns addresses for all ELECTABLE and MONITOR nodes that + * are not removed, even if they are not acknowledged, and for all + * nodes with transient id. If the predicate is not null, only returns + * addresses for members that satisfy the predicate. ARBITER nodes are + * also ELECTABLE and will be part of the returned set. + */ + private Set getAllMemberSockets(Predicate p) { + Set sockets = new HashSet(); + synchronized (nodesById) { + for (final RepNodeImpl mi : nodesById.values()) { + if ((((mi.getType().isElectable() || + mi.getType().isMonitor()) && + !mi.isRemoved()) || + mi.getType().hasTransientId()) && + ((p == null) || p.include(mi))) { + sockets.add(mi.getSocketAddress()); + } + } + } + return sockets; + } + + /** + * Return the socket addresses for all nodes that are currently members of + * the group and act as distinguished learners to receive election results. + * Returns addresses for all ELECTABLE and MONITOR nodes that are not + * removed, even if they are not acknowledged, but not for SECONDARY or + * EXTERNAL nodes. 
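+     * <p>
+     * Like the other socket queries, this is implemented with the private
+     * getAllMemberSockets(Predicate) filter; a hypothetical custom query,
+     * say for arbiter sockets only, could similarly pass:
+     * <pre>
+     *    getAllMemberSockets(ARBITER_PREDICATE)
+     * </pre>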
+ * + * @return set of learner socket addresses + */ + public Set getAllLearnerSockets() { + + /* + * TODO: Consider including secondary nodes in this list. + * That change would increase the chance that SECONDARY nodes have + * up-to-date information about the master, but would need to be + * paired with a change to only wait for delivery of notifications to + * ELECTABLE nodes, to avoid adding sensitivity to potentially longer + * network delays in communicating with secondary nodes. + */ + return getAllMemberSockets(new Predicate() { + @Override + boolean include(RepNodeImpl n) { + return !n.getType().isSecondary() && !n.getType().isExternal(); + } + }); + } + + /** + * Return the socket addresses for all nodes that are currently members of + * the group and act as helpers to supply election results. Returns + * addresses for all ELECTABLE and MONITOR nodes that are not removed, even + * if they are not acknowledged, and SECONDARY nodes. + * + * @return set of helper socket addresses + */ + public Set getAllHelperSockets() { + return getAllMemberSockets(null); + } + + /** + * Returns the socket addresses for all monitor nodes that are currently + * members of the group. Returns addresses for all MONITOR nodes that are + * not removed, even if they are not acknowledged, but not for ELECTABLE, + * SECONDARY, or EXTERNAL nodes. + * + * @return the set of Monitor socket addresses + */ + public Set getAllMonitorSockets() { + return getAllMemberSockets(MONITOR_PREDICATE); + } + + /** + * Returns the socket addresses for all nodes that are currently members of + * the group and act as acceptors for elections. Returns addresses for all + * ELECTABLE nodes that are not removed, even if they are not acknowledged, + * but not for MONITOR, SECONDARY, or EXTERNAL nodes, which do not act as + * acceptors. + * + * @return the set of acceptor socket addresses + */ + public Set getAllAcceptorSockets() { + return getAllMemberSockets(ELECTABLE_PREDICATE); + } + + /** + * Returns the node with the specified ID that is currently a member of the + * group, throwing an exception if the node is found but is no longer a + * member. Returns ELECTABLE and MONITOR nodes that are not removed, even + * if they are not acknowledged, and SECONDARY and EXTERNAL nodes. + * + * @param nodeId the node ID + * @return the member or null + * @throws EnvironmentFailureException if the node is no longer a member + */ + public RepNodeImpl getMember(int nodeId) { + RepNodeImpl node = getNode(nodeId); + if (node == null) { + return null; + } + if (node.isRemoved()) { + throw EnvironmentFailureException.unexpectedState + ("No longer a member:" + nodeId); + } + return node; + } + + /** + * Returns the node with the specified name that is currently a member of + * the group, throwing an exception if the node is found but is no longer a + * member. Returns ELECTABLE and MONITOR nodes that are not removed, even + * if they are not acknowledged, and SECONDARY and EXTERNAL nodes. + * + * @param name the node name + * @return the member or null + * @throws MemberNotFoundException if the node is no longer a member + */ + public RepNodeImpl getMember(String name) + throws MemberNotFoundException { + + RepNodeImpl node = getNode(name); + if (node == null) { + return null; + } + if (node.isRemoved()) { + throw new MemberNotFoundException + ("Node no longer a member:" + name); + } + return node; + } + + /** + * Returns the node with the specified ID, regardless of its membership + * state. 
Returns all ELECTABLE and MONITOR nodes, even if they are + * removed or are not acknowledged, and SECONDARY and EXTERNAL nodes. + * + * @return the node or null + */ + public RepNodeImpl getNode(int nodeId) { + synchronized (nodesById) { + return nodesById.get(nodeId); + } + } + + /** + * Returns the node with the specified name, regardless of its membership + * state. Returns all ELECTABLE and MONITOR nodes, even if they are + * removed or are not acknowledged, and SECONDARY and EXTERNAL nodes. + * + * @return the node or null + */ + public RepNodeImpl getNode(String name) { + synchronized (nodesById) { + return nodesByName.get(name); + } + } + + /** + * Returns the number of all electable nodes that are currently members of + * the group. Includes all ELECTABLE nodes that are not removed, even if + * they are not acknowledged, but not MONITOR, SECONDARY, or EXTERNAL + * nodes. Note that even unACKed nodes are considered part of the group + * for group size/durability considerations. + * + * @return the size of the group for durability considerations + */ + public int getElectableGroupSize() { + return countAllMembers(ELECTABLE_PREDICATE); + } + + /** + * Returns the number of all electable nodes that are currently members of + * the group. Includes all ELECTABLE nodes that are not removed, even if + * they are not acknowledged, but not MONITOR, ARBITER, SECONDARY, or + * EXTERNAL nodes. Note that even unACKed nodes are considered part of the + * group for group size/durability considerations. + * + * @return the size of the group for durability considerations + */ + public int getAckGroupSize() { + return countAllMembers(ACK_PREDICATE); + } + + /* Miscellaneous */ + + /** + * Returns the name of the group. + * + * @return the name of the group. + */ + public String getName() { + return groupName; + } + + /* + * An internal exception indicating that two nodes have conflicting + * configurations. For example, they both use the same hostname and port. + */ + @SuppressWarnings("serial") + public static class NodeConflictException extends DatabaseException { + public NodeConflictException(String message) { + super(message); + } + } + + /** + * Return information to the user, format nicely for ease of reading. + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Group info [").append(groupName).append("] "); + sb.append(getUUID()). + append("\n Format version: ").append(getFormatVersion()). + append("\n Change version: ").append(getChangeVersion()). + append("\n Max persist rep node ID: ").append(getNodeIdSequence()). + append("\n Min JE version: ").append(minJEVersion). + append("\n"); + + synchronized (nodesById) { + for (final RepNodeImpl node : nodesById.values()) { + sb.append(" ").append(node); + } + } + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/rep/impl/RepGroupProtocol.java b/src/com/sleepycat/je/rep/impl/RepGroupProtocol.java new file mode 100644 index 0000000..027e706 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepGroupProtocol.java @@ -0,0 +1,506 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl; + +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannelFactory; + +/** + * Defines the protocol used in support of group membership. + * + * API to Master + * ENSURE_NODE -> ENSURE_OK | FAIL + * REMOVE_MEMBER -> OK | FAIL + * TRANSFER_MASTER -> TRANSFER_OK | FAIL + * DELETE_MEMBER -> OK | FAIL + * + * Monitor to Master + * GROUP_REQ -> GROUP | FAIL + */ +public class RepGroupProtocol extends TextProtocol { + + /** The current protocol version. */ + public static final String VERSION = "4"; + + /** The protocol version introduced to support RepGroupImpl version 3. */ + public static final String REP_GROUP_V3_VERSION = "4"; + + /** The protocol version used with RepGroupImpl version 2. */ + public static final String REP_GROUP_V2_VERSION = "3"; + + /** + * Used during testing: A non-null value overrides the actual protocol + * version. + */ + private static volatile String testCurrentVersion = null; + + public static enum FailReason { + DEFAULT, MEMBER_NOT_FOUND, IS_MASTER, IS_REPLICA, TRANSFER_FAIL, + MEMBER_ACTIVE; + } + + /* The messages defined by this class. */ + + public final MessageOp ENSURE_NODE = + new MessageOp("ENREQ", EnsureNode.class); + public final MessageOp ENSURE_OK = + new MessageOp("ENRESP", EnsureOK.class); + public final MessageOp REMOVE_MEMBER = + new MessageOp("RMREQ", RemoveMember.class); + public final MessageOp GROUP_REQ = + new MessageOp("GREQ", GroupRequest.class); + public final MessageOp GROUP_RESP = + new MessageOp("GRESP", GroupResponse.class); + public final MessageOp RGFAIL_RESP = + new MessageOp("GRFAIL", Fail.class); + public final MessageOp UPDATE_ADDRESS = + new MessageOp("UPDADDR", UpdateAddress.class); + public final MessageOp TRANSFER_MASTER = + new MessageOp("TMASTER", TransferMaster.class); + public final MessageOp TRANSFER_OK = + new MessageOp("TMRESP", TransferOK.class); + public final MessageOp DELETE_MEMBER = + new MessageOp("DLREQ", DeleteMember.class); + + /** + * Creates an instance of this class using the current protocol version. + */ + public RepGroupProtocol(String groupName, + NameIdPair nameIdPair, + RepImpl repImpl, + DataChannelFactory channelFactory) { + + this(getCurrentVersion(), groupName, nameIdPair, repImpl, + channelFactory); + } + + /** + * Creates an instance of this class using the specified protocol version. + */ + RepGroupProtocol(String version, + String groupName, + NameIdPair nameIdPair, + RepImpl repImpl, + DataChannelFactory channelFactory) { + + super(version, groupName, nameIdPair, repImpl, channelFactory); + + this.initializeMessageOps(new MessageOp[] { + ENSURE_NODE, + ENSURE_OK, + REMOVE_MEMBER, + GROUP_REQ, + GROUP_RESP, + RGFAIL_RESP, + UPDATE_ADDRESS, + TRANSFER_MASTER, + TRANSFER_OK, + DELETE_MEMBER + }); + + setTimeouts(repImpl, + RepParams.REP_GROUP_OPEN_TIMEOUT, + RepParams.REP_GROUP_READ_TIMEOUT); + } + + /** Get the current version, supporting a test override. */ + public static String getCurrentVersion() { + return (testCurrentVersion != null) ? 
testCurrentVersion : VERSION; + } + + /** + * Set the current version to a different value, for testing. Specifying + * {@code null} reverts to the standard value. + */ + public static void setTestVersion(final String testVersion) { + testCurrentVersion = testVersion; + } + + /** + * Returns the RepGroupImpl format version to use for the specified + * RepGroupProtocol version. + */ + private static int getGroupFormatVersion(final String protocolVersion) { + return (Double.parseDouble(protocolVersion) <= + Double.parseDouble(REP_GROUP_V2_VERSION)) ? + RepGroupImpl.FORMAT_VERSION_2 : + RepGroupImpl.MAX_FORMAT_VERSION; + } + + private abstract class CommonRequest extends RequestMessage { + private final String nodeName; + + public CommonRequest(String nodeName) { + this.nodeName = nodeName; + } + + public CommonRequest(String requestLine, String[] tokens) + throws InvalidMessageException { + + super(requestLine, tokens); + nodeName = nextPayloadToken(); + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + nodeName; + } + + public String getNodeName() { + return nodeName; + } + } + + public class RemoveMember extends CommonRequest { + public RemoveMember(String nodeName) { + super(nodeName); + } + + public RemoveMember(String requestLine, String[] tokens) + throws InvalidMessageException { + + super(requestLine, tokens); + } + + @Override + public MessageOp getOp() { + return REMOVE_MEMBER; + } + } + + /** + * Like RemoveMember, but also deletes the node's entry from the rep group + * DB. + */ + public class DeleteMember extends CommonRequest { + public DeleteMember(String nodeName) { + super(nodeName); + } + + public DeleteMember(String requestLine, String[] tokens) + throws InvalidMessageException { + + super(requestLine, tokens); + } + + @Override + public MessageOp getOp() { + return DELETE_MEMBER; + } + } + + public class TransferMaster extends RequestMessage { + private final String nodeNameList; + private final long timeout; + private final boolean force; + + public TransferMaster(String nodeNameList, + long timeout, + boolean force) { + super(); + this.nodeNameList = nodeNameList; + this.timeout = timeout; + this.force = force; + } + + public TransferMaster(String requestLine, String[] tokens) + throws InvalidMessageException { + + super(requestLine, tokens); + this.nodeNameList = nextPayloadToken(); + this.timeout = Long.parseLong(nextPayloadToken()); + this.force = Boolean.parseBoolean(nextPayloadToken()); + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + nodeNameList + + SEPARATOR + timeout + SEPARATOR + force; + } + + @Override + public MessageOp getOp() { + return TRANSFER_MASTER; + } + + public String getNodeNameList() { + return nodeNameList; + } + + public long getTimeout() { + return timeout; + } + + public boolean getForceFlag() { + return force; + } + } + + public class GroupRequest extends RequestMessage { + + public GroupRequest() { + } + + public GroupRequest(String line, String[] tokens) + throws InvalidMessageException { + super(line, tokens); + } + + @Override + public MessageOp getOp() { + return GROUP_REQ; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix(); + } + } + + public class UpdateAddress extends CommonRequest { + private final String newHostName; + private final int newPort; + + 
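+        /* Constructs the request used to change the named member's
+         * host name and port. */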
public UpdateAddress(String nodeName, + String newHostName, + int newPort) { + super(nodeName); + this.newHostName = newHostName; + this.newPort = newPort; + } + + public UpdateAddress(String requestLine, String[] tokens) + throws InvalidMessageException { + + super(requestLine, tokens); + this.newHostName = nextPayloadToken(); + this.newPort = new Integer(nextPayloadToken()); + } + + @Override + public MessageOp getOp() { + return UPDATE_ADDRESS; + } + + public String getNewHostName() { + return newHostName; + } + + public int getNewPort() { + return newPort; + } + + @Override + public String wireFormat() { + return super.wireFormat() + SEPARATOR + newHostName + SEPARATOR + + newPort; + } + } + + public class EnsureNode extends RequestMessage { + final RepNodeImpl node; + + public EnsureNode(RepNodeImpl node) { + assert node.getType().isMonitor(); + this.node = node; + } + + public EnsureNode(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + node = RepGroupImpl.hexDeserializeNode( + nextPayloadToken(), getGroupFormatVersion(sendVersion)); + } + + public RepNodeImpl getNode() { + return node; + } + + @Override + public MessageOp getOp() { + return ENSURE_NODE; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + + RepGroupImpl.serializeHex( + node, getGroupFormatVersion(sendVersion)); + } + } + + public class EnsureOK extends OK { + private final NameIdPair nameIdPair; + + public EnsureOK(EnsureNode request, NameIdPair nameIdPair) { + super(request); + this.nameIdPair = nameIdPair; + } + + public EnsureOK(String line, String[] tokens) + throws InvalidMessageException { + super(line, tokens); + nameIdPair = new NameIdPair(nextPayloadToken(), + Integer.parseInt(nextPayloadToken())); + } + + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + @Override + public MessageOp getOp() { + return ENSURE_OK; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + + nameIdPair.getName() + SEPARATOR + + Integer.toString(nameIdPair.getId()); + } + } + + public class TransferOK extends OK { + private final String winner; + + public TransferOK(TransferMaster request, String winner) { + super(request); + this.winner = winner; + } + + public TransferOK(String line, String[] tokens) + throws InvalidMessageException { + super(line, tokens); + winner = nextPayloadToken(); + } + + public String getWinner() { + return winner; + } + + @Override + public MessageOp getOp() { + return TRANSFER_OK; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + winner; + } + } + + public class GroupResponse extends ResponseMessage { + final RepGroupImpl group; + + public GroupResponse(GroupRequest request, RepGroupImpl group) { + super(request); + this.group = group; + } + + public GroupResponse(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + group = RepGroupImpl.deserializeHex + (tokens, getCurrentTokenPosition()); + } + + public RepGroupImpl getGroup() { + return group; + } + + @Override + public MessageOp getOp() { + return GROUP_RESP; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + + /* + * Use the requested group version, unless it is newer than the + * current group version. 
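+             *
+             * For example (illustrative values): a requester that
+             * negotiated protocol version "3" maps, via
+             * getGroupFormatVersion(), to RepGroupImpl.FORMAT_VERSION_2;
+             * and if this group object's own format is older still, the
+             * older of the two formats wins. The response is thus always
+             * serialized in a form the group data can actually support.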
+ */ + int groupFormatVersion = getGroupFormatVersion(sendVersion); + if (group.getFormatVersion() < groupFormatVersion) { + groupFormatVersion = group.getFormatVersion(); + } + return wireFormatPrefix() + SEPARATOR + + group.serializeHex(groupFormatVersion); + } + } + + /** + * Extends the class Fail, adding a reason code to distinguish amongst + * different types of failures. + */ + public class Fail extends TextProtocol.Fail { + final FailReason reason; + + /** + * Create a failure response that is not related to a specific request. + */ + public Fail(FailReason reason, String message) { + super(message); + this.reason = reason; + } + + public Fail(RequestMessage request, FailReason reason, String message) { + super(request, message); + this.reason = reason; + } + + public Fail(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + reason = FailReason.valueOf(nextPayloadToken()); + } + + @Override + public MessageOp getOp() { + return RGFAIL_RESP; + } + + @Override + public String wireFormat() { + return super.wireFormat() + SEPARATOR + reason.toString(); + } + + public FailReason getReason() { + return reason; + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/RepImpl.java b/src/com/sleepycat/je/rep/impl/RepImpl.java new file mode 100644 index 0000000..63a0729 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepImpl.java @@ -0,0 +1,2303 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl; + +import static com.sleepycat.je.rep.NoConsistencyRequiredPolicy.NO_CONSISTENCY; +import static com.sleepycat.je.rep.impl.RepParams.NODE_NAME; +import static com.sleepycat.je.rep.impl.RepParams.REPLAY_FREE_DISK_PERCENT; +import static com.sleepycat.je.rep.impl.RepParams.TEST_JE_VERSION; +import static com.sleepycat.je.rep.impl.RepParams.VLSN_MAX_DIST; +import static com.sleepycat.je.rep.impl.RepParams.VLSN_MAX_MAP; +import static com.sleepycat.je.rep.impl.RepParams.VLSN_STRIDE; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.PrintWriter; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.logging.Formatter; +import java.util.logging.Level; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.ProgressListener; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.TransactionTimeoutException; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.DbType; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.RepConfigProxy; +import com.sleepycat.je.dbi.StartupTracker.Phase; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.recovery.RecoveryInfo; +import com.sleepycat.je.recovery.VLSNRecoveryProxy; +import com.sleepycat.je.rep.DatabasePreemptedException; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.LockPreemptedException; +import com.sleepycat.je.rep.LogFileRewriteListener; +import com.sleepycat.je.rep.LogOverwriteException; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.RepStatManager; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import 
com.sleepycat.je.rep.ReplicatedEnvironmentStats; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.RestartRequiredException; +import com.sleepycat.je.rep.RollbackException; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; +import com.sleepycat.je.rep.SyncupProgress; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.node.Feeder; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.impl.node.MasterTransfer; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.NodeState; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.impl.node.Replay; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.stream.ArbiterFeederSource; +import com.sleepycat.je.rep.stream.FeederFilter; +import com.sleepycat.je.rep.stream.FeederReader; +import com.sleepycat.je.rep.stream.FeederTxns; +import com.sleepycat.je.rep.subscription.StreamAuthenticator; +import com.sleepycat.je.rep.txn.MasterThreadLocker; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.txn.ReadonlyTxn; +import com.sleepycat.je.rep.txn.ReplayTxn; +import com.sleepycat.je.rep.txn.ReplicaThreadLocker; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; +import com.sleepycat.je.rep.utilint.StatCaptureRepDefinitions; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.rep.vlsn.VLSNRecoveryTracker; +import com.sleepycat.je.statcap.StatManager; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.ThreadLocker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.TxnEnd; +import com.sleepycat.je.txn.VersionedWriteTxnEnd; +import com.sleepycat.je.util.DbBackup; +import com.sleepycat.je.utilint.BooleanStat; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.StringStat; +import com.sleepycat.je.utilint.VLSN; + +public class RepImpl + extends EnvironmentImpl + implements RepEnvConfigObserver { + + private VLSNIndex vlsnIndex; + /* VLSNIndexAccess coordinates the closing of the vlsn index */ + private final VLSNIndexAccess vlsnIndexAccess = new VLSNIndexAccess(); + + private final FeederTxns feederTxns; + + /* + * The repNode is only non-null when the replicated environment has joined + * a group. It's null otherwise. + */ + private volatile RepNode repNode; + private Replay replay; + + /* + * This is the canonical nameIdPair instance used by the node. The internal + * Id part of the pair will be updated when the node actually joins the + * group. + */ + private NameIdPair nameIdPair; + + private final NodeState nodeState; + + /* + * The clockskew used by this environment in ms. It's only used by testing + * to inject clock skew between ReplicatedEnvironments. + */ + private static int clockSkewMs = 0; + + /* + * A handle to the group database. This handle is initialized lazily when + * the contents of the database are first required. It's set to null upon + * shutdown. 
The handle must be initialized lazily because the database is
+     * created by the master, and we only learn the master's identity
+     * later. The RepImpl manages the rep group database, so that the
+     * lifetime of the databaseImpl handle can be managed more easily to
+     * mesh with the opening and closing of the RepImpl.
+     */
+    private DatabaseImpl groupDbImpl = null;
+
+    /*
+     * True while this replica is performing rollback; backups are
+     * prohibited during that time.
+     */
+    private boolean backupProhibited = false;
+
+    /*
+     * Represents whether this Environment is allowed to convert a
+     * non-replicated Environment to replicated.
+     */
+    private boolean allowConvert = false;
+
+    /** Config params for preserving and caching the VLSN. */
+    private boolean preserveVLSN;
+    private boolean cacheVLSN;
+
+    /*
+     * True if TTL is available. Volatile is not used, since checking more
+     * than once is idempotent.
+     */
+    private boolean isTTLAvailable = false;
+
+    /* Keep an eye on the ongoing DbBackups. */
+    private final Set<DbBackup> backups = new HashSet<DbBackup>();
+
+    /*
+     * The list of observers who are notified when a mutable rep param
+     * changes.
+     */
+    private final List<RepEnvConfigObserver> repConfigObservers;
+
+    /*
+     * Lock used to control access and lazy initialization of groupDbImpl,
+     * ensuring that there is exactly one database made. A mutex is used
+     * rather than synchronization to allow us to probe for contention on
+     * the groupDbImpl.
+     */
+    private final ReentrantLock groupDbLock = new ReentrantLock();
+
+    private int replicaAckTimeout;
+    private int arbiterAckTimeout;
+    private int insufficientReplicasTimeout;
+    private int replayTxnTimeout;
+    private ReplicaConsistencyPolicy defaultConsistencyPolicy;
+    private boolean allowArbiterAck;
+
+    /*
+     * Arbiters, subscribers and networkBackup use RepImpls which are read
+     * only and have some daemon functionality disabled.
+     */
+    private boolean isArbiter;
+    private boolean isSubscriber;
+    private boolean isNetworkBackup;
+
+    /*
+     * NodeStats are currently not public, but we may want to evaluate
+     * and decide if they would be useful, perhaps as a debugging aid.
+     */
+    private final StatGroup nodeStats;
+    private final BooleanStat hardRecoveryStat;
+    private final StringStat hardRecoveryInfoStat;
+
+    /*
+     * Used to block transaction commit/abort execution just before
+     * completing a Master Transfer operation.
+     */
+    private volatile CountDownLatch blockTxnLatch = new CountDownLatch(0);
+
+    /**
+     * A lock used to coordinate access to {@link #blockTxnLatch}.
+     * <p>
+     * When a Master Transfer operation completes Phase 1, it sets a new
+     * {@code CountDownLatch} in order to block the completion of
+     * transactions at the commit or abort stage. We must avoid having it
+     * do so at an awkward moment. There are two (unrelated) cases:
+     * <p>
+     * 1. There is a brief period between the time a transaction "awaits"
+     * the latch (in {@code checkBlock()}) and the time it publishes its
+     * VLSN. We must avoid having Master Transfer read its "ultimate goal"
+     * VLSN during that interval.
+     * <p>
+     * 2. The Feeder input thread occasionally updates the GroupDB, upon
+     * receiving a Heartbeat response. That happens in a transaction, like
+     * any other, so it could be subject to the normal blockage in Phase 2.
+     * But the Feeder input thread is of course also the thread that we
+     * rely on for making progress towards the goal of Master Transfer; so
+     * blocking it is counterproductive.
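+     * <p>
+     * A minimal sketch of the resulting locking pattern (illustrative
+     * only; the actual code is in blockTxnCompletion() and checkBlock()):
+     * <pre>{@code
+     * // Master Transfer: install a fresh latch under the write lock
+     * blockLatchLock.writeLock().lockInterruptibly();
+     * try {
+     *     blockTxnLatch = new CountDownLatch(1);
+     * } finally {
+     *     blockLatchLock.writeLock().unlock();
+     * }
+     *
+     * // Committing transaction: await the latch while holding the read
+     * // lock, which is released only after the commit VLSN is published
+     * blockLatchLock.readLock().lockInterruptibly();
+     * blockTxnLatch.await();
+     * }</pre>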
        + * + * @see MasterTransfer + * @see ReplicatedEnvironment#transferMaster + */ + private final ReentrantReadWriteLock blockLatchLock = + new ReentrantReadWriteLock(true); + + /* application listener for syncups. */ + private final ProgressListener syncupProgressListener; + + /* Application callback to be notified before we overwrite log files. */ + private final LogFileRewriteListener logRewriteListener; + + /* Configuration for ServiceDispatcher communication */ + private final ReplicationNetworkConfig repNetConfig; + + /* + * Factory for creating channel instances. Not available until + * initializeChannelFactory is called. + */ + private volatile DataChannelFactory channelFactory; + + /** + * Used for testing, to create log files with + * VLSN.UNINITIALIZED_VLSN_SEQUENCE as the value for the dtvlsn + */ + private static boolean simulatePreDTVLSNMaster = false; + + /* + * Used to verify VLSN invariants as they are written to the log on the + * master + */ + private long prevLoggedVLSN = VLSN.NULL_VLSN_SEQUENCE; + private long prevLoggedDTVLSN = VLSN.NULL_VLSN_SEQUENCE; + + /* + * The filter transmitted to a Feeder so that records can be filtered at + * the source + */ + private final FeederFilter feederFilter; + + /** + * The feeder authenticator generator is instantiated by the replicated + * environment creator. + */ + private volatile StreamAuthenticator authenticator = null; + + public RepImpl(File envHome, + EnvironmentConfig envConfig, + EnvironmentImpl sharedCacheEnv, + RepConfigProxy repConfigProxy) + throws EnvironmentNotFoundException, EnvironmentLockedException { + + super(envHome, envConfig, sharedCacheEnv, repConfigProxy); + + allowConvert = + RepInternal.getAllowConvert(((ReplicationConfig) repConfigProxy)); + + repConfigObservers = new ArrayList(); + addRepConfigObserver(this); + + repNetConfig = + ((ReplicationConfig)repConfigProxy).getRepNetConfig(); + nodeState = new NodeState(nameIdPair, this); + + if (isArbiter || isSubscriber || isNetworkBackup ) { + nodeStats = null; + syncupProgressListener = null; + logRewriteListener = null; + hardRecoveryStat = null; + hardRecoveryInfoStat = null; + feederTxns = null; + feederFilter = null; + return; + } + + feederTxns = new FeederTxns(this); + replay = new Replay(this, nameIdPair); + + nodeStats = new StatGroup(RepImplStatDefinition.GROUP_NAME, + RepImplStatDefinition.GROUP_DESC); + hardRecoveryStat = new BooleanStat(nodeStats, + RepImplStatDefinition.HARD_RECOVERY); + hardRecoveryInfoStat = + new StringStat(nodeStats, RepImplStatDefinition.HARD_RECOVERY_INFO, + "This node did not incur a hard recovery."); + + syncupProgressListener = + ((ReplicationConfig)repConfigProxy).getSyncupProgressListener(); + logRewriteListener = + ((ReplicationConfig)repConfigProxy).getLogFileRewriteListener(); + + feederFilter = + ((ReplicationConfig)repConfigProxy).getFeederFilter(); + + authenticator = ((ReplicationConfig)repConfigProxy).getAuthenticator(); + } + + /** + * Called by the EnvironmentImpl constructor. Some rep params, + * preserveVLSN for example, are accessed by the EnvironmentImpl via + * methods (getPreserveVLSN for example), so they need to be initialized + * early. + */ + @Override + protected void initConfigParams(EnvironmentConfig envConfig, + RepConfigProxy repConfigProxy) { + + /* Init standalone config params first. */ + super.initConfigParams(envConfig, repConfigProxy); + + /* Init rep config params. 
*/ + replicaAckTimeout = + configManager.getDuration(RepParams.REPLICA_ACK_TIMEOUT); + insufficientReplicasTimeout = + configManager.getDuration(RepParams.INSUFFICIENT_REPLICAS_TIMEOUT); + replayTxnTimeout = + configManager.getDuration(RepParams.REPLAY_TXN_LOCK_TIMEOUT); + defaultConsistencyPolicy = RepUtils.getReplicaConsistencyPolicy + (configManager.get(RepParams.CONSISTENCY_POLICY)); + preserveVLSN = + configManager.getBoolean(RepParams.PRESERVE_RECORD_VERSION); + cacheVLSN = + configManager.getBoolean(RepParams.CACHE_RECORD_VERSION); + allowArbiterAck = + configManager.getBoolean(RepParams.ALLOW_ARBITER_ACK); + isArbiter = + configManager.getBoolean(RepParams.ARBITER_USE); + isSubscriber = + configManager.getBoolean(RepParams.SUBSCRIBER_USE); + isNetworkBackup = + configManager.getBoolean(RepParams.NETWORKBACKUP_USE); + arbiterAckTimeout = + configManager.getDuration(RepParams.ARBITER_ACK_TIMEOUT); + } + + @Override + protected Formatter initFormatter() { + + /* + * The nameIdPair field is assigned here rather than in the constructor + * because of base class/subclass dependencies. initFormatter() is + * called by the base class constructor, and nameIdPair must be + * available at that time. + */ + nameIdPair = new NameIdPair(configManager.get(NODE_NAME)); + return new ReplicationFormatter(nameIdPair); + } + + @Override + public String getMonitorClassName() { + return "com.sleepycat.je.rep.jmx.RepJEMonitor"; + } + + @Override + public String getDiagnosticsClassName() { + return "com.sleepycat.je.rep.jmx.RepJEDiagnostics"; + } + + /** + * @see super#initConfigManager + */ + @Override + protected DbConfigManager + initConfigManager(EnvironmentConfig envConfig, + RepConfigProxy repConfigProxy) { + return new RepConfigManager(envConfig, repConfigProxy); + } + + @Override + public boolean getAllowRepConvert() { + return allowConvert; + } + + /** + * @see super#resetConfigManager + */ + @Override + protected DbConfigManager resetConfigManager(EnvironmentConfig newConfig) { + /* Save all the replication related properties. */ + RepConfigManager repConfigManager = (RepConfigManager) configManager; + ReplicationConfig repConfig = repConfigManager.makeReplicationConfig(); + return new RepConfigManager(newConfig, repConfig); + } + + public ReplicationConfig cloneRepConfig() { + RepConfigManager repConfigManager = (RepConfigManager) configManager; + return repConfigManager.makeReplicationConfig(); + } + + /* Make an ReplicatedEnvironment handle for this RepImpl. */ + public ReplicatedEnvironment makeEnvironment() { + return new ReplicatedEnvironment(getEnvironmentHome(), + cloneRepConfig(), + cloneConfig()); + } + + public ReplicationMutableConfig cloneRepMutableConfig() { + RepConfigManager repConfigManager = (RepConfigManager) configManager; + return repConfigManager.makeReplicationConfig(); + } + + public void setRepMutableConfig(ReplicationMutableConfig config) + throws DatabaseException { + + /* Clone the current config. */ + RepConfigManager repConfigManager = (RepConfigManager) configManager; + ReplicationConfig newConfig = repConfigManager.makeReplicationConfig(); + + /* Copy in the mutable props. */ + config.copyMutablePropsTo(newConfig); + repConfigManager = new RepConfigManager + (configManager.getEnvironmentConfig(), newConfig); + + /* + * Update the current config and notify observers. The config manager + * is replaced with a new instance that uses the new configuration. 
+ * This avoids synchronization issues: other threads that have a + * reference to the old configuration object are not impacted. + * + * Notify listeners in reverse order of registration so that the + * environment listener is notified last and can start daemon threads + * after they are configured. + */ + for (int i = repConfigObservers.size() - 1; i >= 0; i -= 1) { + RepEnvConfigObserver o = repConfigObservers.get(i); + o.repEnvConfigUpdate(repConfigManager, newConfig); + } + } + + @Override + public void repEnvConfigUpdate(RepConfigManager configMgr, + ReplicationMutableConfig newConfig) + throws DatabaseException { + + allowArbiterAck = + configMgr.getBoolean(RepParams.ALLOW_ARBITER_ACK); + + if (repNode == null) { + return; + } + + repNode.getArbiter().processConfigChange(newConfig); + + repNode.getElectionQuorum().setElectableGroupSizeOverride + (newConfig.getElectableGroupSizeOverride()); + + /* Account for mutation of deprecated HA LogFlusher params. */ + getLogFlusher().configFlushTask(configMgr); + + repNode.getReplica().getDbCache().setConfig(configMgr); + } + + public synchronized void addRepConfigObserver(RepEnvConfigObserver o) { + repConfigObservers.add(o); + } + + /** + * The VLSNIndex must be created, merged and flushed before the recovery + * checkpoint. This method should be called even if there is no recovery + * checkpoint, because it sets up needed data structures. + * + * On the face of it, it seems that one could flush the VLSNIndex cache + * after the recovery checkpoint, before the Replicator constructor returns + * and before any user level HA operations can start. That's not sufficient + * because the recovery checkpoint is shortening the recovery interval for + * future recoveries, and any information that has been garnered must be + * persisted. Here's an example of what might happen after a series of + * recoveries if we fail to flush VLSNIndex as part of the recovery + * checkpoint: + * + * Environment recovers for first time, brand new environment + * recovery did not find any VLSNs in log, because log is brand new + * recovery logs ckpt 1start + * recovery logs ckpt 1 end + * + * VLSN 1 logged + * VLSN 2 logged + * VLSN 3 logged + * + * crash .... Environment recovers + * recovery crawls log from ckpt 1 start onward, finds VLSNs 1-3 + * recovery logs ckpt 2 start + * recovery logs ckpt 2 end + * VLSN index instantiated, VLSNs 1-3 added in but not written too disk + * + * crash ... Environment recovers + * recovery crawls log from ckpt start 2 start onward, finds no VLSNs. + * + * Instead, the flushed VLSN has to be logged before the checkpoint end + * record that is used for the next recovery. + */ + @Override + public void preRecoveryCheckpointInit(RecoveryInfo recoveryInfo) { + + int stride = configManager.getInt(VLSN_STRIDE); + int maxMappings = configManager.getInt(VLSN_MAX_MAP); + int maxDist = configManager.getInt(VLSN_MAX_DIST); + + /* + * Our local nameIdPair field isn't set yet because we haven't finished + * our initialization, so get it from the config manager. 
+ */ + NameIdPair useNameIdPair = + new NameIdPair(configManager.get(NODE_NAME)); + + vlsnIndex = new VLSNIndex(this, DbType.VLSN_MAP.getInternalName(), + useNameIdPair, stride, maxMappings, maxDist, + recoveryInfo); + replay.preRecoveryCheckpointInit(recoveryInfo); + } + + /** + * Returns the current state associated with this ReplicatedEnvironment + * + * @return the externally visible ReplicatedEnvironment state + */ + public ReplicatedEnvironment.State getState() { + return nodeState.getRepEnvState(); + } + + /** + * Returns the state change event that transitioned the + * ReplicatedEnviroment to its current state. + */ + public StateChangeEvent getStateChangeEvent() { + return nodeState.getStateChangeEvent(); + } + + public NodeState getNodeState() { + return nodeState; + } + + /** + * Wait for this node to join a replication group and return whether it is + * a MASTER or REPLICA. Note that any method that creates or clears the + * repNode field must be synchronized. + */ + public synchronized ReplicatedEnvironment.State + joinGroup(ReplicaConsistencyPolicy consistency, + QuorumPolicy initialElectionPolicy) + throws ReplicaConsistencyException, DatabaseException { + + startupTracker.start(Phase.TOTAL_JOIN_GROUP); + try { + if (repNode == null) { + repNode = new RepNode(this, replay, nodeState); + } + + return repNode.joinGroup(consistency, initialElectionPolicy); + } catch (IOException ioe) { + throw EnvironmentFailureException.unexpectedException + (this, "Problem attempting to join on " + getSocket(), ioe); + } finally { + startupTracker.stop(Phase.TOTAL_JOIN_GROUP); + } + } + + /** + * Initialize the DataChannelFactory in our configuration for use. + * This is public to allow access by the ReplicatedEnvironment constructor. + * @throws IllegalArgumentException if the ReplicationNetworkConfig + * is invalid. + */ + public void initializeChannelFactory() { + if (channelFactory != null) { + return; + } + + synchronized (this) { + if (channelFactory == null) { + channelFactory = + DataChannelFactoryBuilder.construct( + repNetConfig, + DataChannelFactoryBuilder.makeLoggerFactory(this)); + } + } + } + + @Override + protected Environment createInternalEnvironment() { + return new InternalReplicatedEnvironment + (getEnvironmentHome(), cloneRepConfig(), cloneConfig(), this); + } + + /** + * @see EnvironmentImpl#setupClose + * Release all replication resources that can be released before the + * checkpoint. Note that any method that creates or clears the repNode + * field must be called from a synchronized caller. + * + * Note that the vlsnIndex is closed as a callback, from + * postCheckpointPreEnvClose() + * @throws DatabaseException + * + */ + @Override + protected synchronized void setupClose(PrintWriter errors) + throws DatabaseException { + + if (groupDbImpl != null) { + getDbTree().releaseDb(groupDbImpl); + groupDbImpl = null; + LoggerUtils.fine + (envLogger, this, "Group member database shutdown"); + } + + try { + if (repNode != null) { + repNode.shutdown(); + repNode = null; + } + } catch (InterruptedException e) { + appendException(errors, e, "shutting down node " + nameIdPair); + } + } + + /** + * Close any resources that need to be closed after the closing checkpoint. + * Note that since Replay.close closes open transactions, it must be + * invoked after the checkpoint has been completed, so that the checkpoint + * operation can correctly account for the open transactions. 
+ */ + @Override + protected synchronized void postCheckpointClose(boolean checkpointed) + throws DatabaseException { + + if (replay != null) { + replay.close(); + replay = null; + } + + vlsnIndexAccess.closeVLSNIndex(checkpointed); + } + + /** + * @see EnvironmentImpl#setupClose + * + * Note: this conversion process will iterate over all user created + * databases in the environment, which could be potentially be a costly + * affair. However, let's opt for simplicity and defer any optimizations + * until we see whether this is an important use case. + */ + @Override + protected void postRecoveryConversion() { + + super.postRecoveryConversion(); + + if (needRepConvert) { + /* Set NameDb to replicated. */ + DatabaseImpl nameDb = null; + try { + nameDb = dbMapTree.getDb(DbTree.NAME_DB_ID); + if (!nameDb.isReplicated()) { + nameDb.setIsReplicatedBit(); + nameDb.setDirty(); + } + } finally { + if (nameDb != null) { + dbMapTree.releaseDb(nameDb); + } + } + + /* Set user defined databases to replicated. */ + Map idNameMap = dbMapTree.getDbNamesAndIds(); + for (DatabaseId id : idNameMap.keySet()) { + DatabaseImpl db = null; + try { + db = dbMapTree.getDb(id); + if (db != null && + !DbTree.isReservedDbName(idNameMap.get(id))) { + + db.setIsReplicatedBit(); + db.setDirty(); + } + } finally { + if (db != null) { + dbMapTree.releaseDb(db); + } + } + } + + /* + * Do a checkpointer to flush dirty datbaseImpls that are converted + * to replicated and write the current VLSNRange to the log. + */ + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + ckptConfig.setMinimizeRecoveryTime(true); + invokeCheckpoint(ckptConfig, "Environment conversion"); + } + } + + /* + * Close enough resources to support reopening the environment in the same + * JVM. + * @see EnvironmentImpl#doCloseAfterInvalid() + */ + @Override + public synchronized void doCloseAfterInvalid() { + + try { + /* Release the repNode, in order to release sockets. */ + if (repNode != null) { + repNode.shutdown(); + repNode = null; + } + } catch (Exception ignore) { + } + + super.doCloseAfterInvalid(); + } + + /** + * Used by error handling to forcibly close an environment, and by tests to + * close an environment to simulate a crash. Database handles do not have + * to be closed before calling this method. A checkpoint is not performed. + * The various thread pools will be shutdown abruptly. + * + * @throws DatabaseException + */ + @Override + public void abnormalClose() + throws DatabaseException { + + /* + * Shutdown the daemons, and the checkpointer in particular, before + * nulling out the vlsnIndex. + */ + shutdownDaemons(); + + try { + if (repNode != null) { + + /* + * Don't fire a LeaveGroupEvent if it's an abnormal close, + * otherwise an EnvironmentFailureException would be thrown + * because daemons of this Environment have been shutdown. + */ + repNode.getMonitorEventManager().disableLeaveGroupEvent(); + repNode.shutdown(); + repNode = null; + } + } catch (InterruptedException ignore) { + /* ignore */ + } + + try { + vlsnIndexAccess.abnormalCloseVLSNIndex(); + } catch (DatabaseException ignore) { + /* ignore */ + } + + try { + super.abnormalClose(); + } catch (DatabaseException ignore) { + /* ignore */ + } + } + + /** + * A replicated log entry has been written on this node. Update the + * VLSN->LSN mapping. Called outside the log write latch. 
+ * @throws DatabaseException + */ + @Override + public void registerVLSN(LogItem logItem) { + LogEntryHeader header = logItem.header; + VLSN vlsn = header.getVLSN(); + + /* + * Although the very first replicated entry of the system is never a + * syncable log entry type, the first GlobalCBVLSN of the system must + * start at 1. If we only track the first syncable entry, the + * GlobalCBVLSN will start a a value > 1, and replicas that are + * starting up from VLSN 1 will be caught in spurious network restores + * because VLSN 1 < the GlobalCBVLSN. Therefore treat the VLSN 1 as a + * syncable entry for the sake of the GlobalCBVLSN. + */ + if (LogEntryType.isSyncPoint(header.getType()) || + VLSN.FIRST_VLSN.equals(vlsn)) { + repNode.trackSyncableVLSN(vlsn, logItem.lsn); + } + vlsnIndex.put(logItem); + } + + /** + * Generate the next VLSN and update the DTVLSN value in the item. Note + * that this method is only invoked when the node is in the Master state, + * since the master assigns new VLSNs and DTVLSNs, and the replicas simply + * preserve them. + * + * The DTVLSN value must be calculated under the same latch as the updating + * of the VLSN to ensure that the following invariants are maintained: + * + * lsn1 > lsn2 ==> VLSN(lsn1) > VLSN(lsn2) + * vlsn2 > vlsn1 ==> DTVLSN(vlsn2) >= DTVLSN(vlsn1) + * + * where vlsn2 and vlsn1 are transaction commit or abort records. + * + * Replicas, when replaying their stream, verify that this invariant is + * maintained. + * + * Commit/Abort records for Replication groups that have a single electable + * and durable node have their dtvlsn written as the associated VLSN, that + * is, DTVLSN(vlsn) == vlsn. For all other RG configurations, DTVLSN(vlsn) + * < vlsn. + * + * Commit/Abort Log records that are created by replaying an HA stream from + * a pre DTVLSN feeder, will have their dtvlsns set to + * VLSN.UNINITIALIZED_VLSN_SEQUENCE during replica replay. They do not + * follow this code path. + * + * @param entry the log entry with which the VLSN will be associated. If + * the log entry represents a commit or abort entry, its DTVLSN is modified + * so that it's correct when it's serialized out. + */ + @Override + public VLSN assignVLSNs(LogEntry entry) { + final VLSN vlsn = vlsnIndex.bump(); + + final byte itemType = entry.getLogType().getTypeNum(); + if (itemType != LogEntryType.LOG_TXN_COMMIT.getTypeNum() && + itemType != LogEntryType.LOG_TXN_ABORT.getTypeNum()) { + return vlsn; + } + + /* + * A commit or abort record. Compute the DTVLSN value to associate with + * the commit record and store it into the buffer at the appropriate + * position. + */ + final long dtvlsn; + + if (simulatePreDTVLSNMaster) { + dtvlsn = VLSN.UNINITIALIZED_VLSN_SEQUENCE; + } else if (repNode.isNeedsAcks()) { + /* + * Use the dtvlsn value being tracked via acknowledgments from + * replicas when replication is being used for durability. + */ + dtvlsn = getRepNode().getDTVLSN(); + } else { + /* + * Replicated environment, but replication is not being used for + * durability. That is, the commit is self-acknowledged, set dtvlsn + * == vlsn. + */ + dtvlsn = getRepNode().updateDTVLSN(vlsn.getSequence()); + } + + final VersionedWriteTxnEnd txnEnd = + (VersionedWriteTxnEnd)entry.getMainItem(); + + /* + * As a cheap sanity check, commits/aborts on the master are created + * with VLSN.NULL values, so they can be verified here. 
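+         *
+         * A worked example of the DTVLSN rule stated above (illustrative
+         * numbers): commit records at VLSNs 10, 12 and 15 might carry
+         * DTVLSNs 8, 8 and 12. The DTVLSN sequence is non-decreasing as
+         * the VLSN sequence ascends, satisfying
+         * vlsn2 > vlsn1 ==> DTVLSN(vlsn2) >= DTVLSN(vlsn1); only in a
+         * group whose sole electable node self-acknowledges its commits
+         * would DTVLSN(vlsn) == vlsn.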
+ */ + final long checkDTVLSN = txnEnd.getDTVLSN(); + if (checkDTVLSN != VLSN.NULL_VLSN_SEQUENCE) { + throw new IllegalStateException("NULL DTVLSN expected at VLSN:" + + vlsn + " not " + checkDTVLSN); + } + + txnEnd.setDTVLSN(dtvlsn); + + /* Verify invariant */ + if (prevLoggedVLSN > vlsn.getSequence()) { + if (dtvlsn < prevLoggedDTVLSN) { + String msg = + "DTVLSNs must be in ascending order in the stream. " + + " prev DTVLSN:" + prevLoggedDTVLSN + + " next DTVLSN:" + dtvlsn + " at VLSN: " + vlsn; + throw EnvironmentFailureException.unexpectedState(this, msg); + } + } + prevLoggedVLSN = vlsn.getSequence(); + prevLoggedDTVLSN = dtvlsn; + + return vlsn; + } + + /** + * Flush any information that needs to go out at checkpoint. Specifically, + * write any in-memory VLSN->LSN mappings to the VLSNIndex database so we + * are guaranteed that the VLSNIndex database will recover properly. + * This must be committed with noSync because + * - the ensuing checkpoint end record will be logged with an fsync and + * will effectively force this out + * - it's important to minmize lock contention on the vlsn index and + * any fsync done during a checkpoint will be expensive, as there may + * be quite a lot to push to disk. We don't want to incur that cost + * while holding locks on the vlsn index. [#20702] + */ + @Override + public void preCheckpointEndFlush() + throws DatabaseException { + + if (vlsnIndex != null) { + vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC); + } + } + + @Override + public boolean isMaster() { + + /* + * The volatile repNode field might be modified by joinGroup(), + * leaveGroup, or close(), which are synchronized. Keep this method + * unsynchronized, assign to a temporary field to guard against a + * change. + */ + RepNode useNode = repNode; + if (useNode == null) { + return false; + } + return useNode.isMaster(); + } + + public void setChangeListener(StateChangeListener listener) { + StateChangeListener prevListener = nodeState.getChangeListener(); + nodeState.setChangeListener(listener); + + /* + * Call back so that it's aware of the last state change event and + * the application can initialize itself correctly as a master or + * replica. + */ + final StateChangeEvent stateChangeEvent = + nodeState.getStateChangeEvent(); + try { + /* Invoke application code and handle any app exceptions. */ + listener.stateChange(stateChangeEvent); + } catch (Exception e) { + /* Revert the change. */ + nodeState.setChangeListener(prevListener); + LoggerUtils.severe + (envLogger, this, + "State Change listener exception: " + e.getMessage()); + /* An application error. */ + throw new EnvironmentFailureException + (this, EnvironmentFailureReason.LISTENER_EXCEPTION, e); + } + } + + public StateChangeListener getChangeListener() { + return nodeState.getChangeListener(); + } + + public VLSNIndex getVLSNIndex() { + return vlsnIndex; + } + + public FeederTxns getFeederTxns() { + return feederTxns; + } + + public ReplicatedEnvironmentStats getStats(StatsConfig config) { + return getStats(config, statKey); + } + + @Override + public Collection getRepStatGroups(StatsConfig config, + Integer statKey1) { + ReplicatedEnvironmentStats res = getStats(config, statKey1); + return (res == null) ? 
null : res.getStatGroups(); + } + + @Override + public SortedSet getStatCaptureProjections() { + return new StatCaptureRepDefinitions().getStatisticProjections(); + } + + @Override + public StatManager createStatManager() { + return new RepStatManager(this); + } + + public FeederFilter getFeederFilter() { + return feederFilter; + } + + public ReplicatedEnvironmentStats getStatsInternal(StatsConfig config) { + if (repNode == null) { + return null; + } + return repNode.getStats(config); + } + + public ReplicatedEnvironmentStats getStats( + StatsConfig config, + Integer contextKey) { + return ((RepStatManager)statManager).getRepStats(config, contextKey); + } + + public Replay getReplay() { + return replay; + } + + /** + * Ensures that the environment is currently a Master before proceeding + * with an operation that requires it to be the master. + * + * @throws UnknownMasterException if the node is disconnected + * @throws ReplicaWriteException if the node is currently a replica + */ + public void checkIfMaster(Locker locker) + throws UnknownMasterException, ReplicaWriteException { + + final StateChangeEvent event = nodeState.getStateChangeEvent(); + + switch (nodeState.getRepEnvState()) { + case MASTER: + break; + + case REPLICA: + throw new ReplicaWriteException(locker, event); + + case UNKNOWN: + throw new UnknownMasterException(locker, event); + + case DETACHED: + throw new UnknownMasterException(locker, event); + + default: + throw EnvironmentFailureException.unexpectedState + ("Unexpected state: " + nodeState.getRepEnvState()); + } + } + + /** + * @return the repNode. May return null. + */ + public RepNode getRepNode() { + return repNode; + } + + /** + * Create an appropriate type of ThreadLocker. Specifically, it creates an + * MasterThreadLocker if the node is currently a Master, and a + * ReplicaThreadLocker otherwise, that is, if the node is a Replica, or + * it's currently in a DETACHED state. + * + * @return an instance of MasterThreadLocker or ReplicaThreadLocker + */ + @Override + public ThreadLocker createRepThreadLocker() { + return (isMaster() ? + new MasterThreadLocker(this) : + new ReplicaThreadLocker(this)); + } + + /** + * Create an appropriate type of Replicated transaction. Specifically, + * it creates a MasterTxn, if the node is currently a Master, a ReadonlyTxn + * otherwise, that is, if the node is a Replica, or it's currently in a + * DETACHED state. + * + * Note that a ReplicaTxn, used for transaction replay on a Replica is not + * created on this path. It's created explicitly in the Replay loop by a + * Replica. + * + * @param config the transaction configuration + * + * @return an instance of MasterTxn or ReadonlyTxn + * @throws DatabaseException + */ + @Override + public Txn createRepUserTxn(TransactionConfig config) + throws DatabaseException { + + return (isMaster() && + !config.getReadOnly() && + !config.getLocalWrite()) ? + MasterTxn.create(this, config, nameIdPair) : + new ReadonlyTxn(this, config); + } + + /** + * Ensure that a sufficient number of feeders are available before + * proceeding with a master transaction begin. + * + * @param txn the master transaction being initiated. + * + * @throws InterruptedException + * @throws DatabaseException if there were insufficient Replicas after the + * timeout period. 
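+     * (The wait is bounded by the transaction's own timeout when that is
+     * shorter: with, say, a 500 ms transaction timeout and a 10 s
+     * insufficient-replicas timeout, the quorum wait is capped at 500 ms;
+     * values illustrative.)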
+ */ + public void txnBeginHook(MasterTxn txn) + throws InterruptedException, + DatabaseException { + + checkIfInvalid(); + final long txnTimeout = txn.getTxnTimeout(); + int timeout = insufficientReplicasTimeout; + + if ((txnTimeout != 0) && (txnTimeout < insufficientReplicasTimeout)) { + timeout = (int) txnTimeout; + } + + repNode.getDurabilityQuorum().ensureReplicasForCommit(txn, timeout); + } + + /** + * Installs the commit-blocking latch that is used to halt the commit/abort + * of transactions, in the final phase of a master transfer. + * + * @see #updateCBVLSN(LocalCBVLSNUpdater) + */ + public void blockTxnCompletion(CountDownLatch blocker) + throws InterruptedException { + + ReentrantReadWriteLock.WriteLock lock = blockLatchLock.writeLock(); + lock.lockInterruptibly(); + try { + blockTxnLatch = blocker; + } finally { + lock.unlock(); + } + } + + /** + * If the GlobalCBVLSN is not defunct, updates the CBVLSN on behalf of a + * Feeder input thread (or FeederManager running in the RepNode thread). + * If the GlobalCBVLSN is defunct, does nothing. + *
+     * <p>
+     * Does the update while avoiding the possibility that any resulting
+     * GroupDB update may get blocked behind the final phase of a master
+     * transfer.
+     * <p>
        + * We skip the update if we're at the point of blocking new transactions + * for a master transfer. And we use a read/write lock in order to be able + * to examine that state safely. + */ + public void updateCBVLSN(LocalCBVLSNUpdater updater) { + + if (repNode.isGlobalCBVLSNDefunct()) { + return; + } + + ReentrantReadWriteLock.ReadLock lock = blockLatchLock.readLock(); + lock.lock(); + try { + if (blockTxnLatch.getCount() > 0) { + return; + } + updater.update(); + } finally { + lock.unlock(); + } + } + + /** + * Releases the transaction block latch. + */ + public void unblockTxnCompletion() { + LoggerUtils.info(envLogger, this, "Releasing commit block latch"); + blockTxnLatch.countDown(); + } + + /** + * This hook is used primarily to perform the final checks before allowing + * the commit operation to proceed. The following checks are performed + * here: + * + * 1) Check for master + * 2) Check for sufficient Feeder connections to ensure that the commit + * policy could be implemented. There is no guarantee that they will all + * ack the commit request. + * + * The method also associates a latch with the transaction. The latch is + * used to delay the commit operation until a sufficient number of commits + * have been received. + * + * In addition, when mastership transfers are done, and this node is the + * original master, commits and aborts are blocked so as to avoid hard + * recovery after electing a new master, see [#18081]. + * + * @param txn the master transaction being committed + * + * @throws InsufficientReplicasException if the feeder is not in contact + * with enough replicas. + * @throws RestartRequiredException if the environment is invalid. + * @throws UnknownMasterException if the current master is unknown. + * @throws ReplicaWriteException if the node transitioned to a Replica + * after the transaction was initiated. + */ + public void preLogCommitHook(MasterTxn txn) + throws InsufficientReplicasException, + RestartRequiredException, + UnknownMasterException, + ReplicaWriteException, + EnvironmentFailureException { + + checkIfInvalid(); + checkIfMaster(txn); + checkBlock(txn); + + /* Still a master, check for a sufficient number of connections */ + int activeReplicaCount = + repNode.feederManager().activeAckReplicaCount(); + ReplicaAckPolicy ackPolicy = + txn.getCommitDurability().getReplicaAck(); + int requiredAckCount = txn.getRequiredAckCount(); + + if (envLogger.isLoggable(Level.FINE)) { + LoggerUtils.fine(envLogger, this, + "Txn " + txn.getId() + " requires: " + + requiredAckCount + " active: " + + activeReplicaCount + + " replica acks. Commit Policy: " + ackPolicy); + } + + if (requiredAckCount > activeReplicaCount) { + /* Check for possible activation of Primary */ + if (ackPolicy.equals(ReplicaAckPolicy.SIMPLE_MAJORITY) && + repNode.getArbiter().activateArbitration()) { + txn.resetRequiredAckCount(); + } else if (useArbiter(txn)) { + /* + * Note we could change the check to allow a degraded + * write from any group size. Limit is place at rep group + * size of two due to the priority of requirements + * and lower cost of testing. + */ + txn.setArbiterAck(true); + } else { + /* + * Capture the set to ensure it's consistent with the exception + * message. 
+ */ + final boolean includeArbiters = + !ackPolicy.equals(ReplicaAckPolicy.ALL); + final Set activeAckRepSet = + repNode.feederManager().activeAckReplicas(includeArbiters); + + if (requiredAckCount > activeAckRepSet.size()) { + /* No change in window, throw exception */ + InsufficientReplicasException ire = + new InsufficientReplicasException + (txn, ackPolicy, requiredAckCount, activeAckRepSet); + LoggerUtils.info(envLogger, this, ire.getMessage()); + throw ire; + } + + /** + * A new replica became active in the window between the + * first fast check and the second slower check, just continue + */ + } + } + feederTxns.setupForAcks(txn); + } + + /* + * Block transaction commits/aborts if this node is the original master + * and we're doing Master Transfer. + */ + private void checkBlock(MasterTxn txn) { + try { + + /* + * Lock out the setting of the block latch by Master Transfer in + * the interval between waiting on the latch and setting the VLSN + * for the commit: Master Transfer needs to get a coherent idea of + * the final VLSN when it sets the latch. This lock will be + * released by the {@code postLogXxxHook()} functions, one of which + * is guaranteed to be called, unless an Environment-invalidating + * exception occurs. + */ + if (txn.lockOnce()) { + blockLatchLock.readLock().lockInterruptibly(); + } + + if (blockTxnLatch.getCount() > 0) { + LoggerUtils.info(envLogger, this, + "Block transaction: " + txn.getId() + + " pending master transfer. Write locks = " + + txn.getWriteLockIds()); + } + + final long txnTimeout = txn.getTxnTimeout(); + if (txnTimeout <= 0) { + blockTxnLatch.await(); + } else if (! blockTxnLatch.await(txnTimeout, + TimeUnit.MILLISECONDS)) { + + final String message = + "Timed out waiting for master transfer. " + + "Configured transaction timeout:" + txnTimeout + "ms"; + + throw new TransactionTimeoutException(txn, message); + } + + checkIfInvalid(); + + /* + * Check again, after the block! The block may be a result of a + * master->replica transfer, and if this node transitions from + * master to replica, this node will be disqualified from being + * able to commit transactions. + */ + checkIfMaster(txn); + + } catch (InterruptedException e) { + throw new ThreadInterruptedException(this, e); + } + } + + /** + * It ensures that the feeder obtains the requisite number of + * acknowledgments required for a successful commit. + * + * @param txn The MasterTxn that was committed locally. + * + * @throws InterruptedException if the thread was interrupted while + * waiting for acknowledgments. + * @throws InsufficientAcksException if the master received an insufficient + * number of commit acknowledgments within the replica commit timeout + * period. + * @throws EnvironmentFailureException + */ + public void postLogCommitHook(MasterTxn txn, LogItem commitItem) + throws InsufficientAcksException, + InterruptedException, + EnvironmentFailureException { + final long txnTimeout = txn.getTxnTimeout(); + int timeout = replicaAckTimeout; + + if ((txnTimeout != 0) && (txnTimeout < replicaAckTimeout)) { + timeout = (int) txnTimeout; + } + postLogCommitHookInternal(txn, commitItem, timeout); + } + + + private void postLogCommitHookInternal(MasterTxn txn, LogItem commitItem, + int ackTimeout) + throws InsufficientAcksException, + InterruptedException, + EnvironmentFailureException { + if (txn.unlockOnce()) { + blockLatchLock.readLock().unlock(); + } + + if (!isValid()) { + final int currentRequiredAckCount = repNode.getDurabilityQuorum(). 
+ getCurrentRequiredAckCount(txn.getCommitDurability(). + getReplicaAck()); + if (currentRequiredAckCount > 0) { + /* Throw a more actionable and accurate exception than EFE */ + final String msg = + "Acks could not be obtained because the environment" + + "was invalidated"; + LoggerUtils.info(envLogger, this, msg); + throw new InsufficientAcksException(msg); + } + /* No acks are required, transaction is complete. */ + return; + } + + /* Check if using Arbiter for transaction. */ + if (txn.getArbiterAck()) { + // get the arbiter acker source and add txn id to its queue. + Feeder arbFeeder = repNode.feederManager().getArbiterFeeder(); + if (arbFeeder != null) { + ArbiterFeederSource as = arbFeeder.getArbiterFeederSource(); + as.addCommit(commitItem); + } + } + + /* Don't do master check, the transaction has already been committed */ + try { + feederTxns.awaitReplicaAcks(txn, ackTimeout); + } catch (InsufficientAcksException e) { + if (txn.getArbiterAck() == false && useArbiter(txn)) { + txn.setArbiterAck(true); + postLogCommitHookInternal(txn, commitItem, arbiterAckTimeout); + return; + } + LoggerUtils.info(envLogger, this, e.getMessage()); + throw e; + } + } + + /** + * Invoked before aborting a MasterTxn, this happens when the master is + * going to be a replica because of mastership transfer. We do this to make + * sure that the replica going to be the master has the most recent log and + * no hard recovery would happen after its election, see SR [#18081]. + * + * @param txn The MasterTxn that was aborted locally. + * + * @throws ReplicaWriteException if the node transitioned to a Replica + * after the transaction was initiated. + * @throws UnknownMasterException if the current master is unknown. + * @throws EnvironmentFailureException + */ + public void preLogAbortHook(MasterTxn txn) + throws EnvironmentFailureException, + ReplicaWriteException, + UnknownMasterException { + + checkIfInvalid(); + checkIfMaster(txn); + checkBlock(txn); + } + + /** + * Releases the block latch lock, if held. This hook is called in the + * normal course of Txn.abort(), once the abort log record has been written + * and the associated VLSN stored. + */ + public void postLogAbortHook(MasterTxn txn) { + if (txn.unlockOnce()) { + blockLatchLock.readLock().unlock(); + } + } + + /** + * Removes any pending acknowledgments that were registered by the + * preLogCommitHook. This hook is called only when a {@code commit()} + * fails and therefore must be aborted. + */ + public void postLogCommitAbortHook(MasterTxn txn) { + LoggerUtils.info(envLogger, this, + "post log abort hook for txn: " + txn.getId()); + if (txn.unlockOnce()) { + blockLatchLock.readLock().unlock(); + } + feederTxns.clearTransactionAcks(txn); + } + + /** + * Create a ReplayTxn for recovery processing. + */ + @Override + public Txn createReplayTxn(long txnId) + throws DatabaseException { + + return + new ReplayTxn(this, TransactionConfig.DEFAULT, txnId, envLogger); + } + + /** + * Used by environment recovery to get a tracker to collect VLSN-LSN + * mappings that are within the recovery part of the log. These might + * not be reflected in the persistent mapping db. 
+ */ + @Override + public VLSNRecoveryProxy getVLSNProxy() { + int stride = configManager.getInt(RepParams.VLSN_STRIDE); + int maxMappings = configManager.getInt(RepParams.VLSN_MAX_MAP); + int maxDist = configManager.getInt(RepParams.VLSN_MAX_DIST); + + return new VLSNRecoveryTracker(this, stride, maxMappings, maxDist); + } + + public UUID getUUID() { + return repNode.getUUID(); + } + + /** + * Used during testing to introduce artificial clock skews. + */ + public static void setSkewMs(int skewMs) { + clockSkewMs = skewMs; + } + + public static int getClockSkewMs() { + return clockSkewMs; + } + + /** + * Truncate the head of the VLSNIndex to allow file deletion, if possible. + */ + @Override + public boolean tryVlsnHeadTruncate(long bytesNeeded) { + return vlsnIndex.tryTruncateFromHead(bytesNeeded); + } + + public int getNodeId() { + return nameIdPair.getId(); + } + + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + @Override + public long getReplayTxnTimeout() { + return replayTxnTimeout; + } + + /* Return the default consistency policy. */ + @Override + public ReplicaConsistencyPolicy getDefaultConsistencyPolicy() { + return defaultConsistencyPolicy; + } + + /** + * The default consistency is not currently mutable in the API, but can be + * set for testing purposes. + * + * TODO: Make it mutable in the API, since Durability is mutable. + */ + public void setDefaultConsistencyPolicy(ReplicaConsistencyPolicy policy) { + defaultConsistencyPolicy = policy; + } + + /* Returns the on disk LSN for VLSN. */ + private long getLsnForVLSN(VLSN vlsn, int readBufferSize) { + /* Returns the file number which is nearest to the vlsn. */ + long fileNumber = vlsnIndex.getLTEFileNumber(vlsn); + + /* Start reading from the nearest file. */ + FeederReader feederReader = + new FeederReader(this, + vlsnIndex, + DbLsn.makeLsn(fileNumber, 0), + readBufferSize); + + try { + feederReader.initScan(vlsn); + + /* + * Go on scan the log until FeederReader find the target VLSN, + * thrown out an EnvironmentFailureException if it can't be found. + */ + if (!feederReader.readNextEntry()) { + throw EnvironmentFailureException.unexpectedState + ("VLSN not found: " + vlsn); + } + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + + return feederReader.getLastLsn(); + } + + /** + * Returns the logged durable txn VLSN. The logged DTVLSN is part of the + * last txn commit or abort entry. + * + * @return the persistent DTVLSN. The value may be + * VLSN.UNINITIALIZED_VLSN_SEQUENCE if the environment was newly created, + * that is, it has no transactions in it as yet, or if the last entry was + * created by a pre-DTVLSN master. + * + * @throws FileNotFoundException if the file containing the last txn commit + * or abort entry does not exist + */ + public long getLoggedDTVLSN() + throws FileNotFoundException { + + final VLSN lastTxnEnd = getLastTxnEnd(); + + if (lastTxnEnd.isNull()) { + /* A brand new environment with no transactions in it. */ + return VLSN.UNINITIALIZED_VLSN_SEQUENCE; + } + + final long lsn = getLsnForVLSN(lastTxnEnd, + 1024 /* buffer size for txn end */); + final TxnEnd txnEnd = + (TxnEnd)getLogManager().getLogEntry(lsn).getMainItem(); + + long dtvlsn = txnEnd.getDTVLSN(); + if (dtvlsn != VLSN.UNINITIALIZED_VLSN_SEQUENCE) { + return dtvlsn; + } + + /* A JE version <= 7.1 log entry. */ + LoggerUtils.logMsg(envLogger, this, Level.INFO, + "Pre DTVLSN log, starting with zero dtvlsn"); + + return dtvlsn; + } + + /* Returns the end of the log. 
*/ + @Override + public long getEndOfLog() { + return vlsnIndex.getRange().getLast().getSequence(); + } + + /** + * Returns true if the VLSN is preserved as the record version. + */ + @Override + public boolean getPreserveVLSN() { + return preserveVLSN; + } + + /** + * Returns true if the VLSN is both preserved and cached. + */ + @Override + public boolean getCacheVLSN() { + return preserveVLSN && cacheVLSN; + } + + /** + * @see EnvironmentImpl#getName + */ + @Override + public String getName() { + return nameIdPair + ":" + super.getName(); + } + + /** + * Return true if this environment is part of a replication group. + */ + @Override + public boolean isReplicated() { + return true; + } + + /** + * Return true if this environment is used as an Arbiter. + */ + @Override + public boolean isArbiter() { + return isArbiter; + } + + /** + * Check whether this environment can be opened on an existing environment + * directory. + */ + @Override + public void checkRulesForExistingEnv(boolean dbTreeReplicatedBit, + boolean dbTreePreserveVLSN) + throws UnsupportedOperationException { + + if (!dbTreeReplicatedBit) { + + /* + * We are attempting to open an existing, non-replicated + * environment. + */ + throw new UnsupportedOperationException + ("This environment must be converted for replication." + + " using com.sleepycat.je.rep.util.DbEnableReplication."); + } + + /* The preserveVLSN setting is forever immutable. */ + if (dbTreePreserveVLSN != getPreserveVLSN()) { + throw new IllegalArgumentException + (RepParams.PRESERVE_RECORD_VERSION.getName() + + " parameter may not be changed." + + " Previous value: " + dbTreePreserveVLSN + + " New value: " + getPreserveVLSN()); + } + } + + /** + * Returns the hostname associated with this node. + * + * @return the hostname + */ + public String getHostName() { + String hostAndPort = configManager.get(RepParams.NODE_HOST_PORT); + int colonToken = hostAndPort.indexOf(":"); + return (colonToken >= 0) ? + hostAndPort.substring(0, colonToken) : + hostAndPort; + } + + /** + * Returns the port used by the replication node. + * + * @return the port number + */ + public int getPort() { + String hostAndPort = configManager.get(RepParams.NODE_HOST_PORT); + int colonToken = hostAndPort.indexOf(":"); + return (colonToken >= 0) ? + Integer.parseInt(hostAndPort.substring(colonToken + 1)) : + configManager.getInt(RepParams.DEFAULT_PORT); + } + + /* Convenience method for returning replication sockets. */ + public InetSocketAddress getSocket() { + return new InetSocketAddress(getHostName(), getPort()); + } + + /** + * Returns the JE version that is currently running on this node, + * consulting the TEST_JE_VERSION configuration parameter for a test + * override. + */ + public JEVersion getCurrentJEVersion() { + final String testJEVersion = configManager.get(TEST_JE_VERSION); + return testJEVersion.isEmpty() ? + JEVersion.CURRENT_VERSION : + new JEVersion(testJEVersion); + } + + /** + * Returns the set of sockets associated with helper nodes. + * + * @return the set of helper sockets, returns an empty set if there + * are no helpers. + */ + public Set getHelperSockets() { + String helperHosts = configManager.get(RepParams.HELPER_HOSTS); + return HostPortPair.getSockets(helperHosts); + } + + /** + * Called when a node has identified itself as the master, which is when + * the RepNode.selfElect is called. The database should not exist at + * this point. 
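+     *
+     * <p>The create below uses SYNC local durability with
+     * ReplicaAckPolicy.NONE, since at self-election there are no other
+     * group members yet that could supply acknowledgments.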
+ * + * Lock hierarchy: GroupDbLock -> sync on EnvironmentImpl + * @throws DatabaseException + */ + public DatabaseImpl createGroupDb() + throws DatabaseException { + + assert isMaster(); + + try { + groupDbLock.lockInterruptibly(); + } catch (InterruptedException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + + try { + if (groupDbImpl != null) { + throw EnvironmentFailureException.unexpectedState + ("GroupDb should not exist."); + } + + DatabaseImpl newDbImpl = null; + Txn txn = null; + try { + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setDurability(new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.NONE)); + txnConfig.setConsistencyPolicy(NO_CONSISTENCY); + txn = new MasterTxn(this, + txnConfig, + getNameIdPair()); + + /* Database should not exist yet, create it now */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setReplicated(true); + + newDbImpl = getDbTree().createInternalDb + (txn, DbType.REP_GROUP.getInternalName(), dbConfig); + txn.commit(); + txn = null; + } finally { + if (txn != null) { + txn.abort(); + } + } + + groupDbImpl = newDbImpl; + } finally { + groupDbLock.unlock(); + } + return groupDbImpl; + } + + /** + * Open the group db, which should exist already, using NO_CONSISTENCY. + */ + public DatabaseImpl getGroupDb() + throws DatabaseNotFoundException, + DatabaseException { + + return openGroupDb(false /* doLockProbe */); + } + + /** + * Open the group db, which should exist already, using NO_CONSISTENCY. Do + * not wait on the group db lock, return null if the databaseImpl hasn't + * been created and we can't obtain it. + * + * Lock hierarchy: GroupDbLock -> sync on EnvironmentImpl + */ + public DatabaseImpl probeGroupDb() + throws DatabaseException { + + try { + return openGroupDb(true /* doLockProbe */); + } catch (DatabaseNotFoundException e) { + /* Should never happen, DB should exist. */ + throw EnvironmentFailureException.unexpectedException(e); + } + } + + /** + * Do the work of creating the lock and then assigning the groupDbImpl + * field, using NO_CONSISTENCY. + * + * @throws DatabaseException + * @throws DatabaseNotFoundException + */ + private DatabaseImpl openGroupDb(final boolean doLockProbe) + throws DatabaseNotFoundException, DatabaseException { + + /* Acquire the lock. */ + try { + if (doLockProbe) { + if (!groupDbLock.tryLock(1, TimeUnit.MILLISECONDS)) { + /* Contention, try later. */ + return null; + } + } else { + groupDbLock.lockInterruptibly(); + } + } catch(InterruptedException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + + Txn txn = null; + try { + if (groupDbImpl != null) { + return groupDbImpl; + } + + DatabaseImpl newDbImpl = null; + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setConsistencyPolicy(NO_CONSISTENCY); + txn = new ReadonlyTxn(this, txnConfig); + + newDbImpl = getDbTree().getDb(txn, + DbType.REP_GROUP.getInternalName(), + null /* databaseHandle */, + false); + if (newDbImpl == null) { + throw new DatabaseNotFoundException + (DbType.REP_GROUP.getInternalName()); + } + txn.commit(); + txn = null; + + groupDbImpl = newDbImpl; + return groupDbImpl; + } finally { + if (txn != null) { + txn.abort(); + } + groupDbLock.unlock(); + } + } + + /** + * Return true if the node has been configured as a Designated Primary. 
+ * This does not necessarily mean that the node is actively operating in + * designated primary mode. See + * {@link com.sleepycat.je.rep.arbitration.Arbiter#isActive} + */ + public boolean isDesignatedPrimary() { + return getConfigManager().getBoolean(RepParams.DESIGNATED_PRIMARY); + } + + @Override + public boolean addDbBackup(DbBackup backup) { + synchronized (backups) { + if (backupProhibited) { + return false; + } + boolean added = backups.add(backup); + assert added; + } + + super.addDbBackup(backup); + return true; + } + + @Override + public void removeDbBackup(DbBackup backup) { + synchronized (backups) { + boolean removed = backups.remove(backup); + assert removed; + } + super.removeDbBackup(backup); + } + + /* Invalidate all the on going DbBackups, used in Replay.rollback(). */ + public void invalidateBackups(long fileNumber) { + synchronized (backups) { + for (DbBackup backup : backups) { + backup.invalidate(fileNumber); + } + } + } + + /* Set the backupProhibited status, used in Replay.rollback(). */ + public void setBackupProhibited(boolean backupProhibited) { + synchronized (backups) { + this.backupProhibited = backupProhibited; + } + } + + /* For creating a rep exception from standalone code. */ + @Override + public LockPreemptedException + createLockPreemptedException(Locker locker, Throwable cause) { + return new LockPreemptedException(locker, cause); + } + + /* For creating a rep exception from standalone code. */ + @Override + public DatabasePreemptedException + createDatabasePreemptedException(String msg, + String dbName, + Database db) { + return new DatabasePreemptedException(msg, dbName, db); + } + + /* For creating a rep exception from standalone code. */ + @Override + public LogOverwriteException createLogOverwriteException(String msg) { + return new LogOverwriteException(msg); + } + + @Override + public int getReplayFreeDiskPercent() { + return getConfigManager().getInt(REPLAY_FREE_DISK_PERCENT); + } + + /** + * Sets up the environment for group shutdown when the environment is + * closed. + * + * @see ReplicatedEnvironment#shutdownGroup(long, TimeUnit) + */ + public void shutdownGroupSetup(long timeoutMs) { + final int openCount = getAppOpenCount(); + if (openCount > 1) { + throw new IllegalStateException + ("Environment has " + (openCount - 1) + + " additional open handles."); + } + + final int backupCount = getBackupCount(); + if (backupCount > 0) { + throw new IllegalStateException + ("Environment has " + backupCount + + " DbBackups in progress."); + } + + repNode.shutdownGroupOnClose(timeoutMs); + } + + public String transferMaster(Set replicas, + long timeout, + boolean force) { + return repNode.transferMaster(replicas, timeout, force); + } + + /** + * Dump interesting aspects of the node's state. Currently for debugging + * use, possibly useful for field support. + */ + public String dumpState() { + StringBuilder sb = new StringBuilder(); + + sb.append(getNameIdPair()); + sb.append("[").append(getState()).append("] " ); + + if (repNode != null) { + sb.append(repNode.dumpState()); + } + + if (vlsnIndex != null) { + sb.append("vlsnRange="); + sb.append(vlsnIndex.getRange()).append("\n"); + } + + if (replay != null) { + sb.append(replay.dumpState()); + } + + return sb.toString(); + } + + /** + * Dumps the state associated with all active Feeders that supply + * acknowledgments, along with identifying information about the node and + * its current HA state. 
+     */
+    public String dumpAckFeederState() {
+        return getNameIdPair() + "[" + getState() + "]" +
+            repNode.dumpAckFeederState();
+    }
+
+    /**
+     * If this node was started with a hard recovery, preserve that
+     * information.
+     */
+    public void setHardRecoveryInfo(RollbackException e) {
+        hardRecoveryStat.set(true);
+        hardRecoveryInfoStat.set(e.getMessage());
+    }
+
+    public StatGroup getNodeStats() {
+        return nodeStats;
+    }
+
+    /**
+     * Ensure that the in-memory vlsn index encompasses all logged entries
+     * before it is flushed to disk. A no-op for non-replicated systems.
+     * [#19754]
+     */
+    @Override
+    public void awaitVLSNConsistency() {
+        vlsnIndex.awaitConsistency();
+    }
+
+    public void setSyncupProgress(SyncupProgress progress) {
+        setSyncupProgress(progress, 0, -1);
+    }
+
+    public void setSyncupProgress(SyncupProgress progress, long n, long total) {
+        if (syncupProgressListener == null) {
+            return;
+        }
+
+        if (!(syncupProgressListener.progress(progress, n, total))) {
+            throw new EnvironmentFailureException
+                (this, EnvironmentFailureReason.PROGRESS_LISTENER_HALT,
+                 "ReplicatedEnvironmentConfig.syncupProgressListener: ");
+        }
+    }
+
+    /**
+     * Test method to create pre-DTVLSN logs. When this is turned on in a
+     * test environment the dtvlsn value in the log is written as a
+     * UNINITIALIZED_VLSN_SEQUENCE (zero), which is the value that
+     * deserialization would assign to it if a new replica came across an
+     * older version commit or abort record.
+     */
+    public static void setSimulatePreDTVLSNMaster(
+        boolean simulatePreDTVLSNMaster) {
+
+        RepImpl.simulatePreDTVLSNMaster = simulatePreDTVLSNMaster;
+    }
+
+    public static boolean isSimulatePreDTVLSNMaster() {
+        return simulatePreDTVLSNMaster;
+    }
+
+    public LogFileRewriteListener getLogRewriteListener() {
+        return logRewriteListener;
+    }
+
+    public ReplicationNetworkConfig getRepNetConfig() {
+        return repNetConfig;
+    }
+
+    public DataChannelFactory getChannelFactory() {
+        initializeChannelFactory();
+        return channelFactory;
+    }
+
+    @Override
+    public void invalidate(EnvironmentFailureException e) {
+        super.invalidate(e);
+        unblockTxnCompletion();
+    }
+
+    public VLSN getLastTxnEnd() {
+        return vlsnIndexAccess.getLastTxnEnd();
+    }
+
+    /**
+     * Private class to prevent use of the close() method by the application
+     * on an internal handle.
+     */
+    private static class InternalReplicatedEnvironment
+        extends ReplicatedEnvironment {
+
+        public InternalReplicatedEnvironment(File environmentHome,
+                                             ReplicationConfig cloneRepConfig,
+                                             EnvironmentConfig cloneConfig,
+                                             RepImpl envImpl) {
+            super(environmentHome, cloneRepConfig, cloneConfig,
+                  null /*consistencyPolicy*/, null /*initialElectionPolicy*/,
+                  false /*joinGroup*/, envImpl);
+        }
+
+        @Override
+        protected boolean isInternalHandle() {
+            return true;
+        }
+
+        @Override
+        public synchronized void close() {
+            throw EnvironmentFailureException.unexpectedState
+                ("close() not permitted on an internal environment handle");
+        }
+    }
+
+    /**
+     * Peruse the environment-wide transaction table, and return a set of
+     * all existing MasterTxns.
+     */
+    public Set<MasterTxn> getExistingMasterTxns() {
+        return getTxnManager().getTxns(MasterTxn.class);
+    }
+
+    /**
+     * RepImpl supplies the last txn abort or commit vlsn for use cases such
+     * as determining how caught up a feeder or master transfer is. This
+     * info is usually obtained from the VLSNRange via the VLSNIndex, but in
+     * some types of environment shutdowns, the VLSNIndex may need to be
+     * nulled out.
+     * When that happens, VLSNIndexAccess will switch over from using the
+     * VLSNIndex to obtain the range, to using a reference to the last known
+     * VLSNRange. Note that the VLSNRange instance held within VLSNIndex is
+     * constantly being replaced while the replication stream is active, and
+     * that's why VLSNIndexAccess generally obtains the range via the
+     * VLSNIndex, rather than keeping a reference to a VLSNRange instance.
+     */
+    private class VLSNIndexAccess {
+
+        private VLSNRange savedRange;
+
+        synchronized VLSN getLastTxnEnd() {
+            if (vlsnIndex != null) {
+                return vlsnIndex.getRange().getLastTxnEnd();
+            }
+            return savedRange.getLastTxnEnd();
+        }
+
+        /**
+         * Save the last range so the lastTxnEnd value can continue
+         * to be available, and null out the vlsnIndex.
+         */
+        synchronized void closeVLSNIndex(boolean checkpointed) {
+            if (vlsnIndex != null) {
+                vlsnIndex.close(checkpointed);
+                savedRange = vlsnIndex.getRange();
+                vlsnIndex = null;
+            }
+        }
+
+        /**
+         * Save the last range so the lastTxnEnd value can continue
+         * to be available, and null out the vlsnIndex.
+         */
+        synchronized void abnormalCloseVLSNIndex() {
+            if (vlsnIndex != null) {
+                vlsnIndex.abnormalClose();
+                savedRange = vlsnIndex.getRange();
+                vlsnIndex = null;
+            }
+        }
+    }
+
+    /**
+     * Checks that writing records with a TTL is allowed.
+     *
+     * @throws IllegalStateException if any node in the group is less than
+     * JE_TTL_VERSION.
+     */
+    @Override
+    public void checkTTLAvailable() {
+        if (isTTLAvailable) {
+            return;
+        }
+        final JEVersion requiredJEVersion = TTL.getMinJEVersion();
+        try {
+            repNode.setMinJEVersion(requiredJEVersion);
+            isTTLAvailable = true;
+        } catch (MinJEVersionUnsupportedException e) {
+            if (e.nodeVersion == null) {
+                throw new IllegalStateException(
+                    "TTL is not currently supported." +
+                    " The version running on node " + e.nodeName +
+                    " could not be determined," +
+                    " but this feature requires version " +
+                    requiredJEVersion.getNumericVersionString() +
+                    " or later.");
+            }
+            throw new IllegalStateException(
+                "TTL is not currently supported." +
+                " Node " + e.nodeName + " is running version " +
+                e.nodeVersion.getNumericVersionString() +
+                ", but this feature requires version " +
+                requiredJEVersion.getNumericVersionString() +
+                " or later.");
+        }
+    }
+
+    /**
+     * Recovery encountered a RestoreRequired marker.
+     */
+    @Override
+    public void handleRestoreRequired(RestoreRequired restoreRequired) {
+
+        switch (restoreRequired.getFailureType()) {
+        case NETWORK_RESTORE:
+            /*
+             * A network restore must be done to get a coherent copy of
+             * the log files into this environment's directory.
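+             *
+             * The InsufficientLogException thrown below carries the helper
+             * host list; a minimal application-side sketch (not part of
+             * this class) of acting on it looks roughly like:
+             *
+             *   } catch (InsufficientLogException ile) {
+             *       NetworkRestore restore = new NetworkRestore();
+             *       restore.execute(ile, new NetworkRestoreConfig());
+             *       // ... then re-open the ReplicatedEnvironment
+             *   }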
+ */ + throw new InsufficientLogException( + restoreRequired.getProperties(), + configManager.get(RepParams.HELPER_HOSTS)); + default: + /* Not a type we can handle, go to the default behavior */ + super.handleRestoreRequired(restoreRequired); + } + } + + private boolean useArbiter(MasterTxn txn) { + if (allowArbiterAck && + repNode.getGroup().getAckGroupSize() == 2 && + repNode.feederManager().activeAckArbiterCount() > 0 && + txn.getCommitDurability().getReplicaAck() == + ReplicaAckPolicy.SIMPLE_MAJORITY) { + return true; + } + return false; + } + + public void setAuthenticator(StreamAuthenticator authenticator) { + this.authenticator = authenticator; + } + + public StreamAuthenticator getAuthenticator() { + return authenticator; + } +} diff --git a/src/com/sleepycat/je/rep/impl/RepImplStatDefinition.java b/src/com/sleepycat/je/rep/impl/RepImplStatDefinition.java new file mode 100644 index 0000000..89c8bda --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepImplStatDefinition.java @@ -0,0 +1,38 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * General information for replicated nodes. + */ +public class RepImplStatDefinition { + + public static final String GROUP_NAME = "ReplicatedEnvironment"; + public static final String GROUP_DESC = + "General information about a replication node"; + + public static final StatDefinition HARD_RECOVERY = + new StatDefinition("hardRecoveryIncurred", + "If true, this node had to truncate committed " + + "transactions which differed from the group's " + + "version of the replication stream from its log " + + "in order to come up."); + + public static final StatDefinition HARD_RECOVERY_INFO = + new StatDefinition("hardRecoveryInfo", + "Description of the amount of log truncated " + + " in order to do a hard recovery."); +} \ No newline at end of file diff --git a/src/com/sleepycat/je/rep/impl/RepNodeImpl.java b/src/com/sleepycat/je/rep/impl/RepNodeImpl.java new file mode 100644 index 0000000..65d77af --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepNodeImpl.java @@ -0,0 +1,445 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl; + +import java.io.Serializable; +import java.net.InetSocketAddress; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.impl.node.cbvlsn.CleanerBarrierState; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.utilint.VLSN; + +/** + * Describes a node that is a member of the replication group. + */ +public class RepNodeImpl implements ReplicationNode, Serializable { + + private static final long serialVersionUID = 1L; + + /* Identifies the node both by external name and internal node ID. */ + private final NameIdPair nameIdPair; + + /* The node type, electable, monitor, etc. */ + private final NodeType type; + + /* + * True if the node was acknowledged by a quorum and its entry is therefore + * considered durable. SECONDARY and EXTERNAL nodes are always considered + * acknowledged. + */ + private boolean quorumAck; + + /* + * True if the node has been removed and is no longer an active part of the + * group + */ + private boolean isRemoved; + + /* The hostname used for communications with the node. */ + private String hostName; + + /* The port used by a node. */ + private int port; + + /* + * The CBVLSN Barrier state associated with the node. Is unused if the + * GlobalCBVLSN is defunct. See GlobalCBVLSN. + */ + private CleanerBarrierState barrierState; + + /* + * This version is used in conjunction with the group level change + * version to identify the incremental changes made to individual + * changes made to a group. + */ + private int changeVersion = NULL_CHANGE; + + private static final int NULL_CHANGE = -1; + + /** + * The JE version most recently noted running on this node, or null if not + * known. + */ + private volatile JEVersion jeVersion; + + /** + * @hidden + * + * Constructor used to de-serialize a Node. All other convenience + * constructors funnel through this one so that argument checks can + * be systematically enforced. + */ + public RepNodeImpl(final NameIdPair nameIdPair, + final NodeType type, + final boolean quorumAck, + final boolean isRemoved, + final String hostName, + final int port, + final CleanerBarrierState barrierState, + final int changeVersion, + final JEVersion jeVersion) { + + if (nameIdPair.getName().equals(RepGroupDB.GROUP_KEY)) { + throw EnvironmentFailureException.unexpectedState + ("Member node ID is the reserved key value: " + nameIdPair); + } + + if (hostName == null) { + throw EnvironmentFailureException.unexpectedState + ("The hostname argument must not be null"); + } + + if (type == null) { + throw EnvironmentFailureException.unexpectedState + ("The nodeType argument must not be null"); + } + + this.nameIdPair = nameIdPair; + this.type = type; + this.quorumAck = quorumAck || type.isSecondary() || type.isExternal(); + this.isRemoved = isRemoved; + this.hostName = hostName; + this.port = port; + this.barrierState = barrierState; + this.changeVersion = changeVersion; + this.jeVersion = jeVersion; + } + + /** + * @hidden + * + * Convenience constructor for the above. 
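+     * It supplies a default CleanerBarrierState: a null VLSN stamped with
+     * the current time, as constructed below.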
+ */ + public RepNodeImpl(final NameIdPair nameIdPair, + final NodeType type, + final boolean quorumAck, + final boolean isRemoved, + final String hostName, + final int port, + final int changeVersion, + final JEVersion jeVersion) { + this(nameIdPair, type, quorumAck, isRemoved, hostName, port, + new CleanerBarrierState(VLSN.NULL_VLSN, System.currentTimeMillis()), + changeVersion, jeVersion); + } + + /** + * @hidden + * Convenience constructor for transient nodes + */ + public RepNodeImpl(final NameIdPair nameIdPair, + final NodeType type, + final String hostName, + final int port, + final JEVersion jeVersion) { + this(nameIdPair, type, false, false, hostName, port, NULL_CHANGE, + jeVersion); + } + + /** + * @hidden + * Convenience constructor for transient nodes during unit tests. + */ + public RepNodeImpl(final ReplicationConfig repConfig) { + this(new NameIdPair(repConfig.getNodeName(), NameIdPair.NULL_NODE_ID), + repConfig.getNodeType(), + repConfig.getNodeHostname(), + repConfig.getNodePort(), + JEVersion.CURRENT_VERSION); + } + + /** + * @hidden + * + * Convenience constructor for the above. + */ + public RepNodeImpl(final String nodeName, + final String hostName, + final int port, + final JEVersion jeVersion) { + this(new NameIdPair(nodeName, NameIdPair.NULL.getId()), + NodeType.ELECTABLE, hostName, port, jeVersion); + } + + /** + * @hidden + * + * Convenience constructor for the above. + */ + public RepNodeImpl(Protocol.NodeGroupInfo mi) { + this(mi.getNameIdPair(), + mi.getNodeType(), + mi.getHostName(), + mi.port(), + mi.getJEVersion()); + } + + /* (non-Javadoc) + * @see com.sleepycat.je.rep.ReplicationNode#getSocketAddress() + */ + @Override + public InetSocketAddress getSocketAddress() { + return new InetSocketAddress(hostName, port); + } + + /** + * Returns whether the node was acknowledged by a quorum and its entry is + * therefore considered durable. Secondary nodes are always considered + * acknowledged. 
+ */ + public boolean isQuorumAck() { + return quorumAck; + } + + public boolean isRemoved() { + assert !(isRemoved && type.hasTransientId()) + : "Nodes with transient IDs are never marked removed"; + return isRemoved; + } + + public void setChangeVersion(int changeVersion) { + this.changeVersion = changeVersion; + } + + public int getChangeVersion() { + return changeVersion; + } + + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + /* (non-Javadoc) + * @see com.sleepycat.je.rep.ReplicationNode#getName() + */ + @Override + public String getName() { + return nameIdPair.getName(); + } + + public int getNodeId() { + return nameIdPair.getId(); + } + + /* (non-Javadoc) + * @see com.sleepycat.je.rep.ReplicationNode#getNodeType() + */ + @Override + public NodeType getType() { + return type; + } + + /* (non-Javadoc) + * @see com.sleepycat.je.rep.ReplicationNode#getHostName() + */ + @Override + public String getHostName() { + return hostName; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + /* (non-Javadoc) + * @see com.sleepycat.je.rep.ReplicationNode#getPort() + */ + @Override + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public String getHostPortPair() { + return HostPortPair.getString(hostName, port); + } + + public CleanerBarrierState getBarrierState() { + return barrierState; + } + + public CleanerBarrierState setBarrierState(CleanerBarrierState barrierState) { + return this.barrierState = barrierState; + } + + public void setQuorumAck(boolean quorumAck) { + this.quorumAck = quorumAck; + } + + public void setRemoved(boolean isRemoved) { + this.isRemoved = isRemoved; + } + + /** + * Returns the JE Version most recently noted running on this node, or + * {@code null} if not known. + */ + public JEVersion getJEVersion() { + return jeVersion; + } + + /** + * Updates the JE version most recently known running on this node to match + * the version specified. Does nothing if the argument is null. + * + * @param otherJEVersion the version or {@code null} + */ + public void updateJEVersion(final JEVersion otherJEVersion) { + if (otherJEVersion != null) { + jeVersion = otherJEVersion; + } + } + + @Override + public String toString() { + + String acked = " (is member)"; + + if (!quorumAck) { + acked = " (not yet a durable member)"; + } + + if (isRemoved) { + acked = " (is removed)"; + } + + String info = + String.format("Node:%s %s:%d%s%s changeVersion:%d %s%s\n", + getName(), getHostName(), getPort(), + acked, + (!type.isElectable() ? " " + type : ""), + getChangeVersion(), + barrierState, + ((jeVersion != null) ? + " jeVersion:" + jeVersion : + "")); + return info; + + } + + /** + * Checks if the argument represents the same node, ignoring fields that + * might legitimately vary over time. Like the equals method, considers + * all fields, except ignores the quorumAck field (which may change + * temporarily), the nodeId (since it may not have been resolved as yet), + * and the isRemoved, barrierState, changeVersion, and jeVersion fields + * (which can change over time). 
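+     *
+     * <p>An illustrative sketch (hypothetical values, not from this class's
+     * tests):
+     * <pre>{@code
+     * NameIdPair pair = new NameIdPair("n1", 1);
+     * RepNodeImpl a = new RepNodeImpl(pair, NodeType.ELECTABLE, "host1", 5001, null);
+     * RepNodeImpl b = new RepNodeImpl(pair, NodeType.ELECTABLE, "host1", 5001, null);
+     * b.setQuorumAck(true);
+     * assert a.equivalent(b); // quorumAck is ignored here
+     * assert !a.equals(b);    // ...but not by equals()
+     * }</pre>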
+ * + * @param mi the other object in the comparison + * + * @return true if the two are equivalent + */ + public boolean equivalent(RepNodeImpl mi) { + if (this == mi) { + return true; + } + + if (mi == null) { + return false; + } + + if (port != mi.port) { + return false; + } + + if (hostName == null) { + if (mi.hostName != null) { + return false; + } + } else if (!hostName.equals(mi.hostName)) { + return false; + } + + /* Ignore the id. */ + if (!nameIdPair.getName().equals(mi.nameIdPair.getName())) { + return false; + } + + if (getType() != mi.getType()) { + return false; + } + + /* + * Ignore quorumAck, isRemoved, barrierState, changeVersion, and + * jeVersion + */ + + return true; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((hostName == null) ? 0 : hostName.hashCode()); + result = prime * result + nameIdPair.hashCode(); + result = prime * result + port; + result = prime * result + (isQuorumAck() ? 1231 : 1237); + result = prime * result + + (jeVersion == null ? 0 : jeVersion.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof RepNodeImpl)) { + return false; + } + final RepNodeImpl other = (RepNodeImpl) obj; + if (hostName == null) { + if (other.hostName != null) { + return false; + } + } else if (!hostName.equals(other.hostName)) { + return false; + } + if (!nameIdPair.equals(other.nameIdPair)) { + return false; + } + if (getType() != other.getType()) { + return false; + } + if (port != other.port) { + return false; + } + if (isQuorumAck() != other.isQuorumAck()) { + return false; + } + if (jeVersion == null) { + if (other.jeVersion != null) { + return false; + } + } else if (!jeVersion.equals(other.getJEVersion())) { + return false; + } + return true; + } +} diff --git a/src/com/sleepycat/je/rep/impl/RepParams.java b/src/com/sleepycat/je/rep/impl/RepParams.java new file mode 100644 index 0000000..6ef4110 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/RepParams.java @@ -0,0 +1,1613 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.UnknownHostException; +import java.util.HashSet; +import java.util.StringTokenizer; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.BooleanConfigParam; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.DurationConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.config.IntConfigParam; +import com.sleepycat.je.config.LongConfigParam; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.ReplicationSSLConfig; +import com.sleepycat.je.rep.util.DbResetRepGroup; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.net.SSLChannelFactory; + +public class RepParams { + + /* + * Note: all replicated parameters should start with + * EnvironmentParams.REP_PARAM_PREFIX, which is "je.rep.", + * see SR [#19080]. + */ + + /** + * @hidden + * Name of a java System property (boolean) which can be turned on in order + * to avoid input validation checks on node names. This is undocumented. + *

+     * Generally users should not skip validation, because there are a few
+     * kinds of punctuation characters that would cause problems if they
+     * were allowed in node names. But in the past users might have
+     * inadvertently created node names that do not conform to the new,
+     * stricter rules. In that case they would not be able to upgrade to the
+     * newer version of JE that now includes this checking.
+     *
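+     * A minimal sketch of skipping the check (a JVM system property, read
+     * via Boolean.getBoolean, rather than a JE config param):
+     * <pre>
+     *    java -Dje.rep.skipNodenameValidation=true ...
+     * </pre>
+     *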

        + * This flag actually applies to the group name too. But for group names + * the new rules are actually less strict than they used to be, so there + * should be no problem. + */ + public static final String SKIP_NODENAME_VALIDATION = + "je.rep.skipNodenameValidation"; + + /** + * @hidden + * Name of a java System property (boolean) which can be turned on in order + * to avoid hostname resolution checks on helper host values. + * This is undocumented. + *
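+     * As with node name validation, this is a JVM system property rather
+     * than a JE config param; a minimal sketch:
+     * <pre>
+     *    java -Dje.rep.skipHelperHostResolution=true ...
+     * </pre>
+     *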

        + * Generally users should not skip validation, because having valid helper + * hosts is an important path to let this node find the master in a + * replication group when its group membership db is not + * sufficient. Disabling the check should only be done in unusual cases, + * such as in the face of intermittent DNS failures. In that case, a + * hostname which seems to be invalid may actually be a transient problem, + * rather than a permanent configuration issue. Skipping the resolution + * check at config setting time may supply some degree of resilience in + * this unusual case. + */ + public static final String SKIP_HELPER_HOST_RESOLUTION = + "je.rep.skipHelperHostResolution"; + + /** + * A JE/HA configuration parameter describing an Identifier name. + */ + static public class IdentifierConfigParam extends ConfigParam { + private static final String DEBUG_NAME = + IdentifierConfigParam.class.getName(); + + public IdentifierConfigParam(String configName, + String defaultValue, + boolean mutable, + boolean forReplication) { + super(configName, defaultValue, mutable, forReplication); + } + + @Override + public void validateValue(String value) { + if (Boolean.getBoolean(SKIP_NODENAME_VALIDATION)) { + return; + } + if ((value == null) || (value.length() == 0)) { + throw new IllegalArgumentException + (DEBUG_NAME + ": a value is required"); + } + for (char c : value.toCharArray()) { + if (!isValid(c)) { + throw new IllegalArgumentException + (DEBUG_NAME + ": " + name + ", must consist of " + + "letters, digits, hyphen, underscore, period."); + } + } + } + + private boolean isValid(char c) { + if (Character.isLetterOrDigit(c) || + c == '-' || + c == '_' || + c == '.') { + return true; + } + return false; + } + } + + /* + * Replication group-wide properties. These properties are candidates for + * consistency checking whenever there is a handshake between a master and + * replica. + */ + + /** Names the Replication group. */ + public static final ConfigParam GROUP_NAME = + new IdentifierConfigParam(ReplicationConfig.GROUP_NAME, + "DefaultGroup", // default + false, // mutable + true); // forReplication + + /** + * @deprecated see {@link ReplicationConfig#REP_STREAM_TIMEOUT} + */ + public static final DurationConfigParam REP_STREAM_TIMEOUT = + new DurationConfigParam(ReplicationConfig.REP_STREAM_TIMEOUT, + null, // min + null, // max + "30 min", // default + false, // mutable + true); + + /** + * MIN_RETAINED_VLSNS was never exposed in the public API, although we + * did ask several users to configure it in the past, so we shouldn't + * delete the param definition. + * + * @deprecated and no longer used as of JE 7.5. Reserved files are now + * retained based on available disk space -- see + * {@link EnvironmentConfig#MAX_DISK} and + * {@link EnvironmentConfig#FREE_DISK} should be used instead. + * However, this param is still used when some, but not all, nodes in a + * group have been upgraded to 7.5 or later. + */ + public static final IntConfigParam MIN_RETAINED_VLSNS = + new IntConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "minRetainedVLSNs", + 0, // min + null, // max + 0, // default + false, // mutable + true); // forReplication + + /** + * Unpublished (for now at least) parameter describing the minimum size + * of the VLSNIndex as a number of VLSNs. Once the index grows to this + * size, it will not get smaller due to head truncation by the cleaner. + * + *

+     * <p>When a disk limit is violated we will have already deleted as many
+     * reserved files as possible. If space is made available and write
+     * operations can resume, we need to ensure that each node has a large
+     * enough VLSNIndex to perform syncup, etc.

        + * + *

+     * <p>This limit is enforced on both master and replicas. It is
+     * particularly important on replicas because feeders will not prevent
+     * VLSN index head truncation.
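+     *
+     * <p>As a worked example with the default of 1000 set below: once the
+     * index covers 1000 VLSNs, head truncation will not shrink it further,
+     * even under disk-limit pressure.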

        + */ + public static final IntConfigParam MIN_VLSN_INDEX_SIZE = + new IntConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "minVLSNIndexSize", + 0, // min + null, // max + 1000, // default + false, // mutable + true); // forReplication + + /** + * Can be used by tests to prevent the GlobalCBVLSN from being defunct, + * even when all nodes are DEFUNCT_JE_VERSION or higher. + */ + public static final BooleanConfigParam TEST_CBVLSN = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "testCBVLSN", + false, // default + false, // mutable + true); + + /** + * @see ReplicationConfig#REPLICA_RECEIVE_BUFFER_SIZE + */ + public static final IntConfigParam REPLICA_RECEIVE_BUFFER_SIZE = + new IntConfigParam(ReplicationConfig.REPLICA_RECEIVE_BUFFER_SIZE, + 0, // min + null, // max + 1048576, // default + false, // mutable + true); // forReplication + + /** + * The size of the message queue used for communication between the thread + * reading the replication stream and the thread doing the replay. The + * default buffer size has been chosen to hold 500 single operation + * transactions (the ln + commit record) assuming 1K sized LN record. + *

        + * Larger values of buffer size may result in higher peak memory + * utilization, due to a larger number of LNs sitting in the queue. The + * size of the queue itself is unlikely to be an issue, since it's tiny + * relative to cache sizes. At 1000, 1kbyte LNs it raises the peak + * utilization by 1MB which for most apps is an insignificant rise in the + * peak. + * + * Note that the parameter is lazily mutable, that is, the change will take + * effect the next time the node transitions to a replica state. + */ + public static final IntConfigParam REPLICA_MESSAGE_QUEUE_SIZE = + new IntConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "replicaMessageQueueSize", + 1, // min + null, // max + 1000, // default + true, // mutable + true); // forReplication + + /** + * The lock timeout for replay transactions. + */ + public static final DurationConfigParam REPLAY_TXN_LOCK_TIMEOUT = + new DurationConfigParam(ReplicationConfig.REPLAY_TXN_LOCK_TIMEOUT, + "1 ms", // min + "75 min", // max + "500 ms", // default + false, // mutable + true); // forReplication + + /** + * @see ReplicationConfig#ENV_SETUP_TIMEOUT + */ + public static final DurationConfigParam ENV_SETUP_TIMEOUT = + new DurationConfigParam + (ReplicationConfig.ENV_SETUP_TIMEOUT, + null, // min + null, // max + "10 h", // default 10 hrs + false, // mutable + true); + + /** + * @see ReplicationConfig#ENV_CONSISTENCY_TIMEOUT + */ + public static final DurationConfigParam + ENV_CONSISTENCY_TIMEOUT = + new DurationConfigParam(ReplicationConfig.ENV_CONSISTENCY_TIMEOUT, + "10 ms", // min + null, // max + "5 min", // default + false, // mutable + true); + + /** + * @see ReplicationConfig#ENV_UNKNOWN_STATE_TIMEOUT + */ + public static final DurationConfigParam ENV_UNKNOWN_STATE_TIMEOUT = + new DurationConfigParam + (ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, + null, // min + null, // max + "0 s", // default + false, // mutable + true); + + /** + * @see ReplicationConfig#REPLICA_ACK_TIMEOUT + */ + public static final DurationConfigParam REPLICA_ACK_TIMEOUT = + new DurationConfigParam(ReplicationConfig.REPLICA_ACK_TIMEOUT, + "10 ms", // min + null, // max + "5 s", // default + false, // mutable + true); // forReplication + + /** + * @see ReplicationConfig#INSUFFICIENT_REPLICAS_TIMEOUT + */ + public static final DurationConfigParam INSUFFICIENT_REPLICAS_TIMEOUT = + new DurationConfigParam(ReplicationConfig.INSUFFICIENT_REPLICAS_TIMEOUT, + "10 ms", // min + null, // max + "10 s", // default + false, // mutable + true); // forReplication + + /** + * @hidden + * @see ReplicationConfig#ARBITER_ACK_TIMEOUT + */ + public static final DurationConfigParam ARBITER_ACK_TIMEOUT = + new DurationConfigParam(ReplicationConfig.ARBITER_ACK_TIMEOUT, + "10 ms", // min + null, // max + "2 s", // default + false, // mutable + true); // forReplication + + /** + * Internal parameter enable use of the group ack message. It's on by + * default since protocol version 6. + */ + public static final BooleanConfigParam ENABLE_GROUP_ACKS = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "enableGroupAcks", + true, // default + false, // mutable + true); + + /** + * The maximum message size which will be accepted by a node (to prevent + * DOS attacks). While the default shown here is 0, it dynamically + * calculated when the node is created and is set to the half of the + * environment cache size. The cache size is mutable, but changing the + * cache size at run time (after environment initialization) will not + * change the value of this parameter. 
If a value other than cache size / + * 2 is desired, this non-mutable parameter should be specified at + * initialization time. + */ + public static final LongConfigParam MAX_MESSAGE_SIZE = + new LongConfigParam(ReplicationConfig.MAX_MESSAGE_SIZE, + Long.valueOf(1 << 18), // min (256KB) + Long.valueOf(Long.MAX_VALUE), // max + Long.valueOf(0), // default (cachesize / 2) + false, // mutable + true); // forReplication + + /** + * Identifies the default consistency policy used by a replica. Only two + * policies are meaningful as properties denoting environment level default + * policies: NoConsistencyRequiredPolicy and TimeConsistencyPolicy. They + * can be specified as: NoConsistencyRequiredPolicy or + * TimeConsistencyPolicy(,). For example, a time + * based consistency policy with a lag of 1 second and a timeout of 1 hour + * is denoted by the string: TimeConsistencyPolicy(1000,3600000) + */ + public static final ConfigParam CONSISTENCY_POLICY = + new ConfigParam(ReplicationConfig.CONSISTENCY_POLICY, + // Default lag of 1 sec, and timeout of 1 hour + "TimeConsistencyPolicy(1 s,1 h)", + false, // mutable + true) { // for Replication + @Override + public void validateValue(String propertyValue) + throws IllegalArgumentException { + + /* Evaluate for the checking side-effect. */ + RepUtils.getReplicaConsistencyPolicy(propertyValue); + } + }; + + /* The ports used by a replication group */ + + /** + * The port used for replication. + */ + public static final IntConfigParam DEFAULT_PORT = + new IntConfigParam(ReplicationConfig.DEFAULT_PORT, + Integer.valueOf(1024), // min + Integer.valueOf(Short.MAX_VALUE), // max + Integer.valueOf(5001), // default + false, // mutable + true); // forReplication + + /** + * Names the host (or interface) and port associated with the node in the + * replication group, e.g. je.rep.nodeHostPort=foo.com:5001 + */ + public static final ConfigParam NODE_HOST_PORT = + new ConfigParam(ReplicationConfig.NODE_HOST_PORT, + "localhost", // default + false, // mutable + true) { // forReplication + + @Override + public void validateValue(String hostAndPort) + throws IllegalArgumentException { + + if ((hostAndPort == null) || (hostAndPort.length() == 0)) { + throw new IllegalArgumentException + ("The value cannot be null or zero length: " + name); + } + int colonToken = hostAndPort.indexOf(":"); + String hostName = (colonToken >= 0) ? + hostAndPort.substring(0, colonToken) : + hostAndPort; + ServerSocket testSocket = null; + try { + testSocket = new ServerSocket(); + /* The bind will fail if the hostName does not name this m/c.*/ + testSocket.bind(new InetSocketAddress(hostName, 0)); + testSocket.close(); + } catch (UnknownHostException e) { + throw new IllegalArgumentException + ("Property: " + name + + " Invalid hostname: " + hostName, e); + } catch (IOException e) { + + /* + * Server socket could not be bound to any port. Hostname is + * not associated with this m/c. + */ + throw new IllegalArgumentException + ("Property: " + name + + " Invalid hostname: " + hostName, e); + } + + if (colonToken >= 0) { + validatePort(hostAndPort.substring(colonToken + 1)); + } + } + }; + + /* + * The Name uniquely identifies this node within the replication group. + */ + public static final ConfigParam NODE_NAME = + new IdentifierConfigParam(ReplicationConfig.NODE_NAME, + "DefaultRepNodeName",// default + false, // mutable + true); // forReplication + + /* + * Identifies the type of the node. 
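+     * The default, set below, is ELECTABLE; other types include SECONDARY
+     * and MONITOR.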
+ */ + public static final EnumConfigParam NODE_TYPE = + new EnumConfigParam(ReplicationConfig.NODE_TYPE, + NodeType.ELECTABLE, // default + false, // mutable + true, + NodeType.class); + + /* + * Associated a priority with this node. The priority is used during + * elections to favor one node over another. All other considerations being + * equal, the priority is used as a tie-breaker; the node with the higher + * priority is selected as the master. + */ + public static final IntConfigParam NODE_PRIORITY = + new IntConfigParam(ReplicationMutableConfig.NODE_PRIORITY, + Integer.valueOf(0), // min + Integer.valueOf(Integer.MAX_VALUE), // max + Integer.valueOf(1), // default + true, // mutable + true); // forReplication + + /* + * Allow for Arbiter to provide Acks. + */ + public static final BooleanConfigParam ALLOW_ARBITER_ACK = + new BooleanConfigParam(ReplicationMutableConfig.ALLOW_ARBITER_ACK, + true, // default + true, // mutable + true); + + public static final IntConfigParam ARBITER_OUTPUT_QUEUE_SIZE = + new IntConfigParam(ReplicationConfig.ARBITER_OUTPUT_QUEUE_SIZE, + Integer.valueOf(128), // min + null, // max + Integer.valueOf(4096), // default + false, // mutable + false); // forReplication + + /* + * Identifies the Primary node in a two node group. + */ + public static final BooleanConfigParam DESIGNATED_PRIMARY = + new BooleanConfigParam(ReplicationMutableConfig.DESIGNATED_PRIMARY, + false, // default + true, // mutable + true); + + /* + * An internal option used to control the use of Nagle's algorithm + * on feeder connections. A value of true disables use of Nagle's algorithm + * and causes output to be sent immediately without delay. + */ + public static final BooleanConfigParam FEEDER_TCP_NO_DELAY = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "feederTcpNoDelay", + true, // default + false, // mutable + true); + + /** + * The time interval in nanoseconds during which records from a feeder may + * be batched before being written to the network. + * + * Larger values can result in fewer network packets and lower interrupt + * processing overheads. Since the grouping is only done when the feeder + * knows that the replica is not completely in sync, it's unlikely to have + * an adverse impact on overall throughput. Consequently this parameter is + * retained as an internal tuning knob. + * + * The HEARTBEAT_INTERVAL parameter serves as a ceiling on this time + * interval. Parameter values larger than HEARTBEAT_INTERVAL are truncated + * to HEARTBEAT_INTERVAL. + */ + public static final IntConfigParam FEEDER_BATCH_NS = + new IntConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "feederBatchNs", + Integer.valueOf(0), // min + Integer.valueOf(Integer.MAX_VALUE),// max + Integer.valueOf(1000000), // default 1 ms + true, // mutable + true); // forReplication + + /** + * The size in KB used to batch outgoing feeder records. Upon overflow the + * existing buffer contents are written to the network and a new batch is + * initiated. The default value is 64K to take advantage of networks that + * support jumbo frames. 
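+     *
+     * <p>As an illustration: with this 64K default and the 1 ms default of
+     * feederBatchNs above, a batch is written to the network when it fills
+     * 64KB or when the batching window expires, whichever comes first.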
+ */ + public static final IntConfigParam FEEDER_BATCH_BUFF_KB = + new IntConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "feederBatchBuffKb", + Integer.valueOf(4), // min + Integer.valueOf(Integer.MAX_VALUE),// max + Integer.valueOf(64), // default 64K + true, // mutable + true); // forReplication + + /** + * @see ReplicationMutableConfig#ELECTABLE_GROUP_SIZE_OVERRIDE + */ + public static final IntConfigParam ELECTABLE_GROUP_SIZE_OVERRIDE = + new IntConfigParam(ReplicationMutableConfig. + ELECTABLE_GROUP_SIZE_OVERRIDE, + Integer.valueOf(0), // min + Integer.valueOf(Integer.MAX_VALUE),// max + Integer.valueOf(0), // default + true, // mutable + true); // forReplication + + /** + * An internal option, accessed only via the utility + * {@link DbResetRepGroup} utility, to reset a replication group to a + * single new member when the replicated environment is opened. + */ + public static final BooleanConfigParam RESET_REP_GROUP = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "resetRepGroup", + false, // default + false, // mutable + true); + + /** + * An internal option, used with {@link #RESET_REP_GROUP}, that causes the + * reset of the replication group to retain the original group UUID and to + * not truncate the VLSN index. Use this option when converting a + * SECONDARY node to an ELECTABLE node when recovering a replication group. + */ + public static final BooleanConfigParam RESET_REP_GROUP_RETAIN_UUID = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "resetRepGroupRetainUUID", + false, // default + false, // mutable + true); + + /** + * An internal option to allow converting an ELECTABLE node to a SECONDARY + * node by ignoring the electable node ID stored in the local rep group + * DB. + */ + public static final BooleanConfigParam IGNORE_SECONDARY_NODE_ID = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "ignoreSecondaryNodeId", + false, // default + false, // mutable + true); + + /* + * Sets the maximum allowable skew between a Feeder and its replica. The + * clock skew is checked as part of the handshake when the Replica + * establishes a connection to its Feeder. + */ + public static final DurationConfigParam MAX_CLOCK_DELTA = + new DurationConfigParam(ReplicationConfig.MAX_CLOCK_DELTA, + null, // min + "1 min", // max + "2 s", // default + false, // mutable + true); // forReplication + + /* + * The list of helper node and port pairs. + */ + public static final ConfigParam HELPER_HOSTS = + new ConfigParam(ReplicationConfig.HELPER_HOSTS, + "", // default + true, // mutable + true) { // forReplication + + @Override + public void validateValue(String hostPortPairs) + throws IllegalArgumentException { + + if ((hostPortPairs == null) || (hostPortPairs.length() == 0)) { + return; + } + HashSet hostPortSet = new HashSet(); + for (StringTokenizer tokenizer = + new StringTokenizer(hostPortPairs, ","); + tokenizer.hasMoreTokens();) { + try { + String hostPortPair = tokenizer.nextToken(); + if (!hostPortSet.add(hostPortPair)) { + throw new IllegalArgumentException + ("Property: " + name + + " Duplicate specification: " + hostPortPair); + } + validateHostAndPort( + hostPortPair, + Boolean.getBoolean(SKIP_HELPER_HOST_RESOLUTION)); + } catch (IllegalArgumentException iae) { + throw new IllegalArgumentException + ("Property: " + name + "Error: " + iae.getMessage(), + iae); + } + } + } + }; + + /* Heartbeat interval in milliseconds. 
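+     * The feeder read timeout is derived from this value; see
+     * FEEDER_HEARTBEAT_TIMEOUT.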
*/ + public static final IntConfigParam HEARTBEAT_INTERVAL = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "heartbeatInterval", + Integer.valueOf(1000),// min + null, // max + Integer.valueOf(1000),// default + false, // mutable + true); // forReplication + + /* + * Security check interval in milliseconds. This parameter controls how + * frequently the feeder checks that stream consumers authenticated and + * authorized to stream the requested tables. + */ + public static final IntConfigParam SECURITY_CHECK_INTERVAL = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "securityCheckInterval", + Integer.valueOf(1), // min + null, // max + Integer.valueOf(1000),// default + false, // mutable + true); // forReplication + + /* Replay Op Count after which we clear the DbTree cache. */ + public static final IntConfigParam DBTREE_CACHE_CLEAR_COUNT = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "dbIdCacheOpCount", + Integer.valueOf(1), // min + null, // max + Integer.valueOf(5000), // default + false, // mutable + true); // forReplication + + public static final IntConfigParam VLSN_STRIDE = + new IntConfigParam(EnvironmentParams.REP_PARAM_PREFIX + "vlsn.stride", + Integer.valueOf(1), // min + null, // max + Integer.valueOf(10), // default + false, // mutable + true); // forReplication + + public static final IntConfigParam VLSN_MAX_MAP = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "vlsn.mappings", + Integer.valueOf(1), // min + null, // max + Integer.valueOf(1000), // default + false, // mutable + true); // forReplication + + public static final IntConfigParam VLSN_MAX_DIST = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "vlsn.distance", + Integer.valueOf(1), // min + null, // max + Integer.valueOf(100000), // default + false, // mutable + true); // forReplication + + /* + * Internal testing use only: Simulate a delay in the replica loop for test + * purposes. The value is the delay in milliseconds. + */ + public static final IntConfigParam TEST_REPLICA_DELAY = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "test.replicaDelay", + Integer.valueOf(0), // min + Integer.valueOf(Integer.MAX_VALUE), // max + Integer.valueOf(0), // default + false, // mutable + true); // forReplication + + /* + * Sets the VLSNIndex cache holding recent log items in support of the + * feeders. The size must be a power of two. + */ + public static final IntConfigParam VLSN_LOG_CACHE_SIZE = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "vlsn.logCacheSize", + Integer.valueOf(0), // min + Integer.valueOf(1<<10), // max + Integer.valueOf(32), // default + false, // mutable + true); // forReplication + + /* + * The socket timeout value used by a Replica when it opens a new + * connection to establish a replication stream with a feeder. + */ + public static final DurationConfigParam REPSTREAM_OPEN_TIMEOUT = + new DurationConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "repstreamOpenTimeout", + null, // min + "5 min", // max + "5 s", // default + false, // mutable + true); // forReplication + + /* + * The socket timeout value used by Elections agents when they open + * sockets to communicate with each other using the Elections protocol. 
+ */ + public static final DurationConfigParam ELECTIONS_OPEN_TIMEOUT = + new DurationConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "electionsOpenTimeout", + null, // min + "1 min", // max + "10 s", // default + false, // mutable + true); // forReplication + + /* + * The maximum amount of time a Learner or Acceptor agent will wait for + * input on a network connection, while listening for a message before + * timing out. This timeout applies to the Elections protocol. + */ + public static final DurationConfigParam ELECTIONS_READ_TIMEOUT = + new DurationConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "electionsReadTimeout", + null, // min + "1 min", // max + "10 s", // default + false, // mutable + true); // forReplication + + /** + * The master re-broadcasts the results of an election with this period. + */ + public static final DurationConfigParam + ELECTIONS_REBROADCAST_PERIOD = + new DurationConfigParam + (ReplicationConfig.ELECTIONS_REBROADCAST_PERIOD, + null, // min + null, // max + "1 min", // default + false, // mutable + true); + + /** + * @see ReplicationConfig#ELECTIONS_PRIMARY_RETRIES + */ + public static final IntConfigParam ELECTIONS_PRIMARY_RETRIES = + new IntConfigParam(ReplicationConfig.ELECTIONS_PRIMARY_RETRIES, + 0, + Integer.MAX_VALUE, + 2, + false, + true); + + /* + * Socket open timeout for use with the RepGroupProtocol. + */ + public static final DurationConfigParam REP_GROUP_OPEN_TIMEOUT = + new DurationConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "repGroupOpenTimeout", + null, // min + "1 min", // max + "10 s", // default + false, // mutable + true); // forReplication + + /* + * Socket read timeout for use with the RepGroupProtocol. + */ + public static final DurationConfigParam REP_GROUP_READ_TIMEOUT = + new DurationConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "repGroupReadTimeout", + null, // min + "1 min", // max + "10 s", // default + false, // mutable + true); // forReplication + + /* + * Socket open timeout for use with the Monitor Protocol. + */ + public static final DurationConfigParam MONITOR_OPEN_TIMEOUT = + new DurationConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "monitorOpenTimeout", + null, // min + "1 min", // max + "10 s", // default + false, // mutable + true); // forReplication + + /* + * Socket read timeout for use with the MonitorProtocol. 
+ */ + public static final DurationConfigParam MONITOR_READ_TIMEOUT = + new DurationConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "monitorReadTimeout", + null, // min + "1 min", // max + "10 s", // default + false, // mutable + true); // forReplication + + /** + * @see ReplicationConfig#REPLICA_TIMEOUT + */ + public static final DurationConfigParam REPLICA_TIMEOUT = + new DurationConfigParam(ReplicationConfig.REPLICA_TIMEOUT, + "2 s", // min + null, // max + "30 s", // default + false, // mutable + true); // forReplication + + /* @see ReplicationConfig#REPLAY_MAX_OPEN_DB_HANDLES */ + public static final IntConfigParam REPLAY_MAX_OPEN_DB_HANDLES = + new IntConfigParam(ReplicationMutableConfig.REPLAY_MAX_OPEN_DB_HANDLES, + Integer.valueOf(1), // min + Integer.valueOf(Integer.MAX_VALUE), // max + Integer.valueOf(10), // default + true, // mutable + true); // forReplication + + /* @see ReplicationConfig#REPLAY_DB_HANDLE_TIMEOUT */ + public static final DurationConfigParam REPLAY_DB_HANDLE_TIMEOUT = + new DurationConfigParam(ReplicationConfig.REPLAY_DB_HANDLE_TIMEOUT, + "1 s", // min + null, // max + "30 s", // default + true, // mutable + true); // forReplication + + /* @see ReplicationConfig#REPLICA_MAX_GROUP_COMMIT */ + public static final IntConfigParam REPLICA_MAX_GROUP_COMMIT = + new IntConfigParam(ReplicationConfig.REPLICA_MAX_GROUP_COMMIT, + Integer.valueOf(0), // min + null, // max + Integer.valueOf(200), // default + false, // mutable + true); // forReplication + + /* @see ReplicationConfig#REPLICA_GROUP_COMMIT_INTERVAL */ + public static final DurationConfigParam REPLICA_GROUP_COMMIT_INTERVAL = + new DurationConfigParam(ReplicationConfig.REPLICA_GROUP_COMMIT_INTERVAL, + "0 ms", // min + null, // max + "3 ms", // default + false, // mutable + true); // forReplication + + /* + * The number of heartbeat responses that must be detected as missing + * during an otherwise idle period before the Feeder shuts down the + * connection with the Replica. + * + * This value provides the basis for the "read timeout" used by the Feeder + * when communicating with the Replica. The timeout is calculated as + * FEEDER_HEARTBEAT_TIMEOUT * HEARTBEAT_INTERVAL. Upon a timeout the Feeder + * closes the connection. + * + * Reducing this value permits the master to discover failed Replicas + * faster. However, it increases the chances of false positives as well, if + * the network is experiencing transient problems from which it might + * just recover. + */ + public static final IntConfigParam FEEDER_HEARTBEAT_TIMEOUT = + new IntConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "feederHeartbeatTrigger", + Integer.valueOf(0), // min + Integer.valueOf(Integer.MAX_VALUE), // max + Integer.valueOf(4), // default + false, // mutable + true); + + /** + * Used to force setting of SO_REUSEADDR to true on the HA server socket + * when it binds to its port. + * + * Note that the default is false, meaning that the socket has the + * system-specific default setting associated with it. We set it to true + * primarily in unit tests where the interacting HA processes are all on + * the same machine and use of this option is safe. + * + * This option is currently intended just for internal test use. + */ + public static final BooleanConfigParam SO_REUSEADDR = + new BooleanConfigParam + (EnvironmentParams.REP_PARAM_PREFIX + "soReuseAddr", + false, // default + false, // mutable + true); + + /** + * This option was motivated by the BDA. 
The BDA uses IB for intra-rack
+     * node communications and 10gigE for inter-rack node communications. DNS
+     * is used to map the hostname to different IP addresses depending on
+     * whether the hostname was resolved from within the rack or outside it.
+     * The host thus gets HA traffic on both the IB and 10gigE interfaces and
+     * therefore needs to listen on both interfaces. It does so by binding its
+     * socket using a wildcard address when this option is turned on.
+     *
+     * @see ReplicationConfig#BIND_INADDR_ANY
+     */
+    public static final BooleanConfigParam BIND_INADDR_ANY =
+        new BooleanConfigParam
+        (ReplicationConfig.BIND_INADDR_ANY,
+         false,  // default
+         false,  // mutable
+         true);
+
+    /**
+     * Determines how long to wait for a bound socket to come free. This
+     * option can be useful when waiting for sockets in the TIME_WAIT state to
+     * come free so they can be reused. Attempts are made to retry the binding
+     * at intervals of 1 second until the port is bound successfully, or this
+     * wait period is exceeded.
+     *
+     * A value of zero means that there are no retries. It does not make sense
+     * to wait too much longer than the 2 min TIME_WAIT period, but we allow
+     * waiting as long as 2.5 min to account for race conditions.
+     *
+     * This option is currently intended just for internal test use.
+     */
+    public static final IntConfigParam SO_BIND_WAIT_MS =
+        new IntConfigParam
+        (EnvironmentParams.REP_PARAM_PREFIX + "retrySocketBind",
+         Integer.valueOf(0),          // min
+         Integer.valueOf(150 * 1000), // max
+         Integer.valueOf(0),          // default
+         false,                       // mutable
+         true);
+
+    /**
+     * Internal parameter used to determine the poll timeout used when
+     * accepting incoming feeder connections. This timeout also determines
+     * the frequency of various housekeeping tasks, e.g. detection of a
+     * master to replica change, etc.
+     */
+    public static final DurationConfigParam FEEDER_MANAGER_POLL_TIMEOUT =
+        new DurationConfigParam(EnvironmentParams.REP_PARAM_PREFIX +
+                                "feederManagerPollTimeout",
+                                "100 ms", // min
+                                null,     // max
+                                "1 s",    // default
+                                false,    // mutable
+                                true);    // forReplication
+    /**
+     * @see ReplicationConfig#FEEDER_TIMEOUT
+     */
+    public static final DurationConfigParam FEEDER_TIMEOUT =
+        new DurationConfigParam(ReplicationConfig.FEEDER_TIMEOUT,
+                                "2 s",  // min
+                                null,   // max
+                                "30 s", // default
+                                false,  // mutable
+                                true);  // forReplication
+
+    /**
+     * Used to log an info message when a commit log record exceeds this
+     * time interval from the time it was created to the time it was written
+     * out to the network.
+     */
+    public static final DurationConfigParam TRANSFER_LOGGING_THRESHOLD =
+        new DurationConfigParam
+        (EnvironmentParams.REP_PARAM_PREFIX + "transferLoggingThreshold",
+         "1 ms",  // min
+         null,    // max
+         "5 s",   // default
+         false,   // mutable
+         true);   // forReplication
+
+    /**
+     * Used to log an info message when the time taken to replay a single log
+     * entry at a replica exceeds this threshold.
+     */
+    public static final DurationConfigParam REPLAY_LOGGING_THRESHOLD =
+        new DurationConfigParam
+        (EnvironmentParams.REP_PARAM_PREFIX + "replayLoggingThreshold",
+         "1 ms",  // min
+         null,    // max
+         "5 s",   // default
+         false,   // mutable
+         true);   // forReplication
+
+    /**
+     * Changes the notion of an ack. When set to true, a replica is considered
+     * to have acknowledged a commit as soon as the feeder has written the
+     * commit record to the network. That is, it does not wait for the replica
+     * to actually acknowledge the commit via a return message. This permits
+     * the master to operate in a more asynchronous manner relative to the
+     * replica and provides for higher throughput.
+     *
+     * This config parameter is internal.
+     */
+    public static final BooleanConfigParam COMMIT_TO_NETWORK =
+        new BooleanConfigParam
+        (EnvironmentParams.REP_PARAM_PREFIX + "commitToNetwork",
+         false,  // default
+         false,  // mutable
+         true);
+
+    public static final DurationConfigParam PRE_HEARTBEAT_TIMEOUT =
+        new DurationConfigParam
+        (EnvironmentParams.REP_PARAM_PREFIX + "preHeartbeatTimeoutMs",
+         "1 s",   // min
+         null,    // max
+         "60 s",  // default
+         false,   // mutable
+         true);
+
+    /**
+     * Verifies that the port is a reasonable number. The port must be outside
+     * the range of "Well Known Ports" (zero through 1023).
+     *
+     * @param portString the string representing the port.
+     */
+    private static void validatePort(String portString)
+        throws IllegalArgumentException {
+
+        try {
+            int port = Integer.parseInt(portString);
+
+            if ((port <= 0) || (port > 0xffff)) {
+                throw new IllegalArgumentException
+                    ("Invalid port number: " + portString);
+            }
+            if (port <= 1023) {
+                throw new IllegalArgumentException
+                    ("Port number " + port +
+                     " is invalid because the port must be "+
+                     "outside the range of \"well known\" ports");
+            }
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException
+                ("Invalid port number: " + portString);
+        }
+    }
+
+    /**
+     * Validates that the hostPort is a string of the form:
+     *
+     * hostName[:port]
+     *
+     * @param hostAndPort
+     * @param skipHostnameResolution if true, don't bother checking that the
+     * hostname resolves
+     * @throws IllegalArgumentException
+     */
+    private static void validateHostAndPort(String hostAndPort,
+                                            boolean skipHostnameResolution)
+        throws IllegalArgumentException {
+
+        int colonToken = hostAndPort.indexOf(":");
+        String hostName = (colonToken >= 0) ?
+            hostAndPort.substring(0, colonToken) :
+            hostAndPort;
+        if ("".equals(hostName)) {
+            throw new IllegalArgumentException("missing hostname");
+        }
+
+        if (!skipHostnameResolution) {
+            try {
+                InetAddress.getByName(hostName);
+            } catch (UnknownHostException e) {
+                throw new IllegalArgumentException
+                    ("Invalid hostname: " + e.getMessage());
+            }
+        }
+
+        if (colonToken >= 0) {
+            validatePort(hostAndPort.substring(colonToken + 1));
+        }
+    }
+
+    /**
+     * @see ReplicationConfig#TXN_ROLLBACK_LIMIT
+     */
+    public static final IntConfigParam TXN_ROLLBACK_LIMIT =
+        new IntConfigParam(ReplicationConfig.
+                           TXN_ROLLBACK_LIMIT,
+                           Integer.valueOf(0),                // min
+                           Integer.valueOf(Integer.MAX_VALUE),// max
+                           Integer.valueOf(10),               // default
+                           false,                             // mutable
+                           true);                             // forReplication
+
+    /**
+     * @see ReplicationConfig#TXN_ROLLBACK_DISABLED
+     */
+    public static final BooleanConfigParam TXN_ROLLBACK_DISABLED =
+        new BooleanConfigParam(ReplicationConfig.TXN_ROLLBACK_DISABLED,
+                               false,  // default
+                               false,  // mutable
+                               true);  // forReplication
+
+    /**
+     * @see ReplicationConfig#ALLOW_UNKNOWN_STATE_ENV_OPEN
+     */
+    @SuppressWarnings({ "javadoc", "deprecation" })
+    public static final BooleanConfigParam ALLOW_UNKNOWN_STATE_ENV_OPEN =
+        new BooleanConfigParam(ReplicationConfig.ALLOW_UNKNOWN_STATE_ENV_OPEN,
+                               false,  // default
+                               false,  // mutable
+                               true);
+
+    /**
+     * If true, a replica run with this property will not join the
+     * replication group.
+     */
+    public static final BooleanConfigParam DONT_JOIN_REP_GROUP =
+        new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX +
+                               "dontJoinRepGroup",
+                               false,
+                               false,
+                               true);
+
+    /**
+     * Internal parameter used by the Arbiter.
+ */ + public static final BooleanConfigParam ARBITER_USE = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "arbiterUse", + false, + false, + true); + + /** + * Internal parameter used by the Subscriber. + * + * If true, the node is a replica that operates as a subscriber. + */ + public static final BooleanConfigParam SUBSCRIBER_USE = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "subscriberUse", + false, + false, + true); + + /** + * Internal parameter used by network backups. + * + * If true, the node is used to support network backups and operates in + * read only mode, with various daemons disabled. + * + * TODO: would be nice to combine ARBITER_USE, SUBSCRIBER_USE and this into + * one concept. + */ + public static final BooleanConfigParam NETWORKBACKUP_USE = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "networkBackupUse", + false, + false, + true); + + /** + * Internal parameter used by network backups. + * + * See 'Algorithm' in {@link com.sleepycat.je.rep.NetworkRestore}. + * Is currently 50k, which represents less than 1s of replay time. + */ + public static final IntConfigParam NETWORKBACKUP_MAX_LAG = + new IntConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "networkBackupMaxLag", + 0, // min + null, // max + 50 * 1000, // default + false, // mutable + true); // forReplication + + /** + * Internal parameter to preserve record version (VLSN). Is immutable + * forever, i.e., it may not be changed after the environment has been + * created. It has the following impacts: + * + * . The VLSN is stored with the LN in the Btree and is available via the + * CursorImpl API. + * . The VLSN is included when migrating an LN during log cleaning. + * + * FUTURE: Expose this in ReplicationConfig and improve doc if we make + * record versions part of the public API. + */ + public static final BooleanConfigParam PRESERVE_RECORD_VERSION = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "preserveRecordVersion", + false, // default + false, // mutable + true); // forReplication + + /** + * Whether to cache the VLSN in the BIN after the LN has been stripped by + * eviction, unless caching is explicitly disabled using the + * CACHE_RECORD_VERSION setting. + * + * This setting has no impact if PRESERVE_RECORD_VERSION is not also + * enabled. + * + * FUTURE: Expose this in ReplicationConfig and improve doc if we make + * record versions part of the public API. + */ + public static final BooleanConfigParam CACHE_RECORD_VERSION = + new BooleanConfigParam(EnvironmentParams.REP_PARAM_PREFIX + + "cacheRecordVersion", + true, // default + false, // mutable + true); // forReplication + + /** + * @see ReplicationConfig#PROTOCOL_OLD_STRING_ENCODING + * TODO: Change default to false in JE 5.1. 
+     */
+    public static final BooleanConfigParam PROTOCOL_OLD_STRING_ENCODING =
+        new BooleanConfigParam(ReplicationConfig.PROTOCOL_OLD_STRING_ENCODING,
+                               true,   // default
+                               false,  // mutable
+                               true);  // forReplication
+
+    /**
+     * A JE/HA configuration parameter specifying a data channel type
+     */
+    static public class ChannelTypeConfigParam extends ConfigParam {
+        public static final String BASIC = "basic";
+        public static final String SSL = "ssl";
+        public static final String CUSTOM = "custom";
+
+        private static final String DEBUG_NAME =
+            ChannelTypeConfigParam.class.getName();
+
+        public ChannelTypeConfigParam(String configName,
+                                      String defaultValue,
+                                      boolean mutable,
+                                      boolean forReplication) {
+            super(configName, defaultValue, mutable, forReplication);
+        }
+
+        @Override
+        public void validateValue(String value) {
+            if (value == null) {
+                throw new IllegalArgumentException
+                    (DEBUG_NAME + ": a value is required");
+            }
+            if (!(BASIC.equals(value) ||
+                  SSL.equals(value) ||
+                  CUSTOM.equals(value))) {
+                throw new IllegalArgumentException
+                    (DEBUG_NAME + ": " + value + " is not a valid value");
+            }
+        }
+    }
+
+    /**
+     * Replication data channel factory configuration
+     * @see ReplicationNetworkConfig#CHANNEL_TYPE
+     */
+    public static final ConfigParam CHANNEL_TYPE =
+        new ChannelTypeConfigParam(
+            ReplicationNetworkConfig.CHANNEL_TYPE,
+            ChannelTypeConfigParam.BASIC, // default
+            false,                        // mutable
+            true);                        // forReplication
+
+    /**
+     * Replication data channel logging identifier.
+     * @see ReplicationNetworkConfig#CHANNEL_LOG_NAME
+     */
+    public static final ConfigParam CHANNEL_LOG_NAME =
+        new ConfigParam(
+            ReplicationNetworkConfig.CHANNEL_LOG_NAME,
+            "",     // default
+            false,  // mutable
+            true);  // forReplication
+
+    /**
+     * Data channel factory class
+     * @see ReplicationNetworkConfig#CHANNEL_FACTORY_CLASS
+     */
+    public static final ConfigParam CHANNEL_FACTORY_CLASS =
+        new ConfigParam(ReplicationNetworkConfig.CHANNEL_FACTORY_CLASS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * Data channel factory parameters
+     * @see ReplicationNetworkConfig#CHANNEL_FACTORY_PARAMS
+     */
+    public static final ConfigParam CHANNEL_FACTORY_PARAMS =
+        new ConfigParam(ReplicationNetworkConfig.CHANNEL_FACTORY_PARAMS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL KeyStore file
+     * @see ReplicationSSLConfig#SSL_KEYSTORE_FILE
+     */
+    public static final ConfigParam SSL_KEYSTORE_FILE =
+        new ConfigParam(ReplicationSSLConfig.SSL_KEYSTORE_FILE,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL KeyStore password
+     * @see ReplicationSSLConfig#SSL_KEYSTORE_PASSWORD
+     */
+    public static final ConfigParam SSL_KEYSTORE_PASSWORD =
+        new ConfigParam(ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL KeyStore password source class
+     * @see ReplicationSSLConfig#SSL_KEYSTORE_PASSWORD_CLASS
+     */
+    public static final ConfigParam SSL_KEYSTORE_PASSWORD_CLASS =
+        new ConfigParam(ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD_CLASS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL KeyStore password source constructor parameters
+     * @see ReplicationSSLConfig#SSL_KEYSTORE_PASSWORD_PARAMS
+     */
+    public static final ConfigParam SSL_KEYSTORE_PASSWORD_PARAMS =
+        new ConfigParam(ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD_PARAMS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL KeyStore type
+     * @see ReplicationSSLConfig#SSL_KEYSTORE_TYPE
+     */
+    public static final ConfigParam SSL_KEYSTORE_TYPE =
+        new ConfigParam(ReplicationSSLConfig.SSL_KEYSTORE_TYPE,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL server key alias
+     * @see ReplicationSSLConfig#SSL_SERVER_KEY_ALIAS
+     */
+    public static final ConfigParam SSL_SERVER_KEY_ALIAS =
+        new ConfigParam(ReplicationSSLConfig.SSL_SERVER_KEY_ALIAS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL client key alias
+     * @see ReplicationSSLConfig#SSL_CLIENT_KEY_ALIAS
+     */
+    public static final ConfigParam SSL_CLIENT_KEY_ALIAS =
+        new ConfigParam(ReplicationSSLConfig.SSL_CLIENT_KEY_ALIAS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL TrustStore file
+     * @see ReplicationSSLConfig#SSL_TRUSTSTORE_FILE
+     */
+    public static final ConfigParam SSL_TRUSTSTORE_FILE =
+        new ConfigParam(ReplicationSSLConfig.SSL_TRUSTSTORE_FILE,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL TrustStore type
+     * @see ReplicationSSLConfig#SSL_TRUSTSTORE_TYPE
+     */
+    public static final ConfigParam SSL_TRUSTSTORE_TYPE =
+        new ConfigParam(ReplicationSSLConfig.SSL_TRUSTSTORE_TYPE,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL cipher suites
+     * @see ReplicationSSLConfig#SSL_CIPHER_SUITES
+     */
+    public static final ConfigParam SSL_CIPHER_SUITES =
+        new ConfigParam(ReplicationSSLConfig.SSL_CIPHER_SUITES,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL protocols
+     * @see ReplicationSSLConfig#SSL_PROTOCOLS
+     */
+    public static final ConfigParam SSL_PROTOCOLS =
+        new ConfigParam(ReplicationSSLConfig.SSL_PROTOCOLS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL Authenticator
+     * @see ReplicationSSLConfig#SSL_AUTHENTICATOR
+     */
+    public static final ConfigParam SSL_AUTHENTICATOR =
+        new ConfigParam(ReplicationSSLConfig.SSL_AUTHENTICATOR,
+                        "",     // default
+                        false,  // mutable
+                        true) { // forReplication
+
+            @Override
+            public void validateValue(String value) {
+                if (value == null) {
+                    throw new IllegalArgumentException
+                        ("a value is required");
+                }
+                if (!SSLChannelFactory.isValidAuthenticator(value)) {
+                    throw new IllegalArgumentException
+                        (value + " is not a valid value");
+                }
+            }
+        };
+
+    /**
+     * SSL Authenticator class
+     * @see ReplicationSSLConfig#SSL_AUTHENTICATOR_CLASS
+     */
+    public static final ConfigParam SSL_AUTHENTICATOR_CLASS =
+        new ConfigParam(ReplicationSSLConfig.SSL_AUTHENTICATOR_CLASS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL Authenticator parameters
+     * @see ReplicationSSLConfig#SSL_AUTHENTICATOR_PARAMS
+     */
+    public static final ConfigParam SSL_AUTHENTICATOR_PARAMS =
+        new ConfigParam(ReplicationSSLConfig.SSL_AUTHENTICATOR_PARAMS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL Host Verifier
+     * @see ReplicationSSLConfig#SSL_HOST_VERIFIER
+     */
+    public static final ConfigParam SSL_HOST_VERIFIER =
+        new ConfigParam(ReplicationSSLConfig.SSL_HOST_VERIFIER,
+                        "",     // default
+                        false,  // mutable
+                        true) { // forReplication
+
+            @Override
+            public void validateValue(String value) {
+                if (value == null) {
+                    throw new IllegalArgumentException
+                        ("a value is required");
+                }
+                if (!SSLChannelFactory.isValidHostVerifier(value)) {
+                    throw new IllegalArgumentException
+                        (value + " is not a valid value");
+                }
+            }
+        };
+
+    /**
+     * SSL Host Verifier class
+     * @see ReplicationSSLConfig#SSL_HOST_VERIFIER_CLASS
+     */
+    public static final ConfigParam SSL_HOST_VERIFIER_CLASS =
+        new ConfigParam(ReplicationSSLConfig.SSL_HOST_VERIFIER_CLASS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * SSL Host Verifier parameters
+     * @see ReplicationSSLConfig#SSL_HOST_VERIFIER_PARAMS
+     */
+    public static final ConfigParam SSL_HOST_VERIFIER_PARAMS =
+        new ConfigParam(ReplicationSSLConfig.SSL_HOST_VERIFIER_PARAMS,
+                        "",     // default
+                        false,  // mutable
+                        true);  // forReplication
+
+    /**
+     * Override the current JE version, for testing only.
+     */
+    public static final ConfigParam TEST_JE_VERSION = new ConfigParam(
+        EnvironmentParams.REP_PARAM_PREFIX + "test.jeVersion",
+        "",     // default
+        false,  // mutable
+        true);  // forReplication
+
+    /**
+     * @deprecated see {@link ReplicationConfig#REPLAY_COST_PERCENT}
+     */
+    public static final IntConfigParam REPLAY_COST_PERCENT =
+        new IntConfigParam(ReplicationConfig.REPLAY_COST_PERCENT,
+                           0,      // min
+                           1000,   // max
+                           150,    // default
+                           false,  // mutable
+                           true);  // forReplication
+
+    /**
+     * @see ReplicationConfig#REPLAY_FREE_DISK_PERCENT
+     */
+    public static final IntConfigParam REPLAY_FREE_DISK_PERCENT =
+        new IntConfigParam(ReplicationConfig.REPLAY_FREE_DISK_PERCENT,
+                           0,      // min
+                           99,     // max
+                           0,      // default
+                           false,  // mutable
+                           true);  // forReplication
+
+    /**
+     * The subscription queue poll interval.
+     */
+    public static final DurationConfigParam SUBSCRIPTION_POLL_INTERVAL =
+        new DurationConfigParam(EnvironmentParams.REP_PARAM_PREFIX +
+                                "subscriptionPollInterval",
+                                "10 ms", // min
+                                null,    // max
+                                "1 s",   // default
+                                false,   // not mutable
+                                true);   // forReplication
+
+    /**
+     * The subscription queue poll timeout.
+     */
+    public static final DurationConfigParam SUBSCRIPTION_POLL_TIMEOUT =
+        new DurationConfigParam(EnvironmentParams.REP_PARAM_PREFIX +
+                                "subscriptionPollTimeout",
+                                "100 ms", // min
+                                null,     // max
+                                "30 s",   // default
+                                false,    // not mutable
+                                true);    // forReplication
+
+    /**
+     * The maximum number of times to retry failed subscription connections.
+     */
+    public static final IntConfigParam SUBSCRIPTION_MAX_CONNECT_RETRIES =
+        new IntConfigParam
+        (EnvironmentParams.REP_PARAM_PREFIX +
+         "subscriptionMaxConnectRetries",
+         Integer.valueOf(1),  // min
+         null,                // max
+         Integer.valueOf(3),  // default
+         false,               // not mutable
+         true);               // forReplication
+
+    /**
+     * The amount of time that the subscription thread should sleep before
+     * retrying a failed connection.
+     */
+    public static final DurationConfigParam SUBSCRIPTION_SLEEP_BEFORE_RETRY =
+        new DurationConfigParam(EnvironmentParams.REP_PARAM_PREFIX +
+                                "subscriptionSleepBeforeRetry",
+                                "1 s",  // min
+                                null,   // max
+                                "3 s",  // default
+                                false,  // not mutable
+                                true);  // forReplication
+
+}
diff --git a/src/com/sleepycat/je/rep/impl/TextProtocol.java b/src/com/sleepycat/je/rep/impl/TextProtocol.java
new file mode 100644
index 0000000..f9c6532
--- /dev/null
+++ b/src/com/sleepycat/je/rep/impl/TextProtocol.java
@@ -0,0 +1,1205 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.impl;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.net.SocketException;
+import java.nio.channels.Channels;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.logging.Formatter;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.config.DurationConfigParam;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.rep.elections.Utils;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions;
+import com.sleepycat.je.rep.utilint.ReplicationFormatter;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.TestHook;
+
+/**
+ * TextProtocol provides the support for implementing simple low performance
+ * protocols involving replication nodes. The protocol is primarily text based,
+ * and checks group membership and version matches with every message, favoring
+ * flexibility over performance.
+ *
+ * The base class is primarily responsible for the message formatting and
+ * message envelope validation. The subclasses define the specific messages
+ * that constitute the protocol and the request/response semantics.
+ *
+ * Every message has the format:
+ *
+ *   VERSION|NAME|ID|OP|PAYLOAD
+ *
+ * VERSION is the version of the protocol in use.
+ * NAME identifies a group participating in an election. It avoids
+ * accidental cross-talk across groups holding concurrent elections.
+ * ID identifies the originator of the message within the group.
+ * OP is the operation identified by the specific message.
+ * PAYLOAD is the payload associated with the particular operation.
+ */
+
+public abstract class TextProtocol {
+
+    /* The name of the class associated with this protocol. */
+    private final String name;
+
+    /*
+     * Protocol version string. Format: majorVersion.minorVersion.
+     * It's used to ensure compatibility across versions.
+     */
+    private final String VERSION;
+
+    /* The name of the group executing this protocol. */
+    private final String groupName;
+
+    /*
+     * The set of ids of nodes that are permitted to communicate via this
+     * protocol, or null if not restricted. It's updated as nodes enter and
+     * leave the dynamic group.
+     */
+    private Set<Integer> memberIds;
+
+    /* The id associated with this protocol participant. */
+    private final NameIdPair nameIdPair;
+
+    /*
+     * The suffix of the message prefix constituting the "fixed" part of the
+     * message for this group and node; it does not include the version
+     * information, which goes in front of this prefix.
+     */
+    protected final String messageNocheckSuffix;
+
+    /*
+     * Timeouts used for network communications. Use setTimeouts() to override
+     * the defaults.
+     */
+    private int openTimeoutMs = 10000; // Default to 10 sec
+    private int readTimeoutMs = 10000; // Default to 10 sec
+
+    /* The token separator in messages */
+    public static final String SEPARATOR = "|";
+    public static final String SEPARATOR_REGEXP = "\\" + SEPARATOR;
+
+    /* A message defined by the base class to deal with all errors. */
+    public final MessageOp PROTOCOL_ERROR =
+        new MessageOp("PE", ProtocolError.class);
+    public final MessageOp OK_RESP = new MessageOp("OK", OK.class);
+    public final MessageOp FAIL_RESP = new MessageOp("FAIL", Fail.class);
+
+    /* The number of message types defined by the subclass. */
+    private int nonDefaultMessageCount;
+
+    /* Maps request Ops to the corresponding enumerator. */
+    private final Map<String, MessageOp> ops = new HashMap<>();
+
+    protected final Logger logger;
+    protected final Formatter formatter;
+    protected final RepImpl repImpl;
+    protected final DataChannelFactory channelFactory;
+
+    /**
+     * Hook used to modify messages in the serialized form. The hook is invoked
+     * on a serialized Request immediately before it's written to the network
+     * and immediately after a response is received and before it's
+     * deserialized. The hook implementation must be re-entrant.
+     */
+    private static TestHook<String> serDeHook;
+
+    /**
+     * Creates an instance of the Protocol.
+     *
+     * @param version the protocol version number
+     * @param groupName the name of the group executing this protocol
+     * @param nameIdPair a unique identifier for this node
+     * @param repImpl for logging, may be null
+     * @param channelFactory the factory for channel creation
+     */
+    public TextProtocol(String version,
+                        String groupName,
+                        NameIdPair nameIdPair,
+                        RepImpl repImpl,
+                        DataChannelFactory channelFactory) {
+        this.VERSION = version;
+        this.groupName = groupName;
+        this.nameIdPair = nameIdPair;
+        this.repImpl = repImpl;
+        this.channelFactory = channelFactory;
+        name = getClass().getName();
+
+        messageNocheckSuffix =
+            groupName + SEPARATOR + NameIdPair.NOCHECK_NODE_ID;
+
+        if (repImpl != null) {
+            this.logger = LoggerUtils.getLogger(getClass());
+        } else {
+            this.logger = LoggerUtils.getLoggerFormatterNeeded(getClass());
+        }
+        this.formatter = new ReplicationFormatter(nameIdPair);
+    }
+
+    /**
+     * Sets the hook that is invoked post serialization on request messages and
+     * pre deserialization on response messages.
+     *
+     * The hook implementation must be re-entrant.
+     */
+    public static void setSerDeHook(TestHook<String> serDeHook) {
+        TextProtocol.serDeHook = serDeHook;
+    }
+
+    /**
+     * Set the network timeouts associated with uses of this protocol instance.
+     */
+    protected void setTimeouts(RepImpl repImpl,
+                               DurationConfigParam openTimeoutConfig,
+                               DurationConfigParam readTimeoutConfig) {
+        if (repImpl == null) {
+            return;
+        }
+        final DbConfigManager configManager = repImpl.getConfigManager();
+        openTimeoutMs = configManager.getDuration(openTimeoutConfig);
+        readTimeoutMs = configManager.getDuration(readTimeoutConfig);
+    }
+
+    /**
+     * The messages as defined by the subclass. Note that PROTOCOL_ERROR is a
+     * pre-defined message that is defined by this class. The initialization is
+     * not considered complete until this method has been invoked, typically in
+     * the constructor itself. This two-step approach is unfortunately
+     * necessary since the creation of MessageOp instances requires that this
+     * class be completely initialized; otherwise the MessageOp list could have
+     * been passed in as a constructor argument.
+     *
+     * @param protocolOps the message ops defined by the subclass.
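+     *
+     * A minimal sketch of the expected subclass pattern (the ops and message
+     * classes named here are illustrative only, not part of this class):
+     *
+     * <pre>
+     *   public final MessageOp PROPOSE = new MessageOp("PR", Propose.class);
+     *   public final MessageOp ACCEPT = new MessageOp("AC", Accept.class);
+     *   ...
+     *   initializeMessageOps(new MessageOp[] {PROPOSE, ACCEPT});
+     * </pre>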
+     */
+    protected void initializeMessageOps(MessageOp[] protocolOps) {
+        for (MessageOp op : protocolOps) {
+            ops.put(op.opId, op);
+        }
+        nonDefaultMessageCount = protocolOps.length;
+        ops.put(PROTOCOL_ERROR.opId, PROTOCOL_ERROR);
+        ops.put(OK_RESP.opId, OK_RESP);
+        ops.put(FAIL_RESP.opId, FAIL_RESP);
+    }
+
+    /**
+     * For testing only.
+     */
+    protected MessageOp replaceOp(String op, MessageOp message) {
+        return ops.put(op, message);
+    }
+
+    /**
+     * Returns the messages, of the specified type, used by the protocol
+     */
+    public Set<String> getOps(Class<?> messageType) {
+        final Set<String> reqOps = new HashSet<>();
+        for (Entry<String, MessageOp> e : ops.entrySet()) {
+            if (messageType.
+                isAssignableFrom(e.getValue().getMessageClass())) {
+                reqOps.add(e.getKey());
+            }
+        }
+
+        return reqOps;
+    }
+
+    public int getOpenTimeout() {
+        return openTimeoutMs;
+    }
+
+    public int getReadTimeout() {
+        return readTimeoutMs;
+    }
+
+    public NameIdPair getNameIdPair() {
+        return nameIdPair;
+    }
+
+    /* The total number of nonDefault messages defined by the protocol. */
+    public int messageCount() {
+        return nonDefaultMessageCount;
+    }
+
+    /**
+     * Updates the current set of nodes that are permitted to communicate via
+     * this protocol, or null for unrestricted.
+     *
+     * @param newMemberIds
+     */
+    public void updateNodeIds(Set<Integer> newMemberIds) {
+        memberIds = newMemberIds;
+    }
+
+    /**
+     * Returns the integer which represents the major version number of a
+     * Protocol version.
+     */
+    public int getMajorVersionNumber(String version) {
+        return Double.valueOf(version).intValue();
+    }
+
+    /**
+     * The Operations that are part of the protocol.
+     */
+    public static class MessageOp {
+
+        /* The string denoting the operation for the request message. */
+        private final String opId;
+
+        /* The class used to represent the message. */
+        private final Class<? extends Message> messageClass;
+
+        public MessageOp(String opId, Class<? extends Message> messageClass) {
+            this.opId = opId;
+            this.messageClass = messageClass;
+        }
+
+        String getOpId() {
+            return opId;
+        }
+
+        Class<? extends Message> getMessageClass() {
+            return messageClass;
+        }
+
+        @Override
+        public String toString() {
+            return opId;
+        }
+    }
+
+    /**
+     * Represents the tokens on a message line. The order of the enumerators
+     * represents the order of the tokens in the wire format.
+     */
+    public enum TOKENS {
+        VERSION_TOKEN,
+        NAME_TOKEN,
+        ID_TOKEN,
+        OP_TOKEN,
+        FIRST_PAYLOAD_TOKEN;
+    }
+
+    /* Used to indicate that an entity is formattable and can be serialized
+     * and de-serialized.
+     */
+    protected interface WireFormatable {
+
+        /*
+         * Returns the string representation suitable for use in a network
+         * request.
+         */
+        abstract String wireFormat();
+    }
+
+    /**
+     * Parses a line into a Request/Response message.
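+     *
+     * For example, given the wire format described in the class javadoc, a
+     * hypothetical line such as
+     *
+     *   1.0|ExampleGroup|1|OK
+     *
+     * would be parsed into an OK response message; the version, group name
+     * and node id shown are illustrative only.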
+     *
+     * @param line containing the message
+     * @return a message instance
+     * @throws InvalidMessageException
+     */
+    public Message parse(String line)
+        throws InvalidMessageException {
+
+        String[] tokens = line.split(SEPARATOR_REGEXP);
+
+        final int index = TOKENS.OP_TOKEN.ordinal();
+        if (index >= tokens.length) {
+            throw new InvalidMessageException(
+                MessageError.BAD_FORMAT,
+                "Missing message op in message: " + line);
+        }
+        final MessageOp op = ops.get(tokens[index]);
+        if (op == null) {
+            throw new InvalidMessageException(MessageError.BAD_FORMAT,
+                                              "Text Protocol" +
+                                              " unknown op:" + tokens[index] +
+                                              " in message: " + line);
+        }
+
+        try {
+            Class<? extends Message> c = op.getMessageClass();
+            Constructor<? extends Message> cons =
+                c.getConstructor(c.getEnclosingClass(),
+                                 line.getClass(),
+                                 tokens.getClass());
+            Message message = cons.newInstance(this, line, tokens);
+            return message;
+        } catch (InstantiationException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (IllegalAccessException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (SecurityException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (NoSuchMethodException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (InvocationTargetException e) {
+            /* Unwrap the exception. */
+            Throwable target = e.getTargetException();
+            if (target instanceof RuntimeException) {
+                final String message = "message: " + line +
+                    " exception:" + target.getClass().getName() +
+                    " exception message:" + target.getMessage();
+                throw new InvalidMessageException(MessageError.BAD_FORMAT,
+                                                  message);
+
+            } else if (target instanceof InvalidMessageException) {
+                throw (InvalidMessageException) target;
+            }
+            throw EnvironmentFailureException.unexpectedException(e);
+        }
+    }
+
+    /**
+     * Base message class for all messages exchanged in the protocol.
+     */
+    public abstract class Message implements WireFormatable {
+        /* The sender of the message. */
+        private int senderId = 0;
+
+        /*
+         * The version of this message, as it's serialized and sent across
+         * the network. The default is that messages are sent in the VERSION of
+         * the current protocol, but in cases of mixed-version upgrades, the
+         * message may be sent in an earlier version format.
+         *
+         * When this message is a RequestMessage, the sender will always
+         * initially send it out in its own native version, but may resend it
+         * in an earlier version, if the recipient can't understand the native
+         * version. When the message is a ResponseMessage, the sender can reply
+         * either in its native version, or in an earlier version if the
+         * requester is an older version of JE.
+         */
+        protected String sendVersion;
+
+        /* The line representing the message. */
+        private final String line;
+
+        /* The tokenized form of the above line. */
+        private final String[] tokens;
+
+        /* The current variable arg token */
+        private int currToken = TOKENS.FIRST_PAYLOAD_TOKEN.ordinal();
+
+        protected String messagePrefixNocheck;
+
+        /**
+         * The constructor used for the original non-serialized instance of the
+         * message, which does not use the line or tokens.
+         */
+        protected Message() {
+            line = null;
+            tokens = null;
+            setSendVersion(VERSION);
+        }
+
+        /**
+         * Every message must define a constructor of this form so that it can
+         * be de-serialized. The constructor is invoked using reflection by the
+         * parse() method.
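+         *
+         * A minimal sketch of the constructor shape a subclass message is
+         * expected to supply (the message name is illustrative):
+         *
+         * <pre>
+         *   public Accept(String line, String[] tokens)
+         *       throws InvalidMessageException {
+         *       super(line, tokens);
+         *       // consume any op-specific payload via nextPayloadToken()
+         *   }
+         * </pre>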
+ * + * @param line the line constituting the message + * @param tokens the line in token form + * @throws InvalidMessageException + * @throws EnvironmentFailureException on format errors + */ + protected Message(String line, String[] tokens) + throws InvalidMessageException { + + this.line = line; + this.tokens = tokens; + + /* Validate the leading fixed fields. */ + final String version = getTokenString(TOKENS.VERSION_TOKEN); + if (new Double(VERSION) < new Double(version)) { + throw new InvalidMessageException + (MessageError.VERSION_MISMATCH, + "Version argument mismatch." + + " Expected: " + VERSION + ", found: " + version + + ", in message: " + line); + } + + /* + * Set the sender version of a request message. This version + * information will be used by the receiver to determine what + * version should be used for the response message. + */ + setSendVersion(version); + + final String messageGroupName = getTokenString(TOKENS.NAME_TOKEN); + if (!groupName.equals(messageGroupName)) { + throw new InvalidMessageException + (MessageError.GROUP_MISMATCH, + "Group name mismatch; this group name: " + groupName + + ", message group name: " + messageGroupName + + ", in message: " + line); + } + + senderId = + new Integer(getTokenString(TOKENS.ID_TOKEN)).intValue(); + if ((memberIds != null) && + (memberIds.size() > 0) && + (nameIdPair.getId() != NameIdPair.NOCHECK_NODE_ID) && + (senderId != NameIdPair.NOCHECK_NODE_ID) && + (senderId != nameIdPair.getId()) && + !memberIds.contains(senderId)) { + throw new InvalidMessageException + (MessageError.NOT_A_MEMBER, + "Sender's member id: " + senderId + + ", message op: " + getTokenString(TOKENS.OP_TOKEN) + + ", was not a member of the group: " + memberIds + + ", in message: " + line); + } + } + + public int getSenderId() { + return senderId; + } + + /* + * Set the version of the message that we have just received. This + * version information will be used by the receiver to determine what + * version should be used for the response message. + */ + public void setSendVersion(String version) { + if (new Double(VERSION) < new Double(version)) { + throw new IllegalStateException + ("Send version: " + version + " shouldn't be larger " + + "than the current version: " + VERSION); + } + + if (!version.equals(sendVersion)) { + sendVersion = version; + messagePrefixNocheck = + sendVersion + SEPARATOR + messageNocheckSuffix; + } + } + + /* Get the send version of a message. */ + public String getSendVersion() { + return sendVersion; + } + + protected String getMessagePrefix() { + return sendVersion + SEPARATOR + groupName + SEPARATOR + + nameIdPair.getId(); + } + + public abstract MessageOp getOp(); + + /** + * Returns the protocol associated with this message + */ + public TextProtocol getProtocol() { + return TextProtocol.this; + } + + /** + * Returns the token value associated with the token type. + * + * @param tokenType identifies the token in the message + * @return the associated token value + */ + private String getTokenString(TOKENS tokenType) { + final int index = tokenType.ordinal(); + if (index >= tokens.length) { + throw EnvironmentFailureException.unexpectedState + ("Bad format; missing token: " + tokenType + + "at position: " + index + "in message: " + line); + } + return tokens[index]; + } + + /** + * Returns the next token in the payload. 
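+         *
+         * For example, the ProtocolError constructor in this class consumes
+         * its two payload tokens as:
+         *
+         * <pre>
+         *   errorType = MessageError.valueOf(nextPayloadToken());
+         *   message = nextPayloadToken();
+         * </pre>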
+ * + * @return the next payload token + * @throws InvalidMessageException + */ + protected String nextPayloadToken() + throws InvalidMessageException { + + if (currToken >= tokens.length) { + throw new InvalidMessageException + (MessageError.BAD_FORMAT, + "Bad format; missing token at position: " + currToken + + ", in message: " + line); + } + return tokens[currToken++]; + } + + protected boolean hasMoreTokens() { + return currToken < tokens.length; + } + + /** + * Returns the current token position in the payload. + * + * @return the current token position + */ + protected int getCurrentTokenPosition() { + return currToken; + } + } + + /** + * Base classes for response messages. + */ + public abstract class ResponseMessage extends Message { + + protected ResponseMessage() { + super(); + } + + /** + * Create an instance with the send version specified by the request. + * + * @param request the request + */ + protected ResponseMessage(final RequestMessage request) { + setSendVersion(request.getSendVersion()); + } + + protected ResponseMessage(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + } + + /** + * Returns the version id and Op concatenation that starts every + * message. + */ + protected String wireFormatPrefix() { + return getMessagePrefix() + SEPARATOR + getOp().opId; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof ResponseMessage)) { + return false; + } + return getOp().equals(((ResponseMessage)obj).getOp()); + } + + @Override + public int hashCode() { + return getOp().getOpId().hashCode(); + } + } + + public class ProtocolError extends ResponseMessage { + private final String message; + private final MessageError errorType; + + public ProtocolError(InvalidMessageException messageException) { + this(messageException.getErrorType(), + messageException.getMessage()); + } + + public ProtocolError(MessageError messageError, String message) { + this.message = message; + this.errorType = messageError; + } + + public ProtocolError(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + errorType = MessageError.valueOf(nextPayloadToken()); + message = nextPayloadToken(); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + + ((message == null) ? 0 : message.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (!(obj instanceof ProtocolError)) { + return false; + } + final ProtocolError other = (ProtocolError) obj; + if (message == null) { + if (other.message != null) { + return false; + } + } else if (!message.equals(other.message)) { + return false; + } + + return true; + } + + @Override + public MessageOp getOp() { + return PROTOCOL_ERROR; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + + errorType.toString() + SEPARATOR + message; + } + + public MessageError getErrorType() { + return errorType; + } + + public String getMessage() { + return message; + } + } + + public class OK extends ResponseMessage { + + /** + * Create an instance with the send version specified by the request. 
+ * + * @param request the request + */ + public OK(final RequestMessage request) { + super(request); + } + + public OK(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + } + + @Override + public MessageOp getOp() { + return OK_RESP; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix(); + } + } + + public class Fail extends ResponseMessage { + private final String message; + + public Fail(String message) { + this.message = sanitize(message); + } + + /** + * Create an instance with the send version specified by the request. + * + * @param request the request + * @param message the failure message + */ + public Fail(final RequestMessage request, final String message) { + super(request); + this.message = sanitize(message); + } + + public Fail(String line, String[] tokens) + throws InvalidMessageException { + super(line, tokens); + + message = nextPayloadToken(); + } + + public String getMessage() { + return message; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + getOuterType().hashCode(); + result = prime * result + + ((message == null) ? 0 : message.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (!(obj instanceof Fail)) { + return false; + } + Fail other = (Fail) obj; + if (!getOuterType().equals(other.getOuterType())) { + return false; + } + if (message == null) { + if (other.message != null) { + return false; + } + } else if (!message.equals(other.message)) { + return false; + } + return true; + } + + @Override + public MessageOp getOp() { + return FAIL_RESP; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + message; + } + + private TextProtocol getOuterType() { + return TextProtocol.this; + } + + /** + * Removes any newline characters. Embedded newlines are not supported + * by {@code TextProtocol}, but exception messages sometimes have them, + * and the payload of a {@code Fail} response often comes from an + * exception message. + */ + private String sanitize(String msg) { + return msg.replace("\n", " "); + } + } + + /** + * Base class for all Request messages + */ + public abstract class RequestMessage extends Message { + + protected RequestMessage() {} + + protected RequestMessage(String line, String[] tokens) + throws InvalidMessageException { + super(line, tokens); + } + + /** + * Returns the version id and Op concatenation that form the prefix + * for every message. + */ + protected String wireFormatPrefix() { + return getMessagePrefix() + SEPARATOR + getOp().opId; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof RequestMessage)) { + return false; + } + return getOp().equals(((RequestMessage) obj).getOp()); + } + + @Override + public int hashCode() { + return getOp().getOpId().hashCode(); + } + } + + /** + * Converts a response line into a ResponseMessage. 
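+     * The line is expected to be in the same VERSION|NAME|ID|OP|PAYLOAD wire
+     * format described in the class javadoc.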
+     *
+     * @param responseLine
+     * @return the response message
+     * @throws InvalidMessageException
+     */
+    ResponseMessage parseResponse(String responseLine)
+        throws InvalidMessageException {
+
+        return (ResponseMessage) parse(responseLine);
+    }
+
+    /**
+     * Converts a request line into a requestMessage.
+     *
+     * @param requestLine
+     * @return the request message
+     * @throws InvalidMessageException
+     */
+    public RequestMessage parseRequest(String requestLine)
+        throws InvalidMessageException {
+
+        return (RequestMessage) parse(requestLine);
+    }
+
+    /**
+     * Reads the channel and returns the request that was read. If the message
+     * format was bad, it sends a ProtocolError response back over the channel
+     * and no further action is needed by the caller.
+     *
+     * @param channel the channel delivering the request
+     * @return null if EOF was reached or the message format was bad
+     * @throws IOException
+     */
+    public RequestMessage getRequestMessage(DataChannel channel)
+        throws IOException {
+
+        BufferedReader in = new BufferedReader
+            (new InputStreamReader(Channels.newInputStream(channel)));
+
+        String requestLine = in.readLine();
+        if (requestLine == null) {
+            /* EOF */
+            return null;
+        }
+        try {
+            return parseRequest(requestLine);
+        } catch (InvalidMessageException e) {
+            processIME(channel, e);
+            return null;
+        }
+    }
+
+    /**
+     * Process an IME encountered during request processing by writing a
+     * ProtocolError message as a response and logging it.
+     *
+     * @param channel the channel used to write the ProtocolError message
+     * @param ime the exception
+     */
+    public void processIME(DataChannel channel,
+                           InvalidMessageException ime) {
+        LoggerUtils.logMsg(logger, repImpl, formatter, Level.WARNING,
+                           name + " format error:" +
+                           LoggerUtils.exceptionTypeAndMsg(ime));
+        PrintWriter out =
+            new PrintWriter(Channels.newOutputStream(channel), true);
+        out.println(new ProtocolError(ime).wireFormat());
+        out.close();
+    }
+
+    public ResponseMessage process(Object requestProcessor,
+                                   RequestMessage requestMessage) {
+
+        Class<?> cl = requestProcessor.getClass();
+        try {
+            Method method =
+                cl.getMethod("process", requestMessage.getClass());
+            return (ResponseMessage) method.invoke
+                (requestProcessor, requestMessage);
+        } catch (NoSuchMethodException e) {
+            LoggerUtils.logMsg(logger, repImpl, formatter, Level.SEVERE,
+                               name +
+                               " Method: process(" +
+                               requestMessage.getClass().getName() +
+                               ") was missing");
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (Exception e) {
+            LoggerUtils.logMsg(logger, repImpl, formatter, Level.SEVERE,
+                               name +
+                               " Unexpected exception: " +
+                               LoggerUtils.exceptionTypeAndMsg(e));
+            throw EnvironmentFailureException.unexpectedException(e);
+        }
+    }
+
+    /**
+     * A single request/response interaction, targeted at a given service
+     * running at a particular remote socket address. Since it implements
+     * {@code Runnable} it can be used with thread pools, {@code Future}s,
+     * etc. But its {@code run()} method can also simply be called directly.
+     */
+    public class MessageExchange implements Runnable {
+
+        public final InetSocketAddress target;
+        private final RequestMessage requestMessage;
+        private final String serviceName;
+        private ResponseMessage responseMessage;
+        public Exception exception;
+
+        public MessageExchange(InetSocketAddress target,
+                               String serviceName,
+                               RequestMessage request) {
+            this.target = target;
+            this.serviceName = serviceName;
+            this.requestMessage = request;
+        }
+
+        /*
+         * Get the response message for a request message.
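+         *
+         * A hedged usage sketch (the service name and target address are
+         * illustrative):
+         *
+         *   MessageExchange me = new MessageExchange(target, "Elections", req);
+         *   me.run();                          // or submit to an executor
+         *   ResponseMessage resp = me.getResponseMessage();
+         *   Exception e = me.getException();   // non-null when resp is null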
+         *
+         * If the response message is a ProtocolError message caused by a
+         * protocol version mismatch, it resets the request message's
+         * sendVersion to the ProtocolError's version and sends the request
+         * again.
+         */
+        @Override
+        public void run() {
+            messageExchange();
+
+            if (responseMessage != null &&
+                responseMessage.getOp() == PROTOCOL_ERROR) {
+                ProtocolError error = (ProtocolError) responseMessage;
+                if (error.getErrorType() == MessageError.VERSION_MISMATCH) {
+                    requestMessage.setSendVersion(error.getSendVersion());
+                    messageExchange();
+                    LoggerUtils.logMsg
+                        (logger, repImpl, formatter, Level.INFO,
+                         name +
+                         " Resend message: " + requestMessage.toString() +
+                         " in version: " + requestMessage.getSendVersion() +
+                         " while protocol version is: " + VERSION +
+                         " because of the version mismatch, the returned" +
+                         " response message is: " + responseMessage);
+                }
+            }
+        }
+
+        /**
+         * Run a message exchange. A successful exchange results in a response
+         * message being set. All failures result in the response message being
+         * null and an exception being set.
+         */
+        public void messageExchange() {
+
+            DataChannel dataChannel = null;
+            BufferedReader in = null;
+            PrintWriter out = null;
+            try {
+                dataChannel =
+                    channelFactory.connect(
+                        target,
+                        new ConnectOptions().
+                        setTcpNoDelay(true).
+                        setOpenTimeout(openTimeoutMs).
+                        setReadTimeout(readTimeoutMs).
+                        setBlocking(true).
+                        setReuseAddr(true));
+
+                ServiceDispatcher.doServiceHandshake(dataChannel,
+                                                     serviceName);
+
+                OutputStream ostream =
+                    Channels.newOutputStream(dataChannel);
+                out = new PrintWriter(ostream, true);
+                String wireFormat = requestMessage.wireFormat();
+                if (serDeHook != null) {
+                    serDeHook.doHook(wireFormat);
+                    wireFormat = serDeHook.getHookValue();
+                }
+                LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE,
+                                   name +
+                                   " request: " + wireFormat + " to " + target);
+                out.println(wireFormat);
+                out.flush();
+                in = new BufferedReader
+                    (new InputStreamReader(
+                        Channels.newInputStream(dataChannel)));
+                String line = in.readLine();
+                if (serDeHook != null) {
+                    serDeHook.doHook(line);
+                    line = serDeHook.getHookValue();
+                }
+
+                LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE,
+                                   name +
+                                   " response: " + line + " from " + target);
+                if (line == null) {
+                    setResponseMessage
+                        (new ProtocolError(MessageError.BAD_FORMAT,
+                                           "Premature EOF for request: " +
+                                           wireFormat));
+                } else {
+                    setResponseMessage(parseResponse(line));
+                }
+            } catch (java.net.SocketTimeoutException e){
+                this.exception = e;
+            } catch (SocketException e) {
+                this.exception = e;
+            } catch (IOException e) {
+                this.exception = e;
+            } catch (TextProtocol.InvalidMessageException ime) {
+                LoggerUtils.logMsg(logger, repImpl, formatter, Level.WARNING,
+                                   name + " response format error:" +
+                                   LoggerUtils.exceptionTypeAndMsg(ime) +
+                                   " from:" + target);
+                this.exception = ime;
+            } catch (ServiceConnectFailedException e) {
+                this.exception = e;
+            } catch (Exception e) {
+                this.exception = e;
+                LoggerUtils.logMsg(logger, repImpl, formatter, Level.SEVERE,
+                                   name + " Unexpected exception:" +
+                                   LoggerUtils.exceptionTypeAndMsg(e));
+                throw EnvironmentFailureException.unexpectedException
+                    ("Service: " + serviceName +
+                     " failed; attempting request: " + requestMessage.getOp(),
+                     e);
+            } finally {
+                Utils.cleanup(logger, repImpl, formatter, dataChannel, in, out);
+            }
+        }
+
+        public void setResponseMessage(ResponseMessage responseMessage) {
+            this.responseMessage = responseMessage;
+        }
+
+        /**
+         * Returns the response message. Null may be returned as part of
+         * the protocol exchange, or it may be returned because an exception
+         * was encountered, say due to some IO problem. It's the caller's
+         * responsibility to check for an exception in that circumstance.
+         *
        + * Note: there may be some protocols (e.g., Monitor) that define null + * to be a proper, expected response upon success. It might be + * preferable to redefine them to return an explicit OK response, if + * possible. + * + * @return the response + */ + public ResponseMessage getResponseMessage() { + return responseMessage; + } + + public RequestMessage getRequestMessage() { + return requestMessage; + } + + public Exception getException() { + return exception; + } + } + + protected static class StringFormatable implements WireFormatable { + protected String s; + + StringFormatable() {} + + protected StringFormatable(String s) { + this.s = s; + } + + public void init(String wireFormat) { + s = wireFormat; + } + + @Override + public String wireFormat() { + return s; + } + + @Override + public int hashCode() { + return ((s == null) ? 0 : s.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof StringFormatable)) { + return false; + } + + final StringFormatable other = (StringFormatable) obj; + if (s == null) { + if (other.s != null) { + return false; + } + } else if (!s.equals(other.s)) { + return false; + } + return true; + } + } + + /* + * The type associated with an invalid Message. It's used by the exception + * below and by ProtocolError. + */ + static public enum MessageError + {BAD_FORMAT, VERSION_MISMATCH, GROUP_MISMATCH, NOT_A_MEMBER} + + /** + * Used to indicate a message format or invalid content exception. + */ + @SuppressWarnings("serial") + public static class InvalidMessageException extends Exception { + private final MessageError errorType; + + public InvalidMessageException(MessageError errorType, + String message) { + super(message); + + this.errorType = errorType; + } + + public MessageError getErrorType() { + return errorType; + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/networkRestore/FeederManager.java b/src/com/sleepycat/je/rep/impl/networkRestore/FeederManager.java new file mode 100644 index 0000000..4d626b9 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/networkRestore/FeederManager.java @@ -0,0 +1,290 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import java.util.ArrayList; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.util.DbBackup; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StoppableThread; + +/** + * Manages the multiple log file feeders that may be servicing requests from + * multiple clients requesting log files. + */ +public class FeederManager extends StoppableThread { + + /* + * The queue into which the ServiceDispatcher queues socket channels for + * new Feeder instances. + */ + private final BlockingQueue channelQueue = + new LinkedBlockingQueue(); + + /* + * Map indexed by the client id. Each Feeder adds itself to the Map when + * its first created and removes itself when it exits. + */ + final Map feeders = + new ConcurrentHashMap(); + + /* + * Maps the client id to its Lease. Except for instantaneous overlaps, + * a client will have an entry in either the feeders map or the leases + * map, but not in both maps. + */ + final Map leases = new ConcurrentHashMap(); + + /* + * A cache of StatResponses to try minimize the recomputation of SHA1 + * hashes. + */ + final Map statResponses = + new ConcurrentHashMap(); + + /* Implements the timer used to maintain the leases. */ + final Timer leaseTimer = new Timer(true); + + /* This node's name and internal id */ + final NameIdPair nameIdPair; + + /* Counts the number of times the lease was renewed. */ + public int leaseRenewalCount; + + /* The duration of leases. */ + long leaseDuration = DEFAULT_LEASE_DURATION; + + final ServiceDispatcher serviceDispatcher; + + /* Determines whether the feeder manager has been shutdown. */ + final AtomicBoolean shutdown = new AtomicBoolean(false); + + final Logger logger; + + /* Wait indefinitely for somebody to request the service. */ + private static long POLL_TIMEOUT = Long.MAX_VALUE; + + /* Identifies the Feeder Service. */ + public static final String FEEDER_SERVICE = "LogFileFeeder"; + + /* + * Default duration of lease on DbBackup associated with the client. It's + * five minutes. + */ + private static final long DEFAULT_LEASE_DURATION = 5 * 60 * 1000; + + /** + * Creates a FeederManager but does not start it. + * + * @param serviceDispatcher The service dispatcher with which the + * FeederManager must register itself. It's null only in a test + * environment. 
+ * + * @param nameIdPair The node name and id associated with the feeder + * + * @param envImpl the environment that will provide the log files + */ + public FeederManager(ServiceDispatcher serviceDispatcher, + EnvironmentImpl envImpl, + NameIdPair nameIdPair) { + + super(envImpl, "Feeder Manager node: " + nameIdPair.getName()); + this.serviceDispatcher = serviceDispatcher; + serviceDispatcher.register + (serviceDispatcher.new + LazyQueuingService(FEEDER_SERVICE, channelQueue, this)); + this.nameIdPair = nameIdPair; + logger = LoggerUtils.getLogger(getClass()); + } + + EnvironmentImpl getEnvImpl() { + return envImpl; + } + + /** + * Returns the number of times the lease was actually renewed. + */ + public int getLeaseRenewalCount() { + return leaseRenewalCount; + } + + /** + * Returns the number of leases that are currently outstanding. + */ + public int getLeaseCount() { + return leases.size(); + } + + /** + * Returns the number of feeders that are currently active with this node. + * Note that active leases are included in this count, since it's expected + * that the clients will try to reconnect. + */ + public int getActiveFeederCount() { + return feeders.size() + getLeaseCount(); + } + + public long getLeaseDuration() { + return leaseDuration; + } + + public void setLeaseDuration(long leaseDuration) { + this.leaseDuration = leaseDuration; + } + + /** + * The dispatcher method that starts up new log file feeders. + */ + @Override + public void run() { + try { + while (true) { + final DataChannel channel = + channelQueue.poll(POLL_TIMEOUT, TimeUnit.MILLISECONDS); + if (channel == RepUtils.CHANNEL_EOF_MARKER) { + LoggerUtils.info(logger, envImpl, + "Log file Feeder manager soft shutdown."); + return; + } + new LogFileFeeder(this, channel).start(); + } + } catch (InterruptedException ie) { + LoggerUtils.info + (logger, envImpl, "Log file feeder manager interrupted"); + } catch (Exception e) { + LoggerUtils.severe(logger, envImpl, + "unanticipated exception: " + e.getMessage()); + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.UNCAUGHT_EXCEPTION, e); + } finally { + shutdown(); + } + } + + public void shutdown() { + LoggerUtils.fine + (logger, envImpl, "Shutting down log file feeder manager"); + + if (!shutdown.compareAndSet(false, true)) { + return; + } + shutdownThread(logger); + + /* shutdown active feeder threads */ + for (LogFileFeeder feeder : + new ArrayList(feeders.values())) { + feeder.shutdown(); + } + leaseTimer.cancel(); + /* + * Terminate any outstanding leases, so we don't hold back file + * deletion by the cleaner. + */ + for (Lease l : new ArrayList(leases.values())) { + l.terminate(); + } + serviceDispatcher.cancel(FEEDER_SERVICE); + cleanup(); + LoggerUtils.fine(logger, envImpl, + "Shut down log file feeder manager completed"); + } + + @Override + protected int initiateSoftShutdown() { + /* Shutdown invoked from a different thread. */ + channelQueue.clear(); + /* Add special entry so that the channelQueue.poll operation exits. */ + channelQueue.add(RepUtils.CHANNEL_EOF_MARKER); + return 0; + } + + /** + * Provides the lease mechanism used to maintain a handle to the DbBackup + * object across Server client disconnects. 
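+ *
+ * Editor's illustrative sketch, not part of the original commit: the lease
+ * lifecycle as seen by a hypothetical caller holding the enclosing
+ * FeederManager as {@code fm}:
+ * <pre>
+ *    // Creation registers the lease in fm.leases and schedules its expiry.
+ *    Lease lease = fm.new Lease(clientId, fm.getLeaseDuration(), dbBackup);
+ *
+ *    // A client that reconnects before expiry reclaims the DbBackup;
+ *    // terminate() returns null if the timer has already fired.
+ *    DbBackup resumed = lease.terminate();
+ * </pre>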
+ */ + class Lease extends TimerTask { + private final int id; + private DbBackup dbBackup; + + public Lease(int id, long duration, DbBackup dbbackup) { + super(); + this.dbBackup = dbbackup; + this.id = id; + Lease oldLease = leases.put(id, this); + if (oldLease != null) { + throw EnvironmentFailureException.unexpectedState + ("Found an old lease for node: " + id); + } + leaseTimer.schedule(this, duration); + } + + @Override + /* The timer went off, expire the lease if it hasn't been terminated */ + public synchronized void run() { + if (dbBackup == null) { + return; + } + dbBackup.endBackup(); + terminate(); + } + + /** + * Fetches the leased DbBackup instance and terminates the lease. + * + * @return the dbBackup instance, if the lease hasn't already been + * terminated + */ + public synchronized DbBackup terminate() { + if (dbBackup == null) { + return null; + } + cancel(); + Lease l = leases.remove(id); + assert(l == this); + DbBackup saveDbBackup = dbBackup; + dbBackup = null; + return saveDbBackup; + } + + public synchronized DbBackup getOpenDbBackup() { + return (dbBackup != null) && dbBackup.backupIsOpen() ? + dbBackup : + null; + } + } + + /** + * @see StoppableThread#getLogger + */ + @Override + protected Logger getLogger() { + return logger; + } +} diff --git a/src/com/sleepycat/je/rep/impl/networkRestore/LogFileFeeder.java b/src/com/sleepycat/je/rep/impl/networkRestore/LogFileFeeder.java new file mode 100644 index 0000000..4829541 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/networkRestore/LogFileFeeder.java @@ -0,0 +1,531 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.FileChannel; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FeederInfoReq; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileInfoReq; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileInfoResp; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileReq; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ClientVersion; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.util.DbBackup; +import com.sleepycat.je.utilint.LogVerifier; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.VLSN; + +/** + * The LogFileFeeder supplies log files to a client. There is one instance of + * this class per client that's currently active. LogFileFeeders are created by + * the FeederManager and exist for the duration of the session with the client. + */ +public class LogFileFeeder extends StoppableThread { + + /** + * Time to wait for the next request from the client, 5 minutes. + */ + private static final int SOCKET_TIMEOUT_MS = 5 * 60 * 1000; + + /* + * 8K transfer size to take advantage of increasingly prevalent jumbo + * frame sizes and to keep disk i/o contention to a minimum. + */ + static final int TRANSFER_BYTES = 0x2000; + + /* + * The parent FeederManager that creates and maintains LogFileFeeder + * instances. + */ + private final FeederManager feederManager; + + /* The channel on which the feeder communicates with the client. */ + private final NamedChannel namedChannel; + + /* The client node requesting the log files. */ + private int clientId; + + /* + * The dbBackup instance that's used to manage the list of files that will + * be transferred. It's used to ensure that a consistent set is transferred + * over to the client. If an open dbBackup exists for the client, it's + * established in the checkProtocol method immediately after the client has + * been identified. + */ + private DbBackup dbBackup = null; + + /* Used to compute a SHA1 during a transfer, or if a client requests it. */ + final MessageDigest messageDigest; + + /* Logger shared with the FeederManager. 
*/ + final private Logger logger; + + public LogFileFeeder(FeederManager feederManager, + DataChannel channel) + throws DatabaseException { + super(feederManager.getEnvImpl(), "Log File Feeder"); + + this.feederManager = feederManager; + logger = feederManager.logger; + this.namedChannel = new NamedChannel(channel, feederManager.nameIdPair); + + try { + messageDigest = MessageDigest.getInstance("SHA1"); + } catch (NoSuchAlgorithmException e) { + LoggerUtils.severe(logger, feederManager.getEnvImpl(), + "The SHA1 algorithm was not made available " + + "by the security provider"); + throw EnvironmentFailureException.unexpectedException(e); + } + } + + public void shutdown() { + if (shutdownDone(logger)) { + return; + } + + shutdownThread(logger); + feederManager.feeders.remove(clientId); + LoggerUtils.info(logger, feederManager.getEnvImpl(), + "Log file feeder for client:" + clientId + + " is shutdown."); + } + + @Override + protected int initiateSoftShutdown() { + /* + * The feeder will get an I/O exception and exit, since it can't use + * the channel after it has been closed. + */ + RepUtils.shutdownChannel(namedChannel); + return SOCKET_TIMEOUT_MS; + } + + /** + * The main driver loop that enforces the protocol message sequence and + * implements it. + */ + @Override + public void run() { + /* The initial protocol */ + Protocol protocol = new Protocol(feederManager.nameIdPair, + Protocol.VERSION, + feederManager.getEnvImpl()); + try { + configureChannel(); + protocol = checkProtocol(protocol); + checkFeeder(protocol); + sendFileList(protocol); + sendRequestedFiles(protocol); + + /* Done, cleanup */ + dbBackup.endBackup(); + dbBackup = null; + } catch (ClosedByInterruptException e) { + LoggerUtils.fine + (logger, feederManager.getEnvImpl(), + "Ignoring ClosedByInterruptException normal shutdown"); + } catch (IOException e) { + LoggerUtils.warning(logger, feederManager.getEnvImpl(), + " IO Exception: " + e.getMessage()); + } catch (ProtocolException e) { + LoggerUtils.severe(logger, feederManager.getEnvImpl(), + " Protocol Exception: " + e.getMessage()); + } catch (Exception e) { + throw new EnvironmentFailureException + (feederManager.getEnvImpl(), + EnvironmentFailureReason.UNCAUGHT_EXCEPTION, + e); + } finally { + try { + namedChannel.getChannel().close(); + } catch (IOException e) { + LoggerUtils.warning(logger, feederManager.getEnvImpl(), + "Log File feeder io exception on " + + "channel close: " + e.getMessage()); + } + shutdown(); + + if (dbBackup != null) { + if (feederManager.shutdown.get()) { + dbBackup.endBackup(); + } else { + + /* + * Establish lease so client can resume within the lease + * period. + */ + feederManager.new Lease(clientId, + feederManager.leaseDuration, + dbBackup); + LoggerUtils.info(logger, feederManager.getEnvImpl(), + "Lease created for node: " + clientId); + } + } + LoggerUtils.info + (logger, feederManager.getEnvImpl(), + "Log file feeder for client: " + clientId + " exited"); + } + } + + /** + * Implements the message exchange used to determine whether this feeder + * is suitable for use the client's backup needs. The feeder may be + * unsuitable if it's already busy, or it's not current enough to service + * the client's needs. 
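+ *
+ * For illustration (an editor's sketch, not part of the original commit):
+ * the client-side counterpart of this exchange (see
+ * NetworkBackup.checkServer), where {@code minVLSN} and
+ * {@code serverLoadThreshold} are the client's requirements:
+ * <pre>
+ *    protocol.write(protocol.new FeederInfoReq(), channel);
+ *    FeederInfoResp resp = protocol.read(channel, FeederInfoResp.class);
+ *    // Reject this server if resp.getRangeLast() precedes minVLSN, or if
+ *    // resp.getActiveFeeders() exceeds serverLoadThreshold.
+ * </pre>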
+ */ + private void checkFeeder(Protocol protocol) + throws IOException, DatabaseException { + + protocol.read(namedChannel.getChannel(), FeederInfoReq.class); + int feeders = feederManager.getActiveFeederCount() - + 1 /* Exclude this one */; + VLSN rangeFirst = VLSN.NULL_VLSN; + VLSN rangeLast = VLSN.NULL_VLSN; + if (feederManager.getEnvImpl() instanceof RepImpl) { + /* Include replication stream feeders as a load component. */ + RepImpl repImpl = (RepImpl) feederManager.getEnvImpl(); + feeders += + repImpl.getRepNode().feederManager().activeReplicaCount(); + VLSNRange range = repImpl.getVLSNIndex().getRange(); + rangeFirst = range.getFirst(); + rangeLast = range.getLast(); + } + protocol.write(protocol.new FeederInfoResp + (feeders, rangeFirst, rangeLast), namedChannel); + } + + /** + * Send files in response to request messages. The request sequence looks + * like the following: + * + * [FileReq | FileInfoReq]+ Done + * + * The response sequence to a FileReq looks like: + * + * FileStart FileEnd + * + * and that for a FileInfoReq, is simply a FileInfoResp + */ + private void sendRequestedFiles(Protocol protocol) + throws IOException, ProtocolException, DatabaseException { + + String prevFileName = null; + + try { + while (true) { + FileReq fileReq = protocol.read(namedChannel.getChannel(), + FileReq.class); + final String fileName = fileReq.getFileName(); + + /* + * Calculate the full path for a specified log file name, + * especially when this Feeder is configured to run with sub + * directories. + */ + FileManager fMgr = feederManager.getEnvImpl().getFileManager(); + File file = new File(fMgr.getFullFileName(fileName)); + + if (!file.exists()) { + throw EnvironmentFailureException.unexpectedState + ("Log file not found: " + fileName); + } + /* Freeze the length and last modified date. */ + final long length = file.length(); + final long lastModified = file.lastModified(); + byte digest[] = null; + FileInfoResp resp = null; + Protocol.FileInfoResp cachedResp = + feederManager.statResponses.get(fileName); + byte cachedDigest[] = + ((cachedResp != null) && + (cachedResp.getFileLength() == length) && + (cachedResp.getLastModifiedTime() == lastModified)) ? + cachedResp.getDigestSHA1() : null; + + if (fileReq instanceof FileInfoReq) { + if (cachedDigest != null) { + digest = cachedDigest; + } else if (((FileInfoReq) fileReq).getNeedSHA1()) { + digest = getSHA1Digest(file, length).digest(); + } else { + // Digest not requested + digest = new byte[0]; + } + resp = protocol.new FileInfoResp + (fileName, length, lastModified, digest); + } else { + /* Allow deletion of previous file. */ + if (prevFileName != null && + !fileName.equals(prevFileName)) { + dbBackup.removeFileProtection(prevFileName); + } + prevFileName = fileName; + + protocol.write(protocol.new FileStart + (fileName, length, lastModified), + namedChannel); + digest = sendFileContents(file, length); + if ((cachedDigest != null) && + !Arrays.equals(cachedDigest, digest)) { + throw EnvironmentFailureException.unexpectedState + ("Inconsistent cached and computed digests"); + } + resp = protocol.new FileEnd + (fileName, length, lastModified, digest); + } + /* Cache for subsequent requests, if it was computed. */ + if (digest.length > 0) { + feederManager.statResponses.put(fileName, resp); + } + protocol.write(resp, namedChannel); + } + } catch (ProtocolException pe) { + if (pe.getUnexpectedMessage() instanceof Protocol.Done) { + return; + } + throw pe; + } + } + + /** + * Returns the SHA1 has associated with the file. 
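+ * As an editor's sketch (not part of the original commit), hashing a
+ * complete log file amounts to:
+ * <pre>
+ *    byte[] digest = getSHA1Digest(file, file.length()).digest();
+ * </pre>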
+ * + * @param file + * @param length + * @return + * @throws IOException + * @throws DatabaseException + */ + static MessageDigest getSHA1Digest(File file, long length) + throws IOException, DatabaseException { + + MessageDigest messageDigest = null; + + try { + messageDigest = MessageDigest.getInstance("SHA1"); + } catch (NoSuchAlgorithmException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + final FileInputStream fileStream = new FileInputStream(file); + try { + ByteBuffer buffer = ByteBuffer.allocate(TRANSFER_BYTES); + for (long bytes = length; bytes > 0; ) { + int readSize = (int)Math.min(TRANSFER_BYTES, bytes); + int readBytes = + fileStream.read(buffer.array(), 0, readSize); + if (readBytes == -1) { + throw new IOException("Premature EOF. Was expecting: " + + readSize); + } + messageDigest.update(buffer.array(), 0, readBytes); + bytes -= readBytes; + } + } finally { + fileStream.close(); + } + return messageDigest; + } + + /** + * Sends over the contents of the file and computes the SHA-1 hash. Note + * that the method does not rely on EOF detection, but rather on the + * promised file size, since the final log file might be growing while the + * transfer is in progress. The client uses the length sent in the FileResp + * message to maintain its position in the network stream. It expects to + * see a FileInfoResp once it has read the agreed upon number of bytes. + * + * Since JE log files are append only, there is no danger that we will send + * over any uninitialized file blocks. + * + * @param file the log file to be sent. + * @param length the number of bytes to send + * @return the digest associated with the file that was sent + * + * @throws IOException + */ + private byte[] sendFileContents(File file, long length) + throws IOException { + + final LogVerifier verifier = + new LogVerifier(feederManager.getEnvImpl(), file.getName(), -1L); + final FileInputStream fileStream = new FileInputStream(file); + + try { + final FileChannel fileChannel = fileStream.getChannel(); + messageDigest.reset(); + final ByteBuffer buffer = + ByteBuffer.allocateDirect(TRANSFER_BYTES); + final byte[] array = + (buffer.hasArray()) ? buffer.array() : new byte[TRANSFER_BYTES]; + int transmitBytes = 0; + + while (true) { + buffer.clear(); + if (fileChannel.read(buffer) < 0) { + verifier.verifyAtEof(); + break; + } + + buffer.flip(); + final int lim = buffer.limit(); + final int off; + if (buffer.hasArray()) { + off = buffer.arrayOffset(); + } else { + off = 0; + buffer.get(array, 0, lim); + buffer.rewind(); + } + verifier.verify(array, off, lim); + messageDigest.update(array, off, lim); + transmitBytes += namedChannel.getChannel().write(buffer); + } + + if (transmitBytes != length) { + String msg = "File length:" + length + " does not match the " + + "number of bytes that were transmitted:" + + transmitBytes; + + throw new IllegalStateException(msg); + } + + LoggerUtils.info + (logger, feederManager.getEnvImpl(), + "Sent file: " + file + " Length: " + length + " bytes"); + } finally { + fileStream.close(); + } + return messageDigest.digest(); + } + + /** + * Processes the request for the list of files that constitute a valid + * backup. If a leased DbBackup instance is available, it uses it, + * otherwise it creates a new instance and uses it instead. + */ + private void sendFileList(Protocol protocol) + throws IOException, ProtocolException, DatabaseException { + + /* Wait for the request message. 
*/ + protocol.read(namedChannel.getChannel(), Protocol.FileListReq.class); + + if (dbBackup == null) { + dbBackup = new DbBackup(feederManager.getEnvImpl()); + dbBackup.setNetworkRestore(); + dbBackup.startBackup(); + } else { + feederManager.leaseRenewalCount++; + } + + /* + * Remove the subdirectory header of the log files, because the nodes + * that need to copy those log files may not configure the spreading + * log files into sub directories feature. + */ + String[] files = dbBackup.getLogFilesInBackupSet(); + for (int i = 0; i < files.length; i++) { + if (files[i].contains(File.separator)) { + files[i] = files[i].substring + (files[i].indexOf(File.separator) + 1, files[i].length()); + } + } + + protocol.write(protocol.new FileListResp(files), namedChannel); + } + + /** + * Verify that the protocols are compatible, switch to a different protocol + * version, if we need to. + */ + private Protocol checkProtocol(Protocol protocol) + throws IOException, ProtocolException { + + ClientVersion clientVersion = + protocol.read(namedChannel.getChannel(), + Protocol.ClientVersion.class); + clientId = clientVersion.getNodeId(); + FeederManager.Lease lease = feederManager.leases.get(clientId); + if (lease != null) { + dbBackup = lease.terminate(); + } + feederManager.feeders.put(clientId, this); + if (clientVersion.getVersion() != protocol.getVersion()) { + String message = "Client requested protocol version: " + + clientVersion.getVersion() + " but the server version is " + + protocol.getVersion(); + + /* + * Simply log the difference on the server, it's up to the client + * to reject the protocol version, if it can't accommodate it. + */ + LoggerUtils.warning(logger, feederManager.getEnvImpl(), message); + } + protocol.write(protocol.new ServerVersion(), namedChannel); + + /* In future we may switch protocol versions to accommodate the client. + * For now, simply return the one and only version. + */ + return protocol; + } + + /** + * Sets up the channel to facilitate efficient transfer of large log files. + */ + private DataChannel configureChannel() + throws IOException { + + namedChannel.getChannel().getSocketChannel().configureBlocking(true); + LoggerUtils.fine + (logger, feederManager.getEnvImpl(), + "Log File Feeder accepted connection from " + namedChannel); + namedChannel.getChannel().getSocketChannel().socket(). + setSoTimeout(SOCKET_TIMEOUT_MS); + + /* + * Enable Nagle's algorithm since throughput is important for the large + * files we will be transferring. + */ + namedChannel.getChannel().getSocketChannel().socket(). + setTcpNoDelay(false); + return namedChannel.getChannel(); + } + + /** + * @see StoppableThread#getLogger + */ + @Override + protected Logger getLogger() { + return logger; + } +} diff --git a/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackup.java b/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackup.java new file mode 100644 index 0000000..f60e218 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackup.java @@ -0,0 +1,939 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.BACKUP_FILE_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.DISPOSED_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.EXPECTED_BYTES; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.FETCH_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.SKIP_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.TRANSFERRED_BYTES; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.TRANSFER_RATE; +import static java.util.concurrent.TimeUnit.MINUTES; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.RestoreMarker; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FeederInfoResp; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileEnd; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileInfoResp; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileListResp; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileStart; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ServerVersion; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.utilint.AtomicIntStat; +import com.sleepycat.je.utilint.AtomicLongStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongAvgRateStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * This class implements a hot network backup that permits it to obtain a + * consistent set of log files from any running environment that provides a + * LogFileFeeder service. This class thus plays the role of a client, and the + * running environment that of a server. + *
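+ *
+ * Editor's illustrative sketch, not part of the original commit: typical
+ * use via the convenience constructor defined below, with all arguments
+ * assumed to be in scope:
+ * <pre>
+ *    NetworkBackup backup =
+ *        new NetworkBackup(serverAddress, envDir, clientNameId,
+ *                          retainLogfiles, fileManager, logManager,
+ *                          channelFactory);
+ *    String[] restoredFiles = backup.execute();
+ * </pre>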

+ * The log files that are retrieved over the network are placed in a directory + * that can serve as an environment directory for a JE standalone or HA + * environment. If log files are already present in the target directory, it + * will try to reuse them if they are consistent with those on the server. + * Extant log files that are no longer part of the current backup file set are + * deleted or renamed, depending on how the backup operation was + * configured. + *
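+ * Editor's illustrative sketch, not part of the original commit: the reuse
+ * check applied to each candidate file, where {@code serverLength} and
+ * {@code serverDigest} stand for the length and SHA1 reported by the server
+ * in its FileInfoResp:
+ * <pre>
+ *    if (file.exists() && file.length() == serverLength) {
+ *        byte[] digest =
+ *            LogFileFeeder.getSHA1Digest(file, serverLength).digest();
+ *        if (Arrays.equals(digest, serverDigest)) {
+ *            // Identical local copy; skip the network transfer.
+ *        }
+ *    }
+ * </pre>
+ *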

+ * Renamed backup files have the following syntax: + * + * NNNNNNNN.bup.<backup number> + * + * where the backup number is the number associated with the backup attempt, + * rather than with an individual file. That is, the backup number is increased + * by one each time a backup is repeated in the same directory and log files + * actually needed to be renamed. + *
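+ * As an editor's sketch (not part of the original commit), the rename
+ * itself is delegated to the FileManager, as in disposeFile below:
+ * <pre>
+ *    long fileNumber = fileManager.getNumFromName(file.getName());
+ *    fileManager.renameFile(fileNumber, FileManager.BUP_SUFFIX);
+ * </pre>
+ *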

        + * The implementation tries to be resilient in the face of network failures and + * minimizes the amount of work that might need to be done if the client or + * server were to fail and had to be restarted. Users of this API must be + * careful to ensure that the execute() completes successfully before accessing + * the environment. The user fails to do this, the InsufficientLogException + * will be thrown again when the user attempts to open the environment. This + * safeguard is implemented using the {@link RestoreMarker} mechanism. + */ +public class NetworkBackup { + /* The server that was chosen to supply the log files. */ + private final InetSocketAddress serverAddress; + + /* The environment directory into which the log files will be backed up */ + private final File envDir; + + /* The id used during logging to identify a node. */ + private final NameIdPair clientNameId; + + /* + * Determines whether any existing log files in the envDir should be + * retained under a different name (with a BUP_SUFFIX), or whether it + * should be deleted. + */ + private final boolean retainLogfiles; + + /* + * The minimal VLSN that the backup must cover. Used to ensure that the + * backup is sufficient to permit replay of a replication stream from a + * feeder. It's NULL_VLSN if the VLSN does not matter, that is, it's a + * backup for a standalone environment. + */ + private final VLSN minVLSN; + + /* + * The client abandons a backup attempt if the server is loaded beyond this + * threshold + */ + private final int serverLoadThreshold; + + /* The RepImpl instance used in Protocol. */ + private final RepImpl repImpl; + + private final FileManager fileManager; + + /* The factory for creating new channels */ + private final DataChannelFactory channelFactory; + + /* The protocol used to communicate with the server. */ + private Protocol protocol; + + /* The channel connecting this client to the server. */ + private DataChannel channel; + + /* + * The message digest used to compute the digest as each log file is pulled + * over the network. + */ + private final MessageDigest messageDigest; + + /* Statistics on number of files actually fetched and skipped */ + private final StatGroup statistics; + private final AtomicIntStat backupFileCount; + private final AtomicIntStat disposedCount; + private final AtomicIntStat fetchCount; + private final AtomicIntStat skipCount; + private final AtomicLongStat expectedBytes; + private final AtomicLongStat transferredBytes; + private final LongAvgRateStat transferRate; + + private final Logger logger; + + private CyclicBarrier testBarrier = null; + + /** + * The receive buffer size associated with the socket used for the log file + * transfers + */ + private final int receiveBufferSize; + + /** + * Time to wait for a request from the client. + */ + private static final int SOCKET_TIMEOUT_MS = 10000; + + /** + * The number of times to retry on a digest exception. That is, when the + * SHA1 hash as computed by the server for the file does not match the hash + * as computed by the client for the same file. + */ + private static final int DIGEST_RETRIES = 5; + + /* + * Save the properties from the instigating InsufficientLogException in + * order to persist the exception into a RestoreRequired entry. + */ + private final Properties exceptionProperties; + + /* + * Be prepared to create a marker file saying that the log can't be + * recovered. 
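+ *
+ * Editor's illustrative sketch, not part of the original commit: the marker
+ * is created before the first local file is altered and removed only after
+ * every file has been fetched (see getFile and getFiles):
+ * <pre>
+ *    restoreMarker.createMarkerFile
+ *        (RestoreRequired.FailureType.NETWORK_RESTORE, exceptionProperties);
+ *    ...
+ *    restoreMarker.removeMarkerFile(fileManager);
+ * </pre>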
+ */ + private final RestoreMarker restoreMarker; + + /* For testing */ + private TestHook interruptHook; + + /** + * Creates a configured backup instance which when executed will backup the + * files to the environment directory. + * + * @param serverSocket the socket on which to contact the server + * @param receiveBufferSize the receive buffer size to be associated with + * the socket used for the log file transfers. + * @param envDir the directory in which to place the log files + * @param clientNameId the id used to identify this client + * @param retainLogfiles determines whether obsolete log files should be + * retained by renaming them, instead of deleting them. + * @param serverLoadThreshold only backup from this server if it has fewer + * than this number of feeders active. + * @param repImpl is passed in as a distinct field from the log manager and + * file manager because it is used only for logging and environment + * invalidation. A network backup may be invoked by unit tests without + * an enclosing environment. + * @param minVLSN the VLSN that should be covered by the server. It ensures + * that the log files are sufficiently current for this client's needs. + * @throws IllegalArgumentException if the environment directory is not + * valid. When used internally, this should be caught appropriately. + */ + public NetworkBackup(InetSocketAddress serverSocket, + int receiveBufferSize, + File envDir, + NameIdPair clientNameId, + boolean retainLogfiles, + int serverLoadThreshold, + VLSN minVLSN, + RepImpl repImpl, + FileManager fileManager, + LogManager logManager, + DataChannelFactory channelFactory, + Properties exceptionProperties) + throws IllegalArgumentException { + + super(); + this.serverAddress = serverSocket; + this.receiveBufferSize = receiveBufferSize; + + if (!envDir.exists()) { + throw new IllegalArgumentException("Environment directory: " + + envDir + " not found"); + } + this.envDir = envDir; + this.clientNameId = clientNameId; + this.retainLogfiles = retainLogfiles; + this.serverLoadThreshold = serverLoadThreshold; + this.minVLSN = minVLSN; + this.repImpl = repImpl; + this.fileManager = fileManager; + this.channelFactory = channelFactory; + + try { + messageDigest = MessageDigest.getInstance("SHA1"); + } catch (NoSuchAlgorithmException e) { + // Should not happen -- if it does it's a JDK config issue + throw EnvironmentFailureException.unexpectedException(e); + } + + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), + clientNameId.toString(), + repImpl); + statistics = new StatGroup(NetworkBackupStatDefinition.GROUP_NAME, + NetworkBackupStatDefinition.GROUP_DESC); + backupFileCount = new AtomicIntStat(statistics, BACKUP_FILE_COUNT); + disposedCount = new AtomicIntStat(statistics, DISPOSED_COUNT); + fetchCount = new AtomicIntStat(statistics, FETCH_COUNT); + skipCount = new AtomicIntStat(statistics, SKIP_COUNT); + expectedBytes = new AtomicLongStat(statistics, EXPECTED_BYTES); + transferredBytes = new AtomicLongStat( + statistics, TRANSFERRED_BYTES); + transferRate = new LongAvgRateStat( + statistics, TRANSFER_RATE, 10000, MINUTES); + + this.exceptionProperties = exceptionProperties; + restoreMarker = new RestoreMarker(fileManager, logManager); + } + + /** + * Convenience overloading. 
+ * + * @see NetworkBackup(InetSocketAddress, int, File, NameIdPair, boolean, + * int, VLSN, RepImpl, FileManager, Properties) + */ + public NetworkBackup(InetSocketAddress serverSocket, + File envDir, + NameIdPair clientNameId, + boolean retainLogfiles, + FileManager fileManager, + LogManager logManager, + DataChannelFactory channelFactory) + throws DatabaseException { + + this(serverSocket, + 0, + envDir, + clientNameId, + retainLogfiles, + Integer.MAX_VALUE, + VLSN.NULL_VLSN, + null, + fileManager, + logManager, + channelFactory, + new Properties()); + } + + /** + * Returns statistics associated with the NetworkBackup execution. + */ + public NetworkBackupStats getStats() { + return new NetworkBackupStats(statistics.cloneGroup(false)); + } + + /** + * Execute the backup. + * + * @throws ServiceConnectFailedException + * @throws LoadThresholdExceededException + * @throws InsufficientVLSNRangeException + */ + public String[] execute() + throws IOException, + DatabaseException, + ServiceConnectFailedException, + LoadThresholdExceededException, + InsufficientVLSNRangeException, + RestoreMarker.FileCreationException { + + try { + channel = channelFactory. + connect(serverAddress, + new ConnectOptions(). + setTcpNoDelay(true). + setReceiveBufferSize(receiveBufferSize). + setOpenTimeout(SOCKET_TIMEOUT_MS). + setReadTimeout(SOCKET_TIMEOUT_MS)); + ServiceDispatcher.doServiceHandshake + (channel, FeederManager.FEEDER_SERVICE); + + protocol = checkProtocol(new Protocol(clientNameId, + Protocol.VERSION, + repImpl)); + checkServer(); + final String[] fileNames = getFileList(); + + LoggerUtils.info(logger, repImpl, + "Restoring from:" + serverAddress + + " Allocated network receive buffer size:" + + channel.getSocketChannel().socket(). + getReceiveBufferSize() + + "(" + receiveBufferSize + ")" + + " candidate log file count:" + fileNames.length); + + getFiles(fileNames); + cleanup(fileNames); + assert fileManager.listJDBFiles().length == fileNames.length : + "envDir=" + envDir + " list=" + + Arrays.asList(fileManager.listJDBFiles()) + + " fileNames=" + Arrays.asList(fileNames); + + /* + * The fileNames array is sorted in getFileList method, so we can + * use the first and last array elements to get the range of the + * files to be restored. + */ + final long fileBegin = fileManager.getNumFromName(fileNames[0]); + final long fileEnd = + fileManager.getNumFromName(fileNames[fileNames.length - 1]); + /* Return file names with sub directories' names if exists. */ + return fileManager.listFileNames(fileBegin, fileEnd); + } finally { + if (channel != null) { + /* + * Closing the socket directly is not correct. Let the channel + * do the work (necessary for correct TLS operation). + */ + channel.close(); + } + LoggerUtils.info(logger, repImpl, + "Backup file total: " + + backupFileCount.get() + + ". Files actually fetched: " + + fetchCount.get() + + ". Files skipped(available locally): " + + skipCount.get() + + ". Local files renamed/deleted: " + + disposedCount.get()); + } + } + + /** + * Ensures that the log file feeder is a suitable choice for this backup: + * The feeder's VLSN range end must be GTE the minVSLN and its load must + * be LTE the serverLoadThreshold. 
+ */ + private void checkServer() + throws IOException, + ProtocolException, + LoadThresholdExceededException, + InsufficientVLSNRangeException { + + protocol.write(protocol.new FeederInfoReq(), channel); + FeederInfoResp resp = protocol.read(channel, FeederInfoResp.class); + if (resp.getRangeLast().compareTo(minVLSN) < 0) { + throw new InsufficientVLSNRangeException( + minVLSN, + resp.getRangeFirst(), resp.getRangeLast(), + resp.getActiveFeeders()); + } + if (resp.getActiveFeeders() > serverLoadThreshold) { + throw new LoadThresholdExceededException( + serverLoadThreshold, + resp.getRangeFirst(), resp.getRangeLast(), + resp.getActiveFeeders()); + } + } + + /** + * Delete or rename residual jdb files that are not part of the log file + * set. This method is only invoked after all required files have been + * copied over from the server. + * + * @throws IOException + */ + private void cleanup(String[] fileNames) + throws IOException { + + LoggerUtils.fine(logger, repImpl, "Cleaning up"); + + Set logFileSet = new HashSet(Arrays.asList(fileNames)); + for (File file : fileManager.listJDBFiles()) { + if (!logFileSet.contains(file.getName())) { + disposeFile(file); + } + } + + StringBuilder logFiles = new StringBuilder(); + for (String string : logFileSet) { + + /* + * Use the full path of this file in case the environment uses + * multiple data directories. + */ + File file = new File(fileManager.getFullFileName(string)); + if (!file.exists()) { + throw EnvironmentFailureException.unexpectedState + ("Missing file: " + file); + } + logFiles.append(file.getCanonicalPath()).append(", "); + } + + String names = logFiles.toString(); + if (names.length() > 0) { + names = names.substring(0, names.length()-2); + } + LoggerUtils.fine(logger, repImpl, "Log file set: " + names); + } + + /** + * Retrieves all the files in the list, that are not already in the envDir. + * @throws DatabaseException + */ + private void getFiles(String[] fileNames) + throws IOException, DatabaseException, + RestoreMarker.FileCreationException { + + LoggerUtils.info(logger, repImpl, + fileNames.length + " files in backup set"); + + /* Get all file transfer lengths first, so we can track progress */ + final List fileTransferLengths = + getFileTransferLengths(fileNames); + + for (final FileAndLength entry : fileTransferLengths) { + if (testBarrier != null) { + try { + testBarrier.await(); + } catch (InterruptedException e) { + // Ignore just a test mechanism + } catch (BrokenBarrierException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + + for (int i = 0; i < DIGEST_RETRIES; i++) { + try { + getFile(entry.file); + fetchCount.increment(); + break; + } catch (DigestException e) { + if ((i + 1) == DIGEST_RETRIES) { + throw new IOException("Digest mismatch despite " + + DIGEST_RETRIES + " attempts"); + } + + /* Account for the additional transfer */ + expectedBytes.add(entry.length); + continue; + } + } + } + /* We've finished transferring all files, remove the marker file. */ + restoreMarker.removeMarkerFile(fileManager); + + /* All done, shutdown conversation with the server. */ + protocol.write(protocol.new Done(), channel); + } + + /** Store File and file length pair. */ + private static class FileAndLength { + FileAndLength(File file, long length) { + this.file = file; + this.length = length; + } + final File file; + final long length; + } + + /** + * Returns information about files that need to be transferred, and updates + * expectedBytes and skipCount accordingly. 
This method tries to avoid + * requesting the SHA1 if the file lengths are not equal, since computing + * the SHA1 if it's not already cached requires a pass over the log + * file. Note that the server will always send back the SHA1 value if it + * has it cached. + */ + private List getFileTransferLengths(String[] fileNames) + throws IOException, DatabaseException { + + final List fileTransferLengths = new ArrayList<>(); + for (final String fileName : fileNames) { + + /* + * Use the full path of this file in case the environment uses + * multiple data directories. + */ + final File file = new File(fileManager.getFullFileName(fileName)); + protocol.write(protocol.new FileInfoReq(fileName, false), channel); + FileInfoResp statResp = + protocol.read(channel, Protocol.FileInfoResp.class); + final long fileLength = statResp.getFileLength(); + + /* + * See if we can skip the file if it is present with correct length + */ + if (file.exists() && (fileLength == file.length())) { + + /* Make sure we have the message digest */ + if (statResp.getDigestSHA1().length == 0) { + protocol.write( + protocol.new FileInfoReq(fileName, true), channel); + statResp = + protocol.read(channel, Protocol.FileInfoResp.class); + } + final byte digest[] = + LogFileFeeder.getSHA1Digest(file, fileLength).digest(); + if (Arrays.equals(digest, statResp.getDigestSHA1())) { + LoggerUtils.info(logger, repImpl, + "File: " + file.getCanonicalPath() + + " length: " + fileLength + + " available with matching SHA1, copy skipped"); + skipCount.increment(); + continue; + } + } + fileTransferLengths.add(new FileAndLength(file, fileLength)); + expectedBytes.add(fileLength); + } + return fileTransferLengths; + } + + /** + * Requests and obtains the specific log file from the server. The file is + * first created under a name with the .tmp suffix and is renamed to its + * true name only after its digest has been verified. + * + * This method is protected to facilitate error testing. + */ + protected void getFile(File file) + throws IOException, ProtocolException, DigestException, + RestoreMarker.FileCreationException { + + LoggerUtils.fine(logger, repImpl, "Requesting file: " + file); + protocol.write(protocol.new FileReq(file.getName()), channel); + FileStart fileResp = protocol.read(channel, Protocol.FileStart.class); + + /* + * Delete the tmp file if it already exists. + * + * Use the full path of this file in case the environment uses multiple + * data directories. + */ + File tmpFile = new File(fileManager.getFullFileName(file.getName()) + + FileManager.TMP_SUFFIX); + if (tmpFile.exists()) { + boolean deleted = tmpFile.delete(); + if (!deleted) { + throw EnvironmentFailureException.unexpectedState + ("Could not delete file: " + tmpFile); + } + } + + /* + * Use a direct buffer to avoid an unnecessary copies into and out of + * native buffers. + */ + final ByteBuffer buffer = + ByteBuffer.allocateDirect(LogFileFeeder.TRANSFER_BYTES); + messageDigest.reset(); + + /* Write the tmp file. */ + final FileOutputStream fileStream = new FileOutputStream(tmpFile); + final FileChannel fileChannel = fileStream.getChannel(); + + try { + /* Compute the transfer rate roughly once each MB */ + final int rateInterval = 0x100000 / LogFileFeeder.TRANSFER_BYTES; + int count = 0; + + /* Copy over the file contents. 
*/ + for (long bytes = fileResp.getFileLength(); bytes > 0;) { + int readSize = + (int) Math.min(LogFileFeeder.TRANSFER_BYTES, bytes); + buffer.clear(); + buffer.limit(readSize); + int actualBytes = channel.read(buffer); + if (actualBytes == -1) { + throw new IOException("Premature EOF. Was expecting:" + + readSize); + } + bytes -= actualBytes; + + buffer.flip(); + fileChannel.write(buffer); + + buffer.rewind(); + messageDigest.update(buffer); + transferredBytes.add(actualBytes); + + /* Update the transfer rate at interval and last time */ + if (((++count % rateInterval) == 0) || (bytes <= 0)) { + transferRate.add( + transferredBytes.get(), System.currentTimeMillis()); + } + } + + if (logger.isLoggable(Level.INFO)) { + LoggerUtils.info(logger, repImpl, + String.format( + "Fetched log file: %s, size: %,d bytes," + + " %s bytes," + + " %s bytes," + + " %s bytes/second", + file.getName(), + fileResp.getFileLength(), + transferredBytes, + expectedBytes, + transferRate)); + } + } finally { + fileStream.close(); + } + + final FileEnd fileEnd = protocol.read(channel, Protocol.FileEnd.class); + + /* Check that the read is successful. */ + if (!Arrays.equals(messageDigest.digest(), fileEnd.getDigestSHA1())) { + LoggerUtils.warning(logger, repImpl, + "digest mismatch on file: " + file); + throw new DigestException(); + } + + /* + * We're about to alter the files that exist in the log, either by + * deleting file N.jdb, or by renaming N.jdb.tmp -> N, and thereby + * adding a file to the set in the directory. Create the marker that + * says this log is no longer coherent and can't be recovered. Marker + * file creation can safely be called multiple times; the file will + * only be created the first time. + */ + restoreMarker.createMarkerFile + (RestoreRequired.FailureType.NETWORK_RESTORE, + exceptionProperties); + + assert TestHookExecute.doHookIfSet(interruptHook, file); + + /* Now that we know it's good, move the file into place. */ + if (file.exists()) { + /* + * Delete or back up this and all subsequent obsolete files, + * excluding the marker file. The marker file will be explicitly + * cleaned up when the entire backup finishes. + */ + disposeObsoleteFiles(file); + } + + /* Rename the tmp file. */ + LoggerUtils.fine(logger, repImpl, "Renamed " + tmpFile + " to " + file); + boolean renamed = tmpFile.renameTo(file); + if (!renamed) { + throw EnvironmentFailureException.unexpectedState + ("Rename from: " + tmpFile + " to " + file + " failed"); + } + + /* Retain last modified time, to leave an audit trail. */ + if (!file.setLastModified(fileResp.getLastModifiedTime())) { + throw EnvironmentFailureException.unexpectedState + ("File.setlastModifiedTime() for:" + file + " and time " + + new Date(fileResp.getLastModifiedTime()) + " failed."); + } + } + + /** + * Renames (or deletes) this log file, and all other files following it in + * the log sequence. The operation is done from the highest file down to + * this one, to ensure the integrity of the log files in the directory is + * always preserved. Exclude the marker file because that is meant to serve + * as an indicator that the backup is in progress. It will be explicitly + * removed only when the entire backup is finished. 
+ * + * @param startFile the lowest numbered log file that must be renamed or + * deleted + * @throws IOException + */ + private void disposeObsoleteFiles(File startFile) throws IOException { + File[] dirFiles = fileManager.listJDBFiles(); + Arrays.sort(dirFiles); // sorts in ascending order + + /* Start with highest numbered file to be robust in case of failure. */ + for (int i = dirFiles.length - 1; i >= 0; i--) { + File file = dirFiles[i]; + + /* Skip the marker file, wait until the whole backup is done */ + if (file.getName().equals(RestoreMarker.getMarkerFileName())) { + continue; + } + disposeFile(file); + if (startFile.equals(file)) { + break; + } + } + } + + /** + * Remove the file from the current set of log files in the directory. + * @param file + */ + private void disposeFile(File file) { + disposedCount.increment(); + final long fileNumber = fileManager.getNumFromName(file.getName()); + if (retainLogfiles) { + boolean renamed = false; + try { + renamed = + fileManager.renameFile(fileNumber, FileManager.BUP_SUFFIX); + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedState + ("Could not rename log file " + file.getPath() + + " because of exception: " + e.getMessage()); + } + + if (!renamed) { + throw EnvironmentFailureException.unexpectedState + ("Could not rename log file " + file.getPath()); + } + LoggerUtils.fine(logger, repImpl, + "Renamed log file: " + file.getPath()); + } else { + boolean deleted = false; + try { + deleted = fileManager.deleteFile(fileNumber); + } catch (IOException e) { + throw EnvironmentFailureException.unexpectedException + ("Could not delete log file " + file.getPath() + + " during network restore.", e); + } + if (!deleted) { + throw EnvironmentFailureException.unexpectedState + ("Could not delete log file " + file.getPath()); + } + LoggerUtils.fine(logger, repImpl, + "deleted log file: " + file.getPath()); + } + } + + /** + * Carries out the message exchange to obtain the list of backup files. + * @return + * @throws IOException + * @throws ProtocolException + */ + private String[] getFileList() + throws IOException, ProtocolException { + + protocol.write(protocol.new FileListReq(), channel); + FileListResp fileListResp = protocol.read(channel, + Protocol.FileListResp.class); + String[] fileList = fileListResp.getFileNames(); + Arrays.sort(fileList); //sort the file names in ascending order + backupFileCount.set(fileList.length); + return fileList; + } + + /** + * Verify that the protocols are compatible, switch to a different protocol + * version, if we need to. + * + * @throws DatabaseException + */ + private Protocol checkProtocol(Protocol candidateProtocol) + throws IOException, ProtocolException { + + candidateProtocol.write + (candidateProtocol.new ClientVersion(), channel); + ServerVersion serverVersion = + candidateProtocol.read(channel, Protocol.ServerVersion.class); + + if (serverVersion.getVersion() != candidateProtocol.getVersion()) { + String message = "Server requested protocol version:" + + serverVersion.getVersion() + + " but the client version is " + + candidateProtocol.getVersion(); + LoggerUtils.info(logger, repImpl, message); + throw new ProtocolException(message); + } + + /* + * In future we may switch protocol versions to accommodate the server. + * For now, simply return the one and only version. + */ + return candidateProtocol; + } + + /* + * @hidden + * + * A test entry point used to simulate a slow network restore. 
+ */ + public void setTestBarrier(CyclicBarrier testBarrier) { + this.testBarrier = testBarrier; + } + + /* For unit testing only */ + public void setInterruptHook(TestHook hook) { + interruptHook = hook; + } + + /** + * Exception indicating that the digest sent by the server did not match + * the digest computed by the client, that is, the log file was corrupted + * during transit. + */ + @SuppressWarnings("serial") + protected static class DigestException extends Exception { + } + + /** + * Exception indicating that the server could not be used for the restore. + */ + @SuppressWarnings("serial") + public static class RejectedServerException extends Exception { + + /* The actual range covered by the server. */ + final VLSN rangeFirst; + final VLSN rangeLast; + + /* The actual load of the server. */ + final int activeServers; + + RejectedServerException(VLSN rangeFirst, + VLSN rangeLast, + int activeServers) { + this.rangeFirst = rangeFirst; + this.rangeLast = rangeLast; + this.activeServers = activeServers; + } + + public VLSN getRangeLast() { + return rangeLast; + } + + public int getActiveServers() { + return activeServers; + } + } + + /** + * Exception indicating that the server vlsn range did not cover the VLSN + * of interest. + */ + @SuppressWarnings("serial") + public static class InsufficientVLSNRangeException + extends RejectedServerException { + + /* The VLSN that must be covered by the server. */ + private final VLSN minVLSN; + + InsufficientVLSNRangeException(VLSN minVLSN, + VLSN rangeFirst, + VLSN rangeLast, + int activeServers) { + super(rangeFirst, rangeLast, activeServers); + this.minVLSN = minVLSN; + } + + @Override + public String getMessage() { + return "Insufficient VLSN range. Needed VLSN: " + minVLSN + + " Available range: " + + "[" + rangeFirst + ", " + rangeLast + "]"; + } + } + + @SuppressWarnings("serial") + public static class LoadThresholdExceededException + extends RejectedServerException { + + private final int threshold; + + LoadThresholdExceededException(int threshold, + VLSN rangeFirst, + VLSN rangeLast, + int activeServers) { + super(rangeFirst, rangeLast, activeServers); + assert(activeServers > threshold); + this.threshold = threshold; + } + + @Override + public String getMessage() { + return "Active server threshold: " + threshold + " exceeded. " + + "Active servers: " + activeServers; + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupStatDefinition.java b/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupStatDefinition.java new file mode 100644 index 0000000..125d960 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupStatDefinition.java @@ -0,0 +1,68 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static com.sleepycat.je.utilint.StatDefinition.StatType.CUMULATIVE; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for each NetworkBackup statistics. 
+ */ +public class NetworkBackupStatDefinition { + + public static final String GROUP_NAME = "NetworkBackup"; + public static final String GROUP_DESC = "NetworkBackup statistics"; + + public static StatDefinition BACKUP_FILE_COUNT = + new StatDefinition + ("backupFileCount", + "The total number of files."); + + public static StatDefinition SKIP_COUNT = + new StatDefinition + ("skipCount", + "The number of files that were skipped because they were already " + + "present and current in the local environment directory."); + + public static StatDefinition FETCH_COUNT = + new StatDefinition + ("fetchCount", + "The number of files that were actually transferred from the " + + "server"); + + public static StatDefinition DISPOSED_COUNT = + new StatDefinition + ("disposedCount", + "The number of files that were disposed (deleted or renamed) from " + + "the local environment directory."); + + public static StatDefinition EXPECTED_BYTES = + new StatDefinition( + "expectedBytes", + "The number of bytes that are expected to be transferred.", + CUMULATIVE); + + public static StatDefinition TRANSFERRED_BYTES = + new StatDefinition( + "transferredBytes", + "The number of bytes that have been transferred so far.", + CUMULATIVE); + + public static StatDefinition TRANSFER_RATE = + new StatDefinition( + "transferRate", + "The moving average of the rate, in bytes per second, at which" + + " bytes have been transferred so far."); +} diff --git a/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupStats.java b/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupStats.java new file mode 100644 index 0000000..a5467ee --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupStats.java @@ -0,0 +1,77 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.BACKUP_FILE_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.DISPOSED_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.EXPECTED_BYTES; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.FETCH_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.SKIP_COUNT; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.TRANSFERRED_BYTES; +import static com.sleepycat.je.rep.impl.networkRestore.NetworkBackupStatDefinition.TRANSFER_RATE; + +import java.io.Serializable; + +import com.sleepycat.je.utilint.LongAvgRateStat; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Stores NetworkBackup statistics. 
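+ * Editor's illustrative sketch, not part of the original commit: polling
+ * transfer progress from a NetworkBackup instance {@code backup}:
+ * <pre>
+ *    NetworkBackupStats stats = backup.getStats();
+ *    long expected = stats.getExpectedBytes();
+ *    long pctDone = (expected == 0) ?
+ *        0 : (100 * stats.getTransferredBytes()) / expected;
+ * </pre>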
+ * + * @see NetworkBackupStatDefinition + */ +public class NetworkBackupStats implements Serializable { + private static final long serialVersionUID = 0; + + private final StatGroup statGroup; + + NetworkBackupStats(StatGroup statGroup) { + this.statGroup = statGroup; + } + + public int getBackupFileCount() { + return statGroup.getInt(BACKUP_FILE_COUNT); + } + + public int getSkipCount() { + return statGroup.getInt(SKIP_COUNT); + } + + public int getFetchCount() { + return statGroup.getInt(FETCH_COUNT); + } + + public int getDisposedCount() { + return statGroup.getInt(DISPOSED_COUNT); + } + + public long getExpectedBytes() { + return statGroup.getLong(EXPECTED_BYTES); + } + + public long getTransferredBytes() { + return statGroup.getLong(TRANSFERRED_BYTES); + } + + public long getTransferRate() { + final LongAvgRateStat stat = + (LongAvgRateStat) statGroup.getStat(TRANSFER_RATE); + return (stat == null) ? 0 : stat.get(); + } + + @Override + public String toString() { + return statGroup.toString(); + } +} diff --git a/src/com/sleepycat/je/rep/impl/networkRestore/Protocol.java b/src/com/sleepycat/je/rep/impl/networkRestore/Protocol.java new file mode 100644 index 0000000..bc8e443 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/networkRestore/Protocol.java @@ -0,0 +1,423 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.utilint.BinaryProtocol; +import com.sleepycat.je.utilint.VLSN; + +/** + * The protocol used to obtain backup files from a LF Feeder. The message + * exchange is always initiated by the client. + * + * The following describes the request/response messages exchanged between the + * two nodes: + * + * FeederInfoReq -> FeederInfoResp + * + * FileListReq -> FileListResp + * + * FileInfoReq -> FileInfoResp + * + * FileReq -> FileStart FileEnd + * + * Done + * + * So a complete sequence of successful request messages looks like: + * + * FeederInfoReq FileListReq [[FileInfoReq] [FileReq] ]+ Done + * + * A response sequence would look like: + * + * FeederInfoResp FileListResp [[FileInfoResp] [FileStart FileEnd] ]+ + * + * The client may abandon its interaction with the server if it decides the + * server is overloaded. + * + * The client tries to minimize the number of files it actually requests based + * upon its current state. + * + * When a FileReq is received by the server, other files previously requested + * (using FileReq) may be deleted by the server. These previously requested + * files must not be requested again using FileReq or FileReqInfo. + */ +public class Protocol extends BinaryProtocol { + + static public final int VERSION = 2; + + /* The messages defined by this class. 
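+ *
+ * For illustration only (a hedged sketch, not an actual call site; the
+ * read and write methods are inherited from BinaryProtocol, and the
+ * channel setup is assumed), a client-side conversation maps onto these
+ * ops roughly as follows:
+ *
+ *   Protocol protocol = new Protocol(clientNameId, Protocol.VERSION, envImpl);
+ *   protocol.write(protocol.new FeederInfoReq(), channel);
+ *   FeederInfoResp info = protocol.read(channel, FeederInfoResp.class);
+ *   protocol.write(protocol.new FileListReq(), channel);
+ *   FileListResp files = protocol.read(channel, FileListResp.class);
+ *   // per file: FileInfoReq/FileInfoResp, then FileReq followed by
+ *   // FileStart, the raw file bytes, and FileEnd
+ *   protocol.write(protocol.new Done(), channel);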
*/ + public final MessageOp FEEDER_INFO_REQ = + new MessageOp((short)1, FeederInfoReq.class); + + public final MessageOp FEEDER_INFO_RESP = + new MessageOp((short)2, FeederInfoResp.class); + + public final MessageOp FILE_LIST_REQ = + new MessageOp((short)3, FileListReq.class); + + public final MessageOp FILE_LIST_RESP = + new MessageOp((short)4, FileListResp.class); + + public final MessageOp FILE_REQ = + new MessageOp((short)5, FileReq.class); + + public final MessageOp FILE_START = + new MessageOp((short)6, FileStart.class); + + public final MessageOp FILE_END = + new MessageOp((short)7, FileEnd.class); + + public final MessageOp FILE_INFO_REQ = + new MessageOp((short)8, FileInfoReq.class); + + public final MessageOp FILE_INFO_RESP = + new MessageOp((short)9, FileInfoResp.class); + + public final MessageOp DONE = + new MessageOp((short)10, Done.class); + + public Protocol(NameIdPair nameIdPair, + int configuredVersion, + EnvironmentImpl envImpl) { + + super(nameIdPair, VERSION, configuredVersion, envImpl); + + initializeMessageOps(new MessageOp[] + {FEEDER_INFO_REQ, + FEEDER_INFO_RESP, + FILE_LIST_REQ, + FILE_LIST_RESP, + FILE_INFO_REQ, + FILE_INFO_RESP, + FILE_REQ, + FILE_START, + FILE_END, + DONE}); + } + + /* Requests the list of log files that need to be backed up. */ + public class FeederInfoReq extends SimpleMessage { + + public FeederInfoReq() { + super(); + } + + @SuppressWarnings("unused") + public FeederInfoReq(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return FEEDER_INFO_REQ; + } + } + + public class FeederInfoResp extends SimpleMessage { + /* The number of feeders that are currently busy at this server. */ + private final int activeFeeders; + + /* The vlsn range covered by this server if it's a rep node. */ + private final VLSN rangeFirst; + private final VLSN rangeLast; + + public FeederInfoResp(int activeFeeders, + VLSN rangeFirst, + VLSN rangeLast) { + super(); + this.activeFeeders = activeFeeders; + this.rangeFirst = rangeFirst; + this.rangeLast = rangeLast; + } + + public FeederInfoResp(ByteBuffer buffer) { + super(); + activeFeeders = LogUtils.readInt(buffer); + rangeFirst = getVLSN(buffer); + rangeLast = getVLSN(buffer); + } + + @Override + public MessageOp getOp() { + return FEEDER_INFO_RESP; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(activeFeeders, rangeFirst, rangeLast); + } + + public int getActiveFeeders() { + return activeFeeders; + } + + public VLSN getRangeFirst() { + return rangeFirst; + } + + public VLSN getRangeLast() { + return rangeLast; + } + } + + /* Requests the list of log files that need to be backed up. */ + public class FileListReq extends SimpleMessage { + + public FileListReq() { + super(); + } + + @SuppressWarnings("unused") + public FileListReq(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return FILE_LIST_REQ; + } + } + + /* Response to the above containing the list of files. */ + public class FileListResp extends SimpleMessage { + private final String[] fileNames; + + public FileListResp(String[] fileNames) { + super(); + this.fileNames = fileNames; + } + + public FileListResp(ByteBuffer buffer) { + fileNames = getStringArray(buffer); + } + + @Override + public MessageOp getOp() { + return FILE_LIST_RESP; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat((Object)fileNames); + } + + public String[] getFileNames() { + return fileNames; + } + } + + /** + * Requests that a specific file be sent to the client. 
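+ * The server responds with the FileStart / file-bytes / FileEnd triple
+ * described in the class comment; a previously requested file must not be
+ * requested again, since the server may have deleted it.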
+ */ + public class FileReq extends SimpleMessage { + + protected final String fileName; + + public FileReq(String fileName) { + super(); + this.fileName = fileName; + } + + public FileReq(ByteBuffer buffer) { + fileName = getString(buffer); + } + + @Override + public MessageOp getOp() { + return FILE_REQ; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(fileName); + } + + public String getFileName() { + return fileName; + } + } + + /** + * Requests information about a specific log file. + */ + public class FileInfoReq extends FileReq { + private final boolean needSHA1; + + public FileInfoReq(String fileName, boolean needSHA1) { + super(fileName); + this.needSHA1 = needSHA1; + } + + public FileInfoReq(ByteBuffer buffer) { + super(buffer); + needSHA1 = getBoolean(buffer); + } + + @Override + public MessageOp getOp() { + return FILE_INFO_REQ; + } + + @Override + public ByteBuffer wireFormat() { + return super.wireFormat(fileName, needSHA1); + } + + public boolean getNeedSHA1() { + return needSHA1; + } + } + + /* + * The Response for information about a specific log file. + */ + public class FileInfoResp extends FileStart { + private final byte[] digestSHA1; + + public FileInfoResp(String fileName, + long fileLength, + long lastModifiedTime, + byte[] digestSHA1) { + super(fileName, fileLength, lastModifiedTime); + this.digestSHA1 = digestSHA1; + } + + public FileInfoResp(ByteBuffer buffer) { + super(buffer); + this.digestSHA1 = getByteArray(buffer); + } + + @Override + public MessageOp getOp() { + return FILE_INFO_RESP; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(fileName, + fileLength, + lastModifiedTime, + digestSHA1); + } + + /** + * Returns the SHA1 value if it was requested, or a zero length byte + * array if it was not requested. + */ + public byte[] getDigestSHA1() { + return digestSHA1; + } + } + + /** + * The message starting the response triple: + * + * FileStart FileEnd + */ + public class FileStart extends SimpleMessage { + /* Must match the request name. */ + protected final String fileName; + + /* The actual file length in bytes on disk */ + protected final long fileLength; + protected final long lastModifiedTime; + + public FileStart(String fileName, + long fileLength, + long lastModifiedTime) { + super(); + this.fileName = fileName; + this.fileLength = fileLength; + this.lastModifiedTime = lastModifiedTime; + } + + public FileStart(ByteBuffer buffer) { + fileName = getString(buffer); + fileLength = LogUtils.readLong(buffer); + lastModifiedTime = LogUtils.readLong(buffer); + } + + @Override + public MessageOp getOp() { + return FILE_START; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(fileName, fileLength, lastModifiedTime); + } + + public long getFileLength() { + return fileLength; + } + + public long getLastModifiedTime() { + return lastModifiedTime; + } + } + + /** + * The message ending the response triple: + * + * FileStart FileEnd + */ + public class FileEnd extends FileInfoResp { + + public FileEnd(String fileName, + long fileLength, + long lastModifiedTime, + byte[] digestSHA1) { + super(fileName, fileLength, lastModifiedTime, digestSHA1); + } + + public FileEnd(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return FILE_END; + } + + @Override + public ByteBuffer wireFormat() { + return super.wireFormat(); + } + } + + /** + * Message from client indicating it's done with all the files it needs and + * that the connection can be terminated. 
+ */ + public class Done extends SimpleMessage { + + public Done() { + super(); + } + + @SuppressWarnings("unused") + public Done(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return DONE; + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/networkRestore/package-info.java b/src/com/sleepycat/je/rep/impl/networkRestore/package-info.java new file mode 100644 index 0000000..38959a0 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/networkRestore/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Protocol for + * {@link com.sleepycat.je.rep.NetworkRestore network restore}. + */ +package com.sleepycat.je.rep.impl.networkRestore; diff --git a/src/com/sleepycat/je/rep/impl/node/ChannelTimeoutTask.java b/src/com/sleepycat/je/rep/impl/node/ChannelTimeoutTask.java new file mode 100644 index 0000000..bdecf83 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/ChannelTimeoutTask.java @@ -0,0 +1,87 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl.node; + +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Timer; +import java.util.TimerTask; + +import com.sleepycat.je.rep.utilint.NamedChannelWithTimeout; + +/** + * The ChannelTimeoutTask ensures that all channels registered with it are + * periodically checked to ensure that they are active. The period roughly + * corresponds to a second, although intervening GC activity may expand this + * period considerably. Note that elapsedMs used for timeouts is always ticked + * up in 1 second increments. Thus multiple seconds of real time may correspond + * to a single second of "timer time" if the system is paricularly busy, or the + * gc has been particularly active. + * + * This property allows the underlying timeout implementation to compensate for + * GC pauses in which activity on the channel at the java level would have been + * suspended and thus reduces the number of false timeouts. + */ +public class ChannelTimeoutTask extends TimerTask { + + private final long ONE_SECOND_MS = 1000l; + + /* Elapsed time as measured by the timer task. It's always incremented + * in one second intervals. + */ + private long elapsedMs = 0; + + private final List channels = + Collections.synchronizedList(new LinkedList()); + + /** + * Creates and schedules the timer task. 
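+ *
+ * Typical wiring, as an illustrative sketch (the Timer instance is assumed
+ * to be a daemon timer owned by the node):
+ * <pre>
+ * Timer timer = new Timer(true);
+ * ChannelTimeoutTask task = new ChannelTimeoutTask(timer);
+ * task.register(channel); // channel is a NamedChannelWithTimeout
+ * </pre>
+ *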
+ * @param timer the timer associated with this task + */ + public ChannelTimeoutTask(Timer timer) { + timer.schedule(this, ONE_SECOND_MS, ONE_SECOND_MS); + } + + /** + * Runs once a second checking to see if a channel is still active. Each + * channel establishes its own timeout period using elapsedMs to check for + * timeouts. Inactive channels are removed from the list of registered + * channels. + */ + @Override + public void run() { + elapsedMs += ONE_SECOND_MS; + synchronized (channels) { + for (Iterator i = channels.iterator(); + i.hasNext();) { + if (!i.next().isActive(elapsedMs)) { + i.remove(); + } + } + } + } + + /** + * Registers a channel so that the timer can make periodic calls to + * isActive(). Note that closing a channel renders it inactive and causes + * it to be removed from the list by the run() method. Consequently, there + * is no corresponding unregister operation. + * + * @param channel the channel being registered. + */ + public void register(NamedChannelWithTimeout channel) { + channels.add(channel); + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/CommitFreezeLatch.java b/src/com/sleepycat/je/rep/impl/node/CommitFreezeLatch.java new file mode 100644 index 0000000..7dca410 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/CommitFreezeLatch.java @@ -0,0 +1,179 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl.node; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.rep.elections.Proposer.Proposal; + +/** + * Ensures that a VLSN is not advanced at this node while an election is in + * progress. Note that this is difficult, if not impossible to achieve + * efficiently in a distributed environment across the entire group, when + * communications may not always be reliable. So, the implementation really + * represents a good faith effort to freeze the VLSN. JE HA itself should be + * able to make forward progress in the event of such a failure. + * + * The class coordinates three threads: the acceptor, the learner, and the + * replay thread. There is exactly one instance of each thread per replication + * node, so it coordinates the activity of these three threads. + * + * The typical serialized sequence of calls is therefore: + * + * latch.freeze() -- invoked in response to a Promise by an Acceptor + * latch.vlsnEvent() -- one or more of them in response to ongoing election + * latch.awaitThaw() -- by the replica thread waiting for the freeze to lift + * + * Both vlsnEvent() and awaitThaw() are NOPs in the absence of a freeze. + * + * @see Freezing VLSNs + */ +public class CommitFreezeLatch { + + /* The current frozen promise/vlsn pair */ + private Proposal proposal = null; + + /* Statistics */ + private int freezeCount = 0; + private int awaitTimeoutCount = 0; + private int awaitElectionCount = 0; + + /* The latch used internally. */ + private CountDownLatch latch = null; + /* The end time of the freeze. 
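+ * Re-extended to (now + timeOut) each time a newer proposal re-freezes
+ * the latch.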
*/ + private long freezeEnd = 0; + private long timeOut = DEFAULT_LATCH_TIMEOUT; + + private static long DEFAULT_LATCH_TIMEOUT = 5000; // ms + + public int getAwaitTimeoutCount() { + return awaitTimeoutCount; + } + + public int getAwaitElectionCount() { + return awaitElectionCount; + } + + public int getFreezeCount() { + return freezeCount; + } + + public long getTimeOut() { + return timeOut; + } + + public void setTimeOut(long timeOut) { + this.timeOut = timeOut; + } + + /** + * Initiates or extends a freeze on a VLSN in response to a new election + * that is in progress. It's invoked by the Acceptor thread. + * + * @param freezeProposal identifies the election that is provoking the freeze + */ + public synchronized void freeze(Proposal freezeProposal) { + if ((proposal != null) && (freezeProposal.compareTo(proposal) <= 0)) { + // Older proposal ignore it. + return; + } + if (latch != null) { + /* Enable waiters who will reacquire the new latch below. */ + latch.countDown(); + } + latch = new CountDownLatch(1); + proposal = freezeProposal; + freezeEnd = System.currentTimeMillis() + timeOut; + return; + } + + /** + * Invoked by the Learner thread whenever it receives an election result. + * The freeze on the VLSN is only lifted if the proposal associated with + * the event is current, that is, it represents a proposal that is newer + * than the one used to establish the freeze. + * + * @param listenerProposal identifies the election that just concluded + */ + public synchronized void vlsnEvent(Proposal listenerProposal) { + if (proposal == null) { + // No VLSN to unfreeze + return; + } + if (listenerProposal.compareTo(this.proposal) >= 0) { + latch.countDown(); + } + } + + /** + * Clears the latch freeing any waiters. + */ + public synchronized void clearLatch() { + if (latch != null) { + latch.countDown(); + } + latch = null; + proposal = null; + freezeEnd = 0; + } + + /** + * Used to wait for an event that unfreezes the VLSN. In our case this + * event is a message to the Learner agent announcing the result of an + * election. Note that the latch must be re-initialized after a return from + * this await method. + * + * This method is invoked by the Replay thread. Completion of an awaitThaw + * always results in the freeze being lifted. + * + * @return true if the await was satisfied due to completion of an + * election, false if no freeze was in effect, or the latch was timed out. + * + * @throws InterruptedException + */ + public boolean awaitThaw() + throws InterruptedException { + + CountDownLatch awaitLatch; + long awaitTimeout; + + synchronized (this) { + /* Copy out the values of interest */ + awaitLatch = latch; + if (awaitLatch == null) { + return false; + } + awaitTimeout = this.freezeEnd - System.currentTimeMillis(); + } + freezeCount++; + + boolean done = awaitLatch.await(awaitTimeout, TimeUnit.MILLISECONDS); + + synchronized (this) { + if (done) { + awaitElectionCount++; + clearLatch(); + return true; + } + if (this.freezeEnd - System.currentTimeMillis() <= 0) { + awaitTimeoutCount++; + /* freeze end was not extended, election completed. */ + clearLatch(); + return false; + } + } + /* Re-acquire the new latch and wait for the extended timeout. */ + return awaitThaw(); + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/DbCache.java b/src/com/sleepycat/je/rep/impl/node/DbCache.java new file mode 100644 index 0000000..b934761 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/DbCache.java @@ -0,0 +1,218 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. 
All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl.node; + +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.TriggerManager; +import com.sleepycat.je.rep.impl.RepConfigManager; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.txn.Txn; + +/** + * Cache used to maintain DatabaseImpl handles. The cache retains some + * configurable number of MRU entries. In addition, the cache will discard MRU + * entries that have not been used within a configurable timeout period. + *

+ * <p>
+ * Implementation notes:
+ * <ul>
+ * <li>The methods are not synchronized, since the cache is used exclusively
+ * from the single-threaded replay thread.</li>
+ * <li>The timeout mechanism is coarse and is implemented by a lightweight
+ * tick mechanism that minimizes calls to the system clock, since we expect
+ * the cache to be consulted very frequently and need to minimize the
+ * overhead. The tick method should be invoked with a period that's less than
+ * the timeout interval if it is to work effectively.</li>
+ * </ul>
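+ *
+ * Replay-loop usage, as an illustrative sketch (the loop and its variables
+ * are assumed; the method names are those of this class):
+ * <pre>
+ * DbCache cache = new DbCache(dbTree, maxEntries, timeoutMs);
+ * while (replaying) {
+ *     DatabaseImpl dbImpl = cache.get(dbId, txn);
+ *     // ... replay the operation against dbImpl ...
+ *     cache.tick(); // advances the coarse clock, evicting stale handles
+ * }
+ * cache.clear(); // releases all cached handles
+ * </pre>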
        + */ + +@SuppressWarnings("serial") +public class DbCache { + + private final DbCacheLinkedHashMap map; + + private final DbTree dbTree; + private volatile int maxEntries; + private volatile int timeoutMs; + + /* + * The current tick and its associated timestamp. It's updated by the + * tick() method. + */ + private int tick = 1; + private long tickTime = System.currentTimeMillis(); + + /** + * Creates an instance of a DbCache. + * + * @param dbTree the source of the data being cached + * @param maxEntries the max MRU entries to be retained in the cache + * @param timeoutMs the timeout used to remove stale entries. A timeout + * value of zero means that each call to tick() will move the "clock" + * forward. It's useful for testing purposes. + */ + DbCache(DbTree dbTree, int maxEntries, int timeoutMs) { + assert dbTree != null; + + this.dbTree = dbTree; + this.timeoutMs = timeoutMs; + this.maxEntries = maxEntries; + map = new DbCacheLinkedHashMap(); + } + + /** + * The tick() method forms the basis for removing stale entries from the + * cache. It effectively advances the "clock" and removes any entries that + * have been rendered stale. + */ + public void tick() { + + if ((timeoutMs > 0) && + (System.currentTimeMillis() - tickTime) <= timeoutMs) { + return; + } + + for (Iterator vi = map.values().iterator(); vi.hasNext();) { + Info dbInfo = vi.next(); + if (dbInfo.lastAccess < tick) { + release(dbInfo.dbImpl); + vi.remove(); + } + } + tick++; + tickTime = System.currentTimeMillis(); + } + + private void release(DatabaseImpl dbImpl) { + dbTree.releaseDb(dbImpl); + if (dbImpl.noteWriteHandleClose() == 0) { + TriggerManager.runCloseTriggers(null, dbImpl); + } + } + + /** + * Returns the DatabaseImpl associated with the dbId, caching the return + * value, if it's not already cached. The open triggers will be invoked if + * this was the first write reference. + * + * @param dbId the dbId that is to be resolved. + * + * @return the corresponding DatabaseImpl + */ + public DatabaseImpl get(DatabaseId dbId, Txn txn) { + Info info = map.get(dbId); + + if (info != null) { + info.lastAccess = tick; + return info.dbImpl; + } + + info = new Info(dbTree.getDb(dbId, -1)); + map.put(dbId, info); + if (info.dbImpl.noteWriteHandleOpen() == 1) { + TriggerManager.runOpenTriggers(txn, info.dbImpl, false); + } + return info.dbImpl; + } + + /** + * Updates the configuration of the db cache, by resetting + * maxEntries and timeoutMs to the configured + * values. + *

+ * <p>
+ * Note that setting the cache to a smaller max entries value does not
+ * immediately reduce the number of entries currently in the cache, if the
+ * size of the cache is already at the maximum. The reduction will take place
+ * incrementally over time, as calls to "put" operations are made and
+ * {@link DbCacheLinkedHashMap#removeEldestEntry} is invoked for each put
+ * operation. This incremental cache size reduction is not expected to be a
+ * significant drawback in practice.
+ *

        + * @param configMgr the configuration holding the cache parameters + */ + public void setConfig(RepConfigManager configMgr) { + + maxEntries = configMgr.getInt(RepParams.REPLAY_MAX_OPEN_DB_HANDLES); + timeoutMs = configMgr.getDuration(RepParams.REPLAY_DB_HANDLE_TIMEOUT); + + } + + /** + * Returns the max entries that can be held by the cache. + */ + public int getMaxEntries() { + return maxEntries; + } + + /** + * Returns the configured timeout in ms. If a db handle has been inactive + * for a period of time that exceeds the timeout it's removed from the + * cache. + */ + public int getTimeoutMs() { + return timeoutMs; + } + + /** + * Clears out the cache releasing db handles as well + */ + public void clear() { + for (Info dbInfo : map.values()) { + release(dbInfo.dbImpl); + } + map.clear(); + } + + /* For testing only. */ + LinkedHashMap getMap() { + return map; + } + + /** + * Struct to associate a tick with the dbImpl + */ + private class Info { + int lastAccess; + final DatabaseImpl dbImpl; + + public Info(DatabaseImpl dbImpl) { + super(); + this.lastAccess = DbCache.this.tick; + this.dbImpl = dbImpl; + } + } + + /** + * Subclass supplies the method used to remove the LRU entry and the + * bookkeeping that goes along with it. + */ + private class DbCacheLinkedHashMap + extends LinkedHashMap { + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + if (size() <= maxEntries) { + return false; + } + release(eldest.getValue().dbImpl); + return true; + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/DurabilityQuorum.java b/src/com/sleepycat/je/rep/impl/node/DurabilityQuorum.java new file mode 100644 index 0000000..e597a36 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/DurabilityQuorum.java @@ -0,0 +1,250 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static java.util.logging.Level.FINE; + +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.arbitration.Arbiter; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.stream.FeederTxns; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Provides information about quorums needed for durability decisions. + */ +public class DurabilityQuorum { + + private final RepImpl repImpl; + private final Logger logger; + + public DurabilityQuorum(RepImpl repImpl) { + + this.repImpl = repImpl; + logger = LoggerUtils.getLogger(getClass()); + } + + /** + * See if there are a sufficient number of replicas alive to support + * the commit for this transaction. Used as an optimizing step before any + * writes are executed, to reduce the number of outstanding writes that + * suffer from insufficient ack problems. + * + * If this node is not the master, just return. 
A different kind of check + * will catch the fact that this node cannot support writes. + * + * TODO: Read only transactions on the master should not have to wait. + * In the future, introduce either a read-only attribute as part of + * TransactionConfig or a read only transaction class to optimize this. + * + * @param insufficientReplicasTimeout timeout in ms + * @throws InsufficientReplicasException if there are not enough replicas + * connected to this feeder to be able to commit the transaction. + */ + public void ensureReplicasForCommit(MasterTxn txn, + int insufficientReplicasTimeout) + throws DatabaseException, InterruptedException, + InsufficientReplicasException { + + RepNode repNode = repImpl.getRepNode(); + if (!repNode.isMaster()) { + return; + } + + ReplicaAckPolicy ackPolicy = + txn.getDefaultDurability().getReplicaAck(); + int requiredReplicaAckCount = getCurrentRequiredAckCount(ackPolicy); + if (logger.isLoggable(FINE)) { + LoggerUtils.fine(logger, repImpl, + "Txn " + txn + ": checking that " + + requiredReplicaAckCount + + " feeders exist before starting commit"); + } + + /* No need to wait for anyone else, only this node is needed. */ + if (requiredReplicaAckCount == 0) { + return; + } + + if (repNode.feederManager().awaitFeederReplicaConnections + (requiredReplicaAckCount, insufficientReplicasTimeout)) { + /* Wait was successful */ + return; + } + + /* + * Timed out, not enough replicas connected, or feeder was shutdown + * normally, that is, without any exceptions while waiting. + */ + if (!repNode.isMaster()) { + + /* + * Continue if we are no longer the master after the wait. The + * transaction will fail if it tries to acquire write locks, or + * at commit. + */ + return; + } + + if (ackPolicy.equals(ReplicaAckPolicy.SIMPLE_MAJORITY) && + repNode.getArbiter().activateArbitration()) { + return; + } + + final boolean includeArbiters = + !ackPolicy.equals(ReplicaAckPolicy.ALL); + throw new InsufficientReplicasException( + txn, ackPolicy, requiredReplicaAckCount, + repNode.feederManager().activeAckReplicas(includeArbiters)); + } + + /** + * Determine whether acknowledgments from the specified replica should be + * counted against transaction durability requirements. + * + * @param replica the replica node + * @return whether acknowledgments from the replica should be counted + */ + public boolean replicaAcksQualify(final RepNodeImpl replica) { + + /* Only acknowledgments from electable nodes should be counted */ + return replica.getType().isElectable(); + } + + /** + * Determine if this transaction has been adequately acknowledged. + * + * @throws InsufficientAcksException if the transaction's durability + * requirements have not been met. + */ + public void ensureSufficientAcks(FeederTxns.TxnInfo txnInfo, + int timeoutMs) + throws InsufficientAcksException { + + int pendingAcks = txnInfo.getPendingAcks(); + if (pendingAcks == 0) { + return; + } + + MasterTxn txn = txnInfo.getTxn(); + final int requiredAcks = getCurrentRequiredAckCount( + txn.getCommitDurability().getReplicaAck()); + int requiredAckDelta = txn.getRequiredAckCount() - requiredAcks; + if (requiredAckDelta >= pendingAcks) { + + /* + * The group size was reduced while waiting for acks and the + * acks received are sufficient given the new reduced group + * size. 
+ */ + return; + } + + /* Snapshot the state to be used in the error message */ + final String dumpState = repImpl.dumpAckFeederState(); + + /* + * Repeat the check to ensure that acks have not been received in + * the time between the completion of the await() call above and + * the creation of the exception message. This tends to happen when + * there are lots of threads in the process thus potentially + * delaying the resumption of this thread following the timeout + * resulting from the await. + * + * It should be noted that some transactions may be setup to not + * decrement the wait latch count for Arbiter acks. Checking an + * Arbiters feeder VLSN here will account for the Arbiter ack. + */ + final FeederManager feederManager = + repImpl.getRepNode().feederManager(); + int currentFeederCount = + feederManager.getNumCurrentAckFeeders(txn.getCommitVLSN()); + if (currentFeederCount >= requiredAcks) { + String msg = "txn " + txn.getId() + + " commit vlsn:" + txnInfo.getCommitVLSN() + + " acknowledged after explicit feeder check" + + " latch count:" + txnInfo.getPendingAcks() + + " state:" + dumpState + + " required acks:" + requiredAcks; + + LoggerUtils.info(logger, repImpl, msg); + return; + } + + /* + * We can avoid the exception if it's possible for this node to enter + * activate arbitration. It's useful to check for this again here in + * case we happen to lose connections to replicas in the (brief) + * period since the pre-log hook. Note that in this case we merely + * want to check; we don't want to switch into active arbitration + * unless/until we actually lose the connection to the replica at + * commit time. TODO: this doesn't seem right! Shouldn't we require + * activation at this point!!! + */ + if (repImpl.getRepNode().getArbiter().activationPossible()) { + return; + } + throw new InsufficientAcksException(txn, pendingAcks, timeoutMs, + dumpState); + + } + + /** + * Returns the minimum number of acknowledgments required to satisfy the + * ReplicaAckPolicy for a given group size. Does not include the master. + * The method factors in considerations like the current arbitration status + * of the environment and the composition of the replication group. + * + * TODO: it seems sufficient to return a number, as opposed to a set of + * qualified ack nodes, as long as {@link #replicaAcksQualify} will only + * count qualified acks against the required count. That does mean that + * getCurrentRequiredAckCount and noteReplicaAcks for a transaction must be + * kept consistent. + * + * @return the number of nodes that are needed, not including the master. + */ + public int getCurrentRequiredAckCount(ReplicaAckPolicy ackPolicy) { + + /* + * If the electableGroupSizeOverride is determining the size of the + * election quorum, let it also influence the durability quorum. + */ + RepNode repNode = repImpl.getRepNode(); + int electableGroupSizeOverride = + repNode.getElectionQuorum().getElectableGroupSizeOverride(); + if (electableGroupSizeOverride > 0) { + + /* + * Use the override-defined group size to determine the + * number of acks. 
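+ * For example (illustrative): with an override of 5 and
+ * SIMPLE_MAJORITY, minAckNodes(5) is 3, so 2 replica acks are
+ * required in addition to the master.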
+ */ + return ackPolicy.minAckNodes(electableGroupSizeOverride) - 1; + } + + Arbiter arbiter = repNode.getArbiter(); + if (arbiter.isApplicable(ackPolicy)) { + return arbiter.getAckCount(ackPolicy); + } + + return ackPolicy.minAckNodes + (repNode.getGroup().getAckGroupSize()) - 1; + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/ElectionQuorum.java b/src/com/sleepycat/je/rep/impl/node/ElectionQuorum.java new file mode 100644 index 0000000..e85fc4e --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/ElectionQuorum.java @@ -0,0 +1,150 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import java.util.logging.Logger; + +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.arbitration.Arbiter; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.stream.MasterStatus; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * ElectionQuorum centralizes decision making about what constitutes a + * successful election quorum and the definition of an authoritative master. + */ +public class ElectionQuorum { + + private final RepImpl repImpl; + private final Logger logger; + + /* + * If non-zero use this value to override the normal group size + * calculations. + */ + private volatile int electableGroupSizeOverride; + + public ElectionQuorum(RepImpl repImpl) { + + this.repImpl = repImpl; + logger = LoggerUtils.getLogger(getClass()); + + electableGroupSizeOverride = repImpl.getConfigManager(). + getInt(RepParams.ELECTABLE_GROUP_SIZE_OVERRIDE); + if (electableGroupSizeOverride > 0) { + LoggerUtils.warning(logger, repImpl, + "Electable group size override set to:" + + electableGroupSizeOverride); + } + } + + /** For unit testing */ + public ElectionQuorum() { + repImpl = null; + logger = null; + } + + /* + * Sets the override value for the Electable Group size. + */ + public void setElectableGroupSizeOverride(int override) { + if (electableGroupSizeOverride != override) { + LoggerUtils.warning(logger, repImpl, + "Electable group size override changed to:" + + override); + } + this.electableGroupSizeOverride = override; + } + + public int getElectableGroupSizeOverride() { + return electableGroupSizeOverride; + } + + /** + * Predicate to determine whether we have a quorum based upon the quorum + * policy. + */ + public boolean haveQuorum(QuorumPolicy quorumPolicy, int votes) { + return votes >= getElectionQuorumSize(quorumPolicy); + } + + /** + * Returns a definitive answer to whether this node is currently the master + * by checking both its status as a master and that a sufficient number + * of nodes agree that it's the master based on the number of feeder + * connections to it. Currently, the sufficient number is just a simple + * majority. Such an authoritative answer is needed in a network partition + * situation to detect a master that may be isolated on the minority side + * of a network partition. + * + * @return true if the node is definitely the master. 
False if it's not or + * we cannot be sure. + */ + boolean isAuthoritativeMaster(MasterStatus masterStatus, + FeederManager feederManager) { + if (!masterStatus.isGroupMaster()) { + return false; + } + + return (feederManager.activeReplicaCount() + 1) >= + getElectionQuorumSize(QuorumPolicy.SIMPLE_MAJORITY); + } + + /** + * Return the number of nodes that are required to achieve consensus on the + * election. Over time, this may evolve to be a more detailed description + * than simply the size of the quorum. Instead, it may return the set of + * possible voters. + * + * Special situations, like an active designated primary or an election + * group override will change the default quorum size. + * + * @param quorumPolicy + * @return the number of nodes required for a quorum + */ + private int getElectionQuorumSize(QuorumPolicy quorumPolicy) { + if (electableGroupSizeOverride > 0) { + return quorumPolicy.quorumSize(electableGroupSizeOverride); + } + + /* + * If arbitration is active, check whether arbitration determines the + * election group size. + */ + RepNode repNode = repImpl.getRepNode(); + Arbiter arbiter = repNode.getArbiter(); + if (arbiter.isApplicable(quorumPolicy)) { + return arbiter.getElectionQuorumSize(quorumPolicy); + } + + return quorumPolicy.quorumSize + (repNode.getGroup().getElectableGroupSize()); + } + + /** + * Return whether nodes of the specified type should participate in + * elections. + * + * @param nodeType the node type + * @return whether nodes of that type should participate in elections + */ + public boolean nodeTypeParticipates(final NodeType nodeType) { + + /* Only electable nodes participate in elections */ + return nodeType.isElectable(); + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/Feeder.java b/src/com/sleepycat/je/rep/impl/node/Feeder.java new file mode 100644 index 0000000..297dfec --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/Feeder.java @@ -0,0 +1,1674 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_MAX_REPLICA_LAG; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_MAX_REPLICA_LAG_NAME; + +import java.io.IOException; +import java.lang.Thread.UncaughtExceptionHandler; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.rep.ReplicationSecurityException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.MasterTransfer.VLSNProgress; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.stream.ArbiterFeederSource; +import com.sleepycat.je.rep.stream.BaseProtocol.Ack; +import com.sleepycat.je.rep.stream.BaseProtocol.Commit; +import com.sleepycat.je.rep.stream.BaseProtocol.GroupAck; +import com.sleepycat.je.rep.stream.BaseProtocol.HeartbeatResponse; +import com.sleepycat.je.rep.stream.FeederFilter; +import com.sleepycat.je.rep.stream.FeederReplicaHandshake; +import com.sleepycat.je.rep.stream.FeederReplicaSyncup; +import com.sleepycat.je.rep.stream.FeederReplicaSyncup.NetworkRestoreException; +import com.sleepycat.je.rep.stream.FeederSource; +import com.sleepycat.je.rep.stream.FeederTxns.TxnInfo; +import com.sleepycat.je.rep.stream.MasterFeederSource; +import com.sleepycat.je.rep.stream.MasterStatus; +import com.sleepycat.je.rep.stream.MasterStatus.MasterSyncException; +import com.sleepycat.je.rep.stream.OutputWireRecord; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.subscription.StreamAuthenticator; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.utilint.NamedChannelWithTimeout; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.utilint.AtomicLongComponent; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongAvgRate; +import com.sleepycat.je.utilint.LongAvgRateStat; +import com.sleepycat.je.utilint.LongDiffStat; +import com.sleepycat.je.utilint.LongMaxZeroStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.StringStat; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * There is an instance of a Feeder for each client that needs a replication + * stream. Either a master, or replica (providing feeder services) may + * establish a feeder. + * + * A feeder is created in response to a request from a Replica, and is shutdown + * either upon loss of connectivity, or upon a change in mastership. 
+ * + * The protocol used to validate and negotiate a connection is synchronous, but + * once this phase has been completed, the communication between the feeder and + * replica is asynchronous. To handle the async communications, the feeder has + * two threads associated with it: + * + * 1) An output thread whose sole purpose is to pump log records (and if + * necessary heart beat requests) down to the replica as fast as the network + * will allow it + * + * 2) An input thread that listens for responses to transaction commits and + * heart beat responses. + * + *

+ * <p>The feeder maintains several statistics that provide information about
+ * the replication rate for each replica. By comparing this information to
+ * information about master replication maintained by the FeederTxns class, it
+ * is also possible to estimate the lag between replicas and the master.
+ *
+ * <p>The statistics facilities do not expect the set of available statistics
+ * to change dynamically. To handle recording statistics about the changing
+ * set of replicas, the statistics are represented as maps that associate node
+ * names with statistics. Each feeder adds individual statistics to these maps
+ * at startup, and removes them at shutdown time to make sure that the
+ * statistics in the maps only reflect up-to-date information.
+ *
+ * <p>Some notes about the specific statistics:
+ *
+ * <dl>
+ *
+ * <dt>replicaDelay
+ *
+ * <dd>The difference between the commit times of the latest transaction
+ * committed on the master and the transaction most recently processed by the
+ * replica. The master timestamp comes from the lastCommitTimestamp statistic
+ * maintained by FeederTxns. The feeder determines the commit timestamp of the
+ * replica's most recently processed transaction by obtaining timestamps from
+ * commit records being sent to the replica, and noting the last one prior to
+ * sending a heartbeat. When a heartbeat response is received, if the latest
+ * replica VLSN included in the response is equal to or greater than the one
+ * recorded when the heartbeat request was sent, then the delay is computed by
+ * comparing the commit timestamp for that most recently sent transaction with
+ * the timestamp of the master's latest transaction. Replicas can send
+ * heartbeat responses on their own, so comparing the VLSNs is necessary to
+ * make sure that the response matches the request. Note that this arrangement
+ * depends on the fact that the replica processes transactions and heartbeats
+ * in order, and only sends a heartbeat response once all preceding
+ * transactions have been processed. If the master processes transactions at a
+ * fast enough rate that additional transactions are generated while waiting
+ * for a heartbeat response, then the value of this statistic will not reach
+ * zero, but will represent the total time for sending a commit operation to
+ * the replica and receiving the associated response, including the roundtrip
+ * latency of the network and any time spent due to buffering of replication
+ * data.
+ *
+ * <dt>replicaLastCommitTimestamp
+ *
+ * <dd>The commit timestamp of the last transaction committed before the most
+ * recent heartbeat for which a heartbeat response has been received. This
+ * statistic represents the commit time on the master of the most recent data
+ * known to have been processed on the replica. It provides the information
+ * used for the replica component of the replicaDelay statistic.
+ *
+ * <dt>replicaLastCommitVLSN
+ *
+ * <dd>The VLSN of the committed transaction described for
+ * replicaLastCommitTimestamp. This statistic provides the information used
+ * for the replica component of the replicaVLSNLag statistic.
+ *
+ * <dt>replicaVLSNLag
+ *
+ * <dd>The difference between the VLSN of the latest transaction committed on
+ * the master and the one most recently processed by the replica. The master
+ * VLSN comes from the lastCommitVLSN statistic maintained by FeederTxns. This
+ * statistic is similar to replicaDelay, but provides information about the
+ * VLSN lag rather than the time delay.
+ *
+ * <dt>replicaVLSNRate
+ *
+ * <dd>An exponential moving average of the rate of change of the
+ * replicaLastCommitVLSN statistic over time, averaged over a 10 second time
+ * period. This statistic provides information about how quickly the replica
+ * is processing replication data, which can be used, along with the vlsnRate
+ * statistic maintained by FeederTxns, to estimate the amount of time it will
+ * take for the replica to catch up with the master.
+ *
+ * </dl>
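+ *
+ * <p>As an illustrative back-of-envelope use of these statistics (the
+ * values are hypothetical): if replicaVLSNLag is 60,000 VLSNs and
+ * replicaVLSNRate exceeds the master's vlsnRate by 12,000 VLSNs/minute,
+ * the replica can be expected to catch up in roughly
+ * 60,000 / 12,000 = 5 minutes.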
        + */ +final public class Feeder { + /* + * A heartbeat is written with this period by the feeder output thread. + * Is mutable. + */ + private int heartbeatMs; + + /* The manager for all Feeder instances. */ + private final FeederManager feederManager; + + /* The replication node that is associated with this Feeder */ + private final RepNode repNode; + /* The RepImpl that is associated with this rep node. */ + private final RepImpl repImpl; + + /* The socket on which the feeder communicates with the Replica. */ + private final NamedChannelWithTimeout feederReplicaChannel; + + /* The Threads that implement the Feeder */ + private final InputThread inputThread; + private final OutputThread outputThread; + + /* The filter to be used for records written to the replication stream.*/ + private FeederFilter feederFilter; + + /* feeder authenticator */ + private final StreamAuthenticator authenticator; + + /* security check interval in ms */ + private final long securityChkIntvMs; + + private boolean isArbiterFeeder = false; + + /* The source of log records to be sent to the Replica. */ + private FeederSource feederSource; + + /* Negotiated message protocol version for the replication stream. */ + private int protocolVersion; + + /** + * The current position of the feeder, that is, the log record with this + * VLSN will be sent next to the Replica. Note that this does not mean that + * the replica has actually processed all log records preceding feederVLSN. + * The records immediately preceding feederVLSN (down to replicaAckVLSN) + * may be in the network, in transit to the replica. + * + * The feederVLSN can only move forwards post feeder-replica syncup. + * However, it can move forwards or backwards as matchpoints are + * negotiated during syncup. + */ + private volatile VLSN feederVLSN = VLSN.NULL_VLSN; + + /** + * The latest commit or abort that the replica has reported receiving, + * either by ack (in the case of a commit), or via heartbeat response. It + * serves as a rough indication of the replay state of the replica that is + * used in exception messages. + * + * The following invariant must always hold: replicaTxnEndLSN < feederVLSN + */ + private volatile VLSN replicaTxnEndVLSN = VLSN.NULL_VLSN; + + /* The time that the feeder last heard from its Replica */ + private volatile long lastResponseTime = 0l; + + /* + * Used to communicate our progress when getting ready for a Master + * Transfer operation. + */ + private volatile MasterTransfer masterXfr; + private volatile boolean caughtUp = false; + + /* Used to track the status of the master. */ + private final MasterStatus masterStatus; + + /* + * Determines whether the Feeder has been shutdown. Usually this is held + * within the StoppableThread, but the Feeder's two child threads have + * their shutdown coordinated by the parent Feeder. + */ + private final AtomicBoolean shutdown = new AtomicBoolean(false); + + private final Logger logger; + + /* The Feeder's node ID. */ + private final NameIdPair nameIdPair; + + /** + * The replica node ID, that is, the node that is the recipient of the + * replication stream. Its established at the time of the Feeder/Replica + * handshake. + */ + private volatile NameIdPair replicaNameIdPair = NameIdPair.NULL; + + /** + * The agreed upon log format that should be used for writing log entries + * to send to the replica, or zero if not yet known. + */ + private volatile int streamLogVersion = 0; + + /** The JE version of the replica, or null if not known. 
*/ + private volatile JEVersion replicaJEVersion = null; + + /** The RepNodeImpl of the replica, or null if not known. */ + private volatile RepNodeImpl replicaNode = null; + + /** Tracks when the last heartbeat was sent, or 0 if none has been sent */ + private volatile long lastHeartbeatTime; + + /** + * The VLSN of the most recent log entry that committed a transaction and + * was sent to the replica before the last heartbeat was sent, or 0 if no + * such log entries have been sent since the previous heartbeat. + */ + private volatile long lastHeartbeatCommitVLSN; + + /** + * The timestamp of the most recent log entry that committed a transaction + * and was sent to the replica before the last heartbeat was sent, or 0 if + * no such log entries have been sent since the previous heartbeat. + */ + private volatile long lastHeartbeatCommitTimestamp; + + /** The VLSN generation rate of the master in VLSNs/minute. */ + private final LongAvgRateStat vlsnRate; + + /** + * A test hook that is called before a message is written. Note that the + * hook is inherited by the ReplicaFeederHandshake, and will be kept in + * place there for the entire handshake. + */ + private volatile TestHook writeMessageHook; + + /** + * A test hook that is used to set the writeMessageHook for newly created + * feeders. + */ + private static volatile TestHook initialWriteMessageHook; + + /** + * Returns a configured DataChannel + * + * @param channel the channel to be configured + * @return the configured DataChannel + * @throws IOException + */ + private NamedChannelWithTimeout configureChannel(DataChannel channel) + throws IOException { + + try { + channel.getSocketChannel().configureBlocking(true); + LoggerUtils.info + (logger, repImpl, "Feeder accepted connection from " + channel); + final int timeoutMs = repNode.getConfigManager(). + getDuration(RepParams.PRE_HEARTBEAT_TIMEOUT); + final boolean tcpNoDelay = repNode.getConfigManager(). + getBoolean(RepParams.FEEDER_TCP_NO_DELAY); + + /* Set use of Nagle's algorithm on the socket. 
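+ * Note that setTcpNoDelay(true) disables Nagle's algorithm, while false
+ * leaves it enabled.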
*/ + channel.getSocketChannel().socket().setTcpNoDelay(tcpNoDelay); + return new NamedChannelWithTimeout(repNode, channel, timeoutMs); + } catch (IOException e) { + LoggerUtils.warning(logger, repImpl, + "IO exception while configuring channel " + + "Exception:" + e.getMessage()); + throw e; + } + } + + Feeder(FeederManager feederManager, DataChannel dataChannel) + throws DatabaseException, IOException { + + this.feederManager = feederManager; + this.repNode = feederManager.repNode(); + this.repImpl = repNode.getRepImpl(); + this.masterStatus = repNode.getMasterStatus(); + nameIdPair = repNode.getNameIdPair(); + this.feederSource = null; + logger = LoggerUtils.getLogger(getClass()); + + this.feederReplicaChannel = configureChannel(dataChannel); + inputThread = new InputThread(); + outputThread = new OutputThread(); + heartbeatMs = feederManager.repNode().getHeartbeatInterval(); + vlsnRate = repImpl.getFeederTxns().getVLSNRate(); + writeMessageHook = initialWriteMessageHook; + + feederFilter = null; + + /* get authenticator from containing rn */ + authenticator = feederManager.repNode().getAuthenticator(); + securityChkIntvMs = + feederManager.repNode().getSecurityCheckInterval(); + } + + void startFeederThreads() { + inputThread.start(); + } + + /** + * @hidden + * Place holder Feeder for testing only + */ + public Feeder() { + feederManager = null; + repNode = null; + repImpl = null; + masterStatus = null; + feederSource = null; + feederReplicaChannel = null; + nameIdPair = NameIdPair.NULL; + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "TestFeeder"); + inputThread = null; + outputThread = null; + shutdown.set(true); + vlsnRate = null; + writeMessageHook = initialWriteMessageHook; + feederFilter = null; + authenticator = null; + securityChkIntvMs = 0; + } + + /** + * Creates the MasterFeederSource, which must be done while all files in + * the VLSNIndex range are protected by syncup. + */ + public void initMasterFeederSource(VLSN startVLSN) + throws IOException, InterruptedException { + + replicaTxnEndVLSN = startVLSN.getPrev(); + if (replicaTxnEndVLSN.compareTo(repNode.getCurrentTxnEndVLSN()) >= 0) { + caughtUp = true; + } + feederVLSN = startVLSN; + feederSource = new MasterFeederSource(repNode.getRepImpl(), + repNode.getVLSNIndex(), replicaNameIdPair, startVLSN); + } + + private void initArbiterFeederSource() + throws IOException, InterruptedException { + + feederSource = new ArbiterFeederSource(repNode.getRepImpl()); + feederVLSN = VLSN.NULL_VLSN; + isArbiterFeeder = true; + } + + /* Get the protocol stats of this Feeder. */ + public StatGroup getProtocolStats(StatsConfig config) { + final Protocol protocol = outputThread.protocol; + + return (protocol != null) ? 
+ protocol.getStats(config) : + new StatGroup(BinaryProtocolStatDefinition.GROUP_NAME, + BinaryProtocolStatDefinition.GROUP_DESC); + } + + void resetStats() { + final Protocol protocol = outputThread.protocol; + if (protocol != null) { + protocol.resetStats(); + } + } + + void setMasterTransfer(MasterTransfer mt) { + masterXfr = mt; + if (caughtUp) { + adviseMasterTransferProgress(); + } + } + + void adviseMasterTransferProgress() { + MasterTransfer mt = masterXfr; + if (mt != null) { + mt.noteProgress + (new VLSNProgress(replicaTxnEndVLSN, + replicaNameIdPair.getName())); + } + } + + public RepNode getRepNode() { + return repNode; + } + + public NameIdPair getReplicaNameIdPair() { + return replicaNameIdPair; + } + + public void setFeederFilter(FeederFilter filter) { + feederFilter = filter; + } + + /** + * Returns the latest commit VLSN that was acked by the replica, or + * NULL_VLSN if no commit was acked since the time the feeder was + * established. + */ + public VLSN getReplicaTxnEndVLSN() { + return replicaTxnEndVLSN; + } + + /** + * Returns the next VLSN that will be sent to the replica. It will + * return VLSN.NULL if the Feeder is in the process of being created and + * FeederReplicaSyncup has not yet happened. + */ + public VLSN getFeederVLSN() { + return feederVLSN; + } + + /** + * Returns the JE version supported by the replica, or {@code null} if the + * value is not yet known. + * + * @return the replica JE version or {@code null} + */ + public JEVersion getReplicaJEVersion() { + return replicaJEVersion; + } + + /** + * Returns a RepNodeImpl that describes the replica, or {@code null} if the + * value is not yet known. The value will be non-null if the feeder + * handshake has completed successfully. + * + * @return the replica node or {@code null} + */ + public RepNodeImpl getReplicaNode() { + return replicaNode; + } + + /** + * Shutdown the feeder, closing its channel and releasing its threads. May + * be called internally upon noticing a problem, or externally when the + * RepNode is shutting down. + */ + void shutdown(Exception shutdownException) { + + boolean changed = shutdown.compareAndSet(false, true); + if (!changed) { + return; + } + + MasterTransfer mt = masterXfr; + final String replicaName = replicaNameIdPair.getName(); + if (mt != null) { + mt.giveUp(replicaName); + } + feederManager.removeFeeder(this); + + /* Shutdown feeder source to remove file protection. */ + if (feederSource != null) { + feederSource.shutdown(repImpl); + } + + StatGroup pstats = (inputThread.protocol != null) ? + inputThread.protocol.getStats(StatsConfig.DEFAULT) : + new StatGroup(BinaryProtocolStatDefinition.GROUP_NAME, + BinaryProtocolStatDefinition.GROUP_DESC); + if (outputThread.protocol != null) { + pstats.addAll(outputThread.protocol.getStats(StatsConfig.DEFAULT)); + } + feederManager.incStats(pstats); + + /* Remove replica stats */ + feederManager.getReplicaDelayMap().removeStat(replicaName); + feederManager.getReplicaLastCommitTimestampMap().removeStat( + replicaName); + feederManager.getReplicaLastCommitVLSNMap().removeStat(replicaName); + feederManager.getReplicaVLSNLagMap().removeStat(replicaName); + feederManager.getReplicaVLSNRateMap().removeStat(replicaName); + + LoggerUtils.info(logger, repImpl, + "Shutting down feeder for replica " + replicaName + + ((shutdownException == null) ? 
+ "" : + (" Reason: " + shutdownException.getMessage())) + + RepUtils.writeTimesString(pstats)); + + if (repNode.getReplicaCloseCatchupMs() >= 0) { + + /* + * Need to shutdown the group cleanly, wait for it to let the + * replica catchup and exit in the allowed time period. + */ + try { + + /* + * Note that we wait on the Input thread, since it's the one + * that will exit on the ShutdownResponse message from the + * Replica. The output thread will exit immediately after + * sending the ShutdownRequest. + */ + inputThread.join(); + /* Timed out, or the input thread exited; keep going. */ + } catch (InterruptedException e) { + LoggerUtils.warning(logger, repImpl, + "Interrupted while waiting to join " + + "thread:" + outputThread); + } + } + + outputThread.shutdownThread(logger); + inputThread.shutdownThread(logger); + + LoggerUtils.finest(logger, repImpl, + feederReplicaChannel + " isOpen=" + + feederReplicaChannel.getChannel().isOpen()); + } + + public boolean isShutdown() { + return shutdown.get(); + } + + public ArbiterFeederSource getArbiterFeederSource() { + if (feederSource != null && + feederSource instanceof ArbiterFeederSource) { + return (ArbiterFeederSource)feederSource; + } + + return null; + } + + public StreamAuthenticator getAuthenticator() { + return authenticator; + } + + /** + * Implements the thread responsible for processing the responses from a + * Replica. + */ + private class InputThread extends StoppableThread { + + Protocol protocol = null; + private LocalCBVLSNUpdater replicaCBVLSN; + + /* + * Per-replica stats stored in a map in the feeder manager. These can + * only be set once the replica name is found following the handshake. + * + * See the class javadoc comment for more information about these + * statistics and how they can be used to gather information about + * replication rates. + */ + private volatile LongDiffStat replicaDelay; + private volatile AtomicLongComponent replicaLastCommitTimestamp; + private volatile AtomicLongComponent replicaLastCommitVLSN; + private volatile LongDiffStat replicaVLSNLag; + private volatile LongAvgRate replicaVLSNRate; + + InputThread() { + /* + * The thread will be renamed later on during the life of this + * thread, when we're sure who the replica is. + */ + super(repImpl, new IOThreadsHandler(), "Feeder Input"); + } + + /** + * Does the initial negotiation to validate replication group wide + * consistency and establish the starting VLSN. It then starts up the + * Output thread and enters the response loop. + */ + @Override + public void run() { + + /* Set to indicate an error-initiated shutdown. */ + Error feederInputError = null; + Exception shutdownException = null; + + try { + FeederReplicaHandshake handshake = + new FeederReplicaHandshake(repNode, + Feeder.this, + feederReplicaChannel); + protocol = handshake.execute(); + protocolVersion = protocol.getVersion(); + replicaNameIdPair = handshake.getReplicaNameIdPair(); + streamLogVersion = handshake.getStreamLogVersion(); + replicaJEVersion = handshake.getReplicaJEVersion(); + replicaNode = handshake.getReplicaNode(); + + /* + * Rename the thread when we get the replica name in, so that + * it's clear who is on the other end. 
+ */ + Thread.currentThread().setName("Feeder Input for " + + replicaNameIdPair.getName()); + + if (replicaNode.getType().isArbiter()) { + initArbiterFeederSource(); + } else { + FeederReplicaSyncup syncup = new FeederReplicaSyncup( + Feeder.this, feederReplicaChannel, protocol); + + /* + * For data nodes we must update the global CBVLSN using + * the replica's CBVLSN (when the global CBVLSN it is not + * defunct). The replicaCBVLSN can only be instantiated + * after we know the replica's name. + */ + if (replicaNode.getType().isDataNode()) { + replicaCBVLSN = new LocalCBVLSNUpdater( + replicaNameIdPair, replicaNode.getType(), repNode); + } + + /* + * Sync-up produces the VLSN of the next log record needed + * by the replica, one beyond the last commit or abort it + * already has. Sync-up calls initMasterFeederSource while + * the VLSNIndex range is protected. + */ + syncup.execute(); + } + + /* Set up stats */ + replicaDelay = feederManager.getReplicaDelayMap().createStat( + replicaNameIdPair.getName(), + repNode.getFeederTxns().getLastCommitTimestamp()); + replicaLastCommitTimestamp = + feederManager.getReplicaLastCommitTimestampMap() + .createStat(replicaNameIdPair.getName()); + replicaLastCommitVLSN = + feederManager.getReplicaLastCommitVLSNMap() + .createStat(replicaNameIdPair.getName()); + replicaVLSNLag = feederManager.getReplicaVLSNLagMap() + .createStat( + replicaNameIdPair.getName(), + repNode.getFeederTxns() + .getLastCommitVLSN()); + replicaVLSNRate = feederManager.getReplicaVLSNRateMap() + .createStat( + replicaNameIdPair.getName()); + + /* Start the thread to pump out log records */ + outputThread.start(); + lastResponseTime = System.currentTimeMillis(); + masterStatus.assertSync(); + feederManager.activateFeeder(Feeder.this); + + runResponseLoop(); + } catch (ReplicationSecurityException ue) { + shutdownException = ue; + LoggerUtils.warning(logger, repImpl, ue.getMessage()); + } catch (NetworkRestoreException e) { + shutdownException = e; + /* The replica will retry after a network restore. */ + LoggerUtils.info(logger, repImpl, e.getMessage()); + } catch (IOException e) { + /* Trio of benign "expected" exceptions below. */ + shutdownException = e; /* Expected. */ + } catch (MasterSyncException e) { + shutdownException = e; /* Expected. */ + } catch (InterruptedException e) { + shutdownException = e; /* Expected. */ + } catch (ExitException e) { + shutdownException = e; + LoggerUtils.warning(logger, repImpl, + "Exiting feeder loop: " + e.getMessage()); + } catch (Error e) { + feederInputError = e; + repNode.getRepImpl().invalidate(e); + } catch (ChecksumException e) { + shutdownException = e; + + /* An internal, unexpected error. Invalidate the environment. */ + throw new EnvironmentFailureException + (repNode.getRepImpl(), + EnvironmentFailureReason.LOG_CHECKSUM, e); + } catch (RuntimeException e) { + shutdownException = e; + + /* + * An internal error. Shut down the rep node as well for now + * by throwing the exception out of the thread. + * + * In future we may want to close down just the impacted Feeder + * but this is the safe course of action. + */ + LoggerUtils.severe(logger, repImpl, + "Unexpected exception: " + e.getMessage() + + LoggerUtils.getStackTrace(e)); + throw e; + } finally { + if (feederInputError != null) { + /* Propagate the error, skip cleanup. */ + throw feederInputError; + } + + /* + * Shutdown the feeder in its entirety, in case the input + * thread is the only one to notice a problem. 
The Replica can
+ decide to re-establish the connection.
+ */
+ shutdown(shutdownException);
+ cleanup();
+ }
+ }
+
+ /*
+ * This method deals with responses from the Replica. The two most
+ * common response types are:
+ *
+ * 1) Responses acknowledging a successful commit by the Replica,
+ * either singly (ACK) or batched (GROUP_ACK).
+ *
+ * 2) Responses to heartbeat messages.
+ *
+ * This loop (like the loop in the OutputThread) is terminated under
+ * one of the following conditions:
+ *
+ * 1) The thread detects a change in masters.
+ * 2) There is a network connection issue (which might also be an
+ * indication of an unfolding change in masters).
+ * 3) The replica closes its connection -- a variation of the above.
+ *
+ * In addition, the loop will exit if it gets a ShutdownResponse
+ * message sent in response to a ShutdownRequest sent by the
+ * OutputThread, and it forwards REAUTHENTICATE requests to refresh
+ * the stream consumer's credentials.
+ */
+ private void runResponseLoop()
+ throws IOException, MasterSyncException {
+
+ /*
+ * Start the acknowledgment loop. It's very important that this
+ * loop be wait/contention free.
+ */
+ while (!checkShutdown()) {
+ Message response = protocol.read(feederReplicaChannel);
+ if (checkShutdown()) {
+
+ /*
+ * Shutdown quickly; in particular, don't update sync
+ * VLSNs.
+ */
+ break;
+ }
+ masterStatus.assertSync();
+
+ lastResponseTime = System.currentTimeMillis();
+
+ if (response.getOp() == Protocol.HEARTBEAT_RESPONSE) {
+ processHeartbeatResponse(response);
+ } else if (response.getOp() == Protocol.ACK) {
+
+ /*
+ * Check if a commit has been waiting for this
+ * acknowledgment and signal any waiters.
+ */
+ long txnId = ((Ack) response).getTxnId();
+ if (logger.isLoggable(Level.FINE)) {
+ LoggerUtils.fine(logger, repImpl, "Ack for: " + txnId);
+ }
+ deemAcked(txnId);
+ } else if (response.getOp() == Protocol.GROUP_ACK) {
+ final long[] txnIds = ((GroupAck) response).getTxnIds();
+
+ for (long txnId : txnIds) {
+ if (logger.isLoggable(Level.FINE)) {
+ LoggerUtils.fine(logger, repImpl,
+ "Group Ack for: " + txnId);
+ }
+ deemAcked(txnId);
+ }
+ } else if (response.getOp() == Protocol.SHUTDOWN_RESPONSE) {
+ LoggerUtils.info(logger, repImpl,
+ "Shutdown confirmed by replica " +
+ replicaNameIdPair.getName());
+ /* Exit the loop and the thread. */
+ break;
+ } else if (response.getOp() == Protocol.REAUTHENTICATE) {
+ processReauthenticate(response);
+ } else {
+ throw EnvironmentFailureException.unexpectedState
+ ("Unexpected message: " + response);
+ }
+ }
+ }
+
+ private void processHeartbeatResponse(Message response) {
+ /* lastResponseTime has just been updated; keep going. */
+ final HeartbeatResponse hbResponse =
+ (Protocol.HeartbeatResponse)response;
+
+ /*
+ * For arbiters we do not process the response, but it is still
+ * important for preventing the channel from timing out.
+ */
+ if (replicaNode.getType().isArbiter()) {
+ return;
+ }
+
+ /*
+ * When the global CBVLSN is not defunct, update it for a data node
+ * (replicaCBVLSN is null for non-data nodes).
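+ * Arbiters have already returned above; secondary and external
+ * nodes fall through here with replicaCBVLSN == null, so the
+ * update below is skipped for them.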
+ */ + if (replicaCBVLSN != null) { + replicaCBVLSN.updateForReplica(hbResponse); + } + + final VLSN replicaTxnVLSN = hbResponse.getTxnEndVLSN(); + + /* All further work requires the replica's VLSN */ + if (replicaTxnVLSN == null) { + return; + } + + replicaTxnEndVLSN = replicaTxnVLSN; + final long replicaTxnVLSNSeq = replicaTxnVLSN.getSequence(); + + feederManager.updateDTVLSN(replicaTxnVLSNSeq); + + if (replicaTxnVLSN.compareTo( + repNode.getCurrentTxnEndVLSN()) >= 0) { + + caughtUp = true; + adviseMasterTransferProgress(); + } + + /* + * Only tally statistics for the commit VLSN and timestamp if both + * values were recorded when the heartbeat was requested. Make + * computations based directly on the measured heartbeat delay if + * the heartbeat reply confirms that the requested VLSN has been + * processed. Otherwise, use the master VLSN rate to estimate the + * delay. + */ + final long commitVLSN = lastHeartbeatCommitVLSN; + final long commitTimestamp = lastHeartbeatCommitTimestamp; + if ((commitVLSN == 0) || (commitTimestamp == 0)) { + return; + } + final long statCommitVLSN = (commitVLSN <= replicaTxnVLSNSeq) ? + commitVLSN : replicaTxnVLSNSeq; + + /* Set the depended-on stats first */ + replicaLastCommitVLSN.set(statCommitVLSN); + replicaVLSNLag.set(statCommitVLSN, lastResponseTime); + replicaVLSNRate.add(statCommitVLSN, lastResponseTime); + + final long statCommitTimestamp; + if (commitVLSN <= replicaTxnVLSNSeq) { + statCommitTimestamp = commitTimestamp; + } else { + + /* Adjust the commit timestamp based on the VLSN rate */ + final long vlsnRatePerMinute = vlsnRate.get(); + if (vlsnRatePerMinute <= 0) { + return; + } + final long vlsnLag = commitVLSN - replicaTxnVLSNSeq; + final long timeLagMillis = + (long) (60000.0 * ((double) vlsnLag / vlsnRatePerMinute)); + statCommitTimestamp = commitTimestamp - timeLagMillis; + } + replicaLastCommitTimestamp.set(statCommitTimestamp); + replicaDelay.set(statCommitTimestamp, lastResponseTime); + } + + /* + * Returns true if the InputThread should be shutdown, that is, if the + * thread has been marked for shutdown and it's not a group shutdown + * request. For a group shutdown the input thread will wait for an + * acknowledgment of the shutdown message from the Replica. + */ + private boolean checkShutdown() { + return shutdown.get() && + (repNode.getReplicaCloseCatchupMs() < 0); + } + + @Override + protected int initiateSoftShutdown() { + + /* + * Provoke an I/O exception that will cause the input thread to + * exit. + */ + RepUtils.shutdownChannel(feederReplicaChannel); + return repNode.getThreadWaitInterval(); + } + + @Override + protected Logger getLogger() { + return logger; + } + } + + /** + * Simply pumps out log entries as rapidly as it can. + */ + private class OutputThread extends StoppableThread { + Protocol protocol = null; + + private long totalTransferDelay = 0; + + /* The time at which the group shutdown was initiated. */ + private long shutdownRequestStart = 0; + + /** + * Determines whether writing to the network connection for the replica + * suffices as a commit acknowledgment. + */ + private final boolean commitToNetwork; + + /** + * The threshold used to trigger the logging of transfers of commit + * records. + */ + private final int transferLoggingThresholdMs; + + /** + * The max time interval during which feeder records are grouped. + */ + private final int batchNs; + + /** + * The direct byte buffer holding the batched feeder records. 
+ */ + private final ByteBuffer batchBuff; + + /* Shared stats used to track max replica lag across all feeders. */ + private final LongMaxZeroStat nMaxReplicaLag; + private final StringStat nMaxReplicaLagName; + + private final VLSNIndex vlsnIndex; + + /* The timestamp of the most recently written commit record or 0 */ + private long lastCommitTimestamp; + + /* The VLSN of the most recently written commit record or 0 */ + private long lastCommitVLSN; + + /* + * The delay between writes of a replication message. Note that + * setting this to a non-zero value effectively turns off message + * batching. + */ + final int testDelayMs; + + OutputThread() { + /* + * The thread will be renamed later on during the life of this + * thread, when we know who the replica is. + */ + super(repImpl, new IOThreadsHandler(), "Feeder Output"); + final DbConfigManager configManager = repNode.getConfigManager(); + commitToNetwork = configManager. + getBoolean(RepParams.COMMIT_TO_NETWORK); + transferLoggingThresholdMs = configManager. + getDuration(RepParams.TRANSFER_LOGGING_THRESHOLD); + + batchNs = Math.min(configManager. + getInt(RepParams.FEEDER_BATCH_NS), + heartbeatMs * 1000000); + + final int batchBuffSize = configManager. + getInt(RepParams.FEEDER_BATCH_BUFF_KB) * 1024; + + batchBuff = ByteBuffer.allocateDirect(batchBuffSize); + + if (feederManager != null) { + nMaxReplicaLag = feederManager.getnMaxReplicaLag(); + nMaxReplicaLagName = feederManager.getnMaxReplicaLagName(); + } else { + /* Create a placeholder stat for testing. */ + StatGroup stats = + new StatGroup(FeederManagerStatDefinition.GROUP_NAME, + FeederManagerStatDefinition.GROUP_DESC); + nMaxReplicaLag = new LongMaxZeroStat(stats, N_MAX_REPLICA_LAG); + nMaxReplicaLagName = new StringStat(stats, + N_MAX_REPLICA_LAG_NAME); + } + + testDelayMs = feederManager.getTestDelayMs(); + if (testDelayMs > 0) { + LoggerUtils.info(logger, repImpl, + "Test delay of:" + testDelayMs + "ms." + + " after each message sent"); + } + vlsnIndex = repNode.getVLSNIndex(); + } + + /** + * Determines whether we should exit the output loop. If we are trying + * to shutdown the Replica cleanly, that is, this is a group shutdown, + * the method delays the shutdown until the Replica has had a chance + * to catch up to the current commit VLSN on this node, after which + * it sends the Replica a Shutdown message. + * + * @return true if the output thread should be shutdown. + * + * @throws IOException + */ + private boolean checkShutdown() + throws IOException { + + if (!shutdown.get()) { + return false; + } + if (repNode.getReplicaCloseCatchupMs() >= 0) { + if (shutdownRequestStart == 0) { + shutdownRequestStart = System.currentTimeMillis(); + } + /* Determines if the feeder has waited long enough. */ + boolean timedOut = + (System.currentTimeMillis() - shutdownRequestStart) > + repNode.getReplicaCloseCatchupMs(); + if (!timedOut && + !isArbiterFeeder && + (feederVLSN.compareTo + (repNode.getCurrentTxnEndVLSN()) <= 0)) { + /* + * Replica is not caught up. Note that feederVLSN at stasis + * is one beyond the last value that was actually sent, + * hence the <= instead of < above. + */ + return false; + } + /* Replica is caught up or has timed out, shut it down. */ + writeMessage(protocol.new ShutdownRequest(shutdownRequestStart), + feederReplicaChannel); + + String shutdownMessage = + String.format("Shutdown message sent to: %s. " + + "Feeder vlsn: %,d. 
" + + "Shutdown elapsed time: %,dms", + replicaNameIdPair, + feederVLSN.getSequence(), + (System.currentTimeMillis() - + shutdownRequestStart)); + LoggerUtils.info(logger, repImpl, shutdownMessage); + return true; + } + return true; + } + + /** Write a protocol message to the channel. */ + private void writeMessage(final Message message, + final NamedChannel namedChannel) + throws IOException { + + assert TestHookExecute.doHookIfSet(writeMessageHook, message); + protocol.write(message, namedChannel); + } + + @Override + public void run() { + protocol = + Protocol.get(repNode, protocolVersion, protocolVersion, + streamLogVersion); + Thread.currentThread().setName + ("Feeder Output for " + + Feeder.this.getReplicaNameIdPair().getName()); + { + VLSNRange range = vlsnIndex.getRange(); + LoggerUtils.info + (logger, repImpl, String.format + ("Feeder output thread for replica %s started at " + + "VLSN %,d master at %,d (DTVLSN:%,d) " + + "VLSN delta=%,d socket=%s", + replicaNameIdPair.getName(), + feederVLSN.getSequence(), + range.getLast().getSequence(), + repNode.getAnyDTVLSN(), + range.getLast().getSequence() - feederVLSN.getSequence(), + feederReplicaChannel)); + } + + /* Set to indicate an error-initiated shutdown. */ + Error feederOutputError = null; + Exception shutdownException = null; + try { + + /* + * Always start out with a heartbeat; the replica is counting + * on it. + */ + sendHeartbeat(); + final int timeoutMs = repNode.getConfigManager(). + getDuration(RepParams.FEEDER_TIMEOUT); + feederReplicaChannel.setTimeoutMs(timeoutMs); + + while (!checkShutdown()) { + if (feederVLSN.compareTo + (repNode.getCurrentTxnEndVLSN()) >= 0) { + + /* + * The replica is caught up, if we are a Primary stop + * playing that role, and start requesting acks from + * the replica. + */ + repNode.getArbiter().endArbitration(); + } + + doSecurityCheck(); + + writeAvailableEntries(); + + masterStatus.assertSync(); + + sendHeartbeat(); + + if (testDelayMs > 0) { + Thread.sleep(testDelayMs); + } + } + + } catch (IOException e) { + /* Trio of benign "expected" exceptions below. */ + shutdownException = e; /* Expected. */ + } catch (MasterSyncException e) { + /* Expected, shutdown just the feeder. */ + shutdownException = e; /* Expected. */ + } catch (InterruptedException e) { + /* Expected, shutdown just the feeder. */ + shutdownException = e; /* Expected. */ + } catch (ReplicationSecurityException ure) { + shutdownException = ure; + /* dump warning if client is not authorized */ + LoggerUtils.warning(logger, repImpl, + "Unauthorized replication stream " + + "consumer " + ure.getConsumer() + + ", exception: " + ure.getMessage()); + + } catch (RuntimeException e) { + shutdownException = e; + + /* + * An internal error. Shut down the rep node as well for now + * by throwing the exception out of the thread. + * + * In future we may want to close down just the impacted + * Feeder but this is the safe course of action. + */ + LoggerUtils.severe(logger, repImpl, + "Unexpected exception: " + e.getMessage() + + LoggerUtils.getStackTrace(e)); + throw e; + } catch (Error e) { + feederOutputError = e; + repNode.getRepImpl().invalidate(e); + } finally { + if (feederOutputError != null) { + /* Propagate the error, skip cleanup. */ + throw feederOutputError; + } + LoggerUtils.info(logger, repImpl, + "Feeder output for replica " + + replicaNameIdPair.getName() + + " shutdown. 
feeder VLSN: " + feederVLSN + + " currentTxnEndVLSN: " + + repNode.getCurrentTxnEndVLSN()); + + /* + * Shutdown the feeder in its entirety, in case the output + * thread is the only one to notice a problem. The Replica can + * decide to re-establish the connection + */ + shutdown(shutdownException); + cleanup(); + } + } + + /** + * Write as many readily "available" log entries as possible to the + * network. The term "available" is used in the sense that these values + * are typically sitting around in the JE or FS cache especially for + * messages that are recent enough to need timely acknowledgement. The + * method tried to batch multiple entries, to minimize the number of + * network calls permitting better packet utilization and fewer network + * related interrupts, since FEEDER_TCP_NO_DELAY is set on the channel. + * + * The size of the batch is limited by one of: + * + * 1) The number of "available" trailing vlsn entries between the + * current position of the feeder and the end of the log. + * + * 2) The size of the batchWriteBuffer and + * + * 3) The time it takes to accumulate the batch without exceeding the + * minimum of: + * + * a) heartbeat interval, a larger time window typically in effect + * when the replica is not in the ack window. It effectively favors + * batching. + * + * b) (batchNs + time to first ack requiring) transaction, + * typically in effect when the replica is in the ack window and + * more timely acks are needed. + * + * This adaptive time interval strategy effectively adapts the batch + * sizes to the behavior needed of the replica at any given instant + * in time. + */ + private void writeAvailableEntries() + throws DatabaseException, InterruptedException, + IOException, MasterSyncException { + + /* + * Set the initial limit at the heartbeat and constrain it, if the + * batch contains commits that need acks. The batchLimitNS + * calculation is slightly sloppy in that it does not allow for + * disk and network latencies, but that's ok. We don't need to send + * heartbeats exactly on a heartbeat boundary since the code is + * resilient in this regard. It's the feeder timeout that's the + * main worry here; it's 30 sec by default and is set at 10s for + * KVS, so lots of built in slop. + */ + long batchLimitNs = System.nanoTime() + (heartbeatMs * 1000000l); + boolean batchNeedsAcks = false; + int nMessages = 0; + batchBuff.clear(); + + do { + OutputWireRecord record = + feederSource.getWireRecord(feederVLSN, heartbeatMs); + + masterStatus.assertSync(); + + if (record == null) { + /* Caught up -- no more records from feeder source */ + break; + } + + /* apply the filter if it is available */ + if (feederFilter != null) { + record = feederFilter.execute(record, repImpl); + if (record == null) { + /* skip the record, go to the next VLSN */ + feederVLSN = feederVLSN.getNext(); + continue; + } + } + + final long txnId = record.getCommitTxnId(); + final long commitTimestamp = record.getCommitTimeStamp(); + if (commitTimestamp != 0) { + lastCommitTimestamp = commitTimestamp; + lastCommitVLSN = record.getVLSN().getSequence(); + } + if (commitToNetwork && txnId != 0) { + deemAcked(txnId); + } + + if (isArbiterFeeder) { + feederVLSN = record.getVLSN(); + } + + validate(record); + final Message message = createMessage(txnId, record); + + if (!batchNeedsAcks && (txnId != 0)) { + final Commit commit = (Commit) message; + if (commit.getNeedsAck()) { + batchNeedsAcks = true; + /* Tighten the time constraints if needed. 
*/ + final long ackLimitNs = System.nanoTime() + batchNs; + batchLimitNs = ackLimitNs < batchLimitNs ? + ackLimitNs : batchLimitNs; + } + } + assert TestHookExecute.doHookIfSet(writeMessageHook, message); + + nMessages = protocol.bufferWrite(feederReplicaChannel, + batchBuff, + ++nMessages, + message); + + feederVLSN = feederVLSN.getNext(); + } while ((testDelayMs == 0) && /* Don't batch if set by test. */ + (vlsnIndex.getLatestAllocatedVal() >= + feederVLSN.getSequence()) && + ((System.nanoTime() - batchLimitNs) < 0)) ; + + if (batchBuff.position() == 0) { + /* No entries -- timed out waiting for one. */ + return; + } + + /* + * We have collected the largest possible batch given the + * batching constraints, flush it out. + */ + protocol.flushBufferedWrites(feederReplicaChannel, + batchBuff, + nMessages); + } + + /** + * Sends a heartbeat message, if we have exceeded the heartbeat + * interval. + * + * @throws IOException + */ + private void sendHeartbeat() + throws IOException { + + long now = System.currentTimeMillis(); + long interval = now - lastHeartbeatTime; + + if (interval <= heartbeatMs) { + return; + } + + final VLSN vlsn = repNode.getCurrentTxnEndVLSN(); + writeMessage(protocol.new Heartbeat(now, vlsn.getSequence()), + feederReplicaChannel); + lastHeartbeatTime = now; + + if (isArbiterFeeder) { + return; + } + + /* Record the most recent transaction end or clear */ + if (lastCommitTimestamp != 0) { + lastHeartbeatCommitTimestamp = lastCommitTimestamp; + lastHeartbeatCommitVLSN = lastCommitVLSN; + } else { + lastHeartbeatCommitTimestamp = 0; + lastHeartbeatCommitVLSN = 0; + } + + final long lag = vlsn.getSequence() - feederVLSN.getSequence(); + if (nMaxReplicaLag.setMax(lag)) { + nMaxReplicaLagName.set(replicaNameIdPair.getName()); + } + } + + @Override + protected int initiateSoftShutdown() { + + /* + * Provoke an I/O exception that will cause the output thread to + * exit. + */ + RepUtils.shutdownChannel(feederReplicaChannel); + return repNode.getThreadWaitInterval(); + } + + /** + * Converts a log entry into a specific Message to be sent out by the + * Feeder. + * + * @param txnId > 0 if the entry is a LOG_TXN_COMMIT + * + * @return the Message representing the entry + * + * @throws DatabaseException + */ + private Message createMessage(long txnId, + OutputWireRecord wireRecord) + + throws DatabaseException { + + /* A vanilla entry */ + if (txnId == 0) { + return protocol.new Entry(wireRecord); + } + + boolean needsAck; + + MasterTxn ackTxn = repNode.getFeederTxns().getAckTxn(txnId); + SyncPolicy replicaSync = SyncPolicy.NO_SYNC; + if (ackTxn != null) { + ackTxn.stampRepWriteTime(); + long messageTransferMs = ackTxn.messageTransferMs(); + totalTransferDelay += messageTransferMs; + if (messageTransferMs > transferLoggingThresholdMs) { + final String message = + String.format("Feeder for: %s, Txn: %,d " + + " log to rep stream time %,dms." + + " Total transfer time: %,dms.", + replicaNameIdPair.getName(), + txnId, messageTransferMs, + totalTransferDelay); + LoggerUtils.info(logger, repImpl, message); + } + + /* + * Only request an acknowledgment if we are not committing to + * the network and DurabilityQuorum says the acknowledgment + * qualifies + */ + needsAck = !commitToNetwork && + repNode.getDurabilityQuorum().replicaAcksQualify( + replicaNode); + replicaSync = ackTxn.getCommitDurability().getReplicaSync(); + } else { + + /* + * Replica is catching up. Specify the weakest and leave it + * up to the replica. 
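+ * ("Weakest" here means SyncPolicy.NO_SYNC with needsAck == false:
+ * no transaction on the master is waiting on this commit, so the
+ * replica need not acknowledge it or flush it to disk.)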
+ */ + needsAck = false; + replicaSync = SyncPolicy.NO_SYNC; + } + + return protocol.new Commit(needsAck, replicaSync, wireRecord); + } + + /** + * Sanity check the outgoing record. + */ + private void validate(OutputWireRecord record) { + + /* Check that we've fetched the right message. */ + if (!record.getVLSN().equals(feederVLSN)) { + throw EnvironmentFailureException.unexpectedState + ("Expected VLSN:" + feederVLSN + " log entry VLSN:" + + record.getVLSN()); + } + + if (!repImpl.isRepConverted()) { + assert record.verifyNegativeSequences("node=" + nameIdPair); + } + } + + @Override + protected Logger getLogger() { + return logger; + } + } + + private void deemAcked(long txnId) { + final TxnInfo txnInfo = + repNode.getFeederTxns().noteReplicaAck(replicaNode, + txnId); + if (txnInfo == null) { + /* Txn did not call for an ack. */ + return; + } + final VLSN commitVLSN = txnInfo.getCommitVLSN(); + if (commitVLSN == null) { + return; + } + if (commitVLSN.compareTo(replicaTxnEndVLSN) > 0) { + replicaTxnEndVLSN = commitVLSN; + + if (txnInfo.getPendingAcks() == 0) { + /* + * We could do better for ACK all, when we get a majority of + * acks but not all of them but we don't worry about optimizing + * this failure case. The heartbeat response will correct it. + */ + repNode.updateDTVLSN(replicaTxnEndVLSN.getSequence()); + } + } + caughtUp = true; + adviseMasterTransferProgress(); + } + + /** + * Defines the handler for the RepNode thread. The handler invalidates the + * environment by ensuring that an EnvironmentFailureException is in place. + * + * The handler communicates the cause of the exception back to the + * FeederManager's thread by setting the repNodeShutdownException and then + * interrupting the FM thread. The FM thread upon handling the interrupt + * notices the exception and propagates it out in turn to other threads + * that might be coordinating activities with it. + */ + private class IOThreadsHandler implements UncaughtExceptionHandler { + + @Override + public void uncaughtException(Thread t, Throwable e) { + LoggerUtils.severe(logger, repImpl, + "Uncaught exception in feeder thread " + t + + e.getMessage() + + LoggerUtils.getStackTrace(e)); + + /* Bring the exception to the parent thread's attention. */ + feederManager.setRepNodeShutdownException + (EnvironmentFailureException.promote + (repNode.getRepImpl(), + EnvironmentFailureReason.UNCAUGHT_EXCEPTION, + "Uncaught exception in feeder thread:" + t, + e)); + + /* + * Bring it to the FeederManager's attention, it's currently the + * same as the rep node's thread. + */ + repNode.interrupt(); + } + } + + /** + * A marker exception that wraps the real exception. It indicates that the + * impact of wrapped exception can be contained, that is, it's sufficient + * cause to exit the Feeder, but does not otherwise impact the RepNode. + */ + @SuppressWarnings("serial") + public static class ExitException extends Exception { + /* + * If true, cause the remote replica to throw an EFE instead of + * retrying. + */ + final boolean failReplica; + + public ExitException(String message) { + super(message); + this.failReplica = true; + } + + public ExitException(Throwable cause, + boolean failReplica) { + super(cause); + this.failReplica = failReplica; + } + + public boolean failReplica() { + return failReplica; + } + } + + /** For debugging and exception messages. 
*/
+ public String dumpState() {
+ return "feederVLSN=" + feederVLSN +
+ " replicaTxnEndVLSN=" + replicaTxnEndVLSN +
+ ((replicaNode != null) && !replicaNode.getType().isElectable() ?
+ " nodeType=" + replicaNode.getType() :
+ "");
+ }
+
+ /**
+ * Set a test hook that will be called before sending a message using the
+ * protocol's write method, supplying the hook with the message as an
+ * argument.
+ */
+ public void setWriteMessageHook(final TestHook writeMessageHook) {
+ this.writeMessageHook = writeMessageHook;
+ }
+
+ /**
+ * Get the test hook to be called before sending a message using the
+ * protocol's write method.
+ */
+ public TestHook getWriteMessageHook() {
+ return writeMessageHook;
+ }
+
+ /**
+ * Set the value of the write message hook that will be used for newly
+ * created feeders.
+ */
+ public static void setInitialWriteMessageHook(
+ final TestHook initialWriteMessageHook) {
+
+ Feeder.initialWriteMessageHook = initialWriteMessageHook;
+ }
+
+ /* Returns whether the feeder needs to do security checks. */
+ public boolean needSecurityChecks() {
+
+ /* No checks for a non-secure store, indicated by a null authenticator. */
+ if (authenticator == null) {
+ return false;
+ }
+
+ final DataChannel channel = feederReplicaChannel.getChannel();
+ return channel.isTrustCapable() && !channel.isTrusted();
+ }
+
+ /* Authenticates the replication stream consumer and checks authorization. */
+ private void doSecurityCheck() throws ReplicationSecurityException {
+
+ if (!needSecurityChecks()) {
+ return;
+ }
+
+ final long curr = System.currentTimeMillis();
+
+ if ((curr - authenticator.getLastCheckTimeMs()) >= securityChkIntvMs) {
+ checkAccess(authenticator,
+ feederReplicaChannel.getNameIdPair().getName());
+ }
+ }
+
+ /* Re-authenticates the stream consumer if applicable. */
+ private void processReauthenticate(Message msg) {
+
+ /* Ignore if the replica is not an external node. */
+ if (!getReplicaNode().getType().isExternal()) {
+ return;
+ }
+
+ /* Ignore the message if no authentication is enabled. */
+ if (authenticator == null) {
+ return;
+ }
+
+ final Protocol.ReAuthenticate reauth = (Protocol.ReAuthenticate)msg;
+ authenticator.setToken(reauth.getTokenBytes());
+ checkAccess(authenticator,
+ feederReplicaChannel.getNameIdPair().getName());
+ }
+
+ private static void checkAccess(StreamAuthenticator authenticator,
+ String replicaName) {
+ /* Performs both authentication and authorization. */
+ if (!authenticator.checkAccess()) {
+ final String err =
+ "replica " + replicaName +
+ " fails the security check to stream requested data.";
+ throw new ReplicationSecurityException(err, replicaName, null);
+ }
+ }
+}
diff --git a/src/com/sleepycat/je/rep/impl/node/FeederManager.java b/src/com/sleepycat/je/rep/impl/node/FeederManager.java
new file mode 100644
index 0000000..f9e1b89
--- /dev/null
+++ b/src/com/sleepycat/je/rep/impl/node/FeederManager.java
@@ -0,0 +1,1047 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_FEEDERS_CREATED; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_FEEDERS_SHUTDOWN; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_MAX_REPLICA_LAG; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.N_MAX_REPLICA_LAG_NAME; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_DELAY_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_LAST_COMMIT_TIMESTAMP_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_LAST_COMMIT_VLSN_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_VLSN_LAG_MAP; +import static com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition.REPLICA_VLSN_RATE_MAP; +import static java.util.concurrent.TimeUnit.MINUTES; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNTracker; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.stream.MasterStatus.MasterSyncException; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition; +import com.sleepycat.je.rep.utilint.IntRunningTotalStat; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.SizeAwaitMap; +import com.sleepycat.je.rep.utilint.SizeAwaitMap.Predicate; +import com.sleepycat.je.utilint.AtomicLongMapStat; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongAvgRateMapStat; +import com.sleepycat.je.utilint.LongDiffMapStat; +import com.sleepycat.je.utilint.LongMaxZeroStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.StringStat; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * FeedManager is responsible for the creation and management of the Feeders + * used to respond to connections initiated by a Replica. runfeeders() is the + * central loop that listens for replica connections and manages the lifecycle + * of individual Feeders. It's re-entered each time the node becomes a Master + * and is exited when its status changes. + * + * There is a single instance of FeederManager that is created for a + * replication node. 
There are many instances of Feeders per FeederManager. + * Each Feeder instance represents an instance of a connection between the node + * serving as the feeder and the replica. + * + * Note that the FeederManager and the Replica currently reuse the Replication + * node's thread of control. When we implement r2r we will need to revisit the + * thread management to provide for concurrent operation of the FeederManger + * and the Replica. + */ +final public class FeederManager { + + private final RepNode repNode; + + /* + * The queue into which the ServiceDispatcher queues socket channels for + * new Feeder instances. + */ + private final BlockingQueue channelQueue = + new LinkedBlockingQueue(); + + /* + * Feeders are stored in either nascentFeeders or activeFeeders, and not + * both. To avoid deadlock, if locking both collections, lock + * nascentFeeders first and then activeFeeders. + */ + + /* + * Nascent feeders that are starting up and are not yet active. They have + * network connections but have not synched up or completed handshakes. + * They are moved into the active feeder map, once they become active. + */ + private final Set nascentFeeders = + Collections.synchronizedSet(new HashSet()); + + /* + * The collection of active feeders currently feeding replicas. The map is + * indexed by the Replica's node name. Access to this map must be + * synchronized, since it's updated concurrently by the Feeders that share + * it. + * + * A feeder is considered to be active after it has completed the handshake + * sequence with its associated Replica. + * + * Note that the SizeAwaitMap will only wait for feeders that are connected + * to electable replicas, since those are the only ones participating in + * durability decisions. + */ + private final SizeAwaitMap activeFeeders; + + /* + * The number of active ack feeders feeding electable, i.e. acking, nodes. + */ + private final AtomicInteger ackFeeders = new AtomicInteger(0); + + /* + * The number of arbiter feeders; there can only be one currently. It's + * Atomic for consistency with ackFeeders. + */ + private final AtomicInteger arbiterFeeders = new AtomicInteger(0); + private String arbiterFeederName; + + /* + * A test delay introduced in the feeder loop to simulate a loaded master. + * The feeder waits this amount of time after each message is sent. + */ + private int testDelayMs = 0; + + /* Set to true to force a shutdown of the FeederManager. */ + AtomicBoolean shutdown = new AtomicBoolean(false); + + /* + * Non null if the replication node must be shutdown as well. This is + * typically the result of an unexpected exception in the feeder. + */ + private RuntimeException repNodeShutdownException; + + /** + * Used to manage the flushing of the DTVLSN via a null TXN commit + * when appropriate. + */ + private final DTVLSNFlusher dtvlsnFlusher; + + private final Logger logger; + + /* FeederManager statistics. */ + private final StatGroup stats; + private final IntStat nFeedersCreated; + private final IntStat nFeedersShutdown; + private final LongDiffMapStat replicaDelayMap; + private final AtomicLongMapStat replicaLastCommitTimestampMap; + private final AtomicLongMapStat replicaLastCommitVLSNMap; + private final LongDiffMapStat replicaVLSNLagMap; + private final LongAvgRateMapStat replicaVLSNRateMap; + + /* + * The maximum lag across all replicas. Atomic values or synchronization + * are not used for the shared statistic to minimize overheads and the + * resulting occasional inaccuracy in the statics is an acceptable + * tradeoff. 
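+ * (At worst two feeders race on a setMax() update and one sample is
+ * lost; a subsequent heartbeat simply reports the lag again.)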
+ */ + private final LongMaxZeroStat nMaxReplicaLag; + private final StringStat nMaxReplicaLagName; + + /* The poll timeout used when accepting feeder connections. */ + public final long pollTimeoutMs ; + + /* Identifies the Feeder Service. */ + public static final String FEEDER_SERVICE = "Feeder"; + + /** The moving average period in milliseconds */ + private static final long MOVING_AVG_PERIOD_MILLIS = 10000; + + /** + * A test hook, parameterized by feeder's Name/ID pair, that delays CBVLSN + * updates if it throws an IllegalStateException. + */ + private static volatile TestHook delayCBVLSNUpdateHook; + + FeederManager(RepNode repNode) { + this.repNode = repNode; + activeFeeders = new SizeAwaitMap( + repNode.getRepImpl(), new MatchElectableFeeders()); + logger = LoggerUtils.getLogger(getClass()); + stats = new StatGroup(FeederManagerStatDefinition.GROUP_NAME, + FeederManagerStatDefinition.GROUP_DESC); + nFeedersCreated = new IntRunningTotalStat(stats, N_FEEDERS_CREATED); + nFeedersShutdown = new IntRunningTotalStat(stats, N_FEEDERS_SHUTDOWN); + nMaxReplicaLag = new LongMaxZeroStat(stats, N_MAX_REPLICA_LAG); + nMaxReplicaLagName = new StringStat(stats, N_MAX_REPLICA_LAG_NAME); + + /* + * Treat delays and lags as valid for twice the heartbeat interval, to + * allow for minor networking delays when receiving heartbeats + */ + final long validityMillis = 2 * repNode.getHeartbeatInterval(); + replicaDelayMap = + new LongDiffMapStat(stats, REPLICA_DELAY_MAP, validityMillis); + replicaLastCommitTimestampMap = + new AtomicLongMapStat(stats, REPLICA_LAST_COMMIT_TIMESTAMP_MAP); + replicaLastCommitVLSNMap = + new AtomicLongMapStat(stats, REPLICA_LAST_COMMIT_VLSN_MAP); + replicaVLSNLagMap = + new LongDiffMapStat(stats, REPLICA_VLSN_LAG_MAP, validityMillis); + replicaVLSNRateMap = new LongAvgRateMapStat( + stats, REPLICA_VLSN_RATE_MAP, MOVING_AVG_PERIOD_MILLIS, MINUTES); + + pollTimeoutMs = repNode.getConfigManager(). + getDuration(RepParams.FEEDER_MANAGER_POLL_TIMEOUT); + dtvlsnFlusher = new DTVLSNFlusher(); + } + + /** + * A SizeAwaitMap predicate that matches feeders connected to electable + * replicas. + */ + private class MatchElectableFeeders implements Predicate { + @Override + public boolean match(final Feeder value) { + + /* The replica node might be null during unit testing */ + final RepNodeImpl replica = value.getReplicaNode(); + return (replica != null) && + repNode.getDurabilityQuorum().replicaAcksQualify(replica); + } + } + + /** + * Returns the statistics associated with the FeederManager. + * + * @return the statistics + */ + public StatGroup getFeederManagerStats(StatsConfig config) { + + synchronized (stats) { + return stats.cloneGroup(config.getClear()); + } + } + + /* Get the protocol stats for this FeederManager. */ + public StatGroup getProtocolStats(StatsConfig config) { + /* Aggregate stats that have not yet been aggregated. */ + StatGroup protocolStats = + new StatGroup(BinaryProtocolStatDefinition.GROUP_NAME, + BinaryProtocolStatDefinition.GROUP_DESC); + synchronized (activeFeeders) { + for (Feeder feeder : activeFeeders.values()) { + protocolStats.addAll(feeder.getProtocolStats(config)); + } + } + + return protocolStats; + } + + /* Reset the feeders' stats of this FeederManager. */ + public void resetStats() { + synchronized (stats) { + stats.clear(); + } + synchronized (activeFeeders) { + for (Feeder feeder : activeFeeders.values()) { + feeder.resetStats(); + } + } + } + + /** + * Accumulates statistics from a terminating feeder. 
+ * @param feederStats stats of feeder + */ + void incStats(StatGroup feederStats) { + synchronized (stats) { + stats.addAll(feederStats); + } + } + + public int getTestDelayMs() { + return testDelayMs; + } + + public void setTestDelayMs(int testDelayMs) { + this.testDelayMs = testDelayMs; + } + + /** + * Returns the RepNode associated with the FeederManager + * @return + */ + RepNode repNode() { + return repNode; + } + + /** + * Returns the Feeder associated with the node, if such a feeder is + * currently active. + */ + public Feeder getFeeder(String nodeName) { + return activeFeeders.get(nodeName); + } + + public Feeder getArbiterFeeder() { + synchronized (activeFeeders) { + return activeFeeders.get(arbiterFeederName); + } + } + + /* + * For test use only. + */ + public Feeder putFeeder(String nodeName, Feeder feeder) { + /* + * Can't check for an electable node since the feeder object can be + * mocked for testing so it does not have a rep node. + */ + ackFeeders.incrementAndGet(); + return activeFeeders.put(nodeName, feeder); + } + + public LongMaxZeroStat getnMaxReplicaLag() { + return nMaxReplicaLag; + } + + public StringStat getnMaxReplicaLagName() { + return nMaxReplicaLagName; + } + + LongDiffMapStat getReplicaDelayMap() { + return replicaDelayMap; + } + + AtomicLongMapStat getReplicaLastCommitTimestampMap() { + return replicaLastCommitTimestampMap; + } + + AtomicLongMapStat getReplicaLastCommitVLSNMap() { + return replicaLastCommitVLSNMap; + } + + LongDiffMapStat getReplicaVLSNLagMap() { + return replicaVLSNLagMap; + } + + LongAvgRateMapStat getReplicaVLSNRateMap() { + return replicaVLSNRateMap; + } + + void setRepNodeShutdownException(RuntimeException rNSE) { + this.repNodeShutdownException = rNSE; + } + + /** + * The numbers of Replicas currently "active" with this feeder. Active + * currently means they are connected. It does not make any guarantees + * about where they are in the replication stream. They may, for example, + * be too far behind to participate in timely acks. + * + * @return the active replica count + */ + public int activeReplicaCount() { + return activeFeeders.size(); + } + + public int activeAckReplicaCount() { + return ackFeeders.get(); + } + + public int activeAckArbiterCount() { + return arbiterFeeders.get(); + } + + /** + * Returns the set of Replicas that are currently active with this feeder. + * A replica is active if it has completed the handshake sequence. + * + * @return the set of replica node names + */ + public Set activeReplicas() { + synchronized (activeFeeders) { + + /* + * Create a copy to avoid inadvertent concurrency conflicts, + * since the keySet is a view of the underlying map. + */ + return new HashSet<>(activeFeeders.keySet()); + } + } + + /** + * Returns the set of active replicas and arbiters, that are currently + * active with this feeder and are supplying acknowledgments. A replica is + * active if it has completed the handshake sequence. An Arbiter is only + * returned if it's in active arbitration. + * + * @param includeArbiters include active arbiters in the list of returned + * node names if true; exclude arbiters otherwise. 
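+ * (An arbiter is counted only while it is in active arbitration;
+ * see the isActive() check in the implementation.)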
+ * + * @return the set of replica and if includeArbiters active arbiter node names + */ + public Set activeAckReplicas(boolean includeArbiters) { + final Set nodeNames = new HashSet(); + synchronized (activeFeeders) { + for (final Entry entry : + activeFeeders.entrySet()) { + final Feeder feeder = entry.getValue(); + + /* The replica node should be non-null for an active feeder */ + final RepNodeImpl replica = feeder.getReplicaNode(); + if (!replica.getType().isElectable()) { + continue; + } + + if (replica.getType().isArbiter()) { + if (!includeArbiters || + !feeder.getRepNode().getArbiter().isActive()) { + /* Skip the arbiter. */ + continue; + } + } + + nodeNames.add(entry.getKey()); + } + } + return nodeNames; + } + + public Map activeReplicasMap() { + synchronized (activeFeeders){ + return new HashMap(activeFeeders); + } + } + + /** + * Transitions a Feeder to being active, so that it can be used in + * considerations relating to commit acknowledgments and decisions about + * choosing feeders related to system load. + * + * @param feeder the feeder being transitioned. + */ + void activateFeeder(Feeder feeder) { + synchronized (nascentFeeders) { + synchronized (activeFeeders) { + boolean removed = nascentFeeders.remove(feeder); + if (feeder.isShutdown()) { + return; + } + assert(removed); + String replicaName = feeder.getReplicaNameIdPair().getName(); + assert(!feeder.getReplicaNameIdPair().equals(NameIdPair.NULL)); + Feeder dup = activeFeeders.get(replicaName); + if ((dup != null) && !dup.isShutdown()) { + throw EnvironmentFailureException. + unexpectedState(repNode.getRepImpl(), + feeder.getReplicaNameIdPair() + + " is present in both nascent and " + + "active feeder sets"); + } + activeFeeders.put(replicaName, feeder); + if (feeder.getReplicaNode().getType().isArbiter()) { + assert(arbiterFeeders.get() == 0); + arbiterFeeders.incrementAndGet(); + arbiterFeederName = replicaName; + + } else if (feeder.getReplicaNode().getType().isElectable()) { + ackFeeders.incrementAndGet(); + } + + MasterTransfer xfr = repNode.getActiveTransfer(); + if (xfr != null) { + xfr.addFeeder(feeder); + } + } + } + } + + /** + * Remove the feeder from the sets used to track it. Invoked when a feeder + * is shutdown. + * + * @param feeder + */ + void removeFeeder(Feeder feeder) { + assert(feeder.isShutdown()); + final String replicaName = feeder.getReplicaNameIdPair().getName(); + synchronized (nascentFeeders) { + synchronized (activeFeeders) { + nascentFeeders.remove(feeder); + if (activeFeeders.remove(replicaName) != null) { + if (arbiterFeederName != null && + arbiterFeederName.equals(replicaName)) { + arbiterFeeders.decrementAndGet(); + arbiterFeederName = null; + } else if (feeder.getReplicaNode().getType().isElectable()) { + ackFeeders.decrementAndGet(); + } + } + } + } + + final RepNodeImpl node = feeder.getReplicaNode(); + if ((node != null) && node.getType().hasTransientId()) { + repNode.removeTransientNode(node); + } + } + + /** + * Clears and shuts down the runFeeders by inserting a special EOF marker + * value into the queue. + */ + void shutdownQueue() { + if (!repNode.isShutdown()) { + throw EnvironmentFailureException.unexpectedState + ("Rep node is still active"); + } + channelQueue.clear(); + /* Add special entry so that the channelQueue.poll operation exits. */ + channelQueue.add(RepUtils.CHANNEL_EOF_MARKER); + } + + /** + * The core feeder listener loop that is run either in a Master node, or in + * a Replica that is serving as a Feeder to other Replica nodes. 
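+ * Besides accepting connections (described next), the loop uses each
+ * poll timeout to perform master housekeeping: it updates the
+ * master's own CBVLSN and flushes the DTVLSN when warranted.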
The core + * loop accepts connections from Replicas as they come in and establishes a + * Feeder on that connection. + * + * The loop can be terminated for one of the following reasons: + * + * 1) A change in Masters. + * + * 2) A forced shutdown, via a thread interrupt. + * + * 3) A server socket level exception. + * + * The timeout on the accept is used to ensure that the check is done at + * least once per timeout period. + */ + void runFeeders() + throws DatabaseException { + + if (shutdown.get()) { + throw EnvironmentFailureException.unexpectedState + ("Feeder manager was shutdown"); + } + Exception feederShutdownException = null; + LoggerUtils.info(logger, repNode.getRepImpl(), + "Feeder manager accepting requests."); + + /* Init GlobalCBVLSN using minJEVersion in the rep group DB. */ + repNode.globalCBVLSN.init(repNode, repNode.getMinJEVersion()); + + /* This updater represents the masters's local cbvlsn, which the master + updates directly. */ + final LocalCBVLSNUpdater updater = new LocalCBVLSNUpdater( + repNode.getNameIdPair(), repNode.getNodeType(), repNode); + final LocalCBVLSNTracker tracker = repNode.getCBVLSNTracker(); + + try { + + /* + * Ensure that the Global CBVLSN is initialized for the master when + * it first comes up; it's subsequently maintained in the loop + * below. + */ + updater.updateForMaster(tracker); + + repNode.getServiceDispatcher(). + register(FEEDER_SERVICE, channelQueue); + + /* + * The Feeder is ready for business, indicate that the node is + * ready by counting down the latch and releasing any waiters. + */ + repNode.getReadyLatch().countDown(); + + while (true) { + final DataChannel feederReplicaChannel = + channelQueue.poll(pollTimeoutMs, TimeUnit.MILLISECONDS); + + if (feederReplicaChannel == RepUtils.CHANNEL_EOF_MARKER) { + LoggerUtils.info(logger, repNode.getRepImpl(), + "Feeder manager soft shutdown."); + return; + } + + repNode.getMasterStatus().assertSync(); + if (feederReplicaChannel == null) { + if (repNode.isShutdownOrInvalid()) { + /* Timeout and shutdown request */ + LoggerUtils.info(logger, repNode.getRepImpl(), + "Feeder manager forced shutdown."); + return; + } + + /* + * Simulate extending the polling period for channel input + * by delay updating the CBVLSN, to allow tests to control + * the timing. + */ + try { + assert TestHookExecute.doHookIfSet( + delayCBVLSNUpdateHook, repNode.getNameIdPair()); + } catch (IllegalStateException e) { + continue; + } + + /* + * Take this opportunity to update this node's CBVLSN The + * replicas are sending in their CBVLSNs through the + * heartbeat responses, but a master does not send any + * heartbeat responses, and needs a different path to + * update its local CBVLSN. + */ + updater.updateForMaster(tracker); + + /* Flush the DTVLSN if it's warranted. */ + dtvlsnFlusher.flush(); + + /* + * Opportunistically attempt to update minJEVersion. + * This must be done while the feeder is active. + */ + repNode.globalCBVLSN.setDefunctJEVersion(repNode); + + continue; + } + + nFeedersCreated.increment(); + try { + Feeder feeder = new Feeder(this, feederReplicaChannel); + nascentFeeders.add(feeder); + feeder.startFeederThreads(); + } catch (IOException e) { + + /* + * Indicates a feeder socket level exception. + */ + LoggerUtils.fine + (logger, repNode.getRepImpl(), + "Feeder I/O exception: " + e.getMessage()); + try { + feederReplicaChannel.close(); + } catch (IOException e1) { + LoggerUtils.fine + (logger, repNode.getRepImpl(), + "Exception during cleanup." 
+ e.getMessage()); + } + continue; + } + } + } catch (MasterSyncException e) { + LoggerUtils.info(logger, repNode.getRepImpl(), + "Master change: " + e.getMessage()); + + feederShutdownException = new UnknownMasterException("Node " + + repNode.getRepImpl().getName() + + " is not a master anymore"); + } catch (InterruptedException e) { + if (this.repNodeShutdownException != null) { + + /* + * The interrupt was issued to propagate an exception from one + * of the Feeder threads. It's not a normal exit. + */ + LoggerUtils.warning(logger, repNode.getRepImpl(), + "Feeder manager unexpected interrupt"); + throw repNodeShutdownException; /* Terminate the rep node */ + } + if (repNode.isShutdown()) { + LoggerUtils.info(logger, repNode.getRepImpl(), + "Feeder manager interrupted for shutdown"); + return; + } + feederShutdownException = e; + LoggerUtils.warning(logger, repNode.getRepImpl(), + "Feeder manager unexpected interrupt"); + } finally { + repNode.resetReadyLatch(feederShutdownException); + repNode.getServiceDispatcher().cancel(FEEDER_SERVICE); + shutdownFeeders(feederShutdownException); + LoggerUtils.info(logger, repNode.getRepImpl(), + "Feeder manager exited. CurrentTxnEnd VLSN: " + + repNode.getCurrentTxnEndVLSN()); + } + } + + /** + * Shuts down all the feeders managed by the FeederManager + * + * @param feederShutdownException the exception provoking the shutdown. + */ + private void shutdownFeeders(Exception feederShutdownException) { + boolean changed = shutdown.compareAndSet(false, true); + if (!changed) { + return; + } + + try { + /* Copy sets for safe iteration in the presence of deletes.*/ + final Set feederSet; + synchronized (nascentFeeders) { + synchronized (activeFeeders) { + feederSet = new HashSet(activeFeeders.values()); + feederSet.addAll(nascentFeeders); + } + } + + for (Feeder feeder : feederSet) { + nFeedersShutdown.increment(); + feeder.shutdown(feederShutdownException); + } + } finally { + if (feederShutdownException == null) { + feederShutdownException = + new IllegalStateException("FeederManager shutdown"); + /* + * Release any threads that may have been waiting, but + * don't throw any exception + */ + activeFeeders.clear(null); + } else { + activeFeeders.clear(feederShutdownException); + } + nascentFeeders.clear(); + } + } + + /** + * Shuts down a specific feeder. It's typically done in response to the + * removal of a member from the group. + */ + public void shutdownFeeder(RepNodeImpl node) { + Feeder feeder = activeFeeders.get(node.getName()); + if (feeder == null) { + return; + } + nFeedersShutdown.increment(); + feeder.shutdown(null); + } + + /** + * Block until the required number of electable feeders/replica connections + * are established. Used for establishing durability quorums. Since this is + * counting feeder/replica connections, requiredReplicaCount does not + * include the master. + * + * In the future this could be improved by also taking account the position + * of each feeder. If a feeder is lagging far behind the master and this + * is likely to prevent commit, we may want to reject the transaction at + * the outset to reduce the number of wasted txns/aborts. A special case + * is when the replica is in out-of-disk mode and not acking at all. 
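
The runFeeders loop above terminates cleanly when shutdownQueue() enqueues RepUtils.CHANNEL_EOF_MARKER: the bounded poll doubles as a housekeeping tick, and the sentinel turns a blocking consumer into one that can be shut down without interruption. A self-contained sketch of the pattern using plain java.util.concurrent types rather than JE's channel queue:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class SentinelLoop {
        // A unique object that can never be confused with real work.
        static final Object EOF_MARKER = new Object();
        static final BlockingQueue<Object> queue = new LinkedBlockingQueue<>();

        public static void main(String[] args) throws InterruptedException {
            Thread consumer = new Thread(() -> {
                try {
                    while (true) {
                        // Bounded poll so periodic housekeeping can run
                        // even when no work arrives.
                        Object item = queue.poll(100, TimeUnit.MILLISECONDS);
                        if (item == EOF_MARKER) {
                            return;             // soft shutdown
                        }
                        if (item == null) {
                            continue;           // timeout: housekeeping slot
                        }
                        System.out.println("work: " + item);
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            consumer.start();
            queue.add("task-1");
            queue.add(EOF_MARKER);              // request shutdown
            consumer.join();
        }
    }
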
+ */ + boolean awaitFeederReplicaConnections( + int requiredReplicaCount, long insufficientReplicasTimeout) + throws InterruptedException { + return activeFeeders.sizeAwait(requiredReplicaCount, + insufficientReplicasTimeout, + TimeUnit.MILLISECONDS); + } + + /* + * For debugging help, and for expanded exception messages, dump feeder + * related state. If acksOnly is true, only include information about + * feeders for replicas that supply acknowledgments. + */ + public String dumpState(final boolean acksOnly) { + StringBuilder sb = new StringBuilder(); + synchronized (activeFeeders) { + Set> feeds = activeFeeders.entrySet(); + if (feeds.size() == 0) { + sb.append("No feeders."); + } else { + sb.append("Current feeds:"); + for (Map.Entry feedEntry : feeds) { + final Feeder feeder = feedEntry.getValue(); + + /* + * Ignore secondary and external nodes if only want nodes + * that provide acknowledgments + */ + if (acksOnly) { + final NodeType nodeType = + feeder.getReplicaNode().getType(); + if (nodeType.isSecondary() || nodeType.isExternal()) { + continue; + } + } + sb.append("\n ").append(feedEntry.getKey()).append(": "); + sb.append(feeder.dumpState()); + } + } + } + return sb.toString(); + } + + /** + * Returns a count of the number of feeders whose replicas are counted in + * durability decisions and have acknowledged txn-end VLSNs >= the + * commitVLSN argument. + * + * @param commitVLSN the commitVLSN being checked + */ + public int getNumCurrentAckFeeders(VLSN commitVLSN) { + final DurabilityQuorum durabilityQuorum = + repNode.getDurabilityQuorum(); + int count = 0; + synchronized (activeFeeders) { + for (Feeder feeder : activeFeeders.values()) { + if ((commitVLSN.compareTo(feeder.getReplicaTxnEndVLSN()) <= 0) + && durabilityQuorum.replicaAcksQualify( + feeder.getReplicaNode())) { + count++; + } + } + return count; + } + } + + /** + * Update the Master's DTVLSN if we can conclude based upon the state of + * the replicas that the DTVLSN needs to be advanced. + * + * This method is invoked when a replica heartbeat reports a more recent + * txn VLSN. This (sometimes) redundant form of DTVLS update is useful in + * circumstances when the value could not be maintained via the usual ack + * response processing: + * + * 1) The application is using no ack transactions explicitly. + * + * 2) There were ack transaction timeouts due to network problems and the + * acks were never received or were received after the timeout had expired. + */ + public void updateDTVLSN(long heartbeatVLSN) { + + final long currDTVLSN = repNode.getDTVLSN(); + if (heartbeatVLSN <= currDTVLSN) { + /* Nothing to update, a lagging replica that's catching up */ + return; + } + + final DurabilityQuorum durabilityQuorum = repNode.getDurabilityQuorum(); + final int durableAckCount = durabilityQuorum. + getCurrentRequiredAckCount(ReplicaAckPolicy.SIMPLE_MAJORITY); + + long min = Long.MAX_VALUE; + + synchronized (activeFeeders) { + + int ackCount = 0; + for (Feeder feeder : activeFeeders.values()) { + + if (!durabilityQuorum. + replicaAcksQualify(feeder.getReplicaNode())) { + continue; + } + + final long replicaTxnVLSN = + feeder.getReplicaTxnEndVLSN().getSequence(); + + if (replicaTxnVLSN <= currDTVLSN) { + continue; + } + + if (replicaTxnVLSN < min) { + min = replicaTxnVLSN; + } + + if (++ackCount >= durableAckCount) { + /* + * If a majority of replicas have vlsns >= durable txn + * vlsn, advance the DTVLSN. + */ + repNode.updateDTVLSN(min); + return; + } + } + + /* DTVLSN unchanged. 
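
updateDTVLSN above advances the durable VLSN to the smallest txn-end VLSN among the first durableAckCount qualifying replicas it encounters. The largest VLSN known to be replayed by at least a quorum of replicas is the quorum-th largest acknowledged VLSN; the loop settles for the minimum over the first quorum qualifying feeders it sees, which is a conservative (never larger) choice. A worked sketch of the quorum computation, with made-up numbers:

    import java.util.Arrays;

    public class DtvlsnSketch {
        // Returns the highest VLSN acknowledged by at least 'quorum'
        // replicas, i.e. the quorum-th largest entry, or -1 if the
        // quorum cannot be met.
        static long durableVlsn(long[] replicaVlsns, int quorum) {
            if (replicaVlsns.length < quorum) {
                return -1;
            }
            long[] sorted = replicaVlsns.clone();
            Arrays.sort(sorted);                       // ascending
            return sorted[sorted.length - quorum];     // quorum-th largest
        }

        public static void main(String[] args) {
            // Three replicas; with a quorum of 2, VLSN 95 is durable:
            // two replicas (at 100 and 95) have replayed through it.
            long[] acks = {100, 95, 40};
            System.out.println(durableVlsn(acks, 2));  // prints 95
        }
    }
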
*/ + return; + } + } + + /** + * Set a test hook, parameterized by feeder's Name/ID pair, that delays + * CBVLSN updates if it throws an IllegalStateException. + */ + public static void setDelayCBVLSNUpdateHook(TestHook hook) { + delayCBVLSNUpdateHook = hook; + } + + /** + * Convenience constant used by the DTVLSN flusher when committing the null + * transaction. + */ + private static TransactionConfig NULL_TXN_CONFIG = new TransactionConfig(); + static { + NULL_TXN_CONFIG.setDurability(new Durability(SyncPolicy.WRITE_NO_SYNC, + SyncPolicy.WRITE_NO_SYNC, + ReplicaAckPolicy.NONE)); + } + + /** + * Writes a null (no modifications) commit record when it detects that the + * DTVLSN is ahead of the persistent DTVLSN and needs to be updated. + * + * Note that without this mechanism, the in-memory DTVLSN would always be + * ahead of the persisted VLSN, since in general DTVLSN(vlsn) < vlsn. That + * is, the commit or abort log record containing the DTVLSN always has a + * more recent VLSN than the one it contains. + */ + private class DTVLSNFlusher { + + /** + * The number of feeder ticks for which the in-memory DTVLSN must be + * stable before it's written to the log as a null TXN. We are using + * this "tick" indirection to avoid yet another call to the clock. A + * "tick" in this context is the FEEDER_MANAGER_POLL_TIMEOUT. + */ + final int targetStableTicks; + + /** + * The number of ticks for which the DTVLSN has been stable. + */ + private int stableTicks = 0; + + public DTVLSNFlusher() { + final int heartbeatMs = repNode.getConfigManager(). + getInt(RepParams.HEARTBEAT_INTERVAL); + targetStableTicks = + (int) Math.max(1, (2 * heartbeatMs) / pollTimeoutMs); + } + + /** + * Used to track whether the DTVLSN has been stable enough to write + * out. While it's changing application commits and aborts are writing + * it out, so no need to write it here. + */ + private long stableDTVLSN = VLSN.NULL_VLSN_SEQUENCE; + + /** + * Update each time we actually persist the DTVLSN via a null txn. It + * represents the DTVLSN that's been written out. + */ + private long persistedDTVLSN = VLSN.NULL_VLSN_SEQUENCE; + + /* Identifies the Txn that was used to persist the DTVLSN. */ + private long nullTxnVLSN = VLSN.NULL_VLSN_SEQUENCE; + + /** + * Persists the DTVLSN if necessary. The DTVLSN is persisted if the + * version in memory is more current than the version on disk and has + * not changed for targetStableTicks. + */ + void flush() { + final long dtvlsn = repNode.getDTVLSN(); + + if (dtvlsn == nullTxnVLSN) { + /* Don't save VLSN from null transaction as DTVLSN */ + return; + } + + if (dtvlsn > stableDTVLSN) { + stableTicks = 0; + stableDTVLSN = dtvlsn; + + /* The durable DTVLSN is being actively updated. */ + return; + } + + if (dtvlsn < stableDTVLSN) { + /* Enforce the invariant that the DTVLSN cannot decrease. */ + throw new IllegalStateException("The DTVLSN sequence cannot decrease" + + "current DTVLSN:" + dtvlsn + + " previous DTVLSN:" + stableDTVLSN); + } + + /* DTVLSN == stableDTVLSN */ + if (++stableTicks <= targetStableTicks) { + /* + * Increase the stable tick counter. it has not been stable + * long enough. + */ + return; + } + + stableTicks = 0; + + /* dtvlsn has been stable */ + if (stableDTVLSN > persistedDTVLSN) { + if (repNode.getActiveTransfer() != null) { + /* + * Don't attempt writing a transaction. while a transfer is + * in progress and txns will be blocked. 
+ */ + LoggerUtils.info(logger, repNode.getRepImpl(), + "Skipped null txn updating DTVLSN: " + + dtvlsn + " Master transfer in progress"); + return; + } + final RepImpl repImpl = repNode.getRepImpl(); + final MasterTxn nullTxn = + MasterTxn.createNullTxn(repImpl, NULL_TXN_CONFIG, + repImpl.getNameIdPair()); + /* + * We don't want to wait for any reason, if the txn fails, + * we can try later. + */ + nullTxn.setTxnTimeout(1); + try { + nullTxn.commit(); + LoggerUtils.fine(logger, repNode.getRepImpl(), + "Persist DTVLSN: " + dtvlsn + + " at VLSN: " + nullTxn.getCommitVLSN() + + " via null transaction:" + nullTxn.getId()); + nullTxnVLSN = nullTxn.getCommitVLSN().getSequence(); + persistedDTVLSN = dtvlsn; + stableDTVLSN = persistedDTVLSN; + } catch (Exception e) { + nullTxn.abort(); + LoggerUtils.warning(logger, repNode.getRepImpl(), + "Failed to write null txn updating DTVLSN; " + + e.getMessage()); + } + } + } + } + +} diff --git a/src/com/sleepycat/je/rep/impl/node/FeederManagerStatDefinition.java b/src/com/sleepycat/je/rep/impl/node/FeederManagerStatDefinition.java new file mode 100644 index 0000000..249b09a --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/FeederManagerStatDefinition.java @@ -0,0 +1,129 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.utilint.StatDefinition.StatType.CUMULATIVE; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for HA Replay statistics. + */ +public class FeederManagerStatDefinition { + + public static final String GROUP_NAME = "FeederManager"; + public static final String GROUP_DESC = + "A feeder is a replication stream connection between a master and " + + "replica nodes."; + + public static final String N_FEEDERS_CREATED_NAME = + "nFeedersCreated"; + public static final String N_FEEDERS_CREATED_DESC = + "Number of Feeder threads since this node was started."; + public static final StatDefinition N_FEEDERS_CREATED = + new StatDefinition( + N_FEEDERS_CREATED_NAME, + N_FEEDERS_CREATED_DESC); + + public static final String N_FEEDERS_SHUTDOWN_NAME = + "nFeedersShutdown"; + public static final String N_FEEDERS_SHUTDOWN_DESC = + "Number of Feeder threads that were shut down, either because this " + + "node, or the Replica terminated the connection."; + public static final StatDefinition N_FEEDERS_SHUTDOWN = + new StatDefinition( + N_FEEDERS_SHUTDOWN_NAME, + N_FEEDERS_SHUTDOWN_DESC); + + /* Naming conflict -- use SNAME suffix in this one case. 
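
DTVLSNFlusher.flush above is a debounce: the null txn is written only after the in-memory DTVLSN has sat unchanged for targetStableTicks polling periods, roughly two heartbeat intervals. The gating logic in isolation (constants are illustrative, not JE's defaults):

    // Debounce sketch: act only after a value has been stable for N
    // observations ("ticks"), where one tick is one poll timeout.
    class StabilityGate {
        private final int targetStableTicks;
        private int stableTicks = 0;
        private long stableValue = Long.MIN_VALUE;

        StabilityGate(int heartbeatMs, int pollTimeoutMs) {
            // At least one tick; otherwise ~two heartbeat intervals.
            this.targetStableTicks =
                Math.max(1, (2 * heartbeatMs) / pollTimeoutMs);
        }

        // Called once per tick; returns true when the observed value
        // has been stable long enough to be worth persisting.
        boolean observe(long value) {
            if (value != stableValue) {
                stableValue = value;
                stableTicks = 0;     // restart the stability window
                return false;
            }
            return ++stableTicks > targetStableTicks;
        }
    }
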
*/ + public static final String N_MAX_REPLICA_LAG_SNAME = + "nMaxReplicaLag"; + public static final String N_MAX_REPLICA_LAG_DESC = + "The maximum number of VLSNs by which a replica is lagging."; + public static final StatDefinition N_MAX_REPLICA_LAG = + new StatDefinition( + N_MAX_REPLICA_LAG_SNAME, + N_MAX_REPLICA_LAG_DESC); + + public static final String N_MAX_REPLICA_LAG_NAME_NAME = + "nMaxReplicaLagName"; + public static final String N_MAX_REPLICA_LAG_NAME_DESC = + "The name of the replica with the maximal lag."; + public static final StatDefinition N_MAX_REPLICA_LAG_NAME = + new StatDefinition( + N_MAX_REPLICA_LAG_NAME_NAME, + N_MAX_REPLICA_LAG_NAME_DESC); + + public static final String REPLICA_DELAY_MAP_NAME = + "replicaDelayMap"; + public static final String REPLICA_DELAY_MAP_DESC = + "A map from replica node name to the delay, in milliseconds, between " + + "when a transaction was committed on the master and when the " + + "master learned that the change was processed on the replica, if " + + "known. Returns an empty map if this node is not the master."; + public static final StatDefinition REPLICA_DELAY_MAP = + new StatDefinition( + REPLICA_DELAY_MAP_NAME, + REPLICA_DELAY_MAP_DESC, + CUMULATIVE); + + public static final String REPLICA_LAST_COMMIT_TIMESTAMP_MAP_NAME = + "replicaLastCommitTimestampMap"; + public static final String REPLICA_LAST_COMMIT_TIMESTAMP_MAP_DESC = + "A map from replica node name to the commit timestamp of the last " + + "committed transaction that was processed on the replica, if " + + "known. Returns an empty map if this node is not the master."; + public static final StatDefinition REPLICA_LAST_COMMIT_TIMESTAMP_MAP = + new StatDefinition( + REPLICA_LAST_COMMIT_TIMESTAMP_MAP_NAME, + REPLICA_LAST_COMMIT_TIMESTAMP_MAP_DESC, + CUMULATIVE); + + public static final String REPLICA_LAST_COMMIT_VLSN_MAP_NAME = + "replicaLastCommitVLSNMap"; + public static final String REPLICA_LAST_COMMIT_VLSN_MAP_DESC = + "A map from replica node name to the VLSN of the last committed " + + "transaction that was processed on the replica, if known. Returns" + + " an empty map if this node is not the master."; + public static final StatDefinition REPLICA_LAST_COMMIT_VLSN_MAP = + new StatDefinition( + REPLICA_LAST_COMMIT_VLSN_MAP_NAME, + REPLICA_LAST_COMMIT_VLSN_MAP_DESC, + CUMULATIVE); + + public static final String REPLICA_VLSN_LAG_MAP_NAME = + "replicaVLSNLagMap"; + public static final String REPLICA_VLSN_LAG_MAP_DESC = + "A map from replica node name to the lag, in VLSNs, between the " + + "replication state of the replica and the master, if known. " + + "Returns an empty map if this node is not the master."; + public static final StatDefinition REPLICA_VLSN_LAG_MAP = + new StatDefinition( + REPLICA_VLSN_LAG_MAP_NAME, + REPLICA_VLSN_LAG_MAP_DESC, + CUMULATIVE); + + public static final String REPLICA_VLSN_RATE_MAP_NAME = + "replicaVLSNRateMap"; + public static final String REPLICA_VLSN_RATE_MAP_DESC = + "A map from replica node name to a moving average of the rate, in " + + "VLSNs per minute, that the replica is processing replication " + + "data, if known. 
Returns an empty map if this node is not the " + + "master."; + public static final StatDefinition REPLICA_VLSN_RATE_MAP = + new StatDefinition( + REPLICA_VLSN_RATE_MAP_NAME, + REPLICA_VLSN_RATE_MAP_DESC); +} + diff --git a/src/com/sleepycat/je/rep/impl/node/JoinGroupTimeouts.java b/src/com/sleepycat/je/rep/impl/node/JoinGroupTimeouts.java new file mode 100644 index 0000000..3f16fd6 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/JoinGroupTimeouts.java @@ -0,0 +1,127 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.rep.impl.RepParams.ALLOW_UNKNOWN_STATE_ENV_OPEN; +import static com.sleepycat.je.rep.impl.RepParams.ENV_SETUP_TIMEOUT; +import static com.sleepycat.je.rep.impl.RepParams.ENV_UNKNOWN_STATE_TIMEOUT; + +import com.sleepycat.je.dbi.DbConfigManager; + +/** + * Encapsulates the handling of timeouts: ENV_SETUP_TIMEOUT and + * ENV_UNKNOWN_STATE_TIMEOUT, used when a replicated environment handle is + * opened and a node joins the group. + *
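
The statistics defined above surface through the public stats API. A usage sketch, assuming an already-open ReplicatedEnvironment handle and the standard getRepStats entry point; the getter names are assumed to follow the JE convention of "get" plus the stat name, e.g. getNFeedersCreated:

    import com.sleepycat.je.StatsConfig;
    import com.sleepycat.je.rep.ReplicatedEnvironment;
    import com.sleepycat.je.rep.ReplicatedEnvironmentStats;

    public class FeederStatsDump {
        // 'env' is assumed to be an already-open handle on the master.
        static void dump(ReplicatedEnvironment env) {
            ReplicatedEnvironmentStats stats =
                env.getRepStats(StatsConfig.DEFAULT);
            System.out.println("feeders created:  "
                               + stats.getNFeedersCreated());
            System.out.println("feeders shutdown: "
                               + stats.getNFeedersShutdown());
            // Per-replica delay map, keyed by node name; empty on a
            // non-master node.
            System.out.println("replica delays: "
                               + stats.getReplicaDelayMap());
        }
    }
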
        + * There are three timeouts that are relevant at the time a Replica joins a + * group. They are listed below in the order in which each is applied. + * + * 1) The ENV_UNKNOWN_STATE_TIMEOUT which is basically an election timeout. If + * set and an election is not concluded in this time period, the environment + * handle is opened in the unknown state. + * + * 2) The ENV_SETUP_TIMEOUT. This timeout determines the maximum amount of time + * allowed to hold an election and sync up with a master if the joins as a + * replica. + * + * 3) The consistency timeout as determined by the consistency policy in the + * event that the node joins as a replica. + * + * The first two timeouts are managed by this class. RepNode.joinGroup uses the + * timeouts supplied by the getTimeout() method to wait for each timeout if + * both are specified. + * + * joinGroup first waits up to the unknown state timeout for an election to be + * concluded. If the node is not in the unknown state at the timeout, + * it advances to the env setup timeout by invoking setSetupTimeout() and + * proceeds to wait up to this timeout for the syncup activity to complete. + * + */ +class JoinGroupTimeouts { + + /* + * The timeout associated with opening a handle in the unknown state. It's + * max int if the handle should not be opened in that state. + */ + private final int unknownStateTimeout; + + /* The timeout associated with the total setup of the handle. */ + private final int setupTimeout; + + /* + * The timeout that's currently active, it can be either of the two values + * values above. + */ + private int timeout; + + /* used as the basis for determining time limits from timeouts. */ + private final long start = System.currentTimeMillis(); + + JoinGroupTimeouts(DbConfigManager configManager) { + setupTimeout = configManager.getDuration(ENV_SETUP_TIMEOUT); + + if (configManager.getDuration(ENV_UNKNOWN_STATE_TIMEOUT) == 0) { + /* Support deprecated usage. */ + final boolean allowUnknownStateEnv = + configManager.getBoolean(ALLOW_UNKNOWN_STATE_ENV_OPEN); + unknownStateTimeout = + (allowUnknownStateEnv ? setupTimeout : Integer.MAX_VALUE); + } else { + unknownStateTimeout = configManager. + getDuration(ENV_UNKNOWN_STATE_TIMEOUT); + if (unknownStateTimeout > setupTimeout) { + String message = String.format( + " The timeout ENV_UNKNOWN_STATE_TIMEOUT(%,d ms)" + + " exceeds the timeout ENV_SETUP_TIMEOUT(%,d ms)", + unknownStateTimeout, + setupTimeout); + + throw new IllegalArgumentException(message); + } + } + /* Set the first timeout. */ + timeout = Math.min(unknownStateTimeout, setupTimeout); + } + + /** + * Returns the currently active timeout, adjusted for the time that has + * already elapsed. + */ + int getTimeout() { + return Math.max(timeout - (int)(System.currentTimeMillis() - start), + 0); + } + + /** + * Returns the setup timeout + */ + public int getSetupTimeout() { + return setupTimeout; + } + + /** + * Returns true if the currently active timeout is the one for the + * transition out of the unknown state. + */ + boolean timeoutIsForUnknownState() { + return timeout == unknownStateTimeout; + } + + /** + * Set the currently active timeout to be the env setup timeout. 
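
getTimeout above reports the remaining budget measured against a start timestamp fixed at construction, clamped at zero so a caller can pass it straight to a timed wait without overshooting the overall deadline. The arithmetic in isolation:

    // Remaining-budget sketch: a deadline fixed at construction time,
    // reported as "time left" at each call, never negative.
    class CountdownBudget {
        private final long startMs = System.currentTimeMillis();
        private final int budgetMs;

        CountdownBudget(int budgetMs) {
            this.budgetMs = budgetMs;
        }

        int remainingMs() {
            long elapsed = System.currentTimeMillis() - startMs;
            return (int) Math.max(budgetMs - elapsed, 0);
        }
    }

A caller that waits in a loop can thus retry with remainingMs() after each spurious wakeup and still respect the single overall deadline.
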
+ */ + void setSetupTimeout() { + timeout = setupTimeout; + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/MasterTransfer.java b/src/com/sleepycat/je/rep/impl/node/MasterTransfer.java new file mode 100644 index 0000000..edb00ac --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/MasterTransfer.java @@ -0,0 +1,378 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import java.util.Arrays; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.rep.MasterTransferFailureException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.elections.Elections; +import com.sleepycat.je.rep.elections.Learner; +import com.sleepycat.je.rep.elections.MasterValue; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Proposer.WinningProposal; +import com.sleepycat.je.rep.elections.TimebasedProposalGenerator; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.utilint.RepUtils.ExceptionAwareBlockingQueue; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; + +/** + * A Master Transfer operation. + *
        + * Each Master Transfer operation uses a separate instance of this class. + * There is usually no more than one instance in the lifetime of a master node, + * because if the transfer succeeds, the old master node environment becomes + * invalid and must be closed. However, if an operation times out, another + * operation can try again later. Or, a second operation can "forcibly" + * supersede an existing operation in progress. + * + * @see ReplicatedEnvironment#transferMaster(Set, int, TimeUnit) + */ +public class MasterTransfer { + final private Set replicas; + final private long startTimeMs; + final private long timeout; + final private long deadlineTimeMs; + final private RepNode repNode; + final private Map readyReplicas; + volatile private CountDownLatch blocker; + + /** + * Flag that indicates we've reached the point where we're committed to + * proceeding with the transfer: we've completed phase 2, chosen a winner, + * and are now notifying everyone of the new (fake) election result. Once + * we get to this point, we can't allow a new Master Transfer operation + * attempt to supersede us. + */ + volatile private boolean done; + + /** + * Queue which communicates key events of interest from Feeders regarding + * the progress of their efforts to catch up with the end of the log. The + * existence of this object signifies that (1) the owning Master Transfer + * object is viable (hasn't been superseded by a later, "forcing" MT + * operation); and (2) we have not yet discovered a winner. Once we have + * chosen a winner we disallow any future attempt to supersede this + * operation. + * + * @see #abort + * @see RepNode#setUpTransfer + */ + private ExceptionAwareBlockingQueue eventQueue; + + final private Logger logger = LoggerUtils.getLogger(getClass()); + + MasterTransfer(Set replicas, long timeout, RepNode repNode) { + this.replicas = replicas; + this.timeout = timeout; + startTimeMs = System.currentTimeMillis(); + deadlineTimeMs = startTimeMs + timeout; + this.repNode = repNode; + + LoggerUtils.info(logger, repNode.getRepImpl(), + "Start Master Transfer for " + + timeout + " msec, targeting: " + + Arrays.toString(replicas.toArray())); + readyReplicas = new HashMap(replicas.size()); + eventQueue = new ExceptionAwareBlockingQueue + (repNode.getRepImpl(), new VLSNProgress(null, null)); + } + + /** + * Aborts an existing, in-progress Master Transfer operation, if it hasn't + * reached the point of no return. + * + * @return true, if the operation was cancelled, false if it's too late for + * a clean cancellation. + */ + synchronized public boolean abort(Exception e) { + assert (e != null); + if (done) { + return false; + } + final ExceptionAwareBlockingQueue queue = getQueue(); + if (queue != null) { + queue.releasePoll(e); + } + return true; + } + + /** + * Accepts a Progress event and posts it to our queue for processing by the + * Master Transfer operation thread. + */ + synchronized void noteProgress(VLSNProgress p) { + final ExceptionAwareBlockingQueue queue = getQueue(); + if (queue != null) { + queue.add(p); + } + } + + /** + * Informs this Master Transfer operation that the named Feeder is shutting + * down, because its replica connection has been lost. This of course + * means that we can't expect this Feeder to soon catch up with our VLSN. + * In particular, if we have reached Phase 2 on the strength of the + * progress of only this one Feeder, then we must revert back to Phase 1. + *
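
abort above unblocks the transfer thread by poisoning the event queue with an exception via releasePoll. JE's ExceptionAwareBlockingQueue lives in RepUtils and differs in detail; a simplified stand-in for the idea:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    // Sketch: a queue whose consumer can be released by an exception.
    // Once poisoned, the pending (or next) poll surfaces the failure
    // instead of returning a value.
    class PoisonableQueue<T> {
        private final LinkedBlockingQueue<Object> q =
            new LinkedBlockingQueue<>();

        void add(T item) {
            q.add(item);
        }

        // Wake the consumer with a failure instead of a value.
        void releasePoll(Exception e) {
            q.add(e);
        }

        @SuppressWarnings("unchecked")
        T pollOrException(long timeout, TimeUnit unit) throws Exception {
            Object o = q.poll(timeout, unit);
            if (o instanceof Exception) {
                throw (Exception) o;
            }
            return (T) o;    // may be null on timeout
        }
    }
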
        + * Actually all we do here is post a special kind of "progress" event to + * our queue; it gets processed for real in the {@code chooseReplica()} + * thread, along with all the other events. + * + * @see #chooseReplica + */ + void giveUp(String replicaNodeName) { + noteProgress(VLSNProgress.makeFeederDeathEvent(replicaNodeName)); + } + + synchronized private ExceptionAwareBlockingQueue getQueue() { + return eventQueue; + } + + /** + * Performs the core processing of a Master Transfer operation. We first + * wait for one of the candidate target replica nodes to become completely + * synchronized. We then send a message to all nodes in the group + * (including ourselves) announcing which node is to become the new + * master. + *
        + * If the operation fails we release any transaction commit/abort threads + * that may have been blocked during phase 2 of the wait. However, in the + * success case the release of any such transaction threads is done as a + * natural by-product of the transition of the environment from master to + * replica status. + */ + String transfer() { + try { + String result = chooseReplica(); + if (result == null) { + throw new MasterTransferFailureException(getTimeoutMsg()); + } + done = true; + synchronized (this) { + eventQueue = null; + } + annouceWinner(result); + return result; + } catch (MasterTransferFailureException e) { + LoggerUtils.warning(logger, repNode.getRepImpl(), + "Master Transfer operation failed: " + e); + throw e; + } catch (InterruptedException ie) { + throw new ThreadInterruptedException(repNode.getRepImpl(), ie); + } finally { + eventQueue = null; + if (!done && blocker != null) { + blocker.countDown(); + } + } + } + + /** + * Prepares for a Master Transfer operation by waiting for one of the + * nominated candidate target replica nodes to catch up with the master, + * in two phases, as described in + * {@link ReplicatedEnvironment#transferMaster(Set, int, TimeUnit)}. + *
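
chooseReplica below blocks new transaction commits only in phase 2, by handing a one-shot CountDownLatch to the transaction path, and reverts to phase 1 (counting the latch down and dropping it) if the last ready candidate's feeder dies. The latch handoff in miniature, with illustrative names:

    import java.util.concurrent.CountDownLatch;

    // Sketch of the phase-2 blocker: a null latch means phase 1
    // (commits flow freely); a live latch means phase 2 (the commit
    // path awaits it before completing).
    class TransferPhases {
        private volatile CountDownLatch blocker = null;

        boolean inPhase2() {
            return blocker != null;
        }

        // First candidate caught up: enter phase 2, start blocking.
        CountDownLatch enterPhase2() {
            blocker = new CountDownLatch(1);
            return blocker;   // handed to the commit path to await on
        }

        // Last ready candidate died: release blocked commits and
        // fall back to phase 1.
        void revertToPhase1() {
            CountDownLatch b = blocker;
            if (b != null) {
                blocker = null;
                b.countDown();
            }
        }
    }
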
        + * This method works by observing events generated by Feeder threads and + * passed to us via a queue. + * + * @return the node name of the first replica to complete phase 2 of the + * preparation, or {@code null} if the operation times out. + */ + private String chooseReplica() throws InterruptedException { + final ExceptionAwareBlockingQueue queue = getQueue(); + if (queue == null) { + return null; + } + final FeederManager feederManager = repNode.feederManager(); + final Map activeReplicas = + feederManager.activeReplicasMap(); + for (String nodeName : replicas) { + final Feeder feeder = activeReplicas.get(nodeName); + if (feeder != null) { + feeder.setMasterTransfer(this); + } + } + + /* + * Phase 1 could last a long time, if all of our candidate replicas are + * still catching up (or not even connected); so we allow new + * transactions to be written. But once we get to phase 2 we block + * commit/abort operations for a final (quicker) catch-up. Thus we can + * tell whether we're in phase 2 by whether we have a non-null blocker. + */ + String result = null; + for (;;) { + final long pollTimeout = + deadlineTimeMs - System.currentTimeMillis(); + final VLSNProgress event = + queue.pollOrException(pollTimeout, TimeUnit.MILLISECONDS); + if (event == null) { + return null; + } + final VLSN endVLSN = repNode.getCurrentTxnEndVLSN(); + + Level level = Level.INFO; + if (event.isFeederDeathEvent()) { + readyReplicas.remove(event.replicaNodeName); + if (blocker != null && readyReplicas.isEmpty()) { + + /* + * Must revert back to phase 1. The latch will still + * exist, because we've passed it to repImpl; and this is + * exactly what we want, so that blocked txns can proceed, + * and new ones won't get blocked for now. + */ + blocker.countDown(); + blocker = null; + } + } else if (blocker == null) { /* phase 1 */ + assert readyReplicas.isEmpty(); + readyReplicas.put(event.replicaNodeName, event.vlsn); + blocker = new CountDownLatch(1); + repNode.getRepImpl().blockTxnCompletion(blocker); + /* + * >= comparison, here and below, since currentTxnEndVLSN can + * lag the latest txnEndVLSN actually written to the log. + */ + if (event.getVLSN().compareTo(endVLSN) >= 0) { + result = event.replicaNodeName; + } + } else { /* phase 2 */ + if (event.getVLSN().compareTo(endVLSN) >= 0) { + result = event.replicaNodeName; + } else { + + /* + * The present VLSN does not match the ultimate target + * VLSN, so we're not done yet. Since there could be a few + * events of this type, only log all of them at the + * {@code FINE} level. + */ + readyReplicas.put(event.replicaNodeName, event.vlsn); + level = Level.FINE; + } + } + + /* Emit log message after the fact */ + LoggerUtils.logMsg(logger, repNode.getRepImpl(), level, + "Master Transfer progress: " + + event.replicaNodeName + ", " + event.vlsn + + ", phase: " + (blocker == null ? 1 : 2) + + ", endVLSN: " + endVLSN); + if (result != null) { + return result; + } + } + } + + /** + * Broadcasts a fake election result message. This does a couple things: + * (1) prods the chosen replica to become the new master; and (2) forces + * the old master to notice and shut down with a master-replica transition + * exception. 
+ */ + private void annouceWinner(String nodeName) { + final RepGroupImpl group = repNode.getGroup(); + RepNodeImpl node = group.getNode(nodeName); + MasterValue newMaster = new MasterValue + (node.getSocketAddress().getHostName(), + node.getSocketAddress().getPort(), + node.getNameIdPair()); + Proposal proposal = + new TimebasedProposalGenerator().nextProposal(); + final Elections elections = repNode.getElections(); + elections.getLearner(); + Learner.informLearners + (group.getAllLearnerSockets(), + new WinningProposal(proposal, newMaster, null), + elections.getProtocol(), + elections.getThreadPool(), + elections.getLogger(), + repNode.getRepImpl(), + null); + } + + /** + * Enables the given {@code Feeder} to contribute to this Master Transfer + * operation. Called from the {@code FeederManager} when a new {@code + * Feeder} is established during the time when a Master Transfer operation + * is already in progress. + */ + void addFeeder(Feeder f) { + String name = f.getReplicaNameIdPair().getName(); + if (replicas.contains(name)) { + LoggerUtils.info(logger, repNode.getRepImpl(), + "Add node " + name + + " to existing Master Transfer"); + f.setMasterTransfer(this); + } + } + + long getStartTime() { + return startTimeMs; + } + + /** + * Generates a detailed error message for the case when the operation times + * out. + */ + private String getTimeoutMsg() { + return "Timed out: started at " + new Date(startTimeMs) + + " for " + timeout + " milliseconds\n" + + "master's VLSN: " + repNode.getCurrentTxnEndVLSN() + + repNode.dumpAckFeederState(); + } + + /** + * An event of interest in the pursuit of our goal of completing the Master + * Transfer. Generally it indicates that the named replica has received + * and processed the transaction identified by the given VLSN. As a + * special case, an event representing the death of a Feeder is represented + * by a {@code null} VLSN. + */ + static class VLSNProgress { + final VLSN vlsn; + final String replicaNodeName; + VLSNProgress(VLSN vlsn, String replicaNodeName) { + this.vlsn = vlsn; + this.replicaNodeName = replicaNodeName; + } + + static VLSNProgress makeFeederDeathEvent(String nodeName) { + return new VLSNProgress(null, nodeName); + } + + VLSN getVLSN() { + assert vlsn != null; + return vlsn; + } + + boolean isFeederDeathEvent() { + return vlsn == null; + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/MonitorEventManager.java b/src/com/sleepycat/je/rep/impl/node/MonitorEventManager.java new file mode 100644 index 0000000..ae253be --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/MonitorEventManager.java @@ -0,0 +1,144 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.node; + +import java.net.InetSocketAddress; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.rep.elections.Utils; +import com.sleepycat.je.rep.elections.Utils.FutureTrackingCompService; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.monitor.MonitorService; +import com.sleepycat.je.rep.monitor.Protocol.GroupChange; +import com.sleepycat.je.rep.monitor.Protocol.JoinGroup; +import com.sleepycat.je.rep.monitor.Protocol.LeaveGroup; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * The class for firing MonitorChangeEvents. + * + * Each time when there happens a MonitorChangeEvents, it refreshes the group + * information so that it can send messages to current monitors. + */ +public class MonitorEventManager { + + /* The time when this node joins the group, 0 if it hasn't joined yet. */ + private long joinTime = 0L; + + private final RepNode repNode; + + public MonitorEventManager(RepNode repNode) { + this.repNode = repNode; + } + + /* Return the time when JoinGroupEvent for this RepNode fires. */ + public long getJoinTime() { + return joinTime; + } + + /* Disable the LeaveGroupEvent because the node is abnormally closed. */ + public void disableLeaveGroupEvent() { + joinTime = 0L; + } + + /** + * Fire a GroupChangeEvent. + */ + public void notifyGroupChange(String nodeName, GroupChangeType opType) + throws DatabaseException { + + RepGroupImpl repGroup = repNode.getGroup(); + GroupChange changeEvent = + getProtocol(repGroup).new GroupChange(repGroup, nodeName, opType); + refreshMonitors(repGroup, changeEvent); + } + + /** + * Fire a JoinGroupEvent. + */ + public void notifyJoinGroup() + throws DatabaseException { + + if (joinTime > 0) { + /* Already notified. */ + return; + } + + joinTime = System.currentTimeMillis(); + RepGroupImpl repGroup = repNode.getGroup(); + JoinGroup joinEvent = + getProtocol(repGroup).new JoinGroup(repNode.getNodeName(), + repNode.getMasterName(), + joinTime); + refreshMonitors(repGroup, joinEvent); + } + + /** + * Fire a LeaveGroupEvent and wait for responses. + */ + public void notifyLeaveGroup(LeaveReason reason) + throws DatabaseException, InterruptedException { + + if (joinTime == 0) { + /* No join event, therefore no matching leave event. */ + return; + } + + RepGroupImpl repGroup = repNode.getGroup(); + LeaveGroup leaveEvent = + getProtocol(repGroup).new LeaveGroup(repNode.getNodeName(), + repNode.getMasterName(), + reason, + joinTime, + System.currentTimeMillis()); + final FutureTrackingCompService compService = + refreshMonitors(repGroup, leaveEvent); + + /* Wait for the futures to be evaluated. */ + Utils.checkFutures + (compService, 10, TimeUnit.SECONDS, repNode.getLogger(), + repNode.getRepImpl(), null); + } + + /* Create a monitor protocol. */ + private com.sleepycat.je.rep.monitor.Protocol + getProtocol(RepGroupImpl repGroup) { + + return new com.sleepycat.je.rep.monitor.Protocol + (repGroup.getName(), NameIdPair.NOCHECK, null, + repNode.getRepImpl().getChannelFactory()); + } + + /* Refresh all the monitors with specified message. 
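
notifyLeaveGroup above broadcasts the event and then bounds its wait for responses (10 seconds, via Utils.checkFutures). The same shape expressed with plain java.util.concurrent, independent of JE's helpers:

    import java.util.concurrent.*;

    public class BoundedBroadcast {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            CompletionService<String> cs =
                new ExecutorCompletionService<>(pool);

            int sent = 3;
            for (int i = 0; i < sent; i++) {
                final int id = i;
                cs.submit(() -> "ack-from-monitor-" + id);  // fake I/O
            }

            // Collect what arrives within the budget; abandon the rest.
            long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
            for (int i = 0; i < sent; i++) {
                Future<String> f = cs.poll(
                    deadline - System.nanoTime(), TimeUnit.NANOSECONDS);
                if (f == null) {
                    break;               // budget exhausted
                }
                try {
                    System.out.println(f.get());
                } catch (ExecutionException e) {
                    System.out.println("monitor failed: " + e.getCause());
                }
            }
            pool.shutdownNow();
        }
    }
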
*/ + private FutureTrackingCompService + refreshMonitors(RepGroupImpl repGroup, + RequestMessage requestMessage) { + Set monitors = repGroup.getAllMonitorSockets(); + if (monitors.size() > 0) { + LoggerUtils.info(repNode.getLogger(), repNode.getRepImpl(), + "Refreshed " + monitors.size() + " monitors."); + } + /* Broadcast and forget. */ + return Utils.broadcastMessage(monitors, + MonitorService.SERVICE_NAME, + requestMessage, + repNode.getElections().getThreadPool()); + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/NameIdPair.java b/src/com/sleepycat/je/rep/impl/node/NameIdPair.java new file mode 100644 index 0000000..41d8635 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/NameIdPair.java @@ -0,0 +1,184 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import java.io.Serializable; +import java.nio.ByteBuffer; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.utilint.BinaryProtocol; + +/** + * The public name and internal id pair used to uniquely identify a node + * within a replication group. + */ +public class NameIdPair implements Serializable { + private static final long serialVersionUID = 1L; + + private final String name; + private int id; + + /* Constant to denote an unknown NODE_ID */ + public final static int NULL_NODE_ID = -1; + + /* The node ID used to bypass group membership checks. */ + public static final int NOCHECK_NODE_ID = Integer.MIN_VALUE; + + public static final NameIdPair NULL = + new ReadOnlyNameIdPair("NullNode", NameIdPair.NULL_NODE_ID); + + public static final NameIdPair NOCHECK = + new ReadOnlyNameIdPair("NoCheckNode", NOCHECK_NODE_ID); + + public NameIdPair(String name, int id) { + if (name == null) { + throw EnvironmentFailureException.unexpectedState + ("name argument was null"); + } + this.name = name; + this.id = id; + } + + /** + * Constructor for a pair where the node ID is as yet unknown. + */ + public NameIdPair(String name) { + this(name, NULL.getId()); + } + + /** Serializes from a ByteBuffer for a given protocol. */ + public static NameIdPair deserialize(ByteBuffer buffer, + BinaryProtocol protocol) { + return new NameIdPair(protocol.getString(buffer), + LogUtils.readInt(buffer)); + } + + /** Serializes from a TupleInput after retrieving from storage. */ + public static NameIdPair deserialize(TupleInput buffer) { + return new NameIdPair(buffer.readString(), buffer.readInt()); + } + + /** Serializes into a ByteBuffer for a given protocol. */ + public void serialize(ByteBuffer buffer, BinaryProtocol protocol) { + protocol.putString(name, buffer); + LogUtils.writeInt(buffer, id); + } + + /** Serializes into a TupleOutput before storing. */ + public void serialize(TupleOutput buffer) { + buffer.writeString(name); + buffer.writeInt(id); + } + + /** Returns serialized for a given protocol. 
*/ + public int serializedSize(BinaryProtocol protocol) { + return protocol.stringSize(name) + 4; + } + + /** + * Returns the application assigned name + */ + public String getName() { + return name; + } + + @Override + public String toString() { + return name + "(" + id + ")"; + } + + /** + * Returns the internally generated compact id. + */ + public int getId() { + return id; + } + + public boolean hasNullId() { + return this.id == NameIdPair.NULL_NODE_ID; + } + + public void setId(int id) { + setId(id, true); + } + + public void setId(int id, boolean checkId) { + if (checkId && (id != this.id) && ! hasNullId()) { + throw EnvironmentFailureException.unexpectedState + ("Id was already not null: " + this.id); + } + this.id = id; + } + + public void revertToNull() { + this.id = NameIdPair.NULL_NODE_ID; + } + + public void update(NameIdPair other) { + if (!name.equals(other.getName())) { + throw EnvironmentFailureException.unexpectedState + ("Pair name mismatch: " + name + " <> " + other.getName()); + } + setId(other.getId()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + id; + result = prime * result + ((name == null) ? 0 : name.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof NameIdPair)) { + return false; + } + NameIdPair other = (NameIdPair) obj; + if (id != other.id) { + return false; + } + if (!name.equals(other.name)) { + throw EnvironmentFailureException.unexpectedState + ("Ids: " + id + " were equal." + " But names: " + name + ", " + + other.name + " weren't!"); + } + return true; + } + + private static class ReadOnlyNameIdPair extends NameIdPair { + private static final long serialVersionUID = 1L; + + public ReadOnlyNameIdPair(String name, int id) { + super(name, id); + } + + @Override + public void setId(int id) { + throw EnvironmentFailureException.unexpectedState + ("Read only NameIdPair"); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/NodeState.java b/src/com/sleepycat/je/rep/impl/node/NodeState.java new file mode 100644 index 0000000..751c319 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/NodeState.java @@ -0,0 +1,100 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl.node; + +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * NodeState encapsulates the current replicator state, and the ability to wait + * for state transition and fire state change notifications. + */ +public class NodeState { + /* The rep impl whose state is being tracked. 
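
The serialize/deserialize pair above gives NameIdPair a symmetric tuple encoding: name first, then the integer id. A round-trip sketch against those methods, assuming the surrounding classes are on the classpath:

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.je.rep.impl.node.NameIdPair;

    public class NameIdPairRoundTrip {
        public static void main(String[] args) {
            NameIdPair original = new NameIdPair("node1", 42);

            // Serialize: name, then id (see serialize above).
            TupleOutput out = new TupleOutput();
            original.serialize(out);

            // Deserialize in the same order and compare.
            TupleInput in = new TupleInput(out);
            NameIdPair copy = NameIdPair.deserialize(in);
            System.out.println(original.equals(copy));   // true
        }
    }
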
*/ + private final RepImpl repImpl; + + /* The application registered state change listener for this node. */ + private StateChangeListener stateChangeListener = null; + + /* The state change event that resulted in the current state. */ + private StateChangeEvent stateChangeEvent = null; + private final AtomicReference currentState; + private final Logger logger; + private final NameIdPair nameIdPair; + + public NodeState(NameIdPair nameIdPair, + RepImpl repImpl) { + + currentState = new AtomicReference + (ReplicatedEnvironment.State.DETACHED); + this.nameIdPair = nameIdPair; + this.repImpl = repImpl; + logger = LoggerUtils.getLogger(getClass()); + } + + synchronized public + void setChangeListener(StateChangeListener stateChangeListener){ + this.stateChangeListener = stateChangeListener; + } + + synchronized public StateChangeListener getChangeListener() { + return stateChangeListener; + } + + /** + * Change to a new node state and release any threads waiting for a state + * transition. + */ + synchronized public void changeAndNotify(ReplicatedEnvironment.State state, + NameIdPair masterNameId) { + + ReplicatedEnvironment.State newState = state; + ReplicatedEnvironment.State oldState = currentState.getAndSet(state); + stateChangeEvent = new StateChangeEvent(state, masterNameId); + + LoggerUtils.info(logger, repImpl, + "node:" + masterNameId + + " state change from " + oldState + " to " + newState); + + if (stateChangeListener != null) { + try { + stateChangeListener.stateChange(stateChangeEvent); + } catch (Exception e) { + LoggerUtils.severe(logger, repImpl, + "State Change listener exception" + + e.getMessage()); + throw new EnvironmentFailureException + (repImpl, EnvironmentFailureReason.LISTENER_EXCEPTION, e); + } + } + + /* Make things obvious in thread dumps */ + Thread.currentThread().setName(currentState + " " + nameIdPair); + } + + synchronized public ReplicatedEnvironment.State getRepEnvState() { + return currentState.get(); + } + + synchronized public StateChangeEvent getStateChangeEvent() { + return stateChangeEvent; + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/RepNode.java b/src/com/sleepycat/je/rep/impl/node/RepNode.java new file mode 100644 index 0000000..39e1d4a --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/RepNode.java @@ -0,0 +1,2721 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
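
changeAndNotify above delivers every transition to the application's registered StateChangeListener and treats a listener exception as an environment-invalidating failure, so listener bodies should be quick and non-throwing. A minimal listener against the public API; the guard on state is an assumption that getMasterNodeName is only valid while a master is known:

    import com.sleepycat.je.rep.StateChangeEvent;
    import com.sleepycat.je.rep.StateChangeListener;

    // Sketch: a listener that just logs transitions. Registered via
    // ReplicatedEnvironment.setStateChangeListener(listener).
    public class LoggingListener implements StateChangeListener {
        @Override
        public void stateChange(StateChangeEvent event) {
            // Only MASTER/REPLICA states carry a master name.
            final String master =
                (event.getState().isMaster() || event.getState().isReplica())
                    ? event.getMasterNodeName() : "none";
            System.out.println("now " + event.getState()
                               + ", master=" + master);
        }
    }
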
+ */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.DETACHED; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.MASTER; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.REPLICA; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.UNKNOWN; +import static com.sleepycat.je.rep.impl.RepParams.DBTREE_CACHE_CLEAR_COUNT; +import static com.sleepycat.je.rep.impl.RepParams.ENV_CONSISTENCY_TIMEOUT; +import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME; +import static com.sleepycat.je.rep.impl.RepParams.HEARTBEAT_INTERVAL; +import static com.sleepycat.je.rep.impl.RepParams.IGNORE_SECONDARY_NODE_ID; +import static com.sleepycat.je.rep.impl.RepParams.NODE_TYPE; +import static com.sleepycat.je.rep.impl.RepParams.RESET_REP_GROUP_RETAIN_UUID; +import static com.sleepycat.je.rep.impl.RepParams.SECURITY_CHECK_INTERVAL; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.BitSet; +import java.util.Date; +import java.util.HashSet; +import java.util.Set; +import java.util.Timer; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.RecoveryProgress; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.cleaner.FileProtector; +import com.sleepycat.je.cleaner.FileProtector.ProtectedFileSet; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.StartupTracker.Phase; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.rep.AppStateMonitor; +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.MasterStateException; +import com.sleepycat.je.rep.MasterTransferFailureException; +import com.sleepycat.je.rep.MemberActiveException; +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.ReplicaStateException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironmentStats; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.RestartRequiredException; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.arbitration.Arbiter; +import com.sleepycat.je.rep.elections.Elections; +import com.sleepycat.je.rep.elections.ElectionsConfig; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.TimebasedProposalGenerator; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol.BinaryNodeStateResponse; +import com.sleepycat.je.rep.impl.BinaryNodeStateService; +import com.sleepycat.je.rep.impl.GroupService; +import com.sleepycat.je.rep.impl.MinJEVersionUnsupportedException; +import com.sleepycat.je.rep.impl.NodeStateService; +import com.sleepycat.je.rep.impl.PointConsistencyPolicy; +import 
com.sleepycat.je.rep.impl.RepGroupDB; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepGroupImpl.NodeConflictException; +import com.sleepycat.je.rep.impl.RepGroupProtocol; +import com.sleepycat.je.rep.impl.RepGroupProtocol.GroupResponse; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.cbvlsn.CleanerBarrierState; +import com.sleepycat.je.rep.impl.node.cbvlsn.GlobalCBVLSN; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNTracker; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.stream.FeederTxns; +import com.sleepycat.je.rep.stream.MasterChangeListener; +import com.sleepycat.je.rep.stream.MasterStatus; +import com.sleepycat.je.rep.stream.MasterSuggestionGenerator; +import com.sleepycat.je.rep.subscription.StreamAuthenticator; +import com.sleepycat.je.rep.txn.ReplayTxn; +import com.sleepycat.je.rep.util.AtomicLongMax; +import com.sleepycat.je.rep.util.ldiff.LDiffService; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.RepUtils.Clock; +import com.sleepycat.je.rep.utilint.RepUtils.ExceptionAwareCountDownLatch; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * Represents a replication node. This class is the locus of operations that + * manage the state of the node, master, replica, etc. Once the state of a node + * has been established the thread of control passes over to the Replica or + * FeederManager instances. + * + * Note that both Feeders and the Replica instance may be active in future when + * we support r2r replication, in addition to m2r replication. For now however, + * either the FeederManager is active, or the Replica is and the same common + * thread control can be shared between the two. + */ +public class RepNode extends StoppableThread { + + /* + * The unique node name and internal id that identifies the node within + * the rep group. There is a canonical instance of this that's updated + * when the node joins the group. + */ + private final NameIdPair nameIdPair; + + /* The service dispatcher used by this replication node. */ + private final ServiceDispatcher serviceDispatcher; + + /* The election instance for this node */ + private Elections elections; + + /* The locus of operations when the node is a replica. */ + private final Replica replica; + + /* Used when the node is a feeder. */ + private FeederManager feederManager; + + /* + * The status of the Master. Note that this is the leading state as + * communicated to this node via the Listener. The node itself may not as + * yet have responded to this state change announced by the Listener. That + * is, nodeState, may reflect a different state until the transition to + * this state has been completed. 
+ */ + private final MasterStatus masterStatus; + private final MasterChangeListener changeListener; + private final MasterSuggestionGenerator suggestionGenerator; + + /* + * Represents the application visible state of this node. It may lag the + * state as described by masterStatus. + */ + private final NodeState nodeState; + + private final RepImpl repImpl; + + /* The encapsulated internal replication group database. */ + final RepGroupDB repGroupDB; + + /* + * The latch used to indicate that the node has a well defined state as a + * Master or Replica and has finished the node-specific initialization that + * will permit it to function immediately in that capacity. + * + * For a Master it means that it's ready to start accepting connections + * from Replicas. + * + * For a Replica, it means that it has established a connection with a + * Feeder, completed the handshake process that validates it as being a + * legitimate member of the group, established a sync point, and is ready + * to start replaying the replication stream. + */ + private volatile ExceptionAwareCountDownLatch readyLatch = null; + + /* + * Latch used to freeze txn commit VLSN advancement during an election. + */ + private final CommitFreezeLatch vlsnFreezeLatch = new CommitFreezeLatch(); + + /** + * Describes the nodes that form the group. This information is dynamic + * it's initialized at startup and subsequently as a result of changes + * made either directly to it, when the node is a master, or via the + * replication stream, when it is a Replica. + * + * Always use the setGroup() method to set this iv, so that needsAck in + * particular is updated in unison. + */ + volatile private RepGroupImpl group; + + /** + * Acks needed. Determines whether durability needs acknowledgments from + * other nodes, that is, the rep group has more than one data node that's + * also electable. + * + * Only update via the setGroup method. + */ + volatile private boolean needsAcks = false; + + /* + * Determines the election policy to use when the node holds its very first + * elections + */ + private QuorumPolicy electionQuorumPolicy = QuorumPolicy.SIMPLE_MAJORITY; + + /* + * Amount of times to sleep between retries when a new node tries to locate + * a master. + */ + private static final int MASTER_QUERY_INTERVAL = 10000; + + /* Number of times to retry joining on a retryable exception. */ + private static final int JOIN_RETRIES = 10; + + /* + * Encapsulates access to current time, to arrange for testing of clock + * skews. + */ + private final Clock clock; + + private com.sleepycat.je.rep.impl.networkRestore.FeederManager + logFeederManager; + private LDiffService ldiff; + private NodeStateService nodeStateService; + private BinaryNodeStateService binaryNodeStateService; + private GroupService groupService; + + /* tracks the local CBVLSN for this node. */ + final LocalCBVLSNTracker cbvlsnTracker; + + /* The currently in-progress Master Transfer operation, if any. */ + private MasterTransfer xfrInProgress; + + /* calculates and manages the global, cached CBVLSN */ + final GlobalCBVLSN globalCBVLSN; + + /* Determines how long to wait for a replica to catch up on a close. */ + private long replicaCloseCatchupMs = -1; + + /* Manage and notify MonitorChangeEvents fired by this RepNode. */ + private MonitorEventManager monitorEventManager; + + /* The user defined AppStateMonitor which gets the application state. 
*/ + private AppStateMonitor appStateMonitor; + + /* + * A timer used for misc short-lived scheduled tasks: + * ChannelTimeoutTask, Elections.RebroadcastTask. + */ + private final Timer timer; + private final ChannelTimeoutTask channelTimeoutTask; + + final Logger logger; + + /* Locus of election and durability quorum decisions */ + private final ElectionQuorum electionQuorum; + private final DurabilityQuorum durabilityQuorum; + + private final Arbiter arbiter; + private final NodeType nodeType; + + /** Manages the allocation of node IDs for secondary nodes. */ + private final TransientIds transientIds = + new TransientIds(RepGroupImpl.MAX_NODES_WITH_TRANSIENT_ID); + + /** + * Synchronize on this object when setting the minimum JE version or adding + * a secondary node, which could change the JE versions of the nodes to + * check when setting a new minimum. + * + * @see #setMinJEVersion + * @see #addTransientIdNode + */ + private final Object minJEVersionLock = new Object(); + + /* Used by tests only. */ + private int logVersion = LogEntryType.LOG_VERSION; + + /* For unit testing */ + private Set> convertHooks; + + /** + * The in-memory DTVLSN. It represents the highest transaction known to + * have been replicated to a majority of the Replicas. + * + * At a master, knowledge of this replication state may have been + * communicated explicitly due to the use of SIMPLE_MAJORITY or ALL ACKs, + * or it may have been communicated via a heartbeat indicating the progress + * of replication at a replica. + * + * At a replica, this state is obtained from commit/abort records in the + * replication stream. + * + * This field is initialized from its persistent value whenever the + * environment is first opened. It may be the null VLSN value for brand new + * environments. This value can only advance as increasing numbers of + * transactions are acknowledged. + * + * @see + * DTVLSN + * + */ + private final AtomicLongMax dtvlsn = + new AtomicLongMax(VLSN.NULL_VLSN_SEQUENCE); + + /** + * If not null, a test hook that is called with the name of the current + * node during the query for group membership before the node sleeps after + * failing to obtain information about the group master -- for unit + * testing. + */ + public static volatile TestHook + queryGroupForMembershipBeforeSleepHook; + + /** + * If not null, called by queryGroupForMembership with the name of the + * current node before querying learners for the master -- for unit + * testing. + */ + public static volatile TestHook + queryGroupForMembershipBeforeQueryForMaster; + + /** + * If not null, a test hook that is called with the name of the current + * node before attempting to contact each network restore supplier, for + * unit testing. 
+ */ + public static volatile TestHook beforeFindRestoreSupplierHook; + + public RepNode(RepImpl repImpl, + Replay replay, + NodeState nodeState) + throws IOException, DatabaseException { + + super(repImpl, "RepNode " + repImpl.getNameIdPair()); + + this.repImpl = repImpl; + readyLatch = new ExceptionAwareCountDownLatch(repImpl, 1); + nameIdPair = repImpl.getNameIdPair(); + logger = LoggerUtils.getLogger(getClass()); + + this.serviceDispatcher = + new ServiceDispatcher(getSocket(), repImpl, + repImpl.getChannelFactory()); + serviceDispatcher.start(); + clock = new Clock(RepImpl.getClockSkewMs()); + this.repGroupDB = new RepGroupDB(repImpl); + + masterStatus = new MasterStatus(nameIdPair); + replica = ReplicaFactory.create(this, replay); + + feederManager = new FeederManager(this); + changeListener = new MasterChangeListener(this); + suggestionGenerator = new MasterSuggestionGenerator(this); + + this.nodeState = nodeState; + + electionQuorum = new ElectionQuorum(repImpl); + durabilityQuorum = new DurabilityQuorum(repImpl); + + utilityServicesStart(); + this.globalCBVLSN = new GlobalCBVLSN(this); + this.cbvlsnTracker = new LocalCBVLSNTracker(this, globalCBVLSN); + this.monitorEventManager = new MonitorEventManager(this); + timer = new Timer(true); + channelTimeoutTask = new ChannelTimeoutTask(timer); + + arbiter = new Arbiter(repImpl); + nodeType = NodeType.valueOf(getConfigManager().get(NODE_TYPE)); + + dtvlsn.updateMax(repImpl.getLoggedDTVLSN()); + LoggerUtils.info(logger, repImpl, + String.format("DTVLSN at start:%,d", dtvlsn.get())); + } + + private void utilityServicesStart() { + ldiff = new LDiffService(serviceDispatcher, repImpl); + logFeederManager = + new com.sleepycat.je.rep.impl.networkRestore.FeederManager + (serviceDispatcher, repImpl, nameIdPair); + + /* Register the node state querying service. */ + nodeStateService = new NodeStateService(serviceDispatcher, this); + serviceDispatcher.register(nodeStateService); + + binaryNodeStateService = + new BinaryNodeStateService(serviceDispatcher, this); + groupService = new GroupService(serviceDispatcher, this); + serviceDispatcher.register(groupService); + } + + /* Create a placeholder node, for test purposes only. */ + public RepNode(NameIdPair nameIdPair) { + this(nameIdPair, null); + } + + public RepNode() { + this(NameIdPair.NULL); + } + + public RepNode(NameIdPair nameIdPair, + ServiceDispatcher serviceDispatcher) { + super("RepNode " + nameIdPair); + repImpl = null; + clock = new Clock(0); + + this.nameIdPair = nameIdPair; + this.serviceDispatcher = serviceDispatcher; + + this.repGroupDB = null; + + masterStatus = new MasterStatus(NameIdPair.NULL); + replica = null; + feederManager = null; + changeListener = null; + suggestionGenerator = null; + nodeState = null; + cbvlsnTracker = null; + globalCBVLSN = null; + logger = null; + timer = null; + channelTimeoutTask = null; + electionQuorum = null; + durabilityQuorum = null; + arbiter = null; + nodeType = NodeType.ELECTABLE; + } + + @Override + public Logger getLogger() { + return logger; + } + + /** + * Returns the node type of this node. + */ + public NodeType getNodeType() { + return nodeType; + } + + /** + * Returns the timer associated with this RepNode + */ + public Timer getTimer() { + return timer; + } + + public ServiceDispatcher getServiceDispatcher() { + return serviceDispatcher; + } + + /** + * Returns the accumulated statistics for this node. The method + * encapsulates the statistics associated with its two principal components + * the FeederManager and the Replica. 
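+     *
+     * Editorial sketch (not part of the original source): applications would
+     * normally read these statistics through the public handle rather than
+     * through RepNode directly; assuming an open ReplicatedEnvironment
+     * handle named "repEnv", something like:
+     *
+     *   StatsConfig config = new StatsConfig();
+     *   config.setClear(true);  // reset counters after reading
+     *   ReplicatedEnvironmentStats stats = repEnv.getRepStats(config);
+     *   System.out.println(stats);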
+ */ + public ReplicatedEnvironmentStats getStats(StatsConfig config) { + return RepInternal.makeReplicatedEnvironmentStats(repImpl, config); + } + + public void resetStats() { + feederManager.resetStats(); + replica.resetStats(); + } + + public ExceptionAwareCountDownLatch getReadyLatch() { + return readyLatch; + } + + public CommitFreezeLatch getVLSNFreezeLatch() { + return vlsnFreezeLatch; + } + + public void resetReadyLatch(Exception exception) { + ExceptionAwareCountDownLatch old = readyLatch; + readyLatch = new ExceptionAwareCountDownLatch(repImpl, 1); + if (old.getCount() != 0) { + /* releasing latch in some error situation. */ + old.releaseAwait(exception); + } + } + + /* The methods below return the components of the rep node. */ + public FeederManager feederManager() { + return feederManager; + } + + public Replica replica() { + return replica; + } + + public Clock getClock() { + return clock; + } + + public Replica getReplica() { + return replica; + } + + public RepGroupDB getRepGroupDB() { + return repGroupDB; + } + + /** + * Retrieves the node's current snapshot image of the group definition. + *
+     *
        + * There is a very brief period of time, during node start-up, where this + * can be null. But after that it should always return a + * valid object. + */ + public RepGroupImpl getGroup() { + return group; + } + + /** + * Returns the UUID associated with the replicated environment. + */ + public UUID getUUID() { + if (group == null) { + throw EnvironmentFailureException.unexpectedState + ("Group info is not available"); + } + return group.getUUID(); + } + + /** + * Returns the nodeName associated with this replication node. + * + * @return the nodeName + */ + public String getNodeName() { + return nameIdPair.getName(); + } + + /** + * Returns the nodeId associated with this replication node. + * + * @return the nodeId + */ + public int getNodeId() { + return nameIdPair.getId(); + } + + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + public InetSocketAddress getSocket() { + return repImpl.getSocket(); + } + + public String getHostName() { + return repImpl.getHostName(); + } + + public int getPort() { + return repImpl.getPort(); + } + + public MasterStatus getMasterStatus() { + return masterStatus; + } + + /** + * Returns a definitive answer to whether this node is currently the master + * by checking both its status as a master and whether the group agrees + * that it is the master. + * + * Such an authoritative answer is needed in a network partition situation + * to detect a master that may be isolated on the minority side of a + * network partition. + * + * @return true if the node is definitely the master. False if it's not or + * we cannot be sure. + */ + public boolean isAuthoritativeMaster() { + return (electionQuorum.isAuthoritativeMaster(getMasterStatus(), + feederManager)); + } + + public int getHeartbeatInterval() { + return getConfigManager().getInt(HEARTBEAT_INTERVAL); + } + + /* For unit testing only. */ + public void setVersion(int version) { + logVersion = version; + } + + public int getLogVersion() { + return logVersion; + } + + public int getElectionPriority() { + + /* A node should not become master if it cannot write. */ + if (repImpl.getDiskLimitViolation() != null) { + return 0; + } + + final int priority = + getConfigManager().getInt(RepParams.NODE_PRIORITY); + + final int defaultPriority = + Integer.parseInt(RepParams.NODE_PRIORITY.getDefault()); + + return (getConfigManager().getBoolean(RepParams.DESIGNATED_PRIMARY) && + (priority == defaultPriority)) ? + defaultPriority + 1 : /* Raise its priority. */ + priority; /* Explicit priority, leave it intact. */ + } + + /* + * Amount of time to wait for a thread to finish on a shutdown. It's + * a multiple of a heartbeat, since a thread typically polls for a + * shutdown once per heartbeat. + */ + public int getThreadWaitInterval() { + return getHeartbeatInterval() * 4; + } + + int getDbTreeCacheClearingOpCount() { + return getConfigManager().getInt(DBTREE_CACHE_CLEAR_COUNT); + } + + public RepImpl getRepImpl() { + return repImpl; + } + + public LogManager getLogManager() { + return repImpl.getLogManager(); + } + + DbConfigManager getConfigManager() { + return repImpl.getConfigManager(); + } + + public VLSNIndex getVLSNIndex() { + return repImpl.getVLSNIndex(); + } + + public FeederTxns getFeederTxns() { + return repImpl.getFeederTxns(); + } + + public Elections getElections() { + return elections; + } + + public MasterSuggestionGenerator getSuggestionGenerator() { + return suggestionGenerator; + } + + /* Used by unit tests only. 
*/ + public QuorumPolicy getElectionPolicy() { + return electionQuorumPolicy; + } + + /** + * Returns an array of nodes suitable for feeding log files for a network + * restore. + * + * @return an array of feeder nodes + */ + public RepNodeImpl[] getLogProviders() { + final Set nodes = getGroup().getDataMembers(); + return nodes.toArray(new RepNodeImpl[nodes.size()]); + } + + public ChannelTimeoutTask getChannelTimeoutTask() { + return channelTimeoutTask; + } + + public boolean isMaster() { + return masterStatus.isNodeMaster(); + } + + public MonitorEventManager getMonitorEventManager() { + return monitorEventManager; + } + + /** + * Register an AppStateMonitor with this RepNode. + */ + public void registerAppStateMonitor(AppStateMonitor stateMonitor) { + this.appStateMonitor = stateMonitor; + } + + /** + * Return the application state that defined in user specified + * AppStateMonitor. + */ + public byte[] getAppState() { + + /* + * If the AppStateMonitor is not defined, or there is currently no + * returned application state, return null. + */ + if (appStateMonitor == null || appStateMonitor.getAppState() == null) { + return null; + } + + /* Application state shouldn't be a zero length byte array. */ + if (appStateMonitor.getAppState().length == 0) { + throw new IllegalStateException + ("Application state should be a byte array larger than 0."); + } + + return appStateMonitor.getAppState(); + } + + /* Get the current master name if it exists. */ + public String getMasterName() { + if (masterStatus.getGroupMasterNameId().getId() == + NameIdPair.NULL_NODE_ID) { + return null; + } + + return masterStatus.getGroupMasterNameId().getName(); + } + + /** + * Returns the latest VLSN associated with a replicated commit. Note that + * since the lastTxnEndVLSN is computed outside the write log latch, via + * EnvironmentImpl.registerVLSN(LogItem) it's possible for it to be behind + * on an instantaneous basis, but it will eventually catch up when the + * updates quiesce. + */ + public VLSN getCurrentTxnEndVLSN() { + return repImpl.getLastTxnEnd(); + } + + /** + * Returns the instantaneous non-null DTVLSN value. The value should be non + * null once initialization has been completed. + * + * The returned value can be VLSN.UNINITIALIZED_VLSN_SEQUENCE if the node + * is a replica in a pre-dtvlsn log segment, or a master that has not as + * yet seen any acknowledged transactions. + */ + public long getDTVLSN() { + final long retValue = dtvlsn.get(); + if (VLSN.isNull(retValue)) { + throw new IllegalStateException("DTVLSN cannot be null"); + } + return retValue; + } + + /** + * Returns a DTVLSN (possibly null) for logging/debugging purposes. + */ + public long getAnyDTVLSN() { + return dtvlsn.get(); + } + + /** + * Updates the DTVLSN with a potentially new DTVLSN value. Note that this + * method is only invoked when the node is a Master. The Replica simply + * sets the DTVLSN to a specific value. + * + * @param candidateDTVLSN the new candidate DTVLSN + * + * @return the new DTVLSN which is either the candidatDTVLSN or a more + * recent DTVLSN > candidateDTVLSN + */ + public long updateDTVLSN(long candidateDTVLSN) { + if (RepImpl.isSimulatePreDTVLSNMaster()) { + return VLSN.UNINITIALIZED_VLSN_SEQUENCE; + } + return dtvlsn.updateMax(candidateDTVLSN); + } + + /** + * Sets the DTVLSN to a specific value. This method is used exclusively by + * the Replica as it maintains the DTVLSN based upon the contents of the + * replication stream. 
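+     *
+     * Editorial sketch (not part of the original source): the master-side
+     * path in updateDTVLSN() above is a monotonic "max" update in the style
+     * of the AtomicLongMax helper; conceptually, assuming an AtomicLong "v":
+     *
+     *   // returns the resulting value; never moves the DTVLSN backwards
+     *   long updateMax(AtomicLong v, long candidate) {
+     *       long cur = v.get();
+     *       while (candidate > cur && !v.compareAndSet(cur, candidate)) {
+     *           cur = v.get();  // lost the race; re-read and re-check
+     *       }
+     *       return Math.max(cur, candidate);
+     *   }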
+ * + * @return the previous DTVLSN value + */ + public long setDTVLSN(long newDTVLSN) { + return dtvlsn.set(newDTVLSN); + } + + /** + * Sets the group metadata associated with the RepNode and updates any + * local derived data. + */ + public void setGroup(RepGroupImpl repGroupImpl) { + group = repGroupImpl; + needsAcks = durabilityQuorum. + getCurrentRequiredAckCount(ReplicaAckPolicy.SIMPLE_MAJORITY) > 0; + } + + /* + * Testing API used to force this node as a master. The mastership is + * communicated upon election completion via the Listener. It's the + * responsibility of the caller to ensure that only one node is forced + * at a time via this API. + * + * @param force true to force this node as the master, false reverts back + * to use of normal (non-preemptive) elections. + */ + public void forceMaster(boolean force) + throws InterruptedException, DatabaseException { + + suggestionGenerator.forceMaster(force); + /* Initiate elections to make the changed proposal heard. */ + refreshCachedGroup(); + elections.initiateElection(group, electionQuorumPolicy); + } + + int getSecurityCheckInterval() { + return getConfigManager().getInt(SECURITY_CHECK_INTERVAL); + } + + StreamAuthenticator getAuthenticator() { + if (repImpl == null) { + return null; + } + + return repImpl.getAuthenticator(); + } + + /** + * Starts up the thread in which the node does its processing as a master + * or replica. It then waits for the newly started thread to transition it + * out of the DETACHED state, and returns upon completion of this + * transition. + * + * @throws DatabaseException + */ + private void startup(QuorumPolicy initialElectionPolicy) + throws DatabaseException { + + if (isAlive()) { + return; + } + + if (nodeState.getRepEnvState().isDetached()) { + nodeState.changeAndNotify(UNKNOWN, NameIdPair.NULL); + } + elections = new Elections(new RepElectionsConfig(this), + changeListener, + suggestionGenerator); + + repImpl.getStartupTracker().start(Phase.FIND_MASTER); + try { + + if (repImpl.getConfigManager(). + getBoolean(RepParams.RESET_REP_GROUP)) { + /* Invoked by DbResetRepGroup utility */ + reinitSelfElect(); + } else { + findMaster(); + } + this.electionQuorumPolicy = initialElectionPolicy; + + /* Electable members should participate in elections */ + if (electionQuorum.nodeTypeParticipates(nodeType)) { + elections.participate(); + } + } finally { + repImpl.getStartupTracker().stop(Phase.FIND_MASTER); + } + + start(); + } + + /** + * This method must be invoked when a RepNode is first initialized and + * subsequently every time there is a change to the replication group. + *
+     *
        + * The Master should invoke this method each time a member is added or + * removed, and a replica should invoke it each time it detects the commit + * of a transaction that modifies the membership database. + *
+     *
        + * In addition, it must be invoked after a syncup operation, since it may + * revert changes made to the membership table. + * + * @throws DatabaseException + */ + public RepGroupImpl refreshCachedGroup() + throws DatabaseException { + + setGroup(repGroupDB.getGroup()); + + elections.updateRepGroup(group); + if (nameIdPair.hasNullId()) { + RepNodeImpl n = group.getMember(nameIdPair.getName()); + if (n != null) { + + /* + * Don't update the node ID for a secondary node if + * IGNORE_SECONDARY_NODE_ID is true. In that case, we are + * trying to convert a previously electable node to a secondary + * node, so the information about the electable node ID in the + * local copy of the rep group DB should be ignored. + */ + if (!nodeType.isSecondary() || + !getConfigManager().getBoolean(IGNORE_SECONDARY_NODE_ID)) { + /* May not be sufficiently current in the rep stream. */ + nameIdPair.update(n.getNameIdPair()); + } + } + } + return group; + } + + /** + * Removes a node so that it's no longer a member of the group. + * + * Note that names referring to removed nodes cannot be reused. + * + * @param nodeName identifies the node to be removed + * + * @throws MemberNotFoundException if the node denoted by + * memberName is not a member of the replication group. + * + * @throws MasterStateException if the member being removed is currently + * the Master + * + * @see Member Deletion + */ + public void removeMember(String nodeName) { + removeMember(nodeName, false); + } + + /** + * Remove or delete a node from the group. If deleting a node, the node + * must not be active. + * + *
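+     *
+     * Editorial sketch (not part of the original source): applications
+     * typically reach this operation through the admin utility rather than
+     * RepNode; assuming a configured ReplicationGroupAdmin named "admin":
+     *
+     *   admin.removeMember("node3");  // "node3" can never be reused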
+     *
        Note that names referring to removed nodes cannot be reused, but + * names for deleted nodes can be. + * + * @param nodeName identifies the node to be removed or deleted + * + * @param delete whether to delete the node rather than just remove it + * + * @throws MemberActiveException if {@code delete} is {@code true} and + * the node is currently active + * + * @throws MemberNotFoundException if the node denoted by + * memberName is not a member of the replication group. + * + * @throws MasterStateException if the member being removed or deleted is + * currently the Master + */ + public void removeMember(String nodeName, boolean delete) { + checkValidity( + nodeName, delete ? "Deleting member" : "Removing member"); + + if (delete && feederManager.activeReplicas().contains(nodeName)) { + throw new MemberActiveException( + "Attempt to delete an active node: " + nodeName); + } + + /* + * First remove it from the cached group, effectively setting new + * durability requirements, for the ensuing group db updates. + */ + RepNodeImpl node = group.removeMember(nodeName, delete); + + /* + * Shutdown any feeder that may be active with the replica. Unless + * deleting, any subsequent attempts by the replica to rejoin the group + * will result in a failure. + */ + feederManager.shutdownFeeder(node); + repGroupDB.removeMember(node, delete); + } + + /** + * Update the network address of a node. + * + * Note that an alive node's address can't be updated, we'll throw an + * ReplicaStateException for this case. + * + * @param nodeName identifies the node to be updated + * @param newHostName the new host name of this node + * @param newPort the new port of this node + */ + public void updateAddress(String nodeName, + String newHostName, + int newPort) { + final RepNodeImpl node = + checkValidity(nodeName, "Updating node's address"); + + /* Check whether the node is still alive. */ + if (feederManager.getFeeder(nodeName) != null) { + throw new ReplicaStateException + ("Can't update the network address for a live node."); + } + + /* Update the node information in the group database. */ + node.setHostName(newHostName); + node.setPort(newPort); + node.setQuorumAck(false); + repGroupDB.updateMember(node, true); + } + + /** + * Transfer the master role to one of the specified replicas. + *
+     *
        + * We delegate most of the real work to an instance of the {@link + * MasterTransfer} class. Here, after some simple initial validity + * checking, we're concerned with coordinating the potential for multiple + * overlapping Master Transfer operation attempts. The possible outcomes + * are: + *
+     *
+     * 1. complete success ({@code done == true})
+     *    - don't unblock txns here; that'll happen automatically as part of
+     *      the usual handling when the environment transitions from
+     *      master->replica state.
+     *    - don't clear xfrInProgress, because we don't want to allow another
+     *      attempt to supersede
+     *
+     * 2. timeout before establishing a winner (no superseder)
+     *    - unblock txns
+     *    - clear xfrInProgress
+     *
+     * 3. superseded (see {@link #setUpTransfer})
+     *    - abort existing op (if permitted), unblock txns before unleashing
+     *      the new one
+     *    - replace xfrInProgress
+     *
+     * 4. env is closed (or invalidated because of an error) during the
+     *    operation
+     *    - release the block
+     *    - leave xfrInProgress as is.
+     *
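+     *
+     * Editorial sketch (not part of the original source): applications
+     * initiate a transfer through the public handle; assuming the master's
+     * ReplicatedEnvironment handle is named "repEnv":
+     *
+     *   Set<String> targets =
+     *       new HashSet<>(Arrays.asList("rep2", "rep3"));
+     *   String winner = repEnv.transferMaster(targets, 30, TimeUnit.SECONDS);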
        + * + * @param replicas candidate targets for new master role + * @param timeout time limit, in msec + * @param force whether to replace any existing, in-progress + * transfer operation + */ + public String transferMaster(Set replicas, + long timeout, + boolean force) { + if (replicas == null || replicas.isEmpty()) { + throw new IllegalArgumentException + ("Parameter 'replicas' cannot be null or empty"); + } + if (!nodeState.getRepEnvState().isMaster()) { + throw new IllegalStateException("Not currently master"); + } + if (replicas.contains(getNodeName())) { + + /* + * The local node is on the list of candidate new masters, and + * we're already master: the operation is trivially satisfied. + */ + return getNodeName(); + } + for (String rep : replicas) { + RepNodeImpl node = group.getNode(rep); + if (node == null || node.isRemoved()) { + throw new IllegalArgumentException + ("Node '" + rep + + "' is not currently an active member of the group"); + } else if (!node.getType().isElectable()) { + throw new IllegalArgumentException + ("Node '" + rep + + "' must have node type ELECTABLE, but had type " + + node.getType()); + } + } + + MasterTransfer xfr = setUpTransfer(replicas, timeout, force); + boolean done = false; + try { + String winner = xfr.transfer(); + done = true; + return winner; + } finally { + synchronized (this) { + if (xfrInProgress == xfr && !done) { + xfrInProgress = null; + } + } + } + } + + /** + * Sets up a Master Transfer operation, ensuring that only one operation + * can be in progress at a time. + */ + synchronized private MasterTransfer setUpTransfer(Set replicas, + long timeout, + boolean force) { + boolean reject = false; // initial guess, refine below if nec. + if (xfrInProgress != null) { + reject = true; // next best guess, refine below again if nec. + + /* + * If the new operation is "forcing", see if we can abort the + * existing one. + */ + if (force && + xfrInProgress.abort + (new MasterTransferFailureException("superseded"))) { + reject = false; + + repImpl.unblockTxnCompletion(); + } + } + if (reject) { + throw new MasterTransferFailureException + ("another Master Transfer (started at " + + new Date(xfrInProgress.getStartTime()) + + ") is already in progress"); + } + xfrInProgress = new MasterTransfer(replicas, timeout, this); + return xfrInProgress; + } + + public MasterTransfer getActiveTransfer() { + return xfrInProgress; + } + + /** + * Called by the RepNode when a transition to replica status has completely + * finished. + */ + public synchronized void clearActiveTransfer() { + xfrInProgress = null; + } + + /** + * Performs some basic validity checking, common code for some + * Group Membership operations. + * + * @param nodeName name of a replica node on which an operation is + * to be performed + * @param actionName textual description of the operation (for + * exception message) + * @return the named node + */ + private RepNodeImpl checkValidity(String nodeName, String actionName) + throws MemberNotFoundException { + + if (!nodeState.getRepEnvState().isMaster()) { + throw EnvironmentFailureException.unexpectedState + ("Not currently a master. 
" + actionName + " must be " + + "invoked on the node that's currently the master."); + } + + final RepNodeImpl node = group.getNode(nodeName); + if (node == null) { + throw new MemberNotFoundException("Node:" + nodeName + + "is not a member of the group:" + + group.getName()); + } + + if (node.isRemoved() && node.isQuorumAck()) { + throw new MemberNotFoundException("Node:" + nodeName + + "is not currently a member of " + + "the group:" + group.getName() + + " It had been removed."); + } + + /* Check if the node is the master itself. */ + if (nodeName.equals(getNodeName())) { + throw new MasterStateException(getRepImpl(). + getStateChangeEvent()); + } + + return node; + } + + /** + * Updates the cached group info for the node, avoiding a database read, + * if the global CBVLSN is not defunct. If it is defunct, does nothing. + * + * @param updateNameIdPair the node whose localCBVLSN must be updated. + * @param barrierState the new node syncup state + */ + public void updateGroupInfo(NameIdPair updateNameIdPair, + CleanerBarrierState barrierState) { + globalCBVLSN.updateGroupInfo(updateNameIdPair, barrierState); + } + + /** + * When the GlobalVLSN is not defunct, recalculates it. Provoked by Replay + * to ensure that the replica's GlobalVLSN is up to date. When the + * GlobalVLSN is defunct, does nothing. + */ + public void recalculateGlobalCBVLSN() { + globalCBVLSN.recalculate(group); + } + + LocalCBVLSNTracker getCBVLSNTracker() { + return cbvlsnTracker; + } + + /** + * Finds a master node. + * + * @throws DatabaseException + */ + private void findMaster() + throws DatabaseException { + + refreshCachedGroup(); + elections.startLearner(); + LoggerUtils.info(logger, repImpl, "Current group size: " + + group.getElectableGroupSize()); + final RepNodeImpl thisNode = group.getNode(nameIdPair.getName()); + if ((thisNode == null) && + + /* + * Secondary nodes are not stored in the group DB, so they will not + * be found even though they are not new. Use group UUID to + * distinguish -- it is only unknown if the node is new. + */ + (nodeType.isElectable() || group.hasUnknownUUID())) { + + /* A new node */ + LoggerUtils.info(logger, repImpl, "New node " + nameIdPair + + " unknown to rep group"); + Set helperSockets = repImpl.getHelperSockets(); + + /* + * Not present in the replication group. Use the helper, to get + * to a master and enter the group. + */ + if ((group.getElectableGroupSize() == 0) && + (helperSockets.size() == 1) && + nodeType.isElectable() && + serviceDispatcher.getSocketAddress(). + equals(helperSockets.iterator().next())) { + /* A startup situation, should this node become master. */ + selfElect(); + elections.updateRepGroup(group); + /* Update minJEVersion for a new or reset group. */ + globalCBVLSN.setDefunctJEVersion(this); + return; + } + try { + queryGroupForMembership(); + } catch (InterruptedException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } else if ((thisNode != null) && thisNode.isRemoved()) { + throw EnvironmentFailureException.unexpectedState + ("Node: " + nameIdPair.getName() + + " was previously deleted."); + } else { + + /* An existing node */ + LoggerUtils.info(logger, repImpl, + "Existing node " + nameIdPair.getName() + + " querying for a current master."); + + /* + * The group has other members, see if they know of a master, + * along with any helpers that were also supplied. 
+ */ + Set helperSockets = repImpl.getHelperSockets(); + helperSockets.addAll(group.getAllHelperSockets()); + elections.getLearner().queryForMaster(helperSockets); + } + } + + /** + * This method enforces the requirement that all addresses within a + * replication group, must be loopback addresses or they must all be + * non-local ip addresses. Mixing them means that the node with a loopback + * address cannot be contacted by a different node. Addresses specified by + * hostnames that currently have no DNS entries are assumed to not be + * loopback addresses. + * + * @param helperSockets the helper nodes used by this node when contacting + * the master. + */ + private void checkLoopbackAddresses(Set helperSockets) { + + final InetAddress myAddress = getSocket().getAddress(); + final boolean isLoopback = myAddress.isLoopbackAddress(); + + for (InetSocketAddress socketAddress : helperSockets) { + final InetAddress nodeAddress = socketAddress.getAddress(); + + /* + * If the node address was specified with a hostname that does not, + * at least currently, have a DNS entry, then the address will be + * null. We can safely assume this will not happen for loopback + * addresses, whose host names and addresses are both fixed. + */ + final boolean nodeAddressIsLoopback = + (nodeAddress != null) && nodeAddress.isLoopbackAddress(); + + if (nodeAddressIsLoopback == isLoopback) { + continue; + } + String message = getSocket() + + " the address associated with this node, " + + (isLoopback? "is " : "is not ") + "a loopback address." + + " It conflicts with an existing use, by a different node " + + " of the address:" + + socketAddress + + (!isLoopback ? " which is a loopback address." : + " which is not a loopback address.") + + " Such mixing of addresses within a group is not allowed, " + + "since the nodes will not be able to communicate with " + + "each other."; + throw new IllegalArgumentException(message); + } + } + + /** + * Communicates with existing nodes in the group in order to figure out how + * to start up, in the case where the local node does not appear to be in + * the (local copy of the) GroupDB, typically because the node is starting + * up with an empty env directory. It could be that this is a new node + * (never before been part of the group). Or it could be a pre-existing + * group member that has lost its env dir contents and wants to be restored + * via a Network Restore operation. + *
+     *
        + * We first try to find a currently running master node. (An authoritative + * master can easily handle either of the above-mentioned situations.) If + * we can't find a master, we look for other running nodes that may know of + * us (by asking them for their Group information). + *
+     *
        + * We query the designated helpers and all known learners. The helpers are + * the ones that were identified via the node's configuration, while the + * learners are the ones currently in the member database. We use both in + * order to cast the widest possible net. + *
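+     *
+     * Editorial sketch (not part of the original source): the candidate set
+     * is effectively the union of the two sources, as the method body below
+     * computes:
+     *
+     *   Set<InetSocketAddress> helpers =
+     *       new HashSet<>(repImpl.getHelperSockets());
+     *   helpers.addAll(group.getAllHelperSockets());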
+     *
        + * Returns normally when the master is found. + * + * @throws InterruptedException if the current thread is interrupted, + * typically due to a shutdown + * @throws InsufficientLogException if the environment requires a network + * restore + * @see #findRestoreSuppliers + */ + private void queryGroupForMembership() + throws InterruptedException { + + Set helperSockets = repImpl.getHelperSockets(); + + checkLoopbackAddresses(helperSockets); + + /* + * Not in the rep group. Use the designated helpers and other members + * of the group to help us figure out how to get started. + */ + final Set helpers = new HashSet<>(helperSockets); + helpers.addAll(group.getAllHelperSockets()); + if (helpers.isEmpty()) { + throw EnvironmentFailureException.unexpectedState + ("Need a helper to add a new node into the group"); + } + + NameIdPair groupMasterNameId; + while (true) { + assert TestHookExecute.doHookIfSet( + queryGroupForMembershipBeforeQueryForMaster, + nameIdPair.getName()); + elections.getLearner().queryForMaster(helpers); + if (isShutdownOrInvalid()) { + throw new InterruptedException("Node is shutdown or invalid"); + } + groupMasterNameId = masterStatus.getGroupMasterNameId(); + if (!groupMasterNameId.hasNullId()) { + /* A new, or pre-query, group master. */ + if (nameIdPair.hasNullId() && + groupMasterNameId.getName().equals(nameIdPair.getName())) { + + /* + * Residual obsolete information in replicas, ignore it. + * Can't be master if we don't know our own id, but some + * other node does! This state means that the node was a + * master in the recent past, but has had its environment + * deleted since that time. + */ + try { + Thread.sleep(MASTER_QUERY_INTERVAL); + } catch (InterruptedException e) { + throw EnvironmentFailureException.unexpectedException( + e); + } + continue; + } + + if (checkGroupMasterIsAlive(groupMasterNameId)) { + /* Use the current group master if it's alive. */ + break; + } + } + + /* + * If there's no master, or the last known master cannot be + * reached, see if anyone thinks we're actually already in the + * group, and could supply us with a Network Restore. (Remember, + * we're here only if we didn't find ourselves in the local + * GroupDB. So we could be in a group restore from backup + * situation.) + */ + findRestoreSuppliers(helpers); + + assert TestHookExecute.doHookIfSet( + queryGroupForMembershipBeforeSleepHook, nameIdPair.getName()); + + /* + * The node could have been shutdown or invalidated while we were + * looking for restore suppliers + */ + if (isShutdownOrInvalid()) { + throw new InterruptedException("Node is shutdown or invalid"); + } + + Thread.sleep(MASTER_QUERY_INTERVAL); + } + LoggerUtils.info(logger, repImpl, "New node " + nameIdPair.getName() + + " located master: " + groupMasterNameId); + } + + /** + * Check that the master found by querying other group nodes is indeed + * alive and that we are not dealing with obsolete cached information. + * + * @return true if the master node could be contacted and was truly alive + * + * TODO: handle protocol version mismatch here and in DbPing, also + * consolidate code so that a single copy is shared. + */ + private boolean checkGroupMasterIsAlive(NameIdPair groupMasterNameId) { + + DataChannel channel = null; + + try { + final InetSocketAddress masterSocket = + masterStatus.getGroupMaster(); + + final BinaryNodeStateProtocol protocol = + new BinaryNodeStateProtocol(NameIdPair.NOCHECK, null); + + /* Build the connection. 
Set the parameter connectTimeout.*/ + channel = repImpl.getChannelFactory(). + connect(masterSocket, + new ConnectOptions(). + setTcpNoDelay(true). + setOpenTimeout(5000). + setReadTimeout(5000)); + ServiceDispatcher.doServiceHandshake + (channel, BinaryNodeStateService.SERVICE_NAME); + + /* Send a NodeState request to the node. */ + protocol.write + (protocol.new + BinaryNodeStateRequest(groupMasterNameId.getName(), + group.getName()), + channel); + + /* Get the response and return the NodeState. */ + BinaryNodeStateResponse response = + protocol.read(channel, BinaryNodeStateResponse.class); + + ReplicatedEnvironment.State state = response.getNodeState(); + return (state != null) && state.isMaster(); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, + "Queried master:" + groupMasterNameId + + " unavailable. Reason:" + e); + return false; + } finally { + if (channel != null) { + try { + channel.close(); + } catch (IOException ioe) { + /* Ignore it */ + } + } + } + } + + /** + * Sets up a Network Restore, as part of the process of restoring an entire + * group from backup, by producing an appropriate {@code + * InsufficientLogException} if possible. + *
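+     *
+     * Editorial sketch (not part of the original source): a caller that
+     * receives the InsufficientLogException typically runs a network restore
+     * with it before retrying the environment open, e.g.:
+     *
+     *   } catch (InsufficientLogException ile) {
+     *       NetworkRestore restore = new NetworkRestore();
+     *       NetworkRestoreConfig config = new NetworkRestoreConfig();
+     *       config.setRetainLogFiles(false);  // discard obsolete files
+     *       restore.execute(ile, config);
+     *       // ... then re-open the ReplicatedEnvironment
+     *   }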
+     *
        + * Queries each of the supplied helper hosts for their notion of the group + * make-up. If any of them consider us to be already in the group, then + * instead of joining the group as a new node we ought to try a Network + * Restore; and the node(s) that do already know of us are the suitable + * suppliers for it. + * + * @throws InsufficientLogException in the successful case, if one or more + * suitable suppliers for a Network Restore can be found; otherwise just + * returns. + * + * @throws InterruptedException if the node was shutdown or invalidated + * while we were looking for a network restore supplier + */ + public void findRestoreSuppliers(Set helpers) + throws InterruptedException { + + final Set suppliers = new HashSet<>(); + RepGroupProtocol protocol = + new RepGroupProtocol(group.getName(), nameIdPair, repImpl, + repImpl.getChannelFactory()); + + for (InetSocketAddress helper : helpers) { + + assert TestHookExecute.doHookIfSet( + beforeFindRestoreSupplierHook, nameIdPair.getName()); + + /* + * The node could have been shutdown or invalidated while we were + * looking for a network restore supplier [#25314] + */ + if (isShutdownOrInvalid()) { + throw new InterruptedException("Node is shutdown or invalid"); + } + + MessageExchange msg = + protocol.new MessageExchange(helper, + GroupService.SERVICE_NAME, + protocol.new GroupRequest()); + + /* + * Just as we did in the queryForMaster() case, quietly ignore any + * unsurprising response error or socket exceptions; we'll retry + * later if we end up not finding any Network Restore suppliers. + */ + msg.run(); + ResponseMessage response = msg.getResponseMessage(); + if (response == null || + protocol.RGFAIL_RESP.equals(response.getOp())) { + continue; + } else if (!protocol.GROUP_RESP.equals(response.getOp())) { + LoggerUtils.warning(logger, repImpl, + "Expected GROUP_RESP, got " + + response.getOp() + ": " + response); + continue; + } + GroupResponse groupResp = (GroupResponse) response; + + /* + * If the response from the remote node shows that I am already a + * member of the group, add the node to the list of nodes that will + * serve the Network Restore. + */ + RepGroupImpl groupInfo = groupResp.getGroup(); + RepNodeImpl me = groupInfo.getNode(nameIdPair.getName()); + if (me == null || me.isRemoved() || !me.isQuorumAck()) { + continue; + } + + ReplicationNode supplier = groupInfo.getMember(helper); + if (supplier != null) { + suppliers.add(supplier); + } + } + + if (suppliers.isEmpty()) { + return; + } + + throw new InsufficientLogException(this, suppliers); + } + + /** + * Elects this node as the master. The operation is only valid when the + * group consists of just this node, and when this is an ELECTABLE node. + * + * @throws DatabaseException + * @throws IllegalStateException if the node type is not ELECTABLE + */ + private void selfElect() + throws DatabaseException { + + if (!nodeType.isElectable()) { + throw new IllegalStateException( + "Cannot elect node " + nameIdPair.getName() + + " as master because its node type, " + nodeType + + ", is not ELECTABLE"); + } + nameIdPair.setId(RepGroupImpl.getFirstNodeId()); + + /* Master by default of a nascent group. */ + Proposal proposal = new TimebasedProposalGenerator().nextProposal(); + elections.getLearner().processResult(proposal, + suggestionGenerator.get(proposal)); + LoggerUtils.info(logger, repImpl, "Nascent group. 
" + + nameIdPair.getName() + + " is master by virtue of being the first node."); + masterStatus.sync(); + nodeState.changeAndNotify(MASTER, masterStatus.getNodeMasterNameId()); + repImpl.getVLSNIndex().initAsMaster(); + /* + * Start it off as this value. It will be rapidly updated, as + * transactions are committed. + */ + dtvlsn.updateMax(VLSN.UNINITIALIZED_VLSN_SEQUENCE); + repGroupDB.addFirstNode(); + refreshCachedGroup(); + /* Unsync so that the run loop does not call for an election. */ + masterStatus.unSync(); + } + + /** + * Establishes this node as the master, after re-initializing the group + * with this as the sole node in the group. This method is used solely + * as part of the DbResetRepGroup utility. + * + * @throws IllegalStateException if the node type is not ELECTABLE + */ + private void reinitSelfElect() { + if (!nodeType.isElectable()) { + throw new IllegalStateException( + "Cannot elect node " + nameIdPair.getName() + + " as master because its node type, " + nodeType + + ", is not ELECTABLE"); + } + + /* Establish an empty group so transaction commits can proceed. */ + setGroup(repGroupDB.emptyGroup); + + LoggerUtils.info(logger, repImpl, "Reinitializing group to node " + + nameIdPair); + + /* + * Unilaterally transition the nodeState to Master, so that write + * transactions needed to reset the group and establish this node can + * be issued against the environment. + */ + nodeState.changeAndNotify(MASTER, masterStatus.getNodeMasterNameId()); + repImpl.getVLSNIndex().initAsMaster(); + + for (ReplayTxn replayTxn : + repImpl.getTxnManager().getTxns(ReplayTxn.class)) { + /* + * We don't have a node id at this point, simply use 1 since we + * know it's valid. It will subsequently be set to the the next + * node id in sequence. + */ + final int nodeId = 1; + LoggerUtils.info(logger, repImpl, + "Aborting incomplete replay txn:" + + nameIdPair + " as part of group reset"); + /* The DTVLSN will be corrected when it's written to the log */ + replayTxn.abort(ReplicationContext.MASTER, nodeId, + VLSN.NULL_VLSN_SEQUENCE); + } + + /* + * Start using new log files. The file ensures that we can safely + * truncate the past VLSNs. + */ + repImpl.forceLogFileFlip(); + + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + + /* + * The checkpoint ensures that we do not have to replay VLSNs from the + * prior group and that we have a complete VLSN index on disk. + */ + repImpl.invokeCheckpoint(ckptConfig, "Reinit of RepGroup"); + VLSN lastOldVLSN = repImpl.getVLSNIndex().getRange().getLast(); + + /* Now create the new rep group on disk. */ + repGroupDB.reinitFirstNode(lastOldVLSN); + refreshCachedGroup(); + + long lastOldFile = + repImpl.getVLSNIndex().getLTEFileNumber(lastOldVLSN); + + /* + * Discard the VLSN index covering the pre group reset VLSNS, to ensure + * that the pre reset part of the log is never replayed. We don't want + * to replay this part of the log, since it contains references to + * repnodes via node ids that are no longer part of the reset rep + * group. Note that we do not reuse rep node ids, that is, rep node id + * sequence continues across the reset operation and is not itself + * reset. Nodes joining the new group will need to do a network restore + * when they join the group. + * + * Don't perform the truncation if RESET_REP_GROUP_RETAIN_UUID is true. 
+ * In that case, we are only removing the rep group members, but + * retaining the remaining information, because we will be restarting + * the rep group in place with an old secondary acting as an electable + * node. + */ + final boolean retainUUID = + getConfigManager().getBoolean(RESET_REP_GROUP_RETAIN_UUID); + if (!retainUUID) { + repImpl.getVLSNIndex().truncateFromHead(lastOldVLSN, lastOldFile); + } + + elections.startLearner(); + /* Unsync so that the run loop does not call for an election. */ + masterStatus.unSync(); + + /* Initialize minJEVersion for a new or reset group. */ + globalCBVLSN.setDefunctJEVersion(this); + } + + + /** + * When a disk limit is violated, the node state will transition to + * UNKNOWN and wait for disk to become available again before it + * transitions to a Replica or Master State. This method will not exit + * until one of the following occurs: + * + * 1. The disk limit violation is cleared. + * 2. The node is shutdown or invalidated. + * 3. The thread is interrupted, in which case InterruptedException is + * thrown. + */ + private void waitWhileDiskLimitViolation() + throws InterruptedException { + + LoggerUtils.info(logger, repImpl, + "Node waiting for disk space to become available. " + + "Disk limit violation:" + getRepImpl().getDiskLimitViolation()); + + while (getRepImpl().getDiskLimitViolation() != null) { + + if (isShutdownOrInvalid()) { + return; + } + + Thread.sleep(1000); + } + + LoggerUtils.info(logger, repImpl, "Disk limit violation cleared."); + } + + /** + * The top level Master/Feeder or Replica loop in support of replication. + * It's responsible for driving the node level state changes resulting + * from elections initiated either by this node, or by other members of the + * group. + *
+     *
        + * The thread is terminated via an orderly shutdown initiated as a result + * of an interrupt issued by the shutdown() method. Any exception that is + * not handled by the run method itself is caught by the thread's uncaught + * exception handler, and results in the RepImpl being made invalid. In + * that case, the application is responsible for closing the Replicated + * Environment, which will provoke the shutdown. + *
+     *
        + * Note: This method currently runs either the feeder loop or the replica + * loop. With R to R support, it would be possible for a Replica to run + * both. This will be a future feature. + */ + @Override + public void run() { + /* Set to indicate an error-initiated shutdown. */ + Error repNodeError = null; + try { + LoggerUtils.info(logger, repImpl, + "Node " + nameIdPair.getName() + " started" + + (!nodeType.isElectable() ? + " as " + nodeType : + "")); + while (!isShutdownOrInvalid()) { + if (nodeState.getRepEnvState() != UNKNOWN) { + /* Avoid unnecessary state changes. */ + nodeState.changeAndNotify(UNKNOWN, NameIdPair.NULL); + } + + if (getRepImpl().getDiskLimitViolation() != null) { + /* + * Progress is not possible while out of disk. So stay in + * the UNKNOWN state, participating in elections at + * election priority zero to help establish election quorum + * but avoid being elected master. + */ + waitWhileDiskLimitViolation(); + } + + /* + * Initiate elections if we don't have a group master, or there + * is a master, but we were unable to use it. + */ + if (masterStatus.getGroupMasterNameId().hasNullId() || + masterStatus.inSync()) { + + /* + * But we can't if we don't have our own node ID yet or if + * we are not ELECTABLE. + */ + if (nameIdPair.hasNullId() || !nodeType.isElectable()) { + queryGroupForMembership(); + } else { + elections.initiateElection(group, electionQuorumPolicy); + + /* + * Subsequent elections must always use a simple + * majority. + */ + electionQuorumPolicy = QuorumPolicy.SIMPLE_MAJORITY; + } + /* In case elections were shut down. */ + if (isShutdownOrInvalid()) { + return; + } + } + + /* Start syncing this node to the new group master */ + masterStatus.sync(); + + if (masterStatus.isNodeMaster()) { + repImpl.getVLSNIndex().initAsMaster(); + replica.masterTransitionCleanup(); + + /* Master is ready for business. */ + nodeState.changeAndNotify + (MASTER, masterStatus.getNodeMasterNameId()); + + /* + * Update the JE version information stored for the master + * in the RepGroupDB, if needed. + */ + maybeUpdateMasterJEVersion(); + + feederManager.runFeeders(); + + /* + * At this point, the feeder manager has been shutdown. + * Re-initialize the VLSNIndex put latch mechanism, which + * is present on masters to maintain a tip cache of the + * last record on the replication stream, and by all + * nodes when doing checkpoint vlsn consistency waiting. + * Create a new feeder manager, should this node become a + * master later on. + * Set the node to UNKNOWN state right away, because the + * MasterTxn will use node state to prevent the advent of + * any replicated writes. Once the VLSNIndex is + * initialized for replica state, the node will NPE if it + * attempts execute replicated writes. + */ + nodeState.changeAndNotify(UNKNOWN, NameIdPair.NULL); + repImpl.getVLSNIndex().initAsReplica(); + assert runConvertHooks(); + feederManager = new FeederManager(this); + } else { + + /* + * Replica will notify us when connection is successfully + * made, and Feeder handshake done, at which point we'll + * update nodeState. 
+ */ + replica.replicaTransitionCleanup(); + replica.runReplicaLoop(); + } + } + } catch (InterruptedException e) { + LoggerUtils.fine(logger, repImpl, + "RepNode main thread interrupted - " + + " forced shutdown."); + } catch (GroupShutdownException e) { + saveShutdownException(e); + LoggerUtils.fine(logger, repImpl, + "RepNode main thread sees group shutdown - " + e); + } catch (InsufficientLogException e) { + saveShutdownException(e); + } catch (RuntimeException e) { + LoggerUtils.fine(logger, repImpl, + "RepNode main thread sees runtime ex - " + e); + saveShutdownException(e); + throw e; + } catch (Error e) { + LoggerUtils.fine(logger, repImpl, e + + " incurred during repnode loop"); + repNodeError = e; + repImpl.invalidate(e); + } finally { + try { + LoggerUtils.info(logger, repImpl, + "RepNode main thread shutting down."); + + if (repNodeError != null) { + LoggerUtils.info(logger, repImpl, + "Node state at shutdown:\n"+ + repImpl.dumpState()); + throw repNodeError; + } + Throwable exception = getSavedShutdownException(); + + if (exception == null) { + LoggerUtils.fine(logger, repImpl, + "Node state at shutdown:\n"+ + repImpl.dumpState()); + } else { + LoggerUtils.info(logger, repImpl, + "RepNode shutdown exception:\n" + + exception.getMessage() + + repImpl.dumpState()); + } + + try { + shutdown(); + } catch (DatabaseException e) { + RepUtils.chainExceptionCause(e, exception); + LoggerUtils.severe(logger, repImpl, + "Unexpected exception during shutdown" + + e); + throw e; + } + } catch (InterruptedException e1) { + // Ignore exceptions on exit + } + nodeState.changeAndNotify(DETACHED, NameIdPair.NULL); + cleanup(); + } + } + + /** + * Update the information stored for the master in the RepGroupDB if + * storing it is supported and the current version is different from the + * recorded version. + */ + private void maybeUpdateMasterJEVersion() { + + /* Check if storing JE version information is supported */ + if (group.getFormatVersion() < RepGroupImpl.FORMAT_VERSION_3) { + return; + } + + final JEVersion currentJEVersion = repImpl.getCurrentJEVersion(); + final RepNodeImpl node = group.getMember(nameIdPair.getName()); + + if (currentJEVersion.equals(node.getJEVersion())) { + return; + } + node.updateJEVersion(currentJEVersion); + repGroupDB.updateMember(node, false); + } + + void notifyReplicaConnected() { + nodeState.changeAndNotify(REPLICA, masterStatus.getNodeMasterNameId()); + } + + /** + * Returns true if the node has been shutdown or if the underlying + * environment has been invalidated. It's used as the basis for exiting + * the FeederManager or the Replica. + */ + public boolean isShutdownOrInvalid() { + if (isShutdown()) { + return true; + } + if (getRepImpl().wasInvalidated()) { + saveShutdownException(getRepImpl().getInvalidatingException()); + return true; + } + return false; + } + + /** + * Used to shutdown all activity associated with this replication stream. + * If method is invoked from different thread of control, it will wait + * until the rep node thread exits. If it's from the same thread, it's the + * caller's responsibility to exit the thread upon return from this method. + * + * @throws InterruptedException + * @throws DatabaseException + */ + public void shutdown() + throws InterruptedException, DatabaseException { + + if (shutdownDone(logger)) { + return; + } + + LoggerUtils.info(logger, repImpl, + "Shutting down node " + nameIdPair + + " DTVLSN:" + getAnyDTVLSN()); + + /* Fire a LeaveGroup if this RepNode is valid. 
*/ + if (repImpl.isValid()) { + monitorEventManager.notifyLeaveGroup(getLeaveReason()); + } + + /* Stop accepting any new network requests. */ + serviceDispatcher.preShutdown(); + + if (elections != null) { + elections.shutdown(); + } + + /* Initiate the FeederManger soft shutdown if it's active. */ + feederManager.shutdownQueue(); + + if ((getReplicaCloseCatchupMs() >= 0) && + (nodeState.getRepEnvState().isMaster())) { + + /* + * A group shutdown. Shutting down the queue will cause the + * FeederManager to shutdown its feeders and exit. + */ + this.join(); + } + + /* Shutdown the replica, if it's active. */ + replica.shutdown(); + + shutdownThread(logger); + + LoggerUtils.info(logger, repImpl, + "RepNode main thread: " + this.getName() + " exited."); + /* Shut down all other services. */ + utilityServicesShutdown(); + + /* Shutdown all the services before shutting down the dispatcher. */ + MasterTransfer mt = getActiveTransfer(); + if (mt != null) { + Exception ex = getSavedShutdownException(); + if (ex == null) { + ex = new MasterTransferFailureException("shutting down"); + } + mt.abort(ex); + } + serviceDispatcher.shutdown(); + LoggerUtils.info(logger, repImpl, + nameIdPair + " shutdown completed."); + masterStatus.setGroupMaster(null, 0, NameIdPair.NULL); + readyLatch.releaseAwait(getSavedShutdownException()); + + /* Cancel the TimerTasks. */ + channelTimeoutTask.cancel(); + timer.cancel(); + } + + /** + * Soft shutdown for the RepNode thread. Note that since the thread is + * shared by the FeederManager and the Replica, the FeederManager or + * Replica specific soft shutdown actions should already have been done + * earlier. + */ + @Override + protected int initiateSoftShutdown() { + return getThreadWaitInterval(); + } + + /* Get the shut down reason for this node. */ + private LeaveReason getLeaveReason() { + LeaveReason reason = null; + + Exception exception = getSavedShutdownException(); + if (exception == null) { + reason = LeaveReason.NORMAL_SHUTDOWN; + } else if (exception instanceof GroupShutdownException) { + reason = LeaveReason.MASTER_SHUTDOWN_GROUP; + } else { + reason = LeaveReason.ABNORMAL_TERMINATION; + } + + return reason; + } + + private void utilityServicesShutdown() { + if (ldiff != null) { + ldiff.shutdown(); + } + + if (logFeederManager != null) { + logFeederManager.shutdown(); + } + + if (binaryNodeStateService != null) { + binaryNodeStateService.shutdown(); + } + + if (nodeStateService != null) { + serviceDispatcher.cancel(NodeStateService.SERVICE_NAME); + } + + if (groupService != null) { + serviceDispatcher.cancel(GroupService.SERVICE_NAME); + } + } + + /** + * Must be invoked on the Master via the last open handle. + * + * Note that the method itself does not shutdown the group. It merely + * sets replicaCloseCatchupMs, indicating that the ensuing handle close + * should shutdown the Replicas. The actual coordination with the closing + * of the handle is implemented by ReplicatedEnvironment.shutdownGroup(). + * + * @see ReplicatedEnvironment#shutdownGroup(long, TimeUnit) + */ + public void shutdownGroupOnClose(long timeoutMs) + throws IllegalStateException { + + if (!nodeState.getRepEnvState().isMaster()) { + throw new IllegalStateException + ("Node state must be " + MASTER + + ", not " + nodeState.getRepEnvState()); + } + replicaCloseCatchupMs = (timeoutMs < 0) ? 0 : timeoutMs; + } + + /** + * JoinGroup ensures that a RepNode is actively participating in a + * replication group. It's invoked each time a replicated environment + * handle is created. 
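+     *
+     * Editorial sketch (not part of the original source): joining is driven
+     * by handle creation; roughly, assuming hypothetical names:
+     *
+     *   ReplicationConfig repConfig =
+     *       new ReplicationConfig("myGroup", "node1", "host1:5001");
+     *   repConfig.setHelperHosts("host2:5001");
+     *   ReplicatedEnvironment repEnv =
+     *       new ReplicatedEnvironment(envDir, repConfig, envConfig);
+     *
+     * The constructor blocks in joinGroup() until this node is MASTER or
+     * REPLICA (or UNKNOWN, if so configured), or the setup times out.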
+ * + * If the node is already participating in a replication group, because + * it's not the first handle to the environment, it will return without + * having to wait. Otherwise it will wait until a master is elected and + * this node is active, either as a Master, or as a Replica. + * + * If the node joins as a replica, it will wait further until it has become + * sufficiently consistent as defined by its consistency argument. By + * default it uses PointConsistencyPolicy to ensure that it is at least as + * consistent as the master as of the time the handle was opened. + * + * A node can also join in the Unknown state if it has been configured to + * do so via ENV_UNKNOWN_STATE_TIMEOUT. + * + * @throws UnknownMasterException If a master cannot be established within + * ENV_SETUP_TIMEOUT, unless ENV_UNKNOWN_STATE_TIMEOUT has + * been set to allow the creation of a handle while in the UNKNOWN state. + * + * @return MASTER, REPLICA, or UNKNOWN (if ENV_UNKNOWN_STATE_TIMEOUT + * is set) + */ + public ReplicatedEnvironment.State + joinGroup(ReplicaConsistencyPolicy consistency, + QuorumPolicy initialElectionPolicy) + throws ReplicaConsistencyException, DatabaseException { + + final JoinGroupTimeouts timeouts = + new JoinGroupTimeouts(getConfigManager()); + + startup(initialElectionPolicy); + LoggerUtils.finest(logger, repImpl, "joinGroup " + + nodeState.getRepEnvState()); + + DatabaseException exitException = null; + int retries = 0; + repImpl.getStartupTracker().start(Phase.BECOME_CONSISTENT); + repImpl.getStartupTracker().setProgress + (RecoveryProgress.BECOME_CONSISTENT); + try { + for (retries = 0; retries < JOIN_RETRIES; retries++ ) { + try { + /* Wait for Feeder/Replica to be fully initialized. */ + boolean done = getReadyLatch().awaitOrException + (timeouts.getTimeout(), TimeUnit.MILLISECONDS); + + /* + * Save the state, and use it from this point forward, + * since the node's state may change again. + */ + final ReplicatedEnvironment.State finalState = + nodeState.getRepEnvState(); + if (!done) { + + /* An election or setup, timeout. */ + if (finalState.isReplica()) { + if (timeouts.timeoutIsForUnknownState()) { + + /* + * Replica syncing up; move onwards to the + * setup timeout and continue with the syncup. + */ + timeouts.setSetupTimeout(); + continue; + } + throw new ReplicaConsistencyException + (String.format("Setup time exceeded %,d ms", + timeouts.getSetupTimeout()), + null); + } + + if (finalState.isUnknown() && + timeouts.timeoutIsForUnknownState()) { + return UNKNOWN; + } + break; + } + + switch (finalState) { + case UNKNOWN: + + /* + * State flipped between release of ready latch and + * nodeState.getRepEnvState() above; retry for a + * Master/Replica state. + */ + continue; + + case REPLICA: + joinAsReplica(consistency); + break; + + case MASTER: + LoggerUtils.info(logger, repImpl, + "Joining group as master"); + break; + + case DETACHED: + throw EnvironmentFailureException. + unexpectedState("Node in DETACHED state " + + "while joining group."); + } + + return finalState; + } catch (InterruptedException e) { + throw EnvironmentFailureException.unexpectedException(e); + } catch (MasterStateException e) { + /* Transition to master while establishing consistency. 
*/ + LoggerUtils.warning(logger, repImpl, + "Join retry due to master transition: " + + e.getMessage()); + continue; + } catch (RestartRequiredException e) { + LoggerUtils.warning(logger, repImpl, + "Environment needs to be restarted: " + + e.getMessage()); + throw e; + } catch (DatabaseException e) { + Throwable cause = e.getCause(); + if ((cause != null) && + (cause.getClass() == + Replica.ConnectRetryException.class)) { + + /* + * The master may have changed. Retry if there is time + * left to do so. It may result in a new master. + */ + exitException = e; + if (timeouts.getTimeout() > 0) { + LoggerUtils.warning(logger, repImpl, + "Join retry due to exception: " + + cause.getMessage()); + continue; + } + } + throw e; + } + } + } finally { + repImpl.getStartupTracker().stop(Phase.BECOME_CONSISTENT); + } + + /* Timed out or exceeded retries. */ + if (exitException != null) { + LoggerUtils.warning(logger, repImpl, "Exiting joinGroup after " + + retries + " retries." + exitException); + throw exitException; + } + throw new UnknownMasterException(null, repImpl.getStateChangeEvent()); + } + + /** + * Join the group as a Replica ensuring that the node is sufficiently + * consistent as defined by its consistency policy. + * + * @param consistency the consistency policy to use when joining initially + */ + private void joinAsReplica(ReplicaConsistencyPolicy consistency) + throws InterruptedException { + + if (consistency == null) { + final int consistencyTimeout = + getConfigManager().getDuration(ENV_CONSISTENCY_TIMEOUT); + consistency = new PointConsistencyPolicy + (new VLSN(replica.getMasterTxnEndVLSN()), + consistencyTimeout, TimeUnit.MILLISECONDS); + } + + /* + * Wait for the replica to become sufficiently consistent. + */ + consistency.ensureConsistency(repImpl); + + /* + * Flush changes to the file system. The flush ensures in particular + * that any member database updates defining this node itself are not + * lost in case of a process crash. See SR 20607. + */ + repImpl.getLogManager().flushNoSync(); + + LoggerUtils.info(logger, repImpl, "Joined group as a replica. " + + " join consistencyPolicy=" + consistency + + " " + repImpl.getVLSNIndex().getRange()); + } + + /** + * Should be called whenever a new VLSN is associated with a log entry + * suitable for Replica/Feeder syncup. If GlobalCBVLSN is defunct, does + * nothing. + */ + public void trackSyncableVLSN(VLSN syncableVLSN, long lsn) { + cbvlsnTracker.track(syncableVLSN, lsn); + } + + /** + * Returns the global CBVLSN if it is not defunct, and otherwise a null + * VLSN. + * + * @see GlobalCBVLSN#getCBVLSN() + */ + public VLSN getGlobalCBVLSN() { + return globalCBVLSN.getCBVLSN(); + } + + /** + * Returns whether the GlobalCBVLSN is defunct -- see {@link GlobalCBVLSN}. + */ + public boolean isGlobalCBVLSNDefunct() { + return globalCBVLSN.isDefunct(); + } + + /** + * Returns a VLSN appropriate for the RestoreResponse.cbvlsn field when the + * GlobalCBVLSN is not defunct, and otherwise a null VLSN. + * + * @see GlobalCBVLSN#getRestoreResponseVLSN + */ + public VLSN getRestoreResponseVLSN(final VLSNRange range) { + return globalCBVLSN.getRestoreResponseVLSN(range); + } + + /** + * Marks the start of the search for a matchpoint that happens during a + * syncup. The lower bound of the VLSN range must remain stable during + * syncup to prevent deleting files that are being read by a syncup file + * reader. + *
+ * A feeder may have multiple syncups in action. The caller + * should call {@link #syncupEnded} when the syncup is done, passing the + * value returned by this method. + * + * @param syncupNode identifies the other node involved in the syncup, + * and is the name to be used in LogSizeStats. + * + * @return the ProtectedFileSet protecting the VLSNIndex range and + * identifying the syncup in LogSizeStats. + */ + public ProtectedFileSet syncupStarted(NameIdPair syncupNode) { + + return getVLSNIndex().protectRangeHead( + FileProtector.SYNCUP_NAME + "-" + syncupNode.toString()); + } + + public void syncupEnded(ProtectedFileSet fileSet) { + repImpl.getFileProtector().removeFileProtection(fileSet); + } + + long getReplicaCloseCatchupMs() { + return replicaCloseCatchupMs; + } + + public Arbiter getArbiter() { + return arbiter; + } + + /** + * Shuts down the network backup service *before* a rollback is initiated + * as part of syncup, thus ensuring that NetworkRestore does not see an + * inconsistent set of log files. Any network backup operations that are in + * progress at this node are aborted. The client of the service will + * experience network connection failures and will retry with this node + * (when the service is re-established at this node), or with some other + * node. + *
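+ * A condensed sketch of the pairing as it occurs in the syncup rollback
+ * path (see Replay.rollback later in this diff):
+ *
+ *   repNode.shutdownNetworkBackup();  // stop serving log files
+ *   // ... log RollbackStart, make rolled-back entries invisible,
+ *   // fsync the affected files ...
+ *   repNode.restartNetworkBackup();   // resume once files are consistent
+ *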
        + * restartNetworkBackup() is then used to restart the service after it was + * shut down. + */ + final public void shutdownNetworkBackup() { + logFeederManager.shutdown(); + logFeederManager = null; + } + + /** + * Restarts the network backup service *after* a rollback has been + * completed and the log files are once again in a consistent state. + */ + final public void restartNetworkBackup() { + if (logFeederManager != null) { + throw EnvironmentFailureException.unexpectedState(repImpl); + } + logFeederManager = + new com.sleepycat.je.rep.impl.networkRestore.FeederManager + (serviceDispatcher, repImpl, nameIdPair); + } + + /** + * Dumps the states associated with any active Feeders as well as + * the composition of the group itself. + */ + public String dumpState() { + return "\n" + feederManager.dumpState(false /* acksOnly */) + + "\n" + getGroup(); + } + + /** + * Dumps the state associated with all active Feeders that supply + * acknowledgments. + */ + public String dumpAckFeederState() { + return "\n" + feederManager.dumpState(true /* acksOnly */) + "\n"; + } + + public ElectionQuorum getElectionQuorum() { + return electionQuorum; + } + + public DurabilityQuorum getDurabilityQuorum() { + return durabilityQuorum; + } + + public void setConvertHook(TestHook hook) { + if (convertHooks == null) { + convertHooks = new HashSet<>(); + } + convertHooks.add(hook); + } + + private boolean runConvertHooks () { + if (convertHooks == null) { + return true; + } + + for (TestHook h : convertHooks) { + assert TestHookExecute.doHookIfSet(h, 0); + } + return true; + } + + /** + * Get the group minimum JE version. + * + *
        Returns the minimum JE version that is required for all nodes that + * join this node's replication group. The version returned is supported + * by all current and future group members. The minimum JE version is + * guaranteed to only increase over time, so long as the data for the + * environment is not rolled back or lost. + * + * @return the group minimum JE version + */ + public JEVersion getMinJEVersion() { + synchronized (minJEVersionLock) { + return group.getMinJEVersion(); + } + } + + /** + * Checks if all data nodes in the replication group support the specified + * JE version. Updates the group minimum JE version, and the group format + * version, as needed to require all nodes joining the group to be running + * at least the specified JE version. + * + *
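+ * Illustrative feature-gate call (the version string is hypothetical):
+ *
+ *   try {
+ *       repNode.setMinJEVersion(new JEVersion("7.5.11"));
+ *       // all data nodes run 7.5.11 or later; enable the new feature
+ *   } catch (MinJEVersionUnsupportedException e) {
+ *       // a member is older, or its version is unknown; fall back
+ *   }
+ *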
        This method should only be called on the master, because attempts to + * update the rep group DB on an replica will fail. + * + * @param newMinJEVersion the new minimum JE version + * @throws DatabaseException if an error occurs when accessing the + * replication group database + * @throws MinJEVersionUnsupportedException if the version is not supported + * by one or more current group members + */ + public void setMinJEVersion(final JEVersion newMinJEVersion) + throws MinJEVersionUnsupportedException { + + /* + * Synchronize here on minJEVersionLock to prevent new secondary nodes + * from being added while updating the minimum JE version. Electable + * nodes are stored in the RepGroupDB, so the check performed on that + * class's setMinJEVersion within a transaction insures that all + * current nodes have been checked before the minimum JE version is + * increased. But secondary nodes are not stored persistently, so + * other synchronization is needed for them. + */ + synchronized (minJEVersionLock) { + + /* Check if at least this version is already required */ + final JEVersion groupMinJEVersion = group.getMinJEVersion(); + if (groupMinJEVersion.compareTo(newMinJEVersion) >= 0) { + return; + } + + for (final RepNodeImpl node : group.getDataMembers()) { + JEVersion nodeJEVersion = node.getJEVersion(); + if (getNodeName().equals(node.getName())) { + + /* Use the current software version for the local node */ + nodeJEVersion = repImpl.getCurrentJEVersion(); + } else { + + /* Use the version recorded by the feeder for replicas */ + final Feeder feeder = + feederManager.getFeeder(node.getName()); + if (feeder != null) { + final JEVersion currentReplicaJEVersion = + feeder.getReplicaJEVersion(); + if (currentReplicaJEVersion != null) { + nodeJEVersion = currentReplicaJEVersion; + } + } + } + if ((nodeJEVersion == null) || + (newMinJEVersion.compareTo(nodeJEVersion) > 0)) { + throw new MinJEVersionUnsupportedException( + newMinJEVersion, node.getName(), nodeJEVersion); + } + } + repGroupDB.setMinJEVersion(newMinJEVersion); + } + } + + /** + * Returns true if acks are needed by the group for durability. This is the + * case if the rep group has > 1 data node that's also electable. + */ + public boolean isNeedsAcks() { + return needsAcks; + } + + /** + * Adds a transient ID node to the group. Assign a node ID and add the + * node to the RepGroupImpl. Don't notify the monitor: transient ID nodes + * do not generate GroupChangeEvents. + * + * @param node the node + * @throws IllegalStateException if the store does not currently support + * secondary nodes or the node doesn't meet the current minimum JE + * version + * @throws NodeConflictException if the node conflicts with an existing + * persistent node + */ + public void addTransientIdNode(final RepNodeImpl node) { + if (!node.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Attempt to call addTransientIdNode with a" + + " node without transient ID: " + node); + } + final JEVersion requiredJEVersion = + RepGroupImpl.FORMAT_VERSION_3_JE_VERSION; + try { + setMinJEVersion(requiredJEVersion); + } catch (MinJEVersionUnsupportedException e) { + if (e.nodeVersion == null) { + throw new IllegalStateException( + "Secondary nodes are not currently supported." 
+ + " The version running on node " + e.nodeName + + " could not be determined," + + " but this feature requires version " + + requiredJEVersion.getNumericVersionString() + + " or later."); + } + throw new IllegalStateException( + "Secondary nodes are not currently supported." + + " Node " + e.nodeName + " is running version " + + e.nodeVersion.getNumericVersionString() + + ", but this feature requires version " + + requiredJEVersion.getNumericVersionString() + + " or later."); + } + + /* + * Synchronize on minJEVersionLock to coordinate with setMinJEVersion + */ + synchronized (minJEVersionLock) { + final JEVersion minJEVersion = group.getMinJEVersion(); + if (node.getJEVersion().compareTo(minJEVersion) < 0) { + throw new IllegalStateException( + "The node does not meet the minimum required version" + + " for the group." + + " Node " + node.getNameIdPair().getName() + + " is running version " + node.getJEVersion() + + ", but the minimum required version is " + + minJEVersion); + } + if (!node.getNameIdPair().hasNullId()) { + throw new IllegalStateException( + "New " + node.getType().toString().toLowerCase() + + " node " + node.getNameIdPair().getName() + + " already has an ID: " + node.getNameIdPair().getId()); + } + node.getNameIdPair().setId(transientIds.allocateId()); + group.addTransientIdNode(node); + } + } + + /** + * Removes a node with transient id from the group. Remove the node from + * the RepGroupImpl and deallocate the node ID. + * + * @param node the node + */ + public void removeTransientNode(final RepNodeImpl node) { + if (!node.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Attempt to call removeTransientNode with a" + + " node without transient ID: " + node); + } + group.removeTransientNode(node); + transientIds.deallocateId(node.getNodeId()); + } + + private class RepElectionsConfig implements ElectionsConfig { + private final RepNode repNode; + RepElectionsConfig(RepNode repNode) { + this.repNode = repNode; + } + + @Override + public String getGroupName() { + return repNode.getRepImpl().getConfigManager().get(GROUP_NAME); + } + + @Override + public NameIdPair getNameIdPair() { + return repNode.getNameIdPair(); + } + + @Override + public ServiceDispatcher getServiceDispatcher() { + return repNode.getServiceDispatcher(); + } + + @Override + public int getElectionPriority() { + return repNode.getElectionPriority(); + } + + @Override + public int getLogVersion() { + return repNode.getLogVersion(); + } + + @Override + public RepImpl getRepImpl() { + return repNode.getRepImpl(); + } + + @Override + public RepNode getRepNode() { + return repNode; + } + } + + /** + * Track node IDs for node with transient IDs. IDs are allocated from the + * specified number of values at the high end of the range of integers. + */ + static class TransientIds { + private final int size; + private final BitSet bits; + + /** Creates an instance that allocates the specified number of IDs. */ + TransientIds(final int size) { + this.size = size; + assert size > 0; + bits = new BitSet(size); + } + + /** + * Allocates a free ID, throwing IllegalStateException if none are + * available. + */ + synchronized int allocateId() { + + /* + * Note that scanning for the next clear bit is somewhat + * inefficient, but this inefficiency shouldn't matter given the + * small number of secondary nodes expected. If needed, the next + * improvement would probably be to remember the last allocated ID, + * to avoid repeated scans of an initial range of already allocated + * bits. 
+ */ + final int pos = bits.nextClearBit(0); + if (pos >= size) { + throw new IllegalStateException("No more secondary node IDs"); + } + bits.set(pos); + return Integer.MAX_VALUE - pos; + } + + /** + * Deallocates a previously allocated ID, throwing + * IllegalArgumentException if the argument was not allocated by + * allocateId or if the ID is not currently allocated. + */ + synchronized void deallocateId(final int id) { + if (id < Integer.MAX_VALUE - size) { + throw new IllegalArgumentException( + "Illegal secondary node ID: " + id); + } + final int pos = Integer.MAX_VALUE - id; + if (!bits.get(pos)) { + throw new IllegalArgumentException( + "Secondary node ID is not currently allocated: " + id); + } + bits.clear(pos); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/Replay.java b/src/com/sleepycat/je/rep/impl/node/Replay.java new file mode 100644 index 0000000..48b1abf --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/Replay.java @@ -0,0 +1,1495 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.log.LogEntryType.LOG_NAMELN_TRANSACTIONAL; +import static com.sleepycat.je.log.LogEntryType.LOG_TXN_ABORT; +import static com.sleepycat.je.log.LogEntryType.LOG_TXN_COMMIT; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.LATEST_COMMIT_LAG_MS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.MAX_COMMIT_PROCESSING_NANOS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.MIN_COMMIT_PROCESSING_NANOS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_ABORTS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMITS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_ACKS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_NO_SYNCS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_SYNCS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_COMMIT_WRITE_NO_SYNCS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_ELAPSED_TXN_TIME; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMITS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMIT_MAX_EXCEEDED; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMIT_TIMEOUTS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_GROUP_COMMIT_TXNS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_LNS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_MESSAGE_QUEUE_OVERFLOWS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.N_NAME_LNS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.TOTAL_COMMIT_LAG_MS; +import static com.sleepycat.je.rep.impl.node.ReplayStatDefinition.TOTAL_COMMIT_PROCESSING_NANOS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +import java.io.File; +import 
java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree.TruncateDbResult; +import com.sleepycat.je.dbi.DbType; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.PutMode; +import com.sleepycat.je.dbi.SearchMode; +import com.sleepycat.je.dbi.TriggerManager; +import com.sleepycat.je.log.DbOpReplicationContext; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.entry.DbOperationType; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.NameLNLogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.recovery.RecoveryInfo; +import com.sleepycat.je.recovery.RollbackTracker; +import com.sleepycat.je.rep.LogFileRewriteListener; +import com.sleepycat.je.rep.SyncupProgress; +import com.sleepycat.je.rep.impl.RepGroupDB; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.stream.InputWireRecord; +import com.sleepycat.je.rep.stream.MasterStatus.MasterSyncException; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.txn.ReplayTxn; +import com.sleepycat.je.rep.utilint.LongMinZeroStat; +import com.sleepycat.je.rep.utilint.SimpleTxnMap; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.txn.RollbackEnd; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.TxnAbort; +import com.sleepycat.je.txn.TxnCommit; +import com.sleepycat.je.txn.TxnEnd; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongMaxStat; +import com.sleepycat.je.utilint.LongMaxZeroStat; +import com.sleepycat.je.utilint.LongMinStat; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.NanoTimeUtil; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.utilint.StringUtils; + +/** + * Replays log records from the replication stream, and manages the + * transactions for those records. + * + * The Replay module has a lifetime equivalent to the environment owned by + * this replicator. Its lifetime is longer than the feeder/replica stream. 
+ * For example, suppose this is nodeX: + * + * t1 - Node X is a replica, node A is master. Replay X is alive + * t2 - Node X is a replica, node B takes over as master. X's Replay module + * is still alive and has the same set of active txns. It doesn't matter + * to X that the master has changed. + * t3 - Node X becomes the master. Now its Replay unit is cleared, because + * anything managed by the Replay is defunct. + */ +public class Replay { + + /* These are strings for the rollback logging. */ + private static final String RBSTATUS_START = + "Started Rollback"; + private static final String RBSTATUS_NO_ACTIVE = + "No active txns, nothing to rollback"; + private static final String RBSTATUS_RANGE_EQUALS = + "End of range equals matchpoint, nothing to rollback"; + private static final String RBSTATUS_LOG_RBSTART = + "Logged RollbackStart entry"; + private static final String RBSTATUS_MEM_ROLLBACK = + "Finished in-memory rollback"; + private static final String RBSTATUS_INVISIBLE = + "Finished invisible setting"; + private static final String RBSTATUS_FINISH = + "Finished rollback"; + + /* + * DatabaseEntry objects reused during replay, to minimize allocation in + * high performance replay path. + */ + final DatabaseEntry replayKeyEntry = new DatabaseEntry(); + final DatabaseEntry replayDataEntry = new DatabaseEntry(); + final DatabaseEntry delDataEntry = new DatabaseEntry(); + + private final RepImpl repImpl; + + /** + * If a commit replay operation takes more than this threshold, it's + * logged. This information helps determine whether ack timeouts on the + * master are due to a slow replica, or the network. + */ + private final long ackTimeoutLogThresholdNs; + + /** + * ActiveTxns is a collection of txn objects used for applying replicated + * transactions. This collection should be empty if the node is a master. + * + * Note that there is an interesting relationship between ActiveTxns and + * the txn collection managed by the environment TxnManager. ActiveTxns is + * effectively a subset of the set of txns held by the + * TxnManager. ReplayTxns must be sure to register and unregister + * themselves from ActiveTxns, just as all Txns must register and + * unregister with the TxnManager's set. One implementation alternative to + * having an ActiveTxns map here is to search the TxnManager set (which is + * a set, not a map) for a given ReplayTxn. Another is to subclass + * TxnManager so that replicated nodes have their own replayTxn map, just + * as XATxns have a XID->Txn map. + * + * Both alternatives seemed too costly in terms of performance or elaborate + * in terms of code to do for the current function. It seems clearer to + * make the ActiveTxns a map in the one place that it is currently + * used. This choice may change over time, and should be reevaluated if the + * implementation changes. + * + * The ActiveTxns key is the transaction id. These transactions are closed + * when: + * - the replay unit executes a commit received over the replication stream + * - the replay unit executes an abort received over the replication stream + * - the replication node realizes that it has just become the new master, + * and was not previously the master. + * + * Note that the Replay class has a lifetime that is longer than that of a + * RepNode. This means in particular, that transactions may be left open, + * and will be resumed when a replica switches from one master to another, + * creating a new RepNode in the process. 
Because of that, part of the + * transaction may be implemented by the rep stream from one master and + * another part by another. + * + * The map is synchronized, so simple get/put operations do not require + * additional synchronization. However, iteration requires synchronization + * and copyActiveTxns can be used in most cases. + */ + private final SimpleTxnMap activeTxns; + + /* + * The entry representing the last replayed txn commit. Supports the + * replica's notion of consistency. + */ + private volatile TxnInfo lastReplayedTxn = null; + + /* + * The last replayed entry of any kind. Supports PointConsistencyPolicy. + */ + private volatile VLSN lastReplayedVLSN = null; + + /* + * The last replayed DTVLSN in the stream. It's used to ensure that the + * DTVLSNs in the stream are correctly sequenced. + */ + private long lastReplayedDTVLSN = VLSN.NULL_VLSN_SEQUENCE; + + /* + * The sync policy to be used in the absence of an ACK request. The replica + * in this case has some latitude about how it syncs the commit. + */ + private final SyncPolicy noAckSyncPolicy = SyncPolicy.NO_SYNC; + + /** + * The RepParams.REPLAY_LOGGING_THRESHOLD configured logging threshold. + */ + private final long replayLoggingThresholdNs; + + /** + * State that is reinitialized by the reinit() method each time a replay + * loop is started with a new feeder. + */ + + /** + * All writes (predominantly acks) are queued here, so they do not block + * the replay thread. + */ + private final BlockingQueue outputQueue; + + /** + * Holds the state associated with group commits. + */ + private final GroupCommit groupCommit; + + /* Maintains the statistics associated with stream replay. */ + private final StatGroup statistics; + private final LongStat nCommits; + private final LongStat nCommitAcks; + private final LongStat nCommitSyncs; + private final LongStat nCommitNoSyncs; + private final LongStat nCommitWriteNoSyncs; + private final LongStat nAborts; + private final LongStat nNameLNs; + private final LongStat nLNs; + private final LongStat nElapsedTxnTime; + private final LongStat nMessageQueueOverflows; + private final LongMinStat minCommitProcessingNanos; + private final LongMaxStat maxCommitProcessingNanos; + private final LongStat totalCommitProcessingNanos; + private final LongStat totalCommitLagMs; + private final LongStat latestCommitLagMs; + + private final Logger logger; + public Replay(RepImpl repImpl, + @SuppressWarnings("unused") NameIdPair nameIdPair) { + + /* + * This should have already been caught in + * ReplicatedEnvironment.setupRepConfig, but it is checked here anyway + * as an added sanity check. [#17643] + */ + if (repImpl.isReadOnly()) { + throw EnvironmentFailureException.unexpectedState + ("Replay created with readonly ReplicatedEnvironment"); + } + + this.repImpl = repImpl; + final DbConfigManager configManager = repImpl.getConfigManager(); + + ackTimeoutLogThresholdNs = MILLISECONDS.toNanos(configManager. + getDuration(RepParams.REPLICA_ACK_TIMEOUT)); + + /** + * The factor of 2 below is somewhat arbitrary. It should be > 1 X so + * that the ReplicaOutputThread can completely process the buffered + * messages in the face of a network drop and 2X to allow for + * additional headroom and minimize the chances that the replay might + * be blocked due to the limited queue length. 
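+ * For instance, with a hypothetical REPLICA_MESSAGE_QUEUE_SIZE of
+ * 1000, the output queue is sized at 2 * 1000 = 2000 entries.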
+ */ + final int outputQueueSize = 2 * + configManager.getInt(RepParams.REPLICA_MESSAGE_QUEUE_SIZE); + outputQueue = new ArrayBlockingQueue(outputQueueSize); + + /* + * The Replay module manages all write transactions and mimics a + * writing application thread. When the node comes up, it populates + * the activeTxn collection with ReplayTxns that were resurrected + * at recovery time. + */ + activeTxns = new SimpleTxnMap<>(1024); + + /* + * Configure the data entry used for deletion to avoid fetching the + * old data during deletion replay. + */ + delDataEntry.setPartial(0, 0, true); + + logger = LoggerUtils.getLogger(getClass()); + statistics = new StatGroup(ReplayStatDefinition.GROUP_NAME, + ReplayStatDefinition.GROUP_DESC); + + groupCommit = new GroupCommit(configManager); + + nCommits = new LongStat(statistics, N_COMMITS); + nCommitAcks = new LongStat(statistics, N_COMMIT_ACKS); + nCommitSyncs = new LongStat(statistics, N_COMMIT_SYNCS); + nCommitNoSyncs = new LongStat(statistics, N_COMMIT_NO_SYNCS); + nCommitWriteNoSyncs = + new LongStat(statistics, N_COMMIT_WRITE_NO_SYNCS); + nAborts = new LongStat(statistics, N_ABORTS); + nNameLNs = new LongStat(statistics, N_NAME_LNS); + nLNs = new LongStat(statistics, N_LNS); + nElapsedTxnTime = new LongStat(statistics, N_ELAPSED_TXN_TIME); + nMessageQueueOverflows = + new LongStat(statistics, N_MESSAGE_QUEUE_OVERFLOWS); + minCommitProcessingNanos = + new LongMinZeroStat(statistics, MIN_COMMIT_PROCESSING_NANOS); + maxCommitProcessingNanos = + new LongMaxZeroStat(statistics, MAX_COMMIT_PROCESSING_NANOS); + totalCommitProcessingNanos = + new LongStat(statistics, TOTAL_COMMIT_PROCESSING_NANOS); + totalCommitLagMs = new LongStat(statistics, TOTAL_COMMIT_LAG_MS); + latestCommitLagMs = new LongStat(statistics, LATEST_COMMIT_LAG_MS); + + replayLoggingThresholdNs = MILLISECONDS.toNanos(configManager. + getDuration(RepParams.REPLAY_LOGGING_THRESHOLD)); + } + + public BlockingQueue getOutputQueue() { + return outputQueue; + } + + /** + * Reinitialize for replay from a new feeder + */ + public void reset() { + outputQueue.clear(); + } + + LongStat getMessageQueueOverflows() { + return nMessageQueueOverflows; + } + + /** + * Actions that must be taken before the recovery checkpoint, whether + * the environment is read/write or read/only. + */ + public void preRecoveryCheckpointInit(RecoveryInfo recoveryInfo) { + for (Txn txn : recoveryInfo.replayTxns.values()) { + + /* + * ReplayTxns need to know about their owning activeTxn map, + * so they can remove themselves at close. We are casting upwards, + * because the non-HA code is prohibited from referencing + * Replication classes, and the RecoveryInfo.replayTxns collection + * doesn't know that it's got ReplayTxns. + */ + ((ReplayTxn) txn).registerWithActiveTxns(activeTxns); + } + lastReplayedVLSN = repImpl.getVLSNIndex().getRange().getLast(); + } + + public TxnInfo getLastReplayedTxn() { + return lastReplayedTxn; + } + + public VLSN getLastReplayedVLSN() { + return lastReplayedVLSN; + } + + /** + * When mastership changes, all inflight replay transactions are aborted. + * Replay transactions need only be aborted by the node that has become + * the new master (who was previously a Replica). The replay transactions + * on the other replicas who have not changed roles are + * resolved by the abort record issued by said new master. 
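+ *
+ * For example, if node X is elected master while replay txn 42 is still
+ * open, X aborts txn 42 locally and logs the abort; the remaining
+ * replicas resolve their copies of txn 42 when that abort record
+ * arrives over the replication stream.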
+ */ + public void abortOldTxns() + throws DatabaseException { + + final int masterNodeId = repImpl.getNodeId(); + for (ReplayTxn replayTxn : copyActiveTxns().values()) { + /* + * Use NULL for the DTVLSN since it's being written as the MASTER + * despite being a ReplayTxn; it will be corrected when it's + * written to the log. + */ + replayTxn.abort(ReplicationContext.MASTER, masterNodeId, + VLSN.NULL_VLSN_SEQUENCE); + } + assert activeTxns.isEmpty() : "Unexpected txns in activeTxns = " + + activeTxns; + } + + private void updateCommitStats(final boolean needsAck, + final SyncPolicy syncPolicy, + final long startTimeNanos, + final long masterCommitTimeMs, + final long replicaCommitTimeMs) { + + final long now = System.nanoTime(); + final long commitNanos = now - startTimeNanos; + + if (commitNanos > ackTimeoutLogThresholdNs && + logger.isLoggable(Level.INFO)) { + LoggerUtils.info + (logger, repImpl, + "Replay commit time: " + (commitNanos / 1000000) + + " ms exceeded log threshold: " + + (ackTimeoutLogThresholdNs / 1000000)); + } + + nCommits.increment(); + + if (needsAck) { + nCommitAcks.increment(); + } + + if (syncPolicy == SyncPolicy.SYNC) { + nCommitSyncs.increment(); + } else if (syncPolicy == SyncPolicy.NO_SYNC) { + nCommitNoSyncs.increment(); + } else if (syncPolicy == SyncPolicy.WRITE_NO_SYNC) { + nCommitWriteNoSyncs.increment(); + } else { + throw EnvironmentFailureException.unexpectedState + ("Unknown sync policy: " + syncPolicy); + } + + totalCommitProcessingNanos.add(commitNanos); + minCommitProcessingNanos.setMin(commitNanos); + maxCommitProcessingNanos.setMax(commitNanos); + + /* + * Tally the lag between master and replica commits, even if clock skew + * makes the lag appear negative. The documentation already warns that + * the value will be affected by clock skew, so users can adjust for + * that, but only if we don't throw the information way. + */ + final long replicaLagMs = replicaCommitTimeMs - masterCommitTimeMs; + totalCommitLagMs.add(replicaLagMs); + latestCommitLagMs.set(replicaLagMs); + } + + /** + * Apply the operation represented by this log entry on this replica node. + */ + public void replayEntry(long startNs, + Protocol.Entry entry) + throws DatabaseException, + IOException, + InterruptedException, + MasterSyncException { + + final InputWireRecord wireRecord = entry.getWireRecord(); + final LogEntry logEntry = wireRecord.getLogEntry(); + + /* + * Sanity check that the replication stream is in sequence. We want to + * forestall any possible corruption from replaying invalid entries. + */ + if (!wireRecord.getVLSN().follows(lastReplayedVLSN)) { + throw EnvironmentFailureException.unexpectedState + (repImpl, + "Rep stream not sequential. Current VLSN: " + + lastReplayedVLSN + + " next log entry VLSN: " + wireRecord.getVLSN()); + } + + if (logger.isLoggable(Level.FINEST)) { + LoggerUtils.finest(logger, repImpl, "Replaying " + wireRecord); + } + + final ReplayTxn repTxn = getReplayTxn(logEntry.getTransactionId(), true); + updateReplicaSequences(logEntry); + final byte entryType = wireRecord.getEntryType(); + + lastReplayedVLSN = wireRecord.getVLSN(); + + try { + final long txnId = repTxn.getId(); + + if (LOG_TXN_COMMIT.equalsType(entryType)) { + Protocol.Commit commitEntry = (Protocol.Commit) entry; + + final boolean needsAck = commitEntry.getNeedsAck(); + final SyncPolicy txnSyncPolicy = + commitEntry.getReplicaSyncPolicy(); + final SyncPolicy implSyncPolicy = + needsAck ? 
+ groupCommit.getImplSyncPolicy(txnSyncPolicy) : + noAckSyncPolicy; + + logReplay(repTxn, needsAck, implSyncPolicy); + + final TxnCommit commit = (TxnCommit) logEntry.getMainItem(); + final long dtvlsn = updateDTVLSN(commit); + + if (needsAck) { + + /* + * Only wait if the replica is not lagging and the + * durability requires it. + */ + repImpl.getRepNode().getVLSNFreezeLatch().awaitThaw(); + repImpl.getRepNode().getMasterStatus().assertSync(); + } + + repTxn.commit(implSyncPolicy, + new ReplicationContext(lastReplayedVLSN), + commit.getMasterNodeId(), + dtvlsn); + + final long masterCommitTimeMs = commit.getTime().getTime(); + lastReplayedTxn = new TxnInfo(lastReplayedVLSN, + masterCommitTimeMs); + + updateCommitStats(needsAck, implSyncPolicy, startNs, + masterCommitTimeMs, repTxn.getEndTime()); + + /* Respond to the feeder. */ + if (needsAck) { + /* + * Need an ack, either buffer it, for sync group commit, or + * queue it. + */ + if (!groupCommit.bufferAck(startNs, repTxn, + txnSyncPolicy)) { + queueAck(txnId); + } + } + + /* + * The group refresh and recalculation can be expensive, since + * it may require a database read. Do it after the ack. + */ + if (repTxn.getRepGroupDbChange() && canRefreshGroup(repTxn)) { + repImpl.getRepNode().refreshCachedGroup(); + repImpl.getRepNode().recalculateGlobalCBVLSN(); + } + + nElapsedTxnTime.add(repTxn.elapsedTime()); + + } else if (LOG_TXN_ABORT.equalsType(entryType)) { + + nAborts.increment(); + final TxnAbort abort = (TxnAbort) logEntry.getMainItem(); + final ReplicationContext abortContext = + new ReplicationContext(wireRecord.getVLSN()); + if (logger.isLoggable(Level.FINEST)) { + LoggerUtils.finest(logger, repImpl, + "abort called for " + txnId + + " masterId=" + + abort.getMasterNodeId() + + " repContext=" + abortContext); + } + + long dtvlsn = updateDTVLSN(abort); + repTxn.abort(abortContext, abort.getMasterNodeId(), dtvlsn); + lastReplayedTxn = new TxnInfo(lastReplayedVLSN, + abort.getTime().getTime()); + if (repTxn.getRepGroupDbChange() && canRefreshGroup(repTxn)) { + + /* + * Refresh is the safe thing to do on an abort, since a + * refresh may have been held back from an earlier commit + * due to this active transaction. + */ + repImpl.getRepNode().refreshCachedGroup(); + } + nElapsedTxnTime.add(repTxn.elapsedTime()); + + } else if (LOG_NAMELN_TRANSACTIONAL.equalsType(entryType)) { + + repImpl.getRepNode().getReplica().clearDbTreeCache(); + nNameLNs.increment(); + applyNameLN(repTxn, wireRecord); + + } else { + nLNs.increment(); + /* A data operation. */ + assert wireRecord.getLogEntry() instanceof LNLogEntry; + applyLN(repTxn, wireRecord); + } + + /* Remember the last VLSN applied by this txn. */ + repTxn.setLastAppliedVLSN(lastReplayedVLSN); + + } catch (DatabaseException e) { + e.addErrorMessage("Problem seen replaying entry " + wireRecord); + throw e; + } finally { + final long elapsedNs = System.nanoTime() - startNs; + if (elapsedNs > replayLoggingThresholdNs) { + LoggerUtils.info(logger, repImpl, + "Replay time for entry type:" + + LogEntryType.findType(entryType) + " " + + NANOSECONDS.toMillis(elapsedNs) + "ms " + + "exceeded threshold:" + + NANOSECONDS. + toMillis(replayLoggingThresholdNs) + + "ms"); + } + } + } + + /** + * Update the replica's in-memory DTVLSN using the value in the + * commit/abort entry. + * + * In the normal course of events, DTVLSNs should not decrease. 
However, + * there is just one exception: if the rep stream transitions from a post + * to a pre-dtvlsn stream, it will transition from a positive to the + * UNINITIALIZED_VLSN_SEQUENCE. + * + * A transition from a pre to a post-dtvlsn transition (from zero to some + * positive value), observes the "DTVLSNs should not decrease" rule + * automatically. + * + * @return the DTVLSN entry in the log txnEnd record so it can be used in + * the commit/abort operation for replay + */ + private long updateDTVLSN(final TxnEnd txnEnd) { + final long txnDTVLSN = txnEnd.getDTVLSN(); + + if (txnDTVLSN == VLSN.UNINITIALIZED_VLSN_SEQUENCE) { + /* + * A pre DTVLSN format entry, simply set it as the in-memory DTVLSN + */ + final long prevDTVLSN = repImpl.getRepNode().setDTVLSN(txnDTVLSN); + if (prevDTVLSN != VLSN.UNINITIALIZED_VLSN_SEQUENCE) { + LoggerUtils.info(logger, repImpl, + "Transitioned to pre DTVLSN stream." + + " DTVLSN:" + prevDTVLSN + + " at VLSN:" + lastReplayedVLSN); + + } + lastReplayedDTVLSN = txnDTVLSN; + return txnDTVLSN; + } + + /* Sanity check. */ + if (txnDTVLSN < lastReplayedDTVLSN) { + String msg = "DTVLSNs must be in ascending order in the stream. " + + " prev DTVLSN:" + lastReplayedDTVLSN + + " next DTVLSN:" + txnDTVLSN + " at VLSN: " + + lastReplayedVLSN.getSequence(); + throw EnvironmentFailureException.unexpectedState(repImpl, msg); + } + + if ((lastReplayedDTVLSN == VLSN.UNINITIALIZED_VLSN_SEQUENCE) && + (txnDTVLSN > 0)) { + LoggerUtils.info(logger, repImpl, + "Transitioned to post DTVLSN stream." + + " DTVLSN:" + txnDTVLSN + + " at VLSN:" + lastReplayedVLSN); + } + + lastReplayedDTVLSN = txnDTVLSN; + repImpl.getRepNode().setDTVLSN(txnDTVLSN); + + return txnDTVLSN; + } + + /** + * Queue the request ack for an async ack write to the network. + */ + void queueAck(final long txnId) throws IOException { + try { + outputQueue.put(txnId); + } catch (InterruptedException ie) { + /* + * Have the higher levels treat it like an IOE and + * exit the thread. + */ + throw new IOException("Ack I/O interrupted", ie); + } + } + + /** + * Logs information associated with the replay of the txn commit + */ + private void logReplay(ReplayTxn repTxn, + boolean needsAck, + SyncPolicy syncPolicy) { + + if (!logger.isLoggable(Level.FINE)) { + return; + } + + if (needsAck) { + LoggerUtils.fine(logger, repImpl, + "Replay: got commit for txn=" + repTxn.getId() + + ", ack needed, replica sync policy=" + + syncPolicy + + " vlsn=" + lastReplayedVLSN); + } else { + LoggerUtils.fine(logger, repImpl, + "Replay: got commit for txn=" + repTxn.getId() + + " ack not needed" + + " vlsn=" + lastReplayedVLSN); + } + } + + /** + * Returns true if there are no other activeTxns that have also modified + * the membership database and are still open, since they could potentially + * hold write locks that would block the read locks acquired during the + * refresh operation. + * + * @param txn the current txn being committed or aborted + * + * @return true if there are no open transactions that hold locks on the + * membership database. + */ + private boolean canRefreshGroup(ReplayTxn txn) { + + /* + * Use synchronized rather than copyActiveTxns, since this is called + * during replay and there is no nested locking to worry about. 
+ */ + synchronized (activeTxns) { + // TODO: very inefficient + for (ReplayTxn atxn : activeTxns.getMap().values()) { + if (atxn == txn) { + continue; + } + if (atxn.getRepGroupDbChange()) { + return false; + } + } + } + return true; + } + + /** + * Update this replica's node, txn and database sequences with any ids in + * this log entry. We can call update, even if the replay id doesn't + * represent a new lowest-id point, or if the apply is not successful, + * because the apply checks that the replay id is < the sequence on the + * replica. We just want to ensure that if this node becomes the master, + * its sequences are in sync with what came before in the replication + * stream, and ids are not incorrectly reused. + */ + private void updateReplicaSequences(LogEntry logEntry) { + + /* For now, we assume all replay entries have a txn id. */ + repImpl.getTxnManager().updateFromReplay(logEntry.getTransactionId()); + + /* If it's a database operation, update the database id. */ + if (logEntry instanceof NameLNLogEntry) { + NameLNLogEntry nameLogEntry = (NameLNLogEntry) logEntry; + nameLogEntry.postFetchInit(false /*isDupDb*/); + NameLN nameLN = (NameLN) nameLogEntry.getLN(); + repImpl.getDbTree().updateFromReplay(nameLN.getId()); + } + } + + /** + * Obtain a ReplayTxn to represent the incoming operation. + */ + public ReplayTxn getReplayTxn(long txnId, boolean registerTxnImmediately) + throws DatabaseException { + + ReplayTxn useTxn = null; + synchronized (activeTxns) { + useTxn = activeTxns.get(txnId); + if (useTxn == null) { + + /* + * Durability will be explicitly specified when + * ReplayTxn.commit is called, so TransactionConfig.DEFAULT is + * fine. + */ + if (registerTxnImmediately) { + useTxn = new ReplayTxn(repImpl, TransactionConfig.DEFAULT, + txnId, activeTxns, logger); + } else { + useTxn = new ReplayTxn(repImpl, TransactionConfig.DEFAULT, + txnId, activeTxns, logger) { + @Override + protected + boolean registerImmediately() { + return false; + } + }; + } + } + } + return useTxn; + } + + /** + * Replays the NameLN. + * + * Note that the operations: remove, rename and truncate need to establish + * write locks on the database. Any open handles are closed by this + * operation by virtue of the ReplayTxn's importunate property. The + * application will receive a LockPreemptedException if it subsequently + * accesses the database handle. + */ + private void applyNameLN(ReplayTxn repTxn, + InputWireRecord wireRecord) + throws DatabaseException { + + NameLNLogEntry nameLNEntry = (NameLNLogEntry) wireRecord.getLogEntry(); + final NameLN nameLN = (NameLN) nameLNEntry.getLN(); + + String databaseName = StringUtils.fromUTF8(nameLNEntry.getKey()); + + final DbOpReplicationContext repContext = + new DbOpReplicationContext(wireRecord.getVLSN(), nameLNEntry); + + DbOperationType opType = repContext.getDbOperationType(); + DatabaseImpl dbImpl = null; + try { + switch (opType) { + case CREATE: + { + DatabaseConfig dbConfig = + repContext.getCreateConfig().getReplicaConfig(repImpl); + + dbImpl = repImpl.getDbTree().createReplicaDb + (repTxn, databaseName, dbConfig, nameLN, repContext); + + /* + * We rely on the RepGroupDB.DB_ID value, so make sure + * it's what we expect for this internal replicated + * database. 
+ */ + if ((dbImpl.getId().getId() == RepGroupDB.DB_ID) && + !DbType.REP_GROUP.getInternalName().equals + (databaseName)) { + throw EnvironmentFailureException.unexpectedState + ("Database: " + + DbType.REP_GROUP.getInternalName() + + " is associated with id: " + + dbImpl.getId().getId() + + " and not the reserved database id: " + + RepGroupDB.DB_ID); + } + + TriggerManager.runOpenTriggers(repTxn, dbImpl, true); + break; + } + + case REMOVE: { + dbImpl = repImpl.getDbTree().getDb(nameLN.getId()); + try { + repImpl.getDbTree().removeReplicaDb + (repTxn, databaseName, nameLN.getId(), repContext); + TriggerManager.runRemoveTriggers(repTxn, dbImpl); + } catch (DatabaseNotFoundException e) { + throw EnvironmentFailureException.unexpectedState + ("Database: " + dbImpl.getName() + + " Id: " + nameLN.getId() + + " not found on the Replica."); + } + break; + } + + case TRUNCATE: { + dbImpl = repImpl.getDbTree().getDb + (repContext.getTruncateOldDbId()); + try { + TruncateDbResult result = + repImpl.getDbTree().truncateReplicaDb + (repTxn, databaseName, false, nameLN, repContext); + TriggerManager.runTruncateTriggers(repTxn, result.newDb); + } catch (DatabaseNotFoundException e) { + throw EnvironmentFailureException.unexpectedState + ("Database: " + dbImpl.getName() + + " Id: " + nameLN.getId() + + " not found on the Replica."); + } + + break; + } + + case RENAME: { + dbImpl = repImpl.getDbTree().getDb(nameLN.getId()); + try { + dbImpl = + repImpl.getDbTree().renameReplicaDb + (repTxn, dbImpl.getName(), databaseName, nameLN, + repContext); + TriggerManager.runRenameTriggers(repTxn, dbImpl, + databaseName); + } catch (DatabaseNotFoundException e) { + throw EnvironmentFailureException.unexpectedState + ("Database rename from: " + dbImpl.getName() + + " to " + databaseName + + " failed, name not found on the Replica."); + } + break; + } + + case UPDATE_CONFIG: { + /* Get the replicated database configurations. */ + DatabaseConfig dbConfig = + repContext.getCreateConfig().getReplicaConfig(repImpl); + + /* Update the NameLN and write it to the log. */ + dbImpl = repImpl.getDbTree().getDb(nameLN.getId()); + final String dbName = dbImpl.getName(); + repImpl.getDbTree().updateNameLN + (repTxn, dbName, repContext); + + /* Set the new configurations to DatabaseImpl. */ + dbImpl.setConfigProperties + (repTxn, dbName, dbConfig, repImpl); + + repImpl.getDbTree().modifyDbRoot(dbImpl); + + break; + } + + default: + throw EnvironmentFailureException.unexpectedState + ("Illegal database op type of " + opType.toString() + + " from " + wireRecord + " database=" + databaseName); + } + } finally { + if (dbImpl != null) { + repImpl.getDbTree().releaseDb(dbImpl); + } + } + } + + private void applyLN( + final ReplayTxn repTxn, + final InputWireRecord wireRecord) + throws DatabaseException { + + final LNLogEntry lnEntry = (LNLogEntry) wireRecord.getLogEntry(); + final DatabaseId dbId = lnEntry.getDbId(); + + /* + * If this is a change to the rep group db, remember at commit time, + * and refresh this node's group metadata. + */ + if (dbId.getId() == RepGroupDB.DB_ID) { + repTxn.noteRepGroupDbChange(); + } + + /* + * Note that we don't have to worry about serializable isolation when + * applying a replicated txn; serializable isolation in only an issue + * for txns that take read locks, and a replicated txn consists only of + * write operations. 
+ */ + final DatabaseImpl dbImpl = + repImpl.getRepNode().getReplica().getDbCache().get(dbId, repTxn); + + lnEntry.postFetchInit(dbImpl); + + final ReplicationContext repContext = + new ReplicationContext(wireRecord.getVLSN()); + + try (final Cursor cursor = DbInternal.makeCursor( + dbImpl, repTxn, null /*cursorConfig*/)) { + + OperationResult result; + final LN ln = lnEntry.getLN(); + + /* In a dup DB, do not expect embedded LNs or non-empty data. */ + if (dbImpl.getSortedDuplicates() && + (lnEntry.isEmbeddedLN() || + (ln.getData() != null && ln.getData().length > 0))) { + + throw EnvironmentFailureException.unexpectedState( + dbImpl.getEnv(), + "[#25288] emb=" + lnEntry.isEmbeddedLN() + + " key=" + Key.getNoFormatString(lnEntry.getKey()) + + " data=" + Key.getNoFormatString(ln.getData()) + + " vlsn=" + ln.getVLSNSequence()); + } + + if (ln.isDeleted()) { + + /* + * Perform an exact search by key. Use a partial data entry + * (delDataEntry) to avoid reading old data. + */ + replayKeyEntry.setData(lnEntry.getKey()); + + result = DbInternal.searchForReplay( + cursor, replayKeyEntry, delDataEntry, + LockMode.RMW, SearchMode.SET); + + if (result != null) { + result = DbInternal.deleteForReplay(cursor, repContext); + } + } else { + replayKeyEntry.setData(lnEntry.getKey()); + replayDataEntry.setData(ln.getData()); + + result = DbInternal.putForReplay( + cursor, replayKeyEntry, replayDataEntry, ln, + lnEntry.getExpiration(), lnEntry.isExpirationInHours(), + PutMode.OVERWRITE, repContext); + } + + if (result == null) { + throw new EnvironmentFailureException( + repImpl, + EnvironmentFailureReason.LOG_INCOMPLETE, + "Replicated operation could not be applied. " + + wireRecord); + } + } + } + + /** + * Go through all active txns and rollback up to but not including the log + * entry represented by the matchpoint VLSN. + * + * Effectively truncate these rolled back log entries by making them + * invisible. Flush the log first, to make sure these log entries are out + * of the log buffers and are on disk, so we can reliably find them through + * the FileManager. + * + * Rollback steps are described in + * https://sleepycat.oracle.com/trac/wiki/Logging#Recoverysteps. In + * summary, + * + * 1. Log and fsync a new RollbackStart record + * 2. Do the rollback in memory. There is no need to explicitly + * log INs made dirty by the rollback operation. + * 3. Do invisibility masking by overwriting LNs. + * 4. Fsync all overwritten log files at this point. + * 5. Write a RollbackEnd record, for ease of debugging + * + * Note that application read txns can continue to run during syncup. + * Reader txns cannot access records that are being rolled back, because + * they are in txns that are not committed, i.e, they are write locked. + * The rollback interval never includes committed txns, and we do a hard + * recovery if it would include them. + */ + public void rollback(VLSN matchpointVLSN, long matchpointLsn) { + + String rollbackStatus = RBSTATUS_START; + + final Map localActiveTxns = copyActiveTxns(); + try { + if (localActiveTxns.size() == 0) { + /* no live read/write txns, nothing to do. */ + rollbackStatus = RBSTATUS_NO_ACTIVE; + return; + } + + VLSNRange range = repImpl.getVLSNIndex().getRange(); + if (range.getLast().equals(matchpointVLSN)) { + /* nothing to roll back. 
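+ * (The matchpoint is already the newest entry in this node's VLSN
+ * range, so the log already matches the feeder.)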
*/ + rollbackStatus = RBSTATUS_RANGE_EQUALS; + return; + } + + repImpl.setSyncupProgress(SyncupProgress.DO_ROLLBACK); + + /* + * Stop the log file backup service, since the files will be in an + * inconsistent state while the rollback is in progress. + */ + repImpl.getRepNode().shutdownNetworkBackup(); + + /* + * Set repImpl's isRollingBack to true, and invalidate all the in + * progress DbBackup. + */ + repImpl.setBackupProhibited(true); + repImpl.invalidateBackups(DbLsn.getFileNumber(matchpointLsn)); + + /* + * 1. Log RollbackStart. The fsync guarantees that this marker will + * be present in the log for recovery. It also ensures that all log + * entries will be flushed to disk and the TxnChain will not have + * to worry about entries that are in log buffers when constructing + * the rollback information. + */ + LogManager logManager = repImpl.getLogManager(); + LogEntry rollbackStart = SingleItemEntry.create( + LogEntryType.LOG_ROLLBACK_START, + new RollbackStart( + matchpointVLSN, matchpointLsn, localActiveTxns.keySet())); + long rollbackStartLsn = + logManager.logForceFlush(rollbackStart, + true, // fsyncRequired, + ReplicationContext.NO_REPLICATE); + rollbackStatus = RBSTATUS_LOG_RBSTART; + + /* + * 2. Do rollback in memory. Undo any operations that were logged + * after the matchpointLsn, and save the LSNs for those log + * entries.. There should be something to undo, because we checked + * earlier that there were log entries after the matchpoint. + */ + List rollbackLsns = new ArrayList(); + for (ReplayTxn replayTxn : localActiveTxns.values()) { + Collection txnRollbackLsns = + replayTxn.rollback(matchpointLsn); + + /* + * Txns that were entirely rolled back should have been removed + * from the activeTxns map. + */ + assert checkRemoved(replayTxn) : + "Should have removed " + replayTxn; + + rollbackLsns.addAll(txnRollbackLsns); + } + rollbackStatus = RBSTATUS_MEM_ROLLBACK; + assert rollbackLsns.size() != 0 : dumpActiveTxns(matchpointLsn); + + /* + * 3 & 4 - Mark the rolled back log entries as invisible. But + * before doing so, invoke any registered rewrite listeners, so the + * application knows that existing log files will be modified. + * + * After all are done, fsync the set of files. By waiting, some may + * have made it out on their own. + */ + LogFileRewriteListener listener = repImpl.getLogRewriteListener(); + if (listener != null) { + listener.rewriteLogFiles(getFileNames(rollbackLsns)); + } + RollbackTracker.makeInvisible(repImpl, rollbackLsns); + rollbackStatus = RBSTATUS_INVISIBLE; + + /* + * 5. Log RollbackEnd. Flush it so that we can use it to optimize + * recoveries later on. If the RollbackEnd exists, we can skip the + * step of re-making LNs invisible. + */ + logManager.logForceFlush( + SingleItemEntry.create(LogEntryType.LOG_ROLLBACK_END, + new RollbackEnd(matchpointLsn, + rollbackStartLsn)), + true, // fsyncRequired + ReplicationContext.NO_REPLICATE); + + /* + * Restart the backup service only if all the steps of the + * rollback were successful. + */ + repImpl.getRepNode().restartNetworkBackup(); + repImpl.setBackupProhibited(false); + rollbackStatus = RBSTATUS_FINISH; + } finally { + + /* Reset the lastReplayedVLSN so it's correct when we resume. 
*/ + lastReplayedVLSN = matchpointVLSN; + LoggerUtils.info(logger, repImpl, + "Rollback to matchpoint " + matchpointVLSN + + " at " + DbLsn.getNoFormatString(matchpointLsn) + + " status=" + rollbackStatus); + } + } + + /* For debugging support */ + private String dumpActiveTxns(long matchpointLsn) { + StringBuilder sb = new StringBuilder(); + sb.append("matchpointLsn="); + sb.append(DbLsn.getNoFormatString(matchpointLsn)); + for (ReplayTxn replayTxn : copyActiveTxns().values()) { + sb.append("txn id=").append(replayTxn.getId()); + sb.append(" locks=").append(replayTxn.getWriteLockIds()); + sb.append("lastLogged="); + sb.append(DbLsn.getNoFormatString(replayTxn.getLastLsn())); + sb.append("\n"); + } + + return sb.toString(); + } + + private Set getFileNames(List lsns) { + Set fileNums = new HashSet(); + Set files = new HashSet(); + + for (long lsn : lsns) { + fileNums.add(DbLsn.getFileNumber(lsn)); + } + for (long fileNum : fileNums) { + files.add(new File(FileManager.getFileName(fileNum))); + } + return files; + } + + private boolean checkRemoved(ReplayTxn txn) { + if (txn.isClosed()) { + if (activeTxns.get(txn.getId()) != null) { + return false; + } + } + + return true; + } + + /** + * Make a copy of activeTxns to avoid holding its mutex while iterating. + * Can be used whenever the cost of the HashMap copy is not significant. + */ + private Map copyActiveTxns() { + return activeTxns.getMap(); + } + + /** + * Release all transactions, database handles, etc held by the replay + * unit. The Replicator is closing down and Replay will not be invoked + * again. + */ + public void close() { + + for (ReplayTxn replayTxn : copyActiveTxns().values()) { + try { + if (logger.isLoggable(Level.FINE)) { + LoggerUtils.fine(logger, repImpl, + "Unregistering open replay txn: " + + replayTxn.getId()); + } + replayTxn.cleanup(); + } catch (DatabaseException e) { + LoggerUtils.fine(logger, repImpl, + "Replay txn: " + replayTxn.getId() + + " unregistration failed: " + e.getMessage()); + } + } + assert activeTxns.isEmpty(); + } + + /** + * Returns a copy of the statistics associated with Replay + */ + public StatGroup getStats(StatsConfig config) { + StatGroup ret = statistics.cloneGroup(config.getClear()); + + return ret; + } + + public void resetStats() { + statistics.clear(); + } + + /* For unit tests */ + public SimpleTxnMap getActiveTxns() { + return activeTxns; + } + + public String dumpState() { + StringBuilder sb = new StringBuilder(); + sb.append("lastReplayedTxn=").append(lastReplayedTxn); + sb.append(" lastReplayedVLSN=").append(lastReplayedVLSN); + sb.append(" numActiveReplayTxns=").append(activeTxns.size()); + sb.append("\n"); + return sb.toString(); + } + + /** + * Write out any pending acknowledgments. See GroupCommit.flushPendingAcks + * for details. This method is invoked after each log entry is read from + * the replication stream. + * + * @param nowNs the time at the reading of the log entry + */ + void flushPendingAcks(long nowNs) + throws IOException { + + groupCommit.flushPendingAcks(nowNs); + } + + /** + * See GroupCommit.getPollIntervalNs(long) + */ + long getPollIntervalNs(long defaultNs) { + return groupCommit.getPollIntervalNs(defaultNs); + } + + /** + * Implements group commit. It's really a substructure of Replay and exists + * mainly for modularity reasons. + *
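+ * In outline (an illustrative trace; the names are members of this
+ * class, defined below):
+ *
+ *   // A SYNC commit arrives: it is executed as NO_SYNC and its ack is
+ *   // buffered in pendingCommitAcks; the first ack starts the clock
+ *   // (limitGroupCommitNs = now + groupCommitIntervalNs).
+ *   // Later commits join the group until REPLICA_MAX_GROUP_COMMIT acks
+ *   // are buffered or the interval expires; flushPendingAcks() then
+ *   // performs a single fsync and queues every buffered ack.
+ *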

        + * Since replay is single threaded, the group commit mechanism works + * differently in the replica than in the master. In the replica, SYNC + * transactions are converted into NO_SYNC transactions and executed + * immediately, but their acknowledgments are delayed until after either + * the REPLICA_GROUP_COMMIT_INTERVAL (the max amount the first transaction + * in the group is delayed) has expired, or the size of the group (as + * specified by REPLICA_MAX_GROUP_COMMIT) has been exceeded. + */ + private class GroupCommit { + + /* Size determines max fsync commits that can be grouped. */ + private final long pendingCommitAcks[]; + + /* Number of entries currently in pendingCommitAcks */ + private int nPendingAcks; + + /* + * If this time limit is reached, the group will be forced to commit. + * Invariant: nPendingAcks > 0 ==> limitGroupCommitNs > 0 + */ + private long limitGroupCommitNs = 0; + + /* The time interval that an open group is held back. */ + private final long groupCommitIntervalNs; + + private final LongStat nGroupCommitTimeouts; + private final LongStat nGroupCommitMaxExceeded; + private final LongStat nGroupCommits; + private final LongStat nGroupCommitTxns; + + private GroupCommit(DbConfigManager configManager) { + pendingCommitAcks = new long[configManager. + getInt(RepParams.REPLICA_MAX_GROUP_COMMIT)]; + + nPendingAcks = 0; + + final long groupCommitIntervalMs = configManager. + getDuration(RepParams.REPLICA_GROUP_COMMIT_INTERVAL); + + groupCommitIntervalNs = + NANOSECONDS.convert(groupCommitIntervalMs, MILLISECONDS); + nGroupCommitTimeouts = + new LongStat(statistics, N_GROUP_COMMIT_TIMEOUTS); + + nGroupCommitMaxExceeded = + new LongStat(statistics, N_GROUP_COMMIT_MAX_EXCEEDED); + + nGroupCommitTxns = + new LongStat(statistics, N_GROUP_COMMIT_TXNS); + + nGroupCommits = + new LongStat(statistics, N_GROUP_COMMITS); + } + + /** + * Returns true if group commits are enabled at the replica. + */ + private boolean isEnabled() { + return pendingCommitAcks.length > 0; + } + + /** + * The interval used to poll for incoming log entries. The time is + * lowered from the defaultNs time, if there are pending + * acknowledgments. + * + * @param defaultNs the default poll interval + * + * @return the actual poll interval + */ + private long getPollIntervalNs(long defaultNs) { + if (nPendingAcks == 0) { + return defaultNs; + } + final long now = System.nanoTime(); + + final long interval = limitGroupCommitNs - now; + return Math.min(interval, defaultNs); + } + + /** + * Returns the sync policy to be implemented at the replica. If + * group commit is active, and SYNC is requested it will return + * NO_SYNC instead to delay the fsync. + * + * @param txnSyncPolicy the sync policy as stated in the txn + * + * @return the sync policy to be implemented by the replica + */ + private SyncPolicy getImplSyncPolicy(SyncPolicy txnSyncPolicy) { + return ((txnSyncPolicy == SyncPolicy.SYNC) && isEnabled()) ? + SyncPolicy.NO_SYNC : txnSyncPolicy; + } + + /** + * Buffers the acknowledgment if the commit calls for a sync, or if + * there are pending acknowledgments to ensure that acks are sent + * in order. 
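+     * A rough sketch of the intended calling sequence (a hypothetical
+     * caller; getImplSyncPolicy, bufferAck and queueAck are the real
+     * methods used here):
+     *
+     *   SyncPolicy impl = getImplSyncPolicy(requested); // SYNC -> NO_SYNC
+     *   // ... commit the replayTxn with the weakened policy ...
+     *   if (!bufferAck(System.nanoTime(), replayTxn, requested)) {
+     *       queueAck(replayTxn.getId()); // not grouped; ack immediately
+     *   }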
+ * + * @param nowNs the current time + * @param ackTxn the txn associated with the ack + * @param txnSyncPolicy the sync policy as request by the committing + * txn + * + * @return true if the ack has been buffered + */ + private final boolean bufferAck(long nowNs, + ReplayTxn ackTxn, + SyncPolicy txnSyncPolicy) + throws IOException { + + if (!isEnabled() || + !((txnSyncPolicy == SyncPolicy.SYNC) || (nPendingAcks > 0))) { + return false; + } + + pendingCommitAcks[nPendingAcks++] = ackTxn.getId(); + + if (nPendingAcks == 1) { + /* First txn in group, start the clock. */ + limitGroupCommitNs = nowNs + groupCommitIntervalNs; + } else { + flushPendingAcks(nowNs); + } + return true; + } + + /** + * Flush if there are pending acks and either the buffer limit or the + * group interval has been reached. + * + * @param nowNs the current time (passed in to minimize system calls) + */ + private final void flushPendingAcks(long nowNs) + throws IOException { + + if ((nPendingAcks == 0) || + ((nPendingAcks != pendingCommitAcks.length) && + (NanoTimeUtil.compare(nowNs, limitGroupCommitNs) < 0))) { + + return; + } + + /* Update statistics. */ + nGroupCommits.increment(); + nGroupCommitTxns.add(nPendingAcks); + if (NanoTimeUtil.compare(nowNs, limitGroupCommitNs) >= 0) { + nGroupCommitTimeouts.increment(); + } else if (nPendingAcks >= pendingCommitAcks.length) { + nGroupCommitMaxExceeded.increment(); + } + + /* flush log buffer and fsync to disk */ + repImpl.getLogManager().flushSync(); + + /* commits are on disk, send out acknowledgments on the network. */ + for (int i=0; i < nPendingAcks; i++) { + queueAck(pendingCommitAcks[i]); + pendingCommitAcks[i] = 0; + } + + nPendingAcks = 0; + limitGroupCommitNs = 0; + } + } + + /** + * Simple helper class to package a Txn vlsn and its associated commit + * time. + */ + public static class TxnInfo { + final VLSN txnVLSN; + final long masterTxnEndTime; + + private TxnInfo(VLSN txnVLSN, long masterTxnEndTime) { + this.txnVLSN = txnVLSN; + this.masterTxnEndTime = masterTxnEndTime; + } + + public VLSN getTxnVLSN() { + return txnVLSN; + } + + public long getMasterTxnEndTime() { + return masterTxnEndTime; + } + + @Override + public String toString() { + return " VLSN: " + txnVLSN + + " masterTxnEndTime=" + new Date(masterTxnEndTime); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/ReplayStatDefinition.java b/src/com/sleepycat/je/rep/impl/node/ReplayStatDefinition.java new file mode 100644 index 0000000..32b259b --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/ReplayStatDefinition.java @@ -0,0 +1,211 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for HA Replay statistics. + */ +public class ReplayStatDefinition { + + public static final String GROUP_NAME = "Replay"; + public static final String GROUP_DESC = "The Replay unit applies the " + + "incoming replication stream at a Replica. 
These stats show the " + + "load the Replica incurs when processing updates."; + + public static final String N_COMMITS_NAME = + "nCommits"; + public static final String N_COMMITS_DESC = + "Number of Commits replayed by the Replica."; + public static final StatDefinition N_COMMITS = + new StatDefinition( + N_COMMITS_NAME, + N_COMMITS_DESC); + + public static final String N_GROUP_COMMIT_TIMEOUTS_NAME = + "nGroupCommitTimeouts"; + public static final String N_GROUP_COMMIT_TIMEOUTS_DESC = + "Number of group commits that were initiated due to the group timeout" + + " interval(ReplicationConfig.REPLICA_GROUP_COMMIT_INTERVAL) being" + + " exceeded."; + public static final StatDefinition N_GROUP_COMMIT_TIMEOUTS = + new StatDefinition( + N_GROUP_COMMIT_TIMEOUTS_NAME, + N_GROUP_COMMIT_TIMEOUTS_DESC); + + public static final String N_GROUP_COMMIT_MAX_EXCEEDED_NAME = + "nGroupCommitMaxExceeded"; + public static final String N_GROUP_COMMIT_MAX_EXCEEDED_DESC = + "Number of group commits that were initiated due to the max group " + + "size(ReplicationConfig.REPLICA_MAX_GROUP_COMMIT) being exceeded."; + public static final StatDefinition N_GROUP_COMMIT_MAX_EXCEEDED = + new StatDefinition( + N_GROUP_COMMIT_MAX_EXCEEDED_NAME, + N_GROUP_COMMIT_MAX_EXCEEDED_DESC); + + public static final String N_GROUP_COMMIT_TXNS_NAME = + "nGroupCommitTxns"; + public static final String N_GROUP_COMMIT_TXNS_DESC = + "Number of replay transaction commits that were part of a group " + + "commit operation."; + public static final StatDefinition N_GROUP_COMMIT_TXNS = + new StatDefinition( + N_GROUP_COMMIT_TXNS_NAME, + N_GROUP_COMMIT_TXNS_DESC); + + public static final String N_GROUP_COMMITS_NAME = + "nGroupCommits"; + public static final String N_GROUP_COMMITS_DESC = + "Number of group commit operations."; + public static final StatDefinition N_GROUP_COMMITS = + new StatDefinition( + N_GROUP_COMMITS_NAME, + N_GROUP_COMMITS_DESC); + + public static final String N_COMMIT_ACKS_NAME = + "nCommitAcks"; + public static final String N_COMMIT_ACKS_DESC = + "Number of commits for which the Master requested an ack."; + public static final StatDefinition N_COMMIT_ACKS = + new StatDefinition( + N_COMMIT_ACKS_NAME, + N_COMMIT_ACKS_DESC); + + public static final String N_COMMIT_SYNCS_NAME = + "nCommitSyncs"; + public static final String N_COMMIT_SYNCS_DESC = + "Number of CommitSyncs used to satisfy ack requests. 
Note that user " + + "level commit sync requests may be optimized into CommitNoSync " + + "requests as part of a group commit."; + public static final StatDefinition N_COMMIT_SYNCS = + new StatDefinition( + N_COMMIT_SYNCS_NAME, + N_COMMIT_SYNCS_DESC); + + public static final String N_COMMIT_NO_SYNCS_NAME = + "nCommitNoSyncs"; + public static final String N_COMMIT_NO_SYNCS_DESC = + "Number of CommitNoSyncs used to satisfy ack requests."; + public static final StatDefinition N_COMMIT_NO_SYNCS = + new StatDefinition( + N_COMMIT_NO_SYNCS_NAME, + N_COMMIT_NO_SYNCS_DESC); + + public static final String N_COMMIT_WRITE_NO_SYNCS_NAME = + "nCommitWriteNoSyncs"; + public static final String N_COMMIT_WRITE_NO_SYNCS_DESC = + "Number of CommitWriteNoSyncs used to satisfy ack requests."; + public static final StatDefinition N_COMMIT_WRITE_NO_SYNCS = + new StatDefinition( + N_COMMIT_WRITE_NO_SYNCS_NAME, + N_COMMIT_WRITE_NO_SYNCS_DESC); + + public static final String N_ABORTS_NAME = + "nAborts"; + public static final String N_ABORTS_DESC = + "Number of Aborts replayed by the Replica."; + public static final StatDefinition N_ABORTS = + new StatDefinition( + N_ABORTS_NAME, + N_ABORTS_DESC); + + public static final String N_LNS_NAME = + "nLNs"; + public static final String N_LNS_DESC = + "Number of LNs."; + public static final StatDefinition N_LNS = + new StatDefinition( + N_LNS_NAME, + N_LNS_DESC); + + public static final String N_NAME_LNS_NAME = + "nNameLNs"; + public static final String N_NAME_LNS_DESC = + "Number of Name LNs."; + public static final StatDefinition N_NAME_LNS = + new StatDefinition( + N_NAME_LNS_NAME, + N_NAME_LNS_DESC); + + public static final String N_ELAPSED_TXN_TIME_NAME = + "nElapsedTxnTime"; + public static final String N_ELAPSED_TXN_TIME_DESC = + "The elapsed time in ms, spent replaying all transactions."; + public static final StatDefinition N_ELAPSED_TXN_TIME = + new StatDefinition( + N_ELAPSED_TXN_TIME_NAME, + N_ELAPSED_TXN_TIME_DESC); + + public static final String N_MESSAGE_QUEUE_OVERFLOWS_NAME = + "nMessageQueueOverflows"; + public static final String N_MESSAGE_QUEUE_OVERFLOWS_DESC = + "Number of failed attempts to place an entry in the replica message " + + "queue due to the queue being full."; + public static final StatDefinition N_MESSAGE_QUEUE_OVERFLOWS = + new StatDefinition( + N_MESSAGE_QUEUE_OVERFLOWS_NAME, + N_MESSAGE_QUEUE_OVERFLOWS_DESC); + + public static final String MIN_COMMIT_PROCESSING_NANOS_NAME = + "minCommitProcessingNanos"; + public static final String MIN_COMMIT_PROCESSING_NANOS_DESC = + "Minimum nanosecs for commit processing"; + public static final StatDefinition MIN_COMMIT_PROCESSING_NANOS = + new StatDefinition( + MIN_COMMIT_PROCESSING_NANOS_NAME, + MIN_COMMIT_PROCESSING_NANOS_DESC); + + public static final String MAX_COMMIT_PROCESSING_NANOS_NAME = + "maxCommitProcessingNanos"; + public static final String MAX_COMMIT_PROCESSING_NANOS_DESC = + "Maximum nanosecs for commit processing"; + public static final StatDefinition MAX_COMMIT_PROCESSING_NANOS = + new StatDefinition( + MAX_COMMIT_PROCESSING_NANOS_NAME, + MAX_COMMIT_PROCESSING_NANOS_DESC); + + public static final String TOTAL_COMMIT_PROCESSING_NANOS_NAME = + "totalCommitProcessingNanos"; + public static final String TOTAL_COMMIT_PROCESSING_NANOS_DESC = + "Total nanosecs for commit processing"; + public static final StatDefinition TOTAL_COMMIT_PROCESSING_NANOS = + new StatDefinition( + TOTAL_COMMIT_PROCESSING_NANOS_NAME, + TOTAL_COMMIT_PROCESSING_NANOS_DESC); + + public static final String 
TOTAL_COMMIT_LAG_MS_NAME = + "totalCommitLagMs"; + public static final String TOTAL_COMMIT_LAG_MS_DESC = + "Sum of time periods, in msec, between when update operations commit " + + "on the master and then subsequently commit on the replica. This " + + "value is affected by any clock skew between the master and the " + + "replica."; + public static final StatDefinition TOTAL_COMMIT_LAG_MS = + new StatDefinition( + TOTAL_COMMIT_LAG_MS_NAME, + TOTAL_COMMIT_LAG_MS_DESC); + + public static final String LATEST_COMMIT_LAG_MS_NAME = + "latestCommitLagMs"; + public static final String LATEST_COMMIT_LAG_MS_DESC = + "Time in msec between when the latest update operation committed on " + + "the master and then subsequently committed on the replica. This " + + "value is affected by any clock skew between the master and the " + + "replica."; + public static final StatDefinition LATEST_COMMIT_LAG_MS = + new StatDefinition( + LATEST_COMMIT_LAG_MS_NAME, + LATEST_COMMIT_LAG_MS_DESC); +} diff --git a/src/com/sleepycat/je/rep/impl/node/Replica.java b/src/com/sleepycat/je/rep/impl/node/Replica.java new file mode 100644 index 0000000..3c46858 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/Replica.java @@ -0,0 +1,1745 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_LAG_CONSISTENCY_WAITS; +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_LAG_CONSISTENCY_WAIT_MS; +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_VLSN_CONSISTENCY_WAITS; +import static com.sleepycat.je.rep.impl.node.ReplicaStatDefinition.N_VLSN_CONSISTENCY_WAIT_MS; + +import java.io.IOException; +import java.net.ConnectException; +import java.nio.channels.ClosedByInterruptException; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DiskLimitException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.ReplicaConsistencyPolicy; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.CommitPointConsistencyPolicy; +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.MasterStateException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.RestartRequiredException; +import com.sleepycat.je.rep.TimeConsistencyPolicy; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; 
+import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.stream.BaseProtocol.Heartbeat; +import com.sleepycat.je.rep.stream.BaseProtocol.ShutdownRequest; +import com.sleepycat.je.rep.stream.MasterStatus.MasterSyncException; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.stream.ReplicaFeederHandshake; +import com.sleepycat.je.rep.stream.ReplicaFeederHandshakeConfig; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup.TestHook; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.txn.ReplayTxn; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.BinaryProtocol.MessageOp; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.utilint.NamedChannelWithTimeout; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.RepUtils.Clock; +import com.sleepycat.je.rep.utilint.RepUtils.ExceptionAwareCountDownLatch; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.Response; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * The Replica class is the locus of the replay operations and replica + * transaction consistency tracking and management operations at a replica + * node. + * + * A single instance of this class is created when the replication node is + * created and exists for the lifetime of the replication node, although it is + * only really used when the node is operating as a Replica. + * + * Note that the Replica (like the FeederManager) does not have its own + * independent thread of control; it runs in the RepNode's thread. To make the + * network I/O as aync as possible, and avoid stalls during network I/O the + * input and output are done in separate threads. The overall thread + * and queue organization is as sketched below: + * + * read from network -> RepNodeThread (does read) -> replayQueue + * replayQueue -> ReplayThread -> outputQueue + * outputQueue -> ReplicaOutputThread (does write) -> writes to network + * + * This three thread organization has the following benefits over a single + * thread replay model: + * + * 1) It makes the heartbeat mechanism used to determine whether the HA sockets + * are in use more reliable. This is because a heartbeat response cannot + * be blocked by lock contention in the replay thread, since a heartbeat + * can be sent spontaneously (without an explicit heartbeat request from the + * feeder) by the ReplicaOutputThread, if a heartbeat had not been sent during + * a heartbeat interval period. + * + * 2) The cpu load in the replay thread is reduced by offloading the + * network-specific aspects of the processing to different threads. It's + * important to keep the CPU load in this thread at a minimum so we can use + * a simple single thread replay scheme. 
+ * + * 3) Prevents replay thread stalls by input and output buffering in the two + * threads on either side of it. + * + * With jdk 1.7 we could eliminate the use of these threads and switch over to + * the new aysnc I/O APIs, but that involves a lot more code restructuring. + */ +public class Replica { + + /* The Node to which the Replica belongs. */ + private final RepNode repNode; + private final RepImpl repImpl; + + /* The replay component of the Replica */ + private final Replay replay; + + /* The exception that provoked the replica exit. */ + private Exception shutdownException = null; + + /* + * It's non null when the loop is active. + */ + private NamedChannelWithTimeout replicaFeederChannel = null; + + /* The consistency component. */ + private final ConsistencyTracker consistencyTracker; + + /** + * The latest txn-ending (commit or abort) VLSN that we have on this + * replica. + */ + private volatile VLSN txnEndVLSN; + + /* + * A test delay introduced in the replica loop to simulate a loaded + * replica. The replica waits this amount of time before processing each + * message. + */ + private int testDelayMs = 0; + + /* For testing only - mimic a network partition. */ + private boolean dontProcessStream = false; + + /* Number of times to retry on a network connection failure. */ + private static final int NETWORK_RETRIES = 2 ; + + /* + * Service unavailable retries. These are typically the result of service + * request being made before the node is ready to provide them. For + * example, the feeder service is only available after a node has + * transitioned to becoming the master. + */ + private static final int SERVICE_UNAVAILABLE_RETRIES = 10; + + /* + * The number of ms to wait between above retries, allowing time for the + * master to assume its role, and start listening on its port. + */ + private static final int CONNECT_RETRY_SLEEP_MS = 1000; + + /* + * The protocol instance if one is currently in use by the Replica. + */ + private Protocol protocol = null; + + /* + * Protocol statistics aggregated across all past protocol instantiations. + * It does not include the statistics for the current Protocol object in + * use. A node can potentially go through the Replica state multiple time + * during it's lifetime. This instance aggregates replica statistics + * across all transitions into and out of the Replica state. + */ + private final StatGroup aggProtoStats; + + /* + * Holds the exception that is thrown to indicate that an election is + * needed before a hard recovery can proceed. It's set to a non-null value + * when the need for a hard recovery is first discovered and is + * subsequently cleared after an election is held and before the next + * attempt at a syncup with the newly elected master. The election ensures + * that the master being used for an actual rollback is current and is not + * an isolated master that is out of date, due to a network partition that + * has since been resolved. + */ + private HardRecoveryElectionException hardRecoveryElectionException; + + /* For testing only. */ + private TestHook replicaFeederSyncupHook; + private final com.sleepycat.je.utilint.TestHook replayHook; + static private com.sleepycat.je.utilint.TestHook initialReplayHook; + + /* + * A cache of DatabaseImpls for the Replay to speed up DbTree.getId(). + * Cleared/invalidated by a heartbeat or if je.rep.dbIdCacheOpCount + * operations have gone by, or if any replay operations on Name LNs are + * executed. 
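+     * (Replayed operations on Name LNs change the database name space, so
+     * a cached DatabaseImpl could otherwise go stale; presumably that is
+     * why they invalidate this cache.)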
+ */ + private final DbCache dbCache; + + /** + * The message queue used for communications between the network read + * thread and the replay thread. + */ + private final BlockingQueue replayQueue; + + /* + * The replica output thread. It's only maintained here as an IV, rather + * than as a local variable inside doRunReplicaLoopInternalWork() to + * facilitate unit tests and is non null only for for the duration of the + * method. + */ + private volatile ReplicaOutputThread replicaOutputThread; + + private final Logger logger; + + /** + * The number of times a message entry could not be inserted into + * the queue within the poll period and had to be retried. + */ + private final LongStat nMessageQueueOverflows; + + Replica(RepNode repNode, Replay replay) { + this.repNode = repNode; + this.repImpl = repNode.getRepImpl(); + DbConfigManager configManager = repNode.getConfigManager(); + dbCache = new DbCache(repImpl.getDbTree(), + configManager.getInt + (RepParams.REPLAY_MAX_OPEN_DB_HANDLES), + configManager.getDuration + (RepParams.REPLAY_DB_HANDLE_TIMEOUT)); + + consistencyTracker = new ConsistencyTracker(); + this.replay = replay; + logger = LoggerUtils.getLogger(getClass()); + aggProtoStats = + new StatGroup(BinaryProtocolStatDefinition.GROUP_NAME, + BinaryProtocolStatDefinition.GROUP_DESC); + nMessageQueueOverflows = replay.getMessageQueueOverflows(); + testDelayMs = + repNode.getConfigManager().getInt(RepParams.TEST_REPLICA_DELAY); + replayHook = initialReplayHook; + + /* Set up the replay queue. */ + final int replayQueueSize = repNode.getConfigManager(). + getInt(RepParams.REPLICA_MESSAGE_QUEUE_SIZE); + + replayQueue = new ArrayBlockingQueue<>(replayQueueSize); + } + + /** + * Shutdown the Replica, free any threads that may have been waiting for + * the replica to reach some degree of consistency. This method is only + * invoked as part of the repnode shutdown. + * + * If the shutdown is being executed from a different thread, it attempts + * to interrupt the thread by first shutting down the channel it may be + * waiting on for input from the feeder. The replica thread should notice + * the channel shutdown and/or the shutdown state of the rep node itself. + * The caller will use harsher methods, like an interrupt, if the rep node + * thread (Replica or Feeder) is still active. + */ + public void shutdown() { + if (!repNode.isShutdown()) { + throw EnvironmentFailureException.unexpectedState + ("Rep node must have initiated the shutdown."); + } + consistencyTracker.shutdown(); + if (Thread.currentThread() == repNode) { + return; + } + + /* + * Perform the actions to provoke a "soft" shutdown. + * + * Since the replica shares the RepNode thread, it will take care of + * the actual thread shutdown itself. + */ + + /* + * Shutdown the channel as an attempt to interrupt just the socket + * read/write operation. + */ + RepUtils.shutdownChannel(replicaFeederChannel); + + /* + * Clear the latch in case the replica loop is waiting for the outcome + * of an election. + */ + repNode.getVLSNFreezeLatch().clearLatch(); + } + + /** + * For unit testing only! + */ + public void setTestDelayMs(int testDelayMs) { + this.testDelayMs = testDelayMs; + } + + public int getTestDelayMs() { + return testDelayMs; + } + + /** + * For unit testing only! 
+ */ + public void setDontProcessStream() { + dontProcessStream = true; + } + + public VLSN getTxnEndVLSN() { + return txnEndVLSN; + } + + public Replay replay() { + return replay; + } + + public DbCache getDbCache() { + return dbCache; + } + + public ConsistencyTracker getConsistencyTracker() { + return consistencyTracker; + } + + DataChannel getReplicaFeederChannel() { + return replicaFeederChannel.getChannel(); + } + + Protocol getProtocol() { + return protocol; + } + + /** + * Returns the last commit VLSN at the master, as known at the replica. + * + * @return the commit VLSN + */ + public long getMasterTxnEndVLSN() { + return consistencyTracker.getMasterTxnEndVLSN(); + } + + /** + * For test use only. + */ + public ReplicaOutputThread getReplicaOutputThread() { + return replicaOutputThread; + } + + /** + * The core control loop when the node is serving as a Replica. Note that + * if a Replica is also serving the role of a feeder, it will run + * additional feeder loops in separate threads. The loop exits when it + * encounters one of the following possible conditions: + * + * 1) The connection to the master can no longer be maintained, due to + * connectivity issues, or because the master has explicitly shutdown its + * connections due to an election. + * + * 2) The node becomes aware of a change in master, that is, assertSync() + * fails. + * + * 3) The loop is interrupted, which is interpreted as a request to + * shutdown the replication node as a whole. + * + * 4) It fails to establish its node information in the master as it + * attempts to join the replication group for the first time. + * + * Normal exit from this run loop results in the rep node retrying an + * election and continuing in its new role as determined by the outcome of + * the election. A thrown exception, on the other hand, results in the rep + * node as a whole terminating its operation and no longer participating in + * the replication group, that is, it enters the DETACHED state. + * + * Note that the in/out streams are handled synchronously on the replica, + * while they are handled asynchronously by the Feeder. + * + * @throws InterruptedException + * @throws DatabaseException if the environment cannot be closed/for a + * re-init + * @throws GroupShutdownException + */ + void runReplicaLoop() + throws InterruptedException, + DatabaseException, + GroupShutdownException { + + Class retryExceptionClass = null; + int retryCount = 0; + try { + + while (true) { + try { + runReplicaLoopInternal(); + /* Normal exit */ + break; + } catch (RetryException e) { + if (!repNode.getMasterStatus().inSync()) { + LoggerUtils.fine(logger, repImpl, + "Retry terminated, out of sync."); + break; + } + if ((e.getClass() == retryExceptionClass) || + (e.retries == 0)) { + if (++retryCount >= e.retries) { + /* Exit replica retry elections */ + LoggerUtils.info + (logger, repImpl, + "Failed to recover from exception: " + + e.getMessage() + ", despite " + e.retries + + " retries.\n" + + LoggerUtils.getStackTrace(e)); + break; + } + } else { + retryCount = 0; + retryExceptionClass = e.getClass(); + } + LoggerUtils.info(logger, repImpl, "Retry #: " + + retryCount + "/" + e.retries + + " Will retry replica loop after " + + e.retrySleepMs + "ms. "); + Thread.sleep(e.retrySleepMs); + if (!repNode.getMasterStatus().inSync()) { + break; + } + } catch (DiskLimitException e) { + /* + * Exit replica loop, wait for disk space to become + * available in main rep node loop. 
+ */ + break; + } + } + } finally { + + /* + * Reset the rep node ready latch unless the replica is not ready + * because it's going to hold an election before proceeding with + * hard recovery and joining the group. + */ + if (hardRecoveryElectionException == null) { + repNode.resetReadyLatch(shutdownException); + } + } + /* Exit use elections to try a different master. */ + } + + private void runReplicaLoopInternal() + throws RestartRequiredException, + InterruptedException, + RetryException, + InsufficientLogException { + + shutdownException = null; + LoggerUtils.info(logger, repImpl, + "Replica loop started with master: " + + repNode.getMasterStatus().getNodeMasterNameId()); + if (testDelayMs > 0) { + LoggerUtils.info(logger, repImpl, + "Test delay of: " + testDelayMs + "ms." + + " after each message sent"); + } + try { + initReplicaLoop(); + doRunReplicaLoopInternalWork(); + } catch (RestartRequiredException rre) { + shutdownException = rre; + throw rre; + } catch (ClosedByInterruptException closedByInterruptException) { + if (repNode.isShutdown()) { + LoggerUtils.info(logger, repImpl, + "Replica loop interrupted for shutdown."); + return; + } + LoggerUtils.warning(logger, repImpl, + "Replica loop unexpected interrupt."); + throw new InterruptedException + (closedByInterruptException.getMessage()); + } catch (IOException e) { + + /* + * Master may have changed with the master shutting down its + * connection as a result. Normal course of events, log it and + * return to the outer node level loop. + */ + LoggerUtils.info(logger, repImpl, + "Replica IO exception: " + e.getClass().getName() + + " Message:" + e.getMessage() + + (logger.isLoggable(Level.FINE) ? + ("\n" + LoggerUtils.getStackTrace(e)) : "")); + } catch (RetryException|DiskLimitException e) { + /* Propagate it outwards. Node does not need to shutdown. */ + throw e; + } catch (GroupShutdownException e) { + shutdownException = e; + throw e; + } catch (RuntimeException e) { + shutdownException = e; + LoggerUtils.severe(logger, repImpl, + "Replica unexpected exception " + e + + " " + LoggerUtils.getStackTrace(e)); + throw e; + } catch (MasterSyncException e) { + /* expected change in masters from an election. */ + LoggerUtils.info(logger, repImpl, e.getMessage()); + } catch (HardRecoveryElectionException e) { + + /* + * Exit the replica loop so that elections can be held and the + * master confirmed. + */ + hardRecoveryElectionException = e; + LoggerUtils.info(logger, repImpl, e.getMessage()); + } catch (Exception e) { + shutdownException = e; + LoggerUtils.severe(logger, repImpl, + "Replica unexpected exception " + e + + " " + LoggerUtils.getStackTrace(e)); + throw EnvironmentFailureException.unexpectedException(e); + } finally { + loopExitCleanup(); + } + } + + protected void doRunReplicaLoopInternalWork() + throws Exception { + + final int timeoutMs = repNode.getConfigManager(). + getDuration(RepParams.REPLICA_TIMEOUT); + replicaFeederChannel.setTimeoutMs(timeoutMs); + + replayQueue.clear(); + repImpl.getReplay().reset(); + + replicaOutputThread = new ReplicaOutputThread(repImpl); + replicaOutputThread.start(); + + final ReplayThread replayThread = new ReplayThread(); + replayThread.start(); + long maxPending = 0; + + try { + while (true) { + final Message message = protocol.read(replicaFeederChannel); + + if (repNode.isShutdownOrInvalid() || (message == null)) { + return; + } + + /* Throw DiskLimitException if there is a violation. */ + repNode.getRepImpl().checkDiskLimitViolation(); + + while (!replayQueue. 
+ offer(message, + ReplayThread.QUEUE_POLL_INTERVAL_NS, + TimeUnit.NANOSECONDS)) { + /* Offer timed out. */ + if (!replayThread.isAlive()) { + return; + } + /* Retry the offer */ + nMessageQueueOverflows.increment(); + } + + final int pending = replayQueue.size(); + if (pending > maxPending) { + maxPending = pending; + } + } + } catch (IOException ioe) { + + /* + * Make sure messages in the queue are processed. Ensure, in + * particular, that shutdown requests are processed and not ignored + * due to the IOEException resulting from a closed connection. + */ + replayThread.exitRequest = ReplayExitType.SOFT; + } finally { + + if (replayThread.exitRequest == ReplayExitType.SOFT) { + + /* + * Drain all queued messages, exceptions may be generated + * in the process. They logically precede IO exceptions. + */ + replayThread.join(); + } + + try { + + if (replayThread.exception != null) { + /* replay thread is dead or exiting. */ + throw replayThread.exception; + } + + if (replicaOutputThread.getException() != null) { + throw replicaOutputThread.getException(); + } + } finally { + + /* Ensure thread has exited in all circumstances */ + replayThread.exitRequest = ReplayExitType.IMMEDIATE; + replayThread.join(); + + replicaOutputThread.shutdownThread(logger); + replicaOutputThread = null; + } + } + } + + /** + * Process the shutdown message from the master and return the + * GroupShutdownException that must be thrown to exit the Replica loop. + * + * @return the GroupShutdownException + */ + private GroupShutdownException processShutdown(ShutdownRequest shutdown) + throws IOException { + + /* + * Acknowledge the shutdown message right away, since the checkpoint + * operation can take a long time to complete. Long enough to exceed + * the feeder timeout on the master. The master only needs to know that + * the replica has received the message. + */ + replay.queueAck(ReplicaOutputThread.SHUTDOWN_ACK); + + /* + * Turn off network timeouts on the replica, since we don't want the + * replica to timeout the connection. The connection itself is no + * longer used past this point and will be reclaimed as part of normal + * replica exit cleanup. + */ + replicaFeederChannel.setTimeoutMs(Integer.MAX_VALUE); + + /* + * TODO: Share the following code with the standalone Environment + * shutdown, or better yet, call EnvironmentImpl.doClose here. + */ + + /* + * Begin shutdown of the deamons before checkpointing. Cleaning during + * the checkpoint is wasted and slows down the checkpoint, plus it may + * cause additional checkpoints. + */ + repNode.getRepImpl().requestShutdownDaemons(); + + /* + * Now start a potentially long running checkpoint. + */ + LoggerUtils.info(logger, repImpl, "Checkpoint initiated."); + CheckpointConfig config = new CheckpointConfig(); + config.setForce(true); + config.setMinimizeRecoveryTime(true); + try { + repNode.getRepImpl().invokeCheckpoint(config, "Group Shutdown"); + LoggerUtils.info(logger, repImpl, "Checkpoint completed."); + } catch (Exception e) { + LoggerUtils.info(logger, repImpl, "Checkpoint failed: " + e); + } + /* Force final shutdown of the daemons. */ + repNode.getRepImpl().shutdownDaemons(); + + return new GroupShutdownException(logger, + repNode, + shutdown.getShutdownTimeMs()); + } + + /** + * Initialize for replica loop entry, which involves completing the + * following steps successfully: + * + * 1) The replica feeder handshake. + * 2) The replica feeder syncup. + * 3) Processing the first heartbeat request from the feeder. 
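+     *
+     * Step 1 negotiates the protocol version to be used, step 2 locates
+     * the matchpoint (and may surface an InsufficientLogException or a
+     * HardRecoveryElectionException), and step 3 gives the
+     * ConsistencyTracker its baseline via reinit().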
+ */ + private void initReplicaLoop() + throws IOException, + ConnectRetryException, + DatabaseException, + ProtocolException, + InterruptedException, + HardRecoveryElectionException { + + createReplicaFeederChannel(); + ReplicaFeederHandshake handshake = + new ReplicaFeederHandshake(new RepFeederHandshakeConfig()); + protocol = handshake.execute(); + repNode.notifyReplicaConnected(); + + /* Init GlobalCBVLSN using feeder manager's minJEVersion. */ + repNode.globalCBVLSN.init(repNode, handshake.getFeederMinJEVersion()); + + final boolean hardRecoveryNeedsElection; + + if (hardRecoveryElectionException != null) { + LoggerUtils.info(logger, repImpl, + "Replica syncup after election to verify master:"+ + hardRecoveryElectionException.getMaster() + + " elected master:" + + repNode.getMasterStatus().getNodeMasterNameId()); + hardRecoveryNeedsElection = false; + } else { + hardRecoveryNeedsElection = true; + } + hardRecoveryElectionException = null; + + ReplicaFeederSyncup syncup = + new ReplicaFeederSyncup(repNode, replay, replicaFeederChannel, + protocol, hardRecoveryNeedsElection); + syncup.execute(repNode.getCBVLSNTracker()); + + txnEndVLSN = syncup.getMatchedVLSN(); + long matchedTxnEndTime = syncup.getMatchedVLSNTime(); + consistencyTracker.reinit(txnEndVLSN.getSequence(), + matchedTxnEndTime); + Protocol.Heartbeat heartbeat = + protocol.read(replicaFeederChannel.getChannel(), + Protocol.Heartbeat.class); + processHeartbeat(heartbeat); + long replicaDelta = consistencyTracker.getMasterTxnEndVLSN() - + consistencyTracker.lastReplayedVLSN.getSequence(); + LoggerUtils.info(logger, repImpl, String.format + ("Replica initialization completed. Replica VLSN: %s " + + " Heartbeat master commit VLSN: %,d " + + " DTVLSN:%,d Replica VLSN delta: %,d", + consistencyTracker.lastReplayedVLSN, + consistencyTracker.getMasterTxnEndVLSN(), + repNode.getAnyDTVLSN(), + replicaDelta)); + + /* + * The replica is ready for business, indicate that the node is + * ready by counting down the latch and releasing any waiters. + */ + repNode.getReadyLatch().countDown(); + } + + /** + * Process a heartbeat message. It queues a response and updates + * the consistency tracker with the information in the heartbeat. + * + * @param heartbeat the heartbeat message + * @throws IOException + */ + private void processHeartbeat(Heartbeat heartbeat) + throws IOException { + + replay.queueAck(ReplicaOutputThread.HEARTBEAT_ACK); + consistencyTracker.trackHeartbeat(heartbeat); + } + + /** + * Performs the cleanup actions upon exit from the internal replica loop. + */ + private void loopExitCleanup() { + + if (shutdownException != null) { + if (shutdownException instanceof RetryException) { + LoggerUtils.info(logger, repImpl, + "Retrying connection to feeder. Message: " + + shutdownException.getMessage()); + } else if (shutdownException instanceof GroupShutdownException) { + LoggerUtils.info(logger, repImpl, + "Exiting inner Replica loop." + + " Master requested shutdown."); + } else { + LoggerUtils.warning + (logger, repImpl, + "Exiting inner Replica loop with exception " + + shutdownException + "\n" + + LoggerUtils.getStackTrace(shutdownException)); + } + } else { + LoggerUtils.info(logger, repImpl, "Exiting inner Replica loop." ); + } + + clearDbTreeCache(); + RepUtils.shutdownChannel(replicaFeederChannel); + + if (consistencyTracker != null) { + consistencyTracker.logStats(); + } + + /* Sum up statistics for the loop. 
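+         * The current Protocol instance is about to be discarded, so fold
+         * its counts into aggProtoStats, which survives transitions into
+         * and out of the Replica state.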
*/ + if (protocol != null) { + aggProtoStats.addAll(protocol.getStats(StatsConfig.DEFAULT)); + } + protocol = null; + + /* + * If this node has a transient ID, then null out its ID to allow the + * next feeder connection to assign it a new one + */ + if (repNode.getNodeType().hasTransientId()) { + repNode.getNameIdPair().revertToNull(); + } + } + + /* + * Clear the DatabaseId -> DatabaseImpl cache used to speed up DbTree + * lookup operations. + */ + void clearDbTreeCache() { + dbCache.clear(); + } + + /** + * Invoked when this node transitions to the master state. Aborts all + * inflight replay transactions outstanding from a previous state as a + * Replica, because they were initiated by a different master and will + * never complete. Also, release any Replica transactions that were waiting + * on consistency policy requirements. + */ + void masterTransitionCleanup() + throws DatabaseException { + hardRecoveryElectionException = null; + replay.abortOldTxns(); + consistencyTracker.forceTripLatches + (new MasterStateException(repNode.getRepImpl(). + getStateChangeEvent())); + } + + /** + * Invoked when this node seamlessly changes roles from master to replica + * without a recovery. The ability to do this transition without a recovery + * is desirable because it's a faster transition, and avoids the GC + * overhead of releasing the JE cache, and the I/O overhead of recreating + * the in-memory btree. + *

+     * The two key cases where this happens are:
+     * A) a network partition occurs, and the group elects a new master. The
+     * orphaned master did not crash and its environment is still valid, and
+     * when it regains contact with the group, it discovers that it has been
+     * deposed. It transitions into replica status.
+     *

+     * B) a master transfer request moves mastership from this node to
+     * another member of the group. This node's environment is still valid,
+     * and it transitions to replica state.
+     *

+     * The transition from master to replica requires resetting state so all
+     * is as expected for a Replica. There are two categories of work:
+     * - network connections: shutting down feeder connections and
+     *   reinitializing feeder infrastructure so that a future
+     *   replica->master transition will work.
+     * - resetting transaction state. All MasterTxns must be transformed
+     *   into ReplayTxns, bearing the same transaction id and holding the
+     *   same locks.
+     *

+     * Note: since non-masters can't commit txns, the inflight MasterTxns are
+     * destined to be aborted in the future. An alternative to resetting
+     * transaction state would be to mark them in some way so that the later
+     * HA syncup/replay ignores operations pertaining to these ill-fated
+     * txns. We didn't choose that approach because the simplicity of the
+     * replay is a plus; it is almost entirely ignorant of the semantics of
+     * the replication stream. Also, replays have potential for complexity,
+     * either because syncups could restart if masters change or become
+     * unavailable, or because there may be future performance optimizations
+     * in that area.
+     *

+     * Resetting transaction state is tricky because the MasterTxn is
+     * accessible to the application code. While the Replay thread is
+     * attempting to transform the MasterTxn, application threads may be
+     * attempting to commit or abort the transactions. Note that application
+     * threads will not be trying to add locks, because the node will be in
+     * UNKNOWN state, and writes will be prohibited by the MasterTxn.
+     *

+     * MasterTransfers do impose a blocking period on transaction commits
+     * and aborts, but even there, windows exist in the post-block period
+     * where the application may try to abort the transaction. Network
+     * partitions impose no blocking at all, and have a wider window during
+     * which the application and RepNode thread must be coordinated. Here's
+     * a diagram of the time periods of concern:
+     *

+     * t1 - master transfer request issued (only when master transfer)
+     * t2 - user txns which attempt to abort or commit are blocked on
+     *      RepImpl.blockTxnLatch (only when master transfer)
+     * t3 - node detects that it has transitioned to UNKNOWN and lost
+     *      master status. MasterTxns are now stopped from acquiring
+     *      locks or committing and will throw UnknownMasterException.
+     * t4 - feeder connections shutdown
+     * t5 - node begins conversion to replica state
+     * t6 - blockTxnLatch released (only when master transfer)
+     * t7 - existing MasterTxns converted into ReplayTxns, locks moved into
+     *      new ReplayTxns. Blocked txns must be released before this
+     *      conversion, because the application thread is holding the
+     *      txn mutex, and conversion needs to take that mutex.
+     *

+     * At any time during this process, the application threads may attempt
+     * to abort or commit outstanding txns, or acquire read or write locks.
+     * After t3, any attempts to lock, abort or commit will throw an
+     * UnknownMasterException or ReplicaWriteException, and in the normal
+     * course of events, the txn would internally abort. But once t5 is
+     * reached, we want to prevent any changes to the number of write locks
+     * in the txn so as to prevent interference with the conversion of the
+     * master txns and any danger of converting only part of a txn. We set
+     * the volatile, transient MasterTxn.freeze field at t5 to indicate that
+     * there should be no change to the contents of the transaction. When
+     * freeze is true, any attempts to abort or commit the transaction will
+     * throw Unknown/ReplicaWriteException, and the txn will be put into
+     * MUST_ABORT state, but the existing locks will be unchanged.
+     *

+     * In a network partition, it's possible that the txn will be aborted or
+     * committed locally before t5. In that case, there may be a hard
+     * rollback when the node syncs up with the new master, and finds the
+     * anomalous abort record. In masterTransfer, the window is smaller, and
+     * the blocking latch ensures that no commits can happen between t1-t5.
+     * After t5, the node will not be a master, so there can be no commits.
+     * Aborts may happen and can cause hard rollbacks, but no data will be
+     * lost.
+     *

        + * The freeze field is similar to the blockTxnLatch, and we considered + * using the blockTxnLatch to stabilize the txns, but ruled it out because: + * - the locking hierarchy where the application thread holds the txn + * mutex while awaiting the block txn latch prevents txn conversion. + * - the blockTxnLatch is scoped to the MasterTransfer instance, which may + * not be in play for network partitioning. + */ + void replicaTransitionCleanup() { + + /* + * Logically an assert, use an exception rather than Java assert + * because we want this check to be enabled at all times. If + * unexpectedly in master state, invalidate the environment, so we do a + * recovery and are sure to cleanup. + */ + if (repImpl.getState() == State.MASTER) { + throw EnvironmentFailureException.unexpectedState(repImpl, + "Should not be in MASTER state when converting from master " + + "to replica state"); + } + + /* + * Find all MasterTxns, and convert them to ReplayTxns. The set of + * existing MasterTxns cannot increase at this point, because the node + * is not in MASTER state. Freeze all txns and prevent change. + */ + Set existingMasterTxns = repImpl.getExistingMasterTxns(); + LoggerUtils.info(logger, repImpl, + "Transitioning node to replica state, " + + existingMasterTxns.size() + " txns to clean up"); + + /* Prevent aborts on all MasterTxns; hold their contents steady */ + for (MasterTxn masterTxn: existingMasterTxns) { + masterTxn.freeze(); + } + + /* + * Unblock any transactions that are stuck in their commit processing, + * awaiting the release of the master transfer block. Such + * transactions hold a mutex on the transaction, and the mutex would + * block any of the lock stealing that will occur below. Note that if + * we are doing this transition because of a network partition, there + * will be no blocked transactions. + */ + repImpl.unblockTxnCompletion(); + + for (MasterTxn masterTxn: existingMasterTxns) { + + /* + * Convert this masterTxn to a ReplayTxn and move any existing + * write locks to it. Unfreeze and then abort the masterTxn. + */ + ReplayTxn replayTxn = + masterTxn.convertToReplayTxnAndClose(logger, + repImpl.getReplay()); + + if (replayTxn == null) { + LoggerUtils.info(logger, repImpl, "Master Txn " + + masterTxn.getId() + + " has no locks, nothing to transfer"); + } else { + repImpl.getTxnManager().registerTxn(replayTxn); + LoggerUtils.info(logger, repImpl, + "state for replay transaction " + + replayTxn.getId() + " = " + + replayTxn.getState()); + } + } + + /* + * We're done with the transition, clear any active master transfers, + * if they exist. + */ + repNode.clearActiveTransfer(); + } + + /** + * Returns a channel used by the Replica to connect to the Feeder. The + * socket is configured with a read timeout that's a multiple of the + * heartbeat interval to help detect, or initiate a change in master. + * + * @throws IOException + * @throws ConnectRetryException + */ + private void createReplicaFeederChannel() + throws IOException, ConnectRetryException { + + DataChannel dataChannel = null; + + final DbConfigManager configManager = repNode.getConfigManager(); + final int timeoutMs = configManager. + getDuration(RepParams.PRE_HEARTBEAT_TIMEOUT); + + final int receiveBufferSize = + configManager.getInt(RepParams.REPLICA_RECEIVE_BUFFER_SIZE); + + try { + final int openTimeout = configManager. 
+ getDuration(RepParams.REPSTREAM_OPEN_TIMEOUT); + + /* + * Note that soTimeout is not set since it's a blocking channel and + * setSoTimeout has no effect on a blocking nio channel. + * + * Push responses out rapidly, they are small (heart beat or commit + * response) and need timely delivery to the master. + * (tcpNoDelay = true) + */ + + final ConnectOptions connectOpts = new ConnectOptions(). + setTcpNoDelay(true). + setReceiveBufferSize(receiveBufferSize). + setOpenTimeout(openTimeout). + setBlocking(true); + + dataChannel = + repImpl.getChannelFactory(). + connect(repNode.getMasterStatus().getNodeMaster(), + connectOpts); + + replicaFeederChannel = + new NamedChannelWithTimeout(repNode, dataChannel, timeoutMs); + + ServiceDispatcher.doServiceHandshake + (dataChannel, FeederManager.FEEDER_SERVICE); + } catch (ConnectException e) { + + /* + * A network problem, or the node went down between the time we + * learned it was the master and we tried to connect. + */ + throw new ConnectRetryException(e.getMessage(), + NETWORK_RETRIES, + CONNECT_RETRY_SLEEP_MS); + } catch (ServiceConnectFailedException e) { + + /* + * The feeder may not have established the Feeder Service + * as yet. For example, the transition to the master may not have + * been completed. Wait longer. + */ + if (e.getResponse() == Response.UNKNOWN_SERVICE) { + throw new ConnectRetryException(e.getMessage(), + SERVICE_UNAVAILABLE_RETRIES, + CONNECT_RETRY_SLEEP_MS); + } + throw EnvironmentFailureException.unexpectedException(e); + } + } + + /** + * Returns the replay statistics associated with the Replica. + * + * @return the statistics. + */ + public StatGroup getReplayStats(StatsConfig config) { + return replay.getStats(config); + } + + /* Get the protocl statistics for this replica. */ + public StatGroup getProtocolStats(StatsConfig config) { + StatGroup protoStats = aggProtoStats.cloneGroup(config.getClear()); + + /* Guard against concurrent modification. */ + Protocol prot = this.protocol; + if (prot != null) { + /* These statistics are not ye a part of the agg statistics. */ + protoStats.addAll(prot.getStats(config)); + } + + return protoStats; + } + + /* Get the consistency tracker stats for this replica. */ + public StatGroup getTrackerStats(StatsConfig config) { + return consistencyTracker.getStats(config); + } + + /* Reset the stats associated with this Replica. */ + public void resetStats() { + replay.resetStats(); + aggProtoStats.clear(); + if (protocol != null) { + protocol.resetStats(); + } + consistencyTracker.resetStats(); + } + + /** + * Defines the possible types of exits that can be requested from the + * ReplayThread. + */ + private enum ReplayExitType { + IMMEDIATE, /* An immediate exit; ignore queued requests. */ + SOFT /* Process pending requests in queue, then exit */ + } + + /** + * The thread responsible for the replay of messages delivered over the + * replication stream. Reading and replay are done in separate threads for + * two reasons: + * + * 1) It allows the two activities to make independent progress. The + * network can be read and messages assembled even if the replay activity + * has stalled. 2) The two threads permit use of two cores to perform the + * replay thus making it less likely that cpu is the replay bottleneck. + * + * The inputs and outputs of this thread are schematically described as: + * + * replayQueue -> ReplayThread -> outputQueue + * + * It's the second component of the three thread structure outlined in the + * Replica's class level comment. 
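+     *
+     * Exit is coordinated through the exitRequest field: a SOFT request
+     * drains already-queued messages before exiting, while IMMEDIATE
+     * abandons them. See ReplayExitType and the poll loop in run().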
+ */ + class ReplayThread extends StoppableThread { + + /** + * Thread exit exception. It's null if the thread exited due to an + * exception. It's the responsibility of the main replica thread to + * propagate the exception across the thread boundary in this case. + */ + volatile private Exception exception; + + /** + * Set asynchronously when a shutdown is being requested. + */ + volatile ReplayExitType exitRequest = null; + + /* The queue poll interval, 1 second */ + private final static long QUEUE_POLL_INTERVAL_NS = 1000000000l; + + protected ReplayThread() { + super(repImpl, "ReplayThread"); + } + + @Override + protected int initiateSoftShutdown() { + /* Use immediate, since the stream will continue to be read. */ + exitRequest = ReplayExitType.IMMEDIATE; + return 0; + } + + @Override + public void run() { + + LoggerUtils.info(logger, repImpl, + "Replay thread started. Message queue size:" + + replayQueue.remainingCapacity()); + + final int dbTreeCacheClearingOpCount = + repNode.getDbTreeCacheClearingOpCount(); + + long opCount = 0; + + try { + while (true) { + + final long pollIntervalNs = + replay.getPollIntervalNs(QUEUE_POLL_INTERVAL_NS); + + final Message message = + replayQueue.poll(pollIntervalNs, + TimeUnit.NANOSECONDS); + + if ((exitRequest == ReplayExitType.IMMEDIATE) || + ((exitRequest == ReplayExitType.SOFT) && + (message == null)) || + repNode.isShutdownOrInvalid()) { + + if (exitRequest == ReplayExitType.SOFT) { + replay.flushPendingAcks(Long.MAX_VALUE); + } + return; + } + + final long startNs = System.nanoTime(); + replay.flushPendingAcks(startNs); + + repNode.getMasterStatus().assertSync(); + + if (message == null) { + /* Timeout on poll. */ + continue; + } + assert TestHookExecute.doHookIfSet(replayHook, message); + + final MessageOp messageOp = message.getOp(); + + if (messageOp == Protocol.SHUTDOWN_REQUEST) { + throw processShutdown((ShutdownRequest) message); + } + + if (messageOp == Protocol.HEARTBEAT) { + processHeartbeat((Protocol.Heartbeat) message); + dbCache.tick(); + } else { + /* Check for test mimicking network partition. */ + if (dontProcessStream) { + continue; + } + + replay.replayEntry(startNs, (Protocol.Entry) message); + + /* + * Note: the consistency tracking is more obscure than + * it needs to be, because the commit/abort VLSN is set + * in Replay.replayEntry() and is then used below. An + * alternative would be to promote the following + * conditional to a level above, so commit/abort + * operations get their own replay method which does + * the consistency tracking. + */ + if (((Protocol.Entry) message).isTxnEnd()) { + txnEndVLSN = replay.getLastReplayedVLSN(); + consistencyTracker.trackTxnEnd(); + } + consistencyTracker.trackVLSN(); + } + + if (testDelayMs > 0) { + Thread.sleep(testDelayMs); + } + + if (opCount++ % dbTreeCacheClearingOpCount == 0) { + clearDbTreeCache(); + } + } + } catch (Exception e) { + exception = e; + + /* + * Bring it to the attention of the main thread by freeing + * up the "offer" wait right away. + */ + replayQueue.clear(); + + /* + * Get the attention of the main replica thread in case it's + * waiting in a read on the socket channel. 
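+                 * Closing the channel makes the blocking protocol.read()
+                 * in doRunReplicaLoopInternalWork() fail with an
+                 * exception, unwinding the main replica loop.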
+ */ + LoggerUtils.info(logger, repImpl, + "closing replicaFeederChannel = " + + replicaFeederChannel); + RepUtils.shutdownChannel(replicaFeederChannel); + + LoggerUtils.info(logger, repImpl, + "Replay thread exiting with exception:" + + e.getMessage()); + } + } + + @Override + protected Logger getLogger() { + return logger; + } + } + + private class RepFeederHandshakeConfig + implements ReplicaFeederHandshakeConfig { + + @Override + public RepImpl getRepImpl() { + return repNode.getRepImpl(); + } + + @Override + public NameIdPair getNameIdPair() { + return repNode.getNameIdPair(); + } + + @Override + public Clock getClock() { + return repNode.getClock(); + } + + @Override + public NodeType getNodeType() { + return repNode.getNodeType(); + } + + @Override + public RepGroupImpl getGroup() { + return repNode.getGroup(); + } + + @Override + public NamedChannel getNamedChannel() { + return replicaFeederChannel; + } + } + + /** + * Tracks the consistency of this replica wrt the Master. It provides the + * mechanisms that will cause a beginTransaction() or a joinGroup() to wait + * until the specified consistency policy is satisfied. + */ + public class ConsistencyTracker { + private final long NULL_VLSN_SEQUENCE = VLSN.NULL_VLSN_SEQUENCE; + + /* + * Initialized by the Feeder handshake and updated by commit replays. + * All access to lastReplayedXXXX must be synchronized on the + * ConsistencyTracker itself. + */ + private long lastReplayedTxnVLSN = NULL_VLSN_SEQUENCE; + private VLSN lastReplayedVLSN = VLSN.NULL_VLSN; + private long masterTxnEndTime = 0l; + + /* Updated by heartbeats */ + private volatile long masterTxnEndVLSN; + private volatile long masterNow = 0l; + + private final StatGroup stats = + new StatGroup(ReplicaStatDefinition.GROUP_NAME, + ReplicaStatDefinition.GROUP_DESC); + + private final LongStat nLagConsistencyWaits = + new LongStat(stats, N_LAG_CONSISTENCY_WAITS); + + private final LongStat nLagConsistencyWaitMs = + new LongStat(stats, N_LAG_CONSISTENCY_WAIT_MS); + + private final LongStat nVLSNConsistencyWaits = + new LongStat(stats, N_VLSN_CONSISTENCY_WAITS); + + private final LongStat nVLSNConsistencyWaitMs = + new LongStat(stats, N_VLSN_CONSISTENCY_WAIT_MS); + + private final OrderedLatches vlsnLatches = + new OrderedLatches(repNode.getRepImpl()) { + + /* + * Note that this assumes that NULL_VLSN is -1, and that + * the vlsns ascend. + */ + @Override + boolean tripPredicate(long keyVLSN, long tripVLSN) { + return keyVLSN <= tripVLSN; + } + }; + + private final OrderedLatches lagLatches = + new OrderedLatches(repNode.getRepImpl()) { + @Override + boolean tripPredicate(long keyLag, long currentLag) { + return currentLag <= keyLag; + } + }; + + /** + * Invoked each time after a replica syncup so that the Replica + * can re-establish it's consistency vis a vis the master and what + * part of the replication stream it considers as having been replayed. 
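+     * Consistency is then tracked along two axes: VLSN-based latches
+     * tripped by trackVLSN()/trackTxnEnd(), and time-lag latches tripped
+     * by heartbeats. See awaitVLSN() and lagAwait() below.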
+         *
+         * @param matchedTxnVLSN the replica state corresponds to this txn
+         * @param matchedTxnEndTime the time at which this txn was committed
+         *        or aborted on the master
+         */
+        void reinit(long matchedTxnVLSN, long matchedTxnEndTime) {
+            this.lastReplayedVLSN = new VLSN(matchedTxnVLSN);
+            this.lastReplayedTxnVLSN = matchedTxnVLSN;
+            this.masterTxnEndTime = matchedTxnEndTime;
+        }
+
+        public long getMasterTxnEndVLSN() {
+            return masterTxnEndVLSN;
+        }
+
+        void close() {
+            logStats();
+        }
+
+        void logStats() {
+            if (logger.isLoggable(Level.INFO)) {
+                LoggerUtils.info
+                    (logger, repImpl,
+                     "Replica stats - Lag waits: " +
+                     nLagConsistencyWaits.get() +
+                     " Lag wait time: " + nLagConsistencyWaitMs.get() +
+                     "ms. " +
+                     " VLSN waits: " + nVLSNConsistencyWaits.get() +
+                     " VLSN wait time: " + nVLSNConsistencyWaitMs.get() +
+                     "ms.");
+            }
+        }
+
+        /**
+         * Calculates the time lag in ms at the Replica.
+         */
+        private long currentLag() {
+            if (masterNow == 0L) {
+
+                /*
+                 * Have not seen a heartbeat; can't determine the time lag in
+                 * its absence. The heartbeat is the first message sent by
+                 * the feeder after completion of the handshake.
+                 */
+                return Integer.MAX_VALUE;
+            }
+
+            long lag;
+            if (lastReplayedTxnVLSN < masterTxnEndVLSN) {
+                lag = System.currentTimeMillis() - masterTxnEndTime;
+            } else if (lastReplayedTxnVLSN == masterTxnEndVLSN) {
+
+                /*
+                 * The lag is determined by the transactions (if any) that
+                 * are further downstream; assume the worst.
+                 */
+                lag = System.currentTimeMillis() - masterNow;
+            } else {
+                /* Commit leapfrogged the heartbeat. */
+                lag = System.currentTimeMillis() - masterNow;
+            }
+            return lag;
+        }
+
+        /**
+         * Frees all the threads that are waiting on latches.
+         *
+         * @param exception the exception to be thrown to explain the reason
+         * behind the latches being forced.
+         */
+        synchronized void forceTripLatches(DatabaseException exception) {
+            assert (exception != null);
+            vlsnLatches.trip(Long.MAX_VALUE, exception);
+            lagLatches.trip(0, exception);
+        }
+
+        synchronized void trackTxnEnd() {
+            Replay.TxnInfo lastReplayedTxn = replay.getLastReplayedTxn();
+            lastReplayedTxnVLSN = lastReplayedTxn.getTxnVLSN().getSequence();
+            masterTxnEndTime = lastReplayedTxn.getMasterTxnEndTime();
+
+            if ((lastReplayedTxnVLSN > masterTxnEndVLSN) &&
+                (masterTxnEndTime >= masterNow)) {
+                masterTxnEndVLSN = lastReplayedTxnVLSN;
+                masterNow = masterTxnEndTime;
+            }
+
+            /*
+             * Advances both the replica VLSN and the commit time, so trip
+             * qualifying latches in both sets.
+             */
+            vlsnLatches.trip(lastReplayedTxnVLSN, null);
+            lagLatches.trip(currentLag(), null);
+        }
+
+        synchronized void trackVLSN() {
+            lastReplayedVLSN = replay.getLastReplayedVLSN();
+            vlsnLatches.trip(lastReplayedVLSN.getSequence(), null);
+        }
+
+        synchronized void trackHeartbeat(Protocol.Heartbeat heartbeat) {
+            masterTxnEndVLSN = heartbeat.getCurrentTxnEndVLSN();
+            masterNow = heartbeat.getMasterNow();
+            /* Trip just the time lag latches.
*/ + lagLatches.trip(currentLag(), null); + } + + public void lagAwait(TimeConsistencyPolicy consistencyPolicy) + throws InterruptedException, + ReplicaConsistencyException, + DatabaseException { + + long currentLag = currentLag(); + long lag = + consistencyPolicy.getPermissibleLag(TimeUnit.MILLISECONDS); + if (currentLag <= lag) { + return; + } + long waitStart = System.currentTimeMillis(); + ExceptionAwareCountDownLatch waitLagLatch = + lagLatches.getOrCreate(lag); + await(waitLagLatch, consistencyPolicy); + nLagConsistencyWaits.increment(); + nLagConsistencyWaitMs.add(System.currentTimeMillis() - waitStart); + } + + /** + * Wait until the log record identified by VLSN has gone by. + */ + public void awaitVLSN(long vlsn, + ReplicaConsistencyPolicy consistencyPolicy) + throws InterruptedException, + ReplicaConsistencyException, + DatabaseException { + + long waitStart = System.currentTimeMillis(); + + ExceptionAwareCountDownLatch waitVLSNLatch = null; + + synchronized(this) { + final long compareVLSN = + (consistencyPolicy instanceof CommitPointConsistencyPolicy)? + lastReplayedTxnVLSN : + lastReplayedVLSN.getSequence(); + if (vlsn <= compareVLSN) { + return; + } + waitVLSNLatch = vlsnLatches.getOrCreate(vlsn); + } + await(waitVLSNLatch, consistencyPolicy); + /* Stats after the await, so the counts and times are related. */ + nVLSNConsistencyWaits.increment(); + nVLSNConsistencyWaitMs.add(System.currentTimeMillis() - waitStart); + } + + /** + * Wait on the given countdown latch and generate the appropriate + * exception upon timeout. + * + * @throws InterruptedException + */ + private void await(ExceptionAwareCountDownLatch consistencyLatch, + ReplicaConsistencyPolicy consistencyPolicy) + throws ReplicaConsistencyException, + DatabaseException, + InterruptedException { + + if (!consistencyLatch.awaitOrException + (consistencyPolicy.getTimeout(TimeUnit.MILLISECONDS), + TimeUnit.MILLISECONDS)) { + /* Timed out. */ + final RepImpl rimpl = repNode.getRepImpl(); + final boolean inactive = !rimpl.getState().isActive(); + final String rnName = rimpl.getNameIdPair().getName(); + throw new ReplicaConsistencyException(consistencyPolicy, + rnName, + inactive); + } + } + + private StatGroup getStats(StatsConfig config) { + return stats.cloneGroup(config.getClear()); + } + + private void resetStats() { + stats.clear(); + } + + /** + * Shutdown the consistency tracker. This is typically done as part + * of the shutdown of a replication node. It counts down all open + * latches, so the threads waiting on them can make progress. It's + * the responsibility of the waiting threads to check whether the + * latch countdown was due to a shutdown, and take appropriate action. + */ + public void shutdown() { + final Exception savedShutdownException = + repNode.getSavedShutdownException(); + + /* + * Don't wrap in another level of EnvironmentFailureException + * if we have one in hand already. It can confuse any catch + * handlers which are expecting a specific exception e.g. + * RollBackException while waiting for read consistency. + */ + final EnvironmentFailureException latchException = + (savedShutdownException instanceof + EnvironmentFailureException) ? + + ((EnvironmentFailureException)savedShutdownException) : + + EnvironmentFailureException.unexpectedException + ("Node: " + repNode.getNameIdPair() + " was shut down.", + savedShutdownException); + + forceTripLatches(latchException); + } + } + + /** + * Manages a set of ordered latches. They are ordered by the key value. 
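+     *
+     * <p>A sketch of the contract, with illustrative values: a waiter
+     * registers a latch under key K via getOrCreate(K), and a later call to
+     * trip(V, null) releases every latch whose key satisfies
+     * tripPredicate(K, V). For the vlsnLatches instance above, the predicate
+     * is {@code K <= V}, so trip(100, null) releases waiters registered at
+     * keys 90 and 100, but not at 110.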
+ */ + private abstract class OrderedLatches { + + final EnvironmentImpl envImpl; + + final SortedMap latchMap = + new TreeMap<>(); + + abstract boolean tripPredicate(long key, long tripValue); + + OrderedLatches(EnvironmentImpl envImpl) { + this.envImpl = envImpl; + } + + synchronized ExceptionAwareCountDownLatch getOrCreate(Long key) { + ExceptionAwareCountDownLatch latch = latchMap.get(key); + if (latch == null) { + latch = new ExceptionAwareCountDownLatch(envImpl, 1); + latchMap.put(key, latch); + } + return latch; + } + + /** + * Trip all latches until the first latch that will not trip. + * + * @param tripValue + * @param exception the exception to be thrown by the waiter upon + * exit from the await. It can be null if no exception need be thrown. + */ + synchronized void trip(long tripValue, + DatabaseException exception) { + while (latchMap.size() > 0) { + Long key = latchMap.firstKey(); + if (!tripPredicate(key, tripValue)) { + /* It will fail on the rest as well. */ + return; + } + /* Set the waiters free. */ + ExceptionAwareCountDownLatch latch = latchMap.remove(key); + latch.releaseAwait(exception); + } + } + } + + /** + * Thrown to indicate that the Replica must retry connecting to the same + * master, after some period of time. + */ + @SuppressWarnings("serial") + static abstract class RetryException extends Exception { + final int retries; + final int retrySleepMs; + + RetryException(String message, + int retries, + int retrySleepMs) { + super(message); + this.retries = retries; + this.retrySleepMs = retrySleepMs; + } + + @Override + public String getMessage() { + return "Failed after retries: " + retries + + " with retry interval: " + retrySleepMs + "ms."; + } + } + + @SuppressWarnings("serial") + static class ConnectRetryException extends RetryException { + + ConnectRetryException(String message, + int retries, + int retrySleepMs) { + super(message, retries, retrySleepMs); + } + } + + /** + * Indicates that an election is needed before the hard recovery can + * proceed. Please see SR 20572 for a motivating scenario and + * NetworkPartitionHealingTest for an example. + */ + @SuppressWarnings("serial") + public static class HardRecoveryElectionException extends Exception { + + final NameIdPair masterNameIdPair; + final VLSN lastTxnEnd; + final VLSN matchpointVLSN; + + public HardRecoveryElectionException(NameIdPair masterNameIdPair, + VLSN lastTxnEnd, + VLSN matchpointVLSN) { + + this.masterNameIdPair = masterNameIdPair; + this.lastTxnEnd = lastTxnEnd; + this.matchpointVLSN = matchpointVLSN; + } + + /** + * The master that needs to be verified with an election. + */ + public NameIdPair getMaster() { + return masterNameIdPair; + } + + @Override + public String getMessage() { + return "Need election preceding hard recovery to verify master:" + + masterNameIdPair + + " last txn end:" + lastTxnEnd + + " matchpoint VLSN:" + matchpointVLSN; + } + } + + /** + * Sets a test hook for installation into Replica class instances to be + * created in the future. This is needed when the test hook must be + * installed before the {@code ReplicatedEnvironment} handle constructor + * returns, so that a test may influence the replay of the sync-up + * transaction backlog. + */ + static public void setInitialReplayHook + (com.sleepycat.je.utilint.TestHook hook) { + initialReplayHook = hook; + } + + /** + * Set a test hook which is executed when the ReplicaFeederSyncup + * finishes. 
This differs from the static method + * ReplicaFeederSyncup.setGlobalSyncupHook in that it sets the hook for a + * specific node, whereas the other method is static and sets it globally. + * + * This method is required when a test is trying to set the hook for only + * one node, and the node already exists. The other method is useful when a + * test is trying to set the hook before a node exists. + */ + public void setReplicaFeederSyncupHook(TestHook syncupHook) { + replicaFeederSyncupHook = syncupHook; + } + + public TestHook getReplicaFeederSyncupHook() { + return replicaFeederSyncupHook; + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/ReplicaFactory.java b/src/com/sleepycat/je/rep/impl/node/ReplicaFactory.java new file mode 100644 index 0000000..f0abd3a --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/ReplicaFactory.java @@ -0,0 +1,94 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_BYTES_READ; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_READ; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.utilint.StatGroup; + +public class ReplicaFactory { + private static ReplicaType type = ReplicaType.DEFAULT; + private static long statsInterval = 0; + + public enum ReplicaType { + DEFAULT, + NULL_REPLICA + } + + public static void setReplicaType(ReplicaType t) { + type = t; + } + + public static void setStatsInterval(long interval) { + statsInterval = interval; + } + + public static Replica create(RepNode repNode, Replay replay) { + switch (type) { + case DEFAULT: + return new Replica(repNode, replay); + + case NULL_REPLICA: + + /** + * Create a replica which just eats messages. Used for testing + * network bandwidth. + */ + return + new Replica(repNode, replay) { + @Override + protected void doRunReplicaLoopInternalWork() + throws Exception { + + long ctime = System.currentTimeMillis(); + long opCount = 0; + while (true) { + opCount++; + @SuppressWarnings("unused") + Message message = + getProtocol().read(getReplicaFeederChannel()); + if (statsInterval > 0 && + (opCount % statsInterval) == 0) { + StatGroup stats = getProtocol(). 
+ getStats(StatsConfig.DEFAULT); + long bytesRead = + stats.getLong(N_BYTES_READ); + long messagesRead = + stats.getLong(N_MESSAGES_READ); + long elapsedTime = + System.currentTimeMillis() - ctime; + long bytesPerMilliSecond = + bytesRead / elapsedTime; + System.out.println + (" Bytes Read: " + bytesRead + + " Messages Read: " + messagesRead + + " BytesPerMSec: " + bytesPerMilliSecond + + " MS: " + elapsedTime); + ctime = System.currentTimeMillis(); + } + } + + } + }; + + default: + throw EnvironmentFailureException.unexpectedState + ("unknown type passed to makeReplica: " + type); + } + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/ReplicaOutputThread.java b/src/com/sleepycat/je/rep/impl/node/ReplicaOutputThread.java new file mode 100644 index 0000000..a47e1ed --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/ReplicaOutputThread.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import java.io.IOException; + +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.utilint.VLSN; + +public class ReplicaOutputThread extends ReplicaOutputThreadBase { + private final RepNode repNode; + + ReplicaOutputThread(RepImpl repImpl) { + super(repImpl); + repNode = repImpl.getRepNode(); + } + + @Override + public void writeReauthentication() throws IOException { + } + + @Override + public void writeHeartbeat(Long txnId) throws IOException { + + if ((txnId == null) && (repNode.getReplica().getTestDelayMs() > 0)) { + return; + } + + final VLSN broadcastCBVLSN = repNode.getCBVLSNTracker() + .getBroadcastCBVLSN(); + protocol.write(protocol.new HeartbeatResponse(broadcastCBVLSN, + repNode.getReplica() + .getTxnEndVLSN()), + replicaFeederChannel); + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/ReplicaOutputThreadBase.java b/src/com/sleepycat/je/rep/impl/node/ReplicaOutputThreadBase.java new file mode 100644 index 0000000..ed64e5f --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/ReplicaOutputThreadBase.java @@ -0,0 +1,326 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.node; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.ReplicationSecurityException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StoppableThread; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; + +/** + * The thread used to write responses asynchronously to the network, to avoid + * network stalls in the replica replay thread. This thread, like the + * Replica.ReplayThread, is created each time the node establishes contact with + * a new feeder and starts replaying the log from it. + * + * The inputs and outputs of this thread are schematically described as: + * + * outputQueue -> ReplicaOutputThread (does write) -> writes to network + * + * It's the third component of the three thread structure outlined in the + * Replica's class level comment. + */ +public abstract class ReplicaOutputThreadBase extends StoppableThread { + + /** + * The size of the write queue. + */ + protected final int queueSize; + + /* + * The heartbeat interval in ms. + */ + protected final int heartbeatMs; + + /** + * Thread exit exception. It's non-null if the thread exited due to an + * exception. It's the responsibility of the main replica thread to + * propagate the exception across the thread boundary in this case. + */ + protected volatile Exception exception; + + protected final RepImpl repImpl; + + /* + * A reference to the common output queue shared with Replay + */ + protected final BlockingQueue outputQueue; + + protected final Protocol protocol ; + + protected final DataChannel replicaFeederChannel; + + /* + * Reserved transaction ids, that don't represent transaction Acks + * when encountered in the write queue. + */ + + /* + * Forces the replica thread to exit when encountered in the write + * queue. + */ + public final static Long EOF = Long.MAX_VALUE; + + /* + * Results in a heartbeat response when encountered in the write queue. + */ + public final static Long HEARTBEAT_ACK = EOF - 1; + + /* + * Results in a shutdown response when encountered in the write queue. + */ + public final static Long SHUTDOWN_ACK = EOF - 2; + + private TestHook outputHook; + + /* Keep the max size below Maximum Segment Size = 1460 bytes. 
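+     * Assuming roughly 100 bytes of protocol overhead, as reflected in the
+     * constant below, and 8 bytes per transaction id, this allows
+     * (1460 - 100) / 8 = 170 acks per group.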
*/ + private final static int maxGroupedAcks = (1460 - 100) / 8; + + final ArrayList groupAcks = new ArrayList<>(maxGroupedAcks); + + protected final boolean groupAcksEnabled; + + protected volatile long numGroupedAcks = 0; + + protected final Logger logger; + + ReplicaOutputThreadBase(RepImpl repImpl) { + this(repImpl, + repImpl.getReplay().getOutputQueue(), + repImpl.getRepNode().getReplica().getProtocol(), + repImpl.getRepNode().getReplica().getReplicaFeederChannel()); + } + + public ReplicaOutputThreadBase(RepImpl repImpl, + BlockingQueue outputQueue, + Protocol protocol, + DataChannel replicaFeederChannel) { + super(repImpl, "ReplicaOutputThread"); + + logger = repImpl.getLogger(); + this.repImpl = repImpl; + + this.outputQueue = outputQueue; + this.protocol = protocol; + this.replicaFeederChannel = replicaFeederChannel; + + heartbeatMs = + repImpl.getConfigManager().getInt(RepParams.HEARTBEAT_INTERVAL); + + queueSize = outputQueue.remainingCapacity(); + + groupAcksEnabled = + (protocol.getVersion() > Protocol.VERSION_5) && + repImpl.getConfigManager().getBoolean(RepParams.ENABLE_GROUP_ACKS); + + } + + @Override + protected Logger getLogger() { + return logger; + } + + public Exception getException() { + return exception; + } + + public long getNumGroupedAcks() { + return numGroupedAcks; + } + + /** + * For testing only. + */ + public long getOutputQueueSize() { + return outputQueue.size(); + } + + public void setOutputHook(TestHook outputHook) { + this.outputHook = outputHook; + } + + @Override + public void run() { + + /* Max number of pending responses in the output queue. */ + long maxPending = 0; + + /* Number of singleton acks. */ + long numAcks = 0; + + LoggerUtils.info(logger, repImpl, + "Replica output thread started. Queue size:" + + queueSize + + " Max grouped acks:" + maxGroupedAcks); + + try { + for (Long txnId = outputQueue.poll(heartbeatMs, + TimeUnit.MILLISECONDS); + !EOF.equals(txnId); + txnId = outputQueue.poll(heartbeatMs, + TimeUnit.MILLISECONDS)) { + + assert TestHookExecute.doHookIfSet(outputHook, this); + + writeReauthentication(); + + if ((txnId == null) || HEARTBEAT_ACK.equals(txnId)) { + + /* + * Send a heartbeat if requested, or unsolicited in the + * absence of output activity for a heartbeat interval. + */ + writeHeartbeat(txnId); + continue; + } else if (SHUTDOWN_ACK.equals(txnId)) { + + /* + * Acknowledge the shutdown request, the actual shutdown is + * processed in the replay thread. + */ + protocol.write(protocol.new ShutdownResponse(), + replicaFeederChannel); + continue; + } + + final int pending = outputQueue.size(); + if (pending > maxPending) { + maxPending = pending; + if ((maxPending % 100) == 0) { + LoggerUtils.info(logger, repImpl, + "Max pending acks:" + maxPending); + } + } + + if ((pending == 0) || (! groupAcksEnabled)) { + /* A singleton ack. */ + numAcks++; + protocol.write(protocol.new Ack(txnId), + replicaFeederChannel); + } else { + + /* + * Have items pending inthe queue and group acks are + * enabled. + */ + if (groupWriteAcks(txnId)) { + /* At eof */ + break; + } + } + } + } catch (ReplicationSecurityException rse) { + exception = rse; + LoggerUtils.info(logger, repImpl, + "Output thread exiting due to security error, " + + "client: " + rse.getConsumer() + + ", error: " + rse.getMessage()); + } catch (Exception e) { + exception = e; + + /* + * Get the attention of the main replica thread. 
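+             * Shutting down the shared channel below unblocks any read that
+             * may be pending on it.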
+             */
+            RepUtils.shutdownChannel(replicaFeederChannel);
+
+            LoggerUtils.info(logger, repImpl,
+                             this + " exiting with exception:" + e);
+        } finally {
+            LoggerUtils.info(logger, repImpl,
+                             this + " exited. " +
+                             "Singleton acks sent:" + numAcks +
+                             " Grouped acks sent:" + numGroupedAcks +
+                             " Max pending acks:" + maxPending);
+        }
+    }
+
+    public abstract void writeReauthentication() throws IOException;
+
+    public abstract void writeHeartbeat(Long txnId) throws IOException;
+
+    /**
+     * Writes out the acks that are currently queued in the output queue.
+     *
+     * Returns true if it encountered an EOF or a request for a shutdown.
+     */
+    protected boolean groupWriteAcks(long txnId)
+        throws IOException {
+
+        /* More potential acks, group them. */
+        boolean eof = false;
+        groupAcks.clear();
+        groupAcks.add(txnId);
+        outputQueue.drainTo(groupAcks, maxGroupedAcks - 1);
+        long txnIds[] = new long[groupAcks.size()];
+
+        int i = 0;
+        for (long gtxnId : groupAcks) {
+            if (gtxnId == EOF) {
+                eof = true;
+                break;
+            } else if (gtxnId == SHUTDOWN_ACK) {
+                protocol.write(protocol.new ShutdownResponse(),
+                               replicaFeederChannel);
+                eof = true;
+                break;
+            } else if (gtxnId == HEARTBEAT_ACK) {
+
+                /*
+                 * Heartbeat could be out of sequence relative to acks, but
+                 * that's ok.
+                 */
+                writeHeartbeat(gtxnId);
+                continue;
+            }
+            txnIds[i++] = gtxnId;
+        }
+
+        if (i > 0) {
+            if (txnIds.length > i) {
+                /*
+                 * Trim the array to the i acks actually collected; entries
+                 * skipped above (EOF, shutdown, heartbeats) must not be sent
+                 * as acks for transaction id zero.
+                 */
+                long la[] = new long[i];
+                System.arraycopy(txnIds, 0, la, 0, la.length);
+                txnIds = la;
+            }
+
+            protocol.write(protocol.new GroupAck(txnIds),
+                           replicaFeederChannel);
+            numGroupedAcks += txnIds.length;
+        }
+        return eof;
+    }
+
+    @Override
+    protected int initiateSoftShutdown() {
+
+        /* Queue EOF to terminate the thread */
+        if (! outputQueue.offer(EOF)) {
+            /* No room in write queue, resort to an interrupt. */
+            return -1;
+        }
+
+        /* Wait up to 10 seconds for any queued writes to be flushed out. */
+        return 10000;
+    }
+}
diff --git a/src/com/sleepycat/je/rep/impl/node/ReplicaStatDefinition.java b/src/com/sleepycat/je/rep/impl/node/ReplicaStatDefinition.java
new file mode 100644
index 0000000..e0f9254
--- /dev/null
+++ b/src/com/sleepycat/je/rep/impl/node/ReplicaStatDefinition.java
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.impl.node;
+
+import com.sleepycat.je.utilint.StatDefinition;
+
+/**
+ * Per-stat Metadata for HA Replica statistics.
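+ *
+ * <p>A minimal retrieval sketch, assuming an open ReplicatedEnvironment
+ * handle named repEnv (these stats are reported under the
+ * "ConsistencyTracker" group):
+ * <pre>{@code
+ * ReplicationStats stats = repEnv.getRepStats(StatsConfig.DEFAULT);
+ * System.out.println(stats); // includes nLagConsistencyWaits, etc.
+ * }</pre>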
+ */ +public class ReplicaStatDefinition { + + public static final String GROUP_NAME = "ConsistencyTracker"; + public static final String GROUP_DESC = "Statistics on the delays " + + "experienced by read requests at the replica in order to conform " + + "to the specified ReplicaConsistencyPolicy."; + + public static final String N_LAG_CONSISTENCY_WAITS_NAME = + "nLagConsistencyWaits"; + public static final String N_LAG_CONSISTENCY_WAITS_DESC = + "Number of Transaction waits while the replica catches up in order to" + + " meet a transaction's consistency requirement."; + public static final StatDefinition N_LAG_CONSISTENCY_WAITS = + new StatDefinition( + N_LAG_CONSISTENCY_WAITS_NAME, + N_LAG_CONSISTENCY_WAITS_DESC); + + public static final String N_LAG_CONSISTENCY_WAIT_MS_NAME = + "nLagConsistencyWaitMS"; + public static final String N_LAG_CONSISTENCY_WAIT_MS_DESC = + "Number of msec waited while the replica catches up in order to meet " + + "a transaction's consistency requirement."; + public static final StatDefinition N_LAG_CONSISTENCY_WAIT_MS = + new StatDefinition( + N_LAG_CONSISTENCY_WAIT_MS_NAME, + N_LAG_CONSISTENCY_WAIT_MS_DESC); + + public static final String N_VLSN_CONSISTENCY_WAITS_NAME = + "nVLSNConsistencyWaits"; + public static final String N_VLSN_CONSISTENCY_WAITS_DESC = + "Number of Transaction waits while the replica catches up in order to" + + " receive a VLSN."; + public static final StatDefinition N_VLSN_CONSISTENCY_WAITS = + new StatDefinition( + N_VLSN_CONSISTENCY_WAITS_NAME, + N_VLSN_CONSISTENCY_WAITS_DESC); + + public static final String N_VLSN_CONSISTENCY_WAIT_MS_NAME = + "nVLSNConsistencyWaitMS"; + public static final String N_VLSN_CONSISTENCY_WAIT_MS_DESC = + "Number of msec waited while the replica catches up in order to " + + "receive a VLSN."; + public static final StatDefinition N_VLSN_CONSISTENCY_WAIT_MS = + new StatDefinition( + N_VLSN_CONSISTENCY_WAIT_MS_NAME, + N_VLSN_CONSISTENCY_WAIT_MS_DESC); +} diff --git a/src/com/sleepycat/je/rep/impl/node/cbvlsn/CleanerBarrierState.java b/src/com/sleepycat/je/rep/impl/node/cbvlsn/CleanerBarrierState.java new file mode 100644 index 0000000..6f0a84f --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/cbvlsn/CleanerBarrierState.java @@ -0,0 +1,100 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node.cbvlsn; + +import java.io.Serializable; + +import com.sleepycat.je.utilint.VLSN; + +/** + * This class is not used if the GlobalCBVLSN is defunct -- see + * {@link GlobalCBVLSN}. Instances of this class are created in order to + * initialize the RepNodeImpl.barrierState field or pass parameters, but these + * instances are not actually used when the GlobalCBVLSN is defunct. + * + * Encapsulates the last known syncup state associated with a node. + */ +public class CleanerBarrierState implements Serializable { + private static final long serialVersionUID = 1L; + + /* + * The latest sync position of this node in the replication stream. + * This position is approximate and is updated on some regular basis. 
+ * It is conservative in that the node is likely to have a newer sync + * point. So it represents a lower bound for its sync point. + */ + private final VLSN lastLocalCBVLSN; + + /* + * The time that the sync point was last recorded. Note that clocks + * should be reasonably synchronized. + */ + private final long barrierTime; + + public CleanerBarrierState(VLSN lastLocalCBVLSN, long barrierTime) { + super(); + this.lastLocalCBVLSN = lastLocalCBVLSN; + this.barrierTime = barrierTime; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + (lastLocalCBVLSN == null ? 0 : lastLocalCBVLSN.hashCode()); + result = prime * result + + (int) (barrierTime ^ (barrierTime >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + CleanerBarrierState other = (CleanerBarrierState) obj; + if (lastLocalCBVLSN == null) { + if (other.lastLocalCBVLSN != null) { + return false; + } + } else if (!lastLocalCBVLSN.equals(other.lastLocalCBVLSN)) { + return false; + } + if (barrierTime != other.barrierTime) { + return false; + } + return true; + } + + public VLSN getLastCBVLSN() { + return lastLocalCBVLSN; + } + + public long getBarrierTime() { + return barrierTime; + } + + @Override + public String toString() { + return String.format("LocalCBVLSN:%,d at:%tc", + lastLocalCBVLSN.getSequence(), barrierTime); + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/cbvlsn/GlobalCBVLSN.java b/src/com/sleepycat/je/rep/impl/node/cbvlsn/GlobalCBVLSN.java new file mode 100644 index 0000000..dc12e03 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/cbvlsn/GlobalCBVLSN.java @@ -0,0 +1,451 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node.cbvlsn; + +import static com.sleepycat.je.rep.impl.RepParams.MIN_RETAINED_VLSNS; +import static com.sleepycat.je.rep.impl.RepParams.REP_STREAM_TIMEOUT; +import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; + +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.impl.MinJEVersionUnsupportedException; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; + +/** + * The GlobalCBVLSN was used prior to JE 7.5 for ensuring that nodes do not + * delete files needed for feeding other nodes. The methods in this class are + * disabled if the GlobalCBVLSN is defunct -- see below. + * + *
+ * <p>In version 7.5 and greater we instead retain all files up to the disk
+ * limits just in case they are needed for replication, and the GlobalCBVLSN
+ * is no longer needed. The {@link com.sleepycat.je.cleaner.FileProtector} is
+ * used for preventing file deletion on a local node. See [#25220] in the
+ * JE 7.5 change log for a list of related external changes.
+ *
+ * <p>The global and local CBVLSNs are completely defunct (no longer
+ * maintained) in a rep group where all nodes are running JE 7.5 or greater.
+ * In a mixed version group of new (7.5 or later) and old (pre-7.5) nodes,
+ * we must continue to maintain the GlobalCBVLSN for the sake of the old
+ * nodes. If we did not do this, for example, the following scenario could
+ * occur:
+ * <pre>
+ *  - RF-3, N1 is new, N2 and N3 are old.
+ *  - N1 is the master, N2 is an up-to-date replica, N3 is a lagging replica.
+ *  - N2 deletes files that would be needed to feed N3, because the
+ *    GlobalCBVLSN is not updated and therefore N2 doesn't know that N3 is
+ *    lagging.
+ *  - N1 goes down, N2 is elected master.
+ *  - N3 requires a network restore because N2 doesn't have the files that N3
+ *    needs for syncup.
+ * </pre>
+ *
+ * <p>The c.s.je.rep.impl.node.cbvlsn package contains most of the code for
+ * maintaining the CBVLSN. This code is isolated here for two reasons:
+ * <ul>
+ * <li>To make it clear that it is not in play for groups of all new nodes.
+ * We don't have to think about the CBVLSN at all in this case.</li>
+ * <li>To make it easier to eventually remove this code entirely, when
+ * backward compatibility with JE 7.4 and earlier is dropped.</li>
+ * </ul>
+ *
+ * <p>When it is not yet defunct, the GlobalCBVLSN class represents this
+ * node's view of the global CBVLSN. Each node has its own view of the global
+ * CBVLSN, based upon its local replicated copy of the rep group db. There is
+ * a single instance of the GlobalCBVLSN and it exists for the lifetime of the
+ * RepNode.
+ *
+ * <p>A global CBVLSN is a per-environment value, and is safeguarded from
+ * decreasing during the lifetime of a single RepImpl. Because nodes crash and
+ * re-sync, and new nodes join, it's possible that the persisted local cbvlsns
+ * can decrease, and drag the global cbvlsn value down, but those decreases
+ * are ignored during the lifetime of the global CBVLSN instance.
+ *
+ * <p>On pre-7.5 nodes, the global CBVLSN is used:
+ * <ul>
+ * <li>As a barrier to file deletion by the Cleaner.</li>
+ * <li>By the Feeder, which only serves log records in the interval:
+ * [GlobalCBVLSN .. VLSNRange.last].</li>
+ * <li>By the Replica, which uses the interval
+ * [GlobalCBVLSN .. VLSNRange.last] at syncup time.</li>
+ * </ul>
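+ *
+ * <p>The defunct decision reduces to a version comparison; a minimal sketch
+ * of the check performed in init() below:
+ * <pre>{@code
+ * // minJEVersion is the group-wide minimum JE version, or null if unknown.
+ * boolean defunct = (minJEVersion != null) &&
+ *     (new JEVersion("7.5.0").compareTo(minJEVersion) <= 0);
+ * }</pre>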
        + */ +public class GlobalCBVLSN { + + /* + * The GlobalCBVLSN is defunct when all nodes in the group are running this + * version or later. + */ + private static final JEVersion DEFUNCT_JE_VERSION = new JEVersion("7.5.0"); + + private final RepImpl repImpl; + private final Logger logger; + private final long streamTimeoutMs; + private final int minRetainedVLSNs; + private volatile boolean defunct = false; + + /* GroupCBVLSN can only be updated when synchronized. */ + private volatile VLSN groupCBVLSN = VLSN.NULL_VLSN; + + public GlobalCBVLSN(RepNode repNode) { + + this.repImpl = repNode.getRepImpl(); + streamTimeoutMs = + repImpl.getConfigManager().getDuration(REP_STREAM_TIMEOUT); + minRetainedVLSNs = + repImpl.getConfigManager().getInt(MIN_RETAINED_VLSNS); + logger = LoggerUtils.getLogger(getClass()); + } + + /** + * Uses minJEVersion to determine whether the global CBVLSN is defunct. + * + * Try to avoid any use of the GlobalCBVLSN when all nodes are + * running new software: + * + * - For the master, this method should be called using the minJEVersion + * in the rep group DB. + * + * - For a replica, it should be called using the minJEVersion of the + * feeder, from the feeder handshake. + * + * See {@link #setDefunctJEVersion(RepNode)} for how the minJEVersion is + * set for a new group (with new software) and for an upgraded group. + * + * @param minJEVersion the min JE version for the group, or null if + * unknown. + */ + public void init(RepNode repNode, JEVersion minJEVersion) { + + if (repNode.getRepImpl().getConfigManager().getBoolean( + RepParams.TEST_CBVLSN)) { + /* Should never transition from defunct to !defunct. */ + assert !defunct; + return; + } + + if (minJEVersion != null && + DEFUNCT_JE_VERSION.compareTo(minJEVersion) <= 0) { + defunct = true; + } else { + /* Should never transition from defunct to !defunct. */ + assert !defunct; + } + } + + /** + * Calls RepNode.setMinJEVersion to upgrade the min version to + * DEFUNCT_JE_VERSION. This allows a group to treat the global CBLVSN as + * defunct when all the electable nodes in the group have been upgraded. + * + * This method should be called when the first node in a a new group is + * created to avoid all use of the GlobalVLSN. It should also be called + * periodically on the master, as feeders become active, to update the min + * JE version after all nodes in a group are upgraded. + * + * If the version update fails because not all nodes are upgraded, or + * there is no quorum for the group DB update, or there is a lock + * conflict, this method fails silently and should be called again later. + */ + public void setDefunctJEVersion(RepNode repNode) { + + if (repNode.getRepImpl().getConfigManager().getBoolean( + RepParams.TEST_CBVLSN)) { + return; + } + + try { + repNode.setMinJEVersion(DEFUNCT_JE_VERSION); + defunct = true; + } catch (MinJEVersionUnsupportedException | + OperationFailureException e) { + /* Fail silently -- will try again later. */ + } + } + + /** + * Returns true if all nodes in the group are running JE 7.5 or later, and + * therefore the GlobalCBVLSN is not maintained. Returns false for a mixed + * old/new version group (or when TEST_GLOBAL_CBVLSN is true). + */ + public boolean isDefunct() { + return defunct; + } + + /** + * Returns the global CBVLSN if it is not defunct. Returns a null if it is + * defunct. + * + *
+     * <p>For sake of old nodes, the global CBVLSN is computed as the minimum
+     * of CBVLSNs after discarding CBVLSNs that are obsolete. A CBVLSN is
+     * considered obsolete if it has not been updated within a configurable
+     * time interval relative to the time that the most recent CBVLSN was
+     * updated.
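+     *
+     * <p>Callers can thus treat a null VLSN as "no barrier in effect"; e.g.
+     * (sketch): {@code if (globalCBVLSN.getCBVLSN().isNull()) ...}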
        + */ + public VLSN getCBVLSN() { + return defunct ? VLSN.NULL_VLSN : groupCBVLSN; + } + + /** + * Updates the cached group info for the node, avoiding a database read, + * if the global CBVLSN is not defunct. If it is defunct, does nothing. + * + * @param updateNameIdPair the node whose localCBVLSN must be updated. + * @param barrierState the new node syncup state + */ + public void updateGroupInfo(NameIdPair updateNameIdPair, + CleanerBarrierState barrierState) { + + if (defunct) { + return; + } + + RepGroupImpl group = repImpl.getRepNode().getGroup(); + RepNodeImpl node = group.getMember(updateNameIdPair.getName()); + if (node == null) { + /* A subsequent refresh will get it, along with the new node. */ + return; + } + + LoggerUtils.fine(logger, repImpl, + "LocalCBVLSN for " + updateNameIdPair + + " updated to " + barrierState + + " from " + node.getBarrierState().getLastCBVLSN()); + node.setBarrierState(barrierState); + recalculate(group); + } + + /** + * Recalculates the GlobalVLSN when it is not defunct. When it is defunct, + * does nothing. + * + *
+     * <p>For sake of old nodes, the globalCBVLSN is computed as it was
+     * earlier: the minimum of CBVLSNs after discarding CBVLSNs that are
+     * obsolete. A CBVLSN is considered obsolete if it has not been updated
+     * within a configurable time interval relative to the time that the
+     * most recent CBVLSN was updated.
+     *
+     * <p>Note that the read of GroupInfo is not protected, and that
+     * groupInfo could be changing. That's okay, because we guarantee that
+     * none of the local CBVLSNs can be less than the globalCBVLSN. If a
+     * local CBVLSN is written, and we miss it, it only means that this
+     * recalculation of the global CBVLSN is too pessimistic -- it's too low.
+     *
+     * <p>Secondary nodes do not appear in the RepGroupDB, but the feeder has
+     * local CBVLSN values for them which are part of this calculation.
+     * Secondary nodes and new nodes have their VLSN ranges protected by the
+     * mechanism which tries to ensure that files which may be needed by
+     * active feeders are not candidates for deletion.
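+     *
+     * <p>A worked example with illustrative values: given nodes N1
+     * (CBVLSN 1000, barrier time T), N2 (CBVLSN 900, T - 10s) and N3
+     * (CBVLSN 100, T - 2h), and an assumed 30 minute stream timeout, N3's
+     * entry is discarded as obsolete and the group CBVLSN becomes
+     * min(1000, 900) = 900, before the minRetainedVLSNs and VLSN-range
+     * adjustments applied below.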
        + */ + public void recalculate(RepGroupImpl groupInfo) { + + if (defunct) { + return; + } + + /* Find the time the highest CBVLSN was computed. */ + VLSN maxCBVLSN = NULL_VLSN; + long latestBarrierTime = 0; + String nodeName = null; + for (RepNodeImpl node : groupInfo.getDataMembers()) { + + CleanerBarrierState nodeBarrier = node.getBarrierState(); + VLSN cbvlsn = nodeBarrier.getLastCBVLSN(); + + if ((cbvlsn == null) || cbvlsn.isNull()) { + continue; + } + + /* + * Count all nodes when finding the max time, including old nodes + * that are in the middle of syncup and have not established their + * low point . + */ + final long nodeBarrierTime = nodeBarrier.getBarrierTime(); + + if (maxCBVLSN.compareTo(cbvlsn) <= 0) { + + /* + * Use min, since it represents the real change when they are + * equal. + */ + latestBarrierTime = cbvlsn.equals(maxCBVLSN) ? + Math.min(nodeBarrierTime, latestBarrierTime) : + nodeBarrierTime; + maxCBVLSN = cbvlsn; + + /* + * Track the name of the node holding the maximum CBVLSN, since + * that node pins that VLSN + */ + nodeName = node.getName(); + } + } + + if (latestBarrierTime == 0) { + /* No cbvlsns entered yet, don't bother to recalculate. */ + return; + } + + if (maxCBVLSN.isNull()) { + /* No cbvlsns entered yet, don't bother to recalculate. */ + return; + } + + /* + * Now find the min CBVLSN that has not been timed out. This may mean + * that the min CBVLSN == NULL_VLSN, for old nodes that have not yet + * finished syncup. + */ + VLSN newGroupCBVLSN = maxCBVLSN; + long nodeBarrierTime = 0; + for (RepNodeImpl node : groupInfo.getDataMembers()) { + + CleanerBarrierState nodeBarrier = node.getBarrierState(); + VLSN nodeCBVLSN = nodeBarrier.getLastCBVLSN(); + + if ((nodeCBVLSN == null) || nodeCBVLSN.isNull()) { + continue; + } + + if (((latestBarrierTime - nodeBarrier.getBarrierTime()) <= + streamTimeoutMs) && + (newGroupCBVLSN.compareTo(nodeCBVLSN) > 0)) { + newGroupCBVLSN = nodeCBVLSN; + + /* + * A node is pinning the CBVLSN because it is lagging and has + * not timed out + */ + nodeName = node.getName(); + nodeBarrierTime = nodeBarrier.getBarrierTime(); + } + } + + /* + * Adjust to retain min number of VLSNs, while ensuring we stay within + * the current VLSN range. + */ + newGroupCBVLSN = + new VLSN(newGroupCBVLSN.getSequence() - minRetainedVLSNs); + + final VLSNIndex vlsnIndex = repImpl.getVLSNIndex(); + final VLSN rangeFirst = (vlsnIndex != null) ? + vlsnIndex.getRange().getFirst() : VLSN.FIRST_VLSN; + + /* + * Environments where the minRetainedVLSNs was expanded need to ensure + * the global cbvlsn still stays within the vlsn range. + */ + if (rangeFirst.compareTo(newGroupCBVLSN) > 0) { + newGroupCBVLSN = rangeFirst; + } + + updateGroupCBVLSN(groupInfo, newGroupCBVLSN, nodeName, + nodeBarrierTime, latestBarrierTime); + } + + /* + * Update the group CBVLSN, but only if the newGroupCBVLSN is more recent + * This is to ensure that the group CBVLSN can only advance during the + * lifetime of this instance. + */ + private void updateGroupCBVLSN(RepGroupImpl groupInfo, + VLSN newGroupCBVLSN, + String nodeName, + long nodeBarrierTime, + long latestBarrierTime) { + assert !defunct; + boolean changed = false; + String cbvlsnLoweredMessage = null; + VLSN oldCBVLSN = VLSN.NULL_VLSN; + + synchronized(this) { + + /* + * Be sure not to do anything expensive in this synchronized + * section, such as logging. 
+ */ + if (newGroupCBVLSN.compareTo(groupCBVLSN) > 0) { + VLSNRange currentRange = repImpl.getVLSNIndex().getRange(); + if (!currentRange.contains(newGroupCBVLSN) && + logger.isLoggable(Level.FINE)) { + cbvlsnLoweredMessage = + "GroupCBVLSN: " + newGroupCBVLSN + + " is outside VLSN range: " + currentRange + + " Current group:" + groupInfo; + } else { + oldCBVLSN = groupCBVLSN; + groupCBVLSN = newGroupCBVLSN; + changed = true; + } + } + } + + if (logger.isLoggable(Level.FINE)) { + if (cbvlsnLoweredMessage != null) { + LoggerUtils.fine(logger, repImpl, cbvlsnLoweredMessage); + } + + if (changed) { + LoggerUtils.fine(logger, repImpl, + "Global CBVLSN changed from " + oldCBVLSN + + " to " + newGroupCBVLSN); + } + } + } + + /** + * Returns a VLSN appropriate for the RestoreResponse.cbvlsn field when the + * GlobalCBVLSN is not defunct. When it is defunct, returns a null VLSN. + * + *
+     * <p>When sending a RestoreResponse to a (potentially) old node, we
+     * supply a VLSN that will cause selection of reasonably current nodes as
+     * a server (feeder) for the network restore. The return value is the
+     * VLSN range end of this node (the master) minus a value that will allow
+     * up-to-date servers to qualify (NETWORKBACKUP_MAX_LAG). Old nodes
+     * will reject servers whose VLSN range does not cover this VLSN.
+     *
+     * <p>In JE 7.4 and earlier, the "group CBVLSN" was used for this value.
+     * This was incorrect, because it was the lower bound for lagging
+     * replicas and reserved files. In JE 7.5 we improve on this by sending
+     * the value described above, when older nodes must be supported.
+     *
+     * <p>When the GlobalCBVLSN is defunct, a null VLSN is returned because
+     * the RestoreResponse.cbvlsn field is not used at all. See the updated
+     * 'Algorithm' in {@link NetworkRestore}.
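+     *
+     * <p>For example, with illustrative values: a VLSN range end of 10,000
+     * and a NETWORKBACKUP_MAX_LAG of 100 yield a response VLSN of
+     * 10,000 - 100 = 9,900; the Math.max(0, ...) guard below keeps the
+     * result non-negative for very small ranges.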
        + */ + public VLSN getRestoreResponseVLSN(final VLSNRange range) { + + if (defunct) { + return VLSN.NULL_VLSN; + } + + final long vlsn = range.getLast().getSequence() - + repImpl.getConfigManager().getInt(RepParams.NETWORKBACKUP_MAX_LAG); + + return new VLSN(Math.max(0, vlsn)); + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/cbvlsn/LocalCBVLSNTracker.java b/src/com/sleepycat/je/rep/impl/node/cbvlsn/LocalCBVLSNTracker.java new file mode 100644 index 0000000..0a6cdfb --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/cbvlsn/LocalCBVLSNTracker.java @@ -0,0 +1,170 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node.cbvlsn; + +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * The methods in this class are disabled if the GlobalCBVLSN is defunct -- see + * {@link GlobalCBVLSN}. + * + *
+ * <p>When the {@link GlobalCBVLSN} is not defunct, the LocalCBVLSNTracker
+ * tracks this node's local CBVLSN. Each node has a single tracker instance.
+ *
+ * <p>The GlobalCBVLSN must be durable. Since the GlobalCBVLSN is derived
+ * from the LocalCBVLSN, we need to make the LocalCBVLSN durable too. [#18728]
+ * <ol>
+ * <li>For HA, the GlobalCBVLSN is supposed to ensure that the replication
+ * stream is always available for replay, across failovers.</li>
+ * </ol>
+ *
+ * <p>The local CBVLSN is maintained by each node. Replicas periodically
+ * update the Master with their current CBVLSN via a response to a heartbeat
+ * message from the Master, where it is managed by the LocalCBVLSNUpdater and
+ * flushed out to the RepGroup database whenever the updater notices that it
+ * has changed. The change is then effectively broadcast to all the Replicas,
+ * including the originating Replica, via the replication stream. For this
+ * reason, the CBVLSN for the node as represented in the RepGroup database
+ * will generally lag the value contained in the tracker.
+ *
+ * <p>Note that the track() api is invoked in critical code with locks being
+ * held and must be lightweight.
+ *
+ * <p>Local CBVLSNs are used only to contribute to the calculation of the
+ * global CBVLSN. The global CBVLSN acts as the cleaner throttle on old nodes.
+ * Any invariants, such as the rule that the cleaner throttle cannot regress,
+ * are applied when doing the global calculation. In addition, we enforce
+ * the rule against regressing local CBVLSNs here.
        + */ +public class LocalCBVLSNTracker { + + /* + * Note that all reference fields below are null when then GlobalCBVLSN is + * defunct. + */ + + /* Used to keep track of the last fsynced matchable VLSN. */ + private VLSN lastSyncableVLSN; + + /** + * Final syncable VLSN from the penultimate log file. + */ + private VLSN currentLocalCBVLSN; + + /* + * We really only need to update the localCBVLSN once per file. currentFile + * is used to determine if this is the first VLSN in the file. + */ + private long currentFile; + + /* Test hook for disabling LocalCBVLSN changes. */ + private boolean allowUpdate = true; + + /* Same value as GlobalCBVLSN.defunct. */ + private final boolean defunct; + + public LocalCBVLSNTracker(RepNode repNode, GlobalCBVLSN globalCBVLSN) { + defunct = globalCBVLSN.isDefunct(); + + if (!defunct) { + VLSNIndex vlsnIndex = repNode.getRepImpl().getVLSNIndex(); + + /* Initialize the currentLocalCBVLSN and lastSyncableVLSN. */ + currentLocalCBVLSN = vlsnIndex.getRange().getLastSync(); + lastSyncableVLSN = currentLocalCBVLSN; + + /* Approximation good enough to start with. */ + currentFile = DbLsn.getFileNumber(DbLsn.NULL_LSN); + } + } + + /* Test hook, disable the LocalCBVLSN updates. */ + public void setAllowUpdate(boolean allowUpdate) { + this.allowUpdate = allowUpdate; + } + + /** + * If GlobalCBVLSN is defunct, does nothing. + * + *
+     * <p>If the GlobalCBVLSN is not defunct, tracks barrier VLSNs, updating
+     * the local CBVLSN if the associated log file has changed. When tracking
+     * is done on a replica, the currentLocalCBVLSN value is ultimately sent
+     * via heartbeat response to the master, which updates the RepGroupDb.
+     * When tracking is done on a master, the update is done on this node.
+     *
+     * <p>The update is only done once per file in order to decrease the cost
+     * of tracking. Since we want the local cbvlsn to be durable, we use the
+     * last vlsn in the penultimate log file as the local cbvlsn value. We
+     * know the penultimate log file has been fsynced, and therefore the last
+     * vlsn within that file has also been fsynced.
+     *
+     * <p>Tracking can be called quite often, and should be lightweight.
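+     *
+     * <p>A worked example with illustrative numbers: if VLSNs 10..20 land in
+     * log file 5 and VLSN 21 is the first to land in file 6, tracking
+     * VLSN 21 advances currentLocalCBVLSN to 20, the last syncable VLSN of
+     * the now-fsynced file 5.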
        + * + * @param newVLSN + * @param lsn + */ + public void track(VLSN newVLSN, long lsn) { + if (defunct || !allowUpdate) { + return; + } + + synchronized (this) { + if (newVLSN.compareTo(lastSyncableVLSN) > 0) { + VLSN old = lastSyncableVLSN; + lastSyncableVLSN = newVLSN; + if (DbLsn.getFileNumber(lsn) != currentFile) { + currentFile = DbLsn.getFileNumber(lsn); + currentLocalCBVLSN = old; + } + } + } + } + + /** + * If the GlobalVLSN is not defunct, initialize the local CBVLSN with the + * syncup matchpoint, so that the heartbeat responses sent before the node + * has replayed any log entries are still valid for saving a place in the + * replication stream. If the GlobalVLSN is defunct, do nothing. + * + * @param matchpoint + */ + public void registerMatchpoint(VLSN matchpoint) { + if (defunct) { + return; + } + this.currentLocalCBVLSN = matchpoint; + this.lastSyncableVLSN = matchpoint; + } + + /** + * @return the local CBVLSN for broadcast from replica to master on the + * heartbeat response, or a null VLSN if the GlobalVLSN is defunct. + */ + public VLSN getBroadcastCBVLSN() { + return defunct ? VLSN.NULL_VLSN : currentLocalCBVLSN; + } + + /** + * @return last syncable VLSN seen by this tracker, or a null VLSN if the + * GlobalVLSN is defunct. Note that this VLSN has not yet been broadcast -- + * see {@link #track}. + */ + public VLSN getLastSyncableVLSN() { + return defunct ? VLSN.NULL_VLSN : lastSyncableVLSN; + } +} diff --git a/src/com/sleepycat/je/rep/impl/node/cbvlsn/LocalCBVLSNUpdater.java b/src/com/sleepycat/je/rep/impl/node/cbvlsn/LocalCBVLSNUpdater.java new file mode 100644 index 0000000..c196872 --- /dev/null +++ b/src/com/sleepycat/je/rep/impl/node/cbvlsn/LocalCBVLSNUpdater.java @@ -0,0 +1,289 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node.cbvlsn; + +import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; + +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockNotAvailableException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; + +/** + * The methods in this class are disabled if the GlobalCBVLSN is defunct -- see + * {@link GlobalCBVLSN}. + * + *
+ * <p>When the GlobalCBVLSN is not defunct, this class supports updating the
+ * group database with each node's local CBVLSN when it is in the Master
+ * state. There is one instance per feeder connection, plus one for the
+ * Master. There is, logically, a LocalCBVLSNTracker instance associated
+ * with each instance of the updater. The instance is local for an update
+ * associated with a node in the Master state and is remote for each
+ * Replica.
+ *
+ * <p>The nodeCBVLSN can only increase during the lifetime of the
+ * LocalCBVLSNUpdater instance. Note however that the value of the node's
+ * CBVLSN as stored in the database, which represents the values from
+ * multiple updaters associated with a node over its lifetime, may both
+ * decrease and increase over its lifetime. The decreases are due primarily
+ * to rollbacks, and should be relatively rare.
+ *
+ * <p>The updaters used to maintain the Replica's local CBVLSNs are stored in
+ * the Feeder.InputThread. The lifetime of such a LocalCBVLSNUpdater is
+ * therefore determined by the lifetime of the connection between the Master
+ * and the Replica. The node CBVLSN is updated each time a heartbeat response
+ * is processed by the FeederInput thread. It's also updated when an old
+ * Master detects that a Replica needs a network restore. In this case, it
+ * updates the cbvlsn to the value expected from the node after a network
+ * restore, so that the global CBVLSN can continue to make forward progress
+ * and not hold up the cleaner.
+ *
+ * <p>The Master maintains an updater for its own CBVLSN in the
+ * FeederManager. This updater exists as long as the node retains its Master
+ * state.
+ *
+ * <p>Local CBVLSNs are used only to contribute to the calculation of the
+ * global CBVLSN. The global CBVLSN acts as the cleaner throttle on old
+ * nodes. Any invariants, such as the rule that the cleaner throttle cannot
+ * regress, are applied when doing the global calculation.
+ *
+ * <p>Note that CBVLSNs are not stored in the database for secondary nodes,
+ * but transient information about them is still maintained.
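+ *
+ * <p>The update path, roughly (method names from this class and
+ * LocalCBVLSNTracker):
+ * <pre>
+ *  replica heartbeat response -> updateForReplica() -> set() -> update()
+ *  master's own tracker       -> updateForMaster()  -> set() -> update()
+ * </pre>
+ * <p>where update() performs the actual RepGroupDB write on the master.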
        + */ +public class LocalCBVLSNUpdater { + + private static final String MASTER_SOURCE = "master"; + private static final String HEARTBEAT_SOURCE = "heartbeat"; + + /* + * Note that all reference fields below are null when then GlobalCBVLSN is + * defunct. + */ + + /* + * The node ID of the node whose CBLVLSN is being tracked. If this updater + * is working on the behalf of a replica node, the nameIdPair is not the + * name of this node. + */ + private final NameIdPair nameIdPair; + + /** The node type of the node whose CBVLSN is being tracked. */ + private final NodeType trackedNodeType; + + /* This node; note that its node ID may be different from nodeId above. */ + private final RepNode repNode; + + /* + * The node's local CBVLSN is cached here, for use without reading the + * group db. + */ + private VLSN nodeCBVLSN; + + /* + * True if this node's local CBVLSN has changed, but the new value + * has not been stored into the group db yet. + */ + private boolean updatePending; + + /* Used to suppress database updates during unit testing. */ + private static boolean suppressGroupDBUpdates = false; + + private final Logger logger; + + /* Same value as GlobalCBVLSN.defunct. */ + private final boolean defunct; + + public LocalCBVLSNUpdater(final NameIdPair nameIdPair, + final NodeType trackedNodeType, + final RepNode repNode) { + defunct = repNode.isGlobalCBVLSNDefunct(); + if (!defunct) { + this.nameIdPair = nameIdPair; + this.trackedNodeType = trackedNodeType; + this.repNode = repNode; + + logger = LoggerUtils.getLogger(getClass()); + } else { + this.nameIdPair = null; + this.trackedNodeType = null; + this.repNode = null; + logger = null; + } + nodeCBVLSN = NULL_VLSN; + updatePending = false; + } + + /** + * Sets the current CBVLSN for this node, and trips the updatePending + * flag so that we know there is something to store to the RepGroupDB. + * + * @param syncableVLSN the new local CBVLSN + */ + private void set(VLSN syncableVLSN, String source) { + assert !defunct; + + assert repNode.isMaster() : + "LocalCBVLSNUpdater.set() can only be called by the master"; + + if (!nodeCBVLSN.equals(syncableVLSN)) { + LoggerUtils.fine(logger, repNode.getRepImpl(), + "update local CBVLSN for " + nameIdPair + + " from nodeCBVLSN " + nodeCBVLSN + " to " + + syncableVLSN + " from " + source); + if (nodeCBVLSN.compareTo(syncableVLSN) >= 0) { + + /* + * LCBVLSN must not decrease, since it can result in a GCBVLSN + * value that's outside a truncated VLSNIndex range. See SR + * [#17343] + */ + throw EnvironmentFailureException.unexpectedState + (repNode.getRepImpl(), + "nodeCBVLSN" + nodeCBVLSN + " >= " + syncableVLSN + + " attempted update local CBVLSN for " + nameIdPair + + " from " + source); + } + nodeCBVLSN = syncableVLSN; + updatePending = true; + } + } + + /** + * If the GlobalCBVLSN is not defunct, sets the current CBVLSN for this + * node. Can only be used by the master. The new cbvlsn value comes from + * an incoming heartbeat response message. If the GlobalCBVLSN is defunct, + * does nothing. + * + * @param heartbeat The incoming heartbeat response message from the + * replica containing its newest local cbvlsn. + */ + public void updateForReplica(Protocol.HeartbeatResponse heartbeat) { + if (defunct) { + return; + } + doUpdate(heartbeat.getSyncupVLSN(), HEARTBEAT_SOURCE); + } + + /** + * If the GlobalCBVLSN is not defunct, as a master, update the database + * with the local CBVLSN for this node. 
+     * with the local CBVLSN for this node. This call is needed because the
+     * master's local CBVLSN will not be broadcast via a heartbeat, so it
+     * needs to get to the updater another way. If the GlobalCBVLSN is
+     * defunct, do nothing.
+     */
+    public void updateForMaster(LocalCBVLSNTracker tracker) {
+        if (defunct) {
+            return;
+        }
+        doUpdate(tracker.getBroadcastCBVLSN(), MASTER_SOURCE);
+    }
+
+    private void doUpdate(VLSN vlsn, String source) {
+        assert !defunct;
+        set(vlsn, source);
+        repNode.getRepImpl().updateCBVLSN(this);
+    }
+
+    /**
+     * If the GlobalCBVLSN is not defunct, update the database with the local
+     * CBVLSN associated with the node ID, if required. Note that updates can
+     * only be invoked on the master. If the GlobalCBVLSN is defunct, do
+     * nothing.
+     */
+    public void update() {
+
+        if (defunct) {
+            return;
+        }
+
+        if (!updatePending) {
+            return;
+        }
+
+        if (suppressGroupDBUpdates) {
+            /* Short circuit the database update. For testing only. */
+            updatePending = false;
+            return;
+        }
+
+        if (repNode.isShutdownOrInvalid()) {
+
+            /*
+             * Don't advance VLSNs after a shutdown request, to minimize the
+             * need for a hard recovery.
+             */
+            return;
+        }
+
+        try {
+            VLSN candidate = nodeCBVLSN;
+
+            if (candidate.isNull()) {
+                return;
+            }
+
+            if (candidate.compareTo(repNode.getGlobalCBVLSN()) < 0) {
+                /* Don't let the group CBVLSN regress. */
+                return;
+            }
+
+            final boolean updated = repNode.getRepGroupDB().updateLocalCBVLSN(
+                nameIdPair, candidate, trackedNodeType);
+            /* If not updated, we'll try again later. */
+            if (updated) {
+                updatePending = false;
+            }
+        } catch (EnvironmentFailureException e) {
+
+            /*
+             * Propagate the environment failure exception so that the master
+             * can shut down.
+             */
+            throw e;
+        } catch (LockNotAvailableException lnae) {
+            /*
+             * Expected exception, due to the use of a no-wait transaction.
+             */
+            LoggerUtils.info(repNode.getLogger(), repNode.getRepImpl(),
+                             " lock not available without waiting. " +
+                             "local cbvlsn update skipped for node: " +
+                             nameIdPair + " Error: " + lnae.getMessage());
+        } catch (DatabaseException e) {
+            LoggerUtils.warning(repNode.getLogger(), repNode.getRepImpl(),
+                                "local cbvlsn update failed for node: " +
+                                nameIdPair + " Error: " + e.getMessage() +
+                                "\n" + LoggerUtils.getStackTrace(e));
+        }
+    }
+
+    /**
+     * Used during testing to suppress CBVLSN updates at this node. Note that
+     * the cleaner must also typically be turned off (first) in conjunction
+     * with the suppression. If multiple nodes are running in the VM, all
+     * nodes will have CBVLSN updates turned off.
+     * @param suppressGroupDBUpdates If true, the group DB and the group CBVLSN
+     * won't be updated at the master.
+     */
+    public static void setSuppressGroupDBUpdates(
+        boolean suppressGroupDBUpdates) {
+
+        LocalCBVLSNUpdater.suppressGroupDBUpdates = suppressGroupDBUpdates;
+    }
+}
diff --git a/src/com/sleepycat/je/rep/impl/node/cbvlsn/package-info.java b/src/com/sleepycat/je/rep/impl/node/cbvlsn/package-info.java
new file mode 100644
index 0000000..1e309cd
--- /dev/null
+++ b/src/com/sleepycat/je/rep/impl/node/cbvlsn/package-info.java
@@ -0,0 +1,19 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: Eventual burial ground for Cleaner Barrier VLSN (CBVLSN) classes,
+ * currently used only for compatibility with old-version nodes in a
+ * mixed-version group.
+ */
+package com.sleepycat.je.rep.impl.node.cbvlsn;
diff --git a/src/com/sleepycat/je/rep/impl/node/package-info.java b/src/com/sleepycat/je/rep/impl/node/package-info.java
new file mode 100644
index 0000000..47ba57f
--- /dev/null
+++ b/src/com/sleepycat/je/rep/impl/node/package-info.java
@@ -0,0 +1,17 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: Feeder and Replica node implementations.
+ */
+package com.sleepycat.je.rep.impl.node;
diff --git a/src/com/sleepycat/je/rep/impl/package-info.java b/src/com/sleepycat/je/rep/impl/package-info.java
new file mode 100644
index 0000000..e06ddce
--- /dev/null
+++ b/src/com/sleepycat/je/rep/impl/package-info.java
@@ -0,0 +1,17 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: Top level HA implementation classes.
+ */
+package com.sleepycat.je.rep.impl;
diff --git a/src/com/sleepycat/je/rep/jmx/RepJEDiagnostics.java b/src/com/sleepycat/je/rep/jmx/RepJEDiagnostics.java
new file mode 100644
index 0000000..6792e28
--- /dev/null
+++ b/src/com/sleepycat/je/rep/jmx/RepJEDiagnostics.java
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.jmx;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.jmx.JEDiagnostics;
+
+/*
+ * This concrete MBean is a logging monitor on a replicated JE Environment.
+ *
+ * It not only has the same attributes and operations as the standalone
+ * JEDiagnostics, but also adds some replication-specific operations.
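+ *
+ * Once registered, the MBean can be reached through an ordinary JMX
+ * connection. For example (a sketch: "connector" is an already-open
+ * JMXConnector, and the ObjectName pattern is an assumption modeled on
+ * the RepJEMonitor naming convention used by the JConsole plugin below):
+ *
+ *   MBeanServerConnection mbsc = connector.getMBeanServerConnection();
+ *   ObjectName pattern =
+ *       new ObjectName("com.sleepycat.je.jmx:name=RepJEDiagnostics(*");
+ *   Set<ObjectName> names = mbsc.queryNames(pattern, null);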
+ */
+public class RepJEDiagnostics extends JEDiagnostics {
+    protected RepJEDiagnostics(Environment env) {
+        super(env);
+    }
+
+    public RepJEDiagnostics() {
+        super();
+    }
+
+    @Override
+    protected void initClassFields() {
+        currentClass = RepJEDiagnostics.class;
+        className = "RepJEDiagnostics";
+        DESCRIPTION = "Logging Monitor on an open replicated Environment.";
+    }
+
+    @Override
+    protected void doRegisterMBean(Environment useEnv)
+        throws Exception {
+
+        server.registerMBean(new RepJEDiagnostics(useEnv), jeName);
+    }
+}
diff --git a/src/com/sleepycat/je/rep/jmx/RepJEMonitor.java b/src/com/sleepycat/je/rep/jmx/RepJEMonitor.java
new file mode 100644
index 0000000..faf420e
--- /dev/null
+++ b/src/com/sleepycat/je/rep/jmx/RepJEMonitor.java
@@ -0,0 +1,123 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.jmx;
+
+import javax.management.MBeanException;
+import javax.management.MBeanOperationInfo;
+import javax.management.MBeanParameterInfo;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.jmx.JEMonitor;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.RepInternal;
+
+/**
+ * A concrete MBean for monitoring a replicated open JE Environment.
+ *
+ * It not only has the same attributes and operations as the standalone
+ * JEMonitor, but also adds some replication-specific operations.
+ */
+public class RepJEMonitor extends JEMonitor {
+
+    /**
+     * @hidden
+     *
+     * Name for dumping rep stats operation.
+     */
+    public static final String OP_DUMP_REPSTATS = "getReplicationStats";
+
+    /**
+     * @hidden
+     *
+     * Name for getting rep stats tips.
+     */
+    public static final String OP_GET_REP_TIPS = "getRepTips";
+
+    /* Name for getting RepImpl state. */
+    static final String OP_DUMP_STATE = "dumpReplicationState";
+
+    /* Define the dumping rep stats operation. */
+    private static final MBeanOperationInfo OP_DUMP_REPSTATS_INFO =
+        new MBeanOperationInfo
+        (OP_DUMP_REPSTATS,
+         "Dump environment's replicated stats.",
+         statParams, "java.lang.String", MBeanOperationInfo.INFO);
+
+    private static final MBeanOperationInfo OP_DUMP_STATE_INFO =
+        new MBeanOperationInfo
+        (OP_DUMP_STATE,
+         "Dump replicated environment state, including current position in " +
+         "replication stream and replication group database.",
+         new MBeanParameterInfo[0],
+         "java.lang.String", MBeanOperationInfo.INFO);
+
+    protected RepJEMonitor(Environment env) {
+        super(env);
+    }
+
+    public RepJEMonitor() {
+        super();
+    }
+
+    @Override
+    protected void initClassFields() {
+        currentClass = RepJEMonitor.class;
+        className = "RepJEMonitor";
+        DESCRIPTION = "Monitor an open replicated Berkeley DB, " +
+                      "Java Edition environment.";
+    }
+
+    @Override
+    public Object invoke(String actionName,
+                         Object[] params,
+                         String[] signature)
+        throws MBeanException {
+
+        if (actionName == null) {
+            throw new IllegalArgumentException("ActionName can't be null.");
+        }
+
+        try {
+            if (actionName.equals(OP_DUMP_REPSTATS)) {
+                return ((ReplicatedEnvironment) env).
+ getRepStats(getStatsConfig(params)).toString(); + } else if (actionName.equals(OP_GET_REP_TIPS)) { + return ((ReplicatedEnvironment) env).getRepStats + (getStatsConfig(new Object[] {false, true})).getTips(); + } else if (actionName.equals(OP_DUMP_STATE)) { + return RepInternal.getNonNullRepImpl + ((ReplicatedEnvironment) env).dumpState(); + } + } catch (DatabaseException e) { + throw new MBeanException(new RuntimeException(e.getMessage())); + } + + return super.invoke(actionName, params, signature); + } + + @Override + protected void doRegisterMBean(Environment useEnv) + throws Exception { + + server.registerMBean(new RepJEMonitor(useEnv), jeName); + } + + @Override + protected void addOperations() { + super.addOperations(); + operationList.add(OP_DUMP_REPSTATS_INFO); + operationList.add(OP_DUMP_STATE_INFO); + } +} diff --git a/src/com/sleepycat/je/rep/jmx/package-info.java b/src/com/sleepycat/je/rep/jmx/package-info.java new file mode 100644 index 0000000..2bccf2f --- /dev/null +++ b/src/com/sleepycat/je/rep/jmx/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Monitoring of HA statistics via JMX. + */ +package com.sleepycat.je.rep.jmx; diff --git a/src/com/sleepycat/je/rep/jmx/plugin/RepJEStats.java b/src/com/sleepycat/je/rep/jmx/plugin/RepJEStats.java new file mode 100644 index 0000000..d4ebd61 --- /dev/null +++ b/src/com/sleepycat/je/rep/jmx/plugin/RepJEStats.java @@ -0,0 +1,50 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.jmx.plugin; + +import java.util.HashMap; + +import javax.management.MBeanServerConnection; + +import com.sleepycat.je.jmx.plugin.Stats; +import com.sleepycat.je.rep.ReplicatedEnvironmentStats; +import com.sleepycat.je.rep.jmx.RepJEMonitor; + +public class RepJEStats extends Stats { + private static final long serialVersionUID = 4240112567440108407L; + + public RepJEStats(MBeanServerConnection connection) { + super(connection); + } + + @Override + protected void initVariables() { + statsTitles = ReplicatedEnvironmentStats.getStatGroupTitles(); + opName = RepJEMonitor.OP_DUMP_REPSTATS; + mBeanNamePrefix = RepJEStatsPlugin.mBeanNamePrefix; + } + + @SuppressWarnings("unchecked") + @Override + protected void generateTips() { + try { + tips = (HashMap) connection.invoke + (objName, RepJEMonitor.OP_GET_REP_TIPS, + new Object[] {}, new String[] {}); + updateTips(); + } catch (Exception e) { + e.printStackTrace(); + } + } +} diff --git a/src/com/sleepycat/je/rep/jmx/plugin/RepJEStatsPlugin.java b/src/com/sleepycat/je/rep/jmx/plugin/RepJEStatsPlugin.java new file mode 100644 index 0000000..83c56e0 --- /dev/null +++ b/src/com/sleepycat/je/rep/jmx/plugin/RepJEStatsPlugin.java @@ -0,0 +1,56 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.jmx.plugin; + +import java.util.LinkedHashMap; + +import javax.management.ObjectName; +import javax.swing.JPanel; + +import com.sleepycat.je.jmx.plugin.JEStats; +import com.sleepycat.je.jmx.plugin.Stats; +import com.sleepycat.je.jmx.plugin.StatsPlugin; + +public class RepJEStatsPlugin extends StatsPlugin { + public static final String mBeanNamePrefix = + "com.sleepycat.je.jmx:name=RepJEMonitor(*"; + + @Override + protected void initTabs() { + if (tabs == null) { + tabs = new LinkedHashMap(); + try { + ObjectName name = new ObjectName(mBeanNamePrefix); + mBeanCount = getContext().getMBeanServerConnection(). + queryNames(name, null).size(); + + if (mBeanCount > 0) { + Stats status = + new JEStats(getContext().getMBeanServerConnection()); + tabs.put("JE Statistics", status); + stats.add(status); + status = + new RepJEStats(getContext().getMBeanServerConnection()); + tabs.put("JE Replicated Statistics", status); + stats.add(status); + } else { + tabs.put("JE Statistics", new JPanel()); + tabs.put("JE Replicated Statistics", new JPanel()); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + } +} diff --git a/src/com/sleepycat/je/rep/jmx/plugin/package-info.java b/src/com/sleepycat/je/rep/jmx/plugin/package-info.java new file mode 100644 index 0000000..31caffc --- /dev/null +++ b/src/com/sleepycat/je/rep/jmx/plugin/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: JConsole plugin for viewing HA stats/config, rarely used.
+ */
+package com.sleepycat.je.rep.jmx.plugin;
diff --git a/src/com/sleepycat/je/rep/jmx/plugin/services/com.sun.tools.jconsole.JConsolePlugin b/src/com/sleepycat/je/rep/jmx/plugin/services/com.sun.tools.jconsole.JConsolePlugin
new file mode 100644
index 0000000..cec66c5
--- /dev/null
+++ b/src/com/sleepycat/je/rep/jmx/plugin/services/com.sun.tools.jconsole.JConsolePlugin
@@ -0,0 +1 @@
+com.sleepycat.je.rep.jmx.plugin.RepJEStatsPlugin
diff --git a/src/com/sleepycat/je/rep/monitor/GroupChangeEvent.java b/src/com/sleepycat/je/rep/monitor/GroupChangeEvent.java
new file mode 100644
index 0000000..509d62e
--- /dev/null
+++ b/src/com/sleepycat/je/rep/monitor/GroupChangeEvent.java
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * The event used to track changes to the composition and status of the
+ * group. An instance of this event is created each time there is any change
+ * to the group.
+ */
+package com.sleepycat.je.rep.monitor;
+
+import com.sleepycat.je.rep.ReplicationGroup;
+
+/**
+ * The event generated when the group composition changes. A new instance of
+ * this event is generated each time a node is added or removed from the
+ * group. Note that SECONDARY nodes do not generate these events.
+ */
+/*
+ * TODO: EXTERNAL is hidden for now. The docs need to be updated to include
+ * EXTERNAL when it becomes public.
+ */
+public class GroupChangeEvent extends MonitorChangeEvent {
+
+    /**
+     * The kind of GroupChangeEvent.
+     */
+    public static enum GroupChangeType {
+
+        /**
+         * A new node was added to the replication group.
+         */
+        ADD,
+
+        /**
+         * A node was removed from the replication group.
+         */
+        REMOVE
+    };
+
+    /**
+     * The latest information about the replication group.
+     */
+    private final ReplicationGroup repGroup;
+
+    /**
+     * The type of this change.
+     */
+    private final GroupChangeType opType;
+
+    GroupChangeEvent(ReplicationGroup repGroup,
+                     String nodeName,
+                     GroupChangeType opType) {
+        super(nodeName);
+        this.repGroup = repGroup;
+        this.opType = opType;
+    }
+
+    /**
+     * Returns the current description of the replication group.
+     */
+    public ReplicationGroup getRepGroup() {
+        return repGroup;
+    }
+
+    /**
+     * Returns the type of the change (the addition of a new member or the
+     * removal of an existing member) made to the group. The method
+     * {@link MonitorChangeEvent#getNodeName() MonitorChangeEvent.getNodeName}
+     * can be used to identify the node that triggered the event.
+     *
+     * @return the group change type.
+     */
+    public GroupChangeType getChangeType() {
+        return opType;
+    }
+
+    @Override
+    public String toString() {
+        return "Node " + getNodeName() + " change type=" + getChangeType();
+    }
+}
diff --git a/src/com/sleepycat/je/rep/monitor/JoinGroupEvent.java b/src/com/sleepycat/je/rep/monitor/JoinGroupEvent.java
new file mode 100644
index 0000000..0fab573
--- /dev/null
+++ b/src/com/sleepycat/je/rep/monitor/JoinGroupEvent.java
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.monitor;
+
+import java.util.Date;
+
+/**
+ * The event generated when a node joins the group. A new instance of this
+ * event is generated each time a node joins the group.
+ *
+ * The event is generated on a "best effort" basis. It may not be generated,
+ * for example, if the joining node was unable to communicate with the monitor
+ * due to a network problem. The application must be resilient in the face of
+ * such missing events.
+ */
+public class JoinGroupEvent extends MemberChangeEvent {
+
+    /**
+     * The time when this node joined the group.
+     */
+    private final long joinTime;
+
+    JoinGroupEvent(String nodeName, String masterName, long joinTime) {
+        super(nodeName, masterName);
+        this.joinTime = joinTime;
+    }
+
+    /**
+     * Returns the time at which the node joined the group.
+     */
+    public Date getJoinTime() {
+        return new Date(joinTime);
+    }
+
+    @Override
+    public String toString() {
+        return "Node " + getNodeName() + " joined at " + getJoinTime();
+    }
+}
diff --git a/src/com/sleepycat/je/rep/monitor/LeaveGroupEvent.java b/src/com/sleepycat/je/rep/monitor/LeaveGroupEvent.java
new file mode 100644
index 0000000..29da4dc
--- /dev/null
+++ b/src/com/sleepycat/je/rep/monitor/LeaveGroupEvent.java
@@ -0,0 +1,103 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.monitor;
+
+import java.util.Date;
+
+/**
+ * The event generated when a node leaves the group. A new instance of this
+ * event is generated each time a node leaves the group.
+ *
+ * The event is generated on a "best effort" basis. It may not be generated
+ * if the node leaving the group dies before it has a chance to generate the
+ * event, for example, if the process was killed, or the node was unable to
+ * communicate with the monitor due to a network problem. The application
+ * must be resilient in the face of such missing events.
+ */
+public class LeaveGroupEvent extends MemberChangeEvent {
+
+    /**
+     * The reason why the node left the group.
+     */
+    public static enum LeaveReason {
+
+        /**
+         * Normal replica shutdown.
+         */
+        NORMAL_SHUTDOWN,
+
+        /**
+         * Abnormal termination.
+         */
+        ABNORMAL_TERMINATION,
+
+        /**
+         * Master-initiated shutdown.
+         */
+        MASTER_SHUTDOWN_GROUP
+    };
+
+    /**
+     * The time when this node joined the group.
+     */
+    private final long joinTime;
+
+    /**
+     * The time when this node left the group.
+     */
+    private final long leaveTime;
+
+    /**
+     * The reason why this node left the group.
+     */
+    private final LeaveReason leaveReason;
+
+    LeaveGroupEvent(String nodeName,
+                    String masterName,
+                    LeaveReason leaveReason,
+                    long joinTime,
+                    long leaveTime) {
+        super(nodeName, masterName);
+        this.joinTime = joinTime;
+        this.leaveTime = leaveTime;
+        this.leaveReason = leaveReason;
+    }
+
+    /**
+     * Returns the time at which the node joined the group.
+     */
+    public Date getJoinTime() {
+        return new Date(joinTime);
+    }
+
+    /**
+     * Returns the time at which the node left the group.
+     */
+    public Date getLeaveTime() {
+        return new Date(leaveTime);
+    }
+
+    /**
+     * Returns the reason why the node left the group.
+     */
+    public LeaveReason getLeaveReason() {
+        return leaveReason;
+    }
+
+    @Override
+    public String toString() {
+        return "Node " + getNodeName() + " left at " + getLeaveTime() +
+               " because of " + getLeaveReason();
+    }
+}
diff --git a/src/com/sleepycat/je/rep/monitor/MemberChangeEvent.java b/src/com/sleepycat/je/rep/monitor/MemberChangeEvent.java
new file mode 100644
index 0000000..edc3cfe
--- /dev/null
+++ b/src/com/sleepycat/je/rep/monitor/MemberChangeEvent.java
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.monitor;
+
+/**
+ * MemberChangeEvent is the base class for all member status change events.
+ * Its subclasses provide additional event-specific information.
+ */
+public abstract class MemberChangeEvent extends MonitorChangeEvent {
+
+    /**
+     * The name of the master at the time of this event.
+     */
+    private final String masterName;
+
+    MemberChangeEvent(String nodeName, String masterName) {
+        super(nodeName);
+        this.masterName = masterName;
+    }
+
+    /**
+     * Returns the name of the master at the time of this event. The return
+     * value may be null if there is no current master.
+     */
+    public String getMasterName() {
+        return masterName;
+    }
+}
diff --git a/src/com/sleepycat/je/rep/monitor/Monitor.java b/src/com/sleepycat/je/rep/monitor/Monitor.java
new file mode 100644
index 0000000..870a766
--- /dev/null
+++ b/src/com/sleepycat/je/rep/monitor/Monitor.java
@@ -0,0 +1,914 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.rep.monitor; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.NodeState; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationGroup; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.elections.Learner; +import com.sleepycat.je.rep.elections.MasterValue; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Protocol; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.elections.TimebasedProposalGenerator; +import com.sleepycat.je.rep.impl.NodeStateProtocol; +import com.sleepycat.je.rep.impl.NodeStateProtocol.NodeStateResponse; +import com.sleepycat.je.rep.impl.NodeStateService; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.util.DbPing; +import com.sleepycat.je.rep.util.ReplicationGroupAdmin; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Provides a lightweight mechanism to track the current master node and the + * members of the replication group. The information provided by the monitor + * can be used to route update requests to the node that is currently the + * master and distribute read requests across the other members of the group. + *

+ * The Monitor is typically run on a machine that participates in load
+ * balancing or request routing, or that simply serves as a basis for
+ * application-level monitoring; such a machine does not host a replicated
+ * environment. To avoid creating a single point of failure, an application
+ * may need to create multiple monitor instances, with each monitor running
+ * on a distinct machine.
+ *

+ * Applications with direct access to a {@link
+ * com.sleepycat.je.rep.ReplicatedEnvironment ReplicatedEnvironment} can use
+ * its synchronous and asynchronous mechanisms for determining the master
+ * node and group composition changes. The Monitor class is not needed by
+ * such applications.
+ *
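+ * For example (a sketch; "repEnv" and "myListener" are application
+ * objects, not part of this class):
+ *
+ *   ReplicatedEnvironment.State state = repEnv.getState(); // synchronous
+ *   repEnv.setStateChangeListener(myListener);             // asynchronous
+ *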

        + * The Monitor generally learns about changes to group status through events + * issued by replication group members. In addition, the Monitor maintains a + * daemon thread which periodically pings members of the group so that the + * Monitor can proactively discover group status changes that occur when it is + * down or has lost network connectivity. + *

        + * The following code excerpt illustrates the typical code sequence used to + * initiate a Monitor. Exception handling has been omitted to simplify the + * example. + * + *

        + * MonitorConfig monConfig = new MonitorConfig();
        + * monConfig.setGroupName("PlanetaryRepGroup");
        + * monConfig.setNodeName("mon1");
        + * monConfig.setNodeHostPort("monhost1.acme.com:7000");
        + * monConfig.setHelperHosts("mars.acme.com:5000,jupiter.acme.com:5000");
        + *
        + * Monitor monitor = new Monitor(monConfig);
        + *
        + * // If the monitor has not been registered as a member of the group,
        + * // register it now. register() returns the current node that is the
        + * // master.
        + *
        + * ReplicationNode currentMaster = monitor.register();
        + *
        + * // Start up the listener, so that it can be used to track changes
        + * // in the master node, or group composition. It can also be used to help
        + * // determine the electable nodes that are currently active and participating
        + * // in the replication group.
        + * monitor.startListener(new MyChangeListener());
        + * 
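 + *
 + * A minimal listener might look like this (an illustrative sketch;
 + * MyChangeListener is not part of the JE API):
 + *
 + * class MyChangeListener implements MonitorChangeListener {
 + *     public void notify(NewMasterEvent event) {
 + *         System.out.println("new master: " + event.getNodeName());
 + *     }
 + *     public void notify(GroupChangeEvent event) {
 + *         System.out.println("group change: " + event.getChangeType() +
 + *                            " " + event.getNodeName());
 + *     }
 + *     public void notify(JoinGroupEvent event) {
 + *         System.out.println(event.getNodeName() + " joined");
 + *     }
 + *     public void notify(LeaveGroupEvent event) {
 + *         System.out.println(event.getNodeName() + " left: " +
 + *                            event.getLeaveReason());
 + *     }
 + * }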
+ *
+ * @see MonitorChangeListener
+ * @see "Replication Guide, Writing Monitor Nodes"
+ * @see "je.rep.quote examples"
+ */
+public class Monitor {
+
+    /* The Monitor Id */
+    private final NameIdPair nameIdPair;
+
+    /* The configuration in use by this Monitor. */
+    private final MonitorConfig monitorConfig;
+
+    /* Provides the admin functionality for the monitor. */
+    private final ReplicationGroupAdmin repGroupAdmin;
+
+    /* The underlying learner that drives the Monitor. */
+    private Learner learner;
+
+    /* The Master change listener used by the Learner agent */
+    private MasterChangeListener masterChangeListener;
+
+    /* The Monitor's logger. */
+    private final Logger logger;
+    private final Formatter formatter;
+
+    /* The user-designated monitor change listener to be invoked. */
+    private MonitorChangeListener monitorChangeListener;
+
+    /* The channel factory */
+    private DataChannelFactory channelFactory;
+
+    /* The service dispatcher used by the Learner Agent and the Monitor. */
+    private ServiceDispatcher serviceDispatcher;
+
+    /* Set to true to force a shutdown of this monitor. */
+    final AtomicBoolean shutdown = new AtomicBoolean(false);
+
+    /*
+     * The groupMembers set saves the names of all nodes known to be part of
+     * the group.
+     *
+     * 1. If a node is not in the set, it means this node has not been added
+     *    to the group or has been removed from the group already. Note that
+     *    a node not in the groupMembers set may have an entry in the
+     *    joinEvents map if a join event arrived for the node without an
+     *    associated group add event, possibly because of an event delivery
+     *    failure.
+     * 2. If a node is in the set, but does not have an entry in the
+     *    joinEvents map, this node may have closed itself (due to either a
+     *    crash or a normal close) or has issued an ADD GroupChangeEvent.
+     *    However, it hasn't issued a JoinGroupEvent yet (because the ADD
+     *    GroupChangeEvent and the JoinGroupEvent are notified in two phases).
+     * 3. If a node is in the set, and is also in the joinEvents map, the
+     *    node has already issued a JoinGroupEvent.
+     */
+    private final Set<String> groupMembers = Collections.newSetFromMap(
+        new ConcurrentHashMap<String, Boolean>());
+
+    /*
+     * This map records whether a JoinGroupEvent has been issued for each
+     * node. Used when the ping thread is issuing a LeaveGroupEvent.
+     */
+    private final ConcurrentHashMap<String, JoinGroupEvent> joinEvents =
+        new ConcurrentHashMap<String, JoinGroupEvent>();
+
+    /*
+     * A thread which proactively checks on group status. TODO: ideally the
+     * implementation would be changed to use an ExecutorService rather than
+     * a Thread.
+     */
+    private PingThread pingThread;
+
+    /*
+     * A TestHook, used by unit tests. If it's true, no MonitorChangeEvents
+     * except NewMasterEvent will be issued.
+     */
+    private boolean disableNotify = false;
+
+    /**
+     * Deprecated as of JE 5. Creates a monitor instance using a {@link
+     * ReplicationConfig}. Monitor-specific properties that are not available
+     * in ReplicationConfig use default settings.
+     *
+     * @throws IllegalArgumentException if an invalid parameter is specified.
+     *
+     * @deprecated As of JE 5, replaced by
+     * {@link Monitor#Monitor(MonitorConfig)}
+     */
+    @Deprecated
+    public Monitor(ReplicationConfig monitorConfig) {
+        this(new MonitorConfig(monitorConfig));
+    }
+
+    /**
+     * Creates a monitor instance.
+     *

        + * @param monitorConfig configuration used by a Monitor + * + * @throws IllegalArgumentException if an invalid parameter is specified. + */ + public Monitor(MonitorConfig monitorConfig) { + String groupName = monitorConfig.getGroupName(); + if (groupName == null) { + throw new IllegalArgumentException("Missing group name"); + } + nameIdPair = new NameIdPair(monitorConfig.getNodeName()); + String nodeHost = monitorConfig.getNodeHostPort(); + if (nodeHost == null) { + throw new IllegalArgumentException("Missing nodeHost"); + } + this.monitorConfig = monitorConfig.clone(); + this.channelFactory = + DataChannelFactoryBuilder.construct( + monitorConfig.getRepNetConfig(), groupName); + repGroupAdmin = + new ReplicationGroupAdmin(groupName, + monitorConfig.getHelperSockets(), + channelFactory); + logger = LoggerUtils.getLoggerFormatterNeeded(getClass()); + formatter = new ReplicationFormatter(nameIdPair); + } + + /** + * Returns the name of the group associated with the Monitor. + * + * @return the group name + */ + public String getGroupName() { + return monitorConfig.getGroupName(); + } + + /** + * @hidden + * Returns the group-wide unique id associated with the monitor + * + * @return the monitor id + */ + public NameIdPair getMonitorNameIdPair() { + return nameIdPair; + } + + /** + * Returns the group-wide unique name associated with the monitor + * + * @return the monitor name + */ + public String getNodeName() { + return nameIdPair.getName(); + } + + /** + * Returns the socket used by this monitor to listen for group changes + * + * @return the monitor socket address + */ + public InetSocketAddress getMonitorSocketAddress() { + return monitorConfig.getNodeSocketAddress(); + } + + /** + * Registers the monitor with the group so that it can be kept informed + * of the outcome of elections and group membership changes. The + * monitor, just like a replication node, is identified by its nodeName. + * The Monitor uses the helper nodes to locate a master with which it can + * register itself. If the helper nodes are not available the registration + * will fail. + *

        + * A monitor must be registered at least once in order to be informed of + * ongoing election results and group changes. Attempts to re-register the + * same monitor are ignored. Registration, once it has been completed + * successfully, persists beyond the lifetime of the Monitor instance and + * does not need to be repeated. Repeated registrations are benign and + * merely confirm that the current monitor configuration is consistent with + * earlier registrations of this monitor. + * + * @return the node that is the current master + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the monitor has been shutdown, or no + * helper sockets were specified at Monitor initialization. + */ + public ReplicationNode register() + throws EnvironmentFailureException { + + if (shutdown.get()) { + throw new IllegalStateException("The monitor has been shutdown"); + } + + if (repGroupAdmin.getHelperSockets().size() == 0) { + throw new IllegalStateException + ("No helper sockets were specified at Monitor initialization"); + } + RepNodeImpl monitorNode = + new RepNodeImpl(nameIdPair, + NodeType.MONITOR, + monitorConfig.getNodeHostname(), + monitorConfig.getNodePort(), + JEVersion.CURRENT_VERSION); + /* Ensure that the monitor is part of the group. */ + return repGroupAdmin.ensureMonitor(monitorNode); + } + + /** + * Starts the listener so it's actively listening for election results and + * broadcasts of replication group changes. + *

        + * {@link Monitor#register} should be called before starting the listener. + * If the monitor has not been registered, it will not be updated, and its + * listener will not be invoked. + *

        + * Once the registration has been completed, the Monitor can start + * listening even if none of the other nodes in the group are available. + * It will be contacted automatically by the other nodes as they come up. + *

        + * If the group has a Master, invoking startListener results + * in a synchronous callback to the application via the {@link + * MonitorChangeListener#notify(NewMasterEvent)} method. If there is no + * Master at this time, the callback takes place asynchronously, after the + * method returns, when a Master is eventually elected. + *

        + * Starting the listener will start the underlying ping thread, which + * proactively checks group status for changes that might have been + * missed when this Monitor instance has lost network connectivity or + * is down. + * + * @param newListener the listener used to monitor events of interest. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IOException if the monitor socket could not be set up + * + * @throws IllegalArgumentException if an invalid parameter is specified. + * + * @throws IllegalStateException if the monitor has been shutdown, or a + * listener has already been established. + */ + public void startListener(MonitorChangeListener newListener) + throws DatabaseException, IOException { + + if (shutdown.get()) { + throw new IllegalStateException("The monitor has been shutdown"); + } + if (newListener == null) { + throw new IllegalArgumentException + ("A MonitorChangeListener must be associated with " + + " this Monitor when invoking this method"); + } + if (this.monitorChangeListener != null) { + throw new IllegalStateException + ("A Listener has already been established"); + } + + this.monitorChangeListener = newListener; + + serviceDispatcher = + new ServiceDispatcher(monitorConfig.getNodeSocketAddress(), + channelFactory); + serviceDispatcher.start(); + Protocol electionProtocol = + new Protocol(TimebasedProposalGenerator.getParser(), + MasterValue.getParser(), + monitorConfig.getGroupName(), + nameIdPair, + null, + channelFactory); + learner = new Learner(electionProtocol, serviceDispatcher); + serviceDispatcher.register(new MonitorService(this, + serviceDispatcher)); + masterChangeListener = new MasterChangeListener(); + learner.addListener(masterChangeListener); + learner.start(); + try { + /* Notify the listener about the current master. */ + final ReplicationGroup repGroup = repGroupAdmin.getGroup(); + final RepGroupImpl group = RepInternal.getRepGroupImpl(repGroup); + + /* + * In the absence of a network failure, the query should result in + * a call to the notify method of MonitorChangeListener. + */ + learner.queryForMaster(group.getAllLearnerSockets()); + + /* Notify JoinGroupEvents for those current active nodes. */ + notifyJoinGroupEventsForActiveNodes(repGroup); + + /* Start an underlying ping thread. */ + pingThread = new PingThread(repGroup); + pingThread.start(); + } catch (UnknownMasterException ume) { + /* The Listener will be informed when a Master is elected. */ + LoggerUtils.logMsg + (logger, formatter, Level.INFO, "No current master."); + } + } + + /* + * Used by unit test, disable notifying any GroupChangeEvents or + * JoinGroupEvents. + */ + void disableNotify(@SuppressWarnings("hiding") + final boolean disableNotify) { + this.disableNotify = disableNotify; + } + + /** + * Notify JoinGroupEvents for currently active nodes in replication group, + * and update the list of group members. + */ + private void notifyJoinGroupEventsForActiveNodes(ReplicationGroup group) { + NodeStateProtocol stateProtocol = + new NodeStateProtocol(group.getName(), + NameIdPair.NOCHECK, + null, + channelFactory); + for (ReplicationNode repNode : group.getNodes()) { + + /* + * Note any existing nodes with persistent id as group + * members. Nodes w/ transient id never send add or remove + * events, so they are not included in groupMembers. 
+ */ + if (!repNode.getType().hasTransientId()) { + groupMembers.add(repNode.getName()); + } + + /* + * Send out a NodeState request message for this node, but only for + * electable and secondary nodes, since monitor nodes don't support + * the NodeStateService. + */ + if (repNode.getType().isMonitor()) { + continue; + } + MessageExchange me = stateProtocol.new MessageExchange + (repNode.getSocketAddress(), + NodeStateService.SERVICE_NAME, + stateProtocol.new NodeStateRequest(repNode.getName())); + me.run(); + ResponseMessage resp = me.getResponseMessage(); + if (resp instanceof NodeStateResponse) { + NodeStateResponse response = (NodeStateResponse) resp; + notifyJoin(new JoinGroupEvent(response.getNodeName(), + response.getMasterName(), + response.getJoinTime())); + } + } + } + + /** + * Identifies the master of the replication group, resulting from the last + * successful election. This method relies on the helper nodes supplied + * to the monitor and queries them for the master. + * + * This method is useful when a Monitor first starts up and the Master + * needs to be determined. Once a Monitor is registered and the Listener + * has been started, it's kept up to date via events that are delivered + * to the Listener. + * + * @return the id associated with the master replication node. + * + * @throws UnknownMasterException if the master could not be determined + * from the set of helpers made available to the Monitor. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the monitor has been shutdown. + */ + public String getMasterNodeName() + throws UnknownMasterException { + + if (shutdown.get()) { + throw new IllegalStateException("The monitor has been shutdown"); + } + return repGroupAdmin.getMasterNodeName(); + } + + /** + * Returns the current composition of the group. It does so by first + * querying the helpers to determine the master and then obtaining the + * group information from the master. + * + * @return an instance of RepGroup denoting the current composition of the + * group + * + * @throws UnknownMasterException if the master could not be determined + * from the set of helpers made available to the Monitor. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if the monitor has been shutdown. + */ + public ReplicationGroup getGroup() + throws UnknownMasterException, DatabaseException { + + if (shutdown.get()) { + throw new IllegalStateException("The monitor has been shutdown"); + } + + /* + * TODO: Should we use this information to update the helper set as an + * optimization? + */ + return repGroupAdmin.getGroup(); + } + + /** + * Release monitor resources and shut down the monitor. + * @throws InterruptedException + */ + public synchronized void shutdown() + throws InterruptedException { + + boolean changed = shutdown.compareAndSet(false, true); + if (!changed) { + return; + } + + LoggerUtils.logMsg(logger, formatter, Level.INFO, + "Shutting down monitor " + nameIdPair); + + /* Shutdown the Ping thread. */ + if (pingThread != null) { + pingThread.stopThread(); + pingThread = null; + } + + if (learner != null) { + learner.shutdown(); + } + + if (serviceDispatcher != null) { + serviceDispatcher.shutdown(); + } + } + + /** + * Notify the MonitorChangeListener that a GroupChangeEvent occurred. 
+ */ + synchronized void notifyGroupChange(GroupChangeEvent event) { + if (disableNotify) { + return; + } + + final String name = event.getNodeName(); + switch (event.getChangeType()) { + case REMOVE: + /* If the REMOVE event has been fired before, do nothing. */ + if (!groupMembers.contains(name)) { + return; + } + /* Remove the deleted node from the group members set. */ + groupMembers.remove(name); + break; + case ADD: + /* If the ADD event has been fired before, do nothing. */ + if (groupMembers.contains(name)) { + return; + } + + /* Add the node to the group members set. */ + groupMembers.add(name); + break; + default: + throw new IllegalArgumentException + ("Unrecognized GroupChangeType: " + event.getChangeType()); + } + + monitorChangeListener.notify(event); + } + + /** + * Notify the MonitorChangeListener that a JoinGroupEvent happens. + */ + synchronized void notifyJoin(JoinGroupEvent event) { + if (disableNotify) { + return; + } + + final String name = event.getNodeName(); + /* If this JoinGroupEvent has been fired before, do nothing. */ + if (joinEvents.containsKey(name)) { + return; + } + + /* + * Save the JoinGroupEvent for this node so that it can be used while + * notifying an abnormal LeaveGroupEvent for this node. + */ + joinEvents.put(name, event); + monitorChangeListener.notify(event); + } + + /** + * Notify the MonitorChangeListener that a LeaveGroupEvent occurred. + */ + synchronized void notifyLeave(LeaveGroupEvent event) { + + /* Only notify a leave event if the node is currently joined */ + if (joinEvents.remove(event.getNodeName()) != null) { + monitorChangeListener.notify(event); + } + } + + /** + * The Listener used to learn about new Masters + */ + private class MasterChangeListener implements Learner.Listener { + /* The current learned value. */ + private MasterValue currentValue = null; + + /** + * Implements the Listener protocol. + */ + @Override + public void notify(Proposal proposal, Value value) { + /* We have a winning new proposal, is it truly different? */ + if (value.equals(currentValue)) { + return; + } + currentValue = (MasterValue) value; + try { + String currentMasterName = currentValue.getNodeName(); + LoggerUtils.logMsg(logger, formatter, Level.INFO, + "Monitor notified of new Master: " + + currentMasterName); + if (monitorChangeListener == null) { + /* No interest */ + return; + } + monitorChangeListener.notify + (new NewMasterEvent(currentValue)); + } catch (Exception e) { + LoggerUtils.logMsg + (logger, formatter, Level.SEVERE, + "Monitor change event processing exception: " + + e.getMessage()); + } + } + } + + /* + * PingThread periodically queries the replication group state in order + * to proactively find group changes that this monitor may have missed + * if the monitor was down or had network connectivity problems. Any + * missed changes are propagated as the appropriate type of events. + * + * PingThread takes these steps: + * 1. Get the current group information. If not available, use the last + * valid group information. Get information for both removed and + * non-removed nodes. + * 2. Walk through the removed nodes set. If there is a node which is in + * the removed nodes set but still contained in groupMembers, send a + * REMOVE GroupChangeEvent. + * 2a. Also walk through the group members remembered by the monitor, and + * send REMOVE GroupChangeEvent for each one that is not recorded in + * the current group info for removed or unremoved nodes, to account + * for deleted nodes. + * 3. Walk through all unremoved nodes. 
There are three cases:
+     *    a. If a node is reachable (it acks the state request), but it's not
+     *       in groupMembers, emit an ADD GroupChangeEvent. Only do this for
+     *       electable and monitor nodes -- secondary nodes don't send these
+     *       events.
+     *    b. If a node is reachable, but joinEvents.containsKey(node name)
+     *       returns false, send a JoinGroupEvent.
+     *    c. If a node is unreachable, but joinEvents.containsKey(node name)
+     *       returns true, we want to send a LeaveGroupEvent, but guard
+     *       against this being a transient situation. We do some retries,
+     *       and if we get the same result for all retries, send a missed
+     *       LeaveGroupEvent.
+     * 3a. Also walk through all removed nodes, and do cases 3.b. and 3.c.
+     *     for those nodes.
+     * 4. Walk through all nodes in joinEvents, and perform 3.c. for ones
+     *    which are not present as removed or unremoved nodes in the group.
+     *    These nodes are secondary nodes, which are removed from the group
+     *    when the node becomes unavailable.
+     */
+    private class PingThread extends Thread {
+        private volatile boolean running = true;
+        private ReplicationGroup group;
+        private final int retries;
+        private final long retryInterval;
+        private final int socketConnectTimeout;
+
+        /*
+         * Track the missed LeaveEvents, mapping from the node name to the
+         * number of times the event is thought to have been missed.
+         */
+        private final Map<String, Integer> missedLeaveEvents =
+            new HashMap<String, Integer>();
+
+        /* Construct an underlying PingThread. */
+        public PingThread(ReplicationGroup group) {
+            this.group = group;
+            this.retries = monitorConfig.getNumRetries();
+            this.retryInterval = monitorConfig.getRetryInterval();
+            this.socketConnectTimeout =
+                monitorConfig.getSocketConnectTimeout();
+            setDaemon(true);
+        }
+
+        @Override
+        public void run() {
+            try {
+                while (running) {
+                    for (int i = 0; i < retries && running; i++) {
+
+                        queryNodes();
+
+                        /*
+                         * Ensure that the monitor hasn't missed any election
+                         * results due to network interruptions by explicitly
+                         * querying for a new master on a periodic basis.
+                         */
+                        RepGroupImpl groupImpl =
+                            RepInternal.getRepGroupImpl(group);
+                        /*
+                         * The query for a master will result in
+                         * NewMasterEvents, if there are any changes in the
+                         * master.
+                         */
+                        learner.queryForMaster(
+                            groupImpl.getAllLearnerSockets());
+
+                        /* Sleep a while after querying all the nodes. */
+                        sleep(retryInterval);
+                    }
+                    missedLeaveEvents.clear();
+                }
+            } catch (InterruptedException e) {
+                LoggerUtils.logMsg(logger, formatter, Level.INFO,
+                                   "The daemon PingThread is interrupted: " +
+                                   e.getMessage());
+            }
+        }
+
+        /**
+         * Ping all nodes to find out about possible missed events.
+         * Manufacture notifications for any missed events.
+         */
+        private void queryNodes() {
+            /* Get the current valid group information. */
+            final ReplicationGroup repGroup = getValidGroup();
+
+            final Set<? extends ReplicationNode> removedNodes =
+                repGroup.getRepGroupImpl().getRemovedNodes();
+
+            /* Send missed REMOVE GroupChangeEvents for removed nodes. */
+            for (final ReplicationNode repNode : removedNodes) {
+                notifyGroupChange
+                    (new GroupChangeEvent(repGroup,
+                                          repNode.getName(),
+                                          GroupChangeType.REMOVE));
+            }
+
+            /*
+             * Also send REMOVE GroupChangeEvents for nodes in groupMembers
+             * that are no longer in the replication group, to account for
+             * deleted nodes, which will not be remembered as removed nodes.
+             */
+            for (final String nodeName : groupMembers) {
+                if (repGroup.getRepGroupImpl().getNode(nodeName) == null) {
+                    notifyGroupChange(
+                        new GroupChangeEvent(
+                            repGroup, nodeName, GroupChangeType.REMOVE));
+                }
+            }
+
+            /*
+             * Ping nodes to send missed ADD GroupChangeEvents,
+             * JoinGroupEvents and LeaveGroupEvents.
+             */
+            for (ReplicationNode repNode : repGroup.getNodes()) {
+                pingNode(repNode, repGroup, false);
+            }
+
+            /* Also ping removed nodes for missed leave events. */
+            for (final ReplicationNode repNode : removedNodes) {
+                pingNode(repNode, repGroup, true);
+            }
+
+            /*
+             * Send leave events for secondary nodes that no longer appear in
+             * the group. Those nodes are completely removed from the group as
+             * soon as they disconnect from the feeder, and since they are not
+             * removed for any other reason, such a disappearance should only
+             * happen due to a shutdown or a network failure.
+             *
+             * TODO: Provide for distinguishing secondary and deleted
+             * electable nodes.
+             */
+            for (final String nodeName : joinEvents.keySet()) {
+                final ReplicationNode repNode =
+                    repGroup.getRepGroupImpl().getNode(nodeName);
+                if (repNode == null) {
+                    notifyMissedLeaveEvents(nodeName);
+                }
+            }
+        }
+
+        /** Ping a node to issue missed events. */
+        private void pingNode(ReplicationNode repNode,
+                              ReplicationGroup repGroup,
+                              boolean removed) {
+
+            /* Monitor nodes don't respond to pings. */
+            if (repNode.getType().isMonitor()) {
+                return;
+            }
+
+            final String name = repNode.getName();
+            try {
+                DbPing ping = new DbPing(repNode,
+                                         getGroupName(),
+                                         socketConnectTimeout,
+                                         channelFactory);
+                NodeState state = ping.getNodeState();
+
+                /* Send an ADD GroupChangeEvent if needed. */
+                if (!groupMembers.contains(name) &&
+                    !repNode.getType().isSecondary() &&
+                    !repNode.getType().isExternal() &&
+                    !removed) {
+                    notifyGroupChange(
+                        new GroupChangeEvent(repGroup,
+                                             name,
+                                             GroupChangeType.ADD));
+                }
+
+                /* Send a JoinGroupEvent if needed. */
+                if (!joinEvents.containsKey(name)) {
+                    notifyJoin(new JoinGroupEvent(name,
+                                                  state.getMasterName(),
+                                                  state.getJoinTime()));
+                }
+            } catch (IOException e) {
+                /* Increase the counter of this down node. */
+                notifyMissedLeaveEvents(name);
+            } catch (ServiceConnectFailedException e) {
+                /* Increase the counter of this down node. */
+                notifyMissedLeaveEvents(name);
+            } catch (ProtocolException e) {
+
+                /*
+                 * Thrown if a node is restarted with a new name on a
+                 * hostname/port combination used for an earlier node, meaning
+                 * the old node is offline. Increase the counter of this down
+                 * node.
+                 */
+                notifyMissedLeaveEvents(name);
+            }
+        }
+
+        /*
+         * If the master is currently unknown, use the last valid group
+         * information so that the ping thread can continue working.
+         */
+        private ReplicationGroup getValidGroup() {
+            ReplicationGroup repGroup = null;
+            try {
+                repGroup = getGroup();
+                group = repGroup;
+            } catch (Exception e) {
+                repGroup = group;
+            }
+
+            return repGroup;
+        }
+
+        /* Notify a missed LeaveGroupEvent. */
+        private void notifyMissedLeaveEvents(String name) {
+            final JoinGroupEvent event = joinEvents.get(name);
+            if (event == null) {
+                return;
+            }
+
+            int counter = (missedLeaveEvents.get(name) == null) ?
+ 1 : missedLeaveEvents.get(name) + 1; + missedLeaveEvents.put(name, counter); + + if (missedLeaveEvents.get(name) == retries) { + notifyLeave(new LeaveGroupEvent( + name, event.getMasterName(), + LeaveReason.ABNORMAL_TERMINATION, + event.getJoinTime().getTime(), + System.currentTimeMillis())); + } + + } + + public void stopThread() { + running = false; + } + } +} diff --git a/src/com/sleepycat/je/rep/monitor/MonitorChangeEvent.java b/src/com/sleepycat/je/rep/monitor/MonitorChangeEvent.java new file mode 100644 index 0000000..746dbe6 --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/MonitorChangeEvent.java @@ -0,0 +1,40 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +/** + * MonitorChangeEvent is the base class for all Monitor events. Its subclasses + * provide additional event-specific information. + *

        + * See {@link Replication Guide, Writing Monitor Nodes} + */ +public abstract class MonitorChangeEvent { + + /** + * The name of the node associated with the event + */ + private final String nodeName; + + MonitorChangeEvent(String nodeName) { + this.nodeName = nodeName; + } + + /** + * Returns the name of the node associated with the event. + */ + public String getNodeName() { + return nodeName; + } +} diff --git a/src/com/sleepycat/je/rep/monitor/MonitorChangeListener.java b/src/com/sleepycat/je/rep/monitor/MonitorChangeListener.java new file mode 100644 index 0000000..0fc99dd --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/MonitorChangeListener.java @@ -0,0 +1,84 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +import com.sleepycat.je.rep.ReplicatedEnvironment; + +/** + * Applications can register for Monitor event notification through + * {@link Monitor#startListener}. The interface defines an overloaded notify + * event for each event supported by the Monitor. + *

        + * Changes in the composition of the replication group, or in the dynamic state + * of a member, are communicated to the listener as events that are represented + * as subclasses of {@link MonitorChangeEvent MonitorChangeEvent}. Classes + * implementing this interface supply implementations for a notify + * associated with each type of event, so they can respond with some + * application-specific course of action. + *
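+ * A minimal listener sketch (the class name and printouts are hypothetical,
+ * not part of this API):
+ * <pre>
+ *  class GroupTracker implements MonitorChangeListener {
+ *      public void notify(NewMasterEvent event) {
+ *          System.out.println("new master: " + event.getNodeName());
+ *      }
+ *      public void notify(GroupChangeEvent event) {
+ *          System.out.println("group change: " + event.getNodeName());
+ *      }
+ *      public void notify(JoinGroupEvent event) {
+ *          System.out.println("joined: " + event.getNodeName());
+ *      }
+ *      public void notify(LeaveGroupEvent event) {
+ *          System.out.println("left: " + event.getNodeName());
+ *      }
+ *  }
+ * </pre>
+ *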

        + * See {@link Replication Guide, Writing Monitor Nodes} + */ +public interface MonitorChangeListener { + + /** + * The method is invoked whenever there is new master associated with the + * replication group. + * + * If the method throws an exception, JE will log the exception as a trace + * message, which will be propagated through the usual channels. + * + * @param newMasterEvent the event that resulted in the notify. It + * identifies the new master. + */ + public void notify(NewMasterEvent newMasterEvent); + + /** + * The method is invoked whenever there is a change in the composition of + * the replication group. That is, a new node has been added to the group + * or an existing member has been removed from the group. Note that + * SECONDARY nodes do not produce these events. + * + * If the method throws an exception, JE will log the exception as a trace + * message, which will be propagated through the usual channels. + * + * @param groupChangeEvent the event that resulted in the notify. It + * describes the new group composition and identifies the node that + * provoked the change. + */ + /* + * TODO: EXTERNAL is hidden for now. The doc need updated to include + * EXTERNAL when it becomes public. + */ + public void notify(GroupChangeEvent groupChangeEvent); + + /** + * The method is invoked whenever a node joins the group, by successfully + * opening its first + * {@link ReplicatedEnvironment ReplicatedEnvironment} handle. + * + * @param joinGroupEvent the event that resulted in the notify. It + * identifies the node that joined the group. + */ + public void notify(JoinGroupEvent joinGroupEvent); + + /** + * The method is invoked whenever a node leaves the group by closing its + * last {@link ReplicatedEnvironment ReplicatedEnvironment} handle. + * + * @param leaveGroupEvent the event that resulted in the notify. It + * identifies the node that left the group. + */ + public void notify(LeaveGroupEvent leaveGroupEvent); +} diff --git a/src/com/sleepycat/je/rep/monitor/MonitorConfig.java b/src/com/sleepycat/je/rep/monitor/MonitorConfig.java new file mode 100644 index 0000000..1b0242c --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/MonitorConfig.java @@ -0,0 +1,444 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +import java.net.InetSocketAddress; +import java.util.Properties; +import java.util.Set; + +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.utilint.HostPortPair; + +/** + * Specifies the attributes used by a replication {@link Monitor}. + *

        + * The following properties identify the target group. + *

          + *
        • groupName: the name of the replication group being monitored.
        • + *
        • nodeName: the group-wide unique name associated with this + * monitor node.
        • + *
        • nodeHost: the hostname and port associated with this Monitor. Used + * by group members to contact the Monitor.
        • + *
        • helperHosts: the list of replication nodes which the Monitor uses to + * register itself so it can receive notifications about group status + * changes.
        • + *
        + * The following properties configure the daemon ping thread implemented + * within the Monitor. This daemon thread lets the Monitor proactively find + * status changes that occur when the Monitor is down or has lost network + * connectivity. + *
          + *
• numRetries: the number of times the ping thread attempts to contact a + * node before deeming it unreachable.
        • + *
        • retryInterval: number of milliseconds between ping thread retries. + *
        • + *
• timeout: the socket connection timeout, in milliseconds, used + * when the ping thread attempts to establish a connection with a replication + * node.
        • + *
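+ * <p>
+ * A minimal configuration sketch (the group, node, and host:port values
+ * below are hypothetical):
+ * <pre>
+ *  MonitorConfig monConfig = new MonitorConfig();
+ *  monConfig.setGroupName("PlanetaryGroup");
+ *  monConfig.setNodeName("mon1");
+ *  monConfig.setNodeHostPort("monhost1.acme.com:7000");
+ *  monConfig.setHelperHosts("jupiter.acme.com:5002,mercury.acme.com:5001");
+ *  monConfig.setNumRetries(5);
+ *  monConfig.setRetryInterval(1000);
+ * </pre>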
        + * @since JE 5.0 + */ +public class MonitorConfig implements Cloneable { + + /** + * An instance created using the default constructor is initialized with + * the default settings. + */ + public static final MonitorConfig DEFAULT = new MonitorConfig(); + + /* + * Since the MonitorConfig and ReplicationConfig have lots of common + * properties, it uses lots of properties defined in RepParams. + */ + private Properties props; + private final boolean validateParams = true; + + /* These properties are mutable for a Monitor. */ + private int numRetries = 5; + private long retryInterval = 1000; + private int socketConnectTimeout = 10000; + + /* The replication net configuration */ + private ReplicationNetworkConfig repNetConfig; + + /** + * An instance created using the default constructor is initialized with + * the default settings. + */ + public MonitorConfig() { + props = new Properties(); + repNetConfig = ReplicationNetworkConfig.createDefault(); + } + + /* Internal use only, support the deprecated Monitor Constructor. */ + MonitorConfig(ReplicationConfig repConfig) { + props = new Properties(); + repNetConfig = repConfig.getRepNetConfig().clone(); + setNodeName(repConfig.getNodeName()); + setGroupName(repConfig.getGroupName()); + setNodeHostPort(repConfig.getNodeHostPort()); + setHelperHosts(repConfig.getHelperHosts()); + + if (!repConfig.getNodeType().isMonitor()) { + throw new IllegalArgumentException + ("The configured node type was: " + repConfig.getNodeType() + + " instead of: " + NodeType.MONITOR); + } + } + + /** + * Sets the name for the replication group. The name must be made up of + * just alpha numeric characters and must not be zero length. + * + * @param groupName the alpha numeric string representing the name. + * + * @throws IllegalArgumentException if the string name is not valid. + */ + public MonitorConfig setGroupName(String groupName) + throws IllegalArgumentException { + + setGroupNameVoid(groupName); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setGroupNameVoid(String groupName) + throws IllegalArgumentException { + + DbConfigManager.setVal + (props, RepParams.GROUP_NAME, groupName, validateParams); + } + + /** + * Gets the name associated with the replication group. + * + * @return the name of this replication group. + */ + public String getGroupName() { + return DbConfigManager.getVal(props, RepParams.GROUP_NAME); + } + + /** + * Sets the name to be associated with this monitor. It must + * be unique within the group. When the monitor is + * instantiated and joins the replication group, a check is done to ensure + * that the name is unique, and a + * {@link com.sleepycat.je.rep.RestartRequiredException} is thrown if it is + * not. + * + * @param nodeName the name of this monitor. + */ + public MonitorConfig setNodeName(String nodeName) + throws IllegalArgumentException { + + setNodeNameVoid(nodeName); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeNameVoid(String nodeName) + throws IllegalArgumentException { + + DbConfigManager.setVal + (props, RepParams.NODE_NAME, nodeName, validateParams); + } + + /** + * Returns the unique name associated with this monitor. + * + * @return the monitor name + */ + public String getNodeName() { + return DbConfigManager.getVal(props, RepParams.NODE_NAME); + } + + /** + * Sets the hostname and port associated with this monitor. 
The hostname + * and port combination are denoted by a string of the form: + *
        +     *  hostname[:port]
        +     * 
        + * The port must be outside the range of "Well Known Ports" + * (zero through 1023). + * + * @param hostPort the string containing the hostname and port as above. + */ + public MonitorConfig setNodeHostPort(String hostPort) { + setNodeHostPortVoid(hostPort); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNodeHostPortVoid(String hostPort) { + DbConfigManager.setVal + (props, RepParams.NODE_HOST_PORT, hostPort, validateParams); + } + + /** + * Returns the hostname and port associated with this node. The hostname + * and port combination are denoted by a string of the form: + *
        +     *  hostname:port
        +     * 
        + * + * @return the hostname and port string of this monitor. + */ + public String getNodeHostPort() { + return DbConfigManager.getVal(props, RepParams.NODE_HOST_PORT); + } + + /** + * Identify one or more helpers nodes by their host and port pairs in this + * format: + *
        +     * hostname[:port][,hostname[:port]]*
        +     * 
        + * + * @param helperHosts the string representing the host and port pairs. + */ + public MonitorConfig setHelperHosts(String helperHosts) { + setHelperHostsVoid(helperHosts); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setHelperHostsVoid(String helperHosts) { + DbConfigManager.setVal + (props, RepParams.HELPER_HOSTS, helperHosts, validateParams); + } + + /** + * Returns the string identifying one or more helper host and port pairs in + * this format: + *
        +     * hostname[:port][,hostname[:port]]*
        +     * 
        + * + * @return the string representing the host port pairs. + */ + public String getHelperHosts() { + return DbConfigManager.getVal(props, RepParams.HELPER_HOSTS); + } + + /** + * @hidden + * + * For internal use only: Internal convenience method. + * + * Returns the set of sockets associated with helper nodes. + * + * @return the set of helper sockets, returns an empty set if there are no + * helpers. + */ + public Set getHelperSockets() { + return HostPortPair.getSockets(getHelperHosts()); + } + + /** + * Returns the hostname component of the nodeHost property. + * + * @return the hostname string + */ + public String getNodeHostname() { + String hostAndPort = getNodeHostPort(); + int colonToken = hostAndPort.indexOf(":"); + + return (colonToken >= 0) ? + hostAndPort.substring(0, colonToken) : hostAndPort; + } + + /** + * Returns the port component of the nodeHost property. + * + * @return the port number + */ + public int getNodePort() { + String hostAndPort = getNodeHostPort(); + int colonToken = hostAndPort.indexOf(":"); + + String portString = (colonToken >= 0) ? + hostAndPort.substring(colonToken + 1) : + DbConfigManager.getVal(props, RepParams.DEFAULT_PORT); + + return Integer.parseInt(portString); + } + + /** + * @hidden + * Internal use only. + * + * This method should only be used when the configuration object is known + * to have an authoritative value for its socket value. + * + * @return the InetSocketAddress used by this monitor + */ + public InetSocketAddress getNodeSocketAddress() { + return new InetSocketAddress(getNodeHostname(), getNodePort()); + } + + /** + * Sets the number of times a ping thread attempts to contact a node + * before deeming it unreachable. + * The default value is 5. + */ + public MonitorConfig setNumRetries(final int numRetries) { + setNumRetriesVoid(numRetries); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setNumRetriesVoid(final int numRetries) { + validate(numRetries, "numRetries"); + this.numRetries = numRetries; + } + + /** + * Returns the number of times a ping thread attempts to contact a node + * before deeming it unreachable. + */ + public int getNumRetries() { + return numRetries; + } + + /** + * Sets the number of milliseconds between ping thread retries. The default + * value is 1000. + */ + public MonitorConfig setRetryInterval(final long retryInterval) { + setRetryIntervalVoid(retryInterval); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setRetryIntervalVoid(final long retryInterval) { + validate(retryInterval, "retryInterval"); + this.retryInterval = retryInterval; + } + + /** + * Returns the number of milliseconds between ping thread retries. + */ + public long getRetryInterval() { + return retryInterval; + } + + /** + * Sets the socketConnection timeout, in milliseconds, used + * when the ping thread attempts to establish a connection with a + * replication node. The default value is 10,000. + */ + public MonitorConfig setSocketConnectTimeout(final int socketConnectTimeout) { + setSocketConnectTimeoutVoid(socketConnectTimeout); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. 
+ */ + public void setSocketConnectTimeoutVoid(final int socketConnectTimeout) { + validate(socketConnectTimeout, "socketConnectTimeout"); + this.socketConnectTimeout = socketConnectTimeout; + } + + /** + * Returns the socketConnection timeout, in milliseconds, used + * when the ping thread attempts to establish a connection with a + * replication node. + */ + public int getSocketConnectTimeout() { + return socketConnectTimeout; + } + + private void validate(Number number, String param) { + if (number.longValue() <= 0) { + throw new IllegalArgumentException + ("Parameter: " + param + " should be a positive number."); + } + } + + /** + * Returns a copy of this configuration object. + */ + @Override + public MonitorConfig clone() { + try { + MonitorConfig copy = (MonitorConfig) super.clone(); + + copy.props = (Properties) props.clone(); + copy.repNetConfig = repNetConfig.clone(); + return copy; + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * @hidden SSL deferred + * Get the replication service net configuration associated with + * this MonitorConfig. + */ + public ReplicationNetworkConfig getRepNetConfig() { + return repNetConfig; + } + + /** + * @hidden SSL deferred + * Set the replication service net configuration associated with + * this MonitorConfig. + * + * @param netConfig the new ReplicationNetworkConfig to be associated + * with this MonitorConfig. This must not be null. + * + * @throws IllegalArgumentException if the netConfig is null + */ + public MonitorConfig setRepNetConfig( + ReplicationNetworkConfig netConfig) { + + setRepNetConfigVoid(netConfig); + return this; + } + /** + * @hidden + * For bean editors + */ + public void setRepNetConfigVoid(ReplicationNetworkConfig netConfig) { + if (netConfig == null) { + throw new IllegalArgumentException("netConfig may not be null"); + } + repNetConfig = netConfig; + } +} diff --git a/src/com/sleepycat/je/rep/monitor/MonitorConfigBeanInfo.java b/src/com/sleepycat/je/rep/monitor/MonitorConfigBeanInfo.java new file mode 100644 index 0000000..3161ee1 --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/MonitorConfigBeanInfo.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +public class MonitorConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(MonitorConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(MonitorConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/monitor/MonitorService.java b/src/com/sleepycat/je/rep/monitor/MonitorService.java new file mode 100644 index 0000000..4b1bfc2 --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/MonitorService.java @@ -0,0 +1,147 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.monitor; + +import static com.sleepycat.je.utilint.TestHookExecute.doHookIfSet; + +import java.io.IOException; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.ReplicationGroup; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.monitor.Protocol.GroupChange; +import com.sleepycat.je.rep.monitor.Protocol.JoinGroup; +import com.sleepycat.je.rep.monitor.Protocol.LeaveGroup; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingService; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingRunnable; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHook; + +/** + * @hidden + * For internal use only. + */ +public class MonitorService extends ExecutingService { + + /* + * Test hooks that are run before processing various events. If the hooks + * throw IllegalStateException, then the event will not be processed. + */ + public static volatile TestHook processGroupChangeHook; + public static volatile TestHook processJoinGroupHook; + public static volatile TestHook processLeaveGroupHook; + + private final Monitor monitor; + private final Protocol protocol; + private final Logger logger; + private final Formatter formatter; + + /* Identifies the Group Service. */ + public static final String SERVICE_NAME = "Monitor"; + + public MonitorService(Monitor monitor, + ServiceDispatcher dispatcher) { + super(SERVICE_NAME, dispatcher); + this.monitor = monitor; + protocol = new Protocol(monitor.getGroupName(), + monitor.getMonitorNameIdPair(), + null, + dispatcher.getChannelFactory()); + logger = LoggerUtils.getLoggerFormatterNeeded(getClass()); + formatter = new ReplicationFormatter(monitor.getMonitorNameIdPair()); + } + + /* Dynamically invoked process methods */ + + /** + * Notify the monitor about the group change (add/remove a node) event. + */ + public ResponseMessage process(GroupChange groupChange) { + GroupChangeEvent event = + new GroupChangeEvent(new ReplicationGroup(groupChange.getGroup()), + groupChange.getNodeName(), + groupChange.getOpType()); + try { + assert doHookIfSet(processGroupChangeHook, event); + } catch (IllegalStateException e) { + return null; + } + monitor.notifyGroupChange(event); + return null; + } + + /** + * Notify the monitor about a node has joined the group. + */ + public ResponseMessage process(JoinGroup joinGroup) { + JoinGroupEvent event = new JoinGroupEvent(joinGroup.getNodeName(), + joinGroup.getMasterName(), + joinGroup.getJoinTime()); + try { + assert doHookIfSet(processJoinGroupHook, event); + } catch (IllegalStateException e) { + return null; + } + monitor.notifyJoin(event); + return null; + } + + /** + * Notify the monitor about a node has left the group. 
+ */ + public ResponseMessage process(LeaveGroup leaveGroup) { + LeaveGroupEvent event = + new LeaveGroupEvent(leaveGroup.getNodeName(), + leaveGroup.getMasterName(), + leaveGroup.getLeaveReason(), + leaveGroup.getJoinTime(), + leaveGroup.getLeaveTime()); + try { + assert doHookIfSet(processLeaveGroupHook, event); + } catch (IllegalStateException e) { + return null; + } + monitor.notifyLeave(event); + return null; + } + + @Override + public Runnable getRunnable(DataChannel dataChannel) { + return new MonitorServiceRunnable(dataChannel, protocol); + } + + class MonitorServiceRunnable extends ExecutingRunnable { + MonitorServiceRunnable(DataChannel dataChannel, + Protocol protocol) { + super(dataChannel, protocol, false); + } + + @Override + protected ResponseMessage getResponse(RequestMessage request) + throws IOException { + + return protocol.process(MonitorService.this, request); + } + + @Override + protected void logMessage(String message) { + LoggerUtils.logMsg(logger, formatter, Level.WARNING, message); + } + } +} diff --git a/src/com/sleepycat/je/rep/monitor/NewMasterEvent.java b/src/com/sleepycat/je/rep/monitor/NewMasterEvent.java new file mode 100644 index 0000000..5171754 --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/NewMasterEvent.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +import java.net.InetSocketAddress; + +import com.sleepycat.je.rep.elections.MasterValue; + +/** + * The event generated upon detecting a new Master. A new instance of this + * event is generated each time a new master is elected for the group. + */ +public class NewMasterEvent extends MemberChangeEvent { + /* The node ID identifying the master node. */ + private final MasterValue masterValue; + + NewMasterEvent(MasterValue masterValue) { + super(masterValue.getNodeName(), masterValue.getNodeName()); + this.masterValue = masterValue; + } + + /** + * Returns the socket address associated with the new master + */ + public InetSocketAddress getSocketAddress() { + return new InetSocketAddress(masterValue.getHostName(), + masterValue.getPort()); + } + + @Override + public String toString() { + return getNodeName() + " is new master"; + } +} diff --git a/src/com/sleepycat/je/rep/monitor/Protocol.java b/src/com/sleepycat/je/rep/monitor/Protocol.java new file mode 100644 index 0000000..47c249d --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/Protocol.java @@ -0,0 +1,260 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.monitor; + +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.TextProtocol; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.net.DataChannelFactory; + +/** + * @hidden + * For internal use only. + * + * Defines the protocol used by the Monitor to keep informed about group + * changes, and a node joins/leaves the group. The Master uses the protocol to + * inform all Monitors about group change and node join/leave change. + * + * GCHG -> no response expected from the monitor. + * JoinGroup -> no response expected from the monitor. + * LeaveGroup -> no response expected from the monitor. + */ +public class Protocol extends TextProtocol { + + /** The latest protocol version. */ + public static final String VERSION = "2.0"; + + /** The protocol version introduced to support RepGroupImpl version 3. */ + static final String REP_GROUP_V3_VERSION = "2.0"; + + /** The protocol version used with RepGroupImpl version 2. */ + static final String REP_GROUP_V2_VERSION = "1.0"; + + /* The messages defined by this class. */ + public final MessageOp GROUP_CHANGE_REQ = + new MessageOp("GCHG", GroupChange.class); + + public final MessageOp JOIN_GROUP_REQ = + new MessageOp("JG", JoinGroup.class); + + public final MessageOp LEAVE_GROUP_REQ = + new MessageOp("LG", LeaveGroup.class); + + /** + * Creates an instance of this class using the current protocol version. + */ + public Protocol(String groupName, NameIdPair nameIdPair, RepImpl repImpl, + DataChannelFactory channelFactory) { + this(VERSION, groupName, nameIdPair, repImpl, channelFactory); + } + + /** + * Creates an instance of this class using the specified protocol version. + */ + Protocol(String version, + String groupName, + NameIdPair nameIdPair, + RepImpl repImpl, + DataChannelFactory channelFactory) { + + super(version, groupName, nameIdPair, repImpl, channelFactory); + + initializeMessageOps(new MessageOp[] { + GROUP_CHANGE_REQ, + JOIN_GROUP_REQ, + LEAVE_GROUP_REQ + }); + + setTimeouts(repImpl, + RepParams.MONITOR_OPEN_TIMEOUT, + RepParams.MONITOR_READ_TIMEOUT); + } + + private abstract class ChangeEvent extends RequestMessage { + /* Name of node which this change event happens on. */ + private final String nodeName; + + public ChangeEvent(String nodeName) { + this.nodeName = nodeName; + } + + public ChangeEvent(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + nodeName = nextPayloadToken(); + } + + public String getNodeName() { + return nodeName; + } + + @Override + protected String getMessagePrefix() { + return messagePrefixNocheck; + } + + @Override + public String wireFormat() { + return wireFormatPrefix() + SEPARATOR + nodeName + SEPARATOR; + } + } + + public class GroupChange extends ChangeEvent { + private final RepGroupImpl group; + /* Represents it's a ADD or REMOVE change event. 
*/ + private final GroupChangeType opType; + + public GroupChange(RepGroupImpl group, + String nodeName, + GroupChangeType opType) { + super(nodeName); + this.group = group; + this.opType = opType; + } + + public GroupChange(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + opType = GroupChangeType.valueOf(nextPayloadToken()); + group = RepGroupImpl.deserializeHex + (tokens, getCurrentTokenPosition()); + } + + public RepGroupImpl getGroup() { + return group; + } + + public GroupChangeType getOpType() { + return opType; + } + + @Override + public MessageOp getOp() { + return GROUP_CHANGE_REQ; + } + + @Override + public String wireFormat() { + final int repGroupVersion = + (Double.parseDouble(sendVersion) <= + Double.parseDouble(REP_GROUP_V2_VERSION)) ? + RepGroupImpl.FORMAT_VERSION_2 : + RepGroupImpl.MAX_FORMAT_VERSION; + return super.wireFormat() + + opType.toString() + SEPARATOR + + group.serializeHex(repGroupVersion); + } + } + + private abstract class MemberEvent extends ChangeEvent { + private final String masterName; + private final long joinTime; + + public MemberEvent(String nodeName, String masterName, long joinTime) { + super(nodeName); + this.masterName = masterName; + this.joinTime = joinTime; + } + + public MemberEvent(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + masterName = nextPayloadToken(); + joinTime = Long.parseLong(nextPayloadToken()); + } + + public long getJoinTime() { + return joinTime; + } + + public String getMasterName() { + return masterName; + } + + @Override + public String wireFormat() { + return super.wireFormat() + + masterName + SEPARATOR + + Long.toString(joinTime); + } + } + + /* Represents the event that a node joins the group. */ + public class JoinGroup extends MemberEvent { + public JoinGroup(String nodeName, String masterName, long joinTime) { + super(nodeName, masterName, joinTime); + } + + public JoinGroup(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + } + + @Override + public MessageOp getOp() { + return JOIN_GROUP_REQ; + } + } + + /* Represents the event that a node leaves the group. */ + public class LeaveGroup extends MemberEvent { + private final LeaveReason leaveReason; + private final long leaveTime; + + public LeaveGroup(String nodeName, + String masterName, + LeaveReason leaveReason, + long joinTime, + long leaveTime) { + super(nodeName, masterName, joinTime); + this.leaveReason = leaveReason; + this.leaveTime = leaveTime; + } + + public LeaveGroup(String line, String[] tokens) + throws InvalidMessageException { + + super(line, tokens); + leaveReason = LeaveReason.valueOf(nextPayloadToken()); + leaveTime = Long.parseLong(nextPayloadToken()); + } + + public LeaveReason getLeaveReason() { + return leaveReason; + } + + public long getLeaveTime() { + return leaveTime; + } + + @Override + public MessageOp getOp() { + return LEAVE_GROUP_REQ; + } + + @Override + public String wireFormat() { + return super.wireFormat() + SEPARATOR + + leaveReason.toString() + SEPARATOR + + Long.toString(leaveTime); + } + } +} diff --git a/src/com/sleepycat/je/rep/monitor/package.html b/src/com/sleepycat/je/rep/monitor/package.html new file mode 100644 index 0000000..1267824 --- /dev/null +++ b/src/com/sleepycat/je/rep/monitor/package.html @@ -0,0 +1,27 @@ + + + + + + +BDB JE HA support for applications that need to track the composition of a +replication group, in order to do tasks such as load balancing and +request routing. + +

        Package Specification

        +The Monitor is intended for applications that do not directly +reference ReplicatedEnvironment, but need to track the composition of +a replication group and the current Master. A Monitor tracks changes +in replication group membership and roles. + +@see Replication Guide, Writing Monitor Nodes + + diff --git a/src/com/sleepycat/je/rep/net/DataChannel.java b/src/com/sleepycat/je/rep/net/DataChannel.java new file mode 100644 index 0000000..d06072c --- /dev/null +++ b/src/com/sleepycat/je/rep/net/DataChannel.java @@ -0,0 +1,97 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +import java.io.IOException; +import java.nio.channels.ByteChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.SocketChannel; + +/** + * @hidden + * An interface that associates a delegate socketChannel for network I/O, which + * provides ByteChannel, GatheringByteChannel, and ScatteringByteChannel, + * interfaces for callers. + */ +public interface DataChannel extends ByteChannel, + GatheringByteChannel, + ScatteringByteChannel { + + /** + * Accessor for the underlying SocketChannel. + * Callers may used the returned SocketChannel in order to query/modify + * connections attributes, but may not directly close, read from or write + * to the SocketChannel. + * + * @return the socket channel underlying this data channel instance + */ + public SocketChannel getSocketChannel(); + + /** + * Checks whether the channel encrypted. + * + * @return true if the data channel provides network privacy + */ + public boolean isSecure(); + + /** + * Checks whether the channel capable of determining peer trust. + * + * @return true if the data channel implementation has the capability + * to determine trust. + */ + public boolean isTrustCapable(); + + /** + * Checks whether the channel peer is trusted. + * + * @return true if the channel has determined that the peer is trusted. + */ + public boolean isTrusted(); + + /** + * The status of the flush method. + */ + public enum FlushStatus { + + /** Flushes are not being used. */ + DISABLED, + + /** Nothing needs to be flushed. */ + DONE, + + /** Flush not complete because there is something left to flush. */ + AGAIN, + + /** Flush not complete because socket is busy for write. */ + WRITE_BUSY, + + /** Flush not complete due to read data dependency. */ + NEED_READ, + + /** Flush not complete due to task execution dependency. */ + NEED_TASK, + } + + /** + * Attempt to flush any pending writes to the underlying socket buffer. + * The caller should ensure that it is the only thread accessing the + * DataChannel in order that the return value be meaningful. 
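+ * <p>
+ * A caller might drain pending writes with a loop along these lines (a
+ * sketch, where channel is a DataChannel instance; a production caller
+ * would also react to the other FlushStatus values):
+ * <pre>
+ *  FlushStatus status;
+ *  do {
+ *      status = channel.flush();
+ *  } while (status == FlushStatus.AGAIN);
+ * </pre>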
+ * + * @return the flush status + */ + public FlushStatus flush() throws IOException; +} + diff --git a/src/com/sleepycat/je/rep/net/DataChannelFactory.java b/src/com/sleepycat/je/rep/net/DataChannelFactory.java new file mode 100644 index 0000000..c4f8e1e --- /dev/null +++ b/src/com/sleepycat/je/rep/net/DataChannelFactory.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.channels.SocketChannel; + +/** + * @hidden + * Interface for creating DataChannel instances. + */ +public interface DataChannelFactory { + /** + * Creates a DataChannel from an newly accepted socketChannel + * + * @param socketChannel the newly accepted SocketChannel + * @return an implementation of DataChannel that wraps the + * input SocketChannel + */ + DataChannel acceptChannel(SocketChannel socketChannel); + + /** + * A collection of options to apply to a connection. + */ + public class ConnectOptions { + private boolean tcpNoDelay = false; + private int receiveBufferSize = 0; + private int openTimeout = 0; + private int readTimeout = 0; + private boolean blocking = true; + private boolean reuseAddr = false; + + /** + * Creates a base set of connection options. The default values + * for the options are: + *
        +         *   tcpNoDelay = false
        +         *   receiveBufferSize = 0
        +         *   openTimeout = 0
        +         *   readTimeout = 0
        +         *   blocking = true
        +         *   reuseAddr = false
        +         * 
        + */ + public ConnectOptions() { + } + + /** + * Sets the tcpNoDelay option for the connection. + * + * @param tcpNoDelay if true, disable the Nagle algorithm for delayed + * transmissions on connection + * @return this instance + */ + public final ConnectOptions setTcpNoDelay(boolean tcpNoDelay) { + this.tcpNoDelay = tcpNoDelay; + return this; + } + + /** + * Gets the tcpNoDelay option for the connection. + * + * @return true if the tcpNoDelay option is enabled + */ + public final boolean getTcpNoDelay() { + return this.tcpNoDelay; + } + + /** + * Sets the connection receive buffer size for the connection. + * + * @param rcvBufferSize the desired size of the receive buffer, or + * 0 to use system defaults. + * @return this instance + */ + public final ConnectOptions setReceiveBufferSize(int rcvBufferSize) { + this.receiveBufferSize = rcvBufferSize; + return this; + } + + /** + * Gets the connection receive buffer size for the connection. + * + * @return the configured receive buffer size option + */ + public final int getReceiveBufferSize() { + return this.receiveBufferSize; + } + + /** + * Sets the connection open timeout value for the connection. + * + * @param timeout the desired timeout value for connection initiation + * in milliseconds, or 0 if system defaults should be used + * @return this instance + */ + public final ConnectOptions setOpenTimeout(int timeout) { + this.openTimeout = timeout; + return this; + } + + /** + * Gets the connection open timeout value for the connection. + * + * @return the configured timeout value + */ + public final int getOpenTimeout() { + return this.openTimeout; + } + + /** + * Sets the read timeout value for the connection. + * + * @param timeout the desired timeout value for read operations in + * milliseconds, or 0 if system defaults should be used + * @return this instance + */ + public final ConnectOptions setReadTimeout(int timeout) { + this.readTimeout = timeout; + return this; + } + + /** + * Gets the read timeout value for the connection. + * + * @return the configured timeout value + */ + public final int getReadTimeout() { + return this.readTimeout; + } + + /** + * Sets the blocking mode option for the connection. + * + * @param blocking if true, the connection will use blocking mode IO + * @return this instance + */ + public final ConnectOptions setBlocking(boolean blocking) { + this.blocking = blocking; + return this; + } + + /** + * Gets the blocking mode option for the connection. + * + * @return the blockingMode configuration setting + */ + public final boolean getBlocking() { + return this.blocking; + } + + /** + * Sets the reuseAddr option for the connection. + * + * @param reuseAddr if true, enable the SO_REUSEADDR option on the + * underlying socket + * @return this instance + */ + public final ConnectOptions setReuseAddr(boolean reuseAddr) { + this.reuseAddr = reuseAddr; + return this; + } + + /** + * Gets the reuseAddr option for the connection. + * + * @return the setting of the reuseAddr option + */ + public final boolean getReuseAddr() { + return this.reuseAddr; + } + + /** + * Generates a String representation of the object. 
+ */ + @Override + public String toString() { + return "ConnectOptions[" + + "tcpNoDelay = " + tcpNoDelay + + ", receiveBufferSize = " + receiveBufferSize + + ", openTimeout = " + openTimeout + + ", readTimeout = " + readTimeout + + ", blocking = " + blocking + + ", reuseAddr = " + reuseAddr + + "]"; + } + }; + + /** + * Creates a DataChannel that connects to the specified address, + * with the specified options. + * + * @param addr The address to which the connection should be made. + * It is possible for a DataChannelFactory implementation to + * proxy this connection through an intermediary. + * @param connectOptions the collection of connection options to be + * applied to the connection. + * @return A DataChannel connected to the the specified address. + */ + DataChannel connect(InetSocketAddress addr, + ConnectOptions connectOptions) + throws IOException; +} diff --git a/src/com/sleepycat/je/rep/net/InstanceContext.java b/src/com/sleepycat/je/rep/net/InstanceContext.java new file mode 100644 index 0000000..ae14687 --- /dev/null +++ b/src/com/sleepycat/je/rep/net/InstanceContext.java @@ -0,0 +1,57 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +import com.sleepycat.je.rep.ReplicationNetworkConfig; + +/** + * The InstanceContext class captures contextual information for object + * instantiation by DataChannelFactory implementations. + */ +public class InstanceContext { + private final ReplicationNetworkConfig repNetConfig; + private LoggerFactory loggerFactory; + + /** + * Creates an InstanceContext instance. + * + * @param repNetConfig the configuration from which an instantiation + * is being generated. + * @param logger a logger that can be used for logging errors or other + * information + */ + public InstanceContext(ReplicationNetworkConfig repNetConfig, + LoggerFactory loggerFactory) { + this.repNetConfig = repNetConfig; + this.loggerFactory = loggerFactory; + } + + /** + * Gets configuration information for this context. + * + * @return the configuration from which this context was created + */ + final public ReplicationNetworkConfig getRepNetConfig() { + return repNetConfig; + } + + /** + * Gets the LoggerFactory that is usable by an instantiation for creation + * of a JE HA-friendly logging object. + * @return a LoggerFactory object. + */ + final public LoggerFactory getLoggerFactory() { + return loggerFactory; + } +} diff --git a/src/com/sleepycat/je/rep/net/InstanceLogger.java b/src/com/sleepycat/je/rep/net/InstanceLogger.java new file mode 100644 index 0000000..8b98bd8 --- /dev/null +++ b/src/com/sleepycat/je/rep/net/InstanceLogger.java @@ -0,0 +1,31 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +import java.util.logging.Level; + +/** + * The InstanceLogger interface provides a basic logging interface. + */ +public interface InstanceLogger { + + /** + * Logs a message at the specified logging level. The message is prefixed + * with an instance-dependent identifier. + * + * @param logLevel the logging level at which the message should be logged. + * @param msg a string to be logged. + */ + public void log(Level logLevel, String msg); +} diff --git a/src/com/sleepycat/je/rep/net/InstanceParams.java b/src/com/sleepycat/je/rep/net/InstanceParams.java new file mode 100644 index 0000000..f89a2a4 --- /dev/null +++ b/src/com/sleepycat/je/rep/net/InstanceParams.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +/** + * The InstanceParams class captures configuration information for object + * instantiation by DataChannelFactory implementations. + */ +public class InstanceParams { + private final InstanceContext context; + private final String classParams; + + /** + * Creates an InstanceParams instance. + * @param context the configuration context from which an instantiation + * is being generated. + * @param classParams a class-specific parameter argument, which may + * be null + */ + public InstanceParams(InstanceContext context, String classParams) { + this.context = context; + this.classParams = classParams; + } + + final public InstanceContext getContext() { + return context; + } + + final public String getClassParams() { + return classParams; + } +} diff --git a/src/com/sleepycat/je/rep/net/LoggerFactory.java b/src/com/sleepycat/je/rep/net/LoggerFactory.java new file mode 100644 index 0000000..e0a73fe --- /dev/null +++ b/src/com/sleepycat/je/rep/net/LoggerFactory.java @@ -0,0 +1,30 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +/** + * The LoggerFactory interface provides a mechanism for obtaining logging + * objects for use with dynamically constructed network objects. Instances + * of this interface are provided during object instantiation. 
+ */ +public interface LoggerFactory { + + /** + * Obtains an InstanceLogger for the specified class. + * + * @param clazz the class for which a logger instance is to be obtained. + * @return a logging object + */ + public InstanceLogger getLogger(Class clazz); +} diff --git a/src/com/sleepycat/je/rep/net/PasswordSource.java b/src/com/sleepycat/je/rep/net/PasswordSource.java new file mode 100644 index 0000000..32a61b0 --- /dev/null +++ b/src/com/sleepycat/je/rep/net/PasswordSource.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +/** + * Interface to support supplying of a password. + */ + +public interface PasswordSource { + /** + * Get the password. The use of this is context dependent. + * + * @return a copy of the password. It is recommended that the caller + * overwrite the return value with other characters when the password + * is no longer required. + */ + public char[] getPassword(); +} diff --git a/src/com/sleepycat/je/rep/net/SSLAuthenticator.java b/src/com/sleepycat/je/rep/net/SSLAuthenticator.java new file mode 100644 index 0000000..5f8444f --- /dev/null +++ b/src/com/sleepycat/je/rep/net/SSLAuthenticator.java @@ -0,0 +1,35 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.net; + +import javax.net.ssl.SSLSession; + +/** + * Interface to check the identity of a client based on its certificate. + * Implementations of this interface can be configured for use by the HA + * service to determine whether a client connection should be treated as + * authenticated. + */ +public interface SSLAuthenticator { + /* + * Based on the information in the SSLSession object, should the client peer + * be trusted as an internal entity? This method is called only in server + * mode. + * + * @param sslSession an SSL session object + * @return true if the SSL peer should be treated as "trusted" + */ + public boolean isTrusted(SSLSession sslSession); +} + diff --git a/src/com/sleepycat/je/rep/net/package.html b/src/com/sleepycat/je/rep/net/package.html new file mode 100644 index 0000000..1e793d1 --- /dev/null +++ b/src/com/sleepycat/je/rep/net/package.html @@ -0,0 +1,23 @@ + + + + + + +BDB JE HA support for network communications configuration. + +

        Package Specification

+A JE HA environment can be configured to provide additional network +communications capabilities such as encryption and authentication, using the +com.sleepycat.je.rep.ReplicationNetworkConfig class. This package includes +interfaces and classes that may be needed for advanced interaction with +those configuration capabilities. + + diff --git a/src/com/sleepycat/je/rep/package.html b/src/com/sleepycat/je/rep/package.html new file mode 100644 index 0000000..f1160b4 --- /dev/null +++ b/src/com/sleepycat/je/rep/package.html @@ -0,0 +1,110 @@ + + + + + + JE HA + + +
+Berkeley DB Java Edition High Availability (JE HA) enables replication of JE +environments. JE HA is an embedded database management +system designed to provide fast, reliable, and scalable data +management. A JE environment is replicated across the nodes of a +Replication Group consisting of a single read/write Master and multiple +read-only Replicas. JE HA is used to improve application availability, +provide improved read performance, and increase data durability.

        Getting Started

        +The +Replication +Guide is invaluable for understanding the capabilities of JE HA +and how best to design your replicated application. +
        +

        +The +Introduction +covers terminology, the replication group +lifecycle, and the concepts of durability and consistency. Much of the +javadoc refers to the topics covered there. +

        +Replication +API First Steps explains how to configure and start a replication +group. +
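+<p>
+For orientation, a minimal startup sketch (the environment home, group,
+node, and host:port values are hypothetical):
+<pre>
+  EnvironmentConfig envConfig = new EnvironmentConfig();
+  envConfig.setAllowCreate(true);
+  envConfig.setTransactional(true);
+
+  ReplicationConfig repConfig =
+      new ReplicationConfig("PlanetaryGroup", "Jupiter",
+                            "jupiter.acme.com:5001");
+  repConfig.setHelperHosts("mercury.acme.com:5001");
+
+  ReplicatedEnvironment repEnv =
+      new ReplicatedEnvironment(new File("/export/envHome"),
+                                repConfig, envConfig);
+</pre>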

        +Transaction Management + highlights the tradeoffs that must be considered in a replicated +application and provides some use cases. +
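+<p>
+For example, a replicated transaction commonly specifies a durability that
+requires acknowledgment from a simple majority of replicas (a sketch):
+<pre>
+  Durability durability =
+      new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+                     Durability.SyncPolicy.NO_SYNC,
+                     Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+  TransactionConfig txnConfig = new TransactionConfig();
+  txnConfig.setDurability(durability);
+</pre>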

        +In addition, the +je.rep.quote example +package provides three example replication applications. + +

        What the com.sleepycat.je.rep package contains

        +
        Replication control
        +
          +
        • ReplicatedEnvironment is the main access point to + replication.
        • +
        • ReplicationConfig and + ReplicationMutableConfig specify attributes of the + replication system.
        • +
        +
        Administration
        +
          +
        • + ReplicationNode and ReplicationGroup supply + administrative views of the replication system. +
        • +
        +
        Support
        +
          +
        • + StateChangeListener and StateChangeEvent + implement a Listener pattern for tracking changes in the replication system. +
        • +
• + CommitPointConsistencyPolicy, + TimeConsistencyPolicy and + NoConsistencyPolicy let the user control the read-only + replica's view of the replicated data.
        • +
• + ReplicatedEnvironmentStats provides feedback on system execution.
        • +
        + +
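+<p>
+A minimal state-change listener sketch (the class name and printouts are
+hypothetical):
+<pre>
+  class RoleTracker implements StateChangeListener {
+      public void stateChange(StateChangeEvent event) {
+          switch (event.getState()) {
+          case MASTER:
+              System.out.println("became master");
+              break;
+          case REPLICA:
+              System.out.println("replica; master is " +
+                                 event.getMasterNodeName());
+              break;
+          default:
+              System.out.println("state: " + event.getState());
+              break;
+          }
+      }
+  }
+</pre>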

        Related Packages

        +
          +
        • +com.sleepycat.je.rep.monitor lets the application track +the replication system in order to do tasks such as load balancing and +write request routing. +
        • +
        • +com.sleepycat.je.rep.util provides command line and +programmatic APIs for administering and starting up a replication +system. +
        • +
        +
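+<p>
+A monitor-based tracker is started along these lines (a sketch; monConfig is
+a MonitorConfig and MyChangeListener a hypothetical MonitorChangeListener
+implementation):
+<pre>
+  Monitor monitor = new Monitor(monConfig);
+  monitor.register();
+  monitor.startListener(new MyChangeListener());
+</pre>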

        Related Documentation

        +@see Replication Guide +@see + JE HA Examples + + + + + + diff --git a/src/com/sleepycat/je/rep/stream/ArbiterFeederSource.java b/src/com/sleepycat/je/rep/stream/ArbiterFeederSource.java new file mode 100644 index 0000000..954e0ee --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/ArbiterFeederSource.java @@ -0,0 +1,124 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import static com.sleepycat.je.rep.stream.ArbiterFeederStatDefinition.QUEUE_FULL; + +import java.io.IOException; +import java.util.NoSuchElementException; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; + +/** + * Implementation of a master node acting as a FeederSource for an Arbiter. + */ +public class ArbiterFeederSource implements FeederSource { + + + private final BlockingQueue queue; + private final EnvironmentImpl envImpl; + private final StatGroup stats; + private final LongStat nQueueFull; + + public ArbiterFeederSource(EnvironmentImpl envImpl) + throws DatabaseException { + + int queueSize = + envImpl.getConfigManager().getInt + (RepParams.ARBITER_OUTPUT_QUEUE_SIZE); + queue = new ArrayBlockingQueue(queueSize); + this.envImpl = envImpl; + stats = + new StatGroup(ArbiterFeederStatDefinition.GROUP_NAME, + ArbiterFeederStatDefinition.GROUP_DESC); + nQueueFull = new LongStat(stats, QUEUE_FULL); + } + + public void addCommit(LogItem commitItem) { + + if (!queue.offer(commitItem)) { + + /* + * If the commit could not be added to the queue because + * the queue is filled. Try to remove an item + * and replace with the item with the higher VLSN. + * The Arbiter ack for the higher VLSN is sufficient + * for transactions with a lower commit VLSN. + */ + nQueueFull.increment(); + try { + LogItem queuedItem = queue.remove(); + VLSN vlsn = commitItem.header.getVLSN(); + if (queuedItem.header.getVLSN().compareTo(vlsn) > 0) { + + /* + * The removed item has higher vlsn so use that one. + */ + commitItem = queuedItem; + } + } catch (NoSuchElementException noe) { + /* Queue was empty so try to insert one last time. */ + } + + /* + * Attempt to put the item on the queue. If another + * thread has inserted and the queue is full, we will + * skip this transaction for an Arbiter ack attempt. The + * transaction may still succeed in this case due to acks from + * Replicas or other Arbiter acked transactions with a higher + * VLSN. 
+ */ + queue.offer(commitItem); + } + } + + @Override + public void shutdown(EnvironmentImpl envImpl) { + } + + /* + * @see com.sleepycat.je.rep.stream.FeederSource#getLogRecord + * (com.sleepycat.je.utilint.VLSN, int) + */ + @Override + public OutputWireRecord getWireRecord(VLSN vlsn, int waitTime) + throws DatabaseException, InterruptedException, IOException { + + LogItem commitItem = queue.poll(waitTime, TimeUnit.MILLISECONDS); + if (commitItem != null) { + return new OutputWireRecord(envImpl, commitItem) ; + } + return null; + } + + public StatGroup loadStats(StatsConfig config) + throws DatabaseException { + StatGroup copyStats = stats.cloneGroup(config.getClear()); + return copyStats; + } + + @Override + public String dumpState() { + return null; + } +} diff --git a/src/com/sleepycat/je/rep/stream/ArbiterFeederStatDefinition.java b/src/com/sleepycat/je/rep/stream/ArbiterFeederStatDefinition.java new file mode 100644 index 0000000..02b41a8 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/ArbiterFeederStatDefinition.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for HA Arbiter feeder statistics. + */ +public class ArbiterFeederStatDefinition { + + public static final String GROUP_NAME = "ArbiterFeeder"; + public static final String GROUP_DESC = "ArbiterFeeder statistics"; + + public static StatDefinition QUEUE_FULL = + new StatDefinition("queueFull", "Number of times a item could " + + "not be queued because the queue was full."); +} diff --git a/src/com/sleepycat/je/rep/stream/BaseProtocol.java b/src/com/sleepycat/je/rep/stream/BaseProtocol.java new file mode 100644 index 0000000..1f938e5 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/BaseProtocol.java @@ -0,0 +1,1321 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.stream; + +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_ACK_MESSAGES; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_GROUPED_ACKS; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_GROUP_ACK_MESSAGES; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MAX_GROUPED_ACKS; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.cbvlsn.GlobalCBVLSN; +import com.sleepycat.je.rep.utilint.BinaryProtocol; +import com.sleepycat.je.utilint.LongMaxStat; +import com.sleepycat.je.utilint.LongMaxZeroStat; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.VLSN; + +/** + * Defines the base protocol of messages used to set up a stream between + * source and target. + * + * Note BaseProtocol represents a set of basic protocol operations intended to + * be used by subclasses. For a complete description of message operations + * used in JE HA protocol, please see the Protocol class in the same package. + * + * @see com.sleepycat.je.rep.stream.Protocol + */ +public abstract class BaseProtocol extends BinaryProtocol { + + /* --------------------------- */ + /* --- protocol versions --- */ + /* --------------------------- */ + + /* + * Note that the GROUP_ACK response message was introduced in version 5, + * but is disabled by default via RepParams.REPLICA_ENABLE_GROUP_ACKS. + * + * It can be enabled when we can increase the protocol version number. + */ + + /* The default (highest) version supported by the Protocol code. */ + public static final int MAX_VERSION = 7; + + /* The minimum version we're willing to interact with. */ + static final int MIN_VERSION = 3; + + /* + * Version added in JE 7.5.6 to support entry request type + */ + public static final int VERSION_7 = 7; + public static final JEVersion VERSION_7_JE_VERSION = + new JEVersion("7.5.6"); + + /* + * Version added in JE 6.4.10 to support generic feeder filtering + */ + public static final int VERSION_6 = 6; + public static final JEVersion VERSION_6_JE_VERSION = + new JEVersion("6.4.10"); + + /* Version added in JE 6.0.1 to support RepGroupImpl version 3. */ + public static final int VERSION_5 = 5; + public static final JEVersion VERSION_5_JE_VERSION = + new JEVersion("6.0.1"); + + /* + * Version in which HEARTBEAT_RESPONSE added a second field. We can manage + * without this optional additional information if we have to, we we can + * still interact with the previous protocol version. (JE 5.0.58) + */ + static final int VERSION_4 = 4; + public static final JEVersion VERSION_4_JE_VERSION = + new JEVersion("5.0.58"); + + /* Version added in JE 4.0.50 to address byte order issues. 
*/ + static final int VERSION_3 = 3; + public static final JEVersion VERSION_3_JE_VERSION = + new JEVersion("4.0.50"); + + /* ------------------------------------------ */ + /* --- messages defined in base protocol --- */ + /* ------------------------------------------ */ + + /* range of op codes allowed in subclasses, inclusively. */ + protected final static short MIN_MESSAGE_OP_CODE_IN_SUBCLASS = 1024; + protected final static short MAX_MESSAGE_OP_CODE_IN_SUBCLASS = 2047; + + /* + * Following ops are core replication stream post-handshake messages + * defined in streaming protocol and are intended to be used in subclasses. + * + * Note these msg op codes inherit from original implementation of stream + * protocol. Due to backward compatibility requirement, we keep them + * unchanged and directly copy them here. + */ + public final static MessageOp ENTRY = + new MessageOp((short) 101, Entry.class); + + public final static MessageOp START_STREAM = + new MessageOp((short) 102, StartStream.class); + + public final static MessageOp HEARTBEAT = + new MessageOp((short) 103, Heartbeat.class); + + public final static MessageOp HEARTBEAT_RESPONSE = + new MessageOp((short) 104, HeartbeatResponse.class); + + public final static MessageOp COMMIT = + new MessageOp((short) 105, Commit.class); + + public final static MessageOp ACK = + new MessageOp((short) 106, Ack.class); + + public final static MessageOp ENTRY_REQUEST = + new MessageOp((short) 107, EntryRequest.class); + + public final static MessageOp ENTRY_NOTFOUND = + new MessageOp((short) 108, EntryNotFound.class); + + public final static MessageOp ALT_MATCHPOINT = + new MessageOp((short) 109, AlternateMatchpoint.class); + + public final static MessageOp RESTORE_REQUEST = + new MessageOp((short) 110, RestoreRequest.class); + + public final static MessageOp RESTORE_RESPONSE = + new MessageOp((short) 111, RestoreResponse.class); + + public final static MessageOp SHUTDOWN_REQUEST = + new MessageOp((short) 112, ShutdownRequest.class); + + public final static MessageOp SHUTDOWN_RESPONSE = + new MessageOp((short) 113, ShutdownResponse.class); + + public final static MessageOp GROUP_ACK = + new MessageOp((short) 114, GroupAck.class); + + /* --------------------------- */ + /* -------- fields --------- */ + /* --------------------------- */ + + /** The log version of the format used to write log entries to the stream. */ + protected int streamLogVersion; + + /* Count of all singleton ACK messages. */ + protected final LongStat nAckMessages; + + /* Count of all group ACK messages. */ + protected final LongStat nGroupAckMessages; + + /* Sum of all acks sent via group ACK messages. */ + protected final LongStat nGroupedAcks; + + /* Max number of acks sent via a single group ACK message. */ + protected final LongMaxStat nMaxGroupedAcks; + + protected final RepImpl repImpl; + + /** + * Whether to fix the log version for log entries received from JE 7.0.x + * feeders that use log version 12 format but are incorrectly marked with + * later log versions due to a bug ([#25222]). The problem is that the + * feeder supplies an entry in log version 12 (LOG_VERSION_EXPIRE_INFO) + * format, but says it has a later log version. + * + *

        This field is only set to true by the replica, which only reads and + * writes it from the main Replica thread, so no synchronization is needed. + */ + private boolean fixLogVersion12Entries = false; + + /** + * Returns a BaseProtocol object configured that implements the specified + * (supported) protocol version. + * + * @param repImpl the node using the protocol + * + * @param nameIdPair name-id pair of the node using the protocol + * + * @param protocolVersion the version of the protocol that must be + * implemented by this object + * + * @param maxProtocolVersion the highest supported protocol version, which + * may be lower than the code version, for testing purposes + * + * @param streamLogVersion the log version of the format used to write log + * entries + * + * @param protocolOps the message operations that make up this protocol + * + * @param checkValidity whether to check the message operations for + * validity. Checks should be performed for new protocols, but + * suppressed for legacy ones. + */ + BaseProtocol(final RepImpl repImpl, + final NameIdPair nameIdPair, + final int protocolVersion, + final int maxProtocolVersion, + final int streamLogVersion, + final MessageOp[] protocolOps, + final boolean checkValidity) { + super(nameIdPair, maxProtocolVersion, protocolVersion, repImpl); + this.streamLogVersion = streamLogVersion; + this.repImpl = repImpl; + + nAckMessages = new LongStat(stats, N_ACK_MESSAGES); + nGroupAckMessages = new LongStat(stats, N_GROUP_ACK_MESSAGES); + nGroupedAcks = new LongStat(stats, N_GROUPED_ACKS); + nMaxGroupedAcks = new LongMaxZeroStat(stats, N_MAX_GROUPED_ACKS); + initializeMessageOps(protocolOps, checkValidity); + } + + /** + * Returns a BaseProtocol object configured that implements the specified + * (supported) protocol version, with enforced message operation + * code validity check. + * + * This constructor enforces checking validity of message operation code. + * It should be used in any subclass except the legacy HA protocol in + * je.stream.Protocol. + * + * @param repImpl the node using the protocol + * + * @param nameIdPair name-id pair of the node using the protocol + * + * @param protocolVersion the version of the protocol that must be + * implemented by this object + * + * @param maxProtocolVersion the highest supported protocol version, which + * may be lower than the code version, for testing purposes + * + * @param streamLogVersion the log version of the format used to write log + * entries + * + * @param protocolOps the message operations that make up this protocol + */ + protected BaseProtocol(final RepImpl repImpl, + final NameIdPair nameIdPair, + final int protocolVersion, + final int maxProtocolVersion, + final int streamLogVersion, + final MessageOp[] protocolOps) { + + this(repImpl, nameIdPair, protocolVersion, maxProtocolVersion, + streamLogVersion, protocolOps, true); + } + + public int getStreamLogVersion() { + return streamLogVersion; + } + + /** + * Invoked in cases where the stream log version is not known at the time + * the protocol object is created and stream log version negotiations are + * required to determine the version that will be used for log records sent + * over the HA stream. + * + * @param streamLogVersion the maximum log version associated with stream + * records + */ + public void setStreamLogVersion(int streamLogVersion) { + this.streamLogVersion = streamLogVersion; + } + + /** + * Returns whether log entries need their log versions fixed to work around + * [#25222]. 
+ */ + public boolean getFixLogVersion12Entries() { + return fixLogVersion12Entries; + } + + /** + * Sets whether log entries need their log versions fixed to work around + * [#25222]. + */ + public void setFixLogVersion12Entries(boolean value) { + fixLogVersion12Entries = value; + } + + /* ------------------------------------------------- */ + /* --- message classes defined in base protocol --- */ + /* ------------------------------------------------- */ + + /** + * A message containing a log entry in the replication stream. + */ + public class Entry extends Message { + + /* + * InputWireRecord is set when this Message had been received at this + * node. OutputWireRecord is set when this message is created for + * sending from this node. + */ + final protected InputWireRecord inputWireRecord; + protected OutputWireRecord outputWireRecord; + + public Entry(final OutputWireRecord outputWireRecord) { + inputWireRecord = null; + this.outputWireRecord = outputWireRecord; + } + + @Override + public MessageOp getOp() { + return ENTRY; + } + + @Override + public ByteBuffer wireFormat() { + final int bodySize = getWireSize(); + final ByteBuffer messageBuffer = + allocateInitializedBuffer(bodySize); + writeOutputWireRecord(outputWireRecord, messageBuffer); + messageBuffer.flip(); + return messageBuffer; + } + + protected int getWireSize() { + return outputWireRecord.getWireSize(streamLogVersion); + } + + public Entry(final ByteBuffer buffer) + throws DatabaseException { + + inputWireRecord = + new InputWireRecord(repImpl, buffer, BaseProtocol.this); + } + + public InputWireRecord getWireRecord() { + return inputWireRecord; + } + + @Override + public String toString() { + + final StringBuilder sb = new StringBuilder(); + sb.append(super.toString()); + + if (inputWireRecord != null) { + sb.append(" "); + sb.append(inputWireRecord); + } + + if (outputWireRecord != null) { + sb.append(" "); + sb.append(outputWireRecord); + } + + return sb.toString(); + } + + /* For unit test support */ + @Override + public boolean match(Message other) { + + /* + * This message was read in, but we need to compare it to a message + * that was sent out. + */ + if (outputWireRecord == null) { + outputWireRecord = new OutputWireRecord(repImpl, + inputWireRecord); + } + return super.match(other); + } + + /* True if the log entry is a TxnAbort or TxnCommit. */ + public boolean isTxnEnd() { + final byte entryType = getWireRecord().getEntryType(); + return LogEntryType.LOG_TXN_COMMIT.equalsType(entryType) || + LogEntryType.LOG_TXN_ABORT.equalsType(entryType); + + } + } + + /** + * StartStream indicates that the replica would like the feeder to start + * the replication stream at the proposed vlsn. 
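+ *
+ * Since protocol version 6, the message body is the 8-byte start VLSN
+ * followed by a 4-byte filter length and, when that length is non-zero,
+ * the Java-serialized feeder filter; earlier versions carry only the
+ * VLSN (see wireFormat() below). A minimal construction sketch,
+ * illustrative only: the protocol instance and the VLSN value are
+ * assumptions, and the constructors are package-private.
+ * <pre>{@code
+ * BaseProtocol.StartStream msg = protocol.new StartStream(new VLSN(42));
+ * ByteBuffer wire = msg.wireFormat(); // body ready to send to the feeder
+ * }</pre>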
+ */ + public class StartStream extends VLSNMessage { + private final FeederFilter feederFilter; + + StartStream(VLSN startVLSN) { + super(startVLSN); + feederFilter = null; + } + + StartStream(VLSN startVLSN, FeederFilter filter) { + super(startVLSN); + feederFilter = filter; + } + + public StartStream(ByteBuffer buffer) { + super(buffer); + + /* Feeder filtering not supported before protocol version 6 */ + if (getVersion() < VERSION_6) { + feederFilter = null; + return; + } + + final int length = LogUtils.readInt(buffer); + if (length == 0) { + /* no filter is provided by client */ + feederFilter = null; + return; + } + + /* reconstruct filter from buffer */ + final byte filterBytes[] = + LogUtils.readBytesNoLength(buffer, length); + final ByteArrayInputStream bais = + new ByteArrayInputStream(filterBytes); + ObjectInputStream ois = null; + try { + ois = new ObjectInputStream(bais); + feederFilter = (FeederFilter) ois.readObject(); + } catch (ClassNotFoundException | IOException e) { + logger.warning(e.getLocalizedMessage()); + throw new IllegalStateException(e); + } finally { + if (ois != null) { + try { + ois.close(); + } catch (IOException e) { + logger.finest("exception raised when closing the " + + "object input stream object " + + e.getLocalizedMessage()); + } + } + } + } + + public FeederFilter getFeederFilter() { + return feederFilter; + } + + @Override + public ByteBuffer wireFormat() { + /* Feeder filtering not supported before protocol version 6 */ + if (getVersion() < VERSION_6) { + return super.wireFormat(); + } + + final int feederBufferSize; + final byte[] filterBytes; + + if (feederFilter != null) { + final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = null; + try { + oos = new ObjectOutputStream(baos); + oos.writeObject(feederFilter); + oos.flush(); + } catch (IOException e) { + logger.warning(e.getLocalizedMessage()); + throw new IllegalStateException(e); + } finally { + if (oos != null) { + try { + oos.close(); + } catch (IOException e) { + logger.finest("exception raised when closing the " + + "object output stream object " + + e.getLocalizedMessage()); + } + } + } + filterBytes = baos.toByteArray(); + feederBufferSize = filterBytes.length; + } else { + filterBytes = null; + feederBufferSize = 0; + } + + /* build message buffer */ + final int bodySize = wireFormatSize() + 4 + feederBufferSize; + final ByteBuffer messageBuffer = + allocateInitializedBuffer(bodySize); + /* write 8 bytes of VLSN */ + LogUtils.writeLong(messageBuffer, vlsn.getSequence()); + /* write 4 bytes of feeder buf size */ + LogUtils.writeInt(messageBuffer, feederBufferSize); + /* write feeder buffer */ + if (feederBufferSize > 0) { + LogUtils.writeBytesNoLength(messageBuffer, filterBytes); + } + messageBuffer.flip(); + return messageBuffer; + } + + @Override + public MessageOp getOp() { + return START_STREAM; + } + + @Override + public String toString() { + String filterString = (feederFilter == null) ? 
"[no filtering]" : + feederFilter.toString(); + + return super.toString() + " " + filterString; + } + } + + public class Heartbeat extends Message { + + private final long masterNow; + private final long currentTxnEndVLSN; + + public Heartbeat(long masterNow, long currentTxnEndVLSN) { + this.masterNow = masterNow; + this.currentTxnEndVLSN = currentTxnEndVLSN; + } + + @Override + public MessageOp getOp() { + return HEARTBEAT; + } + + @Override + public ByteBuffer wireFormat() { + int bodySize = 8 * 2 /* masterNow + currentTxnEndVLSN */; + ByteBuffer messageBuffer = allocateInitializedBuffer(bodySize); + LogUtils.writeLong(messageBuffer, masterNow); + LogUtils.writeLong(messageBuffer, currentTxnEndVLSN); + messageBuffer.flip(); + return messageBuffer; + } + + public Heartbeat(ByteBuffer buffer) { + masterNow = LogUtils.readLong(buffer); + currentTxnEndVLSN = LogUtils.readLong(buffer); + } + + public long getMasterNow() { + return masterNow; + } + + public long getCurrentTxnEndVLSN() { + return currentTxnEndVLSN; + } + + @Override + public String toString() { + return super.toString() + " masterNow=" + masterNow + + " currentCommit=" + currentTxnEndVLSN; + } + } + + public class HeartbeatResponse extends Message { + + /* + * The latest syncupVLSN. If the GlobalCBVLSN is defunct, this field + * contains a null VLSN and is unused. If the GlobalCBVLSN is not + * defunct: + * - When sent by an arbiter or subscriber, this field is null + * and unused. + * - When sent by a replica, this is the replica's local CBVLSN and + * is used for updating the GlobalCBVLSN on the master. + */ + private final VLSN syncupVLSN; + + /* The latest commit/abort VLSN on the replica/arbiter/subscriber. */ + private final VLSN txnEndVLSN; + + public HeartbeatResponse(VLSN syncupVLSN, VLSN ackedVLSN) { + super(); + this.syncupVLSN = syncupVLSN; + this.txnEndVLSN = ackedVLSN; + } + + public HeartbeatResponse(ByteBuffer buffer) { + syncupVLSN = new VLSN(LogUtils.readLong(buffer)); + txnEndVLSN = + getVersion() >= VERSION_4 ? + new VLSN(LogUtils.readLong(buffer)) : + null; + } + + @Override + public MessageOp getOp() { + return HEARTBEAT_RESPONSE; + } + + @Override + public ByteBuffer wireFormat() { + boolean includeTxnEndVLSN = getVersion() >= VERSION_4; + int bodySize = includeTxnEndVLSN ? + 8 * 2 : + 8; + ByteBuffer messageBuffer = allocateInitializedBuffer(bodySize); + LogUtils.writeLong(messageBuffer, syncupVLSN.getSequence()); + if (includeTxnEndVLSN) { + LogUtils.writeLong(messageBuffer, txnEndVLSN.getSequence()); + } + messageBuffer.flip(); + return messageBuffer; + } + + public VLSN getSyncupVLSN() { + return syncupVLSN; + } + + public VLSN getTxnEndVLSN() { + return txnEndVLSN; + } + + @Override + public String toString() { + return super.toString() + + " txnEndVLSN=" + txnEndVLSN + + " syncupVLSN=" + syncupVLSN; + } + } + + /** + * Message of a commit op + */ + public class Commit extends Entry { + private final boolean needsAck; + private final SyncPolicy replicaSyncPolicy; + + public Commit(final boolean needsAck, + final SyncPolicy replicaSyncPolicy, + final OutputWireRecord wireRecord) { + super(wireRecord); + this.needsAck = needsAck; + this.replicaSyncPolicy = replicaSyncPolicy; + } + + @Override + public MessageOp getOp() { + return COMMIT; + } + + @Override + public ByteBuffer wireFormat() { + final int bodySize = super.getWireSize() + + 1 /* needsAck */ + + 1 /* replica sync policy */; + final ByteBuffer messageBuffer = + allocateInitializedBuffer(bodySize); + messageBuffer.put((byte) (needsAck ? 
1 : 0)); + messageBuffer.put((byte) replicaSyncPolicy.ordinal()); + writeOutputWireRecord(outputWireRecord, messageBuffer); + messageBuffer.flip(); + return messageBuffer; + } + + public Commit(final ByteBuffer buffer) + throws DatabaseException { + + this(getByteNeedsAck(buffer.get()), + getByteReplicaSyncPolicy(buffer.get()), + buffer); + } + + private Commit(final boolean needsAck, + final SyncPolicy replicaSyncPolicy, + final ByteBuffer buffer) + throws DatabaseException { + + super(buffer); + this.needsAck = needsAck; + this.replicaSyncPolicy = replicaSyncPolicy; + } + + public boolean getNeedsAck() { + return needsAck; + } + + public SyncPolicy getReplicaSyncPolicy() { + return replicaSyncPolicy; + } + } + + /** + * Message of an ack op + */ + public class Ack extends Message { + + private final long txnId; + + public Ack(long txnId) { + super(); + this.txnId = txnId; + nAckMessages.increment(); + } + + @Override + public MessageOp getOp() { + return ACK; + } + + @Override + public ByteBuffer wireFormat() { + int bodySize = 8; + ByteBuffer messageBuffer = allocateInitializedBuffer(bodySize); + LogUtils.writeLong(messageBuffer, txnId); + messageBuffer.flip(); + return messageBuffer; + } + + public Ack(ByteBuffer buffer) { + txnId = LogUtils.readLong(buffer); + } + + public long getTxnId() { + return txnId; + } + + @Override + public String toString() { + return super.toString() + " txn " + txnId; + } + } + + /** + * A replica node asks a feeder for the log entry at this VLSN. + */ + public class EntryRequest extends VLSNMessage { + + private final EntryRequestType type; + + EntryRequest(VLSN matchpoint) { + super(matchpoint); + type = EntryRequestType.DEFAULT; + } + + EntryRequest(VLSN matchpoint, EntryRequestType type) { + super(matchpoint); + this.type = type; + } + + public EntryRequest(ByteBuffer buffer) { + super(buffer); + + /* entry request type not supported before protocol version 7 */ + if (getVersion() < VERSION_7) { + type = EntryRequestType.DEFAULT; + return; + } + + final int i = LogUtils.readInt(buffer); + type = EntryRequestType.values()[i]; + } + + public EntryRequestType getType() { + return type; + } + + @Override + public ByteBuffer wireFormat() { + + /* type not supported before protocol version 7 */ + if (getVersion() < VERSION_7) { + return super.wireFormat(); + } + + /* build message buffer */ + final int bodySize = wireFormatSize(); + final ByteBuffer messageBuffer = + allocateInitializedBuffer(bodySize); + /* write 8 bytes of VLSN */ + LogUtils.writeLong(messageBuffer, vlsn.getSequence()); + /* write 4 bytes of type */ + LogUtils.writeInt(messageBuffer, type.ordinal()); + messageBuffer.flip(); + return messageBuffer; + } + + @Override + public int wireFormatSize() { + /* type not supported before protocol version 7 */ + if (getVersion() < VERSION_7) { + return super.wireFormatSize(); + } + + return super.wireFormatSize() + 4; + } + + @Override + public MessageOp getOp() { + return ENTRY_REQUEST; + } + + @Override + public String toString() { + return "entry request vlsn: " + super.toString() + + ", type: " + type; + } + } + + /** + * Type of entry request sent to feeder + * + * RV: VLSN requested by client + * LOW: low end of available VLSN range in vlsn index + * HIGH: high end of available VLSN range in vlsn index + * + * The DEFAULT mode is used by existing replication stream consumer e.g. + * replica, arbiter, secondary nodes, etc, while the others are only used + * in subscription (je.rep.subscription). 
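+ *
+ * The matrix below gives, for each request type, what the feeder returns
+ * when the requested VLSN (RV) falls below, inside, or above the range
+ * [LOW, HIGH] currently covered by its VLSN index: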
+ * + * ------------------------------------------------------------------- + * MODE | RV < LOW | RV in [LOW, HIGH] | RV > HIGH + * ------------------------------------------------------------------- + * DEFAULT | NOT_FOUND | REQUESTED ENTRY | ALT MATCH POINT + * AVAILABLE | LOW | REQUESTED ENTRY | HIGH + * NOW | HIGH | HIGH | HIGH + */ + public enum EntryRequestType { + DEFAULT, + AVAILABLE, + NOW + } + + /** + * Response when the EntryRequest asks for a VLSN that is below the VLSN + * range covered by the Feeder. + */ + public class EntryNotFound extends Message { + + public EntryNotFound() { + } + + public EntryNotFound(@SuppressWarnings("unused") ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return ENTRY_NOTFOUND; + } + } + + public class AlternateMatchpoint extends Message { + + private final InputWireRecord alternateInput; + private OutputWireRecord alternateOutput = null; + + AlternateMatchpoint(final OutputWireRecord alternate) { + alternateInput = null; + this.alternateOutput = alternate; + } + + @Override + public MessageOp getOp() { + return ALT_MATCHPOINT; + } + + @Override + public ByteBuffer wireFormat() { + final int bodySize = alternateOutput.getWireSize(streamLogVersion); + final ByteBuffer messageBuffer = + allocateInitializedBuffer(bodySize); + writeOutputWireRecord(alternateOutput, messageBuffer); + messageBuffer.flip(); + return messageBuffer; + } + + public AlternateMatchpoint(final ByteBuffer buffer) + throws DatabaseException { + alternateInput = + new InputWireRecord(repImpl, buffer, BaseProtocol.this); + } + + public InputWireRecord getAlternateWireRecord() { + return alternateInput; + } + + /* For unit test support */ + @Override + public boolean match(Message other) { + + /* + * This message was read in, but we need to compare it to a message + * that was sent out. + */ + if (alternateOutput == null) { + alternateOutput = + new OutputWireRecord(repImpl, alternateInput); + } + return super.match(other); + } + } + + /** + * Request from the replica to the feeder for sufficient information to + * start a network restore. + */ + public class RestoreRequest extends VLSNMessage { + + RestoreRequest(VLSN failedMatchpoint) { + super(failedMatchpoint); + } + + public RestoreRequest(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return RESTORE_REQUEST; + } + } + + /** + * Response when the replica needs information to instigate a network + * restore. The message contains a set of nodes that could be used as the + * basis for a NetworkBackup so that the request node can become current + * again. + * + *

        In addition, when support is needed for older replica nodes that use + * the GlobalCBVLSN, a vlsn is included. See + * {@link GlobalCBVLSN#getRestoreResponseVLSN}.
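+ *
+ * On the wire, the response is the 8-byte CBVLSN sequence followed by a
+ * 4-byte node count and each log-provider RepNodeImpl serialized as a
+ * length-prefixed byte array, using the group format that matches the
+ * negotiated protocol version (see getGroupFormatVersion() below).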

        + */ + public class RestoreResponse extends SimpleMessage { + /* Is null VLSN and unused if the GlobalCBVLSN is defunct. */ + private final VLSN cbvlsn; + + private final RepNodeImpl[] logProviders; + + public RestoreResponse(VLSN cbvlsn, RepNodeImpl[] logProviders) { + this.cbvlsn = cbvlsn; + this.logProviders = logProviders; + } + + public RestoreResponse(ByteBuffer buffer) { + long vlsnSequence = LogUtils.readLong(buffer); + cbvlsn = new VLSN(vlsnSequence); + logProviders = getRepNodeImplArray(buffer); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(cbvlsn.getSequence(), logProviders); + } + + /* Add support for RepNodeImpl arrays. */ + + @Override + protected void putWireFormat(final ByteBuffer buffer, + final Object obj) { + if (obj.getClass() == RepNodeImpl[].class) { + putRepNodeImplArray(buffer, (RepNodeImpl[]) obj); + } else { + super.putWireFormat(buffer, obj); + } + } + + @Override + protected int wireFormatSize(final Object obj) { + if (obj.getClass() == RepNodeImpl[].class) { + return getRepNodeImplArraySize((RepNodeImpl[]) obj); + } + return super.wireFormatSize(obj); + } + + private void putRepNodeImplArray(final ByteBuffer buffer, + final RepNodeImpl[] ra) { + LogUtils.writeInt(buffer, ra.length); + final int groupFormatVersion = getGroupFormatVersion(); + for (final RepNodeImpl node : ra) { + putByteArray( + buffer, + RepGroupImpl.serializeBytes(node, groupFormatVersion)); + } + } + + private RepNodeImpl[] getRepNodeImplArray(final ByteBuffer buffer) { + final RepNodeImpl[] ra = new RepNodeImpl[LogUtils.readInt(buffer)]; + final int groupFormatVersion = getGroupFormatVersion(); + for (int i = 0; i < ra.length; i++) { + ra[i] = RepGroupImpl.deserializeNode( + getByteArray(buffer), groupFormatVersion); + } + return ra; + } + + private int getRepNodeImplArraySize(RepNodeImpl[] ra) { + int size = 4; /* array length */ + final int groupFormatVersion = getGroupFormatVersion(); + for (final RepNodeImpl node : ra) { + size += (4 /* Node size */ + + RepGroupImpl.serializeBytes(node, groupFormatVersion) + .length); + } + return size; + } + + /** + * Returns the RepGroupImpl version to use for the currently configured + * protocol version. + */ + private int getGroupFormatVersion() { + return (getVersion() < VERSION_5) ? + RepGroupImpl.FORMAT_VERSION_2 : + RepGroupImpl.MAX_FORMAT_VERSION; + } + + @Override + public MessageOp getOp() { + return RESTORE_RESPONSE; + } + + RepNodeImpl[] getLogProviders() { + return logProviders; + } + + VLSN getCBVLSN() { + return cbvlsn; + } + } + + /** + * Message used to shutdown a node + */ + public class ShutdownRequest extends SimpleMessage { + /* The time that the shutdown was initiated on the master. */ + private final long shutdownTimeMs; + + public ShutdownRequest(long shutdownTimeMs) { + super(); + this.shutdownTimeMs = shutdownTimeMs; + } + + @Override + public MessageOp getOp() { + return SHUTDOWN_REQUEST; + } + + public ShutdownRequest(ByteBuffer buffer) { + shutdownTimeMs = LogUtils.readLong(buffer); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(shutdownTimeMs); + } + + public long getShutdownTimeMs() { + return shutdownTimeMs; + } + } + + /** + * Message in response to a shutdown request. 
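+ * The response carries no payload; its receipt alone acknowledges the
+ * shutdown request.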
+ */ + public class ShutdownResponse extends Message { + + public ShutdownResponse() { + super(); + } + + @Override + public MessageOp getOp() { + return SHUTDOWN_RESPONSE; + } + + public ShutdownResponse(@SuppressWarnings("unused") ByteBuffer buffer) { + } + } + + public class GroupAck extends Message { + + private final long txnIds[]; + + public GroupAck(long txnIds[]) { + super(); + this.txnIds = txnIds; + nGroupAckMessages.increment(); + nGroupedAcks.add(txnIds.length); + nMaxGroupedAcks.setMax(txnIds.length); + } + + @Override + public MessageOp getOp() { + return GROUP_ACK; + } + + @Override + public ByteBuffer wireFormat() { + + final int bodySize = 4 + 8 * txnIds.length; + final ByteBuffer messageBuffer = + allocateInitializedBuffer(bodySize); + + putLongArray(messageBuffer, txnIds); + messageBuffer.flip(); + + return messageBuffer; + } + + public GroupAck(ByteBuffer buffer) { + txnIds = readLongArray(buffer); + } + + public long[] getTxnIds() { + return txnIds; + } + + @Override + public String toString() { + return super.toString() + " txn " + Arrays.toString(txnIds); + } + } + + /** + * Base class for messages which contain only a VLSN + */ + protected abstract class VLSNMessage extends Message { + protected final VLSN vlsn; + + VLSNMessage(VLSN vlsn) { + super(); + this.vlsn = vlsn; + } + + public VLSNMessage(ByteBuffer buffer) { + long vlsnSequence = LogUtils.readLong(buffer); + vlsn = new VLSN(vlsnSequence); + } + + @Override + public ByteBuffer wireFormat() { + int bodySize = wireFormatSize(); + ByteBuffer messageBuffer = allocateInitializedBuffer(bodySize); + LogUtils.writeLong(messageBuffer, vlsn.getSequence()); + messageBuffer.flip(); + return messageBuffer; + } + + int wireFormatSize() { + return 8; + } + + VLSN getVLSN() { + return vlsn; + } + + @Override + public String toString() { + return super.toString() + " " + vlsn; + } + } + + /** + * Base class for all protocol handshake messages. + */ + protected abstract class HandshakeMessage extends SimpleMessage { + } + + /** + * Version broadcasts the sending node's protocol version. + */ + protected abstract class ProtocolVersion extends HandshakeMessage { + private final int version; + + @SuppressWarnings("hiding") + private final NameIdPair nameIdPair; + + public ProtocolVersion(int version) { + super(); + this.version = version; + this.nameIdPair = BaseProtocol.this.nameIdPair; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(version, nameIdPair); + } + + public ProtocolVersion(ByteBuffer buffer) { + version = LogUtils.readInt(buffer); + nameIdPair = getNameIdPair(buffer); + } + + /** + * @return the version + */ + protected int getVersion() { + return version; + } + + /** + * The nodeName of the sender + * + * @return nodeName + */ + protected NameIdPair getNameIdPair() { + return nameIdPair; + } + } + + /* ---------------------------------------- */ + /* --- end of message class definition --- */ + /* ---------------------------------------- */ + + /** + * Write an entry output wire record to the message buffer using the write + * log version format and increment nEntriesWrittenOldVersion if the entry + * format was changed. 
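+ *
+ * In other words, the record is serialized in streamLogVersion format,
+ * so a feeder that negotiated an older stream log version with its
+ * replica down-converts newer-format entries here, and the statistic
+ * records how often that happened.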
+ */ + protected void writeOutputWireRecord(final OutputWireRecord record, + final ByteBuffer messageBuffer) { + final boolean changedFormat = + record.writeToWire(messageBuffer, streamLogVersion); + if (changedFormat) { + nEntriesWrittenOldVersion.increment(); + } + } + + /** + * Initializes message ops and check if is valid within allocated range + * + * @param protocolOps ops to be initialized + * @param checkValidity true if check validity of op code + */ + private void initializeMessageOps(MessageOp[] protocolOps, + boolean checkValidity) { + + if (checkValidity) { + /* Check if op code is valid before initialization */ + for (MessageOp op : protocolOps) { + if (!isValidMsgOpCode(op.getOpId())) { + throw EnvironmentFailureException.unexpectedState + ("Op id: " + op.getOpId() + + " is out of allowed range inclusively [" + + MIN_MESSAGE_OP_CODE_IN_SUBCLASS + ", " + + MAX_MESSAGE_OP_CODE_IN_SUBCLASS + "]"); + } + } + } + initializeMessageOps(protocolOps); + } + + /** + * Returns whether the byte value specifies that an acknowledgment is + * needed. + */ + private static boolean getByteNeedsAck(final byte needsAckByte) { + switch (needsAckByte) { + case 0: + return false; + case 1: + return true; + default: + throw EnvironmentFailureException.unexpectedState( + "Invalid bool ordinal: " + needsAckByte); + } + } + + /** Checks if op code defined in subclass fall in pre-allocated range */ + private static boolean isValidMsgOpCode(short opId) { + + return (opId <= MAX_MESSAGE_OP_CODE_IN_SUBCLASS) && + (opId >= MIN_MESSAGE_OP_CODE_IN_SUBCLASS); + } + + /** Returns the sync policy specified by the argument. */ + private static SyncPolicy getByteReplicaSyncPolicy( + final byte syncPolicyByte) { + + for (final SyncPolicy p : SyncPolicy.values()) { + if (p.ordinal() == syncPolicyByte) { + return p; + } + } + throw EnvironmentFailureException.unexpectedState( + "Invalid sync policy ordinal: " + syncPolicyByte); + } + + /* Writes array of longs into buffer */ + private void putLongArray(ByteBuffer buffer, long[] la) { + LogUtils.writeInt(buffer, la.length); + + for (long l : la) { + LogUtils.writeLong(buffer, l); + } + } + + /* Reads array of longs from buffer */ + private long[] readLongArray(ByteBuffer buffer) { + final long la[] = new long[LogUtils.readInt(buffer)]; + + for (int i = 0; i < la.length; i++) { + la[i] = LogUtils.readLong(buffer); + } + + return la; + } +} diff --git a/src/com/sleepycat/je/rep/stream/FeederFilter.java b/src/com/sleepycat/je/rep/stream/FeederFilter.java new file mode 100644 index 0000000..4342bf7 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederFilter.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import com.sleepycat.je.rep.impl.RepImpl; + +/** + * The FeederFilter is used by the Feeder to determine whether a record should + * be sent to the Replica. The filter object is created at the replica and is + * transmitted to the Feeder as part of the syncup process. 
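+ * Because the filter object is shipped to the Feeder via Java
+ * serialization (see StartStream.wireFormat()), an implementation must
+ * also implement java.io.Serializable. A minimal pass-through sketch,
+ * assuming nothing beyond this interface (the class name is
+ * illustrative):
+ * <pre>{@code
+ * public class PassThroughFilter implements FeederFilter, Serializable {
+ *     private static final long serialVersionUID = 1L;
+ *
+ *     @Override
+ *     public OutputWireRecord execute(OutputWireRecord record,
+ *                                     RepImpl repImpl) {
+ *         return record; // a real filter returns null to drop a record
+ *     }
+ *
+ *     @Override
+ *     public String[] getTableIds() {
+ *         return null; // null means: subscribe to all tables
+ *     }
+ * }
+ * }</pre>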
The filter thus + * represents replica code that is running inside the Feeder, that is, the + * computation has been moved closer to the data and can be used to eliminate + * unnecessary network communication overheads. + */ +public interface FeederFilter { + + /** + * The execute method that invoked before a record is sent to the replica. + * If the filter returns null, the feeder will not send the record to the + * replica as part of the replication stream, since it's not of interest + * to the replica. It can for example be used to filter out tables that + * are not of interest to the replica. + * + * @param record the record to be filtered + * @param repImpl repImpl of the RN where the filter is executed + * + * @return the original input record if it is to be sent to the replica. + * null if it's to be skipped. + */ + OutputWireRecord execute(final OutputWireRecord record, + final RepImpl repImpl); + + + /** + * Gets arrays of subscribed table ids. If null or array length is 0, + * that means the subscriber would subscribe all tables in the store. + * + * @return arrays of subscribed table ids + */ + String[] getTableIds(); +} diff --git a/src/com/sleepycat/je/rep/stream/FeederReader.java b/src/com/sleepycat/je/rep/stream/FeederReader.java new file mode 100644 index 0000000..97cf4e8 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederReader.java @@ -0,0 +1,558 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.log.FileHandle; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogBuffer; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNIndex.ForwardVLSNScanner; +import com.sleepycat.je.rep.vlsn.VLSNIndex.WaitTimeOutException; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * The FeederReader is a flavor of VLSNReader which supports replication + * stream feeding. It assumes that reading will always go forward in the log. + * Special features are: + * + * - The reader can read either from a log buffer or from the file. Sometimes + * log entries are logged but are not yet available on disk. In general, it's + * better to read from the log buffers rather then the file. + * + * - The reader can block for a given time period, waiting for the next vlsn to + * appear + */ +public class FeederReader extends VLSNReader { + + /* The scanner is a cursor over the VLSNIndex. 
*/ + private final ForwardVLSNScanner scanner; + + /* The reader has never been used before, it needs to be initialized. */ + private boolean initDone = false; + + /* + * A constantly resetting counter of hits in the log item cache. This + * serves as state that lets the FeederReader know that its position in the + * log files might have become stale, due to cache hits. Because it's reset + * midstream, it is not an accurate statistics for cache hits. + */ + private long prevCacheHits = 0; + + /* + * If true, the FeederReader will always read directly from the log, and + * will not use the vlsnIndex LogItem cache. Should only be used for + * unit tests! + */ + private final boolean bypassCache; + + public FeederReader(EnvironmentImpl envImpl, + VLSNIndex vlsnIndex, + long startLsn, + int readBufferSize) { + + this(envImpl, vlsnIndex, startLsn, readBufferSize, + false /*bypassCache*/); + } + + /** + * @param bypassCache For unit testing only!! Bypass the VLSNIndex cache. + */ + FeederReader(EnvironmentImpl envImpl, + VLSNIndex vlsnIndex, + long startLsn, + int readBufferSize, + boolean bypassCache) { + super(envImpl, + vlsnIndex, + true, // forward + startLsn, + readBufferSize, + DbLsn.NULL_LSN); // finishLsn + + scanner = new ForwardVLSNScanner(vlsnIndex); + this.bypassCache = bypassCache; + } + + /** + * Use a ReadWindow which can read from LogBuffers as well as the physical + * file. + * @throws DatabaseException + */ + @Override + protected ReadWindow makeWindow(int readBufferSize) { + + return new SwitchWindow(readBufferSize, envImpl); + } + + /** + * Set up the FeederReader to start scanning from this VLSN. If we find a + * mapping for this VLSN, we'll start precisely at its LSN, else we'll have + * to start from an earlier location. This initialization can't be done in + * the constructor, because the Feeder is set up to require the + * construction of the source before we know where to start. + * + * @throws IOException + * + * @return the startLsn + */ + public long initScan(VLSN startVLSN) + throws IOException { + + if (startVLSN.equals(VLSN.NULL_VLSN)) { + throw EnvironmentFailureException.unexpectedState + ("startVLSN can't be null"); + } + + VLSNRange currentRange = vlsnIndex.getRange(); + VLSN startPoint = startVLSN; + if (currentRange.getLast().compareTo(startVLSN) < 0) { + /* + * When feeding, we may be starting at the VLSN following the last + * VLSN in the node. + */ + startPoint = currentRange.getLast(); + } + + startLsn = scanner.getStartingLsn(startPoint); + assert startLsn != DbLsn.NULL_LSN; + + window.initAtFileStart(startLsn); + nextEntryOffset = window.getEndOffset(); + currentVLSN = startVLSN; + + initDone = true; + return startLsn; + } + + /** + * Get file number of the last log entry returned. + */ + long getLastFile(OutputWireRecord record ) { + /* + * If the record has a LogItem (from the tip cache), then its LSN is + * the most current log position. + */ + final long lsn = record.getLogItemLSN(); + if (lsn != DbLsn.NULL_LSN) { + return DbLsn.getFileNumber(lsn); + } + /* Otherwise, the current log position is the FileReader position. */ + return window.currentFileNum(); + } + + /** + * Forward scanning for feeding the replica: get the log record for this + * VLSN. If the log record hasn't been created yet, wait for a period + * specified by "waitTime". + * + * Where possible, the FeederReader fetches the log record from the cache + * within the VLSNIndex. (See the VLSNIndex for a description of this two + * level cache). 
If the requested VLSN is not available from the cache, the + * reader fetches the item from the JE log -- either from the log buffers + * or from disk. + * + * The FeederReader is like a cursor on the log, and retains a position + * in the log. When there are log item cache hits, the FeederReader's + * position can fall behind, because it is being bypassed. It is possible + * for log cleaning to take place between the point of the FeederReader's + * stale position and the end of the log. If so, the FeederReader must + * not attempt to scan from its current position, because it might + * run afoul of gap created by the cleaned and delete log files. When + * there have been log item cache hits, the FeederReader must jump its + * position forward using the vlsnIndex mappings to safely skip over + * any cleaned gaps in the log. + */ + public OutputWireRecord scanForwards(VLSN vlsn, int waitTime) + throws InterruptedException { + + assert initDone; + + LogItem logItem = null; + + try { + logItem = vlsnIndex.waitForVLSN(vlsn, waitTime); + } catch (WaitTimeOutException e) { + /* This vlsn not yet available */ + return null; + } + + currentVLSN = vlsn; + + if ((logItem != null) && (!bypassCache)) { + + /* We've found the requested log item in the cache. */ + assert logItem.header.getVLSN().equals(vlsn); + prevCacheHits++; + return new OutputWireRecord(envImpl, logItem); + } + + /* + * We must go to the log for this requested VLSN. Use the VLSNIndex for + * the closest position in the log file to find the next replicated log + * entry. + * + * If there are no cache hits and the reader has been supplying log + * entries sequentially, we know that it is already positioned at the + * immediately preceding log entry and that we can scan from there to + * the current requested record. In that case, we are only hoping that + * the VLSNIndex can supply the exact location of the current requested + * record in order to reduce the scanning. + * + * If there have been cache hits, the reader's current position is some + * unknown distance back. In that case, scanning from the current + * position could run into a cleaned gap in the log files, and could + * fail. Because of that, we must reposition to a VLSN that is <= to + * the current requested VLSN. We know that such a VLSN must exist and + * have a valid lsn mapping, because the begin and end point in the + * vlsn range always exists. + */ + long repositionLsn; + if (prevCacheHits > 0) { + repositionLsn = scanner.getApproximateLsn(vlsn); + + /* + * Guard against sliding the window backwards. This could happen if + * by dint of previous scans, the reader is fortuitously positioned + * at a point in the log that is before the current target VLSN, + * but after any available mappings. For example, suppose the + * VLSNIndex has VLSNs 10, 50, 100. Suppose the reader is + * positioned at VLSN 20, and we have supplied VLSNs 21->40 from + * the cache. VLSN 41 has not hit in the cache, and we must fetch + * the log record from disk. We do not want to slide the + * FeederReader from its current position at 21 back to VLSN 10. + */ + if (DbLsn.compareTo(getLastLsn(), repositionLsn) >= 0) { + repositionLsn = DbLsn.NULL_LSN; + } + } else { + repositionLsn = scanner.getPreciseLsn(vlsn); + } + + /* + * We're going to start scanning, so reset the prevCacheHits field, and + * position the reader at the optimal spot. + */ + prevCacheHits = 0; + try { + /* setPosition is a noop if repositionLsn is null. 
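+ * ("null" here means DbLsn.NULL_LSN, the value repositionLsn was set
+ * to above when the window must not slide backwards.)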
*/ + setPosition(repositionLsn); + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_CHECKSUM, + "trying to reposition FeederReader to " + + DbLsn.getNoFormatString(repositionLsn) + " prevWindow=" + + window, e); + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + "Trying to reposition FeederReader to " + + DbLsn.getNoFormatString(repositionLsn) + + " for vlsn:" + vlsn + " prevWindow=" + window, e); + } + + if (readNextEntry()) { + return currentFeedRecord; + } + + throw EnvironmentFailureException.unexpectedState + (envImpl, "VLSN=" + vlsn + " repositionLsn = " + + DbLsn.getNoFormatString(repositionLsn) + window); + } + + /** + * @throw an EnvironmentFailureException if we were scanning for a + * particular VLSN and we have passed it by. + */ + private void checkForPassingTarget(int compareResult) { + if (compareResult > 0) { + /* Hey, we passed the VLSN we wanted. */ + throw EnvironmentFailureException.unexpectedState + ("want to read " + currentVLSN + " but reader at " + + currentEntryHeader.getVLSN()); + } + } + + /** + * Return true if this entry is replicated and its VLSN is currentVLSN. + */ + @Override + protected boolean isTargetEntry() { + nScanned++; + + if (currentEntryHeader.isInvisible()) { + return false; + } + + if (entryIsReplicated()) { + VLSN entryVLSN = currentEntryHeader.getVLSN(); + + int compareResult = entryVLSN.compareTo(currentVLSN); + checkForPassingTarget(compareResult); + + /* return true if this is the entry we want. */ + return (compareResult == 0); + } + + return false; + } + + /** + * The SwitchWindow can fill itself from either the log file or the log + * buffers. + */ + static class SwitchWindow extends ReadWindow { + + private final LogManager logManager; + + SwitchWindow(int readBufferSize, EnvironmentImpl envImpl) { + super(readBufferSize, envImpl); + + logManager = envImpl.getLogManager(); + } + + /* + * Reposition to the specified file, and fill starting at + * targetOffset. For this use case, we are always going forwards, and + * windowStartOffset should == targetOffset. Position the window's + * buffer to point at the log entry indicated by targetOffset + */ + @Override + public void slideAndFill(long windowFileNum, + long windowStartOffset, + long targetOffset, + boolean forward) + throws ChecksumException, + FileNotFoundException, + DatabaseException { + + if (!fillFromLogBuffer(windowFileNum, targetOffset)) { + /* The entry was not in the LogBufferPool. */ + super.slideAndFill(windowFileNum, + windowStartOffset, + targetOffset, + forward); + } + } + + /** + * Fill the read window's buffer from a LogBuffer. + * @return true if the read window was filled. + * @throws DatabaseException + */ + private boolean fillFromLogBuffer(long windowFileNum, + long targetOffset) + throws DatabaseException { + + LogBuffer logBuffer = null; + + try { + long fileLocation = DbLsn.makeLsn(windowFileNum, targetOffset); + logBuffer = logManager.getReadBufferByLsn(fileLocation); + if (logBuffer == null) { + return false; + } + + /* + * Copy at much as we can of the logBuffer into the window's + * readBuffer. We don't call ByteBuffer.put(ByteBuffer) because + * the logBuffer may be larger than the window readBuffer, and + * we don't want to get an overflow. Instead, we convert to an + * array and carefully size the copy. A LogBuffer is positioned + * for writing, and hasn't yet been flipped. 
LogManager.get() + * does an absolute retrieval of bytes from the buffer, because + * it knows that the log entry exists, and is only reading one + * entry. We need to flip the buffer, because we don't know + * apriori how much is in the buffer, and we want to scan it. + */ + + /* + * Put the logBuffer's contents into wholeContents, and + * position wholeContents at the desired target offset. If + * this logBuffer had been the currentWriteBuffer, it's + * positioned for writing and must be flipped for reading. + */ + ByteBuffer wholeContents = + logBuffer.getDataBuffer().duplicate(); + if (wholeContents.position() != 0) { + wholeContents.flip(); + } + long firstOffset = + DbLsn.getFileOffset(logBuffer.getFirstLsn()); + wholeContents.position((int) (targetOffset - firstOffset)); + + /* Make a buffer which starts at target. */ + ByteBuffer startAtTarget = wholeContents.slice(); + byte[] data = startAtTarget.array(); + int availableContentLen = startAtTarget.limit(); + int copyLength = + (availableContentLen > readBuffer.capacity()) ? + readBuffer.capacity() : availableContentLen; + + readBuffer.clear(); + readBuffer.put(data, startAtTarget.arrayOffset(), copyLength); + readBuffer.flip(); + + /* LogBuffers were just written and use the current version. */ + setFileNum(windowFileNum, LogEntryType.LOG_VERSION); + startOffset = targetOffset; + endOffset = startOffset + readBuffer.limit(); + readBuffer.position(0); + return true; + } finally { + if (logBuffer != null) { + logBuffer.release(); + } + } + } + + /** + * Fill up the read buffer with more data, moving along to the + * following file (next largest number) if needed. Unlike other file + * readers, we are reading log files that are concurrently growing, so + * this read window must also know to look in the log buffers. + * + * The contract between the feeder reader and the VLSNIndex lets us + * assume that the feeder reader is only active when it is sure that + * there is more data available somewhere -- whether it's in the log + * buffers, write queue, or on disk. + * + * @return true if the fill moved us to a new file. + * @see ReadWindow#fillNext + */ + @Override + protected boolean fillNext(boolean singleFile, int bytesNeeded) + throws ChecksumException, DatabaseException, EOFException { + + /* + * The SwitchReadWindow should only be used for feeding, and + * singleFile should never be true. + */ + assert !singleFile; + + adjustReadBufferSize(bytesNeeded); + + /* + * Try to fill the window by asking for the next offset from + * the log buffers. + */ + if (fillFromLogBuffer(currentFileNum(), endOffset)) { + /* Didn't move to a new file. */ + return false; + } + + /* + * If that didn't work, there are these possible reasons why: + * a - it's a valid offset, but it's no longer in a log buffer, it + * was written to disk. + * b - it's not a valid offset, because the log file flipped. + * In both cases, go to the FileManager and see if there's more log + * to be found. + */ + + FileHandle fileHandle = null; + try { + + /* Get a file handle to read in more log. */ + fileHandle = fileManager.getFileHandle(currentFileNum()); + + /* Attempt to read more from this file. */ + startOffset = endOffset; + if (fillFromFile(fileHandle, startOffset)) { + /* + * Successfully filled the read buffer, but didn't move to + * a new file. + */ + return false; + } + + fileHandle.release(); + fileHandle = null; + + /* This file is done -- can we read in the next file? 
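+ * (singleFile was already asserted to be false at the top of this
+ * method, so the check below is purely defensive.)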
*/ + if (singleFile) { + throw new EOFException(); + } + + /* + * Remember that the nextFile may not be fileNum + 1 if + * there has been log cleaning. + */ + Long nextFile = + fileManager.getFollowingFileNum(currentFileNum(), + true /* forward */); + + /* + * But if there's no next file, let's assume that the desired + * data is still in the log buffers, and the next lsn is the + * first entry in the subsequent file number. Start the read + * from the first real log entry, because the file header entry + * is not in the log buffers. + */ + if (nextFile == null) { + nextFile = currentFileNum() + 1; + } + + if (fillFromLogBuffer(nextFile, + FileManager.firstLogEntryOffset())) { + /* + * We filled the read buffer, and jumped to a new + * file. + */ + return true; + } + + /* + * Didn't find the next bytes in the log buffer, go look on + * disk. + */ + fileHandle = fileManager.getFileHandle(nextFile); + setFileNum(nextFile, fileHandle.getLogVersion()); + startOffset = 0; + boolean moreData = fillFromFile(fileHandle, 0); + assert moreData : + "FeederReader should find more data in next file"; + return true; + } catch (IOException e) { + e.printStackTrace(); + throw EnvironmentFailureException.unexpectedException + ("Problem in ReadWindow.fill, reading from = " + + currentFileNum(), e); + + } finally { + if (fileHandle != null) { + fileHandle.release(); + } + } + } + } + + /* For debugging */ + String dumpState() { + return "prevCacheHits=" + prevCacheHits + " " + window; + } +} diff --git a/src/com/sleepycat/je/rep/stream/FeederReplicaHandshake.java b/src/com/sleepycat/je/rep/stream/FeederReplicaHandshake.java new file mode 100644 index 0000000..c83d602 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederReplicaHandshake.java @@ -0,0 +1,632 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.stream; + +import java.io.IOException; +import java.net.SocketAddress; +import java.util.Set; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.LockTimeoutException; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepGroupImpl.NodeConflictException; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.node.Feeder; +import com.sleepycat.je.rep.impl.node.Feeder.ExitException; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.stream.Protocol.JEVersions; +import com.sleepycat.je.rep.stream.Protocol.JEVersionsReject; +import com.sleepycat.je.rep.stream.Protocol.NodeGroupInfo; +import com.sleepycat.je.rep.stream.Protocol.ReplicaJEVersions; +import com.sleepycat.je.rep.stream.Protocol.ReplicaProtocolVersion; +import com.sleepycat.je.rep.stream.Protocol.SNTPRequest; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; + +/** + * Implements the Feeder side of the handshake between the Feeder and the + * Replica. The ReplicaFeederHandshake class takes care of the other side. + * + * @see FeederReplicaHandshake + */ +public class FeederReplicaHandshake { + private final int GROUP_RETRY = 60; + private final long GROUP_RETRY_SLEEP_MS = 1000; + /* The rep node (server or replica) */ + private final RepNode repNode; + private final NamedChannel namedChannel; + + private final NameIdPair feederNameIdPair; + + /* Established during the first message. */ + private NameIdPair replicaNameIdPair = null; + + private ReplicaJEVersions replicaJEVersions; + + /** + * The negotiated highest version associated with log records to be sent + * by this feeder in the HA stream. + */ + private int streamLogVersion; + + /** The node associated with the replica, or null if not known. */ + private volatile RepNodeImpl replicaNode; + + private final Logger logger; + + /** + * A test hook that is called before a message is written. Note that the + * hook is inherited by the ReplicaFeederHandshake, and will be kept in + * place for the entire handshake. + */ + private final TestHook writeMessageHook; + + /* + * Used during testing: A non-zero value overrides the actual log + * version. + */ + private static int testCurrentLogVersion = 0; + + /** + * An instance of this class is created with each new handshake preceding + * the setting up of a connection. + * + * @param repNode the replication node + * @param feeder the feeder instance + * @param namedChannel the channel to be used for the handshake + */ + public FeederReplicaHandshake(RepNode repNode, + Feeder feeder, + NamedChannel namedChannel) { + this.repNode = repNode; + this.namedChannel = namedChannel; + feederNameIdPair = repNode.getNameIdPair(); + logger = LoggerUtils.getLogger(getClass()); + writeMessageHook = feeder.getWriteMessageHook(); + } + + /** + * Returns the replica node ID. 
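+     * The pair is established from the replica's first protocol message
+     * during negotiation.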
The returned value is only valid after + * the handshake has been executed. + * + * @return the replica node name id pair + */ + public NameIdPair getReplicaNameIdPair() { + return replicaNameIdPair; + } + + /** + * Returns the current log version for the feeder, which is the highest log + * version of any replicable log entry supplied by this feeder. Uses + * LogEntryType.LOG_VERSION_HIGHEST_REPLICABLE, not LOG_VERSION, since some + * log versions may have only applied to non-replicable log entries, as was + * the case for log version 10. + */ + private int getCurrentLogVersion() { + return (testCurrentLogVersion != 0) ? + testCurrentLogVersion : + LogEntryType.LOG_VERSION_HIGHEST_REPLICABLE; + } + + /** + * Return the negotiated log version that will be used for the HA stream + * between the feeder and the replica. + */ + public int getStreamLogVersion() { + if (streamLogVersion <= 0) { + throw new IllegalStateException("execute() method was not invoked"); + } + + return streamLogVersion; + } + + public static void setTestLogVersion(int testLogVersion) { + testCurrentLogVersion = testLogVersion; + } + + /** Get the current JE version, supporting a test override. */ + private JEVersion getCurrentJEVersion() { + return repNode.getRepImpl().getCurrentJEVersion(); + } + + /** Get the current protocol version, supporting a test override. */ + private int getCurrentProtocolVersion() { + return Protocol.getJEVersionProtocolVersion(getCurrentJEVersion()); + } + + /** + * Determines log compatibility. Returns null if they are compatible or the + * server would like to defer the rejection to the replica. Return a + * JEVersionsReject message if the server does not wish to communicate with + * the replica. + * + * This check requires the log version of the replicas to be greater than + * or equal to {@value LogEntryType#LOG_VERSION_REPLICATE_OLDER} . Allowing + * the replica version to be behind the feeder version supports replication + * during upgrades where the feeder is upgraded before the replica. + * [#22336] + * + * This check also requires that the JE version of the replica is at least + * the minJEVersion specified by the RepGroupImpl, if any. This check + * makes sure that nodes running an older software version cannot join the + * group after a new and incompatible feature has been used. + */ + private JEVersionsReject checkJECompatibility(final Protocol protocol, + final JEVersions jeVersions) { + + final int replicaLogVersion = jeVersions.getLogVersion(); + if (replicaLogVersion < LogEntryType.LOG_VERSION_REPLICATE_OLDER - 1) { + return protocol.new JEVersionsReject( + "Incompatible log versions. " + + "Feeder log version: " + getCurrentLogVersion() + + ", Feeder JE version: " + getCurrentJEVersion() + + ", Replica log version: " + replicaLogVersion + + ", Replica JE version: " + jeVersions.getVersion()); + } + + final JEVersion minJEVersion = repNode.getGroup().getMinJEVersion(); + if (minJEVersion.compareTo(jeVersions.getVersion()) > 0) { + return protocol.new JEVersionsReject( + "Unsupported JE version. " + + "Feeder JE version: " + getCurrentJEVersion() + + ", Feeder min JE version: " + minJEVersion + + ", Replica JE version: " + jeVersions.getVersion()); + } + + return null; + } + + /** + * Returns the JE version supported by the replica, or {@code null} if the + * value is not yet known. This method should only be called after the + * {@link #execute} method has returned successfully. 
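+     * The value is taken from the ReplicaJEVersions message read during
+     * the version exchange in {@link #execute}.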
+ * + * @return the replica's JE version or {@code null} + */ + public JEVersion getReplicaJEVersion() { + return (replicaJEVersions != null) ? + replicaJEVersions.getVersion() : + null; + } + + /** + * Returns a RepNodeImpl that represents the replica for a successful + * handshake. This method should only be called after the {@link #execute} + * method has returned successfully, and will throw IllegalStateException + * otherwise. + * + * @return the replica node + * @throws IllegalStateException if the handshake did not complete + */ + public RepNodeImpl getReplicaNode() { + if (replicaNode == null) { + throw new IllegalStateException("Handshake did not complete"); + } + return replicaNode; + } + + /** + * Executes the feeder side of the handshake. + * @throws ProtocolException + * @throws ExitException + */ + public Protocol execute() + throws DatabaseException, + IOException, + ProtocolException, + ExitException { + + LoggerUtils.info(logger, repNode.getRepImpl(), + "Feeder-replica handshake start"); + + /* First negotiate a compatible protocol */ + Protocol protocol = negotiateProtocol(); + + /* Now exchange JE version information using the negotiated protocol */ + replicaJEVersions = (ReplicaJEVersions) protocol.read(namedChannel); + final String versionMsg = + " Replica " + replicaNameIdPair.getName() + + " Versions" + + " JE: " + replicaJEVersions.getVersion().getVersionString() + + " Log: " + replicaJEVersions.getLogVersion() + + " Protocol: " + protocol.getVersion(); + LoggerUtils.fine(logger, repNode.getRepImpl(), versionMsg); + JEVersionsReject reject = + checkJECompatibility(protocol, replicaJEVersions); + + if (reject != null) { + final String msg = "Version incompatibility: " + + reject.getErrorMessage() + + " with replica " + replicaNameIdPair.getName(); + LoggerUtils.warning(logger, repNode.getRepImpl(), msg); + writeMessage(protocol, reject); + throw new ExitException(msg); + } + + /* + * Use the minimum common log version. This ensures that the feeder + * can generate it and that the replica can understand it. + */ + streamLogVersion = + Math.min(getCurrentLogVersion(), replicaJEVersions.getLogVersion()); + + writeMessage(protocol, + protocol.new FeederJEVersions( + getCurrentJEVersion(), + streamLogVersion, + repNode.getMinJEVersion())); + + /* Ensure that the feeder sends the agreed upon version. */ + protocol.setStreamLogVersion(streamLogVersion); + + /* Verify replica membership info */ + verifyMembershipInfo(protocol); + + checkClockSkew(protocol); + LoggerUtils.info + (logger, repNode.getRepImpl(), + "Feeder-replica " + replicaNameIdPair.getName() + + " handshake completed." + + versionMsg + + " Stream Log: " + protocol.getStreamLogVersion()); + + return protocol; + } + + /** Write a protocol message to the channel. */ + private void writeMessage(final Protocol protocol, + final Message message) + throws IOException { + + assert TestHookExecute.doHookIfSet(writeMessageHook, message); + protocol.write(message, namedChannel); + } + + /** + * Responds to message exchanges used to establish clock skew. + * @throws ProtocolException + */ + private void checkClockSkew(Protocol protocol) + throws IOException, + ProtocolException { + SNTPRequest request; + do { + request = protocol.read(namedChannel.getChannel(), + SNTPRequest.class); + writeMessage(protocol, protocol.new SNTPResponse(request)); + } while (!request.isLast()); + } + + /** + * Verifies that the group as configured here at the Feeder matches the + * configuration of the replica. 
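+     *
+     * In outline, the checks below are: the node ID sent during protocol
+     * negotiation must match the one in the MembershipInfo request; a
+     * node with a transient id (secondary or external) is registered if
+     * it is new; any other node must be, or become, a quorum-acked
+     * member, with a bounded retry while the in-memory group
+     * representation catches up; finally, group-level and node-level
+     * consistency checks run and the stored JE version may be updated.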
+ * + * @param protocol the protocol to use for this verification + * + * @throws IOException + * @throws DatabaseException + */ + private void verifyMembershipInfo(Protocol protocol) + throws IOException, + DatabaseException, + ExitException { + + NodeGroupInfo nodeGroup = + (Protocol.NodeGroupInfo)(protocol.read(namedChannel)); + final RepGroupImpl group = repNode.getGroup(); + RepNodeImpl node = group.getNode(nodeGroup.getNodeName()); + + try { + + if (nodeGroup.getNodeId() != replicaNameIdPair.getId()) { + throw new ExitException + ("The replica node ID sent during protocol negotiation: " + + replicaNameIdPair + + " differs from the one sent in the MembershipInfo " + + "request: " + nodeGroup.getNodeId()); + } + + if (nodeGroup.getNodeType().hasTransientId()) { + + /* + * Note the secondary or external node if this is a new node. + * Otherwise, fall through, and the subsequent code will + * notice the incompatible node type. + */ + if (node == null) { + + /* A new node with transient id */ + node = new RepNodeImpl(nodeGroup); + try { + repNode.addTransientIdNode(node); + } catch (IllegalStateException | NodeConflictException e) { + throw new ExitException(e, true); + } + } + } else if (node == null || !node.isQuorumAck()) { + /* Not currently a confirmed member. */ + if (nodeGroup.getNodeType().isArbiter()) { + Set arbMembers = group.getArbiterMembers(); + if (arbMembers.size() > 0) { + throw new ExitException( + "An Arbiter node already exists in the "+ + "replication group."); + } + } + try { + + /* + * It is possible that the thread that added the + * node to the group database has not added the + * member to the in memory representation. We retry + * several times to give the other thread a chance to + * update the in memory structure. + */ + repNode.getRepGroupDB().ensureMember(nodeGroup); + for (int i=0; i < GROUP_RETRY; i++) { + node = + repNode.getGroup().getMember( + nodeGroup.getNodeName()); + if (node != null) { + break; + } + try { + Thread.sleep(GROUP_RETRY_SLEEP_MS); + } catch (Exception e) { + + } + } + if (node == null) { + throw EnvironmentFailureException.unexpectedState + ("Node: " + nodeGroup.getNameIdPair() + + " not found"); + } + } catch (InsufficientReplicasException | + InsufficientAcksException | + LockTimeoutException e) { + throw new ExitException(e, false); + } catch (NodeConflictException e) { + throw new ExitException(e, true); + } + } else if (node.isRemoved()) { + throw new ExitException + ("Node: " + nodeGroup.getNameIdPair() + + " is no longer a member of the group." + + " It was explicitly removed."); + } + + doGroupChecks(nodeGroup, group); + doNodeChecks(nodeGroup, node); + maybeUpdateJEVersion(nodeGroup, group, node); + } catch (ExitException exception) { + LoggerUtils.info + (logger, repNode.getRepImpl(), exception.getMessage()); + if (exception.failReplica()) { + /* + * Explicit message to force replica to invalidate the + * environment. + */ + writeMessage(protocol, + protocol.new NodeGroupInfoReject( + exception.getMessage())); + } + throw exception; + } + + /* Id is now established for sure, update the pair. */ + replicaNameIdPair.update(node.getNameIdPair()); + namedChannel.setNameIdPair(replicaNameIdPair); + LoggerUtils.fine(logger, repNode.getRepImpl(), + "Channel Mapping: " + replicaNameIdPair + " is at " + + namedChannel.getChannel()); + writeMessage(protocol, + protocol.new NodeGroupInfoOK( + group.getUUID(), replicaNameIdPair)); + } + + /** + * Verifies that the group related information is consistent. 
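+     * Three conditions are checked: conflicting designated-primary
+     * settings, mismatched group names, and mismatched group UUIDs for
+     * environments that share a name.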
+ * + * @throws ExitException if the configuration in the group db differs + * from the supplied config + */ + private void doGroupChecks(NodeGroupInfo nodeGroup, + final RepGroupImpl group) + throws ExitException { + + if (nodeGroup.isDesignatedPrimary() && + repNode.getRepImpl().isDesignatedPrimary()) { + throw new ExitException + ("Conflicting Primary designations. Feeder node: " + + repNode.getNodeName() + + " and replica node: " + nodeGroup.getNodeName() + + " cannot simultaneously be designated primaries"); + } + + if (!nodeGroup.getGroupName().equals(group.getName())) { + throw new ExitException + ("The feeder belongs to the group: " + + group.getName() + " but replica id" + replicaNameIdPair + + " belongs to the group: " + nodeGroup.getGroupName()); + } + + if (!RepGroupImpl.isUnknownUUID(nodeGroup.getUUID()) && + !nodeGroup.getUUID().equals(group.getUUID())) { + throw new ExitException + ("The environments have the same name: " + + group.getName() + + " but represent different environment instances." + + " The environment at the master has UUID " + + group.getUUID() + + ", while the replica " + replicaNameIdPair.getName() + + " has UUID: " + nodeGroup.getUUID()); + } + } + + /** + * Verifies that the old and new node configurations are the same. + * + * @throws ExitException if the configuration in the group db differs + * from the supplied config + */ + private void doNodeChecks(NodeGroupInfo nodeGroup, + RepNodeImpl node) + throws ExitException { + + if (!nodeGroup.getHostName().equals(node.getHostName())) { + throw new ExitException + ("Conflicting hostnames for replica id: " + + replicaNameIdPair + + " Feeder thinks it is: " + node.getHostName() + + " Replica is configured to use: " + + nodeGroup.getHostName()); + } + + if (nodeGroup.port() != node.getPort()) { + throw new ExitException + ("Conflicting ports for replica id: " + replicaNameIdPair + + " Feeder thinks it uses: " + node.getPort() + + " Replica is configured to use: " + nodeGroup.port()); + } + + if (!((NodeType.ELECTABLE == node.getType()) || + (NodeType.SECONDARY == node.getType()) || + (NodeType.EXTERNAL == node.getType()) || + (NodeType.ARBITER == node.getType()) || + (NodeType.MONITOR == node.getType()))) { + throw new ExitException + ("The replica node: " + replicaNameIdPair + + " is of type: " + node.getType()); + } + + if (!nodeGroup.getNodeType().equals(node.getType())) { + throw new ExitException + ("Conflicting node types for: " + replicaNameIdPair + + " Feeder thinks it uses: " + node.getType() + + " Replica is configured as type: " + nodeGroup.getNodeType()); + } + replicaNode = node; + } + + /** + * Update the node's JE version from the provided group info if storing the + * JE version is supported and the current version differs from the stored + * one. It's OK if the attempt does not fully succeed due to a lack of + * replicas or acknowledgments: we can try again the next time. + * + * @throws ExitException if the attempt fails because the updated node's + * socket address conflicts with another node + */ + private void maybeUpdateJEVersion(final NodeGroupInfo nodeGroup, + final RepGroupImpl group, + final RepNodeImpl node) + throws ExitException { + + if ((group.getFormatVersion() >= RepGroupImpl.FORMAT_VERSION_3) && + (nodeGroup.getJEVersion() != null) && + !nodeGroup.getJEVersion().equals(node.getJEVersion())) { + + /* + * Try updating the JE version information, given that the group + * format supports this. 
Don't require a quorum of acknowledgments + * since the fact that the handshake for this replica is underway + * may mean that a quorum is not available. Saving the JE version + * is only an optimization, so it is OK if this attempt fails to be + * persistent. + */ + try { + repNode.getRepGroupDB().updateMember( + new RepNodeImpl(nodeGroup), false); + } catch (InsufficientReplicasException | + InsufficientAcksException | + LockTimeoutException e) { + /* Ignored */ + } catch (NodeConflictException e) { + throw new ExitException(e, true); + } + } + } + + /** + * Negotiates and returns the protocol that will be used in all subsequent + * interactions with the Replica, if the replica accepts to it. + * + * @return the protocol instance to be used for subsequent interactions + * + * @throws IOException + * @throws ExitException + */ + private Protocol negotiateProtocol() + throws IOException, ExitException { + + Protocol defaultProtocol = + Protocol.getProtocol(repNode, getCurrentProtocolVersion()); + + /* + * Wait to receive the replica's version, decide which protocol version + * to use ourselves, and then reply with our version. + */ + ReplicaProtocolVersion message = + (ReplicaProtocolVersion) defaultProtocol.read(namedChannel); + + replicaNameIdPair = message.getNameIdPair(); + + Feeder dup = + repNode.feederManager().getFeeder(replicaNameIdPair.getName()); + if ((dup != null) || + (message.getNameIdPair().getName(). + equals(feederNameIdPair.getName()))) { + /* Reject the connection. */ + writeMessage(defaultProtocol, + defaultProtocol.new DuplicateNodeReject( + "This node: " + replicaNameIdPair + + " is already in active use at the feeder ")); + SocketAddress dupAddress = + namedChannel.getChannel().getSocketChannel().socket(). + getRemoteSocketAddress(); + + throw new ExitException + ("A replica with the id: " + replicaNameIdPair + + " is already active with this feeder. " + + " The duplicate replica resides at: " + + dupAddress); + } + + /* + * If the Replica's version is acceptable, use it, otherwise return the + * default protocol at this node, in case the Replica can support it. + */ + final int replicaVersion = message.getVersion(); + Protocol protocol = + Protocol.get(repNode, replicaVersion, getCurrentProtocolVersion()); + if (protocol == null) { + protocol = defaultProtocol; + } + defaultProtocol.write + (defaultProtocol.new FeederProtocolVersion(protocol.getVersion()), + namedChannel); + return protocol; + } +} diff --git a/src/com/sleepycat/je/rep/stream/FeederReplicaSyncup.java b/src/com/sleepycat/je/rep/stream/FeederReplicaSyncup.java new file mode 100644 index 0000000..1a7b68b --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederReplicaSyncup.java @@ -0,0 +1,418 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.stream; + +import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.cleaner.FileProtector.ProtectedFileSet; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.rep.ReplicationSecurityException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.Feeder; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.stream.BaseProtocol.EntryRequest; +import com.sleepycat.je.rep.stream.BaseProtocol.EntryRequestType; +import com.sleepycat.je.rep.stream.BaseProtocol.RestoreRequest; +import com.sleepycat.je.rep.stream.BaseProtocol.StartStream; +import com.sleepycat.je.rep.subscription.StreamAuthenticator; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * Establish where the replication stream should start for a feeder and replica + * pair. The Feeder's job is to send the replica the parts of the replication + * stream it needs, so that the two can determine a common matchpoint. + * + * If a successful matchpoint is found the feeder learns where to start the + * replication stream for this replica. + */ +public class FeederReplicaSyncup { + + /* A test hook that is called after a syncup has started. */ + private static volatile TestHook afterSyncupStartedHook; + + /* A test hook that is called after a syncup has ended. */ + private static volatile TestHook afterSyncupEndedHook; + + private final Feeder feeder; + private final RepNode repNode; + private final NamedChannel namedChannel; + private final Protocol protocol; + private final VLSNIndex vlsnIndex; + private final Logger logger; + private FeederSyncupReader backwardsReader; + + public FeederReplicaSyncup(Feeder feeder, + NamedChannel namedChannel, + Protocol protocol) { + this.feeder = feeder; + this.repNode = feeder.getRepNode(); + logger = LoggerUtils.getLogger(getClass()); + this.namedChannel = namedChannel; + this.protocol = protocol; + this.vlsnIndex = repNode.getVLSNIndex(); + } + + /** + * The feeder's side of the protocol. Find out where to start the + * replication stream. + * + * @throws NetworkRestoreException if sync up failed and network store is + * required + * @throws ChecksumException if checksum validation failed + */ + public void execute() + throws DatabaseException, IOException, InterruptedException, + NetworkRestoreException, ChecksumException { + + final long startTime = System.currentTimeMillis(); + RepImpl repImpl = repNode.getRepImpl(); + LoggerUtils.info(logger, repImpl, + "Feeder-replica " + + feeder.getReplicaNameIdPair().getName() + + " syncup started. Feeder range: " + + repNode.getVLSNIndex().getRange()); + + /* + * Prevent the VLSNIndex range from being changed and protect all files + * in the range. To search the index and read files within this range + * safely, VLSNIndex.getRange must be called after syncupStarted. 
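+         *
+         * The protect/release pairing is enforced with try/finally below:
+         *
+         *    ProtectedFileSet pfs =
+         *        repNode.syncupStarted(feeder.getReplicaNameIdPair());
+         *    try { ... the syncup message dialog runs ... }
+         *    finally { repNode.syncupEnded(pfs); }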
+ */ + final ProtectedFileSet protectedFileSet = + repNode.syncupStarted(feeder.getReplicaNameIdPair()); + + try { + assert TestHookExecute.doHookIfSet(afterSyncupStartedHook, feeder); + + /* + * Wait for the replica to start the syncup message exchange. The + * first message will always be an EntryRequest. This relies on the + * fact that a brand new group always begins with a master that has + * a few vlsns from creating the nameDb that exist before a replica + * syncup. The replica will never issue a StartStream before doing + * an EntryRequest. + * + * The first entry request has three possible types of message + * responses - EntryNotFound, AlternateMatchpoint, or Entry. + */ + VLSNRange range = vlsnIndex.getRange(); + EntryRequest firstRequest = + (EntryRequest) protocol.read(namedChannel); + Message response = makeResponseToEntryRequest(range, + firstRequest, + true); + + protocol.write(response, namedChannel); + + /* + * Now the replica may send one of three messages: + * - a StartStream message indicating that the replica wants to + * start normal operations + * - a EntryRequest message if it's still hunting for a + * matchpoint. There's the possibility that the new EntryRequest + * asks for a VLSN that has been log cleaned, so check that we can + * supply it. + * - a RestoreRequest message that indicates that the replica + * has given up, and will want a network restore. + */ + + VLSN startVLSN; + while (true) { + Message message = protocol.read(namedChannel); + if (logger.isLoggable(Level.FINEST)) { + LoggerUtils.finest(logger, repImpl, + "Replica " + + feeder.getReplicaNameIdPair() + + " message op: " + message.getOp()); + } + if (message instanceof StartStream) { + final StartStream startMessage = (StartStream) message; + startVLSN = startMessage.getVLSN(); + + /* set feeder filter */ + final FeederFilter filter = startMessage.getFeederFilter(); + feeder.setFeederFilter(filter); + + /* + * skip security check if not needed, e.g., a replica in + * a secure store + */ + if (!feeder.needSecurityChecks()) { + break; + } + + final StreamAuthenticator auth = feeder.getAuthenticator(); + /* if security check is needed, auth cannot be null */ + assert (auth != null); + /* remember table id strings of subscribed tables */ + if (filter != null) { + auth.setTableIds(filter.getTableIds()); + } else { + /* if no filter, subscribe all tables */ + auth.setTableIds(null); + } + /* security check */ + if (!auth.checkAccess()) { + final String err = "Replica " + + feeder.getReplicaNameIdPair() + .getName() + + " fails security check."; + LoggerUtils.warning(logger, repImpl, err); + + throw new ReplicationSecurityException( + err, feeder.getReplicaNameIdPair().getName(), + null); + } + break; + } else if (message instanceof EntryRequest) { + response = makeResponseToEntryRequest + (range, (EntryRequest) message, false); + protocol.write(response, namedChannel); + } else if (message instanceof RestoreRequest) { + throw answerRestore(range, + ((RestoreRequest) message).getVLSN()); + } else { + throw EnvironmentFailureException.unexpectedState + (repImpl, + "Expected StartStream or EntryRequest but got " + + message); + } + } + + LoggerUtils.info(logger, repImpl, + "Feeder-replica " + + feeder.getReplicaNameIdPair().getName() + + " start stream at VLSN: " + startVLSN); + + feeder.initMasterFeederSource(startVLSN); + + } finally { + repNode.syncupEnded(protectedFileSet); + assert TestHookExecute.doHookIfSet(afterSyncupEndedHook, feeder); + LoggerUtils.info + (logger, repImpl, + 
String.format("Feeder-replica " + + feeder.getReplicaNameIdPair().getName() + + " syncup ended. Elapsed time: %,dms", + (System.currentTimeMillis() - startTime))); + + } + } + + /** For testing. */ + public static void setAfterSyncupStartedHook(TestHook hook) { + afterSyncupStartedHook = hook; + } + + /** For testing. */ + public static void setAfterSyncupEndedHook(TestHook hook) { + afterSyncupEndedHook = hook; + } + + private FeederSyncupReader setupReader(VLSN startVLSN) + throws DatabaseException, IOException { + + EnvironmentImpl envImpl = repNode.getRepImpl(); + int readBufferSize = envImpl.getConfigManager(). + getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE); + + /* + * A BackwardsReader for scanning the log file backwards. + */ + long lastUsedLsn = envImpl.getFileManager().getLastUsedLsn(); + + VLSN firstVLSN = vlsnIndex.getRange().getFirst(); + long firstFile = vlsnIndex.getLTEFileNumber(firstVLSN); + long finishLsn = DbLsn.makeLsn(firstFile, 0); + return new FeederSyncupReader(envImpl, + vlsnIndex, + lastUsedLsn, + readBufferSize, + startVLSN, + finishLsn); + } + + private Message makeResponseToEntryRequest(VLSNRange range, + EntryRequest request, + boolean isFirstResponse) + throws IOException, ChecksumException { + + final VLSN requestMatchpoint = request.getVLSN(); + final EntryRequestType type = request.getType(); + + /* if NOW mode, return high end regardless of requested vlsn */ + if (type.equals(EntryRequestType.NOW)) { + /* + * VLSN range is not empty even without user data, so we can + * always get a valid entry. + */ + return protocol.new Entry(getMatchPtRecord(range.getLast())); + } + + /* stream modes other than NOW */ + + /* + * The matchpoint must be in the VLSN range, or more specifically, in + * the VLSN index so we can map the VLSN to the lsn in order to fetch + * the associated log record. + */ + if (range.getFirst().compareTo(requestMatchpoint) > 0) { + /* request point is smaller than lower bound of range */ + if (type.equals(BaseProtocol.EntryRequestType.AVAILABLE)) { + return protocol.new Entry(getMatchPtRecord(range.getFirst())); + } + + /* default mode */ + return protocol.new EntryNotFound(); + } + + if (range.getLast().compareTo(requestMatchpoint) < 0) { + /* request point is higher than upper bound of range */ + if (type.equals(EntryRequestType.AVAILABLE)) { + return protocol.new Entry(getMatchPtRecord(range.getLast())); + } + + /* + * default mode: + * + * The matchpoint is after the last one in the range. We have to + * suggest the lastSync entry on this node as an alternative. This + * should only happen on the feeder's first response. For example, + * suppose the feeder's range is vlsns 1-100. It's possible that + * the exchange is as follows: + * 1 - replica has 1-110, asks feeder for 110 + * 2 - feeder doesn't have 110, counters with 100 + * 3 - from this point on, the replica should only ask for vlsns + * that are <= the feeder's counter offer of 100 + * Guard that this holds true, because the feeder's log reader is + * only set to search backwards; it does not expect to toggle + * between forward and backwards. + */ + assert backwardsReader == null : + "Replica request for vlsn > feeder range should only happen " + + "on the first exchange."; + + if (range.getLastSync().equals(VLSN.NULL_VLSN)) { + /* + * We have no syncable entry at all. The replica will have to + * do a network restore. 
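+                 * (On EntryNotFound the replica is expected to follow up
+                 * with a RestoreRequest, which answerRestore() below
+                 * converts into a RestoreResponse plus a
+                 * NetworkRestoreException.)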
+ */ + return protocol.new EntryNotFound(); + } + + if (isFirstResponse) { + final OutputWireRecord lastSync = + getMatchPtRecord(range.getLastSync()); + assert lastSync != null : + "Look for alternative, range=" + range; + return protocol.new AlternateMatchpoint(lastSync); + } + + throw EnvironmentFailureException.unexpectedState + (repNode.getRepImpl(), "RequestMatchpoint=" + + requestMatchpoint + " range=" + range + + "should only happen on first response"); + } + + /* The matchpoint is within the range. Find it. */ + final OutputWireRecord matchRecord = + getMatchPtRecord(requestMatchpoint); + if (matchRecord == null) { + throw EnvironmentFailureException.unexpectedState + (repNode.getRepImpl(), + "Couldn't find matchpoint " + requestMatchpoint + + " in log. VLSN range=" + range); + } + + return protocol.new Entry(matchRecord); + } + + /* scan log backwards to find match point record */ + private OutputWireRecord getMatchPtRecord(VLSN matchPointVLSN) + throws IOException, ChecksumException { + + if (backwardsReader == null) { + backwardsReader = setupReader(matchPointVLSN); + } + return backwardsReader.scanBackwards(matchPointVLSN); + } + + private NetworkRestoreException answerRestore(VLSNRange range, + VLSN failedMatchpoint) + throws IOException { + + /* + * Note that getGlobalCBVLSN returns a null VLSN if the GlobalCBVLSN + * is defunct. In that case the RestoreResponse.cbvlsn field is unused. + */ + Message response = protocol.new + RestoreResponse(repNode.getRestoreResponseVLSN(range), + repNode.getLogProviders()); + protocol.write(response, namedChannel); + + return new NetworkRestoreException(failedMatchpoint, + range.getFirst(), + range.getLast(), + feeder.getReplicaNameIdPair()); + } + + @SuppressWarnings("serial") + static public class NetworkRestoreException extends Exception { + /* The out-of-range vlsn that provoked the exception */ + private final VLSN vlsn; + private final VLSN firstVLSN; + private final VLSN lastVLSN; + + /* The replica that made the request. */ + private final NameIdPair replicaNameIdPair; + + public NetworkRestoreException(VLSN vlsn, + VLSN firstVLSN, + VLSN lastVLSN, + NameIdPair replicaNameIdPair) { + this.vlsn = vlsn; + this.firstVLSN = firstVLSN; + this.lastVLSN = lastVLSN; + this.replicaNameIdPair = replicaNameIdPair; + } + + @Override + public String getMessage() { + return "Matchpoint vlsn " + vlsn + " requested by node: " + + replicaNameIdPair + " was outside the VLSN range: " + + "[" + firstVLSN + "-" + lastVLSN + "]"; + } + + public VLSN getVlsn() { + return vlsn; + } + + public NameIdPair getReplicaNameIdPair() { + return replicaNameIdPair; + } + } +} diff --git a/src/com/sleepycat/je/rep/stream/FeederSource.java b/src/com/sleepycat/je/rep/stream/FeederSource.java new file mode 100644 index 0000000..2040c20 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederSource.java @@ -0,0 +1,39 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.stream; + +import java.io.IOException; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.VLSN; + +/** + * Provides the next log record, blocking if one is not available. It + * encapsulates the source of the Log records, which can be a real Master or a + * Replica in a Replica chain that is replaying log records it received from + * some other source. + */ +public interface FeederSource { + + public void shutdown(EnvironmentImpl envImpl); + + /** + * Must be called to allow deletion of files protected by this feeder. + */ + public OutputWireRecord getWireRecord(VLSN vlsn, int waitTime) + throws DatabaseException, InterruptedException, IOException; + + public String dumpState(); +} diff --git a/src/com/sleepycat/je/rep/stream/FeederSyncupReader.java b/src/com/sleepycat/je/rep/stream/FeederSyncupReader.java new file mode 100644 index 0000000..e65bd3f --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederSyncupReader.java @@ -0,0 +1,188 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import static com.sleepycat.je.utilint.DbLsn.NULL_LSN; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNIndex.BackwardVLSNScanner; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.utilint.VLSN; + +/** + * The FeederSyncupReader scans the log backwards for requested log entries. + * It uses the vlsnIndex to optimize its search, repositioning when a concrete + * vlsn->lsn mapping is available. + * + * The FeederSyncupReader is not thread safe, and can only be used serially. It + * will stop at the finishLsn, which should be set using the GlobalCBVLSN. + */ +public class FeederSyncupReader extends VLSNReader { + /* The scanner is a cursor over the VLSNIndex. */ + private final BackwardVLSNScanner scanner; + + public FeederSyncupReader(EnvironmentImpl envImpl, + VLSNIndex vlsnIndex, + long endOfLogLsn, + int readBufferSize, + VLSN startVLSN, + long finishLsn) + throws IOException, DatabaseException { + + /* + * If we go backwards, endOfFileLsn and startLsn must not be null. + * Make them the same, so we always start at the same very end. + */ + super(envImpl, + vlsnIndex, + false, // forward + endOfLogLsn, + readBufferSize, + finishLsn); + scanner = new BackwardVLSNScanner(vlsnIndex); + initScan(startVLSN); + } + + /** + * Set up the FeederSyncupReader to start scanning from this VLSN. If we + * find a mapping for this VLSN, we'll start precisely at its LSN, else + * we'll have to start from an earlier location. 
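+     *
+     * Before the read window is positioned, the log is flushed so that
+     * every entry mapped by the VLSNIndex is on disk and readable
+     * through the regular ReadWindow.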
+ * + * @throws InterruptedException + * @throws IOException + * @throws DatabaseException + */ + private void initScan(VLSN startVLSN) + throws DatabaseException, IOException { + + if (startVLSN.equals(VLSN.NULL_VLSN)) { + throw EnvironmentFailureException.unexpectedState + ("FeederSyncupReader start can't be NULL_VLSN"); + } + + VLSN startPoint = startVLSN; + startLsn = scanner.getStartingLsn(startPoint); + assert startLsn != NULL_LSN; + + /* + * Flush the log so that syncup can assume that all log entries that + * are represented in the VLSNIndex are safely out of the log buffers + * and on disk. Simplifies this reader, so it can use the regular + * ReadWindow, which only works on a file. + */ + envImpl.getLogManager().flushNoSync(); + + window.initAtFileStart(startLsn); + currentEntryPrevOffset = window.getEndOffset(); + currentEntryOffset = window.getEndOffset(); + currentVLSN = startVLSN; + } + + /** + * Backward scanning for records for the feeder's part in syncup. + * @throws ChecksumException + * @throws FileNotFoundException + */ + public OutputWireRecord scanBackwards(VLSN vlsn) + throws FileNotFoundException, ChecksumException { + + VLSNRange range = vlsnIndex.getRange(); + if (vlsn.compareTo(range.getFirst()) < 0) { + /* + * The requested VLSN is before the start of our range, we don't + * have this record. + */ + return null; + } + + currentVLSN = vlsn; + + /* + * If repositionLsn is not NULL_LSN, the reader will seek to that + * position when calling readNextEntry instead of scanning. + * setPosition() is a noop if repositionLsn is null. + */ + long repositionLsn = scanner.getPreciseLsn(vlsn); + setPosition(repositionLsn); + + if (readNextEntry()) { + return currentFeedRecord; + } + + return null; + } + + /** + * @throw an EnvironmentFailureException if we were scanning for a + * particular VLSN and we have passed it by. + */ + private void checkForPassingTarget(int compareResult) { + + if (compareResult < 0) { + /* Hey, we passed the VLSN we wanted. */ + throw EnvironmentFailureException.unexpectedState + ("want to read " + currentVLSN + " but reader at " + + currentEntryHeader.getVLSN()); + } + } + + @Override + protected boolean isTargetEntry() + throws DatabaseException { + + nScanned++; + + /* Skip invisible entries. */ + if (currentEntryHeader.isInvisible()) { + return false; + } + + /* + * Return true if this entry is replicated and its VLSN is currentVLSN. + */ + if (entryIsReplicated()) { + VLSN entryVLSN = currentEntryHeader.getVLSN(); + int compareResult = entryVLSN.compareTo(currentVLSN); + checkForPassingTarget(compareResult); + + /* return true if this is the entry we want. */ + return (compareResult == 0); + } + + return false; + } + + /** + * Instantiate a WireRecord to house this log entry. + */ + @Override + protected boolean processEntry(ByteBuffer entryBuffer) { + + ByteBuffer buffer = entryBuffer.slice(); + buffer.limit(currentEntryHeader.getItemSize()); + currentFeedRecord = + new OutputWireRecord(envImpl, currentEntryHeader, buffer); + + entryBuffer.position(entryBuffer.position() + + currentEntryHeader.getItemSize()); + return true; + } +} diff --git a/src/com/sleepycat/je/rep/stream/FeederTxnStatDefinition.java b/src/com/sleepycat/je/rep/stream/FeederTxnStatDefinition.java new file mode 100644 index 0000000..44c25d1 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederTxnStatDefinition.java @@ -0,0 +1,95 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * Per-stat Metadata for HA Feeder Transaction statistics. + */ +public class FeederTxnStatDefinition { + + public static final String GROUP_NAME = "FeederTxns"; + public static final String GROUP_DESC = "FeederTxns statistics"; + + public static final String TXNS_ACKED_NAME = + "txnsAcked"; + public static final String TXNS_ACKED_DESC = + "Number of Transaction ack'd."; + public static final StatDefinition TXNS_ACKED = + new StatDefinition( + TXNS_ACKED_NAME, + TXNS_ACKED_DESC); + + public static final String TXNS_NOT_ACKED_NAME = + "txnsNotAcked"; + public static final String TXNS_NOT_ACKED_DESC = + "Number of Transactions not Ack'd."; + public static final StatDefinition TXNS_NOT_ACKED = + new StatDefinition( + TXNS_NOT_ACKED_NAME, + TXNS_NOT_ACKED_DESC); + + public static final String TOTAL_TXN_MS_NAME = + "totalTxnMS"; + public static final String TOTAL_TXN_MS_DESC = + "The total elapsed MS across all txns from transaction start to end."; + public static final StatDefinition TOTAL_TXN_MS = + new StatDefinition( + TOTAL_TXN_MS_NAME, + TOTAL_TXN_MS_DESC); + + public static final String ACK_WAIT_MS_NAME = + "ackWaitMS"; + public static final String ACK_WAIT_MS_DESC = + "Total MS waited for acks."; + public static final StatDefinition ACK_WAIT_MS = + new StatDefinition( + ACK_WAIT_MS_NAME, + ACK_WAIT_MS_DESC); + + public static final String LAST_COMMIT_VLSN_NAME = + "lastCommitVLSN"; + public static final String LAST_COMMIT_VLSN_DESC = + "The VLSN of the last committed transaction on the master, or 0 if " + + "not known or this node is not the master."; + public static final StatDefinition LAST_COMMIT_VLSN = + new StatDefinition( + LAST_COMMIT_VLSN_NAME, + LAST_COMMIT_VLSN_DESC, + StatType.CUMULATIVE); + + public static final String LAST_COMMIT_TIMESTAMP_NAME = + "lastCommitTimestamp"; + public static final String LAST_COMMIT_TIMESTAMP_DESC = + "The commit timestamp of the last committed transaction on the " + + "master, or 0 if not known or this node is not the master."; + public static final StatDefinition LAST_COMMIT_TIMESTAMP = + new StatDefinition( + LAST_COMMIT_TIMESTAMP_NAME, + LAST_COMMIT_TIMESTAMP_DESC, + StatType.CUMULATIVE); + + public static final String VLSN_RATE_NAME = + "vlsnRate"; + public static final String VLSN_RATE_DESC = + "A moving average of the rate replication data is being generated by " + + "the master, in VLSNs per minute, or 0 if not known or this node " + + "is not the master."; + public static final StatDefinition VLSN_RATE = + new StatDefinition( + VLSN_RATE_NAME, + VLSN_RATE_DESC); +} diff --git a/src/com/sleepycat/je/rep/stream/FeederTxns.java b/src/com/sleepycat/je/rep/stream/FeederTxns.java new file mode 100644 index 0000000..bb5ba15 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/FeederTxns.java @@ -0,0 +1,273 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.ACK_WAIT_MS; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.LAST_COMMIT_TIMESTAMP; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.LAST_COMMIT_VLSN; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.TOTAL_TXN_MS; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.TXNS_ACKED; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.TXNS_NOT_ACKED; +import static com.sleepycat.je.rep.stream.FeederTxnStatDefinition.VLSN_RATE; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.node.DurabilityQuorum; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.AtomicLongStat; +import com.sleepycat.je.utilint.LongAvgRateStat; +import com.sleepycat.je.utilint.NoClearAtomicLongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; + +/** + * FeederTxns manages transactions that need acknowledgments. + * + *
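+ * A commit that requires acknowledgments parks on a CountDownLatch held
+ * in a TxnInfo; each qualifying replica ack counts it down. A sketch of
+ * the lifecycle (the method names are the ones defined below; the
+ * sequencing is illustrative):
+ * <pre>
+ *    setupForAcks(txn);              // register the latch before commit
+ *    // ... the commit record is sent to the replicas ...
+ *    noteReplicaAck(replica, txnId); // each qualifying ack: countDown()
+ *    awaitReplicaAcks(txn, timeout); // committing thread: await()
+ * </pre>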
+ * <p>
        The lastCommitVLSN, lastCommitTimestamp, and vlsnRate statistics provide + * general information about committed transactions on the master, but are also + * intended to be used programmatically along with other statistics for the + * feeder to provide information about how up-to-date the replicas are. See + * the Feeder class for more details. + */ +public class FeederTxns { + + /** The moving average period in milliseconds */ + private static final long MOVING_AVG_PERIOD_MILLIS = 10000; + + /* + * Tracks transactions that have not yet been acknowledged for the entire + * replication node. + */ + private final Map txnMap; + + private final RepImpl repImpl; + private final StatGroup statistics; + private final AtomicLongStat txnsAcked; + private final AtomicLongStat txnsNotAcked; + private final AtomicLongStat ackWaitMs; + private final AtomicLongStat totalTxnMs; + private final NoClearAtomicLongStat lastCommitVLSN; + private final NoClearAtomicLongStat lastCommitTimestamp; + private final LongAvgRateStat vlsnRate; + + public FeederTxns(RepImpl repImpl) { + + txnMap = new ConcurrentHashMap(); + this.repImpl = repImpl; + statistics = new StatGroup(FeederTxnStatDefinition.GROUP_NAME, + FeederTxnStatDefinition.GROUP_DESC); + txnsAcked = new AtomicLongStat(statistics, TXNS_ACKED); + txnsNotAcked = new AtomicLongStat(statistics, TXNS_NOT_ACKED); + ackWaitMs = new AtomicLongStat(statistics, ACK_WAIT_MS); + totalTxnMs = new AtomicLongStat(statistics, TOTAL_TXN_MS); + lastCommitVLSN = + new NoClearAtomicLongStat(statistics, LAST_COMMIT_VLSN); + lastCommitTimestamp = + new NoClearAtomicLongStat(statistics, LAST_COMMIT_TIMESTAMP); + vlsnRate = new LongAvgRateStat( + statistics, VLSN_RATE, MOVING_AVG_PERIOD_MILLIS, TimeUnit.MINUTES); + } + + public AtomicLongStat getLastCommitVLSN() { + return lastCommitVLSN; + } + + public AtomicLongStat getLastCommitTimestamp() { + return lastCommitTimestamp; + } + + public LongAvgRateStat getVLSNRate() { + return vlsnRate; + } + + /** + * Create a new TxnInfo so that transaction commit can wait on the latch it + * sets up. + * + * @param txn identifies the transaction. + */ + public void setupForAcks(MasterTxn txn) { + if (txn.getRequiredAckCount() == 0) { + /* No acks called for, no setup needed. */ + return; + } + TxnInfo txnInfo = new TxnInfo(txn); + TxnInfo prevInfo = txnMap.put(txn.getId(), txnInfo); + assert(prevInfo == null); + } + + /** + * Returns the transaction if it's waiting for acknowledgments. Returns + * null otherwise. + */ + public MasterTxn getAckTxn(long txnId) { + TxnInfo txnInfo = txnMap.get(txnId); + return (txnInfo == null) ? null : txnInfo.txn; + } + + /* + * Clears any ack requirements associated with the transaction. It's + * typically invoked on a transaction abort. + */ + public void clearTransactionAcks(Txn txn) { + txnMap.remove(txn.getId()); + } + + /** + * Notes that an acknowledgment was received from a replica. + * + * @param replica the replica node + * @param txnId the locally committed transaction that was acknowledged. + * @param isArbiterFeeder true if feeder is an Arbiter false otherwise. 
+ * + * @return the TxnInfo associated with the txnId, if txnId needs an ack, + * null otherwise + */ + public TxnInfo noteReplicaAck(final RepNodeImpl replica, + final long txnId) { + final DurabilityQuorum durabilityQuorum = + repImpl.getRepNode().getDurabilityQuorum(); + if (!durabilityQuorum.replicaAcksQualify(replica)) { + return null; + } + final TxnInfo txnInfo = txnMap.get(txnId); + if (txnInfo == null) { + return null; + } + txnInfo.countDown(); + return txnInfo; + } + + /** + * Waits for the required number of replica acks to come through. + * + * @param txn identifies the transaction to wait for. + * + * @param timeoutMs the amount of time to wait for the acknowledgments + * before giving up. + * + * @throws InsufficientAcksException if the ack requirements were not met + */ + public void awaitReplicaAcks(MasterTxn txn, int timeoutMs) + throws InterruptedException { + + /* Record master commit information even if no acks are needed */ + final long vlsn = txn.getCommitVLSN().getSequence(); + final long ackAwaitStartMs = System.currentTimeMillis(); + lastCommitVLSN.set(vlsn); + lastCommitTimestamp.set(ackAwaitStartMs); + vlsnRate.add(vlsn, ackAwaitStartMs); + + TxnInfo txnInfo = txnMap.get(txn.getId()); + if (txnInfo == null) { + return; + } + txnInfo.await(timeoutMs, ackAwaitStartMs); + txnMap.remove(txn.getId()); + final RepNode repNode = repImpl.getRepNode(); + if (repNode != null) { + repNode.getDurabilityQuorum().ensureSufficientAcks( + txnInfo, timeoutMs); + } + } + + /** + * Used to track the latch and the transaction information associated with + * a transaction needing an acknowledgment. + */ + public class TxnInfo { + /* The latch used to track transaction acknowledgments. */ + final private CountDownLatch latch; + final MasterTxn txn; + + private TxnInfo(MasterTxn txn) { + assert(txn != null); + final int numRequiredAcks = txn.getRequiredAckCount(); + this.latch = (numRequiredAcks == 0) ? + null : + new CountDownLatch(numRequiredAcks); + this.txn = txn; + } + + /** + * Returns the VLSN associated with the committed txn, or null if the + * txn has not yet been committed. + */ + public VLSN getCommitVLSN() { + return txn.getCommitVLSN(); + } + + private final boolean await(int timeoutMs, long ackAwaitStartMs) + throws InterruptedException { + + boolean isZero = (latch == null) || + latch.await(timeoutMs, TimeUnit.MILLISECONDS); + if (isZero) { + txnsAcked.increment(); + final long now = System.currentTimeMillis(); + ackWaitMs.add(now - ackAwaitStartMs); + totalTxnMs.add(now - txn.getStartMs()); + } else { + txnsNotAcked.increment(); + } + return isZero; + } + + public final void countDown() { + if (latch == null) { + return; + } + + latch.countDown(); + } + + public final int getPendingAcks() { + if (latch == null) { + return 0; + } + + return (int) latch.getCount(); + } + + public final MasterTxn getTxn() { + return txn; + } + } + + public StatGroup getStats() { + StatGroup ret = statistics.cloneGroup(false); + + return ret; + } + + public void resetStats() { + statistics.clear(); + } + + public StatGroup getStats(StatsConfig config) { + + StatGroup cloneStats = statistics.cloneGroup(config.getClear()); + + return cloneStats; + } +} diff --git a/src/com/sleepycat/je/rep/stream/InputWireRecord.java b/src/com/sleepycat/je/rep/stream/InputWireRecord.java new file mode 100644 index 0000000..34b774e --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/InputWireRecord.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.utilint.VLSN; + +/** + * Format for messages received at across the wire for replication. Instead of + * sending a direct copy of the log entry as it is stored on the JE log files + * (LogEntryHeader + LogEntry), select parts of the header are sent. + * + * An InputWireRecord de-serializes the logEntry from the message bytes and + * releases any claim on the backing ByteBuffer. + */ +public class InputWireRecord extends WireRecord { + + private final LogEntry logEntry; + + /** + * Make a InputWireRecord from an incoming replication message buffer for + * applying at a replica. + * @throws DatabaseException + */ + InputWireRecord(final EnvironmentImpl envImpl, + final ByteBuffer msgBuffer, + final BaseProtocol protocol) + throws DatabaseException { + + super(createLogEntryHeader(msgBuffer, protocol)); + + logEntry = instantiateEntry(envImpl, msgBuffer); + } + + private static LogEntryHeader createLogEntryHeader( + final ByteBuffer msgBuffer, final BaseProtocol protocol) { + + final byte entryType = msgBuffer.get(); + int entryVersion = LogUtils.readInt(msgBuffer); + final int itemSize = LogUtils.readInt(msgBuffer); + final VLSN vlsn = new VLSN(LogUtils.readLong(msgBuffer)); + + /* + * Check to see if we need to fix the entry's log version to work + * around [#25222]. + */ + if ((entryVersion > LogEntryType.LOG_VERSION_EXPIRE_INFO) + && protocol.getFixLogVersion12Entries()) { + entryVersion = LogEntryType.LOG_VERSION_EXPIRE_INFO; + } + + return new LogEntryHeader(entryType, entryVersion, itemSize, vlsn); + } + + /** + * Unit test support. + * @throws DatabaseException + */ + InputWireRecord(final EnvironmentImpl envImpl, + final byte entryType, + final int entryVersion, + final int itemSize, + final VLSN vlsn, + final ByteBuffer entryBuffer) + throws DatabaseException { + + super(new LogEntryHeader(entryType, entryVersion, itemSize, vlsn)); + logEntry = LogEntryType.findType(header.getType()). + getNewLogEntry(); + logEntry.readEntry(envImpl, header, entryBuffer); + + } + + public VLSN getVLSN() { + return header.getVLSN(); + } + + public byte getEntryType() { + return header.getType(); + } + + public LogEntry getLogEntry() { + return logEntry; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + header.dumpRep(sb); + sb.append(" "); + logEntry.dumpRep(sb); + return sb.toString(); + } + + /** + * Convert the full version of the log entry to a string. 
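+     * Both the entry header and the entry body are included.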
+ */ + public String dumpLogEntry() { + StringBuilder sb = new StringBuilder(); + sb.append(header); + sb.append(" ").append(logEntry); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/rep/stream/MasterChangeListener.java b/src/com/sleepycat/je/rep/stream/MasterChangeListener.java new file mode 100644 index 0000000..0302f10 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/MasterChangeListener.java @@ -0,0 +1,78 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import java.net.InetSocketAddress; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.elections.Learner; +import com.sleepycat.je.rep.elections.MasterValue; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * The Listener registered with Elections to learn about new Masters + */ +public class MasterChangeListener implements Learner.Listener { + + /* The Value that is "current" for this Node. */ + private Value currentValue = null; + + private final RepNode repNode; + private final Logger logger; + + public MasterChangeListener(RepNode repNode) { + this.repNode = repNode; + logger = LoggerUtils.getLogger(getClass()); + } + + /** + * Implements the Listener protocol. The method should not have any + * operations that might wait, since notifications are single threaded. + */ + @Override + public void notify(Proposal proposal, Value value) { + + try { + repNode.getVLSNFreezeLatch().vlsnEvent(proposal); + /* We have a new proposal, is it truly different? */ + if (value.equals(currentValue)) { + LoggerUtils.fine(logger, repNode.getRepImpl(), + "Master change listener -- no value change." + + "Proposal: " + proposal + " Value: " + value); + return; + } + + MasterValue masterValue = ((MasterValue) value); + + LoggerUtils.fine(logger, repNode.getRepImpl(), + "Master change listener notified. Proposal:" + + proposal + " Value: " + value); + LoggerUtils.info(logger, repNode.getRepImpl(), + "Master changed to " + + masterValue.getNameId().getName()); + + repNode.getMasterStatus().setGroupMaster + (masterValue.getHostName(), + masterValue.getPort(), + masterValue.getNameId()); + + /* Propagate the information to any monitors. */ + repNode.getElections().asyncInformMonitors(proposal, value); + } finally { + currentValue = value; + } + } +} diff --git a/src/com/sleepycat/je/rep/stream/MasterFeederSource.java b/src/com/sleepycat/je/rep/stream/MasterFeederSource.java new file mode 100644 index 0000000..767b7cf --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/MasterFeederSource.java @@ -0,0 +1,116 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import java.io.IOException; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.cleaner.FileProtector; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * Implementation of a master node acting as a FeederSource. The + * MasterFeederSource is stateful, because it keeps its own FeederReader which + * acts as a cursor or scanner across the log files, so it can only be used by + * a single Feeder. + */ +public class MasterFeederSource implements FeederSource { + + private final FeederReader feederReader; + + /* Protects files being read from being deleted. See FileProtector. */ + private final FileProtector.ProtectedFileRange protectedFileRange; + + public MasterFeederSource(EnvironmentImpl envImpl, + VLSNIndex vlsnIndex, + NameIdPair replicaNameIdPair, + VLSN startVLSN) + throws DatabaseException, IOException { + + int readBufferSize = + envImpl.getConfigManager().getInt + (EnvironmentParams.LOG_ITERATOR_READ_SIZE); + + feederReader = new FeederReader(envImpl, + vlsnIndex, + DbLsn.NULL_LSN, // startLsn + readBufferSize); + + long startLsn = feederReader.initScan(startVLSN); + + /* + * Syncup currently protects all files in the entire VLSNIndex range. + * This allows us to safely protect files from the matchpoint onward. + */ + protectedFileRange = envImpl.getFileProtector().protectFileRange( + FileProtector.FEEDER_NAME + "-" + replicaNameIdPair, + DbLsn.getFileNumber(startLsn), + true /*protectVlsnIndex*/); + } + + /** + * Must be called to allow deletion of files protected by this feeder. + */ + @Override + public void shutdown(EnvironmentImpl envImpl) { + envImpl.getFileProtector().removeFileProtection(protectedFileRange); + } + + /* + * @see com.sleepycat.je.rep.stream.FeederSource#getLogRecord + * (com.sleepycat.je.utilint.VLSN, int) + */ + @Override + public OutputWireRecord getWireRecord(VLSN vlsn, int waitTime) + throws DatabaseException, InterruptedException, IOException { + + try { + OutputWireRecord record = + feederReader.scanForwards(vlsn, waitTime); + + if (record == null) { + return null; + } + + /* + * Advance the protected file range when we advance to a new file, + * to allow deletion of older files. Use getRangeStart (which is + * not synchronized) to check whether the file has advanced, before + * calling advanceRange (which is synchronized). This check must be + * inexpensive and non-blocking. 
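+         *
+         * In effect this is the familiar check-then-lock idiom: the
+         * unsynchronized getRangeStart read cheaply filters out the common
+         * case where the file has not changed, and only an actual advance
+         * pays for the synchronized advanceRange call. A stale read here is
+         * harmless: at worst it costs one extra synchronized call.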
+ */ + long lastFile = feederReader.getLastFile(record); + if (lastFile > protectedFileRange.getRangeStart()) { + protectedFileRange.advanceRange(lastFile); + } + + return record; + } catch (DatabaseException e) { + /* Add more information */ + e.addErrorMessage + ("MasterFeederSource fetching vlsn=" + vlsn + + " waitTime=" + waitTime); + throw e; + } + } + + @Override + public String dumpState() { + return feederReader.dumpState(); + } +} diff --git a/src/com/sleepycat/je/rep/stream/MasterStatus.java b/src/com/sleepycat/je/rep/stream/MasterStatus.java new file mode 100644 index 0000000..6ae8b9b --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/MasterStatus.java @@ -0,0 +1,178 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import java.net.InetSocketAddress; + +import com.sleepycat.je.rep.impl.node.NameIdPair; + +/** + * Class used by a node to track changes in Master Status. It's updated by + * the Listener. It represents the abstract notion that the notion of the + * current Replica Group is definitive and is always in advance of the notion + * of a master at each node. A node is typically playing catch up as it tries + * to bring its view in line with that of the group. + */ +public class MasterStatus implements Cloneable { + + /* This node's identity */ + private final NameIdPair nameIdPair; + + /* The current master resulting from election notifications */ + private String groupMasterHostName = null; + private int groupMasterPort = 0; + /* The node ID used to identify the master. */ + private NameIdPair groupMasterNameId = NameIdPair.NULL; + + /* + * The Master as implemented by the Node. It can lag the groupMaster + * as the node tries to catch up. + */ + private String nodeMasterHostName = null; + private int nodeMasterPort = 0; + private NameIdPair nodeMasterNameId = NameIdPair.NULL; + + public MasterStatus(NameIdPair nameIdPair) { + this.nameIdPair = nameIdPair; + } + + /** + * Returns a read-only snapshot of the object. + */ + @Override + public synchronized Object clone() { + try { + return super.clone(); + } catch (CloneNotSupportedException e) { + assert(false); + } + return null; + } + + /** + * Returns true if it's the master from the Group's perspective + */ + public synchronized boolean isGroupMaster() { + final int id = nameIdPair.getId(); + return (id != NameIdPair.NULL_NODE_ID) && + (id == groupMasterNameId.getId()); + } + + /** + * Returns true if it's the master from the node's localized perspective + */ + public synchronized boolean isNodeMaster() { + final int id = nameIdPair.getId(); + return (id != NameIdPair.NULL_NODE_ID) && + (id == nodeMasterNameId.getId()); + } + + public synchronized void setGroupMaster(String hostname, + int port, + NameIdPair newGroupMasterNameId) { + groupMasterHostName = hostname; + groupMasterPort = port; + groupMasterNameId = newGroupMasterNameId; + } + + /** + * Predicate to determine whether the group and node have a consistent + * notion of the Master. 
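+     *
+     * For example (a hypothetical timeline): after an election completes,
+     * the Listener calls setGroupMaster and the group's notion moves ahead;
+     * until this node reacts by calling sync(), inSync() returns false and
+     * assertSync() throws MasterSyncException.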
+     *
+     * @return false if the node does not know of a Master, or the group
+     * Master is different from the node's notion of the master.
+     */
+
+    public synchronized boolean inSync() {
+        return !nodeMasterNameId.hasNullId() &&
+            (groupMasterNameId.getId() == nodeMasterNameId.getId());
+    }
+
+    public synchronized void unSync() {
+        nodeMasterHostName = null;
+        nodeMasterPort = 0;
+        nodeMasterNameId = NameIdPair.NULL;
+    }
+
+    /**
+     * An assertion form of the above. By combining the check and exception
+     * generation in an atomic operation, it provides for an accurate
+     * exception message.
+     *
+     * @throws MasterSyncException
+     */
+    public synchronized void assertSync()
+        throws MasterSyncException {
+
+        if (!inSync()) {
+            throw new MasterSyncException();
+        }
+    }
+
+    /**
+     * Syncs to the group master.
+     */
+    public synchronized void sync() {
+        nodeMasterHostName = groupMasterHostName;
+        nodeMasterPort = groupMasterPort;
+        nodeMasterNameId = groupMasterNameId;
+    }
+
+    /**
+     * Returns the Node's current idea of the Master. It may be "out of sync"
+     * with the Group's notion of the Master.
+     */
+    public synchronized InetSocketAddress getNodeMaster() {
+        if (nodeMasterHostName == null) {
+            return null;
+        }
+        return new InetSocketAddress(nodeMasterHostName, nodeMasterPort);
+    }
+
+    public synchronized NameIdPair getNodeMasterNameId() {
+        return nodeMasterNameId;
+    }
+
+    /**
+     * Returns a socket address that can be used to communicate with the
+     * group master. It can return null if there is no current group master,
+     * that is, if groupMasterNameId is NULL.
+     */
+    public synchronized InetSocketAddress getGroupMaster() {
+        if (groupMasterHostName == null) {
+            return null;
+        }
+        return new InetSocketAddress(groupMasterHostName, groupMasterPort);
+    }
+
+    public synchronized NameIdPair getGroupMasterNameId() {
+        return groupMasterNameId;
+    }
+
+    @SuppressWarnings("serial")
+    public class MasterSyncException extends Exception {
+        private final NameIdPair savedGroupMasterId;
+        private final NameIdPair savedNodeMasterId;
+
+        MasterSyncException() {
+            savedGroupMasterId = MasterStatus.this.getGroupMasterNameId();
+            savedNodeMasterId = MasterStatus.this.getNodeMasterNameId();
+        }
+
+        @Override
+        public String getMessage() {
+            return "Master change. Node master id: " + savedNodeMasterId +
+                " Group master id: " + savedGroupMasterId;
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/rep/stream/MasterSuggestionGenerator.java b/src/com/sleepycat/je/rep/stream/MasterSuggestionGenerator.java
new file mode 100644
index 0000000..8d6d94f
--- /dev/null
+++ b/src/com/sleepycat/je/rep/stream/MasterSuggestionGenerator.java
@@ -0,0 +1,95 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.stream;
+
+import com.sleepycat.je.rep.elections.Acceptor;
+import com.sleepycat.je.rep.elections.MasterValue;
+import com.sleepycat.je.rep.elections.Proposer.Proposal;
+import com.sleepycat.je.rep.elections.Protocol.Value;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * A basic suggestion generator.
+ *
+ * A more sophisticated version may contact other replica nodes to see if it
+ * has sufficient connectivity to implement the commit policy in effect for
+ * the Replication Group. Keep it simple for now.
+ */
+public class MasterSuggestionGenerator
+    implements Acceptor.SuggestionGenerator {
+
+    private final RepNode repNode;
+
+    /* Determines whether to use pre-emptive ranking to make this
+     * node the Master during the next election */
+    private boolean forceAsMaster = false;
+
+    /* Used during a forced election to guarantee this proposal as a winner. */
+    private static final Ranking PREEMPTIVE_RANKING =
+        new Ranking(Long.MAX_VALUE, 0);
+    /* The ranking used to ensure that a current master is reselected. */
+    private static final Ranking MASTER_RANKING =
+        new Ranking(Long.MAX_VALUE - 1, 0);
+
+    public MasterSuggestionGenerator(RepNode repNode) {
+        this.repNode = repNode;
+    }
+
+    @Override
+    public Value get(Proposal proposal) {
+        /* Suggest myself as master */
+        return new MasterValue(repNode.getHostName(),
+                               repNode.getPort(),
+                               repNode.getNameIdPair());
+    }
+
+    @Override
+    public Ranking getRanking(Proposal proposal) {
+        if (forceAsMaster) {
+            return PREEMPTIVE_RANKING;
+        }
+        repNode.getVLSNFreezeLatch().freeze(proposal);
+
+        if (repNode.isAuthoritativeMaster()) {
+            return MASTER_RANKING;
+        }
+
+        final long dtvlsn = repNode.getDTVLSN();
+        final long vlsn = repNode.getVLSNIndex().getRange().
+            getLast().getSequence();
+
+        if (dtvlsn == VLSN.UNINITIALIZED_VLSN_SEQUENCE) {
+            /*
+             * In a preDTVLSN stream segment on a postDTVLSN replica. No
+             * DTVLSN information as yet.
+             */
+            return new Ranking(vlsn, 0);
+        }
+
+        return new Ranking(dtvlsn, vlsn);
+    }
+
+    /**
+     * This entry point is for testing only.
+     *
+     * It will submit a Proposal with a preemptive ranking so that it's
+     * guaranteed to be selected as the master at the next election.
+     *
+     * @param force determines whether the forced proposal is in effect
+     */
+    public void forceMaster(boolean force) {
+        this.forceAsMaster = force;
+    }
+}
diff --git a/src/com/sleepycat/je/rep/stream/MatchpointSearchResults.java b/src/com/sleepycat/je/rep/stream/MatchpointSearchResults.java
new file mode 100644
index 0000000..0eaefeb
--- /dev/null
+++ b/src/com/sleepycat/je/rep/stream/MatchpointSearchResults.java
@@ -0,0 +1,331 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.stream;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.txn.TxnAbort;
+import com.sleepycat.je.txn.TxnCommit;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.Timestamp;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * Holds information seen by the ReplicaSyncupReader when
+ * scanning a replica's log for a matchpoint.
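+ *
+ * A hypothetical walk-through (VLSN values are illustrative only): if the
+ * replica's log holds commits at VLSNs 10, 11 and 12 while the feeder's log
+ * ends at 10, the syncup reader scans backwards from 12, recording 12 and 11
+ * as passed commits, and settles on the entry at VLSN 10 as the matchpoint.
+ * Everything after the matchpoint becomes a candidate for rollback, and the
+ * passed-commit counts recorded here feed the rollback error messages.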
+ */
+public class MatchpointSearchResults implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private long matchpointLSN;
+
+    /*
+     * Track whether we passed a checkpoint which deleted cleaned log files.
+     * If so, we cannot do a hard recovery.
+     */
+    private boolean passedCheckpointEnd;
+
+    /*
+     * If we skip a gap in the log file when searching for a matchpoint, we
+     * can no longer be sure whether we have passed checkpoints.
+     */
+    private boolean passedSkippedGap;
+
+    /*
+     * We save a set number of passed transactions for debugging information.
+     * The list is limited in size in case we end up passing a large number
+     * of transactions on our hunt for a matchpoint. Note that we save both
+     * durable and non-durable transactions to aid with debugging.
+     */
+    private final List<PassedTxnInfo> passedTxns;
+    private final int passedTxnLimit;
+
+    /*
+     * We need to keep the penultimate passed txn so we can readjust the
+     * passed transaction information when the matchpoint is found. For
+     * example, suppose the log contains:
+     *    txn A commit
+     *    txn B commit
+     *    txn C commit
+     * and txn A commit is the matchpoint. The way the loop works, we'll
+     * have earliestPassedTxn = txnA, and penultimate = txn B. If the
+     * matchpoint is txnA, the log will be truncated at txnB, and we
+     * should reset earliestPassedTxn = txnB.
+     *
+     * Note that numPassedDurableCommits is a subset of numPassedCommits.
+     */
+    private PassedTxnInfo earliestPassedTxn;
+    private PassedTxnInfo penultimatePassedTxn;
+    private int numPassedCommits;
+    private int numPassedDurableCommits;
+
+    /*
+     * The DTVLSN established when scanning the log backwards from high to
+     * low VLSNs.
+     */
+    private VLSN dtvlsn = VLSN.NULL_VLSN;
+
+    public MatchpointSearchResults(EnvironmentImpl envImpl) {
+
+        /*
+         * The matchpointLSN should be a non-null value, because we always
+         * have to provide a valid truncation point.
+         */
+        matchpointLSN = DbLsn.makeLsn(0, 0);
+
+        passedTxnLimit =
+            envImpl.getConfigManager().getInt(RepParams.TXN_ROLLBACK_LIMIT);
+        passedTxns = new ArrayList<>();
+        numPassedCommits = 0;
+        numPassedDurableCommits = 0;
+    }
+
+    /**
+     * If we see a checkpoint end record, see if it is a barrier to
+     * rolling back, and advance the file reader position.
+     */
+    void notePassedCheckpointEnd() {
+        passedCheckpointEnd = true;
+    }
+
+    /**
+     * Keep track of whether we have jumped over a gap in the log files, and
+     * might have missed a checkpoint end.
+     */
+    void noteSkippedGap() {
+        passedSkippedGap = true;
+    }
+
+    /**
+     * At the end of the search for a matchpoint, set the matchpointLSN and
+     * adjust the debugging list of passed transactions. The matchpoint entry
+     * is just before the truncation point, and does not get truncated.
+     */
+    void setMatchpoint(long match) {
+        matchpointLSN = match;
+        if ((earliestPassedTxn != null) &&
+            (earliestPassedTxn.lsn == matchpointLSN)) {
+            numPassedCommits--;
+
+            if (passedTxns.size() > 0) {
+                int lastSavedTxn = passedTxns.size() - 1;
+                if (passedTxns.get(lastSavedTxn).lsn == match) {
+                    passedTxns.remove(lastSavedTxn);
+                }
+                earliestPassedTxn = penultimatePassedTxn;
+            }
+        }
+    }
+
+    /**
+     * The reader saw a transaction commit; record relevant information. Note
+     * that we record all transactions (replicated and non-replicated) to
+     * avoid cutting off a checkpoint. Note that the stream can be a
+     * combination of log records from pre and post-dtvlsn masters. Note that
+     * we are scanning the log backwards, so we are encountering commits in
+     * decreasing VLSN sequence.
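+     *
+     * Concretely, the durability test below marks a passed commit as
+     * durable only when the commit has logged entries and either the scan is
+     * still in a pre-DTVLSN stream segment (the recorded DTVLSN is
+     * uninitialized) or the commit's VLSN is at or below the DTVLSN seen so
+     * far.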
+ * + * @param commit the commit being passed. Note that this object is reused + * and so should not be saved. + * + * @param commitVLSN the VLSN associated with the commit. It could be null + * if the commit was local and not HA + * + * @param commitLSN the LSN at which the commit is located + */ + void notePassedCommits(TxnCommit commit, + VLSN commitVLSN, + long commitLSN) { + boolean durableCommit = false; + final Timestamp commitTime = commit.getTime(); + final long txnId = commit.getId(); + final long commitDTVLSN = commit.getDTVLSN(); + + if ((commitVLSN != null) && !commitVLSN.isNull()) { + /* A replicated txn */ + processDTVLSN(commitVLSN, commitDTVLSN); + + if (commit.hasLoggedEntries() && + ((dtvlsn.getSequence() == VLSN.UNINITIALIZED_VLSN_SEQUENCE) || + (commitVLSN.compareTo(dtvlsn) <= 0))) { + numPassedDurableCommits++; + durableCommit = true; + } + } + + numPassedCommits++; + if (earliestPassedTxn != null) { + penultimatePassedTxn = earliestPassedTxn; + } + earliestPassedTxn = + new PassedTxnInfo(commitTime, txnId, commitVLSN, commitLSN, + durableCommit); + + /* + * Save only a limited number of passed txns, for displaying to the log + */ + if (numPassedCommits <= passedTxnLimit) { + passedTxns.add(earliestPassedTxn); + } + } + + private void processDTVLSN(VLSN txnEndVLSN, final long txnEndDTVLSN) { + if (dtvlsn.isNull()) { + /* + * The first commit/abort record, set the dtvlsn to a + * non null value + */ + dtvlsn = new VLSN(txnEndDTVLSN); + } else { + /* + * Already set, verify sequencing. Make allowance for transitions + * from pre to post-DTVLSN stream segments. Note that we are going + * backwards in the log at this point. + */ + if ((txnEndDTVLSN > dtvlsn.getSequence()) && + (dtvlsn.getSequence() != VLSN.UNINITIALIZED_VLSN_SEQUENCE)) { + throw new IllegalStateException + ("DTVLSNs should only decrease with decreasing VLSNs." + + " prev:" + dtvlsn + " next:" + txnEndDTVLSN + + " commit VLSN:" + txnEndVLSN); + } + } + } + + /** + * Use the DTVLSN value from a replicated abort to set the dtvlsn. + * + * @param abort the abort log record + * + * @param abortVLSN the VLSN associated with the abort log record + */ + void notePassedAborts(TxnAbort abort, + VLSN abortVLSN) { + + if (abortVLSN.isNull()) { + /* A non-replicated abort. */ + return; + } + + final long abortDTVLSN = abort.getDTVLSN(); + processDTVLSN(abortVLSN, abortDTVLSN); + } + + boolean getPassedCheckpointEnd() { + return passedCheckpointEnd; + } + + boolean getSkippedGap() { + return passedSkippedGap; + } + + public long getMatchpointLSN() { + return matchpointLSN; + } + + public int getNumPassedCommits() { + return numPassedCommits; + } + + public int getNumPassedDurableCommits() { + return numPassedDurableCommits; + } + + public PassedTxnInfo getEarliestPassedTxn() { + return earliestPassedTxn; + } + + public VLSN getDTVLSN() { + return dtvlsn; + } + + /** + * Display the saved transaction information. 
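+     *
+     * Each PassedTxnInfo prints on its own line via its toString, along the
+     * lines of (values are illustrative only):
+     * <pre>
+     *   id=-27 time=... vlsn=1042 lsn=0x3/0x1f2a durable=true
+     * </pre>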
+ */ + public String dumpPassedTxns() { + StringBuilder sb = new StringBuilder(); + for (PassedTxnInfo info : passedTxns) { + sb.append(info); + sb.append("\n"); + } + return sb.toString(); + } + + @Override + public String toString() { + return "matchpointLSN=" + DbLsn.getNoFormatString(matchpointLSN) + + " passedCkpt=" + passedCheckpointEnd + + " passedTxnLimit=" + passedTxnLimit + + " passedTxns=" + passedTxns + + " earliestTxn=" + earliestPassedTxn + + " penultimateTxn=" + penultimatePassedTxn + + " numPassedCommits=" + numPassedCommits + + " numPassedDurableCommits=" + numPassedDurableCommits + + " passedSkippedGap=" + passedSkippedGap; + } + + /** + * If 1 or more commits was passed, construct a message that can + * be used by RollbackException and RollbackProhibitedException. + */ + public String getRollbackMsg() { + if (numPassedCommits == 0) { + return " uncommitted operations"; + } + + if (numPassedDurableCommits == 0) { + return " " + numPassedCommits + + " total commits to the earliest point indicated by transaction " + + earliestPassedTxn; + } + + return " " + numPassedCommits + " total commits(" + + numPassedDurableCommits + " of which were durable) " + + "to the earliest point indicated by transaction " + + earliestPassedTxn; + } + + /* Struct to hold information about passed txns. */ + public static class PassedTxnInfo implements Serializable { + private static final long serialVersionUID = 1L; + + public final Timestamp time; + public final long id; + public final VLSN vlsn; + public final long lsn; + public final boolean durableCommit; + + PassedTxnInfo(Timestamp time, long id, VLSN vlsn, long lsn, + boolean durableCommit) { + this.time = time; + this.id = id; + this.vlsn = vlsn; + this.lsn = lsn; + this.durableCommit = durableCommit; + } + + @Override + public String toString() { + return "id=" + id + + " time=" + time + + " vlsn=" + vlsn + + " lsn=" + DbLsn.getNoFormatString(lsn) + + " durable=" + durableCommit; + } + } +} diff --git a/src/com/sleepycat/je/rep/stream/OutputWireRecord.java b/src/com/sleepycat/je/rep/stream/OutputWireRecord.java new file mode 100644 index 0000000..fe2e51d --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/OutputWireRecord.java @@ -0,0 +1,527 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.stream; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.ReplicableLogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.txn.TxnCommit; +import com.sleepycat.je.txn.TxnEnd; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * Format for log entries sent across the wire for replication. In most + * cases, the bytes are read directly from the log and never need to be + * serialized into the backing object. + * + * Note that the ByteBuffer held within the OutputWireRecord has a limited + * lifetime. Often it's just sliced, rather than copied from the underlying + * buffer. + */ +public class OutputWireRecord extends WireRecord { + + protected final ByteBuffer entryBuffer; + protected final EnvironmentImpl envImpl; + private final LogItem logItem; + + /** A shared entry of the type specified by the header, or null */ + private ReplicableLogEntry sharedEntry = null; + + /** A log entry created from the data in the entry buffer, or null */ + private ReplicableLogEntry logEntry = null; + + /** Whether the log entry will be re-serialized, or null if unknown. */ + private Boolean reserialize = null; + + /** Size of re-serialized log entry, or -1 if reserialize != true. */ + private int reSerializedSize = -1; + + /** Whether an old log format must be used, or null if unknown. */ + private Boolean oldFormatRequired = null; + + /** + * Make a OutputWireRecord from FileReader output for sending out. + */ + OutputWireRecord(final EnvironmentImpl envImpl, + final LogEntryHeader header, + final ByteBuffer readerBuffer) { + super(header); + this.envImpl = envImpl; + this.logItem = null; + this.entryBuffer = readerBuffer.slice(); + this.entryBuffer.limit(header.getItemSize()); + } + + /** + * Creates an OutputWireRecord from a log item. This constructor is used + * when a Feeder can bypass access to the log because the log item is + * available in the log item cache associated with the VLSNIndex. + */ + OutputWireRecord(final EnvironmentImpl envImpl, final LogItem logItem) { + super(logItem.header); + this.envImpl = envImpl; + this.logItem = logItem; + final ByteBuffer buffer = logItem.buffer; + buffer.position(header.getSize()); + entryBuffer = buffer.slice(); + assert entryBuffer.limit() == header.getItemSize() : + "Limit:" + entryBuffer.limit() + " size:" + header.getItemSize(); + } + + /* For unit test support. */ + OutputWireRecord(final EnvironmentImpl envImpl, + final InputWireRecord input) { + super(input.header); + this.envImpl = envImpl; + this.logItem = null; + final LogEntry entry = input.getLogEntry(); + this.entryBuffer = ByteBuffer.allocate(entry.getSize()); + entry.writeEntry(entryBuffer); + entryBuffer.flip(); + } + + /** + * Returns the shared replicable log entry associated with the log entry + * header. 
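+     *
+     * The shared entry is, in effect, a stateless prototype instance kept by
+     * LogEntryType for each entry type; it lets callers such as
+     * willReSerialize ask format questions (getLastFormatChange,
+     * isReplicationFormatWorthwhile) without materializing this record's
+     * payload.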
+     */
+    private synchronized ReplicableLogEntry getSharedEntry()
+        throws DatabaseException {
+
+        if (sharedEntry == null) {
+            final LogEntryType entryType = getLogEntryType();
+            if (!entryType.isReplicationPossible()) {
+                throw EnvironmentFailureException.unexpectedState(
+                    "Log entry type does not support replication: " +
+                    entryType);
+            }
+            sharedEntry = (ReplicableLogEntry) entryType.getSharedLogEntry();
+        }
+        return sharedEntry;
+    }
+
+    /**
+     * Returns a log entry corresponding to the entry buffer. Note that the
+     * log entry will be created at most once.
+     *
+     * When a LogItem from the LogItemCache was used to construct this record,
+     * we cache the materialized entry in LogItem to try to avoid redundant
+     * materialization in multiple feeders.
+     */
+    public synchronized ReplicableLogEntry instantiateEntry()
+        throws DatabaseException {
+
+        if (logEntry != null) {
+            return logEntry;
+        }
+        if (logItem != null) {
+            logEntry = logItem.cachedEntry;
+            if (logEntry != null) {
+                return logEntry;
+            }
+        }
+        final LogEntry entry = instantiateEntry(envImpl, entryBuffer);
+        if (!(entry instanceof ReplicableLogEntry)) {
+            throw EnvironmentFailureException.unexpectedState(
+                "Log entry type does not support replication: " +
+                entry.getClass().getName());
+        }
+        logEntry = (ReplicableLogEntry) entry;
+        if (logItem != null) {
+            logItem.cachedEntry = logEntry;
+        }
+        return logEntry;
+    }
+
+    /**
+     * @return the log entry type for this record.
+     */
+    public byte getEntryType() {
+        return header.getType();
+    }
+
+    /**
+     * Used at syncup, when comparing records received from the feeder
+     * against local records.
+     *
+     * @return true if this OutputWireRecord has the same logical contents as
+     * the InputWireRecord. The comparison will disregard portions of the
+     * logEntry that may be different, such as timestamps on a Commit
+     * entry. Must be called before the entryBuffer that backs this
+     * OutputWireRecord is reused.
+     * @throws DatabaseException
+     */
+    public boolean match(final InputWireRecord input)
+        throws DatabaseException {
+
+        /*
+         * Ignore the log version check if the log versions on the feeder and
+         * replica don't match. This would happen if the group is doing an
+         * upgrade that requires a log version change.
+         */
+        if (!header.logicalEqualsIgnoreVersion(input.header)) {
+            return false;
+        }
+
+        final LogEntry entry = instantiateEntry();
+        return entry.logicalEquals(input.getLogEntry());
+    }
+
+    /**
+     * For unit tests.
+     * @return true if this OutputWireRecord has the same logical contents as
+     * "other".
+     * @throws DatabaseException
+     */
+    public boolean match(final OutputWireRecord otherRecord)
+        throws DatabaseException {
+
+        if (!header.logicalEqualsIgnoreVersion(otherRecord.header)) {
+            return false;
+        }
+
+        final LogEntry entry = instantiateEntry();
+        final LogEntry otherEntry =
+            otherRecord.instantiateEntry(envImpl, otherRecord.entryBuffer);
+        return entry.logicalEquals(otherEntry);
+    }
+
+    /**
+     * Returns the lsn if this entry was created from the log item cache, and
+     * NULL_LSN otherwise.
+     */
+    long getLogItemLSN() {
+        return (logItem != null) ? logItem.lsn : DbLsn.NULL_LSN;
+    }
+
+    public VLSN getVLSN() {
+        return header.getVLSN();
+    }
+
+    /**
+     * Dump the contents.
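+     * Note that this materializes the log entry via instantiateEntry, so it
+     * is intended for debugging rather than for the feeder's hot path.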
+ * @throws DatabaseException + */ + public String dump() + throws DatabaseException { + + final StringBuilder sb = new StringBuilder(); + header.dumpRep(sb); + final LogEntry entry = instantiateEntry(); + entry.dumpRep(sb); + return sb.toString(); + } + + @Override + public String toString() { + try { + return dump(); + } catch (DatabaseException e) { + e.printStackTrace(); + return ""; + } + } + + /** + * Returns the number of bytes needed to represent the message data for this + * record for the specified log version. + */ + int getWireSize(final int logVersion) { + return 1 + 4 + 4 + VLSN.LOG_SIZE + getEntrySize(logVersion); + } + + /** + * Returns the number of bytes needed to represent the entry portion of the + * message data for this record for the specified log version. + */ + private int getEntrySize(final int logVersion) { + return willReSerialize(logVersion) ? + reSerializedSize : header.getItemSize(); + } + + /** + * Returns whether the log entry will be re-serialized when written, due to + * a required format change or because an optimized replication format + * should be used. + * + * This method caches its result, so that it can be called twice (from + * getEntrySize and writeToWire) and will not repeat the calculation or + * return different results. If true is returned, reSerializedSize will + * also be set. + */ + private boolean willReSerialize(final int logVersion) { + + if (reserialize != null) { + return reserialize; + } + + int newSize = -1; + + if (header.getVersion() < 8) { + /* + * Before version 8, duplicates LN format conversion is necessary + * and we don't have the DatabaseImpl needed to do that, so we + * cannot convert to a later version. Conversion to an earlier + * version is not needed, because we can only convert to versions + * greater than 8. So re-serialization is not possible or needed. + */ + reserialize = false; + + } else if (isOldFormatRequired(logVersion)) { + /* Re-serialization is mandatory. */ + reserialize = true; + + } else { + /* Determine whether re-serialization is worthwhile. */ + if (logEntry != null) { + /* + * If we have the entry, then using the optimized replication + * format is worthwhile simply if the entry has an optimized + * format and its size is smaller, since the cost of + * re-serialization is fairly low. + */ + if (logEntry.hasReplicationFormat()) { + newSize = logEntry.getSize( + logVersion, true /*forReplication*/); + reserialize = header.getItemSize() > newSize; + } else { + reserialize = false; + } + } else { + /* + * If we must materialize the entry in order to re-serialize + * it, then we must make a best guess about whether this is + * worthwhile by examining the entry in serialized format. + */ + reserialize = getSharedEntry().isReplicationFormatWorthwhile( + entryBuffer, header.getVersion(), logVersion); + } + } + + if (reserialize) { + if (newSize == -1) { + newSize = instantiateEntry().getSize( + logVersion, true /*forReplication*/); + } + reSerializedSize = newSize; + } + + assert reserialize != null; + return reserialize; + } + + /** + * Returns whether the format of the entry needs to be changed in order to + * be read by a replica that only understands versions no later than {@code + * logVersion}. 
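+     *
+     * A worked example with hypothetical version numbers: if LOG_VERSION is
+     * 15 and this entry was written at version 12 by an entry class whose
+     * last format change was version 9, a replica at log version 10 does not
+     * force the old format (10 is not below 9), while a replica at version 8
+     * does (8 is below 15, 12 and 9).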
+ */ + private boolean isOldFormatRequired(final int logVersion) { + + if (oldFormatRequired != null) { + return oldFormatRequired; + } + + oldFormatRequired = + /* The requested version is older than the current version, */ + logVersion < LogEntryType.LOG_VERSION && + /* it is older than the entry version, */ + logVersion < header.getVersion() && + /* and it is older than the entry class's last format change */ + logVersion < (getSharedEntry().getLastFormatChange()); + + return oldFormatRequired; + } + + /** + * Write the log header and entry associated with this instance to the + * specified buffer using the format for the specified log version. + * + * @param messageBuffer the destination buffer + * @param logVersion the log version of the format + * @return whether the data format was changed to support an old version. + */ + boolean writeToWire(final ByteBuffer messageBuffer, + final int logVersion) { + + messageBuffer.put(header.getType()); + if (willReSerialize(logVersion)) { + final ReplicableLogEntry entry = instantiateEntry(); + LogUtils.writeInt(messageBuffer, logVersion); + LogUtils.writeInt(messageBuffer, reSerializedSize); + LogUtils.writeLong(messageBuffer, header.getVLSN().getSequence()); + entryBuffer.mark(); + entry.writeEntry( + messageBuffer, logVersion, true /*forReplication*/); + } else { + LogUtils.writeInt(messageBuffer, header.getVersion()); + LogUtils.writeInt(messageBuffer, header.getItemSize()); + LogUtils.writeLong(messageBuffer, header.getVLSN().getSequence()); + entryBuffer.mark(); + messageBuffer.put(entryBuffer); + } + entryBuffer.reset(); + return isOldFormatRequired(logVersion); + } + + /* + * Returns the transaction id associated with a commit log entry. + * @return the transaction id, if it's a commit record, zero otherwise. + */ + public long getCommitTxnId() + throws DatabaseException { + + if (!LogEntryType.LOG_TXN_COMMIT.equalsType(header.getType())) { + return 0; + } + + final LogEntry commitEntry = instantiateEntry(); + return commitEntry.getTransactionId(); + } + + /** + * Returns the timestamp associated with a commit log entry, or 0. + * + * @return the commit timestamp or 0 + */ + public long getCommitTimeStamp() + throws DatabaseException { + + if (!LogEntryType.LOG_TXN_COMMIT.equalsType(header.getType())) { + return 0; + } + + final TxnCommit txnCommit = + (TxnCommit) instantiateEntry().getMainItem(); + return txnCommit.getTime().getTime(); + } + + /* + * Returns the timestamp associated with transaction ending log entry, or + * zero if doesn't end a transaction. + * + * @return the timestamp or zero + */ + public long getTimeStamp() + throws DatabaseException { + + /* + * Use the shared log entry to determine the class of the loggable to + * see if it is worth instantiating it to get the timestamp + */ + final LogEntry sharedLogEntry = getLogEntryType().getSharedLogEntry(); + if (sharedLogEntry instanceof SingleItemEntry) { + final Class logClass = + ((SingleItemEntry) sharedLogEntry).getLogClass(); + if (TxnEnd.class.isAssignableFrom(logClass)) { + final TxnEnd txnEnd = + (TxnEnd) instantiateEntry().getMainItem(); + return txnEnd.getTime().getTime(); + } + } + return 0L; + } + + /** + * Returns the ID of the database containing the associated entry if it is + * a replicable entry. Returns null for non-replicable entries and for + * entries not associated with a database. 
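+     *
+     * Of the replicable entry types, only LN (record-level) entries carry a
+     * database ID here; commit and abort entries, though replicable, are not
+     * tied to a single database and fall through to the null return.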
+ * + * @return the database ID or null + * @throws DatabaseException + */ + public DatabaseId getReplicableDBId() + throws DatabaseException { + + final LogEntryType logEntryType = getLogEntryType(); + + /* Return null for non-replicable entries */ + if (!logEntryType.isReplicationPossible()) { + return null; + } + + /* + * LN entries are the only replicable log entries associated with a + * database + */ + if (!logEntryType.isLNType()) { + return null; + } + + return instantiateEntry().getDbId(); + } + + /* + * Unit test and assertion support: Transaction, database and node IDs in + * the replication sequences are supposed to occupy the negative + * numberspace. + * + * It seems a little fragile to test this here, using instanceof to decide + * what to test. It would be cleaner to put this validity check as part of + * the implementation of a log entry. But this is a HA related check, and + * we want to keep the core code as independent as possible. The check is + * here rather than in some other test code because it needs to access the + * deserialized log entry. We don't want to provide a method which returns + * a logEntry, because in general an OutputWireRecord should not + * instantiate the log entry. + * + * @throws RuntimeException if there are any sequences that are not + * negative. + */ + public boolean verifyNegativeSequences(final String debugTag) { + + LogEntry entry = null; + try { + entry = instantiateEntry(); + } catch (DatabaseException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + + if (entry.getTransactionId() >= 0) { + throw EnvironmentFailureException.unexpectedState + (debugTag + " txn id should be negative: " + entry); + } + + if (entry instanceof LNLogEntry) { + if (LogEntryType.LOG_NAMELN_TRANSACTIONAL.equalsType + (getEntryType())) { + final LNLogEntry lnEntry = (LNLogEntry) entry; + lnEntry.postFetchInit(false /*isDupDb*/); + final NameLN nameLN = (NameLN) lnEntry.getLN(); + if (nameLN.getId().getId() >= 0) { + throw EnvironmentFailureException.unexpectedState + (debugTag + " db id should be negative: " + entry); + } + } else { + if (entry.getDbId().getId() >= 0) { + throw EnvironmentFailureException.unexpectedState + (debugTag + " db id should be negative: " + entry); + } + } + } + + return true; + } +} diff --git a/src/com/sleepycat/je/rep/stream/Protocol.java b/src/com/sleepycat/je/rep/stream/Protocol.java new file mode 100644 index 0000000..f750072 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/Protocol.java @@ -0,0 +1,920 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.stream;
+
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.rep.utilint.RepUtils.Clock;
+
+/**
+ * Defines the messages used to set up a feeder-replica replication stream.
+ *
+ * Note: this protocol is an extension of je.rep.stream.BaseProtocol, which
+ * defines a set of basic message operation codes that are to be used by
+ * its subclasses.
+ *
+ * From Feeder to Replica
+ *
+ *    Heartbeat -> HeartbeatResponse
+ *    Commit -> Ack
+ *    Commit+ -> GroupAck
+ *    Entry
+ *    ShutdownRequest -> ShutdownResponse
+ *
+ * Note: in the future, we may want to support bulk entry messages
+ *
+ * From Replica to Feeder
+ *
+ * The following subset of messages represents the handshake protocol that
+ * precedes the transmission of replication log entries.
+ *
+ *    ReplicaProtocolVersion -> FeederProtocolVersion | DuplicateNodeReject
+ *    ReplicaJEVersions -> FeederJEVersions | JEVersionsReject
+ *    NodeGroupInfo -> NodeGroupInfoOK | NodeGroupInfoReject
+ *    SNTPRequest -> SNTPResponse
+ *    -> HeartbeatResponse
+ *
+ * A HeartbeatResponse is not strictly a response message and may also be sent
+ * spontaneously if there is no output activity in a heartbeat interval. This
+ * spontaneous generation of a HeartbeatResponse ensures that a socket is not
+ * timed out if the feeder or the replica replay is otherwise busy.
+ *
+ * Note that there may be multiple SNTPRequest/SNTPResponse message pairs that
+ * are exchanged as part of a single handshake. So a successful handshake
+ * request sequence generated by the Replica looks like:
+ *
+ *    ReplicaProtocolVersion ReplicaJEVersions MembershipInfo [SNTPRequest]+
+ *
+ * The following messages constitute the syncup and the transmission of log
+ * entries.
+ *
+ *    EntryRequest -> Entry | EntryNotFound | AlternateMatchpoint
+ *    RestoreRequest -> RestoreResponse
+ *    StartStream
+ *
+ * The Protocol instance has local state in terms of buffers that are reused
+ * across multiple messages. A Protocol instance is expected to be used in a
+ * strictly serial fashion. Consequently, there is an instance for each
+ * Replica to Feeder connection, and two instances per Feeder to Replica
+ * connection: one for the InputThread and one for the OutputThread.
+ */
+public class Protocol extends BaseProtocol {
+
+    /**
+     * Normally op codes should fall in the range defined in BaseProtocol;
+     * however, the following op codes are inherited from the original
+     * implementation of the stream protocol, so they are copied here for
+     * backward compatibility.
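+     *
+     * For example, op code 1 below is permanently bound to
+     * ReplicaProtocolVersion; since that is the very first message a replica
+     * sends, renumbering it would make version negotiation with nodes
+     * running older releases impossible.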
+ */ + public final static MessageOp REPLICA_PROTOCOL_VERSION = + new MessageOp((short) 1, ReplicaProtocolVersion.class); + + public final static MessageOp FEEDER_PROTOCOL_VERSION = + new MessageOp((short) 2, FeederProtocolVersion.class); + + public final static MessageOp DUP_NODE_REJECT = + new MessageOp((short) 3, DuplicateNodeReject.class); + + public final static MessageOp REPLICA_JE_VERSIONS = + new MessageOp((short) 4, ReplicaJEVersions.class); + + public final static MessageOp FEEDER_JE_VERSIONS = + new MessageOp((short) 5, FeederJEVersions.class); + + public final static MessageOp JE_VERSIONS_REJECT = + new MessageOp((short) 6, JEVersionsReject.class); + + public final static MessageOp MEMBERSHIP_INFO = + new MessageOp((short) 7, NodeGroupInfo.class); + + public final static MessageOp MEMBERSHIP_INFO_OK = + new MessageOp((short) 8, NodeGroupInfoOK.class); + + public final static MessageOp MEMBERSHIP_INFO_REJECT = + new MessageOp((short) 9, NodeGroupInfoReject.class); + + public final static MessageOp SNTP_REQUEST = + new MessageOp((short)10, SNTPRequest.class); + + public final static MessageOp SNTP_RESPONSE = + new MessageOp((short)11, SNTPResponse.class); + + public final static MessageOp REAUTHENTICATE = + new MessageOp((short)12, ReAuthenticate.class); + + /** All message ops needed by protocol */ + private static final MessageOp[] ALL_MESSAGE_OPS = { + REPLICA_PROTOCOL_VERSION, + FEEDER_PROTOCOL_VERSION, + DUP_NODE_REJECT, + REPLICA_JE_VERSIONS, + FEEDER_JE_VERSIONS, + JE_VERSIONS_REJECT, + MEMBERSHIP_INFO, + MEMBERSHIP_INFO_OK, + MEMBERSHIP_INFO_REJECT, + SNTP_REQUEST, + SNTP_RESPONSE, + ENTRY, + START_STREAM, + HEARTBEAT, + HEARTBEAT_RESPONSE, + COMMIT, + ACK, + ENTRY_REQUEST, + ENTRY_NOTFOUND, + RESTORE_REQUEST, + RESTORE_RESPONSE, + ALT_MATCHPOINT, + SHUTDOWN_REQUEST, + SHUTDOWN_RESPONSE, + GROUP_ACK, + REAUTHENTICATE + }; + + private final Clock clock; + + /** + * Returns a Protocol object configured that implements the specified + * (supported) protocol version. + * + * @param repImpl the node using the protocol + * + * @param nameIdPair name-id pair of the node using the protocol + * + * @param clock clock used by the node + * + * @param protocolVersion the version of the protocol that must be + * implemented by this object + * + * @param maxProtocolVersion the highest supported protocol version, which + * may be lower than the code version, for testing purposes + * + * @param streamLogVersion the log version of the format used to write log + * entries + */ + private Protocol(final RepImpl repImpl, + final NameIdPair nameIdPair, + final Clock clock, + final int protocolVersion, + final int maxProtocolVersion, + final int streamLogVersion, + @SuppressWarnings("unused") + final int groupFormatVersion) { + + super(repImpl, nameIdPair, protocolVersion, maxProtocolVersion, + streamLogVersion, ALL_MESSAGE_OPS, + /* no validity check, from legacy protocol */ + false); + + this.configuredVersion = protocolVersion; + this.clock = clock; + } + + /** + * Returns a protocol object that supports the specific requested protocol + * version, which must not be higher than the specified maximum version, or + * null if no such version is supported. 
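+     *
+     * A sketch of the expected calling pattern (illustrative, not a quote
+     * from the feeder code):
+     * <pre>
+     *   Protocol p = Protocol.get(repNode, requestedVersion, maxVersion);
+     *   if (p == null) {
+     *       // reject the handshake: the requested version is unsupported
+     *   }
+     * </pre>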
+ */ + public static Protocol get(final RepNode repNode, + final int protocolVersion, + final int maxProtocolVersion) { + return get(repNode.getRepImpl(), + repNode.getNameIdPair(), + repNode.getClock(), + protocolVersion, + maxProtocolVersion, + repNode.getGroup().getFormatVersion()); + } + + public static Protocol get(final RepImpl repImpl, + final NameIdPair nameIdPair, + final Clock clock, + final int protocolVersion, + final int maxProtocolVersion, + final int groupFormatVersion) { + return get(repImpl, nameIdPair, clock, + protocolVersion, maxProtocolVersion, + LogEntryType.LOG_VERSION, groupFormatVersion); + } + + /** + * Returns a protocol object that supports the specified protocol, which + * must be less than the specified maximum version, and writes log entries + * in the specified log version format. Returns null if no such version is + * supported. + */ + public static Protocol get(final RepNode repNode, + final int protocolVersion, + final int maxProtocolVersion, + final int streamLogVersion) { + return get(repNode.getRepImpl(), + repNode.getNameIdPair(), + repNode.getClock(), + protocolVersion, + maxProtocolVersion, + streamLogVersion, + repNode.getGroup().getFormatVersion()); + + } + public static Protocol get(final RepImpl repImpl, + final NameIdPair nameIdPair, + final Clock clock, + final int protocolVersion, + final int maxProtocolVersion, + final int streamLogVersion, + final int groupFormatVersion) { + + /* + * If the RepGroupImpl has been upgraded to version 3, then require + * protocol version 5, which is required to support that RepGroupImpl + * version. This check prevents new facilities that depend on + * RepGroupImpl version 3 from being seen by non-upgraded replicas. + */ + int minProtocolVersion = MIN_VERSION; + if (groupFormatVersion >= RepGroupImpl.FORMAT_VERSION_3) { + minProtocolVersion = VERSION_5; + } + + return get(repImpl, nameIdPair, clock, + protocolVersion, minProtocolVersion, + maxProtocolVersion, streamLogVersion, groupFormatVersion); + } + + /** + * Returns a protocol object using the specified minimum and maximum + * values, returning null if no supported version is found. Use this + * method for testing when the RepGroupImpl object is not available. + */ + static Protocol get(final RepNode repNode, + final int protocolVersion, + final int minProtocolVersion, + final int maxProtocolVersion, + final int streamLogVersion) { + int formatVersion = RepGroupImpl.MAX_FORMAT_VERSION; + if (repNode.getGroup() != null) { + formatVersion = repNode.getGroup().getFormatVersion(); + } + + return get(repNode.getRepImpl(), + repNode.getNameIdPair(), + repNode.getClock(), + protocolVersion, + minProtocolVersion, + maxProtocolVersion, + streamLogVersion, + formatVersion); + } + + private static Protocol get(final RepImpl repImpl, + final NameIdPair nameIdPair, + final Clock clock, + final int protocolVersion, + final int minProtocolVersion, + final int maxProtocolVersion, + final int streamLogVersion, + final int groupFormatVersion) { + + if (!isSupportedVersion(protocolVersion, minProtocolVersion, + maxProtocolVersion)) { + return null; + } + + /* + * Future code will do what is appropriate in support of the version + * depending on the nature of the incompatibility. + */ + return new Protocol(repImpl, nameIdPair, clock, + protocolVersion, maxProtocolVersion, + streamLogVersion, groupFormatVersion); + } + + /** + * Returns a protocol object using the specified protocol version. 
+ */ + static Protocol getProtocol(final RepNode repNode, + final int protocolVersion) { + int formatVersion = RepGroupImpl.MAX_FORMAT_VERSION; + if (repNode.getGroup() != null) { + formatVersion = repNode.getGroup().getFormatVersion(); + } + + return + getProtocol(repNode.getRepImpl(), + repNode.getNameIdPair(), + repNode.getClock(), + protocolVersion, + formatVersion); + } + + static Protocol getProtocol(final RepImpl repImpl, + final NameIdPair nameIdPair, + final Clock clock, + final int protocolVersion, + final int groupFormatVersion) { + + return new Protocol(repImpl, nameIdPair, clock, + protocolVersion, protocolVersion, + LogEntryType.LOG_VERSION, groupFormatVersion); + } + + /** + * Returns true if the code can support the version. + * + * @param protocolVersion protocol version being queried + * @param minProtocolVersion minimum protocol version supported + * @param maxProtocolVersion maximum protocol version supported + * + * @return true if the protocol version is supported by this implementation + * of the protocol + */ + private static boolean isSupportedVersion(final int protocolVersion, + final int minProtocolVersion, + final int maxProtocolVersion) { + if (protocolVersion == Integer.MIN_VALUE) { + /* For testing purposes. */ + return false; + } + + /* + * Version compatibility check: for now, a simple range check. We can + * make this fancier in the future if necessary. + */ + return minProtocolVersion <= protocolVersion && + protocolVersion <= maxProtocolVersion; + } + + /** + * Gets the JE version that corresponds to the specified protocol version, + * for use in creating error messages that explain protocol version errors + * in terms of JE versions. Returns null if the associated version is not + * known. + */ + static JEVersion getProtocolJEVersion(final int protocolVersion) { + switch (protocolVersion) { + case VERSION_7: + return VERSION_7_JE_VERSION; + case VERSION_6: + return VERSION_6_JE_VERSION; + case VERSION_5: + return VERSION_5_JE_VERSION; + case VERSION_4: + return VERSION_4_JE_VERSION; + case VERSION_3: + return VERSION_3_JE_VERSION; + default: + return null; + } + } + + /** + * Gets the protocol version that corresponds to the specified JE version, + * throwing an IllegalArgumentException if the version is not supported. + */ + static int getJEVersionProtocolVersion(final JEVersion jeVersion) { + if (jeVersion == null) { + return VERSION_7; + } else if (jeVersion.compareTo(VERSION_7_JE_VERSION) >= 0) { + return VERSION_7; + } else if (jeVersion.compareTo(VERSION_6_JE_VERSION) >= 0) { + return VERSION_6; + } else if (jeVersion.compareTo(VERSION_5_JE_VERSION) >= 0) { + return VERSION_5; + } else if (jeVersion.compareTo(VERSION_4_JE_VERSION) >= 0) { + return VERSION_4; + } else if (jeVersion.compareTo(VERSION_3_JE_VERSION) >= 0) { + return VERSION_3; + } else { + throw new IllegalArgumentException( + "JE version not supported: " + jeVersion); + } + } + + /** + * The replica sends the feeder its protocol version. + * + * IMPORTANT: This message must not change. + */ + public class ReplicaProtocolVersion extends ProtocolVersion { + + public ReplicaProtocolVersion() { + super(configuredVersion); + } + + public ReplicaProtocolVersion(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return REPLICA_PROTOCOL_VERSION; + } + } + + /** + * The feeder sends the replica its proposed version. + * + * IMPORTANT: This message must not change. 
+ */ + public class FeederProtocolVersion extends ProtocolVersion { + + public FeederProtocolVersion(int proposedVersion) { + super(proposedVersion); + } + + public FeederProtocolVersion(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return FEEDER_PROTOCOL_VERSION; + } + } + + /* Reject response to a ReplicaProtocolVersion request */ + public class DuplicateNodeReject extends RejectMessage { + + DuplicateNodeReject(String errorMessage) { + super(errorMessage); + } + + public DuplicateNodeReject(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return DUP_NODE_REJECT; + } + } + + public class SNTPRequest extends HandshakeMessage { + + private final long originateTimestamp; + + /* Set by the receiver at the time the message is recreated. */ + private long receiveTimestamp = -1; + + /* + * Determines whether this is the last in a consecutive stream of + * requests to determine the skew. + */ + private boolean isLast = true; + + public SNTPRequest(boolean isLast) { + super(); + this.isLast = isLast; + originateTimestamp = clock.currentTimeMillis(); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(originateTimestamp, isLast); + } + + public SNTPRequest(ByteBuffer buffer) { + this.originateTimestamp = LogUtils.readLong(buffer); + this.isLast = getBoolean(buffer); + this.receiveTimestamp = clock.currentTimeMillis(); + } + + @Override + public MessageOp getOp() { + return SNTP_REQUEST; + } + + public long getOriginateTimestamp() { + return originateTimestamp; + } + + public long getReceiveTimestamp() { + return receiveTimestamp; + } + + public boolean isLast() { + return isLast; + } + } + + public class SNTPResponse extends HandshakeMessage { + + /* These fields have the standard SNTP interpretation */ + private final long originateTimestamp; // time request sent by client + private final long receiveTimestamp; // time request received by server + + /* + * Initialized when the message is serialized to ensure it's as + * accurate as possible. + */ + private long transmitTimestamp = -1; // time reply sent by server + + /* Initialized at de-serialization for similar reasons. 
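+         *
+         * For reference, given the four standard SNTP timestamps (originate
+         * o, receive r, transmit t, destination d), getDelay below computes
+         * the round-trip delay (d - o) - (t - r), and getDelta computes the
+         * estimated clock offset ((r - o) + (t - d)) / 2.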
*/ + private long destinationTimestamp = -1; //time reply received by client + + public SNTPResponse(SNTPRequest request) { + this.originateTimestamp = request.originateTimestamp; + this.receiveTimestamp = request.receiveTimestamp; + } + + @Override + public ByteBuffer wireFormat() { + transmitTimestamp = clock.currentTimeMillis(); + return wireFormat(originateTimestamp, + receiveTimestamp, + transmitTimestamp); + } + + public SNTPResponse(ByteBuffer buffer) { + originateTimestamp = LogUtils.readLong(buffer); + receiveTimestamp = LogUtils.readLong(buffer); + transmitTimestamp = LogUtils.readLong(buffer); + destinationTimestamp = clock.currentTimeMillis(); + } + + @Override + public MessageOp getOp() { + return SNTP_RESPONSE; + } + + public long getOriginateTimestamp() { + return originateTimestamp; + } + + public long getReceiveTimestamp() { + return receiveTimestamp; + } + + public long getTransmitTimestamp() { + return transmitTimestamp; + } + + public long getDestinationTimestamp() { + return destinationTimestamp; + } + + public long getDelay() { + assert(destinationTimestamp != -1); + return (destinationTimestamp - originateTimestamp) - + (transmitTimestamp - receiveTimestamp); + } + + public long getDelta() { + assert(destinationTimestamp != -1); + return ((receiveTimestamp - originateTimestamp) + + (transmitTimestamp - destinationTimestamp))/2; + } + } + + public class ReAuthenticate extends SimpleMessage { + + private final byte[] tokenBytes; + + public ReAuthenticate(byte[] tokenBytes) { + this.tokenBytes = tokenBytes; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(tokenBytes); + } + + public ReAuthenticate(ByteBuffer buffer) { + tokenBytes = getByteArray(buffer); + } + + public byte[] getTokenBytes() { + if (tokenBytes.length == 0) { + return null; + } + + return tokenBytes; + } + + @Override + public MessageOp getOp() { + return REAUTHENTICATE; + } + } + + /** + * Abstract message used as the basis for the exchange of software versions + * between replicated nodes + */ + abstract class JEVersions extends HandshakeMessage { + private final JEVersion version; + + private final int logVersion; + + public JEVersions(JEVersion version, int logVersion) { + this.version = version; + this.logVersion = logVersion; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(version.getVersionString(), logVersion); + } + + public JEVersions(ByteBuffer buffer) { + this.version = new JEVersion(getString(buffer)); + this.logVersion = LogUtils.readInt(buffer); + } + + public JEVersion getVersion() { + return version; + } + + public byte getLogVersion() { + return (byte)logVersion; + } + } + + public class ReplicaJEVersions extends JEVersions { + + ReplicaJEVersions(JEVersion version, int logVersion) { + super(version, logVersion); + } + + public ReplicaJEVersions(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return REPLICA_JE_VERSIONS; + } + + } + + public class FeederJEVersions extends JEVersions { + + /* Is null if protocol version < VERSION_7. 
*/ + private JEVersion minJEVersion; + + FeederJEVersions(JEVersion version, + int logVersion, + JEVersion minJEVersion) { + super(version, logVersion); + this.minJEVersion = minJEVersion; + } + + @Override + public ByteBuffer wireFormat() { + if (configuredVersion < VERSION_7){ + return super.wireFormat(); + } + return wireFormat( + getVersion().getVersionString(), + (int) getLogVersion(), + minJEVersion.getVersionString()); + } + + public FeederJEVersions(ByteBuffer buffer) { + super(buffer); + if (configuredVersion < VERSION_7){ + return; + } + this.minJEVersion = new JEVersion(getString(buffer)); + } + + @Override + public MessageOp getOp() { + return FEEDER_JE_VERSIONS; + } + + /** + * Returns the minJEVersion of the group, or null if unknown (in + * protocol versions < VERSION_7). + */ + public JEVersion getMinJEVersion() { + return minJEVersion; + } + } + + /* Reject response to a ReplicaJEVersions request */ + public class JEVersionsReject extends RejectMessage { + + public JEVersionsReject(String errorMessage) { + super(errorMessage); + } + + public JEVersionsReject(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return JE_VERSIONS_REJECT; + } + } + + public class NodeGroupInfo extends HandshakeMessage { + private final String groupName; + private final UUID uuid; + + @SuppressWarnings("hiding") + private final NameIdPair nameIdPair; + private final String hostName; + private final int port; + private final NodeType nodeType; + private final boolean designatedPrimary; + + /** + * A string version of the JE version running on this node, or the + * empty string if not known. + */ + private final String jeVersion; + + NodeGroupInfo(final String groupName, + final UUID uuid, + final NameIdPair nameIdPair, + final String hostName, + final int port, + final NodeType nodeType, + final boolean designatedPrimary, + final JEVersion jeVersion) { + + this.groupName = groupName; + this.uuid = uuid; + this.nameIdPair = nameIdPair; + this.hostName = hostName; + this.port = port; + this.nodeType = nodeType; + this.designatedPrimary = designatedPrimary; + this.jeVersion = (jeVersion != null) ? + jeVersion.getNumericVersionString() : + ""; + } + + @Override + public MessageOp getOp() { + return MEMBERSHIP_INFO; + } + + @Override + public ByteBuffer wireFormat() { + final boolean repGroupV3 = (getVersion() >= VERSION_5); + if (!repGroupV3 && nodeType.compareTo(NodeType.ELECTABLE) > 0) { + throw new IllegalStateException( + "Node type not supported before group version 3: " + + nodeType); + } + final Object[] args = new Object[repGroupV3 ? 9 : 8]; + args[0] = groupName; + args[1] = uuid.getMostSignificantBits(); + args[2] = uuid.getLeastSignificantBits(); + args[3] = nameIdPair; + args[4] = hostName; + args[5] = port; + args[6] = nodeType; + args[7] = designatedPrimary; + if (repGroupV3) { + args[8] = jeVersion; + } + return wireFormat(args); + } + + public NodeGroupInfo(ByteBuffer buffer) { + this.groupName = getString(buffer); + this.uuid = new UUID(LogUtils.readLong(buffer), + LogUtils.readLong(buffer)); + this.nameIdPair = getNameIdPair(buffer); + this.hostName = getString(buffer); + this.port = LogUtils.readInt(buffer); + this.nodeType = getEnum(NodeType.class, buffer); + this.designatedPrimary = getBoolean(buffer); + jeVersion = (getVersion() >= VERSION_5) ? 
getString(buffer) : ""; + } + + public String getGroupName() { + return groupName; + } + + public UUID getUUID() { + return uuid; + } + + public String getNodeName() { + return nameIdPair.getName(); + } + + public int getNodeId() { + return nameIdPair.getId(); + } + + public String getHostName() { + return hostName; + } + + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + public int port() { + return port; + } + public NodeType getNodeType() { + return nodeType; + } + + public boolean isDesignatedPrimary() { + return designatedPrimary; + } + + /** + * Returns the JE version most recently noted running on the associated + * node, or null if not known. + */ + public JEVersion getJEVersion() { + return !jeVersion.isEmpty() ? new JEVersion(jeVersion) : null; + } + } + + /** + * Response to a NodeGroupInfo request that was successful. The object + * contains the group's UUID and the replica's NameIdPair. The group UUID + * is used to update the replica's notion of the group UUID on first + * joining. The NameIdPair is used to update the replica's node ID for a + * secondary node, which is not available in the RepGroupDB. + */ + public class NodeGroupInfoOK extends HandshakeMessage { + + private final UUID uuid; + @SuppressWarnings("hiding") + private final NameIdPair nameIdPair; + + public NodeGroupInfoOK(UUID uuid, NameIdPair nameIdPair) { + super(); + this.uuid = uuid; + this.nameIdPair = nameIdPair; + } + + public NodeGroupInfoOK(ByteBuffer buffer) { + uuid = new UUID(LogUtils.readLong(buffer), + LogUtils.readLong(buffer)); + nameIdPair = getNameIdPair(buffer); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(uuid.getMostSignificantBits(), + uuid.getLeastSignificantBits(), + nameIdPair); + } + + @Override + public MessageOp getOp() { + return MEMBERSHIP_INFO_OK; + } + + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + public UUID getUUID() { + return uuid; + } + } + + public class NodeGroupInfoReject extends RejectMessage { + + NodeGroupInfoReject(String errorMessage) { + super(errorMessage); + } + + @Override + public MessageOp getOp() { + return MEMBERSHIP_INFO_REJECT; + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(errorMessage); + } + + public NodeGroupInfoReject(ByteBuffer buffer) { + super(buffer); + } + } + +} diff --git a/src/com/sleepycat/je/rep/stream/ReplicaFeederHandshake.java b/src/com/sleepycat/je/rep/stream/ReplicaFeederHandshake.java new file mode 100644 index 0000000..a177a10 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/ReplicaFeederHandshake.java @@ -0,0 +1,437 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.stream; + +import static com.sleepycat.je.log.LogEntryType.LOG_VERSION_EXPIRE_INFO; +import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME; +import static com.sleepycat.je.rep.impl.RepParams.MAX_CLOCK_DELTA; + +import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.stream.Protocol.DuplicateNodeReject; +import com.sleepycat.je.rep.stream.Protocol.FeederJEVersions; +import com.sleepycat.je.rep.stream.Protocol.FeederProtocolVersion; +import com.sleepycat.je.rep.stream.Protocol.JEVersionsReject; +import com.sleepycat.je.rep.stream.Protocol.NodeGroupInfoOK; +import com.sleepycat.je.rep.stream.Protocol.NodeGroupInfoReject; +import com.sleepycat.je.rep.stream.Protocol.SNTPResponse; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.utilint.RepUtils.Clock; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Implements the Replica side of the handshake protocol between the Replica + * and the Feeder. The FeederReplicaHandshake class takes care of the other + * side. + * + * @see FeederReplicaHandshake + */ +public class ReplicaFeederHandshake { + + /* The rep node (server or replica) */ + private final RepImpl repImpl; + private final Clock clock; + private final int groupFormatVersion; + private final NamedChannel namedChannel; + private final NameIdPair replicaNameIdPair; + private final NodeType nodeType; + private NameIdPair feederNameIdPair; + private final RepGroupImpl repGroup; + + private Protocol protocol = null; + + /* The JE software versions in use by the Feeder */ + private FeederJEVersions feederJEVersions; + + /* + * The time to wait between retries to establish node info in the master. + */ + static final int MEMBERSHIP_RETRY_SLEEP_MS = 60 * 1000; + static final int MEMBERSHIP_RETRIES = 0; + + /* + * Used during testing: A non-zero value overrides the actual log version. + */ + private static volatile int testCurrentLogVersion = 0; + + /** + * Used during testing: A non-zero value overrides the actual protocol + * version. + */ + private static volatile int testCurrentProtocolVersion = 0; + + /* Fields used to track clock skew wrt the feeder. */ + private long clockDelay = Long.MAX_VALUE; + private long clockDelta = Long.MAX_VALUE; + private static int CLOCK_SKEW_MAX_SAMPLE_SIZE = 5; + private static final long CLOCK_SKEW_MIN_DELAY_MS = 2; + private final int maxClockDelta; + + /** + * If the nodeType is an Arbiter, the SNTPRequest message + * is sent but the clock skew is not checked. + */ + private final boolean checkClockSkew; + + private final Logger logger; + + /** + * An instance of this class is created with each new handshake preceding + * the setting up of a connection. 
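+     *
+     * A minimal usage sketch (editorial, not part of the original source;
+     * assumes the caller already has a ReplicaFeederHandshakeConfig
+     * implementation in hand):
+     *
+     *   ReplicaFeederHandshake handshake = new ReplicaFeederHandshake(conf);
+     *   Protocol negotiated = handshake.execute();
+     *
+     * where execute() returns the negotiated Protocol on success, and
+     * throws EnvironmentFailureException if versions, group membership or
+     * clock skew cannot be reconciled.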
+ * + * @param conf handshake configuration with feeder + */ + public ReplicaFeederHandshake(ReplicaFeederHandshakeConfig conf) { + this.repImpl = conf.getRepImpl(); + this.namedChannel = conf.getNamedChannel(); + this.repGroup = conf.getGroup(); + this.groupFormatVersion = repGroup.getFormatVersion(); + this.nodeType = conf.getNodeType(); + + replicaNameIdPair = conf.getNameIdPair(); + this.clock = conf.getClock(); + if (nodeType.isArbiter()) { + maxClockDelta = Integer.MAX_VALUE; + checkClockSkew = false; + } else { + maxClockDelta = + repImpl.getConfigManager().getDuration(MAX_CLOCK_DELTA); + checkClockSkew = true; + } + logger = LoggerUtils.getLogger(getClass()); + } + + /** Get the current log version, supporting a test override. */ + private static int getCurrentLogVersion() { + return (testCurrentLogVersion != 0) ? + testCurrentLogVersion : + LogEntryType.LOG_VERSION; + } + + /** + * Set the current log version to a different value, for testing. + * Specifying {@code 0} reverts to the standard value. + * + * @param testLogVersion the testing log version or {@code 0} + */ + public static void setTestLogVersion(int testLogVersion) { + testCurrentLogVersion = testLogVersion; + } + + /** Get the current JE version, supporting a test override. */ + private JEVersion getCurrentJEVersion() { + return repImpl.getCurrentJEVersion(); + } + + /** Get the current protocol version, supporting a test override. */ + private int getCurrentProtocolVersion() { + if (testCurrentProtocolVersion != 0) { + return testCurrentProtocolVersion; + } + return Protocol.getJEVersionProtocolVersion(getCurrentJEVersion()); + } + + /** + * Set the current protocol version to a different value, for testing. + * Specifying {@code 0} reverts to the standard value. + */ + public static void setTestProtocolVersion(final int testProtocolVersion) { + testCurrentProtocolVersion = testProtocolVersion; + } + + /** + * Returns the minJEVersion of the group, or null if unknown (in + * protocol versions < VERSION_7). + */ + public JEVersion getFeederMinJEVersion() { + return feederJEVersions.getMinJEVersion(); + } + + /** + * Negotiates a protocol that both the replica and feeder can support. + * + * @return the common protocol + * + * @throws IOException + */ + private Protocol negotiateProtocol() + throws IOException { + + final Protocol defaultProtocol = + Protocol.getProtocol(repImpl, replicaNameIdPair, clock, + getCurrentProtocolVersion(), + groupFormatVersion); + /* Send over the latest version protocol this replica can support. 
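+         *
+         * (Editorial sketch, not part of the original source.) The version
+         * negotiation is a single round trip:
+         *
+         *   replica -> feeder: ReplicaProtocolVersion(proposedVersion)
+         *   feeder  -> replica: FeederProtocolVersion(selectedVersion)
+         *                       or DuplicateNodeReject(errorMessage)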
*/ + defaultProtocol.write(defaultProtocol.new ReplicaProtocolVersion(), + namedChannel); + + /* + * Returns the highest level the feeder can support, or the version we + * just sent, if it can support that version + */ + Message message = defaultProtocol.read(namedChannel); + if (message instanceof DuplicateNodeReject) { + throw new EnvironmentFailureException + (repImpl, + EnvironmentFailureReason.HANDSHAKE_ERROR, + "A replica with the name: " + replicaNameIdPair + + " is already active with the Feeder:" + feederNameIdPair); + } + + FeederProtocolVersion feederVersion = + ((FeederProtocolVersion) message); + feederNameIdPair = feederVersion.getNameIdPair(); + Protocol configuredProtocol = + Protocol.get(repImpl, replicaNameIdPair, + clock, feederVersion.getVersion(), + getCurrentProtocolVersion(), groupFormatVersion); + LoggerUtils.fine(logger, repImpl, + "Feeder id: " + feederVersion.getNameIdPair() + + "Response message: " + feederVersion.getVersion()); + namedChannel.setNameIdPair(feederNameIdPair); + LoggerUtils.fine(logger, repImpl, + "Channel Mapping: " + feederNameIdPair + " is at " + + namedChannel.getChannel()); + + if (configuredProtocol == null) { + /* Include JE version information [#22541] */ + throw new EnvironmentFailureException + (repImpl, + EnvironmentFailureReason.PROTOCOL_VERSION_MISMATCH, + "Incompatible protocol versions. " + + "Protocol version: " + feederVersion.getVersion() + + " introduced in JE version: " + + Protocol.getProtocolJEVersion(feederVersion.getVersion()) + + " requested by the Feeder: " + feederNameIdPair + + " is not supported by this Replica: " + replicaNameIdPair + + " with protocol version: " + defaultProtocol.getVersion() + + " introduced in JE version: " + + Protocol.getProtocolJEVersion(defaultProtocol.getVersion())); + } + return configuredProtocol; + } + + /** + * Executes the replica side of the handshake. + * @throws ProtocolException + */ + public Protocol execute() + throws IOException, + ProtocolException { + + LoggerUtils.info(logger, repImpl, + "Replica-feeder handshake start"); + + /* First negotiate the protocol, then use it. */ + protocol = negotiateProtocol(); + + /* Ensure that software versions are compatible. */ + verifyVersions(); + + /** + * Note whether log entries with later log versions need to be + * converted to log version 12 to work around [#25222]. + */ + if (feederJEVersions.getLogVersion() == LOG_VERSION_EXPIRE_INFO) { + protocol.setFixLogVersion12Entries(true); + } + + /* + * Now perform the membership information validation part of the + * handshake + */ + verifyMembership(); + + checkClockSkew(); + + LoggerUtils.info(logger, repImpl, + "Replica-feeder " + feederNameIdPair.getName() + + " handshake completed."); + return protocol; + } + + /** + * Checks software and log version compatibility. + */ + private void verifyVersions() + throws IOException { + + protocol.write(protocol.new + ReplicaJEVersions(getCurrentJEVersion(), + getCurrentLogVersion()), + namedChannel); + Message message = protocol.read(namedChannel); + if (message instanceof JEVersionsReject) { + /* The software version is not compatible with the Feeder. */ + throw new EnvironmentFailureException + (repImpl, + EnvironmentFailureReason.HANDSHAKE_ERROR, + " Feeder: " + feederNameIdPair + ". " + + ((JEVersionsReject) message).getErrorMessage()); + } + + /* + * Save the version information in case we want to use it as the basis + * for further compatibility checking in future. 
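+         *
+         * (Editorial recap, not part of the original source.) The exchange
+         * just completed was:
+         *
+         *   replica -> feeder: ReplicaJEVersions(jeVersion, logVersion)
+         *   feeder  -> replica: FeederJEVersions(jeVersion, logVersion,
+         *                           minJEVersion with protocol >= VERSION_7)
+         *                       or JEVersionsReject(errorMessage)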
+ */ + feederJEVersions = (FeederJEVersions) message; + + if (feederJEVersions.getLogVersion() > getCurrentLogVersion()) { + throw new EnvironmentFailureException( + repImpl, + EnvironmentFailureReason.HANDSHAKE_ERROR, + " Feeder: " + feederNameIdPair + ". " + + "Feeder log version " + feederJEVersions.getLogVersion() + + " is not known to the replica, whose current log version is " + + getCurrentLogVersion()); + } + } + + /** + * Exchange membership information messages. + */ + private void verifyMembership() + throws IOException { + + DbConfigManager configManager = repImpl.getConfigManager(); + String groupName = configManager.get(GROUP_NAME); + + Message message = protocol.new + NodeGroupInfo(groupName, + repGroup.getUUID(), + replicaNameIdPair, + repImpl.getHostName(), + repImpl.getPort(), + nodeType, + repImpl.isDesignatedPrimary(), + getCurrentJEVersion()); + protocol.write(message, namedChannel); + + message = protocol.read(namedChannel); + + if (message instanceof NodeGroupInfoReject) { + NodeGroupInfoReject reject = (NodeGroupInfoReject) message; + throw new EnvironmentFailureException + (repImpl, + EnvironmentFailureReason.HANDSHAKE_ERROR, + " Feeder: " + feederNameIdPair + ". " + + reject.getErrorMessage()); + } + + if (!(message instanceof NodeGroupInfoOK)) { + throw new EnvironmentFailureException + (repImpl, + EnvironmentFailureReason.HANDSHAKE_ERROR, + " Feeder: " + feederNameIdPair + ". " + + "Protocol error. Unexpected response " + message); + } + final NodeGroupInfoOK nodeGroupInfoOK = (NodeGroupInfoOK) message; + if (repGroup.hasUnknownUUID()) { + /* Correct the initial UUID */ + repGroup.setUUID(nodeGroupInfoOK.getUUID()); + } + if (nodeType.hasTransientId()) { + /* Update the transient node's ID */ + replicaNameIdPair.update(nodeGroupInfoOK.getNameIdPair()); + } + } + + /** + * Checks for clock skew wrt the current feeder. It's important that the + * clock skew be within an acceptable range so that replica can meet any + * time based consistency requirements requested by transactions. The + * intent of this check is to draw the attention of the application or the + * administrators to the skew, not correct it. + *

+     * The scheme implemented below is a variation on the scheme used by
+     * the SNTP protocol. The Feeder plays the role of the SNTP server and
+     * the replica the role of the client in this situation. The mechanism
+     * used here is rough and does not guarantee the detection of a clock
+     * skew, especially since it's a one-time check done each time a
+     * connection is re-established with the Feeder. The clocks could be in
+     * sync at the time of this check and drift apart over the lifetime of
+     * the connection. It's also for this reason that we do not store the
+     * skew value and make compensations using it when determining replica
+     * consistency.
+     *

+     * Replication nodes should therefore ensure that they are using NTP or
+     * a similar time synchronization service to keep the time on all the
+     * replication nodes in a group in sync.
+     *
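+     *
+     * (Editorial illustration, not part of the original source; exchange()
+     * stands in for one SNTPRequest/SNTPResponse round trip performed in
+     * the loop below.) The sampling logic reduces to:
+     *
+     *   long bestDelay = Long.MAX_VALUE, bestDelta = 0;
+     *   for (int i = 0; i < CLOCK_SKEW_MAX_SAMPLE_SIZE; i++) {
+     *       SNTPResponse r = exchange();    // one request/response pair
+     *       if (r.getDelay() < bestDelay) { // keep lowest-latency sample
+     *           bestDelay = r.getDelay();
+     *           bestDelta = r.getDelta();
+     *       }
+     *       if (bestDelay <= CLOCK_SKEW_MIN_DELAY_MS) {
+     *           break;                      // sample is good enough
+     *       }
+     *   }
+     *   // the handshake fails if Math.abs(bestDelta) >= maxClockDelta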

        + * + * @throws IOException + * @throws EnvironmentFailureException + * @throws ProtocolException + */ + private void checkClockSkew() + throws IOException, + ProtocolException { + + boolean isLast = false; + int sampleCount = 0; + do { + if (checkClockSkew) { + /* Iterate until we have a value that's good enough. */ + isLast = (++sampleCount >= CLOCK_SKEW_MAX_SAMPLE_SIZE) || + (clockDelay <= CLOCK_SKEW_MIN_DELAY_MS); + } else { + isLast = true; + } + + protocol.write(protocol.new SNTPRequest(isLast), namedChannel); + SNTPResponse response = protocol.read(namedChannel, + SNTPResponse.class); + if (response.getDelay() < clockDelay) { + clockDelay = response.getDelay(); + clockDelta = response.getDelta(); + } + + } while (!isLast); + + if (!checkClockSkew) { + return; + } + + LoggerUtils.logMsg + (logger, repImpl, + (Math.abs(clockDelta) >= maxClockDelta) ? + Level.SEVERE : Level.FINE, + "Round trip delay: " + clockDelay + " ms. " + "Clock delta: " + + clockDelta + " ms. " + "Max permissible delta: " + + maxClockDelta + " ms."); + + if (Math.abs(clockDelta) >= maxClockDelta) { + throw new EnvironmentFailureException + (repImpl, + EnvironmentFailureReason.HANDSHAKE_ERROR, + "Clock delta: " + clockDelta + " ms. " + + "between Feeder: " + feederNameIdPair.getName() + + " and this Replica exceeds max permissible delta: " + + maxClockDelta + " ms."); + } + } +} diff --git a/src/com/sleepycat/je/rep/stream/ReplicaFeederHandshakeConfig.java b/src/com/sleepycat/je/rep/stream/ReplicaFeederHandshakeConfig.java new file mode 100644 index 0000000..8db6f0c --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/ReplicaFeederHandshakeConfig.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.utilint.RepUtils.Clock; + +public interface ReplicaFeederHandshakeConfig { + + /** + * Gets the RepImpl. + * @return RepImpl + */ + public RepImpl getRepImpl(); + + /** + * Gets the nodes NameIdPair. + * @return NameIdPair + */ + public NameIdPair getNameIdPair(); + + /** + * Gets the clock. + * @return Clock + */ + public Clock getClock(); + + /** + * Gets the NodeType. + * @return NodeType + */ + public NodeType getNodeType(); + + /** + * Gets the RepGroupImpl. + * @return RepGroupImpl + */ + public RepGroupImpl getGroup(); + + /** + * Gets the NamedChannel. + * @return NamedChannel + */ + public NamedChannel getNamedChannel(); +} diff --git a/src/com/sleepycat/je/rep/stream/ReplicaFeederSyncup.java b/src/com/sleepycat/je/rep/stream/ReplicaFeederSyncup.java new file mode 100644 index 0000000..7f7a399 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/ReplicaFeederSyncup.java @@ -0,0 +1,830 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.cleaner.FileProtector.ProtectedFileSet; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.RollbackException; +import com.sleepycat.je.rep.RollbackProhibitedException; +import com.sleepycat.je.rep.SyncupProgress; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.impl.node.Replay; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.rep.impl.node.Replica.HardRecoveryElectionException; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNTracker; +import com.sleepycat.je.rep.stream.BaseProtocol.AlternateMatchpoint; +import com.sleepycat.je.rep.stream.BaseProtocol.Entry; +import com.sleepycat.je.rep.stream.BaseProtocol.EntryNotFound; +import com.sleepycat.je.rep.stream.BaseProtocol.RestoreResponse; +import com.sleepycat.je.rep.stream.ReplicaSyncupReader.SkipGapException; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * Establish where the replication stream should start for a replica and feeder + * pair. The replica compares what is in its log with what the feeder has, to + * determine the latest common log entry matchpoint + * + * - If the replica has applied log entries after that matchpoint, roll them + * back + * - If a common matchpoint can't be found, the replica will need to do + * a network restore. + */ +public class ReplicaFeederSyncup { + + private final Logger logger; + + private final NamedChannel namedChannel; + private final Protocol protocol; + private final RepNode repNode; + private final VLSNIndex vlsnIndex; + private final Replay replay; + private final RepImpl repImpl; + private ReplicaSyncupReader backwardsReader; + + /* The VLSN, lsn and log entry at which a match was made. */ + private VLSN matchpointVLSN = NULL_VLSN; + private Long matchedVLSNTime = 0L; + + private final boolean hardRecoveryNeedsElection; + + /* + * searchResults are the bundled outputs from the backwards scan by the + * ReplicaSyncReader during its search for a matchpoint. + */ + private final MatchpointSearchResults searchResults; + + /** + * For unit tests only. 
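+     *
+     * For instance (editorial example, not part of the original source;
+     * "latch" is a hypothetical java.util.concurrent.CountDownLatch owned
+     * by the test), since TestHook below is a single-method interface
+     * whose doHook() may throw InterruptedException, a test can install:
+     *
+     *   ReplicaFeederSyncup.setGlobalSyncupEndHook(() -> latch.await());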
+     */
+    private static TestHook globalSyncupEndHook;
+    private final TestHook syncupEndHook;
+    private static com.sleepycat.je.utilint.TestHook<ReplicaFeederSyncup>
+        rollbackHook;
+
+    public ReplicaFeederSyncup(RepNode repNode,
+                               Replay replay,
+                               NamedChannel namedChannel,
+                               Protocol protocol,
+                               boolean hardRecoveryNeedsElection) {
+        this.replay = replay;
+        logger = LoggerUtils.getLogger(getClass());
+        this.repNode = repNode;
+        this.vlsnIndex = repNode.getVLSNIndex();
+        this.namedChannel = namedChannel;
+        this.protocol = protocol;
+        this.repImpl = repNode.getRepImpl();
+        this.hardRecoveryNeedsElection = hardRecoveryNeedsElection;
+        searchResults = new MatchpointSearchResults(repNode.getRepImpl());
+        syncupEndHook = repNode.replica().getReplicaFeederSyncupHook();
+    }
+
+    public long getMatchedVLSNTime() {
+        return matchedVLSNTime;
+    }
+
+    public VLSN getMatchedVLSN() {
+        return matchpointVLSN;
+    }
+
+    /**
+     * The replica's side of the protocol.
+     * @throws InterruptedException
+     * @throws InsufficientLogException
+     * @throws HardRecoveryElectionException
+     */
+    public void execute(LocalCBVLSNTracker cbvlsnTracker)
+        throws IOException,
+               DatabaseException,
+               InterruptedException,
+               InsufficientLogException,
+               HardRecoveryElectionException {
+
+        final long startTime = System.currentTimeMillis();
+        String feederName = namedChannel.getNameIdPair().getName();
+        LoggerUtils.info(logger, repImpl,
+                         "Replica-feeder " + feederName +
+                         " syncup started. Replica range: " +
+                         repNode.getVLSNIndex().getRange());
+
+        /*
+         * Prevent the VLSNIndex range from being changed and protect all
+         * files in the range. To search the index and read files within
+         * this range safely, VLSNIndex.getRange must be called after
+         * syncupStarted.
+         */
+        final ProtectedFileSet protectedFileSet =
+            repNode.syncupStarted(namedChannel.getNameIdPair());
+
+        try {
+
+            /*
+             * Find a replication stream matchpoint and a place to start
+             * the replication stream. If the feeder cannot service this
+             * protocol because it has run out of replication stream,
+             * findMatchpoint will throw an InsufficientLogException.
+             */
+            VLSNRange range = vlsnIndex.getRange();
+            findMatchpoint(range);
+
+            /*
+             * If we can't rollback to the found matchpoint, verifyRollback
+             * will throw the appropriate exception.
+             */
+            verifyRollback(range);
+
+            replay.rollback(matchpointVLSN, searchResults.getMatchpointLSN());
+
+            /* Update the vlsnIndex, it will commit synchronously. */
+            VLSN startVLSN = matchpointVLSN.getNext();
+            vlsnIndex.truncateFromTail(startVLSN,
+                                       searchResults.getMatchpointLSN());
+
+            protocol.write(protocol.new
+                           StartStream(startVLSN, repImpl.getFeederFilter()),
+                           namedChannel);
+            LoggerUtils.info(logger, repImpl,
+                             "Replica-feeder " + feederName +
+                             " start stream at VLSN: " + startVLSN);
+
+            /*
+             * If the GlobalCBVLSN is not defunct, we initialize this node's
+             * local CBVLSN while the entire VLSN range is protected. The
+             * idea is to hang onto the vlsn at the matchpoint -- don't
+             * let that be cleaned, because it may be of use for other
+             * replicas who need to sync up. Right now, this seems to be the
+             * best matchpoint in the group.
+             */
+            cbvlsnTracker.registerMatchpoint(matchpointVLSN);
+        } finally {
+
+            /* For unit test support only. */
+            assert runHook();
+
+            repNode.syncupEnded(protectedFileSet);
+
+            LoggerUtils.info
+                (logger, repImpl,
+                 String.format
+                 ("Replica-feeder " + feederName +
+                  " syncup ended. 
Elapsed time: %,dms", + (System.currentTimeMillis() - startTime))); + + repImpl.setSyncupProgress(SyncupProgress.END); + } + } + + /** + * A matchpoint has been found. What happens next depends on the position + * of the matchpoint and its relationship to the last transaction end + * record. + * + * In following table, + * M = some non-null matchpoint VLSN value, + * T = some non-null last txn end value + * S = some non-null last sync value + * + * Note that currently T == S, since a sync points is always a txn end. + * + * txn end last sync found action + * VLSN VLSN matchpoint + * ---------- --------- --------- ------------------------ + * NULL_VLSN NULL_VLSN NULL_VLSN rollback everything + * NULL_VLSN NULL_VLSN M can't occur + * NULL_VLSN S NULL_VLSN rollback everything + * NULL_VLSN S M rollback to M + * T NULL_VLSN NULL_VLSN can't occur + * T NULL_VLSN M can't occur + * T S NULL_VLSN network restore, though + * could also do hard recov + * T <= M S M rollback to matchpoint + * T > M, truncate not ok S M network restore + * T > M, rollback limit + * exceeded or S M throw RollbackProhibited + * disabled + * T > M, truncate ok S M hard recovery + * + * @throws IOException + * @throws HardRecoveryElectionException + */ + private void verifyRollback(VLSNRange range) + throws RollbackException, InsufficientLogException, + HardRecoveryElectionException, IOException { + repImpl.setSyncupProgress(SyncupProgress.CHECK_FOR_ROLLBACK); + VLSN lastTxnEnd = range.getLastTxnEnd(); + VLSN lastSync = range.getLastSync(); + + LoggerUtils.finest(logger, repImpl, "verify rollback" + + " vlsn range=" + range + + " searchResults=" + searchResults); + + /* Test a rollback exception is thrown when sync up */ + TestHookExecute.doHookIfSet(rollbackHook, this); + + /* + * If the lastTxnEnd VLSN is null, we don't have to worry about hard + * recovery. See truth table above. + */ + if (lastTxnEnd.isNull()) { + if (range.getLastSync().isNull() && !matchpointVLSN.isNull()) { + throw EnvironmentFailureException.unexpectedState + (repNode.getRepImpl(), "Shouldn't be possible to find a "+ + "matchpoint of " + matchpointVLSN + + " when the sync VLSN is null. Range=" + range); + } + + /* We'll be doing a normal rollback. */ + LoggerUtils.fine(logger, repImpl, "normal rollback, no txn end"); + return; + } + + if (lastSync.isNull()) { + throw EnvironmentFailureException.unexpectedState + (repNode.getRepImpl(), + "Shouldn't be possible to have a null sync VLSN when the " + + " lastTxnVLSN " + lastTxnEnd + " is not null. Range=" + + range); + } + + /* + * There is a non-null lastTxnEnd VLSN, so check if the found + * matchpoint precedes it. If it doesn't, we can't rollback. + */ + if (matchpointVLSN.isNull()) { + + /* + * We could actually also try to do a hard recovery and truncate + * all committed txns, but for now, let's assume that it will cost + * less to copy the log files. + */ + LoggerUtils.info(logger, repImpl, + "This node had a txn end at vlsn = " + lastTxnEnd + + "but no matchpoint found."); + throw setupLogRefresh(matchpointVLSN); + } + + /* + * The matchpoint is after or equal to the last txn end, no problem + * with doing a normal rollback. + */ + if ((lastTxnEnd.compareTo(matchpointVLSN) <= 0) && + (searchResults.getNumPassedCommits() == 0)) { + LoggerUtils.fine(logger, repImpl, "txn end vlsn of " + lastTxnEnd + + "<= matchpointVLSN of " + matchpointVLSN + + ", normal rollback"); + return; + } + + /* Rolling back past a commit or abort. 
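+         *
+         * (Editorial summary, not part of the original source.) From here
+         * the decision cascade below is:
+         *
+         *   1. hardRecoveryNeedsElection -> HardRecoveryElectionException
+         *   2. passed a ckpt end that deleted files, or skipped a file gap
+         *      -> network restore via setupLogRefresh
+         *   3. passed durable commits > TXN_ROLLBACK_LIMIT, or rollback
+         *      disabled -> RollbackProhibitedException
+         *   4. otherwise -> truncate the log and run hard recovery via
+         *      setupHardRecovery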
*/ + + if (hardRecoveryNeedsElection) { + throw new Replica.HardRecoveryElectionException + (repNode.getMasterStatus().getNodeMasterNameId(), + lastTxnEnd, matchpointVLSN); + } + + /* + * We're planning on rolling back past a commit or abort. The more + * optimal course of action is to truncate the log and run a hard + * recovery, but if the matchpoint precedes a checkpoint that deleted + * log files, the truncation is not permissible because the resulting + * log might be missing needed files. Instead, we have to do a network + * restore. + * + * A checkpoint that resulted in deleted log files will have flushed + * references to entries migrated from the deleted file, and the + * original entries might still be referenced if we truncate the log + * prior to the CkptEnd. For example: + * + * 3/100 LN-A + * 4/100 BIN parent of LN-1 at 3/100 + * 5/100 cleaning of file 3 begins + * 5/200 LN-A migrated from 3/100, parent BIN is dirtied + * 5/300 CkptStart + * 5/400 matchpoint + * 5/500 BIN parent of LN-A at 5/200 + * 5/600 CkptEnd, after which file 3 is deleted + * + * If we truncate the log at the matchpoint, the BIN at 5/500 is + * truncated. The BIN at 4/100 will be used, which refers to an LN in a + * now deleted file. Note that the cleaner guarantees that a checkpoint + * occurs after migration and before file deletion. + * + * However, because files become reserved after a checkpoint and are + * not deleted immediately, we could trucate at a matchpoint that + * precedes such a checkpoint, when these reserved files have not yet + * been deleted. In the future we could enable such a feature by + * keeping track (persistently) of the earliest point at which the log + * can be truncated, taking into account the deletion of reserved + * files. Then we could use this information to validate a matchpoint. + */ + if (searchResults.getPassedCheckpointEnd()) { + LoggerUtils.info(logger, repImpl, "matchpointVLSN of " + + matchpointVLSN + " precedes a checkpoint end, " + + "needs network restore."); + throw setupLogRefresh(matchpointVLSN); + } + + /* + * Likewise, if we skipped over a gap in the log files, we can't be + * sure if we passed a ckpt with deleted log files. Do a network + * restore rather than a hard recovery. + */ + if (searchResults.getSkippedGap()) { + LoggerUtils.info(logger, repImpl, "matchpointVLSN of " + + matchpointVLSN + " was found in a replica log " + + "with gaps. Since we can't be sure if it " + + "precedes a checkpoint end, do network restore."); + throw setupLogRefresh(matchpointVLSN); + } + + /* + * We're planning on rolling back past a commit or abort, and we know + * that we have not passed a barrier checkpoint. See if we have + * exceeded the number of rolledback commits limit. + */ + EnvironmentImpl envImpl = repNode.getRepImpl(); + DbConfigManager configMgr = envImpl.getConfigManager(); + final int rollbackTxnLimit = + configMgr.getInt(RepParams.TXN_ROLLBACK_LIMIT); + final boolean rollbackDisabled = + configMgr.getBoolean(RepParams.TXN_ROLLBACK_DISABLED); + + final int numPassedDurableCommits = + searchResults.getNumPassedDurableCommits(); + final int numPassedCommits = + searchResults.getNumPassedCommits(); + final long dtvlsn = searchResults.getDTVLSN().getSequence(); + LoggerUtils.info(logger, repImpl, + String.format("Rollback info. " + + "Number of passed commits:%,d. " + + "(durable commits:%,d). 
" + + "Durable commit VLSN:%,d " + + "Rollback transaction limit:%,d", + numPassedCommits, + numPassedDurableCommits, + dtvlsn, + rollbackTxnLimit)); + + if (numPassedDurableCommits > rollbackTxnLimit || rollbackDisabled) { + + LoggerUtils.severe(logger, repImpl, + "Limited list of transactions that would " + + " be truncated for hard recovery:\n" + + searchResults.dumpPassedTxns()); + + throw new RollbackProhibitedException(repNode.getRepImpl(), + rollbackTxnLimit, + rollbackDisabled, + matchpointVLSN, + searchResults); + } + + /* + * After passing all the earlier qualifications, do a truncation and + * hard recovery. + */ + throw setupHardRecovery(); + } + + /** + * Find a matchpoint, which is a log entry in the replication stream which + * is the same on feeder and replica. Assign the matchpointVLSN field. The + * matchpoint log entry must be be tagged with an environment id. If no + * matching entry is found, the matchpoint is effectively the NULL_VLSN. + * + * To determine the matchpoint, exchange messages with the feeder and + * compare log entries. If the feeder does not have enough log entries, + * throw InsufficientLogException. + * @throws InsufficientLogException + */ + private void findMatchpoint(VLSNRange range) + throws IOException, + InsufficientLogException { + + int matchCounter = 0; + repImpl.setSyncupProgress(SyncupProgress.FIND_MATCHPOINT, + matchCounter++, -1); + VLSN candidateMatchpoint = range.getLastSync(); + if (candidateMatchpoint.equals(NULL_VLSN)) { + + /* + * If the replica has no sync-able log entries at all, the + * matchpoint is the NULL_VLSN, and we should start the replication + * stream at VLSN 1. Check if the feeder has the VLSN 1. If it + * doesn't, getFeederRecord() will throw a + * InsufficientLogException. We can assume that a non-cleaned + * feeder always has VLSN 1, because a ReplicatedEnvironment always + * creates a few replicated vlsns, such as the name db, at + * initial startup. + */ + getFeederRecord(range, VLSN.FIRST_VLSN, + false /*acceptAlternative*/); + return; + } + + /* + * CandidateMatchpoint is not null, so ask the feeder for the log + * record at that vlsn. + */ + InputWireRecord feederRecord = + getFeederRecord(range, candidateMatchpoint, + true /*acceptAlternative*/); + + /* + * The feeder may have suggested an alternative matchpoint, so reset + * candidate matchpoint. + */ + candidateMatchpoint = feederRecord.getVLSN(); + if (logger.isLoggable(Level.FINE)) { + LoggerUtils.fine(logger, repImpl, + "first candidate matchpoint: " + + candidateMatchpoint); + } + /* + * Start comparing feeder records to replica records. Instead of using + * the VLSNIndex to direct our search, we must scan from the end of the + * log, recording entries that have an impact on our ability to + * rollback, like checkpoints. + * + * Start by finding the candidate matchpoint in the Replica. + */ + backwardsReader = setupBackwardsReader + (candidateMatchpoint, + repNode.getRepImpl().getFileManager().getLastUsedLsn()); + OutputWireRecord replicaRecord = getReplicaRecord(candidateMatchpoint); + + while (!replicaRecord.match(feederRecord)) { + repImpl.setSyncupProgress(SyncupProgress.FIND_MATCHPOINT, + matchCounter++, -1); + + /* + * That first bid didn't match, now just keep looking at all + * potential matchpoints. + */ + replicaRecord = scanMatchpointEntries(); + + if (replicaRecord == null) { + + /* + * The search for the previous sync log entry went past our + * available contiguous VLSN range, so there is no + * matchpoint. 
+ */ + LoggerUtils.info(logger, repImpl, + "Looking at candidate matchpoint vlsn " + + candidateMatchpoint + + " but this node went past its available" + + " contiguous VLSN range, need network" + + " restore."); + throw setupLogRefresh(candidateMatchpoint); + } + + /* + * Ask the feeder for the record. If the feeder doesn't have + * it, we'll throw out and do a network restore. + */ + candidateMatchpoint = replicaRecord.getVLSN(); + if (logger.isLoggable(Level.FINE)) { + LoggerUtils.fine(logger, repImpl, + "Next candidate matchpoint: " + + candidateMatchpoint); + } + feederRecord = getFeederRecord(range, candidateMatchpoint, + false); + } + + /* We've found the matchpoint. */ + matchedVLSNTime = replicaRecord.getTimeStamp(); + matchpointVLSN = candidateMatchpoint; + searchResults.setMatchpoint(backwardsReader.getLastLsn()); + LoggerUtils.finest(logger, repImpl, + "after setting matchpoint, searchResults=" + + searchResults); + } + + private ReplicaSyncupReader setupBackwardsReader(VLSN startScanVLSN, + long startScanLsn) { + + EnvironmentImpl envImpl = repNode.getRepImpl(); + int readBufferSize = envImpl.getConfigManager(). + getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE); + + return new ReplicaSyncupReader + (envImpl, + repNode.getVLSNIndex(), + startScanLsn, + readBufferSize, + startScanVLSN, + DbLsn.makeLsn(vlsnIndex.getProtectedRangeStartFile(), 0), + searchResults); + } + + /** + * Search backwards for the replica's log record at this target VLSN. The + * target record is either the replica's first suggestion for a matchpoint, + * or feeder's counter offer. We have checked earlier that the counter + * offer is within the replica's vlsn range. + */ + private OutputWireRecord getReplicaRecord(VLSN candidateMatchpoint) { + + OutputWireRecord replicaRecord = null; + do { + try { + replicaRecord = + backwardsReader.scanBackwards(candidateMatchpoint); + + /* + * We're hunting for a VLSN that should be in the VLSN range, + * and it should exist. + */ + if (replicaRecord == null) { + throw EnvironmentFailureException.unexpectedState + (repImpl, + "Searching for candidate matchpoint " + + candidateMatchpoint + + " but got null record back "); + } + + /* We've found the record at candidateMatchpoint */ + return replicaRecord; + } catch (SkipGapException e) { + /* + * The ReplicaSyncupReader will throw a SkipGapException if it + * encounters a cleaned files gap in the log. There can be + * multiple gaps on its way toward finding the candidate + * vlsn. The ReplicaSyncupReader is obliged to traverse the + * log, in order to note checkpoints, rather than simply using + * the vlsn index. When a gap is detected, the vlsn on the left + * side of the gap is used to re-init a new reader. For + * example, suppose the log looks like this: + * + * file 100 has vlsns 41-50 + * file 200 has vlsns 51-60 + * file 300 has vlsns 61-70 + * + * and the candidate matchpoint is 45, the search will start at + * vlsn 70. 
+ * t1: SkipGapException thrown at gap between file 200 & 300, + * create new reader positioned at vlsn 60 + * t2: SkipGapException thrown at gap between file 100 & 200, + * create new reader positioned at vlsn 50 + */ + VLSN gapRepositionVLSN = e.getVLSN(); + if (gapRepositionVLSN.compareTo(candidateMatchpoint) < 0) { + throw EnvironmentFailureException.unexpectedState + ("Gap reposition point of " + gapRepositionVLSN + + " should always be >= candidate matchpoint VLSN of " + + candidateMatchpoint); + } + + long startScanLsn = vlsnIndex.getGTELsn(gapRepositionVLSN); + backwardsReader = setupBackwardsReader(candidateMatchpoint, + startScanLsn); + /* + * If we skip a gap, there is a chance that we will have passed + * a checkpoint which had deleted log files. This has no impact + * if we are doing a soft rollback, but if we do a hard + * recovery, it would prevent us from truncating the log. It + * would require doing a network restore if we need to rollback + * committed txns. + */ + searchResults.noteSkippedGap(); + } + } while (true); + } + + /** + * Search backwards for potential matchpoints in the replica log, + * accounting for potential gaps. + */ + private OutputWireRecord scanMatchpointEntries() { + OutputWireRecord replicaRecord = null; + boolean firstAttempt = true; + do { + try { + /* + * The first time around, when firstAttempt is true, ask the + * reader to search for the vlsn before the currentVLSN, + * because we entered this method having searched to a given + * target matchpoint. All subsequent times, we are in search of + * the reader's currentVLSN, but haven't found it yet, because + * we hit a gap, so leave the currentVLSN alone. + */ + replicaRecord = + backwardsReader.findPrevSyncEntry(firstAttempt); + + /* + * Either se've found a possible matchpoint, or we've come to + * the end and the replicaRecord is null. One way or another, + * return the results of the scan. + */ + return replicaRecord; + } catch (SkipGapException e) { + /* + * The ReplicaSyncupReader will throw a SkipGapException if it + * encounters a cleaned files gap in the log. There can be + * multiple gaps on its way toward finding the next potential + * matchpoint. The ReplicaSyncupReader is obliged to traverse + * the log, in order to note checkpoints, rather than simply + * using the vlsn index. When a gap is detected, the vlsn on + * the left side of the gap is used to re-init a new + * reader. For example, suppose the log looks like this and the + * search starts at vlsn 70 + * + * file 100 has vlsns 51-60 + * file 200 has no vlsns + * file 300 has no vlsns + * file 400 has vlsns 61-70 + * + * SkipGapException thrown at gap between file 300 & 400, + * when the reader's currentVLSN is 60. Create a new reader, + * positioned at vlsn 60, skipping over files 200 and 300. + */ + + VLSN gapRepositionVLSN = e.getVLSN(); + backwardsReader = setupBackwardsReader + (gapRepositionVLSN, + vlsnIndex.getGTELsn(gapRepositionVLSN)); + firstAttempt = false; + searchResults.noteSkippedGap(); + } + } while(true); + } + + /** + * Ask the feeder for information to add to InsufficientLogException, + * and then throw the exception. + * + * The endVLSN marks the last VLSN that this node will want from + * the network restore. That information helps ensure that the restore + * source has enough vlsns to satisfy this replica. + * + * The replication node list identifies possible log provider members. 
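+     *
+     * (Editorial note, not part of the original source.) The
+     * InsufficientLogException built here is expected to propagate to the
+     * application, which typically recovers with the public NetworkRestore
+     * API, along these lines:
+     *
+     *   } catch (InsufficientLogException ile) {
+     *       NetworkRestore restore = new NetworkRestore();
+     *       NetworkRestoreConfig config = new NetworkRestoreConfig();
+     *       restore.execute(ile, config);
+     *       // then re-open the ReplicatedEnvironment
+     *   }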
+ * @throws IOException + */ + private InsufficientLogException setupLogRefresh(VLSN failedMatchpoint) + throws IOException { + + protocol.write(protocol.new RestoreRequest(failedMatchpoint), + namedChannel); + RestoreResponse response = + (RestoreResponse) protocol.read(namedChannel); + + return new InsufficientLogException( + repNode, + new HashSet<>(Arrays.asList(response.getLogProviders()))); + } + + /** + * Hard recovery: truncate the files, repeat recovery. + * If this hard recovery came about before the ReplicatedEnvironment was + * fully instantiated, we will recreate the environment under the + * covers. If this came while the replica was up and supporting existing + * Environment handles, we must invalidate the environment, and ask the + * application to reopen. + */ + public RollbackException setupHardRecovery() + throws IOException { + + /* Creating the exception invalidates the environment. */ + RollbackException r = new RollbackException(repImpl, + matchpointVLSN, + searchResults); + LoggerUtils.severe(logger, repImpl, + "Limited list of transactions truncated for " + + "hard recovery:\n" + + searchResults.dumpPassedTxns()); + + /* + * Truncate after the environment is invalidated, which happens + * when we instantiate RollbackException. + */ + long matchpointLSN = searchResults.getMatchpointLSN(); + repImpl.getFileManager().truncateLog + (DbLsn.getFileNumber(matchpointLSN), + DbLsn.getFileOffset(matchpointLSN)); + + return r; + } + + /** + * Request a log entry from the feeder at this VLSN. The Feeder will only + * return the log record or say that it isn't available. + * + * @throws InsufficientLogException + */ + private InputWireRecord getFeederRecord(VLSNRange range, + VLSN requestVLSN, + boolean acceptAlternative) + throws IOException, InsufficientLogException { + + /* Ask the feeder for the matchpoint log record. */ + protocol.write(protocol.new EntryRequest(requestVLSN), namedChannel); + + /* + * Expect + * a) the requested log record + * b) message that says this feeder doesn't have RequestVLSN + * c) if acceptAlternative == true and the feeder didn't have + * requestVLSN, but had an earlier entry, the feeder may send an + * earlier, alternative matchpoint + */ + Message message = protocol.read(namedChannel); + if (message instanceof Entry) { + Entry entry = (Entry) message; + return entry.getWireRecord(); + } + + if (message instanceof EntryNotFound) { + LoggerUtils.info(logger, repImpl, "Requested " + requestVLSN + + " from " + namedChannel.getNameIdPair() + + " but that node did not have that vlsn."); + throw setupLogRefresh(requestVLSN); + } + + if ((acceptAlternative) && + (message instanceof AlternateMatchpoint)) { + + AlternateMatchpoint alt = (AlternateMatchpoint) message; + InputWireRecord feederRecord = alt.getAlternateWireRecord(); + VLSN altMatchpoint = feederRecord.getVLSN(); + if (range.getFirst().compareTo(altMatchpoint) > 0) { + + /* + * The feeder suggest a different matchpoint, but it's outside + * the replica's range. Give up and do a network restore. 
+ */ + throw setupLogRefresh(altMatchpoint); + } + return feederRecord; + } + + throw EnvironmentFailureException.unexpectedState + (repNode.getRepImpl(), + "Sent EntryRequest, got unexpected response of " + message); + } + + + + public static void setGlobalSyncupEndHook(TestHook syncupEndHook) { + ReplicaFeederSyncup.globalSyncupEndHook = syncupEndHook; + } + + private boolean runHook() + throws InterruptedException { + + if (syncupEndHook != null) { + syncupEndHook.doHook(); + } + + if (globalSyncupEndHook != null) { + globalSyncupEndHook.doHook(); + } + return true; + } + + /** + * This interface is used instead of com.sleepycat.je.utilint.TestHook + * because the doHook method needs to throw InterruptedException. + */ + public interface TestHook { + public void doHook() throws InterruptedException; + } + + /* Setup the static rollback test hook, test use only */ + public static void setRollbackTestHook( + com.sleepycat.je.utilint.TestHook rollbackHook) { + ReplicaFeederSyncup.rollbackHook = rollbackHook; + } +} diff --git a/src/com/sleepycat/je/rep/stream/ReplicaSyncupReader.java b/src/com/sleepycat/je/rep/stream/ReplicaSyncupReader.java new file mode 100644 index 0000000..e13d395 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/ReplicaSyncupReader.java @@ -0,0 +1,421 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import static com.sleepycat.je.utilint.DbLsn.NULL_LSN; + +import java.nio.ByteBuffer; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.recovery.CheckpointEnd; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.txn.TxnAbort; +import com.sleepycat.je.txn.TxnCommit; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * The ReplicaSyncupReader scans the log backwards for requested log entries. + * The reader must track whether it has passed a checkpoint, and therefore + * can not used the vlsn index to skip over entries. + * + * The ReplicaSyncupReader is not thread safe, and can only be used + * serially. It will stop at the finishLsn, which should be set using the + * GlobalCBVLSN. + */ +public class ReplicaSyncupReader extends VLSNReader { + + /* + * True if this particular record retrieval is for a syncable record. 
+ * False if the reader is looking for a specific VLSN + */ + private boolean syncableSearch; + + private final LogEntry ckptEndLogEntry = + LogEntryType.LOG_CKPT_END.getNewLogEntry(); + + private final LogEntry commitLogEntry = + LogEntryType.LOG_TXN_COMMIT.getNewLogEntry(); + + private final LogEntry abortLogEntry = + LogEntryType.LOG_TXN_ABORT.getNewLogEntry(); + + /* + * SearchResults retains the information as to whether the found + * matchpoint is valid. + */ + private final MatchpointSearchResults searchResults; + + private final Logger logger1; + + private static TestHook fileGapHook; + + public ReplicaSyncupReader(EnvironmentImpl envImpl, + VLSNIndex vlsnIndex, + long endOfLogLsn, + int readBufferSize, + VLSN startVLSN, + long finishLsn, + MatchpointSearchResults searchResults) + throws DatabaseException { + + /* + * If we go backwards, endOfFileLsn and startLsn must not be null. + * Make them the same, so we always start at the same very end. + */ + super(envImpl, + vlsnIndex, + false, // forward + endOfLogLsn, + readBufferSize, + finishLsn); + + initScan(startVLSN, endOfLogLsn); + this.searchResults = searchResults; + logger1 = LoggerUtils.getLogger(getClass()); + } + + /** + * Set up the ReplicaSyncupReader to start scanning from this VLSN. + */ + private void initScan(VLSN startVLSN, long endOfLogLsn) { + + if (startVLSN.equals(VLSN.NULL_VLSN)) { + throw EnvironmentFailureException.unexpectedState + ("ReplicaSyncupReader start can't be NULL_VLSN"); + } + + startLsn = endOfLogLsn; + assert startLsn != NULL_LSN; + + /* + * Flush the log so that syncup can assume that all log entries that + * are represented in the VLSNIndex are safely out of the log buffers + * and on disk. Simplifies this reader, so it can use the regular + * ReadWindow, which only works on a file. + */ + envImpl.getLogManager().flushNoSync(); + + window.initAtFileStart(startLsn); + currentEntryPrevOffset = window.getEndOffset(); + currentEntryOffset = window.getEndOffset(); + currentVLSN = startVLSN; + } + + /** + * Backward scanning for the replica's part in syncup. + */ + public OutputWireRecord scanBackwards(VLSN vlsn) + throws DatabaseException { + + syncableSearch = false; + VLSNRange range = vlsnIndex.getRange(); + if (vlsn.compareTo(range.getFirst()) < 0) { + /* + * The requested VLSN is before the start of our range, we don't + * have this record. + */ + return null; + } + + currentVLSN = vlsn; + + if (readNextEntry()) { + return currentFeedRecord; + } + + return null; + } + + /** + * Backward scanning for finding an earlier candidate syncup matchpoint. + */ + public OutputWireRecord findPrevSyncEntry(boolean startAtPrev) + throws DatabaseException { + + currentFeedRecord = null; + syncableSearch = true; + + if (startAtPrev) { + /* Start by looking at the entry before the current record. */ + currentVLSN = currentVLSN.getPrev(); + } else { + LoggerUtils.info(logger1, envImpl, + "Restart ReplicaSyncupReader at " + + "vlsn " + currentVLSN); + } + + VLSNRange range = vlsnIndex.getRange(); + if (currentVLSN.compareTo(range.getFirst()) < 0) { + + /* + * We've walked off the end of the contiguous VLSN range. + */ + return null; + } + + if (readNextEntry() == false) { + /* + * We scanned all the way to the front of the log, no + * other sync-able entry found. 
+             */
+            return null;
+        }
+
+        assert LogEntryType.isSyncPoint(currentFeedRecord.getEntryType()) :
+            "Unexpected log type= " + currentFeedRecord;
+
+        return currentFeedRecord;
+    }
+
+    /**
+     * @throws EnvironmentFailureException if we were scanning for a
+     * particular VLSN and we have passed it by.
+     */
+    private void checkForPassingTarget(int compareResult) {
+
+        if (compareResult < 0) {
+            /* Hey, we passed the VLSN we wanted. */
+            throw EnvironmentFailureException.unexpectedState
+                ("want to read " + currentVLSN + " but reader at " +
+                 currentEntryHeader.getVLSN());
+        }
+    }
+
+    /**
+     * Return true for ckpt entries, for syncable entries, and if we're in
+     * specific vlsn scan mode, any replicated entry. There is an additional
+     * level of filtering in processEntry.
+     */
+    @Override
+    protected boolean isTargetEntry()
+        throws DatabaseException {
+
+        if (logger1.isLoggable(Level.FINEST)) {
+            LoggerUtils.finest(logger1, envImpl,
+                               " isTargetEntry " + currentEntryHeader);
+        }
+
+        nScanned++;
+
+        /* Skip invisible entries. */
+        if (currentEntryHeader.isInvisible()) {
+            return false;
+        }
+
+        byte currentType = currentEntryHeader.getType();
+
+        /*
+         * Return true if this entry is replicated. All entries need to be
+         * perused by processEntry, when we are doing a vlsn based search,
+         * even if they are not a sync point, because:
+         * (a) If this is a vlsn-based search, it's possible that the
+         * replica and feeder are mismatched. The feeder will only propose
+         * a sync type entry as a matchpoint but it might be that the
+         * replica has a non-sync entry at that vlsn.
+         * (b) We need to note passed commits in processEntry.
+         */
+        if (entryIsReplicated()) {
+            if (syncableSearch) {
+                if (LogEntryType.isSyncPoint(currentType)) {
+                    return true;
+                }
+                currentVLSN = currentEntryHeader.getVLSN().getPrev();
+            } else {
+                return true;
+            }
+        }
+
+        /*
+         * We'll also need to read checkpoint end records to record their
+         * presence.
+         */
+        if (LogEntryType.LOG_CKPT_END.equalsType(currentType)) {
+            return true;
+        }
+
+        return false;
+    }
+
+    /**
+     * ProcessEntry does additional filtering before deciding whether to
+     * return an entry as a candidate for matching.
+     *
+     * If this is a record we are submitting as a matchpoint candidate,
+     * instantiate a WireRecord to house this log entry. If this is a
+     * non-replicated entry or a txn end that follows the candidate
+     * matchpoint, record whatever status we need to, but don't use it for
+     * comparisons.
+     *
+     * For example, suppose the log is like this:
+     *
+     * VLSN  entry
+     * 10    LN
+     * 11    commit
+     * 12    LN
+     * --    ckpt end
+     * 13    commit
+     * 14    abort
+     *
+     * And that the master only has VLSNs 1-12. The replica will suggest
+     * vlsn 14 as the first matchpoint. The feeder will counter with a
+     * suggestion of vlsn 11, since it does not have vlsn 14.
+     *
+     * At that point, the ReplicaSyncupReader will scan backwards in the
+     * log, looking for vlsn 11. Although the reader should only return an
+     * entry when it gets to vlsn 11, it must process commits and ckpts
+     * that follow 11, so that they can be recorded in the searchResults
+     * and the number of rolled back commits can be accurately reported.
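+     *
+     * (Editorial summary, not part of the original source.) The outcome
+     * per entry is therefore:
+     *
+     *   ckpt end          : note ckpts that deleted files; never a match
+     *   txn commit/abort  : record in searchResults, then fall through
+     *   syncable search   : any remaining entry is returned as a candidate
+     *   vlsn-based search : returned only when its VLSN equals the target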
+ */
+ @Override
+ protected boolean processEntry(ByteBuffer entryBuffer) {
+
+ if (logger1.isLoggable(Level.FINEST)) {
+ LoggerUtils.finest(logger1, envImpl,
+ " syncup reader saw " + currentEntryHeader);
+ }
+ byte currentType = currentEntryHeader.getType();
+
+ /*
+ * CheckpointEnd entries are tracked in order to see if a rollback
+ * must be done, but are not returned as possible matchpoints.
+ */
+ if (LogEntryType.LOG_CKPT_END.equalsType(currentType)) {
+
+ /*
+ * Read the entry, which both lets us decipher its contents and
+ * also advances the file reader position.
+ */
+ ckptEndLogEntry.readEntry(envImpl, currentEntryHeader,
+ entryBuffer);
+
+ if (logger1.isLoggable(Level.FINEST)) {
+ LoggerUtils.finest(logger1, envImpl,
+ " syncup reader read " +
+ currentEntryHeader + ckptEndLogEntry);
+ }
+
+ if (((CheckpointEnd) ckptEndLogEntry.getMainItem()).
+ getCleanedFilesToDelete()) {
+ searchResults.notePassedCheckpointEnd();
+ }
+
+ return false;
+ }
+
+ /*
+ * Set up the log entry as a wire record so we can compare it to
+ * the entry from the feeder as we look for a matchpoint. Do this
+ * before we change positions on the entry buffer by reading it.
+ */
+ ByteBuffer buffer = entryBuffer.slice();
+ buffer.limit(currentEntryHeader.getItemSize());
+ currentFeedRecord =
+ new OutputWireRecord(envImpl, currentEntryHeader, buffer);
+
+ /*
+ * All commit records must be tracked to figure out if we've exceeded
+ * the txn rollback limit. For reporting reasons, we'll need to
+ * unmarshal the log entry, so we can read the timestamp in the commit
+ * record.
+ */
+ if (LogEntryType.LOG_TXN_COMMIT.equalsType(currentType)) {
+
+ commitLogEntry.readEntry(envImpl, currentEntryHeader, entryBuffer);
+ final TxnCommit commit = (TxnCommit) commitLogEntry.getMainItem();
+ searchResults.notePassedCommits(commit,
+ currentEntryHeader.getVLSN(),
+ getLastLsn());
+
+ if (logger1.isLoggable(Level.FINEST)) {
+ LoggerUtils.finest(logger1, envImpl,
+ "syncup reader read " +
+ currentEntryHeader + commitLogEntry);
+ }
+ } else if (LogEntryType.LOG_TXN_ABORT.equalsType(currentType)) {
+ abortLogEntry.readEntry(envImpl, currentEntryHeader, entryBuffer);
+ final TxnAbort abort = (TxnAbort) abortLogEntry.getMainItem();
+
+ searchResults.notePassedAborts(abort,
+ currentEntryHeader.getVLSN());
+ if (logger1.isLoggable(Level.FINEST)) {
+ LoggerUtils.finest(logger1, envImpl,
+ "syncup reader read " +
+ currentEntryHeader + abortLogEntry);
+ }
+ } else {
+ entryBuffer.position(entryBuffer.position() +
+ currentEntryHeader.getItemSize());
+ }
+
+ if (syncableSearch) {
+ return true;
+ }
+
+ /* We're looking for a particular VLSN. */
+ int compareResult = currentEntryHeader.getVLSN().compareTo(currentVLSN);
+ checkForPassingTarget(compareResult);
+
+ /* Return true if this is the entry we want. */
+ return (compareResult == 0);
+ }
+
+ /**
+ * Notify the reader that there is a gap in the log files, by throwing
+ * a SkipGapException; the caller restarts the reader on the far side
+ * of the gap.
+ */
+ @Override
+ protected void handleGapInBackwardsScan(long prevFileNum) {
+ SkipGapException e = new SkipGapException(window.currentFileNum(),
+ prevFileNum,
+ currentVLSN);
+ LoggerUtils.info(logger1, envImpl, e.getMessage());
+ assert TestHookExecute.doHookIfSet(fileGapHook, prevFileNum);
+ throw e;
+ }
+
+ /*
+ * An internal exception indicating that the reader must scan across a
+ * gap in the log files. The gap may have been created by cleaning.
+ */
+ public static class SkipGapException extends DatabaseException {
+
+ private static final long serialVersionUID = 1L;
+ private final VLSN currentVLSN;
+ public SkipGapException(long currentFileNum,
+ long nextFileNum,
+ VLSN currentVLSN) {
+ super("Restarting reader in order to read backwards across gap " +
+ "from file 0x" + Long.toHexString(currentFileNum) +
+ " to file 0x" + Long.toHexString(nextFileNum) +
+ ". Reset position to vlsn " + currentVLSN);
+ this.currentVLSN = currentVLSN;
+ }
+
+ public VLSN getVLSN() {
+ return currentVLSN;
+ }
+ }
+
+ public static void setFileGapHook(TestHook hook) {
+ fileGapHook = hook;
+ }
+}
diff --git a/src/com/sleepycat/je/rep/stream/SubscriberFeederSyncup.java b/src/com/sleepycat/je/rep/stream/SubscriberFeederSyncup.java
new file mode 100644
index 0000000..f133e15
--- /dev/null
+++ b/src/com/sleepycat/je/rep/stream/SubscriberFeederSyncup.java
@@ -0,0 +1,245 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.stream;
+
+
+import java.io.IOException;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.stream.BaseProtocol.AlternateMatchpoint;
+import com.sleepycat.je.rep.stream.BaseProtocol.Entry;
+import com.sleepycat.je.rep.stream.BaseProtocol.EntryNotFound;
+import com.sleepycat.je.rep.stream.BaseProtocol.EntryRequestType;
+import com.sleepycat.je.rep.utilint.BinaryProtocol.Message;
+import com.sleepycat.je.rep.utilint.NamedChannel;
+import com.sleepycat.je.utilint.InternalException;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * Object to sync-up the Feeder and Subscriber to establish the VLSN from
+ * which the subscriber should start streaming log entries from the feeder.
+ */
+public class SubscriberFeederSyncup {
+
+ private final Logger logger;
+ private final RepImpl repImpl;
+ private final NamedChannel namedChannel;
+ private final Protocol protocol;
+ private final FeederFilter filter;
+ private final EntryRequestType type;
+
+ public SubscriberFeederSyncup(NamedChannel namedChannel,
+ Protocol protocol,
+ FeederFilter filter,
+ RepImpl repImpl,
+ EntryRequestType type,
+ Logger logger) {
+ this.namedChannel = namedChannel;
+ this.protocol = protocol;
+ this.filter = filter;
+ this.repImpl = repImpl;
+ this.type = type;
+ this.logger = logger;
+ }
+
+ /**
+ * Execute sync-up to the Feeder. Request Feeder to start a replication
+ * stream from a start VLSN, if it is available. Otherwise return NULL
+ * VLSN to subscriber.
+ *
+ * @param reqVLSN VLSN requested by subscriber to stream log entries
+ *
+ * @return start VLSN from which the subscriber can stream log entries
+ * @throws InternalException if syncup fails
+ */
+ public VLSN execute(VLSN reqVLSN) throws InternalException {
+
+ final long startTime = System.currentTimeMillis();
+
+ LoggerUtils.info(logger, repImpl,
+ "Subscriber-Feeder " + namedChannel.getNameIdPair() +
+ " syncup started.");
+
+ try {
+ /* first query the start VLSN from feeder */
+ final VLSN startVLSN = getStartVLSNFromFeeder(reqVLSN);
+ if (!startVLSN.equals(VLSN.NULL_VLSN)) {
+ LoggerUtils.info(logger, repImpl,
+ "Response from feeder " +
+ namedChannel.getNameIdPair() +
+ ": the start VLSN " + startVLSN +
+ ", the requested VLSN " + reqVLSN +
+ ", send startStream request with filter.");
+
+ /* start streaming from feeder if valid start VLSN */
+ protocol.write(protocol.new StartStream(startVLSN, filter),
+ namedChannel);
+ } else {
+ LoggerUtils.info(logger, repImpl,
+ "Unable to stream from Feeder " +
+ namedChannel.getNameIdPair() +
+ " from requested VLSN " + reqVLSN);
+ }
+ return startVLSN;
+ } catch (IllegalStateException | IOException e) {
+ throw new InternalException(e.getMessage());
+ } finally {
+ LoggerUtils.info(logger, repImpl,
+ String.format("Subscriber to feeder " +
+ namedChannel.getNameIdPair() +
+ " sync-up done, elapsed time: %,dms",
+ System.currentTimeMillis() -
+ startTime));
+ }
+ }
+
+ /**
+ * Request a start VLSN from feeder. The feeder will return a valid
+ * start VLSN, which can be equal to or earlier than the request VLSN,
+ * or null if the feeder is unable to service the requested VLSN.
+ *
+ * @param requestVLSN start VLSN requested by subscriber
+ *
+ * @return VLSN a valid start VLSN from feeder, or null if it is
+ * unavailable at the feeder
+ * @throws IOException if unable to read message from channel
+ * @throws IllegalStateException if the feeder sends an unexpected message
+ */
+ private VLSN getStartVLSNFromFeeder(VLSN requestVLSN)
+ throws IOException, IllegalStateException {
+
+ LoggerUtils.fine(logger, repImpl,
+ "Subscriber sends requested VLSN " + requestVLSN +
+ " to feeder " + namedChannel.getNameIdPair());
+
+ /* ask the feeder for the requested VLSN.
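+ *
+ * The exchange is a single request/response pair; a sketch of the
+ * wire traffic (message types from BaseProtocol, feeder side
+ * paraphrased from its expected behavior):
+ *
+ *   subscriber -> feeder: EntryRequest(requestVLSN, type)
+ *   feeder -> subscriber: Entry | AlternateMatchpoint | EntryNotFound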
*/
+ protocol.write(protocol.new EntryRequest(requestVLSN, type),
+ namedChannel);
+
+ /*
+ * Expect the feeder to return one of the following if type is
+ * EntryRequestType.DEFAULT:
+ * a) not_found if the requested VLSN is too low
+ * b) the requested VLSN if the requested VLSN is found
+ * c) the alternate matchpoint VLSN if the requested VLSN is too high
+ *
+ * If type is EntryRequestType.AVAILABLE:
+ * a) the lowest available VLSN if the requested VLSN is too low
+ * b) the requested VLSN if the requested VLSN is found
+ * c) the highest available VLSN if the requested VLSN is too high
+ *
+ * If type is EntryRequestType.NOW:
+ * a) always returns the highest available VLSN
+ */
+ final Message message = protocol.read(namedChannel);
+ final VLSN vlsn;
+ if (message instanceof Entry) {
+ vlsn = ((Entry) message).getWireRecord().getVLSN();
+
+ /* must be exact match for the default type */
+ if (type.equals(EntryRequestType.DEFAULT)) {
+ assert (vlsn.equals(requestVLSN));
+ }
+
+ /* dump traces */
+ if (vlsn.equals(requestVLSN)) {
+ LoggerUtils.finest(logger, repImpl,
+ "Subscriber successfully requested VLSN " +
+ requestVLSN + " from feeder " +
+ namedChannel.getNameIdPair() +
+ ", request type: " + type);
+ }
+
+ if (vlsn.compareTo(requestVLSN) < 0) {
+ LoggerUtils.finest(logger, repImpl,
+ "Requested VLSN " + requestVLSN +
+ " is not available from feeder " +
+ namedChannel.getNameIdPair() +
+ "; instead, start stream from the lowest " +
+ "available VLSN " + vlsn +
+ ", request type: " + type);
+ }
+
+ if (vlsn.compareTo(requestVLSN) > 0) {
+ if (type.equals(EntryRequestType.NOW)) {
+ LoggerUtils.finest(logger, repImpl,
+ "Stream from highest available vlsn " +
+ "from feeder " +
+ namedChannel.getNameIdPair() + ":" +
+ vlsn + ", request type: " + type);
+ } else {
+ LoggerUtils.finest(logger, repImpl,
+ "Requested VLSN " + requestVLSN +
+ " is not available from feeder " +
+ namedChannel.getNameIdPair() +
+ "; instead, start stream from the highest" +
+ " available VLSN " + vlsn +
+ ", request type: " + type);
+ }
+ }
+
+ } else if (message instanceof AlternateMatchpoint) {
+ /* NOW and AVAILABLE types should not see an alternate matchpoint */
+ if (type.equals(EntryRequestType.NOW) ||
+ type.equals(EntryRequestType.AVAILABLE)) {
+ String msg = "Received unexpected response " + message +
+ " from feeder " + namedChannel.getNameIdPair() +
+ ", request type: " + type;
+ LoggerUtils.warning(logger, repImpl, msg);
+ throw new IllegalStateException(msg);
+ }
+
+ vlsn = ((AlternateMatchpoint) message).getAlternateWireRecord()
+ .getVLSN();
+ /* must be an earlier VLSN */
+ assert (vlsn.compareTo(requestVLSN) < 0);
+ LoggerUtils.finest(logger, repImpl,
+ "Feeder " + namedChannel.getNameIdPair() +
+ " returns a valid start VLSN " + vlsn +
+ " but earlier than the requested one " +
+ requestVLSN + ", request type: " + type);
+
+ } else if (message instanceof EntryNotFound) {
+ /* NOW and AVAILABLE types should not see not found */
+ if (type.equals(EntryRequestType.NOW) ||
+ type.equals(EntryRequestType.AVAILABLE)) {
+ /*
+ * even for a brand new environment, the VLSN range at feeder
+ * is not empty so we should not see entry not found
+ */
+ String msg = "Received unexpected response " + message +
+ " from feeder " + namedChannel.getNameIdPair() +
+ ", request type: " + type;
+ LoggerUtils.warning(logger, repImpl, msg);
+ throw new IllegalStateException(msg);
+ }
+
+ vlsn = VLSN.NULL_VLSN;
+ LoggerUtils.finest(logger, repImpl,
+ "Feeder " + namedChannel.getNameIdPair() +
+ " is unable to service the requested vlsn " +
+ requestVLSN
+ ", request type: " + type); + } else { + /* unexpected response from feeder */ + String msg = "Receive unexpected response " + message + + "from feeder " + namedChannel.getNameIdPair() + + ", request type: " + type; + LoggerUtils.warning(logger, repImpl, msg); + throw new IllegalStateException(msg); + } + + return vlsn; + } +} diff --git a/src/com/sleepycat/je/rep/stream/VLSNReader.java b/src/com/sleepycat/je/rep/stream/VLSNReader.java new file mode 100644 index 0000000..812da88 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/VLSNReader.java @@ -0,0 +1,162 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.stream; + +import java.io.FileNotFoundException; +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.log.FileReader; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * The VLSNReader returns picks out replicated log entries from the log. It + * works in tandem with the VLSNIndex, using vlsn->lsn mappings if those are + * available, and otherwise scanning the log for replicated entries. + * + * A VLSNReader is not thread safe, and can only be used serially. + */ +abstract class VLSNReader extends FileReader { + + final VLSNIndex vlsnIndex; + + /* + * currentVLSN is the target VLSN that the reader is looking for. It is + * set if the reader is scanning vlsn sequentially, either forwards or + * backwards. + */ + VLSN currentVLSN; + + /* + * The current log entry that the reader is positioned at, in wire record + * format. + */ + OutputWireRecord currentFeedRecord; + + /* + * True if the reader has been positioned at a point in the file. Forward + * scanning readers may read from either the log buffer or the log files. + * It's preferable to read first from the log buffers, in case the log + * entry is cached (or the log entry hasn't been written to disk). + */ + + /* stats */ + long nScanned; // Num log entries seen by the reader + long nReposition; // Number of times the reader has used a vlsn->lsn + // mapping and have reset the read window + + VLSNReader(EnvironmentImpl envImpl, + VLSNIndex vlsnIndex, + boolean forward, + long startLsn, + int readBufferSize, + long finishLsn) + throws DatabaseException { + + /* + * If we go backwards, endOfFileLsn and startLsn must not be null. + * Make them the same, so we always start at the same very end. + */ + super(envImpl, + readBufferSize, + forward, + startLsn, + null, // singleFileNumber + startLsn, // endOfFileLsn + finishLsn); + + this.vlsnIndex = vlsnIndex; + currentVLSN = VLSN.NULL_VLSN; + } + + void setPosition(long startLsn) + throws ChecksumException, FileNotFoundException, DatabaseException { + + if (startLsn == DbLsn.NULL_LSN) { + return; + } + + /* + * An assertion: a reposition should never make the reader lose ground. 
+ */ + if (forward) { + if (DbLsn.compareTo(getLastLsn(), startLsn) > 0) { + throw EnvironmentFailureException.unexpectedState + ("Feeder forward scanning should not be repositioned to " + + " a position earlier than the current position. Current" + + " lsn = " + DbLsn.getNoFormatString(getLastLsn()) + + " reposition = " + DbLsn.getNoFormatString(startLsn)); + } + } else { + if (DbLsn.compareTo(getLastLsn(), startLsn) < 0) { + throw EnvironmentFailureException.unexpectedState + ("Feeder backward scanning should not be repositioned to " + + " a position later than the current position. Current" + + " lsn = " + DbLsn.getNoFormatString(getLastLsn()) + + " reposition = " + DbLsn.getNoFormatString(startLsn)); + } + } + + long fileNum = DbLsn.getFileNumber(startLsn); + long offset = DbLsn.getFileOffset(startLsn); + + if (window.containsLsn(fileNum, offset)) { + window.positionBuffer(offset); + } else { + window.slideAndFill(fileNum, offset, offset, forward); + } + + if (forward) { + nextEntryOffset = offset; + } else { + currentEntryPrevOffset = offset; + } + nReposition++; + } + + /** + * Instantiate a WireRecord to house this log entry. + */ + @Override + protected boolean processEntry(ByteBuffer entryBuffer) { + ByteBuffer buffer = entryBuffer.slice(); + buffer.limit(currentEntryHeader.getItemSize()); + currentFeedRecord = + new OutputWireRecord(envImpl, currentEntryHeader, buffer); + + entryBuffer.position(entryBuffer.position() + + currentEntryHeader.getItemSize()); + return true; + } + + /* For unittests */ + long getNReposition() { + return nReposition; + } + + /* For unit tests. */ + long getNScanned() { + return nScanned; + } + + /* For unit tests. */ + void resetStats() { + nReposition = 0; + nScanned = 0; + } +} diff --git a/src/com/sleepycat/je/rep/stream/WireRecord.java b/src/com/sleepycat/je/rep/stream/WireRecord.java new file mode 100644 index 0000000..4a4fa03 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/WireRecord.java @@ -0,0 +1,69 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.entry.LogEntry; + +/** + * Format for log entries sent across the wire for replication. Instead of + * sending a direct copy of the log entry as it is stored on the JE log files + * (LogEntryHeader + LogEntry), select parts of the header are sent. + * + * @see InputWireRecord + * @see OutputWireRecord + */ +abstract class WireRecord { + + final LogEntryHeader header; + + WireRecord(final LogEntryHeader header) { + this.header = header; + } + + /** + * Returns the log entry type for this record. 
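+ *
+ * A typical use (sketch only; a concrete subclass such as
+ * InputWireRecord supplies the record) pairs this lookup with
+ * instantiateEntry below:
+ *
+ *   LogEntryType type = record.getLogEntryType();
+ *   LogEntry entry = record.instantiateEntry(envImpl, buffer);
+ *   // the buffer position is preserved via mark()/reset()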
+ */ + LogEntryType getLogEntryType() + throws DatabaseException { + + final LogEntryType type = LogEntryType.findType(header.getType()); + if (type == null) { + throw EnvironmentFailureException.unexpectedState( + "Unknown header type:" + header.getType()); + } + return type; + } + + /** + * Instantiates the log entry for this wire record using the specified + * environment and data. + */ + LogEntry instantiateEntry(final EnvironmentImpl envImpl, + final ByteBuffer buffer) + throws DatabaseException { + + final LogEntry entry = getLogEntryType().getNewLogEntry(); + buffer.mark(); + entry.readEntry(envImpl, header, buffer); + buffer.reset(); + return entry; + } +} diff --git a/src/com/sleepycat/je/rep/stream/package-info.java b/src/com/sleepycat/je/rep/stream/package-info.java new file mode 100644 index 0000000..21aa6d7 --- /dev/null +++ b/src/com/sleepycat/je/rep/stream/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Feeding and syncup (init) of rep stream from master node to + * other node types (replicas, etc). + */ +package com.sleepycat.je.rep.stream; diff --git a/src/com/sleepycat/je/rep/subscription/ClientAuthMethod.java b/src/com/sleepycat/je/rep/subscription/ClientAuthMethod.java new file mode 100644 index 0000000..e6dda9d --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/ClientAuthMethod.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.subscription; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceHandshake; + +/** + * Object represents a subscription authentication method used in service + * handshake at client side + */ +public class ClientAuthMethod implements ServiceHandshake.AuthenticationMethod { + + private final SubscriptionAuthHandler clientAuthHandler; + + ClientAuthMethod(SubscriptionAuthHandler clientAuthHandler) { + this.clientAuthHandler = clientAuthHandler; + } + + @Override + public String getMechanismName() { + return SubscriptionConfig.SERVICE_HANDSHAKE_AUTH_METHOD; + } + + @Override + public ServiceHandshake.ClientInitOp + getClientOp(ServiceHandshake.ClientHandshake initState, + String ignoredParams) { + return new ClientTokenOp(initState, clientAuthHandler); + } + + @Override + public ServiceHandshake.ServerInitOp + getServerOp(ServiceHandshake.ServerHandshake initState) { + return new ServerTokenOp(initState); + } + + @Override + public String getServerParams() { + return ""; + } + + /** + * Client side authentication + */ + static class ClientTokenOp extends ServiceHandshake.ClientInitOp { + + private final SubscriptionAuthHandler auth; + ClientTokenOp(ServiceHandshake.ClientHandshake initState, + SubscriptionAuthHandler auth) { + super(initState); + this.auth = auth; + } + + @Override + public ServiceHandshake.InitResult processOp( + ServiceHandshake.IOAdapter ioAdapter) throws IOException { + + final byte[] token = auth.getToken(); + if (token == null || token.length == 0) { + throw new IOException("Token cannot be null or empty"); + } + + /* write size of token */ + final ByteBuffer szBuf = ByteBuffer.allocate(4); + LogUtils.writeInt(szBuf, token.length); + ioAdapter.write(szBuf.array()); + /* write token */ + final ByteBuffer tokenBuf = ByteBuffer.allocate(token.length); + LogUtils.writeBytesNoLength(tokenBuf, token); + ioAdapter.write(tokenBuf.array()); + + final byte[] responseByte = new byte[1]; + final int result = ioAdapter.read(responseByte); + if (result < 0) { + throw new IOException( + "No service authenticate response byte: " + result); + } + final ServiceDispatcher.Response + response = ServiceDispatcher.Response.get(responseByte[0]); + if (response == null) { + throw new IOException("Unexpected read response byte: " + + responseByte[0]); + } + setResponse(response); + return ServiceHandshake.InitResult.DONE; + } + } + + /** + * Server side authentication, effectively no-op except rejecting + * handshake and it is not supposed to be called at server-side. + */ + class ServerTokenOp extends ServiceHandshake.ServerInitOp { + + ServerTokenOp(ServiceHandshake.ServerHandshake initState) { + super(initState); + } + + @Override + public ServiceHandshake.InitResult processOp(DataChannel channel) + throws IOException { + return ServiceHandshake.InitResult.FAIL; + } + } +} diff --git a/src/com/sleepycat/je/rep/subscription/ServerAuthMethod.java b/src/com/sleepycat/je/rep/subscription/ServerAuthMethod.java new file mode 100644 index 0000000..c3ad572 --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/ServerAuthMethod.java @@ -0,0 +1,133 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.subscription; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.utilint.ServiceHandshake; + +/** + * Object represents a subscription authentication method used in service + * handshake at server side + */ +public class ServerAuthMethod implements ServiceHandshake.AuthenticationMethod { + + private final StreamAuthenticator serverAuth; + + public ServerAuthMethod(StreamAuthenticator serverAuth) { + this.serverAuth = serverAuth; + } + + @Override + public String getMechanismName() { + return SubscriptionConfig.SERVICE_HANDSHAKE_AUTH_METHOD; + } + + @Override + public ServiceHandshake.ClientInitOp getClientOp( + ServiceHandshake.ClientHandshake initState, String ignoredParams) { + return new ClientTokenOp(initState); + } + + @Override + public ServiceHandshake.ServerInitOp getServerOp( + ServiceHandshake.ServerHandshake initState) { + return new ServerTokenOp(initState, serverAuth); + } + + @Override + public String getServerParams() { + return ""; + } + + /** + * Server side authentication + */ + static class ServerTokenOp extends ServiceHandshake.ServerInitOp { + + /* start with tokenBuf length */ + private final static int BUFFER_TOKEN_SIZE = 4; + private final ByteBuffer + tokenSzBuf = ByteBuffer.allocate(BUFFER_TOKEN_SIZE); + private ByteBuffer tokenBuf = null; + private int tokenSz = 0; + + private final StreamAuthenticator auth; + ServerTokenOp(ServiceHandshake.ServerHandshake initState, + StreamAuthenticator auth) { + super(initState); + this.auth = auth; + } + + @Override + public ServiceHandshake.InitResult processOp(DataChannel channel) + throws IOException { + + ServiceHandshake.InitResult readResult; + + /* processOp() might be called multiple times? */ + if (tokenBuf == null) { + readResult = fillBuffer(channel, tokenSzBuf); + if (readResult != ServiceHandshake.InitResult.DONE) { + return readResult; + } + + /* allocate buffer for token */ + tokenSzBuf.flip(); + tokenSz = LogUtils.readInt(tokenSzBuf); + + if (tokenSz <= 0) { + /* just in case a client put a bad value here */ + return ServiceHandshake.InitResult.REJECT; + } + + tokenBuf = ByteBuffer.allocate(tokenSz); + } + + /* continue read token */ + readResult = fillBuffer(channel, tokenBuf); + if (readResult != ServiceHandshake.InitResult.DONE) { + return readResult; + } + + tokenBuf.flip(); + final byte[] token = LogUtils.readBytesNoLength(tokenBuf, tokenSz); + auth.setToken(token); + if (!auth.authenticate()) { + return ServiceHandshake.InitResult.REJECT; + } + return ServiceHandshake.InitResult.DONE; + } + } + + /** + * Client side authentication, effectively no-op except rejecting + * handshake and it is not supposed to be called at client-side. 
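+ *
+ * For reference, the token exchange that ServerTokenOp above reads is
+ * framed by the client (see ClientAuthMethod.ClientTokenOp) as:
+ *
+ *   [4-byte token length][token bytes]
+ *
+ * followed by a single response byte sent back to the client.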
+ */ + class ClientTokenOp extends ServiceHandshake.ClientInitOp { + + ClientTokenOp(ServiceHandshake.ClientHandshake initState) { + super(initState); + } + + @Override + public ServiceHandshake.InitResult processOp( + ServiceHandshake.IOAdapter ioAdapter) throws IOException { + return ServiceHandshake.InitResult.REJECT; + } + } +} diff --git a/src/com/sleepycat/je/rep/subscription/StreamAuthenticator.java b/src/com/sleepycat/je/rep/subscription/StreamAuthenticator.java new file mode 100644 index 0000000..335303d --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/StreamAuthenticator.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + + +package com.sleepycat.je.rep.subscription; + +/** + * Object represents an interface to authenticate stream consumer and check its + * access privilege. + */ +public interface StreamAuthenticator { + + /** + * Specifies the login token. + * + * @param token login token in bytes + */ + void setToken(byte[] token); + + /** + * Specifies the table Ids. The table Ids are passed from stream consumer + * as string form. Each of the table id strings uniquely identifies a + * subscribed table. + * + * @param tableIds set of subscribed table id strings + */ + void setTableIds(String[] tableIds); + + /** + * Returns whether the current token is valid. + * + * @return true if currently stored token is valid, false otherwise. + */ + boolean authenticate(); + + /** + * Returns whether the current token is valid and grants access to the + * current table Ids. + * + * @return true if owner of current token is valid and has enough + * privileges to stream updates from subscribed tables, false otherwise. + */ + boolean checkAccess(); + + /** + * Gets the time stamp of last check. Implementation of this interface + * shall remember the time stamp of each check, regardless of the check + * result. It shall return 0 if no previous check has been performed. The + * caller can determine if a security check has been performed in the + * last certain milliseconds by subtracting this value from the current + * time. + * + * @return the time stamp of last check in milliseconds, 0 if no previous + * check has been performed. + */ + long getLastCheckTimeMs(); +} diff --git a/src/com/sleepycat/je/rep/subscription/Subscription.java b/src/com/sleepycat/je/rep/subscription/Subscription.java new file mode 100644 index 0000000..f5913b5 --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/Subscription.java @@ -0,0 +1,280 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.subscription; + +import java.io.File; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.utilint.InternalException; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.VLSN; + +/** + * Object to represent a subscription to receive and process replication + * streams from Feeder. It defines the public subscription APIs which can + * be called by clients. + */ +public class Subscription { + + /* configuration parameters */ + private final SubscriptionConfig configuration; + /* logger */ + private final Logger logger; + /* subscription dummy environment */ + private final ReplicatedEnvironment dummyRepEnv; + /* subscription statistics */ + private final SubscriptionStat statistics; + + /* main subscription thread */ + private SubscriptionThread subscriptionThread; + + /** + * Create an instance of subscription from configuration + * + * @param configuration configuration parameters + * @param logger logging handler + * + * @throws IllegalArgumentException if env directory does not exist + */ + public Subscription(SubscriptionConfig configuration, Logger logger) + throws IllegalArgumentException { + + this.configuration = configuration; + this.logger = logger; + + /* init environment and parameters */ + dummyRepEnv = createDummyRepEnv(configuration, logger); + subscriptionThread = null; + statistics = new SubscriptionStat(); + } + + /** + * Start subscription main thread, subscribe from the very first VLSN + * from the feeder. The subscriber will stay alive and consume all entries + * until it shuts down. + * + * @throws InsufficientLogException if feeder is unable to stream from + * start VLSN + * @throws GroupShutdownException if subscription receives group shutdown + * @throws InternalException if internal exception + * @throws TimeoutException if subscription initialization timeout + */ + public void start() + throws IllegalArgumentException, InsufficientLogException, + GroupShutdownException, InternalException, TimeoutException { + + start(VLSN.FIRST_VLSN); + } + + /** + * Start subscription main thread, subscribe from a specific VLSN + * from the feeder. The subscriber will stay alive and consume all entries + * until it shuts down. + * + * @param vlsn the start VLSN of subscription. It cannot be NULL_VLSN + * otherwise an IllegalArgumentException will be raised. 
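+ *
+ * A typical calling sequence (a sketch; the start value is
+ * hypothetical):
+ *
+ *   Subscription sub = new Subscription(config, logger);
+ *   try {
+ *       sub.start(new VLSN(100));   // or start() to use FIRST_VLSN
+ *       // entries are consumed via the configured callback
+ *   } finally {
+ *       sub.shutdown();
+ *   }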
+ *
+ * @throws InsufficientLogException if feeder is unable to stream from
+ * start VLSN
+ * @throws GroupShutdownException if subscription receives group shutdown
+ * @throws InternalException if internal exception
+ * @throws TimeoutException if subscription initialization timeout
+ */
+ public void start(VLSN vlsn)
+ throws IllegalArgumentException, InsufficientLogException,
+ GroupShutdownException, InternalException, TimeoutException {
+
+ if (vlsn.equals(VLSN.NULL_VLSN)) {
+ throw new IllegalArgumentException("Start VLSN cannot be null");
+ }
+
+ subscriptionThread =
+ new SubscriptionThread(dummyRepEnv, vlsn,
+ configuration, statistics,
+ logger);
+ /* fire the subscription thread */
+ subscriptionThread.start();
+
+ if (!waitForSubscriptionInitDone(subscriptionThread)) {
+ LoggerUtils.warning(logger,
+ RepInternal.getNonNullRepImpl(dummyRepEnv),
+ "Timeout in initialization, shut down " +
+ "subscription.");
+ shutdown();
+ throw new TimeoutException("Subscription initialization timeout " +
+ "after " +
+ configuration.getPollTimeoutMs() +
+ " ms");
+ }
+
+ /* If not successful, throw the stored exception to the caller. */
+ final Exception exp = subscriptionThread.getStoredException();
+ switch (subscriptionThread.getStatus()) {
+ case SUCCESS:
+ break;
+
+ case VLSN_NOT_AVAILABLE:
+ /* shutdown and close env before throwing exception to client */
+ shutdown();
+ throw (InsufficientLogException) exp;
+
+ case GRP_SHUTDOWN:
+ /* shutdown and close env before throwing exception to client */
+ shutdown();
+ throw (GroupShutdownException) exp;
+
+ case UNKNOWN_ERROR:
+ case CONNECTION_ERROR:
+ default:
+ /* shutdown and close env before throwing exception to client */
+ shutdown();
+ throw new InternalException("internal exception from " +
+ "subscription thread, err:" +
+ exp.getMessage(), exp);
+ }
+ }
+
+ /**
+ * Shut down a subscription completely
+ */
+ public void shutdown() {
+ if (subscriptionThread != null && subscriptionThread.isAlive()) {
+ subscriptionThread.shutdown();
+ }
+ subscriptionThread = null;
+
+ if (dummyRepEnv != null) {
+ final NodeType nodeType = configuration.getNodeType();
+ if (nodeType.hasTransientId() && !dummyRepEnv.isClosed()) {
+ RepInternal.getNonNullRepImpl(dummyRepEnv)
+ .getNameIdPair()
+ .revertToNull();
+ }
+ dummyRepEnv.close();
+ logger.fine("Closed env " + dummyRepEnv.getNodeName() +
+ "(forget transient id? " +
+ nodeType.hasTransientId() + ")");
+ }
+ }
+
+ /**
+ * Get subscription thread status; if the thread does not exist,
+ * return INIT to indicate the subscription has not yet started.
+ *
+ * @return status of subscription
+ */
+ public SubscriptionStatus getSubscriptionStatus() {
+ if (subscriptionThread == null) {
+ return SubscriptionStatus.INIT;
+ } else {
+ return subscriptionThread.getStatus();
+ }
+ }
+
+ /**
+ * Get subscription statistics
+ *
+ * @return statistics
+ */
+ public SubscriptionStat getStatistics() {
+ return statistics;
+ }
+
+ /**
+ * For unit test only
+ *
+ * @return dummy env
+ */
+ ReplicatedEnvironment getDummyRepEnv() {
+ return dummyRepEnv;
+ }
+
+ /**
+ * For unit test only
+ *
+ * @param testHook test hook
+ */
+ void setExceptionHandlingTestHook(TestHook testHook) {
+ if (subscriptionThread != null) {
+ subscriptionThread.setExceptionHandlingTestHook(testHook);
+ }
+ }
+
+ /**
+ * Create a dummy replicated env used by subscription. The dummy env will
+ * be used in the SubscriptionThread, SubscriptionProcessMessageThread and
+ * SubscriptionOutputThread to connect to feeder.
+ *
+ * @param conf subscription configuration
+ * @param logger logger
+ * @return a replicated environment
+ * @throws IllegalArgumentException if env directory does not exist
+ */
+ private static ReplicatedEnvironment
+ createDummyRepEnv(SubscriptionConfig conf, Logger logger)
+ throws IllegalArgumentException {
+
+ final ReplicatedEnvironment ret;
+ final File envHome = new File(conf.getSubscriberHome());
+ if (!envHome.exists()) {
+ throw new IllegalArgumentException("Env directory " +
+ envHome.getAbsolutePath() +
+ " does not exist.");
+ }
+
+ ret =
+ RepInternal.createInternalEnvHandle(envHome,
+ conf.createReplicationConfig(),
+ conf.createEnvConfig());
+
+ /*
+ * A safety check: clear the id if necessary, to prevent an env with
+ * an existing id from failing the subscription.
+ */
+ final NameIdPair pair = RepInternal.getNonNullRepImpl(ret)
+ .getNameIdPair();
+ if (conf.getNodeType().hasTransientId() && !pair.hasNullId()) {
+ logger.fine("Env has a non-null id, clear its id(name id: " +
+ pair + ")");
+ pair.revertToNull();
+ }
+ logger.fine("Env created with name id pair " + pair);
+ return ret;
+ }
+
+ /**
+ * Wait for subscription thread to finish initialization
+ *
+ * @param t thread of subscription
+ * @return true if init done successfully, false if timeout
+ */
+ private boolean waitForSubscriptionInitDone(final SubscriptionThread t) {
+ return new PollCondition(configuration.getPollIntervalMs(),
+ configuration.getPollTimeoutMs()) {
+ @Override
+ protected boolean condition() {
+ return t.getStatus() != SubscriptionStatus.INIT;
+ }
+
+ }.await();
+ }
+}
diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionAuthHandler.java b/src/com/sleepycat/je/rep/subscription/SubscriptionAuthHandler.java
new file mode 100644
index 0000000..c881301
--- /dev/null
+++ b/src/com/sleepycat/je/rep/subscription/SubscriptionAuthHandler.java
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.subscription;
+
+import com.sleepycat.je.rep.ReplicationSecurityException;
+
+/**
+ * Object represents an interface of subscriber authenticator, used by
+ * subscriber to track token expiration and refresh token proactively.
+ */
+public interface SubscriptionAuthHandler {
+
+ /**
+ * Returns whether the subscriber has a new token to update during
+ * subscription. The subscriber needs to update an existing token when
+ * 1) the token will expire soon and needs to be renewed, or 2) the token
+ * cannot be renewed further and re-authentication is needed to get a
+ * new token. Implementations of the interface shall check the two
+ * conditions above and refresh the token if necessary.
+ *
+ * @throws ReplicationSecurityException if implementation of the
+ * interface fails to renew or re-authenticate to get a new token
+ *
+ * @return true if the subscriber has a new token to update, false
+ * otherwise.
+ */
+ boolean hasNewToken() throws ReplicationSecurityException;
+
+ /**
+ * Returns the login token in bytes. It returns null if no identity
+ * information is available.
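+ *
+ * A minimal implementation sketch (helper names hypothetical, not
+ * part of this interface):
+ *
+ *   private byte[] token;              // cached login token
+ *   public boolean hasNewToken() {
+ *       if (expiresSoon(token)) {      // hypothetical expiry check
+ *           token = renewOrLogin();    // renew, else re-authenticate
+ *           return true;
+ *       }
+ *       return false;
+ *   }
+ *   public byte[] getToken() { return token; }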
+ * + * @return login token as byte array, null if token is not available at + * the time of calling. + */ + byte[] getToken(); +} diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionCallback.java b/src/com/sleepycat/je/rep/subscription/SubscriptionCallback.java new file mode 100644 index 0000000..5f87447 --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/SubscriptionCallback.java @@ -0,0 +1,66 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.subscription; + +import com.sleepycat.je.utilint.VLSN; + +/** + * Interface of subscription callback function, to be implemented by clients to + * process each received subscription message. + */ +public interface SubscriptionCallback { + + /** + * Process a put (insert or update) entry from stream + * + * @param vlsn VLSN of the insert entry + * @param key key of the insert entry + * @param value value of the insert entry + * @param txnId id of txn the entry belongs to + */ + void processPut(VLSN vlsn, byte[] key, byte[] value, long txnId); + + /** + * Process a delete entry from stream + * + * @param vlsn VLSN of the delete entry + * @param key key of the delete entry + * @param txnId id of txn the entry belongs to + */ + void processDel(VLSN vlsn, byte[] key, long txnId); + + /** + * Process a commit entry from stream + * + * @param vlsn VLSN of commit entry + * @param txnId id of txn to commit + */ + void processCommit(VLSN vlsn, long txnId); + + /** + * Process an abort entry from stream + * + * @param vlsn VLSN of abort entry + * @param txnId id of txn to abort + */ + void processAbort(VLSN vlsn, long txnId); + + /** + * Process the exception from stream. + * + * @param exp exception raised in service and to be processed by + * client + */ + void processException(final Exception exp); +} diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionConfig.java b/src/com/sleepycat/je/rep/subscription/SubscriptionConfig.java new file mode 100644 index 0000000..4f63a73 --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/SubscriptionConfig.java @@ -0,0 +1,803 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.subscription; + +import java.io.Serializable; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.DurationConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.config.IntConfigParam; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.stream.BaseProtocol; +import com.sleepycat.je.rep.stream.BaseProtocol.EntryRequestType; +import com.sleepycat.je.rep.stream.FeederFilter; +import com.sleepycat.je.rep.stream.OutputWireRecord; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.rep.utilint.ServiceHandshake; +import com.sleepycat.je.utilint.DatabaseUtil; +import com.sleepycat.je.utilint.PropUtil; +import com.sleepycat.je.utilint.VLSN; + +/** + * Object to represent parameters to configure a subscription. + */ +public class SubscriptionConfig implements Cloneable { + + /*-----------------------------------*/ + /*- Constant Parameters -*/ + /*-----------------------------------*/ + + /* queue poll interval in millisecond, 1 second */ + public final static long QUEUE_POLL_INTERVAL_MS = 1000l; + + /* authentication method used in service handshake */ + public final static String SERVICE_HANDSHAKE_AUTH_METHOD = + "SubscriptionTokenAuth"; + + /* + * Default minimal HA protocol version to use subscription, version 6 + * is used by client like FTS in KV + */ + private final static int DEFAULT_MIN_PROTOCOL_VERSION = + BaseProtocol.VERSION_6; + + /* for quick response, no Nagle's algorithm */ + public final boolean TCP_NO_DELAY = true; + /* always blocking mode socket channel */ + public final boolean BLOCKING_MODE_CHANNEL = true; + /* always validate parameters */ + private final boolean validateParams = true; + + /*-----------------------------------*/ + /*- User-defined Parameters -*/ + /*-----------------------------------*/ + + /* local directory of subscriber */ + private final String subHome; + + /* + * identity of a subscription node. + * + * Subscription client need to create a globally unique node name, e.g., + * subscription- because the feeder maintains the identity of + * each connection, and would reject request from a client with a + * duplicate identity. + */ + private final String subNodeName; + + /* subscriber host and port */ + private final String subHostPortPair; + /* host where the feeder is running */ + private final String feederHostPortPair; + /* name of replication group */ + private final String groupName; + + /* authenticator, null if no authentication is needed */ + private final SubscriptionAuthHandler authenticator; + + /* + * Stream mode to specify how subscription should start, depending on + * the entry request type set by user. For details of each supported + * request type, please see {@link BaseProtocol.EntryRequestType}. + */ + private EntryRequestType streamMode; + + /* + * uuid of feeder replication group. + * + * This parameter is optional. 
If the subscription client does not provide a
+ * group UUID, the subscription will subscribe to a feeder as long as the
+ * subscription group name matches that of the feeder. However, if the
+ * subscription client does provide a valid group UUID, it has to match
+ * that of the feeder, otherwise the subscription request will be rejected.
+ */
+ private UUID groupUUID;
+
+ /* callback used in subscription */
+ private SubscriptionCallback callBack;
+ /* filter passed to feeder */
+ private FeederFilter feederFilter;
+
+ /* home of a set of connection parameters */
+ private Properties props;
+
+ /* message queue size */
+ private int inputMessageQueueSize;
+ private int outputMessageQueueSize;
+
+ /*
+ * Subscription node type, by default it is SECONDARY, e.g. used by FTS.
+ * However, the user is able to override it with other types, e.g.,
+ * EXTERNAL used in NoSQL Stream. As of now we only allow SECONDARY and
+ * EXTERNAL nodes to use the subscription service.
+ */
+ private NodeType nodeType = NodeType.SECONDARY;
+
+ /*
+ * Minimal required HA protocol version. If the minimal required version
+ * is higher than the default, the client should set it in the config.
+ */
+ private int minProtocolVersion = DEFAULT_MIN_PROTOCOL_VERSION;
+
+ /**
+ * Authentication methods, null if no authentication is required.
+ *
+ * TODO: at the time of writing, only token authentication is supported.
+ * In future we may support multiple authentication methods for the
+ * subscription client.
+ */
+ private final ServiceHandshake.AuthenticationMethod authInfo;
+
+ /**
+ * Create a subscription configuration
+ *
+ * @param subNodeName id of the subscription
+ * @param subHome home directory of subscriber
+ * @param subHostPortPair subscriber host and port
+ * @param feederHostPortPair feeder host and port
+ * @param groupName name of the replication group the feeder belongs to
+ */
+ public SubscriptionConfig(String subNodeName,
+ String subHome,
+ String subHostPortPair,
+ String feederHostPortPair,
+ String groupName) throws UnknownHostException {
+ this(subNodeName, subHome, subHostPortPair, feederHostPortPair,
+ groupName, null);
+ }
+
+ /**
+ * Create a subscription configuration with group UUID.
+ *
+ * @param subNodeName id of the subscription
+ * @param subHome home directory of subscriber
+ * @param subHostPortPair subscriber host and port
+ * @param feederHostPortPair feeder host and port
+ * @param groupName name of the replication group the feeder belongs to
+ * @param groupUUID id of the replication group the feeder belongs to
+ */
+ public SubscriptionConfig(String subNodeName,
+ String subHome,
+ String subHostPortPair,
+ String feederHostPortPair,
+ String groupName,
+ UUID groupUUID) throws UnknownHostException {
+ this(subNodeName, subHome, subHostPortPair, feederHostPortPair,
+ groupName, groupUUID, NodeType.SECONDARY);
+ }
+
+ /**
+ * Create a subscription configuration with group UUID.
+ * + * @param subNodeName id of the subscription + * @param subHome home directory of subscriber + * @param subHostPortPair subscriber host and port + * @param feederHostPortPair feeder host and port + * @param groupName name of replication group feeder belong to + * @param groupUUID id of replication group feeder belong to + * @param nodeType type of subscription node + */ + public SubscriptionConfig(String subNodeName, + String subHome, + String subHostPortPair, + String feederHostPortPair, + String groupName, + UUID groupUUID, + NodeType nodeType) throws UnknownHostException { + + this(subNodeName, subHome, subHostPortPair, feederHostPortPair, + groupName, groupUUID, nodeType, null); + } + + /** + * Create a subscription configuration with group UUID. + * + * @param subNodeName id of the subscription + * @param subHome home directory of subscriber + * @param subHostPortPair subscriber host and port + * @param feederHostPortPair feeder host and port + * @param groupName name of replication group feeder belong to + * @param groupUUID id of replication group feeder belong to + * @param nodeType type of subscription node + * @param authenticator the stream authentication handler or null + */ + public SubscriptionConfig(String subNodeName, + String subHome, + String subHostPortPair, + String feederHostPortPair, + String groupName, + UUID groupUUID, + NodeType nodeType, + SubscriptionAuthHandler authenticator) + throws UnknownHostException { + + this(subNodeName, subHome, subHostPortPair, feederHostPortPair, + groupName, groupUUID, nodeType, authenticator, new Properties()); + } + + /** + * Create a subscription configuration with group UUID. + * + * @param subNodeName id of the subscription + * @param subHome home directory of subscriber + * @param subHostPortPair subscriber host and port + * @param feederHostPortPair feeder host and port + * @param groupName name of replication group feeder belong to + * @param groupUUID id of replication group feeder belong to + * @param nodeType type of subscription node + * @param authenticator the stream authentication handler or null + * @param props connection parameters + */ + public SubscriptionConfig(String subNodeName, + String subHome, + String subHostPortPair, + String feederHostPortPair, + String groupName, + UUID groupUUID, + NodeType nodeType, + SubscriptionAuthHandler authenticator, + Properties props) + throws UnknownHostException { + + /* subscriber */ + this.subNodeName = subNodeName; + this.subHome = subHome; + this.subHostPortPair = subHostPortPair; + + /* feeder */ + this.feederHostPortPair = feederHostPortPair; + + /* replication group */ + this.groupName = groupName; + this.groupUUID = groupUUID; + this.nodeType = nodeType; + this.authenticator = authenticator; + this.props = props; + + streamMode = BaseProtocol.EntryRequestType.DEFAULT; + + /* other parameters */ + inputMessageQueueSize = getDefaultMsgQueueSize(); + outputMessageQueueSize = getDefaultMsgQueueSize(); + + /* default callback and filter */ + callBack = new DefaultCallback(); + feederFilter = new DefaultFeederFilter(); + + authInfo = createClientAuthMethod(authenticator); + verifyParameters(); + } + + /** + * Create an environment configuration for subscription + * + * @return an environment configuration + */ + public EnvironmentConfig createEnvConfig() { + /* Populate env. 
configuration parameters */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setReadOnly(true); + envConfig.setTransactional(true); + envConfig.setConfigParam( + EnvironmentParams.ENV_RECOVERY.getName(), "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_SETUP_LOGGER.getName(), "true"); + + return envConfig; + } + + /** + * Create a replication configuration for subscription + * + * @return a replication configuration + */ + ReplicationConfig createReplicationConfig() { + /* Populate rep. configuration parameters */ + ReplicationConfig repConfig = + new ReplicationConfig(getGroupName(), + getSubNodeName(), + getSubNodeHostPort()); + + repConfig.setConfigParam(RepParams.SUBSCRIBER_USE.getName(), "true"); + + final ReplicationNetworkConfig rwc; + if (props == null || props.isEmpty()) { + /* no property, create default non-ssl rwc */ + rwc = ReplicationNetworkConfig.createDefault(); + } else { + /* create rwc from property, will create ssl rwc if configured */ + rwc = ReplicationNetworkConfig.create(props); + } + repConfig.setRepNetConfig(rwc); + + repConfig.setConfigParam(RepParams.REPLICA_MESSAGE_QUEUE_SIZE.getName(), + Integer.toString(getDefaultMsgQueueSize())); + + repConfig.setConfigParam( + RepParams.REPLICA_TIMEOUT.getName(), + String.valueOf(getChannelTimeout(TimeUnit.MILLISECONDS)) + + " ms"); + + repConfig.setConfigParam( + RepParams.PRE_HEARTBEAT_TIMEOUT.getName(), + String.valueOf(getPreHeartbeatTimeout(TimeUnit.MILLISECONDS)) + + " ms"); + + repConfig.setConfigParam( + RepParams.REPSTREAM_OPEN_TIMEOUT.getName(), + String.valueOf(getStreamOpenTimeout(TimeUnit.MILLISECONDS)) + + " ms"); + + repConfig.setConfigParam(RepParams.HEARTBEAT_INTERVAL.getName(), + Integer.toString(getHeartbeatIntervalMs())); + + repConfig.setConfigParam( + RepParams.REPLICA_RECEIVE_BUFFER_SIZE.getName(), + Integer.toString(getReceiveBufferSize())); + + /* set subscription client node type */ + repConfig.setNodeType(nodeType); + + return repConfig; + } + + /*--------------*/ + /*- Getters -*/ + /*--------------*/ + + public FeederFilter getFeederFilter() { + return feederFilter; + } + + public SubscriptionAuthHandler getAuthenticator() { + return authenticator; + } + + public SubscriptionCallback getCallBack() { + return callBack; + } + + public String getSubscriberHome() { + return subHome; + } + + public String getFeederHost() { + return HostPortPair.getHostname(feederHostPortPair); + } + + public int getFeederPort() { + return HostPortPair.getPort(feederHostPortPair); + } + + public InetAddress getFeederHostAddr() throws UnknownHostException { + return InetAddress.getByName(HostPortPair + .getHostname(feederHostPortPair)); + } + + public String getSubNodeName() { + return subNodeName; + } + + public String getSubNodeHostPort() { + return subHostPortPair; + } + + public String getGroupName() { + return groupName; + } + + public UUID getGroupUUID() { + return groupUUID; + } + + public int getMaxConnectRetries() { + return DbConfigManager.getIntVal(props, + RepParams + .SUBSCRIPTION_MAX_CONNECT_RETRIES); + } + + public long getSleepBeforeRetryMs() { + return + DbConfigManager.getDurationVal(props, + RepParams + .SUBSCRIPTION_SLEEP_BEFORE_RETRY, + TimeUnit.MILLISECONDS); + } + + public long getChannelTimeout(TimeUnit unit) { + DurationConfigParam param = RepParams.REPLICA_TIMEOUT; + if (props.containsKey(param.getName())) { + return DbConfigManager.getDurationVal(props, + RepParams.REPLICA_TIMEOUT, + unit); + } else { + long ms = PropUtil.parseDuration(param.getDefault()); + 
return unit.convert(ms, TimeUnit.MILLISECONDS); + } + } + + public long getPollIntervalMs() { + return DbConfigManager.getDurationVal(props, + RepParams + .SUBSCRIPTION_POLL_INTERVAL, + TimeUnit.MILLISECONDS); + } + + public long getPollTimeoutMs() { + return DbConfigManager.getDurationVal(props, + RepParams + .SUBSCRIPTION_POLL_TIMEOUT, + TimeUnit.MILLISECONDS); + } + + public long getPreHeartbeatTimeout(TimeUnit unit) { + DurationConfigParam param = RepParams.PRE_HEARTBEAT_TIMEOUT; + if (props.containsKey(param.getName())) { + return DbConfigManager.getDurationVal(props, param, unit); + } else { + long ms = PropUtil.parseDuration(param.getDefault()); + return unit.convert(ms, TimeUnit.MILLISECONDS); + } + } + + public long getStreamOpenTimeout(TimeUnit unit) { + DurationConfigParam param = RepParams.REPSTREAM_OPEN_TIMEOUT; + if (props.containsKey(param.getName())) { + return DbConfigManager.getDurationVal(props, param, unit); + } else { + long ms = PropUtil.parseDuration(param.getDefault()); + return unit.convert(ms, TimeUnit.MILLISECONDS); + } + } + + public int getHeartbeatIntervalMs() { + IntConfigParam param = RepParams.HEARTBEAT_INTERVAL; + if (props.containsKey(param.getName())) { + return DbConfigManager.getIntVal(props, param); + } else { + return Integer.parseInt(param.getDefault()); + } + } + + public int getReceiveBufferSize() { + IntConfigParam param = RepParams.REPLICA_RECEIVE_BUFFER_SIZE; + if (props.containsKey(param.getName())) { + return DbConfigManager.getIntVal(props, param); + } else { + return Integer.parseInt(param.getDefault()); + } + } + + public int getInputMessageQueueSize() { + return inputMessageQueueSize; + } + + public int getOutputMessageQueueSize() { + return outputMessageQueueSize; + } + + public InetSocketAddress getInetSocketAddress() + throws UnknownHostException { + return new InetSocketAddress(getFeederHostAddr(), getFeederPort()); + } + + public EntryRequestType getStreamMode() { + return streamMode; + } + + /*--------------*/ + /*- Setters -*/ + /*--------------*/ + + public void setMinProtocolVersion(int ver) { + if (ver < DEFAULT_MIN_PROTOCOL_VERSION) { + throw new IllegalArgumentException( + "Minimal HA protocol version cannot be lower than " + + DEFAULT_MIN_PROTOCOL_VERSION); + } + + minProtocolVersion = ver; + } + + public void setGroupUUID(UUID gID) { + groupUUID = gID; + } + + public void setCallback(SubscriptionCallback cbk) { + if (cbk == null) { + throw new IllegalArgumentException("Subscription callback cannot " + + "be null."); + } + callBack = cbk; + } + + public void setChannelTimeout(long timeout, TimeUnit unit) + throws IllegalArgumentException { + DbConfigManager.setDurationVal(props, RepParams.REPLICA_TIMEOUT, + timeout, unit, validateParams); + } + + public void setPreHeartbeatTimeout(long timeout, TimeUnit unit) + throws IllegalArgumentException { + DbConfigManager.setDurationVal(props, RepParams.PRE_HEARTBEAT_TIMEOUT, + timeout, unit, validateParams); + } + + public void setHeartbeatInterval(int ms) + throws IllegalArgumentException { + DbConfigManager.setIntVal(props, RepParams.HEARTBEAT_INTERVAL, ms, + validateParams); + } + + public void setStreamOpenTimeout(long timeout, TimeUnit unit) + throws IllegalArgumentException { + DbConfigManager.setDurationVal(props, RepParams.REPSTREAM_OPEN_TIMEOUT, + timeout, unit, validateParams); + } + + public void setReceiveBufferSize(int val) { + DbConfigManager.setIntVal(props, RepParams.REPLICA_RECEIVE_BUFFER_SIZE, + val, validateParams); + } + + public void setInputMessageQueueSize(int 
size) { + inputMessageQueueSize = size; + } + + public void setOutputMessageQueueSize(int size) { + outputMessageQueueSize = size; + } + + public NodeType getNodeType() { + return nodeType; + } + + public int getMinProtocolVersion() { + return minProtocolVersion; + } + + public SubscriptionConfig clone() { + try { + SubscriptionConfig ret = (SubscriptionConfig) super.clone(); + ret.setProps(this.props); + return ret; + } catch (CloneNotSupportedException willNeverOccur) { + return null; + } + } + + /** + * Set the feeder filter which will be transmitted to Feeder. + * + * @param filter the non-null feeder filter + */ + public void setFeederFilter(FeederFilter filter) { + + if (filter == null) { + throw new IllegalArgumentException("Feeder filter cannot be null."); + } + feederFilter = filter; + } + + public void setStreamMode(EntryRequestType type) { + streamMode = type; + } + + private void setProps(Properties p) { + props = p; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("subscription configuration: ").append("\n"); + sb.append("subscription name: ").append(subNodeName).append("\n"); + sb.append("home directory: ").append(subHome).append("\n"); + sb.append("home host and port: ").append(subHostPortPair).append("\n"); + + sb.append("feeder host and port: ").append(feederHostPortPair) + .append("\n"); + + try { + sb.append("feeder address: ") + .append(getFeederHostAddr()).append("\n"); + } catch (UnknownHostException e) { + sb.append("feeder address: unknown host ") + .append(feederHostPortPair).append("\n"); + } + sb.append("feeder filter: ").append(feederFilter).append("\n"); + + sb.append("rep group: ").append(groupName).append("\n"); + sb.append("rep group id: ").append(groupUUID).append("\n"); + sb.append("stream mode: ").append(streamMode); + + return sb.toString(); + } + + /* Creates handshake auth method from subscription auth handler */ + private ClientAuthMethod createClientAuthMethod(SubscriptionAuthHandler + authHandler) { + if (authHandler == null) { + return null; + } + + return new ClientAuthMethod(authHandler); + } + + /* Return authentication method */ + ServiceHandshake.AuthenticationMethod[] getAuthInfo() { + if (authInfo == null) { + return null; + } + return new ServiceHandshake.AuthenticationMethod[]{authInfo}; + } + + /* + * Verify all required parameters are available and valid + * + * must-have parameters: + * - non-null home directory + * - non-null feeder host port pair + * - non-null feeder host name + * - non-null feeder host port + * - non-null subscriber node host port pair + * - non-null subscriber node name + * - non-null subscriber node host port + * - non-null replication group name + * + * @throws IllegalArgumentException + */ + private void verifyParameters() throws IllegalArgumentException { + + DatabaseUtil.checkForNullParam(getSubscriberHome(), + "subscription home directory"); + + DatabaseUtil.checkForNullParam(feederHostPortPair, + "feeder host port pair"); + + DatabaseUtil.checkForNullParam(getFeederHost(), "feeder host name"); + + DatabaseUtil.checkForNullParam(getFeederPort(), "feeder host port"); + + DatabaseUtil.checkForNullParam(subHostPortPair, + "subscriber host port pair"); + + DatabaseUtil.checkForNullParam(getSubNodeName(), + "subscriber node name"); + + DatabaseUtil.checkForNullParam(getSubNodeHostPort(), + "subscriber node host port"); + + DatabaseUtil.checkForNullParam(getGroupName(), "replication group"); + + /* we only support SECONDARY and EXTERNAL node type for 
subscription */ + if (!nodeType.isExternal() && !nodeType.isSecondary()) { + throw new IllegalArgumentException( + "'node type' param must be either SECONDARY or " + + "EXTERNAL, found node type: " + nodeType); + } + } + + /* a default no-op callback */ + private class DefaultCallback implements SubscriptionCallback { + + DefaultCallback() { + } + + @Override + public void processPut(VLSN vlsn, byte[] key, byte[] value, + long txnId) { + + } + + @Override + public void processDel(VLSN vlsn, byte[] key, long txnId) { + + } + + @Override + public void processCommit(VLSN vlsn, long txnid) { + + } + + @Override + public void processAbort(VLSN vlsn, long txnid) { + + } + + @Override + public void processException(final Exception exception) { + + } + } + + private int getDefaultMsgQueueSize() { + IntConfigParam param = RepParams.REPLICA_MESSAGE_QUEUE_SIZE; + if (props.containsKey(param.getName())) { + return DbConfigManager.getIntVal(props, param); + } else { + return Integer.parseInt(param.getDefault()); + } + } + + /* + * a default filter that filters out entries from internal db and db that + * supports duplicates, and subscribes to all tables. It allows subscriber + * to receive updates from all tables since no table id is set. + */ + private static class DefaultFeederFilter + implements FeederFilter, Serializable { + private static final long serialVersionUID = 1L; + + DefaultFeederFilter() { + super(); + } + + @Override + public String[] getTableIds() { + return null; + } + + @Override + public OutputWireRecord execute(final OutputWireRecord record, + final RepImpl repImpl) { + + /* keep record if db id is null */ + final DatabaseId dbId = record.getReplicableDBId(); + if (dbId == null) { + return record; + } + + final DbTree dbTree = repImpl.getDbTree(); + final DatabaseImpl impl = dbTree.getDb(dbId); + try { + /* keep record if db impl is not available */ + if (impl == null) { + return record; + } + + /* filter out if from an db supporting duplicates */ + if (impl.getSortedDuplicates()) { + return null; + } + + /* filter out if from an internal db */ + if (impl.isInternalDb()) { + return null; + } + + return record; + } finally { + if (impl != null) { + dbTree.releaseDb(impl); + } + } + } + } +} diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionOutputThread.java b/src/com/sleepycat/je/rep/subscription/SubscriptionOutputThread.java new file mode 100644 index 0000000..cf816c5 --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/SubscriptionOutputThread.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+package com.sleepycat.je.rep.subscription;
+
+import java.io.IOException;
+import java.util.concurrent.BlockingQueue;
+
+import com.sleepycat.je.rep.ReplicationSecurityException;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.node.ReplicaOutputThreadBase;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.stream.BaseProtocol.HeartbeatResponse;
+import com.sleepycat.je.rep.stream.Protocol;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * The output thread created by a subscription to respond to heartbeat
+ * pings from the feeder.
+ */
+class SubscriptionOutputThread extends ReplicaOutputThreadBase {
+
+    /* handle to statistics */
+    private final SubscriptionStat stats;
+    /* authentication handler used for token renewal */
+    private final SubscriptionAuthHandler authenticator;
+    /* parent subscription thread */
+    private final SubscriptionThread parentThread;
+
+    SubscriptionOutputThread(SubscriptionThread parentThread,
+                             RepImpl repImpl,
+                             BlockingQueue outputQueue,
+                             Protocol protocol,
+                             DataChannel replicaFeederChannel,
+                             SubscriptionAuthHandler authenticator,
+                             SubscriptionStat stats) {
+        super(repImpl, outputQueue, protocol, replicaFeederChannel);
+        this.parentThread = parentThread;
+        this.authenticator = authenticator;
+        this.stats = stats;
+    }
+
+    /**
+     * Implements the reauthentication response for the output thread. It
+     * sends a token to the server, which conducts a security check for the
+     * subscriber with the new token.
+     *
+     * @throws ReplicationSecurityException if unable to obtain a new login
+     * token by renewal or reauthentication;
+     * @throws IOException if unable to write the reauth message to the
+     * channel.
+     */
+    @Override
+    public void writeReauthentication()
+        throws ReplicationSecurityException, IOException {
+
+        if (authenticator != null && authenticator.hasNewToken()) {
+
+            Protocol.ReAuthenticate response =
+                protocol.new ReAuthenticate(authenticator.getToken());
+
+            protocol.write(response, replicaFeederChannel);
+        }
+    }
+
+    /**
+     * Implements the heartbeat response for the output thread.
+     *
+     * @param txnId txn id
+     * @throws IOException if unable to write the heartbeat message to the
+     * channel
+     */
+    @Override
+    public void writeHeartbeat(Long txnId) throws IOException {
+
+        /* report the most recently received VLSN to feeder */
+        HeartbeatResponse response =
+            protocol.new HeartbeatResponse(VLSN.NULL_VLSN,
+                                           stats.getHighVLSN());
+
+        protocol.write(response, replicaFeederChannel);
+        stats.getNumMsgResponded().increment();
+    }
+}
diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionProcessMessageThread.java b/src/com/sleepycat/je/rep/subscription/SubscriptionProcessMessageThread.java
new file mode 100644
index 0000000..1802e0e
--- /dev/null
+++ b/src/com/sleepycat/je/rep/subscription/SubscriptionProcessMessageThread.java
@@ -0,0 +1,231 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information. 
+ */
+package com.sleepycat.je.rep.subscription;
+
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.rep.GroupShutdownException;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.stream.InputWireRecord;
+import com.sleepycat.je.rep.stream.Protocol;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.StoppableThread;
+import com.sleepycat.je.utilint.VLSN;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+
+import static com.sleepycat.je.log.LogEntryType.LOG_TXN_ABORT;
+import static com.sleepycat.je.log.LogEntryType.LOG_TXN_COMMIT;
+
+/**
+ * The thread created by a subscription to process messages received from
+ * the feeder.
+ */
+class SubscriptionProcessMessageThread extends StoppableThread {
+
+    /* handle to stats */
+    private final SubscriptionStat stats;
+    /* configuration */
+    private final SubscriptionConfig config;
+    /* input queue from which to consume messages */
+    private final BlockingQueue queue;
+    /* logger */
+    private final Logger logger;
+
+    /* exit flag to specify exit type */
+    private volatile ExitType exitRequest;
+
+    /**
+     * Construct a subscription thread to process messages.
+     *
+     * @param impl   RepImpl of the replication node where the thread runs
+     * @param queue  Input queue from which to consume messages
+     * @param config Subscription configuration
+     * @param stats  Subscription statistics
+     * @param logger Logger
+     */
+    SubscriptionProcessMessageThread(RepImpl impl,
+                                     BlockingQueue queue,
+                                     SubscriptionConfig config,
+                                     SubscriptionStat stats,
+                                     Logger logger) {
+        super(impl, "SubscriptionProcessMessageThread");
+        this.logger = logger;
+        this.config = config;
+        this.queue = queue;
+        this.stats = stats;
+
+        exitRequest = ExitType.NONE;
+        stats.setHighVLSN(VLSN.NULL_VLSN);
+    }
+
+    /**
+     * Shut down the input thread immediately, regardless of the state of
+     * the queue.
+     */
+    public void shutdown() {
+        exitRequest = ExitType.IMMEDIATE;
+    }
+
+    /**
+     * Implement a soft shutdown. The thread will exit after all messages in
+     * the queue are consumed and processed.
+     *
+     * @return the amount of time in ms that the shutdownThread method will
+     * wait for the thread to exit. A negative value means that the method
+     * will not wait. A zero value means it will wait indefinitely.
+     */
+    @Override
+    public int initiateSoftShutdown() {
+        exitRequest = ExitType.IMMEDIATE;
+
+        return 0;
+    }
+
+    /**
+     * Implements the thread's run() method: dequeue messages from the queue
+     * and process them via the callback.
+     */
+    @Override
+    public void run() {
+
+        /* callback provided by client to process each message in input queue */
+        final SubscriptionCallback callBack = config.getCallBack();
+
+        logger.info("Input thread started. Message queue size:" +
+                    queue.remainingCapacity());
+
+        /* loop to process each message in the queue */
+        try {
+            while (true) {
+                if (exitRequest == ExitType.IMMEDIATE) {
+                    /*
+                     * if immediate exit is requested, exit without
+                     * consuming any message in the queue
+                     */
+                    break;
+                } else {
+
+                    /* fetch next message from queue */
+                    final Object message =
+                        queue.poll(SubscriptionConfig.QUEUE_POLL_INTERVAL_MS,
+                                   TimeUnit.MILLISECONDS);
+
+                    if (message == null) {
+                        /*
+                         * No message to consume, continue and wait for the
+                         * next message. 
+ */ + continue; + + } else if (message instanceof Exception) { + + callBack.processException((Exception) message); + + /* exits if shutdown message from feeder */ + if (message instanceof GroupShutdownException) { + exitRequest = ExitType.IMMEDIATE; + GroupShutdownException gse = + (GroupShutdownException) message; + logger.info("Received shutdown message from " + + config.getFeederHost() + + " at VLSN " + gse.getShutdownVLSN()); + break; + } + } else { + + /* use different callbacks depending on entry type */ + final InputWireRecord wireRecord = + ((Protocol.Entry) message).getWireRecord(); + final VLSN vlsn = wireRecord.getVLSN(); + final byte type = wireRecord.getEntryType(); + final LogEntry entry = wireRecord.getLogEntry(); + final long txnId = entry.getTransactionId(); + + stats.setHighVLSN(vlsn); + stats.getNumOpsProcessed().increment(); + + /* call different proc depending on entry type */ + if (LOG_TXN_COMMIT.equalsType(type)) { + stats.getNumTxnCommitted().increment(); + callBack.processCommit(vlsn, txnId); + continue; + } + + if (LOG_TXN_ABORT.equalsType(type)) { + stats.getNumTxnAborted().increment(); + callBack.processAbort(vlsn, txnId); + continue; + } + + if (entry instanceof LNLogEntry) { + + /* receive a LNLogEntry from Feeder */ + final LNLogEntry lnEntry = (LNLogEntry)entry; + + /* + * We have to call postFetchInit to avoid EFE. The + * function will reformat the key/data if entry is + * from a dup DB. The default feeder filter would + * filter out all dup db entries for us. + * + * TODO: + * Note today we temporarily disabled user-defined + * feeder filter and thus users are unable to + * replace the default feeder filter with their own. + * So here it is safe to assume no dup db entry. + * + * We will have to address the dup db entry issue + * in future to make the Subscription API public, + * in which users will be allowed to use their own + * feeder filter. + */ + lnEntry.postFetchInit(false); + + if (lnEntry.getLN().isDeleted()) { + callBack.processDel(vlsn, lnEntry.getKey(), + txnId); + } else { + callBack.processPut(vlsn, lnEntry.getKey(), + lnEntry.getData(), txnId); + } + } + } + } + } + } catch (InterruptedException e) { + logger.warning("input thread receives exception " + e.getMessage() + + ", process the exception in callback, clear queue " + + "and exit." + "\n" + LoggerUtils.getStackTrace(e)); + + exitRequest = ExitType.IMMEDIATE; + } finally { + queue.clear(); + logger.info("message queue cleared, thread exits with type: " + + exitRequest); + } + } + + @Override + protected Logger getLogger() { + return logger; + } + + /* types of exits */ + private enum ExitType { + NONE, /* No exit requested */ + IMMEDIATE, /* An immediate exit; ignore queued requests. */ + SOFT /* Process pending requests in queue, then exit */ + } +} diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionStat.java b/src/com/sleepycat/je/rep/subscription/SubscriptionStat.java new file mode 100644 index 0000000..fae8aee --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/SubscriptionStat.java @@ -0,0 +1,124 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + + +package com.sleepycat.je.rep.subscription; + +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; + +/** + * Object to represent subscription statistics + */ +public class SubscriptionStat { + + /* + * VLSN from which feeder agrees to stream log entries, it is returned from + * the feeder and can be equal to or earlier than the VLSN requested by the + * client, which is specified in subscription configuration. + */ + private VLSN startVLSN; + + /* the last VLSN that has been processed */ + private VLSN highVLSN; + + /* used by main thread: # of retries to insert msgs into input queue */ + private final LongStat nReplayQueueOverflow; + /* used by main thread: # of msgs received from feeder */ + private final LongStat nMsgReceived; + /* used by main thread: max # of items pending in input queue */ + private final LongStat maxPendingInput; + /* used by output thread: # of acks sent to feeder */ + private final LongStat nMsgResponded; + /* used by input thread: # of data ops processed */ + private final LongStat nOpsProcessed; + /* used by input thread: # of txn aborted and committed */ + private final LongStat nTxnAborted; + private final LongStat nTxnCommitted; + + SubscriptionStat() { + + startVLSN = VLSN.NULL_VLSN; + + /* initialize statistics */ + StatGroup stats = new StatGroup("subscription", + "subscription " + "statistics"); + nReplayQueueOverflow = new LongStat(stats, + SubscriptionStatDefinition.SUB_N_REPLAY_QUEUE_OVERFLOW, 0L); + nMsgReceived = new LongStat(stats, + SubscriptionStatDefinition.SUB_MSG_RECEIVED, 0L); + nMsgResponded = new LongStat(stats, + SubscriptionStatDefinition.SUB_MSG_RESPONDED, 0L); + maxPendingInput = new LongStat(stats, + SubscriptionStatDefinition.SUB_MAX_PENDING_INPUT, 0L); + + nOpsProcessed = new LongStat(stats, + SubscriptionStatDefinition.SUB_OPS_PROCESSED, 0L); + nTxnAborted = new LongStat(stats, + SubscriptionStatDefinition.SUB_TXN_ABORTED, 0L); + nTxnCommitted = new LongStat(stats, + SubscriptionStatDefinition.SUB_TXN_COMMITTED, 0L); + + } + + /*--------------*/ + /*- Getters -*/ + /*--------------*/ + public synchronized LongStat getNumReplayQueueOverflow() { + return nReplayQueueOverflow; + } + + public synchronized LongStat getMaxPendingInput() { + return maxPendingInput; + } + + public synchronized LongStat getNumMsgResponded() { + return nMsgResponded; + } + + public synchronized LongStat getNumMsgReceived() { + return nMsgReceived; + } + + public synchronized LongStat getNumOpsProcessed() { + return nOpsProcessed; + } + + public synchronized LongStat getNumTxnAborted() { + return nTxnAborted; + } + + public synchronized LongStat getNumTxnCommitted() { + return nTxnCommitted; + } + + public synchronized VLSN getStartVLSN() { + return startVLSN; + } + + public synchronized VLSN getHighVLSN() { + return highVLSN; + } + + /*--------------*/ + /*- Setters -*/ + /*--------------*/ + public synchronized void setStartVLSN(VLSN vlsn) { + startVLSN = vlsn; + } + + public synchronized void setHighVLSN(VLSN vlsn) { + highVLSN = vlsn; + } +} diff --git 
a/src/com/sleepycat/je/rep/subscription/SubscriptionStatDefinition.java b/src/com/sleepycat/je/rep/subscription/SubscriptionStatDefinition.java
new file mode 100644
index 0000000..2607f82
--- /dev/null
+++ b/src/com/sleepycat/je/rep/subscription/SubscriptionStatDefinition.java
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.subscription;
+
+import com.sleepycat.je.utilint.StatDefinition;
+
+/**
+ * Definitions of subscription statistics
+ */
+class SubscriptionStatDefinition {
+
+    public static final String GROUP_NAME = "Subscription";
+    public static final String GROUP_DESC = "Subscription statistics";
+
+    public static final StatDefinition SUB_N_REPLAY_QUEUE_OVERFLOW =
+        new StatDefinition(
+            "nReplayQueueOverflow",
+            "The number of inserts into the replay queue that failed " +
+            "because the queue was full.");
+
+    public static final StatDefinition SUB_MSG_RECEIVED =
+        new StatDefinition(
+            "msg_received",
+            "The number of messages received from the feeder");
+
+    public static final StatDefinition SUB_MSG_RESPONDED =
+        new StatDefinition(
+            "msg_responded",
+            "The number of messages responded to the feeder");
+
+    public static final StatDefinition SUB_OPS_PROCESSED =
+        new StatDefinition(
+            "ops_processed",
+            "The number of data operations processed by the subscriber");
+
+    public static final StatDefinition SUB_TXN_COMMITTED =
+        new StatDefinition(
+            "txn_committed",
+            "The number of committed transactions received from the feeder");
+
+    public static final StatDefinition SUB_TXN_ABORTED =
+        new StatDefinition(
+            "txn_aborted",
+            "The number of aborted transactions received from the feeder");
+
+    public static final StatDefinition SUB_MAX_PENDING_INPUT =
+        new StatDefinition(
+            "max_pending_input",
+            "The max number of pending items in the input queue");
+}
diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionStatus.java b/src/com/sleepycat/je/rep/subscription/SubscriptionStatus.java
new file mode 100644
index 0000000..6b86232
--- /dev/null
+++ b/src/com/sleepycat/je/rep/subscription/SubscriptionStatus.java
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.subscription;
+
+/**
+ * Subscription status returned to the client
+ */
+public enum SubscriptionStatus {
+    /* subscription not yet started */
+    INIT,
+    /* subscription started successfully and the stream is being consumed */
+    SUCCESS,
+    /* requested VLSN is not available */
+    VLSN_NOT_AVAILABLE,
+    /* rep group shut down */
+    GRP_SHUTDOWN,
+    /* connection error */
+    CONNECTION_ERROR,
+    /* timeout error */
+    TIMEOUT_ERROR,
+    /* unknown error */
+    UNKNOWN_ERROR,
+    /* security check error: authentication or authorization failure */
+    SECURITY_CHECK_ERROR
+}
diff --git a/src/com/sleepycat/je/rep/subscription/SubscriptionThread.java b/src/com/sleepycat/je/rep/subscription/SubscriptionThread.java
new file mode 100644
index 0000000..d3b2757
--- /dev/null
+++ b/src/com/sleepycat/je/rep/subscription/SubscriptionThread.java
@@ -0,0 +1,851 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.subscription;
+
+import java.io.IOException;
+import java.util.Timer;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.rep.GroupShutdownException;
+import com.sleepycat.je.rep.InsufficientLogException;
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicationSecurityException;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.node.ChannelTimeoutTask;
+import com.sleepycat.je.rep.impl.node.FeederManager;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.impl.node.ReplicaOutputThread;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.stream.Protocol;
+import com.sleepycat.je.rep.stream.ReplicaFeederHandshake;
+import com.sleepycat.je.rep.stream.ReplicaFeederHandshakeConfig;
+import com.sleepycat.je.rep.stream.SubscriberFeederSyncup;
+import com.sleepycat.je.rep.utilint.BinaryProtocol;
+import com.sleepycat.je.rep.utilint.NamedChannel;
+import com.sleepycat.je.rep.utilint.NamedChannelWithTimeout;
+import com.sleepycat.je.rep.utilint.RepUtils;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher;
+import com.sleepycat.je.utilint.InternalException;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.StoppableThread;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * Main thread created by a subscription to stream log entries from the
+ * feeder
+ */
+class SubscriptionThread extends StoppableThread {
+
+    private final Logger logger;
+    private final SubscriptionConfig config;
+    private final SubscriptionStat stats;
+
+    /* communication queues and working threads */
+    
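/*
+     * Informal data-flow sketch (derived from the threads below): this
+     * thread reads messages from the feeder channel and offers them to
+     * inputQueue, where the message processing thread consumes them and
+     * invokes the client callback; acks queued on outputQueue are written
+     * back to the feeder by the output thread.
+     *
+     *   feeder channel --> SubscriptionThread --> inputQueue --> msg thread
+     *   feeder channel <-- output thread <-- outputQueue <-- acks
+     */
+    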
private final BlockingQueue outputQueue; + private final BlockingQueue inputQueue; + private SubscriptionProcessMessageThread messageProcThread; + + /* communication channel between subscriber and feeder */ + private NamedChannelWithTimeout namedChannel; + /* task to register channel with timeout */ + private ChannelTimeoutTask channelTimeoutTask; + /* protocol used to communicate with feeder */ + private Protocol protocol; + + /* requested VLSN from which to stream log entries */ + private final VLSN reqVLSN; + + /* + * volatile because it can be concurrently accessed by the subscription + * thread itself in checkOutputThread(), and another thread trying to + * shut down subscription by calling shutdown() + */ + private volatile SubscriptionOutputThread outputThread; + + private volatile SubscriptionStatus status; + + /* stored exception */ + private volatile Exception storedException; + + /* + * For unit test only. The hook will be called by unit test to inject an + * exception into msg queue, which to be processed by the callback function + * defined in unit test. + */ + private TestHook exceptionHandlingTestHook; + + SubscriptionThread(ReplicatedEnvironment env, + VLSN reqVLSN, + SubscriptionConfig config, + SubscriptionStat stats, + Logger logger) { + + super(RepInternal.getNonNullRepImpl(env), "Subscription Main"); + setUncaughtExceptionHandler(new SubscriptionThreadExceptionHandler()); + + this.reqVLSN = reqVLSN; + this.config = config; + this.stats = stats; + this.logger = logger; + protocol = null; + namedChannel = null; + /* init subscription input and output queue */ + inputQueue = + new ArrayBlockingQueue<>(config.getInputMessageQueueSize()); + outputQueue = + new ArrayBlockingQueue<>(config.getOutputMessageQueueSize()); + + status = SubscriptionStatus.INIT; + storedException = null; + exceptionHandlingTestHook = null; + } + + /** + * Returns subscription status to client + * + * @return subscription status + */ + public SubscriptionStatus getStatus() { + return status; + } + + /** + * Returns stored exception + * + * @return stored exception + */ + public Exception getStoredException() { + return storedException; + } + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + public void run() { + + LoggerUtils.info(logger, envImpl, + "Start subscription from VLSN " + reqVLSN + + " from feeder at " + + config.getFeederHost() + ":" + config.getFeederPort()); + + try { + final int maxRetry = config.getMaxConnectRetries(); + boolean auxThreadCreated = false; + int numRetry = 0; + + while (!isShutdown()) { + try { + initializeConnection(); + if (!auxThreadCreated) { + LoggerUtils.fine(logger, envImpl, + "Create auxiliary msg processing " + + "and output threads"); + + auxThreadCreated = createAuxThread(); + if (auxThreadCreated) { + /* subscription succeed, start streaming data */ + status = SubscriptionStatus.SUCCESS; + loopInternal(); + } else { + status = SubscriptionStatus.UNKNOWN_ERROR; + } + } + break; + } catch (ConnectionException e) { + if (numRetry == maxRetry) { + LoggerUtils.info(logger, envImpl, + "Reaching the max retry " + maxRetry + + " to connect feeder " + + config.getFeederHost() + + ", shut down subscription" + + "\n" + LoggerUtils.getStackTrace(e)); + storedException = e; + status = SubscriptionStatus.CONNECTION_ERROR; + break; + } else { + numRetry++; + LoggerUtils.fine(logger, envImpl, + "Fail to connect feeder at " + + config.getFeederHost() + + " sleep for " + e.getRetrySleepMs() + + " ms and re-connect again"); + 
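/*
+                            * Bounded-retry sketch: the loop makes up to
+                            * maxRetry attempts, sleeping e.getRetrySleepMs()
+                            * between attempts; that interval is derived from
+                            * SUBSCRIPTION_SLEEP_BEFORE_RETRY via
+                            * config.getSleepBeforeRetryMs().
+                            */
+                           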
Thread.sleep(e.getRetrySleepMs()); + } + } + } + } catch (ReplicationSecurityException ure) { + storedException = ure; + LoggerUtils.warning(logger, envImpl, + "Subscription exited due to security check " + + "failure: " + ure.getMessage()); + status = SubscriptionStatus.SECURITY_CHECK_ERROR; + } catch (GroupShutdownException e) { + if (messageProcThread.isAlive()) { + try { + /* let message processing thread finish up */ + messageProcThread.join(); + } catch (InterruptedException ie) { + /* ignore since we will shut down, just log */ + LoggerUtils.fine(logger, envImpl, + "exception in shutting down msg proc " + + "thread " + ie.getMessage() + + "\n" + LoggerUtils.getStackTrace(ie)); + } + } + storedException = e; + LoggerUtils.info(logger, envImpl, + "received group shutdown " + e.getMessage() + + "\n" + LoggerUtils.getStackTrace(e)); + status = SubscriptionStatus.GRP_SHUTDOWN; + } catch (InsufficientLogException e) { + storedException = e; + LoggerUtils.info(logger, envImpl, + "unable to subscribe from requested VLSN " + + reqVLSN + + "\n" + LoggerUtils.getStackTrace(e)); + status = SubscriptionStatus.VLSN_NOT_AVAILABLE; + } catch (EnvironmentFailureException e) { + storedException = e; + LoggerUtils.warning(logger, envImpl, + "unable to sync up with feeder due to EFE " + + e.getMessage() + + "\n" + LoggerUtils.getStackTrace(e)); + status = SubscriptionStatus.UNKNOWN_ERROR; + } catch (InterruptedException e) { + storedException = e; + LoggerUtils.warning(logger, envImpl, + "interrupted exception " + e.getMessage() + + "\n" + LoggerUtils.getStackTrace(e)); + status = SubscriptionStatus.UNKNOWN_ERROR; + } catch (InternalException e) { + storedException = e; + LoggerUtils.warning(logger, envImpl, + "internal exception " + e.getMessage() + + "\n" + LoggerUtils.getStackTrace(e)); + status = SubscriptionStatus.UNKNOWN_ERROR; + } finally { + shutdown(); + } + } + + /** + * For unit test + * + * @param exceptionHandlingTestHook test hook + */ + void setExceptionHandlingTestHook( + TestHook exceptionHandlingTestHook) { + this.exceptionHandlingTestHook = exceptionHandlingTestHook; + } + + /** + * Sets subscription status + * + * @param s subscription status + */ + void setStatus(SubscriptionStatus s) { + status = s; + } + + /** + * shutdown the subscriber and all auxiliary threads, close channel to + * the Feeder. + */ + void shutdown() { + + /* Note start of shutdown and return if already requested */ + if (shutdownDone(logger)) { + return; + } + + /* shutdown aux threads */ + if (messageProcThread != null) { + try { + messageProcThread.shutdownThread(logger); + LoggerUtils.info(logger, envImpl, + "message processing thread has shut down."); + } catch (Exception e) { + /* Ignore so shutdown can continue */ + LoggerUtils.warning(logger, envImpl, + "error in shutdown msg proc thread: " + + e.getMessage() + ", continue shutdown the" + + " subscription thread."); + } finally { + messageProcThread = null; + } + } + if (outputThread != null) { + try { + outputThread.shutdownThread(logger); + LoggerUtils.info(logger, envImpl, + "output thread has shut down."); + + } catch (Exception e) { + /* Ignore we will clean up via killing IO channel anyway. 
*/ + LoggerUtils.warning(logger, envImpl, + "error in shutdown output thread: " + + e.getMessage() + ", continue shutdown " + + "subscription thread."); + } finally { + outputThread = null; + } + } + + inputQueue.clear(); + outputQueue.clear(); + RepUtils.shutdownChannel(namedChannel); + if (channelTimeoutTask != null) { + channelTimeoutTask.cancel(); + } + + shutdownThread(logger); + + LoggerUtils.info(logger, envImpl, + "queues cleared and channel closed, subscription " + + "thread has completely shut down"); + } + + /** + * Enqueue message received from feeder into input queue + * + * @param message message received from feeder + * + * @throws InterruptedException if enqueue is interrupted + * @throws GroupShutdownException if receive shutdown message from feeder + */ + void offer(Object message) + throws InterruptedException, GroupShutdownException { + + RepImpl repImpl = (RepImpl)envImpl; + + while (!isShutdown() && /* stop enqueue msg if thread is shutdown */ + !inputQueue.offer(message, + SubscriptionConfig.QUEUE_POLL_INTERVAL_MS, + TimeUnit.MILLISECONDS)) { + /* Offer timed out. */ + if (!messageProcThread.isAlive()) { + LoggerUtils.info(logger, repImpl, + "Thread consuming input queue is gone, start" + + " shutdown process"); + throw new GroupShutdownException(logger, repImpl, + config.getFeederHost(), + stats.getHighVLSN(), 0); + } else { + /* count the overflow and retry */ + stats.getNumReplayQueueOverflow().increment(); + } + } + } + + /** + * Create connection to feeder and execute handshake + * + * @throws InternalException if unable to connect to source node due to + * protocol error + * @throws EnvironmentFailureException if fail to handshake with source, or + * source does not have enough log to start streaming + * @throws ConnectionException if unable to connect to source node + * @throws ReplicationSecurityException if authentication failure + */ + private void initializeConnection() throws InternalException, + EnvironmentFailureException, ConnectionException, + ReplicationSecurityException { + + /* open a channel to feeder */ + LoggerUtils.fine(logger, envImpl, + "Subscription " + config.getSubNodeName() + + " start open channel and handshake with feeder"); + + try { + + openChannel(); + ReplicaFeederHandshake handshake = + new ReplicaFeederHandshake(new SubFeederHandshakeConfig + (config.getNodeType())); + + protocol = handshake.execute(); + + /* check if negociated protocol version is high enough */ + final int minReqVersion = config.getMinProtocolVersion(); + if (protocol.getVersion() < minReqVersion) { + throw new BinaryProtocol.ProtocolException( + "HA protocol version (" + protocol.getVersion() + ") is " + + "lower than minimal required version (" + minReqVersion + + ")"); + } + + LoggerUtils.fine(logger, envImpl, + "subscription " + config.getSubNodeName() + + " sync-up with feeder at vlsn: " + reqVLSN); + SubscriberFeederSyncup syncup = + new SubscriberFeederSyncup(namedChannel, protocol, + config.getFeederFilter(), + (RepImpl) envImpl, + config.getStreamMode(), + logger); + final VLSN startVLSN = syncup.execute(reqVLSN); + LoggerUtils.fine(logger, envImpl, + "sync-up with feeder done, start vlsn: " + + startVLSN); + + if (!startVLSN.equals(VLSN.NULL_VLSN)) { + + stats.setStartVLSN(startVLSN); + + /* read heartbeat and respond */ + protocol.read(namedChannel.getChannel(), + Protocol.Heartbeat.class); + queueAck(ReplicaOutputThread.HEARTBEAT_ACK); + + LoggerUtils.info(logger, envImpl, + "Subscription " + config.getSubNodeName() + + " successfully connect to 
feeder at " + + config.getFeederHost() + ":" + + config.getFeederPort() + + ", reqVLSN: " + reqVLSN + + ", start VLSN: " + startVLSN); + } else { + throw new InsufficientLogException((RepImpl) envImpl, reqVLSN); + } + } catch (IOException e) { + throw new ConnectionException("Unable to connect due to " + + e.getMessage() + + ", will retry later.", + config.getSleepBeforeRetryMs(), + e); + } catch (EnvironmentFailureException e) { + logger.warning("Fail to handshake with feeder: " + + e.getMessage()); + throw e; + } catch (BinaryProtocol.ProtocolException e) { + final String msg = ("Unable to connect to feeder " + + config.getFeederHost() + + " due to protocol exception " + + e.getMessage()); + LoggerUtils.warning(logger, envImpl, msg); + throw new InternalException(msg, e); + } + } + + /** + * Create auxiliary message processing and output thread + */ + private boolean createAuxThread() { + + RepImpl repImpl = (RepImpl)envImpl; + + inputQueue.clear(); + outputQueue.clear(); + + /* start output thread over data channel to send response to feeder */ + outputThread = + new SubscriptionOutputThread(this, + repImpl, outputQueue, protocol, + namedChannel.getChannel(), + config.getAuthenticator(), stats); + /* + * output thread can be shutdown and set to null anytime, thus + * use a cached copy to ensure it is alive before start it + */ + final SubscriptionOutputThread cachedOutputThread = outputThread; + if (cachedOutputThread != null) { + cachedOutputThread.start(); + LoggerUtils.fine(logger, envImpl, + "output thread created for subscription " + + config.getSubNodeName()); + /* start thread to consume data in input queue */ + messageProcThread = + new SubscriptionProcessMessageThread(repImpl, inputQueue, + config, + stats, logger); + messageProcThread.start(); + LoggerUtils.fine(logger, envImpl, + "message processing thread created for subscription " + + config.getSubNodeName()); + return true; + } else { + LoggerUtils.info(logger, envImpl, + "subscription " + config.getSubNodeName() + " " + + "just shut down, no need to create auxiliary " + + "threads"); + return false; + } + } + + /** + * Open a data channel to feeder + * + * @return created name channel + * + * @throws ConnectionException unable to connect due to error and need retry + * @throws InternalException fail to handshake with feeder + * @throws ReplicationSecurityException if unauthorized to stream + * from feeder + */ + private NamedChannel openChannel() throws ConnectionException, + InternalException, ReplicationSecurityException { + + RepImpl repImpl = (RepImpl)envImpl; + + if (repImpl == null) { + throw new IllegalStateException("Replication env is unavailable."); + } + + try { + DataChannelFactory.ConnectOptions connectOpts = + new DataChannelFactory + .ConnectOptions() + .setTcpNoDelay(config.TCP_NO_DELAY) + .setReceiveBufferSize(config.getReceiveBufferSize()) + .setOpenTimeout((int) config + .getStreamOpenTimeout(TimeUnit.MILLISECONDS)) + .setBlocking(config.BLOCKING_MODE_CHANNEL); + + final DataChannel channel = + repImpl.getChannelFactory() + .connect(config.getInetSocketAddress(), connectOpts); + + ServiceDispatcher.doServiceHandshake(channel, + FeederManager.FEEDER_SERVICE, + config.getAuthInfo()); + LoggerUtils.fine(logger, envImpl, + "channel opened to service " + + FeederManager.FEEDER_SERVICE + "@" + + config.getFeederHost() + + "[address: " + config.getFeederHostAddr() + + " port: " + config.getFeederPort() + "]"); + + final int timeoutMs = repImpl.getConfigManager(). 
+ getDuration(RepParams.PRE_HEARTBEAT_TIMEOUT); + + channelTimeoutTask = new ChannelTimeoutTask(new Timer(true)); + namedChannel = + new NamedChannelWithTimeout(repImpl, logger, channelTimeoutTask, + channel, timeoutMs); + } catch (IOException cause) { + /* retry if unable to connect to feeder */ + throw new ConnectionException("Fail to open channel to feeder " + + "due to " + cause.getMessage() + + ", will retry later", + config.getSleepBeforeRetryMs(), + cause); + } catch (ServiceDispatcher.ServiceConnectFailedException cause) { + + /* + * The feeder may not have established the Feeder Service + * as yet. For example, the transition to the master may not have + * been completed. + */ + if (cause.getResponse() == + ServiceDispatcher.Response.UNKNOWN_SERVICE) { + throw new ConnectionException("Service exception: " + + cause.getMessage() + + ", wait longer and will retry " + + "later", + config.getSleepBeforeRetryMs(), + cause); + } + + if (cause.getResponse() == + ServiceDispatcher.Response.INVALID) { + throw new ReplicationSecurityException( + "Security check failure:" + cause.getMessage(), + config.getSubNodeName(), + cause); + } + + throw new InternalException("Subscription " + + config.getSubNodeName() + + "failed to handshake for service " + + FeederManager.FEEDER_SERVICE + + " with feeder " + + config.getFeederHost(), + cause); + } + LoggerUtils.info(logger, envImpl, + "Subscription " + config.getSubNodeName() + + " has successfully created a channel to feeder at " + + config.getFeederHost() + ":" + config.getFeederPort()); + + return namedChannel; + } + + /** + * Internal loop to dequeue message from channel to the feeder, + * process shutdown and heartbeat messages, and relay data operations to + * the input queue to be consumed by input thread. + * + * @throws InternalException if error in reading messages from channel or + * enqueue message into input queue + * @throws GroupShutdownException if receive shutdown message from feeder + * @throws ReplicationSecurityException if output thread exits due to + * security check failure. In this case the main subscription need to + * exit without retry. + */ + private void loopInternal() throws InternalException, + GroupShutdownException, ReplicationSecurityException { + + RepImpl repImpl = (RepImpl)envImpl; + + try { + + LoggerUtils.info(logger, envImpl, + "Start reading messages from feeder " + + config.getFeederHost() + ":" + + config.getFeederPort()); + while (!isShutdown()) { + + checkOutputThread(); + + BinaryProtocol.Message message = protocol.read(namedChannel); + + if ((message == null)) { + LoggerUtils.info(logger, envImpl, + "Subscription " + config.getSubNodeName() + + " has nothing stream, exit loop."); + return; + } + + assert TestHookExecute.doHookIfSet(exceptionHandlingTestHook, + this); + + stats.getNumMsgReceived().increment(); + + BinaryProtocol.MessageOp messageOp = message.getOp(); + if (messageOp == Protocol.HEARTBEAT) { + LoggerUtils.finest(logger, envImpl, + "receive heartbeat from " + + namedChannel.getNameIdPair()); + queueAck(ReplicaOutputThread.HEARTBEAT_ACK); + } else if (messageOp == Protocol.SHUTDOWN_REQUEST) { + + LoggerUtils.info(logger, envImpl, + "Receive shutdown request from feeder " + + config.getFeederHost() + + ", shutdown subscriber"); + + /* + * create a shutdown request, make it in the queue so + * client is able to see that in callback, and throw an + * exception. 
+ * + * The message processing thread will exit when seeing a + * GroupShutdownException + */ + Protocol.ShutdownRequest req = + (Protocol.ShutdownRequest) message; + Exception exp = + new GroupShutdownException(logger, repImpl, + config.getFeederHost(), + stats.getHighVLSN(), + req.getShutdownTimeMs()); + offer(exp); + throw exp; + } else { + /* a regular data entry message */ + offer(message); + + final long pending = inputQueue.size(); + if (pending > stats.getMaxPendingInput().get()) { + stats.getMaxPendingInput().set(pending); + LoggerUtils.finest(logger, envImpl, + "Max pending request log items:" + + pending); + } + } + + } + } catch (GroupShutdownException | ReplicationSecurityException exp) { + /* throw to caller, let caller deal with it */ + throw exp; + } catch (Exception e) { + /* other exception is thrown as IE */ + throw new InternalException(e.getMessage(), e); + } + } + + /** + * Checks status of output thread and propagates RSE to main + * loop. If output thread exited due to RSE, the main thread need to + * capture it to set the subscription status correctly. For other + * exceptions, output thread uses the traditional mechanism to notify the + * main subscription thread: simply shut down channel. + */ + private void checkOutputThread() + throws InternalException, ReplicationSecurityException { + + /* + * output thread can be shutdown and set to null anytime, thus + * use a cached copy to avoid NPE after the first check + */ + final SubscriptionOutputThread cachedOutputThread = outputThread; + + /* output thread already gone */ + if (cachedOutputThread == null) { + /* + * if output thread is set to null only when subscription thread + * shut down. If we reach here, it means the subscription thread + * is shut down right after isShutdown check in loopInternal(). + * We simply return and subscription thread would detect the shut + * down in next check of isShutdown in loopInternal(). + */ + LoggerUtils.fine(logger, envImpl, + "output thread no longer exists"); + return; + } + + if (cachedOutputThread.getException() instanceof + ReplicationSecurityException) { + final ReplicationSecurityException rse = + (ReplicationSecurityException) cachedOutputThread.getException(); + LoggerUtils.warning(logger, envImpl, + "Output thread exited due to security check " + + "failure: " + rse.getMessage()); + throw rse; + } + } + + /** + * Enqueue an ack message in output queue + * + * @param xid txn id to enqueue + * + * @throws IOException if fail to queue the msg + */ + private void queueAck(Long xid) throws IOException { + + try { + outputQueue.put(xid); + } catch (InterruptedException ie) { + + /* + * If interrupted while waiting, have the higher levels treat + * it like an IOE and exit the thread. 
+ */ + throw new IOException("Ack I/O interrupted", ie); + } + } + + /*-----------------------------------*/ + /*- Inner Classes -*/ + /*-----------------------------------*/ + + /** + * Subscriber-Feeder handshake config + */ + private class SubFeederHandshakeConfig + implements ReplicaFeederHandshakeConfig { + + private final NodeType nodeType; + private final RepImpl repImpl; + SubFeederHandshakeConfig(NodeType nodeType) { + this.nodeType = nodeType; + repImpl = (RepImpl)envImpl; + } + + public RepImpl getRepImpl() { + return repImpl; + } + + public NameIdPair getNameIdPair() { + return getRepImpl().getNameIdPair(); + } + + public RepUtils.Clock getClock() { + return new RepUtils.Clock(RepImpl.getClockSkewMs()); + } + + public NodeType getNodeType() { + return nodeType; + } + + public NamedChannel getNamedChannel() { + return namedChannel; + } + + /* create a group impl from group name and group uuid */ + public RepGroupImpl getGroup() { + + RepGroupImpl repGroupImpl = new RepGroupImpl( + config.getGroupName(), + true, /* unknown group uuid */ + repImpl.getCurrentJEVersion()); + + /* use uuid if specified, otherwise unknown uuid will be used */ + if (config.getGroupUUID() != null) { + repGroupImpl.setUUID(config.getGroupUUID()); + } + return repGroupImpl; + } + } + + /** + * Thrown to indicate that the Subscriber must retry connecting to the same + * master, after some period of time. + */ + private class ConnectionException extends RuntimeException { + + private final long retrySleepMs; + + ConnectionException(String message, + long retrySleepMs, + Throwable cause) { + super(message, cause); + this.retrySleepMs = retrySleepMs; + } + + /** + * Get thread sleep time before retry + * + * @return sleep time in ms + */ + long getRetrySleepMs() { + return retrySleepMs; + } + + @Override + public String getMessage() { + return "Failed to connect, will retry after sleeping " + + retrySleepMs + " ms"; + } + } + + /** + * Handle exceptions uncaught in SubscriptionThread + */ + private class SubscriptionThreadExceptionHandler + implements UncaughtExceptionHandler { + + public void uncaughtException(Thread t, Throwable e) { + logger.severe("Error { " + e.getMessage() + + " } in SubscriptionThread {" + + t + " } was uncaught.\nstack trace:\n" + + LoggerUtils.getStackTrace(e)); + } + } +} + diff --git a/src/com/sleepycat/je/rep/subscription/package-info.java b/src/com/sleepycat/je/rep/subscription/package-info.java new file mode 100644 index 0000000..388a4cc --- /dev/null +++ b/src/com/sleepycat/je/rep/subscription/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Subscription of rep stream for use by stream API in NoSQL DB. + */ +package com.sleepycat.je.rep.subscription; diff --git a/src/com/sleepycat/je/rep/txn/MasterThreadLocker.java b/src/com/sleepycat/je/rep/txn/MasterThreadLocker.java new file mode 100644 index 0000000..3c713e5 --- /dev/null +++ b/src/com/sleepycat/je/rep/txn/MasterThreadLocker.java @@ -0,0 +1,35 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. 
All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.txn.ThreadLocker; + +/** + * A MasterThreadLocker is used with a user initiated non-transactional + * operation on a Master, for a replicated DB. Currently there is no behavior + * specific to this class, and it is only a placeholder for future HA + * functionality. + */ +public class MasterThreadLocker extends ThreadLocker { + + public MasterThreadLocker(final RepImpl repImpl) { + super(repImpl); + } + + @Override + public ThreadLocker newEmptyThreadLockerClone() { + return new MasterThreadLocker((RepImpl) envImpl); + } +} diff --git a/src/com/sleepycat/je/rep/txn/MasterTxn.java b/src/com/sleepycat/je/rep/txn/MasterTxn.java new file mode 100644 index 0000000..80ca718 --- /dev/null +++ b/src/com/sleepycat/je/rep/txn/MasterTxn.java @@ -0,0 +1,718 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.txn; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Logger; + +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockNotAvailableException; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.Replay; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.TxnManager; +import com.sleepycat.je.txn.WriteLockInfo; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * A MasterTxn represents: + * - a user initiated Txn executed on the Master node, when local-write and + * read-only are not configured, or + * - an auto-commit Txn on the Master node for a replicated DB. + * + * This class uses the hooks defined by Txn to support the durability + * requirements of a replicated transaction on the Master. + */ +public class MasterTxn extends Txn { + + /* Holds the commit VLSN after a successful commit. */ + private VLSN commitVLSN = VLSN.NULL_VLSN; + private final NameIdPair nameIdPair; + private final UUID envUUID; + + /* The number of acks required by this txn commit. */ + private int requiredAckCount = -1; + + /* If this transaction requests an Arbiter ack. */ + private boolean needsArbiterAck; + + /* + * Used to measure replicated transaction commit performance. All deltas + * are measured relative to the start time, to minimize storage overhead. + */ + + /* The time the transaction was started. */ + private final long startMs = System.currentTimeMillis(); + + /* The start relative delta time when the commit pre hook exited. */ + private int preLogCommitEndDeltaMs = 0; + + /* + * The start relative delta time when the commit message was written to + * the rep stream. + */ + private int repWriteStartDeltaMs = 0; + + /** + * Flag to keep track of whether this transaction has taken the read lock + * that protects access to the blocking latch used by Master Transfer. + */ + private boolean locked; + + /** + * Flag to prevent any change to the txn's contents. Used in + * master->replica transition. Intentionally volatile, so it can be + * interleaved with use of the MasterTxn mutex. 
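+     *
+     * (Illustrative discipline only, with hypothetical call sites: one
+     * thread may set the flag without holding the txn mutex, while paths
+     * that mutate txn state re-check it under the mutex, roughly
+     *
+     *     synchronized (txn) { if (!frozen) { mutate(txn); } }
+     *
+     * so a racing write either observes the freeze or completes first.)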
+ */ + private volatile boolean freeze; + + /* For unit testing */ + private TestHook convertHook; + + /* The default factory used to create MasterTxns */ + private static final MasterTxnFactory DEFAULT_FACTORY = + new MasterTxnFactory() { + + @Override + public MasterTxn create(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair) { + return new MasterTxn(envImpl, config, nameIdPair); + } + + @Override + public MasterTxn createNullTxn(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair) { + + return new MasterTxn(envImpl, config, nameIdPair) { + @Override + protected boolean updateLoggedForTxn() { + /* + * Return true so that the commit will be logged even + * though there are no changes associated with this txn + */ + return true; + } + }; + } + }; + + /* The current Txn Factory. */ + private static MasterTxnFactory factory = DEFAULT_FACTORY; + + public MasterTxn(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair) + throws DatabaseException { + + super(envImpl, config, ReplicationContext.MASTER); + this.nameIdPair = nameIdPair; + this.envUUID = ((RepImpl) envImpl).getUUID(); + assert !config.getLocalWrite(); + } + + @Override + public boolean isLocalWrite() { + return false; + } + + /** + * Returns the transaction commit token used to identify the transaction. + * + * @see com.sleepycat.je.txn.Txn#getCommitToken() + */ + @Override + public CommitToken getCommitToken() { + if (commitVLSN.isNull()) { + return null; + } + return new CommitToken(envUUID, commitVLSN.getSequence()); + } + + public VLSN getCommitVLSN() { + return commitVLSN; + } + + /** + * MasterTxns use txn ids from a reserved negative space. So override + * the default generation of ids. + */ + @Override + protected long generateId(TxnManager txnManager, + long ignore /* mandatedId */) { + assert(ignore == 0); + return txnManager.getNextReplicatedTxnId(); + } + + /** + * Causes the transaction to wait until we have sufficient replicas to + * acknowledge the commit. + */ + @Override + protected void txnBeginHook(TransactionConfig config) + throws DatabaseException { + + RepImpl repImpl = (RepImpl) envImpl; + try { + repImpl.txnBeginHook(this); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(envImpl, e); + } + } + + @Override + protected void preLogCommitHook() + throws DatabaseException { + + RepImpl repImpl = (RepImpl) envImpl; + ReplicaAckPolicy ackPolicy = getCommitDurability().getReplicaAck(); + requiredAckCount = + repImpl.getRepNode().getDurabilityQuorum(). + getCurrentRequiredAckCount(ackPolicy); + + /* + * TODO: An optimization we'd like to do is to identify transactions + * that only modify non-replicated databases, so they can avoid waiting + * for Replica commit acks and avoid checks like the one that requires + * that the node be a master before proceeding with the transaction. 
+ */ + repImpl.preLogCommitHook(this); + preLogCommitEndDeltaMs = (int) (System.currentTimeMillis() - startMs); + } + + @Override + protected void postLogCommitHook(LogItem commitItem) + throws DatabaseException { + + commitVLSN = commitItem.header.getVLSN(); + try { + RepImpl repImpl = (RepImpl) envImpl; + repImpl.postLogCommitHook(this, commitItem); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(envImpl, e); + } + } + + @Override + protected void preLogAbortHook() + throws DatabaseException { + + RepImpl repImpl = (RepImpl) envImpl; + repImpl.preLogAbortHook(this); + } + + @Override + protected void postLogCommitAbortHook() { + + RepImpl repImpl = (RepImpl) envImpl; + repImpl.postLogCommitAbortHook(this); + } + + @Override + protected void postLogAbortHook() { + RepImpl repImpl = (RepImpl)envImpl; + repImpl.postLogAbortHook(this); + } + + /** + * Prevent this MasterTxn from taking locks if the node becomes a + * replica. The application has a reference to this Txn, and may + * attempt to use it well after the node has transitioned from master + * to replica. + */ + @Override + public LockResult lockInternal(long lsn, + LockType lockType, + boolean noWait, + boolean jumpAheadOfWaiters, + DatabaseImpl database) + throws LockNotAvailableException, LockConflictException, + DatabaseException { + ReplicatedEnvironment.State nodeState = ((RepImpl)envImpl).getState(); + if (nodeState.isMaster()) { + return super.lockInternal + (lsn, lockType, noWait, jumpAheadOfWaiters, database); + } + + throwNotMaster(nodeState); + return null; /* not reached */ + } + + private void throwNotMaster(ReplicatedEnvironment.State nodeState) { + if (nodeState.isReplica()) { + throw new ReplicaWriteException + (this, ((RepImpl)envImpl).getStateChangeEvent()); + } + throw new UnknownMasterException + ("Transaction " + getId() + + " cannot execute write operations because this node is" + + " no longer a master"); + } + + /** + * If logging occurs before locking, we must screen out write locks here. + */ + @Override + public synchronized void preLogWithoutLock(DatabaseImpl database) { + ReplicatedEnvironment.State nodeState = ((RepImpl)envImpl).getState(); + if (nodeState.isMaster()) { + super.preLogWithoutLock(database); + return; + } + + throwNotMaster(nodeState); + } + + /** + * Determines whether we should lock the block-latch lock. + *

+     * <p>
+     * We acquire the lock during pre-log hook, and release it during post-log
+     * hook. Specifically, there are the following cases:
+     * <ol>
+     * <li>
+     * For a normal commit, we acquire it in {@code preLogCommitHook()} and
+     * release it in {@code postLogCommitHook()}.
+     * <li>
+     * For a normal abort (invoked by the application on the {@code
+     * Txn.abort()} API), we acquire the lock in {@code preLogAbortHook()} and
+     * release it in {@code postLogAbortHook()}.
+     * <li>
+     * When a commit fails in such a way as to call {@code
+     * Txn.throwPreCommitException()}, we go through the abort path as well.
+     * In this case:
+     * <ul>
+     * <li>we will of course already have called {@code preLogCommitHook()};
+     * <li>the abort path calls {@code preLogAbortHook()} and {@code
+     * postLogAbortHook()} as always;
+     * <li>finally we call {@code postLogCommitAbortHook()}.
+     * </ul>
+     * Fortunately we can avoid the complexity of dealing with a second
+     * (recursive) lock acquisition here, because by the time either post-hook
+     * is called we've done any writing of VLSNs. Thus, when we want to
+     * take the lock, we take it if it hasn't already been taken, and do
+     * nothing if it has; when releasing, we release it if we have it, and do
+     * nothing if we don't.
+     * </ol>
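+     * <p>
+     * A minimal sketch of the intended pairing (the hook bodies below are
+     * paraphrased comments, not the actual hook code; the latch itself lives
+     * in {@code RepImpl}):
+     * <pre>
+     *    // in the pre-log hook:
+     *    if (lockOnce()) {
+     *        // first request only: acquire the read lock on the block latch
+     *    }
+     *
+     *    // in the post-log hook:
+     *    if (unlockOnce()) {
+     *        // we held it: release the read lock on the block latch
+     *    }
+     * </pre>
+     * <p>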
+     * See additional javadoc at {@code RepImpl.blockLatchLock}
+     */
+    public boolean lockOnce() {
+        if (locked) {
+            return false;
+        }
+        locked = true;
+        return true;
+    }
+
+    /**
+     * Determines whether we should unlock the block-latch lock.
+     *
+     * @see #lockOnce
+     */
+    public boolean unlockOnce() {
+        if (locked) {
+            locked = false;
+            return true;
+        }
+        return false;
+    }
+
+    public int getRequiredAckCount() {
+        return requiredAckCount;
+    }
+
+    public void resetRequiredAckCount() {
+        requiredAckCount = 0;
+    }
+
+    /** A masterTxn always writes its own id into the commit or abort. */
+    @Override
+    protected int getReplicatorNodeId() {
+        return nameIdPair.getId();
+    }
+
+    @Override
+    protected long getDTVLSN() {
+        /*
+         * For the master transaction, it should always be null, and will
+         * be corrected under the write log latch on its way to disk.
+         */
+        return VLSN.NULL_VLSN_SEQUENCE;
+    }
+
+    public long getStartMs() {
+        return startMs;
+    }
+
+    public void stampRepWriteTime() {
+        this.repWriteStartDeltaMs =
+            (int)(System.currentTimeMillis() - startMs);
+    }
+
+    /**
+     * Returns the amount of time it took to copy the commit record from the
+     * log buffer to the rep stream. It's measured as the interval from the
+     * time the preCommit hook completed to the time the message write to the
+     * replication stream was initiated.
+     */
+    public long messageTransferMs() {
+        return repWriteStartDeltaMs > 0 ?
+
+            (repWriteStartDeltaMs - preLogCommitEndDeltaMs) :
+
+            /*
+             * The message write was initiated before the post commit hook
+             * fired.
+             */
+            0;
+    }
+
+    @Override
+    protected boolean
+        propagatePostCommitException(DatabaseException postCommitException) {
+        return (postCommitException instanceof InsufficientAcksException) ?
+            true :
+            super.propagatePostCommitException(postCommitException);
+    }
+
+    /* The Txn factory interface. */
+    public interface MasterTxnFactory {
+        MasterTxn create(EnvironmentImpl envImpl,
+                         TransactionConfig config,
+                         NameIdPair nameIdPair);
+
+        /**
+         * Create a special "null" txn that does not result in any changes to
+         * the environment. Its sole purpose is to persist and communicate
+         * DTVLSN values.
+         */
+        MasterTxn createNullTxn(EnvironmentImpl envImpl,
+                                TransactionConfig config,
+                                NameIdPair nameIdPair);
+    }
+
+    /* The method used to create user Master Txns via the factory. */
+    public static MasterTxn create(EnvironmentImpl envImpl,
+                                   TransactionConfig config,
+                                   NameIdPair nameIdPair) {
+        return factory.create(envImpl, config, nameIdPair);
+    }
+
+    public static MasterTxn createNullTxn(EnvironmentImpl envImpl,
+                                          TransactionConfig config,
+                                          NameIdPair nameIdPair) {
+        return factory.createNullTxn(envImpl, config, nameIdPair);
+    }
+
+    /**
+     * Method used for unit testing.
+     *
+     * Sets the factory to the one supplied. If the argument is null it
+     * restores the factory to the original default value.
+     */
+    public static void setFactory(MasterTxnFactory factory) {
+        MasterTxn.factory = (factory == null) ? DEFAULT_FACTORY : factory;
+    }
+
+    /**
+     * Convert a MasterTxn that has any write locks into a ReplayTxn, and close
+     * the MasterTxn after it is disemboweled. A MasterTxn that only has read
+     * locks is unchanged and is still usable by the application. To be clear,
+     * the application can never use a MasterTxn to obtain a lock if the node
+     * is in Replica mode, but may indeed be able to use a read-lock-only
+     * MasterTxn if the node cycles back into Master status.
+     *
+     * For converted MasterTxns, all write locks are transferred to a replay
+     * transaction, read locks are released, and the txn is closed. Used when a
+     * node is transitioning from master to replica mode without recovery,
+     * which may happen for an explicit master transfer request, or merely for
+     * a network partition/election of new
+     * master.
+     *
+     * The newly created replay transaction will need to be in the appropriate
+     * state, holding all write locks, so that the node in replica form can
+     * execute the proper syncups. Note that the resulting replay txn will
+     * only be aborted, and will never be committed, because the txn originated
+     * on this node, which is transitioning from master -> replica.
+     *
+     * We only transfer write locks. We need not transfer read locks, because
+     * replays only operate on writes, and are never required to obtain read
+     * locks. Read locks are released though, because (a) this txn is now only
+     * abortable, and (b) although the Replay can preempt any read locks held
+     * by the MasterTxn, such preemption will add delay.
+     *
+     * @return a ReplayTxn, if there were locks in this transaction, and
+     * there's a need to create a ReplayTxn.
+     */
+    public ReplayTxn convertToReplayTxnAndClose(Logger logger,
+                                                Replay replay) {
+
+        /* Assertion */
+        if (!freeze) {
+            throw EnvironmentFailureException.unexpectedState
+                (envImpl,
+                 "Txn " + getId() +
+                 " should be frozen when converting to replay txn");
+        }
+
+        /*
+         * This is an important and relatively rare operation, and worth
+         * logging.
+         */
+        LoggerUtils.info(logger, envImpl,
+                         "Transforming txn " + getId() +
+                         " from MasterTxn to ReplayTxn");
+
+        int hookCount = 0;
+        ReplayTxn replayTxn = null;
+        boolean needToClose = true;
+        try {
+            synchronized (this) {
+
+                if (isClosed()) {
+                    LoggerUtils.info(logger, envImpl,
+                                     "Txn " + getId() +
+                                     " is closed, no transform needed");
+                    needToClose = false;
+                    return null;
+                }
+
+                /*
+                 * Get the list of write locks, and process them in lsn order,
+                 * so we properly maintain the lastLoggedLsn and firstLoggedLsn
+                 * fields of the newly created ReplayTxn.
+                 */
+                final Set<Long> lockedLSNs = getWriteLockIds();
+
+                /*
+                 * This transaction may not actually have any write locks. In
+                 * that case, we permit it to live on.
+                 */
+                if (lockedLSNs.size() == 0) {
+                    LoggerUtils.info(logger, envImpl, "Txn " + getId() +
+                                     " had no write locks, didn't create" +
+                                     " ReplayTxn");
+                    needToClose = false;
+                    return null;
+                }
+
+                /*
+                 * We have write locks. Make sure that this txn can now
+                 * only be aborted. Otherwise, there could be this window
+                 * in this method:
+                 *  t1: locks stolen, no locks left in this txn
+                 *  t2: txn unfrozen, commits and aborts possible
+                 *  -- at this point, another thread could sneak in and
+                 *  -- try to commit. The txn would commit successfully,
+                 *  -- because a commit w/no write locks is a no-op.
+                 *  -- but that would convey the false impression that the
+                 *  -- txn's write operations had committed.
+                 *  t3: txn is closed
+                 */
+                setOnlyAbortable(new UnknownMasterException
+                                 (envImpl.getName() +
+                                  " is no longer a master"));
+                replayTxn = replay.getReplayTxn(getId(), false);
+
+                /*
+                 * Order the lsns, so that the locks are taken in the proper
+                 * order, and the txn's firstLoggedLsn and lastLoggedLsn fields
+                 * are properly set.
+                 */
+                List<Long> sortedLsns = new ArrayList<>(lockedLSNs);
+                Collections.sort(sortedLsns);
+                LoggerUtils.info(logger, envImpl,
+                                 "Txn " + getId() + " has " +
+                                 lockedLSNs.size() + " locks to transform");
+
+                /*
+                 * Transfer each lock.
+                 * Note that ultimately, since mastership
+                 * is changing, and replicated commits will only be executed
+                 * when a txn has originated on that node, the target ReplayTxn
+                 * can never be committed, and will only be aborted.
+                 */
+                for (Long lsn : sortedLsns) {
+
+                    LoggerUtils.info(logger, envImpl,
+                                     "Txn " + getId() +
+                                     " is transferring lock " + lsn);
+
+                    /*
+                     * Use a special method to steal the lock. Another approach
+                     * might have been to have the replayTxn merely attempt a
+                     * lock(); as an importunate txn, the replayTxn would
+                     * preempt the MasterTxn's lock. However, that path doesn't
+                     * work because lock() requires having a databaseImpl in
+                     * hand, and that's not available here.
+                     */
+                    replayTxn.stealLockFromMasterTxn(lsn);
+
+                    /*
+                     * Copy all the lock's info into the Replay and remove it
+                     * from the master. Normally, undo clears write locks, but
+                     * this MasterTxn will not be executing undo.
+                     */
+                    WriteLockInfo replayWLI = replayTxn.getWriteLockInfo(lsn);
+                    WriteLockInfo masterWLI = getWriteLockInfo(lsn);
+                    replayWLI.copyAllInfo(masterWLI);
+                    removeLock(lsn);
+                }
+
+                /*
+                 * Txns have collections of undoDatabases and deletedDatabases.
+                 * Undo databases are normally incrementally added to the txn
+                 * as locks are obtained. Unlike normal locking or recovery
+                 * locking, in this case we don't have a reference to the
+                 * databaseImpl that goes with this lock, so we copy the undo
+                 * collection in one fell swoop.
+                 */
+                replayTxn.copyDatabasesForConversion(this);
+
+                /*
+                 * This txn is no longer responsible for databaseImpl
+                 * cleanup, as that issue now lies with the ReplayTxn, so
+                 * remove the collection.
+                 */
+                deletedDatabases = null;
+
+                /*
+                 * All locks have been removed from this transaction. Clear
+                 * the firstLoggedLsn and lastLoggedLsn so there's no danger
+                 * of attempting to undo anything; this txn is no longer
+                 * responsible for any records.
+                 */
+                lastLoggedLsn = DbLsn.NULL_LSN;
+                firstLoggedLsn = DbLsn.NULL_LSN;
+
+                /* If this txn also had read locks, clear them */
+                clearReadLocks();
+            }
+        } finally {
+
+            assert TestHookExecute.doHookIfSet(convertHook, hookCount++);
+
+            unfreeze();
+
+            assert TestHookExecute.doHookIfSet(convertHook, hookCount++);
+
+            /*
+             * We need to abort the txn, but we can't call abort() because that
+             * method checks whether we are the master! Instead, call the
+             * internal method, close(), in order to end this transaction and
+             * unregister it from the transactionManager. Must be called
+             * outside the synchronization block.
+             */
+            if (needToClose) {
+                LoggerUtils.info(logger, envImpl, "About to close txn " +
+                                 getId() + " state=" + getState());
+                close(false /*isCommit*/);
+                LoggerUtils.info(logger, envImpl, "Closed txn " + getId() +
+                                 " state=" + getState());
+            }
+            assert TestHookExecute.doHookIfSet(convertHook, hookCount++);
+        }
+
+        return replayTxn;
+    }
+
+    public void freeze() {
+        freeze = true;
+    }
+
+    private void unfreeze() {
+        freeze = false;
+    }
+
+    /**
+     * Used to hold the transaction stable while it is being cloned as a
+     * ReplayTxn, during master->replica transitions. Essentially, there
+     * are two parties that now have a reference to this transaction -- the
+     * originating application thread, and the RepNode thread that is
+     * trying to set up internal state so it can begin to act as a replica.
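+     * <p>
+     * A rough sketch of the sequence (a paraphrase of the surrounding driver
+     * code, using only methods defined in this class):
+     * <pre>
+     *    // RepNode thread, during the master->replica transition:
+     *    masterTxn.freeze();
+     *    ReplayTxn replayTxn =
+     *        masterTxn.convertToReplayTxnAndClose(logger, replay);
+     *    // convertToReplayTxnAndClose() unfreezes and closes the MasterTxn
+     *
+     *    // Application thread, meanwhile: commit()/abort() on the frozen
+     *    // txn fail, as described below
+     * </pre>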
+     *
+     * The transaction will throw UnknownMasterException or
+     * ReplicaWriteException if the transaction is frozen, so that the
+     * application knows that the transaction is no longer viable, but it
+     * doesn't attempt to do most of the follow-on cleanup and release of locks
+     * that failed aborts and commits normally attempt. One aspect of
+     * transaction cleanup can't be skipped though. It is necessary to do the
+     * post log hooks to free up the block txn latch lock so that the
+     * transaction can be closed by the RepNode thread. For example:
+     * - application thread starts transaction
+     * - application takes the block txn latch lock and attempts commit or
+     *   abort, but is stopped because the txn is frozen by master transfer.
+     * - the application must release the block txn latch lock.
+     * @see Replica#replicaTransitionCleanup
+     */
+    @Override
+    protected void checkIfFrozen(boolean isCommit) {
+        if (freeze) {
+            try {
+                ((RepImpl) envImpl).checkIfMaster(this);
+            } catch (DatabaseException e) {
+                if (isCommit) {
+                    postLogCommitAbortHook();
+                } else {
+                    postLogAbortHook();
+                }
+                throw e;
+            }
+        }
+    }
+
+    /* For unit testing */
+    public void setConvertHook(TestHook hook) {
+        convertHook = hook;
+    }
+
+    @Override
+    public boolean isMasterTxn() {
+        return true;
+    }
+
+    public void setArbiterAck(boolean val) {
+        needsArbiterAck = val;
+    }
+
+    public boolean getArbiterAck() {
+        return needsArbiterAck;
+    }
+
+}
diff --git a/src/com/sleepycat/je/rep/txn/ReadonlyTxn.java b/src/com/sleepycat/je/rep/txn/ReadonlyTxn.java
new file mode 100644
index 0000000..88a73ce
--- /dev/null
+++ b/src/com/sleepycat/je/rep/txn/ReadonlyTxn.java
@@ -0,0 +1,151 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.LockNotAvailableException;
+import com.sleepycat.je.ReplicaConsistencyPolicy;
+import com.sleepycat.je.ThreadInterruptedException;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.rep.MasterStateException;
+import com.sleepycat.je.rep.ReplicaConsistencyException;
+import com.sleepycat.je.rep.ReplicaWriteException;
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.txn.LockResult;
+import com.sleepycat.je.txn.LockType;
+import com.sleepycat.je.txn.Txn;
+
+/**
+ * A ReadonlyTxn represents
+ * - a user initiated Txn executed on the Master node, when local-write or
+ *   read-only is configured, or
+ * - a user initiated Txn executed on the Replica node, whether or not
+ *   local-write is configured, or
+ * - an auto-commit Txn on a Replica node for a replicated DB.
+ *
+ * As its name implies, it is used to implement the read-only semantics for
+ * access to replicated DBs on the Replica. It is not a replicated txn, i.e.,
+ * it is not part of the rep stream.
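+ * <p>
+ * A minimal illustrative sketch (hypothetical application code; the
+ * environment and database handles are assumptions): a write attempted under
+ * such a txn on a Replica fails with {@link ReplicaWriteException} instead of
+ * entering the rep stream:
+ * <pre>
+ *    Transaction txn = replicaEnv.beginTransaction(null, null);
+ *    try {
+ *        db.put(txn, key, data);      // write to a replicated DB
+ *    } catch (ReplicaWriteException e) {
+ *        txn.abort();                 // the txn cannot perform writes here
+ *    }
+ * </pre>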
+ * + * In addition, it uses the transaction hooks defined on Txn to implement the + * ReplicaConsistencyPolicy. This must be done for all access to replicated + * DBs, including when local-write is configured. + */ +public class ReadonlyTxn extends Txn { + + private final boolean localWrite; + + public ReadonlyTxn(EnvironmentImpl envImpl, TransactionConfig config) + throws DatabaseException { + + super(envImpl, config, ReplicationContext.NO_REPLICATE); + + localWrite = config.getLocalWrite(); + } + + @Override + public boolean isLocalWrite() { + return localWrite; + } + + /** + * Provides a wrapper to screen for write locks. The use of write locks is + * used to infer that an attempt is being made to modify a replicated + * database. Note that this technique misses "conditional" updates, for + * example a delete operation using a non-existent key, but we are ok with + * that since the primary intent here is to ensure the integrity of the + * replicated stream that is being played back at that replica and these + * checks prevent such mishaps. + */ + @Override + public LockResult lockInternal(long lsn, + LockType lockType, + boolean noWait, + boolean jumpAheadOfWaiters, + DatabaseImpl database) + throws LockNotAvailableException, LockConflictException, + DatabaseException { + + if (lockType.isWriteLock() && !database.allowReplicaWrite()) { + disallowReplicaWrite(); + } + return super.lockInternal + (lsn, lockType, noWait, jumpAheadOfWaiters, database); + } + + /** + * If logging occurs before locking, we must screen out write locks here. + * + * If we allow the operation (e.g., for a NameLN), then be sure to call the + * base class method to prepare to undo in the (very unlikely) event that + * logging succeeds but locking fails. [#22875] + */ + @Override + public void preLogWithoutLock(DatabaseImpl database) { + if (!database.allowReplicaWrite()) { + disallowReplicaWrite(); + } + super.preLogWithoutLock(database); + } + + /** + * Unconditionally throws ReplicaWriteException because this locker was + * created on a replica. + */ + @Override + public void disallowReplicaWrite() { + throw new ReplicaWriteException + (this, ((RepImpl) envImpl).getStateChangeEvent()); + } + + /** + * Verifies that consistency requirements are met before allowing the + * transaction to proceed. + */ + @Override + protected void txnBeginHook(TransactionConfig config) + throws ReplicaConsistencyException, DatabaseException { + + checkConsistency((RepImpl) envImpl, config.getConsistencyPolicy()); + } + + /** + * Utility method used here and by ReplicaThreadLocker. + */ + static void checkConsistency(final RepImpl repImpl, + final ReplicaConsistencyPolicy policy) { + if (State.DETACHED.equals(repImpl.getState()) || + State.MASTER.equals(repImpl.getState())) { + /* Detached state, permit read-only access to the environment. */ + return; + } + assert (policy != null) : "Missing default consistency policy"; + try { + policy.ensureConsistency(repImpl); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(repImpl, e); + } catch (MasterStateException e) { + /* + * Transitioned to master, while waiting for consistency, so the + * txn is free to go ahead on the master. + */ + return; + } + } +} diff --git a/src/com/sleepycat/je/rep/txn/ReplayTxn.java b/src/com/sleepycat/je/rep/txn/ReplayTxn.java new file mode 100644 index 0000000..0bc4f03 --- /dev/null +++ b/src/com/sleepycat/je/rep/txn/ReplayTxn.java @@ -0,0 +1,713 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. 
All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import static com.sleepycat.je.utilint.DbLsn.NULL_LSN; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.recovery.RecoveryManager; +import com.sleepycat.je.rep.utilint.SimpleTxnMap; +import com.sleepycat.je.tree.TreeLocation; +import com.sleepycat.je.txn.LockAttemptResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.TxnChain; +import com.sleepycat.je.txn.TxnChain.RevertInfo; +import com.sleepycat.je.txn.TxnManager; +import com.sleepycat.je.txn.UndoReader; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; + +/** + * Used for replaying replicated operations at replica nodes. + */ +public class ReplayTxn extends Txn { + + /* The time the Txn was initiated. */ + private final long startTime = System.currentTimeMillis(); + + /* The time the txn was committed or aborted. Zero if in progress */ + private long endTime = 0; + + /* + * The last VLSN applied by this txn. Used for sanity checking. + * This field is currently not precisely set when a transaction is + * resurrected. The sanity check could be made more robust by setting + * this field at resurrection. + */ + private VLSN lastApplied = VLSN.NULL_VLSN; + + /* Tracks whether the rep group db was changed by the transaction */ + private boolean repGroupDbChange = false; + + /* NodeId of the master which initiated the commit or abort. */ + private int masterNodeId; + + /* + * The DTVLSN to be associated with this replay TXN. It's null for + * ReplayTxns that are actually written as part of a replica to master + * transition. + */ + private long dtvlsn = VLSN.NULL_VLSN_SEQUENCE; + + private SimpleTxnMap activeTxns; + + /* + * ReplayTxns are frequently constructed. Don't create its own logger; + * instead, use the Replay's logger. + */ + private final Logger logger; + + /** + * Used when creating ReplayTxns for Replay. The ReplayTxn adds itself to + * the activeTxns map. + */ + public ReplayTxn(EnvironmentImpl envImpl, + TransactionConfig config, + long txnId, + SimpleTxnMap activeTxns, + Logger logger) + throws DatabaseException { + + this(envImpl, config, txnId, logger); + registerWithActiveTxns(activeTxns); + } + + /** + * Used when creating ReplayTxns at recovery. No ActiveTxns map is + * available. 
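+     * <p>
+     * Illustrative construction paths (a sketch; the argument values are
+     * assumed, but both constructors are defined in this class):
+     * <pre>
+     *    // replay: the txn registers itself in the shared activeTxns map
+     *    new ReplayTxn(envImpl, config, txnId, activeTxns, logger);
+     *
+     *    // recovery: no activeTxns map is available yet
+     *    new ReplayTxn(envImpl, config, txnId, logger);
+     * </pre>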
+     */
+    public ReplayTxn(EnvironmentImpl envImpl,
+                     TransactionConfig config,
+                     long txnId,
+                     Logger logger)
+        throws DatabaseException {
+
+        super(envImpl,
+              config,
+              null,              // ReplicationContext set later
+              txnId);            // mandatedId
+        /* Preempt reader transactions when a lock conflict occurs. */
+        setImportunate(true);
+        this.logger = logger;
+        assert !config.getLocalWrite();
+    }
+
+    /**
+     * Don't let the ReplayTxn have a timeout of 0. If it did, it could be
+     * deadlocked against a reader txn. As long as there is a non-zero timeout,
+     * any conflicts will be adjudicated by the LockManager in its favor.
+     */
+    @Override
+    protected long getInitialLockTimeout() {
+        return envImpl.getReplayTxnTimeout();
+    }
+
+    @Override
+    public boolean isLocalWrite() {
+        return false;
+    }
+
+    public boolean getRepGroupDbChange() {
+        return repGroupDbChange;
+    }
+
+    public void noteRepGroupDbChange() {
+        repGroupDbChange = true;
+    }
+
+    public void registerWithActiveTxns(SimpleTxnMap newActiveTxns) {
+        assert activeTxns == null;
+        activeTxns = newActiveTxns;
+        activeTxns.put(this);
+    }
+
+    /**
+     * Replay transactions always use the txn id that is included in their
+     * replicated operation.
+     */
+    @Override
+    protected long generateId(TxnManager txnManager,
+                              long mandatedId) {
+        return mandatedId;
+    }
+
+    /*
+     * A ReplayTxn always writes the node ID of the master which generated
+     * the commit or abort.
+     */
+    @Override
+    protected int getReplicatorNodeId() {
+        return masterNodeId;
+    }
+
+    /**
+     * Utility method to validate DTVLSN. It ensures that the DTVLSN is not
+     * null and that DTVLSN(vlsn) <= VLSN.
+     */
+    private long validateDTVLSN(VLSN txnVLSN,
+                                long checkDTVLSN) {
+
+        if ((txnVLSN != null) && VLSN.isNull(checkDTVLSN)) {
+            throw new IllegalStateException("DTVLSN(" + txnVLSN + ") is null");
+        }
+
+        if (txnVLSN == null) {
+            /*
+             * Can be null, if this is an in-flight replay Txn that is being
+             * aborted as part of a replica -> master transition and
+             * consequently does not have a pre-assigned vlsn; a VLSN will
+             * be assigned when the abort is actually written.
+             */
+            return checkDTVLSN;
+        }
+
+        if (checkDTVLSN > txnVLSN.getSequence()) {
+            throw new IllegalStateException("DTVLSN(vlsn)=" + checkDTVLSN +
+                                            " > " + "vlsn=" + txnVLSN);
+        }
+
+        return checkDTVLSN;
+    }
+
+    /**
+     * Commits the txn being replayed.
+     *
+     * @param syncPolicy to be used for the commit.
+     * @param clientRepContext the replication context; it encapsulates the
+     * VLSN associated with the txn.
+ * @param commitDTVLSN the dtvlsn + * + * @return the commit LSN + * + * @throws DatabaseException + */ + public long commit(SyncPolicy syncPolicy, + ReplicationContext clientRepContext, + int commitMasterNodeId, + long commitDTVLSN) + throws DatabaseException { + + + if (logger.isLoggable(Level.FINE)) { + LoggerUtils.fine(logger, envImpl, "commit called for " + getId()); + } + + setRepContext(clientRepContext); + + this.dtvlsn = + validateDTVLSN(clientRepContext.getClientVLSN(), commitDTVLSN); + Durability durability = null; + if (syncPolicy == SyncPolicy.SYNC) { + durability = Durability.COMMIT_SYNC; + } else if (syncPolicy == SyncPolicy.NO_SYNC) { + durability = Durability.COMMIT_NO_SYNC; + } else if (syncPolicy == SyncPolicy.WRITE_NO_SYNC) { + durability = Durability.COMMIT_WRITE_NO_SYNC; + } else { + throw EnvironmentFailureException.unexpectedState + ("Unknown sync policy: " + syncPolicy); + } + + /* + * Set the master id before commit is called, so getReplicatorNodeId() + * will return this value and write the originating node's id into + * the commit record on this log. + */ + this.masterNodeId = commitMasterNodeId; + long lsn = super.commit(durability); + endTime = System.currentTimeMillis(); + + return lsn; + } + + @Override + public long commit() { + throw EnvironmentFailureException.unexpectedState + ("Replay Txn abort semantics require use of internal commit api"); + } + + @Override + public long commit(Durability durability) { + throw EnvironmentFailureException.unexpectedState + ("Replay Txn abort semantics require use of internal commit api"); + } + + @Override + public void abort() { + throw EnvironmentFailureException.unexpectedState + ("Replay Txn abort semantics require use of internal abort api"); + } + + @Override + public long abort(boolean forceFlush) { + throw EnvironmentFailureException.unexpectedState + ("Replay Txn abort semantics require use of internal abort api"); + } + + @Override + protected long getDTVLSN() { + return dtvlsn; + } + + public long abort(ReplicationContext clientRepContext, + int abortMasterNodeId, + long abortDTVLSN) + throws DatabaseException { + + setRepContext(clientRepContext); + this.dtvlsn = validateDTVLSN(clientRepContext.getClientVLSN(), + abortDTVLSN); + /* + * Set the master id before abort is called, so getReplicatorNodeId() + * will return this value and write the originating node's id into + * the abort record on this log. + */ + this.masterNodeId = abortMasterNodeId; + long lsn = super.abort(false /* forceFlush */); + endTime = System.currentTimeMillis(); + + return lsn; + } + + /** + * Always return true in order to ensure that the VLSN is logged. Normally + * this method returns false when no LN has been logged by the txn. But + * when replaying a Master txn, we need to guarantee that the VLSN is + * logged on the Replica. [#20543] + */ + @Override + protected boolean updateLoggedForTxn() { + return true; + } + + @Override + public void close(boolean isCommit) + throws DatabaseException { + + super.close(isCommit); + + if (activeTxns != null) { + Txn removed = activeTxns.remove(getId()); + assert removed != null : "txn was not in map " + this + " " + + LoggerUtils.getStackTrace(); + } + } + + /** + * Invoked when a ReplayTxn is being abandoned on shutdown. + */ + public void cleanup() + throws DatabaseException { + releaseWriteLocks(); + /* Close the transaction thus causing it to be unregistered. */ + close(false); + } + + /** + * Rollback all write operations that are logged with an LSN > the + * matchpointLsn parameter. 
This is logically a truncation of the log
+     * entries written by this transaction. Any log entries created by this
+     * transaction are marked obsolete.
+     *
+     * Note that this is by no means a complete implementation of what would be
+     * needed to support user visible savepoints. This method only rolls back
+     * write operations and doesn't handle other types of state, like read
+     * locks and open cursors.
+     *
+     * There are several key assumptions:
+     * - the transaction does not hold read locks.
+     * - the transaction will either be resumed, and any rolled back
+     *   operations will be repeated, or the transaction will be aborted
+     *   in its entirety.
+     *
+     * If all operations in the transaction are rolled back, this transaction
+     * is also unregistered and closed.
+     *
+     * Rolling back a log entry through rollback is akin to truncating the
+     * transactional log. The on-disk entries should not be referred to by
+     * anything in the in-memory tree or the transaction chain. JE's append
+     * only storage and the fact that the transactional log entries are
+     * intertwined through the physical log prohibit any log truncation. To
+     * mimic log truncation, any rolled back log entry is marked as
+     * obsolete. Since only the last version of any data record is alive,
+     * any future uses of this transaction must use the obsoleteDupsAllowed
+     * option (see Txn.countObsoleteExact) to prevent asserts about duplicate
+     * obsolete offsets. For example, suppose the transaction logs this:
+     *
+     * 100 LNa (version1)
+     * 200 LNa (version2)
+     * 300 LNa (version3)
+     *
+     * At this point in time, LSN 100 and 200 are obsolete.
+     *
+     * Now, suppose we roll back to LSN 100. LSNs 200 and 300 are marked
+     * obsolete by the rollback (although LSN 200 was already obsolete). It is
+     * true that for an instant in time LSN 100 is incorrectly marked as
+     * obsolete, when it's really alive. But this transaction is going to
+     * either abort or resume exactly as it was before, so LSN 100 is going to
+     * be obsolete again.
+     *
+     * Suppose txn.abort() is called. The abort() logic will mark LSN 100 as
+     * obsolete, since it is the latest version of the record in the
+     * transaction. Using the obsoleteDupsAllowed option avoids an assertion on
+     * the double recording of LSN 100.
+     *
+     * Alternatively, suppose LNa (version2) is retransmitted and logged as LSN
+     * 400. Normal execution of LN.log() marks LSN 100 as obsolete, which would
+     * trigger the assertion were it not for obsoleteDupsAllowed.
+     *
+     * @return list of LSNs that were rolled back
+     */
+    public Collection<Long> rollback(long matchpointLsn)
+        throws DatabaseException {
+
+        List<Long> rollbackLsns = new ArrayList<>();
+        LoggerUtils.finest(logger, envImpl, "Partial Rollback of " + this);
+        synchronized (this) {
+            checkState(true);
+
+            /* This transaction didn't log anything, nothing to roll back. */
+            if (lastLoggedLsn == NULL_LSN) {
+                return rollbackLsns;
+            }
+
+            /*
+             * This transaction doesn't include any operations that are after
+             * the matchpointLsn. There is nothing to roll back.
+             */
+            if (DbLsn.compareTo(lastLoggedLsn, matchpointLsn) <= 0) {
+                return rollbackLsns;
+            }
+
+            setRollback();
+            undoWrites(matchpointLsn, rollbackLsns);
+        }
+
+        /*
+         * The call to undoWrites() may have rolled everything back, and set
+         * lastLoggedLsn to NULL_LSN.
+         */
+        if (lastLoggedLsn == NULL_LSN) {
+            /* Everything was rolled back. */
+            try {
+
+                /*
+                 * Purge any databaseImpls not needed as a result of the abort.
+                 * Be sure to do this outside the synchronization block, to
+                 * avoid conflict w/checkpointer.
+                 */
+                cleanupDatabaseImpls(false);
+            } finally {
+                close(false /*isCommit*/);
+            }
+        }
+
+        /*
+         * We don't expect there to be any database handles associated with
+         * a ReplayTxn, because only DatabaseImpls are used. Because of that,
+         * there should be no cleanup needed.
+         */
+        if (openedDatabaseHandles != null) {
+            throw EnvironmentFailureException.unexpectedState
+                ("Replay Txn " + getId() + " has an openedDatabaseHandles");
+        }
+
+        /*
+         * There is no need to call cleanupDatabaseImpls if the txn still holds
+         * locks. The operations in this txn will either be entirely aborted,
+         * or will be repeated, so any cleanup will happen when the txn ends.
+         */
+        return rollbackLsns;
+    }
+
+    /**
+     * Roll back the changes to this txn's write locked nodes up to but not
+     * including the entry at the specified matchpoint. When we log a
+     * transactional entry, we record the LSN of the original,
+     * before-this-transaction version as the abort LSN. This means that if
+     * there are multiple updates to a given record in a single transaction,
+     * each update only references that original version and its true
+     * predecessor.
+     *
+     * This was done to streamline abort processing, so that an undo reverts
+     * directly to the original version rather than stepping through all the
+     * intermediates. The intermediates are skipped. However, undo to a
+     * matchpoint may need to stop at an intermediate point, so we need to
+     * create a true chain of versions.
+     *
+     * To do so, we read the transaction backwards from the last logged LSN
+     * to reconstruct a transaction chain that links intermediate versions
+     * of records. For example, suppose our transaction looks like this and
+     * that we are undoing up to LSN 250
+     *
+     * lsn=100 node=A (version 1)
+     * lsn=200 node=B (version 1)
+     *                        <-- matchpointLsn
+     * lsn=300 node=C (version 1)
+     * lsn=400 node=A (version 2)
+     * lsn=500 node=B (version 2)
+     * lsn=600 node=A (version 3)
+     * lsn=700 node=A (version 4)
+     *
+     * To set up the old versions, we walk from LSN 700 -> 100:
+     * 700 (A) rolls back to 600
+     * 600 (A) rolls back to 400
+     * 500 (B) rolls back to 200
+     * 400 (A) rolls back to 100
+     * 300 (C) rolls back to an empty slot (NULL_LSN).
+     *
+     * A partial rollback also requires resetting the lastLoggedLsn field,
+     * because these operations are no longer in the btree and their on-disk
+     * entries are no longer valid.
+     *
+     * Lastly, the appropriate write locks must be released.
+     * @param matchpointLsn the rollback should go up to but not include this
+     * LSN.
+     */
+    private void undoWrites(long matchpointLsn, List<Long> rollbackLsns)
+        throws DatabaseException {
+
+        /*
+         * Generate a map of node->List of intermediate LSNs for this node,
+         * to re-create the transaction chain.
+         */
+        TreeLocation location = new TreeLocation();
+        Long undoLsn = lastLoggedLsn;
+
+        TxnChain chain =
+            new TxnChain(undoLsn, id, matchpointLsn, undoDatabases, envImpl);
+
+        try {
+            while ((undoLsn != DbLsn.NULL_LSN) &&
+                    DbLsn.compareTo(undoLsn, matchpointLsn) > 0) {
+
+                UndoReader undo =
+                    UndoReader.create(envImpl, undoLsn, undoDatabases);
+
+                RevertInfo revertTo = chain.pop();
+
+                logFinest(undoLsn, undo, revertTo);
+
+                /*
+                 * When we undo this log entry, we've logically truncated
+                 * it from the log. Remove it from the btree and mark it
+                 * obsolete.
+                 */
+                RecoveryManager.rollbackUndo(
+                    logger, Level.FINER, location,
+                    undo.db, undo.logEntry, undoLsn, revertTo);
+
+                countObsoleteInexact(undoLsn, undo);
+                rollbackLsns.add(undoLsn);
+
+                /*
+                 * Move on to the previous log entry for this txn and update
+                 * what is considered to be the end of the transaction chain.
+                 */
+                undoLsn = undo.logEntry.getUserTxn().getLastLsn();
+                lastLoggedLsn = undoLsn;
+            }
+
+            /*
+             * Correct the fields which hold LSN and VLSN state that may
+             * now be changed.
+             */
+            lastApplied = chain.getLastValidVLSN();
+            if (!updateLoggedForTxn()) {
+                firstLoggedLsn = NULL_LSN;
+            }
+
+        } catch (DatabaseException e) {
+            LoggerUtils.traceAndLogException(envImpl, "Txn", "undo",
+                                             "For LSN=" +
+                                             DbLsn.getNoFormatString(undoLsn),
+                                             e);
+            throw e;
+        } catch (RuntimeException e) {
+            throw EnvironmentFailureException.unexpectedException
+                ("Txn undo for LSN=" + DbLsn.getNoFormatString(undoLsn), e);
+        }
+
+        if (lastLoggedLsn == DbLsn.NULL_LSN) {
+
+            /*
+             * The whole txn is rolled back, and it may not appear again. This
+             * is the equivalent of an abort. Do any delete processing for an
+             * abort which is needed.
+             *
+             * Set database state for deletes before releasing any write
+             * locks.
+             */
+            setDeletedDatabaseState(false);
+        }
+
+        /* Clear any write locks that are no longer needed. */
+        clearWriteLocks(chain.getRemainingLockedNodes());
+    }
+
+    /**
+     * Count an LN obsolete that is being made invisible by rollback.
+     *
+     * Use inexact counting. Since invisible entries are not processed by the
+     * cleaner, recording the obsolete offset would be a waste of resources.
+     * Since we don't count offsets, we don't need to worry about duplicate
+     * offsets.
+     *
+     * Some entries may be double counted if they were previously counted
+     * obsolete, for example, when multiple versions of an LN were logged.
+     * This is tolerated for an exceptional situation like rollback.
+     */
+    private void countObsoleteInexact(long undoLsn, UndoReader undo) {
+
+        /*
+         * "Immediately obsolete" LNs are counted as obsolete when they are
+         * logged, so no need to repeat here.
+         */
+        if (undo.logEntry.isImmediatelyObsolete(undo.db)) {
+            return;
+        }
+
+        envImpl.getLogManager().countObsoleteNode(undoLsn,
+                                                  null, /*type*/
+                                                  undo.logEntrySize,
+                                                  undo.db,
+                                                  false /*countExact*/);
+    }
+
+    /**
+     * Returns the elapsed time associated with this transaction. If the
+     * transaction is in progress, it returns the running elapsed time.
+     *
+     * @return the elapsed time as above.
+     */
+    public long elapsedTime() {
+        return ((endTime > 0) ? endTime : System.currentTimeMillis()) -
+                startTime;
+    }
+
+    /**
+     * Returns the time when this transaction was committed or aborted.
+     * Returns zero if the transaction is still in progress.
+     *
+     * @return the end time or zero
+     */
+    public long getEndTime() {
+        return endTime;
+    }
+
+    public void setLastAppliedVLSN(VLSN justApplied) {
+        if (justApplied.compareTo(lastApplied) <= 0) {
+            throw EnvironmentFailureException.unexpectedState
+                ("Txn " + getId() + " attempted VLSN = " + justApplied +
+                 " txnLastApplied = " + lastApplied);
+        }
+        this.lastApplied = justApplied;
+    }
+
+    /**
+     * ReplicatedTxns set the replication context when the txn commit or
+     * abort record arrives.
+     */
+    public void setRepContext(ReplicationContext repContext) {
+        this.repContext = repContext;
+    }
+
+    /* Wrap the call to logger to reduce runtime overhead.
*/ + private void logFinest(long lsn, UndoReader undo, RevertInfo revertTo) { + if ((logger != null) && (logger.isLoggable(Level.FINEST))) { + LoggerUtils.finest(logger, envImpl, + "undoLsn=" + DbLsn.getNoFormatString(lsn) + + " undo=" + undo + " revertInfo=" + revertTo); + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(""); + sb.append(super.toString()); + return sb.toString(); + } + + /* + * Copy all collections that will be needed to convert masterTxn to this + * ReplayTxn. Note that we do not need to copy the openDatabaseHandle + * collection. That collection is only needed by an application-facing + * Txn, so that those database handles can be invalidated if + * needed. ReplayTxn is not application-facing, and uses DatabaseImpls + * rather than Databases. + */ + public void copyDatabasesForConversion(Txn masterTxn) { + if (masterTxn.getUndoDatabases() != null) { + if (undoDatabases == null) { + undoDatabases = new HashMap(); + } + undoDatabases.putAll(masterTxn.getUndoDatabases()); + } + + if (masterTxn.getDeletedDatabases() != null) { + if (deletedDatabases == null) { + deletedDatabases = new HashSet(); + } + deletedDatabases.addAll(masterTxn.getDeletedDatabases()); + } + } + + /** + * Transfer a lock from another transaction to this one. Used for master-> + * replica transitions, when a node has to transform a MasterTxn into a + * ReplayTxn. Another approach would be to have this importunate ReplayTxn + * call lock() on the lsn, but that path is not available because we + * do not have a handle on a databaseImpl. + */ + public void stealLockFromMasterTxn(Long lsn) { + + LockAttemptResult result = lockManager.stealLock + (lsn, this, LockType.WRITE); + + /* + * Assert, and if something strange happened, opt to invalidate + * the environment and wipe the slate clean. + */ + if (!result.success) { + throw EnvironmentFailureException.unexpectedState + (envImpl, + "Transferring from master to replica state, txn " + + getId() + " was unable to transfer lock for " + + DbLsn.getNoFormatString(lsn) + ", lock grant type=" + + result.lockGrant); + } + + addLock(Long.valueOf(lsn), LockType.WRITE, result.lockGrant); + addLogInfo(lsn); + } +} diff --git a/src/com/sleepycat/je/rep/txn/ReplicaThreadLocker.java b/src/com/sleepycat/je/rep/txn/ReplicaThreadLocker.java new file mode 100644 index 0000000..ac2757f --- /dev/null +++ b/src/com/sleepycat/je/rep/txn/ReplicaThreadLocker.java @@ -0,0 +1,105 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.ThreadLocker; + +/** + * A ReplicaThreadLocker is used with a user initiated non-transactional + * operation on a Replica, for a replicated DB. 
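+ * <p>
+ * A minimal illustrative sketch (hypothetical application code, not part of
+ * this class): a read of a replicated DB on a Replica with a null transaction
+ * runs under a thread locker, and so passes through the hooks below:
+ * <pre>
+ *    DatabaseEntry key = new DatabaseEntry(keyBytes);  // keyBytes assumed
+ *    DatabaseEntry data = new DatabaseEntry();
+ *    db.get(null, key, data, LockMode.DEFAULT);        // null txn
+ * </pre>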
+ * + * Like ReadonlyTxn, it enforces read-only semantics and implements the + * ReplicaConsistencyPolicy. Unlike ReadonlyTxn, the environment default + * ReplicaConsistencyPolicy is enforced rather than the policy specified via + * TransactionConfig, and this policy is enforced via the openCursorHook rather + * than the txnBeginHook. + */ +public class ReplicaThreadLocker extends ThreadLocker { + + private final RepImpl repImpl; + + public ReplicaThreadLocker(final RepImpl repImpl) { + super(repImpl); + this.repImpl = repImpl; + } + + @Override + public ThreadLocker newEmptyThreadLockerClone() { + return new ReplicaThreadLocker(repImpl); + } + + /** + * This overridden method prevents writes on a replica. This check is + * redundant because Cursor won't allow writes to a transactional database + * when no Transaction is specified. But we check here also for safety and + * for consistency with ReadonlyTxn. + */ + @Override + public LockResult lockInternal(final long lsn, + final LockType lockType, + final boolean noWait, + final boolean jumpAheadOfWaiters, + final DatabaseImpl database) { + if (lockType.isWriteLock() && !database.allowReplicaWrite()) { + disallowReplicaWrite(); + } + return super.lockInternal(lsn, lockType, noWait, jumpAheadOfWaiters, + database); + } + + /** + * If logging occurs before locking, we must screen out write locks here. + * + * If we allow the operation (e.g., for a non-replicated DB), then be sure + * to call the base class method to prepare to undo in the (very unlikely) + * event that logging succeeds but locking fails. [#22875] + */ + @Override + public void preLogWithoutLock(DatabaseImpl database) { + if (!database.allowReplicaWrite()) { + disallowReplicaWrite(); + } + super.preLogWithoutLock(database); + } + + /** + * Unconditionally throws ReplicaWriteException because this locker was + * created on a replica. + */ + @Override + public void disallowReplicaWrite() { + throw new ReplicaWriteException(this, repImpl.getStateChangeEvent()); + } + + /** + * Verifies that consistency requirements are met before allowing the + * cursor to be opened. + */ + @Override + public void openCursorHook(DatabaseImpl dbImpl) + throws ReplicaConsistencyException { + + if (!dbImpl.isReplicated()) { + return; + } + ReadonlyTxn.checkConsistency(repImpl, + repImpl.getDefaultConsistencyPolicy()); + } +} diff --git a/src/com/sleepycat/je/rep/txn/package-info.java b/src/com/sleepycat/je/rep/txn/package-info.java new file mode 100644 index 0000000..07335c6 --- /dev/null +++ b/src/com/sleepycat/je/rep/txn/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: HA Txn/Locker subclasses for enforcing consistency and durability + * constraints. 
+ */ +package com.sleepycat.je.rep.txn; diff --git a/src/com/sleepycat/je/rep/util/AtomicLongMax.java b/src/com/sleepycat/je/rep/util/AtomicLongMax.java new file mode 100644 index 0000000..59e289f --- /dev/null +++ b/src/com/sleepycat/je/rep/util/AtomicLongMax.java @@ -0,0 +1,59 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * An Atomic long that maintains a max value + */ +public class AtomicLongMax { + + private final AtomicLong value; + + public AtomicLongMax(long initialValue) { + value = new AtomicLong(initialValue); + } + + /** + * Updates the max value if the argument is greater than the current max. + */ + public long updateMax(long newMax) { + long currMax = value.get(); + for (; newMax > currMax; currMax = value.get()) { + if (value.compareAndSet(currMax, newMax)) { + return newMax; + } + } + + /* Higher or equal value already present. */ + return currMax; + } + + /** + * Gets the current value. + * + * @return the current value + */ + public long get() { + return value.get(); + } + + /** + * Set the value to newValue and returns the old value. + */ + public long set(long newValue) { + return value.getAndSet(newValue); + } +} diff --git a/src/com/sleepycat/je/rep/util/DbEnableReplication.java b/src/com/sleepycat/je/rep/util/DbEnableReplication.java new file mode 100644 index 0000000..31c3add --- /dev/null +++ b/src/com/sleepycat/je/rep/util/DbEnableReplication.java @@ -0,0 +1,256 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import static com.sleepycat.je.rep.impl.RepParams.NODE_HOST_PORT; + +import java.io.File; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; + +/** + * A utility to convert an existing, non replicated JE environment for + * replication. This is useful when the user wants to initially prototype and + * develop a standalone transactional application, and then add replication as + * a second stage. + *

+ * <p>
+ * JE HA environment log files contain types of log records and metadata used
+ * only by replication. Non replicated environments are lacking that
+ * information and must undergo a one time conversion process to add that
+ * metadata and enable replication. The conversion process is one way. Once an
+ * environment directory is converted, the rules that govern {@link
+ * ReplicatedEnvironment} apply; namely, the directory cannot be opened by a
+ * read/write standalone {@link com.sleepycat.je.Environment}. Only a minimum
+ * amount of replication metadata is added, and the conversion process is not
+ * dependent on the size of the existing directory.
+ * <p>
+ * The conversion process takes these steps:
+ * <ol>
+ * <li>Use {@code DbEnableReplication} to convert an existing environment
+ * directory. {@code DbEnableReplication} can be used as a command line
+ * utility, and must be executed locally on the host which houses the
+ * environment directory. Alternatively, {@code DbEnableReplication} may be
+ * used programmatically through the provided APIs.</li>
+ * <li>Once converted, the environment directory may be treated as an existing
+ * master node, and can be opened with a {@code ReplicatedEnvironment}. No
+ * helper host configuration is needed.</li>
+ * <li>Additional nodes may be created and can join the group as newly created
+ * replicas, as described in {@code ReplicatedEnvironment}. Since these new
+ * nodes are empty, they should be configured to use the converted master as
+ * their helper node, and will go through the replication node lifecycle to
+ * populate their environment directories. In this case, there will be data in
+ * the converted master that can only be transferred to the replica through a
+ * file copy executed with the help of a
+ * {@link com.sleepycat.je.rep.NetworkRestore}.</li>
+ * </ol>
+ * <p>
+ * For example:
+ * <pre>
        + * // Create the first node using an existing environment 
        + * DbEnableReplication converter = 
        + *     new DbEnableReplication(envDirMars,          // env home dir
        + *                             "UniversalRepGroup", // group name
        + *                             "nodeMars",          // node name
        + *                             "mars:5001");        // node host,port
        + * converter.convert();
        + *
        + * ReplicatedEnvironment nodeMars = new ReplicatedEnvironment(envDirMars, ...);
        + * 
        + * // Bring up additional nodes, which will be initialized from 
        + * // nodeMars.
        + * ReplicationConfig repConfig = null;
        + * try {
        + *     repConfig = new ReplicationConfig("UniversalRepGroup", // groupName
        + *                                       "nodeVenus",         // nodeName
        + *                                       "venus:5008");       // nodeHostPort
        + *     repConfig.setHelperHosts("mars:5001");
        + * 
        + *     nodeVenus = new ReplicatedEnvironment(envDirB, repConfig, envConfig);
        + * } catch (InsufficientLogException insufficientLogEx) {
        + * 
        + *     // log files will be copied from another node in the group
        + *     NetworkRestore restore = new NetworkRestore();
        + *     restore.execute(insufficientLogEx, new NetworkRestoreConfig());
        + *     
        + *     // try opening the node now
        + *     nodeVenus = new ReplicatedEnvironment(envDirVenus, 
        + *                                           repConfig,
        + *                                           envConfig);
        + * }
        + * ...
        + * 
        + */ +public class DbEnableReplication { + + /* + * The code snippet in the header comment is tested in + * com.sleepycat.je.rep.util.EnvConvertTest. + * testJavadocForDbEnableReplication(). Please update this test case + * when the example is changed. + */ + private File envHome; + private String groupName; + private String nodeName; + private String nodeHostPort; + + private static final String usageString = + "usage: java -cp je.jar " + + "com.sleepycat.je.rep.util.DbEnableReplication\n" + + " -h # environment home directory\n" + + " -groupName # replication group name\n" + + " -nodeName # replicated node name\n" + + " -nodeHostPort # host name or IP address\n" + + " and port number to use\n" + + " for this node\n"; + + /** + * Usage: + *
        +     * java -cp je.jar com.sleepycat.je.rep.util.DbEnableReplication
        +     *   -h <dir>                          # environment home directory
        +     *   -groupName <group name>           # replication group name
        +     *   -nodeName <node name>             # replicated node name
        +     *   -nodeHostPort <host name:port number> # host name or IP address
        +     *                                             and port number to use
        +     *                                             for this node
        +     * 
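+     *
+     * A hypothetical invocation (the directory, group, node, and host:port
+     * values below are examples only):
+     *
+     * java -cp je.jar com.sleepycat.je.rep.util.DbEnableReplication \
+     *     -h /data/mars-env \
+     *     -groupName UniversalRepGroup \
+     *     -nodeName nodeMars \
+     *     -nodeHostPort mars:5001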
        + */ + public static void main(String[] args) { + DbEnableReplication converter = new DbEnableReplication(); + converter.parseArgs(args); + + try { + converter.convert(); + } catch (Throwable t) { + t.printStackTrace(); + } + } + + private void printUsage(String msg) { + System.err.println(msg); + System.err.println(usageString); + System.exit(-1); + } + + private void parseArgs(String[] args) { + int argc = 0; + int nArgs = args.length; + + while (argc < nArgs) { + String thisArg = args[argc++].trim(); + if (thisArg.equals("-h")) { + if (argc < nArgs) { + envHome = new File(args[argc++]); + } else { + printUsage("-h requires an argument"); + } + } else if (thisArg.equals("-groupName")) { + if (argc < nArgs) { + groupName = args[argc++]; + } else { + printUsage("-groupName requires an argument"); + } + } else if (thisArg.equals("-nodeName")) { + if (argc < nArgs) { + nodeName = args[argc++]; + } else { + printUsage("-nodeName requires an argument"); + } + } else if (thisArg.equals("-nodeHostPort")) { + if (argc < nArgs) { + nodeHostPort = args[argc++]; + try { + NODE_HOST_PORT.validateValue(nodeHostPort); + } catch (IllegalArgumentException e) { + e.printStackTrace(); + printUsage("-nodeHostPort is illegal!"); + } + } else { + printUsage("-nodeHostPort requires an argument"); + } + } + } + + if (envHome == null) { + printUsage("-h is a required argument."); + } + + if (groupName == null) { + printUsage("-groupName is a required argument."); + } + + if (nodeName == null) { + printUsage("-nodeName is a required argument."); + } + + if (nodeHostPort == null) { + printUsage("-nodeHostPort is a required argument."); + } + } + + private DbEnableReplication() { + } + + /** + * Create a DbEnableReplication object for this node. + * + * @param envHome The node's environment directory + * @param groupName The name of the new replication group + * @param nodeName The node's name + * @param nodeHostPort The host and port for this node + */ + public DbEnableReplication(File envHome, + String groupName, + String nodeName, + String nodeHostPort) { + this.envHome = envHome; + this.groupName = groupName; + this.nodeName = nodeName; + this.nodeHostPort = nodeHostPort; + } + + /** + * Modify the log files in the environment directory to add a modicum of + * replication required metadata. + */ + public void convert() { + + Durability durability = + new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(durability); + + ReplicationConfig repConfig = + new ReplicationConfig(groupName, nodeName, nodeHostPort); + repConfig.setHelperHosts(repConfig.getNodeHostPort()); + RepInternal.setAllowConvert(repConfig, true); + + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envHome, repConfig, envConfig); + + repEnv.close(); + } +} diff --git a/src/com/sleepycat/je/rep/util/DbGroupAdmin.java b/src/com/sleepycat/je/rep/util/DbGroupAdmin.java new file mode 100644 index 0000000..4c40a31 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/DbGroupAdmin.java @@ -0,0 +1,601 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import java.io.File; +import java.io.FileNotFoundException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Set; +import java.util.StringTokenizer; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.PropUtil; + +/** + * DbGroupAdmin supplies the functionality of the administrative class {@link + * ReplicationGroupAdmin} in a convenient command line utility. For example, it + * can be used to display replication group information, or to remove a node + * from the replication group. + *

        + * Note: This utility does not handle security and authorization. It is left + * to the user to ensure that the utility is invoked with proper authorization. + *

        + * See {@link DbGroupAdmin#main} for a full description of the command line + * arguments. + */ +/* + * SSL deferred + * See {@link ReplicationConfig} for descriptions of the parameters that + * control replication service access. + */ +public class DbGroupAdmin { + + enum Command { DUMP, REMOVE, TRANSFER_MASTER, UPDATE_ADDRESS, DELETE }; + + private String groupName; + private Set helperSockets; + private String nodeName; + private String newHostName; + private int newPort; + private String timeout; + private boolean forceFlag; + private DataChannelFactory channelFactory; + private ReplicationGroupAdmin groupAdmin; + private final ArrayList actions = new ArrayList(); + + private static final String undocumentedUsageString = + " -netProps # name of a property file containing\n" + + " # properties needed for replication\n" + + " # service access\n"; + + private static final String usageString = + "Usage: " + CmdUtil.getJavaCommand(DbGroupAdmin.class) + "\n" + + " -groupName # name of replication group\n" + + " -helperHosts # identifier for one or more members\n" + + " # of the replication group which can\n"+ + " # be contacted for group information,\n"+ + " # in this format:\n" + + " # hostname[:port][,hostname[:port]]\n" + + " -dumpGroup # dump group information\n" + + " -removeMember # node to be removed\n" + + " -updateAddress \n" + + " # update the network address for a\n" + + " # specified node. The node should not\n" + + " # be alive when updating the address\n" + + " -transferMaster [-force] \n" + + " # transfer master role to one of the\n" + + " # specified nodes."; + + /* Undocumented options for main() + * -netProps <propFile> # (optional) + * # name of a property file containing + * # properties needed for replication + * # service access + * -deleteMember # Deletes the node from the group, doesn't + * # just mark it removed + */ + + /** + * Usage: + *

        +     * java {com.sleepycat.je.rep.util.DbGroupAdmin |
        +     *       -jar je-<version>.jar DbGroupAdmin}
        +     *   -groupName <group name>  # name of replication group
        +     *   -helperHosts <host:port> # identifier for one or more members
        +     *                            # of the replication group which can be
        +     *                            # contacted for group information, in
        +     *                            # this format:
        +     *                            # hostname[:port][,hostname[:port]]*
        +     *   -dumpGroup               # dump group information
+     *   -removeMember <node name> # node to be removed
        +     *   -updateAddress <node name> <new host:port>
        +     *                            # update the network address for a specified
        +     *                            # node. The node should not be alive when
        +     *                            # updating address
        +     *   -transferMaster [-force] <node1,node2,...> <timeout>
        +     *                            # transfer master role to one of the
        +     *                            # specified nodes.
        +     * 
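+     *
+     * The same operations are available programmatically. A minimal sketch,
+     * using hypothetical group, helper, node, and timeout values:
+     *
+     * Set<InetSocketAddress> helpers = new HashSet<InetSocketAddress>();
+     * helpers.add(new InetSocketAddress("mars", 5001));
+     * DbGroupAdmin admin = new DbGroupAdmin("UniversalRepGroup", helpers);
+     * admin.dumpGroup();                        // print group composition
+     * admin.transferMaster("nodeVenus", "5 s"); // move the master role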
        + */ + public static void main(String... args) + throws Exception { + + DbGroupAdmin admin = new DbGroupAdmin(); + admin.parseArgs(args); + admin.run(); + } + + /** + * Print usage information for this utility. + * + * @param msg + */ + private void printUsage(String msg) { + if (msg != null) { + System.out.println(msg); + } + + System.out.println(usageString); + System.exit(-1); + } + + + /** + * Parse the command line parameters. + * + * @param argv Input command line parameters. + */ + private void parseArgs(String argv[]) { + int argc = 0; + int nArgs = argv.length; + String netPropsName = null; + + if (nArgs == 0) { + printUsage(null); + System.exit(0); + } + + while (argc < nArgs) { + String thisArg = argv[argc++]; + if (thisArg.equals("-groupName")) { + if (argc < nArgs) { + groupName = argv[argc++]; + } else { + printUsage("-groupName requires an argument"); + } + } else if (thisArg.equals("-helperHosts")) { + if (argc < nArgs) { + helperSockets = HostPortPair.getSockets(argv[argc++]); + } else { + printUsage("-helperHosts requires an argument"); + } + } else if (thisArg.equals("-dumpGroup")) { + actions.add(Command.DUMP); + } else if (thisArg.equals("-removeMember")) { + if (argc < nArgs) { + nodeName = argv[argc++]; + actions.add(Command.REMOVE); + } else { + printUsage("-removeMember requires an argument"); + } + } else if (thisArg.equals("-updateAddress")) { + if (argc < nArgs) { + nodeName = argv[argc++]; + + if (argc < nArgs) { + String hostPort = argv[argc++]; + int index = hostPort.indexOf(":"); + if (index < 0) { + printUsage("Host port pair format must be " + + ":"); + } + + newHostName = hostPort.substring(0, index); + newPort = Integer.parseInt + (hostPort.substring(index + 1, hostPort.length())); + } else { + printUsage("-updateAddress requires a " + + ": argument"); + } + + actions.add(Command.UPDATE_ADDRESS); + } else { + printUsage + ("-updateAddress requires the node name argument"); + } + } else if (thisArg.equals("-transferMaster")) { + + // TODO: it wouldn't be too hard to allow "-force" as a + // node name. 
+ // + if (argc < nArgs && "-force".equals(argv[argc])) { + forceFlag = true; + argc++; + } + if (argc + 1 < nArgs) { + nodeName = argv[argc++]; + + /* + * Allow either + * -transferMaster mercury,venus 900 ms + * or + * -transferMaster mercury,venus "900 ms" + */ + if (argc + 1 < nArgs && argv[argc + 1].charAt(0) != '-') { + timeout = argv[argc] + " " + argv[argc + 1]; + argc += 2; + } else { + timeout = argv[argc++]; + } + + actions.add(Command.TRANSFER_MASTER); + } else { + printUsage + ("-transferMaster requires at least two arguments"); + } + } else if (thisArg.equals("-netProps")) { + if (argc < nArgs) { + netPropsName = argv[argc++]; + } else { + printUsage("-netProps requires an argument"); + } + } else if (thisArg.equals("-deleteMember")) { + if (argc < nArgs) { + nodeName = argv[argc++]; + actions.add(Command.DELETE); + } else { + printUsage("-deleteMember requires an argument"); + } + } else { + printUsage(thisArg + " is not a valid argument"); + } + } + + ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.createDefault(); + if (netPropsName != null) { + try { + repNetConfig = + ReplicationNetworkConfig.create(new File(netPropsName)); + } catch (FileNotFoundException fnfe) { + printUsage("The net properties file " + netPropsName + + " does not exist: " + fnfe.getMessage()); + } catch (IllegalArgumentException iae) { + printUsage("The net properties file " + netPropsName + + " is not valid: " + iae.getMessage()); + } + } + + this.channelFactory = initializeFactory(repNetConfig, groupName); + } + + /* Execute commands */ + private void run() + throws Exception { + + createGroupAdmin(); + + if (actions.size() == 0) { + return; + } + + for (Command action : actions) { + switch (action) { + + /* Dump the group information. */ + case DUMP: + dumpGroup(); + break; + + /* Remove a member. */ + case REMOVE: + removeMember(nodeName); + break; + + /* Transfer the current mastership to a specified node. */ + case TRANSFER_MASTER: + transferMaster(nodeName, timeout); + break; + + /* Update the network address of a specified node. */ + case UPDATE_ADDRESS: + updateAddress(nodeName, newHostName, newPort); + break; + + /* Delete a member */ + case DELETE: + deleteMember(nodeName); + break; + + default: + throw new AssertionError(); + } + } + } + + private DbGroupAdmin() { + } + + /** + * Create a DbGroupAdmin instance for programmatic use. + * + * @param groupName replication group name + * @param helperSockets set of host and port pairs for group members which + * can be queried to obtain group information. + */ + /* + * SSL deferred + * This constructor does not support non-default service net properties. + * See the other constructor forms which allow setting of net properties. + */ + public DbGroupAdmin(String groupName, + Set helperSockets) { + this(groupName, helperSockets, (ReplicationNetworkConfig)null); + } + + /** + * @hidden SSL deferred + * Create a DbGroupAdmin instance for programmatic use. + * + * @param groupName replication group name + * @param helperSockets set of host and port pairs for group members which + * can be queried to obtain group information. + * @param netPropsFile a File containing replication net property + * settings. This parameter is ignored if null. + * @throws FileNotFoundException if the netPropsFile does not exist + * @throws IllegalArgumentException if the netPropsFile contains + * invalid settings. 
+ */ + public DbGroupAdmin(String groupName, + Set helperSockets, + File netPropsFile) + throws FileNotFoundException { + + this(groupName, helperSockets, makeRepNetConfig(netPropsFile)); + } + + /** + * @hidden SSL deferred + * Create a DbGroupAdmin instance for programmatic use. + * + * @param groupName replication group name + * @param helperSockets set of host and port pairs for group members which + * can be queried to obtain group information. + * @param netConfig replication net configuration - null allowable + * This parameter is ignored if null. + * @throws IllegalArgumentException if the netProps contains + * invalid settings. + */ + public DbGroupAdmin(String groupName, + Set helperSockets, + ReplicationNetworkConfig netConfig) { + this.groupName = groupName; + this.helperSockets = helperSockets; + this.channelFactory = initializeFactory(netConfig, groupName); + + createGroupAdmin(); + } + + /* Create the ReplicationGroupAdmin object. */ + private void createGroupAdmin() { + if (groupName == null) { + printUsage("Group name must be specified"); + } + + if ((helperSockets == null) || (helperSockets.size() == 0)) { + printUsage("Host and ports of helper nodes must be specified"); + } + + groupAdmin = new ReplicationGroupAdmin( + groupName, helperSockets, channelFactory); + } + + /** + * Display group information. Lists all members and the group master. Can + * be used when reviewing the group configuration. + */ + public void dumpGroup() { + System.out.println(getFormattedOutput()); + } + + /** + * Remove a node from the replication group. Once removed, a + * node cannot be added again to the group under the same node name. + * + *

        {@link NodeType#SECONDARY Secondary} nodes cannot be removed; they + * automatically leave the group when they are shut down or become + * disconnected from the master. + * + * @param name name of the node to be removed + * + * @see ReplicationGroupAdmin#removeMember + */ + /* + * TODO: EXTERNAL is hidden for now. The doc need updated to include + * EXTERNAL when it becomes public. + */ + public void removeMember(String name) { + if (name == null) { + printUsage("Node name must be specified"); + } + + groupAdmin.removeMember(name); + } + + /** + * @hidden internal, for use in disaster recovery [#23447] + * + * Deletes a node from the replication group, which allows the node to be + * added to the group again under the same name. + * + *

        {@link NodeType#SECONDARY Secondary} and {@link NodeType#EXTERNAL + * External} nodes cannot be deleted; they automatically leave the group + * when they are shut down or become disconnected from the master. + * + * @param name name of the node to be deleted + * + * @see ReplicationGroupAdmin#deleteMember + */ + public void deleteMember(String name) { + if (name == null) { + printUsage("Node name must be specified"); + } + + groupAdmin.deleteMember(name); + } + + /** + * Update the network address for a specified node. When updating the + * address of a node, the node cannot be alive. See {@link + * ReplicationGroupAdmin#updateAddress} for more information. + * + *

        The address of a {@link NodeType#SECONDARY} node cannot be updated + * with this method, since nodes must be members but not alive to be + * updated, and secondary nodes are not members when they are not alive. + * To change the address of a secondary node, restart the node with the + * updated address. + * + * @param nodeName the name of the node whose address will be updated + * @param newHostName the new host name of the node + * @param newPort the new port number of the node + */ + @SuppressWarnings("hiding") + public void updateAddress(String nodeName, + String newHostName, + int newPort) { + if (nodeName == null || newHostName == null) { + printUsage("Node name and new host name must be specified"); + } + + if (newPort <= 0) { + printUsage("Port of the new network address must be specified"); + } + + groupAdmin.updateAddress(nodeName, newHostName, newPort); + } + + /** + * Transfers the master role from the current master to one of the + * electable replicas specified in the argument list. + * + * @param nodeList comma-separated list of nodes + * @param timeout in + * same form as accepted by duration config params + * + * @see ReplicatedEnvironment#transferMaster + */ + @SuppressWarnings("hiding") + public void transferMaster(String nodeList, String timeout) { + String result = + groupAdmin.transferMaster(parseNodes(nodeList), + PropUtil.parseDuration(timeout), + TimeUnit.MILLISECONDS, + forceFlag); + System.out.println("The new master is: " + result); + } + + private Set parseNodes(String nodes) { + if (nodes == null) { + throw new IllegalArgumentException("node list may not be null"); + } + StringTokenizer st = new StringTokenizer(nodes, ","); + Set set = new HashSet(); + while (st.hasMoreElements()) { + set.add(st.nextToken()); + } + return set; + } + + /* + * This method presents group information in a user friendly way. Internal + * fields are hidden. + */ + private String getFormattedOutput() { + StringBuilder sb = new StringBuilder(); + RepGroupImpl repGroupImpl = groupAdmin.getGroup().getRepGroupImpl(); + + /* Get the master node name. */ + String masterName = groupAdmin.getMasterNodeName(); + + /* Get the electable nodes information. */ + sb.append("\nGroup: " + repGroupImpl.getName() + "\n"); + sb.append("Electable Members:\n"); + Set nodes = repGroupImpl.getElectableMembers(); + if (nodes.size() == 0) { + sb.append(" No electable members\n"); + } else { + for (RepNodeImpl node : nodes) { + String type = + masterName.equals(node.getName()) ? "master, " : ""; + sb.append(" " + node.getName() + " (" + type + + node.getHostName() + ":" + node.getPort() + ", " + + node.getBarrierState() + ")\n"); + } + } + + /* Get the monitors information. 
*/ + sb.append("\nMonitor Members:\n"); + nodes = repGroupImpl.getMonitorMembers(); + if (nodes.size() == 0) { + sb.append(" No monitors\n"); + } else { + for (RepNodeImpl node : nodes) { + sb.append(" " + node.getName() + " (" + node.getHostName() + + ":" + node.getPort() + ")\n"); + } + } + + /* Get information about secondary nodes */ + sb.append("\nSecondary Members:\n"); + nodes = repGroupImpl.getSecondaryMembers(); + if (nodes.isEmpty()) { + sb.append(" No secondary members\n"); + } else { + for (final RepNodeImpl node : nodes) { + sb.append(" " + node.getName() + " (" + node.getHostName() + + ":" + node.getPort() + ", " + + node.getBarrierState() + ")\n"); + } + } + + /* Get information about external nodes */ + sb.append("\nExternal Members:\n"); + nodes = repGroupImpl.getExternalMembers(); + if (nodes.isEmpty()) { + sb.append(" No external members\n"); + } else { + for (final RepNodeImpl node : nodes) { + sb.append(" " + node.getName() + " (" + node.getHostName() + + ":" + node.getPort() + ", " + + node.getBarrierState() + ")\n"); + } + } + + return sb.toString(); + } + + private static ReplicationNetworkConfig makeRepNetConfig(File propFile) + throws FileNotFoundException { + + if (propFile == null) { + return ReplicationNetworkConfig.createDefault(); + } + + return ReplicationNetworkConfig.create(propFile); + } + + private static DataChannelFactory initializeFactory( + ReplicationNetworkConfig repNetConfig, + String logContext) { + + if (repNetConfig == null) { + repNetConfig = + ReplicationNetworkConfig.createDefault(); + } + + return DataChannelFactoryBuilder.construct(repNetConfig, logContext); + } +} diff --git a/src/com/sleepycat/je/rep/util/DbPing.java b/src/com/sleepycat/je/rep/util/DbPing.java new file mode 100644 index 0000000..6be64a8 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/DbPing.java @@ -0,0 +1,355 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.util; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.StringTokenizer; + +import com.sleepycat.je.rep.NodeState; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol; +import com.sleepycat.je.rep.impl.BinaryNodeStateProtocol.BinaryNodeStateResponse; +import com.sleepycat.je.rep.impl.BinaryNodeStateService; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.utilint.CmdUtil; + +/** + * This class provides the utility to request the current state of a replica in + * a JE replication group, see more details in + * {@link com.sleepycat.je.rep.NodeState}. + */ +public class DbPing { + /* The name of the state requested node. */ + private String nodeName; + /* The name of group which the requested node joins. */ + private String groupName; + /* The SocketAddress of the requested node. */ + private InetSocketAddress socketAddress; + /* The timeout value for building the connection. */ + private int socketTimeout = 10000; + /* The factory for channel creation */ + private DataChannelFactory channelFactory; + + private static final String undocumentedUsageString = + " -netProps # name of a property file containing\n" + + " # properties needed for replication\n" + + " # service access\n"; + + private static final String usageString = + "Usage: " + CmdUtil.getJavaCommand(DbPing.class) + "\n" + + " -nodeName # name of the node whose state is\n" + + " # requested\n" + + " -groupName # name of the group which the node\n" + + " # joins\n" + + " -nodeHost # the host name and port pair the\n" + + " # node used to join the group\n" + + " -socketTimeout # the timeout value for creating a\n" + + " # socket connection with the node,\n" + + " # default is 10 seconds if not set"; + + /* Undocumented usage - SSL deferred + * -netProps <optional> # name of a property file containing + * # properties needed for replication + * # service access + */ + + /** + * Usage: + *

        +     * java {com.sleepycat.je.rep.util.DbPing |
        +     *       -jar je-<version>.jar DbPing}
        +     *   -nodeName <node name> # name of the node whose state is
        +     *                               # requested
        +     *   -groupName <group name> # name of the group which the node joins
        +     *   -nodeHost <host:port> # the host name and port pair the node
        +     *                               # used to join the group
        +     *   -socketTimeout              # the timeout value for creating a
        +     *                               # socket connection with the node,
        +     *                               # default is 10 seconds if not set
        +     * 
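+     *
+     * DbPing can also be used programmatically. A minimal sketch, assuming a
+     * hypothetical ReplicationGroupAdmin instance named admin and the example
+     * group name used elsewhere in this package:
+     *
+     * for (ReplicationNode node : admin.getGroup().getNodes()) {
+     *     DbPing ping = new DbPing(node, "UniversalRepGroup", 10000);
+     *     System.out.println(ping.getNodeState());
+     * }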
        + */ + public static void main(String args[]) + throws Exception { + + DbPing ping = new DbPing(); + ping.parseArgs(args); + System.out.println(ping.getNodeState()); + } + + /** + * Print usage information for this utility. + * + * @param message the errors description. + */ + private void printUsage(String msg) { + if (msg != null) { + System.err.println(msg); + } + + System.err.println(usageString); + System.exit(-1); + } + + /** + * Parse the command line parameters. + * + * @param argv Input command line parameters. + */ + private void parseArgs(String argv[]) { + int argc = 0; + int nArgs = argv.length; + String netPropsName = null; + + if (nArgs == 0) { + printUsage(null); + } + + while (argc < nArgs) { + String thisArg = argv[argc++]; + if (thisArg.equals("-nodeName")) { + if (argc < nArgs) { + nodeName = argv[argc++]; + } else { + printUsage("-nodeName requires an argument"); + } + } else if (thisArg.equals("-groupName")) { + if (argc < nArgs) { + groupName = argv[argc++]; + } else { + printUsage("-groupName requires an argument"); + } + } else if (thisArg.equals("-nodeHost")) { + if (argc < nArgs) { + StringTokenizer st = + new StringTokenizer(argv[argc++], ":"); + if (st.countTokens() != 2) { + printUsage("Argument for -nodeHost is not valid."); + } + try { + socketAddress = new InetSocketAddress + (st.nextToken(), Integer.parseInt(st.nextToken())); + } catch (NumberFormatException e) { + printUsage("the port of -nodeHost is not valid"); + } + } else { + printUsage("-nodeHost requires an argument"); + } + } else if (thisArg.equals("-socketTimeout")) { + if (argc < nArgs) { + try { + socketTimeout = Integer.parseInt(argv[argc++]); + } catch (NumberFormatException e) { + printUsage("Argument for -socketTimeout is not valid"); + } + } else { + printUsage("-socketTimeout requires an argument"); + } + } else if (thisArg.equals("-netProps")) { + if (argc < nArgs) { + netPropsName = argv[argc++]; + } else { + printUsage("-netProps requires an argument"); + } + } else { + printUsage(thisArg + " is not a valid argument"); + } + } + + if (socketTimeout <= 0) { + printUsage("-socketTimeout requires a positive integer number"); + } + + ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.createDefault(); + if (netPropsName != null) { + try { + repNetConfig = + ReplicationNetworkConfig.create(new File(netPropsName)); + } catch (FileNotFoundException fnfe) { + printUsage("The net properties file " + netPropsName + + " does not exist: " + fnfe.getMessage()); + } catch (IllegalArgumentException iae) { + printUsage("The net properties file " + netPropsName + + " is not valid: " + iae.getMessage()); + } + } + + if (nodeName == null || groupName == null || socketAddress == null) { + printUsage("Node name, group name and the node host port are " + + "mandatory arguments, please configure."); + } + + this.channelFactory = initializeFactory(repNetConfig, nodeName); + } + + private DbPing() { + } + + /** + * Create a DbPing instance for programmatic use. + * + * @param repNode a class that implements + * {@link com.sleepycat.je.rep.ReplicationNode} + * @param groupName name of the group which the node joins + * @param socketTimeout timeout value for creating a socket connection + * with the node + */ + /* + * SSL deferred + * This constructor form does not support setting of non-default service + * access properties. 
+ */ + public DbPing(ReplicationNode repNode, + String groupName, + int socketTimeout) { + this(repNode, groupName, socketTimeout, (ReplicationNetworkConfig)null); + } + + /** + * @hidden SSL deferred + * Create a DbPing instance for programmatic use. + * + * @param repNode a class that implements + * {@link com.sleepycat.je.rep.ReplicationNode} + * @param groupName name of the group which the node joins + * @param socketTimeout timeout value for creating a socket connection + * with the node + * @param netPropsFile a File containing replication net property + * settings. Null is allowed. + * @throws FileNotFoundException if the netPropsFile does not exist + * @throws IllegalArgumentException if the netProps file contains + * invalid settings. + */ + public DbPing(ReplicationNode repNode, + String groupName, + int socketTimeout, + File netPropsFile) + throws FileNotFoundException, IllegalArgumentException { + + this(repNode, groupName, socketTimeout, makeRepNetConfig(netPropsFile)); + } + + /** + * @hidden SSL deferred + * Create a DbPing instance for programmatic use. + * + * @param repNode a class that implements + * {@link com.sleepycat.je.rep.ReplicationNode} + * @param groupName name of the group which the node joins + * @param socketTimeout timeout value for creating a socket connection + * with the node + * @param netConfig a replication-net configuration object + * property settings. Null is allowed. + * @throws IllegalArgumentException if the netProps contains invalid + * settings. + */ + public DbPing(ReplicationNode repNode, + String groupName, + int socketTimeout, + ReplicationNetworkConfig netConfig) { + this(repNode, groupName, socketTimeout, + initializeFactory(netConfig, repNode.getName())); + } + + /** + * @hidden SSL deferred + * Create a DbPing instance for programmatic use. + * + * @param repNode a class that implements + * {@link com.sleepycat.je.rep.ReplicationNode} + * @param groupName name of the group which the node joins + * @param socketTimeout timeout value for creating a socket connection + * with the node + * @param channelFactory the factory for channel creation + * @throws IllegalArgumentException if the netProps contains invalid + * settings. + */ + public DbPing(ReplicationNode repNode, + String groupName, + int socketTimeout, + DataChannelFactory channelFactory) { + this.nodeName = repNode.getName(); + this.groupName = groupName; + this.socketAddress = repNode.getSocketAddress(); + this.socketTimeout = socketTimeout; + this.channelFactory = channelFactory; + } + + /* Get the state of the specified node. */ + public NodeState getNodeState() + throws IOException, ServiceConnectFailedException { + + BinaryNodeStateProtocol protocol = + new BinaryNodeStateProtocol(NameIdPair.NOCHECK, null); + DataChannel channel = null; + + try { + /* Build the connection. */ + channel = channelFactory.connect( + socketAddress, + new ConnectOptions(). + setTcpNoDelay(true). + setOpenTimeout(socketTimeout). + setReadTimeout(socketTimeout)); + ServiceDispatcher.doServiceHandshake + (channel, BinaryNodeStateService.SERVICE_NAME); + + /* Send a NodeState request to the node. */ + protocol.write + (protocol.new BinaryNodeStateRequest(nodeName, groupName), + channel); + + /* Get the response and return the NodeState. 
*/ + BinaryNodeStateResponse response = + protocol.read(channel, BinaryNodeStateResponse.class); + + return response.convertToNodeState(); + } finally { + if (channel != null) { + channel.close(); + } + } + } + + private static ReplicationNetworkConfig makeRepNetConfig(File propFile) + throws FileNotFoundException { + + if (propFile == null) { + return ReplicationNetworkConfig.createDefault(); + } + + return ReplicationNetworkConfig.create((propFile)); + } + + private static DataChannelFactory initializeFactory( + ReplicationNetworkConfig repNetConfig, + String logContext) { + + if (repNetConfig == null) { + repNetConfig = ReplicationNetworkConfig.createDefault(); + } + + return DataChannelFactoryBuilder.construct(repNetConfig, logContext); + } +} diff --git a/src/com/sleepycat/je/rep/util/DbResetRepGroup.java b/src/com/sleepycat/je/rep/util/DbResetRepGroup.java new file mode 100644 index 0000000..ce6d1f0 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/DbResetRepGroup.java @@ -0,0 +1,267 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import static com.sleepycat.je.rep.impl.RepParams.NODE_HOST_PORT; + +import java.io.File; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepParams; + +/** + * A utility to reset the members of a replication group, replacing the group + * with a new group consisting of a single new member as described by the + * arguments supplied to the utility. + *

        + * This utility is useful when a copy of an existing replicated environment + * needs to be used at a different site, with the same data, but with a + * different initial node that can be used to grow the replication group as + * usual. The utility can also be used to change the group name associated with + * the environment. + *

+ * The reset environment has a different identity from the environment before + * the reset operation, although it contains the same application data. To + * avoid confusion, the reset environment is assigned a new internal unique id. + * The unique id is checked whenever nodes attempt to communicate with each + * other, and ensures that all nodes in a group are dealing with the same + * data. + *

        + * The reset process is typically accomplished using the steps outlined below. + * It's good practice to back up your environment before running any utilities + * that modify an environment. + *

          + *
1. Use {@code DbResetRepGroup} to reset an existing environment. + * {@code DbResetRepGroup} can be used as a command line utility, and must be + * executed locally on the host specified in the -nodeHostPort argument. That + * host must also contain the environment directory. Alternatively, + * {@code DbResetRepGroup} may be used programmatically through the provided + * APIs.
2. Once reset, the environment can be opened with a + * {@code ReplicatedEnvironment}, using the same node configuration as the one + * that was passed in to the utility. No helper host configuration is needed. + * Since the group consists of a single node, it will assume the role of a + * Master, so long as it is created as an electable node. +
3. Additional nodes may now be created and can join the group as newly + * created replicas, as described in {@code ReplicatedEnvironment}. Since these + * new nodes are empty, they should be configured to use the new master as + * their helper node, and will go through the replication node lifecycle to + * populate their environment directories. In this case, there will be data in + * the reset master that can only be transferred to the replica through a file + * copy executed with the help of a {@link com.sleepycat.je.rep.NetworkRestore}.
        + *

        + * For example: + * + *

        + * // Run the utility on a copy of an existing replicated environment. Usually
        + * // this environment will have originated on a different node and its
        + * // replication group information will contain meta data referring to its
        + * // previous host. The utility will reset this metadata so that it has a
        + * // rep group (UniversalRepGroup) with a single node named nodeMars. The node
        + * // is associated with the machine mars and will communicate on port 5001.
        + *
        + * DbResetRepGroup resetUtility =
        + *     new DbResetRepGroup(envDirMars,          // env home dir
        + *                         "UniversalRepGroup", // group name
        + *                         "nodeMars",          // node name
        + *                         "mars:5001");        // node host,port
        + * resetUtility.reset();
        + *
        + * // Open the reset environment; it will take on the role of master.
        + * ReplicatedEnvironment nodeMars = new ReplicatedEnvironment(envDirMars, ...);
        + * ...
        + * // Bring up additional nodes, which will be initialized from
        + * // nodeMars. For example, from the machine venus you can now add a new
+ * // member to the group (UniversalRepGroup) as below.
        + *
        + * ReplicationConfig repConfig = null;
        + * try {
        + *     repConfig = new ReplicationConfig("UniversalRepGroup", // groupName
        + *                                       "nodeVenus",         // nodeName
        + *                                       "venus:5008");       // nodeHostPort
        + *     repConfig.setHelperHosts("mars:5001");
        + *
+ *     nodeVenus = new ReplicatedEnvironment(envDirVenus, repConfig, envConfig);
        + * } catch (InsufficientLogException insufficientLogEx) {
        + *
        + *     // log files will be copied from another node in the group
        + *     NetworkRestore restore = new NetworkRestore();
        + *     restore.execute(insufficientLogEx, new NetworkRestoreConfig());
        + *
        + *     // try opening the node now that the environment files have been
        + *     // restored on this machine.
        + *     nodeVenus = new ReplicatedEnvironment(envDirVenus,
        + *                                           repConfig,
        + *                                           envConfig);
        + * }
        + * ...
        + * 
        + */ +public class DbResetRepGroup { + + private File envHome; + private String groupName; + private String nodeName; + private String nodeHostPort; + + private static final String usageString = + "usage: java -cp je.jar " + + "com.sleepycat.je.rep.util.DbResetRepGroup\n" + + " -h # environment home directory\n" + + " -groupName # replication group name\n" + + " -nodeName # replicated node name\n" + + " -nodeHostPort # host name or IP address\n" + + " and port number to use\n" + + " for this node\n"; + + /** + * Usage: + *
        +     * java -cp je.jar com.sleepycat.je.rep.util.DbResetRepGroup
        +     *   -h <dir>                          # environment home directory
        +     *   -groupName <group name>           # replication group name
        +     *   -nodeName <node name>             # replicated node name
        +     *   -nodeHostPort <host name:port number> # host name or IP address
        +     *                                             and port number to use
        +     *                                             for this node
        +     * 
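+     *
+     * A hypothetical invocation (all argument values are examples only):
+     *
+     * java -cp je.jar com.sleepycat.je.rep.util.DbResetRepGroup \
+     *     -h /data/mars-env \
+     *     -groupName UniversalRepGroup \
+     *     -nodeName nodeMars \
+     *     -nodeHostPort mars:5001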
        + */ + public static void main(String[] args) { + DbResetRepGroup converter = new DbResetRepGroup(); + converter.parseArgs(args); + converter.reset(); + } + + private void printUsage(String msg) { + System.err.println(msg); + System.err.println(usageString); + System.exit(-1); + } + + private void parseArgs(String[] args) { + int argc = 0; + int nArgs = args.length; + + while (argc < nArgs) { + String thisArg = args[argc++].trim(); + if (thisArg.equals("-h")) { + if (argc < nArgs) { + envHome = new File(args[argc++]); + } else { + printUsage("-h requires an argument"); + } + } else if (thisArg.equals("-groupName")) { + if (argc < nArgs) { + groupName = args[argc++]; + } else { + printUsage("-groupName requires an argument"); + } + } else if (thisArg.equals("-nodeName")) { + if (argc < nArgs) { + nodeName = args[argc++]; + } else { + printUsage("-nodeName requires an argument"); + } + } else if (thisArg.equals("-nodeHostPort")) { + if (argc < nArgs) { + nodeHostPort = args[argc++]; + try { + NODE_HOST_PORT.validateValue(nodeHostPort); + } catch (IllegalArgumentException e) { + e.printStackTrace(); + printUsage("-nodeHostPort is illegal!"); + } + } else { + printUsage("-nodeHostPort requires an argument"); + } + } + } + + if (envHome == null) { + printUsage("-h is a required argument."); + } + + if (groupName == null) { + printUsage("-groupName is a required argument."); + } + + if (nodeName == null) { + printUsage("-nodeName is a required argument."); + } + + if (nodeHostPort == null) { + printUsage("-nodeHostPort is a required argument."); + } + } + + private DbResetRepGroup() { + } + + /** + * Create a DbResetRepGroup object for this node. + * + * @param envHome The node's replicated environment directory. The + * directory must be accessible on this host. + * @param groupName The name of the new replication group + * @param nodeName The node's name + * @param nodeHostPort The host and port for this node. The utility + * must be executed on this host. + */ + public DbResetRepGroup(File envHome, + String groupName, + String nodeName, + String nodeHostPort) { + this.envHome = envHome; + this.groupName = groupName; + this.nodeName = nodeName; + this.nodeHostPort = nodeHostPort; + } + + /** + * Replaces the existing group with the new group having a single new node + * as described by the constructor arguments. + * + * @see DbResetRepGroup + */ + public void reset() { + + Durability durability = + new Durability(Durability.SyncPolicy.SYNC, + Durability.SyncPolicy.SYNC, + Durability.ReplicaAckPolicy.NONE); + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(durability); + + ReplicationConfig repConfig = + new ReplicationConfig(groupName, nodeName, nodeHostPort); + repConfig.setHelperHosts(repConfig.getNodeHostPort()); + + /* Force the re-initialization upon open. */ + repConfig.setConfigParam(RepParams.RESET_REP_GROUP.getName(), "true"); + + /* Open the environment, thus replacing the group. */ + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envHome, repConfig, envConfig); + + repEnv.close(); + } +} diff --git a/src/com/sleepycat/je/rep/util/ReplicationGroupAdmin.java b/src/com/sleepycat/je/rep/util/ReplicationGroupAdmin.java new file mode 100644 index 0000000..15f5b57 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ReplicationGroupAdmin.java @@ -0,0 +1,678 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.util; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.logging.Formatter; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.MasterStateException; +import com.sleepycat.je.rep.MasterTransferFailureException; +import com.sleepycat.je.rep.MemberActiveException; +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.NodeState; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicaStateException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationGroup; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.elections.Learner; +import com.sleepycat.je.rep.elections.MasterValue; +import com.sleepycat.je.rep.elections.Protocol; +import com.sleepycat.je.rep.elections.TimebasedProposalGenerator; +import com.sleepycat.je.rep.impl.GroupService; +import com.sleepycat.je.rep.impl.RepGroupProtocol; +import com.sleepycat.je.rep.impl.RepGroupProtocol.EnsureOK; +import com.sleepycat.je.rep.impl.RepGroupProtocol.Fail; +import com.sleepycat.je.rep.impl.RepGroupProtocol.GroupResponse; +import com.sleepycat.je.rep.impl.RepGroupProtocol.TransferOK; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.OK; +import com.sleepycat.je.rep.impl.TextProtocol.ProtocolError; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.ReplicationFormatter; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Administrative APIs for use by applications which do not have direct access + * to a replicated environment. The class supplies methods that can be + * used to list group members, remove members, update network addresses, and + * find the current master. + * + * Information is found and updated by querying nodes in the group. Because of + * that, ReplicationGroupAdmin can only obtain information when there is at + * least one node alive in the replication group. + */ +public class ReplicationGroupAdmin { + + private final String groupName; + private Set helperSockets; + private final Protocol electionsProtocol; + private final RepGroupProtocol groupProtocol; + private final Logger logger; + private final Formatter formatter; + private final DataChannelFactory channelFactory; + + /** + * Constructs a group admin object. 
+ * + * @param groupName the name of the group to be administered + * @param helperSockets the sockets on which it can contact helper nodes + * in the replication group to carry out admin services. + */ + public ReplicationGroupAdmin(String groupName, + Set helperSockets) { + this(groupName, helperSockets, + ReplicationNetworkConfig.createDefault()); + } + + /** + * @hidden SSL deferred + * Constructs a group admin object. + * + * @param groupName the name of the group to be administered + * @param helperSockets the sockets on which it can contact helper nodes + * in the replication group to carry out admin services. + * @param repNetConfig a network configuration to use + */ + public ReplicationGroupAdmin(String groupName, + Set helperSockets, + ReplicationNetworkConfig repNetConfig) { + this(groupName, helperSockets, + initializeFactory(repNetConfig, groupName)); + } + + /** + * @hidden SSL deferred + * Constructs a group admin object. + * + * @param groupName the name of the group to be administered + * @param helperSockets the sockets on which it can contact helper nodes + * in the replication group to carry out admin services. + * @param channelFactory the factory for channel creation + */ + public ReplicationGroupAdmin(String groupName, + Set helperSockets, + DataChannelFactory channelFactory) { + this.groupName = groupName; + this.helperSockets = helperSockets; + this.channelFactory = channelFactory; + + electionsProtocol = + new Protocol(TimebasedProposalGenerator.getParser(), + MasterValue.getParser(), + groupName, + NameIdPair.NOCHECK, + null /* repImpl */, + channelFactory); + groupProtocol = + new RepGroupProtocol(groupName, NameIdPair.NOCHECK, null, + channelFactory); + logger = LoggerUtils.getLoggerFixedPrefix + (getClass(), NameIdPair.NOCHECK.toString()); + formatter = new ReplicationFormatter(NameIdPair.NOCHECK); + } + + /** + * Returns the helper sockets being used to contact a replication group + * member, in order to query for the information. + * + * @return the set of helper sockets. + */ + public Set getHelperSockets() { + return helperSockets; + } + + /** + * Sets the helper sockets being used to contact a replication group + * member, in order to query for the information. + * + * @param helperSockets the sockets on which it can contact helper nodes + * in the replication group to carry out admin services. + */ + public void setHelperSockets(Set helperSockets) { + this.helperSockets = helperSockets; + } + + /** + * Returns the name of the replication group. + * + * @return the group name. + */ + public String getGroupName() { + return groupName; + } + + /** + * Returns the socket address associated with the node that's currently + * the master. + * + * @return the socket address associated with the master + * + * @throws UnknownMasterException if the master was not found + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + private InetSocketAddress getMasterSocket() + throws UnknownMasterException, + EnvironmentFailureException { + + MasterValue masterValue = Learner.findMaster(electionsProtocol, + helperSockets, + logger, + null, + formatter); + return new InetSocketAddress(masterValue.getHostName(), + masterValue.getPort()); + } + + /** + * Returns the node name associated with the master + * + * @return the master node ID + * + * @throws UnknownMasterException if the master was not found + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. 
+ */ + public String getMasterNodeName() + throws UnknownMasterException, + EnvironmentFailureException { + MasterValue masterValue = Learner.findMaster(electionsProtocol, + helperSockets, + logger, + null, + formatter); + return masterValue.getNodeName(); + } + + /** + * @hidden + * Internal implementation class. + * + * Ensures that this monitor node is a member of the replication group, + * adding it to the group if it isn't already. + * + * @param monitor the monitor node + * + * @return the master node that was contacted to ensure the monitor + * + * @throws UnknownMasterException if the master was not found + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + public ReplicationNode ensureMonitor(RepNodeImpl monitor) + throws UnknownMasterException, + EnvironmentFailureException { + + if (!monitor.getType().isMonitor()) { + throw EnvironmentFailureException.unexpectedState + ("Node type must be Monitor not: " + monitor.getType()); + } + + MasterValue masterValue = Learner.findMaster(electionsProtocol, + helperSockets, + logger, + null, + formatter); + EnsureOK okResp = (EnsureOK) doMessageExchange + (groupProtocol.new EnsureNode(monitor), EnsureOK.class); + + monitor.getNameIdPair().update(okResp.getNameIdPair()); + return new RepNodeImpl(new NameIdPair(masterValue.getNodeName()), + NodeType.ELECTABLE, + masterValue.getHostName(), + masterValue.getPort(), + /* JE version on monitor is not known */ + null); + } + + /** + * Removes this node from the group, so that it is no longer a member of + * the group. When removed, it will no longer be able to connect to a + * master, nor can it participate in elections. If the node is a {@link + * com.sleepycat.je.rep.monitor.Monitor} it will no longer be informed of + * election results. Once removed, a node cannot be added again to the + * group under the same node name. + *

        + * Ideally, the node being removed should be shut down before this call is + * issued. + *

+ * If the node is an active Replica, the master will terminate + * its connection with the node and will not allow the replica to reconnect + * with the group, since it is no longer a member of the group. If the node + * wishes to rejoin, it should do so with a different node name. + *

+ * An active Master cannot be removed. It must first be shut down, or + * transition to the Replica state, before it can be removed + * from the group. + *

        + * {@link NodeType#SECONDARY Secondary} nodes cannot be removed; they + * automatically leave the group when they are shut down or become + * disconnected from the master. + * + * @param nodeName identifies the node being removed from the group + * + * @throws UnknownMasterException if the master was not found + * + * @throws IllegalArgumentException if the type of the node is {@code + * SECONDARY} + * + * @throws MemberNotFoundException if the node denoted by + * nodeName is not a member of the replication group + * + * @throws MasterStateException if the member being removed is currently + * the Master + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * @see Adding and Removing Nodes From the Group + */ + /* + * TODO: EXTERNAL is hidden for now. The doc need updated to include + * EXTERNAL when it becomes public. + */ + public void removeMember(String nodeName) + throws UnknownMasterException, + MemberNotFoundException, + MasterStateException, + EnvironmentFailureException { + + final String masterErrorMessage = "Cannot remove an active master"; + final RequestMessage request = + groupProtocol.new RemoveMember(nodeName); + + final RepNodeImpl node = checkMember( + nodeName, masterErrorMessage, /* electableOnly */ false); + if (node.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Cannot remove node with transient ID: " + nodeName); + } + + doMessageExchange(request, OK.class); + } + + /** + * @hidden internal, for use in disaster recovery [#23447] + * + * Deletes this node from the group, so that it is no longer a member of + * the group. When deleted, it will not connect to a master, or participate + * in elections until the environment is reopened. If the node is a {@link + * com.sleepycat.je.rep.monitor.Monitor} it will no longer be informed of + * election results. Unlike removed nodes, deleted nodes are completely + * removed from the group, so they can be added again to the group under + * the same node name. + *

        + * The node being deleted must be shut down before this call is issued. + *

+ * If the node is an active Replica, the master will terminate + * its connection with the node. + *

+ * An active Master cannot be deleted. It must first be shut down, or + * transition to the Replica state, before it can be deleted + * from the group. + *

        + * {@link NodeType#SECONDARY Secondary} and + * {@link NodeType#EXTERNAL External} nodes cannot be deleted; they + * automatically leave the group when they are shut down or become + * disconnected from the master. + * + * @param nodeName identifies the node being deleted from the group + * + * @throws UnknownMasterException if the master was not found + * + * @throws MemberActiveException if the type of the node is {@code + * SECONDARY} or {@code EXTERNAL}, or if the node is active + * + * @throws MemberNotFoundException if the node denoted by + * nodeName is not a member of the replication group + * + * @throws MasterStateException if the member being deleted is currently + * the Master + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + public void deleteMember(String nodeName) + throws UnknownMasterException, + MemberActiveException, + MemberNotFoundException, + MasterStateException, + EnvironmentFailureException { + + final String masterErrorMessage = "Cannot delete an active master"; + final RequestMessage request = + groupProtocol.new DeleteMember(nodeName); + + final RepNodeImpl node = checkMember( + nodeName, masterErrorMessage, /* electableOnly */ false); + if (node.getType().hasTransientId()) { + throw new IllegalArgumentException( + "Cannot delete node with transient ID: " + nodeName); + } + + doMessageExchange(request, OK.class); + } + + /** + * Returns the current composition of the group from the Master. + * + * @return the group description + * + * @throws UnknownMasterException if the master was not found + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs + */ + public ReplicationGroup getGroup() + throws UnknownMasterException, + EnvironmentFailureException { + + GroupResponse resp = (GroupResponse) doMessageExchange + (groupProtocol.new GroupRequest(), GroupResponse.class); + + return new ReplicationGroup(resp.getGroup()); + } + + /** + * Returns the {@link com.sleepycat.je.rep.NodeState state} of a replicated + * node and state of the application where the node is + * running in. + * + * @param repNode a ReplicationNode includes those information which are + * needed to connect to the node + * @param socketConnectTimeout the timeout value for creating a socket + * connection with the replicated node + * + * @return the state of the replicated node + * + * @throws IOException if the machine is down or no response is returned + * + * @throws ServiceConnectFailedException if can't connect to the service + * running on the replicated node + */ + public NodeState getNodeState(ReplicationNode repNode, + int socketConnectTimeout) + throws IOException, ServiceConnectFailedException { + + DbPing ping = new DbPing( + repNode, groupName, socketConnectTimeout, channelFactory); + + return ping.getNodeState(); + } + + /** + * Update the network address for a specified member of the replication + * group. When updating the address of this target replication node, the + * node cannot be alive. One common use case is when the replication member + * must be moved to a new host, possibly because of machine failure. + *

+ * To make a network address change, take these steps (a brief usage + * sketch follows the list):

          + *
1. Shut down the node that is being updated.
        2. Use this method to change the hostname and port of the node. + *
        3. Start the node on the new machine, or at its new port, using the new + * hostname/port. If the log files are available at the node, they will + * be reused. A network restore operation may need to be initiated by + * the application to copy over any needed log files if no log files are + * available, or if they have become obsolete. + *
        + *
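+ * A brief sketch of step 2 (node name, host, and port are hypothetical;
+ * admin is a previously constructed ReplicationGroupAdmin):
+ *
+ *   admin.updateAddress("node2", "newhost.example.com", 5002);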

        + * The address of a {@link NodeType#SECONDARY} node cannot be updated with + * this method, since nodes must be members but not alive to be updated, + * and secondary nodes are not members when they are not alive. To change + * the address of a secondary node, restart the node with the updated + * address. + * + * @param nodeName the name of the node whose address will be updated. + * @param newHostName the new host name of the node + * @param newPort the new port number of the node + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs + * + * @throws MasterStateException if the member being updated is currently + * the master + * + * @throws MemberNotFoundException if the node denoted by + * nodeName is not a member of the replication group + * + * @throws ReplicaStateException if the member being updated is currently + * alive + * + * @throws UnknownMasterException if the master was not found + * + * @see DbResetRepGroup DbResetRepGroup, which can be used in a + * related but different use case to copy and move a group. + */ + /* + * TODO: EXTERNAL is hidden for now. The doc need updated to include + * EXTERNAL when it becomes public. + */ + public void updateAddress(String nodeName, String newHostName, int newPort) + throws EnvironmentFailureException, + MasterStateException, + MemberNotFoundException, + ReplicaStateException, + UnknownMasterException { + + final String masterErrorMessage = + "Can't update address for the current master."; + RequestMessage request = + groupProtocol.new UpdateAddress(nodeName, newHostName, newPort); + + checkMember(nodeName, masterErrorMessage, /* electableOnly */ false); + doMessageExchange(request, OK.class); + } + + /** + * Transfers the master state from the current master to one of the + * electable replicas supplied in the argument list. This method sends a + * request to the original master to perform the operation. + * + * @throws MasterTransferFailureException if the master transfer operation + * fails + * + * @throws UnknownMasterException if the master was not found + * + * @throws IllegalArgumentException if {@code nodeNames} contains the name + * of a node that is not electable + * + * @see ReplicatedEnvironment#transferMaster + */ + public String transferMaster(Set nodeNames, + int timeout, + TimeUnit timeUnit, + boolean force) + throws MasterTransferFailureException, + UnknownMasterException { + + for (String node : nodeNames) { + checkMember(node, null, /* electableOnly */ true); + } + final String nodeNameList = commaJoin(nodeNames); + final long timeoutMillis = timeUnit.toMillis(timeout); + final RequestMessage transferMaster = + groupProtocol.new TransferMaster(nodeNameList, + timeoutMillis, force); + TransferOK result = + (TransferOK)doMessageExchange(transferMaster, TransferOK.class); + return result.getWinner(); + } + + private String commaJoin(Set words) { + boolean first = true; + StringBuilder sb = new StringBuilder(); + for (String w : words) { + if (!first) { + sb.append(','); + } + sb.append(w); + first = false; + } + return sb.toString(); + } + + /* + * Check that the specified node is an appropriate target. For example, + * make sure it's a valid node in the group, and it's not the same as + * the original node. 
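+     * A null masterErrorMessage skips the is-this-the-master check, and
+     * electableOnly additionally requires that the node's type be
+     * ELECTABLE.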
+ */ + private RepNodeImpl checkMember(String nodeName, + String masterErrorMessage, + boolean electableOnly) + throws MasterStateException, + MemberNotFoundException { + + final RepGroupImpl group = getGroup().getRepGroupImpl(); + final RepNodeImpl node = group.getNode(nodeName); + + /* Check the membership. */ + if (node == null || + /* Creation is not yet acknowledged */ + (!node.isRemoved() && !node.isQuorumAck())) { + throw new MemberNotFoundException("Node: " + nodeName + + " is not a member of the " + + "group: " + groupName); + } + + if (electableOnly && !node.getType().isElectable()) { + throw new IllegalArgumentException("Node: " + nodeName + + " must have node type" + + " ELECTABLE, was " + + node.getType()); + } + + if (node.isRemoved() && node.isQuorumAck()) { + throw new MemberNotFoundException("Node: " + nodeName + + " is not currently a member " + + "of the group: " + groupName + + ", it has been removed."); + } + + /* Check if the node itself is the master. */ + if (masterErrorMessage != null) { + final InetSocketAddress masterAddress = getMasterSocket(); + if (masterAddress.equals(node.getSocketAddress())) { + throw new MasterStateException(masterErrorMessage); + } + } + + return node; + } + + /* Do a message exchange with the targeted master. */ + private ResponseMessage doMessageExchange(RequestMessage request, + Class respClass) + throws EnvironmentFailureException, + MasterStateException, + MemberNotFoundException, + UnknownMasterException { + + /* Do the communication. */ + final InetSocketAddress masterAddress = getMasterSocket(); + final MessageExchange me = groupProtocol.new MessageExchange + (masterAddress, GroupService.SERVICE_NAME, request); + me.run(); + + ResponseMessage resp = me.getResponseMessage(); + + if (resp == null) { + if (me.getException() != null) { + throw new UnknownMasterException + ("Problem communicating with master.", me.getException()); + } + + /* + * Returning null on success is part of the message protocol, the + * caller expects it. + */ + return null; + } + + if (respClass == null && resp instanceof Fail) { + throw getException(resp); + } + + if (respClass != null && + !(resp.getClass().getName().equals(respClass.getName()))) { + throw getException(resp); + } + + return resp; + } + + /** + * Examines the response and generates a meaningful error exception. + */ + private DatabaseException getException(ResponseMessage resp) { + if (resp == null) { + return EnvironmentFailureException.unexpectedState + ("No response to request"); + } + + if (resp instanceof Fail) { + Fail fail = (Fail) resp; + switch (fail.getReason()) { + case MEMBER_NOT_FOUND: + return new MemberNotFoundException(fail.getMessage()); + case IS_MASTER: + return new MasterStateException(fail.getMessage()); + case IS_REPLICA: + return new ReplicaStateException(fail.getMessage()); + case TRANSFER_FAIL: + // TODO: not worth it for now, but it wouldn't be hard to + // distinguish IllegalArg. cases here + return new MasterTransferFailureException + (fail.getMessage()); + default: + return EnvironmentFailureException. 
+ unexpectedState(fail.getMessage()); + } + } + + if (resp instanceof ProtocolError) { + return EnvironmentFailureException.unexpectedState + (((ProtocolError)resp).getMessage()); + } + + return EnvironmentFailureException.unexpectedState + ("Response not recognized: " + resp); + } + + private static DataChannelFactory initializeFactory( + ReplicationNetworkConfig repNetConfig, + String logContext) { + + return DataChannelFactoryBuilder.construct(repNetConfig, logContext); + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/Block.java b/src/com/sleepycat/je/rep/util/ldiff/Block.java new file mode 100644 index 0000000..9a47c4b --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/Block.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.util.Arrays; +import java.util.Formatter; + +public class Block implements java.io.Serializable { + + private static final long serialVersionUID = 111858779935447845L; + + /* The block ID. */ + private final int blockId; + + /* The actual records that the block holds. */ + int numRecords; + + /* + * For debugging support and to minimize the actual data that is + * transferred over the network, I store the beginKey and endKey as the + * index to each of the block. + * + * TODO to optimize: replace the {beginKey, endKey} by something like LSN. + */ + + /* The database key that the current block starts with. */ + private byte[] beginKey; + + /* The database key that the current block ends with. */ + private byte[] beginData; + + /* The rolling checksum computed from the sequence of Adler32 checksums. */ + private long rollingChksum; + + /* An md5 hash is also computed for each block. 
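+     * The rolling checksum is cheap to maintain as a comparison window
+     * slides through the records, but it is weak; the md5 hash is compared
+     * only when rolling checksums already match, to rule out collisions.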
*/ + private byte[] md5Hash; + + public Block(int blockId) { + this.blockId = blockId; + } + + int getBlockId() { + return blockId; + } + + int getNumRecords() { + return numRecords; + } + + public void setNumRecords(int numRecords) { + this.numRecords = numRecords; + } + + byte[] getBeginKey() { + return beginKey; + } + + public void setBeginKey(byte[] beginKey) { + this.beginKey = beginKey; + } + + byte[] getBeginData() { + return beginData; + } + + public void setBeginData(byte[] beginData) { + this.beginData = beginData; + } + + long getRollingChksum() { + return rollingChksum; + } + + public void setRollingChksum(long rollingChksum) { + this.rollingChksum = rollingChksum; + } + + byte[] getMd5Hash() { + return md5Hash; + } + + public void setMd5Hash(byte[] md5Hash) { + this.md5Hash = md5Hash; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (!(o instanceof Block)) { + return false; + } + final Block other = (Block) o; + return (this.blockId == other.blockId) && + (this.numRecords == other.numRecords) && + Arrays.equals(this.beginKey, other.beginKey) && + Arrays.equals(this.beginData, other.beginData) && + (this.rollingChksum == other.rollingChksum) && + Arrays.equals(this.md5Hash, other.md5Hash); + } + + @Override + public String toString() { + final Formatter fmt = new Formatter(); + fmt.format("Block %d: rollingChksum=%x md5Hash=%s numRecords=%d", + blockId, rollingChksum, Arrays.toString(md5Hash), + numRecords); + return fmt.toString(); + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/BlockBag.java b/src/com/sleepycat/je/rep/util/ldiff/BlockBag.java new file mode 100644 index 0000000..e88910b --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/BlockBag.java @@ -0,0 +1,183 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; + +/** + * A bag of Blocks used during the LDiff process. Blocks are accessed by their + * checksum; when checksums collide, blocks are returned in insertion order. + */ +public class BlockBag implements Iterable { + /* Map checksums to the corresponding block's index in blocks. */ + private final HashMap> chksums; + /* Maintain the list of blocks in insertion order. */ + private final List blocks; + + /* + * The index in blocks of the first block that has not yet been removed. + * Items in blocks prior to blockIndex have been deleted from the bag. + */ + private int blockIndex; + + /** + * Instantiate a new BlockBag object. + */ + public BlockBag() { + blockIndex = 0; + blocks = new ArrayList(); + chksums = new HashMap>(); + } + + /** + * Adds a new Block to the bag. + * + * @param b The Block to be added. 
+ */ + public void add(Block b) { + final Long chksum = b.getRollingChksum(); + final Integer indx = blocks.size(); + blocks.add(b); + List indices = chksums.get(chksum); + if (indices == null) { + indices = new ArrayList(); + } + indices.add(indx); + chksums.put(chksum, indices); + } + + /** + * Returns all Blocks in the bag with a given checksum. + * + * @param chksum The checksum to match + * @return A List of blocks with the given checksum, in insertion order, or + * null if no matching blocks were found. + */ + public List get(long chksum) { + List indices; + List ret; + + ret = new ArrayList(); + indices = chksums.get(new Long(chksum)); + if (indices == null) { + return null; + } + for (Integer indx : indices) { + int i = indx.intValue(); + if (i >= blockIndex) + ret.add(blocks.get(i)); + } + + if (ret.size() == 0) { + return null; + } + return ret; + } + + /** + * Returns an iterator over the blocks in the bag, in insertion order. + * + * @return an iterator over the blocks in the bag, in insertion order. + */ + @Override + public Iterator iterator() { + return new BagIterator(); + } + + /** + * Removes the given Block, plus any blocks inserted previous to the given + * Block. + * + * @param b The Block to remove. + * @return A List of all unmatched blocks, or null + */ + public List remove(Block b) { + final int startIndex = blockIndex; + while (blockIndex < blocks.size()) { + Block b2 = blocks.get(blockIndex); + blockIndex++; + if (b == b2) { + break; + } + } + + return (blockIndex - startIndex <= 1) ? null : blocks.subList( + startIndex, blockIndex - 1); + } + + /** + * Removes all blocks from the bag. + * + * @return A list of all blocks removed, or null if the bag is already + * empty. + */ + public List removeAll() { + List ret; + + ret = new ArrayList(); + while (blockIndex < blocks.size()) { + Block b = blocks.get(blockIndex); + blockIndex++; + ret.add(b); + } + + if (ret.size() == 0) { + return null; + } + return ret; + } + + public int getBlockIndex() { + return blockIndex; + } + + public Block getBlock() { + return blocks.get(blockIndex); + } + + /** + * Returns the number of blocks in this bag. + * + * @return the number of blocks in the bag + */ + public int size() { + return blocks.size() - blockIndex; + } + + private class BagIterator implements Iterator { + private int offset; + + BagIterator() { + offset = 0; + } + + @Override + public boolean hasNext() { + return (offset + blockIndex < blocks.size()); + } + + @Override + public void remove() { + BlockBag.this.remove(blocks.get(blockIndex)); + } + + @Override + public Block next() { + return blocks.get(blockIndex + offset++); + } + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/DiffRecordAnalyzer.java b/src/com/sleepycat/je/rep/util/ldiff/DiffRecordAnalyzer.java new file mode 100644 index 0000000..422ea84 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/DiffRecordAnalyzer.java @@ -0,0 +1,336 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.util.List; +import java.util.HashSet; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.WholeEntry; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.util.ldiff.Protocol.RemoteDiffRequest; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.utilint.VLSN; + +/* + * Class used for figuring out the difference on local and remote databases, + * also getting the VLSN number for different records. + * + * This class only needs to traverse the local and remote databases once and + * do the check, this is because the different areas are sequential on the + * database. + * + * It uses a hash set to save the records on the local different area, and + * another hash set to save the records on the remote different area. Then + * traverse the records in local set to see whether it exists in the remote + * set, and do the same thing on records in remote set. + */ +public class DiffRecordAnalyzer { + public static final long DATABASE_END = -1; + + /* The analysis method used by network LDiff. */ + public static void doAnalysis(Database localDb, + Protocol protocol, + DataChannel channel, + DiffTracker tracker, + boolean doPrint) + throws Exception { + + List regions = tracker.getDiffRegions(); + Cursor localCursor = null; + try { + localCursor = localDb.openCursor(null, null); + + for (MismatchedRegion region : regions) { + if (region.isLocalAdditional()) { + printLocalAdditional(localCursor, region, doPrint); + continue; + } + + if (region.isRemoteAdditional()) { + HashSet records = + getDiffArea(protocol, channel, region); + if (doPrint) { + printAdditional(records, true); + } + records.clear(); + continue; + } + + HashSet localRecords = + getDiffArea(localCursor, region.getLocalBeginKey(), + region.getLocalBeginData(), + region.getLocalDiffSize()); + HashSet remoteRecords = + getDiffArea(protocol, channel, region); + if (doPrint) { + printDiffs(localRecords, remoteRecords); + } + localRecords.clear(); + remoteRecords.clear(); + } + } finally { + if (localCursor != null) { + localCursor.close(); + } + } + } + + /* The analysis method used by two local databases. 
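+     * Both databases are read with local cursors: for each mismatched
+     * region recorded by the DiffTracker, the records on either side are
+     * loaded into hash sets and compared record by record.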
*/ + public static void doAnalysis(Database localDb, + Database remoteDb, + DiffTracker tracker, + boolean doPrint) + throws Exception { + + List regions = tracker.getDiffRegions(); + Cursor localCursor = null; + Cursor remoteCursor = null; + try { + localCursor = localDb.openCursor(null, null); + remoteCursor = remoteDb.openCursor(null, null); + + for (MismatchedRegion region : regions) { + if (region.isLocalAdditional()) { + printLocalAdditional(localCursor, region, doPrint); + continue; + } + + if (region.isRemoteAdditional()) { + HashSet records = + getDiffArea(remoteCursor, region.getRemoteBeginKey(), + region.getRemoteBeginData(), + region.getRemoteDiffSize()); + if (doPrint) { + printAdditional(records, true); + } + records.clear(); + continue; + } + + HashSet localRecords = + getDiffArea(localCursor, region.getLocalBeginKey(), + region.getLocalBeginData(), + region.getLocalDiffSize()); + HashSet remoteRecords = + getDiffArea(remoteCursor, region.getRemoteBeginKey(), + region.getRemoteBeginData(), + region.getRemoteDiffSize()); + if (doPrint) { + printDiffs(localRecords, remoteRecords); + } + localRecords.clear(); + remoteRecords.clear(); + } + } finally { + if (localCursor != null) { + localCursor.close(); + } + if (remoteCursor != null) { + remoteCursor.close(); + } + } + } + + /* Print local additional records. */ + private static void printLocalAdditional(Cursor cursor, + MismatchedRegion region, + boolean doPrint) + throws Exception { + + HashSet records = getDiffArea(cursor, + region.getLocalBeginKey(), + region.getLocalBeginData(), + region.getLocalDiffSize()); + if (doPrint) { + printAdditional(records, false); + } + records.clear(); + } + + private static void printAdditional(HashSet diffRecords, + boolean remote) { + String side = remote ? "Remote" : "Local"; + System.err.println("************************************************"); + System.err.println(side + " database has additional records, the " + + "additional range as following:"); + side = remote ? "remote" : "local"; + for (Record record : diffRecords) { + printRecord(record, side, false); + } + System.err.println("************************************************"); + } + + /* + * Print out the VLSN (if this record has) and the key's context of this + * record. + */ + private static void printRecord(Record record, + String side, + boolean different) { + System.err.print("Record with Key: ["); + byte[] keys = record.getKey(); + for (int i = 0; i < keys.length; i++) { + System.err.print(keys[i]); + if (i < (keys.length - 1)) { + System.err.print(" "); + } + } + System.err.print("]"); + if (record.getVLSN().getSequence() != -1) { + System.err.print(", VLSN: " + record.getVLSN()); + } + if (different) { + System.err.print(" does not exist on " + side + " database"); + } + System.err.println(); + } + + /* Return a different area on local database in a hash set. */ + private static HashSet getDiffArea(Cursor cursor, + byte[] beginKey, + byte[] beginData, + long diffSize) + throws Exception { + + HashSet records = new HashSet(); + LogManager logManager = DbInternal.getNonNullEnvImpl + (cursor.getDatabase().getEnvironment()).getLogManager(); + + DatabaseEntry key = new DatabaseEntry(beginKey); + DatabaseEntry data = new DatabaseEntry(beginData); + boolean scanToEnd = (diffSize == DATABASE_END ? 
true : false); + long count = 1; + + for (OperationStatus status = + cursor.getSearchBoth(key, data, LockMode.DEFAULT); + status == OperationStatus.SUCCESS; + status = cursor.getNext(key, data, LockMode.DEFAULT)) { + + if (!scanToEnd && count > diffSize) { + break; + } + records.add(new Record(key.getData(), data.getData(), + getVLSN(cursor, logManager))); + count++; + } + + return records; + } + + /* Used by LDiffService. */ + public static HashSet getDiffArea(Cursor cursor, + RemoteDiffRequest request) + throws Exception { + + return getDiffArea(cursor, request.getKey(), + request.getData(), request.getDiffSize()); + } + + /* Get the records of a different area on remote database in a HashSet. */ + private static HashSet getDiffArea(Protocol protocol, + DataChannel channel, + MismatchedRegion region) + throws Exception { + + protocol.write(protocol.new RemoteDiffRequest(region), channel); + + /* Check whether getting records on remote database is successful. */ + Message message = protocol.read(channel); + if (message.getOp() == Protocol.ERROR) { + throw new LDiffRecordRequestException + (((Protocol.Error) message).getErrorMessage()); + } + + if (message.getOp() != Protocol.DIFF_AREA_START) { + throw new ProtocolException + (message, Protocol.DiffAreaStart.class); + } + + /* Add those different records until protocol sees an end signal. */ + HashSet records = new HashSet(); + while (true) { + try { + Protocol.RemoteRecord record = + protocol.read(channel, Protocol.RemoteRecord.class); + records.add(new Record(record.getKey(), + record.getData(), + record.getVLSN())); + } catch (ProtocolException pe) { + if (pe.getUnexpectedMessage().getOp() != + Protocol.DIFF_AREA_END) { + throw pe; + } + break; + } + } + + return records; + } + + /* Compare the differences between two sets. */ + private static void printDiffs(HashSet localDiffs, + HashSet remoteDiffs) { + System.err.println("************************************************"); + System.err.println("Different records between local and remote database " + + "in a specific different area."); + for (Record record : localDiffs) { + if (!remoteDiffs.contains(record)) { + printRecord(record, "remote", true); + } + } + + for (Record record : remoteDiffs) { + if (!localDiffs.contains(record)) { + printRecord(record, "local", true); + } + } + System.err.println("************************************************"); + } + + /* + * Get the LSN that the cursor currently points to, there are two cases + * that a record doesn't have a VLSN: + * 1. compare between two local databases. + * 2. compare between two converted environments. + * + * In the above two cases, we actually return NULL_VLSN instead of null, so + * that the message doesn't complain about a null value. + */ + private static VLSN getVLSN(Cursor cursor, LogManager logManager) + throws Exception { + + CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + cursorImpl.latchBIN(); + final long lsn = cursorImpl.getCurrentLsn(); + cursorImpl.releaseBIN(); + + WholeEntry entry = logManager.getLogEntryAllowInvisible(lsn); + + VLSN vlsn = entry.getHeader().getVLSN(); + if (vlsn == null) { + vlsn = VLSN.NULL_VLSN; + } + + return vlsn; + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/DiffTracker.java b/src/com/sleepycat/je/rep/util/ldiff/DiffTracker.java new file mode 100644 index 0000000..d258388 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/DiffTracker.java @@ -0,0 +1,111 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.util.ArrayList; +import java.util.List; + +/* + * This class is used for tracking the different regions between local and + * remote database, it saves the begin key/data pair and size of different area + * on both local and remote database. + */ +public class DiffTracker { + /* Start block for the block different area. */ + private Block startBlock; + /* Start position for the different block in the database. */ + private long remoteStart; + /* Size of a block different area. */ + private long remoteDiffSize; + /* Size of each block. */ + private final long blockSize; + /* Collection of different areas. */ + private final List regions; + + public DiffTracker(long blockSize) { + this.blockSize = blockSize; + regions = new ArrayList(); + } + + /* Set begin key/data pair and start position of a different block area. */ + public void setBlockDiffBegin(Block startBlock, int startIndex) { + this.startBlock = startBlock; + remoteStart = startIndex * blockSize; + } + + /* Calculate the size of a block different area. */ + public void calBlockDiffSize(int currentIndex) { + remoteDiffSize = (currentIndex - 1) * blockSize - remoteStart; + } + + /* Add the different local and remote different areas to the collection. */ + public void addDiffRegion(Window window) { + MismatchedRegion region = new MismatchedRegion(); + setBlockDiff(region, startBlock, remoteDiffSize, true); + setWindowDiff(region, window, window.getDiffSize(), true); + + if (!region.isNull()) { + regions.add(region); + } + + remoteDiffSize = 0; + } + + private void setWindowDiff(MismatchedRegion region, + Window window, + long diffSize, + boolean doCheck) { + if (doCheck && window.getDiffSize() == 0) { + return; + } + + region.setLocalBeginKey(window.getBeginKey()); + region.setLocalBeginData(window.getBeginData()); + region.setLocalDiffSize(diffSize); + } + + private void setBlockDiff(MismatchedRegion region, + Block block, + long diffSize, + boolean doCheck) { + if (doCheck && remoteDiffSize == 0) { + return; + } + + region.setRemoteBeginKey(block.getBeginKey()); + region.setRemoteBeginData(block.getBeginData()); + region.setRemoteDiffSize(diffSize); + } + + /* Add the window additional area to the difference collection. */ + public void addWindowAdditionalDiffs(Window window) { + MismatchedRegion region = new MismatchedRegion(); + setWindowDiff(region, window, DiffRecordAnalyzer.DATABASE_END, false); + regions.add(region); + } + + /* Add the block additional area to the different collection. */ + public void addBlockBagAdditionalDiffs(Window window, BlockBag blkBag) { + MismatchedRegion region = new MismatchedRegion(); + setBlockDiff(region, blkBag.getBlock(), + DiffRecordAnalyzer.DATABASE_END, false); + setWindowDiff(region, window, DiffRecordAnalyzer.DATABASE_END, true); + regions.add(region); + } + + /* Return the different regions. 
*/ + public List getDiffRegions() { + return regions; + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/LDiff.java b/src/com/sleepycat/je/rep/util/ldiff/LDiff.java new file mode 100644 index 0000000..4cb4dc3 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/LDiff.java @@ -0,0 +1,855 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.logging.Level; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.rep.utilint.net.SimpleChannelFactory; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * LDiff provides a mechanism for efficiently comparing two quiescent + * databases, typically residing on different machines connected by a + * network. The comparison is done at the logical level rather than the + * physical level, so that we can compare the contents of replicated databases + * where the logical contents may be identical, but the physical logs may be + * very different. If the databases are found to be different, it provides + * information that would help identify the specific nature of the differences. + * + * This class provides the external API used to initiate a comparison. + * + * For details, please review the document at: + * + * @see + * LDiff + */ +public class LDiff { + + private LDiffConfig cfg; + private File home1, home2; + private String file1, file2; + private DiffTracker tracker; + + private static final String usageString = "usage: " + + CmdUtil.getJavaCommand(LDiff.class) + + "\n" + + " -h

<dir1>[,<dir2>]     # environment home directory\n" +
+        "  [-a]                # analyze diff\n" +
+        "  [-b <blockSize>]    # number of records to put in each block\n" +
+        "  [-m <maxErrors>]    # abort diff after a number of errors\n" +
+        "  [-s <db1>,<db2>]    # database(s) to compare\n" +
+        "  [-q]                # be quiet, do not print to stdout";
+
+    private static final int SOCKET_TIMEOUT_MS = 10000;
+
+    /**
+     * The main method used by the LDiff utility.
+     *
+     * @param args The arguments accepted by the LDiff utility.
+     *
+     *
        +     * usage: java com.sleepycat.je.rep.util.ldiff.LDiff
        +     *             [-s database1,database2] -h dbEnvHome1[,dbEnvHome2]
        +     *             [-a] [-b blockSize] [-m maxErrors] [-q]
        +     * 
        + * + *

        + * -a - generate an analysis of the differences
        + * -b blockSize - the number of records to compare at one time
        + * -h dbEnvHome - the directory or directories containing environment(s) in + * which to perform the ldiff
        + * -m maxErrors - the maximum number of errors to detect before declaring + * the databases different and ending the operation.
        + * -s database1,database2 - the databases to ldiff.
        + * -q - be quiet, do not write to stdout + *

        + *

        + * If ldiff-ing a specific database, two database names must be specified. + * If no database names are given, two environments must be specified. If + * two database names and two environments are specified, the first + * database is opened in the first environment and the second database is + * opened in the second environment. + *
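+ * For example, a hypothetical invocation that compares the userDb
+ * database across two environments and analyzes any differences:
+ *
+ *   java com.sleepycat.je.rep.util.ldiff.LDiff \
+ *       -h /rep/env1,/rep/env2 -s userDb,userDb -a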

        + */ + public static void main(String[] args) { + LDiff differ = new LDiff(); + differ.parseArgs(args); + try { + if (differ.diff()) { + System.exit(0); + } else { + System.exit(1); + } + } catch (Exception e) { + e.printStackTrace(); + System.exit(1); + } + } + + private void parseArgs(String[] argv) { + cfg = new LDiffConfig(); + cfg.setVerbose(true); + + int argc = 0; + int nArgs = argv.length; + while (argc < nArgs) { + String thisArg = argv[argc++]; + if (thisArg.equals("-a")) { + cfg.setDiffAnalysis(true); + } else if (thisArg.equals("-b")) { + if (argc < nArgs) { + try { + cfg.setBlockSize(Integer.parseInt(argv[argc++])); + } catch (NumberFormatException nfe) { + printUsage("-b requires an integer argument"); + } + } else { + printUsage("-b requires an argument"); + } + } else if (thisArg.equals("-h")) { + if (argc < nArgs) { + String[] envDirs = argv[argc++].split(","); + if (envDirs.length > 2) { + printUsage("Only 2 environments supported"); + } + + home1 = new File(envDirs[0]); + if (envDirs.length == 2) { + home2 = new File(envDirs[1]); + } + } else { + printUsage("-h requires an argument"); + } + } else if (thisArg.equals("-m")) { + if (argc < nArgs) { + try { + cfg.setMaxErrors(Integer.parseInt(argv[argc++])); + } catch (NumberFormatException nfe) { + printUsage("-m requires an integer argument"); + } + } else { + printUsage("-m requires an argument"); + } + } else if (thisArg.equals("-s")) { + if (argc < nArgs) { + String[] dbNames = argv[argc++].split(","); + if (dbNames.length != 2) { + printUsage("-s requires two database names"); + } + file1 = dbNames[0]; + file2 = dbNames[1]; + } else { + printUsage("-s requires an argument"); + } + } else if (thisArg.equals("-q")) { + cfg.setVerbose(false); + } else { + printUsage(thisArg + " is not a valid option."); + } + } + + if (home1 == null) { + printUsage("-h is a required argument"); + } + + if (home2 == null && file1 == null) { + printUsage("2 databases must be specified with 1 environment"); + } + } + + private void printUsage(String msg) { + System.err.println(msg); + System.err.println(usageString); + System.exit(-1); + } + + private LDiff() { + } + + /** + * Configure a new object with which to compare two databases. + * + * @param cfg the configuration parameters for the new object. + */ + public LDiff(LDiffConfig cfg) { + super(); + this.cfg = cfg; + } + + /* + * Run an LDiff called from the command line. What actually gets diffed + * depends upon the args passed in, either 2 environments, databases in two + * separate environments or databases in the same environment. + */ + private boolean diff() + throws Exception { + + EnvironmentConfig envConfiguration = new EnvironmentConfig(); + envConfiguration.setReadOnly(true); + envConfiguration.setCachePercent(40); + Environment env1 = new Environment(home1, envConfiguration); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + DbInternal.setUseExistingConfig(dbConfig, true); + + Database db2; + if (home2 != null) { + Environment env2 = new Environment(home2, envConfiguration); + if (file1 == null) { + /* No dbs given, ldiff the environments. 
*/ + boolean ret = diff(env1, env2); + env1.close(); + env2.close(); + return ret; + } + db2 = env2.openDatabase(null, file2, dbConfig); + } else { + db2 = env1.openDatabase(null, file2, dbConfig); + } + Database db1 = env1.openDatabase(null, file1, dbConfig); + + boolean ret = diff(db1, db2); + db1.close(); + db2.close(); + env1.close(); + + return ret; + } + + /** + * A mechanism for efficiently comparing all databases in two quiescent + * environments. + * + * @param env1 a valid, open Environment handle + * @param env2 a valid, open Environment handle + * @return true if all databases in env1 and env2 are identical + * @throws Exception + */ + public boolean diff(Environment env1, Environment env2) + throws Exception { + + List env1names = env1.getDatabaseNames(); + List env2names = env2.getDatabaseNames(); + boolean ret = (env1names.size() == env2names.size()); + if (!ret) { + output("Environments have different number of databases."); + } + for (String dbName : env1names) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + DbInternal.setUseExistingConfig(dbConfig, true); + Database db1, db2; + try { + db1 = env1.openDatabase(null, dbName, dbConfig); + } catch (DatabaseNotFoundException e) { + /* Should never happen, ExclusiveCreate is false. */ + throw EnvironmentFailureException.unexpectedException(e); + } + try { + db2 = env2.openDatabase(null, dbName, dbConfig); + } catch (DatabaseNotFoundException e) { + /* There's a database in env1 that's not in env2. */ + db1.close(); + output(dbName + + " does not exist in " + env2.getHome().getName()); + ret = false; + continue; + } + + if (!diff(db1, db2)) { + ret = false; + } + db1.close(); + db2.close(); + } + if (ret) { + output("No differences exist between the two environments."); + } else { + output("Differences exist between the two environments."); + } + return ret; + } + + /** + * A mechanism for efficiently comparing two quiescent databases. + * + * @param db1 a valid, open Database handle + * @param db2 a valid, open Database handle + * @return true if the db1 and db2 are identical + * @throws Exception + */ + public boolean diff(Database db1, Database db2) + throws Exception { + + BlockBag bag = createBlockBag(db2); + final boolean ret = diff(db1, bag); + if (cfg.getVerbose()) { + final String db1Name = db1.getDatabaseName(); + final String db2Name = db2.getDatabaseName(); + final boolean namesMatch = db1Name.equals(db2Name); + if (ret) { + if (namesMatch) { + output("No differences in " + db1Name); + } else { + output(db1Name + " matches " + db2Name); + } + } else { + if (namesMatch) { + output("Differences in " + db1Name); + } else { + output(db1Name + " does not match " + db2Name); + } + } + } + + /* Do the analysis for these two databases. */ + if (cfg.getDiffAnalysis() && tracker.getDiffRegions().size() != 0) { + DiffRecordAnalyzer.doAnalysis(db1, db2, tracker, cfg.getVerbose()); + } + + return ret; + } + + /** + * A mechanism for efficiently comparing two quiescent environments, one + * local and one on a remote machine. This method assumes that only basic, + * unauthenticated communication is in use. 
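+ * It delegates to the overload below, supplying a SimpleChannelFactory
+ * for the connection.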
+ * + * @param env a valid, open Environment handle + * @param addr the address of the remote machine + * @return true if all the databases in both environments are the same + * @throws IOException if a network error occurs + * @throws ProtocolException if an unexpected message is received + * @throws ServiceConnectFailedException if the remote service was busy + * @throws Exception + */ + public boolean diff(Environment env, InetSocketAddress addr) + throws IOException, + ProtocolException, + ServiceConnectFailedException, + Exception { + return diff(env, addr, new SimpleChannelFactory()); + } + + /** + * A mechanism for efficiently comparing two quiescent environments, one + * local and one on a remote machine. + * + * @param env a valid, open Environment handle + * @param addr the address of the remote machine + * @param dcFactory the channel factory for connection creation + * @return true if all the databases in both environments are the same + * @throws IOException if a network error occurs + * @throws ProtocolException if an unexpected message is received + * @throws ServiceConnectFailedException if the remote service was busy + * @throws Exception + */ + public boolean diff(Environment env, + InetSocketAddress addr, + DataChannelFactory dcFactory) + throws IOException, + ProtocolException, + ServiceConnectFailedException, + Exception { + + List envNames = env.getDatabaseNames(); + boolean ret = true; + + DataChannel channel = connect(addr, dcFactory); + + final Protocol protocol = new Protocol( + new NameIdPair("Ldiff", -1), + DbInternal.getNonNullEnvImpl(env)); + protocol.write(protocol.new EnvDiff(), channel); + + /* + * Check that the number of local databases matches the number of + * remote databases. This is how we detect a remote db that doesn't + * exist locally. + */ + Protocol.EnvInfo msg = protocol.read(channel, Protocol.EnvInfo.class); + ret = (envNames.size() == msg.getNumberOfDBs()); + if (!ret) { + output("Number of databases in local and remote environments " + + "does not match."); + } + channel.close(); + + /* + * Run LDiff for every database in the local environment. If they all + * succeed, the environments match. + */ + for (String dbName : envNames) { + channel = connect(addr, dcFactory); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + DbInternal.setUseExistingConfig(dbConfig, true); + Database db; + try { + db = env.openDatabase(null, dbName, dbConfig); + } catch (DatabaseNotFoundException e) { + /* Should never happen, ExclusiveCreate is false. */ + throw EnvironmentFailureException.unexpectedException(e); + } + + try { + if (!diff(db, channel)) { + ret = false; + } + } catch (ProtocolException pe) { + output(dbName + " does not exist in remote environment."); + ret = false; + } finally { + db.close(); + if (channel.isOpen()) { + channel.close(); + } + } + } + if (ret) { + output("Local environment matches remote."); + } else { + output("Local environment does not match remote."); + } + return ret; + } + + /** + * A mechanism for efficiently comparing two quiescent databases, one of + * which resides on a remote machine. 
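+ *
+ * A hypothetical call, assuming the remote node is running an
+ * LDiffService and ldiff is a configured LDiff instance:
+ *
+ *   boolean same = ldiff.diff(db,
+ *       new InetSocketAddress("remote.example.com", 5001),
+ *       new SimpleChannelFactory());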
+ * + * @param db a valid, open Database handle + * @param addr the address of the remote host + * @param dcFactory the channel factory for connection creation + * @return true if the local Database and the remote Database are identical + * @throws IOException if a network error occurs + * @throws ProtocolException if the remote database does not exist + * @throws ServiceConnectFailedException if the remote service is busy + * @throws Exception + */ + public boolean diff(Database db, + InetSocketAddress addr, + DataChannelFactory dcFactory) + throws IOException, + ProtocolException, + ServiceConnectFailedException, + Exception { + + final DataChannel channel = connect(addr, dcFactory); + boolean ret; + try { + ret = diff(db, channel); + } finally { + channel.close(); + } + + return ret; + } + + private boolean diff(Database db, DataChannel channel) + throws IOException, ProtocolException, Exception { + + final Protocol protocol = new Protocol( + new NameIdPair("Ldiff", -1), + DbInternal.getNonNullEnvImpl(db.getEnvironment())); + protocol.write(protocol.new DbBlocks + (db.getDatabaseName(), cfg.getBlockSize()), channel); + + /* + * A protocol exception will be thrown here if the remote env does not + * have a database of the same name. + */ + protocol.read(channel, Protocol.BlockListStart.class); + + BlockBag bag = new BlockBag(); + Protocol.BlockInfo blockMsg; + while (true) { + try { + blockMsg = protocol.read(channel, Protocol.BlockInfo.class); + bag.add(blockMsg.getBlock()); + } catch (ProtocolException pe) { + if (pe.getUnexpectedMessage().getOp() != + Protocol.BLOCK_LIST_END) { + throw pe; + } + break; + } + + } + + boolean match = diff(db, bag); + if (match) { + output(db.getDatabaseName() + " matches remote database."); + } else { + output(db.getDatabaseName() + "does not match remote database."); + } + + if (cfg.getDiffAnalysis() && tracker.getDiffRegions().size() != 0) { + DiffRecordAnalyzer.doAnalysis + (db, protocol, channel, tracker, cfg.getVerbose()); + } + protocol.write(protocol.new Done(), channel); + + return match; + } + + /** + * A mechanism for efficiently comparing two quiescent databases, typically + * residing on different machines connected by a network. + * + * @param db a valid, open Database handle + * @param blkBag a bag of blocks to diff against db. + * + * @return true if the two comparing databases are identical. + * @throws Exception + */ + public boolean diff(Database db, BlockBag blkBag) + throws Exception { + + /* Suppose the two comparing databases are identical, by default. */ + boolean identical = true; + + /* + * window represents a BlockSize window into db. Initialize it to + * represent the block starting at the first key in db. + */ + Cursor cursor = db.openCursor(null, null); + + long pos = 1; + int numKeys = cfg.getBlockSize(); + Window window = new Window(cursor, numKeys); + + int errors = 0; + int maxerrors = cfg.getMaxErrors(); + tracker = new DiffTracker(numKeys); + while (window.getChecksum() != 0 && blkBag.size() > 0) { + + /* + * Find the block in the bag whose checksum and md5 match the + * current window. This block, if it exists, is match. + */ + Block match = findMatch(db.getEnvironment(), blkBag, window); + if (match != null) { + tracker.setBlockDiffBegin + (blkBag.getBlock(), blkBag.getBlockIndex()); + /* Remove match and any earlier blocks from the bag. 
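+                 * Any earlier blocks that are skipped over existed remotely
+                 * but were never matched locally, so each one is counted as
+                 * an error below.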
*/ + List removed = blkBag.remove(match); + if (removed != null) { + identical = false; + errors += removed.size(); + tracker.calBlockDiffSize(blkBag.getBlockIndex()); + if (maxerrors > 0 && errors >= maxerrors) { + break; + } + } + + tracker.addDiffRegion(window); + + /* Advance the window beyond the just matched block. */ + window.nextWindow(); + pos += window.size(); + + continue; + } + + identical = false; + LoggerUtils.envLogMsg + (Level.FINE, + DbInternal.getNonNullEnvImpl(db.getEnvironment()), + "Unmatched block at position " + pos); + errors++; + if (maxerrors > 0 && errors >= maxerrors) { + break; + } + + /* Roll the window forward by one key. */ + window.rollWindow(); + + if (window.getChecksum() != 0) { + pos++; + } + } + + cursor.close(); + + if (window.getChecksum() != 0) { + + /* + * We ran out of blocks in blkBag before we got to the end of db. + * Update the unmatched key range. + */ + LoggerUtils.envLogMsg + (Level.FINE, + DbInternal.getNonNullEnvImpl(db.getEnvironment()), + "Local Db has addtional records starting at " + pos + "."); + identical = false; + tracker.addWindowAdditionalDiffs(window); + } + + if (blkBag.size() > 0) { + /* All remaining blocks in the bag are unmatched. */ + for (Block b : blkBag) { + LoggerUtils.envLogMsg + (Level.FINE, + DbInternal.getNonNullEnvImpl(db.getEnvironment()), + "Unmatched remote block: " + b); + } + identical = false; + tracker.addBlockBagAdditionalDiffs(window, blkBag); + } + + return identical; + } + + /* For unit test only. */ + public List getDiffRegions() { + if (tracker == null) { + return null; + } + + return tracker.getDiffRegions(); + } + + /** + * Find the block in the bag whose rolling checksum and md5 match the given + * window. The md5 for the window is computed lazily, since it's more + * expensive to compute. There is a slight chance that multiple blocks in + * the bag will match the checksum and the md5 hash, return the first such + * block added to the bag in that case. + * + * @param blkBag a bag of blocks to search for a match + * @param window the block sized window of the db we're diffing + * @return A block which matches the window's checksum and the window's md5 + * hash, or null if no block matches. + */ + private Block findMatch(Environment env, BlockBag blkBag, Window window) { + List matches = blkBag.get(window.getChecksum()); + if (matches == null) { + return null; + } + + /* Delay the computation of the hash until we know we need it. */ + byte[] md5 = window.getMd5Hash(); + for (Block b : matches) { + if (Arrays.equals(b.getMd5Hash(), md5)) { + return b; + } + LoggerUtils.envLogMsg + (Level.FINE, + DbInternal.getNonNullEnvImpl(env), + "Found a remote block whose rolling checksum " + + "matches LB but md5 hash doesn't:" + b); + } + /* No matches. */ + return null; + } + + /** + * Create a bag of blocks from the records in a given database, using the + * configuration parameters specified when the LDiff object was created. + * + * @param db the database from which to create the bag of blocks + * @return a bag of blocks built from the records in db + */ + public BlockBag createBlockBag(Database db) { + BlockBag bag = new BlockBag(); + + /* Retrieve the key/data pairs and fill into blocks. 
*/ + long start = System.currentTimeMillis(); + Iterator iter = iterator(db); + while (iter.hasNext()) { + bag.add(iter.next()); + } + long end = System.currentTimeMillis(); + LoggerUtils.envLogMsg + (Level.FINE, + DbInternal.getNonNullEnvImpl(db.getEnvironment()), + "Block bag created in : " + (end - start) + " ms."); + return bag; + } + + public Iterator iterator(Database db) { + return new LDiffIterator(db); + } + + /** + * Connect to addr and perform a service handshake. Retry as specified by + * the config object. + * + * @param addr the remote address to connect to + * @param dcFactory the channel factory for connection creation + * @return an open DataChannel + * @throws IOException if an exception occurs with the DataChannel + * @throws ServiceConnectFailedException if the remote service is busy + */ + private DataChannel connect(InetSocketAddress addr, + DataChannelFactory dcFactory) + throws IOException, ServiceConnectFailedException { + + int triesLeft = cfg.getMaxConnectionAttempts(); + DataChannel ret = null; + while (true) { + try { + ret = dcFactory.connect(addr, + new ConnectOptions(). + setBlocking(true). + setTcpNoDelay(true). + setOpenTimeout(SOCKET_TIMEOUT_MS). + setReadTimeout(SOCKET_TIMEOUT_MS)); + ServiceDispatcher.doServiceHandshake(ret, LDiffService.NAME); + break; + } catch (ServiceConnectFailedException scfe) { + if ((ret != null) && ret.isOpen()) { + ret.close(); + } + + /* + * Unable to connect because the remote service is busy. If + * the user requested it, keep re-trying. triesLeft == -1 + * means never abort. + */ + if (triesLeft > 0) { + triesLeft--; + } + + if (!cfg.getWaitIfBusy() || triesLeft == 0) { + throw scfe; + } + } + } + + return ret; + } + + private void output(String msg) { + if (cfg.getVerbose()) { + System.out.println(msg); + } + } + + /** + * The exception that is thrown when a database diff detects differences. + * + * TODO: we start simple, by just using it as a boolean indicator and + * perhaps a block id for unit test purposes? As as the local processing + * gains in sophistication will provide block and key (insert, update, + * delete) granularity identification of differences. + */ + @SuppressWarnings("serial") + class MismatchException extends Exception { + + public MismatchException(String message) { + super(message); + } + } + + private class LDiffIterator implements Iterator { + private Block cached; + private Cursor cursor; + private final Database db; + private DatabaseEntry lastKey, lastData; + private boolean more; + private int i; + private final int numKeys; + + public LDiffIterator(Database db) { + i = 0; + numKeys = LDiff.this.cfg.getBlockSize(); + cached = null; + more = true; + this.db = db; + + /* Prime the pump, get the first block in cached and set more. */ + next(); + } + + @Override + public boolean hasNext() { + return more; + } + + @Override + public void remove() { + + } + + @Override + public Block next() { + if (!more) { + throw new NoSuchElementException(); + } + + /* + * We don't want to return a block with 0 keys, but we can't know + * ahead of time whether the block will have any keys and the user + * likely called hasNext() already. So when asked for block i, we + * cache block i+1, check whether it's empty and return the + * previously cached block i. If block i+1 is empty, the next call + * to hasNext() will return false and the empty block won't be + * returned. 
+ */ + cursor = db.openCursor(null, null); + if (lastKey == null) { + lastKey = new DatabaseEntry(); + lastData = new DatabaseEntry(); + } else { + cursor.getSearchBoth(lastKey, lastData, null); + } + Block ret = cached; + cached = LDiffUtil.readBlock(i++, cursor, numKeys); + if (cached.numRecords == 0) { + more = false; + } else { + cursor.getCurrent(lastKey, lastData, null); + } + cursor.close(); + return ret; + } + + @Override + protected void finalize() throws Throwable { + try { + cursor.close(); + } finally { + super.finalize(); + } + } + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/LDiffConfig.java b/src/com/sleepycat/je/rep/util/ldiff/LDiffConfig.java new file mode 100644 index 0000000..fdccd12 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/LDiffConfig.java @@ -0,0 +1,244 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +public class LDiffConfig { + private static final int DEFAULT_BLOCK_SIZE = 1 << 13; // 8k + private static final int DEFAULT_MAX_ERRORS = 0; + + private int maxErrors = DEFAULT_MAX_ERRORS; + private boolean diffAnalysis = false; + private int blockSize = DEFAULT_BLOCK_SIZE; + private boolean waitIfBusy = false; + private int maxConnectionAttempts = 1; + private int reconnectDelay = 0; + public boolean verbose = false; + + /** + * Return the maximum number of errors to analyze before ending the LDiff + * operation. + * + * @return the maximum number of errors to analyze before throwing + * MismatchException. + */ + public int getMaxErrors() { + return maxErrors; + } + + /** + * Configure the maximum number of errors to be analyzed before ending the + * LDiff operation. A value of zero forces the algorithm to run to + * completion. The default value is 0. + * + * @param max the maximum number of errors to be analyzed before ending the + * LDiff operation. + */ + public LDiffConfig setMaxErrors(int max) { + setMaxErrorsVoid(max); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxErrorsVoid(int max) { + this.maxErrors = max; + } + + /** + * Return whether an LDiff object will provide detailed analysis about diff + * failures. + * + * @return true if an LDiff object will provide detailed analysis about + * diff failures. + */ + public boolean getDiffAnalysis() { + return diffAnalysis; + } + + /** + * Configure an LDiff object to provide detailed analysis about diff + * failures. The default value is false. + * + * @param analysis if true, provides detailed analysis about the reason why + * the diff failed. The detailed analysis can be time consuming. + */ + public LDiffConfig setDiffAnalysis(boolean analysis) { + setDiffAnalysisVoid(analysis); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setDiffAnalysisVoid(boolean analysis) { + diffAnalysis = analysis; + } + + /** + * Return the number of records to include in each block analyzed by the + * LDiff operation. 
+ * + * @return the number of records to include in each block analyzed by the + * LDiff operation. + */ + public int getBlockSize() { + return blockSize; + } + + /** + * Configure the number of records to include in each block analyzed by the + * LDiff operation. The default is 10240. + * + * @param size the number of records to include in each block analyzed by + * the LDiff operation. + */ + public LDiffConfig setBlockSize(int size) { + setBlockSizeVoid(size); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setBlockSizeVoid(int size) { + blockSize = size; + } + + /** + * Return whether or not the operation will wait for the remote service to + * become available if the remote service is busy. + * + * @return true if the LDiff operation will block until the remote service + * becomes available + */ + public boolean getWaitIfBusy() { + return waitIfBusy; + } + + /** + * Return the maximum number of times the operation will attempt to connect + * to the remote service before aborting. A value of -1 means the operation + * will never abort. + * + * @return the maximum number of times the operation will attempt to connect + * to the remote service before aborting. + */ + public int getMaxConnectionAttempts() { + return maxConnectionAttempts; + } + + /** + * Return the delay, in milliseconds, between reconnect attempts. + * + * @return the amount of time, in milliseconds, between reconnection + * attempts + */ + public int getReconnectDelay() { + return reconnectDelay; + } + + /** + * Configure whether or not the operation should wait for the remote + * service to become available, if the remote service is busy. + * + * @param wait if true, the LDiff operation will block until the remote + * node is available + * @param maxAttempts the number of times to attempt connecting to + * the service before aborting. Pass -1 to never abort. + * @param delay the number of milliseconds to wait between connection + * attempts. + */ + public LDiffConfig setWaitIfBusy(boolean wait, int maxAttempts, int delay) { + waitIfBusy = wait; + maxConnectionAttempts = maxAttempts; + reconnectDelay = delay; + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setWaitIfBusyVoid(boolean wait) { + this.waitIfBusy = wait; + } + + /** + * @hidden + * For the completement of setter methods. + */ + public LDiffConfig setMaxConnectionAttempts(int maxAttempts) { + setMaxConnectionAttemptsVoid(maxAttempts); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setMaxConnectionAttemptsVoid(int maxAttempts) { + this.maxConnectionAttempts = maxAttempts; + } + + /** + * @hidden + * For the completement of setter methods. + */ + public LDiffConfig setReconnectDelay(int delay) { + setReconnectDelayVoid(delay); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setReconnectDelayVoid(int delay) { + this.reconnectDelay = delay; + } + + /** + * Return whether or not the operation will output information on its + * success or failure. + * + * @return true if the operation will output information + */ + public boolean getVerbose() { + return verbose; + } + + /** + * Configure whether or not the operation will output information on its + * success or failure. 
+ * + * @param verbose if true, the LDiff operation will output information + * as it compares databases + */ + public LDiffConfig setVerbose(boolean verbose) { + this.verbose = verbose; + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + */ + public void setVerboseVoid(boolean verbose) { + this.verbose = verbose; + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/LDiffConfigBeanInfo.java b/src/com/sleepycat/je/rep/util/ldiff/LDiffConfigBeanInfo.java new file mode 100644 index 0000000..9897d3a --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/LDiffConfigBeanInfo.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +public class LDiffConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(LDiffConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(LDiffConfig.class); + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/LDiffRecordRequestException.java b/src/com/sleepycat/je/rep/util/ldiff/LDiffRecordRequestException.java new file mode 100644 index 0000000..bb18e07 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/LDiffRecordRequestException.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown when LDiff requesting records on remote database fails. + */ +public class LDiffRecordRequestException extends OperationFailureException { + + private static final long serialVersionUID = 1925430616L; + + /** + * For internal use only. + * @hidden + */ + public LDiffRecordRequestException(String message) { + super(null /*locker*/, false /*abortOnly*/, message, null /*cause*/); + } + + private LDiffRecordRequestException(String message, + LDiffRecordRequestException cause) { + super(message, cause); + } + + /** + * For internal use only. 
+ * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new LDiffRecordRequestException(msg, this); + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/LDiffService.java b/src/com/sleepycat/je/rep/util/ldiff/LDiffService.java new file mode 100644 index 0000000..888c826 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/LDiffService.java @@ -0,0 +1,232 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.io.IOException; +import java.util.Iterator; +import java.util.HashSet; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.util.ldiff.Protocol.DbBlocks; +import com.sleepycat.je.rep.util.ldiff.Protocol.EnvDiff; +import com.sleepycat.je.rep.util.ldiff.Protocol.RemoteDiffRequest; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ExecutingService; + +/** + * Implementation of the LDiff service that process requests from the LDiff + * client. It's the network level interface to the remote processing done as + * part of the ldiff implementation. + * + * Note that the service only processes one request at a time, so as not to + * overload the node. + */ +public class LDiffService extends ExecutingService { + + /* The service name. */ + public static final String NAME = "LDiff"; + + /* + * Determines whether the service is busy and will accept a new + * request. + */ + private final AtomicBoolean busy = new AtomicBoolean(false); + private final RepImpl repImpl; + private final ServiceDispatcher dispatcher; + + public LDiffService(ServiceDispatcher dispatcher, RepImpl repImpl) { + super(NAME, dispatcher); + this.repImpl = repImpl; + this.dispatcher = dispatcher; + dispatcher.register(this); + } + + public void shutdown() { + dispatcher.cancel(NAME); + } + + /* + * Returns busy if we are already processing a request. 
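+ *
+ * Admission control is a single compareAndSet, as in getRunnable()
+ * below; only one caller can flip busy from false to true, so at most
+ * one request is served at a time:
+ *
+ *   if (!busy.compareAndSet(false, true)) {
+ *       // already serving a request; reject this one
+ *   }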
+ */ + @Override + public boolean isBusy() { + return busy.get(); + } + + @Override + public Runnable getRunnable(DataChannel dataChannel) { + if (!busy.compareAndSet(false, true)) { + throw EnvironmentFailureException.unexpectedState + ("Service is already busy"); + } + return new LDiffServiceRunnable(dataChannel); + } + + class LDiffServiceRunnable implements Runnable { + final DataChannel channel; + private ReplicatedEnvironment env; + private DatabaseConfig dbConfig = new DatabaseConfig(); + + LDiffServiceRunnable(DataChannel dataChannel) { + this.channel = dataChannel; + dbConfig.setReadOnly(true); + dbConfig.setAllowCreate(false); + } + + public void runLDiff(DbBlocks request, Protocol protocol) + throws IOException { + + Database db = null; + Cursor cursor = null; + try{ + db = env.openDatabase + (null, request.getDbName(), dbConfig); + protocol.write(protocol.new BlockListStart(), channel); + LDiffConfig cfg = new LDiffConfig(); + cfg.setBlockSize(request.getBlockSize()); + LDiff ldf = new LDiff(cfg); + /* Use the Iterator to stream the blocks across the wire. */ + Iterator blocks = ldf.iterator(db); + while (blocks.hasNext()) { + protocol.write(protocol.new BlockInfo(blocks.next()), + channel); + } + protocol.write(protocol.new BlockListEnd(), channel); + + /* Start to do the record difference analysis. */ + Message msg = protocol.read(channel); + if (msg.getOp() == Protocol.REMOTE_DIFF_REQUEST) { + cursor = db.openCursor(null, null); + sendDiffArea(cursor, (RemoteDiffRequest) msg, protocol); + runDiffAnalysis(cursor, protocol); + } else if (msg.getOp() != Protocol.DONE) { + protocol.write(protocol.new ProtocolError + ("Invalid message: " + msg), channel); + } + } catch (DatabaseNotFoundException e) { + protocol.write(protocol.new DbMismatch(e.getMessage()), + channel); + } finally { + if (cursor != null) { + cursor.close(); + } + if (db != null) { + db.close(); + } + } + } + + /* Get records for all different areas and send out. */ + private void runDiffAnalysis(Cursor cursor, + Protocol protocol) + throws IOException { + + while (true) { + Message msg = protocol.read(channel); + if (msg.getOp() == Protocol.REMOTE_DIFF_REQUEST) { + sendDiffArea(cursor, (RemoteDiffRequest) msg, protocol); + } else { + if (msg.getOp() != Protocol.DONE) { + protocol.write(protocol.new ProtocolError + ("Invalid message: " + msg), channel); + } + break; + } + } + } + + /* Send the different records of an area to the requested machine. */ + private void sendDiffArea(Cursor cursor, + RemoteDiffRequest request, + Protocol protocol) + throws IOException { + + /* Get the records in the different area. */ + HashSet records = null; + try { + records = DiffRecordAnalyzer.getDiffArea(cursor, request); + } catch (Exception e) { + protocol.write(protocol.new Error(e.getMessage()), channel); + throw new LDiffRecordRequestException(e.getMessage()); + } + + /* Write them out to the requested machine. 
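+ *
+ * The framing below follows the sequence documented in Protocol:
+ *   DiffAreaStart [RemoteRecord]* DiffAreaEnd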
*/ + protocol.write(protocol.new DiffAreaStart(), channel); + for (Record record: records) { + protocol.write(protocol.new RemoteRecord(record), channel); + } + protocol.write(protocol.new DiffAreaEnd(), channel); + } + + public void runEnvDiff(EnvDiff request, Protocol protocol) + throws IOException { + + protocol.write(protocol.new EnvInfo + (env.getDatabaseNames().size()), channel); + } + + @Override + public void run() { + final Protocol protocol; + + try { + env = repImpl.makeEnvironment(); + protocol = new Protocol(new NameIdPair("Ldiff", -1), repImpl); + try { + channel.getSocketChannel().configureBlocking(true); + Message msg = protocol.read(channel); + if (msg.getOp() == Protocol.DB_BLOCKS) { + runLDiff((DbBlocks)msg, protocol); + } else if (msg.getOp() == Protocol.ENV_DIFF) { + runEnvDiff((EnvDiff)msg, protocol); + } + } catch (ProtocolException e) { + /* Unexpected message. */ + protocol.write + (protocol.new ProtocolError(e.getMessage()), + channel); + } finally { + if (channel.isOpen()) { + channel.close(); + } + } + } catch (IOException e) { + + /* + * Channel has already been closed, or the close itself + * failed. + */ + } finally { + if (env != null) { + env.close(); + } + if (!busy.compareAndSet(true, false)) { + throw EnvironmentFailureException.unexpectedState + ("Service is not busy"); + } + } + } + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/LDiffUtil.java b/src/com/sleepycat/je/rep/util/ldiff/LDiffUtil.java new file mode 100644 index 0000000..f107f97 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/LDiffUtil.java @@ -0,0 +1,167 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.io.File; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.utilint.Adler32; + +public class LDiffUtil { + + /* Convenient masking constant. */ + final static long MASK_32BIT = 0xffffffffl; + + /* To compute a MD5 hash for each block. */ + static MessageDigest md = null; + static { + try { + md = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } + } + + /* Concatenate two byte arrays into one. */ + public static byte[] concatByteArray(byte[] a, byte[] b) { + if ((a == null) || (b == null)) { + return (a == null) ? b : a; + } + + int len = a.length + b.length; + byte[] dest = new byte[len]; + /* Copy the content of a to dest. */ + System.arraycopy(a, 0, dest, 0, a.length); + /* Copy the content from b to the remaining part of dest. 
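+ *
+ * e.g. a = {1, 2} and b = {3, 4} yield dest = {1, 2, 3, 4}.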
*/ + System.arraycopy(b, 0, dest, a.length, b.length); + + return dest; + } + + /** + * Each byte (Xi in the tech report) is replaced by a 32 bit Adler checksum + * of the bytes representing the concatenation of the key/value pair. + * + * @return the checksum + */ + public static int getXi(byte[] keyValue) { + Adler32 adler32 = new Adler32(); + adler32.update(keyValue, 0, keyValue.length); + return (int) adler32.getValue(); + } + + public static Block readBlock(int blockId, Cursor cursor, int numKeys) + throws DatabaseException { + + /* DatabaseEntry represents the key and data of each record. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + /* Adler32 to compute the rolling checksum of key/data pair. */ + Adler32 adler32 = new Adler32(); + int i = 0; + int a = 0, b = 0; + md.reset(); + Block block = new Block(blockId); + + /* Please pay attention to the check order in while loop. */ + while ((i < numKeys) && + (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS)) { + /* Indicates having a new block. */ + if (i == 0) { + block.setBeginKey(key.getData()); + block.setBeginData(data.getData()); + } + + /* Calculate rollingChksum on "key|data" bytes. */ + adler32.reset(); + adler32.update(key.getData(), 0, key.getData().length); + adler32.update(data.getData(), 0, data.getData().length); + final int xi = (int) adler32.getValue(); + a += xi; + b += a; + /* Update MessageDigest with "key|data" bytes. */ + md.update(key.getData()); + md.update(data.getData()); + i++; + } + + /* Allocate a block and return. */ + long cksum = (a & LDiffUtil.MASK_32BIT) | ((long) b << 32); + block.setRollingChksum(cksum); + block.setMd5Hash(md.digest()); + block.setNumRecords(i); + return block; + } + + public static Environment openEnv(String envDir) { + /* Open the database environment. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + /* envConfig.setTransactional(false); */ + envConfig.setAllowCreate(false); + envConfig.setReadOnly(true); + try { + return new Environment(new File(envDir), envConfig); + } catch (EnvironmentLockedException e) { + e.printStackTrace(); + } catch (DatabaseException e) { + e.printStackTrace(); + } + return null; + } + + public static Database openDb(Environment env, String dbName) { + + /* Open the remote database within that environment. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + /* dbConfig.setTransactional(false); */ + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(true); + dbConfig.setSortedDuplicates(true); + try { + return env.openDatabase(null, dbName, dbConfig); + } catch (DatabaseException e) { + e.printStackTrace(); + } + return null; + } + + public static void close(Environment env, Database db) { + if (db != null) { + try { + db.close(); + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + if (env != null) { + try { + env.close(); + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/MismatchedRegion.java b/src/com/sleepycat/je/rep/util/ldiff/MismatchedRegion.java new file mode 100644 index 0000000..c15d296 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/MismatchedRegion.java @@ -0,0 +1,103 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +/* + * An object used to present the range of a different area on local and remote + * database. + * + * It uses the [key-remote-begin/data-remote-begin, different area size] to + * present a different area on the remote database and uses the + * [key-local-begin/data-local-begin, different area size] to present a + * different area on the local database. + */ +public class MismatchedRegion { + private byte[] remoteBeginKey; + private byte[] remoteBeginData; + private long remoteDiffSize; + private byte[] localBeginKey; + private byte[] localBeginData; + private long localDiffSize; + + public void setRemoteBeginKey(byte[] remoteBeginKey) { + this.remoteBeginKey = remoteBeginKey; + } + + public void setRemoteBeginData(byte[] remoteBeginData) { + this.remoteBeginData = remoteBeginData; + } + + public void setRemoteDiffSize(long remoteDiffSize) { + this.remoteDiffSize = remoteDiffSize; + } + + public void setLocalBeginKey(byte[] localBeginKey) { + this.localBeginKey = localBeginKey; + } + + public void setLocalBeginData(byte[] localBeginData) { + this.localBeginData = localBeginData; + } + + public void setLocalDiffSize(long localDiffSize) { + this.localDiffSize = localDiffSize; + } + + public byte[] getRemoteBeginKey() { + return remoteBeginKey; + } + + public byte[] getRemoteBeginData() { + return remoteBeginData; + } + + public long getRemoteDiffSize() { + return remoteDiffSize; + } + + public byte[] getLocalBeginKey() { + return localBeginKey; + } + + public byte[] getLocalBeginData() { + return localBeginData; + } + + public long getLocalDiffSize() { + return localDiffSize; + } + + /* + * Return true if the different area is an additional block on remote + * database. + */ + public boolean isRemoteAdditional() { + return (localDiffSize == 0) ? true : false; + } + + /* + * Return true if the different area is an additional block on local + * database. + */ + public boolean isLocalAdditional() { + return (remoteDiffSize == 0) ? true : false; + } + + /* Present a different area is meaningless. */ + public boolean isNull() { + return (remoteBeginKey == null) && (remoteBeginData == null) && + (localBeginKey == null) && (localBeginData == null) && + (localDiffSize == 0) && (remoteDiffSize == 0); + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/Protocol.java b/src/com/sleepycat/je/rep/util/ldiff/Protocol.java new file mode 100644 index 0000000..de3bb5b --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/Protocol.java @@ -0,0 +1,489 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.utilint.BinaryProtocol; +import com.sleepycat.je.utilint.VLSN; + +/** + * Protocol used by LDiff to request the blocks associated with a database and + * do the record level analysis. + * + * BLOCK LEVEL ANALYSIS + * ========================================= + * The request response sequence for a block list request is: + * + * EnvDiff -> EnvInfo -> DbBlocks -> BlockListStart [BlockInfo]+ BlockListEnd + * + * A DbMismatch response is sent back if the database does not exist, or if the + * database has different persistent attributes associated with it. + * + * Note that this request is designed to maximize overlap. That is, the + * responder could be working on block n, while the requester is working on + * blocks n+1 and beyond (to the end of the database). + * + * The above is the minimal set of messages, biased for performance when the + * databases are expected to match. + * + * RECORD LEVEL ANALYSIS + * ========================================= + * User can configure LDiff to do record level analysis by setting + * LDiffConfig.setDiffAnalysis(true), it can help you find out which records + * are different between two databases. + * + * The request response sequence for record level analysis is: + * + * RemoteDiffRequest -> DiffAreaStart|Error [RemoteRecord] DiffAreaEnd -> Done + * + * The local Environment would send out a RemoteDiffRequest to the remote + * Environment, the remote Environment can get all the records of a different + * area: + * 1. If there exists exceptions during the fetching process, remote + * Environment would send back an Error message, local Environment would + * throw out a LDiffRecordRequestException and exit. + * 2. If the fetching process is correct, remote Environment would send a + * DiffAreaStart message to the local Environment to notify that it now + * transfers RemoteRecords of a different area. + * 3. After all the records of a different area are transferred, remote + * Environment would send out a DiffAreaEnd message, which specifies + * transferring a different area is finished. + * 4. When all the RemoteDiffRequest are processed, local Environment would + * send a Done message to presents the record level analysis is done. + * + * TODO: 1) Protocol version matching 2) Block granularity sync request. 3) + * Protocol to narrow a diff down to a specific set of key inserts, updates and + * deletes. 
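+ *
+ * Hedged sketch of the requester's side of the block-level exchange
+ * (channel setup and the handleBlock() helper are assumed, and a
+ * DbMismatch reply would need handling as well):
+ *
+ *   protocol.write(protocol.new DbBlocks(dbName, blockSize), channel);
+ *   Message msg = protocol.read(channel);        // expect BlockListStart
+ *   while ((msg = protocol.read(channel)).getOp() == Protocol.BLOCK_INFO) {
+ *       handleBlock(((BlockInfo) msg).getBlock());
+ *   }                                            // loop ends at BlockListEnd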
+ */ +public class Protocol extends BinaryProtocol { + + static public final int VERSION = 2; + + public final static MessageOp DB_BLOCKS = + new MessageOp((short) 1, DbBlocks.class); + + public final static MessageOp DB_MISMATCH = + new MessageOp((short) 2, DbMismatch.class); + + public final static MessageOp BLOCK_LIST_START = + new MessageOp((short) 3, BlockListStart.class); + + public final static MessageOp BLOCK_INFO = + new MessageOp((short) 4, BlockInfo.class); + + public final static MessageOp BLOCK_LIST_END = + new MessageOp((short) 5, BlockListEnd.class); + + public final static MessageOp ENV_DIFF = + new MessageOp((short) 6, EnvDiff.class); + + public final static MessageOp ENV_INFO = + new MessageOp((short) 7, EnvInfo.class); + + public final static MessageOp REMOTE_DIFF_REQUEST = + new MessageOp((short) 8, RemoteDiffRequest.class); + + public final static MessageOp REMOTE_RECORD = + new MessageOp((short) 9, RemoteRecord.class); + + public final static MessageOp DIFF_AREA_START = + new MessageOp((short) 10, DiffAreaStart.class); + + public final static MessageOp DIFF_AREA_END = + new MessageOp((short) 11, DiffAreaEnd.class); + + public final static MessageOp DONE = new MessageOp((short) 12, Done.class); + + public final static MessageOp ERROR = + new MessageOp((short) 13, Error.class); + + public Protocol(NameIdPair nameIdPair, EnvironmentImpl envImpl) { + + /* + * Make the configured version the same as the code version for now to + * ignore protocol negotiation issues. + */ + super(nameIdPair, VERSION, VERSION, envImpl); + + initializeMessageOps(new MessageOp[] + { DB_BLOCKS, DB_MISMATCH, BLOCK_LIST_START, + BLOCK_INFO, BLOCK_LIST_END, ENV_DIFF, ENV_INFO, + REMOTE_DIFF_REQUEST, REMOTE_RECORD, DIFF_AREA_START, + DIFF_AREA_END, DONE, ERROR }); + } + + /** + * Message used to request a list of blocks. Note that the message only + * needs to identify a specific database, since the service itself is + * associated with a specific environment. + */ + public class DbBlocks extends SimpleMessage { + final String dbName; + final int blockSize; + + // TODO: add all the persistent properties of the database, so they can + // be checked. + public DbBlocks(String dbName, int blockSize) { + super(); + this.dbName = dbName; + this.blockSize = blockSize; + } + + public DbBlocks(ByteBuffer buffer) { + super(); + dbName = getString(buffer); + blockSize = LogUtils.readInt(buffer); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(dbName, blockSize); + } + + @Override + public MessageOp getOp() { + return DB_BLOCKS; + } + + public String getDbName() { + return dbName; + } + + public int getBlockSize() { + return blockSize; + } + } + + /** + * Issued in response to a database level mismatch either because the + * database itself does not exist at the node, or because it's properties + * are different. + */ + public class DbMismatch extends RejectMessage { + + public DbMismatch(String message) { + super(message); + } + + public DbMismatch(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return DB_MISMATCH; + } + } + + /** + * Denotes the start of the list of blocks. + */ + public class BlockListStart extends SimpleMessage { + + public BlockListStart() { + super(); + } + + @SuppressWarnings("unused") + public BlockListStart(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return BLOCK_LIST_START; + } + } + + /** + * Denotes the end of the list of blocks. 
+ */ + public class BlockListEnd extends SimpleMessage { + + public BlockListEnd() { + super(); + } + + @SuppressWarnings("unused") + public BlockListEnd(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return BLOCK_LIST_END; + } + } + + /** + * Supplies the properties of an individual block. + */ + public class BlockInfo extends SimpleMessage { + /* The block associated with the request */ + final Block block; + + public BlockInfo(Block block) { + super(); + this.block = block; + } + + public BlockInfo(ByteBuffer buffer) { + super(); + block = new Block(LogUtils.readInt(buffer)); + block.setBeginKey(getByteArray(buffer)); + block.setBeginData(getByteArray(buffer)); + block.setMd5Hash(getByteArray(buffer)); + block.setRollingChksum(LogUtils.readLong(buffer)); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(block.getBlockId(), block.getBeginKey(), + block.getBeginData(), block.getMd5Hash(), + block.getRollingChksum()); + } + + @Override + public MessageOp getOp() { + return BLOCK_INFO; + } + + public Block getBlock() { + return block; + } + } + + /** + * Message used to present that an Environment is requesting to do a + * LDiff with another Environment. + */ + public class EnvDiff extends SimpleMessage { + public EnvDiff() { + super(); + } + + @SuppressWarnings("unused") + public EnvDiff(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return ENV_DIFF; + } + } + + /** + * Message used to present how many databases in a compared Environment. + */ + public class EnvInfo extends SimpleMessage { + final int numDBs; + + public EnvInfo(int numberOfDbs) { + super(); + numDBs = numberOfDbs; + } + + public EnvInfo(ByteBuffer buffer) { + super(); + numDBs = LogUtils.readInt(buffer); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(numDBs); + } + + @Override + public MessageOp getOp() { + return ENV_INFO; + } + + public int getNumberOfDBs() { + return numDBs; + } + } + + /** + * Message used to request records of a different area on the remote + * database. + */ + public class RemoteDiffRequest extends SimpleMessage { + final byte[] key; + final byte[] data; + final long diffSize; + + public RemoteDiffRequest(MismatchedRegion region) { + super(); + key = region.getRemoteBeginKey(); + data = region.getRemoteBeginData(); + diffSize = region.getRemoteDiffSize(); + } + + public RemoteDiffRequest(ByteBuffer buffer) { + super(); + key = getByteArray(buffer); + data = getByteArray(buffer); + diffSize = LogUtils.readInt(buffer); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(key, data, diffSize); + } + + @Override + public MessageOp getOp() { + return REMOTE_DIFF_REQUEST; + } + + public byte[] getKey() { + return key; + } + + public byte[] getData() { + return data; + } + + public long getDiffSize() { + return diffSize; + } + } + + /** + * Message used to transfer a record from remote to local database. 
+ */ + public class RemoteRecord extends SimpleMessage { + final byte[] key; + final byte[] data; + final VLSN vlsn; + + public RemoteRecord(Record record) { + super(); + key = record.getKey(); + data = record.getData(); + vlsn = record.getVLSN(); + } + + public RemoteRecord(ByteBuffer buffer) { + super(); + key = getByteArray(buffer); + data = getByteArray(buffer); + vlsn = getVLSN(buffer); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(key, data, vlsn); + } + + @Override + public MessageOp getOp() { + return REMOTE_RECORD; + } + + public byte[] getKey() { + return key; + } + + public byte[] getData() { + return data; + } + + public VLSN getVLSN() { + return vlsn; + } + } + + /** + * Message used to present the transfer of a different area on remote + * database begins. + */ + public class DiffAreaStart extends SimpleMessage { + public DiffAreaStart() { + super(); + } + + @SuppressWarnings("unused") + public DiffAreaStart(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return DIFF_AREA_START; + } + } + + /** + * Message used to present the transfer of a different area on remote + * database is done. + */ + public class DiffAreaEnd extends SimpleMessage { + public DiffAreaEnd() { + super(); + } + + @SuppressWarnings("unused") + public DiffAreaEnd(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return DIFF_AREA_END; + } + } + + /** + * Message used to present the transfer of all the different data is done. + */ + public class Done extends SimpleMessage { + public Done() { + super(); + } + + @SuppressWarnings("unused") + public Done(ByteBuffer buffer) { + super(); + } + + @Override + public MessageOp getOp() { + return DONE; + } + } + + /** + * Message used to present an operation error on remote database. + */ + public class Error extends RejectMessage { + public Error(String message) { + super(message); + } + + public Error(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return ERROR; + } + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/Record.java b/src/com/sleepycat/je/rep/util/ldiff/Record.java new file mode 100644 index 0000000..8779a12 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/Record.java @@ -0,0 +1,81 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.util.Arrays; + +import com.sleepycat.je.utilint.VLSN; + +/* + * An object used to record a key/data pair in the different area, also + * saves the VLSN number for the record. + */ +public class Record { + private final byte[] key; + private final byte[] data; + private final VLSN vlsn; + byte[] mix; + + public Record(byte[] key, byte[] data, VLSN vlsn) { + this.key = key; + this.data = data; + this.vlsn = vlsn; + } + + /* + * Get the byte and data array together so that we can generate + * an unique hash code for this object. 
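+ *
+ * e.g. key = {1, 2} and data = {3} yield mix = {1, 2, 3}, so
+ * hashCode() is Arrays.hashCode(new byte[] {1, 2, 3}).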
+ */ + private void generateMix() { + mix = new byte[key.length + data.length]; + System.arraycopy(key, 0, mix, 0, key.length); + System.arraycopy(data, 0, mix, key.length, data.length); + } + + public byte[] getKey() { + return key; + } + + public byte[] getData() { + return data; + } + + public VLSN getVLSN() { + return vlsn; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (!(o instanceof Record)) { + return false; + } + + final Record record = (Record) o; + + return Arrays.equals(record.getKey(), getKey()) && + Arrays.equals(record.getData(), getData()); + } + + @Override + public int hashCode() { + if (mix == null && key != null && data != null) { + generateMix(); + } + + return Arrays.hashCode(mix); + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/Window.java b/src/com/sleepycat/je/rep/util/ldiff/Window.java new file mode 100644 index 0000000..b4a0eb6 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/Window.java @@ -0,0 +1,210 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.utilint.Adler32; + +/** + * A rolling window of key/data pairs used by the ldiff algorithm. + */ +public class Window { + private final Cursor cursor; + private List window; + private final MessageDigest md; + private final int windowSize; + private long chksum; + + /* The begin key/data pair of a window. */ + private byte[] beginKey; + private byte[] beginData; + /* The size of a different area. */ + private long diffSize; + + /** + * Create a window of the given size, starting at the next record pointed + * at by the Cursor. + * + * @param cursor an open cursor on the database being diff'd + * @param windowSize the number of records to include in the window + * @throws Exception + */ + public Window(Cursor cursor, int windowSize) + throws Exception { + + this.cursor = cursor; + this.windowSize = windowSize; + /* To compute a MD5 hash for each block. */ + try { + md = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + throw new Exception("MD5 hashes are required for ldiff."); + } + + nextWindow(); + } + + /** + * Roll a window forward by one key. The rolling checksum is adjusted to + * account for this move past one key/value pair. Note that the md5 hash + * associated with the window is computed on demand and is not updated + * here. 
+ */ + public void rollWindow() + throws Exception { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + if (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + byte[] keyValue = LDiffUtil.concatByteArray(key.getData(), + data.getData()); + int removeXi = LDiffUtil.getXi(window.remove(0)); + window.add(keyValue); + int addXi = LDiffUtil.getXi(keyValue); + rollChecksum(removeXi, addXi); + } else { + chksum = 0; + } + diffSize++; + } + + /** + * Advance the window to the next block of records and update the rolling + * checksum. + */ + public void nextWindow() { + /* DatabaseEntry represents the key and data of each record. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + int i = 0; + window = new ArrayList(); + diffSize = 0; + + /* Please pay attention to the check order in while loop. */ + while ((i < windowSize) && + (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS)) { + if (i == 0) { + beginKey = key.getData(); + beginData = data.getData(); + } + window.add(LDiffUtil.concatByteArray(key.getData(), + data.getData())); + i++; + } + + setChecksum(); + } + + /** + * The checksum for the window. + * + * @return the checksum for the window. + */ + public long getChecksum() { + return chksum; + } + + public byte[] getBeginKey() { + return beginKey; + } + + public byte[] getBeginData() { + return beginData; + } + + public long getDiffSize() { + return diffSize; + } + + /** + * Compute the MD5 hash for the window. This is an expensive operation and + * should be used sparingly. + * + * @return the MD5 for the window. + */ + public byte[] getMd5Hash() { + /* Reset the Message Digest first. */ + md.reset(); + /* Feed the data into the Message Digest. */ + for (byte[] ba : window) { + md.update(ba); + } + return md.digest(); + } + + /** + * The number of records in the window. The size of the window will match + * the value set during instantiation, until the end of the database is + * reached. + * + * @return the number of records in the window. + */ + public int size() { + return window.size(); + } + + /** + * We use the rsync rolling checksum algorithm with the following changes: + * + * 1. Each byte (Xi in the tech report) is replaced by a 32 bit Adler + * checksum of the bytes representing the concatenation of the key/value + * pair. + * + * 2. The value for M is 64 instead of 32 to reduce the chances of false + * collisions on the rolling checksum, given our adaptation of the original + * algorithm to logically use 32 bit bytes. + */ + private void setChecksum() { + + /* Adler32 to compute the rolling checksum of key/data pair. */ + Adler32 adler32 = new Adler32(); + + int a = 0, b = 0; + for (int i = 0; i < size(); i++) { + byte[] element = window.get(i); + adler32.reset(); + adler32.update(element, 0, element.length); + final int xi = (int) adler32.getValue(); /* It's really 32 bits */ + a += xi; + b += (xi * (size() - i)); + } + chksum = (a & LDiffUtil.MASK_32BIT) | ((long) b << 32); + } + + /** + * Update the checksum by removing removeXi and adding addXi, according to + * the rsync algorithm. 
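+ *
+ * The rsync recurrences applied below (32-bit arithmetic) are:
+ *
+ *   a' = a - removeXi + addXi
+ *   b' = b - (removeXi * windowSize) + a'
+ *
+ * with a' packed into the low 32 bits of the checksum and b' into the
+ * high 32 bits.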
+ * + * @param removeXi + * the value to remove from the checksum + * @param addXi + * the value to add to the checksum + */ + private void rollChecksum(int removeXi, int addXi) { + final int a = (int) chksum - removeXi + addXi; + final int b = (int) (chksum >> 32) - (removeXi * size()) + a; + chksum = (a & LDiffUtil.MASK_32BIT) | ((long) b << 32); + } +} diff --git a/src/com/sleepycat/je/rep/util/ldiff/package-info.java b/src/com/sleepycat/je/rep/util/ldiff/package-info.java new file mode 100644 index 0000000..18c9c30 --- /dev/null +++ b/src/com/sleepycat/je/rep/util/ldiff/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: LDiff debugging utility for comparing databases on different + * nodes in a group. + */ +package com.sleepycat.je.rep.util.ldiff; diff --git a/src/com/sleepycat/je/rep/util/package.html b/src/com/sleepycat/je/rep/util/package.html new file mode 100644 index 0000000..07fb33d --- /dev/null +++ b/src/com/sleepycat/je/rep/util/package.html @@ -0,0 +1,23 @@ + + + + + + +BDB JE High Availability command line utilities and helper classes. + +

Package Specification

        +This package provides support for activities like administering and +starting up replication groups. + +@see Replication +Guide, Chapter 4: Utilities + + diff --git a/src/com/sleepycat/je/rep/utilint/BinaryProtocol.java b/src/com/sleepycat/je/rep/utilint/BinaryProtocol.java new file mode 100644 index 0000000..9564dd3 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/BinaryProtocol.java @@ -0,0 +1,1158 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_BYTES_READ; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_BYTES_WRITTEN; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_ENTRIES_WRITTEN_OLD_VERSION; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_BATCHED; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_READ; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_WRITTEN; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGE_BATCHES; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_READ_NANOS; +import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_WRITE_NANOS; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.nio.ByteBuffer; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.utilint.InternalException; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.utilint.StringUtils; + +/** + * Supplies the basic infrastructure for BinaryProtocols used to exchange + * messages by the replications stream and during network based restore + * operations. + * + * Note that this class and its subclasses are not synchronized. There must be + * one instance of this class per thread of control. + * + * IMPORTANT: Please avoid all uses of ByteBuffer.get/put when serializing + * message fields of types: long, int and short to avoid byte order issues. + * Use LogUtils.read/write methods instead, since they use a single canonical + * byte-independent representation. 
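+ *
+ * For example, on the writer use LogUtils.writeInt(buffer, value) and
+ * on the reader int value = LogUtils.readInt(buffer), rather than
+ * ByteBuffer.putInt/getInt, whose results depend on the buffer's
+ * configured byte order.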
+ */ +public abstract class BinaryProtocol { + + protected static final int MESSAGE_HEADER_SIZE = + 2 /* Message op id (short) */ + + 4 /* message size (int) */; + + /* Buffer reused to process the header of every message. */ + protected final ByteBuffer header = + ByteBuffer.allocate((MESSAGE_HEADER_SIZE)); + + /* The version as implemented by the actual code. */ + protected final int codeVersion; + + /* The version that this instance is actually configured to use. */ + /* It's not final to facilitate testing */ + protected int configuredVersion; + + /* Identifies the node using this protocol. */ + protected final NameIdPair nameIdPair; + + /* Maps the message op id to its canonical descriptor instance. */ + private final Map ops = new HashMap(); + private final int predefinedMessageCount; + + /* The max message size which will be accepted. */ + private final long maxMessageSize; + + /* Whether to use UTF8 or default encoding for Strings. */ + private final boolean useStringDefaultEncoding; + + /* + * The predefined messages. IMPORTANT: Note that predefined message ops + * start at 1000. to stay out of the way of subtype ops. + */ + public final MessageOp CLIENT_VERSION = + new MessageOp((short) 1001, ClientVersion.class); + + public final MessageOp SERVER_VERSION = + new MessageOp((short) 1002, ServerVersion.class); + + public final MessageOp INCOMPATIBLE_VERSION = + new MessageOp((short) 1003, IncompatibleVersion.class); + + public final MessageOp PROTOCOL_ERROR = + new MessageOp((short) 1004, ProtocolError.class); + + /* Statistics definition. */ + protected final StatGroup stats; + protected final LongStat nReadNanos; + protected final LongStat nWriteNanos; + protected final LongStat nBytesRead; + protected final LongStat nMessagesRead; + protected final LongStat nBytesWritten; + protected final LongStat nMessagesWritten; + protected final LongStat nMessagesBatched; + protected final LongStat nMessageBatches; + protected final LongStat nEntriesWrittenOldVersion; + + protected final Logger logger; + protected final Formatter formatter; + protected final EnvironmentImpl envImpl; + + /** + * Returns a Protocol object configured that implements the specified + * (supported) version. + * + * @param codeVersion the version actually implemented by the protocol. + * @param configuredVersion the version of the protocol that must be + * implemented/simulated by this protocol when communicating with the + * recipient. 
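+ *
+ * The ldiff Protocol subclass, for instance, currently passes the same
+ * value for both to sidestep negotiation:
+ *
+ *   super(nameIdPair, VERSION, VERSION, envImpl);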
+ */ + protected BinaryProtocol(NameIdPair nameIdPair, + int codeVersion, + int configuredVersion, + EnvironmentImpl envImpl) { + this.nameIdPair = nameIdPair; + this.codeVersion = codeVersion; + this.configuredVersion = configuredVersion; + this.envImpl = envImpl; + + if (envImpl != null) { + this.logger = LoggerUtils.getLogger(getClass()); + } else { + this.logger = LoggerUtils.getLoggerFormatterNeeded(getClass()); + } + this.formatter = new ReplicationFormatter(nameIdPair); + + stats = new StatGroup(BinaryProtocolStatDefinition.GROUP_NAME, + BinaryProtocolStatDefinition.GROUP_DESC); + nReadNanos = new LongStat(stats, N_READ_NANOS); + nWriteNanos = new LongStat(stats, N_WRITE_NANOS); + nBytesRead = new LongStat(stats, N_BYTES_READ); + nMessagesRead = new LongStat(stats, N_MESSAGES_READ); + nBytesWritten = new LongStat(stats, N_BYTES_WRITTEN); + nMessagesWritten = new LongStat(stats, N_MESSAGES_WRITTEN); + nMessagesBatched = new LongStat(stats, N_MESSAGES_BATCHED); + nMessageBatches = new LongStat(stats, N_MESSAGE_BATCHES); + nEntriesWrittenOldVersion = + new LongStat(stats, N_ENTRIES_WRITTEN_OLD_VERSION); + + /* Initialize with the pre-defined protocol messages. */ + for (MessageOp op : + new MessageOp[] { CLIENT_VERSION, + SERVER_VERSION, + INCOMPATIBLE_VERSION, + PROTOCOL_ERROR }) { + + if (ops.put(op.opId, op) != null) { + throw EnvironmentFailureException.unexpectedState + ("Duplicate op: " + op.opId); + } + } + predefinedMessageCount = ops.size(); + if (envImpl != null) { + DbConfigManager configManager = envImpl.getConfigManager(); + long mMSz = + configManager.getLong(RepParams.MAX_MESSAGE_SIZE); + maxMessageSize = (mMSz == 0) ? + (envImpl.getMemoryBudget().getMaxMemory() >> 1) : + mMSz; + useStringDefaultEncoding = configManager.getBoolean + (RepParams.PROTOCOL_OLD_STRING_ENCODING); + } else { + /* Some unit tests pass in null EnvImpl. */ + maxMessageSize = 1 << 20; + useStringDefaultEncoding = true; + } + } + + /* + * Must be invoked after the constructor has completed, to get around + * base/subclass initialization dependences; MessageOps are instances of + * nested classes declared in the subclass. + */ + protected void initializeMessageOps(MessageOp[] protocolOps) { + + for (MessageOp op : protocolOps) { + if (ops.put(op.opId, op) != null) { + throw EnvironmentFailureException.unexpectedState + ("Duplicate op: " + op.opId); + } + } + } + + /* + * The total number of messages defined by the protocol. Includes messages + * defined by the subclass. + */ + public int messageCount() { + return ops.size(); + } + + /* The messages defined in this class. */ + final public int getPredefinedMessageCount() { + return predefinedMessageCount; + } + + /** + * Returns the version associated with this protocol instance. Request + * message generated by this instance conform to this version and responses + * are expected to conform to this version as well. + * + * @return the version that is actually being used. + */ + public int getVersion() { + return configuredVersion; + } + + public StatGroup getStats(StatsConfig config) { + StatGroup ret = stats.cloneGroup(config.getClear()); + + return ret; + } + + public void resetStats() { + stats.clear(); + } + + /* Messages <= this size will use the shared buffer. */ + private static int CACHED_BUFFER_SIZE = 0x4000; + + /* + * The shared read and write buffers that are reused. There are + * two buffers, so that reading and writing can proceed in parallel. 
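+ *
+ * Requests of up to CACHED_BUFFER_SIZE bytes recycle these buffers via
+ * rewind()/limit(size); larger requests fall back to a fresh
+ * ByteBuffer.allocate(size). See allocateReadBuffer and
+ * allocateWriteBuffer below.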
+ */ + private final ByteBuffer cachedReadBuffer = + ByteBuffer.allocate(CACHED_BUFFER_SIZE); + + private final ByteBuffer cachedWriteBuffer = + ByteBuffer.allocate(CACHED_BUFFER_SIZE); + + /** + * Returns a read buffer of the requested size. + * + * @param size the size of the requested buffer in bytes + * @return the requested + */ + private ByteBuffer allocateReadBuffer(int size) { + if (size <= CACHED_BUFFER_SIZE ) { + cachedReadBuffer.rewind(); + cachedReadBuffer.limit(size); + return cachedReadBuffer; + } + return ByteBuffer.allocate(size); + } + + /** + * Returns a write buffer of the requested size. + * + * @param size the size of the requested buffer in bytes + * @return the requested + */ + private ByteBuffer allocateWriteBuffer(int size) { + if(size <= CACHED_BUFFER_SIZE ) { + cachedWriteBuffer.rewind(); + cachedWriteBuffer.limit(size); + return cachedWriteBuffer; + } + return ByteBuffer.allocate(size); + } + + /* Returns byte size of serialized string. */ + public int stringSize(String s) { + return stringToBytes(s).length + 4; + } + + /* Serialize the string into the buffer. */ + public void putString(String s, ByteBuffer buffer) { + byte[] b = stringToBytes(s); + LogUtils.writeInt(buffer, b.length); + buffer.put(b); + } + + /** + * Reconstitutes the string serialized by the above method. + * + * @param buffer the buffer containing the string + * + * @return the de-serialized string + */ + public String getString(ByteBuffer buffer) { + int length = LogUtils.readInt(buffer); + byte b[] = new byte[length]; + buffer.get(b); + return bytesToString(b); + } + + /** + * Responsible for converting a String to an encoded value for all + * protocols. + *

        + * In JE 5.0.36 and earlier, only the default encoding was supported. In + * later releases, a config param was added to force UTF-8 to be used. In + * JE 5.1 and later, the default will be to use UTF-8. [#20967] + * + * @see ReplicationConfig#PROTOCOL_OLD_STRING_ENCODING + */ + private byte[] stringToBytes(String s) { + if (useStringDefaultEncoding) { + return s.getBytes(); + } + return StringUtils.toUTF8(s); + } + + /** + * Responsible for converting an encoded value to a String for all + * protocols. + * + * @see #stringToBytes + */ + private String bytesToString(byte[] b) { + if (useStringDefaultEncoding) { + return new String(b); + } + return StringUtils.fromUTF8(b); + } + + /** + * The Operations that are part of the protocol. + */ + public static class MessageOp { + + /* The string denoting the operation for the request message. */ + private final short opId; + + /* The class used to represent the message. */ + private final Class messageClass; + /* The constructor used to create message instances. */ + private Constructor constructor; + + /* The label is used for debugging purposes. */ + private final String label; + + public MessageOp(short opId, Class messageClass) { + this.opId = opId; + this.messageClass = messageClass; + this.label = messageClass.getSimpleName(); + try { + constructor = messageClass.getConstructor + (messageClass.getEnclosingClass(), ByteBuffer.class); + } catch (SecurityException e) { + throw EnvironmentFailureException.unexpectedException(e); + } catch (NoSuchMethodException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + + public short getOpId() { + return opId; + } + + Class getMessageClass() { + return messageClass; + } + + public Constructor getConstructor() { + return constructor; + } + + @Override + public String toString() { + return label; + } + } + + /** + * Returns the Op from a message buffer. It's always the first item in the + * buffer. Leaves the message buffer positioned after the Op. + * + * @param messageBuffer a message buffer the the protocol + * @return the OpId + */ + private MessageOp getOp(ByteBuffer messageBuffer) { + short opId = LogUtils.readShort(messageBuffer); + final MessageOp messageOp = ops.get(opId); + + if (messageOp == null) { + throw EnvironmentFailureException.unexpectedState + (envImpl, + "Unknown message op id:" + opId + + " Known op ids:" + Arrays.toString(ops.keySet().toArray())); + } + return messageOp; + } + + /* + * Used to indicate that an entity is formatable and can be serialized and + * de-serialized. + */ + interface WireFormatable { + + /* + * Returns a ByteBuffer holding the message in a representation + * suitable for use in a network transmission. The buffer is flipped + * and ready for relative reads. + */ + abstract ByteBuffer wireFormat(); + } + + /** + * Fills a dedicated empty buffer with bytes read from the channel. It + * flips the buffer after it has been filled, so it's ready for reading. 
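+ *
+ * Note that channel.read() may deliver fewer bytes than requested, so
+ * the loop below retries until position == limit, and a return of zero
+ * or fewer bytes is surfaced as an IOException.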
+     *
+     * @param channel the channel to be read
+     * @param buffer the buffer to be filled
+     * @throws IOException if errors were encountered while reading from the
+     * channel, or the buffer could not be filled with the expected number of
+     * bytes
+     */
+    private void fillBuffer(ReadableByteChannel channel,
+                            ByteBuffer buffer)
+        throws IOException {
+
+        final long start = System.nanoTime();
+        assert(buffer.position() == 0);
+        while (buffer.position() < buffer.limit()) {
+            int numRead = channel.read(buffer);
+            if (numRead <= 0) {
+                throw new IOException("Expected bytes: " + buffer.limit() +
+                                      " read bytes: " + buffer.position());
+            }
+        }
+        nReadNanos.add(System.nanoTime() - start);
+        buffer.flip();
+    }
+
+    /**
+     * Reads and parses an incoming message, using the message header to
+     * determine the message type and body size.
+     *
+     * @param channel the channel to read from. Declared as a
+     * ReadableByteChannel rather than the more obvious SocketChannel to
+     * facilitate unit testing.
+     *
+     * @throws IOException
+     */
+    public Message read(ReadableByteChannel channel)
+        throws IOException {
+
+        /* Get the message header. */
+        fillBuffer(channel, header);
+
+        /* Use the op id to determine the message type. */
+        MessageOp op = getOp(header);
+        try {
+            Constructor<? extends Message> cons = op.getConstructor();
+
+            /* Read the size to determine the body of the message. */
+            int messageBodySize = LogUtils.readInt(header);
+            nBytesRead.add(MESSAGE_HEADER_SIZE + messageBodySize);
+            nMessagesRead.increment();
+            if (messageBodySize > 0) {
+                if (messageBodySize > maxMessageSize) {
+                    throw EnvironmentFailureException.unexpectedState
+                        ("Message op: " + op + " Body size: " +
+                         messageBodySize + " is too large. maxSizeAllowed: " +
+                         maxMessageSize +
+                         "\nIf a larger value is needed, set the " +
+                         "'je.rep.maxMessageSize' parameter.");
+                }
+
+                ByteBuffer body = allocateReadBuffer(messageBodySize);
+                fillBuffer(channel, body);
+                return cons.newInstance(this, body);
+            }
+
+            if (messageBodySize < 0) {
+                throw EnvironmentFailureException.unexpectedState
+                    ("Message op: " + op + " Body size: " + messageBodySize);
+            }
+            /* No body. */
+            return cons.newInstance(this, null);
+        } catch (InstantiationException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (IllegalAccessException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (SecurityException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } catch (InvocationTargetException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        } finally {
+            /* The header buffer will be reused, so clear it. */
+            header.clear();
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public <T extends Message> T read(ReadableByteChannel channel,
+                                      Class<T> cl)
+        throws IOException, ProtocolException {
+
+        Message message = read(channel);
+
+        /*
+         * Note: the subclassing check instead of an equality check makes it
+         * convenient to deal with responses when there can be multiple
+         * possible but related responses.
+         */
+        if (cl.isAssignableFrom(message.getClass())) {
+            return (T) message;
+        }
+        throw new ProtocolException(message, cl);
+    }
+
+    /**
+     * Write a message out to a channel.
+     * @throws IOException
+     */
+    public void write(Message message, NamedChannel namedChannel)
+        throws IOException {
+
+        write(message, namedChannel, namedChannel.getNameIdPair());
+    }
+
+    /**
+     * Write a message out to a channel.
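+     * <p>
+     * (Editorial sketch, not part of the original source; assumes code
+     * running inside a protocol subclass.) A typical client-side exchange
+     * writes a request and reads a typed response:
+     * <pre>
+     *   write(new ClientVersion(), channel);
+     *   ServerVersion resp = read(channel, ServerVersion.class);
+     * </pre>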
+ * @throws IOException + */ + public void write(Message message, WritableByteChannel channel) + throws IOException { + + write(message, channel, NameIdPair.NULL); + } + + /** + * Write a message out to a channel. + * @throws IOException + */ + private void write(Message message, + WritableByteChannel channel, + NameIdPair destinationNameIdPair) + throws IOException { + + final ByteBuffer messageBuffer = message.wireFormat(); + + nMessagesWritten.increment(); + + flushBuffer(channel, messageBuffer); + + if (logger.isLoggable(Level.FINER)) { + if (destinationNameIdPair == NameIdPair.NULL) { + /* No nameIdPair was supplied, so use the channel. */ + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINER, + "Sent " + message + " to " + channel); + } else { + LoggerUtils.logMsg(logger, envImpl, formatter, Level.FINER, + "Sent to " + + destinationNameIdPair.getName() + + ": "+ message); + } + } + + /* Rewind the message buffer in case it's a reusable wire format */ + messageBuffer.rewind(); + } + + /** + * Buffers the serialized form of the message (if possible) for later + * writes to the network. + * + * If buffering the message would result in an overflow, the current + * contents of the buffer are flushed before the message is added to the + * buffer. If the size of the message exceeds the size of the buffer, the + * message is flushed directly to the network. + * + * @param channel the channel to which the buffer is flushed on buffer + * overflows. + * + * @param batchWriteBuffer the buffer accumulating the serialized messages + * It's best for performance if this is a direct byte buffer, to avoid + * another copy when the buffer is finally flushed. + * + * @param nMessages the number of messages currently in the buffer, + * including this message, that is, nMessages is always > 0 upon entry. + * + * @param message the message to be buffered. + * + * @return the number of messages currently in the buffer after accounting + * for potential buffer flushes. + */ + public int bufferWrite(WritableByteChannel channel, + ByteBuffer batchWriteBuffer, + int nMessages, + Message message) + throws IOException { + + assert nMessages > 0 ; + + final ByteBuffer messageBuffer = message.wireFormat(); + + if (batchWriteBuffer.remaining() < messageBuffer.limit()) { + + flushBufferedWrites(channel, batchWriteBuffer, nMessages - 1); + /* 1 for the message we add below. */ + nMessages = 1; + + if (batchWriteBuffer.remaining() < messageBuffer.limit()) { + /* + * Buffer is too small for message, so write it directly. + * This write must always be preceded by a buffer flush. + */ + assert batchWriteBuffer.position() == 0 ; + nMessagesWritten.increment(); + flushBuffer(channel, messageBuffer); + nMessages = 0; + return nMessages; + } + } + + batchWriteBuffer.put(messageBuffer); + return nMessages; + } + + /** + * Flush all the messages accumulated by earlier calls to bufferWrite + * + * @param channel the channel to which the buffer is flushed. 
+     *
+     * @param batchWriteBuffer the buffer containing the accumulated messages
+     *
+     * @param nMessages the number of messages in the batch
+     */
+    public void flushBufferedWrites(WritableByteChannel channel,
+                                    ByteBuffer batchWriteBuffer,
+                                    int nMessages)
+        throws IOException {
+
+        nMessagesWritten.add(nMessages);
+
+        if (nMessages > 1) {
+            nMessagesBatched.add(nMessages);
+            nMessageBatches.increment();
+        }
+
+        batchWriteBuffer.flip();
+        flushBuffer(channel, batchWriteBuffer);
+        batchWriteBuffer.clear();
+    }
+
+    /**
+     * Writes the entire contents of the buffer to the blocking channel.
+     */
+    private void flushBuffer(WritableByteChannel channel,
+                             ByteBuffer bb)
+        throws IOException {
+
+        assert bb.position() == 0;
+
+        if (bb.limit() == 0) {
+            return;
+        }
+
+        final long start = System.nanoTime();
+
+        /*
+         * Even though the underlying channel is a blocking channel, there are
+         * circumstances where the channel.write() returns without writing all
+         * of the buffer, in seeming contradiction to the javadoc. One such
+         * example is when the peer fails in the middle of a write; in this
+         * case there is a "successful" partial write and the next write will
+         * result in an I/O exception. There may be other such examples as
+         * well, hence the defensive write loop below, which tries to carry
+         * writes through to their logical conclusion or to an IOE.
+         */
+        while (bb.remaining() > 0) {
+            int bytes = channel.write(bb);
+            nBytesWritten.add(bytes);
+            if (bytes == 0) {
+
+                /*
+                 * This should not happen since it's a blocking channel, but
+                 * the javadoc is vague on this subject, so yield control if
+                 * we are not making progress.
+                 */
+                Thread.yield();
+            }
+        }
+
+        nWriteNanos.add(System.nanoTime() - start);
+    }
+
+    /**
+     * Base message class for all messages exchanged in the protocol.
+     * Serialized layout of a message:
+     *   - opType (short)
+     *   - size of the body of the message (int)
+     *   - body of the message: message-specific fields
+     *
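+     * <p>
+     * (Editorial sketch; PING/Ping are hypothetical, not part of the
+     * original source.) A minimal body-less message declared inside the
+     * protocol class, satisfying the constructor requirement described
+     * below, looks like:
+     * <pre>
+     *   public class Ping extends Message {
+     *       public Ping() { super(); }
+     *       public Ping(ByteBuffer buffer) { }  // no body to de-serialize
+     *       public MessageOp getOp() { return PING; }
+     *   }
+     * </pre>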

+     * All non-abstract subclasses must implement a constructor with a
+     * ByteBuffer argument. This constructor is used during de-serialization
+     * to recreate the Message instance. It's considered good practice to
+     * declare all attributes of a message as final; it's a simple way to
+     * ensure that the above constructor has initialized all the attributes
+     * of the message.
+     */
+    public abstract class Message implements WireFormatable {
+
+        public abstract MessageOp getOp();
+
+        /**
+         * The default message consists of just the operation id and a
+         * zero-length body.
+         */
+        @Override
+        public ByteBuffer wireFormat() {
+            ByteBuffer messageBuffer = allocateInitializedBuffer(0);
+            messageBuffer.flip();
+            return messageBuffer;
+        }
+
+        @Override
+        public String toString() {
+            return getOp().toString();
+        }
+
+        /*
+         * For unit test support, so we can compare a message created for
+         * sending against a message received. Some message types need to
+         * override.
+         */
+        public boolean match(Message other) {
+            return Arrays.equals(wireFormat().array().clone(),
+                                 other.wireFormat().array().clone());
+        }
+
+        /**
+         * Allocates a buffer for the message with the header initialized.
+         *
+         * @param size size of the message contents following the header
+         *
+         * @return the initialized buffer
+         */
+        public ByteBuffer allocateInitializedBuffer(int size) {
+            ByteBuffer messageBuffer =
+                allocateWriteBuffer(MESSAGE_HEADER_SIZE + size);
+            LogUtils.writeShort(messageBuffer, getOp().getOpId());
+            LogUtils.writeInt(messageBuffer, size);
+            return messageBuffer;
+        }
+    }
+
+    /**
+     * Base class for simple messages: ones where performance is not of the
+     * utmost importance and reflection can be used to simplify message
+     * serialization and de-serialization.
+     */
+    protected abstract class SimpleMessage extends Message {
+
+        /**
+         * Assembles a sequence of arguments into its byte-based wire format.
+         * The method does the serialization in two steps. In step 1, it
+         * calculates the length of the buffer. In step 2, it assembles the
+         * bytes into the allocated buffer. The interpretive approach used
+         * here is suitable for the low performance requirements of the
+         * handshake protocol, but not for the core data stream itself. It's
+         * for this reason that the method is associated with the
+         * SimpleMessage class and not the Message class.
+         *
+         * @param arguments the arguments to be passed in the message
+         *
+         * @return a byte buffer containing the serialized form of the message
+         */
+        protected ByteBuffer wireFormat(Object... arguments) {
+            int size = 0;
+            for (final Object obj : arguments) {
+                size += wireFormatSize(obj);
+            }
+
+            /* Allocate the buffer and fill it up. */
+            final ByteBuffer buffer = allocateInitializedBuffer(size);
+
+            for (final Object obj : arguments) {
+                putWireFormat(buffer, obj);
+            }
+            buffer.flip();
+            return buffer;
+        }
+
+        /**
+         * Puts the bytes of the wire format for the object into the buffer
+         * at its current position.
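+         * <p>
+         * (Editorial sketch, not part of the original source.) For example,
+         * two fixed-width fields serialized back to back:
+         * <pre>
+         *   ByteBuffer b = allocateInitializedBuffer(4 + 8);
+         *   putWireFormat(b, Integer.valueOf(42));  // writes 4 bytes
+         *   putWireFormat(b, Long.valueOf(7L));     // writes 8 bytes
+         * </pre>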
+         *
+         * @param buffer the buffer
+         * @param obj the object
+         * @throws EnvironmentFailureException if the object is not supported
+         */
+        protected void putWireFormat(final ByteBuffer buffer,
+                                     final Object obj) {
+            final Class<?> cl = obj.getClass();
+            if (cl == Long.class) {
+                LogUtils.writeLong(buffer, ((Long) obj).longValue());
+            } else if (cl == Integer.class) {
+                LogUtils.writeInt(buffer, ((Integer) obj).intValue());
+            } else if (cl == Short.class) {
+                LogUtils.writeShort(buffer, ((Short) obj).shortValue());
+            } else if (cl == Byte.class) {
+                buffer.put(((Byte) obj).byteValue());
+            } else if (cl == Boolean.class) {
+                buffer.put(((Boolean) obj).booleanValue() ?
+                           (byte) 1 :
+                           (byte) 0);
+            } else if (cl == VLSN.class) {
+                LogUtils.writeLong(buffer, ((VLSN) obj).getSequence());
+            } else if (Enum.class.isAssignableFrom(cl)) {
+                /* An enum is stored as its identifier string. */
+                Enum<?> e = (Enum<?>) obj;
+                putString(e.name(), buffer);
+            } else if (cl == String.class) {
+
+                /*
+                 * A string is stored with its length followed by its
+                 * contents.
+                 */
+                putString((String) obj, buffer);
+            } else if (cl == Double.class) {
+                /* Treat a Double as a String. */
+                putString(((Double) obj).toString(), buffer);
+            } else if (cl == String[].class) {
+                String[] sa = (String[]) obj;
+                LogUtils.writeInt(buffer, sa.length);
+                for (String element : sa) {
+                    putString(element, buffer);
+                }
+            } else if (cl == byte[].class) {
+                putByteArray(buffer, (byte[]) obj);
+            } else if (obj instanceof NameIdPair) {
+                /* instanceof used to accommodate ReadOnlyNameIdPair. */
+                ((NameIdPair) obj).serialize(buffer, BinaryProtocol.this);
+            } else {
+                throw EnvironmentFailureException.unexpectedState(
+                    "Unknown type: " + cl);
+            }
+        }
+
+        /**
+         * Puts the bytes for the wire format of the specified byte array
+         * into the buffer at its current position.
+         *
+         * @param buffer the buffer
+         * @param ba the byte array
+         */
+        protected void putByteArray(ByteBuffer buffer, byte[] ba) {
+            LogUtils.writeInt(buffer, ba.length);
+            buffer.put(ba);
+        }
+
+        /**
+         * Returns the wire format size of the specified object.
+         *
+         * @param obj the object
+         * @return the size
+         * @throws EnvironmentFailureException if the object is not supported
+         */
+        protected int wireFormatSize(final Object obj) {
+            final Class<?> cl = obj.getClass();
+            if (cl == Long.class) {
+                return 8;
+            } else if (cl == Integer.class) {
+                return 4;
+            } else if (cl == Short.class) {
+                return 2;
+            } else if (cl == Byte.class) {
+                return 1;
+            } else if (cl == Boolean.class) {
+                return 1;
+            } else if (cl == VLSN.class) {
+                return 8;
+            } else if (Enum.class.isAssignableFrom(cl)) {
+                return stringSize(((Enum<?>) obj).name());
+            } else if (cl == String.class) {
+                return stringSize((String) obj);
+            } else if (cl == Double.class) {
+                return stringSize(((Double) obj).toString());
+            } else if (cl == String[].class) {
+                int size = 4; /* array length */
+                final String[] sa = (String[]) obj;
+                for (String element : sa) {
+                    size += stringSize(element);
+                }
+                return size;
+            } else if (cl == byte[].class) {
+                return 4 + ((byte[]) obj).length;
+            } else if (obj instanceof NameIdPair) {
+                /* instanceof used to accommodate ReadOnlyNameIdPair. */
+                return ((NameIdPair) obj).serializedSize(BinaryProtocol.this);
+            } else {
+                throw EnvironmentFailureException.unexpectedState
+                    ("Unknown type: " + cl);
+            }
+        }
+
+        /**
+         * Reconstitutes an array of strings.
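+         * <p>
+         * (Editorial sketch.) The wire form, matching putWireFormat above,
+         * is an int count followed by count length-prefixed strings:
+         * <pre>
+         *   [int n][string 0][string 1]...[string n-1]
+         * </pre>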
+         *
+         * @param buffer the buffer containing the serialized array
+         * @return the de-serialized array
+         */
+        protected String[] getStringArray(ByteBuffer buffer) {
+            String[] sa = new String[LogUtils.readInt(buffer)];
+            for (int i = 0; i < sa.length; i++) {
+                sa[i] = getString(buffer);
+            }
+            return sa;
+        }
+
+        protected byte[] getByteArray(ByteBuffer buffer) {
+            byte[] ba = new byte[LogUtils.readInt(buffer)];
+            buffer.get(ba);
+            return ba;
+        }
+
+        protected boolean getBoolean(ByteBuffer buffer) {
+            byte b = buffer.get();
+            if (b == 0) {
+                return false;
+            } else if (b == 1) {
+                return true;
+            } else {
+                throw EnvironmentFailureException.unexpectedState
+                    ("Unknown boolean value: " + b);
+            }
+        }
+
+        protected VLSN getVLSN(ByteBuffer buffer) {
+            long vlsn = LogUtils.readLong(buffer);
+            return (vlsn == VLSN.NULL_VLSN_SEQUENCE) ?
+                VLSN.NULL_VLSN :
+                new VLSN(vlsn);
+        }
+
+        protected <T extends Enum<T>> T getEnum(Class<T> enumType,
+                                                ByteBuffer buffer) {
+            String enumName = getString(buffer);
+            return Enum.valueOf(enumType, enumName);
+        }
+
+        protected Double getDouble(ByteBuffer buffer) {
+            return new Double(getString(buffer));
+        }
+
+        protected NameIdPair getNameIdPair(ByteBuffer buffer) {
+            return NameIdPair.deserialize(buffer, BinaryProtocol.this);
+        }
+    }
+
+    /**
+     * The base class for reject responses to requests.
+     */
+    public abstract class RejectMessage extends SimpleMessage {
+
+        /* Must not be null. */
+        protected String errorMessage;
+
+        protected RejectMessage(String errorMessage) {
+            super();
+
+            /*
+             * Replace a null message with a blank string, since
+             * SimpleMessage assumes non-null contents.
+             */
+            if (errorMessage == null) {
+                this.errorMessage = " ";
+            } else {
+                this.errorMessage = errorMessage;
+            }
+        }
+
+        @Override
+        public ByteBuffer wireFormat() {
+            return wireFormat(errorMessage);
+        }
+
+        public RejectMessage(ByteBuffer buffer) {
+            errorMessage = getString(buffer);
+        }
+
+        public String getErrorMessage() {
+            return errorMessage;
+        }
+
+        @Override
+        public String toString() {
+            return errorMessage;
+        }
+    }
+
+    public class ProtocolError extends RejectMessage {
+
+        public ProtocolError(String errorMessage) {
+            super(errorMessage);
+        }
+
+        public ProtocolError(ByteBuffer buffer) {
+            super(buffer);
+        }
+
+        @Override
+        public MessageOp getOp() {
+            return PROTOCOL_ERROR;
+        }
+    }
+
+    /**
+     * Version broadcasts the sending node's protocol version.
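+     * <p>
+     * (Editorial sketch, not part of the original source; assumes code
+     * running inside a protocol subclass.) A server might validate the
+     * client's announced version like this:
+     * <pre>
+     *   ClientVersion clientVersion = read(channel, ClientVersion.class);
+     *   if (clientVersion.getVersion() != codeVersion) {
+     *       write(new IncompatibleVersion("version mismatch"), channel);
+     *   } else {
+     *       write(new ServerVersion(), channel);
+     *   }
+     * </pre>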
+ */ + abstract class ProtocolVersion extends SimpleMessage { + + private final int version; + + private final int nodeId; + + public ProtocolVersion(int version) { + super(); + this.version = version; + this.nodeId = BinaryProtocol.this.nameIdPair.getId(); + } + + @Override + public ByteBuffer wireFormat() { + return wireFormat(version, nodeId); + } + + public ProtocolVersion(ByteBuffer buffer) { + version = LogUtils.readInt(buffer); + nodeId = LogUtils.readInt(buffer); + } + + /** + * @return the version + */ + public int getVersion() { + return version; + } + + /** + * The nodeId of the sender + * + * @return nodeId + */ + public int getNodeId() { + return nodeId; + } + } + + public class ClientVersion extends ProtocolVersion { + + public ClientVersion() { + super(codeVersion); + } + + public ClientVersion(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return CLIENT_VERSION; + } + } + + public class ServerVersion extends ProtocolVersion { + + public ServerVersion() { + super(codeVersion); + } + + public ServerVersion(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return SERVER_VERSION; + } + } + + public class IncompatibleVersion extends RejectMessage { + public IncompatibleVersion(String message) { + super(message); + } + + public IncompatibleVersion(ByteBuffer buffer) { + super(buffer); + } + + @Override + public MessageOp getOp() { + return INCOMPATIBLE_VERSION; + } + } + + /** + * Thrown in response to an unexpected response to a request. + */ + @SuppressWarnings("serial") + static public class ProtocolException extends InternalException { + private final Message unexpectedMessage; + + private final Class cl; + + /** + * Constructor used for message sequencing errors. + */ + public ProtocolException(Message unexpectedMessage, + Class cl) { + super(); + this.unexpectedMessage = unexpectedMessage; + this.cl = cl; + } + + public ProtocolException(String message) { + super(message); + this.unexpectedMessage = null; + this.cl = null; + } + + @Override + public String getMessage() { + return (unexpectedMessage != null) ? + ("Expected message type: " + cl + " but found: " + + unexpectedMessage.getClass() + + + /* + * Include information about the message, which is + * particularly useful for a RejectMessage + */ + ": " + unexpectedMessage) : + + super.getMessage(); + } + + public Message getUnexpectedMessage() { + return unexpectedMessage; + } + } +} diff --git a/src/com/sleepycat/je/rep/utilint/BinaryProtocolStatDefinition.java b/src/com/sleepycat/je/rep/utilint/BinaryProtocolStatDefinition.java new file mode 100644 index 0000000..f688f3d --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/BinaryProtocolStatDefinition.java @@ -0,0 +1,186 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Per-stat Metadata for each BinaryProtocol statistics. 
+ */ +public class BinaryProtocolStatDefinition { + + public static final String GROUP_NAME = "BinaryProtocol"; + public static final String GROUP_DESC = + "Network traffic due to the replication stream."; + + public static final String N_READ_NANOS_NAME = + "nReadNanos"; + public static final String N_READ_NANOS_DESC = + "The number of nanoseconds spent reading from the network channel."; + public static final StatDefinition N_READ_NANOS = + new StatDefinition( + N_READ_NANOS_NAME, + N_READ_NANOS_DESC); + + public static final String N_WRITE_NANOS_NAME = + "nWriteNanos"; + public static final String N_WRITE_NANOS_DESC = + "The number of nanoseconds spent writing to the network channel."; + public static final StatDefinition N_WRITE_NANOS = + new StatDefinition( + N_WRITE_NANOS_NAME, + N_WRITE_NANOS_DESC); + + public static final String N_BYTES_READ_NAME = + "nBytesRead"; + public static final String N_BYTES_READ_DESC = + "The number of bytes of Replication Stream read over the network. It " + + "does not include the TCP/IP overhead."; + public static final StatDefinition N_BYTES_READ = + new StatDefinition( + N_BYTES_READ_NAME, + N_BYTES_READ_DESC); + + public static final String N_MESSAGES_READ_NAME = + "nMessagesRead"; + public static final String N_MESSAGES_READ_DESC = + "The number of Replication Stream messages read over the network."; + public static final StatDefinition N_MESSAGES_READ = + new StatDefinition( + N_MESSAGES_READ_NAME, + N_MESSAGES_READ_DESC); + + public static final String N_BYTES_WRITTEN_NAME = + "nBytesWritten"; + public static final String N_BYTES_WRITTEN_DESC = + "The number of Replication Stream bytes written over the network."; + public static final StatDefinition N_BYTES_WRITTEN = + new StatDefinition( + N_BYTES_WRITTEN_NAME, + N_BYTES_WRITTEN_DESC); + + public static final String N_MESSAGES_WRITTEN_NAME = + "nMessagesWritten"; + public static final String N_MESSAGES_WRITTEN_DESC = + "The total number of Replication Stream messages written over the " + + "network."; + public static final StatDefinition N_MESSAGES_WRITTEN = + new StatDefinition( + N_MESSAGES_WRITTEN_NAME, + N_MESSAGES_WRITTEN_DESC); + + public static final String N_MESSAGES_BATCHED_NAME = + "nMessagesBatched"; + public static final String N_MESSAGES_BATCHED_DESC = + "The number of Replication Stream messages that were batched into " + + "larger network level writes instead of being written " + + "individually (a subset of N_MESSAGES_WRITTEN.)"; + public static final StatDefinition N_MESSAGES_BATCHED = + new StatDefinition( + N_MESSAGES_BATCHED_NAME, + N_MESSAGES_BATCHED_DESC); + + public static final String N_MESSAGE_BATCHES_NAME = + "nMessageBatches"; + public static final String N_MESSAGE_BATCHES_DESC = + "The number of message batches written."; + public static final StatDefinition N_MESSAGE_BATCHES = + new StatDefinition( + N_MESSAGE_BATCHES_NAME, + N_MESSAGE_BATCHES_DESC); + + public static final String MESSAGE_READ_RATE_NAME = + "messagesReadPerSecond"; + public static final String MESSAGE_READ_RATE_DESC = + "Incoming message throughput."; + public static final StatDefinition MESSAGE_READ_RATE = + new StatDefinition( + MESSAGE_READ_RATE_NAME, + MESSAGE_READ_RATE_DESC); + + public static final String MESSAGE_WRITE_RATE_NAME = + "messagesWrittenPerSecond"; + public static final String MESSAGE_WRITE_RATE_DESC = + "Outgoing message throughput."; + public static final StatDefinition MESSAGE_WRITE_RATE = + new StatDefinition( + MESSAGE_WRITE_RATE_NAME, + MESSAGE_WRITE_RATE_DESC); + + public 
static final String BYTES_READ_RATE_NAME = + "bytesReadPerSecond"; + public static final String BYTES_READ_RATE_DESC = + "Bytes read throughput."; + public static final StatDefinition BYTES_READ_RATE = + new StatDefinition( + BYTES_READ_RATE_NAME, + BYTES_READ_RATE_DESC); + + public static final String BYTES_WRITE_RATE_NAME = + "bytesWrittenPerSecond"; + public static final String BYTES_WRITE_RATE_DESC = + "Bytes written throughput."; + public static final StatDefinition BYTES_WRITE_RATE = + new StatDefinition( + BYTES_WRITE_RATE_NAME, + BYTES_WRITE_RATE_DESC); + + public static final String N_ACK_MESSAGES_NAME = + "nAckMessages"; + public static final String N_ACK_MESSAGES_DESC = + "Count of all singleton ACK messages."; + public static final StatDefinition N_ACK_MESSAGES = + new StatDefinition( + N_ACK_MESSAGES_NAME, + N_ACK_MESSAGES_DESC); + + public static final String N_GROUP_ACK_MESSAGES_NAME = + "nGroupAckMessages"; + public static final String N_GROUP_ACK_MESSAGES_DESC = + "Count of all group ACK messages."; + public static final StatDefinition N_GROUP_ACK_MESSAGES = + new StatDefinition( + N_GROUP_ACK_MESSAGES_NAME, + N_GROUP_ACK_MESSAGES_DESC); + + public static final String N_MAX_GROUPED_ACKS_NAME = + "nMaxGroupedAcks"; + public static final String N_MAX_GROUPED_ACKS_DESC = + "Max number of acks sent via a single group ACK message."; + public static final StatDefinition N_MAX_GROUPED_ACKS = + new StatDefinition( + N_MAX_GROUPED_ACKS_NAME, + N_MAX_GROUPED_ACKS_DESC); + + public static final String N_GROUPED_ACKS_NAME = + "nGroupedAcks"; + public static final String N_GROUPED_ACKS_DESC = + "Sum of all acks sent via group ACK messages."; + public static final StatDefinition N_GROUPED_ACKS = + new StatDefinition( + N_GROUPED_ACKS_NAME, + N_GROUPED_ACKS_DESC); + + public static final String N_ENTRIES_WRITTEN_OLD_VERSION_NAME = + "nEntriesOldVersion"; + public static final String N_ENTRIES_WRITTEN_OLD_VERSION_DESC = + "The number of messages containing log entries that were written to " + + "the replication stream using the previous log format, to support" + + " replication to a replica running an earlier version during an " + + "upgrade."; + public static final StatDefinition N_ENTRIES_WRITTEN_OLD_VERSION = + new StatDefinition( + N_ENTRIES_WRITTEN_OLD_VERSION_NAME, + N_ENTRIES_WRITTEN_OLD_VERSION_DESC); +} diff --git a/src/com/sleepycat/je/rep/utilint/DbCacheSizeRepEnv.java b/src/com/sleepycat/je/rep/utilint/DbCacheSizeRepEnv.java new file mode 100644 index 0000000..aa5b918 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/DbCacheSizeRepEnv.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.io.File; +import java.util.Map; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; + +/** + * Class for opening a ReplicatedEnvironment from a JE standalone utility, + * DbCacheSize. 
+ * Must be instantiated from standalone JE using Class.forName.
+ */
+public class DbCacheSizeRepEnv
+    implements com.sleepycat.je.utilint.DbCacheSizeRepEnv {
+
+    private static final int START_PORT = 30100;
+    private static final int PORT_RANGE = 100;
+
+    @Override
+    public Environment open(File envHome,
+                            EnvironmentConfig envConfig,
+                            Map<String, String> repParams) {
+        final String host = "localhost";
+        final FreePortLocator locator = new FreePortLocator
+            (host, START_PORT, START_PORT + PORT_RANGE);
+        final int port = locator.next();
+        final String hostPort = host + ':' + port;
+        final ReplicationConfig repConfig = new ReplicationConfig
+            ("DbCacheSizeGroup", "DbCacheSizeNode", hostPort);
+        repConfig.setHelperHosts(hostPort);
+        for (Map.Entry<String, String> entry : repParams.entrySet()) {
+            repConfig.setConfigParam(entry.getKey(), entry.getValue());
+        }
+        return new ReplicatedEnvironment(envHome, repConfig, envConfig);
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/DbDumpGroup.java b/src/com/sleepycat/je/rep/utilint/DbDumpGroup.java
new file mode 100644
index 0000000..4812a32
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/DbDumpGroup.java
@@ -0,0 +1,161 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.io.File;
+import java.io.PrintStream;
+import java.util.LinkedList;
+import java.util.List;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.rep.impl.RepGroupDB;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+
+/**
+ * @hidden Dumps the contents of the replication group database. Reads the
+ * database directly, using a read-only Environment.
+ *
+ * For internal use only. JE users should now use
+ * com.sleepycat.je.rep.DbGroupAdmin to display group information.
+ *
+ *

        + *   DbDumpGroup -h <envHome>
        + * 
+ */
+public class DbDumpGroup {
+    private final PrintStream out;
+    private File envHome = null;
+    private boolean dumpCount = false;
+
+    private DbDumpGroup(PrintStream out) {
+        this.out = out;
+    }
+
+    public static void main(String[] args) throws Exception {
+        DbDumpGroup dumper = new DbDumpGroup(System.out);
+        dumper.parseArgs(args);
+        try {
+            dumper.run();
+        } catch (Throwable e) {
+            e.printStackTrace(System.err);
+            System.exit(1);
+        }
+    }
+
+    public void run() {
+        out.println("For internal use only. Consider using the public " +
+                    "utility com.sleepycat.je.rep.DbGroupAdmin when " +
+                    "displaying group information.");
+
+        out.println("Environment: " + envHome);
+        if (dumpCount) {
+            dumpCount();
+        }
+        dumpGroup();
+    }
+
+    /**
+     * Dumps the data item count of each database in the specified
+     * environment.
+     */
+    private void dumpCount() {
+
+        /*
+         * Initialize an environment configuration, and create an
+         * environment.
+         */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setReadOnly(true);
+        envConfig.setAllowCreate(false);
+        Environment env = new Environment(envHome, envConfig);
+
+        List<String> databaseNames = new LinkedList<String>();
+        databaseNames.addAll(env.getDatabaseNames());
+        for (String dbName : databaseNames) {
+
+            DatabaseConfig dbCfg = new DatabaseConfig();
+            dbCfg.setAllowCreate(false);
+            dbCfg.setReadOnly(true);
+            DbInternal.setUseExistingConfig(dbCfg, true);
+
+            Database db = env.openDatabase(null, dbName, dbCfg);
+            out.println("Database: " + dbName + ", Count: " + db.count());
+            db.close();
+        }
+
+        env.close();
+    }
+
+    /**
+     * Dumps the contents of the replication group database.
+     */
+    private void dumpGroup() {
+        RepGroupImpl group = RepGroupDB.getGroup(envHome);
+        out.println(group);
+    }
+
+    /**
+     * Parses the command line parameters.
+     *
+     * @param argv the input command line parameters
+     */
+    public void parseArgs(String argv[]) {
+
+        int argc = 0;
+        int nArgs = argv.length;
+
+        if (nArgs == 0) {
+            printUsage(null);
+            System.exit(0);
+        }
+
+        while (argc < nArgs) {
+            String thisArg = argv[argc++];
+            if (thisArg.equals("-h")) {
+                if (argc < nArgs) {
+                    envHome = new File(argv[argc++]);
+                } else {
+                    printUsage("-h requires an argument");
+                }
+            } else if (thisArg.equals("-dumpCount")) {
+                dumpCount = true;
+            } else {
+                printUsage(thisArg + " is not a valid argument");
+            }
+        }
+
+        if (envHome == null) {
+            printUsage("-h is a required argument");
+        }
+    }
+
+    /**
+     * Prints the usage of this utility.
+     *
+     * @param msg an optional message to print before the usage
+     */
+    private void printUsage(String msg) {
+        if (msg != null) {
+            out.println(msg);
+        }
+
+        out.println("Usage: " + DbDumpGroup.class.getName());
+        out.println(" -h <dir>     # environment home directory");
+        out.println(" -dumpCount   # dump all databases' count in\n" +
+                    "                this Environment");
+        System.exit(-1);
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/DbFeederPrintLog.java b/src/com/sleepycat/je/rep/utilint/DbFeederPrintLog.java
new file mode 100644
index 0000000..bcc5ca2
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/DbFeederPrintLog.java
@@ -0,0 +1,266 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.io.File; +import java.io.IOException; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.stream.FeederReader; +import com.sleepycat.je.rep.stream.FeederSyncupReader; +import com.sleepycat.je.rep.stream.OutputWireRecord; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.util.DbPrintLog; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * @hidden + * Dumps the log using VLSNReaders. These are special FileReaders which use the + * vlsnIndex to optimize their traversal of the log. Most common use for this + * utility is as a debugging device for the VLSNReaders, because it mimics the + * way feeders and syncup search the log. + */ +public class DbFeederPrintLog { + + /** + * Dump a JE log into human readable form. 
+ * @throws InterruptedException + */ + private void dump(File envHome, + String groupName, + String nodeName, + String host, + boolean forward, + long startLsn, + VLSN startVLSN, + @SuppressWarnings("unused") boolean verbose) + throws IOException, + EnvironmentNotFoundException, + EnvironmentLockedException, + DatabaseException, + InterruptedException, + ChecksumException { + + /* Create a single replicator */ + Durability durability = new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(false); + envConfig.setTransactional(true); + envConfig.setDurability(durability); + + ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setConfigParam + (ReplicationConfig.ENV_CONSISTENCY_TIMEOUT, "1 min"); + repConfig.setGroupName(groupName); + repConfig.setNodeName(nodeName); + int port = Integer.parseInt(RepParams.DEFAULT_PORT.getDefault()); + String hostName = host + ":" + port; + repConfig.setNodeHostPort(hostName); + repConfig.setHelperHosts(hostName); + final ReplicatedEnvironment rep = + RepInternal.createDetachedEnv(envHome, repConfig, envConfig); + try { + ReplicatedEnvironment.State state = rep.getState(); + if (state != ReplicatedEnvironment.State.DETACHED) { + throw EnvironmentFailureException.unexpectedState + ("joinState=" + state); + } + + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(rep); + VLSNIndex vlsnIndex = + RepInternal.getNonNullRepImpl(rep).getVLSNIndex(); + if (forward) { + startLsn = DbLsn.NULL_LSN; + } else { + startLsn = envImpl.getFileManager().getLastUsedLsn(); + } + + int readBufferSize = + envImpl.getConfigManager().getInt + (EnvironmentParams.LOG_ITERATOR_READ_SIZE); + FeederReader feederReader = null; + FeederSyncupReader backwardsReader = null; + + VLSN scanVLSN = startVLSN; + if (startVLSN.equals(VLSN.NULL_VLSN)) { + RepImpl repImpl = RepInternal.getNonNullRepImpl(rep); + VLSNRange range = repImpl.getVLSNIndex().getRange(); + if (forward) { + scanVLSN = range.getFirst(); + } else { + scanVLSN = range.getLast(); + } + } + if (scanVLSN.equals(VLSN.NULL_VLSN)) { + /* Give up, no VLSN entries in the index. */ + System.out.println(""); + System.out.println(""); + return; + } + + if (forward) { + feederReader = new FeederReader(envImpl, vlsnIndex, startLsn, + readBufferSize); + feederReader.initScan(scanVLSN); + } else { + backwardsReader = new FeederSyncupReader + (envImpl, vlsnIndex, + startLsn, readBufferSize, + scanVLSN, + DbLsn.NULL_LSN); + } + + OutputWireRecord record = null; + System.out.println(""); + long lastLsn = 0; + do { + if (forward) { + record = feederReader.scanForwards(scanVLSN, 0); + scanVLSN = scanVLSN.getNext(); + lastLsn = feederReader.getLastLsn(); + } else { + record = backwardsReader.scanBackwards(scanVLSN); + scanVLSN = scanVLSN.getPrev(); + lastLsn = backwardsReader.getLastLsn(); + } + if (record != null) { + System.out.println + ("lsn=" + DbLsn.getNoFormatString(lastLsn) + " " + record); + } + } while (record != null); + + System.out.println(""); + } finally { + rep.close(); + } + } + + public static void main(String[] argv) { + try { + int whichArg = 0; + String groupName = "repGroup"; + String host = "localHost"; + String nodeName = "Node1"; + long startLsn = DbLsn.NULL_LSN; + VLSN startVLSN = VLSN.NULL_VLSN; + boolean verbose = true; + boolean forward = true; + + /* Default to looking in current directory. 
*/ + File envHome = new File("."); + Key.DUMP_TYPE = DumpType.BINARY; + + while (whichArg < argv.length) { + String nextArg = argv[whichArg]; + if (nextArg.equals("-h")) { + whichArg++; + envHome = new File(CmdUtil.getArg(argv, whichArg)); + } else if (nextArg.equals("-s")) { + whichArg++; + String arg = CmdUtil.getArg(argv, whichArg); + int slashOff = arg.indexOf("/"); + if (slashOff < 0) { + long startFileNum = CmdUtil.readLongNumber(arg); + startLsn = DbLsn.makeLsn(startFileNum, 0); + } else { + long startFileNum = + CmdUtil.readLongNumber(arg.substring(0, slashOff)); + long startOffset = CmdUtil.readLongNumber + (arg.substring(slashOff + 1)); + startLsn = DbLsn.makeLsn(startFileNum, startOffset); + } + } else if (nextArg.equals("-e")) { + whichArg++; + String arg = CmdUtil.getArg(argv, whichArg); + int slashOff = arg.indexOf("/"); + /* SuppressWarnings because -e is not yet implemented.*/ + if (slashOff < 0) { + @SuppressWarnings("unused") + long endFileNum = CmdUtil.readLongNumber(arg); + } else { + @SuppressWarnings("unused") + long endFileNum = + CmdUtil.readLongNumber(arg.substring(0, slashOff)); + @SuppressWarnings("unused") + long endOffset = CmdUtil.readLongNumber + (arg.substring(slashOff + 1)); + } + } else if (nextArg.equals("-q")) { + verbose = false; + } else if (nextArg.equals("-backward")) { + forward = false; + } else if (nextArg.equals("-vlsn")) { + whichArg++; + String arg = CmdUtil.getArg(argv, whichArg); + startVLSN = new VLSN(CmdUtil.readLongNumber(arg)); + } else { + System.err.println + (nextArg + " is not a supported option."); + usage(); + System.exit(-1); + } + whichArg++; + } + + DbFeederPrintLog printer = new DbFeederPrintLog(); + printer.dump(envHome, groupName, nodeName, host, forward, startLsn, + startVLSN, verbose); + + } catch (Throwable e) { + e.printStackTrace(); + System.out.println(e.getMessage()); + usage(); + System.exit(1); + } + } + + private static void usage() { + System.out.println("Usage: " + + CmdUtil.getJavaCommand(DbPrintLog.class)); + System.out.println(" -h "); + System.out.println(" -e "); + System.out.println(" -s "); + System.out.println(" -backward if specified, scan is backwards"); + System.out.println(" -vlsn "); + System.out.println(" -tx "); + System.out.println(" -q if specified, concise version is printed"); + System.out.println(" Default is verbose version.)"); + System.out.println("All arguments are optional"); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/DbNullNode.java b/src/com/sleepycat/je/rep/utilint/DbNullNode.java new file mode 100644 index 0000000..ed4d153 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/DbNullNode.java @@ -0,0 +1,402 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.utilint; + +import java.io.File; +import java.util.Date; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepGroupDB; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.utilint.CmdUtil; + +/** + * @hidden + * + * DbNullNode is an internal debugging aid that simply starts up a node as part + * of a group. It's primarily useful for diagnosing node start up bugs, since + * it enables you to start up the nodes in a group if you have the associated + * log files. + *

+ * If the environment does not exist and -createEnv is specified, it will
+ * create the environment (with the aid of helpers), using network restore to
+ * initialize it. This behavior can be used to advantage at a Replica node
+ * if, for example, the log files have been damaged and the Replica cannot
+ * proceed. It effectively restores the environment for the Replica, which
+ * can then proceed with the restored files using the actual application.
+ *
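+ * <p>
+ * (Editorial sketch; the path, node name, group name, and ports below are
+ * hypothetical.) For example, to recreate a damaged replica environment via
+ * network restore:
+ * <pre>
+ *   java com.sleepycat.je.rep.utilint.DbNullNode -h /path/to/env \
+ *        -nodeName Node3 -groupName MyGroup -hostPort node3:5001 \
+ *        -helpers node1:5001,node2:5001 -createEnv
+ * </pre>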

+ * Similarly, if a node does not exist in the environment and -createNode is
+ * specified, it will create a new node in either the existing environment
+ * or, if -createEnv is also specified, in a new environment.
+ *

+ * Note that the hostnames mentioned in the member database must be
+ * resolvable before the group can start up. The hostnames can be obtained
+ * by running DbDumpGroup. The hostnames can be "faked" on Linux by adding
+ * appropriate entries to /etc/hosts, or CNAME records to the DNS.
+ *
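+ * <p>
+ * (Editorial sketch; the names and addresses below are hypothetical.) For
+ * example, in /etc/hosts:
+ * <pre>
+ *   192.168.1.201   node1.example.com   node1
+ *   192.168.1.202   node2.example.com   node2
+ * </pre>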

+ * If the hosts all use the same port, you will need to make provision for
+ * virtual IP addresses that are then associated with the host. On Linux
+ * this can be done by adding an entry of the following type for each
+ * virtual IP in /etc/network/interfaces:
+ *

+ * <pre>
+ *   iface eth0:1 inet static
+ *       address 192.168.1.201
+ *       netmask 255.255.255.0
+ *       gateway 192.168.2.1
+ *       hwaddress ether XX:YY:ZZ:AA:BB:CC
+ * </pre>
+ *

        + * Use eth0:2, eth0:3, etc for each new virtual ip that is needed. Also, + * substitute the real NIC address for XX:YY:ZZ:AA:BB:CC in the iface stanza. + * + * Be careful when modifying your machine's configuration information since it + * can have unintended side-effects. So back up the above config files before + * making any changes. + */ +public class DbNullNode { + private static final String USAGE = + + "usage: " + CmdUtil.getJavaCommand(DbNullNode.class) + "\n" + + " -h \n" + + " -nodeName \n" + + " [-groupName ]\n" + + " [-hostPort ]\n" + + " [-createEnv]\n" + + " [-createNode]\n" + + " [-helpers , ...]\n" + + " [-designatedPrimary] \n" + + " [-electableGroupSize] \n" + + " [-updates \n" + + " [-nodeType ELECTABLE|SECONDARY]\n" + + " [-cacheMode ENUM_NAME]\n"; + + private File envHome; + private String nodeName; + private String hostPort; + private String groupName; + public String helpers; + + /* Permit creation of a new environment. */ + private boolean createNode = false; + private boolean createEnv = false; + + private final int pollIntervalMs = 60*1000; + + private boolean designatedPrimary = false; + + private int electableGroupSize = 0; + + /** + * If non-zero write the number of updates at each poll period when the + * node is a master to simulate write traffic. + */ + private int updates = 0; + + private NodeType nodeType = NodeType.ELECTABLE; + private CacheMode cacheMode = CacheMode.DEFAULT; + + private static final DatabaseConfig dbConfig = new DatabaseConfig(); + private static final TransactionConfig txnConfig = TransactionConfig.DEFAULT; + + static { + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(false); + }; + + public static void main(String[] argv) { + + DbNullNode runAction = new DbNullNode(); + runAction.parseArgs(argv); + + try { + runAction.run(); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.err); + System.exit(1); + } + } + + private void parseArgs(String argv[]) { + + int argc = 0; + int nArgs = argv.length; + + if (nArgs < 4) { + printUsage(null); + System.exit(0); + } + + while (argc < nArgs) { + String thisArg = argv[argc++]; + if (thisArg.equals("-h")) { + if (argc < nArgs) { + envHome = new File(argv[argc++]); + } else { + printUsage("-h requires an argument"); + } + } else if (thisArg.equals("-nodeName")) { + if (argc < nArgs) { + nodeName = argv[argc++]; + } else { + printUsage("-nodeName requires an argument"); + } + } else if (thisArg.equals("-hostPort")) { + if (argc < nArgs) { + hostPort = argv[argc++]; + } else { + printUsage("-hostPort requires an argument"); + } + } else if (thisArg.equals("-groupName")) { + if (argc < nArgs) { + groupName = argv[argc++]; + } else { + printUsage("-groupName requires an argument"); + } + } else if (thisArg.equals("-createNode")) { + createNode = true; + } else if (thisArg.equals("-createEnv")) { + createEnv = true; + } else if (thisArg.equals("-helpers")) { + if (argc < nArgs) { + helpers = argv[argc++]; + } else { + printUsage("-helpers requires an argument"); + } + } else if ("-designatedPrimary".equals(thisArg)) { + designatedPrimary = true ; + } else if ("-electableGroupSize".equals(thisArg)) { + if (argc < nArgs) { + electableGroupSize = Integer.parseInt(argv[argc++]); + } else { + printUsage("-electableGroupSize requires a group size " + + "argument"); + } + } else if ("-updates".equals(thisArg)) { + if (argc < nArgs) { + updates = Integer.parseInt(argv[argc++]); + } else { + printUsage("-updates requires number of 
updates/period" + + "argument"); + } + } else if ("-nodeType".equals(thisArg)) { + if (argc < nArgs) { + nodeType = NodeType.valueOf(argv[argc++]); + } else { + printUsage("-nodeType requires a type argument"); + } + } else if ("-cacheMode".equals(thisArg)) { + if (argc < nArgs) { + cacheMode = CacheMode.valueOf(argv[argc++]); + } else { + printUsage("-cacheMode requires a type argument"); + } + } else { + printUsage(thisArg + " is not a valid argument"); + } + } + if (createNode) { + /* Verify that the create arguments are all specified. */ + if ((nodeName == null) || + (hostPort == null) || + (groupName == null) || + (helpers == null)) { + printUsage("groupName, nodeName, nodeHost and helpers " + + "must all be specified when using -createNode"); + } + } + if (createEnv) { + if ((groupName == null) || (helpers == null)) { + printUsage("groupName and helpers " + + "must all be specified when using -createEnv"); + } + } + } + + + private void run() { + while (true) { + try { + checkParameters(); + openAndIdle(); + break; + } catch (InsufficientLogException ile) { + System.err.println("Restoring environment:" + envHome); + NetworkRestore networkRestore = new NetworkRestore(); + NetworkRestoreConfig config = new NetworkRestoreConfig(); + networkRestore.execute(ile, config); + System.err.println("Restored environment:" + envHome); + createEnv = false; /* The environment has been created. */ + continue; + } + } + } + + /* + * Verifies that the parameters supplied on the command line are consistent + * with any stored state in the environment. + */ + private void checkParameters() { + try { + if (!envHome.exists()) { + printUsage("Directory:" + envHome + " does not exist."); + } + final RepGroupImpl group = RepGroupDB.getGroup(envHome); + if (createEnv) { + printUsage("Environment exists:" + envHome + + "but -createEnv was specified."); + } + final RepNodeImpl node = group.getNode(nodeName); + if (node == null) { + if (! createNode) { + printUsage("The node:" + nodeName + + " is not a member of the group:" + group + + ". Use -createNode to create a new one."); + } + } else { + /* Node exists, check arguments if any. */ + if (groupName == null) { + groupName = group.getName(); + } else if (!groupName.equals(group.getName())) { + printUsage("-groupname:" + groupName + + ", does not match the name:" + group.getName() + + " in the environment."); + } + + if (hostPort == null) { + hostPort = node.getHostPortPair(); + } else if (!hostPort.equals(node.getHostPortPair())) { + System.err.println("-hostPort:" + hostPort + + ", does not match the hostPort:" + + node.getHostPortPair() + + " in the environment. " + + "Continuing ..."); + } + } + } catch (EnvironmentNotFoundException enf) { + if (!createEnv) { + printUsage("No existing environment:" + envHome + + ". Use -createEnv to create one"); + } + } + } + + /** + * Opens a replicated environment and idles in a loop printing out its + * state periodically. 
+ * + * @throws InsufficientLogException so that the log files can be restored + * if necessary + */ + private void openAndIdle() + throws InsufficientLogException { + + ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setNodeName(nodeName); + repConfig.setGroupName(groupName); + repConfig.setNodeHostPort(hostPort); + repConfig.setDesignatedPrimary(designatedPrimary); + repConfig.setElectableGroupSizeOverride(electableGroupSize); + repConfig.setNodeType(nodeType); + + if (helpers != null) { + repConfig.setHelperHosts(helpers); + } + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(createEnv); + envConfig.setCacheMode(cacheMode); + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envHome, repConfig, envConfig); + RepNode repNode = RepInternal.getNonNullRepImpl(repEnv).getRepNode(); + + System.err.println("Handle created:" + repEnv + + " Node idling indefinitely..."); + try { + while (true) { + System.out.println(new Date() + + " State:" + repEnv.getState() + " " + + " VLSN range:" + + repNode.getVLSNIndex().getRange() + + " DTVLSN:" + repNode.getDTVLSN() + + repNode.dumpState()); + writeUpdates(repEnv); + Thread.sleep(pollIntervalMs); + } + } catch (InterruptedException e) { + System.err.println("Exiting"); + } + } + + /** + * Write updates if master. + */ + private void writeUpdates(ReplicatedEnvironment repEnv) { + if ((updates == 0) || ! repEnv.getState().isMaster()) { + return; + } + + Database db = null; + Transaction txn = null; + try { + txn = repEnv.beginTransaction(null, TransactionConfig.DEFAULT); + db = repEnv.openDatabase(txn, "DbNullDb", dbConfig); + txn.commit(); + txn = null; + DatabaseEntry key = new DatabaseEntry(new byte[]{1}); + IntegerBinding.intToEntry(0, key); + txn = repEnv.beginTransaction(null, txnConfig); + for (int i = 0; i < updates; i++) { + db.put(txn, key, key); + } + txn.commit(); + System.err.println(new Date() + " Wrote " + updates + " updates"); + txn = null; + } catch (Exception e) { + System.err.println("Update failed:" + e.getMessage()); + } finally { + if (txn != null) { + txn.abort(); + } + if (db != null) { + db.close(); + } + } + } + + private void printUsage(String msg) { + if (msg != null) { + System.out.println(msg); + } + System.out.println(USAGE); + System.exit(-1); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/DbRepRunAction.java b/src/com/sleepycat/je/rep/utilint/DbRepRunAction.java new file mode 100644 index 0000000..746ef31 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/DbRepRunAction.java @@ -0,0 +1,141 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.io.File;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * @hidden
+ * DbRepRunAction is a debugging aid that invokes a ReplicatedEnvironment
+ * recovery from the command line.
+ */
+public class DbRepRunAction {
+    private static final String USAGE =
+
+        "usage: " + CmdUtil.getJavaCommand(DbRepRunAction.class) + "\n" +
+        " -h <dir>          # environment home directory\n" +
+        " -group <group>    # groupName\n" +
+        " -name <node>      # nodeName\n" +
+        " -host <host>      # nodeHost\n" +
+        " -showVLSN         (dump vlsn index)\n" +
+        " -checkpoint       (forced)\n";
+
+    private File envHome;
+    private String nodeName;
+    private String nodeHost;
+    private String groupName;
+    private boolean showVLSN;
+    private boolean doCheckpoint;
+
+    public static void main(String[] argv) {
+
+        DbRepRunAction runAction = new DbRepRunAction();
+        runAction.parseArgs(argv);
+
+        try {
+            runAction.run();
+            System.exit(0);
+        } catch (Throwable e) {
+            e.printStackTrace(System.err);
+            System.exit(1);
+        }
+    }
+
+    private void parseArgs(String argv[]) {
+
+        int argc = 0;
+        int nArgs = argv.length;
+
+        if (nArgs < 4) {
+            printUsage(null);
+            System.exit(0);
+        }
+
+        while (argc < nArgs) {
+            String thisArg = argv[argc++];
+            if (thisArg.equals("-h")) {
+                if (argc < nArgs) {
+                    envHome = new File(argv[argc++]);
+                } else {
+                    printUsage("-h requires an argument");
+                }
+            } else if (thisArg.equals("-name")) {
+                if (argc < nArgs) {
+                    nodeName = argv[argc++];
+                } else {
+                    printUsage("-name requires an argument");
+                }
+            } else if (thisArg.equals("-host")) {
+                if (argc < nArgs) {
+                    nodeHost = argv[argc++];
+                } else {
+                    printUsage("-host requires an argument");
+                }
+            } else if (thisArg.equals("-group")) {
+                if (argc < nArgs) {
+                    groupName = argv[argc++];
+                } else {
+                    printUsage("-group requires an argument");
+                }
+            } else if (thisArg.equals("-showVLSN")) {
+                showVLSN = true;
+            } else if (thisArg.equals("-checkpoint")) {
+                doCheckpoint = true;
+            } else {
+                printUsage(thisArg + " is not a valid argument");
+            }
+        }
+    }
+
+    private void run() {
+        ReplicatedEnvironment repEnv = recover();
+        if (showVLSN) {
+            RepInternal.getNonNullRepImpl(repEnv).getVLSNIndex().dumpDb(true);
+        }
+        if (doCheckpoint) {
+            repEnv.checkpoint(new CheckpointConfig().setForce(true));
+        }
+        repEnv.close();
+    }
+
+    private ReplicatedEnvironment recover() {
+        ReplicationConfig repConfig = new ReplicationConfig();
+        repConfig.setNodeName(nodeName);
+        repConfig.setGroupName(groupName);
+        repConfig.setNodeHostPort(nodeHost);
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+
+        return RepInternal.createDetachedEnv(envHome,
+                                             repConfig,
+                                             envConfig);
+    }
+
+    private void printUsage(String msg) {
+        if (msg != null) {
+            System.out.println(msg);
+        }
+        System.out.println(USAGE);
+        System.exit(-1);
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/DbStreamVerify.java b/src/com/sleepycat/je/rep/utilint/DbStreamVerify.java
new file mode 100644
index 0000000..bc2b947
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/DbStreamVerify.java
@@ -0,0 +1,430 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.io.File;
+import java.io.PrintStream;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DbType;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileReader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.rep.impl.RepGroupDB;
+import com.sleepycat.je.rep.impl.RepGroupDB.GroupBinding;
+import com.sleepycat.je.rep.impl.RepGroupDB.NodeBinding;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.vlsn.VLSNBucket;
+import com.sleepycat.je.rep.vlsn.VLSNIndex;
+import com.sleepycat.je.rep.vlsn.VLSNRange;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * For internal use only.
+ * Utility to verify the replication stream and VLSN index. Uses file readers
+ * to traverse the log and check that VLSNs are sequential and that the VLSN
+ * index is consistent.
+ */
+public class DbStreamVerify {
+
+    private static final String USAGE =
+        "usage: " + CmdUtil.getJavaCommand(DbStreamVerify.class) + "\n" +
+        " -h <dir>       # environment home directory\n" +
+        " -s <file>      # start file\n" +
+        " -e <file>      # end file\n" +
+        " -verifyStream  # check that replication stream is ascending\n" +
+        " -dumpVLSN      # scan log file for log entries that make up the" +
+        " VLSN index, don't run verify.\n" +
+        " -dumpRepGroup  # scan log file for log entries that make up the" +
+        " rep group db, don't run verify.\n" +
+        " -i             # show invisible.
If true, print invisible entries"+ + " when running verify mode.\n" + + " -v # verbose\n"; + + public static void main(String argv[]) + throws Exception { + + DbStreamVerify verify = new DbStreamVerify(System.out); + verify.parseArgs(argv); + + try { + verify.run(); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.err); + System.exit(1); + } + } + + private File envHome = null; + private boolean showInvisible = false; + private boolean verbose = false; + private boolean vlsnDump = false; + private boolean repGroupDump = false; + private boolean verifyStream = false; + private long startLsn = DbLsn.NULL_LSN; + private long endLsn = DbLsn.NULL_LSN; + + private final PrintStream out; + + private DbStreamVerify(PrintStream out) { + this.out = out; + } + + private void printUsage(String msg) { + if (msg != null) { + out.println(msg); + } + out.println(USAGE); + System.exit(-1); + } + + private void parseArgs(String argv[]) { + + int argc = 0; + int nArgs = argv.length; + + if (nArgs == 0) { + printUsage(null); + System.exit(0); + } + + while (argc < nArgs) { + String thisArg = argv[argc++]; + if (thisArg.equals("-i")) { + showInvisible = true; + } else if (thisArg.equals("-v")) { + verbose = true; + } else if (thisArg.equals("-verifyStream")) { + verifyStream = true; + } else if (thisArg.equals("-dumpVLSN")) { + vlsnDump = true; + } else if (thisArg.equals("-dumpRepGroup")) { + repGroupDump = true; + } else if (thisArg.equals("-s")) { + startLsn = CmdUtil.readLsn(CmdUtil.getArg(argv, argc++)); + } else if (thisArg.equals("-e")) { + endLsn = CmdUtil.readLsn(CmdUtil.getArg(argv, argc++)); + } else if (thisArg.equals("-h")) { + if (argc < nArgs) { + envHome = new File(argv[argc++]); + } else { + printUsage("-h requires an argument"); + } + } else { + printUsage(thisArg + " is not a valid argument"); + } + } + + if (envHome == null) { + printUsage("-h is a required argument"); + } + + if (!(vlsnDump || repGroupDump || verifyStream)) { + printUsage("Must specify -dumpVLSN, -dumpRepGroup or " + + "-verifyStream"); + } + } + + public void run() { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setReadOnly(true); + Environment env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + try { + FileReader reader; + if (vlsnDump) { + out.println("Dump VLSNIndex LNs"); + reader = new VLSNIndexReader(env, out, startLsn, endLsn); + } else if (repGroupDump) { + out.println("Dump RepGroup LNs"); + reader = new RepGroupReader(env, out, startLsn, endLsn); + } else if (verifyStream) { + out.println("Replication stream: check that vlsns ascend"); + reader = new VerifyReader(envImpl, out, startLsn, endLsn); + } else { + out.println("No action specified."); + return; + } + + while (reader.readNextEntry()) { + } + + if ((!vlsnDump) && (!repGroupDump)) { + ((VerifyReader) reader).displayLast(); + VLSNIndex.verifyDb(env, out, verbose); + } + + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(out); + System.exit(1); + } finally { + try { + env.close(); + } catch (Throwable e) { + e.printStackTrace(out); + System.exit(1); + } + } + } + + private class VerifyReader extends FileReader { + private VLSN lastVLSN = VLSN.NULL_VLSN; + private long lastLSN = DbLsn.NULL_LSN; + private final PrintStream out1; + + VerifyReader(EnvironmentImpl envImpl, PrintStream out, long startLsn, + long endLsn) { + super(envImpl, + 10000, + true, // forward + startLsn, + null, // singleFileNumber, + DbLsn.NULL_LSN, + endLsn); + this.out1 = 
out;
+        }
+
+        @Override
+        protected boolean isTargetEntry() {
+            return (currentEntryHeader.getReplicated());
+        }
+
+        @Override
+        protected boolean processEntry(ByteBuffer entryBuffer) {
+            VLSN currentVLSN = currentEntryHeader.getVLSN();
+            long currentLSN = getLastLsn();
+
+            if (currentVLSN == null) {
+                throw new RuntimeException
+                    (DbLsn.getNoFormatString(currentLSN) +
+                     " should be a replicated entry");
+            }
+
+            if (currentVLSN.isNull()) {
+                out1.println("unexpected LSN " +
+                             DbLsn.getNoFormatString(getLastLsn()) +
+                             " has vlsn " + currentVLSN);
+            }
+
+            if ((lastVLSN != null) && lastVLSN.isNull()) {
+
+                /* First entry seen */
+                out1.println("first VLSN = " + currentVLSN + " at lsn " +
+                             DbLsn.getNoFormatString(getLastLsn()));
+            } else if (!currentEntryHeader.isInvisible() &&
+                       !currentVLSN.follows(lastVLSN)) {
+
+                /* Not the first entry; check for a gap. */
+                out1.println("gap of " +
+                             (currentVLSN.getSequence() -
+                              lastVLSN.getSequence()) +
+                             " Last=" + lastVLSN + " at lsn " +
+                             DbLsn.getNoFormatString(lastLSN) +
+                             " next=" + currentVLSN + " at lsn " +
+                             DbLsn.getNoFormatString(currentLSN));
+            }
+
+            /* Note the invisible log entries. */
+            if (showInvisible && currentEntryHeader.isInvisible()) {
+                out1.println("VLSN " + currentVLSN + " at lsn " +
+                             DbLsn.getNoFormatString(currentLSN) +
+                             " is invisible.");
+            }
+
+            if (!currentEntryHeader.isInvisible()) {
+                lastVLSN = currentVLSN;
+                lastLSN = currentLSN;
+            }
+            entryBuffer.position(entryBuffer.position() +
+                                 currentEntryHeader.getItemSize());
+            return true;
+        }
+
+        void displayLast() {
+            out1.println("LastVLSN = " + lastVLSN + " at " +
+                         DbLsn.getNoFormatString(lastLSN));
+        }
+    }
+
+    /**
+     * A DecoderReader can dump the LNs from one of the JE internal databases
+     * and display the LNs in a deserialized format, for easy debugging. The
+     * target internal databases are the VLSNIndex and the RepGroup. We could
+     * add FileSummaryLNs later.
+     */
+    private abstract class DecoderReader extends FileReader {
+        protected final PrintStream outStream;
+        private final Map<Byte, LNLogEntry<?>> targetMap;
+        protected LNLogEntry<?> targetEntry;
+        private final DatabaseId targetDbId;
+
+        DecoderReader(Environment env,
+                      PrintStream out,
+                      long startLsn,
+                      long endLsn,
+                      String dbName) {
+            super(DbInternal.getNonNullEnvImpl(env),
+                  10000,
+                  true,           // forward
+                  startLsn,
+                  null,           // singleFileNumber
+                  DbLsn.NULL_LSN,
+                  endLsn);
+            this.outStream = out;
+
+            targetMap = new HashMap<Byte, LNLogEntry<?>>();
+
+            for (LogEntryType entryType : LogEntryType.getAllTypes()) {
+                if (entryType.isUserLNType()) {
+                    targetMap.put(entryType.getTypeNum(),
+                                  (LNLogEntry<?>) entryType.getNewLogEntry());
+                }
+            }
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setReadOnly(true);
+            DbInternal.setUseExistingConfig(dbConfig, true);
+            Database db = env.openDatabase(null, dbName, dbConfig);
+            targetDbId = DbInternal.getDbImpl(db).getId();
+            db.close();
+        }
+
+        @Override
+        protected boolean isTargetEntry() {
+            /* Is it a target entry? */
+            targetEntry = targetMap.get(currentEntryHeader.getType());
+            return targetEntry != null;
+        }
+
+        @Override
+        protected boolean processEntry(ByteBuffer entryBuffer) {
+            targetEntry.readEntry(envImpl, currentEntryHeader, entryBuffer);
+            targetEntry.postFetchInit(false /*isDupDb*/);
+
+            if (!targetEntry.getDbId().equals(targetDbId)) {
+                return false;
+            }
+
+            outStream.print("LSN=" + DbLsn.getNoFormatString(getLastLsn()));
+
+            display();
+            return true;
+        }
+
+        protected abstract void display();
+    }
+
+    /*
+     * Read the LNs that compose the VLSNIndex.
+     * Displays the contents of each LN in VLSNBucket form.
+     */
+    private class VLSNIndexReader extends DecoderReader {
+
+        VLSNIndexReader(Environment env,
+                        PrintStream out,
+                        long startLsn,
+                        long endLsn) {
+            super(env, out, startLsn, endLsn,
+                  DbType.VLSN_MAP.getInternalName());
+        }
+
+        @Override
+        protected void display() {
+            DatabaseEntry key = new DatabaseEntry(targetEntry.getKey());
+            long keyVal = LongBinding.entryToLong(key);
+            LN ln = targetEntry.getLN();
+            if (ln.isDeleted()) {
+                outStream.println("key=" + keyVal + " <deleted>");
+            } else {
+                DatabaseEntry data = new DatabaseEntry(ln.getData());
+
+                if (keyVal == VLSNRange.RANGE_KEY) {
+                    outStream.print(" range: ");
+                    VLSNRange range = VLSNRange.readFromDatabase(data);
+                    outStream.println(range);
+                } else {
+                    outStream.print(" key=" + keyVal);
+                    VLSNBucket bucket = VLSNBucket.readFromDatabase(data);
+                    outStream.println(" " + bucket);
+                    if (verbose) {
+                        outStream.println("-------------------------------");
+                        bucket.dump(outStream);
+                        outStream.println("-------------------------------\n");
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Display the LNs that compose the RepGroupDb.
+     */
+    private class RepGroupReader extends DecoderReader {
+        private NodeBinding nodeBinding;
+
+        RepGroupReader(Environment env, PrintStream out,
+                       long startLsn, long endLsn) {
+            super(env, out, startLsn, endLsn,
+                  DbType.REP_GROUP.getInternalName());
+        }
+
+        @Override
+        protected void display() {
+            outStream.print(" VLSN=" + currentEntryHeader.getVLSN());
+
+            DatabaseEntry key = new DatabaseEntry(targetEntry.getKey());
+
+            LN ln = targetEntry.getLN();
+            if (ln.isDeleted()) {
+                outStream.print(" <deleted>");
+            } else {
+                DatabaseEntry data = new DatabaseEntry(ln.getData());
+                String keyVal = StringBinding.entryToString(key);
+                if (keyVal.equals(RepGroupDB.GROUP_KEY)) {
+                    final RepGroupImpl group =
+                        new GroupBinding().entryToObject(data);
+                    nodeBinding = new NodeBinding(group.getFormatVersion());
+                    outStream.print(" GroupInfo: ");
+                    outStream.println(group);
+                } else {
+                    outStream.print(" NodeInfo: " + keyVal);
+                    if (nodeBinding == null) {
+                        throw new IllegalStateException(
+                            "Node entry before group entry");
+                    }
+                    outStream.print(nodeBinding.entryToObject(data));
+                }
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/DbSync.java b/src/com/sleepycat/je/rep/utilint/DbSync.java
new file mode 100644
index 0000000..302a306
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/DbSync.java
@@ -0,0 +1,340 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
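+ *
+ * For illustration, a minimal sketch of running the DbStreamVerify utility
+ * above against a hypothetical environment directory, checking that the
+ * replication stream's VLSNs ascend:
+ *
+ *   DbStreamVerify.main(new String[] {
+ *       "-h", "/var/tmp/rep-env",
+ *       "-verifyStream",
+ *       "-v"});
+ *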
+ */ + +package com.sleepycat.je.rep.utilint; + +import static com.sleepycat.je.rep.impl.RepParams.NODE_HOST_PORT; + +import java.io.File; +import java.io.FileNotFoundException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; +import com.sleepycat.je.utilint.CmdUtil; + +/** + * DbSync is a utility for ensuring that a group of replication nodes have + * fully caught up on the replication stream. The target use case is + * testing. If a replication group has crashed abruptly, nodes may have closed + * without finishing the full replay of the replication stream and the + * environments might not have the same contents. This makes it impossible to + * compare the contents of the environments for correctness. + *

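+ * <p>
+ * For illustration, a minimal sketch (hypothetical paths, names and ports)
+ * of driving the sync-up programmatically through the public constructor,
+ * rather than through {@link #main}:
+ * <pre>
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * envConfig.setAllowCreate(true);
+ * envConfig.setTransactional(true);
+ *
+ * ReplicationConfig repConfig =
+ *     new ReplicationConfig("demoGroup", "node1", "localhost:5001");
+ *
+ * DbSync sync = new DbSync("/var/tmp/rep-env1", envConfig, repConfig,
+ *                          "localhost:5001", 60000);
+ * sync.sync();
+ * </pre>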
        + * DbSync assumes that all nodes are down. The utility is invoked for each node + * in the group. The node will come up and rejoin the group, causing the whole + * group to reach the same point in the replication stream. If the node becomes + * the master, it will issue a shutdown request. Otherwise, a node is a + * replica, and will wait for the shutdown message to come, and will then + * close. + */ +public class DbSync { + public static final String DBSYNC_ENV = "-env"; + public static final String DBSYNC_GROUP_NAME = "-groupName"; + public static final String DBSYNC_NODE_NAME = "-nodeName"; + public static final String DBSYNC_NODE_HOST = "-nodeHost"; + public static final String DBSYNC_HELPER_HOST = "-helperHost"; + public static final String DBSYNC_TIMEOUT = "-timeout"; + public static final String DBSYNC_NET_PROPS = "-netProps"; + private static final String FORMAT = "%1$-15s"; + + private String envHome; + private ReplicationConfig repConfig; + private EnvironmentConfig envConfig; + private String helperHost; + + /* The group shutdown timeout value, in milliseconds. */ + private long timeout; + + private static final String usageString = + "usage: " + CmdUtil.getJavaCommand(DbSync.class) + "\n" + + String.format(FORMAT, DBSYNC_ENV) + + "# environment home directory for the node\n" + + String.format(FORMAT, DBSYNC_GROUP_NAME) + + "# name of the replication group\n" + + String.format(FORMAT, DBSYNC_NODE_NAME) + + "# name of the node in the group\n" + + String.format(FORMAT, DBSYNC_NODE_HOST) + + "# host name or IP address and port number for the node\n" + + String.format(FORMAT, DBSYNC_HELPER_HOST) + + "# helperHost for the node\n" + + String.format(FORMAT, DBSYNC_TIMEOUT) + + "# time for the node to catch up with master, in milliseconds\n"; + + public static void main(String[] args) + throws Exception { + + DbSync syncup = new DbSync(); + syncup.parseArgs(args); + syncup.sync(); + } + + private void printUsage(String msg) { + System.err.println(msg); + System.err.println(usageString); + System.exit(-1); + } + + private void parseArgs(String[] args) + throws Exception { + + int argc = 0; + int nArgs = args.length; + + String nodeName = null; + String nodeHost = null; + String groupName = null; + String netPropsName = null; + + while (argc < nArgs) { + String thisArg = args[argc++].trim(); + if (thisArg.equals(DBSYNC_ENV)) { + if (argc < nArgs) { + envHome = args[argc++]; + } else { + printUsage(DBSYNC_ENV + " requires an argument"); + } + } else if (thisArg.equals(DBSYNC_GROUP_NAME)) { + if (argc < nArgs) { + groupName = args[argc++]; + } else { + printUsage(DBSYNC_GROUP_NAME + " requires an argument"); + } + } else if (thisArg.equals(DBSYNC_NODE_NAME)) { + if (argc < nArgs) { + nodeName = args[argc++]; + } else { + printUsage(DBSYNC_NODE_NAME + " requires an argument"); + } + } else if (thisArg.equals(DBSYNC_NODE_HOST)) { + if (argc < nArgs) { + nodeHost = args[argc++]; + } else { + printUsage(DBSYNC_NODE_HOST + " requires an argument"); + } + } else if (thisArg.equals(DBSYNC_HELPER_HOST)) { + if (argc < nArgs) { + helperHost = args[argc++]; + } else { + printUsage(DBSYNC_HELPER_HOST + " requires an argument"); + } + } else if (thisArg.equals(DBSYNC_TIMEOUT)) { + if (argc < nArgs) { + timeout = Long.parseLong(args[argc++]); + } else { + printUsage(DBSYNC_TIMEOUT + " requires an argument"); + } + } else if (thisArg.equals(DBSYNC_NET_PROPS)) { + if (argc < nArgs) { + netPropsName = args[argc++]; + } else { + printUsage(DBSYNC_NET_PROPS + " requires an argument"); + } + } + } + 
+
+        if (envHome == null) {
+            printUsage(DBSYNC_ENV + " is a required argument.");
+        }
+
+        if (groupName == null) {
+            printUsage(DBSYNC_GROUP_NAME + " is a required argument.");
+        }
+
+        if (nodeName == null) {
+            printUsage(DBSYNC_NODE_NAME + " is a required argument.");
+        }
+
+        if (nodeHost == null) {
+            printUsage(DBSYNC_NODE_HOST + " is a required argument.");
+        }
+
+        if (helperHost == null) {
+            printUsage(DBSYNC_HELPER_HOST + " is a required argument.");
+        }
+
+        if (timeout <= 0) {
+            printUsage(DBSYNC_TIMEOUT + " should be a positive long number.");
+        }
+
+        try {
+            NODE_HOST_PORT.validateValue(nodeHost);
+        } catch (IllegalArgumentException e) {
+            e.printStackTrace();
+            printUsage("Host and Port pair for this node is illegal.");
+        }
+
+        ReplicationNetworkConfig repNetConfig =
+            ReplicationNetworkConfig.createDefault();
+        if (netPropsName != null) {
+            try {
+                repNetConfig =
+                    ReplicationNetworkConfig.create(new File(netPropsName));
+            } catch (FileNotFoundException fnfe) {
+                printUsage("The netProps file " + netPropsName +
+                           " does not exist.");
+            } catch (IllegalArgumentException iae) {
+                printUsage("The net properties file " + netPropsName +
+                           " is not valid: " + iae.getMessage());
+            }
+        }
+
+        envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+
+        repConfig = new ReplicationConfig();
+        repConfig.setNodeName(nodeName);
+        repConfig.setGroupName(groupName);
+        repConfig.setNodeHostPort(nodeHost);
+        repConfig.setHelperHosts(helperHost);
+
+        repConfig.setRepNetConfig(repNetConfig);
+    }
+
+    private DbSync() {
+    }
+
+    /**
+     * Create a DbSync object for the purpose of syncing up a specific
+     * replication group.
+     *
+     * @param envHome The Environment home directory of this replica.
+     * @param envConfig The environment configuration for this replica.
+     * @param repConfig The replication configuration for this replica.
+     * @param helperHost The helper host for this replica.
+     * @param timeout The permitted time period, in milliseconds, for the
+     * replica to catch up with master.
+     */
+    public DbSync(String envHome,
+                  EnvironmentConfig envConfig,
+                  ReplicationConfig repConfig,
+                  String helperHost,
+                  long timeout) {
+
+        this.envHome = envHome;
+        this.envConfig = envConfig;
+        this.repConfig = repConfig;
+        this.helperHost = helperHost;
+        this.timeout = timeout;
+    }
+
+    /**
+     * Open this replication node. Block until the node has opened, synced up,
+     * and closed.
+     */
+    public void sync()
+        throws Exception {
+
+        /*
+         * Set the ReplicaAckPolicy to ALL, so that all the replicas reach
+         * the same sync point.
+         */
+        Durability durability =
+            new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+                           Durability.SyncPolicy.WRITE_NO_SYNC,
+                           Durability.ReplicaAckPolicy.ALL);
+
+        envConfig.setDurability(durability);
+        repConfig.setHelperHosts(helperHost);
+
+        /* Exit if a replicated Environment can't be created successfully. */
+        StatusListener listener = new StatusListener();
+        ReplicatedEnvironment repEnv = null;
+        try {
+            repEnv = new ReplicatedEnvironment(new File(envHome),
+                                               repConfig,
+                                               envConfig,
+                                               null,
+                                               QuorumPolicy.ALL);
+            repEnv.setStateChangeListener(listener);
+        } catch (Exception e) {
+            System.err.println("Can't successfully initialize " +
+                               repConfig.getNodeName() + " because of " + e);
+            System.exit(-1);
+        }
+
+        /* Wait until the node becomes active. */
+        listener.awaitActiveState();
+
+        if (repEnv.getState().isMaster()) {
+
+            /*
+             * If master, start a transaction as a way of ascertaining whether
+             * all nodes are up. Since the ReplicaAckPolicy is ALL, the
+             * transaction will only begin when all the nodes are available.
+             */
+            Transaction txn = repEnv.beginTransaction(null, null);
+            txn.abort();
+
+            /* Invoke the group shutdown API; the timeout is in ms. */
+            repEnv.shutdownGroup(timeout, TimeUnit.MILLISECONDS);
+        } else if (repEnv.getState().isReplica()) {
+            for (long i = 0; i < timeout; i += 1000) {
+                try {
+
+                    /*
+                     * The replica will throw a GroupShutdownException if it
+                     * has received and processed the group close command from
+                     * the master.
+                     */
+                    repEnv.getState();
+                    Thread.sleep(1000);
+                } catch (GroupShutdownException e) {
+                    break;
+                }
+            }
+        }
+
+        /* Shut down the rep node. */
+        repEnv.close();
+    }
+
+    /**
+     * Wait for this node to become either a master or a replica.
+     */
+    private class StatusListener implements StateChangeListener {
+        CountDownLatch activeLatch = new CountDownLatch(1);
+
+        @Override
+        public void stateChange(StateChangeEvent stateChangeEvent)
+            throws RuntimeException {
+
+            switch (stateChangeEvent.getState()) {
+                case MASTER:
+                case REPLICA:
+                    activeLatch.countDown();
+                    break;
+                default:
+                    System.err.println
+                        (repConfig.getNodeName() +
+                         " is disconnected from group.");
+                    break;
+            }
+        }
+
+        public void awaitActiveState()
+            throws InterruptedException {
+
+            activeLatch.await();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/FreePortLocator.java b/src/com/sleepycat/je/rep/utilint/FreePortLocator.java
new file mode 100644
index 0000000..7ce8d8b
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/FreePortLocator.java
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.io.IOException;
+import java.net.DatagramSocket;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+
+/**
+ * An iterator to iterate over the free ports on an interface.
+ */
+public class FreePortLocator {
+
+    /**
+     * Whether to print debugging messages -- use this to find tests that are
+     * not closing ports.
+     */
+    private static final boolean debug =
+        Boolean.getBoolean("test.debugFreePortLocator");
+
+    private final String hostname;
+    private final int portStart;
+    private final int portEnd;
+
+    private int currPort;
+
+    /**
+     * Constructor identifying the interface and the port range within which
+     * to look for free ports. The port range specified by the arguments
+     * must be < 32768, that is, it should be outside the dynamic port range
+     * that is typically configured on most machines. See the "Anonymous
+     * ports" documentation for details regarding port configuration for
+     * tests.
+     */
+    public FreePortLocator(String hostname, int portStart, int portEnd) {
+        super();
+        assert portStart < portEnd;
+
+        if ((portStart > 0x7fff) || (portEnd > 0x7fff)) {
+            throw new IllegalArgumentException
+                ("Invalid port range:" + portStart + " - " + portEnd + ". " +
+                 "The port range must not extend past:" + 0x7fff +
+                 " since the allocated ports could then overlap with " +
+                 "dynamically assigned ports used by other services.
"); + } + + this.hostname = hostname; + this.portStart = portStart; + this.portEnd = portEnd; + currPort = portStart; + } + + public int getPortStart() { + return portStart; + } + + public int getPortEnd() { + return portEnd; + } + + /** + * Returns the next free port. Note that it's possible that on a busy + * machine another process may grab the "free" port before it's actually + * used. + * + * There is somewhat AIsh aspect to the code below. In general it tries to + * be very conservative, using different techniques so that it works + * reasonably well on Linux, Mac OS and Windows. + * + * Note: The use of setReuseAddress after a bind operation may look + * dubious, since it runs counter to the API doc, but it helps based on + * actual tests. It's also the idiom used by Apache Camel to find a + * free port. It, at least, can't hurt. + */ + public int next() { + while (++currPort < portEnd) { + + /* Try connecting to the port to see if somebody is listening. */ + Socket s = null; + try { + s = new Socket(hostname, currPort); + /* Somebody is listening on the port. */ + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + " busy - socket"); + Thread.dumpStack(); + } + continue; + } catch (IOException e) { + /* Nobody is listening, continue with other tests. */ + } finally { + if (s != null){ + try { + s.close(); + } catch (IOException e) { + /* Unexpected, something's wrong, ignore the port. */ + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - socket close: " + e); + e.printStackTrace(); + } + continue; + } + } + } + + /* Try without a hostname */ + ServerSocket ss = null; + DatagramSocket ds = null; + try { + ss = new ServerSocket(currPort); + ss.setReuseAddress(true); + ds = new DatagramSocket(currPort); + ds.setReuseAddress(true); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server, datagram: " + e); + e.printStackTrace(); + } + continue; + } finally { + if (ds != null) { + ds.close(); + } + + if (ss != null) { + try { + ss.close(); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server close: " + e); + e.printStackTrace(); + } + continue; + } + } + } + + ss = null; + ds = null; + + /* try with a hostname */ + final InetSocketAddress sa = + new InetSocketAddress(hostname, currPort); + try { + ss = new ServerSocket(); + ss.setReuseAddress(true); + ss.bind(sa); + + ds = new DatagramSocket(sa); + ds.setReuseAddress(true); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server, datagram hostname: " + e); + e.printStackTrace(); + } + continue; + } finally { + if (ds != null) { + ds.close(); + } + + if (ss != null) { + try { + ss.close(); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server hostname close: " + e); + e.printStackTrace(); + } + continue; + } + } + } + + /* Survived port test gauntlet, return it. */ + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + " free"); + } + return currPort; + } + + throw new IllegalStateException + ("No more ports available in the range: " + + portStart + " - " + portEnd); + } + + /** + * Skip a number of ports. 
+     */
+    public void skip(int num) {
+        currPort += num;
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/HostPortPair.java b/src/com/sleepycat/je/rep/utilint/HostPortPair.java
new file mode 100644
index 0000000..91db50e
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/HostPortPair.java
@@ -0,0 +1,102 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.Set;
+import com.sleepycat.je.rep.impl.RepParams;
+
+/**
+ * Encapsulates the functionality around dealing with HostPort string pairs
+ * having the format:
+ *
+ *     host[:port]
+ */
+
+public class HostPortPair {
+
+    static public final String SEPARATOR = ":";
+
+    /**
+     * Parses a hostPort pair into the socket it represents.
+     *
+     * @param hostPortPair
+     *
+     * @return socket address for this host pair
+     *
+     * @throws IllegalArgumentException via ReplicatedEnvironment and Monitor
+     * ctors.
+     */
+    public static InetSocketAddress getSocket(String hostPortPair) {
+        if ("".equals(hostPortPair)) {
+            throw new IllegalArgumentException
+                ("Host and port pair was missing");
+        }
+        int portStartIndex = hostPortPair.indexOf(SEPARATOR);
+        String hostName = hostPortPair;
+        int port = -1;
+        if (portStartIndex < 0) {
+            port = Integer.parseInt(RepParams.DEFAULT_PORT.getDefault());
+        } else {
+            hostName = hostPortPair.substring(0, portStartIndex);
+            port =
+                Integer.parseInt(hostPortPair.substring(portStartIndex + 1));
+        }
+        return new InetSocketAddress(hostName, port);
+    }
+
+    /**
+     * Parses hostPort pairs into the sockets they represent.
+     *
+     * @param hostPortPairs
+     *
+     * @return a set of socket addresses for these host pairs
+     */
+    public static Set<InetSocketAddress> getSockets(String hostPortPairs) {
+        Set<InetSocketAddress> helpers = new HashSet<InetSocketAddress>();
+        if (hostPortPairs != null) {
+            for (String hostPortPair : hostPortPairs.split(",")) {
+                final String hpp = hostPortPair.trim();
+                if (hpp.length() > 0) {
+                    helpers.add(getSocket(hpp));
+                }
+            }
+        }
+
+        return helpers;
+    }
+
+    public static String getString(String host, int port) {
+        return host + SEPARATOR + port;
+    }
+
+    /**
+     * Parses and returns the hostname string of a hostport pair.
+     */
+    public static String getHostname(String hostPortPair) {
+        int portStartIndex = hostPortPair.indexOf(SEPARATOR);
+        return (portStartIndex < 0) ?
+            hostPortPair :
+            hostPortPair.substring(0, portStartIndex);
+    }
+
+    /**
+     * Parses and returns the port of a hostport pair.
+     */
+    public static int getPort(String hostPortPair) {
+        int portStartIndex = hostPortPair.indexOf(SEPARATOR);
+        return Integer.parseInt((portStartIndex < 0) ?
+                                RepParams.DEFAULT_PORT.getDefault() :
+                                hostPortPair.substring(portStartIndex + 1));
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/IntRunningTotalStat.java b/src/com/sleepycat/je/rep/utilint/IntRunningTotalStat.java
new file mode 100644
index 0000000..f11b1b6
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/IntRunningTotalStat.java
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Used to create running totals across the lifetime of the StatGroup. They + * cannot be cleared. + */ +public class IntRunningTotalStat extends IntStat { + + private static final long serialVersionUID = 1L; + + public IntRunningTotalStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + @Override + public void clear() { + /* Don't clear it because it's a running total. */ + } +} diff --git a/src/com/sleepycat/je/rep/utilint/LongMinZeroStat.java b/src/com/sleepycat/je/rep/utilint/LongMinZeroStat.java new file mode 100644 index 0000000..5694810 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/LongMinZeroStat.java @@ -0,0 +1,38 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + + +package com.sleepycat.je.rep.utilint; + +import com.sleepycat.je.utilint.LongMinStat; +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatGroup; + +/** + * For stats where the min value in the range is zero, so that sums, averages, + * etc. based on positive ranges just work. + */ +public class LongMinZeroStat extends LongMinStat { + + private static final long serialVersionUID = 1L; + + public LongMinZeroStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + @Override + public Long get() { + Long value = super.get(); + return (value == Long.MAX_VALUE) ? 0 : value; + } +} diff --git a/src/com/sleepycat/je/rep/utilint/NamedChannel.java b/src/com/sleepycat/je/rep/utilint/NamedChannel.java new file mode 100644 index 0000000..07ade11 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/NamedChannel.java @@ -0,0 +1,90 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.utilint; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ByteChannel; + +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.impl.node.NameIdPair; + +/** + * Packages a DataChannel and a NameIdPair together so that logging + * messages can show the node name instead of the channel toString(); + */ +public class NamedChannel implements ByteChannel { + + private NameIdPair nameIdPair; + protected final DataChannel channel; + + public NamedChannel(DataChannel channel, NameIdPair nameIdPair) { + this.channel = channel; + this.nameIdPair = nameIdPair; + } + + /* + * NameIdPair unknown at this time. + */ + public NamedChannel(DataChannel channel) { + this.channel = channel; + this.nameIdPair = NameIdPair.NULL; + } + + public void setNameIdPair(NameIdPair nameIdPair) { + this.nameIdPair = nameIdPair; + } + + public NameIdPair getNameIdPair() { + return nameIdPair; + } + + public DataChannel getChannel() { + return channel; + } + + @Override + public String toString() { + if (getNameIdPair() == null) { + return getChannel().toString(); + } + + return "(" + getNameIdPair() + ")" + getChannel(); + } + + /* + * The following ByteChannel implementation methods delegate to the wrapped + * channel object. + */ + @Override + public int read(ByteBuffer dst) throws IOException { + return channel.read(dst); + } + + @Override + public void close() throws IOException { + channel.close(); + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public int write(ByteBuffer src) throws IOException { + return channel.write(src); + } +} + diff --git a/src/com/sleepycat/je/rep/utilint/NamedChannelWithTimeout.java b/src/com/sleepycat/je/rep/utilint/NamedChannelWithTimeout.java new file mode 100644 index 0000000..965cabf --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/NamedChannelWithTimeout.java @@ -0,0 +1,176 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.utilint; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.logging.Logger; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.ChannelTimeoutTask; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * NamedChannelWithTimeout permits association of timeouts with a DataChannel. + * This mechanism is necessary, since the standard mechanism for associating + * timeouts with sockets using Socket.setSoTimeout is not supported by nio + * SocketChannels. + */ +public class NamedChannelWithTimeout + extends NamedChannel { + + /* + * Denotes read activity associated with the channel. It's set each time a + * read is successfully executed on the channel. The presence of heartbeats + * is typically used to guarantee some minimum level of activity over the + * channel. 
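+ *
+ * For illustration, a minimal sketch (with hypothetical collaborators) of
+ * creating a channel that is forcibly closed after 30s without read
+ * activity, and of the periodic liveness check made by the channel
+ * timeout task:
+ *
+ *   NamedChannelWithTimeout named =
+ *       new NamedChannelWithTimeout(repImpl, logger, channelTimeoutTask,
+ *                                   dataChannel, 30000);
+ *   ...
+ *   boolean live = named.isActive(pseudoTimeMs);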
+ */ + private volatile boolean readActivity; + + /* + * The timeout associated with the channel. A value of zero indicates no + * timeout. + */ + private volatile int timeoutMs; + + /* Values to help with logging. */ + private final EnvironmentImpl envImpl; + private final Logger logger; + + /* The "time" of the last check for read activity on the channel. */ + private long lastCheckMs = 0l; + + public NamedChannelWithTimeout(RepNode repNode, + DataChannel channel, + int timeoutMs) { + this(repNode.getRepImpl(), + repNode.getLogger(), + repNode.getChannelTimeoutTask(), + channel, + timeoutMs); + } + + public NamedChannelWithTimeout(RepImpl repImpl, + Logger logger, + ChannelTimeoutTask channelTimeoutTask, + DataChannel channel, + int timeoutMs) { + super(channel); + this.timeoutMs = timeoutMs; + this.envImpl = repImpl; + this.logger = logger; + readActivity = true; + if (timeoutMs > 0) { + /* Only register with a timer, if a timeout is being requested. */ + channelTimeoutTask.register(this); + } + } + + /** + * Used to modify the timeout associated with the channel. + * + * @param timeoutMs the new timeout value + */ + public void setTimeoutMs(int timeoutMs) { + this.timeoutMs = timeoutMs; + /* Ensure that the next tick resets the time and counter. */ + readActivity = true; + } + + /* + * Methods below supply the Protocol for byte channel. The implementations + * wrap read operations to track i/o activity by setting readActivity. + * getCurrentActivity() can be used to determine if there was any read + * activity since an earlier call to the method. + */ + @Override + public int read(ByteBuffer dst) + throws IOException { + + final int bytes = channel.read(dst); + if (bytes > 0) { + readActivity = true; + } + return bytes; + } + + @Override + public void close() + throws IOException { + + channel.close(); + readActivity = false; + } + + private void resetActivityCounter(long timeMs) { + lastCheckMs = timeMs; + readActivity = false; + } + + /** + * Method invoked by the time thread to check on the channel on a periodic + * basis. Note that the time that is passed in is a "pseudo" time that is + * only meaningful for calculating time differences. + * + * @param timeMs the pseudo time + * + * @return true if the channel is active, false if it isn't and has been + * closed + */ + public boolean isActive(long timeMs) { + + if (!channel.isOpen()) { + /* some thread closed it. */ + return false; + } + + if (!channel.getSocketChannel().isConnected()) { + /* Not yet connected, wait for it to be connected. */ + return true; + } + + if (readActivity) { + resetActivityCounter(timeMs); + return true; + } + + if ((timeoutMs == 0) || (timeMs - lastCheckMs) < timeoutMs) { + return true; + } + + /* + * No activity, force the channel closed thus generating an + * AsynchronousCloseException in the read/write threads. + */ + LoggerUtils.info(logger, envImpl, + "Inactive channel: " + getNameIdPair() + + " forced close. Timeout: " + timeoutMs + "ms."); + final long startTime = System.currentTimeMillis(); + try { + channel.close(); + } catch (IOException e) { + /* Ignore the exception. 
*/
+        final long timeElapsed = System.currentTimeMillis() - startTime;
+        /* Log unexpected closure times longer than 10 seconds. */
+        if (timeElapsed > 10000) {
+            LoggerUtils.info(logger, envImpl,
+                             "Time to close inactive channel " +
+                             getNameIdPair() + ": " + timeElapsed + "ms");
+        }
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/RepUtils.java b/src/com/sleepycat/je/rep/utilint/RepUtils.java
new file mode 100644
index 0000000..3d188ed
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/RepUtils.java
@@ -0,0 +1,598 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_MESSAGES_WRITTEN;
+import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_WRITE_NANOS;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.nio.channels.SocketChannel;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.ReplicaConsistencyPolicy;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.rep.NoConsistencyRequiredPolicy;
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.TimeConsistencyPolicy;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions;
+import com.sleepycat.je.rep.utilint.net.SimpleDataChannel;
+import com.sleepycat.je.utilint.PropUtil;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+public class RepUtils {
+
+    public static final boolean DEBUG_PRINT_THREAD = true;
+    public static final boolean DEBUG_PRINT_TIME = true;
+
+    /**
+     * Maps from uppercase ReplicaConsistencyPolicy name to the policy's
+     * format.
+     */
+    private static final Map<String, ConsistencyPolicyFormat<?>>
+        consistencyPolicyFormats =
+            new HashMap<String, ConsistencyPolicyFormat<?>>();
+    static {
+        addConsistencyPolicyFormat(TimeConsistencyPolicy.NAME,
+                                   new TimeConsistencyPolicyFormat());
+        addConsistencyPolicyFormat(NoConsistencyRequiredPolicy.NAME,
+                                   new NoConsistencyRequiredPolicyFormat());
+    }
+
+    /*
+     * Canonical channel instance used to indicate that this is the last
+     * instance of a channel in a channel queue and that the queue is
+     * effectively closed. This value is typically used during a soft
+     * shutdown of a thread to cause the thread waiting on the queue to
+     * wake up and take notice.
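+     *
+     * For illustration, a consumer thread draining a hypothetical
+     * channelQueue would treat the marker as a poison pill:
+     *
+     *   DataChannel channel = channelQueue.take();
+     *   if (channel == RepUtils.CHANNEL_EOF_MARKER) {
+     *       return;  // the queue is closed; exit the thread
+     *   }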
+     */
+    public final static DataChannel CHANNEL_EOF_MARKER;
+
+    static {
+        try {
+            CHANNEL_EOF_MARKER = new SimpleDataChannel(SocketChannel.open());
+        } catch (IOException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        }
+    }
+
+    /**
+     * If not null, called by openSocketChannel with the connect options
+     * before opening the socket -- for unit testing.
+     */
+    public static volatile TestHook<ConnectOptions> openSocketChannelHook;
+
+    /**
+     * Define a new ConsistencyPolicyFormat. Should only be called outside of
+     * this class to add support for custom policies for testing. Must be
+     * called when the system is quiescent, since the map is unsynchronized.
+     *
+     * @param name must be the first part of the policy string with a
+     * non-letter delimiter following it, or must be the entire policy string.
+     *
+     * @param format to register.
+     */
+    public static <T extends ReplicaConsistencyPolicy> void
+        addConsistencyPolicyFormat(final String name,
+                                   final ConsistencyPolicyFormat<T> format) {
+        consistencyPolicyFormats.put
+            (name.toUpperCase(java.util.Locale.ENGLISH), format);
+    }
+
+    /**
+     * ReplicaConsistencyPolicy must be stored as a String for use with
+     * ReplicationConfig and je.properties. ConsistencyPolicyFormat is an
+     * internal handler that formats and parses the string representation of
+     * the policy. Only a fixed number of string-representable policies are
+     * supported. Other policies that are not string-representable can only
+     * be used in TransactionConfig, not ReplicationConfig. For testing only,
+     * we allow defining new custom policies.
+     */
+    public interface
+        ConsistencyPolicyFormat<T extends ReplicaConsistencyPolicy> {
+
+        String policyToString(final T policy);
+
+        T stringToPolicy(final String string);
+    }
+
+    private static class TimeConsistencyPolicyFormat
+        implements ConsistencyPolicyFormat<TimeConsistencyPolicy> {
+
+        @Override
+        public String policyToString(final TimeConsistencyPolicy policy) {
+            return policy.getName() +
+                "(" + policy.getPermissibleLag(TimeUnit.MILLISECONDS) +
+                " ms," + policy.getTimeout(TimeUnit.MILLISECONDS) +
+                " ms)";
+        }
+
+        @Override
+        public TimeConsistencyPolicy stringToPolicy(final String string) {
+            /* Format: <name>(<permissibleLag>, <timeout>) */
+            String args =
+                string.substring(TimeConsistencyPolicy.NAME.length());
+            if (args.charAt(0) != '(' ||
+                args.charAt(args.length() - 1) != ')') {
+                throw new IllegalArgumentException
+                    ("Incorrect property value syntax: " + string);
+            }
+            int arg1 = args.indexOf(',');
+            if (arg1 == -1) {
+                throw new IllegalArgumentException
+                    ("Incorrect property value syntax: " + string);
+            }
+            int lag = PropUtil.parseDuration(args.substring(1, arg1));
+            int arg2 = args.indexOf(')');
+            if (arg2 == -1) {
+                throw new IllegalArgumentException
+                    ("Incorrect property value syntax: " + string);
+            }
+            int timeout =
+                PropUtil.parseDuration(args.substring(arg1 + 1, arg2));
+            return new TimeConsistencyPolicy
+                (lag, TimeUnit.MILLISECONDS, timeout, TimeUnit.MILLISECONDS);
+        }
+    }
+
+    private static class NoConsistencyRequiredPolicyFormat
+        implements ConsistencyPolicyFormat<NoConsistencyRequiredPolicy> {
+
+        @Override
+        public String
+            policyToString(final NoConsistencyRequiredPolicy policy) {
+            return NoConsistencyRequiredPolicy.NAME;
+        }
+
+        @Override
+        public NoConsistencyRequiredPolicy
+            stringToPolicy(final String string) {
+            return NoConsistencyRequiredPolicy.NO_CONSISTENCY;
+        }
+    }
+
+    /**
+     * Converts a policy into a string suitable for use as a property value
+     * in a je.properties file or elsewhere.
+     *
+     * @param policy the policy being converted.
+     *
+     * @return the formatted string representing the policy.
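+     *
+     * <p>For example (hypothetical values), a policy constructed as
+     * {@code new TimeConsistencyPolicy(10, TimeUnit.MILLISECONDS, 30000,
+     * TimeUnit.MILLISECONDS)} formats as
+     * {@code "TimeConsistencyPolicy(10 ms,30000 ms)"}, and
+     * {@link #getReplicaConsistencyPolicy} parses that string back into an
+     * equivalent policy.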
+     *
+     * @throws IllegalArgumentException if the specific policy does not have
+     * a property value format, via ReplicationConfig(Properties) ctor and
+     * setter.
+     *
+     * @see #getReplicaConsistencyPolicy(String)
+     */
+    @SuppressWarnings("unchecked")
+    public static String getPropertyString(ReplicaConsistencyPolicy policy)
+        throws IllegalArgumentException {
+
+        @SuppressWarnings("rawtypes")
+        ConsistencyPolicyFormat format =
+            consistencyPolicyFormats.get(policy.getName().toUpperCase());
+        if (format == null) {
+            throw new IllegalArgumentException
+                ("Policy: " + policy + " cannot be used as a property");
+        }
+        return format.policyToString(policy);
+    }
+
+    /**
+     * Converts a property string into a policy instance.
+     *
+     * @param propertyValue the formatted string representing the policy.
+     *
+     * @return the policy computed from the string
+     *
+     * @throws IllegalArgumentException via ReplicationConfig(Properties)
+     * ctor and setter.
+     */
+    public static ReplicaConsistencyPolicy
+        getReplicaConsistencyPolicy(String propertyValue)
+        throws IllegalArgumentException {
+
+        final String upperCasePropertyValue =
+            propertyValue.toUpperCase(java.util.Locale.ENGLISH);
+        for (final Map.Entry<String, ConsistencyPolicyFormat<?>> entry :
+             consistencyPolicyFormats.entrySet()) {
+            final String name = entry.getKey();
+            if (upperCasePropertyValue.equals(name) ||
+                (upperCasePropertyValue.startsWith(name) &&
+                 upperCasePropertyValue.length() > name.length() &&
+                 !Character.isLetter(
+                     upperCasePropertyValue.charAt(name.length())))) {
+                ConsistencyPolicyFormat<?> format = entry.getValue();
+                return format.stringToPolicy(propertyValue);
+            }
+        }
+        throw new IllegalArgumentException
+            ("Invalid consistency policy: " + propertyValue);
+    }
+
+    /**
+     * Like CountDownLatch, but makes provision for the await -- more
+     * specifically, the new awaitOrException method -- to be exited via an
+     * exception.
+     */
+    public static class ExceptionAwareCountDownLatch extends CountDownLatch {
+        /* The environment that may need to be invalidated. */
+        final EnvironmentImpl envImpl;
+
+        /* The exception (if any) that caused the latch to be released. */
+        private final AtomicReference<DatabaseException>
+            terminatingException = new AtomicReference<>();
+
+        public ExceptionAwareCountDownLatch(EnvironmentImpl envImpl,
+                                            int count) {
+            super(count);
+            this.envImpl = envImpl;
+        }
+
+        /**
+         * The method used to free an await, ensuring that it throws an
+         * exception at the awaitOrException.
+         *
+         * @param e the exception to be wrapped in a DatabaseException
+         * and thrown.
+         */
+        public void releaseAwait(Exception e) {
+            terminatingException.compareAndSet(
+                null, prepareTerminatingException(e, envImpl));
+            for (long count = getCount(); count > 0; count--) {
+                countDown();
+            }
+            assert (getCount() == 0);
+        }
+
+        /**
+         * Blocks, waiting for the latch to count down to zero, or until an
+         * {@code Exception} is provided. The exception is thrown in every
+         * thread that is waiting in this method.
+         *
+         * @see #releaseAwait
+         */
+        public boolean awaitOrException(long timeout, TimeUnit unit)
+            throws InterruptedException,
+                   DatabaseException {
+
+            boolean done = super.await(timeout, unit);
+            if (!done) {
+                return done;
+            }
+            final DatabaseException e = terminatingException.get();
+            if (e != null) {
+                throw addLocalStackTrace(e);
+            }
+            return done;
+        }
+
+        public void awaitOrException()
+            throws InterruptedException,
+                   DatabaseException {
+            awaitOrException(Integer.MAX_VALUE, TimeUnit.SECONDS);
+        }
+
+        /**
+         * DO NOT use this method. Use awaitOrException instead, so that any
+         * outstanding exceptions are thrown.
+         */
+        @Override
+        @Deprecated
+        public boolean await(long timeout, TimeUnit unit) {
+            throw EnvironmentFailureException.unexpectedState
+                ("Use awaitOrException() instead of await");
+        }
+    }
+
+    /**
+     * Like {@code LinkedBlockingQueue}, but provides a {@code
+     * pollOrException()} method that should be used instead of {@code
+     * poll()}, so that callers don't have to treat exception cases
+     * specially.
+     *
+     * @see ExceptionAwareCountDownLatch
+     */
+    @SuppressWarnings("serial")
+    public static class ExceptionAwareBlockingQueue<T>
+        extends LinkedBlockingQueue<T> {
+
+        final EnvironmentImpl envImpl;
+        final T dummyValue;
+
+        private final AtomicReference<DatabaseException>
+            terminatingException = new AtomicReference<>();
+
+        public ExceptionAwareBlockingQueue(EnvironmentImpl envImpl,
+                                           T dummyValue) {
+            super();
+            this.envImpl = envImpl;
+            this.dummyValue = dummyValue;
+        }
+
+        public void releasePoll(Exception e) {
+            terminatingException.compareAndSet(
+                null, prepareTerminatingException(e, envImpl));
+            add(dummyValue);
+        }
+
+        public T pollOrException(long timeout, TimeUnit unit)
+            throws InterruptedException,
+                   DatabaseException {
+
+            T value = super.poll(timeout, unit);
+            if (value == null) {
+                return value;
+            }
+            final DatabaseException e = terminatingException.get();
+            if (e != null) {
+                throw addLocalStackTrace(e);
+            }
+            return value;
+        }
+
+        /**
+         * Use {@link #pollOrException} instead.
+         */
+        @Override
+        @Deprecated
+        public T poll(long timeout, TimeUnit unit) {
+            throw EnvironmentFailureException.unexpectedState
+                ("Use pollOrException() instead of poll()");
+        }
+    }
+
+    /**
+     * The terminating exception is wrapped in an EFE if it is not already a
+     * DatabaseException (which is unexpected). Also text is added to the
+     * message indicating it was thrown by an HA thread, since it will often
+     * be re-thrown in an app thread and the stack trace may be confusing.
+     */
+    private static DatabaseException prepareTerminatingException(
+        final Exception e,
+        final EnvironmentImpl envImpl) {
+
+        if (e == null) {
+            return null;
+        }
+
+        final DatabaseException de =
+            (e instanceof DatabaseException) ?
+            ((DatabaseException) e) :
+            EnvironmentFailureException.unexpectedException(envImpl, e);
+
+        de.addErrorMessage(
+            "Originally thrown by HA thread: " +
+            Thread.currentThread().getName());
+
+        return de;
+    }
+
+    /**
+     * Ideally we should wrap the exception before rethrowing in a different
+     * thread, but this confuses exception handlers that call getCause. So
+     * instead we add the per-thread local stack trace to the message.
+     */
+    private static DatabaseException addLocalStackTrace(DatabaseException e) {
+        e.addRethrownStackTrace();
+        return e;
+    }
+
+    /**
+     * Forces a shutdown of the channel ignoring any errors that may be
+     * encountered in the process.
+     *
+     * @param namedChannel the channel to be shutdown
+     */
+    public static void shutdownChannel(NamedChannel namedChannel) {
+        if (namedChannel == null) {
+            return;
+        }
+        shutdownChannel(namedChannel.getChannel());
+    }
+
+    public static void shutdownChannel(DataChannel channel) {
+        if (channel == null) {
+            return;
+        }
+
+        /*
+         * For SSL, shutting down the socket before shutting down the
+         * channel is a no-no. That results in SSLExceptions being
+         * thrown due to missing close_notify alerts.
+         */
+        try {
+            channel.close();
+        } catch (IOException e) {
+            /* Ignore */
+        }
+    }
+
+    /**
+     * Create a socket channel with the designated properties.
+     *
+     * @param addr the remote endpoint socket address
+     * @param connectOpts connect options to be applied to the channel
+     * @return the connected channel
+     */
+    public static SocketChannel openSocketChannel(InetSocketAddress addr,
+                                                  ConnectOptions connectOpts)
+        throws IOException {
+
+        assert TestHookExecute.doHookIfSet(openSocketChannelHook,
+                                           connectOpts);
+
+        final SocketChannel channel = SocketChannel.open();
+        channel.configureBlocking(connectOpts.getBlocking());
+
+        final Socket socket = channel.socket();
+        if (connectOpts.getReceiveBufferSize() != 0) {
+            socket.setReceiveBufferSize(connectOpts.getReceiveBufferSize());
+        }
+        socket.setTcpNoDelay(connectOpts.getTcpNoDelay());
+        socket.setSoTimeout(connectOpts.getReadTimeout());
+        socket.setReuseAddress(connectOpts.getReuseAddr());
+
+        socket.connect(addr, connectOpts.getOpenTimeout());
+        return channel;
+    }
+
+    /**
+     * Create a DataChannel with the designated properties.
+     *
+     * @param addr the remote endpoint socket address
+     * @param factory DataChannel factory for channel creation
+     * @param connectOpts connect options to be applied to the channel
+     * @return the connected channel
+     */
+    public static DataChannel openBlockingChannel(InetSocketAddress addr,
+                                                  DataChannelFactory factory,
+                                                  ConnectOptions connectOpts)
+        throws IOException {
+        return factory.connect(addr, connectOpts);
+    }
+
+    /**
+     * Chains an old outstanding exception to the tail of a new one, so it's
+     * not lost.
+     *
+     * @param newt the new throwable
+     * @param oldt the old throwable
+     * @return the new throwable extended with the old cause
+     */
+    public static Throwable chainExceptionCause(Throwable newt,
+                                                Throwable oldt) {
+        /* Don't lose the original exception. */
+        Throwable tail = newt;
+        while (tail.getCause() != null) {
+            tail = tail.getCause();
+        }
+        tail.initCause(oldt);
+        return newt;
+    }
+
+    public static String writeTimesString(StatGroup stats) {
+        long nMessagesWritten = stats.getLong(N_MESSAGES_WRITTEN);
+        long nWriteNanos = stats.getLong(N_WRITE_NANOS);
+
+        long avgWriteNanos =
+            (nMessagesWritten <= 0) ? 0 : (nWriteNanos / nMessagesWritten);
+
+        return String.format(" write time: %,dms Avg write time: %,dus",
+                             nWriteNanos / 1000000, avgWriteNanos / 1000);
+    }
+
+    /**
+     * Read replication access properties from a property file.
+     *
+     * @param props a Properties object into which the properties will be
+     * stored
+     * @param accessPropsFile an abstract File naming a file containing
+     * property settings.
+     * @return the input properties object, updated with the property
+     * settings found in the file.
+     * @throws IllegalArgumentException if the accessPropsFile contains
+     * invalid settings.
+     */
+    public static Properties populateNetProps(Properties props,
+                                              File accessPropsFile) {
+
+        Properties rawProps = new Properties();
+        DbConfigManager.applyFileConfig(accessPropsFile, rawProps,
+                                        true); // forReplication
+
+        /* Filter out the properties that are not relevant. */
+        ReplicationNetworkConfig.applyRepNetProperties(rawProps, props);
+        return props;
+    }
+
+    /**
+     * A simple debugging utility used to obtain information about the
+     * execution environment that's only available through some system
+     * utility, like netstat, or jps, etc. It's up to the caller to ensure
+     * the availability of the utility and ensure that it's on the search
+     * path.
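+     *
+     * <p>For example, a caller could capture the output of the JDK's jps
+     * tool (assuming it is on the search path) with:
+     * <pre>
+     * String listing = RepUtils.exec("jps", "-l");
+     * </pre>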
+     *
+     * @param args the arguments to a ProcessBuilder with args[0] being the
+     * command and args[1-...] being its args
+     *
+     * @return a string denoting the output from the command. Or a string
+     * prefixed by the word EXCEPTION, if an exception was encountered,
+     * followed by the exception class name and exception message.
+     */
+    public static String exec(String... args) {
+
+        final ByteArrayOutputStream bao = new ByteArrayOutputStream(1024);
+        final PrintStream output = new PrintStream(bao);
+
+        try {
+
+            final ProcessBuilder builder = new ProcessBuilder(args);
+            builder.redirectErrorStream(true);
+
+            final Process process = builder.start();
+            final InputStream is = process.getInputStream();
+            final InputStreamReader isr = new InputStreamReader(is);
+            final BufferedReader br = new BufferedReader(isr);
+
+            String line;
+            while ((line = br.readLine()) != null) {
+                output.println(line);
+            }
+
+        } catch (Exception e) {
+            output.println("EXCEPTION:" + " class:" + e.getClass().getName() +
+                           " message:" + e.getMessage());
+        }
+
+        return bao.toString();
+    }
+
+    /*
+     * Used to create deliberate clock skews for testing purposes. Replicator
+     * code should use it instead of invoking System.currentTimeMillis()
+     * directly.
+     */
+    public static class Clock {
+        private final int skewMs;
+
+        public Clock(int skewMs) {
+            this.skewMs = skewMs;
+        }
+
+        public long currentTimeMillis() {
+            return System.currentTimeMillis() + skewMs;
+        }
+    }
+
+}
diff --git a/src/com/sleepycat/je/rep/utilint/ReplicationFormatter.java b/src/com/sleepycat/je/rep/utilint/ReplicationFormatter.java
new file mode 100644
index 0000000..12076d3
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/ReplicationFormatter.java
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.utilint.TracerFormatter;
+
+/**
+ * Formatter for replication log messages
+ */
+public class ReplicationFormatter extends TracerFormatter {
+    private final NameIdPair nameIdPair;
+
+    public ReplicationFormatter(NameIdPair nameIdPair) {
+        super();
+        this.nameIdPair = nameIdPair;
+    }
+
+    @Override
+    protected void appendEnvironmentName(StringBuilder sb) {
+        sb.append(" [" + nameIdPair.getName() + "]");
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/ServiceDispatcher.java b/src/com/sleepycat/je/rep/utilint/ServiceDispatcher.java
new file mode 100644
index 0000000..6599ef7
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/ServiceDispatcher.java
@@ -0,0 +1,1247 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import static com.sleepycat.je.rep.impl.RepParams.BIND_INADDR_ANY;
+import static com.sleepycat.je.rep.impl.RepParams.SO_BIND_WAIT_MS;
+import static com.sleepycat.je.rep.impl.RepParams.SO_REUSEADDR;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.BindException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.StandardSocketOptions;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channel;
+import java.nio.channels.Channels;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.Selector;
+import java.nio.channels.ServerSocketChannel;
+import java.nio.channels.SocketChannel;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.logging.Formatter;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.TextProtocol;
+import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage;
+import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.subscription.ServerAuthMethod;
+import com.sleepycat.je.rep.subscription.StreamAuthenticator;
+import com.sleepycat.je.rep.utilint.ServiceHandshake.AuthenticationMethod;
+import com.sleepycat.je.rep.utilint.ServiceHandshake.ByteChannelIOAdapter;
+import com.sleepycat.je.rep.utilint.ServiceHandshake.ClientHandshake;
+import com.sleepycat.je.rep.utilint.ServiceHandshake.InitResult;
+import com.sleepycat.je.rep.utilint.ServiceHandshake.ServerHandshake;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.StoppableThread;
+import com.sleepycat.je.utilint.StoppableThreadFactory;
+
+/**
+ * ServiceDispatcher listens on a specific socket for service requests
+ * and dispatches control to the service that is being requested. A service
+ * request message has the format:
+ *
+ *    Service:<one byte service name length><service name in ascii>
+ *
+ * The format of the message is binary, with all text being encoded in ascii.
+ *
+ * Upon receipt of a service request message, the new SocketChannel is queued
+ * for processing by the service in the Queue associated with the service. The
+ * SocketChannel is the responsibility of the service after this point. It can
+ * configure the channel to best suit the requirements of the specific
+ * service.
+ *
+ * The dispatcher returns a single byte to indicate success or failure. The
+ * byte value encodes a ServiceDispatcher.Response enumerator.
+ */
+public class ServiceDispatcher extends StoppableThread {
+
+    /* The socket on which the dispatcher is listening */
+    private InetSocketAddress socketAddress;
+
+    /*
+     * The selector that watches for accept events on the server socket and
+     * on subsequent read events.
+     */
+    private final Selector selector;
+    private SelectionKey scKey;
+
+    /* The server socket channel */
+    private ServerSocketChannel serverChannel;
+
+    /* Determines whether new connections should be accepted.
*/ + private boolean processAcceptRequests = true; + + /* Maintains the error count, used primarily for testing. */ + private int errorCount = 0; + + /* + * Maps the service name to the queue of sockets processed by the + * service. + */ + private final Map serviceMap = + new ConcurrentHashMap(); + + /* The thread pool used to manage the threads used by services */ + private final ExecutorService pool; + + private final Logger logger; + private final Formatter formatter; + + /* + * A reference to a replicated environment, only used for error + * propagation when this dispatcher has been created for a replicated + * node. + */ + private final RepImpl repImpl; + + private final DataChannelFactory channelFactory; + + private AuthenticationMethod[] authOptions; + + /** + * The response to a service request. + * + * Do not rearrange the order of the enumerators, since their ordinal + * values are currently used in messages. + */ + public static enum Response { + + OK, BUSY, FORMAT_ERROR, UNKNOWN_SERVICE, PROCEED, INVALID, AUTHENTICATE; + + ByteBuffer byteBuffer() { + ByteBuffer buffer = ByteBuffer.allocate(1); + buffer.put((byte)ordinal()); + buffer.flip(); + return buffer; + } + + public static Response get(int ordinal) { + if (ordinal < values().length) { + return values()[ordinal]; + } + return null; + } + } + + /** + * Create a ServiceDispatcher listening on a specific socket for service + * requests. This service dispatcher has been created on behalf of a + * replicated environment, and the node will be informed of any unexpected + * failures seen by the dispatcher. + * + * @param socketAddress the socket on which it listens for service + * requests. This address may be extended to cover all local addresses, if + * {@link RepParams#BIND_INADDR_ANY} has been set to true. + * + * @throws IOException if the socket could not be bound. + */ + public ServiceDispatcher(InetSocketAddress socketAddress, + RepImpl repImpl, + DataChannelFactory channelFactory) + throws IOException { + + super(repImpl, "ServiceDispatcher-" + socketAddress.getHostName() + + ":" + socketAddress.getPort()); + + this.repImpl = repImpl; + this.socketAddress = socketAddress; + this.channelFactory = channelFactory; + selector = Selector.open(); + + String poolName = "ServiceDispatcherPool"; + NameIdPair nameIdPair = NameIdPair.NULL; + if (repImpl == null) { + logger = LoggerUtils.getLoggerFormatterNeeded(getClass()); + } else { + logger = LoggerUtils.getLogger(getClass()); + nameIdPair = repImpl.getNameIdPair(); + poolName += "_" + nameIdPair; + } + + pool = Executors.newCachedThreadPool + (new StoppableThreadFactory(poolName, logger)); + + formatter = new ReplicationFormatter(nameIdPair); + + bindSocket(); + + setAuthOptions(); + } + + private void bindSocket() throws IOException { + + serverChannel = ServerSocketChannel.open(); + serverChannel.configureBlocking(false); + scKey = serverChannel.register(selector, SelectionKey.OP_ACCEPT); + ServerSocket acceptSocket = serverChannel.socket(); + /* No timeout */ + acceptSocket.setSoTimeout(0); + + InetSocketAddress bindAddress = socketAddress; + if (repImpl != null) { + if (repImpl.getConfigManager().getBoolean(SO_REUSEADDR)) { + + /* Only turn it on if requested. Otherwise let it default. 
*/ + + serverChannel.setOption(StandardSocketOptions.SO_REUSEADDR, + true); + + acceptSocket.setReuseAddress(true); + } + + if (repImpl.getConfigManager().getBoolean(BIND_INADDR_ANY)) { + bindAddress = new InetSocketAddress((InetAddress)null, + socketAddress.getPort()); + } + } + + final int limitMs = (repImpl != null) ? + repImpl.getConfigManager().getInt(SO_BIND_WAIT_MS) : 0; + + /* Bind the socket */ + BindException bindException = null; + final int retryWaitMs = 1000; + int totalWaitMs; + for (totalWaitMs = 0; totalWaitMs <= limitMs; + totalWaitMs += retryWaitMs) { + try { + bindException = null; + acceptSocket.bind(bindAddress); + break; + } catch (BindException be) { + bindException = be; + try { + Thread.sleep(retryWaitMs); + } catch (InterruptedException e) { + throw bindException; + } + } + } + + if (bindException != null) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "ServiceDispatcher HostPort=" + + socketAddress.getHostName() + ":" + + socketAddress.getPort() + + " bind failed despite waiting for " + + limitMs + "ms"); + + if (limitMs > 0) { + /* + * Print information to help identify the process currently + * binding the required port. + */ + /* Print all java processes and their args */ + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + RepUtils.exec("jps", "-v")); + /* Print all processes binding tcp ports. */ + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + RepUtils.exec("netstat", "-lntp")); + } + + /* Failed after retrying. */ + throw bindException; + } else if (totalWaitMs != 0) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "ServiceDispatcher HostPort=" + + socketAddress.getHostName() + ":" + + socketAddress.getPort() + + " become available after: " + + totalWaitMs + "ms"); + } + } + + /** + * Convenience overloading for when the dispatcher is created without a + * replicated environment, e.g. when used by the Monitor, and in unit test + * situations. + * + * @see #ServiceDispatcher(InetSocketAddress, RepImpl, DataChannelFactory) + */ + public ServiceDispatcher(InetSocketAddress socketAddress, + DataChannelFactory channelFactory) + throws IOException { + + this(socketAddress, null /* repImpl */, channelFactory); + } + + /** + * Stop accepting new connections, while the individual services quiesce + * and shut themselves down. + */ + public void preShutdown() { + processAcceptRequests = false; + } + + /** + * Shuts down the dispatcher, so that it's no longer listening for service + * requests. The port is freed up upon return and the thread used to + * listen on the port is shutdown. + */ + public void shutdown() { + if (shutdownDone(logger)) { + return; + } + + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "ServiceDispatcher shutdown starting. HostPort=" + + socketAddress.getHostName() + ":" + + + socketAddress.getPort() + + " Registered services: " + serviceMap.keySet()); + + shutdownThread(logger); + + for (String serviceName : serviceMap.keySet()) { + cancel(serviceName); + } + + /* Shutdown any executing and queued service requests. */ + pool.shutdownNow(); + try { + serverChannel.socket().close(); + selector.close(); + } catch (IOException e) { + LoggerUtils.logMsg + (logger, repImpl, formatter, Level.WARNING, + "Ignoring I/O error during close: " + + LoggerUtils.exceptionTypeAndMsg(e)); + } + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "ServiceDispatcher shutdown completed." 
+ + " HostPort=" + socketAddress.getHostName() + + ":" + socketAddress.getPort()); + } + + @Override + protected int initiateSoftShutdown() { + selector.wakeup(); + return 0; + } + + /** + * @see StoppableThread#getLogger + */ + @Override + protected Logger getLogger() { + return logger; + } + + /** + * Logging interface for use by ServiceHandshake code. + */ + void logMsg(Level level, boolean noteError, String msg) { + if (noteError) { + errorCount++; + } + LoggerUtils.logMsg(logger, repImpl, formatter, level, msg); + } + + /** + * Used by the client to set up a channel for the service. It performs the + * initial handshake requesting the service and interprets the response to + * determine if it was successful. + * + * @param channel the channel that is the basis for the service + * @param serviceName the service running on the channel + * + * @throws IOException if the output stream could not be established + * @throws ServiceConnectFailedException if the connection could not be + * made. + */ + static public void doServiceHandshake(DataChannel channel, + String serviceName) + throws IOException, ServiceConnectFailedException { + + doServiceHandshake(channel, serviceName, null); + } + + /** + * A variation on the method above. It's used by the client to setup a + * channel for the service. It performs the initial handshake requesting + * the service and interpreting the response to determine if it was + * successful. + * + * @param channel the channel that is the basis for the service + * @param serviceName the service running on the channel + * @param authInfo a list of authentication methods supported by the + * caller. + * @throws ServiceConnectFailedException if the connection could not be + * made. + */ + static public void doServiceHandshake(DataChannel channel, + String serviceName, + AuthenticationMethod[] authInfo) + throws IOException, ServiceConnectFailedException { + + final ClientHandshake handshake = + new ClientHandshake(serviceName, + authInfo, + new ByteChannelIOAdapter(channel)); + + final Response response = handshake.process(); + if (response != Response.OK) { + throw new ServiceConnectFailedException(serviceName, response); + } + } + + /** + * Returns the next socketChannel created in response to a request for the + * service. The socketChannel and the associated socket is configured as + * requested in the arguments. + * + * @param serviceName the service for which the channel must be created. + * @param blocking true if the channel must be configured to block + * @param soTimeout the timeout for the underlying socket + * @return the configured channel or null if there are no more channels, + * because the service has been shut down. + * @throws InterruptedException + */ + public DataChannel takeChannel(String serviceName, + boolean blocking, + int soTimeout) + throws InterruptedException { + + while (true) { + Service service = serviceMap.get(serviceName); + if (service == null) { + throw EnvironmentFailureException.unexpectedState + ("Service: " + serviceName + " was not registered"); + } + if (! 
(service instanceof QueuingService)) { + throw EnvironmentFailureException.unexpectedState + ("Service: " + serviceName + " is not a queuing service"); + } + Socket socket = null; + DataChannel channel = null; + try { + channel = ((QueuingService)service).take(); + assert channel != null; + + if (channel == RepUtils.CHANNEL_EOF_MARKER) { + /* A pseudo channel to indicate EOF, return null */ + return null; + } + + channel.getSocketChannel().configureBlocking(blocking); + + socket = channel.getSocketChannel().socket(); + socket.setSoTimeout(soTimeout); + if (blocking) { + + /* + * Ensure that writes have been flushed. All message + * exchanges should be complete here, and we don't expect + * there to be any pending writes, but do this here in + * blocking mode to ensure that the writes complete without + * the need for a loop. + */ + channel.flush(); + } + + return channel; + } catch (IOException e) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.WARNING, + "Unable to configure channel " + + "for '" + serviceName + "' service: " + + LoggerUtils.exceptionTypeAndMsg(e)); + try { + channel.close(); + } catch (IOException e1) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINEST, + "Cleanup failed for service: " + + serviceName + "\n" + + LoggerUtils.exceptionTypeAndMsg(e1)); + } + /* Wait for the next request. */ + continue; + } + } + } + + /** + * Returns the specific socket address associated with the dispatcher. + * Unlike getSocketBoundAddress() it can never return the wild card address + * INADDR_ANY. This is the address used by clients requesting + * ServiceDispatcher services. + * + * If {@link RepParams#BIND_INADDR_ANY} has been set to true, this is one + * of the addresses that the socket is associated with. + * + * @see #getSocketBoundAddress() + */ + public InetSocketAddress getSocketAddress() { + return socketAddress; + } + + /** + * For testing only. + * + * Returns the server socket address that was actually used to bind the + * socket. It's the wildcard address INADDR_ANY if the HA config {@link + * RepParams#BIND_INADDR_ANY} has been set to true. + */ + public InetAddress getSocketBoundAddress() { + return serverChannel.socket().getInetAddress(); + } + + /** + * Registers a service queue with the ServiceDispatcher. Requests for a + * service result in a new SocketChannel being created on which the service + * can communicate with the requester of the service. + * + * @param serviceName the name of the service being requested + * @param serviceQueue the queue that will be used to hold channels + * established for the service. 
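+     *
+     * A hedged usage sketch (the service name, queue, and timeout here are
+     * illustrative, not part of this class):
+     * <pre>
+     *    BlockingQueue&lt;DataChannel&gt; queue =
+     *        new LinkedBlockingQueue&lt;DataChannel&gt;();
+     *    dispatcher.register("Feeder", queue);
+     *    // Later, typically from the service's own thread:
+     *    DataChannel channel = dispatcher.takeChannel("Feeder", true, 10000);
+     * </pre>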
+ */ + public void register(String serviceName, + BlockingQueue serviceQueue) { + if (serviceName == null) { + throw EnvironmentFailureException.unexpectedState + ("The serviceName argument must not be null"); + } + if (serviceMap.containsKey(serviceName)) { + throw EnvironmentFailureException.unexpectedState + ("Service: " + serviceName + " is already registered"); + } + if (serviceQueue == null) { + throw EnvironmentFailureException.unexpectedState + ("The serviceQueue argument must not be null"); + } + serviceMap.put(serviceName, + new QueuingService(serviceName, serviceQueue)); + } + + public void register(Service service) { + if (service == null) { + throw EnvironmentFailureException.unexpectedState + ("The service argument must not be null"); + } + + if (serviceMap.containsKey(service.name)) { + throw EnvironmentFailureException.unexpectedState + ("Service: " + service.name + " is already registered"); + } + LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE, + "Service: " + service.name + " registered."); + serviceMap.put(service.name, service); + } + + public boolean isRegistered(String serviceName) { + if (serviceName == null) { + throw EnvironmentFailureException.unexpectedState + ("The serviceName argument must not be null"); + } + return serviceMap.containsKey(serviceName); + } + + public void setSimulateIOException(String serviceName, + boolean simulateException) { + + Service service = serviceMap.get(serviceName); + if (service == null) { + throw new IllegalStateException + ("Service: " + serviceName + " is not registered"); + } + + service.setSimulateIOException(simulateException); + } + + /** + * Cancels the registration of a service. Subsequent attempts to access the + * service will be ignored and the channel will be closed and will not be + * queued. + * + * @param serviceName the name of the service being cancelled + */ + public void cancel(String serviceName) { + if (serviceName == null) { + throw EnvironmentFailureException.unexpectedState + ("The serviceName argument must not be null."); + } + Service service = serviceMap.remove(serviceName); + + if (service == null) { + throw EnvironmentFailureException.unexpectedState + ("Service: " + serviceName + " was not registered."); + } + service.cancel(); + LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE, + "Service: " + serviceName + " shut down."); + } + + public DataChannelFactory getChannelFactory() { + return channelFactory; + } + + /** + * For testing purposes + */ + void addTestAuthentication(AuthenticationMethod[] authOpts) { + authOptions = authOpts; + } + + /** + * Sets authentication methods to service dispatcher + */ + private void setAuthOptions() { + + if (repImpl == null) { + authOptions = null; + return; + } + + final StreamAuthenticator auth = repImpl.getAuthenticator(); + if (auth == null) { + /* no authenticator, no auth methods */ + authOptions = null; + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "No server auth method"); + } else { + final AuthenticationMethod method = new ServerAuthMethod(auth); + authOptions = new AuthenticationMethod[]{method}; + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "Server auth method: " + + method.getMechanismName()); + } + } + + /** + * Processes an accept event on the server socket. As a result of the + * processing a new socketChannel is created, and the selector is + * registered with the new channel so that it can process subsequent read + * events. 
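+     *
+     * Sketch of the event flow implemented below (a summary of this code,
+     * not additional API):
+     * <pre>
+     *    OP_ACCEPT: serverChannel.accept()
+     *               -&gt; configureBlocking(false)
+     *               -&gt; register(selector, OP_READ, new ServerHandshake(...))
+     *    OP_READ:   ServerHandshake.process() runs until the service name
+     *               (and any required authentication) is complete
+     * </pre>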
+ */ + private void processAccept() { + + SocketChannel socketChannel = null; + try { + socketChannel = serverChannel.accept(); + + if (!processAcceptRequests) { + closeChannel(socketChannel); + return; + } + socketChannel.configureBlocking(false); + final DataChannel dataChannel = + getChannelFactory().acceptChannel(socketChannel); + + /* + * If authenticationMethod is provided, use it. Otherwise if no + * authenticationMethods are provided but the channel is capable + * of determining trust, pass an empty Authentication + * array to ServerHandshake in order to trigger the trust check. + */ + final AuthenticationMethod[] authInfo = + (dataChannel.isTrustCapable() && authOptions == null) ? + new AuthenticationMethod[0] : + authOptions; + + final ServerHandshake initState = + new ServerHandshake(dataChannel, this, authInfo); + + /* Register the selector with the base SocketChannel */ + socketChannel.register(selector, SelectionKey.OP_READ, initState); + } catch (IOException e) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.WARNING, + "Server accept exception: " + + LoggerUtils.exceptionTypeAndMsg(e)); + closeChannel(socketChannel); + } + } + + /** + * Processes read events on newly established socket channels. Input on the + * channel is verified to ensure that it is a service request. The read is + * accomplished in two parts, a read for the fixed size prefix and the name + * length byte, followed by a read of the variable length name itself. + * + * Errors result in the channel being closed(with the key being canceled + * as a result) and a null value being returned. + * + * If the service request is sane, we may require the connecting + * entity to authenticate itself. + * + * @param initState the InitState object associated with the new channel + * + * @return the ServiceName or null if there was insufficient input, or an + * error was encountered. + */ + private String processRead(ServerHandshake initState) { + try { + final InitResult result = initState.process(); + if (result == InitResult.FAIL) { + /* Probably already closed, but make sure */ + initState.getChannel().close(); + return null; + } + if (result == InitResult.REJECT) { + initState.getChannel().write(Response.INVALID.byteBuffer()); + initState.getChannel().close(); + return null; + } + + if (result == InitResult.DONE) { + return initState.getServiceName(); + } + /* Initial sequence not complete as yet, keep reading */ + return null; + } catch (IOException e) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.WARNING, + "Exception during read: " + + LoggerUtils.exceptionTypeAndMsg(e)); + closeChannel(initState.getChannel()); + return null; + } + } + + /** + * Closes the channel, logging any resulting exceptions. + * + * @param channel the channel being closed + */ + private void closeChannel(Channel channel) { + if (channel != null) { + try { + channel.close(); + } catch (IOException e1) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.WARNING, + "Exception during cleanup: " + + LoggerUtils.exceptionTypeAndMsg(e1)); + } + } + } + + /** + * The central run method. It dispatches to the "accept" and "read" event + * processing methods. Upon a completed read, it verifies the validity of + * the service name and queues the channel for subsequent consumption + * by the service. + * + */ + @Override + public void run() { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "Started ServiceDispatcher. 
HostPort=" + + socketAddress.getHostName() + ":" + + socketAddress.getPort()); + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "DataChannel factory: " + + getChannelFactory().getClass().getName()); + try { + while (true) { + try { + /** + * To make the dispatcher resilient to IP address change, + * we periodically check for such change and rebind the + * socket if that occurs. + * + * Speculation and rational: + * New communications fail sliently which is possibly + * caused by that each TCP session use both IP and port + * number as the identifier. Thus it will drop packages + * after IP address change. Yet no exception is raised in + * that situation. Therefore, we cannot rely on exception + * for detecting such a change, but instead use an active + * approach. + */ + boolean changed = false; + try { + changed = ipChanged(); + } catch (Exception e) { + LoggerUtils.logMsg + (logger, repImpl, formatter, Level.INFO, + "Exception while check IP: " + + LoggerUtils.exceptionTypeAndMsg(e)); + } + if (changed) { + rebindSocket(); + } + final int result = selector.select(1000); + if (isShutdown()) { + return; + } + if (result == 0) { + continue; + } + } catch (Exception e) { + LoggerUtils.logMsg + (logger, repImpl, formatter, Level.SEVERE, + "Server socket exception: " + + LoggerUtils.getStackTrace(e)); + throw EnvironmentFailureException.unexpectedException(e); + } + Set skeys = selector.selectedKeys(); + for (SelectionKey key : skeys) { + switch (key.readyOps()) { + + case SelectionKey.OP_ACCEPT: + processAccept(); + break; + + case SelectionKey.OP_READ: + final ServerHandshake initState = + (ServerHandshake) key.attachment(); + + final String serviceName = processRead(initState); + if (serviceName == null) { + break; + } + key.cancel(); + processService(initState.getChannel(), serviceName); + break; + + default: + throw EnvironmentFailureException.unexpectedState + ("Unexpected ops bit set: " + key.readyOps()); + } + } + /* All keys have been processed clear them. */ + skeys.clear(); + } + } finally { + /* + * Clean up any in-process connections that are still in the + * handshake phase. + */ + Iterator skIter = selector.keys().iterator(); + while (skIter.hasNext()) { + SelectionKey key = skIter.next(); + final ServerHandshake initState = + (ServerHandshake) key.attachment(); + if (initState != null) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "Server closing in-process handshake"); + closeChannel(initState.getChannel()); + key.cancel(); + } + } + closeChannel(serverChannel); + cleanup(); + } + } + + private boolean ipChanged() throws Exception { + if (repImpl == null) { + return false; + } + InetAddress addr = InetAddress.getByName(repImpl.getHostName()); + String currentIP = addr.getHostAddress(); + String previousIP = socketAddress.getAddress().getHostAddress(); + boolean changed = !currentIP.equals(previousIP); + if (changed) { + LoggerUtils.logMsg + (logger, repImpl, formatter, Level.INFO, + "ServiceDispatcher IP changed, from " + previousIP + + " to " + currentIP); + } + return changed; + } + + private void rebindSocket() throws IOException { + if (repImpl == null) { + return; + } + scKey.cancel(); + serverChannel.close(); + socketAddress = repImpl.getSocket(); + + bindSocket(); + + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "Rebind ServiceDispatcher socket: " + serverChannel.socket()); + } + + /** + * Performs the guts of the work underlying a service request. 
It validates + * the service request and writes an appropriate response to the channel. + * @param channel + * @param serviceName + */ + private void processService(DataChannel channel, String serviceName) { + final Service service = serviceMap.get(serviceName); + try { + if (service == null) { + errorCount++; + channel.write(Response.UNKNOWN_SERVICE.byteBuffer()); + closeChannel(channel); + /* + * Not unexpected in a distributed app due to calls being made + * before a service is actually registered. + */ + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "Request for unknown Service: " + + serviceName + " Registered services: " + + serviceMap.keySet()); + return; + } + Response response = Response.OK; + if (service.isBusy()) { + response = Response.BUSY; + } + LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE, + "Service response: " + response + + " for service: " + service.name); + + if (channel.write(response.byteBuffer()) == 0) { + throw EnvironmentFailureException.unexpectedState + ("Failed to write byte. Send buffer size: " + + channel.getSocketChannel().socket().getSendBufferSize()); + } + if (response == Response.OK) { + service.requestDispatch(channel); + } + } catch (IOException e) { + closeChannel(channel); + LoggerUtils.logMsg(logger, repImpl, formatter, Level.WARNING, + "IO error writing to channel for " + + "service: " + serviceName + + LoggerUtils.exceptionTypeAndMsg(e)); + } + } + + /** + * The abstract class underlying all services. + */ + static private abstract class Service { + + /* The name associated with the service. */ + final String name; + + private boolean simulateIOException = false; + + public Service(String name) { + super(); + if (name == null) { + throw EnvironmentFailureException.unexpectedState + ("Service name was null"); + } + this.name = name; + } + + /** + * Informs the service of a new request. The implementation of the + * method must not block. + * + * @param channel the channel on which the request was made + */ + abstract void requestDispatch(DataChannel channel); + + /** + * Used to limit a particular type of service to avoid excess load. + */ + public boolean isBusy() { + return false; + } + + /** + * Used during unit testing to simulate communications problems. + */ + public boolean simulateIOException() { + return simulateIOException; + } + + public void setSimulateIOException(boolean simulateIOException) { + this.simulateIOException = simulateIOException; + } + + /** + * Cancel the service as part of the registration being canceled. + */ + abstract void cancel(); + } + + /** + * A service where requests are simply added to the supplied queue. It's + * the responsibility of the service creator to drain the queue. This + * service is used when the service carries out a long-running dialog with + * the service requester. For example, a Feeder service. + */ + public class QueuingService extends Service { + /* Holds the queue of pending requests, one per channel */ + private final BlockingQueue queue; + + QueuingService(String serviceName, + BlockingQueue queue) { + super(serviceName); + this.queue = queue; + } + + DataChannel take() throws InterruptedException { + return queue.take(); + } + + @Override + void requestDispatch(DataChannel channel) { + if (simulateIOException()) { + LoggerUtils.logMsg(logger, repImpl, formatter, Level.INFO, + "Simulated test IO exception"); + try { + /* + * This will provoke an IOException later when we try to + * use the channel in takeChannel(). 
+                     */
+                    channel.close();
+                } catch (IOException e) {
+                    LoggerUtils.logMsg(logger, repImpl, formatter,
+                                       Level.FINEST,
+                                       "Close failure in '" + name +
+                                       "' service: " +
+                                       LoggerUtils.exceptionTypeAndMsg(e));
+                }
+            }
+            if (!queue.add(channel)) {
+                throw EnvironmentFailureException.unexpectedState
+                    ("request queue overflow");
+            }
+        }
+
+        @Override
+        void cancel() {
+            /*
+             * Drain any existing pending requests. It's safe to just iterate
+             * since the service dispatcher has already stopped accepting new
+             * requests for the service.
+             */
+            for (DataChannel channel : queue) {
+                try {
+                    channel.close();
+                } catch (IOException e) {
+                    // Ignore it, it's only cleanup
+                }
+            }
+            queue.add(RepUtils.CHANNEL_EOF_MARKER);
+        }
+    }
+
+    /**
+     * A queuing service that starts the thread that services the requests
+     * lazily, upon the first request, and terminates the thread when the
+     * service is unregistered. The thread must be "interrupt aware" and must
+     * exit when it receives an interrupt.
+     *
+     * This type of service is suitable for services that are used
+     * infrequently.
+     */
+    public class LazyQueuingService extends QueuingService {
+
+        private final Thread serviceThread;
+
+        public LazyQueuingService(String serviceName,
+                                  BlockingQueue<DataChannel> queue,
+                                  Thread serviceThread) {
+
+            super(serviceName, queue);
+            this.serviceThread = serviceThread;
+        }
+
+        @Override
+        void requestDispatch(DataChannel channel) {
+
+            switch (serviceThread.getState()) {
+
+                case NEW:
+                    serviceThread.start();
+                    LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE,
+                                       "Thread started for service: " + name);
+                    break;
+
+                case RUNNABLE:
+                case TIMED_WAITING:
+                case WAITING:
+                case BLOCKED:
+                    /* Was previously activated. */
+                    LoggerUtils.logMsg(logger, repImpl, formatter, Level.FINE,
+                                       "Thread started for service: " + name);
+                    break;
+
+                default:
+                    RuntimeException e =
+                        EnvironmentFailureException.unexpectedState
+                            ("Thread for service: " + name +
+                             " is in state: " + serviceThread.getState());
+                    LoggerUtils.logMsg(logger, repImpl, formatter,
+                                       Level.WARNING,
+                                       LoggerUtils.exceptionTypeAndMsg(e));
+                    throw e;
+            }
+            super.requestDispatch(channel);
+        }
+
+        /**
+         * Interrupts the thread to cause it to exit.
+         */
+        @Override
+        void cancel() {
+            if (serviceThread.isAlive()) {
+                serviceThread.interrupt();
+                try {
+                    serviceThread.join();
+                } catch (InterruptedException e) {
+                    /* Ignore it on shutdown. */
+                }
+            }
+            super.cancel();
+        }
+    }
+
+    /**
+     * A service that is run immediately in a thread allocated to it.
+     * Subtypes implement the getRunnable() method which provides the
+     * runnable object for the service. This service frees the caller from
+     * managing the threads associated with the service. The runnable must
+     * manage interrupts so that it can be shut down by the underlying thread
+     * pool.
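+     *
+     * A minimal illustrative subtype (hypothetical; EchoService and
+     * EchoRunnable are not part of this package):
+     * <pre>
+     *    class EchoService extends ExecutingService {
+     *        EchoService(ServiceDispatcher dispatcher) {
+     *            super("Echo", dispatcher);
+     *        }
+     *        &#64;Override
+     *        public Runnable getRunnable(DataChannel channel) {
+     *            // Echo request bytes back to the requester, honoring
+     *            // interrupts, and close the channel when done.
+     *            return new EchoRunnable(channel);
+     *        }
+     *    }
+     * </pre>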
+ */ + static public abstract class ExecutingService extends Service { + final private ServiceDispatcher dispatcher; + + public ExecutingService(String serviceName, + ServiceDispatcher dispatcher) { + super(serviceName); + this.dispatcher = dispatcher; + } + + public abstract Runnable getRunnable(DataChannel channel); + + @Override + void requestDispatch(DataChannel channel) { + dispatcher.pool.execute(getRunnable(channel)); + } + + @Override + protected void cancel() { + /* Nothing to do */ + } + } + + @SuppressWarnings("serial") + static public class ServiceConnectFailedException extends Exception { + final Response response; + final String serviceName; + + ServiceConnectFailedException(String serviceName, + Response response) { + assert(response != Response.OK); + this.response = response; + this.serviceName = serviceName; + } + + public Response getResponse() { + return response; + } + + @Override + public String getMessage() { + switch (response) { + case FORMAT_ERROR: + return "Bad message format, for service:" + serviceName; + + case UNKNOWN_SERVICE: + return "Unknown service request:" + serviceName; + + case BUSY: + return "Service was busy"; + + case INVALID: + return "Invalid response supplied"; + + case PROCEED: + return "Protocol continuation requested"; + + case AUTHENTICATE: + return "Authentication required"; + + case OK: + /* + * Don't expect an OK response to provoke an exception. + * Fall through. + */ + default: + throw EnvironmentFailureException.unexpectedState + ("Unexpected response:" + response + + " for service:" + serviceName); + } + } + } + + abstract public static class ExecutingRunnable implements Runnable { + protected final DataChannel channel; + protected final TextProtocol protocol; + protected final boolean expectResponse; + + public ExecutingRunnable(DataChannel channel, + TextProtocol protocol, + boolean expectResponse) { + this.channel = channel; + this.protocol = protocol; + this.expectResponse = expectResponse; + } + + /* Read request and send out response. */ + @Override + public void run() { + try { + channel.getSocketChannel().configureBlocking(true); + RequestMessage request = protocol.getRequestMessage(channel); + if (request == null) { + return; + } + ResponseMessage response = getResponse(request); + if (expectResponse && response != null) { + PrintWriter out = new PrintWriter + (Channels.newOutputStream(channel), true); + out.println(response.wireFormat()); + } else { + assert (response == null); + } + } catch (IOException e) { + logMessage("IO error on socket: " + + LoggerUtils.exceptionTypeAndMsg(e)); + return; + } finally { + if (channel.isOpen()) { + try { + channel.close(); + } catch (IOException e) { + logMessage("IO error on socket close: " + + LoggerUtils.exceptionTypeAndMsg(e)); + return; + } + } + } + } + + /* Get the response for a request. */ + abstract protected ResponseMessage getResponse(RequestMessage request) + throws IOException; + + /* Log the message. */ + abstract protected void logMessage(String message); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/ServiceHandshake.java b/src/com/sleepycat/je/rep/utilint/ServiceHandshake.java new file mode 100644 index 0000000..bab99b8 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/ServiceHandshake.java @@ -0,0 +1,1131 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.ByteChannel;
+import java.nio.channels.Channel;
+import java.util.logging.Level;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher.Response;
+import com.sleepycat.utilint.StringUtils;
+
+/**
+ * Provides Dispatcher service handshake logic, including identification of
+ * the service to be accessed and authentication for access to the service.
+ *
+ * The service initialization protocol looks like one of these flows. In the
+ * flows, the notation byte(X) indicates that X is a 1-byte network field and
+ * 2byte(X) indicates that X is a 2-byte network field.
+ *
+ * If no authentication is configured:
+ *    Client                                          Dispatcher
+ *     -> "Service:" + byte(len) + <service name>
+ *     <- byte(OK|BUSY|UNKNOWN_SERVICE)
+ *
+ * If authentication is configured:
+ *    Client                                          Dispatcher
+ *     -> "Service:" + byte(len) + <service name>
+ *     <- byte(AUTHENTICATE)
+ *     -> "Authenticate:" + byte(len) + <mechanism list>
+ *     <- "Mechanism:" + 2byte(len) + <mechanism name> +
+ *            ":" + <mechanism params>
+ *     -> <mechanism-specific authentication data>
+ *     <- byte(OK|BUSY|UNKNOWN_SERVICE|INVALID)
+ *
+ * The code here is organized into client-side and server-side functions.
+ * Client-side code is presumed to operate in blocking mode and server-side
+ * code is presumed to operate in non-blocking mode.
+ */
+public class ServiceHandshake {
+
+    /**
+     * The maximum length of a service name
+     */
+    public static final int SERVICE_NAME_LIMIT = 127;
+
+    /*
+     * Maximum number of times to retry a write operation
+     */
+    private static final int CHANNEL_WRITE_ATTEMPT_LIMIT = 10;
+
+    /**
+     * Operation-level result indicator
+     */
+    public static enum InitResult {
+        /* A failure has been detected */
+        FAIL,
+        /* Need to read additional input - only applicable server-side */
+        READ,
+        /* Successful completion */
+        DONE,
+        /* Authentication failure, but no notification sent yet. */
+        REJECT
+    }
+
+    /**
+     * Server-side handshake state
+     */
+    public static class ServerHandshake {
+        /* The communication channel on which we are operating */
+        private final DataChannel channel;
+
+        /* The dispatcher on whose behalf we are operating */
+        private final ServiceDispatcher dispatcher;
+
+        /* The set of valid authentication mechanisms available for use */
+        private final AuthenticationMethod[] authInfo;
+
+        /* The current handshake operation being processed */
+        private ServerInitOp currentOp;
+
+        /* Discovered as part of the handshake protocol */
+        private String serviceName;
+
+        ServerHandshake(DataChannel dataChannel,
+                        ServiceDispatcher dispatcher,
+                        AuthenticationMethod[] authInfo) {
+
+            this.channel = dataChannel;
+            this.dispatcher = dispatcher;
+            this.authInfo = authInfo;
+            currentOp = new ReceiveNameOp(this);
+        }
+
+        DataChannel getChannel() {
+            return channel;
+        }
+
+        /**
+         * Callback for underlying init operations upon successful discovery
+         * of the requested service name.
+ */ + void setServiceName(String serviceName) { + this.serviceName = serviceName; + } + + String getServiceName() { + return serviceName; + } + + /** + * Process the handshake incrementally, as channel is presumed to be + * in non-blocking mode. + * + * This function returns one of the following: + * InitResult.READ: handshake is not complete; more data needed + * InitResult.FAIL: handshake was rejected or otherwise failed + * the underlying data channel has been closed. + * InitResult.DONE: handshake is complete, and any authentication + * has been performed. A Response still needs to be sent to + * the client. Caller must do this after any additional validation + * that may be required. + */ + InitResult process() throws IOException { + final InitResult result = currentOp.processOp(channel); + if (result != InitResult.DONE) { + return result; + } + + if (channel.isTrustCapable()) { + if (channel.isTrusted()) { + /* + * Authentication of the requester has been handled at the + * DataChannel level, so no need to worry further about + * authentication. + */ + return InitResult.DONE; + } + logMsg(Level.WARNING, + false, // noteError + "DataChannel is trust-capable but is not trusted"); + + /* + * Defer rejecting the connection until the + * RequireAuthenticateOp step, in case there is an alternate + * authentication mechanism. + */ + } + + if (currentOp instanceof RequireAuthenticateOp || + authInfo == null) { + /* + * Either we've just successfully completed authentication or + * no authentication is required. + */ + return InitResult.DONE; + } + + /* Initiate the authentication step. */ + currentOp = new RequireAuthenticateOp(this, authInfo); + return currentOp.processOp(channel); + } + + void logMsg(Level level, boolean noteError, String msg) { + dispatcher.logMsg(level, noteError, msg); + } + } + + /** + * Client-side handshake state + */ + public static class ClientHandshake { + private String serviceName; + private AuthenticationMethod[] authInfo; + private IOAdapter ioAdapter; + + ClientHandshake(String serviceName, + AuthenticationMethod[] authInfo, + IOAdapter ioAdapter) { + this.authInfo = authInfo; + this.serviceName = serviceName; + this.ioAdapter = ioAdapter; + } + + /** + * Process the entire handshake sequence and report the final + * response code received from the service dispatcher. + */ + Response process() throws IOException { + final SendNameOp nameOp = new SendNameOp(this, serviceName); + final InitResult nameResult = nameOp.processOp(ioAdapter); + if (nameResult == InitResult.FAIL) { + return Response.INVALID; + } + + if (nameOp.getResponse() != Response.AUTHENTICATE) { + return nameOp.getResponse(); + } + + final DoAuthenticateOp authOp = + new DoAuthenticateOp(this, authInfo); + final InitResult authResult = authOp.processOp(ioAdapter); + if (authResult == InitResult.FAIL) { + return Response.INVALID; + } + return authOp.getResponse(); + } + } + + /** + * The base class of elemental service initialization protocol operations + * for the server side. + */ + public abstract static class ServerInitOp { + + /* The handshake to which the operation belongs */ + protected final ServerHandshake initState; + + protected ServerInitOp(ServerHandshake initState) { + this.initState = initState; + } + + /** + * Incrementally process the operation. The operation may require + * multiple passes since we are presumed to be in non-blocking mode. 
+ * + * This function returns one of the following: + * InitResult.READ: operation is not complete; more data needed + * InitResult.FAIL: operation was rejected or otherwise failed + * the underlying data channel has been closed. + * InitResult.DONE: operation has completed + */ + protected abstract InitResult processOp(DataChannel channel) + throws IOException; + + /** + * Helper function to read until the buffer is full. + */ + protected InitResult fillBuffer(DataChannel channel, ByteBuffer buffer) + throws IOException { + + while (buffer.remaining() > 0) { + final int count = channel.read(buffer); + if (count < 0) { + /* Premature EOF */ + initState.logMsg(Level.WARNING, + true, // noteError + "Premature EOF on channel: " + channel + + ", service: " + + initState.getServiceName() + + " - read() returned: " + count); + closeChannel(channel); + return InitResult.FAIL; + } + if (count == 0) { + return InitResult.READ; + } + } + return InitResult.DONE; + } + + /** + * Helper function to write the contents of the buffer. + * We make the simplifying assumption here that we will be able + * to write as much as we want (within reason). + */ + protected InitResult sendBuffer(DataChannel channel, ByteBuffer buffer) + throws IOException { + int tryCount = 0; + while (buffer.remaining() > 0) { + final int count = channel.write(buffer); + if (count < 0) { + /* Premature EOF */ + initState.logMsg(Level.WARNING, + true, // noteError + "Premature EOF on channel: " + + channel + " write() returned: " + + count); + closeChannel(channel); + return InitResult.FAIL; + } else if (count == 0) { + tryCount++; + if (tryCount > CHANNEL_WRITE_ATTEMPT_LIMIT) { + initState.logMsg(Level.WARNING, + true, // noteError + "Failed to write to channel. " + + "Send buffer size: " + + channel.getSocketChannel().socket(). + getSendBufferSize()); + throw EnvironmentFailureException.unexpectedState( + "Failed to write to channel"); + } + } else { + tryCount = 0; + } + } + return InitResult.DONE; + } + + /** + * Closes the channel, logging any resulting exceptions. + * + * @param channel the channel being closed + */ + void closeChannel(Channel channel) { + if (channel != null) { + try { + channel.close(); + } catch (IOException e1) { + initState.logMsg( + Level.WARNING, + false, // noteError + "Exception during cleanup: " + e1.getMessage()); + } + } + } + } + + /** + * The base class of elemental service initialization protocol operations + * for the client side. The operation is assumed to be in blocking I/O + * mode. + */ + public abstract static class ClientInitOp { + + private Response response; + + protected final ClientHandshake initState; + + protected ClientInitOp(ClientHandshake initState) { + this.initState = initState; + } + + /** + * Process the operation in support of the handshake. + * This operation may consist of read and/or write operations. + * + * This function returns one of the following: + * InitResult.FAIL: operation was rejected or otherwise failed. The + * underlying communication channel needs to be closed by the + * caller. + * InitResult.DONE: operation has completed. The most recent + * Response from the server received during this operation is + * available for inspection. Note that this may return DONE + * with the response set to a failure response value (e.g. INVALID) + */ + protected abstract InitResult processOp(IOAdapter ioAdapter) + throws IOException; + + /** + * Response is set after processOp returns InitResult.DONE. 
+         */
+        Response getResponse() {
+            return response;
+        }
+
+        protected void setResponse(Response response) {
+            this.response = response;
+        }
+    }
+
+    /*
+     * Operations to communicate the requested service name. This is the
+     * initial portion of the message and, if we have a connection from a
+     * foreign entity, this is where an error is most likely to be detected.
+     */
+
+    /* The prefix for a service request. */
+    private static final String REQUEST_PREFIX = "Service:";
+    private static final byte[] REQUEST_PREFIX_BYTES =
+        StringUtils.toASCII(REQUEST_PREFIX);
+
+    /**
+     * Server-side: expect client to provide a service name.
+     * Expected data format:
+     *    Literal: "Service:" in ASCII encoding
+     *    Length: 1 byte
+     *    Service Name: <Length> bytes in ASCII encoding
+     */
+    static class ReceiveNameOp extends ServerInitOp {
+
+        /*
+         * The initial size is the prefix plus the byte that holds the
+         * length of the service name.
+         */
+        private final int INITIAL_BUFFER_SIZE = REQUEST_PREFIX_BYTES.length+1;
+
+        private ByteBuffer buffer;
+
+        ReceiveNameOp(ServerHandshake initState) {
+            super(initState);
+            this.buffer = ByteBuffer.allocate(INITIAL_BUFFER_SIZE);
+        }
+
+        @Override
+        protected InitResult processOp(DataChannel channel) throws IOException {
+            InitResult readResult = fillBuffer(channel, buffer);
+            if (readResult != InitResult.DONE) {
+                return readResult;
+            }
+            buffer.flip();
+            if (buffer.capacity() == INITIAL_BUFFER_SIZE) {
+                /*
+                 * We've received exactly enough data to contain:
+                 *    "Service:" + <length byte>
+                 * Make sure that it has the right format.
+                 */
+                final String prefix = StringUtils.fromASCII
+                    (buffer.array(), 0, REQUEST_PREFIX.length());
+                if (!prefix.equals(REQUEST_PREFIX)) {
+                    initState.logMsg
+                        (Level.WARNING,
+                         true, // noteError
+                         "Malformed service request: " + prefix);
+                    channel.write(Response.FORMAT_ERROR.byteBuffer());
+                    closeChannel(channel);
+                    return InitResult.FAIL;
+                }
+
+                /* Enlarge the buffer to read the service name as well */
+                final int nameLength = buffer.get(INITIAL_BUFFER_SIZE-1);
+                if (nameLength <= 0) {
+                    initState.logMsg
+                        (Level.WARNING,
+                         true, // noteError
+                         "Bad service name length: " + nameLength);
+                    channel.write(Response.FORMAT_ERROR.byteBuffer());
+                    closeChannel(channel);
+                    return InitResult.FAIL;
+                }
+                final ByteBuffer newBuffer =
+                    ByteBuffer.allocate(INITIAL_BUFFER_SIZE + nameLength);
+                newBuffer.put(buffer);
+                buffer = newBuffer;
+
+                /*
+                 * Recursive call to get the service name
+                 */
+                return processOp(channel);
+            }
+
+            /*
+             * If we made it here, we have a complete service request
+             * message. Extract the service name from the buffer.
+             */
+            final String request = StringUtils.fromASCII(buffer.array());
+            initState.setServiceName(
+                request.substring(REQUEST_PREFIX.length()+1));
+            return InitResult.DONE;
+        }
+    }
+
+    /**
+     * Client-side: Send the initial service request to the server and await
+     * a Response.
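+     *
+     * Wire sketch for a request of a (hypothetical) service named "foo",
+     * per serviceRequestMessage() below; 0x00 is the ordinal of Response.OK:
+     * <pre>
+     *    client -&gt; "Service:" 0x03 "foo"
+     *    client &lt;- 0x00
+     * </pre>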
+ */ + static class SendNameOp extends ClientInitOp { + + String serviceName; + + SendNameOp(ClientHandshake initState, String serviceName) { + super(initState); + this.serviceName = serviceName; + } + + @Override + protected InitResult processOp(IOAdapter ioAdapter) throws IOException { + byte[] message = null; + try { + message = serviceRequestMessage(serviceName); + } catch (IllegalArgumentException iae) { + throw new IOException( + "Unable to encode requested service name"); + } + ioAdapter.write(message); + final byte[] responseBytes = new byte[1]; + final int result = ioAdapter.read(responseBytes); + if (result < 0) { + throw new IOException("No service response byte: " + result); + } + final Response response = Response.get(responseBytes[0]); + if (response == null) { + throw new IOException("Unexpected read response byte: " + + responseBytes[0]); + } + + setResponse(response); + return InitResult.DONE; + } + + /** + * Builds a service request suitable for sending over to a + * ServiceDispatcher. + * + * @param serviceName the service that is being requested. The + * service name must be less than SERVICE_NAME_LIMIT in size. + * + * @return the byte encoding of the service request message + */ + private static byte[] serviceRequestMessage(String serviceName) { + final byte[] serviceNameBytes = StringUtils.toASCII(serviceName); + if (serviceNameBytes.length > SERVICE_NAME_LIMIT) { + throw new IllegalArgumentException( + "The provided service name is too long: " + + serviceName); + } + + final int length = REQUEST_PREFIX_BYTES.length + 1 + + serviceNameBytes.length; + final ByteBuffer buffer = ByteBuffer.allocate(length); + buffer.put(REQUEST_PREFIX_BYTES). + put((byte)serviceNameBytes.length). + put(serviceNameBytes); + return buffer.array(); + } + } + + /* + * Top-level operations for handling service authentication. + * This class is used upon receipt of a service request which we have + * responded to with an AUTHENTICATE. The operation has two + * sub-phases: + * 1) negotiate an authentication methods, + * 2) complete the negotiated method. + */ + + /* The prefix for an authentication request. */ + private static final String AUTH_PREFIX = "Authenticate:"; + private static final byte[] AUTH_PREFIX_BYTES; + + /* The prefix for an authentication mechanism response. */ + private static final String AUTH_MECH_PREFIX = "Mechanism:"; + private static final byte[] AUTH_MECH_PREFIX_BYTES; + + static { + AUTH_PREFIX_BYTES = StringUtils.toASCII(AUTH_PREFIX); + AUTH_MECH_PREFIX_BYTES = StringUtils.toASCII(AUTH_MECH_PREFIX); + } + + /** + * Server side operation to communicate the need for authentication + * to the client, negotiate an authentication mechanism and then + * complete the authentication. 
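+     *
+     * Exchange sketch from the server's perspective (a summary of the code
+     * below, following the flows documented at the top of this file):
+     * <pre>
+     *    send byte(AUTHENTICATE)
+     *    recv "Authenticate:" + byte(len) + &lt;mechanism list&gt;
+     *    send byte(PROCEED) + "Mechanism:" + 2byte(len) +
+     *         &lt;mechanism name&gt; + ":" + &lt;server params&gt;
+     *    then delegate the remaining steps to the selected mechanism's
+     *    getServerOp()
+     * </pre>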
+ */ + static class RequireAuthenticateOp extends ServerInitOp { + + private final AuthenticationMethod[] authInfo; + private ExpectAuthRequestOp expectRequestOp; + private ServerInitOp authOp; + + RequireAuthenticateOp(ServerHandshake initState, + AuthenticationMethod[] authInfo) { + super(initState); + this.authInfo = authInfo; + } + + @Override + protected InitResult processOp(DataChannel channel) throws IOException { + + if (expectRequestOp == null) { + /* Tell the other end that they need to authenticate */ + Response response = Response.AUTHENTICATE; + InitResult writeResult = sendBuffer(channel, + response.byteBuffer()); + if (writeResult != InitResult.DONE) { + return writeResult; + } + expectRequestOp = + new ExpectAuthRequestOp(initState, authInfo); + } + + if (authOp == null) { + /* Waiting for the authentication initiation request */ + final InitResult readResult = + expectRequestOp.processOp(channel); + if (readResult != InitResult.DONE) { + return readResult; + } + + AuthenticationMethod selectedAuth = + expectRequestOp.getSelectedAuthentication(); + + if (selectedAuth == null) { + selectedAuth = new NoMatchAuthentication(); + } + authOp = selectedAuth.getServerOp(initState); + + /* Prepare the response message */ + final String authResponseStr = selectedAuth.getMechanismName() + + ":" + selectedAuth.getServerParams(); + final byte[] authResponseBytes = + StringUtils.toASCII(authResponseStr); + final int length = 1 + AUTH_MECH_PREFIX_BYTES.length + + 2 + authResponseBytes.length; + + final ByteBuffer buffer = ByteBuffer.allocate(length); + buffer.put(Response.PROCEED.byteBuffer()); + buffer.put(AUTH_MECH_PREFIX_BYTES); + putShort(buffer, (short) authResponseBytes.length); + buffer.put(authResponseBytes); + + buffer.flip(); + + /* Send the response message */ + final InitResult writeResult = sendBuffer(channel, buffer); + if (writeResult != InitResult.DONE) { + return writeResult; + } + } + + return authOp.processOp(channel); + } + } + + /** + * Client side operation to request authentication. This is done + * in response to a message from the server notifying us that authentication + * is required. The implementation of processOp() aggregates functionality + * from SendRequestOp, which tells the server what authentication we + * can provide, and then when we've agreed with the server on how to + * communicate, another ClientOp is created that provides the actual + * authentication implementation. + */ + static class DoAuthenticateOp extends ClientInitOp { + + private final AuthenticationMethod[] authInfo; + + DoAuthenticateOp(ClientHandshake initState, + AuthenticationMethod[] authInfo) { + super(initState); + this.authInfo = authInfo; + } + + @Override + protected InitResult processOp(IOAdapter ioAdapter) throws IOException { + + /* + * Tell the server that we want to authenticate ourselves, and + * negotiate with the server how that will happen. + */ + final SendRequestOp sendOp = new SendRequestOp(initState, authInfo); + final InitResult sendResult = sendOp.processOp(ioAdapter); + if (sendResult != InitResult.DONE) { + return sendResult; + } + + /* + * Now use the authOp determined by sendOp() to perform the + * actual authentication steps. + */ + final ClientInitOp authOp = sendOp.getAuthOp(); + final InitResult authResult = authOp.processOp(ioAdapter); + if (authResult == InitResult.DONE) { + setResponse(authOp.getResponse()); + } + return authResult; + } + } + + /** + * Server side Authentication request setup. 
+     * Expect an authentication request that looks like:
+     *    Authenticate:<1-byte len><mechanism list>
+     * where len is a 1-byte length and the mechanism list is a
+     * comma-separated list of available authentication mechanisms. Upon
+     * completion, selectedAuth is non-null if the client supports an
+     * authentication mechanism in common with us.
+     */
+    static class ExpectAuthRequestOp extends ServerInitOp {
+
+        /* Three pieces of information that we need to get */
+        final static int WAIT_FOR_REQUEST = 1;
+        final static int WAIT_FOR_LIST_SIZE = 2;
+        final static int WAIT_FOR_LIST = 3;
+
+        private int phase;
+        private ByteBuffer buffer;
+        private AuthenticationMethod[] availableAuth;
+        private AuthenticationMethod selectedAuth;
+
+        ExpectAuthRequestOp(ServerHandshake initState,
+                            AuthenticationMethod[] authInfo) {
+            super(initState);
+            this.availableAuth = authInfo;
+            this.phase = WAIT_FOR_REQUEST;
+            this.buffer = ByteBuffer.allocate(AUTH_PREFIX_BYTES.length);
+        }
+
+        /**
+         * Returns the negotiated authentication mechanism, or null if no
+         * common authentication mechanism was found.
+         */
+        AuthenticationMethod getSelectedAuthentication() {
+            return selectedAuth;
+        }
+
+        @Override
+        protected InitResult processOp(DataChannel channel) throws IOException {
+            InitResult readResult = fillBuffer(channel, buffer);
+            if (readResult != InitResult.DONE) {
+                return readResult;
+            }
+
+            if (phase == WAIT_FOR_REQUEST) {
+                final String prefix = StringUtils.fromASCII
+                    (buffer.array(), 0, AUTH_PREFIX.length());
+                if (!prefix.equals(AUTH_PREFIX)) {
+                    initState.logMsg(Level.WARNING,
+                                     true, // noteError
+                                     "Malformed authentication request: " +
+                                     prefix);
+                    sendBuffer(channel, Response.FORMAT_ERROR.byteBuffer());
+                    closeChannel(channel);
+                    return InitResult.FAIL;
+                }
+
+                /* Get the length of the auth mechanism list */
+                buffer.clear();
+                buffer.limit(1);
+                phase = WAIT_FOR_LIST_SIZE;
+
+                readResult = fillBuffer(channel, buffer);
+                if (readResult != InitResult.DONE) {
+                    return readResult;
+                }
+            }
+
+            if (phase == WAIT_FOR_LIST_SIZE) {
+                buffer.flip();
+                final int mechListSize = buffer.get();
+                if (mechListSize < 0) {
+                    initState.logMsg(Level.WARNING,
+                                     true, // noteError
+                                     "Negative mechanism list size received: " +
+                                     mechListSize);
+                    sendBuffer(channel, Response.FORMAT_ERROR.byteBuffer());
+                    closeChannel(channel);
+                    return InitResult.FAIL;
+                }
+
+                /* Now get the list itself */
+                buffer = ByteBuffer.allocate(mechListSize);
+                phase = WAIT_FOR_LIST;
+
+                readResult = fillBuffer(channel, buffer);
+                if (readResult != InitResult.DONE) {
+                    return readResult;
+                }
+            }
+
+            if (phase != WAIT_FOR_LIST) {
+                throw EnvironmentFailureException.unexpectedState(
+                    "Unexpected state: " + phase);
+            }
+
+            /*
+             * Get the mechanism list in string form and then split it into
+             * its constituent components.
+             */
+            final String mechListStr = StringUtils.fromASCII
+                (buffer.array(), 0, buffer.capacity());
+            final String[] mechList = mechListStr.split(",");
+
+            /* Find the first available match */
+            selectedAuth = findMatch(mechList, availableAuth);
+
+            if (selectedAuth == null) {
+                /* No acceptable mechanism found */
+                initState.logMsg
+                    (Level.WARNING,
+                     true, // noteError
+                     "No acceptable authentication mechanism in list: " +
+                     mechListStr);
+                sendBuffer(channel, Response.INVALID.byteBuffer());
+                closeChannel(channel);
+                return InitResult.FAIL;
+            }
+
+            return InitResult.DONE;
+        }
+    }
+
+    /**
+     * Client side: Send the authentication request to the server.
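+     *
+     * (Added note, derived from serviceAuthenticateMessage() below.) The
+     * request sent by this op has the form:
+     *
+     *    Authenticate:<1-byte mechanism-list length><comma-separated list>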
+ */ + static class SendRequestOp extends ClientInitOp { + + private final AuthenticationMethod[] authInfo; + private ClientInitOp authOp; + + SendRequestOp(ClientHandshake initState, + AuthenticationMethod[] authInfo) { + super(initState); + this.authInfo = authInfo; + } + + @Override + protected InitResult processOp(IOAdapter ioAdapter) throws IOException { + + final byte[] responseByte = new byte[1]; + + /* + * Send the authenticate message to the service dispatcher, + * including reporting our available authentication mechanisms. + */ + ioAdapter.write(serviceAuthenticateMessage()); + + /* + * Wait for a response indicating whether to proceed with + * authentication + */ + int result = ioAdapter.read(responseByte); + if (result < 0) { + throw new IOException( + "No service authenticate response byte: " + result); + } + + final Response response = Response.get(responseByte[0]); + setResponse(response); + if (response == null) { + throw new IOException("Unexpected read response byte: " + + responseByte[0]); + } + if (response != Response.PROCEED) { + return InitResult.FAIL; + } + + /* + * response is PROCEED, so find out what mechanism we are + * to use. + */ + final byte[] mechPrefix = + new byte[AUTH_MECH_PREFIX_BYTES.length + 2]; + result = ioAdapter.read(mechPrefix); + if (result < 0) { + throw new IOException( + "EOF reading service authenticate response: " + result); + } + if (!arraysEqual(AUTH_MECH_PREFIX_BYTES, mechPrefix, + AUTH_MECH_PREFIX_BYTES.length)) { + throw new IOException( + "Unexpected service authenticate response: " + + encodeBytes(mechPrefix)); + } + final int mechLen = getShort(mechPrefix, + AUTH_MECH_PREFIX_BYTES.length); + if (mechLen < 0) { + throw new IOException( + "Invalid mechanism length received: " + mechLen); + } + + final byte[] mechBytes = new byte[mechLen]; + result = ioAdapter.read(mechBytes); + + if (result < 0) { + throw new IOException( + "EOF reading service authenticate mechanism: " + + result); + } + + /* + * The mechStr is a concatenation of mechanism name and + * mechanism parameters, if required, separated by a ':'. + */ + final String mechStr = + StringUtils.fromASCII(mechBytes, 0, mechBytes.length); + final String[] mechList = mechStr.split(":"); + + /* + * Get the authentication mechanism object based on the name. + */ + + AuthenticationMethod selectedAuth = + findMatch(new String[] { mechList[0] }, + authInfo); + if (selectedAuth == null) { + throw new IOException( + "Requested authentication mechanism not supported; " + + mechList[0]); + } + + /* + * Then get the client-side operation from the authentication + * mechanism object. + */ + authOp = selectedAuth.getClientOp( + initState, + mechList.length > 1 ? mechList[1] : ""); + + return InitResult.DONE; + } + + ClientInitOp getAuthOp() { + return authOp; + } + + /** + * Builds an authentication request suitable for sending over to a + * ServiceDispatcher. + * Looks like: Authenticate: + * + * @return the byte encoding of the authentication request message + */ + private byte[] serviceAuthenticateMessage() { + final byte[] mechListBytes = + StringUtils.toASCII(mechanisms(authInfo)); + final int length = + AUTH_PREFIX_BYTES.length + 1 + mechListBytes.length; + final ByteBuffer buffer = ByteBuffer.allocate(length); + buffer.put(AUTH_PREFIX_BYTES). + put((byte)mechListBytes.length). 
+                put(mechListBytes);
+            return buffer.array();
+        }
+    }
+
+    /**
+     * Returns a comma-delimited list of authentication mechanism
+     * names from the authList argument.
+     */
+    static String mechanisms(AuthenticationMethod[] authList) {
+        StringBuilder mechList = new StringBuilder();
+        if (authList != null) {
+            for (AuthenticationMethod auth : authList) {
+                if (mechList.length() > 0) {
+                    mechList.append(",");
+                }
+                mechList.append(auth.getMechanismName());
+            }
+        }
+        return mechList.toString();
+    }
+
+    /**
+     * Finds the first AuthenticationMethod instance whose mechanism name
+     * matches one of the entries in mechList.
+     */
+    static AuthenticationMethod findMatch(String[] mechList,
+                                          AuthenticationMethod[] authList) {
+        /* find the first match */
+        for (AuthenticationMethod auth : authList) {
+            for (String mech : mechList) {
+                if (mech.equals(auth.getMechanismName())) {
+                    return auth;
+                }
+            }
+        }
+        return null;
+    }
+
+    /**
+     * The base of all authentication implementations.
+     */
+    public interface AuthenticationMethod {
+        String getMechanismName();
+        String getServerParams();
+        ServerInitOp getServerOp(ServerHandshake initState);
+        ClientInitOp getClientOp(ClientHandshake initState, String params);
+    }
+
+    /**
+     * NoMatchAuthentication communicates that none of the proposed
+     * authentication mechanisms available on the client are available on
+     * the dispatcher.
+     */
+    static class NoMatchAuthentication implements AuthenticationMethod {
+
+        /* The indicator that no authentication method is available. */
+        static final String MECHANISM = "NoMatch";
+
+        @Override
+        public String getMechanismName() {
+            return MECHANISM;
+        }
+
+        @Override
+        public ClientInitOp getClientOp(ClientHandshake initIgnored,
+                                        String paramsIgnored) {
+            /* This should never be called */
+            return null;
+        }
+
+        @Override
+        public String getServerParams() {
+            return "";
+        }
+
+        @Override
+        public ServerInitOp getServerOp(ServerHandshake initState) {
+            return new NoMatchAuthenticateOp(initState);
+        }
+
+        static class NoMatchAuthenticateOp extends ServerInitOp {
+
+            NoMatchAuthenticateOp(ServerHandshake initState) {
+                super(initState);
+            }
+
+            @Override
+            protected InitResult processOp(DataChannel channel)
+                throws IOException {
+
+                /* This always fails */
+                sendBuffer(channel, Response.INVALID.byteBuffer());
+                return InitResult.FAIL;
+            }
+        }
+    }
+
+    /**
+     * A simple interface providing blocking I/O.
+     */
+    public interface IOAdapter {
+        /**
+         * Reads fully into buf.
+         * @return the number of bytes read
+         */
+        int read(byte[] buf) throws IOException;
+
+        /**
+         * Writes buf fully.
+         * @return the number of bytes written
+         */
+        int write(byte[] buf) throws IOException;
+    }
+
+    /**
+     * Implementation of IOAdapter based on a ByteChannel.
+     */
+    static class ByteChannelIOAdapter implements IOAdapter {
+        private final ByteChannel channel;
+
+        ByteChannelIOAdapter(ByteChannel channel) {
+            this.channel = channel;
+        }
+        @Override
+        public int read(byte[] buf) throws IOException {
+            return channel.read(ByteBuffer.wrap(buf));
+        }
+        @Override
+        public int write(byte[] buf) throws IOException {
+            return channel.write(ByteBuffer.wrap(buf));
+        }
+    }
+
+    /**
+     * Implementation of IOAdapter based on a pair of streams.
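+     *
+     * Illustrative use (an added sketch; the socket is hypothetical):
+     *
+     *   IOAdapter io = new IOStreamIOAdapter(socket.getInputStream(),
+     *                                        socket.getOutputStream());
+     *   io.write(request);    // writes and flushes the entire array
+     *   io.read(response);    // blocks until response.length bytes arrive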
+ */ + static class IOStreamIOAdapter implements IOAdapter { + private final DataInputStream dataInputStream; + private final OutputStream outputStream; + + IOStreamIOAdapter(InputStream input, OutputStream output) { + this.dataInputStream = new DataInputStream(input); + this.outputStream = output; + } + + @Override + public int read(byte[] buf) throws IOException { + dataInputStream.readFully(buf); + return buf.length; + } + + @Override + public int write(byte[] buf) throws IOException { + outputStream.write(buf); + outputStream.flush(); + return buf.length; + } + } + + /** + * Check whether the contents of two arrays are equal. + * + * @param array1 the first array to compare - must be non-null + * @param array2 the second array to compare - must be non-null + * @param len the number of bytes to compare - must be less than or + * equal to the length of the shorter of array1, array2 + * @return true if the first "len" bytes of the arrays are equal + */ + private static boolean arraysEqual(byte[] array1, byte[] array2, int len) { + for (int i = 0; i < len; i++) { + if (array1[i] != array2[i]) { + return false; + } + } + return true; + } + + private static String encodeBytes(byte[] bytes) { + StringBuilder sb = new StringBuilder(); + for (byte b : bytes) { + sb.append(String.format("%02X", b)); + } + return sb.toString(); + } + + /** + * Write a short to a ByteBuffer, in network byte order. + */ + private static void putShort(ByteBuffer buf, short i) { + byte b = (byte) ((i >> 8) & 0xff); + buf.put(b); + b = (byte) ((i >> 0) & 0xff); + buf.put(b); + } + + /** + * Read a short from a byte array, in network byte order. + */ + private static short getShort(byte[] buf, int off) { + return (short) (((buf[off] & 0xFF) << 8) + + ((buf[off+1] & 0xFF) << 0)); + } + +} diff --git a/src/com/sleepycat/je/rep/utilint/SimpleTxnMap.java b/src/com/sleepycat/je/rep/utilint/SimpleTxnMap.java new file mode 100644 index 0000000..58d08fe --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/SimpleTxnMap.java @@ -0,0 +1,195 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.txn.Txn; + +/** + * SimpleTxnMap provides a customized (but limited functionality) map that's + * well suited to the tracking of open transactions. Transactions are entered + * into this map when they are first created, referenced while they are alive + * via their transaction id and subsequently, removed upon commit or abort. So + * the map access pattern for each transaction looks like the sequence: + * + * put [get]* remove + * + * For JE applications, like KVS, transactions can pass through this map at the + * rate of 30K to 60K transactions/sec, so the map needs to process these + * operations efficiently. + * + * This map tries to be efficient for the put, get, remove operations by: + * + * 1) Avoiding any memory allocation for the typical: put, get, remove + * sequence. 
In contrast, a heap entry uses 24 bytes for each entry plus 16
+ * bytes for the Long object argument when using compressed oops. It could be
+ * that the heap storage could be replaced by stack storage for the Long
+ * object argument, since it's a downward lexical funarg, but it is not clear
+ * whether the JVM performs such an analysis.
+ *
+ * 2) Having a very short instruction code path for the typical case.
+ *
+ * The data structure used here is very simple, and consists of two maps:
+ *
+ * 1) An array based map indexed by the low bits of the transaction id.
+ *
+ * 2) A regular java Map<Long, T>.
+ *
+ * The array based map is the preferred location for map entries. If the slot
+ * associated with the transaction id is occupied, we fall back to the java
+ * Map.
+ *
+ * So the best case behavior is as if the map were implemented entirely as an
+ * array, and the worst case is that we will do an extra integer mask, array
+ * index and compare operation before we resort to using the java Map. Given
+ * the behavior of transactions, we expect that the vast majority of the
+ * operations will be implemented by the array map.
+ *
+ * This class provides a minimal subset of the operations provided by Map. All
+ * methods are synchronized. This works well for replica replay in conjunction
+ * with a jvm's thread biased locking strategy, but we may need explicit locks
+ * for other usage.
+ *
+ * @param <T> the type of Txn object stored as values in the map
+ */
+public class SimpleTxnMap<T extends Txn> {
+
+    /* The low order bit mask used to mask the transaction Id */
+    private final int cacheMask;
+
+    /*
+     * The preferred array map.
+     *
+     * Invariant: The txn with a given id can be in exactly one of the maps,
+     * never in both.
+     */
+    private final Txn arrayMap[];
+
+    /* The number of entries in just the array map. */
+    private int arrayMapSize = 0;
+
+    /* The backup map. */
+    private final HashMap<Long, T> backupMap = new HashMap<>();
+
+    public SimpleTxnMap(int arrayMapSize) {
+        if (Integer.bitCount(arrayMapSize) != 1) {
+            throw new IllegalArgumentException("argument:" + arrayMapSize +
+                                               " must be a power of two");
+        }
+        arrayMap = new Txn[arrayMapSize];
+        cacheMask = arrayMapSize - 1;
+    }
+
+    /**
+     * Adds a txn to the map. Note that, in keeping with transaction
+     * behavior, the "put" operation does not expect to be called while a
+     * txn with that ID is already in the map.
+     */
+    public synchronized void put(T txn) {
+        assert get(txn.getId()) == null;
+        final long txnId = txn.getId();
+        int i = (int)txnId & cacheMask;
+        final Txn cachedTxn = arrayMap[i];
+        if (cachedTxn == null) {
+            /* Free slot, use it. */
+            arrayMap[i] = txn;
+            arrayMapSize++;
+            return;
+        }
+
+        /* Array slot occupied by a transaction, fall back to the map. */
+        backupMap.put(txnId, txn);
+    }
+
+    public synchronized T get(long txnId) {
+        @SuppressWarnings("unchecked")
+        T cachedTxn = (T)arrayMap[(int)txnId & cacheMask];
+        if ((cachedTxn != null) && (cachedTxn.getId() == txnId)) {
+            assert !backupMap.containsKey(txnId);
+            return cachedTxn;
+        }
+        return backupMap.get(txnId);
+    }
+
+    /**
+     * Removes the txn with that key, if it exists.
+     *
+     * @return the Txn that was removed, or null if it did not exist.
+     */
+    public synchronized T remove(long txnId) {
+        final int i = (int)txnId & cacheMask;
+        @SuppressWarnings("unchecked")
+        T cachedTxn = (T)arrayMap[i];
+        if ((cachedTxn != null) && (cachedTxn.getId() == txnId)) {
+            arrayMap[i] = null;
+            arrayMapSize--;
+            assert !backupMap.containsKey(txnId);
+            return cachedTxn;
+        }
+
+        /*
+         * Array slot empty, or occupied by a different transaction,
+         * check the backup map.
+         */
+        return backupMap.remove(txnId);
+    }
+
+    public synchronized int size() {
+        return backupMap.size() + arrayMapSize;
+    }
+
+    public synchronized boolean isEmpty() {
+        return size() == 0;
+    }
+
+    /*
+     * The methods below are not used in critical paths and are not
+     * optimized; they are O(n) complexity. We can revisit with a change in
+     * usage.
+     */
+
+    public synchronized void clear() {
+        backupMap.clear();
+        Arrays.fill(arrayMap, null);
+        arrayMapSize = 0;
+    }
+
+    /**
+     * Returns a new map containing the current snapshot of transactions in
+     * this map.
+     */
+    public synchronized Map<Long, T> getMap() {
+        final Map<Long, T> map = new HashMap<>(backupMap);
+        for (Object element : arrayMap) {
+            @SuppressWarnings("unchecked")
+            final T txn = (T)element;
+            if (txn != null) {
+                T old = map.put(txn.getId(), txn);
+                assert old == null;
+            }
+        }
+
+        return map;
+    }
+
+    /**
+     * For test use only.
+     */
+    public HashMap<Long, T> getBackupMap() {
+        return backupMap;
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/SizeAwaitMap.java b/src/com/sleepycat/je/rep/utilint/SizeAwaitMap.java
new file mode 100644
index 0000000..3a07c11
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/SizeAwaitMap.java
@@ -0,0 +1,279 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import static com.sleepycat.je.rep.utilint.SizeAwaitMapStatDefinition.N_NO_WAITS;
+import static com.sleepycat.je.rep.utilint.SizeAwaitMapStatDefinition.N_REAL_WAITS;
+import static com.sleepycat.je.rep.utilint.SizeAwaitMapStatDefinition.N_WAIT_TIME;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.rep.utilint.RepUtils.ExceptionAwareCountDownLatch;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.StatGroup;
+
+/**
+ * Creates a Map that threads can conveniently wait on to contain a specific
+ * number of entries, where the values optionally match a predicate. The wait
+ * functionality is provided by the sizeAwait() method defined by this
+ * class. Map values must not be null.
+ */
+public class SizeAwaitMap<K, V> implements Map<K, V> {
+
+    /* The environment to use for exception reporting. */
+    private final EnvironmentImpl envImpl;
+
+    /*
+     * The predicate to apply to the value when counting entries, or null to
+     * match all entries.
+     */
+    private final Predicate<V> predicate;
+
+    /*
+     * The latch map. There is a latch for each threshold of interest to a
+     * thread.
+     */
+    private final HashMap<Integer, ExceptionAwareCountDownLatch>
+        thresholdLatches;
+
+    /* The underlying map of interest to threads. */
+    private final Map<K, V> map = new HashMap<>();
+
+    /*
+     * The number of entries with values matching the predicate, or the total
+     * number of entries if the predicate is null.
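+     *
+     * Illustrative use of the waiting facility this count supports (an
+     * added sketch; the names and values are hypothetical):
+     *
+     *   SizeAwaitMap<String, Feeder> m =
+     *       new SizeAwaitMap<>(envImpl, null);  // null predicate: count all
+     *   // Blocks for up to 10 seconds waiting for two entries.
+     *   boolean reached = m.sizeAwait(2, 10, TimeUnit.SECONDS);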
+     */
+    private int count = 0;
+
+    private final StatGroup stats;
+    private final LongStat nNoWaits;
+    private final LongStat nRealWaits;
+    private final LongStat nWaitTime;
+
+    /**
+     * Creates an instance of this class.
+     *
+     * @param envImpl the environment, used for exception handling
+     * @param predicate the predicate for counting matching entries, or
+     * {@code null} to match all entries
+     */
+    public SizeAwaitMap(final EnvironmentImpl envImpl,
+                        final Predicate<V> predicate) {
+        this.envImpl = envImpl;
+        this.predicate = predicate;
+        thresholdLatches = new HashMap<>();
+        stats = new StatGroup(SizeAwaitMapStatDefinition.GROUP_NAME,
+                              SizeAwaitMapStatDefinition.GROUP_DESC);
+        nNoWaits = new LongStat(stats, N_NO_WAITS);
+        nRealWaits = new LongStat(stats, N_REAL_WAITS);
+        nWaitTime = new LongStat(stats, N_WAIT_TIME);
+    }
+
+    public StatGroup getStatistics() {
+        return stats;
+    }
+
+    /**
+     * Causes the requesting thread to wait until the map reaches the
+     * specified size or the thread is interrupted.
+     *
+     * @param thresholdSize the size to wait for.
+     *
+     * @return true if the threshold was reached, false if the wait timed
+     * out.
+     *
+     * @throws InterruptedException for the usual reasons, or if the map was
+     * cleared and the size threshold was not actually reached.
+     */
+    public boolean sizeAwait(int thresholdSize,
+                             long timeout,
+                             TimeUnit unit)
+        throws InterruptedException {
+
+        assert(thresholdSize >= 0);
+        ExceptionAwareCountDownLatch l = null;
+        synchronized (this) {
+            if (thresholdSize <= count) {
+                nNoWaits.increment();
+                return true;
+            }
+            l = thresholdLatches.get(thresholdSize);
+            if (l == null) {
+                l = new ExceptionAwareCountDownLatch(envImpl, 1);
+                thresholdLatches.put(thresholdSize, l);
+            }
+        }
+        nRealWaits.increment();
+        long startTime = System.currentTimeMillis();
+        try {
+            return l.awaitOrException(timeout, unit);
+        } finally {
+            nWaitTime.add((System.currentTimeMillis() - startTime));
+        }
+    }
+
+    /**
+     * For unit test use only.
+     */
+    synchronized int latchCount() {
+        return thresholdLatches.size();
+    }
+
+    /**
+     * Notes the addition of a new value and counts down any latches that
+     * were assigned to that threshold.
+     */
+    @Override
+    public synchronized V put(final K key, final V value) {
+        if (value == null) {
+            throw new IllegalArgumentException("Value must not be null");
+        }
+        int countDelta = checkPredicate(value) ? 1 : 0;
+        final V oldValue = map.put(key, value);
+        if ((oldValue != null) && checkPredicate(oldValue)) {
+            countDelta--;
+        }
+        count += countDelta;
+        if (countDelta > 0) {
+            /* Incremented count */
+            final CountDownLatch l = thresholdLatches.remove(count);
+            if (l != null) {
+                l.countDown();
+            }
+        }
+        return oldValue;
+    }
+
+    /** Checks whether the value matches the predicate. */
+    private boolean checkPredicate(final V value) {
+        return (predicate == null) || predicate.match(value);
+    }
+
+    @Override
+    public synchronized V remove(Object key) {
+        final V oldValue = map.remove(key);
+        if ((oldValue != null) && checkPredicate(oldValue)) {
+            count--;
+        }
+        return oldValue;
+    }
+
+    /**
+     * @deprecated Use {@link #clear(Exception)} instead.
+     */
+    @Deprecated
+    @Override
+    public void clear() throws UnsupportedOperationException {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Clears the underlying map and the latch map, after first counting the
+     * latches down, thus permitting any waiting threads to make progress.
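+     *
+     * Illustrative use (an added sketch):
+     *
+     *   map.clear(null);                       // normal shutdown
+     *   map.clear(new IOException("closed"));  // waiters see the exception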
+     *
+     * @param cause if non-null, the map is being cleared in response to an
+     * exception, and the cause is thrown in the waiting threads. If null,
+     * the map is being cleared as part of a normal shutdown, in which case
+     * no exception is thrown.
+     */
+    public synchronized void clear(Exception cause) {
+        for (ExceptionAwareCountDownLatch l : thresholdLatches.values()) {
+            l.releaseAwait(cause);
+        }
+        thresholdLatches.clear();
+        map.clear();
+        count = 0;
+    }
+
+    /* The remaining methods below merely forward to the underlying map. */
+
+    @Override
+    public synchronized boolean containsKey(Object key) {
+        return map.containsKey(key);
+    }
+
+    @Override
+    public synchronized boolean containsValue(Object value) {
+        return map.containsValue(value);
+    }
+
+    /**
+     * The caller should synchronize on the map while accessing the return
+     * value.
+     */
+    @Override
+    public synchronized Set<Map.Entry<K, V>> entrySet() {
+        return map.entrySet();
+    }
+
+    @Override
+    public synchronized V get(Object key) {
+        return map.get(key);
+    }
+
+    @Override
+    public synchronized boolean isEmpty() {
+        return map.isEmpty();
+    }
+
+    /**
+     * The caller should synchronize on the map while accessing the return
+     * value.
+     */
+    @Override
+    public synchronized Set<K> keySet() {
+        return map.keySet();
+    }
+
+    @Override
+    public void putAll(Map<? extends K, ? extends V> t) {
+        throw EnvironmentFailureException.unexpectedState
+            ("putAll not supported");
+    }
+
+    @Override
+    public synchronized int size() {
+        return map.size();
+    }
+
+    /**
+     * The caller should synchronize on the map while accessing the return
+     * value.
+     */
+    @Override
+    public synchronized Collection<V> values() {
+        return map.values();
+    }
+
+    /**
+     * Specifies which values should be counted.
+     */
+    public interface Predicate<V> {
+
+        /**
+         * Whether an entry with this value should be included in the count
+         * of entries being waited for.
+         */
+        boolean match(V value);
+    }
+}
diff --git a/src/com/sleepycat/je/rep/utilint/SizeAwaitMapStatDefinition.java b/src/com/sleepycat/je/rep/utilint/SizeAwaitMapStatDefinition.java
new file mode 100644
index 0000000..1cc4576
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/SizeAwaitMapStatDefinition.java
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import com.sleepycat.je.utilint.StatDefinition;
+
+/**
+ * Per-stat metadata for each SizeAwaitMap statistic.
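+ *
+ * (Added note) These definitions back the counters maintained by
+ * SizeAwaitMap.sizeAwait(): nNoWaits counts calls satisfied without
+ * blocking, nRealWaits counts calls that had to block, and nWaitTime
+ * accumulates the time such calls spent blocked, in milliseconds.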
+ */
+public class SizeAwaitMapStatDefinition {
+
+    public static final String GROUP_NAME = "SizeAwaitMap";
+    public static final String GROUP_DESC = "SizeAwaitMap statistics";
+
+    public static StatDefinition N_NO_WAITS =
+        new StatDefinition
+        ("nNoWaits",
+         "Number of times the map size requirement was met, and the thread " +
+         "did not need to wait.");
+
+    public static StatDefinition N_REAL_WAITS =
+        new StatDefinition
+        ("nRealWaits",
+         "Number of times the map size was less than the required size, and " +
+         "the thread had to wait to reach the map size.");
+
+    public static StatDefinition N_WAIT_TIME =
+        new StatDefinition
+        ("nWaitTime",
+         "Total time (in ms) spent waiting for the map to reach the " +
+         "required size.");
+}
diff --git a/src/com/sleepycat/je/rep/utilint/StatCaptureRepDefinitions.java b/src/com/sleepycat/je/rep/utilint/StatCaptureRepDefinitions.java
new file mode 100644
index 0000000..43cfb1f
--- /dev/null
+++ b/src/com/sleepycat/je/rep/utilint/StatCaptureRepDefinitions.java
@@ -0,0 +1,187 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import com.sleepycat.je.rep.impl.node.FeederManagerStatDefinition;
+import com.sleepycat.je.rep.impl.node.ReplayStatDefinition;
+import com.sleepycat.je.rep.impl.node.ReplicaStatDefinition;
+import com.sleepycat.je.rep.stream.FeederTxnStatDefinition;
+import com.sleepycat.je.rep.vlsn.VLSNIndexStatDefinition;
+import com.sleepycat.je.statcap.StatCaptureDefinitions;
+import com.sleepycat.je.statcap.StatManager;
+import com.sleepycat.je.utilint.StatDefinition;
+
+public class StatCaptureRepDefinitions extends StatCaptureDefinitions {
+
+    private static StatDefinition[] feederStats = {
+        FeederManagerStatDefinition.N_FEEDERS_CREATED,
+        FeederManagerStatDefinition.N_FEEDERS_SHUTDOWN,
+        FeederManagerStatDefinition.N_MAX_REPLICA_LAG,
+        FeederManagerStatDefinition.N_MAX_REPLICA_LAG_NAME,
+        FeederManagerStatDefinition.REPLICA_DELAY_MAP,
+        FeederManagerStatDefinition.REPLICA_LAST_COMMIT_TIMESTAMP_MAP,
+        FeederManagerStatDefinition.REPLICA_LAST_COMMIT_VLSN_MAP,
+        FeederManagerStatDefinition.REPLICA_VLSN_LAG_MAP,
+        FeederManagerStatDefinition.REPLICA_VLSN_RATE_MAP
+    };
+
+    private static StatDefinition[] replayStats = {
+        ReplayStatDefinition.N_COMMITS,
+        ReplayStatDefinition.N_COMMIT_ACKS,
+        ReplayStatDefinition.N_COMMIT_SYNCS,
+        ReplayStatDefinition.N_COMMIT_NO_SYNCS,
+        ReplayStatDefinition.N_COMMIT_WRITE_NO_SYNCS,
+        ReplayStatDefinition.N_ABORTS,
+        ReplayStatDefinition.N_LNS,
+        ReplayStatDefinition.N_NAME_LNS,
+        ReplayStatDefinition.N_ELAPSED_TXN_TIME,
+        ReplayStatDefinition.N_MESSAGE_QUEUE_OVERFLOWS,
+        ReplayStatDefinition.MIN_COMMIT_PROCESSING_NANOS,
+        ReplayStatDefinition.MAX_COMMIT_PROCESSING_NANOS,
+        ReplayStatDefinition.TOTAL_COMMIT_PROCESSING_NANOS,
+        ReplayStatDefinition.TOTAL_COMMIT_LAG_MS,
+        ReplayStatDefinition.LATEST_COMMIT_LAG_MS,
+        ReplayStatDefinition.N_GROUP_COMMIT_TIMEOUTS,
+        ReplayStatDefinition.N_GROUP_COMMIT_MAX_EXCEEDED,
+        ReplayStatDefinition.N_GROUP_COMMIT_TXNS,
+        ReplayStatDefinition.N_GROUP_COMMITS
+    };
+
+    private static StatDefinition[] replicaStats = {
+        ReplicaStatDefinition.N_LAG_CONSISTENCY_WAITS,
+        ReplicaStatDefinition.N_LAG_CONSISTENCY_WAIT_MS,
+        ReplicaStatDefinition.N_VLSN_CONSISTENCY_WAITS,
+        ReplicaStatDefinition.N_VLSN_CONSISTENCY_WAIT_MS
+    };
+
+    private static StatDefinition[] feedertxnStats = {
+        FeederTxnStatDefinition.TXNS_ACKED,
+        FeederTxnStatDefinition.TXNS_NOT_ACKED,
+        FeederTxnStatDefinition.TOTAL_TXN_MS,
+        FeederTxnStatDefinition.ACK_WAIT_MS,
+        FeederTxnStatDefinition.LAST_COMMIT_VLSN,
+        FeederTxnStatDefinition.LAST_COMMIT_TIMESTAMP,
+        FeederTxnStatDefinition.VLSN_RATE
+    };
+
+    private static StatDefinition[] binaryProtocolStats = {
+        BinaryProtocolStatDefinition.N_READ_NANOS,
+        BinaryProtocolStatDefinition.N_WRITE_NANOS,
+        BinaryProtocolStatDefinition.N_BYTES_READ,
+        BinaryProtocolStatDefinition.N_MESSAGES_READ,
+        BinaryProtocolStatDefinition.N_BYTES_WRITTEN,
+        BinaryProtocolStatDefinition.N_MESSAGE_BATCHES,
+        BinaryProtocolStatDefinition.N_MESSAGES_BATCHED,
+        BinaryProtocolStatDefinition.N_MESSAGES_WRITTEN,
+        BinaryProtocolStatDefinition.MESSAGE_READ_RATE,
+        BinaryProtocolStatDefinition.MESSAGE_WRITE_RATE,
+        BinaryProtocolStatDefinition.BYTES_READ_RATE,
+        BinaryProtocolStatDefinition.BYTES_WRITE_RATE,
+        BinaryProtocolStatDefinition.N_ACK_MESSAGES,
+        BinaryProtocolStatDefinition.N_GROUP_ACK_MESSAGES,
+        BinaryProtocolStatDefinition.N_MAX_GROUPED_ACKS,
+        BinaryProtocolStatDefinition.N_GROUPED_ACKS,
+        BinaryProtocolStatDefinition.N_ENTRIES_WRITTEN_OLD_VERSION
+    };
+
+    private static StatDefinition[] vlsnIndexStats = {
+        VLSNIndexStatDefinition.N_HITS,
+        VLSNIndexStatDefinition.N_MISSES,
+        VLSNIndexStatDefinition.N_HEAD_BUCKETS_DELETED,
+        VLSNIndexStatDefinition.N_TAIL_BUCKETS_DELETED,
+        VLSNIndexStatDefinition.N_BUCKETS_CREATED
+    };
+
+    /*
+     * Define min/max stats using the group name returned by loadStats, not
+     * necessarily what is defined in the underlying statistic. Some groups
+     * are combined into a super group.
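+     *
+     * (Added note) Statistic keys are composed as "<group>:<statName>" by
+     * the constructor and getStatisticProjections() below; for example, a
+     * hypothetical "Replay:nCommits" pairs a group name with a stat name.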
+ */ + public static StatManager.SDef[] minStats = { + new StatManager.SDef(ReplayStatDefinition.GROUP_NAME, + ReplayStatDefinition.MIN_COMMIT_PROCESSING_NANOS) + }; + + public static StatManager.SDef[] maxStats = { + new StatManager.SDef(FeederManagerStatDefinition.GROUP_NAME, + FeederManagerStatDefinition.N_MAX_REPLICA_LAG), + new StatManager.SDef(ReplayStatDefinition.GROUP_NAME, + ReplayStatDefinition.MAX_COMMIT_PROCESSING_NANOS), + new StatManager.SDef(BinaryProtocolStatDefinition.GROUP_NAME, + BinaryProtocolStatDefinition.N_MAX_GROUPED_ACKS) + }; + + public StatCaptureRepDefinitions() { + super(); + String groupname = FeederManagerStatDefinition.GROUP_NAME; + for (StatDefinition stat : feederStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = FeederTxnStatDefinition.GROUP_NAME; + for (StatDefinition stat : feedertxnStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = ReplayStatDefinition.GROUP_NAME; + for (StatDefinition stat : replayStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = ReplicaStatDefinition.GROUP_NAME; + for (StatDefinition stat : replicaStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = BinaryProtocolStatDefinition.GROUP_NAME; + for (StatDefinition stat : binaryProtocolStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = VLSNIndexStatDefinition.GROUP_NAME; + for (StatDefinition stat : vlsnIndexStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + } + + @Override + public SortedSet getStatisticProjections() { + SortedSet retval = new TreeSet(); + super.getProjectionsInternal(retval); + + String groupname = FeederManagerStatDefinition.GROUP_NAME; + for (StatDefinition stat : feederStats) { + retval.add(groupname + ":" + stat.getName()); + } + groupname = FeederTxnStatDefinition.GROUP_NAME; + for (StatDefinition stat : feedertxnStats) { + retval.add(groupname + ":" + stat.getName()); + } + groupname = ReplayStatDefinition.GROUP_NAME; + for (StatDefinition stat : replayStats) { + retval.add(groupname + ":" + stat.getName()); + } + groupname = ReplicaStatDefinition.GROUP_NAME; + for (StatDefinition stat : replicaStats) { + retval.add(groupname + ":" + stat.getName()); + } + groupname = BinaryProtocolStatDefinition.GROUP_NAME; + for (StatDefinition stat : binaryProtocolStats) { + retval.add(groupname + ":" + stat.getName()); + } + groupname = VLSNIndexStatDefinition.GROUP_NAME; + for (StatDefinition stat : vlsnIndexStats) { + retval.add(groupname + ":" + stat.getName()); + } + return retval; + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/AbstractDataChannel.java b/src/com/sleepycat/je/rep/utilint/net/AbstractDataChannel.java new file mode 100644 index 0000000..da4c0e2 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/AbstractDataChannel.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.utilint.net; + +import com.sleepycat.je.rep.net.DataChannel; +import java.nio.channels.SocketChannel; + +/** + * An abstract class that utilizes a delegate socketChannel for network + * I/O, but which provides an abstract ByteChannel interface for callers. + * This allows more interesting communication mechanisms to be introduced. + */ +abstract public class AbstractDataChannel implements DataChannel { + + /** + * The underlying socket channel + */ + protected final SocketChannel socketChannel; + + /** + * Constructor for sub-classes. + * @param socketChannel The underlying SocketChannel over which data will + * be sent. This should be the lowest-level socket so that select + * operations can be performed on it. + */ + protected AbstractDataChannel(SocketChannel socketChannel) { + this.socketChannel = socketChannel; + } + + /** + * Accessor for the underlying SocketChannel + * Callers may used the returned SocketChannel in order to query/modify + * connections attributes, but may not directly close, read from or write + * to the SocketChannel. + */ + @Override + public SocketChannel getSocketChannel() { + return socketChannel; + } +} + diff --git a/src/com/sleepycat/je/rep/utilint/net/AliasKeyManager.java b/src/com/sleepycat/je/rep/utilint/net/AliasKeyManager.java new file mode 100644 index 0000000..93da982 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/AliasKeyManager.java @@ -0,0 +1,112 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import java.security.PrivateKey; +import java.security.Principal; +import java.security.cert.X509Certificate; +import java.net.Socket; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.X509ExtendedKeyManager; + +/** + * An implementation of X509ExtendedKeyManager which delegates most operations + * to an underlying implementation, but which supports explicit selection of + * alias. + */ +public class AliasKeyManager extends X509ExtendedKeyManager { + + private final X509ExtendedKeyManager delegateKeyManager; + private final String serverAlias; + private final String clientAlias; + + /** + * Constructor. 
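+     *
+     * Illustrative use (an added sketch; the delegate would typically come
+     * from a KeyManagerFactory, and the alias names are hypothetical):
+     *
+     *   KeyManager km = new AliasKeyManager(delegate, "nodeServerKey",
+     *                                       "nodeClientKey");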
+ * @param delegateKeyManager the underlying key manager to fulfill key + * retrieval requests + * @param serverAlias the alias to return for server context requests + * @param clientAlias the alias to return for client context requests + */ + public AliasKeyManager(X509ExtendedKeyManager delegateKeyManager, + String serverAlias, + String clientAlias) { + this.delegateKeyManager = delegateKeyManager; + this.serverAlias = serverAlias; + this.clientAlias = clientAlias; + } + + @Override + public String[] getClientAliases(String keyType, Principal[] issuers) { + return delegateKeyManager.getClientAliases(keyType, issuers); + } + + @Override + public String chooseClientAlias( + String[] keyType, Principal[] issuers, Socket socket) { + if (clientAlias != null) { + return clientAlias; + } + + return delegateKeyManager.chooseClientAlias(keyType, issuers, socket); + } + + @Override + public String[] getServerAliases(String keyType, Principal[] issuers) { + return delegateKeyManager.getServerAliases(keyType, issuers); + } + + @Override + public String chooseServerAlias( + String keyType, Principal[] issuers, Socket socket) { + + if (serverAlias != null) { + return serverAlias; + } + + return delegateKeyManager.chooseServerAlias(keyType, issuers, socket); + } + + @Override + public X509Certificate[] getCertificateChain(String alias) { + return delegateKeyManager.getCertificateChain(alias); + } + + @Override + public PrivateKey getPrivateKey(String alias) { + return delegateKeyManager.getPrivateKey(alias); + } + + @Override + public String chooseEngineClientAlias(String[] keyType, + Principal[] issuers, + SSLEngine engine) { + if (clientAlias != null) { + return clientAlias; + } + return delegateKeyManager. + chooseEngineClientAlias(keyType, issuers, engine); + } + + @Override + public String chooseEngineServerAlias(String keyType, + Principal[] issuers, + SSLEngine engine) { + if (serverAlias != null) { + return serverAlias; + } + return delegateKeyManager. + chooseEngineServerAlias(keyType, issuers, engine); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/DataChannelFactoryBuilder.java b/src/com/sleepycat/je/rep/utilint/net/DataChannelFactoryBuilder.java new file mode 100644 index 0000000..6dc67b2 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/DataChannelFactoryBuilder.java @@ -0,0 +1,428 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.utilint.net;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Formatter;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.net.InstanceContext;
+import com.sleepycat.je.rep.net.InstanceLogger;
+import com.sleepycat.je.rep.net.InstanceParams;
+import com.sleepycat.je.rep.net.LoggerFactory;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.TracerFormatter;
+
+/**
+ * Class for creating DataChannel instances.
+ */
+public class DataChannelFactoryBuilder {
+
+    /**
+     * A count of the number of factories for which construction was
+     * attempted.
+     */
+    private static final AtomicInteger factoryCount = new AtomicInteger(0);
+
+    /**
+     * Construct the "default" DataChannelFactory that arises from an empty
+     * DataChannelFactory configuration.
+     */
+    public static DataChannelFactory constructDefault() {
+        return new SimpleChannelFactory();
+    }
+
+    /**
+     * Construct a DataChannelFactory from the specified network
+     * configuration.
+     * The choice of DataChannelFactory type is determined by the setting
+     * of {@link ReplicationNetworkConfig#CHANNEL_TYPE je.rep.channelType}.
+     *
+     * If set to ssl, the internal SSL implementation is used. If set to
+     * custom, a custom channel factory is constructed based on the setting
+     * of {@link ReplicationNetworkConfig#CHANNEL_FACTORY_CLASS
+     * je.rep.dataChannelFactoryClass}.
+     *
+     * If set to basic, or not set, SimpleChannelFactory is instantiated.
+     *
+     * @param repNetConfig The configuration to control factory building
+     * @return a DataChannelFactory
+     * @throws IllegalArgumentException if an invalid configuration
+     * property value or combination of values was specified.
+     */
+    public static DataChannelFactory construct(
+        ReplicationNetworkConfig repNetConfig)
+        throws IllegalArgumentException {
+
+        return construct(repNetConfig, (String) null);
+    }
+
+    /**
+     * Construct a DataChannelFactory from the specified access
+     * configuration.
+     * The choice of DataChannelFactory type is determined by the setting
+     * of {@link ReplicationNetworkConfig#CHANNEL_TYPE je.rep.channelType}.
+     *
+     * If set to ssl, the internal SSL implementation is used. If set to
+     * custom, a custom channel factory is constructed based on the setting
+     * of {@link ReplicationNetworkConfig#CHANNEL_FACTORY_CLASS
+     * je.rep.dataChannelFactoryClass}.
+     *
+     * If set to basic, or not set, SimpleChannelFactory is instantiated.
+     *
+     * @param repNetConfig The configuration to control factory building
+     * @param logContext A null-allowable String that contributes to the
+     * logging identifier for the factory.
+     * @return a DataChannelFactory
+     * @throws IllegalArgumentException if an invalid configuration
+     * property value or combination of values was specified.
+     */
+    public static DataChannelFactory construct(
+        ReplicationNetworkConfig repNetConfig, String logContext)
+        throws IllegalArgumentException {
+
+        final String logName = repNetConfig.getLogName();
+        if (logName.isEmpty() && (logContext == null || logContext.isEmpty())) {
+            return construct(repNetConfig, (LoggerFactory) null);
+        }
+
+        final String logId;
+        if (logName.isEmpty()) {
+            logId = logContext;
+        } else if (logContext == null || logContext.isEmpty()) {
+            logId = logName;
+        } else {
+            logId = logName + ":" + logContext;
+        }
+        final LoggerFactory loggerFactory = makeLoggerFactory(logId);
+
+        return construct(repNetConfig, loggerFactory);
+    }
+
+    /**
+     * Construct a DataChannelFactory from the specified access
+     * configuration.
+     * The choice of DataChannelFactory type is determined by the setting
+     * of {@link ReplicationNetworkConfig#CHANNEL_TYPE je.rep.channelType}.
+     *
+     * If set to ssl, the internal SSL implementation is used. If set to
+     * custom, a custom channel factory is constructed based on the setting
+     * of {@link ReplicationNetworkConfig#CHANNEL_FACTORY_CLASS
+     * je.rep.dataChannelFactoryClass}.
+     *
+     * If set to basic, or not set, SimpleChannelFactory is instantiated.
+     *
+     * @param repNetConfig The configuration to control factory building
+     * @param loggerFactory A null-allowable LoggerFactory for use in channel
+     * factory construction
+     * @return a DataChannelFactory
+     * @throws IllegalArgumentException if an invalid configuration
+     * property value or combination of values was specified.
+     */
+    public static DataChannelFactory construct(
+        ReplicationNetworkConfig repNetConfig,
+        LoggerFactory loggerFactory)
+        throws IllegalArgumentException {
+
+        final String channelType = repNetConfig.getChannelType();
+        final int factoryIndex = factoryCount.getAndIncrement();
+
+        /*
+         * Build the LoggerFactory if not provided by the caller
+         */
+        if (loggerFactory == null) {
+            String logName = repNetConfig.getLogName();
+            if (logName.isEmpty()) {
+                logName = Integer.toString(factoryIndex);
+            }
+            loggerFactory = makeLoggerFactory(logName);
+        }
+
+        final InstanceContext context =
+            new InstanceContext(repNetConfig, loggerFactory);
+
+        final String factoryClass = repNetConfig.getChannelFactoryClass();
+        if (factoryClass == null || factoryClass.isEmpty()) {
+            if (channelType.equalsIgnoreCase("basic")) {
+                return new SimpleChannelFactory(
+                    new InstanceParams(context, null));
+            }
+
+            if (channelType.equalsIgnoreCase("ssl")) {
+                return new SSLChannelFactory(new InstanceParams(context, null));
+            }
+
+            throw new IllegalArgumentException(
+                "The channelType setting '" + channelType + "' is not valid");
+        }
+
+        final String classParams = repNetConfig.getChannelFactoryParams();
+        final InstanceParams factoryParams =
+            new InstanceParams(context, classParams);
+        return construct(factoryClass, factoryParams);
+    }
+
+    /**
+     * Constructs a DataChannelFactory implementation.
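+     *
+     * (Added note) The named class must implement DataChannelFactory and
+     * provide a public constructor taking a single InstanceParams argument,
+     * as enforced by constructObject() below. A hypothetical example:
+     *
+     *   public class MyChannelFactory implements DataChannelFactory {
+     *       public MyChannelFactory(InstanceParams params) { ... }
+     *   }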
+ * @param factoryClassName the name of the class to instantiate, + * which must implement DataChannelFactory + * @param factoryParams the context and factory arguments + * @return a newly constructed instance + * @throws IllegalArgumentException if the arguments are invalid + */ + private static DataChannelFactory construct( + String factoryClassName, InstanceParams factoryParams) + throws IllegalArgumentException { + + return (DataChannelFactory) constructObject( + factoryClassName, DataChannelFactory.class, + "data channel factory", + new CtorArgSpec(new Class[] { InstanceParams.class }, + new Object[] { factoryParams })); + } + + /** + * Instantiates a class based on a configuration specification. This method + * looks up a class of the specified name, then finds a constructor with + * an argument list that matches the caller's specification, and constructs + * an instance using that constructor and validates that the instance + * extends or implements the mustImplement class specified. + * + * @param instClassName the name of the class to instantiate + * @param mustImplement a class denoting a required base class or + * required implemented interface of the class whose name is + * specified by instClassName. + * @param miDesc a descriptive term for the mustImplement class + * @param ctorArgSpec specifies the required constructor signature and + * the values to be passed + * @return an instance of the specified class + * @throws IllegalArgumentException if any of the input arguments are + * invalid + */ + static Object constructObject(String instClassName, + Class mustImplement, + String miDesc, + CtorArgSpec ctorArgSpec) + throws IllegalArgumentException { + + /* + * Resolve the class + */ + Class instClass = null; + try { + instClass = Class.forName(instClassName); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException( + "Error resolving " + miDesc + " class " + + instClassName, cnfe); + } + + /* + * Find an appropriate constructor for the class. + */ + final Constructor constructor; + try { + constructor = instClass.getConstructor(ctorArgSpec.argTypes); + } catch (NoSuchMethodException nsme) { + throw new IllegalArgumentException( + "Unable to find an appropriate constructor for " + miDesc + + " class " + instClassName); + } + + /* + * Get an instance of the class. + */ + final Object instObject; + try { + instObject = constructor.newInstance(ctorArgSpec.argValues); + } catch (IllegalAccessException iae) { + /* Constructor is not accessible */ + throw new IllegalArgumentException( + "Error instantiating " + miDesc + " class " + instClassName + + ". Not accessible?", + iae); + } catch (IllegalArgumentException iae) { + /* Wrong arguments - should not be possible here */ + throw new IllegalArgumentException( + "Error instantiating " + miDesc + " class " + instClassName, + iae); + } catch (InstantiationException ie) { + /* Class is abstract */ + throw new IllegalArgumentException( + "Error instantiating " + miDesc + " class " + instClassName + + ". Class is abstract?", + ie); + } catch (InvocationTargetException ite) { + /* Exception thrown within constructor */ + throw new IllegalArgumentException( + "Error instantiating " + miDesc + " class " + instClassName + + ". Exception within constructor", + ite); + } + + /* + * In this context, the class must implement the specified + * interface. + */ + if (! 
(mustImplement.isAssignableFrom(instObject.getClass()))) { + throw new IllegalArgumentException( + "The " + miDesc + " class " + instClassName + + " does not implement " + mustImplement.getName()); + } + + return instObject; + } + + /** + * Creates a logger factory based on an EnvironmentImpl + * + * @param envImpl a non-null EnvironmentImpl + */ + public static LoggerFactory makeLoggerFactory(EnvironmentImpl envImpl) { + if (envImpl == null) { + throw new IllegalArgumentException("envImpl must not be null"); + } + + return new ChannelLoggerFactory(envImpl, null /* formatter */); + } + + /** + * Creates a logger factory based on a fixed string + * + * @param prefix a fixed string to be used as logger prefix + */ + public static LoggerFactory makeLoggerFactory(String prefix) { + if (prefix == null) { + throw new IllegalArgumentException("prefix must not be null"); + } + + final Formatter formatter = new ChannelFormatter(prefix); + + return new ChannelLoggerFactory(null, /* envImpl */ formatter); + } + + + /** + * A simple class that captures the proposed formal and actual argument + * lists to match against possible constructors. + */ + static class CtorArgSpec { + private final Class[] argTypes; + private final Object[] argValues; + + CtorArgSpec(Class[] argTypes, Object[] argValues) { + this.argTypes = argTypes; + this.argValues = argValues; + } + } + + /** + * A simple implementation of LoggerFactory that encapsulates the + * necessary information to do JE environment-friendly logging without + * needing to know JE HA internal logging. + */ + static class ChannelLoggerFactory implements LoggerFactory { + private final EnvironmentImpl envImpl; + private final Formatter formatter; + + /** + * Creates a LoggerFactory for use in construction of channel + * objects. The caller should supply either an EnvironmentImpl or a + * Formatter object. + * + * @param envImpl a possibly-null EnvironmentImpl + * @param formatter a possible null formatter + */ + ChannelLoggerFactory(EnvironmentImpl envImpl, + Formatter formatter) { + this.envImpl = envImpl; + this.formatter = formatter; + } + + /** + * @see LoggerFactory#getLogger(Class) + */ + @Override + public InstanceLogger getLogger(Class clazz) { + final Logger logger; + if (envImpl == null) { + logger = LoggerUtils.getLoggerFormatterNeeded(clazz); + } else { + logger = LoggerUtils.getLogger(clazz); + } + return new ChannelInstanceLogger(envImpl, formatter, logger); + } + } + + /** + * A simple implementation of InstanceLogger that encapuslates the + * necessary information to do JE environment-friendly logging without + * needing to know JE logging rules. + */ + static class ChannelInstanceLogger implements InstanceLogger { + private final EnvironmentImpl envImpl; + private final Formatter formatter; + private final Logger logger; + + /** + * Creates a ChannelInstanceLogger for use in construction of channel + * objects. The caller should supply either an EnvironmentImpl or a + * Formatter object. 
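+         *
+         * (Added note) When envImpl is null, messages are formatted with
+         * the supplied Formatter via the formatter-needed logger; otherwise
+         * they are routed through the environment-aware logging in
+         * LoggerUtils, mirroring ChannelLoggerFactory.getLogger() above.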
+ * + * @param envImpl a possibly-null EnvironmentImpl + * @param formatter a possible null formatter + * @param logger a logger created via LoggerUtils.getLogger() + */ + ChannelInstanceLogger(EnvironmentImpl envImpl, + Formatter formatter, + Logger logger) { + this.envImpl = envImpl; + this.formatter = formatter; + this.logger = logger; + } + + /** + * @see InstanceLogger#log(Level, String) + */ + @Override + public void log(Level logLevel, String msg) { + LoggerUtils.logMsg(logger, envImpl, formatter, logLevel, msg); + } + } + + /** + * Formatter for log messages + */ + static class ChannelFormatter extends TracerFormatter { + private final String id; + + ChannelFormatter(String id) { + super(); + this.id = id; + } + + @Override + protected void appendEnvironmentName(StringBuilder sb) { + sb.append(" [" + id + "]"); + } + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLChannelFactory.java b/src/com/sleepycat/je/rep/utilint/net/SSLChannelFactory.java new file mode 100644 index 0000000..6848820 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLChannelFactory.java @@ -0,0 +1,976 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.channels.SocketChannel; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.util.ArrayList; +import java.util.Arrays; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509ExtendedKeyManager; + +import com.sleepycat.je.rep.ReplicationSSLConfig; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.InstanceContext; +import com.sleepycat.je.rep.net.InstanceLogger; +import com.sleepycat.je.rep.net.InstanceParams; +import com.sleepycat.je.rep.net.PasswordSource; +import com.sleepycat.je.rep.net.SSLAuthenticator; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder.CtorArgSpec; + +/** + * A factory class for generating SSLDataChannel instances based on + * SocketChannel instances. + */ +public class SSLChannelFactory implements DataChannelFactory { + + /* + * Protocol to use in call to SSLContext.getInstance. This isn't a + * protocol per-se. Actual protocol selection is chosen at the time + * a connection is established based on enabled protocol settings for + * both client and server. 
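+     *
+     * (Added note) "TLS" here only selects the SSLContext implementation;
+     * the protocol versions actually negotiated can be narrowed later
+     * through SSLParameters, e.g. a hypothetical
+     * sslParameters.setProtocols(new String[] { "TLSv1.2" });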
+ */ + private static final String SSL_CONTEXT_PROTOCOL = "TLS"; + + /** + * A system property to allow users to specify the correct X509 certificate + * algorithm name based on the JVMs they are using. + */ + private static final String X509_ALGO_NAME_PROPERTY = + "je.ssl.x509AlgoName"; + + /** + * The algorithm name of X509 certificate. It depends on the vendor of JVM. + */ + private static final String X509_ALGO_NAME = getX509AlgoName(); + + /** + * An SSLContext that will hold all the interesting connection parameter + * information for session creation in server mode. + */ + private final SSLContext serverSSLContext; + + /** + * An SSLContext that will hold all the interesting connection parameter + * information for session creation in client mode. + */ + private final SSLContext clientSSLContext; + + /** + * The base SSLParameters for use in channel creation. + */ + private final SSLParameters baseSSLParameters; + + /** + * An authenticator object for validating SSL session peers when acting + * in server mode + */ + private final SSLAuthenticator sslAuthenticator; + + /** + * A host verifier object for validating SSL session peers when acting + * in client mode + */ + private final HostnameVerifier sslHostVerifier; + + private final InstanceLogger logger; + + /** + * Constructor for use during creating based on access configuration + */ + public SSLChannelFactory(InstanceParams params) { + serverSSLContext = constructSSLContext(params, false); + clientSSLContext = constructSSLContext(params, true); + baseSSLParameters = + filterSSLParameters(constructSSLParameters(params), + serverSSLContext); + sslAuthenticator = constructSSLAuthenticator(params); + sslHostVerifier = constructSSLHostVerifier(params); + logger = params.getContext().getLoggerFactory().getLogger(getClass()); + } + + /** + * Constructor for use when SSL configuration objects have already + * been constructed. + */ + public SSLChannelFactory(SSLContext serverSSLContext, + SSLContext clientSSLContext, + SSLParameters baseSSLParameters, + SSLAuthenticator sslAuthenticator, + HostnameVerifier sslHostVerifier, + InstanceLogger logger) { + + this.serverSSLContext = serverSSLContext; + this.clientSSLContext = clientSSLContext; + this.baseSSLParameters = + filterSSLParameters(baseSSLParameters, serverSSLContext); + this.sslAuthenticator = sslAuthenticator; + this.sslHostVerifier = sslHostVerifier; + this.logger = logger; + } + + /** + * Construct a DataChannel wrapping the newly accepted SocketChannel + */ + @Override + public DataChannel acceptChannel(SocketChannel socketChannel) { + + final SocketAddress socketAddress = + socketChannel.socket().getRemoteSocketAddress(); + String host = null; + if (socketAddress == null) { + throw new IllegalArgumentException( + "socketChannel is not connected"); + } + + if (socketAddress instanceof InetSocketAddress) { + host = ((InetSocketAddress)socketAddress).getAddress().toString(); + } + + final SSLEngine engine = + serverSSLContext.createSSLEngine(host, + socketChannel.socket().getPort()); + engine.setSSLParameters(baseSSLParameters); + engine.setUseClientMode(false); + if (sslAuthenticator != null) { + engine.setWantClientAuth(true); + } + + return new SSLDataChannel(socketChannel, engine, null, null, + sslAuthenticator, logger); + } + + /** + * Construct a DataChannel wrapping a new connection to the specified + * address using the associated connection options. 
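+ *
+ * A minimal usage sketch (host and port invented for illustration,
+ * and default ConnectOptions assumed):
+ *
+ *   DataChannelFactory factory = ...; // e.g. an SSLChannelFactory
+ *   DataChannel channel = factory.connect(
+ *       new InetSocketAddress("node1.example.com", 5001),
+ *       new ConnectOptions());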
+ */ + @Override + public DataChannel connect(InetSocketAddress addr, + ConnectOptions connectOptions) + throws IOException { + + final SocketChannel socketChannel = + RepUtils.openSocketChannel(addr, connectOptions); + + /* + * Figure out a good host to specify. This is used for session caching + * so it's not critical what answer we come up with, so long as it + * is relatively repeatable. + */ + String host = addr.getHostName(); + if (host == null) { + host = addr.getAddress().toString(); + } + + final SSLEngine engine = + clientSSLContext.createSSLEngine(host, addr.getPort()); + engine.setSSLParameters(baseSSLParameters); + engine.setUseClientMode(true); + + return new SSLDataChannel( + socketChannel, engine, host, sslHostVerifier, null, logger); + } + + /** + * Reads the KeyStore configured in the ReplicationNetworkConfig into + * memory. + */ + public static KeyStore readKeyStore(InstanceContext context) { + + KeyStoreInfo ksInfo = readKeyStoreInfo(context); + try { + return ksInfo.ks; + } finally { + ksInfo.clearPassword(); + } + } + + /** + * Checks whether the auth string is a valid authenticator specification + */ + public static boolean isValidAuthenticator(String authSpec) { + authSpec = authSpec.trim(); + + if (authSpec.equals("") || authSpec.equals("mirror")) { + return true; + } + + if (authSpec.startsWith("dnmatch(") && authSpec.endsWith(")")) { + try { + SSLDNAuthenticator.validate(authSpec); + return true; + } catch(IllegalArgumentException iae) { + return false; + } + } + + return false; + } + + /** + * Checks whether input string is a valid host verifier specification + */ + public static boolean isValidHostVerifier(String hvSpec) { + + hvSpec = hvSpec.trim(); + + if (hvSpec.equals("") || hvSpec.equals("mirror") || + hvSpec.equals("hostname")) { + return true; + } + + if (hvSpec.startsWith("dnmatch(") && hvSpec.endsWith(")")) { + try { + SSLDNHostVerifier.validate(hvSpec); + } catch (IllegalArgumentException iae) { + return false; + } + return true; + } + return false; + } + + /** + * Builds an SSLContext object for the specified access mode. + * @param params general instantiation information + * @param clientMode set to true if the SSLContext is being created for + * the client side of an SSL connection and false otherwise + */ + private static SSLContext constructSSLContext( + InstanceParams params, boolean clientMode) { + + final ReplicationSSLConfig config = + (ReplicationSSLConfig) params.getContext().getRepNetConfig(); + + KeyManager[] kmList = null; + final KeyStoreInfo ksInfo = readKeyStoreInfo(params.getContext()); + + if (ksInfo != null) { + try { + + /* + * Determine whether a specific key is supposed to be used + */ + String ksAliasProp = clientMode ? 
+ config.getSSLClientKeyAlias() :
+ config.getSSLServerKeyAlias();
+ if (ksAliasProp != null && ksAliasProp.isEmpty()) {
+ ksAliasProp = null;
+ }
+
+ kmList = buildKeyManagerList(ksInfo, ksAliasProp, clientMode);
+ } finally {
+ ksInfo.clearPassword();
+ }
+ }
+
+ TrustManager[] tmList = null;
+ final KeyStoreInfo tsInfo = readTrustStoreInfo(params.getContext());
+ if (tsInfo != null) {
+ try {
+ tmList = buildTrustManagerList(tsInfo);
+ } finally {
+ tsInfo.clearPassword();
+ }
+ }
+
+ /*
+ * Get an SSLContext object
+ */
+ SSLContext newContext = null;
+ try {
+ newContext = SSLContext.getInstance(SSL_CONTEXT_PROTOCOL);
+ } catch (NoSuchAlgorithmException nsae) {
+ throw new IllegalStateException(
+ "Unable to find a suitable SSLContext", nsae);
+ }
+
+ /*
+ * Put it all together into the SSLContext object
+ */
+ try {
+ newContext.init(kmList, tmList, null);
+ } catch (KeyManagementException kme) {
+ throw new IllegalStateException(
+ "Error establishing SSLContext", kme);
+ }
+
+ return newContext;
+ }
+
+ /**
+ * Builds a list of KeyManagers for incorporation into an SSLContext.
+ *
+ * @param ksInfo a KeyStoreInfo referencing the Keystore for which
+ * the key manager list is to be built.
+ * @param ksAlias an optional KeyStore alias. If set, the key manager
+ * for X509 certs will always select the certificate with the specified
+ * alias.
+ * @param clientMode set to true if this is for the client side of
+ * an SSL connection and false otherwise.
+ */
+ private static KeyManager[] buildKeyManagerList(KeyStoreInfo ksInfo,
+ String ksAlias,
+ boolean clientMode) {
+
+ /*
+ * Get a KeyManagerFactory
+ */
+ final KeyManagerFactory kmf;
+ try {
+ kmf = KeyManagerFactory.getInstance(X509_ALGO_NAME);
+ } catch (NoSuchAlgorithmException nsae) {
+ throw new IllegalStateException(
+ "Unable to find a suitable KeyManagerFactory", nsae);
+ }
+
+ /*
+ * Initialize the key manager factory
+ */
+ try {
+ kmf.init(ksInfo.ks, ksInfo.ksPwd);
+ } catch (KeyStoreException kse) {
+ throw new IllegalStateException(
+ "Error processing keystore file " + ksInfo.ksFile,
+ kse);
+ } catch (NoSuchAlgorithmException nsae) {
+ throw new IllegalStateException(
+ "Unable to find appropriate algorithm for " +
+ "keystore file " + ksInfo.ksFile, nsae);
+ } catch (UnrecoverableKeyException uke) {
+ throw new IllegalStateException(
+ "Unable to recover key from keystore file " +
+ ksInfo.ksFile, uke);
+ }
+
+ /*
+ * Get the list of key managers used
+ */
+ KeyManager[] kmList = kmf.getKeyManagers();
+
+ /*
+ * If an alias was specified, we need to construct an
+ * AliasKeyManager, which will delegate to the correct
+ * underlying KeyManager, which we need to locate.
+ */
+ if (ksAlias != null) {
+
+ /*
+ * Locate the first appropriate keymanager in the list
+ */
+ X509ExtendedKeyManager x509KeyManager = null;
+ for (KeyManager km : kmList) {
+ if (km instanceof X509ExtendedKeyManager) {
+ x509KeyManager = (X509ExtendedKeyManager) km;
+ break;
+ }
+ }
+
+ if (x509KeyManager == null) {
+ throw new IllegalStateException(
+ "Unable to locate an X509ExtendedKeyManager " +
+ "corresponding to keyStore " + ksInfo.ksFile);
+ }
+
+ kmList = new KeyManager[] {
+ (clientMode ?
+ new AliasKeyManager(x509KeyManager, null, ksAlias) :
+ new AliasKeyManager(x509KeyManager, ksAlias, null)) };
+ }
+
+ return kmList;
+ }
+
+ /**
+ * Reads a KeyStore into memory based on the config.
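+ *
+ * When the JE-specific setting is absent, the standard JSSE system
+ * properties consulted below may be used instead, e.g. (paths
+ * invented for illustration):
+ *
+ *   -Djavax.net.ssl.keyStore=/path/to/keystore.jks
+ *   -Djavax.net.ssl.keyStorePassword=changeit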
+ */ + private static KeyStoreInfo readKeyStoreInfo(InstanceContext context) { + + final ReplicationSSLConfig config = + (ReplicationSSLConfig) context.getRepNetConfig(); + + /* + * Determine what KeyStore file to access + */ + String ksProp = config.getSSLKeyStore(); + if (ksProp == null || ksProp.isEmpty()) { + ksProp = System.getProperty("javax.net.ssl.keyStore"); + } + + if (ksProp == null) { + return null; + } + + /* + * Determine what type of keystore to assume. If not specified + * loadStore determines the default + */ + final String ksTypeProp = config.getSSLKeyStoreType(); + + final char[] ksPw = getKeyStorePassword(context); + try { + if (ksPw == null) { + throw new IllegalArgumentException( + "Unable to open keystore without a password"); + } + + /* + * Get a KeyStore instance + */ + final KeyStore ks = loadStore(ksProp, ksPw, "keystore", ksTypeProp); + + return new KeyStoreInfo(ksProp, ks, ksPw); + } finally { + if (ksPw != null) { + Arrays.fill(ksPw, ' '); + } + } + } + + /** + * Finds the keystore password based on the input config. + */ + private static char[] getKeyStorePassword(InstanceContext context) { + + final ReplicationSSLConfig config = + (ReplicationSSLConfig) context.getRepNetConfig(); + + char[] ksPw = null; + + /* + * Determine the password for the keystore file + * Try first using a password source, either explicit or + * constructed. + */ + PasswordSource ksPwSource = config.getSSLKeyStorePasswordSource(); + if (ksPwSource == null) { + ksPwSource = + constructKSPasswordSource(new InstanceParams(context, null)); + } + + if (ksPwSource != null) { + ksPw = ksPwSource.getPassword(); + } else { + /* Next look for an explicit password setting */ + String ksPwProp = config.getSSLKeyStorePassword(); + if (ksPwProp == null || ksPwProp.isEmpty()) { + + /* + * Finally, consider the standard Java Keystore + * password system property + */ + ksPwProp = + System.getProperty("javax.net.ssl.keyStorePassword"); + } + if (ksPwProp != null) { + ksPw = ksPwProp.toCharArray(); + } + } + + return ksPw; + } + + /** + * Builds a TrustManager list for the input Truststore for use in creating + * an SSLContext. + */ + private static TrustManager[] buildTrustManagerList(KeyStoreInfo tsInfo) { + + final TrustManagerFactory tmf; + try { + tmf = TrustManagerFactory.getInstance(X509_ALGO_NAME); + } catch (NoSuchAlgorithmException nsae) { + throw new IllegalStateException( + "Unable to find a suitable TrustManagerFactory", nsae); + } + + try { + tmf.init(tsInfo.ks); + } catch (KeyStoreException kse) { + throw new IllegalStateException( + "Error initializing truststore " + tsInfo.ksFile, kse); + } + + return tmf.getTrustManagers(); + } + + /** + * Based on the input config, read the configured TrustStore into memory. 
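+ *
+ * As with the keystore, the stock JSSE fallback applies, e.g. (path
+ * invented for illustration):
+ *
+ *   -Djavax.net.ssl.trustStore=/path/to/truststore.jks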
+ */
+ private static KeyStoreInfo readTrustStoreInfo(InstanceContext context) {
+
+ final ReplicationSSLConfig config =
+ (ReplicationSSLConfig) context.getRepNetConfig();
+
+ /*
+ * Determine what truststore file, if any, to use
+ */
+ String tsProp = config.getSSLTrustStore();
+ if (tsProp == null || tsProp.isEmpty()) {
+ tsProp = System.getProperty("javax.net.ssl.trustStore");
+ }
+
+ /*
+ * Determine what type of truststore to assume
+ */
+ String tsTypeProp = config.getSSLTrustStoreType();
+ if (tsTypeProp == null || tsTypeProp.isEmpty()) {
+ tsTypeProp = KeyStore.getDefaultType();
+ }
+
+ /*
+ * Build a TrustStore, if specified
+ */
+
+ if (tsProp != null) {
+ final KeyStore ts =
+ loadStore(tsProp, null, "truststore", tsTypeProp);
+
+ return new KeyStoreInfo(tsProp, ts, null);
+ }
+
+ return null;
+ }
+
+ /**
+ * Create an SSLParameters based on the input configuration.
+ */
+ private static SSLParameters constructSSLParameters(
+ InstanceParams params) {
+
+ final ReplicationSSLConfig config =
+ (ReplicationSSLConfig) params.getContext().getRepNetConfig();
+
+ /*
+ * Determine cipher suites configuration
+ */
+ String cipherSuitesProp = config.getSSLCipherSuites();
+ String[] cipherSuites = null;
+ if (cipherSuitesProp != null && !cipherSuitesProp.isEmpty()) {
+ cipherSuites = cipherSuitesProp.split(",");
+ }
+
+ /*
+ * Determine protocols configuration
+ */
+ String protocolsProp = config.getSSLProtocols();
+ String[] protocols = null;
+ if (protocolsProp != null && !protocolsProp.isEmpty()) {
+ protocols = protocolsProp.split(",");
+ }
+
+ return new SSLParameters(cipherSuites, protocols);
+ }
+
+ /**
+ * Filter SSLParameter configuration to respect the supported
+ * configuration capabilities of the context.
+ */
+ private static SSLParameters filterSSLParameters(
+ SSLParameters configParams, SSLContext filterContext)
+ throws IllegalArgumentException {
+
+ SSLParameters suppParams = filterContext.getSupportedSSLParameters();
+
+ /* Filter the cipher suite selection */
+ String[] configCipherSuites = configParams.getCipherSuites();
+ if (configCipherSuites != null) {
+ final String[] suppCipherSuites = suppParams.getCipherSuites();
+ configCipherSuites =
+ filterConfig(configCipherSuites, suppCipherSuites);
+ if (configCipherSuites.length == 0) {
+ throw new IllegalArgumentException(
+ "None of the configured SSL cipher suites are supported " +
+ "by the environment.");
+ }
+ }
+
+ /* Filter the protocol selection */
+ String[] configProtocols =
+ configParams.getProtocols();
+ if (configProtocols != null) {
+ final String[] suppProtocols = suppParams.getProtocols();
+ configProtocols = filterConfig(configProtocols, suppProtocols);
+ if (configProtocols.length == 0) {
+ throw new IllegalArgumentException(
+ "None of the configured SSL protocols are supported " +
+ "by the environment.");
+ }
+ }
+
+ final SSLParameters newParams =
+ new SSLParameters(configCipherSuites, configProtocols);
+ newParams.setWantClientAuth(configParams.getWantClientAuth());
+ newParams.setNeedClientAuth(configParams.getNeedClientAuth());
+ return newParams;
+ }
+
+ /**
+ * Return the intersection of configChoices and supported
+ */
+ private static String[] filterConfig(String[] configChoices,
+ String[] supported) {
+
+ ArrayList keep = new ArrayList();
+ for (String choice : configChoices) {
+ for (String supp : supported) {
+ if (choice.equals(supp)) {
+ keep.add(choice);
+ break;
+ }
+ }
+ }
+ return keep.toArray(new String[keep.size()]);
+ }
+
+ /**
+ * Build an SSLAuthenticator or 
HostnameVerifier based on property
+ * configuration. This method looks up a class of the specified name,
+ * then finds a constructor that has a single argument of type
+ * InstanceParams and constructs an instance with that constructor, then
+ * validates that the instance extends or implements the mustImplement
+ * class specified.
+ *
+ * @param params the parameters for constructing this factory.
+ * @param checkerClassName the name of the class to instantiate
+ * @param checkerClassParams the value of the configured String params
+ * argument
+ * @param mustImplement a class denoting a required base class or
+ * required implemented interface of the class whose name is
+ * specified by checkerClassName.
+ * @param miDesc a descriptive term for the class to be instantiated
+ * @return an instance of the specified class
+ */
+ private static Object constructSSLChecker(
+ InstanceParams params,
+ String checkerClassName,
+ String checkerClassParams,
+ Class mustImplement,
+ String miDesc) {
+
+ InstanceParams objParams =
+ new InstanceParams(params.getContext(), checkerClassParams);
+
+ return DataChannelFactoryBuilder.constructObject(
+ checkerClassName, mustImplement, miDesc,
+ /* class(InstanceParams) */
+ new CtorArgSpec(
+ new Class[] { InstanceParams.class },
+ new Object[] { objParams }));
+ }
+
+ /**
+ * Builds an SSLAuthenticator based on the input configuration referenced
+ * by params.
+ */
+ private static SSLAuthenticator constructSSLAuthenticator(
+ InstanceParams params)
+ throws IllegalArgumentException {
+
+ final ReplicationSSLConfig config =
+ (ReplicationSSLConfig) params.getContext().getRepNetConfig();
+
+ final String authSpec = config.getSSLAuthenticator();
+ final String authClassName = config.getSSLAuthenticatorClass();
+
+ /* check for conflicts */
+ if (authSpec != null && !authSpec.equals("") &&
+ authClassName != null && !authClassName.equals("")) {
+
+ throw new IllegalArgumentException(
+ "Cannot specify both authenticator and authenticatorClass");
+ }
+
+ if (authSpec != null && !authSpec.equals("")) {
+ /* construct an authenticator of a known type */
+ return constructStdAuthenticator(params, authSpec);
+ }
+
+ if (authClassName == null || authClassName.equals("")) {
+ return null;
+ }
+
+ /* construct an authenticator using the specified class */
+ final String authParams = config.getSSLAuthenticatorParams();
+
+ return (SSLAuthenticator) constructSSLChecker(
+ params, authClassName, authParams, SSLAuthenticator.class,
+ "authenticator");
+ }
+
+ /**
+ * Builds an SSLAuthenticator of a known type.
+ */
+ private static SSLAuthenticator constructStdAuthenticator(
+ InstanceParams params, String authSpec)
+ throws IllegalArgumentException {
+
+ authSpec = authSpec.trim();
+ if (authSpec.startsWith("dnmatch(") && authSpec.endsWith(")")) {
+ /* a DN matching authenticator */
+ final String match =
+ authSpec.substring("dnmatch(".length(),
+ authSpec.length()-1);
+ return new SSLDNAuthenticator(
+ new InstanceParams(params.getContext(), match));
+ } else if (authSpec.equals("mirror")) {
+ /* a mirroring authenticator */
+ return new SSLMirrorAuthenticator(
+ new InstanceParams(params.getContext(), null));
+ }
+
+ throw new IllegalArgumentException(
+ authSpec + " is not a valid authenticator specification.");
+ }
+
+ /**
+ * Builds a HostnameVerifier based on the configuration referenced in
+ * params.
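+ *
+ * For reference, the specification strings accepted by the standard
+ * verifiers constructed below take these forms (the DN pattern is
+ * invented for illustration):
+ *
+ *   hostname
+ *   mirror
+ *   dnmatch(CN=node1\.example\.com.*)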
+ */ + private static HostnameVerifier constructSSLHostVerifier( + InstanceParams params) + throws IllegalArgumentException { + + final ReplicationSSLConfig config = + (ReplicationSSLConfig) params.getContext().getRepNetConfig(); + final String hvSpec = config.getSSLHostVerifier(); + final String hvClassName = config.getSSLHostVerifierClass(); + + /* Check for conflicts */ + if (hvSpec != null && !hvSpec.equals("") && + hvClassName != null && !hvClassName.equals("")) { + + throw new IllegalArgumentException( + "Cannot specify both hostVerifier and hostVerifierClass"); + } + + if (hvSpec != null && !hvSpec.equals("")) { + /* construct a host verifier of a known type */ + return constructStdHostVerifier(params, hvSpec); + } + + if (hvClassName == null || hvClassName.equals("")) { + return null; + } + + /* construct a host verifier using the specified class */ + final String hvParams = config.getSSLHostVerifierParams(); + + return (HostnameVerifier) constructSSLChecker( + params, hvClassName, hvParams, HostnameVerifier.class, + "hostname verifier"); + } + + /** + * Builds a HostnameVerifier of a known type. + */ + private static HostnameVerifier constructStdHostVerifier( + InstanceParams params, String hvSpec) + throws IllegalArgumentException { + + hvSpec = hvSpec.trim(); + if (hvSpec.startsWith("dnmatch(") && hvSpec.endsWith(")")) { + /* a DN matching host verifier */ + final String match = hvSpec.substring("dnmatch(".length(), + hvSpec.length()-1); + return new SSLDNHostVerifier( + new InstanceParams(params.getContext(), match)); + + } else if (hvSpec.equals("mirror")) { + /* a mirroring host verifier */ + return new SSLMirrorHostVerifier( + new InstanceParams(params.getContext(), null)); + + } else if (hvSpec.equals("hostname")) { + /* a standard hostname verifier */ + return new SSLStdHostVerifier( + new InstanceParams(params.getContext(), null)); + } + + throw new IllegalArgumentException( + hvSpec + " is not a valid hostVerifier specification."); + } + + /** + * Builds a PasswordSource instance via generic instantiation. + * + * @param params the parameters driving the instantiation + * @param pwdSrcClassName the name of the class to instantiate + * @param pwSrcParams a possibly null String that has been configured as + * an argument to the class's constructor. + * @return the new instance + */ + private static PasswordSource constructPasswordSource( + InstanceParams params, String pwdSrcClassName, String pwSrcParams) { + + final InstanceParams objParams = + new InstanceParams(params.getContext(), pwSrcParams); + + return (PasswordSource) + DataChannelFactoryBuilder.constructObject( + pwdSrcClassName, PasswordSource.class, "password source", + /* class(InstanceParams) */ + new CtorArgSpec( + new Class[] { InstanceParams.class }, + new Object[] { objParams })); + } + + /** + * Build a PasswordSource for the keystore based on the configuration + * referenced by params. 
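+ *
+ * A hedged sketch of the expected shape of such a class (class name
+ * and password invented for illustration):
+ *
+ *   public class FixedPasswordSource implements PasswordSource {
+ *       public FixedPasswordSource(InstanceParams params) { }
+ *       @Override
+ *       public char[] getPassword() {
+ *           return "changeit".toCharArray();
+ *       }
+ *   }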
+ */
+ private static PasswordSource constructKSPasswordSource(
+ InstanceParams params) {
+
+ final ReplicationSSLConfig config =
+ (ReplicationSSLConfig) params.getContext().getRepNetConfig();
+
+ final String pwSrcClassName =
+ config.getSSLKeyStorePasswordClass();
+
+ if (pwSrcClassName == null || pwSrcClassName.equals("")) {
+ return null;
+ }
+
+ final String pwSrcParams =
+ config.getSSLKeyStorePasswordParams();
+
+ return constructPasswordSource(params, pwSrcClassName, pwSrcParams);
+ }
+
+ /**
+ * Load a keystore/truststore file into memory
+ * @param storeName the name of the store file
+ * @param storePassword the password for the store; possibly null
+ * @param storeFlavor a descriptive name of store type
+ * @param storeType JKS, etc
+ * @throws IllegalArgumentException if the specified parameters
+ * do not allow a store to be successfully loaded
+ */
+ private static KeyStore loadStore(String storeName,
+ char[] storePassword,
+ String storeFlavor,
+ String storeType)
+ throws IllegalArgumentException {
+
+ if (storeType == null || storeType.isEmpty()) {
+ storeType = KeyStore.getDefaultType();
+ }
+
+ final KeyStore ks;
+ try {
+ ks = KeyStore.getInstance(storeType);
+ } catch (KeyStoreException kse) {
+ throw new IllegalArgumentException(
+ "Unable to find a " + storeFlavor + " instance of type " +
+ storeType,
+ kse);
+ }
+
+ final FileInputStream fis;
+ try {
+ fis = new FileInputStream(storeName);
+ } catch (FileNotFoundException fnfe) {
+ throw new IllegalArgumentException(
+ "Unable to locate specified " + storeFlavor +
+ " " + storeName, fnfe);
+ }
+
+ try {
+ ks.load(fis, storePassword);
+ } catch (IOException ioe) {
+ throw new IllegalArgumentException(
+ "Error reading from " + storeFlavor + " file " + storeName,
+ ioe);
+ } catch (NoSuchAlgorithmException nsae) {
+ throw new IllegalArgumentException(
+ "Unable to check " + storeFlavor + " integrity: " + storeName,
+ nsae);
+ } catch (CertificateException ce) {
+ throw new IllegalArgumentException(
+ "Not all certificates could be loaded: " + storeName,
+ ce);
+ } finally {
+ try {
+ fis.close();
+ } catch (IOException ioe) {
+ /* ignored */
+ }
+ }
+
+ return ks;
+ }
+
+ /**
+ * Gets a proper algorithm name for the X.509 certificate key manager. If
+ * users already specify it via setting the system property of
+ * "je.ssl.x509AlgoName", use it directly. Otherwise, for IBM J9 VM, the
+ * name is "IbmX509". For Hotspot and other JVMs, the name of "SunX509"
+ * will be used.
+ *
+ * @return algorithm name for X509 certificate manager
+ */
+ private static String getX509AlgoName() {
+ final String x509Name = System.getProperty(X509_ALGO_NAME_PROPERTY);
+ if (x509Name != null && !x509Name.isEmpty()) {
+ return x509Name;
+ }
+ final String jvmVendor = System.getProperty("java.vendor");
+ if (jvmVendor.startsWith("IBM")) {
+ return "IbmX509";
+ }
+ return "SunX509";
+ }
+
+ /**
+ * Internal class for communicating a pair of KeyStore and password
+ */
+ private static class KeyStoreInfo {
+ private final String ksFile;
+ private final KeyStore ks;
+ private final char[] ksPwd;
+
+ private KeyStoreInfo(String ksFile, KeyStore ks, char[] ksPwd) {
+ this.ksFile = ksFile;
+ this.ks = ks;
+ this.ksPwd =
+ (ksPwd == null) ? 
null : Arrays.copyOf(ksPwd, ksPwd.length); + } + + private void clearPassword() { + if (ksPwd != null) { + Arrays.fill(ksPwd, ' '); + } + } + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLDNAuthenticator.java b/src/com/sleepycat/je/rep/utilint/net/SSLDNAuthenticator.java new file mode 100644 index 0000000..1fea01b --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLDNAuthenticator.java @@ -0,0 +1,63 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import javax.net.ssl.SSLSession; + +import com.sleepycat.je.rep.net.SSLAuthenticator; +import com.sleepycat.je.rep.net.InstanceParams; + +/** + * This is an implementation of SSLAuthenticator which authenticates based + * on the Distinguished Name (DN) in the SSL peer's certificate. Matching + * is done using Java regular expressions against the RFC1779 normalized + * DN. This may be used to match against the complete DN or just a portion, + * such as the CN portion. + */ + +public class SSLDNAuthenticator + extends SSLDNMatcher + implements SSLAuthenticator { + + /** + * Construct an SSLDNAuthenticator + * + * @param params The parameter for authentication creation. This class + * requires a Java regular expression to be applied to the subject + * common name. + */ + public SSLDNAuthenticator(InstanceParams params) { + super(params); + } + + /* + * Based on the information in the SSLSession object, should the peer + * be trusted as an internal entity? This should be called only after + * The SSL handshake has completed. + */ + @Override + public boolean isTrusted(SSLSession sslSession) { + return peerMatches(sslSession); + } + + /** + * Verify that the string is a valid pattern. + * @throws IllegalArgumentException if not a valid pattern. + */ + public static void validate(String regex) + throws IllegalArgumentException { + + validateRegex(regex); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLDNHostVerifier.java b/src/com/sleepycat/je/rep/utilint/net/SSLDNHostVerifier.java new file mode 100644 index 0000000..bf586ea --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLDNHostVerifier.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLSession; + +import com.sleepycat.je.rep.net.InstanceParams; + +/** + * This is an implementation of HostnameVerifier, which is intended to verify + * that the host to which we are connected is valid. 
This implementation is + * designed for the case where it is expected that the server's certificate + * does not match the host name, but instead, contains a well-known + * distinguished name (DN). This check verifies that the DN matches + * expectations. + *
        + * Matching is done using Java regular expressions against the RFC1779 + * normalized DN. The regular expression is applied against the entire DN + * string, but the regular expression could be constructed to treat only a + * portion of it as relevant. + */ + +public class SSLDNHostVerifier + extends SSLDNMatcher + implements HostnameVerifier { + + /** + * Construct an SSLDNHostVerifier + * + * @param params The parameter for authentication creation. This class + * requires a Java regular expression to be applied to the subject + * common name. + */ + public SSLDNHostVerifier(InstanceParams params) { + super(params); + } + + /** + * Checks whether an SSL connection has been made to the intended target. + * This should be called only after the SSL handshake has completed. + * + * @param targetHost the intended target of a network connection + * This parameter is not used by this implementation. + * @param sslSession the SSLSession that has been set up for the connection + * @return true if sslSession indicates that the connection has been made + * to the correct host + */ + @Override + public boolean verify(String targetHost, SSLSession sslSession) { + return peerMatches(sslSession); + } + + /** + * Verify that the string is a valid pattern. + * @throws IllegalArgumentException if not a valid pattern. + */ + public static void validate(String regex) + throws IllegalArgumentException { + + validateRegex(regex); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLDNMatcher.java b/src/com/sleepycat/je/rep/utilint/net/SSLDNMatcher.java new file mode 100644 index 0000000..ca2a16b --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLDNMatcher.java @@ -0,0 +1,95 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import java.security.Principal; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.security.auth.x500.X500Principal; + +import com.sleepycat.je.rep.net.InstanceParams; + +/** + * This is an implementation of SSLAuthenticator which authenticates based + * on the Distinguished Name (DN) in the SSL peer's certificate. Matching + * is done using Java regular expressions against the RFC1779-formatted DN. + * This is typically used to match against the CN portion of the name. + */ + +class SSLDNMatcher { + + private final Pattern pattern; + + /** + * Construct an SSLDNMatcher + * + * @param params The instantiation params. The classParams must be + * a pattern to be matched to a Distinguished Name in an SSL certificate. + * The match pattern must be a valid Java regular expression. + * @throws IllegalArgumentException if the pattern is not a valid + * regular expression + */ + SSLDNMatcher(InstanceParams params) + throws IllegalArgumentException { + + this.pattern = compileRegex(params.getClassParams()); + } + + /* + * Check whether the peer certificate matches the configured expression. 
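+ * For example, a pattern such as "CN=node1\.example\.com.*" (name
+ * invented for illustration) matches any RFC1779 peer name that
+ * begins with that CN component.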
+ */ + public boolean peerMatches(SSLSession sslSession) { + Principal principal = null; + try { + principal = sslSession.getPeerPrincipal(); + } catch (SSLPeerUnverifiedException pue) { + return false; + } + + if (principal != null) { + if (principal instanceof X500Principal) { + final X500Principal x500Principal = (X500Principal) principal; + final String name = + x500Principal.getName(X500Principal.RFC1779); + final Matcher m = pattern.matcher(name); + if (m.matches()) { + return true; + } + } + } + return false; + } + + private static Pattern compileRegex(String regex) + throws IllegalArgumentException { + try { + return Pattern.compile(regex); + } catch(PatternSyntaxException pse) { + throw new IllegalArgumentException( + "pattern is invalid", pse); + } + } + + static void validateRegex(String regex) + throws IllegalArgumentException { + + /* ignore the result */ + compileRegex(regex); + } +} + + diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLDataChannel.java b/src/com/sleepycat/je/rep/utilint/net/SSLDataChannel.java new file mode 100644 index 0000000..1eab234 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLDataChannel.java @@ -0,0 +1,840 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import static java.util.logging.Level.FINE; +import static java.util.logging.Level.INFO; +import static javax.net.ssl.SSLEngineResult.HandshakeStatus; +import static javax.net.ssl.SSLEngineResult.Status; + +import java.io.IOException; +import java.net.SocketException; +import java.nio.BufferOverflowException; +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; +import java.util.concurrent.locks.ReentrantLock; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLSession; + +import com.sleepycat.je.rep.net.InstanceLogger; +import com.sleepycat.je.rep.net.SSLAuthenticator; + +/** + * SSLDataChannel provides SSL-based communications on top of a SocketChannel. + * We attempt to maintain a degree of compatibility with SocketChannel + * in terms of request completion semantics. In particular, + * If in blocking mode: + * read() will return at least one byte if the buffer has room + * write() will write the entire buffer + * If in non-blocking mode: + * read() and write are not guaranteed to consume or produce anything. + */ +public class SSLDataChannel extends AbstractDataChannel { + /** + * The SSLEngine that will manage the secure operations. + */ + private final SSLEngine sslEngine; + + /** + * raw bytes received from the SocketChannel - not yet unwrapped. + */ + private final ByteBuffer netRecvBuffer; + + /** + * raw bytes to be sent to the wire - already wrapped + */ + private final ByteBuffer netXmitBuffer; + + /** + * Bytes unwrapped and ready for application consumption. + */ + private final ByteBuffer appRecvBuffer; + + /** + * A dummy buffer used during handshake operations. 
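+ * SSLEngine.wrap() requires a source buffer even when it only
+ * produces handshake data, so a one-byte placeholder suffices.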
+ */ + private final ByteBuffer emptyXmitBuffer; + + /** + * Lock object for protection of appRecvBuffer, netRecvBuffer and SSLEngine + * unwrap() operations + */ + private final ReentrantLock readLock = new ReentrantLock(); + + /** + * Lock object for protection of netXmitBuffer and SSLEngine wrap() + * operations + */ + private final ReentrantLock writeLock = new ReentrantLock(); + + /* Set to true if we have closed the underlying socketChannel */ + private boolean channelClosed = false; + + /* + * Remember whether we did a closeInbound already. + */ + private volatile boolean sslInboundClosed = false; + + /** + * The String identifying the target host that we are connecting to, if + * this channel was created in client context. + */ + private final String targetHost; + + /** + * Possibly null authenticator object used for checking whether the + * peer for the negotiated session should be trusted. + */ + private final SSLAuthenticator authenticator; + + /** + * Possibly null host verifier object used for checking whether the + * peer for the negotiated session is correct based on the connection + * target. + */ + private final HostnameVerifier hostVerifier; + + /** + * Set to true when a handshake completes and a non-null authenticator + * acknowledges the session as trusted. + */ + private volatile boolean peerTrusted = false; + + private final InstanceLogger logger; + + /** + * Construct an SSLDataChannel given a SocketChannel and an SSLEngine + * + * @param socketChannel a SocketChannel over which SSL communcation will + * occur. This should generally be connected, but that is not + * absolutely required until the first read/write operation. + * @param sslEngine an SSLEngine instance that will control the SSL + * interaction with the peer. + */ + public SSLDataChannel(SocketChannel socketChannel, + SSLEngine sslEngine, + String targetHost, + HostnameVerifier hostVerifier, + SSLAuthenticator authenticator, + InstanceLogger logger) { + + super(socketChannel); + this.sslEngine = sslEngine; + this.targetHost = targetHost; + this.authenticator = authenticator; + this.hostVerifier = hostVerifier; + this.logger = logger; + SSLSession sslSession = sslEngine.getSession(); + + /* Determine the required buffer sizes */ + int netBufferSize = sslSession.getPacketBufferSize(); + int appBufferSize = sslSession.getApplicationBufferSize(); + + /* allocate the buffers */ + this.emptyXmitBuffer = ByteBuffer.allocate(1); + this.netXmitBuffer = ByteBuffer.allocate(3*netBufferSize); + this.appRecvBuffer = ByteBuffer.allocate(2*appBufferSize); + this.netRecvBuffer = ByteBuffer.allocate(2*netBufferSize); + } + + /** + * Is the channel encrypted? + * @return true if the channel is encrypted + */ + @Override + public boolean isSecure() { + return true; + } + + /** + * Is the channel capable of determining peer trust? + * In this case, we are capable only if the application has configured an + * SSL authenticator + * + * @return true if this data channel is capable of determining trust + */ + @Override + public boolean isTrustCapable() { + return authenticator != null; + } + + /** + * Is the channel peer trusted? + * A channel is trusted if the peer should be treated as authenticated. + * The meaning of this is context dependent. The channel will only be + * trusted if the configured peer authenticator says it should be trusted, + * so the creator of this SSLDataChannel knows what "trusted" means. 
+ * + * @return true if the SSL peer should be trusted + */ + @Override + public boolean isTrusted() { + return peerTrusted; + } + + /** + * Read data into the toFill data buffer. + * + * @param toFill the data buffer into which data will be read. This buffer + * is expected to be ready for a put. It need not be empty. + * @return the count of bytes read into toFill. + */ + @Override + public int read(ByteBuffer toFill) throws IOException, SSLException { + return (int) read(new ByteBuffer[] { toFill }, 0, 1); + } + + @Override + public long read(ByteBuffer[] toFill) throws IOException, SSLException { + return read(toFill, 0, toFill.length); + } + + @Override + public long read(ByteBuffer toFill[], int offset, int length) + throws IOException, SSLException { + + if ((offset < 0) || + (length < 0) || + (offset > toFill.length - length)) { + throw new IndexOutOfBoundsException(); + } + + /* + * Short-circuit if there's no work to be done at this time. This + * avoids an unnecessary read() operation from blocking. + */ + int toFillRemaining = 0; + for (int i = offset; i < offset + length; ++i) { + toFillRemaining += toFill[i].remaining(); + } + if (toFillRemaining <= 0) { + return 0; + } + + /* + * In non-blocking mode, a preceding write operation might not have + * completed. + */ + if (!socketChannel.isBlocking()) { + flush_internal(); + } + + /* + * If we have data that is already unwrapped and ready to transfer, do + * it now + */ + readLock.lock(); + try { + if (appRecvBuffer.position() > 0) { + appRecvBuffer.flip(); + final int count = transfer(appRecvBuffer, + toFill, offset, length); + appRecvBuffer.compact(); + return count; + } + } finally { + readLock.unlock(); + } + + int readCount = 0; + while (readCount == 0) { + if (sslEngine.isInboundDone()) { + return -1; + } + + processAnyHandshakes(); + + /* See if we have unwrapped data available */ + readLock.lock(); + try { + if (appRecvBuffer.position() > 0) { + appRecvBuffer.flip(); + readCount = transfer(appRecvBuffer, + toFill, offset, length); + appRecvBuffer.compact(); + break; + } + } finally { + readLock.unlock(); + } + + if (sslEngine.getHandshakeStatus() == + HandshakeStatus.NOT_HANDSHAKING) { + + boolean progress = false; + readLock.lock(); + try { + if (netRecvBuffer.position() > 0) { + /* There is some data in the network buffer that may be + * able to be unwrapped. If so, we'll try to unwrap it. + * If that fails, then we may need more network data. + */ + final int initialPos = netRecvBuffer.position(); + netRecvBuffer.flip(); + final SSLEngineResult engineResult = + sslEngine.unwrap(netRecvBuffer, appRecvBuffer); + netRecvBuffer.compact(); + + final int updatedPos = netRecvBuffer.position(); + if (updatedPos != initialPos) { + /* We did something */ + progress = true; + } + + switch (engineResult.getStatus()) { + case BUFFER_UNDERFLOW: + /* Not enough data to do anything useful. 
*/ + break; + + case BUFFER_OVERFLOW: + /* Shouldn't happen, but apparently there's not + * enough space in the application receive buffer */ + throw new BufferOverflowException(); + + case CLOSED: + /* We apparently got a CLOSE_NOTIFY */ + socketChannel.socket().shutdownInput(); + break; + + case OK: + break; + } + } + + if (!progress) { + final int count = socketChannel.read(netRecvBuffer); + + if (count < 0) { + readCount = count; + } else if (count == 0) { + /* Presumably we are in non-blocking mode */ + break; + } + } + } finally { + readLock.unlock(); + } + } + } + + if (readCount < 0) { + /* + * This will throw an SSLException if we haven't yet received a + * close_notify. + */ + sslEngine.closeInbound(); + sslInboundClosed = true; + } + + if (sslEngine.isInboundDone()) { + return -1; + } + + return readCount; + } + + @Override + public int write(ByteBuffer toSend) throws IOException, SSLException { + return (int) write(new ByteBuffer[] { toSend }, 0, 1); + } + + @Override + public long write(ByteBuffer[] toSend) throws IOException, SSLException { + return write(toSend, 0, toSend.length); + } + + @Override + public long write(ByteBuffer[] toSend, int offset, int length) + throws IOException, SSLException { + + if ((offset < 0) || + (length < 0) || + (offset > toSend.length - length)) { + throw new IndexOutOfBoundsException(); + } + + int toSendRemaining = 0; + for (int i = offset; i < offset + length; ++i) { + toSendRemaining += toSend[i].remaining(); + } + if (toSendRemaining == 0) { + return 0; + } + final int toSendTotal = toSendRemaining; + + /* + * Probably not needed, but just in case there's a backlog, start with + * a flush to clear out the network transmit buffer. + */ + flush_internal(); + + while (true) { + writeLock.lock(); + try { + final SSLEngineResult engineResult = + sslEngine.wrap(toSend, offset, length, netXmitBuffer); + + toSendRemaining -= engineResult.bytesConsumed(); + + switch (engineResult.getStatus()) { + case BUFFER_OVERFLOW: + /* + * Although we are flushing as part of the loop, we can + * still receive this because flush_internal isn't + * guaranteed to flush everything. + */ + break; + + case BUFFER_UNDERFLOW: + /* Should not be possible here */ + throw new BufferUnderflowException(); + + case CLOSED: + throw new SSLException( + "Attempt to write to a closed SSL Channel"); + + case OK: + break; + } + } finally { + writeLock.unlock(); + } + + processAnyHandshakes(); + flush_internal(); + + if (toSendRemaining == 0 || !socketChannel.isBlocking()) { + break; + } + } + + return toSendTotal - toSendRemaining; + } + + /** + * Attempt to flush any pending writes to the underlying socket buffer. + * The caller should ensure that it is the only thread accessing the + * DataChannel in order that the return value be meaningful. + * + * @return flush status + */ + @Override + public FlushStatus flush() + throws IOException { + + int n = flush_internal(); + if (writeLock.tryLock()) { + try { + SSLEngineResult.HandshakeStatus hstatus = + sslEngine.getHandshakeStatus(); + switch (hstatus) { + case NEED_TASK: + return FlushStatus.NEED_TASK; + case NEED_UNWRAP: + return FlushStatus.NEED_READ; + case NEED_WRAP: + /* + * We should not be here if we are the only thread doing + * handshake, so there must be another thread, they will + * flush after they wrap, our job is done here. 
+ */ + return FlushStatus.DONE; + case FINISHED: + case NOT_HANDSHAKING: + break; + default: + assert false : "Unexpected handshake status."; + } + + if (n == 0) { + /* + * It is possible that there was nothing to flush last time + * we flushed but someone wrote something before we + * acquired the lock, so we flush again here + */ + n = flush_internal(); + } + + final int pos = netXmitBuffer.position(); + + if (pos == 0) { + return FlushStatus.DONE; + } + + if (n != 0) { + return FlushStatus.AGAIN; + } + + /* Here n == 0 and pos != 0, i.e., socket write busy. */ + return FlushStatus.WRITE_BUSY; + } finally { + writeLock.unlock(); + } + } + + /* + * If we weren't able to acquire the write lock, we can't be sure that + * everything has been flushed, and there's a good chance that someone + * else is writing (which the caller should have protected against in + * order to get a reliable answer). Just ask the caller to flush again. + */ + return FlushStatus.AGAIN; + } + + /** + * If any data is queued up to be sent in the network transmit buffer, try + * to push it out. + */ + private int flush_internal() throws IOException { + + int count = 0; + + /* + * Don't insist on getting a lock. If someone else has it, they will + * probably flush it for us. + */ + if (writeLock.tryLock()) { + try { + if (netXmitBuffer.position() == 0) { + return 0; + } + netXmitBuffer.flip(); + + /* + * try/finally to keep things clean, in case the socket channel + * gets closed + */ + try { + count = socketChannel.write(netXmitBuffer); + } finally { + netXmitBuffer.compact(); + } + } finally { + writeLock.unlock(); + } + } + return count; + } + + @Override + public void close() throws IOException, SSLException { + + try { + flush_internal(); + + if (!sslEngine.isOutboundDone()) { + sslEngine.closeOutbound(); + processAnyHandshakes(); + } else if (!sslEngine.isInboundDone()) { + if (sslInboundClosed) { + /* + * We only expect one handshake operation (the close) to + * happen at this point + */ + processOneHandshake(); + } + } + } finally { + synchronized(this) { + if (!channelClosed) { + channelClosed = true; + socketChannel.close(); + } + } + } + } + + @Override + public boolean isOpen() { + return socketChannel.isOpen(); + } + + /** + * Transfer as much data as possible from the src buffer to the dst + * buffers. + * + * @param src the source ByteBuffer - it is expected to be ready for a get. + * @param dsts the destination array of ByteBuffers, each of which is + * expected to be ready for a put. + * @param offset the offset within the buffer array of the first buffer + * into which bytes are to be transferred. + * @param length the maximum number of buffers to be accessed + * @return The number of bytes transfered from src to dst + */ + private int transfer(ByteBuffer src, + ByteBuffer[] dsts, + int offset, + int length) { + + int transferred = 0; + for (int i = offset; i < offset + length; ++i) { + final ByteBuffer dst = dsts[i]; + final int space = dst.remaining(); + + if (src.remaining() > space) { + /* not enough room for it all */ + final ByteBuffer slice = src.slice(); + slice.limit(space); + dst.put(slice); + src.position(src.position() + space); + transferred += space; + } else { + transferred += src.remaining(); + dst.put(src); + break; + } + } + return transferred; + } + + /** + * Repeatedly perform handshake operations while there is still + * more work to do. 
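+ *
+ * This follows the canonical SSLEngine pattern: poll
+ * getHandshakeStatus() and respond with wrap(), unwrap(), or by
+ * running delegated tasks until the engine settles at
+ * NOT_HANDSHAKING.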
+ */ + private void processAnyHandshakes() throws IOException { + + while (processOneHandshake()) { + /* do nothing */ + } + } + + /* + * Attempt a handshake step. + * + * @return true if it is appropriate to call this again immediately. + */ + private boolean processOneHandshake() throws IOException { + + int readCount = 0; + int flushCount = 0; + SSLEngineResult engineResult = null; + + switch (sslEngine.getHandshakeStatus()) { + case FINISHED: + /* + * Just finished handshaking. We shouldn't actually see this here + * as it is only supposed to be produced by a wrap or unwrap. + */ + return false; + + case NEED_TASK: + /* + * Need results from delegated tasks before handshaking can + * continue, so do them now. We assume that the tasks are done + * inline, and so we can return true here. + */ + runDelegatedTasks(); + return true; + + case NEED_UNWRAP: + { + boolean unwrapped = false; + + /* Attempt to flush anything that is pending */ + try { + flush_internal(); + } catch (SocketException se) { + } + + /* + * Attempt to process anything that is pending in the + * netRecvBuffer. + */ + readLock.lock(); + try { + if (netRecvBuffer.position() > 0) { + netRecvBuffer.flip(); + engineResult = + sslEngine.unwrap(netRecvBuffer, appRecvBuffer); + netRecvBuffer.compact(); + if (engineResult.getStatus() == Status.OK) { + unwrapped = true; + } + } + + if (!unwrapped && !sslEngine.isInboundDone()) { + /* + * Either we had nothing in the netRecvBuffer or there + * was not enough data to unwrap, so let's try getting + * some more. + * + * If a re-negotiation is happening and the + * appRecvBuffer was full, we could have received a + * BUFFER_OVERFLOW engineResult, in which case a read() + * is not really helpful here, but it's harmless and is + * a rare occurrence, so we won't worry about it. + */ + readCount = socketChannel.read(netRecvBuffer); + if (readCount < 0) { + try { + sslEngine.closeInbound(); + sslInboundClosed = true; + } catch (SSLException ssle) { + // ignore + } + } + + netRecvBuffer.flip(); + engineResult = + sslEngine.unwrap(netRecvBuffer, appRecvBuffer); + netRecvBuffer.compact(); + } + } finally { + readLock.unlock(); + } + } + + break; + + case NEED_WRAP: + /* + * Must send data to the remote side before handshaking can + * continue, so wrap() must be called. + */ + writeLock.lock(); + try { + engineResult = sslEngine.wrap(emptyXmitBuffer, netXmitBuffer); + } finally { + writeLock.unlock(); + } + + if (engineResult.getStatus() == SSLEngineResult.Status.CLOSED) { + /* + * If the engine is already closed, flush may fail, and that's + * ok, so squash any exceptions that happen + */ + try { + /* ignore the flush count */ + flush_internal(); + } catch (SocketException se) { + } + } else { + flushCount = flush_internal(); + } + break; + + case NOT_HANDSHAKING: + /* Not currently handshaking */ + return false; + } + + /* + * We may have done a wrap or unwrap above. Check the engineResult + */ + + if (engineResult != null) { + if (engineResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { + /* + * Handshaking just completed. Here is our chance to do any + * session validation that might be required. 
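+ * In client mode the HostnameVerifier is consulted; in server mode
+ * the SSLAuthenticator is. These are the two trust roles this
+ * channel supports.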
+ */ + if (sslEngine.getUseClientMode()) { + if (hostVerifier != null) { + peerTrusted = + hostVerifier.verify(targetHost, + sslEngine.getSession()); + if (peerTrusted) { + logger.log(FINE, + "SSL host verifier reports that " + + "connection target is valid"); + } else { + logger.log(INFO, + "SSL host verifier reports that " + + "connection target is NOT valid"); + throw new IOException( + "Server identity could not be verified"); + } + } + } else { + if (authenticator != null) { + peerTrusted = + authenticator.isTrusted(sslEngine.getSession()); + if (peerTrusted) { + logger.log(FINE, + "SSL authenticator reports that " + + "channel is trusted"); + } else { + logger.log(INFO, + "SSL authenticator reports that " + + "channel is NOT trusted"); + } + } + } + } + + switch (engineResult.getStatus()) { + case BUFFER_UNDERFLOW: + /* + * This must have resulted from an unwrap, meaning we need to + * do another read. If the last read did something useful, + * tell the caller to call us again. + */ + return readCount > 0; + + case BUFFER_OVERFLOW: + /* + * Either we were processing an unwrap and the appRecvBuffer is + * full or we were processing a wrap and the netXmitBuffer is + * full. For the unwrap case, the only way we can make progress + * is for the application to receive control. For the wrap + * case, we may be able to make progress if the flush + * did something useful. + */ + if ((sslEngine.getHandshakeStatus() == + HandshakeStatus.NEED_WRAP) && + flushCount > 0) { + return true; + } + return false; + + case CLOSED: + if (sslEngine.isOutboundDone()) { + try { + socketChannel.socket().shutdownOutput(); + } catch (Exception e) { + } + } + return false; + + case OK: + break; + } + } + + /* + * Tell the caller to try again. Cases where no handshake progress + * can be made should return false above. + */ + return true; + } + + private void runDelegatedTasks() { + Runnable task; + /* + * In theory, we could run these as a background job, but no need for + * that level of complication. Our server doesn't serve a large number + * of clients. + */ + while ((task = sslEngine.getDelegatedTask()) != null) { + task.run(); + } + } +} + + diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLMirrorAuthenticator.java b/src/com/sleepycat/je/rep/utilint/net/SSLMirrorAuthenticator.java new file mode 100644 index 0000000..4851556 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLMirrorAuthenticator.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import javax.net.ssl.SSLSession; + +import com.sleepycat.je.rep.net.SSLAuthenticator; +import com.sleepycat.je.rep.net.InstanceParams; + +/** + * This is an implementation of SSLAuthenticator that authenticates based on + * the certificate of the client matching the certificate that we would use when + * operating as a client. + */ + +public class SSLMirrorAuthenticator + extends SSLMirrorMatcher + implements SSLAuthenticator { + + /** + * Construct an SSLMirrorAuthenticator + * + * @param params the instantiation parameters. 
+ * @throws IllegalArgumentException if the instance cannot be created due + * to a problem related to the input parameters + */ + public SSLMirrorAuthenticator(InstanceParams params) + throws IllegalArgumentException { + + super(params, false); + } + + /* + * Checks whether the peer should be trusted based on the information in + * the SSLSession object. This should be called only after the SSL + * handshake has completed. + */ + @Override + public boolean isTrusted(SSLSession sslSession) { + return peerMatches(sslSession); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLMirrorHostVerifier.java b/src/com/sleepycat/je/rep/utilint/net/SSLMirrorHostVerifier.java new file mode 100644 index 0000000..17cce31 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLMirrorHostVerifier.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLSession; + +import com.sleepycat.je.rep.net.InstanceParams; + +/** + * This is an implementation of HostnameVerifier, which is intended to verify + * that the host to which we are connected is valid. This implementation + * authenticates based on the Distinguished Name (DN) in the certificate of + * the server matching the DN in the certificate that we would use when + * operating as a server. This is useful if deploying with a common SSL key + * for all hosts. + */ + +public class SSLMirrorHostVerifier + extends SSLMirrorMatcher + implements HostnameVerifier { + + /** + * Construct an SSLMirrorHostVerifier + * + * @param params the instantiation parameters. + * @throws IllegalArgumentException if the instance cannot be created due + * to a problem related to the input parameters + */ + public SSLMirrorHostVerifier(InstanceParams params) + throws IllegalArgumentException { + + super(params, true); + } + + /** + * Checks whether an SSL connection has been made to the intended target. + * This should be called only after the SSL handshake has completed. + * + * @param targetHost the host to which a connection is being established. + * This parameter is not used by this implementation. + * @param sslSession the established SSL session + * @return true if the sslSession is set up with the correct host + */ + @Override + public boolean verify(String targetHost, SSLSession sslSession) { + return peerMatches(sslSession); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLMirrorMatcher.java b/src/com/sleepycat/je/rep/utilint/net/SSLMirrorMatcher.java new file mode 100644 index 0000000..21b1bc1 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLMirrorMatcher.java @@ -0,0 +1,172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import static java.util.logging.Level.INFO; + +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.Principal; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.Enumeration; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.security.auth.x500.X500Principal; + +import com.sleepycat.je.rep.ReplicationSSLConfig; +import com.sleepycat.je.rep.net.InstanceContext; +import com.sleepycat.je.rep.net.InstanceLogger; +import com.sleepycat.je.rep.net.InstanceParams; + +/** + * Common base class for mirror comparisons. Supports both authenticator and + * host verifier implementations. + */ + +class SSLMirrorMatcher { + + /* + * The Principal that represents us when in the expected peer's ssl mode. + */ + final private Principal ourPrincipal; + final private InstanceLogger logger; + + /** + * Construct an SSLMirrorMatcher + * + * @param params The instantiation parameters. + * @param clientMode set to true if the matcher will be evaluated + * as a client that has a server as a peer, or false if it will be + * evaluated as a server that has received a connection from a client. + * @throws IllegalArgumentException if the instance cannot be created due + * to a problem related to the input parameters + */ + public SSLMirrorMatcher(InstanceParams params, boolean clientMode) + throws IllegalArgumentException { + + ourPrincipal = determinePrincipal(params.getContext(), clientMode); + if (ourPrincipal == null) { + throw new IllegalArgumentException( + "Unable to determine a local principal for comparison " + + "with peer principals"); + } + logger = params.getContext().getLoggerFactory().getLogger(getClass()); + } + + /** + * Checks whether the SSL session peer's certificate DN matches our own. + * + * @param sslSession the SSL session that has been established with a peer + * @return true if the peer's certificate DN matches ours + */ + public boolean peerMatches(SSLSession sslSession) { + + if (ourPrincipal == null) { + return false; + } + + /* + * Get the peer principal, which should also be an X500Principal. + * We validate that here. + */ + Principal peerPrincipal = null; + try { + peerPrincipal = sslSession.getPeerPrincipal(); + } catch (SSLPeerUnverifiedException pue) { + return false; + } + + if (peerPrincipal == null || + ! (peerPrincipal instanceof X500Principal)) { + logger.log( + INFO, + "Unable to attempt peer validation - peer Principal is: " + + peerPrincipal); + return false; + } + + return ourPrincipal.equals(peerPrincipal); + } + + /** + * Attempt to determine the Principal that we take on when connecting + * in client or server context based on the ReplicationNetworkConfig. + * If we are unable to determine that principal, return null. 
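Editor's note: peerMatches() above ultimately reduces to X500Principal equality. A small stand-alone demonstration of that comparison with made-up DNs; X500Principal.equals compares canonicalized names, so case and spacing differences in the RDNs still match:

    import javax.security.auth.x500.X500Principal;

    public class MirrorMatchSketch {
        public static void main(String[] args) {
            // Our own identity and two hypothetical peer identities.
            X500Principal ours = new X500Principal("CN=je-node, O=Example");
            X500Principal samePeer = new X500Principal("cn=je-node, o=Example");
            X500Principal otherPeer = new X500Principal("CN=intruder, O=Example");

            // Canonicalized comparison: formatting differences do not
            // break the match, a different subject does.
            System.out.println(ours.equals(samePeer));  // true
            System.out.println(ours.equals(otherPeer)); // false
        }
    }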
+ */ + private Principal determinePrincipal( + InstanceContext context, boolean clientMode) + throws IllegalArgumentException { + + final ReplicationSSLConfig config = + (ReplicationSSLConfig) context.getRepNetConfig(); + + /* + * Determine what alias would be used. It is allowable for this to be + * null. + */ + String aliasProp = clientMode ? + config.getSSLClientKeyAlias() : + config.getSSLServerKeyAlias(); + + final KeyStore keyStore = SSLChannelFactory.readKeyStore(context); + + if (aliasProp == null || aliasProp.isEmpty()) { + /* Since we weren't told which one to use, there better be + * only one option, or this might behave unexpectedly. */ + try { + if (keyStore.size() < 1) { + logger.log(INFO, "KeyStore is empty"); + return null; + } else if (keyStore.size() > 1) { + logger.log(INFO, "KeyStore has multiple entries but no " + + "alias was specified. Using the first one " + + "available."); + } + final Enumeration e = keyStore.aliases(); + aliasProp = e.nextElement(); + } catch (KeyStoreException kse) { + throw new IllegalArgumentException( + "Error accessing aliases from the keystore", kse); + } + } + + Certificate cert = null; + try { + cert = keyStore.getCertificate(aliasProp); + } catch (KeyStoreException kse) { + /* Shouldn't be possible */ + throw new IllegalArgumentException( + "Error accessing certificate with alias " + aliasProp + + " from the keystore", kse); + } + + if (cert == null) { + logger.log(INFO, "No certificate for alias " + aliasProp + + " found in KeyStore"); + throw new IllegalArgumentException( + "Unable to find a certificate in the keystore"); + } + + if (!(cert instanceof X509Certificate)) { + logger.log(INFO, "The certificate for alias " + aliasProp + + " is not an X509Certificate."); + throw new IllegalArgumentException( + "Unable to find a valid certificate in the keystore"); + } + + final X509Certificate x509Cert = (X509Certificate) cert; + return x509Cert.getSubjectX500Principal(); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SSLStdHostVerifier.java b/src/com/sleepycat/je/rep/utilint/net/SSLStdHostVerifier.java new file mode 100644 index 0000000..d85371a --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SSLStdHostVerifier.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import static java.util.logging.Level.INFO; + +import java.math.BigInteger; +import java.util.Collection; +import java.util.List; +import java.security.Principal; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.security.cert.CertificateParsingException; +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.security.auth.x500.X500Principal; + +import com.sleepycat.je.rep.net.InstanceParams; +import com.sleepycat.je.rep.net.InstanceLogger; + +/** + * This is an implementation of HostnameVerifier which verifies that the + * host to which we are connected is valid using the standard SSL matching + * rules. 
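Editor's note: determinePrincipal() above boils down to: open the keystore, choose an alias, and read the subject DN off that alias's certificate. A hedged sketch of the same steps using only java.security APIs; the keystore path and password are placeholders, not values from this patch:

    import java.io.FileInputStream;
    import java.security.KeyStore;
    import java.security.cert.Certificate;
    import java.security.cert.X509Certificate;
    import java.util.Enumeration;
    import javax.security.auth.x500.X500Principal;

    public class KeyStorePrincipalSketch {
        public static void main(String[] args) throws Exception {
            KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
            // Placeholder path and password.
            try (FileInputStream in = new FileInputStream("/path/to/keystore")) {
                ks.load(in, "changeit".toCharArray());
            }

            // No alias configured: fall back to the first one, as above.
            Enumeration<String> aliases = ks.aliases();
            if (!aliases.hasMoreElements()) {
                throw new IllegalArgumentException("KeyStore is empty");
            }
            String alias = aliases.nextElement();

            Certificate cert = ks.getCertificate(alias);
            if (!(cert instanceof X509Certificate)) {
                throw new IllegalArgumentException("Not an X509Certificate");
            }
            X500Principal ours =
                ((X509Certificate) cert).getSubjectX500Principal();
            System.out.println(alias + " -> " + ours);
        }
    }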
That is, the host string that we are using to connect with
+ * must have a match to the common name or a subject alternative name.
+ */
+public class SSLStdHostVerifier implements HostnameVerifier {
+
+    private final InstanceLogger logger;
+
+    private final static int ALTNAME_DNS = 2;
+    private final static int ALTNAME_IP = 7;
+
+    /**
+     * Construct an SSLStdHostVerifier
+     */
+    public SSLStdHostVerifier(InstanceParams params) {
+        logger = params.getContext().getLoggerFactory().getLogger(getClass());
+    }
+
+    @Override
+    public boolean verify(String targetHost, SSLSession sslSession) {
+        if (targetHost == null) {
+            return false;
+        }
+
+        Principal principal = null;
+        Certificate[] peerCerts = null;
+        try {
+            principal = sslSession.getPeerPrincipal();
+            peerCerts = sslSession.getPeerCertificates();
+        } catch (SSLPeerUnverifiedException pue) {
+            return false;
+        }
+
+        if (principal != null && principal instanceof X500Principal) {
+            final X500Principal x500Principal = (X500Principal) principal;
+            final String name = x500Principal.getName(X500Principal.RFC1779);
+            if (targetHost.equalsIgnoreCase(name)) {
+                return true;
+            }
+        }
+
+        /* Check for SubjectAlternativeNames */
+        if (peerCerts[0] instanceof X509Certificate) {
+
+            final X509Certificate peerCert = (X509Certificate) peerCerts[0];
+
+            Collection<List<?>> altNames = null;
+            try {
+                altNames = peerCert.getSubjectAlternativeNames();
+            } catch (CertificateParsingException cpe) {
+                final Principal issuerPrinc = peerCert.getIssuerX500Principal();
+                final BigInteger serNo = peerCert.getSerialNumber();
+
+                logger.log(INFO, "Unable to parse peer certificate: " +
+                           "issuer = " + issuerPrinc +
+                           ", serialNumber = " + serNo);
+
+            }
+
+            if (altNames == null) {
+                return false;
+            }
+
+            for (List<?> altName : altNames) {
+                /*
+                 * altName will be a 2-element list, with the first being
+                 * the name type and the second being the "name". For
+                 * DNS and IP entries, the "name" will be a string.
+                 */
+                final int nameType = ((Integer) altName.get(0)).intValue();
+                if (nameType == ALTNAME_IP || nameType == ALTNAME_DNS) {
+                    final String nameValue = (String) altName.get(1);
+                    if (targetHost.equals(nameValue)) {
+                        return true;
+                    }
+                }
+            }
+        }
+        return false;
+    }
+}
+
 diff --git a/src/com/sleepycat/je/rep/utilint/net/SimpleChannelFactory.java b/src/com/sleepycat/je/rep/utilint/net/SimpleChannelFactory.java new file mode 100644 index 0000000..bbfc2ee --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SimpleChannelFactory.java @@ -0,0 +1,55 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint.net;
+
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.net.InstanceParams;
+import com.sleepycat.je.rep.utilint.RepUtils;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.channels.SocketChannel;
+
+/**
+ * A factory class for generating SimpleDataChannel instances based on
+ * SocketChannel instances.
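Editor's note: the subject-alternative-name entries that verify() above matches against can be listed with plain JDK calls. A sketch, assuming a PEM-encoded certificate at a placeholder path, reusing the same name-type codes (2 = DNS, 7 = IP):

    import java.io.FileInputStream;
    import java.security.cert.CertificateFactory;
    import java.security.cert.X509Certificate;
    import java.util.Collection;
    import java.util.List;

    public class SanListingSketch {
        private static final int ALTNAME_DNS = 2;
        private static final int ALTNAME_IP = 7;

        public static void main(String[] args) throws Exception {
            CertificateFactory cf = CertificateFactory.getInstance("X.509");
            X509Certificate cert;
            // Placeholder path; any PEM/DER certificate works here.
            try (FileInputStream in = new FileInputStream("/path/to/cert.pem")) {
                cert = (X509Certificate) cf.generateCertificate(in);
            }

            Collection<List<?>> altNames = cert.getSubjectAlternativeNames();
            if (altNames == null) {
                System.out.println("no subject alternative names");
                return;
            }
            // Each entry is a 2-element list: {nameType, name}.
            for (List<?> altName : altNames) {
                int type = (Integer) altName.get(0);
                if (type == ALTNAME_DNS || type == ALTNAME_IP) {
                    System.out.println(type + ": " + altName.get(1));
                }
            }
        }
    }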
+ */ +public class SimpleChannelFactory implements DataChannelFactory { + + public SimpleChannelFactory() { + } + + /** + * Included for compatibility with the standard DataChannelFactory.Builder + * construction model. + */ + public SimpleChannelFactory(InstanceParams unusedParams) { + } + + @Override + public DataChannel acceptChannel(SocketChannel socketChannel) { + return new SimpleDataChannel(socketChannel); + } + + @Override + public DataChannel connect(InetSocketAddress addr, + ConnectOptions connectOptions) + throws IOException { + + final SocketChannel socketChannel = + RepUtils.openSocketChannel(addr, connectOptions); + return new SimpleDataChannel(socketChannel); + } +} diff --git a/src/com/sleepycat/je/rep/utilint/net/SimpleDataChannel.java b/src/com/sleepycat/je/rep/utilint/net/SimpleDataChannel.java new file mode 100644 index 0000000..9164475 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/SimpleDataChannel.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; + + +/** + * A basic concrete extension of DataChannel. + * This simply delegates operations directly to the underlying SocketChannel + */ +public class SimpleDataChannel extends AbstractDataChannel { + + /** + * Constructor for general use. + * + * @param socketChannel A SocketChannel, which should be connected. + */ + public SimpleDataChannel(SocketChannel socketChannel) { + super(socketChannel); + } + + /* + * The following ByteChannel implementation methods delegate to the wrapped + * channel object. + */ + + @Override + public int read(ByteBuffer dst) throws IOException { + return socketChannel.read(dst); + } + + @Override + public long read(ByteBuffer[] dsts) throws IOException { + return socketChannel.read(dsts); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) + throws IOException { + + return socketChannel.read(dsts, offset, length); + } + + @Override + public void close() throws IOException { + socketChannel.close(); + } + + @Override + public boolean isOpen() { + return socketChannel.isOpen(); + } + + @Override + public int write(ByteBuffer src) throws IOException { + return socketChannel.write(src); + } + + @Override + public long write(ByteBuffer[] srcs) throws IOException { + return socketChannel.write(srcs); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) + throws IOException { + + return socketChannel.write(srcs, offset, length); + } + + /** + * Is the channel encrypted? + */ + @Override + public boolean isSecure() { + return false; + } + + /** + * Is the channel peer trusted? + */ + @Override + public boolean isTrusted() { + return false; + } + + /** + * Is the channel peer trust capable? + */ + @Override + public boolean isTrustCapable() { + return false; + } + + /** + * Returns DISABLED, since this implementation does not do any flushing. 
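Editor's note: SimpleChannelFactory and SimpleDataChannel above are a plain delegation layer over SocketChannel, adding no buffering or encryption. The pattern in miniature as a stand-alone ByteChannel wrapper (the class name is illustrative):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.ByteChannel;
    import java.nio.channels.SocketChannel;

    /* A ByteChannel that simply forwards every operation to a wrapped
     * SocketChannel, the same shape as SimpleDataChannel. */
    class ForwardingByteChannel implements ByteChannel {
        private final SocketChannel delegate;

        ForwardingByteChannel(SocketChannel delegate) {
            this.delegate = delegate;
        }

        @Override public int read(ByteBuffer dst) throws IOException {
            return delegate.read(dst);
        }

        @Override public int write(ByteBuffer src) throws IOException {
            return delegate.write(src);
        }

        @Override public boolean isOpen() {
            return delegate.isOpen();
        }

        @Override public void close() throws IOException {
            delegate.close();
        }
    }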
+ */ + @Override + public FlushStatus flush() { + return FlushStatus.DISABLED; + } +} + diff --git a/src/com/sleepycat/je/rep/utilint/net/package-info.java b/src/com/sleepycat/je/rep/utilint/net/package-info.java new file mode 100644 index 0000000..f2a11cf --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/net/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Network utility classes. + */ +package com.sleepycat.je.rep.utilint.net; diff --git a/src/com/sleepycat/je/rep/utilint/package-info.java b/src/com/sleepycat/je/rep/utilint/package-info.java new file mode 100644 index 0000000..9cbbf54 --- /dev/null +++ b/src/com/sleepycat/je/rep/utilint/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Command line and programmatic utilities. + */ +package com.sleepycat.je.rep.utilint; diff --git a/src/com/sleepycat/je/rep/vlsn/GhostBucket.java b/src/com/sleepycat/je/rep/vlsn/GhostBucket.java new file mode 100644 index 0000000..0bcf5da --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/GhostBucket.java @@ -0,0 +1,164 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.vlsn; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * A ghost bucket stands in as a placeholder for a set of vlsns that are + * unknown. This kind of bucket can only be present at the very beginning of + * the vlsn range. + * + * This fulfills an edge case that can arise when vlsns are inserted out of + * order, and log cleaner truncation lops off the leading edge of the index. + * For example, suppose vlsns were inserted in this order: + + * vlsnIndex.put(vlsn=2, lsn=1/2) + * vlsnIndex.put(vlsn=1, lsn=1/0) + * vlsnIndex.put(vlsn=3, lsn=1/3) + * ... + * vlsnIndex.put(vlsn=5, lsn=2/9) + * vlsnIndex.put(vlsn=4, lsn=2/0) + * vlsnIndex.put(vlsn=6, lsn=2/10) + * .. 
+ * This results in an index that has two buckets. Bucket 1 = {vlsn 2,3} and + * bucket 2 = {vlsn 5,6}. If we log clean file 1, we will truncate log at vlsn + * 3, and the new range will be vlsn 4-> vlsn 6. But the beginning and end of + * each range needs to have a valid bucket, and there is no bucket to represent + * vlsn 4. A GhostBucket is added to the head of the bucket set. + */ +class GhostBucket extends VLSNBucket { + private long firstPossibleLsn; + private long lastPossibleLsn; + + GhostBucket(VLSN ghostVLSN, + long firstPossibleLsn, + long lastPossibleLsn) { + /* + * Use ghostVLSN for the firstVLSN, which will make the own(), + * getFirst, getLast() methods work. + */ + super(DbLsn.getFileNumber(firstPossibleLsn), // fileNumber + 0, // stride + 1, // maxMappings + 1, // maxDistance, + ghostVLSN); // firstVLSN + this.firstPossibleLsn = firstPossibleLsn; + this.lastPossibleLsn = lastPossibleLsn; + dirty = true; + } + + /** + * Ideally, this would be a constructor, but we have to read several + * items off the tuple input first before calling super(); + */ + static GhostBucket makeNewInstance(TupleInput ti) { + VLSN ghostVLSN = new VLSN(ti.readPackedLong()); + long firstLsn = ti.readPackedLong(); + long lastLsn = ti.readPackedLong(); + return new GhostBucket(ghostVLSN, firstLsn, lastLsn); + } + + @Override + boolean isGhost() { + return true; + } + + @Override + void writeToTupleOutput(TupleOutput to) { + to.writePackedLong(firstVLSN.getSequence()); + to.writePackedLong(firstPossibleLsn); + to.writePackedLong(lastPossibleLsn); + } + + /** + * Return a lsn as a starting point for a backward scan. + */ + @Override + public synchronized long getGTELsn(VLSN vlsn) { + return lastPossibleLsn; + } + + /** + * Return a lsn as a starting point for a forward scan. + */ + @Override + synchronized long getLTELsn(VLSN vlsn) { + return firstPossibleLsn; + } + + /** + * There is no mapping for this VLSN, so always return NULL_LSN. + */ + @Override + public synchronized long getLsn(VLSN vlsn) { + return DbLsn.NULL_LSN; + } + + /** + * Return a file number that is less or equal to the first mapped vlsn, + * for use in determining the CBVLSN. + */ + @Override + long getLTEFileNumber() { + return DbLsn.getFileNumber(firstPossibleLsn); + } + + @Override + long getGTEFileNumber() { + return DbLsn.getFileNumber(lastPossibleLsn); + } + + @Override + synchronized boolean put(VLSN vlsn, long lsn) { + throw EnvironmentFailureException.unexpectedState + ("Shouldn't be called"); + } + + @Override + VLSNBucket removeFromHead(EnvironmentImpl envImpl, + VLSN lastDuplicate) { + throw EnvironmentFailureException.unexpectedState + ("Shouldn't be called, only used in recovery merging."); + } + + @Override + void removeFromTail(VLSN startOfDelete, long prevLsn) { + + throw EnvironmentFailureException.unexpectedState + ("Shouldn't be called"); + } + + @Override + int getNumOffsets() { + return 0; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(""); + return sb.toString(); + } +} + diff --git a/src/com/sleepycat/je/rep/vlsn/LogItemCache.java b/src/com/sleepycat/je/rep/vlsn/LogItemCache.java new file mode 100644 index 0000000..cc8434f --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/LogItemCache.java @@ -0,0 +1,121 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
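Editor's note: GhostBucket above persists exactly three packed longs, written by writeToTupleOutput() and read back by makeNewInstance(). A round-trip sketch with the real TupleInput/TupleOutput classes; the numeric values are made up (real LSNs pack a file number and an offset):

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class GhostBucketTupleSketch {
        public static void main(String[] args) {
            TupleOutput out = new TupleOutput();
            out.writePackedLong(4L);    // ghost VLSN sequence
            out.writePackedLong(100L);  // firstPossibleLsn (illustrative)
            out.writePackedLong(250L);  // lastPossibleLsn (illustrative)

            // Read back in the same order, as makeNewInstance() does.
            TupleInput in = new TupleInput(out);
            System.out.println("vlsn=" + in.readPackedLong() +
                               " first=" + in.readPackedLong() +
                               " last=" + in.readPackedLong());
        }
    }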
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.vlsn;
+
+import java.lang.ref.SoftReference;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.sleepycat.je.log.LogItem;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * A no-wait cache used to retain the most recent VLSNs. The Feeders check this
+ * cache first for log entries to send out to the Replicas. Feeders that are
+ * feeding at the most up to date portion of the replication stream will likely
+ * hit in the cache, preventing a lookup in the log buffers or log files.
+ *
+ * The log item cache is made up of soft references so there is never any
+ * guarantee that even the most recent 32 entries are in there.
+ */
+class LogItemCache {
+
+    /*
+     * Must be a power of 2 and small, typically around 32 entries. Increasing
+     * sizes typically yield diminishing returns.
+     */
+    private final int cacheSize;
+    private final int sizeMask;
+
+    /*
+     * Soft reference to array, so that the LogItems can be released when
+     * under GC pressure.
+     */
+    private volatile SoftReference<AtomicReference<LogItem>[]>
+        cacheReference =
+        new SoftReference<AtomicReference<LogItem>[]>(null);
+
+    private final LongStat nHits;
+    private final LongStat nMisses;
+
+    /**
+     * Creates a log item cache of the specified size.
+     *
+     * @param cacheSize it must be a power of two
+     * @param statGroup the statsGroup to which this cache adds its stats
+     * @throws IllegalArgumentException via ReplicatedEnvironment ctor.
+     */
+    LogItemCache(int cacheSize, StatGroup statGroup) {
+        if (Integer.bitCount(cacheSize) != 1) {
+            throw new IllegalArgumentException
+                ("Bad cache size: " + cacheSize + "; it must be a power of 2");
+        }
+        this.cacheSize = cacheSize;
+        sizeMask = cacheSize - 1;
+        nHits = new LongStat(statGroup, VLSNIndexStatDefinition.N_HITS);
+        nMisses = new LongStat(statGroup, VLSNIndexStatDefinition.N_MISSES);
+    }
+
+    void put(VLSN vlsn, LogItem logItem) {
+        getArray()[(int)vlsn.getSequence() & sizeMask].set(logItem);
+    }
+
+    LogItem get(VLSN vlsn) {
+        final LogItem item =
+            getArray()[(int)vlsn.getSequence() & sizeMask].get();
+        if ((item != null) && item.header.getVLSN().equals(vlsn)) {
+            nHits.increment();
+            return item;
+        }
+
+        nMisses.increment();
+        return null;
+    }
+
+    /**
+     * For explicit release of references.
+     */
+    void clear() {
+        for (AtomicReference<LogItem> element : getArray()) {
+            element.set(null);
+        }
+    }
+
+    /**
+     * Returns the cache array, creating a new one, if the GC had cleared the
+     * reference to the earlier one.
+     * <p>
+     * Note that there may be a slight inefficiency if getArray is called
+     * concurrently, and it had been cleared earlier, since it would be
+     * allocated twice and introduce a cache miss. This occurrence is
+     * infrequent enough that it's not worth the overhead of a sync mechanism.
+     *
+     * @return the underlying array, allocating a new one, if the previous one
+     * had been GC'd
+     */
+    @SuppressWarnings("unchecked")
+    private final AtomicReference<LogItem>[] getArray() {
+        AtomicReference<LogItem>[] array = cacheReference.get();
+        if (array == null) {
+            array = new AtomicReference[cacheSize];
+            for (int i=0; i < array.length; i++) {
+                array[i] = new AtomicReference<LogItem>();
+            }
+            cacheReference =
+                new SoftReference<AtomicReference<LogItem>[]>(array);
+        }
+        return array;
+    }
+}
 diff --git a/src/com/sleepycat/je/rep/vlsn/VLSNBucket.java b/src/com/sleepycat/je/rep/vlsn/VLSNBucket.java new file mode 100644 index 0000000..e11d1d2 --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/VLSNBucket.java @@ -0,0 +1,1077 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.vlsn;
+
+import java.io.PrintStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.bind.tuple.TupleBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileReader;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * A VLSNBucket instance represents a set of VLSN->LSN mappings. Buckets are
+ * usually not updated, except at times when the replication stream may have
+ * been reduced in size, by log cleaning or syncup. The VLSNBuckets in the
+ * VLSNIndex's VLSNTracker are written to disk and are persistent. There are
+ * also VLSNBuckets in the temporary recovery-time tracker that are used for
+ * collecting mappings found in the log during recovery.
+ *
+ * VLSNBuckets only hold mappings from a single log file. A single log file
+ * may be mapped by multiple VLSNBuckets though.
+ *
+ * As a tradeoff in space vs time, a VLSNBucket only stores a sparse set of
+ * mappings and the caller must use a VLSNReader to scan the log file and
+ * find any log entries not mapped directly by the bucket. In addition,
+ * the VLSN is not actually stored. Only the offset portion of the LSN is
+ * stored, and the VLSN is intuited by a stride field. Each VLSNBucket
+ * only maps a single file, though a single file may be mapped by several
+ * VLSNBuckets.
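Editor's note, before the VLSNBucket discussion continues: a worked example of the slot arithmetic that LogItemCache.put()/get() above rely on. With a power-of-two size, masking with size - 1 is a cheap modulo, so a later VLSN that maps to the same slot simply overwrites the older entry. A pure-JDK sketch with arbitrary values:

    public class RingIndexSketch {
        public static void main(String[] args) {
            final int cacheSize = 32;          // must be a power of two
            final int sizeMask = cacheSize - 1;
            // Consecutive VLSNs occupy consecutive slots, wrapping at 32.
            for (long vlsn = 30; vlsn <= 35; vlsn++) {
                System.out.println("vlsn " + vlsn + " -> slot " +
                                   ((int) vlsn & sizeMask));
            }
        }
    }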
+ * + * For example, suppose a node had these VLSN->LSN mappings: + * + * VLSN LSN (file/offset) + * 9 10/100 + * 10 10/110 + * 11 10/120 + * 12 10/130 + * 13 10/140 + * 14 11/100 + * 15 11/120 + * + * The mappings in file 10 could be represented by a VLSNBucket with + * a stride of 4. That means the bucket would hold the mappings for + * 9 10/100, + * 13 10/140 + * + * And since the target log file number and the stride is known, the mappings + * can be represented in by the offset alone in this array: {100, 140}, rather + * than storing the whole lsn. + * + * Each bucket can also provide the mapping for the first and last VLSN it + * covers, even if the lastVLSN is not divisible by the stride. This is done to + * support forward and backward scanning. From the example above, the completed + * bucket can provide 9->10/100, 13->10/140, 15 -> 10/160 even though 15 is not + * a stride's worth away from 13. + * + * Because registering a VLSN->LSN mapping is done outside the log write latch, + * any inserts into the VLSNBucket may not be in order. However, when any + * VLSN is registered, we can assume that all VLSNs < that value do exist in + * the log. It's just an accident of timing that they haven't yet been + * registered. Note that out of order inserts into the buckets can create holes + * in the bucket's offset array, or cause the array to be shorter than + * anticipated. + * + * For example, if the insertion order into the bucket is vlsns 9, 15, we'll + * actually only keep an offset array of size 1. We have to be able to handle + * holes in the bucket, and can't count on filling them in when the lagging + * vlsn arrives, because it is possible that a reading thread will access the + * bucket before the laggard inserter arrives, or that the bucket might be + * flushed to disk, and become immutable. + */ +public class VLSNBucket { + + /* On-disk version. */ + private static final int VERSION = 1; + + /* File number for target file. */ + private final long fileNumber; + + /* Interval between VLSN values that are mapped. */ + private final int stride; + + protected VLSN firstVLSN = VLSN.NULL_VLSN; + protected VLSN lastVLSN = VLSN.NULL_VLSN; + private long lastLsn = DbLsn.NULL_LSN; + + /* + * The file offsets are really unsigned ints. The calls to put() are + * implemented to let us assume that the list is fully populated. A + * subclass of truncateableList has been used in order to provide access to + * the ArrayList.removeFromRange method. + */ + private TruncateableList fileOffsets; + + /* + * The max number of offsets and maxDistance help guide when to close the + * bucket and start a new one. Not persistent. + */ + private int maxMappings; + private int maxDistance; + + private static final int NO_OFFSET = 0; + + /* True if there are changes to the bucket that are not on disk. */ + boolean dirty; + + /* + * True if the VLSNBucket will not accept any more modifications; used to + * safeguard the bucket while the index is being written to disk. + */ + private boolean closed = false; + + VLSNBucket(long fileNumber, + int stride, + int maxMappings, + int maxDistance, + VLSN firstVLSN) { + this.fileNumber = fileNumber; + this.stride = stride; + this.maxMappings = maxMappings; + this.maxDistance = maxDistance; + + /* + * The VLSNs in the bucket are initialized to indicate what range + * should be covered by this bucket. But there may not be any offsets + * recorded either in the lastLsn or the fileOffset. 
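Editor's note: the stride arithmetic described in the class comment, worked through with its example numbers (firstVLSN 9, stride 4, offsets {100, 140} in file 10). This is a sketch of the lookup idea, not JE code:

    public class StrideMappingSketch {
        public static void main(String[] args) {
            final long firstVLSN = 9;
            final int stride = 4;
            final long fileNumber = 10;
            final int[] fileOffsets = {100, 140};

            for (long vlsn = 9; vlsn <= 13; vlsn++) {
                long diff = vlsn - firstVLSN;
                boolean onBoundary = (diff % stride) == 0;
                int index = (int) (diff / stride); // floor: the LTE mapping
                // On a stride boundary the LSN is exact; otherwise the
                // caller starts scanning the log from the LTE mapping.
                System.out.println("vlsn " + vlsn + " -> " +
                                   (onBoundary ? "exact " : "scan from ") +
                                   fileNumber + "/" + fileOffsets[index]);
            }
        }
    }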
+ */ + this.firstVLSN = firstVLSN; + this.lastVLSN = firstVLSN; + + /* + * Initialize the file offsets with a -1 value to correspond to + * firstVLSN. + */ + fileOffsets = new TruncateableList(); + fileOffsets.add(0, NO_OFFSET); + } + + /* For reading from disk. */ + private VLSNBucket(TupleInput ti) { + fileNumber = ti.readPackedLong(); + stride = ti.readPackedInt(); + firstVLSN = new VLSN(ti.readPackedLong()); + lastVLSN = new VLSN(ti.readPackedLong()); + lastLsn = ti.readPackedLong(); + int size = ti.readPackedInt(); + fileOffsets = new TruncateableList(size); + for (int i = 0; i < size; i++) { + fileOffsets.add(i, DbLsn.getFileOffsetAsInt(ti.readUnsignedInt())); + } + } + + /** + * Record the LSN location for this VLSN. + * + * One key issue is that puts() are not synchronized, and the VLSNs may + * arrive out of order. If an out of order VLSN does arrive, we can still + * assume that the earlier VLSNs have been successfully logged. If a VLSN + * arrives that is divisible by the stride, and should be recorded in the + * fileOffsets, but is not the next VLSN that should be recorded, we'll pad + * out the fileOffsets list with placeholders. + * + * For example, suppose the stride is 3, and the first VLSN is 2. Then this + * bucket should record VLSN 2, 5, 8, ... etc. If VLSN 8 arrives before + * VLSN 5, VLSN 8 will be recorded, and VLSN 5 will have an offset + * placeholder of NO_OFFSET. It is a non-issue if VLSNs 3, 4, 6, 7 arrive + * out of order, because they would not have been recorded anyway. This + * should not happen often, because the stride should be fairly large, and + * the calls to put() should be close together. If the insertion order is + * vlsn 2, 5, 9, then the file offsets array will be a little short, and + * will only have 2 elements, instead of 3. + * + * We follow this policy because we must always have a valid begin and end + * point for the range. We must handle placeholders in all cases, and can't + * count of later vlsn inserts, because a bucket can become immutable at + * any time if it is flushed to disk. + * + * @return false if this bucket will not accept this VLSN. Generally, a + * refusal might happen because the bucket was full or the mapping is too + * large a distance away from the previous mapping. In that case, the + * tracker will start another bucket. + */ + synchronized boolean put(VLSN vlsn, long lsn) { + + if (closed) { + return false; + } + + if (!belongs(vlsn, lsn)) { + return false; + } + + /* + * Add it to the fileOffset if it's on a stride boundary and is the + * next mapping in the fileOffset list. + */ + if (isModulo(vlsn)) { + int index = getIndex(vlsn); + int listLen = fileOffsets.size(); + if (index < listLen) { + fileOffsets.set(index, DbLsn.getFileOffsetAsInt(lsn)); + } else if (index == listLen) { + fileOffsets.add(DbLsn.getFileOffsetAsInt(lsn)); + } else { + for (int i = listLen; i < index; i++) { + fileOffsets.add(NO_OFFSET); + } + fileOffsets.add(DbLsn.getFileOffsetAsInt(lsn)); + } + dirty = true; + } + + /* If the lastLsn is less, or not initialized, set it to this VLSN. */ + if ((lastVLSN.compareTo(vlsn) < 0) || + (lastLsn == DbLsn.NULL_LSN)) { + lastVLSN = vlsn; + lastLsn = lsn; + dirty = true; + } + + return true; + } + + /* + * Return true if this VLSN is on a stride boundary. 
Assumes + * !firstVLSN.isNull() + */ + private boolean isModulo(VLSN vlsn) { + return (((vlsn.getSequence() - firstVLSN.getSequence()) % stride) == + 0); + } + + private int getIndex(VLSN vlsn) { + assert isModulo(vlsn) : "Don't call getIndex on non-modulo VLSN " + + vlsn + " bucket=" + this; + + return (int) ((vlsn.getSequence() - firstVLSN.getSequence()) / stride); + } + + /** + * @return true if this VLSN->LSN mapping should go into this bucket. + */ + private boolean belongs(VLSN vlsn, long lsn) { + assert vlsn.compareTo(firstVLSN) >= 0 : + "firstVLSN = " + firstVLSN + " should not be greater than " + vlsn; + + if (DbLsn.getFileNumber(lsn) != fileNumber) { + /* Mappings must be for same file. */ + return false; + } + + if (emptyInternal()) { + return true; + } + + /* + * Some other thread beat us to the put() call and inserted a later + * mapping, so we know for sure that we fit in this bucket + */ + if (lastVLSN.compareTo(vlsn) > 0) { + return true; + } + + boolean onStrideBoundary = isModulo(vlsn); + if (onStrideBoundary && (fileOffsets.size() >= maxMappings)) { + /* Too full. */ + return false; + } + + /* + * Will this VLSN be next one recorded in the fileOffsets? If so, + * calculate the scan distance. + */ + if ((onStrideBoundary && (getIndex(vlsn) == fileOffsets.size())) || + lastVLSN.compareTo(vlsn) < 0) { + /* This VLSN is going in at the tail of the bucket. */ + int lastOffset = fileOffsets.get(fileOffsets.size() - 1); + if ((DbLsn.getFileOffset(lsn) - + DbLsn.convertIntFileOffsetToLong(lastOffset)) > + maxDistance) { + /* The scan distance is exceeded. */ + return false; + } + } + + return true; + } + + /** + * @return true if this bucket contains this mapping. + */ + synchronized boolean owns(VLSN vlsn) { + if (vlsn.equals(VLSN.NULL_VLSN)) { + return false; + } else if (firstVLSN.equals(VLSN.NULL_VLSN)) { + return false; + } else { + return (firstVLSN.compareTo(vlsn) <= 0) && + (lastVLSN.compareTo(vlsn) >= 0); + } + } + + synchronized VLSN getFirst() { + return firstVLSN; + } + + synchronized VLSN getLast() { + return lastVLSN; + } + + /** + * Return a file number that is less or equal to the first lsn mapped + * by this bucket. In standard VLSNBuckets, only one file is covered, so + * there is only one possible value. In GhostBuckets, multiple files could + * be covered. + * @return + */ + long getLTEFileNumber() { + return fileNumber; + } + + /* + * Similar to getLTEFileNumber, for this implementation there's only one + * possible file. + */ + long getGTEFileNumber() { + return fileNumber; + } + + synchronized boolean empty() { + return emptyInternal(); + } + + private boolean emptyInternal() { + return (firstVLSN.equals(lastVLSN) && + (lastLsn == DbLsn.NULL_LSN)); + } + + boolean follows(VLSN vlsn) { + return (firstVLSN.compareTo(vlsn) > 0); + } + + boolean precedes(VLSN vlsn) { + return (!lastVLSN.equals(VLSN.NULL_VLSN) && + (lastVLSN.compareTo(vlsn) < 0)); + } + + /** + * Returns the mapping whose VLSN is >= the VLSN parameter. For example, if + * the bucket holds mappings for vlsn 10, 13, 16, + * + * - the greater than or equal mapping for VLSN 10 is 10/lsn + * - the greater than or equal mapping for VLSN 11 is 13/lsn + * - the greater than or equal mapping for VLSN 13 is 13/lsn + * + * File offsets may be null in the middle of the file offsets array because + * of out of order mappings. This method must return a non-null lsn, and + * must account for null offsets. + * + * @return the mapping whose VLSN is >= the VLSN parameter. 
Will never + * return NULL_LSN, because the VLSNRange begin and end point are always + * mapped. + */ + public synchronized long getGTELsn(VLSN vlsn) { + + if (lastVLSN.equals(vlsn)) { + return lastLsn; + } + + int index; + if (firstVLSN.compareTo(vlsn) >= 0) { + + /* + * It's possible for vlsn to be < the firstVLSN if vlsn + * falls between two buckets. For example, if the buckets are: + * bucketA = vlsn 10-> 20 + * bucketB = vlsn 22->30 + * then vlsn 21 will fall between two buckets, and will get bucketB + */ + index = 0; + } else { + index = getGTEIndex(vlsn); + } + + /* + * This should never happen. Throw this exception to make debugging + * info available. + */ + if (index < 0) { + throw EnvironmentFailureException.unexpectedState + ("index=" + index + + " vlsn=" + vlsn + + " bucket=" + this); + } + + if (index >= fileOffsets.size()) { + return lastLsn; + } + int useIndex = findPopulatedIndex(index, true /* forward */); + int offset = fileOffsets.get(useIndex); + return offset == NO_OFFSET ? + lastLsn : DbLsn.makeLsn(fileNumber, offset); + } + + /** + * Return the index for the mapping >= this VLSN. Note that this is just + * a stride calculation, and a non-existent file offset index might be + * returned. + */ + private int getGTEIndex(VLSN vlsn) { + long diff = vlsn.getSequence() - firstVLSN.getSequence(); + return (int) ((diff + (stride - 1)) / stride); + } + + /** + * We'd like to return the mapping at startIndex for the get{LTE, GTE} + * Mapping methods, but the offsets may not be populated if put() calls + * have come out of order. Search for the next populated offset. + */ + private int findPopulatedIndex(int startIndex, boolean forward) { + if (forward) { + for (int i = startIndex; i < fileOffsets.size(); i++) { + if (fileOffsets.get(i) != NO_OFFSET) { + return i; + } + } + } else { + for (int i = startIndex; i >= 0; i--) { + if (fileOffsets.get(i) != NO_OFFSET) { + return i; + } + } + } + return startIndex; + } + + /** + * Returns the lsn whose VLSN is <= the VLSN parameter. For example, if + * the bucket holds mappings for vlsn 10, 13, 16, + * + * - the less than or equal mapping for VLSN 10 is 10/lsn + * - the less than or equal mapping for VLSN 11 is 10/lsn + * - the less than or equal mapping for VLSN 13 is 13/lsn + * + * File offsets may be null in the middle of the file offsets array because + * of out of order mappings. This method must return a non-null lsn, and + * must account for null offsets. + * + * @return the lsn whose VLSN is <= the VLSN parameter. Will never return + * NULL_LSN, because the VLSNRange begin and end points are always mapped. + */ + synchronized long getLTELsn(VLSN vlsn) { + + /* + * It's possible for vlsn to be greater than lastVLSN if vlsn falls + * between two buckets. + * For example, if the buckets are: + * bucketA = vlsn 10-> 20 + * bucketB = vlsn 22->30 + * then vlsn 21 will fall between two buckets, and will get bucketA + */ + if (lastVLSN.compareTo(vlsn) <= 0) { + return lastLsn; + } + + long diff = vlsn.getSequence() - firstVLSN.getSequence(); + + /* + * Make sure that the file offset array isn't unexpectedly short due to + * out of order inserts. + */ + int index = (int)(diff / stride); + if (index >= fileOffsets.size()) { + index = fileOffsets.size() - 1; + } + + int useIndex = findPopulatedIndex(index, false /* forward */); + int offset = fileOffsets.get(useIndex); + + assert offset != NO_OFFSET : "bucket should always have a non-null " + + "first offset. 
vlsn= " + vlsn + " bucket=" + this; + + return (DbLsn.makeLsn(fileNumber, offset)); + } + + /** + * @return the lsn whose VLSN is == the VLSN parameter or DbLsn.NULL_LSN if + * there is no mapping. Note that because of out of order puts, there may + * be missing mappings that appear later on. + */ + public synchronized long getLsn(VLSN vlsn) { + assert owns(vlsn) : "vlsn=" + vlsn + " " + this; + + if (lastVLSN.equals(vlsn)) { + return lastLsn; + } + + if (!isModulo(vlsn)) { + return DbLsn.NULL_LSN; + } + + int index = getIndex(vlsn); + if (index >= fileOffsets.size()) { + return DbLsn.NULL_LSN; + } + + int offset = fileOffsets.get(index); + if (offset == NO_OFFSET) { + return DbLsn.NULL_LSN; + } + + return DbLsn.makeLsn(fileNumber, offset); + } + + synchronized long getLastLsn() { + return lastLsn; + } + + /** + * Remove the mappings from this bucket that are for VLSNs <= + * lastDuplicate. If this results in a broken stride interval, package all + * those mappings into their own bucket and return it as a remainder + * bucket. + * + * For example, suppose this bucket has a stride of 5 and maps VLSN 10-23. + * Then it has mappings for 10, 15, 20, 23. + * + * If we need to remove mappings <= 16, we'll end up without a bucket that + * serves as a home base for vlsns 17,18,19. Those will be spun out into + * their own bucket, and this bucket will be adjusted to start at VLSN 20. + * This bucket should end up with + * + * - firstVLSN = 20 + * - fileOffset is an array of size 1, for the LSN for VLSN 20 + * - lastVLSN = 23 + * - lastLsn = the same as before + * + * The spun-off bucket should be: + * - firstVLSN = 17 + * - fileOffset is an array of size 1, for the LSN for VLSN 17 + * - lastVLSN = 19 + * - lastLsn = lsn for 19 + * + * @return the newly created bucket that holds mappings from a broken + * stride interval, or null if there was no need to create such a bucket. + */ + VLSNBucket removeFromHead(EnvironmentImpl envImpl, VLSN lastDuplicate) { + + if (empty()) { + return null; + } + + /* + * No overlap, this bucket owns mappngs that follow the duplicate + * range. + */ + if (lastDuplicate.compareTo(firstVLSN) < 0) { + return null; + } + + /* + * This whole bucket is to be deleted, all its mappings are <= the + * lastDuplicate. + */ + if (lastVLSN.compareTo(lastDuplicate) <= 0) { + fileOffsets = null; + firstVLSN = VLSN.NULL_VLSN; + lastVLSN = VLSN.NULL_VLSN; + lastLsn = DbLsn.NULL_LSN; + return null; + } + + VLSN indexVLSN = firstVLSN; + int newFirstIndex = -1; + + /* + * Find the mappings that still belong. Using the example above, we + * should find that we can delete fileOffset[0] and fileOffset[1] and + * preserve fileOffset[2] + */ + for (int i = 0; i < fileOffsets.size(); i++) { + if ((indexVLSN.compareTo(lastDuplicate) > 0) && + (fileOffsets.get(i) != NO_OFFSET)) { + newFirstIndex = i; + break; + } + indexVLSN = new VLSN(indexVLSN.getSequence() + stride); + } + + VLSNBucket remainder = null; + int lastOffset; + if (newFirstIndex == -1) { + + /* + * None of the VLSNs represented by the strided file offsets are + * needed anymore. This bucket consists solely of the last + * VLSN->LSN pair. + */ + lastOffset = fileOffsets.get(fileOffsets.size() - 1); + fileOffsets = new TruncateableList(); + fileOffsets.add(DbLsn.getFileOffsetAsInt(lastLsn)); + firstVLSN = lastVLSN; + } else { + /* Move the still-valid mappings to a new list. 
*/ + assert (newFirstIndex > 0); + lastOffset = fileOffsets.get(newFirstIndex - 1); + TruncateableList newFileOffsets = + new TruncateableList + (fileOffsets.subList(newFirstIndex, fileOffsets.size())); + fileOffsets = newFileOffsets; + firstVLSN = new VLSN((newFirstIndex * stride) + + firstVLSN.getSequence()); + } + + if (!firstVLSN.equals(lastDuplicate.getNext())) { + + /* + * If lastDuplicate was not on the same stride boundary as our old + * bucket, we may have a broken bucket of mappings to preserve. + * Using our example numbers above, we still need to make sure + * there's a bucket that matches VLSNs 17, 18 19. + */ + long scanStart = DbLsn.makeLsn(fileNumber, lastOffset); + remainder = scanForNewBucket(envImpl, + lastDuplicate.getNext(), + firstVLSN.getPrev(), + scanStart); + } + + dirty = true; + return remainder; + } + + /** + * Scan the log fle for VLSN->LSN mappings for creating a new bucket. + */ + private VLSNBucket scanForNewBucket(EnvironmentImpl envImpl, + VLSN first, + VLSN last, + long startLsn) { + + VLSNBucket newBucket = new VLSNBucket(fileNumber, stride, + maxMappings, maxDistance, + first); + int readBufferSize = + envImpl.getConfigManager().getInt + (EnvironmentParams.LOG_ITERATOR_MAX_SIZE); + + NewBucketReader scanner = + new NewBucketReader(newBucket, envImpl, readBufferSize, first, + last, startLsn); + + while (!scanner.isDone() && (scanner.readNextEntry())) { + } + + assert scanner.isDone(); + + return newBucket; + } + + /** + * Remove the mappings from this bucket that are for VLSNs >= + * startOfDelete. Unlike removing from the head, we need not worry about + * breaking a bucket stride interval. + * + * If prevLsn is NULL_VLSN, we don't have a good value to cap the bucket. + * Instead, we'll have to delete the bucket back to whatever was the next + * available lsn. For example, suppose the bucket has these mappings. This + * strange bucket (stride 25 is missing) is possible if vlsn 26 arrived + * early, out of order. + * + * in fileOffset: 10 -> 101 + * in fileOffset: 15 -> no offset + * in fileOffset: 20 -> 201 + * lastVLSN->lastnLsn mapping 26 -> 250 + * + * If we have a prevLsn and the startOfDelete is 17, then we can create + * a new mapping + * in fileOffset: 10 -> 101 + * in fileOffset: 15 -> no offset + * lastVLSN->lastnLsn mapping 17 -> 190 + * + * If we don't have a prevLsn, then we know that we have to cut the bucket + * back to the largest known mapping, losing many mappings along the way. + * in fileOffset: 10 -> 101 + * lastVLSN->lastnLsn mapping 10 -> 101 + * + * If we are deleting in the vlsn area between the last stride and the + * last offset, (i.e. vlsn 23 is the startOfDelete) the with and without + * prevLSn cases would look like this: + * + * (there is a prevLsn, and 23 is startDelete. No need to truncate + * anything) + * in fileOffset: 10 -> 101 + * in fileOffset: 15 -> no offset + * in fileOffset: 20 -> 201 + * lastVLSN->lastnLsn mapping 23 -> prevLsn + * + * (there is no prevLsn, and 23 is startDelete) + * in fileOffset: 10 -> 101 + * in fileOffset: 15 -> no offset + * in fileOffset: 20 -> 201 + * lastVLSN->lastnLsn mapping 20 -> 201 + * + * @param startOfDelete is the VLSN that begins the range to delete, + * inclusive + * @param prevLsn is the lsn of startOfDelete.getPrev(). We'll be using it + * to cap off the end of the bucket, by assigning it to the lastLsn field. 
+ */ + void removeFromTail(VLSN startOfDelete, long prevLsn) { + + if (empty()) { + return; + } + + if (lastVLSN.compareTo(startOfDelete) < 0) { + return; + } + + /* Delete all the mappings. */ + if (firstVLSN.compareTo(startOfDelete) >= 0) { + lastVLSN = firstVLSN; + lastLsn = DbLsn.NULL_LSN; + fileOffsets.clear(); + return; + } + + /* Delete some of the mappings. */ + int deleteIndex = getGTEIndex(startOfDelete); + + /* + * This should never happen, because the startOfDelete should be a vlsn + * that is >= the first vlsn and we handled the case where + * startOfDelete == firstVLSN already.) Throw this exception to make + * debugging info available. + */ + if (deleteIndex <= 0) { + throw EnvironmentFailureException.unexpectedState + ("deleteIndex=" + deleteIndex + + " startOfDelete=" + startOfDelete + + " bucket=" + this); + } + + /* See if there are any fileoffsets to prune off. */ + if (deleteIndex < fileOffsets.size()) { + + /* + * The startOfDeleteVLSN is a value between the firstVLSN and + * the last file offset. + */ + if (prevLsn == DbLsn.NULL_LSN) { + int lastPopulatedIndex = + findPopulatedIndex(deleteIndex-1, false); + if (lastPopulatedIndex != (deleteIndex -1)) { + deleteIndex = lastPopulatedIndex + 1; + } + } + fileOffsets.truncate(deleteIndex); + } else { + /* + * The startOfDelete vlsn is somewhere between the last file offset + * and the lastVLSN. + */ + if (prevLsn == DbLsn.NULL_LSN) { + int lastIndex = fileOffsets.size() - 1; + int lastPopulatedIndex = findPopulatedIndex(lastIndex, false); + if (lastPopulatedIndex < lastIndex) { + fileOffsets.truncate(lastPopulatedIndex); + } + } + } + + /* Now set the lastVLSN -> lastLSN mapping. */ + if (prevLsn == DbLsn.NULL_LSN) { + lastVLSN = new VLSN(((fileOffsets.size()-1) * stride) + + firstVLSN.getSequence()); + Integer lastOffset = fileOffsets.get(fileOffsets.size() - 1); + assert lastOffset != null; + lastLsn = DbLsn.makeLsn(fileNumber, lastOffset); + } else { + lastVLSN = startOfDelete.getPrev(); + lastLsn = prevLsn; + } + dirty = true; + } + + /* For unit tests */ + int getNumOffsets() { + return fileOffsets.size(); + } + + void close() { + closed = true; + } + + /** + * Write this bucket to the mapping database. + */ + void writeToDatabase(EnvironmentImpl envImpl, + DatabaseImpl bucketDbImpl, + Txn txn) { + + if (!dirty) { + return; + } + + Cursor c = null; + try { + c = DbInternal.makeCursor(bucketDbImpl, + txn, + CursorConfig.DEFAULT); + writeToDatabase(envImpl, c); + } finally { + if (c != null) { + c.close(); + } + } + } + + /** + * Write this bucket to the mapping database using a cursor. Note that + * this method must disable critical eviction. Critical eviction makes the + * calling thread search for a target IN node to evict. That target IN node + * may or may not be in the internal VLSN db. + * + * For example, when a new, replicated LN is inserted or modified, a + * new VLSN is allocated. To do so, the app thread that is executing the + * operation + * A1. Takes a BIN latch on a BIN in a replicated db + * A2. Takes the VLSNINdex mutex + * + * Anyone calling writeDatabase() has to take these steps: + * B1. Take the VLSNIndex mutex + * B2. Get a BIN latch for a BIN in the internal vlsn db. + * + * This difference in locking hierarchy could cause a deadlock except for + * the fact that A1 and B2 are guaranteed to be in different databases. If + * writeDatabase() also did critical eviction, it would have a step where + * it tried to get a BIN latch on a replicated db, and we'd have a + * deadlock. 
[#18475] + */ + void writeToDatabase(EnvironmentImpl envImpl, Cursor cursor) { + + if (!dirty) { + return; + } + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + LongBinding.longToEntry(firstVLSN.getSequence(), key); + VLSNBucketBinding bucketBinding = new VLSNBucketBinding(); + bucketBinding.objectToEntry(this, data); + + DbInternal.getCursorImpl(cursor).setAllowEviction(false); + OperationStatus status = cursor.put(key, data); + + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "Unable to write VLSNBucket for file " + + fileNumber + " status=" + status); + } + dirty = false; + } + + /** + * Instantiate this from the database. Assumes that this bucket will not be + * used for insertion in the future. + */ + public static VLSNBucket readFromDatabase(DatabaseEntry data) { + + VLSNBucketBinding mapperBinding = new VLSNBucketBinding(); + VLSNBucket bucket = mapperBinding.entryToObject(data); + return bucket; + } + + void fillDataEntry(DatabaseEntry data) { + VLSNBucketBinding binding = new VLSNBucketBinding(); + binding.objectToEntry(this, data); + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return String.format("", + fileNumber, fileNumber, + (fileOffsets == null) ? 0 : fileOffsets.size(), + stride, firstVLSN, lastVLSN, + DbLsn.getNoFormatString(lastLsn)); + } + + /** + * For debugging and tracing. + */ + public void dump(PrintStream out) { + if (fileOffsets == null) { + return; + } + + long vlsnVal = firstVLSN.getSequence(); + int newlineCounter = 0; + for (Integer offset : fileOffsets) { + out.printf(" [%d 0x%x]", vlsnVal, + DbLsn.convertIntFileOffsetToLong(offset)); + + vlsnVal += stride; + if (++newlineCounter > 6) { + out.println("\n"); + newlineCounter = 0; + } + } + + out.printf("\n---------Last: VLSN=%s LSN=%s", lastVLSN, + DbLsn.getNoFormatString(lastLsn)); + } + + boolean isGhost() { + return false; + } + + void writeToTupleOutput(TupleOutput to) { + + to.writePackedLong(fileNumber); + to.writePackedInt(stride); + to.writePackedLong(firstVLSN.getSequence()); + to.writePackedLong(lastVLSN.getSequence()); + to.writePackedLong(lastLsn); + to.writePackedInt(fileOffsets.size()); + for (Integer offset: fileOffsets) { + to.writeUnsignedInt(DbLsn.convertIntFileOffsetToLong(offset)); + } + } + + /** + * Marshals a VLSNBucket to a byte buffer to store in the database. + * Doesn't persist the file number, because that's the key of the database. + * A number of the fields are transient and are also not stored. + */ + private static class VLSNBucketBinding extends TupleBinding { + + @Override + public VLSNBucket entryToObject(TupleInput ti) { + + int onDiskVersion = ti.readPackedInt(); + if (onDiskVersion != VLSNBucket.VERSION) { + throw EnvironmentFailureException.unexpectedState + ("Don't expect version diff on_disk=" + onDiskVersion + + " source=" + VLSNBucket.VERSION); + } + boolean isGhost = ti.readBoolean(); + VLSNBucket bucket = null; + if (isGhost) { + bucket = GhostBucket.makeNewInstance(ti); + } else { + bucket = new VLSNBucket(ti); + } + return bucket; + } + + @Override + public void objectToEntry(VLSNBucket bucket, TupleOutput to) { + to.writePackedInt(VLSNBucket.VERSION); + to.writeBoolean(bucket.isGhost()); + bucket.writeToTupleOutput(to); + } + } + + /** + * Scan a specific section of log and generate a new VLSNBucket for + * this section. 
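Editor's note: VLSNBucketBinding above illustrates a versioned TupleBinding layout. The same round-trip pattern on a toy record, using the real com.sleepycat.bind.tuple API; Record and RecordBinding are illustrative names, not part of this patch:

    import com.sleepycat.bind.tuple.TupleBinding;
    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;
    import com.sleepycat.je.DatabaseEntry;

    public class VersionedBindingSketch {
        static final class Record {
            static final int VERSION = 1;
            final long firstVLSN;
            Record(long firstVLSN) { this.firstVLSN = firstVLSN; }
        }

        static final class RecordBinding extends TupleBinding<Record> {
            @Override
            public Record entryToObject(TupleInput ti) {
                // Read and check the version first, as VLSNBucketBinding does.
                int version = ti.readPackedInt();
                if (version != Record.VERSION) {
                    throw new IllegalStateException("version=" + version);
                }
                return new Record(ti.readPackedLong());
            }

            @Override
            public void objectToEntry(Record r, TupleOutput to) {
                to.writePackedInt(Record.VERSION);
                to.writePackedLong(r.firstVLSN);
            }
        }

        public static void main(String[] args) {
            RecordBinding binding = new RecordBinding();
            DatabaseEntry data = new DatabaseEntry();
            binding.objectToEntry(new Record(42L), data);
            System.out.println(binding.entryToObject(data).firstVLSN); // 42
        }
    }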
+     */
+    private static class NewBucketReader extends FileReader {
+        private final VLSNBucket remainderBucket;
+        private boolean done = false;
+        private final VLSN first;
+        private final VLSN last;
+
+        public NewBucketReader(VLSNBucket remainderBucket,
+                               EnvironmentImpl envImpl,
+                               int readBufferSize,
+                               VLSN first,
+                               VLSN last,
+                               long startLsn) {
+            super(envImpl,
+                  readBufferSize,
+                  true,            // forward
+                  startLsn,
+                  null,            // singleFileNumber
+                  DbLsn.NULL_LSN,  // endOfFileLsn
+                  DbLsn.NULL_LSN); // finishLsn
+
+            this.remainderBucket = remainderBucket;
+            this.first = first;
+            this.last = last;
+        }
+
+        /**
+         * Return true if this entry is replicated and its VLSN >= the
+         * firstVLSN and the entry is not invisible. These entries will
+         * be used to bring the VLSNIndex up to speed.
+         */
+        @Override
+        protected boolean isTargetEntry() {
+            return (!currentEntryHeader.isInvisible() &&
+                    entryIsReplicated() &&
+                    (currentEntryHeader.getVLSN().compareTo(first) >= 0));
+        }
+
+        @Override
+        protected boolean processEntry(ByteBuffer entryBuffer) {
+            if (currentEntryHeader.getVLSN().compareTo(last) > 0) {
+                done = true;
+            } else {
+                remainderBucket.put(currentEntryHeader.getVLSN(),
+                                    getLastLsn());
+            }
+
+            entryBuffer.position(entryBuffer.position() +
+                                 currentEntryHeader.getItemSize());
+            return true;
+        }
+
+        boolean isDone() {
+            return done;
+        }
+    }
+
+    @SuppressWarnings("serial")
+    private static class TruncateableList<T> extends ArrayList<T> {
+
+        TruncateableList() {
+            super();
+        }
+
+        TruncateableList(int capacity) {
+            super(capacity);
+        }
+
+        TruncateableList(List<T> list) {
+            super(list);
+        }
+
+        void truncate(int fromIndex) {
+            removeRange(fromIndex, size());
+        }
+    }
+}
 diff --git a/src/com/sleepycat/je/rep/vlsn/VLSNIndex.java b/src/com/sleepycat/je/rep/vlsn/VLSNIndex.java new file mode 100644 index 0000000..628a05c --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/VLSNIndex.java @@ -0,0 +1,2527 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
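Editor's note: TruncateableList above exists only because ArrayList.removeRange is protected, so a trivial subclass is needed to chop the tail off in one call. The same trick stand-alone (Chopable is an illustrative name):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;

    public class TruncateSketch {
        @SuppressWarnings("serial")
        static class Chopable<T> extends ArrayList<T> {
            Chopable(Collection<T> c) { super(c); }
            // Exposes the protected removeRange as a tail truncation.
            void truncate(int fromIndex) { removeRange(fromIndex, size()); }
        }

        public static void main(String[] args) {
            Chopable<Integer> offsets =
                new Chopable<>(Arrays.asList(100, 140, 160, 180));
            offsets.truncate(2);          // keep the first two mappings
            System.out.println(offsets);  // [100, 140]
        }
    }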
+ */
+
+package com.sleepycat.je.rep.vlsn;
+
+import static com.sleepycat.je.utilint.VLSN.NULL_VLSN;
+
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.sleepycat.bind.tuple.LongBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.cleaner.FileProtector.ProtectedFileSet;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.DbType;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogItem;
+import com.sleepycat.je.recovery.RecoveryInfo;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.vlsn.VLSNRange.VLSNRangeBinding;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.Pair;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * A VLSN (Virtual LSN) is used to identify every log entry shared between
+ * members of the replication group. Since a JE log is identified by LSNs, we
+ * must have a way to map VLSN->LSNs in order to fetch a replicated log record
+ * from the local log, using the VLSN. The VLSNIndex implements those
+ * mappings. The VLSNIndex has these responsibilities:
+ *
+ * Generating new VLSNs.
+ *   Only masters need to generate VLSNs, but any node may have the potential
+ *   to be a master. The VLSN sequence must ascend over time and across
+ *   recoveries, so the VLSN id must be preserved much like the database,
+ *   node and txn ids.
+ * Maintaining the VLSN range.
+ *   Although each node needs to receive and store each log entry from the
+ *   replication stream, over time the part of the stream that is stored can
+ *   be reduced, either by log cleaning, or by syncups which can truncate the
+ *   replication stream. A node always holds a contiguous portion of the
+ *   replication stream. The VLSN range identifies that portion by having the
+ *   start and end VLSNs, as well as key landmarks such as the lastSync-able
+ *   log entry and the last commit log entry. VLSN range information is used
+ *   by elections and syncup.
+ * Gatekeeper for waiting for the most recently logged entries.
+ *   Feeders block upon the VLSNIndex when they are trying to fetch the most
+ *   recently logged entries. These recent log entries are held in a
+ *   two-level cache within the VLSNIndex.
+ * A call to VLSNIndex.waitForVLSN() goes through this sequence:
+ *   1) check the log item stored in the vlsn wait latch, if the call did
+ *      wait.
+ *   2) check the log item cache.
+ *   If both fail, the FeederReader will fetch the required log entry from
+ *   log buffers or disk.
+ * Providing the LSN mapping for a log record identified by its VLSN.
+ *   The Feeders and the syncup protocol both need to retrieve log records
+ *   by VLSN. To do that, we need an LSN mapping.
+ *
+ * Mappings are added to the VLSNIndex when replicated log entries are
+ * written into the local log. Although all mappings are registered, the
+ * VLSNIndex does not keep every one, in order to save on disk and in-memory
+ * storage. Only a sparse set is kept. When searching for a log entry by
+ * VLSN, the caller uses the closest available mapping and then scans the
+ * log looking for that entry.
+ *
+ * The VLSNIndex relies on the assumption that VLSN tagged log entries are
+ * ordered and contiguous in the log. That is, the LSN for VLSN 1 is < the
+ * LSN for VLSN 2 < the LSN for VLSN 3, and there is never a gap in the
+ * VLSNs. However, at node syncup, the replication stream may need to be
+ * truncated when rolling back a non-committed log entry. We can't literally
+ * truncate the log files because the JE logs contain intermingled
+ * transactional and non-transactional information. Instead, the truncation
+ * is done both logically by amending the VLSNIndex, and physically by
+ * overmarking those entries in the JE logs. Because of that, a physical
+ * dump of the log may show some VLSN tagged entries as duplicate and/or out
+ * of order because they're abandoned log entries that are not logically
+ * part of the replication stream any more. For example, the log can look
+ * like this:
+ *
+ *   LSN 100, VLSN 1
+ *   LSN 200, VLSN 2  <- overmarked
+ *   LSN 300, VLSN 3  <- overmarked
+ *   --- syncup, rollback to VLSN 1, restart at VLSN 2
+ *   LSN 400, VLSN 2
+ *   LSN 500, VLSN 3
+ *
+ * VLSN->LSN mappings are created under the log write latch, which ensures
+ * that all VLSN tagged log entries are ordered in the logical replication
+ * stream in the log. However, the mapping is added to the VLSNIndex outside
+ * the log write latch, so the VLSNIndex database may have a momentary gap.
+ * For example,
+ *
+ *   t0- thread 1 logs entry at VLSN=1, LSN=100, within log write latch
+ *   t1- thread 2 logs entry at VLSN=2, LSN=150, within log write latch
+ *   t2- thread 3 logs entry at VLSN=3, LSN=200, within log write latch
+ *   t3- thread 1 calls VLSNIndex.put(VLSN=1/LSN=100)
+ *   t4- thread 3 calls VLSNIndex.put(VLSN=3/LSN=200)
+ *   t5- thread 2 calls VLSNIndex.put(VLSN=2/LSN=150)
+ *
+ * At t4, the VLSNIndex contains 1/100, 3/200, but not 2/150. However, we
+ * know that the VLSNIndex always represents a contiguous range of VLSNs, so
+ * the fact that 2/150 is not yet there is handled, and is just like the
+ * case where the VLSNIndex optimized away the mapping in order to keep the
+ * index sparse.
+ *
+ * We do guarantee that the start and end VLSNs in the range have mappings,
+ * in order to always be able to provide an LTE and GTE mapping for all
+ * valid VLSNs. Because of that, if a VLSN comes out of order, it does not
+ * update the range. Care must be taken when truncating the VLSNIndex from
+ * the head or the tail to ensure that the guaranteed existence of the start
+ * and end range mapping remains valid.
+ *
+ * Cache and persistent storage:
+ *
+ * The VLSN->LSN mappings in the range are grouped into instances of
+ * com.sleepycat.je.rep.vlsn.VLSNBucket.
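+ *
+ * For example, with a stride of 5, a bucket might hold mappings only for
+ * VLSNs 10, 15 and 20 (the numbers here are illustrative). A reader looking
+ * for VLSN 13 uses the closest mapping at or below it (VLSN 10) and scans
+ * forward in the log from that LSN until it reaches the entry tagged with
+ * VLSN 13.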
+ * Each bucket knows the first and last VLSN within its mini-range. We
+ * observe these invariants:
+ *   - buckets are ordered by VLSN in the database and the bucket cache,
+ *   - only the last bucket is the target of updates at any time,
+ *   - a single bucket corresponds to a single file, but a single file may
+ *     have multiple buckets covering it.
+ *
+ * While it would be nice to also guarantee that there are no gaps between
+ * buckets, ie:
+ *   bucket(N-1).last == bucket(N).first - 1
+ *   bucket(N).last == bucket(N+1).first - 1
+ * it is not possible to do so because the put() call is not serialized; we
+ * don't want to add overhead to the log write latch. In order to permit out
+ * of order puts(), and to require that only the last bucket is updated, we
+ * must permit gaps between buckets.
+ *
+ * Mappings start out being cached in VLSNBuckets held in memory by the
+ * VLSNTracker. As the tracker fills, the buckets are flushed to persistent
+ * storage in an internal, non-replicated database. Both the database and
+ * the tracker cache hold key/value pairs where
+ *
+ *   key = bucket.first
+ *   data = bucket
+ *
+ * Since the first valid VLSN is 1, key = -1 is reserved for storage of the
+ * VLSNRange.
+ *
+ * Buckets are filled up as new VLSNs arrive (either because they've been
+ * generated by write operations on the master, or because they're incoming
+ * operations on the replica). They're flushed to disk periodically rather
+ * than with every new VLSN, because the update rate would have too much of
+ * a performance impact. Since there is this level of caching happening, we
+ * must be careful to write in-memory buckets to disk at well known points
+ * to support recoverability. The flushing must be instigated by a third
+ * party activity, such as checkpointing, rather than by the action of
+ * adding a new mapping. That's because mappings are registered by the
+ * logging system, and although we are not holding the log write latch at
+ * that point, it seems inadvisable to recursively generate another logging
+ * call on behalf of the flush. Currently the VLSNIndex is flushed to disk
+ * at every checkpoint. It can also optionally happen more often, and (TODO)
+ * we may want to do so because we've seen cases where checkpoints take a
+ * very long time. Perhaps we should flush when we flip to a new log file?
+ *
+ * Once written to disk, the buckets are generally not updated. Updates can
+ * happen when the range is truncated, such as for syncup rollback, but the
+ * system is quiescent at that time, and there are no new mappings created.
+ * Log cleaning can read the vlsnIndex and delete buckets, but will not
+ * modify mappings. The VLSNRange does naturally change often, and that
+ * data record does get updated.
+ *
+ * Recovery:
+ *
+ * The VLSN database is restored at recovery time just as all other
+ * databases are. However, there may be a portion of the VLSN range that was
+ * not flushed to disk. At recovery, we piggyback onto the log scanning done
+ * and re-track any mappings found within the recovery range. Those mappings
+ * are merged into those stored on disk, so that the VLSNIndex correctly
+ * reflects the entire replication stream at startup. For example, suppose a
+ * log has:
+ *
+ *   LSN
+ *   100 firstActiveLSN
+ *   200 Checkpoint start
+ *   300 VLSN 78
+ *   400 VLSNIndex flushed here
+ *   500 Checkpoint end
+ *   600 VLSN 79
+ *
+ * The VLSNIndex is initially populated with the version of the index found
+ * at LSN 400. That doesn't include VLSN 79.
+ * A tracking pass is done from checkpoint start -> end of log, which
+ * sweeps up VLSN 78 and VLSN 79 into a temporary tracker. That tracker is
+ * merged into the VLSNIndex, to update its mappings to VLSN 79.
+ *
+ * Note that the checkpoint VLSNIndex must encompass all vlsn mappings that
+ * are prior to the checkpoint start of that recovery period. This follows
+ * the general philosophy that checkpoint flushes all metadata, and recovery
+ * reads from checkpoint start onwards to add on any needed extra data.
+ *
+ * Retrieving mappings:
+ *
+ * Callers who need to retrieve mappings obtain a VLSNScanner, which acts as
+ * a cursor over the VLSNIndex. A VLSNScanner finds and saves the applicable
+ * VLSNBucket, and queries the bucket directly as long as it can provide
+ * mappings. This reduces the level of contention between multiple readers
+ * (feeders) and writers (application threads, or the replay thread).
+ *
+ * Synchronization hierarchy:
+ *
+ * To write a new mapping, you must have the mutex on the VLSNIndex, and
+ * then the tracker, which lets you obtain the correct bucket, and then you
+ * must have a mutex on the bucket. To read a mapping, you must have the
+ * tracker mutex to obtain the right bucket. If you already have the right
+ * bucket in hand, you only need the bucket mutex.
+ *
+ * In truth, buckets which are not the "currentBucket" are not modified
+ * again, so a future optimization would allow for reading a mapping on a
+ * finished bucket without synchronization.
+ *
+ * The VLSNRange is updated as an atomic assignment to a volatile field
+ * after taking the mutex on the current bucket. It is read without a
+ * mutex, by looking at it as a volatile field.
+ *
+ * The hierarchy is
+ *   VLSNIndex -> VLSNTracker -> VLSNBucket
+ *   VLSNIndex -> VLSNTracker -> VLSNRange
+ *   VLSNIndex -> VLSNIndex.mappingSynchronizer
+ *   VLSNIndex.flushSynchronizer -> VLSNTracker
+ *
+ * Removing mappings vs reading mappings - sync on the range.
+ *
+ * We also need to consider the fact that callers of the VLSNIndex may be
+ * holding other mutexes, or IN latches, and that the VLSNIndex methods may
+ * do database operations to read or write to the internal VLSN database.
+ * That can result in a nested database operation, and we need to be
+ * careful to avoid deadlocks. To be safe, we disable critical eviction in
+ * VLSNBucket.writeToDatabase(). [#18475]
+ *
+ * Writers
+ * -------
+ * Allocating a new VLSN: bump()
+ * - sync on log write latch
+ *   Note that since there is no synchronization on the VLSNIndex itself,
+ *   [allocating a new VLSN, logging its entry] and [flushing the vlsn
+ *   index to disk] is not atomic. See awaitConsistency().
+ *
+ * Adding a mapping: put()
+ * - sync on VLSNIndex
+ *   - sync on VLSNTracker to access the right bucket, and possibly
+ *     create a new bucket. Atomically modify the VLSNRange.
+ *
+ * Flushing mappings to disk: writeToDatabase()
+ * - sync on VLSNIndex.flushSynchronizer -> VLSNTracker
+ *
+ * Replica side syncup truncates the VLSNIndex from the end:
+ * - no synchronization needed, the system is quiescent, and we can assume
+ *   that VLSNs are neither read nor written by other threads.
+ *
+ * Log cleaning truncates the VLSNIndex from the beginning:
+ * We assume that the log cleaner is prohibited from deleting files that
+ * are being used for current feeding. We can also assume that the end of
+ * the log is not being deleted, and that we're not in conflict with put().
+ * We do have to worry about conflicting with backwards scans when
+ * executing syncup as a feeder, and with flushing mappings to disk. Shall
+ * we disable log file deletion at this point?
+ *
+ * Steps to take:
+ *
+ * First change the VLSNRange:
+ * - sync on VLSNIndex
+ * - atomically modify the VLSNRange to ensure that no readers or
+ *   writers touch the buckets that will be deleted.
+ * - sync on VLSNTracker to delete any dead buckets. Do that before
+ *   updating the on-disk database, so that we don't lose any
+ *   buckets to writeToDatabase().
+ * - without synchronization, scan the database and non-transactionally
+ *   delete any on-disk buckets that are <= the log cleaned file.
+ *
+ * Readers
+ * -------
+ * Active forward feeder checks if a mapping exists, and waits if
+ * necessary:
+ * - read the current VLSNRange w/out a mutex. If not satisfactory
+ * - sync on VLSNIndex
+ *   - sync on VLSNIndex.mappingSynchronizer
+ *
+ * Active forward feeder reads a mapping:
+ * first - getBucket()
+ * - sync on VLSNTracker to access the right bucket
+ * if bucket is in hand
+ * - sync on target bucket to read bucket
+ */
+public class VLSNIndex {
+
+    /*
+     * The length of time that a checkpoint will wait for the vlsn index to
+     * contain all vlsn->lsn mappings before the checkpoint start.
+     */
+    public static int AWAIT_CONSISTENCY_MS = 60000;
+
+    private final EnvironmentImpl envImpl;
+
+    /*
+     * VLSN waiting: A Feeder may block waiting for the next available
+     * record in the replication stream.
+     *
+     * vlsnPutLatch - Latch used to wait for the next VLSN put operation.
+     * putWaitVLSN - The VLSN associated with the vlsnPutLatch; it's only
+     *               meaningful in the presence of a latch.
+     */
+    private VLSNAwaitLatch vlsnPutLatch = null;
+    private VLSN putWaitVLSN = null;
+
+    /*
+     * Consider replacing the mapping synchronizer with a lower overhead
+     * and multi-processor friendly CAS style nowait code sequence.
+     */
+    private final Object mappingSynchronizer = new Object();
+    private final Object flushSynchronizer = new Object();
+    private final Logger logger;
+
+    /*
+     * nextVLSNCounter is incremented under the log write latch, when used
+     * on the master. If this node transitions from replica to master, this
+     * counter must be initialized before write operations begin. It can
+     * also be used by both masters and replicas when checking vlsn
+     * consistency before checkpoints.
+     */
+    private AtomicLong nextVLSNCounter;
+
+    /*
+     * For storing the persistent version of the VLSNIndex. For keys > 0,
+     * the key is the VLSN sequence number, data = VLSNBucket. Key = -1 has
+     * a special data item, which is the VLSNRange.
+     */
+    private DatabaseImpl mappingDbImpl;
+
+    /*
+     * The tracker handles the real mechanics of maintaining the VLSN range
+     * and mappings.
+     */
+    private VLSNTracker tracker;
+
+    /*
+     * A wait-free cache of the most recent log items in the VLSN index.
+     * These items are important since they are the ones needed by the
+     * feeders that are responsible for supplying timely commit
+     * acknowledgments.
+ */ + private final LogItemCache logItemCache; + + /* + * Statistics associated with the VLSN index + */ + private final StatGroup statistics; + + private final LongStat nHeadBucketsDeleted; + + private final LongStat nTailBucketsDeleted; + + /* For testing [#20726] flushToDatabase while getGTEBucket is executing */ + private TestHook searchGTEHook; + + /** + * The mapping db's name is passed in as a parameter instead of the more + * intuitive approach of defining it within the class to facilitate unit + * testing of the VLSNIndex. + */ + public VLSNIndex(EnvironmentImpl envImpl, + String mappingDbName, + @SuppressWarnings("unused") + NameIdPair nameIdPair, + int vlsnStride, + int vlsnMaxMappings, + int vlsnMaxDistance, + RecoveryInfo recoveryInfo) + throws DatabaseException { + + this.envImpl = envImpl; + + /* + * initialize the logger early so it can be used by the following + * methods. + */ + logger = LoggerUtils.getLogger(getClass()); + + statistics = new StatGroup(VLSNIndexStatDefinition.GROUP_NAME, + VLSNIndexStatDefinition.GROUP_DESC); + nHeadBucketsDeleted = + new LongStat(statistics, + VLSNIndexStatDefinition.N_HEAD_BUCKETS_DELETED); + nTailBucketsDeleted = + new LongStat(statistics, + VLSNIndexStatDefinition.N_TAIL_BUCKETS_DELETED); + + init(mappingDbName, + vlsnStride, + vlsnMaxMappings, + vlsnMaxDistance, + recoveryInfo); + + logItemCache = new LogItemCache(envImpl.getConfigManager(). + getInt(RepParams.VLSN_LOG_CACHE_SIZE), + statistics); + } + + /** + * Initialize before this node begins working as a master. This node may + * become a Master directly after recovery, or it may transition to the + * master state after running for some time as a Replica. + *

        + * Reset the vlsnIndex so the VLSN sequence corresponds to what this node + * thinks is the next VLSN. + */ + public void initAsMaster() { + VLSN last = tracker.getRange().getLast(); + if (last.equals(VLSN.NULL_VLSN)) { + + /* + * If the master does the conversion, the started VLSN should start + * from 2 so that Replica would throw a LogRefreshRequiredException + * and do a NetworkRestore to copy the master logs. + */ + nextVLSNCounter = envImpl.needRepConvert() ? + new AtomicLong(1) : + new AtomicLong(0); + } else { + nextVLSNCounter = new AtomicLong(last.getSequence()); + } + } + + /** + * Initialize before this node begins working as a replica after being + * a master. + */ + public synchronized void initAsReplica() { + /* + * Clear the VLSN await mechanism, which is used for feeding and for + * checkpoint precondition checking. Used when this node transitions away + * from master status, to replica status. + */ + if (vlsnPutLatch != null) { + vlsnPutLatch.terminate(); + vlsnPutLatch = null; + } + + putWaitVLSN = null; + nextVLSNCounter = null; + } + + /* + * Return the VLSN to use for tagging the next replicated log entry. Must + * be called within the log write latch. + */ + public VLSN bump() { + return new VLSN(nextVLSNCounter.incrementAndGet()); + } + + public long getLatestAllocatedVal() { + return nextVLSNCounter.get(); + } + + /* + * Register a new VLSN->LSN mapping. This is called outside the log write + * latch, but within the LogManager log() call. It must not cause any + * logging of its own and should not cause I/O. + */ + public void put(LogItem logItem) { + + final VLSN vlsn = logItem.header.getVLSN(); + final long lsn = logItem.lsn; + final byte entryType = logItem.header.getType(); + + logItemCache.put(vlsn, logItem); + + synchronized (this) { + tracker.track(vlsn, lsn, entryType); + + synchronized (mappingSynchronizer) { + + /* + * Put() calls may come out of order, so free the wait latch if + * the incoming VLSN >= the waiting VLSN. For example, a feeder + * may be awaiting VLSN 100, but the call to put(101) comes in + * before the call to put(100). + */ + if ((vlsnPutLatch != null) && + vlsn.compareTo(putWaitVLSN) >= 0) { + vlsnPutLatch.setLogItem(logItem); + vlsnPutLatch.countDown(); + vlsnPutLatch = null; + putWaitVLSN = null; + } + } + } + + if (logger.isLoggable(Level.FINEST)) { + LoggerUtils.finest(logger, envImpl, "vlsnIndex put " + vlsn); + } + } + + /** + * Wait for the vlsn, or a higher numbered vlsn, to make its appearance in + * the VLSN index. + * + * @throws InterruptedException + * @throws WaitTimeOutException if the VLSN did not appear within waitTime + * or the latch was explicitly terminated. + * + * @return the LogItem associated with the vlsn, or null if the entry is + * now present in the log, but is not available in the LogItemCache. + */ + public LogItem waitForVLSN(VLSN vlsn, int waitTime) + throws InterruptedException, WaitTimeOutException { + + /* First check the volatile range field, without synchronizing. */ + VLSNRange useRange = tracker.getRange(); + if (useRange.getLast().compareTo(vlsn) >= 0) { + return logItemCache.get(vlsn); + } + + VLSNAwaitLatch waitLatch = null; + synchronized (this) { + useRange = tracker.getRange(); + if (useRange.getLast().compareTo(vlsn) >= 0) { + return logItemCache.get(vlsn); + } + + synchronized (mappingSynchronizer) { + /* The target VLSN hasn't arrived yet, we'll wait. */ + setupWait(vlsn); + + /* Copy the latch while synchronized. 
*/
+                waitLatch = vlsnPutLatch;
+            }
+        }
+
+        /*
+         * Do any waiting outside the synchronization section. If the
+         * waited-for VLSN has already arrived, the waitLatch will have
+         * been counted down, and we'll go through.
+         */
+        if (!waitLatch.await(waitTime, TimeUnit.MILLISECONDS) ||
+            waitLatch.isTerminated()) {
+            /* Timed out waiting for an incoming VLSN, or was terminated. */
+            throw new WaitTimeOutException();
+        }
+
+        if (! (tracker.getRange().getLast().compareTo(vlsn) >= 0)) {
+            throw EnvironmentFailureException.
+                unexpectedState(envImpl, "Waited for vlsn:" + vlsn +
+                                " should be greater than last in range:" +
+                                tracker.getRange().getLast());
+        }
+        LogItem logItem = waitLatch.getLogItem();
+        /* If we waited successfully, logItem can't be null. */
+        return logItem.header.getVLSN().equals(vlsn) ?
+            logItem :
+            /*
+             * An out-of-order vlsn put, that is, a later VLSN arrived at
+             * the index before this one. We could look for it in the log
+             * item cache, but due to the very nature of the out of order
+             * put it's unlikely to be there and we would rather not incur
+             * the overhead of a failed lookup.
+             */
+            null;
+    }
+
+    /**
+     * For unit test only.
+     */
+    synchronized VLSN getPutWaitVLSN() {
+        return putWaitVLSN;
+    }
+
+    /**
+     * Set up the context for waiting for a not-yet-registered VLSN.
+     */
+    private void setupWait(VLSN vlsn) {
+        if (vlsnPutLatch == null) {
+            putWaitVLSN = vlsn;
+            vlsnPutLatch = new VLSNAwaitLatch();
+        } else {
+            /* There can only be one possible VLSN to wait on. */
+            if (!vlsn.equals(putWaitVLSN)) {
+                throw EnvironmentFailureException.unexpectedState
+                    (envImpl, "unexpected get for VLSN: " + vlsn +
+                     " already waiting for VLSN: " + putWaitVLSN +
+                     " current range=" + getRange());
+            }
+        }
+    }
+
+    /**
+     * Prevents truncation of the head of the index range (the lower
+     * bound). Used at the beginning of syncup. After calling this method,
+     * the head of the range is prevented from changing and the files in
+     * the range will not be deleted. Passing the returned value to {@link
+     * com.sleepycat.je.cleaner.FileProtector#removeFileProtection} will
+     * allow the head of the range to change and files to be deleted.
+     *

        It is important that a syncup does not synchronize on VLSNIndex, + * since this could block waiting for an expensive operation such as + * truncation. The synchronization for protecting the range head is + * therefore on VLSNTracker.
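+     *
+     * A hedged usage sketch (the FileProtector accessor shown here and the
+     * locker name are assumptions for illustration): protect the head for
+     * the duration of a syncup, then release it:
+     *
+     *    ProtectedFileSet pfs = vlsnIndex.protectRangeHead("syncup");
+     *    try {
+     *        // run syncup against a stable range head
+     *    } finally {
+     *        envImpl.getFileProtector().removeFileProtection(pfs);
+     *    }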

+     *
+     * @param lockerName the name of the protecting entity, i.e., the
+     * syncup, to be used in LogSizeStats.
+     *
+     * @return the ProtectedFileSet protecting the files in the VLSNIndex
+     * range.
+     */
+    public ProtectedFileSet protectRangeHead(String lockerName) {
+        return tracker.protectRangeHead(lockerName);
+    }
+
+    /**
+     * Returns the file at the lower bound of the current range. This
+     * method does not synchronize.
+     */
+    public long getProtectedRangeStartFile() {
+        return tracker.getProtectedRangeStartFile();
+    }
+
+    /**
+     * Try to advance the VLSNIndex ProtectedFileRange and truncate the
+     * head of the VLSNIndex range, so that bytesNeeded can be freed by
+     * deleting files in this range.
+     *
+     * Remove all information from the VLSNIndex for VLSNs <= deleteEnd.
+     * Used by log cleaning. To properly coordinate with readers of the
+     * VLSNIndex, we need to update the range before updating the buckets.
+     *
+     * We assume that deleteEnd is always the last vlsn in a file, and
+     * because of that, truncations will never split a bucket.
+     *
+     * A truncation may leave a gap at the head of the vlsn index though.
+     * This could occur if the buckets have a gap, due to out of order
+     * VLSNs. For example, it's possible that the index has these buckets:
+     *
+     *   bucket A: firstVLSN = 10, lastVLSN = 20
+     *   bucket B: firstVLSN = 22, lastVLSN = 30
+     *
+     * If we truncate the index at 20 (deleteEnd == 20), then the resulting
+     * start of the range is 21, but the first bucket value is 22. In this
+     * case, we need to insert a ghost bucket.
+     *
+     * This method ensures that any changes are fsynced to disk before file
+     * deletion occurs. [#20702]
+     */
+    public synchronized boolean tryTruncateFromHead(final long bytesNeeded) {
+
+        final Pair<VLSN, Long> truncateInfo = tracker.tryTruncateFromHead(
+            bytesNeeded, logItemCache);
+
+        if (truncateInfo == null) {
+            /* No change to the range was needed/possible. */
+            return false;
+        }
+
+        truncateDatabaseFromHead(truncateInfo.first(), truncateInfo.second());
+        return true;
+    }
+
+    /**
+     * Forcibly truncate the VLSNIndex range head, in situations where the
+     * environment is quiescent and we know that truncation is safe.
+     *
+     * @param deleteEnd the last VLSN to be truncated.
+     *
+     * @param deleteFileNum the file having deleteEnd as its last VLSN.
+     */
+    public synchronized void truncateFromHead(VLSN deleteEnd,
+                                              long deleteFileNum) {
+
+        LoggerUtils.fine(logger, envImpl,
+                         "head truncate with " + deleteEnd +
+                         " delete file#:" + deleteFileNum);
+
+        /*
+         * Since the range is the gatekeeper, update the tracker cache
+         * before the database, so that the range is adjusted first.
+         */
+        if (!tracker.truncateFromHead(
+                deleteEnd, deleteFileNum, logItemCache)) {
+            /* No change to the range was needed. */
+            return;
+        }
+
+        truncateDatabaseFromHead(deleteEnd, deleteFileNum);
+    }
+
+    private synchronized void truncateDatabaseFromHead(VLSN deleteEnd,
+                                                       long deleteFileNum) {
+
+        /*
+         * Be sure that the changes are fsynced before deleting any files.
+         * The changed vlsn index must be persisted so that there are no
+         * references to the deleted, cleaned files. Instead of using
+         * COMMIT_SYNC, use COMMIT_NO_SYNC with an explicit environment
+         * flush and fsync, because the latter ends the txn and releases
+         * locks sooner, and reduces possible lock contention on the
+         * VLSNIndex. Both feeders and write operations need to lock the
+         * VLSNIndex, so keeping lock contention minimal is essential.
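+         *
+         * As a sketch, the ordering used below is:
+         *
+         *    txn.commit();            // COMMIT_NO_SYNC: ends txn, frees locks
+         *    envImpl.flushLog(true);  // then make the change durable (fsync)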
+         * [#20702]
+         */
+        TransactionConfig config = new TransactionConfig();
+        config.setDurability(Durability.COMMIT_NO_SYNC);
+        Txn txn = Txn.createLocalTxn(envImpl, config);
+        boolean success = false;
+        try {
+            synchronized (flushSynchronizer) {
+                pruneDatabaseHead(deleteEnd, deleteFileNum, txn);
+                flushToDatabase(txn);
+            }
+            txn.commit();
+            envImpl.flushLog(true /*fsync required*/);
+            success = true;
+        } finally {
+            if (!success) {
+                txn.abort();
+            }
+        }
+    }
+
+    /**
+     * Remove all information from the VLSNIndex for VLSNs >= deleteStart.
+     * Used by replica side syncup, when the log is truncated. Assumes that
+     * the vlsnIndex is quiescent, and no writes are happening, although
+     * the cleaner may read the vlsnIndex.
+     * @throws DatabaseException
+     */
+    public synchronized void truncateFromTail(VLSN deleteStart, long lastLsn)
+        throws DatabaseException {
+
+        logItemCache.clear();
+        VLSNRange currentRange = tracker.getRange();
+        if (currentRange.getLast().getNext().equals(deleteStart)) {
+
+            /*
+             * deleteStart directly follows what's in this range, no need
+             * to delete anything.
+             */
+            return;
+        }
+
+        /*
+         * The VLSNIndex has two parts -- the in-memory cache, and the
+         * database. Update the tracker, which holds the cache, first, and
+         * then update the database.
+         */
+        tracker.truncateFromTail(deleteStart, lastLsn);
+
+        TransactionConfig config = new TransactionConfig();
+
+        /*
+         * Be sure to commit synchronously so that changes to the vlsn
+         * index are persisted before the log is truncated. There are no
+         * feeders or repstream write operations at this time, so the use
+         * of COMMIT_SYNC does not introduce any lock contention. [#20702]
+         */
+        config.setDurability(Durability.COMMIT_SYNC);
+        Txn txn = Txn.createLocalTxn(envImpl, config);
+        boolean success = false;
+        try {
+            /*
+             * The tracker knows the boundary between VLSNs that are on
+             * disk and VLSNs that are within its cache, and maintains that
+             * info as mappings are added, and as the tracker/cache is
+             * flushed. But since we're potentially truncating mappings
+             * that were on disk, we need to update the tracker's notion of
+             * where the flush boundary is.
+             */
+            VLSN lastOnDisk = pruneDatabaseTail(deleteStart, lastLsn, txn);
+            tracker.setLastOnDiskVLSN(lastOnDisk);
+
+            /*
+             * Because mappings can come out of order, it's possible that
+             * buckets are not completely contiguous, and that truncating
+             * will result in the loss of the mapping for the end of the
+             * range. For example, suppose the buckets are like this:
+             *   On disk:    vlsn 13 -> bucket for vlsns 13-16
+             *   In tracker: vlsn 18 -> bucket for vlsns 18-23
+             * Truncating the vlsnIndex at 18 will make the last VLSN
+             * become 17, and removing the vlsn 18 bucket will result in no
+             * mapping for the new end of the range, vlsn 17. If so, the
+             * tracker should create a new mapping, of vlsn 17 -> lastLsn,
+             * to cap off the range and ensure that there are mappings for
+             * the start and end vlsns.
+             */
+            tracker.ensureRangeEndIsMapped(deleteStart.getPrev(), lastLsn);
+            flushToDatabase(txn);
+            txn.commit();
+            success = true;
+        } finally {
+            if (!success) {
+                txn.abort();
+            }
+        }
+    }
+
+    /**
+     * All range points (first, last, etc) ought to be seen as one
+     * consistent group. Because of that, VLSNIndex doesn't offer
+     * getLastVLSN, getFirstVLSN type methods, to discourage the
+     * possibility of retrieving range points across two different range
+     * sets.
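+     *
+     * A minimal sketch of the intended usage: take one range snapshot and
+     * read all related points from it, rather than calling back for each
+     * point (variable names illustrative):
+     *
+     *    VLSNRange snapshot = vlsnIndex.getRange();
+     *    VLSN first = snapshot.getFirst();
+     *    VLSN last = snapshot.getLast();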
+     */
+    public VLSNRange getRange() {
+        return tracker.getRange();
+    }
+
+    /**
+     * Returns the statistics associated with the VLSNIndex.
+     *
+     * @return the vlsn statistics.
+     */
+    public StatGroup getStats(StatsConfig config) {
+        return statistics.cloneGroup(config.getClear());
+    }
+
+    /**
+     * Return the nearest file number <= the log file that houses this
+     * VLSN. This method is meant to be efficient and will not incur I/O.
+     * If there is no mapping available, it does an approximation. The
+     * requested VLSN must be within the VLSNIndex range.
+     * @throws DatabaseException
+     */
+    public long getLTEFileNumber(VLSN vlsn)
+        throws DatabaseException {
+
+        VLSNBucket bucket = getLTEBucket(vlsn);
+        return bucket.getLTEFileNumber();
+    }
+
+    /**
+     * The caller must ensure that the requested VLSN is within the
+     * VLSNIndex range; we assume that there is a valid bucket.
+     */
+    public long getGTEFileNumber(VLSN vlsn)
+        throws DatabaseException {
+
+        VLSNBucket bucket = getGTEBucket(vlsn, null);
+        return bucket.getGTEFileNumber();
+    }
+
+    /**
+     * The requested VLSN must be within the VLSNIndex range; we assume
+     * that there is a valid bucket.
+     */
+    public long getGTELsn(VLSN vlsn) {
+        VLSNBucket bucket = getGTEBucket(vlsn, null);
+        return bucket.getGTELsn(vlsn);
+    }
+
+    /**
+     * Get the vlsnBucket that owns this VLSN. If there is no such bucket,
+     * get the bucket that follows this VLSN. Must always return a bucket.
+     *
+     * Because this is unsynchronized, there is actually a remote chance
+     * that this call could view the VLSNIndex while a truncateFromTail()
+     * is going on, and see the index while it is logically inconsistent,
+     * should there be non-contiguous buckets in the vlsnIndex. In that
+     * case, the caller will get an EnvironmentFailureException. Because
+     * the window is exceedingly small, requiring log cleaning and a
+     * rollback to collide in a very particular way, and because it is
+     * unpalatable to create synchronization hierarchy complexity for this
+     * tiny window, and because the problem is transient, this method is
+     * not synchronized. [#23491]
+     *
+     * @param currentBucketInUse is used only for debugging, to add to the
+     * error message if the GTEBucketFromDatabase fails.
+     * @throws DatabaseException
+     */
+    public VLSNBucket getGTEBucket(VLSN vlsn, VLSNBucket currentBucketInUse)
+        throws DatabaseException {
+
+        VLSNBucket bucket = tracker.getGTEBucket(vlsn);
+
+        if (bucket == null) {
+            return getGTEBucketFromDatabase(vlsn, currentBucketInUse);
+        }
+
+        return bucket;
+    }
+
+    /**
+     * Get the vlsnBucket that owns this VLSN. If there is no such bucket,
+     * get the bucket that precedes this VLSN. Must always return a bucket.
+     * @throws DatabaseException
+     */
+    VLSNBucket getLTEBucket(VLSN vlsn)
+        throws DatabaseException {
+
+        VLSNBucket bucket = tracker.getLTEBucket(vlsn);
+        if (bucket == null) {
+            return getLTEBucketFromDatabase(vlsn);
+        }
+        return bucket;
+    }
+
+    /**
+     * @return true if the status and key value indicate that this
+     * cursor is pointing at a valid bucket. Recall that the VLSNRange is
+     * stored in the same database at entry -1.
+     */
+    private boolean isValidBucket(OperationStatus status,
+                                  DatabaseEntry key) {
+
+        return ((status == OperationStatus.SUCCESS) &&
+                (LongBinding.entryToLong(key) != VLSNRange.RANGE_KEY));
+    }
+
+    /*
+     * Get the bucket that matches this VLSN. If this vlsn is Y, then we
+     * want the bucket at key X where X <= Y. If this method is called, we
+     * guarantee that a non-null bucket will be returned.
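+     *
+     * For example (illustrative keys): if the database holds buckets keyed
+     * at VLSN 10 and VLSN 20 and the target Y is VLSN 22, getSearchKeyRange
+     * finds nothing at key >= 22, so the search falls back to the last
+     * bucket, key 20, which is the correct X <= Y bucket.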
+     */
+    public VLSNBucket getLTEBucketFromDatabase(VLSN vlsn)
+        throws DatabaseException {
+
+        Cursor cursor = null;
+        Locker locker = null;
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        try {
+            locker = BasicLocker.createBasicLocker(envImpl);
+            cursor = makeCursor(locker);
+
+            if (positionBeforeOrEqual(cursor, vlsn, key, data)) {
+                return VLSNBucket.readFromDatabase(data);
+            }
+
+            /* Shouldn't get here. */
+            throw EnvironmentFailureException.unexpectedState
+                (envImpl, "Couldn't find bucket for LTE VLSN " + vlsn +
+                 " in database. tracker=" + tracker);
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+
+            if (locker != null) {
+                locker.operationEnd(true);
+            }
+        }
+    }
+
+    /**
+     * Return the bucket that holds a mapping >= this VLSN. If this method
+     * is called, we guarantee that a non-null bucket will be returned.
+     *
+     * At this point, we are sure that the target vlsn is within the range
+     * of vlsns held in the database. However, note that there is no
+     * explicit synchronization between this database search, and the
+     * VLSNTracker.flushToDatabase, which might be writing additional
+     * buckets to this database. This may affect the cases when the cursor
+     * search does not return an equality match on a bucket. [#20726]
+     *
+     * For example, suppose the database looks like this:
+     *
+     *   key=vlsn 10, data = bucket: vlsn 10 -> lsn 0x10/100
+     *                               vlsn 15 -> lsn 0x10/150
+     *   key=vlsn 20, data = bucket: vlsn 20 -> lsn 0x11/100
+     *                               vlsn 25 -> lsn 0x11/150
+     *
+     * If we are looking for a bucket for vlsn 22, there will not be a
+     * match from the call to cursor.getSearchKeyRange(key=22). The code
+     * that accounts for that will need to consider that new buckets may be
+     * flushed to the database while the search for a new bucket is going
+     * on. For example,
+     *
+     *   key=vlsn 30, data = bucket: vlsn 30 -> lsn 0x12/100
+     *                               vlsn 35 -> lsn 0x12/150
+     *
+     * may be written to the database while we are searching for a bucket
+     * that owns vlsn 22.
+     */
+    private VLSNBucket getGTEBucketFromDatabase(VLSN target,
+                                                VLSNBucket currentBucketInUse)
+        throws DatabaseException {
+
+        Cursor cursor = null;
+        Locker locker = null;
+        try {
+            locker = BasicLocker.createBasicLocker(envImpl);
+            cursor = makeCursor(locker);
+
+            /*
+             * Look at the bucket at key >= target. Will return null if
+             * there is no GTE bucket.
+             */
+            VLSNBucket bucket = examineGTEBucket(target, cursor);
+            if (bucket != null) {
+                return bucket;
+            }
+
+            /*
+             * We're here because we did not find a bucket >= target. Let's
+             * examine the last bucket in this database. We know that it
+             * will either be:
+             *
+             * 1) a bucket that's < target, but owns the mapping
+             * 2) if the index was appended to by
+             *    VLSNTracker.flushToDatabase while the search is going on,
+             *    the last bucket may be one that is > or >= target.
+             *
+             * Using the example above, the last bucket could be case 1:
+             *
+             *   a bucket that is < target 22:
+             *   key=vlsn 20, data = bucket: vlsn 20 -> lsn 0x11/100
+             *                               vlsn 25 -> lsn 0x11/150
+             *
+             * or case 2, a bucket that is >= target 22, because the index
+             * grew:
+             *   key=vlsn 30, data = bucket: vlsn 30 -> lsn 0x12/100
+             *                               vlsn 35 -> lsn 0x12/150
+             */
+            assert(TestHookExecute.doHookIfSet(searchGTEHook));
+            VLSNBucket endBucket = null;
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status = cursor.getLast(key, data,
+                                                    LockMode.DEFAULT);
+            if (isValidBucket(status, key)) {
+                endBucket = VLSNBucket.readFromDatabase(data);
+                if (endBucket.owns(target)) {
+                    return endBucket;
+                }
+
+                /*
+                 * If this end bucket is not the owner of the target VLSN,
+                 * we expect it to be a greaterThan bucket which was
+                 * inserted because of a concurrent
+                 * VLSNTracker.flushToDatabase call that did not exist when
+                 * we did the previous cursor.getSearchKeyRange (case 2).
+                 * In that case, we can search again for the owning bucket.
+                 */
+                if (endBucket.follows(target)) {
+                    bucket = examineGTEBucket(target, cursor);
+                    if (bucket != null) {
+                        return bucket;
+                    }
+                }
+            }
+
+            /*
+             * Shouldn't get here! There should have been a bucket in this
+             * database >= this target.
+             */
+
+            /* Dump the bucket database for debugging. */
+            int count = 0;
+            StringBuilder sb = new StringBuilder();
+            status = cursor.getFirst(key, data, LockMode.DEFAULT);
+            while (status == OperationStatus.SUCCESS) {
+                Long keyValue = LongBinding.entryToLong(key);
+                sb.append("key => " + keyValue + "\n");
+
+                if (count == 0) {
+                    VLSNRange range = VLSNRange.readFromDatabase(data);
+                    sb.append("range =>" + range + "\n");
+                } else {
+                    bucket = VLSNBucket.readFromDatabase(data);
+                    sb.append("bucket => " + bucket + "\n");
+                }
+
+                count++;
+                status = cursor.getNext(key, data, LockMode.DEFAULT);
+            }
+
+            LoggerUtils.severe(logger, envImpl, "VLSNIndex Dump: " +
+                               sb.toString());
+
+            throw EnvironmentFailureException.unexpectedState
+                (envImpl, "Couldn't find bucket for GTE VLSN " + target +
+                 " in database. EndBucket=" + endBucket +
+                 " currentBucket=" + currentBucketInUse +
+                 " tracker = " + tracker);
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+
+            if (locker != null) {
+                locker.operationEnd(true);
+            }
+        }
+    }
+
+    /**
+     * Find a bucket that is GTE the target, and see if that bucket is the
+     * owner. If it is not the owner, look at the previous bucket.
+     * @return null if no GTE bucket was found.
+     */
+    private VLSNBucket examineGTEBucket(VLSN target, Cursor cursor) {
+
+        /* getSearchKeyRange will return a bucket >= target if one exists */
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        LongBinding.longToEntry(target.getSequence(), key);
+        OperationStatus status =
+            cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
+
+        if (status == OperationStatus.SUCCESS) {
+            VLSNBucket bucket = VLSNBucket.readFromDatabase(data);
+            if (bucket.owns(target)) {
+                return bucket;
+            }
+
+            /*
+             * The bucket we found is greater than our target. Look at the
+             * previous one.
+             */
+            status = cursor.getPrev(key, data, LockMode.DEFAULT);
+            if (isValidBucket(status, key)) {
+                VLSNBucket prevBucket = VLSNBucket.readFromDatabase(data);
+                if (prevBucket.owns(target)) {
+                    return prevBucket;
+                }
+            }
+
+            /*
+             * There is no bucket that owns this target, return the greater
+             * one.
+             */
+            return bucket;
+        }
+
+        /* No bucket at a key >= the target. */
+        return null;
+    }
+
+    /*
+     * Position this cursor at the largest value bucket which is <= the
+     * target VLSN.
+     * @return true if there is a bucket that fits this criterion.
+     */
+    private boolean positionBeforeOrEqual(Cursor cursor,
+                                          VLSN vlsn,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data)
+        throws DatabaseException {
+
+        LongBinding.longToEntry(vlsn.getSequence(), key);
+        VLSNBucket bucket = null;
+
+        /* getSearchKeyRange will give us a bucket >= Y. */
+        OperationStatus status =
+            cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
+
+        if (status == OperationStatus.SUCCESS) {
+            bucket = VLSNBucket.readFromDatabase(data);
+            if (bucket.owns(vlsn)) {
+                return true;
+            }
+
+            /*
+             * The bucket we found is greater than our VLSN. Get the
+             * previous one.
+             */
+            status = cursor.getPrev(key, data, LockMode.DEFAULT);
+            if (isValidBucket(status, key)) {
+                return true;
+            }
+
+            /* Hey, nothing else in the database. */
+            return false;
+        }
+
+        /*
+         * There was no bucket >= Y. Let's find the last bucket in this
+         * database then. It should be a bucket that's < Y.
+         */
+        status = cursor.getLast(key, data, LockMode.DEFAULT);
+        if (isValidBucket(status, key)) {
+            return true;
+        }
+
+        return false;
+    }
+
+    /*
+     * Position this cursor at the smallest value bucket which is >= the
+     * target VLSN.
+     * @return true if there is a bucket that fits this criterion.
+     */
+    private boolean positionAfterOrEqual(Cursor cursor,
+                                         VLSN vlsn,
+                                         DatabaseEntry key,
+                                         DatabaseEntry data)
+        throws DatabaseException {
+
+        LongBinding.longToEntry(vlsn.getSequence(), key);
+        VLSNBucket bucket = null;
+
+        /* getSearchKeyRange will give us a bucket >= Y. */
+        OperationStatus status =
+            cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
+
+        if (status == OperationStatus.SUCCESS) {
+            bucket = VLSNBucket.readFromDatabase(data);
+            if (bucket.owns(vlsn)) {
+                return true;
+            }
+
+            /*
+             * This bucket is > our VLSN. Check the bucket before:
+             * - it might be a bucket that owns this VLSN,
+             * - the previous bucket might precede this VLSN,
+             * - the record before might be the range.
+             * One way or another, there should always be a record before
+             * any bucket -- it's the range.
+             */
+            status = cursor.getPrev(key, data, LockMode.DEFAULT);
+            assert status == OperationStatus.SUCCESS;
+
+            if (isValidBucket(status, key)) {
+                bucket = VLSNBucket.readFromDatabase(data);
+                if (bucket.owns(vlsn)) {
+                    return true;
+                }
+            }
+
+            /*
+             * Move back to the original bucket; all those preceding
+             * buckets were unsatisfactory.
+             */
+            status = cursor.getNext(key, data, LockMode.DEFAULT);
+            return true;
+        }
+
+        /*
+         * There was no bucket >= Y. Let's find the last bucket in this
+         * database then. It should be a bucket that's < Y.
+         */
+        status = cursor.getLast(key, data, LockMode.DEFAULT);
+        if (isValidBucket(status, key)) {
+            bucket = VLSNBucket.readFromDatabase(data);
+            if (bucket.owns(vlsn)) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    /*
+     * Remove all VLSN->LSN mappings <= deleteEnd.
+     */
+    private void pruneDatabaseHead(VLSN deleteEnd,
+                                   long deleteFileNum,
+                                   Txn txn)
+        throws DatabaseException {
+        Cursor cursor = null;
+
+        try {
+            cursor = makeCursor(txn);
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            if (!positionBeforeOrEqual(cursor, deleteEnd, key, data)) {
+                /* Nothing to do. */
+                return;
+            }
+
+            /*
+             * Delete this bucket and everything before this bucket.
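+             *
+             * For example (illustrative keys): with buckets keyed 10, 30
+             * and 50 and deleteEnd inside bucket 30, the cursor starts at
+             * bucket 30 and walks backward, deleting 30 and then 10,
+             * stopping at the range record (key -1, VLSNRange.RANGE_KEY).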
+             */
+
+            /* Avoid fetching the bucket itself, since it's not needed */
+            final DatabaseEntry noData = new DatabaseEntry();
+            noData.setPartial(0, 0, true);
+            int deleteCount = 0;
+
+            do {
+                long keyValue = LongBinding.entryToLong(key);
+                if (keyValue == VLSNRange.RANGE_KEY) {
+                    break;
+                }
+
+                OperationStatus status = cursor.delete();
+
+                deleteCount++;
+                if (status != OperationStatus.SUCCESS) {
+                    throw EnvironmentFailureException.unexpectedState
+                        (envImpl, "Couldn't delete, got status of " +
+                         status + " for delete of bucket " + keyValue +
+                         " deleteEnd=" + deleteEnd);
+                }
+            } while (cursor.getPrev(key, noData, LockMode.DEFAULT) ==
+                     OperationStatus.SUCCESS);
+
+            nHeadBucketsDeleted.add(deleteCount);
+
+            /*
+             * Check the first real bucket, and see if we need to insert
+             * a ghost bucket.
+             */
+            VLSN newStart = deleteEnd.getNext();
+            LongBinding.longToEntry(1, key);
+            OperationStatus status =
+                cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
+
+            /* No real buckets, nothing to adjust. */
+            if (status != OperationStatus.SUCCESS) {
+                return;
+            }
+
+            VLSNBucket firstBucket = VLSNBucket.readFromDatabase(data);
+            /* First bucket matches the range, nothing to adjust. */
+            if (firstBucket.getFirst().equals(newStart)) {
+                return;
+            }
+
+            if (firstBucket.getFirst().compareTo(newStart) < 0) {
+                throw EnvironmentFailureException.unexpectedState
+                    (envImpl, "newStart " + newStart +
+                     " should be < first bucket:" + firstBucket);
+            }
+
+            /*
+             * Add a ghost bucket so that there is a bucket to match the
+             * first item in the range.
+             */
+            long nextFile = envImpl.getFileManager().
+                getFollowingFileNum(deleteFileNum,
+                                    true /* forward */);
+            long lastPossibleLsn = firstBucket.getLsn(firstBucket.getFirst());
+            VLSNBucket placeholder =
+                new GhostBucket(newStart, DbLsn.makeLsn(nextFile, 0),
+                                lastPossibleLsn);
+            placeholder.writeToDatabase(envImpl, cursor);
+
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+    }
+
+    /*
+     * Remove all VLSN->LSN mappings >= deleteStart. Recall that the
+     * mappingDb is keyed by the first VLSN in the bucket. The replication
+     * stream will be quiescent when this is called. The caller must be
+     * sure that there are buckets in the database that cover deleteStart.
+     *
+     * @param lastLsn is the location, if known, of the vlsn at
+     * deleteStart - 1. If the location is not known, NULL_LSN is used. In
+     * that case the pruning may need to delete mappings < deleteStart, in
+     * order to keep the bucket capped with a legitimate lastLsn. If
+     * lastLsn is not NULL_LSN, then the deletion can precisely delete only
+     * mappings >= deleteStart, because it can always create a new
+     * deleteStart-1 -> lastLsn mapping to cap off the end range.
+     * @return lastVLSN left on disk.
+     */
+    VLSN pruneDatabaseTail(VLSN deleteStart, long lastLsn, Txn txn)
+        throws DatabaseException {
+
+        /*
+         * At this point, the tracker is accurate as to which vlsn is last
+         * on disk.
+         */
+        VLSN lastOnDiskVLSN = tracker.getLastOnDisk();
+        Cursor cursor = null;
+
+        try {
+            cursor = makeCursor(txn);
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            if (!positionAfterOrEqual(cursor, deleteStart, key, data)) {
+                /*
+                 * No bucket matches this criterion; everything on disk is
+                 * < deleteStart, nothing to do.
+                 */
+                return lastOnDiskVLSN;
+            }
+
+            /*
+             * Does this bucket straddle deleteStart? Then prune off part
+             * of the bucket.
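+             *
+             * For example (illustrative values): a bucket covering VLSNs
+             * 10-20 with deleteStart=15 is trimmed by removeFromTail() so
+             * that it covers 10-14, and its last VLSN becomes the new
+             * lastOnDiskVLSN.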
+ */ + VLSNBucket bucket = VLSNBucket.readFromDatabase(data); + + if (bucket.getFirst().compareTo(deleteStart) < 0) { + bucket.removeFromTail(deleteStart, lastLsn); + lastOnDiskVLSN = bucket.getLast(); + bucket.fillDataEntry(data); + + OperationStatus status = cursor.putCurrent(data); + + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "Couldn't update " + bucket); + } + + status = cursor.getNext(key, data, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + return lastOnDiskVLSN; + } + } + + /* Delete everything after this bucket. */ + + /* Avoid fetching the bucket itself, since it's not needed */ + final DatabaseEntry noData = new DatabaseEntry(); + noData.setPartial(0, 0, true); + int deleteCount = 0; + + do { + OperationStatus status = cursor.delete(); + + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "Couldn't delete after vlsn " + deleteStart + + " status=" + status); + } + deleteCount++; + + } while (cursor.getNext(key, noData, LockMode.DEFAULT) == + OperationStatus.SUCCESS); + + nTailBucketsDeleted.add(deleteCount); + + /* + * We've deleted some part of what was on disk. See what we're left + * with, and find the last mapping in the last bucket so we can say + * precisely which is the last vlsn mapped on disk and update the + * tracker cache. This last mapping may not be exactly + * deleteStart-1, if there is a gap in the mappings. + */ + OperationStatus status = cursor.getLast(key, data, + LockMode.DEFAULT); + + if (isValidBucket(status, key)) { + /* A valid bucket was returned */ + bucket = VLSNBucket.readFromDatabase(data); + lastOnDiskVLSN = bucket.getLast(); + } else { + /* + * No mappings in the database -- either there is nothing in + * the database, or we only have the special range record at + * key=-1 + */ + lastOnDiskVLSN = NULL_VLSN; + } + + } finally { + if (cursor != null) { + cursor.close(); + } + } + return lastOnDiskVLSN; + } + + /** + * At startup, we need to + * - get a handle onto the internal database which stores the VLSN index + * - read the latest on-disk version to initialize the tracker + * - find any VLSN->LSN mappings which were not saved in the on-disk + * version, and merge them in. These mappings weren't flushed because + * they occurred after the checkpoint end. They're found by the recovery + * procedure, and are added in now. + * + * This method will execute when the map is quiescent, and needs no + * synchronization. + */ + private void init(String mappingDbName, + int vlsnStride, + int vlsnMaxMappings, + int vlsnMaxDistance, + RecoveryInfo recoveryInfo) + throws DatabaseException { + + openMappingDatabase(mappingDbName); + + tracker = new VLSNTracker(envImpl, mappingDbImpl, vlsnStride, + vlsnMaxMappings, vlsnMaxDistance, + statistics); + + /* + * Put any in-memory mappings discovered during the recovery process + * into the fileMapperDb. That way, we'll preserve mappings that + * precede this recovery's checkpoint. + * + * For example, suppose the log looks like this: + * + * VLSN1 + * VLSN2 + * checkpoint start for this recovery, for the instantiation of the + * replicator + * checkpoint end for this recovery + * <- at this point in time, after the env comes up, we'll create + * the VLSN index. VLSN1 and VLSN2 were discovered during recovery and + * are recorded in memory. 
Normally a checkpoint flushes the VLSNIndex + * but the VLSNIndex isn't instantiated yet, because the VLSNIndex + * needs an initialized environment. + */ + merge((VLSNRecoveryTracker) recoveryInfo.vlsnProxy); + + /* Initialize ProtectedFileRange after VLSN range is determined. */ + VLSNRange range = tracker.getRange(); + if (!range.isEmpty()) { + long firstFile = getLTEFileNumber(range.getFirst()); + tracker.initProtectedFileRange(firstFile); + } + + /* + * When one or more reserved files are missing, truncate the index so + * it reflects the VLSN range for existing files. + */ + if (!recoveryInfo.lastMissingFileVLSN.isNull()) { + truncateFromHead( + recoveryInfo.lastMissingFileVLSN, + recoveryInfo.lastMissingFileNumber); + } + } + + /* + * Update this index, which was initialized with what's on disk, with + * mappings found during recovery. These mappings ought to either overlap + * what's on disk, or cover the range immediately after what's on disk. If + * it doesn't, the recovery mechanism, which flushes the mapping db at + * checkpoint is faulty and we've lost mappings. + * + * In other words, if this tracker holds the VLSN range a -> c, then the + * recovery tracker will have the VLSN range b -> d, where + * + * a <= b + * c <= d + * if c < b, then b == c+1 + * + * This method must be called when the index and tracker are quiescent, and + * there are no calls to track(). + * + * The recoveryTracker is the authoritative voice on what should be in the + * VLSN index. + */ + void merge(VLSNRecoveryTracker recoveryTracker) { + + if (recoveryTracker == null) { + flushToDatabase(Durability.COMMIT_SYNC); + return; + } + + if (recoveryTracker.isEmpty()) { + + /* + * Even though the recovery tracker has no mappings, it may have + * seen a rollback start that indicates that the VLSNIndex should + * be truncated. Setup the recovery tracker so it looks like + * it has a single mapping -- the matchpoint VLSN and LSN and + * proceed. Take this approach, rather than truncating the index, + * because we may need that matchpoint mapping to cap off the + * VLSN range. + * + * For example, suppose an index has mappings for VLSN 1, 5, 10, + * and the rollback is going to matchpoint 7. A pure truncation + * would lop off VLSN 10, making VLSN 5 the last mapping. We + * would then need to add on VLSN 7. + */ + VLSN lastMatchpointVLSN = recoveryTracker.getLastMatchpointVLSN(); + if (lastMatchpointVLSN.isNull()) { + return; + } + + /* + * Use a MATCHPOINT log entry to indicate that this is a syncable + * entry. This purposefully leaves the recovery tracker's range's + * lastTxnEnd null, so it will not overwrite the on disk + * tracker. This assumes that we will never rollback past the last + * txn end. + */ + recoveryTracker.track(lastMatchpointVLSN, + recoveryTracker.getLastMatchpointLsn(), + LogEntryType.LOG_MATCHPOINT.getTypeNum()); + } + + /* + * The mappings held in the recoveryTracker must either overlap what's + * on disk or immediately follow the last mapping on disk. If there + * is a gap between what is on disk and the recovery tracker, something + * went awry with the checkpoint scheme, which flushes the VLSN index + * at each checkpoint. We're in danger of losing some mappings. Most + * importantly, the last txnEnd VLSN in the range might not be right. + * + * The one exception is when the Environment has been converted from + * non-replicated and there are no VLSN entries in the VLSNIndex. In + * that case, it's valid that the entries seen from the recovery + * tracker may have a gap in VLSNs. 
For example, in a newly converted + * environment, the VLSN index range has NULL_VLSN as its last entry, + * but the first replicated log entry will start with 2. + * + * Note: EnvironmentImpl.needRepConvert() would more accurately convey + * the fact that this is the very first recovery following a + * conversion. But needRepConvert() on a replica is never true, and we + * need to disable this check on the replica's first recovery too. + */ + VLSN persistentLast = tracker.getRange().getLast(); + VLSN recoveryFirst = recoveryTracker.getRange().getFirst(); + if ((!(envImpl.isRepConverted() && persistentLast.isNull()) || + !envImpl.isRepConverted()) && + recoveryFirst.compareTo(persistentLast.getNext()) > 0) { + + throw EnvironmentFailureException.unexpectedState + (envImpl, "recoveryTracker should overlap or follow on disk " + + "last VLSN of " + persistentLast + " recoveryFirst= " + + recoveryFirst); + } + + VLSNRange currentRange = tracker.getRange(); + if (currentRange.getLast().getNext().equals(recoveryFirst)) { + /* No overlap, just append mappings found at recovery. */ + tracker.append(recoveryTracker); + flushToDatabase(Durability.COMMIT_SYNC); + return; + } + + /* + * The mappings in the recovery tracker should overwrite those in the + * VLSN index. + */ + TransactionConfig config = new TransactionConfig(); + config.setDurability(Durability.COMMIT_SYNC); + Txn txn = Txn.createLocalTxn(envImpl, config); + boolean success = false; + VLSN lastOnDiskVLSN; + try { + lastOnDiskVLSN = pruneDatabaseTail(recoveryFirst, DbLsn.NULL_LSN, + txn); + tracker.merge(lastOnDiskVLSN, recoveryTracker); + flushToDatabase(txn); + txn.commit(); + success = true; + } finally { + if (!success) { + txn.abort(); + } + } + } + + private void openMappingDatabase(String mappingDbName) + throws DatabaseException { + + final Locker locker = + Txn.createLocalAutoTxn(envImpl, new TransactionConfig()); + + try { + DbTree dbTree = envImpl.getDbTree(); + DatabaseImpl db = dbTree.getDb(locker, + mappingDbName, + null /* databaseHandle */, + false); + if (db == null) { + if (envImpl.isReadOnly()) { + /* This should have been caught earlier. */ + throw EnvironmentFailureException.unexpectedState + ("A replicated environment can't be opened read only."); + } + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReplicated(false); + db = dbTree.createInternalDb(locker, mappingDbName, dbConfig); + } + mappingDbImpl = db; + } finally { + locker.operationEnd(true); + } + } + + public synchronized void close() { + close(true); + } + + public synchronized void abnormalClose() { + close(false); + } + + public void close(boolean doFlush) + throws DatabaseException { + + try { + if (doFlush) { + flushToDatabase(Durability.COMMIT_SYNC); + } + + if (vlsnPutLatch != null) { + + /* + * This should be harmless because the feeders using the latch + * should all have been interrupted and shutdown. So just log + * this fact. + */ + vlsnPutLatch.terminate(); + LoggerUtils.fine + (logger, envImpl, + "Outstanding VLSN put latch cleared at close"); + } + } finally { + if (mappingDbImpl != null) { + envImpl.getDbTree().releaseDb(mappingDbImpl); + mappingDbImpl = null; + } + tracker.close(); + } + } + + /** For unit testing. */ + public DatabaseImpl getDatabaseImpl() { + return mappingDbImpl; + } + + /** + * Mappings are flushed to disk at close, and at checkpoints. 
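+     *
+     * A minimal sketch of a caller-side invocation (the durability choice
+     * is the caller's; COMMIT_SYNC is what close() uses):
+     *
+     *    vlsnIndex.flushToDatabase(Durability.COMMIT_SYNC);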
+ */ + public void flushToDatabase(Durability useDurability) { + + TransactionConfig config = new TransactionConfig(); + config.setDurability(useDurability); + Txn txn = Txn.createLocalTxn(envImpl, config); + boolean success = false; + try { + flushToDatabase(txn); + txn.commit(); + success = true; + } finally { + if (!success) { + txn.abort(); + } + } + } + + /** + * Mappings are flushed to disk at close, and at checkpoints. + */ + private void flushToDatabase(Txn txn) + throws DatabaseException { + synchronized (flushSynchronizer) { + tracker.flushToDatabase(mappingDbImpl, txn); + } + } + + /** + * For debugging and unit tests + * @throws DatabaseException + */ + public Map dumpDb(boolean display) { + + Cursor cursor = null; + Locker locker = null; + if (display) { + System.out.println(tracker); + } + + Map mappings = new HashMap(); + + try { + locker = BasicLocker.createBasicLocker(envImpl); + cursor = makeCursor(locker); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* + * The first item in the database is the VLSNRange. All subsequent + * items are VLSNBuckets. + */ + int count = 0; + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + + Long keyValue = LongBinding.entryToLong(key); + + if (display) { + System.out.println("key => " + keyValue); + } + if (count == 0) { + VLSNRange range = VLSNRange.readFromDatabase(data); + if (display) { + System.out.println("range =>"); + System.out.println(range); + } + } else { + VLSNBucket bucket = VLSNBucket.readFromDatabase(data); + for (long i = bucket.getFirst().getSequence(); + i <= bucket.getLast().getSequence(); + i++) { + VLSN v = new VLSN(i); + long lsn = bucket.getLsn(v); + + if (lsn != DbLsn.NULL_LSN) { + mappings.put(v, lsn); + } + } + if (display) { + System.out.println("bucket =>"); + bucket.dump(System.out); + } + } + count++; + } + } finally { + if (cursor != null) { + cursor.close(); + } + + if (locker != null) { + locker.operationEnd(true); + } + } + + return mappings; + } + + /** + * For DbStreamVerify utility. Verify the on-disk database, disregarding + * the cached tracker. + * @throws DatabaseException + */ + @SuppressWarnings("null") + public static void verifyDb(Environment env, + PrintStream out, + boolean verbose) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + Database db = env.openDatabase + (null, DbType.VLSN_MAP.getInternalName(), dbConfig); + Cursor cursor = null; + try { + if (verbose) { + System.out.println("Verifying VLSN index"); + } + + cursor = db.openCursor(null, CursorConfig.READ_COMMITTED); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* + * The first item in the database is the VLSNRange. All subsequent + * items are VLSNBuckets. + */ + int count = 0; + VLSNRange range = null; + VLSNBucket lastBucket = null; + Long lastKey = null; + VLSN firstVLSNSeen = VLSN.NULL_VLSN; + VLSN lastVLSNSeen = VLSN.NULL_VLSN; + while (cursor.getNext(key, data, null) == + OperationStatus.SUCCESS) { + + Long keyValue = LongBinding.entryToLong(key); + + if (count == 0) { + if (keyValue != VLSNRange.RANGE_KEY) { + out.println("Wrong key value for range! 
" + range); + } + range = VLSNRange.readFromDatabase(data); + if (verbose) { + out.println("range=>" + range); + } + } else { + VLSNBucket bucket = VLSNBucket.readFromDatabase(data); + if (verbose) { + out.print("key=> " + keyValue); + out.println(" bucket=>" + bucket); + } + + if (lastBucket != null) { + if (lastBucket.getLast().compareTo(bucket.getFirst()) + >= 0) { + out.println("Buckets out of order."); + out.println("Last = " + lastKey + "/" + + lastBucket); + out.println("Current = " + keyValue + "/" + + bucket); + } + } + + lastBucket = bucket; + lastKey = keyValue; + if ((firstVLSNSeen != null) && firstVLSNSeen.isNull()) { + firstVLSNSeen = bucket.getFirst(); + } + lastVLSNSeen = bucket.getLast(); + } + count++; + } + + if (count == 0) { + out.println("VLSNIndex not on disk"); + return; + } + + if (firstVLSNSeen.compareTo(range.getFirst()) != 0) { + out.println("First VLSN in bucket = " + firstVLSNSeen + + " and doesn't match range " + range.getFirst()); + } + + if (lastVLSNSeen.compareTo(range.getLast()) != 0) { + out.println("Last VLSN in bucket = " + lastVLSNSeen + + " and doesn't match range " + range.getLast()); + } + + } finally { + if (cursor != null) { + cursor.close(); + } + + db.close(); + } + } + + /* For unit test support. Index needs to be quiescent */ + @SuppressWarnings("null") + public synchronized boolean verify(boolean verbose) + throws DatabaseException { + + if (!tracker.verify(verbose)) { + return false; + } + + + VLSNRange dbRange = null; + ArrayList firstVLSN = new ArrayList(); + ArrayList lastVLSN = new ArrayList(); + final Locker locker = BasicLocker.createBasicLocker(envImpl); + Cursor cursor = null; + + /* + * Synchronize so we don't try to verify while the checkpointer + * thread is calling flushToDatabase on the vlsnIndex. + */ + synchronized (flushSynchronizer) { + /* + * Read the on-disk range and buckets. + * -The tracker and the database buckets should not intersect. + * -The on-disk range should be a subset of the tracker range. + */ + try { + cursor = makeCursor(locker); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* + * Read the on-disk range and all the buckets. + */ + OperationStatus status = + cursor.getFirst(key, data, LockMode.DEFAULT); + if (status == OperationStatus.SUCCESS) { + VLSNRangeBinding rangeBinding = new VLSNRangeBinding(); + dbRange = rangeBinding.entryToObject(data); + + /* Collect info about the buckets. */ + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + + VLSNBucket bucket = VLSNBucket.readFromDatabase(data); + + Long keyValue = LongBinding.entryToLong(key); + if (bucket.getFirst().getSequence() != keyValue) { + return false; + } + + firstVLSN.add(bucket.getFirst()); + lastVLSN.add(bucket.getLast()); + } + } + } finally { + if (cursor != null) { + cursor.close(); + } + + locker.operationEnd(true); + } + + /* + * Verify range. + */ + VLSNRange trackerRange = tracker.getRange(); + if (!trackerRange.verifySubset(verbose, dbRange)) { + return false; + } + } + + VLSN firstTracked = tracker.getFirstTracked(); + + /* The db range and the buckets need to be consistent. */ + VLSN firstOnDisk = null; + VLSN lastOnDisk = null; + if (firstVLSN.size() > 0) { + /* There are buckets in the database. 
*/ + lastOnDisk = lastVLSN.get(lastVLSN.size()-1); + firstOnDisk = firstVLSN.get(0); + + if (!VLSNTracker.verifyBucketBoundaries(firstVLSN, lastVLSN)) { + return false; + } + + /* + * A VLSNIndex invariant is that there is always a mapping for the + * first and last VLSN in the range. However, if the log cleaner + * lops off the head of the index, leaving a bucket gap at the + * beginning of the index, we break this invariant. For example, + * suppose the index has + * + * bucketA - VLSNs 10 + * no bucket, due to out of order mapping - VLSN 11, 12 + * bucket B - VLSNs 13-15 + * + * If the cleaner deletes VLSN 10->11, VLSN 12 will be the start + * of the range, and needs a bucket. We'll do this by adding a + * bucket placeholder. + */ + if (dbRange.getFirst().compareTo(firstOnDisk) != 0) { + dumpMsg(verbose, "Range doesn't match buckets " + + dbRange + " firstOnDisk = " + firstOnDisk); + return false; + } + + /* The tracker should know what the last VLSN on disk is. */ + if (!lastOnDisk.equals(tracker.getLastOnDisk())) { + dumpMsg(verbose, "lastOnDisk=" + lastOnDisk + + " tracker=" + tracker.getLastOnDisk()); + return false; + } + + if (!firstTracked.equals(NULL_VLSN)) { + + /* + * The last bucket VLSN should precede the first tracker VLSN. + */ + if (firstTracked.compareTo(lastOnDisk.getNext()) < 0) { + dumpMsg(verbose, "lastOnDisk=" + lastOnDisk + + " firstTracked=" + firstTracked); + return false; + } + } + } + return true; + } + + private void dumpMsg(boolean verbose, String msg) { + if (verbose) { + System.out.println(msg); + } + } + + /* + * For unit test support only. Can only be called when replication stream + * is quiescent. + */ + public boolean isFlushedToDisk() { + return tracker.isFlushedToDisk(); + } + + /** + * Ensure that the in-memory vlsn index encompasses all logged entries + * before it is flushed to disk. A No-Op for non-replicated systems. + * + * The problem is in the interaction of logging and VLSN + * tracking. Allocating an new VLSN and logging a replicated log entry is + * done within the log write latch, without any VLSNINdex + * synchronization. That must be done to keep the log write latch critical + * section as small as possible, and to avoid any lock hiearchy issues. + * + * The VLSNIndex is updated after the log write latch critical section. The + * VLSNIndex is flushed to disk by checkpoint, and it is assumed that this + * persistent version of the index encompasses all VLSN entries prior to + * checkpoint start. Since the logging of a new VLSN, and the flushing of + * the index are not atomic, it's possible that the checkpointer may start + * the flush of the vlsnIndex before the last vlsn's mapping is recorded + * in the index. To obey the requirement that the checkpointed vlsn index + * encompass all mappings < checkpoint start, check that the vlsn index + * is up to date before the flush. + * [#19754] + * + * awaitConsistency() works by using the same waitForVLSN() method used by + * the Feeders. WaitForVLSN asserts that all feeders are waiting on single + * vlsn, to assure that no feeders are left in limbo, awaiting a vlsn that + * has gone by. This contract is valid for the feeders, because they wait + * for vlsns sequentially, consuming each one by one. 
However, this ckpter + * awaitConsistency functionality uses the nextVLSNCounter, which can + * leapfrog ahead arbitrarily, in this case: + * + * vlsn range holds 1 -> N-1 + * Feeder is present, awaiting vlsn N + * thread A bumps vlsn to N and writes record under log write latch + * thread B bumps vlsn to N + 1 and writes record under log write latch + * ckpter awaits consistency, using N+1, while feeders are awaiting N + * thread A puts VLSN N outside log write latch + * thread B puts VLSN N+1 outside log write latch + * + * Because of this, the ckpter must distinguish between what it is really + * waiting on (VLSN N+1) and what is can next wait on to fulfil the + * feeder waiting contract (VLSN N) + */ + public void awaitConsistency() { + + /* VLSNIndex is not initialized and in use yet, no need to wait. */ + if (nextVLSNCounter == null) { + return; + } + + VLSN vlsnAllocatedBeforeCkpt = null; + VLSN endOfRangePlusOne; + while (true) { + + /* + * If we retry, get a fresh VLSN value if and only if the + * previously determined vlsnAllocatedBeforeCkpt was decremented + * due to a logging failure. + */ + if (vlsnAllocatedBeforeCkpt == null) { + vlsnAllocatedBeforeCkpt = new VLSN(getLatestAllocatedVal()); + } else { + VLSN latestAllocated = new VLSN(getLatestAllocatedVal()); + if (latestAllocated.compareTo(vlsnAllocatedBeforeCkpt) < 0) { + LoggerUtils.info(logger, envImpl, + "Reducing awaitConsistency VLSN from " + + vlsnAllocatedBeforeCkpt + " to " + + latestAllocated); + vlsnAllocatedBeforeCkpt = latestAllocated; + } + } + + /* + * [#20165] Since the await is based on the nextVLSNCounter, it's + * possible that a feeder is already waiting on earlier VLSN. + * Safeguard against that by only waiting for one more than + * the end of the range, to avoid conflict with feeders. + * See method comments. + */ + endOfRangePlusOne = tracker.getRange().getLast().getNext(); + if (vlsnAllocatedBeforeCkpt.compareTo(endOfRangePlusOne) < 0) { + /* + * All vlsns allocated before the checkpoint are now in the + * range. + */ + break; + } + + if (logger.isLoggable(Level.FINE)) { + LoggerUtils.fine(logger, envImpl, "awaitConsistency target=" + + endOfRangePlusOne + " allocatedBeforeCkpt=" + + vlsnAllocatedBeforeCkpt); + } + + try { + + waitForVLSN(endOfRangePlusOne, AWAIT_CONSISTENCY_MS); + if (endOfRangePlusOne.compareTo(vlsnAllocatedBeforeCkpt) >= 0) { + /* We reached the real target. */ + break; + } + + /* + * We got to the VLSN we waited for, but it's still earlier than + * vlsnAllocatedBeforeCkpt. Loop again. + */ + } catch (WaitTimeOutException e) { + LoggerUtils.severe(logger, envImpl, + "Retrying for vlsn index consistency " + + " before checkpoint, awaiting vlsn " + + endOfRangePlusOne + + " with ckpt consistency target of " + + vlsnAllocatedBeforeCkpt); + } catch (InterruptedException e) { + LoggerUtils.severe(logger, envImpl, + "Interrupted while awaiting vlsn index " + + "consistency before checkpoint, awaiting " + + "vlsn " + endOfRangePlusOne + + " with ckpt consistency target of " + + vlsnAllocatedBeforeCkpt + ", will retry"); + } + + /* + * If the environment was invalidated by other activity, get out of + * this loop because the vlsn we are waiting for may never + * come. Re-throw the invalidating exception to indicate that the + * checkpoint did not succeed. [#20919] + */ + envImpl.checkIfInvalid(); + } + } + + void setGTEHook(TestHook hook) { + searchGTEHook = hook; + } + + /** + * A cursor over the VLSNIndex. 
+ */ + private abstract static class VLSNScanner { + VLSNBucket currentBucket; + final VLSNIndex vlsnIndex; + + /* + * This is purely for assertions. The VLSNScanner assumes that + * getStartingLsn() is called once before getLsn() is called. + */ + int startingLsnInvocations; + + VLSNScanner(VLSNIndex vlsnIndex) { + this.vlsnIndex = vlsnIndex; + startingLsnInvocations = 0; + } + + public abstract long getStartingLsn(VLSN vlsn); + + /** + * @param vlsn We're requesting a LSN mapping for this vlsn + * @return If there is a mapping for this VLSN, return it, else return + * NULL_LSN. We assume that we checked that this VLSN is in the + * VLSNIndex's range. + */ + public abstract long getPreciseLsn(VLSN vlsn); + } + + /** + * Assumes that VLSNs are scanned backwards. May be used by syncup to + * optimally search for matchpoints. + */ + public static class BackwardVLSNScanner extends VLSNScanner { + + public BackwardVLSNScanner(VLSNIndex vlsnIndex) { + super(vlsnIndex); + } + + /* + * Use the >= mapping for the requested VLSN to find the starting lsn + * to use for a scan. This can only be used on a VLSN that is known to + * be in the range. + */ + @Override + public long getStartingLsn(VLSN vlsn) { + + startingLsnInvocations++; + currentBucket = vlsnIndex.getGTEBucket(vlsn, null); + return currentBucket.getGTELsn(vlsn); + } + + /** + * @see VLSNScanner#getPreciseLsn + */ + @Override + public long getPreciseLsn(VLSN vlsn) { + assert startingLsnInvocations == 1 : "startingLsns() called " + + startingLsnInvocations + " times"; + + /* + * Ideally, we have a bucket that has the mappings for this VLSN. + * If we don't, we attempt to get the next applicable bucket. + */ + if (currentBucket != null) { + if (!currentBucket.owns(vlsn)) { + + /* + * This bucket doesn't own the VLSN. Is it because (a) + * there's a gap and two buckets don't abut, or (b) because + * we walked off the end of the current bucket, and we need + * a new one? Distinguish case (a) by seeing if the current + * bucket will be needed for an upcoming VLSN. + */ + if (currentBucket.precedes(vlsn)) { + return DbLsn.NULL_LSN; + } + + /* + * Case B: We've walked off the end of the current + * bucket. + */ + currentBucket = null; + } + } + + /* + * We walked off the end of the currentBucket. Get a new bucket, + * finding the closest bucket that would hold this mapping. + */ + if (currentBucket == null) { + currentBucket = vlsnIndex.getLTEBucket(vlsn); + + /* + * The next bucket doesn't own this vlsn, which means that + * we're in a gap between two buckets. Note: + * vlsnIndex.LTEBucket guards against returning null. + */ + if (!currentBucket.owns(vlsn)) { + return DbLsn.NULL_LSN; + } + } + + assert currentBucket.owns(vlsn) : "vlsn = " + vlsn + + " currentBucket=" + currentBucket; + + /* We're in the right bucket. */ + return currentBucket.getLsn(vlsn); + } + } + + /** + * Disable critical eviction for all VLSNIndex cursors. [#18475] An + * improvement would be to enable eviction, and do all database operations + * that are in a loop asynchronously. + */ + private Cursor makeCursor(Locker locker) { + Cursor cursor = DbInternal.makeCursor(mappingDbImpl, + locker, + CursorConfig.DEFAULT); + DbInternal.getCursorImpl(cursor).setAllowEviction(false); + return cursor; + } + + /** + * Scans VLSNs in a forward direction, used by feeders. 
+ */ + public static class ForwardVLSNScanner extends VLSNScanner { + + + public ForwardVLSNScanner(VLSNIndex vlsnIndex) { + super(vlsnIndex); + } + + /** + * Use the <= mapping to the requested VLSN to find the starting lsn to + * use for a scan. This can only be used on a VLSN that is known to be + * in the range. + */ + @Override + public long getStartingLsn(VLSN vlsn) { + + startingLsnInvocations++; + currentBucket = vlsnIndex.getLTEBucket(vlsn); + return currentBucket.getLTELsn(vlsn); + } + + + /** + * @see VLSNScanner#getPreciseLsn + */ + @Override + public long getPreciseLsn(VLSN vlsn) { + return getLsn(vlsn, false /* approximate */); + } + + /** + * When doing an approximate search, the target vlsn may be a non-mapped + * vlsn within a bucket, or it may be between two different buckets. + * For example, suppose we have two buckets: + * + * vlsn 1 -> lsn 10 + * vlsn 5 -> lsn 50 + * vlsn 7 -> lsn 70 + * + * vlsn 20 -> lsn 120 + * vlsn 25 -> lsn 125 + * + * If the vlsn we are looking for is 4, the LTE lsn for an approximate + * return value will be vlsn 1-> lsn 10, in the same bucket. If we are + * looking for vlsn 9, the LTE lsn for an approximate return value will + * be vlsn 7->lsn 70, which is the last mapping in an earlier bucket. + * + * @param vlsn We're requesting a LSN mapping for this vlsn + * @return If there is a mapping for this VLSN, return it. If it does + * not exist, return the nearest non-null mapping, where nearest the + * <= LSN. We assume that we checked that this VLSN is in the + * VLSNIndex's range. + */ + public long getApproximateLsn(VLSN vlsn) { + return getLsn(vlsn, true /* approximate */); + } + + private long getLsn(VLSN vlsn, boolean approximate) { + + assert startingLsnInvocations == 1 : "startingLsns() called " + + startingLsnInvocations + " times"; + VLSNBucket debugBucket = currentBucket; + + /* + * Ideally, we have a bucket that has the mappings for this VLSN. + * If we don't, we attempt to get the next applicable bucket. + */ + if (currentBucket != null) { + if (!currentBucket.owns(vlsn)) { + + /* + * This bucket doesn't own the VLSN. Is it because (a) + * there's a gap and two buckets don't abut, or (b) because + * we walked off the end of the current bucket, and we need + * a new one? Distinguish case (a) by seeing if the current + * bucket will be needed for an upcoming VLSN. + */ + if (currentBucket.follows(vlsn)) { + /* Case A: No bucket available for this VLSN. */ + return approximate ? + findPrevLsn(vlsn) : DbLsn.NULL_LSN; + } + + /* Case B: We've walked off the end of the bucket. */ + currentBucket = null; + } + } + + /* + * We walked off the end of the currentBucket. Get a new bucket, + * finding the closest bucket that would hold this mapping. + */ + if (currentBucket == null) { + currentBucket = vlsnIndex.getGTEBucket(vlsn, debugBucket); + + /* + * The next bucket doesn't own this vlsn, which means that + * we're in a gap between two buckets. Note: + * vlsnIndex.getGTEBucket guards against returning null. + */ + if (!currentBucket.owns(vlsn)) { + return approximate ? findPrevLsn(vlsn) : DbLsn.NULL_LSN; + } + } + + assert currentBucket.owns(vlsn) : "vlsn = " + vlsn + + " currentBucket=" + currentBucket; + + if (approximate) { + + /* + * We're in the right bucket, and it owns this + * VLSN. Nevertheless, the bucket may or may not contain a + * mapping for this VLSN, so return the LTE version mapping. 
+ */ + return currentBucket.getLTELsn(vlsn); + } + + return currentBucket.getLsn(vlsn); + } + + /* + * Find the lsn mapping that precedes the target. This assumes that + * no bucket owns the target vlsn -- that it's a vlsn that falls + * between buckets. + */ + private long findPrevLsn(VLSN target) { + VLSNBucket prevBucket = vlsnIndex.getLTEBucket(target); + assert !prevBucket.owns(target) : "target=" + target + + "prevBucket=" + prevBucket + " currentBucket=" + currentBucket; + return prevBucket.getLastLsn(); + } + } + + /** + * Associates the logItem with the latch, so that it's readily available + * when the latch is released. + */ + public static class VLSNAwaitLatch extends CountDownLatch { + /* The LogItem whose addition to the VLSN released the latch. */ + private LogItem logItem = null; + private boolean terminated = false; + + public VLSNAwaitLatch() { + super(1); + } + + public long getTriggerLSN() { + return logItem.lsn; + } + + public VLSN getTriggerVLSN() { + return logItem.header.getVLSN(); + } + + public void setLogItem(LogItem logItem) { + this.logItem = logItem; + } + + /** + * Returns the log item that caused the latch to be released. It's only + * meaningful after the latch has been released. + * + * @return log item or null if the latch timed out or it's wait was + * terminated + */ + public LogItem getLogItem() { + return logItem; + } + + /* Free up any waiters on this latch and shutdown. */ + public void terminate() { + terminated = true; + countDown(); + } + + public boolean isTerminated() { + return terminated; + } + } + + /* + * An exception primarily intended to implement non-local control flow + * upon a vlsn wait latch timeout. + */ + @SuppressWarnings("serial") + static public class WaitTimeOutException extends Exception { + + @Override + /* Eliminate unnecessary overhead. */ + public synchronized Throwable fillInStackTrace(){return null;} + } +} diff --git a/src/com/sleepycat/je/rep/vlsn/VLSNIndexStatDefinition.java b/src/com/sleepycat/je/rep/vlsn/VLSNIndexStatDefinition.java new file mode 100644 index 0000000..41fed80 --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/VLSNIndexStatDefinition.java @@ -0,0 +1,75 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.vlsn; + +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Statistics associated with the VLSN Index used by HA. + */ +public class VLSNIndexStatDefinition { + + public static final String GROUP_NAME = "VLSNIndex"; + + public static final String GROUP_DESC = "VLSN Index related stats."; + + public static final String N_HITS_NAME = + "nHits"; + public static final String N_HITS_DESC = + "Number of hits to the VLSN index cache"; + public static final StatDefinition N_HITS = + new StatDefinition( + N_HITS_NAME, + N_HITS_DESC); + + public static final String N_MISSES_NAME = + "nMisses"; + public static final String N_MISSES_DESC = + "Number of log entry misses upon access to the VLSN index cache. 
Upon" + + " a miss the Feeder will fetch the log enty from the log buffer, " + + "or the log file."; + public static final StatDefinition N_MISSES = + new StatDefinition( + N_MISSES_NAME, + N_MISSES_DESC); + + public static final String N_HEAD_BUCKETS_DELETED_NAME = + "nHeadBucketsDeleted"; + public static final String N_HEAD_BUCKETS_DELETED_DESC = + "Number of VLSN index buckets deleted at the head(the low end) of the" + + " VLSN index."; + public static final StatDefinition N_HEAD_BUCKETS_DELETED = + new StatDefinition( + N_HEAD_BUCKETS_DELETED_NAME, + N_HEAD_BUCKETS_DELETED_DESC); + + public static final String N_TAIL_BUCKETS_DELETED_NAME = + "nTailBucketsDeleted"; + public static final String N_TAIL_BUCKETS_DELETED_DESC = + "Number of VLSN index buckets deleted at the tail(the high end) of " + + "the index."; + public static final StatDefinition N_TAIL_BUCKETS_DELETED = + new StatDefinition( + N_TAIL_BUCKETS_DELETED_NAME, + N_TAIL_BUCKETS_DELETED_DESC); + + public static final String N_BUCKETS_CREATED_NAME = + "nBucketsCreated"; + public static final String N_BUCKETS_CREATED_DESC = + "Number of new VLSN buckets created in the VLSN index."; + public static final StatDefinition N_BUCKETS_CREATED = + new StatDefinition( + N_BUCKETS_CREATED_NAME, + N_BUCKETS_CREATED_DESC); +} diff --git a/src/com/sleepycat/je/rep/vlsn/VLSNRange.java b/src/com/sleepycat/je/rep/vlsn/VLSNRange.java new file mode 100644 index 0000000..a3e1a27 --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/VLSNRange.java @@ -0,0 +1,413 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.vlsn; + +import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; + +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.VLSN; + +public class VLSNRange { + + /* On-disk version. */ + private static final int VERSION = 1; + public static final long RANGE_KEY = -1L; + static final VLSNRange EMPTY = new VLSNRange(NULL_VLSN, NULL_VLSN, + NULL_VLSN, NULL_VLSN); + + /* + * Information about the range of contiguous VLSN entries on this node. + * All the range values must be viewed together, to ensure a consistent set + * of values. + */ + private final VLSN first; + private final VLSN last; + private final byte commitType = LogEntryType.LOG_TXN_COMMIT.getTypeNum(); + private final byte abortType = LogEntryType.LOG_TXN_ABORT.getTypeNum(); + + /* + * VLSN of the last log entry in our VLSN range that can serve as a sync + * matchpoint. 
+ * + * Currently lastSync and lastTxnEnd are the same value because a + * sync point is defined as a commit or abort; however, in the future the + * Matchpoint log entry may also be used for sync points and and then + * lastSync may be ahead of lastTxnEnd. + */ + private final VLSN lastSync; + private final VLSN lastTxnEnd; + + private VLSNRange(VLSN first, + VLSN last, + VLSN lastSync, + VLSN lastTxnEnd) { + this.first = first; + this.last = last; + this.lastSync = lastSync; + this.lastTxnEnd = lastTxnEnd; + } + + /** + * When the range is written out by the VLSNTracker, we must always be sure + * to update the tracker's lastVSLNOnDisk field. Return the last VLSN in + * the range as part of this method, to help ensure that update. + * @param envImpl + * @param dbImpl + * @param txn + */ + VLSN writeToDatabase(final EnvironmentImpl envImpl, + final DatabaseImpl dbImpl, + Txn txn) { + + VLSNRangeBinding binding = new VLSNRangeBinding(); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + LongBinding.longToEntry(RANGE_KEY, key); + binding.objectToEntry(this, data); + + Cursor c = null; + try { + c = DbInternal.makeCursor(dbImpl, + txn, + CursorConfig.DEFAULT); + DbInternal.getCursorImpl(c).setAllowEviction(false); + + OperationStatus status = c.put(key, data); + if (status != OperationStatus.SUCCESS) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "Unable to write VLSNRange, status = " + status); + } + } finally { + if (c != null) { + c.close(); + } + } + return last; + } + + public static VLSNRange readFromDatabase(DatabaseEntry data) { + VLSNRangeBinding binding = new VLSNRangeBinding(); + VLSNRange range = binding.entryToObject(data); + + return range; + } + + public VLSN getFirst() { + return first; + } + + public VLSN getLast() { + return last; + } + + public VLSN getLastSync() { + return lastSync; + } + + public VLSN getLastTxnEnd() { + return lastTxnEnd; + } + + /** + * Return the VLSN that should come after the lastVLSN. + */ + VLSN getUpcomingVLSN() { + return last.getNext(); + } + + /** + * @return true if this VLSN is within the range described by this class. + */ + public boolean contains(final VLSN vlsn) { + if (first.equals(NULL_VLSN)) { + return false; + } + + if ((first.compareTo(vlsn) <= 0) && (last.compareTo(vlsn) >= 0)) { + return true; + } + + return false; + } + + /** + * A new VLSN->LSN mapping has been registered in a bucket. Update the + * range accordingly. + */ + VLSNRange getUpdateForNewMapping(final VLSN newValue, + final byte entryTypeNum) { + VLSN newFirst = first; + VLSN newLast = last; + VLSN newLastSync = lastSync; + VLSN newLastTxnEnd = lastTxnEnd; + + if (first.equals(NULL_VLSN) || first.compareTo(newValue) > 0) { + newFirst = newValue; + } + + if (last.compareTo(newValue) < 0) { + newLast = newValue; + } + + if (LogEntryType.isSyncPoint(entryTypeNum)) { + if (lastSync.compareTo(newValue) < 0) { + newLastSync = newValue; + } + } + + if ((entryTypeNum == commitType) || (entryTypeNum == abortType)) { + if (lastTxnEnd.compareTo(newValue) < 0) { + newLastTxnEnd = newValue; + } + } + + return new VLSNRange(newFirst, newLast, newLastSync, newLastTxnEnd); + } + + /** + * Incorporate the information in "other" in this range. 
+ */ + VLSNRange getUpdate(final VLSNRange other) { + VLSN newFirst = getComparison(first, other.first, + other.first.compareTo(first) < 0); + VLSN newLast = getComparison(last, other.last, + other.last.compareTo(last) > 0); + VLSN newLastSync = + getComparison(lastSync, other.lastSync, + other.lastSync.compareTo(lastSync) > 0); + VLSN newLastTxnEnd = + getComparison(lastTxnEnd, other.lastTxnEnd, + other.lastTxnEnd.compareTo(lastTxnEnd) > 0); + return new VLSNRange(newFirst, newLast, newLastSync, newLastTxnEnd); + } + + /** + * The "other" range is going to be appended to this range. + */ + VLSNRange merge(final VLSNRange other) { + VLSN newLast = getComparison(last, other.last, true); + VLSN newLastSync = getComparison(lastSync, other.lastSync, true); + VLSN newLastTxnEnd = getComparison(lastTxnEnd, other.lastTxnEnd, true); + return new VLSNRange(first, newLast, newLastSync, newLastTxnEnd); + } + + /* + * We can assume that deleteStart.getPrev() is either NULL_VLSN or is + * on a sync-able boundary. We can also assume that lastTxnEnd has not + * been changed. And lastly, we can assume that this range is not empty, + * since that was checked earlier on. + */ + VLSNRange shortenFromEnd(final VLSN deleteStart) { + VLSN newLast = deleteStart.getPrev(); + + assert newLast.compareTo(lastTxnEnd) >= 0 : + "Can't truncate at " + newLast + + " because it overwrites a commit at " + lastTxnEnd; + + if (newLast.equals(NULL_VLSN)) { + return new VLSNRange(NULL_VLSN, NULL_VLSN, NULL_VLSN, NULL_VLSN); + } + return new VLSNRange(first, newLast, newLast, lastTxnEnd); + } + + /* + * @return an new VLSNRange which starts at deleteEnd.getNext() + */ + VLSNRange shortenFromHead(final VLSN deleteEnd) { + + VLSN newFirst = null; + VLSN newLast = last; + if (deleteEnd.compareTo(last) == 0) { + newFirst = NULL_VLSN; + newLast = NULL_VLSN; + } else { + newFirst = deleteEnd.getNext(); + } + + /* We shouldn't be truncating the last sync */ + assert (lastSync.equals(NULL_VLSN) || + lastSync.compareTo(newFirst) >= 0 ) : + "Can't truncate lastSync= " + lastSync + " deleteEnd=" + deleteEnd; + + VLSN newTxnEnd = lastTxnEnd.compareTo(newFirst) > 0 ? + lastTxnEnd : NULL_VLSN; + + return new VLSNRange(newFirst, newLast, lastSync, newTxnEnd); + } + + /** + * Compare two VLSNs, normalizing for NULL_VLSN. If one of them is + * NULL_VLSN, return the other one. If neither are NULL_VLSN, use the + * result of the comparison, expressed as the value of "better" to decide + * which one to return. If "better" is true, return "otherVLSN". + */ + private VLSN getComparison(final VLSN thisVLSN, + final VLSN otherVLSN, + final boolean better) { + if (thisVLSN.equals(NULL_VLSN)) { + return otherVLSN; + } + + if (otherVLSN.equals(NULL_VLSN)) { + return thisVLSN; + } + + if (better) { + return otherVLSN; + } + + return thisVLSN; + } + + boolean isEmpty() { + return first.equals(NULL_VLSN); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("first=").append(first); + sb.append(" last=").append(last); + sb.append(" sync=").append(lastSync); + sb.append(" txnEnd=").append(lastTxnEnd); + return sb.toString(); + } + + /** + * Marshals a VLSNRange to a byte buffer to store in the database. 
+ */ + static class VLSNRangeBinding extends TupleBinding { + + @Override + public VLSNRange entryToObject(final TupleInput ti) { + int onDiskVersion = ti.readPackedInt(); + if (onDiskVersion != VERSION) { + throw EnvironmentFailureException.unexpectedState + ("Don't expect version diff " + + "on_disk=" + onDiskVersion + + " source=" + + VERSION); + } + + VLSNRange range = + new VLSNRange(new VLSN(ti.readPackedLong()), // first + new VLSN(ti.readPackedLong()), // last + new VLSN(ti.readPackedLong()), // lastSync + new VLSN(ti.readPackedLong())); // lastTxnEnd + return range; + } + + @Override + public void objectToEntry(final VLSNRange range, + final TupleOutput to) { + /* No need to store the file number -- that's the key */ + to.writePackedInt(VERSION); + to.writePackedLong(range.getFirst().getSequence()); + to.writePackedLong(range.getLast().getSequence()); + to.writePackedLong(range.getLastSync().getSequence()); + to.writePackedLong(range.getLastTxnEnd().getSequence()); + } + } + + boolean verify(final boolean verbose) { + if (first.equals(NULL_VLSN)) { + if (!(last.equals(NULL_VLSN) && + (lastSync.equals(NULL_VLSN)) && + (lastTxnEnd.equals(NULL_VLSN)))) { + if (verbose) { + System.out.println("Range: All need to be NULL_VLSN " + + this); + } + return false; + } + } else { + if (first.compareTo(last) > 0) { + if (verbose) { + System.out.println("Range: first > last " + this); + } + return false; + } + + if (!lastSync.equals(NULL_VLSN)) { + if (lastSync.compareTo(last) > 0) { + if (verbose) { + System.out.println("Range: lastSync > last " + this); + } + return false; + } + } + + if (!lastTxnEnd.equals(NULL_VLSN)) { + if (lastTxnEnd.compareTo(last) > 0) { + if (verbose) { + System.out.println("Range: lastTxnEnd > last " + this); + } + return false; + } + } + } + return true; + } + + /** + * @return true if subsetRange is a subset of this range. + */ + boolean verifySubset(final boolean verbose, final VLSNRange subsetRange) { + if (subsetRange == null) { + return true; + } + + if ((subsetRange.getFirst().equals(NULL_VLSN)) && + (subsetRange.getLast().equals(NULL_VLSN)) && + (subsetRange.getLastSync().equals(NULL_VLSN)) && + (subsetRange.getLastTxnEnd().equals(NULL_VLSN))) { + return true; + } + + if (first.compareTo(subsetRange.getFirst()) > 0) { + if (verbose) { + System.out.println("Range: subset must be LTE: this=" + this + + " subset=" + subsetRange); + } + return false; + } + + if (first.equals(NULL_VLSN)) { + return true; + } + + if (last.compareTo(subsetRange.getLast()) < 0) { + if (verbose) { + System.out.println("Range: last must be GTE: this=" + this + + " subsetRange=" + subsetRange); + } + return false; + } + return true; + } +} diff --git a/src/com/sleepycat/je/rep/vlsn/VLSNRecoveryTracker.java b/src/com/sleepycat/je/rep/vlsn/VLSNRecoveryTracker.java new file mode 100644 index 0000000..0bab700 --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/VLSNRecoveryTracker.java @@ -0,0 +1,135 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.vlsn; + +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.recovery.VLSNRecoveryProxy; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * The VLSNRecoveryTracker is used as a transient tracker at recovery time. + * It gathers up VLSN->LSN mappings that are in the log, but not persisted to + * the mapping database. It has somewhat different preconditions from the + * parent VLSNTracker which affect the semantics of tracking. + * + * Unlike VLSNTracker, the track() method is guaranteed to be executed in a + * serial fashion. In addition, this tracker needs to "discover" where the + * range for this tracker should start. For example, suppose the on-disk + * VLSNIndex covers VLSNs 25 -> 200. Also, suppose that the recovery portion of + * the log holds VLSNs 190 -> 210 (an overlap of 190 -> 200) + * + * The VLSNIndex will be initialized with a range of 25 -> 200. We want the + * recovery tracker to hold VLSN mappings from 190 -> 210. We don't want it to + * just consult its range to determine where the next bucket starts. If we did + * that, the recovery tracker would start at VLSN 1. + * + * The VLSNRecoveryTracker must account for rollbacks and invisible log + * entries. It has the authoritative view of what is in the recovery part of the + * log and will override what is in the on-disk tracker. At merge time, the + * regular VLSNIndex must consult the VLSNRecoveryTracker's notion of what the + * highest VLSN value is. + * + * If we see a RollbackStart, the end of range is abruptly reset back to the + * matchpoint start. If we see non-invisible entries, the end of range may be + * incrementing. For example, suppose the log has:recovery tracker + * + * VLSN 10 tracks 10 + * VLSN 11 (invisible) skips + * VLSN 12 (invisible) skips + * VLSN 13 (invisible) skips + * rollback start to VLSN 9 truncates to 9, clear everything + * VLSN 10 tracks 10 + * VLSN 11 tracks 11 + * VLSN 12 tracks 12 + * rollback start to VLSN 11 truncates to 11 + * + * Suppose the on-disk VLSNIndex holds mappings for VLSN 1->13. A merge of the + * VLSN index and the recovery tracker would + * 1) truncate any VLSN > than the recovery tracker's high point -- so the + * VLSN index will drop mappings 12, 13 + * 2) will replace any VLSN index mappings with those held in the recovery + * tracker. + * The VLSNIndex should map 1 -> 11, with the 10 and 11 mapping provided by the + * recovery tracker. 
+ */ +public final class VLSNRecoveryTracker + extends VLSNTracker implements VLSNRecoveryProxy { + + private byte rollbackType; + private VLSN lastMatchpointVLSN = VLSN.NULL_VLSN; + private long lastMatchpointLsn = DbLsn.NULL_LSN; + + public VLSNRecoveryTracker(EnvironmentImpl envImpl, + int stride, + int maxMappings, + int maxDistance) { + super(envImpl, stride, maxMappings, maxDistance); + + rollbackType = LogEntryType.LOG_ROLLBACK_START.getTypeNum(); + } + + /* VLSNRecoveryProxy.trackMapping */ + @Override + public void trackMapping(long lsn, + LogEntryHeader currentEntryHeader, + LogEntry targetLogEntry) { + + if (currentEntryHeader.getReplicated() && + !currentEntryHeader.isInvisible()) { + + VLSN vlsn = currentEntryHeader.getVLSN(); + track(vlsn, lsn, currentEntryHeader.getType()); + } else if (currentEntryHeader.getType() == rollbackType) { + RollbackStart rb = (RollbackStart) + ((SingleItemEntry) targetLogEntry).getMainItem(); + + lastMatchpointVLSN = rb.getMatchpointVLSN(); + lastMatchpointLsn = rb.getMatchpoint(); + if (range.getFirst().isNull()) { + return; + } + + if (range.getFirst().compareTo(lastMatchpointVLSN) > 0) { + /* Throw away all mappings. */ + initEmpty(); + return; + } + + if (range.getLast().compareTo(lastMatchpointVLSN) <= 0) { + /* Nothing to truncate. */ + return; + } + + truncateFromTail(lastMatchpointVLSN.getNext(), lastMatchpointLsn); + } + } + + public boolean isEmpty() { + return bucketCache.size() == 0; + } + + public VLSN getLastMatchpointVLSN() { + return lastMatchpointVLSN; + } + + public long getLastMatchpointLsn() { + return lastMatchpointLsn; + } +} diff --git a/src/com/sleepycat/je/rep/vlsn/VLSNTracker.java b/src/com/sleepycat/je/rep/vlsn/VLSNTracker.java new file mode 100644 index 0000000..46644cd --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/VLSNTracker.java @@ -0,0 +1,1137 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.vlsn; + +import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; + +import java.util.ArrayList; +import java.util.NoSuchElementException; +import java.util.SortedMap; +import java.util.TreeMap; + +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.cleaner.FileProtector; +import com.sleepycat.je.cleaner.FileProtector.ProtectedFileSet; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.vlsn.VLSNRange.VLSNRangeBinding; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; + +/** + * See @link{VLSNIndex} for an overview of the mapping system. The VLSNTracker + * packages the VLSNRange and the cached, in-memory VLSNBuckets. + * + * The tracker has a notion of the "currentBucket", which is the one receiving + * updates. All other cached buckets are finished and are awaiting a write to + * the database. Those finished buckets will only be updated in special + * circumstances, such as log cleaning or replication stream truncation, when + * we can assume that there will no readers viewing the buckets. + */ +class VLSNTracker { + private final EnvironmentImpl envImpl; + + /* The first mapping that is is the tracker cache. */ + private VLSN firstTrackedVLSN = NULL_VLSN; + + /* The last VLSN that is stored on disk. */ + private VLSN lastOnDiskVLSN = NULL_VLSN; + + /* + * A cache of buckets that are not on disk. The map key is the bucket's + * first VLSN. + */ + SortedMap bucketCache; + + /* + * The range should always be updated through assignment to a new + * VLSNRange, to ensure that the values stay consistent. The current bucket + * mutex must be taken in order to update this value, so that the current + * bucket is updated before the range is updated. When reading range + * fields, the caller must be sure to get a reference to a single range + * instance, so that there's no danger of getting inconsistent values. + */ + protected volatile VLSNRange range; + private boolean rangeTruncated; + + /* + * ProtectedFileRange protects files in 'range' from being deleted. The + * range start is changed during initialization and when the head of the + * index is truncated. It is changed while synchronized on 'this' to + * guarantees that files are not deleted while the range head is locked. + */ + private final FileProtector.ProtectedFileRange protectedFileRange; + + /* + * In the future, we may want to vary stride, maxMappings and maxDistance + * dynamically, in reaction to efficient the mappings look. + */ + private final int stride; + private final int maxMappings; + private final int maxDistance; + + private final LongStat nBucketsCreated; + + /* + * Create an VLSNTracker, with the range initialized from the mapping db. 
+ */ + VLSNTracker(EnvironmentImpl envImpl, + DatabaseImpl mappingDbImpl, + int stride, + int maxMappings, + int maxDistance, + StatGroup statistics) + throws DatabaseException { + + this.stride = stride; + this.maxMappings = maxMappings; + this.maxDistance = maxDistance; + this.envImpl = envImpl; + nBucketsCreated = + new LongStat(statistics, VLSNIndexStatDefinition.N_BUCKETS_CREATED); + + bucketCache = new TreeMap<>(); + + /* + * Protect all files initially. The range lower bound will be advanced + * later during initialization. This special ProtectedFileRange does + * not impact LogSizeStats -- see FileProtector. + */ + final FileProtector fileProtector = envImpl.getFileProtector(); + + protectedFileRange = fileProtector.protectFileRange( + FileProtector.VLSN_INDEX_NAME, 0 /*rangeStart*/, + true /*protectVlsnIndex*/); + + fileProtector.setVLSNIndexProtectedFileRange(protectedFileRange); + + /* Read the current range information. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + LongBinding.longToEntry(VLSNRange.RANGE_KEY, key); + + Cursor cursor = null; + Locker locker = null; + try { + locker = BasicLocker.createBasicLocker(envImpl); + cursor = DbInternal.makeCursor(mappingDbImpl, + locker, + CursorConfig.DEFAULT); + DbInternal.getCursorImpl(cursor).setAllowEviction(false); + + OperationStatus status = cursor.getSearchKey(key, data, + LockMode.DEFAULT); + if (status == OperationStatus.SUCCESS) { + /* initialize the range from the database. */ + VLSNRangeBinding rangeBinding = new VLSNRangeBinding(); + range = rangeBinding.entryToObject(data); + lastOnDiskVLSN = range.getLast(); + } else if (status == OperationStatus.NOTFOUND) { + /* No mappings exist before. */ + range = VLSNRange.EMPTY; + } else { + throw EnvironmentFailureException.unexpectedState + ("VLSNTracker init: status=" + status); + } + } finally { + if (cursor != null) { + cursor.close(); + } + + if (locker != null) { + locker.operationEnd(true); + } + } + } + + /* + * Create an empty VLSNTracker. Used during recovery. + */ + VLSNTracker(EnvironmentImpl envImpl, + int stride, + int maxMappings, + int maxDistance) { + + this.envImpl = envImpl; + this.stride = stride; + this.maxMappings = maxMappings; + this.maxDistance = maxDistance; + this.protectedFileRange = null; + + /* Set up a temporary stat group for use during recovery */ + StatGroup statistics = + new StatGroup(VLSNIndexStatDefinition.GROUP_NAME, + VLSNIndexStatDefinition.GROUP_DESC); + nBucketsCreated = + new LongStat(statistics, + VLSNIndexStatDefinition.N_BUCKETS_CREATED); + + initEmpty(); + } + + void initEmpty() { + bucketCache = new TreeMap<>(); + range = VLSNRange.EMPTY; + } + + /** + * Return a bucket for reading a mapping for this VLSN. If vlsn is > + * lastOnDisk, a bucket is guaranteed to be returned. + */ + synchronized VLSNBucket getGTEBucket(VLSN vlsn) { + + if (lastOnDiskVLSN.compareTo(vlsn) >= 0) { + /* The bucket is in the database, not the cache. */ + return null; + } + + Long pivot = vlsn.getSequence() + 1; + SortedMap head = bucketCache.headMap(pivot); + VLSNBucket prevBucket = null; + if (head.size() > 0) { + prevBucket = head.get(head.lastKey()); + if (prevBucket.owns(vlsn)) { + return prevBucket; + } + } + + /* + * If the key is not in the headMap, we must return the next bucket + * with mappings that follow on. 
+ */ + SortedMap tail = bucketCache.tailMap(pivot); + if (tail.size() > 0) { + VLSNBucket bucket = tail.get(tail.firstKey()); + assert (bucket.owns(vlsn) || bucket.follows(vlsn)) : + "VLSN " + vlsn + " got wrong bucket " + bucket; + return bucket; + } + + throw EnvironmentFailureException.unexpectedState + (envImpl, "VLSN " + vlsn + " should be held within this tracker. " + + this + " prevBucket=" + prevBucket); + } + + /** + * Get the bucket which holds a mapping for this VLSN. If there is no such + * bucket, get the one directly preceding it. If this VLSN is >= + * firstTrackedVLSN, then we should be able to guarantee that a bucket is + * returned. + */ + synchronized VLSNBucket getLTEBucket(VLSN vlsn) { + + if (firstTrackedVLSN.equals(NULL_VLSN) || + (firstTrackedVLSN.compareTo(vlsn) > 0)) { + return null; + } + + Long pivot = vlsn.getSequence() + 1; + SortedMap head = bucketCache.headMap(pivot); + if (head.size() > 0) { + return head.get(head.lastKey()); + } + + /* + * We shouldn't get here. Get the tail purely for creating a debugging + * message. + */ + SortedMap tail = bucketCache.tailMap(pivot); + VLSNBucket nextBucket = null; + if (tail.size() > 0) { + nextBucket = tail.get(tail.firstKey()); + } + throw EnvironmentFailureException.unexpectedState + (envImpl, "VLSN " + vlsn + " should be held within this tracker. " + + this + " nextBucket=" + nextBucket); + } + + /** + * Record a new VLSN->LSN mapping. We guarantee that the first and last + * VLSNs in the range have a mapping. If a VLSN comes out of order, we will + * discard it, and will not update the range. + */ + synchronized void track(VLSN vlsn, long lsn, byte entryTypeNum) { + + VLSNBucket currentBucket; + + if (vlsn.compareTo(lastOnDiskVLSN) < 0) { + + /* + * This VLSN is a laggard. It belongs to a bucket that has already + * gone to disk. Since on disk buckets can't be modified, throw + * this mapping away. Do be sure to update the range in case + * lastSync or lastTxnEnd should be updated as a result of this + * mapping. + */ + updateRange(vlsn, entryTypeNum); + return; + } + + if (bucketCache.size() == 0) { + /* Nothing in the tracker, add a new bucket. */ + currentBucket = new VLSNBucket(DbLsn.getFileNumber(lsn), stride, + maxMappings, maxDistance, vlsn); + nBucketsCreated.increment(); + + bucketCache.put(currentBucket.getFirst().getSequence(), + currentBucket); + } else { + /* Find the last bucket. Only the last bucket is updateable. */ + currentBucket = bucketCache.get(bucketCache.lastKey()); + } + + /* + * This VLSN is a laggard that was preceded by an earlier mapping which + * came unseasonably early. This VLSN can't fit into the current + * bucket, and since we only want to update the last bucket, we'll + * throw this mapping away. + */ + if (currentBucket.follows(vlsn)) { + updateRange(vlsn, entryTypeNum); + return; + } + + if (!currentBucket.put(vlsn, lsn)) { + + /* + * Couldn't put the mapping in this bucket. Close it and move to a + * new one. + */ + currentBucket = + new VLSNBucket(DbLsn.getFileNumber(lsn), stride, + maxMappings, maxDistance, vlsn); + nBucketsCreated.increment(); + bucketCache.put(currentBucket.getFirst().getSequence(), + currentBucket); + if (!currentBucket.put(vlsn, lsn)) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "Couldn't put VLSN " + vlsn + " into " + + currentBucket); + } + } + + updateRange(vlsn, entryTypeNum); + + /* + * Only update the firstTrackedVLSN if this mapping is really resident + * in a bucket. 
Don't update it if it's an out-of-order, skipped + * mapping. + */ + if (firstTrackedVLSN == NULL_VLSN) { + firstTrackedVLSN = vlsn; + } + } + + /** + * Update the range with a newly arrived VLSN. + */ + private void updateRange(VLSN vlsn, byte entryTypeNum) { + + /* + * Get a reference to the volatile range field so that we always see + * consistent values. + */ + VLSNRange currentRange = range; + range = currentRange.getUpdateForNewMapping(vlsn, entryTypeNum); + } + + /** + * Flush the tracker cache to disk. + * Ideally, we'd allow concurrent VLSN put() calls while a flush to database + * is happening. To do that, we need a more sophisticated synchronization + * scheme that defines the immutable vs. mutable portion of the tracker + * cache. See SR [#17689] + */ + synchronized void flushToDatabase(DatabaseImpl mappingDbImpl, Txn txn) { + + VLSNRange flushRange = range; + + if (bucketCache.size() == 0) { + /* + * No mappings to write out, but it's possible that the range + * changed due to a truncation, in which case we must update the + * database. RangeTruncated is used to reduce the chance that + * we are doing unnecessary writes of the range record to the db. + */ + if (rangeTruncated) { + lastOnDiskVLSN = flushRange.writeToDatabase(envImpl, + mappingDbImpl, txn); + rangeTruncated = false; + } + return; + } + + /* + * Save information about the portion of the cache that we are trying + * to flush. Close off the last bucket, so the flush portion becomes + * immutable. In the past, this was in a synchronization block. + * This isn't strictly needed right now, since the method is currently + * fully synchronized, but is a good practice, and will be + * necessary if future changes make it possible to have concurrent + * puts. + */ + VLSNBucket lastBucket = bucketCache.get(bucketCache.lastKey()); + lastBucket.close(); + + /* + * Write the flush portion to disk. Ideally, this portion would be done + * without synchronization on the tracker, to permit concurrent reads + * and writes to the tracker. Note that because there is no + * synchronization, the buckets must be flushed to disk in vlsn order, + * so that the vlsn index always looks consistent. There should be no + * gaps in the bucket range. This assumption permits concurrent access + * by VLSNIndex.getGTEBucketFromDatabase. + */ + VLSN currentLastVLSN = lastOnDiskVLSN; + for (Long key : bucketCache.keySet()) { + + VLSNBucket target = bucketCache.get(key); + + /* Do some sanity checking before we write this bucket to disk. */ + validateBeforeWrite(target, + flushRange, + currentLastVLSN, + target==lastBucket); + target.writeToDatabase(envImpl, mappingDbImpl, txn); + currentLastVLSN = target.getLast(); + } + + lastOnDiskVLSN = flushRange.writeToDatabase(envImpl, + mappingDbImpl, txn); + rangeTruncated = false; + + /* + * Update the tracker to remove the parts that have been written to + * disk. Update firstTrackedVLSN, lastOnDiskVLSN, and clear the bucket + * cache. + * + * In the current implementation, bucketCache is guaranteed not to + * change, so we can just clear the cache. If we had written the + * buckets out without synchronization, the cache might have gained + * mappings, and we wouuld have to be careful to set the + * firstTrackedVLSN to be the first VLSN of the remaining buckets, and + * to set lastTrackedVLSN to be the last VLSN in flushRange. + * + * We would also have to explicitly call SortedMap.remove() to remove + * the written buckets. 
Do not use TreeMap.tailSet, which would + * inadvertently leave us with the semantics of a SubMap, which has + * mandated key ranges. With rollbacks, we may end up adding buckets + * that are "repeats" in the future. + */ + bucketCache.clear(); + firstTrackedVLSN = NULL_VLSN; + } + + /** + * Do some sanity checking before the write, to help prevent any corruption. + */ + private void validateBeforeWrite(VLSNBucket target, + VLSNRange flushRange, + VLSN currentLastVLSN, + boolean isLastBucket) { + + if (!(currentLastVLSN.equals(NULL_VLSN)) && + (target.getFirst().compareTo(currentLastVLSN) <= 0)) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "target bucket overlaps previous bucket. " + + "currentLastVLSN= " + currentLastVLSN + " target=" + + target); + } + + if (target.getLast().compareTo(flushRange.getLast()) > 0) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "target bucket exceeds flush range. " + + "range= " + flushRange + " target=" + + target); + } + + if (isLastBucket) { + if (target.getLast().compareTo(flushRange.getLast()) != 0) { + throw EnvironmentFailureException.unexpectedState + (envImpl, "end of last bucket should match end of range. " + + "range= " + flushRange + " target=" + target); + } + } + } + + /** + * Initially the protectedFileRange is set to the file of the range start. + * It is advanced later when the range head is truncated. + */ + void initProtectedFileRange(long firstFile) { + protectedFileRange.advanceRange(firstFile); + } + + /** + * @see VLSNIndex#protectRangeHead + */ + synchronized ProtectedFileSet protectRangeHead(String lockerName) { + + /* + * Protect all files in the range. Because we are synchronized, we know + * that protectedFileRange will not be advanced and the range head will + * remain stable. + */ + return envImpl.getFileProtector().protectFileRange( + lockerName, protectedFileRange.getRangeStart(), + true /*protectVlsnIndex*/); + } + + /** + * Returns the file at the lower bound of the current range. This method + * does not synchronize. + */ + long getProtectedRangeStartFile() { + return protectedFileRange.getRangeStart(); + } + + /** + * Try to advance the VLSNIndex ProtectedFileRange and truncate the head + * of the tracker's range, so that bytesNeeded can be freed by deleting + * files in this range. + * + * @return {deleteEnd, deleteFileNum} pair if the range was changed, or + * null if no change is currently needed/possible. + * -- deleteEnd is the last VLSN to be truncated. + * -- deleteFileNum the file having deleteEnd as its last VLSN. + */ + synchronized Pair tryTruncateFromHead( + final long bytesNeeded, + final LogItemCache logItemCache) { + + /* Do not allow the index to become empty. */ + final long preserveVlsn = range.getLast().getSequence() - + envImpl.getConfigManager().getInt(RepParams.MIN_VLSN_INDEX_SIZE); + + if (preserveVlsn < 0) { + return null; + } + + /* Determine the vlsn/file after which to truncate the range. */ + final Pair truncateInfo = + envImpl.getFileProtector().checkVLSNIndexTruncation( + bytesNeeded, new VLSN(preserveVlsn)); + + if (truncateInfo == null) { + return null; + } + + /* + * It is possible that after checkVLSNIndexTruncation returns, a file + * in the range we expect to delete becomes protected. This is safe, + * because we know that a syncup will not begin while synchronized on + * the VLSNTracker, and feeders always advance their range (never + * retreat). 
It is also still beneficial to truncate the index because
+         * we expect the other protection for such a file to be removed soon,
+         * and then the file can be deleted quickly.
+         */
+        return truncateFromHead(
+            truncateInfo.first(), truncateInfo.second(), logItemCache) ?
+            truncateInfo : null;
+    }
+
+    /**
+     * Truncate the head of the tracker's range, and advance the
+     * ProtectedFileRange accordingly.
+     *
+     * @return true if the range is changed, or false if no change is needed.
+     */
+    synchronized boolean truncateFromHead(
+        VLSN deleteEnd,
+        long deleteFileNum,
+        LogItemCache logItemCache) {
+
+        /* The caller should pass a non-null VLSN, but check for safety. */
+        if (deleteEnd.equals(VLSN.NULL_VLSN)) {
+            return false;
+        }
+
+        /*
+         * Check the VLSN found in the deleted file against the existing
+         * mappings. The range should not be empty, and doing the truncation
+         * should not remove the last sync point. We assume that once this
+         * environment has received any portion of the replication stream, it
+         * will maintain a minimum set of VLSNs.
+         */
+        VLSNRange oldRange = range;
+
+        if (oldRange.getFirst().compareTo(deleteEnd) > 0) {
+            /* deleteEnd has already been cast out of the index. */
+            return false;
+        }
+
+        if (oldRange.isEmpty()) {
+            throw EnvironmentFailureException.unexpectedState(
+                envImpl, "Didn't expect current range to be empty. " +
+                " End of delete range = " + deleteEnd);
+        }
+
+        if (!oldRange.getLastSync().equals(NULL_VLSN) &&
+            (deleteEnd.compareTo(oldRange.getLastSync()) > 0)) {
+
+            throw EnvironmentFailureException.unexpectedState(
+                envImpl, "Can't log clean away last matchpoint. DeleteEnd= " +
+                deleteEnd + " lastSync=" + oldRange.getLastSync());
+        }
+
+        /*
+         * Sanity checks are over and we are committed to changing the range.
+         *
+         * Clear the LogItemCache in case it contains entries with VLSNs being
+         * truncated. This seems extremely unlikely, since the cache is a small
+         * number of entries at the end of the VLSN range. But perhaps it is
+         * simplest to clear it in order to guarantee safety, and incur the
+         * infrequent cost of a few cache misses.
+         */
+        if (logItemCache != null) {
+            logItemCache.clear();
+        }
+
+        /*
+         * Advance the ProtectedFileRange to allow files in the truncated
+         * portion to be deleted. deleteFileNum is the file having deleteEnd as
+         * its highest VLSN. The VLSN range will now start with deleteEnd+1, so
+         * now we only need to protect files starting at deleteFileNum+1.
+         */
+        protectedFileRange.advanceRange(deleteFileNum + 1);
+
+        /*
+         * Update the in-memory, cached portion of the index.
+         *
+         * Update range first, to prevent others from attempting to read the
+         * truncated portions. Update the firstTrackedVLSN last, after the
+         * buckets are removed; getLTEBucket relies on the firstTrackedVLSN
+         * value.
+         */
+        range = oldRange.shortenFromHead(deleteEnd);
+
+        /*
+         * Ensure that the range is written to the db even if the resulting
+         * number of buckets is 0.
+         */
+        rangeTruncated = true;
+
+        /*
+         * afterDelete may not == range.getFirst, because range.getFirst may
+         * be NULL_VLSN if the entire vlsn index has been deleted.
+         */
+        VLSN afterDelete = deleteEnd.getNext();
+
+        /*
+         * Check if the cached buckets need to be modified. Suppose vlsns 1-8
+         * are on disk, and vlsns 12->40 are in the tracker cache, with an out
+         * of order gap from 9 - 11.
+         *
+         * case 1: deleteEnd <= 8, we do not need to truncate anything in the
+         * bucket cache, but we may have to make a ghost bucket to fill the
+         * gap.
+         * case 2: deleteEnd == 11, we don't need to modify the cache or
+         * create a ghost bucket.
+         * case 3: deleteEnd > 8 and < 11 is in the gap, we need to create a
+         * ghost bucket to hold the beginning of the range.
+         * case 4: deleteEnd >= 12, we need to modify the bucket cache.
+         */
+        if (!lastOnDiskVLSN.equals(VLSN.NULL_VLSN) &&
+            (lastOnDiskVLSN.compareTo(deleteEnd) >= 0)) {
+
+            /*
+             * case 1: the buckets in the tracker cache are unaffected, all
+             * the pertinent mappings are on disk.
+             */
+            if (lastOnDiskVLSN.equals(deleteEnd)) {
+                if (firstTrackedVLSN.compareTo(afterDelete) > 0) {
+                    checkForGhostBucket(bucketCache, deleteFileNum);
+                }
+                lastOnDiskVLSN = NULL_VLSN;
+            }
+            return true;
+        }
+
+        assert(!firstTrackedVLSN.equals(NULL_VLSN));
+
+        if (firstTrackedVLSN.equals(afterDelete)) {
+            /* case 2: The tracker is lined up perfectly with the new range. */
+            lastOnDiskVLSN = NULL_VLSN;
+            return true;
+        }
+
+        if (firstTrackedVLSN.compareTo(afterDelete) > 0) {
+            /*
+             * case 3: we have to make a ghost bucket.
+             */
+            checkForGhostBucket(bucketCache, deleteFileNum);
+            lastOnDiskVLSN = NULL_VLSN;
+            return true;
+        }
+
+        /*
+         * case 4: we have to prune the buckets.
+         */
+        VLSNBucket owningBucket = getLTEBucket(deleteEnd);
+        Long retainBucketKey = owningBucket.getFirst().getNext().getSequence();
+        SortedMap<Long, VLSNBucket> tail;
+        try {
+            /*
+             * We need to chop off part of the bucket cache. Find the portion
+             * to retain.
+             */
+            tail = bucketCache.tailMap(retainBucketKey);
+            checkForGhostBucket(tail, deleteFileNum);
+            bucketCache = tail;
+        } catch (NoSuchElementException e) {
+            firstTrackedVLSN = NULL_VLSN;
+            bucketCache = new TreeMap<>();
+        }
+
+        lastOnDiskVLSN = NULL_VLSN;
+        return true;
+    }
+
+    private void checkForGhostBucket(SortedMap<Long, VLSNBucket> buckets,
+                                     long deleteFileNum) {
+        /*
+         * The range has just been truncated. If the first bucket's first vlsn
+         * != the first in range, insert a GhostBucket.
+         */
+        Long firstKey = buckets.firstKey();
+        VLSNBucket firstBucket = buckets.get(firstKey);
+        VLSN firstRangeVLSN = range.getFirst();
+        VLSN firstBucketVLSN = firstBucket.getFirst();
+        if (!firstBucketVLSN.equals(firstRangeVLSN)) {
+            long nextFile =
+                envImpl.getFileManager().getFollowingFileNum(deleteFileNum,
+                                                             true /* forward */);
+            long lastPossibleLsn = firstBucket.getLsn(firstBucketVLSN);
+            VLSNBucket placeholder = new GhostBucket(firstRangeVLSN,
+                                                     DbLsn.makeLsn(nextFile, 0),
+                                                     lastPossibleLsn);
+            buckets.put(firstRangeVLSN.getSequence(), placeholder);
+        }
+    }
+
+    /**
+     * Remove the mappings for VLSNs >= deleteStart. This should only be used
+     * at syncup, or recovery. We can assume no new mappings are coming in, but
+     * the VLSNIndex may be read during this time; checkpoints continue to
+     * happen during rollbacks. Since syncup is always at a sync-able log
+     * entry, and since we don't roll back past a commit, we can assume that
+     * the lastSync can be changed to deleteStart.getPrev(), and lastTxnEnd is
+     * unchanged. The VLSNRange itself is updated before the tracker and
+     * on-disk buckets are pruned.
+     *
+     * The VLSNIndex guarantees that the beginning and end vlsns in the range
+     * have mappings so LTE and GTE search operations work. When truncating the
+     * mappings in the tracker, check to see if the new end vlsn has a mapping,
+     * since that may not happen if the buckets are cut in the middle, or have
+     * a gap due to out of order mapping. For example, suppose the mapping for
+     * vlsn 20 came before the mappings for vlsn 17, 18 and 19.
+     * The buckets would have this gap:
+     *  Bucket A: vlsns 10-16
+     *  Bucket B: vlsns 20-25
+     *  Bucket C: vlsns 26 - 30
+     * If the new end of vlsn range is 11-14, 17, 18, 19, 21 - 24, or 27-29,
+     * we have to end the vlsn tracker with a new mapping of
+     * (vlsn (deleteStart-1) -> prevLsn) to cap off the buckets.
+     *
+     * Cases:
+     * End of range is in the gap -- either 17, 18, 19
+     * 1. on disk buckets exist, but don't encompass the gap: bucket A on disk
+     * 2. all buckets are in the tracker cache: bucket A in tracker
+     * 3. on disk buckets encompass the gap: bucket A and B on disk.
+     */
+    synchronized void truncateFromTail(VLSN deleteStart, long prevLsn) {
+
+        /*
+         * Update the range first; it will define the range that the vlsnIndex
+         * covers. Then adjust any mappings held in the bucket cache. Don't
+         * update the lastOnDiskVLSN marker, which says what is on disk. Since
+         * the on-disk portion may also get truncated, the caller will update
+         * the lastOnDiskVLSN field when it inspects the database.
+         */
+        VLSNRange oldRange = range;
+        range = oldRange.shortenFromEnd(deleteStart);
+
+        /*
+         * Ensure that the range is written to the db even if the resulting
+         * number of buckets is 0.
+         */
+        rangeTruncated = true;
+
+        if (firstTrackedVLSN.equals(NULL_VLSN)) {
+            /* No mappings in this tracker */
+            return;
+        }
+
+        if (firstTrackedVLSN.compareTo(deleteStart) >= 0) {
+
+            /*
+             * Everything in this tracker should be removed. In addition, the
+             * caller may be removing some items from the database.
+             */
+            bucketCache.clear();
+            firstTrackedVLSN = NULL_VLSN;
+            return;
+        }
+
+        /*
+         * We need to do some pruning of the buckets in the cache. Find the
+         * buckets in the bucketCache that have mappings >= deleteStart, and
+         * remove their mappings. First establish the headset of buckets that
+         * should be preserved.
+         */
+        VLSNBucket targetBucket = getGTEBucket(deleteStart);
+        Long targetKey = targetBucket.getFirst().getSequence();
+
+        /* The newCache has buckets that should be preserved. */
+        SortedMap<Long, VLSNBucket> newCache =
+            new TreeMap<>(bucketCache.headMap(targetKey));
+
+        /*
+         * Prune any mappings >= deleteStart out of targetBucket. If it has any
+         * mappings left, add it to newCache.
+         */
+        targetBucket.removeFromTail(deleteStart, prevLsn);
+        if (!targetBucket.empty()) {
+            newCache.put(targetBucket.getFirst().getSequence(),
+                         targetBucket);
+        }
+
+        bucketCache = newCache;
+
+        /*
+         * Now all truncated mappings have been removed from the index. Check
+         * that the end point of the vlsn range is represented in this
+         * tracker. Since vlsn mappings can come out of order, it's
+         * possible that the buckets have a gap, and that truncation will
+         * cleave the bucket cache just at a point where a mapping is
+         * missing. For example, suppose vlsn mappings come in this order:
+         *     vlsn 16, vlsn 18, vlsn 17
+         * causing the buckets to have this gap:
+         *     bucket N:   vlsn 10 - 16
+         *     bucket N+1: vlsn 18 - 20
+         * and suppose the vlsn range is truncated with a deleteStart of vlsn
+         * 18. The new end range becomes 17, but the buckets do not have a
+         * mapping for vlsn 17. If so, create a mapping now.
+         *
+         * If we are at this point, we know that the tracker should contain
+         * a mapping for the last VLSN in the range. Fix it now, so the
+         * data structure is as tidy as possible.
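+         *
+         * (In the example above, after truncating at deleteStart 18 the last
+         * cached bucket ends at vlsn 16, which is less than the new range end
+         * of 17, so an end mapping of 17 -> prevLsn is added below.)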
+         */
+        boolean needEndMapping;
+        if (bucketCache.isEmpty()) {
+            needEndMapping = true;
+        } else {
+            VLSNBucket lastBucket = bucketCache.get(bucketCache.lastKey());
+            needEndMapping =
+                (lastBucket.getLast().compareTo(range.getLast()) < 0);
+        }
+
+        if (needEndMapping) {
+            addEndMapping(range.getLast(), prevLsn);
+        }
+    }
+
+    /**
+     * Called by the VLSNIndex to see if we have to add a bucket in the tracker
+     * after pruning the on-disk storage. The get{LTE,GTE}Bucket methods
+     * assume that there are mappings for the first and last VLSN in the range.
+     * But since out of order vlsn mappings can cause non-contiguous buckets,
+     * the pruning may have lost the end mapping. [#23491]
+     */
+    synchronized void ensureRangeEndIsMapped(VLSN lastVLSN, long lastLsn) {
+
+        /*
+         * if lastOnDiskVLSN < lastVLSN < firstTrackedVLSN, or
+         * lastOnDiskVLSN < lastVLSN and firstTrackedVLSN is null,
+         * then we need to add a mapping for lastVLSN->lastLsn. Otherwise a
+         * mapping already exists.
+         */
+        if (lastOnDiskVLSN.compareTo(lastVLSN) < 0) {
+
+            /*
+             * The on-disk vlsn mappings aren't sufficient to cover the
+             * lastVLSN. There needs to be a mapping in the tracker for the
+             * end point of the range.
+             */
+            if (!firstTrackedVLSN.equals(NULL_VLSN)) {
+
+                /*
+                 * There are mappings in the tracker, so we can assume
+                 * that they cover the end point, because truncateFromTail()
+                 * should have adjusted for that. But assert to be sure!
+                 */
+                if (lastVLSN.compareTo(firstTrackedVLSN) < 0) {
+                    throw EnvironmentFailureException.unexpectedState
+                        (envImpl,
+                         "Expected this tracker to cover vlsn " + lastVLSN +
+                         " after truncateFromTail. LastOnDiskVLSN= " +
+                         lastOnDiskVLSN + " tracker=" + this);
+                }
+
+                return;
+            }
+
+            /*
+             * There are no mappings in the tracker. The on disk mappings were
+             * pruned, and a gap was created between what was on disk and what
+             * is in the tracker cache. Add a mapping.
+             */
+            addEndMapping(lastVLSN, lastLsn);
+        }
+    }
+
+    /**
+     * Add a bucket holding a lastVLSN -> lastLsn mapping.
+     */
+    private void addEndMapping(VLSN lastVLSN, long lastLsn) {
+        /* Assert that it's in the range. */
+        assert (lastVLSN.compareTo(range.getLast()) == 0) :
+            "lastVLSN=" + lastVLSN + " lastLsn = " + lastLsn +
+            " range=" + range;
+
+        VLSNBucket addBucket =
+            new VLSNBucket(DbLsn.getFileNumber(lastLsn), stride,
+                           maxMappings, maxDistance, lastVLSN);
+        addBucket.put(lastVLSN, lastLsn);
+        nBucketsCreated.increment();
+        bucketCache.put(addBucket.getFirst().getSequence(), addBucket);
+        if (firstTrackedVLSN.equals(NULL_VLSN)) {
+            firstTrackedVLSN = lastVLSN;
+        }
+    }
+
+    /**
+     * Attempt to replace the mappings in this vlsnIndex for
+     * deleteStart->lastVLSN with those from the recovery mapper.
+     * Since we do not supply a prevLsn argument, we may not be able to "cap"
+     * the truncated vlsn bucket the same way that we can in
+     * truncateFromTail. Because of that, we may need to remove mappings that
+     * are < deleteStart.
+     *
+     * For example, suppose a bucket holds mappings
+     *    10 -> 101
+     *    15 -> 201
+     *    18 -> 301
+     * If deleteStart == 17, we will have to delete all the way to vlsn 15.
+     *
+     * The maintenance of VLSNRange.lastSync and lastTxnEnd is simplified
+     * by assumptions we can make because we are about to append
+     * mappings found by the recovery tracker that begin at deleteStart. If
+     * lastSync and lastTxnEnd are <= deleteStart, we know that they will
+     * either stay the same, or be replaced by lastSync and lastTxnEnd from the
+     * recoveryTracker.
+     * Even if we delete mappings that are < deleteStart, we know
+     * that we did not roll back past an abort or commit, so that we do not
+     * have to update the lastSync or lastTxnEnd value.
+     */
+    void merge(VLSN prunedLastOnDiskVLSN, VLSNRecoveryTracker recoveryTracker) {
+
+        VLSNRange oldRange = range;
+        range = oldRange.merge(recoveryTracker.range);
+        VLSN recoveryFirst = recoveryTracker.getRange().getFirst();
+
+        lastOnDiskVLSN = prunedLastOnDiskVLSN;
+
+        /*
+         * Find the buckets in the bucketCache that have mappings >=
+         * recoveryFirst, and remove their mappings. First establish the
+         * headset of buckets that should be preserved. At this point, we
+         * have already pruned the database, so the bucket set may not
+         * be fully contiguous -- we may have pruned away mappings that
+         * would normally be in the database.
+         */
+        if (bucketCache.size() == 0) {
+            /* Just take all the recovery tracker's mappings. */
+            bucketCache = recoveryTracker.bucketCache;
+        } else {
+            VLSNBucket targetBucket = getGTEBucket(recoveryFirst);
+            Long targetKey = targetBucket.getFirst().getSequence();
+
+            /* The newCache holds buckets that should be preserved. */
+            SortedMap<Long, VLSNBucket> newCache =
+                new TreeMap<>(bucketCache.headMap(targetKey));
+
+            /*
+             * Prune any mappings >= recoveryFirst out of targetBucket. If it
+             * has any mappings left, add it to newCache.
+             */
+            targetBucket.removeFromTail(recoveryFirst, DbLsn.NULL_LSN);
+            if (!targetBucket.empty()) {
+                newCache.put(targetBucket.getFirst().getSequence(),
+                             targetBucket);
+            }
+
+            newCache.putAll(recoveryTracker.bucketCache);
+            bucketCache = newCache;
+        }
+
+        if (bucketCache.size() > 0) {
+            VLSNBucket firstBucket = bucketCache.get(bucketCache.firstKey());
+            firstTrackedVLSN = firstBucket.getFirst();
+        }
+    }
+
+    void append(VLSNRecoveryTracker recoveryTracker) {
+
+        /*
+         * This method assumes that there is no overlap between this tracker
+         * and the recovery tracker. Everything in this tracker should precede
+         * the recovery tracker.
+         */
+        if (!range.getLast().isNull()) {
+            if (range.getLast().compareTo
+                (recoveryTracker.getFirstTracked()) >= 0) {
+
+                throw EnvironmentFailureException.unexpectedState
+                    (envImpl,
+                     "Expected this tracker to precede recovery tracker. " +
+                     "This tracker= " + this + " recoveryTracker = " +
+                     recoveryTracker);
+            }
+        }
+
+        bucketCache.putAll(recoveryTracker.bucketCache);
+        VLSNRange currentRange = range;
+        range = currentRange.getUpdate(recoveryTracker.getRange());
+        if (bucketCache.size() > 0) {
+            VLSNBucket firstBucket = bucketCache.get(bucketCache.firstKey());
+            firstTrackedVLSN = firstBucket.getFirst();
+        }
+    }
+
+    VLSNRange getRange() {
+        return range;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append(range);
+        sb.append(" firstTracked=").append(firstTrackedVLSN);
+        sb.append(" lastOnDiskVLSN=").append(lastOnDiskVLSN);
+
+        for (VLSNBucket bucket : bucketCache.values()) {
+            sb.append("\n");
+            sb.append(bucket);
+        }
+        return sb.toString();
+    }
+
+    /**
+     * For unit test support.
+     */
+    synchronized boolean verify(boolean verbose) {
+
+        if (!range.verify(verbose)) {
+            return false;
+        }
+
+        /* Check that all the buckets are in order.
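+         * (For instance, buckets covering vlsns 10-16 and 20-25 are legal,
+         * since buckets may have gaps between them, but 10-16 followed by
+         * 15-25 would overlap and fail the boundary check below.)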
*/ + ArrayList firstVLSN = new ArrayList<>(); + ArrayList lastVLSN = new ArrayList<>(); + + for (VLSNBucket b : bucketCache.values()) { + firstVLSN.add(b.getFirst()); + lastVLSN.add(b.getLast()); + } + + if (!verifyBucketBoundaries(firstVLSN, lastVLSN)) { + return false; + } + + if (firstVLSN.size() > 0) { + if (!firstVLSN.get(0).equals(firstTrackedVLSN)) { + if (verbose) { + System.err.println("firstBucketVLSN = " + + firstVLSN.get(0) + " should equal " + + firstTrackedVLSN); + } + return false; + } + + VLSN lastBucketVLSN = lastVLSN.get(lastVLSN.size() -1); + if (!lastBucketVLSN.equals(range.getLast())) { + if (verbose) { + System.err.println("lastBucketVLSN = " + lastBucketVLSN + + " should equal " + range.getLast()); + } + return false; + } + } + + return true; + } + + /* + * Used to check bucket boundaries for both the in-memory flush list and + * the on-disk database buckets. Buckets may not be adjacent, but must be + * in order. + */ + static boolean verifyBucketBoundaries(ArrayList firstVLSN, + ArrayList lastVLSN) { + for (int i = 1; i < firstVLSN.size(); i++) { + VLSN first = firstVLSN.get(i); + VLSN prevLast = lastVLSN.get(i-1); + if (first.compareTo(prevLast.getNext()) < 0) { + System.out.println("Boundary problem: bucket " + i + + " first " + first + + " follows bucket.last " + prevLast); + return false; + } + } + return true; + } + + VLSN getFirstTracked() { + return firstTrackedVLSN; + } + + VLSN getLastOnDisk() { + return lastOnDiskVLSN; + } + + void setLastOnDiskVLSN(VLSN lastOnDisk) { + lastOnDiskVLSN = lastOnDisk; + } + + /* + * For unit test support only. Can only be called when replication stream + * is quiescent. + */ + boolean isFlushedToDisk() { + return lastOnDiskVLSN.equals(range.getLast()); + } + + void close() { + if (protectedFileRange != null) { + final FileProtector fileProtector = envImpl.getFileProtector(); + fileProtector.removeFileProtection(protectedFileRange); + fileProtector.setVLSNIndexProtectedFileRange(null); + } + } +} diff --git a/src/com/sleepycat/je/rep/vlsn/package-info.java b/src/com/sleepycat/je/rep/vlsn/package-info.java new file mode 100644 index 0000000..0c2749d --- /dev/null +++ b/src/com/sleepycat/je/rep/vlsn/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: VLSN index (in-memory and persistent) for mapping VLSNs to LSNs. + */ +package com.sleepycat.je.rep.vlsn; diff --git a/src/com/sleepycat/je/statcap/EnvStatsLogger.java b/src/com/sleepycat/je/statcap/EnvStatsLogger.java new file mode 100644 index 0000000..f925209 --- /dev/null +++ b/src/com/sleepycat/je/statcap/EnvStatsLogger.java @@ -0,0 +1,147 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.statcap; + +import java.io.File; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.lang.management.OperatingSystemMXBean; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.utilint.StatLogger; + +public class EnvStatsLogger implements EnvConfigObserver { + + private final EnvironmentImpl env; + private StatLogger stlog; + public static final String STATFILENAME = "je.config"; + public static final String STATFILEEXT = "csv"; + private static final String DELIMITER = ","; + private static final String QUOTE = "\""; + private static final int MAXROWCOUNT = 1000; + private static final int MAXFILECOUNT = 2; + private final StringBuffer sb = new StringBuffer(); + private final StringBuffer valb = new StringBuffer(); + private final Logger logger; + + public EnvStatsLogger(EnvironmentImpl env) { + + File statdirf; + + this.env = env; + logger = LoggerUtils.getLogger(getClass()); + String statdir = env.getConfigManager().get( + EnvironmentParams.STATS_FILE_DIRECTORY); + if (statdir == null || statdir.equals("")) { + statdirf = env.getEnvironmentHome(); + } else { + statdirf = new File(statdir); + } + + try { + stlog = new StatLogger(statdirf, + STATFILENAME, + STATFILEEXT, + MAXFILECOUNT, + MAXROWCOUNT); + } catch (IOException e) { + throw new IllegalStateException( + "Error accessing statistics capture file "+ + STATFILENAME + "." + STATFILEEXT + + " IO Exception: " + e); + } + } + + public void log() { + SortedMap envConfigMap = new TreeMap(); + EnvironmentConfig mc = env.cloneConfig(); + for (String colname : + EnvironmentParams.SUPPORTED_PARAMS.keySet()) { + envConfigMap.put("envcfg:" + colname, mc.getConfigParam(colname)); + } + addSystemStats(envConfigMap); + sb.setLength(0); + valb.setLength(0); + sb.append("time"); + valb.append(StatUtils.getDate(System.currentTimeMillis())); + for (Entry e : envConfigMap.entrySet()) { + if (sb.length() != 0) { + sb.append(DELIMITER); + valb.append(DELIMITER); + } + sb.append(e.getKey()); + valb.append(QUOTE + e.getValue() + QUOTE); + } + try { + stlog.setHeader(sb.toString()); + stlog.logDelta(valb.toString()); + } catch (IOException e) { + LoggerUtils.warning(logger, env, + " Error accessing environment statistics file " + + STATFILENAME + "." 
+ STATFILEEXT + + " IO Exception: " + e); + } + sb.setLength(0); + valb.setLength(0); + } + + @Override + public void envConfigUpdate(DbConfigManager configMgr, + EnvironmentMutableConfig newConfig) { + log(); + } + + private void addSystemStats(Map statmap) { + OperatingSystemMXBean osbean = + ManagementFactory.getOperatingSystemMXBean(); + MemoryMXBean memoryBean = + ManagementFactory.getMemoryMXBean(); + + statmap.put("je:version", + JEVersion.CURRENT_VERSION.getVersionString()); + statmap.put("java:version", System.getProperty("java.version")); + statmap.put("java:vendor", System.getProperty("java.vendor")); + statmap.put("os:name", osbean.getName()); + statmap.put("os:version", osbean.getVersion()); + statmap.put("mc:arch", osbean.getArch()); + statmap.put("mc:processors", + Integer.toString(osbean.getAvailableProcessors())); + statmap.put("java:minMemory" , + Long.toString(memoryBean.getHeapMemoryUsage().getInit())); + statmap.put("java:maxMemory" , + Long.toString(memoryBean.getHeapMemoryUsage().getMax())); + List args = + ManagementFactory.getRuntimeMXBean().getInputArguments(); + sb.setLength(0); + for (String arg : args) { + sb.append(" " + arg); + } + statmap.put("java:args", sb.toString()); + sb.setLength(0); + } +} diff --git a/src/com/sleepycat/je/statcap/JvmStats.java b/src/com/sleepycat/je/statcap/JvmStats.java new file mode 100644 index 0000000..850f673 --- /dev/null +++ b/src/com/sleepycat/je/statcap/JvmStats.java @@ -0,0 +1,110 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.statcap; + +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; + +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; +import com.sleepycat.je.utilint.StatGroup; + +class JvmStats { + + private final List gcBeans = + ManagementFactory.getGarbageCollectorMXBeans(); + + private final MemoryMXBean memoryBean = + ManagementFactory.getMemoryMXBean(); + private final String GROUPNAME = "Jvm"; + private final String GROUPDEF = "Statistics capture jvm statistics."; + private final String GC_COUNT_DESC = "GC collection count."; + private final String GC_COLLECTION_TIME_DESC = "GC collection time."; + private final String GC_COUNT_NAME_SUFFIX = ".count"; + private final String GC_TIME_NAME_SUFFIX = ".time"; + + public static final StatDefinition LOAD_AVERAGE = + new StatDefinition("loadAverage", + "Average JVM system load.", + StatType.CUMULATIVE); + + public static final StatDefinition HEAP_MEMORY_USAGE = + new StatDefinition("heap", + "Heap memory usage.", + StatType.CUMULATIVE); + + private StatGroup prev = null; + + private final Map statdefmap = + new HashMap(); + + public JvmStats() { + for (GarbageCollectorMXBean gcBean : gcBeans) { + String name = gcBean.getName(); + String statname = name + GC_COUNT_NAME_SUFFIX; + StatDefinition sd = new StatDefinition(statname, GC_COUNT_DESC); + statdefmap.put(statname, sd); + statname = name + GC_TIME_NAME_SUFFIX; + sd = new StatDefinition(statname, GC_COLLECTION_TIME_DESC); + statdefmap.put(statname, sd); + } + statdefmap.put(LOAD_AVERAGE.getName(), LOAD_AVERAGE); + statdefmap.put(HEAP_MEMORY_USAGE.getName(), HEAP_MEMORY_USAGE); + } + + public StatGroup loadStats(StatsConfig sc) { + StatGroup retgroup; + + StatGroup sg = new StatGroup(GROUPNAME, GROUPDEF); + for (GarbageCollectorMXBean gcBean : gcBeans) { + String name = gcBean.getName(); + String statname = name + GC_COUNT_NAME_SUFFIX; + new LongStat( + sg, statdefmap.get(statname), gcBean.getCollectionCount()); + statname = name + GC_TIME_NAME_SUFFIX; + new LongStat( + sg, statdefmap.get(statname), gcBean.getCollectionTime()); + } + new LongStat(sg, LOAD_AVERAGE, (long) JVMSystemUtils.getSystemLoad()); + new LongStat( + sg, HEAP_MEMORY_USAGE, memoryBean.getHeapMemoryUsage().getUsed()); + + if (prev != null) { + retgroup = sg.computeInterval(prev); + } else { + retgroup = sg; + } + prev = sg; + return retgroup; + } + + public void addVMStatDefs(SortedSet projections) { + for (GarbageCollectorMXBean gcBean : gcBeans) { + projections.add( + GROUPNAME + ":" + gcBean.getName() + GC_COUNT_NAME_SUFFIX); + projections.add( + GROUPNAME + ":" + gcBean.getName() + GC_TIME_NAME_SUFFIX); + } + projections.add(GROUPNAME + ":" + LOAD_AVERAGE.getName()); + projections.add(GROUPNAME + ":" + HEAP_MEMORY_USAGE.getName()); + } +} diff --git a/src/com/sleepycat/je/statcap/StatCapture.java b/src/com/sleepycat/je/statcap/StatCapture.java new file mode 100644 index 0000000..64bc4f3 --- /dev/null +++ b/src/com/sleepycat/je/statcap/StatCapture.java @@ -0,0 +1,324 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.statcap; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.logging.Logger; + +import com.sleepycat.je.CustomStats; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.DaemonThread; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.Stat; +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.utilint.StatLogger; + +public class StatCapture extends DaemonThread implements EnvConfigObserver { + + public static final String STATFILENAME = "je.stat"; + public static final String STATFILEEXT = "csv"; + private static final String CUSTOMGROUPNAME = "Custom"; + private static final String DELIMITER = ","; + private static final String DELIMITERANDSPACE = ", "; + + private final StatManager statMgr; + + private final SortedSet statProjection; + + private final StatsConfig statsConfig; + + private final Integer statKey; + + private volatile StatLogger stlog = null; + private final StringBuffer values = new StringBuffer(); + private String currentHeader = null; + + private final JvmStats jvmstats = new JvmStats(); + private final CustomStats customStats; + private final String[] customStatHeader; + + private final Logger logger; + + /* + * Exception of last outputStats() call or null if call was successful. + * Used to limit the number of errors logged. + */ + private Exception lastCallException = null; + + public StatCapture(EnvironmentImpl environment, + String name, + long waitTime, + CustomStats customStats, + SortedSet statProjection, + StatManager statMgr) { + + super(waitTime, name, environment); + + logger = LoggerUtils.getLogger(getClass()); + environment.addConfigObserver(this); + + this.statMgr = statMgr; + statKey = statMgr.registerStatContext(); + + this.customStats = customStats; + this.statProjection = statProjection; + + /* + * Note that we fetch all stats, not just fast stats. Since the stat + * retrieval frequency is one minute and this is done by a background + * thread, there is no reason not to include all stats. + */ + statsConfig = new StatsConfig(); + statsConfig.setClear(true); + + /* Add jvm and custom statistics to the projection list. 
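+         * (Projection names take the form "group:statname"; for example, the
+         * JVM load average appears as "Jvm:loadAverage", and a custom field
+         * named "foo" would appear as "Custom:foo".)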
*/
+        jvmstats.addVMStatDefs(statProjection);
+
+        if (customStats != null) {
+            final String[] customFldNames = customStats.getFieldNames();
+            customStatHeader = new String[customFldNames.length];
+            for (int i = 0; i < customFldNames.length; i++) {
+                customStatHeader[i] =
+                    CUSTOMGROUPNAME + ":" + customFldNames[i];
+                statProjection.add(customStatHeader[i]);
+            }
+        } else {
+            customStatHeader = null;
+        }
+
+        envConfigUpdate(envImpl.getConfigManager(), null);
+    }
+
+    private boolean collectStats() {
+        return stlog != null;
+    }
+
+    /**
+     * Called whenever the DaemonThread wakes up from a sleep.
+     */
+    @Override
+    protected void onWakeup() {
+
+        if (!envImpl.isValid() || !collectStats()) {
+            return;
+        }
+
+        outputStats();
+    }
+
+    @Override
+    public void requestShutdown() {
+        super.requestShutdown();
+
+        /*
+         * Check if env is valid outside of the synchronized call to
+         * outputStats(). It is possible that a call to outputStats
+         * caused the invalidation and we would deadlock since that
+         * thread is holding the lock for this object and waiting for
+         * this thread to shut down.
+         */
+        if (!collectStats() || !envImpl.isValid()) {
+            return;
+        }
+        outputStats();
+    }
+
+    private synchronized void outputStats() {
+
+        if (!collectStats() || !envImpl.isValid()) {
+            return;
+        }
+
+        try {
+            SortedMap<String, String> stats = getStats();
+
+            if (stats != null) {
+                if (currentHeader == null) {
+                    values.setLength(0);
+                    values.append("time");
+
+                    for (Iterator<String> nameit = statProjection.iterator();
+                         nameit.hasNext();) {
+                        String statname = nameit.next();
+                        values.append(DELIMITER + statname);
+                    }
+                    stlog.setHeader(values.toString());
+                    currentHeader = values.toString();
+                }
+                values.setLength(0);
+                values.append(StatUtils.getDate(System.currentTimeMillis()));
+
+                for (Iterator<String> nameit = statProjection.iterator();
+                     nameit.hasNext();) {
+                    String statname = nameit.next();
+                    String val = stats.get(statname);
+                    if (val != null) {
+                        values.append(DELIMITER + val);
+                    } else {
+                        values.append(DELIMITERANDSPACE);
+                    }
+                }
+                stlog.log(values.toString());
+                values.setLength(0);
+                lastCallException = null;
+            }
+        }
+        catch (IOException e) {
+            if (lastCallException == null) {
+                LoggerUtils.warning(logger, envImpl,
+                    "Error accessing statistics capture file " +
+                    STATFILENAME + "." + STATFILEEXT +
+                    " IO Exception: " + e.getMessage());
+            }
+            lastCallException = e;
+        }
+        catch (Exception e) {
+            if (lastCallException == null) {
+                LoggerUtils.warning(logger, envImpl,
+                    "Error accessing or writing statistics capture file " +
+                    STATFILENAME + "." + STATFILEEXT + e + "\n" +
+                    LoggerUtils.getStackTrace(e));
+            }
+            lastCallException = e;
+        }
+    }
+
+    private SortedMap<String, String> getStats() {
+        final Collection<StatGroup> envStats = new ArrayList<>(
+            statMgr.loadStats(statsConfig, statKey).getStatGroups());
+
+        if (envImpl.isReplicated()) {
+            Collection<StatGroup> rsg =
+                envImpl.getRepStatGroups(statsConfig, statKey);
+            if (rsg != null) {
+                envStats.addAll(rsg);
+            }
+        }
+
+        envStats.add(jvmstats.loadStats(statsConfig));
+
+        SortedMap<String, String> statsMap = new TreeMap<>();
+
+        for (StatGroup sg : envStats) {
+
+            for (Entry<StatDefinition, Stat<?>> e :
+                 sg.getStats().entrySet()) {
+
+                final String mapName =
+                    (sg.getName() + ":" + e.getKey().getName()).intern();
+                final Stat<?> stat = e.getValue();
+                if (stat.isNotSet()) {
+                    statsMap.put(mapName, " ");
+                    continue;
+                }
+
+                final Object val = stat.get();
+
+                /* Get stats back as strings.
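+                 * (Floats and doubles are formatted with two decimal places,
+                 * other numbers as longs, and anything else via
+                 * String.valueOf; stats that are not set are recorded as a
+                 * single space.)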
*/ + final String str; + if ((val instanceof Float) || (val instanceof Double)) { + str = String.format("%.2f", val); + } else if (val instanceof Number) { + str = Long.toString(((Number) val).longValue()); + } else if (val != null) { + str = String.valueOf(val); + } else { + str = " "; + } + statsMap.put(mapName, str); + } + } + + if (customStats != null) { + String vals[] = customStats.getFieldValues(); + for (int i = 0; i < vals.length; i++) { + statsMap.put(customStatHeader[i], vals[i]); + } + } + return statsMap; + } + + public void envConfigUpdate(DbConfigManager configMgr, + EnvironmentMutableConfig unused) + throws DatabaseException { + + setWaitTime(configMgr.getDuration( + EnvironmentParams.STATS_COLLECT_INTERVAL)); + + if (envImpl.isReadOnly() || envImpl.isMemOnly() || + !configMgr.getBoolean(EnvironmentParams.STATS_COLLECT)) { + stlog = null; + return; + } + + final int maxFiles = + configMgr.getInt(EnvironmentParams.STATS_MAX_FILES); + + final int fileRowCount = + configMgr.getInt(EnvironmentParams.STATS_FILE_ROW_COUNT); + + if (stlog == null) { + + final String statdir = + configMgr.get(EnvironmentParams.STATS_FILE_DIRECTORY); + + final File statDir; + + if (statdir == null || statdir.equals("")) { + statDir = envImpl.getEnvironmentHome(); + } else { + statDir = new File(statdir); + + if (!statDir.exists()) { + /* TODO: require the user to create the directory. */ + statDir.mkdirs(); + } else if (!statDir.isDirectory()) { + throw new IllegalArgumentException( + "Specified statistic log directory " + + statDir.getAbsolutePath() + " is not a directory."); + } + } + + try { + stlog = new StatLogger( + statDir, STATFILENAME, STATFILEEXT, + maxFiles, fileRowCount); + + } catch (IOException e) { + throw new IllegalStateException( + " Error accessing statistics capture file " + + STATFILENAME + "." + STATFILEEXT + + " IO Exception: " + e.getMessage()); + } + } else { + stlog.setFileCount(maxFiles); + stlog.setRowCount(fileRowCount); + } + } +} diff --git a/src/com/sleepycat/je/statcap/StatCaptureDefinitions.java b/src/com/sleepycat/je/statcap/StatCaptureDefinitions.java new file mode 100644 index 0000000..6390146 --- /dev/null +++ b/src/com/sleepycat/je/statcap/StatCaptureDefinitions.java @@ -0,0 +1,379 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.statcap; + +import java.util.HashMap; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; + +import com.sleepycat.je.cleaner.CleanerStatDefinition; +import com.sleepycat.je.dbi.DbiStatDefinition; +import com.sleepycat.je.evictor.Evictor.EvictionSource; +import com.sleepycat.je.evictor.EvictorStatDefinition; +import com.sleepycat.je.evictor.OffHeapStatDefinition; +import com.sleepycat.je.incomp.INCompStatDefinition; +import com.sleepycat.je.latch.LatchStatDefinition; +import com.sleepycat.je.log.LogStatDefinition; +import com.sleepycat.je.recovery.CheckpointStatDefinition; +import com.sleepycat.je.txn.LockStatDefinition; +import com.sleepycat.je.utilint.StatDefinition; + +/** + * Used to define the statistics that are projected into the + * statistics file. + * + */ +public class StatCaptureDefinitions { + + protected Map nameToDef; + + private static StatDefinition[] cleanerStats = { + CleanerStatDefinition.CLEANER_RUNS, + CleanerStatDefinition.CLEANER_TWO_PASS_RUNS, + CleanerStatDefinition.CLEANER_REVISAL_RUNS, + CleanerStatDefinition.CLEANER_DELETIONS, + CleanerStatDefinition.CLEANER_PENDING_LN_QUEUE_SIZE, + CleanerStatDefinition.CLEANER_INS_OBSOLETE, + CleanerStatDefinition.CLEANER_INS_CLEANED, + CleanerStatDefinition.CLEANER_INS_DEAD, + CleanerStatDefinition.CLEANER_INS_MIGRATED, + CleanerStatDefinition.CLEANER_BIN_DELTAS_OBSOLETE, + CleanerStatDefinition.CLEANER_BIN_DELTAS_CLEANED, + CleanerStatDefinition.CLEANER_BIN_DELTAS_DEAD, + CleanerStatDefinition.CLEANER_BIN_DELTAS_MIGRATED, + CleanerStatDefinition.CLEANER_LNS_OBSOLETE, + CleanerStatDefinition.CLEANER_LNS_CLEANED, + CleanerStatDefinition.CLEANER_LNS_DEAD, + CleanerStatDefinition.CLEANER_LNS_EXPIRED, + CleanerStatDefinition.CLEANER_LNS_LOCKED, + CleanerStatDefinition.CLEANER_LNS_MIGRATED, + CleanerStatDefinition.CLEANER_LNS_MARKED, + CleanerStatDefinition.CLEANER_LNQUEUE_HITS, + CleanerStatDefinition.CLEANER_PENDING_LNS_PROCESSED, + CleanerStatDefinition.CLEANER_MARKED_LNS_PROCESSED, + CleanerStatDefinition.CLEANER_TO_BE_CLEANED_LNS_PROCESSED, + CleanerStatDefinition.CLEANER_CLUSTER_LNS_PROCESSED, + CleanerStatDefinition.CLEANER_PENDING_LNS_LOCKED, + CleanerStatDefinition.CLEANER_ENTRIES_READ, + CleanerStatDefinition.CLEANER_DISK_READS, + CleanerStatDefinition.CLEANER_REPEAT_ITERATOR_READS, + CleanerStatDefinition.CLEANER_TOTAL_LOG_SIZE, + CleanerStatDefinition.CLEANER_ACTIVE_LOG_SIZE, + CleanerStatDefinition.CLEANER_RESERVED_LOG_SIZE, + CleanerStatDefinition.CLEANER_AVAILABLE_LOG_SIZE, + CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE, + CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE_MAP, + CleanerStatDefinition.CLEANER_MIN_UTILIZATION, + CleanerStatDefinition.CLEANER_MAX_UTILIZATION, + }; + + private static StatDefinition[] dbiStats = { + DbiStatDefinition.MB_SHARED_CACHE_TOTAL_BYTES, + DbiStatDefinition.MB_TOTAL_BYTES, + DbiStatDefinition.MB_DATA_BYTES, + DbiStatDefinition.MB_DATA_ADMIN_BYTES, + DbiStatDefinition.MB_DOS_BYTES, + DbiStatDefinition.MB_ADMIN_BYTES, + DbiStatDefinition.MB_LOCK_BYTES, + }; + + private static StatDefinition[] environmentStats = { + DbiStatDefinition.ENV_RELATCHES_REQUIRED, + DbiStatDefinition.ENV_CREATION_TIME, + DbiStatDefinition.ENV_BIN_DELTA_GETS, + DbiStatDefinition.ENV_BIN_DELTA_INSERTS, + DbiStatDefinition.ENV_BIN_DELTA_UPDATES, + DbiStatDefinition.ENV_BIN_DELTA_DELETES, + }; + + private static StatDefinition[] evictorStats = { + EvictorStatDefinition.EVICTOR_EVICTION_RUNS, + EvictorStatDefinition.EVICTOR_NODES_TARGETED, + 
EvictorStatDefinition.EVICTOR_NODES_EVICTED, + EvictorStatDefinition.EVICTOR_NODES_STRIPPED, + EvictorStatDefinition.EVICTOR_NODES_MUTATED, + EvictorStatDefinition.EVICTOR_NODES_PUT_BACK, + EvictorStatDefinition.EVICTOR_NODES_MOVED_TO_PRI2_LRU, + EvictorStatDefinition.EVICTOR_NODES_SKIPPED, + EvictorStatDefinition.EVICTOR_ROOT_NODES_EVICTED, + EvictorStatDefinition.EVICTOR_DIRTY_NODES_EVICTED, + EvictorStatDefinition.EVICTOR_LNS_EVICTED, + + EvictorStatDefinition.EVICTOR_SHARED_CACHE_ENVS, + + EvictorStatDefinition.LN_FETCH, + EvictorStatDefinition.LN_FETCH_MISS, + EvictorStatDefinition.UPPER_IN_FETCH, + EvictorStatDefinition.UPPER_IN_FETCH_MISS, + EvictorStatDefinition.BIN_FETCH, + EvictorStatDefinition.BIN_FETCH_MISS, + EvictorStatDefinition.BIN_DELTA_FETCH_MISS, + EvictorStatDefinition.BIN_FETCH_MISS_RATIO, + EvictorStatDefinition.FULL_BIN_MISS, + + EvictorStatDefinition.BIN_DELTA_BLIND_OPS, + + EvictorStatDefinition.CACHED_UPPER_INS, + EvictorStatDefinition.CACHED_BINS, + EvictorStatDefinition.CACHED_BIN_DELTAS, + + EvictorStatDefinition.THREAD_UNAVAILABLE, + + EvictorStatDefinition.CACHED_IN_SPARSE_TARGET, + EvictorStatDefinition.CACHED_IN_NO_TARGET, + EvictorStatDefinition.CACHED_IN_COMPACT_KEY, + + EvictorStatDefinition.PRI1_LRU_SIZE, + EvictorStatDefinition.PRI2_LRU_SIZE, + + EvictionSource.CACHEMODE.getNumBytesEvictedStatDef(), + EvictionSource.CRITICAL.getNumBytesEvictedStatDef(), + EvictionSource.DAEMON.getNumBytesEvictedStatDef(), + EvictionSource.EVICTORTHREAD.getNumBytesEvictedStatDef(), + EvictionSource.MANUAL.getNumBytesEvictedStatDef(), + }; + + private static StatDefinition[] offHeapStats = { + OffHeapStatDefinition.ALLOC_FAILURE, + OffHeapStatDefinition.ALLOC_OVERFLOW, + OffHeapStatDefinition.THREAD_UNAVAILABLE, + OffHeapStatDefinition.NODES_TARGETED, + OffHeapStatDefinition.CRITICAL_NODES_TARGETED, + OffHeapStatDefinition.NODES_EVICTED, + OffHeapStatDefinition.DIRTY_NODES_EVICTED, + OffHeapStatDefinition.NODES_STRIPPED, + OffHeapStatDefinition.NODES_MUTATED, + OffHeapStatDefinition.NODES_SKIPPED, + OffHeapStatDefinition.LNS_EVICTED, + OffHeapStatDefinition.LNS_LOADED, + OffHeapStatDefinition.LNS_STORED, + OffHeapStatDefinition.BINS_LOADED, + OffHeapStatDefinition.BINS_STORED, + OffHeapStatDefinition.CACHED_LNS, + OffHeapStatDefinition.CACHED_BINS, + OffHeapStatDefinition.CACHED_BIN_DELTAS, + OffHeapStatDefinition.TOTAL_BYTES, + OffHeapStatDefinition.TOTAL_BLOCKS, + OffHeapStatDefinition.LRU_SIZE, + }; + + private static StatDefinition[] inCompStats = { + INCompStatDefinition.INCOMP_SPLIT_BINS, + INCompStatDefinition.INCOMP_DBCLOSED_BINS, + INCompStatDefinition.INCOMP_CURSORS_BINS, + INCompStatDefinition.INCOMP_NON_EMPTY_BINS, + INCompStatDefinition.INCOMP_PROCESSED_BINS, + INCompStatDefinition.INCOMP_QUEUE_SIZE + }; + + private static StatDefinition[] latchStats = { + LatchStatDefinition.LATCH_NO_WAITERS, + LatchStatDefinition.LATCH_SELF_OWNED, + LatchStatDefinition.LATCH_CONTENTION, + LatchStatDefinition.LATCH_NOWAIT_SUCCESS, + LatchStatDefinition.LATCH_NOWAIT_UNSUCCESS, + LatchStatDefinition.LATCH_RELEASES + }; + + private static StatDefinition[] logStats = { + LogStatDefinition.FILEMGR_RANDOM_READS, + LogStatDefinition.FILEMGR_RANDOM_WRITES, + LogStatDefinition.FILEMGR_SEQUENTIAL_READS, + LogStatDefinition.FILEMGR_SEQUENTIAL_WRITES, + LogStatDefinition.FILEMGR_RANDOM_READ_BYTES, + LogStatDefinition.FILEMGR_RANDOM_WRITE_BYTES, + LogStatDefinition.FILEMGR_SEQUENTIAL_READ_BYTES, + LogStatDefinition.FILEMGR_SEQUENTIAL_WRITE_BYTES, + LogStatDefinition.FILEMGR_FILE_OPENS, 
+ LogStatDefinition.FILEMGR_OPEN_FILES, + LogStatDefinition.FILEMGR_BYTES_READ_FROM_WRITEQUEUE, + LogStatDefinition.FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE, + LogStatDefinition.FILEMGR_READS_FROM_WRITEQUEUE, + LogStatDefinition.FILEMGR_WRITES_FROM_WRITEQUEUE, + LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW, + LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES, + LogStatDefinition.FSYNCMGR_FSYNCS, + LogStatDefinition.FSYNCMGR_FSYNC_REQUESTS, + LogStatDefinition.FSYNCMGR_TIMEOUTS, + LogStatDefinition.FILEMGR_LOG_FSYNCS, + LogStatDefinition.GRPCMGR_FSYNC_TIME, + LogStatDefinition.GRPCMGR_FSYNC_MAX_TIME, + LogStatDefinition.GRPCMGR_N_GROUP_COMMIT_REQUESTS, + LogStatDefinition.GRPCMGR_N_GROUP_COMMIT_WAITS, + LogStatDefinition.GRPCMGR_N_LOG_INTERVAL_EXCEEDED, + LogStatDefinition.GRPCMGR_N_LOG_MAX_GROUP_COMMIT, + LogStatDefinition.LOGMGR_REPEAT_FAULT_READS, + LogStatDefinition.LOGMGR_TEMP_BUFFER_WRITES, + LogStatDefinition.LOGMGR_END_OF_LOG, + LogStatDefinition.LBFP_NO_FREE_BUFFER, + LogStatDefinition.LBFP_NOT_RESIDENT, + LogStatDefinition.LBFP_MISS, + LogStatDefinition.LBFP_LOG_BUFFERS, + LogStatDefinition.LBFP_BUFFER_BYTES + }; + + private static StatDefinition[] checkpointStats = { + CheckpointStatDefinition.CKPT_CHECKPOINTS, + CheckpointStatDefinition.CKPT_LAST_CKPTID, + CheckpointStatDefinition.CKPT_FULL_IN_FLUSH, + CheckpointStatDefinition.CKPT_FULL_BIN_FLUSH, + CheckpointStatDefinition.CKPT_DELTA_IN_FLUSH, + CheckpointStatDefinition.CKPT_LAST_CKPT_INTERVAL, + CheckpointStatDefinition.CKPT_LAST_CKPT_START, + CheckpointStatDefinition.CKPT_LAST_CKPT_END + }; + + private static StatDefinition[] throughputStats = { + + DbiStatDefinition.THROUGHPUT_PRI_SEARCH, + DbiStatDefinition.THROUGHPUT_PRI_SEARCH_FAIL, + DbiStatDefinition.THROUGHPUT_SEC_SEARCH, + DbiStatDefinition.THROUGHPUT_SEC_SEARCH_FAIL, + DbiStatDefinition.THROUGHPUT_PRI_POSITION, + DbiStatDefinition.THROUGHPUT_SEC_POSITION, + DbiStatDefinition.THROUGHPUT_PRI_INSERT, + DbiStatDefinition.THROUGHPUT_PRI_INSERT_FAIL, + DbiStatDefinition.THROUGHPUT_SEC_INSERT, + DbiStatDefinition.THROUGHPUT_PRI_UPDATE, + DbiStatDefinition.THROUGHPUT_SEC_UPDATE, + DbiStatDefinition.THROUGHPUT_PRI_DELETE, + DbiStatDefinition.THROUGHPUT_PRI_DELETE_FAIL, + DbiStatDefinition.THROUGHPUT_SEC_DELETE, + }; + + private static StatDefinition[] lockStats = { + LockStatDefinition.LOCK_REQUESTS, + LockStatDefinition.LOCK_WAITS, + }; + + /* + * Define min/max stats using the group name returned by loadStats not + * necessarily what is defined in the underlying statistic. Some groups are + * combined into a super group. 
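+     * (For example, GRPCMGR_FSYNC_MAX_TIME is listed below under the log
+     * group name; when bases are updated, StatManager re-seeds the base for
+     * such maximums from the current value rather than differencing them.)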
+ */ + public static StatManager.SDef[] minStats = {}; + + public static StatManager.SDef[] maxStats = { + new StatManager.SDef(LogStatDefinition.GROUP_NAME, + LogStatDefinition.GRPCMGR_FSYNC_MAX_TIME) + }; + + public StatCaptureDefinitions() { + nameToDef = new HashMap(); + String groupname = EvictorStatDefinition.GROUP_NAME; + for (StatDefinition stat : evictorStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + for (StatDefinition stat : dbiStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = OffHeapStatDefinition.GROUP_NAME; + for (StatDefinition stat : offHeapStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = CheckpointStatDefinition.GROUP_NAME; + for (StatDefinition stat : checkpointStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = CleanerStatDefinition.GROUP_NAME; + for (StatDefinition stat : cleanerStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = LogStatDefinition.GROUP_NAME; + for (StatDefinition stat : logStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = LockStatDefinition.GROUP_NAME; + for (StatDefinition stat : lockStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + for (StatDefinition stat : latchStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = DbiStatDefinition.ENV_GROUP_NAME; + for (StatDefinition stat : environmentStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = INCompStatDefinition.GROUP_NAME; + for (StatDefinition stat : inCompStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + groupname = DbiStatDefinition.THROUGHPUT_GROUP_NAME; + for (StatDefinition stat : throughputStats) { + nameToDef.put(groupname + ":" + stat.getName(), stat); + } + } + + public SortedSet getStatisticProjections() { + SortedSet retval = new TreeSet(); + getProjectionsInternal(retval); + return retval; + } + + protected void getProjectionsInternal(SortedSet pmap) { + String groupname = EvictorStatDefinition.GROUP_NAME; + for (StatDefinition stat : evictorStats) { + pmap.add(groupname + ":" + stat.getName()); + } + for (StatDefinition stat : dbiStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = OffHeapStatDefinition.GROUP_NAME; + for (StatDefinition stat : offHeapStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = CheckpointStatDefinition.GROUP_NAME; + for (StatDefinition stat : checkpointStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = CleanerStatDefinition.GROUP_NAME; + for (StatDefinition stat : cleanerStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = LogStatDefinition.GROUP_NAME; + for (StatDefinition stat : logStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = LockStatDefinition.GROUP_NAME; + for (StatDefinition stat : lockStats) { + pmap.add(groupname + ":" + stat.getName()); + } + for (StatDefinition stat : latchStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = DbiStatDefinition.ENV_GROUP_NAME; + for (StatDefinition stat : environmentStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = INCompStatDefinition.GROUP_NAME; + for (StatDefinition stat : inCompStats) { + pmap.add(groupname + ":" + stat.getName()); + } + groupname = DbiStatDefinition.THROUGHPUT_GROUP_NAME; + for (StatDefinition stat : throughputStats) { + pmap.add(groupname + ":" + 
stat.getName());
+        }
+    }
+
+    /**
+     * Used to get a statistics definition. This method is used
+     * for testing purposes only.
+     * @param colname in format groupname:statname.
+     * @return statistics definition or null if not defined.
+     */
+    public StatDefinition getDefinition(String colname) {
+        return nameToDef.get(colname);
+    }
+}
diff --git a/src/com/sleepycat/je/statcap/StatManager.java b/src/com/sleepycat/je/statcap/StatManager.java
new file mode 100644
index 0000000..2b80cee
--- /dev/null
+++ b/src/com/sleepycat/je/statcap/StatManager.java
@@ -0,0 +1,258 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.statcap;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.LongMaxStat;
+import com.sleepycat.je.utilint.LongMinStat;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.StatDefinition;
+import com.sleepycat.je.utilint.StatGroup;
+
+/**
+ * The StatManager provides functionality to acquire incremental statistics.
+ * A client registers itself and is returned a key. The key is used in
+ * subsequent calls to acquire statistics. The key is associated with a base
+ * set of statistic values. The base set is used to compute incremental
+ * statistics. Incremental statistics are computed per interval by subtracting
+ * the base from the current set of values. The base values for the
+ * registered contexts are updated when statistics are cleared.
+ *
+ * For instance, suppose you have a counter named X whose initial value is
+ * zero, and two registered statistic contexts S1 (say, for stat capture) and
+ * S2 (for the public api loadStats). The counter
+ * gets incremented to 10. S1 loads stats with clear=true. The statistic base
+ * for the other stat context, S2, is updated. The value in the base for X
+ * is set to (current value in base - current stat value) or (0 - 10). The
+ * value returned for stat X with respect to context S1
+ * (the caller of loadStats) is (current value of X - base value) or 10 - 0.
+ * The value of X is cleared since getClear() == true. Later the value of X is
+ * incremented (value is now 1). Statistics are loaded for stat context S2.
+ * The value returned is current value - base value, or 1 - (-10) or 11.
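+ *
+ * A typical usage sketch (hypothetical caller, following the signatures
+ * below):
+ *
+ *     Integer key = statManager.registerStatContext();
+ *     ...
+ *     EnvironmentStats delta = statManager.loadStats(config, key);
+ *
+ * where config.getClear() determines whether the other contexts' bases are
+ * adjusted as described above.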
+ */
+public class StatManager {
+
+    /* Registered statistics base contexts */
+    protected final Map<Integer, StatContext> statContextMap =
+        new HashMap<Integer, StatContext>();
+
+    private final UpdateMinMax updateMinMaxStat =
+        new UpdateMinMax(StatCaptureDefinitions.minStats,
+                         StatCaptureDefinitions.maxStats);
+
+    protected final EnvironmentImpl env;
+
+    public StatManager(EnvironmentImpl env) {
+        this.env = env;
+    }
+
+    public synchronized Integer registerStatContext() {
+        StatContext sctx = new StatContext(null);
+        int max = 0;
+        for (Integer key : statContextMap.keySet()) {
+            if (key > max) {
+                max = key;
+            }
+        }
+        Integer newkey = max + 1;
+        statContextMap.put(newkey, sctx);
+        return newkey;
+    }
+
+    public synchronized EnvironmentStats loadStats(StatsConfig config,
+                                                   Integer contextKey) {
+        StatContext sc = statContextMap.get(contextKey);
+        if (sc == null) {
+            throw EnvironmentFailureException.unexpectedState(
+                "Internal error: stat context is not registered");
+        }
+        /* load current statistics */
+        EnvironmentStats curstats = env.loadStatsInternal(config);
+        Map<String, StatGroup> cur = curstats.getStatGroupsMap();
+
+        /* compute statistics by using the base values from the context */
+        Map<String, StatGroup> base = sc.getBase();
+        EnvironmentStats intervalStats;
+        if (base != null) {
+            intervalStats = computeIntervalStats(cur, base);
+        } else {
+            intervalStats = curstats;
+        }
+
+        if (config.getClear()) {
+
+            /*
+             * The underlying statistics were cleared, so the base values
+             * for the registered contexts are updated to reflect the
+             * current statistic values.
+             */
+            for (StatContext context : statContextMap.values()) {
+                if (context.getBase() != null) {
+                    updateMinMaxStat.updateBase(context.getBase(), cur);
+                }
+            }
+
+            for (StatContext context : statContextMap.values()) {
+                if (context == sc) {
+                    context.setBase(null);
+                } else {
+                    if (context.getBase() == null) {
+                        context.setBase(cloneAndNegate(cur));
+                    } else {
+                        // reset base
+                        context.setBase(
+                            computeIntervalStats(
+                                context.getBase(), cur).getStatGroupsMap());
+                    }
+                }
+            }
+        }
+        return intervalStats;
+    }
+
+    private EnvironmentStats computeIntervalStats(
+        Map<String, StatGroup> current,
+        Map<String, StatGroup> base) {
+
+        EnvironmentStats envStats = new EnvironmentStats();
+
+        for (StatGroup cg : current.values()) {
+            StatGroup bg = base.get(cg.getName());
+            envStats.setStatGroup(cg.computeInterval(bg));
+        }
+        return envStats;
+    }
+
+    protected Map<String, StatGroup> cloneAndNegate(
+        Map<String, StatGroup> in) {
+
+        HashMap<String, StatGroup> retval = new HashMap<String, StatGroup>();
+        for (Entry<String, StatGroup> e : in.entrySet()) {
+            StatGroup negatedGroup = e.getValue().cloneGroup(false);
+            negatedGroup.negate();
+            retval.put(e.getKey(), negatedGroup);
+        }
+        return retval;
+    }
+
+    protected class StatContext {
+        private Map<String, StatGroup> base;
+        private Map<String, StatGroup> repbase = null;
+
+        StatContext(Map<String, StatGroup> base) {
+            this.base = base;
+        }
+
+        void setBase(Map<String, StatGroup> base) {
+            this.base = base;
+        }
+
+        Map<String, StatGroup> getBase() {
+            return base;
+        }
+
+        public void setRepBase(Map<String, StatGroup> base) {
+            this.repbase = base;
+        }
+
+        public Map<String, StatGroup> getRepBase() {
+            return repbase;
+        }
+    }
+
+    public static class SDef {
+        private final String groupName;
+        private final StatDefinition definition;
+
+        public SDef(String groupname, StatDefinition sd) {
+            definition = sd;
+            groupName = groupname;
+        }
+
+        public String getGroupName() {
+            return groupName;
+        }
+
+        public StatDefinition getDefinition() {
+            return definition;
+        }
+    }
+
+    public class UpdateMinMax {
+        private final ArrayList<SDef> minStats = new ArrayList<SDef>();
+        private final ArrayList<SDef> maxStats = new ArrayList<SDef>();
+
+        public UpdateMinMax(SDef[] minStatistics, SDef[] maxStatistics) {
+            for (SDef min : minStatistics) {
+                minStats.add(min);
+            }
+
+            for (SDef max : maxStatistics) {
+                maxStats.add(max);
+            }
+        }
+
+        public void updateBase(Map<String, StatGroup> base,
+                               Map<String, StatGroup> other) {
+            for (SDef sd : minStats) {
+                StatGroup group = other.get(sd.groupName);
+                if (group == null) {
+                    continue;
+                }
+                LongStat otherValue =
+                    group.getLongStat(sd.definition);
+                if (otherValue == null) {
+                    continue;
+                }
+
+                LongMinStat baseStat =
+                    base.get(sd.groupName).getLongMinStat(sd.definition);
+
+                /* Check if the stat is not yet in the base */
+                if (baseStat == null) {
+                    StatGroup sg = base.get(sd.groupName);
+                    baseStat = (LongMinStat) otherValue.copyAndAdd(sg);
+                }
+
+                baseStat.setMin(otherValue.get());
+            }
+            for (SDef sd : maxStats) {
+                StatGroup group = other.get(sd.groupName);
+                if (group == null) {
+                    continue;
+                }
+                LongStat otherValue =
+                    group.getLongStat(sd.definition);
+                if (otherValue == null) {
+                    continue;
+                }
+                LongMaxStat baseStat =
+                    base.get(sd.groupName).getLongMaxStat(sd.definition);
+
+                /* Check if the stat is not yet in the base */
+                if (baseStat == null) {
+                    StatGroup sg = base.get(sd.groupName);
+                    baseStat = (LongMaxStat) otherValue.copyAndAdd(sg);
+                }
+
+                baseStat.setMax(otherValue.get());
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/statcap/StatUtils.java b/src/com/sleepycat/je/statcap/StatUtils.java
new file mode 100644
index 0000000..2556e03
--- /dev/null
+++ b/src/com/sleepycat/je/statcap/StatUtils.java
@@ -0,0 +1,31 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.statcap;
+
+import java.text.DateFormat;
+import java.util.Date;
+
+import com.sleepycat.je.utilint.TracerFormatter;
+
+class StatUtils {
+    private static final DateFormat formatter =
+        TracerFormatter.makeDateFormat();
+    private static final Date date = new Date();
+
+    /** Returns a string representation of the specified time. */
+    public static synchronized String getDate(final long millis) {
+        /* The date and formatter are not thread safe */
+        date.setTime(millis);
+        return formatter.format(date);
+    }
+}
\ No newline at end of file
diff --git a/src/com/sleepycat/je/statcap/package-info.java b/src/com/sleepycat/je/statcap/package-info.java
new file mode 100644
index 0000000..08809ec
--- /dev/null
+++ b/src/com/sleepycat/je/statcap/package-info.java
@@ -0,0 +1,17 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+/**
+ * INTERNAL: Statistics capture and output to je.stat.csv file.
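+ *
+ * Editorial sketch, not part of the original javadoc: StatManager in this
+ * package computes per-context interval statistics by keeping a base map of
+ * StatGroups for each registered context and subtracting it from the current
+ * values via StatGroup.computeInterval. With hypothetical numbers: if the
+ * current value of a counter is 100 and the context's base value is 60,
+ * loadStats returns 40 for that context. When the underlying stats are
+ * cleared, a context without a base receives a negated clone of the current
+ * values (cloneAndNegate), so it continues to report correct cumulative
+ * totals across the clear.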
+ */
+package com.sleepycat.je.statcap;
diff --git a/src/com/sleepycat/je/tree/BIN.java b/src/com/sleepycat/je/tree/BIN.java
new file mode 100644
index 0000000..ba9eeb4
--- /dev/null
+++ b/src/com/sleepycat/je/tree/BIN.java
@@ -0,0 +1,2556 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.tree;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.sleepycat.je.CacheMode;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.cleaner.LocalUtilizationTracker;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentFailureReason;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.evictor.OffHeapCache;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogItem;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.txn.LockManager;
+import com.sleepycat.je.utilint.DatabaseUtil;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.SizeofMarker;
+import com.sleepycat.je.utilint.TinyHashSet;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * A BIN represents a Bottom Internal Node in the JE tree.
+ *
+ * BIN-deltas
+ * ==========
+ * A BIN-delta is a BIN with the non-dirty slots omitted. A "full BIN", OTOH,
+ * contains all slots. On disk and in memory, the format of a BIN-delta is the
+ * same as that of a BIN. In memory, a BIN object is actually a BIN-delta when
+ * the BIN-delta flag is set (IN.isBINDelta). On disk, the NewBINDelta log
+ * entry type (class BINDeltaLogEntry) is the only thing that distinguishes it
+ * from a full BIN, which has the BIN log entry type.
+ *
+ * BIN-deltas provide two benefits: reduced writing and reduced memory usage.
+ *
+ * Reduced Writing
+ * ---------------
+ * Logging a BIN-delta rather than a full BIN reduces writing significantly.
+ * The cost, however, is that two reads are necessary to reconstruct a full
+ * BIN from scratch. The reduced writing is worth this cost, particularly
+ * because less writing means less log cleaning.
+ *
+ * A BIN-delta is logged when 25% or less (configured with EnvironmentConfig
+ * TREE_BIN_DELTA) of the slots in a BIN are dirty. When a BIN-delta is
+ * logged, the dirty flag is cleared on the BIN in cache. If more slots are
+ * dirtied and another BIN-delta is logged, it will contain all entries
+ * dirtied since the last full BIN was logged. In other words, BIN-deltas are
+ * cumulative and not chained, to avoid reading many (more than two) log
+ * entries to reconstruct a full BIN. The dirty flag on each slot is cleared
+ * only when a full BIN is logged.
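+ *
+ * As an illustrative timeline (editorial sketch with hypothetical LSNs):
+ *
+ *   LSN 100: full BIN logged; all slot dirty flags are cleared
+ *   LSN 150: slots 3 and 7 dirtied; BIN-delta logged containing {3, 7}
+ *   LSN 180: slot 9 dirtied; BIN-delta logged containing {3, 7, 9}
+ *
+ * Because deltas are cumulative, reconstructing the full BIN requires only
+ * the most recent delta (LSN 180) plus the last full BIN (LSN 100), never
+ * the intermediate delta at LSN 150.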
+ *
+ * In addition to the cost of fetching two entries on a BIN cache miss,
+ * another drawback of the current approach is that dirtiness propagates
+ * upward in the Btree due to BIN-delta logging, causing repeated logging of
+ * upper INs. The slot of the parent IN contains the LSN of the most recent
+ * BIN-delta or full BIN that was logged. A BINDeltaLogEntry in turn contains
+ * the LSN of the last full BIN logged.
+ *
+ * Historical note: The pre-JE 5 implementation of OldBINDeltas worked
+ * differently and had a different cost/benefit trade-off. When an
+ * OldBINDelta was logged, its dirty flag was not cleared, causing it to be
+ * logged repeatedly at every checkpoint. A full BIN was logged after 10
+ * deltas, to prevent endless logging of the same BIN. One benefit of this
+ * approach is that the BIN's parent IN was not dirtied when logging the
+ * OldBINDelta, preventing dirtiness from propagating upward. Another
+ * benefit is that the OldBINDelta was only processed by recovery, and did
+ * not have to be fetched to reconstruct a full BIN from scratch on a cache
+ * miss. But the cost (the logging of an OldBINDelta every checkpoint, even
+ * when it hadn't changed since the last time logged) outweighed the
+ * benefits. When the current approach was implemented in JE 5, performance
+ * improved due to less logging.
+ *
+ * In JE 6, deltas were also maintained in the Btree cache. This was done to
+ * provide the reduced memory benefits described in the next section. The
+ * log format for a delta was also changed. The OldBINDelta log format is
+ * different (not the same as the BIN format) and is supported for backward
+ * compatibility as the OldBINDeltaLogEntry. Its log entry type name is
+ * still BINDelta, which is why the new type is named NewBINDelta (for
+ * backward compatibility, log entry type names cannot be changed.) This is
+ * also why the spelling "BIN-delta" is used to refer to deltas in the new
+ * approach. The old BINDelta class was renamed to OldBINDelta and there is
+ * no longer a class named BINDelta.
+ *
+ * Reduced Memory Usage
+ * --------------------
+ * In the Btree cache, a BIN may be represented as a full BIN or a BIN-delta.
+ * Eviction will mutate a full BIN to a BIN-delta in preference to discarding
+ * the entire BIN. A BIN-delta in cache occupies less memory than a full BIN,
+ * and can be exploited as follows:
+ *
+ * - When a full BIN is needed, it can be constructed with only one fetch
+ *   rather than two, reducing IO overall. IN.fetchIN implements this
+ *   optimization.
+ *
+ * - Certain operations can sometimes be performed using the BIN-delta alone,
+ *   allowing such operations on a given data set to take place using less
+ *   IO (for a given cache size).
+ *
+ * The latter benefit is not yet implemented. No user CRUD operations are
+ * currently implemented using BIN-deltas. In the future we plan to implement
+ * the following operations using the BIN-delta alone.
+ *
+ * - Consider recording deletions in a BIN-delta. Currently, slot deletion
+ *   prohibits a BIN-delta from being logged. To record deletion in
+ *   BIN-deltas, slot deletion will have to be deferred until a full BIN is
+ *   logged.
+ *
+ * - User reads by key, updates and deletions can be implemented if the key
+ *   happens to appear in the BIN-delta.
+ *
+ * - The Cleaner can migrate an LN if its key happens to appear in the
+ *   BIN-delta. This is similar to a user update operation, but in a
+ *   different code path.
+ *
+ * - Insertions, deletions and updates can always be performed in a BIN-delta
+ *   during replica replay, since the Master operation has already determined
+ *   whether the key exists.
+ *
+ * - Recovery LN redo could also apply insertions, updates and deletions in
+ *   the manner described.
+ *
+ * - Add idempotent put/delete operations, which can always be applied in a
+ *   BIN-delta.
+ *
+ * - Store a hash of the keys in the full BIN in the BIN-delta and use it to
+ *   perform the following in the delta:
+ *   - putIfAbsent (true insertion)
+ *   - get/delete/putIfPresent operations that return NOTFOUND
+ *   - to avoid accumulating unnecessary deletions
+ *
+ * However, some internal operations do currently exploit BIN-deltas to avoid
+ * unnecessary IO. The following are currently implemented.
+ *
+ * - The Evictor and Checkpointer log a BIN-delta that is present in the
+ *   cache, without having to fetch the full BIN.
+ *
+ * - The Cleaner can use the BIN-delta to avoid fetching when processing a
+ *   BIN log entry (delta or full) and the BIN is not present in cache.
+ *
+ * To support BIN-delta-aware operations, the IN.fetchIN() and IN.getTarget()
+ * methods may return a BIN delta. IN.getTarget() will return whatever object
+ * is cached under the parent IN, and IN.fetchIN() will do a single I/O to
+ * fetch the most recent log record for the requested BIN, which may be a
+ * full BIN or a delta. Callers of these methods must be prepared to handle
+ * a BIN delta; either doing their operation directly on the delta, if
+ * possible, or mutating the delta to a full BIN by calling
+ * BIN.mutateToFullBIN().
+ */
+public class BIN extends IN {
+
+    private static final String BEGIN_TAG = "<bin>";
+    private static final String END_TAG = "</bin>";
+
+    /**
+     * Used as the "empty rep" for the INLongRep lastLoggedSizes field.
+     *
+     * minLength is 1 because log sizes are unpredictable.
+     *
+     * allowSparseRep is false because all slots have log sizes and less
+     * mutation is better.
+     */
+    private static final INLongRep.EmptyRep EMPTY_LAST_LOGGED_SIZES =
+        new INLongRep.EmptyRep(1, false);
+
+    /**
+     * Used as the "empty rep" for the INLongRep vlsnCache field.
+     *
+     * minLength is 5 because VLSNs grow that large fairly quickly, and less
+     * mutation is better. The value 5 accommodates data set sizes up to 100
+     * billion. If we want to improve memory utilization for smaller data
+     * sets or reduce mutation for larger data sets, we could dynamically
+     * determine a value based on the last assigned VLSN.
+     *
+     * allowSparseRep is false because either all slots typically have VLSNs,
+     * or none do, and less mutation is better.
+     */
+    private static final INLongRep.EmptyRep EMPTY_VLSNS =
+        new INLongRep.EmptyRep(5, false);
+
+    /**
+     * Used as the "empty rep" for the INLongRep offHeapLNIds field.
+     *
+     * minLength is 8 because memory IDs are 64-bit pointers.
+     *
+     * allowSparseRep is true because some workloads will only load LN IDs
+     * for a subset of the LNs in the BIN.
+     */
+    private static final INLongRep.EmptyRep EMPTY_OFFHEAP_LN_IDS =
+        new INLongRep.EmptyRep(8, true);
+
+    /**
+     * Used as the "empty rep" for the INLongRep expirationValues field.
+     *
+     * minLength is 1 because we expect most expiration values, which are an
+     * offset from a base day/hour, to fit in one byte.
+     *
+     * allowSparseRep is true because some workloads only set TTLs on some of
+     * the LNs in a BIN.
+     */
+    private static final INLongRep.EmptyRep EMPTY_EXPIRATION =
+        new INLongRep.EmptyRep(1, true);
+
+    /*
+     * The set of cursors that are currently referring to this BIN.
+     * This field is set to null when there are no cursors on this BIN.
+     */
+    private TinyHashSet<CursorImpl> cursorSet;
+
+    /*
+     * Support for logging BIN deltas. (Partial BIN logging)
+     */
+
+    /*
+     * If this is a delta, fullBinNEntries stores the number of entries
+     * in the full version of the BIN. This is a persistent field for
+     * BIN-delta logrecs only, and for log versions >= 10.
+     */
+    private int fullBinNEntries = -1;
+
+    /*
+     * If this is a delta, fullBinMaxEntries stores the max number of
+     * entries (capacity) in the full version of the BIN. This is a
+     * persistent field for BIN-delta logrecs only, and for log versions
+     * >= 10.
+     */
+    private int fullBinMaxEntries = -1;
+
+    /*
+     * If "this" is a BIN-delta, bloomFilter is a bloom-filter representation
+     * of the set of keys in the clean slots of the full version of the same
+     * BIN. It is used to allow blind put operations in deltas, by answering
+     * the question whether the put key is in the full BIN or not. See the
+     * javadoc of the TREE_BIN_DELTA_BLIND_PUTS config param for more info.
+     * This is a persistent field for BIN-delta logrecs only, and for log
+     * versions >= 10.
+     */
+    byte[] bloomFilter;
+
+    /*
+     * See comment in IN.java, right after the lastFullVersion data field.
+     */
+    private long lastDeltaVersion = DbLsn.NULL_LSN;
+
+    /*
+     * Caches the VLSN sequence for the LN entries in a BIN, when VLSN
+     * preservation and caching are configured.
+     *
+     * A VLSN is added to the cache when an LN is evicted from a BIN. When
+     * the LN is resident, there is no need for caching because the LN
+     * contains the VLSN. See BIN.setTarget. This strategy works because an
+     * LN is always cached during a read or write operation, and only evicted
+     * after that, based on eviction policies.
+     *
+     * For embedded LNs a VLSN is added to the cache every time the record is
+     * logged. Furthermore, the vlsn cache is made persistent for such LNs.
+     *
+     * An EMPTY_REP is used initially until the need arises to add a non-zero
+     * value. The cache will remain empty if LNs are never evicted or version
+     * caching is not configured, which is always the case for standalone JE.
+     */
+    private INLongRep vlsnCache = EMPTY_VLSNS;
+
+    /*
+     * Stores the size of the most recently written logrec of each LN, or
+     * zero if the size is unknown.
+     *
+     * We use INLongRep in spite of the fact that sizes are int not long;
+     * INLongRep will store the minimum number of bytes. An EMPTY_REP is
+     * used initially until the need arises to add a non-zero value.
+     */
+    private INLongRep lastLoggedSizes = EMPTY_LAST_LOGGED_SIZES;
+
+    /**
+     * When some LNs are in the off-heap cache, the offHeapLruId is this
+     * BIN's index in the off-heap LRU list.
+     */
+    private INLongRep offHeapLNIds = EMPTY_OFFHEAP_LN_IDS;
+    private int offHeapLruId = -1;
+
+    /**
+     * An expirationValues slot value is one more than the number of
+     * days/hours to add to the expirationBase to get the true expiration
+     * days/hours. A slot value of zero means no expiration, and a non-zero
+     * slot value is one greater than the actual offset to be added. The base
+     * is the smallest non-zero offset that has been encountered.
+     */
+    private INLongRep expirationValues = EMPTY_EXPIRATION;
+    private int expirationBase = -1;
+
+    /**
+     * Can be set to true by tests to prevent last logged sizes from being
+     * stored.
+ */ + public static boolean TEST_NO_LAST_LOGGED_SIZES = false; + + public BIN() { + } + + public BIN( + DatabaseImpl db, + byte[] identifierKey, + int capacity, + int level) { + + super(db, identifierKey, capacity, level); + } + + /** + * For Sizeof. + */ + public BIN(@SuppressWarnings("unused") SizeofMarker marker) { + super(marker); + } + + /** + * Create a new BIN. Need this because we can't call newInstance() + * without getting a 0 for nodeId. + */ + @Override + protected IN createNewInstance( + byte[] identifierKey, + int maxEntries, + int level) { + + return new BIN(getDatabase(), identifierKey, maxEntries, level); + } + + public BINReference createReference() { + return new BINReference( + getNodeId(), getDatabase().getId(), getIdentifierKey()); + } + + @Override + public boolean isBIN() { + return true; + } + + /* + * Return whether the shared latch for this kind of node should be of the + * "always exclusive" variety. Presently, only IN's are actually latched + * shared. BINs are latched exclusive only. + */ + @Override + boolean isAlwaysLatchedExclusively() { + return true; + } + + @Override + public String shortClassName() { + return "BIN"; + } + + @Override + public String beginTag() { + return BEGIN_TAG; + } + + @Override + public String endTag() { + return END_TAG; + } + + boolean isVLSNCachingEnabled() { + return (!databaseImpl.getSortedDuplicates() && getEnv().getCacheVLSN()); + } + + public void setCachedVLSN(int idx, long vlsn) { + + /* + * We do not cache the VLSN for dup DBs, because dup DBs are typically + * used only for indexes, and the overhead of VLSN maintenance would be + * wasted. Plus, although technically VLSN preservation might apply to + * dup DBs, the VLSNs are not reliably available since the LNs are + * immediately obsolete. + */ + if (!isVLSNCachingEnabled()) { + return; + } + setCachedVLSNUnconditional(idx, vlsn); + } + + void setCachedVLSNUnconditional(int idx, long vlsn) { + vlsnCache = vlsnCache.set( + idx, + (vlsn == VLSN.NULL_VLSN_SEQUENCE ? 0 : vlsn), + this); + } + + long getCachedVLSN(int idx) { + final long vlsn = vlsnCache.get(idx); + return (vlsn == 0 ? VLSN.NULL_VLSN_SEQUENCE : vlsn); + } + + /** + * Returns the VLSN. VLSN.NULL_VLSN_SEQUENCE (-1) is returned in two + * cases: + * 1) This is a standalone environment. + * 2) The VLSN is not cached (perhaps VLSN caching is not configured), and + * the allowFetch param is false. + * + * WARNING: Because the vlsnCache is only updated when an LN is evicted, it + * is critical that getVLSN returns the VLSN for a resident LN before + * getting the VLSN from the cache. + */ + public long getVLSN(int idx, boolean allowFetch, CacheMode cacheMode) { + + /* Must return the VLSN from the LN, if it is resident. */ + LN ln = (LN) getTarget(idx); + if (ln != null) { + return ln.getVLSNSequence(); + } + + /* Next try the vlsnCache. */ + long vlsn = getCachedVLSN(idx); + if (!VLSN.isNull(vlsn)) { + return vlsn; + } + + /* Next try the off-heap cache. */ + final OffHeapCache ohCache = getOffHeapCache(); + if (ohCache.isEnabled()) { + + vlsn = ohCache.loadVLSN(this, idx); + + if (!VLSN.isNull(vlsn)) { + return vlsn; + } + } + + /* As the last resort, fetch the LN if fetching is allowed. */ + if (!allowFetch || isEmbeddedLN(idx)) { + return vlsn; + } + + ln = fetchLN(idx, cacheMode); + if (ln != null) { + return ln.getVLSNSequence(); + } + + return VLSN.NULL_VLSN_SEQUENCE; + } + + /** For unit testing. 
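+     * (Illustrative use, not original javadoc: a test can check that after
+     * the LN at slot idx has been evicted, getVLSNCache().get(idx) still
+     * reflects its VLSN sequence.)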
+     */
+    public INLongRep getVLSNCache() {
+        return vlsnCache;
+    }
+
+    /**
+     * The last logged size is never needed when the LN is counted obsolete
+     * immediately, since it is only needed for counting an LN obsolete
+     * during an update or deletion.
+     *
+     * This method may not be called until after the database is initialized,
+     * i.e., it may not be called during readFromLog.
+     */
+    @Override
+    boolean isLastLoggedSizeStored(int idx) {
+
+        return mayHaveLastLoggedSizeStored() && !isEmbeddedLN(idx);
+    }
+
+    @Override
+    boolean mayHaveLastLoggedSizeStored() {
+
+        /* Check final static first so all test code is optimized away. */
+        if (DatabaseUtil.TEST) {
+            /* Don't skew test measurements with internal DBs. */
+            if (TEST_NO_LAST_LOGGED_SIZES &&
+                !databaseImpl.getDbType().isInternal()) {
+                return false;
+            }
+        }
+
+        return !databaseImpl.isLNImmediatelyObsolete();
+    }
+
+    /**
+     * Sets last logged size if necessary.
+     *
+     * This method does not dirty the IN because the caller methods dirty it,
+     * for example, when setting the LSN, key, or node.
+     *
+     * This method is sometimes called to add the logged size for a pre log
+     * version 9 BIN, for example, during fetchTarget and preload. This makes
+     * the logged size available for obsolete counting but does not dirty the
+     * IN, since that could cause an unexpected write of the IN being read.
+     *
+     * @param lastLoggedSize is positive if the size is known, zero if the
+     * size is unknown, or -1 if the size should not be changed because
+     * logging of the LN was deferred.
+     */
+    @Override
+    public void setLastLoggedSize(int idx, int lastLoggedSize) {
+
+        if ((lastLoggedSize < 0) || !isLastLoggedSizeStored(idx)) {
+            return;
+        }
+
+        setLastLoggedSizeUnconditional(idx, lastLoggedSize);
+    }
+
+    @Override
+    public void clearLastLoggedSize(int idx) {
+
+        setLastLoggedSizeUnconditional(idx, 0);
+    }
+
+    /**
+     * Sets the size without checking whether it is necessary.
+     *
+     * This method is used when reading from the log because the databaseImpl
+     * is not yet initialized and isLastLoggedSizeStored cannot be called.
+     * It is also called for efficiency reasons when it is known that storing
+     * the logged size is necessary, for example, when copying values between
+     * slots.
+     */
+    @Override
+    void setLastLoggedSizeUnconditional(int idx, int lastLoggedSize) {
+
+        lastLoggedSizes = lastLoggedSizes.set(idx, lastLoggedSize, this);
+    }
+
+    /**
+     * @return a positive value if the size is known, or zero if unknown.
+     */
+    @Override
+    public int getLastLoggedSize(int idx) {
+
+        if (isLastLoggedSizeStored(idx)) {
+            return (int) lastLoggedSizes.get(idx);
+        }
+
+        return 0;
+    }
+
+    /**
+     * Sets the expiration time for a slot in days or hours.
+     */
+    public void setExpiration(final int idx, int value, final boolean hours) {
+
+        /* This slot has no expiration. */
+        if (value == 0) {
+            expirationValues = expirationValues.set(idx, 0, this);
+            return;
+        }
+
+        /*
+         * If this is the first slot with an expiration, initialize the base
+         * to the value and set the offset (slot value) to one.
+         */
+        if (expirationBase == -1 || nEntries == 1) {
+            expirationBase = value;
+            setExpirationOffset(idx, 1);
+            setExpirationInHours(hours);
+            return;
+        }
+
+        if (hours) {
+            /* Convert existing values to hours if necessary.
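+             * (Worked example with hypothetical values: base = 2 days and a
+             * slot offset of 3 means expiration 2 + 3 - 1 = 4 days. After
+             * conversion, base = 48 hours and the offset becomes
+             * (3 - 1) * 24 + 1 = 49, i.e. 48 + 49 - 1 = 96 hours = 4 days,
+             * unchanged.)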
*/ + if (!isExpirationInHours()) { + + expirationBase *= 24; + setExpirationInHours(true); + + for (int i = 0; i < nEntries; i += 1) { + + if (i == idx) { + continue; + } + + final int offset = (int) expirationValues.get(i); + + if (offset == 0) { + continue; + } + + setExpirationOffset(i, ((offset - 1) * 24) + 1); + } + } + } else { + /* If values are stored in hours, convert days to hours. */ + if (isExpirationInHours()) { + value *= 24; + } + } + + /* + * Slot's expiration must not be less than the base. If it is, decrease + * the base and increase the offset in other slots accordingly. + */ + if (value < expirationBase) { + + final int adjustment = expirationBase - value; + expirationBase = value; + + for (int i = 0; i < nEntries; i += 1) { + + if (i == idx) { + continue; + } + + final int offset = (int) expirationValues.get(i); + + if (offset == 0) { + continue; + } + + setExpirationOffset(i, offset + adjustment); + } + } + + setExpirationOffset(idx, value - expirationBase + 1); + } + + public boolean hasExpirationValues() { + + return !expirationValues.isEmpty(); + } + + /** + * Returns the expiration time for a slot. The return value is in days or + * hours, depending on isExpirationTimeInHours. + */ + public int getExpiration(int idx) { + + final int offset = (int) expirationValues.get(idx); + + if (offset == 0) { + return 0; + } + + return offset - 1 + expirationBase; + } + + int getExpirationBase() { + return expirationBase; + } + + int getExpirationOffset(int idx) { + return (int) expirationValues.get(idx); + } + + void setExpirationBase(int base) { + expirationBase = base; + } + + void setExpirationOffset(int idx, int offset) { + expirationValues = expirationValues.set(idx, offset, this); + } + + /** + * Returns whether the slot is known-deleted, pending-deleted, or expired. + */ + public boolean isDefunct(int idx) { + return isDeleted(idx) || isExpired(idx); + } + + /** + * Returns whether the slot is known-deleted or pending-deleted. + */ + public boolean isDeleted(int idx) { + return isEntryKnownDeleted(idx) || isEntryPendingDeleted(idx); + } + + /** + * Returns whether the slot is expired. + */ + public boolean isExpired(int idx) { + return getEnv().isExpired(getExpiration(idx), isExpirationInHours()); + } + + public boolean isProbablyExpired(int idx) { + + return getEnv().expiresWithin( + getExpiration(idx), isExpirationInHours(), + getEnv().getTtlClockTolerance()); + } + + public int getLastLoggedSizeUnconditional(int idx) { + return (int) lastLoggedSizes.get(idx); + } + + public void setOffHeapLNId(int idx, long memId) { + + if (offHeapLNIds.get(idx) == memId) { + return; + } + + offHeapLNIds = offHeapLNIds.set(idx, memId, this); + } + + public void clearOffHeapLNIds() { + offHeapLNIds = offHeapLNIds.clear(this, EMPTY_OFFHEAP_LN_IDS); + } + + public long getOffHeapLNIdsMemorySize() { + return offHeapLNIds.getMemorySize(); + } + + public long getOffHeapLNId(int idx) { + + return offHeapLNIds.get(idx); + } + + public boolean hasOffHeapLNs() { + + return !offHeapLNIds.isEmpty(); + } + + public void setOffHeapLruId(int id) { + + assert id >= 0 || !hasOffHeapLNs(); + + offHeapLruId = id; + } + + public int getOffHeapLruId() { + return offHeapLruId; + } + + void freeOffHeapLN(int idx) { + getOffHeapCache().freeLN(this, idx); + } + + /** + * Updates the vlsnCache when an LN target is evicted. See vlsnCache. 
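+ *
+ * As an illustrative flow (editorial note, assuming the standard eviction
+ * path): when an LN is evicted via setTarget(idx, null), the override below
+ * first captures the LN's VLSN in the vlsnCache, so a subsequent
+ * getVLSN(idx, false, null) can be answered from the cache without fetching
+ * the LN.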
+ */
+    @Override
+    void setTarget(int idx, Node target) {
+
+        if (target == null) {
+            final Node oldTarget = getTarget(idx);
+            if (oldTarget instanceof LN) {
+                setCachedVLSN(idx, ((LN) oldTarget).getVLSNSequence());
+            }
+        }
+        super.setTarget(idx, target);
+    }
+
+    /**
+     * Overridden to account for BIN-specific slot info.
+     */
+    @Override
+    void appendEntryFromOtherNode(IN from, int fromIdx) {
+
+        super.appendEntryFromOtherNode(from, fromIdx);
+
+        final BIN fromBin = (BIN) from;
+        final int idx = nEntries - 1;
+
+        setCachedVLSNUnconditional(idx, fromBin.getCachedVLSN(fromIdx));
+        setLastLoggedSizeUnconditional(idx, from.getLastLoggedSize(fromIdx));
+
+        setExpiration(
+            idx, fromBin.getExpiration(fromIdx),
+            fromBin.isExpirationInHours());
+
+        final OffHeapCache ohCache = getOffHeapCache();
+
+        if (ohCache.isEnabled()) {
+
+            offHeapLNIds = offHeapLNIds.set(
+                idx, fromBin.offHeapLNIds.get(fromIdx), this);
+
+            ohCache.ensureOffHeapLNsInLRU(this);
+        }
+    }
+
+    /**
+     * Overridden to account for BIN-specific slot info.
+     */
+    @Override
+    void copyEntries(int from, int to, int n) {
+        super.copyEntries(from, to, n);
+        vlsnCache = vlsnCache.copy(from, to, n, this);
+        lastLoggedSizes = lastLoggedSizes.copy(from, to, n, this);
+        expirationValues = expirationValues.copy(from, to, n, this);
+        offHeapLNIds = offHeapLNIds.copy(from, to, n, this);
+    }
+
+    /**
+     * Overridden to account for BIN-specific slot info.
+     */
+    @Override
+    void clearEntry(int idx) {
+        super.clearEntry(idx);
+        setCachedVLSNUnconditional(idx, VLSN.NULL_VLSN_SEQUENCE);
+        setLastLoggedSizeUnconditional(idx, 0);
+        setExpiration(idx, 0, false);
+        offHeapLNIds = offHeapLNIds.set(idx, 0, this);
+    }
+
+    /*
+     * Cursors
+     */
+
+    /* public for the test suite. */
+    public Set<CursorImpl> getCursorSet() {
+        if (cursorSet == null) {
+            return Collections.emptySet();
+        }
+        return cursorSet.copy();
+    }
+
+    /**
+     * Register a cursor with this BIN. Caller has this BIN already latched.
+     * @param cursor Cursor to register.
+     */
+    public void addCursor(CursorImpl cursor) {
+        assert isLatchExclusiveOwner();
+        if (cursorSet == null) {
+            cursorSet = new TinyHashSet<CursorImpl>();
+        }
+        cursorSet.add(cursor);
+    }
+
+    /**
+     * Unregister a cursor with this bin. Caller has this BIN already
+     * latched.
+     *
+     * @param cursor Cursor to unregister.
+     */
+    public void removeCursor(CursorImpl cursor) {
+        assert isLatchExclusiveOwner();
+        if (cursorSet == null) {
+            return;
+        }
+        cursorSet.remove(cursor);
+        if (cursorSet.size() == 0) {
+            cursorSet = null;
+        }
+    }
+
+    /**
+     * @return the number of cursors currently referring to this BIN.
+     */
+    public int nCursors() {
+
+        /*
+         * Use a local var to guard against concurrent assignment to the
+         * cursorSet field by another thread. This method is called via
+         * eviction without latching.
+         * LRU-TODO: with the new evictor this method is called with the node
+         * EX-latched. So, cleanup after the old evictor is scrapped.
+         */
+        final TinyHashSet<CursorImpl> cursors = cursorSet;
+        if (cursors == null) {
+            return 0;
+        }
+        return cursors.size();
+    }
+
+    /**
+     * Adjust any cursors that are referring to this BIN. This method is
+     * called during a split operation. "this" is the BIN being split.
+     * newSibling is the new BIN into which the entries from "this" between
+     * newSiblingLow and newSiblingHigh have been copied.
+     *
+     * @param newSibling - the newSibling into which "this" has been split.
+     * @param newSiblingLow
+     * @param newSiblingHigh - the low and high entry of
+     * "this" that were moved into newSibling.
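+     *
+     * Worked example (hypothetical indices): if a 10-entry BIN is split with
+     * newSiblingLow=0 and newSiblingHigh=6, a cursor at index 3 moves to the
+     * new sibling and keeps index 3 (case 1 below), while a cursor at index
+     * 8 stays on this BIN with index 8 - 6 = 2 (case 2). If instead
+     * newSiblingLow=6 and newSiblingHigh=10, a cursor at index 8 moves to
+     * the new sibling with index 8 - 6 = 2 (case 4), and a cursor at index 3
+     * is left untouched (case 3).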
+     */
+    @Override
+    void adjustCursors(
+        IN newSibling,
+        int newSiblingLow,
+        int newSiblingHigh)
+    {
+        assert newSibling.isLatchExclusiveOwner();
+        assert this.isLatchExclusiveOwner();
+        if (cursorSet == null) {
+            return;
+        }
+        int adjustmentDelta = (newSiblingHigh - newSiblingLow);
+        Iterator<CursorImpl> iter = cursorSet.iterator();
+
+        while (iter.hasNext()) {
+            CursorImpl cursor = iter.next();
+            int cIdx = cursor.getIndex();
+            cursor.assertBIN(this);
+            assert newSibling instanceof BIN;
+
+            /*
+             * There are four cases to consider for cursor adjustments,
+             * depending on (1) how the existing node gets split, and (2)
+             * where the cursor points to currently. In cases 1 and 2, the id
+             * key of the node being split is to the right of the split index
+             * so the new sibling gets the node entries to the left of that
+             * index. This is indicated by "new sibling" to the left of the
+             * vertical split line below. The right side of the node contains
+             * entries that will remain in the existing node (although
+             * they've been shifted to the left). The caret (^) indicates
+             * where the cursor currently points.
+             *
+             * case 1:
+             *
+             * We need to set the cursor's "bin" reference to point at the
+             * new sibling, but we don't need to adjust its index since that
+             * continues to be correct post-split.
+             *
+             * +=======================================+
+             * |  new sibling        |  existing node  |
+             * +=======================================+
+             *      cursor ^
+             *
+             * case 2:
+             *
+             * We only need to adjust the cursor's index since it continues
+             * to point to the current BIN post-split.
+             *
+             * +=======================================+
+             * |  new sibling        |  existing node  |
+             * +=======================================+
+             *                           cursor ^
+             *
+             * case 3:
+             *
+             * Do nothing. The cursor continues to point at the correct BIN
+             * and index.
+             *
+             * +=======================================+
+             * |  existing Node      |  new sibling    |
+             * +=======================================+
+             *      cursor ^
+             *
+             * case 4:
+             *
+             * Adjust the "bin" pointer to point at the new sibling BIN and
+             * also adjust the index.
+             *
+             * +=======================================+
+             * |  existing Node      |  new sibling    |
+             * +=======================================+
+             *                           cursor ^
+             */
+            BIN ns = (BIN) newSibling;
+            if (newSiblingLow == 0) {
+                if (cIdx < newSiblingHigh) {
+                    /* case 1 */
+                    iter.remove();
+                    cursor.setBIN(ns);
+                    ns.addCursor(cursor);
+                } else {
+                    /* case 2 */
+                    cursor.setIndex(cIdx - adjustmentDelta);
+                }
+            } else {
+                if (cIdx >= newSiblingLow) {
+                    /* case 4 */
+                    cursor.setIndex(cIdx - newSiblingLow);
+                    iter.remove();
+                    cursor.setBIN(ns);
+                    ns.addCursor(cursor);
+                }
+            }
+        }
+    }
+
+    /**
+     * For each cursor in this BIN's cursor set, ensure that the cursor is
+     * actually referring to this BIN.
+     */
+    public void verifyCursors() {
+        if (cursorSet == null) {
+            return;
+        }
+        for (CursorImpl cursor : cursorSet) {
+            cursor.assertBIN(this);
+        }
+    }
+
+    /**
+     * Adjust cursors referring to this BIN following an insert.
+     *
+     * @param insertIndex - The index of the new entry.
+     */
+    @Override
+    void adjustCursorsForInsert(int insertIndex) {
+
+        assert this.isLatchExclusiveOwner();
+        if (cursorSet == null) {
+            return;
+        }
+
+        for (CursorImpl cursor : cursorSet) {
+            int cIdx = cursor.getIndex();
+            if (insertIndex <= cIdx) {
+                cursor.setIndex(cIdx + 1);
+            }
+        }
+    }
+
+    /**
+     * Called when we know we are about to split on behalf of a key that is
+     * the minimum (leftSide) or maximum (!leftSide) of this node. This is
+     * achieved by just forcing the split to occur either one element in from
+     * the left or the right (i.e. splitIndex is 1 or nEntries - 1).
+     */
+    @Override
+    IN splitSpecial(
+        IN parent,
+        int parentIndex,
+        IN grandParent,
+        int maxEntriesPerNode,
+        byte[] key,
+        boolean leftSide)
+        throws DatabaseException {
+
+        int nEntries = getNEntries();
+
+        int index = findEntry(key, true, false);
+
+        boolean exact = (index & IN.EXACT_MATCH) != 0;
+        index &= ~IN.EXACT_MATCH;
+
+        if (leftSide && index < 0) {
+            return splitInternal(
+                parent, parentIndex, grandParent, maxEntriesPerNode, 1);
+
+        } else if (!leftSide && !exact && index == (nEntries - 1)) {
+            return splitInternal(
+                parent, parentIndex, grandParent, maxEntriesPerNode,
+                nEntries - 1);
+
+        } else {
+            return split(
+                parent, parentIndex, grandParent, maxEntriesPerNode);
+        }
+    }
+
+    /**
+     * Compress a full BIN by removing any slots that are deleted or expired.
+     *
+     * This must not be a BIN-delta. No cursors can be present on the BIN.
+     * Caller is responsible for latching and unlatching this node.
+     *
+     * If the slot containing the identifier key is removed, the identifier
+     * key will be changed to the key in the first remaining slot.
+     *
+     * Normally when a slot is removed, the IN is dirtied. However, during
+     * compression the BIN is not dirtied when a slot is removed. This is
+     * safe for the reasons described below. Note that the BIN being
+     * compressed is always a full BIN, not a delta.
+     *
+     * + If the BIN is not dirty and it does not become dirty before
+     *   shutdown, i.e., it is not logged, then it is possible that this
+     *   compression will be "lost". However, the state of the slot on disk
+     *   is expired/deleted, and when the BIN is later fetched from disk,
+     *   this state will be restored and the compression will be performed
+     *   again.
+     *
+     * + If the slot is dirty, the BIN may also be dirty or may become dirty
+     *   later, and be logged. Logging a delta would cause the information in
+     *   the dirty slot to be lost. Therefore, when a dirty slot is removed,
+     *   we set a flag that prohibits the next BIN logged from being a delta.
+     *
+     * This optimization (that we don't dirty the BIN and we allow logging a
+     * delta after removing a non-dirty slot) has one minor and one major
+     * impact:
+     *
+     * 1. When a slot is removed for a deleted record, normally the slot and
+     * the BIN will be dirty. Although it is unusual, we may encounter a
+     * non-dirty slot for a deleted record. This happens if the slot could
+     * not be removed by this method when a full BIN is logged, due to a lock
+     * or a cursor, and we compress the full BIN later.
+     *
+     * 2. When a slot is removed for an expired record, it is common that the
+     * slot will not be dirty. In this case, without the optimization, the
+     * removal of expired slots would cause more logging and fewer deltas
+     * would be logged.
+     *
+     * @param localTracker is used only for temporary DBs, and may be
+     * specified to consolidate multiple tracking operations. If null, the
+     * tracking is performed immediately in this method.
+     *
+     * @return true if all deleted and expired slots were compressed, or
+     * false if one or more slots could not be compressed because we were
+     * unable to obtain a lock. A false return value means "try again later".
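+     *
+     * A minimal caller sketch (editorial illustration; the names used here
+     * are from this class, IN, and EnvironmentImpl):
+     *
+     *   bin.latch(CacheMode.UNCHANGED);
+     *   try {
+     *       // compressDirtySlots=false, no localTracker
+     *       if (!bin.compress(false, null)) {
+     *           envImpl.addToCompressorQueue(bin); // try again later
+     *       }
+     *   } finally {
+     *       bin.releaseLatch();
+     *   }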
+ */ + public boolean compress(boolean compressDirtySlots, + final LocalUtilizationTracker localTracker) { + + /* + * If the environment is not yet recovered we can't rely on locks + * being set up to safeguard active data and so we can't compress + * safely. + */ + if (!databaseImpl.getEnv().isValid()) { + return true; + } + + if (nCursors() > 0) { + throw EnvironmentFailureException.unexpectedState(); + } + + if (isBINDelta()) { + throw EnvironmentFailureException.unexpectedState(); + } + + final DatabaseImpl db = getDatabase(); + final EnvironmentImpl envImpl = db.getEnv(); + + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + + boolean setNewIdKey = false; + boolean anyLocked = false; + + for (int i = 0; i < getNEntries(); i++) { + + if (!compressDirtySlots && isDirty(i)) { + continue; + } + + final boolean expired = + envImpl.isExpired(getExpiration(i), isExpirationInHours()); + + final boolean deleted = isDeleted(i); + + if (!deleted && !expired) { + continue; + } + + /* + * We have to be able to lock the LN before we can compress the + * entry. If we can't, then skip over it. For a deleted record, a + * read lock is sufficient because it means the deletion has been + * committed, and other lockers don't hold read locks on a deleted + * record. For an expired record, a write lock is needed to prevent + * removal of a slot for a record that is read-locked elsewhere. + * In both cases it is more efficient to call isLockUncontended + * than to actually lock the LN, since we would release the lock + * immediately. + * + * We must be able to lock the LN even if isKnownDeleted is true, + * because locks protect the aborts. (Aborts may execute multiple + * operations, where each operation latches and unlatches. It's the + * LN lock that protects the integrity of the whole multi-step + * process.) + * + * For example, during abort, there may be cases where we have + * deleted and then added an LN during the same txn. This means + * that to undo/abort it, we first delete the LN (leaving + * knownDeleted set), and then add it back into the tree. We want + * to make sure the entry is in the BIN when we do the insert back + * in. + */ + final long lsn = getLsn(i); + + /* Can discard a NULL_LSN entry without locking. */ + if (lsn != DbLsn.NULL_LSN && + !lockManager.isLockUncontended(lsn)) { + + anyLocked = true; + continue; + } + + /* At this point, we know we can remove the slot. */ + if (entryKeys.compareKeys( + getIdentifierKey(), keyPrefix, i, haveEmbeddedData(i), + getKeyComparator()) == 0) { + + /* + * We're about to remove the entry with the idKey so the + * node will need a new idkey. + */ + setNewIdKey = true; + } + + /* + * When we compress a deleted slot in a deferred-write DB, we + * must either log a dirty LN or count it obsolete. However, if + * we compress an expired slot, neither is necessary; see TTL. + */ + if (!expired && db.isDeferredWriteMode()) { + + final LN ln = (LN) getTarget(i); + + if (ln != null && + ln.isDirty() && + !DbLsn.isTransient(lsn)) { + + if (db.isTemporary()) { + + /* + * When a previously logged LN in a temporary DB is + * dirty, we can count the LSN of the last logged LN as + * obsolete without logging. There is no requirement + * for the dirty deleted LN to be durable past + * recovery. There is no danger of the last logged LN + * being accessed again (after log cleaning, for + * example), since temp DBs do not survive recovery. 
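+                         * (In short: with a localTracker the obsolete
+                         * counts are buffered and flushed by the caller;
+                         * with null, the LogManager counts immediately,
+                         * as the two branches below show.)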
+                         */
+                        if (localTracker != null) {
+                            localTracker.countObsoleteNode(
+                                lsn, ln.getGenericLogType(),
+                                getLastLoggedSize(i), db);
+                        } else {
+                            envImpl.getLogManager().countObsoleteNode(
+                                lsn, ln.getGenericLogType(),
+                                getLastLoggedSize(i), db,
+                                true /*countExact*/);
+                        }
+                    } else {
+
+                        /*
+                         * When a previously logged deferred-write LN is
+                         * dirty, we log the dirty deleted LN to make the
+                         * deletion durable. The act of logging will also
+                         * count the last logged LSN as obsolete.
+                         */
+                        logDirtyLN(i, ln, true /*allowEviction*/);
+                    }
+                }
+            }
+
+            deleteEntry(i, false /*makeDirty*/, true /*validate*/);
+
+            /* Since we're deleting the current entry, decrement the index. */
+            i--;
+        }
+
+        if (getNEntries() != 0 && setNewIdKey) {
+            setIdentifierKey(getKey(0), false /*makeDirty*/);
+        }
+
+        if (getNEntries() == 0) {
+            /* This BIN is empty and expendable. */
+            updateLRU(CacheMode.MAKE_COLD); // TODO actually make cold
+        }
+
+        /*
+         * Reduce capacity if this BIN is larger than the configured
+         * capacity, and has fewer entries than the configured capacity. This
+         * could be due to enlarging the BIN during recovery (see
+         * reconstituteBIN) or because the configured capacity was changed.
+         */
+        final int configuredCapacity = databaseImpl.getNodeMaxTreeEntries();
+        if (getMaxEntries() > configuredCapacity &&
+            getNEntries() < configuredCapacity) {
+            resize(configuredCapacity);
+        }
+
+        return !anyLocked;
+    }
+
+    /**
+     * This method is called opportunistically at certain places where a
+     * deleted slot is observed (when the slot's PendingDeleted or
+     * KnownDeleted flag is set), to ensure that the slot is compressed away.
+     * This is an attempt to process slots that were not compressed during
+     * the mainstream record deletion process because of cursors on the BIN
+     * during compress, or a crash prior to compression.
+     */
+    public void queueSlotDeletion(final int idx) {
+
+        /*
+         * If the next logrec for this BIN should be a BIN-delta, don't queue
+         * the BIN if the deleted slot is dirty, because removing dirty BIN
+         * slots prevents logging a delta.
+         */
+        if (isDirty(idx) && shouldLogDelta()) {
+            return;
+        }
+
+        getEnv().addToCompressorQueue(this);
+    }
+
+    /* For debugging. Overrides method in IN. */
+    @Override
+    boolean validateSubtreeBeforeDelete(int index) {
+
+        assert(!isBINDelta());
+
+        return true;
+    }
+
+    /**
+     * Check if this node fits the qualifications for being part of a
+     * deletable subtree. It may not have any LN children.
+     *
+     * We assume that this is only called under an assert.
+     */
+    @Override
+    boolean isValidForDelete()
+        throws DatabaseException {
+
+        assert(isLatchExclusiveOwner());
+
+        if (isBINDelta()) {
+            return false;
+        }
+
+        int numValidEntries = 0;
+
+        for (int i = 0; i < getNEntries(); i++) {
+            if (!isEntryKnownDeleted(i)) {
+                numValidEntries++;
+            }
+        }
+
+        if (numValidEntries > 0) { // any valid entries, not eligible
+            return false;
+        }
+        if (nCursors() > 0) {      // cursors on BIN, not eligible
+            return false;
+        }
+        return true;               // 0 entries, no cursors
+    }
+
+    @Override
+    public long compactMemory() {
+        final long oldSize = inMemorySize;
+        super.compactMemory();
+        offHeapLNIds = offHeapLNIds.compact(this, EMPTY_OFFHEAP_LN_IDS);
+        expirationValues = expirationValues.compact(this, EMPTY_EXPIRATION);
+        return oldSize - inMemorySize;
+    }
+
+    /**
+     * Adds the sizes of the BIN-specific representations (vlsnCache,
+     * lastLoggedSizes, expirationValues, offHeapLNIds, bloomFilter) to the
+     * computed memory size.
+     */
+    @Override
+    public long computeMemorySize() {
+
+        long size = super.computeMemorySize();
+
+        /*
+         * vlsnCache, lastLoggedSizes, etc., are null only when this method
+         * is called by the superclass constructor, i.e., before this class
+         * constructor has run. Luckily the initial representations have a
+         * memory size of zero, so we can ignore them in this case.
+         */
+        if (vlsnCache != null) {
+            size += vlsnCache.getMemorySize();
+        }
+
+        if (lastLoggedSizes != null) {
+            size += lastLoggedSizes.getMemorySize();
+        }
+
+        if (expirationValues != null) {
+            size += expirationValues.getMemorySize();
+        }
+
+        if (offHeapLNIds != null) {
+            size += offHeapLNIds.getMemorySize();
+        }
+
+        if (bloomFilter != null) {
+            size += BINDeltaBloomFilter.getMemorySize(bloomFilter);
+        }
+
+        return size;
+    }
+
+    /* Utility method used during unit testing. */
+    @Override
+    protected long printMemorySize() {
+        final long inTotal = super.printMemorySize();
+        final long vlsnCacheOverhead = vlsnCache.getMemorySize();
+        final long logSizesOverhead = lastLoggedSizes.getMemorySize();
+        final long expirationOverhead = expirationValues.getMemorySize();
+        final long offHeapLNIdOverhead = offHeapLNIds.getMemorySize();
+
+        /* Include all four BIN-specific overheads in the total. */
+        final long binTotal = inTotal +
+            vlsnCacheOverhead + logSizesOverhead + expirationOverhead +
+            offHeapLNIdOverhead;
+
+        System.out.format(
+            "BIN: %d vlsns: %d logSizes: %d expiration: %d " +
+            "offHeapLNIds: %d %n",
+            binTotal, vlsnCacheOverhead, logSizesOverhead,
+            expirationOverhead, offHeapLNIdOverhead);
+
+        return binTotal;
+    }
+
+    @Override
+    protected long getFixedMemoryOverhead() {
+        return MemoryBudget.BIN_FIXED_OVERHEAD;
+    }
+
+    /**
+     * Returns the treeAdmin memory in objects referenced by this BIN.
+     * Specifically, this refers to the DbFileSummaryMap held by
+     * MapLNs.
+     */
+    @Override
+    public long getTreeAdminMemorySize() {
+
+        if (getDatabase().getId().equals(DbTree.ID_DB_ID)) {
+            long treeAdminMem = 0;
+            for (int i = 0; i < getMaxEntries(); i++) {
+                Node n = getTarget(i);
+                if (n != null) {
+                    MapLN mapLN = (MapLN) n;
+                    treeAdminMem += mapLN.getDatabase().getTreeAdminMemory();
+                }
+            }
+            return treeAdminMem;
+        } else {
+            return 0;
+        }
+    }
+
+    /**
+     * Reduce memory consumption. Note that evicting deferred-write LNs may
+     * require logging them, which will mark this BIN dirty. Compression of
+     * deleted slots will also mark the BIN dirty.
+     *
+     * The BIN should be latched by the caller.
+     *
+     * @return a long number encoding (a) the number of evicted bytes, and
+     * (b) whether this BIN is evictable. (b) will be false if the BIN has
+     * any cursors on it, or has any non-evictable children.
+     */
+    @Override
+    public long partialEviction() {
+
+        /* Try compressing non-dirty slots. */
+        final long oldMemSize = inMemorySize;
+        getEnv().lazyCompress(this);
+        if (oldMemSize > inMemorySize) {
+            return oldMemSize - inMemorySize;
+        }
+
+        /* Try LN eviction. Return if any were evicted. */
+        final long lnBytesAndStatus = evictLNs();
+        if ((lnBytesAndStatus & ~IN.NON_EVICTABLE_IN) != 0) {
+            return lnBytesAndStatus;
+        }
+
+        /* Try discarding the VLSNCache. Return bytes and evictable status. */
+        return discardVLSNCache() | lnBytesAndStatus;
+    }
+
+    public long discardVLSNCache() {
+
+        final long vlsnBytes = vlsnCache.getMemorySize();
+
+        if (vlsnBytes > 0) {
+
+            int numEntries = getNEntries();
+            for (int i = 0; i < numEntries; ++i) {
+                if (isEmbeddedLN(i)) {
+                    return 0;
+                }
+            }
+
+            vlsnCache = EMPTY_VLSNS;
+            updateMemorySize(0 - vlsnBytes);
+        }
+
+        return vlsnBytes;
+    }
+
+    /**
+     * Reduce memory consumption by evicting all LN targets. Note that this
+     * may cause LNs to be logged, which will mark this BIN dirty.
+     *
+     * The BIN should be latched by the caller.
+     *
+     * @return a long number encoding (a) the number of evicted bytes, and
+     * (b) whether this BIN is evictable. (b) will be false if the BIN has
+     * any cursors on it, or has any non-evictable children.
+     */
+    public long evictLNs()
+        throws DatabaseException {
+
+        assert isLatchExclusiveOwner() :
+            "BIN must be latched before evicting LNs";
+
+        /*
+         * We can't evict an LN which is pointed to by a cursor, in case that
+         * cursor has a reference to the LN object. We'll take the cheap
+         * choice and avoid evicting any LNs if there are cursors on this
+         * BIN. We could do a more expensive, precise check to see which
+         * entries have cursors on them. This is something we might move to
+         * later.
+         */
+        if (nCursors() > 0) {
+            return IN.NON_EVICTABLE_IN;
+        }
+
+        /* Try to evict each child LN. */
+        long totalRemoved = 0;
+        long numLNsEvicted = 0;
+        boolean haveNonEvictableLN = false;
+
+        for (int i = 0; i < getNEntries(); i++) {
+
+            if (getTarget(i) == null) {
+                continue;
+            }
+
+            long lnRemoved = evictLNInternal(i, false /*ifFetchedCold*/);
+
+            if (lnRemoved < 0) {
+                haveNonEvictableLN = true;
+            } else {
+                totalRemoved += lnRemoved;
+                ++numLNsEvicted;
+            }
+        }
+
+        /*
+         * compactMemory() may decrease the memory footprint by mutating the
+         * representations of the target and key sets.
+         */
+        if (totalRemoved > 0) {
+            updateMemorySize(totalRemoved, 0);
+            totalRemoved += compactMemory();
+        }
+
+        getEvictor().incNumLNsEvicted(numLNsEvicted);
+
+        if (haveNonEvictableLN) {
+            return (totalRemoved | IN.NON_EVICTABLE_IN);
+        } else {
+            return totalRemoved;
+        }
+    }
+
+    public void evictLN(int index) {
+        evictLN(index, false /*ifFetchedCold*/);
+    }
+
+    public void evictLN(int index, boolean ifFetchedCold)
+        throws DatabaseException {
+
+        final long removed = evictLNInternal(index, ifFetchedCold);
+
+        /* May decrease the memory footprint by changing the INTargetRep. */
+        if (removed > 0) {
+            updateMemorySize(removed, 0);
+            compactMemory();
+        }
+    }
+
+    /**
+     * Evict a single LN if allowed. The amount of memory freed is returned
+     * and must be subtracted from the memory budget by the caller.
+     *
+     * @param ifFetchedCold If true, evict the LN only if it has the
+     * FetchedCold flag set.
+     *
+     * @return number of evicted bytes or -1 if the LN is not evictable.
+     */
+    private long evictLNInternal(int index, boolean ifFetchedCold)
+        throws DatabaseException {
+
+        final Node n = getTarget(index);
+
+        assert(n == null || n instanceof LN);
+
+        if (n == null) {
+            return 0;
+        }
+
+        final LN ln = (LN) n;
+
+        if (ifFetchedCold && !ln.getFetchedCold()) {
+            return 0;
+        }
+
+        /*
+         * Don't evict MapLNs for open databases (LN.isEvictable) [#13415].
+         */
+        if (!ln.isEvictable(getLsn(index))) {
+            return -1;
+        }
+
+        /*
+         * Log target if necessary. Do not allow eviction since we evict
+         * here and that would cause double-counting of the memory freed.
+         */
+        logDirtyLN(index, ln, false /*allowEviction*/);
+
+        /* Clear target. */
+        setTarget(index, null);
+        ln.releaseMemoryBudget();
+
+        final OffHeapCache ohCache = getOffHeapCache();
+        if (ohCache.isEnabled()) {
+            ohCache.storeEvictedLN(this, index, ln);
+        }
+
+        return n.getMemorySizeIncludedByParent();
+    }
+
+    /**
+     * @see IN#logDirtyChildren
+     */
+    @Override
+    public void logDirtyChildren()
+        throws DatabaseException {
+
+        /* Look for LNs that are dirty or have never been logged before.
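+         * (An LN that has never been logged has a transient or null LSN;
+         * logDirtyLN below treats that as a "force" condition for
+         * deferred-write databases, so such LNs are logged here as well.)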
*/ + for (int i = 0; i < getNEntries(); i++) { + Node node = getTarget(i); + if (node != null) { + logDirtyLN(i, (LN) node, true /*allowEviction*/); + } + } + } + + /** + * Logs the LN at the given index if it is dirty. + */ + private void logDirtyLN( + int idx, + LN ln, + boolean allowEviction) + throws DatabaseException { + + final long currLsn = getLsn(idx); + + final boolean force = getDatabase().isDeferredWriteMode() && + DbLsn.isTransientOrNull(currLsn); + + if (force || ln.isDirty()) { + final DatabaseImpl dbImpl = getDatabase(); + final EnvironmentImpl envImpl = dbImpl.getEnv(); + + /* Only deferred write databases should have dirty LNs. */ + assert(dbImpl.isDeferredWriteMode() || ln instanceof MapLN); + + /* + * Do not lock while logging. Locking of new LSN is performed by + * lockAfterLsnChange. This should never be part of the replication + * stream, because this is a deferred-write DB. + * + * No reason to include the previous record version in this logrec + * because this logrec will never be undone (DW databases are + * non-transactional) + */ + final LogItem logItem = ln.log( + envImpl, dbImpl, null /*locker*/, null /*writeLockInfo*/, + isEmbeddedLN(idx), getKey(idx), + getExpiration(idx), isExpirationInHours(), + isEmbeddedLN(idx), currLsn, getLastLoggedSize(idx), + false/*isInsertion*/, true /*backgroundIO*/, + ReplicationContext.NO_REPLICATE); + + updateEntry( + idx, logItem.lsn, ln.getVLSNSequence(), + logItem.size); + + /* Lock new LSN on behalf of existing lockers. */ + CursorImpl.lockAfterLsnChange( + dbImpl, currLsn, logItem.lsn, null /*excludeLocker*/); + + /* + * It is desirable to evict a non-dirty LN that is immediately + * obsolete, because it will never be fetched again. + */ + if (allowEviction && + (databaseImpl.isLNImmediatelyObsolete() || + isEmbeddedLN(idx))) { + evictLN(idx); + } + } + } + + /* + * Logging support + */ + + /** + * @see IN#getLogType + */ + @Override + public LogEntryType getLogType() { + return LogEntryType.LOG_BIN; + } + + /** + * Overrides the IN method to account for deltas. + * Public for unit testing. + */ + @Override + public long getLastDeltaLsn() { + return lastDeltaVersion; + } + + public void setLastDeltaLsn(long lsn) { + lastDeltaVersion = lsn; + } + + /* + * BIN delta support + */ + + public int getFullBinNEntries() { + if (isBINDelta()) { + return fullBinNEntries; + } else { + return nEntries; + } + } + + public void setFullBinNEntries(int n) { + assert(isBINDelta(false)); + fullBinNEntries = n; + } + + void incFullBinNEntries() { + assert(isBINDelta()); + ++fullBinNEntries; + } + + public int getFullBinMaxEntries() { + if (isBINDelta()) { + return fullBinMaxEntries; + } else { + return getMaxEntries(); + } + } + + public void setFullBinMaxEntries(int n) { + assert(isBINDelta(false)); + fullBinMaxEntries = n; + } + + int getDeltaCapacity(int numDirtyEntries) { + + boolean blindOps = + (getEnv().allowBlindOps() || getEnv().allowBlindPuts()); + + if (isBINDelta()) { + return getMaxEntries(); + } + + if (blindOps) { + return (getNEntries() * databaseImpl.getBinDeltaPercent()) / 100; + } + + return numDirtyEntries; + } + + boolean allowBlindPuts() { + boolean res = getEnv().allowBlindPuts(); + + if (res) { + res = res && databaseImpl.hasBtreeBinaryEqualityComparator(); + res = res && databaseImpl.hasDuplicateBinaryEqualityComparator(); + } + + return res; + } + + /* + * It is called in 3 cases listed below. In all cases, if blind puts are + * not allowed, the method returns null. + * + * 1. 
A full BIN is being mutated to an in-memory delta. A new filter will + * be created here and will be stored in the delta by the caller. + * 2. A full BIN is being logged as a delta. A new filter will be created + * here and will be written in the delta logrec by the caller. + * 3. An in-memory BIN-delta is being logged. If the delta has a bloom + * filter already, that filter will be returned and written into the + * logrec. The delta may not have a filter already because it was read + * from an older-version logfile; in this case we return null. + */ + byte[] createBloomFilter() { + + assert(bloomFilter == null || isBINDelta()); + + boolean blindPuts = allowBlindPuts(); + + if (!blindPuts) { + assert(bloomFilter == null); + return null; + } + + if (bloomFilter != null) { + /* + * We are here because we are logging a delta that has a filter + * already. We just need to log the existing filter. + */ + return bloomFilter; + } + + if (isBINDelta()) { + return null; + } + + int numKeys = getNEntries() - getNDeltas(); + int nbytes = BINDeltaBloomFilter.getByteSize(numKeys); + + byte[] bf = new byte[nbytes]; + + BINDeltaBloomFilter.HashContext hc = + new BINDeltaBloomFilter.HashContext(); + + if (keyPrefix != null) { + hc.hashKeyPrefix(keyPrefix); + } + + for (int i = 0; i < getNEntries(); ++i) { + + if (isDirty(i)) { + continue; + } + + byte[] suffix = entryKeys.getKey(i, haveEmbeddedData(i)); + if (suffix == null) { + suffix = Key.EMPTY_KEY; + } + + BINDeltaBloomFilter.add(bf, suffix, hc); + } + + return bf; + } + + public boolean mayHaveKeyInFullBin(byte[] key) { + + assert(isBINDelta()); + + if (bloomFilter == null) { + return true; + } + + return BINDeltaBloomFilter.contains(bloomFilter, key); + } + + /* + * Used in IN.getLogSize() only + */ + int getBloomFilterLogSize() { + + if (!allowBlindPuts()) { + return 0; + } + + if (isBINDelta()) { + if (bloomFilter != null) { + return BINDeltaBloomFilter.getLogSize(bloomFilter); + } + + return 0; + + } else { + assert(bloomFilter == null); + int numKeys = getNEntries() - getNDeltas(); + return BINDeltaBloomFilter.getLogSize(numKeys); + } + } + + boolean isDeltaProhibited() { + return (getProhibitNextDelta() || + getDatabase().isDeferredWriteMode() || + getLastFullLsn() == DbLsn.NULL_LSN); + } + + /** + * Decide whether to log a full or partial BIN, depending on the ratio of + * the delta size to full BIN size. + * + * Other factors are taken into account: + * + a delta cannot be logged if the BIN has never been logged before + * + deltas are not currently supported for DeferredWrite databases + * + this particular delta may have been prohibited because the cleaner is + * migrating the BIN or a dirty slot has been removed + * + if there are no dirty slots, we might as well log a full BIN + * + * The restriction on using BIN-deltas for deferred-write DBs is for + * reasons that are probably no longer relevant. However, we have not + * tested deltas with DW, so we still prohibit them. Because BIN-deltas + * may be in cache at the time a DB is opened in DW mode, a workaround is + * currently necessary: see Database.mutateDeferredWriteBINDeltas. + * + * @return true if we should log the deltas of this BIN + */ + public boolean shouldLogDelta() { + + if (isBINDelta()) { + /* + * Cannot assert that db is not in DeferredWrite mode. + * See Database.mutateDeferredWriteBINDeltas. + */ + assert !getProhibitNextDelta(); + assert getLastFullLsn() != DbLsn.NULL_LSN; + return true; + } + + /* Cheapest checks first. 
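+         * (For scale, a worked example of the BinDeltaPercent check done
+         * further below, with hypothetical numbers: if getNEntries() == 100
+         * and the configured percent is 25, matching the 25% figure cited
+         * in the class javadoc, then deltaLimit == 25 and a delta is logged
+         * only when at most 25 slots are dirty.)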
*/ + if (isDeltaProhibited()) { + return false; + } + + /* Must count deltas to check further. */ + final int numDeltas = getNDeltas(); + + /* A delta with zero items is not valid. */ + if (numDeltas <= 0) { + return false; + } + + /* Check the configured BinDeltaPercent. */ + final int deltaLimit = + (getNEntries() * databaseImpl.getBinDeltaPercent()) / 100; + + return numDeltas <= deltaLimit; + } + + /** + * Returns whether mutateToBINDelta can be called. + */ + public boolean canMutateToBINDelta() { + return (!isBINDelta() && + shouldLogDelta() && + (nCursors() == 0)); + } + + /** + * Mutate to a delta (discard non-dirty entries and resize arrays). + * + * This method must be called with this node latched exclusively, and + * canMutateToBINDelta must return true. + * + * @return the number of bytes freed. + */ + public long mutateToBINDelta() { + + assert isLatchExclusiveOwner(); + assert canMutateToBINDelta(); + + if (getInListResident()) { + getEnv().getInMemoryINs().updateBINDeltaStat(1); + } + + final long oldSize = getInMemorySize(); + final int nDeltas = getNDeltas(); + final int capacity = getDeltaCapacity(nDeltas); + + bloomFilter = createBloomFilter(); + + initBINDelta(this, nDeltas, capacity, true); + + return oldSize - getInMemorySize(); + } + + /** + * This method assumes that "this" BIN is a delta and creates a clone of + * it. It is currently used by the DiskOrderedScanner only. The method + * does not clone the targets array. + */ + public BIN cloneBINDelta() { + + assert(isBINDelta()); + + final BIN bin = new BIN( + databaseImpl, getIdentifierKey(), 0/*capacity*/, getLevel()); + + bin.nodeId = nodeId; + bin.flags = flags; + bin.lastFullVersion = lastFullVersion; + + final int nDeltas = getNDeltas(); + initBINDelta(bin, nDeltas, nDeltas, false); + return bin; + } + + /** + * Replaces the contents of destBIN with the deltas in this BIN. 
+ */ + private void initBINDelta( + final BIN destBIN, + final int nDeltas, + final int capacity, + final boolean copyTargets) { + + long[] longLSNs = null; + byte[] compactLSNs = null; + + if (entryLsnLongArray == null) { + compactLSNs = new byte[nDeltas * 4]; + } else { + longLSNs = new long[nDeltas]; + } + + final long[] vlsns = new long[nDeltas]; + final int[] sizes = new int[nDeltas]; + final byte[][] keys = new byte[nDeltas][]; + final byte[] states = new byte[nDeltas]; + long[] memIds = null; + Node[] targets = null; + int[] expiration = null; + + if (copyTargets) { + targets = new Node[nDeltas]; + memIds = new long[nDeltas]; + } + + if (expirationBase != -1) { + expiration = new int[nDeltas]; + } + + int j = 0; + for (int i = 0; i < getNEntries(); i += 1) { + + if (!isDirty(i)) { + freeOffHeapLN(i); + continue; + } + + if (entryLsnLongArray == null) { + int doff = j << 2; + int soff = i << 2; + compactLSNs[doff] = entryLsnByteArray[soff]; + compactLSNs[doff+1] = entryLsnByteArray[soff+1]; + compactLSNs[doff+2] = entryLsnByteArray[soff+2]; + compactLSNs[doff+3] = entryLsnByteArray[soff+3]; + } else { + longLSNs[j] = getLsn(i); + } + + keys[j] = entryKeys.get(i); + states[j] = getState(i); + + if (targets != null) { + targets[j] = getTarget(i); + } + + if (memIds != null) { + memIds[j] = getOffHeapLNId(i); + } + + vlsns[j] = getCachedVLSN(i); + sizes[j] = getLastLoggedSize(i); + + if (expiration != null) { + expiration[j] = getExpiration(i); + } + + j += 1; + } + + /* + * Do this before resetContent() because destBIN and "this" may be the + * same java obj + */ + destBIN.fullBinNEntries = getFullBinNEntries(); + destBIN.fullBinMaxEntries = getFullBinMaxEntries(); + + destBIN.resetContent( + capacity, nDeltas, + baseFileNumber, compactLSNs, longLSNs, + states, keyPrefix, keys, targets, + sizes, memIds, vlsns, + expiration, isExpirationInHours()); + + destBIN.setBINDelta(true); + + destBIN.compactMemory(); + } + + /** + * Replaces the contents of this BIN with the given contents. + * Used in mutating a full BIN to a BIN-delta or for creating + * a new BIN delta with the given content. 
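+     *
+     * For illustration (a sketch of the representation, not part of the
+     * original javadoc): when all slot LSNs live in the same log file, each
+     * LSN is kept compactly as 4 bytes at byte offset (i << 2) of
+     * entryLsnByteArray, relative to baseFileNumber; otherwise the 8-byte
+     * entryLsnLongArray form is used. Copying slot i of a full BIN to slot
+     * j of a delta therefore moves exactly the 4 bytes
+     *
+     *   entryLsnByteArray[(i << 2) .. (i << 2) + 3]
+     *       -> compactLSNs[(j << 2) .. (j << 2) + 3]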
+ */ + private void resetContent( + final int capacity, + final int newNEntries, + final long baseFileNumber, + final byte[] compactLSNs, + final long[] longLSNs, + final byte[] states, + final byte[] keyPrefix, + final byte[][] keys, + final Node[] targets, + final int[] loggedSizes, + final long[] memIds, + final long[] vlsns, + final int[] expiration, + final boolean expirationInHours) { + + updateRepCacheStats(false); + + nEntries = newNEntries; + + this.baseFileNumber = baseFileNumber; + if (longLSNs == null) { + entryLsnByteArray = new byte[capacity << 2]; + entryLsnLongArray = null; + } else { + entryLsnByteArray = null; + entryLsnLongArray = new long[capacity]; + } + + this.keyPrefix = keyPrefix; + + entryKeys = new INKeyRep.Default(capacity); + entryTargets = INTargetRep.NONE; + vlsnCache = EMPTY_VLSNS; + lastLoggedSizes = EMPTY_LAST_LOGGED_SIZES; + expirationValues = EMPTY_EXPIRATION; + expirationBase = -1; + offHeapLNIds = EMPTY_OFFHEAP_LN_IDS; + + updateRepCacheStats(true); + + entryStates = new byte[capacity]; + + for (int i = 0; i < newNEntries; i += 1) { + + if (longLSNs == null) { + int off = i << 2; + entryLsnByteArray[off] = compactLSNs[off]; + entryLsnByteArray[off+1] = compactLSNs[off+1]; + entryLsnByteArray[off+2] = compactLSNs[off+2]; + entryLsnByteArray[off+3] = compactLSNs[off+3]; + } else { + entryLsnLongArray[i] = longLSNs[i]; + } + + entryKeys = entryKeys.set(i, keys[i], this); + entryStates[i] = states[i]; + + if (targets != null) { + entryTargets = entryTargets.set(i, targets[i], this); + } + + if (memIds != null) { + setOffHeapLNId(i, memIds[i]); + } + + if (expiration != null) { + setExpiration(i, expiration[i], expirationInHours); + } + + setLastLoggedSizeUnconditional(i, loggedSizes[i]); + setCachedVLSNUnconditional(i, vlsns[i]); + } + + updateMemorySize(inMemorySize, computeMemorySize()); + } + + /** + * Fetch the full BIN and apply the deltas in this BIN to it, then use the + * merged result to replace the contents of this BIN. + * + * This method must be called with this node latched exclusively. If 'this' + * is not a delta, this method does nothing. + */ + @Override + public void mutateToFullBIN(boolean leaveFreeSlot) { + + if (!isBINDelta()) { + return; + } + + final BIN fullBIN = fetchFullBIN(databaseImpl); + + mutateToFullBIN(fullBIN, leaveFreeSlot); + + getEvictor().incFullBINMissStats(); + } + + /** + * Mutates this delta to a full BIN by applying this delta to the fullBIN + * param and then replacing this BIN's contents with it. + * + * This method must be called with this node latched exclusively. 'this' + * must be a delta. + * + * After mutation, the full BIN is compressed and compacted. The + * compression is particularly important, since BIN-deltas in cache cannot + * be compressed. + * + * The method is public because it is called directly from FileProcessor + * when it finds a BIN that must be migrated. In that case, fullBIN is a + * full BIN that has just been read from the log, and it is not part of + * the memory-resident tree. 
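+     *
+     * For illustration (a sketch of the cursor handling in the method
+     * below, not a new API): slot indexes change when the delta's entries
+     * are merged into the full BIN, so each cursor's current key is
+     * captured before the merge and the cursor is repositioned afterwards,
+     * along these lines:
+     *
+     *   byte[] k = cursor.getCurrentKey(true);    // isLatched == true
+     *   ... merge delta into full BIN ...
+     *   int idx = findEntry(k, true, false);
+     *   cursor.setIndex(idx & ~IN.EXACT_MATCH);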
+ */ + public void mutateToFullBIN(BIN fullBIN, boolean leaveFreeSlot) { + + assert isLatchExclusiveOwner(); + assert isBINDelta() : this; + + byte[][] keys = null; + int i = 0; + + if (cursorSet != null) { + keys = new byte[cursorSet.size()][]; + + for (CursorImpl cursor : cursorSet) { + final int index = cursor.getIndex(); + if (index >= 0 && index < getNEntries()) { + keys[i] = cursor.getCurrentKey(true/*isLatched*/); + } + ++i; + } + } + + reconstituteBIN(databaseImpl, fullBIN, leaveFreeSlot); + + resetContent(fullBIN); + + setBINDelta(false); + + /* + * The fullBIN identifierKey may have changed when reconstituteBIN + * called BIN.compress. We cannot call setIdentifierKey in resetContent + * because assert(!isBINDelta()) will fail, so call it here. + */ + setIdentifierKey(fullBIN.getIdentifierKey(), false); + + if (cursorSet != null) { + + i = 0; + for (CursorImpl cursor : cursorSet) { + + if (keys[i] != null) { + /* + * Do not ask for an exact match from findEntry because if + * the cursor was on a KD slot, findEntry would return -1. + */ + int index = findEntry(keys[i], true, false); + + if ((index & IN.EXACT_MATCH) == 0) { + throw EnvironmentFailureException.unexpectedState( + getEnv(), "Failed to reposition cursor during " + + "mutation of a BIN delta to a full BIN"); + } + + index &= ~IN.EXACT_MATCH; + + assert(index >= 0 && index < getNEntries()); + cursor.setIndex(index); + } + ++i; + } + } + + getEnv().lazyCompress(this); + compactMemory(); + + if (getInListResident()) { + getEnv().getInMemoryINs().updateBINDeltaStat(-1); + } + } + + private BIN fetchFullBIN(DatabaseImpl dbImpl) { + + final EnvironmentImpl envImpl = dbImpl.getEnv(); + final long lsn = getLastFullLsn(); + + try { + return (BIN) + envImpl.getLogManager().getEntryHandleFileNotFound(lsn); + + } catch (EnvironmentFailureException e) { + e.addErrorMessage(makeFetchErrorMsg(null, lsn, -1)); + throw e; + + } catch (RuntimeException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + makeFetchErrorMsg(e.toString(), lsn, -1), e); + } + } + + /** + * Replaces the contents of this BIN with the contents of the given BIN, + * including lsns, states, keys and targets. Key prefixing and key/target + * representations will also be those of the given BIN. 
+ */ + private void resetContent(final BIN other) { + + updateRepCacheStats(false); + + nEntries = other.nEntries; + + baseFileNumber = other.baseFileNumber; + entryLsnByteArray = other.entryLsnByteArray; + entryLsnLongArray = other.entryLsnLongArray; + + keyPrefix = other.keyPrefix; + entryKeys = other.entryKeys; + + entryTargets = other.entryTargets; + + entryStates = other.entryStates; + + lastLoggedSizes = other.lastLoggedSizes; + + expirationValues = other.expirationValues; + expirationBase = other.expirationBase; + + offHeapLNIds = other.offHeapLNIds; + assert (getOffHeapLruId() >= 0) || !hasOffHeapLNs(); + + vlsnCache = other.vlsnCache; + + bloomFilter = null; + + updateMemorySize(inMemorySize, computeMemorySize()); + + updateRepCacheStats(true); + } + + private void resize(final int newCapacity) { + + assert newCapacity >= getNEntries(); + + updateRepCacheStats(false); + + if (entryLsnByteArray != null) { + entryLsnByteArray = Arrays.copyOfRange( + entryLsnByteArray, 0, newCapacity * 4); + } + if (entryLsnLongArray != null) { + entryLsnLongArray = Arrays.copyOfRange( + entryLsnLongArray, 0, newCapacity); + } + if (entryStates != null) { + entryStates = Arrays.copyOfRange( + entryStates, 0, newCapacity); + } + + entryKeys = entryKeys.resize(newCapacity); + entryTargets = entryTargets.resize(newCapacity); + lastLoggedSizes = lastLoggedSizes.resize(newCapacity); + expirationValues = expirationValues.resize(newCapacity); + offHeapLNIds = offHeapLNIds.resize(newCapacity); + vlsnCache = vlsnCache.resize(newCapacity); + + updateMemorySize(inMemorySize, computeMemorySize()); + + updateRepCacheStats(true); + } + + /** + * Create a BIN by fetching its most recent full version from the log and + * applying to it the deltas in this BIN delta. The new BIN is not added + * to the INList or the BTree. + * + * Called from DiskOrderedScanner.fetchAndProcessBINs() and + * DiskOrderedScanner.accumulateLNs() + * + * @return the full BIN with deltas applied. + */ + public BIN reconstituteBIN(DatabaseImpl dbImpl) { + final BIN fullBIN = fetchFullBIN(dbImpl); + reconstituteBIN(dbImpl, fullBIN, false /*leaveFreeSlot*/); + return fullBIN; + } + + /** + * Given a full version BIN, apply to it the deltas in this BIN delta. The + * fullBIN will then be complete, but its memory will not be compacted. + * + * Called from mutateToFullBIN() above and from SortedLSNTreewalker. + * + * @param leaveFreeSlot should be true if a slot will be inserted into the + * resulting full BIN, without first checking whether the full BIN must be + * split, and performing the split if necessary. If this param is true, the + * returned BIN will contain at least one free slot. If this param is + * false, a BIN with no free slots may be returned. For example, it is + * important that false is passed when a split will be performed, since if + * true were passed, the BIN would grow beyond its bounds unnecessarily. + */ + public void reconstituteBIN( + DatabaseImpl dbImpl, + BIN fullBIN, + boolean leaveFreeSlot) { + + fullBIN.setDatabase(dbImpl); + fullBIN.latch(CacheMode.UNCHANGED); + + try { + if (databaseImpl == null) { + setDatabase(dbImpl); + } + + assert fullBIN.getOffHeapLruId() < 0; + assert !fullBIN.hasOffHeapLNs(); + + /* + * The BIN's lastFullLsn is set here, while its lastLoggedLsn is + * set by postFetchInit or postRecoveryInit. 
+ */ + fullBIN.setLastFullLsn(getLastFullLsn()); + + /* + * Compress the full BIN before applying deltas, to handle the + * following scenario: Non-dirty slots were compressed away + * earlier, leaving room for inserted records, and a delta was + * logged with the inserted records. The full version of the BIN + * (after compression) was not logged, because the BIN is not + * dirtied when non-dirty slots were compressed away. If we don't + * compress here, there may not be room in the original BIN for the + * slots inserted when applying the deltas. + * + * However, during recovery we can't compress because locking is + * not used during recovery, and the compressor may delete a slot + * for a record that is part of an active transaction. In addition, + * even when compression is performed here, it is possible that it + * doesn't compress all deleted/expired slots that were compressed + * originally in the scenario described, for one of the following + * reasons: + * + * + The record is locked temporarily by a read operation that + * will skip the record. Note that the compressor uses + * non-blocking locking. + * + * + If expiration has been disabled, or the system clock has been + * changed, slots that were expired originally may not be + * expired now. + * + * Therefore, in all cases we enlarge the BIN if necessary to hold + * all slots to be inserted when applying the delta. An extra slot + * is added if leaveFreeSlot is true, to handle cases + * where mutation to a full BIN is performed after calling + * Tree.searchSplitsAllowed, or one of the methods that calls it + * such as Tree.findBinForInsert and Tree.getParentBINForChildLN. + * If the search returns a BIN-delta without splitting, and then we + * must mutate to full BIN in order to insert, because blind + * insertions do not apply, then the scenario described can occur. + * + * If the BIN is enlarged, we add it to the compressor queue so it + * will be shrunk back down to the Database's configured maxEntries + * during normal compression. + */ + if (!dbImpl.getEnv().isInInit()) { + fullBIN.compress( + false /*compressDirtySlots*/, null /*localTracker*/); + } + int nInsertions = leaveFreeSlot ? 1 : 0; + for (int i = 0; i < getNEntries(); i += 1) { + final int foundIndex = fullBIN.findEntry( + getKey(i), true, false); + if (foundIndex < 0 || (foundIndex & IN.EXACT_MATCH) == 0) { + nInsertions += 1; + } + } + final int maxEntries = nInsertions + fullBIN.getNEntries(); + if (maxEntries > fullBIN.getMaxEntries()) { + fullBIN.resize(maxEntries); + dbImpl.getEnv().addToCompressorQueue(fullBIN); + } + + /* Process each delta. */ + for (int i = 0; i < getNEntries(); i++) { + + assert isDirty(i) : this; + + fullBIN.applyDelta( + getKey(i), getData(i), getLsn(i), getState(i), + getLastLoggedSize(i), getOffHeapLNId(i), + getCachedVLSN(i), getTarget(i), + getExpiration(i), isExpirationInHours()); + } + + /* + * The applied deltas will leave some slots dirty, which is + * necessary as a record of changes that will be included in the + * next delta. However, the BIN itself should not be dirty, + * because this delta is a persistent record of those changes. + */ + fullBIN.setDirty(false); + } finally { + fullBIN.releaseLatch(); + } + } + + /** + * Apply (insert, update) a given delta slot in this full BIN. + * Note: also called from OldBINDelta class. 
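+     *
+     * For illustration: findEntry(key, true, false) reports an exact match
+     * by or-ing IN.EXACT_MATCH into the returned slot index, so the caller
+     * first tests the bit and then strips it:
+     *
+     *   int found = findEntry(key, true, false);
+     *   if (found >= 0 && (found & IN.EXACT_MATCH) != 0) {
+     *       found &= ~IN.EXACT_MATCH;    // plain slot index
+     *   }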
+ */ + void applyDelta( + final byte[] key, + final byte[] data, + final long lsn, + final byte state, + final int lastLoggedSize, + final long ohLnId, + final long vlsn, + final Node child, + final int expiration, + final boolean expirationInHours) { + + /* + * The delta is the authoritative version of the entry. In all cases, + * it should supersede the entry in the full BIN. This is true even if + * the BIN Delta's entry is knownDeleted or if the full BIN's version + * is knownDeleted. Therefore we use the flavor of findEntry that will + * return a knownDeleted entry if the entry key matches (i.e. true, + * false) but still indicates exact matches with the return index. + * findEntry only returns deleted entries if third arg is false, but we + * still need to know if it's an exact match or not so indicateExact is + * true. + */ + int foundIndex = findEntry(key, true, false); + + if (foundIndex >= 0 && (foundIndex & IN.EXACT_MATCH) != 0) { + + foundIndex &= ~IN.EXACT_MATCH; + + /* + * The entry exists in the full version, update it with the delta + * info. Note that all state flags should be restored [#22848]. + */ + applyDeltaSlot( + foundIndex, child, lsn, lastLoggedSize, state, key, data); + + } else { + + /* + * The entry doesn't exist, insert the delta entry. We insert the + * entry even when it is known or pending deleted, since the + * deleted (and dirty) entry will be needed to log the next delta. + * [#20737] + */ + final int result = insertEntry1( + child, key, data, lsn, state, false/*blindInsertion*/); + + assert (result & INSERT_SUCCESS) != 0; + foundIndex = result & ~IN.INSERT_SUCCESS; + + setLastLoggedSizeUnconditional(foundIndex, lastLoggedSize); + } + + setCachedVLSNUnconditional(foundIndex, vlsn); + setOffHeapLNId(foundIndex, ohLnId); + setExpiration(foundIndex, expiration, expirationInHours); + } + + /* + * DbStat support. + */ + @Override + void accumulateStats(TreeWalkerStatsAccumulator acc) { + acc.processBIN(this, Long.valueOf(getNodeId()), getLevel()); + } +} diff --git a/src/com/sleepycat/je/tree/BINBoundary.java b/src/com/sleepycat/je/tree/BINBoundary.java new file mode 100644 index 0000000..c3f57f8 --- /dev/null +++ b/src/com/sleepycat/je/tree/BINBoundary.java @@ -0,0 +1,24 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +/** + * Contains information about the BIN returned by a search. + */ +public class BINBoundary { + /** The last BIN was returned. */ + public boolean isLastBin; + /** The first BIN was returned. */ + public boolean isFirstBin; +} diff --git a/src/com/sleepycat/je/tree/BINDeltaBloomFilter.java b/src/com/sleepycat/je/tree/BINDeltaBloomFilter.java new file mode 100644 index 0000000..6219923 --- /dev/null +++ b/src/com/sleepycat/je/tree/BINDeltaBloomFilter.java @@ -0,0 +1,235 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.nio.ByteBuffer; +import java.util.Random; + +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogUtils; + +/** + * A Bloom filter implementation, highly specialized for use in BIN deltas. + * Both space and computation times are minimized, with a potential small + * loss in accuracy. + * + * A nice introduction to bloom filters can be found here: + * http://en.wikipedia.org/wiki/Bloom_filter + */ +public class BINDeltaBloomFilter { + + /* + * Used to optimize creation of the bloom filter: Lets us avoid repeated + * (per key) hashing of the key prefix and repeated allocations of the + * RNG and the hashes array. + */ + public static class HashContext { + + public int[] hashes; + + public Random rng; + + public long initFNVvalue; + + public HashContext() { + hashes = new int[BINDeltaBloomFilter.K]; + rng = new Random(); + initFNVvalue = BINDeltaBloomFilter.FNVOffsetBasis; + } + + void hashKeyPrefix(byte[] prefix) { + initFNVvalue = BINDeltaBloomFilter.hashFNV(prefix, initFNVvalue); + } + } + + /* + * Params for the Fowler-Noll-Vo (FNV) hash function + */ + private static final long FNVOffsetBasis = 2166136261L; + private static final long FNVPrime = 16777619L; + + /* + * The m/n ratio, where m is the number of bits used by the bloom filter + * and n is the number of keys in the set represented by the bloom filter. + */ + private static final int M_N_RATIO = 8; + + /* + * The number of hash values to generate per key, when a key is added to + * the filter or when the key's membership is tested. 
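+     *
+     * For illustration: with M_N_RATIO = 8 bits per key and K = 3 probes,
+     * the expected false-positive rate of the filter is roughly
+     *
+     *   (1 - e^(-K*n/m))^K  =  (1 - e^(-3/8))^3  ~=  0.03
+     *
+     * so about 3% of absent keys are wrongly reported as possibly present,
+     * which at worst costs a fetch of the full BIN to discover the miss.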
+     */
+    private static final int K = 3;
+
+    /*
+     * Add the given key to the given bloom filter
+     */
+    public static void add(byte[] bf, byte[] key, HashContext hc) {
+
+        hash(bf, key, hc);
+
+        for (int idx : hc.hashes) {
+            setBit(bf, idx);
+        }
+    }
+
+    /*
+     * Test set membership for the given key
+     */
+    static boolean contains(byte[] bf, byte[] key) {
+
+        HashContext hc = new HashContext();
+
+        hash(bf, key, hc);
+
+        for (int idx : hc.hashes) {
+            if (!getBit(bf, idx)) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    /*
+     * Generate K hash values for the given key
+     */
+    private static void hash(byte[] bf, byte[] key, HashContext hc) {
+
+        assert(K == 3);
+        assert(hc.hashes.length == K);
+
+        hc.rng.setSeed(hashFNV(key, hc.initFNVvalue));
+
+        int numBits = bf.length * 8;
+
+        if (numBits <= 1024) {
+            int hash = hc.rng.nextInt();
+            hc.hashes[0] = (hash & 0x000003FF) % numBits;
+            hash = hash >> 10;
+            hc.hashes[1] = (hash & 0x000003FF) % numBits;
+            hash = hash >> 10;
+            hc.hashes[2] = (hash & 0x000003FF) % numBits;
+        } else {
+            hc.hashes[0] = (int)((hc.rng.nextInt() & 0xFFFFFFFFL) % numBits);
+            hc.hashes[1] = (int)((hc.rng.nextInt() & 0xFFFFFFFFL) % numBits);
+            hc.hashes[2] = (int)((hc.rng.nextInt() & 0xFFFFFFFFL) % numBits);
+        }
+    }
+
+    /*
+     * Fowler-Noll-Vo hash function
+     */
+    private static long hashFNV(byte[] key, long initValue) {
+
+        long hash = initValue;
+
+        for (byte b : key) {
+            hash = (hash * FNVPrime) & 0xFFFFFFFF;
+            hash ^= b;
+        }
+
+        return hash;
+    }
+
+
+    /*
+     * Get the total memory consumed by the given bloom filter.
+     */
+    static int getMemorySize(byte[] bf) {
+        return MemoryBudget.byteArraySize(bf.length);
+    }
+
+    /*
+     * Get the number of bytes needed to store the bitset of a bloom filter
+     * for the given number of keys.
+     */
+    public static int getByteSize(int numKeys) {
+        assert(numKeys > 0);
+        int nbits = numKeys * M_N_RATIO;
+        return (nbits + 7) / 8;
+    }
+
+    /*
+     * Get the log size of a bloom filter for the given number of keys.
+     */
+    public static int getLogSize(int numKeys) {
+        int nbytes = getByteSize(numKeys);
+        return LogUtils.getPackedIntLogSize(nbytes) + nbytes;
+    }
+
+    /*
+     * Get the log size of the given bloom filter
+     */
+    public static int getLogSize(byte[] bf) {
+        return LogUtils.getByteArrayLogSize(bf);
+    }
+
+    /*
+     * Write the given bloom filter to the given log buffer
+     */
+    public static void writeToLog(byte[] bf, ByteBuffer buffer) {
+        LogUtils.writeByteArray(buffer, bf);
+    }
+
+    /*
+     * Create and return a bloom filter by reading its bytes from the
+     * given log buffer.
+     */
+    public static byte[] readFromLog(ByteBuffer buffer, int entryVersion) {
+        return LogUtils.readByteArray(buffer, false/*unpacked*/);
+    }
+
+    /*
+     * Dump the given bloom filter as a string of 0/1 bits.
+     */
+    public static void dumpLog(byte[] bf, StringBuilder sb, boolean verbose) {
+
+        int nbits = bf.length * 8;
+
+        sb.append("");
+        for (int i = 0; i < nbits; ++i) {
+            sb.append(getBit(bf, i) ? 1 : 0);
+        }
+        sb.append("");
+    }
+
+    /*
+     * Return a binary-string representation of the given bloom filter.
+     */
+    public static String toString(byte[] bf) {
+
+        StringBuilder sb = new StringBuilder();
+
+        int nbits = bf.length * 8;
+
+        for (int i = 0; i < nbits; ++i) {
+            sb.append(getBit(bf, i) ?
1 : 0); + } + return sb.toString(); + } + + /* + * + */ + private static void setBit(byte[] bf, int idx) { + bf[idx / 8] |= (1 << (idx % 8)); + } + + /* + * + */ + private static boolean getBit(byte[] bf, int idx) { + return ( (bf[idx / 8] & (1 << (idx % 8))) != 0 ); + } +} diff --git a/src/com/sleepycat/je/tree/BINReference.java b/src/com/sleepycat/je/tree/BINReference.java new file mode 100644 index 0000000..a84ee18 --- /dev/null +++ b/src/com/sleepycat/je/tree/BINReference.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import com.sleepycat.je.dbi.DatabaseId; + +/** + * A class that embodies a reference to a BIN that does not rely on a + * Java reference to the actual BIN. + */ +public class BINReference { + private final byte[] idKey; + private final long nodeId; + private final DatabaseId databaseId; + + BINReference(final long nodeId, + final DatabaseId databaseId, + final byte[] idKey) { + this.nodeId = nodeId; + this.databaseId = databaseId; + this.idKey = idKey; + } + + public long getNodeId() { + return nodeId; + } + + public DatabaseId getDatabaseId() { + return databaseId; + } + + public byte[] getKey() { + return idKey; + } + + /** + * Compare two BINReferences. + */ + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!(obj instanceof BINReference)) { + return false; + } + + return ((BINReference) obj).nodeId == nodeId; + } + + @Override + public int hashCode() { + return (int) nodeId; + } + + @Override + public String toString() { + return "idKey=" + Key.getNoFormatString(idKey) + + " nodeId = " + nodeId + + " db=" + databaseId; + } +} diff --git a/src/com/sleepycat/je/tree/ChildReference.java b/src/com/sleepycat/je/tree/ChildReference.java new file mode 100644 index 0000000..c0f2f52 --- /dev/null +++ b/src/com/sleepycat/je/tree/ChildReference.java @@ -0,0 +1,370 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.io.FileNotFoundException; +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; + +/** + * A ChildReference is a reference in the tree from parent to child. It + * contains a node reference, key, and LSN. 
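+ *
+ * For illustration: a newly constructed reference is always marked dirty so
+ * that it will be written with its parent IN; e.g. new ChildReference(child,
+ * key, lsn) ORs EntryStates.DIRTY_BIT into its state byte, and writeToLog
+ * clears the bit again once the reference has been logged.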
+ */ +public class ChildReference implements Loggable { + + private Node target; + private long lsn; + private byte[] key; + private byte state; + + /** + * Construct an empty child reference, for reading from the log. + */ + public ChildReference() { + init(null, Key.EMPTY_KEY, DbLsn.NULL_LSN, 0); + } + + /** + * Construct a ChildReference for inserting a new entry. + */ + public ChildReference(Node target, byte[] key, long lsn) { + init(target, key, lsn, EntryStates.DIRTY_BIT); + } + + /** + * Construct a ChildReference for inserting an existing entry. + */ + public ChildReference(Node target, + byte[] key, + long lsn, + byte existingState) { + init(target, key, lsn, existingState | EntryStates.DIRTY_BIT); + } + + private void init(Node target, + byte[] key, + long lsn, + int state) { + this.target = target; + this.key = key; + this.lsn = lsn; + this.state = (byte) state; + } + + /** + * Return the key for this ChildReference. + */ + public byte[] getKey() { + return key; + } + + /** + * Set the key for this ChildReference. + */ + public void setKey(byte[] key) { + this.key = key; + setDirty(); + } + + /** + * Fetch the target object that this ChildReference refers to. If the + * object is already in VM, then just return the reference to it. If the + * object is not in VM, then read the object from the log. If the object + * has been faulted in and the in arg is supplied, then the total memory + * size cache in the IN is invalidated. + * + * @param database The database that this ChildReference resides in. + * @param parent The IN that this ChildReference lives in. If + * the target is fetched (i.e. it is null on entry), then the + * total in memory count is invalidated in the IN. May be null. + * For example, the root is a ChildReference and there is no parent IN + * when the rootIN is fetched in. + * @return the Node object representing the target node in the tree, or + * null if there is no target of this ChildReference, or null if a + * pendingDelete or knownDeleted entry has been cleaned. + */ + public Node fetchTarget(DatabaseImpl database, IN parent) + throws DatabaseException { + + if (target == null) { + + if (lsn == DbLsn.NULL_LSN) { + if (!isKnownDeleted()) { + throw EnvironmentFailureException.unexpectedState( + IN.makeFetchErrorMsg( + "NULL_LSN without KnownDeleted", + parent, lsn, state, 0)); + } + /* Ignore a NULL_LSN (return null) if KnownDeleted is set. */ + return null; + } + + EnvironmentImpl envImpl = database.getEnv(); + try { + Node node = (Node) envImpl.getLogManager().getEntry(lsn); + + /* + * For now, fetchTarget is never used to fetch an LN. If + * this changes in the future, a CacheMode must be given + * as a param, and the parent BIN moved from the dirty LRU + * to the mixed LRU if the CacheMode is not EVICT_LN (as + * it is done in IN.fetchTarget()). + */ + assert(node.isIN()); + assert(!node.isBINDelta()); + + final IN in = (IN) node; + + in.latchNoUpdateLRU(database); + + node.postFetchInit(database, lsn); + target = node; + + in.releaseLatch(); + + if (parent != null) { + parent.updateMemorySize(null, target); + } + } catch (FileNotFoundException e) { + if (!isKnownDeleted() && + !isPendingDeleted()) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + IN.makeFetchErrorMsg( + e.toString(), parent, lsn, state, 0), + e); + } + + /* + * This is a LOG_FILE_NOT_FOUND for a KD or PD entry. + * + * Ignore. Cleaner got to it, so just return null. 
+                 */
+            } catch (EnvironmentFailureException e) {
+                e.addErrorMessage(
+                    IN.makeFetchErrorMsg(null, parent, lsn, state, 0));
+                throw e;
+            } catch (RuntimeException e) {
+                throw new EnvironmentFailureException(
+                    envImpl, EnvironmentFailureReason.LOG_INTEGRITY,
+                    IN.makeFetchErrorMsg(e.toString(), parent, lsn, state, 0),
+                    e);
+            }
+        }
+
+        return target;
+    }
+
+    /*
+     * Return the state byte for this ChildReference.
+     */
+    byte getState() {
+        return state;
+    }
+
+    /**
+     * Return the target for this ChildReference.
+     */
+    public Node getTarget() {
+        return target;
+    }
+
+    /**
+     * Sets the target for this ChildReference. No need to make dirty, that
+     * state only applies to key and LSN.
+     */
+    public void setTarget(Node target) {
+        this.target = target;
+    }
+
+    /**
+     * Clear the target for this ChildReference. No need to make dirty, that
+     * state only applies to key and LSN. This method is public because it's
+     * safe and used by RecoveryManager. This can't corrupt the tree.
+     */
+    public void clearTarget() {
+        this.target = null;
+    }
+
+    /**
+     * Return the LSN for this ChildReference.
+     *
+     * @return the LSN for this ChildReference.
+     */
+    public long getLsn() {
+        return lsn;
+    }
+
+    /**
+     * Sets the target LSN for this ChildReference.
+     *
+     * @param lsn the target LSN.
+     */
+    public void setLsn(long lsn) {
+        this.lsn = lsn;
+        setDirty();
+    }
+
+    /**
+     * Do deferredWrite optional logging check.
+     */
+    void updateLsnAfterOptionalLog(DatabaseImpl dbImpl, long lsn) {
+        if ((lsn == DbLsn.NULL_LSN) &&
+            dbImpl.isDeferredWriteMode()) {
+            /*
+             * Don't update the lsn -- we don't want to overwrite a
+             * non-null lsn.
+             */
+            setDirty();
+        } else {
+            setLsn(lsn);
+        }
+    }
+
+    void setDirty() {
+        state |= EntryStates.DIRTY_BIT;
+    }
+
+    /**
+     * @return true if the entry has been deleted, although the transaction
+     * that performed the deletion may not be committed.
+     */
+    private boolean isPendingDeleted() {
+        return ((state & EntryStates.PENDING_DELETED_BIT) != 0);
+    }
+
+    /**
+     * @return true if entry is deleted for sure.
+     */
+    public boolean isKnownDeleted() {
+        return ((state & EntryStates.KNOWN_DELETED_BIT) != 0);
+    }
+
+    /**
+     * @return true if the object is dirty.
+     */
+    private boolean isDirty() {
+        return ((state & EntryStates.DIRTY_BIT) != 0);
+    }
+
+    /*
+     * Support for logging.
+     */
+
+    /**
+     * @see Loggable#getLogSize
+     */
+    public int getLogSize() {
+        return
+            LogUtils.getByteArrayLogSize(key) +   // key
+            LogUtils.getPackedLongLogSize(lsn) +  // LSN
+            1;                                    // state
+    }
+
+    /**
+     * @see Loggable#writeToLog
+     */
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writeByteArray(logBuffer, key);  // key
+        LogUtils.writePackedLong(logBuffer, lsn);
+        logBuffer.put(state);                     // state
+        state &= EntryStates.CLEAR_DIRTY_BIT;
+    }
+
+    /**
+     * @see Loggable#readFromLog
+     */
+    public void readFromLog(ByteBuffer itemBuffer, int entryVersion) {
+        boolean unpacked = (entryVersion < 6);
+        key = LogUtils.readByteArray(itemBuffer, unpacked);   // key
+        lsn = LogUtils.readLong(itemBuffer, unpacked);        // LSN
+        state = itemBuffer.get();                             // state
+        state &= EntryStates.CLEAR_DIRTY_BIT;
+    }
+
+    /**
+     * @see Loggable#dumpLog
+     */
+    public void dumpLog(StringBuilder sb, boolean verbose) {
+        sb.append("");
+        sb.append(Key.dumpString(key, 0));
+        sb.append(DbLsn.toString(lsn));
+        sb.append("");
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * Always return false, this item should never be compared.
+ */ + public boolean logicalEquals(Loggable other) { + return false; + } + + /* + * Dumping + */ + public String dumpString(int nspaces, boolean dumpTags) { + StringBuilder sb = new StringBuilder(); + if (lsn == DbLsn.NULL_LSN) { + sb.append(TreeUtils.indent(nspaces)); + sb.append(""); + } else { + sb.append(DbLsn.dumpString(lsn, nspaces)); + } + sb.append('\n'); + if (key == null) { + sb.append(TreeUtils.indent(nspaces)); + sb.append(""); + } else { + sb.append(Key.dumpString(key, nspaces)); + } + sb.append('\n'); + if (target == null) { + sb.append(TreeUtils.indent(nspaces)); + sb.append(""); + } else { + sb.append(target.dumpString(nspaces, true)); + } + sb.append('\n'); + sb.append(TreeUtils.indent(nspaces)); + sb.append(""); + sb.append(""); + sb.append(""); + return sb.toString(); + } + + @Override + public String toString() { + return dumpString(0, false); + } +} diff --git a/src/com/sleepycat/je/tree/CountEstimator.java b/src/com/sleepycat/je/tree/CountEstimator.java new file mode 100644 index 0000000..f143620 --- /dev/null +++ b/src/com/sleepycat/je/tree/CountEstimator.java @@ -0,0 +1,369 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.txn.LockType; + +/** + * Estimates the number of non-deleted BIN entries between two end points, + * using information returned in TrackingInfo from Tree.getParentINForChildIN. + * Used for estimating dup counts, e.g., for join query optimization. Accuracy + * is limited by the number of samples taken to compute the average number of + * entries at each level. Currently only two samples (at the end points) are + * taken, and because the tree is not balanced making the number of entries + * highly variable, the count can easily be off by a factor of two. + */ +public class CountEstimator { + + /* If exceeded, there must be a bug of some kind. */ + private static final int MAX_RETRIES_AFTER_SPLIT = 100; + + /** + * Returns an estimate of the number of records between two end points + * specified by begin/end cursor positions. + */ + public static long count(DatabaseImpl dbImpl, + CursorImpl beginCursor, + boolean beginInclusive, + CursorImpl endCursor, + boolean endInclusive) { + + /* If the two cursors are at the same position, return 1. */ + if (beginCursor.isOnSamePosition(endCursor)) { + return 1; + } + + /* Compute estimate for cursors at different positions. */ + final CountEstimator estimator = new CountEstimator(dbImpl); + + return estimator.count(beginCursor, endCursor) + + (beginInclusive ? 1 : 0) + + (endInclusive ? 
1 : 0);
+    }
+
+    private final DatabaseImpl dbImpl;
+
+    private List<TrackingInfo> beginStack;
+    private List<TrackingInfo> endStack;
+
+    private final List<List<TrackingInfo>> avgEntriesStacks =
+        new ArrayList<List<TrackingInfo>>();
+
+    private int levelCount;
+    private int rootLevel;
+    private float[] avgEntries;
+
+    private CountEstimator(DatabaseImpl dbImpl) {
+        this.dbImpl = dbImpl;
+    }
+
+    private long count(CursorImpl beginCursor, CursorImpl endCursor) {
+
+        for (int numRetries = 0;; numRetries += 1) {
+
+            /*
+             * If we have retried too many times, give up. This is probably
+             * due to a bug of some kind, and we shouldn't loop forever.
+             */
+            if (numRetries > MAX_RETRIES_AFTER_SPLIT) {
+                throw EnvironmentFailureException.unexpectedState();
+            }
+
+            /*
+             * Set up the initial info for the computation. Retry if a split
+             * occurs.
+             */
+            beginStack = beginCursor.getAncestorPath();
+            if (beginStack == null) {
+                continue;
+            }
+            endStack = endCursor.getAncestorPath();
+            if (endStack == null) {
+                continue;
+            }
+
+            if (!findCommonAncestor()) {
+                continue;
+            }
+
+            /* Get the average entries from the two end points. */
+            getAvgEntries(beginCursor, endCursor);
+
+            /*
+             * Return the count. FUTURE: Taking more samples between the two
+             * end points would increase accuracy.
+             */
+            return countTotal();
+        }
+    }
+
+    /**
+     * Find the common ancestor node for the two end points, which we'll call
+     * the root level. If no common ancestor can be found, return false to
+     * restart the process, because a split must have occurred in between
+     * getting the two stacks for the end points.
+     */
+    private boolean findCommonAncestor() {
+
+        levelCount = beginStack.size();
+        if (levelCount != endStack.size()) {
+            /* Must have been a root split. */
+            return false;
+        }
+
+        rootLevel = -1;
+
+        for (int level = levelCount - 1; level >= 0; level -= 1) {
+
+            if (beginStack.get(level).nodeId == endStack.get(level).nodeId) {
+                rootLevel = level;
+                break;
+            }
+        }
+        if (rootLevel < 0) {
+            /* Must have been a split. */
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * This method starts with a preliminary average using just the two end
+     * points.
+     */
+    private void getAvgEntries(CursorImpl beginCursor, CursorImpl endCursor) {
+
+        avgEntriesStacks.clear();
+
+        if (!addAvgEntriesSample(beginStack)) {
+            sampleNextBIN(beginCursor, true /*moveForward*/);
+        }
+
+        if (!addAvgEntriesSample(endStack)) {
+            sampleNextBIN(endCursor, false /*moveForward*/);
+        }
+
+        computeAvgEntries();
+    }
+
+    /**
+     * FUTURE: use internal skip method instead, saving a btree lookup.
+     */
+    private void sampleNextBIN(
+        CursorImpl beginOrEndCursor,
+        boolean moveForward) {
+
+        final CursorImpl cursor =
+            beginOrEndCursor.cloneCursor(true /*samePosition*/);
+
+        try {
+            cursor.latchBIN();
+            if (moveForward) {
+                cursor.setOnLastSlot();
+            } else {
+                cursor.setOnFirstSlot();
+            }
+
+            final OperationResult result = cursor.getNext(
+                null /*foundKey*/, null /*foundData*/,
+                LockType.NONE, false /*dirtyReadAll*/,
+                moveForward, true /*alreadyLatched*/,
+                null /*rangeConstraint*/);
+
+            if (result != null) {
+                final List<TrackingInfo> stack = cursor.getAncestorPath();
+                if (stack != null) {
+                    addAvgEntriesSample(stack);
+                }
+            }
+        } finally {
+            cursor.close();
+        }
+    }
+
+    /**
+     * At each level we compute the average number of entries. This will be
+     * used as a multiplier to estimate the number of nodes for any IN at that
+     * level.
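+     *
+     * Worked example (illustrative): in a 3-level tree, avgEntries[2] (the
+     * BIN level) is fixed at 1.0 record per slot. If the sampled BINs
+     * average 50 entries, then avgEntries[1] = 50 * 1.0 = 50 records per
+     * level-1 slot; if the sampled level-1 INs average 40 entries, then
+     * avgEntries[0] = 40 * 50 = 2000 records per root slot.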
+     */
+    private void computeAvgEntries() {
+
+        avgEntries = new float[levelCount];
+
+        avgEntries[levelCount - 1] = 1.0F;
+
+        if (avgEntriesStacks.size() == 0) {
+            return;
+        }
+
+        for (int level = levelCount - 1; level > 0; level -= 1) {
+            long totalEntries = 0;
+            for (List<TrackingInfo> stack : avgEntriesStacks) {
+                totalEntries += stack.get(level).entries;
+            }
+            final float avg =
+                totalEntries / ((float) avgEntriesStacks.size());
+            avgEntries[level - 1] = avg * avgEntries[level];
+        }
+    }
+
+    private boolean addAvgEntriesSample(List<TrackingInfo> stack) {
+        if (isFirstBIN(stack) || isLastBIN(stack)) {
+            return false;
+        }
+        avgEntriesStacks.add(stack);
+        return true;
+    }
+
+    private boolean isFirstBIN(List<TrackingInfo> stack) {
+        for (int i = 0; i < stack.size() - 1; i += 1) {
+            final TrackingInfo info = stack.get(i);
+            if (info.index != 0) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    private boolean isLastBIN(List<TrackingInfo> stack) {
+        for (int i = 0; i < stack.size() - 1; i += 1) {
+            final TrackingInfo info = stack.get(i);
+            if (info.index != info.entries - 1) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Count the total for each node that is between the two end points.
+     */
+    private long countTotal() {
+        long total = 0;
+
+        /* Add nodes between the end points at the root level. */
+        final int rootIndex1 = beginStack.get(rootLevel).index + 1;
+        final int rootIndex2 = endStack.get(rootLevel).index;
+        if (rootIndex2 > rootIndex1) {
+            total += Math.round((rootIndex2 - rootIndex1) *
+                                avgEntries[rootLevel]);
+        }
+
+        /* Add nodes under the end points at lower levels. */
+        for (int level = rootLevel + 1; level < levelCount; level += 1) {
+
+            /* Add nodes under left end point that are to its right. */
+            final int leftIndex = beginStack.get(level).index;
+            final int lastIndex = beginStack.get(level).entries - 1;
+            if (lastIndex > leftIndex) {
+                total += Math.round((lastIndex - leftIndex) *
+                                    avgEntries[level]);
+            }
+
+            /* Add nodes under right end point that are to its left. */
+            final int rightIndex = endStack.get(level).index;
+            final int firstIndex = 0;
+            if (rightIndex > firstIndex) {
+                total += Math.round((rightIndex - firstIndex) *
+                                    avgEntries[level]);
+            }
+        }
+
+        return total;
+    }
+
+    /* For future use, if getKeyRatios is exposed in the API. */
+    static class KeyRatios {
+        final double less;
+        final double equal;
+        final double greater;
+
+        KeyRatios(double less, double equal, double greater) {
+            this.less = less;
+            this.equal = equal;
+            this.greater = greater;
+        }
+
+        @Override
+        public String toString() {
+            return "less: " + less +
+                   " equal: " + equal +
+                   " greater: " + greater;
+        }
+    }
+
+    /*
+     * For future use, if getKeyRatios is exposed in the API. Be sure to test
+     * boundary conditions when index is 0 or nEntries.
+     *
+     * Algorithm copied from __bam_key_range in BDB btree/bt_stat.c.
+     */
+    static KeyRatios getKeyRatios(List<TrackingInfo> infoByLevel,
+                                  boolean exact) {
+        double factor = 1.0;
+        double less = 0.0;
+        double greater = 0.0;
+
+        /*
+         * At each level we know that INs greater than index contain keys
+         * greater than what we are looking for and those less than index are
+         * less than. The one pointed to by index may have some less, some
+         * greater or even equal. If index is equal to the number of entries,
+         * then the key is out of range and everything is less.
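+         *
+         * Worked example (illustrative): for a 2-level path where the root
+         * has 10 entries with index 3, and the BIN has 20 entries with
+         * index 2 and an exact match:
+         *
+         *   level 0: less += 3/10 = 0.300; greater += 6/10 = 0.600;
+         *            factor = 0.1
+         *   level 1: less += 0.1 * 2/20 = 0.010;
+         *            greater += 0.1 * 17/20 = 0.085; factor = 0.005
+         *   exact:   equal = factor = 0.005
+         *
+         * giving less = 0.310, equal = 0.005, greater = 0.685, which sums
+         * to 1.0.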
+ */ + for (final TrackingInfo info : infoByLevel) { + if (info.index == 0) { + greater += (factor * (info.entries - 1)) / info.entries; + } else if (info.index == info.entries) { + less += factor; + } else { + less += (factor * info.index) / info.entries; + greater += (factor * ((info.entries - info.index) - 1)) / + info.entries; + } + + /* Factor at next level down is 1/n'th the amount at this level. */ + factor /= info.entries; + + /* + System.out.println("factor: " + factor + + " less: " + less + + " greater: " + greater); + */ + } + + /* + * If there was an exact match then assign the 1/n'th factor to the key + * itself. Otherwise that factor belongs to those greater than the + * key, unless the key was out of range. + */ + final double equal; + if (exact) { + equal = factor; + } else { + if (less != 1.0) { + greater += factor; + } + equal = 0.0; + } + + return new KeyRatios(less, equal, greater); + } +} diff --git a/src/com/sleepycat/je/tree/CursorsExistException.java b/src/com/sleepycat/je/tree/CursorsExistException.java new file mode 100644 index 0000000..51bea42 --- /dev/null +++ b/src/com/sleepycat/je/tree/CursorsExistException.java @@ -0,0 +1,34 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +/** + * Error to indicate that a bottom level BIN has cursors on it during a + * delete subtree operation. + */ +public class CursorsExistException extends Exception { + + private static final long serialVersionUID = 1051296202L; + + /* + * Throw this static instance, in order to reduce the cost of + * fill in the stack trace. + */ + public static final CursorsExistException CURSORS_EXIST = + new CursorsExistException(); + + /* Make the constructor public for serializability testing. */ + public CursorsExistException() { + } +} diff --git a/src/com/sleepycat/je/tree/DeltaInfo.java b/src/com/sleepycat/je/tree/DeltaInfo.java new file mode 100644 index 0000000..3c84c2a --- /dev/null +++ b/src/com/sleepycat/je/tree/DeltaInfo.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; + +/** + * DeltaInfo holds the delta for one BIN entry in a partial BIN log entry. + * The data here is all that we need to update a BIN to its proper state. 
+ */ +public class DeltaInfo implements Loggable { + private byte[] key; + private long lsn; + private byte state; + + DeltaInfo(byte[] key, long lsn, byte state) { + this.key = key; + this.lsn = lsn; + this.state = state; + } + + /** + * For reading from the log only. + * + * Is public for Sizeof. + */ + public DeltaInfo() { + lsn = DbLsn.NULL_LSN; + } + + @Override + public int getLogSize() { + return + LogUtils.getByteArrayLogSize(key) + + LogUtils.getPackedLongLogSize(lsn) + // LSN + 1; // state + } + + @Override + public void writeToLog(ByteBuffer logBuffer) { + LogUtils.writeByteArray(logBuffer, key); + LogUtils.writePackedLong(logBuffer, lsn); + logBuffer.put(state); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + boolean unpacked = (entryVersion < 6); + key = LogUtils.readByteArray(itemBuffer, unpacked); + lsn = LogUtils.readLong(itemBuffer, unpacked); + state = itemBuffer.get(); + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(Key.dumpString(key, 0)); + sb.append(DbLsn.toString(lsn)); + sb.append(""); + } + + @Override + public long getTransactionId() { + return 0; + } + + /** + * Always return false, this item should never be compared. + */ + @Override + public boolean logicalEquals(Loggable other) { + return false; + } + + byte[] getKey() { + return key; + } + + byte getState() { + return state; + } + + boolean isKnownDeleted() { + return IN.isStateKnownDeleted(state); + } + + long getLsn() { + return lsn; + } + + /** + * Returns the number of bytes occupied by this object. Deltas are not + * stored in the Btree, but they are budgeted during a SortedLSNTreeWalker + * run. + */ + long getMemorySize() { + return MemoryBudget.DELTAINFO_OVERHEAD + + MemoryBudget.byteArraySize(key.length); + } +} diff --git a/src/com/sleepycat/je/tree/EntryStates.java b/src/com/sleepycat/je/tree/EntryStates.java new file mode 100644 index 0000000..a164de3 --- /dev/null +++ b/src/com/sleepycat/je/tree/EntryStates.java @@ -0,0 +1,40 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +public class EntryStates { + + /* + * If we run out of bits, the two OFFHEAP bits could re-use any of the bit + * values, since the former only appear on level 2 INs and the latter only + * on BINs. 
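+     *
+     * For illustration: a slot that is both dirty and pending-deleted has
+     * state (DIRTY_BIT | PENDING_DELETED_BIT), i.e. 0x2 | 0x8 == 0x0a, and
+     * the dirty bit is cleared with state &= CLEAR_DIRTY_BIT (an AND with
+     * ~0x2).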
+ */ + static final byte KNOWN_DELETED_BIT = 0x1; + static final byte CLEAR_KNOWN_DELETED_BIT = ~0x1; + static final byte DIRTY_BIT = 0x2; + static final byte CLEAR_DIRTY_BIT = ~0x2; + static final byte OFFHEAP_DIRTY_BIT = 0x4; + static final byte CLEAR_OFFHEAP_DIRTY_BIT = ~0x4; + static final byte PENDING_DELETED_BIT = 0x8; + static final byte CLEAR_PENDING_DELETED_BIT = ~0x8; + static final byte EMBEDDED_LN_BIT = 0x10; + static final byte CLEAR_EMBEDDED_LN_BIT = ~0x10; + static final byte NO_DATA_LN_BIT = 0x20; + static final byte CLEAR_NO_DATA_LN_BIT = ~0x20; + static final byte OFFHEAP_PRI2_BIT = 0x40; + static final byte CLEAR_OFFHEAP_PRI2_BIT = ~0x40; + + static final byte TRANSIENT_BITS = OFFHEAP_DIRTY_BIT | OFFHEAP_PRI2_BIT; + static final byte CLEAR_TRANSIENT_BITS = ~TRANSIENT_BITS; +} diff --git a/src/com/sleepycat/je/tree/FileSummaryLN.java b/src/com/sleepycat/je/tree/FileSummaryLN.java new file mode 100644 index 0000000..4721528 --- /dev/null +++ b/src/com/sleepycat/je/tree/FileSummaryLN.java @@ -0,0 +1,480 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.cleaner.FileSummary; +import com.sleepycat.je.cleaner.PackedOffsets; +import com.sleepycat.je.cleaner.TrackedFileSummary; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.utilint.StringUtils; + +/** + * A FileSummaryLN represents a Leaf Node in the UtilizationProfile database. + * + *

+ * The contents of the FileSummaryLN are not fixed until the moment at which
+ * the LN is added to the log. A base summary object contains the summary last
+ * added to the log. A tracked summary object contains live summary info being
+ * updated in real time. The tracked summary is added to the base summary just
+ * before logging it, and then the tracked summary is reset. This ensures that
+ * the logged summary will accurately reflect the totals calculated at the
+ * point in the log where the LN is added.
+ *
+ * This is all done in the writeToLog method, which operates under the log
+ * write latch. All utilization tracking must be done under the log write
+ * latch.
+ *
+ * In record version 1, obsolete offset tracking was added and multiple
+ * records are stored for a single file rather than a single record. Each
+ * record contains the offsets that were tracked since the last record was
+ * written.
+ *
+ * The key is 8 bytes: 4 bytes for the file number followed by 4 bytes for
+ * the sequence number. The lowest valued key for a given file contains the
+ * most recent summary information, while to get a complete list of obsolete
+ * offsets all records for the file must be read. A range search using just
+ * the first 4 bytes can be used to find the most recent record -- this is
+ * possible because the sequence number values are decreasing over time for a
+ * given file. Here are example keys for three summary records in file 1:
+ *
+ *   (file=1, sequence=Integer.MAX_VALUE - 300)
+ *   (file=1, sequence=Integer.MAX_VALUE - 200)
+ *   (file=1, sequence=Integer.MAX_VALUE - 100)
+ *
+ * The sequence number is the number of obsolete entries counted so far,
+ * subtracted from Integer.MAX_VALUE to cause the latest written record to
+ * have the lowest key.
+ *
+ * Log version information:
+ *
+ * Version 0: Keys are old format strings. No obsolete detail is present.
+ *
+ * Version 1: Keys are two 4 byte integers: {file, sequence}. Obsolete
+ * detail is present. Some offsets may be invalid if RMW was used.
+ *
+ * Version 2: The RMW problem with invalid offsets was corrected. There is
+ * no data format change; all versions of JE 2.0.x can read version 1.

+ *
+ * @see com.sleepycat.je.cleaner.UtilizationProfile
+ */
+public final class FileSummaryLN extends LN {
+
+    private static final String BEGIN_TAG = "";
+    private static final String END_TAG = "";
+
+    private int extraMarshaledMemorySize;
+    private final FileSummary baseSummary;
+    private TrackedFileSummary trackedSummary;
+    private PackedOffsets obsoleteOffsets;
+    private boolean needOffsets;
+    private int entryVersion;
+
+    /**
+     * Creates a new LN with a given base summary.
+     */
+    public FileSummaryLN(FileSummary baseSummary) {
+        super(new byte[0]);
+        assert baseSummary != null;
+        this.baseSummary = baseSummary;
+        obsoleteOffsets = new PackedOffsets();
+        entryVersion = -1;
+    }
+
+    /**
+     * Creates an empty LN to be filled in from the log.
+     */
+    public FileSummaryLN() {
+        baseSummary = new FileSummary();
+        obsoleteOffsets = new PackedOffsets();
+    }
+
+    /**
+     * Creates a deleted FileSummaryLN.
+     *
+     * @param deletedMarker makes this constructor signature unique, the value
+     * passed doesn't matter.
+     */
+    private FileSummaryLN(boolean deletedMarker) {
+        super((byte[]) null);
+        baseSummary = new FileSummary();
+        obsoleteOffsets = new PackedOffsets();
+    }
+
+    /**
+     * Creates a deleted FileSummaryLN.
+     */
+    public static LN makeDeletedLN() {
+        return new FileSummaryLN(true /*deletedMarker*/);
+    }
+
+    /**
+     * Sets the live summary object that will be added to the base summary at
+     * the time the LN is logged.
+     */
+    public void setTrackedSummary(TrackedFileSummary trackedSummary) {
+        this.trackedSummary = trackedSummary;
+        needOffsets = true;
+    }
+
+    /**
+     * Returns the tracked summary, or null if setTrackedSummary was not
+     * called.
+     */
+    public TrackedFileSummary getTrackedSummary() {
+        return trackedSummary;
+    }
+
+    /**
+     * Returns the base summary for the file that is stored in the LN.
+     */
+    public FileSummary getBaseSummary() {
+        return baseSummary;
+    }
+
+    /**
+     * Returns the obsolete offsets for the file.
+     */
+    public PackedOffsets getObsoleteOffsets() {
+        return obsoleteOffsets;
+    }
+
+    /**
+     * Returns true if the given key for this LN is a String file number key.
+     * For the old version of the LN there will be a single record per file.
+     *
+     * If this is a version 0 log entry, the key is a string. However, such an
+     * LN may be migrated by the cleaner, in which case the version will be 1
+     * or greater [#13061]. In the latter case, we can distinguish a string
+     * key by:
+     *
+     * 1) If the key is not 8 bytes long, it has to be a string key.
+     *
+     * 2) If the key is 8 bytes long, but bytes[4] is ascii "0" to "9", then it
+     * must be a string key. bytes[4] to bytes[7] are a sequence number that
+     * is the number of log entries counted. For this number to be greater
+     * than 0x30000000, the binary value of 4 digits starting with ascii "0",
+     * over 400 million log entries would have to occur in a single file; this
+     * should never happen.
+     *
+     * Note that having to rely on method (2) is unlikely. A string key will
+     * only be 8 bytes if the file number reaches 8 decimal digits (10,000,000
+     * to 99,999,999). This is a very large file number and unlikely to have
+     * occurred using JE 1.7.1 or earlier.
+     *
+     * In summary, the only time the algorithm here could fail is if there
+     * were more than 400 million log entries per file, and more than 10
+     * million were written with JE 1.7.1 or earlier.
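+     *
+     * Worked example (illustrative): makeFullKey(1, 100) below produces the
+     * 8 key bytes
+     *
+     *   { 0x00, 0x00, 0x00, 0x01,  0x7F, 0xFF, 0xFF, 0x9B }
+     *
+     * since Integer.MAX_VALUE - 100 == 0x7FFFFF9B; bytes[4] == 0x7F is
+     * outside ascii '0'..'9' (0x30..0x39), so the key is correctly
+     * classified as binary.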
+ */ + public static boolean hasStringKey(byte[] bytes) { + + if (bytes.length != 8) { + return true; + } else { + return (bytes[4] >= '0' && bytes[4] <= '9'); + } + } + + /** + * Convert a FileSummaryLN key from a byte array to a long. The file + * number is the first 4 bytes of the key. + */ + public static long getFileNumber(byte[] bytes) { + + if (hasStringKey(bytes)) { + return Long.valueOf(StringUtils.fromUTF8(bytes)).longValue(); + } else { + ByteBuffer buf = ByteBuffer.wrap(bytes); + return LogUtils.readIntMSB(buf) & 0xFFFFFFFFL; + } + } + + /** + * Get the sequence number from the byte array. The sequence number is the + * last 4 bytes of the key. + */ + private static long getSequence(byte[] bytes) { + if (hasStringKey(bytes)) { + return 0; + } else { + ByteBuffer buf = ByteBuffer.wrap(bytes); + LogUtils.readIntMSB(buf); + return + (Integer.MAX_VALUE - LogUtils.readIntMSB(buf)) & 0xFFFFFFFFL; + } + } + + /** + * Returns the first 4 bytes of the key for the given file number. This + * can be used to do a range search to find the first LN for the file. + */ + public static byte[] makePartialKey(long fileNum) { + + byte[] bytes = new byte[4]; + ByteBuffer buf = ByteBuffer.wrap(bytes); + + LogUtils.writeIntMSB(buf, (int) fileNum); + + return bytes; + } + + /** + * Returns the full two-part key for a given file number and unique + * sequence. This can be used to insert a new LN. + * + * @param sequence is a unique identifier for the LN for the given file, + * and must be greater than the last sequence. + */ + public static byte[] makeFullKey(long fileNum, int sequence) { + + assert sequence >= 0; + + byte[] bytes = new byte[8]; + ByteBuffer buf = ByteBuffer.wrap(bytes); + + /* + * The sequence is subtracted from MAX_VALUE so that increasing values + * will be sorted first. This allows a simple range search to find the + * most recent value. + */ + LogUtils.writeIntMSB(buf, (int) fileNum); + LogUtils.writeIntMSB(buf, Integer.MAX_VALUE - sequence); + + return bytes; + } + + /** + * Initialize a node that has been faulted in from the log. If this FSLN + * contains version 1 offsets that can be incorrect when RMW was used, and + * if je.cleaner.rmwFix is enabled, discard the offsets. [#13158] + */ + @Override + public void postFetchInit(DatabaseImpl db, long sourceLsn) + throws DatabaseException { + + super.postFetchInit(db, sourceLsn); + + if (entryVersion == 1 && + db.getEnv().getCleaner().isRMWFixEnabled()) { + obsoleteOffsets = new PackedOffsets(); + } + } + + /* + * Dumping + */ + + @Override + public String toString() { + return dumpString(0, true); + } + + @Override + public String beginTag() { + return BEGIN_TAG; + } + + @Override + public String endTag() { + return END_TAG; + } + + @Override + public String dumpString(int nSpaces, boolean dumpTags) { + StringBuilder sb = new StringBuilder(); + sb.append(super.dumpString(nSpaces, dumpTags)); + sb.append('\n'); + if (!isDeleted()) { + sb.append(baseSummary.toString()); + sb.append(obsoleteOffsets.toString()); + } + return sb.toString(); + } + + /** + * Dump additional fields. Done this way so the additional info can + * be within the XML tags defining the dumped log entry. + */ + @Override + protected void dumpLogAdditional(StringBuilder sb, boolean verbose) { + if (!isDeleted()) { + baseSummary.dumpLog(sb, true); + if (verbose) { + obsoleteOffsets.dumpLog(sb, true); + } + } + } + + /* + * Logging + */ + + /** + * Return the correct log type for a FileSummaryLN. + * + * Note: FileSummaryLN will never be transactional. 
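+ *
+ * Consequently the implementation below ignores isInsert, asserts that
+ * isTransactional is false, and always returns
+ * LogEntryType.LOG_FILESUMMARYLN.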
+ */ + @Override + protected LogEntryType getLogType(boolean isInsert, + boolean isTransactional, + DatabaseImpl db) { + assert !isTransactional : "Txnl access to UP db not allowed"; + + return LogEntryType.LOG_FILESUMMARYLN; + } + + /** + * This log entry type is configured to perform marshaling (getLogSize and + * writeToLog) under the write log mutex. Otherwise, the size could change + * in between calls to these two methods as the result of utilizaton + * tracking. + */ + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + int size = super.getLogSize(logVersion, forReplication); + if (!isDeleted()) { + size += baseSummary.getLogSize(); + getOffsets(); + size += obsoleteOffsets.getLogSize(); + } + return size; + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + + /* + * Add the tracked (live) summary to the base summary before writing it + * to the log, and reset the tracked summary. When deleting the LN, + * the tracked summary is cleared explicitly and will be null. + */ + if (trackedSummary != null && !isDeleted()) { + baseSummary.add(trackedSummary); + getOffsets(); + /* Reset the totals to zero and clear the tracked offsets. */ + trackedSummary.reset(); + } + + super.writeToLog(logBuffer, logVersion, forReplication); + + if (!isDeleted()) { + baseSummary.writeToLog(logBuffer); + obsoleteOffsets.writeToLog(logBuffer); + } + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + + this.entryVersion = entryVersion; + + super.readFromLog(itemBuffer, entryVersion); + + if (!isDeleted()) { + baseSummary.readFromLog(itemBuffer, entryVersion); + if (entryVersion > 0) { + obsoleteOffsets.readFromLog(itemBuffer, entryVersion); + } + } + } + + /** + * Should never be replicated. + */ + @Override + public boolean logicalEquals(Loggable other) { + return false; + } + + /** + * If tracked offsets may be present, get them so they are ready to be + * written to the log. + */ + private void getOffsets() { + assert !isDeleted(); + if (needOffsets) { + long[] offsets = trackedSummary.getObsoleteOffsets(); + if (offsets != null) { + int oldSize = obsoleteOffsets.getExtraMemorySize(); + obsoleteOffsets.pack(offsets); + int newSize = obsoleteOffsets.getExtraMemorySize(); + extraMarshaledMemorySize = newSize - oldSize; + } + needOffsets = false; + } + } + + /** + * Overrides this method to add space occupied by this object's fields. + */ + @Override + public long getMemorySizeIncludedByParent() { + return super.getMemorySizeIncludedByParent() + + (MemoryBudget.FILESUMMARYLN_OVERHEAD - + MemoryBudget.LN_OVERHEAD) + + obsoleteOffsets.getExtraMemorySize(); + } + + /** + * Clear out the obsoleteOffsets to save memory when the LN is deleted. + */ + @Override + void makeDeleted() { + super.makeDeleted(); + obsoleteOffsets = new PackedOffsets(); + } + + /** + * Adds the extra memory used by obsoleteOffsets to the parent BIN memory + * size. Must be called after LN is inserted into the BIN and logged, + * while the cursor is still positioned on the inserted LN. The BIN must + * be latched. [#17462] + * + *

+ * The obsoleteOffsets memory size is not initially budgeted in the usual
+ * way because PackedOffsets.pack (which changes the memory size) is called
+ * during marshalling (see getOffsets). This amount is not counted in the
+ * parent IN size in the usual way, because LN logging / marshalling occurs
+ * after the LN is inserted in the BIN and its memory size has been counted
+ * (see CursorImpl.putInternal).
+ *
+ * Note that the tree memory usage cannot be updated directly in
+ * getOffsets because the tree memory usage must always be the sum of all
+ * IN sizes, and it is reset to this sum each checkpoint.
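+ *
+ * For illustration, the sequence this accounts for (method names as in
+ * this class):
+ *
+ *   getLogSize/writeToLog            // called under the write log mutex
+ *     -> getOffsets()                // obsoleteOffsets.pack() changes
+ *                                    // getExtraMemorySize(); the delta is
+ *                                    // saved in extraMarshaledMemorySize
+ *   addExtraMarshaledMemorySize(bin) // later, with the BIN ex-latched,
+ *                                    // applies the delta via
+ *                                    // bin.updateMemorySize(0, delta)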

        + */ + @Override + public void addExtraMarshaledMemorySize(BIN parentBIN) { + if (extraMarshaledMemorySize != 0) { + assert trackedSummary != null; /* Must be set during the insert. */ + assert parentBIN.isLatchExclusiveOwner(); + parentBIN.updateMemorySize(0, extraMarshaledMemorySize); + extraMarshaledMemorySize = 0; + } + } + + @Override + public void dumpKey(StringBuilder sb, byte[] key) { + sb.append(""); + super.dumpKey(sb, key); + } +} diff --git a/src/com/sleepycat/je/tree/IN.java b/src/com/sleepycat/je/tree/IN.java new file mode 100644 index 0000000..0b92fd8 --- /dev/null +++ b/src/com/sleepycat/je/tree/IN.java @@ -0,0 +1,6495 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import static com.sleepycat.je.EnvironmentFailureException.unexpectedState; + +import java.io.FileNotFoundException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Comparator; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.cleaner.PackedObsoleteInfo; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.latch.LatchContext; +import com.sleepycat.je.latch.LatchFactory; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.latch.LatchTable; +import com.sleepycat.je.latch.SharedLatch; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.LogParams; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.Provisional; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.WholeEntry; +import com.sleepycat.je.log.entry.BINDeltaLogEntry; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.tree.dupConvert.DBIN; +import com.sleepycat.je.tree.dupConvert.DIN; +import com.sleepycat.je.tree.dupConvert.DupConvert; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.SizeofMarker; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * An IN represents an Internal Node in the JE tree. + * + * Explanation of KD (KnownDeleted) and PD (PendingDelete) entry flags + * =================================================================== + * + * PD: set for all LN entries that are deleted, even before the LN is + * committed. 
Is used as an authoritative (transactionally correct) indication + * that an LN is deleted. PD will be cleared if the txn for the deleted LN is + * aborted. + * + * KD: set under special conditions for entries containing LNs which are known + * to be obsolete. Not used for entries in an active/uncommitted transaction. + * + * First notice that IN.fetchLN will allow a FileNotFoundException when the + * PD or KD flag is set on the entry. And it will allow a NULL_LSN when the KD + * flag is set. + * + * KD was implemented first, and was originally used when the cleaner attempts + * to migrate an LN and discovers it is deleted (see Cleaner.migrateLN). We + * need KD because the INCompressor may not have run, and may not have + * compressed the BIN. There's the danger that we'll try to fetch that entry, + * and that the file was deleted by the cleaner. + * + * KD was used more recently when an unexpected exception occurs while logging + * an LN, after inserting the entry. Rather than delete the entry to clean up, + * we mark the entry KD so it won't cause a fetch error later. In this case + * the entry LSN is NULL_LSN. See Tree.insertNewSlot. + * + * PD is closely related to the first use of KD above (for cleaned deleted LNs) + * and came about because of a cleaner optimization we make. The cleaner + * considers all deleted LN log entries to be obsolete, without doing a tree + * lookup, and without any record of an obsolete offset. This makes the cost + * of cleaning of deleted LNs very low. For example, if the log looks like + * this: + * + * 100 LNA + * 200 delete of LNA + * + * then LSN 200 will be considered obsolete when this file is processed by the + * cleaner. After all, only two things can happen: (1) the txn commits, and we + * don't need LSN 200, because we can wipe this LN out of the tree, or (2) the + * txn aborts, and we don't need LSN 200, because we are going to revert to LSN + * 100/LNA. + * + * We set PD for the entry of a deleted LN at the time of the operation, and we + * clear PD if the transaction aborts. There is no real danger that this log + * entry will be processed by the cleaner before it's committed, because + * cleaning can only happen after the first active LSN. + * + * Just as in the first use of KD above, setting PD is necessary to avoid a + * fetch error, when the file is deleted by the cleaner but the entry + * containing the deleted LN has not been deleted by the INCompressor. + * + * PD is also set in replication rollback, when LNs are marked as + * invisible. + * + * When LSN locking was implemented (see CursorImpl.lockLN), the PD flag took + * on additional meaning. PD is used to determine whether an LN is deleted + * without fetching it, and therefore is relied on to be transactionally + * correct. + * + * In addition to the setting and use of the KD/PD flags described above, the + * situation is complicated by the fact that we must restore the state of these + * flags during abort, recovery, and set them properly during slot reuse. + * + * We have been meaning to consider whether PD and KD can be consolidated into + * one flag: simply the Deleted flag. The Deleted flag would be set in the + * same way as PD is currently set, as well as the second use of KD described + * above (when the LSN is NULL_LSN after an insertion error). The use of KD + * and PD for invisible entries and recovery rollback should also be + * considered. 
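+ *
+ * A compact summary of the flag semantics described above:
+ *
+ *   PD set                  : LN deleted by a possibly-uncommitted txn;
+ *                             cleared if that txn aborts
+ *   KD set                  : LN known to be obsolete (cleaner migration
+ *                             of a deleted LN, or an insertion error);
+ *                             never set for an active/uncommitted txn
+ *   KD set, LSN == NULL_LSN : slot left by an insertion error
+ *                             (see Tree.insertNewSlot)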
+ * + * If we consolidate the two flags and set the Deleted flag during a delete + * operation (like PD), we'll have to remove optimizations (in CursorImpl for + * example) that consider a slot deleted when KD is set. Since KD is rarely + * set currently, this shouldn't have a noticeable performance impact. + */ +public class IN extends Node implements Comparable, LatchContext { + + private static final String BEGIN_TAG = ""; + private static final String END_TAG = ""; + private static final String TRACE_SPLIT = "Split:"; + private static final String TRACE_DELETE = "Delete:"; + + private static final int BYTES_PER_LSN_ENTRY = 4; + public static final int MAX_FILE_OFFSET = 0xfffffe; + private static final int THREE_BYTE_NEGATIVE_ONE = 0xffffff; + + /** + * Used as the "empty rep" for the INLongRep offHeapBINIds field. + * + * minLength is 3 because BIN IDs are LRU list indexes. Initially 100k + * indexes are allocated and the largest values are used first. + * + * allowSparseRep is true because some workloads will only load BIN IDs for + * a subset of the BINs in the IN. + */ + private static final INLongRep.EmptyRep EMPTY_OFFHEAP_BIN_IDS = + new INLongRep.EmptyRep(3, true); + + /* + * Levels: + * The mapping tree has levels in the 0x20000 -> 0x2ffff number space. + * The main tree has levels in the 0x10000 -> 0x1ffff number space. + * The duplicate tree levels are in 0-> 0xffff number space. + */ + public static final int DBMAP_LEVEL = 0x20000; + public static final int MAIN_LEVEL = 0x10000; + public static final int LEVEL_MASK = 0x0ffff; + public static final int MIN_LEVEL = -1; + public static final int BIN_LEVEL = MAIN_LEVEL | 1; + + /* Used to indicate that an exact match was found in findEntry. */ + public static final int EXACT_MATCH = (1 << 16); + + /* Used to indicate that an insert was successful. */ + public static final int INSERT_SUCCESS = (1 << 17); + + /* + * A bit flag set in the return value of partialEviction() to indicate + * whether the IN is evictable or not. + */ + public static final long NON_EVICTABLE_IN = (1L << 62); + + /* + * Boolean properties of an IN, encoded as bits inside the flags + * data member. + */ + private static final int IN_DIRTY_BIT = 0x1; + private static final int IN_RECALC_TOGGLE_BIT = 0x2; + private static final int IN_IS_ROOT_BIT = 0x4; + private static final int IN_HAS_CACHED_CHILDREN_BIT = 0x8; + private static final int IN_PRI2_LRU_BIT = 0x10; + private static final int IN_DELTA_BIT = 0x20; + private static final int IN_FETCHED_COLD_BIT = 0x40; + private static final int IN_FETCHED_COLD_OFFHEAP_BIT = 0x80; + private static final int IN_RESIDENT_BIT = 0x100; + private static final int IN_PROHIBIT_NEXT_DELTA_BIT = 0x200; + private static final int IN_EXPIRATION_IN_HOURS = 0x400; + + /* Tracing for LRU-related ops */ + private static final boolean traceLRU = false; + private static final boolean traceDeltas = false; + private static final Level traceLevel = Level.INFO; + + DatabaseImpl databaseImpl; + + private int level; + + /* The unique id of this node. */ + long nodeId; + + /* Some bits are persistent and some are not, see serialize. */ + int flags; + + /* + * The identifier key is a key that can be used used to search for this IN. + * Initially it is the key of the zeroth slot, but insertions prior to slot + * zero make this no longer true. It is always equal to some key in the + * IN, and therefore it is changed by BIN.compress when removing slots. 
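+ *
+ * The (currently disabled) idKeyIsSlotKey method below checks exactly
+ * this invariant: for a non-empty BIN, identifierKey compares equal to
+ * at least one slot key.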
+ */ + private byte[] identifierKey; + + int nEntries; + + byte[] entryStates; + + /* + * entryKeys contains the keys in their entirety if key prefixing is not + * being used. If prefixing is enabled, then keyPrefix contains the prefix + * and entryKeys contains the suffixes. Records with small enough data + * (smaller than the value je.tree.maxEmbeddedLN param) are stored in + * their entirity (both key (or key suffix) and data) inside BINs. This is + * done by combining the record key and data as a two-part key (see the + * dbi/DupKeyData class) and storing the resulting array in entryKeys. + * A special case is when the record to be embedded has no data. Then, + * the two-part key format is not used, but instead the NO_DATA_LN_BIT + * is turned on in the slot's state. This saves the space overhead of + * using the two-part key format. + */ + INKeyRep entryKeys; + byte[] keyPrefix; + + /* + * The following entryLsnXXX fields are used for storing LSNs. There are + * two possible representations: a byte array based rep, and a long array + * based one. For compactness, the byte array rep is used initially. A + * single byte[] that uses four bytes per LSN is used. The baseFileNumber + * field contains the lowest file number of any LSN in the array. Then for + * each entry (four bytes each), the first byte contains the offset from + * the baseFileNumber of that LSN's file number. The remaining three bytes + * contain the file offset portion of the LSN. Three bytes will hold a + * maximum offset of 16,777,214 (0xfffffe), so with the default JE log file + * size of 10,000,000 bytes this works well. + * + * If either (1) the difference in file numbers exceeds 127 + * (Byte.MAX_VALUE) or (2) the file offset is greater than 16,777,214, then + * the byte[] based rep mutates to a long[] based rep. + * + * In the byte[] rep, DbLsn.NULL_LSN is represented by setting the file + * offset bytes for a given entry to -1 (0xffffff). + * + * Note: A compact representation will be changed to the non-compact one, + * if needed, but in the current implementation, the reverse mutation + * (from long to compact) never takes place. + */ + long baseFileNumber; + byte[] entryLsnByteArray; + long[] entryLsnLongArray; + public static boolean disableCompactLsns; // DbCacheSize only + + /* + * The children of this IN. Only the ones that are actually in the cache + * have non-null entries. Specialized sparse array represents are used to + * represent the entries. The representation can mutate as modifications + * are made to it. + */ + INTargetRep entryTargets; + + /* + * In a level 2 IN, the LRU IDs of the child BINs. + */ + private INLongRep offHeapBINIds = EMPTY_OFFHEAP_BIN_IDS; + + long inMemorySize; + + /* + * accumluted memory budget delta. Once this exceeds + * MemoryBudget.ACCUMULATED_LIMIT we inform the MemoryBudget that a change + * has occurred. See SR 12273. + */ + private int accumulatedDelta = 0; + + /* + * Max allowable accumulation of memory budget changes before MemoryBudget + * should be updated. This allows for consolidating multiple calls to + * updateXXXMemoryBudget() into one call. Not declared final so that the + * unit tests can modify it. See SR 12273. + */ + public static final int ACCUMULATED_LIMIT_DEFAULT = 1000; + public static int ACCUMULATED_LIMIT = ACCUMULATED_LIMIT_DEFAULT; + + /** + * References to the next and previous nodes in an LRU list. If the node + * is not in any LRUList, both of these will be null. 
If the node is at + * the front/back of an LRUList, prevLRUNode/nextLRUNode will point to + * the node itself. + */ + private IN nextLRUNode = null; + private IN prevLRUNode = null; + + /* + * Let L be the most recently written logrec for this IN instance. + * (a) If this is a UIN, lastFullVersion is the lsn of L. + * (b) If this is a BIN instance and L is a full-version logrec, + * lastFullVersion is the lsn of L. + * (c) If this is a BIN instance and L is a delta logrec, lastFullVersion + * is the lsn of the most recently written full-version logrec for the + * same BIN. + * + * It is set in 2 cases: + * + * (a) after "this" is created via reading a logrec L, lastFullVersion is + * set to L's lsn, if L is a UIN or a full BIN. (this is done in + * IN.postFetch/RecoveryInit(), via IN.setLastLoggedLsn()). If L is a BIN + * delta, lastFullVersion is set by BINDeltaLogEntry.readEntry() to + * L.prevFullLsn. + * + * (b) After logging a UIN or a full-BIN logrec, it is set to the LSN of + * the logrec written. This is done in IN.afterLog(). + * + * Notice that this is a persistent field, but except from case (c), when + * reading a logrec L, it is set not to the value found in L, but to the + * lsn of L. This is why its read/write is managed by the INLogEntry class + * rather than the IN readFromLog/writeFromLog methods. + */ + long lastFullVersion = DbLsn.NULL_LSN; + + /* + * BINs have a lastDeltaVersion data field as well, which is defined as + * follows: + * + * Let L be the most recently written logrec for this BIN instance. If + * L is a full-version logrec, lastDeltaVersion is NULL; otherwise it + * is the lsn of L. + * + * It is used for obsolete tracking. + * + * It is set in 2 cases: + * + * (a) after "this" is created via reading a logrec L, lastDeltaVersion + * is set to L's lsn, if L is a BIN-delta logrec, or to NULL if L is a + * full-BIN logrec (this is done in IN.postFetch/RecoveryInit(), via + * BIN.setLastLoggedLsn()). + * + * (b) After we write a logrec L for this BIN instance, lastDeltaVersion + * is set to NULL if L is a full-BIN logrec, or to L's lsn, if L is a + * BIN-delta logrec (this is done in BIN.afterLog()). + * + * Notice that this is a persistent field, but when reading a logrec L, + * it is set not to the value found in L, but to the lsn of L. This is why + * its read/write is managed by the INLogEntry class rather than the IN + * readFromLog/writeFromLog methods. + * + * private long lastDeltaVersion = DbLsn.NULL_LSN; + */ + + + /* + * A sequence of obsolete info that cannot be counted as obsolete until an + * ancestor IN is logged non-provisionally. + */ + private PackedObsoleteInfo provisionalObsolete; + + /* See convertDupKeys. */ + private boolean needDupKeyConversion; + + private int pinCount = 0; + + private SharedLatch latch; + + private IN parent; + + private TestHook fetchINHook; + + /** + * Create an empty IN, with no node ID, to be filled in from the log. + */ + public IN() { + init(null, Key.EMPTY_KEY, 0, 0); + } + + /** + * Create a new IN. + */ + public IN( + DatabaseImpl dbImpl, + byte[] identifierKey, + int capacity, + int level) { + + nodeId = dbImpl.getEnv().getNodeSequence().getNextLocalNodeId(); + + init(dbImpl, identifierKey, capacity, + generateLevel(dbImpl.getId(), level)); + + initMemorySize(); + } + + /** + * For Sizeof. + */ + public IN(@SuppressWarnings("unused") SizeofMarker marker) { + + /* + * Set all variable fields to null, since they are not part of the + * fixed overhead. 
+ */ + entryTargets = null; + entryKeys = null; + keyPrefix = null; + entryLsnByteArray = null; + entryLsnLongArray = null; + entryStates = null; + + latch = LatchFactory.createSharedLatch( + LatchSupport.DUMMY_LATCH_CONTEXT, isAlwaysLatchedExclusively()); + + /* + * Use the latch to force it to grow to "runtime size". + */ + latch.acquireExclusive(); + latch.release(); + latch.acquireExclusive(); + latch.release(); + } + + /** + * Create a new IN. Need this because we can't call newInstance() without + * getting a 0 for nodeId. + */ + IN createNewInstance( + byte[] identifierKey, + int maxEntries, + int level) { + return new IN(databaseImpl, identifierKey, maxEntries, level); + } + + /** + * Initialize IN object. + */ + protected void init( + DatabaseImpl db, + @SuppressWarnings("hiding") + byte[] identifierKey, + int initialCapacity, + @SuppressWarnings("hiding") + int level) { + + setDatabase(db); + latch = LatchFactory.createSharedLatch( + this, isAlwaysLatchedExclusively()); + flags = 0; + nEntries = 0; + this.identifierKey = identifierKey; + entryTargets = INTargetRep.NONE; + entryKeys = new INKeyRep.Default(initialCapacity); + keyPrefix = null; + baseFileNumber = -1; + + /* + * Normally we start out with the compact LSN rep and then mutate to + * the long rep when needed. But for some purposes (DbCacheSize) we + * start out with the long rep and never use the compact rep. + */ + if (disableCompactLsns) { + entryLsnByteArray = null; + entryLsnLongArray = new long[initialCapacity]; + } else { + entryLsnByteArray = new byte[initialCapacity << 2]; + entryLsnLongArray = null; + } + + entryStates = new byte[initialCapacity]; + this.level = level; + } + + @Override + public final boolean isIN() { + return true; + } + + @Override + public final boolean isUpperIN() { + return !isBIN(); + } + + @Override + public final String getLatchName() { + return shortClassName() + getNodeId(); + } + + @Override + public final int getLatchTimeoutMs() { + return databaseImpl.getEnv().getLatchTimeoutMs(); + } + + @Override + public final LatchTable getLatchTable() { + return LatchSupport.btreeLatchTable; + } + + /* + * Return whether the shared latch for this kind of node should be of the + * "always exclusive" variety. Presently, only IN's are actually latched + * shared. BINs are latched exclusive only. + */ + boolean isAlwaysLatchedExclusively() { + return false; + } + + /** + * Latch this node if it is not latched by another thread. Update the LRU + * using the given cacheMode if the latch succeeds. + */ + public final boolean latchNoWait(CacheMode cacheMode) { + if (!latch.acquireExclusiveNoWait()) { + return false; + } + updateLRU(cacheMode); + return true; + } + + /** + * Latch this node exclusive and update the LRU using the given cacheMode. + */ + public void latch(CacheMode cacheMode) { + latch.acquireExclusive(); + updateLRU(cacheMode); + } + + /** + * Latch this node exclusive and update the LRU using the default cacheMode. + */ + public final void latch() { + latch(CacheMode.DEFAULT); + } + + /** + * Latch this node shared and update the LRU using the given cacheMode. + */ + @Override + public void latchShared(CacheMode cacheMode) { + latch.acquireShared(); + updateLRU(cacheMode); + } + + /** + * Latch this node shared and update the LRU using the default cacheMode. + */ + @Override + public final void latchShared() { + latchShared(CacheMode.DEFAULT); + } + + /** + * Latch this node exclusive and do not update the LRU or cause other + * related side effects. 
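+ *
+ * Unlike latch(cacheMode) and latchShared(cacheMode), this variant never
+ * calls updateLRU, so the caller can latch without changing the node's
+ * position in the eviction LRU.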
+ * + * @param db is passed in order to initialize the database for an + * uninitialized node, which is necessary in order to latch it. + */ + public final void latchNoUpdateLRU(DatabaseImpl db) { + if (databaseImpl == null) { + databaseImpl = db; + } + latch.acquireExclusive(); + } + + /** + * Latch this node exclusive and do not update the LRU or cause other + * related side effects. + */ + public final void latchNoUpdateLRU() { + assert databaseImpl != null; + latch.acquireExclusive(); + } + + /** + * Release the latch on this node. + */ + @Override + public final void releaseLatch() { + latch.release(); + } + + /** + * Release the latch on this node if it is owned. + */ + public final void releaseLatchIfOwner() { + latch.releaseIfOwner(); + } + + /** + * @return true if this thread holds the IN's latch + */ + public final boolean isLatchOwner() { + return latch.isOwner(); + } + + public final boolean isLatchExclusiveOwner() { + return latch.isExclusiveOwner(); + } + + /* For unit testing. */ + public final int getLatchNWaiters() { + return latch.getNWaiters(); + } + + public final void updateLRU(CacheMode cacheMode) { + + if (!getInListResident()) { + return; + } + + switch (cacheMode) { + case UNCHANGED: + case MAKE_COLD: + break; + case DEFAULT: + case EVICT_LN: + case EVICT_BIN: + case KEEP_HOT: + setFetchedCold(false); + setFetchedColdOffHeap(false); + + if (isBIN() || !hasCachedChildrenFlag()) { + assert(isBIN() || !hasCachedChildren()); + getEvictor().moveBack(this); + } + break; + default: + assert false; + } + } + + /** + * This method should be used carefully. Unless this node and the parent + * are already known to be latched, call latchParent instead to access the + * parent safely. + */ + public IN getParent() { + return parent; + } + + public void setParent(IN in) { + assert in != null; + + /* + * Must hold EX-latch when changing a non-null parent. But when setting + * the parent initially (it is currently null), we assume it is being + * attached and no other threads have access to it. + */ + if (parent != null && !isLatchExclusiveOwner()) { + throw unexpectedState(); + } + + parent = in; + } + + /** + * Latches the parent exclusively, leaving this node latched. The parent + * must not already be latched. + * + * This node must be latched on entry and will be latched on exit. This + * node's latch may be released temporarily, in which case it will be + * ex-latched (since the parent is ex-latched, this isn't a drawback). + * + * Does not perform cache mode processing, since this node is already + * latched. + * + * @return the ex-latched parent, for which calling getKnownChildIndex with + * this node is guaranteed to succeed. + * + * @throws EnvironmentFailureException (fatal) if the parent latch is + * already held. + */ + public final IN latchParent() { + + assert latch.isOwner(); + assert !isRoot(); + assert getParent() != null; + + while (true) { + final IN p = getParent(); + + if (p.latch.acquireExclusiveNoWait()) { + return p; + } + + pin(); + try { + latch.release(); + p.latch.acquireExclusive(); + latch.acquireExclusive(); + } finally { + unpin(); + } + + if (getParent() == p) { + return p; + } + + p.latch.release(); + } + } + + /** + * Returns the index of the given child. Should only be called when the + * caller knows that the given child is resident. 
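+ *
+ * A minimal usage sketch (per the latchParent contract above):
+ *
+ *   final IN p = child.latchParent(); // child remains latched
+ *   try {
+ *       final int idx = p.getKnownChildIndex(child);
+ *       // ... use p and idx ...
+ *   } finally {
+ *       p.releaseLatch();
+ *   }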
+ */ + public int getKnownChildIndex(final Node child) { + + for (int i = 0; i < nEntries; i += 1) { + if (getTarget(i) == child) { + return i; + } + } + + throw unexpectedState(); + } + + public final synchronized void pin() { + assert(isLatchOwner()); + assert(pinCount >= 0); + ++pinCount; + } + + public final synchronized void unpin() { + assert(pinCount > 0); + --pinCount; + } + + public final synchronized boolean isPinned() { + assert(isLatchExclusiveOwner()); + assert(pinCount >= 0); + return pinCount > 0; + } + + /** + * Get the database for this IN. + */ + public final DatabaseImpl getDatabase() { + return databaseImpl; + } + + /** + * Set the database reference for this node. + */ + public final void setDatabase(DatabaseImpl db) { + databaseImpl = db; + } + + /* + * Get the database id for this node. + */ + public final DatabaseId getDatabaseId() { + return databaseImpl.getId(); + } + + @Override + public final EnvironmentImpl getEnvImplForFatalException() { + return databaseImpl.getEnv(); + } + + public final EnvironmentImpl getEnv() { + return databaseImpl.getEnv(); + } + + final Evictor getEvictor() { + return databaseImpl.getEnv().getEvictor(); + } + + final OffHeapCache getOffHeapCache() { + return databaseImpl.getEnv().getOffHeapCache(); + } + + /** + * Convenience method to return the database key comparator. + */ + public final Comparator getKeyComparator() { + return databaseImpl.getKeyComparator(); + } + + @Override + public final int getLevel() { + return level; + } + + public final int getNormalizedLevel() { + return level & LEVEL_MASK; + } + + private static int generateLevel(DatabaseId dbId, int newLevel) { + if (dbId.equals(DbTree.ID_DB_ID)) { + return newLevel | DBMAP_LEVEL; + } else { + return newLevel | MAIN_LEVEL; + } + } + + public final long getNodeId() { + return nodeId; + } + + /* For unit tests only. */ + final void setNodeId(long nid) { + nodeId = nid; + } + + /** + * We would like as even a hash distribution as possible so that the + * Evictor's LRU is as accurate as possible. ConcurrentHashMap takes the + * value returned by this method and runs its own hash algorithm on it. + * So a bit complement of the node ID is sufficient as the return value and + * is a little better than returning just the node ID. If we use a + * different container in the future that does not re-hash the return + * value, we should probably implement the Wang-Jenkins hash function here. + */ + @Override + public final int hashCode() { + return (int) ~getNodeId(); + } + + @Override + public final boolean equals(Object obj) { + if (!(obj instanceof IN)) { + return false; + } + IN in = (IN) obj; + return (this.getNodeId() == in.getNodeId()); + } + + /** + * Sort based on equality key. + */ + public final int compareTo(IN argIN) { + long argNodeId = argIN.getNodeId(); + long myNodeId = getNodeId(); + + if (myNodeId < argNodeId) { + return -1; + } else if (myNodeId > argNodeId) { + return 1; + } else { + return 0; + } + } + + public final boolean getDirty() { + return (flags & IN_DIRTY_BIT) != 0; + } + + public final void setDirty(boolean dirty) { + if (dirty) { + flags |= IN_DIRTY_BIT; + } else { + flags &= ~IN_DIRTY_BIT; + } + } + + @Override + public final boolean isBINDelta() { + assert(isUpperIN() || isLatchOwner()); + return (flags & IN_DELTA_BIT) != 0; + } + + /* + * This version of isBINDelta() takes a checkLatched param to allow + * for cases where it is ok to call the method without holding the + * BIN latch (e.g. 
in single-threaded tests, or when the BIN is not + * attached to the tree (and thus inaccessible from other threads)). + */ + @Override + public final boolean isBINDelta(boolean checkLatched) { + assert(!checkLatched || isUpperIN() || isLatchOwner()); + return (flags & IN_DELTA_BIT) != 0; + } + + final void setBINDelta(boolean delta) { + if (delta) { + flags |= IN_DELTA_BIT; + } else { + flags &= ~IN_DELTA_BIT; + } + } + + /** + * Indicates that the BIN was fetched from disk, or loaded from the + * off-heap cache, using CacheMode.UNCHANGED, and has not been accessed + * with another CacheMode. BINs in this state should be evicted from main + * cache as soon as they are no longer referenced by a cursor. If they were + * loaded from off-heap cache, they should be stored off-heap when they are + * evicted from main. The FetchedColdOffHeap flag indicates whether the + * BIN was loaded from off-heap cache. + */ + public final boolean getFetchedCold() { + return (flags & IN_FETCHED_COLD_BIT) != 0; + } + + /** @see #getFetchedCold() */ + public final void setFetchedCold(boolean val) { + if (val) { + flags |= IN_FETCHED_COLD_BIT; + } else { + flags &= ~IN_FETCHED_COLD_BIT; + } + } + + /** @see #getFetchedCold() */ + public final boolean getFetchedColdOffHeap() { + return (flags & IN_FETCHED_COLD_OFFHEAP_BIT) != 0; + } + + /** @see #getFetchedCold() */ + public final void setFetchedColdOffHeap(boolean val) { + if (val) { + flags |= IN_FETCHED_COLD_OFFHEAP_BIT; + } else { + flags &= ~IN_FETCHED_COLD_OFFHEAP_BIT; + } + } + + public final boolean getRecalcToggle() { + return (flags & IN_RECALC_TOGGLE_BIT) != 0; + } + + public final void setRecalcToggle(boolean toggle) { + if (toggle) { + flags |= IN_RECALC_TOGGLE_BIT; + } else { + flags &= ~IN_RECALC_TOGGLE_BIT; + } + } + + public final boolean isRoot() { + return (flags & IN_IS_ROOT_BIT) != 0; + } + + final void setIsRoot(boolean isRoot) { + setIsRootFlag(isRoot); + setDirty(true); + } + + private void setIsRootFlag(boolean isRoot) { + if (isRoot) { + flags |= IN_IS_ROOT_BIT; + } else { + flags &= ~IN_IS_ROOT_BIT; + } + } + + public final boolean hasCachedChildrenFlag() { + return (flags & IN_HAS_CACHED_CHILDREN_BIT) != 0; + } + + private void setHasCachedChildrenFlag(boolean value) { + if (value) { + flags |= IN_HAS_CACHED_CHILDREN_BIT; + } else { + flags &= ~IN_HAS_CACHED_CHILDREN_BIT; + } + } + + public final boolean isInPri2LRU() { + return (flags & IN_PRI2_LRU_BIT) != 0; + } + + /* public for unit tests */ + public final void setInPri2LRU(boolean value) { + if (value) { + flags |= IN_PRI2_LRU_BIT; + } else { + flags &= ~IN_PRI2_LRU_BIT; + } + } + + public boolean isExpirationInHours() { + return (flags & IN_EXPIRATION_IN_HOURS) != 0; + } + + void setExpirationInHours(boolean value) { + if (value) { + flags |= IN_EXPIRATION_IN_HOURS; + } else { + flags &= ~IN_EXPIRATION_IN_HOURS; + } + } + + /** + * @return the identifier key for this node. + */ + public final byte[] getIdentifierKey() { + return identifierKey; + } + + /** + * Set the identifier key for this node. + * + * @param key - the new identifier key for this node. + * + * @param makeDirty should normally be true, but may be false when an + * expired slot containing the identifier key has been deleted. + */ + public final void setIdentifierKey(byte[] key, boolean makeDirty) { + + assert(!isBINDelta()); + + /* + * The identifierKey is "intentionally" not kept track of in the + * memory budget. If we did, then it would look like this: + + int oldIDKeySz = (identifierKey == null) ? 
+ 0 : + MemoryBudget.byteArraySize(identifierKey.length); + + int newIDKeySz = (key == null) ? + 0 : + MemoryBudget.byteArraySize(key.length); + updateMemorySize(newIDKeySz - oldIDKeySz); + + */ + identifierKey = key; + + if (makeDirty) { + setDirty(true); + } + } + + /** + * @return the number of entries in this node. + */ + public final int getNEntries() { + return nEntries; + } + + /** + * @return the maximum number of entries in this node. + * + * Overriden by TestIN in INEntryTestBase.java + */ + public int getMaxEntries() { + return entryStates.length; + } + + public final byte getState(int idx) { + return entryStates[idx]; + } + + /** + * @return true if the object is dirty. + */ + final boolean isDirty(int idx) { + return ((entryStates[idx] & EntryStates.DIRTY_BIT) != 0); + } + + /** + * @return true if the idx'th entry has been deleted, although the + * transaction that performed the deletion may not be committed. + */ + public final boolean isEntryPendingDeleted(int idx) { + return ((entryStates[idx] & EntryStates.PENDING_DELETED_BIT) != 0); + } + + /** + * Set pendingDeleted to true. + */ + public final void setPendingDeleted(int idx) { + + entryStates[idx] |= EntryStates.PENDING_DELETED_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /** + * Set pendingDeleted to false. + */ + final void clearPendingDeleted(int idx) { + + entryStates[idx] &= EntryStates.CLEAR_PENDING_DELETED_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /** + * @return true if the idx'th entry is deleted for sure. If a transaction + * performed the deletion, it has been committed. + */ + public final boolean isEntryKnownDeleted(int idx) { + return ((entryStates[idx] & EntryStates.KNOWN_DELETED_BIT) != 0); + } + + /** + * Set KD flag to true and clear the PD flag (PD does not need to be on + * if KD is on). + */ + public final void setKnownDeleted(int idx) { + + assert(isBIN()); + + entryStates[idx] |= EntryStates.KNOWN_DELETED_BIT; + entryStates[idx] &= EntryStates.CLEAR_PENDING_DELETED_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /** + * Set knownDeleted flag to true and evict the child LN if cached. The + * child LN is evicted to save memory, since it will never be fetched + * again. + */ + public final void setKnownDeletedAndEvictLN(int index) { + + assert(isBIN()); + + setKnownDeleted(index); + + LN oldLN = (LN) getTarget(index); + if (oldLN != null) { + updateMemorySize(oldLN, null /* newNode */); + oldLN.releaseMemoryBudget(); + } + setTarget(index, null); + } + + /** + * Set knownDeleted to false. + */ + final void clearKnownDeleted(int idx) { + + assert(isBIN()); + + entryStates[idx] &= EntryStates.CLEAR_KNOWN_DELETED_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /* + * In the future we may want to move the following static methods to an + * EntryState utility class and share all state bit twidling among IN, + * ChildReference, and DeltaInfo. + */ + + /** + * Returns true if the given state is known deleted. + */ + static boolean isStateKnownDeleted(byte state) { + return ((state & EntryStates.KNOWN_DELETED_BIT) != 0); + } + + /** + * Returns true if the given state is pending deleted. + */ + static boolean isStatePendingDeleted(byte state) { + return ((state & EntryStates.PENDING_DELETED_BIT) != 0); + } + + /** + * Return true if the LN at the given slot is embedded. 
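+ *
+ * The possible slot-state combinations, per the entryKeys comment above:
+ *
+ *   EMBEDDED_LN_BIT clear                     : key only; LN data on disk
+ *   EMBEDDED_LN_BIT set, NO_DATA_LN_BIT clear : two-part key embeds key+data
+ *   EMBEDDED_LN_BIT set, NO_DATA_LN_BIT set   : embedded record, no data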
+ */ + public final boolean isEmbeddedLN(int idx) { + return ((entryStates[idx] & EntryStates.EMBEDDED_LN_BIT) != 0); + } + + public static boolean isEmbeddedLN(byte state) { + return ((state & EntryStates.EMBEDDED_LN_BIT) != 0); + } + + /** + * Set embeddedLN to true. + */ + private void setEmbeddedLN(int idx) { + + entryStates[idx] |= EntryStates.EMBEDDED_LN_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /** + * Set embeddedLN to false. + */ + private void clearEmbeddedLN(int idx) { + + entryStates[idx] &= EntryStates.CLEAR_EMBEDDED_LN_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /** + * Return true if the LN at the given slot is an embedded LN with no data. + */ + public final boolean isNoDataLN(int idx) { + return ((entryStates[idx] & EntryStates.NO_DATA_LN_BIT) != 0); + } + + public static boolean isNoDataLN(byte state) { + return ((state & EntryStates.NO_DATA_LN_BIT) != 0); + } + + /** + * Set noDataLN to true. + */ + void setNoDataLN(int idx) { + + entryStates[idx] |= EntryStates.NO_DATA_LN_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /** + * Set noDataLN to false. + */ + private void clearNoDataLN(int idx) { + + entryStates[idx] &= EntryStates.CLEAR_NO_DATA_LN_BIT; + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + + /* + * + */ + public final boolean haveEmbeddedData(int idx) { + return (isEmbeddedLN(idx) && !isNoDataLN(idx)); + } + + /* For unit testing */ + public final int getNumEmbeddedLNs() { + int res = 0; + for (int i = 0; i < getNEntries(); ++i) { + if (isEmbeddedLN(i)) { + ++res; + } + } + + return res; + } + + /* For unit testing */ + public final INKeyRep getKeyVals() { + return entryKeys; + } + + public final byte[] getKeyPrefix() { + return keyPrefix; + } + + /* + * For unit testing only + */ + public final boolean hasKeyPrefix() { + return keyPrefix != null; + } + + /* This has default protection for access by the unit tests. */ + final void setKeyPrefix(byte[] keyPrefix) { + + assert databaseImpl != null; + + int prevLength = (this.keyPrefix == null) ? 0 : this.keyPrefix.length; + this.keyPrefix = keyPrefix; + /* Update the memory budgeting to reflect changes in the key prefix. */ + int currLength = (keyPrefix == null) ? 0 : keyPrefix.length; + updateMemorySize(prevLength, currLength); + } + + /** + * Return the idx'th key. If prefixing is enabled, construct a new byte[] + * containing the prefix and suffix. If prefixing is not enabled, just + * return the current byte[] in entryKeys. + */ + public final byte[] getKey(int idx) { + + assert idx < nEntries; + + byte[] key = entryKeys.getFullKey( + keyPrefix, idx, haveEmbeddedData(idx)); + + assert(key != null); + + return key; + } + + public final byte[] getData(int idx) { + + if (haveEmbeddedData(idx)) { + return entryKeys.getData(idx); + } + + if (isNoDataLN(idx)) { + return Key.EMPTY_KEY; + } + + return null; + } + + /** + * Returns the size of the key that is stored persistently, which will be + * the combined key-data for an embedded LN or duplicated DB record. + */ + int getStoredKeySize(int idx) { + return entryKeys.size(idx); + } + + /** + * Updates the key in the idx-th slot of this BIN, if the DB allows key + * updates and the new key is not identical to the current key in the slot. + * It also updates the data (if any) that is embedded with the key in the + * idx-slot, or embeds new data in that slot, is the "data" param is + * non-null, or removes embedded data, if "data" is null. 
Finally, it + * sets the EMBEDDED_LN_BIT and NO_DATA_LN_BIT flags in the slot's state. + * + * @param key is the key to set in the slot and is the LN key. + * + * @param data If the data portion of a record must be embedded in this + * BIN, "data" stores the record's data. Null otherwise. See also comment + * for the keyEntries field. + * + * @return true if a multi-slot change was made and the complete IN memory + * size must be updated. + */ + private boolean updateLNSlotKey(int idx, byte[] key, byte[] data) { + + assert(isBIN()); + + boolean haveEmbeddedData = haveEmbeddedData(idx); + + if (data == null) { + if (isEmbeddedLN(idx)) { + clearEmbeddedLN(idx); + clearNoDataLN(idx); + } + } else { + if (!isEmbeddedLN(idx)) { + setEmbeddedLN(idx); + } + if (data.length == 0) { + setNoDataLN(idx); + } else { + clearNoDataLN(idx); + } + } + + /* + * The new key may be null if a dup LN was deleted, in which case there + * is no need to update it. There is no need to compare keys if there + * is no comparator configured, since a key cannot be changed when the + * default comparator is used. + */ + if (key != null && + (databaseImpl.allowsKeyUpdates() || + DupConvert.needsConversion(databaseImpl)) && + !Arrays.equals(key, getKey(idx))) { + + setDirty(true); + return setKey(idx, key, data, false); + + } else if (haveEmbeddedData) { + + /* + * The key does not change, but the slot contains embedded data, + * which must now either be removed (if data == null or + * data.length == 0) or updated. + * TODO #21488: update the data only if it actually changes. + */ + setDirty(true); + entryStates[idx] |= EntryStates.DIRTY_BIT; + + INKeyRep.Type oldRepType = entryKeys.getType(); + entryKeys = entryKeys.setData(idx, data, this); + return oldRepType != entryKeys.getType(); + + } else if (data != null && data.length != 0) { + + /* + * The key does not change, but we now have to embed data in a slot + * that does not currently have embedded data. + */ + setDirty(true); + entryStates[idx] |= EntryStates.DIRTY_BIT; + + key = entryKeys.getKey(idx, false); + INKeyRep.Type oldRepType = entryKeys.getType(); + entryKeys = entryKeys.set(idx, key, data, this); + return oldRepType != entryKeys.getType(); + + } else { + return false; + } + } + + /* + * Convenience wrapper for setKey() method below + */ + private boolean insertKey( + int idx, + byte[] key, + byte[] data) { + + /* + * Set the id key when inserting the first entry. This is important + * when compression removes all entries from a BIN, and then an entry + * is inserted before the empty BIN is purged. + */ + if (nEntries == 1 && !isBINDelta()) { + setIdentifierKey(key, true /*makeDirty*/); + } + + return setKey(idx, key, data, true); + } + + // TODO re-enable this and figure out why it is firing + + private boolean idKeyIsSlotKey() { + + if (true) { + return true; + } + + if (!isBIN() || nEntries == 0) { + return true; + } + + for (int i = 0; i < nEntries; i += 1) { + + if (entryKeys.compareKeys( + identifierKey, keyPrefix, i, haveEmbeddedData(i), + databaseImpl.getKeyComparator()) == 0) { + + return true; + } + } + + return false; + } + + /* + * Convenience wrapper for setKey() method below. It is used for + * upper INs only, so no need to worry about the EMBEDDED_LN_BIT + * and NO_DATA_LN_BIT flags. + */ + private boolean updateKey( + int idx, + byte[] key, + byte[] data) { + return setKey(idx, key, data, false); + } + + /** + * This method inserts or updates a key at a given slot. 
In either case, + * the associated embedded data (if any) is inserted or updated as well, + * and the key prefix is adjusted, if necessary. + * + * In case of insertion (indicated by a true value for the isInsertion + * param), it is assumed that the idx slot does not store any valid info, + * so any change to the key prefix (if any) is due to the insertion of + * this new new key and not to the removal of the current key at the idx + * slot. + * + * In case of update, the method does not check if the current key is + * indeed different from the new key; it just updates the key + * unconditionally. If the slot has embedded data, that data will also + * be updated (if the data param is not null), or be removed (if the data + * param is null). If the slot does not have embedded data and the data + * param is not null, the given data will be embedded. + * + * Note: For BINs, the maintenance of the EMBEDDED_LN_BIT andNO_DATA_LN_BIT + * is done by the callers of this method. + * + * @param data If the data portion of a record must be embedded in this + * BIN, "data" stores the record's data. Null otherwise. See also comment + * for the keyEntries field. + * + * @return true if a multi-slot change was made and the complete IN memory + * size must be updated. + */ + public boolean setKey( + int idx, + byte[] key, + byte[] data, + boolean isInsertion) { + + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + + /* + * Only compute key prefix if prefixing is enabled and there's an + * existing prefix. + */ + if (databaseImpl.getKeyPrefixing() && keyPrefix != null) { + + int newPrefixLen = Key.getKeyPrefixLength( + keyPrefix, keyPrefix.length, key); + + if (newPrefixLen < keyPrefix.length) { + + /* + * The new key doesn't share the current prefix, so recompute + * the prefix and readjust all the existing suffixes. + */ + byte[] newPrefix = (isInsertion ? + Key.createKeyPrefix(keyPrefix, key) : + computeKeyPrefix(idx)); + + if (newPrefix != null) { + /* Take the new key into consideration for new prefix. */ + newPrefix = Key.createKeyPrefix(newPrefix, key); + } + + recalcSuffixes(newPrefix, key, data, idx); + return true; + + } else { + + INKeyRep.Type prevRepType = entryKeys.getType(); + + byte[] suffix = computeKeySuffix(keyPrefix, key); + entryKeys = entryKeys.set(idx, suffix, data, this); + + return prevRepType != entryKeys.getType(); + } + + } else if (keyPrefix != null) { + + /* + * Key prefixing has been turned off on this database, but there + * are existing prefixes. Remove prefixes for this IN. + */ + recalcSuffixes(null, key, data, idx); + return true; + + } else { + INKeyRep.Type oldRepType = entryKeys.getType(); + entryKeys = entryKeys.set(idx, key, data, this); + return oldRepType != entryKeys.getType(); + } + } + + /* + * Given 2 byte arrays, "prefix" and "key", where "prefix" is or stores + * a prefix of "key", allocate and return another byte array that stores + * the suffix of "key" w.r.t. "prefix". + */ + private static byte[] computeKeySuffix(byte[] prefix, byte[] key) { + + int prefixLen = (prefix == null ? 0 : prefix.length); + + if (prefixLen == 0) { + return key; + } + + int suffixLen = key.length - prefixLen; + byte[] ret = new byte[suffixLen]; + System.arraycopy(key, prefixLen, ret, 0, suffixLen); + return ret; + } + + /* + * Iterate over all keys in this IN and recalculate their suffixes based on + * newPrefix. 
If keyVal and idx are supplied, it means that entry[idx] is + * about to be changed to keyVal so use that instead of + * entryKeys.get(idx) when computing the new suffixes. If idx is < 0, + * and keyVal is null, then recalculate suffixes for all entries in this. + */ + private void recalcSuffixes( + byte[] newPrefix, + byte[] key, + byte[] data, + int idx) { + + for (int i = 0; i < nEntries; i++) { + + byte[] curKey = (i == idx ? key : getKey(i)); + + byte[] curData = null; + + if (i == idx) { + curData = data; + } else if (haveEmbeddedData(i)) { + curData = entryKeys.getData(i); + } + + byte[] suffix = computeKeySuffix(newPrefix, curKey); + + entryKeys = entryKeys.set(i, suffix, curData, this); + } + + setKeyPrefix(newPrefix); + } + + /** + * Forces computation of the key prefix, without requiring a split. + * Is public for use by DbCacheSize. + */ + public final void recalcKeyPrefix() { + + assert(!isBINDelta()); + + recalcSuffixes(computeKeyPrefix(-1), null, null, -1); + } + + /* + * Computes a key prefix based on all the keys in 'this'. Return null if + * the IN is empty or prefixing is not enabled or there is no common + * prefix for the keys. + */ + private byte[] computeKeyPrefix(int excludeIdx) { + + if (!databaseImpl.getKeyPrefixing() || nEntries <= 1) { + return null; + } + + int firstIdx = (excludeIdx == 0) ? 1 : 0; + byte[] curPrefixKey = getKey(firstIdx); + int prefixLen = curPrefixKey.length; + + /* + * Only need to look at first and last entries when keys are ordered + * byte-by-byte. But when there is a comparator, keys are not + * necessarily ordered byte-by-byte. [#21328] + */ + boolean byteOrdered; + if (true) { + /* Disable optimization for now. Needs testing. */ + byteOrdered = false; + } else { + byteOrdered = (databaseImpl.getKeyComparator() == null); + } + + if (byteOrdered) { + int lastIdx = nEntries - 1; + if (lastIdx == excludeIdx) { + lastIdx -= 1; + } + if (lastIdx > firstIdx) { + byte[] lastKey = getKey(lastIdx); + int newPrefixLen = Key.getKeyPrefixLength( + curPrefixKey, prefixLen, lastKey); + + if (newPrefixLen < prefixLen) { + curPrefixKey = lastKey; + prefixLen = newPrefixLen; + } + } + } else { + for (int i = firstIdx + 1; i < nEntries; i++) { + + if (i == excludeIdx) { + continue; + } + + byte[] curKey = getKey(i); + + int newPrefixLen = Key.getKeyPrefixLength( + curPrefixKey, prefixLen, curKey); + + if (newPrefixLen < prefixLen) { + curPrefixKey = curKey; + prefixLen = newPrefixLen; + } + } + } + + byte[] ret = new byte[prefixLen]; + System.arraycopy(curPrefixKey, 0, ret, 0, prefixLen); + + return ret; + } + + /* + * For debugging. + */ + final boolean verifyKeyPrefix() { + + byte[] computedKeyPrefix = computeKeyPrefix(-1); + if (keyPrefix == null) { + return computedKeyPrefix == null; + } + + if (computedKeyPrefix == null || + computedKeyPrefix.length < keyPrefix.length) { + System.out.println("VerifyKeyPrefix failed"); + System.out.println(dumpString(0, false)); + return false; + } + for (int i = 0; i < keyPrefix.length; i++) { + if (keyPrefix[i] != computedKeyPrefix[i]) { + System.out.println("VerifyKeyPrefix failed"); + System.out.println(dumpString(0, false)); + return false; + } + } + return true; + } + + /** + * Returns whether the given key is greater than or equal to the first key + * in the IN and less than or equal to the last key in the IN. This method + * is used to determine whether a key to be inserted belongs in this IN, + * without doing a tree search. 
If false is returned it is still possible + * that the key belongs in this IN, but a tree search must be performed to + * find out. + */ + public final boolean isKeyInBounds(byte[] key) { + + assert(!isBINDelta()); + + if (nEntries < 2) { + return false; + } + + Comparator comparator = getKeyComparator(); + int cmp; + + /* Compare key given to my first key. */ + cmp = entryKeys.compareKeys( + key, keyPrefix, 0, haveEmbeddedData(0), comparator); + + if (cmp < 0) { + return false; + } + + /* Compare key given to my last key. */ + int idx = nEntries - 1; + cmp = entryKeys.compareKeys( + key, keyPrefix, idx, haveEmbeddedData(idx), comparator); + + return cmp <= 0; + } + + /** + * Return the idx'th LSN for this entry. + * + * @return the idx'th LSN for this entry. + */ + public final long getLsn(int idx) { + + if (entryLsnLongArray == null) { + int offset = idx << 2; + int fileOffset = getFileOffset(offset); + if (fileOffset == -1) { + return DbLsn.NULL_LSN; + } else { + return DbLsn.makeLsn((baseFileNumber + + getFileNumberOffset(offset)), + fileOffset); + } + } else { + return entryLsnLongArray[idx]; + } + } + + /** + * Set the LSN of the idx'th slot, mark the slot dirty, and update + * memory consuption. Throw exception if the update is not legitimate. + */ + public void setLsn(int idx, long lsn) { + setLsn(idx, lsn, true); + } + + /** + * Set the LSN of the idx'th slot, mark the slot dirty, and update + * memory consuption. If "check" is true, throw exception if the + * update is not legitimate. + */ + private void setLsn(int idx, long lsn, boolean check) { + + if (!check || shouldUpdateLsn(getLsn(idx), lsn)) { + + int oldSize = computeLsnOverhead(); + + /* setLsnInternal can mutate to an array of longs. */ + setLsnInternal(idx, lsn); + + updateMemorySize(computeLsnOverhead() - oldSize); + entryStates[idx] |= EntryStates.DIRTY_BIT; + setDirty(true); + } + } + + /* + * Set the LSN of the idx'th slot. If the current storage for LSNs is the + * compact one, mutate it to the the non-compact, if necessary. + */ + final void setLsnInternal(int idx, long value) { + + /* Will implement this in the future. Note, don't adjust if mutating.*/ + //maybeAdjustCapacity(offset); + if (entryLsnLongArray != null) { + entryLsnLongArray[idx] = value; + return; + } + + int offset = idx << 2; + + if (value == DbLsn.NULL_LSN) { + setFileNumberOffset(offset, (byte) 0); + setFileOffset(offset, -1); + return; + } + + long thisFileNumber = DbLsn.getFileNumber(value); + + if (baseFileNumber == -1) { + /* First entry. 
*/ + baseFileNumber = thisFileNumber; + setFileNumberOffset(offset, (byte) 0); + + } else { + + if (thisFileNumber < baseFileNumber) { + if (!adjustFileNumbers(thisFileNumber)) { + mutateToLongArray(idx, value); + return; + } + baseFileNumber = thisFileNumber; + } + + long fileNumberDifference = thisFileNumber - baseFileNumber; + if (fileNumberDifference > Byte.MAX_VALUE) { + mutateToLongArray(idx, value); + return; + } + + setFileNumberOffset( + offset, (byte) (thisFileNumber - baseFileNumber)); + //assert getFileNumberOffset(offset) >= 0; + } + + int fileOffset = (int) DbLsn.getFileOffset(value); + if (fileOffset > MAX_FILE_OFFSET) { + mutateToLongArray(idx, value); + return; + } + + setFileOffset(offset, fileOffset); + //assert getLsn(offset) == value; + } + + private boolean adjustFileNumbers(long newBaseFileNumber) { + + long oldBaseFileNumber = baseFileNumber; + for (int i = 0; + i < entryLsnByteArray.length; + i += BYTES_PER_LSN_ENTRY) { + if (getFileOffset(i) == -1) { + continue; + } + + long curEntryFileNumber = + oldBaseFileNumber + getFileNumberOffset(i); + long newCurEntryFileNumberOffset = + (curEntryFileNumber - newBaseFileNumber); + + if (newCurEntryFileNumberOffset > Byte.MAX_VALUE) { + long undoOffset = oldBaseFileNumber - newBaseFileNumber; + for (int j = i - BYTES_PER_LSN_ENTRY; + j >= 0; + j -= BYTES_PER_LSN_ENTRY) { + if (getFileOffset(j) == -1) { + continue; + } + setFileNumberOffset + (j, (byte) (getFileNumberOffset(j) - undoOffset)); + //assert getFileNumberOffset(j) >= 0; + } + return false; + } + setFileNumberOffset(i, (byte) newCurEntryFileNumberOffset); + + //assert getFileNumberOffset(i) >= 0; + } + return true; + } + + private void setFileNumberOffset(int offset, byte fileNumberOffset) { + entryLsnByteArray[offset] = fileNumberOffset; + } + + private byte getFileNumberOffset(int offset) { + return entryLsnByteArray[offset]; + } + + private void setFileOffset(int offset, int fileOffset) { + put3ByteInt(offset + 1, fileOffset); + } + + private int getFileOffset(int offset) { + return get3ByteInt(offset + 1); + } + + private void put3ByteInt(int offset, int value) { + entryLsnByteArray[offset++] = (byte) value; + entryLsnByteArray[offset++] = (byte) (value >>> 8); + entryLsnByteArray[offset] = (byte) (value >>> 16); + } + + private int get3ByteInt(int offset) { + int ret = (entryLsnByteArray[offset++] & 0xFF); + ret += (entryLsnByteArray[offset++] & 0xFF) << 8; + ret += (entryLsnByteArray[offset] & 0xFF) << 16; + if (ret == THREE_BYTE_NEGATIVE_ONE) { + ret = -1; + } + + return ret; + } + + private void mutateToLongArray(int idx, long value) { + int nElts = entryLsnByteArray.length >> 2; + long[] newArr = new long[nElts]; + for (int i = 0; i < nElts; i++) { + newArr[i] = getLsn(i); + } + newArr[idx] = value; + entryLsnLongArray = newArr; + entryLsnByteArray = null; + } + + /** + * For a deferred write database, ensure that information is not lost when + * a new LSN is assigned. Also ensures that a transient LSN is not + * accidentally assigned to a persistent entry. + * + * Because this method uses strict checking, prepareForSlotReuse must + * sometimes be called when a new logical entry is being placed in a slot, + * e.g., during an IN split or an LN slot reuse. + * + * The following transition is a NOOP and the LSN is not set: + * Any LSN to same value. 
+ * The following transitions are allowed and cause the LSN to be set: + * Null LSN to transient LSN + * Null LSN to persistent LSN + * Transient LSN to persistent LSN + * Persistent LSN to new persistent LSN + * The following transitions should not occur and throw an exception: + * Transient LSN to null LSN + * Transient LSN to new transient LSN + * Persistent LSN to null LSN + * Persistent LSN to transient LSN + * + * The above imply that a transient or null LSN can overwrite only a null + * LSN. + */ + private final boolean shouldUpdateLsn(long oldLsn, long newLsn) { + + /* Save a little computation in packing/updating an unchanged LSN. */ + if (oldLsn == newLsn) { + return false; + } + /* The rules for a new null LSN can be broken in a read-only env. */ + if (newLsn == DbLsn.NULL_LSN && getEnv().isReadOnly()) { + return true; + } + /* Enforce LSN update rules. Assume lsn != oldLsn. */ + if (databaseImpl.isDeferredWriteMode()) { + if (oldLsn != DbLsn.NULL_LSN && DbLsn.isTransientOrNull(newLsn)) { + throw unexpectedState( + "DeferredWrite LSN update not allowed" + + " oldLsn = " + DbLsn.getNoFormatString(oldLsn) + + " newLsn = " + DbLsn.getNoFormatString(newLsn)); + } + } else { + if (DbLsn.isTransientOrNull(newLsn)) { + throw unexpectedState( + "LSN update not allowed" + + " oldLsn = " + DbLsn.getNoFormatString(oldLsn) + + " newLsn = " + DbLsn.getNoFormatString(newLsn)); + } + } + return true; + } + + /* For unit tests. */ + final long[] getEntryLsnLongArray() { + return entryLsnLongArray; + } + + /* For unit tests. */ + final byte[] getEntryLsnByteArray() { + return entryLsnByteArray; + } + + /* For unit tests. */ + final void initEntryLsn(int capacity) { + entryLsnLongArray = null; + entryLsnByteArray = new byte[capacity << 2]; + baseFileNumber = -1; + } + + /* Will implement this in the future. Note, don't adjust if mutating.*/ + /*** + private void maybeAdjustCapacity(int offset) { + if (entryLsnLongArray == null) { + int bytesNeeded = offset + BYTES_PER_LSN_ENTRY; + int currentBytes = entryLsnByteArray.length; + if (currentBytes < bytesNeeded) { + int newBytes = bytesNeeded + + (GROWTH_INCREMENT * BYTES_PER_LSN_ENTRY); + byte[] newArr = new byte[newBytes]; + System.arraycopy(entryLsnByteArray, 0, newArr, 0, + currentBytes); + entryLsnByteArray = newArr; + for (int i = currentBytes; + i < newBytes; + i += BYTES_PER_LSN_ENTRY) { + setFileNumberOffset(i, (byte) 0); + setFileOffset(i, -1); + } + } + } else { + int currentEntries = entryLsnLongArray.length; + int idx = offset >> 2; + if (currentEntries < idx + 1) { + int newEntries = idx + GROWTH_INCREMENT; + long[] newArr = new long[newEntries]; + System.arraycopy(entryLsnLongArray, 0, newArr, 0, + currentEntries); + entryLsnLongArray = newArr; + for (int i = currentEntries; i < newEntries; i++) { + entryLsnLongArray[i] = DbLsn.NULL_LSN; + } + } + } + } + ***/ + + /** + * The last logged size is not stored for UINs. + */ + boolean isLastLoggedSizeStored(int idx) { + return false; + } + + boolean mayHaveLastLoggedSizeStored() { + return false; + } + + /** + * The last logged size is not stored for UINs. + */ + public void setLastLoggedSize(int idx, int lastLoggedSize) { + } + + /** + * The last logged size is not stored for UINs. + */ + public void clearLastLoggedSize(int idx) { + } + + /** + * The last logged size is not stored for UINs. + */ + void setLastLoggedSizeUnconditional(int idx, int lastLoggedSize) { + } + + /** + * The last logged size is not stored for UINs. 
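+ * Returning 0 below serves as an "unknown size" hint when the log
+ * entry is fetched (see fetchIN); subclasses that do store the last
+ * logged size are expected to override these methods.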
+ */ + public int getLastLoggedSize(int idx) { + return 0; + } + + public void setOffHeapBINId(int idx, + int val, + boolean pri2, + boolean dirty) { + + assert getNormalizedLevel() == 2; + assert val >= 0; + + setOffHeapBINPri2(idx, pri2); + setOffHeapBINDirty(idx, dirty); + + final long newVal = val + 1; + final long oldVal = offHeapBINIds.get(idx); + + if (oldVal == newVal) { + return; + } + + assert oldVal == 0; + + offHeapBINIds = offHeapBINIds.set(idx, newVal, this); + } + + public void clearOffHeapBINId(int idx) { + + assert getNormalizedLevel() == 2; + + setOffHeapBINPri2(idx, false); + setOffHeapBINDirty(idx, false); + + final long oldVal = offHeapBINIds.get(idx); + + if (oldVal == 0) { + return; + } + + offHeapBINIds = offHeapBINIds.set(idx, 0, this); + + if (getInListResident() && + getNormalizedLevel() == 2 && + offHeapBINIds.isEmpty()) { + + getEvictor().moveToPri1LRU(this); + } + } + + public int getOffHeapBINId(int idx) { + return ((int) offHeapBINIds.get(idx)) - 1; + } + + public boolean hasOffHeapBINIds() { + return !offHeapBINIds.isEmpty(); + } + + public long getOffHeapBINIdsMemorySize() { + return offHeapBINIds.getMemorySize(); + } + + private void setOffHeapBINDirty(int idx, boolean val) { + if (val) { + entryStates[idx] |= EntryStates.OFFHEAP_DIRTY_BIT; + } else { + entryStates[idx] &= EntryStates.CLEAR_OFFHEAP_DIRTY_BIT; + } + } + + public boolean isOffHeapBINDirty(int idx) { + return (entryStates[idx] & EntryStates.OFFHEAP_DIRTY_BIT) != 0; + } + + private void setOffHeapBINPri2(int idx, boolean val) { + if (val) { + entryStates[idx] |= EntryStates.OFFHEAP_PRI2_BIT; + } else { + entryStates[idx] &= EntryStates.CLEAR_OFFHEAP_PRI2_BIT; + } + } + + public boolean isOffHeapBINPri2(int idx) { + return (entryStates[idx] & EntryStates.OFFHEAP_PRI2_BIT) != 0; + } + + public final INTargetRep getTargets() { + return entryTargets; + } + + /** + * Sets the idx'th target. No need to make dirty, that state only applies + * to key and LSN. + * + *
+ * WARNING: This method does not update the memory budget. The caller + * must update the budget.
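+ * A typical caller-side accounting pattern (an illustrative sketch;
+ * see attachNode and detachNode for the real usage):
+ * <pre>
+ *     long oldSize = getEntryInMemorySize(idx);
+ *     setTarget(idx, child);
+ *     updateMemorySize(oldSize, getEntryInMemorySize(idx));
+ * </pre>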
        + */ + void setTarget(int idx, Node target) { + + assert isLatchExclusiveOwner() : + "Not latched for write " + getClass().getName() + + " id=" + getNodeId(); + + final Node curChild = entryTargets.get(idx); + + entryTargets = entryTargets.set(idx, target, this); + + if (target != null && target.isIN()) { + ((IN) target).setParent(this); + } + + if (isUpperIN()) { + if (target == null) { + + /* + * If this UIN has just lost its last cached child, set its + * hasCachedChildren flag to false and put it back to the + * LRU list. + */ + if (curChild != null && + hasCachedChildrenFlag() && + !hasCachedChildren()) { + + setHasCachedChildrenFlag(false); + + if (!isDIN()) { + if (traceLRU) { + LoggerUtils.envLogMsg( + traceLevel, getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + getEnv().getName() + + " setTarget(): " + + " Adding UIN " + getNodeId() + + " to LRU after detaching chld " + + ((IN) curChild).getNodeId()); + } + getEvictor().addBack(this); + } + } + } else { + if (curChild == null && + !hasCachedChildrenFlag()) { + + setHasCachedChildrenFlag(true); + + if (traceLRU) { + LoggerUtils.envLogMsg( + traceLevel, getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + getEnv().getName() + + " setTarget(): " + + " Removing UIN " + getNodeId() + + " after attaching child " + + ((IN) target).getNodeId()); + } + getEvictor().remove(this); + } + } + } + } + + /** + * Return the idx'th target. + * + * This method does not load children from off-heap cache, so it always + * returns null when then child is not in main cache. Note that when + * children are INs (this is not a BIN), when this method returns null it + * is does not imply that the child is non-dirty, because dirty BINs are + * stored off-heap. To fetch the current version from off-heap cache in + * that case, call loadIN instead. + */ + public final Node getTarget(int idx) { + return entryTargets.get(idx); + } + + /** + * Returns the idx-th child of "this" upper IN, fetching the child from + * the log and attaching it to its parent if it is not already attached. + * This method is used during tree searches. + * + * On entry, the parent must be latched already. + * + * If the child must be fetched from the log, the parent is unlatched. + * After the disk read is done, the parent is relatched. However, due to + * splits, it may not be the correct parent anymore. If so, the method + * will return null, and the caller is expected to restart the tree search. + * + * On return, the parent will be latched, unless null is returned or an + * exception is thrown. + * + * The "searchKey" param is the key that the caller is looking for. It is + * used by this method in determining if, after a disk read, "this" is + * still the correct parent for the child. "searchKey" may be null if the + * caller is doing a LEFT or RIGHT search. + */ + final IN fetchINWithNoLatch(int idx, + byte [] searchKey, + CacheMode cacheMode) { + return fetchINWithNoLatch(idx, searchKey, null, cacheMode); + } + + /** + * This variant of fetchIN() takes a SearchResult as a param, instead of + * an idx (it is used by Tree.getParentINForChildIN()). The ordinal number + * of the child to fetch is specified by result.index. result.index will + * be adjusted by this method if, after a disk read, the ordinal number + * of the child changes due to insertions, deletions or splits in the + * parent. 
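+ * Illustrative caller pattern (a sketch, not the actual Tree code):
+ * <pre>
+ *     IN child = parent.fetchINWithNoLatch(result, searchKey, cacheMode);
+ *     if (child == null) {
+ *         // Parent may no longer be the right one; restart the tree
+ *         // search from the root.
+ *     }
+ * </pre>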
+ */ + final IN fetchINWithNoLatch(SearchResult result, + byte [] searchKey, + CacheMode cacheMode) { + return fetchINWithNoLatch(result.index, searchKey, result, cacheMode); + } + + /** + * Provides the implementation of the above two methods. + */ + private IN fetchINWithNoLatch( + int idx, + byte [] searchKey, + SearchResult result, + CacheMode cacheMode) { + + assert(isUpperIN()); + assert(isLatchOwner()); + + final EnvironmentImpl envImpl = getEnv(); + final OffHeapCache ohCache = envImpl.getOffHeapCache(); + + boolean isMiss = false; + boolean success = false; + + IN child = (IN)entryTargets.get(idx); + + if (child == null) { + + final long lsn = getLsn(idx); + + if (lsn == DbLsn.NULL_LSN) { + throw unexpectedState(makeFetchErrorMsg( + "NULL_LSN in upper IN", lsn, idx)); + } + + /* + * For safety we must get a copy of the BIN off-heap bytes while + * latched, but we can materialize the bytes while unlatched + * (further below) to reduce the work done while latched. + */ + byte[] ohBytes = null; + + if (getNormalizedLevel() == 2) { + ohBytes = ohCache.getBINBytes(this, idx); + } + + pin(); + try { + releaseLatch(); + + TestHookExecute.doHookIfSet(fetchINHook); + + if (ohBytes != null) { + child = ohCache.materializeBIN(envImpl, ohBytes); + } else { + final WholeEntry wholeEntry = envImpl.getLogManager(). + getLogEntryAllowInvisibleAtRecovery( + lsn, getLastLoggedSize(idx)); + + final LogEntry logEntry = wholeEntry.getEntry(); + + child = (IN) logEntry.getResolvedItem(databaseImpl); + + isMiss = true; + } + + latch(CacheMode.UNCHANGED); + + /* + * The following if statement relies on splits being logged + * immediately, or more precisely, the split node and its + * new sibling being logged immediately, while both siblings + * and their parent are latched exclusively. The reason for + * this is as follows: + * + * Let K be the search key. If we are doing a left-deep or + * right-deep search, K is -INF or +INF respectively. + * + * Let P be the parent IN (i.e., "this") and S be the slot at + * the idx position before P was unlatched above. Here, we + * view slots as independent objects, not identified by their + * position in an IN but by some unique (imaginary) and + * immutable id assigned to the slot when it is first inserted + * into an IN. + * + * Before unlatching P, S was the correct slot to follow down + * the tree looking for K. After P is unlatched and then + * relatched, let S' be the slot at the idx position, if P + * still has an idx position. We consider the following 2 cases: + * + * 1. S' exists and S'.LSN == S.LSN. Then S and S' are the same + * (logical) slot (because two different slots can never cover + * overlapping ranges of keys, and as a result, can never point + * to the same LSN). Then, S is still the correct slot to take + * down the tree, unless the range of keys covered by S has + * shrunk while P was unlatched. But the only way for S's key + * range to shrink is for its child IN to split, which could + * not have happened because if it did, the before and after + * LSNs of S would be different, given that splits are logged + * immediately. We conclude that the set of keys covered by + * S after P is relatched is the same or a superset of the keys + * covered by S before P was unlatched, and thus S (at the idx + * position) is still the correct slot to follow. + * + * 2. There is no idx position in P or S'.LSN != S.LSN. In + * this case we cannot be sure if S' (if it exists) is the + * the correct slot to follow. 
So, we (re)search for K in P + * to check if P is still the correct parent and find the + * correct slot to follow. If this search lands on the 1st or + * last slot in P, we may return null because using the key + * info contained in P only, we do not know the full range of + * keys covered by those two slots. If null is returned, the + * caller is expected to restart the tree search from the root. + * + * Notice that the if conditions are necessary before calling + * findEntry(). Without them, we could get into an infinite + * loop of search re-tries in the scenario where nothing changes + * in the tree and findEntry always lands on the 1st or last + * slot in P. The conditions guarantee that we may restart the + * tree search only if something changes with S while P is + * unlatched (S moves to a different position or a different + * IN or it points to a different LSN). + * + * Notice also that if P does not split while it is unlatched, + * the range of keys covered by P does not change either. This + * implies that the correct slot to follow *must* be inside P, + * and as a result, the 1st and last slots in P can be trusted. + * Unfortunately, however, we have no way to detecting reliably + * whether P splits or not. + * + * Special care for DBs in DW mode: + * + * For DBs in DW mode, special care must be taken because + * splits are not immediately logged. So, for DW DBs, to avoid + * a call to findEntry() we require that not only S'.LSN == + * S.LSN, but also the the child is not cached. These 2 + * conditions together guarantee that the child did not split + * while P was unlatched, because if the child did split, it + * was fetched and cached first, so after P is relatched, + * either the child would be still cached, or if it was evicted + * after the split, S.LSN would have changed. + */ + if (idx >= nEntries || + getLsn(idx) != lsn || + (databaseImpl.isDeferredWriteMode() && + entryTargets.get(idx) != null)) { + + if (searchKey == null) { + return null; + } + + idx = findEntry(searchKey, false, false); + + if ((idx == 0 || idx == nEntries - 1) && + !isKeyInBounds(searchKey)) { + return null; + } + } + + if (result != null) { + result.index = idx; + } + + /* + * "this" is still the correct parent and "idx" points to the + * correct slot to follow for the search down the tree. But + * what we fetched from the log may be out-of-date by now + * (because it was fetched and then updated by other threads) + * or it may not be the correct child anymore ("idx" was + * changed by the findEntry() call above). We check 5 cases: + * + * (a) There is already a cached child at the "idx" position. + * In this case, we return whatever is there because it has to + * be the most recent version of the appropriate child node. + * This is true even when a split or reverse split occurred. + * The check for isKeyInBounds above is critical in that case. + * + * (b) There is no cached child at the "idx" slot, but the slot + * LSN is not the same as the LSN we read from the log. This is + * the case if "idx" was changed by findEntry() or other + * threads fetched the same child as this thread, updated it, + * and then evicted it. The child we fetched is obsolete and + * should not be attached. For simplicity, we just return null + * in this (quite rare) case. + * + * (c) We loaded the BIN from off-heap cache and, similar to + * case (b), another thread has loaded the same child, modified + * it, and then evicted it, placing it off-heap again. It's LSN + * did not change because it wasn't logged. 
We determine + * whether the off-heap BIN has changed, and if so then + * null is returned. This is also rare. + * + * (d) The child was loaded from disk (not off-heap cache) but + * an off-heap cache entry for this BIN has appeared. Another + * thread loaded the BIN from disk and then it was moved + * off-heap, possibly after it was modified. We should use the + * off-heap version and for simplicity we return null. This is + * also rare. + * + * (e) Otherwise, we attach the fetched/loaded child to the + * parent. + */ + if (entryTargets.get(idx) != null) { + child = (IN) entryTargets.get(idx); + + } else if (getLsn(idx) != lsn) { + return null; + + } else if (ohBytes != null && + ohCache.haveBINBytesChanged(this, idx, ohBytes)) { + return null; + + } else if (ohBytes == null && + getOffHeapBINId(idx) >= 0) { + return null; + + } else { + child.latchNoUpdateLRU(databaseImpl); + + if (ohBytes != null) { + child.postLoadInit(this, idx); + } else { + child.postFetchInit(databaseImpl, lsn); + } + + attachNode(idx, child, null); + + child.releaseLatch(); + } + + success = true; + + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + makeFetchErrorMsg(null, lsn, idx), e); + + } catch (EnvironmentFailureException e) { + e.addErrorMessage(makeFetchErrorMsg(null, lsn, idx)); + throw e; + + } catch (RuntimeException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + makeFetchErrorMsg(e.toString(), lsn, idx), e); + } finally { + /* + * Release the parent latch if null is being returned. In this + * case, the parent was unlatched earlier during the disk read, + * and as a result, the caller cannot make any assumptions + * about the state of the parent. + * + * If we are returning or throwing out of this try block, the + * parent may or may not be latched. So, only release the latch + * if it is currently held. + */ + if (!success) { + if (child != null) { + child.incFetchStats(envImpl, isMiss); + } + releaseLatchIfOwner(); + } + + unpin(); + } + } + + assert(hasCachedChildren() == hasCachedChildrenFlag()); + + child.incFetchStats(envImpl, isMiss); + + return child; + } + + /** + * Returns the idx-th child of "this" upper IN, fetching the child from + * the log and attaching it to its parent if it is not already attached. + * + * On entry, the parent must be EX-latched already and it stays EX-latched + * for the duration of this method and on return (even in case of + * exceptions). + * + * @param idx The slot of the child to fetch. + */ + public IN fetchIN(int idx, CacheMode cacheMode) { + + assert(isUpperIN()); + if (!isLatchExclusiveOwner()) { + throw unexpectedState("EX-latch not held before fetch"); + } + + final EnvironmentImpl envImpl = getEnv(); + final OffHeapCache ohCache = envImpl.getOffHeapCache(); + boolean isMiss = false; + + IN child = (IN) entryTargets.get(idx); + + if (child == null) { + + final long lsn = getLsn(idx); + + if (lsn == DbLsn.NULL_LSN) { + throw unexpectedState( + makeFetchErrorMsg("NULL_LSN in upper IN", lsn, idx)); + } + + try { + byte[] ohBytes = null; + + if (getNormalizedLevel() == 2) { + ohBytes = ohCache.getBINBytes(this, idx); + if (ohBytes != null) { + child = ohCache.materializeBIN(envImpl, ohBytes); + } + } + + if (child == null) { + final WholeEntry wholeEntry = envImpl.getLogManager(). 
+ getLogEntryAllowInvisibleAtRecovery( + lsn, getLastLoggedSize(idx)); + + final LogEntry logEntry = wholeEntry.getEntry(); + child = (IN) logEntry.getResolvedItem(databaseImpl); + + isMiss = true; + } + + child.latchNoUpdateLRU(databaseImpl); + + if (ohBytes != null) { + child.postLoadInit(this, idx); + } else { + child.postFetchInit(databaseImpl, lsn); + } + + attachNode(idx, child, null); + + child.releaseLatch(); + + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + makeFetchErrorMsg(null, lsn, idx), e); + + } catch (EnvironmentFailureException e) { + e.addErrorMessage(makeFetchErrorMsg(null, lsn, idx)); + throw e; + + } catch (RuntimeException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + makeFetchErrorMsg(e.toString(), lsn, idx), e); + } + } + + assert(hasCachedChildren() == hasCachedChildrenFlag()); + + child.incFetchStats(envImpl, isMiss); + + return child; + } + + /** + * Returns the idx-th child of "this" upper IN, loading the child from + * off-heap and attaching it to its parent if it is not already attached + * and is cached off-heap. This method does not fetch from disk, and will + * return null if the child is not in the main or off-heap cache. + * + * On entry, the parent must be EX-latched already and it stays EX-latched + * for the duration of this method and on return (even in case of + * exceptions). + * + * @param idx The slot of the child to fetch. + * + * @return null if the LN is not in the main or off-heap cache. + */ + public IN loadIN(int idx, CacheMode cacheMode) { + + assert(isUpperIN()); + if (!isLatchExclusiveOwner()) { + throw unexpectedState("EX-latch not held before load"); + } + + IN child = (IN) entryTargets.get(idx); + + if (child != null) { + return child; + } + + if (getNormalizedLevel() != 2) { + return null; + } + + final EnvironmentImpl envImpl = getEnv(); + final OffHeapCache ohCache = envImpl.getOffHeapCache(); + + final long lsn = getLsn(idx); + + try { + final byte[] ohBytes = ohCache.getBINBytes(this, idx); + if (ohBytes == null) { + return null; + } + + child = ohCache.materializeBIN(envImpl, ohBytes); + child.latchNoUpdateLRU(databaseImpl); + child.postLoadInit(this, idx); + attachNode(idx, child, null); + child.releaseLatch(); + + return child; + + } catch (RuntimeException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + makeFetchErrorMsg(e.toString(), lsn, idx), e); + } + } + + /** + * Returns the target of the idx'th entry, fetching from disk if necessary. + * + * Null is returned in the following cases: + * + * 1. if the LSN is null and the KnownDeleted flag is set; or + * 2. if the LSN's file has been cleaned and: + * a. the PendingDeleted or KnownDeleted flag is set, or + * b. the entry is "probably expired". + * + * Note that checking for PD/KD before calling this method is not + * sufficient to ensure that null is not returned, because null is also + * returned for expired records. + * + * When null is returned, the caller must treat the record as deleted. + * + * Note that null can only be returned for a slot that could contain an LN, + * not other node types and not a DupCountLN since DupCountLNs are never + * deleted or expired. + * + * An exclusive latch must be held on this BIN. + * + * @return the LN or null. 
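+ * Illustrative caller sketch (null must be treated as deleted):
+ * <pre>
+ *     LN ln = bin.fetchLN(idx, cacheMode);
+ *     if (ln == null) {
+ *         // Record is deleted or expired; skip this slot.
+ *     }
+ * </pre>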
+ */ + public final LN fetchLN(int idx, CacheMode cacheMode) { + + return (LN) fetchLN(idx, cacheMode, false); + } + + /* + * This method may return either an LN or a DIN child of a BIN. It is meant + * to be used from DupConvert only. + */ + public final Node fetchLNOrDIN(int idx, CacheMode cacheMode) { + + return fetchLN(idx, cacheMode, true); + } + + /* + * Underlying implementation of the above fetchLNXXX methods. + */ + private Node fetchLN(int idx, CacheMode cacheMode, boolean dupConvert) { + + assert(isBIN()); + + if (!isLatchExclusiveOwner()) { + throw unexpectedState("EX-latch not held before fetch"); + } + + if (isEntryKnownDeleted(idx)) { + return null; + } + + final BIN bin = (BIN) this; + final EnvironmentImpl envImpl = getEnv(); + final OffHeapCache ohCache = envImpl.getOffHeapCache(); + boolean isMiss = false; + + Node child = entryTargets.get(idx); + + /* Fetch it from disk. */ + if (child == null) { + + final long lsn = getLsn(idx); + + if (lsn == DbLsn.NULL_LSN) { + throw unexpectedState(makeFetchErrorMsg( + "NULL_LSN without KnownDeleted", lsn, idx)); + } + + /* + * Fetch of immediately obsolete LN not allowed. The only exception + * is during conversion of an old-style dups DB. + */ + if (isEmbeddedLN(idx) || + (databaseImpl.isLNImmediatelyObsolete() && !dupConvert)) { + throw unexpectedState("May not fetch immediately obsolete LN"); + } + + try { + byte[] lnSlotKey = null; + + child = ohCache.loadLN(bin, idx, cacheMode); + + if (child == null) { + final WholeEntry wholeEntry = envImpl.getLogManager(). + getLogEntryAllowInvisibleAtRecovery( + lsn, getLastLoggedSize(idx)); + + /* Last logged size is not present before log version 9. */ + setLastLoggedSize( + idx, wholeEntry.getHeader().getEntrySize()); + + final LogEntry logEntry = wholeEntry.getEntry(); + + if (logEntry instanceof LNLogEntry) { + + final LNLogEntry lnEntry = + (LNLogEntry) wholeEntry.getEntry(); + + lnEntry.postFetchInit(databaseImpl); + + lnSlotKey = lnEntry.getKey(); + + if (cacheMode != CacheMode.EVICT_LN && + cacheMode != CacheMode.EVICT_BIN && + cacheMode != CacheMode.UNCHANGED && + cacheMode != CacheMode.MAKE_COLD) { + getEvictor().moveToPri1LRU(this); + } + } + + child = (Node) logEntry.getResolvedItem(databaseImpl); + + isMiss = true; + } + + child.postFetchInit(databaseImpl, lsn); + attachNode(idx, child, lnSlotKey); + + } catch (FileNotFoundException e) { + + if (!bin.isDeleted(idx) && + !bin.isProbablyExpired(idx)) { + + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + makeFetchErrorMsg(null, lsn, idx), e); + } + + /* + * Cleaner got to the log file, so just return null. It is safe + * to ignore a deleted file for a KD or PD entry because files + * with active txns will not be cleaned. + */ + return null; + + } catch (EnvironmentFailureException e) { + e.addErrorMessage(makeFetchErrorMsg(null, lsn, idx)); + throw e; + + } catch (RuntimeException e) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + makeFetchErrorMsg(e.toString(), lsn, idx), e); + } + } + + if (child.isLN()) { + final LN ln = (LN) child; + + if (cacheMode != CacheMode.UNCHANGED && + cacheMode != CacheMode.MAKE_COLD) { + ln.setFetchedCold(false); + } + + ohCache.freeRedundantLN(bin, idx, ln, cacheMode); + } + + child.incFetchStats(envImpl, isMiss); + + return child; + } + + /** + * Return the idx'th LN target, enforcing rules defined by the cache modes + * for the LN. 
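+ * (For example, CacheMode.DEFAULT clears the LN's fetched-cold flag,
+ * while CacheMode.UNCHANGED and CacheMode.MAKE_COLD leave it intact.)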
This method should be called instead of getTarget when a + * cache mode applies to user operations such as reads and updates. + */ + public final LN getLN(int idx, CacheMode cacheMode) { + assert isBIN(); + + final LN ln = (LN) entryTargets.get(idx); + + if (ln == null) { + return null; + } + + if (cacheMode != CacheMode.UNCHANGED && + cacheMode != CacheMode.MAKE_COLD) { + ln.setFetchedCold(false); + } + + final OffHeapCache ohCache = getOffHeapCache(); + + if (ohCache.isEnabled()) { + ohCache.freeRedundantLN((BIN) this, idx, ln, cacheMode); + } + + return ln; + } + + /** + * Initialize a node that has been read in from the log. + */ + @Override + public final void postFetchInit(DatabaseImpl db, long fetchedLsn) { + assert isLatchExclusiveOwner(); + + commonInit(db); + setLastLoggedLsn(fetchedLsn); + convertDupKeys(); // must be after initMemorySize + addToMainCache(); + + if (isBIN()) { + setFetchedCold(true); + } + + /* See Database.mutateDeferredWriteBINDeltas. */ + if (db.isDeferredWriteMode()) { + mutateToFullBIN(false); + } + } + + /** + * Initialize a BIN loaded from off-heap cache. + * + * Does not call setLastLoggedLsn because materialization of the off-heap + * BIN initializes all fields including the last logged/delta LSNs. + */ + private void postLoadInit(IN parent, int idx) { + assert isLatchExclusiveOwner(); + + commonInit(parent.databaseImpl); + addToMainCache(); + + if (isBIN()) { + setFetchedCold(true); + setFetchedColdOffHeap(true); + } + + getEnv().getOffHeapCache().postBINLoad(parent, idx, (BIN) this); + } + + /** + * Initialize a node read in during recovery. + */ + public final void postRecoveryInit(DatabaseImpl db, long lastLoggedLsn) { + commonInit(db); + setLastLoggedLsn(lastLoggedLsn); + } + + /** + * Common actions of postFetchInit, postLoadInit and postRecoveryInit. + */ + private void commonInit(DatabaseImpl db) { + setDatabase(db); + initMemorySize(); // compute before adding to IN list + } + + /** + * Add to INList and perform eviction related initialization. + */ + private void addToMainCache() { + + getEnv().getInMemoryINs().add(this); + + if (!isDIN() && !isDBIN()) { + if (isUpperIN() && traceLRU) { + LoggerUtils.envLogMsg( + traceLevel, getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + getEnv().getName() + + " postFetchInit(): " + + " Adding UIN to LRU: " + getNodeId()); + } + getEvictor().addBack(this); + } + + /* Compress full BINs after fetching or loading. */ + if (!(this instanceof DBIN || this instanceof DIN)) { + getEnv().lazyCompress(this); + } + } + + /** + * Needed only during duplicates conversion, not during normal operation. + * The needDupKeyConversion field will only be true when first upgrading + * from JE 4.1. After the first time an IN is converted, it will be + * written out in a later file format, so the needDupKeyConversion field + * will be false and this method will do nothing. See + * DupConvert.convertInKeys. + */ + private void convertDupKeys() { + /* Do not convert more than once. 
*/ + if (!needDupKeyConversion) { + return; + } + needDupKeyConversion = false; + DupConvert.convertInKeys(databaseImpl, this); + } + + /** + * @see Node#incFetchStats + */ + @Override + final void incFetchStats(EnvironmentImpl envImpl, boolean isMiss) { + Evictor e = envImpl.getEvictor(); + if (isBIN()) { + e.incBINFetchStats(isMiss, isBINDelta(false/*checLatched*/)); + } else { + e.incUINFetchStats(isMiss); + } + } + + public String makeFetchErrorMsg( + final String msg, + final long lsn, + final int idx) { + + final byte state = idx >= 0 ? entryStates[idx] : 0; + + final long expirationTime; + + if (isBIN() && idx >= 0) { + + final BIN bin = (BIN) this; + + expirationTime = TTL.expirationToSystemTime( + bin.getExpiration(idx), isExpirationInHours()); + + } else { + expirationTime = 0; + } + + return makeFetchErrorMsg(msg, this, lsn, state, expirationTime); + } + + /** + * @param in parent IN. Is null when root is fetched. + */ + static String makeFetchErrorMsg( + String msg, + IN in, + long lsn, + byte state, + long expirationTime) { + + /* + * Bolster the exception with the LSN, which is critical for + * debugging. Otherwise, the exception propagates upward and loses the + * problem LSN. + */ + StringBuilder sb = new StringBuilder(); + + if (in == null) { + sb.append("fetchRoot of "); + } else if (in.isBIN()) { + sb.append("fetchLN of "); + } else { + sb.append("fetchIN of "); + } + + if (lsn == DbLsn.NULL_LSN) { + sb.append("null lsn"); + } else { + sb.append(DbLsn.getNoFormatString(lsn)); + } + + if (in != null) { + sb.append(" parent IN=").append(in.getNodeId()); + sb.append(" IN class=").append(in.getClass().getName()); + sb.append(" lastFullLsn="); + sb.append(DbLsn.getNoFormatString(in.getLastFullLsn())); + sb.append(" lastLoggedLsn="); + sb.append(DbLsn.getNoFormatString(in.getLastLoggedLsn())); + sb.append(" parent.getDirty()=").append(in.getDirty()); + } + + sb.append(" state=").append(state); + + sb.append(" expires="); + + if (expirationTime != 0) { + sb.append(TTL.formatExpirationTime(expirationTime)); + } else { + sb.append("never"); + } + + if (msg != null) { + sb.append(" ").append(msg); + } + + return sb.toString(); + } + + public final int findEntry( + byte[] key, + boolean indicateIfDuplicate, + boolean exact) { + + return findEntry(key, indicateIfDuplicate, exact, null /*Comparator*/); + } + + /** + * Find the entry in this IN for which key is LTE the key arg. + * + * Currently uses a binary search, but eventually, this may use binary or + * linear search depending on key size, number of entries, etc. + * + * This method guarantees that the key parameter, which is the user's key + * parameter in user-initiated search operations, is always the left hand + * parameter to the Comparator.compare method. This allows a comparator + * to perform specialized searches, when passed down from upper layers. + * + * This is public so that DbCursorTest can access it. + * + * Note that the 0'th entry's key is treated specially in an IN. It always + * compares lower than any other key. + * + * @param key - the key to search for. + * @param indicateIfDuplicate - true if EXACT_MATCH should + * be or'd onto the return value if key is already present in this node. + * @param exact - true if an exact match must be found. + * @return offset for the entry that has a key LTE the arg. 0 if key + * is less than the 1st entry. -1 if exact is true and no exact match + * is found. If indicateIfDuplicate is true and an exact match was found + * then EXACT_MATCH is or'd onto the return value. 
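+ * Example (a BIN holding byte-ordered keys {"b", "d", "f"};
+ * illustrative values only, since BINs do not use the special
+ * entry-zero comparison):
+ * <pre>
+ *     findEntry("d", true,  false) == 1 | EXACT_MATCH;
+ *     findEntry("c", false, false) == 0;  // greatest key LTE "c" is "b"
+ *     findEntry("c", false, true)  == -1; // no exact match
+ * </pre>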
+ */ + public final int findEntry( + byte[] key, + boolean indicateIfDuplicate, + boolean exact, + Comparator comparator) { + + assert idKeyIsSlotKey(); + + int high = nEntries - 1; + int low = 0; + int middle = 0; + + if (comparator == null) { + comparator = databaseImpl.getKeyComparator(); + } + + /* + * Special Treatment of 0th Entry + * ------------------------------ + * IN's are special in that they have a entry[0] where the key is a + * virtual key in that it always compares lower than any other key. + * BIN's don't treat key[0] specially. But if the caller asked for an + * exact match or to indicate duplicates, then use the key[0] and + * forget about the special entry zero comparison. + * + * We always use inexact searching to get down to the BIN, and then + * call findEntry separately on the BIN if necessary. So the behavior + * of findEntry is different for BINs and INs, because it's used in + * different ways. + * + * Consider a tree where the lowest key is "b" and we want to insert + * "a". If we did the comparison (with exact == false), we wouldn't + * find the correct (i.e. the left) path down the tree. So the + * virtual key ensures that "a" gets inserted down the left path. + * + * The insertion case is a good specific example. findBinForInsert + * does inexact searching in the INs only, not the BIN. + * + * There's nothing special about the 0th key itself, only the use of + * the 0th key in the comparison algorithm. + */ + boolean entryZeroSpecialCompare = + isUpperIN() && !exact && !indicateIfDuplicate; + + assert nEntries >= 0; + + while (low <= high) { + + middle = (high + low) / 2; + int s; + + if (middle == 0 && entryZeroSpecialCompare) { + s = 1; + } else { + s = entryKeys.compareKeys( + key, keyPrefix, middle, + haveEmbeddedData(middle), comparator); + } + + if (s < 0) { + high = middle - 1; + } else if (s > 0) { + low = middle + 1; + } else { + int ret; + if (indicateIfDuplicate) { + ret = middle | EXACT_MATCH; + } else { + ret = middle; + } + + if ((ret >= 0) && exact && isEntryKnownDeleted(ret & 0xffff)) { + return -1; + } else { + return ret; + } + } + } + + /* + * No match found. Either return -1 if caller wanted exact matches + * only, or return entry whose key is < search key. + */ + if (exact) { + return -1; + } else { + return high; + } + } + + /** + * Inserts a slot with the given key, lsn and child node into this IN, if + * a slot with the same key does not exist already. The state of the new + * slot is set to DIRTY. Assumes this node is already latched by the + * caller. + * + * @return true if the entry was successfully inserted, false + * if it was a duplicate. + * + * @throws EnvironmentFailureException if the node is full + * (it should have been split earlier). + */ + public final boolean insertEntry( + Node child, + byte[] key, + long childLsn) + throws DatabaseException { + + assert(!isBINDelta()); + + int res = insertEntry1( + child, key, null, childLsn, EntryStates.DIRTY_BIT, false); + + return (res & INSERT_SUCCESS) != 0; + } + + /** + * Inserts a slot with the given key, lsn and child node into this IN, if + * a slot with the same key does not exist already. The state of the new + * slot is set to DIRTY. Assumes this node is already latched by the + * caller. + * + * @param data If the data portion of a record must be embedded in this + * BIN, "data" stores the record's data. Null otherwise. See also comment + * for the keyEntries field. 
+ * + * @return either (1) the index of location in the IN where the entry was + * inserted |'d with INSERT_SUCCESS, or (2) the index of the duplicate in + * the IN if the entry was found to be a duplicate. + * + * @throws EnvironmentFailureException if the node is full (it should have + * been split earlier). + */ + public final int insertEntry1( + Node child, + byte[] key, + byte[] data, + long childLsn, + boolean blindInsertion) { + + return insertEntry1( + child, key, data, childLsn, EntryStates.DIRTY_BIT, + blindInsertion); + } + + /** + * Inserts a slot with the given key, lsn, state, and child node into this + * IN, if a slot with the same key does not exist already. Assumes this + * node is already latched by the caller. + * + * This returns a failure if there's a duplicate match. The caller must do + * the processing to check if the entry is actually deleted and can be + * overwritten. This is foisted upon the caller rather than handled in this + * object because there may be some latch releasing/retaking in order to + * check a child LN. + * + * @param data If the data portion of a record must be embedded in this + * BIN, "data" stores the record's data. Null otherwise. See also comment + * for the keyEntries field. + * + * @return either (1) the index of location in the IN where the entry was + * inserted |'d with INSERT_SUCCESS, or (2) the index of the duplicate in + * the IN if the entry was found to be a duplicate. + * + * @throws EnvironmentFailureException if the node is full (it should have + * been split earlier). + */ + public final int insertEntry1( + Node child, + byte[] key, + byte[] data, + long childLsn, + byte state, + boolean blindInsertion) { + + /* + * Search without requiring an exact match, but do let us know the + * index of the match if there is one. + */ + int index = findEntry(key, true, false); + + if (index >= 0 && (index & EXACT_MATCH) != 0) { + + /* + * There is an exact match. Don't insert; let the caller decide + * what to do with this duplicate. + */ + return index & ~IN.EXACT_MATCH; + } + + /* + * There was no key match, but if this is a bin delta, there may be an + * exact match in the full bin. Mutate to full bin and search again. + * However, if we know for sure that the key does not exist in the full + * BIN, then don't mutate, unless there is no space in the delta to do + * the insertion. + */ + if (isBINDelta()) { + + BIN bin = (BIN)this; + + boolean doBlindInsertion = (nEntries < getMaxEntries()); + + if (doBlindInsertion && + !blindInsertion && + bin.mayHaveKeyInFullBin(key)) { + + doBlindInsertion = false; + } + + if (!doBlindInsertion) { + + mutateToFullBIN(true /*leaveFreeSlot*/); + + index = findEntry(key, true, false); + + if (index >= 0 && (index & EXACT_MATCH) != 0) { + return index & ~IN.EXACT_MATCH; + } + } else { + getEvictor().incBinDeltaBlindOps(); + + if (traceDeltas) { + LoggerUtils.envLogMsg( + traceLevel, getEnv(), + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + getEnv().getName() + + (blindInsertion ? 
+ " Blind insertion in BIN-delta " : + " Blind put in BIN-delta ") + + getNodeId() + " nEntries = " + + nEntries + " max entries = " + + getMaxEntries() + + " full BIN entries = " + + bin.getFullBinNEntries() + + " full BIN max entries = " + + bin.getFullBinMaxEntries()); + } + } + } + + if (nEntries >= getMaxEntries()) { + throw unexpectedState( + getEnv(), + "Node " + getNodeId() + + " should have been split before calling insertEntry" + + " is BIN-delta: " + isBINDelta() + + " num entries: " + nEntries + + " max entries: " + getMaxEntries()); + } + + /* There was no key match, so insert to the right of this entry. */ + index++; + + /* We found a spot for insert, shift entries as needed. */ + if (index < nEntries) { + int oldSize = computeLsnOverhead(); + + /* Adding elements to the LSN array can change the space used. */ + shiftEntriesRight(index); + + updateMemorySize(computeLsnOverhead() - oldSize); + } else { + nEntries++; + } + + if (isBINDelta()) { + ((BIN)this).incFullBinNEntries(); + } + + int oldSize = computeLsnOverhead(); + + if (data == null || databaseImpl.isDeferredWriteMode()) { + setTarget(index, child); + } + + setLsnInternal(index, childLsn); + + boolean multiSlotChange = insertKey(index, key, data); + + /* + * Do this after calling insert key to overwrite whatever state changes + * were done by the insertEntry() call. + */ + entryStates[index] = state; + + if (data != null) { + setEmbeddedLN(index); + if (data.length == 0) { + setNoDataLN(index); + } + } + + adjustCursorsForInsert(index); + + updateMemorySize(oldSize, + getEntryInMemorySize(index) + + computeLsnOverhead()); + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } + + setDirty(true); + + assert(isBIN() || hasCachedChildren() == hasCachedChildrenFlag()); + + return (index | INSERT_SUCCESS); + } + + /** + * Removes the slot at index from this IN. Assumes this node is already + * latched by the caller. + * + * @param index The index of the entry to delete from the IN. + */ + public void deleteEntry(int index) { + deleteEntry(index, true /*makeDirty*/, true /*validate*/); + } + + /** + * Variant that allows specifying whether the IN is dirtied and whether + * validation takes place. 'validate' should be false only in tests. + * + * See BIN.compress and INCompressor for a discussion about why slots can + * be deleted without dirtying the BIN, and why the next delta is + * prohibited when the slot is dirty. + */ + void deleteEntry(int index, boolean makeDirty, boolean validate) { + + assert !isBINDelta(); + assert index >= 0 && index < nEntries; + assert !validate || validateSubtreeBeforeDelete(index); + + if (makeDirty) { + setDirty(true); + } + + if (isDirty(index)) { + setProhibitNextDelta(true); + } + + Node child = getTarget(index); + + final OffHeapCache ohCache = getEnv().getOffHeapCache(); + final int level = getNormalizedLevel(); + if (level == 1) { + ohCache.freeLN((BIN) this, index); + } else if (level == 2) { + ohCache.freeBIN((BIN) child, this, index); + } + + if (child != null && child.isIN()) { + IN childIN = (IN)child; + getEnv().getInMemoryINs().remove(childIN); + } + + updateMemorySize(getEntryInMemorySize(index), 0); + int oldLSNArraySize = computeLsnOverhead(); + + /* + * Do the actual deletion. Note: setTarget() must be called before + * copyEntries() so that the hasCachedChildrenFlag will be properly + * maintained. 
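+ * E.g. (illustrative): deleting index 1 from entries [A, B, C, D]
+ * copies [C, D] one slot to the left, giving [A, C, D], decrements
+ * nEntries to 3, and clears the now-unused last slot.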
+ */ + setTarget(index, null); + copyEntries(index + 1, index, nEntries - index - 1); + nEntries--; + + /* cleanup what used to be the last entry */ + clearEntry(nEntries); + + /* setLsnInternal can mutate to an array of longs. */ + updateMemorySize(oldLSNArraySize, computeLsnOverhead()); + + assert(isBIN() || hasCachedChildrenFlag() == hasCachedChildren()); + + /* + * Note that we don't have to adjust cursors for delete, since + * there should be nothing pointing at this record. + */ + traceDelete(Level.FINEST, index); + } + + /** + * WARNING: clearEntry() calls entryTargets.set() directly, instead of + * setTarget(). As a result, the hasCachedChildren flag of the IN is not + * updated here. The caller is responsible for updating this flag, if + * needed. + */ + void clearEntry(int idx) { + + entryTargets = entryTargets.set(idx, null, this); + entryKeys = entryKeys.set(idx, null, this); + offHeapBINIds = offHeapBINIds.set(idx, 0, this); + setLsnInternal(idx, DbLsn.NULL_LSN); + entryStates[idx] = 0; + } + + /** + * This method is called after the idx'th child of this node gets logged, + * and changes position as a result. + * + * @param newLSN The new on-disk position of the child. + * + * @param newVLSN The VLSN of the logrec at the new position. + * For LN children only. + * + * @param newSize The size of the logrec at the new position. + * For LN children only. + */ + public final void updateEntry( + int idx, + long newLSN, + long newVLSN, + int newSize) { + + setLsn(idx, newLSN); + + if (isBIN()) { + if (isEmbeddedLN(idx)) { + ((BIN)this).setCachedVLSN(idx, newVLSN); + } else { + setLastLoggedSize(idx, newSize); + } + } + + setDirty(true); + } + + /** + * This method is called only from BIN.applyDelta(). It applies the info + * extracted from a delta slot to the corresponding slot in the full BIN. + * + * Unlike other update methods, the LSN may be NULL_LSN if the KD flag is + * set. This allows applying a BIN-delta with a NULL_LSN and KD, for an + * invisible log entry for example. + * + * No need to do memory counting in this method because the BIN is not + * yet attached to the tree. + */ + final void applyDeltaSlot( + int idx, + Node node, + long lsn, + int lastLoggedSize, + byte state, + byte[] key, + byte[] data) { + + assert(isBIN()); + assert(!isBINDelta()); + assert(lsn != DbLsn.NULL_LSN || + (state & EntryStates.KNOWN_DELETED_BIT) != 0); + assert(node == null || data == null); + assert(!getInListResident()); + + ((BIN) this).freeOffHeapLN(idx); + + setLsn(idx, lsn, false/*check*/); + setLastLoggedSize(idx, lastLoggedSize); + setTarget(idx, node); + + updateLNSlotKey(idx, key, data); + + assert(isEmbeddedLN(idx) == isEmbeddedLN(state)); + assert(isNoDataLN(idx) == isNoDataLN(state)); + + entryStates[idx] = state; + + setDirty(true); + } + + /** + * Update the idx slot of this BIN to reflect a record insertion in an + * existing KD slot. It is called from CursorImpl.insertRecordInternal(), + * after logging the insertion op. + * + * @param newLN The LN associated with the new record. + * + * @param newLSN The LSN of the insertion logrec. + * + * @param newSize The size of the insertion logrec. + * + * @param newKey The value for the record's key. It is equal to the current + * key value in the slot, but may not be identical to that value if a + * custom comparator is used. + * + * @param newData If the record's data must be embedded in this BIN, "data" + * stores the record's data. Null otherwise. See also comment for the + * keyEntries field. 
+ */ + public final void insertRecord( + int idx, + LN newLN, + long newLSN, + int newSize, + byte[] newKey, + byte[] newData, + int expiration, + boolean expirationInHours) { + + assert(isBIN()); + + final BIN bin = (BIN) this; + + bin.freeOffHeapLN(idx); // old version of the LN is stale + + long oldSlotSize = getEntryInMemorySize(idx); + + setLsn(idx, newLSN); + + boolean multiSlotChange = updateLNSlotKey(idx, newKey, newData); + + if (isEmbeddedLN(idx)) { + + clearLastLoggedSize(idx); + + bin.setCachedVLSN(idx, newLN.getVLSNSequence()); + + if (databaseImpl.isDeferredWriteMode()) { + setTarget(idx, newLN); + } + } else { + setTarget(idx, newLN); + setLastLoggedSize(idx, newSize); + } + + bin.setExpiration(idx, expiration, expirationInHours); + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } else { + long newSlotSize = getEntryInMemorySize(idx); + updateMemorySize(oldSlotSize, newSlotSize); + } + + clearKnownDeleted(idx); + clearPendingDeleted(idx); + setDirty(true); + + assert(isBIN() || hasCachedChildren() == hasCachedChildrenFlag()); + } + + /** + * Update the idx slot of this BIN to reflect an update of the associated + * record. It is called from CursorImpl.updateRecordInternal(), after + * logging the update op. + * + * @param oldMemSize If the child LN was cached before the update op, it has + * already been updated in-place by the caller. In this case, oldMemSize + * stores the size of the child LN before the update, and it is used to do + * memory counting. Otherwise oldMemSize is 0 and the newly created LN has + * not been attached to the tree; it will be attached later by the caller, + * if needed. + * + * @param newLSN The LSN of the update logrec. + * + * @param newVLSN The VLSN of the update logrec. + * + * @param newSize The on-disk size of the update logrec. + * + * @param newKey The new value for the record's key. It is equal to the + * current value, but may not be identical to the current value if a + * custom comparator is used. It may be null, if the caller knows for + * sure that the key does not change. + * + * @param newData If the record's data must be embedded in this BIN, "data" + * stores the record's data. Null otherwise. See also comment for the + * keyEntries field. + */ + public final void updateRecord( + int idx, + long oldMemSize, + long newLSN, + long newVLSN, + int newSize, + byte[] newKey, + byte[] newData, + int expiration, + boolean expirationInHours) { + + assert(isBIN()); + + final BIN bin = (BIN) this; + + bin.freeOffHeapLN(idx); // old version of the LN is stale + + long oldSlotSize = getEntryInMemorySize(idx); + + setLsn(idx, newLSN); + + boolean multiSlotChange = updateLNSlotKey(idx, newKey, newData); + + if (isEmbeddedLN(idx)) { + clearLastLoggedSize(idx); + ((BIN)this).setCachedVLSN(idx, newVLSN); + } else { + setLastLoggedSize(idx, newSize); + } + + bin.setExpiration(idx, expiration, expirationInHours); + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } else { + /* Update mem size for key change. */ + long newSlotSize = getEntryInMemorySize(idx); + updateMemorySize(oldSlotSize, newSlotSize); + + /* Update mem size for node change. */ + Node newLN = entryTargets.get(idx); + long newMemSize = + (newLN != null ? newLN.getMemorySizeIncludedByParent() : 0); + updateMemorySize(oldMemSize, newMemSize); + } + + setDirty(true); + } + + /** + * Update the idx slot slot of this BIN to reflect a deletion of the + * associated record. 
It is called from CursorImpl.deleteCurrentRecord(), + * after logging the deletion op. + * + * @param oldMemSize If the child LN was cached before the deletion, it + * has already been updated in-place by the caller (the ln contents have + * been deleted). In this case, oldMemSize stores the in-memory size of + * the child LN before the update, and it is used to do memory counting. + * Otherwise oldMemSize is 0 and the newly created LN has not been attached + * to the tree; it will be attached later by the caller, if needed. + * + * @param newLSN The LSN of the deletion logrec. + * + * @param newVLSN The VLSN of the deletion logrec. + * + * @param newSize The on-disk size of the deletion logrec. + */ + public final void deleteRecord( + int idx, + long oldMemSize, + long newLSN, + long newVLSN, + int newSize) { + + assert(isBIN()); + + final BIN bin = (BIN) this; + + bin.freeOffHeapLN(idx); // old version of the LN is stale + + setLsn(idx, newLSN); + + if (isEmbeddedLN(idx)) { + clearLastLoggedSize(idx); + bin.setCachedVLSN(idx, newVLSN); + } else { + setLastLoggedSize(idx, newSize); + } + + if (entryTargets.get(idx) != null) { + /* Update mem size for node change. */ + assert(oldMemSize != 0); + Node newLN = entryTargets.get(idx); + long newMemSize = newLN.getMemorySizeIncludedByParent(); + updateMemorySize(oldMemSize, newMemSize); + } else { + assert(oldMemSize == 0); + } + + setPendingDeleted(idx); + setDirty(true); + } + + /** + * This method is used by the RecoveryManager to change the current version + * of a record, either to a later version (in case of redo), or to an + * earlier version (in case of undo). The current version may or may not be + * cached as a child LN of this BIN (it may be only in case of txn abort + * during normal processing). If it is, it is evicted. The new version is + * not attached to the in-memory tree, to save memory during crash + * recovery. + * + * @param idx The BIN slot for the record. + * + * @param lsn The LSN of the new record version. It may be null in case of + * undo, if the logrec that is being undone is an insertion and the record + * did not exist at all in the DB before that insertion. + * + * @param knownDeleted True if the new version is a committed deletion. + * + * @param pendingDeleted True if the new version is a deletion, which + * may or may not be committed. + * + * @param key The key of the new version. It is null only if we are undoing + * and the revert-to version was not embedded (in this case the key of the + * revert-to version is not stored in the logrec). If it is null and the + * DB allows key updates, the new record version is fetched from disk to + * retrieve its key, so that the key values stored in the BIN slots are + * always transactionally correct. + * + * @param data The data of the new version. It is non-null if and only if + * the new version must be embedded in the BIN. + * + * @param vlsn The VLSN of the new version. + * + * @param logrecSize The on-disk size of the logrec corresponding to the + * new version. It may be 0 (i.e. unknown) in case of undo. + */ + public final void recoverRecord( + int idx, + long lsn, + boolean knownDeleted, + boolean pendingDeleted, + byte[] key, + byte[] data, + long vlsn, + int logrecSize, + int expiration, + boolean expirationInHours) { + + assert(isBIN()); + + BIN bin = (BIN) this; + + bin.freeOffHeapLN(idx); // old version of the LN is stale + + if (lsn == DbLsn.NULL_LSN) { + + /* + * A NULL lsn means that we are undoing an insertion that was done + * without slot reuse. 
To undo such an insertion we evict the + * current version (it may cached only in case of normal txn abort) + * and set the KD flag in the slot. We also set the LSN to null to + * ensure that the slot does not point to a logrec that does not + * reflect the slot's current state. The slot can then be put on + * the compressor for complete removal. + */ + setKnownDeletedAndEvictLN(idx); + + setLsnInternal(idx, DbLsn.NULL_LSN); + + bin.queueSlotDeletion(idx); + + return; + } + + if (key == null && + databaseImpl.allowsKeyUpdates() && + !knownDeleted) { + + try { + WholeEntry wholeEntry = getEnv().getLogManager(). + getLogEntryAllowInvisibleAtRecovery( + lsn, getLastLoggedSize(idx)); + + LNLogEntry logrec = (LNLogEntry) wholeEntry.getEntry(); + logrec.postFetchInit(getDatabase()); + + key = logrec.getKey(); + logrecSize = wholeEntry.getHeader().getEntrySize(); + + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException( + getEnv(), EnvironmentFailureReason.LOG_FILE_NOT_FOUND, + makeFetchErrorMsg(null, lsn, idx), e); + } + } + + long oldSlotSize = getEntryInMemorySize(idx); + + setLsn(idx, lsn); + setTarget(idx, null); + + boolean multiSlotChange = updateLNSlotKey(idx, key, data); + + if (isEmbeddedLN(idx)) { + clearLastLoggedSize(idx); + bin.setCachedVLSN(idx, vlsn); + } else { + setLastLoggedSize(idx, logrecSize); + } + + if (knownDeleted) { + assert(!pendingDeleted); + setKnownDeleted(idx); + bin.queueSlotDeletion(idx); + } else { + clearKnownDeleted(idx); + if (pendingDeleted) { + setPendingDeleted(idx); + bin.queueSlotDeletion(idx); + } else { + clearPendingDeleted(idx); + } + } + + bin.setExpiration(idx, expiration, expirationInHours); + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } else { + long newSlotSize = getEntryInMemorySize(idx); + updateMemorySize(oldSlotSize, newSlotSize); + } + + setDirty(true); + } + + /** + * Update the cached-child and LSN properties of the idx-th slot. This + * method is used by the RecoveryManager.recoverChildIN() to change the + * version of a child IN, a later version The child IN may or may not be + * already attached to the tree. + */ + public final void recoverIN( + int idx, + Node node, + long lsn, + int lastLoggedSize) { + + long oldSlotSize = getEntryInMemorySize(idx); + + /* + * If we are about to detach a cached child IN, make sure that it is + * not in the INList. This is correct, because this method is called + * during the recovery phase where the INList is disabled, + */ + Node child = getTarget(idx); + assert(child == null || + !((IN)child).getInListResident() || + child == node/* this is needed by a unit test*/); + + setLsn(idx, lsn); + setLastLoggedSize(idx, lastLoggedSize); + setTarget(idx, node); + + long newSlotSize = getEntryInMemorySize(idx); + updateMemorySize(oldSlotSize, newSlotSize); + + setDirty(true); + + assert(isBIN() || hasCachedChildren() == hasCachedChildrenFlag()); + } + + /** + * Attach the given node as the idx-th child of "this" node. If the child + * node is an LN, update the key of the parent slot to the given key value, + * if that value is non-null and an update is indeed necessary. + * + * This method is called after the child node has been either (a) fetched + * in from disk and is not dirty, or (b) is a newly created instance that + * will be written out later by something like a checkpoint. In either + * case, the slot LSN does not need to be updated. + * + * Note: does not dirty the node unless the LN slot key is changed. 
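+ * Illustrative fetch-then-attach sequence (mirrors fetchIN; a sketch,
+ * not a new code path):
+ * <pre>
+ *     child.latchNoUpdateLRU(databaseImpl);
+ *     child.postFetchInit(databaseImpl, lsn);
+ *     attachNode(idx, child, null);
+ *     child.releaseLatch();
+ * </pre>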
+ */ + public final void attachNode(int idx, Node node, byte[] newKey) { + + assert !(node instanceof IN) || ((IN) node).isLatchExclusiveOwner(); + + long oldSlotSize = getEntryInMemorySize(idx); + + /* Make sure we are not using this method to detach a cached child */ + assert(getTarget(idx) == null); + + setTarget(idx, node); + + boolean multiSlotChange = false; + + if (isBIN() && newKey != null) { + assert(!haveEmbeddedData(idx)); + multiSlotChange = updateLNSlotKey(idx, newKey, null); + } + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } else { + long newSlotSize = getEntryInMemorySize(idx); + updateMemorySize(oldSlotSize, newSlotSize); + } + + assert(isBIN() || hasCachedChildren() == hasCachedChildrenFlag()); + } + + /* + * Detach from the tree the child node at the idx-th slot. + * + * The most common caller of this method is the evictor. If the child + * being evicted was dirty, it has just been logged and the lsn of the + * slot must be updated. + */ + public final void detachNode(int idx, boolean updateLsn, long newLsn) { + + long oldSlotSize = getEntryInMemorySize(idx); + + Node child = getTarget(idx); + + if (updateLsn) { + setLsn(idx, newLsn); + setDirty(true); + } + setTarget(idx, null); + + long newSlotSize = getEntryInMemorySize(idx); + updateMemorySize(oldSlotSize, newSlotSize); + + if (child != null && child.isIN()) { + getEnv().getInMemoryINs().remove((IN) child); + } + + assert(isBIN() || hasCachedChildren() == hasCachedChildrenFlag()); + } + + /** + * This method is used in DupConvert, where it is called to convert the + * keys of an upper IN that has just been fetched from the log and is not + * attached to in-memory tree yet. + */ + public final void convertKey(int idx, byte[] newKey) { + + long oldSlotSize = getEntryInMemorySize(idx); + + boolean multiSlotChange = updateKey(idx, newKey, null); + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } else { + long newSlotSize = getEntryInMemorySize(idx); + updateMemorySize(oldSlotSize, newSlotSize); + } + + setDirty(true); + + assert(isBIN() || hasCachedChildren() == hasCachedChildrenFlag()); + } + + void copyEntries(final int from, final int to, final int n) { + + entryTargets = entryTargets.copy(from, to, n, this); + entryKeys = entryKeys.copy(from, to, n, this); + offHeapBINIds = offHeapBINIds.copy(from, to, n, this); + + System.arraycopy(entryStates, from, entryStates, to, n); + + if (entryLsnLongArray == null) { + final int fromOff = from << 2; + final int toOff = to << 2; + final int nBytes = n << 2; + System.arraycopy(entryLsnByteArray, fromOff, + entryLsnByteArray, toOff, nBytes); + } else { + System.arraycopy(entryLsnLongArray, from, + entryLsnLongArray, to, + n); + } + } + + /** + * Return true if this node needs splitting. For the moment, needing to be + * split is defined by there being no free entries available. + */ + public final boolean needsSplitting() { + + if (isBINDelta()) { + BIN bin = (BIN)this; + int fullBinNEntries = bin.getFullBinNEntries(); + int fullBinMaxEntries = bin.getFullBinMaxEntries(); + + if (fullBinNEntries < 0) { + /* fullBinNEntries is unknown in logVersions < 10 */ + mutateToFullBIN(false /*leaveFreeSlot*/); + } else { + assert(fullBinNEntries > 0); + return ((fullBinMaxEntries - fullBinNEntries) < 1); + } + } + + return ((getMaxEntries() - nEntries) < 1); + } + + /** + * Split this into two nodes. Parent IN is passed in parent and should be + * latched by the caller. 
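+     *
+     * A sketch of the calling pattern (hypothetical; the real callers live
+     * in Tree):
+     * <pre>
+     *   if (child.needsSplitting()) {
+     *       IN newSibling = child.split(parent, childIndex, grandParent,
+     *                                   maxEntriesPerNode);
+     *   }
+     * </pre>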
+ * + * childIndex is the index in parent of where "this" can be found. + */ + public final IN split( + IN parent, + int childIndex, + IN grandParent, + int maxEntries) { + + return splitInternal(parent, childIndex, grandParent, maxEntries, -1); + } + + /** + * Called when we know we are about to split on behalf of a key that is the + * minimum (leftSide) or maximum (!leftSide) of this node. This is + * achieved by just forcing the split to occur either one element in from + * the left or the right (i.e. splitIndex is 1 or nEntries - 1). + */ + IN splitSpecial( + IN parent, + int parentIndex, + IN grandParent, + int maxEntriesPerNode, + byte[] key, + boolean leftSide) { + + int index = findEntry(key, false, false); + + if (leftSide && index == 0) { + return splitInternal( + parent, parentIndex, grandParent, maxEntriesPerNode, 1); + + } else if (!leftSide && index == (nEntries - 1)) { + return splitInternal( + parent, parentIndex, grandParent, maxEntriesPerNode, + nEntries - 1); + + } else { + return split( + parent, parentIndex, grandParent, maxEntriesPerNode); + } + } + + final IN splitInternal( + final IN parent, + final int childIndex, + final IN grandParent, + final int maxEntries, + int splitIndex) + throws DatabaseException { + + assert(!isBINDelta()); + + /* + * Find the index of the existing identifierKey so we know which IN + * (new or old) to put it in. + */ + if (identifierKey == null) { + throw unexpectedState(); + } + + final int idKeyIndex = findEntry(identifierKey, false, false); + + if (splitIndex < 0) { + splitIndex = nEntries / 2; + } + + /* Range of entries to copy to new sibling. */ + final int low, high; + + if (idKeyIndex < splitIndex) { + + /* + * Current node (this) keeps left half entries. Right half entries + * will go in the new node. + */ + low = splitIndex; + high = nEntries; + } else { + + /* + * Current node (this) keeps right half entries. Left half entries + * will go in the new node. + */ + low = 0; + high = splitIndex; + } + + final byte[] newIdKey = getKey(low); + long parentLsn; + + /* + * Ensure that max entries is large enough to hold the slots being + * moved to the new sibling, with one spare slot for insertions. This + * is important when the maxEntries param is less than nEntries in this + * node, which can occur when the user reduces the fanout or when this + * node has temporarily grown beyond its original fanout. + */ + final IN newSibling = createNewInstance( + newIdKey, + Math.max(maxEntries, high - low + 1), + level); + + newSibling.latch(CacheMode.UNCHANGED); + + try { + boolean addedNewSiblingToCompressorQueue = false; + final int newSiblingNEntries = (high - low); + final boolean haveCachedChildren = hasCachedChildrenFlag(); + + assert(isBIN() || haveCachedChildren == hasCachedChildren()); + + final BIN bin = isBIN() ? (BIN) this : null; + + /** + * Distribute entries among the split node and the new sibling. 
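+             *
+             * For example (illustrative numbers): with nEntries == 4,
+             * splitIndex == 2 and the identifier key in slot 0, we get
+             * low == 2 and high == 4, so slots 2 and 3 are appended to the
+             * new sibling and cleared here, leaving slots 0 and 1 in this
+             * node.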
+             */
+            for (int i = low; i < high; i++) {
+
+                if (!addedNewSiblingToCompressorQueue &&
+                    bin != null &&
+                    bin.isDefunct(i)) {
+
+                    addedNewSiblingToCompressorQueue = true;
+                    getEnv().addToCompressorQueue((BIN) newSibling);
+                }
+
+                newSibling.appendEntryFromOtherNode(this, i);
+                clearEntry(i);
+            }
+
+            if (low == 0) {
+                shiftEntriesLeft(newSiblingNEntries);
+            }
+
+            nEntries -= newSiblingNEntries;
+            setDirty(true);
+
+            if (isUpperIN() && haveCachedChildren) {
+                setHasCachedChildrenFlag(hasCachedChildren());
+            }
+
+            assert(isBIN() ||
+                   hasCachedChildrenFlag() == hasCachedChildren());
+            assert(isBIN() ||
+                   newSibling.hasCachedChildrenFlag() ==
+                   newSibling.hasCachedChildren());
+
+            adjustCursors(newSibling, low, high);
+
+            /*
+             * If this node has no key prefix, calculate it now that it has
+             * been split. This must be done before logging, to ensure the
+             * prefix information is made persistent [#20799].
+             */
+            byte[] newKeyPrefix = computeKeyPrefix(-1);
+            recalcSuffixes(newKeyPrefix, null, null, -1);
+
+            /* Apply compaction after prefixing [#20799]. */
+            entryKeys = entryKeys.compact(this);
+
+            /* Only recalc if there are multiple entries in newSibling. */
+            if (newSibling.getNEntries() > 1) {
+                byte[] newSiblingPrefix = newSibling.computeKeyPrefix(-1);
+                newSibling.recalcSuffixes(newSiblingPrefix, null, null, -1);
+                /* initMemorySize calls entryKeys.compact. */
+                newSibling.initMemorySize();
+            }
+
+            assert idKeyIsSlotKey();
+            assert newSibling.idKeyIsSlotKey();
+
+            /*
+             * Update size. newSibling and parent are correct, but this IN
+             * has had its entries shifted and is not correct.
+             *
+             * Also, inMemorySize does not reflect changes that may have
+             * resulted from key prefixing, so it needs to be brought up to
+             * date; update it here for this and the above reason.
+             */
+            EnvironmentImpl env = getEnv();
+            INList inMemoryINs = env.getInMemoryINs();
+            long oldMemorySize = inMemorySize;
+            long newSize = computeMemorySize();
+            updateMemorySize(oldMemorySize, newSize);
+
+            /*
+             * Parent refers to child through an element of the entries
+             * array. Depending on which half of the BIN we copied keys
+             * from, we either have to adjust one pointer and add a new one,
+             * or we have to just add a new pointer to the new sibling.
+             *
+             * We must use provisional logging for two reasons:
+             *
+             * 1) All three log entries must be read atomically. The parent
+             * must get logged last, as all referred-to children must
+             * precede it. Provisional entries guarantee that all three are
+             * processed as a unit. Recovery skips provisional entries, so
+             * the changed children are only used if the parent makes it out
+             * to the log.
+             *
+             * 2) We log all the way to the root to avoid the "great aunt"
+             * problem (see LevelRecorder), and provisional logging is
+             * necessary during a checkpoint for levels less than
+             * maxFlushLevel.
+             *
+             * We prohibit compression during logging because there should
+             * be at least one entry in each IN. Note the use of getKey(0)
+             * below.
+             */
+            long newSiblingLsn =
+                newSibling.optionalLogProvisionalNoCompress(parent);
+
+            long myNewLsn = optionalLogProvisionalNoCompress(parent);
+
+            assert nEntries > 0;
+
+            /*
+             * When we update the parent entry, we make sure that we don't
+             * replace the parent's key that points at 'this' with a key
+             * that is greater than the existing one. Replacing the parent's
+             * key with something greater would effectively render a piece
+             * of the subtree inaccessible. So only replace the parent key
+             * with something <= the existing one.
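+             * For example (illustrative keys): if the parent slot key for
+             * 'this' is "b" and it were replaced with "c", a later search
+             * for "b" would be routed to the left of this node and would
+             * miss records that still live here.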
See tree/SplitTest.java for more details + * on the scenario. + */ + if (low == 0) { + + /* + * Change the original entry to point to the new child and add + * an entry to point to the newly logged version of this + * existing child. + */ + parent.prepareForSlotReuse(childIndex); + + parent.updateSplitSlot( + childIndex, newSibling, newSiblingLsn, newIdKey); + + boolean inserted = parent.insertEntry( + this, getKey(0), myNewLsn); + assert inserted; + } else { + + /* + * Update the existing child's LSN to reflect the newly logged + * version and insert new child into parent. + */ + parent.updateSplitSlot(childIndex, this, myNewLsn, getKey(0)); + + boolean inserted = parent.insertEntry( + newSibling, newIdKey, newSiblingLsn); + assert inserted; + } + + inMemoryINs.add(newSibling); + + /** + * Log the parent. Note that the root slot or grandparent slot is + * not updated with the parent's LSN here; this is done by + * Tree.forceSplit. + */ + if (parent.isRoot()) { + parentLsn = parent.optionalLog(); + } else { + parentLsn = parent.optionalLogProvisional(grandParent); + } + + /* Coordinate the split with an in-progress checkpoint. */ + env.getCheckpointer().coordinateSplitWithCheckpoint(newSibling); + + /* + * Check whether either the old or the new sibling must be added + * to the LRU (priority-1 LRUSet). + */ + assert(!isDIN() && !isDBIN()); + + if(isBIN() || !newSibling.hasCachedChildrenFlag()) { + if (isUpperIN() && traceLRU) { + LoggerUtils.envLogMsg( + traceLevel, getEnv(), + "split-newSibling " + + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + getEnv().getName() + + " Adding UIN to LRU: " + + newSibling.getNodeId()); + } + getEvictor().addBack(newSibling); + } + + if (isUpperIN() && + haveCachedChildren && + !hasCachedChildrenFlag()) { + if (traceLRU) { + LoggerUtils.envLogMsg( + traceLevel, getEnv(), + "split-oldSibling " + + Thread.currentThread().getId() + "-" + + Thread.currentThread().getName() + + "-" + getEnv().getName() + + " Adding UIN to LRU: " + getNodeId()); + } + getEvictor().addBack(this); + } + + /* Debug log this information. */ + traceSplit(Level.FINE, parent, + newSibling, parentLsn, myNewLsn, + newSiblingLsn, splitIndex, idKeyIndex, childIndex); + } finally { + newSibling.releaseLatch(); + } + + return newSibling; + } + + /** + * Used for moving entries between BINs during splits. + */ + void appendEntryFromOtherNode(IN from, int fromIdx) { + + assert(!isBINDelta()); + + final Node target = from.entryTargets.get(fromIdx); + final int ohBinId = from.getOffHeapBINId(fromIdx); + final boolean ohBinPri2 = from.isOffHeapBINPri2(fromIdx); + final boolean ohBinDirty = from.isOffHeapBINDirty(fromIdx); + final long lsn = from.getLsn(fromIdx); + final byte state = from.entryStates[fromIdx]; + final byte[] key = from.getKey(fromIdx); + final byte[] data = (from.haveEmbeddedData(fromIdx) ? + from.getData(fromIdx) : null); + + long oldSize = computeLsnOverhead(); + + ++nEntries; + + int idx = nEntries - 1; + + /* + * When calling setTarget for an IN child we must latch it, because + * setTarget sets the parent. + */ + if (target != null && target.isIN()) { + final IN in = (IN) target; + in.latchNoUpdateLRU(databaseImpl); + setTarget(idx, target); + in.releaseLatch(); + } else { + setTarget(idx, target); + } + + boolean multiSlotChange = insertKey(idx, key, data); + + /* setLsnInternal can mutate to an array of longs. 
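+         * (The compact representation packs each slot LSN into four bytes:
+         * a one-byte file-number offset from baseFileNumber plus a
+         * three-byte file offset; an LSN that does not fit forces the
+         * switch to the plain entryLsnLongArray form.)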
*/ + setLsnInternal(idx, lsn); + + entryStates[idx] = state; + + if (ohBinId >= 0) { + setOffHeapBINId(idx, ohBinId, ohBinPri2, ohBinDirty); + getOffHeapCache().setOwner(ohBinId, this); + } + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } else { + long newSize = getEntryInMemorySize(idx) + computeLsnOverhead(); + updateMemorySize(oldSize, newSize); + } + + setDirty(true); + } + + /** + * Update a slot that is being split. The slot to be updated here is the + * one that existed before the split. + * + * @param child The new child to be placed under the slot. May be the + * newly created sibling or the pre-existing sibling. + * @param lsn The new lsn of the child (the child was logged just before + * calling this method, so its slot lsn must be updated) + * @param key The new key for the slot. We should not actually update the + * slot key, because its value is the lower bound of the key range covered + * by the slot, and this lower bound does not change as a result of the + * split (the new slot created as a result of the split is placed to the + * right of the pre-existing slot). There is however one exception: the + * key can be updated if "idx" is the 0-slot. The 0-slot key is not a true + * lower bound; the actual lower bound for the 0-slot is the key in the + * parent slot for this IN. So, in this case, if the given key is less + * than the current one, it is better to update the key in order to better + * approximate the real lower bound (and thus make the isKeyInBounds() + * method more effective). + */ + private void updateSplitSlot( + int idx, + IN child, + long lsn, + byte[] key) { + + assert(isUpperIN()); + + long oldSize = getEntryInMemorySize(idx); + + setLsn(idx, lsn); + setTarget(idx, child); + + if (idx == 0) { + int s = entryKeys.compareKeys( + key, keyPrefix, idx, haveEmbeddedData(idx), + getKeyComparator()); + + boolean multiSlotChange = false; + if (s < 0) { + multiSlotChange = updateKey(idx, key, null/*data*/); + } + + if (multiSlotChange) { + updateMemorySize(inMemorySize, computeMemorySize()); + } else { + long newSize = getEntryInMemorySize(idx); + updateMemorySize(oldSize, newSize); + } + } else { + long newSize = getEntryInMemorySize(idx); + updateMemorySize(oldSize, newSize); + } + + setDirty(true); + + assert(hasCachedChildren() == hasCachedChildrenFlag()); + } + + /** + * Shift entries to the right by one position, starting with (and + * including) the entry at index. Increment nEntries by 1. Called + * in insertEntry1() + * + * @param index - The position to start shifting from. + */ + private void shiftEntriesRight(int index) { + copyEntries(index, index + 1, nEntries - index); + clearEntry(index); + nEntries++; + setDirty(true); + } + + /** + * Shift entries starting at the byHowMuch'th element to the left, thus + * removing the first byHowMuch'th elements of the entries array. This + * always starts at the 0th entry. Caller is responsible for decrementing + * nEntries. + * + * @param byHowMuch - The number of entries to remove from the left side + * of the entries array. + */ + private void shiftEntriesLeft(int byHowMuch) { + copyEntries(byHowMuch, 0, nEntries - byHowMuch); + for (int i = nEntries - byHowMuch; i < nEntries; i++) { + clearEntry(i); + } + setDirty(true); + } + + void adjustCursors( + IN newSibling, + int newSiblingLow, + int newSiblingHigh) { + /* Cursors never refer to IN's. */ + } + + void adjustCursorsForInsert(int insertIndex) { + /* Cursors never refer to IN's. 
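+         * (BIN overrides adjustCursors and adjustCursorsForInsert to
+         * update any cursors positioned on the affected slots.)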
+         */
+    }
+
+    /**
+     * Called prior to changing a slot to contain a different logical node.
+     *
+     * Necessary to support assertions for transient LSNs in
+     * shouldUpdateLsn. Examples: LN slot reuse, and splits where a new
+     * node is placed in an existing slot.
+     *
+     * Also needed to free the off-heap BIN associated with the old node.
+     *
+     * TODO: This method is no longer used for LN slot reuse, and freeing
+     * of the off-heap BIN could be done by the only caller, splitInternal,
+     * and then this method could be removed.
+     */
+    public void prepareForSlotReuse(int idx) {
+
+        if (databaseImpl.isDeferredWriteMode()) {
+            setLsn(idx, DbLsn.NULL_LSN, false/*check*/);
+        }
+
+        final OffHeapCache ohCache = getOffHeapCache();
+        if (ohCache.isEnabled() && getNormalizedLevel() == 2) {
+            ohCache.freeBIN((BIN) getTarget(idx), this, idx);
+        }
+    }
+
+    /*
+     * Get the current memory consumption of this node.
+     */
+    public long getInMemorySize() {
+        return inMemorySize;
+    }
+
+    /**
+     * Compute the current memory consumption of this node, after putting
+     * its keys in their compact representation, if possible.
+     */
+    private void initMemorySize() {
+        entryKeys = entryKeys.compact(this);
+        inMemorySize = computeMemorySize();
+    }
+
+    /**
+     * Count up the memory usage attributable to this node alone. LN
+     * children are counted by their BIN parents, but INs are not counted
+     * by their parents because they are resident on the IN list. The
+     * identifierKey is "intentionally" not kept track of in the memory
+     * budget.
+     */
+    public long computeMemorySize() {
+
+        long calcMemorySize = getFixedMemoryOverhead();
+
+        calcMemorySize += MemoryBudget.byteArraySize(entryStates.length);
+
+        calcMemorySize += computeLsnOverhead();
+
+        for (int i = 0; i < nEntries; i++) {
+            calcMemorySize += getEntryInMemorySize(i);
+        }
+
+        if (keyPrefix != null) {
+            calcMemorySize += MemoryBudget.byteArraySize(keyPrefix.length);
+        }
+
+        if (provisionalObsolete != null) {
+            calcMemorySize += provisionalObsolete.getMemorySize();
+        }
+
+        calcMemorySize += entryTargets.calculateMemorySize();
+        calcMemorySize += entryKeys.calculateMemorySize();
+
+        if (offHeapBINIds != null) {
+            calcMemorySize += offHeapBINIds.getMemorySize();
+        }
+
+        return calcMemorySize;
+    }
+
+    /*
+     * Overridden by subclasses.
+     */
+    protected long getFixedMemoryOverhead() {
+        return MemoryBudget.IN_FIXED_OVERHEAD;
+    }
+
+    /*
+     * Compute the memory consumption for storing this node's LSNs.
+     */
+    private int computeLsnOverhead() {
+        return (entryLsnLongArray == null) ?
+            MemoryBudget.byteArraySize(entryLsnByteArray.length) :
+            MemoryBudget.ARRAY_OVERHEAD +
+                (entryLsnLongArray.length *
+                 MemoryBudget.PRIMITIVE_LONG_ARRAY_ITEM_OVERHEAD);
+    }
+
+    private long getEntryInMemorySize(int idx) {
+
+        /*
+         * Do not count state size here, since it is counted as overhead
+         * during initialization.
+         */
+        long ret = 0;
+
+        /*
+         * Don't count the key size if the representation has already
+         * accounted for it.
+         */
+        if (!entryKeys.accountsForKeyByteMemUsage()) {
+
+            /*
+             * Materialize the key object only if needed, thus avoiding the
+             * object allocation cost when possible.
+             */
+            final byte[] key = entryKeys.get(idx);
+            if (key != null) {
+                ret += MemoryBudget.byteArraySize(key.length);
+            }
+        }
+
+        final Node target = entryTargets.get(idx);
+        if (target != null) {
+            ret += target.getMemorySizeIncludedByParent();
+        }
+        return ret;
+    }
+
+    /**
+     * Compacts the representation of the IN, if possible.
+     *
+     * Called by the evictor to reduce memory usage.
Should not be called too
+     * often (e.g., every CRUD operation), since this could cause lots of
+     * memory allocations as the representations contract and expand,
+     * resulting in expensive GC.
+     *
+     * @return number of bytes reclaimed.
+     */
+    public long compactMemory() {
+
+        final long oldSize = inMemorySize;
+        final INKeyRep oldKeyRep = entryKeys;
+
+        entryTargets = entryTargets.compact(this);
+        entryKeys = entryKeys.compact(this);
+        offHeapBINIds = offHeapBINIds.compact(this, EMPTY_OFFHEAP_BIN_IDS);
+
+        /*
+         * Note that we only need to account for mem usage changes in the
+         * key rep here, not the target rep. The target rep, unlike the key
+         * rep, updates its mem usage internally, and the responsibility
+         * for mem usage of contained nodes is fixed -- it is always
+         * managed by the IN.
+         *
+         * When the key rep changes, the accountsForKeyByteMemUsage
+         * property also changes. Recalc the size of the entire IN, because
+         * responsibility for managing contained key byte mem usage has
+         * shifted between the key rep and the IN parent.
+         */
+        if (entryKeys != oldKeyRep) {
+            updateMemorySize(inMemorySize, computeMemorySize());
+        }
+
+        return oldSize - inMemorySize;
+    }
+
+    /**
+     * Returns the amount of memory currently budgeted for this IN.
+     */
+    public long getBudgetedMemorySize() {
+        return inMemorySize - accumulatedDelta;
+    }
+
+    /**
+     * Called as part of a memory budget reset (during a checkpoint) to
+     * clear the accumulated delta and return the total memory size.
+     */
+    public long resetAndGetMemorySize() {
+        accumulatedDelta = 0;
+        return inMemorySize;
+    }
+
+    protected void updateMemorySize(long oldSize, long newSize) {
+        long delta = newSize - oldSize;
+        updateMemorySize(delta);
+    }
+
+    /*
+     * Called when a cached child is replaced by another cached child.
+     */
+    void updateMemorySize(Node oldNode, Node newNode) {
+        long delta = 0;
+        if (newNode != null) {
+            delta = newNode.getMemorySizeIncludedByParent();
+        }
+
+        if (oldNode != null) {
+            delta -= oldNode.getMemorySizeIncludedByParent();
+        }
+        updateMemorySize(delta);
+    }
+
+    /*
+     * Change this.inMemorySize by the given delta and update the memory
+     * budget for the cache, but only if the accumulated delta for this
+     * node exceeds the ACCUMULATED_LIMIT threshold and this IN is actually
+     * on the IN list. (For example, when we create new INs, they are
+     * manipulated off the IN list before being added; if we updated the
+     * environment-wide cache then, we'd end up double counting.)
+     */
+    void updateMemorySize(long delta) {
+
+        if (delta == 0) {
+            return;
+        }
+
+        inMemorySize += delta;
+
+        if (getInListResident()) {
+
+            /*
+             * This assertion is disabled if the environment is invalid to
+             * avoid spurious assertions during testing of IO errors. If the
+             * environment is invalid, memory budgeting errors are
+             * irrelevant. [#21929]
+             */
+            assert
+                inMemorySize >= getFixedMemoryOverhead() ||
+                !getEnv().isValid():
+                "delta: " + delta + " inMemorySize: " + inMemorySize +
+                " overhead: " + getFixedMemoryOverhead() +
+                " computed: " + computeMemorySize() +
+                " dump: " + toString() + assertPrintMemorySize();
+
+            accumulatedDelta += delta;
+            if (accumulatedDelta > ACCUMULATED_LIMIT ||
+                accumulatedDelta < -ACCUMULATED_LIMIT) {
+                updateMemoryBudget();
+            }
+        }
+    }
+
+    /**
+     * Move the accumulated delta to the memory budget.
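+     *
+     * Small deltas are batched in accumulatedDelta by updateMemorySize()
+     * and are pushed to the environment-wide MemoryBudget only once they
+     * exceed ACCUMULATED_LIMIT, or when this method is called directly
+     * (sketch):
+     * <pre>
+     *   in.updateMemoryBudget();  // flush the batched delta now
+     * </pre>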
+ */ + public void updateMemoryBudget() { + final EnvironmentImpl env = getEnv(); + env.getInMemoryINs().memRecalcUpdate(this, accumulatedDelta); + env.getMemoryBudget().updateTreeMemoryUsage(accumulatedDelta); + accumulatedDelta = 0; + } + + /** + * Returns the treeAdmin memory in objects referenced by this IN. + * Specifically, this refers to the DbFileSummaryMap held by + * MapLNs + */ + public long getTreeAdminMemorySize() { + return 0; // by default, no treeAdminMemory + } + + /* + * Utility method used during unit testing. + */ + protected long printMemorySize() { + + final long inOverhead = getFixedMemoryOverhead(); + + final long statesOverhead = + MemoryBudget.byteArraySize(entryStates.length); + + final int lsnOverhead = computeLsnOverhead(); + + int entryOverhead = 0; + for (int i = 0; i < nEntries; i++) { + entryOverhead += getEntryInMemorySize(i); + } + + final int keyPrefixOverhead = (keyPrefix != null) ? + MemoryBudget.byteArraySize(keyPrefix.length) : 0; + + final int provisionalOverhead = (provisionalObsolete != null) ? + provisionalObsolete.getMemorySize() : 0; + + final long targetRepOverhead = entryTargets.calculateMemorySize(); + final long keyRepOverhead = entryKeys.calculateMemorySize(); + final long total = inOverhead + statesOverhead + lsnOverhead + + entryOverhead + keyPrefixOverhead + provisionalOverhead + + targetRepOverhead + keyRepOverhead; + + final long offHeapBINIdOverhead = offHeapBINIds.getMemorySize(); + + System.out.println(" nEntries:" + nEntries + + "/" + entryStates.length + + " in: " + inOverhead + + " states: " + statesOverhead + + " entry: " + entryOverhead + + " lsn: " + lsnOverhead + + " keyPrefix: " + keyPrefixOverhead + + " provisional: " + provisionalOverhead + + " targetRep(" + entryTargets.getType() + "): " + + targetRepOverhead + + " keyRep(" + entryKeys.getType() +"): " + + keyRepOverhead + + " offHeapBINIds: " + offHeapBINIdOverhead + + " Total: " + total + + " inMemorySize: " + inMemorySize); + return total; + } + + /* Utility method used to print memory size in an assertion. */ + private boolean assertPrintMemorySize() { + printMemorySize(); + return true; + } + + public boolean verifyMemorySize() { + + long calcMemorySize = computeMemorySize(); + if (calcMemorySize != inMemorySize) { + + String msg = "-Warning: Out of sync. Should be " + + calcMemorySize + " / actual: " + inMemorySize + + " node: " + getNodeId(); + LoggerUtils.envLogMsg(Level.INFO, getEnv(), msg); + + System.out.println(msg); + printMemorySize(); + + return false; + } + return true; + } + + /** + * Adds (increments) or removes (decrements) the cache stats for the key + * and target representations. Used when rep objects are being replaced + * with a new instance, rather than by calling their mutator methods. + * Specifically, it is called when mutating from full bin to bin delta + * or vice-versa. + */ + protected void updateRepCacheStats(boolean increment) { + assert(isBIN()); + entryKeys.updateCacheStats(increment, this); + entryTargets.updateCacheStats(increment, this); + } + + protected int getCompactMaxKeyLength() { + return getEnv().getCompactMaxKeyLength(); + } + + /** + * Called when adding/removing this IN to/from the INList. 
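+     *
+     * For example (sketch of the INList side):
+     * <pre>
+     *   in.setInListResident(true);   // when the IN is added to the INList
+     *   in.setInListResident(false);  // when the IN is removed
+     * </pre>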
+     */
+    public void setInListResident(boolean resident) {
+
+        if (!resident) {
+            /* Decrement the stats before clearing its residency. */
+            entryTargets.updateCacheStats(false, this);
+            entryKeys.updateCacheStats(false, this);
+        }
+
+        if (resident) {
+            flags |= IN_RESIDENT_BIT;
+        } else {
+            flags &= ~IN_RESIDENT_BIT;
+        }
+
+        if (resident) {
+            /* Increment the stats after setting its residency. */
+            entryTargets.updateCacheStats(true, this);
+            entryKeys.updateCacheStats(true, this);
+        }
+    }
+
+    /**
+     * Returns whether this IN is on the INList.
+     */
+    public boolean getInListResident() {
+        return (flags & IN_RESIDENT_BIT) != 0;
+    }
+
+    public IN getPrevLRUNode() {
+        return prevLRUNode;
+    }
+
+    public void setPrevLRUNode(IN node) {
+        prevLRUNode = node;
+    }
+
+    public IN getNextLRUNode() {
+        return nextLRUNode;
+    }
+
+    public void setNextLRUNode(IN node) {
+        nextLRUNode = node;
+    }
+
+    /**
+     * Try to compact or otherwise reclaim memory in this IN and return the
+     * number of bytes reclaimed. For example, a BIN should evict LNs, if
+     * possible.
+     *
+     * Used by the evictor to reclaim memory by some means short of
+     * evicting the entire node. If a positive value is returned, the
+     * evictor will postpone full eviction of this node.
+     */
+    public long partialEviction() {
+        return 0;
+    }
+
+    /**
+     * Returns whether any child is non-null in the main or off-heap cache.
+     */
+    public boolean hasCachedChildren() {
+        assert isLatchOwner();
+
+        for (int i = 0; i < getNEntries(); i++) {
+            if (entryTargets.get(i) != null) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * Disallow delta on next log. Set to true (a) when we delete a slot
+     * from a BIN, (b) when the cleaner marks a BIN as dirty so that it
+     * will be migrated during the next checkpoint.
+     */
+    public void setProhibitNextDelta(boolean val) {
+
+        if (!isBIN()) {
+            return;
+        }
+
+        if (val) {
+            flags |= IN_PROHIBIT_NEXT_DELTA_BIT;
+        } else {
+            flags &= ~IN_PROHIBIT_NEXT_DELTA_BIT;
+        }
+    }
+
+    public boolean getProhibitNextDelta() {
+        return (flags & IN_PROHIBIT_NEXT_DELTA_BIT) != 0;
+    }
+
+    /*
+     * Validate the subtree that we're about to delete. Make sure there is
+     * no more than one valid entry on each IN and that the last level of
+     * the tree is empty. Also check that there are no cursors on any bins
+     * in this subtree. Assumes caller is holding the latch on this parent
+     * node.
+     *
+     * While we could latch-couple down the tree, rather than hold latches
+     * as we descend, we are presumably about to delete this subtree so
+     * concurrency shouldn't be an issue.
+     *
+     * @return true if the subtree rooted at the entry specified by "index"
+     * is ok to delete.
+     *
+     * Overridden by BIN class.
+     */
+    boolean validateSubtreeBeforeDelete(int index)
+        throws DatabaseException {
+
+        if (index >= nEntries) {
+
+            /*
+             * There's no entry here, so of course this entry is deletable.
+             */
+            return true;
+        } else {
+            IN child = fetchIN(index, CacheMode.UNCHANGED);
+
+            boolean needToLatch = !child.isLatchExclusiveOwner();
+
+            try {
+                if (needToLatch) {
+                    child.latch(CacheMode.UNCHANGED);
+                }
+                return child.isValidForDelete();
+            } finally {
+                if (needToLatch && isLatchOwner()) {
+                    child.releaseLatch();
+                }
+            }
+        }
+    }
+
+    /**
+     * Check if this node fits the qualifications for being part of a
+     * deletable subtree. It can only have one IN child and no LN children.
+     *
+     * Note: the method is overridden by BIN and LN.
+     * BIN.isValidForDelete() will not fetch any child LNs.
+     * LN.isValidForDelete() simply returns false.
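+     *
+     * For example, a deletable subtree has at most one entry per level and
+     * an empty BIN at the bottom (illustrative shape):
+     *
+     *     IN (1 entry) -> IN (1 entry) -> BIN (0 entries)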
+     *
+     * We assume that this is only called under an assert.
+     */
+    @Override
+    boolean isValidForDelete()
+        throws DatabaseException {
+
+        assert(!isBINDelta());
+
+        /*
+         * Can only have one valid child, and that child should be
+         * deletable.
+         */
+        if (nEntries > 1) {            // more than 1 entry.
+            return false;
+
+        } else if (nEntries == 1) {    // 1 entry, check child
+            IN child = fetchIN(0, CacheMode.UNCHANGED);
+            boolean needToLatch = !child.isLatchExclusiveOwner();
+            if (needToLatch) {
+                child.latch(CacheMode.UNCHANGED);
+            }
+
+            boolean ret = false;
+            try {
+                if (child.isBINDelta()) {
+                    return false;
+                }
+
+                ret = child.isValidForDelete();
+
+            } finally {
+                if (needToLatch) {
+                    child.releaseLatch();
+                }
+            }
+            return ret;
+        } else {                       // 0 entries.
+            return true;
+        }
+    }
+
+    /**
+     * Add self and children to the in-memory IN list. Called by recovery,
+     * can run with no latching.
+     */
+    @Override
+    final void rebuildINList(INList inList)
+        throws DatabaseException {
+
+        /*
+         * Recompute your in-memory size first and then add yourself to the
+         * list.
+         */
+        initMemorySize();
+
+        inList.add(this);
+
+        boolean hasCachedChildren = false;
+
+        /*
+         * Add your children if they're resident. (LNs know how to stop the
+         * flow).
+         */
+        for (int i = 0; i < nEntries; i++) {
+            Node n = getTarget(i);
+            if (n != null) {
+                n.rebuildINList(inList);
+                hasCachedChildren = true;
+            }
+            if (getOffHeapBINId(i) >= 0) {
+                hasCachedChildren = true;
+            }
+        }
+
+        if (isUpperIN()) {
+            if (hasCachedChildren) {
+                setHasCachedChildrenFlag(true);
+            } else {
+                setHasCachedChildrenFlag(false);
+                if (!isDIN()) {
+                    if (traceLRU) {
+                        LoggerUtils.envLogMsg(
+                            traceLevel, getEnv(),
+                            "rebuildINList " +
+                            Thread.currentThread().getId() +
+                            "-" +
+                            Thread.currentThread().getName() +
+                            "-" + getEnv().getName() +
+                            " Adding UIN to LRU: " +
+                            getNodeId());
+                    }
+                    getEvictor().addBack(this);
+                }
+            }
+        } else if (isBIN() && !isDBIN()) {
+            getEvictor().addBack(this);
+        }
+    }
+
+    /*
+     * DbStat support.
+     */
+    void accumulateStats(TreeWalkerStatsAccumulator acc) {
+        acc.processIN(this, getNodeId(), getLevel());
+    }
+
+    /**
+     * Sets the last logged LSN, which for a BIN may be a delta.
+     *
+     * It is called from IN.postFetch/RecoveryInit(). If the logrec we have
+     * just read was a BINDelta, this.lastFullVersion has already been set
+     * (in BINDeltaLogEntry.readMainItem() or in
+     * OldBinDelta.reconstituteBIN()). So, this method will set
+     * this.lastDeltaVersion. Otherwise, if the logrec was a full BIN,
+     * this.lastFullVersion has not been set yet, and it will be set here.
+     * In this case, this.lastDeltaVersion will remain NULL_LSN.
+     */
+    public void setLastLoggedLsn(long lsn) {
+
+        if (isBIN()) {
+            if (getLastFullLsn() == DbLsn.NULL_LSN) {
+                setLastFullLsn(lsn);
+            } else {
+                ((BIN)this).setLastDeltaLsn(lsn);
+            }
+        } else {
+            setLastFullLsn(lsn);
+        }
+    }
+
+    /**
+     * Returns the LSN of the last logged version of this IN, or NULL_LSN
+     * if never logged.
+     */
+    public final long getLastLoggedLsn() {
+        if (isBIN()) {
+            return (getLastDeltaLsn() != DbLsn.NULL_LSN ?
+                    getLastDeltaLsn() :
+                    getLastFullLsn());
+        }
+
+        return getLastFullLsn();
+    }
+
+    /**
+     * Sets the last full version LSN.
+     */
+    public final void setLastFullLsn(long lsn) {
+        lastFullVersion = lsn;
+    }
+
+    /**
+     * Returns the last full version LSN, or NULL_LSN if never logged.
+     */
+    public final long getLastFullLsn() {
+        return lastFullVersion;
+    }
+
+    /**
+     * Returns the last delta version LSN, or NULL_LSN if a delta was not
+     * last logged.
+     * For BINs, it just returns the value of the lastDeltaVersion field.
+     * Public for unit testing.
+     */
+    public long getLastDeltaLsn() {
+        return DbLsn.NULL_LSN;
+    }
+
+    /*
+     * Logging support
+     */
+
+    /**
+     * When splits and checkpoints intermingle in a deferred write
+     * database, a checkpoint target may appear which has a valid target
+     * but a null LSN. Deferred write dbs are written out in checkpoint
+     * style by either Database.sync() or a checkpoint which has cleaned a
+     * file containing deferred write entries. For example,
+     *
+     *    INa
+     *     |
+     *    BINb
+     *
+     * A checkpoint or Database.sync starts.
+     * The INList is traversed, dirty nodes are selected.
+     * BINb is bypassed on the INList, since it's not dirty.
+     * BINb is split, creating a new sibling, BINc, and dirtying INa.
+     * INa is selected as a dirty node for the ckpt.
+     *
+     * If this happens, INa is in the selected dirty set, but not its dirty
+     * child BINb and new child BINc.
+     *
+     * In a durable db, BINb and BINc are logged anyway. But in a deferred
+     * write db, there is an entry that points to BINc, but no logged
+     * version.
+     *
+     * This will not cause problems with eviction, because INa can't be
+     * evicted until BINb and BINc are logged, are non-dirty, and are
+     * detached. But it can cause problems at recovery, because INa will
+     * have a null LSN for a valid entry, and the LN children of BINc will
+     * not find a home. To prevent this, search for all dirty children
+     * that might have been missed during the selection phase, and write
+     * them out. It's not sufficient to write only null-LSN children,
+     * because the existing sibling must be logged lest LN children recover
+     * twice (once in the new sibling, once in the old existing sibling).
+     *
+     * TODO:
+     * Would the problem above be solved by logging dirty nodes using a
+     * tree traversal (post-order), rather than using the dirty map?
+     *
+     * Overridden by BIN class.
+     */
+    public void logDirtyChildren()
+        throws DatabaseException {
+
+        assert(!isBINDelta());
+
+        EnvironmentImpl envImpl = getDatabase().getEnv();
+
+        /* Look for targets that are dirty. */
+        for (int i = 0; i < getNEntries(); i++) {
+
+            IN child = (IN) getTarget(i);
+
+            if (child != null) {
+                child.latch(CacheMode.UNCHANGED);
+                try {
+                    if (child.getDirty()) {
+                        /* Ask descendants to log their children. */
+                        child.logDirtyChildren();
+                        long childLsn =
+                            child.log(false, // allowDeltas
+                                      true,  // isProvisional
+                                      true,  // backgroundIO
+                                      this); // parent
+
+                        updateEntry(
+                            i, childLsn, VLSN.NULL_VLSN_SEQUENCE,
+                            0/*lastLoggedSize*/);
+                    }
+                } finally {
+                    child.releaseLatch();
+                }
+            }
+        }
+    }
+
+    public final long log() {
+        return logInternal(
+            this, null, false /*allowDeltas*/, true /*allowCompress*/,
+            Provisional.NO, false /*backgroundIO*/, null /*parent*/);
+    }
+
+    public final long log(
+        boolean allowDeltas,
+        boolean isProvisional,
+        boolean backgroundIO,
+        IN parent) {
+
+        return logInternal(
+            this, null, allowDeltas, true /*allowCompress*/,
+            isProvisional ?
Provisional.YES : Provisional.NO, + backgroundIO, parent); + } + + public final long log( + boolean allowDeltas, + Provisional provisional, + boolean backgroundIO, + IN parent) { + + return logInternal( + this, null, allowDeltas, true /*allowCompress*/, provisional, backgroundIO, + parent); + } + + public final long optionalLog() { + + if (databaseImpl.isDeferredWriteMode()) { + return getLastLoggedLsn(); + } else { + return logInternal( + this, null, false /*allowDeltas*/, true /*allowCompress*/, + Provisional.NO, false /*backgroundIO*/, null /*parent*/); + } + } + + public long optionalLogProvisional(IN parent) { + return optionalLogProvisional(parent, true /*allowCompress*/); + } + + long optionalLogProvisionalNoCompress(IN parent) { + return optionalLogProvisional(parent, false /*allowCompress*/); + } + + private long optionalLogProvisional(IN parent, boolean allowCompress) { + + if (databaseImpl.isDeferredWriteMode()) { + return getLastLoggedLsn(); + } else { + return logInternal( + this, null, false /*allowDeltas*/, allowCompress, + Provisional.YES, false /*backgroundIO*/, parent); + } + } + + public static long logEntry( + INLogEntry logEntry, + Provisional provisional, + boolean backgroundIO, + IN parent) { + + return logInternal( + null, logEntry, true /*allowDeltas*/, false /*allowCompress*/, + provisional, backgroundIO, parent); + } + + /** + * Bottleneck method for all IN logging. + * + * If 'node' is non-null, 'logEntry' must be null. + * If 'node' is null, 'logEntry' and 'parent' must be non-null. + * + * When 'logEntry' is non-null we are logging an off-heap BIN, and it is + * not resident in the main cache. The lastFull/DeltaLsns are not updated + * here, and this must be done instead by the caller. + * + * When 'node' is non-null, 'parent' may or may not be null. It must be + * non-null when logging provisionally, since obsolete LSNs are added to + * the parent's collection. + */ + private static long logInternal( + final IN node, + INLogEntry logEntry, + final boolean allowDeltas, + final boolean allowCompress, + final Provisional provisional, + final boolean backgroundIO, + final IN parent) { + + assert node == null || node.isLatchExclusiveOwner(); + assert parent == null || parent.isLatchExclusiveOwner(); + assert node != null || parent != null; + assert (node == null) != (logEntry == null); + + final DatabaseImpl dbImpl = + (node != null) ? node.getDatabase() : parent.getDatabase(); + + final EnvironmentImpl envImpl = dbImpl.getEnv(); + + final boolean countObsoleteNow = + provisional != Provisional.YES || dbImpl.isTemporary(); + + final boolean isBin = (node != null) ? + node.isBIN() : (parent.getNormalizedLevel() == 2); + + final BIN bin = (node != null && isBin) ? ((BIN) node) : null; + + final boolean isDelta; + + if (isBin) { + if (logEntry != null) { + /* + * When a logEntry is supplied (node/bin are null), the logic + * below is implemented by OffHeapCache.createBINLogEntry. + */ + isDelta = logEntry.isBINDelta(); + } else { + /* Compress non-dirty slots before determining delta status. */ + if (allowCompress) { + envImpl.lazyCompress(bin, false /*compressDirtySlots*/); + } + + isDelta = bin.isBINDelta() || + (allowDeltas && bin.shouldLogDelta()); + + /* Be sure that we didn't illegally mutate to a delta. */ + assert (!(isDelta && bin.isDeltaProhibited())); + + /* Also compress dirty slots, if we will not log a delta. 
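+                 * (A delta consists of exactly the dirty slots, so they
+                 * must be preserved when a delta will be logged; a full
+                 * BIN can safely drop its defunct dirty slots.)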
*/ + if (allowCompress && !isDelta) { + envImpl.lazyCompress(bin, true /*compressDirtySlots*/); + } + + /* + * Write dirty LNs in deferred-write databases after + * compression to reduce total logging, at least for temp DBs. + */ + if (dbImpl.isDeferredWriteMode()) { + bin.logDirtyChildren(); + } + + logEntry = isDelta ? + (new BINDeltaLogEntry(bin)) : + (new INLogEntry<>(bin)); + } + } else { + assert node != null; + + isDelta = false; + logEntry = new INLogEntry<>(node); + } + + final LogParams params = new LogParams(); + params.entry = logEntry; + params.provisional = provisional; + params.repContext = ReplicationContext.NO_REPLICATE; + params.nodeDb = dbImpl; + params.backgroundIO = backgroundIO; + + /* + * For delta logging: + * + Count lastDeltaVersion obsolete, if non-null. + * + Set lastDeltaVersion to newly logged LSN. + * + Leave lastFullVersion unchanged. + * + * For full version logging: + * + Count lastFullVersion and lastDeltaVersion obsolete, if non-null. + * + Set lastFullVersion to newly logged LSN. + * + Set lastDeltaVersion to null. + */ + final long oldLsn = + isDelta ? DbLsn.NULL_LSN : logEntry.getPrevFullLsn(); + + final long auxOldLsn = logEntry.getPrevDeltaLsn(); + + /* + * Determine whether to count the prior version of an IN (as well as + * accumulated provisionally obsolete LSNs for child nodes) obsolete + * when logging the new version. + * + * True is set if we are logging the IN non-provisionally, since the + * non-provisional version durably replaces the prior version and + * causes all provisional children to also become durable. + * + * True is also set if the database is temporary. Since we never use a + * temporary DB past recovery, prior versions of an IN are never used. + * [#16928] + */ + if (countObsoleteNow) { + params.oldLsn = oldLsn; + params.auxOldLsn = auxOldLsn; + params.packedObsoleteInfo = + (node != null) ? node.provisionalObsolete : null; + } + + /* Log it. */ + final LogItem item = envImpl.getLogManager().log(params); + + if (node != null) { + node.setDirty(false); + } + + if (countObsoleteNow) { + if (node != null) { + node.discardProvisionalObsolete(); + } + } else if (parent != null) { + parent.trackProvisionalObsolete(node, oldLsn); + parent.trackProvisionalObsolete(node, auxOldLsn); + /* + * TODO: + * The parent is null and provisional is YES when evicting the root + * of a DW DB. How does obsolete counting happen? + */ + } + + if (bin != null) { + /* + * When a logEntry is supplied (node/bin are null), the logic + * below is implemented by OffHeapCache.postBINLog. + */ + if (isDelta) { + bin.setLastDeltaLsn(item.lsn); + } else { + bin.setLastFullLsn(item.lsn); + bin.setLastDeltaLsn(DbLsn.NULL_LSN); + } + + bin.setProhibitNextDelta(false); + + } else if (node != null) { + node.setLastFullLsn(item.lsn); + } + + final Evictor evictor = envImpl.getEvictor(); + + if (node != null && evictor.getUseDirtyLRUSet()) { + + /* + * To capture all cases where a node needs to be moved to the + * priority-1 LRUSet after being cleaned, we invoke moveToPri1LRU() + * from IN.afterLog(). This includes the case where the node is + * being logged as part of being evicted, in which case we don't + * really want it to go back to the LRU. However, this is ok + * because moveToPri1LRU() checks whether the node is actually + * in the priority-2 LRUSet before moving it to the priority-1 + * LRUSet. 
+             */
+            if (traceLRU && node.isUpperIN()) {
+                LoggerUtils.envLogMsg(
+                    traceLevel, envImpl,
+                    Thread.currentThread().getId() + "-" +
+                    Thread.currentThread().getName() +
+                    "-" + envImpl.getName() +
+                    " afterLogCommon(): " +
+                    " Moving UIN to mixed LRU: " + node.getNodeId());
+            }
+            evictor.moveToPri1LRU(node);
+        }
+
+        return item.lsn;
+    }
+
+    /**
+     * Adds the given obsolete LSN and any tracked obsolete LSNs for the
+     * given child IN to this IN's tracking list. This method is called to
+     * track obsolete LSNs when a child IN is logged provisionally. Such
+     * LSNs cannot be considered obsolete until an ancestor IN is logged
+     * non-provisionally.
+     */
+    void trackProvisionalObsolete(final IN childIN, final long obsoleteLsn) {
+
+        final boolean moveChildInfo =
+            (childIN != null && childIN.provisionalObsolete != null);
+
+        final boolean addChildLsn = (obsoleteLsn != DbLsn.NULL_LSN);
+
+        if (!moveChildInfo && !addChildLsn) {
+            return;
+        }
+
+        final int oldMemSize = (provisionalObsolete != null) ?
+            provisionalObsolete.getMemorySize() : 0;
+
+        if (moveChildInfo) {
+            if (provisionalObsolete != null) {
+                /* Append child info to parent info. */
+                provisionalObsolete.copyObsoleteInfo
+                    (childIN.provisionalObsolete);
+            } else {
+                /* Move reference from child to parent. */
+                provisionalObsolete = childIN.provisionalObsolete;
+            }
+            childIN.updateMemorySize(
+                0 - childIN.provisionalObsolete.getMemorySize());
+            childIN.provisionalObsolete = null;
+        }
+
+        if (addChildLsn) {
+            if (provisionalObsolete == null) {
+                provisionalObsolete = new PackedObsoleteInfo();
+            }
+            provisionalObsolete.addObsoleteInfo(obsoleteLsn);
+        }
+
+        updateMemorySize(oldMemSize,
+                         (provisionalObsolete != null) ?
+                         provisionalObsolete.getMemorySize() :
+                         0);
+    }
+
+    /**
+     * Discards the provisional obsolete tracking information in this node
+     * after it has been counted in the live tracker. This method is called
+     * after this node is logged non-provisionally.
+     */
+    private void discardProvisionalObsolete()
+        throws DatabaseException {
+
+        if (provisionalObsolete != null) {
+            updateMemorySize(0 - provisionalObsolete.getMemorySize());
+            provisionalObsolete = null;
+        }
+    }
+
+    /*
+     * NOOP for upper INs. Overridden by BIN class.
+     */
+    public void mutateToFullBIN(boolean leaveFreeSlot) {
+    }
+
+    private int getNEntriesToWrite(boolean deltasOnly) {
+        if (!deltasOnly) {
+            return nEntries;
+        }
+        return getNDeltas();
+    }
+
+    public final int getNDeltas() {
+        int n = 0;
+        for (int i = 0; i < nEntries; i++) {
+            if (!isDirty(i)) {
+                continue;
+            }
+            n += 1;
+        }
+        return n;
+    }
+
+    /**
+     * @see Node#getGenericLogType
+     */
+    @Override
+    public final LogEntryType getGenericLogType() {
+        return getLogType();
+    }
+
+    /**
+     * Get the log type of this node.
+     */
+    public LogEntryType getLogType() {
+        return LogEntryType.LOG_IN;
+    }
+
+    /**
+     * @see Loggable#getLogSize
+     *
+     * Overridden by DIN and DBIN classes.
+     */
+    @Override
+    public int getLogSize() {
+        return getLogSize(false);
+    }
+
+    public final int getLogSize(boolean deltasOnly) {
+
+        BIN bin = (isBIN() ?
(BIN)this : null); + + boolean haveVLSNCache = (bin != null && bin.isVLSNCachingEnabled()); + + int size = 0; + + boolean haveExpiration = false; + + if (bin != null) { + int base = bin.getExpirationBase(); + haveExpiration = (base != -1); + size += LogUtils.getPackedIntLogSize(base); + } + + size += LogUtils.getPackedLongLogSize(nodeId); + size += LogUtils.getByteArrayLogSize(identifierKey); // identifier key + + if (keyPrefix != null) { + size += LogUtils.getByteArrayLogSize(keyPrefix); + } + + size += 1; // one byte for boolean flags + + final int nEntriesToWrite = getNEntriesToWrite(deltasOnly); + + final int maxEntriesToWrite = + (!deltasOnly ? + getMaxEntries() : + bin.getDeltaCapacity(nEntriesToWrite)); + + size += LogUtils.getPackedIntLogSize(nEntriesToWrite); + size += LogUtils.getPackedIntLogSize(level); + size += LogUtils.getPackedIntLogSize(maxEntriesToWrite); + + final boolean compactLsnsRep = (entryLsnLongArray == null); + size += LogUtils.getBooleanLogSize(); // compactLsnsRep + if (compactLsnsRep) { + size += LogUtils.INT_BYTES; // baseFileNumber + } + + for (int i = 0; i < nEntries; i++) { // entries + + if (deltasOnly && !isDirty(i)) { + continue; + } + + size += LogUtils.getByteArrayLogSize(entryKeys.get(i)) + // key + (compactLsnsRep ? LogUtils.INT_BYTES : + LogUtils.getLongLogSize()) + // LSN + 1; // state + + if (isLastLoggedSizeStored(i)) { + size += LogUtils.getPackedIntLogSize(getLastLoggedSize(i)); + } + + if (haveVLSNCache && isEmbeddedLN(i)) { + size += LogUtils.getPackedLongLogSize(bin.getCachedVLSN(i)); + } + + if (haveExpiration) { + size += + LogUtils.getPackedIntLogSize(bin.getExpirationOffset(i)); + } + } + + if (deltasOnly) { + size += LogUtils.getPackedIntLogSize(bin.getFullBinNEntries()); + size += LogUtils.getPackedIntLogSize(bin.getFullBinMaxEntries()); + + size += bin.getBloomFilterLogSize(); + } + + return size; + } + + /* + * Overridden by DIN and DBIN classes. + */ + @Override + public void writeToLog(ByteBuffer logBuffer) { + + serialize(logBuffer, false /*deltasOnly*/, true /*clearDirtyBits*/); + } + + public void writeToLog(ByteBuffer logBuffer, boolean deltasOnly) { + + serialize(logBuffer, deltasOnly, !deltasOnly /*clearDirtyBits*/); + } + + /** + * WARNING: In the case of BINs this method is not only used for logging + * but also for off-heap caching. Therefore, this method should not have + * side effects unless the clearDirtyBits param is true. + */ + public final void serialize(ByteBuffer logBuffer, + boolean deltasOnly, + boolean clearDirtyBits) { + + assert(!deltasOnly || isBIN()); + + BIN bin = (isBIN() ? (BIN)this : null); + + byte[] bloomFilter = (deltasOnly ? bin.createBloomFilter() : null); + + boolean haveExpiration = false; + + if (bin != null) { + int base = bin.getExpirationBase(); + haveExpiration = (base != -1); + LogUtils.writePackedInt(logBuffer, base); + } + + LogUtils.writePackedLong(logBuffer, nodeId); + + LogUtils.writeByteArray(logBuffer, identifierKey); + + boolean hasKeyPrefix = (keyPrefix != null); + boolean mayHaveLastLoggedSize = mayHaveLastLoggedSizeStored(); + boolean haveVLSNCache = (bin != null && bin.isVLSNCachingEnabled()); + + byte booleans = (byte) (isRoot() ? 1 : 0); + booleans |= (hasKeyPrefix ? 2 : 0); + booleans |= (mayHaveLastLoggedSize ? 4 : 0); + booleans |= (bloomFilter != null ? 8 : 0); + booleans |= (haveVLSNCache ? 16 : 0); + booleans |= (isExpirationInHours() ? 
32 : 0); + + logBuffer.put(booleans); + + if (hasKeyPrefix) { + LogUtils.writeByteArray(logBuffer, keyPrefix); + } + + final int nEntriesToWrite = getNEntriesToWrite(deltasOnly); + + final int maxEntriesToWrite = + (!deltasOnly ? + getMaxEntries() : + bin.getDeltaCapacity(nEntriesToWrite)); + /* + if (deltasOnly) { + BIN bin = (BIN)this; + System.out.println( + "Logging BIN-delta: " + getNodeId() + + " is delta = " + isBINDelta() + + " nEntries = " + nEntriesToWrite + + " max entries = " + maxEntriesToWrite + + " full BIN entries = " + bin.getFullBinNEntries() + + " full BIN max entries = " + bin.getFullBinMaxEntries()); + } + */ + LogUtils.writePackedInt(logBuffer, nEntriesToWrite); + LogUtils.writePackedInt(logBuffer, level); + LogUtils.writePackedInt(logBuffer, maxEntriesToWrite); + + /* true if compact representation. */ + boolean compactLsnsRep = (entryLsnLongArray == null); + LogUtils.writeBoolean(logBuffer, compactLsnsRep); + if (compactLsnsRep) { + LogUtils.writeInt(logBuffer, (int) baseFileNumber); + } + + for (int i = 0; i < nEntries; i++) { + + if (deltasOnly && !isDirty(i)) { + continue; + } + + LogUtils.writeByteArray(logBuffer, entryKeys.get(i)); + + /* + * A NULL_LSN may be stored when an incomplete insertion occurs, + * but in that case the KnownDeleted flag must be set. See + * Tree.insert. [#13126] + */ + assert checkForNullLSN(i) : + "logging IN " + getNodeId() + " with null lsn child " + + " db=" + databaseImpl.getDebugName() + + " isDeferredWriteMode=" + databaseImpl.isDeferredWriteMode() + + " isTemporary=" + databaseImpl.isTemporary(); + + if (compactLsnsRep) { + int offset = i << 2; + int fileOffset = getFileOffset(offset); + logBuffer.put(getFileNumberOffset(offset)); + logBuffer.put((byte) (fileOffset & 0xff)); + logBuffer.put((byte) ((fileOffset >>> 8) & 0xff)); + logBuffer.put((byte) ((fileOffset >>> 16) & 0xff)); + } else { + LogUtils.writeLong(logBuffer, entryLsnLongArray[i]); + } + + logBuffer.put( + (byte) (entryStates[i] & EntryStates.CLEAR_TRANSIENT_BITS)); + + if (clearDirtyBits) { + entryStates[i] &= EntryStates.CLEAR_DIRTY_BIT; + } + + if (isLastLoggedSizeStored(i)) { + LogUtils.writePackedInt(logBuffer, getLastLoggedSize(i)); + } + + if (haveVLSNCache && isEmbeddedLN(i)) { + LogUtils.writePackedLong(logBuffer, bin.getCachedVLSN(i)); + } + + if (haveExpiration) { + LogUtils.writePackedInt( + logBuffer, bin.getExpirationOffset(i)); + } + } + + if (deltasOnly) { + LogUtils.writePackedInt(logBuffer, bin.getFullBinNEntries()); + LogUtils.writePackedInt(logBuffer, bin.getFullBinMaxEntries()); + + if (bloomFilter != null) { + BINDeltaBloomFilter.writeToLog(bloomFilter, logBuffer); + } + } + } + + /* + * Used for assertion to prevent writing a null lsn to the log. + */ + private boolean checkForNullLSN(int index) { + boolean ok; + if (isBIN()) { + ok = !(getLsn(index) == DbLsn.NULL_LSN && + (entryStates[index] & EntryStates.KNOWN_DELETED_BIT) == 0); + } else { + ok = (getLsn(index) != DbLsn.NULL_LSN); + } + return ok; + } + + /** + * Returns whether the given serialized IN is a BIN that may have + * expiration values. 
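+     *
+     * The expiration base is peeked at non-destructively; the method body
+     * below follows this pattern:
+     * <pre>
+     *   itemBuffer.mark();
+     *   int base = LogUtils.readPackedInt(itemBuffer);
+     *   itemBuffer.reset();   // leave the buffer position unchanged
+     *   return (base != -1);
+     * </pre>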
+ */ + public boolean mayHaveExpirationValues( + ByteBuffer itemBuffer, + int entryVersion) { + + if (!isBIN() || entryVersion < 12) { + return false; + } + + itemBuffer.mark(); + int expirationBase = LogUtils.readPackedInt(itemBuffer); + itemBuffer.reset(); + + return (expirationBase != -1); + } + + @Override + public void readFromLog( + ByteBuffer itemBuffer, + int entryVersion) { + + materialize( + itemBuffer, entryVersion, + false /*deltasOnly*/, true /*clearDirtyBits*/); + } + + public void readFromLog( + ByteBuffer itemBuffer, + int entryVersion, + boolean deltasOnly) { + + materialize( + itemBuffer, entryVersion, + deltasOnly, !deltasOnly /*clearDirtyBits*/); + } + + /** + * WARNING: In the case of BINs this method is used not only for logging + * but also for off-heap caching. Therefore, this method should not have + * side effects unless the clearDirtyBits param is true or an older log + * version is passed (off-heap caching uses the current version). + */ + public final void materialize( + ByteBuffer itemBuffer, + int entryVersion, + boolean deltasOnly, + boolean clearDirtyBits) { + + assert(!deltasOnly || isBIN()); + + BIN bin = (isBIN() ? (BIN)this : null); + + boolean unpacked = (entryVersion < 6); + + boolean haveExpiration = false; + + if (bin != null && entryVersion >= 12) { + int base = LogUtils.readPackedInt(itemBuffer); + haveExpiration = (base != -1); + bin.setExpirationBase(base); + } + + nodeId = LogUtils.readLong(itemBuffer, unpacked); + identifierKey = LogUtils.readByteArray(itemBuffer, unpacked); + + byte booleans = itemBuffer.get(); + + setIsRootFlag((booleans & 1) != 0); + + if ((booleans & 2) != 0) { + keyPrefix = LogUtils.readByteArray(itemBuffer, unpacked); + } + + boolean mayHaveLastLoggedSize = ((booleans & 4) != 0); + assert !(mayHaveLastLoggedSize && (entryVersion < 9)); + + boolean hasBloomFilter = ((booleans & 8) != 0); + assert(!hasBloomFilter || (entryVersion >= 10 && deltasOnly)); + + boolean haveVLSNCache = ((booleans & 16) != 0); + assert !(haveVLSNCache && (entryVersion < 11)); + + setExpirationInHours((booleans & 32) != 0); + + nEntries = LogUtils.readInt(itemBuffer, unpacked); + level = LogUtils.readInt(itemBuffer, unpacked); + int length = LogUtils.readInt(itemBuffer, unpacked); + + entryTargets = INTargetRep.NONE; + entryKeys = new INKeyRep.Default(length); + baseFileNumber = -1; + long storedBaseFileNumber = -1; + if (disableCompactLsns) { + entryLsnByteArray = null; + entryLsnLongArray = new long[length]; + } else { + entryLsnByteArray = new byte[length << 2]; + entryLsnLongArray = null; + } + entryStates = new byte[length]; + boolean compactLsnsRep = false; + + if (entryVersion > 1) { + compactLsnsRep = LogUtils.readBoolean(itemBuffer); + if (compactLsnsRep) { + baseFileNumber = LogUtils.readInt(itemBuffer); + storedBaseFileNumber = baseFileNumber; + } + } + + for (int i = 0; i < nEntries; i++) { + + entryKeys = entryKeys.set( + i, LogUtils.readByteArray(itemBuffer, unpacked), this); + + long lsn; + if (compactLsnsRep) { + /* LSNs in compact form. */ + byte fileNumberOffset = itemBuffer.get(); + int fileOffset = (itemBuffer.get() & 0xff); + fileOffset |= ((itemBuffer.get() & 0xff) << 8); + fileOffset |= ((itemBuffer.get() & 0xff) << 16); + if (fileOffset == THREE_BYTE_NEGATIVE_ONE) { + lsn = DbLsn.NULL_LSN; + } else { + lsn = DbLsn.makeLsn + (storedBaseFileNumber + fileNumberOffset, fileOffset); + } + } else { + /* LSNs in long form. 
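+                 * (Written by serialize() with LogUtils.writeLong when the
+                 * compact four-byte representation could not be used.)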
*/ + lsn = LogUtils.readLong(itemBuffer); // LSN + } + + setLsnInternal(i, lsn); + + byte entryState = itemBuffer.get(); // state + + if (clearDirtyBits) { + entryState &= EntryStates.CLEAR_DIRTY_BIT; + } + + /* + * The MIGRATE_BIT (now the transient OFFHEAP_DIRTY_BIT) was + * accidentally written in a pre-JE 6 log version. + */ + if (entryVersion < 9) { + entryState &= EntryStates.CLEAR_TRANSIENT_BITS; + } + + /* + * A NULL_LSN is the remnant of an incomplete insertion and the + * KnownDeleted flag should be set. But because of bugs in prior + * releases, the KnownDeleted flag may not be set. So set it here. + * See Tree.insert. [#13126] + */ + if (entryVersion < 9 && lsn == DbLsn.NULL_LSN) { + entryState |= EntryStates.KNOWN_DELETED_BIT; + } + + entryStates[i] = entryState; + + if (mayHaveLastLoggedSize && !isEmbeddedLN(i)) { + setLastLoggedSizeUnconditional( + i, LogUtils.readPackedInt(itemBuffer)); + } + + if (haveVLSNCache && isEmbeddedLN(i)) { + bin.setCachedVLSNUnconditional( + i, LogUtils.readPackedLong(itemBuffer)); + } + + if (haveExpiration) { + bin.setExpirationOffset(i, LogUtils.readPackedInt(itemBuffer)); + } + } + + if (deltasOnly) { + setBINDelta(true); + + if (entryVersion >= 10) { + bin.setFullBinNEntries(LogUtils.readPackedInt(itemBuffer)); + bin.setFullBinMaxEntries(LogUtils.readPackedInt(itemBuffer)); + + if (hasBloomFilter) { + bin.bloomFilter = BINDeltaBloomFilter.readFromLog( + itemBuffer, entryVersion); + } + } + } + + /* Dup conversion will be done by postFetchInit. */ + needDupKeyConversion = (entryVersion < 8); + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + @Override + public final boolean logicalEquals(Loggable other) { + return false; + } + + /** + * @see Loggable#dumpLog + */ + @Override + public final void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(beginTag()); + + sb.append(""); + + sb.append(Key.dumpString(identifierKey, "idKey", 0)); + + // isRoot + sb.append(""); + + // level + sb.append(""); + + if (keyPrefix != null) { + sb.append(Key.dumpString(keyPrefix, "keyPrefix", 0)); + } + + // nEntries, length of entries array + sb.append(""); + + if (verbose) { + for (int i = 0; i < nEntries; i++) { + sb.append(""); + sb.append(Key.dumpString(getKey(i), 0)); + if (isEmbeddedLN(i)) { + sb.append(Key.dumpString(getData(i), "data", 0)); + } + sb.append(DbLsn.toString(getLsn(i))); + sb.append(""); + } + } + + sb.append(""); + + if (isBINDelta(false)) { + if (bin.bloomFilter != null) { + BINDeltaBloomFilter.dumpLog(bin.bloomFilter, sb, verbose); + } + } + + /* Add on any additional items from subclasses before the end tag. */ + dumpLogAdditional(sb); + + sb.append(endTag()); + } + + /** + * Allows subclasses to add additional fields before the end tag. If they + * just overload dumpLog, the xml isn't nested. 
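+     *
+     * For example (hypothetical subclass):
+     * <pre>
+     *   protected void dumpLogAdditional(StringBuilder sb) {
+     *       super.dumpLogAdditional(sb);
+     *       sb.append("...");  // subclass-specific fields
+     *   }
+     * </pre>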
+ */ + protected void dumpLogAdditional(StringBuilder sb) { + } + + public String beginTag() { + return BEGIN_TAG; + } + + public String endTag() { + return END_TAG; + } + + /** + * For unit test support: + * @return a string that dumps information about this IN, without + */ + @Override + public String dumpString(int nSpaces, boolean dumpTags) { + StringBuilder sb = new StringBuilder(); + if (dumpTags) { + sb.append(TreeUtils.indent(nSpaces)); + sb.append(beginTag()); + sb.append('\n'); + } + + if (dumpTags) { + sb.append(TreeUtils.indent(nSpaces)); + sb.append(""); + } else { + sb.append(nodeId); + } + sb.append('\n'); + + BIN bin = null; + if (isBIN()) { + bin = (BIN) this; + } + + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append(identifierKey == null ? + "" : + Key.dumpString(identifierKey, 0)); + sb.append(""); + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append(keyPrefix == null ? "" : Key.dumpString(keyPrefix, 0)); + sb.append("\n"); + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append( + ""); + if (bin != null) { + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + } + sb.append('\n'); + + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append('\n'); + + for (int i = 0; i < nEntries; i++) { + sb.append(TreeUtils.indent(nSpaces + 4)); + sb.append("\n"); + if (getLsn(i) == DbLsn.NULL_LSN) { + sb.append(TreeUtils.indent(nSpaces + 6)); + sb.append(""); + } else { + sb.append(DbLsn.dumpString(getLsn(i), nSpaces + 6)); + } + sb.append('\n'); + if (entryKeys.get(i) == null) { + sb.append(TreeUtils.indent(nSpaces + 6)); + sb.append(""); + } else { + sb.append(Key.dumpString(entryKeys.get(i), (nSpaces + 6))); + } + sb.append('\n'); + if (getOffHeapBINId(i) >= 0) { + sb.append("\n"); + } + if (bin != null && bin.getOffHeapLNId(i) != 0) { + sb.append("\n"); + } + if (entryTargets.get(i) == null) { + sb.append(TreeUtils.indent(nSpaces + 6)); + sb.append(""); + } else { + sb.append(entryTargets.get(i).dumpString(nSpaces + 6, true)); + } + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces + 4)); + sb.append(""); + sb.append('\n'); + } + + sb.append(TreeUtils.indent(nSpaces + 2)); + sb.append(""); + sb.append('\n'); + if (dumpTags) { + sb.append(TreeUtils.indent(nSpaces)); + sb.append(endTag()); + } + return sb.toString(); + } + + private void dumpSlotState(StringBuilder sb, int i, BIN bin) { + sb.append(" kd=\"").append(isEntryKnownDeleted(i)); + sb.append("\" pd=\"").append(isEntryPendingDeleted(i)); + sb.append("\" dirty=\"").append(isDirty(i)); + sb.append("\" embedded=\"").append(isEmbeddedLN(i)); + sb.append("\" noData=\"").append(isNoDataLN(i)); + if (bin != null) { + sb.append("\" logSize=\""); + sb.append(bin.getLastLoggedSizeUnconditional(i)); + long vlsn = bin.getCachedVLSN(i); + if (!VLSN.isNull(vlsn)) { + sb.append("\" vlsn=\"").append(vlsn); + } + } + if (bin != null && bin.getExpiration(i) != 0) { + sb.append("\" expires=\""); + sb.append(TTL.formatExpiration( + bin.getExpiration(i), bin.isExpirationInHours())); + } + sb.append("\""); + } + + /** + * Converts to an identifying string that is safe to output in a log. 
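+     * For example (illustrative), {@code in.toSafeString(0, 1)} yields a
+     * line like "IN nodeId=... level=... nSlots=... slot-0:[...]
+     * slot-1:[...]" for the first two slots.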
+ * Keys are not included for security/privacy reasons. + */ + public String toSafeString(final int... slotIndexes) { + + final BIN bin = isBIN() ? (BIN) this : null; + final StringBuilder sb = new StringBuilder(); + + sb.append("IN nodeId=").append(getNodeId()); + sb.append(" lastLoggedLSN="); + sb.append(DbLsn.getNoFormatString(getLastLoggedLsn())); + sb.append(" lastFulLSN="); + sb.append(DbLsn.getNoFormatString(getLastFullLsn())); + sb.append(" level=").append(Integer.toHexString(getLevel())); + sb.append(" flags=").append(Integer.toHexString(flags)); + sb.append(" isBINDelta=").append(isBINDelta()); + sb.append(" nSlots=").append(getNEntries()); + + if (slotIndexes != null) { + for (final int i : slotIndexes) { + sb.append(" slot-").append(i).append(":["); + sb.append("lsn="); + sb.append(DbLsn.getNoFormatString(getLsn(i))); + sb.append(" offset="); + sb.append(DbLsn.getFileOffset(getLsn(i))); + if (bin != null) { + sb.append(" offset+logSize="); + sb.append(DbLsn.getFileOffset(getLsn(i)) + + bin.getLastLoggedSizeUnconditional(i)); + } + dumpSlotState(sb, i, bin); + sb.append("]"); + } + } + + return sb.toString(); + } + + @Override + public String toString() { + return dumpString(0, true); + } + + public String shortClassName() { + return "IN"; + } + + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + private void traceSplit(Level level, + IN parent, + IN newSibling, + long parentLsn, + long myNewLsn, + long newSiblingLsn, + int splitIndex, + int idKeyIndex, + int childIndex) { + Logger logger = getEnv().getLogger(); + if (logger.isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(TRACE_SPLIT); + sb.append(" parent="); + sb.append(parent.getNodeId()); + sb.append(" child="); + sb.append(getNodeId()); + sb.append(" newSibling="); + sb.append(newSibling.getNodeId()); + sb.append(" parentLsn = "); + sb.append(DbLsn.getNoFormatString(parentLsn)); + sb.append(" childLsn = "); + sb.append(DbLsn.getNoFormatString(myNewLsn)); + sb.append(" newSiblingLsn = "); + sb.append(DbLsn.getNoFormatString(newSiblingLsn)); + sb.append(" splitIdx="); + sb.append(splitIndex); + sb.append(" idKeyIdx="); + sb.append(idKeyIndex); + sb.append(" childIdx="); + sb.append(childIndex); + LoggerUtils.logMsg(logger, + databaseImpl.getEnv(), + level, + sb.toString()); + } + } + + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. + */ + private void traceDelete(Level level, int index) { + Logger logger = databaseImpl.getEnv().getLogger(); + if (logger.isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(TRACE_DELETE); + sb.append(" in=").append(getNodeId()); + sb.append(" index="); + sb.append(index); + LoggerUtils.logMsg(logger, + databaseImpl.getEnv(), + level, + sb.toString()); + } + } + + public final void setFetchINHook(TestHook hook) { + fetchINHook = hook; + } +} diff --git a/src/com/sleepycat/je/tree/INArrayRep.java b/src/com/sleepycat/je/tree/INArrayRep.java new file mode 100644 index 0000000..d103048 --- /dev/null +++ b/src/com/sleepycat/je/tree/INArrayRep.java @@ -0,0 +1,158 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.tree; + +import com.sleepycat.je.evictor.Evictor; + +/** + * The base class for the various array representations used by fields + * associated with an IN node. Storage efficiency, especially when JE is + * operating in a "cache full" environment is the prime motivation for the + * various representations. + *

+ * Each representation assumes that all read operations are done under a
+ * shared latch and all updates (set, copy and compact) are done under an
+ * exclusive latch. As a result, the representations themselves do not make
+ * any provisions for synchronization.
+ * <p>
+ * The callers of all the potentially representation mutating methods:
+ * <ol>
+ * <li>{@link #set(int, Object, IN)}</li>
+ * <li>{@link #copy(int, int, int, IN)}</li>
+ * <li>{@link #compact(IN)}</li>
+ * </ol>
        + * must be careful to save the result value and use it for subsequent + * operations, since it could represent the new mutated object. + */ +public abstract class INArrayRep { + + public INArrayRep() { + } + + /* Returns the type associated with the representation. */ + public abstract REP_ENUM_TYPE getType(); + + /** + * Sets the array element at idx to the node. The underlying representation + * can change as a result of the set operation. + * + * @param idx the index to be set + * @param e the array elelement at the idx + * + * @return either this, or the new representation if there was a mutation. + */ + public abstract ARRAY_BASE_TYPE set(int idx, ELEMENT_TYPE e, IN parent); + + /** + * Returns the element at idx. + */ + public abstract ELEMENT_TYPE get(int idx); + + /** + * Copies n elements at index denoted by "from" to the index denoted by + * "to". Overlapping copies are supported. It's possible that the + * representation may mutate as a result of the copy. + * + * @param from the source (inclusive) of the copy + * @param to the target (inclusive) of the copy + * @param n the number of elements to be copied. + * + * @return either this, or the new representation if there was a mutation. + */ + public abstract ARRAY_BASE_TYPE copy(int from, int to, int n, IN parent); + + /** + * Chooses a more compact representation, if that's possible, otherwise + * does nothing. + *
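+ * <p>
+ * Callers keep the returned rep, e.g. (a sketch of the calling pattern
+ * inside an IN method, not a quote from this codebase):
+ * <pre>
+ *     entryTargets = entryTargets.compact(this);
+ * </pre>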

        + * WARNING: This method must not change the memory size of the current + * representation and return 'this', without explicitly adjusting memory + * usage (via noteRepChange) before returning. Returning a new instance is + * the trigger for adjusting memory usage in the parent. + * + * @return this or a more compact representation. + */ + public abstract ARRAY_BASE_TYPE compact(IN parent); + + /** + * Changes the capacity, either truncating existing elements at the end if + * the capacity is reduced, or adding empty elements at the end if the + * capacity is enlarged. The caller guarantees that all truncated elements + * are unused. + */ + public abstract ARRAY_BASE_TYPE resize(int capacity); + + /** + * Returns the current memory size of the underlying representation in + * bytes. It returns the size of the representation, excluding the size of + * the elements contained in it. + * + * @return the memory size of the representation in bytes + */ + public abstract long calculateMemorySize(); + + /** + * Update the cache statistics for this representation. + * + * @param increment true the stat should be incremented, false if it must + * be decremented + * @param evictor the evictor that shoulds ths stat counters + */ + abstract void updateCacheStats(boolean increment, Evictor evictor); + + /** + * Updates the cache statistics associated with this representation. It + * should be invoked upon every creation, every rep change and finally when + * the IN node is decached. + * + * @param increment true if the stat is to be incremented, false if it is + * to be decremented + */ + final void updateCacheStats(boolean increment, IN parent) { + + if (!parent.getInListResident()) { + /* If the IN is not in the cache don't accumulate stats for it. */ + return; + } + + updateCacheStats(increment, parent.getEnv().getEvictor()); + } + + /** + * Performs the bookkeeping associated with a representation change. It + * accounts for the change in storage and adjusts the cache statistics. + * + * @param newRep the new representation that is replacing this one. + */ + final void noteRepChange( + INArrayRep newRep, + IN parent) { + + if (parent == null) { + /* Only true in unit tests. */ + return; + } + + parent.updateMemorySize(newRep.calculateMemorySize() - + calculateMemorySize()); + updateCacheStats(false, parent); + newRep.updateCacheStats(true, parent); + } +} diff --git a/src/com/sleepycat/je/tree/INKeyRep.java b/src/com/sleepycat/je/tree/INKeyRep.java new file mode 100644 index 0000000..8f0f8ab --- /dev/null +++ b/src/com/sleepycat/je/tree/INKeyRep.java @@ -0,0 +1,729 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.tree; + +import java.util.Arrays; +import java.util.Comparator; + +import com.sleepycat.je.dbi.DupKeyData; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.utilint.SizeofMarker; + +/** + * The abstract class that defines the various formats used to represent + * the keys associated with the IN node. 
The class is also used to store + * embedded records, where the actual key and the data portion of a record are + * stored together as a single byte sequence. + * + * There are currently two supported representations: + *

+ * <ol>
+ * <li>A default representation Default that's capable of holding any set of
+ * keys.</li>
+ * <li>A compact representation MaxKeySize that's more efficient for holding
+ * small keys (LTE 16 bytes) in length. If key prefixing is in use this
+ * represents the unprefixed part of the key, since that's what is stored in
+ * this array.</li>
+ * </ol>
+ * <p>
        + * The choice of representation is made when an IN node is first read in from + * the log. The MaxKeySize representation is only used when it is + * more storage efficient than the default representation for the set of keys + * currently associated with the IN. + *
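+ * <p>
+ * Roughly, the tradeoff checked by Default.compact below is (a sketch, not
+ * the exact code):
+ * <pre>
+ *     defaultRepSize + totalKeyByteArraySizes
+ *         vs.  compactOverhead + byteArraySize(capacity * (maxKeyLen + 1))
+ * </pre>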

        + * Note that no attempt is currently made to optimize the storage + * representation as keys are added to, or removed from, the + * Default representation to minimize the chances of transitionary + * "back and forth" representation changes that could prove to be expensive. + */ +public abstract class INKeyRep + extends INArrayRep { + + /* The different representations for keys. */ + public enum Type { DEFAULT, MAX_KEY_SIZE }; + + public INKeyRep() { + } + + public abstract int length(); + + /** + * Returns true if the key bytes mem usage is accounted for internally + * here, or false if each key has a separate byte array and its mem usage + * is accounted for by the parent. + */ + public abstract boolean accountsForKeyByteMemUsage(); + + public abstract int size(int idx); + + public abstract INKeyRep set(int idx, byte[] key, byte[] data, IN parent); + + public abstract INKeyRep setData(int idx, byte[] data, IN parent); + + public abstract byte[] getData(int idx); + + public abstract byte[] getKey(int idx, boolean embeddedData); + + public abstract byte[] getFullKey( + byte[] prefix, + int idx, + boolean embeddedData); + + public abstract int compareKeys( + byte[] searchKey, + byte[] prefix, + int idx, + boolean embeddedData, + Comparator comparator); + + /** + * The default representation that's capable of storing keys of any size. + */ + public static class Default extends INKeyRep { + + private final byte[][] keys; + + Default(int nodeMaxEntries) { + this.keys = new byte[nodeMaxEntries][]; + } + + public Default(@SuppressWarnings("unused") SizeofMarker marker) { + keys = null; + } + + private Default(byte[][] keys) { + this.keys = keys; + } + + @Override + public INKeyRep resize(int capacity) { + return new Default(Arrays.copyOfRange(keys, 0, capacity)); + } + + @Override + public Type getType() { + return Type.DEFAULT; + } + + @Override + public int length() { + return keys.length; + } + + @Override + public INKeyRep set(int idx, byte[] key, IN parent) { + keys[idx] = key; + return this; + } + + @Override + public INKeyRep set(int idx, byte[] key, byte[] data, IN parent) { + + if (data == null || data.length == 0) { + keys[idx] = key; + } else { + keys[idx] = DupKeyData.combine(key, data); + } + return this; + } + + @Override + public INKeyRep setData(int idx, byte[] data, IN parent) { + + /* + * TODO #21488: optimize this to avoid creation of new combined + * key, when possible. 
+ */ + return set(idx, getKey(idx, true), data, parent); + } + + @Override + public int size(int idx) { + return keys[idx].length; + } + + @Override + public byte[] get(int idx) { + return keys[idx]; + } + + @Override + public byte[] getData(int idx) { + + assert(keys[idx] != null); + return DupKeyData.getData(keys[idx], 0, keys[idx].length); + } + + @Override + public byte[] getKey(int idx, boolean embeddedData) { + + byte[] suffix = keys[idx]; + + if (suffix == null) { + return Key.EMPTY_KEY; + } else if (embeddedData) { + return DupKeyData.getKey(suffix, 0, suffix.length); + } else { + return suffix; + } + } + + @Override + public byte[] getFullKey( + byte[] prefix, + int idx, + boolean embeddedData) { + + if (prefix == null || prefix.length == 0) { + return getKey(idx, embeddedData); + } + + byte[] suffix = keys[idx]; + + if (suffix == null) { + assert(!embeddedData); + suffix = Key.EMPTY_KEY; + } + + int prefixLen = prefix.length; + int suffixLen; + + if (embeddedData) { + suffixLen = DupKeyData.getKeyLength(suffix, 0, suffix.length); + } else { + suffixLen = suffix.length; + } + + final byte[] key = new byte[prefixLen + suffixLen]; + System.arraycopy(prefix, 0, key, 0, prefixLen); + System.arraycopy(suffix, 0, key, prefixLen, suffixLen); + + return key; + } + + @Override + public int compareKeys( + byte[] searchKey, + byte[] prefix, + int idx, + boolean embeddedData, + Comparator comparator) + { + if (comparator != null) { + byte[] myKey = getFullKey(prefix, idx, embeddedData); + return Key.compareKeys(searchKey, myKey, comparator); + } + + int cmp = 0; + + if (prefix == null || prefix.length == 0) { + + return compareSuffixes( + searchKey, 0, searchKey.length, idx, embeddedData); + } + + cmp = Key.compareUnsignedBytes( + searchKey, 0, Math.min(searchKey.length, prefix.length), + prefix, 0, prefix.length); + + if (cmp == 0) { + + int searchKeyOffset = prefix.length; + int searchKeyLen = searchKey.length - prefix.length; + + return compareSuffixes( + searchKey, searchKeyOffset, searchKeyLen, + idx, embeddedData); + } + + return cmp; + } + + private int compareSuffixes( + byte[] searchKey, + int searchKeyOff, + int searchKeyLen, + int idx, + boolean embeddedData) { + + byte[] myKey = keys[idx]; + int myKeyLen; + + if (myKey == null) { + myKey = Key.EMPTY_KEY; + myKeyLen = 0; + } else if (embeddedData) { + myKeyLen = DupKeyData.getKeyLength(myKey, 0, myKey.length); + } else { + myKeyLen = myKey.length; + } + + return Key.compareUnsignedBytes( + searchKey, searchKeyOff, searchKeyLen, myKey, 0, myKeyLen); + } + + + @Override + public INKeyRep copy(int from, int to, int n, IN parent) { + System.arraycopy(keys, from, keys, to, n); + return this; + } + + /** + * Evolves to the MaxKeySize representation if that is more efficient + * for the current set of keys. Note that since all the keys must be + * examined to make the decision, there is a reasonable cost to the + * method and it should not be invoked indiscriminately. 
+ */ + @Override + public INKeyRep compact(IN parent) { + + if (keys.length > MaxKeySize.MAX_KEYS) { + return this; + } + + final int compactMaxKeyLength = parent.getCompactMaxKeyLength(); + if (compactMaxKeyLength <= 0) { + return this; + } + + int keyCount = 0; + int maxKeyLength = 0; + int defaultKeyBytes = 0; + + for (byte[] key : keys) { + if (key != null) { + keyCount++; + if (key.length > maxKeyLength) { + maxKeyLength = key.length; + if (maxKeyLength > compactMaxKeyLength) { + return this; + } + } + defaultKeyBytes += MemoryBudget.byteArraySize(key.length); + } + } + + if (keyCount == 0) { + return this; + } + + long defaultSizeWithKeys = calculateMemorySize() + defaultKeyBytes; + + if (defaultSizeWithKeys > + MaxKeySize.calculateMemorySize(keys.length, maxKeyLength)) { + return compactToMaxKeySizeRep(maxKeyLength, parent); + } + + return this; + } + + private MaxKeySize compactToMaxKeySizeRep( + int maxKeyLength, + IN parent) { + + MaxKeySize newRep = + new MaxKeySize(keys.length, (short) maxKeyLength); + + for (int i = 0; i < keys.length; i++) { + INKeyRep rep = newRep.set(i, keys[i], parent); + assert rep == newRep; /* Rep remains unchanged. */ + } + + noteRepChange(newRep, parent); + + return newRep; + } + + @Override + public long calculateMemorySize() { + + /* + * Assume empty keys array. The memory consumed by the actual keys + * is accounted for by the IN.getEntryInMemorySize() method. + */ + return MemoryBudget.DEFAULT_KEYVALS_OVERHEAD + + MemoryBudget.objectArraySize(keys.length); + } + + @Override + public boolean accountsForKeyByteMemUsage() { + return false; + } + + @Override + void updateCacheStats(@SuppressWarnings("unused") boolean increment, + @SuppressWarnings("unused") Evictor evictor) { + /* No stats for the default representation. */ + } + } + + /** + * The compact representation that can be used to represent keys LTE 16 + * bytes in length. The keys are all represented inside a single byte array + * instead of having one byte array per key. Within the array, all keys are + * assigned a storage size equal to that taken up by the longest key, plus + * one byte to hold the actual key length. This makes key retreival fast. + * However, insertion and deletion for larger keys moves bytes proportional + * to the storage length of the keys. This is why the representation is + * restricted to keys LTE 16 bytes in size. + * + * On a 32 bit VM the per key overhead for the Default representation is 4 + * bytes for the pointer + 16 bytes for each byte array key object, for a + * total of 20 bytes/key. On a 64 bit machine the overheads are much + * larger: 8 bytes for the pointer plus 24 bytes per key. + * + * The more fully populated the IN the more the savings with this + * representation since the single byte array is sized to hold all the keys + * regardless of the actual number of keys that are present. + * + * It's worth noting that the storage savings here are realized in addition + * to the storage benefits of key prefixing, since the keys stored in the + * key array are the smaller key values after the prefix has been stripped, + * reducing the length of the key and making it more likely that it's small + * enough for this specialized representation. 
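+ * <p>
+ * Layout sketch (illustrative values): with maxKeyLen = 4 each slot takes
+ * 5 bytes, and a two-byte key {7, 9} in slot 0 is stored as
+ * <pre>
+ *     [ -126, 7, 9, x, x | next slot ... ]   // length byte biased by -128
+ * </pre>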
+ */ + public static class MaxKeySize extends INKeyRep { + + private static final int LENGTH_BYTES = 1; + public static final byte DEFAULT_MAX_KEY_LENGTH = 16; + public static final int MAX_KEYS = 256; + private static final byte NULL_KEY = Byte.MAX_VALUE; + + /* + * The array is sized to hold all the keys associated with the IN node. + * Each key is allocated a fixed amount of storage equal to the maximum + * length of all the keys in the IN node + 1 byte to hold the size of + * each key. The length is biased, by -128. That is, a zero length + * key is represented by -128, a 1 byte key by -127, etc. + */ + private final byte[] keys; + + /* + * The number of butes used to store each key == + * DEFAULT_MAX_KEY_LENGTH (16) + LENGTH_BYTES (1) + */ + private final short fixedKeyLen; + + public MaxKeySize(int nodeMaxEntries, short maxKeyLen) { + + assert maxKeyLen < 255; + this.fixedKeyLen = (short) (maxKeyLen + LENGTH_BYTES); + this.keys = new byte[fixedKeyLen * nodeMaxEntries]; + + for (int i = 0; i < nodeMaxEntries; i++) { + INKeyRep rep = set(i, null, null); + assert rep == this; /* Rep remains unchanged. */ + } + } + + /* Only for use by Sizeof */ + public MaxKeySize(@SuppressWarnings("unused") SizeofMarker marker) { + keys = null; + fixedKeyLen = 0; + } + + private MaxKeySize(byte[] keys, short fixedKeyLen) { + this.keys = keys; + this.fixedKeyLen = fixedKeyLen; + } + + @Override + public INKeyRep resize(int capacity) { + return new MaxKeySize( + Arrays.copyOfRange(keys, 0, capacity * fixedKeyLen), + fixedKeyLen); + } + + @Override + public Type getType() { + return Type.MAX_KEY_SIZE; + } + + @Override + public int length() { + return keys.length / fixedKeyLen; + } + + @Override + public INKeyRep set(int idx, byte[] key, IN parent) { + + int slotOff = idx * fixedKeyLen; + + if (key == null) { + keys[slotOff] = NULL_KEY; + return this; + } + + if (key.length >= fixedKeyLen) { + Default newRep = expandToDefaultRep(parent); + return newRep.set(idx, key, parent); + } + + keys[slotOff] = (byte) (key.length + Byte.MIN_VALUE); + + slotOff += LENGTH_BYTES; + + System.arraycopy(key, 0, keys, slotOff, key.length); + + return this; + } + + @Override + public INKeyRep set(int idx, byte[] key, byte[] data, IN parent) { + + if (data == null || data.length == 0) { + return set(idx, key, parent); + } + + byte[] twoPartKey = DupKeyData.combine(key, data); + + return set(idx, twoPartKey, parent); + } + + @Override + public INKeyRep setData(int idx, byte[] data, IN parent) { + + /* + * TODO #21488: optimize this to avoid creation of new combined + * key, when possible. + */ + return set(idx, getKey(idx, true), data, parent); + } + + private Default expandToDefaultRep(IN parent) { + + final int capacity = length(); + final Default newRep = new Default(capacity); + + for (int i = 0; i < capacity; i++) { + final byte[] k = get(i); + INKeyRep rep = newRep.set(i, k, parent); + assert rep == newRep; /* Rep remains unchanged. 
*/ + } + + noteRepChange(newRep, parent); + return newRep; + } + + @Override + public int size(int idx) { + + int slotOff = idx * fixedKeyLen; + + assert keys[slotOff] != NULL_KEY; + + return keys[slotOff] - Byte.MIN_VALUE; + } + + @Override + public byte[] get(int idx) { + + int slotOff = idx * fixedKeyLen; + + if (keys[slotOff] == NULL_KEY) { + return null; + } + + int slotLen = keys[slotOff] - Byte.MIN_VALUE; + + slotOff += LENGTH_BYTES; + + byte[] info = new byte[slotLen]; + System.arraycopy(keys, slotOff, info, 0, slotLen); + return info; + } + + @Override + public byte[] getData(int idx) { + + int slotOff = idx * fixedKeyLen; + + assert(keys[slotOff] != NULL_KEY); + + int slotLen = keys[slotOff] - Byte.MIN_VALUE; + + slotOff += LENGTH_BYTES; + + return DupKeyData.getData(keys, slotOff, slotLen); + } + + @Override + public byte[] getKey(int idx, boolean embeddedData) { + + int slotOff = idx * fixedKeyLen; + + if (keys[slotOff] == NULL_KEY) { + assert(!embeddedData); + return Key.EMPTY_KEY; + } + + int slotLen = keys[slotOff] - Byte.MIN_VALUE; + + slotOff += LENGTH_BYTES; + + if (embeddedData) { + return DupKeyData.getKey(keys, slotOff, slotLen); + } + + byte[] key = new byte[slotLen]; + System.arraycopy(keys, slotOff, key, 0, slotLen); + return key; + } + + @Override + public byte[] getFullKey( + byte[] prefix, + int idx, + boolean embeddedData) { + + if (prefix == null || prefix.length == 0) { + return getKey(idx, embeddedData); + } + + int slotOff = idx * fixedKeyLen; + + if (keys[slotOff] == NULL_KEY) { + assert(!embeddedData); + return prefix; + } + + int slotLen = keys[slotOff] - Byte.MIN_VALUE; + + slotOff += LENGTH_BYTES; + + int prefixLen = prefix.length; + int suffixLen; + + if (embeddedData) { + suffixLen = DupKeyData.getKeyLength(keys, slotOff, slotLen); + } else { + suffixLen = slotLen; + } + + byte[] key = new byte[suffixLen + prefixLen]; + System.arraycopy(prefix, 0, key, 0, prefixLen); + System.arraycopy(keys, slotOff, key, prefixLen, suffixLen); + return key; + } + + @Override + public int compareKeys( + byte[] searchKey, + byte[] prefix, + int idx, + boolean embeddedData, + Comparator comparator) { + + if (comparator != null) { + byte[] myKey = getFullKey(prefix, idx, embeddedData); + return Key.compareKeys(searchKey, myKey, comparator); + } + + int cmp = 0; + + if (prefix == null || prefix.length == 0) { + + return compareSuffixes( + searchKey, 0, searchKey.length, idx, embeddedData); + } + + cmp = Key.compareUnsignedBytes( + searchKey, 0, Math.min(searchKey.length, prefix.length), + prefix, 0, prefix.length); + + if (cmp == 0) { + + int searchKeyOff = prefix.length; + int searchKeyLen = searchKey.length - prefix.length; + + return compareSuffixes( + searchKey, searchKeyOff, searchKeyLen, + idx, embeddedData); + } + + return cmp; + } + + private int compareSuffixes( + byte[] searchKey, + int searchKeyOff, + int searchKeyLen, + int idx, + boolean embeddedData) { + + int myKeyOff = idx * fixedKeyLen; + int myKeyLen = 0; + + if (keys[myKeyOff] != NULL_KEY) { + + myKeyLen = keys[myKeyOff] - Byte.MIN_VALUE; + + myKeyOff += LENGTH_BYTES; + + if (embeddedData) { + myKeyLen = DupKeyData.getKeyLength( + keys, myKeyOff, myKeyLen); + } + } else { + assert(!embeddedData); + myKeyOff += LENGTH_BYTES; + } + + return Key.compareUnsignedBytes( + searchKey, searchKeyOff, searchKeyLen, + keys, myKeyOff, myKeyLen); + } + + @Override + public INKeyRep copy(int from, int to, int n, IN parent) { + System.arraycopy(keys, (from * fixedKeyLen), + keys, (to * fixedKeyLen), + n * fixedKeyLen); 
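+            /*
+             * Slots move as whole fixed-width blocks; overlapping ranges
+             * are safe because System.arraycopy behaves as if it copied
+             * through a temporary array.
+             */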
+ return this; + } + + @Override + public INKeyRep compact(@SuppressWarnings("unused") IN parent) { + /* It's as compact as it gets. */ + return this; + } + + @Override + public long calculateMemorySize() { + return MemoryBudget.MAX_KEY_SIZE_KEYVALS_OVERHEAD + + MemoryBudget.byteArraySize(keys.length); + } + + private static long calculateMemorySize(int maxKeys, int maxKeySize) { + return MemoryBudget.MAX_KEY_SIZE_KEYVALS_OVERHEAD + + MemoryBudget.byteArraySize(maxKeys * + (maxKeySize + LENGTH_BYTES)); + } + + @Override + public boolean accountsForKeyByteMemUsage() { + return true; + } + + @Override + void updateCacheStats(boolean increment, Evictor evictor) { + if (increment) { + evictor.getNINCompactKey().incrementAndGet(); + } else { + evictor.getNINCompactKey().decrementAndGet(); + } + } + } +} diff --git a/src/com/sleepycat/je/tree/INLongRep.java b/src/com/sleepycat/je/tree/INLongRep.java new file mode 100644 index 0000000..7bdefbb --- /dev/null +++ b/src/com/sleepycat/je/tree/INLongRep.java @@ -0,0 +1,439 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.tree; + +import java.util.Arrays; + +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.utilint.SizeofMarker; + +/** + * Holds an array of non-negative long values, one for each slot in an IN. + * + * Zero is the default value and is returned when no value has been set. + * + * The EMPTY_REP is used at first, and is mutated as necessary as values are + * set. A minimum number of bytes per value is used, based on the largest + * value passed to set(). + * + * Optionally, a sparse rep is used when a value is set for EMPTY_REP. Up to 4 + * values are stored along with their indexes. When the 5th values is set, the + * rep is mutated to the default rep. + * + * This object calls IN.updateMemorySize to track the memory it uses. + * EMPTY_REP uses no memory because it is a singleton. + */ +public abstract class INLongRep { + + public abstract long get(int idx); + public abstract INLongRep set(int idx, long val, IN parent); + public abstract INLongRep compact(IN parent, EmptyRep emptyRep); + public abstract INLongRep clear(IN parent, EmptyRep emptyRep); + public abstract boolean isEmpty(); + public abstract INLongRep copy(int from, int to, int n, IN parent); + public abstract INLongRep resize(int capacity); + public abstract long getMemorySize(); + + /** + * Initially empty (all values are zero) but will mutate as needed when + * non-zero values are passed to set(). + */ + public static class EmptyRep extends INLongRep { + + final int minLength; + final boolean allowSparseRep; + + public EmptyRep(int minLength, boolean allowSparseRep) { + this.minLength = minLength; + this.allowSparseRep = allowSparseRep; + } + + @Override + public INLongRep resize(int capacity) { + return this; + } + + @Override + public long get(int idx) { + return 0; + } + + /** + * When adding to the cache the EMPTY_REP is mutated into a + * DefaultRep. 
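+         * <p>
+         * For example (a sketch; {@code in} is the parent IN):
+         * <pre>
+         *     INLongRep rep = new INLongRep.EmptyRep(1, false);
+         *     rep = rep.set(3, 42L, in);  // now a DefaultRep, slot 3 == 42
+         * </pre>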
+ */ + @Override + public INLongRep set(int idx, long val, IN parent) { + + if (val == 0) { + return this; + } + + final INLongRep newCache; + + if (false /*TODO*/ && allowSparseRep) { + newCache = new SparseRep(minLength); + } else { + newCache = new DefaultRep(parent.getMaxEntries(), minLength); + } + + parent.updateMemorySize(getMemorySize(), newCache.getMemorySize()); + + return newCache.set(idx, val, parent); + } + + @Override + public INLongRep compact(IN parent, EmptyRep emptyRep) { + return this; + } + + @Override + public INLongRep clear(IN parent, EmptyRep emptyRep) { + return this; + } + + @Override + public boolean isEmpty() { + return true; + } + + @Override + public INLongRep copy(int from, int to, int n, IN parent) { + return this; + } + + /** + * An EMPTY_REP has no JE cache memory overhead because there is only + * one global instance. + */ + @Override + public long getMemorySize() { + return 0; + } + } + + public static class DefaultRep extends INLongRep { + + /** Maximum value indexed by number of bytes. */ + private static long[] MAX_VALUE = { + 0x0L, + 0xFFL, + 0xFFFFL, + 0xFFFFFFL, + 0xFFFFFFFFL, + 0xFFFFFFFFFFL, + 0xFFFFFFFFFFFFL, + 0xFFFFFFFFFFFFFFL, + 0x7FFFFFFFFFFFFFFFL, + }; + + private final byte[] byteArray; + final int bytesPerValue; + + public DefaultRep(int capacity, int nBytes) { + assert capacity >= 1; + assert nBytes >= 1; + assert nBytes <= 8; + + bytesPerValue = nBytes; + byteArray = new byte[capacity * bytesPerValue]; + } + + /* Only for use by the Sizeof utility. */ + public DefaultRep(@SuppressWarnings("unused") SizeofMarker marker) { + bytesPerValue = 0; + byteArray = null; + } + + private DefaultRep(byte[] byteArray, int bytesPerValue) { + this.byteArray = byteArray; + this.bytesPerValue = bytesPerValue; + } + + @Override + public DefaultRep resize(int capacity) { + return new DefaultRep( + Arrays.copyOfRange(byteArray, 0, capacity * bytesPerValue), + bytesPerValue); + } + + @Override + public long get(int idx) { + + int i = idx * bytesPerValue; + final int end = i + bytesPerValue; + + long val = (byteArray[i] & 0xFF); + + for (i += 1; i < end; i += 1) { + val <<= 8; + val |= (byteArray[i] & 0xFF); + } + + return val; + } + + /** + * Mutates to a DefaultRep with a larger number of bytes if necessary + * to hold the given value. + */ + @Override + public INLongRep set(int idx, long val, IN parent) { + + assert idx >= 0; + assert idx < byteArray.length / bytesPerValue; + assert val >= 0; + + /* + * If the value can't be represented using bytesPerValue, mutate + * to a cache with a larger number of bytes. + */ + if (val > MAX_VALUE[bytesPerValue]) { + + final int capacity = byteArray.length / bytesPerValue; + + INLongRep newRep; + + if (getClass() == SparseRep.class) { + newRep = new SparseRep(bytesPerValue + 1); + } else { + newRep = new DefaultRep(capacity, bytesPerValue + 1); + } + + parent.updateMemorySize( + getMemorySize(), newRep.getMemorySize()); + + /* + * Set new value in new cache, and copy other values from old + * cache. + */ + newRep = newRep.set(idx, val, parent); + + for (int i = 0; i < capacity; i += 1) { + if (i != idx) { + newRep = newRep.set(i, get(i), parent); + } + } + + return newRep; + } + + /* Set value in this cache. 
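+             * The slot is written big-endian: e.g. with bytesPerValue = 2,
+             * val = 0x0A0B ends up as the two bytes {0x0A, 0x0B}.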
*/ + int i = ((idx + 1) * bytesPerValue) - 1; + final int end = i - bytesPerValue; + + byteArray[i] = (byte) (val & 0xFF); + + for (i -= 1; i > end; i -= 1) { + val >>= 8; + byteArray[i] = (byte) (val & 0xFF); + } + + assert ((val & 0xFFFFFFFFFFFFFF00L) == 0) : val; + + return this; + } + + @Override + public INLongRep compact(IN parent, EmptyRep emptyRep) { + + if (isEmpty()) { + return clear(parent, emptyRep); + } + + return this; + } + + @Override + public INLongRep clear(IN parent, EmptyRep emptyRep) { + + parent.updateMemorySize( + getMemorySize(), emptyRep.getMemorySize()); + + return emptyRep; + } + + @Override + public boolean isEmpty() { + + for (byte b : byteArray) { + if (b != 0) { + return false; + } + } + + return true; + } + + @Override + public INLongRep copy(int from, int to, int n, IN parent) { + System.arraycopy(byteArray, + from * bytesPerValue, + byteArray, + to * bytesPerValue, + n * bytesPerValue); + return this; + } + + @Override + public long getMemorySize() { + return MemoryBudget.DEFAULT_LONG_REP_OVERHEAD + + MemoryBudget.byteArraySize(byteArray.length); + } + } + + public static class SparseRep extends DefaultRep { + + private static final int MAX_ENTRIES = 4; + + private final short[] idxs; + + public SparseRep(int nBytes) { + + super(MAX_ENTRIES, nBytes); + + idxs = new short[MAX_ENTRIES]; + Arrays.fill(idxs, (short) (-1)); + } + + /* Only for use by the Sizeof utility. */ + public SparseRep(@SuppressWarnings("unused") SizeofMarker marker) { + super(marker); + idxs = null; + } + + @Override + public SparseRep resize(int capacity) { + return this; + } + + @Override + public long get(int idx) { + + for (int i = 0; i < idxs.length; i += 1) { + if (idxs[i] == idx) { + return super.get(i); + } + } + + return 0; + } + + @Override + public INLongRep set(int idx, long val, IN parent) { + + int slot = -1; + + for (int i = 0; i < idxs.length; i++) { + + if (idxs[i] == idx) { + if (val == 0) { + idxs[i] = -1; + } + return super.set(i, val, parent); + } + + if (slot < 0 && idxs[i] == -1) { + slot = i; + } + } + + if (val == 0) { + return this; + } + + /* Have a free slot, use it. */ + if (slot >= 0) { + idxs[slot] = (short) idx; + return super.set(slot, val, parent); + } + + /* It's full, mutate it. */ + INLongRep newRep = + new DefaultRep(parent.getMaxEntries(), bytesPerValue); + + parent.updateMemorySize(getMemorySize(), newRep.getMemorySize()); + + for (int i = 0; i < idxs.length; i++) { + if (idxs[i] != -1) { + newRep = newRep.set(idxs[i], super.get(i), parent); + } + } + + return newRep.set(idx, val, parent); + } + + @Override + public INLongRep compact(IN parent, EmptyRep emptyRep) { + + if (isEmpty()) { + return clear(parent, emptyRep); + } + + return this; + } + + @Override + public INLongRep clear(IN parent, EmptyRep emptyRep) { + + parent.updateMemorySize( + getMemorySize(), emptyRep.getMemorySize()); + + return emptyRep; + } + + @Override + public boolean isEmpty() { + + for (short idx : idxs) { + if (idx != -1) { + return false; + } + } + + return true; + } + + @Override + public INLongRep copy(int from, int to, int n, IN parent) { + + INLongRep target = this; + + if ((to == from) || (n == 0)) { + /* Nothing to do */ + } else if (to < from) { + /* Copy ascending */ + for (int i = 0; i < n; i++) { + target = target.set(to++, get(from++), parent); + } + } else { + /* to > from. 
Copy descending */ + from += n; + to += n; + for (int i = 0; i < n; i++) { + target = target.set(--to, get(--from), parent); + } + } + + return target; + } + + @Override + public long getMemorySize() { + return super.getMemorySize() + + MemoryBudget.SPARSE_LONG_REP_OVERHEAD - + MemoryBudget.DEFAULT_KEYVALS_OVERHEAD + + MemoryBudget.shortArraySize(idxs.length); + } + } +} diff --git a/src/com/sleepycat/je/tree/INTargetRep.java b/src/com/sleepycat/je/tree/INTargetRep.java new file mode 100644 index 0000000..954c3dd --- /dev/null +++ b/src/com/sleepycat/je/tree/INTargetRep.java @@ -0,0 +1,368 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.tree; + +import java.util.Arrays; + +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.utilint.SizeofMarker; + +/** + * The abstract class that defines the various representations used to + * represent an array of target pointers to children of an IN node. These + * arrays can be sparse, so the non-default representations are designed to + * make efficient representations for the sparse cases. Each specialized + * representation is a subclass of INTargetReps. + * + * A new IN node starts out with the None representation and grows through a + * sparse into the full default representation. Subsequently, the default + * representation can be compacted into a Sparse or None representation + * whenever an IN is stripped. Note that representations do not currently move + * to more compact forms when entries are nulled to minimize the possibility of + * tansitionary representation changes, since each representation change has + * a cpu cost and a gc cost associated with it. + */ +public abstract class INTargetRep + extends INArrayRep { + + /* Single instance used for None rep. */ + public static final None NONE = new None(); + + /* Enumeration for the different types of supported representations. */ + public enum Type { DEFAULT, SPARSE, NONE } + + public INTargetRep() { + } + + /* The default non-sparse representation. It simply wraps an array. */ + public static class Default extends INTargetRep { + + /* The target nodes */ + private final Node[] targets; + + public Default(int capacity) { + this.targets = new Node[capacity]; + } + + /* Only for use by the Sizeof utility. 
*/ + public Default(@SuppressWarnings("unused") SizeofMarker marker) { + targets = null; + } + + private Default(Node[] targets) { + this.targets = targets; + } + + @Override + public Default resize(int capacity) { + return new Default(Arrays.copyOfRange(targets, 0, capacity)); + } + + @Override + public Type getType() { + return Type.DEFAULT; + } + + @Override + public Node get(int idx) { + return targets[idx]; + } + + @Override + public INTargetRep set(int idx, Node node, IN parent) { + targets[idx] = node; + return this; + } + + @Override + public INTargetRep copy(int from, int to, int n, IN parent) { + System.arraycopy(targets, from, targets, to, n); + return this; + } + + @Override + public INTargetRep compact(IN parent) { + int count = 0; + for (Node target : targets) { + if (target != null) { + count++; + } + } + + if ((count > Sparse.MAX_ENTRIES) || + (targets.length > Sparse.MAX_INDEX)) { + return this; + } + + INTargetRep newRep = null; + if (count == 0) { + newRep = NONE; + } else { + newRep = new Sparse(targets.length); + for (int i=0; i < targets.length; i++) { + if (targets[i] != null) { + newRep.set(i, targets[i], parent); + } + } + } + + noteRepChange(newRep, parent); + return newRep; + } + + @Override + public long calculateMemorySize() { + return MemoryBudget.DEFAULT_TARGET_ENTRY_OVERHEAD + + MemoryBudget.objectArraySize(targets.length); + } + + @Override + public void updateCacheStats(@SuppressWarnings("unused") + boolean increment, + @SuppressWarnings("unused") + Evictor evictor) { + /* No stats for this default rep. */ + } + } + + /** + * Representation used when 1-4 children are cached. Note that the IN + * itself may have more children, but they are not currently cached. + * The INArrayRep is represented by two parallel arrays: an array of + * indices (idxs) and an array of values (targets). All elements that are + * not explicitly represented are null. + */ + public static class Sparse extends INTargetRep { + + /* The maximum number of entries that can be represented. */ + public static final int MAX_ENTRIES = 4; + + /* The maximum index that can be represented. */ + public static final int MAX_INDEX = Short.MAX_VALUE; + + /* + * The parallel arrays implementing the INArrayRep. + */ + final short idxs[] = new short[MAX_ENTRIES]; + final Node targets[] = new Node[MAX_ENTRIES]; + + public Sparse(int capacity) { + + /* Unroll initialization. */ + idxs[0] = idxs[1] = idxs[2] = idxs[3] = -1; + } + + /* Only for use by the Sizeof utility. */ + public Sparse(@SuppressWarnings("unused") SizeofMarker marker) { + } + + @Override + public Sparse resize(int capacity) { + return this; + } + + @Override + public Type getType() { + return Type.SPARSE; + } + + @Override + public Node get(int j) { + assert (j >= 0) && (j <= MAX_INDEX); + + /* Unrolled for loop */ + if (idxs[0] == j) { + return targets[0]; + } + if (idxs[1] == j) { + return targets[1]; + } + if (idxs[2] == j) { + return targets[2]; + } + if (idxs[3] == j) { + return targets[3]; + } + return null; + } + + @Override + public INTargetRep set(int j, Node node, IN parent) { + + assert (j >= 0) && (j <= MAX_INDEX); + + int slot = -1; + for (int i=0; i < targets.length; i++) { + + if (idxs[i] == j) { + targets[i] = node; + return this; + } + + if ((slot < 0) && (targets[i] == null)) { + slot = i; + } + } + + if (node == null) { + return this; + } + + /* Have a free slot, use it. */ + if (slot >= 0) { + targets[slot] = node; + idxs[slot] = (short)j; + return this; + } + + /* It's full, mutate it. 
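+             * All four slots already hold non-null targets, so grow to the
+             * Default rep (sized to the parent's capacity) and re-insert
+             * the cached children before adding the new one.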
*/ + Default fe = new Default(parent.getMaxEntries()); + noteRepChange(fe, parent); + + for (int i=0; i < targets.length; i++) { + if (targets[i] != null) { + fe.set(idxs[i], targets[i], parent); + } + } + + return fe.set(j, node, parent); + } + + @Override + public INTargetRep copy(int from, int to, int n, IN parent) { + + INTargetRep target = this; + + if ((to == from) || (n == 0)) { + /* Nothing to do */ + } else if (to < from) { + /* Copy ascending */ + for (int i = 0; i < n; i++) { + target = target.set(to++, get(from++), parent); + } + } else { + /* to > from. Copy descending */ + from += n; + to += n; + for (int i = 0; i < n; i++) { + target = target.set(--to, get(--from), parent); + } + } + return target; + } + + @Override + public INTargetRep compact(IN parent) { + int count = 0; + for (Node target : targets) { + if (target != null) { + count++; + } + } + if (count == 0) { + None newRep = NONE; + noteRepChange(newRep, parent); + return newRep; + } + return this; + } + + @Override + public long calculateMemorySize() { + /* + * Note that fixed array sizes are already accounted for in the + * SPARSE_TARGET_ENTRY_OVERHEAD computed vis Sizeof. + */ + return MemoryBudget.SPARSE_TARGET_ENTRY_OVERHEAD; + } + + @Override + public void updateCacheStats(boolean increment, Evictor evictor) { + if (increment) { + evictor.getNINSparseTarget().incrementAndGet(); + } else { + evictor.getNINSparseTarget().decrementAndGet(); + } + } + } + + /** + * Representation used when an IN has no children cached. + */ + public static class None extends INTargetRep { + + private None() { + } + + /* Only for use by the Sizeof utility. */ + public None(@SuppressWarnings("unused") SizeofMarker marker) { + } + + @Override + public None resize(int capacity) { + return this; + } + + @Override + public Type getType() { + return Type.NONE; + } + + @Override + public Node get(@SuppressWarnings("unused") int idx) { + return null; + } + + @Override + public INTargetRep set(int idx, Node node, IN parent) { + + if (node == null) { + return this; + } + + INTargetRep targets = new Sparse(parent.getMaxEntries()); + noteRepChange(targets, parent); + return targets.set(idx, node, parent); + } + + @Override + public INTargetRep copy(@SuppressWarnings("unused") int from, + @SuppressWarnings("unused") int to, + @SuppressWarnings("unused") int n, + @SuppressWarnings("unused") IN parent) { + /* Nothing to copy. */ + return this; + } + + @Override + public INTargetRep compact(IN parent) { + return this; + } + + @Override + public long calculateMemorySize() { + /* A single static instance is used. */ + return 0; + } + + @Override + public void updateCacheStats(boolean increment, Evictor evictor) { + if (increment) { + evictor.getNINNoTarget().incrementAndGet(); + } else { + evictor.getNINNoTarget().decrementAndGet(); + } + } + } +} diff --git a/src/com/sleepycat/je/tree/Key.java b/src/com/sleepycat/je/tree/Key.java new file mode 100644 index 0000000..5ac80f5 --- /dev/null +++ b/src/com/sleepycat/je/tree/Key.java @@ -0,0 +1,340 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import java.util.Comparator; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.utilint.StringUtils; + +/** + * Key represents a JE B-Tree Key. Keys are immutable. Within JE, keys are + * usually represented as byte arrays rather than as Key instances in order to + * reduce the in-memory footprint. The static methods of this class are used to + * operate on the byte arrays. + * + * One exception is when keys are held within a collection. In that case, Key + * objects are instantiated so that keys are hashed and compared by value. + */ +public final class Key implements Comparable { + + public abstract static class DumpType { + + private String name; + + private DumpType(String name) { + this.name = name; + } + + public static final DumpType BINARY = new DumpType("BINARY") { + @Override + void dumpByteArrayInternal(StringBuilder sb, byte[] b) { + for (int i = 0; i < b.length; i++) { + sb.append(b[i] & 0xFF).append(" "); + } + } + }; + + public static final DumpType HEX = new DumpType("HEX") { + @Override + void dumpByteArrayInternal(StringBuilder sb, byte[] b) { + for (int i = 0; i < b.length; i++) { + sb.append(Integer.toHexString(b[i] & 0xFF)). + append(" "); + } + } + }; + + public static final DumpType TEXT = new DumpType("TEXT") { + @Override + void dumpByteArrayInternal(StringBuilder sb, byte[] b) { + sb.append(StringUtils.fromUTF8(b)); + } + }; + + public static final DumpType OBFUSCATE = new DumpType("OBFUSCATE") { + @Override + void dumpByteArrayInternal(StringBuilder sb, byte[] b) { + int len = b.length; + sb.append("[").append(len). + append(len == 1 ? " byte]" : " bytes]"); + } + }; + + public String dumpByteArray(byte[] b) { + StringBuilder sb = new StringBuilder(); + if (b != null) { + dumpByteArrayInternal(sb, b); + } else { + sb.append("null"); + } + return sb.toString(); + } + + @Override + public String toString() { + return name; + } + + abstract void dumpByteArrayInternal(StringBuilder sb, byte[] b); + } + + public static DumpType DUMP_TYPE = DumpType.BINARY; + + public static final byte[] EMPTY_KEY = new byte[0]; + + private byte[] key; + + /** + * Construct a new key from a byte array. + */ + public Key(byte[] key) { + if (key == null) { + this.key = null; + } else { + this.key = new byte[key.length]; + System.arraycopy(key, 0, this.key, 0, key.length); + } + } + + public static byte[] makeKey(DatabaseEntry dbt) { + byte[] entryKey = dbt.getData(); + if (entryKey == null) { + return EMPTY_KEY; + } else { + byte[] newKey = new byte[dbt.getSize()]; + System.arraycopy(entryKey, dbt.getOffset(), newKey, + 0, dbt.getSize()); + return newKey; + } + } + + /** + * Get the byte array for the key. + */ + public byte[] getKey() { + return key; + } + + /** + * Compare two keys. Standard compareTo function and returns. + * + * Note that any configured user comparison function is not used, and + * therefore this method should not be used for comparison of keys during + * Btree operations. + */ + public int compareTo(Key argKey) { + return compareUnsignedBytes(this.key, argKey.key); + } + + /** + * Support Set of Key in BINReference. + */ + @Override + public boolean equals(Object o) { + return (o instanceof Key) && (compareTo((Key)o) == 0); + } + + /** + * Support HashSet of Key in BINReference. + */ + @Override + public int hashCode() { + int code = 0; + for (int i = 0; i < key.length; i += 1) { + code += key[i]; + } + return code; + } + + /** + * Compare keys with an optional comparator. 
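+     * <p>
+     * With a null comparator the default unsigned ordering applies, e.g.
+     * (illustrative):
+     * <pre>
+     *     // positive: 0xFF compares greater than 0x01 unsigned
+     *     compareKeys(new byte[] {(byte) 0xFF}, 0, 1,
+     *                 new byte[] {0x01}, 0, 1, null);
+     * </pre>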
+ */ + public static int compareKeys(byte[] key1, + int off1, + int len1, + byte[] key2, + int off2, + int len2, + Comparator comparator) { + if (comparator == null) { + return compareUnsignedBytes(key1, off1, len1, + key2, off2, len2); + } + if (off1 != 0 || len1 != key1.length) { + final byte[] b = new byte[len1]; + System.arraycopy(key1, off1, b, 0, len1); + key1 = b; + } + if (off2 != 0 || len2 != key2.length) { + final byte[] b = new byte[len2]; + System.arraycopy(key2, off2, b, 0, len2); + key2 = b; + } + return comparator.compare(key1, key2); + } + + /** + * Compare keys with an optional comparator. + */ + public static int compareKeys(byte[] key1, + byte[] key2, + Comparator comparator) { + if (comparator != null) { + return comparator.compare(key1, key2); + } else { + return compareUnsignedBytes(key1, key2); + } + } + + /** + * Compare keys with an optional comparator. + */ + public static int compareKeys(DatabaseEntry entry1, + DatabaseEntry entry2, + Comparator comparator) { + byte[] key1 = Key.makeKey(entry1); + byte[] key2 = Key.makeKey(entry2); + if (comparator != null) { + return comparator.compare(key1, key2); + } else { + return compareUnsignedBytes(key1, key2); + } + } + + /** + * Compare using a default unsigned byte comparison. + */ + private static int compareUnsignedBytes(byte[] key1, byte[] key2) { + return compareUnsignedBytes(key1, 0, key1.length, + key2, 0, key2.length); + } + + /** + * Compare using a default unsigned byte comparison. + */ + public static int compareUnsignedBytes(byte[] key1, + int off1, + int len1, + byte[] key2, + int off2, + int len2) { + int limit = Math.min(len1, len2); + + for (int i = 0; i < limit; i++) { + byte b1 = key1[i + off1]; + byte b2 = key2[i + off2]; + if (b1 == b2) { + continue; + } else { + + /* + * Remember, bytes are signed, so convert to shorts so that we + * effectively do an unsigned byte comparison. + */ + return (b1 & 0xff) - (b2 & 0xff); + } + } + + return (len1 - len2); + } + + /* + * Return the length of the common prefix between 2 keys. The 1st key + * consists of the first "a1Len" bytes of "key1". The second key is + * "key2". + */ + public static int getKeyPrefixLength(byte[] key1, int a1Len, byte[] key2) { + assert key1 != null && key2 != null; + + int a2Len = key2.length; + + int limit = Math.min(a1Len, a2Len); + + for (int i = 0; i < limit; i++) { + byte b1 = key1[i]; + byte b2 = key2[i]; + if (b1 != b2) { + return i; + } + } + + return limit; + } + + /* + * Return a new byte[] containing the common prefix of key1 and key2. + * Return null if there is no common prefix. + */ + public static byte[] createKeyPrefix(byte[] key1, byte[] key2) { + + int len = getKeyPrefixLength(key1, key1.length, key2); + if (len == 0) { + return null; + } + + byte[] ret = new byte[len]; + System.arraycopy(key1, 0, ret, 0, len); + + return ret; + } + + public static String dumpString(byte[] key, int nspaces) { + + return dumpString(key, "key", nspaces); + } + + public static String dumpString(byte[] key, String xmltag, int nspaces) { + + StringBuilder sb = new StringBuilder(); + + sb.append(TreeUtils.indent(nspaces)); + sb.append("<").append(xmltag).append(" v=\""); + + sb.append(getNoFormatString(key)); + + sb.append("\"/>"); + + return sb.toString(); + } + + /** + * Print the string w/out XML format. 
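+     * <p>
+     * For example, with the default BINARY dump type the key {1, 2} prints
+     * as "1 2 " (unsigned byte values separated by spaces).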
+ */ + public static String getNoFormatString(byte[] key) { + + StringBuilder sb = new StringBuilder(); + + if (DUMP_TYPE == DumpType.BINARY || + DUMP_TYPE == DumpType.HEX) { + if (key == null) { + sb.append(""); + } else { + sb.append(DUMP_TYPE.dumpByteArray(key)); + } + } else if (DUMP_TYPE == DumpType.TEXT) { + sb.append(key == null ? "" : StringUtils.fromUTF8(key)); + } else if (DUMP_TYPE == DumpType.OBFUSCATE) { + if (key == null) { + sb.append(""); + } else { + int len = key.length; + sb.append("[").append(len); + sb.append(len == 1 ? " byte]" : " bytes]"); + } + } + + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/tree/LN.java b/src/com/sleepycat/je/tree/LN.java new file mode 100644 index 0000000..ee9b4bb --- /dev/null +++ b/src/com/sleepycat/je/tree/LN.java @@ -0,0 +1,1172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.LogParams; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.Provisional; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.VersionedWriteLoggable; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.WriteLockInfo; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.SizeofMarker; +import com.sleepycat.je.utilint.VLSN; + +/** + * An LN represents a Leaf Node in the JE tree. + */ +public class LN extends Node implements VersionedWriteLoggable { + + private static final String BEGIN_TAG = ""; + private static final String END_TAG = ""; + + /** + * The log version of the most recent format change for this loggable. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + private byte[] data; + + /* + * Flags: bit fields + * + * -Dirty means that the in-memory version is not present on disk. + */ + private static final int DIRTY_BIT = 0x80000000; + private static final int CLEAR_DIRTY_BIT = ~DIRTY_BIT; + private static final int FETCHED_COLD_BIT = 0x40000000; + private int flags; // not persistent + + /** + * Create an empty LN, to be filled in from the log. If VLSNs are + * preserved for this environment, a VersionedLN will be created instead. 
+ */ + public LN() { + this.data = null; + } + + /** + * Create a new LN from a byte array. Pass a null byte array to create a + * deleted LN. + * + * Does NOT copy the byte array, so after calling this method the array is + * "owned" by the Btree and should not be modified. + */ + public static LN makeLN(EnvironmentImpl envImpl, byte[] dataParam) { + if (envImpl.getPreserveVLSN()) { + return new VersionedLN(dataParam); + } + return new LN(dataParam); + } + + /** + * Create a new LN from a DatabaseEntry. Makes a copy of the byte array. + */ + public static LN makeLN(EnvironmentImpl envImpl, DatabaseEntry dbt) { + if (envImpl.getPreserveVLSN()) { + return new VersionedLN(dbt); + } + return new LN(dbt); + } + + /** + * Does NOT copy the byte array, so after calling this method the array is + * "owned" by the Btree and should not be modified. + */ + LN(final byte[] data) { + + if (data == null) { + this.data = null; + } else if (data.length == 0) { + this.data = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } else { + this.data = data; + } + + setDirty(); + } + + /** + * Makes a copy of the byte array. + */ + LN(DatabaseEntry dbt) { + byte[] dat = dbt.getData(); + if (dat == null) { + data = null; + } else if (dbt.getPartial()) { + init(dat, + dbt.getOffset(), + dbt.getPartialOffset() + dbt.getSize(), + dbt.getPartialOffset(), + dbt.getSize()); + } else { + init(dat, dbt.getOffset(), dbt.getSize()); + } + setDirty(); + } + + /** For Sizeof. */ + public LN(SizeofMarker marker, DatabaseEntry dbt) { + this(dbt); + } + + private void init(byte[] data, int off, int len, int doff, int dlen) { + if (len == 0) { + this.data = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } else { + this.data = new byte[len]; + System.arraycopy(data, off, this.data, doff, dlen); + } + } + + private void init(byte[] data, int off, int len) { + init(data, off, len, 0, len); + } + + public byte[] getData() { + return data; + } + + public boolean isDeleted() { + return (data == null); + } + + @Override + public boolean isLN() { + return true; + } + + void makeDeleted() { + data = null; + } + + public boolean isDirty() { + return ((flags & DIRTY_BIT) != 0); + } + + public void setDirty() { + flags |= DIRTY_BIT; + } + + public void clearDirty() { // TODO make private + flags &= CLEAR_DIRTY_BIT; + } + + public boolean getFetchedCold() { + return ((flags & FETCHED_COLD_BIT) != 0); + } + + public void setFetchedCold(boolean val) { + if (val) { + flags |= FETCHED_COLD_BIT; + } else { + flags &= ~FETCHED_COLD_BIT; + } + } + + @Override + public void postFetchInit(DatabaseImpl db, long sourceLsn) { + super.postFetchInit(db, sourceLsn); + + /* + * This flag is initially true for a fetched LN, and will be set to + * false if the LN is accessed with any CacheMode other than UNCHANGED. + */ + setFetchedCold(true); + } + + /** + * Called by CursorImpl to get the record version. + * + * If VLSNs are not preserved for this environment, returns -1 which is the + * sequence for VLSN.NULL_VLSN. + * + * If VLSNs are preserved for this environment, this method is overridden + * by VersionedLN which returns the VLSN sequence. + */ + public long getVLSNSequence() { + return VLSN.NULL_VLSN_SEQUENCE; + } + + /** + * Called by LogManager after writing an LN with a newly assigned VLSN, and + * called by LNLogEntry after reading the LN with the VLSN from the log + * entry header. + * + * If VLSNs are not preserved for this environment, does nothing. 
+ * + * If VLSNs are preserved for this environment, this method is overridden + * by VersionedLN which stores the VLSN sequence. + */ + public void setVLSNSequence(long seq) { + /* Do nothing. */ + } + + /* + * If you get to an LN, this subtree isn't valid for delete. True, the LN + * may have been deleted, but you can't be sure without taking a lock, and + * the validate -subtree-for-delete process assumes that bin compressing + * has happened and there are no committed, deleted LNS hanging off the + * BIN. + */ + @Override + boolean isValidForDelete() { + return false; + } + + /** + * Returns true by default, but is overridden by MapLN to prevent eviction + * of open databases. This method is meant to be a guaranteed check and is + * used after a BIN has been selected for LN stripping but before actually + * stripping an LN. [#13415] + * @throws DatabaseException from subclasses. + */ + boolean isEvictable(long lsn) + throws DatabaseException { + + return true; + } + + public void delete() { + makeDeleted(); + setDirty(); + } + + public void modify(byte[] newData) { + data = newData; + setDirty(); + } + + /** + * Sets data to empty and returns old data. Called when converting an old + * format LN in a duplicates DB. + */ + public byte[] setEmpty() { + final byte[] retVal = data; + data = Key.EMPTY_KEY; + return retVal; + } + + /** + * Add yourself to the in memory list if you're a type of node that should + * belong. + */ + @Override + void rebuildINList(INList inList) { + /* + * Don't add, LNs don't belong on the list. + */ + } + + /** + * Compute the approximate size of this node in memory for evictor + * invocation purposes. + */ + @Override + public long getMemorySizeIncludedByParent() { + int size = MemoryBudget.LN_OVERHEAD; + if (data != null) { + size += MemoryBudget.byteArraySize(data.length); + } + return size; + } + + /** + * Release the memory budget for any objects referenced by this + * LN. For now, only release treeAdmin memory, because treeMemory + * is handled in aggregate at the IN level. Over time, transition + * all of the LN's memory budget to this, so we update the memory + * budget counters more locally. Called when we are releasing a LN + * for garbage collection. + */ + public void releaseMemoryBudget() { + // nothing to do for now, no treeAdmin memory + } + + public long getTreeAdminMemory() { + return 0; + } + + /* + * Dumping + */ + + public String beginTag() { + return BEGIN_TAG; + } + + public String endTag() { + return END_TAG; + } + + @Override + public String dumpString(int nSpaces, boolean dumpTags) { + StringBuilder self = new StringBuilder(); + if (dumpTags) { + self.append(TreeUtils.indent(nSpaces)); + self.append(beginTag()); + self.append('\n'); + } + + self.append(super.dumpString(nSpaces + 2, true)); + self.append('\n'); + if (data != null) { + self.append(TreeUtils.indent(nSpaces+2)); + self.append(""); + self.append(Key.DUMP_TYPE.dumpByteArray(data)); + self.append(""); + self.append('\n'); + } + if (dumpTags) { + self.append(TreeUtils.indent(nSpaces)); + self.append(endTag()); + } + return self.toString(); + } + + /* + * Logging Support + */ + + /** + * Convenience logging method. See logInternal. + * + * For a deferred-write database, the logging will not actually occur and + * a transient LSN will be created and returned if the currLsn is NULL; + * otherwise the currLsn is returned. + * + * However, if the embeddedness of the LN changes, we must create a real + * logrec for the op. 
The following scenario is an example of what can + * go wrong if we don't log embeddedness changes: + * + * - R1 exists before DB is opened in DW mode. + * - R1 is embedded, so its on-disk image has been counted obsolete and + * might have been deleted already. + * - R1 is updated in DW mode, and its new version, R2, is not embedded + * (and not logged). Furthermore, the slot LSN points to R1 still. + * - R2 gets evicted and logged. As a result, R1 will be counted as + * obsolete again, because the slot points to R1 and says R1 is not + * embedded. + * + * If we do log embeddedness changes, then in the above scenario, R2 will + * be logged and this logging will not count R1 as obsolete, because the + * embedded flag in the slot still says that R1 is embedded. After R2 is + * logged, the slot is updated (both the slot LSN and the embedded flag). + * + * In general, the embedded flag in the slot must be in accord with the + * embedded flag in the last logged version of a record, even if multiple + * updates were done on the record since the last time it was logged and + * the current logging event. + */ + public LogItem optionalLog( + EnvironmentImpl envImpl, + DatabaseImpl dbImpl, + Locker locker, + WriteLockInfo writeLockInfo, + boolean newEmbeddedLN, + byte[] newKey, + int newExpiration, + boolean newExpirationInHours, + boolean currEmbeddedLN, + long currLsn, + int currSize, + boolean isInsertion, + ReplicationContext repContext) + throws DatabaseException { + + if (dbImpl.isDeferredWriteMode() && currEmbeddedLN == newEmbeddedLN) { + final LogItem item = new LogItem(); + item.lsn = assignTransientLsn(envImpl, dbImpl, currLsn, locker); + item.size = -1; + return item; + } else { + return logInternal( + envImpl, dbImpl, locker, writeLockInfo, + newEmbeddedLN, newKey, newExpiration, newExpirationInHours, + currEmbeddedLN, currLsn, currSize, + isInsertion, false /*backgroundIO*/, repContext); + } + } + + /** + * Convenience logging method, used to migrate an LN during cleaning. + * See logInternal. + */ + public LogItem log( + EnvironmentImpl envImpl, + DatabaseImpl dbImpl, + Locker locker, + WriteLockInfo writeLockInfo, + boolean newEmbeddedLN, + byte[] newKey, + int newExpiration, + boolean newExpirationInHours, + boolean currEmbeddedLN, + long currLsn, + int currSize, + boolean isInsertion, + boolean backgroundIO, + ReplicationContext repContext) + throws DatabaseException { + + return logInternal( + envImpl, dbImpl, locker, writeLockInfo, + newEmbeddedLN, newKey, newExpiration, newExpirationInHours, + currEmbeddedLN, currLsn, currSize, + isInsertion, backgroundIO, repContext); + } + + /** + * Generate and write to the log a logrec describing an operation O that + * is being performed on a record R with key K. O may be an insertion, + * update, deletion, migration, or in the case of a DW DB, an eviction + * or checkpoint of a dirty LN. + * + * Let T be the locker performing O. T is null in case of DW eviction/ckpt. + * Otherwise, T holds a lock on R and it will keep that lock until it + * terminates. In case of a CUD op, the lock is an exclusive one; in + * case of LN migration, it's a shared one (and T is non-transactional). + * + * - Let Rc be the current version of R (before O). The absence of R from + * the DB is considered as a special "deleted" version. Rc may be the + * deleted version. + * - If T is a Txn, let Ra be the version of R before T write-locked R. Ra + * may be the deleted version. Ra and Rc will be the same if O is the + * very 1st op on R by T. 
+ * - Let Rn be R's new version (after O). Rc and Rn will be the same if O + * is migration or DW eviction/ckpt. + * + * - Let Ln be the LSN of the logrec that will be generated here to + * describe O. + * - Let Lc be the current LSN value in R's slot, or NULL if no such slot + * exists currently. If an R slot exists, then for a non-DW DB, Lc points + * to Rc, or may be NULL if Rc is the deleted version. But for a DW DB, + * Lc may point to an older version than Rc, or it may be transient. + * - If T is a Txn, let La be the LSN value in R's slot at the time T + * write-locked R, or NULL if no such slot existed at that time. + * + * @param isInsertion Whether this CUDop is an insertion (possibly with + * slot reuse. + * + * @param locker The locker T. If non-null, a write lock will be acquired + * by T on Ln's LSN. + * + * WARNING: Be sure to pass null for the locker param if the new LSN should + * not be locked. + * + * @param writeLockInfo It is non-null if and only if T is a Txn. It + * contains info that must be included in Ln to make it undoable if T + * aborts. Specifically, it contains: + * + * - abortKD : True if Ra is the deleted version; false otherwise. + * - abortLSN : The La LSN as defined above. + * - abortKey : The key of Ra, if Ra was embedded in the parent BIN and + * the containing DB allows key updates. + * - abortData : The data of Ra, if Ra was embedded in the parent BIN. + * + * When the new LSN is write-locked, a new WriteLockInfo is created and + * the above info is copied into it. Normally this parameter should be + * obtained from the prepareForInsert or prepareForUpdate method of + * CursorImpl.LockStanding. + * + * @param newEmbeddedLN Whether Rn will be embedded into the parent BIN. + * If true, Ln will be counted as an "immediately obsolete" logrec. + * + * @param newKey Rn's key. Note: Rn's data is not passed as a parameter to + * this method because it is stored in this LN. Rn (key and data) will be + * stored in Ln. Rn's key will also be stored in the parent BIN, and if + * newEmbeddedLN is true, Rn's data too will be stored there. + * + * @param newExpiration the new expiration time in days or hours. + * + * @param newExpirationInHours whether the new expiration time is in hours. + * + * @param currEmbeddedLN Whether Rc's data is embedded into the parent + * BIN. If true, Lc has already been counted obsolete. + * + * @param currLsn The Lc LSN as defined above. Is given as a param to this + * method to count the associated logrec as obsolete (which must done under + * the LWL), if it has not been counted already. + * + * @param currSize The size of Lc (needed for obsolete counting). + * + * @param isInsertion True if the operation is an insertion (including + * slot reuse). False otherwise. + */ + private LogItem logInternal( + final EnvironmentImpl envImpl, + final DatabaseImpl dbImpl, + final Locker locker, + final WriteLockInfo writeLockInfo, + final boolean newEmbeddedLN, + final byte[] newKey, + final int newExpiration, + final boolean newExpirationInHours, + final boolean currEmbeddedLN, + final long currLsn, + final int currSize, + final boolean isInsertion, + final boolean backgroundIO, + final ReplicationContext repContext) + throws DatabaseException { + + assert(getClass() == LN.class || + getClass() == VersionedLN.class || + !newEmbeddedLN); + + if (envImpl.isReadOnly()) { + /* Returning a NULL_LSN will not allow locking. 
*/
+            throw EnvironmentFailureException.unexpectedState(
+                "Cannot log LNs in read-only env.");
+        }
+
+        /*
+         * Check that a replicated txn is used for writing to a replicated DB,
+         * and a non-replicated locker is used for writing to a
+         * non-replicated DB. This is critical for avoiding corruption when HA
+         * failover occurs [#23234] [#23330].
+         *
+         * Two cases are exempt from this rule:
+         *
+         * - The locker is null only when performing internal logging (not a
+         *   user operation), such as cleaner migration and deferred-write
+         *   logging. This is always non-transactional and non-replicated, so
+         *   we can skip this check. Note that the cleaner may migrate an LN
+         *   in a replicated DB, but this is not part of the rep stream.
+         *
+         * - Only NameLNs that identify replicated DBs are replicated, not
+         *   all NameLNs in the naming DB, so the naming DB is exempt.
+         *
+         * This guard should never fire because of two checks made prior to
+         * logging:
+         *
+         * - When a user txn in a replicated environment is not configured for
+         *   local-write and a write operation is attempted (or when the
+         *   opposite is true), the Cursor class will throw
+         *   UnsupportedOperationException. See Locker.isLocalWrite.
+         *
+         * - On a replica, writes to replicated DBs are disallowed even when
+         *   local-write is false. This is enforced by the ReadonlyTxn class
+         *   which throws ReplicaWriteException in this case.
+         */
+        final boolean isNamingDB = dbImpl.getId().equals(DbTree.NAME_DB_ID);
+
+        if (!isNamingDB &&
+            envImpl.isReplicated() &&
+            locker != null &&
+            dbImpl.isReplicated() != locker.isReplicated()) {
+
+            throw EnvironmentFailureException.unexpectedState(
+                (locker.isReplicated() ?
+                 "Rep txn used to write to non-rep DB" :
+                 "Non-rep txn used to write to rep DB") +
+                ", class = " + locker.getClass().getName() +
+                ", txnId = " + locker.getId() +
+                ", dbName = " + dbImpl.getDebugName());
+        }
+
+        /*
+         * As an additional safeguard, check that a replicated txn is used when
+         * the operation is part of the rep stream, and that the inverse is
+         * also true. The naming DB is exempt for the same reason as above.
+         */
+        if (!isNamingDB) {
+
+            boolean isRepLocker = (locker != null) && locker.isReplicated();
+
+            if (repContext.inReplicationStream() != isRepLocker) {
+                throw EnvironmentFailureException.unexpectedState(
+                    (isRepLocker ?
+                     "Rep txn used to write outside of rep stream" :
+                     "Non-rep txn used to write in rep stream") +
+                    ((locker != null) ?
+ (", class = " + locker.getClass().getName() + + ", txnId = " + locker.getId()) : + ", null locker") + + ", dbName = " + dbImpl.getDebugName()); + } + } + + LogEntryType entryType; + Txn txn = null; + long abortLsn = DbLsn.NULL_LSN; + boolean abortKD = false; + byte[] abortKey = null; + byte[] abortData = null; + long abortVLSN = VLSN.NULL_VLSN_SEQUENCE; + int abortExpiration = 0; + boolean abortExpirationInHours = false; + + LogParams params = new LogParams(); + + if (locker != null && locker.isTransactional()) { + + entryType = getLogType(isInsertion, true, dbImpl); + + txn = locker.getTxnLocker(); + assert(txn != null); + + abortLsn = writeLockInfo.getAbortLsn(); + abortKD = writeLockInfo.getAbortKnownDeleted(); + abortKey = writeLockInfo.getAbortKey(); + abortData = writeLockInfo.getAbortData(); + abortVLSN = writeLockInfo.getAbortVLSN(); + abortExpiration = writeLockInfo.getAbortExpiration(); + abortExpirationInHours = writeLockInfo.isAbortExpirationInHours(); + + params.obsoleteDupsAllowed = locker.isRolledBack(); + + } else { + entryType = getLogType(isInsertion, false, dbImpl); + } + + params.entry = createLogEntry( + entryType, dbImpl, txn, + abortLsn, abortKD, abortKey, abortData, abortVLSN, + abortExpiration, abortExpirationInHours, + newKey, newEmbeddedLN, newExpiration, newExpirationInHours, + repContext); + + /* + * Always log temporary DB LNs as provisional. This prevents the + * possibility of a FileNotFoundException during recovery, since + * temporary DBs are not checkpointed. And it speeds recovery -- + * temporary DBs are removed during recovery anyway. + */ + params.provisional = + (dbImpl.isTemporary() ? Provisional.YES : Provisional.NO); + + /* + * Dedice whether to count the current record version as obsolete. + * Rc should not be counted as obsolete if: + * (a) Rc == Ra; Ra (i.e. abortLsn) will be counted obsolete during + * commit, or + * (b) Rc was counted earlier as an "immediately obsolete" logrec. + * This includes the cases where the DB is a dups DB, or the current + * op is an insertion (which implies Rc is a deletion and as such has + * been counted already) or Rc is embedded. + */ + if (currLsn != abortLsn && + !dbImpl.isLNImmediatelyObsolete() && + !isInsertion && + !currEmbeddedLN) { + + params.oldLsn = currLsn; + params.oldSize = currSize; + } + + params.repContext = repContext; + params.backgroundIO = backgroundIO; + params.nodeDb = dbImpl; + + /* Save obsolete size information to be used during commit. */ + if (txn != null && currLsn == abortLsn) { + writeLockInfo.setAbortLogSize(currSize); + } + + LogItem item; + try { + if (txn != null) { + + /* + * Writing an LN_TX entry requires looking at the Txn's + * lastLoggedTxn. The Txn may be used by multiple threads so + * ensure that the view we get is consistent. [#17204] + */ + synchronized (txn) { + item = envImpl.getLogManager().log(params); + } + } else { + item = envImpl.getLogManager().log(params); + } + } catch (Throwable e) { + /* + * If any exception occurs while logging an LN, ensure that the + * environment is invalidated. This will also ensure that the txn + * cannot be committed. + */ + if (envImpl.isValid()) { + throw new EnvironmentFailureException( + envImpl, EnvironmentFailureReason.LOG_INCOMPLETE, + "LN could not be logged", e); + } else { + throw e; + } + } finally { + + /* + * Guarantee that if logging fails, we won't have a dirty LN in + * the Btree. This avoids incorrect assertions in other threads. 
+ */ + clearDirty(); + } + + /** + * Lock the new LSN immediately after logging, with the BIN latched. + * Lock non-blocking, since no contention is possible on the new LSN. + * If the locker is transactional, a new WriteLockInfo is created for + * the new LSN and stored in the locker. lockResult points to that + * WriteLockInfo. Since this new WriteLockInfo and the WriteLockInfo + * given as input to this method refer to the same logical record, + * the info from the given WriteLockInfo is copied to the new one. + */ + if (locker != null) { + final long newLsn = item.lsn; + + final LockResult lockResult = locker.nonBlockingLock( + newLsn, LockType.WRITE, false /*jumpAheadOfWaiters*/, dbImpl); + + assert lockResult.getLockGrant() != LockGrantType.DENIED : + DbLsn.getNoFormatString(newLsn); + + lockResult.copyWriteLockInfo(writeLockInfo); + } + + /* In a dup DB, do not expect embedded LNs or non-empty data. */ + if (dbImpl.getSortedDuplicates() && + (newEmbeddedLN || (data != null && data.length > 0))) { + + throw EnvironmentFailureException.unexpectedState( + envImpl, + "[#25288] emb=" + newEmbeddedLN + + " key=" + Key.getNoFormatString(newKey) + + " data=" + Key.getNoFormatString(data) + + " vlsn=" + item.header.getVLSN() + + " lsn=" + DbLsn.getNoFormatString(currLsn)); + } + + return item; + } + + /* + * Each LN knows what kind of log entry it uses to log itself. Overridden + * by subclasses. + */ + LNLogEntry createLogEntry( + LogEntryType entryType, + DatabaseImpl dbImpl, + Txn txn, + long abortLsn, + boolean abortKD, + byte[] abortKey, + byte[] abortData, + long abortVLSN, + int abortExpiration, + boolean abortExpirationInHours, + byte[] newKey, + boolean newEmbeddedLN, + int newExpiration, + boolean newExpirationInHours, + ReplicationContext repContext) { + + return new LNLogEntry( + entryType, dbImpl.getId(), txn, + abortLsn, abortKD, abortKey, abortData, abortVLSN, + abortExpiration, abortExpirationInHours, + newKey, this, newEmbeddedLN, newExpiration, newExpirationInHours); + } + + /** + * @see Node#incFetchStats + */ + @Override + void incFetchStats(EnvironmentImpl envImpl, boolean isMiss) { + envImpl.getEvictor().incLNFetchStats(isMiss); + } + + /** + * @see Node#getGenericLogType + */ + @Override + public LogEntryType getGenericLogType() { + return getLogType(true, false, null); + } + + protected LogEntryType getLogType( + boolean isInsert, + boolean isTransactional, + DatabaseImpl db) { + + if (db != null) { + LogEntryType type = db.getDbType().getLogType(); + if (type != null) { + return type; + } + } + + if (isDeleted()) { + assert !isInsert; + return isTransactional ? + LogEntryType.LOG_DEL_LN_TRANSACTIONAL : + LogEntryType.LOG_DEL_LN; + } + + if (isInsert) { + return isTransactional ? + LogEntryType.LOG_INS_LN_TRANSACTIONAL : + LogEntryType.LOG_INS_LN; + } + + return isTransactional ? + LogEntryType.LOG_UPD_LN_TRANSACTIONAL : + LogEntryType.LOG_UPD_LN; + } + + /** + * The first time we optionally-log an LN in a DeferredWrite database, + * oldLsn will be NULL_LSN and we'll assign a new transient LSN. When we + * do subsequent optional-log operations, the old LSN will be non-null and + * to conserve transient LSNs we'll continue to use the previously assigned + * LSN rather than assigning a new one. And of course, when old LSN is + * persistent we'll continue to use it. + * + * If locker is non-null, this method write-locks the new LSN, whether it + * has been assigned by this method or not. 
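+     *
+     * Paraphrasing the method body as a sketch of the reuse rule:
+     *
+     * <pre>{@code
+     * long lsn = (oldLsn != DbLsn.NULL_LSN)
+     *     ? oldLsn                                            // reuse
+     *     : envImpl.getNodeSequence().getNextTransientLsn();  // first time
+     * }</pre>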
+ */ + private long assignTransientLsn(EnvironmentImpl envImpl, + DatabaseImpl dbImpl, + long oldLsn, + Locker locker) { + final long newLsn; + if (oldLsn != DbLsn.NULL_LSN) { + newLsn = oldLsn; + } else { + newLsn = envImpl.getNodeSequence().getNextTransientLsn(); + } + + /** + * Lock immediately after assigning a new LSN, with the BIN latched. + * Lock non-blocking, since no contention is possible on the new LSN. + */ + if (locker != null) { + final LockResult lockResult = locker.nonBlockingLock( + newLsn, LockType.WRITE, false /*jumpAheadOfWaiters*/, dbImpl); + + assert lockResult.getLockGrant() != LockGrantType.DENIED : + DbLsn.getNoFormatString(newLsn); + } + + return newLsn; + } + + /** + * @see VersionedWriteLoggable#getLastFormatChange + */ + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + return Collections.emptyList(); + } + + @Override + public int getLogSize() { + return getLogSize(LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer) { + writeToLog( + logBuffer, LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return calcLogSize(isDeleted() ? -1 : data.length); + } + + /** + * Calculates log size based on given dataLen, which is negative to + * calculate the size of a deleted LN. + */ + private int calcLogSize(int dataLen) { + + int size = 0; + + if (dataLen < 0) { + size += LogUtils.getPackedIntLogSize(-1); + } else { + size += LogUtils.getPackedIntLogSize(dataLen); + size += dataLen; + } + + return size; + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + + if (isDeleted()) { + LogUtils.writePackedInt(logBuffer, -1); + } else { + LogUtils.writePackedInt(logBuffer, data.length); + LogUtils.writeBytesNoLength(logBuffer, data); + } + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + + if (entryVersion < 8) { + /* Discard node ID from older version entry. */ + LogUtils.readLong(itemBuffer, entryVersion < 6 /*unpacked*/); + } + + if (entryVersion < 6) { + boolean dataExists = LogUtils.readBoolean(itemBuffer); + if (dataExists) { + data = LogUtils.readByteArray(itemBuffer, true/*unpacked*/); + } + } else { + int size = LogUtils.readInt(itemBuffer, false/*unpacked*/); + if (size >= 0) { + data = LogUtils.readBytesNoLength(itemBuffer, size); + } + } + } + + @Override + public boolean hasReplicationFormat() { + return false; + } + + @Override + public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer, + final int srcVersion, + final int destVersion) { + return false; + } + + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof LN)) { + return false; + } + + LN otherLN = (LN) other; + + if (!Arrays.equals(getData(), otherLN.getData())) { + return false; + } + + return true; + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(beginTag()); + + if (data != null) { + sb.append(""); + if (verbose) { + sb.append(Key.DUMP_TYPE.dumpByteArray(data)); + } else { + sb.append("hidden"); + } + sb.append(""); + } + + dumpLogAdditional(sb, verbose); + + sb.append(endTag()); + } + + public void dumpKey(StringBuilder sb, byte[] key) { + sb.append(Key.dumpString(key, 0)); + } + + /* + * Allows subclasses to add additional fields before the end tag. 
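+     *
+     * For example, NameLN (below in this patch) overrides this to append
+     * its DatabaseId inside the enclosing XML tags:
+     *
+     *   @Override
+     *   protected void dumpLogAdditional(StringBuilder sb, boolean verbose) {
+     *       id.dumpLog(sb, true);
+     *   }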
+     */
+    protected void dumpLogAdditional(StringBuilder sb,
+                                     @SuppressWarnings("unused")
+                                     boolean verbose) {
+    }
+
+    /**
+     * Account for FileSummaryLN's extra marshaled memory. [#17462]
+     */
+    public void addExtraMarshaledMemorySize(BIN parentBIN) {
+        /* Do nothing here. Overridden in FileSummaryLN. */
+    }
+
+    /*
+     * DatabaseEntry utilities
+     */
+
+    /**
+     * Copies the non-deleted LN's byte array to the entry. Does not support
+     * partial data.
+     */
+    public void setEntry(DatabaseEntry entry) {
+        assert !isDeleted();
+        int len = data.length;
+        byte[] bytes = new byte[len];
+        System.arraycopy(data, 0, bytes, 0, len);
+        entry.setData(bytes);
+    }
+
+    /**
+     * Copies the given byte array to the given destination entry, copying only
+     * partial data if the entry is specified to be partial. If the byte array
+     * is null, clears the entry.
+     */
+    public static void setEntry(DatabaseEntry dest, byte[] bytes) {
+
+        if (bytes != null) {
+            boolean partial = dest.getPartial();
+            int off = partial ? dest.getPartialOffset() : 0;
+            int len = partial ? dest.getPartialLength() : bytes.length;
+            if (off + len > bytes.length) {
+                len = (off > bytes.length) ? 0 : bytes.length - off;
+            }
+
+            byte[] newdata = null;
+            if (len == 0) {
+                newdata = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+            } else {
+                newdata = new byte[len];
+                System.arraycopy(bytes, off, newdata, 0, len);
+            }
+            dest.setData(newdata);
+            dest.setOffset(0);
+            dest.setSize(len);
+        } else {
+            dest.setData(null);
+            dest.setOffset(0);
+            dest.setSize(0);
+        }
+    }
+
+    /**
+     * Copies the given source entry to the given destination entry, copying
+     * only partial data if the destination entry is specified to be partial.
+     */
+    public static void setEntry(DatabaseEntry dest, DatabaseEntry src) {
+
+        if (src.getData() != null) {
+            byte[] srcBytes = src.getData();
+            boolean partial = dest.getPartial();
+            int off = partial ? dest.getPartialOffset() : 0;
+            int len = partial ? dest.getPartialLength() : srcBytes.length;
+            if (off + len > srcBytes.length) {
+                len = (off > srcBytes.length) ? 0 : srcBytes.length - off;
+            }
+
+            byte[] newdata = null;
+            if (len == 0) {
+                newdata = LogUtils.ZERO_LENGTH_BYTE_ARRAY;
+            } else {
+                newdata = new byte[len];
+                System.arraycopy(srcBytes, off, newdata, 0, len);
+            }
+            dest.setData(newdata);
+            dest.setOffset(0);
+            dest.setSize(len);
+        } else {
+            dest.setData(null);
+            dest.setOffset(0);
+            dest.setSize(0);
+        }
+    }
+
+    /**
+     * Returns a byte array that is a complete copy of the data in a
+     * non-partial entry.
+     */
+    public static byte[] copyEntryData(DatabaseEntry entry) {
+        assert !entry.getPartial();
+        int len = entry.getSize();
+        final byte[] newData =
+            (len == 0) ? LogUtils.ZERO_LENGTH_BYTE_ARRAY : (new byte[len]);
+        System.arraycopy(entry.getData(), entry.getOffset(),
+                         newData, 0, len);
+        return newData;
+    }
+
+    /**
+     * Merges the partial entry with the given byte array, effectively applying
+     * a partial entry to an existing record, and returns a new byte array.
+     */
+    public static byte[] resolvePartialEntry(DatabaseEntry entry,
+                                             byte[] foundDataBytes) {
+        assert foundDataBytes != null;
+        final int dlen = entry.getPartialLength();
+        final int doff = entry.getPartialOffset();
+        final int origlen = foundDataBytes.length;
+        final int oldlen = (doff + dlen > origlen) ?
(doff + dlen) : origlen; + final int len = oldlen - dlen + entry.getSize(); + + final byte[] newData; + if (len == 0) { + newData = LogUtils.ZERO_LENGTH_BYTE_ARRAY; + } else { + newData = new byte[len]; + } + int pos = 0; + + /* Keep 0..doff of the old data (truncating if doff > length). */ + int slicelen = (doff < origlen) ? doff : origlen; + if (slicelen > 0) { + System.arraycopy(foundDataBytes, 0, newData, pos, slicelen); + } + pos += doff; + + /* Copy in the new data. */ + slicelen = entry.getSize(); + System.arraycopy(entry.getData(), entry.getOffset(), newData, pos, + slicelen); + pos += slicelen; + + /* Append the rest of the old data (if any). */ + slicelen = origlen - (doff + dlen); + if (slicelen > 0) { + System.arraycopy(foundDataBytes, doff + dlen, newData, pos, + slicelen); + } + + return newData; + } +} diff --git a/src/com/sleepycat/je/tree/MapLN.java b/src/com/sleepycat/je/tree/MapLN.java new file mode 100644 index 0000000..1fe877a --- /dev/null +++ b/src/com/sleepycat/je/tree/MapLN.java @@ -0,0 +1,345 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; + +/** + * A MapLN represents a Leaf Node in the JE Db Mapping Tree. + * + * MapLNs contain a DatabaseImpl, which in turn contains three categories of + * information - database configuration information, the per-database File + * Summary utilization information, and each database's btree root. While LNs + * are written to the log as the result of API operations which create new data + * records, MapLNs are written to the log as a result of configuration changes, + * utilization information changes, or updates to the btree which cascade up + * the tree and result in a new root. Because they serve as a bridge between + * the application data btree and the db mapping tree, MapLNs must be written + * with special rules, and should only be written from DbTree.modifyDbRoot. + * The basic rule is that in order to ensure that the MapLN contains the + * proper btree root, the btree root latch is used to protect both any logging + * of the MapLN, and any updates to the root lsn. + * + * Updates to the internal btree nodes obey a strict bottom up approach, in + * accordance with the log semantics which require that later log entries are + * known to supercede earlier log entries. In other words, for a btree that + * looks like + * MapLN + * | + * IN + * | + * BIN + * | + * LN + * we know that update operations cause the btree nodes must be logged in this + * order: LN, BIN, IN, MapLN, so that the reference to each on disk node is + * correct. 
(Note that logging order is special and different when the btree
+ * is initially created.)
+ *
+ * However, MapLNs may need to be written to disk at arbitrary points in time
+ * in order to save database config or utilization data. Those writes don't
+ * have the time and context to be done in a cascading-upwards fashion. We
+ * ensure that MapLNs are not erroneously written with an out-of-sync root by
+ * requiring that DbTree.modifyDbRoot takes the root latch for the application
+ * data btree. RootINs are also written with the root latch, so it serves to
+ * ensure that the root doesn't change during the time when the MapLN is
+ * written. For example, suppose thread 1 is doing a cascading-up MapLN write,
+ * and thread 2 is doing an arbitrary-point MapLN write:
+ *
+ *   Thread 1                     Thread 2
+ *   --------                     --------
+ *   latch root                   latch BIN parent of MapLN
+ *   log root IN
+ *   log MapLN (Tree root)        wants to log MapLN too -- but has to take
+ *    to refer to new root IN      root latch, so we'll get the right rootIN
+ *
+ * Without latching the root this could produce the following, incorrect log:
+ *   30  LNa
+ *   40  BIN
+ *   50  IN (first version of root)
+ *   60  MapLN, refers to IN(50)
+ *   ...
+ *   90  LNb
+ *  100  BIN
+ *  110  IN (second version of root)
+ *  120  CkptStart (the tree is not dirty, no IN will be logged during the
+ *       ckpt interval)
+ *  ...  something arbitrarily writes out the MapLN
+ *  130  MapLN refers to first root, IN(50) <------ impossible
+ *
+ * While a MapLN can't be written out with the wrong root, it's possible
+ * for a rootIN to be logged without the MapLN, and for that rootIN not
+ * to be processed at recovery. Suppose a checkpoint begins and ends
+ * in the window between when a rootIN is written, and DbTree.modifyDbRoot is
+ * called:
+ *  300  log new root IN,
+ *       update root reference in tree,
+ *       unlatch root
+ *
+ *  310  Checkpoint starts
+ *  320  Checkpoint ends
+ * ...if we crash here, before the MapLN is logged, we won't see the new
+ * root IN at lsn 300. However, the IN is non-txnal and will be recreated
+ * during replay of txnal information (LNs) by normal recovery processing.
+ */
+public final class MapLN extends LN {
+
+    private static final String BEGIN_TAG = "<mapLN>";
+    private static final String END_TAG = "</mapLN>";
+
+    private final DatabaseImpl databaseImpl;
+    private boolean deleted;
+
+    /**
+     * Create a new MapLN to hold a new databaseImpl. In the ideal world, we'd
+     * have a base LN class so that this MapLN doesn't have a superfluous data
+     * field, but we want to optimize the LN class for size and speed right
+     * now.
+     */
+    public MapLN(DatabaseImpl db) {
+        super(new byte[0]);
+        databaseImpl = db;
+        deleted = false;
+    }
+
+    /**
+     * Create an empty MapLN, to be filled in from the log.
+     */
+    public MapLN() {
+        super();
+        databaseImpl = new DatabaseImpl();
+    }
+
+    @Override
+    public boolean isDeleted() {
+        return deleted;
+    }
+
+    @Override
+    void makeDeleted() {
+        deleted = true;
+
+        /* Release all references to nodes held by this database. */
+        databaseImpl.getTree().setRoot(null, true);
+    }
+
+    public DatabaseImpl getDatabase() {
+        return databaseImpl;
+    }
+
+    @Override
+    public boolean isDirty() {
+        return super.isDirty() || databaseImpl.isDirty();
+    }
+
+    /**
+     * Does a fast check without acquiring the MapLN write-lock. This is
+     * important because the overhead of requesting the lock is significant and
+     * unnecessary if this DB is open or the root IN is resident.
When there + * are lots of databases open, this method will be called often during + * selection of BINs for eviction. [#13415] + */ + private boolean isEvictableInexact() { + /* Always prohibit eviction when je.env.dbEviction=false. */ + return databaseImpl.getEnv().getDbEviction() && + !databaseImpl.isInUse() && + !databaseImpl.getTree().isRootResident(); + } + + /** + * Does a guaranteed check by acquiring the write-lock and then calling + * isEvictableInexact. [#13415] Be sure to use the idDatabaseImpl, which + * owns this MapLN, rather than the databaseImpl housed within the MapLN + * for the lock call. The databaseImpl field refers to the database that + * the MapLN is representing. [#18524] + */ + @Override + boolean isEvictable(long lsn) + throws DatabaseException { + + boolean evictable = false; + + /* To prevent DB open, get a write-lock on the MapLN. */ + EnvironmentImpl envImpl = databaseImpl.getEnv(); + BasicLocker locker = BasicLocker.createBasicLocker(envImpl); + DatabaseImpl idDatabaseImpl = envImpl.getDbTree().getIdDatabaseImpl(); + try { + LockResult lockResult = locker.nonBlockingLock + (lsn, LockType.WRITE, false /*jumpAheadOfWaiters*/, + idDatabaseImpl); + + /* + * The isEvictableInexact result is guaranteed to hold true during + * LN stripping if it is still true after acquiring the write-lock. + */ + if (lockResult.getLockGrant() != LockGrantType.DENIED && + isEvictableInexact()) { + + /* + * While holding both the BIN latch and a write-lock on the + * MapLN, we are guaranteed that the DB is not currently open + * or otherwise in use. It cannot be subsequently opened or + * used until the BIN latch is released, since the BIN latch + * will block DbTree.getDb (called during DB open and by other + * callers needing to use the database). We will evict the LN + * before releasing the BIN latch. After releasing the BIN + * latch, if a caller of DbTree.getDb is waiting on the BIN + * latch, then it will fetch the evicted MapLN and proceed to + * open/use the database. + */ + evictable = true; + } + } finally { + /* Release the write-lock. The BIN latch is still held. */ + locker.operationEnd(); + } + + return evictable; + } + + /** + * Initialize a node that has been faulted in from the log. + */ + @Override + public void postFetchInit(DatabaseImpl db, long sourceLsn) { + + super.postFetchInit(db, sourceLsn); + + databaseImpl.setEnvironmentImpl(db.getEnv()); + } + + /** + * Compute the approximate size of this node in memory for evictor + * invocation purposes. Don't count the treeAdmin memory, because + * that goes into a different bucket. 
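+     *
+     * For contrast, the base LN computes a data-dependent size (quoted from
+     * LN.getMemorySizeIncludedByParent earlier in this patch):
+     *
+     * <pre>{@code
+     * int size = MemoryBudget.LN_OVERHEAD;
+     * if (data != null) {
+     *     size += MemoryBudget.byteArraySize(data.length);
+     * }
+     * }</pre>
+     *
+     * whereas a MapLN reports the flat MAPLN_OVERHEAD constant.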
+ */ + @Override + public long getMemorySizeIncludedByParent() { + return MemoryBudget.MAPLN_OVERHEAD; + } + + /** + * @see LN#releaseMemoryBudget + */ + @Override + public void releaseMemoryBudget() { + databaseImpl.releaseTreeAdminMemory(); + } + + @Override + public long getTreeAdminMemory() { + return databaseImpl.getTreeAdminMemory(); + } + + /* + * Dumping + */ + + @Override + public String toString() { + return dumpString(0, true); + } + + @Override + public String beginTag() { + return BEGIN_TAG; + } + + @Override + public String endTag() { + return END_TAG; + } + + @Override + public String dumpString(int nSpaces, boolean dumpTags) { + StringBuilder sb = new StringBuilder(); + sb.append(super.dumpString(nSpaces, dumpTags)); + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces)); + sb.append(""); + sb.append('\n'); + sb.append(databaseImpl.dumpString(nSpaces)); + return sb.toString(); + } + + /* + * Logging + */ + + /** + * Return the correct log entry type for a MapLN depends on whether it's + * transactional. + */ + @Override + protected LogEntryType getLogType(boolean isInsert, + boolean isTransactional, + DatabaseImpl db) { + assert(!isTransactional); + return LogEntryType.LOG_MAPLN; + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return super.getLogSize(logVersion, forReplication) + + databaseImpl.getLogSize() + + 1; // deleted + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + super.writeToLog(logBuffer, logVersion, forReplication); + databaseImpl.writeToLog(logBuffer); + byte booleans = (byte) (deleted ? 1 : 0); + logBuffer.put(booleans); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + super.readFromLog(itemBuffer, entryVersion); + databaseImpl.readFromLog(itemBuffer, entryVersion); + byte booleans = itemBuffer.get(); + deleted = (booleans & 1) != 0; + } + + /** + * Should never be replicated. + */ + @Override + public boolean logicalEquals(Loggable other) { + return false; + } + + /** + * Dump additional fields. Done this way so the additional info can be + * within the XML tags defining the dumped log entry. + */ + @Override + protected void dumpLogAdditional(StringBuilder sb, boolean verbose) { + databaseImpl.dumpLog(sb, verbose); + } +} diff --git a/src/com/sleepycat/je/tree/NameLN.java b/src/com/sleepycat/je/tree/NameLN.java new file mode 100644 index 0000000..7cb7098 --- /dev/null +++ b/src/com/sleepycat/je/tree/NameLN.java @@ -0,0 +1,227 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; + +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.VersionedWriteLoggable; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.NameLNLogEntry; +import com.sleepycat.je.txn.Txn; + +/** + * A NameLN represents a Leaf Node in the name->database id mapping tree. + */ +public final class NameLN extends LN { + + private static final String BEGIN_TAG = ""; + private static final String END_TAG = ""; + + private DatabaseId id; + private boolean deleted; + + /** + * In the ideal world, we'd have a base LN class so that this NameLN + * doesn't have a superfluous data field, but we want to optimize the LN + * class for size and speed right now. + */ + public NameLN(DatabaseId id) { + super(new byte[0]); + this.id = id; + deleted = false; + } + + /** + * Create an empty NameLN, to be filled in from the log. + */ + public NameLN() { + super(); + id = new DatabaseId(); + } + + @Override + public boolean isDeleted() { + return deleted; + } + + @Override + void makeDeleted() { + deleted = true; + } + + public DatabaseId getId() { + return id; + } + + public void setId(DatabaseId id) { + this.id = id; + } + + /* + * Dumping + */ + + @Override + public String toString() { + return dumpString(0, true); + } + + @Override + public String beginTag() { + return BEGIN_TAG; + } + + @Override + public String endTag() { + return END_TAG; + } + + @Override + public String dumpString(int nSpaces, boolean dumpTags) { + StringBuilder sb = new StringBuilder(); + sb.append(super.dumpString(nSpaces, dumpTags)); + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces)); + sb.append(""); + sb.append('\n'); + sb.append(TreeUtils.indent(nSpaces)); + sb.append(""); + sb.append('\n'); + return sb.toString(); + } + + /* + * Logging + */ + + /** + * Return the correct log entry type for a NameLN depends on whether it's + * transactional. + */ + @Override + protected LogEntryType getLogType(boolean isInsert, + boolean isTransactional, DatabaseImpl db) { + return isTransactional ? LogEntryType.LOG_NAMELN_TRANSACTIONAL : + LogEntryType.LOG_NAMELN; + } + + @Override + public Collection getEmbeddedLoggables() { + final Collection list = + new ArrayList<>(super.getEmbeddedLoggables()); + list.add(new DatabaseId()); + return list; + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return + super.getLogSize(logVersion, forReplication) + + id.getLogSize(logVersion, forReplication) + + 1; // deleted flag + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + super.writeToLog(logBuffer, logVersion, forReplication); + id.writeToLog(logBuffer, logVersion, forReplication); + byte booleans = (byte) (deleted ? 
1 : 0); + logBuffer.put(booleans); + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + + super.readFromLog(itemBuffer, entryVersion); // super class + id.readFromLog(itemBuffer, entryVersion); // id + byte booleans = itemBuffer.get(); + deleted = (booleans & 1) != 0; + } + + @Override + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof NameLN)) { + return false; + } + + NameLN otherLN = (NameLN) other; + + if (!super.logicalEquals(otherLN)) { + return false; + } + + if (!(id.equals(otherLN.id))) { + return false; + } + + if (deleted != otherLN.deleted) { + return false; + } + + return true; + } + + /** + * Dump additional fields. Done this way so the additional info can be + * within the XML tags defining the dumped log entry. + */ + @Override + protected void dumpLogAdditional(StringBuilder sb, boolean verbose) { + id.dumpLog(sb, true); + } + + /* + * Each LN knows what kind of log entry it uses to log itself. Overridden + * by subclasses. + */ + @Override + LNLogEntry createLogEntry( + LogEntryType entryType, + DatabaseImpl dbImpl, + Txn txn, + long abortLsn, + boolean abortKD, + byte[] abortKey, + byte[] abortData, + long abortVLSN, + int abortExpiration, + boolean abortExpirationInHours, + byte[] newKey, + boolean newEmbeddedLN, + int newExpiration, + boolean newExpirationInHours, + ReplicationContext repContext) { + + return new NameLNLogEntry(entryType, + dbImpl.getId(), + txn, + abortLsn, + abortKD, + newKey, + this, + repContext); + } +} diff --git a/src/com/sleepycat/je/tree/Node.java b/src/com/sleepycat/je/tree/Node.java new file mode 100644 index 0000000..c8cfbea --- /dev/null +++ b/src/com/sleepycat/je/tree/Node.java @@ -0,0 +1,174 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Loggable; + +/** + * A Node contains all the common base information for any JE B-Tree node. + */ +public abstract class Node implements Loggable { + + /* Used to mean null or none. See NodeSequence. */ + public static final long NULL_NODE_ID = -1L; + + protected Node() { + } + + /** + * Initialize a node that has been faulted in from the log. + */ + public void postFetchInit(DatabaseImpl db, long sourceLsn) + throws DatabaseException { + + /* Nothing to do. */ + } + + public void latch() { + } + + public void latchShared() + throws DatabaseException { + } + + public void latchShared(CacheMode ignore) + throws DatabaseException { + } + + public void releaseLatch() { + } + + /** + * Since DIN/DBIN/DupCountLN are no longer used in the Btree, this method + * should normally only be used by dup conversion or entities that do not + * access records via the Btree. + * + * @return true if this node is a duplicate-bearing node type, false + * if otherwise. 
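+     *
+     * A sketch of the intended guard at a hypothetical call site:
+     *
+     * <pre>{@code
+     * if (node.containsDuplicates()) {
+     *     // Legacy DIN/DBIN/DupCountLN from an old-format log; handle
+     *     // via the dup-conversion path rather than the Btree.
+     * }
+     * }</pre>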
+ */ + public boolean containsDuplicates() { + return false; + } + + /** + * Cover for LN's and just return 0 since they'll always be at the bottom + * of the tree. + */ + public int getLevel() { + return 0; + } + + /** + * Add yourself to the in memory list if you're a type of node that + * should belong. + */ + abstract void rebuildINList(INList inList) + throws DatabaseException; + + /** + * @return true if you're part of a deletable subtree. + */ + abstract boolean isValidForDelete() + throws DatabaseException; + + public boolean isLN() { + return false; + } + + public boolean isIN() { + return false; + } + + public boolean isUpperIN() { + return false; + } + + + public boolean isBIN() { + return false; + } + + public boolean isBINDelta() { + return false; + } + + public boolean isBINDelta(boolean checkLatched) { + return false; + } + + public boolean isDIN() { + return false; + } + + public boolean isDBIN() { + return false; + } + + /** + * Return the approximate size of this node in memory, if this size should + * be included in its parents memory accounting. For example, all INs + * return 0, because they are accounted for individually. LNs must return a + * count, they're not counted on the INList. + */ + public long getMemorySizeIncludedByParent() { + return 0; + } + + /** + * Default toString method at the root of the tree. + */ + @Override + public String toString() { + return this.dumpString(0, true); + } + + public void dump(int nSpaces) { + System.out.print(dumpString(nSpaces, true)); + } + + String dumpString(int nSpaces, boolean dumpTags) { + return ""; + } + + public String getType() { + return getClass().getName(); + } + + /** + * We categorize fetch stats by the type of node, so node subclasses + * update different stats. + */ + abstract void incFetchStats(EnvironmentImpl envImpl, boolean isMiss); + + /** + * Returns the generic LogEntryType for this node. Returning the actual + * type used to log the node is not always possible. Specifically, for LN + * nodes the generic type is less specific than the actual type used to log + * the node: + * + A non-transactional type is always returned. + * + LOG_INS_LN is returned rather than LOG_UPD_LN. + * + LOG_DEL_LN is returned rather than LOG_DEL_DUPLN. + */ + public abstract LogEntryType getGenericLogType(); + + public long getTransactionId() { + return 0; + } +} diff --git a/src/com/sleepycat/je/tree/NodeNotEmptyException.java b/src/com/sleepycat/je/tree/NodeNotEmptyException.java new file mode 100644 index 0000000..f5516e2 --- /dev/null +++ b/src/com/sleepycat/je/tree/NodeNotEmptyException.java @@ -0,0 +1,34 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +/** + * Error to indicate that a bottom level IN is not empty during a + * delete subtree operation. + */ +public class NodeNotEmptyException extends Exception { + + private static final long serialVersionUID = 933349511L; + + /* + * Throw this static instance, in order to reduce the cost of + * fill in the stack trace. 
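+     *
+     * A sketch of the intended pattern (the guard condition here is
+     * hypothetical): throwing the shared instance avoids allocating, and
+     * filling the stack trace of, a new exception on every failed subtree
+     * delete:
+     *
+     *   if (bin.getNEntries() > 0) {
+     *       throw NodeNotEmptyException.NODE_NOT_EMPTY;
+     *   }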
+ */ + public static final NodeNotEmptyException NODE_NOT_EMPTY = + new NodeNotEmptyException(); + + /* Make the constructor public for serializability testing. */ + public NodeNotEmptyException() { + } +} diff --git a/src/com/sleepycat/je/tree/OldBINDelta.java b/src/com/sleepycat/je/tree/OldBINDelta.java new file mode 100644 index 0000000..9f453c5 --- /dev/null +++ b/src/com/sleepycat/je/tree/OldBINDelta.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import java.nio.ByteBuffer; +import java.util.ArrayList; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.SizeofMarker; +import com.sleepycat.je.utilint.VLSN; + +/** + * An OldBINDelta contains the information needed to create a partial (delta) + * BIN log entry. It also knows how to combine a full BIN log entry and a delta + * to generate a new BIN. + * + * An OldBINDelta is no longer written by this version of JE, but it may be + * read from a log file written by earlier versions. + */ +public class OldBINDelta implements Loggable { + + private final DatabaseId dbId; // owning db for this bin. + private long lastFullLsn; // location of last full version + private long prevDeltaLsn; // location of previous delta version + private final ArrayList deltas; // list of key/action changes + + /** + * For instantiating from the log. + */ + public OldBINDelta() { + dbId = new DatabaseId(); + lastFullLsn = DbLsn.NULL_LSN; + prevDeltaLsn = DbLsn.NULL_LSN; + deltas = new ArrayList(); + } + + /** + * For Sizeof. + */ + public OldBINDelta(@SuppressWarnings("unused") SizeofMarker marker) { + dbId = new DatabaseId(); + lastFullLsn = DbLsn.NULL_LSN; + prevDeltaLsn = DbLsn.NULL_LSN; + deltas = null; /* Computed separately. */ + } + + public DatabaseId getDbId() { + return dbId; + } + + public long getLastFullLsn() { + return lastFullLsn; + } + + /** + * @return the prior delta version of this BIN, or NULL_LSN if the prior + * version is a full BIN. The returned value is the LSN that is obsoleted + * by this delta. + */ + public long getPrevDeltaLsn() { + return prevDeltaLsn; + } + + /** + * Returns a key that can be used to find the BIN associated with this + * delta. The key of any slot will do. + */ + public byte[] getSearchKey() { + assert (deltas.size() > 0); + return deltas.get(0).getKey(); + } + + /** + * Create a BIN by fetching the full version and applying the deltas. + */ + public BIN reconstituteBIN(DatabaseImpl dbImpl) { + + final EnvironmentImpl envImpl = dbImpl.getEnv(); + + final BIN fullBIN = (BIN) + envImpl.getLogManager().getEntryHandleFileNotFound(lastFullLsn); + + reconstituteBIN(dbImpl, fullBIN); + + return fullBIN; + } + + /** + * Given a full version BIN, apply the deltas. 
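+     *
+     * Callers that only have the delta typically use the one-argument
+     * overload above, which fetches the full version itself (a sketch;
+     * dbImpl is the owning DatabaseImpl and the reader is hypothetical):
+     *
+     * <pre>{@code
+     * OldBINDelta delta = readDeltaFromOldLog();  // hypothetical reader
+     * BIN bin = delta.reconstituteBIN(dbImpl);    // fetch full BIN, apply deltas
+     * }</pre>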
+ */ + public void reconstituteBIN(DatabaseImpl dbImpl, BIN fullBIN) { + + fullBIN.setDatabase(dbImpl); + fullBIN.latch(CacheMode.UNCHANGED); + try { + + /* + * The BIN's lastFullLsn is set here, while its lastLoggedLsn is + * set by postFetchInit or postRecoveryInit. + */ + fullBIN.setLastFullLsn(lastFullLsn); + + /* Process each delta. */ + for (int i = 0; i < deltas.size(); i++) { + final DeltaInfo info = deltas.get(i); + fullBIN.applyDelta( + info.getKey(), null/*data*/, info.getLsn(), + info.getState(), 0 /*lastLoggedSize*/, 0 /*memId*/, + VLSN.NULL_VLSN_SEQUENCE, null /*child*/, + 0 /*expiration*/, false /*expirationInHours*/); + } + + /* + * The applied deltas will leave some slots dirty, which is + * necessary as a record of changes that will be included in the + * next delta. However, the BIN itself should not be dirty, + * because this delta is a persistent record of those changes. + */ + fullBIN.setDirty(false); + } finally { + fullBIN.releaseLatch(); + } + } + + /* + * Logging support + */ + + @Override + public int getLogSize() { + throw new UnsupportedOperationException(); + } + + @Override + public void writeToLog(ByteBuffer logBuffer) { + throw new UnsupportedOperationException(); + } + + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + dbId.readFromLog(itemBuffer, entryVersion); + lastFullLsn = LogUtils.readLong(itemBuffer, (entryVersion < 6)); + if (entryVersion >= 8) { + prevDeltaLsn = LogUtils.readPackedLong(itemBuffer); + } + int numDeltas = LogUtils.readInt(itemBuffer, (entryVersion < 6)); + + for (int i=0; i < numDeltas; i++) { + DeltaInfo info = new DeltaInfo(); + info.readFromLog(itemBuffer, entryVersion); + deltas.add(info); + } + + /* Use minimum memory. */ + deltas.trimToSize(); + } + + public void dumpLog(StringBuilder sb, boolean verbose) { + dbId.dumpLog(sb, verbose); + sb.append(""); + sb.append(DbLsn.getNoFormatString(lastFullLsn)); + sb.append(""); + sb.append(""); + sb.append(DbLsn.getNoFormatString(prevDeltaLsn)); + sb.append(""); + sb.append(""); + for (int i = 0; i < deltas.size(); i++) { + DeltaInfo info = deltas.get(i); + info.dumpLog(sb, verbose); + } + } + + public long getTransactionId() { + return 0; + } + + /** + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } + + /** + * Returns the number of bytes occupied by this object. Deltas are not + * stored in the Btree, but they are budgeted during a SortedLSNTreeWalker + * run. + */ + public long getMemorySize() { + long size = MemoryBudget.BINDELTA_OVERHEAD + + MemoryBudget.ARRAYLIST_OVERHEAD + + MemoryBudget.objectArraySize(deltas.size()); + for (DeltaInfo info : deltas) { + size += info.getMemorySize(); + } + return size; + } +} diff --git a/src/com/sleepycat/je/tree/SearchResult.java b/src/com/sleepycat/je/tree/SearchResult.java new file mode 100644 index 0000000..e981624 --- /dev/null +++ b/src/com/sleepycat/je/tree/SearchResult.java @@ -0,0 +1,50 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +/** + * Contains the result of a tree search + */ +public class SearchResult { + + public boolean exactParentFound; + public IN parent; + public int index; + /* + * Set to true if a search stopped because a child was not resident, and + * we are doing a do-not-fetch kind of search. + */ + public boolean childNotResident; + + public SearchResult() { + reset(); + } + + public void reset() { + exactParentFound = false; + parent = null; + index = -1; + childNotResident = false; + } + + @Override + public String toString() { + return + "exactParentFound="+ exactParentFound + + " parent=" + ((parent == null)? "null": + Long.toString(parent.getNodeId())) + + " index=" + index + + " childNotResident=" + childNotResident; + } +} diff --git a/src/com/sleepycat/je/tree/SplitRequiredException.java b/src/com/sleepycat/je/tree/SplitRequiredException.java new file mode 100644 index 0000000..c546e0b --- /dev/null +++ b/src/com/sleepycat/je/tree/SplitRequiredException.java @@ -0,0 +1,25 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +/** + * Indicates that we need to return to the top of the tree in order to + * do a forced splitting pass. A checked exception is used to ensure that it + * is handled internally and not propagated through the API. + */ +@SuppressWarnings("serial") +class SplitRequiredException extends Exception { + public SplitRequiredException() { + } +} diff --git a/src/com/sleepycat/je/tree/StorageSize.java b/src/com/sleepycat/je/tree/StorageSize.java new file mode 100644 index 0000000..f326d46 --- /dev/null +++ b/src/com/sleepycat/je/tree/StorageSize.java @@ -0,0 +1,212 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +/** + * Contains static methods for estimating record storage size. + * + * Currently this only applies to KVS because we assume that VLSNs are + * preserved. + */ +public class StorageSize { + + /* + * Maximum size of the per-LN overhead. + * + * The overhead is variable and depends on several factors, see + * LNLogEntry.getSize(). The following cases are considered: + * + * 25: cleaned and migrated LN (no txn info), no TTL: + * 22: header (type, checksum, flags, prevOffset, size, vlsn) + * 2: data length + * 1: flags + * + * 43: insertion, with TTL: + * 25: same as above + * 2: expiration + * 8: txnId + * 8: lastLoggedLsn + * + * 53: update, with TTL: + * 43: same as above + * 8: abortLsn + * 2: abortExpiration + * + * 50 is used as a conservative estimate for LN_OVERHEAD. Updates will be + * relatively infrequent. 
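+     *
+     * As a quick check of the arithmetic above (an illustrative recap, no
+     * new data): an insertion with TTL is 25 + 2 + 8 + 8 = 43 bytes, and
+     * an update adds the abort info for 43 + 8 + 2 = 53 bytes, so 50 leans
+     * toward the more common insertion case.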
+ */ + private final static int LN_OVERHEAD = 50; + + /* + * Maximum size of the per-slot overhead. + * + * The overhead is variable and depends on several factors, see + * IN.getLogSize. The following cases are considered: + * + * 11: Minimum for all cases + * 8: lsn + * 1: keySize + * 1: state + * 1: expiration + * + * 12: Secondary DB, with TTL + * 11: minimum above + * 1: data size + * + * 13: Separate LN in primary DB, with TTL + * 11: minimum above + * 2: lastLoggedSize + * + * 20: Embedded LN in primary DB, with TTL + * 11: minimum above + * 1: data size + * 8: vlsn + * + * 12 is used for SEC_SLOT_OVERHEAD as a conservative estimate. + * + * 14 is used for PRI_SLOT_OVERHEAD and in the customer formula for both + * the separate LN and embedded LN cases. The slot overhead for the + * embedded case will be larger, but in that case there are significant + * savings because the primary key is not duplicated. + */ + private final static int SEC_SLOT_OVERHEAD = 12; + private final static int PRI_SLOT_OVERHEAD = 14; + private final static int PRI_EMBEDDED_LN_SLOT_OVERHEAD = 20; + + /* Static methods only. */ + private StorageSize() {} + + /** + * Returns the estimated disk storage size for the record in the given BIN + * slot. This method does not fetch the LN. + *

        + * For KVS, a formula that customers will use to predict the storage for a + * given set of records, not including obsolete size (size available for + * reclamation by the cleaner), is as follows. + *

        + * The storage overhead for a single Row (JE primary record) is: + *

        +     *  Serialized size of the Row, all fields (JE key + data size)
        +     *    +
        +     *  Serialized size of the PrimaryKey fields (JE key size)
        +     *    +
        +     *  Fixed per-Row internal overhead (64: LN_OVERHEAD + PRI_SLOT_OVERHEAD)
        +     * 
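+     *
+     * For illustration (sizes assumed, not from the source): a Row that
+     * serializes to 100 bytes with a 10-byte PrimaryKey predicts
+     * 100 + 10 + 64 = 174 bytes of storage.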
        + * + * The storage overhead for an Index record is: + *
        +     *  Serialized size of the IndexKey fields (JE key size)
        +     *    +
        +     *  Serialized size of the PrimaryKey fields (JE data size)
        +     *    +
        +     *  Fixed per-IndexKey internal overhead (12: SEC_SLOT_OVERHEAD)
        +     * 
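+     *
+     * For illustration (sizes assumed, not from the source): an IndexKey
+     * that serializes to 20 bytes with a 10-byte PrimaryKey predicts
+     * 20 + 10 + 12 = 42 bytes of storage.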
        + * + * This method returns the size estimate for an actual record based on the + * use of that formula, getting the key and data size (or lastLoggedSize) + * from the BIN. The amount calculated using the formula above will + * normally be larger than the size returned by this method, for several + * reasons: + *
+     * <ul>
+     * <li>
+     * This method uses the key size after it is reduced by prefix
+     * compression.
+     * </li>
+     * <li>
+     * For a separate (non-embedded) LN, this method uses the lastLoggedSize
+     * rather than adding LN_OVERHEAD to the data size (this is why
+     * LN_OVERHEAD is not referenced in code here). This is more accurate
+     * since the actual LN overhead is reduced due to integer packing, etc.
+     * Also, this method cannot fetch the LN, so the data size is unknown.
+     * </li>
+     * <li>
+     * For an embedded LN in a primary DB, the returned size does not
+     * include the LN size, since the LN is always obsolete. This means the
+     * primary key size is not counted redundantly and the LN_OVERHEAD is
+     * not included in the return value, as they are in the formula. These
+     * are significant differences, but since embedded LNs require a data
+     * size LTE 16, this is not expected to be a common use case. If it
+     * becomes common, we should add a new case for this to the customer
+     * formula.
+     * </li>
+     * </ul>
+     *
+     * In addition, the size returned by this method will normally be larger
+     * than the actual storage size on disk. This is because this method uses
+     * PRI_SLOT_OVERHEAD and SEC_SLOT_OVERHEAD to calculate the Btree slot
+     * space, rather than using the serialized size of the slot. These
+     * constant values are somewhat larger than the actual overheads, since
+     * they do not take into account integer packing, etc. See the comments
+     * above these constants. The serialized slot size was not used here for
+     * simplicity and speed, plus this additional size compensates for
+     * uncounted sizes such as per-BIN and UIN overhead.
+     *
+     * @return the estimated storage size, or zero when the size is unknown
+     * because a non-embedded LN is not resident and the LN was logged with a
+     * JE version prior to 6.0.
+     */
+    public static int getStorageSize(final BIN bin, final int idx) {
+
+        final int storedKeySize = bin.getStoredKeySize(idx);
+
+        /*
+         * For a JE secondary DB record (KVS Index record), return:
+         *
+         *   data-size + key-size + SEC_SLOT_OVERHEAD
+         *
+         * where data-size is serialized PrimaryKey size
+         * and key-size is serialized IndexKey size.
+         *
+         * The storedKeySize includes key-size, data-size, and one extra byte
+         * for data (primary key) size. We subtract it here because it is
+         * included in SEC_SLOT_OVERHEAD.
+         */
+        if (bin.getDatabase().getSortedDuplicates()) {
+            return storedKeySize - 1 + SEC_SLOT_OVERHEAD;
+        }
+
+        /*
+         * For an embedded-LN JE primary DB record (KVS Row):
+         *
+         * Return data-size + key-size + PRI_EMBEDDED_LN_SLOT_OVERHEAD
+         *
+         * where (data-size + key-size) is serialized Row size
+         * and key-size is serialized PrimaryKey size
+         *
+         * The storedKeySize includes key-size, data-size, and one extra byte
+         * for data (primary key) size. We subtract it here because it is
+         * included in PRI_EMBEDDED_LN_SLOT_OVERHEAD.
+         */
+        if (bin.isEmbeddedLN(idx)) {
+            return storedKeySize - 1 + PRI_EMBEDDED_LN_SLOT_OVERHEAD;
+        }
+
+        /*
+         * For a separate (non-embedded) JE primary DB record (KVS Row):
+         *
+         * Return LN-log-size + key-size + PRI_SLOT_OVERHEAD
+         *
+         * where LN-log-size is LN_OVERHEAD (or less) + data-size + key-size
+         * and (data-size + key-size) is serialized Row size
+         * and key-size is serialized PrimaryKey size
+         *
+         * The storedKeySize is the key-size alone.
+         */
+        final int lastLoggedSize = bin.getLastLoggedSize(idx);
+        if (lastLoggedSize == 0) {
+            /* Size is unknown. */
+            return 0;
+        }
+        return lastLoggedSize + storedKeySize + PRI_SLOT_OVERHEAD;
+    }
+}
diff --git a/src/com/sleepycat/je/tree/TrackingInfo.java b/src/com/sleepycat/je/tree/TrackingInfo.java
new file mode 100644
index 0000000..2fb3348
--- /dev/null
+++ b/src/com/sleepycat/je/tree/TrackingInfo.java
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.tree;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Tracking info packages some tree tracing info.
+ */ +public class TrackingInfo { + public final long lsn; + public final long nodeId; + public final int entries; + public int index; + + TrackingInfo(long lsn, long nodeId, int entries) { + this.lsn = lsn; + this.nodeId = nodeId; + this.entries = entries; + } + + public TrackingInfo(long lsn, long nodeId, int entries, int index) { + this.lsn = lsn; + this.nodeId = nodeId; + this.entries = entries; + this.index = index; + } + + void setIndex(int index) { + this.index = index; + } + + @Override + public String toString() { + return "lsn=" + DbLsn.getNoFormatString(lsn) + + " node=" + nodeId + + " entries=" + entries + + " index=" + index; + } +} diff --git a/src/com/sleepycat/je/tree/Tree.java b/src/com/sleepycat/je/tree/Tree.java new file mode 100644 index 0000000..cf35493 --- /dev/null +++ b/src/com/sleepycat/je/tree/Tree.java @@ -0,0 +1,2796 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_RELATCHES_REQUIRED; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_ROOT_SPLITS; +import static com.sleepycat.je.dbi.BTreeStatDefinition.GROUP_DESC; +import static com.sleepycat.je.dbi.BTreeStatDefinition.GROUP_NAME; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.latch.LatchContext; +import com.sleepycat.je.latch.LatchFactory; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.latch.LatchTable; +import com.sleepycat.je.latch.SharedLatch; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; +import com.sleepycat.je.utilint.VLSN; + +/** + * Tree implements the JE B+Tree. + * + * A note on tree search patterns: + * There's a set of Tree.search* methods. Some clients of the tree use + * those search methods directly, whereas other clients of the tree + * tend to use methods built on top of search. + * + * The semantics of search* are + * they leave you pointing at a BIN or IN + * they don't tell you where the reference of interest is. + * The semantics of the get* methods are: + * they leave you pointing at a BIN or IN + * they return the index of the slot of interest + * they traverse down to whatever level is needed + * they are built on top of search* methods. 
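+ *
+ * Illustration of the search* contract (sketch only; the tree, key and
+ * cacheMode variables are assumed):
+ * <pre>
+ *   // search* leaves the caller latched on a BIN; locating the slot of
+ *   // interest, and releasing the latch, is the caller's job:
+ *   BIN bin = tree.search(key, cacheMode);
+ *   int idx = (bin == null) ? -1 : bin.findEntry(key, false, false);
+ * </pre>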
+ * For the future:
+ * Over time, we need to clarify which methods are to be used by clients
+ * of the tree. Preferably clients that call the tree use get*, although
+ * there are cases where they need visibility into the tree structure.
+ *
+ * Also, search* should return the location of the slot to save us a
+ * second binary search.
+ *
+ * Search Method Call Hierarchy
+ * ----------------------------
+ * getFirst/LastNode
+ *   search
+ *   CALLED BY:
+ *     CursorImpl.getFirstOrLast
+ *
+ * getNext/PrevBin
+ *   getParentINForChildIN
+ *   searchSubTree
+ *   CALLED BY:
+ *     DupConvert
+ *     CursorImpl.getNext
+ *
+ * getParentINForChildIN
+ *   IN.findParent
+ *   does not use shared latching
+ *   CALLED BY:
+ *     Checkpointer.flushIN (doFetch=false, targetLevel=-1)
+ *     FileProcessor.processIN (doFetch=true, targetLevel=LEVEL)
+ *     Evictor.evictIN (doFetch=true, targetLevel=-1)
+ *     RecoveryManager.replaceOrInsertChild (doFetch=true, targetLevel=-1)
+ *     getNext/PrevBin (doFetch=true, targetLevel=-1)
+ *
+ * search
+ *   searchSubTree
+ *   CALLED BY:
+ *     CursorImpl.searchAndPosition
+ *     INCompressor to find BIN
+ *
+ * searchSubTree
+ *   uses shared grandparent latching
+ *
+ * getParentBINForChildLN
+ *   searchSplitsAllowed
+ *     CALLED BY:
+ *       RecoveryManager.redo
+ *       RecoveryManager.recoveryUndo
+ *   search
+ *     CALLED BY:
+ *       RecoveryManager.abortUndo
+ *       RecoveryManager.rollbackUndo
+ *       FileProcessor.processLN
+ *       Cleaner.processPendingLN
+ *       UtilizationProfile.verifyLsnIsObsolete (utility)
+ *
+ * findBinForInsert
+ *   searchSplitsAllowed
+ *   CALLED BY:
+ *     CursorImpl.putInternal
+ *
+ * searchSplitsAllowed
+ *   uses shared non-grandparent latching
+ *   CALLED BY:
+ *     DupConvert (instead of findBinForInsert, which needs a cursor)
+ *
+ * Possible Shared Latching Improvements
+ * -------------------------------------
+ * By implementing shared latching for BINs we would get better concurrency in
+ * these cases:
+ *   Reads when LN is in cache, or LN is not needed (key-only op, e.g., dups)
+ */
+public final class Tree implements Loggable {
+
+    /* For debug tracing */
+    private static final String TRACE_ROOT_SPLIT = "RootSplit:";
+
+    private DatabaseImpl database;
+
+    private int maxTreeEntriesPerNode;
+
+    private ChildReference root;
+
+    /*
+     * Latch that must be held when using/accessing the root node. Protects
+     * against the root being changed out from underneath us by splitRoot.
+     * After the root IN is latched, the rootLatch can be released.
+     */
+    private SharedLatch rootLatch;
+
+    /*
+     * We don't need the stack trace on this so always throw a static and
+     * avoid the cost of Throwable.fillInStack() every time it's thrown.
+     * [#13354].
+     */
+    private static SplitRequiredException splitRequiredException =
+        new SplitRequiredException();
+
+    /* Stats */
+    private StatGroup stats;
+
+    /* The number of tree root splits. */
+    private IntStat rootSplits;
+    /* The number of latch upgrades from shared to exclusive required. */
+    private LongStat relatchesRequired;
+
+    private final ThreadLocal<TreeWalkerStatsAccumulator>
+        treeStatsAccumulatorTL = new ThreadLocal<>();
+
+    /* For unit tests */
+    private TestHook waitHook; // used for generating race conditions
+    private TestHook searchHook; // [#12736]
+    private TestHook ckptHook; // [#13897]
+    private TestHook getParentINHook;
+    private TestHook fetchINHook;
+
+    /**
+     * Embodies an enum for the type of search being performed. NORMAL means
+     * do a regular search down the tree. LEFT/RIGHT means search down the
+     * left/right side to find the first/last node in the tree.
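+     *
+     * For example (illustrative), getFirstNode() positions at the leftmost
+     * BIN via:
+     * <pre>
+     *   search(null, SearchType.LEFT, null, cacheMode, null);
+     * </pre>
+     * and getLastNode() does the same with SearchType.RIGHT.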
+ */ + public static class SearchType { + /* Search types */ + public static final SearchType NORMAL = new SearchType(); + public static final SearchType LEFT = new SearchType(); + public static final SearchType RIGHT = new SearchType(); + + /* No lock types can be defined outside this class. */ + private SearchType() { + } + } + + /* + * Class that overrides ChildReference methods to enforce rules that apply + * to the root. + * + * Overrides fetchTarget() so that if the rootLatch is not held exclusively + * when the root is fetched, we upgrade it to exclusive. Also overrides + * setter methods to assert that an exclusive latch is held. + * + * Overrides setDirty to dirty the DatabaseImpl, so that the MapLN will be + * logged during the next checkpoint. This is critical when updating the + * root LSN. + */ + private class RootChildReference extends ChildReference { + + private RootChildReference() { + super(); + } + + private RootChildReference(Node target, byte[] key, long lsn) { + super(target, key, lsn); + } + + /* Caller is responsible for releasing rootLatch. */ + @Override + public Node fetchTarget(DatabaseImpl database, IN in) + throws DatabaseException { + + if (getTarget() == null && !rootLatch.isExclusiveOwner()) { + + rootLatch.release(); + rootLatch.acquireExclusive(); + + /* + * If the root field changed while unlatched then we have an + * invalid state and cannot continue. [#21686] + */ + if (this != root) { + throw EnvironmentFailureException.unexpectedState( + database.getEnv(), + "Root changed while unlatched, dbId=" + + database.getId()); + } + } + + return super.fetchTarget(database, in); + } + + @Override + public void setTarget(Node target) { + assert rootLatch.isExclusiveOwner(); + super.setTarget(target); + } + + @Override + public void clearTarget() { + assert rootLatch.isExclusiveOwner(); + super.clearTarget(); + } + + @Override + public void setLsn(long lsn) { + assert rootLatch.isExclusiveOwner(); + super.setLsn(lsn); + } + + @Override + void updateLsnAfterOptionalLog(DatabaseImpl dbImpl, long lsn) { + assert rootLatch.isExclusiveOwner(); + super.updateLsnAfterOptionalLog(dbImpl, lsn); + } + + @Override + void setDirty() { + super.setDirty(); + database.setDirty(); + } + } + + /** + * Create a new tree. + */ + public Tree(DatabaseImpl database) { + init(database); + setDatabase(database); + } + + /** + * Create a tree that's being read in from the log. + */ + public Tree() { + init(null); + maxTreeEntriesPerNode = 0; + } + + /** + * constructor helper + */ + private void init(DatabaseImpl database) { + this.root = null; + this.database = database; + + /* Do the stats definitions. */ + stats = new StatGroup(GROUP_NAME, GROUP_DESC); + relatchesRequired = new LongStat(stats, BTREE_RELATCHES_REQUIRED); + rootSplits = new IntStat(stats, BTREE_ROOT_SPLITS); + } + + /** + * Set the database for this tree. Used by recovery when recreating an + * existing tree. + */ + public void setDatabase(DatabaseImpl database) { + this.database = database; + + final EnvironmentImpl envImpl = database.getEnv(); + + /* + * The LatchContext for the root is special in that it is considered a + * Btree latch (the Btree latch table is used), but the context is not + * implemented by the IN class. 
+ */ + final LatchContext latchContext = new LatchContext() { + @Override + public int getLatchTimeoutMs() { + return envImpl.getLatchTimeoutMs(); + } + @Override + public String getLatchName() { + return "RootLatch"; + } + @Override + public LatchTable getLatchTable() { + return LatchSupport.btreeLatchTable; + } + @Override + public EnvironmentImpl getEnvImplForFatalException() { + return envImpl; + } + }; + + rootLatch = LatchFactory.createSharedLatch( + latchContext, false /*exclusiveOnly*/); + + + maxTreeEntriesPerNode = database.getNodeMaxTreeEntries(); + } + + /** + * @return the database for this Tree. + */ + public DatabaseImpl getDatabase() { + return database; + } + + /** + * Called when latching a child and the parent is latched. Used to + * opportunistically validate the parent pointer. + */ + private static void latchChild(final IN parent, + final IN child, + final CacheMode cacheMode) { + child.latch(cacheMode); + + if (child.getParent() != parent) { + throw EnvironmentFailureException.unexpectedState(); + } + } + + /** + * Called when latching a child and the parent is latched. Used to + * opportunistically validate the parent pointer. + */ + private static void latchChildShared(final IN parent, + final IN child, + final CacheMode cacheMode) { + child.latchShared(cacheMode); + + if (child.getParent() != parent) { + throw EnvironmentFailureException.unexpectedState(); + } + } + + public void latchRootLatchExclusive() + throws DatabaseException { + + rootLatch.acquireExclusive(); + } + + public void releaseRootLatch() + throws DatabaseException { + + rootLatch.release(); + } + + /** + * Set the root for the tree. Should only be called within the root latch. + */ + public void setRoot(ChildReference newRoot, boolean notLatched) { + assert (notLatched || rootLatch.isExclusiveOwner()); + root = newRoot; + } + + public ChildReference makeRootChildReference( + Node target, + byte[] key, + long lsn) { + + return new RootChildReference(target, key, lsn); + } + + private RootChildReference makeRootChildReference() { + return new RootChildReference(); + } + + /* + * A tree doesn't have a root if (a) the root field is null, or (b) the + * root is non-null, but has neither a valid target nor a valid LSN. Case + * (b) can happen if the database is or was previously opened in deferred + * write mode. + * + * @return false if there is no real root. + */ + public boolean rootExists() { + if (root == null) { + return false; + } + + if ((root.getTarget() == null) && + (root.getLsn() == DbLsn.NULL_LSN)) { + return false; + } + + return true; + } + + /** + * Perform a fast check to see if the root IN is resident. No latching is + * performed. To ensure that the root IN is not loaded by another thread, + * this method should be called while holding a write lock on the MapLN. + * That will prevent opening the DB in another thread, and potentially + * loading the root IN. [#13415] + */ + public boolean isRootResident() { + return root != null && root.getTarget() != null; + } + + /** + * Helper to obtain the root IN with shared root latching. Optionally + * updates the generation of the root when latching it. + */ + public IN getRootIN(CacheMode cacheMode) + throws DatabaseException { + + return getRootINInternal(cacheMode, false/*exclusive*/); + } + + /** + * Helper to obtain the root IN with exclusive root latching. Optionally + * updates the generation of the root when latching it. 
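+     *
+     * Caller sketch (illustrative; the caller must always release the
+     * latch):
+     * <pre>
+     *   IN rootIN = tree.getRootINLatchedExclusive(CacheMode.UNCHANGED);
+     *   if (rootIN != null) {  // null when the tree has no root
+     *       try {
+     *           // ... operate on the EX-latched root ...
+     *       } finally {
+     *           rootIN.releaseLatch();
+     *       }
+     *   }
+     * </pre>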
+     */
+    public IN getRootINLatchedExclusive(CacheMode cacheMode)
+        throws DatabaseException {
+
+        return getRootINInternal(cacheMode, true/*exclusive*/);
+    }
+
+    private IN getRootINInternal(CacheMode cacheMode, boolean exclusive)
+        throws DatabaseException {
+
+        rootLatch.acquireShared();
+        try {
+            return getRootINRootAlreadyLatched(cacheMode, exclusive);
+        } finally {
+            rootLatch.release();
+        }
+    }
+
+    /**
+     * Helper to obtain the root IN, when the root latch is already held.
+     */
+    public IN getRootINRootAlreadyLatched(
+        CacheMode cacheMode,
+        boolean exclusive) {
+
+        if (!rootExists()) {
+            return null;
+        }
+
+        final IN rootIN = (IN) root.fetchTarget(database, null);
+
+        if (exclusive) {
+            rootIN.latch(cacheMode);
+        } else {
+            rootIN.latchShared(cacheMode);
+        }
+        return rootIN;
+    }
+
+    public IN getResidentRootIN(boolean latched)
+        throws DatabaseException {
+
+        IN rootIN = null;
+        if (rootExists()) {
+            rootIN = (IN) root.getTarget();
+            if (rootIN != null && latched) {
+                rootIN.latchShared(CacheMode.UNCHANGED);
+            }
+        }
+        return rootIN;
+    }
+
+    public IN withRootLatchedExclusive(WithRootLatched wrl)
+        throws DatabaseException {
+
+        try {
+            rootLatch.acquireExclusive();
+            return wrl.doWork(root);
+        } finally {
+            rootLatch.release();
+        }
+    }
+
+    public IN withRootLatchedShared(WithRootLatched wrl)
+        throws DatabaseException {
+
+        try {
+            rootLatch.acquireShared();
+            return wrl.doWork(root);
+        } finally {
+            rootLatch.release();
+        }
+    }
+
+    /**
+     * Get LSN of the rootIN. Obtained without latching, should only be
+     * accessed while quiescent.
+     */
+    public long getRootLsn() {
+        if (root == null) {
+            return DbLsn.NULL_LSN;
+        } else {
+            return root.getLsn();
+        }
+    }
+
+    /**
+     * Cheaply calculates and returns the maximum possible number of LNs in
+     * the btree.
+     */
+    public long getMaxLNs() {
+
+        final int levels;
+        final int topLevelSlots;
+        rootLatch.acquireShared();
+        try {
+            IN rootIN = (IN) root.fetchTarget(database, null);
+            levels = rootIN.getLevel() & IN.LEVEL_MASK;
+            topLevelSlots = rootIN.getNEntries();
+        } finally {
+            rootLatch.release();
+        }
+        return (long) (topLevelSlots *
+            Math.pow(database.getNodeMaxTreeEntries(), levels - 1));
+    }
+
+    /**
+     * Deletes a BIN specified by key from the tree. If the BIN resides in a
+     * subtree that can be pruned away, prune as much as possible, so we
+     * don't leave a branch that has no BINs.
+     *
+     * It's possible that the targeted BIN will now have entries, or will
+     * have resident cursors. Either will prevent deletion (see exceptions).
+     *
+     * Unlike splits, IN deletion does not immediately log the subtree parent
+     * or its ancestors. It is sufficient to simply dirty the subtree parent.
+     * Logging is not necessary for correctness, and if a checkpoint does not
+     * flush the subtree parent then recovery will add the BINs to the
+     * compressor queue when redoing the LN deletions.
+     *
+     * @param idKey - the identifier key of the node to delete.
+     *
+     * @throws NodeNotEmptyException if the BIN is not empty. The deletion is
+     * no longer possible.
+     *
+     * @throws CursorsExistException if the BIN has cursors. The deletion
+     * should be retried later by the INCompressor.
+     */
+    public void delete(byte[] idKey)
+        throws NodeNotEmptyException, CursorsExistException {
+
+        final EnvironmentImpl envImpl = database.getEnv();
+        final Logger logger = envImpl.getLogger();
+
+        final List<SplitInfo> nodeLadder = searchDeletableSubTree(idKey);
+
+        if (nodeLadder == null) {
+
+            /*
+             * The tree is empty, so do nothing. Root compression is no
+             * longer supported.
+             * Root compression has no impact on memory usage now that we
+             * evict the root IN. It reduces log space taken by INs for empty
+             * (but not removed) databases, yet requires logging an INDelete
+             * and MapLN; this provides very little benefit, if any. Because
+             * it requires extensive testing (which has not been done), this
+             * minor benefit is not worth the cost. And by removing it we no
+             * longer log INDelete, which reduces complexity going forward.
+             * [#17546]
+             */
+            return;
+        }
+
+        /* Detach this subtree. */
+        final SplitInfo detachPoint = nodeLadder.get(0);
+        try {
+            final IN branchParent = detachPoint.parent;
+            final IN branchRoot = detachPoint.child;
+
+            if (logger.isLoggable(Level.FINEST)) {
+                LoggerUtils.envLogMsg(
+                    Level.FINEST, envImpl,
+                    "Tree.delete() " +
+                    Thread.currentThread().getId() + "-" +
+                    Thread.currentThread().getName() + "-" +
+                    envImpl.getName() +
+                    " Deleting child node: " + branchRoot.getNodeId() +
+                    " from parent node: " + branchParent.getNodeId() +
+                    " parent has " + branchParent.getNEntries() +
+                    " children");
+            }
+
+            branchParent.deleteEntry(detachPoint.index);
+
+            /*
+             * Remove deleted INs from the INList/cache and count them as
+             * provisionally obsolete. The parent is not logged immediately,
+             * so we can't count them immediately obsolete. They will be
+             * counted obsolete when an ancestor is logged non-provisionally.
+             * [#21348]
+             */
+            final INList inList = database.getEnv().getInMemoryINs();
+
+            for (final SplitInfo info : nodeLadder) {
+
+                final IN child = info.child;
+
+                assert !child.isBINDelta(false);
+                assert !(child.isUpperIN() && child.getNEntries() > 1);
+                assert !(child.isBIN() && child.getNEntries() > 0);
+
+                /*
+                 * Remove child from cache. The branch root was removed by
+                 * deleteEntry above.
+                 */
+                if (child != branchRoot) {
+                    inList.remove(child);
+                }
+
+                /* Count full and delta versions as obsolete. */
+                branchParent.trackProvisionalObsolete(
+                    child, child.getLastFullLsn());
+
+                branchParent.trackProvisionalObsolete(
+                    child, child.getLastDeltaLsn());
+            }
+
+            if (logger.isLoggable(Level.FINE)) {
+                LoggerUtils.envLogMsg(
+                    Level.FINE, envImpl,
+                    "SubtreeRemoval: subtreeRoot = " +
+                    branchRoot.getNodeId());
+            }
+
+        } finally {
+            releaseNodeLadderLatches(nodeLadder);
+        }
+    }
+
+    /**
+     * Search down the tree using a key, but instead of returning the BIN
+     * that houses that key, find the point where we can detach a deletable
+     * subtree. A deletable subtree is a branch where each IN has one child,
+     * and the bottom BIN has no entries and no resident cursors. That point
+     * can be found by saving a pointer to the lowest node in the path with
+     * more than one entry.
+     *
+     *            INa
+     *           /   \
+     *         INb    INc
+     *          |      |
+     *         INd     ..
+     *         /  \
+     *       INe   ..
+     *        |
+     *       BINx (suspected of being empty)
+     *
+     * In this case, we'd like to prune off the subtree headed by INe. INd
+     * is the parent of this deletable subtree.
+     *
+     * The method returns a list of parent/child/index structures. In this
+     * example, the list will hold:
+     *   INd/INe/index
+     *   INe/BINx/index
+     * All three nodes will be EX-latched.
+     *
+     * @return null if the entire Btree is empty, or a list of SplitInfo for
+     * the branch to be deleted. If non-null is returned, the INs in the list
+     * will be EX-latched; otherwise, no INs will be latched.
+     *
+     * @throws NodeNotEmptyException if the BIN is not empty.
+     *
+     * @throws CursorsExistException if the BIN has cursors.
+     */
+    private List<SplitInfo> searchDeletableSubTree(byte[] key)
+        throws NodeNotEmptyException, CursorsExistException {
+
+        assert (key != null);
+
+        IN parent = getRootINLatchedExclusive(CacheMode.UNCHANGED);
+
+        if (parent == null) {
+            /* Tree was never persisted. */
+            return null;
+        }
+
+        final ArrayList<SplitInfo> nodeLadder = new ArrayList<>();
+
+        try {
+            IN child;
+            IN pinnedIN = null;
+
+            do {
+                if (parent.getNEntries() == 0) {
+                    throw EnvironmentFailureException.unexpectedState(
+                        "Found upper IN with 0 entries");
+                }
+
+                if (parent.getNEntries() > 1) {
+                    /*
+                     * A node with more than one entry is the lowest
+                     * potential branch parent. Unlatch/discard ancestors of
+                     * this parent.
+                     */
+                    for (final SplitInfo info : nodeLadder) {
+                        info.parent.releaseLatch();
+                    }
+                    nodeLadder.clear();
+                    pinnedIN = null;
+                } else if (parent.isPinned()) {
+                    pinnedIN = parent;
+                }
+
+                final int index = parent.findEntry(key, false, false);
+                assert index >= 0;
+
+                /* Get the child node that matches. */
+                child = parent.fetchIN(index, CacheMode.UNCHANGED);
+
+                latchChild(parent, child, CacheMode.UNCHANGED);
+
+                nodeLadder.add(new SplitInfo(parent, child, index));
+
+                /* Continue down a level */
+                parent = child;
+            } while (!parent.isBIN());
+
+            if (pinnedIN != null) {
+                throw CursorsExistException.CURSORS_EXIST;
+            }
+
+            /*
+             * See if there is a reason we can't delete this BIN -- i.e.
+             * new items have been inserted, or a cursor exists on it.
+             */
+            assert (child.isBIN());
+            final BIN bin = (BIN) child;
+
+            if (bin.getNEntries() != 0) {
+                throw NodeNotEmptyException.NODE_NOT_EMPTY;
+            }
+
+            if (bin.isBINDelta()) {
+                throw EnvironmentFailureException.unexpectedState(
+                    "Found BIN delta with 0 entries");
+            }
+
+            /*
+             * This case can happen if we are keeping a cursor on an empty
+             * BIN as we traverse.
+             */
+            if (bin.nCursors() > 0 || child.isPinned()) {
+                throw CursorsExistException.CURSORS_EXIST;
+            }
+
+            if (nodeLadder.get(0).parent.getNEntries() <= 1) {
+                /* The entire tree is empty. */
+                releaseNodeLadderLatches(nodeLadder);
+                return null;
+            }
+
+            return nodeLadder;
+
+        } catch (Throwable e) {
+            releaseNodeLadderLatches(nodeLadder);
+            /* Release parent in case it was not added to nodeLadder. */
+            parent.releaseLatchIfOwner();
+            throw e;
+        }
+    }
+
+    /**
+     * Release latches acquired by searchDeletableSubTree. Each child is
+     * latched, plus the parent of the first node (the branch parent).
+     */
+    private void releaseNodeLadderLatches(List<SplitInfo> nodeLadder)
+        throws DatabaseException {
+
+        if (nodeLadder.isEmpty()) {
+            return;
+        }
+
+        nodeLadder.get(0).parent.releaseLatch();
+
+        for (final SplitInfo info : nodeLadder) {
+            info.child.releaseLatch();
+        }
+
+        nodeLadder.clear();
+    }
+
+    /**
+     * Find the leftmost node (IN or BIN) in the tree.
+     *
+     * @return the leftmost node in the tree, null if the tree is empty. The
+     * returned node is latched and the caller must release it.
+     */
+    public BIN getFirstNode(CacheMode cacheMode)
+        throws DatabaseException {
+
+        BIN bin = search(
+            null /*key*/, SearchType.LEFT, null /*binBoundary*/,
+            cacheMode, null /*comparator*/);
+
+        if (bin != null) {
+            bin.mutateToFullBIN(false /*leaveFreeSlot*/);
+        }
+
+        return bin;
+    }
+
+    /**
+     * Find the rightmost node (IN or BIN) in the tree.
+     *
+     * @return the rightmost node in the tree, null if the tree is empty.
+     * The returned node is latched and the caller must release it.
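+     *
+     * Sketch of a forward scan over all BINs using these helpers
+     * (illustrative only; the caller and error handling are assumed):
+     * <pre>
+     *   BIN bin = tree.getFirstNode(CacheMode.DEFAULT);
+     *   while (bin != null) {
+     *       // ... read from the latched bin ...
+     *       final BIN next = tree.getNextBin(bin, CacheMode.DEFAULT);
+     *       if (next == null) {
+     *           bin.releaseLatch(); // arg stays latched when null returns
+     *       }
+     *       bin = next;
+     *   }
+     * </pre>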
+ */ + public BIN getLastNode(CacheMode cacheMode) + throws DatabaseException { + + BIN bin = search( + null /*key*/, SearchType.RIGHT, null /*binBoundary*/, + cacheMode, null /*comparator*/); + + if (bin != null) { + bin.mutateToFullBIN(false /*leaveFreeSlot*/); + } + + return bin; + } + + /** + * Return a reference to the adjacent BIN. + * + * @param bin The BIN to find the next BIN for. This BIN is latched. + * + * @return The next BIN, or null if there are no more. The returned node + * is latched and the caller must release it. If null is returned, the + * argument BIN remains latched. + */ + public BIN getNextBin(BIN bin, CacheMode cacheMode) + throws DatabaseException { + + return (BIN) getNextIN(bin, true, false, cacheMode); + } + + /** + * Return a reference to the previous BIN. + * + * @param bin The BIN to find the next BIN for. This BIN is latched. + * + * @return The previous BIN, or null if there are no more. The returned + * node is latched and the caller must release it. If null is returned, + * the argument bin remains latched. + */ + public BIN getPrevBin(BIN bin, CacheMode cacheMode) + throws DatabaseException { + + return (BIN) getNextIN(bin, false, false, cacheMode); + } + + /** + * Returns the next IN in the tree before/after the given IN, and at the + * same level. For example, if a BIN is passed in the prevIn parameter, + * the next BIN will be returned. + * + * TODO: A possible problem with this method is that we don't know for + * certain whether it works properly in the face of splits. There are + * comments below indicating it does. But the Cursor.checkForInsertion + * method was apparently added because getNextBin/getPrevBin didn't work + * properly, and may skip a BIN. So at least it didn't work properly in + * the distant past. Archeology and possibly testing are needed to find + * the truth. Hopefully it does now work, and Cursor.checkForInsertion can + * be removed. + * + * TODO: To eliminate EX latches on upper INs, a new getParentINForChildIN + * is needed, which will return with both the parent and the grandparent + * SH-latched. If we do this, then in Tree.getNextIN() the call to + * searchSubtree() will be able to do grandparent latching, and the call + * to parent.fetchIN(index) will also be replace with a local version of + * grandparent latching. + */ + public IN getNextIN( + IN prevIn, + boolean forward, + boolean latchShared, + CacheMode cacheMode) { + + assert(prevIn.isLatchOwner()); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + + prevIn.mutateToFullBIN(false /*leaveFreeSlot*/); + + /* + * Use the right most key (for a forward progressing cursor) or the + * left most key (for a backward progressing cursor) as the search key. + * The reason is that the IN may get split while finding the next IN so + * it's not safe to take the IN's identifierKey entry. If the IN gets + * split, then the right (left) most key will still be on the + * resultant node. The exception to this is that if there are no + * entries, we just use the identifier key. 
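+         *
+         * For example (illustrative), when moving forward from a BIN
+         * holding keys {10, 20, 30}, we use 30 as the search key. Even if
+         * a concurrent split moves {10, 20} and {30} into separate BINs,
+         * the search lands on the node that still holds 30, so no BIN is
+         * skipped.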
+ */ + final byte[] searchKey; + + if (prevIn.getNEntries() == 0) { + searchKey = prevIn.getIdentifierKey(); + } else if (forward) { + searchKey = prevIn.getKey(prevIn.getNEntries() - 1); + } else { + searchKey = prevIn.getKey(0); + } + + final int targetLevel = prevIn.getLevel(); + IN curr = prevIn; + boolean currIsLatched = false; + IN parent = null; + IN nextIN = null; + boolean nextINIsLatched = false; + boolean normalExit = false; + + /* + * Ascend the tree until we find a level that still has nodes to the + * right (or left if !forward) of the path that we're on. If we reach + * the root level, we're done. + */ + try { + while (true) { + + /* + * Move up a level from where we are now and check to see if we + * reached the top of the tree. + */ + currIsLatched = false; + + if (curr.isRoot()) { + /* We've reached the root of the tree. */ + curr.releaseLatch(); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + normalExit = true; + return null; + } + + final SearchResult result = getParentINForChildIN( + curr, false, /*useTargetLevel*/ + true, /*doFetch*/ cacheMode); + + if (result.exactParentFound) { + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + parent = result.parent; + } else { + throw EnvironmentFailureException.unexpectedState( + "Failed to find parent for IN"); + } + + /* + * Figure out which entry we are in the parent. Add (subtract) + * 1 to move to the next (previous) one and check that we're + * still pointing to a valid child. Don't just use the result + * of the parent.findEntry call in getParentNode, because we + * want to use our explicitly chosen search key. + */ + int index = parent.findEntry(searchKey, false, false); + + final boolean moreEntriesThisIn; + + if (forward) { + index++; + moreEntriesThisIn = (index < parent.getNEntries()); + } else { + moreEntriesThisIn = (index > 0); + index--; + } + + if (moreEntriesThisIn) { + + /* + * There are more entries to the right of the current path + * in parent. Get the entry, and then descend down the + * left most path to an IN. + */ + nextIN = parent.fetchIN(index, cacheMode); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + + if (nextIN.getLevel() == targetLevel) { + + if (latchShared) { + latchChildShared(parent, nextIN, cacheMode); + } else { + latchChild(parent, nextIN, cacheMode); + } + nextINIsLatched = true; + + nextIN.mutateToFullBIN(false /*leaveFreeSlot*/); + + parent.releaseLatch(); + parent = null; // to avoid falsely unlatching parent + + final TreeWalkerStatsAccumulator treeStatsAccumulator = + getTreeStatsAccumulator(); + if (treeStatsAccumulator != null) { + nextIN.accumulateStats(treeStatsAccumulator); + } + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + + normalExit = true; + return nextIN; + + } else { + + /* + * We landed at a higher level than the target level. + * Descend down to the appropriate level. + */ + assert(nextIN.isUpperIN()); + nextIN.latch(cacheMode); + nextINIsLatched = true; + + parent.releaseLatch(); + parent = null; // to avoid falsely unlatching parent + nextINIsLatched = false; + + final IN ret = searchSubTree( + nextIN, null, /*key*/ + (forward ? 
SearchType.LEFT : SearchType.RIGHT), + targetLevel, latchShared, cacheMode, + null /*comparator*/); + + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + + if (ret.getLevel() == targetLevel) { + normalExit = true; + return ret; + } else { + throw EnvironmentFailureException.unexpectedState( + "subtree did not have a IN at level " + + targetLevel); + } + } + } + + /* Nothing at this level. Ascend to a higher level. */ + curr = parent; + currIsLatched = true; + parent = null; // to avoid falsely unlatching parent below + } + } finally { + if (!normalExit) { + if (curr != null && currIsLatched) { + curr.releaseLatch(); + } + + if (parent != null) { + parent.releaseLatch(); + } + + if (nextIN != null && nextINIsLatched) { + nextIN.releaseLatch(); + } + } + } + } + + /** + * Search for the parent P of a given IN C (where C is viewed as a logical + * node; not as a java obj). If found, P is returned latched exclusively. + * The method is used when C has been accessed "directly", i.e., not via a + * tree search, and we need to perform an operation on C that requires an + * update to its parent. Such situations arise during eviction (C has been + * accessed via the LRU list), checkpointing, and recovery (C has been read + * from the log and is not attached to the in-memory tree). + * + * The method uses C's identifierKey to search down the tree until: + * + * (a) doFetch is false and we need to access a node that is not cached. + * In this case, we are actually looking for the cached copies of both C + * and its parent, so a cache miss on the path to C is considered a + * failure. This search mode is used by the evictor: to evict C (which has + * been retrieved from the LRU), its parent must be found and EX-latched; + * however, if the C has been evicted already by another thread, there is + * nothing to do (C will GC-ed). + * or + * (b) We reach a node whose node id equals the C's node id. In this case, + * we know for sure that C still belongs to the BTree and its parent has + * been found. + * or + * (c) useTargetLevel is true and we reach a node P that is at one level + * above C's level. We know that P contains a slot S whose corresponding + * key range includes C's identifierKey. Since we haven't read the child + * node under S to check its node id, we cannot know for sure that C is + * still in the tree. Nevertheless, we consider this situation a success, + * i.e., P is the parent node we are looking for. In this search mode, + * after this method returns, the caller is expected to take further + * action based on the info in slot S. For example, if C was created + * by reading a log entry at LSN L, and the LSN at slot S is also L, then + * we know P is the real parent (and we have thus saved a possible extra + * I/O to refetch the C node from the log to check its node id). This + * search mode is used by the cleaner. + * or + * (d) None of the above conditions occur and the bottom of the BTree is + * reached. In this case, no parent exists (the child node is an old + * version of a node that has been removed from the BTree). + * + * @param child The child node for which to find the parent. This node is + * latched by the caller and is unlatched by this function before returning + * to the caller. + * + * @param useTargetLevel If true, the search is considered successful if + * a node P is reached at one level above C's level. P is the parent to + * return to the caller. 
+ * + * @param doFetch if false, stop the search if we run into a non-resident + * child and assume that no parent exists. + * + * @param cacheMode The CacheMode for affecting the hotness of the nodes + * visited during the search. + * + * @return a SearchResult object. If the parent has been found, + * result.foundExactMatch is true, result.parent refers to that node, and + * result.index is the slot for the child IN inside the parent IN. + * Otherwise, result.foundExactMatch is false and result.parent is null. + */ + public SearchResult getParentINForChildIN( + IN child, + boolean useTargetLevel, + boolean doFetch, + CacheMode cacheMode) + throws DatabaseException { + + return getParentINForChildIN( + child, useTargetLevel, doFetch, + cacheMode, null /*trackingList*/); + } + + + /** + * This version of getParentINForChildIN does the same thing as the version + * above, but also adds a "trackingList" param. If trackingList is not + * null, the LSNs of the parents visited along the way are added to the + * list, as a debug tracing mechanism. This is meant to stay in production, + * to add information to the log. + */ + public SearchResult getParentINForChildIN( + IN child, + boolean useTargetLevel, + boolean doFetch, + CacheMode cacheMode, + List trackingList) + throws DatabaseException { + + /* Sanity checks */ + if (child == null) { + throw EnvironmentFailureException.unexpectedState( + "getParentINForChildIN given null child node"); + } + + assert child.isLatchOwner(); + + /* + * Get information from child before releasing latch. + */ + long targetId = child.getNodeId(); + byte[] targetKey = child.getIdentifierKey(); + int targetLevel = (useTargetLevel ? child.getLevel() : -1); + int exclusiveLevel = child.getLevel() + 1; + boolean requireExactMatch = true; + + child.releaseLatch(); + + return getParentINForChildIN( + targetId, targetKey, targetLevel, + exclusiveLevel, requireExactMatch, doFetch, + cacheMode, trackingList); + } + + /** + * This version of getParentINForChildIN() is the actual implementation + * of the previous 2 versions (read the comments there), but it also + * implements one additional use cases via the extra "requireExactMatch" + * param. + * + * requireExactMatch == false && doFetch == false + * In this case we are actually looking for the lowest cached ancestor + * of the C node. The method will always return a node (considered as the + * "parent") unless the BTree is empty (has no nodes at all). The returned + * node must be latched, but not necessarily in EX mode. This search mode + * is used by the checkpointer. + * + * The exclusiveLevel param: + * In general, if exclusiveLevel == L, nodes above L will be SH latched and + * nodes at or below L will be EX-latched. In all current use cases, L is + * set to 1 + C.level. Note that if doFetch == false, the normalized + * exclusiveLevel must be >= 2 so that loadIN can be called. + */ + public SearchResult getParentINForChildIN( + long targetNodeId, + byte[] targetKey, + int targetLevel, + int exclusiveLevel, + boolean requireExactMatch, + boolean doFetch, + CacheMode cacheMode, + List trackingList) + throws DatabaseException { + + /* Call hook before latching. No latches are held. */ + TestHookExecute.doHookIfSet(getParentINHook); + + assert doFetch || (exclusiveLevel & IN.LEVEL_MASK) >= 2; + + /* + * SearchResult is initialized as follows: + * exactParentFound = false; + * parent = null; index = -1; childNotResident = false; + */ + SearchResult result = new SearchResult(); + + /* Get the tree root, SH-latched. 
*/ + IN rootIN = getRootIN(cacheMode); + + if (rootIN == null) { + return result; + } + + /* If the root is the target node, there is no parent */ + assert(rootIN.getNodeId() != targetNodeId); + assert(rootIN.getLevel() >= exclusiveLevel) : + " rootLevel=" + rootIN.getLevel() + + " exLevel=" + exclusiveLevel; + + IN parent = rootIN; + IN child = null; + boolean success = false; + + try { + + if (rootIN.getLevel() <= exclusiveLevel) { + rootIN.releaseLatch(); + rootIN = getRootINLatchedExclusive(cacheMode); + assert(rootIN != null); + parent = rootIN; + } + + while (true) { + + assert(parent.getNEntries() > 0); + + result.index = parent.findEntry(targetKey, false, false); + + if (trackingList != null) { + trackingList.add(new TrackingInfo( + parent.getLsn(result.index), parent.getNodeId(), + parent.getNEntries(), result.index)); + } + + assert TestHookExecute.doHookIfSet(searchHook); + + if (targetLevel > 0 && parent.getLevel() == targetLevel + 1) { + result.exactParentFound = true; + result.parent = parent; + break; + } + + if (doFetch) { + child = parent.fetchINWithNoLatch( + result, targetKey, cacheMode); + + if (child == null) { + if (trackingList != null) { + trackingList.clear(); + } + result.reset(); + + TestHookExecute.doHookIfSet(fetchINHook, child); + + rootIN = getRootIN(cacheMode); + assert(rootIN != null); + + if (rootIN.getLevel() <= exclusiveLevel) { + rootIN.releaseLatch(); + rootIN = getRootINLatchedExclusive(cacheMode); + assert(rootIN != null); + } + + parent = rootIN; + continue; + } + } else { + + /* + * We can only call loadIN if we have an EX-latch on the + * parent. However, calling loadIN is only necessary when + * the parent is at level 2, since UINs are not cached + * off-heap, and exclusiveLevel is currently always >= 2. + */ + if (parent.getNormalizedLevel() == 2) { + child = parent.loadIN(result.index, cacheMode); + } else { + child = (IN) parent.getTarget(result.index); + } + } + + assert(child != null || !doFetch); + + if (child == null) { + if (requireExactMatch) { + parent.releaseLatch(); + } else { + result.parent = parent; + } + result.childNotResident = true; + break; + } + + if (child.getNodeId() == targetNodeId) { + result.exactParentFound = true; + result.parent = parent; + break; + } + + if (child.isBIN()) { + if (requireExactMatch) { + parent.releaseLatch(); + } else { + result.parent = parent; + } + break; + } + + /* We can search further down the tree. */ + if (child.getLevel() <= exclusiveLevel) { + latchChild(parent, child, cacheMode); + } else { + latchChildShared(parent, child, cacheMode); + } + + parent.releaseLatch(); + parent = child; + child = null; + } + + success = true; + + } finally { + + if (!success) { + if (parent.isLatchOwner()) { + parent.releaseLatch(); + } + + if (child != null && child.isLatchOwner()) { + child.releaseLatch(); + } + } + } + + if (result.parent != null) { + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(1); + } + assert((!doFetch && !requireExactMatch) || + result.parent.isLatchOwner()); + } + + return result; + } + + /** + * Return a reference to the parent of this LN. This searches through the + * tree and allows splits, if the splitsAllowed param is true. Set the + * tree location to the proper BIN parent whether or not the LN child is + * found. That's because if the LN is not found, recovery or abort will + * need to place it within the tree, and so we must point at the + * appropriate position. + * + *

        When this method returns with location.bin non-null, the BIN is + * latched and must be unlatched by the caller. Note that location.bin may + * be non-null even if this method returns false.
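+     *
+     * Caller sketch based on the contract above (illustrative;
+     * TreeLocation construction details are assumed):
+     * <pre>
+     *   TreeLocation loc = new TreeLocation();
+     *   boolean found = tree.getParentBINForChildLN(
+     *       loc, key, false, false, cacheMode);
+     *   if (loc.bin != null) {  // may be non-null even when !found
+     *       try {
+     *           // ... when found, use loc.index, loc.childLsn ...
+     *       } finally {
+     *           loc.bin.releaseLatch();
+     *       }
+     *   }
+     * </pre>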

        + * + * @param location a holder class to hold state about the location + * of our search. Sort of an internal cursor. + * + * @param key key to navigate through main key + * + * @param splitsAllowed true if this method is allowed to cause tree splits + * as a side effect. In practice, recovery can cause splits, but abort + * can't. + * + * @param blindDeltaOps Normally, if this method lands on a BIN-delta and + * the search key is not in that delta, it will mutate the delta to a full + * BIN to make sure whether the search key exists in the tree or not. + * However, by passing true for blindDeltaOps, the caller indicates that + * it doesn't really care whether the key is in the tree or not: it is + * going to insert the key in the BIN-delta, if not already there, + * essentially overwritting the slot that may exist in the full BIN. So, + * if blindDeltaOps is true, the method will not mutate a BIN-delta parent + * (unless the BIN-delta has no space for a slot insertion). + * + * @param cacheMode The CacheMode for affecting the hotness of the tree. + * + * @return true if node found in tree. + * If false is returned and there is the possibility that we can insert + * the record into a plausible parent we must also set + * - location.bin (may be null if no possible parent found) + * - location.lnKey (don't need to set if no possible parent). + */ + public boolean getParentBINForChildLN( + TreeLocation location, + byte[] key, + boolean splitsAllowed, + boolean blindDeltaOps, + CacheMode cacheMode) + throws DatabaseException { + + /* + * Find the BIN that either points to this LN or could be its + * ancestor. + */ + location.reset(); + BIN bin; + int index; + + if (splitsAllowed) { + bin = searchSplitsAllowed(key, cacheMode, null /*comparator*/); + } else { + bin = search(key, cacheMode); + } + + if (bin == null) { + return false; + } + + try { + while (true) { + + location.bin = bin; + + index = bin.findEntry( + key, true /*indicateIfExact*/, false /*exactSearch*/); + + boolean match = (index >= 0 && + (index & IN.EXACT_MATCH) != 0); + + index &= ~IN.EXACT_MATCH; + location.index = index; + location.lnKey = key; + + /* + if (!match && bin.isBINDelta() && blindDeltaOps) { + System.out.println( + "Blind op on BIN-delta : " + bin.getNodeId() + + " nEntries = " + + bin.getNEntries() + + " max entries = " + + bin.getMaxEntries() + + " full BIN entries = " + + bin.getFullBinNEntries() + + " full BIN max entries = " + + bin.getFullBinMaxEntries()); + } + */ + + if (match) { + location.childLsn = bin.getLsn(index); + location.childLoggedSize = bin.getLastLoggedSize(index); + location.isKD = bin.isEntryKnownDeleted(index); + location.isEmbedded = bin.isEmbeddedLN(index); + + return true; + + } else { + + if (bin.isBINDelta() && + (!blindDeltaOps || + bin.getNEntries() >= bin.getMaxEntries())) { + + bin.mutateToFullBIN(splitsAllowed /*leaveFreeSlot*/); + location.reset(); + continue; + } + + return false; + } + } + + } catch (RuntimeException e) { + bin.releaseLatch(); + location.bin = null; + throw e; + } + } + + /** + * Find the BIN that is relevant to the insert. If the tree doesn't exist + * yet, then create the first IN and BIN. On return, the cursor is set to + * the BIN that is found or created, and the BIN is latched. + */ + public BIN findBinForInsert(final byte[] key, final CacheMode cacheMode) { + + boolean rootLatchIsHeld = false; + BIN bin = null; + + try { + long logLsn; + + /* + * We may have to try several times because of a small + * timing window, explained below. 
+ */ + while (true) { + + rootLatchIsHeld = true; + rootLatch.acquireShared(); + + if (!rootExists()) { + + rootLatch.release(); + rootLatch.acquireExclusive(); + if (rootExists()) { + rootLatch.release(); + rootLatchIsHeld = false; + continue; + } + + final EnvironmentImpl env = database.getEnv(); + final INList inMemoryINs = env.getInMemoryINs(); + + /* + * This is an empty tree, either because it's brand new + * tree or because everything in it was deleted. Create an + * IN and a BIN. We could latch the rootIN here, but + * there's no reason to since we're just creating the + * initial tree and we have the rootLatch held. Remember + * that referred-to children must be logged before any + * references to their LSNs. + */ + IN rootIN = + new IN(database, key, maxTreeEntriesPerNode, 2); + rootIN.setIsRoot(true); + + rootIN.latch(cacheMode); + + /* First BIN in the tree, log provisionally right away. */ + bin = new BIN(database, key, maxTreeEntriesPerNode, 1); + bin.latch(cacheMode); + logLsn = bin.optionalLogProvisionalNoCompress(rootIN); + + /* + * Log the root right away. Leave the root dirty, because + * the MapLN is not being updated, and we want to avoid + * this scenario from [#13897], where the LN has no + * possible parent. + * provisional BIN + * root IN + * checkpoint start + * LN is logged + * checkpoint end + * BIN is dirtied, but is not part of checkpoint + */ + boolean insertOk = rootIN.insertEntry(bin, key, logLsn); + assert insertOk; + + logLsn = rootIN.optionalLog(); + rootIN.setDirty(true); /*force re-logging, see [#13897]*/ + + root = makeRootChildReference(rootIN, new byte[0], logLsn); + + rootIN.releaseLatch(); + + /* Add the new nodes to the in memory list. */ + inMemoryINs.add(bin); + inMemoryINs.add(rootIN); + env.getEvictor().addBack(bin); + + rootLatch.release(); + rootLatchIsHeld = false; + + break; + } else { + rootLatch.release(); + rootLatchIsHeld = false; + + /* + * There's a tree here, so search for where we should + * insert. However, note that a window exists after we + * release the root latch. We release the latch because the + * search method expects to take the latch. After the + * release and before search, the INCompressor may come in + * and delete the entire tree, so search may return with a + * null. + */ + bin = searchSplitsAllowed(key, cacheMode); + + if (bin == null) { + /* The tree was deleted by the INCompressor. */ + continue; + } else { + /* search() found a BIN where this key belongs. */ + break; + } + } + } + } finally { + if (rootLatchIsHeld) { + rootLatch.release(); + } + } + + /* testing hook to insert item into log. */ + assert TestHookExecute.doHookIfSet(ckptHook); + + return bin; + } + + /** + * Do a key based search, permitting pre-emptive splits. Returns the + * target node's parent. + */ + public BIN searchSplitsAllowed(byte[] key, CacheMode cacheMode) { + + return searchSplitsAllowed(key, cacheMode, null); + } + + + private BIN searchSplitsAllowed( + byte[] key, + CacheMode cacheMode, + Comparator comparator) { + + BIN insertTarget = null; + + while (insertTarget == null) { + + rootLatch.acquireShared(); + + boolean rootLatched = true; + boolean rootINLatched = false; + boolean success = false; + IN rootIN = null; + + /* + * Latch the rootIN, check if it needs splitting. If so split it + * and update the associated MapLN. To update the MapLN, we must + * lock it, which implies that all latches must be released prior + * to the lock, and as a result, the root may require splitting + * again or may be split by another thread. 
So we must restart + * the loop to get the latest root. + */ + try { + if (!rootExists()) { + return null; + } + + rootIN = (IN) root.fetchTarget(database, null); + + if (rootIN.needsSplitting()) { + + rootLatch.release(); + rootLatch.acquireExclusive(); + + if (!rootExists()) { + return null; + } + + rootIN = (IN) root.fetchTarget(database, null); + + if (rootIN.needsSplitting()) { + + splitRoot(cacheMode); + + rootLatch.release(); + rootLatched = false; + + EnvironmentImpl env = database.getEnv(); + env.getDbTree().optionalModifyDbRoot(database); + + continue; + } + } + + rootIN.latchShared(cacheMode); + rootINLatched = true; + success = true; + + } finally { + if (!success && rootINLatched) { + rootIN.releaseLatch(); + } + if (rootLatched) { + rootLatch.release(); + } + } + + /* + * Now, search the tree, doing splits if required. The rootIN + * is latched in SH mode, but this.root is not latched. If any + * splits are needed, this.root will first be latched exclusivelly + * and will stay latched until all splits are done. + */ + try { + assert(rootINLatched); + + insertTarget = searchSplitsAllowed( + rootIN, key, cacheMode, comparator); + + if (insertTarget == null) { + if (LatchSupport.TRACK_LATCHES) { + LatchSupport.expectBtreeLatchesHeld(0); + } + relatchesRequired.increment(); + database.getEnv().incRelatchesRequired(); + } + } catch (SplitRequiredException e) { + + /* + * The last slot in the root was used at the point when this + * thread released the rootIN latch in order to force splits. + * Retry. SR [#11147]. + */ + continue; + } + } + + return insertTarget; + } + + /** + * Search the tree, permitting preemptive splits. + * + * When this returns, parent will be unlatched unless parent is the + * returned IN. + */ + private BIN searchSplitsAllowed( + IN rootIN, + byte[] key, + CacheMode cacheMode, + Comparator comparator) + throws SplitRequiredException { + + assert(rootIN.isLatchOwner()); + if (!rootIN.isRoot()) { + throw EnvironmentFailureException.unexpectedState( + "A null or non-root IN was given as the parent"); + } + + int index; + IN parent = rootIN; + IN child = null; + boolean success = false; + + /* + * Search downward until we hit a node that needs a split. In that + * case, retreat to the top of the tree and force splits downward. + */ + try { + do { + if (parent.getNEntries() == 0) { + throw EnvironmentFailureException.unexpectedState( + "Found upper IN with 0 entries"); + } + + index = parent.findEntry(key, false, false, comparator); + assert index >= 0; + + child = parent.fetchINWithNoLatch(index, key, cacheMode); + + if (child == null) { + return null; // restart the search + } + + /* if child is a BIN, it is actually EX-latched */ + latchChildShared(parent, child, cacheMode); + + /* + * If we need to split, try compressing first and check again. + * Mutate to a full BIN because compression has no impact on a + * BIN-delta, and a full BIN is needed for splitting anyway. + */ + if (child.needsSplitting()) { + + child.mutateToFullBIN(false /*leaveFreeSlot*/); + + database.getEnv().lazyCompress( + child, true /*compressDirtySlots*/); + + if (child.needsSplitting()) { + + child.releaseLatch(); + parent.releaseLatch(); + + /* SR [#11144]*/ + assert TestHookExecute.doHookIfSet(waitHook); + + /* + * forceSplit may throw SplitRequiredException if it + * finds that the root needs splitting. Allow the + * exception to propagate up to the caller, who will + * do the root split. Otherwise, restart the search + * from the root IN again. 
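+                         *
+                         * For reference, the catch clause in the public
+                         * searchSplitsAllowed above handles it like this
+                         * (sketch):
+                         *
+                         *   try {
+                         *       insertTarget = searchSplitsAllowed(rootIN, ...);
+                         *   } catch (SplitRequiredException e) {
+                         *       continue;  // re-latch the root and retry
+                         *   }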
+ */ + rootIN = forceSplit(key, cacheMode); + parent = rootIN; + + assert(rootIN.isLatchOwner()); + if (!rootIN.isRoot()) { + throw EnvironmentFailureException.unexpectedState( + "A null or non-root IN was given as the parent"); + } + continue; + } + } + + /* Continue down a level */ + parent.releaseLatch(); + parent = child; + child = null; + + } while (!parent.isBIN()); + + success = true; + return (BIN)parent; + + } finally { + if (!success) { + if (child != null && child.isLatchOwner()) { + child.releaseLatch(); + } + if (parent != child && parent.isLatchOwner()) { + parent.releaseLatch(); + } + } + } + } + + /** + * Do pre-emptive splitting: search down the tree until we get to the BIN + * level, and split any nodes that fit the splittable requirement except + * for the root. If the root needs splitting, a splitRequiredException + * is thrown and the root split is handled at a higher level. + * + * Note that more than one node in the path may be splittable. For example, + * a tree might have a level2 IN and a BIN that are both splittable, and + * would be encountered by the same insert operation. + * + * Splits cause INs to be logged in all ancestors, including the root. This + * is to avoid the "great aunt" problem described in LevelRecorder. + * + * INs below the root are logged provisionally; only the root is logged + * non-provisionally. Provisional logging is necessary during a checkpoint + * for levels less than maxFlushLevel. + * + * This method acquires and holds this.rootLatch in EX mode during its + * whole duration (so splits are serialized). The rootLatch is released + * on return. + * + * @return the tree root node, latched in EX mode. This may be different + * than the tree root when this method was called, because no latches are + * held on entering this method. + * + * All latches are released in case of exception. + */ + private IN forceSplit(byte[] key, CacheMode cacheMode) + throws DatabaseException, SplitRequiredException { + + final ArrayList nodeLadder = new ArrayList(); + + boolean allLeftSideDescent = true; + boolean allRightSideDescent = true; + int index; + IN parent; + IN child = null; + IN rootIN = null; + + /* + * Latch the root in order to update the root LSN when we're done. + * Latch order must be: root, root IN. We'll leave this method with the + * original parent latched. + */ + rootLatch.acquireExclusive(); + + boolean success = false; + try { + /* The root IN may have been evicted. [#16173] */ + rootIN = (IN) root.fetchTarget(database, null); + parent = rootIN; + parent.latch(cacheMode); + + /* + * Another thread may have crept in and + * - used the last free slot in the parent, making it impossible + * to correctly propagate the split. + * - actually split the root, in which case we may be looking at + * the wrong subtree for this search. + * If so, throw and retry from above. SR [#11144] + */ + if (rootIN.needsSplitting()) { + throw splitRequiredException; + } + + /* + * Search downward to the BIN level, saving the information + * needed to do a split if necessary. + */ + do { + if (parent.getNEntries() == 0) { + throw EnvironmentFailureException.unexpectedState( + "Found upper IN with 0 entries"); + } + + /* Look for the entry matching key in the current node. */ + index = parent.findEntry(key, false, false); + assert index >= 0; + if (index != 0) { + allLeftSideDescent = false; + } + if (index != (parent.getNEntries() - 1)) { + allRightSideDescent = false; + } + + /* + * Get the child node that matches. 
We only need to work on + * nodes in residence. + */ + child = parent.loadIN(index, cacheMode); + + if (child == null) { + break; + } + + latchChild(parent, child, cacheMode); + + nodeLadder.add(new SplitInfo(parent, child, index)); + + /* Continue down a level */ + parent = child; + } while (!parent.isBIN()); + + boolean startedSplits = false; + + /* + * Process the accumulated nodes from the bottom up. Split each + * node if required. If the node should not split, we check if + * there have been any splits on the ladder yet. If there are none, + * we merely release the node, since there is no update. If splits + * have started, we need to propagate new LSNs upward, so we log + * the node and update its parent. + */ + long lastParentForSplit = Node.NULL_NODE_ID; + + for (int i = nodeLadder.size() - 1; i >= 0; i -= 1) { + final SplitInfo info = nodeLadder.get(i); + + child = info.child; + parent = info.parent; + index = info.index; + + /* Opportunistically split the node if it is full. */ + if (child.needsSplitting()) { + + child.mutateToFullBIN(false /*leaveFreeSlot*/); + + final IN grandParent = + (i > 0) ? nodeLadder.get(i - 1).parent : null; + + if (allLeftSideDescent || allRightSideDescent) { + child.splitSpecial( + parent, index, grandParent, maxTreeEntriesPerNode, + key, allLeftSideDescent); + } else { + child.split( + parent, index, grandParent, maxTreeEntriesPerNode); + } + + lastParentForSplit = parent.getNodeId(); + startedSplits = true; + + /* + * If the DB root IN was logged, update the DB tree's child + * reference. Now the MapLN is logically dirty. Be sure to + * flush the MapLN if we ever evict the root. + */ + if (parent.isRoot()) { + root.updateLsnAfterOptionalLog( + database, parent.getLastLoggedLsn()); + } + } else { + if (startedSplits) { + final long newChildLsn; + + /* + * If this child was the parent of a split, it's + * already logged by the split call. We just need to + * propagate the logging upwards. If this child is just + * a link in the chain upwards, log it. + */ + if (lastParentForSplit == child.getNodeId()) { + newChildLsn = child.getLastLoggedLsn(); + } else { + newChildLsn = child.optionalLogProvisional(parent); + } + + parent.updateEntry( + index, newChildLsn, VLSN.NULL_VLSN_SEQUENCE, + 0/*lastLoggedSize*/); + + /* + * The root is never a 'child' in nodeLadder so it must + * be logged separately. + */ + if (parent.isRoot()) { + + final long newRootLsn = parent.optionalLog(); + + root.updateLsnAfterOptionalLog( + database, newRootLsn); + } + } + } + child.releaseLatch(); + child = null; + } + success = true; + } finally { + if (!success) { + if (child != null) { + child.releaseLatchIfOwner(); + } + + for (SplitInfo info : nodeLadder) { + info.child.releaseLatchIfOwner(); + } + + if (rootIN != null) { + rootIN.releaseLatchIfOwner(); + } + } + + rootLatch.release(); + } + + return rootIN; + } + + /** + * Split the root of the tree. + */ + private void splitRoot(CacheMode cacheMode) + throws DatabaseException { + + /* + * Create a new root IN, insert the current root IN into it, and then + * call split. + */ + EnvironmentImpl env = database.getEnv(); + INList inMemoryINs = env.getInMemoryINs(); + + IN curRoot = null; + curRoot = (IN) root.fetchTarget(database, null); + curRoot.latch(cacheMode); + long curRootLsn = 0; + long logLsn = 0; + IN newRoot = null; + try { + + /* + * Make a new root IN, giving it an id key from the previous root. 
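+             *
+             * Schematically (sketch only):
+             *
+             *   before:  root -> curRoot (level N)
+             *   after:   root -> newRoot (level N+1) -> curRoot + sibling
+             *
+             * where the sibling is created by the curRoot.split call below.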
+ */ + byte[] rootIdKey = curRoot.getKey(0); + newRoot = new IN(database, rootIdKey, maxTreeEntriesPerNode, + curRoot.getLevel() + 1); + newRoot.latch(cacheMode); + newRoot.setIsRoot(true); + curRoot.setIsRoot(false); + + /* + * Make the new root IN point to the old root IN. Log the old root + * provisionally, because we modified it so it's not the root + * anymore, then log the new root. We are guaranteed to be able to + * insert entries, since we just made this root. + */ + boolean logSuccess = false; + try { + curRootLsn = curRoot.optionalLogProvisional(newRoot); + + boolean inserted = newRoot.insertEntry( + curRoot, rootIdKey, curRootLsn); + assert inserted; + + logLsn = newRoot.optionalLog(); + logSuccess = true; + } finally { + if (!logSuccess) { + /* Something went wrong when we tried to log. */ + curRoot.setIsRoot(true); + } + } + + inMemoryINs.add(newRoot); + + /* + * Don't add the new root into the LRU because it has a cached + * child. + */ + + /* + * Make the tree's root reference point to this new node. Now the + * MapLN is logically dirty, but the change hasn't been logged. Be + * sure to flush the MapLN if we ever evict the root. + */ + root.setTarget(newRoot); + root.updateLsnAfterOptionalLog(database, logLsn); + curRoot.split(newRoot, 0, null, maxTreeEntriesPerNode); + root.setLsn(newRoot.getLastLoggedLsn()); + + } finally { + /* FindBugs ignore possible null pointer dereference of newRoot. */ + newRoot.releaseLatch(); + curRoot.releaseLatch(); + } + rootSplits.increment(); + traceSplitRoot(Level.FINE, TRACE_ROOT_SPLIT, newRoot, logLsn, + curRoot, curRootLsn); + } + + public BIN search(byte[] key, CacheMode cacheMode) { + + return search(key, SearchType.NORMAL, null, cacheMode, null); + } + + /** + * Search the tree, starting at the root. Depending on search type either + * (a) search for the BIN that *should* contain a given key, or (b) return + * the right-most or left-most BIN in the tree. + * + * Preemptive splitting is not done during the search. + * + * @param key - the key to search for, or null if searchType is LEFT or + * RIGHT. + * + * @param searchType - The type of tree search to perform. NORMAL means + * we're searching for key in the tree. LEFT/RIGHT means we're descending + * down the left or right side, resp. + * + * @param binBoundary - If non-null, information is returned about whether + * the BIN found is the first or last BIN in the database. + * + * @return - the BIN that matches the criteria, if any. Returns null if + * the root is null. BIN is latched (unless it's null) and must be + * unlatched by the caller. In a NORMAL search, it is the caller's + * responsibility to do the findEntry() call on the key and BIN to locate + * the entry (if any) that matches key. 
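+     *
+     * A typical NORMAL-search call sequence, as a non-authoritative sketch
+     * (the exact findEntry flags depend on what the caller needs):
+     *
+     * <pre>
+     *     BIN bin = tree.search(key, CacheMode.DEFAULT);
+     *     if (bin != null) {
+     *         try {
+     *             // indicateIfExact=false, exactSearch=true
+     *             int index = bin.findEntry(key, false, true);
+     *             // index is negative if no slot matches key exactly
+     *         } finally {
+     *             bin.releaseLatch();
+     *         }
+     *     }
+     * </pre>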
+ */ + public BIN search( + byte[] key, + SearchType searchType, + BINBoundary binBoundary, + CacheMode cacheMode, + Comparator comparator) { + + IN rootIN = getRootIN(cacheMode); + + if (rootIN == null) { + return null; + } + + assert ((searchType != SearchType.LEFT && + searchType != SearchType.RIGHT) || key == null); + + if (binBoundary != null) { + binBoundary.isLastBin = true; + binBoundary.isFirstBin = true; + } + + boolean success = false; + int index; + IN parent = rootIN; + IN child = null; + + TreeWalkerStatsAccumulator treeStatsAccumulator = + getTreeStatsAccumulator(); + + try { + if (treeStatsAccumulator != null) { + parent.accumulateStats(treeStatsAccumulator); + } + + do { + if (parent.getNEntries() == 0) { + throw EnvironmentFailureException.unexpectedState( + "Upper IN with 0 entries"); + } + + if (searchType == SearchType.NORMAL) { + index = parent.findEntry(key, false, false, comparator); + + } else if (searchType == SearchType.LEFT) { + index = 0; + + } else if (searchType == SearchType.RIGHT) { + index = parent.getNEntries() - 1; + + } else { + throw EnvironmentFailureException.unexpectedState( + "Invalid value of searchType: " + searchType); + } + + assert(index >= 0); + + if (binBoundary != null) { + if (index != parent.getNEntries() - 1) { + binBoundary.isLastBin = false; + } + if (index != 0) { + binBoundary.isFirstBin = false; + } + } + + child = parent.fetchINWithNoLatch(index, key, cacheMode); + + if (child == null) { + parent = getRootIN(cacheMode); + assert(parent != null); + if (treeStatsAccumulator != null) { + parent.accumulateStats(treeStatsAccumulator); + } + continue; + } + + /* Latch the child. Note: BINs are always latched exclusive. */ + latchChildShared(parent, child, cacheMode); + + if (treeStatsAccumulator != null) { + child.accumulateStats(treeStatsAccumulator); + } + + parent.releaseLatch(); + parent = child; + child = null; + + } while (!parent.isBIN()); + + success = true; + return (BIN)parent; + + } finally { + if (!success) { + + /* + * In [#14903] we encountered a latch exception below and the + * original exception was lost. Print the stack trace and + * allow the original exception to be thrown if this happens + * again, to get more information about the problem. + */ + try { + if (child != null && child.isLatchOwner()) { + child.releaseLatch(); + } + + if (parent != child && parent.isLatchOwner()) { + parent.releaseLatch(); + } + } catch (Exception e) { + LoggerUtils.traceAndLogException( + database.getEnv(), "Tree", "searchSubTreeInternal", "", + e); + } + } + } + } + + /* + * Search for the given key in the subtree rooted at the given parent IN. + * The search descends until the given target level, and the IN that + * contains or covers the key is returned latched in EX or SH mode as + * specified by the latchShared param. + * + * The method uses grandparent latching, but only if the parent is the + * root of the whole Btree and it is SH-latched on entry. + */ + private IN searchSubTree( + IN parent, + byte[] key, + SearchType searchType, + int targetLevel, + boolean latchShared, + CacheMode cacheMode, + Comparator comparator) { + + /* + * If a an intermediate IN (e.g., from getNextIN) was + * originally passed, it was latched exclusively. + */ + assert(parent != null && + (parent.isRoot() || + parent.isLatchExclusiveOwner())); + + if ((searchType == SearchType.LEFT || + searchType == SearchType.RIGHT) && + key != null) { + + /* + * If caller is asking for a right or left search, they shouldn't + * be passing us a key. 
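+             *
+             * That is (illustrative only): a LEFT/RIGHT descent is invoked
+             * as searchSubTree(root, null, SearchType.LEFT, ...), while a
+             * NORMAL search must supply a non-null key.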
+             */
+            throw EnvironmentFailureException.unexpectedState(
+                "searchSubTree passed key and left/right search");
+        }
+
+        assert(parent.isUpperIN());
+        assert(parent.isLatchOwner());
+
+        boolean success = false;
+        int index;
+        IN subtreeRoot = parent;
+        IN child = null;
+        IN grandParent = null;
+        boolean childIsLatched = false;
+        boolean grandParentIsLatched = false;
+        boolean doGrandparentLatching = !parent.isLatchExclusiveOwner();
+
+        TreeWalkerStatsAccumulator treeStatsAccumulator =
+            getTreeStatsAccumulator();
+
+        try {
+            do {
+                if (treeStatsAccumulator != null) {
+                    parent.accumulateStats(treeStatsAccumulator);
+                }
+
+                assert(parent.getNEntries() > 0);
+
+                if (searchType == SearchType.NORMAL) {
+                    /* Look for the entry matching key in the current node. */
+                    index = parent.findEntry(key, false, false, comparator);
+                } else if (searchType == SearchType.LEFT) {
+                    /* Left search, always take the 0th entry. */
+                    index = 0;
+                } else if (searchType == SearchType.RIGHT) {
+                    /* Right search, always take the highest entry. */
+                    index = parent.getNEntries() - 1;
+                } else {
+                    throw EnvironmentFailureException.unexpectedState(
+                        "Invalid value of searchType: " + searchType);
+                }
+
+                assert(index >= 0);
+
+                /*
+                 * Get the child IN.
+                 *
+                 * If the child is not cached and we are using grandparent
+                 * latching, then:
+                 *
+                 * (a) If "parent" is not the subtree root, it is always
+                 * SH-latched at this point. So, to fetch the child, we need to
+                 * unlatch the parent and relatch it exclusively. Because we
+                 * have the grandparent latch (in either SH or EX mode), the
+                 * parent will not be evicted or detached from the tree and the
+                 * index of the child within the parent won't change. After
+                 * the parent is EX-latched, we can release the grandparent so
+                 * it won't be held while reading the child from the log.
+                 *
+                 * (b) If "parent" is the BTree root, it may be SH-latched. In
+                 * this case, since there is no grandparent, we must unlatch
+                 * the parent and relatch it in EX mode under the protection
+                 * of the rootLatch; then we restart the do-loop.
+                 *
+                 * (c) If "parent" is the subtree root, but not the root of
+                 * the full Btree, then it must be EX-latched already, and
+                 * we can just fetch the child.
+                 */
+                child = (IN) parent.getTarget(index);
+
+                if (child == null && doGrandparentLatching) {
+
+                    if (parent != subtreeRoot) {
+
+                        assert(!parent.isLatchExclusiveOwner());
+                        parent.releaseLatch();
+                        parent.latch(cacheMode);
+                        grandParent.releaseLatch();
+                        grandParentIsLatched = false;
+                        grandParent = null;
+                        doGrandparentLatching = false;
+
+                    } else if (parent.isRoot() &&
+                               !parent.isLatchExclusiveOwner()) {
+
+                        parent.releaseLatch();
+                        subtreeRoot = getRootINLatchedExclusive(cacheMode);
+                        parent = subtreeRoot;
+                        assert(parent != null);
+                        assert(grandParent == null);
+                        doGrandparentLatching = false;
+
+                        continue;
+                    }
+
+                    child = parent.fetchIN(index, cacheMode);
+
+                } else if (child == null) {
+
+                    child = parent.fetchIN(index, CacheMode.UNCHANGED);
+                }
+
+                /* After fetching the child we can release the grandparent. */
+                if (grandParent != null) {
+                    grandParent.releaseLatch();
+                    grandParentIsLatched = false;
+                }
+
+                /* Latch the child. Note: BINs are always latched exclusive.
*/ + if (child.getLevel() == targetLevel) { + if (latchShared) { + child.latchShared(cacheMode); + } else { + child.latch(cacheMode); + } + } + else if (doGrandparentLatching) { + } else { + latchChild(parent, child, cacheMode); + } + childIsLatched = true; + + child.mutateToFullBIN(false /*leaveFreeSlot*/); + + if (treeStatsAccumulator != null) { + child.accumulateStats(treeStatsAccumulator); + } + + /* Continue down a level */ + if (doGrandparentLatching) { + grandParent = parent; + grandParentIsLatched = true; + } else { + parent.releaseLatch(); + } + + parent = child; + + } while (!parent.isBIN() && parent.getLevel() != targetLevel); + + success = true; + return child; + + } finally { + if (!success) { + + /* + * In [#14903] we encountered a latch exception below and the + * original exception was lost. Print the stack trace and + * allow the original exception to be thrown if this happens + * again, to get more information about the problem. + */ + try { + if (child != null && childIsLatched) { + child.releaseLatch(); + } + + if (parent != child) { + parent.releaseLatch(); + } + } catch (Exception e) { + LoggerUtils.traceAndLogException( + database.getEnv(), "Tree", "searchSubTreeInternal", "", + e); + } + } + + if (grandParent != null && grandParentIsLatched) { + grandParent.releaseLatch(); + } + } + } + + /** + * rebuildINList is used by recovery to add all the resident nodes to the + * IN list. + */ + public void rebuildINList() + throws DatabaseException { + + INList inMemoryList = database.getEnv().getInMemoryINs(); + + if (root != null) { + rootLatch.acquireShared(); + try { + Node rootIN = root.getTarget(); + if (rootIN != null) { + rootIN.rebuildINList(inMemoryList); + } + } finally { + rootLatch.release(); + } + } + } + + /** + * Debugging check that all resident nodes are on the INList and no stray + * nodes are present in the unused portion of the IN arrays. + */ + public void validateINList(IN parent) + throws DatabaseException { + + if (parent == null) { + parent = (IN) root.getTarget(); + } + + if (parent != null) { + INList inList = database.getEnv().getInMemoryINs(); + + if (!inList.contains(parent)) { + throw EnvironmentFailureException.unexpectedState( + "IN " + parent.getNodeId() + " missing from INList"); + } + + for (int i = 0;; i += 1) { + try { + Node node = parent.getTarget(i); + + if (i >= parent.getNEntries()) { + if (node != null) { + throw EnvironmentFailureException.unexpectedState( + "IN " + parent.getNodeId() + + " has stray node " + node + + " at index " + i); + } + byte[] key = parent.getKey(i); + if (key != null) { + throw EnvironmentFailureException.unexpectedState( + "IN " + parent.getNodeId() + + " has stray key " + key + + " at index " + i); + } + } + + if (node instanceof IN) { + validateINList((IN) node); + } + } catch (ArrayIndexOutOfBoundsException e) { + break; + } + } + } + } + + /* + * Logging support + */ + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + int size = 1; // rootExists + if (root != null) { + size += root.getLogSize(); + } + return size; + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer logBuffer) { + byte booleans = (byte) ((root != null) ? 
1 : 0); + logBuffer.put(booleans); + if (root != null) { + root.writeToLog(logBuffer); + } + } + + /** + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + boolean rootExists = false; + byte booleans = itemBuffer.get(); + rootExists = (booleans & 1) != 0; + if (rootExists) { + root = makeRootChildReference(); + root.readFromLog(itemBuffer, entryVersion); + } + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + if (root != null) { + root.dumpLog(sb, verbose); + } + sb.append(""); + } + + /** + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } + + /** + * @return the TreeStats for this tree. + */ + int getTreeStats() { + return rootSplits.get(); + } + + private TreeWalkerStatsAccumulator getTreeStatsAccumulator() { + if (EnvironmentImpl.getThreadLocalReferenceCount() > 0) { + return treeStatsAccumulatorTL.get(); + } else { + return null; + } + } + + public void setTreeStatsAccumulator(TreeWalkerStatsAccumulator tSA) { + treeStatsAccumulatorTL.set(tSA); + } + + public void loadStats(StatsConfig config, BtreeStats btreeStats) { + /* Add the tree statistics to BtreeStats. */ + btreeStats.setTreeStats(stats.cloneGroup(false)); + + if (config.getClear()) { + relatchesRequired.clear(); + rootSplits.clear(); + } + } + + /* + * Debugging stuff. + */ + public void dump() { + System.out.println(dumpString(0)); + } + + public String dumpString(int nSpaces) { + StringBuilder sb = new StringBuilder(); + sb.append(TreeUtils.indent(nSpaces)); + sb.append(""); + sb.append('\n'); + if (root != null) { + sb.append(DbLsn.dumpString(root.getLsn(), nSpaces)); + sb.append('\n'); + IN rootIN = (IN) root.getTarget(); + if (rootIN == null) { + sb.append(""); + } else { + sb.append(rootIN.toString()); + } + sb.append('\n'); + } + sb.append(TreeUtils.indent(nSpaces)); + sb.append(""); + return sb.toString(); + } + + /** + * Unit test support to validate subtree pruning. Didn't want to make root + * access public. + */ + boolean validateDelete(int index) + throws DatabaseException { + + rootLatch.acquireShared(); + try { + IN rootIN = (IN) root.fetchTarget(database, null); + rootIN.latch(); + try { + return rootIN.validateSubtreeBeforeDelete(index); + } finally { + rootIN.releaseLatch(); + } + } finally { + rootLatch.release(); + } + } + + /* For unit testing only. */ + public void setWaitHook(TestHook hook) { + waitHook = hook; + } + + /* For unit testing only. */ + public void setSearchHook(TestHook hook) { + searchHook = hook; + } + + /* For unit testing only. */ + public void setCkptHook(TestHook hook) { + ckptHook = hook; + } + + /* For unit testing only. */ + public void setGetParentINHook(TestHook hook) { + getParentINHook = hook; + } + + public void setFetchINHook(TestHook hook) { + fetchINHook = hook; + } + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. 
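+     *
+     * In other words, the method uses the usual guard pattern:
+     *
+     * <pre>
+     *     if (logger.isLoggable(level)) {
+     *         // only now build the relatively expensive message string
+     *     }
+     * </pre>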
+ */ + private void traceSplitRoot(Level level, + String splitType, + IN newRoot, + long newRootLsn, + IN oldRoot, + long oldRootLsn) { + Logger logger = database.getEnv().getLogger(); + if (logger.isLoggable(level)) { + StringBuilder sb = new StringBuilder(); + sb.append(splitType); + sb.append(" newRoot=").append(newRoot.getNodeId()); + sb.append(" newRootLsn="). + append(DbLsn.getNoFormatString(newRootLsn)); + sb.append(" oldRoot=").append(oldRoot.getNodeId()); + sb.append(" oldRootLsn="). + append(DbLsn.getNoFormatString(oldRootLsn)); + LoggerUtils.logMsg( + logger, database.getEnv(), level, sb.toString()); + } + } + + private static class SplitInfo { + IN parent; + IN child; + int index; + + SplitInfo(IN parent, IN child, int index) { + this.parent = parent; + this.child = child; + this.index = index; + } + } +} diff --git a/src/com/sleepycat/je/tree/TreeLocation.java b/src/com/sleepycat/je/tree/TreeLocation.java new file mode 100644 index 0000000..795b498 --- /dev/null +++ b/src/com/sleepycat/je/tree/TreeLocation.java @@ -0,0 +1,65 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import com.sleepycat.je.utilint.DbLsn; + +/* + * TreeLocation is a cursor like object that keeps track of a location + * in a tree. It's used during recovery. + */ +public class TreeLocation { + + public BIN bin; // parent BIN for the target LN + public int index; // index of where the LN is or should go + public byte[] lnKey; // the key that represents this LN in this BIN + public long childLsn = DbLsn.NULL_LSN; // current LSN value in that slot. + public int childLoggedSize; + public boolean isKD = false; + public boolean isEmbedded = false; + + public void reset() { + bin = null; + index = -1; + lnKey = null; + childLsn = DbLsn.NULL_LSN; + childLoggedSize = 0; + isKD = false; + isEmbedded = false; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(""); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/tree/TreeUtils.java b/src/com/sleepycat/je/tree/TreeUtils.java new file mode 100644 index 0000000..95d329d --- /dev/null +++ b/src/com/sleepycat/je/tree/TreeUtils.java @@ -0,0 +1,33 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +/** + * Miscellaneous Tree utilities. + */ +public class TreeUtils { + + static private final String SPACES = + " " + + " " + + " " + + " "; + + /** + * For tree dumper. 
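+     * For example, indent(4) returns a string of four spaces, used to nest
+     * child nodes in the dump output.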
+ */ + public static String indent(int nSpaces) { + return SPACES.substring(0, nSpaces); + } +} diff --git a/src/com/sleepycat/je/tree/TreeWalkerStatsAccumulator.java b/src/com/sleepycat/je/tree/TreeWalkerStatsAccumulator.java new file mode 100644 index 0000000..2551360 --- /dev/null +++ b/src/com/sleepycat/je/tree/TreeWalkerStatsAccumulator.java @@ -0,0 +1,27 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +/** + * Accumulates stats about a tree during tree walking. + */ +public interface TreeWalkerStatsAccumulator { + public void processIN(IN node, Long nid, int level); + + public void processBIN(BIN node, Long nid, int level); + + public void incrementLNCount(); + + public void incrementDeletedLNCount(); +} diff --git a/src/com/sleepycat/je/tree/VersionedLN.java b/src/com/sleepycat/je/tree/VersionedLN.java new file mode 100644 index 0000000..fb5975a --- /dev/null +++ b/src/com/sleepycat/je/tree/VersionedLN.java @@ -0,0 +1,72 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.utilint.SizeofMarker; +import com.sleepycat.je.utilint.VLSN; + +/** + * VersionedLN is used to provide an in-memory representation of an LN that + * makes its VLSN available through btree access. + * + * On disk, each log entry is composed of a header (je.log.LogEntryHeader) and + * a body (je.log.entry.LogEntry). When an LN is materialized in the Btree, it + * usually holds only the body, and does not have access to information in the + * log entry header, such as the VLSN. Since version based API operations need + * access to the VLSN, environments which are configured with + * je.rep.preserveRecordVersion=true instantiate VersionedLNs instead of LNs, + * in order to cache the VLSN with the LN, and make it cheaply available to + * Btree operations. + */ +public class VersionedLN extends LN { + + private long vlsnSequence = VLSN.NULL_VLSN_SEQUENCE; + + public VersionedLN() { + } + + VersionedLN(byte[] data) { + super(data); + } + + VersionedLN(DatabaseEntry dbt) { + super(dbt); + } + + /** For Sizeof. */ + public VersionedLN(SizeofMarker marker, DatabaseEntry dbt) { + super(dbt); + } + + @Override + public long getVLSNSequence() { + return vlsnSequence; + } + + @Override + public void setVLSNSequence(long seq) { + vlsnSequence = seq; + } + + /** + * Add additional size taken by this LN subclass. 
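+     * The extra VERSIONEDLN_OVERHEAD accounts for the cached vlsnSequence
+     * field that a plain LN does not carry.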
+ */ + @Override + public long getMemorySizeIncludedByParent() { + return super.getMemorySizeIncludedByParent() + + MemoryBudget.VERSIONEDLN_OVERHEAD; + } +} diff --git a/src/com/sleepycat/je/tree/WithRootLatched.java b/src/com/sleepycat/je/tree/WithRootLatched.java new file mode 100644 index 0000000..71d0171 --- /dev/null +++ b/src/com/sleepycat/je/tree/WithRootLatched.java @@ -0,0 +1,25 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import com.sleepycat.je.DatabaseException; + +public interface WithRootLatched { + + /** + * doWork is called while the tree's root latch is held. + */ + public IN doWork(ChildReference root) + throws DatabaseException; +} diff --git a/src/com/sleepycat/je/tree/dupConvert/DBIN.java b/src/com/sleepycat/je/tree/dupConvert/DBIN.java new file mode 100644 index 0000000..3a153be --- /dev/null +++ b/src/com/sleepycat/je/tree/dupConvert/DBIN.java @@ -0,0 +1,160 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree.dupConvert; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.TreeUtils; +import com.sleepycat.je.utilint.SizeofMarker; + +/** + * A DBIN represents an Duplicate Bottom Internal Node in the JE tree. + * + * Obsolete in log version 8, only used by DupConvert and some log readers. + */ +public final class DBIN extends BIN implements Loggable { + private static final String BEGIN_TAG = ""; + private static final String END_TAG = ""; + + /** + * Full key for this set of duplicates. + */ + private byte[] dupKey; + + public DBIN() { + super(); + } + + /** + * For Sizeof, set all array fields to null, since they are not part of the + * fixed overhead. + */ + public DBIN(SizeofMarker marker) { + super(marker); + dupKey = null; + } + + @Override + public boolean isDBIN() { + return true; + } + + /** + * @return true if this node is a duplicate-bearing node type, false + * if otherwise. 
+ */ + @Override + public boolean containsDuplicates() { + return true; + } + + @Override + protected long getFixedMemoryOverhead() { + return MemoryBudget.DBIN_FIXED_OVERHEAD; + } + + @Override + public String beginTag() { + return BEGIN_TAG; + } + + @Override + public String endTag() { + return END_TAG; + } + + /** + * For unit test support: + * @return a string that dumps information about this IN, without + */ + @Override + public String dumpString(int nSpaces, boolean dumpTags) { + StringBuilder sb = new StringBuilder(); + sb.append(TreeUtils.indent(nSpaces)); + sb.append(beginTag()); + sb.append('\n'); + + sb.append(TreeUtils.indent(nSpaces+2)); + sb.append(""); + sb.append(dupKey == null ? "" : Key.dumpString(dupKey, 0)); + sb.append(""); + sb.append('\n'); + + sb.append(super.dumpString(nSpaces, false)); + + sb.append(TreeUtils.indent(nSpaces)); + sb.append(endTag()); + return sb.toString(); + } + + /** + * @see IN#getLogType() + */ + @Override + public LogEntryType getLogType() { + return LogEntryType.LOG_DBIN; + } + + /* + * Logging support + */ + + /** + * @see Loggable#getLogSize + */ + @Override + public int getLogSize() { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see Loggable#writeToLog + */ + @Override + public void writeToLog(ByteBuffer logBuffer) { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see BIN#readFromLog + */ + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + + super.readFromLog(itemBuffer, entryVersion); + dupKey = LogUtils.readByteArray(itemBuffer, (entryVersion < 6)); + } + + /** + * DBINS need to dump their dup key + */ + @Override + protected void dumpLogAdditional(StringBuilder sb) { + super.dumpLogAdditional(sb); + sb.append(Key.dumpString(dupKey, 0)); + } + + @Override + public String shortClassName() { + return "DBIN"; + } +} diff --git a/src/com/sleepycat/je/tree/dupConvert/DIN.java b/src/com/sleepycat/je/tree/dupConvert/DIN.java new file mode 100644 index 0000000..74dd16e --- /dev/null +++ b/src/com/sleepycat/je/tree/dupConvert/DIN.java @@ -0,0 +1,250 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree.dupConvert; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.TreeUtils; +import com.sleepycat.je.utilint.SizeofMarker; + +/** + * An DIN represents an Duplicate Internal Node in the JE tree. + * + * Obsolete in log version 8, only used by DupConvert and some log readers. + */ +public final class DIN extends IN { + + private static final String BEGIN_TAG = ""; + private static final String END_TAG = ""; + + /** + * Full key for this set of duplicates. For example, if the tree + * contains k1/d1, k1/d2, k1/d3, the dupKey = k1. 
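+     *
+     * In the pre-log-version-8 format, that dup tree looked roughly like
+     * this (sketch):
+     *
+     *   BIN slot [k1] -> DIN (dupKey = k1, DupCountLN = 3)
+     *                      -> DBIN -> [d1] [d2] [d3]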
+ */ + private byte[] dupKey; + + /** + * Reference to DupCountLN which stores the count. + */ + private ChildReference dupCountLNRef; + + /** + * Create an empty DIN, with no node ID, to be filled in from the log. + */ + public DIN() { + super(); + + dupCountLNRef = new ChildReference(); + init(null, Key.EMPTY_KEY, 0, 0); + } + + /** + * For Sizeof, set all array fields to null, since they are not part of the + * fixed overhead. + */ + public DIN(SizeofMarker marker) { + super(marker); + dupKey = null; + } + + @Override + public boolean isDIN() { + return true; + } + + public ChildReference getDupCountLNRef() { + return dupCountLNRef; + } + + /** + * @return true if this node is a duplicate-bearing node type, false + * if otherwise. + */ + @Override + public boolean containsDuplicates() { + return true; + } + + /** + * Count up the memory usage attributable to this node alone. LNs children + * are counted by their BIN/DIN parents, but INs are not counted by + * their parents because they are resident on the IN list. + */ + @Override + public long computeMemorySize() { + long size = super.computeMemorySize(); + if (dupCountLNRef != null) { + size += MemoryBudget.byteArraySize(dupCountLNRef.getKey().length); + if (dupCountLNRef.getTarget() != null) { + size += dupCountLNRef.getTarget(). + getMemorySizeIncludedByParent(); + } + } + return size; + } + + /* Utility method used during unit testing. */ + @Override + protected long printMemorySize() { + final long inTotal = super.printMemorySize(); + long dupKeySize = 0; + long dupLNSize = 0; + + if (dupCountLNRef != null) { + dupKeySize = MemoryBudget. + byteArraySize(dupCountLNRef.getKey().length); + if (dupCountLNRef.getTarget() != null) { + dupLNSize = + dupCountLNRef.getTarget().getMemorySizeIncludedByParent(); + } + } + + final long dupTotal = inTotal + dupKeySize + dupLNSize; + System.out.format("DIN: %d dkey: %d ln: %d %n", + dupTotal, dupKeySize, dupLNSize); + return dupTotal; + } + + @Override + protected long getFixedMemoryOverhead() { + return MemoryBudget.DIN_FIXED_OVERHEAD; + } + + /* + * Logging Support + */ + + /** + * @see IN#getLogType + */ + @Override + public LogEntryType getLogType() { + return LogEntryType.LOG_DIN; + } + + /** + * @see IN#getLogSize + */ + @Override + public int getLogSize() { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see IN#writeToLog + */ + @Override + public void writeToLog(ByteBuffer logBuffer) { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see IN#readFromLog + */ + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + + boolean unpacked = (entryVersion < 6); + super.readFromLog(itemBuffer, entryVersion); + dupKey = LogUtils.readByteArray(itemBuffer, unpacked); + + /* DupCountLN */ + boolean dupCountLNRefExists = false; + byte booleans = itemBuffer.get(); + dupCountLNRefExists = (booleans & 1) != 0; + if (dupCountLNRefExists) { + dupCountLNRef.readFromLog(itemBuffer, entryVersion); + } else { + dupCountLNRef = null; + } + } + + /** + * DINS need to dump their dup key + */ + @Override + protected void dumpLogAdditional(StringBuilder sb) { + super.dumpLogAdditional(sb); + sb.append(Key.dumpString(dupKey, 0)); + if (dupCountLNRef != null) { + dupCountLNRef.dumpLog(sb, true); + } + } + + /* + * Dumping + */ + + @Override + public String beginTag() { + return BEGIN_TAG; + } + + @Override + public String endTag() { + return END_TAG; + } + + /** + * For unit test support: + * @return a string that dumps information about this 
DIN, without + */ + @Override + public String dumpString(int nSpaces, boolean dumpTags) { + StringBuilder sb = new StringBuilder(); + if (dumpTags) { + sb.append(TreeUtils.indent(nSpaces)); + sb.append(beginTag()); + sb.append('\n'); + } + + sb.append(TreeUtils.indent(nSpaces+2)); + sb.append(""); + sb.append(dupKey == null ? "" : + Key.dumpString(dupKey, 0)); + sb.append(""); + sb.append('\n'); + if (dupCountLNRef == null) { + sb.append(TreeUtils.indent(nSpaces+2)); + sb.append(""); + } else { + sb.append(dupCountLNRef.dumpString(nSpaces + 4, true)); + } + sb.append('\n'); + sb.append(super.dumpString(nSpaces, false)); + + if (dumpTags) { + sb.append(TreeUtils.indent(nSpaces)); + sb.append(endTag()); + } + return sb.toString(); + } + + @Override + public String toString() { + return dumpString(0, true); + } + + @Override + public String shortClassName() { + return "DIN"; + } +} diff --git a/src/com/sleepycat/je/tree/dupConvert/DupConvert.java b/src/com/sleepycat/je/tree/dupConvert/DupConvert.java new file mode 100644 index 0000000..f92ac58 --- /dev/null +++ b/src/com/sleepycat/je/tree/dupConvert/DupConvert.java @@ -0,0 +1,564 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree.dupConvert; + +import java.util.ArrayList; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.cleaner.LocalUtilizationTracker; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.DupKeyData; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.LockGrantType; +import com.sleepycat.je.txn.LockResult; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Performs post-recovery conversion of all dup DBs during Environment + * construction, when upgrading from JE 4.1 and earlier. In JE 5.0, duplicates + * are represented by a two-part (key + data) key, and empty data. In JE 4.1 + * and earlier, the key and data were separate as with non-dup DBs. + * + * Uses the DbTree.DUPS_CONVERTED_BIT to determine whether conversion of the + * environment is necessary. When all databases are successfully converted, + * this bit is set and the mapping tree is flushed. See + * EnvironmentImpl.convertDupDatabases. + * + * Uses DatabaseImpl.DUPS_CONVERTED to determine whether an individual database + * has been converted, to handle the case where the conversion crashes and is + * restarted later. When a database is successfully converted, this bit is set + * and the entire database is flushed using Database.sync. 
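+ *
+ * In terms of record layout, conversion rewrites each duplicate from the
+ * old form to the new form, roughly (sketch):
+ *
+ *   old:  key = k                  data = d
+ *   new:  key = DupKeyData(k, d)   data = (empty)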
+ * + * The conversion of each database is atomic -- either all INs or none are + * converted and made durable. This is accomplished by putting the database + * into Deferred Write mode so that splits won't log and eviction will be + * provisional (eviction will not flush the root IN if it is dirty). The + * Deferred Write mode is cleared after conversion is complete and + * Database.sync has been called. + * + * The memory budget is updated during conversion and daemon eviction is + * invoked periodically. This provides support for arbitrarily large DBs. + * + * Uses preload to load all dup trees (DINs/DBINs) prior to conversion, to + * minimize random I/O. See EnvironmentConfig.ENV_DUP_CONVERT_PRELOAD_ALL. + * + * The preload config does not specify loading of LNs, because we do not need + * to load LNs from DBINs. The fact that DBIN LNs are not loaded is the main + * reason that conversion is quick. LNs are converted lazily instead; see + * LNLogEntry.postFetchInit. The DBIN LNs do not need to be loaded because the + * DBIN slot key contains the LN 'data' that is needed to create the two-part + * key. + * + * Even when LN loading is not configured, it turns out that preload does load + * BIN (not DBIN) LNs in a dup DB, which is what we want. The singleton LNs + * must be loaded in order to get the LN data to create the two-part key. When + * preload has not loaded a singleton LN, it will be fetched during conversion. + * + * The DIN, DBIN and DupCount LSN are counted obsolete during conversion using + * a local utilization tracker. The tracker must not be flushed until the + * conversion of a database is complete. Inexact counting can be used, because + * DIN/DBIN/DupCountLN entries are automatically considered obsolete by the + * cleaner. Since only totals are tracked, the memory overhead of the local + * tracker is not substantial. + * + * Database Conversion Algorithm + * ----------------------------- + * 1. Set Deferred Write mode for the database. Preload the database, including + * INs/BINs/DINs/DBINs, but not LNs except for singleton LNs (LNs with a BIN + * parent). + * + * 2. Convert all IN/BIN keys to "prefix keys", which are defined by the + * DupKeyData class. This allows tree searches and slot insertions to work + * correctly as the conversion is performed. + * + * 3. Traverse through the BIN slots in forward order. + * + * 4. If a singleton LN is encountered, ensure it is loaded. IN.fetchLN() + * automatically updates the slot key if the LNLogEntry's key is different + * from the one already in the slot. Because LNLogEntry's key is converted + * on the fly, a two-part key is set in the slot as a side effect of + * fetching the LN. + * + * 5. If a DIN is encountered, first delete the BIN slot containing the DIN. + * Then iterate through all LNs in the DBINs of this dup tree, assign each + * a two-part key, and insert the slot into a BIN. The LSN and state flags + * of the DBIN slot are copied to the new BIN slot. + * + * 6. If a deleted singleton (BIN) LN is encountered, delete the slot rather + * than converting the key. If a deleted DBIN LN is encountered, simply + * discard it. + * + * 7. Count the DIN and DupCount LSN obsolete for each DIN encountered, using + * a local utilization tracker. + * + * 8. When all BIN slots have been processed, set the + * DatabaseImpl.DUPS_CONVERTED flag, call Database.sync to flush all INs and + * the MapLN, clear Deferred Write mode, and flush the local utilization + * tracker. 
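+ *
+ * Steps 1, 3 and 8 correspond directly to the body of convertDatabase
+ * below (condensed sketch; error handling omitted):
+ *
+ *   dbImpl.setDeferredWrite(true);
+ *   dbImpl.preload(preloadConfig);      // unless preloadAll was used
+ *   bin = dbImpl.getTree().getFirstNode(CacheMode.UNCHANGED);
+ *   index = -1;
+ *   while (getNextBinSlot()) {
+ *       convertBinSlot();
+ *   }
+ *   dbImpl.setDupsConverted();
+ *   dbImpl.sync(false);                 // flushLog=false
+ *   envImpl.getUtilizationProfile().flushLocalTracker(localTracker);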
+ */ +public class DupConvert { + private final static boolean DEBUG = false; + + private final EnvironmentImpl envImpl; + private final DbTree dbTree; + private final boolean preloadAll; + private final PreloadConfig preloadConfig; + private LocalUtilizationTracker localTracker; + private long nConverted; // for debugging + + /* Current working tree position. */ + private BIN bin; + private int index; + + /** + * Creates a conversion object. + */ + public DupConvert(final EnvironmentImpl envImpl, final DbTree dbTree) { + this.envImpl = envImpl; + this.dbTree = dbTree; + this.preloadAll = envImpl.getConfigManager().getBoolean + (EnvironmentParams.ENV_DUP_CONVERT_PRELOAD_ALL); + this.preloadConfig = (envImpl.getDupConvertPreloadConfig() != null) ? + envImpl.getDupConvertPreloadConfig() : (new PreloadConfig()); + } + + /** + * Converts all dup DBs that need conversion. + */ + public void convertDatabases() { + if (DEBUG) { + System.out.println("DupConvert.convertDatabases"); + } + if (preloadAll) { + preloadAllDatabases(); + } + for (DatabaseId dbId : dbTree.getDbNamesAndIds().keySet()) { + final DatabaseImpl dbImpl = dbTree.getDb(dbId); + try { + if (!needsConversion(dbImpl)) { + continue; + } + convertDatabase(dbImpl); + } finally { + dbTree.releaseDb(dbImpl); + } + } + + assert noDupNodesPresent(); + } + + private boolean noDupNodesPresent() { + for (IN in : envImpl.getInMemoryINs()) { + if (in instanceof DIN || in instanceof DBIN) { + System.out.println(in.toString()); + return false; + } + } + return true; + } + + /** + * Preload all dup DBs to be converted. + */ + private void preloadAllDatabases() { + + final ArrayList dbsToConvert = + new ArrayList(); + try { + for (DatabaseId dbId : dbTree.getDbNamesAndIds().keySet()) { + final DatabaseImpl dbImpl = dbTree.getDb(dbId); + boolean releaseDbImpl = true; + try { + if (!needsConversion(dbImpl)) { + continue; + } + dbsToConvert.add(dbImpl); + releaseDbImpl = false; + } finally { + if (releaseDbImpl) { + dbTree.releaseDb(dbImpl); + } + } + } + + if (dbsToConvert.size() == 0) { + return; + } + + final DatabaseImpl[] dbArray = + new DatabaseImpl[dbsToConvert.size()]; + dbsToConvert.toArray(dbArray); + + envImpl.preload(dbArray, preloadConfig); + } finally { + for (DatabaseImpl dbImpl : dbsToConvert) { + dbTree.releaseDb(dbImpl); + } + } + } + + /** + * Returns whether the given DB needs conversion. + */ + public static boolean needsConversion(final DatabaseImpl dbImpl) { + return (dbImpl.getSortedDuplicates() && + !dbImpl.getDupsConverted() && + !dbImpl.isDeleted()); + } + + /** + * Converts a single database. + */ + private void convertDatabase(final DatabaseImpl dbImpl) { + if (DEBUG) { + System.out.println("DupConvert.convertDatabase " + + dbImpl.getId()); + } + final boolean saveDeferredWrite = dbImpl.isDurableDeferredWrite(); + try { + localTracker = new LocalUtilizationTracker(envImpl); + dbImpl.setDeferredWrite(true); + dbImpl.setKeyPrefixing(); + if (!preloadAll) { + dbImpl.preload(preloadConfig); + } + bin = dbImpl.getTree().getFirstNode(CacheMode.UNCHANGED); + if (bin == null) { + return; + } + index = -1; + while (getNextBinSlot()) { + convertBinSlot(); + } + dbImpl.setDupsConverted(); + dbImpl.sync(false /*flushLog*/); + envImpl.getUtilizationProfile().flushLocalTracker(localTracker); + } finally { + dbImpl.setDeferredWrite(saveDeferredWrite); + } + } + + /** + * Advances the bin/index fields to the next BIN slot. When moving past + * the last BIN slot, the bin field is set to null and false is returned. 
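+     *
+     * Callers are expected to initialize the index field to -1 before the
+     * first call, as convertDatabase does above, so that the first
+     * invocation advances to slot 0.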
+ * + * Enter/leave with bin field latched. + */ + private boolean getNextBinSlot() { + index += 1; + if (index < bin.getNEntries()) { + return true; + } + + /* Compact keys after finishing with a BIN. */ + bin.compactMemory(); + + assert bin.verifyMemorySize(); + + /* Cannot evict between BINs here, because a latch is held. */ + + bin = bin.getDatabase().getTree().getNextBin(bin, CacheMode.UNCHANGED); + if (bin == null) { + return false; + } + index = 0; + return true; + } + + /** + * Converts the bin/index slot, whether a singleton LN or a DIN root. + * + * Enter/leave with bin field latched, although bin field may change. + * + * When a singleton LN is converted, leaves with bin/index fields + * unchanged. + * + * When a dup tree is converted, leaves with bin/index fields set to last + * inserted slot. This is the slot of the highest key in the dup tree. + */ + private void convertBinSlot() { + if (DEBUG) { + System.out.println("DupConvert BIN LSN " + + DbLsn.getNoFormatString(bin.getLsn(index)) + + " index " + index + + " nEntries " + bin.getNEntries()); + } + /* Delete slot if LN is deleted. */ + if (isLNDeleted(bin, index)) { + deleteSlot(); + return; + } + + final Node node = bin.fetchLNOrDIN(index, CacheMode.DEFAULT); + + if (!node.containsDuplicates()) { + if (DEBUG) { + System.out.println("DupConvert BIN LN " + + Key.dumpString(bin.getKey(index), 0)); + } + /* Fetching a non-deleted LN updates the slot key; we're done. */ + assert node instanceof LN; + nConverted += 1; + return; + } + + /* + * Delete the slot containing the DIN before re-inserting the dup tree, + * so that the DIN slot key doesn't interfere with insertions. + * + * The DIN is evicted and memory usage is decremented. This is not + * exactly correct because we keep a local reference to the DIN until + * the dup tree is converted, but we tolerate this temporary + * inaccuracy. + */ + final byte[] binKey = bin.getKey(index); + final DIN din = (DIN) node; + deleteSlot(); + convertDin(din, binKey); + } + + /** + * Returns true if the LN at the given bin/index slot is permanently + * deleted. Returns false if it is not deleted, or if it is deleted but + * part of an unclosed, resurrected txn. + * + * Enter/leave with bin field latched. + */ + private boolean isLNDeleted(BIN checkBin, int checkIndex) { + + if (!checkBin.isEntryKnownDeleted(checkIndex) && + !checkBin.isEntryPendingDeleted(checkIndex)) { + /* Not deleted. */ + return false; + } + + final long lsn = checkBin.getLsn(checkIndex); + if (lsn == DbLsn.NULL_LSN) { + /* Can discard a NULL_LSN entry without locking. */ + return true; + } + + /* Lock LSN to guarantee deletedness. */ + final BasicLocker lockingTxn = BasicLocker.createBasicLocker(envImpl); + /* Don't allow this short-lived lock to be preempted/stolen. */ + lockingTxn.setPreemptable(false); + try { + final LockResult lockRet = lockingTxn.nonBlockingLock + (lsn, LockType.READ, false /*jumpAheadOfWaiters*/, + checkBin.getDatabase()); + if (lockRet.getLockGrant() == LockGrantType.DENIED) { + /* Is locked by a resurrected txn. */ + return false; + } + return true; + } finally { + lockingTxn.operationEnd(); + } + } + + /** + * Deletes the bin/index slot, assigned a new identifier key if needed. + * + * Enter/leave with bin field latched. + */ + private void deleteSlot() { + bin.deleteEntry(index); + if (index == 0 && bin.getNEntries() != 0) { + bin.setIdentifierKey(bin.getKey(0), true /*makeDirty*/); + } + index -= 1; + } + + /** + * Converts the given DIN and its descendants. 
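+     *
+     * The traversal is a straightforward recursion (sketch):
+     *
+     *   for each entry i of din:
+     *       child = din.fetchIN(i, CacheMode.DEFAULT);
+     *       if child is a DBIN:
+     *           convert each non-deleted DBIN slot into a BIN slot
+     *       else:
+     *           convertDin((DIN) child, binKey)   // recurse
+     *       din.detachNode(i, ...)                // evict the DIN child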
+ * + * Enter/leave with bin field latched, although bin field will change to + * last inserted slot. + */ + private void convertDin(final DIN din, final byte[] binKey) { + din.latch(); + try { + for (int i = 0; i < din.getNEntries(); i += 1) { + + final IN child = din.fetchIN(i, CacheMode.DEFAULT); + + assert(!child.isBINDelta(false)); + + if (child instanceof DBIN) { + final DBIN dbin = (DBIN) child; + dbin.latch(); + try { + for (int j = 0; j < dbin.getNEntries(); j += 1) { + if (!isLNDeleted(dbin, j)) { + convertDbinSlot(dbin, j, binKey); + } + } + assert dbin.verifyMemorySize(); + + /* Count DBIN obsolete. */ + if (dbin.getLastLoggedLsn() != DbLsn.NULL_LSN) { + localTracker.countObsoleteNodeInexact + (dbin.getLastLoggedLsn(), + dbin.getLogType(), 0, dbin.getDatabase()); + } + } finally { + dbin.releaseLatch(); + } + } else { + convertDin((DIN) child, binKey); + } + + /* Evict DIN child. */ + din.detachNode(i, false/*updateLsn*/, -1/*lsn*/); + } + + assert din.verifyMemorySize(); + + /* Count DIN and DupCountLN obsolete. */ + if (din.getLastLoggedLsn() != DbLsn.NULL_LSN) { + localTracker.countObsoleteNodeInexact + (din.getLastLoggedLsn(), din.getLogType(), 0, + din.getDatabase()); + } + final ChildReference dupCountRef = din.getDupCountLNRef(); + if (dupCountRef != null && + dupCountRef.getLsn() != DbLsn.NULL_LSN) { + localTracker.countObsoleteNodeInexact + (dupCountRef.getLsn(), LogEntryType.LOG_DUPCOUNTLN, 0, + din.getDatabase()); + } + } finally { + din.releaseLatch(); + } + } + + /** + * Converts the given DBIN slot, leaving bin/index set to the inserted + * BIN slot. + * + * Enter/leave with bin field latched, although bin field may change. + * + * If slot is inserted into current bin, leave bin field unchanged and + * set index field to inserted slot. + * + * If slot is inserted into a different bin, set bin/index fields to + * inserted slot. + */ + private void convertDbinSlot( + final DBIN dbin, + final int dbinIndex, + final byte[] binKey) { + + final byte[] newKey = + DupKeyData.replaceData(binKey, dbin.getKey(dbinIndex)); + + if (DEBUG) { + System.out.println("DupConvert DBIN LN " + + Key.dumpString(newKey, 0)); + } + + /* + * If the current BIN can hold the new slot, don't bother to do a + * search to find it. + */ + if (bin.needsSplitting() || !bin.isKeyInBounds(newKey)) { + + /* Compact keys after finishing with a BIN. */ + bin.compactMemory(); + + /* Evict without latches, before moving to a new BIN. */ + bin.releaseLatch(); + envImpl.daemonEviction(false /*backgroundIO*/); + + /* Find a BIN for insertion, split if necessary. */ + bin = dbin.getDatabase().getTree().searchSplitsAllowed( + newKey, CacheMode.UNCHANGED); + } + + final int newIndex = bin.insertEntry1( + null/*ln*/, newKey, null/*data*/, dbin.getLsn(dbinIndex), + dbin.getState(dbinIndex), false); + + if ((newIndex & IN.INSERT_SUCCESS) == 0) { + throw EnvironmentFailureException.unexpectedState + ("Key not inserted: " + Key.dumpString(newKey, 0) + + " DB: " + dbin.getDatabase().getId()); + } + + index = newIndex & ~IN.INSERT_SUCCESS; + + /* + * Evict LN from DBIN slot. Although we don't explicitly load DBIN LNs, + * it may have been loaded by recovery. + */ + dbin.detachNode(dbinIndex, false/*updateLsn*/, -1/*lsn*/); + + nConverted += 1; + } + + /** + * Changes all keys to "prefix keys" in the given IN. Called after reading + * an IN from disk via IN.postFetchInit. 
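+ *
+ * The per-key transformation applied in the method body below is simply:
+ * <pre>
+ *    byte[] newKey = DupKeyData.makePrefixKey(oldKey, 0, oldKey.length);
+ *    in.convertKey(i, newKey);
+ * </pre>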
+ * + * The conversion of IN keys is invoked from the IN class when an IN is + * fetched, rather than invoked from the DupConvert class directly, for + * performance and simplicity. If it were invoked from the DupConvert + * class, we would have to iterate over all INs in a separate initial pass. + * This is both more time consuming, and more complex to implement properly + * so that eviction is possible. Instead, conversion occurs when an old + * format IN is loaded. + * + * Enter/leave with 'in' unlatched. + */ + public static void convertInKeys(final DatabaseImpl dbImpl, final IN in) { + + /* Nothing to convert for non-duplicates DB. */ + if (!dbImpl.getSortedDuplicates()) { + return; + } + + /* DIN/DBIN do not need conversion either. */ + if (in instanceof DIN || in instanceof DBIN) { + return; + } + + for (int i = 0; i < in.getNEntries(); i += 1) { + byte[] oldKey = in.getKey(i); + byte[] newKey = + DupKeyData.makePrefixKey(oldKey, 0, oldKey.length); + + in.convertKey(i, newKey); + } + + byte[] oldKey = in.getIdentifierKey(); + byte[] newKey = DupKeyData.makePrefixKey(oldKey, 0, oldKey.length); + in.setIdentifierKey(newKey, true /*makeDirty*/); + + assert in.verifyMemorySize(); + } +} diff --git a/src/com/sleepycat/je/tree/dupConvert/DupCountLN.java b/src/com/sleepycat/je/tree/dupConvert/DupCountLN.java new file mode 100644 index 0000000..e1c2451 --- /dev/null +++ b/src/com/sleepycat/je/tree/dupConvert/DupCountLN.java @@ -0,0 +1,172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree.dupConvert; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.TreeUtils; + +/** + * A DupCountLN represents the transactional part of the root of a + * duplicate tree, specifically the count of dupes in the tree. + * + * Obsolete in log version 8, only used by DupConvert and some log readers. + */ +public final class DupCountLN extends LN { + + private static final String BEGIN_TAG = ""; + private static final String END_TAG = ""; + + private int dupCount; + + /** + * Create an empty DupCountLN, to be filled in from the log. + */ + public DupCountLN() { + super(); + dupCount = 0; + } + + public int getDupCount() { + return dupCount; + } + + /** + * @return true if this node is a duplicate-bearing node type, false + * if otherwise. + */ + @Override + public boolean containsDuplicates() { + return true; + } + + @Override + public boolean isDeleted() { + return false; + } + + /** + * Compute the approximate size of this node in memory for evictor + * invocation purposes. 
+ */
+    @Override
+    public long getMemorySizeIncludedByParent() {
+        return MemoryBudget.DUPCOUNTLN_OVERHEAD;
+    }
+
+    /*
+     * Dumping
+     */
+
+    @Override
+    public String toString() {
+        return dumpString(0, true);
+    }
+
+    @Override
+    public String beginTag() {
+        return BEGIN_TAG;
+    }
+
+    @Override
+    public String endTag() {
+        return END_TAG;
+    }
+
+    @Override
+    public String dumpString(int nSpaces, boolean dumpTags) {
+        StringBuilder sb = new StringBuilder();
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(beginTag());
+            sb.append('\n');
+        }
+        sb.append(TreeUtils.indent(nSpaces+2));
+        sb.append("").append('\n');
+        sb.append(super.dumpString(nSpaces, false));
+        if (dumpTags) {
+            sb.append(TreeUtils.indent(nSpaces));
+            sb.append(endTag());
+        }
+        return sb.toString();
+    }
+
+    /*
+     * Logging
+     */
+
+    /**
+     * Returns the correct log entry type for a DupCountLN, depending on
+     * whether it's transactional.
+     */
+    @Override
+    protected LogEntryType getLogType(boolean isInsert,
+                                      boolean isTransactional,
+                                      DatabaseImpl db) {
+        return isTransactional ? LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL :
+                                 LogEntryType.LOG_DUPCOUNTLN;
+    }
+
+    /**
+     * @see LN#getLogSize
+     */
+    @Override
+    public int getLogSize() {
+        throw EnvironmentFailureException.unexpectedState();
+    }
+
+    /**
+     * @see LN#writeToLog
+     */
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+        throw EnvironmentFailureException.unexpectedState();
+    }
+
+    /**
+     * @see LN#readFromLog
+     */
+    @Override
+    public void readFromLog(ByteBuffer itemBuffer, int entryVersion) {
+
+        super.readFromLog(itemBuffer, entryVersion);
+        dupCount = LogUtils.readInt(itemBuffer, (entryVersion < 6));
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     * DupCountLNs are never replicated.
+     */
+    @Override
+    public boolean logicalEquals(Loggable other) {
+
+        return false;
+    }
+
+    /**
+     * Dump additional fields.
+     */
+    @Override
+    protected void dumpLogAdditional(StringBuilder sb, boolean verbose) {
+        super.dumpLogAdditional(sb, verbose);
+        sb.append("");
+    }
+}
diff --git a/src/com/sleepycat/je/tree/dupConvert/INDeleteInfo.java b/src/com/sleepycat/je/tree/dupConvert/INDeleteInfo.java
new file mode 100644
index 0000000..40d0ec0
--- /dev/null
+++ b/src/com/sleepycat/je/tree/dupConvert/INDeleteInfo.java
@@ -0,0 +1,111 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.tree.dupConvert;
+
+import java.nio.ByteBuffer;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.tree.Key;
+
+/**
+ * INDeleteInfo encapsulates the information logged about the removal of a
+ * child from an IN during IN compression.
+ *
+ * As of JE 3.3.87, INDelete is no longer logged because the root compression
+ * feature has been removed. However, INDelete must still be processed in log
+ * files created with 3.3.87 and earlier. [#17546]
+ *
+ * Obsolete in log version 8, only used by DupConvert and some log readers.
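+ *
+ * Since writing is unsupported (getLogSize and writeToLog throw
+ * unexpectedState), this entry is effectively read-only. A rough sketch of
+ * how an old-version log reader is expected to drive it (hypothetical
+ * caller, for orientation only):
+ * <pre>
+ *    INDeleteInfo info = new INDeleteInfo();     // logging system only
+ *    info.readFromLog(itemBuffer, entryVersion); // entryVersion &lt; 8
+ *    long nodeId = info.getDeletedNodeId();
+ * </pre>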
+ */ +public class INDeleteInfo implements Loggable { + + private long deletedNodeId; + private byte[] deletedIdKey; + private final DatabaseId dbId; + + /** + * Used by logging system only. + */ + public INDeleteInfo() { + dbId = new DatabaseId(); + } + + /* + * Accessors. + */ + public long getDeletedNodeId() { + return deletedNodeId; + } + + public byte[] getDeletedIdKey() { + return deletedIdKey; + } + + public DatabaseId getDatabaseId() { + return dbId; + } + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer logBuffer) { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + boolean unpacked = (entryVersion < 6); + deletedNodeId = LogUtils.readLong(itemBuffer, unpacked); + deletedIdKey = LogUtils.readByteArray(itemBuffer, unpacked); + dbId.readFromLog(itemBuffer, entryVersion); + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + sb.append(Key.dumpString(deletedIdKey, 0)); + dbId.dumpLog(sb, verbose); + sb.append(""); + } + + /** + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } +} diff --git a/src/com/sleepycat/je/tree/dupConvert/INDupDeleteInfo.java b/src/com/sleepycat/je/tree/dupConvert/INDupDeleteInfo.java new file mode 100644 index 0000000..a4ccb8d --- /dev/null +++ b/src/com/sleepycat/je/tree/dupConvert/INDupDeleteInfo.java @@ -0,0 +1,136 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree.dupConvert; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.tree.Key; + +/** + * INDupDeleteInfo encapsulates the information logged about the removal of a + * child from a duplicate IN during IN compression. + * + * Obsolete in log version 8, only used by DupConvert and some log readers. + */ +public class INDupDeleteInfo implements Loggable { + + private long deletedNodeId; + private byte[] deletedMainKey; + private byte[] deletedDupKey; + private final DatabaseId dbId; + private boolean dupRootDeletion; + + /** + * Used by logging system only. + */ + public INDupDeleteInfo() { + dbId = new DatabaseId(); + } + + /* + * Accessors. 
+ */ + public long getDeletedNodeId() { + return deletedNodeId; + } + + public byte[] getDeletedMainKey() { + return deletedMainKey; + } + + public byte[] getDeletedDupKey() { + return deletedDupKey; + } + + public DatabaseId getDatabaseId() { + return dbId; + } + + /** + * Returns true if we are certain that this log entry reflects deletion of + * a DIN root. Returns false if it may or may not be a DIN root. [#18663] + */ + public boolean isDupRootDeletion() { + return dupRootDeletion; + } + + /* + * Logging support for writing. + */ + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer logBuffer) { + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + boolean unpacked = (entryVersion < 6); + deletedNodeId = LogUtils.readLong(itemBuffer, unpacked); + deletedMainKey = + LogUtils.readByteArray(itemBuffer, unpacked); + deletedDupKey = LogUtils.readByteArray(itemBuffer, unpacked); + dbId.readFromLog(itemBuffer, entryVersion); + + /* + * This log entry is only logged for dup root deletion, starting in + * JE 2.1. We can't distinguish JE 2.1 through 3.2 using the log + * version, so we are only certain that this is a dup root deletion for + * version 6 (JE 3.3) and above. [#18663] + */ + dupRootDeletion = (entryVersion >= 6); + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + sb.append(Key.dumpString(deletedMainKey, 0)); + sb.append(Key.dumpString(deletedDupKey, 0)); + dbId.dumpLog(sb, verbose); + sb.append(""); + } + + /** + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + * Always return false, this item should never be compared. + */ + public boolean logicalEquals(Loggable other) { + return false; + } +} diff --git a/src/com/sleepycat/je/tree/dupConvert/package-info.java b/src/com/sleepycat/je/tree/dupConvert/package-info.java new file mode 100644 index 0000000..1a037f2 --- /dev/null +++ b/src/com/sleepycat/je/tree/dupConvert/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Defunct Btree classes for old duplicate Btrees, only used during + * recovery. + */ +package com.sleepycat.je.tree.dupConvert; diff --git a/src/com/sleepycat/je/tree/package-info.java b/src/com/sleepycat/je/tree/package-info.java new file mode 100644 index 0000000..9abe930 --- /dev/null +++ b/src/com/sleepycat/je/tree/package-info.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Btree data structure (the JE main cache), including persistent + * nodes classes, and Btree access methods. + */ +package com.sleepycat.je.tree; diff --git a/src/com/sleepycat/je/trigger/PersistentTrigger.java b/src/com/sleepycat/je/trigger/PersistentTrigger.java new file mode 100644 index 0000000..2fd5a2c --- /dev/null +++ b/src/com/sleepycat/je/trigger/PersistentTrigger.java @@ -0,0 +1,151 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.trigger; + +import java.io.Serializable; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.Transaction; + +/** + * Placeholder to be used when persistent triggers are supported in the future. + * See warning at the top of Trigger.java + *

        + * Note that all subtypes of PersistentTrigger must be serializable, because + * they are stored persistently in the environment. + *

        + * The following table captures the relationship between the database + * granularity operations and their associated trigger methods. + *

+ * <table border="1">
+ *   <tr><th>Database Operation</th><th>Trigger Method</th></tr>
+ *   <tr>
+ *     <td>{@link Environment#openDatabase Environment.openDatabase} resulting
+ *     in the creation of a new primary database, or the first open of a
+ *     database for write operations.</td>
+ *     <td>{@link #open open}</td>
+ *   </tr>
+ *   <tr>
+ *     <td>{@link Database#close Database.close}, the close of a database that
+ *     was opened for write operations.</td>
+ *     <td>{@link #close close}</td>
+ *   </tr>
+ *   <tr>
+ *     <td>{@link Environment#removeDatabase Environment.removeDatabase}</td>
+ *     <td>{@link PersistentTrigger#remove remove}</td>
+ *   </tr>
+ *   <tr>
+ *     <td>{@link Environment#truncateDatabase Environment.truncateDatabase}</td>
+ *     <td>{@link PersistentTrigger#truncate truncate}</td>
+ *   </tr>
+ *   <tr>
+ *     <td>{@link Environment#renameDatabase Environment.renameDatabase}</td>
+ *     <td>{@link PersistentTrigger#rename rename}</td>
+ *   </tr>
+ * </table>

        + */ +public interface PersistentTrigger extends Trigger, Serializable { + + /* Database operations */ + + /** + * The trigger method invoked after the open of the first {@link Database} + * writable handle. + * + * A call to the open trigger always precedes any subsequent calls to the + * {@link #put} and {@link #delete} triggers defined below, since the + * put and delete operations can only be invoked + * on a database handle. + *
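+ *
+ * A hedged sketch of wiring a trigger so that this method fires on open
+ * (<code>MyTrigger</code> is a hypothetical application class, not part of
+ * this API):
+ * <pre>
+ *    DatabaseConfig config = new DatabaseConfig();
+ *    config.setTriggers(java.util.Arrays.&lt;Trigger&gt;asList(new MyTrigger()));
+ *    Database db = env.openDatabase(null, "myDb", config); // open() invoked
+ *    db.close();                                           // close() invoked
+ * </pre>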

        + * If the database is replicated, the replay mechanism on a + * Replica may open and close databases as it replays the + * replication stream. The maximum number of databases that may be open at + * any given time and the duration for which they can be left open can be + * controlled by configuring + * ReplicationConfig.REPLAY_MAX_OPEN_DB_HANDLES and + * ReplicationConfig.REPLAY_DB_HANDLE_TIMEOUT respectively. + *

        + * The method may be invoked when the database is first created, or + * subsequently when a new trigger is added to an existing database. As a + * result, a call to this trigger is always preceded by a call to the + * {@link #addTrigger(Transaction) addTrigger} trigger method. + * + * @param txn the active transaction associated with the operation. The + * argument is null if the operation is not transactional. + * + * @param environment a handle to the environment associated with the + * database being opened. The trigger code must not close the environment + * handle. + * + * @param isNew is true if the database was newly created as a result of + * the call to {@link Environment#openDatabase} + * + * @see Environment#openDatabase + */ + public void open(Transaction txn, Environment environment, boolean isNew); + + /** + * The trigger method associated with the close of the last writable + * {@link Database} handle. + *

        + * If the database is replicated, the replay mechanism on a + * Replica may open and close databases as it replays the + * replication stream. The maximum number of databases that may be open at + * any given time and the duration for which they can be left open can be + * controlled by configuring + * ReplicationConfig.REPLAY_MAX_OPEN_DB_HANDLES and + * ReplicationConfig.REPLAY_DB_HANDLE_TIMEOUT respectively. + *

+ * @see Database#close
+ */
+    public void close();
+
+    /**
+     * The trigger method invoked after the successful removal of a primary
+     * {@link Database}.
+     *
+     * @param txn the transaction associated with the operation. The argument
+     * is null if the environment is non-transactional.
+     *
+     * @see Environment#removeDatabase
+     */
+    public void remove(Transaction txn);
+
+    /**
+     * The trigger method invoked after the successful truncation of a
+     * {@link Database}.
+     *
+     * @param txn the transaction associated with the operation. The argument
+     * is null if the environment is non-transactional.
+     *
+     * @see Environment#truncateDatabase
+     */
+    public void truncate(Transaction txn);
+
+    /**
+     * The trigger method invoked after the successful renaming of a primary
+     * {@link Database}.
+     *
+     * @param txn the transaction associated with the operation. The argument
+     * is null if the environment is non-transactional.
+     *
+     * @param newName the database's current (new) name
+     *
+     * @see Environment#renameDatabase
+     */
+    public void rename(Transaction txn, String newName);
+}
diff --git a/src/com/sleepycat/je/trigger/ReplicatedDatabaseTrigger.java b/src/com/sleepycat/je/trigger/ReplicatedDatabaseTrigger.java
new file mode 100644
index 0000000..498984b
--- /dev/null
+++ b/src/com/sleepycat/je/trigger/ReplicatedDatabaseTrigger.java
@@ -0,0 +1,187 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.trigger;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Transaction;
+
+/**
+ * ReplicatedTrigger defines the trigger methods that are invoked on a
+ * replica when it must resume a transaction that is only partially present
+ * in its logs, so that replay can proceed to a conclusion, that is, to the
+ * point where the partial transaction has been committed or aborted by the
+ * master.
+ *

        + * WARNING: This interface is not currently supported. This means that, on a + * replica where transactions may be rolled back without a full environment + * shutdown, the repeatXxx methods cannot be used to handle this circumstance. + * To be safe, it is best to only use TransactionTrigger methods, namely + * TransactionTrigger.commit. + *

+ * WARNING: Only transient triggers are currently supported, and the
+ * documentation below has not yet been updated to reflect this fact. See
+ * details at the top of Trigger.java.
+ *

        + * The trigger methods + * can be invoked in one of two circumstances: + *

          + *
+ * <ol>
+ *   <li>A new environment handle is opened on the replica and its logs
+ *   contain a partial transaction.</li>
+ *   <li>A new master is elected and a replica has to switch over to the new
+ *   master, while in the midst of replaying a transaction.</li>
+ * </ol>
+ *

        + * These trigger methods are only invoked if the partial transactions contain + * operations associated with triggers. + *

        + * + * Consider a transaction consisting of two put operations: + * + *
        + * put k1
        + * put k2
        + * commit t
        + * 
        + * + * In the absence of a replica or master failure this would normally result in + * the sequence of trigger calls: + * + *
        + * Trigger.put(t, k1, ...)
        + * Trigger.put(t, k2,....)
        + * Trigger.commit(t)
        + * 
        + * + * If the replica failed in the midst of the transaction replay, immediately + * after the first put operation, the sequence of trigger invocations before + * the replica went down would be: + * + *
        + *  Trigger.put(k1, ...)
        + * 
        + * + * followed by the trigger calls below when the replica handle was subsequently + * reopened: + * + *
        + *  ReplicatedTrigger.repeat(t)
        + *  Trigger.repeatPut(t, k1, ...)
        + *  Trigger.put(t, k2, ...)
        + *  Trigger.commit(t)
        + * 
        + * + * The interface defines one "repeat" trigger method for each of the trigger + * methods defined by Trigger. The methods are distinct from those + * defined by Trigger to highlight the fact that the trigger method is + * being invoked a second time for the same operation and the trigger method + * may not have completed the actions it intended to take when it was invoked + * the first time. For example, the trigger method may have been used to update + * a couple of local indexes and it was only finished with updating one local + * index and persisting it before the replica crashed. As a result the method + * may need to take special action to repair state maintained by it. + *
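+ *
+ * A hedged sketch of an idempotent repeat method (helper names are
+ * hypothetical; the point is that part of the work may already have been
+ * done before the crash):
+ * <pre>
+ *    public void repeatPut(Transaction txn,
+ *                          DatabaseEntry key,
+ *                          DatabaseEntry newData) {
+ *        if (!localIndexContains(key)) {     // hypothetical helper
+ *            updateLocalIndex(key, newData); // hypothetical helper
+ *        }
+ *    }
+ * </pre>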

        + * A ReplicatedTrigger is associated with a replicated database via {@link + * DatabaseConfig#setTriggers DatabaseConfig.setTriggers}. For a replicated + * database, the ReplicatedTrigger interface must be implemented for all + * triggers. For a non-replicated database, implementing the ReplicatedTrigger + * interface is allowed, but the ReplicatedTrigger methods will not be called. + *

        + */ +public interface ReplicatedDatabaseTrigger extends Trigger { + + /** + * Used to inform the application that the trigger method calls associated + * with the partial transaction will be repeated. + * + * @param txn the partial transaction + */ + public void repeatTransaction(Transaction txn); + + /* Trigger lifecycle operations. */ + + /** + * The trigger method invoked when an addTrigger operation needs to be + * repeated. + * + * @see Trigger#addTrigger + */ + public void repeatAddTrigger(Transaction txn); + + /** + * The trigger method invoked when a removeTrigger operation needs to be + * repeated. + * + * @see Trigger#removeTrigger + */ + public void repeatRemoveTrigger(Transaction txn); + + /* Database Granularity operations */ + + /** + * The trigger method invoked when a database create trigger needs to be + * repeated. + * + * @see PersistentTrigger#open + */ + public void repeatCreate(Transaction txn); + + /** + * The trigger method invoked when a database remove trigger needs to be + * repeated. + * + * @see PersistentTrigger#remove + */ + public void repeatRemove(Transaction txn); + + /** + * The trigger method invoked when a database truncate trigger needs to be + * repeated. + * + * @see PersistentTrigger#truncate + */ + public void repeatTruncate(Transaction txn); + + /** + * The trigger method invoked when a database rename trigger needs to be + * repeated. + * + * @see PersistentTrigger#rename + */ + public void repeatRename(Transaction txn, String newName); + + /* Key/value granularity operations. */ + + /** + * The trigger method invoked when a database put trigger needs to be + * repeated. Note that this method differs from the corresponding + * Trigger.put method in that it omits the + * oldData argument. + * + * @see Trigger#put + */ + public void repeatPut(Transaction txn, + DatabaseEntry key, + DatabaseEntry newData); + + /** + * The trigger method invoked when a database delete trigger needs to be + * repeated. Note that this method differs from the corresponding + * Trigger.delete method in that it omits the + * oldData argument. + * + * @see Trigger#remove + */ + public void repeatDelete(Transaction txn, + DatabaseEntry key); +} diff --git a/src/com/sleepycat/je/trigger/TransactionTrigger.java b/src/com/sleepycat/je/trigger/TransactionTrigger.java new file mode 100644 index 0000000..1306b29 --- /dev/null +++ b/src/com/sleepycat/je/trigger/TransactionTrigger.java @@ -0,0 +1,90 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.trigger; + +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Transaction; + +/** + * TransactionTrigger defines the methods that track a transaction + * through its lifecycle. The following table captures the relationship between + * transaction lifecycle operations and their trigger methods. + *

+ * WARNING: Only transient triggers are currently supported, and the
+ * documentation below has not yet been updated to reflect this fact. See
+ * details at the top of Trigger.java. Also see the warning at the top of
+ * ReplicatedDatabaseTrigger.java.
+ *

+ * <table border="1">
+ *   <tr><th>Transaction Operation</th><th>Trigger Method</th></tr>
+ *   <tr>
+ *     <td>{@link Transaction#commit Transaction.commit}, if the database was
+ *     modified in the scope of the transaction.</td>
+ *     <td>{@link #commit commit}</td>
+ *   </tr>
+ *   <tr>
+ *     <td>{@link Transaction#abort Transaction.abort}, if the database was
+ *     modified in the scope of the transaction.</td>
+ *     <td>{@link #abort abort}</td>
+ *   </tr>
+ * </table>

        + * The use of method names in the above table is intended to cover all + * overloaded methods with that name. + *

        + * The trigger methods are also invoked for transactions that are implicitly + * initiated on behalf of the application in the following two cases: + *

          + *
+ * <ol>
+ *   <li>When using auto-commit.</li>
+ *   <li>During the replay of transactions on a Replica when using a
+ *   ReplicatedEnvironment.</li>
+ * </ol>
+ *

        + * A TransactionTrigger is associated with a database via + * {@link DatabaseConfig#setTriggers DatabaseConfig.setTriggers}. + *
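+ *
+ * A minimal sketch of an implementation (hypothetical class, shown only to
+ * illustrate the callback shape):
+ * <pre>
+ *    class CacheTrigger implements TransactionTrigger {
+ *        public void commit(Transaction txn) {
+ *            // publish changes staged for this txn to an external cache
+ *        }
+ *        public void abort(Transaction txn) {
+ *            // discard changes staged for this txn
+ *        }
+ *    }
+ * </pre>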

+ * Trigger applications that only make changes to the JE environment in the
+ * transaction scope of the transaction supplied to the
+ * DatabaseTrigger do not typically need to define Transaction
+ * triggers, since the changes they make are committed and rolled back
+ * automatically by this transaction. For example, triggers defined solely to
+ * create additional indexes in the environment do not need to define
+ * transaction triggers. Only sophisticated applications that manage state
+ * outside of JE, or in independent transactions, typically define such
+ * triggers.
+ */
+public interface TransactionTrigger {
+
+    /**
+     * The trigger method invoked after a transaction has been committed. The
+     * method is only invoked if the database was modified during the course
+     * of the transaction, that is, if a trigger method was invoked within
+     * the scope of the transaction.
+     *
+     * @param txn the transaction that was committed
+     */
+    public abstract void commit(Transaction txn);
+
+    /**
+     * The trigger method invoked after the transaction has been aborted. The
+     * method is only invoked if the database was modified during the course
+     * of the transaction, that is, if a trigger method was invoked within
+     * the scope of the transaction.
+     *
+     * @param txn the transaction that was aborted
+     */
+    public abstract void abort(Transaction txn);
+}
diff --git a/src/com/sleepycat/je/trigger/Trigger.java b/src/com/sleepycat/je/trigger/Trigger.java
new file mode 100644
index 0000000..2a7225c
--- /dev/null
+++ b/src/com/sleepycat/je/trigger/Trigger.java
@@ -0,0 +1,177 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.trigger;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Transaction;
+
+/**
+ * Trigger defines the trigger methods associated with a database.
+ * They provide a mechanism to track the database definition operations used
+ * to manage the lifecycle of the database itself, as well as the record
+ * operations used to modify the contents of the database.
+ *

+ * WARNING: Only transient triggers are currently supported, and the
+ * documentation below has not yet been updated to reflect this fact. The
+ * bottom line is that triggers are currently only useful and known to be
+ * reliable for maintaining a cache of database information on a replica,
+ * where the cache is initialized after opening the database (and configuring
+ * the trigger), and where only the TransactionTrigger.commit method is used.
+ * More specifically:
+ *

          + *
+ * <ul>
+ *   <li>Although the {@link PersistentTrigger} interface exists, it may not
+ *   currently be used reliably.</li>
+ *   <li>Triggers must be configured on each node in a rep group separately,
+ *   when needed. Specifically, a trigger configured on a master will not be
+ *   automatically configured and invoked on the replicas in the group.</li>
+ *   <li>Because only transient triggers are currently supported, trigger
+ *   methods are only called after opening a database (when configuring the
+ *   trigger in the DatabaseConfig), and are not called after closing the
+ *   database.</li>
+ *   <li>As a result of the above point, triggers are not called during
+ *   recovery, and therefore cannot be reliably used to perform write
+ *   operations using the transaction passed to the trigger method.</li>
+ *   <li>Also see the warning at the top of
+ *   ReplicatedDatabaseTrigger.java.</li>
+ * </ul>
+ *

        + * The trigger methods {@link #put put} and {@link #delete delete} are used to + * track all record operations on the database. + *
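+ *
+ * A hedged sketch of a record-level trigger (a hypothetical local-index
+ * maintainer; <code>indexDb</code> and <code>indexKeyOf</code> are
+ * assumptions, not part of this API):
+ * <pre>
+ *    public void put(Transaction txn, DatabaseEntry key,
+ *                    DatabaseEntry oldData, DatabaseEntry newData) {
+ *        // oldData == null means an insertion, otherwise an update
+ *        indexDb.put(txn, indexKeyOf(newData), key);
+ *    }
+ *    public void delete(Transaction txn, DatabaseEntry key,
+ *                       DatabaseEntry oldData) {
+ *        indexDb.delete(txn, indexKeyOf(oldData));
+ *    }
+ * </pre>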

        + *

+ * A trigger method takes a transaction as its first argument. If the
+ * environment is not transactional, the argument is null. In all other
+ * cases, it's a valid transaction ({@link Transaction#isValid()
+ * Transaction.isValid} is true) and the trigger can use this transaction to
+ * make its own set of accompanying changes.
+ *

        + * If the invocation of a trigger results in a runtime exception, the + * transaction (if one was associated with the method) is invalidated and any + * subsequent triggers associated with the operation are skipped. It's the + * caller's responsibility to handle the exception and abort the invalidated + * transaction. If the exception is thrown during the replay of a transaction + * on a replica in an HA application, the environment is invalidated and a new + * environment handle must be created. + *

        + *

        + * A Trigger is associated with a database via + * {@link DatabaseConfig#setTriggers DatabaseConfig.setTriggers}. + *

        + */ +public interface Trigger { + + /** + * Returns the name associated with the trigger. All the triggers + * associated with a particular database must have unique names. + * + * @return the Trigger's name + */ + public String getName(); + + /** + * Sets the database name associated with this trigger. The JE trigger + * mechanism invokes this method to ensure that the trigger knows the name + * it's associated with across a rename of the database. + *

+ * This method is also invoked each time the trigger is de-serialized, so
+ * that the trigger does not need to store this information as part of its
+ * serialized representation.
+ *
+ * @param databaseName the name of the database associated with this
+ * trigger
+ *
+ * @return this
+ */
+    public Trigger setDatabaseName(String databaseName);
+
+    /**
+     * Returns the result of the {@link #setDatabaseName(String)} operation.
+     *
+     * @return the name of the database associated with this trigger
+     */
+    public String getDatabaseName();
+
+    /* Trigger lifecycle operations. */
+
+    /**
+     * The trigger method invoked when this trigger is added to the database.
+     * This is the very first trigger method that is invoked and it's invoked
+     * exactly once. If the database is replicated, it's invoked once on each
+     * node.
+     *

+ * @param txn the active transaction associated with the operation. The
+ * argument is null if the database is not transactional.
+ */
+    public void addTrigger(Transaction txn);
+
+    /**
+     * The trigger method invoked when this trigger is removed from the
+     * database, either as a result of opening the database with a different
+     * trigger configuration, or because the database it was associated with
+     * has been removed. In the latter case, this trigger method follows
+     * the invocation of the {@link PersistentTrigger#remove remove} trigger.
+     * If the transaction is committed, there will be no subsequent trigger
+     * method invocations for this trigger.
+     *
+     * @param txn the active transaction associated with the operation. The
+     * argument is null if the database is not transactional.
+     */
+    public void removeTrigger(Transaction txn);
+
+    /* Record operations. */
+
+    /**
+     * The trigger method invoked after a successful put, that is, one that
+     * actually results in a modification to the database.
+     *

        + * If a new entry was inserted, oldData will be null and newData will be + * non-null. If an existing entry was updated, oldData and newData will + * be non-null. + *

        + * + * @param txn the active transaction associated with the operation. The + * argument is null if the database is non-transactional. + * + * @param key the non-null primary key + * + * @param oldData the data before the change, or null if the record + * did not exist. + * + * @param newData the non-null data after the change + */ + public void put(Transaction txn, + DatabaseEntry key, + DatabaseEntry oldData, + DatabaseEntry newData); + // TODO: make API provisions for put triggers where we optimize it not to + // fetch the oldData + + /** + * The trigger method invoked after a successful delete, that is, one that + * actually resulted in a key/value pair being removed. + *

        + * Truncating a database does not invoke this trigger; + * {@link PersistentTrigger#truncate} is invoked upon truncation. + *

        + * + * @param txn the active transaction associated with the operation. The + * argument is null if the database is non-transactional. + * + * @param key the non-null primary key + * + * @param oldData the non-null data that was associated with the deleted + * key + */ + public void delete(Transaction txn, + DatabaseEntry key, + DatabaseEntry oldData); +} diff --git a/src/com/sleepycat/je/trigger/package.html b/src/com/sleepycat/je/trigger/package.html new file mode 100644 index 0000000..5bf44d1 --- /dev/null +++ b/src/com/sleepycat/je/trigger/package.html @@ -0,0 +1,118 @@ + + + + +Triggers + + + + +

+NOT YET RELEASED: Triggers provide a mechanism for automatically executing one
+or more application-defined trigger methods whenever a certain type of
+operation is performed. The mechanism is automatic in that the methods are
+invoked by JE, not by the application.
+

        +

        +All the trigger methods are, in the terminology generally associated +with RDBMS triggers, after triggers. An after trigger +method is only invoked after the successful completion of the +associated operation. It is not invoked if the operation associated +with the trigger method fails. +

        +

        +The trigger interfaces are organized so that the application can easily choose +to implement the minimal set of methods that meet their needs. +

        +
          +
+<ul>
+<li>
+Trigger defines the methods that must be implemented by simple
+trigger applications in a standalone JE environment that only make changes to
+the JE environment in the scope of the transaction supplied to the trigger
+method.
+</li>
+<li>
+ReplicatedDatabaseTrigger introduces additional methods that must be
+implemented by the application so that it can deal correctly with
+multiple invocations of a trigger per operation as a result of Syncup
+operations on a Replica.
+</li>
+<li>
+Finally, TransactionTrigger defines trigger methods for transaction commit and
+abort operations. They are intended for use by sophisticated applications that
+maintain state outside of JE, for example, in a transient cache, or in an
+RDBMS.
+</li>
+</ul>

        +Each of the interfaces described above defines a set of trigger +methods that must all work together to achieve a common purpose. The +application must typically supply non-null implementations for all the +methods in an interface or the application will likely be incomplete. +

        +

+<h3>Configuring Triggers</h3>

        +

        +Triggers are configured via the get/set methods defined on +DatabaseConfig. They are stored persistently in the +database. If the database is replicated, the association is replicated +at each node, so that the triggers can be run there as well. +

        + +

+<h3>Trigger Invocation</h3>

        + +

        +Multiple uniquely named Trigger instances may be associated with the +same database. In such cases, the triggers are +invoked in the order specified by the list argument to the trigger +setting methods defined by DatabaseConfig. +
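+
+<p>
+For example (a sketch; <code>FirstTrigger</code> and
+<code>SecondTrigger</code> are hypothetical application classes):
+<pre>
+    DatabaseConfig config = new DatabaseConfig();
+    config.setTriggers(java.util.Arrays.&lt;Trigger&gt;asList(
+        new FirstTrigger(),     // invoked first
+        new SecondTrigger()));  // invoked second
+    Database db = env.openDatabase(null, "triggeredDb", config);
+</pre>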

        +

        +If the invocation of a trigger results in a runtime exception, the +transaction (if one was associated with the method) is invalidated and any +subsequent triggers also associated with the operation are skipped. It's the +caller's responsibility to handle the exception and abort the invalidated +transaction. +

        +The implementation of a trigger method may result in the invocation of +additional trigger methods. It's the application's responsibility to +ensure that the use of cascading triggers does not create an infinite +recursion. +

        +Trigger methods are not synchronized. It's up to the application to +make any necessary provisions for synchronization. On a related note, +a trigger method should not make any assumptions about the thread of +control in which it is invoked. That is, it may be invoked in the same +thread of control as the triggering operation, or it may be invoked in +a different thread. +

        +Trigger methods are expected to be lightweight. If they are required +to do substantial work, it may be best to queue the work so that the +method returns quickly and the bulk of the work is accomplished +asynchronously. + +

+<h3>Trigger Methods and Transactions</h3>

        + +

        +A trigger method takes a transaction as its first argument. The +argument is not null if the environment is transactional. The non-null +transaction argument to trigger methods defined by Trigger is always +valid and can be used by the method to make transactional changes. The +non-null transaction argument passed to the commit and abort triggers +associated with TransactionTrigger is no longer valid and cannot be +used to make transactional changes. +

        +

        +The transactional context associated with the code executing in a +trigger is exactly the same as that associated with any JE application +code; it is subject to exactly the same restrictions. For example, a +trigger method associated with a DatabaseTrigger executed on a replica +cannot make any modifications to a replicated database using the +transaction supplied to it. It can however modify local databases. +

        + + diff --git a/src/com/sleepycat/je/txn/BasicLocker.java b/src/com/sleepycat/je/txn/BasicLocker.java new file mode 100644 index 0000000..c38e5ee --- /dev/null +++ b/src/com/sleepycat/je/txn/BasicLocker.java @@ -0,0 +1,378 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; + +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.StatGroup; + +/** + * A non-transactional Locker that simply tracks locks and releases them when + * releaseNonTxnLocks or operationEnd is called. + */ +public class BasicLocker extends Locker { + + /* + * A BasicLocker can release all locks, so there is no need to distinguish + * between read and write locks. + * + * ownedLock is used for the first lock obtained, and ownedLockSet is + * instantiated and used only if more than one lock is obtained. This is + * an optimization for the common case where only one lock is held by a + * non-transactional locker. + * + * There's no need to track memory utilization for these non-txnal lockers, + * because the lockers are short lived. + */ + private Long ownedLock; + private Set ownedLockSet; + + private boolean lockingRequired; + + /** + * Creates a BasicLocker. + */ + protected BasicLocker(EnvironmentImpl env) { + super(env, + false, // readUncommittedDefault + false, // noWait + 0); // mandatedId + } + + public static BasicLocker createBasicLocker(EnvironmentImpl env) + throws DatabaseException { + + return new BasicLocker(env); + } + + /** + * Creates a BasicLocker with a noWait argument. + */ + protected BasicLocker(EnvironmentImpl env, boolean noWait) { + super(env, + false, // readUncommittedDefault + noWait, + 0); // mandatedId + } + + public static BasicLocker createBasicLocker(EnvironmentImpl env, + boolean noWait) + throws DatabaseException { + + return new BasicLocker(env, noWait); + } + + /** + * BasicLockers always have a fixed id, because they are never used for + * recovery. + */ + @Override + protected long generateId(TxnManager txnManager, + long ignore /* mandatedId */) { + return TxnManager.NULL_TXN_ID; + } + + @Override + protected void checkState(boolean ignoreCalledByAbort) { + /* Do nothing. */ + } + + @Override + protected LockResult lockInternal(long lsn, + LockType lockType, + boolean noWait, + boolean jumpAheadOfWaiters, + DatabaseImpl database) + throws DatabaseException { + + /* Does nothing in BasicLocker. synchronized is for posterity. 
*/ + synchronized (this) { + checkState(false); + } + + long timeout = 0; + boolean useNoWait = noWait || defaultNoWait; + if (!useNoWait) { + synchronized (this) { + timeout = getLockTimeout(); + } + } + + /* Ask for the lock. */ + LockGrantType grant = lockManager.lock + (lsn, this, lockType, timeout, useNoWait, jumpAheadOfWaiters, + database); + + return new LockResult(grant, null); + } + + @Override + public void preLogWithoutLock(DatabaseImpl database) { + } + + /** + * Get the txn that owns the lock on this node. Return null if there's no + * owning txn found. + */ + public Locker getWriteOwnerLocker(long lsn) + throws DatabaseException { + + return lockManager.getWriteOwnerLocker(Long.valueOf(lsn)); + } + + /** + * Is never transactional. + */ + @Override + public boolean isTransactional() { + return false; + } + + /** + * Is never serializable isolation. + */ + @Override + public boolean isSerializableIsolation() { + return false; + } + + /** + * Is never read-committed isolation. + */ + @Override + public boolean isReadCommittedIsolation() { + return false; + } + + /** + * No transactional locker is available. + */ + @Override + public Txn getTxnLocker() { + return null; + } + + /** + * Throws EnvironmentFailureException unconditionally. + * + * If we were to create a new BasicLocker here, it would not share locks + * with this locker, which violates the definition of this method. This + * method is not currently called in direct uses of BasicLocker and is + * overridden by subclasses where it is allowed (e.g., ThreadLocker and + * ReadCommittedLocker). + * @throws DatabaseException from subclasses. + */ + @Override + public Locker newNonTxnLocker() + throws DatabaseException { + + throw EnvironmentFailureException.unexpectedState(); + } + + /** + * Releases all locks, since all locks held by this locker are + * non-transactional. + */ + @Override + public synchronized void releaseNonTxnLocks() + throws DatabaseException { + + /* + * Don't remove locks from txn's lock collection until iteration is + * done, lest we get a ConcurrentModificationException during deadlock + * graph "display". [#9544] + */ + if (ownedLock != null) { + lockManager.release(ownedLock, this); + ownedLock = null; + } + if (ownedLockSet != null) { + for (final Long nid : ownedLockSet) { + lockManager.release(nid, this); + } + + /* Now clear lock collection. */ + ownedLockSet.clear(); + } + + /* Unload delete info, but don't wake up the compressor. */ + if ((deleteInfo != null) && + (deleteInfo.size() > 0)) { + envImpl.addToCompressorQueue(deleteInfo.values()); + deleteInfo.clear(); + } + } + + /** + * Release locks and close the cursor at the end of the operation. + */ + @Override + public void nonTxnOperationEnd() + throws DatabaseException { + + operationEnd(true); + } + + /** + * Release locks and close the cursor at the end of the operation. + */ + @Override + public void operationEnd(boolean operationOK) + throws DatabaseException { + + releaseNonTxnLocks(); + + /* Close this Locker. */ + close(); + } + + /** + * This txn doesn't store cursors. + * @throws DatabaseException in subclasses. + */ + @Override + public void registerCursor(CursorImpl cursor) { + lockingRequired = cursor.isInternalDbCursor(); + } + + /** + * This txn doesn't store cursors. + */ + @Override + public void unRegisterCursor(CursorImpl cursor) { + } + + @Override + public boolean lockingRequired() { + return lockingRequired; + } + + /* + * Transactional methods are all no-oped. 
+ */ + + /** + * @return a dummy WriteLockInfo for this node. + */ + @Override + public WriteLockInfo getWriteLockInfo(long lsn) { + return WriteLockInfo.basicWriteLockInfo; + } + + @Override + public void markDeleteAtTxnEnd(DatabaseImpl db, boolean deleteAtCommit) + throws DatabaseException { + + if (deleteAtCommit) { + /* releaseDb will be called by startAndFinishDelete. */ + db.startAndFinishDelete(); + } else { + envImpl.getDbTree().releaseDb(db); + } + } + + /** + * Add a lock to set owned by this transaction. + */ + @Override + protected void addLock(Long lsn, + LockType type, + LockGrantType grantStatus) { + if ((ownedLock != null && + ownedLock.equals(lsn)) || + (ownedLockSet != null && + ownedLockSet.contains(lsn))) { + return; // Already owned + } + if (ownedLock == null) { + ownedLock = lsn; + } else { + if (ownedLockSet == null) { + ownedLockSet = new HashSet<>(); + } + ownedLockSet.add(lsn); + } + } + + /** + * Remove a lock from the set owned by this txn. + */ + @Override + void removeLock(long lsn) { + if (ownedLock != null && + ownedLock == lsn) { + ownedLock = null; + } else if (ownedLockSet != null) { + ownedLockSet.remove(lsn); + } + } + + /** + * A lock is being demoted. Move it from the write collection into the read + * collection. + */ + @Override + void moveWriteToReadLock(long lsn, Lock lock) { + } + + /** + * Stats. Note lack of synchronization while accessing Lock object. + * Appropriate for unit testing only. + */ + @Override + public StatGroup collectStats() + throws DatabaseException { + + StatGroup stats = + new StatGroup("Locker lock counts" , + "Read and write locks held by this locker"); + + IntStat nReadLocks = new IntStat(stats, LOCK_READ_LOCKS); + IntStat nWriteLocks = new IntStat(stats, LOCK_WRITE_LOCKS); + + if (ownedLock != null) { + Lock l = lockManager.lookupLock(ownedLock); + if (l != null) { + if (l.isOwnedWriteLock(this)) { + nWriteLocks.increment(); + } else { + nReadLocks.increment(); + } + } + } + if (ownedLockSet != null) { + for (Long nid : ownedLockSet) { + Lock l = lockManager.lookupLock(nid); + if (l != null) { + if (l.isOwnedWriteLock(this)) { + nWriteLocks.increment(); + } else { + nReadLocks.increment(); + } + } + } + } + return stats; + } +} diff --git a/src/com/sleepycat/je/txn/BuddyLocker.java b/src/com/sleepycat/je/txn/BuddyLocker.java new file mode 100644 index 0000000..0d626a0 --- /dev/null +++ b/src/com/sleepycat/je/txn/BuddyLocker.java @@ -0,0 +1,210 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Extends BasicLocker to share locks with another specific locker. + * + *

        In general, a BuddyLocker can be used whenever the primary (API) locker + * is in use, and we need to lock a node and release that lock before the + * primary locker transaction ends. In other words, for this particular lock + * we don't want to use two-phase locking. To accomplish that we use a + * separate BuddyLocker instance to hold the lock, while sharing locks with the + * primary locker. The BuddyLocker can be closed to release this particular + * lock, without releasing the other locks held by the primary locker.

        + * + *

        In particular, a ReadCommittedLocker extends BuddyLocker. The + * ReadCommittedLocker keeps track of read locks, while its buddy Txn keeps + * track of write locks. The two lockers must share locks to prevent + * conflicts.

        + * + *

        In addition, a BuddyLocker is used when acquiring a RANGE_INSERT lock. + * RANGE_INSERT only needs to be held until the point we have inserted the new + * node into the BIN. A separate locker is therefore used so we can release + * that lock separately when the insertion into the BIN is complete. But the + * RANGE_INSERT lock must not conflict with locks held by the primary locker. + * So a BuddyLocker is used that shares locks with the primary locker.
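+ * <p>
+ * A rough internal-usage sketch (for orientation only; error handling
+ * omitted):
+ * <pre>
+ *    BuddyLocker buddy = BuddyLocker.createBuddyLocker(envImpl, primary);
+ *    try {
+ *        // take a short-lived lock that must not conflict with `primary'
+ *    } finally {
+ *        buddy.operationEnd(true); // releases only the buddy's own locks
+ *    }
+ * </pre>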

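+ *
+ * <p>For illustration only, a minimal sketch of the intended pattern,
+ * assuming code running within this package; {@code envImpl} and
+ * {@code primary} are hypothetical variables for the environment and the
+ * primary (API) locker:
+ * <pre>{@code
+ * BuddyLocker buddy = BuddyLocker.createBuddyLocker(envImpl, primary);
+ * try {
+ *     // The two lockers never conflict with each other:
+ *     assert buddy.sharesLocksWith(primary);
+ *     // ... acquire and use the short-lived lock via 'buddy' ...
+ * } finally {
+ *     // ... release the buddy's lock here, leaving locks held directly
+ *     // by 'primary' in place ...
+ * }
+ * }</pre>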
        + */ +public class BuddyLocker extends BasicLocker { + + private final Locker buddy; + + /** + * Creates a BuddyLocker. + */ + protected BuddyLocker(EnvironmentImpl env, Locker buddy) { + super(env, buddy.getDefaultNoWait()); + this.buddy = buddy; + buddy.addBuddy(this); + } + + public static BuddyLocker createBuddyLocker(EnvironmentImpl env, + Locker buddy) + throws DatabaseException { + + return new BuddyLocker(env, buddy); + } + + @Override + void close() { + super.close(); + buddy.removeBuddy(this); + } + + /** + * Returns the buddy locker. + */ + @Override + Locker getBuddy() { + return buddy; + } + + /** + * Forwards this call to the buddy locker. This object itself is never + * transactional but the buddy may be. + */ + @Override + public Txn getTxnLocker() { + return buddy.getTxnLocker(); + } + + /** + * Forwards this call to the buddy locker. This object itself is never + * transactional but the buddy may be. + */ + @Override + public Transaction getTransaction() { + return buddy.getTransaction(); + } + + /** + * Forwards this call to the base class and to the buddy locker. + */ + @Override + public void releaseNonTxnLocks() + throws DatabaseException { + + super.releaseNonTxnLocks(); + buddy.releaseNonTxnLocks(); + } + + /** + * Returns whether this locker can share locks with the given locker. + */ + @Override + public boolean sharesLocksWith(Locker other) { + + if (super.sharesLocksWith(other)) { + return true; + } else { + return (buddy == other || + other.getBuddy() == this || + buddy == other.getBuddy()); + } + } + + /** + * Returns the lock timeout of the buddy locker, since this locker has no + * independent timeout. + */ + @Override + public long getLockTimeout() { + return buddy.getLockTimeout(); + } + + /** + * Returns the transaction timeout of the buddy locker, since this locker + * has no independent timeout. + */ + @Override + public long getTxnTimeout() { + return buddy.getTxnTimeout(); + } + + /** + * Sets the lock timeout of the buddy locker, since this locker has no + * independent timeout. + */ + @Override + public void setLockTimeout(long timeout) { + buddy.setLockTimeout(timeout); + } + + /** + * Sets the transaction timeout of the buddy locker, since this locker has + * no independent timeout. + */ + @Override + public void setTxnTimeout(long timeout) { + buddy.setTxnTimeout(timeout); + } + + /** + * Returns whether the buddy locker is timed out, since this locker has no + * independent timeout. + */ + @Override + public boolean isTimedOut() { + return buddy.isTimedOut(); + } + + /** + * Returns the buddy locker's start time, since this locker has no + * independent timeout. + */ + @Override + public long getTxnStartMillis() { + return buddy.getTxnStartMillis(); + } + + /** + * Forwards to the buddy locker, since the buddy may be transactional. + */ + @Override + public void setOnlyAbortable(OperationFailureException cause) { + buddy.setOnlyAbortable(cause); + } + + /** + * Forwards to the parent buddy locker, so the buddy can check itself and + * all of its child buddies. + */ + @Override + public void checkPreempted(final Locker allowPreemptedLocker) + throws OperationFailureException { + + buddy.checkPreempted(allowPreemptedLocker); + } + + /** + * Consider this locker replicated if its buddy (Txn) is replicated. + */ + @Override + public boolean isReplicated() { + return buddy.isReplicated(); + } + + /** + * Consider this locker local-write if its buddy is local-write. 
+ */ + @Override + public boolean isLocalWrite() { + return buddy.isLocalWrite(); + } +} diff --git a/src/com/sleepycat/je/txn/DummyLockManager.java b/src/com/sleepycat/je/txn/DummyLockManager.java new file mode 100644 index 0000000..8dba5e9 --- /dev/null +++ b/src/com/sleepycat/je/txn/DummyLockManager.java @@ -0,0 +1,273 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.util.Set; +import java.util.List; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.utilint.StatGroup; + +/** + * DummyLockManager performs no locking for DS mode. + */ +public class DummyLockManager extends LockManager { + + /* + * Even though a user may specify isNoLocking for performance reasons, JE + * will sometimes still need to use locking internally (e.g. handle locks, + * and transactional access to internal db's). So we can not completely + * eliminate the Lock Manager. Instead, when isNoLocking is specified, we + * keep a standard Lock Manager around for use by cursors that access + * internal databases. Delegate to that as needed. + * [#16453] + */ + private final LockManager superiorLockManager; + + public DummyLockManager(EnvironmentImpl envImpl, + LockManager superiorLockManager) { + super(envImpl); + this.superiorLockManager = superiorLockManager; + } + + @Override + public Set getOwners(Long lsn) { + return superiorLockManager.getOwners(lsn); + } + + @Override + public List getWaiters(Long lsn) { + return superiorLockManager.getWaiters(lsn); + } + + @Override + public LockType getOwnedLockType(Long lsn, Locker locker) { + return superiorLockManager.getOwnedLockType(lsn, locker); + } + + @Override + public boolean isLockUncontended(Long lsn) { + return superiorLockManager.isLockUncontended(lsn); + } + + @Override + public boolean ownsOrSharesLock(Locker locker, Long lsn) { + return superiorLockManager.ownsOrSharesLock(locker, lsn); + } + + /** + * @see LockManager#lookupLock + */ + @Override + Lock lookupLock(Long lsn) + throws DatabaseException { + + Lock ret = superiorLockManager.lookupLock(lsn); + return ret; + } + + /** + * @see LockManager#attemptLock + */ + @Override + LockAttemptResult attemptLock(Long lsn, + Locker locker, + LockType type, + boolean nonBlockingRequest, + boolean jumpAheadOfWaiters) + throws DatabaseException { + + if (locker.lockingRequired()) { + return superiorLockManager.attemptLock + (lsn, locker, type, nonBlockingRequest, jumpAheadOfWaiters); + } + return new LockAttemptResult(null, LockGrantType.NEW, true); + } + + /** + * @see LockManager#getTimeoutInfo + */ + @Override + TimeoutInfo getTimeoutInfo( + boolean isLockNotTxnTimeout, + Locker locker, + long lsn, + LockType type, + LockGrantType grantType, + Lock useLock, + long timeout, + long start, + long now, + DatabaseImpl database, + Set owners, + List waiters) + throws DatabaseException { + + if (locker.lockingRequired()) { + return superiorLockManager.getTimeoutInfo( 
+ isLockNotTxnTimeout, locker, lsn, type, grantType, useLock, + timeout, start, now, database, owners, waiters); + } + return null; + } + + /** + * @see LockManager#releaseAndFindNotifyTargets + */ + @Override + Set releaseAndFindNotifyTargets(long lsn, Locker locker) + throws DatabaseException { + + /* + * Unconditionally release the lock. This does not detract from the + * performance benefit of disabled locking, since this method is only + * called if a lock was previously acquired, i.e., it is held by a + * Locker. + * + * The comment below is now obsolete because handle locks are no longer + * transferred. + * If the release of the lock were conditional, a lock transferred + * between Lockers (as we do with Database handle locks) would never + * be released, since the destination Locker's lockingRequired + * property is not set to true. In general, it is safer to + * unconditionally release locks than to rely on the lockingRequired + * property. [#17985] + */ + return superiorLockManager.releaseAndFindNotifyTargets(lsn, locker); + } + + /** + * @see LockManager#demote + */ + @Override + void demote(long lsn, Locker locker) + throws DatabaseException { + + if (locker.lockingRequired()) { + superiorLockManager.demote(lsn, locker); + } else { + return; + } + } + + /** + * @see LockManager#isLocked + */ + @Override + boolean isLocked(Long lsn) + throws DatabaseException { + + return superiorLockManager.isLocked(lsn); + } + + /** + * @see LockManager#isOwner + */ + @Override + boolean isOwner(Long lsn, Locker locker, LockType type) + throws DatabaseException { + + return superiorLockManager.isOwner(lsn, locker, type); + } + + /** + * @see LockManager#isWaiter + */ + @Override + boolean isWaiter(Long lsn, Locker locker) + throws DatabaseException { + + return superiorLockManager.isWaiter(lsn, locker); + } + + /** + * @see LockManager#nWaiters + */ + @Override + int nWaiters(Long lsn) + throws DatabaseException { + + return superiorLockManager.nWaiters(lsn); + } + + /** + * @see LockManager#nOwners + */ + @Override + int nOwners(Long lsn) + throws DatabaseException { + + return superiorLockManager.nOwners(lsn); + } + + /** + * @see LockManager#getWriterOwnerLocker + */ + @Override + Locker getWriteOwnerLocker(Long lsn) + throws DatabaseException { + + return superiorLockManager.getWriteOwnerLocker(lsn); + } + + /** + * @see LockManager#validateOwnership + */ + @Override + boolean validateOwnership(Long lsn, + Locker locker, + LockType type, + boolean getOwnersAndWaiters, + boolean flushFromWaiters, + Set owners, + List waiters) + throws DatabaseException { + + if (locker.lockingRequired()) { + return superiorLockManager.validateOwnership( + lsn, locker, type, getOwnersAndWaiters, flushFromWaiters, + owners, waiters); + } + return true; + } + + /** + * @see LockManager#stealLock + */ + @Override + public LockAttemptResult stealLock(Long lsn, + Locker locker, + LockType lockType) + throws DatabaseException { + + if (locker.lockingRequired()) { + return superiorLockManager.stealLock + (lsn, locker, lockType); + } + return null; + } + + /** + * @see LockManager#dumpLockTable + */ + @Override + void dumpLockTable(StatGroup stats, boolean clear) + throws DatabaseException { + + superiorLockManager.dumpLockTable(stats, clear); + } +} diff --git a/src/com/sleepycat/je/txn/HandleLocker.java b/src/com/sleepycat/je/txn/HandleLocker.java new file mode 100644 index 0000000..88864d9 --- /dev/null +++ b/src/com/sleepycat/je/txn/HandleLocker.java @@ -0,0 +1,121 @@ +/*- + * Copyright (C) 2002, 2017, Oracle 
and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Extends BasicLocker to share locks with another Locker that is being used
+ * to open a database. HandleLocker holds a read lock on the NameLN for the
+ * Database object, to prevent rename, removal or truncation of a database
+ * while it is open. The HandleLocker and the Locker used to open the database
+ * both hold a NameLN lock at the same time, so they must share locks to avoid
+ * conflicts.
+ *
+ * Accounts for the fact that the Txn may end before this locker ends by
+ * keeping only the Txn ID rather than a reference to the Txn object. A
+ * reference to a non-transactional Locker is OK, on the other hand, because
+ * it is short-lived.
+ *
+ * Handle Locking Overview
+ * -----------------------
+ * Environment.openDatabase calls Environment.setupDatabase, which calls
+ * Database.initHandleLocker to create the HandleLocker. setupDatabase
+ * ensures that the HandleLocker is passed to DbTree.getDb and createDb.
+ * These latter methods acquire a read lock on the NameLN for the
+ * HandleLocker, in addition to acquiring a read or write lock for the
+ * openDatabase locker.
+ *
+ * If setupDatabase is not successful, it ensures that locks are released via
+ * HandleLocker.endOperation. If setupDatabase is successful, the handle is
+ * returned by openDatabase, and Database.close must be called to release the
+ * lock. The handle lock is released by calling HandleLocker.endOperation.
+ *
+ * A special case is when a user txn is passed to openDatabase. If the txn
+ * aborts, the Database handle must be invalidated. When setupDatabase
+ * succeeds it passes the handle to Txn.addOpenedDatabase, which remembers
+ * the handle. Txn.abort invalidates the handle.
+ *
+ * NameLN Migration and LSN Changes [#20617]
+ * -----------------------------------------
+ * When the log cleaner migrates a NameLN, its LSN changes and the new LSN is
+ * locked on behalf of all existing lockers by CursorImpl.lockAfterLsnChange.
+ * lockAfterLsnChange is also used when a dirty deferred-write LN is logged by
+ * BIN.logDirtyLN, as part of flushing a BIN during a checkpoint or eviction.
+ *
+ * Because handle lockers are legitimately very long lived, it is important
+ * that lockAfterLsnChange releases the locks on the old LSN, to avoid a
+ * steady accumulation of locks in a HandleLocker. Therefore,
+ * lockAfterLsnChange will release the lock on the old LSN, for HandleLockers
+ * only. Although it may be desirable to release the old LSN lock on other
+ * long lived lockers, it is too risky. In an experiment, this caused
+ * problems with demotion and upgrade, when a lock being demoted or upgraded
+ * was released.
+ *
+ * Because LSNs can change, it is also important that we don't rely on a
+ * single NameLN lock ID (LSN) as a data structure key for handle locks. This
+ * was acceptable when a stable Node ID was used as a lock ID, but is no
+ * longer appropriate now that mutable LSNs are used as lock IDs.
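+ *
+ * <p>For illustration only, a minimal sketch (assumes code running within
+ * this package; {@code envImpl} and {@code openLocker} are hypothetical
+ * variables for the environment and the locker passed to openDatabase):
+ * <pre>{@code
+ * HandleLocker handleLocker =
+ *     HandleLocker.createHandleLocker(envImpl, openLocker);
+ * // Both lockers may hold the NameLN lock at the same time:
+ * assert handleLocker.sharesLocksWith(openLocker);
+ * }</pre>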
+ */ +public class HandleLocker extends BasicLocker { + + private final long shareWithTxnId; + private final Locker shareWithNonTxnlLocker; + + /** + * Creates a HandleLocker. + */ + protected HandleLocker(EnvironmentImpl env, Locker buddy) { + super(env); + shareWithTxnId = + buddy.isTransactional() ? buddy.getId() : TxnManager.NULL_TXN_ID; + shareWithNonTxnlLocker = + buddy.isTransactional() ? null : buddy; + } + + public static HandleLocker createHandleLocker(EnvironmentImpl env, + Locker buddy) + throws DatabaseException { + + return new HandleLocker(env, buddy); + } + + /** + * Returns whether this locker can share locks with the given locker. + */ + @Override + public boolean sharesLocksWith(Locker other) { + + if (super.sharesLocksWith(other)) { + return true; + } + if (shareWithTxnId != TxnManager.NULL_TXN_ID && + shareWithTxnId == other.getId()) { + return true; + } + if (shareWithNonTxnlLocker != null && + shareWithNonTxnlLocker == other) { + return true; + } + return false; + } + + /** + * Because handle lockers are legitimately very long lived, it is important + * that lockAfterLsnChange releases the locks on the old LSN, to avoid a + * steady accumulation of locks in a HandleLocker + */ + @Override + public boolean allowReleaseLockAfterLsnChange() { + return true; + } +} diff --git a/src/com/sleepycat/je/txn/Lock.java b/src/com/sleepycat/je/txn/Lock.java new file mode 100644 index 0000000..d1d095f --- /dev/null +++ b/src/com/sleepycat/je/txn/Lock.java @@ -0,0 +1,124 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.MemoryBudget; + +/** + * A Lock embodies the lock state of an LSN. It includes a set of owners and + * a list of waiters. + */ +interface Lock { + + /** + * Get a list of waiters for debugging and error messages. + */ + public List getWaitersListClone(); + + /** + * Remove this locker from the waiter list. + */ + public void flushWaiter(Locker locker, + MemoryBudget mb, + int lockTableIndex); + + /** + * Get a new Set of the owners. + */ + public Set getOwnersClone(); + + /** + * Return true if locker is an owner of this Lock for lockType, + * false otherwise. + * + * This method is only used by unit tests. + */ + public boolean isOwner(Locker locker, LockType lockType); + + /** + * Return true if locker is an owner of this Lock and this is a write + * lock. + */ + public boolean isOwnedWriteLock(Locker locker); + + /** + * Returns the LockType if the given locker owns this lock, or null if the + * lock is not owned. + */ + public LockType getOwnedLockType(Locker locker); + + /** + * Return true if locker is a waiter on this Lock. + * + * This method is only used by unit tests. + */ + public boolean isWaiter(Locker locker); + + public int nWaiters(); + + public int nOwners(); + + /** + * Attempts to acquire the lock and returns the LockGrantType. + * + * Assumes we hold the lockTableLatch when entering this method. 
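+ *
+ * <p>Illustrative call, as a sketch only (assumes the corresponding lock
+ * table latch is held, and that {@code lock}, {@code locker}, {@code mb}
+ * and {@code lockTableIndex} are in scope):
+ * <pre>{@code
+ * LockAttemptResult result = lock.lock(
+ *     LockType.READ, locker,
+ *     false,  // nonBlockingRequest
+ *     false,  // jumpAheadOfWaiters
+ *     mb, lockTableIndex);
+ * if (result.lockGrant == LockGrantType.NEW) {
+ *     // Granted immediately as a new lock; WAIT_* values mean the
+ *     // locker was queued as a waiter.
+ * }
+ * }</pre>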
+ */ + public LockAttemptResult lock(LockType requestType, + Locker locker, + boolean nonBlockingRequest, + boolean jumpAheadOfWaiters, + MemoryBudget mb, + int lockTableIndex) + throws DatabaseException; + + /** + * Releases a lock and moves the next waiter(s) to the owners. + * @return + * - null if we were not the owner, + * - a non-empty set if owners should be notified after releasing, + * - an empty set if no notification is required. + */ + public Set release(Locker locker, + MemoryBudget mb, + int lockTableIndex); + + /** + * Removes all owners except for the given owner, and sets the Preempted + * property on the removed owners. + */ + public void stealLock(Locker locker, MemoryBudget mb, int lockTableIndex) + throws DatabaseException; + + /** + * Downgrade a write lock to a read lock. + */ + public void demote(Locker locker); + + /** + * Return the locker that has a write ownership on this lock. If no + * write owner exists, return null. + */ + public Locker getWriteOwnerLocker(); + + public boolean isThin(); + + /** + * Debug dumper. + */ + public String toString(); +} diff --git a/src/com/sleepycat/je/txn/LockAttemptResult.java b/src/com/sleepycat/je/txn/LockAttemptResult.java new file mode 100644 index 0000000..f10abec --- /dev/null +++ b/src/com/sleepycat/je/txn/LockAttemptResult.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +/** + * This is just a struct to hold a multi-value return. + */ +public class LockAttemptResult { + public final boolean success; + final Lock useLock; + public final LockGrantType lockGrant; + + LockAttemptResult(Lock useLock, + LockGrantType lockGrant, + boolean success) { + + this.useLock = useLock; + this.lockGrant = lockGrant; + this.success = success; + } +} diff --git a/src/com/sleepycat/je/txn/LockConflict.java b/src/com/sleepycat/je/txn/LockConflict.java new file mode 100644 index 0000000..017cac2 --- /dev/null +++ b/src/com/sleepycat/je/txn/LockConflict.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +/** + * LockConflict is a type safe enumeration of lock conflict types. Methods on + * LockConflict objects are used to determine whether a conflict exists and, if + * so, how it should be handled. 
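+ *
+ * <p>Typical decision logic, shown here as a sketch only ({@code conflict}
+ * is assumed to come from LockType.getConflict, as used by LockImpl):
+ * <pre>{@code
+ * if (conflict.getAllowed()) {
+ *     // No conflict: the lock can be granted.
+ * } else if (conflict.getRestart()) {
+ *     // Conflict: the requester's operation must be restarted.
+ * } else {
+ *     // Conflict: wait for the lock, or deny a non-blocking request.
+ * }
+ * }</pre>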
+ */
+class LockConflict {
+
+    static final LockConflict ALLOW = new LockConflict(true, false);
+    static final LockConflict BLOCK = new LockConflict(false, false);
+    static final LockConflict RESTART = new LockConflict(false, true);
+
+    private final boolean allowed;
+    private final boolean restart;
+
+    /**
+     * No conflict types can be defined outside this class.
+     */
+    private LockConflict(boolean allowed, boolean restart) {
+        this.allowed = allowed;
+        this.restart = restart;
+    }
+
+    /**
+     * This method is called first to determine whether the lock is allowed.
+     * If true, there is no conflict. If false, there is a conflict and the
+     * requester must wait for or be denied the lock, or (if getRestart
+     * returns true) an exception should be thrown to cause the requester's
+     * operation to be restarted.
+     */
+    boolean getAllowed() {
+        return allowed;
+    }
+
+    /**
+     * This method is called when getAllowed returns false to determine
+     * whether an exception should be thrown to cause the requester's
+     * operation to be restarted. If getAllowed returns false and this method
+     * returns false, the requester should wait for or be denied the lock,
+     * depending on the request mode. If getAllowed returns true, this method
+     * will always return false.
+     */
+    boolean getRestart() {
+        return restart;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockGrantType.java b/src/com/sleepycat/je/txn/LockGrantType.java
new file mode 100644
index 0000000..9485cbd
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockGrantType.java
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+/**
+ * LockGrantType is an enumeration of the possible results of a lock attempt.
+ */
+public enum LockGrantType {
+
+    /**
+     * The locker did not previously own a lock on the node, and a new lock
+     * has been granted.
+     */
+    NEW,
+
+    /**
+     * The locker did not previously own a lock on the node, and must wait
+     * for a new lock because a conflicting lock is held by another locker.
+     */
+    WAIT_NEW,
+
+    /**
+     * The locker previously owned a read lock on the node, and a write lock
+     * has been granted by upgrading the lock from read to write.
+     */
+    PROMOTION,
+
+    /**
+     * The locker previously owned a read lock on the node, and must wait for
+     * a lock upgrade because a conflicting lock is held by another locker.
+     */
+    WAIT_PROMOTION,
+
+    /**
+     * The locker already owns the requested lock, and no new lock or upgrade
+     * is needed.
+     */
+    EXISTING,
+
+    /**
+     * The lock request was a non-blocking one and the lock has not been
+     * granted because a conflicting lock is held by another locker.
+     */
+    DENIED,
+
+    /**
+     * The lock has not been granted because a conflicting lock is held by
+     * another locker, and a RangeRestartException must be thrown.
+     */
+    WAIT_RESTART,
+
+    /**
+     * No lock has been granted because LockType.NONE was requested.
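+     * This is the result of a dirty-read request, for which no locking is
+     * performed (see LockManager.lock).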
+ */ + NONE_NEEDED, +} diff --git a/src/com/sleepycat/je/txn/LockImpl.java b/src/com/sleepycat/je/txn/LockImpl.java new file mode 100644 index 0000000..f15d1f4 --- /dev/null +++ b/src/com/sleepycat/je/txn/LockImpl.java @@ -0,0 +1,845 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.dbi.MemoryBudget; + +/** + * A Lock embodies the lock state of a LSN. It includes a set of owners and + * a list of waiters. + */ +public // for Sizeof +class LockImpl implements Lock { + private static final int REMOVE_LOCKINFO_OVERHEAD = + 0 - MemoryBudget.LOCKINFO_OVERHEAD; + + /** + * A single locker always appears only once in the logical set of owners. + * The owners set is always in one of the following states. + * + * 1- Empty + * 2- A single writer + * 3- One or more readers + * 4- Multiple writers or a mix of readers and writers, all for + * txns which share locks (all ThreadLocker instances for the same + * thread) + * + * Both ownerSet and waiterList are a collection of LockInfo. Since the + * common case is that there is only one owner or waiter, we have added an + * optimization to avoid the cost of collections. FirstOwner and + * firstWaiter are used for the first owner or waiter of the lock, and the + * corresponding collection is instantiated and used only if more owners + * arrive. + * + * In terms of memory accounting, we count up the cost of each added or + * removed LockInfo, but not the cost of the HashSet/List entry + * overhead. We could do the latter for more precise accounting. + */ + private LockInfo firstOwner; + private Set ownerSet; + private LockInfo firstWaiter; + private List waiterList; + + /** + * Create a Lock. + */ + public LockImpl() { + } + + /* Used when releasing lock. */ + LockImpl(LockImpl lock) { + this.firstOwner = lock.firstOwner; + this.ownerSet = lock.ownerSet; + this.firstWaiter = lock.firstWaiter; + this.waiterList = lock.waiterList; + } + + /* Used when mutating from a ThinLockImpl to a LockImpl. */ + LockImpl(LockInfo firstOwner) { + this.firstOwner = firstOwner; + } + + /** + * The first waiter goes into the firstWaiter member variable. Once the + * waiterList is made, all appended waiters go into waiterList, even after + * the firstWaiter goes away and leaves that field null, so as to leave the + * list ordered. + */ + private void addWaiterToEndOfList(LockInfo waiter, + MemoryBudget mb, + int lockTableIndex) { + /* Careful: order important! */ + if (waiterList == null) { + if (firstWaiter == null) { + firstWaiter = waiter; + } else { + waiterList = new ArrayList(); + waiterList.add(waiter); + } + } else { + waiterList.add(waiter); + } + mb.updateLockMemoryUsage + (MemoryBudget.LOCKINFO_OVERHEAD, lockTableIndex); + } + + /** + * Add this waiter to the front of the list. 
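+     * Used for lock promotions: moving the promotion waiter ahead of other
+     * waiters shortens the window in which deadlocks can occur (see lock()).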
+ */ + private void addWaiterToHeadOfList(LockInfo waiter, + MemoryBudget mb, + int lockTableIndex) { + /* Shuffle the current first waiter down a slot. */ + if (firstWaiter != null) { + if (waiterList == null) { + waiterList = new ArrayList(); + } + waiterList.add(0, firstWaiter); + } + + firstWaiter = waiter; + mb.updateLockMemoryUsage + (MemoryBudget.LOCKINFO_OVERHEAD, lockTableIndex); + } + + /** + * Get a list of waiters for debugging and error messages. + */ + public List getWaitersListClone() { + List dumpWaiters = new ArrayList(); + if (firstWaiter != null) { + dumpWaiters.add(firstWaiter); + } + + if (waiterList != null) { + dumpWaiters.addAll(waiterList); + } + + return dumpWaiters; + } + + /** + * Remove this locker from the waiter list. + */ + public void flushWaiter(Locker locker, + MemoryBudget mb, + int lockTableIndex) { + if ((firstWaiter != null) && (firstWaiter.getLocker() == locker)) { + firstWaiter = null; + mb.updateLockMemoryUsage + (REMOVE_LOCKINFO_OVERHEAD, lockTableIndex); + } else if (waiterList != null) { + Iterator iter = waiterList.iterator(); + while (iter.hasNext()) { + LockInfo info = iter.next(); + if (info.getLocker() == locker) { + iter.remove(); + mb.updateLockMemoryUsage + (REMOVE_LOCKINFO_OVERHEAD, lockTableIndex); + return; + } + } + } + } + + private void addOwner(LockInfo newLock, + MemoryBudget mb, + int lockTableIndex) { + if (firstOwner == null) { + firstOwner = newLock; + } else { + if (ownerSet == null) { + ownerSet = new HashSet(); + } + ownerSet.add(newLock); + } + mb.updateLockMemoryUsage + (MemoryBudget.LOCKINFO_OVERHEAD, lockTableIndex); + } + + /** + * Get a new Set of the owners. + */ + public Set getOwnersClone() { + + /* No need to update memory usage, the returned Set is transient. */ + Set owners; + if (ownerSet != null) { + owners = new HashSet(ownerSet); + } else { + owners = new HashSet(); + } + if (firstOwner != null) { + owners.add(firstOwner); + } + return owners; + } + + /** + * Remove this LockInfo from the owner set and clear its memory budget. + */ + private boolean flushOwner(LockInfo oldOwner, + MemoryBudget mb, + int lockTableIndex) { + boolean removed = false; + if (oldOwner != null) { + if (firstOwner == oldOwner) { + firstOwner = null; + removed = true; + } else if (ownerSet != null) { + removed = ownerSet.remove(oldOwner); + } + } + + if (removed) { + mb.updateLockMemoryUsage(REMOVE_LOCKINFO_OVERHEAD, lockTableIndex); + } + return removed; + } + + /** + * Remove this locker from the owner set. + */ + private LockInfo flushOwner(Locker locker, + MemoryBudget mb, + int lockTableIndex) { + LockInfo flushedInfo = null; + if ((firstOwner != null) && + (firstOwner.getLocker() == locker)) { + flushedInfo = firstOwner; + firstOwner = null; + } else if (ownerSet != null) { + Iterator iter = ownerSet.iterator(); + while (iter.hasNext()) { + LockInfo o = iter.next(); + if (o.getLocker() == locker) { + iter.remove(); + flushedInfo = o; + } + } + } + if (flushedInfo != null) { + mb.updateLockMemoryUsage(REMOVE_LOCKINFO_OVERHEAD, lockTableIndex); + } + + return flushedInfo; + } + + /** + * Returns the owner LockInfo for a locker, or null if locker is not an + * owner. 
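+     * Checks firstOwner first, since a single owner is the common case.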
+ */ + private LockInfo getOwnerLockInfo(Locker locker) { + if ((firstOwner != null) && (firstOwner.getLocker() == locker)) { + return firstOwner; + } + + if (ownerSet != null) { + Iterator iter = ownerSet.iterator(); + while (iter.hasNext()) { + LockInfo o = iter.next(); + if (o.getLocker() == locker) { + return o; + } + } + } + + return null; + } + + /** + * Return true if locker is an owner of this Lock for lockType, + * false otherwise. + */ + public boolean isOwner(Locker locker, LockType lockType) { + LockInfo o = getOwnerLockInfo(locker); + return o != null && o.getLockType() == lockType; + } + + /** + * Return true if locker is an owner of this Lock and this is a write + * lock. + */ + public boolean isOwnedWriteLock(Locker locker) { + LockInfo o = getOwnerLockInfo(locker); + return o != null && o.getLockType().isWriteLock(); + } + + public LockType getOwnedLockType(Locker locker) { + LockInfo o = getOwnerLockInfo(locker); + return (o != null) ? o.getLockType() : null; + } + + /** + * Return true if locker is a waiter on this Lock. + * + * This method is only used by unit tests. + */ + public boolean isWaiter(Locker locker) { + + if (firstWaiter != null) { + if (firstWaiter.getLocker() == locker) { + return true; + } + } + + if (waiterList != null) { + Iterator iter = waiterList.iterator(); + while (iter.hasNext()) { + LockInfo info = iter.next(); + if (info.getLocker() == locker) { + return true; + } + } + } + return false; + } + + public int nWaiters() { + int count = 0; + if (firstWaiter != null) { + count++; + } + if (waiterList != null) { + count += waiterList.size(); + } + return count; + } + + public int nOwners() { + int count = 0; + if (firstOwner != null) { + count++; + } + + if (ownerSet != null) { + count += ownerSet.size(); + } + return count; + } + + /** + * Attempts to acquire the lock and returns the LockGrantType. + * + * Assumes we hold the lockTableLatch when entering this method. + */ + public LockAttemptResult lock(LockType requestType, + Locker locker, + boolean nonBlockingRequest, + boolean jumpAheadOfWaiters, + MemoryBudget mb, + int lockTableIndex) { + + assert validateRequest(locker); // intentional side effect + + /* Request an ordinary lock by checking the owners list. */ + LockInfo newLock = new LockInfo(locker, requestType); + LockGrantType grant = tryLock + (newLock, jumpAheadOfWaiters || (nWaiters() == 0), mb, + lockTableIndex); + + /* Do we have to wait for this lock? */ + if (grant == LockGrantType.WAIT_NEW || + grant == LockGrantType.WAIT_PROMOTION || + grant == LockGrantType.WAIT_RESTART) { + + /* + * If the request type can cause a restart and a restart conflict + * does not already exist, then we have to check the waiters list + * for restart conflicts. A restart conflict must take precedence + * or it may be missed. + */ + if (requestType.getCausesRestart() && + grant != LockGrantType.WAIT_RESTART) { + LockInfo waiter = null; + Iterator iter = null; + + if (waiterList != null) { + iter = waiterList.iterator(); + } + + if (firstWaiter != null) { + waiter = firstWaiter; + } else if ((iter != null) && (iter.hasNext())) { + waiter = iter.next(); + } + + while (waiter != null) { + + /* + * Check for a restart conflict. Ignore LockType.RESTART + * in the waiter list when checking for conflicts. 
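+                     * (A RESTART entry is a placeholder for a waiter whose
+                     * operation will be restarted; it is never granted, so
+                     * it cannot itself conflict with this request.)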
+ */ + Locker waiterLocker = waiter.getLocker(); + LockType waiterType = waiter.getLockType(); + if (waiterType != LockType.RESTART && + locker != waiterLocker && + !locker.sharesLocksWith(waiterLocker)) { + LockConflict conflict = + waiterType.getConflict(requestType); + if (conflict.getRestart()) { + grant = LockGrantType.WAIT_RESTART; + break; + } + } + + /* Move to the next waiter, if it's in the list. */ + if ((iter != null) && (iter.hasNext())) { + waiter = iter.next(); + } else { + waiter = null; + } + } + } + + /* Add the waiter or deny the lock as appropriate. */ + if (nonBlockingRequest) { + grant = LockGrantType.DENIED; + } else { + if (grant == LockGrantType.WAIT_PROMOTION) { + /* + * By moving our waiter to the top of the list we reduce + * the time window where deadlocks can occur due to the + * promotion. + */ + addWaiterToHeadOfList(newLock, mb, lockTableIndex); + } else { + assert grant == LockGrantType.WAIT_NEW || + grant == LockGrantType.WAIT_RESTART; + + /* + * If waiting to restart, change the lock type to RESTART + * to avoid granting the lock later. We wait until the + * RESTART waiter moves to the head of waiter list to + * prevent the requester from spinning performing repeated + * restarts, but we don't grant the lock. + */ + if (grant == LockGrantType.WAIT_RESTART) { + newLock.setLockType(LockType.RESTART); + } + + addWaiterToEndOfList(newLock, mb, lockTableIndex); + } + } + } + + /* Set 'success' later. */ + return new LockAttemptResult(this, grant, false); + } + + /** + * Releases a lock and moves the next waiter(s) to the owners. + * @return + * null if we were not the owner, + * a non-empty set if owners should be notified after releasing, + * an empty set if no notification is required. + */ + public Set release(Locker locker, + MemoryBudget mb, + int lockTableIndex) { + + LockInfo removedLock = flushOwner(locker, mb, lockTableIndex); + if (removedLock == null) { + /* Not owner. */ + return null; + } + + Set lockersToNotify = Collections.emptySet(); + + if (nWaiters() == 0) { + /* No more waiters, so no one to notify. */ + return lockersToNotify; + } + + /* + * Move the next set of waiters to the owners set. Iterate through the + * firstWaiter field, then the waiterList. + */ + LockInfo waiter = null; + Iterator iter = null; + boolean isFirstWaiter = false; + + if (waiterList != null) { + iter = waiterList.iterator(); + } + + if (firstWaiter != null) { + waiter = firstWaiter; + isFirstWaiter = true; + } else if ((iter != null) && (iter.hasNext())) { + waiter = iter.next(); + } + + while (waiter != null) { + /* Make the waiter an owner if the lock can be acquired. */ + LockType waiterType = waiter.getLockType(); + Locker waiterLocker = waiter.getLocker(); + LockGrantType grant; + if (waiterType == LockType.RESTART) { + /* Special case for restarts: see rangeInsertConflict. */ + grant = rangeInsertConflict(waiterLocker) ? + LockGrantType.WAIT_NEW : LockGrantType.NEW; + } else { + /* Try locking. */ + grant = tryLock(waiter, true, mb, lockTableIndex); + } + /* Check if granted. */ + if (grant == LockGrantType.NEW || + grant == LockGrantType.EXISTING || + grant == LockGrantType.PROMOTION) { + /* Remove it from the waiters list. 
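+                 * The waiter either became an owner in tryLock above, or
+                 * (for a RESTART placeholder) is simply woken so that its
+                 * operation can restart.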
*/ + if (isFirstWaiter) { + firstWaiter = null; + } else { + iter.remove(); + } + if (lockersToNotify == Collections.EMPTY_SET) { + lockersToNotify = new HashSet(); + } + lockersToNotify.add(waiterLocker); + mb.updateLockMemoryUsage + (REMOVE_LOCKINFO_OVERHEAD, lockTableIndex); + } else { + assert grant == LockGrantType.WAIT_NEW || + grant == LockGrantType.WAIT_PROMOTION || + grant == LockGrantType.WAIT_RESTART; + /* Stop on first waiter that cannot be an owner. */ + break; + } + + /* Move to the next waiter, if it's in the list. */ + if ((iter != null) && (iter.hasNext())) { + waiter = iter.next(); + isFirstWaiter = false; + } else { + waiter = null; + } + } + return lockersToNotify; + } + + public void stealLock(Locker locker, MemoryBudget mb, int lockTableIndex) { + + if (firstOwner != null) { + Locker thisLocker = firstOwner.getLocker(); + if (thisLocker != locker && + thisLocker.getPreemptable()) { + thisLocker.setPreempted(); + firstOwner = null; + mb.updateLockMemoryUsage(REMOVE_LOCKINFO_OVERHEAD, + lockTableIndex); + } + } + + if (ownerSet != null) { + Iterator iter = ownerSet.iterator(); + while (iter.hasNext()) { + LockInfo lockInfo = iter.next(); + Locker thisLocker = lockInfo.getLocker(); + if (thisLocker != locker && + thisLocker.getPreemptable()) { + thisLocker.setPreempted(); + iter.remove(); + mb.updateLockMemoryUsage(REMOVE_LOCKINFO_OVERHEAD, + lockTableIndex); + } + } + } + } + + /** + * Called from lock() to try locking a new request, and from release() to + * try locking a waiting request. + * + * @param newLock is the lock that is requested. + * + * @param firstWaiterInLine determines whether to grant the lock when a + * NEW lock can be granted, but other non-conflicting owners exist; for + * example, when a new READ lock is requested but READ locks are held by + * other owners. This parameter should be true if the requestor is the + * first waiter in line (or if there are no waiters), and false otherwise. + * + * @param mb is the current memory budget. + * + * @return LockGrantType.EXISTING, NEW, PROMOTION, WAIT_RESTART, WAIT_NEW + * or WAIT_PROMOTION. + */ + private LockGrantType tryLock(LockInfo newLock, + boolean firstWaiterInLine, + MemoryBudget mb, + int lockTableIndex) { + + /* If no one owns this right now, just grab it. */ + if (nOwners() == 0) { + addOwner(newLock, mb, lockTableIndex); + return LockGrantType.NEW; + } + + Locker locker = newLock.getLocker(); + LockType requestType = newLock.getLockType(); + LockUpgrade upgrade = null; + LockInfo lockToUpgrade = null; + boolean ownerExists = false; + boolean ownerConflicts = false; + + /* + * Iterate through the current owners. See if there is a current owner + * who has to be upgraded from read to write. Also track whether there + * is a conflict with another owner. + */ + LockInfo owner = null; + Iterator iter = null; + + if (ownerSet != null) { + iter = ownerSet.iterator(); + } + + if (firstOwner != null) { + owner = firstOwner; + } else if ((iter != null) && (iter.hasNext())) { + owner = iter.next(); + } + + while (owner != null) { + Locker ownerLocker = owner.getLocker(); + LockType ownerType = owner.getLockType(); + if (locker == ownerLocker) { + + /* + * Requestor currently holds this lock: check for upgrades. + * If no type change is needed, return EXISTING now to avoid + * iterating further; otherwise, we need to check for conflicts + * before granting the upgrade. 
+ */ + assert (upgrade == null); // An owner should appear only once + upgrade = ownerType.getUpgrade(requestType); + if (upgrade.getUpgrade() == null) { + return LockGrantType.EXISTING; + } else { + lockToUpgrade = owner; + } + } else { + + /* + * Requestor does not hold this lock: check for conflicts. + * If the owner shares locks with the requestor, ignore it; + * otherwise, if a restart conflict exists, return it now; + * otherwise, save the conflict information. + */ + if (!locker.sharesLocksWith(ownerLocker) && + !ownerLocker.sharesLocksWith(locker)) { + LockConflict conflict = ownerType.getConflict(requestType); + if (conflict.getRestart()) { + return LockGrantType.WAIT_RESTART; + } else { + if (!conflict.getAllowed()) { + ownerConflicts = true; + } + ownerExists = true; + } + } + } + + /* Move on to the next owner. */ + if ((iter != null) && (iter.hasNext())) { + owner = iter.next(); + } else { + owner = null; + } + } + + /* Now handle the upgrade or conflict as appropriate. */ + if (upgrade != null) { + /* The requestor holds this lock. */ + LockType upgradeType = upgrade.getUpgrade(); + assert upgradeType != null; + if (!ownerConflicts) { + /* No conflict: grant the upgrade. */ + lockToUpgrade.setLockType(upgradeType); + return upgrade.getPromotion() ? + LockGrantType.PROMOTION : LockGrantType.EXISTING; + } else { + /* Upgrade cannot be granted at this time. */ + return LockGrantType.WAIT_PROMOTION; + } + } else { + /* The requestor doesn't hold this lock. */ + if (!ownerConflicts && (!ownerExists || firstWaiterInLine)) { + /* No conflict: grant the lock. */ + addOwner(newLock, mb, lockTableIndex); + return LockGrantType.NEW; + } else { + /* Lock cannot be granted at this time. */ + return LockGrantType.WAIT_NEW; + } + } + } + + /** + * Called from release() when a RESTART request is waiting to determine if + * any RANGE_INSERT owners exist. We can't call tryLock for a RESTART + * lock because it must never be granted. + */ + private boolean rangeInsertConflict(Locker waiterLocker) { + + LockInfo owner = null; + Iterator iter = null; + + if (ownerSet != null) { + iter = ownerSet.iterator(); + } + + if (firstOwner != null) { + owner = firstOwner; + } else if ((iter != null) && (iter.hasNext())) { + owner = iter.next(); + } + + while (owner != null) { + Locker ownerLocker = owner.getLocker(); + if (ownerLocker != waiterLocker && + !ownerLocker.sharesLocksWith(waiterLocker) && + owner.getLockType() == LockType.RANGE_INSERT) { + return true; + } + + /* Move on to the next owner. */ + if ((iter != null) && (iter.hasNext())) { + owner = iter.next(); + } else { + owner = null; + } + } + + return false; + } + + /** + * Downgrade a write lock to a read lock. + */ + public void demote(Locker locker) { + LockInfo owner = getOwnerLockInfo(locker); + if (owner != null) { + LockType type = owner.getLockType(); + if (type.isWriteLock()) { + owner.setLockType((type == LockType.RANGE_WRITE) ? + LockType.RANGE_READ : LockType.READ); + } + } + } + + /** + * Return the locker that has a write ownership on this lock. If no + * write owner exists, return null. + */ + public Locker getWriteOwnerLocker() { + + LockInfo owner = null; + Iterator iter = null; + + if (ownerSet != null) { + iter = ownerSet.iterator(); + } + + if (firstOwner != null) { + owner = firstOwner; + } else if ((iter != null) && (iter.hasNext())) { + owner = iter.next(); + } + + while (owner != null) { + /* Return locker if it owns a write lock. 
*/ + if (owner.getLockType().isWriteLock()) { + return owner.getLocker(); + } + + /* Move on to the next owner. */ + if ((iter != null) && (iter.hasNext())) { + owner = iter.next(); + } else { + owner = null; + } + } + + return null; + } + + /** + * Debugging aid, validation before a lock request. + */ + private boolean validateRequest(Locker locker) { + if (firstWaiter != null) { + if (firstWaiter.getLocker() == locker) { + assert false : "locker " + locker + + " is already on waiters list."; + } + } + + if (waiterList != null) { + Iterator iter = waiterList.iterator(); + while (iter.hasNext()) { + LockInfo o = iter.next(); + if (o.getLocker() == locker) { + assert false : "locker " + locker + + " is already on waiters list."; + } + } + } + return true; + } + + public boolean isThin() { + return false; + } + + /** + * Debug dumper. + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(" LockAddr:").append(System.identityHashCode(this)); + sb.append(" Owners:"); + if (nOwners() == 0) { + sb.append(" (none)"); + } else { + if (firstOwner != null) { + sb.append(firstOwner); + } + + if (ownerSet != null) { + Iterator iter = ownerSet.iterator(); + while (iter.hasNext()) { + LockInfo info = iter.next(); + sb.append(info); + } + } + } + + sb.append(" Waiters:"); + if (nWaiters() == 0) { + sb.append(" (none)"); + } else { + sb.append(getWaitersListClone()); + } + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/txn/LockInfo.java b/src/com/sleepycat/je/txn/LockInfo.java new file mode 100644 index 0000000..9e14ae3 --- /dev/null +++ b/src/com/sleepycat/je/txn/LockInfo.java @@ -0,0 +1,141 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.util.Collections; +import java.util.Map; +import java.util.WeakHashMap; + +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * LockInfo is a class that embodies information about a lock instance. The + * holding thread and the locktype are all contained in the object. + * + * This class is public for unit tests. + */ +public class LockInfo implements Cloneable { + protected Locker locker; + protected LockType lockType; + + private static boolean deadlockStackTrace = false; + private static Map traceExceptionMap = + Collections.synchronizedMap(new WeakHashMap()); + @SuppressWarnings("serial") + private static class StackTraceAtLockTime extends Exception {} + + /** + * Called when the je.txn.deadlockStackTrace property is changed. + */ + static void setDeadlockStackTrace(boolean enable) { + deadlockStackTrace = enable; + } + + /** + * For unit testing only. + */ + public static boolean getDeadlockStackTrace() { + return deadlockStackTrace; + } + + /** + * Construct a new LockInfo. public for Sizeof program. + */ + public LockInfo(Locker locker, LockType lockType) { + this.locker = locker; + this.lockType = lockType; + + if (deadlockStackTrace) { + traceExceptionMap.put(this, new StackTraceAtLockTime()); + } + } + + /** + * Clone from given LockInfo. 
Use this constructor when copying a LockInfo
+     * and its identity should be copied (e.g., when mutating a thin lock to
+     * a thick lock) to ensure that debugging info is retained.
+     */
+    LockInfo(LockInfo other) {
+        this.locker = other.locker;
+        this.lockType = other.lockType;
+
+        if (deadlockStackTrace) {
+            traceExceptionMap.put(this, traceExceptionMap.get(other));
+        }
+    }
+
+    /**
+     * Change this lockInfo over to the prescribed locker.
+     */
+    void setLocker(Locker locker) {
+        this.locker = locker;
+    }
+
+    /**
+     * @return The Locker associated with this LockInfo.
+     */
+    public Locker getLocker() {
+        return locker;
+    }
+
+    /**
+     * Change the LockType associated with this LockInfo.
+     */
+    void setLockType(LockType lockType) {
+        this.lockType = lockType;
+    }
+
+    /**
+     * @return The LockType associated with this LockInfo.
+     */
+    LockType getLockType() {
+        return lockType;
+    }
+
+    @Override
+    public Object clone()
+        throws CloneNotSupportedException {
+
+        return super.clone();
+    }
+
+    /**
+     * Debugging
+     */
+    public void dump() {
+        System.out.println(this);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder(500);
+
+        buf.append("<LockInfo locker=\"");
+        buf.append(locker);
+        buf.append("\" type=\"");
+        buf.append(lockType);
+        buf.append("\"/>");
+
+        if (deadlockStackTrace) {
+            Exception traceException = traceExceptionMap.get(this);
+            if (traceException != null) {
+                buf.append(" lock taken at: ");
+                buf.append(LoggerUtils.getStackTrace(traceException));
+            }
+        }
+
+        return buf.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/txn/LockManager.java b/src/com/sleepycat/je/txn/LockManager.java
new file mode 100644
index 0000000..4d4c318
--- /dev/null
+++ b/src/com/sleepycat/je/txn/LockManager.java
@@ -0,0 +1,2228 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.txn; + +import static com.sleepycat.je.txn.LockStatDefinition.GROUP_DESC; +import static com.sleepycat.je.txn.LockStatDefinition.GROUP_NAME; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_OWNERS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_REQUESTS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_TOTAL; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITERS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DeadlockException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockStats; +import com.sleepycat.je.LockTimeoutException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.TransactionTimeoutException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.dbi.RangeRestartException; +import com.sleepycat.je.latch.Latch; +import com.sleepycat.je.latch.LatchFactory; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TinyHashSet; + +/** + * LockManager manages locks. + * + * Note that locks are counted as taking up part of the JE cache. + */ +public abstract class LockManager implements EnvConfigObserver { + + /* + * The total memory cost for a lock is the Lock object, plus its entry and + * key in the lock hash table. + * + * The addition and removal of Lock objects, and the corresponding cost of + * their hashmap entry and key are tracked through the LockManager. + */ + private static final long TOTAL_LOCKIMPL_OVERHEAD = + MemoryBudget.LOCKIMPL_OVERHEAD + + MemoryBudget.HASHMAP_ENTRY_OVERHEAD + + MemoryBudget.LONG_OVERHEAD; + + static final long TOTAL_THINLOCKIMPL_OVERHEAD = + MemoryBudget.THINLOCKIMPL_OVERHEAD + + MemoryBudget.HASHMAP_ENTRY_OVERHEAD + + MemoryBudget.LONG_OVERHEAD; + + private static final long REMOVE_TOTAL_LOCKIMPL_OVERHEAD = + 0 - TOTAL_LOCKIMPL_OVERHEAD; + + private static final long REMOVE_TOTAL_THINLOCKIMPL_OVERHEAD = + 0 - TOTAL_THINLOCKIMPL_OVERHEAD; + + private static final long THINLOCK_MUTATE_OVERHEAD = + MemoryBudget.LOCKIMPL_OVERHEAD - + MemoryBudget.THINLOCKIMPL_OVERHEAD + + MemoryBudget.LOCKINFO_OVERHEAD; + + private static final List EMPTY_THREAD_LOCKERS = + Collections.emptyList(); + + /* Hook called after a lock is requested. 
+ */
+    public static TestHook afterLockHook;
+
+    /*
+     * Hook called after waitingFor is set and before doing deadlock
+     * detection. This aims to inject some wait-action for the current
+     * thread.
+     */
+    static TestHook simulatePartialDeadlockHook;
+
+    final int nLockTables;
+    final Latch[] lockTableLatches;
+    private final Map<Long, Lock>[] lockTables; // keyed by LSN
+    private final EnvironmentImpl envImpl;
+    private final MemoryBudget memoryBudget;
+
+    private final StatGroup stats;
+    private final LongStat nRequests; /* number of times a request was made */
+    private final LongStat nWaits;    /* number of times a request blocked */
+
+    private static RangeRestartException rangeRestartException =
+        new RangeRestartException();
+    private static boolean lockTableDump = false;
+
+    /**
+     * Maps a thread to a set of ThreadLockers. Currently this map is only
+     * maintained (non-null) in a replicated environment because it is only
+     * needed for determining when to throw LockPreemptedException.
+     *
+     * Access to the map need not be synchronized because it is a
+     * ConcurrentHashMap. Access to the TinyHashSet stored for each thread
+     * need not be synchronized, since it is only accessed by a single
+     * thread.
+     *
+     * A TinyHashSet is used because typically only a single ThreadLocker per
+     * thread will be open at one time.
+     *
+     * @see ThreadLocker#checkPreempted
+     * [#16513]
+     */
+    private final Map<Thread, TinyHashSet<ThreadLocker>> threadLockers;
+
+    /*
+     * @SuppressWarnings is used to stifle a type safety complaint about the
+     * assignment of lockTables = new Map[nLockTables]. There's no way to
+     * specify the type of the array.
+     */
+    @SuppressWarnings("unchecked")
+    public LockManager(final EnvironmentImpl envImpl) {
+
+        final DbConfigManager configMgr = envImpl.getConfigManager();
+        nLockTables = configMgr.getInt(EnvironmentParams.N_LOCK_TABLES);
+        lockTables = new Map[nLockTables];
+        lockTableLatches = new Latch[nLockTables];
+        for (int i = 0; i < nLockTables; i++) {
+            lockTables[i] = new HashMap<Long, Lock>();
+            lockTableLatches[i] = LatchFactory.createExclusiveLatch(
+                envImpl, "Lock Table " + i, true /*collectStats*/);
+        }
+        this.envImpl = envImpl;
+        memoryBudget = envImpl.getMemoryBudget();
+
+        stats = new StatGroup(GROUP_NAME, GROUP_DESC);
+        nRequests = new LongStat(stats, LOCK_REQUESTS);
+        nWaits = new LongStat(stats, LOCK_WAITS);
+
+        /* Initialize mutable properties and register for notifications. */
+        envConfigUpdate(configMgr, null);
+        envImpl.addConfigObserver(this);
+
+        if (envImpl.isReplicated()) {
+            threadLockers =
+                new ConcurrentHashMap<Thread, TinyHashSet<ThreadLocker>>();
+        } else {
+            threadLockers = null;
+        }
+    }
+
+    /**
+     * Process notifications of mutable property changes.
+     */
+    public void envConfigUpdate(final DbConfigManager configMgr,
+                                final EnvironmentMutableConfig ignore) {
+
+        LockInfo.setDeadlockStackTrace(configMgr.getBoolean
+            (EnvironmentParams.TXN_DEADLOCK_STACK_TRACE));
+
+        setLockTableDump(configMgr.getBoolean
+            (EnvironmentParams.TXN_DUMPLOCKS));
+    }
+
+    /**
+     * Called when the je.txn.dumpLocks property is changed.
+     */
+    private static void setLockTableDump(boolean enable) {
+        lockTableDump = enable;
+    }
+
+    int getLockTableIndex(Long lsn) {
+        return (((int) lsn.longValue()) & 0x7fffffff) % nLockTables;
+    }
+
+    int getLockTableIndex(long lsn) {
+        return (((int) lsn) & 0x7fffffff) % nLockTables;
+    }
+
+    private static long timeRemain(final long timeout, final long startTime) {
+        return (timeout - (System.currentTimeMillis() - startTime));
+    }
+
+    /**
+     * Attempt to acquire a lock of 'type' on 'lsn'.
+     *
+     * @param lsn The LSN to lock.
+ * + * @param locker The Locker to lock this on behalf of. + * + * @param type The lock type requested. + * + * @param timeout milliseconds to time out after if lock couldn't be + * obtained. 0 means block indefinitely. Not used if nonBlockingRequest + * is true. + * + * @param nonBlockingRequest if true, means don't block if lock can't be + * acquired, and ignore the timeout parameter. + * + * @param jumpAheadOfWaiters grant the lock before other waiters, if any. + * + * @return a LockGrantType indicating whether the request was fulfilled or + * not. LockGrantType.NEW means the lock grant was fulfilled and the + * caller did not previously hold the lock. PROMOTION means the lock was + * granted and it was a promotion from READ to WRITE. EXISTING means the + * lock was already granted (not a promotion). DENIED means the lock was + * not granted when nonBlockingRequest is true. + * + * @throws LockConflictException if lock could not be acquired. If + * nonBlockingRequest is true, a LockConflictException is never thrown. + * Otherwise, if the lock acquisition would result in a deadlock, + * DeadlockException is thrown. Otherwise, if the lock timeout interval + * elapses and no deadlock is detected, LockTimeoutException is thrown. + */ + public LockGrantType lock(final long lsn, + final Locker locker, + final LockType type, + long timeout, + final boolean nonBlockingRequest, + final boolean jumpAheadOfWaiters, + final DatabaseImpl database) + throws LockConflictException, DatabaseException { + + assert timeout >= 0; + + /* No lock needed for dirty-read, return as soon as possible. */ + if (type == LockType.NONE) { + return LockGrantType.NONE_NEEDED; + } + + final long startTime; + LockAttemptResult result; + LockGrantType grant; + + synchronized (locker) { + + /* Attempt to lock without any initial wait. */ + result = attemptLock( + lsn, locker, type, nonBlockingRequest, jumpAheadOfWaiters); + + grant = result.lockGrant; + + /* If we got the lock or a non-blocking lock was denied, return. */ + if (result.success || grant == LockGrantType.DENIED) { + assert nonBlockingRequest || result.success; + if (afterLockHook != null) { + afterLockHook.doHook(); + } + return grant; + } + + if (LatchSupport.TRACK_LATCHES) { + if (!nonBlockingRequest) { + LatchSupport.expectBtreeLatchesHeld(0); + } + } + + /* + * We must have gotten WAIT_* from the lock request. We know that + * this is a blocking request, because if it wasn't, Lock.lock + * would have returned DENIED. Go wait! + */ + assert !nonBlockingRequest; + + locker.setWaitingFor(lsn, type); + + /* currentTimeMillis is expensive. Only call it if we will wait. */ + startTime = System.currentTimeMillis(); + + /* + * If there is a txn timeout, and the txn time remaining is less + * than the lock timeout, then use the txn time remaining instead. + */ + final long txnTimeout = locker.getTxnTimeout(); + if (txnTimeout > 0) { + final long txnTimeRemaining = + timeRemain(txnTimeout, locker.getTxnStartMillis()); + + if (timeout == 0 || txnTimeRemaining < timeout) { + timeout = Math.max(1, txnTimeRemaining); + } + } + + /* + * After the deadlock detection delay, if this locker is the owner, + * we're done. + */ + if (performDeadlockDetectionDelay( + lsn, locker, type, grant, timeout, startTime)) { + return grant; + } + } + + DeadlockChecker lastDC = null; + + /* + * Repeatedly try waiting for the lock. If a deadlock is detected, + * notify the victim. When this locker becomes the owner, we're done. 
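+         * lastDC carries the most recent deadlock analysis from one retry
+         * to the next, so that a final DeadlockException can still describe
+         * the cycle that was seen.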
+ * + * If there is a deadlock and this locker is the victim, waitForLock + * will throw DeadlockException. If there is a deadlock and this locker + * is not the victim, we call notifyVictim. If the deadlock cannot be + * broken by notifying the victim, notifyVictim will throw + * DeadlockException. + * + * If a timeout occurs and a deadlock was detected, DeadlockException + * will be thrown by waitForLock or notifyVictim. If a timeout occurs + * and no deadlock was detected, a timeout exception will be thrown by + * waitForLock. + */ + while (true) { + final WaitForLockResult waitResult; + + synchronized (locker) { + waitResult = waitForLock( + result, lsn, locker, type, lastDC, timeout, startTime, + database); + } + + result = waitResult.getResult(); + grant = result.lockGrant; + lastDC = waitResult.getDeadLockChecker(); + final Locker victim = waitResult.getVictim(); + + if (victim == null) { + /* The locker owns the lock and no deadlock was detected. */ + return grant; + } + + /* + * A deadlock is detected and this locker is not the victim. + * Notify the victim. + */ + final Pair nvResult = notifyVictim( + victim, locker, lsn, type, lastDC, timeout, startTime, + database); + + if (nvResult.first()) { + + /* + * The deadlock was broken and this locker is now the owner. + * finishLock will clear locker.waitingFor, which must be + * protected by the locker mutex. + */ + synchronized (locker) { + finishLock(locker, lsn, type, grant); + } + return grant; + } + + /* + * A deadlock is no longer present, or a deadlock with a different + * victim was detected. And the timeout has not expired. Retry. + */ + lastDC = nvResult.second(); + } + } + + /** + * Waits for a lock that was previously requested. Handles deadlocks and + * timeouts. However, does not notify the victim when a deadlock is + * detected and the current locker is not the victim; in that case the + * caller must notify the victim. This method cannot notify the victim + * because it is synchronized on the current locker, and we can synchronize + * on only one locker at a time. + * + * This method must be called while synchronized on the locker, for several + * reasons: + * 1. The locker will be modified if the lock is acquired by stealLock + * and when finishLock is called. + * 2. It must be synchronized when performing the lock delay. + * + * @return the lock result and a null victim if locking was successful, or + * a non-null victim if a deadlock is detected and the current locker is + * not the victim. + * + * @throws DeadlockException when a deadlock is detected and the current + * locker is the victim. + * + * @throws LockTimeoutException when the timeout elapses and no deadlock is + * detected. + * + * @throws TransactionTimeoutException when the transaction time limit was + * exceeded. + */ + private WaitForLockResult waitForLock( + LockAttemptResult result, + final Long lsn, + final Locker locker, + final LockType type, + DeadlockChecker lastDC, + final long timeout, + final long startTime, + final DatabaseImpl database) { + + final boolean isImportunate = locker.getImportunate(); + final boolean waitForever = (timeout == 0); + Locker victim = null; + + if (simulatePartialDeadlockHook != null) { + simulatePartialDeadlockHook.doHook(); + } + + /* + * There are two reasons for the loop below. + * + * 1. When another thread detects a deadlock and notifies this thread, + * it will wakeup before the timeout interval has expired. We must loop + * again to perform deadlock detection. 
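+     * (In both cases the wait below recomputes the time remaining via
+     * timeRemain, so extra wakeups never extend the overall timeout.)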
Normally, if the deadlock + * detected by the other thread is still present, this locker will be + * selected as the victim and we will throw DeadlockException below. + * + * 2. Even when another JE locking thread does not notify this thread, + * this thread may wake up before the timeout has elapsed due to + * "spurious wakeups" -- see Object.wait(). + */ + while (true) { + + /* + * Perform deadlock detection before waiting. + * + * When the locker is a ReplayTxn (isImportunate is true), we + * must steal the lock even if when we encounter a deadlock. + * + * There are two reasons that we do not check for deadlocks + * here for a ReplayTxn: + * + * 1. If we were to detect a deadlock here, we could not + * control which locker will be chosen as the victim. The + * ReplayTxn might be chosen as the victim and then it would be + * aborted by throwing DeadlockException. ReplayTxns may not be + * aborted and a LockConflictException should not be thrown. + * + * 2. If the ReplayTxn deadlocks with other txns, the other + * txns will detect deadlock. + * + If another txn is chosen as the victim, then ReplayTxn + * will acquire the lock. + * + If ReplayTxn is chosen as the victim, ReplayTxn will be + * notified and will wake up and steal the lock. This is + * efficient, since a long wait will not be needed. + */ + if (envImpl.getDeadlockDetection() && !isImportunate) { + + /* Do deadlock detect */ + final DeadlockResult dlr = checkAndHandleDeadlock( + locker, lsn, type, timeout, database); + + if (dlr.isOwner) { + break; + } + + if (dlr.trueDeadlock) { + victim = dlr.victim; + lastDC = dlr.dc; + break; + } + + /* + * Else do nothing. We did not detect a true deadlock and + * this locker does not own the lock, so wait again with the + * time remaining. + */ + } else { + /* + * Check ownership before waiting, since we release the locker + * mutex between calling attemptLock and waitForLock. This + * check prevents a missed notification: + * locker1 locker2 + * wait for a lock + * release the lock and notify locker1 + * locker1.wait() + */ + if (isOwner(lsn, locker, type)) { + break; + } + } + + try { + if (waitForever) { + locker.wait(0); + } else { + locker.wait(Math.max(1, timeRemain(timeout, startTime))); + } + } catch (InterruptedException IE) { + throw new ThreadInterruptedException(envImpl, IE); + } + + final boolean lockerTimedOut = locker.isTimedOut(); + final long now = System.currentTimeMillis(); + + final boolean thisLockTimedOut = + (!waitForever && (now - startTime) >= timeout); + + final boolean isRestart = + (result.lockGrant == LockGrantType.WAIT_RESTART); + + /* + * Try to get accurate owners and waiters of requested + * lock when deciding to possibly throw LockTimeoutException + */ + final Set owners = new HashSet<>(); + final List waiters = new ArrayList<>(); + + final boolean getOwnersAndWaiters = + (lockerTimedOut || thisLockTimedOut) && !isImportunate; + + /* + * Only flush the waiters if isRestart && !isImportunate. If + * lockerTimedOut or thisLockTimedOut, we will flush the waiters + * before throwing an exception further below. + */ + final boolean flushFromWaiters = isRestart && !isImportunate; + + if (validateOwnership( + lsn, locker, type, getOwnersAndWaiters, flushFromWaiters, + owners, waiters)) { + + break; + } + + if (isImportunate) { + result = stealLock(lsn, locker, type); + if (result.success) { + break; + } + /* Lock holder is non-preemptable, wait again. */ + continue; + } + + /* After a restart conflict the lock will not be held. 
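+             * The preallocated, shared rangeRestartException instance is
+             * used because the exception carries no per-use state; it simply
+             * tells the caller to restart the range operation.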
*/ + if (isRestart) { + throw rangeRestartException; + } + + if (!thisLockTimedOut && !lockerTimedOut) { + continue; + } + + final DeadlockResult dlr = + checkAndHandleDeadlock(locker, lsn, type, timeout, database); + + if (dlr.isOwner) { + break; + } + + if (dlr.trueDeadlock) { + lastDC = dlr.dc; + } + + /* Flush lock from waiters before throwing exception. */ + if (validateOwnership( + lsn, locker, type, + false /*getOwnersAndWaiters*/, + true /*flushFromWaiters*/, null, null)) { + + break; + } + + if (lastDC != null) { + throw makeDeadlockException( + lastDC, locker, timeout, false /*isVictim*/, database); + } + + /* + * When both types of timeouts occur, throw TransactionTimeout. + * Otherwise TransactionTimeout may never be thrown, because when + * the txn times out, the lock probably also times out. + */ + if (lockerTimedOut) { + throw makeTimeoutException( + false /*isLockNotTxnTimeout*/, locker, lsn, type, + result.lockGrant, result.useLock, + locker.getTxnTimeout(), locker.getTxnStartMillis(), + now, database, owners, waiters); + } else { + throw makeTimeoutException( + true /*isLockNotTxnTimeout*/, locker, lsn, type, + result.lockGrant, result.useLock, + timeout, startTime, + now, database, owners, waiters); + } + } + + if (victim == null) { + assert isOwner(lsn, locker, type); + finishLock(locker, lsn, type, result.lockGrant); + } + + return new WaitForLockResult(victim, lastDC, result); + } + + private void finishLock( + final Locker locker, + final Long nid, + final LockType type, + final LockGrantType grant) { + + locker.clearWaitingFor(); + + assert EnvironmentImpl.maybeForceYield(); + + locker.addLock(nid, type, grant); + + if (afterLockHook != null) { + afterLockHook.doHook(); + } + } + + /** + * Returns the Lockers that own a lock on the given LSN. Note that when + * this method returns, there is nothing to prevent these lockers from + * releasing the lock or being closed. + */ + public abstract Set getOwners(Long lsn); + + Set getOwnersInternal(final Long lsn, final int lockTableIndex) { + /* Get the target lock. */ + final Map lockTable = lockTables[lockTableIndex]; + final Lock useLock = lockTable.get(lsn); + if (useLock == null) { + return null; + } + return useLock.getOwnersClone(); + } + + /** + * Returns the Lockers that wait on a lock on the given LSN. + */ + public abstract List getWaiters(Long lsn); + + List getWaitersInternal(final Long lsn, + final int lockTableIndex) { + /* Get the target lock. */ + final Map lockTable = lockTables[lockTableIndex]; + final Lock useLock = lockTable.get(lsn); + if (useLock == null) { + return null; + } + return useLock.getWaitersListClone(); + } + + /** + * Returns the LockType if the given locker owns a lock on the given node, + * or null if the lock is not owned. + */ + public abstract LockType getOwnedLockType(Long lsn, Locker locker); + + LockType getOwnedLockTypeInternal(final Long lsn, + final Locker locker, + final int lockTableIndex) { + /* Get the target lock. */ + final Map lockTable = lockTables[lockTableIndex]; + final Lock useLock = lockTable.get(lsn); + if (useLock == null) { + return null; + } + return useLock.getOwnedLockType(locker); + } + + public abstract boolean isLockUncontended(Long lsn); + + boolean isLockUncontendedInternal(final Long lsn, + final int lockTableIndex) { + /* Get the target lock. 
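+         * A missing table entry means the lock has no owners and no
+         * waiters, i.e. it is uncontended.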
*/ + final Map lockTable = lockTables[lockTableIndex]; + final Lock useLock = lockTable.get(lsn); + if (useLock == null) { + return true; + } + return useLock.nWaiters() == 0 && + useLock.nOwners() == 0; + } + + public abstract boolean ownsOrSharesLock(Locker locker, Long lsn); + + boolean ownsOrSharesLockInternal(final Locker locker, + final Long lsn, + final int lockTableIndex) { + final Map lockTable = lockTables[lockTableIndex]; + final Lock useLock = lockTable.get(lsn); + if (useLock == null) { + return false; + } + for (final LockInfo info : getOwnersInternal(lsn, lockTableIndex)) { + final Locker owner = info.getLocker(); + if (owner == locker || + owner.sharesLocksWith(locker) || + locker.sharesLocksWith(owner)) { + return true; + } + } + return false; + } + + abstract Lock lookupLock(Long lsn) + throws DatabaseException; + + Lock lookupLockInternal(final Long lsn, final int lockTableIndex) { + /* Get the target lock. */ + final Map lockTable = lockTables[lockTableIndex]; + return lockTable.get(lsn); + } + + abstract LockAttemptResult attemptLock(Long lsn, + Locker locker, + LockType type, + boolean nonBlockingRequest, + boolean jumpAheadOfWaiters) + throws DatabaseException; + + LockAttemptResult attemptLockInternal(final Long lsn, + final Locker locker, + final LockType type, + final boolean nonBlockingRequest, + final boolean jumpAheadOfWaiters, + final int lockTableIndex) + throws DatabaseException { + + nRequests.increment(); + + /* Get the target lock. */ + final Map lockTable = lockTables[lockTableIndex]; + Lock useLock = lockTable.get(lsn); + if (useLock == null) { + useLock = new ThinLockImpl(); + lockTable.put(lsn, useLock); + memoryBudget.updateLockMemoryUsage( + TOTAL_THINLOCKIMPL_OVERHEAD, lockTableIndex); + } + + /* + * Attempt to lock. Possible return values are NEW, PROMOTION, DENIED, + * EXISTING, WAIT_NEW, WAIT_PROMOTION, WAIT_RESTART. + */ + final LockAttemptResult lar = useLock.lock + (type, locker, nonBlockingRequest, jumpAheadOfWaiters, + memoryBudget, lockTableIndex); + if (lar.useLock != useLock) { + /* The lock mutated from ThinLockImpl to LockImpl. */ + useLock = lar.useLock; + lockTable.put(lsn, useLock); + /* We still have the overhead of the hashtable (locktable). */ + memoryBudget.updateLockMemoryUsage + (THINLOCK_MUTATE_OVERHEAD, lockTableIndex); + } + final LockGrantType lockGrant = lar.lockGrant; + boolean success = false; + + /* Was the attempt successful? */ + if ((lockGrant == LockGrantType.NEW) || + (lockGrant == LockGrantType.PROMOTION)) { + locker.addLock(lsn, type, lockGrant); + success = true; + } else if (lockGrant == LockGrantType.EXISTING) { + success = true; + } else if (lockGrant == LockGrantType.DENIED) { + /* Locker.lock will throw LockNotAvailableException. */ + } else { + nWaits.increment(); + } + return new LockAttemptResult(useLock, lockGrant, success); + } + + /** + * Performs the deadlock detection delay, if needed. + * + * This method must be called while synchronized on the locker because it + * calls locker.wait and finishLock. + * + * @return true if the locker is the owner after the delay. + */ + private boolean performDeadlockDetectionDelay( + final Long lsn, + final Locker locker, + final LockType type, + final LockGrantType grant, + final long timeout, + final long startTime) { + + /* + * If dd is enabled, and there is a dd delay, and the locker is not + * importunate, perform the delay here. See waitForLock for a + * discussion of importunate lockers and dd. 
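+         * The delay gives the current owner a brief chance to release the
+         * lock before we pay for a full deadlock-detection pass.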
+ */ + long ddDelay = envImpl.getDeadlockDetectionDelay(); + + if (!envImpl.getDeadlockDetection() || + locker.getImportunate() || + ddDelay == 0) { + + return false; + } + + final boolean waitForever = (timeout == 0); + + if (!waitForever) { + ddDelay = Math.min( + ddDelay, timeRemain(timeout, startTime)); + } + + try { + locker.wait(Math.max(1, ddDelay)); + } catch (InterruptedException IE) { + throw new ThreadInterruptedException(envImpl, IE); + } + + /* If the locker now owns the lock, we're done. */ + if (isOwner(lsn, locker, type)) { + finishLock(locker, lsn, type, grant); + return true; + } + + /* Otherwise, we'll do deadlock detection in waitForLock. */ + return false; + } + + /* + * @return DeadlockHandleResult dlhr, where + * dlhr.isOwner is true if the current locker owns the lock; + * dlhr.trueDeadlock is true if a true deadlock is detected and + * the current locker does not own the lock; + * dlhr.victim is not null if a victim is chosen and the victim + * is not the current locker; + * dlhr.dc will never be null, for simplicity, but it is useful only + * when a true deadlock is detected. + * @throws DeadlockException if the victim is the current locker + */ + private DeadlockResult checkAndHandleDeadlock( + final Locker locker, + final Long lsn, + final LockType type, + final long timeout, + DatabaseImpl database) { + + boolean isOwner = false; + boolean hasTrueDeadlock = false; + Locker targetedVictim = null; + DeadlockChecker dc; + for (int round = 0;; round++) { + dc = new DeadlockChecker(locker, lsn, type); + + if (dc.hasCycle()){ + if (dc.hasTrueDeadlock()) { + + targetedVictim = dc.chooseTargetedLocker(); + + if (targetedVictim != locker) { + /* + * There is a window where after we chose a victim, + * another thread notifies the same victim and the + * deadlock is handled. + * + * So if now the current locker owns the lock, we do + * no longer need to notify the victim. + * + * If the current locker does not own the lock, we will + * return the victim. The outer caller will notify the + * victim. Even if, when the outer caller notifies the + * victim, and the current locker owns the lock, this + * does not impact correctness. We just do one more + * redundant step to notify the victim, which has been + * already notified. + */ + if (isOwner(lsn, locker, type)) { + isOwner = true; + targetedVictim = null; + break; + } + + hasTrueDeadlock = true; + break; + } + + /* + * The targeted victim is this locker, so we will throw + * DeadlockException. But first we must call + * validateOwnership() to flush the locker from waiter + * list. + * + * Normally, validateOwnership() will first check whether + * the current locker owns the lock. But here there is no + * possibility that the current locker can own the lock. + */ + if (validateOwnership( + lsn, locker, type, + false /*getOwnersAndWaiters*/, + true /*flushFromWaiters*/, null, null)) { + + isOwner = true; + targetedVictim = null; + break; + } + + throw makeDeadlockException( + dc, locker, timeout, true /*isVictim*/, database); + + } else { + if (isOwner(lsn, locker, type)) { + isOwner = true; + break; + } + + if (round >= 10) { + break; + } + } + } else { + if (isOwner(lsn, locker, type)) { + isOwner = true; + } + break; + } + } + + return new DeadlockResult( + isOwner, hasTrueDeadlock, targetedVictim, dc); + } + + /* + * Notify the targetedVictim to cause it to abort, and wait for the + * deadlock to be broken. + * + * @return {done, lastDC}. 
done is true if the deadlock has been broken and + * currentLocker is now the owner. done is false in 2 cases: + * 1. a deadlock is no longer present; + * 2. a deadlock with a different victim is detected; + * When done is false, the caller should retry. + * + * @throws DeadlockException if the original deadlock (or a deadlock with + * the same victim) is not broken, and the timeout is exceeded. + */ + private Pair notifyVictim( + final Locker targetedVictim, + final Locker currentLocker, + final Long lsn, + final LockType type, + DeadlockChecker lastDC, + final long timeout, + final long startTime, + DatabaseImpl database) { + + final boolean waitForever = (timeout == 0); + + while (true) { + /* + * Check for a timeout first, to guarantee that we do not "live + * lock" when deadlocks are repeatedly created and resolved in + * other threads. + */ + if (!waitForever && timeRemain(timeout, startTime) <= 0) { + + /* + * The original timeout was exceeded. Flush the current locker + * from the waiters list, and throw DeadlockException using the + * last known deadlock info. + */ + if (validateOwnership( + lsn, currentLocker, type, + false /*getOwnersAndWaiters*/, + true /*flushFromWaiters*/, null, null)) { + + /* The currentLocker unexpectedly became the owner. */ + return new Pair<>(true, lastDC); + } + + throw makeDeadlockException( + lastDC, currentLocker, timeout, false /*isVictim*/, + database); + } + + /* + * Notify the victim and sleep for 1ms to allow the victim to + * wakeup and abort. + */ + synchronized (targetedVictim) { + targetedVictim.notify(); + } + + try { + Thread.sleep(1); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(envImpl, e); + } + + /* If currentLocker is the owner, the deadlock was broken. */ + if (isOwner(lsn, currentLocker, type)) { + return new Pair<>(true, lastDC); + } + + final DeadlockChecker dc = + new DeadlockChecker(currentLocker, lsn, type); + + /* + * If the deadlock was broken, but currentLocker is not the owner, + * then let the caller retry. + */ + if (!(dc.hasCycle() && dc.hasTrueDeadlock())) { + return new Pair<>(false, lastDC); + } + + /* We found a true deadlock. */ + lastDC = dc; + + /* + * If the victim is different, then the original deadlock was + * broken but there is now a new deadlock. Let the caller handle + * the new deadlock. + */ + if (dc.chooseTargetedLocker() != targetedVictim) { + return new Pair<>(false, lastDC); + } + + /* + * The victim is the same, so for simplicity we assume it is the + * same deadlock. Retry. + */ + } + } + + private LockConflictException makeDeadlockException( + final DeadlockChecker dc, + final Locker locker, + final long timeout, + final boolean isVictim, + final DatabaseImpl database){ + + StringBuilder msg = new StringBuilder(); + msg.append("Deadlock was detected. "); + if (isVictim) { + msg.append("Locker: \"").append(locker); + msg.append("\" was chosen randomly as the victim.\n"); + } else { + msg.append("Unable to break deadlock using random victim "); + msg.append("selection within the timeout interval. "); + msg.append("Current locker: \"").append(locker); + msg.append("\" must be aborted.\n"); + } + + if (database != null) { + msg.append("DB: ").append(database.getDebugName()).append(". 
"); + } + + msg.append("Timeout: "); + if (timeout == 0) { + msg.append("none.\n"); + } else { + msg.append(timeout).append("ms.\n"); + } + + msg.append(dc); + + final LockConflictException ex = new DeadlockException( + locker, msg.toString()); + + ex.setOwnerTxnIds(getTxnIds(dc.getOwnersForRootLock())); + ex.setWaiterTxnIds(getTxnIds(dc.getWaitersForRootLock())); + ex.setTimeoutMillis(timeout); + + return ex; + } + + private LockConflictException makeTimeoutException( + final boolean isLockNotTxnTimeout, + final Locker locker, + final long lsn, + final LockType type, + final LockGrantType grantType, + final Lock useLock, + final long timeout, + final long start, + final long now, + final DatabaseImpl database, + final Set owners, + final List waiters) { + + /* + * getTimeoutInfo synchronizes on the lock table. The timeout exception + * must be created outside that synchronization block because its ctor + * invalidates the txn, sometimes synchronizing on the buddy locker. + * The order of mutex acquisition must always be 1) locker, 2) lock + * table. + */ + final TimeoutInfo info = getTimeoutInfo( + isLockNotTxnTimeout, locker, lsn, type, grantType, useLock, + timeout, start, now, database, owners, waiters); + + final LockConflictException ex = + isLockNotTxnTimeout ? + new LockTimeoutException(locker, info.message) : + new TransactionTimeoutException(locker, info.message); + + ex.setOwnerTxnIds(getTxnIds(info.owners)); + ex.setWaiterTxnIds(getTxnIds(info.waiters)); + ex.setTimeoutMillis(timeout); + + return ex; + } + + static class TimeoutInfo { + final String message; + final Set owners; + final List waiters; + + TimeoutInfo(final String message, + final Set owners, + final List waiters) { + this.message = message; + this.owners = owners; + this.waiters = waiters; + } + } + + /** + * Create a informative lock or txn timeout message. + */ + abstract TimeoutInfo getTimeoutInfo( + boolean isLockNotTxnTimeout, + Locker locker, + long lsn, + LockType type, + LockGrantType grantType, + Lock useLock, + long timeout, + long start, + long now, + DatabaseImpl database, + Set owners, + List waiters); + + /** + * Do the real work of creating an lock or txn timeout message. + */ + TimeoutInfo getTimeoutInfoInternal( + final boolean isLockNotTxnTimeout, + final Locker locker, + final long lsn, + final LockType type, + final LockGrantType grantType, + final Lock useLock, + final long timeout, + final long start, + final long now, + final DatabaseImpl database, + final Set owners, + final List waiters) { + + /* + * Because we're accessing parts of the lock, need to have protected + * access to the lock table because things can be changing out from + * underneath us. This is a big hammer to grab for so long while we + * traverse the graph, but it's only when we have a deadlock and we're + * creating a debugging message. + * + * The alternative would be to handle ConcurrentModificationExceptions + * and retry until none of them happen. + */ + if (lockTableDump) { + System.out.println("++++++++++ begin lock table dump ++++++++++"); + for (int i = 0; i < nLockTables; i++) { + boolean success = false; + for (int j = 0; j < 3; j++) { + try { + final StringBuilder sb = new StringBuilder(); + dumpToStringNoLatch(sb, i); + System.out.println(sb.toString()); + success = true; + break; // for j... 
+                    } catch (ConcurrentModificationException CME) {
+                        // continue
+                    }
+                }
+                if (!success) {
+                    System.out.println("Couldn't dump locktable " + i);
+                }
+            }
+            System.out.println("++++++++++ end lock table dump ++++++++++");
+        }
+
+        final StringBuilder sb = new StringBuilder();
+        sb.append(isLockNotTxnTimeout ? "Lock" : "Transaction");
+        sb.append(" expired. Locker ").append(locker);
+        sb.append(": waited for lock");
+
+        if (database != null) {
+            sb.append(" on database=").append(database.getDebugName());
+        }
+        sb.append(" LockAddr:").append(System.identityHashCode(useLock));
+        sb.append(" LSN=").append(DbLsn.getNoFormatString(lsn));
+        sb.append(" type=").append(type);
+        sb.append(" grant=").append(grantType);
+        sb.append(" timeoutMillis=").append(timeout);
+        sb.append(" startTime=").append(start);
+        sb.append(" endTime=").append(now);
+        sb.append("\nOwners: ").append(owners);
+        sb.append("\nWaiters: ").append(waiters).append("\n");
+        return new TimeoutInfo(sb.toString(), owners, waiters);
+    }
+
+    private long[] getTxnIds(final Collection<LockInfo> c) {
+        final long[] ret = new long[c.size()];
+        final Iterator<LockInfo> iter = c.iterator();
+        int i = 0;
+        while (iter.hasNext()) {
+            final LockInfo info = iter.next();
+            ret[i++] = info.getLocker().getId();
+        }
+
+        return ret;
+    }
+
+    /**
+     * Release a lock and possibly notify any waiters that they have been
+     * granted the lock.
+     *
+     * @param lsn The LSN of the lock to release.
+     *
+     * @return true if the lock is released successfully, false if
+     * the lock is not currently being held.
+     */
+    public boolean release(final long lsn, final Locker locker)
+        throws DatabaseException {
+
+        final Set<Locker> newOwners =
+            releaseAndFindNotifyTargets(lsn, locker);
+
+        if (newOwners == null) {
+            return false;
+        }
+
+        if (newOwners.size() > 0) {
+
+            /*
+             * There is a new set of owners and/or there are restart
+             * waiters that should be notified.
+             */
+            for (Locker newOwner : newOwners) {
+                /* Use notifyAll to support multiple threads per txn. */
+                synchronized (newOwner) {
+                    newOwner.notifyAll();
+                }
+
+                assert EnvironmentImpl.maybeForceYield();
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Release the lock, and return the set of new owners to notify, if any.
+     *
+     * @return
+     * null if the lock does not exist or the given locker was not the owner,
+     * a non-empty set if owners should be notified after releasing,
+     * an empty set if no notification is required.
+     */
+    abstract Set<Locker> releaseAndFindNotifyTargets(long lsn,
+                                                     Locker locker)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of releaseAndFindNotifyTargets.
+     */
+    Set<Locker> releaseAndFindNotifyTargetsInternal(final long lsn,
+                                                    final Locker locker,
+                                                    final int lockTableIndex) {
+        final Map<Long, Lock> lockTable = lockTables[lockTableIndex];
+        Lock lock = lockTable.get(lsn);
+
+        if (lock == null) {
+            /* Lock doesn't exist. */
+            return null;
+        }
+
+        final Set<Locker> newOwners =
+            lock.release(locker, memoryBudget, lockTableIndex);
+
+        if (newOwners == null) {
+            /* Not owner. */
+            return null;
+        }
+
+        /* If it's not in use at all, remove it from the lock table.
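+         * Removing the entry credits back the memory budget overhead that
+         * was charged when the entry was created in attemptLockInternal.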
*/ + if ((lock.nWaiters() == 0) && + (lock.nOwners() == 0)) { + + lockTable.remove(lsn); + + if (lock.isThin()) { + memoryBudget.updateLockMemoryUsage + (REMOVE_TOTAL_THINLOCKIMPL_OVERHEAD, lockTableIndex); + } else { + memoryBudget.updateLockMemoryUsage + (REMOVE_TOTAL_LOCKIMPL_OVERHEAD, lockTableIndex); + } + } else { + + /** + * In the deadlock detection process, in order to check that a + * cycle still exists, we need to detect lock release. + * + * During release, we will create a new lock object. During + * deadlock detection, we compare the lock reference in the cycle + * with the current lock reference; if they are unequal, the lock + * was released. + */ + if (lock.isThin()) { + lock = new ThinLockImpl((ThinLockImpl)lock); + } else { + lock = new LockImpl((LockImpl)lock); + } + + lockTable.put(lsn, lock); + } + + return newOwners; + } + + /** + * Demote a lock from write to read. Call back to the owning locker to + * move this to its read collection. + * @param lsn The lock to release. + */ + abstract void demote(long lsn, Locker locker) + throws DatabaseException; + + /** + * Do the real work of demote. + */ + void demoteInternal(final long lsn, + final Locker locker, + final int lockTableIndex) { + + final Map lockTable = lockTables[lockTableIndex]; + final Lock useLock = lockTable.get(lsn); + /* Lock may or may not be currently held. */ + if (useLock != null) { + useLock.demote(locker); + locker.moveWriteToReadLock(lsn, useLock); + } + } + + /** + * Test the status of the lock on LSN. If any transaction holds any + * lock on it, true is returned. If no transaction holds a lock on it, + * false is returned. + * + * This method is only used by unit tests. + * + * @param lsn The LSN to check. + * @return true if any transaction holds any lock on the LSN. false + * if no lock is held by any transaction. + */ + abstract boolean isLocked(Long lsn) + throws DatabaseException; + + /** + * Do the real work of isLocked. + */ + boolean isLockedInternal(final Long lsn, final int lockTableIndex) { + + final Map lockTable = lockTables[lockTableIndex]; + final Lock entry = lockTable.get(lsn); + return (entry != null) && entry.nOwners() != 0; + + } + + /** + * Return true if this locker owns this a lock of this type on given node. + */ + abstract boolean isOwner(Long lsn, Locker locker, LockType type) + throws DatabaseException; + + /** + * Do the real work of isOwner. + */ + boolean isOwnerInternal(final Long lsn, + final Locker locker, + final LockType type, + final int lockTableIndex) { + + final Map lockTable = lockTables[lockTableIndex]; + final Lock entry = lockTable.get(lsn); + return entry != null && entry.isOwner(locker, type); + } + + /** + * Return true if this locker is waiting on this lock. + * + * This method is only used by unit tests. + */ + abstract boolean isWaiter(Long lsn, Locker locker) + throws DatabaseException; + + /** + * Do the real work of isWaiter. + */ + boolean isWaiterInternal(final Long lsn, + final Locker locker, + final int lockTableIndex) { + + final Map lockTable = lockTables[lockTableIndex]; + final Lock entry = lockTable.get(lsn); + return entry != null && entry.isWaiter(locker); + } + + /** + * Return the number of waiters for this lock. + */ + abstract int nWaiters(Long lsn) + throws DatabaseException; + + /** + * Do the real work of nWaiters. + */ + int nWaitersInternal(final Long lsn, final int lockTableIndex) { + + final Map lockTable = lockTables[lockTableIndex]; + final Lock entry = lockTable.get(lsn); + return entry == null ? 
-1 : entry.nWaiters();
+    }
+
+    /**
+     * Return the number of owners of this lock.
+     */
+    abstract int nOwners(Long lsn)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of nOwners.
+     */
+    int nOwnersInternal(final Long lsn, final int lockTableIndex) {
+
+        final Map<Long, Lock> lockTable = lockTables[lockTableIndex];
+        final Lock entry = lockTable.get(lsn);
+        return entry == null ? -1 : entry.nOwners();
+    }
+
+    /**
+     * @return the transaction that owns the write lock for this LSN, if any.
+     */
+    abstract Locker getWriteOwnerLocker(Long lsn)
+        throws DatabaseException;
+
+    /**
+     * Do the real work of getWriteOwnerLocker.
+     */
+    Locker getWriteOwnerLockerInternal(final Long lsn,
+                                       final int lockTableIndex) {
+
+        final Map<Long, Lock> lockTable = lockTables[lockTableIndex];
+        final Lock lock = lockTable.get(lsn);
+        if (lock == null) {
+            return null;
+        } else if (lock.nOwners() > 1) {
+            /* not a write lock */
+            return null;
+        } else {
+            return lock.getWriteOwnerLocker();
+        }
+    }
+
+    /*
+     * Check if the locker owns the lock. If the locker owns the lock, this
+     * function immediately returns true. If the locker cannot get ownership,
+     * according to the arguments, this function will choose to do the
+     * following:
+     *     get the current owners and waiters of the lock
+     *     flush this locker from the set of waiters
+     *
+     * Note that the ownership check and the flushing action must be done
+     * in a critical section to prevent any orphaning of the
+     * lock -- we must be in a critical section between the time that we check
+     * ownership and when we flush any waiters (SR #10103)
+     *
+     * Concretely, this function is called in the following places:
+     *
+     * In waitForLock():
+     *    After the wait for timeout. Here, only if the txn or lock times out,
+     *    we get the owners and waiters, which will be used in the timeout
+     *    exception. If Restart is true, we flush the locker from the waiter
+     *    list.
+     *
+     *    Before throwing DeadlockEx or TimeoutEx when the txn or lock times
+     *    out. Here we only need to flush the locker from the waiter list. We
+     *    do NOT need to get owners and waiters. This is because DeadlockEx
+     *    does not need owners/waiters information at all, and TimeoutEx uses
+     *    the owners/waiters information that is obtained when locker.wait()
+     *    wakes up.
+     *
+     * In notifyVictim():
+     *    After the victim notification fails due to timeout. Here we only
+     *    need to flush the locker from the waiter list to prepare for
+     *    throwing DeadlockEx.
+     *
+     * In checkAndHandleDeadlock():
+     *    After we choose the current locker itself as the victim, we want
+     *    to throw DeadlockEx to abort this locker. Here we also only
+     *    need to flush the locker from the waiter list to prepare for
+     *    throwing DeadlockEx.
+     *
+     * The real work is done in the following validateOwnershipInternal().
+     *
+     * @return true if locker is the owner.
+     */
+    abstract boolean validateOwnership(Long lsn,
+                                       Locker locker,
+                                       LockType type,
+                                       boolean getOwnersAndWaiters,
+                                       boolean flushFromWaiters,
+                                       Set<LockInfo> owners,
+                                       List<LockInfo> waiters)
+        throws DatabaseException;
+
+    boolean validateOwnershipInternal(final Long lsn,
+                                      final Locker locker,
+                                      final LockType type,
+                                      final boolean getOwnersAndWaiters,
+                                      final boolean flushFromWaiters,
+                                      final int lockTableIndex,
+                                      final Set<LockInfo> owners,
+                                      final List<LockInfo> waiters) {
+
+        if (isOwnerInternal(lsn, locker, type, lockTableIndex)) {
+            return true;
+        }
+
+        if (getOwnersAndWaiters) {
+            /*
+             * getOwnersInternal/getWaitersInternal may return null when the
+             * lock corresponding to the LSN no longer exists.
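+             * In that case the corresponding out-parameter is simply left
+             * unchanged.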
+             */
+            if (owners != null) {
+                final Set<LockInfo> localOwners =
+                    getOwnersInternal(lsn, lockTableIndex);
+
+                if (localOwners != null) {
+                    owners.addAll(localOwners);
+                }
+            }
+
+            if (waiters != null) {
+                final List<LockInfo> localWaiters =
+                    getWaitersInternal(lsn, lockTableIndex);
+
+                if (localWaiters != null) {
+                    waiters.addAll(localWaiters);
+                }
+            }
+        }
+
+        if (flushFromWaiters) {
+            final Lock entry = lockTables[lockTableIndex].get(lsn);
+            if (entry != null) {
+                entry.flushWaiter(locker, memoryBudget, lockTableIndex);
+            }
+        }
+
+        return false;
+    }
+
+    public abstract LockAttemptResult stealLock(Long lsn,
+                                                Locker locker,
+                                                LockType lockType)
+        throws DatabaseException;
+
+    LockAttemptResult stealLockInternal(final Long lsn,
+                                        final Locker locker,
+                                        final LockType lockType,
+                                        final int lockTableIndex)
+        throws DatabaseException {
+
+        final Lock entry = lockTables[lockTableIndex].get(lsn);
+        assert entry != null : "Lock " + DbLsn.getNoFormatString(lsn) +
+            " for txn " + locker.getId() + " does not exist";
+
+        /*
+         * Note that flushWaiter may do nothing, because the lock may have
+         * been granted to our locker after the prior call to attemptLock and
+         * before the call to this method.
+         */
+        entry.flushWaiter(locker, memoryBudget, lockTableIndex);
+
+        /* Remove all owners except for our owner. */
+        entry.stealLock(locker, memoryBudget, lockTableIndex);
+
+        /*
+         * The lock attempt normally succeeds, but can fail if the lock holder
+         * is non-preemptable.
+         */
+        return attemptLockInternal(
+            lsn, locker, lockType, false /*nonBlockingRequest*/,
+            false /*jumpAheadOfWaiters*/, lockTableIndex);
+    }
+
+    /**
+     * Called when a ThreadLocker is created.
+     */
+    void registerThreadLocker(final ThreadLocker locker) {
+        if (threadLockers == null) {
+            return;
+        }
+        final Thread thread = Thread.currentThread();
+        final TinyHashSet<ThreadLocker> set = threadLockers.get(thread);
+        if (set != null) {
+            final boolean added = set.add(locker);
+            assert added;
+        } else {
+            threadLockers.put(thread, new TinyHashSet<>(locker));
+        }
+    }
+
+    /**
+     * Called when a ThreadLocker is closed.
+     */
+    void unregisterThreadLocker(final ThreadLocker locker) {
+        if (threadLockers == null) {
+            return;
+        }
+        final Thread thread = Thread.currentThread();
+        final TinyHashSet<ThreadLocker> set = threadLockers.get(thread);
+        assert set != null;
+        final boolean removed = set.remove(locker);
+        assert removed;
+        if (set.size() == 0) {
+            threadLockers.remove(thread);
+        }
+    }
+
+    /**
+     * Returns an iterator over all thread lockers for the given thread, or
+     * an empty iterator if none.
+     */
+    Iterator<ThreadLocker> getThreadLockers(final Thread thread) {
+        if (threadLockers == null) {
+            return EMPTY_THREAD_LOCKERS.iterator();
+        }
+        final TinyHashSet<ThreadLocker> set = threadLockers.get(thread);
+        if (set == null) {
+            return EMPTY_THREAD_LOCKERS.iterator();
+        }
+        return set.iterator();
+    }
+
+    /**
+     * Statistics
+     */
+    LockStats lockStat(final StatsConfig config)
+        throws DatabaseException {
+
+        final StatGroup latchStats = new StatGroup(
+            "Locktable latches", "Shows lock table contention");
+
+        for (int i = 0; i < nLockTables; i++) {
+            latchStats.addAll(lockTableLatches[i].getStats());
+        }
+
+        /* Dump info about the lock table.
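+         * This walks every lock in each table, so it is skipped when
+         * StatsConfig.getFast() is set.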
*/ + final StatGroup tableStats = new StatGroup( + "Locktable", "The types of locks held in the lock table"); + + if (!config.getFast()) { + dumpLockTable(tableStats, false /*clear*/); + } + + return new LockStats(stats.cloneGroup(config.getClear()), + latchStats.cloneGroup(config.getClear()), + tableStats.cloneGroup(config.getClear())); + } + + public StatGroup loadStats(final StatsConfig config) { + + final StatGroup copyStats = stats.cloneGroup(config.getClear()); + + final StatGroup latchStats = new StatGroup( + "Locktable latches", "Shows lock table contention"); + + for (int i = 0; i < nLockTables; i++) { + latchStats.addAll(lockTableLatches[i].getStats()); + if (config.getClear()) { + lockTableLatches[i].clearStats(); + } + } + + /* Add all the latch stats to the whole stats group. */ + copyStats.addAll(latchStats); + + final StatGroup tableStats = new StatGroup( + "Locktable", "The types of locks held in the lock table"); + + if (!config.getFast()) { + dumpLockTable(tableStats, config.getClear()); + } + + /* Add all the lock table stats to the whole stats group. */ + copyStats.addAll(tableStats); + + return copyStats; + } + + /** + * Dump the lock table to the lock stats. + */ + abstract void dumpLockTable(StatGroup tableStats, boolean clear) + throws DatabaseException; + + /** + * Do the real work of dumpLockTableInternal. + */ + void dumpLockTableInternal(final StatGroup tableStats, + final int i, + final boolean clear) { + + final StatGroup oneTable = new StatGroup( + "Single lock table", "Temporary stat group"); + + final IntStat totalLocks = new IntStat(oneTable, LOCK_TOTAL); + final IntStat waiters = new IntStat(oneTable, LOCK_WAITERS); + final IntStat owners = new IntStat(oneTable, LOCK_OWNERS); + final IntStat readLocks = new IntStat(oneTable, LOCK_READ_LOCKS); + final IntStat writeLocks = new IntStat(oneTable, LOCK_WRITE_LOCKS); + + final Map lockTable = lockTables[i]; + totalLocks.add(lockTable.size()); + + for (final Lock lock : lockTable.values()) { + waiters.add(lock.nWaiters()); + owners.add(lock.nOwners()); + + /* Go through all the owners for a lock. */ + for (final LockInfo info : lock.getOwnersClone()) { + if (info.getLockType().isWriteLock()) { + writeLocks.increment(); + } else { + readLocks.increment(); + } + } + } + + tableStats.addAll(oneTable); + } + + /** + * Debugging + */ + public void dump() + throws DatabaseException { + + System.out.println(dumpToString()); + } + + private String dumpToString() + throws DatabaseException { + + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < nLockTables; i++) { + lockTableLatches[i].acquireExclusive(); + try { + dumpToStringNoLatch(sb, i); + } finally { + lockTableLatches[i].release(); + } + } + return sb.toString(); + } + + private void dumpToStringNoLatch(final StringBuilder sb, + final int whichTable) { + + final Map lockTable = lockTables[whichTable]; + + for (final Map.Entry entry : lockTable.entrySet()) { + final Long lsn = entry.getKey(); + final Lock lock = entry.getValue(); + + sb.append("---- LSN: "). + append(DbLsn.getNoFormatString(lsn)). + append("----\n"); + + sb.append(lock); + sb.append('\n'); + } + } + + /* + * Internal class for deadlock detection. + */ + private class DeadlockChecker { + + private final Locker rootLocker; + private final Long lsn; + private final LockType rootLocktype; + + /** + * The owners and waiters for the root Lock which will be contained in + * DeadlockException. 
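+         * Both are captured in a single synchronized block by
+         * getOwnerAndWaitersForRootLocker() so that they form a consistent
+         * snapshot.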
+ */ + private Set ownersForRootLock; + private List waitersForRootLock; + + private List cycle = new ArrayList<>(); + + /** + * Creates an instance of this class. + * + * @param locker the locker which is waiting for the lock + * @param lsn the lock ID which the locker is waiting for + * @param locktype the request type for the lock + */ + DeadlockChecker(final Locker locker, + final Long lsn, + final LockType locktype) { + this.rootLocker = locker; + this.lsn = lsn; + this.rootLocktype = locktype; + } + + Locker chooseTargetedLocker() { + /* + * Java 8 can directly use method ArrayList.sort + * (Comparator c), but Java 7 does not have this method. + */ + final CycleNode[] cycleNodeArray = cycle.toArray(new CycleNode[0]); + final CycleNodeComparator cnc = new CycleNodeComparator(); + Arrays.sort(cycleNodeArray, cnc); + return cycleNodeArray[getTargetedLockerIndex()].getLocker(); + } + + /* + * This method should guarantee that the same deadlock will return the + * same Locker index that will be targeted for abort. + */ + int getTargetedLockerIndex() { + long sum = 0; + int nLockers = 0; + for (final CycleNode cn : cycle) { + /* + * Sum the Lock pointers (System.identityHashCode(lock)) + * rather than the locker IDs and LSNs. Since the + * identityHashCode will change when a Lock is released + * and new Lock is allocated, this will ensure that we don't + * always pick the same victim for the same deadlock, if the + * same deadlock (same locks and lockers) happens repeatedly. + */ + sum += System.identityHashCode(cn.getLock()); + nLockers++; + } + + /* + * Note that System.identityHashCode may return a negative value + * on AIX, so we use Math.abs() below. + */ + return (int)(Math.abs(sum) % nLockers); + } + + boolean hasCycle() { + getOwnerAndWaitersForRootLocker(); + /* + * The rootLocker may own several locks, so we do not know + * which one involves in the deadlock cycle. So we just set + * the type of lock owned by rootLocker to null. + */ + return hasCycleInternal(rootLocker, lsn, rootLocktype, null); + } + + boolean hasCycleInternal(final Locker checkedLocker, + final Long lsn, + final LockType requestLocktype, + final LockType ownLockType) { + + final Lock checkedLock; + final Set ownersForCheckedLock; + + /* + * When entering this function, we think that the checkedLocker is + * waiting for the lsn(checkedLock), so we want to continue to get + * the owners(ownersForCheckedLock) of the lsn. + * + * But we need to first check whether now the checkedLocker owns + * the lsn(checkedLock). + */ + final int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + if (isOwnerInternal(lsn, checkedLocker, + requestLocktype, lockTableIndex)) { + return false; + } + final Map lockTable = lockTables[lockTableIndex]; + checkedLock = lockTable.get(lsn); + ownersForCheckedLock = getOwnersInternal(lsn, lockTableIndex); + } + + if (ownersForCheckedLock == null) { + return false; + } + + /* + * checkedLock may be null. If so, then ownersForCheckedLock must + * be null. So if ownersForCheckedLock is non-null, then + * checkedLock must be non-null. 
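+             * From here on this method performs a depth-first walk of the
+             * waits-for graph: push a CycleNode, recurse into each owner
+             * that is itself waiting, and pop the node when backtracking.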
+ */ + final CycleNode cn = new CycleNode( + checkedLocker, lsn, checkedLock, requestLocktype, ownLockType); + + cycle.add(cn); + + for (final LockInfo info : ownersForCheckedLock) { + final Locker locker = info.getLocker(); + final LockType ownType = info.getLockType(); + final Long waitsFor = locker.getWaitingFor(); + final LockType requestType = locker.getWaitingForType(); + /* + * The constraint "locker != checkedLocker" handles the + * following scenario: + * locker owns a read lock and now it wants to acquire a + * write lock, but it needs to wait. Then this locker will + * check for a deadlock and it will check the owner(s) of + * the lock. Because this locker itself is also the owner + * of this lock, it will choose itself as the "new owner" + * and then check the owners of the waitingFor of the + * "new owner". Then it will continue to check itself. This + * will cause java.lang.StackOverflowError + * + * Without this constraint, + * com.sleepycat.je.DatabaseConfigTest.testConfigConflict will + * get java.lang.StackOverflowError. + * + * With this constraint, we STILL can detect the following + * deadlock: + * locker1 locker2 + * + * hold read lock on lsn + * + * hold read lock on lsn + * + * acquire write lock(wait_promotion) + * + * acquire write lock(wait_promotion) + */ + if (locker != checkedLocker) { + if (locker == rootLocker) { + /* Found a cycle */ + return true; + } + + /* + * This handles the situation when a partial cycle exists. + * If we do not handle a "partial cycle", a locker may + * detect a deadlock, but the true deadlock exists in two + * other different lockers. Then, this locker will would + * infinitely invoke hasCycleInternal(). + */ + for (int index = 0; index < cycle.size(); index++) { + if (cycle.get(index).getLocker() == locker) { + /* Get partial cycle. It is the true deadlock. */ + cycle.subList(0, index).clear(); + return true; + } + } + + if (waitsFor != null && + requestType != null && + ownType != null) { + + if (hasCycleInternal( + locker, waitsFor, requestType, ownType)) { + return true; + } + } + } + } + + cycle.remove(cn); + return false; + } + + boolean hasTrueDeadlock() { + + for (final CycleNode cn : cycle) { + final Lock lock = cn.getLock(); + final Long lsn = cn.getLsn(); + final Lock realtimeLock; + + final int lockTableIndex = getLockTableIndex(lsn); + synchronized (lockTableLatches[lockTableIndex]) { + final Map lockTable = + lockTables[lockTableIndex]; + realtimeLock = lockTable.get(lsn); + } + + if (realtimeLock != lock) { + return false; + } + } + return true; + } + + /* + * In order to get consistent owners and waiters, we cannot call + * getOwners(lsn) and getWaiters(lsn) separately. We get them + * atomically while synchronized on the lock table. 
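+         * Synchronizing on the lock table latch here mirrors the locking
+         * discipline used in hasCycleInternal and hasTrueDeadlock above.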
+ */ + void getOwnerAndWaitersForRootLocker() { + final int lockTableIndex = getLockTableIndex(lsn); + synchronized (lockTableLatches[lockTableIndex]) { + + final Set localOwners = + getOwnersInternal(lsn, lockTableIndex); + + if (localOwners != null) { + ownersForRootLock = localOwners; + } + + final List localWaiters = + getWaitersInternal(lsn, lockTableIndex); + + if (localWaiters != null) { + waitersForRootLock = localWaiters; + } + } + } + + Set getOwnersForRootLock() { + return ownersForRootLock; + } + + List getWaitersForRootLock() { + return waitersForRootLock; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + Lock preLock = null; + Long preLsn = null; + + for (final CycleNode cn : cycle) { + + final Locker locker = cn.getLocker(); + final Lock lock = cn.getLock(); + final Long lsn = cn.getLsn(); + final LockType requestType = cn.getRequestLockType(); + final LockType ownType = cn.getOwnLockType(); + + if (preLock != null) { + sb.append("Locker: \""); + sb.append(locker).append("\" owns lock: "); + sb.append(System.identityHashCode(preLock)); + sb.append("(LSN: "); + sb.append(DbLsn.getNoFormatString(preLsn)); + sb.append(", ownedType: ").append(ownType).append("). "); + } + + sb.append("Locker: \""); + sb.append(locker).append("\" waits for lock: "); + sb.append(System.identityHashCode(lock)).append("(LSN: "); + sb.append(DbLsn.getNoFormatString(lsn)); + sb.append(", requestType: ").append(requestType).append(")."); + sb.append("\n"); + + preLock = lock; + preLsn = lsn; + } + + return sb.toString(); + } + + class CycleNodeComparator implements Comparator { + @Override + public int compare (Object obj1, Object obj2) { + final CycleNode nc1 = (CycleNode) obj1; + final CycleNode nc2 = (CycleNode) obj2; + return (int) (nc1.getLocker().getWaiterThreadId() - + nc2.getLocker().getWaiterThreadId()); + } + } + + /** + * Represents each node in the cycle. + */ + class CycleNode { + + /** + * The locker which waits on the lock. + */ + private final Locker locker; + + /** + * The lsn which represents the lock. It will not change if + * the lock is released. + */ + private final Long lsn; + + /** + * The Lock instance. Releasing a lock will re-create a new Lock + * object. By comparing it with the lock gotten by lsn, we can + * determine whether the lock was released during the deadlock + * detection process. + */ + private final Lock lock; + + /* The lock request type. */ + private LockType requestLockType; + + /* + * If this locker is involved in a cycle, is must own some lock. + */ + private LockType ownLockType; + + CycleNode(final Locker locker, + final Long lsn, + final Lock lock, + final LockType requestLockType, + final LockType ownLockType) { + this.locker = locker; + this.lsn = lsn; + this.lock = lock; + this.requestLockType = requestLockType; + this.ownLockType = ownLockType; + } + + private Locker getLocker() { + return locker; + } + + private Long getLsn() { + return lsn; + } + + private Lock getLock() { + return lock; + } + + private LockType getRequestLockType() { + return requestLockType; + } + + private LockType getOwnLockType() { + return ownLockType; + } + } + } + + /** + * Returned by waitForLock(). 
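+     * Bundles the final LockAttemptResult with the victim to notify (null
+     * if the lock was simply acquired) and the DeadlockChecker that
+     * produced the analysis.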
+ */ + private static class WaitForLockResult { + private final Locker targetVictim; + private final DeadlockChecker dc; + private final LockAttemptResult result; + + WaitForLockResult(final Locker targetVictim, + final DeadlockChecker dc, + final LockAttemptResult result) { + this.targetVictim = targetVictim; + this.dc = dc; + this.result = result; + } + + Locker getVictim() { + return targetVictim; + } + + DeadlockChecker getDeadLockChecker() { + return dc; + } + + LockAttemptResult getResult() { + return result; + } + } + + /** + * The result of checking for deadlocks, and handling a deadlock if one + * is found. + */ + private static class DeadlockResult { + private final boolean isOwner; + private final boolean trueDeadlock; + private final Locker victim; + private final DeadlockChecker dc; + + DeadlockResult( + final boolean isOwner, + final boolean trueDeadlock, + final Locker victim, + final DeadlockChecker dc) { + + this.isOwner = isOwner; + this.trueDeadlock = trueDeadlock; + this.victim = victim; + this.dc = dc; + } + } +} diff --git a/src/com/sleepycat/je/txn/LockResult.java b/src/com/sleepycat/je/txn/LockResult.java new file mode 100644 index 0000000..73688e0 --- /dev/null +++ b/src/com/sleepycat/je/txn/LockResult.java @@ -0,0 +1,87 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.utilint.DbLsn; + +/** + * LockResult is the return type of Locker.lock(). It encapsulates a + * LockGrantType (the return type of LockManager.lock()) and a WriteLockInfo. + * + * The WriteLockInfo field is non-null if (a) the locker is transactional, and + * (b) the request was for a WRITE or WRITE_RANGE lock, and (c) the request was + * not a non-blocking request that got denied. If so, the WriteLockInfo is + * either a newly created one or a pre-existing one if the same locker had + * write-locked the same LSN before. + */ +public class LockResult { + private LockGrantType grant; + private WriteLockInfo wli; + + /* Made public for unittests */ + public LockResult(LockGrantType grant, WriteLockInfo wli) { + this.grant = grant; + this.wli = wli; + } + + public LockGrantType getLockGrant() { + return grant; + } + + public WriteLockInfo getWriteLockInfo() { + return wli; + } + + /* + * Method called from CursorImpl.LockStanding.prepareForUpdate() + */ + public void setAbortInfo( + long abortLsn, + boolean abortKD, + byte[] abortKey, + byte[] abortData, + long abortVLSN, + int abortExpiration, + boolean abortExpirationInHours, + DatabaseImpl db) { + + /* + * Do not overwrite abort info if this locker has logged the + * associated record before. 
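+         * getNeverLocked() is the guard: it is true only until the first
+         * time this locker logs the record, after which the original abort
+         * info must be preserved for rollback.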
+ */ + if (wli != null && wli.getNeverLocked()) { + if (abortLsn != DbLsn.NULL_LSN) { + wli.setAbortLsn(abortLsn); + wli.setAbortKnownDeleted(abortKD); + wli.setAbortKey(abortKey); + wli.setAbortData(abortData); + wli.setAbortVLSN(abortVLSN); + wli.setAbortExpiration(abortExpiration, abortExpirationInHours); + wli.setDb(db); + } + wli.setNeverLocked(false); + } + } + + /** + * Used to copy write lock info when an LSN is changed. + */ + public void copyWriteLockInfo(WriteLockInfo fromInfo) { + if (fromInfo != null && wli != null) { + wli.copyAllInfo(fromInfo); + wli.setNeverLocked(false); + } + } +} diff --git a/src/com/sleepycat/je/txn/LockStatDefinition.java b/src/com/sleepycat/je/txn/LockStatDefinition.java new file mode 100644 index 0000000..b3e91fb --- /dev/null +++ b/src/com/sleepycat/je/txn/LockStatDefinition.java @@ -0,0 +1,95 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * Per-stat Metadata for JE lock statistics. + */ +public class LockStatDefinition { + + public static final String GROUP_NAME = "Locks"; + public static final String GROUP_DESC = + "Record locking is used to provide transactional capabilities."; + + public static final String LOCK_READ_LOCKS_NAME = + "nReadLocks"; + public static final String LOCK_READ_LOCKS_DESC = + "Number of read locks currently held."; + public static final StatDefinition LOCK_READ_LOCKS = + new StatDefinition( + LOCK_READ_LOCKS_NAME, + LOCK_READ_LOCKS_DESC, + StatType.CUMULATIVE); + + public static final String LOCK_WRITE_LOCKS_NAME = + "nWriteLocks"; + public static final String LOCK_WRITE_LOCKS_DESC = + "Number of write locks currently held."; + public static final StatDefinition LOCK_WRITE_LOCKS = + new StatDefinition( + LOCK_WRITE_LOCKS_NAME, + LOCK_WRITE_LOCKS_DESC, + StatType.CUMULATIVE); + + public static final String LOCK_OWNERS_NAME = + "nOwners"; + public static final String LOCK_OWNERS_DESC = + "Number of lock owners in lock table."; + public static final StatDefinition LOCK_OWNERS = + new StatDefinition( + LOCK_OWNERS_NAME, + LOCK_OWNERS_DESC, + StatType.CUMULATIVE); + + public static final String LOCK_REQUESTS_NAME = + "nRequests"; + public static final String LOCK_REQUESTS_DESC = + "Number of times a lock request was made."; + public static final StatDefinition LOCK_REQUESTS = + new StatDefinition( + LOCK_REQUESTS_NAME, + LOCK_REQUESTS_DESC); + + public static final String LOCK_TOTAL_NAME = + "nTotalLocks"; + public static final String LOCK_TOTAL_DESC = + "Number of locks current in lock table."; + public static final StatDefinition LOCK_TOTAL = + new StatDefinition( + LOCK_TOTAL_NAME, + LOCK_TOTAL_DESC, + StatType.CUMULATIVE); + + public static final String LOCK_WAITS_NAME = + "nWaits"; + public static final String LOCK_WAITS_DESC = + "Number of times a lock request blocked."; + public static final StatDefinition LOCK_WAITS = + new StatDefinition( + LOCK_WAITS_NAME, + LOCK_WAITS_DESC); + + public static final String 
LOCK_WAITERS_NAME = + "nWaiters"; + public static final String LOCK_WAITERS_DESC = + "Number of transactions waiting for a lock."; + public static final StatDefinition LOCK_WAITERS = + new StatDefinition( + LOCK_WAITERS_NAME, + LOCK_WAITERS_DESC, + StatType.CUMULATIVE); +} diff --git a/src/com/sleepycat/je/txn/LockType.java b/src/com/sleepycat/je/txn/LockType.java new file mode 100644 index 0000000..bb56934 --- /dev/null +++ b/src/com/sleepycat/je/txn/LockType.java @@ -0,0 +1,211 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +/** + * LockType is a type safe enumeration of all lock types. Methods on LockType + * objects can be used to determine whether a type conflicts with another + * type or can be upgraded to another type. + */ +public class LockType { + + /** + * Lock types. Indexes must be kept manually synchronized in the matrixes + * below. + */ + public static final LockType READ = + new LockType(0, false, "READ"); + public static final LockType WRITE = + new LockType(1, true, "WRITE"); + public static final LockType RANGE_READ = + new LockType(2, false, "RANGE_READ"); + public static final LockType RANGE_WRITE = + new LockType(3, true, "RANGE_WRITE"); + public static final LockType RANGE_INSERT = + new LockType(4, false, "RANGE_INSERT"); + + /** + * NONE is used for requesting a dirty read and does not appear in the + * conflict or upgrade matrices. + */ + public static final LockType NONE = + new LockType(5, false, "NONE"); + + /** + * RESTART is used for waiting for a restart and does not appear in the + * conflict or upgrade matrices. + */ + public static final LockType RESTART = + new LockType(6, false, "RESTART"); + + /** + * Whenever the conflict matrix is changed be sure to update this. For + * every type that can cause a RESTART result call setCausesRestart. This + * could have been determined programmatically but I chose to maintain it + * manually to avoid extra code size. + */ + static { + RANGE_READ.setCausesRestart(); + RANGE_WRITE.setCausesRestart(); + } + + /** + * Lock conflict matrix. 
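+ * Indexed as conflictMatrix[heldType.index][requestedType.index]. For
+ * example, a held WRITE blocks a requested READ, while a held
+ * RANGE_INSERT allows it, since RANGE_INSERT locks the key following
+ * the insertion key rather than the insertion key itself.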
+ * @see #getConflict + */ + private static LockConflict[][] conflictMatrix = { + { // READ is held and there is a request for: + LockConflict.ALLOW, // READ + LockConflict.BLOCK, // WRITE + LockConflict.ALLOW, // RANGE_READ + LockConflict.BLOCK, // RANGE_WRITE + LockConflict.ALLOW, // RANGE_INSERT + }, + { // WRITE is held and there is a request for: + LockConflict.BLOCK, // READ + LockConflict.BLOCK, // WRITE + LockConflict.BLOCK, // RANGE_READ + LockConflict.BLOCK, // RANGE_WRITE + LockConflict.ALLOW, // RANGE_INSERT + }, + { // RANGE_READ is held and there is a request for: + LockConflict.ALLOW, // READ + LockConflict.BLOCK, // WRITE + LockConflict.ALLOW, // RANGE_READ + LockConflict.BLOCK, // RANGE_WRITE + LockConflict.BLOCK, // RANGE_INSERT + }, + { // RANGE_WRITE is held and there is a request for: + LockConflict.BLOCK, // READ + LockConflict.BLOCK, // WRITE + LockConflict.BLOCK, // RANGE_READ + LockConflict.BLOCK, // RANGE_WRITE + LockConflict.BLOCK, // RANGE_INSERT + }, + { // RANGE_INSERT is held and there is a request for: + LockConflict.ALLOW, // READ + LockConflict.ALLOW, // WRITE + LockConflict.RESTART, // RANGE_READ + LockConflict.RESTART, // RANGE_WRITE + LockConflict.ALLOW, // RANGE_INSERT + }, + }; + + /** + * Lock upgrade matrix. + * @see #getUpgrade + */ + private static LockUpgrade[][] upgradeMatrix = { + { // READ is held and there is a request for: + LockUpgrade.EXISTING, // READ + LockUpgrade.WRITE_PROMOTE, // WRITE + LockUpgrade.RANGE_READ_IMMED, // RANGE_READ + LockUpgrade.RANGE_WRITE_PROMOTE, // RANGE_WRITE + LockUpgrade.ILLEGAL, // RANGE_INSERT + }, + { // WRITE is held and there is a request for: + LockUpgrade.EXISTING, // READ + LockUpgrade.EXISTING, // WRITE + LockUpgrade.RANGE_WRITE_IMMED, // RANGE_READ + LockUpgrade.RANGE_WRITE_IMMED, // RANGE_WRITE + LockUpgrade.ILLEGAL, // RANGE_INSERT + }, + { // RANGE_READ is held and there is a request for: + LockUpgrade.EXISTING, // READ + LockUpgrade.RANGE_WRITE_PROMOTE, // WRITE + LockUpgrade.EXISTING, // RANGE_READ + LockUpgrade.RANGE_WRITE_PROMOTE, // RANGE_WRITE + LockUpgrade.ILLEGAL, // RANGE_INSERT + }, + { // RANGE_WRITE is held and there is a request for: + LockUpgrade.EXISTING, // READ + LockUpgrade.EXISTING, // WRITE + LockUpgrade.EXISTING, // RANGE_READ + LockUpgrade.EXISTING, // RANGE_WRITE + LockUpgrade.ILLEGAL, // RANGE_INSERT + }, + { // RANGE_INSERT is held and there is a request for: + LockUpgrade.ILLEGAL, // READ + LockUpgrade.ILLEGAL, // WRITE + LockUpgrade.ILLEGAL, // RANGE_READ + LockUpgrade.ILLEGAL, // RANGE_WRITE + LockUpgrade.EXISTING, // RANGE_INSERT + }, + }; + + private int index; + private boolean write; + private String name; + private boolean causesRestart; + + /** + * No lock types can be defined outside this class. + */ + private LockType(int index, boolean write, String name) { + this.index = index; + this.write = write; + this.name = name; + } + + /** + * Returns true if this is a WRITE or RANGE_WRITE lock. For RANGE_INSERT, + * false is returned because RANGE_INSERT is used to lock the key following + * the insertion key, not the insertion key itself. + */ + public final boolean isWriteLock() { + return write; + } + + /** + * Specifies that when this type is requested it can result in + * LockGrantType.RESTART. + */ + private void setCausesRestart() { + causesRestart = true; + } + + /** + * Returns whether when this type is requested it can result in + * LockGrantType.RESTART. 
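+ * Only RANGE_READ and RANGE_WRITE can (see the static initializer
+ * above): requesting either type while RANGE_INSERT is held yields
+ * LockConflict.RESTART in the conflict matrix.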
+ */ + final boolean getCausesRestart() { + return causesRestart; + } + + /** + * Returns the LockConfict that results when this lock type is held and the + * given lock type is requested by another locker. + */ + LockConflict getConflict(LockType requestedType) { + return conflictMatrix[index][requestedType.index]; + } + + /** + * Returns the LockUpgrade that results when this lock type is held and the + * given lock type is requested by the same locker. + * + *
        For the returned LockUpgrade object, getIllegal will never return + * true because this method fires an assertion if getIllegal returns true. + */ + LockUpgrade getUpgrade(LockType requestedType) { + LockUpgrade upgrade = upgradeMatrix[index][requestedType.index]; + assert !upgrade.getIllegal() : toString() + " to " + requestedType; + return upgrade; + } + + @Override + public String toString() { + return name; + } +} diff --git a/src/com/sleepycat/je/txn/LockUpgrade.java b/src/com/sleepycat/je/txn/LockUpgrade.java new file mode 100644 index 0000000..f34a969 --- /dev/null +++ b/src/com/sleepycat/je/txn/LockUpgrade.java @@ -0,0 +1,101 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +/** + * LockUpgrade is a type safe enumeration of lock upgrade types. Methods on + * LockUpgrade objects are used to determine whether an upgrade is needed and, + * if so, how it should be handled. + */ +public class LockUpgrade { + + /* + * Due to static initialization circularities between LockUpgrade and + * LockType, the LockUpgrade.upgrade field of each of these LockUpgrades + * will get filled in by a piece of static code in EnvironmentImpl. + * [#16496] + */ + public static final LockUpgrade ILLEGAL = + new LockUpgrade(null, false, true); + + public static final LockUpgrade EXISTING = + new LockUpgrade(null, false, false); + + public static final LockUpgrade WRITE_PROMOTE = + new LockUpgrade(null /*LockType.WRITE*/, true, false); + + public static final LockUpgrade RANGE_READ_IMMED = + new LockUpgrade(null /*LockType.RANGE_READ*/, false, false); + + public static final LockUpgrade RANGE_WRITE_IMMED = + new LockUpgrade(null /*LockType.RANGE_WRITE*/, false, false); + + public static final LockUpgrade RANGE_WRITE_PROMOTE = + new LockUpgrade(null /*LockType.RANGE_WRITE*/, true, false); + + private LockType upgrade; + private boolean promotion; + private boolean illegal; + + /** + * No upgrade types can be defined outside this class. + */ + private LockUpgrade(LockType upgrade, boolean promotion, boolean illegal) { + this.upgrade = upgrade; + this.promotion = promotion; + this.illegal = illegal; + } + + /** + * This method is called to determine whether the upgrade is illegal. + * If true is returned, an internal error has occurred. This should never + * happen since RANGE_INSERT should never be requested along with other + * locks by the same locker; a separate locker is used for RANGE_INSERT + * locks. + */ + boolean getIllegal() { + return illegal; + } + + /** + * This method is called first to determine whether an upgrade to a new + * lock type is needed, and what the new lock type should be. If null is + * returned, the existing lock should be unchanged and no upgrade is + * needed. If non-null is returned, an upgrade to the returned type should + * be performed; in this case, call getPromotion to determine how to do the + * upgrade. 
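+ *
+ * A sketch of the intended two-step protocol (the caller-side handling
+ * shown is illustrative, not part of this class):
+ * <pre>{@code
+ * LockUpgrade u = heldType.getUpgrade(requestedType);
+ * LockType newType = u.getUpgrade();
+ * if (newType != null) {
+ *     if (u.getPromotion()) {
+ *         // true promotion (read to write): may conflict and wait
+ *     } else {
+ *         // simple type change: granted immediately, never conflicts
+ *     }
+ *     // then change the held lock's type to newType
+ * }
+ * }</pre>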
+ */ + LockType getUpgrade() { + return upgrade; + } + + /** + * @hidden + */ + public void setUpgrade(LockType upgrade) { + this.upgrade = upgrade; + } + + /** + * This method is called when getUpgrade returns non-null to determine + * whether the upgrade is a true promotion or can be granted immediately. + * A true promotion is a change from read to write locking, and may require + * waiting if the write lock conflicts with a lock held by another locker. + * An upgrade that is not a promotion is just a type change, and never + * causes a lock conflict. + */ + boolean getPromotion() { + return promotion; + } +} diff --git a/src/com/sleepycat/je/txn/Locker.java b/src/com/sleepycat/je/txn/Locker.java new file mode 100644 index 0000000..cfcb098 --- /dev/null +++ b/src/com/sleepycat/je/txn/Locker.java @@ -0,0 +1,936 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockNotAvailableException; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.BINReference; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Locker instances are JE's route to locking and transactional support. This + * class is the abstract base class for BasicLocker, ThreadLocker, Txn, + * MasterTxn and ReadonlyTxn. Locker instances are in fact only a transaction + * shell to get to the lock manager, and don't guarantee transactional + * semantics. + * + * Txn (includes Txns marked autoTxn) MasterTxn and ReadonlyTxn instances are + * truly transactional. They have potentially different transaction begin and + * end behaviors. + */ +public abstract class Locker { + + protected EnvironmentImpl envImpl; + protected LockManager lockManager; + + protected long id; // transaction id + private boolean readUncommittedDefault; // read-uncommitted is default + + /* Timeouts */ + boolean defaultNoWait; // true for non-blocking + private long lockTimeoutMillis; // timeout period for lock, in ms + private long txnTimeoutMillis; // timeout period for txns, in ms + private long txnStartMillis; // for txn timeout determination + + /* The lsn of the lock that this locker is waiting for. */ + private Long waitingFor; + + /* The LockType corresponding to waitingFor. */ + private LockType waitingForType; + + /* The current thread ID when waitingFor is set. */ + private long waiterThreadId; + + /* + * DeleteInfo refers to BINReferences that should be sent to the + * INCompressor for asynchronous compressing after the transaction ends. 
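+ * The map is keyed by BIN node ID, with one BINReference per BIN, and
+ * is created lazily in addDeleteInfo to conserve memory.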
+ */ + Map deleteInfo; + + /** + * The thread that created this locker. Used for debugging, and by the + * ThreadLocker subclass. Note that thread may be null if the Locker is + * instantiated by reading the log. + */ + protected Thread thread; + + /** + * Set to false when close() is called. After that point no other locker + * operations should occur. + */ + private boolean isOpen = true; + + /** + * True if my locks can be preempted/stolen. + */ + private boolean preemptable = true; + + /** + * Non-null if a lock has been stolen from this locker by the HA replayer. + */ + private RuntimeException preemptedCause; + + /** + * Non-null if this locker is replacing another locker that is in the + * process of closing because a cursor is being moved. + */ + private Locker closingLocker; + + /** + * Create a locker id. This constructor is called very often, so it should + * be as streamlined as possible. It should never be called directly, + * because the mandatedId mechanism only works if the generateId() method + * is overridden to use the mandatedId value. + * + * @param readUncommittedDefault if true, this transaction does + * read-uncommitted by default + * @param noWait if true, non-blocking lock requests are used. + */ + protected Locker(EnvironmentImpl envImpl, + boolean readUncommittedDefault, + boolean noWait, + long mandatedId) { + + TxnManager txnManager = envImpl.getTxnManager(); + this.lockManager = txnManager.getLockManager(); + this.id = generateId(txnManager, mandatedId); + this.envImpl = envImpl; + this.readUncommittedDefault = readUncommittedDefault; + this.waitingFor = null; + this.waitingForType = null; + this.waiterThreadId = -1; + + /* get the default lock timeout. */ + defaultNoWait = noWait; + lockTimeoutMillis = getInitialLockTimeout(); + + /* + * Check the default txn timeout. If non-zero, remember the txn start + * time. + */ + txnTimeoutMillis = envImpl.getTxnTimeout(); + + if (txnTimeoutMillis != 0) { + txnStartMillis = System.currentTimeMillis(); + } else { + txnStartMillis = 0; + } + + /* Save the thread used to create the locker. */ + thread = Thread.currentThread(); + + /* Do lazy initialization of deleteInfo, to conserve memory. */ + } + + /** + * For reading from the log. + */ + Locker() { + defaultNoWait = false; + } + + protected long getInitialLockTimeout() { + return envImpl.getLockTimeout(); + } + + public EnvironmentImpl getEnvironment() { + return envImpl; + } + + /** + * A Locker has to generate its next id. Some subtypes, like BasicLocker, + * have a single id for all instances because they are never used for + * recovery. Other subtypes ask the txn manager for an id or use a + * specific, mandated id. + */ + protected abstract long generateId(TxnManager txnManager, long mandatedId); + + /** + * @return the transaction's id. + */ + public long getId() { + return id; + } + + /** + * @return the default no-wait (non-blocking) setting. + */ + public boolean getDefaultNoWait() { + return defaultNoWait; + } + + public void setDefaultNoWait(boolean noWait) { + defaultNoWait = noWait; + } + + /** + * Get the lock timeout period for this locker, in milliseconds + * + * WARNING: Be sure to always access the timeout with this accessor, since + * it is overridden in BuddyLocker. + */ + public synchronized long getLockTimeout() { + return lockTimeoutMillis; + } + + /** + * Set the lock timeout period for any locks in this transaction, + * in milliseconds. + * + * @param timeout The timeout value for the transaction lifetime, in + * milliseconds. 
A value of 0 disables timeouts for the transaction. + * + * @throws IllegalArgumentException via Transaction.setLockTimeout + */ + public synchronized void setLockTimeout(long timeout) { + + if (timeout < 0) { + throw new IllegalArgumentException + ("the timeout value cannot be negative"); + } else if (timeout > Math.pow(2, 32)) { + throw new IllegalArgumentException + ("the timeout value cannot be greater than 2^32"); + } + + lockTimeoutMillis = timeout; + } + + /** + * Set the timeout period for this transaction, in milliseconds. + * + * @param timeout The timeout value for the transaction lifetime, in + * microseconds. A value of 0 disables timeouts for the transaction. + * + * @throws IllegalArgumentException via Transaction.setLockTimeout + */ + public synchronized void setTxnTimeout(long timeout) { + + if (timeout < 0) { + throw new IllegalArgumentException + ("the timeout value cannot be negative"); + } else if (timeout > Math.pow(2, 32)) { + throw new IllegalArgumentException + ("the timeout value cannot be greater than 2^32"); + } + + txnTimeoutMillis = timeout; + if (txnTimeoutMillis != 0) { + txnStartMillis = System.currentTimeMillis(); + } else { + txnStartMillis = 0; + } + } + + /** + * @return true if transaction was created with read-uncommitted as a + * default. + */ + public boolean isReadUncommittedDefault() { + return readUncommittedDefault; + } + + void setWaitingFor(Long lsn, LockType type) { + waitingFor = lsn; + waitingForType = type; + waiterThreadId = Thread.currentThread().getId(); + } + + void clearWaitingFor() { + waitingFor = null; + waitingForType = null; + waiterThreadId = -1; + } + + Long getWaitingFor() { + return waitingFor; + } + + LockType getWaitingForType() { + return waitingForType; + } + + long getWaiterThreadId() { + return waiterThreadId; + } + + /** + * Set the state of a transaction to abort-only. Should ONLY be called + * by OperationFailureException. + */ + public void setOnlyAbortable(OperationFailureException cause) { + /* no-op unless Txn. */ + } + + /** + * Set the state of a transaction's IMPORTUNATE bit. + */ + public void setImportunate(boolean importunate) { + /* no-op unless Txn. */ + } + + /** + * Get the state of a transaction's IMPORTUNATE bit. + */ + public boolean getImportunate() { + return false; + } + + /** + * Allows/disallows my locks from being stolen/preemted. + */ + public void setPreemptable(boolean preemptable) { + this.preemptable = preemptable; + } + + /** + * Returns whether my locks can be stolen/preemted. + */ + public boolean getPreemptable() { + return preemptable; + } + + /** + * Called when a lock is stolen from this locker by the HA replayer. + */ + public void setPreempted() { + + /* + * Record the stack trace when a lock is stolen. This will provide + * more "cause" information when it is wrapped in a + * LockPreemptedException that is thrown later -- see checkPreempted. + */ + preemptedCause = new RuntimeException + ("Lock was preempted by the replication replayer"); + } + + /** + * Called when obtaining a lock to cause a LockPreemptedException to be + * thrown if a lock was preempted earlier. + * + * This operation is split into two methods, checkPreempted and + * throwIfPreempted, so that Txn.checkPreempted can call throwIfPreempted + * for all its BuddyLockers without causing an infinite recursion. This + * method is overridden by BuddyLocker to forward the call to its parent + * buddy (the Txn), and by Txn to check all its child buddies. 
+ * + * @param allowPreemptedLocker is a locker that is being closed as the + * result of a cursor move operation. If the operation is successful then + * allowPreemptedLocker will be closed, and the fact that a lock has been + * stolen from allowPreemptedLocker can be ignored. + */ + public void checkPreempted(final Locker allowPreemptedLocker) + throws OperationFailureException { + + throwIfPreempted(allowPreemptedLocker); + } + + /** + * Called by checkPreempted to cause a LockPreemptedException to be thrown + * if a lock was preempted earlier. Creating the LockPreemptedException + * sets the txn to abort-only. + * + * @see #checkPreempted + */ + final void throwIfPreempted(final Locker allowPreemptedLocker) + throws OperationFailureException { + + if (this != allowPreemptedLocker && + preemptedCause != null) { + throw envImpl.createLockPreemptedException(this, preemptedCause); + } + } + + /** For unit testing. */ + final boolean isPreempted() { + return (preemptedCause != null); + } + + /** + * This method is called to set the closingLocker when a cursor has been + * duplicated prior to being moved. The new locker is informed of the old + * locker, so that a preempted lock taken by the old locker can be ignored. + * When the operation is complete, this method is called to clear the + * closingLocker so that a reference to the old closed locker is not held + * by this object. [#16513] + * + * @param closingLocker the old locker that will be closed if the new + * cursor (using this locker) is moved successfully. + */ + public void setClosingLocker(final Locker closingLocker) { + this.closingLocker = closingLocker; + } + + /** + * See ThreadLocker.allowMultithreadedAccess. + */ + boolean setAllowMultithreadedAccess(boolean allow) { + /* Do nothing by default. Is overridden by ThreadLocker. */ + return false; + } + + protected abstract void checkState(boolean ignoreCalledByAbort) + throws DatabaseException; + + /** + * Overridden to perform actions in a non-transactional cursor when it is + * opened, for example, ReplicaThreadLocker performs consistency checks. + */ + public void openCursorHook(DatabaseImpl dbImpl) { + /* Do nothing. */ + } + + /** + * Returns whether a transaction is method indicates whether the txn is + * part of the rep stream. + * + * A replicated txn must be used for writing to a replicated DB, and a + * non-replicated txn must be used for writing to a non-replicated DB. + * This is critical for avoiding corruption when HA failovers occur + * [#23234] [#23330]. + * + * See guard in LN.logInternal. + */ + public boolean isReplicated() { + return TxnManager.isReplicatedTxn(id); + } + + /** + * Returns true if writes may only be to non-replicated DBs using this + * locker, or false if writes may only be to replicated DBs. + * + * By default (this implementation) local-write is true, since it is + * allowed for all non-txnal lockers and for all lockers in a standalone + * environment. This method is overridden and returns false for + * for user transactions in a replicated environment that are not + * explicitly configured for local-write. + * + * This method is used to describe a locker's configured usage for checking + * the validity of an API write operation. This is checked by Cursor + * methods at the beginning of each write operation. + */ + public boolean isLocalWrite() { + return true; + } + + /** + * Returns whether writes are prohibited using this locker. + */ + public boolean isReadOnly() { + return false; + } + + /* + * Obtain and release locks. 
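+ * (lockInternal, lock, nonBlockingLock, releaseLock, demoteLock,
+ * lockAfterLsnChange)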
+ */ + + /** + * Abstract method to a blocking or non-blocking lock of the given type on + * the given LSN. Unlike the lock() method, this method does not throw + * LockNotAvailableException and can therefore be used by nonBlockingLock + * to probe for a lock without the overhead of an exception stack trace. + * + * @param lsn is the node to lock. + * + * @param lockType is the type of lock to request. + * + * @param noWait is true to override the defaultNoWait setting. If true, + * or if defaultNoWait is true, throws LockNotAvailableException if the + * lock cannot be granted without waiting. + * + * @param jumpAheadOfWaiters grant the lock before other waiters, if any. + * + * @param database is the database containing lsn. + * + * @throws LockConflictException if a blocking lock could not be acquired. + */ + abstract LockResult lockInternal(long lsn, + LockType lockType, + boolean noWait, + boolean jumpAheadOfWaiters, + DatabaseImpl database) + throws LockConflictException, DatabaseException; + + /** + * Request a blocking or non-blocking lock of the given type on the given + * LSN. + * + * @param lsn is the node to lock. + * + * @param lockType is the type of lock to request. + * + * @param noWait is true to override the defaultNoWait setting. If true, + * or if defaultNoWait is true, throws LockNotAvailableException if the + * lock cannot be granted without waiting. + * + * @param database is the database containing lsn. + * + * @throws LockNotAvailableException if a non-blocking lock was denied. + * + * @throws LockConflictException if a blocking lock could not be acquired. + */ + public LockResult lock(long lsn, + LockType lockType, + boolean noWait, + DatabaseImpl database) + throws LockNotAvailableException, LockConflictException { + + final LockResult result = lockInternal + (lsn, lockType, noWait, false /*jumpAheadOfWaiters*/, database); + + if (result.getLockGrant() == LockGrantType.DENIED) { + /* DENIED can only be returned for a non-blocking lock. */ + throw new LockNotAvailableException( + this, "Non-blocking lock was denied."); + } else { + checkPreempted(closingLocker); + return result; + } + } + + /** + * Request a non-blocking lock of the given type on the given LSN. + * + *
+ * Unlike lock(), this method returns LockGrantType.DENIED if the lock
+ * is denied rather than throwing LockNotAvailableException. This method
+ * should therefore not be used as the final lock for a user operation,
+ * since in that case LockNotAvailableException should be thrown for a
+ * denied lock. It is normally used only to probe for a lock internally,
+ * and other recourse is taken if the lock is denied.
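+ *
+ * A typical probe (a sketch; the variable names and the fallback action
+ * are caller-specific):
+ * <pre>{@code
+ * LockResult r = locker.nonBlockingLock(lsn, LockType.READ,
+ *                                       false, database);
+ * if (r.getLockGrant() == LockGrantType.DENIED) {
+ *     // take other recourse, e.g. fall back to a blocking lock()
+ * }
+ * }</pre>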
        + * + * @param lsn is the node to lock. + * + * @param lockType is the type of lock to request. + * + * @param jumpAheadOfWaiters grant the lock before other waiters, if any. + * + * @param database is the database containing LSN. + */ + public LockResult nonBlockingLock(long lsn, + LockType lockType, + boolean jumpAheadOfWaiters, + DatabaseImpl database) { + final LockResult result = lockInternal + (lsn, lockType, true /*noWait*/, jumpAheadOfWaiters, database); + if (result.getLockGrant() != LockGrantType.DENIED) { + checkPreempted(closingLocker); + } + return result; + } + + /** + * Release the lock on this LN and remove from the transaction's owning + * set. + */ + public synchronized boolean releaseLock(long lsn) + throws DatabaseException { + + boolean ret = lockManager.release(lsn, this); + removeLock(lsn); + return ret; + } + + /** + * Revert this lock from a write lock to a read lock. + */ + public void demoteLock(long lsn) + throws DatabaseException { + + /* + * If successful, the lock manager will call back to the transaction + * and adjust the location of the lock in the lock collection. + */ + lockManager.demote(lsn, this); + } + + /** + * Called when an LN is logged by an operation that will not hold the lock + * such as eviction/checkpoint deferred-write logging or cleaner LN + * migration. We must acquire a lock on the new LSN on behalf of every + * locker that currently holds a lock on the old LSN. + * + * Lock is non-blocking because no contention is possible on the new LSN. + * + * Because this locker is being used by multiple threads, this method may + * be called for a locker that has been closed or for which the lock on the + * old LSN has been released. Unlike other locking methods, in this case + * we simply return rather than report an error. + */ + public synchronized void lockAfterLsnChange(long oldLsn, + long newLsn, + DatabaseImpl dbImpl) { + if (!isValid()) { + /* Locker was recently closed, made abort-only, etc. */ + return; + } + + final LockType lockType = lockManager.getOwnedLockType(oldLsn, this); + if (lockType == null) { + /* Lock was recently released. */ + return; + } + + final LockResult lockResult = nonBlockingLock + (newLsn, lockType, false /*jumpAheadOfWaiters*/, dbImpl); + + if (lockResult.getLockGrant() == LockGrantType.DENIED) { + throw EnvironmentFailureException.unexpectedState + ("No contention is possible on new LSN: " + + DbLsn.getNoFormatString(newLsn) + + " old LSN: " + DbLsn.getNoFormatString(oldLsn) + + " LockType: " + lockType); + } + } + + /** + * In the case where logging occurs before locking, allow lockers to reject + * the operation (e.g., if writing on a replica) and also prepare to undo + * in the (very unlikely) event that logging succeeds but locking fails. + */ + public abstract void preLogWithoutLock(DatabaseImpl database); + + /** + * Throws ReplicaWriteException if called for a locker on a Replica. This + * implementation does nothing but is overridden by replication lockers. + * [#20543] + */ + public void disallowReplicaWrite() { + } + + /** + * Returns whether this locker is transactional. + */ + public abstract boolean isTransactional(); + + /** + * Returns whether the isolation level of this locker is serializable. + */ + public abstract boolean isSerializableIsolation(); + + /** + * Returns whether the isolation level of this locker is read-committed. 
+ */ + public abstract boolean isReadCommittedIsolation(); + + /** + * Returns the underlying Txn if the locker is transactional, or null if + * the locker is non-transactional. For a Txn-based locker, this method + * returns 'this'. For a BuddyLocker, this method may return the buddy. + */ + public abstract Txn getTxnLocker(); + + /** + * Returns a Transaction if the locker is transctional, or null otherwise. + */ + public Transaction getTransaction() { + return null; + } + + /** + * Only BuddyLockers have buddies. + */ + Locker getBuddy() { + return null; + } + + /** + * Creates a fresh non-transactional locker, while retaining any + * transactional locks held by this locker. This method is called when the + * cursor for this locker is cloned. + * + *
+ * This method must return a locker that shares locks with this
+ * locker, e.g., a ThreadLocker.
+ *
+ * In general, transactional lockers return 'this' when this method is
+ * called, while non-transactional lockers return a new instance.
        + */ + public abstract Locker newNonTxnLocker() + throws DatabaseException; + + /** + * Releases any non-transactional locks held by this locker. This method + * is called when the cursor moves to a new position or is closed. + * + *
+ * In general, transactional lockers do nothing when this method is
+ * called, while non-transactional lockers release all locks as if
+ * operationEnd were called.
        + */ + public abstract void releaseNonTxnLocks() + throws DatabaseException; + + /** + * Releases locks and closes the locker at the end of a non-transactional + * cursor operation. For a transctional cursor this method should do + * nothing, since locks must be held until transaction end. + */ + public abstract void nonTxnOperationEnd() + throws DatabaseException; + + /** + * By default the set of buddy lockers is not maintained. This is + * overridden by Txn. + */ + void addBuddy(BuddyLocker buddy) { + } + + /** + * By default the set of buddy lockers is not maintained. This is + * overridden by Txn. + */ + void removeBuddy(BuddyLocker buddy) { + } + + /** + * Returns whether this locker can share locks with the given locker. + */ + public boolean sharesLocksWith(Locker other) { + return false; + } + + /** + * The equivalent of calling operationEnd(true). + */ + public final void operationEnd() + throws DatabaseException { + + operationEnd(true); + } + + /** + * A SUCCESS status equals operationOk. + */ + public final void operationEnd(OperationStatus status) + throws DatabaseException { + + operationEnd(status == OperationStatus.SUCCESS); + } + + /** + * Different types of transactions do different things when the operation + * ends. Txn does nothing, auto Txn commits or aborts, and BasicLocker (and + * its subclasses) just releases locks. + * + * @param operationOK is whether the operation succeeded, since + * that may impact ending behavior. (i.e for an auto Txn) + */ + public abstract void operationEnd(boolean operationOK) + throws DatabaseException; + + /** + * Should be called by all subclasses when the locker is no longer used. + * For Txns and auto Txns this is at commit or abort. For + * non-transactional lockers it is at operationEnd. + */ + void close() + throws DatabaseException { + + isOpen = false; + } + + /** + * Used to determine whether the locker is usable. + * + * FUTURE: Note that this method is overridden by Txn, and Txn.abort sets + * the state to closed when it begins rather than when it ends, but calls + * close() (the method above) when it ends. This is not ideal and deserves + * attention in the future. + */ + public boolean isValid() { + return isOpen; + } + + /** + * @see Txn#addOpenedDatabase + */ + public void addOpenedDatabase(Database dbHandle) { + } + + /** + * @see HandleLocker#allowReleaseLockAfterLsnChange + */ + public boolean allowReleaseLockAfterLsnChange() { + return false; + } + + /** + * Tell this transaction about a cursor. + */ + public abstract void registerCursor(CursorImpl cursor); + + /** + * Remove a cursor from this txn. + */ + public abstract void unRegisterCursor(CursorImpl cursor); + + /** + * Returns true if locking is required for this Locker. All Txnal lockers + * require it; most BasicLockers do not, but BasicLockers on internal dbs + * do. + */ + public abstract boolean lockingRequired(); + + /* + * Transactional support + */ + + /** + * @return the WriteLockInfo for this node. + */ + public abstract WriteLockInfo getWriteLockInfo(long lsn); + + /** + * Database operations like remove and truncate leave behind + * residual DatabaseImpls that must be purged at transaction + * commit or abort. + */ + public abstract void markDeleteAtTxnEnd(DatabaseImpl db, + boolean deleteAtCommit) + throws DatabaseException; + + /** + * Add delete information, to be added to the inCompressor queue when the + * transaction ends. + */ + public void addDeleteInfo(BIN bin) { + + /* + * Skip queue addition if a delta will be logged. 
In this case the + * slot compression will occur in BIN.beforeLog, when a full version is + * logged. + */ + if (bin.shouldLogDelta()) { + return; + } + + synchronized (this) { + /* Maintain only one binRef per node. */ + if (deleteInfo == null) { + deleteInfo = new HashMap<>(); + } + Long nodeId = bin.getNodeId(); + if (deleteInfo.containsKey(nodeId)) { + return; + } + deleteInfo.put(nodeId, bin.createReference()); + } + } + + /* + * Manage locks owned by this transaction. Note that transactions that will + * be multithreaded must override these methods and provide synchronized + * implementations. + */ + + /** + * Add a lock to set owned by this transaction. + */ + protected abstract void addLock(Long lsn, + LockType type, + LockGrantType grantStatus) + throws DatabaseException; + + /** + * Remove the lock from the set owned by this transaction. If specified to + * LockManager.release, the lock manager will call this when its releasing + * a lock. + */ + abstract void removeLock(long lsn) + throws DatabaseException; + + /** + * A lock is being demoted. Move it from the write collection into the read + * collection. + */ + abstract void moveWriteToReadLock(long lsn, Lock lock); + + /** + * Get lock count, for per transaction lock stats, for internal debugging. + */ + public abstract StatGroup collectStats() + throws DatabaseException; + + /* + * Check txn timeout, if set. Called by the lock manager when blocking on a + * lock. + */ + public boolean isTimedOut() { + long timeout = getTxnTimeout(); + if (timeout != 0) { + long diff = System.currentTimeMillis() - txnStartMillis; + if (diff > timeout) { + return true; + } + } + return false; + } + + /** + * Get the transaction timeout period for this locker, in milliseconds + * + * public for jca/ra/JELocalTransaction. + * + * WARNING: Be sure to always access the timeout with this accessor, since + * it is overridden in BuddyLocker. + */ + public synchronized long getTxnTimeout() { + return txnTimeoutMillis; + } + + long getTxnStartMillis() { + return txnStartMillis; + } + + /** + * @return if this locker has ever been rolled back. + */ + public boolean isRolledBack() { + return false; // most Locker types will never roll back. + } + + /** + * This method is safe to call without synchronizing and this fact is + * relied on by LockManager when creating exception messages. + */ + @Override + public String toString() { + String className = getClass().getName(); + className = className.substring(className.lastIndexOf('.') + 1); + + return System.identityHashCode(this) + " " + Long.toString(id) + "_" + + ((thread == null) ? "" : thread.getName()) + "_" + + className; + } + + /** + * Dump lock table, for debugging + */ + public void dumpLockTable() + throws DatabaseException { + + lockManager.dump(); + } +} diff --git a/src/com/sleepycat/je/txn/LockerFactory.java b/src/com/sleepycat/je/txn/LockerFactory.java new file mode 100644 index 0000000..e263d37 --- /dev/null +++ b/src/com/sleepycat/je/txn/LockerFactory.java @@ -0,0 +1,222 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ReplicationContext; + +/** + * Factory of static methods for creating Locker objects. + */ +public class LockerFactory { + + /** + * Get a locker for a write operation, checking whether the db and + * environment is transactional or not. Must return a non null locker. + */ + public static Locker getWritableLocker(final Environment env, + final Transaction userTxn, + final boolean isInternalDb, + final boolean dbIsTransactional, + final boolean autoTxnIsReplicated) { + + return getWritableLocker( + env, userTxn, isInternalDb, dbIsTransactional, + autoTxnIsReplicated, null /*autoCommitConfig*/); + } + + /** + * Get a locker for a write operation. + * + * @param autoTxnIsReplicated is true if this transaction is + * executed on a rep group master, and needs to be broadcast. + * Currently, all application-created transactions are of the type + * com.sleepycat.je.txn.Txn, and are replicated if the parent + * environment is replicated. Auto Txns are trickier because they may + * be created for a local write operation, such as log cleaning. + */ + public static Locker getWritableLocker( + final Environment env, + final Transaction userTxn, + final boolean isInternalDb, + final boolean dbIsTransactional, + final boolean autoTxnIsReplicated, + TransactionConfig autoCommitConfig) { + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final boolean envIsTransactional = envImpl.isTransactional(); + + if (userTxn == null) { + final Transaction xaLocker = env.getThreadTransaction(); + if (xaLocker != null) { + return DbInternal.getLocker(xaLocker); + } + } + + if (dbIsTransactional && userTxn == null) { + + if (autoCommitConfig == null) { + autoCommitConfig = DbInternal.getDefaultTxnConfig(env); + } + + return Txn.createAutoTxn( + envImpl, autoCommitConfig, + (autoTxnIsReplicated ? + ReplicationContext.MASTER : + ReplicationContext.NO_REPLICATE)); + + } + + if (userTxn == null) { + /* Non-transactional user operations use ThreadLocker. */ + return + ThreadLocker.createThreadLocker(envImpl, autoTxnIsReplicated); + } + + /* + * The user provided a transaction, so the environment and the + * database had better be opened transactionally. + */ + if (!isInternalDb && !envIsTransactional) { + throw new IllegalArgumentException( + "A Transaction cannot be used because the"+ + " environment was opened non-transactionally"); + } + if (!dbIsTransactional) { + throw new IllegalArgumentException( + "A Transaction cannot be used because the" + + " database was opened non-transactionally"); + } + + /* + * Use the locker for the given transaction. For read-committed, + * wrap the given transactional locker in a special locker for that + * isolation level. + */ + final Locker locker = DbInternal.getLocker(userTxn); + if (locker.isReadCommittedIsolation()) { + return ReadCommittedLocker.createReadCommittedLocker( + envImpl, locker); + } + + return locker; + } + + /** + * Get a locker for a read or cursor operation. + */ + public static Locker getReadableLocker( + final Database dbHandle, + final Transaction userTxn, + final boolean readCommittedIsolation) { + + return getReadableLocker( + dbHandle, + (userTxn != null) ? 
DbInternal.getLocker(userTxn) : null, + readCommittedIsolation); + } + + /** + * Get a locker for this database handle for a read or cursor operation. + */ + public static Locker getReadableLocker( + final Database dbHandle, + Locker locker, + boolean readCommittedIsolation) { + + final DatabaseImpl dbImpl = DbInternal.getDbImpl(dbHandle); + + if (!dbImpl.isTransactional() && + locker != null && + locker.isTransactional()) { + throw new IllegalArgumentException( + "A Transaction cannot be used because the" + + " database was opened non-transactionally"); + } + + /* Don't reuse a non-transactional locker. */ + if (locker != null && !locker.isTransactional()) { + locker = null; + } + + /* + * Request read-committed if that isolation level is configured for the + * locker being reused, or if true is passed for the parameter (this is + * the case when read-committed is configured for the cursor). + */ + if (locker != null && locker.isReadCommittedIsolation()) { + readCommittedIsolation = true; + } + + final boolean autoTxnIsReplicated = + dbImpl.isReplicated() && + dbImpl.getEnv().isReplicated(); + + return getReadableLocker( + dbHandle.getEnvironment(), locker, autoTxnIsReplicated, + readCommittedIsolation); + } + + /** + * Get a locker for a read or cursor operation. + */ + private static Locker getReadableLocker( + final Environment env, + final Locker locker, + final boolean autoTxnIsReplicated, + final boolean readCommittedIsolation) { + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + if (locker == null) { + final Transaction xaTxn = env.getThreadTransaction(); + if (xaTxn != null) { + return DbInternal.getLocker(xaTxn); + } + /* Non-transactional user operations use ThreadLocker. */ + return + ThreadLocker.createThreadLocker(envImpl, autoTxnIsReplicated); + } + + /* + * Use the given locker. For read-committed, wrap the given + * transactional locker in a special locker for that isolation level. + */ + if (readCommittedIsolation) { + return ReadCommittedLocker.createReadCommittedLocker( + envImpl, locker); + } + + return locker; + } + + /** + * Get a non-transactional locker for internal database operations. Always + * non replicated. + * + * This method is not called for user txns and should not throw a Java + * runtime exception (IllegalArgument, etc). + */ + public static Locker getInternalReadOperationLocker( + final EnvironmentImpl envImpl) { + + return BasicLocker.createBasicLocker(envImpl); + } +} diff --git a/src/com/sleepycat/je/txn/PreparedTxn.java b/src/com/sleepycat/je/txn/PreparedTxn.java new file mode 100644 index 0000000..407796a --- /dev/null +++ b/src/com/sleepycat/je/txn/PreparedTxn.java @@ -0,0 +1,58 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ReplicationContext; + +/** + * A PreparedTxn is used at recovery for processing a TXN_PREPARE log entry. 
It + * is provides essentially the same functionality as a TXN but lets the calling + * code set the transaction id. + */ +public class PreparedTxn extends Txn { + + private PreparedTxn(EnvironmentImpl envImpl, + TransactionConfig config, + long mandatedId) + throws DatabaseException { + + super(envImpl, config, ReplicationContext.NO_REPLICATE, mandatedId); + } + + public static PreparedTxn createPreparedTxn(EnvironmentImpl envImpl, + TransactionConfig config, + long mandatedId) + throws DatabaseException { + + PreparedTxn ret = null; + try { + ret = new PreparedTxn(envImpl, config, mandatedId); + } catch (DatabaseException DE) { + ret.close(false); + throw DE; + } + return ret; + } + + /** + * PrepareTxns use the mandated id. + */ + @Override + protected long generateId(TxnManager txnManager, long mandatedId) { + return mandatedId; + } +} diff --git a/src/com/sleepycat/je/txn/ReadCommittedLocker.java b/src/com/sleepycat/je/txn/ReadCommittedLocker.java new file mode 100644 index 0000000..5015059 --- /dev/null +++ b/src/com/sleepycat/je/txn/ReadCommittedLocker.java @@ -0,0 +1,177 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.tree.BIN; + +/** + * Extends BuddyLocker to acquire write locks using the buddy locker (the + * transaction locker). This is used for ReadCommitted (Degree 2) isolation. + */ +public class ReadCommittedLocker extends BuddyLocker { + + /** + * Creates a ReadCommittedLocker. + * @param buddy is a transactional locker that will be used for acquiring + * write locks. + */ + private ReadCommittedLocker(EnvironmentImpl env, Locker buddy) { + + /* + * If the buddy param is a read-committed locker, reach in to get its + * transactional buddy locker. + */ + super(env, + (buddy instanceof ReadCommittedLocker) ? + ((ReadCommittedLocker) buddy).getBuddy() : buddy); + + assert(getBuddy() instanceof Txn); + } + + public static ReadCommittedLocker createReadCommittedLocker( + EnvironmentImpl env, + Locker buddy) + throws DatabaseException { + + return new ReadCommittedLocker(env, buddy); + } + + /** + * Returns a new ReadCommittedLocker that shares locks with this locker by + * virtue of both lockers only holding READ locks. The buddy locker + * underlying both ReadCommittedLocker lockers is the same transactional + * locker, so WRITE locks are also shared. + */ + @Override + public Locker newNonTxnLocker() + throws DatabaseException { + + /* + * getBuddy().newNonTxnLocker() will return the transactional buddy + * locker itself (same as getBuddy), but we call newNonTxnLocker for + * consistency. + */ + return ReadCommittedLocker.createReadCommittedLocker + (envImpl, getBuddy().newNonTxnLocker()); + } + + /** + * Forwards write locks to the buddy locker (the transaction locker). 
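+ * Read locks therefore stay with this short-lived locker and can be
+ * released when the cursor moves, while write locks are owned by the
+ * buddy Txn and held until transaction end, which is the read-committed
+ * (Degree 2) contract.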
+ * + * @see Locker#lockInternal + */ + @Override + protected LockResult lockInternal(long lsn, + LockType lockType, + boolean noWait, + boolean jumpAheadOfWaiters, + DatabaseImpl database) + throws DatabaseException { + + if (lockType.isWriteLock()) { + return getBuddy().lockInternal + (lsn, lockType, noWait, jumpAheadOfWaiters, database); + } else { + return super.lockInternal + (lsn, lockType, noWait, jumpAheadOfWaiters, database); + } + } + + /** + * Releases the lock from this locker, or if not owned by this locker then + * releases it from the buddy locker. + */ + @Override + public boolean releaseLock(long lsn) + throws DatabaseException { + + boolean ret = true; + if (!lockManager.release(lsn, this)) { + ret = lockManager.release(lsn, getBuddy()); + } + removeLock(lsn); + return ret; + } + + /** + * @return the WriteLockInfo for this node. + */ + @Override + public WriteLockInfo getWriteLockInfo(long lsn) { + return getBuddy().getWriteLockInfo(lsn); + } + + /** + * Write operations are handled by the buddy Txn. + */ + @Override + public void markDeleteAtTxnEnd(DatabaseImpl db, boolean deleteAtCommit) { + getBuddy().markDeleteAtTxnEnd(db, deleteAtCommit); + } + + /** + * Forwards this method to the transactional buddy. The buddy handles + * write locks and therefore handles delete information. + */ + @Override + public void addDeleteInfo(BIN bin) { + getBuddy().addDeleteInfo(bin); + } + + /** + * Forwards this method to the transactional buddy. The buddy Txn tracks + * cursors. + */ + @Override + public void registerCursor(CursorImpl cursor) { + getBuddy().registerCursor(cursor); + } + + /** + * Forwards this method to the transactional buddy. The buddy Txn tracks + * cursors. + */ + @Override + public void unRegisterCursor(CursorImpl cursor) { + getBuddy().unRegisterCursor(cursor); + } + + /** + * ReadCommittedLockers always require locking. + */ + @Override + public boolean lockingRequired() { + return true; + } + + /** + * Is always transactional because the buddy locker is transactional. + */ + @Override + public boolean isTransactional() { + return true; + } + + /** + * Is always read-committed isolation. + */ + @Override + public boolean isReadCommittedIsolation() { + return true; + } +} diff --git a/src/com/sleepycat/je/txn/RollbackEnd.java b/src/com/sleepycat/je/txn/RollbackEnd.java new file mode 100644 index 0000000..b1be5c2 --- /dev/null +++ b/src/com/sleepycat/je/txn/RollbackEnd.java @@ -0,0 +1,126 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Timestamp; + +/** + * This class indicates the end of a partial rollback at syncup. This is a + * non-replicated entry. Although this is a replication class, it resides in + * the utilint package because it is referenced in LogEntryType.java and is + * used in a general way at recovery. 
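+ * It records the matchpoint and the LSN of the corresponding
+ * RollbackStart entry, which together bound the rolled-back region of
+ * the log.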
+ */ +public class RollbackEnd implements Loggable { + + private long matchpointLSN; + private long rollbackStartLSN; + /* For debugging in the field */ + private Timestamp time; + + public RollbackEnd(long matchpointLSN, long rollbackStartLSN) { + this.matchpointLSN = matchpointLSN; + this.rollbackStartLSN = rollbackStartLSN; + time = new Timestamp(System.currentTimeMillis()); + } + + /** + * For constructing from the log. + */ + public RollbackEnd() { + } + + public long getMatchpoint() { + return matchpointLSN; + } + + public long getRollbackStart() { + return rollbackStartLSN; + } + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + return LogUtils.getPackedLongLogSize(matchpointLSN) + + LogUtils.getPackedLongLogSize(rollbackStartLSN) + + LogUtils.getTimestampLogSize(time); + + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer buffer) { + LogUtils.writePackedLong(buffer, matchpointLSN); + LogUtils.writePackedLong(buffer, rollbackStartLSN); + LogUtils.writeTimestamp(buffer, time); + } + + /** + * @see Loggable#readFromLog + */ + @SuppressWarnings("unused") + public void readFromLog(ByteBuffer buffer, int entryVersion) { + matchpointLSN = LogUtils.readPackedLong(buffer); + rollbackStartLSN = LogUtils.readPackedLong(buffer); + /* the timestamp is packed -- double negative, unpacked == false */ + time = LogUtils.readTimestamp(buffer, false /* unpacked. */); + } + + /** + * @see Loggable#dumpLog + */ + @SuppressWarnings("unused") + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(" matchpointLSN="); + sb.append(DbLsn.getNoFormatString(matchpointLSN)); + sb.append(" rollbackStartLSN="); + sb.append(DbLsn.getNoFormatString(rollbackStartLSN)); + sb.append(" time=").append(time); + } + + /** + * @see Loggable#getTransactionId + */ + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + */ + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof RollbackEnd)) { + return false; + } + + RollbackEnd otherRE = (RollbackEnd) other; + return (rollbackStartLSN == otherRE.rollbackStartLSN) && + (matchpointLSN == otherRE.matchpointLSN) && + (time.equals(otherRE.time)); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + dumpLog(sb, true); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/txn/RollbackStart.java b/src/com/sleepycat/je/txn/RollbackStart.java new file mode 100644 index 0000000..1f676d0 --- /dev/null +++ b/src/com/sleepycat/je/txn/RollbackStart.java @@ -0,0 +1,170 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.txn; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Timestamp; +import com.sleepycat.je.utilint.VLSN; + +/** + * This class indicates the end of a partial rollback at syncup. This is a + * non-replicated entry. Although this is a replication related class, it + * resides in the utilint package because it is referenced in + * LogEntryType.java, and is used in a general way at recovery. + */ +public class RollbackStart implements Loggable { + + /* The matchpoint that is the logical start of this rollback period. */ + private VLSN matchpointVLSN; + private long matchpointLSN; + + /* + * The active txn list are unfinished transactions that will be rolled back + * by syncup. + */ + private Set activeTxnIds; + + /* For debugging in the field */ + private Timestamp time; + + public RollbackStart(VLSN matchpointVLSN, + long matchpointLSN, + Set activeTxnIds) { + this.matchpointVLSN = matchpointVLSN; + this.matchpointLSN = matchpointLSN; + this.activeTxnIds = activeTxnIds; + time = new Timestamp(System.currentTimeMillis()); + } + + /** + * For constructing from the log. + */ + public RollbackStart() { + } + + public long getMatchpoint() { + return matchpointLSN; + } + + public Set getActiveTxnIds() { + return activeTxnIds; + } + + public VLSN getMatchpointVLSN() { + return matchpointVLSN; + } + + /** + * @see Loggable#getLogSize + */ + public int getLogSize() { + int size = LogUtils.getPackedLongLogSize(matchpointVLSN.getSequence()) + + LogUtils.getPackedLongLogSize(matchpointLSN) + + LogUtils.getTimestampLogSize(time) + + LogUtils.getPackedIntLogSize(activeTxnIds.size()); + + for (Long id : activeTxnIds) { + size += LogUtils.getPackedLongLogSize(id); + } + + return size; + } + + /** + * @see Loggable#writeToLog + */ + public void writeToLog(ByteBuffer buffer) { + LogUtils.writePackedLong(buffer, matchpointVLSN.getSequence()); + LogUtils.writePackedLong(buffer, matchpointLSN); + LogUtils.writeTimestamp(buffer, time); + LogUtils.writePackedInt(buffer, activeTxnIds.size()); + for (Long id : activeTxnIds) { + LogUtils.writePackedLong(buffer, id); + } + } + + /**" + * @see Loggable#readFromLog + */ + public void readFromLog(ByteBuffer buffer, int entryVersion) { + matchpointVLSN = new VLSN(LogUtils.readPackedLong(buffer)); + matchpointLSN = LogUtils.readPackedLong(buffer); + /* the timestamp is packed -- double negative, unpacked == false */ + time = LogUtils.readTimestamp(buffer, false /* unpacked. */); + int setSize = LogUtils.readPackedInt(buffer); + activeTxnIds = new HashSet(setSize); + for (int i = 0; i < setSize; i++) { + activeTxnIds.add(LogUtils.readPackedLong(buffer)); + } + } + + /** + * @see Loggable#dumpLog + */ + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(" matchpointVLSN=").append(matchpointVLSN.getSequence()); + sb.append(" matchpointLSN="); + sb.append(DbLsn.getNoFormatString(matchpointLSN)); + + /* Make sure the active txns are listed in order, partially for the sake + * of the LoggableTest unit test, which expects the toString() for two + * equivalent objects to always display the same, and partially for + * ease of debugging. 
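+ * (A List copy is needed because a HashSet has no ordering of its own.)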
+         */
+        List<Long> displayTxnIds = new ArrayList<Long>(activeTxnIds);
+        Collections.sort(displayTxnIds);
+        sb.append(" activeTxnIds=").append(displayTxnIds);
+        sb.append(" time=").append(time);
+    }
+
+    /**
+     * @see Loggable#getTransactionId
+     */
+    public long getTransactionId() {
+        return 0;
+    }
+
+    /**
+     * @see Loggable#logicalEquals
+     */
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof RollbackStart)) {
+            return false;
+        }
+
+        RollbackStart otherRS = (RollbackStart) other;
+
+        return (matchpointVLSN.equals(otherRS.matchpointVLSN) &&
+                (matchpointLSN == otherRS.matchpointLSN) &&
+                time.equals(otherRS.time) &&
+                activeTxnIds.equals(otherRS.activeTxnIds));
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        dumpLog(sb, true);
+        return sb.toString();
+    }
+}
diff --git a/src/com/sleepycat/je/txn/SyncedLockManager.java b/src/com/sleepycat/je/txn/SyncedLockManager.java
new file mode 100644
index 0000000..4a38ab6
--- /dev/null
+++ b/src/com/sleepycat/je/txn/SyncedLockManager.java
@@ -0,0 +1,270 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.List;
+import java.util.Set;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.MemoryBudget;
+import com.sleepycat.je.utilint.StatGroup;
+
+/**
+ * SyncedLockManager uses the synchronized keyword to implement its critical
+ * sections.
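+ *
+ * Every public entry point below follows the same shape: hash the LSN to a
+ * lock table index, then synchronize on that table's latch object while
+ * delegating to the corresponding ...Internal method in LockManager, for
+ * example:
+ *
+ *   int lockTableIndex = getLockTableIndex(lsn);
+ *   synchronized (lockTableLatches[lockTableIndex]) {
+ *       return isLockedInternal(lsn, lockTableIndex);
+ *   }
+ *
+ * Only dumpLockTable differs: it walks all nLockTables tables, latching
+ * each one in turn.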
+ */ +public class SyncedLockManager extends LockManager { + + public SyncedLockManager(EnvironmentImpl envImpl) { + super(envImpl); + } + + @Override + public Set getOwners(Long lsn) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return getOwnersInternal(lsn, lockTableIndex); + } + } + + @Override + public List getWaiters(Long lsn) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return getWaitersInternal(lsn, lockTableIndex); + } + } + + @Override + public LockType getOwnedLockType(Long lsn, Locker locker) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return getOwnedLockTypeInternal(lsn, locker, lockTableIndex); + } + } + + @Override + public boolean isLockUncontended(Long lsn) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return isLockUncontendedInternal(lsn, lockTableIndex); + } + } + + @Override + public boolean ownsOrSharesLock(Locker locker, Long lsn) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return ownsOrSharesLockInternal(locker, lsn, lockTableIndex); + } + } + + /** + * @see LockManager#attemptLock + */ + @Override + Lock lookupLock(Long lsn) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return lookupLockInternal(lsn, lockTableIndex); + } + } + + /** + * @see LockManager#attemptLock + */ + @Override + LockAttemptResult attemptLock(Long lsn, + Locker locker, + LockType type, + boolean nonBlockingRequest, + boolean jumpAheadOfWaiters) + throws DatabaseException { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return attemptLockInternal + (lsn, locker, type, nonBlockingRequest, jumpAheadOfWaiters, + lockTableIndex); + } + } + + /** + * @see LockManager#getTimeoutInfo + */ + @Override + TimeoutInfo getTimeoutInfo( + boolean isLockNotTxnTimeout, + Locker locker, + long lsn, + LockType type, + LockGrantType grantType, + Lock useLock, + long timeout, + long start, + long now, + DatabaseImpl database, + Set owners, + List waiters) { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return getTimeoutInfoInternal( + isLockNotTxnTimeout, locker, lsn, type, grantType, useLock, + timeout, start, now, database, owners, waiters); + } + } + + /** + * @see LockManager#releaseAndNotifyTargets + */ + @Override + Set releaseAndFindNotifyTargets(long lsn, Locker locker) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return releaseAndFindNotifyTargetsInternal + (lsn, locker, lockTableIndex); + } + } + + /** + * @see LockManager#demote + */ + @Override + void demote(long lsn, Locker locker) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + demoteInternal(lsn, locker, lockTableIndex); + } + } + + /** + * @see LockManager#isLocked + */ + @Override + boolean isLocked(Long lsn) { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return isLockedInternal(lsn, lockTableIndex); + } + } + + /** + * @see LockManager#isOwner + */ + @Override + boolean isOwner(Long lsn, Locker locker, LockType type) { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return 
isOwnerInternal(lsn, locker, type, lockTableIndex); + } + } + + /** + * @see LockManager#isWaiter + */ + @Override + boolean isWaiter(Long lsn, Locker locker) { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return isWaiterInternal(lsn, locker, lockTableIndex); + } + } + + /** + * @see LockManager#nWaiters + */ + @Override + int nWaiters(Long lsn) { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return nWaitersInternal(lsn, lockTableIndex); + } + } + + /** + * @see LockManager#nOwners + */ + @Override + int nOwners(Long lsn) { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return nOwnersInternal(lsn, lockTableIndex); + } + } + + /** + * @see LockManager#getWriterOwnerLocker + */ + @Override + Locker getWriteOwnerLocker(Long lsn) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return getWriteOwnerLockerInternal(lsn, lockTableIndex); + } + } + + /** + * @see LockManager#validateOwnership + */ + @Override + boolean validateOwnership(Long lsn, + Locker locker, + LockType type, + boolean getOwnersAndWaiters, + boolean flushFromWaiters, + Set owners, + List waiters) { + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return validateOwnershipInternal( + lsn, locker, type, getOwnersAndWaiters, flushFromWaiters, + lockTableIndex, owners, waiters); + } + } + + /** + * @see LockManager#stealLock + */ + @Override + public LockAttemptResult stealLock(Long lsn, + Locker locker, + LockType lockType) + throws DatabaseException { + + int lockTableIndex = getLockTableIndex(lsn); + synchronized(lockTableLatches[lockTableIndex]) { + return stealLockInternal(lsn, locker, lockType, lockTableIndex); + } + } + + /** + * @see LockManager#dumpLockTable + */ + @Override + void dumpLockTable(StatGroup stats, boolean clear) { + for (int i = 0; i < nLockTables; i++) { + synchronized(lockTableLatches[i]) { + dumpLockTableInternal(stats, i, clear); + } + } + } +} diff --git a/src/com/sleepycat/je/txn/ThinLockImpl.java b/src/com/sleepycat/je/txn/ThinLockImpl.java new file mode 100644 index 0000000..c8e08da --- /dev/null +++ b/src/com/sleepycat/je/txn/ThinLockImpl.java @@ -0,0 +1,202 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.MemoryBudget; + +/** + * Implements a lightweight Lock with no waiters and only a single Owner. If, + * during an operation (lock) more than one owner or waiter is required, then + * this will mutate to a LockImpl, perform the requested operation, and return + * the new LockImpl to the caller. + * + * public for Sizeof. + */ +public class ThinLockImpl extends LockInfo implements Lock { + + /** + * Create a Lock. Public for Sizeof. 
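+     *
+     * Illustrative mutation path (see the class comment above and lock()
+     * below): when a second locker requests this lock,
+     *
+     *   Lock newLock = new LockImpl(new LockInfo(this));
+     *   return newLock.lock(requestType, locker, nonBlockingRequest,
+     *                       jumpAheadOfWaiters, mb, lockTableIndex);
+     *
+     * creates a full LockImpl and returns it to the caller, which is then
+     * expected to store the heavier lock in place of this thin one.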
+ */ + public ThinLockImpl() { + super(null, null); + } + + /* Used when releasing lock */ + ThinLockImpl(ThinLockImpl tl) { + super(tl.getLocker(), tl.getLockType()); + } + + public List getWaitersListClone() { + return Collections.emptyList(); + } + + public void flushWaiter(Locker locker, + MemoryBudget mb, + int lockTableIndex) { + + /* Do nothing. */ + return; + } + + public Set getOwnersClone() { + + Set ret = new HashSet(); + if (locker != null) { + ret.add(this); + } + return ret; + } + + public boolean isOwner(Locker locker, LockType lockType) { + return locker == this.locker && lockType == this.lockType; + } + + public boolean isOwnedWriteLock(Locker locker) { + + if (locker != this.locker) { + return false; + } + + if (this.lockType != null) { + return this.lockType.isWriteLock(); + } else { + return false; + } + } + + public LockType getOwnedLockType(Locker locker) { + if (locker != this.locker) { + return null; + } + return this.lockType; + } + + public boolean isWaiter(Locker locker) { + + /* There can never be waiters on Thin Locks. */ + return false; + } + + public int nWaiters() { + return 0; + } + + public int nOwners() { + return (locker == null ? 0 : 1); + } + + public LockAttemptResult lock(LockType requestType, + Locker locker, + boolean nonBlockingRequest, + boolean jumpAheadOfWaiters, + MemoryBudget mb, + int lockTableIndex) + throws DatabaseException { + + if (this.locker != null && + this.locker != locker) { + /* Lock is already held by someone else so mutate. */ + Lock newLock = new LockImpl(new LockInfo(this)); + return newLock.lock(requestType, locker, nonBlockingRequest, + jumpAheadOfWaiters, mb, lockTableIndex); + } + + LockGrantType grant = null; + if (this.locker == null) { + this.locker = locker; + this.lockType = requestType; + grant = LockGrantType.NEW; + } else { + + /* The requestor holds this lock. Check for upgrades. */ + LockUpgrade upgrade = lockType.getUpgrade(requestType); + if (upgrade.getUpgrade() == null) { + grant = LockGrantType.EXISTING; + } else { + LockType upgradeType = upgrade.getUpgrade(); + assert upgradeType != null; + this.lockType = upgradeType; + grant = (upgrade.getPromotion() ? + LockGrantType.PROMOTION : + LockGrantType.EXISTING); + } + } + return new LockAttemptResult(this, grant, false); + } + + public Set release(Locker locker, + MemoryBudget mb, + int lockTableIndex) { + + if (locker == this.locker) { + this.locker = null; + this.lockType = null; + return Collections.emptySet(); + } else { + return null; + } + } + + public void stealLock(Locker locker, MemoryBudget mb, int lockTableIndex) { + if (this.locker != locker && + this.locker.getPreemptable()) { + this.locker.setPreempted(); + this.locker = null; + } + } + + public void demote(Locker locker) { + + if (this.lockType.isWriteLock()) { + this.lockType = (lockType == LockType.RANGE_WRITE) ? 
+ LockType.RANGE_READ : LockType.READ; + } + } + + public Locker getWriteOwnerLocker() { + + if (lockType != null && + lockType.isWriteLock()) { + return locker; + } else { + return null; + } + } + + public boolean isThin() { + return true; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(" ThinLockAddr:").append(System.identityHashCode(this)); + sb.append(" Owner:"); + if (nOwners() == 0) { + sb.append(" (none)"); + } else { + sb.append(locker); + } + + sb.append(" Waiters: (none)"); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/txn/ThreadLocker.java b/src/com/sleepycat/je/txn/ThreadLocker.java new file mode 100644 index 0000000..5163dfd --- /dev/null +++ b/src/com/sleepycat/je/txn/ThreadLocker.java @@ -0,0 +1,141 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import java.util.Iterator; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Extends BasicLocker to share locks among all lockers for the same thread. + * This locker is used when a JE entry point is called with a null transaction + * parameter. + */ +public class ThreadLocker extends BasicLocker { + + /** + * Set to allow this locker to be used by multiple threads. This mode + * should only be set temporarily, for example, while locking in + * lockAfterLsnChange. + */ + private boolean allowMultithreadedAccess; + + /** + * Creates a ThreadLocker. + */ + public ThreadLocker(EnvironmentImpl env) { + super(env); + lockManager.registerThreadLocker(this); + } + + public static ThreadLocker createThreadLocker(EnvironmentImpl env, + boolean replicated) + throws DatabaseException { + + return (env.isReplicated() && replicated) ? + env.createRepThreadLocker() : + new ThreadLocker(env); + } + + @Override + void close() { + super.close(); + lockManager.unregisterThreadLocker(this); + } + + /** + * Checks for preemption in all thread lockers for this thread. + */ + @Override + public void checkPreempted(final Locker allowPreemptedLocker) + throws OperationFailureException { + + final Iterator iter = + lockManager.getThreadLockers(thread); + while (iter.hasNext()) { + final ThreadLocker locker = iter.next(); + locker.throwIfPreempted(allowPreemptedLocker); + } + } + + /** + * Set the allowMultithreadedAccess mode during execution of this method + * because a ThreadLocker is not normally allowed to perform locking from + * more than one thread. + */ + @Override + public synchronized void lockAfterLsnChange(long oldLsn, + long newLsn, + DatabaseImpl dbImpl) { + final boolean oldVal = allowMultithreadedAccess; + allowMultithreadedAccess = true; + try { + super.lockAfterLsnChange(oldLsn, newLsn, dbImpl); + } finally { + allowMultithreadedAccess = oldVal; + } + } + + /** + * Check that this locker is not used in the wrong thread. + * + * @throws IllegalStateException via all Cursor methods that use a + * non-transactional locker. 
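+     *
+     * For example, a non-transactional Cursor opened in one thread and
+     * then read from another thread fails here; the IllegalStateException
+     * message below names both the creating and the calling thread.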
+ */ + @Override + protected synchronized void checkState(boolean ignoreCalledByAbort) { + if (!allowMultithreadedAccess && thread != Thread.currentThread()) { + throw new IllegalStateException + ("Non-transactional Cursors may not be used in multiple " + + "threads; Cursor was created in " + thread + + " but used in " + Thread.currentThread()); + } + } + + /** + * Returns a new non-transactional locker that shares locks with this + * locker by virtue of being a ThreadLocker for the same thread. + */ + @Override + public Locker newNonTxnLocker() + throws DatabaseException { + + checkState(false); + return newEmptyThreadLockerClone(); + } + + public ThreadLocker newEmptyThreadLockerClone() { + return new ThreadLocker(envImpl); + } + + /** + * Returns whether this locker can share locks with the given locker. + * Locks are shared when both lockers are ThreadLocker instances for the + * same thread. + */ + @Override + public boolean sharesLocksWith(Locker other) { + + if (super.sharesLocksWith(other)) { + return true; + } else if (other instanceof ThreadLocker) { + return thread == other.thread; + } else { + return false; + } + } +} diff --git a/src/com/sleepycat/je/txn/Txn.java b/src/com/sleepycat/je/txn/Txn.java new file mode 100644 index 0000000..da4cc61 --- /dev/null +++ b/src/com/sleepycat/je/txn/Txn.java @@ -0,0 +1,2492 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.txn; + +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_TOTAL; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; +import static com.sleepycat.je.utilint.DbLsn.NULL_LSN; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.transaction.xa.XAResource; +import javax.transaction.xa.Xid; + +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.dbi.TriggerManager; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.LogParams; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.Provisional; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.VersionedWriteLoggable; +import com.sleepycat.je.log.entry.AbortLogEntry; +import com.sleepycat.je.log.entry.CommitLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.recovery.RecoveryManager; +import com.sleepycat.je.tree.TreeLocation; +import com.sleepycat.je.txn.TxnChain.CompareSlot; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.TinyHashSet; +import com.sleepycat.je.utilint.VLSN; + +/** + * A Txn is the internal representation of a transaction created by a call to + * Environment.txnBegin. This class must support multi-threaded use. + */ +public class Txn extends Locker implements VersionedWriteLoggable { + + /** + * The log version of the most recent format change for this loggable. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + /* Use an AtomicInteger to record cursors opened under this txn. */ + private final AtomicInteger cursors = new AtomicInteger(); + + /* Internal txn flags. */ + private byte txnFlags; + /* Set if prepare() has been called on this transaction. */ + private static final byte IS_PREPARED = 1; + /* Set if xa_end(TMSUSPEND) has been called on this transaction. */ + private static final byte XA_SUSPENDED = 2; + /* Set if this rollback() has been called on this transaction. */ + private static final byte PAST_ROLLBACK = 4; + + /* + * Set if this transaction may abort other transactions holding a needed + * lock. 
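+     * In practice this flag is expected to be set on HA replay
+     * transactions so that they can preempt locks during syncup.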
Note that this bit flag and the setImportunate method could be + * removed in favor of overriding getImportunate in ReplayTxn. This was + * not done, for now, to avoid changing importunate tests that use a Txn + * and call setImportunate. [#16513] + */ + private static final byte IMPORTUNATE = 8; + + /* Holds the public Transaction state. */ + private Transaction.State txnState; + + /* Information about why a Txn was made only abortable. */ + private OperationFailureException onlyAbortableCause; + + /* + * A Txn can be used by multiple threads. Modification to the read and + * write lock collections is done by synchronizing on the txn. + */ + private Set readLocks; // key is LSN + private Map writeInfo; // key is LSN + + /* + * A set of BuddyLockers that have this locker as their buddy. Currently + * this set is only maintained (non-null) in a replicated environment + * because it is only needed for determining when to throw + * LockPreemptedException. If null, it can be assumed that no other + * thread will change it. If non-null, access should be synchronized on + * the buddyLockers object. TinyHashSet is used because it is optimized + * for 0 to 2 entries, and normally a Txn will have at most two buddy + * lockers (for read-committed mode). + */ + private TinyHashSet buddyLockers; + + private static final int READ_LOCK_OVERHEAD = + MemoryBudget.HASHSET_ENTRY_OVERHEAD; + private static final int WRITE_LOCK_OVERHEAD = + MemoryBudget.HASHMAP_ENTRY_OVERHEAD + + MemoryBudget.WRITE_LOCKINFO_OVERHEAD; + + /* + * We have to keep a set of DatabaseCleanupInfo objects so after commit or + * abort of Environment.truncateDatabase() or Environment.removeDatabase(), + * we can appropriately purge the unneeded MapLN and DatabaseImpl. + * Synchronize access to this set on this object. + */ + protected Set deletedDatabases; + + /* + * We need a map of the latest databaseImpl objects to drive the undo + * during an abort, because it's too hard to look up the database object in + * the mapping tree. (The normal code paths want to take locks, add + * cursors, etc. + */ + protected Map undoDatabases; + + /** + * @see #addOpenedDatabase + * @see HandleLocker + */ + protected Set openedDatabaseHandles; + + /* + * First LSN logged for this transaction -- used for keeping track of the + * first active LSN point, for checkpointing. This field is not persistent. + * + * [#16861] This field is volatile to avoid making getFirstActiveLsn + * synchronized, which causes a deadlock in HA. + */ + protected volatile long firstLoggedLsn = NULL_LSN; + + /* + * Last LSN logged for this transaction. Serves as the handle onto the + * chained log entries belonging to this transaction. Is persistent. + */ + protected long lastLoggedLsn = NULL_LSN; + + /* + * The LSN used to commit the transaction. One of commitLSN or abortLSN + * must be set after a commit() or abort() operation. Note that a commit() + * may set abortLSN, if the commit failed, and the transaction had to be + * aborted. + */ + protected long commitLsn = NULL_LSN; + + /* The LSN used to record the abort of the transaction. */ + long abortLsn = NULL_LSN; + + /* The configured durability at the time the transaction was created. */ + private Durability defaultDurability; + + /* The durability used for the actual commit. */ + private Durability commitDurability; + + /* Whether to use Serializable isolation (prevent phantoms). */ + private boolean serializableIsolation; + + /* Whether to use Read-Committed isolation. 
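+     * Both isolation flags are copied from the TransactionConfig in
+     * initTxn; an illustrative way to request one (env here is assumed to
+     * be an open Environment handle, not a name from this file):
+     *
+     *   TransactionConfig config = new TransactionConfig();
+     *   config.setReadCommitted(true);
+     *   Transaction txn = env.beginTransaction(null, config);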
*/ + private boolean readCommittedIsolation; + + /* + * In-memory size, in bytes. A Txn tracks the memory needed for itself and + * the readlock, writeInfo, undoDatabases, and deletedDatabases + * collections, including the cost of each collection entry. However, the + * actual Lock object memory cost is maintained within the Lock class. + */ + private int inMemorySize; + + /* + * Accumulated memory budget delta. Once this exceeds ACCUMULATED_LIMIT we + * inform the MemoryBudget that a change has occurred. + */ + private int accumulatedDelta = 0; + + /* + * The set of databases for which triggers were invoked during the + * course of this transaction. It's null if no triggers were invoked. + */ + private Set triggerDbs = null; + + /* + * The user Transaction handle associated with this Txn. It's null if there + * isn't one, e.g. it's an internal transaction. + */ + private Transaction transaction; + + /* + * Max allowable accumulation of memory budget changes before MemoryBudget + * should be updated. This allows for consolidating multiple calls to + * updateXXXMemoryBudget() into one call. Not declared final so that unit + * tests can modify this. See SR 12273. + */ + public static int ACCUMULATED_LIMIT = 10000; + + /* + * Each Txn instance has a handle on a ReplicationContext instance for use + * in logging a TxnCommit or TxnAbort log entries. + */ + protected ReplicationContext repContext; + + /* + * Used to track mixed mode (sync/durability) transaction API usage. When + * the sync based api is removed, these tracking ivs can be as well. + */ + private boolean explicitSyncConfigured = false; + private boolean explicitDurabilityConfigured = false; + + /* Determines whether the transaction is auto-commit */ + private boolean isAutoCommit = false; + + private boolean readOnly; + + /** + * Constructor for reading from log. + */ + public Txn() { + lastLoggedLsn = NULL_LSN; + } + + protected Txn(EnvironmentImpl envImpl, + TransactionConfig config, + ReplicationContext repContext) { + this(envImpl, config, repContext, 0L /*mandatedId */ ); + } + + /** + * A non-zero mandatedId is specified only by subtypes which arbitrarily + * impose a transaction id value onto the transaction. This is done by + * implementing a version of Locker.generateId() which uses the proposed + * id. + */ + protected Txn(EnvironmentImpl envImpl, + TransactionConfig config, + ReplicationContext repContext, + long mandatedId) + throws DatabaseException { + + /* + * Initialize using the config but don't hold a reference to it, since + * it has not been cloned. + */ + super(envImpl, config.getReadUncommitted(), config.getNoWait(), + mandatedId); + initTxn(config); + this.repContext = repContext; + } + + public static Txn createLocalTxn(EnvironmentImpl envImpl, + TransactionConfig config) { + return new Txn(envImpl, config, ReplicationContext.NO_REPLICATE); + } + + public static Txn createLocalAutoTxn(EnvironmentImpl envImpl, + TransactionConfig config) { + Txn txn = createLocalTxn(envImpl, config); + txn.isAutoCommit = true; + return txn; + } + + /* + * Make a transaction for a user instigated transaction. Whether the + * environment is replicated or not determines whether a MasterTxn or + * a plain local Txn is returned. + */ + static Txn createUserTxn(EnvironmentImpl envImpl, + TransactionConfig config) { + + Txn ret = null; + try { + ret = envImpl.isReplicated() ? 
+ envImpl.createRepUserTxn(config) : + createLocalTxn(envImpl, config); + } catch (DatabaseException DE) { + if (ret != null) { + ret.close(false); + } + throw DE; + } + return ret; + } + + static Txn createAutoTxn(EnvironmentImpl envImpl, + TransactionConfig config, + ReplicationContext repContext) + throws DatabaseException { + + Txn ret = null; + try { + if (envImpl.isReplicated() && repContext.inReplicationStream()) { + ret = envImpl.createRepUserTxn(config); + } else { + ret = new Txn(envImpl, config, repContext); + } + + ret.isAutoCommit = true; + } catch (DatabaseException DE) { + if (ret != null) { + ret.close(false); + } + throw DE; + } + return ret; + } + + @SuppressWarnings("deprecation") + private void initTxn(TransactionConfig config) + throws DatabaseException { + + serializableIsolation = config.getSerializableIsolation(); + readCommittedIsolation = config.getReadCommitted(); + defaultDurability = config.getDurability(); + if (defaultDurability == null) { + explicitDurabilityConfigured = false; + defaultDurability = config.getDurabilityFromSync(envImpl); + } else { + explicitDurabilityConfigured = true; + } + explicitSyncConfigured = + config.getSync() || config.getNoSync() || config.getWriteNoSync(); + + assert (!(explicitDurabilityConfigured && explicitSyncConfigured)); + + readOnly = config.getReadOnly(); + + lastLoggedLsn = NULL_LSN; + firstLoggedLsn = NULL_LSN; + + txnFlags = 0; + setState(Transaction.State.OPEN); + + if (envImpl.isReplicated()) { + buddyLockers = new TinyHashSet(); + } + + txnBeginHook(config); + + /* + * Note: readLocks, writeInfo, undoDatabases, deleteDatabases are + * initialized lazily in order to conserve memory. WriteInfo and + * undoDatabases are treated as a package deal, because they are both + * only needed if a transaction does writes. + * + * When a lock is added to this transaction, we add the collection + * entry overhead to the memory cost, but don't add the lock + * itself. That's taken care of by the Lock class. + */ + updateMemoryUsage(MemoryBudget.TXN_OVERHEAD); + + if (registerImmediately()) { + this.envImpl.getTxnManager().registerTxn(this); + } + } + + /** + * True if this transaction should be registered with the transaction + * manager immediately at startup. True for all transactions except for + * those ReplayTxns which were created as transformed master transactions. + */ + protected boolean registerImmediately() { + return true; + } + + @Override + void addBuddy(BuddyLocker buddy) { + if (buddyLockers != null) { + synchronized (buddyLockers) { + buddyLockers.add(buddy); + } + } + } + + @Override + void removeBuddy(BuddyLocker buddy) { + if (buddyLockers != null) { + synchronized (buddyLockers) { + buddyLockers.remove(buddy); + } + } + } + + /** + * UserTxns get a new unique id for each instance. + */ + @Override + @SuppressWarnings("unused") + protected long generateId(TxnManager txnManager, + long ignore /* mandatedId */) { + return txnManager.getNextTxnId(); + } + + /** + * Access to last LSN. + */ + public long getLastLsn() { + return lastLoggedLsn; + } + + /** + * + * Returns the durability used for the commit operation. It's only + * available after a commit operation has been initiated. + * + * @return the durability associated with the commit, or null if the + * commit has not yet been initiated. + */ + public Durability getCommitDurability() { + return commitDurability; + } + + /** + * Returns the durability associated the transaction at the time it's first + * created. 
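+     *
+     * For example (illustrative), a txn whose config specified
+     * Durability.COMMIT_WRITE_NO_SYNC reports that value here even if
+     * commit(Durability) is later invoked with a stronger policy such as
+     * Durability.COMMIT_SYNC.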
+ * + * @return the durability associated with the transaction at creation. + */ + public Durability getDefaultDurability() { + return defaultDurability; + } + + public boolean getPrepared() { + return (txnFlags & IS_PREPARED) != 0; + } + + public void setPrepared(boolean prepared) { + if (prepared) { + txnFlags |= IS_PREPARED; + } else { + txnFlags &= ~IS_PREPARED; + } + } + + public void setSuspended(boolean suspended) { + if (suspended) { + txnFlags |= XA_SUSPENDED; + } else { + txnFlags &= ~XA_SUSPENDED; + } + } + + public boolean isSuspended() { + return (txnFlags & XA_SUSPENDED) != 0; + } + + protected void setRollback() { + txnFlags |= PAST_ROLLBACK; + } + + /** + * @return if this transaction has ever executed a rollback. + * A Rollback is an undo of the transaction that can return either to the + * original pre-txn state, or to an intermediate intra-txn state. An abort + * always returns the txn to the pre-txn state. + */ + @Override + public boolean isRolledBack() { + return (txnFlags & PAST_ROLLBACK) != 0; + } + + /** + * Gets a lock on this LSN and, if it is a write lock, saves an abort + * LSN. Caller will set the abortLsn later, after the write lock has been + * obtained. + * + * @throws IllegalStateException via API read/write methods if the txn is + * closed, in theory. However, this should not occur from a user API call, + * because the API methods first call Transaction.getLocker, which will + * throw IllegalStateException if the txn is closed. It might occur, + * however, if the transaction ends in the window between the call to + * getLocker and the lock attempt. + * + * @throws OperationFailureException via API read/write methods if an + * OperationFailureException occurred earlier and set the txn to + * abort-only. + * + * @see Locker#lockInternal + * @Override + */ + @Override + protected LockResult lockInternal(long lsn, + LockType lockType, + boolean noWait, + boolean jumpAheadOfWaiters, + DatabaseImpl database) + throws DatabaseException { + + long timeout = 0; + boolean useNoWait = noWait || defaultNoWait; + synchronized (this) { + checkState(false); + if (!useNoWait) { + timeout = getLockTimeout(); + } + } + + /* Ask for the lock. */ + LockGrantType grant = lockManager.lock + (lsn, this, lockType, timeout, useNoWait, jumpAheadOfWaiters, + database); + + WriteLockInfo info = null; + if (writeInfo != null) { + if (grant != LockGrantType.DENIED && lockType.isWriteLock()) { + synchronized (this) { + info = writeInfo.get(Long.valueOf(lsn)); + /* Save the latest version of this database for undoing. */ + undoDatabases.put(database.getId(), database); + } + } + } + + return new LockResult(grant, info); + } + + /** + * Prepare to undo in the (very unlikely) event that logging succeeds but + * locking fails. Subclasses should call super.preLogWithoutLock. 
[#22875] + */ + @Override + public synchronized void preLogWithoutLock(DatabaseImpl database) { + ensureWriteInfo(); + undoDatabases.put(database.getId(), database); + } + + /** + * @throws IllegalStateException via XAResource + */ + public synchronized int prepare(Xid xid) + throws DatabaseException { + + if ((txnFlags & IS_PREPARED) != 0) { + throw new IllegalStateException + ("prepare() has already been called for Transaction " + + id + "."); + } + + checkState(false); + if (checkCursorsForClose()) { + throw new IllegalStateException + ("Transaction " + id + + " prepare failed because there were open cursors."); + } + + setPrepared(true); + envImpl.getTxnManager().notePrepare(); + if (writeInfo == null) { + return XAResource.XA_RDONLY; + } + + SingleItemEntry prepareEntry = + SingleItemEntry.create(LogEntryType.LOG_TXN_PREPARE, + new TxnPrepare(id,xid)); + /* Flush required. */ + LogManager logManager = envImpl.getLogManager(); + logManager.logForceFlush(prepareEntry, + true, // fsyncrequired + ReplicationContext.NO_REPLICATE); + + return XAResource.XA_OK; + } + + public void commit(Xid xid) + throws DatabaseException { + + commit(Durability.COMMIT_SYNC); + envImpl.getTxnManager().unRegisterXATxn(xid, true); + return; + } + + public void abort(Xid xid) + throws DatabaseException { + + abort(true /* forceFlush */); + envImpl.getTxnManager().unRegisterXATxn(xid, false); + return; + } + + /** + * Call commit() with the default sync configuration property. + */ + public long commit() + throws DatabaseException { + + return commit(defaultDurability); + } + + /** + * Commit this transaction; it involves the following logical steps: + * + * 1. Run pre-commit hook. + * + * 2. Release read locks. + * + * 3. Log a txn commit record and flush the log as indicated by the + * durability policy. + * + * 4. Run the post-commit hook. + * + * 5. Add deleted LN info to IN compressor queue. + * + * 6. Release all write locks + * + * If this transaction has not made any changes to the database, that is, + * it is a read-only transaction, no entry is made to the log. Otherwise, + * a concerted effort is made to log a commit entry, or an abort entry, + * but NOT both. If exceptions are encountered and neither entry can be + * logged, a EnvironmentFailureException is thrown. + * + * Error conditions (in contrast to Exceptions) always result in the + * environment being invalidated and the Error being propagated back to the + * application. In addition, if the environment is made invalid in another + * thread, or the transaction is closed by another thread, then we + * propagate the exception and we do not attempt to abort. This special + * handling is prior to the pre-commit stage. + * + * From an exception handling viewpoint the commit goes through two stages: + * a pre-commit stage spanning steps 1-3, and a post-commit stage + * spanning steps 4-5. The post-commit stage is entered only after a commit + * entry has been successfully logged. + * + * Any exceptions detected during the pre-commit stage results in an + * attempt to log an abort entry. A NULL commitLsn (and abortLsn) + * indicates that we are in the pre-commit stage. Note in particular, that + * if the log of the commit entry (step 3) fails due to an IOException, + * then the lower levels are responsible for wrapping it in a + * EnvironmentFailureException which is propagated directly to the + * application. 
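+     *
+     * An illustrative caller's view (not from this file; envImpl is
+     * assumed to be the owning EnvironmentImpl):
+     *
+     *   Txn txn = Txn.createUserTxn(envImpl, new TransactionConfig());
+     *   // ... transactional reads and writes ...
+     *   long lsn = txn.commit(Durability.COMMIT_SYNC);
+     *
+     * A read-only txn returns NULL_LSN here, because no commit entry is
+     * logged for it.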
+ * + * Exceptions thrown in the post-commit stage are examined to see if they + * are expected and must be propagated back to the caller after completing + * any pending cleanup; some replication exceptions fall into this + * category. If the exception was unexpected, the environment is + * invalidated and a EnvironmentFailureException is thrown instead. The + * current implementation only allows propagation of exceptions from the + * post-commit hook, since we do not expect exceptions from any of the + * other post-commit operations. + * + * When there are multiple failures in commit(), we want the caller to + * receive the first exception, to make the problem manifest. So an effort + * is made to preserve that primary exception and propagate it instead of + * any following, secondary exceptions. The secondary exception is always + * logged in such a circumstance. + * + * @throws IllegalStateException via Transaction.commit if cursors are + * open. + * + * @throws OperationFailureException via Transaction.commit if an + * OperationFailureException occurred earlier and set the txn to + * abort-only. + * + * Note that IllegalStateException should never be thrown by + * Transaction.commit because of a closed txn, since Transaction.commit and + * abort set the Transaction.txn to null and disallow subsequent method + * calls (other than abort). So in a sense the call to checkState(true) in + * this method is unnecessary, although perhaps a good safeguard. + */ + public long commit(Durability durability) + throws DatabaseException { + + /* + * If frozen, throw the appropriate exception, but don't attempt to + * make any changes to cleanup the exception. + */ + checkIfFrozen(true /* isCommit */); + + /* + * A post commit exception that needs to be propagated back to the + * caller. Its throw is delayed until the post commit cleanup has been + * completed. + */ + DatabaseException queuedPostCommitException = null; + + this.commitDurability = durability; + + try { + + synchronized (this) { + checkState(false); + if (checkCursorsForClose()) { + throw new IllegalStateException + ("Transaction " + id + + " commit failed because there were open cursors."); + } + + /* + * Do the pre-commit hook before executing any commit related + * actions like releasing locks. + */ + if (updateLoggedForTxn()) { + preLogCommitHook(); + } + + /* + * Release all read locks, clear lock collection. Optimize for + * the case where there are no read locks. + */ + int numReadLocks = clearReadLocks(); + + /* + * Log the commit if we ever logged any modifications for this + * txn. Refraining from logging empty commits is more efficient + * and makes for fewer edge cases for HA. Note that this is not + * the same as the question of whether we have held any write + * locks. Various scenarios, like RMW txns and + * Cursor.putNoOverwrite can take write locks without having + * actually made any modifications. + * + * If we have outstanding write locks, we must release them + * even if we won't log a commit. TODO: This may have been + * true in the past because of dbhandle write locks that were + * transferred away, but is probably no longer true. + */ + int numWriteLocks = 0; + Collection obsoleteLsns = null; + if (writeInfo != null) { + numWriteLocks = writeInfo.size(); + obsoleteLsns = getObsoleteLsnInfo(); + } + + /* + * If nothing was written to log for this txn, no need to log a + * commit. 
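+             * (updateLoggedForTxn() simply tests lastLoggedLsn != NULL_LSN;
+             * see its definition below.)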
+ */ + if (updateLoggedForTxn()) { + final LogItem commitItem = + logCommitEntry(durability.getLocalSync(), + obsoleteLsns); + commitLsn = commitItem.lsn; + + try { + postLogCommitHook(commitItem); + } catch (DatabaseException hookException) { + if (txnState == Transaction.State.MUST_ABORT) { + throw EnvironmentFailureException. + unexpectedException + ("postLogCommitHook may not set MUST_ABORT", + hookException); + } + if (!propagatePostCommitException(hookException)) { + throw hookException; + } + queuedPostCommitException = hookException; + } + } + + /* + * Set database state for deletes before releasing any write + * locks. + */ + setDeletedDatabaseState(true); + + /* Release all write locks, clear lock collection. */ + if (numWriteLocks > 0) { + releaseWriteLocks(); + } + writeInfo = null; + + /* Unload delete info, but don't wake up the compressor. */ + if ((deleteInfo != null) && deleteInfo.size() > 0) { + envImpl.addToCompressorQueue(deleteInfo.values()); + deleteInfo.clear(); + } + traceCommit(numWriteLocks, numReadLocks); + } + + /* + * Purge any databaseImpls not needed as a result of the commit. Be + * sure to do this outside the synchronization block, to avoid + * conflict w/ checkpointer. + */ + cleanupDatabaseImpls(true); + + /* + * Unregister this txn. Be sure to do this outside the + * synchronization block, to avoid conflict w/ checkpointer. + */ + close(true); + + if (queuedPostCommitException == null) { + TriggerManager.runCommitTriggers(this); + return commitLsn; + } + } catch (Error e) { + envImpl.invalidate(e); + throw e; + } catch (RuntimeException commitException) { + if (!envImpl.isValid()) { + /* Env is invalid, propagate exception. */ + throw commitException; + } + if (commitLsn != NULL_LSN) { + /* An unfiltered post commit exception */ + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_INCOMPLETE, + "Failed after commiting transaction " + + id + + " during post transaction cleanup." + + "Original exception = " + + commitException.getMessage(), + commitException); + } + + /* + * If this transaction is frozen, just bail out, and don't try + * to clean up with an abort. + */ + checkIfFrozen(true); + throwPreCommitException(durability, commitException); + } finally { + + /* + * Final catch-all to ensure state is set, in case close(boolean) + * is not called. + */ + if (txnState == Transaction.State.OPEN) { + setState(Transaction.State.COMMITTED); + } + } + throw queuedPostCommitException; + } + + /** + * Releases all write locks, nulls the lock collection. + */ + protected void releaseWriteLocks() throws DatabaseException { + if (writeInfo == null) { + return; + } + for (Long lsn : writeInfo.keySet()) { + lockManager.release(lsn, this); + } + writeInfo = null; + } + + /** + * Aborts the current transaction and throws the pre-commit Exception, + * wrapped in a Database exception if it isn't already a DatabaseException. + * + * If the attempt at writing the abort entry fails, that is, if neither an + * abort entry, nor a commit entry was successfully written to the log, the + * environment is invalidated and a EnvironmentFailureException is thrown. + * Note that for HA, it's necessary that either a commit or abort entry be + * made in the log, so that it can be replayed to the replicas and the + * transaction is not left in limbo at the other nodes. + * + * @param durability used to determine whether the abort record should be + * flushed to the log. + * @param preCommitException the exception being handled. 
+ * @throws DatabaseException this is the normal return for the method. + */ + private void throwPreCommitException(Durability durability, + RuntimeException preCommitException) { + + try { + abortInternal(durability.getLocalSync() == SyncPolicy.SYNC); + LoggerUtils.traceAndLogException(envImpl, "Txn", "commit", + "Commit of transaction " + id + + " failed", preCommitException); + } catch (Error e) { + envImpl.invalidate(e); + throw e; + } catch (RuntimeException abortT2) { + if (!envImpl.isValid()) { + /* Env already invalid, propagate exception. */ + throw abortT2; + } + String message = "Failed while attempting to commit transaction " + + id + ". The attempt to abort also failed. " + + "The original exception seen from commit = " + + preCommitException.getMessage() + + " The exception from the cleanup = " + + abortT2.getMessage(); + if ((writeInfo != null) && (abortLsn == NULL_LSN)) { + /* Failed to log an abort or commit entry */ + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_INCOMPLETE, + message, preCommitException); + } + + /* + * An abort entry has been written, so we can proceed. Log the + * secondary exception, but throw the more meaningful original + * exception. + */ + LoggerUtils.envLogMsg(Level.WARNING, envImpl, message); + /* The preCommitException exception will be thrown below. */ + } + postLogCommitAbortHook(); + + /* + * Abort entry was written, wrap the exception if necessary and throw + * it. An IllegalStateException is thrown by commit() when cursors are + * open. + */ + if (preCommitException instanceof DatabaseException || + preCommitException instanceof IllegalStateException) { + throw preCommitException; + } + + /* Now throw an exception that shows the commit problem. */ + throw EnvironmentFailureException.unexpectedException + ("Failed while attempting to commit transaction " + + id + ", aborted instead. Original exception = " + + preCommitException.getMessage(), + preCommitException); + } + + /** + * Creates and logs the txn commit entry, enforcing the flush/Sync + * behavior. + * + * @param flushSyncBehavior the local durability requirements + * + * @return the committed log item + * + * @throws DatabaseException + */ + private LogItem logCommitEntry(SyncPolicy flushSyncBehavior, + Collection obsoleteLsns) + throws DatabaseException { + + LogManager logManager = envImpl.getLogManager(); + assert checkForValidReplicatorNodeId(); + + final CommitLogEntry commitEntry = + new CommitLogEntry(new TxnCommit(id, + lastLoggedLsn, + getReplicatorNodeId(), + getDTVLSN())); + + LogParams params = new LogParams(); + params.entry = commitEntry; + params.provisional = Provisional.NO; + params.repContext = repContext; + + params.obsoleteWriteLockInfo = obsoleteLsns; + + switch (flushSyncBehavior) { + + case SYNC: + params.flushRequired = true; + params.fsyncRequired = true; + break; + + case WRITE_NO_SYNC: + params.flushRequired = true; + params.fsyncRequired = false; + break; + + default: + params.flushRequired = false; + params.fsyncRequired = false; + break; + } + + /* + * Do a final pre-log check just before the logging call, to minimize + * the window where the POSSIBLY_COMMITTED state may be set. [#21264] + */ + preLogCommitCheck(); + + /* Log the commit with requested durability. */ + boolean logSuccess = false; + try { + LogItem item = logManager.log(params); + logSuccess = true; + return item; + } catch (RuntimeException e) { + + /* + * Exceptions thrown during logging are expected to be fatal. 
+ * Ensure that the environment is invalidated when a non-fatal + * exception is unexpectedly thrown, since the commit durability is + * unknown [#21264]. + */ + if (envImpl.isValid()) { + throw EnvironmentFailureException.unexpectedException + (envImpl, + "Unexpected non-fatal exception while logging commit", + e); + } + throw e; + } catch (Error e) { + /* Ensure that the environment is invalidated. [#21264] */ + envImpl.invalidate(e); + throw e; + } finally { + + /* + * If logging fails, there is still a possibility that the commit + * is durable. [#21264] + */ + if (!logSuccess) { + setState(Transaction.State.POSSIBLY_COMMITTED); + } + } + } + + /** + * Pre-log check for an invalid environment or interrupted thread (this + * thread may have been interrupted but we haven't found out yet, because + * we haven't done a wait or an I/O) to narrow the time window where a + * commit could become partially durable. See getPartialDurability. + * [#21264] + */ + private void preLogCommitCheck() { + if (Thread.interrupted()) { + throw new ThreadInterruptedException + (envImpl, "Thread interrupted prior to logging the commit"); + } + envImpl.checkIfInvalid(); + } + + /* + * A replicated txn must know the node of the master which issued it. + */ + private boolean checkForValidReplicatorNodeId() { + if (isReplicated()) { + if (getReplicatorNodeId() == 0) { + return false; + } + + /* + return (repContext.getClientVLSN() != null) && + (!repContext.getClientVLSN().isNull()); + */ + } + return true; + } + + /** + * Extract obsolete LSN info from writeInfo. Do not add a WriteInfo if a + * slot with a deleted LN was reused (abortKnownDeleted), to avoid double + * counting. And count each abortLSN only once. + */ + private Collection getObsoleteLsnInfo() { + + /* + * A Map is used to prevent double counting abortLNS if there is more + * then one WriteLockInfo with the same abortLSN in this txn, which can + * occur when the txn has performed more than one CUD ops on the same + * record. + */ + Map map = new HashMap(); + + for (WriteLockInfo info : writeInfo.values()) { + maybeCountObsoleteLSN(map, info); + } + + return map.values(); + } + + private void maybeCountObsoleteLSN( + Map obsoleteLsnSet, + WriteLockInfo info) { + + if (info.getAbortLsn() == DbLsn.NULL_LSN || + info.getAbortKnownDeleted()) { + return; + } + + if ((info.getDb() != null) && + info.getDb().isLNImmediatelyObsolete()) { + /* Was already counted obsolete during logging. */ + return; + } + + if (info.getAbortData() != null) { + /* Was already counted obsolete during logging. */ + return; + } + + final Long longLsn = Long.valueOf(info.getAbortLsn()); + + if (!obsoleteLsnSet.containsKey(longLsn)) { + obsoleteLsnSet.put(longLsn, info); + } + } + + /** + * Abort this transaction. This flavor does not return an LSN, nor does it + * require the logging of a durable abort record. + */ + public void abort() + throws DatabaseException { + + if (isClosed()) { + return; + } + abort(false /* forceFlush */); + } + + /** + * Abort this transaction. Steps are: + * 1. Release LN read locks. + * 2. Write a txn abort entry to the log. This is used for log file + * cleaning optimization and replication, and there's no need to + * guarantee a flush to disk. + * 3. Find the last LN log entry written for this txn, and use that + * to traverse the log looking for nodes to undo. For each node, + * use the same undo logic as recovery to undo the transaction. Note + * that we walk the log in order to undo in reverse order of the + * actual operations. 
For example, suppose the txn did this: + * delete K1/D1 (in LN 10) + * create K1/D1 (in LN 20) + * If we process LN10 before LN 20, we'd inadvertently create a + * duplicate tree of "K1", which would be fatal for the mapping tree. + * 4. Release the write lock for this LN. + * + * An abort differs from a rollback in that the former always undoes every + * operation, and returns it to the pre-txn state. A rollback may return + * the txn to an intermediate state, or to the pre-txn state. + */ + public long abort(boolean forceFlush) + throws DatabaseException { + + return abortInternal(forceFlush); + } + + /** + * @throws IllegalStateException via Transaction.abort if cursors are open. + * + * Note that IllegalStateException should never be thrown by + * Transaction.abort because of a closed txn, since Transaction.commit and + * abort set the Transaction.txn to null and disallow subsequent method + * calls (other than abort). So in a sense the call to checkState(true) in + * this method is unnecessary, although perhaps a good safeguard. + */ + private long abortInternal(boolean forceFlush) + throws DatabaseException { + + /* + * If frozen, throw the appropriate exception, but don't attempt to + * make any changes to cleanup the exception. + */ + boolean hooked = false; + checkIfFrozen(false); + + try { + try { + synchronized (this) { + checkState(true); + + /* + * State is set to ABORTED before undo, so that other + * threads cannot access this txn in the middle of undo. + * [#19321] + */ + setState(Transaction.State.ABORTED); + + /* Log the abort. */ + if (updateLoggedForTxn()) { + preLogAbortHook(); + hooked = true; + assert checkForValidReplicatorNodeId(); + assert (commitLsn == NULL_LSN) && + (abortLsn == NULL_LSN); + final AbortLogEntry abortEntry = + new AbortLogEntry( + new TxnAbort(id, lastLoggedLsn, + getReplicatorNodeId(), + getDTVLSN())); + abortLsn = forceFlush ? + envImpl.getLogManager(). + logForceFlush(abortEntry, + true /* fsyncRequired */, + repContext) : + envImpl.getLogManager().log(abortEntry, + repContext); + } + } + } finally { + if (hooked) { + postLogAbortHook(); + hooked = false; + } + + /* + * undo must be called outside the synchronization block to + * preserve locking order: For non-blocking locks, the BIN + * is latched before synchronizing on the Txn. If we were + * to synchronize while calling undo, this order would be + * reversed. + */ + undo(); + } + + /* + * Purge any databaseImpls not needed as a result of the abort. Be + * sure to do this outside the synchronization block, to avoid + * conflict w/ checkpointer. + */ + cleanupDatabaseImpls(false); + + synchronized (this) { + boolean openCursors = checkCursorsForClose(); + Logger logger = envImpl.getLogger(); + if (logger.isLoggable(Level.FINE)) { + LoggerUtils.fine(logger, envImpl, + "Abort: id = " + id + " openCursors= " + + openCursors); + } + + /* Invalidate any Db handles protected by this txn. */ + if (openedDatabaseHandles != null) { + for (Database handle : openedDatabaseHandles) { + DbInternal.invalidate(handle); + } + } + /* Delay the exception until cleanup is complete. */ + if (openCursors) { + envImpl.checkIfInvalid(); + throw new IllegalStateException + ("Transaction " + id + + " detected open cursors while aborting"); + } + } + } finally { + + /* + * The close method, which unregisters the txn, and must be called + * after undo and cleanupDatabaseImpls. 
A transaction must remain + * registered until all actions that modify/dirty INs are complete; + * see Checkpointer class comments for details. [#19321] + * + * close must be called, even though the state has already been set + * to ABORTED above, for two reasons: 1) To unregister the txn, and + * 2) to allow subclasses to override the close method. + * + * close must be called outside the synchronization block to avoid + * conflict w/ checkpointer. + */ + close(false); + + if (abortLsn != NULL_LSN) { + TriggerManager.runAbortTriggers(this); + } + } + + return abortLsn; + } + + /** + * Undo write operations and release all resources held by the transaction. + */ + protected void undo() + throws DatabaseException { + + /* + * We need to undo, or reverse the effect of any applied operations on + * the in-memory btree. We also need to make the latest version of any + * record modified by the transaction obsolete. + */ + Set alreadyUndoneLsns = new HashSet(); + Set alreadyUndoneSlots = new TreeSet(); + TreeLocation location = new TreeLocation(); + long undoLsn = lastLoggedLsn; + + try { + while (undoLsn != NULL_LSN) { + UndoReader undo = + UndoReader.create(envImpl, undoLsn, undoDatabases); + /* + * Only undo the first instance we see of any node. All log + * entries for a given node have the same abortLsn, so we don't + * need to undo it multiple times. + */ + if (firstInstance( + alreadyUndoneLsns, alreadyUndoneSlots, undo)) { + + RecoveryManager.abortUndo( + envImpl.getLogger(), Level.FINER, location, + undo.db, undo.logEntry, undoLsn); + + countObsoleteExact(undoLsn, undo, isRolledBack()); + } + + /* Move on to the previous log entry for this txn. */ + undoLsn = undo.logEntry.getUserTxn().getLastLsn(); + } + } catch (DatabaseException e) { + String lsnMsg = "LSN=" + DbLsn.getNoFormatString(undoLsn); + LoggerUtils.traceAndLogException(envImpl, "Txn", "undo", + lsnMsg, e); + e.addErrorMessage(lsnMsg); + throw e; + } catch (RuntimeException e) { + throw EnvironmentFailureException.unexpectedException + ("Txn undo for LSN=" + DbLsn.getNoFormatString(undoLsn), e); + } + + /* + * Release all read locks after the undo (since the undo may need to + * read in mapLNs). + */ + if (readLocks != null) { + clearReadLocks(); + } + + /* Set database state for deletes before releasing any write locks. */ + setDeletedDatabaseState(false); + + /* Throw away write lock collection, don't retain any locks. */ + Set empty = Collections.emptySet(); + clearWriteLocks(empty); + + /* + * Let the delete related info (binreferences and dbs) get gc'ed. Don't + * explicitly iterate and clear -- that's far less efficient, gives GC + * wrong input. + */ + deleteInfo = null; + } + + /** + * For an explanation of obsoleteDupsAllowed, see ReplayTxn.rollback. + */ + private void countObsoleteExact(long undoLsn, UndoReader undo, + boolean obsoleteDupsAllowed) { + /* + * "Immediately obsolete" LNs are counted as obsolete when they are + * logged, so no need to repeat here. + */ + if (undo.logEntry.isImmediatelyObsolete(undo.db)) { + return; + } + + LogManager logManager = envImpl.getLogManager(); + + if (obsoleteDupsAllowed) { + logManager.countObsoleteNodeDupsAllowed + (undoLsn, + null, // type + undo.logEntrySize, + undo.db); + } else { + logManager.countObsoleteNode(undoLsn, + null, // type + undo.logEntrySize, + undo.db, + true); // countExact + } + } + + /** + * Release any write locks that are not in the retainedNodes set. 
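+     *
+     * An abort releases everything by passing an empty set (see undo
+     * above); a partial rollback is expected to pass the LSNs of writes
+     * the still-open transaction must keep locked (see ReplayTxn.rollback).
+     */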
+     */
+    protected void clearWriteLocks(Set<Long> retainedNodes)
+        throws DatabaseException {
+
+        if (writeInfo == null) {
+            return;
+        }
+
+        /* Release all write locks, clear lock collection. */
+        Iterator<Map.Entry<Long, WriteLockInfo>> iter =
+            writeInfo.entrySet().iterator();
+        while (iter.hasNext()) {
+            Map.Entry<Long, WriteLockInfo> entry = iter.next();
+            Long lsn = entry.getKey();
+
+            /* Release any write locks not in the retained set. */
+            if (!retainedNodes.contains(lsn)) {
+                lockManager.release(lsn, this);
+                iter.remove();
+            }
+        }
+
+        if (writeInfo.size() == 0) {
+            writeInfo = null;
+        }
+    }
+
+    protected int clearReadLocks()
+        throws DatabaseException {
+
+        int numReadLocks = 0;
+        if (readLocks != null) {
+            numReadLocks = readLocks.size();
+            Iterator<Long> iter = readLocks.iterator();
+            while (iter.hasNext()) {
+                Long rLockNid = iter.next();
+                lockManager.release(rLockNid, this);
+            }
+            readLocks = null;
+        }
+        return numReadLocks;
+    }
+
+    /**
+     * Called by LNLogEntry.postLogWork() via the LogManager (while still
+     * under the LWL) after a transactional LN is logged. Also called by the
+     * recovery manager when logging a transaction-aware object.
+     *
+     * This method is synchronized by the caller, by being called within the
+     * log latch. Record the last LSN for this transaction, to create the
+     * transaction chain, and also record the LSN in the write info for abort
+     * logic.
+     */
+    public synchronized void addLogInfo(long lastLsn) {
+        /* Save the last LSN for maintaining the transaction LSN chain. */
+        lastLoggedLsn = lastLsn;
+
+        /*
+         * Save handle to LSN for aborts.
+         *
+         * If this is the first LSN, save it for calculating the first LSN
+         * of any active txn, for checkpointing.
+         */
+        if (firstLoggedLsn == NULL_LSN) {
+            firstLoggedLsn = lastLsn;
+        }
+    }
+
+    /**
+     * [#16861] The firstLoggedLsn field is volatile to avoid making
+     * getFirstActiveLsn synchronized, which causes a deadlock in HA.
+     *
+     * @return first logged LSN, to aid recovery undo
+     */
+    public long getFirstActiveLsn() {
+        return firstLoggedLsn;
+    }
+
+    /**
+     * @return true if this txn has logged any log entries.
+     */
+    protected boolean updateLoggedForTxn() {
+        return (lastLoggedLsn != DbLsn.NULL_LSN);
+    }
+
+    /**
+     * @param dbImpl databaseImpl to remove
+     * @param deleteAtCommit true if this databaseImpl should be cleaned on
+     * commit, false if it should be cleaned on abort.
+     */
+    @Override
+    public synchronized void markDeleteAtTxnEnd(DatabaseImpl dbImpl,
+                                                boolean deleteAtCommit) {
+        int delta = 0;
+        if (deletedDatabases == null) {
+            deletedDatabases = new HashSet<DatabaseCleanupInfo>();
+            delta += MemoryBudget.HASHSET_OVERHEAD;
+        }
+
+        deletedDatabases.add(new DatabaseCleanupInfo(dbImpl,
+                                                     deleteAtCommit));
+        delta += MemoryBudget.HASHSET_ENTRY_OVERHEAD +
+            MemoryBudget.OBJECT_OVERHEAD;
+        updateMemoryUsage(delta);
+
+        /* releaseDb will be called by cleanupDatabaseImpls. */
+    }
+
+    public Set<DatabaseCleanupInfo> getDeletedDatabases() {
+        return deletedDatabases;
+    }
+
+    /*
+     * Leftover databaseImpls that are a by-product of database operations
+     * like removeDatabase() and truncateDatabase() will be deleted after
+     * the write locks are released. However, do set the database state
+     * appropriately before the locks are released.
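+     *
+     * Sketch of the resulting abort-path ordering (mirrors the calls made
+     * by abortInternal and undo above; illustrative only):
+     *
+     *   setDeletedDatabaseState(false); // before releasing write locks
+     *   clearWriteLocks(Collections.<Long>emptySet());
+     *   cleanupDatabaseImpls(false);    // outside txn synchronization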
+     */
+    protected void setDeletedDatabaseState(boolean isCommit) {
+        if (deletedDatabases != null) {
+            Iterator<DatabaseCleanupInfo> iter = deletedDatabases.iterator();
+            while (iter.hasNext()) {
+                DatabaseCleanupInfo info = iter.next();
+                if (info.deleteAtCommit == isCommit) {
+                    info.dbImpl.startDeleteProcessing();
+                }
+            }
+        }
+    }
+
+    /**
+     * Clean up leftover databaseImpls that are a by-product of database
+     * operations like removeDatabase(), truncateDatabase().
+     *
+     * This method must be called outside the synchronization on this txn,
+     * because it calls finishDeleteProcessing, which gets the TxnManager's
+     * allTxns latch. The checkpointer also gets the allTxns latch, and
+     * within that latch, needs to synchronize on individual txns, so we
+     * must avoid a latching hierarchy conflict.
+     *
+     * [#16861] FUTURE: Perhaps this special handling is no longer needed,
+     * now that firstLoggedLsn is volatile and getFirstActiveLsn is not
+     * synchronized.
+     */
+    protected void cleanupDatabaseImpls(boolean isCommit)
+        throws DatabaseException {
+
+        if (deletedDatabases != null) {
+            /* Make a copy of the deleted databases while synchronized. */
+            DatabaseCleanupInfo[] infoArray;
+            synchronized (this) {
+                infoArray = new DatabaseCleanupInfo[deletedDatabases.size()];
+                deletedDatabases.toArray(infoArray);
+            }
+            for (DatabaseCleanupInfo info : infoArray) {
+                if (info.deleteAtCommit == isCommit) {
+
+                    /*
+                     * If deletedDatabases contains the same database with
+                     * different deleteAtCommit values, first release the
+                     * database, then delete it. [#19636]
+                     */
+                    if (checkRepeatedDeletedDB(infoArray, info)) {
+                        envImpl.getDbTree().releaseDb(info.dbImpl);
+                    }
+                    /* releaseDb will be called by finishDeleteProcessing. */
+                    info.dbImpl.finishDeleteProcessing();
+                } else if (!checkRepeatedDeletedDB(infoArray, info)) {
+
+                    /*
+                     * If deletedDatabases contains the same database with
+                     * different deleteAtCommit values, do nothing. [#19636]
+                     */
+                    envImpl.getDbTree().releaseDb(info.dbImpl);
+                }
+            }
+            deletedDatabases = null;
+        }
+    }
+
+    private boolean checkRepeatedDeletedDB(DatabaseCleanupInfo[] infoArray,
+                                           DatabaseCleanupInfo info) {
+        for (DatabaseCleanupInfo element : infoArray) {
+            if (element.dbImpl.getId().equals(info.dbImpl.getId()) &&
+                element.deleteAtCommit != info.deleteAtCommit) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    private synchronized void ensureWriteInfo() {
+        if (writeInfo == null) {
+            writeInfo = new HashMap<Long, WriteLockInfo>();
+            undoDatabases = new HashMap<DatabaseId, DatabaseImpl>();
+            updateMemoryUsage(MemoryBudget.TWOHASHMAPS_OVERHEAD);
+        }
+    }
+
+    /**
+     * Add lock to the appropriate queue.
+     */
+    @Override
+    protected synchronized void addLock(Long lsn,
+                                        LockType type,
+                                        LockGrantType grantStatus) {
+        if (type.isWriteLock()) {
+
+            ensureWriteInfo();
+            writeInfo.put(lsn, new WriteLockInfo());
+
+            int delta = WRITE_LOCK_OVERHEAD;
+
+            if ((grantStatus == LockGrantType.PROMOTION) ||
+                (grantStatus == LockGrantType.WAIT_PROMOTION)) {
+                readLocks.remove(lsn);
+                delta -= READ_LOCK_OVERHEAD;
+            }
+            updateMemoryUsage(delta);
+        } else {
+            addReadLock(lsn);
+        }
+    }
+
+    private void addReadLock(Long lsn) {
+        int delta = 0;
+        if (readLocks == null) {
+            readLocks = new HashSet<Long>();
+            delta = MemoryBudget.HASHSET_OVERHEAD;
+        }
+
+        readLocks.add(lsn);
+        delta += READ_LOCK_OVERHEAD;
+        updateMemoryUsage(delta);
+    }
+
+    /**
+     * Remove the lock from the set owned by this transaction. If specified
+     * to LockManager.release, the lock manager will call this when it's
+     * releasing a lock.
Usually done because the transaction doesn't need to really keep + * the lock, i.e for a deleted record. + */ + @Override + protected + synchronized void removeLock(long lsn) { + + /* + * We could optimize by passing the lock type so we know which + * collection to look in. Be careful of demoted locks, which have + * shifted collection. + * + * Don't bother updating memory utilization here -- we'll update at + * transaction end. + */ + if ((readLocks != null) && + readLocks.remove(lsn)) { + updateMemoryUsage(0 - READ_LOCK_OVERHEAD); + } else if ((writeInfo != null) && + (writeInfo.remove(lsn) != null)) { + updateMemoryUsage(0 - WRITE_LOCK_OVERHEAD); + } + } + + /** + * A lock is being demoted. Move it from the write collection into the read + * collection. + */ + @Override + @SuppressWarnings("unused") + synchronized void moveWriteToReadLock(long lsn, Lock lock) { + + boolean found = false; + if ((writeInfo != null) && + (writeInfo.remove(lsn) != null)) { + found = true; + updateMemoryUsage(0 - WRITE_LOCK_OVERHEAD); + } + + assert found : "Couldn't find lock for Node " + lsn + + " in writeInfo Map."; + addReadLock(lsn); + } + + private void updateMemoryUsage(int delta) { + inMemorySize += delta; + accumulatedDelta += delta; + if (accumulatedDelta > ACCUMULATED_LIMIT || + accumulatedDelta < -ACCUMULATED_LIMIT) { + envImpl.getMemoryBudget().updateTxnMemoryUsage(accumulatedDelta); + accumulatedDelta = 0; + } + } + + /** + * Returns the amount of memory currently budgeted for this transaction. + */ + int getBudgetedMemorySize() { + return inMemorySize - accumulatedDelta; + } + + /** + * @return the WriteLockInfo for this node. + */ + @Override + public WriteLockInfo getWriteLockInfo(long lsn) { + WriteLockInfo wli = null; + synchronized (this) { + if (writeInfo != null) { + wli = writeInfo.get(lsn); + } + } + + if (wli == null) { + throw EnvironmentFailureException.unexpectedState + ("writeInfo is null in Txn.getWriteLockInfo"); + } + return wli; + } + + /** + * Is always transactional. + */ + @Override + public boolean isTransactional() { + return true; + } + + /** + * Determines whether this is an auto transaction. + */ + public boolean isAutoTxn() { + return isAutoCommit; + } + + @Override + public boolean isReadOnly() { + return readOnly; + } + + /** + * Is serializable isolation if so configured. + */ + @Override + public boolean isSerializableIsolation() { + return serializableIsolation; + } + + /** + * Is read-committed isolation if so configured. + */ + @Override + public boolean isReadCommittedIsolation() { + return readCommittedIsolation; + } + + /** + * Returns true if the sync api was used for configuration + */ + public boolean getExplicitSyncConfigured() { + return explicitSyncConfigured; + } + + /** + * Returns true if the durability api was used for configuration. + */ + public boolean getExplicitDurabilityConfigured() { + return explicitDurabilityConfigured; + } + + /** + * This is a transactional locker. + */ + @Override + public Txn getTxnLocker() { + return this; + } + + /** + * Returns 'this', since this locker holds no non-transactional locks. + * Since this is returned, sharing of locks is obviously supported. + */ + @Override + public Locker newNonTxnLocker() { + return this; + } + + /** + * This locker holds no non-transactional locks. + */ + @Override + public void releaseNonTxnLocks() { + } + + /** + * Created transactions do nothing at the end of the operation. 
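+     *
+     * (Sketch: only auto-commit txns act in operationEnd below; a
+     * user-created txn is ended by an explicit Transaction.commit() or
+     * Transaction.abort() call instead:
+     *
+     *   txn.operationEnd(operationOK); // commits/aborts only if isAutoTxn()
+     * )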
+ */ + @Override + public void nonTxnOperationEnd() { + } + + /* + * @see com.sleepycat.je.txn.Locker#operationEnd(boolean) + */ + @Override + public void operationEnd(boolean operationOK) + throws DatabaseException { + + if (!isAutoCommit) { + /* Created transactions do nothing at the end of the operation. */ + return; + } + + if (operationOK) { + commit(); + } else { + abort(false); // no sync required + } + } + + /** + * Called at the end of a database open operation to add the database + * handle to a user txn. When a user txn aborts, handles opened using that + * txn are invalidated. + * + * A non-txnal locker or auto-commit txn does not retain the handle, + * because the open database operation will succeed or fail atomically and + * no database invalidation is needed at a later time. + * + * @see HandleLocker + */ + @Override + public synchronized void addOpenedDatabase(Database dbHandle) { + if (isAutoCommit) { + return; + } + if (openedDatabaseHandles == null) { + openedDatabaseHandles = new HashSet(); + } + openedDatabaseHandles.add(dbHandle); + } + + /** + * Increase the counter if a new Cursor is opened under this transaction. + */ + @Override + @SuppressWarnings("unused") + public void registerCursor(CursorImpl cursor) { + cursors.getAndIncrement(); + } + + /** + * Decrease the counter if a Cursor is closed under this transaction. + */ + @Override + @SuppressWarnings("unused") + public void unRegisterCursor(CursorImpl cursor) { + cursors.getAndDecrement(); + } + + /* + * Txns always require locking. + */ + @Override + public boolean lockingRequired() { + return true; + } + + /** + * Check if all cursors associated with the txn are closed. If not, those + * open cursors will be forcibly closed. + * @return true if open cursors exist + */ + private boolean checkCursorsForClose() { + return (cursors.get() != 0); + } + + /** + * stats + */ + @Override + public StatGroup collectStats() { + StatGroup stats = + new StatGroup("Transaction lock counts" , + "Read and write locks held by transaction " + id); + + IntStat statReadLocks = new IntStat(stats, LOCK_READ_LOCKS); + IntStat statWriteLocks = new IntStat(stats, LOCK_WRITE_LOCKS); + IntStat statTotalLocks = new IntStat(stats, LOCK_TOTAL); + + synchronized (this) { + int nReadLocks = (readLocks == null) ? 0 : readLocks.size(); + statReadLocks.add(nReadLocks); + int nWriteLocks = (writeInfo == null) ? 0 : writeInfo.size(); + statWriteLocks.add(nWriteLocks); + statTotalLocks.add(nReadLocks + nWriteLocks); + } + + return stats; + } + + /** + * Set the state of a transaction to abort-only. Should ONLY be called + * by OperationFailureException. + */ + @Override + public void setOnlyAbortable(OperationFailureException cause) { + assert cause != null; + setState(Transaction.State.MUST_ABORT); + onlyAbortableCause = cause; + } + + /** + * Set the state of a transaction's IMPORTUNATE bit. + */ + @Override + public void setImportunate(boolean importunate) { + if (importunate) { + txnFlags |= IMPORTUNATE; + } else { + txnFlags &= ~IMPORTUNATE; + } + } + + /** + * Get the state of a transaction's IMPORTUNATE bit. + */ + @Override + public boolean getImportunate() { + return (txnFlags & IMPORTUNATE) != 0; + } + + /** + * Checks for preemption in this locker and all its child buddies. Does + * NOT call checkPreempted on its child buddies, since this would cause an + * infinite recursion. + */ + @Override + public void checkPreempted(final Locker allowPreemptedLocker) + throws OperationFailureException { + + /* First check this locker. 
*/ + throwIfPreempted(allowPreemptedLocker); + + /* + * Then check our buddy lockers. It's OK to call throwIfPreempted while + * synchronized on buddyLockers, since it takes no locks. + */ + if (buddyLockers != null) { + synchronized (buddyLockers) { + for (BuddyLocker buddy : buddyLockers) { + buddy.throwIfPreempted(allowPreemptedLocker); + } + } + } + } + + /** + * Throw an exception if the transaction is not open. + * + * If calledByAbort is true, it means we're being called from abort(). But + * once closed, a Transaction never calls abort(). See comment at the top + * of abortInternal. + * + * Caller must invoke with "this" synchronized. + */ + @Override + public void checkState(boolean calledByAbort) + throws DatabaseException { + + switch (txnState) { + + case OPEN: + return; + + case MUST_ABORT: + + /* Don't complain if the user is doing what we asked. */ + if (calledByAbort) { + return; + } + + /* + * Throw the original exception that caused the txn to be set + * to abort-only, wrapped in a new exception of the same class. + * That way, both stack traces are available and the user can + * specify a meaningful class in their catch statement. + * + * It's ok for FindBugs to whine about id not being + * synchronized. + */ + throw onlyAbortableCause.wrapSelf + ("Transaction " + id + + " must be aborted, caused by: " + onlyAbortableCause); + + default: + /* All other states are equivalent to closed. */ + + /* + * It's ok for FindBugs to whine about id not being + * synchronized. + */ + throw new IllegalStateException + ("Transaction " + id + " has been closed."); + } + } + + /** + * Close and unregister this txn. + */ + public void close(boolean isCommit) { + + if (isCommit) { + /* Set final state to COMMITTED, if not set earlier. */ + if (txnState == Transaction.State.OPEN) { + setState(Transaction.State.COMMITTED); + } + } else { + /* This was set earlier by abort, but here also for safety. */ + setState(Transaction.State.ABORTED); + } + + /* + * UnregisterTxn must be called outside the synchronization on this + * txn, because it gets the TxnManager's allTxns latch. The + * checkpointer also gets the allTxns latch, and within that latch, + * needs to synchronize on individual txns, so we must avoid a latching + * hierarchy conflict. + * + * [#16861] FUTURE: Perhaps this special handling is no longer needed, + * now that firstLoggedLsn is volatile and getFirstActiveLsn is not + * synchronized. + */ + envImpl.getTxnManager().unRegisterTxn(this, isCommit); + + /* Set the superclass Locker state to closed. */ + close(); + } + + private synchronized void setState(Transaction.State state) { + txnState = state; + } + + public Transaction.State getState() { + return txnState; + } + + @Override + public boolean isValid() { + return txnState == Transaction.State.OPEN; + } + + public boolean isClosed() { + return txnState != Transaction.State.OPEN && + txnState != Transaction.State.MUST_ABORT; + } + + public boolean isOnlyAbortable() { + return txnState == Transaction.State.MUST_ABORT; + } + + /** + * This method is overridden by HA txn subclasses and returns the node id + * of the master node that committed or aborted the txn. + */ + protected int getReplicatorNodeId() { + /* Non replicated txns don't use a node ID. */ + return 0; + } + + /** + * This method is overridden by replication txn subclasses and returns the + * DTVLSN associated with the Txn. + */ + protected long getDTVLSN() { + /* Non replicated txns don't use VLSNs. 
*/ + return VLSN.UNINITIALIZED_VLSN_SEQUENCE; + } + + /* + * Log support + */ + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + return Collections.emptyList(); + } + + @Override + public int getLogSize() { + return getLogSize(LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer) { + writeToLog( + logBuffer, LogEntryType.LOG_VERSION, false /*forReplication*/); + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return LogUtils.getPackedLongLogSize(id) + + LogUtils.getPackedLongLogSize( + forReplication ? DbLsn.NULL_LSN : lastLoggedLsn); + } + + /** + * It's ok for FindBugs to whine about id not being synchronized. + */ + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + LogUtils.writePackedLong(logBuffer, id); + LogUtils.writePackedLong(logBuffer, + forReplication ? DbLsn.NULL_LSN : lastLoggedLsn); + } + + /** + * It's ok for FindBugs to whine about id not being synchronized. + */ + @Override + public void readFromLog(ByteBuffer logBuffer, int entryVersion) { + id = LogUtils.readLong(logBuffer, (entryVersion < 6)); + lastLoggedLsn = LogUtils.readLong(logBuffer, (entryVersion < 6)); + } + + @Override + public boolean hasReplicationFormat() { + return false; + } + + @Override + public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer, + final int srcVersion, + final int destVersion) { + return false; + } + + @Override + @SuppressWarnings("unused") + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + sb.append(DbLsn.toString(lastLoggedLsn)); + sb.append(""); + } + + @Override + public long getTransactionId() { + return getId(); + } + + @Override + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof Txn)) { + return false; + } + + return id == ((Txn) other).id; + } + + /** + * Send trace messages to the java.util.logger. Don't rely on the logger + * alone to conditionalize whether we send this message, we don't even want + * to construct the message if the level is not enabled. The string + * construction can be numerous enough to show up on a performance profile. + */ + private void traceCommit(int numWriteLocks, int numReadLocks) { + Logger logger = envImpl.getLogger(); + if (logger.isLoggable(Level.FINE)) { + StringBuilder sb = new StringBuilder(); + sb.append(" Commit: id = ").append(id); + sb.append(" numWriteLocks=").append(numWriteLocks); + sb.append(" numReadLocks = ").append(numReadLocks); + LoggerUtils.fine(logger, envImpl, sb.toString()); + } + } + + /** + * Store information about a DatabaseImpl that will have to be + * purged at transaction commit or abort. This handles cleanup after + * operations like Environment.truncateDatabase, + * Environment.removeDatabase. Cleanup like this is done outside the + * usual transaction commit or node undo processing, because + * the mapping tree is always auto Txn'ed to avoid deadlock and is + * essentially non-transactional. + */ + public static class DatabaseCleanupInfo { + DatabaseImpl dbImpl; + + /* if true, clean on commit. If false, clean on abort. 
*/ + boolean deleteAtCommit; + + DatabaseCleanupInfo(DatabaseImpl dbImpl, + boolean deleteAtCommit) { + this.dbImpl = dbImpl; + this.deleteAtCommit = deleteAtCommit; + } + + /** + * Make sure that a set of DatabaseCleanupInfo only has one entry + * per databaseImpl/deleteAtCommit tuple. + */ + @Override + public boolean equals(Object obj) { + if (!(obj instanceof DatabaseCleanupInfo)) { + return false; + } + + DatabaseCleanupInfo other = (DatabaseCleanupInfo) obj; + return (dbImpl.equals(other.dbImpl)) && + (deleteAtCommit == other.deleteAtCommit); + } + + @Override + public int hashCode() { + return dbImpl.hashCode(); + } + } + + /* Transaction hooks used for replication support. */ + + /** + * A replicated environment introduces some new considerations when + * entering a transaction scope via an Environment.transactionBegin() + * operation. + * + * On a Replica, the transactionBegin() operation must wait until the + * Replica has synched up to where it satisfies the ConsistencyPolicy that + * is in effect. + * + * On a Master, the transactionBegin() must wait until the Feeder has + * sufficient connections to ensure that it can satisfy the + * ReplicaAckPolicy, since if it does not, it will fail at commit() and the + * work done in the transaction will need to be undone. + * + * This hook provides the mechanism for implementing the above support for + * replicated transactions. It ignores all non-replicated transactions. + * + * The hook throws ReplicaStateException, if a Master switches to a Replica + * state while waiting for its Replicas connections. Changes from a Replica + * to a Master are handled transparently to the application. Exceptions + * manifest themselves as DatabaseException at the interface to minimize + * use of Replication based exceptions in core JE. + * + * @param config the transaction config that applies to the txn + * + * @throws DatabaseException if there is a failure + */ + protected void txnBeginHook(TransactionConfig config) + throws DatabaseException { + + /* Overridden by Txn subclasses when appropriate */ + } + + /** + * This hook is invoked before the commit of a transaction that made + * changes to a replicated environment. It's invoked for transactions + * executed on the master or replica, but is only relevant to transactions + * being done on the master. When invoked for a transaction on a replica + * the implementation just returns. + * + * The hook is invoked at a very specific point in the normal commit + * sequence: immediately before the commit log entry is written to the log. + * It represents the last chance to abort the transaction and provides an + * opportunity to make some final checks before allowing the commit can go + * ahead. Note that it should be possible to abort the transaction at the + * time the hook is invoked. + * + * After invocation of the "pre" hook one of the "post" hooks: + * postLogCommitHook or postLogAbortHook must always be invoked. + * + * Exceptions thrown by this hook result in the transaction being aborted + * and the exception being propagated back to the application. + * + * @throws DatabaseException if there was a problem and that the + * transaction should be aborted. + */ + protected void preLogCommitHook() + throws DatabaseException { + + /* Overridden by Txn subclasses when appropriate */ + } + + /** + * This hook is invoked after the commit record has been written to the + * log, but before write locks have been released, so that other + * application cannot see the changes made by the transaction. 
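+     *
+     * (Hook-ordering sketch for the commit path, using the methods defined
+     * here; illustrative only:
+     *
+     *   preLogCommitHook();
+     *   // the commit record is written to the log
+     *   postLogCommitHook(commitItem);  // on success
+     *   postLogCommitAbortHook();       // instead, if the commit fails
+     * )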
At this + * point the transaction has been committed by the Master. + * + * Exceptions thrown by this hook result in the transaction being completed + * on the Master, that is, locks are released, etc. and the exception is + * propagated back to the application. + * + * @param commitItem the commit item that was just logged + * + * @throws DatabaseException to indicate that there was a replication + * related problem that needs to be communicated back to the application. + */ + protected void postLogCommitHook(LogItem commitItem) + throws DatabaseException { + + /* Overridden by Txn subclasses when appropriate */ + } + + protected void preLogAbortHook() + throws DatabaseException { + + /* Override by Txn subclasses when appropriate */ + } + + /** + * Invoked if the transaction associated with the preLogCommitHook was + * subsequently aborted, for example due to a lack of disk space. This + * method is responsible for any cleanup that may need to be done as a + * result of the abort. + * + * Note that only one of the "post" hooks (commit or abort) is invoked + * following the invocation of the "pre" hook. + */ + protected void postLogCommitAbortHook() { + /* Overridden by Txn subclasses when appropriate */ + } + + protected void postLogAbortHook() { + /* Overridden by Txn subclasses when appropriate */ + } + + /** + * Returns the CommitToken associated with a successful replicated commit. + * + * @see com.sleepycat.je.Transaction#getCommitToken + */ + public CommitToken getCommitToken() { + return null; + } + + /** + * Identifies exceptions that may be propagated back to the caller during + * the postCommit phase of a transaction commit. + * + * @param postCommitException the exception being evaluated + * + * @return true if the exception must be propagated back to the caller, + * false if the exception indicates there is a serious problem with the + * commit operation and the environment should be invalidated. + */ + protected boolean + propagatePostCommitException(DatabaseException postCommitException) { + return false; + } + + /** + * Use the marker Sets to record whether this is the first time we've see + * this logical node. + */ + private boolean firstInstance(Set seenLsns, + Set seenSlots, + UndoReader undo) { + final LNLogEntry undoEntry = undo.logEntry; + final long abortLsn1 = undoEntry.getAbortLsn(); + if (abortLsn1 != DbLsn.NULL_LSN) { + return seenLsns.add(abortLsn1); + } + final CompareSlot slot = new CompareSlot(undo.db, undoEntry); + return seenSlots.add(slot); + } + + /** + * Accumulates the set of databases for which transaction commit/abort + * triggers must be run. + * + * @param dbImpl the database that associated with the trigger + */ + public void noteTriggerDb(DatabaseImpl dbImpl) { + if (triggerDbs == null) { + triggerDbs = + Collections.synchronizedSet(new HashSet()); + } + triggerDbs.add(dbImpl); + } + + /** + * Returns the set of databases for which transaction commit/abort + * triggers must be run. Returns Null if no triggers need to be run. + */ + public Set getTriggerDbs() { + return triggerDbs; + } + + /** Get the set of lock ids owned by this transaction */ + public Set getWriteLockIds() { + if (writeInfo == null) { + Set empty = Collections.emptySet(); + return empty; + } + + return writeInfo.keySet(); + } + + /* For unit tests. 
*/ + public Set getReadLockIds() { + if (readLocks == null) { + return new HashSet(); + } + return new HashSet(readLocks); + } + + public EnvironmentImpl getEnvironmentImpl() { + return envImpl; + } + + public void setTransaction(Transaction transaction) { + this.transaction = transaction; + } + + @Override + public Transaction getTransaction() { + return (transaction != null) ? + transaction : + (transaction = new AutoTransaction(this)); + } + + private static class AutoTransaction extends Transaction { + + protected AutoTransaction(Txn txn) { + /* AutoTransactions do not have a convenient environment handle. */ + super(txn.getEnvironmentImpl().getInternalEnvHandle(), txn); + } + + @Override + public synchronized void commit() + throws DatabaseException { + + EnvironmentFailureException.unexpectedState + ("commit() not permitted on an auto transaction"); + } + + @Override + public synchronized void commit + (@SuppressWarnings("unused") Durability durability) { + EnvironmentFailureException.unexpectedState + ("commit() not permitted on an auto transaction"); + } + + @Override + public synchronized void commitNoSync() + throws DatabaseException { + + EnvironmentFailureException.unexpectedState + ("commit() not permitted on an auto transaction"); + } + + @Override + public synchronized void commitWriteNoSync() + throws DatabaseException { + + EnvironmentFailureException.unexpectedState + ("commit() not permitted on an auto transaction"); + } + + @Override + public synchronized void abort() + throws DatabaseException { + + EnvironmentFailureException.unexpectedState + ("abort() not permitted on an auto transaction"); + } + } + + public Map getUndoDatabases() { + return undoDatabases; + } + + /** + * Txn freezing is used to prevent changes to transaction lock contents. A + * frozen transaction should ignore any transaction commit/abort + * requests. This is used only by MasterTxns, as a way of holding a + * transaction stable while cloning it to serve as a ReplayTxn during + * master->replica transitions. + * @param isCommit true if called by commit. + */ + protected void checkIfFrozen(boolean isCommit) + throws DatabaseException { + return; + } + + /* + * Used when creating a subset of MasterTxns. Using an explicit method + * like this rather than checking class types insulates us from any + * assumptions about the class hierarchy. + */ + public boolean isMasterTxn() { + return false; + } +} diff --git a/src/com/sleepycat/je/txn/TxnAbort.java b/src/com/sleepycat/je/txn/TxnAbort.java new file mode 100644 index 0000000..5e41c50 --- /dev/null +++ b/src/com/sleepycat/je/txn/TxnAbort.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.log.Loggable; + +/** + * Transaction abort. + */ +public class TxnAbort extends VersionedWriteTxnEnd { + + public TxnAbort(long id, long lastLsn, int masterId, long dtvlsn) { + super(id, lastLsn, masterId, dtvlsn); + } + + /** + * For constructing from the log. 
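+     *
+     * (Sketch: a log reader typically instantiates via this constructor
+     * and then fills in state with readFromLog, e.g.
+     *
+     *   TxnAbort txnAbort = new TxnAbort();
+     *   txnAbort.readFromLog(logBuffer, entryVersion);
+     *
+     * where logBuffer and entryVersion describe the entry being read; live
+     * aborts use the four-argument constructor above.)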
+     */
+    public TxnAbort() {
+    }
+
+    @Override
+    protected String getTagName() {
+        return "TxnAbort";
+    }
+
+    @Override
+    public boolean logicalEquals(Loggable other) {
+
+        if (!(other instanceof TxnAbort)) {
+            return false;
+        }
+
+        TxnAbort otherAbort = (TxnAbort) other;
+
+        return ((id == otherAbort.id) &&
+                (repMasterNodeId == otherAbort.repMasterNodeId));
+    }
+}
diff --git a/src/com/sleepycat/je/txn/TxnChain.java b/src/com/sleepycat/je/txn/TxnChain.java
new file mode 100644
index 0000000..ffde9f5
--- /dev/null
+++ b/src/com/sleepycat/je/txn/TxnChain.java
@@ -0,0 +1,437 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import java.io.FileNotFoundException;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.EnvironmentFailureReason;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.TTL;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.WholeEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+
+/**
+ * TxnChain supports "txn rollback", which undoes the write operations for a
+ * given txn to an arbitrary point. Txn rollback (and TxnChain construction)
+ * is done on two occasions:
+ * 1. During normal processing, when an ongoing txn must be rolled back due
+ *    to a syncup operation (see rep/txn/ReplayTxn.java).
+ * 2. During recovery, to process a "rollback period" (see
+ *    RollbackTracker.java).
+ *
+ * In the JE log, the logrecs that make up a txn are chained, but each logrec
+ * contains undo info that refers to the pre-txn version of the associated
+ * record, which may not be the immediately previous version, if the txn
+ * writes the same record multiple times. For example, a log looks like this:
+ *
+ * lsn  key  data     abortlsn
+ * 100  A    10       null_lsn (first instance of record A)
+ * 150  B    100      null_lsn (first instance of record B)
+ * ..... txn begins .....
+ * 200  A    20       100
+ * 300  A    deleted  100
+ * 400  B    200      150
+ * 500  A    30       100
+ * 600  C    10       null_lsn
+ *
+ * When reading the log, we can find all the records in the transaction. This
+ * chain exists:
+ *
+ *     500 -> 400 -> 300 -> 200 -> null_lsn
+ *
+ * To rollback to an arbitrary entry in the transaction, we need a chain of
+ * all the records that occupied a given BIN slot during the transaction.
+ * The key, data, and comparators are used to determine which records hash
+ * to the same slot, mimicking the btree itself. E.g., rolling the txn above
+ * back to a matchpoint at lsn 200 produces this revert info for each undone
+ * logrec:
+ *
+ *     undone logrec:  300    400    500    600
+ *     revertToLsn:    200    150    300    null_lsn
+ *     revertKD:       false  false  false  true
+ */
+public class TxnChain {
+
+    private final EnvironmentImpl envImpl;
+
+    /*
+     * Null if we are in recovery.
Otherwise, it points to the same map as the + * undoDatabases field of the ReplayTxn. + */ + private final Map undoDatabases; + + /* + * Set of LSNs that will not be undone (i.e. the preserved portion of the + * txn's log chain). + */ + private final Set remainingLockedNodes; + + /* + * For each logrec that will be undone, revertList contains a RevertInfo + * obj, which refers to the record version to revert to. The list is + * ordered in reverse LSN order. + */ + private final LinkedList revertList; + + /* The last applied VLSN in this txn, after rollback has occurred. */ + private VLSN lastValidVLSN; + + /* + * Find the previous version for all entries in this transaction. Used by + * recovery. This differs from the constructor used by syncup rollback + * which is instigated by the txn. In this case, there is no cache of + * DatabaseImpls. + */ + public TxnChain( + long lastLoggedLsn, + long txnId, + long matchpoint, + EnvironmentImpl envImpl) { + + this(lastLoggedLsn, txnId, matchpoint, null, envImpl); + } + + /* + * Find the previous version for all entries in this transaction. + * DatabaseImpls used during txn chain creation are taken from the + * transaction's undoDatabases cache. + */ + public TxnChain( + long lastLoggedLsn, + long txnId, + long matchpoint, + Map undoDatabases, + EnvironmentImpl envImpl) + throws DatabaseException { + + LogManager logManager = envImpl.getLogManager(); + + this.envImpl = envImpl; + this.undoDatabases = undoDatabases; + + remainingLockedNodes = new HashSet(); + + /* + * A map that stores for each record R the revert info for the + * latest R-logrec seen during the backwards traversal of the + * txn chain done below. + */ + TreeMap recordsMap = + new TreeMap(); + + revertList = new LinkedList(); + + /* + * Traverse this txn's entire logrec chain and record revert info + * for each logrec in the chain. Start the traversal with the last + * logrec generated by this txn and move backwards. + */ + long currLsn = lastLoggedLsn; + + try { + lastValidVLSN = VLSN.NULL_VLSN; + + while (currLsn != DbLsn.NULL_LSN) { + + WholeEntry wholeEntry = + logManager.getLogEntryAllowInvisible(currLsn); + + LNLogEntry currLogrec = + (LNLogEntry) wholeEntry.getEntry(); + + DatabaseImpl dbImpl = getDatabaseImpl(currLogrec.getDbId()); + + if (dbImpl == null) { + + if (undoDatabases != null) { + throw EnvironmentFailureException.unexpectedState( + envImpl, // fatal error, this is a corruption + "DB missing during non-recovery rollback, dbId=" + + currLogrec.getDbId() + " txnId=" + txnId); + } + + /* + * For recovery rollback, simply skip this entry when the + * DB has been deleted. This has no impact on the chain + * for other LNs. This LN will not be processed by + * recovery rollback, since RollbackTracker.rollback + * ignores LNs in deleted DBs. [#22071] [#22052] + */ + currLsn = currLogrec.getUserTxn().getLastLsn(); + continue; + } + + currLogrec.postFetchInit(dbImpl); + + try { + /* + * Let L be the current logrec, and let R and T be the + * record and txn associated with L. If T wrote R again + * after L, let Ln be the 1st R-logrec by T after L. + */ + CompareSlot recId = new CompareSlot(dbImpl, currLogrec); + + RevertInfo ri = recordsMap.get(recId); + + /* + * If Ln exists, update the RevertInfo created earlier for + * Ln so that it now refers to the L version of R. + */ + if (ri != null) { + ri.revertLsn = currLsn; + ri.revertKD = false; + ri.revertPD = currLogrec.isDeleted(); + + ri.revertKey = + (dbImpl.allowsKeyUpdates() ? 
+ currLogrec.getKey() : null); + + ri.revertData = + (currLogrec.isEmbeddedLN() ? + currLogrec.getData() : null); + + ri.revertVLSN = + (currLogrec.isEmbeddedLN() ? + currLogrec.getLN().getVLSNSequence() : + VLSN.NULL_VLSN_SEQUENCE); + + ri.revertExpiration = currLogrec.getExpiration(); + + ri.revertExpirationInHours = + currLogrec.isExpirationInHours(); + } + + /* + * If L will be rolled back, assume that it is the 1st + * R-logrec by T and thus set its revert info to refer + * to the pre-T version of R. + */ + if (DbLsn.compareTo(currLsn, matchpoint) > 0) { + + ri = new RevertInfo( + currLogrec.getAbortLsn(), + currLogrec.getAbortKnownDeleted(), + currLogrec.getAbortKey(), + currLogrec.getAbortData(), + currLogrec.getAbortVLSN(), + currLogrec.getAbortExpiration(), + currLogrec.isAbortExpirationInHours()); + + revertList.add(ri); + recordsMap.put(recId, ri); + + } else { + + /* + * We are done with record R, so remove it from the + * map, if it is still there. + */ + if (ri != null) { + recordsMap.remove(recId); + } + + remainingLockedNodes.add(currLsn); + + if (lastValidVLSN != null && + lastValidVLSN.isNull() && + wholeEntry.getHeader().getVLSN() != null && + !wholeEntry.getHeader().getVLSN().isNull()) { + + lastValidVLSN = wholeEntry.getHeader().getVLSN(); + } + } + + /* Move on to the previous logrec for this txn. */ + currLsn = currLogrec.getUserTxn().getLastLsn(); + + } finally { + releaseDatabaseImpl(dbImpl); + } + } + } catch (FileNotFoundException e) { + throw EnvironmentFailureException.promote( + envImpl, EnvironmentFailureReason.LOG_INTEGRITY, + "Problem finding intermediates for txn " + txnId + + " at lsn " + DbLsn.getNoFormatString(currLsn), e); + } + } + + /** + * Hide the details of whether we are getting a databaseImpl from the txn's + * cache, or whether we're fetching it from the dbMapTree at recovery or + * during master->replica transition. + */ + private DatabaseImpl getDatabaseImpl(DatabaseId dbId) { + if (undoDatabases != null) { + return undoDatabases.get(dbId); + } + + return envImpl.getDbTree().getDb(dbId); + } + + /** Only needed if we are in recovery, and fetched the DatabaseImpl. */ + private void releaseDatabaseImpl(DatabaseImpl dbImpl) { + if (undoDatabases == null) { + envImpl.getDbTree().releaseDb(dbImpl); + } + } + + /** + * Returns LSNs for all nodes that should remain locked by the txn. Note + * that when multiple versions of a record were locked by the txn, the LSNs + * of all versions are returned. Only the latest version will actually be + * locked. + */ + public Set getRemainingLockedNodes() { + return remainingLockedNodes; + } + + /** + * Return information about the next item on the transaction chain and + * remove it from the chain. 
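+     *
+     * A typical consumer walks the chain in reverse-LSN order (sketch; the
+     * real loop lives in the rollback code, e.g. ReplayTxn.rollback):
+     *
+     *   while (there are logrecs to undo) {
+     *       RevertInfo ri = chain.pop();
+     *       // revert the BIN slot using ri.revertLsn, ri.revertKD, ...
+     *   }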
+ */ + public RevertInfo pop() { + return revertList.remove(); + } + + public VLSN getLastValidVLSN() { + return lastValidVLSN; + } + + @Override + public String toString() { + return revertList.toString(); + } + + public static class RevertInfo { + + public long revertLsn; + public boolean revertKD; + public boolean revertPD; + public byte[] revertKey; + public byte[] revertData; + public long revertVLSN; + public int revertExpiration; + public boolean revertExpirationInHours; + + RevertInfo( + long revertLsn, + boolean revertKD, + byte[] revertKey, + byte[] revertData, + long revertVLSN, + int revertExpiration, + boolean revertExpirationInHours) { + + this.revertLsn = revertLsn; + this.revertKD = revertKD; + this.revertPD = false; + this.revertKey = revertKey; + this.revertData = revertData; + this.revertVLSN = revertVLSN; + this.revertExpiration = revertExpiration; + this.revertExpirationInHours = revertExpirationInHours; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("revertLsn="); + sb.append(DbLsn.getNoFormatString(revertLsn)); + sb.append(" revertKD=").append(revertKD); + sb.append(" revertPD=").append(revertPD); + if (revertKey != null) { + sb.append(" revertKey="); + sb.append(Key.getNoFormatString(revertKey)); + } + if (revertData != null) { + sb.append(" revertData="); + sb.append(Key.getNoFormatString(revertData)); + } + sb.append(" revertVLSN=").append(revertVLSN); + sb.append(" revertExpires="); + sb.append(TTL.formatExpiration( + revertExpiration, revertExpirationInHours)); + return sb.toString(); + } + } + + /** + * Compare two keys using the appropriate comparator. Keys from different + * databases should never be equal. + */ + public static class CompareSlot implements Comparable { + + private final DatabaseImpl dbImpl; + private final byte[] key; + + public CompareSlot(DatabaseImpl dbImpl, LNLogEntry undoEntry) { + this(dbImpl, undoEntry.getKey()); + } + + private CompareSlot(DatabaseImpl dbImpl, byte[] key) { + this.dbImpl = dbImpl; + this.key = key; + } + + public int compareTo(CompareSlot other) { + int dbCompare = dbImpl.getId().compareTo(other.dbImpl.getId()); + if (dbCompare != 0) { + /* LNs are from different databases. */ + return dbCompare; + } + + /* Compare keys. */ + return Key.compareKeys(key, other.key, dbImpl.getKeyComparator()); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof CompareSlot)) { + return false; + } + return compareTo((CompareSlot) other) == 0; + } + + @Override + public int hashCode() { + + /* + * Disallow use of HashSet/HashMap/etc. TreeSet/TreeMap/etc should + * be used instead when a CompareSlot is used as a key. + * + * Because a comparator may be configured that compares only a part + * of the key, a hash code cannot take into account the key or + * data, because hashCode() must return the same value for two + * objects whenever equals() returns true. We could hash the DB ID + * alone, but that would not produce an efficient hash table. + */ + throw EnvironmentFailureException.unexpectedState + ("Hashing not supported"); + } + } +} diff --git a/src/com/sleepycat/je/txn/TxnCommit.java b/src/com/sleepycat/je/txn/TxnCommit.java new file mode 100644 index 0000000..dd75be7 --- /dev/null +++ b/src/com/sleepycat/je/txn/TxnCommit.java @@ -0,0 +1,58 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.VLSN; + +/** + * Transaction commit. + */ +public class TxnCommit extends VersionedWriteTxnEnd { + + public TxnCommit(long id, long lastLsn, int masterId, long dtvlsn) { + super(id, lastLsn, masterId, dtvlsn); + if ((masterId > 0) && (dtvlsn < VLSN.NULL_VLSN_SEQUENCE)) { + /* + * Note that the dtvln will be NULL when a Txn is created on a + * master, so allow for it. + */ + throw new IllegalStateException("DTVLSN value:" + dtvlsn); + } + } + + /** + * For constructing from the log. + */ + public TxnCommit() { + } + + @Override + protected String getTagName() { + return "TxnCommit"; + } + + @Override + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof TxnCommit)) { + return false; + } + + TxnCommit otherCommit = (TxnCommit) other; + + return ((id == otherCommit.id) && + (repMasterNodeId == otherCommit.repMasterNodeId)); + } +} diff --git a/src/com/sleepycat/je/txn/TxnEnd.java b/src/com/sleepycat/je/txn/TxnEnd.java new file mode 100644 index 0000000..913081b --- /dev/null +++ b/src/com/sleepycat/je/txn/TxnEnd.java @@ -0,0 +1,101 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Timestamp; + +/** + * The base class for records that mark the end of a transaction. + */ +public abstract class TxnEnd implements Loggable { + + long id; + Timestamp time; + long lastLsn; + + /* For replication - master node which wrote this record. */ + int repMasterNodeId; + + /** + * The txn commit VLSN that was acknowledged by at least a majority of the + * nodes either at the time of this commit, or eventually via a heartbeat. + * This VLSN must typically be less than the VLSN associated with the + * TxnEnd itself, when it's written to the log. In cases of mixed mode + * operation (when a pre-DTVLSN is serving as a feeder to a DTVLSN aware + * replica) it may be equal to the VLSN associated with the TxnEnd. + */ + long dtvlsn; + + TxnEnd(long id, long lastLsn, int repMasterNodeId, long dtvlsn) { + this.id = id; + time = new Timestamp(System.currentTimeMillis()); + this.lastLsn = lastLsn; + this.repMasterNodeId = repMasterNodeId; + this.dtvlsn = dtvlsn; + } + + /** + * For constructing from the log + */ + public TxnEnd() { + lastLsn = DbLsn.NULL_LSN; + } + + /* + * Accessors. 
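+     *
+     * (E.g., replication code might read these after materializing a
+     * commit record from the log; illustrative only:
+     *
+     *   long txnId  = txnEnd.getId();
+     *   long dtvlsn = txnEnd.getDTVLSN();
+     *   int master  = txnEnd.getMasterNodeId();
+     * )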
+ */ + public long getId() { + return id; + } + + public Timestamp getTime() { + return time; + } + + long getLastLsn() { + return lastLsn; + } + + public int getMasterNodeId() { + return repMasterNodeId; + } + + @Override + public long getTransactionId() { + return id; + } + + public long getDTVLSN() { + return dtvlsn; + } + + public void setDTVLSN(long dtvlsn) { + this.dtvlsn = dtvlsn; + } + + /** + * Returns true if there are changes that have been logged for this entry. + * It's unusual for such a record to not have associated changes, since + * such commit/abort entries are typically optimized away. When present + * they typically represent records used to persist uptodate DTVLSN + * information as part of the entry. + */ + public boolean hasLoggedEntries() { + return (lastLsn != DbLsn.NULL_LSN); + } + + protected abstract String getTagName(); +} diff --git a/src/com/sleepycat/je/txn/TxnManager.java b/src/com/sleepycat/je/txn/TxnManager.java new file mode 100644 index 0000000..6bafbb7 --- /dev/null +++ b/src/com/sleepycat/je/txn/TxnManager.java @@ -0,0 +1,456 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_ABORTS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_ACTIVE; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_ACTIVE_TXNS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_BEGINS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_COMMITS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_XAABORTS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_XACOMMITS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_XAPREPARES; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import javax.transaction.xa.Xid; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockStats; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.TransactionStats; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.latch.LatchFactory; +import com.sleepycat.je.latch.SharedLatch; +import com.sleepycat.je.utilint.ActiveTxnArrayStat; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; + +/** + * Class to manage transactions. Basically a Set of all transactions with add + * and remove methods and a latch around the set. + */ +public class TxnManager { + + /* + * All NullTxns share the same id so as not to eat from the id number + * space. + * + * Negative transaction ids are used by the master node of a replication + * group. 
That sequence begins at -10 to avoid conflict with the
+     * NULL_TXN_ID and leave room for other special-purpose ids.
+     */
+    static final long NULL_TXN_ID = -1;
+    private static final long FIRST_NEGATIVE_ID = -10;
+    private LockManager lockManager;
+    private final EnvironmentImpl envImpl;
+    private final SharedLatch allTxnsLatch;
+    private final Map<Txn, Txn> allTxns;
+
+    /* Maps Xids to Txns. */
+    private final Map<Xid, Txn> allXATxns;
+
+    /* Maps Threads to Txns when there are thread-implied transactions. */
+    private final Map<Thread, Transaction> thread2Txn;
+
+    /*
+     * Positive and negative transaction ids are used in a replicated system,
+     * to let replicated transactions intermingle with local transactions.
+     */
+    private final AtomicLong lastUsedLocalTxnId;
+    private final AtomicLong lastUsedReplicatedTxnId;
+    private final AtomicInteger nActiveSerializable;
+
+    /* Locker Stats */
+    private final StatGroup stats;
+    private final IntStat nActive;
+    private final LongStat numBegins;
+    private final LongStat numCommits;
+    private final LongStat numAborts;
+    private final LongStat numXAPrepares;
+    private final LongStat numXACommits;
+    private final LongStat numXAAborts;
+    private final ActiveTxnArrayStat activeTxns;
+    private volatile long nTotalCommits = 0;
+
+    public TxnManager(EnvironmentImpl envImpl) {
+        lockManager = new SyncedLockManager(envImpl);
+
+        if (envImpl.isNoLocking()) {
+            lockManager = new DummyLockManager(envImpl, lockManager);
+        }
+
+        this.envImpl = envImpl;
+        allTxnsLatch = LatchFactory.createSharedLatch(
+            envImpl, "TxnManager.allTxns", false /*exclusiveOnly*/);
+        allTxns = new ConcurrentHashMap<Txn, Txn>();
+        allXATxns = Collections.synchronizedMap(new HashMap<Xid, Txn>());
+        thread2Txn = new ConcurrentHashMap<Thread, Transaction>();
+
+        lastUsedLocalTxnId = new AtomicLong(0);
+        lastUsedReplicatedTxnId = new AtomicLong(FIRST_NEGATIVE_ID);
+        nActiveSerializable = new AtomicInteger(0);
+
+        /* Do the stats definition. */
+        stats = new StatGroup("Transaction", "Transaction statistics");
+        nActive = new IntStat(stats, TXN_ACTIVE);
+        numBegins = new LongStat(stats, TXN_BEGINS);
+        numCommits = new LongStat(stats, TXN_COMMITS);
+        numAborts = new LongStat(stats, TXN_ABORTS);
+        numXAPrepares = new LongStat(stats, TXN_XAPREPARES);
+        numXACommits = new LongStat(stats, TXN_XACOMMITS);
+        numXAAborts = new LongStat(stats, TXN_XAABORTS);
+        activeTxns = new ActiveTxnArrayStat(stats, TXN_ACTIVE_TXNS);
+    }
+
+    /**
+     * Set the txn id sequence.
+     */
+    public void setLastTxnId(long lastReplicatedTxnId, long lastLocalId) {
+        lastUsedReplicatedTxnId.set(lastReplicatedTxnId);
+        lastUsedLocalTxnId.set(lastLocalId);
+    }
+
+    /**
+     * Get the last used id, for checkpoint info.
+     */
+    public long getLastLocalTxnId() {
+        return lastUsedLocalTxnId.get();
+    }
+
+    public long getLastReplicatedTxnId() {
+        return lastUsedReplicatedTxnId.get();
+    }
+
+    public long getNextReplicatedTxnId() {
+        return lastUsedReplicatedTxnId.decrementAndGet();
+    }
+
+    /* @return true if this id is for a replicated txn. */
+    public static boolean isReplicatedTxn(long txnId) {
+        return (txnId <= FIRST_NEGATIVE_ID);
+    }
+
+    /**
+     * Get the next transaction id for a non-replicated transaction. Note
+     * that in the future, a replicated node could conceivably issue an
+     * application-level, non-replicated transaction.
+     */
+    long getNextTxnId() {
+        return lastUsedLocalTxnId.incrementAndGet();
+    }
+
+    /*
+     * Tracks the lowest replicated transaction id used during a replay of
+     * the replication stream, so that it's available as the starting point
+     * if this replica transitions to being the master.
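+     *
+     * For example (sketch): local txn ids count up 1, 2, 3, ... while
+     * replicated ids count down from FIRST_NEGATIVE_ID, so the first
+     * replicated id issued is -11, and:
+     *
+     *   isReplicatedTxn(-11);  // true
+     *   isReplicatedTxn(3);    // false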
+ */ + public void updateFromReplay(long replayTxnId) { + assert !envImpl.isMaster(); + assert replayTxnId < 0 : + "replay txn id is unexpectedly positive " + replayTxnId; + + if (replayTxnId < lastUsedReplicatedTxnId.get()) { + lastUsedReplicatedTxnId.set(replayTxnId); + } + } + + /** + * Returns commit counter that is never cleared (as stats are), so it can + * be used for monitoring un-flushed txns. + */ + public long getNTotalCommits() { + return nTotalCommits; + } + + /** + * Create a new transaction. + * @param parent for nested transactions, not yet supported + * @param txnConfig specifies txn attributes + * @return the new txn + */ + public Txn txnBegin(Transaction parent, TransactionConfig txnConfig) + throws DatabaseException { + + return Txn.createUserTxn(envImpl, txnConfig); + } + + /** + * Give transactions and environment access to lock manager. + */ + public LockManager getLockManager() { + return lockManager; + } + + /** + * Called when txn is created. + */ + public void registerTxn(Txn txn) { + allTxnsLatch.acquireShared(); + try { + allTxns.put(txn, txn); + if (txn.isSerializableIsolation()) { + nActiveSerializable.incrementAndGet(); + } + numBegins.increment(); + } finally { + allTxnsLatch.release(); + } + } + + /** + * Called when txn ends. + */ + void unRegisterTxn(Txn txn, boolean isCommit) { + allTxnsLatch.acquireShared(); + try { + allTxns.remove(txn); + + /* Remove any accumulated MemoryBudget delta for the Txn. */ + envImpl.getMemoryBudget(). + updateTxnMemoryUsage(0 - txn.getBudgetedMemorySize()); + if (isCommit) { + numCommits.increment(); + nTotalCommits += 1; + } else { + numAborts.increment(); + } + if (txn.isSerializableIsolation()) { + nActiveSerializable.decrementAndGet(); + } + } finally { + allTxnsLatch.release(); + } + } + + /** + * Called when txn is created. + */ + public void registerXATxn(Xid xid, Txn txn, boolean isPrepare) { + if (!allXATxns.containsKey(xid)) { + allXATxns.put(xid, txn); + envImpl.getMemoryBudget().updateTxnMemoryUsage + (MemoryBudget.HASHMAP_ENTRY_OVERHEAD); + } + + if (isPrepare) { + numXAPrepares.increment(); + } + } + + /** + * Called when XATransaction is prepared. + */ + public void notePrepare() { + numXAPrepares.increment(); + } + + /** + * Called when txn ends. + * + * @throws IllegalStateException via XAResource + */ + void unRegisterXATxn(Xid xid, boolean isCommit) + throws DatabaseException { + + if (allXATxns.remove(xid) == null) { + throw new IllegalStateException + ("XA Transaction " + xid + " is not registered."); + } + envImpl.getMemoryBudget().updateTxnMemoryUsage + (0 - MemoryBudget.HASHMAP_ENTRY_OVERHEAD); + if (isCommit) { + numXACommits.increment(); + } else { + numXAAborts.increment(); + } + } + + /** + * Retrieve a Txn object from an Xid. + */ + public Txn getTxnFromXid(Xid xid) { + return allXATxns.get(xid); + } + + /** + * Called when txn is assoc'd with this thread. + */ + public void setTxnForThread(Transaction txn) { + + Thread curThread = Thread.currentThread(); + if (txn == null) { + unsetTxnForThread(); + } else { + thread2Txn.put(curThread, txn); + } + } + + /** + * Called when txn is assoc'd with this thread. + */ + public Transaction unsetTxnForThread() { + Thread curThread = Thread.currentThread(); + return thread2Txn.remove(curThread); + } + + /** + * Retrieve a Txn object for this Thread. 
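+     *
+     * (Sketch of the thread-implied-txn pattern these methods support;
+     * illustrative only:
+     *
+     *   txnManager.setTxnForThread(txn);  // bind to the current thread
+     *   Transaction t = txnManager.getTxnForThread();
+     *   txnManager.unsetTxnForThread();   // unbind when done
+     * )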
+     */
+    public Transaction getTxnForThread() {
+        return thread2Txn.get(Thread.currentThread());
+    }
+
+    public Xid[] XARecover() {
+        Set<Xid> xidSet = allXATxns.keySet();
+        Xid[] ret = new Xid[xidSet.size()];
+        ret = xidSet.toArray(ret);
+
+        return ret;
+    }
+
+    /**
+     * Returns whether there are any active serializable transactions,
+     * excluding the transaction given (if non-null). This is intentionally
+     * returned without latching, since latching would not make the act of
+     * reading an integer more atomic than it already is.
+     */
+    public boolean
+        areOtherSerializableTransactionsActive(Locker excludeLocker) {
+        int exclude =
+            (excludeLocker != null &&
+             excludeLocker.isSerializableIsolation()) ?
+            1 : 0;
+        return (nActiveSerializable.get() - exclude > 0);
+    }
+
+    /**
+     * Get the earliest LSN of all the active transactions, for checkpoint.
+     * Returns NULL_LSN if no transaction is currently active.
+     */
+    public long getFirstActiveLsn() {
+
+        /*
+         * Note that the latching hierarchy calls for synchronizing on
+         * allTxns first, then synchronizing on individual txns.
+         */
+        long firstActive = DbLsn.NULL_LSN;
+        allTxnsLatch.acquireExclusive();
+        try {
+            Iterator<Txn> iter = allTxns.keySet().iterator();
+            while (iter.hasNext()) {
+                long txnFirstActive = iter.next().getFirstActiveLsn();
+                if (firstActive == DbLsn.NULL_LSN) {
+                    firstActive = txnFirstActive;
+                } else if (txnFirstActive != DbLsn.NULL_LSN) {
+                    if (DbLsn.compareTo(txnFirstActive, firstActive) < 0) {
+                        firstActive = txnFirstActive;
+                    }
+                }
+            }
+        } finally {
+            allTxnsLatch.release();
+        }
+
+        return firstActive;
+    }
+
+    /*
+     * Statistics
+     */
+
+    /**
+     * Collect transaction related stats.
+     */
+    public TransactionStats txnStat(StatsConfig config) {
+        TransactionStats txnStats = null;
+        allTxnsLatch.acquireShared();
+        try {
+            nActive.set(allTxns.size());
+            TransactionStats.Active[] activeSet =
+                new TransactionStats.Active[nActive.get()];
+            Iterator<Txn> iter = allTxns.keySet().iterator();
+            int i = 0;
+            while (iter.hasNext() && i < activeSet.length) {
+                Locker txn = iter.next();
+                activeSet[i] = new TransactionStats.Active
+                    (txn.toString(), txn.getId(), 0);
+                i++;
+            }
+            activeTxns.set(activeSet);
+            txnStats = new TransactionStats(stats.cloneGroup(false));
+            if (config.getClear()) {
+                numCommits.clear();
+                numAborts.clear();
+                numXACommits.clear();
+                numXAAborts.clear();
+            }
+        } finally {
+            allTxnsLatch.release();
+        }
+
+        return txnStats;
+    }
+
+    public StatGroup loadStats(StatsConfig config) {
+        return lockManager.loadStats(config);
+    }
+
+    /**
+     * Collect lock related stats.
+     */
+    public LockStats lockStat(StatsConfig config)
+        throws DatabaseException {
+
+        return lockManager.lockStat(config);
+    }
+
+    /**
+     * Examine the transaction set and return Txns that match the given class
+     * or are subclasses of that class. This method is used to obtain Master
+     * and Replay Txns for HA.
+     */
+    public <T extends Txn> Set<T> getTxns(Class<T> txnClass) {
+        final Set<T> targetSet = new HashSet<>();
+
+        allTxnsLatch.acquireShared();
+        try {
+            Set<Txn> all = allTxns.keySet();
+            for (Txn t: all) {
+                if (txnClass.isAssignableFrom(t.getClass())) {
+                    @SuppressWarnings("unchecked")
+                    final T t2 = (T)t;
+                    targetSet.add(t2);
+                }
+            }
+        } finally {
+            allTxnsLatch.release();
+        }
+
+        return targetSet;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/TxnPrepare.java b/src/com/sleepycat/je/txn/TxnPrepare.java
new file mode 100644
index 0000000..7d896d0
--- /dev/null
+++ b/src/com/sleepycat/je/txn/TxnPrepare.java
@@ -0,0 +1,96 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import java.nio.ByteBuffer;
+
+import javax.transaction.xa.Xid;
+
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.Loggable;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * This class writes out a transaction prepare record.
+ */
+public class TxnPrepare extends TxnEnd implements Loggable {
+
+    private Xid xid;
+
+    public TxnPrepare(long id, Xid xid) {
+        /* LastLSN is never used. */
+        super(id, DbLsn.NULL_LSN,
+              0 /* masterNodeId, never replicated. */,
+              0L /* dtvlsn, never replicated. */);
+        this.xid = xid;
+    }
+
+    /**
+     * For constructing from the log.
+     */
+    public TxnPrepare() {
+    }
+
+    public Xid getXid() {
+        return xid;
+    }
+
+    /*
+     * Log support
+     */
+
+    @Override
+    protected String getTagName() {
+        return "TxnPrepare";
+    }
+
+    @Override
+    public int getLogSize() {
+        return LogUtils.getPackedLongLogSize(id) +
+            LogUtils.getTimestampLogSize(time) +
+            LogUtils.getXidSize(xid);
+    }
+
+    @Override
+    public void writeToLog(ByteBuffer logBuffer) {
+        LogUtils.writePackedLong(logBuffer, id);
+        LogUtils.writeTimestamp(logBuffer, time);
+        LogUtils.writeXid(logBuffer, xid);
+    }
+
+    @Override
+    public void readFromLog(ByteBuffer logBuffer, int entryVersion) {
+        boolean unpacked = (entryVersion < 6);
+        id = LogUtils.readLong(logBuffer, unpacked);
+        time = LogUtils.readTimestamp(logBuffer, unpacked);
+        xid = LogUtils.readXid(logBuffer);
+    }
+
+    @Override
+    public void dumpLog(StringBuilder sb, boolean verbose) {
+        sb.append("<").append(getTagName());
+        sb.append(" id=\"").append(id);
+        sb.append("\" time=\"").append(time);
+        sb.append("\">");
+        sb.append(xid); // xid already formatted as xml
+        sb.append("</").append(getTagName()).append(">");
+    }
+
+    /**
+     * Always return false, this item should never be compared.
+     */
+    public boolean logicalEquals(Loggable other) {
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/txn/UndoReader.java b/src/com/sleepycat/je/txn/UndoReader.java
new file mode 100644
index 0000000..1870ddf
--- /dev/null
+++ b/src/com/sleepycat/je/txn/UndoReader.java
@@ -0,0 +1,115 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import java.util.Map;
+
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LNFileReader;
+import com.sleepycat.je.log.WholeEntry;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.tree.LN;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Convenience class to package together the different steps and fields needed
+ * for reading a log entry for undoing. It is used for both txn aborts and
+ * recovery undos.
+ */
+public class UndoReader {
+
+    public final LNLogEntry<?> logEntry;
+    public final LN ln;
+    private final long lsn;
+    public final int logEntrySize;
+    public final DatabaseImpl db;
+
+    private UndoReader(LNLogEntry<?> logEntry,
+                       LN ln,
+                       long lsn,
+                       int logEntrySize,
+                       DatabaseImpl db) {
+        this.logEntry = logEntry;
+        this.ln = ln;
+        this.lsn = lsn;
+        this.logEntrySize = logEntrySize;
+        this.db = db;
+    }
+
+    /**
+     * Set up an UndoReader when doing an undo or txn partial rollback for a
+     * live txn.
+     *

+     * Never returns null. The DB ID of the LN must be present in
+     * undoDatabases, or a fatal exception is thrown.
+     */
+    public static UndoReader create(
+        EnvironmentImpl envImpl,
+        long undoLsn,
+        Map<DatabaseId, DatabaseImpl> undoDatabases) {
+
+        final WholeEntry wholeEntry = envImpl.getLogManager().
+            getWholeLogEntryHandleFileNotFound(undoLsn);
+        final int logEntrySize = wholeEntry.getHeader().getEntrySize();
+        final LNLogEntry<?> logEntry = (LNLogEntry<?>) wholeEntry.getEntry();
+        final DatabaseId dbId = logEntry.getDbId();
+        final DatabaseImpl db = undoDatabases.get(dbId);
+        if (db == null) {
+            throw EnvironmentFailureException.unexpectedState
+                (envImpl,
+                 "DB not found during non-recovery undo/rollback, id=" + dbId);
+        }
+        logEntry.postFetchInit(db);
+        final LN ln = logEntry.getLN();
+        final long lsn = undoLsn;
+        ln.postFetchInit(db, undoLsn);
+
+        return new UndoReader(logEntry, ln, lsn, logEntrySize, db);
+    }
+
+    /**
+     * Set up an UndoReader when doing a recovery partial rollback. In that
+     * case, we have a file reader positioned at the pertinent log entry.
+     *

        + * This method calls DbTree.getDb. The caller is responsible for calling + * DbTree.releaseDb on the db field. + *
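+     *
+     * A hypothetical caller (sketch only; the surrounding recovery logic is
+     * elided) would pair getDb/releaseDb like this:
+     *
+     *   UndoReader undo = UndoReader.createForRecovery(reader, dbMapTree);
+     *   if (undo != null) {
+     *       try {
+     *           // ... undo the LN ...
+     *       } finally {
+     *           dbMapTree.releaseDb(undo.db);
+     *       }
+     *   }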

+     * Null is returned if the DB ID of the LN has been deleted.
+     */
+    public static UndoReader createForRecovery(LNFileReader reader,
+                                               DbTree dbMapTree) {
+        final LNLogEntry<?> logEntry = reader.getLNLogEntry();
+        final DatabaseId dbId = logEntry.getDbId();
+        final DatabaseImpl db = dbMapTree.getDb(dbId);
+        if (db == null) {
+            return null;
+        }
+        logEntry.postFetchInit(db);
+        final LN ln = logEntry.getLN();
+        final long lsn = reader.getLastLsn();
+        ln.postFetchInit(db, lsn);
+        final int logEntrySize = reader.getLastEntrySize();
+
+        return new UndoReader(logEntry, ln, lsn, logEntrySize, db);
+    }
+
+    @Override
+    public String toString() {
+        return ln + " lsn=" + DbLsn.getNoFormatString(lsn);
+    }
+}
diff --git a/src/com/sleepycat/je/txn/VersionedWriteTxnEnd.java b/src/com/sleepycat/je/txn/VersionedWriteTxnEnd.java
new file mode 100644
index 0000000..91ce490
--- /dev/null
+++ b/src/com/sleepycat/je/txn/VersionedWriteTxnEnd.java
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Collections;
+
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.LogUtils;
+import com.sleepycat.je.log.VersionedWriteLoggable;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.PackedInteger;
+
+/**
+ * Base class for commit and abort records, which are replicated.
+ * The log formats for commit and abort are identical.
+ */
+public abstract class VersionedWriteTxnEnd
+    extends TxnEnd implements VersionedWriteLoggable {
+
+    /**
+     * The log version of the most recent format change for this loggable.
+     *
+     * @see #getLastFormatChange
+     */
+    private static final int LAST_FORMAT_CHANGE = 13;
+
+    VersionedWriteTxnEnd(long id, long lastLsn, int masterId, long dtvlsn) {
+        super(id, lastLsn, masterId, dtvlsn);
+    }
+
+    /**
+     * For constructing from the log.
+     */
+    public VersionedWriteTxnEnd() {
+    }
+
+    /*
+     * Log support for writing.
+     */
+
+    @Override
+    public int getLastFormatChange() {
+        return LAST_FORMAT_CHANGE;
+    }
+
+    @Override
+    public Collection<VersionedWriteLoggable> getEmbeddedLoggables() {
+        return Collections.emptyList();
+    }
+
+    @Override
+    public int getLogSize() {
+        return getLogSize(LogEntryType.LOG_VERSION, false /*forReplication*/);
+    }
+
+    @Override
+    public void writeToLog(final ByteBuffer logBuffer) {
+        writeToLog(
+            logBuffer, LogEntryType.LOG_VERSION, false /*forReplication*/);
+    }
+
+    @Override
+    public int getLogSize(final int logVersion, final boolean forReplication) {
+
+        if (dtvlsn == VLSN.NULL_VLSN_SEQUENCE) {
+            throw new IllegalStateException("DTVLSN is null");
+        }
+
+        return LogUtils.getPackedLongLogSize(id) +
+            LogUtils.getTimestampLogSize(time) +
+            LogUtils.getPackedLongLogSize(
+                forReplication ? DbLsn.NULL_LSN : lastLsn) +
+            LogUtils.getPackedIntLogSize(repMasterNodeId) +
+            ((logVersion >= LogEntryType.LOG_VERSION_DURABLE_VLSN) ?
+             LogUtils.getPackedLongLogSize(dtvlsn) : 0);
+    }
+
+    @Override
+    public void writeToLog(final ByteBuffer logBuffer,
+                           final int entryVersion,
+                           final boolean forReplication) {
+
+        if (entryVersion >= 12) {
+            LogUtils.writePackedLong(logBuffer,
+                forReplication ? DbLsn.NULL_LSN : lastLsn);
+        }
+        LogUtils.writePackedLong(logBuffer, id);
+        LogUtils.writeTimestamp(logBuffer, time);
+        if (entryVersion < 12) {
+            LogUtils.writePackedLong(logBuffer,
+                forReplication ? DbLsn.NULL_LSN : lastLsn);
+        }
+        LogUtils.writePackedInt(logBuffer, repMasterNodeId);
+
+        if (entryVersion >= LogEntryType.LOG_VERSION_DURABLE_VLSN) {
+            if (dtvlsn == VLSN.NULL_VLSN_SEQUENCE) {
+                throw new IllegalStateException("Unexpected null dtvlsn");
+            }
+            LogUtils.writePackedLong(logBuffer, dtvlsn);
+        }
+    }
+
+    @Override
+    public void readFromLog(ByteBuffer logBuffer, int entryVersion) {
+        final boolean isUnpacked = (entryVersion < 6);
+
+        if (entryVersion >= 12) {
+            lastLsn = LogUtils.readLong(logBuffer, isUnpacked);
+        }
+        id = LogUtils.readLong(logBuffer, isUnpacked);
+        time = LogUtils.readTimestamp(logBuffer, isUnpacked);
+        if (entryVersion < 12) {
+            lastLsn = LogUtils.readLong(logBuffer, isUnpacked);
+        }
+        if (entryVersion >= 6) {
+            repMasterNodeId = LogUtils.readInt(logBuffer,
+                                               false /* unpacked */);
+        }
+
+        if (entryVersion >= LogEntryType.LOG_VERSION_DURABLE_VLSN) {
+            dtvlsn = LogUtils.readPackedLong(logBuffer);
+            if (dtvlsn == VLSN.NULL_VLSN_SEQUENCE) {
+                throw new IllegalStateException("Unexpected null dtvlsn");
+            }
+        } else {
+            /*
+             * Distinguished value to make it clear that the value was derived
+             * from an old log entry.
+             */
+            dtvlsn = VLSN.UNINITIALIZED_VLSN_SEQUENCE;
+        }
+    }
+
+    @Override
+    public boolean hasReplicationFormat() {
+        return true;
+    }
+
+    @Override
+    public boolean isReplicationFormatWorthwhile(final ByteBuffer logBuffer,
+                                                 final int srcVersion,
+                                                 final int destVersion) {
+        /*
+         * It is too much trouble to parse versions older than 12, because the
+         * lastLsn is not at the front in older versions.
+         */
+        if (srcVersion < 12) {
+            return false;
+        }
+
+        /*
+         * If the size of lastLsn is greater than one (meaning it is not
+         * NULL_LSN), then we should re-serialize.
+         */
+        return PackedInteger.getReadLongLength(
+            logBuffer.array(),
+            logBuffer.arrayOffset() + logBuffer.position()) > 1;
+    }
+
+    @Override
+    public void dumpLog(StringBuilder sb, boolean verbose) {
+        sb.append("<").append(getTagName());
+        sb.append(" id=\"").append(id);
+        sb.append("\" time=\"").append(time);
+        sb.append("\" master=\"").append(repMasterNodeId);
+        sb.append("\" dtvlsn=\"").append(dtvlsn);
+        sb.append("\">");
+        sb.append(DbLsn.toString(lastLsn));
+        sb.append("</").append(getTagName()).append(">");
+    }
+}
diff --git a/src/com/sleepycat/je/txn/WriteLockInfo.java b/src/com/sleepycat/je/txn/WriteLockInfo.java
new file mode 100644
index 0000000..1ca2c62
--- /dev/null
+++ b/src/com/sleepycat/je/txn/WriteLockInfo.java
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+
+/*
+ * Given a locker T and a record R, a WriteLockInfo stores the info needed to
+ * undo the write ops (insertions, deletions, or updates) performed by T on
+ * R, if T aborts. Specifically, it stores the info needed to restore R to its
+ * "abort" version, i.e., the version of R that existed at the time when T
+ * locked R for the 1st time (and before T actually performed its 1st write
+ * op on R). Given that only transactional lockers may abort, WriteLockInfos
+ * are used by Txn lockers only.
+ *
+ * Notice that, conceptually, to write R, T locks R in exclusive mode and
+ * retains that lock until it terminates (commits or aborts). In other words,
+ * T locks R only once. However, because locks are acquired on LSNs and LSNs
+ * change every time a record is written, T may actually lock R multiple
+ * times. Every time T locks a new LSN of R, it creates a new WriteLockInfo.
+ * However, care is taken so that all WriteLockInfos for the same logical
+ * record and the same txn store the same info about the abort version (for
+ * details see CursorImpl.LockStanding, CursorImpl.lockLN(), and
+ * LN.logInternals()). In fact, if T writes R multiple times and then aborts,
+ * only one of the logrecs generated by T for R will be undone, restoring R
+ * to its abort version; the rest will be skipped. For example, if T first
+ * locks R at LSN A and then updates it to LSN B and again to LSN C, the
+ * WriteLockInfos for B and C both record A as the abort version; on abort,
+ * undoing one of those logrecs restores the version at A and the other
+ * logrec is skipped.
+ *
+ * Info about the abort version is needed during commit as well. Specifically,
+ * its LSN and on-disk size are needed to count the abort version obsolete
+ * when T commits.
+ */
+public class WriteLockInfo {
+
+    /*
+     * The LSN of the record's abort version. This is stored persistently in
+     * each logrec generated by T on R. May be NULL_LSN if R was created by T
+     * (it will definitely be NULL_LSN if the txn did not reuse an existing
+     * slot for the new record).
+     */
+    private long abortLsn;
+
+    /*
+     * Whether the record's abort version is a deletion version or not.
+     * It is stored persistently in each logrec generated by T on R.
+     */
+    private boolean abortKnownDeleted;
+
+    /*
+     * See comment for abortData field below.
+     */
+    private byte[] abortKey;
+
+    /*
+     * If the record's abort version was embedded in a BIN, the associated
+     * logrec that contains that version may have been cleaned away by the
+     * time the txn aborts. So, we must save in each logrec the data portion
+     * of the abort version. abortData serves this purpose. If key updates
+     * are allowed in the containing DB, the key of the abort version is
+     * saved in abortKey as well. Finally, if VLSN caching in BINs is enabled,
+     * the VLSN of the abort version is saved in abortVLSN as well.
+     */
+    private byte[] abortData;
+
+    /*
+     * See comment for abortData field above.
+     */
+    private long abortVLSN = VLSN.NULL_VLSN_SEQUENCE;
+
+    /*
+     * The on-disk size of the abort version, or zero if abortLsn is NULL_LSN
+     * or if the size is not known. Used for obsolete counting during commit.
+     * Not stored persistently.
+     */
+    private int abortLogSize;
+
+    /* Abort expiration time. Is negative if in hours, positive if in days. */
+    private int abortExpiration;
+
+    /*
+     * The containing database, or null if abortLsn is NULL_LSN. Used for
+     * obsolete counting during a commit.
+     */
+    private DatabaseImpl db;
+
+    /*
+     * True if the LSN has never been locked before by this Txn. Used so we
+     * can determine when to set abortLsn.
+ */ + private boolean neverLocked; + + static final WriteLockInfo basicWriteLockInfo = new WriteLockInfo(); + + // public for Sizeof + public WriteLockInfo() { + abortLsn = DbLsn.NULL_LSN; + abortKnownDeleted = false; + neverLocked = true; + } + + public boolean getAbortKnownDeleted() { + return abortKnownDeleted; + } + + public void setAbortKnownDeleted(boolean v) { + abortKnownDeleted = v; + } + + public long getAbortLsn() { + return abortLsn; + } + + public void setAbortLsn(long abortLsn) { + this.abortLsn = abortLsn; + } + + public byte[] getAbortKey() { + return abortKey; + } + + public void setAbortKey(byte[] v) { + abortKey = v; + } + + public byte[] getAbortData() { + return abortData; + } + + public void setAbortData(byte[] v) { + abortData = v; + } + + public long getAbortVLSN() { + return abortVLSN; + } + + public void setAbortVLSN(long v) { + abortVLSN = v; + } + + public int getAbortLogSize() { + return abortLogSize; + } + + public void setAbortLogSize(int logSize) { + abortLogSize = logSize; + } + + public void setAbortExpiration(int expiration, boolean expirationInHours) { + abortExpiration = expirationInHours ? (-expiration) : expiration; + } + + public int getAbortExpiration() { + return Math.abs(abortExpiration); + } + + public boolean isAbortExpirationInHours() { + return abortExpiration < 0; + } + + public DatabaseImpl getDb() { + return db; + } + + public void setDb(DatabaseImpl db) { + this.db = db; + } + + public boolean getNeverLocked() { + return neverLocked; + } + + public void setNeverLocked(boolean neverLocked) { + this.neverLocked = neverLocked; + } + + /* + * Copy all the information needed to create a clone of the lock. + */ + public void copyAllInfo(WriteLockInfo source) { + abortLsn = source.abortLsn; + abortKnownDeleted = source.abortKnownDeleted; + abortKey = source.abortKey; + abortData = source.abortData; + abortVLSN = source.abortVLSN; + abortLogSize = source.abortLogSize; + abortExpiration = source.abortExpiration; + db = source.db; + neverLocked = source.neverLocked; + } + + @Override + public String toString() { + return "abortLsn=" + + DbLsn.getNoFormatString(abortLsn) + + " abortKnownDeleted=" + abortKnownDeleted + + " abortKey=" + Key.getNoFormatString(abortKey) + + " abortData=" + Key.getNoFormatString(abortData) + + " abortLogSize=" + abortLogSize + + " abortVLSN=" + String.format("%,d", abortVLSN) + + " abortExpiration=" + getAbortExpiration() + + " abortExpirationInHours=" + isAbortExpirationInHours() + + " neverLocked=" + neverLocked; + } +} diff --git a/src/com/sleepycat/je/txn/package-info.java b/src/com/sleepycat/je/txn/package-info.java new file mode 100644 index 0000000..2c93a10 --- /dev/null +++ b/src/com/sleepycat/je/txn/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Transaction management and locking (concurrency control). 
+ */ +package com.sleepycat.je.txn; \ No newline at end of file diff --git a/src/com/sleepycat/je/util/ConsoleHandler.java b/src/com/sleepycat/je/util/ConsoleHandler.java new file mode 100644 index 0000000..728959b --- /dev/null +++ b/src/com/sleepycat/je/util/ConsoleHandler.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.util.logging.Formatter; +import java.util.logging.Level; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * JE instances of java.util.logging.Logger are configured to use this + * implementation of java.util.logging.ConsoleHandler. By default, the + * handler's level is {@link Level#OFF}. To enable the console output, use the + * standard java.util.logging.LogManager configuration to set the desired + * level: + *

        + * com.sleepycat.je.util.ConsoleHandler.level=ALL
        + * 
+ * JE augments the java.util.logging API with a JE environment parameter for
+ * setting handler levels. This is described in greater detail in
+ * Chapter 12, Administering Berkeley DB Java Edition Applications.
+ *
+ * @see "Chapter 12. Logging"
+ * @see "Using JE Trace Logging"
+ */
+public class ConsoleHandler extends java.util.logging.ConsoleHandler {
+
+    /*
+     * Using a JE specific handler lets us enable and disable output for the
+     * entire library, and specify an environment specific format.
+     */
+    public ConsoleHandler(Formatter formatter, EnvironmentImpl envImpl) {
+        super();
+
+        /* Messages may be formatted with an environment specific tag. */
+        setFormatter(formatter);
+
+        Level level = null;
+        String propertyName = getClass().getName() + ".level";
+
+        if (envImpl != null) {
+            level =
+                LoggerUtils.getHandlerLevel(envImpl.getConfigManager(),
+                                            EnvironmentParams.JE_CONSOLE_LEVEL,
+                                            propertyName);
+        } else {
+            /* If envImpl instance is null, level is decided by properties. */
+            String levelProperty = LoggerUtils.getLoggerProperty(propertyName);
+            if (levelProperty == null) {
+                level = Level.OFF;
+            } else {
+                level = Level.parse(levelProperty);
+            }
+        }
+
+        setLevel(level);
+    }
+}
+
diff --git a/src/com/sleepycat/je/util/DbBackup.java b/src/com/sleepycat/je/util/DbBackup.java
new file mode 100644
index 0000000..92b0717
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbBackup.java
@@ -0,0 +1,811 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.util.NavigableSet;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.cleaner.FileProtector;
+import com.sleepycat.je.cleaner.FileProtector.ProtectedActiveFileSet;
+import com.sleepycat.je.cleaner.FileProtector.ProtectedFileRange;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.ReplicationContext;
+import com.sleepycat.je.log.entry.EmptyLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.log.entry.SingleItemEntry;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.TestHookExecute;
+
+/**
+ * DbBackup is a helper class for stopping and restarting JE background
+ * activity in an open environment in order to simplify backup operations. It
+ * also lets the application create a backup which can support restoring the
+ * environment to a specific point in time.
+ *

        + * Backing up without DbBackup + *

+ * Because JE has an append-only log file architecture, it is always possible
+ * to do a hot backup without the use of DbBackup by copying all log files
+ * (.jdb files) to your archival location. As long as the log files are copied
+ * in alphabetical order (numerical, in effect) and all log files are
+ * copied, the environment can be successfully backed up without any need to
+ * stop database operations or background activity. This means that your
+ * backup operation must do a loop to check for the creation of new log files
+ * before deciding that the backup is finished. For example:

        + * time    files in                    activity
        + *         environment
        + *
        + *  t0     000000001.jdb     Backup starts copying file 1
        + *         000000003.jdb
        + *         000000004.jdb
        + *
        + *  t1     000000001.jdb     JE log cleaner migrates portion of file 3 to newly
        + *         000000004.jdb     created file 5 and deletes file 3. Backup finishes
        + *         000000005.jdb     file 1, starts copying file 4. Backup MUST include
        + *                           file 5 for a consistent backup!
        + *
        + *  t2     000000001.jdb     Backup finishes copying file 4, starts and
        + *         000000004.jdb     finishes file 5, has caught up. Backup ends.
        + *         000000005.jdb
        + *
        + *

+ * In the example above, the backup operation must be sure to copy file 5,
+ * which came into existence after the backup had started. If the backup
+ * stopped operations at file 4, the backup set would include only files 1 and
+ * 4, omitting the data of file 3 (which was migrated to file 5), and would
+ * therefore be an inconsistent set.

        + * Also note that log file 5 may not have filled up before it was copied to + * archival storage. On the next backup, there might be a newer, larger version + * of file 5, and that newer version should replace the older file 5 in archive + * storage. + *
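+ *
+ * A sketch of such a copy loop (illustrative only; myListSortedJdbFiles and
+ * myCopyFile are hypothetical helpers):
+ *
+ *  Set<String> copied = new HashSet<String>();
+ *  boolean done = false;
+ *  while (!done) {
+ *      done = true;
+ *      for (String name : myListSortedJdbFiles(envDir)) {
+ *          if (!copied.contains(name)) {
+ *              myCopyFile(envDir, destDir, name);
+ *              copied.add(name);
+ *              done = false;  // a new file appeared; scan again
+ *          }
+ *      }
+ *  }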

        + * Using the approach above, as opposed to using DbBackup, will copy all files + * including {@link EnvironmentStats#getReservedLogSize reserved files} as well + * as {@link EnvironmentStats#getActiveLogSize active files}. A large number of + * reserved files may be present in an HA Environment, and they are essentially + * wasted space in a backup. Using DbBackup is strongly recommended for this + * reason, as well as to reduce the complexity of file copying. + *

        + * Backing up with DbBackup + *

        + * DbBackup helps simplify application backup by defining the set of {@link + * EnvironmentStats#getActiveLogSize active files} that must be copied for each + * backup operation. If the environment directory has read/write protection, + * the application must pass DbBackup an open, read/write environment handle. + *

+ * When entering backup mode, JE determines the set of active files needed for
+ * a consistent backup, and freezes all changes to those files. The application
+ * can copy that defined set of files and finish the operation without checking
+ * for the ongoing creation of new files. Also, there will be no need to check
+ * for a newer version of the last file on the next backup.

        + * In the example above, if DbBackup was used at t0, the application would only + * have to copy files 1, 3 and 4 to back up. On a subsequent backup, the + * application could start its copying at file 5. There would be no need to + * check for a newer version of file 4. + *

        + * When it is important to minimize the time that it takes to recover using a + * backup, a checkpoint should be performed immediately before calling {@link + * #startBackup}. This will reduce recovery time when opening the environment + * with the restored log files. A checkpoint is performed explicitly by + * calling {@link Environment#checkpoint} using a config object for which + * {@link CheckpointConfig#setForce setForce(true)} has been called. + *

        + * Performing simple/full backups + *

+ * The following example shows how to perform a full backup. A checkpoint is
+ * performed to minimize recovery time.

        + * void myBackup(Environment env, File destDir) {
        + *     DbBackup backupHelper = new DbBackup(env);
        + *
        + *     // Optional: Do a checkpoint to reduce recovery time after a restore.
        + *     env.checkpoint(new CheckpointConfig().setForce(true));
        + *
        + *     // Start backup, find out what needs to be copied.
        + *     backupHelper.startBackup();
        + *     try {
        + *         // Copy the necessary files to archival storage.
        + *         String[] filesToCopy = backupHelper.getLogFilesInBackupSet();
        + *         myCopyFiles(env, backupHelper, filesToCopy, destDir);
        + *     } finally {
        + *         // Remember to exit backup mode, or the JE cleaner cannot delete
        + *         // log files and disk usage will grow without bounds.
        + *        backupHelper.endBackup();
        + *     }
        + * }
        + *
        + * void myCopyFiles(
        + *     Environment env,
        + *     DbBackup backupHelper,
        + *     String[] filesToCopy,
        + *     File destDir) {
        + *
        + *     for (String fileName : filesToCopy) {
        + *         // Copy fileName to destDir.
        + *         // See {@link LogVerificationReadableByteChannel} and
        + *         // {@link LogVerificationInputStream}.
        + *         ....
        + *
        + *         // Remove protection to allow file to be deleted in order to reclaim
        + *         // disk space.
        + *         backupHelper.removeFileProtection(fileName);
        + *     }
        + * }
        + * 
        + * When copying files to the backup directory, it is critical that each file is + * verified before or during the copy. If a file is copied that is corrupt + * (due to an earlier disk failure that went unnoticed, for example), the + * backup will be invalid and provide a false sense of security. + *

        + * The {@link LogVerificationInputStream example here} shows how to implement + * the {@code myCopyFiles} method using {@link + * LogVerificationInputStream}. A {@link LogVerificationReadableByteChannel} + * could also be used for higher performance copying. A filter input stream is + * used to verify the file efficiently as it is being read. If you choose to + * use a script for copying files, the {@link DbVerifyLog} command line tool + * can be used instead. + *
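+ *
+ * A minimal sketch of such a verified copy (assuming the
+ * LogVerificationInputStream(Environment, InputStream, String) constructor,
+ * and the myCopyFiles signature used in the example above):
+ *
+ *  void myCopyFiles(
+ *      Environment env,
+ *      DbBackup backupHelper,
+ *      String[] filesToCopy,
+ *      File destDir)
+ *      throws IOException {
+ *
+ *      for (String fileName : filesToCopy) {
+ *          File src = new File(env.getHome(), fileName);
+ *          File dest = new File(destDir, fileName);
+ *          try (InputStream in = new LogVerificationInputStream(
+ *                   env, new FileInputStream(src), fileName);
+ *               OutputStream out = new FileOutputStream(dest)) {
+ *              byte[] buf = new byte[64 * 1024];
+ *              int n;
+ *              while ((n = in.read(buf)) > 0) {
+ *                  out.write(buf, 0, n);  // verified as it is read
+ *              }
+ *          }
+ *          backupHelper.removeFileProtection(fileName);
+ *      }
+ *  }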

        + * Assuming that the full backup copied files into an empty directory, to + * restore you can simply copy these files back into another empty directory. + *

        + * Always start with an empty directory as the destination for a full backup or + * a restore, to ensure that no unused files are present. Unused files -- + * perhaps the residual of an earlier environment or an earlier backup -- will + * take up space, and they will never be deleted by the JE log cleaner. Also + * note that such files will not be used by JE for calculating utilization and + * will not appear in the {@link DbSpace} output. + *

        + * Performing incremental backups + *

        + * Incremental backups are used to reduce the number of files copied during + * each backup. Compared to a full backup, there are two additional pieces of + * information needed for an incremental backup: the number of the last file in + * the previous backup, and a list of the active files in the environment + * directory at the time of the current backup, i.e., the current snapshot. + * Their purpose is explained below. + *

        + * The number of the last file in the previous backup is used to avoid copying + * files that are already present in the backup set. This file number must be + * obtained before beginning the backup, either by checking the backup archive, + * or getting this value from a stored location. For example, the last file + * number could be written to a special file in the backup set at the time of a + * backup, and then read from the special file before starting the next backup. + *
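+ *
+ * For example (a sketch; the file name "last-file-in-backup.txt" is an
+ * arbitrary choice):
+ *
+ *  void saveLastFile(File destDir, long lastFile) throws IOException {
+ *      Files.write(
+ *          new File(destDir, "last-file-in-backup.txt").toPath(),
+ *          Long.toString(lastFile).getBytes("UTF-8"));
+ *  }
+ *
+ *  long readLastFile(File destDir) throws IOException {
+ *      File f = new File(destDir, "last-file-in-backup.txt");
+ *      if (!f.exists()) {
+ *          return -1;  // no previous backup: perform a full backup
+ *      }
+ *      return Long.parseLong(
+ *          new String(Files.readAllBytes(f.toPath()), "UTF-8").trim());
+ *  }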

        + * The list of files in the current snapshot, which should be obtained by + * calling {@link #getLogFilesInSnapshot} (after calling {@link #startBackup}), + * is used to avoid unused files after a restore, and may also be used to + * reduce the size of the backup set. How to use this list is described below. + *

+ * Some applications need the ability to restore to the point in time of any of
+ * the incremental backups that were made in the past, and other applications
+ * only need to restore to the point in time of the most recent backup.
+ * Accordingly, the list of current files (that is made at the time of the
+ * backup) should be used in one of two ways.

          + *
+ * 1. If you only need to restore to the point in time of the most recent
+ * backup, then the list should be used to delete unused files from the
+ * backup set. After copying all files during the backup, any file that is
+ * not present in the list may then be deleted from the backup set.
+ * This both reduces the size of the backup set, and ensures that unused
+ * files will not be present in the backup set and therefore will not be
+ * restored.
+ *
+ * 2. If you need to keep all log files from each backup so you can restore
+ * to more than one point in time, then the list for each backup should be
+ * saved with the backup file set so it can be used during a restore. During
+ * the restore, only the files in the list should be copied, starting with an
+ * empty destination directory. This ensures that unused files will not be
+ * restored.
        + *

+ * The following two examples show how to perform an incremental backup. In
+ * the first example, the list of current files is used to delete files from
+ * the backup set that are no longer needed.

        + * void myBackup(Environment env, File destDir) {
        + *
        + *     // Get the file number of the last file in the previous backup.
        + *     long lastFileInPrevBackup =  ...
        + *
        + *     DbBackup backupHelper = new DbBackup(env, lastFileInPrevBackup);
        + *
        + *     // Optional: Do a checkpoint to reduce recovery time after a restore.
        + *     env.checkpoint(new CheckpointConfig().setForce(true));
        + *
        + *     // Start backup, find out what needs to be copied.
        + *     backupHelper.startBackup();
        + *     try {
        + *         // Copy the necessary files to archival storage.
        + *         String[] filesToCopy = backupHelper.getLogFilesInBackupSet();
        + *         myCopyFiles(env, backupHelper, filesToCopy, destDir);
        + *
        + *         // Delete files that are no longer needed.
        + *         // WARNING: This should only be done after copying all new files.
        + *         String[] filesInSnapshot = backupHelper.getLogFilesInSnapshot();
        + *         myDeleteUnusedFiles(destDir, filesInSnapshot);
        + *
        + *         // Update knowledge of last file saved in the backup set.
        + *         lastFileInPrevBackup = backupHelper.getLastFileInBackupSet();
        + *         // Save lastFileInPrevBackup persistently here ...
        + *     } finally {
        + *         // Remember to exit backup mode, or the JE cleaner cannot delete
        + *         // log files and disk usage will grow without bounds.
        + *        backupHelper.endBackup();
        + *     }
        + * }
        + *
        + * void myDeleteUnusedFiles(File destDir, String[] filesInSnapshot) {
        + *     // For each file in destDir that is NOT in filesInSnapshot, it should
        + *     // be deleted from destDir to save disk space in the backup set, and to
        + *     // ensure that unused files will not be restored.
        + * }
        + *
        + * See myCopyFiles further above.
        + * 
        + *

        + * When performing backups as shown in the first example above, to restore you + * can simply copy all files from the backup set into an empty directory. + *

        + * In the second example below, the list of current files is saved with the + * backup set so it can be used during a restore. The backup set will + * effectively hold multiple backups that can be used to restore to different + * points in time. + *

        + * void myBackup(Environment env, File destDir) {
        + *
        + *     // Get the file number of the last file in the previous backup.
        + *     long lastFileInPrevBackup =  ...
        + *
        + *     DbBackup backupHelper = new DbBackup(env, lastFileInPrevBackup);
        + *
        + *     // Optional: Do a checkpoint to reduce recovery time after a restore.
        + *     env.checkpoint(new CheckpointConfig().setForce(true));
        + *
        + *     // Start backup, find out what needs to be copied.
        + *     backupHelper.startBackup();
        + *     try {
        + *         // Copy the necessary files to archival storage.
        + *         String[] filesToCopy = backupHelper.getLogFilesInBackupSet();
        + *         myCopyFiles(env, backupHelper, filesToCopy, destDir);
        + *
        + *         // Save current list of files with backup data set.
        + *         String[] filesInSnapshot = backupHelper.getLogFilesInSnapshot();
        + *         // Save filesInSnapshot persistently here ...
        + *
        + *         // Update knowledge of last file saved in the backup set.
        + *         lastFileInPrevBackup = backupHelper.getLastFileInBackupSet();
        + *         // Save lastFileInPrevBackup persistently here ...
        + *     } finally {
        + *         // Remember to exit backup mode, or the JE cleaner cannot delete
        + *         // log files and disk usage will grow without bounds.
        + *        backupHelper.endBackup();
        + *     }
        + * }
        + *
        + * See myCopyFiles further above.
        + * 
        + *

        + * When performing backups as shown in the second example above, to restore you + * must choose one of the file lists that was saved. You may choose the list + * written by the most recent backup, or a list written by an earlier backup. + * To restore, the files in the list should be copied into an empty destination + * directory. + *

        + * Restoring from a backup + *

        + * As described in the sections above, the restore procedure is to copy the + * files from a backup set into an empty directory. Depending on the type of + * backup that was performed (see above), either all files from the backup set + * are copied, or only the files on a list that was created during the backup. + *

        + * There is one additional consideration when performing a restore, under the + * following condition: + *

          + *
+ *  • Incremental backups are used, AND either
+ *    • the backup was created using DbBackup with JE 6.2 or earlier, OR
+ *    • the backup was created in a read-only JE environment.
        + *

        + * If the above condition holds, after copying the files an additional step is + * needed. To enable the creation of future incremental backups using the + * restored files, the {@link + * com.sleepycat.je.EnvironmentConfig#ENV_RECOVERY_FORCE_NEW_FILE} parameter + * should be set to true when opening the JE Environment for the first time + * after the restore. When this parameter is set to true, the last .jdb file + * restored will not be modified when opening the Environment, and the next + * .jdb file will be created and will become the end-of-log file. + *
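+ * For example (a sketch of the first open after such a restore; restoredDir
+ * is the directory the backup files were copied into):
+ *
+ *  EnvironmentConfig config = new EnvironmentConfig();
+ *  config.setConfigParam(
+ *      EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE, "true");
+ *  Environment env = new Environment(restoredDir, config);
+ *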

        + * WARNING: When the above special condition is true and this property is + * not set to true when opening the environment for the first time + * after a restore, then the backup set that was restored may not be used as + * the basis for future incremental backups. If a future incremental backup + * were performed based on this backup set, it would be incomplete and data + * would be lost if that incremental backup were restored. + *

+ * When JE 6.3 or later is used to create the backup, and the backup is created
+ * in a read-write environment (the usual case), this extra step is
+ * unnecessary. In this case, {@link #startBackup} will have added an
+ * "immutable file" marker to the last file in the backup and this will prevent
+ * that file from being modified, just as if the
+ * {@code ENV_RECOVERY_FORCE_NEW_FILE} parameter were set to true.
+ */
+public class DbBackup {
+
+    private final EnvironmentImpl envImpl;
+    private final boolean envIsReadOnly;
+    private final long firstFileInBackup;
+    private long lastFileInBackup = -1;
+    private boolean backupStarted;
+    private boolean networkRestore;
+    private ProtectedActiveFileSet protectedFileSet;
+    private NavigableSet<Long> snapshotFiles;
+    /* Whether this backup is invalid because of a rollback. */
+    private boolean invalid;
+    /* The rollback start file number. */
+    private long rollbackStartedFileNumber;
+    /* For unit tests. */
+    private TestHook testHook;
+
+    /**
+     * Creates a DbBackup helper for a full backup.
+     *
+     *

+     * This is equivalent to using {@link #DbBackup(Environment,long)} and
+     * passing {@code -1} for the {@code lastFileInPrevBackup} parameter.

        + * + * @param env with an open, valid environment handle. If the environment + * directory has read/write permissions, the environment handle must be + * configured for read/write. + * + * @throws IllegalArgumentException if the environment directory has + * read/write permissions, but the environment handle is not configured for + * read/write. + */ + public DbBackup(Environment env) + throws DatabaseException { + + this(env, -1); + } + + /** + * Creates a DbBackup helper for an incremental backup. + * + * @param env with an open, valid environment handle. If the environment + * directory has read/write permissions, the environment handle must be + * configured for read/write. + * + * @param lastFileInPrevBackup the last file in the previous backup set + * when performing an incremental backup, or {@code -1} to perform a full + * backup. The first file in this backup set will be the file following + * {@code lastFileInPrevBackup}. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalArgumentException if the environment directory has + * read/write permissions, but the environment handle is not configured for + * read/write. + */ + public DbBackup(Environment env, long lastFileInPrevBackup) { + this(env, DbInternal.getNonNullEnvImpl(env), lastFileInPrevBackup); + } + + /** + * @hidden + * For internal use only. + */ + public DbBackup(EnvironmentImpl envImpl) { + this(null, envImpl, -1); + } + + /** + * This is the true body of the DbBackup constructor. The env param may be + * null when this class is used internally. + */ + private DbBackup(Environment env, + EnvironmentImpl envImpl, + long lastFileInPrevBackup) { + + /* Check that the Environment is open. */ + if (env != null) { + DbInternal.checkOpen(env); + } + + this.envImpl = envImpl; + + /* + * If the environment is writable, we need a r/w environment handle + * in order to flip the file. + */ + envIsReadOnly = envImpl.getFileManager().checkEnvHomePermissions(true); + if ((!envIsReadOnly) && envImpl.isReadOnly()) { + throw new IllegalArgumentException + ("Environment handle may not be read-only when directory " + + "is read-write"); + } + + firstFileInBackup = lastFileInPrevBackup + 1; + } + + /** + * Start backup mode in order to determine the definitive backup set needed + * at this point in time. + * + *

+     * This method determines the last file in the backup set, which is the
+     * last log file in the environment at this point in time. Following this
+     * method call, all new data will be written to other, new log files. In
+     * other words, the last file in the backup set will not be modified after
+     * this method returns.

        + * + *

+     * WARNING: After calling this method, deletion of log files in
+     * the backup set by the JE log cleaner will be disabled until {@link
+     * #endBackup()} is called. To prevent unbounded growth of disk usage, be
+     * sure to call {@link #endBackup()} to re-enable log file deletion.
+     * Additionally, the Environment can't be closed until endBackup() is
+     * called.
+     *

        + * + * @throws com.sleepycat.je.rep.LogOverwriteException if a replication + * operation is overwriting log files. The backup can not proceed because + * files may be invalid. The backup may be attempted at a later time. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if a backup is already in progress + */ + public synchronized void startBackup() + throws DatabaseException { + + if (backupStarted) { + throw new IllegalStateException("startBackup was already called"); + } + + /* Throw a LogOverwriteException if the Environment is rolling back. */ + if (!envImpl.addDbBackup(this)) { + throw envImpl.createLogOverwriteException + ("A replication operation is overwriting log files. The " + + "backup can not proceed because files may be invalid. The " + + "backup may be attempted at a later time."); + } + + final FileProtector fileProtector = envImpl.getFileProtector(); + + /* + * For a network restore we use a different backup name/id and also + * protect the 2 newest (highest numbered) reserved files. This is a + * safeguard to ensure that the restored node can function as a master. + * In a future release this approach may be improved by additionally + * copying all reserved files to the restored node, but only after it + * has recovered using the active files [#25783]. + */ + final long backupId = networkRestore ? + envImpl.getNodeSequence().getNextNetworkRestoreId() : + envImpl.getNodeSequence().getNextBackupId(); + + final String backupName = + (networkRestore ? + FileProtector.NETWORK_RESTORE_NAME : + FileProtector.BACKUP_NAME) + + "-" + backupId; + + final int nReservedFiles = networkRestore ? 2 : 0; + + /* + * Protect all files from deletion momentarily, while determining the + * last file and protecting the active files. + */ + final ProtectedFileRange allFilesProtected = + fileProtector.protectFileRange(backupName + "-init", 0); + + try { + if (envIsReadOnly) { + /* + * All files are currently immutable, so the backup list is the + * current set of files. However, we can't add a marker to the + * last file in list, and therefore it will not be immutable + * after being restored to a read-write directory (unless the + * user sets ENV_RECOVERY_FORCE_NEW_FILE after restoring). + */ + lastFileInBackup = envImpl.getFileManager().getLastFileNum(); + } else { + /* + * Flip the log so that all files in the backup list are + * immutable. But first, write an "immutable file" marker in + * the last file in the backup, so it cannot be modified after + * it is restored. Recovery enforces this rule. + */ + LogEntry marker = new SingleItemEntry<>( + LogEntryType.LOG_IMMUTABLE_FILE, new EmptyLogEntry()); + + long markerLsn = envImpl.getLogManager().log( + marker, ReplicationContext.NO_REPLICATE); + + envImpl.forceLogFileFlip(); + lastFileInBackup = DbLsn.getFileNumber(markerLsn); + } + + /* + * Protect all active files from deletion. This includes files + * prior to firstFileInBackup, in order to get a snapshot of all + * active files. New files do not need protection, since the backup + * set does not include them. lastFileInBackup will be protected by + * protectActiveFiles because it is not the last file in the env. + */ + protectedFileSet = fileProtector.protectActiveFiles( + backupName, nReservedFiles, false /*protectNewFiles*/); + + } finally { + fileProtector.removeFileProtection(allFilesProtected); + } + + /* At this point, endBackup must be called to undo file protection. 
 */
+        backupStarted = true;
+
+        /* Files after lastFileInBackup do not need protection. */
+        protectedFileSet.truncateTail(lastFileInBackup);
+
+        /* Snapshot the complete backup file set. */
+        snapshotFiles = protectedFileSet.getProtectedFiles();
+
+        /*
+         * Now that we have the snapshot, files before firstFileInBackup do not
+         * need protection.
+         */
+        protectedFileSet.truncateHead(firstFileInBackup);
+    }
+
+    /**
+     * End backup mode, thereby re-enabling normal deletion of log files by the
+     * JE log cleaner.
+     *
+     * @throws com.sleepycat.je.rep.LogOverwriteException if a replication
+     * operation has overwritten log files. Any copied files should be
+     * considered invalid and discarded. The backup may be attempted at a
+     * later time.
+     *
+     * @throws com.sleepycat.je.EnvironmentFailureException if an unexpected,
+     * internal or environment-wide failure occurs.
+     *
+     * @throws IllegalStateException if a backup has not been started.
+     */
+    public synchronized void endBackup() {
+        checkBackupStarted();
+        backupStarted = false;
+
+        assert TestHookExecute.doHookIfSet(testHook);
+
+        envImpl.getFileProtector().removeFileProtection(protectedFileSet);
+
+        envImpl.removeDbBackup(this);
+
+        /* If this backup is invalid, throw a LogOverwriteException. */
+        if (invalid) {
+            invalid = false;
+            throw envImpl.createLogOverwriteException
+                ("A replication operation has overwritten log files from " +
+                 "file " + rollbackStartedFileNumber + ". Any copied files " +
+                 "should be considered invalid and discarded. The backup " +
+                 "may be attempted at a later time.");
+        }
+    }
+
+    /**
+     * Can only be called in backup mode, after startBackup() has been called.
+     *
+     * @return the file number of the last file in the current backup set.
+     * Save this value to reduce the number of files that must be copied at
+     * the next backup session.
+     *
+     * @throws IllegalStateException if a backup has not been started.
+     */
+    public synchronized long getLastFileInBackupSet() {
+        checkBackupStarted();
+        return lastFileInBackup;
+    }
+
+    /**
+     * Get the minimum list of files that must be copied for this backup. When
+     * performing an incremental backup, this consists of the set of active
+     * files that are greater than the last file copied in the previous backup
+     * session. When performing a full backup, this consists of the set of all
+     * active files. Can only be called in backup mode, after startBackup() has
+     * been called.
+     *
+     *

+     * The file numbers returned are in the range from the constructor
+     * parameter {@code lastFileInPrevBackup + 1} to the last log file at the
+     * time that {@link #startBackup} was called.

+     *
+     * @return the names of all files to be copied, sorted in alphabetical
+     * order. The return values are generally simple file names, not full
+     * paths. However, if multiple data directories are being used (i.e. the
+     * je.log.nDataDirectories parameter is non-0), then the file names are
+     * prepended with the associated "dataNNN/" prefix, where "dataNNN/" is
+     * the data directory name within the environment home directory and "/"
+     * is the relevant file separator for the platform.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     *
+     * @throws IllegalStateException if a backup has not been started.
+     */
+    public synchronized String[] getLogFilesInBackupSet() {
+        checkBackupStarted();
+        return getFileNames(snapshotFiles.tailSet(firstFileInBackup, true));
+    }
+
+    /**
+     * Get the minimum list of files that must be copied for this backup. This
+     * consists of the set of active files that are greater than the last file
+     * copied in the previous backup session. Can only be called in backup
+     * mode, after startBackup() has been called.
+     *
+     * @param lastFileInPrevBackup file number of last file copied in the last
+     * backup session, obtained from getLastFileInBackupSet().
+     *
+     * @return the names of all the files to be copied that come after
+     * lastFileInPrevBackup.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     *
+     * @throws IllegalStateException if a backup has not been started.
+     *
+     * @deprecated replaced by {@link #getLogFilesInBackupSet()}; pass
+     * lastFileInPrevBackup to the {@link #DbBackup(Environment,long)}
+     * constructor.
+     */
+    @Deprecated
+    public synchronized String[] getLogFilesInBackupSet(
+        long lastFileInPrevBackup) {
+
+        checkBackupStarted();
+
+        return getFileNames(
+            snapshotFiles.tailSet(lastFileInPrevBackup + 1, true));
+    }
+
+    /**
+     * Get the list of all active files that are needed for the environment at
+     * the point of time when backup mode started, i.e., the current snapshot.
+     * Can only be called in backup mode, after startBackup() has been called.
+     *
+     *

+     * When performing an incremental backup, this method is called to
+     * determine the files that would be needed for a restore. As described in
+     * the examples at the top of this class, this list can be used to avoid
+     * unused files after a restore, and may also be used to reduce the size of
+     * the backup set.

        + * + *

+     * When performing a full backup this method is normally not needed,
+     * since in that case it returns the same set of files that is returned by
+     * {@link #getLogFilesInBackupSet()}.

+ * + * @return the names of all files in the snapshot, sorted in alphabetical + * order. The return values are generally simple file names, not full + * paths. However, if multiple data directories are being used (i.e. the + * {@link + * je.log.nDataDirectories} parameter is non-0), then the file names are + * prepended with the associated "dataNNN/" prefix, where "dataNNN/" is + * the data directory name within the environment home directory and "/" + * is the relevant file separator for the platform. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IllegalStateException if a backup has not been started. + */ + public synchronized String[] getLogFilesInSnapshot() { + checkBackupStarted(); + + return getFileNames(snapshotFiles); + } + + private String[] getFileNames(NavigableSet<Long> fileSet) { + + final FileManager fileManager = envImpl.getFileManager(); + final String[] names = new String[fileSet.size()]; + int i = 0; + + for (Long file : fileSet) { + names[i] = fileManager.getPartialFileName(file); + i += 1; + } + + return names; + } + + /** + * Removes protection for a file in the backup set. This method should be + * called after copying a file, so that it may be deleted to avoid + * exceeding disk usage limits. + * + * @param fileName a file name that has already been copied, in the format + * returned by {@link #getLogFilesInBackupSet}. + * + * @since 7.5 + */ + public synchronized void removeFileProtection(String fileName) { + checkBackupStarted(); + + protectedFileSet.removeFile( + envImpl.getFileManager().getNumFromName(fileName)); + } + + private void checkBackupStarted() { + if (!backupStarted) { + throw new IllegalStateException("startBackup was not called"); + } + } + + /** + * For internal use only. + * @hidden + * Returns true if a backup has been started and is in progress. + */ + public synchronized boolean backupIsOpen() { + return backupStarted; + } + + /** + * For internal use only. + * @hidden + * + * Invalidate this backup if replication overwrites the log. + */ + public void invalidate(long fileNumber) { + invalid = true; + this.rollbackStartedFileNumber = fileNumber; + } + + /** + * For internal use only. + * @hidden + * + * Marks this backup as a network restore. Causes the protected file set + * name/id to be set specially, and two reserved files to be included. + * See {@link #startBackup()}. + * + * Another approach (for future consideration) is to factor out the part + * of DbBackup that is needed by network restore into utility methods, + * so that special code for network restore can be removed from DbBackup. + * The shared portion should be just the startBackup code. + */ + public void setNetworkRestore() { + networkRestore = true; + } + + /** + * For internal use only. + * @hidden + * + * A test entry point used to simulate that the environment is rolling back, + * in which case this TestHook invalidates the in-progress DbBackups. + */ + public void setTestHook(TestHook testHook) { + this.testHook = testHook; + } +} diff --git a/src/com/sleepycat/je/util/DbCacheSize.java b/src/com/sleepycat/je/util/DbCacheSize.java new file mode 100644 index 0000000..48f4967 --- /dev/null +++ b/src/com/sleepycat/je/util/DbCacheSize.java @@ -0,0 +1,2222 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.math.BigInteger; +import java.text.NumberFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.Get; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.PreloadStats; +import com.sleepycat.je.PreloadStatus; +import com.sleepycat.je.Put; +import com.sleepycat.je.ReadOptions; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.DbCacheSizeRepEnv; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * Estimates the in-memory cache size needed to hold a specified data set. + * + * To get an estimate of the in-memory footprint for a given database, + * specify the number of records and database characteristics and DbCacheSize + * will return an estimate of the cache size required for holding the + * database in memory. Based on this information a JE main cache size can be + * chosen and then configured using {@link EnvironmentConfig#setCacheSize} or + * using the {@link EnvironmentConfig#MAX_MEMORY} property. An off-heap cache + * may also be optionally configured using {@link + * EnvironmentConfig#setOffHeapCacheSize} or using the {@link + * EnvironmentConfig#MAX_OFF_HEAP_MEMORY} property. + * + *

+ * <h3>Importance of the JE Cache</h3>

+ * + * The JE cache is not an optional cache. It is used to hold the metadata for + * accessing JE data. In fact, the JE cache size is probably the most critical + * factor in JE performance, since Btree nodes must be fetched during a + * database read or write operation if they are not in cache. During a single + * read or write operation, an IO at a different disk location may be needed + * for each level of the Btree at which a fetch is necessary. In addition, if + * internal nodes (INs) are not in cache, then write operations will cause + * additional copies of the INs to be written to storage, as modified INs are + * moved out of the cache to make room for other parts of the Btree during + * subsequent operations. This additional fetching and writing means that + * sizing the cache too small to hold the INs will result in lower operation + * performance. + *

        + * For best performance, all Btree nodes should fit in the JE cache, including + * leaf nodes (LNs), which hold the record data, and INs, which hold record + * keys and other metadata. However, because system memory is limited, it is + * sometimes necessary to size the cache to hold all or at least most INs, but + * not the LNs. This utility estimates the size necessary to hold only INs, + * and the size to hold INs and LNs. + *

        + * In addition, a common problem with large caches is that Java GC overhead + * can become significant. When a Btree node is evicted from the JE main + * cache based on JE's LRU algorithm, typically the node will have been + * resident in the JVM heap for an extended period of time, and will be + * expensive to GC. Therefore, when most or all LNs do not fit in + * the main cache, using {@link CacheMode#EVICT_LN} can be beneficial to + * reduce the Java GC cost of collecting the LNs as they are moved out of the + * main cache. With EVICT_LN, the LNs only reside in the JVM heap for a short + * period and are cheap to collect. A recommended approach is to size the JE + * main cache to hold only INs, and size the Java heap to hold that amount plus + * the amount needed for GC working space and application objects, leaving + * any additional memory for use by the file system cache or the off-heap + * cache. Tests show this approach results in lower GC overhead and more + * predictable latency. + *
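+ *
+ * <p>A minimal configuration sketch of this approach (hypothetical sizes,
+ * added for illustration):
+ * <pre>
+ *  EnvironmentConfig envConfig = new EnvironmentConfig();
+ *  envConfig.setCacheSize(24L * 1024 * 1024);   // sized to hold INs only
+ *  envConfig.setCacheMode(CacheMode.EVICT_LN);  // evict LNs promptly
+ * </pre>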

+ * Another issue is that 64-bit JVMs store object references using less space + * when the heap size is slightly less than 32GiB. When the heap size is 32GiB + * or more, object references are larger and less data can be cached per GiB of + * memory. This JVM feature is enabled with the + * Compressed Oops + * (-XX:+UseCompressedOops) option, although in modern JVMs it is + * on by default. Because of this factor, and because Java GC overhead is + * usually higher with larger heaps, a maximum heap size slightly less than + * 32GiB is recommended, along with the Compressed Oops option. + *

        + * Of course, the JE main cache size must be less than the heap size since the + * main cache is stored in the heap. In fact, around 30% of free space should + * normally be reserved in the heap for use by Java GC, to avoid high GC + * overheads. For example, if the application uses roughly 2GiB of the heap, + * then with a 32GiB heap the JE main cache should normally be no more than + * 20GiB. + *

        + * As of JE 6.4, an optional off-heap cache may be configured in addition to + * the main JE cache. See {@link EnvironmentConfig#setOffHeapCacheSize} for + * information about the trade-offs in using an off-heap cache. When the + * {@code -offheap} argument is specified, this utility displays sizing + * information for both the main and off-heap caches. The portion of the data + * set that fits in the main cache, and the off-heap size needed to hold the + * rest of the data set, will be shown. The main cache size can be specified + * with the {@code -maincache} argument, or is implied to be the amount needed + * to hold all internal nodes if this argument is omitted. Omitting this + * argument is appropriate when {@link CacheMode#EVICT_LN} is used, since only + * internal nodes will be stored in the main cache. + *
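+ *
+ * <p>A corresponding configuration sketch (hypothetical sizes, taken from
+ * the example output shown further below):
+ * <pre>
+ *  EnvironmentConfig envConfig = new EnvironmentConfig();
+ *  envConfig.setCacheSize(30L * 1000 * 1000);         // main cache
+ *  envConfig.setOffHeapCacheSize(71L * 1000 * 1000);  // off-heap cache
+ * </pre>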

+ * To reduce Java GC overhead, sometimes a small main cache is used along + * with an off-heap cache. Note that it is important that the main cache is + * at least large enough to hold all the upper INs (the INs at level + * 2 and above). This is because the off-heap cache does not contain upper + * INs; it contains only LNs and bottom internal nodes (BINs). When a level 2 + * IN is evicted from the main cache, its children (BINs and LNs) in the + * off-heap cache, if any, must also be evicted, which can be undesirable, + * especially if the off-heap cache is not full. This utility displays the + * main cache size needed to hold all upper INs, and displays a warning if + * the main cache size specified is smaller than this minimum. + *

+ * <h3>Estimating the JE Cache Size</h3>

+ * + * Estimating JE in-memory sizes is not straightforward for several reasons. + * There is some fixed overhead for each Btree internal node, so fanout + * (maximum number of child entries per parent node) and degree of node + * sparseness impact memory consumption. In addition, JE uses various compact + * in-memory representations that depend on key sizes, data sizes, key + * prefixing, how many child nodes are resident, etc. The physical proximity + * of node children also allows compaction of child physical address values. + *

        + * Therefore, when running this utility it is important to specify all {@link + * EnvironmentConfig} and {@link DatabaseConfig} settings that will be used in + * a production system. The {@link EnvironmentConfig} settings are specified + * by command line options for each property, using the same names as the + * {@link EnvironmentConfig} parameter name values. For example, {@link + * EnvironmentConfig#LOG_FILE_MAX}, which influences the amount of memory used + * to store physical record addresses, can be specified on the command line as: + *

        + * {@code -je.log.fileMax LENGTH} + *

        + * To be sure that this utility takes into account all relevant settings, + * especially as the utility is enhanced in future versions, it is best to + * specify all {@link EnvironmentConfig} settings used by the application. + *
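+ *
+ * <p>For instance (illustrative values only), a run that carries the
+ * production log file size and Btree fanout might be:
+ * <pre>
+ *  java -jar je-X.Y.Z.jar DbCacheSize -records 1000000 -key 16 -data 100 \
+ *      -nodemax 128 -je.log.fileMax 100000000
+ * </pre>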

        + * The {@link DatabaseConfig} settings are specified using command line options + * defined by this utility. + *

+ * <ul>
+ * <li>{@code -nodemax ENTRIES} corresponds to {@link
+ * DatabaseConfig#setNodeMaxEntries}.</li>
+ * <li>{@code -duplicates} corresponds to passing true to {@link
+ * DatabaseConfig#setSortedDuplicates}. Note that duplicates are configured
+ * for DPL MANY_TO_ONE and MANY_TO_MANY secondary indices.</li>
+ * <li>{@code -keyprefix LENGTH} corresponds to passing true to {@link
+ * DatabaseConfig#setKeyPrefixing}. Note that key prefixing is always used
+ * when duplicates are configured.</li>
+ * </ul>

+ * This utility estimates the JE cache size by creating an in-memory + * Environment and Database. In addition to the size of the Database, the + * minimum overhead for the Environment is output. The Environment overhead + * shown is likely to be smaller than actually needed because it doesn't take + * into account use of memory by JE daemon threads (cleaner, checkpointer, + * etc.), the memory used for locks that are held by application operations and + * transactions, the memory for HA network connections, etc. An additional + * amount should be added to account for these factors. + *

        + * This utility estimates the cache size for a single JE Database, or a logical + * table spread across multiple databases (as in the case of Oracle NoSQL DB, + * for example). To estimate the size for multiple databases/tables with + * different configuration parameters or different key and data sizes, run + * this utility for each database/table and sum the sizes. If you are summing + * multiple runs for multiple databases/tables that are opened in a single + * Environment, the overhead size for the Environment should only be added once. + *
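+ *
+ * <p>For example (hypothetical figures): if database A needs 24MB and
+ * database B needs 10MB to hold their internal nodes, and both are opened
+ * in a single Environment with a 3MB overhead, the combined estimate is
+ * 24MB + 10MB + 3MB = 37MB, counting the overhead only once.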

+ * In some applications with databases/tables having variable key and data + * sizes, it may be difficult to determine the key and data size input + * parameters for this utility. If a representative data set can be created, + * one approach is to use the {@link DbPrintLog} utility with the {@code -S} + * option to find the average key and data size for all databases/tables, and + * use these values as input parameters, as if there were only a single + * database/table. With this approach, it is important that the {@code + * DatabaseConfig} parameters are the same, or at least similar, for all + * databases/tables. + *

+ * <h3>Key Prefixing and Compaction</h3>

        + * + * Key prefixing deserves special consideration. It can significantly reduce + * the size of the cache and is generally recommended; however, the benefit can + * be difficult to predict. Key prefixing, in turn, impacts the benefits of + * key compaction, and the use of the {@link + * EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH} parameter. + *

+ * For a given data set, the impact of key prefixing is determined by how many + * leading bytes are in common for the keys in a single bottom internal node + * (BIN). For example, if keys are assigned sequentially as long (8 byte) + * integers, and the {@link DatabaseConfig#setNodeMaxEntries maximum entries + * per node} is 128 (the default value), then 6 or 7 of the 8 bytes of the key + * will have a common prefix in each BIN. Of course, when records are deleted, + * the number of prefixed bytes may be reduced because the range of key values + * in a BIN will be larger. For this example we will assume that, on average, + * 5 bytes in each BIN are a common prefix, leaving 3 bytes per key that are + * unprefixed. + *

        + * Key compaction is applied when the number of unprefixed bytes is less than a + * configured value; see {@link EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH}. + * In the example, the 3 unprefixed bytes per key is less than the default used + * for key compaction (16 bytes). This means that each key will use 16 bytes + * of memory, in addition to the amount used for the prefix for each BIN. The + * per-key overhead could be reduced by changing the {@code + * TREE_COMPACT_MAX_KEY_LENGTH} parameter to a smaller value, but care should + * be taken to ensure the compaction will be effective as keys are inserted and + * deleted over time. + *
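+ *
+ * <p>As rough, illustrative arithmetic for this example: with 128 entries
+ * per BIN, compacted keys use about 128 * 16 = 2,048 bytes per BIN, while
+ * lowering {@code TREE_COMPACT_MAX_KEY_LENGTH} toward the 3 unprefixed
+ * bytes actually needed could save up to about 128 * (16 - 3) = 1,664
+ * bytes per BIN.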

        + * Because key prefixing depends so much on the application key format and the + * way keys are assigned, the number of expected prefix bytes must be estimated + * by the user and specified to DbCacheSize using the {@code -keyprefix} + * argument. + * + *

+ * <h3>Key Prefixing and Duplicates</h3>

        + * + * When {@link DatabaseConfig#setSortedDuplicates duplicates} are configured + * for a Database (including DPL MANY_TO_ONE and MANY_TO_MANY secondary + * indices), key prefixing is always used. This is because the internal key in + * a duplicates database BIN is formed by concatenating the user-specified key + * and data. In secondary databases with duplicates configured, the data is + * the primary key, so the internal key is the concatenation of the secondary + * key and the primary key. + *

        + * Key prefixing is always used for duplicates databases because prefixing is + * necessary to store keys efficiently. When the number of duplicates per + * unique user-specified key is more than the number of entries per BIN, the + * entire user-specified key will be the common prefix. + *

+ * For example, a database that stores user information may use email address + * as the primary key and zip code as a secondary key. The secondary index + * database will be a duplicates database, and the internal key stored in the + * BINs will be a two-part key containing zip code followed by email address. + * If on average there are more users per zip code than the number of entries + * in a BIN, then the key prefix will normally be at least as long as the zip + * code key. If there are fewer (more than one zip code appears in each BIN), + * then the prefix will be shorter than the zip code key. + *

        + * It is also possible for the key prefix to be larger than the secondary key. + * If for one secondary key value (one zip code) there are a large number of + * primary keys (email addresses), then a single BIN may contain concatenated + * keys that all have the same secondary key (same zip code) and have primary + * keys (email addresses) that all have some number of prefix bytes in common. + * Therefore, when duplicates are specified it is possible to specify a prefix + * size that is larger than the key size. + * + *

+ * <h3>Small Data Sizes and Embedded LNs</h3>

+ * + * Another special data representation involves small data sizes. When the + * data size of a record is less than or equal to {@link + * EnvironmentConfig#TREE_MAX_EMBEDDED_LN} (16 bytes, by default), the data + * is stored (embedded) in the BIN, and the LN is not stored in cache at all. + * This increases the size needed to hold all INs in cache, but it decreases + * the size needed to hold the complete data set. If the data size specified + * when running this utility is less than or equal to TREE_MAX_EMBEDDED_LN, + * the size displayed for holding INs only will be the same as the size + * displayed for holding INs and LNs. + *

        + * See {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN} for information about + * the trade-offs in using the embedded LNs feature. + * + *

+ * <h3>Record Versions and Oracle NoSQL Database</h3>

+ * + * This note applies only when JE is used with Oracle NoSQL DB. In Oracle + * NoSQL DB, an internal JE environment configuration parameter is always + * used: {@code -je.rep.preserveRecordVersion true}. This allows using record + * versions in operations such as "put if version", "delete if version", etc. + * This feature performs best when the cache is sized large enough to hold the + * record versions. + *

        + * When using JE with Oracle NoSQL DB, always add {@code + * -je.rep.preserveRecordVersion true} to the command line. This ensures that + * the cache sizes calculated are correct, and also outputs an additional line + * showing how much memory is required to hold the internal nodes and record + * versions (but not the leaf nodes). This is the minimum recommended size + * when the "... if version" operations are used. + * + *
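+ *
+ * <p>For example (illustrative values only):
+ * <pre>
+ *  java -jar je-X.Y.Z.jar DbCacheSize -records 554719 -key 16 -data 100 \
+ *      -replicated -je.rep.preserveRecordVersion true
+ * </pre>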

+ * <h3>Running the DbCacheSize utility</h3>

+ * + * Usage: + * <pre>
        + * java { com.sleepycat.je.util.DbCacheSize |
        + *        -jar je-<version>.jar DbCacheSize }
        + *  -records COUNT
        + *      # Total records (key/data pairs); required
        + *  -key BYTES
        + *      # Average key bytes per record; required
        + *  [-data BYTES]
        + *      # Average data bytes per record; if omitted no leaf
        + *      # node sizes are included in the output; required with
        + *      # -duplicates, and specifies the primary key length
        + *  [-offheap]
        + *      # Indicates that an off-heap cache will be used.
        + *  [-maincache BYTES]
        + *      # The size of the main cache (in the JVM heap).
        + *      # The size of the off-heap cache displayed is the
        + *      # additional amount needed to hold the data set.
        + *      # If omitted, the main cache size is implied to
        + *      # be the amount needed to hold all internal nodes.
        + *      # Ignored if -offheap is not also specified.
        + *  [-keyprefix BYTES]
        + *      # Expected size of the prefix for the keys in each
        + *      # BIN; default: key prefixing is not configured;
        + *      # required with -duplicates
        + *  [-nodemax ENTRIES]
        + *      # Number of entries per Btree node; default: 128
        + *  [-orderedinsertion]
        + *      # Assume ordered insertions and no deletions, so BINs
        + *      # are 100% full; default: unordered insertions and/or
        + *      # deletions, BINs are 70% full
        + *  [-duplicates]
        + *      # Indicates that sorted duplicates are used, including
        + *      # MANY_TO_ONE and MANY_TO_MANY secondary indices;
        + *      # default: false
        + *  [-ttl]
        + *      # Indicates that TTL is used; default: false
        + *  [-replicated]
        + *      # Use a ReplicatedEnvironment; default: false
        + *  [-ENV_PARAM_NAME VALUE]...
        + *      # Any number of EnvironmentConfig parameters and
        + *      # ReplicationConfig parameters (if -replicated)
        + *  [-btreeinfo]
        + *      # Outputs additional Btree information
        + *  [-outputproperties]
        + *      # Writes Java properties file to System.out
+ * </pre>
        + *

+ * <p>You should run DbCacheSize on the same target platform and JVM for which you + * are sizing the cache, as cache sizes will vary. You may also need to + * specify -d32 or -d64 depending on your target, if the default JVM mode is + * not the same as the mode to be used in production. + *

+ * To take full advantage of JE cache memory, it is strongly recommended that + * compressed oops + * (-XX:+UseCompressedOops) is specified when a 64-bit JVM is used + * and the maximum heap size is less than 32 GB. As described in the + * referenced documentation, compressed oops is sometimes the default JVM mode + * even when it is not explicitly specified in the Java command. However, if + * compressed oops is desired then it must be explicitly specified in + * the Java command when running DbCacheSize or a JE application. If it is not + * explicitly specified then JE will not be aware of it, even if it is the JVM + * default setting, and will not take it into account when calculating cache + * memory sizes. + *
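+ *
+ * <p>For instance, to make the setting explicit (illustrative command line
+ * only):
+ * <pre>
+ *  java -XX:+UseCompressedOops -jar je-X.Y.Z.jar DbCacheSize \
+ *      -records 554719 -key 16 -data 100
+ * </pre>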

+ * For example: + * <pre>

        + * $ java -jar je-X.Y.Z.jar DbCacheSize -records 554719 -key 16 -data 100
        + *
        + *  === Environment Cache Overhead ===
        + *
        + *  3,157,213 minimum bytes
        + *
        + * To account for JE daemon operation, record locks, HA network connections, etc,
        + * a larger amount is needed in practice.
        + *
        + *  === Database Cache Size ===
        + *
        + *  Number of Bytes  Description
        + *  ---------------  -----------
        + *       23,933,736  Internal nodes only
        + *      107,206,616  Internal nodes and leaf nodes
+ * </pre>
        + *

        + * This indicates that the minimum memory size to hold only the internal nodes + * of the Database Btree is approximately 24MB. The maximum size to hold the + * entire database, both internal nodes and data records, is approximately + * 107MB. To either of these amounts, at least 3MB (plus more for locks and + * daemons) should be added to account for the environment overhead. + *

+ * The following example adds the use of an off-heap cache, where the main + * cache size is specified to be 30MB. + * <pre>

        + * $ java -jar je-X.Y.Z.jar DbCacheSize -records 554719 -key 16 -data 100 \
        + *      -offheap -maincache 30000000
        + *
        + *  === Environment Cache Overhead ===
        + *
        + *  5,205,309 minimum bytes
        + *
        + * To account for JE daemon operation, record locks, HA network connections, etc,
        + * a larger amount is needed in practice.
        + *
        + *  === Database Cache Size ===
        + *
        + *  Number of Bytes  Description
        + *  ---------------  -----------
        + *       23,933,736  Internal nodes only: MAIN cache
        + *                0  Internal nodes only: OFF-HEAP cache
        + *       24,794,691  Internal nodes and leaf nodes: MAIN cache
        + *       70,463,604  Internal nodes and leaf nodes: OFF-HEAP cache
+ * </pre>
        + * There are several things of interest in the output. + *
+ * <ul>
+ * <li>The environment overhead is larger because of memory used for the
+ * off-heap LRU.</li>
+ * <li>To cache only internal nodes, an off-heap cache is not needed since
+ * the internal nodes take around 24MB, which when added to the 5MB
+ * overhead is less than the 30MB main cache specified. This is why the
+ * number of bytes on the second line is zero.</li>
+ * <li>To cache all nodes, the main cache size specified should be used
+ * (25MB added to the 5MB overhead is 30MB), and an off-heap cache of
+ * around 71MB should be configured.</li>
+ * </ul>

+ * <h3>Output Properties</h3>

        + * + *

        + * When {@code -outputproperties} is specified, a list of properties in Java + * properties file format will be written to System.out, instead of the output + * shown above. The properties and their meanings are listed below. + *

+ * <ul>
+ * <li>The following properties are always output (except {@code allNodes},
+ * see below). They describe the estimated size of the main cache.
+ * <ul>
+ * <li>{@code overhead}: The environment overhead, as shown
+ * under Environment Cache Overhead above.</li>
+ * <li>{@code internalNodes}: The Btree size in the main
+ * cache for holding the internal nodes. This is the "Internal nodes
+ * only" line above (followed by "MAIN cache" when {@code -offheap} is
+ * specified).</li>
+ * <li>{@code internalNodesAndVersions}: The Btree size needed
+ * to hold the internal nodes and record versions in the main cache.
+ * This value is zero when {@code -offheap} is specified; currently JE
+ * does not cache record versions off-heap unless their associated LNs
+ * are also cached off-heap, so there is no way to calculate this
+ * property.</li>
+ * <li>{@code allNodes}: The Btree size in the main cache
+ * needed to hold all nodes. This is the "Internal nodes and leaf
+ * nodes" line above (followed by "MAIN cache" when {@code -offheap} is
+ * specified). This property is not output unless {@code -data} is
+ * specified.</li>
+ * </ul></li>
+ * <li>The following properties are output only when {@code -offheap} is
+ * specified. They describe the estimated size of the off-heap cache.
+ * <ul>
+ * <li>{@code minMainCache}: The minimum size of the main
+ * cache needed to hold all upper INs. When the {@code -maincache}
+ * value specified is less than this minimum, not all internal nodes
+ * can be cached. See the discussion further above.</li>
+ * <li>{@code offHeapInternalNodes}: The size of the off-heap
+ * cache needed to hold the internal nodes. This is the "Internal nodes
+ * only: OFF-HEAP cache" line above.</li>
+ * <li>{@code offHeapAllNodes}: The size of the off-heap cache
+ * needed to hold all nodes. This is the "Internal nodes and leaf
+ * nodes: OFF-HEAP cache" line above. This property is not output
+ * unless {@code -data} is specified.</li>
+ * </ul></li>
+ * <li>The following properties are deprecated but are output for
+ * compatibility with earlier releases.
+ * <ul>
+ * <li>{@code minInternalNodes}, {@code maxInternalNodes},
+ * {@code minAllNodes}, and (when {@code -data} is specified)
+ * {@code maxAllNodes}</li>
+ * </ul></li>
+ * </ul>
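+ *
+ * <p>For illustration only (an editor's sketch), a hypothetical consumer
+ * could capture this output in a file and read it with
+ * {@code java.util.Properties}:
+ * <pre>
+ *  Properties props = new Properties();
+ *  props.load(new FileInputStream("dbcachesize.properties")); // captured output
+ *  long mainCache = Long.parseLong(props.getProperty("overhead")) +
+ *      Long.parseLong(props.getProperty("internalNodes"));
+ *  envConfig.setCacheSize(mainCache); // size main cache for INs plus overhead
+ * </pre>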
+ * + * @see EnvironmentConfig#setCacheSize + * @see EnvironmentConfig#setOffHeapCacheSize + * @see CacheMode + * + * @see Cache Statistics: + * Sizing + */ +public class DbCacheSize { + + /* + * Undocumented command line options, used for comparing calculated to + * actual cache sizes during testing. + * + * [-measure] + * # Causes main program to write a database to find + * # the actual cache size; default: do not measure; + * # without -data, measures internal nodes only + * + * Only use -measure without -orderedinsertion when record count is 100k or + * less, to avoid endless attempts to find an unused key value via random + * number generation. Also note that measured amounts will be slightly + * less than calculated amounts because the number of prefix bytes is + * larger for smaller key values, which are sequential integers from zero + * to max records minus one. + */ + + private static final NumberFormat INT_FORMAT = + NumberFormat.getIntegerInstance(); + + private static final String MAIN_HEADER = + " Number of Bytes Description\n" + + " --------------- -----------"; + // 123456789012345678 + // 12 + private static final int MIN_COLUMN_WIDTH = 18; + private static final String COLUMN_SEPARATOR = " "; + + /* IN density for non-ordered insertion. */ + private static final int DEFAULT_DENSITY = 70; + /* IN density for ordered insertion. */ + private static final int ORDERED_DENSITY = 100; + + /* Parameters. */ + private final EnvironmentConfig envConfig = new EnvironmentConfig(); + private final Map<String, String> repParams = new HashMap<>(); + private long records = 0; + private int keySize = 0; + private int dataSize = -1; + private boolean offHeapCache = false; + private boolean assumeEvictLN = false; + private long mainCacheSize = 0; + private long mainDataSize = 0; + private int nodeMaxEntries = 128; + private int binMaxEntries = -1; + private int keyPrefix = 0; + private boolean orderedInsertion = false; + private boolean duplicates = false; + private boolean replicated = false; + private boolean useTTL = false; + private boolean outputProperties = false; + private boolean doMeasure = false; + private boolean btreeInfo = false; + + /* Calculated values.
*/ + private long envOverhead; + private long uinWithTargets; + private long uinNoTargets; + private long uinOffHeapBINIds; + private long binNoLNsOrVLSNs; + private long binNoLNsWithVLSNs; + private long binWithLNsAndVLSNs; + private long binOffHeapWithLNIds; + private long binOffHeapNoLNIds; + private long binOffHeapLNs; + private long binOffHeapLNIds; + private long mainMinDataSize; + private long mainNoLNsOrVLSNs; + private long mainNoLNsWithVLSNs; + private long mainWithLNsAndVLSNs; + private long offHeapNoLNsOrVLSNs; + private long offHeapWithLNsAndVLSNs; + private long nMainBINsNoLNsOrVLSNs; + private long nMainBINsWithLNsAndVLSNs; + private long nMainLNsWithLNsAndVLSNs; + private long measuredMainNoLNsOrVLSNs; + private long measuredMainNoLNsWithVLSNs; + private long measuredMainWithLNsAndVLSNs; + private long measuredOffHeapNoLNsOrVLSNs; + private long measuredOffHeapWithLNsAndVLSNs; + private long preloadMainNoLNsOrVLSNs; + private long preloadMainNoLNsWithVLSNs; + private long preloadMainWithLNsAndVLSNs; + private int nodeAvg; + private int binAvg; + private int btreeLevels; + private long nBinNodes; + private long nUinNodes; + private long nLevel2Nodes; + + private File tempDir; + + DbCacheSize() { + } + + void parseArgs(String[] args) { + for (int i = 0; i < args.length; i += 1) { + String name = args[i]; + String val = null; + if (i < args.length - 1 && !args[i + 1].startsWith("-")) { + i += 1; + val = args[i]; + } + if (name.equals("-records")) { + if (val == null) { + usage("No value after -records"); + } + try { + records = Long.parseLong(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (records <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-key")) { + if (val == null) { + usage("No value after -key"); + } + try { + keySize = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (keySize <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-data")) { + if (val == null) { + usage("No value after -data"); + } + try { + dataSize = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (dataSize < 0) { + usage(val + " is not a non-negative integer"); + } + } else if (name.equals("-offheap")) { + if (val != null) { + usage("No value allowed after " + name); + } + offHeapCache = true; + } else if (name.equals("-maincache")) { + if (val == null) { + usage("No value after -maincache"); + } + try { + mainCacheSize = Long.parseLong(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (mainCacheSize <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-keyprefix")) { + if (val == null) { + usage("No value after -keyprefix"); + } + try { + keyPrefix = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (keyPrefix < 0) { + usage(val + " is not a non-negative integer"); + } + } else if (name.equals("-orderedinsertion")) { + if (val != null) { + usage("No value allowed after " + name); + } + orderedInsertion = true; + } else if (name.equals("-duplicates")) { + if (val != null) { + usage("No value allowed after " + name); + } + duplicates = true; + } else if (name.equals("-ttl")) { + if (val != null) { + usage("No value allowed after " + name); + } + useTTL = true; + } else if (name.equals("-replicated")) { + if (val != null) { + usage("No value allowed after " + name); 
+ } + replicated = true; + } else if (name.equals("-nodemax")) { + if (val == null) { + usage("No value after -nodemax"); + } + try { + nodeMaxEntries = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (nodeMaxEntries <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-binmax")) { + if (val == null) { + usage("No value after -binmax"); + } + try { + binMaxEntries = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (binMaxEntries <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-density")) { + usage + ("-density is no longer supported, see -orderedinsertion"); + } else if (name.equals("-overhead")) { + usage("-overhead is no longer supported"); + } else if (name.startsWith("-je.")) { + if (val == null) { + usage("No value after " + name); + } + if (name.startsWith("-je.rep.")) { + repParams.put(name.substring(1), val); + } else { + envConfig.setConfigParam(name.substring(1), val); + } + } else if (name.equals("-measure")) { + if (val != null) { + usage("No value allowed after " + name); + } + doMeasure = true; + } else if (name.equals("-outputproperties")) { + if (val != null) { + usage("No value allowed after " + name); + } + outputProperties = true; + } else if (name.equals("-btreeinfo")) { + if (val != null) { + usage("No value allowed after " + name); + } + btreeInfo = true; + } else { + usage("Unknown arg: " + name); + } + } + + if (records == 0) { + usage("-records not specified"); + } + if (keySize == 0) { + usage("-key not specified"); + } + } + + void cleanup() { + if (tempDir != null) { + emptyTempDir(); + tempDir.delete(); + } + } + + long getMainNoLNsOrVLSNs() { + return mainNoLNsOrVLSNs; + } + + long getMainNoLNsWithVLSNs() { + return mainNoLNsWithVLSNs; + } + + long getOffHeapWithLNsAndVLSNs() { + return offHeapWithLNsAndVLSNs; + } + + long getOffHeapNoLNsOrVLSNs() { + return offHeapNoLNsOrVLSNs; + } + + long getMainWithLNsAndVLSNs() { + return mainWithLNsAndVLSNs; + } + + long getMeasuredMainNoLNsOrVLSNs() { + return measuredMainNoLNsOrVLSNs; + } + + long getMeasuredMainNoLNsWithVLSNs() { + return measuredMainNoLNsWithVLSNs; + } + + long getMeasuredMainWithLNsAndVLSNs() { + return measuredMainWithLNsAndVLSNs; + } + + long getMeasuredOffHeapNoLNsOrVLSNs() { + return measuredOffHeapNoLNsOrVLSNs; + } + + long getMeasuredOffHeapWithLNsAndVLSNs() { + return measuredOffHeapWithLNsAndVLSNs; + } + + long getPreloadMainNoLNsOrVLSNs() { + return preloadMainNoLNsOrVLSNs; + } + + long getPreloadMainNoLNsWithVLSNs() { + return preloadMainNoLNsWithVLSNs; + } + + long getPreloadMainWithLNsAndVLSNs() { + return preloadMainWithLNsAndVLSNs; + } + + /** + * Runs DbCacheSize as a command line utility. + * For command usage, see {@link DbCacheSize class description}. + */ + public static void main(final String[] args) + throws Throwable { + + final DbCacheSize dbCacheSize = new DbCacheSize(); + try { + dbCacheSize.parseArgs(args); + dbCacheSize.calculateCacheSizes(); + if (dbCacheSize.outputProperties) { + dbCacheSize.printProperties(System.out); + } else { + dbCacheSize.printCacheSizes(System.out); + } + if (dbCacheSize.doMeasure) { + dbCacheSize.measure(System.out); + } + } finally { + dbCacheSize.cleanup(); + } + } + + /** + * Prints usage and calls System.exit. 
+ */ + private static void usage(final String msg) { + + if (msg != null) { + System.out.println(msg); + } + + System.out.println + ("usage:" + + "\njava " + CmdUtil.getJavaCommand(DbCacheSize.class) + + "\n -records <count>" + + "\n # Total records (key/data pairs); required" + + "\n -key <bytes>" + + "\n # Average key bytes per record; required" + + "\n [-data <bytes>]" + + "\n # Average data bytes per record; if omitted no leaf" + + "\n # node sizes are included in the output; required with" + + "\n # -duplicates, and specifies the primary key length" + + "\n [-offheap]" + + "\n # Indicates that an off-heap cache will be used." + + "\n [-maincache <bytes>]" + + "\n # The size of the main cache (in the JVM heap)." + + "\n # The size of the off-heap cache displayed is the" + + "\n # additional amount needed to hold the data set." + + "\n # If omitted, the main cache size is implied to" + + "\n # be the amount needed to hold all internal nodes." + + "\n # Ignored if -offheap is not also specified." + + "\n [-keyprefix <bytes>]" + + "\n # Expected size of the prefix for the keys in each" + + "\n # BIN; default: zero, key prefixing is not configured;" + + "\n # required with -duplicates" + + "\n [-nodemax <entries>]" + + "\n # Number of entries per Btree node; default: 128" + + "\n [-orderedinsertion]" + + "\n # Assume ordered insertions and no deletions, so BINs" + + "\n # are 100% full; default: unordered insertions and/or" + + "\n # deletions, BINs are 70% full" + + "\n [-duplicates]" + + "\n # Indicates that sorted duplicates are used, including" + + "\n # MANY_TO_ONE and MANY_TO_MANY secondary indices;" + + "\n # default: false" + + "\n [-ttl]" + + "\n # Indicates that TTL is used; default: false" + + "\n [-replicated]" + + "\n # Use a ReplicatedEnvironment; default: false" + + "\n [-ENV_PARAM_NAME VALUE]..." + + "\n # Any number of EnvironmentConfig parameters and" + + "\n # ReplicationConfig parameters (if -replicated)" + + "\n [-btreeinfo]" + + "\n # Outputs additional Btree information" + + "\n [-outputproperties]" + + "\n # Writes Java properties to System.out"); + + System.exit(2); + } + + /** + * Calculates estimated cache sizes. + */ + void calculateCacheSizes() { + + if (binMaxEntries <= 0) { + binMaxEntries = nodeMaxEntries; + } + + final Environment env = openCalcEnvironment(true); + boolean success = false; + try { + IN.ACCUMULATED_LIMIT = 0; + + envOverhead = env.getStats(null).getCacheTotalBytes(); + + if (offHeapCache) { + + assumeEvictLN = (mainCacheSize == 0); + + if (mainCacheSize > 0 && + mainCacheSize - envOverhead <= 1024 * 1024) { + + throw new IllegalArgumentException( + "The -maincache value must be at least 1 MiB larger" + + " than the environment overhead (" + + INT_FORMAT.format(envOverhead) + ')'); + } + } + + final int density = + orderedInsertion ? ORDERED_DENSITY : DEFAULT_DENSITY; + + nodeAvg = (nodeMaxEntries * density) / 100; + binAvg = (binMaxEntries * density) / 100; + + calcTreeSizes(env); + calcNNodes(); + calcMainCacheSizes(); + + /* + * With an off-heap cache, if all UINs don't fit in main then we + * can't fit all internal nodes, much less all nodes, in both + * caches. We adjust the number of records downward so all UINs do + * fit in main (there is no point in configuring a cache that can + * never be filled) and then recalculate the number of nodes.
+ */ + if (offHeapCache) { + + if (mainCacheSize == 0) { + mainCacheSize = mainNoLNsOrVLSNs + envOverhead; + } + + mainDataSize = mainCacheSize - envOverhead; + mainMinDataSize = calcLevel2AndAboveSize(); + + if (mainMinDataSize > mainDataSize) { + records *= ((double) mainDataSize) / mainMinDataSize; + calcNNodes(); + calcMainCacheSizes(); + } + + calcOffHeapNoLNsOrVLSNs(); + calcOffHeapWithLNsAndVLSNs(); + } + + success = true; + } finally { + + IN.ACCUMULATED_LIMIT = IN.ACCUMULATED_LIMIT_DEFAULT; + + /* + * Do not propagate exception thrown by Environment.close if + * another exception is currently in flight. + */ + try { + env.close(); + } catch (RuntimeException e) { + if (success) { + throw e; + } + } + } + } + + private long calcLevel2AndAboveSize() { + assert offHeapCache; + + return ((nUinNodes - nLevel2Nodes) * uinWithTargets) + + (nLevel2Nodes * (uinNoTargets + uinOffHeapBINIds)); + } + + private void calcNNodes() { + + nBinNodes = (records + binAvg - 1) / binAvg; + btreeLevels = 1; + nUinNodes = 0; + nLevel2Nodes = 0; + + for (long nodes = nBinNodes / nodeAvg;; nodes /= nodeAvg) { + + if (nodes == 0) { + nodes = 1; // root + } + + if (btreeLevels == 2) { + assert nLevel2Nodes == 0; + nLevel2Nodes = nodes; + } + + nUinNodes += nodes; + btreeLevels += 1; + + if (nodes == 1) { + break; + } + } + } + + /** + * Calculates main cache sizes as if there were no off-heap cache. During + * off-heap cache size calculations, these numbers may be revised. + */ + private void calcMainCacheSizes() { + + final long mainUINs = nUinNodes * uinWithTargets; + + mainNoLNsOrVLSNs = + (nBinNodes * binNoLNsOrVLSNs) + mainUINs; + + mainNoLNsWithVLSNs = + (nBinNodes * binNoLNsWithVLSNs) + mainUINs; + + mainWithLNsAndVLSNs = + (nBinNodes * binWithLNsAndVLSNs) + mainUINs; + } + + private void calcOffHeapNoLNsOrVLSNs() { + assert offHeapCache; + + mainNoLNsWithVLSNs = 0; + + /* + * If all INs fit in main, then no off-heap cache is needed. + */ + if (mainNoLNsOrVLSNs <= mainDataSize) { + offHeapNoLNsOrVLSNs = 0; + nMainBINsNoLNsOrVLSNs = nBinNodes; + return; + } + + mainNoLNsOrVLSNs = mainDataSize; + + /* + * If not all BINs fit in main, then put as many BINs in main as + * possible, and the rest off-heap. + */ + final long mainSpare = (mainDataSize > calcLevel2AndAboveSize()) ? + (mainDataSize - calcLevel2AndAboveSize()) : 0; + + final long nMainBINs = mainSpare / binNoLNsOrVLSNs; + final long nOffHeapBins = nBinNodes - nMainBINs; + + offHeapNoLNsOrVLSNs = nOffHeapBins * binOffHeapNoLNIds; + nMainBINsNoLNsOrVLSNs = nMainBINs; + } + + private void calcOffHeapWithLNsAndVLSNs() { + assert offHeapCache; + + /* + * If everything fits in main, then no off-heap cache is needed. + */ + if (mainWithLNsAndVLSNs <= mainDataSize) { + offHeapWithLNsAndVLSNs = 0; + nMainBINsWithLNsAndVLSNs = nBinNodes; + nMainLNsWithLNsAndVLSNs = (binOffHeapLNs == 0) ? 0 : records; + return; + } + + mainWithLNsAndVLSNs = mainDataSize; + + /* + * If LNs are not stored separately (they are embedded or duplicates + * are configured), then only internal nodes are relevant. + */ + if (binOffHeapLNs == 0) { + offHeapWithLNsAndVLSNs = offHeapNoLNsOrVLSNs; + nMainBINsWithLNsAndVLSNs = nMainBINsNoLNsOrVLSNs; + nMainLNsWithLNsAndVLSNs = 0; + return; + } + + /* + * If all BINs fit in main, then compute how many BINs will have main + * LNs and how many off-heap LNs. The number that have main LNs is + * the amount of main cache to spare (if all BINs had off-heap LNs) + * divided by the added size required to hold the LNs in one BIN. 
+ */ + final long mainWithOffHeapLNIds = + mainNoLNsOrVLSNs + (nBinNodes * binOffHeapLNIds); + + if (mainWithOffHeapLNIds <= mainDataSize) { + + final long mainSpare = (mainDataSize > mainNoLNsOrVLSNs) ? + (mainDataSize - mainNoLNsOrVLSNs) : 0; + + final long nBINsWithMainLNs = mainSpare / + (binWithLNsAndVLSNs - binNoLNsOrVLSNs); + + final long nBINsWithOffHeapLNs = nBinNodes - nBINsWithMainLNs; + + offHeapWithLNsAndVLSNs = nBINsWithOffHeapLNs * binOffHeapLNs; + nMainBINsWithLNsAndVLSNs = nMainBINsNoLNsOrVLSNs; + nMainLNsWithLNsAndVLSNs = nBINsWithMainLNs * nodeAvg; + return; + } + + /* + * If not all BINs fit in main, then put as many BINs in main as + * possible, and the rest off-heap. Put all LNs off-heap. + */ + final long mainSpare = (mainDataSize > calcLevel2AndAboveSize()) ? + (mainDataSize - calcLevel2AndAboveSize()) : 0; + + final long nMainBINs = mainSpare / (binNoLNsOrVLSNs + binOffHeapLNIds); + final long nOffHeapBins = nBinNodes - nMainBINs; + + offHeapWithLNsAndVLSNs = + (nOffHeapBins * binOffHeapWithLNIds) + + (nBinNodes * binOffHeapLNs); + + nMainBINsWithLNsAndVLSNs = nMainBINs; + nMainLNsWithLNsAndVLSNs = 0; + } + + private void calcTreeSizes(final Environment env) { + + if (nodeMaxEntries != binMaxEntries) { + throw new IllegalArgumentException( + "-binmax not currently supported because a per-BIN max is" + + " not implemented in the Btree, so we can't measure" + + " an actual BIN node with the given -binmax value"); + } + assert nodeAvg == binAvg; + + if (nodeAvg > 0xFFFF) { + throw new IllegalArgumentException( + "Entries per node (" + nodeAvg + ") is greater than 0xFFFF"); + } + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* + * Either a one or two byte key is used, depending on whether a single + * byte can hold the key for nodeAvg entries. + */ + final byte[] keyBytes = new byte[(nodeAvg <= 0xFF) ? 1 : 2]; + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + final WriteOptions options = new WriteOptions(); + if (useTTL) { + options.setTTL(30, TimeUnit.DAYS); + } + + /* Insert nodeAvg records into a single BIN. */ + final Database db = openDatabase(env, true); + for (int i = 0; i < nodeAvg; i += 1) { + + if (keyBytes.length == 1) { + keyBytes[0] = (byte) i; + } else { + assert keyBytes.length == 2; + keyBytes[0] = (byte) (i >> 8); + keyBytes[1] = (byte) i; + } + + setKeyData(keyBytes, keyPrefix, keyEntry, dataEntry); + + final OperationResult result = db.put( + null, keyEntry, dataEntry, + duplicates ? Put.NO_DUP_DATA : Put.NO_OVERWRITE, + options); + + if (result == null) { + throw new IllegalStateException(); + } + } + + /* Position a cursor at the first record to get the BIN. */ + final Cursor cursor = db.openCursor(null, null); + OperationStatus status = cursor.getFirst(keyEntry, dataEntry, null); + assert status == OperationStatus.SUCCESS; + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + bin.latchNoUpdateLRU(); + + /* + * Calculate BIN size including LNs. The recalcKeyPrefix and + * compactMemory methods are called to simulate normal operation. + * Normally prefixes are recalculated when a IN is split, and + * compactMemory is called after fetching a IN or evicting an LN. + */ + bin.recalcKeyPrefix(); + bin.compactMemory(); + binWithLNsAndVLSNs = bin.getInMemorySize(); + + /* + * Evict all LNs so we can calculate BIN size without LNs. This is + * simulated by calling partialEviction directly. 
+ */ + if (offHeapCache) { + final long prevSize = getOffHeapCacheSize(envImpl); + + bin.partialEviction(); + + binOffHeapLNs = 0; + for (int i = 0; i < nodeAvg; i += 1) { + binOffHeapLNs += getOffHeapLNSize(bin, 0); + } + + assert getOffHeapCacheSize(envImpl) - prevSize == binOffHeapLNs; + + binOffHeapLNIds = bin.getOffHeapLNIdsMemorySize(); + + } else { + bin.partialEviction(); + + binOffHeapLNs = 0; + binOffHeapLNIds = 0; + } + + assert !bin.hasCachedChildren(); + + binNoLNsWithVLSNs = bin.getInMemorySize() - binOffHeapLNIds; + + /* + * Another variant is when VLSNs are cached, since they are evicted + * after the LNs in a separate step. This is simulated by calling + * partialEviction a second time. + */ + if (duplicates || !envImpl.getCacheVLSN()) { + assert bin.getVLSNCache().getMemorySize() == 0; + + } else { + assert bin.getVLSNCache().getMemorySize() > 0; + + bin.partialEviction(); + + if (dataSize <= bin.getEnv().getMaxEmbeddedLN()) { + assert bin.getVLSNCache().getMemorySize() > 0; + } else { + assert bin.getVLSNCache().getMemorySize() == 0; + } + } + + /* There are no LNs or VLSNs remaining. */ + binNoLNsOrVLSNs = bin.getInMemorySize() - binOffHeapLNIds; + + /* + * To calculate IN size, get parent/root IN and artificially fill the + * slots with nodeAvg entries. + */ + final IN in = DbInternal.getDbImpl(db). + getTree(). + getRootINLatchedExclusive(CacheMode.DEFAULT); + assert bin == in.getTarget(0); + + for (int i = 1; i < nodeAvg; i += 1) { + + final int result = in.insertEntry1( + bin, bin.getKey(i), null, bin.getLsn(i), + false/*blindInsertion*/); + + assert (result & IN.INSERT_SUCCESS) != 0; + assert i == (result & ~IN.INSERT_SUCCESS); + } + + in.recalcKeyPrefix(); + in.compactMemory(); + uinWithTargets = in.getInMemorySize(); + uinNoTargets = uinWithTargets - in.getTargets().calculateMemorySize(); + + if (offHeapCache) { + + in.releaseLatch(); + + long bytesFreed = envImpl.getEvictor().doTestEvict( + bin, Evictor.EvictionSource.CACHEMODE); + + assert bytesFreed > 0; + + in.latchNoUpdateLRU(); + + final int binId = in.getOffHeapBINId(0); + assert binId >= 0; + + binOffHeapWithLNIds = getOffHeapBINSize(in, 0); + + bytesFreed = envImpl.getOffHeapCache().stripLNs(in, 0); + + binOffHeapNoLNIds = getOffHeapBINSize(in, 0); + + assert bytesFreed == + binOffHeapLNs + (binOffHeapWithLNIds - binOffHeapNoLNIds); + + for (int i = 1; i < nodeAvg; i += 1) { + in.setOffHeapBINId(i, binId, false, false); + } + + uinOffHeapBINIds = in.getOffHeapBINIdsMemorySize(); + + /* Cleanup to avoid assertions during env close. */ + for (int i = 1; i < nodeAvg; i += 1) { + in.clearOffHeapBINId(i); + } + + in.releaseLatch(); + + } else { + binOffHeapWithLNIds = 0; + uinOffHeapBINIds = 0; + + bin.releaseLatch(); + in.releaseLatch(); + } + + db.close(); + } + + private long getMainDataSize(final Environment env) { + return DbInternal.getNonNullEnvImpl(env). 
+ getMemoryBudget().getTreeMemoryUsage(); + } + + private long getOffHeapCacheSize(final EnvironmentImpl envImpl) { + assert offHeapCache; + return envImpl.getOffHeapCache().getAllocator().getUsedBytes(); + } + + private long getOffHeapLNSize(final BIN bin, final int i) { + assert offHeapCache; + + final OffHeapCache ohCache = bin.getEnv().getOffHeapCache(); + + final long memId = bin.getOffHeapLNId(i); + if (memId == 0) { + return 0; + } + + return ohCache.getAllocator().totalSize(memId); + } + + private long getOffHeapBINSize(final IN parent, final int i) { + assert offHeapCache; + + final OffHeapCache ohCache = parent.getEnv().getOffHeapCache(); + + final int lruId = parent.getOffHeapBINId(0); + assert lruId >= 0; + + final long memId = ohCache.getMemId(lruId); + assert memId != 0; + + return ohCache.getAllocator().totalSize(memId); + } + + private void setKeyData(final byte[] keyBytes, + final int keyOffset, + final DatabaseEntry keyEntry, + final DatabaseEntry dataEntry) { + final byte[] fullKey; + if (duplicates) { + fullKey = new byte[keySize + dataSize]; + } else { + fullKey = new byte[keySize]; + } + + if (keyPrefix + keyBytes.length > fullKey.length) { + throw new IllegalArgumentException( + "Key doesn't fit, allowedLen=" + fullKey.length + + " keyLen=" + keyBytes.length + " prefixLen=" + keyPrefix); + } + + System.arraycopy(keyBytes, 0, fullKey, keyOffset, keyBytes.length); + + final byte[] finalKey; + final byte[] finalData; + if (duplicates) { + finalKey = new byte[keySize]; + finalData = new byte[dataSize]; + System.arraycopy(fullKey, 0, finalKey, 0, keySize); + System.arraycopy(fullKey, keySize, finalData, 0, dataSize); + } else { + finalKey = fullKey; + finalData = new byte[Math.max(0, dataSize)]; + } + + keyEntry.setData(finalKey); + dataEntry.setData(finalData); + } + + /** + * Prints Java properties for information collected by calculateCacheSizes. + * Min/max sizes are output for compatibility with earlier versions; in the + * past, min and max were different values. + */ + private void printProperties(final PrintStream out) { + out.println("overhead=" + envOverhead); + out.println("internalNodes=" + mainNoLNsOrVLSNs); + out.println("internalNodesAndVersions=" + mainNoLNsWithVLSNs); + if (dataSize >= 0) { + out.println("allNodes=" + mainWithLNsAndVLSNs); + } + if (offHeapCache) { + out.println("minMainCache=" + (mainMinDataSize + envOverhead)); + out.println("offHeapInternalNodes=" + offHeapNoLNsOrVLSNs); + if (dataSize >= 0) { + out.println("offHeapAllNodes=" + offHeapWithLNsAndVLSNs); + } + } + out.println("# Following are deprecated"); + out.println("minInternalNodes=" + mainNoLNsOrVLSNs); + out.println("maxInternalNodes=" + mainNoLNsOrVLSNs); + if (dataSize >= 0) { + out.println("minAllNodes=" + mainWithLNsAndVLSNs); + out.println("maxAllNodes=" + mainWithLNsAndVLSNs); + } + } + + /** + * Prints information collected by calculateCacheSizes. + */ + void printCacheSizes(final PrintStream out) { + + final String mainSuffix = offHeapCache ? 
": MAIN cache" : ""; + final String offHeapSuffix = ": OFF-HEAP cache"; + + out.println(); + out.println("=== Environment Cache Overhead ==="); + out.println(); + out.print(INT_FORMAT.format(envOverhead)); + out.println(" minimum bytes"); + out.println(); + out.println( + "To account for JE daemon operation, record locks, HA network " + + "connections, etc,"); + out.println("a larger amount is needed in practice."); + out.println(); + out.println("=== Database Cache Size ==="); + out.println(); + out.println(MAIN_HEADER); + + out.println(line( + mainNoLNsOrVLSNs, "Internal nodes only" + mainSuffix)); + + if (offHeapCache) { + out.println(line( + offHeapNoLNsOrVLSNs, "Internal nodes only" + offHeapSuffix)); + } + + if (dataSize >= 0) { + if (!offHeapCache && mainNoLNsWithVLSNs != mainNoLNsOrVLSNs) { + out.println(line( + mainNoLNsWithVLSNs, + "Internal nodes and record versions" + mainSuffix)); + } + + out.println(line( + mainWithLNsAndVLSNs, + "Internal nodes and leaf nodes" + mainSuffix)); + + if (offHeapCache) { + out.println(line( + offHeapWithLNsAndVLSNs, + "Internal nodes and leaf nodes" + offHeapSuffix)); + } + + if (mainNoLNsOrVLSNs == mainWithLNsAndVLSNs && + offHeapNoLNsOrVLSNs == offHeapWithLNsAndVLSNs){ + + if (duplicates) { + out.println( + "\nNote that leaf nodes do not use additional memory" + + " because the database is" + + "\nconfigured for duplicates. In addition, record" + + " versions are not applicable."); + } else { + out.println( + "\nNote that leaf nodes do not use additional memory" + + " because with a small" + + "\ndata size, the LNs are embedded in the BINs." + + " In addition, record versions" + + "\n(if configured) are always cached in this mode."); + } + + } + } else { + if (!duplicates) { + out.println("\nTo get leaf node sizing specify -data"); + } + } + + if (offHeapCache && mainMinDataSize > mainDataSize) { + out.println( + "\nWARNING: The information above applies to a data set of " + + INT_FORMAT.format(records) + " records," + + "\nnot the number of records specified, because the main" + + " cache size specified is " + + "\ntoo small to hold all upper INs. This prevents all" + + " internal nodes (or leaf" + + "\nnodes) from fitting into cache, and the data set was" + + " reduced accordingly. 
To" + + "\nfit all internal nodes in cache with the specified " + + " number of records, specify" + + "\na main cache size of at least " + + INT_FORMAT.format(mainMinDataSize + envOverhead) + " bytes."); + } + + if (btreeInfo) { + out.println(); + out.println("=== Calculated Btree Information ==="); + out.println(); + out.println(line(btreeLevels, "Btree levels")); + out.println(line(nUinNodes, "Upper internal nodes")); + out.println(line(nBinNodes, "Bottom internal nodes")); + + if (offHeapCache) { + out.println(); + out.println("--- BINs and LNs in Main Cache vs Off-heap ---"); + out.println(); + out.println(line( + nMainBINsNoLNsOrVLSNs, + "Internal nodes only, BINs" + mainSuffix)); + out.println(line( + nBinNodes - nMainBINsNoLNsOrVLSNs, + "Internal nodes only, BINs" + offHeapSuffix)); + out.println(line( + nMainBINsWithLNsAndVLSNs, + "Internal nodes and leaf nodes, BINs" + mainSuffix)); + out.println(line( + nBinNodes - nMainBINsWithLNsAndVLSNs, + "Internal nodes and leaf nodes, BINs" + offHeapSuffix)); + out.println(line( + nMainLNsWithLNsAndVLSNs, + "Internal nodes and leaf nodes, LNs" + mainSuffix)); + out.println(line( + records - nMainLNsWithLNsAndVLSNs, + "Internal nodes and leaf nodes, LNs" + offHeapSuffix)); + } + } + + out.println(); + out.println("For further information see the DbCacheSize javadoc."); + } + + private String line(final long num, final String comment) { + + final StringBuilder buf = new StringBuilder(100); + + column(buf, INT_FORMAT.format(num)); + buf.append(COLUMN_SEPARATOR); + buf.append(comment); + + return buf.toString(); + } + + private void column(final StringBuilder buf, final String str) { + + int start = buf.length(); + + while (buf.length() - start + str.length() < MIN_COLUMN_WIDTH) { + buf.append(' '); + } + + buf.append(str); + } + + /** + * For testing, insert the specified data set and initialize + * measuredMainNoLNsWithVLSNs and measuredMainWithLNsAndVLSNs. 
+ */ + void measure(final PrintStream out) { + + Environment env = openMeasureEnvironment( + true /*createNew*/, false /*setMainSize*/); + try { + IN.ACCUMULATED_LIMIT = 0; + + Database db = openDatabase(env, true); + + if (out != null) { + out.println( + "Measuring with maximum cache size: " + + INT_FORMAT.format(env.getConfig().getCacheSize()) + + " and (for off-heap) main data size: " + + INT_FORMAT.format(mainDataSize)); + } + + insertRecords(out, env, db); + + if (offHeapCache) { + db.close(); + env.close(); + env = null; + env = openMeasureEnvironment( + false /*createNew*/, false /*setMainSize*/); + db = openDatabase(env, false); + + readRecords(out, env, db, false /*readData*/); + evictMainToDataSize(db, mainDataSize); + + measuredMainNoLNsOrVLSNs = getStats( + out, env, "After read keys only, evict main to size"); + + measuredOffHeapNoLNsOrVLSNs = + getOffHeapCacheSize(DbInternal.getNonNullEnvImpl(env)); + + readRecords(out, env, db, true /*readData*/); + evictMainToDataSize(db, mainDataSize); + + measuredMainWithLNsAndVLSNs = getStats( + out, env, "After read all, evict main to size"); + + measuredOffHeapWithLNsAndVLSNs = + getOffHeapCacheSize(DbInternal.getNonNullEnvImpl(env)); + + } else { + measuredMainWithLNsAndVLSNs = getStats( + out, env, "After insert"); + + trimLNs(db); + + measuredMainNoLNsWithVLSNs = getStats( + out, env, "After trimLNs"); + + trimVLSNs(db); + + measuredMainNoLNsOrVLSNs = getStats( + out, env, "After trimVLSNs"); + } + + db.close(); + env.close(); + env = null; + + env = openMeasureEnvironment( + false /*createNew*/, offHeapCache /*setMainSize*/); + db = openDatabase(env, false); + + PreloadStatus status = preloadRecords(out, db, false /*loadLNs*/); + + preloadMainNoLNsOrVLSNs = getStats( + out, env, + "Internal nodes only after preload (" + + status + ")"); + + if (assumeEvictLN) { + preloadMainWithLNsAndVLSNs = preloadMainNoLNsOrVLSNs; + } else { + status = preloadRecords(out, db, true /*loadLNs*/); + + preloadMainWithLNsAndVLSNs = getStats( + out, env, + "All nodes after preload (" + + status + ")"); + } + + if (!offHeapCache) { + trimLNs(db); + + preloadMainNoLNsWithVLSNs = getStats( + out, env, + "Internal nodes plus VLSNs after preload (" + + status + ")"); + } + + db.close(); + env.close(); + env = null; + + } finally { + + IN.ACCUMULATED_LIMIT = IN.ACCUMULATED_LIMIT_DEFAULT; + + /* + * Do not propagate exception thrown by Environment.close if + * another exception is currently in flight. + */ + if (env != null) { + try { + env.close(); + } catch (RuntimeException ignore) { + } + } + } + } + + private Environment openMeasureEnvironment(final boolean createNew, + final boolean setMainSize) { + + final EnvironmentConfig config = envConfig.clone(); + + if (setMainSize) { + config.setCacheSize(mainCacheSize); + + /* + * Normally the main cache size is left "unlimited", meaning that + * log buffers will be maximum sized (1 MB each). Here we limit the + * main cache size in order to use the off-heap cache. But with a + * smaller main cache, the log buffers will be smaller. Use maximum + * sized log buffers so we can compare totals with the case where + * we don't set the cache size. 
+ */ + config.setConfigParam( + EnvironmentConfig.LOG_TOTAL_BUFFER_BYTES, + String.valueOf(3 << 20)); + } else { + config.setCachePercent(90); + } + + if (offHeapCache) { + config.setOffHeapCacheSize(1024 * 1024 * 1024); + } else { + config.setOffHeapCacheSize(0); + } + + return openEnvironment(config, createNew); + } + + private Environment openCalcEnvironment(final boolean createNew) { + + final EnvironmentConfig config = envConfig.clone(); + + if (offHeapCache) { + config.setOffHeapCacheSize(1024 * 1024 * 1024); + } else { + config.setOffHeapCacheSize(0); + } + + /* The amount of disk space needed is quite small. */ + config.setConfigParam( + EnvironmentConfig.FREE_DISK, String.valueOf(1L << 20)); + + return openEnvironment(config, createNew); + } + + private Environment openEnvironment(final EnvironmentConfig config, + final boolean createNew) { + mkTempDir(); + + if (createNew) { + emptyTempDir(); + } + + config.setTransactional(true); + config.setDurability(Durability.COMMIT_NO_SYNC); + config.setAllowCreate(createNew); + + /* Daemons interfere with cache size measurements. */ + config.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + config.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + config.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + config.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + config.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_VERIFIER, "false"); + + /* Evict in small chunks. */ + config.setConfigParam( + EnvironmentConfig.EVICTOR_EVICT_BYTES, "1024"); + + final Environment newEnv; + + if (replicated) { + try { + final Class repEnvClass = Class.forName + ("com.sleepycat.je.rep.utilint.DbCacheSizeRepEnv"); + final DbCacheSizeRepEnv repEnv = + (DbCacheSizeRepEnv) repEnvClass.newInstance(); + newEnv = repEnv.open(tempDir, config, repParams); + } catch (ClassNotFoundException | + InstantiationException | + IllegalAccessException e) { + throw new IllegalStateException(e); + } + } else { + if (!repParams.isEmpty()) { + throw new IllegalArgumentException( + "Cannot set replication params in a standalone " + + "environment. May add -replicated."); + } + newEnv = new Environment(tempDir, config); + } + + /* + * LSN compaction is typically effective (in a realistic data set) only + * when the file size fits in 3 bytes and sequential keys are written. + * Since a tiny data set is use for estimating, and a small data set + * for testing, we disable the compact representation when it is + * unlikely to be effective. + */ + final long fileSize = Integer.parseInt( + newEnv.getConfig().getConfigParam(EnvironmentConfig.LOG_FILE_MAX)); + + if ((fileSize > IN.MAX_FILE_OFFSET) || !orderedInsertion) { + IN.disableCompactLsns = true; + } + + /* + * Preallocate 1st chunk of LRU entries, so it is counted in env + * overhead. + */ + if (offHeapCache) { + DbInternal.getNonNullEnvImpl(newEnv). + getOffHeapCache().preallocateLRUEntries(); + } + + return newEnv; + } + + private void mkTempDir() { + if (tempDir == null) { + try { + tempDir = File.createTempFile("DbCacheSize", null); + } catch (IOException e) { + throw new IllegalStateException(e); + } + /* createTempFile creates a file, but we want a directory. 
*/ + tempDir.delete(); + tempDir.mkdir(); + } + } + + private void emptyTempDir() { + if (tempDir == null) { + return; + } + final File[] children = tempDir.listFiles(); + if (children != null) { + for (File child : children) { + child.delete(); + } + } + } + + private Database openDatabase(final Environment env, + final boolean createNew) { + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(createNew); + dbConfig.setExclusiveCreate(createNew); + dbConfig.setNodeMaxEntries(nodeMaxEntries); + dbConfig.setKeyPrefixing(keyPrefix > 0); + dbConfig.setSortedDuplicates(duplicates); + return env.openDatabase(null, "foo", dbConfig); + } + + /** + * Inserts records and ensures that no eviction occurs. LNs (and VLSNs) + * are left intact. + */ + private void insertRecords(final PrintStream out, + final Environment env, + final Database db) { + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + final int lastKey = (int) (records - 1); + final byte[] lastKeyBytes = BigInteger.valueOf(lastKey).toByteArray(); + final int maxKeyBytes = lastKeyBytes.length; + + final int keyOffset; + if (keyPrefix == 0) { + keyOffset = 0; + } else { + + /* + * Calculate prefix length for generated keys and adjust key offset + * to produce the desired prefix length. + */ + final int nodeAvg = orderedInsertion ? + nodeMaxEntries : + ((nodeMaxEntries * DEFAULT_DENSITY) / 100); + final int prevKey = lastKey - (nodeAvg * 2); + final byte[] prevKeyBytes = + padLeft(BigInteger.valueOf(prevKey).toByteArray(), + maxKeyBytes); + int calcPrefix = 0; + while (calcPrefix < lastKeyBytes.length && + calcPrefix < prevKeyBytes.length && + lastKeyBytes[calcPrefix] == prevKeyBytes[calcPrefix]) { + calcPrefix += 1; + } + keyOffset = keyPrefix - calcPrefix; + } + + /* Generate random keys. */ + List rndKeys = null; + if (!orderedInsertion) { + rndKeys = new ArrayList(lastKey + 1); + for (int i = 0; i <= lastKey; i += 1) { + rndKeys.add(i); + } + Collections.shuffle(rndKeys, new Random(123)); + } + + final WriteOptions options = new WriteOptions(); + if (useTTL) { + options.setTTL(30, TimeUnit.DAYS); + } + + final Transaction txn = env.beginTransaction(null, null); + final Cursor cursor = db.openCursor(txn, null); + boolean success = false; + try { + for (int i = 0; i <= lastKey; i += 1) { + final int keyVal = orderedInsertion ? i : rndKeys.get(i); + final byte[] keyBytes = padLeft( + BigInteger.valueOf(keyVal).toByteArray(), maxKeyBytes); + setKeyData(keyBytes, keyOffset, keyEntry, dataEntry); + + final OperationResult result = cursor.put( + keyEntry, dataEntry, + duplicates ? Put.NO_DUP_DATA : Put.NO_OVERWRITE, + options); + + if (result == null && !orderedInsertion) { + i -= 1; + continue; + } + if (result == null) { + throw new IllegalStateException("Could not insert"); + } + + if (i % 10000 == 0) { + checkForEviction(env, i); + if (out != null) { + out.print("."); + out.flush(); + } + } + } + success = true; + } finally { + cursor.close(); + if (success) { + txn.commit(); + } else { + txn.abort(); + } + } + + checkForEviction(env, lastKey); + + /* Checkpoint to speed recovery and reset the memory budget. */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* Let's be sure the memory budget is updated. 
*/ + iterateBINs(db, new BINVisitor() { + @Override + public boolean visitBIN(final BIN bin) { + bin.updateMemoryBudget(); + return true; + } + }); + } + + /** + * Reads all keys, optionally reading the data. + */ + private void readRecords(final PrintStream out, + final Environment env, + final Database db, + final boolean readData) { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + if (!readData) { + dataEntry.setPartial(0, 0, true); + } + + final ReadOptions options = new ReadOptions(); + + if (assumeEvictLN) { + options.setCacheMode(CacheMode.EVICT_LN); + } + + try (final Cursor cursor = db.openCursor(null, null)) { + while (cursor.get(keyEntry, dataEntry, Get.NEXT, options) != + null) { + } + } + } + + + private void checkForEviction(Environment env, int recNum) { + final EnvironmentStats stats = env.getStats(null); + if (stats.getOffHeapNodesTargeted() > 0) { + getStats(System.out, env, "Out of off-heap cache"); + throw new IllegalStateException( + "*** Ran out of off-heap cache at record " + recNum + + " -- try increasing off-heap cache size ***"); + } + if (stats.getNNodesTargeted() > 0) { + getStats(System.out, env, "Out of main cache"); + throw new IllegalStateException( + "*** Ran out of main cache at record " + recNum + + " -- try increasing Java heap size ***"); + } + } + + private void trimLNs(final Database db) { + iterateBINs(db, new BINVisitor() { + @Override + public boolean visitBIN(final BIN bin) { + bin.evictLNs(); + bin.updateMemoryBudget(); + return true; + } + }); + } + + private void trimVLSNs(final Database db) { + iterateBINs(db, new BINVisitor() { + @Override + public boolean visitBIN(final BIN bin) { + bin.discardVLSNCache(); + bin.updateMemoryBudget(); + return true; + } + }); + } + + private void evictMainToDataSize(final Database db, + final long dataSize) { + + if (getMainDataSize(db.getEnvironment()) <= dataSize) { + return; + } + + boolean keepGoing = iterateBINs(db, new BINVisitor() { + @Override + public boolean visitBIN(final BIN bin) { + bin.evictLNs(); + bin.discardVLSNCache(); + bin.updateMemoryBudget(); + return getMainDataSize(db.getEnvironment()) > dataSize; + } + }); + + if (!keepGoing) { + return; + } + + final Evictor evictor = + DbInternal.getNonNullEnvImpl(db.getEnvironment()).getEvictor(); + + keepGoing = iterateBINs(db, new BINVisitor() { + @Override + public boolean visitBIN(final BIN bin) { + evictor.doTestEvict(bin, Evictor.EvictionSource.CACHEMODE); + return getMainDataSize(db.getEnvironment()) > dataSize; + } + }); + + assert !keepGoing; + } + + private interface BINVisitor { + boolean visitBIN(BIN bin); + } + + private boolean iterateBINs(final Database db, final BINVisitor visitor) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(0, 0, true); + + final Cursor c = db.openCursor(null, null); + BIN prevBin = null; + boolean keepGoing = true; + + while (keepGoing && + c.getNext(key, data, LockMode.READ_UNCOMMITTED) == + OperationStatus.SUCCESS) { + + final BIN bin = DbInternal.getCursorImpl(c).getBIN(); + + if (bin == prevBin) { + continue; + } + + if (prevBin != null) { + prevBin.latch(); + keepGoing = visitor.visitBIN(prevBin); + prevBin.releaseLatchIfOwner(); + } + + prevBin = bin; + } + + c.close(); + + if (keepGoing && prevBin != null) { + prevBin.latch(); + visitor.visitBIN(prevBin); + prevBin.releaseLatch(); + } + + return keepGoing; + } + + /** + * Pads the given array with zeros on the left, 
and returns an array of + * the given size. + */ + private byte[] padLeft(byte[] data, int size) { + assert data.length <= size; + if (data.length == size) { + return data; + } + final byte[] b = new byte[size]; + System.arraycopy(data, 0, b, size - data.length, data.length); + return b; + } + + /** + * Preloads the database. + */ + private PreloadStatus preloadRecords(final PrintStream out, + final Database db, + final boolean loadLNs) { + Thread thread = null; + if (out != null) { + thread = new Thread() { + @Override + public void run() { + while (true) { + try { + out.print("."); + out.flush(); + Thread.sleep(5 * 1000); + } catch (InterruptedException e) { + break; + } + } + } + }; + thread.start(); + } + final PreloadStats stats; + try { + stats = db.preload(new PreloadConfig().setLoadLNs(loadLNs)); + } finally { + if (thread != null) { + thread.interrupt(); + } + } + if (thread != null) { + try { + thread.join(); + } catch (InterruptedException e) { + throw new RuntimeExceptionWrapper(e); + } + } + + /* + * When preloading with an off-heap cache, the main cache will overflow + * a little by design. We evict here to bring it down below the + * maximum, and clear the stats so that the getStats method in this + * class doesn't complain about the eviction later on. + */ + final Environment env = db.getEnvironment(); + if (offHeapCache) { + env.evictMemory(); + env.getStats(StatsConfig.CLEAR); + } + + return stats.getStatus(); + } + + /** + * Returns the Btree size, and prints a few other stats for testing. + */ + private long getStats(final PrintStream out, + final Environment env, + final String msg) { + if (out != null) { + out.println(); + out.println(msg + ':'); + } + + final EnvironmentStats stats = env.getStats(null); + + final long dataSize = getMainDataSize(env); + + if (out != null) { + out.println( + "MainCache= " + INT_FORMAT.format(stats.getCacheTotalBytes()) + + " Data= " + INT_FORMAT.format(dataSize) + + " BINs= " + INT_FORMAT.format(stats.getNCachedBINs()) + + " UINs= " + INT_FORMAT.format(stats.getNCachedUpperINs()) + + " CacheMiss= " + INT_FORMAT.format(stats.getNCacheMiss()) + + " OffHeapCache= " + + INT_FORMAT.format(stats.getOffHeapTotalBytes()) + + " OhLNs= " + INT_FORMAT.format(stats.getOffHeapCachedLNs()) + + " OhBIN= " + INT_FORMAT.format(stats.getOffHeapCachedBINs()) + + " OhBINDeltas= " + + INT_FORMAT.format(stats.getOffHeapCachedBINDeltas())); + } + + if (stats.getNNodesTargeted() > 0) { + throw new IllegalStateException( + "*** All records did not fit in the cache ***"); + } + if (stats.getOffHeapNodesTargeted() > 0) { + throw new IllegalStateException( + "*** All records did not fit in the off-heap cache ***"); + } + return dataSize; + } +} diff --git a/src/com/sleepycat/je/util/DbDeleteReservedFiles.java b/src/com/sleepycat/je/util/DbDeleteReservedFiles.java new file mode 100644 index 0000000..63f141b --- /dev/null +++ b/src/com/sleepycat/je/util/DbDeleteReservedFiles.java @@ -0,0 +1,241 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.Pair; + +/** + * Command line utility used to delete reserved files explicitly, when + * attempting to recover from a disk-full condition. + * + *

        When using HA ({@link com.sleepycat.je.rep.ReplicatedEnvironment}), + * cleaned files are {@link EnvironmentStats#getReservedLogSize() reserved} + * and are not deleted until a disk limit is approached. Normally the + * {@link com.sleepycat.je.EnvironmentConfig#MAX_DISK} and + * {@link com.sleepycat.je.EnvironmentConfig#FREE_DISK} limits will + * cause the reserved files to be deleted automatically to prevent + * filling the disk. However, if these limits are both set to zero, or disk + * space is used outside of the JE environment, it is possible for the disk + * to become full. Manual recovery from this situation may require deleting + * the reserved files without opening the JE Environment using the + * application. This situation is not expected, but the {@code + * DbDeleteReservedFiles} utility provides a safeguard.
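+ * <p>For example (an illustrative sketch, not part of the original javadoc;
+ * the limit values are arbitrary), the limits above can be set when the
+ * application opens the environment, so that reserved files are deleted
+ * automatically before the disk fills:</p>
+ * <pre>
+ *    EnvironmentConfig config = new EnvironmentConfig();
+ *    // Total disk space JE may use; reserved files are deleted as this
+ *    // limit is approached. Zero disables the limit.
+ *    config.setConfigParam(
+ *        EnvironmentConfig.MAX_DISK, String.valueOf(100L << 30));
+ *    // Free space JE must leave available on the volume.
+ *    config.setConfigParam(
+ *        EnvironmentConfig.FREE_DISK, String.valueOf(5L << 30));
+ * </pre>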

        + * + *

Depending on the arguments given, the utility will either delete or list + * the oldest reserved files. The files deleted or listed are those that can + * be deleted in order to free the amount specified. Note that the size + * deleted may be larger than the specified size, because only whole files + * can be deleted.
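+ * <p>For example (the home directory is hypothetical), the oldest reserved
+ * files covering roughly 500 MB can first be listed with -l, and then
+ * deleted by repeating the command without -l:</p>
+ * <pre>
+ *    java -jar je.jar DbDeleteReservedFiles -h /data/env -s 500 -l
+ *    java -jar je.jar DbDeleteReservedFiles -h /data/env -s 500
+ * </pre>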

        + * + *
        + * java { com.sleepycat.je.util.DbDeleteReservedFiles |
        + *        -jar je-<version>.jar DbDeleteReservedFiles }
        + *   -h <dir>            # environment home directory
        + *   -s <size in MB>     # desired size to be freed in MB
        + *  [-l]                       # list reserved files/sizes, do not delete
        + *  [-V]                       # print JE version number
        + *
        + * + *

        When the application uses custom key comparators, be sure to add the + * jars or classes to the classpath that contain the application's comparator + * classes.
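+ * <p>For instance (the application jar name below is hypothetical), the
+ * comparator classes can be supplied on the classpath:</p>
+ * <pre>
+ *    java -cp je.jar:app-comparators.jar \
+ *        com.sleepycat.je.util.DbDeleteReservedFiles -h /data/env -s 500
+ * </pre>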

        + * + *

        This utility opens the JE Environment in read-only mode in order to + * determine which files are reserved. To speed up this process, specify + * a large Java heap size when running the utility; 32 GB is recommended.

+ */
+public class DbDeleteReservedFiles {
+
+    private static final long ONE_MB = 1L << 20;
+
+    private static final String USAGE =
+        "usage: " +
+        CmdUtil.getJavaCommand(DbDeleteReservedFiles.class) + "\n" +
+        "   -h <dir>         # environment home directory\n" +
+        "   -s <size in MB>  # desired size to delete in MB\n" +
+        "  [-l]              # list files only, do not delete\n" +
+        "  [-V]              # print JE version number";
+
+    /*
+     * The instance methods in this class are currently only used for
+     * testing and are not public because we do not know of a use case for
+     * this class other than as a command line utility. Also, the env must
+     * be closed in order to delete reserved files. But we could expose an
+     * API later if necessary.
+     */
+
+    public static void main(final String[] args) {
+        try {
+            final DbDeleteReservedFiles util = new DbDeleteReservedFiles(args);
+            final Pair<Long, SortedMap<File, Long>> result = util.execute();
+            util.printResult(result.first(), result.second());
+            System.exit(0);
+        } catch (UsageException e) {
+            System.err.println(e.getMessage());
+            System.exit(2);
+        } catch (Throwable e) {
+            e.printStackTrace(System.err);
+            System.exit(1);
+        }
+    }
+
+    private File envHome;
+    private long deleteMb;
+    private boolean list;
+
+    DbDeleteReservedFiles(final String[] args)
+        throws UsageException {
+
+        for (int i = 0; i < args.length; i += 1) {
+            final String name = args[i];
+            String val = null;
+
+            if (i < args.length - 1 && !args[i + 1].startsWith("-")) {
+                i += 1;
+                val = args[i];
+            }
+
+            switch (name) {
+
+                case "-h":
+                    if (val == null) {
+                        throw usage("No value after -h");
+                    }
+                    envHome = new File(val);
+                    break;
+
+                case "-s":
+                    if (val == null) {
+                        throw usage("No value after -s");
+                    }
+                    try {
+                        deleteMb = Long.parseLong(val);
+                    } catch (NumberFormatException e) {
+                        throw usage(val + " is not a number");
+                    }
+                    if (deleteMb <= 0) {
+                        throw usage(val + " is not a positive integer");
+                    }
+                    break;
+
+                case "-l":
+                    list = true;
+                    break;
+            }
+        }
+
+        if (envHome == null) {
+            throw usage("-h is required");
+        }
+
+        if (deleteMb == 0) {
+            throw usage("-s is required");
+        }
+    }
+
+    Pair<Long, SortedMap<File, Long>> execute() {
+
+        final Environment env = new Environment(
+            envHome,
+            new EnvironmentConfig().setReadOnly(true));
+
+        final EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
+        final FileManager fileManager = envImpl.getFileManager();
+
+        final SortedSet<Long> reservedFiles =
+            envImpl.getFileProtector().getReservedFileInfo().second();
+
+        final SortedMap<File, Long> filesToDelete = new TreeMap<>();
+        long deleteBytes = 0;
+
+        for (final Long fileNum : reservedFiles) {
+
+            final File file = new File(fileManager.getFullFileName(fileNum));
+            final long len = file.length();
+            filesToDelete.put(file, len);
+            deleteBytes += len;
+
+            if (deleteBytes / ONE_MB >= deleteMb) {
+                break;
+            }
+        }
+
+        env.close();
+
+        if (!list) {
+            for (final File file : filesToDelete.keySet()) {
+                file.delete();
+            }
+        }
+
+        return new Pair<>(deleteBytes / ONE_MB, filesToDelete);
+    }
+
+    private void printResult(final long size,
+                             final SortedMap<File, Long> files) {
+
+        final StringBuilder msg = new StringBuilder(
+            String.format("File Size (MB) %n"));
+
+        for (final Map.Entry<File, Long> entry : files.entrySet()) {
+            final File file = entry.getKey();
+            final long len = entry.getValue();
+            msg.append(String.format(
+                "%s %,d %n", file.getName(), len / ONE_MB));
+        }
+
+        msg.append(String.format("Total size (MB): %,d %n", size));
+
+        if (list) {
+            msg.append("Files were NOT deleted.");
+        } else {
+            msg.append("Files were deleted.");
+        }
+
+        System.out.println(msg);
+    }
+
+    private static class UsageException extends Exception {
UsageException(final String msg) { + super(msg); + } + } + + private static UsageException usage(final String msg) { + + StringBuilder builder = new StringBuilder(); + + if (msg != null) { + builder.append(msg); + builder.append(String.format("%n")); + } + + builder.append(USAGE); + + return new UsageException(builder.toString()); + } +} diff --git a/src/com/sleepycat/je/util/DbDump.java b/src/com/sleepycat/je/util/DbDump.java new file mode 100644 index 0000000..9114c26 --- /dev/null +++ b/src/com/sleepycat/je/util/DbDump.java @@ -0,0 +1,473 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.Iterator; +import java.util.List; +import java.util.logging.Level; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseExistsException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Dump the contents of a database. This utility may be used programmatically + * or from the command line. + * + *

        When using this utility as a command line program, and the + * application uses custom key comparators, be sure to add the jars or + * classes to the classpath that contain the application's comparator + * classes.

        + * + *
        + * java { com.sleepycat.je.util.DbDump |
        + *        -jar je-<version>.jar DbDump }
        + *   -h <dir>           # environment home directory
        + *  [-f <fileName>]     # output file, for non -rR dumps
        + *  [-l]                # list databases in the environment
        + *  [-p]                # output printable characters
        + *  [-r]                # salvage mode
        + *  [-R]                # aggressive salvage mode
        + *  [-d] <directory>    # directory for *.dump files (salvage mode)
        + *  [-s <databaseName>] # database to dump
        + *  [-v]                # verbose in salvage mode
        + *  [-V]                # print JE version number
        + *
        + * See {@link DbDump#main} for a full description of the + * command line arguments. + *

        + * To dump a database to a stream from code: + *

        + *    DbDump dump = new DbDump(env, databaseName, outputStream, boolean);
        + *    dump.dump();
        + * 
        + * + *

        + * Because a DATA=END marker is used to terminate the dump of + * each database, multiple databases can be dumped and loaded using a single + * stream. The {@link DbDump#dump} method leaves the stream positioned after + * the last line written and the {@link DbLoad#load} method leaves the stream + * positioned after the last line read.

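+ * <p>As an illustrative sketch (not part of the original javadoc; the
+ * database and file names are hypothetical, exception handling is omitted,
+ * and an open Environment "env" is assumed), two databases can be dumped
+ * to, and later loaded from, a single stream:</p>
+ * <pre>
+ *    // Dump both databases back-to-back; each dump ends with DATA=END.
+ *    PrintStream out = new PrintStream(new FileOutputStream("dump.txt"));
+ *    new DbDump(env, "db1", out, true).dump();  // printable format
+ *    new DbDump(env, "db2", out, true).dump();
+ *    out.close();
+ *
+ *    // Reload them in the same order from one reader; the reader is
+ *    // left positioned after each DATA=END marker.
+ *    BufferedReader in = new BufferedReader(new FileReader("dump.txt"));
+ *    for (String name : new String[] { "db1", "db2" }) {
+ *        DbLoad loader = new DbLoad();
+ *        loader.setEnv(env);
+ *        loader.setDbName(name);
+ *        loader.setInputReader(in);
+ *        loader.load();
+ *    }
+ *    in.close();
+ * </pre>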
        + */ +public class DbDump { + private static final int VERSION = 3; + + protected File envHome = null; + protected Environment env; + protected String dbName = null; + protected boolean formatUsingPrintable; + private boolean dupSort; + private String outputFileName = null; + protected String outputDirectory = null; + protected PrintStream outputFile = null; + protected boolean doScavengerRun = false; + protected boolean doAggressiveScavengerRun = false; + protected boolean verbose = false; + + private static final String usageString = + "usage: " + CmdUtil.getJavaCommand(DbDump.class) + "\n" + + " -h # environment home directory\n" + + " [-f ] # output file, for non -rR dumps\n" + + " [-l] # list databases in the environment\n" + + " [-p] # output printable characters\n" + + " [-r] # salvage mode\n" + + " [-R] # aggressive salvage mode\n" + + " [-d] # directory for *.dump files (salvage mode)\n" + + " [-s ] # database to dump\n" + + " [-v] # verbose in salvage mode\n" + + " [-V] # print JE version number\n"; + + private DbDump() { + } + + /** + * @deprecated Please use the 4-arg ctor without outputDirectory instead. + */ + @Deprecated + public DbDump(Environment env, + String dbName, + PrintStream outputFile, + String outputDirectory, + boolean formatUsingPrintable) { + init(env, dbName, outputFile, formatUsingPrintable); + } + + /** + * Create a DbDump object for a specific environment and database. + * + * @param env The Environment containing the database to dump. + * @param dbName The name of the database to dump. + * @param outputFile The output stream to dump the database to. + * @param formatUsingPrintable true if the dump should use printable + * characters. + */ + public DbDump(Environment env, + String dbName, + PrintStream outputFile, + boolean formatUsingPrintable) { + init(env, dbName, outputFile, formatUsingPrintable); + } + + private void init(Environment env, + String dbName, + PrintStream outputFile, + boolean formatUsingPrintable) { + this.envHome = env.getHome(); + this.env = env; + this.dbName = dbName; + this.outputFile = outputFile; + this.formatUsingPrintable = formatUsingPrintable; + } + + /** + * The main used by the DbDump utility. + * + * @param argv The arguments accepted by the DbDump utility. + * + *
        +     * usage: java { com.sleepycat.je.util.DbDump | -jar
        +     * je-<version>.jar DbDump }
        +     *             [-f output-file] [-l] [-p] [-V]
        +     *             [-s database] -h dbEnvHome [-rR] [-v]
        +     *             [-d directory]
        +     * 
        + * + *
        + *
        + * -f - the file to dump to. If omitted, output is to System.out. + * Does not apply when -r or -R is used. + *
        + * -l - list the databases in the environment. + *
        + * -p - output printable characters. + *
        If characters in either the key or data items are printing + * characters (as defined by isprint(3)), use printing characters in file + * to represent them. This option permits users to use standard text + * editors and tools to modify the contents of databases.
        + *
        + * -V - display the version of the JE library. + *
        + * -s database - the database to dump. Does not apply when -r or -R is + * used. + *
        + * -h dbEnvHome - the directory containing the database environment. + *
        + * -d directory - the output directory for *.dump files. Applies only when + * -r or -R is used. + *
        + * -v - print progress information to stdout for -r or -R mode. + *
        + * -r - Salvage data from possibly corrupt data files. + *
        + * The records for all databases are output. The records for each database + * are saved into <databaseName>.dump files in the current directory. + *

        + * This option recreates the Btree structure in memory, so as large a heap + * size as possible should be specified. If -r cannot be used due to + * insufficient memory, use -R instead. + *

        + * When used on uncorrupted data files, this option should return + * equivalent data to a normal dump, but most likely in a different order; + * in other words, it should output a transactionally correct data set. + * However, there is one exception where not all committed records will be + * output: + *

          + *
        • When a committed transaction spans more than one .jdb file, and + * the last file in this set of files has been deleted by the log + * cleaner but earlier files have not, records for that transaction + * that appear in the earlier files will not be output. This is because + * the Commit entry in the last file is missing, and DbDump believes + * that the transaction was not committed. Such missing output should + * be relatively rare. Note that records in deleted files will be + * output, because they were migrated forward by the log cleaner and + * are no longer associated with a transaction.
        + *
        + *
        + * -R - Aggressively salvage data from a possibly corrupt file. + *
        + *

        + * The records for all databases are output. The records for each database + * are saved into <databaseName>.dump files in the current directory. + *

        + * Unlike -r, the -R option does not recreate the Btree structure in + * memory. However, it does use a bit set to track all committed + * transactions, so as large a heap size as possible should be specified. + *

        + * -R also differs from -r in that -R does not return a transactionally + * correct data set. This is because the Btree information is not + * reconstructed in memory. Therefore, data dumped in this fashion will + * almost certainly have to be edited by hand or other means before or + * after the data is reloaded. Be aware of the following abnormalities. + *

          + *
• Deleted records are often output. An application-specific + * technique should normally be used to correct for this.
        • Multiple versions of the same record are sometimes output. When + * this happens, the more recent version of a record is output first. + * Therefore, the -n option should normally be used when running + * DbLoad.
        • When a committed transaction spans more than one .jdb file, and + * the last file in this set of files has been deleted by the log + * cleaner but earlier files have not, records for that transaction + * that appear in the earlier files will not be output. This is because + * the Commit entry in the last file is missing, and DbDump believes + * that the transaction was not committed. Such missing output should + * be relatively rare. Note that records in deleted files will be + * output, because they were migrated forward by the log cleaner and + * are no longer associated with a transaction. (This abnormality also + * occurs with -r.)
        + *
        + *
        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + public static void main(String argv[]) + throws Exception { + + DbDump dumper = new DbDump(); + boolean listDbs = dumper.parseArgs(argv); + if (dumper.doScavengerRun) { + dumper.openEnv(false); + dumper = new DbScavenger(dumper.env, + dumper.outputDirectory, + dumper.formatUsingPrintable, + dumper.doAggressiveScavengerRun, + dumper.verbose); + ((DbScavenger) dumper).setDumpCorruptedBounds(true); + } + + if (listDbs) { + dumper.listDbs(); + System.exit(0); + } + + try { + dumper.dump(); + } catch (Throwable T) { + T.printStackTrace(); + } finally { + dumper.env.close(); + if (dumper.outputFile != null && + dumper.outputFile != System.out) { + dumper.outputFile.close(); + } + } + } + + private void listDbs() + throws EnvironmentNotFoundException, EnvironmentLockedException { + + openEnv(true); + + List dbNames = env.getDatabaseNames(); + Iterator iter = dbNames.iterator(); + while (iter.hasNext()) { + String name = iter.next(); + System.out.println(name); + } + } + + protected void printUsage(String msg) { + System.err.println(msg); + System.err.println(usageString); + System.exit(-1); + } + + protected boolean parseArgs(String argv[]) + throws IOException { + + int argc = 0; + int nArgs = argv.length; + boolean listDbs = false; + while (argc < nArgs) { + String thisArg = argv[argc++]; + if (thisArg.equals("-p")) { + formatUsingPrintable = true; + } else if (thisArg.equals("-V")) { + System.out.println(JEVersion.CURRENT_VERSION); + System.exit(0); + } else if (thisArg.equals("-l")) { + listDbs = true; + } else if (thisArg.equals("-r")) { + doScavengerRun = true; + } else if (thisArg.equals("-R")) { + doScavengerRun = true; + doAggressiveScavengerRun = true; + } else if (thisArg.equals("-f")) { + if (argc < nArgs) { + outputFileName = argv[argc++]; + } else { + printUsage("-f requires an argument"); + } + } else if (thisArg.equals("-h")) { + if (argc < nArgs) { + String envDir = argv[argc++]; + envHome = new File(envDir); + } else { + printUsage("-h requires an argument"); + } + } else if (thisArg.equals("-d")) { + if (argc < nArgs) { + outputDirectory = argv[argc++]; + } else { + printUsage("-d requires an argument"); + } + } else if (thisArg.equals("-s")) { + if (argc < nArgs) { + dbName = argv[argc++]; + } else { + printUsage("-s requires an argument"); + } + } else if (thisArg.equals("-v")) { + verbose = true; + } else { + printUsage(thisArg + " is not a valid option."); + } + } + + if (envHome == null) { + printUsage("-h is a required argument"); + } + + if (!listDbs && + !doScavengerRun) { + if (dbName == null) { + printUsage("Must supply a database name if -l not supplied."); + } + } + + if (outputFileName == null) { + outputFile = System.out; + } else { + outputFile = new PrintStream(new FileOutputStream(outputFileName)); + } + + return listDbs; + } + + /* + * Begin DbDump API. From here on there should be no calls to printUsage, + * System.xxx.print, or System.exit. + */ + protected void openEnv(boolean doRecovery) + throws EnvironmentNotFoundException, EnvironmentLockedException { + + if (env == null) { + EnvironmentConfig envConfiguration = new EnvironmentConfig(); + envConfiguration.setReadOnly(true); + /* Don't run recovery. */ + envConfiguration.setConfigParam + (EnvironmentParams.ENV_RECOVERY.getName(), + doRecovery ? "true" : "false"); + /* Even without recovery, scavenger needs comparators. 
*/ + envConfiguration.setConfigParam + (EnvironmentParams.ENV_COMPARATORS_REQUIRED.getName(), "true"); + + env = new Environment(envHome, envConfiguration); + } + } + + /** + * Perform the dump. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws IOException in subclasses. + */ + public void dump() + throws EnvironmentNotFoundException, + EnvironmentLockedException, + DatabaseNotFoundException, + IOException { + + openEnv(true); + + LoggerUtils.envLogMsg(Level.INFO, DbInternal.getNonNullEnvImpl(env), + "DbDump.dump of " + dbName + " starting"); + + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + DbInternal.setUseExistingConfig(dbConfig, true); + Database db; + try { + db = env.openDatabase(null, dbName, dbConfig); + } catch (DatabaseExistsException e) { + /* Should never happen, ExclusiveCreate is false. */ + throw EnvironmentFailureException.unexpectedException(e); + } + dupSort = db.getConfig().getSortedDuplicates(); + + printHeader(outputFile, dupSort, formatUsingPrintable); + + Cursor cursor = db.openCursor(null, null); + while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + dumpOne(outputFile, foundKey.getData(), formatUsingPrintable); + dumpOne(outputFile, foundData.getData(), formatUsingPrintable); + } + cursor.close(); + db.close(); + outputFile.println("DATA=END"); + + LoggerUtils.envLogMsg(Level.INFO, DbInternal.getNonNullEnvImpl(env), + "DbDump.dump of " + dbName + " ending"); + } + + protected void printHeader(PrintStream o, + boolean dupSort, + boolean formatUsingPrintable) { + o.println("VERSION=" + VERSION); + if (formatUsingPrintable) { + o.println("format=print"); + } else { + o.println("format=bytevalue"); + } + o.println("type=btree"); + o.println("dupsort=" + (dupSort ? "1" : "0")); + o.println("HEADER=END"); + } + + protected void dumpOne(PrintStream o, byte[] ba, + boolean formatUsingPrintable) { + StringBuilder sb = new StringBuilder(); + sb.append(' '); + CmdUtil.formatEntry(sb, ba, formatUsingPrintable); + o.println(sb.toString()); + } +} diff --git a/src/com/sleepycat/je/util/DbFilterStats.java b/src/com/sleepycat/je/util/DbFilterStats.java new file mode 100644 index 0000000..834fbc5 --- /dev/null +++ b/src/com/sleepycat/je/util/DbFilterStats.java @@ -0,0 +1,355 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.sleepycat.je.utilint.CmdUtil; + +/** + * Transform one or more je.stat.csv statistics files and + * write the output to stdout. A set of column names is used to + * specify the order and which columns are written to the output. 
+ * The utility is used to create an output file that is easier to
+ * analyze by projecting and ordering only the data that is required.
+ * Each user-specified column name will either be an exact match of a
+ * column in the file or a prefix match. In order to output the "time"
+ * and all "Op" group statistics, a column list "time,Op" could be used.
+ * Multiple input files are processed in the order specified on the
+ * command line. Duplicate column headers are suppressed in the output
+ * when processing multiple input files.
+ *
+ */
+
+public class DbFilterStats {
+
+    private static final String USAGE =
+        "usage: " + CmdUtil.getJavaCommand(DbFilterStats.class) + "\n" +
+        "  [-f <projection file>]     # use file for projection list\n" +
+        "  [-p \"<column list>\"]       # use specified projection list\n" +
+        "  <stat file> [<stat file>]  # list of statistic file names";
+
+    private static final String DELIMITER = ",";
+
+    private File projectionFile = null;
+    private String projectionArg = null;
+    private final List<File> inputFiles = new ArrayList<>();
+
+    /* list of columns/prefixes to project */
+    private final List<String> projList = new ArrayList<>();
+    private String header = null;
+    private String[] fileColHeader = null;
+    private final StringBuffer rowBuf = new StringBuffer();
+    /* used to save name/value from file */
+    private final Map<String, String> valMap =
+        new HashMap<>();
+    private final Splitter tokenizer = new Splitter(',');
+
+    /**
+     * The main used by the DbFilterStats utility.
+     *
+     * @param argv An array of command line arguments to the DbFilterStats
+     * utility.
+     *
        +     * usage: java { com.sleepycat.je.util.DbFilterStats | -jar
        +     * je.jar DbFilterStats }
        +     *  -f  <projection file>
        +     *  -p  <column projection list> A comma separated list of column
        +     *      names to project.
        +     *  <stat file> [<stat file>]
        +     * 
        + * + *

        At least one argument must be specified.
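+ * <p>For example (stat file names are hypothetical), to project the time
+ * column plus every column in the "Op" group from two stat files:</p>
+ * <pre>
+ *    java -jar je.jar DbFilterStats -p "time,Op" je.stat.csv je.stat.1.csv
+ * </pre>
+ * <p>The same projection can be applied programmatically through the
+ * public {@code execute} method:</p>
+ * <pre>
+ *    boolean ok = new DbFilterStats().execute(
+ *        new String[] { "-p", "time,Op", "je.stat.csv" });
+ * </pre>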

+     */
+    public static void main(String argv[]) {
+        DbFilterStats dbf = new DbFilterStats();
+        int retstatus = dbf.execute(argv) ? 0 : -1;
+        System.exit(retstatus);
+    }
+
+    /**
+     * Performs the processing of the DbFilterStats command.
+     *
+     * @param argv DbFilterStats command arguments
+     * @return true if command is successful, otherwise false
+     */
+    public boolean execute(String argv[]) {
+        boolean retcode = true;
+
+        if (argv.length == 0) {
+            System.err.println(USAGE);
+            return retcode;
+        }
+
+        try {
+            DbFilterStats dbf = new DbFilterStats();
+            dbf.parseArgs(argv);
+            dbf.validateParams();
+            dbf.processFiles();
+        } catch (IllegalArgumentException e) {
+            retcode = false;
+        }
+        return retcode;
+    }
+
+    private void processFiles() {
+        for (File f : inputFiles) {
+            processFile(f);
+        }
+    }
+
+    /**
+     * processFile forms the list of output columns based on the projection
+     * list and writes the column data to stdout. If the header has not been
+     * output, or is different than what was previously written, the column
+     * header for the projected columns is written to stdout.
+     *
+     * @param statFile comma-delimited file with a header
+     */
+    private void processFile(File statFile) {
+        String row;
+        BufferedReader fr = null;
+        List<String> outProj = null;
+
+        try {
+            fr = new BufferedReader(new FileReader(statFile));
+            while ((row = fr.readLine()) != null) {
+                String[] cols = parseRow(row, false);
+                if (outProj == null) {
+                    /* form output projection list from header */
+                    outProj = new ArrayList<>();
+                    Map<String, String> colNameMap =
+                        new HashMap<>();
+                    for (String cname : cols) {
+                        colNameMap.put(cname, cname);
+                    }
+
+                    for (String projName : projList) {
+                        if (colNameMap.get(projName) != null) {
+                            outProj.add(projName);
+                        } else {
+                            for (String colName : cols) {
+                                if (colName.startsWith(projName)) {
+                                    outProj.add(colName);
+                                }
+                            }
+                        }
+                    }
+
+                    if (header == null || !header.equals(row)) {
+                        /* output header row */
+                        outputRow(outProj);
+                        header = row;
+                        fileColHeader = cols;
+                    }
+                } else {
+                    if (cols.length != fileColHeader.length) {
+                        printFatal("Invalid stat file " +
+                                   statFile.getAbsolutePath() +
+                                   " header/columns are not equal.");
+                    }
+                    /* put column name/value in map */
+                    valMap.clear();
+                    for (int i = 0; i < cols.length; i++) {
+                        valMap.put(fileColHeader[i], cols[i]);
+                    }
+                    /* form output row based on projection list */
+                    rowBuf.setLength(0);
+                    for (String pname : outProj) {
+                        if (rowBuf.length() != 0) {
+                            rowBuf.append(DELIMITER);
+                        }
+                        String value = valMap.get(pname);
+                        if (value != null) {
+                            rowBuf.append(value);
+                        } else {
+                            rowBuf.append(" ");
+                        }
+                    }
+                    System.out.println(rowBuf.toString());
+                }
+            }
+        } catch (FileNotFoundException e) {
+            printFatal(
+                "Error occurred accessing stat file " +
+                statFile.getAbsolutePath());
+        } catch (IOException e) {
+            printFatal(
+                "IOException occurred accessing stat file " +
+                statFile.getAbsolutePath() + " exception " + e);
+        } finally {
+            if (fr != null) {
+                try {
+                    fr.close();
+                }
+                catch (IOException e) {
+                    /* eat exception */
+                }
+            }
+        }
+    }
+
+    private void outputRow(List<String> cvals) {
+        rowBuf.setLength(0);
+        for (String val : cvals) {
+            if (rowBuf.length() != 0) {
+                rowBuf.append(DELIMITER);
+            }
+            rowBuf.append(val);
+        }
+        System.out.println(rowBuf.toString());
+    }
+
+    private void parseArgs(String argv[]) {
+
+        int argc = 0;
+        int nArgs = argv.length;
+        inputFiles.clear();
+
+        while (argc < nArgs) {
+            String thisArg = argv[argc++];
+            if (thisArg.equals("-f")) {
+                if (argc < nArgs) {
+                    projectionFile = new File(argv[argc++]);
+                } else {
+                    printUsage("-f requires an argument");
+                }
+            }
else if (thisArg.equals("-p")) { + if (argc < nArgs) { + projectionArg = argv[argc++]; + } else { + printUsage("-p requires an argument"); + } + } else { + inputFiles.add(new File(thisArg)); + } + } + } + + private void validateParams() { + projList.clear(); + if (inputFiles.size() == 0) { + printUsage("requires statistic file argument"); + } + + for (File f : inputFiles) { + if (!f.exists()) { + printFatal("Specified stat file " + f.getAbsolutePath() + + " does not exist."); + } + if (f.isDirectory()) { + printFatal("Specified stat file " + f.getAbsolutePath() + + " is not a file."); + } + } + + if (projectionFile == null && projectionArg == null) { + printUsage("requires either -p or -f argument"); + } + + /* add command line projections */ + if (projectionArg != null) { + addProjections(projectionArg); + } + + /* add projection file projections */ + if (projectionFile != null) { + if (!projectionFile.exists()) { + printFatal("Specified projection file " + + projectionFile.getAbsolutePath() + + " does not exist."); + } + if (projectionFile.isDirectory()) { + printFatal("Specified projection file " + + projectionFile.getAbsolutePath() + + " is not a file."); + } + formProjections(projectionFile); + } + } + + private void formProjections(File projFile) { + String row; + BufferedReader fr = null; + + try { + fr = new BufferedReader(new FileReader(projFile)); + row = fr.readLine(); + if (row == null) { + printFatal("Invalid projection file " + + projFile.getAbsolutePath()); + } + addProjections(row); + } catch (FileNotFoundException e) { + printFatal( + "Error occured accessing projection file " + + projFile.getAbsolutePath()); + } catch (IOException e) { + printFatal( + "IOException occured accessing projection file " + + projFile.getAbsolutePath() + e); + } finally { + if (fr != null) { + try { + fr.close(); + } + catch (IOException e) { + /* eat exception */ + } + } + } + } + + private String[] parseRow(String row, boolean trimIt) { + String [] vals = tokenizer.tokenize(row); + if (trimIt) { + for (int i = 0; i < vals.length; i++) { + vals[i] = vals[i].trim(); + } + } + return vals; + } + + private void addProjections(String collist) { + String[] names = parseRow(collist, true); + for (String name : names) { + if (name.length() == 0 ) { + printFatal("Projection list contained a empty entry."); + } + projList.add(name); + } + } + + private void printUsage(String msg) { + if (msg != null) { + System.err.println(msg); + } + System.err.println(USAGE); + throw new IllegalArgumentException(msg); + } + + private void printFatal(String msg) { + System.err.println(msg); + throw new IllegalArgumentException(msg); + } +} diff --git a/src/com/sleepycat/je/util/DbLoad.java b/src/com/sleepycat/je/util/DbLoad.java new file mode 100644 index 0000000..d2f5e50 --- /dev/null +++ b/src/com/sleepycat/je/util/DbLoad.java @@ -0,0 +1,646 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.Date; +import java.util.logging.Level; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseExistsException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Loads a database from a dump file generated by {@link DbDump}. + * This utility may be used programmatically or from the command line. + * + *

        When using this utility as a command line program, and the + * application uses custom key comparators, be sure to add the jars or + * classes to the classpath that contain the application's comparator + * classes.

        + * + *
        + * java { com.sleepycat.je.util.DbLoad |
        + *        -jar je-<version>.jar DbLoad }
        + *     -h <dir>            # environment home directory
        + *    [-f <fileName>]      # input file
        + *    [-n]                 # no overwrite mode
        + *    [-T]                 # input file is in text mode
        + *    [-I]                 # ignore unknown parameters
        + *    [-c name=value]      # config values
        + *    [-s <databaseName> ] # database to load
        + *    [-v]                 # show progress
        + *    [-V]                 # print JE version number
        + *
        + * See {@link DbLoad#main} for a full description of the + * command line arguments. + *

        + * To load a database to a stream from code: + *

        + *    DbLoad loader = new DbLoad();
        + *    loader.setEnv(env);
        + *    loader.setDbName(dbName);
        + *    loader.setInputStream(stream);
        + *    loader.setNoOverwrite(noOvrwr);
        + *    loader.setTextFileMode(tfm);
        + *    loader.load();
        + * 
        + * + *

        Because a DATA=END marker is used to terminate the dump of + * each database, multiple databases can be dumped and loaded using a single + * stream. The {@link DbDump#dump} method leaves the stream positioned after + * the last line written and the {@link DbLoad#load} method leaves the stream + * positioned after the last line read.

        + */ +public class DbLoad { + private static final boolean DEBUG = false; + + protected Environment env; + private boolean formatUsingPrintable; + private String dbName; + private BufferedReader reader; + private boolean noOverwrite; + private boolean textFileMode; + private boolean dupSort; + private boolean ignoreUnknownConfig; + private boolean commandLine; + private long progressInterval; + private long totalLoadBytes; + + private static final String usageString = + "usage: " + CmdUtil.getJavaCommand(DbLoad.class) + "\n" + + " -h # environment home directory\n" + + " [-f ] # input file\n" + + " [-n] # no overwrite mode\n" + + " [-T] # input file is in text mode\n" + + " [-I] # ignore unknown parameters\n" + + " [-c name=value] # config values\n" + + " [-s ] # database to load\n" + + " [-v] # show progress\n" + + " [-V] # print JE version number"; + + /** + * The main used by the DbLoad utility. + * + * @param argv The arguments accepted by the DbLoad utility. + * + *
        +     * usage: java { com.sleepycat.je.util.DbLoad | -jar
        +     * je-<version>.jar DbLoad }
        +     *             [-f input-file] [-n] [-V] [-v] [-T] [-I]
        +     *             [-c name=value]
        +     *             [-s database] -h dbEnvHome
        +     * 
        + * + *

        -f - the file to load from (in DbDump format)
        + * -n - no overwrite mode. Do not overwrite existing data.
        + * -V - display the version of the JE library.
        + * -T - input file is in Text mode.
        + * -I - ignore unknown parameters in the config file.

        + * + *

        If -f is not specified, the dump is read from System.in.
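+ * <p>For example (environment paths and the database name are
+ * hypothetical), this allows a dump to be piped straight into a load
+ * without an intermediate file:</p>
+ * <pre>
+ *    java -jar je.jar DbDump -h /data/srcEnv -s mydb |
+ *        java -jar je.jar DbLoad -h /data/dstEnv -s mydb
+ * </pre>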

        + * + *

        The -T option allows JE applications to easily load text files into + * databases.

        + * + *

        The -I option allows loading databases that were dumped with the + * Berkeley DB C product, when the dump file contains parameters not known + * to JE.

        + * + *

        The input must be paired lines of text, where the first line of the + * pair is the key item, and the second line of the pair is its + * corresponding data item.

        + * + *

        A simple escape mechanism, where newline and backslash (\) characters + * are special, is applied to the text input. Newline characters are + * interpreted as record separators. Backslash characters in the text will + * be interpreted in one of two ways: If the backslash character precedes + * another backslash character, the pair will be interpreted as a literal + * backslash. If the backslash character precedes any other character, the + * two characters following the backslash will be interpreted as a + * hexadecimal specification of a single character; for example, \0a is a + * newline character in the ASCII character set.
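+ * <p>As an illustration (a hypothetical two-record file), a -T input file
+ * contains alternating key and data lines, using the escapes described
+ * above; here the first data item contains an embedded newline and the
+ * second key contains literal backslashes:</p>
+ * <pre>
+ *    key1
+ *    first line\0asecond line
+ *    path\\to\\value
+ *    data2
+ * </pre>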

        + * + *

        For this reason, any backslash or newline characters that naturally + * occur in the text input must be escaped to avoid misinterpretation by + * db_load.

        + * + *
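+ *
+ * <p>For instance (an illustrative example, not part of the original
+ * documentation), a text-mode input pairing each key line with a data line
+ * could escape an embedded newline as \0a and a literal backslash as
+ * \\:</p>
+ *
+ * <pre>
+ * key1
+ * first line\0asecond line
+ * key2
+ * dir\\subdir
+ * </pre>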

+ *
+ * <p>-c name=value - Specify configuration options ignoring any value they
+ * may have based on the input. The command-line format is name=value. See
+ * the Supported Keywords section below for a list of keywords supported by
+ * the -c option.</p>
+ *
+ * -s database - the database to load.
+ * -h dbEnvHome - the directory containing the database environment.
+ * -v - report progress
+ *

+ * <p>Supported Keywords</p>
+ * version=N - specify the version of the input file. Currently only
+ * version 3 is supported.
+ * format - specify the format of the file. Allowable values are "print"
+ * and "bytevalue".
+ * dupsort - specify whether the database allows duplicates or not.
+ * Allowable values are "true" and "false".
+ * type - specifies the type of database. Only "btree" is allowed.
+ * database - specifies the name of the database to be loaded.
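+ *
+ * <p>For example (an illustrative sketch, not taken from the original
+ * documentation), a load input in bytevalue format begins with a header of
+ * keyword=value lines ending at HEADER=END, followed by hex-encoded
+ * key/data pairs and a final DATA=END marker:</p>
+ *
+ * <pre>
+ * version=3
+ * format=bytevalue
+ * type=btree
+ * dupsort=false
+ * database=mydb
+ * HEADER=END
+ *  6b6579
+ *  76616c7565
+ * DATA=END
+ * </pre>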

        + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + */ + public static void main(String argv[]) + throws Exception { + + DbLoad loader = parseArgs(argv); + try { + loader.load(); + } catch (Throwable e) { + e.printStackTrace(); + } + + loader.env.close(); + } + + private static void printUsage(String msg) { + System.err.println(msg); + System.err.println(usageString); + System.exit(-1); + } + + private static DbLoad parseArgs(String argv[]) + throws Exception { + + boolean noOverwrite = false; + boolean textFileMode = false; + boolean ignoreUnknownConfig = false; + boolean showProgressInterval = false; + + int argc = 0; + int nArgs = argv.length; + String inputFileName = null; + File envHome = null; + String dbName = null; + long progressInterval = 0; + DbLoad ret = new DbLoad(); + ret.setCommandLine(true); + + while (argc < nArgs) { + String thisArg = argv[argc++].trim(); + if (thisArg.equals("-n")) { + noOverwrite = true; + } else if (thisArg.equals("-T")) { + textFileMode = true; + } else if (thisArg.equals("-I")) { + ignoreUnknownConfig = true; + } else if (thisArg.equals("-V")) { + System.out.println(JEVersion.CURRENT_VERSION); + System.exit(0); + } else if (thisArg.equals("-f")) { + if (argc < nArgs) { + inputFileName = argv[argc++]; + } else { + printUsage("-f requires an argument"); + } + } else if (thisArg.equals("-h")) { + if (argc < nArgs) { + envHome = new File(argv[argc++]); + } else { + printUsage("-h requires an argument"); + } + } else if (thisArg.equals("-s")) { + if (argc < nArgs) { + dbName = argv[argc++]; + } else { + printUsage("-s requires an argument"); + } + } else if (thisArg.equals("-c")) { + if (argc < nArgs) { + try { + ret.loadConfigLine(argv[argc++]); + } catch (IllegalArgumentException e) { + printUsage("-c: " + e.getMessage()); + } + } else { + printUsage("-c requires an argument"); + } + } else if (thisArg.equals("-v")) { + showProgressInterval = true; + } + } + + if (envHome == null) { + printUsage("-h is a required argument"); + } + + long totalLoadBytes = 0; + InputStream is; + if (inputFileName == null) { + is = System.in; + if (showProgressInterval) { + + /* + * Can't show progress if we don't know how big the stream + * is. + */ + printUsage("-v requires -f"); + } + } else { + is = new FileInputStream(inputFileName); + if (showProgressInterval) { + totalLoadBytes = ((FileInputStream) is).getChannel().size(); + /* Use 5% intervals. */ + progressInterval = totalLoadBytes / 20; + } + } + BufferedReader reader = new BufferedReader(new InputStreamReader(is)); + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + Environment env = new Environment(envHome, envConfig); + ret.setEnv(env); + ret.setDbName(dbName); + ret.setInputReader(reader); + ret.setNoOverwrite(noOverwrite); + ret.setTextFileMode(textFileMode); + ret.setIgnoreUnknownConfig(ignoreUnknownConfig); + ret.setProgressInterval(progressInterval); + ret.setTotalLoadBytes(totalLoadBytes); + return ret; + } + + /* + * Begin DbLoad API. From here on there should be no calls to printUsage, + * System.xxx.print, or System.exit. + */ + + /** + * Creates a DbLoad object. + */ + public DbLoad() { + } + + /** + * If true, enables output of warning messages. Command line behavior is + * not available via the public API. + */ + private void setCommandLine(boolean commandLine) { + this.commandLine = commandLine; + } + + /** + * Sets the Environment to load from. + * + * @param env The environment. 
+ */ + public void setEnv(Environment env) { + this.env = env; + } + + /** + * Sets the database name to load. + * + * @param dbName database name + */ + public void setDbName(String dbName) { + this.dbName = dbName; + } + + /** + * Sets the BufferedReader to load from. + * + * @param reader The BufferedReader. + */ + public void setInputReader(BufferedReader reader) { + this.reader = reader; + } + + /** + * Sets whether the load should overwrite existing data or not. + * + * @param noOverwrite True if existing data should not be overwritten. + */ + public void setNoOverwrite(boolean noOverwrite) { + this.noOverwrite = noOverwrite; + } + + /** + * Sets whether the load data is in text file format. + * + * @param textFileMode True if the load data is in text file format. + */ + public void setTextFileMode(boolean textFileMode) { + this.textFileMode = textFileMode; + } + + /** + * Sets whether to ignore unknown parameters in the config file. This + * allows loading databases that were dumped with the Berkeley DB C + * product, when the dump file contains parameters not known to JE. + * + * @param ignoreUnknownConfigMode True to ignore unknown parameters in + * the config file. + */ + public void setIgnoreUnknownConfig(boolean ignoreUnknownConfigMode) { + this.ignoreUnknownConfig = ignoreUnknownConfigMode; + } + + /** + * If progressInterval is set, progress status messages are generated to + * stdout at set percentages of the load. + * + * @param progressInterval Specifies the percentage intervals for status + * messages. If 0, no messages are generated. + */ + public void setProgressInterval(long progressInterval) { + this.progressInterval = progressInterval; + } + + /** + * Used for progress status messages. Must be set to greater than + * 0 if the progressInterval is greater than 0. + * + * @param totalLoadBytes number of input bytes to be loaded. + */ + public void setTotalLoadBytes(long totalLoadBytes) { + this.totalLoadBytes = totalLoadBytes; + } + + public boolean load() + throws IOException, DatabaseException { + + LoggerUtils.envLogMsg(Level.INFO, DbInternal.getNonNullEnvImpl(env), + "DbLoad.load of " + dbName + " starting"); + + if (progressInterval > 0) { + System.out.println("Load start: " + new Date()); + } + + if (textFileMode) { + formatUsingPrintable = true; + } else { + loadHeader(); + } + + if (dbName == null) { + throw new IllegalArgumentException + ("Must supply a database name if -l not supplied."); + } + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(dupSort); + dbConfig.setAllowCreate(true); + Database db; + try { + db = env.openDatabase(null, dbName, dbConfig); + } catch (DatabaseNotFoundException e) { + /* Should never happen, AllowCreate is true. */ + throw EnvironmentFailureException.unexpectedException(e); + } catch (DatabaseExistsException e) { + /* Should never happen, ExclusiveCreate is false. 
*/ + throw EnvironmentFailureException.unexpectedException(e); + } + + loadData(db); + + db.close(); + + LoggerUtils.envLogMsg(Level.INFO, DbInternal.getNonNullEnvImpl(env), + "DbLoad.load of " + dbName + " ending."); + + if (progressInterval > 0) { + System.out.println("Load end: " + new Date()); + } + + return true; + } + + private void loadConfigLine(String line) { + int equalsIdx = line.indexOf('='); + if (equalsIdx < 0) { + throw new IllegalArgumentException + ("Invalid header parameter: " + line); + } + + String keyword = line.substring(0, equalsIdx).trim().toLowerCase(); + String value = line.substring(equalsIdx + 1).trim(); + + if (keyword.equals("version")) { + if (DEBUG) { + System.out.println("Found version: " + line); + } + if (!value.equals("3")) { + throw new IllegalArgumentException + ("Version " + value + " is not supported."); + } + } else if (keyword.equals("format")) { + value = value.toLowerCase(); + if (value.equals("print")) { + formatUsingPrintable = true; + } else if (value.equals("bytevalue")) { + formatUsingPrintable = false; + } else { + throw new IllegalArgumentException + (value + " is an unknown value for the format keyword"); + } + if (DEBUG) { + System.out.println("Found format: " + formatUsingPrintable); + } + } else if (keyword.equals("dupsort")) { + value = value.toLowerCase(); + if (value.equals("true") || + value.equals("1")) { + dupSort = true; + } else if (value.equals("false") || + value.equals("0")) { + dupSort = false; + } else { + throw new IllegalArgumentException + (value + " is an unknown value for the dupsort keyword"); + } + if (DEBUG) { + System.out.println("Found dupsort: " + dupSort); + } + } else if (keyword.equals("type")) { + value = value.toLowerCase(); + if (!value.equals("btree")) { + throw new IllegalArgumentException + (value + " is not a supported database type."); + } + if (DEBUG) { + System.out.println("Found type: " + line); + } + } else if (keyword.equals("database")) { + if (dbName == null) { + dbName = value; + } + if (DEBUG) { + System.out.println("DatabaseImpl: " + dbName); + } + } else if (!ignoreUnknownConfig) { + throw new IllegalArgumentException + ("'" + line + "' is not understood."); + } + } + + private void loadHeader() + throws IOException { + + if (DEBUG) { + System.out.println("loading header"); + } + String line = reader.readLine(); + while (line != null && + !line.equals("HEADER=END")) { + loadConfigLine(line); + line = reader.readLine(); + } + } + + private void loadData(Database db) + throws DatabaseException, IOException { + + String keyLine = reader.readLine(); + String dataLine = null; + int count = 0; + long totalBytesRead = 0; + long lastTime = System.currentTimeMillis(); + long bytesReadThisInterval = 0; + + while (keyLine != null && + !keyLine.equals("DATA=END")) { + dataLine = reader.readLine(); + if (dataLine == null) { + throw new IllegalArgumentException("No data to match key " + + keyLine); + } + /* Add one for \n or \r. */ + bytesReadThisInterval += dataLine.length() + 1; + byte[] keyBytes = loadLine(keyLine.trim()); + byte[] dataBytes = loadLine(dataLine.trim()); + + DatabaseEntry key = new DatabaseEntry(keyBytes); + DatabaseEntry data = new DatabaseEntry(dataBytes); + + if (noOverwrite) { + if (db.putNoOverwrite(null, key, data) == + OperationStatus.KEYEXIST) { + /* Calling println is OK only from command line. 
*/ + if (commandLine) { + System.err.println("Key exists: " + key); + } + } + } else { + db.put(null, key, data); + } + + count++; + if ((progressInterval > 0) && + (bytesReadThisInterval > progressInterval)) { + totalBytesRead += bytesReadThisInterval; + bytesReadThisInterval -= progressInterval; + long now = System.currentTimeMillis(); + System.out.println("loaded " + count + " records " + + (now - lastTime) + " ms - % completed: " + + ((100 * totalBytesRead) / totalLoadBytes)); + lastTime = now; + } + + keyLine = reader.readLine(); + if (keyLine == null) { + throw new IllegalArgumentException("No \"DATA=END\""); + } + bytesReadThisInterval += keyLine.length() + 1; + } + } + + private byte[] loadLine(String line) + throws DatabaseException { + + if (formatUsingPrintable) { + return readPrintableLine(line); + } + int nBytes = line.length() / 2; + byte[] ret = new byte[nBytes]; + int charIdx = 0; + for (int i = 0; i < nBytes; i++, charIdx += 2) { + int b2 = Character.digit(line.charAt(charIdx), 16); + b2 <<= 4; + b2 += Character.digit(line.charAt(charIdx + 1), 16); + ret[i] = (byte) b2; + } + return ret; + } + + private static byte backSlashValue = + ((byte) ('\\')) & 0xff; + + private byte[] readPrintableLine(String line) + throws DatabaseException { + + /* nBytes is the max number of bytes that this line could turn into. */ + int maxNBytes = line.length(); + byte[] ba = new byte[maxNBytes]; + int actualNBytes = 0; + + for (int charIdx = 0; charIdx < maxNBytes; charIdx++) { + char c = line.charAt(charIdx); + if (c == '\\') { + if (++charIdx < maxNBytes) { + char c1 = line.charAt(charIdx); + if (c1 == '\\') { + ba[actualNBytes++] = backSlashValue; + } else { + if (++charIdx < maxNBytes) { + char c2 = line.charAt(charIdx); + int b = Character.digit(c1, 16); + b <<= 4; + b += Character.digit(c2, 16); + ba[actualNBytes++] = (byte) b; + } else { + throw + new IllegalArgumentException("Corrupted file"); + } + } + } else { + throw new IllegalArgumentException("Corrupted file"); + } + } else { + ba[actualNBytes++] = (byte) (c & 0xff); + } + } + + if (maxNBytes == actualNBytes) { + return ba; + } else { + byte[] ret = new byte[actualNBytes]; + System.arraycopy(ba, 0, ret, 0, actualNBytes); + return ret; + } + } +} diff --git a/src/com/sleepycat/je/util/DbPrintLog.java b/src/com/sleepycat/je/util/DbPrintLog.java new file mode 100644 index 0000000..08905d9 --- /dev/null +++ b/src/com/sleepycat/je/util/DbPrintLog.java @@ -0,0 +1,393 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.lang.reflect.Constructor; + +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.DumpFileReader; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LastFileReader; +import com.sleepycat.je.log.PrintFileReader; +import com.sleepycat.je.log.StatsFileReader; +import com.sleepycat.je.log.VLSNDistributionReader; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Dumps the contents of the log in XML format to System.out. + * + *

+ * <p>To print an environment log:</p>
+ *
+ * <pre>
+ *      DbPrintLog.main(argv);
+ * </pre>
        + */ +public class DbPrintLog { + + /** + * Dump a JE log into human readable form. + */ + public void dump(File envHome, + String entryTypes, + String txnIds, + long startLsn, + long endLsn, + boolean verbose, + boolean stats, + boolean repEntriesOnly, + boolean csvFormat, + boolean forwards, + boolean vlsnDistribution, + String customDumpReaderClass) + throws EnvironmentNotFoundException, + EnvironmentLockedException { + + dump(envHome, entryTypes, "" /*dbIds*/, txnIds, startLsn, endLsn, + verbose, stats, repEntriesOnly, csvFormat, forwards, + vlsnDistribution, customDumpReaderClass); + } + + private void dump(File envHome, + String entryTypes, + String dbIds, + String txnIds, + long startLsn, + long endLsn, + boolean verbose, + boolean stats, + boolean repEntriesOnly, + boolean csvFormat, + boolean forwards, + boolean vlsnDistribution, + String customDumpReaderClass) { + EnvironmentImpl env = + CmdUtil.makeUtilityEnvironment(envHome, true); + FileManager fileManager = env.getFileManager(); + fileManager.setIncludeDeletedFiles(true); + int readBufferSize = + env.getConfigManager().getInt + (EnvironmentParams.LOG_ITERATOR_READ_SIZE); + + /* Configure the startLsn and endOfFileLsn if reading backwards. */ + long endOfFileLsn = DbLsn.NULL_LSN; + if (startLsn == DbLsn.NULL_LSN && endLsn == DbLsn.NULL_LSN && + !forwards) { + LastFileReader fileReader = + new LastFileReader(env, readBufferSize); + while (fileReader.readNextEntry()) { + } + startLsn = fileReader.getLastValidLsn(); + endOfFileLsn = fileReader.getEndOfLog(); + } + + try { + + /* + * Make a reader. First see if a custom debug class is available, + * else use the default versions. + */ + DumpFileReader reader = null; + if (customDumpReaderClass != null) { + + reader = getDebugReader( + customDumpReaderClass, env, readBufferSize, + startLsn, endLsn, endOfFileLsn, entryTypes, txnIds, + verbose, repEntriesOnly, forwards); + + } else { + if (stats) { + + reader = new StatsFileReader( + env, readBufferSize, startLsn, endLsn, endOfFileLsn, + entryTypes, dbIds, txnIds, verbose, repEntriesOnly, + forwards); + + } else if (vlsnDistribution) { + + reader = new VLSNDistributionReader( + env, readBufferSize, startLsn, endLsn, endOfFileLsn, + verbose, forwards); + + } else { + + reader = new PrintFileReader( + env, readBufferSize, startLsn, endLsn, endOfFileLsn, + entryTypes, dbIds, txnIds, verbose, repEntriesOnly, + forwards); + } + } + + /* Enclose the output in a tag to keep proper XML syntax. */ + if (!csvFormat) { + System.out.println(""); + } + + while (reader.readNextEntry()) { + } + + reader.summarize(csvFormat); + if (!csvFormat) { + System.out.println(""); + } + } finally { + env.close(); + } + } + + /** + * The main used by the DbPrintLog utility. + * + * @param argv An array of command line arguments to the DbPrintLog + * utility. + * + *
        +     * usage: java { com.sleepycat.je.util.DbPrintLog | -jar
        +     * je-<version>.jar DbPrintLog }
        +     *  -h <envHomeDir>
        +     *  -s  <start file number or LSN, in hex>
        +     *  -e  <end file number or LSN, in hex>
        +     *  -k  <binary|hex|text|obfuscate> (format for dumping the key/data)
        +     *  -db <targeted db ids, comma separated>
        +     *  -tx <targeted txn ids, comma separated>
        +     *  -ty <targeted entry types, comma separated>
        +     *  -S  show summary of log entries
        +     *  -SC show summary of log entries in CSV format
        +     *  -r  only print replicated log entries
        +     *  -b  scan log backwards. The entire log must be scanned, cannot be used
        +     *      with -s or -e
        +     *  -q  if specified, concise version is printed,
        +     *      default is verbose version
        +     *  -c  <name of custom dump reader class> if specified, DbPrintLog
        +     *      will attempt to load a class of this name, which will be used to
        +     *      process log entries. Used to customize formatting and dumping when
        +     *      debugging files.
        +     * 
+ *
+ * <p>All arguments are optional. The current directory is used if
+ * {@code -h} is not specified.</p>
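+ *
+ * <p>For example (an illustrative invocation, not part of the original
+ * documentation), to print a summary of the replicated entries in the log
+ * under the current directory:</p>
+ *
+ * <pre>
+ * java -jar je-<version>.jar DbPrintLog -h . -S -r
+ * </pre>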

        + */ + public static void main(String[] argv) { + try { + int whichArg = 0; + String entryTypes = null; + String dbIds = null; + String txnIds = null; + long startLsn = DbLsn.NULL_LSN; + long endLsn = DbLsn.NULL_LSN; + boolean verbose = true; + boolean stats = false; + boolean csvFormat = false; + boolean repEntriesOnly = false; + boolean forwards = true; + String customDumpReaderClass = null; + boolean vlsnDistribution = false; + + /* Default to looking in current directory. */ + File envHome = new File("."); + Key.DUMP_TYPE = DumpType.BINARY; + + while (whichArg < argv.length) { + String nextArg = argv[whichArg]; + if (nextArg.equals("-h")) { + whichArg++; + envHome = new File(CmdUtil.getArg(argv, whichArg)); + } else if (nextArg.equals("-ty")) { + whichArg++; + entryTypes = CmdUtil.getArg(argv, whichArg); + } else if (nextArg.equals("-db")) { + whichArg++; + dbIds = CmdUtil.getArg(argv, whichArg); + } else if (nextArg.equals("-tx")) { + whichArg++; + txnIds = CmdUtil.getArg(argv, whichArg); + } else if (nextArg.equals("-s")) { + whichArg++; + startLsn = CmdUtil.readLsn(CmdUtil.getArg(argv, whichArg)); + } else if (nextArg.equals("-e")) { + whichArg++; + endLsn = CmdUtil.readLsn(CmdUtil.getArg(argv, whichArg)); + } else if (nextArg.equals("-k")) { + whichArg++; + String dumpType = CmdUtil.getArg(argv, whichArg); + if (dumpType.equalsIgnoreCase("text")) { + Key.DUMP_TYPE = DumpType.TEXT; + } else if (dumpType.equalsIgnoreCase("hex")) { + Key.DUMP_TYPE = DumpType.HEX; + } else if (dumpType.equalsIgnoreCase("binary")) { + Key.DUMP_TYPE = DumpType.BINARY; + } else if (dumpType.equalsIgnoreCase("obfuscate")) { + Key.DUMP_TYPE = DumpType.OBFUSCATE; + } else { + System.err.println + (dumpType + + " is not a supported dump format type."); + } + } else if (nextArg.equals("-q")) { + verbose = false; + } else if (nextArg.equals("-b")) { + forwards = false; + } else if (nextArg.equals("-S")) { + stats = true; + } else if (nextArg.equals("-SC")) { + stats = true; + csvFormat = true; + } else if (nextArg.equals("-r")) { + repEntriesOnly = true; + } else if (nextArg.equals("-c")) { + whichArg++; + customDumpReaderClass = CmdUtil.getArg(argv, whichArg); + } else if (nextArg.equals("-vd")) { + /* + * An unadvertised option which displays vlsn distribution + * in a log, for debugging. + */ + vlsnDistribution = true; + } else { + System.err.println + (nextArg + " is not a supported option."); + usage(); + System.exit(-1); + } + whichArg++; + } + + /* Don't support scan backwards when -s or -e is enabled. */ + if ((startLsn != DbLsn.NULL_LSN || endLsn != DbLsn.NULL_LSN) && + !forwards) { + throw new UnsupportedOperationException + ("Backwards scans are not supported when -s or -e are " + + "used. 
They can only be used against the entire log."); + } + + DbPrintLog printer = new DbPrintLog(); + printer.dump(envHome, entryTypes, dbIds, txnIds, startLsn, endLsn, + verbose, stats, repEntriesOnly, csvFormat, forwards, + vlsnDistribution, customDumpReaderClass); + + } catch (Throwable e) { + e.printStackTrace(); + System.out.println(e.getMessage()); + usage(); + System.exit(1); + } + } + + private static void usage() { + System.out.println("Usage: " + + CmdUtil.getJavaCommand(DbPrintLog.class)); + System.out.println(" -h "); + System.out.println(" -s "); + System.out.println(" -e "); + System.out.println(" -k " + + "(format for dumping the key and data)"); + System.out.println(" -db "); + System.out.println(" -tx "); + System.out.println(" -ty "); + System.out.println(" -S show Summary of log entries"); + System.out.println(" -SC show Summary of log entries in CSV format"); + System.out.println(" -r only print replicated log entries"); + System.out.println(" -b scan all the log files backwards, don't "); + System.out.println(" support scan between two log files"); + System.out.println(" -q if specified, concise version is printed"); + System.out.println(" Default is verbose version.)"); + System.out.println(" -c if specified, "); + System.out.println(" attempt to load this class to use for the "); + System.out.println(" formatting of dumped log entries"); + System.out.println("All arguments are optional"); + } + + /** + * If a custom dump reader class is specified, we'll use that for + * DbPrintLog instead of the regular DumpFileReader. The custom reader must + * have DumpFileReader as a superclass ancestor. Its constructor must have + * this signature: + * + * public class FooReader extends DumpFileReader { + * + * public FooReader(EnvironmentImpl env, + * Integer readBufferSize, + * Long startLsn, + * Long finishLsn, + * Long endOfFileLsn, + * String entryTypes, + * String txnIds, + * Boolean verbose, + * Boolean repEntriesOnly, + * Boolean forwards) + * super(env, readBufferSize, startLsn, finishLsn, endOfFileLsn, + * entryTypes, txnIds, verbose, repEntriesOnly, forwards); + * + * See com.sleepycat.je.util.TestDumper, on the test side, for an example. 
+ */ + private DumpFileReader getDebugReader(String customDumpReaderClass, + EnvironmentImpl env, + int readBufferSize, + long startLsn, + long finishLsn, + long endOfFileLsn, + String entryTypes, + String txnIds, + boolean verbose, + boolean repEntriesOnly, + boolean forwards) { + Class debugClass = null; + try { + debugClass = Class.forName(customDumpReaderClass); + } catch (Exception e) { + throw new IllegalArgumentException + ("-c was specified, but couldn't load " + + customDumpReaderClass + " ", e); + } + + Class args[] = { EnvironmentImpl.class, + Integer.class, // readBufferSize + Long.class, // startLsn + Long.class, // finishLsn + Long.class, // endOfFileLsn + String.class, // entryTypes + String.class, // txnIds + Boolean.class, // verbose + Boolean.class, // repEntriesOnly + Boolean.class }; // forwards + + DumpFileReader debugReader = null; + try { + Constructor con = + debugClass.getConstructor(args); + debugReader = (DumpFileReader) con.newInstance(env, + readBufferSize, + startLsn, + finishLsn, + endOfFileLsn, + entryTypes, + txnIds, + verbose, + repEntriesOnly, + forwards); + } catch (Exception e) { + throw new IllegalStateException + ("-c was specified, but couldn't instantiate " + + customDumpReaderClass + " ", e); + } + + return debugReader; + } +} diff --git a/src/com/sleepycat/je/util/DbRunAction.java b/src/com/sleepycat/je/util/DbRunAction.java new file mode 100644 index 0000000..1768d15 --- /dev/null +++ b/src/com/sleepycat/je/util/DbRunAction.java @@ -0,0 +1,413 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.text.DecimalFormat; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.cleaner.VerifyUtils; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.CmdUtil; + +/** + * @hidden + * For internal use only. + * DbRunAction is a debugging aid that can invoke a JE operation or background + * activity from the command line. 
+ * + * batchClean calls Environment.cleanLog() in a loop + * checkpoint calls Environment.checkpoint() with force=true + * compress calls Environment.compress + * evict calls Environment.preload, then evictMemory + * removeDb calls Environment.removeDatabase, but doesn't do any cleaning + * removeDbAndClean calls removeDatabase, then cleanLog in a loop + * activateCleaner wakes up the cleaner, and then the main thread waits + * until you type "y" to the console before calling Environment.close(). + * The control provided by the prompt is necessary for daemon activities + * because often threads check and bail out if the environment is closed. + * verifyUtilization calls CleanerTestUtils.verifyUtilization() to compare + * utilization as calculated by UtilizationProfile to utilization as + * calculated by UtilizationFileReader. + */ +public class DbRunAction { + + private static final int BATCH_CLEAN = 1; // app-driven batch cleaning + private static final int COMPRESS = 2; + private static final int EVICT = 3; + private static final int CHECKPOINT = 4; + private static final int REMOVEDB = 5; + private static final int REMOVEDB_AND_CLEAN = 6; + private static final int ACTIVATE_CLEANER_THREADS = 7; + // wake up cleaner threads + private static final int VERIFY_UTILIZATION = 8; + + public static void main(String[] argv) { + + long recoveryStart = 0; + long actionStart = 0; + long actionEnd = 0; + + try { + int whichArg = 0; + if (argv.length == 0) { + usage(); + System.exit(1); + } + + String dbName = null; + int doAction = 0; + String envHome = "."; + boolean readOnly = false; + boolean printStats = false; + + while (whichArg < argv.length) { + String nextArg = argv[whichArg]; + + if (nextArg.equals("-h")) { + whichArg++; + envHome = CmdUtil.getArg(argv, whichArg); + } else if (nextArg.equals("-a")) { + whichArg++; + String action = CmdUtil.getArg(argv, whichArg); + if (action.equalsIgnoreCase("batchClean")) { + doAction = BATCH_CLEAN; + } else if (action.equalsIgnoreCase("compress")) { + doAction = COMPRESS; + } else if (action.equalsIgnoreCase("checkpoint")) { + doAction = CHECKPOINT; + } else if (action.equalsIgnoreCase("evict")) { + doAction = EVICT; + } else if (action.equalsIgnoreCase("removedb")) { + doAction = REMOVEDB; + } else if (action.equalsIgnoreCase("removedbAndClean")) { + doAction = REMOVEDB_AND_CLEAN; + } else if (action.equalsIgnoreCase("activateCleaner")) { + doAction = ACTIVATE_CLEANER_THREADS; + } else if (action.equalsIgnoreCase("verifyUtilization")) { + doAction = VERIFY_UTILIZATION; + } else { + usage(); + System.exit(1); + } + } else if (nextArg.equals("-ro")) { + readOnly = true; + } else if (nextArg.equals("-s")) { + dbName = argv[++whichArg]; + } else if (nextArg.equals("-stats")) { + printStats = true; + } else { + throw new IllegalArgumentException + (nextArg + " is not a supported option."); + } + whichArg++; + } + + EnvironmentConfig envConfig = new EnvironmentConfig(); + + /* Don't debug log to the database log. */ + if (readOnly) { + envConfig.setConfigParam + (EnvironmentParams.JE_LOGGING_DBLOG.getName(), "false"); + + envConfig.setReadOnly(true); + } + + /* + * If evicting, scan the given database first and don't run the + * background evictor. + */ + if (doAction == EVICT) { + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.EVICTOR_CRITICAL_PERCENTAGE.getName(), + "1000"); + } + + /* + * If cleaning, disable the daemon cleaner threads. 
The work being + * done by these threads is aborted when the environment is closed, + * which can result in incomplete log cleaning. + */ + if (doAction == BATCH_CLEAN) { + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + } + + recoveryStart = System.currentTimeMillis(); + + Environment env = + new Environment(new File(envHome), envConfig); + + CheckpointConfig forceConfig = new CheckpointConfig(); + forceConfig.setForce(true); + + Thread statsPrinter = null; + if (printStats) { + statsPrinter = new StatsPrinter(env); + statsPrinter.start(); + } + + boolean promptForShutdown = false; + actionStart = System.currentTimeMillis(); + switch(doAction) { + case BATCH_CLEAN: + /* Since this is batch cleaning, repeat until no progress. */ + while (true) { + int nFiles = env.cleanLog(); + System.out.println("Files cleaned: " + nFiles); + if (nFiles == 0) { + break; + } + } + env.checkpoint(forceConfig); + break; + case COMPRESS: + env.compress(); + break; + case CHECKPOINT: + env.checkpoint(forceConfig); + break; + case EVICT: + preload(env, dbName); + break; + case REMOVEDB: + removeAndClean(env, dbName, false); + break; + case REMOVEDB_AND_CLEAN: + removeAndClean(env, dbName, true); + break; + case ACTIVATE_CLEANER_THREADS: + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + envImpl.getCleaner().wakeupActivate(); + promptForShutdown = true; + break; + case VERIFY_UTILIZATION: + EnvironmentImpl envImpl2 = + DbInternal.getNonNullEnvImpl(env); + VerifyUtils. verifyUtilization + (envImpl2, + true, // expectAccurateObsoleteLNCount + true, // expectAccurateObsoleteLNSize + true); // expectAccurateDbUtilization + break; + } + actionEnd = System.currentTimeMillis(); + + if (promptForShutdown) { + + /* + * If the requested action is a daemon driven one, we don't + * want the main thread to shutdown the environment until we + * say we're ready + */ + waitForShutdown(); + } + if (statsPrinter != null) { + statsPrinter.interrupt(); + statsPrinter.join(); + } + env.close(); + } catch (Exception e) { + e.printStackTrace(); + System.out.println(e.getMessage()); + usage(); + System.exit(1); + } finally { + DecimalFormat f = new DecimalFormat(); + f.setMaximumFractionDigits(2); + + long recoveryDuration = actionStart - recoveryStart; + System.out.println("\nrecovery time = " + + f.format(recoveryDuration) + + " millis " + + f.format((double)recoveryDuration/60000) + + " minutes"); + + long actionDuration = actionEnd - actionStart; + System.out.println("action time = " + + f.format(actionDuration) + + " millis " + + f.format(actionDuration/60000) + + " minutes"); + } + } + + private static void removeAndClean(Environment env, + String name, + boolean doCleaning) + throws Exception { + + long a, c, d, e, f; + + Transaction txn = null; + CheckpointConfig force = new CheckpointConfig(); + force.setForce(true); + + a = System.currentTimeMillis(); + env.removeDatabase(txn, name); + c = System.currentTimeMillis(); + + int cleanedCount = 0; + if (doCleaning) { + while (env.cleanLog() > 0) { + cleanedCount++; + } + } + d = System.currentTimeMillis(); + + System.out.println("cleanedCount=" + cleanedCount); + e = 0; + f = 0; + if (cleanedCount > 0) { + e = System.currentTimeMillis(); + env.checkpoint(force); + f = System.currentTimeMillis(); + } + + System.out.println("Remove of " + name + + " remove: " + getSecs(a, c) + + " clean: " + getSecs(c, d) + + " checkpoint: " + getSecs(e, f)); + } + + private static String getSecs(long start, long end) { + return (end-start) / 1000 
+ " secs"; + } + + private static void preload(Environment env, String dbName) + throws Exception { + + System.out.println("Preload starting"); + Database db = env.openDatabase(null, dbName, null); + Cursor cursor = db.openCursor(null, null); + try { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + int count = 0; + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + count++; + if ((count % 50000) == 0) { + System.out.println(count + "..."); + } + } + System.out.println("Preloaded " + count + " records"); + } finally { + cursor.close(); + db.close(); + } + } + + @SuppressWarnings("unused") + private static void doEvict(Environment env) + throws DatabaseException { + + /* Push the cache size down by half to force eviction. */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + long cacheUsage = envImpl.getMemoryBudget().getCacheMemoryUsage(); + EnvironmentMutableConfig c = new EnvironmentMutableConfig(); + c.setCacheSize(cacheUsage/2); + env.setMutableConfig(c); + + long start = System.currentTimeMillis(); + env.evictMemory(); + long end = System.currentTimeMillis(); + + DecimalFormat f = new DecimalFormat(); + f.setMaximumFractionDigits(2); + System.out.println("evict time=" + f.format(end-start)); + } + + private static void waitForShutdown() + throws IOException { + + System.out.println + ("Wait for daemon activity to run. When ready to stop, type (y)"); + BufferedReader reader = + new BufferedReader(new InputStreamReader(System.in)); + do { + String val = reader.readLine(); + if (val != null && + (val.equalsIgnoreCase("y") || + val.equalsIgnoreCase("yes"))) { + break; + } else { + System.out.println("Shutdown? (y)"); + } + } while (true); + } + + private static class StatsPrinter extends Thread { + + private Environment env; + + StatsPrinter(Environment env) { + this.env = env; + } + + @Override + public void run() { + + StatsConfig clearConfig = new StatsConfig(); + clearConfig.setClear(true); + + while (true) { + try { + synchronized (this) { + wait(30 * 1000); + } + EnvironmentStats stats = env.getStats(clearConfig); + System.out.println("\n" + stats + "\n"); + } catch (DatabaseException e) { + e.printStackTrace(); + break; + } catch (InterruptedException e) { + break; + } + } + } + } + + private static void usage() { + System.out.println("Usage: \n " + + CmdUtil.getJavaCommand(DbRunAction.class)); + System.out.println(" -h "); + System.out.println(" -a "); + System.out.println(" -ro (read-only - defaults to read-write)"); + System.out.println(" -s (for removeDb)"); + System.out.println(" -stats (print every 30 seconds)"); + } +} diff --git a/src/com/sleepycat/je/util/DbScavenger.java b/src/com/sleepycat/je/util/DbScavenger.java new file mode 100644 index 0000000..ee2e93b --- /dev/null +++ b/src/com/sleepycat/je/util/DbScavenger.java @@ -0,0 +1,467 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LastFileReader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.ScavengerFileReader; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.txn.TxnChain.CompareSlot; +import com.sleepycat.je.utilint.BitMap; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.utilint.StringUtils; + +/** + * Used to retrieve as much data as possible from a corrupted environment. + * This utility is meant to be used programmatically, and is the equivalent + * to the -R or -r options for {@link DbDump}. + *

+ * <p>To scavenge a database:</p>
+ *
+ * <pre>
+ *  DbScavenger scavenger =
+ *      new DbScavenger(env, outputDirectory, formatUsingPrintable,
+ *                      doAggressiveScavengerRun, verbose);
+ *  scavenger.dump();
+ * </pre>
+ *

+ * <p>The recovered databases will be placed in the outputDirectory with
+ * ".dump" file suffixes. The format of the .dump files will be suitable for
+ * use with DbLoad.</p>
+ */
+
+public class DbScavenger extends DbDump {
+    private static final int FLUSH_INTERVAL = 100;
+    private int readBufferSize;
+    private EnvironmentImpl envImpl;
+
+    /*
+     * Set of committed txn ids that have been seen so far. Positive IDs are
+     * for non-replicated txns, and negative IDs are for replicated txns.
+     */
+    private BitMap positiveCommittedTxnIdsSeen;
+    private BitMap negativeCommittedTxnIdsSeen;
+
+    /*
+     * Set of LN Node Ids that have been seen so far.
+     */
+    private Set<CompareSlot> lnNodesSeen;
+
+    /*
+     * Map of database id to database names.
+     */
+    private Map<Long, String> dbIdToName;
+
+    /*
+     * Map of database id to DatabaseImpl.
+     */
+    private Map<Long, DatabaseImpl> dbIdToImpl;
+
+    /*
+     * Map of database id to the .dump file output stream for that database.
+     */
+    private Map<Long, PrintStream> dbIdToOutputStream;
+
+    private boolean dumpCorruptedBounds = false;
+
+    private int flushCounter = 0;
+    private long lastTime;
+
+    /**
+     * Create a DbScavenger object for a specific environment.
+     *

        + * @param env The Environment containing the database to dump. + * @param outputDirectory The directory to create the .dump files in. + * @param formatUsingPrintable true if the dump should use printable + * characters. + * @param doAggressiveScavengerRun true if true, then all data records are + * dumped, regardless of whether they are the latest version or not. + * @param verbose true if status output should be written to System.out + * during scavenging. + */ + public DbScavenger(Environment env, + String outputDirectory, + boolean formatUsingPrintable, + boolean doAggressiveScavengerRun, + boolean verbose) { + super(env, null, null, formatUsingPrintable); + + this.doAggressiveScavengerRun = doAggressiveScavengerRun; + this.dbIdToName = new HashMap(); + this.dbIdToImpl = new HashMap(); + this.dbIdToOutputStream = new HashMap(); + this.verbose = verbose; + this.outputDirectory = outputDirectory; + } + + /** + * Set to true if corrupted boundaries should be dumped out. + */ + public void setDumpCorruptedBounds(boolean dumpCorruptedBounds) { + this.dumpCorruptedBounds = dumpCorruptedBounds; + } + + /** + * Start the scavenger run. + */ + @Override + public void dump() + throws EnvironmentNotFoundException, + EnvironmentLockedException, + IOException { + + openEnv(false); + + envImpl = DbInternal.getNonNullEnvImpl(env); + DbConfigManager cm = envImpl.getConfigManager(); + readBufferSize = cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE); + + /* + * Find the end of the log. + */ + LastFileReader reader = new LastFileReader(envImpl, readBufferSize); + while (reader.readNextEntry()) { + } + + /* Tell the fileManager where the end of the log is. */ + long lastUsedLsn = reader.getLastValidLsn(); + long nextAvailableLsn = reader.getEndOfLog(); + envImpl.getFileManager().setLastPosition(nextAvailableLsn, + lastUsedLsn, + reader.getPrevOffset()); + + try { + /* Pass 1: Scavenge the dbtree. */ + if (verbose) { + System.out.println("Pass 1: " + new Date()); + } + scavengeDbTree(lastUsedLsn, nextAvailableLsn); + + /* Pass 2: Scavenge the databases. */ + if (verbose) { + System.out.println("Pass 2: " + new Date()); + } + scavenge(lastUsedLsn, nextAvailableLsn); + + if (verbose) { + System.out.println("End: " + new Date()); + } + } finally { + closeOutputStreams(); + } + } + + /* + * Scan the log looking for records that are relevant for scavenging the db + * tree. 
+ */ + private void scavengeDbTree(long lastUsedLsn, long nextAvailableLsn) + throws DatabaseException { + + positiveCommittedTxnIdsSeen = new BitMap(); + negativeCommittedTxnIdsSeen = new BitMap(); + lnNodesSeen = new TreeSet(); + + final ScavengerFileReader scavengerReader = + new ScavengerFileReader(envImpl, readBufferSize, lastUsedLsn, + DbLsn.NULL_LSN, nextAvailableLsn) { + protected void processEntryCallback(LogEntry entry, + LogEntryType entryType) + throws DatabaseException { + + processDbTreeEntry(entry, entryType); + } + }; + + scavengerReader.setTargetType(LogEntryType.LOG_MAPLN); + scavengerReader.setTargetType(LogEntryType.LOG_NAMELN_TRANSACTIONAL); + scavengerReader.setTargetType(LogEntryType.LOG_NAMELN); + scavengerReader.setTargetType(LogEntryType.LOG_TXN_COMMIT); + scavengerReader.setTargetType(LogEntryType.LOG_TXN_ABORT); + lastTime = System.currentTimeMillis(); + long fileNum = -1; + while (scavengerReader.readNextEntry()) { + fileNum = reportProgress(fileNum, + scavengerReader.getLastLsn()); + } + } + + private long reportProgress(long fileNum, long lastLsn) { + + long currentFile = DbLsn.getFileNumber(lastLsn); + if (verbose) { + if (currentFile != fileNum) { + long now = System.currentTimeMillis(); + System.out.println("processing file " + + FileManager.getFileName(currentFile, + ".jdb ") + + (now-lastTime) + " ms"); + lastTime = now; + } + } + + return currentFile; + } + + /* + * Look at an entry and determine if it should be processed for scavenging. + */ + private boolean checkProcessEntry(LogEntry entry, + LogEntryType entryType, + boolean pass2) { + boolean isTransactional = entryType.isTransactional(); + + /* + * If entry is txnal... + * if a commit record, add to committed txn id set + * if an abort record, ignore it and don't process. + * if an LN, check if it's in the committed txn id set. + * If it is, continue processing, otherwise ignore it. + */ + if (isTransactional) { + final long txnId = entry.getTransactionId(); + if (entryType.equals(LogEntryType.LOG_TXN_COMMIT)) { + setCommittedTxn(txnId); + /* No need to process this entry further. */ + return false; + } + + if (entryType.equals(LogEntryType.LOG_TXN_ABORT)) { + /* No need to process this entry further. */ + return false; + } + + if (!isCommittedTxn(txnId)) { + return false; + } + } + + /* + * Check the nodeId to see if we've already seen it or not. + */ + if (entry instanceof LNLogEntry) { + + final LNLogEntry lnEntry = (LNLogEntry) entry; + final long dbId = lnEntry.getDbId().getId(); + final DatabaseImpl db = dbIdToImpl.get(dbId); + /* Must call postFetchInit if true is returned. */ + if (db != null) { + lnEntry.postFetchInit(db); + } else { + lnEntry.postFetchInit(false /*isDupDb*/); + } + + /* + * If aggressive or if processing DbTree entries, don't worry about + * whether this node has been processed already. + */ + if (doAggressiveScavengerRun || !pass2) { + return true; + } + + if (db == null) { + throw EnvironmentFailureException.unexpectedState + ("Database info not available for DB ID: " + dbId); + } + return lnNodesSeen.add(new CompareSlot(db, lnEntry)); + } + + return false; + } + + /* + * Called once for each log entry during the pass 1 (dbtree). 
+ */ + private void processDbTreeEntry(LogEntry entry, LogEntryType entryType) + throws DatabaseException { + + boolean processThisEntry = + checkProcessEntry(entry, entryType, false); + + if (processThisEntry && + (entry instanceof LNLogEntry)) { + LNLogEntry lnEntry = (LNLogEntry) entry; + LN ln = lnEntry.getLN(); + if (ln instanceof NameLN) { + String name = StringUtils.fromUTF8(lnEntry.getKey()); + Long dbId = Long.valueOf(((NameLN) ln).getId().getId()); + if (dbIdToName.containsKey(dbId) && + !dbIdToName.get(dbId).equals(name)) { + throw EnvironmentFailureException.unexpectedState + ("Already name mapped for dbId: " + dbId + + " changed from " + dbIdToName.get(dbId) + + " to " + name); + } else { + dbIdToName.put(dbId, name); + } + } + + if (ln instanceof MapLN) { + DatabaseImpl db = ((MapLN) ln).getDatabase(); + Long dbId = db.getId().getId(); + /* Use latest version to get most recent comparators. */ + if (!dbIdToImpl.containsKey(dbId)) { + dbIdToImpl.put(dbId, db); + } + } + } + } + + /* + * Pass 2: scavenge the regular (non-dbtree) environment. + */ + private void scavenge(long lastUsedLsn, long nextAvailableLsn) + throws DatabaseException { + + final ScavengerFileReader scavengerReader = + new ScavengerFileReader(envImpl, readBufferSize, lastUsedLsn, + DbLsn.NULL_LSN, nextAvailableLsn) { + protected void processEntryCallback(LogEntry entry, + LogEntryType entryType) + throws DatabaseException { + + processRegularEntry(entry, entryType); + } + }; + + /* + * Note: committed transaction id map has been created already, no + * need to read TXN_COMMITS on this pass. + */ + for (LogEntryType entryType : LogEntryType.getAllTypes()) { + if (entryType.isUserLNType()) { + scavengerReader.setTargetType(entryType); + } + } + scavengerReader.setDumpCorruptedBounds(dumpCorruptedBounds); + + long progressFileNum = -1; + while (scavengerReader.readNextEntry()) { + progressFileNum = reportProgress(progressFileNum, + scavengerReader.getLastLsn()); + } + } + + /* + * Process an entry during pass 2. + */ + private void processRegularEntry(LogEntry entry, LogEntryType entryType) + throws DatabaseException { + + boolean processThisEntry = + checkProcessEntry(entry, entryType, true); + + if (processThisEntry) { + LNLogEntry lnEntry = (LNLogEntry) entry; + Long dbId = Long.valueOf(lnEntry.getDbId().getId()); + LN ln = lnEntry.getLN(); + + /* Create output file even if we don't process a deleted entry. */ + PrintStream out = getOutputStream(dbId); + + if (!ln.isDeleted()) { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + lnEntry.getUserKeyData(key, data); + dumpOne(out, key.getData(), formatUsingPrintable); + dumpOne(out, data.getData(), formatUsingPrintable); + if ((++flushCounter % FLUSH_INTERVAL) == 0) { + out.flush(); + flushCounter = 0; + } + } + } + } + + /* + * Return the output stream for the .dump file for database with id dbId. + * If an output stream has not already been created, then create one. 
+ */ + private PrintStream getOutputStream(Long dbId) + throws DatabaseException { + + PrintStream ret = dbIdToOutputStream.get(dbId); + if (ret != null) { + return ret; + } + String name = dbIdToName.get(dbId); + if (name == null) { + name = "db" + dbId; + } + File file = new File(outputDirectory, name + ".dump"); + try { + ret = new PrintStream(new FileOutputStream(file), false); + } catch (FileNotFoundException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + dbIdToOutputStream.put(dbId, ret); + DatabaseImpl db = dbIdToImpl.get(dbId); + boolean dupSort = (db != null) ? db.getSortedDuplicates() : false; + printHeader(ret, dupSort, formatUsingPrintable); + return ret; + } + + private void closeOutputStreams() { + + Iterator iter = dbIdToOutputStream.values().iterator(); + while (iter.hasNext()) { + PrintStream s = iter.next(); + s.println("DATA=END"); + s.close(); + } + } + + private void setCommittedTxn(final long txnId) { + if (txnId >= 0) { + positiveCommittedTxnIdsSeen.set(txnId); + } else { + negativeCommittedTxnIdsSeen.set(0 - txnId); + } + } + + private boolean isCommittedTxn(final long txnId) { + if (txnId >= 0) { + return positiveCommittedTxnIdsSeen.get(txnId); + } else { + return negativeCommittedTxnIdsSeen.get(0 - txnId); + } + } +} diff --git a/src/com/sleepycat/je/util/DbSpace.java b/src/com/sleepycat/je/util/DbSpace.java new file mode 100644 index 0000000..95eb301 --- /dev/null +++ b/src/com/sleepycat/je/util/DbSpace.java @@ -0,0 +1,558 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.io.PrintStream; +import java.text.DateFormat; +import java.text.ParsePosition; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Date; +import java.util.Map; +import java.util.NavigableSet; +import java.util.SortedMap; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.cleaner.ExpirationProfile; +import com.sleepycat.je.cleaner.ExpirationTracker; +import com.sleepycat.je.cleaner.FileProcessor; +import com.sleepycat.je.cleaner.FileSummary; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.UtilizationFileReader; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.FormatUtil; +import com.sleepycat.je.utilint.Pair; + +/** + * DbSpace displays the disk space utilization for an environment. + *

        + * usage: java { com.sleepycat.je.util.DbSpace |
        + *               -jar je-<version>.jar DbSpace }
+ *          -h <dir>  # environment home directory
        + *         [-q]     # quiet, print grand totals only
        + *         [-u]     # sort by average utilization
        + *         [-d]     # dump file summary details
        + *         [-r]     # recalculate utilization (expensive)
        + *         [-R]     # recalculate expired data (expensive)
        + *         [-s]     # start file number or LSN, in hex
        + *         [-e]     # end file number or LSN, in hex
        + *         [-t]     # time for calculating expired data
        + *                  #   format: yyyy-MM-dd'T'HHZ
        + *                  #  example: 2016-03-09T22-0800
        + *         [-V]     # print JE version number
        + * 
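+ *
+ * <p>For example (an illustrative invocation, not part of the original
+ * documentation), to print per-file utilization sorted by average
+ * utilization:</p>
+ *
+ * <pre>
+ * java -jar je-<version>.jar DbSpace -h /env/home -u
+ * </pre>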
        + */ +public class DbSpace { + + private static final String DATE_FORMAT = "yyyy-MM-dd'T'HHZ"; + private static final String DATE_EXAMPLE = "2016-03-17T22-0800"; + + private static final String USAGE = + "usage: " + CmdUtil.getJavaCommand(DbSpace.class) + "\n" + + " -h # environment home directory\n" + + " [-q] # quiet, print grand totals only\n" + + " [-u] # sort by average utilization\n" + + " [-d] # dump file summary details\n" + + " [-r] # recalculate utilization (expensive)\n" + + " [-R] # recalculate expired data (expensive)\n" + + " [-s] # start file number or LSN, in hex\n" + + " [-e] # end file number or LSN, in hex\n" + + " [-t] # time for calculating expired data,\n" + + " # format: " + DATE_FORMAT + "\n" + + " # example: " + DATE_EXAMPLE + "\n" + + " [-V] # print JE version number"; + + public static void main(String argv[]) + throws Exception { + + DbSpace space = new DbSpace(); + space.parseArgs(argv); + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setReadOnly(true); + Environment env = new Environment(space.envHome, envConfig); + space.initEnv(DbInternal.getNonNullEnvImpl(env)); + + try { + space.print(System.out); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.err); + System.exit(1); + } finally { + try { + env.close(); + } catch (Throwable e) { + e.printStackTrace(System.err); + System.exit(1); + } + } + } + + private File envHome = null; + private EnvironmentImpl envImpl; + private boolean quiet = false; + private boolean sorted = false; + private boolean details = false; + private boolean doRecalcUtil = false; + private boolean doRecalcExpired = false; + private long startLsn = DbLsn.NULL_LSN; + private long finishLsn = DbLsn.NULL_LSN; + private long targetTime = System.currentTimeMillis(); + + private DbSpace() { + } + + /** + * Creates a DbSpace object for calculating utilization using an open + * Environment. + */ + public DbSpace(Environment env, + boolean quiet, + boolean details, + boolean sorted) { + this(DbInternal.getNonNullEnvImpl(env), quiet, details, sorted); + } + + /** + * For internal use only. 
+ * @hidden + */ + public DbSpace(EnvironmentImpl envImpl, + boolean quiet, + boolean details, + boolean sorted) { + initEnv(envImpl); + this.quiet = quiet; + this.details = details; + this.sorted = sorted; + } + + private void initEnv(EnvironmentImpl envImpl) { + this.envImpl = envImpl; + } + + private void printUsage(String msg) { + if (msg != null) { + System.err.println(msg); + } + System.err.println(USAGE); + System.exit(-1); + } + + private void parseArgs(String argv[]) { + + int argc = 0; + int nArgs = argv.length; + + if (nArgs == 0) { + printUsage(null); + System.exit(0); + } + + while (argc < nArgs) { + String thisArg = argv[argc++]; + if (thisArg.equals("-q")) { + quiet = true; + } else if (thisArg.equals("-u")) { + sorted = true; + } else if (thisArg.equals("-d")) { + details = true; + } else if (thisArg.equals("-r")) { + doRecalcUtil = true; + } else if (thisArg.equals("-R")) { + doRecalcExpired = true; + } else if (thisArg.equals("-V")) { + System.out.println(JEVersion.CURRENT_VERSION); + System.exit(0); + } else if (thisArg.equals("-h")) { + if (argc < nArgs) { + envHome = new File(argv[argc++]); + } else { + printUsage("-h requires an argument"); + } + } else if (thisArg.equals("-s")) { + if (argc < nArgs) { + startLsn = CmdUtil.readLsn(argv[argc++]); + } else { + printUsage("-s requires an argument"); + } + } else if (thisArg.equals("-e")) { + if (argc < nArgs) { + finishLsn = CmdUtil.readLsn(argv[argc++]); + } else { + printUsage("-e requires an argument"); + } + } else if (thisArg.equals("-t")) { + if (argc < nArgs) { + String s = argv[argc++]; + DateFormat format = new SimpleDateFormat(DATE_FORMAT); + ParsePosition pp = new ParsePosition(0); + Date date = format.parse(s, pp); + if (date != null) { + targetTime = date.getTime(); + } else { + printUsage( + "-t doesn't match format: " + DATE_FORMAT + + " example: " + DATE_EXAMPLE); + } + } else { + printUsage("-t requires an argument"); + } + } + } + + if (envHome == null) { + printUsage("-h is a required argument"); + } + + if (doRecalcUtil && doRecalcExpired) { + printUsage("-r and -R cannot both be used"); + } + } + + /** + * Sets the recalculation property, which if true causes a more expensive + * recalculation of utilization to be performed for debugging purposes. + * This property is false by default. + */ + public void setRecalculate(boolean recalc) { + this.doRecalcUtil = recalc; + } + + /** + * Sets the start file number, which is a lower bound on the range of + * files for which utilization is reported and (optionally) recalculated. + * By default there is no lower bound. + */ + public void setStartFile(long startFile) { + this.startLsn = startFile; + } + + /** + * Sets the ending file number, which is an upper bound on the range of + * files for which utilization is reported and (optionally) recalculated. + * By default there is no upper bound. + */ + public void setEndFile(long endFile) { + this.finishLsn = endFile; + } + + /** + * Sets the time for calculating expired data. + */ + public void setTime(long time) { + targetTime = time; + } + + /** + * Calculates utilization and prints a report to the given output stream. + */ + public void print(PrintStream out) + throws DatabaseException { + + long startFile = (startLsn != DbLsn.NULL_LSN) ? + DbLsn.getFileNumber(startLsn) : 0; + + long finishFile = (finishLsn != DbLsn.NULL_LSN) ? + DbLsn.getFileNumber(finishLsn) : Long.MAX_VALUE; + + SortedMap map = + envImpl.getUtilizationProfile().getFileSummaryMap(true). 
+            subMap(startFile, finishFile);
+
+        map.keySet().removeAll(
+            envImpl.getCleaner().getFileSelector().getInProgressFiles());
+
+        Map<Long, FileSummary> recalcMap =
+            doRecalcUtil ?
+            UtilizationFileReader.calcFileSummaryMap(
+                envImpl, startLsn, finishLsn) : null;
+
+        ExpirationProfile expProfile =
+            new ExpirationProfile(envImpl.getExpirationProfile());
+
+        expProfile.refresh(targetTime);
+
+        int fileIndex = 0;
+
+        Summary totals = new Summary();
+        Summary[] summaries = null;
+
+        if (!quiet) {
+            summaries = new Summary[map.size()];
+        }
+
+        for (Map.Entry<Long, FileSummary> entry : map.entrySet()) {
+
+            Long fileNum = entry.getKey();
+            FileSummary fs = entry.getValue();
+
+            FileSummary recalcFs = null;
+
+            if (recalcMap != null) {
+                recalcFs = recalcMap.get(fileNum);
+            }
+
+            int expiredSize = expProfile.getExpiredBytes(fileNum);
+            int recalcExpiredSize = -1;
+            ExpirationTracker expTracker = null;
+
+            if (doRecalcExpired) {
+
+                FileProcessor fileProcessor =
+                    envImpl.getCleaner().createProcessor();
+
+                expTracker = fileProcessor.countExpiration(fileNum);
+                recalcExpiredSize = expTracker.getExpiredBytes(targetTime);
+            }
+
+            Summary summary = new Summary(
+                fileNum, fs, recalcFs, expiredSize, recalcExpiredSize);
+
+            if (summaries != null) {
+                summaries[fileIndex] = summary;
+            }
+
+            if (details) {
+
+                out.println(
+                    "File 0x" + Long.toHexString(fileNum) +
+                    " expired: " + expiredSize +
+                    " histogram: " + expProfile.toString(fileNum) +
+                    " " + fs);
+
+                if (recalcMap != null) {
+                    out.println(
+                        "Recalc util 0x" + Long.toHexString(fileNum) +
+                        " " + recalcFs);
+                }
+
+                if (expTracker != null) {
+                    out.println(
+                        "Recalc expiration 0x" + Long.toHexString(fileNum) +
+                        " recalcExpired: " + recalcExpiredSize +
+                        " recalcHistogram: " + expTracker.toString());
+                }
+            }
+
+            totals.add(summary);
+            fileIndex += 1;
+        }
+
+        if (details) {
+            out.println();
+        }
+
+        out.println(
+            doRecalcExpired ? Summary.RECALC_EXPIRED_HEADER :
+            (doRecalcUtil ? Summary.RECALC_HEADER : Summary.HEADER));
+
+        if (summaries != null) {
+            if (sorted) {
+                Arrays.sort(summaries, new Comparator<Summary>() {
+                    public int compare(Summary s1, Summary s2) {
+                        return s1.avgUtilization() - s2.avgUtilization();
+                    }
+                });
+            }
+            for (Summary summary : summaries) {
+                summary.print(out);
+            }
+        }
+
+        totals.print(out);
+
+        if (totals.expiredSize > 0) {
+
+            DateFormat format = new SimpleDateFormat(DATE_FORMAT);
+
+            out.format(
+                "%nAs of %s, %,d kB are expired, resulting in the%n" +
+                "differences between minimum and maximum utilization.%n",
+                format.format(targetTime), totals.expiredSize / 1024);
+        }
+
+        Pair<Long, NavigableSet<Long>> reservedFileInfo =
+            envImpl.getFileProtector().getReservedFileInfo();
+
+        long reservedSize = reservedFileInfo.first();
+        NavigableSet<Long> reservedFiles = reservedFileInfo.second();
+        boolean printReservedFiles = !quiet && !reservedFiles.isEmpty();
+
+        out.format(
+            "%n%,d kB are used by additional reserved files%s%n",
+            reservedSize / 1024,
+            printReservedFiles ? ":" : ".");
+
+        if (printReservedFiles) {
+            out.println(FormatUtil.asHexString(reservedFiles));
+        }
+    }
+
+    private class Summary {
+
+        static final String HEADER =
+            "                     % Utilized\n" +
+            "  File    Size (kB)  Avg  Min  Max \n" +
+            "--------  ---------  ---- ---  ---";
+        //   12345678  123456789  123  123  123
+        //       12         12     12   12
+        //   TOTALS:
+
+        static final String RECALC_HEADER =
+            "                     % Utilized   Recalculated\n" +
+            "  File    Size (kB)  Avg Min Max  Avg Min Max\n" +
+            "--------  ---------  --- --- ---  --- --- ---";
+        //   12345678  123456789  123 123 123  123 123 123
+        //       12         12     12  12  12   12  12  12
+        //   TOTALS:
+
+        static final String RECALC_EXPIRED_HEADER =
+            "                     % Utilized w/Expiration\n" +
+            "  File    Size (kB)  Avg Min Max  Recalculated\n" +
+            "--------  ---------  --- --- ---  ------------";
+        //   12345678  123456789  123 123 123      123
+        //       12         12     12  12       123456
+        //   TOTALS:
+
+        Long fileNum;
+        long totalSize;
+        long obsoleteSize;
+        long recalcObsoleteSize;
+        long expiredSize;
+        long recalcExpiredSize;
+
+        Summary() {}
+
+        Summary(Long fileNum,
+                FileSummary summary,
+                FileSummary recalcSummary,
+                int expiredSize,
+                int recalcExpiredSize) {
+            this.fileNum = fileNum;
+            totalSize = summary.totalSize;
+            obsoleteSize = summary.getObsoleteSize();
+            if (recalcSummary != null) {
+                recalcObsoleteSize = recalcSummary.getObsoleteSize();
+            }
+            this.expiredSize = Math.min(expiredSize, totalSize);
+            this.recalcExpiredSize = Math.min(recalcExpiredSize, totalSize);
+        }
+
+        void add(Summary o) {
+            totalSize += o.totalSize;
+            obsoleteSize += o.obsoleteSize;
+            recalcObsoleteSize += o.recalcObsoleteSize;
+            expiredSize += o.expiredSize;
+            recalcExpiredSize += o.recalcExpiredSize;
+        }
+
+        void print(PrintStream out) {
+
+            if (fileNum != null) {
+                pad(out, Long.toHexString(fileNum.longValue()), 8, '0');
+            } else {
+                out.print(" TOTALS ");
+            }
+
+            int kb = (int) (totalSize / 1024);
+
+            out.print(" ");
+            pad(out, Integer.toString(kb), 9, ' ');
+            out.print(" ");
+            pad(out, Integer.toString(avgUtilization()), 3, ' ');
+            out.print(" ");
+            pad(out, Integer.toString(minUtilization()), 3, ' ');
+            out.print(" ");
+            pad(out, Integer.toString(maxUtilization()), 3, ' ');
+
+            if (doRecalcExpired) {
+
+                out.print(" ");
+                pad(out, Integer.toString(expRecalcUtilization()), 3, ' ');
+
+            } else if (doRecalcUtil) {
+
+                out.print(" ");
+                pad(out, Integer.toString(avgRecalcUtilization()), 3, ' ');
+                out.print(" ");
+                pad(out, Integer.toString(minRecalcUtilization()), 3, ' ');
+                out.print(" ");
+                pad(out, Integer.toString(maxRecalcUtilization()), 3, ' ');
+            }
+
+            out.println();
+        }
+
+        int avgUtilization() {
+            return (minUtilization() + maxUtilization()) / 2;
+        }
+
+        int minUtilization() {
+            return minUtilization(obsoleteSize, expiredSize);
+        }
+
+        int maxUtilization() {
+            return maxUtilization(obsoleteSize, expiredSize);
+        }
+
+        int expRecalcUtilization() {
+            return minUtilization(obsoleteSize, recalcExpiredSize);
+        }
+
+        int avgRecalcUtilization() {
+            return (minRecalcUtilization() + maxRecalcUtilization()) / 2;
+        }
+
+        int minRecalcUtilization() {
+            return minUtilization(recalcObsoleteSize, expiredSize);
+        }
+
+        int maxRecalcUtilization() {
+            return maxUtilization(recalcObsoleteSize, expiredSize);
+        }
+
+        private int minUtilization(long obsolete, long expired) {
+            return FileSummary.utilization(
+                Math.min(obsolete + expired, totalSize),
+                totalSize);
+        }
+
+        private int maxUtilization(long obsolete, long expired) {
+            return FileSummary.utilization(
+                Math.max(obsolete, expired),
+                totalSize);
+        }
+
+        private void pad(PrintStream out,
+                         String val,
+                         int digits,
+                         char padChar) {
+            int padSize = digits - val.length();
+            for (int i = 0; i < padSize; i += 1) {
+                out.print(padChar);
+            }
+            out.print(val);
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbStat.java b/src/com/sleepycat/je/util/DbStat.java
new file mode 100644
index 0000000..fa05f32
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbStat.java
@@ -0,0 +1,150 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.PrintStream;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseNotFoundException;
+import com.sleepycat.je.DatabaseStats;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.txn.BasicLocker;
+
+public class DbStat extends DbVerify {
+    /*
+    private String usageString =
+        "usage: " + CmdUtil.getJavaCommand(DbStat.class) + "\n" +
+        "  [-V] -s database -h dbEnvHome [-v progressInterval]\n";
+    */
+
+    private int progressInterval = 0;
+
+    public static void main(String argv[])
+        throws DatabaseException {
+
+        DbStat stat = new DbStat();
+        stat.parseArgs(argv);
+
+        int ret = 1;
+        try {
+            stat.openEnv();
+            if (stat.stats(System.err)) {
+                ret = 0;
+            }
+            stat.closeEnv();
+        } catch (Throwable T) {
+            ret = 1;
+            T.printStackTrace(System.err);
+        }
+
+        System.exit(ret);
+    }
+
+    DbStat() {
+    }
+
+    public DbStat(Environment env, String dbName) {
+        super(env, dbName, false);
+    }
+
+    @Override
+    void parseArgs(String argv[]) {
+
+        int argc = 0;
+        int nArgs = argv.length;
+        while (argc < nArgs) {
+            String thisArg = argv[argc++];
+            if (thisArg.equals("-V")) {
+                System.out.println(JEVersion.CURRENT_VERSION);
+                System.exit(0);
+            } else if (thisArg.equals("-h")) {
+                if (argc < nArgs) {
+                    envHome = new File(argv[argc++]);
+                } else {
+                    printUsage("-h requires an argument");
+                }
+            } else if (thisArg.equals("-s")) {
+                if (argc < nArgs) {
+                    dbName = argv[argc++];
+                } else {
+                    printUsage("-s requires an argument");
+                }
+            } else if (thisArg.equals("-v")) {
+                if (argc < nArgs) {
+                    progressInterval = Integer.parseInt(argv[argc++]);
+                    if (progressInterval <= 0) {
+                        printUsage("-v requires a positive argument");
+                    }
+                } else {
+                    printUsage("-v requires an argument");
+                }
+            }
+        }
+
+        if (envHome == null) {
+            printUsage("-h is a required argument");
+        }
+
+        if (dbName == null) {
+            printUsage("-s is a required argument");
+        }
+    }
+
+    public boolean stats(PrintStream out)
+        throws DatabaseNotFoundException {
+
+        final StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setShowProgressStream(out);
+        if (progressInterval > 0) {
+            statsConfig.setShowProgressInterval(progressInterval);
+        }
+
+        try {
+            final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+            final DbTree dbTree = envImpl.getDbTree();
+            BasicLocker locker =
+                BasicLocker.createBasicLocker(envImpl, false /*noWait*/);
+            DatabaseImpl dbImpl;
+
+            try {
+                dbImpl =
+                    dbTree.getDb(locker, dbName, null, false);
+            } finally {
+                locker.operationEnd();
+            }
+
+            if (dbImpl == null || dbImpl.isDeleted()) {
+                return false;
+            }
+
+            try {
+                final DatabaseStats stats = dbImpl.stat(statsConfig);
+                out.println(stats);
+            } finally {
+                dbTree.releaseDb(dbImpl);
+            }
+
+        } catch (DatabaseException DE) {
+            return false;
+        }
+
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbTruncateLog.java b/src/com/sleepycat/je/util/DbTruncateLog.java
new file mode 100644
index 0000000..e652c2c
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbTruncateLog.java
@@ -0,0 +1,159 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * DbTruncateLog is a utility that lets the user truncate the JE log, starting
+ * at a specified file and offset and continuing to the last log file,
+ * inclusive. Generally used in replication systems for handling
+ * com.sleepycat.je.rep.RollbackProhibitedException, to permit the application
+ * to interject application specific handling. Should be used with caution.
+ *
+ * The parameters for DbTruncateLog are provided through the
+ * RollbackProhibitedException instance and the exception message. The goal is
+ * to truncate the JE log after a specified file number and file offset.
+ * DbTruncateLog will automatically delete all log entries after that
+ * specified log entry.
+ *
+ * For example, suppose the JE log consists of these files:
+ *
        + *    00000002.jdb
        + *    0000000e.jdb
        + *    0000000f.jdb
        + *    00000010.jdb
        + *    00000012.jdb
        + *    0000001d.jdb
        + *    0000001e.jdb
        + *    0000001f.jdb
        + * 
+ *
+ * And the log must be truncated at file 0x1d, offset 0x34567. Users should
+ * use the following command:
+ *
+ *    DbTruncateLog -h <envDir> -f 0x1d -o 0x34567
+ *
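+ * (Illustration only, not part of the original javadoc.) The same truncation
+ * can be performed programmatically through the method this class exposes
+ * for unit tests, assuming the environment is not currently open elsewhere:
+ *
+ *    new DbTruncateLog().truncateLog(new File(envDir), 0x1dL, 0x34567L);
+ *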
+ */
+public class DbTruncateLog {
+
+    private long truncateFileNum = -1;
+    private long truncateOffset = -1;
+    private File envHome;
+
+    /**
+     * Usage:
+     *
        +     *  -h environmentDirectory
        +     *  -f file number. If hex, prefix with "0x"
        +     *  -o file offset byte. If hex, prefix with "0x"
        +     * 
+     * For example, to truncate a log to file 0xa, offset 0x1223:
+     *
        +     * DbTruncateLog -h <environmentDir> -f 0xa -o 0x1223
        +     * 
+     */
+    public static void main(String[] argv) {
+        try {
+            DbTruncateLog truncator = new DbTruncateLog();
+            truncator.parseArgs(argv);
+            truncator.truncateLog();
+        } catch (Exception e) {
+            e.printStackTrace();
+            usage();
+            System.exit(1);
+        }
+    }
+
+    public DbTruncateLog() {
+    }
+
+    private void parseArgs(String[] argv) {
+        int whichArg = 0;
+        boolean seenFile = false;
+        boolean seenOffset = false;
+
+        while (whichArg < argv.length) {
+            String nextArg = argv[whichArg];
+
+            if (nextArg.equals("-h")) {
+                whichArg++;
+                envHome = new File(CmdUtil.getArg(argv, whichArg));
+            } else if (nextArg.equals("-f")) {
+                whichArg++;
+                truncateFileNum =
+                    CmdUtil.readLongNumber(CmdUtil.getArg(argv, whichArg));
+                seenFile = true;
+            } else if (nextArg.equals("-o")) {
+                whichArg++;
+                truncateOffset =
+                    CmdUtil.readLongNumber(CmdUtil.getArg(argv, whichArg));
+                seenOffset = true;
+            } else {
+                throw new IllegalArgumentException
+                    (nextArg + " is not a supported option.");
+            }
+            whichArg++;
+        }
+
+        if (envHome == null) {
+            usage();
+            System.exit(1);
+        }
+
+        if ((!seenFile) || (!seenOffset)) {
+            usage();
+            System.exit(1);
+        }
+    }
+
+    private void truncateLog()
+        throws IOException {
+
+        truncateLog(envHome, truncateFileNum, truncateOffset);
+    }
+
+    /**
+     * @hidden
+     * Truncate the JE log to the given file and offset. For unit tests.
+     */
+    public void truncateLog(File env,
+                            long truncFileNum,
+                            long truncOffset)
+        throws IOException {
+
+        /* Make a read/write environment */
+        EnvironmentImpl envImpl =
+            CmdUtil.makeUtilityEnvironment(env, false);
+
+        /* Go through the file manager to get the JE file. Truncate. */
+        envImpl.getFileManager().truncateLog(truncFileNum, truncOffset);
+
+        envImpl.close();
+    }
+
+    private static void usage() {
+        System.out.println("Usage: " +
+                           CmdUtil.getJavaCommand(DbTruncateLog.class));
+        System.out.println(" -h <environment home directory>");
+        System.out.println(" -f <file number>");
+        System.out.println(" -o <file offset>");
+        System.out.println("JE log will be deleted starting from the " +
+                           "position presented by the file number and " +
+                           "offset to the end, inclusive.");
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbVerify.java b/src/com/sleepycat/je/util/DbVerify.java
new file mode 100644
index 0000000..90b2070
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbVerify.java
@@ -0,0 +1,304 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.PrintStream;
+import java.util.concurrent.TimeUnit;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.dbi.DatabaseId;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.util.verify.BtreeVerifier;
+import com.sleepycat.je.utilint.CmdUtil;
+
+/**
+ * Verifies the internal structures of a database.
+ *
+ * When using this utility as a command line program, and the application
+ * uses custom key comparators, be sure to add the jars or classes that
+ * contain the application's comparator classes to the classpath.
+ *
+ * To verify a database and write the errors to a stream:
+ *
        + *    DbVerify verifier = new DbVerify(env, dbName, quiet);
        + *    verifier.verify();
        + * 
+ */
+public class DbVerify {
+
+    private static final String usageString =
+        "usage: " + CmdUtil.getJavaCommand(DbVerify.class) + "\n" +
+        "       -h <dir>  # environment home directory\n" +
+        "       [-c ]     # check cleaner metadata\n" +
+        "       [-q ]     # quiet, exit with success or failure\n" +
+        "       [-s ]     # database to verify\n" +
+        "       [-v ]     # progress notification interval\n" +
+        "       [-bs ]    # how many records to check each batch\n" +
+        "       [-d ]     # delay in ms between batches\n" +
+        "       [-vdr]    # verify data records (read LNs)\n" +
+        "       [-V]      # print JE version number";
+
+    File envHome = null;
+    Environment env;
+    String dbName = null;
+
+    private VerifyConfig verifyConfig = new VerifyConfig();
+
+    /**
+     * The main used by the DbVerify utility.
+     *
+     * @param argv The arguments accepted by the DbVerify utility.
+     *
        +     * usage: java { com.sleepycat.je.util.DbVerify | -jar
        +     * je-<version>.jar DbVerify }
        +     *             [-q] [-V] -s database -h dbEnvHome [-v progressInterval]
        +     *             [-bs batchSize] [-d delayMs] [-vdr]
        +     * 
        + * + *

        + * -V - show the version of the JE library.
        + * -s - specify the database to verify
        + * -h - specify the environment directory
        + * -q - work quietly and don't display errors
        + * -v - report intermediate statistics every progressInterval Leaf Nodes
        + * -bs - specify how many records to check each batch
        + * -d - specify the delay in ms between batches
        + * -vdr - verify data records (read LNs)
        + *

+     *
+     * Note that the DbVerify command line cannot be used to verify the
+     * integrity of secondary databases, because this feature requires the
+     * secondary databases to have been opened by the application. To verify
+     * secondary database integrity, use {@link Environment#verify} or
+     * {@link com.sleepycat.je.Database#verify} instead, from within the
+     * application; a sketch of that alternative follows.
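+     *
+     * (Illustration only, not part of the original javadoc.) A minimal
+     * sketch of the recommended alternative, assuming db is an open
+     * com.sleepycat.je.Database whose secondary databases are also open:
+     *
+     *    VerifyConfig config = new VerifyConfig();
+     *    config.setPrintInfo(true);
+     *    DatabaseStats stats = db.verify(config);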

+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    public static void main(String argv[])
+        throws DatabaseException {
+
+        DbVerify verifier = new DbVerify();
+        verifier.parseArgs(argv);
+
+        boolean ret = false;
+        try {
+            verifier.openEnv();
+            ret = verifier.verify(System.err);
+            verifier.closeEnv();
+        } catch (Throwable T) {
+            T.printStackTrace(System.err);
+        }
+
+        /*
+         * Show the status; only omit it if the user asked for a quiet run
+         * and didn't specify a progress interval, in which case we can
+         * assume that they really don't want any status output.
+         *
+         * If the user runs this from the command line, presumably they'd
+         * like to see the status.
+         */
+        if (verifier.verifyConfig.getPrintInfo() ||
+            (verifier.verifyConfig.getShowProgressInterval() > 0)) {
+            System.err.println("Exit status = " + ret);
+        }
+
+        System.exit(ret ? 0 : -1);
+    }
+
+    DbVerify() {
+    }
+
+    /**
+     * Creates a DbVerify object for a specific environment and database.
+     *
+     * @param env The Environment containing the database to verify.
+     *
+     * @param dbName The name of the database to verify.
+     *
+     * @param quiet true if the verification should not produce errors to the
+     * output stream
+     *
+     * @deprecated as of 7.5, use {@link Environment#verify} or
+     * {@link com.sleepycat.je.Database#verify} instead. These methods allow
+     * specifying all {@link VerifyConfig} properties.
+     */
+    public DbVerify(Environment env,
+                    String dbName,
+                    boolean quiet) {
+        this.env = env;
+        this.dbName = dbName;
+        verifyConfig.setPrintInfo(!quiet);
+    }
+
+    void printUsage(String msg) {
+        System.err.println(msg);
+        System.err.println(usageString);
+        System.exit(-1);
+    }
+
+    void parseArgs(String argv[]) {
+        verifyConfig.setPrintInfo(true);
+        verifyConfig.setBatchDelay(0, TimeUnit.MILLISECONDS);
+
+        int argc = 0;
+        int nArgs = argv.length;
+        while (argc < nArgs) {
+            String thisArg = argv[argc++];
+            if (thisArg.equals("-q")) {
+                verifyConfig.setPrintInfo(false);
+            } else if (thisArg.equals("-V")) {
+                System.out.println(JEVersion.CURRENT_VERSION);
+                System.exit(0);
+            } else if (thisArg.equals("-h")) {
+                if (argc < nArgs) {
+                    envHome = new File(argv[argc++]);
+                } else {
+                    printUsage("-h requires an argument");
+                }
+            } else if (thisArg.equals("-s")) {
+                if (argc < nArgs) {
+                    dbName = argv[argc++];
+                } else {
+                    printUsage("-s requires an argument");
+                }
+            } else if (thisArg.equals("-v")) {
+                if (argc < nArgs) {
+                    int progressInterval = Integer.parseInt(argv[argc++]);
+                    if (progressInterval <= 0) {
+                        printUsage("-v requires a positive argument");
+                    }
+                    verifyConfig.setShowProgressInterval(progressInterval);
+                } else {
+                    printUsage("-v requires an argument");
+                }
+            } else if (thisArg.equals("-bs")) {
+                if (argc < nArgs) {
+                    int batchSize = Integer.parseInt(argv[argc++]);
+                    if (batchSize <= 0) {
+                        printUsage("-bs requires a positive argument");
+                    }
+                    verifyConfig.setBatchSize(batchSize);
+                } else {
+                    printUsage("-bs requires an argument");
+                }
+            } else if (thisArg.equals("-d")) {
+                if (argc < nArgs) {
+                    long delayMs = Long.parseLong(argv[argc++]);
+                    if (delayMs < 0) {
+                        printUsage("-d requires a positive argument");
+                    }
+                    verifyConfig.setBatchDelay(delayMs, TimeUnit.MILLISECONDS);
+                } else {
+                    printUsage("-d requires an argument");
+                }
+            } else if (thisArg.equals("-vdr")) {
+                verifyConfig.setVerifyDataRecords(true);
+            } else if (thisArg.equals("-vor")) {
+                verifyConfig.setVerifyObsoleteRecords(true);
+            }
+        }
+
+        if (envHome == null) {
+            printUsage("-h is a required argument");
+        }
+    }
+
+    void openEnv()
+        throws Exception {
+
+        if (env == null) {
+            EnvironmentConfig envConfig = new EnvironmentConfig();
+            envConfig.setReadOnly(true);
+            env = new Environment(envHome, envConfig);
+        }
+    }
+
+    void closeEnv() {
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } finally {
+            env = null;
+        }
+    }
+
+    /**
+     * Verifies a database and writes errors found to a stream.
+     *
+     * @param out The stream to write errors to.
+     *
+     * @return true if the verification found no errors.
+     * Currently true is always returned when this method returns normally,
+     * i.e., when no exception is thrown.
+     *
+     * @throws EnvironmentFailureException if a corruption is detected, or if
+     * an unexpected, internal or environment-wide failure occurs. If a
+     * persistent corruption is detected,
+     * {@link EnvironmentFailureException#isCorrupted()} will return true.
+     *
+     * @deprecated as of 7.5, use {@link Environment#verify} or
+     * {@link com.sleepycat.je.Database#verify} instead. These methods allow
+     * specifying all {@link VerifyConfig} properties.
+     */
+    public boolean verify(PrintStream out)
+        throws DatabaseException {
+
+        final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        final BtreeVerifier verifier = new BtreeVerifier(envImpl);
+        verifyConfig.setShowProgressStream(out);
+        verifier.setBtreeVerifyConfig(verifyConfig);
+
+        if (dbName == null) {
+            verifier.verifyAll();
+        } else {
+            /* Get the DB ID from the name. */
+            BasicLocker locker =
+                BasicLocker.createBasicLocker(envImpl, false /*noWait*/);
+            final DbTree dbTree = envImpl.getDbTree();
+            DatabaseImpl dbImpl = null;
+            DatabaseId dbId;
+
+            try {
+                dbImpl = dbTree.getDb(locker, dbName, null, false);
+                if (dbImpl == null) {
+                    return true;
+                }
+                dbId = dbImpl.getId();
+            } finally {
+                dbTree.releaseDb(dbImpl);
+                locker.operationEnd();
+            }
+
+            verifier.verifyDatabase(dbName, dbId);
+        }
+
+        return true;
+    }
+}
diff --git a/src/com/sleepycat/je/util/DbVerifyLog.java b/src/com/sleepycat/je/util/DbVerifyLog.java
new file mode 100644
index 0000000..6ad7938
--- /dev/null
+++ b/src/com/sleepycat/je/util/DbVerifyLog.java
@@ -0,0 +1,351 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.ThreadInterruptedException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentFailureReason;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.entry.RestoreRequired;
+import com.sleepycat.je.util.verify.VerifierUtils;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.PropUtil;
+
+/**
+ * Verifies the checksums in one or more log files.
+ *
+ * This class may be instantiated and used programmatically, or used as a
+ * command line utility as described below.
+ *
        + * usage: java { com.sleepycat.je.util.DbVerifyLog |
        + *               -jar je-<version>.jar DbVerifyLog }
        + *  [-h <dir>]      # environment home directory
        + *  [-s <file>]     # starting (minimum) file number
        + *  [-e <file>]     # ending (one past the maximum) file number
        + *  [-d <millis>]   # delay in ms between reads (default is zero)
+ *  [-V]                  # print JE version number
        + * 
+ *
+ * All arguments are optional. The current directory is used if {@code -h}
+ * is not specified. File numbers may be specified in hex (preceded by
+ * {@code 0x}) or decimal format. For convenience when copy/pasting from
+ * other output, LSN format (<file>/<offset>) is also allowed.
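+ *
+ * (Illustration only, not part of the original javadoc.) A minimal
+ * programmatic sketch, assuming env is an open Environment:
+ *
+ *    DbVerifyLog verifier = new DbVerifyLog(env);
+ *    verifier.verifyAll();  // throws LogVerificationException on corruption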

+ */
+public class DbVerifyLog {
+
+    private static final String USAGE =
+        "usage: " + CmdUtil.getJavaCommand(DbVerifyLog.class) + "\n" +
+        "       [-h <dir>]     # environment home directory\n" +
+        "       [-s <file>]    # starting (minimum) file number\n" +
+        "       [-e <file>]    # ending (one past the maximum) file number\n" +
+        "       [-d <millis>]  # delay in ms between reads (default is zero)\n" +
+        "       [-V]           # print JE version number";
+
+    private final EnvironmentImpl envImpl;
+    private final int readBufferSize;
+    private volatile boolean stopVerify = false;
+
+    private long delayMs = 0;
+
+    /**
+     * Creates a utility object for verifying the checksums in log files.
+     *
+     * The read buffer size is {@link
+     * EnvironmentConfig#LOG_ITERATOR_READ_SIZE}.
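+     *
+     * (Illustration only, not part of the original javadoc.) To use a larger
+     * read buffer, call the two-argument constructor below instead, e.g.:
+     *
+     *    DbVerifyLog verifier = new DbVerifyLog(env, 1 << 20);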

+     *
+     * @param env the {@code Environment} associated with the log.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    public DbVerifyLog(final Environment env) {
+        this(env, 0);
+    }
+
+    /**
+     * Creates a utility object for verifying log files.
+     *
+     * @param env the {@code Environment} associated with the log.
+     *
+     * @param readBufferSize is the buffer size to use. If a value less than
+     * or equal to zero is specified, {@link
+     * EnvironmentConfig#LOG_ITERATOR_READ_SIZE} is used.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    public DbVerifyLog(final Environment env, final int readBufferSize) {
+        this(DbInternal.getNonNullEnvImpl(env), readBufferSize);
+    }
+
+    /**
+     * @hidden
+     */
+    public DbVerifyLog(final EnvironmentImpl envImpl,
+                       final int readBufferSize) {
+        this.readBufferSize = (readBufferSize > 0) ?
+            readBufferSize :
+            envImpl.getConfigManager().getInt
+                (EnvironmentParams.LOG_ITERATOR_READ_SIZE);
+        this.envImpl = envImpl;
+    }
+
+    /**
+     * Verifies all log files in the environment.
+     *
+     * @throws LogVerificationException if a checksum cannot be verified or a
+     * log entry is determined to be invalid by examining its contents.
+     *
+     * @throws IOException if an IOException occurs while reading a log file.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    public void verifyAll()
+        throws LogVerificationException, IOException {
+
+        /* Same reason as in BtreeVerifier.verifyAll. */
+        if (stopVerify) {
+            return;
+        }
+
+        LoggerUtils.envLogMsg(
+            Level.INFO, envImpl, "Start verify of data files");
+
+        verify(0, Long.MAX_VALUE);
+
+        LoggerUtils.envLogMsg(
+            Level.INFO, envImpl, "End verify of data files");
+    }
+
+    /**
+     * Verifies the given range of log files in the environment.
+     *
+     * @param startFile is the lowest numbered log file to be verified.
+     *
+     * @param endFile is one greater than the highest numbered log file to be
+     * verified.
+     *
+     * @throws LogVerificationException if a checksum cannot be verified or a
+     * log entry is determined to be invalid by examining its contents.
+     *
+     * @throws IOException if an IOException occurs while reading a log file.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    public void verify(final long startFile, final long endFile)
+        throws LogVerificationException, IOException {
+
+        try {
+            final FileManager fileManager = envImpl.getFileManager();
+            final File homeDir = envImpl.getEnvironmentHome();
+            final String[] fileNames =
+                fileManager.listFileNames(startFile, endFile - 1);
+            final ByteBuffer buf = ByteBuffer.allocateDirect(readBufferSize);
+
+            for (final String fileName : fileNames) {
+
+                /*
+                 * When the env is closed, the currently executing
+                 * dataVerifier task should be canceled asap. So when the env
+                 * is closed, setStopVerifyFlag() is called in
+                 * DataVerifier.shutdown(). Here stopVerify is checked to
+                 * determine whether the dataVerifier task continues.
+                 */
+                if (stopVerify) {
+                    return;
+                }
+
+                final File file = new File(homeDir, fileName);
+
+                /*
+                 * If JE enables the Cleaner, then it is possible that the
+                 * Cleaner deletes one or more files, whose fileNum is between
+                 * startFile and endFile, during the for-loop. So on each
+                 * iteration the current 'file' may have been deleted by the
+                 * Cleaner, and 'new FileInputStream' will throw
+                 * FileNotFoundException.
+                 *
+                 * In addition, JE now has a daemon thread to detect
+                 * unexpected log file deletion. So if the
+                 * FileNotFoundException is caused by unexpected log deletion,
+                 * that daemon thread will catch this abnormal situation.
+                 * Here, we just ignore this exception.
+                 */
+                FileInputStream fis;
+                try {
+                    fis = new FileInputStream(file);
+                } catch (FileNotFoundException fne) {
+                    continue;
+                }
+
+                final FileChannel fic = fis.getChannel();
+                final LogVerificationReadableByteChannel vic =
+                    new LogVerificationReadableByteChannel(
+                        envImpl, fic, fileName);
+
+                IOException ioe = null;
+                try {
+                    while (vic.read(buf) != -1) {
+                        buf.clear();
+
+                        /* Return as soon as possible if shutdown. */
+                        if (stopVerify) {
+                            return;
+                        }
+
+                        if (delayMs > 0) {
+                            try {
+                                Thread.sleep(delayMs);
+                            } catch (InterruptedException e) {
+                                throw new ThreadInterruptedException(
+                                    envImpl, e);
+                            }
+                        }
+                    }
+                } catch (IOException e) {
+                    ioe = e;
+                    throw ioe;
+                } finally {
+                    try {
+                        /*
+                         * vic.close aims to close the associated channel fic,
+                         * but it may be redundant because fis.close also
+                         * closes fic.
+                         */
+                        fis.close();
+                        vic.close();
+                    } catch (IOException e) {
+                        if (ioe == null) {
+                            throw e;
+                        }
+                    }
+                }
+            }
+        } catch (LogVerificationException lve) {
+
+            VerifierUtils.createMarkerFileFromException(
+                RestoreRequired.FailureType.LOG_CHECKSUM,
+                lve,
+                envImpl,
+                EnvironmentFailureReason.LOG_CHECKSUM);
+
+            throw lve;
+        }
+    }
+
+    public static void main(String[] argv) {
+        try {
+
+            File envHome = new File(".");
+            long startFile = 0;
+            long endFile = Long.MAX_VALUE;
+            long delayMs = 0;
+
+            for (int whichArg = 0; whichArg < argv.length; whichArg += 1) {
+                final String nextArg = argv[whichArg];
+                if (nextArg.equals("-h")) {
+                    whichArg++;
+                    envHome = new File(CmdUtil.getArg(argv, whichArg));
+                } else if (nextArg.equals("-s")) {
+                    whichArg++;
+                    String arg = CmdUtil.getArg(argv, whichArg);
+                    final int slashOff = arg.indexOf("/");
+                    if (slashOff >= 0) {
+                        arg = arg.substring(0, slashOff);
+                    }
+                    startFile = CmdUtil.readLongNumber(arg);
+                } else if (nextArg.equals("-e")) {
+                    whichArg++;
+                    String arg = CmdUtil.getArg(argv, whichArg);
+                    final int slashOff = arg.indexOf("/");
+                    if (slashOff >= 0) {
+                        arg = arg.substring(0, slashOff);
+                    }
+                    endFile = CmdUtil.readLongNumber(arg);
+                } else if (nextArg.equals("-d")) {
+                    whichArg++;
+                    delayMs =
+                        CmdUtil.readLongNumber(CmdUtil.getArg(argv, whichArg));
+                } else if (nextArg.equals("-V")) {
+                    System.out.println(JEVersion.CURRENT_VERSION);
+                    System.exit(0);
+                } else {
+                    printUsageAndExit("Unknown argument: " + nextArg);
+                }
+            }
+
+            final EnvironmentImpl envImpl =
+                CmdUtil.makeUtilityEnvironment(envHome, true /*readOnly*/);
+            final DbVerifyLog verifier = new DbVerifyLog(envImpl, 0);
+
+            /* Set the delay time specified by the -d flag. */
+            verifier.setReadDelay(delayMs, TimeUnit.MILLISECONDS);
+
+            verifier.verify(startFile, endFile);
+            System.exit(0);
+        } catch (Throwable e) {
+            e.printStackTrace();
+            printUsageAndExit(e.toString());
+        }
+    }
+
+    private static void printUsageAndExit(String msg) {
+        if (msg != null) {
+            System.err.println(msg);
+        }
+        System.err.println(USAGE);
+        System.exit(1);
+    }
+
+    /**
+     * Configures the delay between file reads during verification. A delay
+     * between reads is needed to allow other JE components, such as HA, to
+     * make timely progress.
+     *
+     * By default there is no read delay (it is zero).
+     *
+     * Note that when using the {@link EnvironmentConfig#ENV_RUN_VERIFIER
+     * background data verifier}, the delay between reads is
+     * {@link EnvironmentConfig#VERIFY_LOG_READ_DELAY}.
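+     *
+     * (Illustration only, not part of the original javadoc.) For example, to
+     * pause 10 ms between reads:
+     *
+     *    verifier.setReadDelay(10, TimeUnit.MILLISECONDS);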

+     *
+     * @param delay the delay between reads or zero for no delay.
+     *
+     * @param unit the {@code TimeUnit} of the delay value. May be
+     * null only if delay is zero.
+     */
+    public void setReadDelay(long delay, TimeUnit unit) {
+        delayMs = PropUtil.durationToMillis(delay, unit);
+    }
+
+    /**
+     * @hidden
+     * For internal use only.
+     */
+    public void setStopVerifyFlag(boolean val) {
+        stopVerify = val;
+    }
+}
diff --git a/src/com/sleepycat/je/util/FileHandler.java b/src/com/sleepycat/je/util/FileHandler.java
new file mode 100644
index 0000000..0277ab3
--- /dev/null
+++ b/src/com/sleepycat/je/util/FileHandler.java
@@ -0,0 +1,114 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.IOException;
+import java.util.logging.ErrorManager;
+import java.util.logging.Formatter;
+import java.util.logging.Level;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.LoggerUtils;
+
+/**
+ * JE instances of java.util.logging.Logger are configured to use this
+ * implementation of java.util.logging.FileHandler. By default, the handler's
+ * level is {@link Level#INFO}. To enable this output, use the standard
+ * java.util.logging.LogManager configuration to set the desired level:
        + * com.sleepycat.je.util.FileHandler.level=INFO
        + * 
        + *
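+ *
+ * (Illustration only, not part of the original javadoc.) The same level can
+ * also be set per environment through the JE configuration parameter that
+ * maps to this handler, assuming EnvironmentConfig.FILE_LOGGING_LEVEL is
+ * available in this JE version:
+ *
+ *    EnvironmentConfig config = new EnvironmentConfig();
+ *    config.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL, "FINE");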

+ * The default destination for this output is a circular set of files named
+ * <environmentHome>/je.info.# The logging file size can be configured
+ * with standard java.util.logging.FileHandler configuration.
+ *

+ * JE augments the java.util.logging API with a JE environment parameter for
+ * setting handler levels. This is described in greater detail in Chapter 12,
+ * "Administering Berkeley DB Java Edition Applications", of the Getting
+ * Started Guide.
+ *
+ * @see Chapter 12, "Logging", in the Getting Started Guide
+ * @see Using JE Trace Logging
+ */
+public class FileHandler extends java.util.logging.FileHandler {
+
+    /*
+     * The default ErrorManager will blindly write to stderr when it sees an
+     * exception. For instance, when we send an interrupt() to the Rep Node
+     * we can see an InterruptedIOException written to stderr, but it never
+     * gets passed to the caller. For several tests, this causes irrelevant
+     * stack traces to spew out even though no exception is ever thrown at
+     * us, e.g.:
+     *
+     * ------------- Standard Error -----------------
+     * java.util.logging.ErrorManager: 2
+     * java.io.InterruptedIOException
+     *  at java.io.FileOutputStream.writeBytes(Native Method)
+     *  at java.io.FileOutputStream.write(FileOutputStream.java:260)
+     *  at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:65)
+     *  at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:123)
+     *  at java.util.logging.FileHandler$MeteredStream.flush(FileHandler.java:143)
+     *  at sun.nio.cs.StreamEncoder.implFlush(StreamEncoder.java:278)
+     *  at sun.nio.cs.StreamEncoder.flush(StreamEncoder.java:122)
+     *  at java.io.OutputStreamWriter.flush(OutputStreamWriter.java:212)
+     *  at java.util.logging.StreamHandler.flush(StreamHandler.java:225)
+     *  at java.util.logging.FileHandler.publish(FileHandler.java:556)
+     *  at com.sleepycat.je.utilint.FileRedirectHandler.publish(FileRedirectHandler.java:54)
+     *  at java.util.logging.Logger.log(Logger.java:458)
+     *  at java.util.logging.Logger.doLog(Logger.java:480)
+     *  at java.util.logging.Logger.log(Logger.java:503)
+     *  at com.sleepycat.je.utilint.LoggerUtils.logMsg(LoggerUtils.java:343)
+     *  at com.sleepycat.je.utilint.LoggerUtils.info(LoggerUtils.java:395)
+     *  at com.sleepycat.je.rep.impl.node.FeederManager.runFeeders(FeederManager.java:449)
+     *  at com.sleepycat.je.rep.impl.node.RepNode.run(RepNode.java:1198)
+     */
+    public static boolean STIFLE_DEFAULT_ERROR_MANAGER = false;
+
+    /*
+     * Using a JE specific handler lets us enable and disable output for the
+     * entire library, and specify an environment specific format and level
+     * default.
+     */
+    public FileHandler(String pattern,
+                       int limit,
+                       int count,
+                       Formatter formatter,
+                       EnvironmentImpl envImpl)
+        throws SecurityException, IOException {
+
+        super(pattern, limit, count, true /* append */);
+
+        ErrorManager em = new ErrorManager() {
+            public void error(String msg, Exception e, int code) {
+                if (STIFLE_DEFAULT_ERROR_MANAGER) {
+                    System.out.println
+                        ("FileHandler stifled exception: " + e);
+                } else {
+                    super.error(msg, e, code);
+                }
+            }
+        };
+        setErrorManager(em);
+
+        /* Messages may be formatted with an environment specific tag. */
+        setFormatter(formatter);
+
+        Level level = LoggerUtils.getHandlerLevel
+            (envImpl.getConfigManager(), EnvironmentParams.JE_FILE_LEVEL,
+             getClass().getName() + ".level");
+
+        setLevel(level);
+    }
+}
diff --git a/src/com/sleepycat/je/util/LogVerificationException.java b/src/com/sleepycat/je/util/LogVerificationException.java
new file mode 100644
index 0000000..c923290
--- /dev/null
+++ b/src/com/sleepycat/je/util/LogVerificationException.java
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.IOException;
+
+/**
+ * Thrown during log verification if a checksum cannot be verified or a log
+ * entry is determined to be invalid by examining its contents.
+ *

+ * This class extends {@code IOException} so that it can be thrown by the
+ * {@code InputStream} methods of {@link LogVerificationInputStream}.
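+ *
+ * (Illustration only, not part of the original javadoc.) Backup code reading
+ * through {@link LogVerificationInputStream} typically treats this exception
+ * as "the source file is corrupt; do not add it to the backup set":
+ *
+ *    try {
+ *        copyFiles(env, fileNames, destDir, bufSize);  // see the example in
+ *                                                      // LogVerificationInputStream
+ *    } catch (LogVerificationException e) {
+ *        // discard the partial copy; do not back up this file
+ *    }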

+ */
+public class LogVerificationException extends IOException {
+    private static final long serialVersionUID = 1L;
+
+    public LogVerificationException(final String message) {
+        super(message);
+    }
+
+    public LogVerificationException(final String message,
+                                    final Throwable cause) {
+        super(message);
+        initCause(cause);
+    }
+}
diff --git a/src/com/sleepycat/je/util/LogVerificationInputStream.java b/src/com/sleepycat/je/util/LogVerificationInputStream.java
new file mode 100644
index 0000000..f9192de
--- /dev/null
+++ b/src/com/sleepycat/je/util/LogVerificationInputStream.java
@@ -0,0 +1,273 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.LogVerifier;
+
+/**
+ * Verifies the checksums in an {@code InputStream} for a log file in a JE
+ * {@code Environment}.
+ *

+ * This {@code InputStream} reads input from some other given {@code
+ * InputStream}, and verifies checksums while reading. Its primary intended
+ * use is to verify log files that are being copied as part of a programmatic
+ * backup. It is critical that invalid files are not added to a backup set,
+ * since then both the live environment and the backup will be invalid.
+ *
+ * The following example verifies log files as they are being copied. The
+ * {@link DbBackup} class should normally be used to obtain the array of files
+ * to be copied.
+ *
        + *  void copyFiles(final Environment env,
        + *                 final String[] fileNames,
        + *                 final File destDir,
        + *                 final int bufSize)
        + *      throws IOException, DatabaseException {
        + *
        + *      final File srcDir = env.getHome();
        + *
        + *      for (final String fileName : fileNames) {
        + *
        + *          final File destFile = new File(destDir, fileName);
        + *          final FileOutputStream fos = new FileOutputStream(destFile);
        + *
        + *          final File srcFile = new File(srcDir, fileName);
        + *          final FileInputStream fis = new FileInputStream(srcFile);
        + *          final LogVerificationInputStream vis =
        + *              new LogVerificationInputStream(env, fis, fileName);
        + *
        + *          final byte[] buf = new byte[bufSize];
        + *
        + *          try {
        + *              while (true) {
        + *                  final int len = vis.read(buf);
        + *                  if (len < 0) {
        + *                      break;
        + *                  }
        + *                  fos.write(buf, 0, len);
        + *              }
        + *          } finally {
        + *              fos.close();
        + *              vis.close();
        + *          }
        + *      }
        + *  }
        + * 
        + * + *

+ * It is important to read the entire underlying input stream until the
+ * end-of-file is reached to detect incomplete entries at the end of the log
+ * file.
+ *
+ * Note that {@code mark} and {@code reset} are not supported and {@code
+ * markSupported} returns false. The default {@link InputStream}
+ * implementation of these methods is used.
+ *
+ * @see DbBackup
+ * @see DbVerifyLog
+ */
+public class LogVerificationInputStream extends InputStream {
+
+    private static final int SKIP_BUF_SIZE = 2048;
+
+    private final InputStream in;
+    private final LogVerifier verifier;
+    private byte[] skipBuf;
+
+    /**
+     * Creates a verification input stream.
+     *
+     * @param env the {@code Environment} associated with the log.
+     *
+     * @param in the underlying {@code InputStream} for the log to be read.
+     *
+     * @param fileName the file name of the input stream, for reporting in the
+     * {@code LogVerificationException}. This should be a simple file name of
+     * the form {@code NNNNNNNN.jdb}, where NNNNNNNN is the file number in
+     * hexadecimal format.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    public LogVerificationInputStream(final Environment env,
+                                      final InputStream in,
+                                      final String fileName) {
+        this(DbInternal.getNonNullEnvImpl(env), in, fileName, -1L);
+    }
+
+    /**
+     * Internal constructor. If fileNum is less than zero, it is derived from
+     * fileName.
+     */
+    LogVerificationInputStream(final EnvironmentImpl envImpl,
+                               final InputStream in,
+                               final String fileName,
+                               final long fileNum) {
+        verifier = new LogVerifier(envImpl, fileName, fileNum);
+        this.in = in;
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * This method reads the underlying {@code InputStream} and verifies the
+     * contents of the stream.
+     *
+     * @throws LogVerificationException if a checksum cannot be verified or a
+     * log entry is determined to be invalid by examining its contents.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    @Override
+    public int read()
+        throws IOException {
+
+        /*
+         * This method will rarely, if ever, be called when reading a file,
+         * so allocating a new byte array is not a performance issue and is
+         * the simplest approach.
+         */
+        final byte[] b = new byte[1];
+        final int lenRead = read(b, 0, 1);
+        return (lenRead <= 0) ? lenRead : (b[0] & 0xff);
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * This method reads the underlying {@code InputStream} and verifies the
+     * contents of the stream.
+     *
+     * @throws LogVerificationException if a checksum cannot be verified or a
+     * log entry is determined to be invalid by examining its contents.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    @Override
+    public int read(final byte b[])
+        throws IOException {
+
+        return read(b, 0, b.length);
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * This method reads the underlying {@code InputStream} and verifies the
+     * contents of the stream.
+     *
+     * @throws LogVerificationException if a checksum cannot be verified or a
+     * log entry is determined to be invalid by examining its contents.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    @Override
+    public int read(final byte b[], final int off, final int len)
+        throws IOException {
+
+        final int lenRead = in.read(b, off, len);
+        if (lenRead <= 0) {
+            if (lenRead < 0) {
+                verifier.verifyAtEof();
+            }
+            return lenRead;
+        }
+
+        verifier.verify(b, off, lenRead);
+
+        return lenRead;
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * This method reads the underlying {@code InputStream} in order to skip
+     * the required number of bytes and verifies the contents of the stream.
+     * A temporary buffer is allocated lazily for reading.
+     *
+     * @throws LogVerificationException if a checksum cannot be verified or a
+     * log entry is determined to be invalid by examining its contents.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     */
+    @Override
+    public long skip(final long bytesToSkip)
+        throws IOException {
+
+        if (bytesToSkip <= 0) {
+            return 0;
+        }
+
+        /*
+         * Like InputStream.skip, we lazily allocate a skip buffer. We must
+         * read the data in order to validate the checksum. Unlike the
+         * InputStream.skip implementation, we cannot use a static buffer
+         * because we do process the data and cannot allow multiple threads
+         * to share the same buffer.
+         */
+        if (skipBuf == null) {
+            skipBuf = new byte[SKIP_BUF_SIZE];
+        }
+
+        long remaining = bytesToSkip;
+        while (remaining > 0) {
+            final int lenRead = read
+                (skipBuf, 0, (int) Math.min(SKIP_BUF_SIZE, remaining));
+            if (lenRead < 0) {
+                break;
+            }
+            remaining -= lenRead;
+        }
+
+        return bytesToSkip - remaining;
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * This method simply performs {@code in.available()}.
+     */
+    @Override
+    public int available()
+        throws IOException {
+
+        return in.available();
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * This method simply performs {@code in.close()}.
+     */
+    @Override
+    public void close()
+        throws IOException {
+
+        in.close();
+    }
+}
diff --git a/src/com/sleepycat/je/util/LogVerificationReadableByteChannel.java b/src/com/sleepycat/je/util/LogVerificationReadableByteChannel.java
new file mode 100644
index 0000000..ec71f70
--- /dev/null
+++ b/src/com/sleepycat/je/util/LogVerificationReadableByteChannel.java
@@ -0,0 +1,216 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.ReadableByteChannel;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.utilint.LogVerifier;
+
+/**
+ * Verifies the checksums in a {@link ReadableByteChannel} for a log file in a
+ * JE {@link Environment}. This class is similar to the {@link
+ * LogVerificationInputStream} class, but permits using NIO channels and
+ * direct buffers to provide better copying performance.
+ *

+ * This {@code ReadableByteChannel} reads input from some other given {@code
+ * ReadableByteChannel}, and verifies checksums while reading. Its primary
+ * intended use is to verify log files that are being copied as part of a
+ * programmatic backup. It is critical that invalid files are not added to a
+ * backup set, since then both the live environment and the backup will be
+ * invalid.
+ *
+ * The following example verifies log files as they are being copied. The
+ * {@link DbBackup} class should normally be used to obtain the array of
+ * files to be copied.
+ *

        + *  void copyFilesNIO(final Environment env,
        + *                    final String[] fileNames,
        + *                    final File destDir,
        + *                    final int bufSize)
        + *      throws IOException, DatabaseException {
        + *
        + *      final File srcDir = env.getHome();
        + *
        + *      for (final String fileName : fileNames) {
        + *
        + *          final File destFile = new File(destDir, fileName);
        + *          final FileOutputStream fos = new FileOutputStream(destFile);
        + *          final FileChannel foc = fos.getChannel();
        + *
        + *          final File srcFile = new File(srcDir, fileName);
        + *          final FileInputStream fis = new FileInputStream(srcFile);
        + *          final FileChannel fic = fis.getChannel();
        + *          final LogVerificationReadableByteChannel vic =
        + *              new LogVerificationReadableByteChannel(env, fic, fileName);
        + *
        + *          final ByteBuffer buf = ByteBuffer.allocateDirect(bufSize);
        + *
        + *          try {
        + *              while (true) {
        + *                  final int len = vic.read(buf);
        + *                  if (len < 0) {
        + *                      break;
        + *                  }
        + *                  buf.flip();
        + *                  foc.write(buf);
        + *                  buf.clear();
        + *              }
        + *          } finally {
        + *              fos.close();
        + *              vic.close();
        + *          }
        + *      }
        + *  }
        + * 
        + * + *

+ * It is important to read the entire underlying input stream until the
+ * end-of-file is reached to detect incomplete entries at the end of the log
+ * file.
+ *
+ * @see DbBackup
+ * @see DbVerifyLog
+ * @see LogVerificationInputStream
+ */
+public class LogVerificationReadableByteChannel
+    implements ReadableByteChannel {
+
+    private static final int TEMP_SIZE = 8192;
+
+    private final ReadableByteChannel channel;
+    private final LogVerifier verifier;
+    private byte[] tempArray;
+
+    /**
+     * Creates a verification input stream.
+     *
+     * @param env the {@code Environment} associated with the log
+     *
+     * @param channel the underlying {@code ReadableByteChannel} for the log
+     * to be read
+     *
+     * @param fileName the file name of the input stream, for reporting in the
+     * {@code LogVerificationException}. This should be a simple file name of
+     * the form {@code NNNNNNNN.jdb}, where NNNNNNNN is the file number in
+     * hexadecimal format.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs
+     */
+    public LogVerificationReadableByteChannel(
+        final Environment env,
+        final ReadableByteChannel channel,
+        final String fileName) {
+
+        this(DbInternal.getNonNullEnvImpl(env), channel, fileName);
+    }
+
+    /**
+     * Creates a verification input stream.
+     *
+     * @param envImpl the {@code EnvironmentImpl} associated with the log
+     *
+     * @param channel the underlying {@code ReadableByteChannel} for the log
+     * to be read
+     *
+     * @param fileName the file name of the input stream, for reporting in the
+     * {@code LogVerificationException}. This should be a simple file name of
+     * the form {@code NNNNNNNN.jdb}, where NNNNNNNN is the file number in
+     * hexadecimal format.
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs.
+     *
+     * @hidden
+     */
+    public LogVerificationReadableByteChannel(
+        final EnvironmentImpl envImpl,
+        final ReadableByteChannel channel,
+        final String fileName) {
+
+        this.channel = channel;
+        verifier = new LogVerifier(envImpl, fileName);
+    }
+
+    /**
+     * {@inheritDoc}
+     *

+     * This method reads the underlying {@code ReadableByteChannel} and
+     * verifies the contents of the stream.
+     *
+     * @throws LogVerificationException if a checksum cannot be verified or a
+     * log entry is determined to be invalid by examining its contents
+     *
+     * @throws EnvironmentFailureException if an unexpected, internal or
+     * environment-wide failure occurs
+     */
+    @Override
+    public synchronized int read(final ByteBuffer buffer)
+        throws IOException {
+
+        final int start = buffer.position();
+        final int count = channel.read(buffer);
+        if (count < 0) {
+            verifier.verifyAtEof();
+        } else {
+            if (buffer.hasArray()) {
+                verifier.verify(buffer.array(), buffer.arrayOffset() + start,
+                                count);
+            } else {
+                if (tempArray == null) {
+                    tempArray = new byte[TEMP_SIZE];
+                }
+                buffer.position(start);
+                int len = count;
+                while (len > 0) {
+                    final int chunk = Math.min(len, TEMP_SIZE);
+                    buffer.get(tempArray, 0, chunk);
+                    verifier.verify(tempArray, 0, chunk);
+                    len -= chunk;
+                }
+            }
+        }
+        return count;
+    }
+
+    /**
+     * {@inheritDoc}
+     *

This method calls {@code close} on the underlying channel. + */ + @Override + public synchronized void close() + throws IOException { + + channel.close(); + } + + /** + * {@inheritDoc} + * + *

        This method calls {@code isOpen} on the underlying channel. + */ + @Override + public boolean isOpen() { + return channel.isOpen(); + } +} diff --git a/src/com/sleepycat/je/util/Splitter.java b/src/com/sleepycat/je/util/Splitter.java new file mode 100644 index 0000000..ba020f0 --- /dev/null +++ b/src/com/sleepycat/je/util/Splitter.java @@ -0,0 +1,115 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.util.ArrayList; +import java.util.List; + +/** + * Splitter is used to split a string based on a delimiter. + * Support includes double quoted strings, and the escape character. + * Raw tokens are returned that include the double quotes, white space, + * and escape characters. + * + */ +public class Splitter { + private static final char QUOTECHAR = '"'; + private static final char ESCAPECHAR = '\\'; + private final char delimiter; + private final List tokens = new ArrayList(); + private enum StateType {COLLECT, COLLECTANY, QUOTE}; + private StateType prevState; + private StateType state; + private int startIndex; + private int curIndex; + private String row; + + public Splitter(char delimiter) { + this.delimiter = delimiter; + } + + public String[] tokenize(String inrow) { + row = inrow; + state = StateType.COLLECT; + tokens.clear(); + startIndex = 0; + curIndex = 0; + for (int cur = 0; cur < row.length(); cur++) { + char c = row.charAt(cur); + switch (state) { + case COLLECT : + if (isDelimiter(c)) { + outputToken(); + startIndex = cur + 1; + curIndex = startIndex; + } else { + if (isQuote(c) && isQuoteState()) { + state = StateType.QUOTE; + } else if (isEscape(c)) { + prevState = state; + state = StateType.COLLECTANY; + } + curIndex++; + } + break; + case COLLECTANY: + curIndex++; + state = prevState; + break; + case QUOTE: + if (isEscape(c)) { + prevState = state; + state = StateType.COLLECTANY; + } else if (isQuote(c)) { + state = StateType.COLLECT; + } + curIndex++; + break; + } + } + outputToken(); + String[] retvals = new String[tokens.size()]; + tokens.toArray(retvals); + return retvals; + } + + private boolean isQuote(char c) { + return (c == QUOTECHAR) ? true : false; + } + + private boolean isEscape(char c) { + return (c == ESCAPECHAR) ? true : false; + } + + private boolean isDelimiter(char c) { + return (c == delimiter) ? true : false; + } + + private void outputToken() { + if (startIndex < curIndex) { + tokens.add(row.substring(startIndex, curIndex)); + } else { + tokens.add(""); + } + } + + private boolean isQuoteState() { + for (int i = startIndex; i < curIndex; i++) { + if (!Character.isWhitespace(row.charAt(i))) { + return false; + } + } + return true; + } +} diff --git a/src/com/sleepycat/je/util/package.html b/src/com/sleepycat/je/util/package.html new file mode 100644 index 0000000..15514f1 --- /dev/null +++ b/src/com/sleepycat/je/util/package.html @@ -0,0 +1,25 @@ + + + + + + +Supporting utilities. + +
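A brief usage note for the Splitter class above (an editorial sketch; the
input string is illustrative):

    Splitter splitter = new Splitter(',');
    String[] tokens = splitter.tokenize("a,\"b,c\",d");
    // tokens == { "a", "\"b,c\"", "d" } -- raw tokens keep their quotes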

        Package Specification

        +This package provides support for activities like +loading and dumping data. Most utilities can be used as a command line +tool or called programmatically. + +@see [Getting Started Guide] + + + diff --git a/src/com/sleepycat/je/util/verify/BtreeVerifier.java b/src/com/sleepycat/je/util/verify/BtreeVerifier.java new file mode 100644 index 0000000..63f7035 --- /dev/null +++ b/src/com/sleepycat/je/util/verify/BtreeVerifier.java @@ -0,0 +1,2095 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util.verify; + +import java.io.File; +import java.io.PrintStream; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Get; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.LockNotAvailableException; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.ReadOptions; +import com.sleepycat.je.SecondaryAssociation; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryIntegrityException; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.SecondaryMultiKeyCreator; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.cleaner.UtilizationProfile; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.DbType; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.WholeEntry; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.TreeWalkerStatsAccumulator; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.Pair; +import 
com.sleepycat.je.utilint.StatsAccumulator; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.utilint.StringUtils; + +public class BtreeVerifier { + + private final static LockType LOCKTYPE_NOLOCK = LockType.NONE; + private final static ReadOptions NOLOCK_UNCHANGED = new ReadOptions(); + private final static ReadOptions READLOCK_UNCHANGED = new ReadOptions(); + + static { + NOLOCK_UNCHANGED.setCacheMode(CacheMode.UNCHANGED); + NOLOCK_UNCHANGED.setLockMode(LockMode.READ_UNCOMMITTED); + + READLOCK_UNCHANGED.setCacheMode(CacheMode.UNCHANGED); + READLOCK_UNCHANGED.setLockMode(LockMode.DEFAULT); + } + + private final EnvironmentImpl envImpl; + private final FileManager fileManager; + private final LogManager logManager; + private final DbConfigManager configMgr; + private final Logger logger; + private final UtilizationProfile up; + private final FileSizeCache fsCache; + private final ObsoleteOffsetsCache ooCache; + + private volatile boolean stopVerify = false; + private VerifyConfig btreeVerifyConfig = new VerifyConfig(); + + public static TestHook databaseOperBeforeBatchCheckHook; + public static TestHook databaseOperDuringBatchCheckHook; + + /** + * Creates a BtreeVerifier object for Btree verification. + */ + public BtreeVerifier(EnvironmentImpl envImpl) { + this.envImpl = envImpl; + this.fileManager = envImpl.getFileManager(); + this.configMgr = envImpl.getConfigManager(); + this.logManager = envImpl.getLogManager(); + this.logger = envImpl.getLogger(); + this.up = envImpl.getUtilizationProfile(); + this.fsCache = createFileSizeCache(); + this.ooCache = new ObsoleteOffsetsCache(); + } + + /** + * Verifies all databases in the environment, including idDatabase and + * nameDatabase. + */ + public void verifyAll() + throws DatabaseException { + + /* + * This guarantees that once DataVerifier.shutdown has been called, + * BtreeVerifier does nothing, including not verifying the + * nameDatabase and mapDatabase. + * + * Without this, the following interleaving may appear. The premise + * is that DataVerifier.shutdown is called immediately after + * DataVerifier is created. + * + * T1 Timer + * verifyTask is created + * verifyTask is scheduled + * DataVerifier.shutdown is called + * verifyTask.cancel() + * set stop verify flag + * timer.cancel() + * check 'task == null || !task.isRunning' + * Return true because !task.isRunning + * + * For some reason, although + * verifyTask.cancel() and + * timer.cancel() are called, + * verifyTask can still execute + * once, so DataVerifier.shutdown + * does not achieve its goal. + * After we add the following code, even if verifyTask can execute, + * it will do nothing. BtreeVerifier and DbVerifyLog will just return, + * because the stop flag has already been set to true. + */ + if (stopVerify) { + return; + } + + DbTree dbTree = envImpl.getDbTree(); + + final PrintStream out = + (btreeVerifyConfig.getShowProgressStream() != null) ? + btreeVerifyConfig.getShowProgressStream() : System.err; + + final String startMsg = "Start verify all databases"; + final String stopMsg = "End verify all databases"; + + if (btreeVerifyConfig.getPrintInfo()) { + out.println(startMsg); + } + LoggerUtils.envLogMsg(Level.INFO, envImpl, startMsg); + + try { + /* Verify NameDb and MappingDb. */ + verifyOneDb( + DbType.ID.getInternalName(), DbTree.ID_DB_ID, out, + true /*verifyAll*/); + + verifyOneDb( + DbType.NAME.getInternalName(), DbTree.NAME_DB_ID, out, + true /*verifyAll*/); + + /* + * Verify all the remaining databases. 
+ * + * Get a cursor db on the naming tree. The cursor is used to get + * the name for logging, as well as the ID of each DB. Each DB + * is verified by batch, e.g. verifying 1000 records each time. So + * for each batch, the DB ID will be used to get real-time + * DatabaseImpl. If the databaseImpl is valid, i.e. not null and + * not deleted, then the next batch of records will be verified. + * + * This aims to leave a window where the DatabaseImpl is not in use + * between batches, to allow db truncate/remove operations to run. + */ + class Traversal implements CursorImpl.WithCursor { + + public boolean withCursor( + CursorImpl cursor, + @SuppressWarnings("unused") DatabaseEntry key, + @SuppressWarnings("unused") DatabaseEntry data) + throws DatabaseException { + + if (stopVerify) { + return false; + } + + final NameLN nameLN = + (NameLN) cursor.lockAndGetCurrentLN(LOCKTYPE_NOLOCK); + + if (nameLN != null && !nameLN.isDeleted()) { + + final DatabaseId dbId = nameLN.getId(); + + final String dbName = + StringUtils.fromUTF8(key.getData()); + + verifyOneDb(dbName, dbId, out, true /*verifyAll*/); + } + return true; + } + } + + Traversal traversal = new Traversal(); + + CursorImpl.traverseDbWithCursor( + dbTree.getNameDatabaseImpl(), LOCKTYPE_NOLOCK, + true /*allowEviction*/, traversal); + + } finally { + if (btreeVerifyConfig.getPrintInfo()) { + out.println(stopMsg); + } + LoggerUtils.envLogMsg(Level.INFO, envImpl, stopMsg); + } + } + + /** + * Verify one database. + */ + public BtreeStats verifyDatabase(String dbName, DatabaseId dbId) { + + PrintStream out = btreeVerifyConfig.getShowProgressStream(); + if (out == null) { + out = System.err; + } + + return verifyOneDb(dbName, dbId, out, false /*verifyAll*/); + } + + /** + * Verify one database, a batch at a time. + * + * @param verifyAll if true, we won't log INFO messages for every database + * to avoid cluttering the trace log. + */ + private BtreeStats verifyOneDb( + String dbName, + DatabaseId dbId, + PrintStream out, + boolean verifyAll) { + + final String startMsg = "Start verify database: " + dbName; + final String stopMsg = "End verify database: " + dbName; + + if (btreeVerifyConfig.getPrintInfo()) { + out.println(startMsg); + } + if (!verifyAll) { + LoggerUtils.envLogMsg(Level.INFO, envImpl, startMsg); + } + + try { + final int batchSize = btreeVerifyConfig.getBatchSize(); + final long batchDelay = + btreeVerifyConfig.getBatchDelay(TimeUnit.MILLISECONDS); + + /* + * The accumulated information for this database. + */ + final VerifierStatsAccumulator statsAcc = + new VerifierStatsAccumulator( + out, btreeVerifyConfig.getShowProgressInterval()); + + /* Check whether this DatabaseImpl is primary or secondary db. 
*/ + envImpl.checkOpen(); + DbTree dbTree = envImpl.getDbTree(); + DatabaseImpl dbImpl = dbTree.getDb(dbId); + + boolean isSecondaryDb = false; + SecondaryDatabase secDb = null; + Database priDb = null; + try { + if (dbImpl == null || dbImpl.isDeleted()) { + return new BtreeStats(); + } + + Set referringHandles = dbImpl.getReferringHandles(); + for (Database db : referringHandles) { + priDb = db; + if (db instanceof SecondaryDatabase) { + isSecondaryDb = true; + secDb = (SecondaryDatabase) db; + priDb = null; + break; + } + } + } finally { + dbTree.releaseDb(dbImpl); + } + + DatabaseEntry lastKey = null; + DatabaseEntry lastData = null; + + while (true) { + envImpl.checkOpen(); + dbTree = envImpl.getDbTree(); + dbImpl = dbTree.getDb(dbId); + + try { + if (stopVerify) { + break; + } + + if (dbImpl == null || dbImpl.isDeleted()) { + break; + } + + if (databaseOperBeforeBatchCheckHook != null) { + if (priDb != null) { + databaseOperBeforeBatchCheckHook.doHook(priDb); + } else { + databaseOperBeforeBatchCheckHook.doHook(secDb); + } + } + + WalkDatabaseTreeResult result = walkDatabaseTree( + dbImpl, isSecondaryDb, priDb, secDb, statsAcc, + lastKey, lastData, batchSize); + + if (result.noMoreRecords) { + break; + } + + lastKey = result.lastKey; + lastData = result.lastData; + } finally { + dbTree.releaseDb(dbImpl); + } + + if (batchDelay > 0) { + try { + Thread.sleep(batchDelay); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(envImpl, e); + } + } + } + + final BtreeStats stats = new BtreeStats(); + stats.setDbImplStats(statsAcc.getStats()); + + if (btreeVerifyConfig.getPrintInfo()) { + /* + * Intentionally use print, not println, because + * stats.toString() puts in a newline too. + */ + out.print(stats); + } + + return stats; + + } catch (BtreeVerificationException bve) { + /* + * A persistent corruption is detected due to the btree + * corruption, or a checksum exception was encountered when + * trying to read the entry from disk to determine whether + * the corruption is persistent. + */ + if (bve.getCause() instanceof ChecksumException) { + /* + * When a checksum exception occurs during processing of a + * Btree corruption, the checksum error should override, + * because it means that the log entry on disk is probably + * meaningless. In other words, this is really a media + * corruption, not a corruption caused by a bug. + */ + throw VerifierUtils.createMarkerFileFromException( + RestoreRequired.FailureType.LOG_CHECKSUM, + bve.getCause(), + envImpl, + EnvironmentFailureReason.LOG_CHECKSUM); + } else { + throw VerifierUtils.createMarkerFileFromException( + RestoreRequired.FailureType.BTREE_CORRUPTION, + bve, + envImpl, + EnvironmentFailureReason.BTREE_CORRUPTION); + } + } finally { + if (btreeVerifyConfig.getPrintInfo()) { + out.println(stopMsg); + } + if (!verifyAll) { + LoggerUtils.envLogMsg(Level.INFO, envImpl, stopMsg); + } + } + } + + /* + * This method is called in StatsAccumulator.verifyNode, which means that + * this method will execute every time it encounters one upperIN or BIN. + * + * In this method, only the basic structure issue of IN and the dangling + * LSN issue for upperIN are checked. The dangling LSN issue for BIN + * and other features verification, e.g. VERIFY_SECONDARIES, + * VERIFY_DATA_RECORDS and VERIFY_OBSOLETE_RECORDS, are checked when + * the cursor positions at each slot. + */ + private void basicBtreeVerify(Node node) { + /* + * When accessing upper IN, shared latch is used most of the time. 
It + * is OK to hold this latch longer than usual (because it is shared). + * So the dangling LSN issue for all slots of this upperIN can be + * checked without releasing the latch. + */ + if (node.isUpperIN()) { + verifyDanglingLSNAndObsoleteRecordsAllSlots(node); + } + + /* + * For upperIN and BIN, their basic structure is checked here. This may + * also hold the latch for a long time. + */ + verifyCommonStructure(node); + } + + /* + * Possible basic structure may contain: + * 1. keyPrefix + * 2. inMemorySize + * 3. parent IN + * 4. ordered Keys + * 5. identifier Key and so on. + * + * On 1, the keyPrefix cannot be re-calculated from the full keys here, + * since the full keys are not stored in the IN. We could get the full key + * from the LNs, but this would be very slow. + * + * On 2, the inMemorySize may be slightly inaccurate, and this would not be + * considered corruption. It is recalculated during checkpoints to account + * for errors. + * + * For 3, we should verify that the node's parent is correct, i.e. the + * parent should have a slot that refers to the child using the correct + * key. But this has already been done in the current code: + * There are three places to call IN.accumulateStats, i.e. calling + * acc.processIN: + * 1. Tree.getNextIN + * 2. Tree.search + * 3. Tree.searchSubTree + * + * At these places, before calling IN.accumulateStats, the current + * code uses latchChildShared or latchChild to check whether the + * parent is right when holding the parent latch and child latch. + * + * For 4 and 5, we can check for corruption here. + * For 4, whole keys need to be obtained using IN.getKey. + * For 5, user's comparator function needs to be called if exists. + */ + private void verifyCommonStructure(Node node) { + assert node.isIN(); + IN in = (IN) node; + + verifyOrderedKeys(in); + verifyIdentifierKey(in); + } + + /* + * Here we can not get DatabaseImpl from IN, because the IN may be + * read directly from file. + */ + private int verifyOrderedKeysInternal(IN in, DatabaseImpl dbImpl) { + Comparator userCompareToFcn = dbImpl.getKeyComparator(); + + for (int i = 1; i < in.getNEntries(); i++) { + byte[] key1 = in.getKey(i); + byte[] key2 = in.getKey(i - 1); + + int s = Key.compareKeys(key1, key2, userCompareToFcn); + if (s <= 0) { + return i; + } + } + return 0; + } + + private void verifyOrderedKeys(IN in) { + DatabaseImpl dbImpl = in.getDatabase(); + final int corruptIndex = verifyOrderedKeysInternal(in, dbImpl); + if (corruptIndex == 0) { + return; + } + + final Pair targetLsns = getTargetLsns(in); + + /* For security/privacy, we cannot output keys. */ + final String label = "IN keys are out of order. "; + final String msg1 = label + + in.toSafeString(corruptIndex - 1, corruptIndex); + + IN inFromFile = getINFromFile(targetLsns, dbImpl, msg1); + + try { + final int newCorruptIndex = + verifyOrderedKeysInternal(inFromFile, dbImpl); + + if (newCorruptIndex == 0) { + throw EnvironmentFailureException.unexpectedState( + envImpl, transientMsg(msg1)); + } else { + final String msg2 = label + + inFromFile.toSafeString( + newCorruptIndex - 1, newCorruptIndex); + + throw new BtreeVerificationException(persistentMsg(msg2)); + } + } finally { + inFromFile.releaseLatchIfOwner(); + } + } + + private void verifyIdentifierKey(IN in) { + + DatabaseImpl dbImpl = in.getDatabase(); + if (verifyIdentifierKeyInternal(in, dbImpl)) { + return; + } + + final Pair targetLsns = getTargetLsns(in); + + /* For security/privacy, we cannot output keys. 
*/ + final String label = "IdentifierKey not present in any slot. "; + final String msg1 = label + in.toSafeString(null); + + IN inFromFile = getINFromFile(targetLsns, dbImpl, msg1); + + try { + if (verifyIdentifierKeyInternal(inFromFile, dbImpl)) { + throw EnvironmentFailureException.unexpectedState( + envImpl, transientMsg(msg1)); + } else { + final String msg2 = label + inFromFile.toSafeString(null); + + throw new BtreeVerificationException(persistentMsg(msg2)); + } + } finally { + inFromFile.releaseLatchIfOwner(); + } + } + + private boolean verifyIdentifierKeyInternal(IN in, DatabaseImpl dbImpl) { + + /* + * This check can only be done for a full BIN, not for an upperIN or a + * BIN-delta. Besides, if the slot count is 0, we also skip this check. + */ + if (in.isUpperIN() || in.isBINDelta() || in.getNEntries() == 0) { + return true; + } + + byte[] identifierKey = in.getIdentifierKey(); + if (identifierKey == null) { + return false; + } + + /* + * There are two problematic cases involving the identifierKey, both + * caused by errors in previous code: + * + * (1). The identifierKey is a prefix key due to the DupConvert bug. + * + * When reading log files written by JE 4.1 or earlier, the + * identifier key may be incorrect because DupConvert did not + * convert it correctly. DupConvert converts the identifier key to + * a prefix key, so it will not match the complete key in any slot. + * + * We should probably fix DupConvert. But even if we fix it now, + * it won't help users of JE 5.0 and above who have already upgraded + * from JE 4.1 or earlier, because DupConvert is only used when + * reading log files written by JE 4.1 or earlier. + * + * This issue seems harmless, at least no user has reported errors + * caused by it. So we can choose to ignore this issue. Normally, we + * can identify this issue by checking the end of the key for the + * PREFIX_ONLY value. But unfortunately this will also ignore + * identifier keys that happen to have the PREFIX_ONLY value at the + * end of a complete key (in the user's data). + * + * Considering the following second issue, we choose not to check + * the identifierKey for environments initially created with + * LogEntryType.LOG_VERSION less than 15, where 15 is just the new + * log version of JE after we fix the following second issue. + * + * (2). The identifierKey is not in any slot due to the BIN-delta + * mutation bug. + * + * The fullBIN identifierKey may have changed when reconstituteBIN + * called BIN.compress. The previous code forgot to reset it. Now + * we fix this by resetting the identifier in BIN.mutateToFullBIN. + * + * For the problematic identifierKey which is caused by the + * BIN-delta mutation bug, we do not have good methods to correct + * them. We can only detect them. + * + * The problem with detecting them is that we know it is incorrect + * in past releases, but even when it is incorrect, we don't know + * the impact on the app in a particular case. It is possible that + * the app is working OK, even though the identifier key is + * incorrect. So if we detect it and the app stops working + * (because we invalidate the env) then we may be making things + * worse for the app -- this may not be what the user wants. + * + * So, combining (1) and (2) above, we need to add a way to know the + * earliest log version of the env. Then we can only validate the + * identifierKey when this version is >= 15, where 15 is just the new + * log version of JE after we fix (2). See DbTree.initialLogVersion + * and LogEntryType.LOG_VERSION. 
+ */ + if (envImpl.getDbTree().getInitialLogVersion() < 15) { + return true; + } + + Comparator userCompareToFcn = dbImpl.getKeyComparator(); + + for (int i = 0; i < in.getNEntries(); i++) { + byte[] key = in.getKey(i); + if (Key.compareKeys(identifierKey, key, userCompareToFcn) == 0) { + return true; + } + } + + return false; + } + + /* + * For an upperIN, we verify all the slots at one time. + * + * Note that for upperINs, we only need to verify the dangling LSN issue + * and the basic structure issue. The former is checked here and the + * latter is checked in verifyCommonStructure below. + */ + private void verifyDanglingLSNAndObsoleteRecordsAllSlots(Node node) { + assert node.isUpperIN(); + IN in = (IN) node; + for (int i = 0; i < in.getNEntries(); i++) { + verifyDanglingLSNAndObsoleteRecordsOneSlot(i, in, false /*isBin*/); + } + } + + private void verifyDanglingLSNAndObsoleteRecordsOneSlot( + int index, + IN in, + boolean isBin) { + + /* If the slot of the BIN is defunct, just return. */ + if (isBin && ((BIN) in).isDefunct(index)) { + return; + } + + verifyDanglingLSN(index, in, isBin); + verifyObsoleteRecords(index, in, isBin); + } + + /* + * Verify the dangling LSN issue for each slot of a BIN or IN. + */ + private void verifyDanglingLSN(int index, IN in, boolean isBin) { + + /* + * If the environment is opened with LOG_MEMORY_ONLY set to true, + * there will be no log files. We just ignore it. + */ + if (envImpl.isMemOnly()) { + return; + } + + DatabaseImpl dbImpl = in.getDatabase(); + + DanglingLSNCheckResult result = + verifyDanglingLSNInternal(index, in, isBin, dbImpl); + + if (result.problematicIndex < 0) { + return; + } + + final Pair targetLsns = getTargetLsns(in); + + /* For security/privacy, we cannot output keys. */ + final String label = "LSN is invalid. "; + final String msg1 = + label + result.getReason() + + in.toSafeString(result.problematicIndex); + + IN inFromFile = getINFromFile(targetLsns, dbImpl, msg1); + + try { + boolean findAgain = false; + for (int i = 0; i < inFromFile.getNEntries(); i++) { + result = + verifyDanglingLSNInternal(i, inFromFile, isBin, dbImpl); + if (result.problematicIndex >= 0) { + findAgain = true; + break; + } + } + + if (!findAgain) { + throw EnvironmentFailureException.unexpectedState( + envImpl, transientMsg(msg1)); + } else { + final String msg2 = + label + result.getReason() + + inFromFile.toSafeString(result.problematicIndex); + + throw new BtreeVerificationException(persistentMsg(msg2)); + } + } finally { + inFromFile.releaseLatchIfOwner(); + } + } + + private DanglingLSNCheckResult verifyDanglingLSNInternal( + int index, + IN in, + boolean isBin, + DatabaseImpl databaseImpl) { + + /* + * For a BIN, if the database has duplicates or the LN is an + * embedded LN, or the slot is deleted, we do not check the + * dangling LSN issue. + */ + if (isBin && + (in.isEmbeddedLN(index) || databaseImpl.getSortedDuplicates() || + databaseImpl.isLNImmediatelyObsolete() || + ((BIN) in).isDefunct(index))) { + return DanglingLSNCheckResult.NO_DANGLING_LSN; + } + + final long curLsn = in.getLsn(index); + if (DbLsn.isTransientOrNull(curLsn)) { + return DanglingLSNCheckResult.NO_DANGLING_LSN; + } + final long fileNum = DbLsn.getFileNumber(curLsn); + final long fileOffset = DbLsn.getFileOffset(curLsn); + + /* + * Check whether the corresponding file exists and whether the + * LSN's offset is less than the file's length. 
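+ *
+ * For illustration (added editorially), the predicate applied just below,
+ * in isolation; a JE LSN packs the file number into the high 32 bits and
+ * the file offset into the low 32 bits of a single long:
+ *
+ *   long fileNum    = DbLsn.getFileNumber(curLsn); // high 32 bits
+ *   long fileOffset = DbLsn.getFileOffset(curLsn); // low 32 bits
+ *   boolean dangling = fileOffset + lastLoggedSize > fileSize;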
+ */ + final int lastLoggedSize = in.getLastLoggedSize(index); + final FileSizeInfo fsInfo = getFileSize(fileNum); + if (fileOffset + lastLoggedSize > fsInfo.size) { + if (fsInfo.size == -1) { + return new DanglingLSNCheckResult(index, true, fsInfo); + } + return new DanglingLSNCheckResult(index, false, fsInfo); + } + + return DanglingLSNCheckResult.NO_DANGLING_LSN; + } + + private static class DanglingLSNCheckResult { + + private static final DanglingLSNCheckResult NO_DANGLING_LSN = + new DanglingLSNCheckResult(-1, true, null); + + /* + * -1 means that no dangling LSN issue exists. An integer that is + * >= 0 gives the location of the problematic slot. + */ + int problematicIndex; + + /* + * True means the issue is because the file does not exist. False + * means that the issue is because the log entry exceeds the end + * of the file. + */ + boolean fileNotExist; + FileSizeInfo fsInfo; + + DanglingLSNCheckResult( + int problematicIndex, + boolean fileNotExist, + FileSizeInfo fsInfo) { + this.problematicIndex = problematicIndex; + this.fileNotExist = fileNotExist; + this.fsInfo = fsInfo; + } + + String getReason() { + return (fileNotExist ? "File does not exist. " : + "Offset[+lastLoggedSize] exceeds the end of the file. ") + + "fileSize=" + fsInfo.size + ". " + fsInfo.getReason(); + } + } + + private static class FileSizeInfo { + boolean sizeFromLastFile; + + /* + * True if the file size was previously in the FileSizeCache, + * false if it is calculated and added to the cache. + */ + boolean sizeFromCache; + int size; + + FileSizeInfo( + boolean sizeFromLastFile, + boolean sizeFromCache, + int size) { + this.sizeFromLastFile = sizeFromLastFile; + this.sizeFromCache = sizeFromCache; + this.size = size; + } + + String getReason() { + return (sizeFromLastFile ? "File size from last file" : + (sizeFromCache ? "File size previously cached" : + "File size added to cache")) + ". "; + } + } + + /** + * @return a FileSizeInfo whose size is >= 0 only if the file exists. + */ + private FileSizeInfo getFileSize(long fileNum) { + /* + * The last file is a special case, because its totalSize is changing + * and this file in the FileSummary is not volatile. For the last file + * we can use getNextLsn to get the fileNum and offset of the last + * file. + */ + long nextLsn = fileManager.getNextLsn(); + if (fileNum == DbLsn.getFileNumber(nextLsn)) { + return new FileSizeInfo( + true, false, (int) DbLsn.getFileOffset(nextLsn)); + } else { + Pair result = fsCache.getFileSize(fileNum); + return new FileSizeInfo(false, result.first(), result.second()); + } + } + + private interface FileSizeCache { + + /** + * @return {wasCached, size} + */ + Pair getFileSize(long fileNum); + } + + private FileSizeCache createFileSizeCache() { + + /* + * Currently we don't use the UtilizationProfile for getting file + * sizes because testing has shown it is inaccurate. This needs + * further debugging. + */ + final boolean USE_UP = false; + if (USE_UP) { + return new UPFileSizeCache(); + } else { + return new DirectFileSizeCache(); + } + } + + /** + * Used to get file sizes directly from the File class. 
+ */ + private class DirectFileSizeCache implements FileSizeCache { + + private final Map cache; + + DirectFileSizeCache() { + cache = new HashMap<>(); + } + + @Override + public Pair getFileSize(long fileNum) { + + Integer size = cache.get(fileNum); + if (size != null) { + return new Pair<>(true, size); + } + + final File file = new File(fileManager.getFullFileName(fileNum)); + size = (int) file.length(); + cache.put(fileNum, size); + return new Pair<>(false, size); + } + } + + /* + * Use a map to cache the file total-size info. + * 1. First call UtilizationProfile.getFileSizeSummaryMap to get an + * initial copy of the info. + * 2. When a file is not present in the cached map, call + * UtilizationProfile.getFileSize to get it and add its total size + * to the cached map. + * 3. The last file is a special case, because its totalSize is changing + * and this file in the FileSummary is not volatile. For the last file, + * we handle it in getFileSize, i.e. using getNextLsn to get the fileNum + * and offset of the last file. + */ + private class UPFileSizeCache implements FileSizeCache { + + final SortedMap fileSizeSummaryMap; + + UPFileSizeCache() { + fileSizeSummaryMap = up.getFileSizeSummaryMap(); + } + + @Override + public Pair getFileSize(long fileNum) { + + if (fileSizeSummaryMap.containsKey(fileNum)) { + return new Pair<>(true, fileSizeSummaryMap.get(fileNum)); + } + + int size = up.getFileSize(fileNum); + if (size != -1) { + fileSizeSummaryMap.put(fileNum, size); + } + return new Pair<>(false, size); + } + } + + /* + * Verify the obsolete records issue for each slot of a BIN or IN. + */ + private void verifyObsoleteRecords(int index, IN in, boolean isBin) { + if (!btreeVerifyConfig.getVerifyObsoleteRecords()) { + return; + } + + final DatabaseImpl databaseImpl = in.getDatabase(); + /* + * For a BIN, if the database has duplicates or the LN is an + * embedded LN, we do not check the obsolete records issue. + */ + if (isBin && + (in.isEmbeddedLN(index) || databaseImpl.getSortedDuplicates() || + databaseImpl.isLNImmediatelyObsolete())) { + return; + } + + final long curLsn = in.getLsn(index); + final long fileNum = DbLsn.getFileNumber(curLsn); + + /* + * TODO: How to check whether the corruption is persistent? + * For a dangling LSN, we can read the latest written entry from the + * log. Although the CRUD operations may cause some slots of the + * read log entry to be obsolete, in the normal case the file + * containing these slots should not have been deleted. [Is this + * right?] So checking the logged entry is rational. + * + * But for checking obsolete records, the slots of the read log + * entry, at the current time point, may really be located at the + * obsolete offsets. Is it still rational to re-check the read log + * entry? The answer is yes. + * + * If an IN slot has an LSN that is obsolete, and that slot was + * added or changed recently and has not been flushed to disk, + * then the corruption is not persistent. So re-fetching the IN from + * disk is needed only to see if the LSN is persistently present + * in the slot. + */ + final long[] offsets = ooCache.getOffsets(fileNum); + + /* + * If the active lsn exists in the obsolete lsn offsets, throw + * EFE.unexpectedException. 
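+ *
+ * Editorial note: this lookup relies on getObsoleteDetailSorted returning
+ * offsets in ascending order, which is what makes the binary search below
+ * valid. For example:
+ *
+ *   long[] offsets = {0x1000L, 0x2400L, 0x8800L};              // sorted
+ *   boolean hit = Arrays.binarySearch(offsets, 0x2400L) >= 0;  // true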
+ */ + if (Arrays.binarySearch(offsets, DbLsn.getFileOffset(curLsn)) >= 0) { + throw new EnvironmentFailureException( + envImpl, + EnvironmentFailureReason.UNEXPECTED_EXCEPTION_FATAL, + "Active lsn is obsolete: " + DbLsn.getNoFormatString(curLsn) + + in.toSafeString(index)); + } + } + + /* + * Similar to FileSummaryCache but holds obsolete LSN offsets. + * + * This cache may contain outdated information, since LSNs may become + * obsolete during the verification process, and the cache is not updated. + * This is OK because: + * - an obsolete LSN can never become active again, and + * - there is no requirement to detect corruption that occurs during the + * scan. + */ + private class ObsoleteOffsetsCache { + final SortedMap obsoleteOffsetsMap; + + ObsoleteOffsetsCache() { + obsoleteOffsetsMap = new TreeMap<>(); + } + + long[] getOffsets(long fileNum) { + if (obsoleteOffsetsMap.containsKey(fileNum)) { + return obsoleteOffsetsMap.get(fileNum); + } + + long[] offsets = up.getObsoleteDetailSorted(fileNum); + obsoleteOffsetsMap.put(fileNum, offsets); + return offsets; + } + } + + private String persistentMsg(String msg) { + return "Btree corruption was detected and is persistent. Re-opening " + + "the Environment is not possible without restoring from backup " + + "or from another node. " + msg; + } + + private String transientMsg(String msg) { + return "Btree corruption was detected in memory, but does not appear " + + "to be persistent. Re-opening the Environment may be possible. " + + msg; + } + + private Pair getTargetLsns(IN in) { + long targetLsn1; + long targetLsn2 = DbLsn.NULL_LSN; + if (in.isUpperIN()) { + targetLsn1 = in.getLastFullLsn(); + targetLsn2 = DbLsn.NULL_LSN; + } else { + BIN bin = (BIN) in; + long lastDeltaVersion = bin.getLastDeltaLsn(); + if (lastDeltaVersion == DbLsn.NULL_LSN) { + /* + * The most recently written logrec for this BIN instance + * is a full BIN. + */ + targetLsn1 = bin.getLastFullLsn(); + } else { + /* + * The most recently written logrec for this BIN instance + * is a BIN-delta. + */ + targetLsn1 = lastDeltaVersion; + targetLsn2 = bin.getLastFullLsn(); + } + } + return new Pair<>(targetLsn1, targetLsn2); + } + + /* + * When detecting btree corruption, we want to directly read the related + * BIN, or BIN-delta, or both from the log file to confirm whether the + * corruption is persistent. + * + * @return latched IN. 
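+ *
+ * A sketch of the delta case (added editorially), where both target LSNs
+ * are read and merged; readFromLog is a schematic helper, not the real
+ * API used below:
+ *
+ *   BIN delta = readFromLog(targetLsns.first());  // most recent logrec
+ *   BIN full  = readFromLog(targetLsns.second()); // last full version
+ *   delta.reconstituteBIN(dbImpl, full, false);   // apply delta onto full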
+ */ + private IN getINFromFile( + final Pair targetLsns, + final DatabaseImpl dbImpl, + final String msg) { + + WholeEntry entry; + WholeEntry optionalFullBinEntry = null; + + /* Read the entry directly from log */ + try { + entry = logManager.getLogEntryDirectFromFile(targetLsns.first()); + if (targetLsns.second() != DbLsn.NULL_LSN) { + optionalFullBinEntry = + logManager.getLogEntryDirectFromFile(targetLsns.second()); + } + if (entry == null && optionalFullBinEntry == null) { + throw EnvironmentFailureException.unexpectedState( + envImpl, transientMsg(msg)); + } + } catch (ChecksumException ce) { + throw new BtreeVerificationException(null, ce); + } + + IN inFromFile = null; + if (entry != null) { + inFromFile = (IN) entry.getEntry().getMainItem(); + } + + if (optionalFullBinEntry != null) { + BIN optionalFullBin = + (BIN) optionalFullBinEntry.getEntry().getMainItem(); + if (inFromFile != null) { + ((BIN) inFromFile).reconstituteBIN( + dbImpl, optionalFullBin, false); + } + inFromFile = optionalFullBin; + } + + inFromFile.latchNoUpdateLRU(dbImpl); + return inFromFile; + } + + private static class WalkDatabaseTreeResult { + DatabaseEntry lastKey; + DatabaseEntry lastData; + boolean noMoreRecords; + + private static final WalkDatabaseTreeResult NO_MORE_RECORDS = + new WalkDatabaseTreeResult(null, null, true); + + WalkDatabaseTreeResult( + DatabaseEntry lastKey, + DatabaseEntry lastData, + boolean noMoreRecords) { + + this.lastKey = lastKey; + this.lastData = lastData; + this.noMoreRecords = noMoreRecords; + } + } + + private boolean findFirstRecord( + DatabaseImpl dbImpl, + Cursor cursor, + DatabaseEntry lastKey, + DatabaseEntry lastData) { + + DatabaseEntry usedKey = new DatabaseEntry(); + DatabaseEntry usedData = new DatabaseEntry(); + /* The first record of this db. */ + if (lastKey == null) { + return cursor.get( + usedKey, usedData, Get.FIRST, NOLOCK_UNCHANGED) != null; + } + + /* Find the first record according to (lastKey, lastData). */ + usedKey = new DatabaseEntry( + lastKey.getData(), lastKey.getOffset(), lastKey.getSize()); + usedData = new DatabaseEntry( + lastData.getData(), lastData.getOffset(), lastData.getSize()); + + boolean isDuplicated = dbImpl.getSortedDuplicates(); + OperationResult result = null; + if (isDuplicated) { + result = cursor.get( + usedKey, usedData, Get.SEARCH_BOTH_GTE, NOLOCK_UNCHANGED); + if (result != null) { + if (!usedData.equals(lastData)) { + /* Find next dup of lastKey. */ + return true; + } + + /* + * Find lastKey/lastData. Move to the next dup of lastKey or + * move to the first dup of next key. + */ + return cursor.get( + usedKey, usedData, Get.NEXT, NOLOCK_UNCHANGED) != null; + } else { + result = cursor.get( + usedKey, usedData, Get.SEARCH_GTE, NOLOCK_UNCHANGED); + if (result == null) { + /* No more records. */ + return false; + } + + if (!usedKey.equals(lastKey)) { + /* Find the first dup of next key. */ + return true; + } + + /* + * Find the first dup of lastKey. Skip over dups of lastKey + * to the first dup of next key. This may miss "phantoms" but + * that is OK -- see comments 26 and 28 in [#25960]. + */ + return cursor.get( + usedKey, usedData, Get.NEXT_NO_DUP, + NOLOCK_UNCHANGED) != null; + } + } else { + result = cursor.get( + usedKey, usedData, Get.SEARCH_GTE, NOLOCK_UNCHANGED); + if (result == null) { + /* No more records. */ + return false; + } + + if (!usedKey.equals(lastKey)) { + /* Find next key. */ + return true; + } + + /* Find lastKey. Move to next key. 
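+ *
+ * Worked example of this resume step (added editorially), with keys
+ * {a, b, c} and lastKey = "b": SEARCH_GTE positions the cursor at "b"
+ * (exact match), so one NEXT lands on "c", the first unverified record.
+ * If "b" was deleted between batches, SEARCH_GTE lands directly on "c"
+ * and the extra NEXT is skipped because usedKey no longer equals lastKey.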
*/ + return cursor.get( + usedKey, usedData, Get.NEXT, NOLOCK_UNCHANGED) != null; + } + } + + /** + * Verify one batch of records for the given DB. + */ + private WalkDatabaseTreeResult walkDatabaseTree( + DatabaseImpl dbImpl, + boolean isSecondaryDb, + Database priDb, + SecondaryDatabase secDb, + TreeWalkerStatsAccumulator statsAcc, + DatabaseEntry lastKey, + DatabaseEntry lastData, + int batchSize) { + + /* Traverse the database. */ + Tree tree = dbImpl.getTree(); + EnvironmentImpl.incThreadLocalReferenceCount(); + final Locker locker = + LockerFactory.getInternalReadOperationLocker(envImpl); + Cursor cursor = DbInternal.makeCursor(dbImpl, locker, null, false); + CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + cursorImpl.setTreeStatsAccumulator(statsAcc); + tree.setTreeStatsAccumulator(statsAcc); + + /* + * Use local caching to reduce DbTree.getDb overhead. Do not call + * releaseDb after getDb with the dbCache, since the entire dbCache + * will be released at the end of this method. + */ + final Map dbCache = new HashMap<>(); + + try { + /* + * Four parts need to be checked: basic, index, primary record and + * obsolete. 'basic' and 'obsolete' are checked for each slot + * for both secondary db and primary db, and they do not need + * the data portion. + * + * Data portion is needed only for the following two situations: + * 1. Db is secondary and index needs to be checked + * 2. Db is primary, verifySecondaries and verifyDataRecords are + * both true. + * + * Actually, now we have the following combinations: + * verifySecondaries/verifyDataRecords Meaning + * + * No No Do not read the primary LN. + * Do not verify any secondaries. + * + * Yes No Do not read the primary LN. + * Check that the secondary records refer to + * existing primary records. + * + * No Yes Read the LN as a basic check. + * Do not verify any secondaries. + * + * Yes Yes Read the LN as a basic check. + * Check that the secondary records refer to + * existing primary records. + * Check that primary records refer to + * existing secondary records. + * + * According to above combinations, only when verifySecondaries + * and verifyDataRecords are both true, for a primary database, + * we will check that primary records refer to existing secondary + * records. + * + * But only if verifyDataRecords is true, for a primary database, + * we need to check that the primary LN is valid, i.e. we need + * to read data portion. This is why we do not use + * verifyPrimaryDataRecords to replace (!isSecondaryDb && + * btreeVerifyConfig.getVerifyDataRecords()) when determining + * whether we need to read the data portion. + */ + boolean verifyPrimaryDataRecords = + priDb != null && + btreeVerifyConfig.getVerifySecondaries() && + btreeVerifyConfig.getVerifyDataRecords(); + boolean verifySecondary = + isSecondaryDb && + btreeVerifyConfig.getVerifySecondaries(); + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + if (!(verifySecondary || + (priDb != null && btreeVerifyConfig.getVerifyDataRecords()))) { + + foundData.setPartial(0, 0, true); + } + + /* Whether the first record for this round check exists. */ + if (!findFirstRecord(dbImpl, cursor, lastKey, lastData)) { + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + } + + /* + * The previous readPrimaryAfterGet implementation has a problem + * when used in btree verification: it cannot detect + * corruption when secDirtyRead is true and the primary record + * is NOT_FOUND. 
In this situation, we don't have any locks, + * so we don't know the true current state of either the primary or + * secondary record. + * + * Therefore, for the index verification, we need to lock the + * secondary first, and then use a non-blocking lock on the primary + * record to avoid deadlock. If we cannot lock the primary + * record, we can just skip the verification. + * + * If verifyPrimaryDataRecords is true, we will first get the + * record without acquiring a lock in this method and then try + * to acquire a Read lock in verifyPrimaryData. So in + * walkDatabaseTree we use READLOCK_UNCHANGED only when + * verifySecondary is true. + */ + int recordCount = 0; + while (++recordCount <= batchSize) { + + /* Stop the verification process asap. */ + if (stopVerify) { + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + } + + try { + /* + * <1> For a primary database: + * 1. The cursor.get(CURRENT, NEXT) calls used in this method + * all use lockMode NOLOCK_UNCHANGED. So there will not + * be a LockConflictException. + * 2. The (foundKey, foundData) will not be used in this + * method, so it is OK that their data array is null. + * The cursor.get(CURRENT, NEXT) in this method only aims + * to locate the position. Note that, although we may verify + * primary record data, we will do that in + * verifyPrimaryData. + * + * <2> For a secondary database (NOT verifying the secondary): + * the same as for a primary database. + * + * <3> For a secondary database (verifying the secondary), a + * simple approach is problematic: + * 1. Before verifying the secondary record, we first need + * to READ lock the secondary record. So + * LockConflictException may be thrown. + * 2. The (foundKey, foundData) will be used to find the + * corresponding primary record. So foundData (priKey) + * cannot be null. + * 3. We need to use nonSticky==true to avoid a deadlock + * when calling cursor.get(NEXT). But if cursor.get(NEXT) + * cannot succeed due to LockConflictException or + * something else, the cursorImpl will be reset, i.e. its + * previous location will be lost. This is not what we + * expect. + * + * The solution: + * 1. Use nonSticky==true. + * 2. Use LockMode.READ_UNCOMMITTED when doing Get.NEXT. + * This resolves 3 above. + * Because Get.NEXT will not acquire a lock, if more + * records exist, Get.NEXT can always succeed, + * i.e. Get.NEXT can move to the next record. So + * 'nonSticky==true' will not cause the cursorImpl to + * move to an invalid position. + * 3. Use Get.CURRENT with LockMode.DEFAULT to + * lock the record and read the record. + * This resolves 1 above. + * This will acquire a READ lock on the record. + * 4. If Get.CURRENT in (3) returns null, i.e. the record + * may have been deleted, then we will throw an internal + * exception to cause the cursor to move to the next slot. + * This resolves 2 above. + */ + if (!isSecondaryDb || !verifySecondary) { + if (recordCount == 1) { + cursor.get( + foundKey, foundData, Get.CURRENT, + NOLOCK_UNCHANGED); + } + } else { + OperationResult result = cursor.get( + foundKey, foundData, Get.CURRENT, + READLOCK_UNCHANGED); + if (result == null) { + throw new MoveToNextRecordException(); + } + } + + /* + * Note that if we change this code to set nonSticky to be + * false for the cursor, then Get.NEXT will create a new + * CursorImpl, and we must refresh the CursorImpl variable. 
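+ *
+ * A sketch of what that refresh would look like (added editorially; it is
+ * not needed in the current nonSticky==true code):
+ *
+ *   cursor.get(foundKey, foundData, Get.NEXT, NOLOCK_UNCHANGED);
+ *   cursorImpl = DbInternal.getCursorImpl(cursor); // re-fetch after NEXT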
+ */ + cursorImpl.latchBIN(); + BIN bin = cursorImpl.getBIN(); + try { + verifyDanglingLSNAndObsoleteRecordsOneSlot( + cursorImpl.getIndex(), bin, true); + } finally { + cursorImpl.releaseBIN(); + } + + if (databaseOperDuringBatchCheckHook != null) { + if (priDb != null) { + databaseOperDuringBatchCheckHook.doHook(priDb); + } else { + databaseOperDuringBatchCheckHook.doHook(secDb); + } + } + + /* + * When verifying index or foreign constraint, we + * first READ-lock the secondary record and then try + * to non-blocking READ-lock the primary record. Using + * non-blocking is to avoid deadlocks, since we are locking + * in the reverse of the usual order. + * + * If the non-blocking lock fails with + * LockNotAvailableException, we will not be able to detect + * corruption and we should ignore this exception and + * continue verification. In this case the primary record + * is write-locked and is being modified by another thread, + * so it is OK to skip this verification step in this case. + * This is a compromise. + */ + if (verifySecondary) { + + /* + * When isCorrupted returns true we should stop + * verifying this db, just like when + * SecondaryIntegrityException is thrown. + */ + if (DbInternal.isCorrupted(secDb)) { + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + } + + /* For secondary database, check index integrity. */ + verifyIndex( + dbImpl, secDb, cursor, foundKey, foundData); + + /* For secondary database, check foreign constraint. */ + verifyForeignConstraint( + secDb, cursor, foundKey, dbCache); + } + + /* For a primary database, verify data. */ + if (verifyPrimaryDataRecords) { + verifyPrimaryData(dbImpl, priDb, cursor, dbCache); + } + + /* + * Even if we do not need the data part, for example, for + * a secondary database which does not need to check + * index issue, we may still need the data part to locate + * the first record of next batch. So for the last record + * of this batch, we need to get the data part. + */ + if (recordCount == batchSize - 1) { + foundData = new DatabaseEntry(); + } + + /* + * For the last record of each batch, we should do all + * above check. But we can NOT continue to get NEXT + * record. + */ + if (recordCount == batchSize) { + break; + } + + if (cursor.get( + foundKey, foundData, Get.NEXT, + NOLOCK_UNCHANGED) == null) { + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + } + + } catch (StopDbVerificationException sve) { + /* + * StopDbVerificationException is thrown when + * 1. In verifyIndex, a SecondaryIntegrityException, which + * is caused by index corruption, or a + * IllegalStateException, which is caused by accessing + * the closed primary database, is caught. + * 2. In verifyForeignConstraint, the DatabaseImpl of the + * foreign database can not be gotten or the + * corresponding foreign record does not exist. + * For both situations, we must stop verification of this + * db, but we should allow verification of other dbs to + * continue. + * + * No warning message needs to be logged here. For SIE, + * a message has already been logged when throwing SIE at + * the lower level. + */ + + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + + } catch (LockConflictException|MoveToNextRecordException e) { + /* + * LockConflictException can be thrown by + * Cursor.get(CURRENT) with READLOCK_UNCHANGED, which + * could be due to a normal timeout. Just move the cursor + * to next record. + * + * MoveToNextRecordException indicates that + * cursor.get(CURRENT) returns null because the record has + * been deleted. 
Just move the cursor to the next record. + * + * These two exceptions should not prevent verification of + * other records in the same db, so we simply ignore them. + * + * If the cursor.get(NEXT, NOLOCK_UNCHANGED) here throws + * an exception, it will not be a LockConflictException + * because NOLOCK_UNCHANGED is used; it is an unknown + * and unexpected exception, and we handle it in the same + * way as the following RuntimeException. + * + * TODO: A verification statistic is needed to find out + * how many times this happens. This should be returned + * and logged at the end of verification. + */ + try { + if (cursor.get( + foundKey, foundData, Get.NEXT, + NOLOCK_UNCHANGED) == null) { + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + } + } catch (RuntimeException re) { + LoggerUtils.logMsg( + logger, envImpl, Level.SEVERE, + "Exception aborted btree verification of db " + + dbImpl.getDebugName() + + ", verification of all dbs will stop. " + e); + + setStopVerifyFlag(true); + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + } + + } catch (EnvironmentFailureException| + BtreeVerificationException e) { + throw e; + + } catch (RuntimeException e) { + /* + * Consider all other exceptions, e.g. an + * OperationFailureException thrown by cursor.get that + * is not a LockConflictException, to be fatal to + * the entire verification process, since we don't know + * what caused them. + */ + LoggerUtils.logMsg( + logger, envImpl, Level.SEVERE, + "Exception aborted btree verification of db " + + dbImpl.getDebugName() + + ", verification of all dbs will stop. " + e); + + setStopVerifyFlag(true); + return WalkDatabaseTreeResult.NO_MORE_RECORDS; + } + } + + return new WalkDatabaseTreeResult(foundKey, foundData, false); + + } finally { + cursorImpl.setTreeStatsAccumulator(null); + tree.setTreeStatsAccumulator(null); + EnvironmentImpl.decThreadLocalReferenceCount(); + + cursor.close(); + locker.operationEnd(); + + /* Release all cached DBs. */ + envImpl.getDbTree().releaseDbs(dbCache); + } + } + + private void verifyIndex( + final DatabaseImpl dbImpl, + final SecondaryDatabase secDb, + final Cursor cursor, + final DatabaseEntry key, + final DatabaseEntry priKey) + throws StopDbVerificationException { + + assert secDb != null; + + try { + dbImpl.getEnv().getSecondaryAssociationLock(). + readLock().lockInterruptibly(); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(dbImpl.getEnv(), e); + } + + try { + final SecondaryAssociation secAssoc = + DbInternal.getSecondaryAssociation(secDb); + if (secAssoc.isEmpty()) { + return; + } + + final Database priDb = secAssoc.getPrimary(priKey); + if (priDb == null) { + return; + } + + /* + * We only need to check whether the primary record exists; we + * do not need the data. + */ + final DatabaseEntry priData = new DatabaseEntry(); + priData.setPartial(0, 0, true); + + /* + * Currently the secondary record is locked. In order to avoid + * deadlock, here we use a non-blocking lock. In order to + * release the lock on the primary record, we create a new + * Locker to acquire the lock and release it in the finally + * block. + */ + final Locker locker = + LockerFactory.getInternalReadOperationLocker(envImpl); + locker.setDefaultNoWait(true); + + try { + /* + * Cursor.readPrimaryAfterGet may return true or false, but + * neither case indicates index corruption. Only + * throwing SecondaryIntegrityException means index corruption. 
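+ *
+ * The reverse-order locking pattern used here, in schematic pseudo-calls
+ * (added editorially; not the real Locker API):
+ *
+ *   lock(secondaryRecord, READ, BLOCKING);   // usual order is pri -> sec
+ *   try {
+ *       lock(primaryRecord, READ, NO_WAIT);  // reversed, hence no-wait
+ *   } catch (LockNotAvailableException e) {
+ *       // a writer owns the primary; skip this check instead of risking
+ *       // deadlock
+ *   }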
+ */ + DbInternal.readPrimaryAfterGet( + cursor, priDb, key, priKey, priData, LockMode.DEFAULT, + false /*secDirtyRead*/, false /*lockPrimaryOnly*/, + true /*verifyOnly*/, locker, secDb, secAssoc); + } catch (LockNotAvailableException e) { + /* Ignored -- see comment in walkDatabaseTree. */ + } finally { + /* Release primary record lock. */ + locker.operationEnd(); + } + } catch (SecondaryIntegrityException sie) { + /* + * Because currently the primary database is not marked as + * CORRUPT, if we catch SIE here, it indicates that this SIE + * was thrown by Cursor.readPrimaryAfterGet. Log related error + * message here. + */ + LoggerUtils.logMsg( + logger, envImpl, Level.WARNING, + "Secondary corruption is detected during btree " + + "verification. " + sie); + + throw new StopDbVerificationException(); + + } catch (IllegalStateException ise) { + /* + * IllegalStateException is thrown when the primary database, + * which is obtained via SecondaryAssociation.getPrimary, is + * accessed after it is closed. For non-KVS apps, a secondary + * database may only map to one unique primary database, and this + * database will have already been closed. Therefore we just stop + * the verification of the secondary database. In KVS, other primary + * DBs (partitions) may still be open, but stopping verification of + * the index is still acceptable. + */ + throw new StopDbVerificationException(); + } finally { + dbImpl.getEnv().getSecondaryAssociationLock().readLock().unlock(); + } + } + + private void verifyForeignConstraint( + final SecondaryDatabase secDb, + final Cursor cursor, + final DatabaseEntry secKey, + final Map dbCache) + throws StopDbVerificationException { + + assert secDb != null; + + final Database foreignDb = + DbInternal.getPrivateSecondaryConfig(secDb).getForeignKeyDatabase(); + if (foreignDb == null) { + return; + } + + final DatabaseId foreignDbId; + try { + foreignDbId = DbInternal.getDbImpl(foreignDb).getId(); + } catch (IllegalStateException|OperationFailureException e) { + throw new StopDbVerificationException(); + } + + envImpl.checkOpen(); + final DbTree dbTree = envImpl.getDbTree(); + final DatabaseImpl foreignDbImpl = + dbTree.getDb(foreignDbId, -1, dbCache); + + if (foreignDbImpl == null || foreignDbImpl.isDeleted()) { + /* This database is deleted. */ + throw new StopDbVerificationException(); + } + + /* + * We only need to check whether the corresponding record exists + * in the foreign database. + */ + final DatabaseEntry tmpData = new DatabaseEntry(); + tmpData.setPartial(0, 0, true); + + /* Use the non-blocking lock. */ + final Locker locker = + LockerFactory.getInternalReadOperationLocker(envImpl); + locker.setDefaultNoWait(true); + + try (final Cursor foreignCursor = + DbInternal.makeCursor(foreignDbImpl, locker, null, + true/*retainNonTxnLocks*/)) { + + final OperationResult result; + try { + result = foreignCursor.get( + secKey, tmpData, Get.SEARCH, READLOCK_UNCHANGED); + } catch (LockNotAvailableException lnae) { + /* Ignored -- see comment in walkDatabaseTree. */ + return; + } finally { + locker.operationEnd();; + } + + /* + * When a foreign key issue is found, we should first + * generate SecondaryIntegrityException (rather than + * ForeignConstraintException) to set the secondary database as + * corrupt, and then throw StopDbVerificationException to cause + * walkDatabaseTree to stop checking this secondary database. 
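+ *
+ * Editorial example of the constraint being checked: if this secondary
+ * was configured with a foreign key database, e.g.
+ *
+ *   SecondaryConfig config = new SecondaryConfig();
+ *   config.setForeignKeyDatabase(countryDb); // countryDb is hypothetical
+ *
+ * then every secondary key in this index must also exist as a primary
+ * key in countryDb, and a missing match below is treated as corruption.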
+ */ + if (result == null) { + + setSecondaryDbCorrupt( + secDb, + DbInternal.getCursorImpl(cursor).getLocker(), + "Secondary key does not exist in foreign database " + + DbInternal.getDbDebugName(foreignDb), + secKey, + null/*priKey*/, + DbInternal.getCursorImpl(cursor).getExpirationTime()); + + throw new StopDbVerificationException(); + } + } + } + + private void verifyPrimaryData( + final DatabaseImpl dbImpl, + final Database priDb, + final Cursor cursor, + final Map dbCache) { + + assert priDb != null; + + try { + dbImpl.getEnv().getSecondaryAssociationLock(). + readLock().lockInterruptibly(); + } catch (InterruptedException e) { + throw new ThreadInterruptedException(dbImpl.getEnv(), e); + } + + try { + final SecondaryAssociation secAssoc = + DbInternal.getSecondaryAssociation(priDb); + if (secAssoc.isEmpty()) { + return; + } + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* + * 1. Read the primary data portion with Read lock. + * 2. If null is returned, this primary record is deleted. We + * just ignore it. + * 3. If non-null is returned, the cursor, which is used by + * walkDatabaseTree, owns a read lock on the primary record. + * 4. If LockConflictException is thrown, then this primary + * record is locked. Just return. + */ + try { + if (cursor.get( + key, data, Get.CURRENT, READLOCK_UNCHANGED) == null) { + return; + } + } catch (LockConflictException e) { + return; + } + + /* + * If checkSecondaryKeysExist cannot find the secondary record, + * it will throw SIE. At that time, the cursor used in + * checkSecondaryKeysExist is not at a meaningful slot, so we get + * the expirationTime of the corresponding primary record here + * and then pass it to checkSecondaryKeysExist. + */ + for (final SecondaryDatabase secDb : secAssoc.getSecondaries(key)) { + /* + * If the primary database is removed from the + * SecondaryAssociation, then we will skip checking any + * secondary database. + * + * Besides, if the primary database is removed from the + * SecondaryAssociation, secAssoc.getPrimary may throw + * exception. + */ + try { + if (secAssoc.getPrimary(key) != priDb ) { + return; + } + } catch (Exception e) { + return; + } + + /* + * If the secondary database is in population phase, it + * may be reasonable that the BtreeVerifier can not find + * the corresponding secondary records for the checked + * primary record, because the primary record has not been + * populated to the secondary database. + */ + if (secDb.isIncrementalPopulationEnabled()) { + continue; + } + + checkSecondaryKeysExist( + priDb, secDb, key, data, dbCache, secAssoc, + DbInternal.getCursorImpl(cursor).getExpirationTime()); + } + } finally { + dbImpl.getEnv().getSecondaryAssociationLock().readLock().unlock(); + } + } + + private void checkSecondaryKeysExist( + final Database priDb, + final SecondaryDatabase secDb, + DatabaseEntry priKey, + DatabaseEntry priData, + final Map dbCache, + final SecondaryAssociation secAssoc, + final long expirationTime) { + + if (DbInternal.isCorrupted(secDb)) { + /* + * If the secondary database is set to be CORRUPT, then we will + * not check this database. Just quick return. 
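+ *
+ * A sketch of the kind of key creator this method replays to recompute
+ * the expected secondary keys (added editorially; the 4-byte field layout
+ * is hypothetical):
+ *
+ *   SecondaryKeyCreator creator = (sec, pKey, pData, result) -> {
+ *       result.setData(Arrays.copyOfRange(pData.getData(), 0, 4));
+ *       return true; // returning false means "no secondary key"
+ *   };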
+ */ + return; + } + + final SecondaryConfig secondaryConfig = + DbInternal.getPrivateSecondaryConfig(secDb); + final SecondaryKeyCreator keyCreator = secondaryConfig.getKeyCreator(); + final SecondaryMultiKeyCreator multiKeyCreator = + secondaryConfig.getMultiKeyCreator(); + + if (keyCreator == null && multiKeyCreator == null) { + assert priDb.getConfig().getReadOnly(); + return; + } + + final DatabaseId secDbId; + try { + secDbId = DbInternal.getDbImpl(secDb).getId(); + } catch (IllegalStateException|OperationFailureException e) { + /* + * We want to continue to check the following primary records, + * so we just return. + */ + return; + } + + envImpl.checkOpen(); + final DbTree dbTree = envImpl.getDbTree(); + final DatabaseImpl secDbImpl = dbTree.getDb(secDbId, -1, dbCache); + + if (secDbImpl == null || secDbImpl.isDeleted()) { + /* + * We want to continue to check the following primary records, + * so we just return. + */ + return; + } + + final String errMsg = + "Secondary is corrupt: the primary record contains a " + + "key that is not present in this secondary database."; + + if (keyCreator != null) { + /* Each primary record may have a single secondary key. */ + assert multiKeyCreator == null; + + DatabaseEntry secKey = new DatabaseEntry(); + if (!keyCreator.createSecondaryKey( + secDb, priKey, priData, secKey)) { + /* This primary record has no secondary keys. */ + return; + } + + checkOneSecondaryKeyExists( + secDb, secDbImpl, priKey, secKey, expirationTime, + errMsg, priDb, secAssoc); + + return; + } + + /* Each primary record may have multiple secondary keys. */ + + /* Get secondary keys. */ + final Set secKeys = new HashSet<>(); + multiKeyCreator.createSecondaryKeys( + secDb, priKey, priData, secKeys); + if (secKeys.isEmpty()) { + /* This primary record has no secondary keys. */ + return; + } + + /* + * Check each secondary key. + */ + for (final DatabaseEntry secKey : secKeys) { + + if (!checkOneSecondaryKeyExists( + secDb, secDbImpl, priKey, secKey, expirationTime, + errMsg, priDb, secAssoc)) { + return; + } + } + } + + private boolean checkOneSecondaryKeyExists( + final SecondaryDatabase secDb, + final DatabaseImpl secDbImpl, + final DatabaseEntry priKey, + final DatabaseEntry secKey, + final long expirationTime, + final String errMsg, + final Database priDb, + final SecondaryAssociation secAssoc) { + + final Locker locker = + LockerFactory.getInternalReadOperationLocker(envImpl); + + try (final Cursor checkCursor = DbInternal.makeCursor( + secDbImpl, locker, null, false/*retainNonTxnLocks*/)) { + + if (checkCursor.get(secKey, priKey, Get.SEARCH_BOTH, + NOLOCK_UNCHANGED) == null) { + + /* Same reason with that in verifyPrimaryData. */ + try { + if (secAssoc.getPrimary(priKey) != priDb || + secDb.isIncrementalPopulationEnabled()) { + return false; + } + } catch (Exception e) { + return false; + } + + /* + * Can not find the corresponding secondary key. + * So an index issue exists. 
+ */ + setSecondaryDbCorrupt( + secDb, locker, errMsg, secKey, priKey, + expirationTime); + + return false; + } + } finally { + locker.operationEnd(); + } + + return true; + } + + private void setSecondaryDbCorrupt( + final SecondaryDatabase secDb, + final Locker locker, + final String errMsg, + final DatabaseEntry secKey, + final DatabaseEntry priKey, + final long expirationTime) { + + if (!DbInternal.isCorrupted(secDb)) { + + final SecondaryIntegrityException sie = + new SecondaryIntegrityException( + secDb, locker, errMsg, DbInternal.getDbDebugName(secDb), + secKey, priKey, expirationTime); + + LoggerUtils.logMsg( + logger, envImpl, Level.WARNING, + "Secondary corruption is detected during btree " + + "verification. " + sie); + } + } + + void setStopVerifyFlag(boolean val) { + stopVerify = val; + } + + public void setBtreeVerifyConfig(VerifyConfig btreeVerifyConfig) { + this.btreeVerifyConfig = btreeVerifyConfig; + } + + private class VerifierStatsAccumulator extends StatsAccumulator { + VerifierStatsAccumulator( + PrintStream progressStream, + int progressInterval) { + super(progressStream, progressInterval); + } + + @Override + public void verifyNode(Node node) { + + /* + * Exceptions thrown by basicBtreeVerify should invalidate the + * env, so we cannot simply log the error and continue here. We + * must allow the exception to be thrown upwards. + */ + basicBtreeVerify(node); + } + } + + /* + * StopDbVerificationException is thrown when + * 1. In verifyIndex, a SecondaryIntegrityException, which + * is caused by index corruption, or a + * IllegalStateException, which is caused by accessing + * the closed primary database, is caught. + * 2. In verifyForeignConstraint, the DatabaseImpl of the + * foreign database cannot be gotten or the + * corresponding foreign record does not exist. + * This exception causes walkDatabaseTree stop checking the + * secondary database. + */ + private static class StopDbVerificationException extends Exception { + private static final long serialVersionUID = 1L; + } + + /* + * Thrown in walkDatabaseTree to indicate that cursor.get(CURRENT) returns + * null because the record has been deleted. Just let the cursor move to + * next record. + */ + private static class MoveToNextRecordException extends Exception { + private static final long serialVersionUID = 1L; + } + + /** + * Thrown during btree verification if a persistent btree corruption is + * detected. + * + * This is an internal exception and ideally it should be a checked + * exception(not a RuntimeException) so that we can confirm statically + * that it is always handled. But this would require changes to + * CursorImpl.WithCursor interface, so for now a runtime exception is used. + */ + private static class BtreeVerificationException extends RuntimeException { + private static final long serialVersionUID = 1L; + + public BtreeVerificationException(final String message) { + super(message); + } + + public BtreeVerificationException( + final String message, + final Throwable cause) { + + super(message); + initCause(cause); + } + } +} diff --git a/src/com/sleepycat/je/util/verify/DataVerifier.java b/src/com/sleepycat/je/util/verify/DataVerifier.java new file mode 100644 index 0000000..7c3c554 --- /dev/null +++ b/src/com/sleepycat/je/util/verify/DataVerifier.java @@ -0,0 +1,311 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util.verify; + +import static com.sleepycat.je.config.EnvironmentParams.ENV_RUN_VERIFIER; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_BTREE; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_BTREE_BATCH_DELAY; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_BTREE_BATCH_SIZE; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_DATA_RECORDS; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_LOG; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_LOG_READ_DELAY; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_MAX_TARDINESS; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_OBSOLETE_RECORDS; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_SCHEDULE; +import static com.sleepycat.je.config.EnvironmentParams.VERIFY_SECONDARIES; + +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.DbVerifyLog; +import com.sleepycat.je.utilint.CronScheduleParser; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.je.utilint.StoppableThread; + +/** + * Periodically perform checksum verification, Btree verification, or both, + * depending on {@link com.sleepycat.je.EnvironmentConfig#VERIFY_LOG} and + * {@link com.sleepycat.je.EnvironmentConfig#VERIFY_BTREE}. + * + * The first-time start time and the period of the verification is determined + * by {@link com.sleepycat.je.EnvironmentConfig#VERIFY_SCHEDULE}. + */ +public class DataVerifier { + private final EnvironmentImpl envImpl; + private final Timer timer; + private VerifyTask verifyTask; + private boolean verifyLog; + private boolean verifyBtree; + private final DbVerifyLog dbLogVerifier; + private final BtreeVerifier dbTreeVerifier; + + private long verifyDelay; + private long verifyInterval; + private String cronSchedule; + + private boolean shutdownRequest = false; + + private final String VERIFIER_SCHEDULE = "test.je.env.verifierSchedule"; + + public DataVerifier(EnvironmentImpl envImpl) { + + this.envImpl = envImpl; + this.timer = new Timer( + envImpl.makeDaemonThreadName( + Environment.DATA_CORRUPTION_VERIFIER_NAME), + true /*isDaemon*/); + dbLogVerifier = new DbVerifyLog(envImpl, 0); + dbTreeVerifier = new BtreeVerifier(envImpl); + } + + /** + * Applies the new configuration, then cancels and reschedules the verify + * task as needed. 
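+     *
+     * <p>For illustration (not part of the original docs, and assuming the
+     * schedule shown): a schedule such as "0 0 * * *" is parsed by
+     * {@link CronScheduleParser} into an initial delay up to the next
+     * midnight plus a 24-hour interval, roughly:
+     *
+     * <pre>
+     *    CronScheduleParser csp = new CronScheduleParser("0 0 * * *");
+     *    timer.scheduleAtFixedRate(
+     *        verifyTask, csp.getDelayTime(), csp.getInterval());
+     * </pre>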
+     */
+    public void configVerifyTask(DbConfigManager configMgr) {
+
+        if (!updateConfig(configMgr)) {
+            return;
+        }
+
+        synchronized (this) {
+            if (!shutdownRequest) {
+                cancel();
+
+                if (cronSchedule != null) {
+                    verifyTask = new VerifyTask(envImpl);
+
+                    /*
+                     * Use Timer.scheduleAtFixedRate rather than
+                     * Timer.schedule: this is a long running task, and the
+                     * next run should start at a fixed time, not a fixed
+                     * delay (e.g. 24 hours) after the previous run
+                     * finishes.
+                     */
+                    timer.scheduleAtFixedRate(
+                        verifyTask, verifyDelay, verifyInterval);
+                }
+            }
+        }
+    }
+
+    private void cancel() {
+        if (verifyTask != null) {
+            verifyTask.cancel();
+        }
+
+        /*
+         * Stop the verifier as soon as possible when it is disabled via
+         * EnvironmentMutableConfig.
+         */
+        dbLogVerifier.setStopVerifyFlag(true);
+        dbTreeVerifier.setStopVerifyFlag(true);
+    }
+
+    public void requestShutdown() {
+        synchronized (this) {
+            shutdownRequest = true;
+            cancel();
+            timer.cancel();
+        }
+    }
+
+    public void shutdown() {
+        requestShutdown();
+
+        final int timeoutMs = 30000;
+
+        final PollCondition cond = new PollCondition(2, timeoutMs) {
+            @Override
+            protected boolean condition() {
+                /* Copy verifyTask since it may change in another thread. */
+                final VerifyTask task = verifyTask;
+                return task == null || !task.isRunning;
+            }
+        };
+
+        if (!cond.await()) {
+            LoggerUtils.warning(
+                envImpl.getLogger(), envImpl,
+                "Unable to shutdown data verifier after " + timeoutMs + "ms");
+        }
+    }
+
+    public long getVerifyDelay() {
+        return verifyDelay;
+    }
+
+    public long getVerifyInterval() {
+        return verifyInterval;
+    }
+
+    public VerifyTask getVerifyTask() {
+        return verifyTask;
+    }
+
+    public String getCronSchedule() {
+        return cronSchedule;
+    }
+
+    /**
+     * Applies the new configuration and returns whether it changed.
+     */
+    private boolean updateConfig(DbConfigManager configMgr) {
+
+        /*
+         * The verifier is disabled (ENV_RUN_VERIFIER is false, which is
+         * not the default), so clear any existing schedule.
+         */
+        if (!configMgr.getBoolean(ENV_RUN_VERIFIER)) {
+            if (cronSchedule == null) {
+                return false;
+            }
+            cronSchedule = null;
+            verifyDelay = 0;
+            verifyInterval = 0;
+            return true;
+        } else {
+            String newCronSchedule = configMgr.get(VERIFY_SCHEDULE);
+
+            /*
+             * If the data verifier schedule is set via the system property
+             * and VERIFY_SCHEDULE is not explicitly specified, the system
+             * property takes precedence.
+             *
+             * This is used by JE unit tests and JE standalone tests.
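+             *
+             * For example (hypothetical command line):
+             *
+             *   java -Dtest.je.env.verifierSchedule="0 3 * * *" ...
+             *
+             * would override an unspecified VERIFY_SCHEDULE with a daily
+             * 3:00 AM run.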
+             */
+            String sysPropVerifySchedule =
+                System.getProperty(VERIFIER_SCHEDULE);
+            if (sysPropVerifySchedule != null &&
+                !configMgr.isSpecified(VERIFY_SCHEDULE)) {
+                newCronSchedule = sysPropVerifySchedule;
+            }
+
+            if (CronScheduleParser.checkSame(cronSchedule, newCronSchedule)) {
+                return false;
+            }
+            CronScheduleParser csp = new CronScheduleParser(newCronSchedule);
+            verifyDelay = csp.getDelayTime();
+            verifyInterval = csp.getInterval();
+            cronSchedule = newCronSchedule;
+            return true;
+        }
+    }
+
+    class VerifyTask extends TimerTask {
+        private final EnvironmentImpl envImpl;
+        private volatile boolean isRunning;
+
+        VerifyTask(EnvironmentImpl envImpl) {
+            this.envImpl = envImpl;
+        }
+
+        private void updateConfig() {
+            DbConfigManager configMgr = envImpl.getConfigManager();
+
+            verifyLog = configMgr.getBoolean(VERIFY_LOG);
+            verifyBtree = configMgr.getBoolean(VERIFY_BTREE);
+
+            dbLogVerifier.setReadDelay(
+                configMgr.getDuration(VERIFY_LOG_READ_DELAY),
+                TimeUnit.MILLISECONDS);
+
+            final VerifyConfig btreeVerifyConfig = new VerifyConfig();
+            btreeVerifyConfig.setVerifySecondaries(
+                configMgr.getBoolean(VERIFY_SECONDARIES));
+            btreeVerifyConfig.setVerifyDataRecords(
+                configMgr.getBoolean(VERIFY_DATA_RECORDS));
+            btreeVerifyConfig.setVerifyObsoleteRecords(
+                configMgr.getBoolean(VERIFY_OBSOLETE_RECORDS));
+            btreeVerifyConfig.setBatchSize(
+                configMgr.getInt(VERIFY_BTREE_BATCH_SIZE));
+            btreeVerifyConfig.setBatchDelay(
+                configMgr.getDuration(VERIFY_BTREE_BATCH_DELAY),
+                TimeUnit.MILLISECONDS);
+            dbTreeVerifier.setBtreeVerifyConfig(btreeVerifyConfig);
+
+            /*
+             * 1. Why clear the stop flags here, rather than immediately
+             *    after calling cancel in configVerifyTask?
+             *    If we cleared them there, the previous task might not have
+             *    observed the stop flag before we reset it, and so could
+             *    not be stopped promptly. By the time this code runs, a new
+             *    task has started, so the previous task has already
+             *    stopped.
+             * 2. Why synchronize?
+             *    Consider the following interleaving:
+             *       shutdown thread            task thread
+             *
+             *                                  !shutdownRequest
+             *       shutdownRequest = true
+             *       setStopVerifyFlag(true)
+             *                                  setStopVerifyFlag(false)
+             *    Without the lock, the task thread could re-enable a
+             *    verifier that the shutdown thread just stopped.
+             */
+            synchronized (DataVerifier.this) {
+                if (!shutdownRequest) {
+                    dbLogVerifier.setStopVerifyFlag(false);
+                    dbTreeVerifier.setStopVerifyFlag(false);
+                }
+            }
+        }
+
+        @Override
+        public void run() {
+            /*
+             * If this run is starting so far behind its scheduled
+             * execution time (because the previous verification ran long)
+             * that it exceeds VERIFY_MAX_TARDINESS, skip it.
+             */
+            if (System.currentTimeMillis() - scheduledExecutionTime() >=
+                envImpl.getConfigManager().getDuration(VERIFY_MAX_TARDINESS)) {
+                return;
+            }
+
+            isRunning = true;
+            boolean success = false;
+            updateConfig();
+            try {
+                if (verifyLog) {
+                    dbLogVerifier.verifyAll();
+                }
+                if (verifyBtree) {
+                    dbTreeVerifier.verifyAll();
+                }
+                success = true;
+            } catch (EnvironmentFailureException efe) {
+                /* Do nothing here; the timer is cancelled in finally.
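+                 * The environment has typically already been invalidated
+                 * by the EFE, so there is nothing further to do here.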
*/ + } catch (Throwable e) { + if (envImpl.isValid()) { + StoppableThread.handleUncaughtException( + envImpl.getLogger(), envImpl, Thread.currentThread(), + e); + } + } finally { + if (!success) { + requestShutdown(); + } + isRunning = false; + } + } + } +} diff --git a/src/com/sleepycat/je/util/verify/VerifierUtils.java b/src/com/sleepycat/je/util/verify/VerifierUtils.java new file mode 100644 index 0000000..a11d58e --- /dev/null +++ b/src/com/sleepycat/je/util/verify/VerifierUtils.java @@ -0,0 +1,92 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.util.verify; + +import java.util.Properties; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.RestoreMarker; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.utilint.LoggerUtils; + +public class VerifierUtils { + + private static final String EXCEPTION_KEY = "ex"; + + private static final String RESTORE_REQUIRED_MSG = + "The environment may not be opened due to persistent data " + + "corruption that was detected earlier. The marker file " + + "(7fffffff.jdb) may be deleted to allow recovery, but " + + "this is normally unsafe and not recommended. " + + "Original exception:\n"; + + /** + * Create the restore marker file from Exception origException and return + * an EFE that can be thrown by the caller. The EFE will invalidate the + * environment. + * + * @param failureType the failure type that should be recorded in the + * RestoreRequired log entry. + * @param origException the exception contains the properties that are + * stored to the marker file. + */ + public static EnvironmentFailureException createMarkerFileFromException( + RestoreRequired.FailureType failureType, + Throwable origException, + EnvironmentImpl envImpl, + EnvironmentFailureReason reason) { + + String markerFileError = ""; + + /* + * If env is read-only (for example when using the DbVerify command + * line) we cannot create the marker file, but we should still create + * and return an invalidating EFE indicating persistent corruption. + */ + if (!envImpl.isReadOnly()) { + final Properties props = new Properties(); + + props.setProperty( + EXCEPTION_KEY, origException.toString() + "\n" + + LoggerUtils.getStackTrace(origException)); + + final RestoreMarker restoreMarker = new RestoreMarker( + envImpl.getFileManager(), envImpl.getLogManager()); + + try { + restoreMarker.createMarkerFile(failureType, props); + } catch (RestoreMarker.FileCreationException e) { + markerFileError = " " + e.getMessage(); + } + } + + return new EnvironmentFailureException( + envImpl, + reason, + "Persistent corruption detected: " + origException.toString() + + markerFileError, + origException); + } + + /* + * Get a message referencing the original data corruption exception. 
+ */ + public static String getRestoreRequiredMessage( + RestoreRequired restoreRequired) { + + Properties p = restoreRequired.getProperties(); + return RESTORE_REQUIRED_MSG + p.get(EXCEPTION_KEY); + } +} diff --git a/src/com/sleepycat/je/util/verify/package-info.java b/src/com/sleepycat/je/util/verify/package-info.java new file mode 100644 index 0000000..b8d00b3 --- /dev/null +++ b/src/com/sleepycat/je/util/verify/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Scheduled data verifier and Btree verification. + */ +package com.sleepycat.je.util.verify; diff --git a/src/com/sleepycat/je/utilint/ActiveTxnArrayStat.java b/src/com/sleepycat/je/utilint/ActiveTxnArrayStat.java new file mode 100644 index 0000000..66ab34a --- /dev/null +++ b/src/com/sleepycat/je/utilint/ActiveTxnArrayStat.java @@ -0,0 +1,118 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.TransactionStats.Active; + +/** + * An array of active Txn stats. 
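+ *
+ * <p>For illustration (hypothetical output), {@code getFormattedValue}
+ * renders the array as a bracketed list with one transaction per line:
+ *
+ * <pre>
+ *    [ txnId = 42 txnName = Txn42
+ *    ]
+ * </pre>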
+ */
+public class ActiveTxnArrayStat extends Stat<Active[]> {
+    private static final long serialVersionUID = 1L;
+
+    private Active[] array;
+
+    public ActiveTxnArrayStat(StatGroup group, StatDefinition definition) {
+        super(group, definition);
+    }
+
+    public ActiveTxnArrayStat(StatGroup group,
+                              StatDefinition definition,
+                              Active[] array) {
+        super(group, definition);
+        this.array = array;
+    }
+
+    @Override
+    public Active[] get() {
+        return array;
+    }
+
+    @Override
+    public void set(Active[] array) {
+        this.array = array;
+    }
+
+    @Override
+    public void add(Stat<Active[]> other) {
+        throw EnvironmentFailureException.unexpectedState(
+            "ActiveTxnArrayStat doesn't support the add operation.");
+    }
+
+    @Override
+    public void clear() {
+        if (array != null && array.length > 0) {
+            for (int i = 0; i < array.length; i++) {
+                array[i] = new Active(array[i].getName(), 0, 0);
+            }
+        }
+    }
+
+    @Override
+    public Stat<Active[]> computeInterval(Stat<Active[]> base) {
+        return copy();
+    }
+
+    @Override
+    public void negate() {
+        throw EnvironmentFailureException.unexpectedState(
+            "ActiveTxnArrayStat doesn't support the negate operation.");
+    }
+
+    @Override
+    public ActiveTxnArrayStat copy() {
+        try {
+            ActiveTxnArrayStat ret = (ActiveTxnArrayStat) super.clone();
+            if (array != null && array.length > 0) {
+                Active[] newArray = new Active[array.length];
+                System.arraycopy(array, 0, newArray, 0, array.length);
+                ret.set(newArray);
+            }
+
+            return ret;
+        } catch (CloneNotSupportedException e) {
+            throw EnvironmentFailureException.unexpectedException(e);
+        }
+    }
+
+    @Override
+    protected String getFormattedValue() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("[");
+        if (array != null && array.length > 0) {
+            for (Active active : array) {
+                sb.append(" txnId = " + Stat.FORMAT.format(active.getId()) +
+                          " txnName = " + active.getName() + "\n");
+            }
+        }
+        sb.append("]");
+
+        return sb.toString();
+    }
+
+    @Override
+    public boolean isNotSet() {
+        if (array == null) {
+            return true;
+        }
+
+        if (array.length == 0) {
+            return true;
+        }
+
+        return false;
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/Adler32.java b/src/com/sleepycat/je/utilint/Adler32.java
new file mode 100644
index 0000000..7e6f4db
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/Adler32.java
@@ -0,0 +1,156 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.zip.Checksum;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Adler32 checksum implementation.
+ *
+ * This class is used rather than the native java.util.zip.Adler32 class
+ * because we have seen a JIT problem when calling the Adler32 class using
+ * the Server JVM on Linux and Solaris. Specifically, we suspect this may
+ * be Bug Parade number 4965907. See SR [#9376]. We also believe that this
+ * bug is fixed in Java 5 and therefore only use this class conditionally
+ * if we find that we're in a 1.4 JVM. [#13354].
+ *
+ * The Adler32 checksum is discussed in RFC1950. The sample implementation
+ * from this RFC is shown below:
+ *
+ * <pre>
        + *    #define BASE 65521  largest prime smaller than 65536
        + *    unsigned long update_adler32(unsigned long adler,
        + *       unsigned char *buf, int len)
        + *    {
        + *      unsigned long s1 = adler & 0xffff;
        + *      unsigned long s2 = (adler >> 16) & 0xffff;
        + *      int n;
        + *
        + *      for (n = 0; n < len; n++) {
        + *        s1 = (s1 + buf[n]) % BASE;
        + *        s2 = (s2 + s1)     % BASE;
        + *      }
        + *      return (s2 << 16) + s1;
        + *    }
        + *
        + *    unsigned long adler32(unsigned char *buf, int len)
        + *    {
        + *      return update_adler32(1L, buf, len);
        + *    }
+ * </pre>
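+ *
+ * As a small worked example (ours, not from the RFC): for the bytes of
+ * "abc" (97, 98, 99), s1 ends at 1 + 97 + 98 + 99 = 295 and s2 at
+ * 98 + 196 + 295 = 589, so the checksum is 589 * 65536 + 295 = 0x024d0127.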
        + * + * The NMAX optimization is so that we don't have to do modulo calculations + * on every iteration. NMAX is the max number of additions to make + * before you have to perform the modulo calculation. + */ +public class Adler32 implements Checksum { + + /* This class and the ctor are public for the unit tests. */ + public static class ChunkingAdler32 extends java.util.zip.Adler32 { + int adler32ChunkSize = 0; + + public ChunkingAdler32(int adler32ChunkSize) { + this.adler32ChunkSize = adler32ChunkSize; + } + + @Override + public void update(byte[] b, int off, int len) { + if (len < adler32ChunkSize) { + super.update(b, off, len); + return; + } + + int i = 0; + while (i < len) { + int bytesRemaining = len - i; + int nBytesThisChunk = + Math.min(bytesRemaining, adler32ChunkSize); + super.update(b, off + i, nBytesThisChunk); + i += nBytesThisChunk; + } + } + } + + public static Checksum makeChecksum() { + if (EnvironmentImpl.USE_JAVA5_ADLER32) { + int adler32ChunkSize = EnvironmentImpl.getAdler32ChunkSize(); + if (adler32ChunkSize > 0) { + return new ChunkingAdler32(adler32ChunkSize); + } else { + return new java.util.zip.Adler32(); + } + } else { + return new Adler32(); + } + } + + private long adler = 1; + + /* + * BASE is the largest prime number smaller than 65536 + * NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 + */ + private static final int BASE = 65521; + private static final int NMAX = 5552; + + /** + * Update current Adler-32 checksum given the specified byte. + */ + public void update(int b) { + long s1 = adler & 0xffff; + long s2 = (adler >> 16) & 0xffff; + s1 = (s1 + (b & 0xff)) % BASE; + s2 = (s1 + s2) % BASE; + adler = (s2 << 16) | s1; + } + + /** + * Update current Adler-32 checksum given the specified byte array. + */ + public void update(byte[] b, int off, int len) { + long s1 = adler & 0xffff; + long s2 = (adler >> 16) & 0xffff; + + while (len > 0) { + int k = len < NMAX ? len : NMAX; + len -= k; + + /* This does not benefit from loop unrolling. */ + while (k-- > 0) { + s1 += (b[off++] & 0xff); + s2 += s1; + } + + s1 %= BASE; + s2 %= BASE; + } + adler = (s2 << 16) | s1; + } + + /** + * Reset Adler-32 checksum to initial value. + */ + public void reset() { + adler = 1; + } + + /** + * Returns current checksum value. + */ + public long getValue() { + return adler; + } +} diff --git a/src/com/sleepycat/je/utilint/AtomicIntStat.java b/src/com/sleepycat/je/utilint/AtomicIntStat.java new file mode 100644 index 0000000..2f6b9aa --- /dev/null +++ b/src/com/sleepycat/je/utilint/AtomicIntStat.java @@ -0,0 +1,116 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.concurrent.atomic.AtomicInteger; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A int JE stat that uses {@link AtomicInteger} to be thread safe. 
+ */ +public class AtomicIntStat extends Stat { + private static final long serialVersionUID = 1L; + + private final AtomicInteger counter; + + public AtomicIntStat(StatGroup group, StatDefinition definition) { + super(group, definition); + counter = new AtomicInteger(); + } + + AtomicIntStat(StatDefinition definition, int value) { + super(definition); + counter = new AtomicInteger(value); + } + + @Override + public Integer get() { + return counter.get(); + } + + @Override + public void set(Integer newValue) { + counter.set(newValue); + } + + public void increment() { + counter.incrementAndGet(); + } + + public void decrement() { + counter.decrementAndGet(); + } + + public void add(int count) { + counter.addAndGet(count); + } + + @Override + public void add(Stat other) { + counter.addAndGet(other.get()); + } + + @Override + public void clear() { + counter.set(0); + } + + @Override + public Stat computeInterval(Stat base) { + AtomicIntStat ret = copy(); + if (definition.getType() == StatType.INCREMENTAL) { + ret.set(counter.get() - base.get()); + } + return ret; + } + + @Override + public void negate() { + if (definition.getType() == StatType.INCREMENTAL) { + + /* + * Negate the value atomically, retrying if another change + * intervenes. This loop emulates the behavior of + * AtomicInteger.getAndIncrement. + */ + while (true) { + final int current = counter.get(); + if (counter.compareAndSet(current, -current)) { + return; + } + } + } + } + + @Override + public AtomicIntStat copy() { + return new AtomicIntStat(definition, counter.get()); + } + + @Override + public AtomicIntStat copyAndClear() { + return new AtomicIntStat(definition, counter.getAndSet(0)); + } + + @Override + protected String getFormattedValue() { + return Stat.FORMAT.format(counter.get()); + } + + @Override + public boolean isNotSet() { + return (counter.get() == 0); + } +} diff --git a/src/com/sleepycat/je/utilint/AtomicLongComponent.java b/src/com/sleepycat/je/utilint/AtomicLongComponent.java new file mode 100644 index 0000000..ef16c8e --- /dev/null +++ b/src/com/sleepycat/je/utilint/AtomicLongComponent.java @@ -0,0 +1,86 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * A stat component based on an AtomicLong. + */ +public class AtomicLongComponent + extends MapStatComponent { + + final AtomicLong val; + + /** Creates an instance of this class. */ + AtomicLongComponent() { + val = new AtomicLong(); + } + + private AtomicLongComponent(long val) { + this.val = new AtomicLong(val); + } + + /** + * Sets the stat to the specified value. + * + * @param newValue the new value + */ + public void set(long newValue) { + val.set(newValue); + } + + /** + * Adds the specified value. + * + * @param inc the value to add. 
+ */ + public void add(long inc) { + val.addAndGet(inc); + } + + @Override + public Long get() { + return val.get(); + } + + @Override + public void clear() { + val.set(0); + } + + @Override + public AtomicLongComponent copy() { + return new AtomicLongComponent(val.get()); + } + + @Override + protected String getFormattedValue(boolean useCommas) { + if (useCommas) { + return Stat.FORMAT.format(val.get()); + } else { + return val.toString(); + } + } + + @Override + public boolean isNotSet() { + return val.get() == 0; + } + + @Override + public String toString() { + return val.toString(); + } +} diff --git a/src/com/sleepycat/je/utilint/AtomicLongMapStat.java b/src/com/sleepycat/je/utilint/AtomicLongMapStat.java new file mode 100644 index 0000000..e4ac258 --- /dev/null +++ b/src/com/sleepycat/je/utilint/AtomicLongMapStat.java @@ -0,0 +1,109 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A JE stat that maintains a map of individual values based on AtomicLong + * which can be looked up with a String key, and that returns results as a + * formatted string. + */ +public final class AtomicLongMapStat + extends MapStat { + + private static final long serialVersionUID = 1L; + + /** + * Creates an instance of this class. + * + * @param group the owning group + * @param definition the associated definition + */ + public AtomicLongMapStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + private AtomicLongMapStat(AtomicLongMapStat other) { + super(other); + } + + /** + * Creates, stores, and returns a new stat for the specified key. + * + * @param key the key + * @return the new stat + */ + public synchronized AtomicLongComponent createStat(String key) { + assert key != null; + final AtomicLongComponent stat = new AtomicLongComponent(); + statMap.put(key, stat); + return stat; + } + + @Override + public AtomicLongMapStat copy() { + return new AtomicLongMapStat(this); + } + + /** The base argument must be an instance of AtomicLongMapStat. 
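+     * For INCREMENTAL stats the base value is subtracted per key; e.g.
+     * (illustrative values) if this map holds nWrites=10 and the base
+     * holds nWrites=4, the returned stat holds nWrites=6.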
     */
+    @Override
+    public AtomicLongMapStat computeInterval(Stat<Long> base) {
+        assert base instanceof AtomicLongMapStat;
+        final AtomicLongMapStat copy = copy();
+        if (definition.getType() != StatType.INCREMENTAL) {
+            return copy;
+        }
+        final AtomicLongMapStat baseMapStat = (AtomicLongMapStat) base;
+        synchronized (copy) {
+            for (final Entry<String, AtomicLongComponent> entry :
+                 copy.statMap.entrySet()) {
+
+                final AtomicLongComponent baseValue;
+                synchronized (baseMapStat) {
+                    baseValue = baseMapStat.statMap.get(entry.getKey());
+                }
+                if (baseValue != null) {
+                    final AtomicLongComponent entryValue = entry.getValue();
+                    entryValue.val.getAndAdd(-baseValue.get());
+                }
+            }
+        }
+        return copy;
+    }
+
+    @Override
+    public synchronized void negate() {
+        if (definition.getType() == StatType.INCREMENTAL) {
+            for (final AtomicLongComponent stat : statMap.values()) {
+                final AtomicLong atomicVal = stat.val;
+
+                /*
+                 * Negate the value atomically, retrying if another change
+                 * intervenes. This loop emulates the behavior of
+                 * AtomicLong.getAndIncrement.
+                 */
+                while (true) {
+                    final long val = atomicVal.get();
+                    if (atomicVal.compareAndSet(val, -val)) {
+                        break;
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/AtomicLongStat.java b/src/com/sleepycat/je/utilint/AtomicLongStat.java
new file mode 100644
index 0000000..101d126
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/AtomicLongStat.java
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.sleepycat.je.utilint.StatDefinition.StatType;
+
+/**
+ * A long JE stat that uses {@link AtomicLong} to be thread safe.
+ */
+public class AtomicLongStat extends Stat<Long> {
+    private static final long serialVersionUID = 1L;
+
+    private final AtomicLong counter;
+
+    public AtomicLongStat(StatGroup group, StatDefinition definition) {
+        super(group, definition);
+        counter = new AtomicLong();
+    }
+
+    AtomicLongStat(StatDefinition definition, long value) {
+        super(definition);
+        counter = new AtomicLong(value);
+    }
+
+    @Override
+    public Long get() {
+        return counter.get();
+    }
+
+    @Override
+    public void set(Long newValue) {
+        counter.set(newValue);
+    }
+
+    public void increment() {
+        counter.incrementAndGet();
+    }
+
+    public void decrement() {
+        counter.decrementAndGet();
+    }
+
+    public void add(long count) {
+        counter.addAndGet(count);
+    }
+
+    @Override
+    public void add(Stat<Long> other) {
+        counter.addAndGet(other.get());
+    }
+
+    @Override
+    public void clear() {
+        counter.set(0L);
+    }
+
+    @Override
+    public Stat<Long> computeInterval(Stat<Long> base) {
+        AtomicLongStat ret = copy();
+        if (definition.getType() == StatType.INCREMENTAL) {
+            ret.set(counter.get() - base.get());
+        }
+        return ret;
+    }
+
+    @Override
+    public void negate() {
+        if (definition.getType() == StatType.INCREMENTAL) {
+
+            /*
+             * Negate the value atomically, retrying if another change
+             * intervenes. This loop emulates the behavior of
+             * AtomicLong.getAndIncrement.
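+             *
+             * (On Java 8 and later, counter.updateAndGet(v -> -v) would
+             * express this directly; the explicit loop keeps compatibility
+             * with older JVMs.)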
+             */
+            while (true) {
+                final long current = counter.get();
+                if (counter.compareAndSet(current, -current)) {
+                    return;
+                }
+            }
+        }
+    }
+
+    @Override
+    public AtomicLongStat copy() {
+        return new AtomicLongStat(definition, counter.get());
+    }
+
+    @Override
+    public AtomicLongStat copyAndClear() {
+        return new AtomicLongStat(definition, counter.getAndSet(0));
+    }
+
+    @Override
+    protected String getFormattedValue() {
+        return Stat.FORMAT.format(counter.get());
+    }
+
+    @Override
+    public boolean isNotSet() {
+        return (counter.get() == 0);
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/BaseStat.java b/src/com/sleepycat/je/utilint/BaseStat.java
new file mode 100644
index 0000000..6ded5b8
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/BaseStat.java
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.io.Serializable;
+
+/**
+ * The basic interface for accessing and clearing statistics for use in both
+ * standalone statistics and component statistics contained in a {@link
+ * MapStat}.
+ *
+ * @param <T> the type of the statistic value
+ */
+public abstract class BaseStat<T> implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Returns the value of the statistic.
+     *
+     * @return the value
+     */
+    public abstract T get();
+
+    /** Resets the statistic to its initial state. */
+    public abstract void clear();
+
+    /**
+     * Returns a copy of this statistic.
+     *
+     * @return a copy
+     */
+    public abstract BaseStat<T> copy();
+
+    /**
+     * Returns the value of the statistic as a formatted string.
+     *
+     * @return the value as a formatted string
+     */
+    protected abstract String getFormattedValue();
+
+    /**
+     * Returns whether the statistic is in its initial state.
+     *
+     * @return whether the statistic is in its initial state
+     */
+    public abstract boolean isNotSet();
+}
diff --git a/src/com/sleepycat/je/utilint/BitMap.java b/src/com/sleepycat/je/utilint/BitMap.java
new file mode 100644
index 0000000..0def4ad
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/BitMap.java
@@ -0,0 +1,133 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import com.sleepycat.je.EnvironmentFailureException;
+
+/**
+ * Bitmap which supports indexing with long arguments. java.util.BitSet
+ * provides all the functionality and performance we need, but requires
+ * integer indexing.
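+ *
+ * For example (illustrative): bit 100000 is held in the BitSet for segment
+ * 100000 >> 16 = 1, at offset 100000 & 0xffff = 34464, per the scheme
+ * described below.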
+ * + * Long indexing is implemented by keeping a Map of java.util.BitSets, where + * each bitset covers 2^16 bits worth of values. The Bitmap may be sparse, in + * that each segment is only instantiated when needed. + * + * Note that this class is currently not thread safe; adding a new bitset + * segment is not protected. + */ +public class BitMap { + + private static final int SEGMENT_SIZE = 16; + private static final int SEGMENT_MASK = 0xffff; + + /* + * Map of segment value -> bitset, where the segment value is index >>16 + */ + private Map bitSegments; + + public BitMap() { + bitSegments = new HashMap(); + } + + /* + * @throws IndexOutOfBoundsException if index is negative. + */ + public void set(long index) + throws IndexOutOfBoundsException { + + if (index < 0) { + throw new IndexOutOfBoundsException(index + " is negative."); + } + + BitSet bitset = getBitSet(index, true); + if (bitset == null) { + throw EnvironmentFailureException.unexpectedState + (index + " is out of bounds"); + } + int useIndex = getIntIndex(index); + bitset.set(useIndex); + } + + /* + * @throws IndexOutOfBoundsException if index is negative. + */ + public boolean get(long index) + throws IndexOutOfBoundsException { + + if (index < 0) { + throw new IndexOutOfBoundsException(index + " is negative."); + } + + BitSet bitset = getBitSet(index, false); + if (bitset == null) { + return false; + } + + int useIndex = getIntIndex(index); + return bitset.get(useIndex); + } + + /* + * Since the BitMap is implemented by a collection of BitSets, return + * the one which covers the numeric range for this index. + * + * @param index the bit we want to access + * @param allowCreate if true, return the BitSet that would hold this + * index even if it wasn't previously set. If false, return null + * if the bit has not been set. + */ + private BitSet getBitSet(long index, boolean allowCreate) { + + Long segmentId = Long.valueOf(index >> SEGMENT_SIZE); + + BitSet bitset = bitSegments.get(segmentId); + if (allowCreate) { + if (bitset == null) { + bitset = new BitSet(); + bitSegments.put(segmentId, bitset); + } + } + + return bitset; + } + + private int getIntIndex(long index) { + return (int) (index & SEGMENT_MASK); + } + + /* For unit testing. */ + int getNumSegments() { + return bitSegments.size(); + } + + /* + * Currently for unit testing, though note that java.util.BitSet does + * support cardinality(). + */ + int cardinality() { + int count = 0; + Iterator iter = bitSegments.values().iterator(); + while (iter.hasNext()) { + BitSet b = iter.next(); + count += b.cardinality(); + } + return count; + } +} diff --git a/src/com/sleepycat/je/utilint/BooleanStat.java b/src/com/sleepycat/je/utilint/BooleanStat.java new file mode 100644 index 0000000..836a61b --- /dev/null +++ b/src/com/sleepycat/je/utilint/BooleanStat.java @@ -0,0 +1,66 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * A boolean JE stat. 
+ */ +public class BooleanStat extends Stat { + private static final long serialVersionUID = 1L; + + private Boolean value; + + public BooleanStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + @Override + public Boolean get() { + return value; + } + + @Override + public void set(Boolean newValue) { + value = newValue; + } + + @Override + public void add(Stat otherStat) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + value = false; + } + + @Override + public Stat computeInterval(Stat base) { + return super.copy(); + } + + @Override + public void negate() { + } + + @Override + protected String getFormattedValue() { + return value.toString(); + } + + @Override + public boolean isNotSet() { + return false; // We can't tell if a boolean is not set. + } +} diff --git a/src/com/sleepycat/je/utilint/CmdUtil.java b/src/com/sleepycat/je/utilint/CmdUtil.java new file mode 100644 index 0000000..79d7e43 --- /dev/null +++ b/src/com/sleepycat/je/utilint/CmdUtil.java @@ -0,0 +1,153 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.File; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Convenience methods for command line utilities. + */ +public class CmdUtil { + + /** + * @throws IllegalArgumentException via main + */ + public static String getArg(String[] argv, int whichArg) + throws IllegalArgumentException { + + if (whichArg < argv.length) { + return argv[whichArg]; + } else { + throw new IllegalArgumentException(); + } + } + + /** + * Parse a string into a long. If the string starts with 0x, this is a hex + * number, else it's decimal. + */ + public static long readLongNumber(String longVal) { + if (longVal.startsWith("0x")) { + return Long.parseLong(longVal.substring(2), 16); + } else { + return Long.parseLong(longVal); + } + } + + /** + * Convert a string that is either 0xabc or 0xabc/0x123 into an lsn. 
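+     *
+     * <p>For example (illustrative): "0x3" becomes makeLsn(3, 0), while
+     * "0x3/0x1a" becomes makeLsn(3, 0x1a).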
+ */ + public static long readLsn(String lsnVal) { + int slashOff = lsnVal.indexOf("/"); + if (slashOff < 0) { + long fileNum = readLongNumber(lsnVal); + return DbLsn.makeLsn(fileNum, 0); + } else { + long fileNum = readLongNumber(lsnVal.substring(0, slashOff)); + long offset = CmdUtil.readLongNumber + (lsnVal.substring(slashOff + 1)); + return DbLsn.makeLsn(fileNum, offset); + } + } + + private static final String printableChars = + "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"; + + public static void formatEntry(StringBuilder sb, + byte[] entryData, + boolean formatUsingPrintable) { + for (byte element : entryData) { + int b = element & 0xff; + if (formatUsingPrintable) { + if (isPrint(b)) { + if (b == 0134) { /* backslash */ + sb.append('\\'); + } + sb.append(printableChars.charAt(b - 33)); + } else { + sb.append('\\'); + String hex = Integer.toHexString(b); + if (b < 16) { + sb.append('0'); + } + sb.append(hex); + } + } else { + String hex = Integer.toHexString(b); + if (b < 16) { + sb.append('0'); + } + sb.append(hex); + } + } + } + + private static boolean isPrint(int b) { + return (b < 0177) && (040 < b); + } + + /** + * Create an environment suitable for utilities. Utilities should in + * general send trace output to the console and not to the db log. + */ + public static EnvironmentImpl makeUtilityEnvironment(File envHome, + boolean readOnly) + throws EnvironmentNotFoundException, EnvironmentLockedException { + + EnvironmentConfig config = new EnvironmentConfig(); + config.setReadOnly(readOnly); + + /* Don't debug log to the database log. */ + config.setConfigParam(EnvironmentParams.JE_LOGGING_DBLOG.getName(), + "false"); + + /* Don't run recovery. */ + config.setConfigParam(EnvironmentParams.ENV_RECOVERY.getName(), + "false"); + + /* Apply the configuration in the je.properties file. */ + DbConfigManager.applyFileConfig + (envHome, DbInternal.getProps(config), false); + + EnvironmentImpl envImpl = + new EnvironmentImpl(envHome, + config, + null); + envImpl.finishInit(config); + + return envImpl; + } + + /** + * Returns a description of the java command for running a utility, without + * arguments. For utilities the last name of the class name can be + * specified when "-jar je.jar" is used. + */ + public static String getJavaCommand(Class cls) { + + String clsName = cls.getName(); + String lastName = clsName.substring(clsName.lastIndexOf('.') + 1); + + return "java { " + cls.getName() + " | -jar je-.jar " + lastName + " }"; + } +} diff --git a/src/com/sleepycat/je/utilint/CollectionUtils.java b/src/com/sleepycat/je/utilint/CollectionUtils.java new file mode 100644 index 0000000..0db45b9 --- /dev/null +++ b/src/com/sleepycat/je/utilint/CollectionUtils.java @@ -0,0 +1,154 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.utilint;
+
+import static java.util.Collections.emptySet;
+
+import java.io.Serializable;
+import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+
+/**
+ * Java Collection utilities.
+ */
+public final class CollectionUtils {
+
+    /** This class cannot be instantiated. */
+    private CollectionUtils() {
+        throw new AssertionError();
+    }
+
+    /**
+     * An empty, unmodifiable, serializable, sorted set used for
+     * emptySortedSet.
+     */
+    private static class EmptySortedSet<E> extends AbstractSet<E>
+        implements SortedSet<E>, Serializable {
+
+        private static final long serialVersionUID = 1;
+
+        @SuppressWarnings("rawtypes")
+        static final SortedSet INSTANCE = new EmptySortedSet();
+
+        @SuppressWarnings("rawtypes")
+        private static Iterator ITER = new Iterator() {
+            @Override
+            public boolean hasNext() { return false; }
+            @Override
+            public Object next() { throw new NoSuchElementException(); }
+            @Override
+            public void remove() {
+                throw new UnsupportedOperationException("remove");
+            }
+        };
+
+        /* Implement SortedSet */
+
+        @Override
+        public Comparator<? super E> comparator() { return null; }
+        @Override
+        public SortedSet<E> subSet(E fromElement, E toElement) {
+            return emptySortedSet();
+        }
+        @Override
+        public SortedSet<E> headSet(E toElement) { return emptySortedSet(); }
+        @Override
+        public SortedSet<E> tailSet(E fromElement) { return emptySortedSet(); }
+        @Override
+        public E first() { throw new NoSuchElementException(); }
+        @Override
+        public E last() { throw new NoSuchElementException(); }
+
+        /* Implement Set */
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public Iterator<E> iterator() { return (Iterator<E>) ITER; }
+        @Override
+        public int size() { return 0; }
+
+        /** Use canonical instance. */
+        private Object readResolve() { return INSTANCE; }
+    }
+
+    /**
+     * An empty, unmodifiable, serializable, sorted map used for
+     * emptySortedMap.
+     */
+    private static class EmptySortedMap<K, V> extends AbstractMap<K, V>
+        implements SortedMap<K, V>, Serializable {
+
+        private static final long serialVersionUID = 1;
+
+        @SuppressWarnings("rawtypes")
+        static final SortedMap INSTANCE =
+            new EmptySortedMap();
+
+        /* Implement SortedMap */
+
+        @Override
+        public Comparator<? super K> comparator() { return null; }
+        @Override
+        public SortedMap<K, V> subMap(K fromKey, K toKey) {
+            return emptySortedMap();
+        }
+        @Override
+        public SortedMap<K, V> headMap(K toKey) { return emptySortedMap(); }
+        @Override
+        public SortedMap<K, V> tailMap(K fromKey) { return emptySortedMap(); }
+        @Override
+        public K firstKey() { throw new NoSuchElementException(); }
+        @Override
+        public K lastKey() { throw new NoSuchElementException(); }
+
+        /* Implement Map */
+
+        @Override
+        public Set<Entry<K, V>> entrySet() { return emptySet(); }
+
+        /** Use canonical instance. */
+        private Object readResolve() { return INSTANCE; }
+    }
+
+    /**
+     * Returns an empty, immutable, serializable sorted set.
+     *
+     * @param <E> the element type
+     * @return the empty sorted set
+     */
+    /* TODO: Replace with Collections.emptySortedSet in Java 8 */
+    @SuppressWarnings("unchecked")
+    public static <E> SortedSet<E> emptySortedSet() {
+        return (SortedSet<E>) EmptySortedSet.INSTANCE;
+    }
+
+    /**
+     * Returns an empty, immutable, serializable sorted map.
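+     *
+     * <p>Typical usage (illustrative):
+     * {@code SortedMap<String, Long> empty = CollectionUtils.emptySortedMap();}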
+     *
+     * @param <K> the key type
+     * @param <V> the value type
+     * @return the empty sorted map
+     */
+    /* TODO: Replace with Collections.emptySortedMap in Java 8 */
+    @SuppressWarnings("unchecked")
+    public static <K, V> SortedMap<K, V> emptySortedMap() {
+        return (SortedMap<K, V>) EmptySortedMap.INSTANCE;
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/ConfiguredRedirectHandler.java b/src/com/sleepycat/je/utilint/ConfiguredRedirectHandler.java
new file mode 100644
index 0000000..79bf462
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/ConfiguredRedirectHandler.java
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.logging.Handler;
+import java.util.logging.LogRecord;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Redirects logging messages to the owning environment's application
+ * configured handler, if one was specified through
+ * EnvironmentConfig.setLoggingHandler(). Handlers for JE logging can be
+ * configured through EnvironmentConfig, to support handlers which:
+ * - require a constructor with arguments
+ * - are specific to this environment, when multiple environments exist in
+ *   the same process.
+ */
+public class ConfiguredRedirectHandler extends Handler {
+
+    public ConfiguredRedirectHandler() {
+        /* No need to call super, this handler is not truly publishing. */
+    }
+
+    @Override
+    public void publish(LogRecord record) {
+        Handler h = getEnvSpecificConfiguredHandler();
+        if ((h != null) && (h.isLoggable(record))) {
+            h.publish(record);
+        }
+    }
+
+    private Handler getEnvSpecificConfiguredHandler() {
+        EnvironmentImpl envImpl =
+            LoggerUtils.envMap.get(Thread.currentThread());
+
+        /*
+         * Prefer to lose logging output, rather than risk a
+         * NullPointerException if the caller forgets to set and release the
+         * environmentImpl.
+         */
+        if (envImpl == null) {
+            return null;
+        }
+
+        return envImpl.getConfiguredHandler();
+    }
+
+    @Override
+    public void close()
+        throws SecurityException {
+        Handler h = getEnvSpecificConfiguredHandler();
+        if (h != null) {
+            h.close();
+        }
+    }
+
+    @Override
+    public void flush() {
+        Handler h = getEnvSpecificConfiguredHandler();
+        if (h != null) {
+            h.flush();
+        }
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/ConsoleRedirectHandler.java b/src/com/sleepycat/je/utilint/ConsoleRedirectHandler.java
new file mode 100644
index 0000000..9115bad
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/ConsoleRedirectHandler.java
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.logging.LogRecord;
+
+import com.sleepycat.je.dbi.EnvironmentImpl;
+
+/**
+ * Redirects logging messages to the owning environment's console handler, so
+ * that messages can be prefixed with an environment name. See LoggerUtils.java
+ * for an explanation of why loggers must be instantiated per-class rather than
+ * per-class-instance.
+ *
+ * In rare cases, this ConsoleHandler may be used to actually publish on its
+ * own.
+ */
+public class ConsoleRedirectHandler extends java.util.logging.ConsoleHandler {
+
+    public ConsoleRedirectHandler() {
+        super();
+    }
+
+    @Override
+    public void publish(LogRecord record) {
+        EnvironmentImpl envImpl =
+            LoggerUtils.envMap.get(Thread.currentThread());
+
+        /*
+         * If the caller forgets to set and release the envImpl so there is no
+         * envImpl, or if we are logging before the envImpl is completely set,
+         * log to the generic ConsoleHandler without an identifying
+         * prefix. That way, we get a message, but don't risk a
+         * NullPointerException.
+         */
+        if (envImpl == null) {
+            super.publish(record);
+            return;
+        }
+
+        if (envImpl.getConsoleHandler() == null) {
+            super.publish(record);
+            return;
+        }
+
+        envImpl.getConsoleHandler().publish(record);
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/CronScheduleParser.java b/src/com/sleepycat/je/utilint/CronScheduleParser.java
new file mode 100644
index 0000000..8de8312
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/CronScheduleParser.java
@@ -0,0 +1,352 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import java.util.Calendar;
+
+import com.sleepycat.je.EnvironmentConfig;
+
+/**
+ * This class parses {@link EnvironmentConfig#VERIFY_SCHEDULE}, which is a
+ * cron-style expression.
+ *
+ * <p>A cron-style expression can be very complicated, for example containing
+ * *, ?, / and so on. For now we only handle the simplest cases; code to
+ * handle more complicated situations will be added as needed.
+ *
+ * <p>Constraints for the current version:
+ * <ul>
+ * <li>The standard string is "* * * * *", i.e. there are 5 fields separated
+ * by 4 blank spaces.
+ * <li>Each field can only be an int value or *.
+ * <li>dayOfMonth and dayOfWeek can not be specified simultaneously.
+ * <li>dayOfMonth and month can not be specified at all. If they could be, we
+ * would need to consider the number of days in each month, and whether the
+ * year is a leap year for February. The varying number of days per month
+ * makes it complicated to calculate the delay and the interval.
+ * <li>If a field is an int value, the value must be in the correct range,
+ * i.e. minute (0-59), hour (0-23) and dayOfWeek (0-6), where dayOfMonth
+ * (1-31) and month (1-12) can not be specified.
+ * <li>If dayOfWeek is a concrete value, then minute and hour can not be '*'.
+ * For example, we can not use "0 * * * 5" to specify that the verifier
+ * should run every hour only on Friday, because calculating the stop time
+ * point would be complicated and would require at least one more variable.
+ * <li>For the same reason, if hour is a concrete value, minute can not
+ * be '*'.
+ * </ul>
+ */
+public class CronScheduleParser {
+    private static String errorMess =
+        "The style of " + EnvironmentConfig.VERIFY_SCHEDULE +
+        " is not right. ";
+    private static int spaceNum = 4;
+    private static int fieldNum = 5;
+
+    public static String nullCons = "The argument should not be null.";
+    public static String cons1 =
+        "The standard string should be '* * * * *', i.e. there are " +
+        fieldNum + " fields and " + spaceNum + " blank spaces.";
+    public static String cons2 = "Each field can only be an int value or *.";
+    public static String cons3 =
+        "Can not specify dayOfWeek and dayOfMonth simultaneously.";
+    public static String cons4 = "Can not specify dayOfMonth or month.";
+    public static String cons5 = "Range Error: ";
+    public static String cons6 =
+        "If the day of the week is a concrete value, then the minute and" +
+        " the hour should also be concrete.";
+    public static String cons7 =
+        "If the hour is a concrete value, then the minute should also be" +
+        " concrete.";
+
+    private static long millsOneDay = 24 * 60 * 60 * 1000;
+    private static long millsOneHour = 60 * 60 * 1000;
+    private static long millsOneMinute = 60 * 1000;
+
+    private long delay;
+    private long interval;
+
+    public static Calendar curCal;
+    public static TestHook setCurCalHook;
+
+    /**
+     * The constructor first validates the cron-style string, then parses it
+     * to compute the interval of the cron-style task represented by the
+     * string and the wait time (delay) until the task first starts.
+     *
+     * @param cronSchedule The cron-style string.
+     */
+    public CronScheduleParser(String cronSchedule) {
+        validate(cronSchedule);
+        parser(cronSchedule);
+    }
+
+    /**
+     * Checks whether two cron-style strings are the same, i.e. both are null
+     * or the contents of the two strings are equal.
+     *
+     * @param cronSchedule1 The first cron-style string.
+     * @param cronSchedule2 The second cron-style string.
+     *
+     * @return true if the two cron-style strings are the same.
+     */
+    public static boolean checkSame(
+        final String cronSchedule1,
+        final String cronSchedule2) {
+
+        if (cronSchedule1 == null && cronSchedule2 == null) {
+            return true;
+        }
+
+        if (cronSchedule1 == null || cronSchedule2 == null) {
+            return false;
+        }
+
+        return cronSchedule1.equals(cronSchedule2);
+    }
+
+    /**
+     * @return delay The wait time until the cron-style task represented by
+     * the cron-style string first starts.
+     */
+    public long getDelayTime() {
+        return delay;
+    }
+
+    /**
+     * @return interval The interval of the cron-style task represented by the
+     * cron-style string.
+     */
+    public long getInterval() {
+        return interval;
+    }
+
+    private void assertDelay() {
+        assert delay >= 0 :
+            "Delay is: " + delay + "; interval is: " + interval;
+    }
+
+    private void parser(final String cronSchedule) {
+        /* Get or set the current calendar. */
+        curCal = Calendar.getInstance();
+        curCal.set(Calendar.SECOND, 0);
+        curCal.set(Calendar.MILLISECOND, 0);
+        if (setCurCalHook != null) {
+            setCurCalHook.doHook();
+        }
+        int curDayOfWeek = curCal.get(Calendar.DAY_OF_WEEK);
+        int curHour = curCal.get(Calendar.HOUR_OF_DAY);
+        int curMinute = curCal.get(Calendar.MINUTE);
+
+        /*
+         * Previously, we used Calendar.getInstance() to initialize
+         * scheduleCal, which aimed to give scheduleCal some of the same
+         * attributes as curCal, such as the day of the week. But since
+         * setCurCalHook.doHook may set curCal to a future week,
+         * Calendar.getInstance() can no longer achieve the original purpose.
+ */ + Calendar scheduleCal = (Calendar) curCal.clone(); + scheduleCal.set(Calendar.SECOND, 0); + scheduleCal.set(Calendar.MILLISECOND, 0); + String[] timeArray = cronSchedule.split(" "); + + /* dayofWeek is a concrete value. */ + if (!timeArray[4].equals("*")) { + interval = 7 * millsOneDay; + int tmpDayOfWeek = Integer.valueOf(timeArray[4]) + 1; + int tmpHour = Integer.valueOf(timeArray[1]); + int tmpMinute = Integer.valueOf(timeArray[0]); + + scheduleCal.set(Calendar.DAY_OF_WEEK, tmpDayOfWeek); + scheduleCal.set(Calendar.HOUR_OF_DAY, tmpHour); + scheduleCal.set(Calendar.MINUTE, tmpMinute); + + if (tmpDayOfWeek < curDayOfWeek || + (tmpDayOfWeek == curDayOfWeek && tmpHour < curHour) || + (tmpDayOfWeek == curDayOfWeek && tmpHour == curHour && + tmpMinute < curMinute)) { + /* add 7 days to set next week */ + scheduleCal.add(Calendar.DATE, 7); + } + delay = scheduleCal.getTimeInMillis() - curCal.getTimeInMillis(); + + assertDelay(); + return; + } + + if (!timeArray[1].equals("*")) { + interval = millsOneDay; + int tmpHour = Integer.valueOf(timeArray[1]); + int tmpMinute = Integer.valueOf(timeArray[0]); + + /* + * Guarantee that both dayOfWeek is same when dayOfWeek is * in + * cronSchedule. + */ + scheduleCal.set(Calendar.DAY_OF_WEEK, curDayOfWeek); + scheduleCal.set(Calendar.HOUR_OF_DAY, tmpHour); + scheduleCal.set(Calendar.MINUTE, tmpMinute); + + if (tmpHour < curHour || + (tmpHour == curHour && tmpMinute < curMinute)) { + /* to set next day */ + scheduleCal.add(Calendar.DATE, 1); + } + delay = scheduleCal.getTimeInMillis() - curCal.getTimeInMillis(); + + assertDelay(); + return; + } + + if (!timeArray[0].equals("*")) { + interval = millsOneHour; + int tmpMinute = Integer.valueOf(timeArray[0]); + + /* + * Guarantee that both dayOfWeek and both hour are same whe + * dayOfWeek and hour are * in cronSchedule. + */ + scheduleCal.set(Calendar.DAY_OF_WEEK, curDayOfWeek); + scheduleCal.set(Calendar.HOUR_OF_DAY, curHour); + scheduleCal.set(Calendar.MINUTE, tmpMinute); + + if (tmpMinute < curMinute) { + /* to set next hour */ + scheduleCal.add(Calendar.HOUR, 1); + } + delay = scheduleCal.getTimeInMillis() - curCal.getTimeInMillis(); + + assertDelay(); + return; + } + + if (timeArray[0].equals("*")) { + interval = millsOneMinute; + delay = 0; + assertDelay(); + return; + } + } + + private void validate(final String cronSchedule) { + + if (cronSchedule == null) { + throw new IllegalArgumentException(errorMess + nullCons); + } + + /* + * Constraint 1: The standard string should be "* * * * *", i.e. + * there are 5 fields and 4 blank space. + */ + int spaceCount = 0; + for (int i = 0; i < cronSchedule.length(); i++) { + char c = cronSchedule.charAt(i); + if (c == 32 ) { /* The ASCII value of ' ' is 32. */ + spaceCount++; + } + } + if (spaceCount != spaceNum || + cronSchedule.split(" ").length != fieldNum) { + throw new IllegalArgumentException(errorMess + cons1); + } + + String[] timeArray = cronSchedule.split(" "); + /* + * Constraint 2: Each filed can only be an int value or *. + */ + for (String str : timeArray) { + try { + Integer.valueOf(str); + } catch (NumberFormatException e) { + if (!str.equals("*")) { + throw new IllegalArgumentException(errorMess + cons2); + } + } + } + + /* + * Constraint 3: Can not specify dayOfMonth and dayOfWeek + * simultaneously. + */ + if (!timeArray[2].equals("*") && !timeArray[4].equals("*")) { + throw new IllegalArgumentException(errorMess + cons3); + } + + /* + * Constraint 4: Can not specify dayOfMonth or month. 
+ */ + if (!timeArray[2].equals("*") || !timeArray[3].equals("*")) { + throw new IllegalArgumentException(errorMess + cons4); + } + + /* + * Constraint 5: If the field is a int value, then the value should + * be in the correct range. + */ + if (!timeArray[0].equals("*")) { + int min = Integer.valueOf(timeArray[0]); + if (min < 0 || min > 59) { + throw new IllegalArgumentException + (errorMess + cons5 + "The minute should be (0-59)."); + } + } + + if (!timeArray[1].equals("*")) { + int hour = Integer.valueOf(timeArray[1]); + if (hour < 0 || hour > 23) { + throw new IllegalArgumentException + (errorMess + cons5 + "The hour should be (0-23)."); + } + } + + if (!timeArray[4].equals("*")) { + int dayOfWeek = Integer.valueOf(timeArray[4]); + if (dayOfWeek < 0 || dayOfWeek > 6) { + throw new IllegalArgumentException + (errorMess + cons5 + "The day of the week should" + + "be (0-6)."); + } + } + + /* + * Constraint 6: If dayOfWeek is a concrete value, then minute or + * hour can not be '*'. + */ + if (!timeArray[4].equals("*")) { + if (timeArray[0].equals("*") || timeArray[1].equals("*")) { + throw new IllegalArgumentException(errorMess + cons6); + } + } + + /* + * Constraint 7: If hour is a concrete value, minute can not be '*'. + */ + if (!timeArray[1].equals("*")) { + if (timeArray[0].equals("*")) { + throw new IllegalArgumentException(errorMess + cons7); + } + } + + /* + if (timeArray[0].equals("*")) { + throw new IllegalArgumentException + (errorMes + "User specify the verifier to run every minute." + + "This is too frequent."); + } + */ + } +} \ No newline at end of file diff --git a/src/com/sleepycat/je/utilint/DaemonRunner.java b/src/com/sleepycat/je/utilint/DaemonRunner.java new file mode 100644 index 0000000..02603be --- /dev/null +++ b/src/com/sleepycat/je/utilint/DaemonRunner.java @@ -0,0 +1,25 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * An object capable of running (run/pause/shutdown/etc) a daemon thread. + * See DaemonThread for details. + */ +public interface DaemonRunner { + void runOrPause(boolean run); + void requestShutdown(); + void shutdown(); + int getNWakeupRequests(); +} diff --git a/src/com/sleepycat/je/utilint/DaemonThread.java b/src/com/sleepycat/je/utilint/DaemonThread.java new file mode 100644 index 0000000..b9c8d89 --- /dev/null +++ b/src/com/sleepycat/je/utilint/DaemonThread.java @@ -0,0 +1,292 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
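
As a usage sketch for the CronScheduleParser above (values depend on the
current clock, so the results vary): a daily schedule yields a one-day
interval and a delay to the next occurrence, while an invalid schedule fails
validation up front.

    /* "30 2 * * *": run at 02:30 every day. */
    CronScheduleParser p = new CronScheduleParser("30 2 * * *");
    long delayMs = p.getDelayTime();   /* millis until the next 02:30 */
    long intervalMs = p.getInterval(); /* one day, in millis */

    /* Violates constraint 6: dayOfWeek is concrete but hour is '*'. */
    try {
        new CronScheduleParser("0 * * * 5");
    } catch (IllegalArgumentException expected) {
    }
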
+ */ + +package com.sleepycat.je.utilint; + +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.ExceptionListener; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * A daemon thread. Also see StoppableThread for an alternative daemon + * construct. + */ +public abstract class DaemonThread implements DaemonRunner, Runnable { + + private static final int JOIN_MILLIS = 10; + private volatile long waitTime; + private final Object synchronizer = new Object(); + private Thread thread; + protected String name; + protected int nWakeupRequests; + public static boolean stifleExceptionChatter = false; + + /* Fields shared between threads must be 'volatile'. */ + private volatile boolean shutdownRequest = false; + private volatile boolean paused = false; + + /* This is not volatile because it is only an approximation. */ + private boolean running = false; + + /* Fields for DaemonErrorListener, enabled only during testing. */ + protected final EnvironmentImpl envImpl; + private static final String ERROR_LISTENER = "setErrorListener"; + /* Logger used in DaemonThread's subclasses. */ + protected final Logger logger; + + public DaemonThread(final long waitTime, + final String name, + final EnvironmentImpl envImpl) { + this.waitTime = waitTime; + this.name = envImpl.makeDaemonThreadName(name); + this.envImpl = envImpl; + this.logger = createLogger(); + } + + protected Logger createLogger() { + return LoggerUtils.getLogger(getClass()); + } + + /** + * For testing. + */ + public Thread getThread() { + return thread; + } + + /** + * If run is true, starts the thread if not started or unpauses it + * if already started; if run is false, pauses the thread if + * started or does nothing if not started. + * + * Note that no thread is created unless run is true at some time. That + * way, threads are conserved in cases where the app wants to run their + * own threads. This can be important when many JE envs are in the same + * process, in which case a shared cache is often used. + */ + public void runOrPause(boolean run) { + if (run) { + paused = false; + if (thread != null) { + wakeup(); + } else { + thread = new Thread(this, name); + thread.setDaemon(true); + thread.start(); + } + } else { + paused = true; + } + } + + public void requestShutdown() { + shutdownRequest = true; + } + + /** + * Requests shutdown and calls join() to wait for the thread to stop. + */ + public void shutdown() { + if (thread != null) { + shutdownRequest = true; + while (thread.isAlive()) { + synchronized (synchronizer) { + synchronizer.notifyAll(); + } + try { + thread.join(JOIN_MILLIS); + } catch (InterruptedException e) { + + /* + * Klockwork - ok + * Don't say anything about exceptions here. + */ + } + } + thread = null; + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(""); + return sb.toString(); + } + + public void wakeup() { + if (!paused) { + synchronized (synchronizer) { + synchronizer.notifyAll(); + } + } + } + + public void run() { + while (!shutdownRequest) { + try { + /* Do a unit of work. 
*/ + int numTries = 0; + long maxRetries = nDeadlockRetries(); + while (numTries <= maxRetries && + !shutdownRequest && + !paused) { + try { + nWakeupRequests++; + running = true; + onWakeup(); + break; + } catch (LockConflictException e) { + } finally { + running = false; + } + numTries++; + } + /* Wait for notify, timeout or interrupt. */ + if (!shutdownRequest) { + synchronized (synchronizer) { + if (waitTime == 0 || paused) { + synchronizer.wait(); + } else { + synchronizer.wait(waitTime); + } + } + } + } catch (InterruptedException e) { + notifyExceptionListener(e); + if (!stifleExceptionChatter) { + logger.info + ("Shutting down " + this + " due to exception: " + e); + } + shutdownRequest = true; + + assert checkErrorListener(e); + } catch (Exception e) { + notifyExceptionListener(e); + if (!stifleExceptionChatter) { + + /* + * If the exception caused the environment to become + * invalid, then shutdownRequest will have been set to true + * by EnvironmentImpl.invalidate, which is called by the + * EnvironmentFailureException ctor. + */ + logger.log(Level.SEVERE, + this.toString() + " caught exception, " + e + + (shutdownRequest ? " Exiting" : " Continuing"), + e); + } + + assert checkErrorListener(e); + } catch (Error e) { + assert checkErrorListener(e); + envImpl.invalidate(e); /* [#21929] */ + notifyExceptionListener(envImpl.getInvalidatingException()); + + /* + * Since there is no uncaught exception handler (yet) we + * shutdown the thread here and log the exception. + */ + shutdownRequest = true; + logger.log(Level.SEVERE, "Error caught in " + this, e); + } + } + } + + private void notifyExceptionListener(Exception e) { + if (envImpl == null) { + return; + } + final ExceptionListener listener = envImpl.getExceptionListener(); + if (listener == null) { + return; + } + listener.exceptionThrown(DbInternal.makeExceptionEvent(e, name)); + } + + /** + * If Daemon Thread throws errors and exceptions, this function will catch + * it and throw a EnvironmentFailureException, and fail the test. + * + * Only used during testing. + */ + public boolean checkErrorListener(Throwable e) { + if (Boolean.getBoolean(ERROR_LISTENER)) { + if (!stifleExceptionChatter) { + logger.severe(name + " " + LoggerUtils.getStackTrace(e)); + } + new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.TEST_INVALIDATE, + "Daemon thread failed during testing", e); + } + + return true; + } + + /** + * Returns the number of retries to perform when Deadlock Exceptions + * occur. + */ + protected long nDeadlockRetries() { + return 0; + } + + /** + * onWakeup is synchronized to ensure that multiple invocations of the + * DaemonThread aren't made. + */ + abstract protected void onWakeup() + throws DatabaseException; + + /** + * Returns whether shutdown has been requested. This method should be + * used to to terminate daemon loops. + */ + protected boolean isShutdownRequested() { + return shutdownRequest; + } + + /** + * Returns whether the daemon is currently paused/disabled. This method + * should be used to to terminate daemon loops. + */ + protected boolean isPaused() { + return paused; + } + + /** + * Returns whether the onWakeup method is currently executing. This is + * only an approximation and is used to avoid unnecessary wakeups. + */ + public boolean isRunning() { + return running; + } + + public void setWaitTime(long waitTime) { + this.waitTime = waitTime; + } + + /** + * For unit testing. 
+ */ + public int getNWakeupRequests() { + return nWakeupRequests; + } +} diff --git a/src/com/sleepycat/je/utilint/DatabaseUtil.java b/src/com/sleepycat/je/utilint/DatabaseUtil.java new file mode 100644 index 0000000..de40650 --- /dev/null +++ b/src/com/sleepycat/je/utilint/DatabaseUtil.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.DatabaseEntry; + +/** + * Utils for use in the db package. + */ +public class DatabaseUtil { + + /* + * The global JE test mode flag. When true, certain instrumentation is + * turned on. This flag is always true during unit testing. + */ + public static final boolean TEST = Boolean.getBoolean("JE_TEST"); + + /** + * Throw an exception if the parameter is null. + * + * @throws IllegalArgumentException via any API method + */ + public static void checkForNullParam(final Object param, + final String name) { + if (param == null) { + throw new IllegalArgumentException(name + " cannot be null"); + } + } + + /** + * Throw an exception if the parameter is a null or 0-length array. + * + * @throws IllegalArgumentException via any API method + */ + public static void checkForZeroLengthArrayParam(final Object[] param, + final String name) { + checkForNullParam(param, name); + + if (param.length == 0) { + throw new IllegalArgumentException( + "'" + name + "' param cannot be zero length"); + } + } + + /** + * Throw an exception if the entry is null or the data field is not set. + * + * @throws IllegalArgumentException via any API method that takes a + * required DatabaseEntry param + */ + public static void checkForNullDbt(final DatabaseEntry entry, + final String name, + final boolean checkData) { + if (entry == null) { + throw new IllegalArgumentException( + "'" + name + "' param cannot be null"); + } + + if (checkData) { + if (entry.getData() == null) { + throw new IllegalArgumentException( + "Data field for '" + name + "' param cannot be null"); + } + } + } + + /** + * Throw an exception if the entry has the partial flag set. + */ + public static void checkForPartial(final DatabaseEntry entry, + final String name) { + if (entry.getPartial()) { + throw new IllegalArgumentException( + "'" + name + "' param may not be partial"); + } + } +} diff --git a/src/com/sleepycat/je/utilint/DbCacheSizeRepEnv.java b/src/com/sleepycat/je/utilint/DbCacheSizeRepEnv.java new file mode 100644 index 0000000..48054f1 --- /dev/null +++ b/src/com/sleepycat/je/utilint/DbCacheSizeRepEnv.java @@ -0,0 +1,31 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
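
To make the DaemonThread contract above concrete, here is an illustrative
subclass sketch. EnvironmentImpl is an internal handle supplied by JE, so
this is not something applications construct directly; the class name and
wait time are hypothetical.

    /* Illustrative only: one unit of work per wakeup, every 5 seconds. */
    public class SweeperDaemon extends DaemonThread {

        public SweeperDaemon(EnvironmentImpl envImpl) {
            super(5000 /* waitTime, ms */, "Sweeper", envImpl);
        }

        @Override
        protected void onWakeup() {
            if (isShutdownRequested() || isPaused()) {
                return;
            }
            /* ... perform one unit of work ... */
        }
    }

Typical lifecycle: runOrPause(true) starts (or unpauses) the thread,
runOrPause(false) pauses it, and shutdown() wakes it and joins until it
exits.
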
+ */ + +package com.sleepycat.je.utilint; + +import java.io.File; +import java.util.Map; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; + +/** + * Interface for opening a ReplicatedEnvironment from a JE standalone utility, + * DbCacheSize. Implemented by com.sleepycat.je.rep.utilint.DbCacheSizeRepEnv, + * which must be instantiated from standalone JE using Class.forName. + */ +public interface DbCacheSizeRepEnv { + public Environment open(File envHome, + EnvironmentConfig envConfig, + Map repParams); +} diff --git a/src/com/sleepycat/je/utilint/DbLsn.java b/src/com/sleepycat/je/utilint/DbLsn.java new file mode 100644 index 0000000..d54e58a --- /dev/null +++ b/src/com/sleepycat/je/utilint/DbLsn.java @@ -0,0 +1,285 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.File; +import java.util.Arrays; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.tree.TreeUtils; + +/** + * DbLsn is a class that operates on Log Sequence Numbers (LSNs). An LSN is a + * long comprised of a file number (32b) and offset within that file (32b) + * which references a unique record in the database environment log. While + * LSNs are represented as long's, we operate on them using an abstraction and + * return longs from these methods so that we don't have to worry about the + * lack of unsigned quantities. + */ +public class DbLsn { + static final long INT_MASK = 0xFFFFFFFFL; + + public static final long MAX_FILE_OFFSET = 0xFFFFFFFFL; + + /* Signifies a transient LSN. */ + private static final long MAX_FILE_NUM = 0xFFFFFFFFL; + + public static final long NULL_LSN = -1; + + private DbLsn() { + } + + public static long makeLsn(long fileNumber, long fileOffset) { + return fileOffset & INT_MASK | + ((fileNumber & INT_MASK) << 32); + } + + /** + * This flavor of makeLsn is used when the file offset has been stored + * in 32 bits, as is done in the VLSNBucket. + */ + public static long makeLsn(long fileNumber, int fileOffset) { + return fileOffset & INT_MASK | + ((fileNumber & INT_MASK) << 32); + } + + /** + * For transient LSNs we use the MAX_FILE_NUM and the ascending sequence of + * offsets. + */ + public static long makeTransientLsn(long fileOffset) { + return makeLsn(DbLsn.MAX_FILE_NUM, fileOffset); + } + + /** + * A transient LSN is defined as one with a file number of MAX_FILE_NUM. + */ + public static boolean isTransient(long lsn) { + return getFileNumber(lsn) == MAX_FILE_NUM; + } + + public static boolean isTransientOrNull(long lsn) { + return lsn == NULL_LSN || isTransient(lsn); + } + + public static long longToLsn(Long lsn) { + if (lsn == null) { + return NULL_LSN; + } + + return lsn.longValue(); + } + + /** + * Return the file number for this DbLsn. + * @return the number for this DbLsn. + */ + public static long getFileNumber(long lsn) { + return (lsn >> 32) & INT_MASK; + } + + /** + * Return the file offset for this DbLsn. + * @return the offset for this DbLsn. 
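
A quick sketch of the DbLsn bit layout described above, with the file number
in the high 32 bits and the offset in the low 32 bits:

    long lsn = DbLsn.makeLsn(3L, 0x10L);       /* file 3, offset 16 */
    assert DbLsn.getFileNumber(lsn) == 3L;
    assert DbLsn.getFileOffset(lsn) == 0x10L;
    System.out.println(DbLsn.getNoFormatString(lsn)); /* "0x3/0x10" */
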
+ */ + public static long getFileOffset(long lsn) { + return (lsn & INT_MASK); + } + + /* + * The file offset is really an unsigned int. If we are using the + * file offset as a value, we must be careful to manipulate it as a long + * in order not to lose the last bit of data. If we are only storing + * the file offset, we can treat it as an Integer in order to save + * 32 bits of space. + */ + public static int getFileOffsetAsInt(long lsn) { + return (int) getFileOffset(lsn); + } + + public static long convertIntFileOffsetToLong(int storedLsn) { + return storedLsn & 0xffffffffL; + } + + private static int compareLong(long l1, long l2) { + if (l1 < l2) { + return -1; + } else if (l1 > l2) { + return 1; + } else { + return 0; + } + } + + public static int compareTo(long lsn1, long lsn2) { + if (lsn1 == NULL_LSN || + lsn2 == NULL_LSN) { + throw EnvironmentFailureException.unexpectedState + ("NULL_LSN lsn1=" + getNoFormatString(lsn1) + + " lsn2=" + getNoFormatString(lsn2)); + } + + long fileNumber1 = getFileNumber(lsn1); + long fileNumber2 = getFileNumber(lsn2); + if (fileNumber1 == fileNumber2) { + return compareLong(getFileOffset(lsn1), getFileOffset(lsn2)); + } + return compareLong(fileNumber1, fileNumber2); + } + + public static String toString(long lsn) { + return ""; + } + + public static String getNoFormatString(long lsn) { + return "0x" + Long.toHexString(getFileNumber(lsn)) + "/0x" + + Long.toHexString(getFileOffset(lsn)); + } + + public static String dumpString(long lsn, int nSpaces) { + StringBuilder sb = new StringBuilder(); + sb.append(TreeUtils.indent(nSpaces)); + sb.append(toString(lsn)); + return sb.toString(); + } + + /** + * Return the logsize in bytes between these two LSNs. This is an + * approximation; the logs might actually be a little more or less in + * size. This assumes that no log files have been cleaned. + */ + public static long getNoCleaningDistance(long thisLsn, + long otherLsn, + long logFileSize) { + long diff = 0; + + assert thisLsn != NULL_LSN; + /* First figure out how many files lay between the two. */ + long myFile = getFileNumber(thisLsn); + if (otherLsn == NULL_LSN) { + otherLsn = 0; + } + long otherFile = getFileNumber(otherLsn); + if (myFile == otherFile) { + diff = Math.abs(getFileOffset(thisLsn) - getFileOffset(otherLsn)); + } else if (myFile > otherFile) { + diff = calcDiff(myFile - otherFile, + logFileSize, thisLsn, otherLsn); + } else { + diff = calcDiff(otherFile - myFile, + logFileSize, otherLsn, thisLsn); + } + return diff; + } + + /** + * Return the logsize in bytes between these two LSNs. This is an + * approximation; the logs might actually be a little more or less in + * size. This assumes that log files might have been cleaned. + */ + public static long getWithCleaningDistance(long thisLsn, + long otherLsn, + long logFileSize, + FileManager fileManager) { + long diff = 0; + + assert thisLsn != NULL_LSN; + /* First figure out how many files lay between the two. */ + long myFile = getFileNumber(thisLsn); + if (otherLsn == NULL_LSN) { + otherLsn = 0; + } + long otherFile = getFileNumber(otherLsn); + if (myFile == otherFile) { + diff = Math.abs(getFileOffset(thisLsn) - getFileOffset(otherLsn)); + } else { + /* Figure out how many files lie between. 
*/ + Long[] fileNums = fileManager.getAllFileNumbers(); + int myFileIdx = Arrays.binarySearch(fileNums, + Long.valueOf(myFile)); + int otherFileIdx = + Arrays.binarySearch(fileNums, Long.valueOf(otherFile)); + if (myFileIdx > otherFileIdx) { + diff = calcDiff(myFileIdx - otherFileIdx, + logFileSize, thisLsn, otherLsn); + } else { + diff = calcDiff(otherFileIdx - myFileIdx, + logFileSize, otherLsn, thisLsn); + } + } + return diff; + } + + private static long calcDiff(long fileDistance, + long logFileSize, + long laterLsn, + long earlierLsn) { + long diff = fileDistance * logFileSize; + diff += getFileOffset(laterLsn); + diff -= getFileOffset(earlierLsn); + return diff; + } + + /** + * Returns the number of bytes between two LSNs, counting the true size of + * each intermediate file. Assumes that all files in the LSN range are + * currently protected from cleaner deletion, e.g., during recovery. Uses + * File.length and does not perturb the FileManager's file handle cache. + */ + public static long getTrueDistance(final long thisLsn, + final long otherLsn, + final FileManager fileManager) { + + final long lsn1; + final long lsn2; + + if (compareTo(thisLsn, otherLsn) < 0) { + lsn1 = thisLsn; + lsn2 = otherLsn; + } else { + lsn1 = otherLsn; + lsn2 = thisLsn; + } + + final long file1 = getFileNumber(lsn1); + final long file2 = getFileNumber(lsn2); + + long dist = getFileOffset(lsn2) - getFileOffset(lsn1); + + if (file1 == file2) { + return dist; + } + + final Long[] fileNums = fileManager.getAllFileNumbers(); + + final int idx1 = Arrays.binarySearch(fileNums, file1); + final int idx2 = Arrays.binarySearch(fileNums, file2); + + /* + * File2 has already been counted, and we've already subtracted the + * offset of file1. Add lengths of file1 to file2-1. + */ + for (int i = idx1; i < idx2; i += 1) { + final String path = fileManager.getFullFileName(fileNums[i]); + dist += new File(path).length(); + } + + return dist; + } +} diff --git a/src/com/sleepycat/je/utilint/DoubleExpMovingAvg.java b/src/com/sleepycat/je/utilint/DoubleExpMovingAvg.java new file mode 100644 index 0000000..a470e10 --- /dev/null +++ b/src/com/sleepycat/je/utilint/DoubleExpMovingAvg.java @@ -0,0 +1,197 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.text.DecimalFormat; + +/** + * A double JE stat component generated from an exponential moving average over + * a specified time period of values supplied with associated times, to support + * averaging values that are generated at irregular intervals. + */ +public class DoubleExpMovingAvg + extends MapStatComponent { + + private static final long serialVersionUID = 1L; + + /** Number format for output. */ + static final DecimalFormat FORMAT = + new DecimalFormat("###,###,###,###,###,###,###.##"); + + /** The name of this stat. */ + private final String name; + + /** The averaging period in milliseconds. 
*/ + private final long periodMillis; + + /** + * The time in milliseconds specified with the previous value, or 0 if no + * values have been provided. Synchronize on this instance when accessing + * this field. + */ + private long prevTime; + + /** + * The current average, or 0 if no values have been provided. Synchronize + * on this instance when accessing this field. + */ + private double avg; + + /** + * Creates an instance of this class. The {@code periodMillis} represents + * the time period in milliseconds over which values will be averaged. + * + * @param name the name of this stat + * @param periodMillis the averaging period in milliseconds + */ + public DoubleExpMovingAvg(String name, long periodMillis) { + assert name != null; + assert periodMillis > 0; + this.name = name; + this.periodMillis = periodMillis; + } + + /** + * Creates an instance of this class as a copy of another instance. + * + * @param other the other instance to copy + */ + DoubleExpMovingAvg(DoubleExpMovingAvg other) { + name = other.name; + periodMillis = other.periodMillis; + synchronized (this) { + synchronized (other) { + prevTime = other.prevTime; + avg = other.avg; + } + } + } + + /** + * Returns the name of this stat. + * + * @return the name of this stat + */ + public String getName() { + return name; + } + + /** + * Adds a new value to the average, ignoring values that are not newer than + * time of the previous call. + * + * @param value the new value + * @param time the current time in milliseconds + */ + public synchronized void add(double value, long time) { + assert time > 0; + if (time <= prevTime) { + return; + } + if (prevTime == 0) { + avg = value; + } else { + + /* + * Compute the exponential moving average, as described in: + * http://en.wikipedia.org/wiki/ + * Moving_average#Application_to_measuring_computer_performance + */ + double m = Math.exp(-((time - prevTime)/((double) periodMillis))); + avg = ((1-m) * value) + (m * avg); + } + prevTime = time; + } + + /** + * Add the values from another average. + * + * @param other the other average + */ + public void add(DoubleExpMovingAvg other) { + final double otherValue; + final long otherTime; + synchronized (other) { + if (other.isNotSet()) { + return; + } + otherValue = other.avg; + otherTime = other.prevTime; + } + add(otherValue, otherTime); + } + + /** Returns the current average as a primitive value. */ + synchronized double getPrimitive() { + return avg; + } + + /** Returns the current average, or 0 if no values have been added. */ + @Override + public Double get() { + return getPrimitive(); + } + + @Override + public synchronized void clear() { + prevTime = 0; + avg = 0; + } + + @Override + public DoubleExpMovingAvg copy() { + return new DoubleExpMovingAvg(this); + } + + @Override + protected synchronized String getFormattedValue(boolean useCommas) { + if (isNotSet()) { + return "unknown"; + } else if (Double.isNaN(avg)) { + return "NaN"; + } else if (useCommas) { + return FORMAT.format(avg); + } else { + return String.format("%.2f", avg); + } + } + + @Override + public synchronized boolean isNotSet() { + return prevTime == 0; + } + + @Override + public synchronized String toString() { + return "DoubleExpMovingAvg[name=" + name + ", avg=" + avg + + ", prevTime=" + prevTime + ", periodMillis=" + periodMillis + "]"; + } + + /** Synchronize access to fields. */ + private synchronized void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException { + + in.defaultReadObject(); + } + + /** Synchronize access to fields. 
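
To illustrate the decay formula in DoubleExpMovingAvg.add with concrete
numbers (a sketch; in JE the stat is normally fed by internal sampling):

    DoubleExpMovingAvg avg = new DoubleExpMovingAvg("rateAvg", 10000);
    avg.add(100.0, 1000);  /* first sample initializes the average to 100 */
    avg.add(200.0, 6000);  /* 5s later: m = exp(-5000/10000) ~ 0.607 */
    System.out.println(avg.get()); /* (1-0.607)*200 + 0.607*100 ~ 139.3 */
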
*/ + private synchronized void writeObject(ObjectOutputStream out) + throws IOException { + + out.defaultWriteObject(); + } +} diff --git a/src/com/sleepycat/je/utilint/EventTrace.java b/src/com/sleepycat/je/utilint/EventTrace.java new file mode 100644 index 0000000..8b3dba7 --- /dev/null +++ b/src/com/sleepycat/je/utilint/EventTrace.java @@ -0,0 +1,334 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.PrintStream; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Internal class used for transient event tracing. Subclass this with + * specific events. Subclasses should have toString methods for display and + * events should be added by calling EventTrace.addEvent(); + */ +public class EventTrace { + private static int MAX_EVENTS = 100; + + public static final boolean TRACE_EVENTS = false; + + static AtomicInteger currentEvent = new AtomicInteger(0); + + static final EventTrace[] events = new EventTrace[MAX_EVENTS]; + static final int[] threadIdHashes = new int[MAX_EVENTS]; + public static volatile boolean disableEvents = false; + + protected String comment; + + public EventTrace(String comment) { + this.comment = comment; + } + + public EventTrace() { + comment = null; + } + + @Override + public String toString() { + return comment; + } + + /** + * Always return true so this method can be used with asserts: + * i.e. assert addEvent(xxx); + */ + public static boolean addEvent(EventTrace event) { + if (disableEvents) { + return true; + } + int nextEventIdx = currentEvent.getAndIncrement() % MAX_EVENTS; + events[nextEventIdx] = event; + threadIdHashes[nextEventIdx] = + System.identityHashCode(Thread.currentThread()); + return true; + } + + /* + * Always return true so this method can be used with asserts: + * i.e. 
assert addEvent(xxx); + */ + public static boolean addEvent(String comment) { + if (disableEvents) { + return true; + } + return addEvent(new EventTrace(comment)); + } + + public static void dumpEvents() { + dumpEvents(System.out); + } + + public static void dumpEvents(PrintStream out) { + + if (disableEvents) { + return; + } + out.println("----- Event Dump -----"); + EventTrace[] oldEvents = events; + int[] oldThreadIdHashes = threadIdHashes; + disableEvents = true; + + int j = 0; + for (int i = currentEvent.get(); j < MAX_EVENTS; i++) { + EventTrace ev = oldEvents[i % MAX_EVENTS]; + if (ev != null) { + int thisEventIdx = i % MAX_EVENTS; + out.print(oldThreadIdHashes[thisEventIdx] + " "); + out.println(j + "(" + thisEventIdx + "): " + ev); + } + j++; + } + } + + public static class ExceptionEventTrace extends EventTrace { + private Exception event; + + public ExceptionEventTrace() { + event = new Exception(); + } + + @Override + public String toString() { + return LoggerUtils.getStackTrace(event); + } + } +} + + /* + public static class EvictEvent extends EventTrace { + long nodeId; + int addr; + + public EvictEvent(String comment, long nodeId, int addr) { + super(comment); + this.nodeId = nodeId; + this.addr = addr; + } + + public static void addEvent(String comment, IN node) { + long nodeId = node.getNodeId(); + int addr = System.identityHashCode(node); + EventTrace.addEvent(new EvictEvent(comment, nodeId, addr)); + } + + public String toString() { + StringBuilder sb = new StringBuilder(comment); + sb.append(" IN: ").append(nodeId); + sb.append(" sIH ").append(addr); + return sb.toString(); + } + } + + public static class CursorTrace extends EventTrace { + long nodeId; + int index; + + public CursorTrace(String comment, long nodeId, int index) { + super(comment); + this.nodeId = nodeId; + this.index = index; + } + + public static void addEvent(String comment, CursorImpl cursor) { + long nodeId = cursor.getCurrentNodeId(); + EventTrace.addEvent + (new CursorTrace(comment, nodeId, cursor.getIndex())); + } + + public String toString() { + StringBuilder sb = new StringBuilder(comment); + sb.append(" BIN: ").append(nodeId); + sb.append(" idx: ").append(index); + return sb.toString(); + } + } + */ + +/* + class CursorEventTrace extends EventTrace { + private String comment; + private Node node1; + private Node node2; + + CursorEventTrace(String comment, Node node1, Node node2) { + this.comment = comment; + this.node1 = node1; + this.node2 = node2; + } + + public String toString() { + StringBuilder sb = new StringBuilder(comment); + if (node1 != null) { + sb.append(" "); + sb.append(node1.getNodeId()); + } + if (node2 != null) { + sb.append(" "); + sb.append(node2.getNodeId()); + } + return sb.toString(); + } + } + +*/ +/* + + static class UndoEventTrace extends EventTrace { + private String comment; + private boolean success; + private Node node; + private DbLsn logLsn; + private Node parent; + private boolean found; + private boolean replaced; + private boolean inserted; + private DbLsn replacedLsn; + private DbLsn abortLsn; + private int index; + + UndoEventTrace(String comment) { + this.comment = comment; + } + + UndoEventTrace(boolean success, + Node node, + DbLsn logLsn, + Node parent, + boolean found, + boolean replaced, + boolean inserted, + DbLsn replacedLsn, + DbLsn abortLsn, + int index) { + this.comment = null; + this.success = success; + this.node = node; + this.logLsn = logLsn; + this.parent = parent; + this.found = found; + this.replaced = replaced; + this.inserted = inserted; + 
this.replacedLsn = replacedLsn; + this.abortLsn = abortLsn; + this.index = index; + } + + public String toString() { + if (comment != null) { + return comment; + } + StringBuilder sb = new StringBuilder(); + sb.append(" success=").append(success); + sb.append(" node="); + sb.append(node.getNodeId()); + sb.append(" logLsn="); + sb.append(logLsn.getNoFormatString()); + if (parent != null) { + sb.append(" parent=").append(parent.getNodeId()); + } + sb.append(" found="); + sb.append(found); + sb.append(" replaced="); + sb.append(replaced); + sb.append(" inserted="); + sb.append(inserted); + if (replacedLsn != null) { + sb.append(" replacedLsn="); + sb.append(replacedLsn.getNoFormatString()); + } + if (abortLsn != null) { + sb.append(" abortLsn="); + sb.append(abortLsn.getNoFormatString()); + } + sb.append(" index=").append(index); + return sb.toString(); + } + } + */ +/* + class CursorAdjustEventTrace extends EventTrace { + private int insertIndex; + private int cursorIndex; + private long nodeId; + + CursorAdjustEventTrace(int insertIndex, int cursorIndex) { + this.insertIndex = insertIndex; + this.cursorIndex = cursorIndex; + this.nodeId = getNodeId(); + } + + public String toString() { + StringBuilder sb = new StringBuilder("cursor adjust "); + sb.append(insertIndex).append(" "); + sb.append(cursorIndex).append(" "); + sb.append(nodeId); + return sb.toString(); + } + } + +*/ +/* + class CompressEventTrace extends EventTrace { + private int entryIndex; + private long nodeId; + + CompressEventTrace(int entryIndex) { + this.entryIndex = entryIndex; + this.nodeId = getNodeId(); + } + + public String toString() { + StringBuilder sb = new StringBuilder("bin compress "); + sb.append(entryIndex).append(" "); + sb.append(nodeId); + return sb.toString(); + } + } + +*/ +/* + class TreeEventTrace extends EventTrace { + private String comment; + private Node node1; + private Node node2; + + TreeEventTrace(String comment, Node node1, Node node2) { + this.comment = comment; + this.node1 = node1; + this.node2 = node2; + } + + public String toString() { + StringBuilder sb = new StringBuilder(comment); + if (node1 != null) { + sb.append(" "); + sb.append(node1.getNodeId()); + } + if (node2 != null) { + sb.append(" "); + sb.append(node2.getNodeId()); + } + return sb.toString(); + } + } + +*/ diff --git a/src/com/sleepycat/je/utilint/FileRedirectHandler.java b/src/com/sleepycat/je/utilint/FileRedirectHandler.java new file mode 100644 index 0000000..b4658ab --- /dev/null +++ b/src/com/sleepycat/je/utilint/FileRedirectHandler.java @@ -0,0 +1,81 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.logging.Handler; +import java.util.logging.LogRecord; + +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Redirects logging messages to the the owning environment's file handler, so + * that messages can be prefixed with an environment name and sent to the + * correct logging file. 
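
For context on how these redirect handlers are reached: an application hands
JE a per-environment handler through EnvironmentConfig, and JE's redirect
handlers forward java.util.logging records to it (or to the environment's
console or file handlers). A hedged sketch; the environment path is
illustrative:

    EnvironmentConfig config = new EnvironmentConfig();
    config.setAllowCreate(true);
    /* Delivered to this environment via ConfiguredRedirectHandler. */
    config.setLoggingHandler(new java.util.logging.ConsoleHandler());
    Environment env = new Environment(new File("/tmp/je-env"), config);
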
+ * + * This class must not extend FileHandler itself, since FileHandlers open their + * target log files at construction time, and this FileHandler is meant to be + * stateless. + */ +public class FileRedirectHandler extends Handler { + + public FileRedirectHandler() { + /* No need to call super, this handler is not truly publishing. */ + } + + @Override + public void publish(LogRecord record) { + EnvironmentImpl envImpl = + LoggerUtils.envMap.get(Thread.currentThread()); + + /* + * Prefer to lose logging output, rather than risk a + * NullPointerException if the caller forgets to set and release the + * environmentImpl. + */ + if (envImpl == null) { + return; + } + + /* + * The FileHandler is not always created for an environment, because + * creating a FileHandler automatically creates a logging file, and + * we avoid doing that for read only environments. Because of that, + * there may legitimately be no environment file handler. + */ + if (envImpl.getFileHandler() == null) { + return; + } + + envImpl.getFileHandler().publish(record); + } + + @Override + public void close() + throws SecurityException { + + /* + * Nothing to do. The redirect target file handler is closed by + * the environment. + */ + } + + @Override + public void flush() { + + /* + * Nothing to do. If we want to flush this logger explicitly, flush + * the underlying envImpl's handler. + */ + } +} diff --git a/src/com/sleepycat/je/utilint/FileStoreInfo.java b/src/com/sleepycat/je/utilint/FileStoreInfo.java new file mode 100644 index 0000000..31fad6b --- /dev/null +++ b/src/com/sleepycat/je/utilint/FileStoreInfo.java @@ -0,0 +1,164 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.IOException; + +/** + * Provides information about the file store associated with a specific file. + * This class is a wrapper for the information made available by the {@code + * java.nio.file.FileStore} class introduced in Java 7, but using reflection to + * permit Java 6 to determine cleanly at runtime that file stores are not + * supported. + * + * TODO: + * We no longer support Java 6 so much of this mechanism can be removed. Other + * code can assume that a FileStore is always available. Also, if an + * IOException is thrown by FileStore methods, instead of tolerating this we + * should invalidate the Environment, since this is standard JE policy. + * However, we may want to leave the factory interface in place to support + * testing with arbitrary disk space limits. + */ +public abstract class FileStoreInfo { + + /** + * The full name of the Java 7 FileStore class, which must be present for + * this class to be supported. + */ + public static final String FILE_STORE_CLASS = "java.nio.file.FileStore"; + + /** The full name of the Java 7 implementation factory class. */ + private static final String JAVA7_FILE_STORE_FACTORY_CLASS = + "com.sleepycat.je.utilint.Java7FileStoreInfo$Java7Factory"; + + /** The standard factory. 
*/ + private static final Factory standardFactory = createFactory(); + + /** If not null, a factory to use for testing. */ + private static volatile Factory testFactory = null; + + /** A factory interface for getting FileStoreInfo instances. */ + interface Factory { + + /** @see #checkSupported */ + void factoryCheckSupported(); + + /** @see #getInfo */ + abstract FileStoreInfo factoryGetInfo(String file) + throws IOException; + } + + /** A factory class whose operations fail with a given exception. */ + private static class FailingFactory implements Factory { + final RuntimeException exception; + FailingFactory(final RuntimeException exception) { + this.exception = exception; + } + @Override + public void factoryCheckSupported() { + throw exception; + } + @Override + public FileStoreInfo factoryGetInfo(@SuppressWarnings("unused") + String file) { + throw exception; + } + } + + /** Support subclasses. */ + protected FileStoreInfo() { } + + /** Create the standard factory. */ + private static Factory createFactory() { + try { + Class.forName(FILE_STORE_CLASS); + } catch (ClassNotFoundException e) { + return new FailingFactory( + new UnsupportedOperationException( + "FileStoreInfo is only supported for Java 7 and later")); + } + try { + return Class.forName(JAVA7_FILE_STORE_FACTORY_CLASS) + .asSubclass(Factory.class) + .newInstance(); + } catch (Exception e) { + return new FailingFactory( + new IllegalStateException( + "Problem accessing class " + + JAVA7_FILE_STORE_FACTORY_CLASS + ": " + e, + e)); + } + } + + /** + * Checks whether the current Java runtime supports providing information + * about file stores. Returns normally if called on a Java 7 runtime + * or later, otherwise throws {@link UnsupportedOperationException}. + * + * @throws UnsupportedOperationException if the current runtime does not + * support file stores + */ + public static final synchronized void checkSupported() { + getFactory().factoryCheckSupported(); + } + + /** Returns the current factory. */ + private static synchronized Factory getFactory() { + return (testFactory == null) ? standardFactory : testFactory; + } + + /** For testing: specifies the factory, or null for the default. */ + public static void setFactory(final Factory factory) { + testFactory = factory; + } + + /** + * Returns a {@link FileStoreInfo} instance that provides information about + * the file store associated with the specified file. Throws {@link + * UnsupportedOperationException} if called on a Java runtime prior to Java + * 7. Equal objects will be returned for all files associated with the + * same file store. + * + * @param file the file + * @return an instance of {@code FileStoreInfo} + * @throws UnsupportedOperationException if called on a Java runtime prior + * to Java 7 + * @throws IllegalStateException if an unexpected exception occurs when + * attempting to use reflection to access the underlying implementation + * @throws IOException if an I/O error occurs + */ + public static FileStoreInfo getInfo(final String file) + throws IOException { + + return getFactory().factoryGetInfo(file); + } + + /** + * Returns the size, in bytes, of the file store. + * + * @return the size of the file store, in bytes + * @throws IOException if an I/O error occurs + */ + public abstract long getTotalSpace() + throws IOException; + + /** + * Returns the number of bytes available in the file store. 
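
A usage sketch for the FileStoreInfo factory API above, assuming a Java 7+
runtime (the environment-home path is illustrative):

    static void printSpace(String envHome) throws IOException {
        FileStoreInfo.checkSupported(); /* throws on pre-Java-7 runtimes */
        FileStoreInfo info = FileStoreInfo.getInfo(envHome);
        System.out.println("total bytes:  " + info.getTotalSpace());
        System.out.println("usable bytes: " + info.getUsableSpace());
    }
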
+ * + * @return the number of bytes available + * @throws IOException if an I/O error occurs + */ + public abstract long getUsableSpace() + throws IOException; +} diff --git a/src/com/sleepycat/je/utilint/FloatStat.java b/src/com/sleepycat/je/utilint/FloatStat.java new file mode 100644 index 0000000..1ba006e --- /dev/null +++ b/src/com/sleepycat/je/utilint/FloatStat.java @@ -0,0 +1,78 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A Float JE stat. + */ +public class FloatStat extends Stat { + private static final long serialVersionUID = 1L; + + private float val; + + public FloatStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + public FloatStat(StatGroup group, StatDefinition definition, float val) { + super(group, definition); + this.val = val; + } + + @Override + public Float get() { + return val; + } + + @Override + public void set(Float newValue) { + val = newValue; + } + + @Override + public void add(Stat otherStat) { + val += otherStat.get(); + } + + @Override + public Stat computeInterval(Stat base) { + Stat ret = copy(); + if (definition.getType() == StatType.INCREMENTAL) { + ret.set(get() - base.get()); + } + return ret; + } + + @Override + public void negate() { + val = -val; + } + + @Override + public void clear() { + val = 0; + } + + @Override + protected String getFormattedValue() { + return Float.toString(val); + } + + @Override + public boolean isNotSet() { + return (val == 0); + } +} diff --git a/src/com/sleepycat/je/utilint/FormatUtil.java b/src/com/sleepycat/je/utilint/FormatUtil.java new file mode 100644 index 0000000..e346fa5 --- /dev/null +++ b/src/com/sleepycat/je/utilint/FormatUtil.java @@ -0,0 +1,72 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.SortedSet; + +/** + * A home for misc formatting utilities. + */ +public class FormatUtil { + + /** + * Utility class to convert a sorted set of long values to a compact string + * suitable for printing. The representation is made compact by identifying + * ranges so that the sorted set can be represented as a sequence of hex + * ranges and singletons. + */ + public static String asHexString(SortedSet set) { + + if (set.isEmpty()) { + return ""; + } + + final StringBuilder sb = new StringBuilder(); + java.util.Iterator i = set.iterator(); + long rstart = i.next(); + long rend = rstart; + + while (i.hasNext()) { + final long f= i.next(); + if (f == (rend + 1)) { + /* Continue the existing range. 
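
For example, the range compaction in FormatUtil.asHexString shown here turns
consecutive runs into hex ranges (a sketch; note the leading space that each
flushed range contributes):

    SortedSet<Long> files = new TreeSet<>(Arrays.asList(1L, 2L, 3L, 10L));
    System.out.println(FormatUtil.asHexString(files)); /* " 0x1-0x3 0xa" */
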
*/ + rend++; + continue; + } + + /* flush and start new range */ + flushRange(sb, rstart, rend); + rstart = rend = f; + }; + + flushRange(sb, rstart, rend); + return sb.toString(); + } + + private static void flushRange(final StringBuilder sb, + long rstart, + long rend) { + if (rstart == -1) { + return; + } + + if (rstart == rend) { + sb.append(" 0x").append(Long.toHexString(rstart)); + } else { + sb.append(" 0x").append(Long.toHexString(rstart)). + append("-"). + append("0x").append(Long.toHexString(rend)); + } + } +} diff --git a/src/com/sleepycat/je/utilint/FormatterRedirectHandler.java b/src/com/sleepycat/je/utilint/FormatterRedirectHandler.java new file mode 100644 index 0000000..c5b5983 --- /dev/null +++ b/src/com/sleepycat/je/utilint/FormatterRedirectHandler.java @@ -0,0 +1,55 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.logging.Formatter; +import java.util.logging.Level; +import java.util.logging.LogRecord; + +/** + * Redirect the ConsoleHandler to use a specific Formatter. This is the + * same redirect approach used in + * com.sleepycat.je.utilint.ConsoleRedirectHandler, but in this case, an + * environment (and its associated stored console handler) is not available. + * In order to still have prefixed logging output, we incur the higher level + * cost of resetting the formatter. + */ +public class FormatterRedirectHandler + extends java.util.logging.ConsoleHandler { + + /* + * We want console logging to be determined by the level for + * com.sleepycat.je.util.ConsoleHandler. Check that handler's level and use + * it to set FormatterRedirectHandler explicitly. + */ + private static final String HANDLER_LEVEL = + com.sleepycat.je.util.ConsoleHandler.class.getName() + ".level"; + + public FormatterRedirectHandler() { + super(); + + String level = LoggerUtils.getLoggerProperty(HANDLER_LEVEL); + setLevel((level == null) ? Level.OFF : Level.parse(level)); + } + + @Override + public void publish(LogRecord record) { + Formatter formatter = + LoggerUtils.formatterMap.get(Thread.currentThread()); + if (formatter != null) { + setFormatter(formatter); + } + super.publish(record); + } +} diff --git a/src/com/sleepycat/je/utilint/HexFormatter.java b/src/com/sleepycat/je/utilint/HexFormatter.java new file mode 100644 index 0000000..d4607eb --- /dev/null +++ b/src/com/sleepycat/je/utilint/HexFormatter.java @@ -0,0 +1,24 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
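To make the range-collapsing behavior of FormatUtil.asHexString above concrete, a small usage sketch (assuming the raw SortedSet in the listing is a SortedSet<Long>, which the surrounding code implies):

    import java.util.SortedSet;
    import java.util.TreeSet;

    import com.sleepycat.je.utilint.FormatUtil;

    public class HexRangeDemo {
        public static void main(String[] args) {
            SortedSet<Long> values = new TreeSet<Long>();
            for (long v : new long[] {1, 2, 3, 7, 9, 10}) {
                values.add(v);
            }
            /* Consecutive values collapse into ranges; note the leading
               space produced by flushRange. Prints " 0x1-0x3 0x7 0x9-0xa". */
            System.out.println(FormatUtil.asHexString(values));
        }
    }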
+ */ + +package com.sleepycat.je.utilint; + +public class HexFormatter { + static public String formatLong(long l) { + StringBuilder sb = new StringBuilder(); + sb.append(Long.toHexString(l)); + sb.insert(0, "0000000000000000".substring(0, 16 - sb.length())); + sb.insert(0, "0x"); + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/utilint/IntStat.java b/src/com/sleepycat/je/utilint/IntStat.java new file mode 100644 index 0000000..748c1c6 --- /dev/null +++ b/src/com/sleepycat/je/utilint/IntStat.java @@ -0,0 +1,86 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * An integer JE stat. + */ +public class IntStat extends Stat { + private static final long serialVersionUID = 1L; + + private int counter; + + public IntStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + public IntStat(StatGroup group, StatDefinition definition, int counter) { + super(group, definition); + this.counter = counter; + } + + @Override + public Integer get() { + return counter; + } + + @Override + public void set(Integer newValue) { + counter = newValue; + } + + public void increment() { + counter++; + } + + public void add(int count) { + counter += count; + } + + @Override + public void add(Stat otherStat) { + counter += otherStat.get(); + } + + @Override + public Stat computeInterval(Stat base) { + Stat ret = copy(); + if (definition.getType() == StatType.INCREMENTAL) { + ret.set(counter - base.get()); + } + return ret; + } + + @Override + public void negate() { + counter = -counter; + } + + @Override + public void clear() { + counter = 0; + } + + @Override + protected String getFormattedValue() { + return Stat.FORMAT.format(counter); + } + + @Override + public boolean isNotSet() { + return (counter == 0); + } +} diff --git a/src/com/sleepycat/je/utilint/IntegralLongAvg.java b/src/com/sleepycat/je/utilint/IntegralLongAvg.java new file mode 100644 index 0000000..3c1e77f --- /dev/null +++ b/src/com/sleepycat/je/utilint/IntegralLongAvg.java @@ -0,0 +1,91 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
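A quick check of HexFormatter.formatLong above: every value is rendered as "0x" followed by exactly 16 hex digits, zero-padded on the left.

    public class HexFormatterDemo {
        public static void main(String[] args) {
            /* Prints 0x00000000000000ff */
            System.out.println(com.sleepycat.je.utilint.HexFormatter.formatLong(255L));
            /* Prints 0xffffffffffffffff (negative values fill all 16 digits) */
            System.out.println(com.sleepycat.je.utilint.HexFormatter.formatLong(-1L));
        }
    }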
+ */
+
+package com.sleepycat.je.utilint;
+
+public class IntegralLongAvg extends Number {
+
+    private static final long serialVersionUID = 1L;
+    private long numerator;
+    private long denominator;
+    private long factor = 1;
+
+    public IntegralLongAvg(long numerator, long denominator, long factor) {
+        this.numerator = numerator;
+        this.denominator = denominator;
+        this.factor = factor;
+    }
+
+    public IntegralLongAvg(long numerator, long denominator) {
+        this.numerator = numerator;
+        this.denominator = denominator;
+    }
+
+    public IntegralLongAvg(IntegralLongAvg val) {
+        this.numerator = val.numerator;
+        this.denominator = val.denominator;
+        this.factor = val.factor;
+    }
+
+    public void add(IntegralLongAvg other) {
+        numerator += other.numerator;
+        denominator += other.denominator;
+    }
+
+    public void subtract(IntegralLongAvg other) {
+        numerator -= other.numerator;
+        denominator -= other.denominator;
+    }
+
+    public long compute() {
+        return (denominator != 0) ?
+            (numerator * factor) / denominator :
+            0;
+    }
+
+    public long getNumerator() {
+        return numerator;
+    }
+
+    public void setNumerator(long numerator) {
+        this.numerator = numerator;
+    }
+
+    public long getDenominator() {
+        return denominator;
+    }
+
+    public void setDenominator(long denominator) {
+        this.denominator = denominator;
+    }
+
+    @Override
+    public int intValue() {
+        return (int) compute();
+    }
+
+    @Override
+    public long longValue() {
+        return compute();
+    }
+
+    @Override
+    public float floatValue() {
+        return compute();
+    }
+
+    @Override
+    public double doubleValue() {
+        return compute();
+    }
+}
diff --git a/src/com/sleepycat/je/utilint/IntegralLongAvgStat.java b/src/com/sleepycat/je/utilint/IntegralLongAvgStat.java
new file mode 100644
index 0000000..eb67360
--- /dev/null
+++ b/src/com/sleepycat/je/utilint/IntegralLongAvgStat.java
@@ -0,0 +1,100 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import com.sleepycat.je.EnvironmentFailureException;
+
+/**
+ * A long stat which represents an average whose value is integral.
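Before the stat wrapper below, a usage sketch of IntegralLongAvg itself (the hit/lookup numbers are invented for illustration): the factor scales the integer average, e.g. factor = 100 turns a ratio into a percentage, and a zero denominator yields 0 instead of an ArithmeticException.

    import com.sleepycat.je.utilint.IntegralLongAvg;

    public class AvgDemo {
        public static void main(String[] args) {
            /* 75 hits out of 200 lookups, reported as a percentage. */
            IntegralLongAvg hitRate = new IntegralLongAvg(75, 200, 100);
            System.out.println(hitRate.compute()); // (75 * 100) / 200 = 37

            /* add() sums numerators and denominators componentwise. */
            hitRate.add(new IntegralLongAvg(25, 0));
            System.out.println(hitRate.compute()); // (100 * 100) / 200 = 50
        }
    }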
+ */ +public class IntegralLongAvgStat extends Stat { + + private static final long serialVersionUID = 1L; + private IntegralLongAvg value; + + public IntegralLongAvgStat(StatGroup group, + StatDefinition definition, + long numerator, + long denominator, + long factor) { + super(group, definition); + value = new IntegralLongAvg(numerator, denominator, factor); + } + + public IntegralLongAvgStat(StatGroup group, + StatDefinition definition, + long numerator, + long denominator) { + super(group, definition); + value = new IntegralLongAvg(numerator, denominator); + } + + @Override + public IntegralLongAvg get() { + return value; + } + + @Override + public void set(IntegralLongAvg newValue) { + value = newValue; + } + + @Override + public void add(Stat otherStat) { + value.add(otherStat.get()); + } + + @Override + public Stat computeInterval(Stat base) { + IntegralLongAvgStat ret = copy(); + ret.value.subtract(base.get()); + return ret; + } + + @Override + public void negate() { + if (value != null) { + value.setDenominator(-value.getDenominator()); + value.setNumerator(-value.getNumerator()); + } + } + + @Override + public IntegralLongAvgStat copy() { + try { + IntegralLongAvgStat ret = (IntegralLongAvgStat) super.clone(); + ret.value = new IntegralLongAvg(value); + return ret; + } catch (CloneNotSupportedException unexpected) { + throw EnvironmentFailureException.unexpectedException(unexpected); + } + } + + @Override + public void clear() { + value = null; + } + + @Override + protected String getFormattedValue() { + return (value != null) ? + Stat.FORMAT.format(get()) : + Stat.FORMAT.format(0); + } + + @Override + public boolean isNotSet() { + return (value == null); + } +} diff --git a/src/com/sleepycat/je/utilint/IntegralRateStat.java b/src/com/sleepycat/je/utilint/IntegralRateStat.java new file mode 100644 index 0000000..ad46c00 --- /dev/null +++ b/src/com/sleepycat/je/utilint/IntegralRateStat.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * A long stat which represents a rate whose value is Integral. + */ +public class IntegralRateStat extends LongStat { + private static final long serialVersionUID = 1L; + + private final long factor; + + public IntegralRateStat(StatGroup group, + StatDefinition definition, + Stat divisor, + Stat dividend, + long factor) { + super(group, definition); + this.factor = factor; + + calculateRate(divisor, dividend); + } + + /* Calculate the rate based on the two stats. */ + private void calculateRate(Stat divisor, + Stat dividend) { + if (divisor == null || dividend == null) { + counter = 0; + } else { + counter = (dividend.get().longValue() != 0) ? 
+ (divisor.get().longValue() * factor) / + dividend.get().longValue() : + 0; + } + } +} diff --git a/src/com/sleepycat/je/utilint/InternalException.java b/src/com/sleepycat/je/utilint/InternalException.java new file mode 100644 index 0000000..0871928 --- /dev/null +++ b/src/com/sleepycat/je/utilint/InternalException.java @@ -0,0 +1,35 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + + +/** + * Some internal inconsistency exception. + */ +public class InternalException extends RuntimeException { + + private static final long serialVersionUID = 1584673689L; + + public InternalException() { + super(); + } + + public InternalException(String message) { + super(message); + } + + public InternalException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/src/com/sleepycat/je/utilint/JVMSystemUtils.java b/src/com/sleepycat/je/utilint/JVMSystemUtils.java new file mode 100644 index 0000000..a4087bb --- /dev/null +++ b/src/com/sleepycat/je/utilint/JVMSystemUtils.java @@ -0,0 +1,180 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.File; +import java.lang.management.ManagementFactory; +import java.lang.management.OperatingSystemMXBean; +import java.lang.reflect.Method; +import java.util.List; + +public class JVMSystemUtils { + + public static final boolean ZING_JVM; + static { + final String vendor = System.getProperty("java.vendor"); + final String vmName = System.getProperty("java.vm.name"); + /* + * Check java.vm.name to distinguish Zing from Zulu, as recommended + * by Azul. + */ + ZING_JVM = vendor != null && vmName != null && + vendor.equals("Azul Systems, Inc.") && vmName.contains("Zing"); + } + + /** + * Zing will bump the heap up to 1 GB if -Xmx is smaller. + */ + public static final int MIN_HEAP_MB = ZING_JVM ? 1024 : 0; + + private static final String ZING_MANAGEMENT_FACTORY_CLASS = + "com.azul.zing.management.ManagementFactory"; + + private static final String ZING_ACCESS_ERROR = + "Could not access Zing management bean." + + " Make sure -XX:+UseZingMXBeans was specified."; + + private static OperatingSystemMXBean osBean = + ManagementFactory.getOperatingSystemMXBean(); + + private static final String MATCH_FILE_SEPARATOR = + "\\" + File.separatorChar; + + /* + * Get the system load average for the last minute. + * + * This method is no longer needed and could be removed. It was originally + * used to perform reflection when we supported Java 5, but from Java 6 + * onward the getSystemLoadAverage method can be called directly. 
However, + * it is a commonly used utility method, so we have chosen not to remove + * it, for now at least. + */ + public static double getSystemLoad() { + return osBean.getSystemLoadAverage(); + } + + /** + * Returns the max amount of memory in the heap available, using an + * approach that depends on the JVM vendor, OS, etc. + * + * May return Long.MAX_VALUE if there is no inherent limit. + */ + public static long getRuntimeMaxMemory() { + + /* Runtime.maxMemory is unreliable on MacOS Java 1.4.2. */ + if ("Mac OS X".equals(System.getProperty("os.name"))) { + final String jvmVersion = System.getProperty("java.version"); + if (jvmVersion != null && jvmVersion.startsWith("1.4.2")) { + return Long.MAX_VALUE; /* Undetermined heap size. */ + } + } + + /* + * Runtime.maxMemory is unreliable on Zing. Call + * MemoryMXBean.getApplicationObjectHeapUsableMemory instead. + */ + if (ZING_JVM) { + try { + final Class factoryClass = + Class.forName(ZING_MANAGEMENT_FACTORY_CLASS); + + final Method getBeanMethod = + factoryClass.getMethod("getMemoryMXBean"); + + final Object memoryBean = getBeanMethod.invoke(null); + final Class beanClass = memoryBean.getClass(); + + final Method getMaxMemoryMethod = beanClass.getMethod( + "getApplicationObjectHeapUsableMemory"); + + return (Long) getMaxMemoryMethod.invoke(memoryBean); + + } catch (Exception e) { + throw new IllegalStateException(ZING_ACCESS_ERROR, e); + } + } + + /* Standard approach. */ + return Runtime.getRuntime().maxMemory(); + } + + /** + * Returns the size of the System Zing Memory pool. This is the max memory + * for all running Zing JVMs. + */ + public static long getSystemZingMemorySize() { + try { + if (!ZING_JVM) { + throw new IllegalStateException("Only allowed under Zing"); + } + + final Class factoryClass = + Class.forName(ZING_MANAGEMENT_FACTORY_CLASS); + + final Method getPoolsMethod = + factoryClass.getMethod("getMemoryPoolMXBeans"); + + final java.util.List pools = + (java.util.List) getPoolsMethod.invoke(null); + + final Class poolClass = pools.get(0).getClass(); + final Method getNameMethod = poolClass.getMethod("getName"); + final Method getSizeMethod = poolClass.getMethod("getCurrentSize"); + + for (Object pool : pools) { + if ("System Zing Memory".equals(getNameMethod.invoke(pool))) { + return (Long) getSizeMethod.invoke(pool); + } + } + + throw new IllegalStateException( + "System Zing Memory pool not found"); + + } catch (Exception e) { + throw new IllegalStateException(ZING_ACCESS_ERROR, e); + } + } + + /** + * Appends Zing-specific Java args, should be called before starting a + * Java process. + */ + public static void addZingJVMArgs(List command) { + insertZingJVMArgs(command, command.size()); + } + + /** + * Insert Zing-specific Java args after the 'java' command, if 'java' is + * the 0th element. + */ + public static void insertZingJVMArgs(List command) { + if (!JVMSystemUtils.ZING_JVM) { + return; + } + String[] prog = command.get(0).split(MATCH_FILE_SEPARATOR); + if (prog[prog.length - 1].equals("java")) { + insertZingJVMArgs(command, 1); + } + } + + /* + * -XX:+UseZingMXBeans must be specified when running under Zing. 
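A usage sketch for the two public entry points above (assuming the raw List parameters are List<String>, as the call sites suggest): when building a child-JVM command line, the Zing-only flag is inserted right after the java executable, and both methods are no-ops on other JVMs.

    import java.util.ArrayList;
    import java.util.List;

    import com.sleepycat.je.utilint.JVMSystemUtils;

    public class SpawnDemo {
        public static void main(String[] args) {
            List<String> command = new ArrayList<String>();
            command.add("java");
            command.add("-jar");
            command.add("je.jar");
            JVMSystemUtils.insertZingJVMArgs(command);
            /* Under Zing: [java, -XX:+UseZingMXBeans, -jar, je.jar]
               Elsewhere:  [java, -jar, je.jar] */
            System.out.println(command);
        }
    }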
+ */ + private static void insertZingJVMArgs(List command, int insertAt) { + if (!JVMSystemUtils.ZING_JVM) { + return; + } + command.add(insertAt, "-XX:+UseZingMXBeans"); + } +} diff --git a/src/com/sleepycat/je/utilint/JarMain.java b/src/com/sleepycat/je/utilint/JarMain.java new file mode 100644 index 0000000..4e83962 --- /dev/null +++ b/src/com/sleepycat/je/utilint/JarMain.java @@ -0,0 +1,125 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.HashMap; + +/** + * Used as the main class for the manifest of the je.jar file, and so it is + * executed when running: java -jar je.jar. The first argument must be the + * final part of the class name of a utility in the com.sleepycat.je.util + * package, e.g., DbDump. All following parameters are passed to the main + * method of the utility and are processed as usual. + * + * Apart from the package, this class is ambivalent about the name of the + * utility specified; the only requirement is that it must be a public static + * class and must contain a public static main method. + */ +public class JarMain { + + private static final String USAGE = "usage: java [options...]"; + + /* Use a HashMap to allow the utilities to live in multiple packages. */ + private static HashMap utilPrefixMap = + new HashMap(); + + /* Map each utility name to its package. */ + static { + /* The utilities in directory com/sleepycat/je/util. */ + utilPrefixMap.put("DbCacheSize", "com.sleepycat.je.util."); + utilPrefixMap.put("DbDump", "com.sleepycat.je.util."); + utilPrefixMap.put("DbDeleteReservedFiles", "com.sleepycat.je.util."); + utilPrefixMap.put("DbFilterStats", "com.sleepycat.je.util."); + utilPrefixMap.put("DbLoad", "com.sleepycat.je.util."); + utilPrefixMap.put("DbPrintLog", "com.sleepycat.je.util."); + utilPrefixMap.put("DbTruncateLog", "com.sleepycat.je.util."); + utilPrefixMap.put("DbRunAction", "com.sleepycat.je.util."); + utilPrefixMap.put("DbScavenger", "com.sleepycat.je.util."); + utilPrefixMap.put("DbSpace", "com.sleepycat.je.util."); + utilPrefixMap.put("DbStat", "com.sleepycat.je.util."); + utilPrefixMap.put("DbVerify", "com.sleepycat.je.util."); + utilPrefixMap.put("DbVerifyLog", "com.sleepycat.je.util."); + + /* The utilities in directory com/sleepycat/je/rep/util. */ + utilPrefixMap.put("DbEnableReplication", + "com.sleepycat.je.rep.util."); + utilPrefixMap.put("DbGroupAdmin", "com.sleepycat.je.rep.util."); + utilPrefixMap.put("DbPing", "com.sleepycat.je.rep.util."); + utilPrefixMap.put("LDiff", "com.sleepycat.je.rep.util.ldiff."); + + /* The utilities in directory com/sleepycat/je/rep/utilint. 
*/ + utilPrefixMap.put("DbDumpGroup", "com.sleepycat.je.rep.utilint."); + utilPrefixMap.put("DbFeederPrintLog", "com.sleepycat.je.rep.utilint."); + utilPrefixMap.put("DbStreamVerify", "com.sleepycat.je.rep.utilint."); + utilPrefixMap.put("DbSync", "com.sleepycat.je.rep.utilint."); + utilPrefixMap.put("DbRepRunAction", "com.sleepycat.je.rep.utilint."); + utilPrefixMap.put("DbNullNode", "com.sleepycat.je.rep.utilint."); + } + + /* List all the available utilities. */ + private static String availableUtilities() { + StringBuilder sbuf = new StringBuilder(); + for (String util : utilPrefixMap.keySet()) { + sbuf.append(utilPrefixMap.get(util)); + sbuf.append(util); + sbuf.append("\n"); + } + + return sbuf.toString(); + } + + public static void main(String[] args) { + try { + if (args.length < 1) { + usage("Missing utility name"); + } + + String utilPrefix = utilPrefixMap.get(args[0]); + if (utilPrefix == null) { + System.out.println("Available utilities are: "); + System.out.println(availableUtilities()); + usage("No such utility"); + } + + Class cls = Class.forName(utilPrefix + args[0]); + + Method mainMethod = cls.getMethod + ("main", new Class[] { String[].class }); + + String[] mainArgs = new String[args.length - 1]; + System.arraycopy(args, 1, mainArgs, 0, mainArgs.length); + + mainMethod.invoke(null, new Object[] { mainArgs }); + } catch (InvocationTargetException ITE) { + if (ITE.getCause() != null) { + ITE.getCause().printStackTrace(); + usage(ITE.getCause().toString()); + } else { + System.err.println("Problem invoking main method:"); + ITE.printStackTrace(System.err); + } + } catch (Throwable e) { + e.printStackTrace(); + usage(e.toString()); + } + } + + private static void usage(String msg) { + System.err.println(msg); + System.err.println(USAGE); + System.exit(-1); + } +} diff --git a/src/com/sleepycat/je/utilint/Java7FileStoreInfo.java b/src/com/sleepycat/je/utilint/Java7FileStoreInfo.java new file mode 100644 index 0000000..351f936 --- /dev/null +++ b/src/com/sleepycat/je/utilint/Java7FileStoreInfo.java @@ -0,0 +1,93 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.FileSystems; +import java.nio.file.Files; + +/** + * An implementation of {@link FileStoreInfo} that uses Java 7 facilities. + * Until we require Java 7, this class should only be referenced via + * reflection. + */ +class Java7FileStoreInfo extends FileStoreInfo { + + /** The underlying Java 7 file store. */ + private final FileStore fileStore; + + /** The associated Factory. */ + static class Java7Factory implements Factory { + @Override + public void factoryCheckSupported() { } + @Override + public FileStoreInfo factoryGetInfo(final String file) + throws IOException { + + return new Java7FileStoreInfo(file); + } + } + + /** + * Creates an instance for the specified file. 
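To make the dispatch in JarMain.main above concrete: running, say, java -jar je.jar DbDump -h /env -s myDb (a hypothetical invocation) looks up "DbDump" in utilPrefixMap and reflectively performs the equivalent of:

    public class DispatchDemo {
        public static void main(String[] args) throws Exception {
            /* The remaining command-line arguments, minus the utility name. */
            String[] mainArgs = {"-h", "/env", "-s", "myDb"};
            Class<?> cls = Class.forName("com.sleepycat.je.util.DbDump");
            cls.getMethod("main", String[].class)
               .invoke(null, (Object) mainArgs);
        }
    }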
+ * + * @param file the file + * @throws IllegalArgumentException if the argument is {@code null} + * @throws IOException if there is an I/O error + */ + Java7FileStoreInfo(final String file) + throws IOException { + + if (file == null) { + throw new IllegalArgumentException("The file must not be null"); + } + fileStore = Files.getFileStore(FileSystems.getDefault().getPath(file)); + } + + @Override + public long getTotalSpace() + throws IOException { + + return fileStore.getTotalSpace(); + } + + @Override + public long getUsableSpace() + throws IOException { + + return fileStore.getUsableSpace(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } else if (!(obj instanceof Java7FileStoreInfo)) { + return false; + } else { + return fileStore.equals(((Java7FileStoreInfo) obj).fileStore); + } + } + + @Override + public int hashCode() { + return 197 + (fileStore.hashCode() ^ 199); + } + + @Override + public String toString() { + return fileStore.toString(); + } +} diff --git a/src/com/sleepycat/je/utilint/LSNStat.java b/src/com/sleepycat/je/utilint/LSNStat.java new file mode 100644 index 0000000..c8bee37 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LSNStat.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.EnvironmentFailureException; + +/** + * A long JE stat. + */ +public class LSNStat extends LongStat{ + private static final long serialVersionUID = 1L; + + public LSNStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + public LSNStat(StatGroup group, StatDefinition definition, long counter) { + super(group, definition); + this.counter = counter; + } + + @Override + public void add(Stat other) { + throw EnvironmentFailureException.unexpectedState( + "LongArrayStat doesn't support the add operation."); + } + + @Override + public Stat computeInterval(Stat base) { + return copy(); + } + + @Override + public void negate() { + } + + @Override + protected String getFormattedValue() { + return DbLsn.getNoFormatString(counter); + } +} diff --git a/src/com/sleepycat/je/utilint/LogVerifier.java b/src/com/sleepycat/je/utilint/LogVerifier.java new file mode 100644 index 0000000..b78f681 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LogVerifier.java @@ -0,0 +1,433 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.utilint; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.log.ChecksumValidator; +import com.sleepycat.je.log.FileHeader; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.util.LogVerificationException; + +/** + * Verifies the checksums in the contents of a log file in a JE {@code + * Environment}. + * + *
<p>
        The caller supplies the contents of the log file by passing arrays of + * bytes in a series of calls to the {@link #verify} method, which verifies the + * checksums for log records, and by calling the {@link #verifyAtEof} when the + * entire contents are complete, to detect incomplete entries at the end of the + * file. The primary intended use of this class is to verify the contents of + * log files that are being copied as part of a programmatic backup. It is + * critical that invalid files are not added to a backup set, since then both + * the live environment and the backup will be invalid. + * + * @see com.sleepycat.je.util.LogVerificationInputStream + */ +public class LogVerifier { + + private static final byte FILE_HEADER_TYPE_NUM = + LogEntryType.LOG_FILE_HEADER.getTypeNum(); + + private final EnvironmentImpl envImpl; + private final String fileName; + private final long fileNum; + + /* Stream verification state information. */ + private enum State { + INIT, FIXED_HEADER, VARIABLE_HEADER, ITEM, FILE_HEADER_ITEM, INVALID + } + private State state; + private long entryStart; + private long prevEntryStart; + private final ChecksumValidator validator; + private final ByteBuffer headerBuf; + private LogEntryHeader header; + private int itemPosition; + private int logVersion; + + /** + * Creates a log verifier. + * + * @param env the {@code Environment} associated with the log + * + * @param fileName the file name of the log, for reporting in the {@code + * LogVerificationException}. This should be a simple file name of the + * form {@code NNNNNNNN.jdb}, where NNNNNNNN is the file number in + * hexadecimal format. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs + */ + public LogVerifier(final Environment env, final String fileName) { + this(DbInternal.getNonNullEnvImpl(env), fileName); + } + + /** + * Creates a log verifier. + * + * @param envImpl the {@code EnvironmentImpl} associated with the log + * + * @param fileName the file name of the log, for reporting in the {@code + * LogVerificationException}. This should be a simple file name of the + * form {@code NNNNNNNN.jdb}, where NNNNNNNN is the file number in + * hexadecimal format. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs + */ + public LogVerifier(final EnvironmentImpl envImpl, final String fileName) { + this(envImpl, fileName, -1L); + } + + /** + *
<p>
        Creates a log verifier for use with an internal environment. If + * {@code fileNum} is less than zero, it is derived from {@code fileName}. + * + * @param envImpl the {@code EnvironmentImpl} associated with the log + * + * @param fileName the file name of the log, for reporting in the {@code + * LogVerificationException}. This should be a simple file name of the + * form {@code NNNNNNNN.jdb}, where NNNNNNNN is the file number in + * hexadecimal format. + * + * @param fileNum the file number + */ + public LogVerifier(final EnvironmentImpl envImpl, + final String fileName, + final long fileNum) { + this.envImpl = envImpl; + this.fileName = fileName; + this.fileNum = (fileNum >= 0) ? + fileNum : envImpl.getFileManager().getNumFromName(fileName); + state = State.INIT; + entryStart = 0L; + prevEntryStart = 0L; + validator = new ChecksumValidator(); + + /* + * The headerBuf is used to hold the fixed entry header, variable entry + * header portion, and file header entry. + */ + headerBuf = ByteBuffer.allocate + (Math.max(LogEntryHeader.MAX_HEADER_SIZE, FileHeader.entrySize())); + + /* Initial log version for reading the file header. */ + logVersion = LogEntryType.UNKNOWN_FILE_HEADER_VERSION; + } + + /** + * Verifies the next portion of the log file. + * + * @param buf the buffer containing the log file bytes + * + * @param off the start offset of the log file bytes in the buffer + * + * @param len the number of log file bytes in the buffer + * + * @throws LogVerificationException if a checksum cannot be verified or a + * log entry is determined to be invalid by examining its contents + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs + */ + public void verify(final byte[] buf, final int off, final int len) + throws LogVerificationException { + + final int endOffset = off + len; + int curOffset = off; + while (curOffset < endOffset) { + final int remaining = endOffset - curOffset; + switch (state) { + case INIT: + processInit(); + break; + case FIXED_HEADER: + curOffset = processFixedHeader(buf, curOffset, remaining); + break; + case VARIABLE_HEADER: + curOffset = processVariableHeader(buf, curOffset, remaining); + break; + case FILE_HEADER_ITEM: + curOffset = processFileHeaderItem(buf, curOffset, remaining); + break; + case ITEM: + curOffset = processItem(buf, curOffset, remaining); + break; + case INVALID: + throw newVerifyException + ("May not read after LogVerificationException is thrown"); + default: + assert false; + } + } + } + + /** + * Checks that the log file ends with a complete log entry, after having + * completed verifying the log file contents through calls to {@link + * #verify}. + * + * @throws LogVerificationException if the stream does not end with a + * complete log entry + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs + */ + public void verifyAtEof() + throws LogVerificationException { + + /* State should be INIT at EOF. */ + if (state == State.INIT) { + return; + } + + /* Ignore partial entry at end of last log file. */ + if (fileNum == envImpl.getFileManager().getLastFileNum()) { + return; + } + + /* Report partial entry at end of any other file. */ + throw newVerifyException("Entry is incomplete"); + } + + /** + * Initializes all state variables before the start of a log entry. Moves + * the state to FIXED_HEADER, the first part of a log entry. 
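A hedged sketch of the intended backup-time usage described above (paths and buffer size are invented; error handling elided): stream the log file through verify while copying, then call verifyAtEof to catch a truncated final entry.

    import java.io.FileInputStream;
    import java.io.FileOutputStream;

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.utilint.LogVerifier;

    public class VerifyingCopy {
        public static void copyLog(Environment env, String envDir, String fileName)
            throws Exception {

            LogVerifier verifier = new LogVerifier(env, fileName);
            byte[] buf = new byte[8192];
            FileInputStream in = new FileInputStream(envDir + "/" + fileName);
            FileOutputStream out = new FileOutputStream("/backup/" + fileName);
            try {
                int n;
                while ((n = in.read(buf)) > 0) {
                    /* Throws LogVerificationException on a bad checksum. */
                    verifier.verify(buf, 0, n);
                    out.write(buf, 0, n);
                }
                /* Rejects a file that ends mid-entry. */
                verifier.verifyAtEof();
            } finally {
                in.close();
                out.close();
            }
        }
    }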
+ */ + private void processInit() { + validator.reset(); + headerBuf.clear(); + header = null; + itemPosition = 0; + state = State.FIXED_HEADER; + } + + /** + * Processes the fixed initial portion of a log entry. After all bytes for + * the fixed portion are read, moves the state to VARIABLE_HEADER if the + * header contains a variable portion, or to ITEM if it does not. + */ + private int processFixedHeader(final byte[] buf, + final int curOffset, + final int remaining) + throws LogVerificationException { + + assert header == null; + + final int maxSize = LogEntryHeader.MIN_HEADER_SIZE; + final int processSize = + Math.min(remaining, maxSize - headerBuf.position()); + + headerBuf.put(buf, curOffset, processSize); + assert headerBuf.position() <= maxSize; + + if (headerBuf.position() == maxSize) { + headerBuf.flip(); + try { + header = new LogEntryHeader( + headerBuf, logVersion, DbLsn.makeLsn(fileNum, entryStart)); + } catch (ChecksumException e) { + throw newVerifyException( + "Invalid header bytes=" + + Arrays.toString(headerBuf.array()), + e); + } + + if (header.getPrevOffset() != prevEntryStart) { + throw newVerifyException( + "Header prevOffset=0x" + + Long.toHexString(header.getPrevOffset()) + + " but prevEntryStart=0x" + + Long.toHexString(prevEntryStart)); + } + + /* If the header is invisible, turn off the invisible bit. */ + if (header.isInvisible()) { + LogEntryHeader.turnOffInvisible(headerBuf, 0); + } + + /* Do not validate the bytes of the checksum itself. */ + validator.update(headerBuf.array(), + LogEntryHeader.CHECKSUM_BYTES, + maxSize - LogEntryHeader.CHECKSUM_BYTES); + + if (header.isVariableLength()) { + headerBuf.clear(); + state = State.VARIABLE_HEADER; + } else if (header.getType() == FILE_HEADER_TYPE_NUM) { + headerBuf.clear(); + state = State.FILE_HEADER_ITEM; + } else { + state = State.ITEM; + } + } + + return curOffset + processSize; + } + + /** + * Processes the variable portion of a log entry. After all bytes for the + * variable portion are read, moves the state to ITEM. 
+ */ + private int processVariableHeader(final byte[] buf, + final int curOffset, + final int remaining) { + assert header != null; + assert header.isVariableLength(); + + final int maxSize = header.getVariablePortionSize(); + final int processSize = + Math.min(remaining, maxSize - headerBuf.position()); + + headerBuf.put(buf, curOffset, processSize); + assert headerBuf.position() <= maxSize; + + if (headerBuf.position() == maxSize) { + headerBuf.flip(); + header.readVariablePortion(headerBuf); + validator.update(headerBuf.array(), 0, maxSize); + + if (header.getType() == FILE_HEADER_TYPE_NUM) { + headerBuf.clear(); + state = State.FILE_HEADER_ITEM; + } else { + state = State.ITEM; + } + } + + return curOffset + processSize; + } + + private int processFileHeaderItem(final byte[] buf, + final int curOffset, + final int remaining) + throws LogVerificationException { + + assert header != null; + assert logVersion == LogEntryType.UNKNOWN_FILE_HEADER_VERSION; + + final int maxSize = FileHeader.entrySize(); + final int processSize = + Math.min(remaining, maxSize - headerBuf.position()); + + headerBuf.put(buf, curOffset, processSize); + assert headerBuf.position() <= maxSize; + + if (headerBuf.position() == maxSize) { + validator.update(headerBuf.array(), 0, maxSize); + try { + validator.validate(header.getChecksum(), fileNum, entryStart); + } catch (ChecksumException e) { + throw newVerifyException(e); + } + + headerBuf.flip(); + LogEntry fileHeaderEntry = + LogEntryType.LOG_FILE_HEADER.getNewLogEntry(); + fileHeaderEntry.readEntry(envImpl, header, headerBuf); + FileHeader fileHeaderItem = + (FileHeader) fileHeaderEntry.getMainItem(); + + /* Log version in the file header applies to all other entries. */ + logVersion = fileHeaderItem.getLogVersion(); + + prevEntryStart = entryStart; + entryStart += header.getSize() + maxSize; + state = State.INIT; + } + + return curOffset + processSize; + } + + /** + * Processes the item portion of a log entry. After all bytes for the item + * are read, moves the state back to INIT and bumps the entryStart. 
+ */ + private int processItem(final byte[] buf, + final int curOffset, + final int remaining) + throws LogVerificationException { + + assert header != null; + + final int maxSize = header.getItemSize(); + final int processSize = Math.min(remaining, maxSize - itemPosition); + + validator.update(buf, curOffset, processSize); + itemPosition += processSize; + assert itemPosition <= maxSize; + + if (itemPosition == maxSize) { + try { + validator.validate(header.getChecksum(), fileNum, entryStart); + } catch (ChecksumException e) { + /* + LogEntryType lastEntryType = LogEntryType.findType(header.getType()); + System.out.println(); + System.out.println( + "Checksum error in logrec of tyoe " + + lastEntryType.toStringNoVersion() + " log version: " + logVersion); + System.out.println(); + */ + throw newVerifyException(e); + } + + prevEntryStart = entryStart; + entryStart += header.getSize() + maxSize; + state = State.INIT; + } + + return curOffset + processSize; + } + + private LogVerificationException newVerifyException(String reason) { + return newVerifyException(reason, null); + } + + private LogVerificationException newVerifyException(Throwable cause) { + return newVerifyException(cause.toString(), cause); + } + + private LogVerificationException newVerifyException(String reason, + Throwable cause) { + state = State.INVALID; + + final String logEntrySize; + + if (header != null) { + logEntrySize = + String.valueOf(header.getSize() + header.getItemSize()); + } else { + logEntrySize = "unknown"; + } + + return new LogVerificationException + ("Log is invalid, fileName: " + fileName + + " fileNumber: 0x" + Long.toHexString(fileNum) + + " logEntryOffset: 0x" + Long.toHexString(entryStart) + + " logEntrySize: " + logEntrySize + + " verifyState: " + state + + " reason: " + reason, + cause); + } +} diff --git a/src/com/sleepycat/je/utilint/LoggerUtils.java b/src/com/sleepycat/je/utilint/LoggerUtils.java new file mode 100644 index 0000000..d196cc8 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LoggerUtils.java @@ -0,0 +1,587 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Formatter; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.Trace; + +/** + * Logging Architecture + * =========================== + * JE uses the java.util.logging package. The ability to dynamically specify + * logging levels per component is important functionality for the system. + * Logging output is directed to the console, to the je.info files, and in + * special cases, to a MemoryHandler. The latter is meant for debugging and + * field support. 
+ * + * Logging output from multiple environments may end up going to the same + * handler, either because a single process is executing multiple environments, + * or because the output of multiple environments, such as a replication group, + * is combined in a single display. Because of that, it's important for logging + * output to be prefixed with an environment id so it can be distinguished by + * environment. + * + * Loggers managed by java.util.logging.LogManager are supposed to be + * maintained with a weak reference by the LogManager. In our experience, + * loggers do not seem to be released, and seem to accumulate in + * memory. Because of that, we create a single logger per JE class, rather than + * a logger per class instance. + * + * The latter would be more convenient, because we wish to use environment + * specific information, such as the environment name as a prefix, or the + * location of the je.info file, when creating output. Restricting ourselves to + * a single per-class logger requires that we keep the logger and its + * associated handlers and formatters stateless, because the logger may be + * shared by multiple environments. To compensate for that, we use per-thread + * state to permit per-environment customization of the logging output (that is + * the logging prefix) and file handler location. Because we've seen some + * performance issues with ThreadLocals, we elected instead to maintain a + * per-thread map to store state information needed by the logger. + * + * This state information is: + * + * - the environment impl from the envMap(from which one can obtain the prefix + * and the console, file and memory handlers to use) + * + * - or if the environment impl is null because the component executes without + * an environment, the output will go to only a console handler. It will use a + * particular formatter to prefix the output with a useful id. This is obtained + * from the formatter map. + * + * + * With this scheme, a JE process has a maximum of + * - N loggers, where N is the number of classes which get loggers + * - 3 handlers * number of environments, because each environment creates + * a Console, File and Memory handler. + * + * How To Use Logging in a JE Class + * ======================================= + * Creating a Logger: There are three kinds of loggers that a class may chose + * to use. + * + * 1. A class with a reference to EnvironmentImpl or RepImpl should use + * LoggerUtils.getLogger(Class) to create a logger which prefixes its output + * with an environment id. When a logger is obtained this way, the logger + * should not be used directly. Instead, LoggerUtils provides several methods + * like this: + * LoggerUtils.severe() equals to logger.severe + * LoggerUtils.warning() equals to logger.warning + * etc + * LoggerUtils.logMsg(Logger, EnvironmentImpl, Level, String) equals to + * logger.log(Level, String) + * + * 2. A class without an EnvironmentImpl which still has some kind of custom + * information to prepend to the logging output should use + * LoggerUtils.getFormatterNeeded(). For example, + * com.sleepycat.je.rep.monitor.Monitor does not have an environment, but does + * have a NameIdPair, and it can insert that information via a specific + * Formatter. When using this logger, the class must create and maintain a + * Formatter instance to pass as a logging parameter. When using this flavor, + * use: + * LoggerUtils.logMsg(Logger, Formatter, Level, String) where the + * formatter is the one created by the using class. + * + * 3. 
A logger without an EnvironmentImpl does not prefix or customize the + * logging output, and uses LoggerUtils.getLoggerFixedPrefix to create a + * logger. In this case, use the usual java.util.logging.Logger logging + * methods. + * + * Note: there are some JE classes which only conditionally reference an + * environment. In that case, the environment must also conditionally create + * a logger, and then use the wrapper methods which use both an environmentImpl + * and a formatter. For example: + * + * if (envImpl != null) { + * logger = LoggerUtils.getLogger(getClass()); + * } else { + * logger = LoggerUtils.getLoggerFormatterNeeded(); + * } + * formatter = new Formatter(.....); + * + * Then use LoggerUtils.logMsg(Logger, EnvironmentImpl, Formatter, Level, + * String) instead of Logger.log(Level, String) + */ +public class LoggerUtils { + + /* + * Environment state to be used by a logger. Must be set and released + * per logger call. + */ + static final Map envMap = + new ConcurrentHashMap(); + + /* + * Formatter state to be used by a logger. Must be set and released + * per logger call. Used by logging calls that do not have an available + * environment. + */ + static final Map formatterMap = + new ConcurrentHashMap(); + + public static final String NO_ENV = ".noEnv"; + public static final String FIXED_PREFIX = ".fixedPrefix"; + private static final String PUSH_LEVEL = ".push.level"; + + /* Used to prevent multiple full thread dumps. */ + private static final Object fullThreadDumpMutex = new Object(); + + /** + * Get a logger which is configured to use the shared console, memory, and + * file handlers of an EnvironmentImpl and prefixes all messages with an + * environment identifier. Use this for classes which have a reference + * to an EnvironmentImpl (or RepImpl). + * + * When a logger is obtained this way, the logger should not be used + * directly. Instead, the wrapper methods in LoggerUtils which put and + * remove the environment from the envMap must be used, so that the logging + * output can be properly prefixed and redirected to the correct + * environment. + */ + public static Logger getLogger(Class cl) { + + Logger logger = createLogger(cl.getName()); + + /* Check whether the logger already has existing handlers. */ + boolean hasConsoleHandler = false; + boolean hasFileHandler = false; + boolean hasConfiguredHandler = false; + + /* + * [#18277] Add null check of logger.getHandlers() because the Resin + * app server's implementation of logging can return null instead of an + * empty array. + */ + Handler[] handlers = logger.getHandlers(); + if (handlers != null) { + for (Handler h : handlers) { + + /* + * Intentionally check for java.util.logging.ConsoleHandler + * rather than ConsoleRedirectHandler, because the loggers that + * do not have a custom prefix use the ConsoleHandler + * directly. Having ConsoleRedirectHandler extend + * ConsoleHandler lets us have a model where the user only have + * to set com.sleepycat.je.util.ConsoleHandler in their logging + * properties file. 
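A sketch of flavors 1 and 3 from the list above (MyComponent and its prefix are hypothetical; flavor 2 differs only in passing a Formatter to logMsg):

    import java.util.logging.Level;
    import java.util.logging.Logger;

    import com.sleepycat.je.dbi.EnvironmentImpl;
    import com.sleepycat.je.utilint.LoggerUtils;

    class MyComponent {
        /* Flavor 1: output prefixed with the environment id via the envMap. */
        private final Logger envLogger = LoggerUtils.getLogger(MyComponent.class);

        /* Flavor 3: fixed prefix, used like a plain java.util.logging logger. */
        private final Logger fixedLogger =
            LoggerUtils.getLoggerFixedPrefix(MyComponent.class, "MyComponent");

        void report(EnvironmentImpl envImpl, String msg) {
            /* The wrapper pushes envImpl into the per-thread map around the call. */
            LoggerUtils.logMsg(envLogger, envImpl, Level.INFO, msg);
            fixedLogger.info(msg);
        }
    }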
+ */ + if (h instanceof java.util.logging.ConsoleHandler) { + hasConsoleHandler = true; + } + + if (h instanceof FileRedirectHandler) { + hasFileHandler = true; + } + + if (h instanceof ConfiguredRedirectHandler) { + hasConfiguredHandler = true; + } + } + } + + if (!hasConsoleHandler) { + logger.addHandler(new ConsoleRedirectHandler()); + } + + if (!hasFileHandler) { + logger.addHandler(new FileRedirectHandler()); + } + + if (!hasConfiguredHandler) { + logger.addHandler(new ConfiguredRedirectHandler()); + } + + return logger; + } + + /** + * Get a logger which only publishes to a console handler. The logging + * output is prefixed in a custom way, using the formatter map to access + * the proper state. This should be used by a class that does not have + * an EnvironmentImpl, but still wishes to prepend some kind of custom + * prefix to the logging output. + * + * When a logger is obtained this way, the logger should not be used + * directly. Instead, the wrapper methods in LoggerUtils which use a + * Formatter parameter, and put and remove the environment from the + * formatterMap must be used, so that the logging output can be properly + * prefixed and redirected to the correct environment. + */ + public static Logger getLoggerFormatterNeeded(Class cl) { + + /* + * By convention, loggers that use redirect handlers are named with the + * class name. Name logger that don't use redirecting differently, in + * order to avoid conflicts when a single class uses both redirecting + * and fixed prefix loggers. + */ + Logger logger = createLogger(cl.getName() + NO_ENV); + + /* Add a new handler if a console handler does not already exist. */ + if (!hasConsoleHandler(logger)) { + logger.addHandler(new FormatterRedirectHandler()); + } + + return logger; + } + + /* Convenience method for getLoggerFixedPrefix. */ + public static Logger getLoggerFixedPrefix(Class cl, + String prefix) { + return getLoggerFixedPrefix(cl, prefix, null); + } + + /** + * Get a logger that uses the generic console handler, with no attempt to + * use thread local state to customize the message prefix. + */ + public static Logger getLoggerFixedPrefix(Class cl, + String prefix, + EnvironmentImpl envImpl) { + + /* + * By convention, loggers that use redirect handlers are named with the + * class name. Name logger that don't use redirecting differently, in + * order to avoid conflicts when a single class uses both redirecting + * and fixed prefix loggers. + */ + Logger logger = createLogger(cl.getName() + FIXED_PREFIX); + + /* Check whether the logger already has this handler. */ + if (!hasConsoleHandler(logger)) { + logger.addHandler(new com.sleepycat.je.util.ConsoleHandler + (new TracerFormatter(prefix), envImpl)); + } + + return logger; + } + + /* + * Return true if this logger already has a console handler. + */ + private static boolean hasConsoleHandler(Logger logger) { + + /* + * [#18277] Add null check of logger.getHandlers() because the Resin + * app server's implementation of logging can return null instead of an + * empty array. + */ + Handler[] handlers = logger.getHandlers(); + if (handlers == null) { + return false; + } + + for (Handler h : handlers) { + if (h instanceof java.util.logging.ConsoleHandler) { + return true; + } + } + + return false; + } + + /* Create a logger for the specified class name. */ + private static Logger createLogger(String className) { + + /* + * No need to set level values explicitly. This is managed in the + * standard way by java.util.logging.LogManager. 
+ */ + Logger logger = Logger.getLogger(className); + + /* + * We've debated permitting the logger to use parental handlers, which + * would permit using the standard java.util.logging policy of setting + * tbe property com.sleepycat.je.handlers as a way of customizing + * handlers. This was not useful because of the need to specify + * handlers per environment, and also caused a process monitor echo + * issue within NoSQL DB. + */ + logger.setUseParentHandlers(false); + + return logger; + } + + /* Get the value of a specified Logger property. */ + public static String getLoggerProperty(String property) { + java.util.logging.LogManager mgr = + java.util.logging.LogManager.getLogManager(); + + return mgr.getProperty(property); + } + + /** + * Get the push level for the MemoryHandler. + */ + public static Level getPushLevel(String name) { + String propertyValue = getLoggerProperty(name + PUSH_LEVEL); + + Level level = Level.OFF; + if (propertyValue != null) { + level = Level.parse(propertyValue); + } + + return level; + } + + /** + * Log a message using this logger. We expect that this logger is one that + * has been configured to expect an environment. This utility method should + * be used to ensure that the thread specific context is pushed before + * logging, and cleared afterwards. + */ + public static void logMsg(Logger useLogger, + EnvironmentImpl envImpl, + Level logLevel, + String msg) { + /* Set thread specific context. */ + if (envImpl != null) { + envMap.put(Thread.currentThread(), envImpl); + } + try { + useLogger.log(logLevel, msg); + } finally { + /* Clear thread specific context. */ + envMap.remove(Thread.currentThread()); + } + } + + /** + * Use the environment logger. + */ + public static void envLogMsg(Level logLevel, + EnvironmentImpl envImpl, + String msg) { + logMsg(envImpl.getLogger(), envImpl, logLevel, msg); + } + + /** + * Log a message using this logger. The logger may be either one that + * expects to use the state in the envMap (obtained via getLogger(), or it + * may be one that expects to use the state in the formatter map (obtained + * via getLoggerFormatterNeeded(). This method checks whether the + * EnvironmentImpl is null or not and choses the appropriate state type to + * use. + */ + public static void logMsg(Logger useLogger, + EnvironmentImpl envImpl, + Formatter formatter, + Level logLevel, + String msg) { + if (envImpl != null) { + logMsg(useLogger, envImpl, logLevel, msg); + } else { + logMsg(useLogger, formatter, logLevel, msg); + } + } + + /* Some convenience methods. */ + public static void severe(Logger useLogger, + EnvironmentImpl envImpl, + String msg) { + logMsg(useLogger, envImpl, Level.SEVERE, msg); + } + + public static void warning(Logger useLogger, + EnvironmentImpl envImpl, + String msg) { + logMsg(useLogger, envImpl, Level.WARNING, msg); + } + + public static void info(Logger useLogger, + EnvironmentImpl envImpl, + String msg) { + logMsg(useLogger, envImpl, Level.INFO, msg); + } + + public static void fine(Logger useLogger, + EnvironmentImpl envImpl, + String msg) { + logMsg(useLogger, envImpl, Level.FINE, msg); + } + + public static void finer(Logger useLogger, + EnvironmentImpl envImpl, + String msg) { + logMsg(useLogger, envImpl, Level.FINER, msg); + } + + public static void finest(Logger useLogger, + EnvironmentImpl envImpl, + String msg) { + logMsg(useLogger, envImpl, Level.FINEST, msg); + } + + /** + * Log a message with this logger. 
This utility method should be used in
+ * tandem with loggers obtained via getLoggerFormatterNeeded() to ensure
+ * that the thread-specific Formatter is pushed before logging, and cleared
+ * afterwards.
+ */
+ public static void logMsg(Logger useLogger,
+                           Formatter formatter,
+                           Level logLevel,
+                           String msg) {
+     /* Set thread-specific Formatter. */
+     if (formatter != null) {
+         formatterMap.put(Thread.currentThread(), formatter);
+     }
+     try {
+         useLogger.log(logLevel, msg);
+     } finally {
+         /* Clear thread-specific Formatter. */
+         formatterMap.remove(Thread.currentThread());
+     }
+ }
+
+ /**
+  * Logger method for recording an exception and stack trace to both the
+  * java.util.logging system and the .jdb files.
+  */
+ public static void traceAndLogException(EnvironmentImpl envImpl,
+                                         String sourceClass,
+                                         String sourceMethod,
+                                         String msg,
+                                         Throwable t) {
+     String traceMsg = msg + "\n" + getStackTrace(t);
+
+     envMap.put(Thread.currentThread(), envImpl);
+     try {
+         envImpl.getLogger().logp
+             (Level.SEVERE, sourceClass, sourceMethod, traceMsg);
+     } finally {
+         envMap.remove(Thread.currentThread());
+     }
+     Trace.trace(envImpl, traceMsg);
+ }
+
+ /**
+  * Records a message both to the java.util.logging loggers and through
+  * the trace system which writes to the .jdb files. The logLevel parameter
+  * only applies to the java.util.logging system. Trace messages are
+  * unconditionally written to the .jdb files.
+  *
+  * Because of that, this method should be used sparingly, for critical
+  * messages.
+  */
+ public static void traceAndLog(Logger logger,
+                                EnvironmentImpl envImpl,
+                                Level logLevel,
+                                String msg) {
+     logMsg(logger, envImpl, logLevel, msg);
+     Trace.trace(envImpl, msg);
+ }
+
+ /** Returns a String version of a stack trace. */
+ public static String getStackTrace(Throwable t) {
+     StringWriter sw = new StringWriter();
+     t.printStackTrace(new PrintWriter(sw));
+     String stackTrace = sw.toString();
+     stackTrace = stackTrace.replaceAll("<", "&lt;");
+     stackTrace = stackTrace.replaceAll(">", "&gt;");
+
+     return stackTrace;
+ }
+
+ /** Returns the stack trace of the caller, for debugging. */
+ public static String getStackTrace() {
+     Exception e = new Exception();
+     return getStackTrace(e);
+ }
+
+ /* Get the level for ConsoleHandler and FileHandler. */
+ public static Level getHandlerLevel(DbConfigManager configManager,
+                                     ConfigParam param,
+                                     String levelName) {
+     boolean changed = false;
+
+     /* Check if the level params are set. */
+     String level = configManager.get(param);
+     if (!param.getDefault().equals(level)) {
+         changed = true;
+     }
+
+     /* Get the level from the java.util.logging configuration system. */
+     String propertyLevel = getLoggerProperty(levelName);
+
+     /*
+      * If the params are not set, and levels are set in the properties
+      * file, then set the level from the properties file.
+      */
+     if (!changed && propertyLevel != null) {
+         level = propertyLevel;
+     }
+
+     return Level.parse(level);
+ }
+
+ /**
+  * Logs a full thread dump as if jstack were piped to the je.info file.
+  *
+  * Only one dump per EnvironmentImpl lifetime is allowed. Allowing multiple
+  * dumps can cause them to be interleaved, and risks filling the je.info
+  * files with repeated dumps. The envImpl should be invalidated when this
+  * method is called, so one dump should be enough.
+ */ + public static void fullThreadDump(Logger logger, + EnvironmentImpl envImpl, + Level level) { + + if (!logger.isLoggable(level)) { + return; + } + + synchronized (fullThreadDumpMutex) { + + if (envImpl != null) { + if (envImpl.getDidFullThreadDump()) { + return; + } + + envImpl.setDidFullThreadDump(true); + } + + Map<Thread, StackTraceElement[]> stackTraces = + Thread.getAllStackTraces(); + + for (Map.Entry<Thread, StackTraceElement[]> stme : + stackTraces.entrySet()) { + logMsg(logger, envImpl, level, stme.getKey().toString()); + for (StackTraceElement ste : stme.getValue()) { + logMsg(logger, envImpl, level, " " + ste); + } + } + } + } + + /** + * Displays both the exception class and the message. Use when you want a + * relatively terse display of the exception (i.e., omitting the stack + * trace). Prefer this over exception.getMessage(), as some exceptions + * have null messages. + */ + public static String exceptionTypeAndMsg(Exception e) { + return e.getClass() + " : " + e.getMessage(); + } +}
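Taken together, getStackTrace and exceptionTypeAndMsg support a verbose and a terse reporting style. A minimal sketch (the logger, envImpl, and the failing call are hypothetical stand-ins):

    try {
        advanceCursor();  // any call that may throw
    } catch (Exception e) {
        /* Terse, null-message-safe summary for routine reporting. */
        LoggerUtils.warning(logger, envImpl,
                            LoggerUtils.exceptionTypeAndMsg(e));
        /* Full stack trace, with angle brackets escaped by getStackTrace. */
        LoggerUtils.severe(logger, envImpl, LoggerUtils.getStackTrace(e));
    }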
diff --git a/src/com/sleepycat/je/utilint/LongArrayStat.java b/src/com/sleepycat/je/utilint/LongArrayStat.java new file mode 100644 index 0000000..6285e29 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongArrayStat.java @@ -0,0 +1,124 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.EnvironmentFailureException; + +/** + * A Long array JE stat. + */ +public class LongArrayStat extends Stat<long[]> { + private static final long serialVersionUID = 1L; + + protected long[] array; + + public LongArrayStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + public LongArrayStat(StatGroup group, + StatDefinition definition, + long[] array) { + super(group, definition); + this.array = array; + } + + @Override + public long[] get() { + return array; + } + + @Override + public void set(long[] array) { + this.array = array; + } + + @Override + public void add(Stat<long[]> other) { + throw EnvironmentFailureException.unexpectedState + ("LongArrayStat doesn't support the add operation."); + } + + @Override + public Stat<long[]> computeInterval(Stat<long[]> base) { + return copy(); + } + + @Override + public void negate() { + throw EnvironmentFailureException.unexpectedState + ("LongArrayStat doesn't support the negate operation."); + } + + @Override + public void clear() { + if (array != null && array.length > 0) { + for (int i = 0; i < array.length; i++) { + array[i] = 0; + } + } + } + + @Override + public LongArrayStat copy() { + try { + LongArrayStat ret = (LongArrayStat) super.clone(); + if (array != null && array.length > 0) { + long[] newArray = new long[array.length]; + System.arraycopy + (array, 0, newArray, 0, array.length); + ret.set(newArray); + } + + return ret; + } catch (CloneNotSupportedException e) { + throw EnvironmentFailureException.unexpectedException(e); + } + } + + @Override + protected String getFormattedValue() { + StringBuilder sb = new StringBuilder(); + sb.append("["); + if (array != null && array.length > 0) { + boolean first = true; + for (int i = 0; i < array.length; i++) { + if (array[i] > 0) { + if (!first) { + sb.append("; "); + } + first = false; + sb.append("level ").append(i).append(": count="); + sb.append(Stat.FORMAT.format(array[i])); + } + } + } + sb.append("]"); + + return sb.toString(); + } + + @Override + public boolean isNotSet() { + if (array == null) { + return true; + } + + if (array.length == 0) { + return true; + } + + return false; + } +} diff --git a/src/com/sleepycat/je/utilint/LongAvgRate.java b/src/com/sleepycat/je/utilint/LongAvgRate.java new file mode 100644 index 0000000..f188ea7 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongAvgRate.java @@ -0,0 +1,252 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.concurrent.TimeUnit; + +/** + * A long JE stat component generated from an exponential moving average over a + * specified time period of the rate of change in a long value over time. + */ +public class LongAvgRate extends MapStatComponent<Long, LongAvgRate> { + private static final long serialVersionUID = 1L; + + /** + * The minimum number of milliseconds for computing rate changes, to avoid + * quantizing errors. + */ + public static final long MIN_PERIOD = 200; + + /** The time unit for reporting the result. 
*/ + private final TimeUnit reportTimeUnit; + + /** The average of the rate values. */ + private final DoubleExpMovingAvg avg; + + /** + * The previous value, or 0. Synchronize on this instance when accessing + * this field. + */ + private long prevValue; + + /** + * The time in milliseconds of the previous value, or 0. Synchronize on + * this instance when accessing this field. + */ + private long prevTime; + + /** + * Creates an instance of this class. + * + * @param name the name of this stat + * @param periodMillis the averaging period in milliseconds + * @param reportTimeUnit the time unit for reporting the result + */ + public LongAvgRate(String name, + long periodMillis, + TimeUnit reportTimeUnit) { + avg = new DoubleExpMovingAvg(name, periodMillis); + assert reportTimeUnit != null; + this.reportTimeUnit = reportTimeUnit; + } + + /** + * Creates an instance of this class as a copy of another instance. + * + * @param other the other instance to copy + */ + private LongAvgRate(LongAvgRate other) { + avg = new DoubleExpMovingAvg(other.avg.copy()); + reportTimeUnit = other.reportTimeUnit; + synchronized (this) { + synchronized (other) { + prevValue = other.prevValue; + prevTime = other.prevTime; + } + } + } + + /** + * Returns the name of this stat. + * + * @return the name of this stat + */ + public String getName() { + return avg.getName(); + } + + /** + * Adds a new value to the average, ignoring values that are less than + * {@link #MIN_PERIOD} milliseconds older than the last entry. + * + * @param value the new value + * @param time the current time in milliseconds + */ + public synchronized void add(long value, long time) { + assert time > 0; + if (prevTime != 0) { + final long deltaTime = time - prevTime; + if (deltaTime < MIN_PERIOD) { + return; + } + avg.add(((double) (value - prevValue)) / ((double) deltaTime), + time); + } + prevValue = value; + prevTime = time; + } + + /** + * Update with more recent values from another stat. + * + * @param other the other stat + */ + public void add(LongAvgRate other) { + final LongAvgRate copyOther = other.copy(); + synchronized (this) { + synchronized (copyOther) { + addInternal(copyOther); + } + } + } + + /** + * Do an add, letting the caller arrange to synchronize on this instance + * and the argument safely. + */ + private void addInternal(LongAvgRate other) { + assert(Thread.holdsLock(this)); + assert(Thread.holdsLock(other)); + + /* + * Only use the other values if they are newer by more than the + * minimum + */ + if ((other.prevTime - prevTime) > MIN_PERIOD) { + avg.add(other.avg); + prevValue = other.prevValue; + prevTime = other.prevTime; + } + } + + /** + * Create and return a new stat that includes the most recent values from + * this stat and another stat. + * + * @param other the other stat + * @return a copy containing all new values + */ + public LongAvgRate copyLatest(LongAvgRate other) { + final LongAvgRate otherCopy = other.copy(); + synchronized (this) { + synchronized (otherCopy) { + if (prevTime > otherCopy.prevTime) { + otherCopy.addInternal(this); + return otherCopy; + } + final LongAvgRate result = copy(); + synchronized (result) { + result.addInternal(otherCopy); + return result; + } + } + } + } + + /** + * Returns the time the last new value was added, or 0 if no values have + * been added. + * + * @return the time or 0 + */ + synchronized long getPrevTime() { + return prevTime; + } + + /** + * Returns the current average rate, or 0 if no rate has been computed. 
+ */ + @Override + public Long get() { + return getPrimitive(); + } + + /** Returns the current average rate as a primitive value. */ + private long getPrimitive() { + final double inMillis = avg.getPrimitive(); + if (reportTimeUnit == MILLISECONDS) { + return Math.round(inMillis); + } else if (reportTimeUnit.compareTo(MILLISECONDS) < 0) { + return Math.round( + inMillis / reportTimeUnit.convert(1, MILLISECONDS)); + } else { + return Math.round(inMillis * reportTimeUnit.toMillis(1)); + } + } + + @Override + public synchronized void clear() { + avg.clear(); + prevValue = 0; + prevTime = 0; + } + + @Override + public LongAvgRate copy() { + return new LongAvgRate(this); + } + + @Override + protected String getFormattedValue(boolean useCommas) { + if (isNotSet()) { + return "unknown"; + } + final long val = getPrimitive(); + if (useCommas) { + return Stat.FORMAT.format(val); + } else { + return Long.toString(val); + } + } + + @Override + public boolean isNotSet() { + return avg.isNotSet(); + } + + @Override + public synchronized String toString() { + return "LongAvgRate[" + avg + ", prevValue=" + prevValue + + ", prevTime=" + prevTime + "]"; + } + + /** Synchronize access to fields. */ + private synchronized void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException { + + in.defaultReadObject(); + } + + /** Synchronize access to fields. */ + private synchronized void writeObject(ObjectOutputStream out) + throws IOException { + + out.defaultWriteObject(); + } +}
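The intended call pattern, as a sketch with illustrative names and numbers: the caller feeds absolute readings of a growing value plus timestamps, and the stat smooths the per-interval rates and reports them in the requested unit.

    LongAvgRate rate =
        new LongAvgRate("replica:bytesBehind", 10000, TimeUnit.SECONDS);
    rate.add(0, 1000);        // 0 bytes at t=1000 ms
    rate.add(5000, 2000);     // sample: (5000-0)/1000 = 5 bytes/ms
    rate.add(12000, 3000);    // sample: (12000-5000)/1000 = 7 bytes/ms
    long perSec = rate.get(); // smoothed value between 5000/s and 7000/s

Samples arriving less than MIN_PERIOD (200 ms) after the previous one are dropped, per the add() javadoc, to avoid quantizing errors.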
diff --git a/src/com/sleepycat/je/utilint/LongAvgRateMapStat.java b/src/com/sleepycat/je/utilint/LongAvgRateMapStat.java new file mode 100644 index 0000000..e2a070f --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongAvgRateMapStat.java @@ -0,0 +1,178 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A JE stat that maintains a map of individual {@link LongAvgRate} values + * which can be looked up with a String key, and that returns results as a + * formatted string. + */ +public final class LongAvgRateMapStat extends MapStat<Long, LongAvgRate> { + + private static final long serialVersionUID = 1L; + + /** The averaging period in milliseconds. */ + protected final long periodMillis; + + /** The time unit for reporting rates. */ + private final TimeUnit reportTimeUnit; + + /** + * The time the last stat was removed. This value is used to determine + * which entries should be included when calling computeInterval. + * Synchronize on this instance when accessing this field. + */ + private long removeStatTimestamp; + + /** + * Creates an instance of this class. The definition type must be + * INCREMENTAL. + * + * @param group the owning group + * @param definition the associated definition + * @param periodMillis the averaging period in milliseconds + * @param reportTimeUnit the time unit for reporting rates + */ + public LongAvgRateMapStat(StatGroup group, + StatDefinition definition, + long periodMillis, + TimeUnit reportTimeUnit) { + super(group, definition); + assert definition.getType() == StatType.INCREMENTAL; + assert periodMillis > 0; + assert reportTimeUnit != null; + this.periodMillis = periodMillis; + this.reportTimeUnit = reportTimeUnit; + } + + private LongAvgRateMapStat(LongAvgRateMapStat other) { + super(other); + periodMillis = other.periodMillis; + reportTimeUnit = other.reportTimeUnit; + synchronized (this) { + synchronized (other) { + removeStatTimestamp = other.removeStatTimestamp; + } + } + } + + /** + * Creates, stores, and returns a new stat for the specified key. + * + * @param key the key + * @return the new stat + */ + public synchronized LongAvgRate createStat(String key) { + assert key != null; + final LongAvgRate stat = new LongAvgRate( + definition.getName() + ":" + key, periodMillis, reportTimeUnit); + statMap.put(key, stat); + return stat; + } + + /** + * Note the removal time, so that computeInterval can tell if an empty map + * is newer than a non-empty one. + */ + @Override + public synchronized void removeStat(String key) { + removeStat(key, System.currentTimeMillis()); + } + + /** Remove a stat and specify the time of the removal -- for testing. */ + synchronized void removeStat(String key, long time) { + removeStatTimestamp = time; + super.removeStat(key); + } + + @Override + public LongAvgRateMapStat copy() { + return new LongAvgRateMapStat(this); + } + + /** + * Creates a new map that contains entries for all keys that appear in + * whichever of this map or the argument is newer, with those entries + * updated with any values from both maps. Treats this map as newest if + * both have the same timestamp. This method does not compute negative + * intervals, since negation does not work properly for this non-additive + * stat. The base argument must be a LongAvgRateMapStat. + */ + @Override + public LongAvgRateMapStat computeInterval(Stat<String> base) { + assert base instanceof LongAvgRateMapStat; + final LongAvgRateMapStat copy = copy(); + final LongAvgRateMapStat baseCopy = + (LongAvgRateMapStat) base.copy(); + if (copy.getLatestTime() < baseCopy.getLatestTime()) { + return copy.updateLatest(baseCopy); + } + return baseCopy.updateLatest(copy); + } + + /** + * Update this map to reflect changes from the argument, including merging + * latest changes, removing entries not in the argument, and adding ones + * not in this instance. + */ + private synchronized LongAvgRateMapStat updateLatest( + final LongAvgRateMapStat latest) { + + synchronized (latest) { + for (final Iterator<Entry<String, LongAvgRate>> i = + statMap.entrySet().iterator(); + i.hasNext(); ) { + final Entry<String, LongAvgRate> e = i.next(); + final LongAvgRate latestStat = + latest.statMap.get(e.getKey()); + if (latestStat != null) { + e.getValue().add(latestStat); + } else { + i.remove(); + } + } + + for (final Entry<String, LongAvgRate> e : + latest.statMap.entrySet()) { + final String key = e.getKey(); + if (!statMap.containsKey(key)) { + statMap.put(key, e.getValue()); + } + } + } + return this; + } + + /** + * Returns the most recent time any component stat was modified, including + * the time of the latest stat removal. 
+ */ + private synchronized long getLatestTime() { + long latestTime = removeStatTimestamp; + for (final LongAvgRate stat : statMap.values()) { + latestTime = Math.max(latestTime, stat.getPrevTime()); + } + return latestTime; + } + + /** Do nothing for this non-additive stat. */ + @Override + public synchronized void negate() { } +} diff --git a/src/com/sleepycat/je/utilint/LongAvgRateStat.java b/src/com/sleepycat/je/utilint/LongAvgRateStat.java new file mode 100644 index 0000000..5af70d4 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongAvgRateStat.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A long JE stat generated from an exponential moving average over a + * specified time period of the rate of change in a value over time. + */ +public class LongAvgRateStat extends Stat<Long> { + private static final long serialVersionUID = 1L; + + /** The underlying average rate. */ + private final LongAvgRate avg; + + /** + * Creates an instance of this class. The definition type must be + * INCREMENTAL. + * + * @param group the statistics group + * @param definition the statistics definition + * @param periodMillis the averaging period in milliseconds + * @param reportTimeUnit the time unit for reporting the rate + */ + public LongAvgRateStat(StatGroup group, + StatDefinition definition, + long periodMillis, + TimeUnit reportTimeUnit) { + super(group, definition); + assert definition.getType() == StatType.INCREMENTAL; + avg = new LongAvgRate( + definition.getName(), periodMillis, reportTimeUnit); + } + + private LongAvgRateStat(StatDefinition definition, LongAvgRate avg) { + super(definition); + this.avg = avg; + } + + /** + * Adds a new value to the average. + * + * @param value the new value + * @param time the current time + */ + public void add(long value, long time) { + avg.add(value, time); + } + + @Override + public Long get() { + return avg.get(); + } + + @Override + public void clear() { + avg.clear(); + } + + @Override + public LongAvgRateStat copy() { + return new LongAvgRateStat(definition, avg.copy()); + } + + @Override + protected String getFormattedValue() { + return avg.getFormattedValue(); + } + + @Override + public boolean isNotSet() { + return avg.isNotSet(); + } + + /** @throws UnsupportedOperationException always */ + @Override + public void set(Long newValue) { + throw new UnsupportedOperationException(); + } + + /** @throws UnsupportedOperationException always */ + @Override + public void add(Stat<Long> other) { + throw new UnsupportedOperationException(); + } + + /** + * Create a stat that includes the newest entries from this stat and the + * base stat. This method does not use negative intervals, since negation + * does not work properly for this non-additive stat. The base argument + * must be a LongAvgRateStat. 
+ */ + @Override + public LongAvgRateStat computeInterval(Stat<Long> base) { + assert base instanceof LongAvgRateStat; + final LongAvgRate baseAvg = ((LongAvgRateStat) base).avg; + return new LongAvgRateStat(definition, avg.copyLatest(baseAvg)); + } + + /** Do nothing for this non-additive stat. */ + @Override + public void negate() { } +} diff --git a/src/com/sleepycat/je/utilint/LongDiffMapStat.java b/src/com/sleepycat/je/utilint/LongDiffMapStat.java new file mode 100644 index 0000000..7e0c721 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongDiffMapStat.java @@ -0,0 +1,83 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A JE stat that maintains a map of individual {@link LongDiffStat} values + * which can be looked up with a String key, and that returns results as a + * formatted string. Only supports CUMULATIVE stats. + */ +public final class LongDiffMapStat extends MapStat<Long, LongDiffStat> { + private static final long serialVersionUID = 1L; + + /** + * The maximum time, in milliseconds, that a computed difference is + * valid. + */ + private final long validityMillis; + + /** + * Creates an instance of this class. The definition type must be + * CUMULATIVE. + * + * @param group the owning group + * @param definition the associated definition + * @param validityMillis the amount of time, in milliseconds, for which a + * computed difference remains valid + */ + public LongDiffMapStat(StatGroup group, + StatDefinition definition, + long validityMillis) { + super(group, definition); + assert definition.getType() == StatType.CUMULATIVE; + assert validityMillis > 0; + this.validityMillis = validityMillis; + } + + private LongDiffMapStat(LongDiffMapStat other) { + super(other); + validityMillis = other.validityMillis; + } + + /** + * Creates, stores, and returns a new stat for the specified key and base + * stat. + * + * @param key the new key + * @param base the base stat + * @return the new stat + */ + public synchronized LongDiffStat createStat(String key, Stat<Long> base) { + final LongDiffStat stat = new LongDiffStat(base, validityMillis); + statMap.put(key, stat); + return stat; + } + + @Override + public LongDiffMapStat copy() { + return new LongDiffMapStat(this); + } + + /** Ignores base for a non-additive stat. */ + @Override + public LongDiffMapStat computeInterval(Stat<String> base) { + return copy(); + } + + /** Does nothing for a non-additive stat. */ + @Override + public synchronized void negate() { } +}
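A sketch of the per-key pattern these map stats share (group, REPLICA_LAG_MAP, masterVLSN, and replicaVLSN are hypothetical stand-ins; the definition must be CUMULATIVE): each node gets its own component stat under a String key, and a removal is timestamped so computeInterval can compare map ages.

    LongDiffMapStat lagMap =
        new LongDiffMapStat(group, REPLICA_LAG_MAP, 60 * 1000);
    LongDiffStat node1Lag = lagMap.createStat("node1", masterVLSN);
    node1Lag.set(replicaVLSN, System.currentTimeMillis());
    lagMap.removeStat("node1");  // records the removal time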
diff --git a/src/com/sleepycat/je/utilint/LongDiffStat.java b/src/com/sleepycat/je/utilint/LongDiffStat.java new file mode 100644 index 0000000..0230fa6 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongDiffStat.java @@ -0,0 +1,189 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +/** + * A long JE stat component that computes the difference between another stat + * and a specified value. Reports 0 if the value is greater than the stat + * value. The computed difference remains valid for a specified amount of + * time, which should represent the maximum amount of time expected to elapse + * between when new values are provided. If no value is specified within the + * validity interval, then the difference is recomputed using the current base + * stat value and the last specified value. The idea is to treat the specified + * value as up-to-date for a certain period of time, and then represent that + * the lack of updates means it is falling behind. + */ +public class LongDiffStat extends MapStatComponent<Long, LongDiffStat> { + private static final long serialVersionUID = 1L; + + /** The stat that supplies the base value for computing differences. */ + private final Stat<Long> base; + + /** + * The maximum time, in milliseconds, that a computed difference is + * valid. + */ + private final long validityMillis; + + /** + * The previous value, or 0. Synchronize on this instance when accessing + * this field. + */ + private long prevValue; + + /** + * The time in milliseconds of the previous value, or 0. Synchronize on + * this instance when accessing this field. + */ + private long prevTime; + + /** + * The last computed difference, or 0. Synchronize on this instance when + * accessing this field. + */ + private long diff; + + /** + * Creates an instance of this class. + * + * @param base the base stat used for computing differences + * @param validityMillis the amount of time, in milliseconds, for which a + * computed difference remains valid + */ + public LongDiffStat(Stat<Long> base, long validityMillis) { + assert base != null; + assert validityMillis > 0; + this.base = base; + this.validityMillis = validityMillis; + } + + private LongDiffStat(LongDiffStat other) { + base = other.base.copy(); + validityMillis = other.validityMillis; + synchronized (this) { + synchronized (other) { + prevValue = other.prevValue; + prevTime = other.prevTime; + diff = other.diff; + } + } + } + + /** + * Returns the value of the stat for the specified time. + * + * @param time the time + * @return the value of the stat + */ + public long get(long time) { + assert time > 0; + synchronized (this) { + if (prevTime == 0) { + return 0; + } + if (time < (prevTime + validityMillis)) { + return diff; + } + } + final long baseValue = base.get(); + synchronized (this) { + return Math.max(baseValue - prevValue, 0); + } + } + + /** + * Specifies a new value for the current time. + * + * @param newValue the new value + */ + public void set(long newValue) { + set(newValue, System.currentTimeMillis()); + } + + /** + * Specifies a new value for the specified time. 
+ * + * @param newValue the new value + * @param time the time + */ + public void set(long newValue, long time) { + assert time > 0; + final long baseValue = base.get(); + synchronized (this) { + prevValue = newValue; + prevTime = time; + diff = Math.max(baseValue - newValue, 0); + } + } + + /** + * Returns the value of the stat for the current time. + */ + @Override + public Long get() { + return get(System.currentTimeMillis()); + } + + @Override + public synchronized void clear() { + prevValue = 0; + prevTime = 0; + diff = 0; + } + + @Override + public LongDiffStat copy() { + return new LongDiffStat(this); + } + + @Override + protected synchronized String getFormattedValue(boolean useCommas) { + if (isNotSet()) { + return "Unknown"; + } else if (useCommas) { + return Stat.FORMAT.format(get(System.currentTimeMillis())); + } else { + return String.valueOf(get(System.currentTimeMillis())); + } + } + + @Override + public synchronized boolean isNotSet() { + return prevTime == 0; + } + + @Override + public synchronized String toString() { + return "LongDiffStat[prevValue=" + prevValue + + ", prevTime=" + prevTime + ", diff=" + diff + "]"; + } + + /** Synchronize access to fields. */ + private synchronized void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException { + + in.defaultReadObject(); + } + + /** Synchronize access to fields. */ + private synchronized void writeObject(ObjectOutputStream out) + throws IOException { + + out.defaultWriteObject(); + } +}
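Concrete numbers make the validity window easier to follow (a sketch; base is a hypothetical Stat<Long> that reads 1000 at first and 1200 later):

    LongDiffStat lag = new LongDiffStat(base, 5000 /* validityMillis */);
    lag.set(970, t0);     // diff = max(1000 - 970, 0) = 30
    lag.get(t0 + 1000);   // within the window: returns the stored 30
    lag.get(t0 + 10000);  // window expired: max(1200 - 970, 0) = 230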
diff --git a/src/com/sleepycat/je/utilint/LongMaxStat.java b/src/com/sleepycat/je/utilint/LongMaxStat.java new file mode 100644 index 0000000..1670510 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongMaxStat.java @@ -0,0 +1,72 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * A long stat which maintains a maximum value. It is initialized to + * Long.MIN_VALUE. The setMax() method assigns the counter to + * MAX(counter, new value). + */ +public class LongMaxStat extends LongStat { + private static final long serialVersionUID = 1L; + + public LongMaxStat(StatGroup group, StatDefinition definition) { + super(group, definition); + clear(); + } + + public LongMaxStat(StatGroup group, + StatDefinition definition, + long counter) { + super(group, definition); + this.counter = counter; + } + + @Override + public void clear() { + set(Long.MIN_VALUE); + } + + /** + * Set stat to MAX(current stat value, newValue). + * + * @return true if the max value was updated. + */ + public boolean setMax(long newValue) { + if (counter < newValue) { + counter = newValue; + return true; + } + return false; + } + + @Override + public Stat<Long> computeInterval(Stat<Long> base) { + return (counter < base.get() ? base.copy() : copy()); + } + + @Override + public void negate() { + } + + @Override + protected String getFormattedValue() { + if (counter == Long.MIN_VALUE) { + return "NONE"; + } + + return Stat.FORMAT.format(counter); + } +} + diff --git a/src/com/sleepycat/je/utilint/LongMaxZeroStat.java b/src/com/sleepycat/je/utilint/LongMaxZeroStat.java new file mode 100644 index 0000000..ae819ed --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongMaxZeroStat.java @@ -0,0 +1,33 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * For stats where the min value in the range is zero, so that sums, averages, + * etc. based on positive ranges just work. + */ +public class LongMaxZeroStat extends LongMaxStat { + + private static final long serialVersionUID = 1L; + + public LongMaxZeroStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + @Override + public Long get() { + Long value = super.get(); + return (value == Long.MIN_VALUE) ? 0 : value; + } +} diff --git a/src/com/sleepycat/je/utilint/LongMinStat.java b/src/com/sleepycat/je/utilint/LongMinStat.java new file mode 100644 index 0000000..679b0d9 --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongMinStat.java @@ -0,0 +1,65 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * A long stat which maintains a minimum value. It is initialized to + * Long.MAX_VALUE. The setMin() method assigns the counter to + * MIN(counter, new value). + */ +public class LongMinStat extends LongStat { + private static final long serialVersionUID = 1L; + + public LongMinStat(StatGroup group, StatDefinition definition) { + super(group, definition); + clear(); + } + + public LongMinStat(StatGroup group, + StatDefinition definition, + long counter) { + super(group, definition); + this.counter = counter; + } + + @Override + public void clear() { + set(Long.MAX_VALUE); + } + + /** + * Set stat to MIN(current stat value, newValue). + */ + public void setMin(long newValue) { + counter = (counter > newValue) ? newValue : counter; + } + + @Override + public Stat<Long> computeInterval(Stat<Long> base) { + return (counter > base.get() ?
base.copy() : copy()); + } + + @Override + public void negate() { + } + + @Override + protected String getFormattedValue() { + if (counter == Long.MAX_VALUE) { + return "NONE"; + } + + return Stat.FORMAT.format(counter); + } +} diff --git a/src/com/sleepycat/je/utilint/LongStat.java b/src/com/sleepycat/je/utilint/LongStat.java new file mode 100644 index 0000000..a7e579c --- /dev/null +++ b/src/com/sleepycat/je/utilint/LongStat.java @@ -0,0 +1,94 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A long JE stat. + */ +public class LongStat extends Stat<Long> { + private static final long serialVersionUID = 1L; + + protected long counter; + + public LongStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + public LongStat(StatGroup group, StatDefinition definition, long counter) { + super(group, definition); + this.counter = counter; + } + + @Override + public Long get() { + return counter; + } + + @Override + public void set(Long newValue) { + counter = newValue; + } + + public void increment() { + counter++; + } + + public void add(long count) { + counter += count; + } + + public void max(long count) { + if (count > counter) { + counter = count; + } + } + + @Override + public void add(Stat<Long> other) { + counter += other.get(); + } + + @Override + public Stat<Long> computeInterval(Stat<Long> base) { + Stat<Long> ret = copy(); + if (definition.getType() == StatType.INCREMENTAL) { + ret.set(counter - base.get()); + } + return ret; + } + + @Override + public void negate() { + if (definition.getType() == StatType.INCREMENTAL) { + counter = -counter; + } + } + + @Override + public void clear() { + counter = 0L; + } + + @Override + protected String getFormattedValue() { + return Stat.FORMAT.format(counter); + } + + @Override + public boolean isNotSet() { + return (counter == 0); + } +}
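The INCREMENTAL case of computeInterval is the one that does real work; a sketch with illustrative values (group and N_READS are hypothetical):

    LongStat nReads = new LongStat(group, N_READS, 100);
    Stat<Long> snapshot = nReads.copy();  // baseline at an earlier point
    nReads.add(50);                       // counter is now 150
    long delta = nReads.computeInterval(snapshot).get();  // 150 - 100 = 50

For a CUMULATIVE definition, computeInterval simply returns a copy, so 150 would be reported unchanged.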
diff --git a/src/com/sleepycat/je/utilint/MapStat.java b/src/com/sleepycat/je/utilint/MapStat.java new file mode 100644 index 0000000..f125fc6 --- /dev/null +++ b/src/com/sleepycat/je/utilint/MapStat.java @@ -0,0 +1,202 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import static com.sleepycat.je.utilint.CollectionUtils.emptySortedMap; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.Map; +import java.util.Map.Entry; +import java.util.SortedMap; +import java.util.TreeMap; + +import com.sleepycat.je.utilint.StatDefinition.StatType; + +/** + * A base class for JE stats that map String keys to component statistics, and + * that return results as formatted strings. + * + * @param <T> the value type of the individual statistics + * @param <C> the class of the individual statistics + */ +public abstract class MapStat<T, C extends MapStatComponent<T, C>> + extends Stat<String> { + private static final long serialVersionUID = 1L; + + /** + * Maps keys to individual statistics. Synchronize on the MapStat instance + * when accessing this field. + */ + protected final Map<String, C> statMap = + + /* Use a sorted map so that the output is sorted */ + new TreeMap<>(); + + /** + * Creates an instance of this class. + * + * @param group the owning group + * @param definition the associated definition + */ + protected MapStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + /** + * Creates an instance of this class as a copy of another instance. This + * instance should be a new instance to avoid lock ordering concerns. + * + * @param other the instance to copy + */ + protected MapStat(MapStat<T, C> other) { + super(other.definition); + synchronized (this) { + synchronized (other) { + for (final Entry<String, C> entry : + other.statMap.entrySet()) { + statMap.put(entry.getKey(), entry.getValue().copy()); + } + } + } + } + + /** + * Removes the individual statistic associated with the specified key. + * + * @param key the key + */ + public synchronized void removeStat(String key) { + assert key != null; + statMap.remove(key); + } + + /** + * Returns a map from keys to values of individual statistics, ignoring + * individual statistics that are not set. + * + * @return map from keys to values of individual statistics + */ + public synchronized SortedMap<String, T> getMap() { + SortedMap<String, T> ret = null; + for (final Entry<String, C> entry : statMap.entrySet()) { + final C stat = entry.getValue(); + if (stat.isNotSet()) { + continue; + } + if (ret == null) { + ret = new TreeMap<>(); + } + ret.put(entry.getKey(), stat.get()); + } + if (ret == null) { + return emptySortedMap(); + } + return ret; + } + + /** + * Returns the map as a string in the format returned by {@link + * #getFormattedValue}, but with values presented without using commas. + */ + @Override + public String get() { + return getFormattedValue(false); + } + + @Override + public synchronized void clear() { + if (definition.getType() == StatType.INCREMENTAL) { + for (final C stat : statMap.values()) { + stat.clear(); + } + } + }
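+
+    /*
+     * For example (values illustrative), a map holding {"node1" -> 1500,
+     * "node2" -> 300} for Long-valued components renders as:
+     *
+     *     getFormattedValue(true)  -> "node1=1,500;node2=300"
+     *     getFormattedValue(false) -> "node1=1500;node2=300"
+     *
+     * get() returns the comma-free form, since its output may be embedded
+     * in comma-separated stat files.
+     */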
+ + /** + * This implementation returns the keys and values of the individual + * statistics in the format: {@code KEY=VALUE[;KEY=VALUE]}. + */ + @Override + protected String getFormattedValue() { + return getFormattedValue(true /* useCommas */); + } + + private synchronized String getFormattedValue(boolean useCommas) { + final StringBuilder sb = new StringBuilder(); + boolean first = true; + for (final Entry<String, C> entry : statMap.entrySet()) { + final C value = entry.getValue(); + if (value.isNotSet()) { + continue; + } + if (!first) { + sb.append(';'); + } else { + first = false; + } + sb.append(entry.getKey()).append('='); + final String formattedValue = + value.getFormattedValue(useCommas); + assert useCommas || (formattedValue.indexOf(',') == -1) + : "Formatted value doesn't obey useCommas: " + formattedValue; + sb.append(formattedValue); + } + return sb.toString(); + } + + @Override + public synchronized boolean isNotSet() { + for (final C stat : statMap.values()) { + if (!stat.isNotSet()) { + return false; + } + } + return true; + } + + /** @throws UnsupportedOperationException always */ + @Override + public void set(String value) { + throw new UnsupportedOperationException( + "The set method is not supported"); + } + + /** @throws UnsupportedOperationException always */ + @Override + public void add(Stat<String> other) { + throw new UnsupportedOperationException( + "The add method is not supported"); + } + + /** This implementation adds synchronization. */ + @Override + public synchronized Stat<String> copyAndClear() { + return super.copyAndClear(); + } + + /** Synchronize access to fields. */ + private synchronized void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException { + + in.defaultReadObject(); + } + + /** Synchronize access to fields. */ + private synchronized void writeObject(ObjectOutputStream out) + throws IOException { + + out.defaultWriteObject(); + } +} diff --git a/src/com/sleepycat/je/utilint/MapStatComponent.java b/src/com/sleepycat/je/utilint/MapStatComponent.java new file mode 100644 index 0000000..475163c --- /dev/null +++ b/src/com/sleepycat/je/utilint/MapStatComponent.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * The interface for individual stat components included in a {@link MapStat}. + * + * @param <T> the type of the statistic value + * @param <C> the type of the component + */ +public abstract class MapStatComponent<T, C extends MapStatComponent<T, C>> + extends BaseStat<T> { + + /** + * Returns the value of the statistic as a formatted string, either using + * or not using commas as requested. Implementations should make sure that + * the result does not contain commas when useCommas is false, because the + * value will be used in a comma-separated value file, where embedded + * commas would cause problems. + * + * @param useCommas whether to use commas + * @return the value as a formatted string + */ + protected abstract String getFormattedValue(boolean useCommas); + + /** Implement this overloading to use commas. */ + @Override + protected String getFormattedValue() { + return getFormattedValue(true); + } + + /** Narrow the return type to the component type. 
+ */ + @Override + public abstract C copy(); +} diff --git a/src/com/sleepycat/je/utilint/Matchpoint.java b/src/com/sleepycat/je/utilint/Matchpoint.java new file mode 100644 index 0000000..e961bc5 --- /dev/null +++ b/src/com/sleepycat/je/utilint/Matchpoint.java @@ -0,0 +1,125 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import com.sleepycat.je.log.BasicVersionedWriteLoggable; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.log.VersionedWriteLoggable; + +/** + * This class writes out a log entry that can be used for replication syncup. + * It can be issued arbitrarily by the master at any point, in order to bound + * the syncup interval in much the way that a checkpoint bounds the recovery + * interval. The entry will be a replicated one, which means that it will be + * tagged with a VLSN. + * + * Although this is a replication class, it resides in the utilint package + * because it is referenced in LogEntryType.java. + * + * TODO: This is currently not used. When it is used, it will be the first + * replicated log entry that does not have a real txn id. All replicated + * entries are expected to have negative ids, and the matchpoint should be + * exempt from Replay.updateSequences, or it should pass in a special reserved + * negative id, so as not to trip the assertion in Replay.updateSequences + * that the txn id is < 0. + */ +public class Matchpoint extends BasicVersionedWriteLoggable { + + /** + * The log version of the most recent format change for this loggable. + * + * @see #getLastFormatChange + */ + private static final int LAST_FORMAT_CHANGE = 8; + + /* Time of issue. */ + private Timestamp time; + + /* For replication - master node which wrote this record. */ + private int repMasterNodeId; + + public Matchpoint(int repMasterNodeId) { + this.repMasterNodeId = repMasterNodeId; + time = new Timestamp(System.currentTimeMillis()); + } + + /** + * For constructing from the log. + */ + public Matchpoint() { + } + + public int getMasterNodeId() { + return repMasterNodeId; + } + + @Override + public int getLastFormatChange() { + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection<VersionedWriteLoggable> getEmbeddedLoggables() { + return Collections.emptyList(); + } + + @Override + public int getLogSize(final int logVersion, final boolean forReplication) { + return LogUtils.getTimestampLogSize(time) + + LogUtils.getPackedIntLogSize(repMasterNodeId); + } + + @Override + public void writeToLog(final ByteBuffer logBuffer, + final int logVersion, + final boolean forReplication) { + LogUtils.writeTimestamp(logBuffer, time); + LogUtils.writePackedInt(logBuffer, repMasterNodeId); + } + + @Override + public void readFromLog(ByteBuffer logBuffer, int entryVersion) { + time = LogUtils.readTimestamp(logBuffer, false /* isUnpacked. 
+ */); + repMasterNodeId = LogUtils.readInt(logBuffer, false /* unpacked */); + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append("<Matchpoint"); + sb.append(" time=\"").append(time).append('"'); + sb.append(" master=\"").append(repMasterNodeId).append('"'); + sb.append("/>"); + } + + @Override + public long getTransactionId() { + return 0; + } + + @Override + public boolean logicalEquals(Loggable other) { + if (!(other instanceof Matchpoint)) { + return false; + } + + Matchpoint otherMatchpoint = (Matchpoint) other; + return (otherMatchpoint.time.equals(time) && + (otherMatchpoint.repMasterNodeId == repMasterNodeId)); + } +} diff --git a/src/com/sleepycat/je/utilint/NanoTimeUtil.java b/src/com/sleepycat/je/utilint/NanoTimeUtil.java new file mode 100644 index 0000000..fda947b --- /dev/null +++ b/src/com/sleepycat/je/utilint/NanoTimeUtil.java @@ -0,0 +1,34 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + + +/** + * Utility class for dealing with special cases of System.nanoTime. + */ +public class NanoTimeUtil { + + /** + * Special compare function for comparing times returned by + * System.nanoTime() to protect against numerical overflows. + * + * @return a negative integer, zero, or a positive integer as the + * first argument is less than, equal to, or greater than the second. + * + * @see System#nanoTime + */ + public static long compare(long t1, long t2) { + return t1 - t2; + } +} diff --git a/src/com/sleepycat/je/utilint/NoClearAtomicLongStat.java b/src/com/sleepycat/je/utilint/NoClearAtomicLongStat.java new file mode 100644 index 0000000..bcdaff3 --- /dev/null +++ b/src/com/sleepycat/je/utilint/NoClearAtomicLongStat.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * A version of {@link AtomicLongStat} that does not reset its value when + * cleared. + */ +public class NoClearAtomicLongStat extends AtomicLongStat { + private static final long serialVersionUID = 1L; + + public NoClearAtomicLongStat(StatGroup group, StatDefinition definition) { + super(group, definition); + } + + NoClearAtomicLongStat(StatDefinition definition, long value) { + super(definition, value); + } + + /** Never clear this stat. */ + @Override + public void clear() { } + + @Override + public AtomicLongStat copy() { + return new NoClearAtomicLongStat(definition, get()); + } + + /** Never clear this stat. 
+ */ + @Override + public AtomicLongStat copyAndClear() { + return copy(); + } +} diff --git a/src/com/sleepycat/je/utilint/Pair.java b/src/com/sleepycat/je/utilint/Pair.java new file mode 100644 index 0000000..c879676 --- /dev/null +++ b/src/com/sleepycat/je/utilint/Pair.java @@ -0,0 +1,41 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * Generic immutable pair, intended for grouping two data elements when a more + * specific class is unwarranted. + */ +public class Pair<FIRST, SECOND> { + private final FIRST first; + private final SECOND second; + + public Pair(FIRST first, SECOND second) { + this.first = first; + this.second = second; + } + + public FIRST first() { + return first; + } + + public SECOND second() { + return second; + } + + @Override + public String toString() { + return "[" + first + ", " + second + "]"; + } +} diff --git a/src/com/sleepycat/je/utilint/PollCondition.java b/src/com/sleepycat/je/utilint/PollCondition.java new file mode 100644 index 0000000..b8d9f2e --- /dev/null +++ b/src/com/sleepycat/je/utilint/PollCondition.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * Utility class that permits a "poll based" waiting for a condition. + */ +public abstract class PollCondition { + + private final long checkPeriodMs; + private final long timeoutMs; + + public PollCondition(long checkPeriodMs, + long timeoutMs) { + super(); + assert checkPeriodMs <= timeoutMs; + this.checkPeriodMs = checkPeriodMs; + this.timeoutMs = timeoutMs; + } + + protected abstract boolean condition(); + + public boolean await() { + + if (condition()) { + return true; + } + + final long timeLimit = System.currentTimeMillis() + timeoutMs; + do { + try { + Thread.sleep(checkPeriodMs); + } catch (InterruptedException e) { + return false; + } + if (condition()) { + return true; + } + } while (System.currentTimeMillis() < timeLimit); + + return false; + } +} diff --git a/src/com/sleepycat/je/utilint/PropUtil.java b/src/com/sleepycat/je/utilint/PropUtil.java new file mode 100644 index 0000000..c9d260a --- /dev/null +++ b/src/com/sleepycat/je/utilint/PropUtil.java @@ -0,0 +1,222 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.StringTokenizer; +import java.util.concurrent.TimeUnit; + +/** + * Convenience methods for handling JE properties. + */ +public class PropUtil { + + private static final long NS_IN_MS = 1000000; + private static final long NS_IN_SEC = 1000000000; + private static final long NS_IN_MINUTE = NS_IN_SEC * 60; + private static final long NS_IN_HOUR = NS_IN_MINUTE * 60; + + /** + * Converts the given duration (interval value plus unit) to milliseconds, + * ensuring that any given value greater than zero converts to at least one + * millisecond to avoid a zero millisecond result, since Object.wait(0) + * waits forever. + * + * @throws IllegalArgumentException if the duration argument is illegal. + * Thrown via API setter methods such as Transaction.setLockTimeout. + */ + public static int durationToMillis(final long val, final TimeUnit unit) { + if (val == 0) { + /* Allow zero duration with null unit. */ + return 0; + } + if (unit == null) { + throw new IllegalArgumentException + ("Duration TimeUnit argument may not be null if interval " + + "is non-zero"); + } + if (val < 0) { + throw new IllegalArgumentException + ("Duration argument may not be negative: " + val); + } + final long newVal = unit.toMillis(val); + if (newVal == 0) { + /* Input val is positive, so return at least one. */ + return 1; + } + if (newVal > Integer.MAX_VALUE) { + throw new IllegalArgumentException + ("Duration argument may not be greater than " + + "Integer.MAX_VALUE milliseconds: " + newVal); + } + return (int) newVal; + } + + /** + * Converts the given duration value in milliseconds to the given unit. + * + * @throws IllegalArgumentException if the unit is null. Thrown via API + * getter methods such as Transaction.getLockTimeout. + */ + public static long millisToDuration(final int val, final TimeUnit unit) { + if (unit == null) { + throw new IllegalArgumentException + ("TimeUnit argument may not be null"); + } + return unit.convert(val, TimeUnit.MILLISECONDS); + } + + /** + * Parses a String duration property (time + optional unit) and returns the + * value in millis. + * + * @throws IllegalArgumentException if the duration string is illegal. + * Thrown via the Environment ctor and setMutableConfig, and likewise for a + * ReplicatedEnvironment. + */ + public static int parseDuration(final String property) { + long ns = parseDurationNS(property); + long millis = ns / NS_IN_MS; + + /* If input val is positive, return at least one. */ + if (ns > 0 && millis == 0) { + return 1; + } + if (millis > Integer.MAX_VALUE) { + throw new IllegalArgumentException( + "Duration argument may not be greater than " + + "Integer.MAX_VALUE milliseconds: " + property); + } + + return (int)millis; + } + + /** + * Parses a String duration property (time + optional unit) and returns the + * value in nanos. + * + * @throws IllegalArgumentException if the duration string is illegal. + * Thrown via the Environment ctor and setMutableConfig, and likewise for a + * ReplicatedEnvironment. 
+ */ + public static long parseDurationNS(final String property) { + StringTokenizer tokens = + new StringTokenizer(property.toUpperCase(java.util.Locale.ENGLISH), + " \t"); + if (!tokens.hasMoreTokens()) { + throw new IllegalArgumentException("Duration argument is empty"); + } + final long time; + try { + time = Long.parseLong(tokens.nextToken()); + } catch (NumberFormatException e) { + throw new IllegalArgumentException + ("Duration argument does not start with a long integer: " + + property); + } + /* Convert time from specified unit to nanos. */ + long ns; + if (tokens.hasMoreTokens()) { + final String unitName = tokens.nextToken(); + if (tokens.hasMoreTokens()) { + throw new IllegalArgumentException + ("Duration argument has extra characters after unit: " + + property); + } + try { + final TimeUnit unit = TimeUnit.valueOf(unitName); + ns = TimeUnit.NANOSECONDS.convert(time, unit); + } catch (IllegalArgumentException e) { + try { + final IEEETimeUnit unit = IEEETimeUnit.valueOf(unitName); + ns = unit.toNanos(time); + } catch (IllegalArgumentException e2) { + throw new IllegalArgumentException + ("Duration argument has unknown unit name: " + + property); + } + } + } else { + /* Default unit is micros. */ + ns = TimeUnit.NANOSECONDS.convert(time, TimeUnit.MICROSECONDS); + } + /* If input val is positive, return at least one. */ + if (time > 0 && ns == 0) { + return 1; + } + return ns; + } + + /** + * Formats a String duration property (time plus unit) from the given + * value and unit. + */ + public static String formatDuration(long time, TimeUnit unit) { + return String.valueOf(time) + ' ' + unit.name(); + } + + /** + * Support for conversion of IEEE time units. Although names are defined + * in uppercase, we uppercase the input string before calling + * IEEETimeUnit.valueOf, in order to support input names in both upper and + * lower case. + */ + private enum IEEETimeUnit { + + /* Nanoseconds */ + NS() { + long toNanos(long val) { + return nanosUnit.convert(val, TimeUnit.NANOSECONDS); + } + }, + + /* Microseconds */ + US() { + long toNanos(long val) { + return nanosUnit.convert(val, TimeUnit.MICROSECONDS); + } + }, + + /* Milliseconds */ + MS() { + long toNanos(long val) { + return nanosUnit.convert(val, TimeUnit.MILLISECONDS); + } + }, + + /* Seconds */ + S() { + long toNanos(long val) { + return nanosUnit.convert(val, TimeUnit.SECONDS); + } + }, + + /* Minutes */ + MIN() { + long toNanos(long val) { + return val * NS_IN_MINUTE; + } + }, + + /* Hours */ + H() { + long toNanos(long val) { + return val * NS_IN_HOUR; + } + }; + + private static final TimeUnit nanosUnit = TimeUnit.NANOSECONDS; + + abstract long toNanos(long val); + } +}
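A few concrete inputs illustrate the parsing rules above (a sketch):

    PropUtil.parseDuration("500 ms");     // 500: IEEE alias MS
    PropUtil.parseDuration("3 SECONDS");  // 3000: a TimeUnit name
    PropUtil.parseDuration("100");        // 1: default unit is microseconds,
                                          // and positive values round up to
                                          // at least 1 ms
    PropUtil.parseDurationNS("100");      // 100000: same default, in nanos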
+ */ + +package com.sleepycat.je.utilint; + + +/* + * Singleton class to indicate that something needs to be relatched for + * exclusive access due to a fetch occurring. + */ +@SuppressWarnings("serial") +public class RelatchRequiredException extends Exception { + public static RelatchRequiredException relatchRequiredException = + new RelatchRequiredException(); + + private RelatchRequiredException() { + } + + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } +} diff --git a/src/com/sleepycat/je/utilint/SizeofMarker.java b/src/com/sleepycat/je/utilint/SizeofMarker.java new file mode 100644 index 0000000..9ee4766 --- /dev/null +++ b/src/com/sleepycat/je/utilint/SizeofMarker.java @@ -0,0 +1,22 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * Special marker interface used by Sizeof when performing memory overhead + * calculations. + */ +public interface SizeofMarker { + +} diff --git a/src/com/sleepycat/je/utilint/Stat.java b/src/com/sleepycat/je/utilint/Stat.java new file mode 100644 index 0000000..2158f36 --- /dev/null +++ b/src/com/sleepycat/je/utilint/Stat.java @@ -0,0 +1,121 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.text.DecimalFormat; + +import com.sleepycat.je.EnvironmentFailureException; + +/** + * Base class for all JE statistics. A single Stat embodies a value and + * definition. See StatGroup for a description of how to create and display + * statistics. + * + * Note that Stat intentionally does not contain the statistics value itself. + * Instead, the concrete subclass will implement the value as the appropriate + * primitive type. That's done to avoid wrapper classes like Integer and Long, + * and to keep the overhead of statistics low. + */ +public abstract class Stat extends BaseStat implements Cloneable { + private static final long serialVersionUID = 1L; + + public static final DecimalFormat FORMAT = + new DecimalFormat("###,###,###,###,###,###,###"); + + protected final StatDefinition definition; + + /** + * A stat registers itself with an owning group. + */ + Stat(StatGroup group, StatDefinition definition) { + this.definition = definition; + group.register(this); + } + + /** + * Creates an instance without registering it with the owning group, for + * creating copies without using clone. For constructing an unregistered + * instance. + */ + Stat(StatDefinition definition) { + this.definition = definition; + } + + /** + * Set the stat value. 
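RelatchRequiredException above is the classic cheap control-flow exception: a preallocated singleton whose fillInStackTrace() override skips the stack capture, so throwing it costs little more than a branch. A minimal self-contained sketch of the same idiom follows; all names here are hypothetical.

// Preallocated singleton; fillInStackTrace() is a no-op, so throwing
// it never walks the stack. Hypothetical class, for illustration only.
@SuppressWarnings("serial")
class RetryRequiredException extends Exception {
    static final RetryRequiredException INSTANCE =
        new RetryRequiredException();

    private RetryRequiredException() {
    }

    @Override
    public synchronized Throwable fillInStackTrace() {
        return this; // skip the expensive stack capture
    }
}

class CheapExceptionDemo {
    static void mayNeedRetry(boolean retry) throws RetryRequiredException {
        if (retry) {
            throw RetryRequiredException.INSTANCE;
        }
    }

    public static void main(String[] args) {
        try {
            mayNeedRetry(true);
        } catch (RetryRequiredException e) {
            System.out.println("caught preallocated exception cheaply");
        }
    }
}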
+ */ + public abstract void set(T newValue); + + /** + * Add the value of "other" to this stat. + */ + public abstract void add(Stat other); + + /** + * Compute interval value with respect to the base value. + */ + public abstract Stat computeInterval(Stat base); + + /** + * Negate the value. + */ + public abstract void negate(); + + @Override + public Stat copy() { + @SuppressWarnings("unchecked") + final Stat copy; + try { + copy = (Stat) super.clone(); + } catch (CloneNotSupportedException unexpected) { + throw EnvironmentFailureException.unexpectedException(unexpected); + } + return copy; + } + + /** + * Return a copy of this statistic and add to group. + */ + public Stat copyAndAdd(StatGroup group) { + Stat newCopy = copy(); + group.register(newCopy); + return newCopy; + } + + /** + * Return a copy of this stat, and clear the stat's value. + */ + public Stat copyAndClear() { + Stat newCopy = copy(); + clear(); + return newCopy; + } + + public StatDefinition getDefinition() { + return definition; + } + + @Override + public String toString() { + return definition.getName() + "=" + getFormattedValue(); + } + + /** + * Includes the per-stat description in the output string. + */ + public String toStringVerbose() { + return definition.getName() + "=" + getFormattedValue() + + "\n\t\t" + definition.getDescription(); + } +} diff --git a/src/com/sleepycat/je/utilint/StatDefinition.java b/src/com/sleepycat/je/utilint/StatDefinition.java new file mode 100644 index 0000000..d109071 --- /dev/null +++ b/src/com/sleepycat/je/utilint/StatDefinition.java @@ -0,0 +1,105 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.Serializable; + +/** + * Per-stat Metadata for JE statistics. The name and description are meant to + * available in a verbose display of stats, and should be meaningful for users. + */ +public class StatDefinition implements Comparable, Serializable { + private static final long serialVersionUID = 1L; + + /* + * A CUMULATIVE statistic is a statistic that is never cleared + * (represents totals) or whose value is computed from the system + * state at the time the statistic is acquired. + * An INCREMENTAL statistic is cleared when StatConfig.getClear + * is true. The value of the statistic represent an incremental + * value since the last clear. + */ + public enum StatType { + INCREMENTAL, + CUMULATIVE + } + + private final String name; + private final String description; + private final StatType type; + + /** + * Convenience constructor used for INCREMENTAL stats. 
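For concreteness, here is a minimal sketch of a long-valued subclass in the spirit of JE's own LongStat (defined elsewhere in this import). The class name is hypothetical. It assumes Stat is generic in its value type, as set(T newValue) above indicates, and that BaseStat declares get(), clear(), getFormattedValue() and isNotSet(), as the overrides in StringStat later in this commit suggest; it must also sit in com.sleepycat.je.utilint, since Stat's constructors are package-private.

package com.sleepycat.je.utilint;

// Hypothetical sketch, not the real LongStat.
public class SimpleLongStat extends Stat<Long> {
    private static final long serialVersionUID = 1L;

    private long counter;

    public SimpleLongStat(StatGroup group, StatDefinition definition) {
        super(group, definition); // registers with the owning group
    }

    @Override
    public Long get() {
        return counter;
    }

    @Override
    public void set(Long newValue) {
        counter = newValue;
    }

    @Override
    public void add(Stat<Long> other) {
        counter += other.get();
    }

    @Override
    public Stat<Long> computeInterval(Stat<Long> base) {
        /* Interval = current value minus the base snapshot's value. */
        SimpleLongStat interval = (SimpleLongStat) copy();
        interval.counter = counter - ((SimpleLongStat) base).counter;
        return interval;
    }

    @Override
    public void negate() {
        counter = -counter;
    }

    @Override
    public void clear() {
        counter = 0;
    }

    @Override
    protected String getFormattedValue() {
        return FORMAT.format(counter);
    }

    @Override
    public boolean isNotSet() {
        return counter == 0;
    }
}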
+ * @param name + * @param description + */ + public StatDefinition(String name, String description) { + this.name = name; + this.description = description; + this.type = StatType.INCREMENTAL; + } + + /** + * Constructor + * @param name + * @param description + * @param type + */ + public StatDefinition(String name, String description, StatType type) { + this.name = name; + this.description = description; + this.type = type; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public StatType getType() { + return type; + } + + @Override + public String toString() { + return name + ": " + description; + } + + @Override + public int compareTo(Object other) { + return toString().compareTo(other.toString()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (!(obj instanceof StatDefinition)) { + return false; + } + + StatDefinition other = (StatDefinition) obj; + return (name.equals(other.name)); + } + + @Override + public int hashCode() { + return name.hashCode(); + } +} diff --git a/src/com/sleepycat/je/utilint/StatGroup.java b/src/com/sleepycat/je/utilint/StatGroup.java new file mode 100644 index 0000000..b94552d --- /dev/null +++ b/src/com/sleepycat/je/utilint/StatGroup.java @@ -0,0 +1,454 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.Serializable; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.SortedMap; +import java.util.TreeMap; + +import com.sleepycat.je.TransactionStats.Active; + +/** + * The Stats infrastructure provides context for JE statistics. Each statistic + * has these attributes: + * - metadata - specifically, a name and description + * - each statistic is associated with a parent stat group, which itself has + * a name and description. + * - support for the StatsConfig.clear semantics + * - a way to print statistics in a user friendly way. + * + * To create a statistic variable, instantiate one of the concrete subclasses + * of Stat. Each concrete subclass should hold the methods that are needed to + * best set and display the value. For example, instead of using LongStat to + * hold a timestamp or LSN value, use TimestampStat or LSNStat. A Stat instance + * needs to specify a StatDefinition. There may be multiple Stat variables in + * different components that share a StatDefinition. They are differentiated + * when displayed by their parent StatGroup. + * + * Each Stat instance is associated with a StatGroup, which holds the + * collection of stats that belong to a given component. Each member of the + * StatGroup has a unique StatDefinition. StatGroups can be combined, in order + * to accumulate values. For example, the LockManager may have multiple lock + * tables. Each lock table keeps its own latch statistics. 
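One consequence of the definitions above that is easy to miss: equals() and hashCode() key on the name alone, while compareTo() keys on toString(), which includes the description. A small illustration (the stat name here is made up):

import com.sleepycat.je.utilint.StatDefinition;
import com.sleepycat.je.utilint.StatDefinition.StatType;

public class StatDefinitionEqualityExample {
    public static void main(String[] args) {
        StatDefinition a =
            new StatDefinition("nCacheMiss", "Number of cache misses.");
        StatDefinition b =
            new StatDefinition("nCacheMiss", "Different description.",
                               StatType.CUMULATIVE);

        // Identity is the name alone: description and type are ignored
        // by equals() and hashCode(), so these are "the same" stat.
        System.out.println(a.equals(b));                  // true
        System.out.println(a.hashCode() == b.hashCode()); // true

        // compareTo() compares "name: description", so ordering can
        // disagree with equality when descriptions differ.
        System.out.println(a.compareTo(b) == 0);          // false
    }
}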
When LockStats are + * generated, the StatsGroup for each latch is collected and rolled up into a + * single StatGroup, using the addAll(StatGroup) method. + * + * The Stats infrastructure is for internal use only. Public API classes like + * EnvironmentStats, LockStats, etc, contain StatGroups. A call to retrieve + * stats is implemented by getting a clone of the StatGroups held by the + * components like the cleaner, the incompressor, the LockManager, etc. The + * public API classes provide getter methods that reach into the StatGroups to + * return the specific stat value. + * + * To add a statistic, create the Stat variable in the component where it is + * being used and associate it with a StatGroup. The Stat infrastructure does + * the rest of the work for plumbing that statistic up to the public API + * class. Each API class must provide a getter method to access the specific + * statistic. Currently, this is done manually. + */ +public class StatGroup implements Serializable { + private static final long serialVersionUID = 1L; + + /* + * User understandable description of the grouping. The description may + * indicate that these stats are meant for internal use. + */ + private final String groupName; + private final String groupDescription; + private final Map> stats; + + public StatGroup(String groupName, String groupDescription) { + this(groupName, groupDescription, + new HashMap>()); + } + + private StatGroup(String groupName, + String groupDescription, + Map> values) { + this.groupName = groupName; + this.groupDescription = groupDescription; + this.stats = Collections.synchronizedMap(values); + } + + /** + * Returns a synchronized, unmodifiable view of the stats in this group. + * Note that the returned set can still be modified by other threads, so + * the caller needs to take that into account. + */ + public Map> getStats() { + return Collections.unmodifiableMap(stats); + } + + /** + * Add a stat to the group. + */ + void register(Stat oneStat) { + Stat prev = stats.put(oneStat.getDefinition(), oneStat); + assert (prev == null) : "prev = " + prev + " oneStat=" + + oneStat.getDefinition(); + } + + /** + * Add all the stats from the other group into this group. If both groups + * have the same stat, add the values. The caller must make sure that no + * stats are added to or removed from the argument during this call. + * + * @throws ConcurrentModificationException if the addition or removal of + * stats in the argument is detected + */ + @SuppressWarnings("unchecked") + public void addAll(StatGroup other) { + + for (Entry> entry : + other.stats.entrySet()) { + + StatDefinition definition = entry.getKey(); + Stat localStat; + synchronized (stats) { + localStat = stats.get(definition); + if (localStat == null) { + stats.put(definition, entry.getValue()); + continue; + } + } + + /* + * Cast to get around type problem. We know it's the same stat type + * because the definition is the same, but the compiler doesn't + * know that. + */ + @SuppressWarnings("rawtypes") + Stat additionalValue = entry.getValue(); + localStat.add(additionalValue); + } + } + + /** + * The caller must make sure that no stats are added to or removed from + * this stat group while this method is being called. 
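A sketch of the roll-up pattern the class javadoc above describes, reusing the hypothetical SimpleLongStat sketch from earlier (so this example, too, would sit in com.sleepycat.je.utilint):

package com.sleepycat.je.utilint;

public class StatRollupExample {
    public static void main(String[] args) {
        StatDefinition nWaits =
            new StatDefinition("nWaits", "Number of latch waits.");

        StatGroup table1 = new StatGroup("LockTable", "Per-table latch stats");
        StatGroup table2 = new StatGroup("LockTable", "Per-table latch stats");

        SimpleLongStat waits1 = new SimpleLongStat(table1, nWaits);
        waits1.set(10L);
        SimpleLongStat waits2 = new SimpleLongStat(table2, nWaits);
        waits2.set(5L);

        /* Same StatDefinition on both sides, so the values are summed. */
        table1.addAll(table2);

        /* Prints "nWaits=15" via Stat.toString(). */
        System.out.println(table1.getStat(nWaits));
    }
}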
+ * + * @throws ConcurrentModificationException if the addition or removal of + * stats in this group is detected + */ + @SuppressWarnings("unchecked") + public StatGroup computeInterval(StatGroup baseGroup) { + Map> intervalValues = + new HashMap>(); + + for (Entry> entry : + stats.entrySet()) { + StatDefinition definition = entry.getKey(); + Stat statValue = entry.getValue(); + @SuppressWarnings("rawtypes") + Stat baseStat = baseGroup.stats.get(definition); + if (baseStat == null) { + intervalValues.put(definition, statValue.copy()); + } else { + intervalValues.put(definition, + statValue.computeInterval(baseStat)); + } + } + return new StatGroup(groupName, groupDescription, intervalValues); + } + + /** + * Clear all stats in a StatGroup. + */ + public void clear() { + synchronized (stats) { + for (Stat s : stats.values()) { + s.clear(); + } + } + } + + /** + * Negates all stats in a StatGroup. + */ + public void negate() { + synchronized (stats) { + for (Stat s : stats.values()) { + s.negate(); + } + } + } + + public String getName() { + return groupName; + } + + public String getDescription() { + return groupDescription; + } + + /** + * @return a Stats class that copies the value of all stats in the group + */ + public StatGroup cloneGroup(boolean clear) { + + Map> copyValues = + new HashMap>(); + + synchronized (stats) { + for (Stat s : stats.values()) { + if (clear) { + copyValues.put(s.getDefinition(), s.copyAndClear()); + } else { + copyValues.put(s.getDefinition(), s.copy()); + } + } + } + return new StatGroup(groupName, groupDescription, copyValues); + } + + /** + * Return the stat associated with the specified definition, or null if not + * found. + * + * @return the stat or null + */ + public Stat getStat(StatDefinition definition) { + return stats.get(definition); + } + + public int getInt(StatDefinition definition) { + int retval; + Stat s = stats.get(definition); + if (s == null) { + retval = 0; + } else if (s instanceof IntStat) { + retval = ((IntStat) s).get(); + } else if (s instanceof AtomicIntStat) { + retval = ((AtomicIntStat) s).get(); + } else { + assert false : "Internal error calling getInt with" + + " unexpected stat type: " + s.getClass().getName(); + retval = 0; + } + return retval; + } + + public LongStat getLongStat(StatDefinition definition) { + return (LongStat) stats.get(definition); + } + + public long getLong(StatDefinition definition) { + long retval = 0; + Stat s = stats.get(definition); + if (s == null) { + retval= 0; + } else if (s instanceof LongStat) { + retval = ((LongStat)s).get(); + } else if (s instanceof AtomicLongStat) { + retval = ((AtomicLongStat)s).get(); + } else if (s instanceof IntegralLongAvgStat) { + retval = ((IntegralLongAvgStat)s).get().compute(); + } else { + assert false: "Internal error calling getLong() with "+ + "unknown stat type."; + } + return retval; + } + + public IntegralLongAvgStat getIntegralLongAvgStat( + StatDefinition definition) { + return (IntegralLongAvgStat) stats.get(definition); + } + + public LongMinStat getLongMinStat(StatDefinition definition) { + return (LongMinStat) stats.get(definition); + } + + public LongMaxStat getLongMaxStat(StatDefinition definition) { + return (LongMaxStat) stats.get(definition); + } + + public AtomicLongStat getAtomicLongStat(StatDefinition definition) { + return (AtomicLongStat) stats.get(definition); + } + + public Long getAtomicLong(StatDefinition definition) { + AtomicLongStat s = (AtomicLongStat) stats.get(definition); + if (s == null) { + return 0L; + } else { + return 
s.get(); + } + } + + public Active[] getActiveTxnArray(StatDefinition definition) { + ActiveTxnArrayStat s = (ActiveTxnArrayStat) stats.get(definition); + if (s == null) { + return null; + } else { + return s.get(); + } + } + + public long[] getLongArray(StatDefinition definition) { + LongArrayStat s = (LongArrayStat) stats.get(definition); + if (s == null) { + return null; + } else { + return s.get(); + } + } + + public float getFloat(StatDefinition definition) { + FloatStat s = (FloatStat) stats.get(definition); + if (s == null) { + return 0; + } else { + return s.get(); + } + } + + public boolean getBoolean(StatDefinition definition) { + BooleanStat s = (BooleanStat) stats.get(definition); + if (s == null) { + return false; + } else { + return s.get(); + } + } + + public String getString(StatDefinition definition) { + StringStat s = (StringStat) stats.get(definition); + if (s == null) { + return null; + } else { + return s.get(); + } + } + + @SuppressWarnings("unchecked") + public SortedMap getMap(StatDefinition definition) { + MapStat s = (MapStat) stats.get(definition); + if (s == null) { + return null; + } else { + return s.getMap(); + } + } + + /* + * Add this group's information to the jconsole tip map. + */ + public void addToTipMap(Map tips) { + tips.put(getName(), getDescription()); + for (StatDefinition d: stats.keySet()) { + tips.put(d.getName(), d.getDescription()); + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(groupName).append(": "); + sb.append(groupDescription).append("\n"); + + /* Order the stats for consistent display. */ + Map> sortedStats; + synchronized (stats) { + sortedStats = new TreeMap>(stats); + } + for (Stat s : sortedStats.values()) { + sb.append("\t").append(s).append("\n"); + } + + return sb.toString(); + } + + /** + * Includes the per-stat description in the output string. + */ + public String toStringVerbose() { + StringBuilder sb = new StringBuilder(); + sb.append(groupName).append(": "); + sb.append(groupDescription).append("\n"); + + /* Order the stats for consistent display.*/ + Map> sortedStats; + synchronized (stats) { + sortedStats = new TreeMap>(stats); + } + for (Stat s : sortedStats.values()) { + sb.append("\t").append(s.toStringVerbose()).append("\n"); + } + return sb.toString(); + } + + /** + * Only print values that are not null or zero. + */ + public String toStringConcise() { + + boolean headerPrinted = false; + StringBuilder sb = new StringBuilder(); + + /* Order the stats for consistent display.*/ + Map> sortedStats; + synchronized (stats) { + sortedStats = new TreeMap>(stats); + } + + for (Stat s : sortedStats.values()) { + + if (s.isNotSet()) { + continue; + } + + /* + * Print the group name lazily, in case no fields in this group are + * set at all. In that case, this method will not print anything. + */ + if (!headerPrinted) { + sb.append(groupName + "\n"); + headerPrinted = true; + } + sb.append("\t").append(s).append("\n"); + } + return sb.toString(); + } + + /** + * Return a string suitable for using as the header for a .csv file. + */ + public String getCSVHeader() { + StringBuilder sb = new StringBuilder(); + synchronized (stats) { + for (StatDefinition def : stats.keySet()) { + sb.append(groupName + "_" + def.getName() + ","); + } + } + return sb.toString(); + } + + /** + * Return a string suitable for using as the data for a .csv file. 
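A sketch of the CSV pattern these two methods support, again reusing the hypothetical SimpleLongStat from earlier: getCSVHeader() is written once, then each snapshot is taken with cloneGroup(true) so the stats clear and the next row starts a fresh interval (getCSVData() is shown just below).

package com.sleepycat.je.utilint;

import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;

public class StatCsvExample {
    public static void main(String[] args) throws IOException {
        StatDefinition nWaits =
            new StatDefinition("nWaits", "Number of latch waits.");
        StatGroup group = new StatGroup("LockTable", "Per-table latch stats");
        new SimpleLongStat(group, nWaits).set(42L);

        PrintWriter csv = new PrintWriter(new FileWriter("stats.csv"));
        try {
            csv.println(group.getCSVHeader());      /* "LockTable_nWaits," */
            /* Snapshot and clear, so the next row is a fresh interval. */
            csv.println(group.cloneGroup(true).getCSVData()); /* "42," */
        } finally {
            csv.close();
        }
    }
}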
+ */ + public String getCSVData() { + StringBuilder sb = new StringBuilder(); + synchronized (stats) { + for (Stat s : stats.values()) { + sb.append(s.getFormattedValue() + ","); + } + } + return sb.toString(); + } +} diff --git a/src/com/sleepycat/je/utilint/StatsAccumulator.java b/src/com/sleepycat/je/utilint/StatsAccumulator.java new file mode 100644 index 0000000..3f98117 --- /dev/null +++ b/src/com/sleepycat/je/utilint/StatsAccumulator.java @@ -0,0 +1,201 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BINS_BYLEVEL; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BIN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BIN_ENTRIES_HISTOGRAM; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_DELETED_LN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_INS_BYLEVEL; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_IN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_LN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_MAINTREE_MAXDEPTH; +import static com.sleepycat.je.dbi.BTreeStatDefinition.GROUP_DESC; +import static com.sleepycat.je.dbi.BTreeStatDefinition.GROUP_NAME; + +import java.io.PrintStream; +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.TreeWalkerStatsAccumulator; + +public class StatsAccumulator implements TreeWalkerStatsAccumulator { + private final Set inNodeIdsSeen = new HashSet(); + private final Set binNodeIdsSeen = new HashSet(); + private long[] insSeenByLevel = null; + private long[] binsSeenByLevel = null; + private long[] binEntriesHistogram = null; + private long lnCount = 0; + private long deletedLNCount = 0; + private int mainTreeMaxDepth = 0; + + public PrintStream progressStream; + int progressInterval; + + /* The max levels we ever expect to see in a tree. */ + private static final int MAX_LEVELS = 100; + + public StatsAccumulator( + PrintStream progressStream, + int progressInterval) { + + this.progressStream = progressStream; + this.progressInterval = progressInterval; + + insSeenByLevel = new long[MAX_LEVELS]; + binsSeenByLevel = new long[MAX_LEVELS]; + binEntriesHistogram = new long[10]; + } + + public void verifyNode(@SuppressWarnings("unused") Node node) { + } + + @Override + public void processIN(IN node, Long nid, int level) { + if (inNodeIdsSeen.add(nid)) { + tallyLevel(level, insSeenByLevel); + verifyNode(node); + } + } + + @Override + public void processBIN(BIN node, Long nid, int level) { + if (binNodeIdsSeen.add(nid)) { + tallyLevel(level, binsSeenByLevel); + verifyNode(node); + tallyEntries(node, binEntriesHistogram); + } + } + + private void tallyLevel(int levelArg, long[] nodesSeenByLevel) { + int level = levelArg; + if (level >= IN.MAIN_LEVEL) { + /* Count DBMAP_LEVEL as main level. 
[#22209] */ + level &= IN.LEVEL_MASK; + if (level > mainTreeMaxDepth) { + mainTreeMaxDepth = level; + } + } + + nodesSeenByLevel[level]++; + } + + @Override + public void incrementLNCount() { + lnCount++; + if (progressInterval != 0 && progressStream != null) { + if ((lnCount % progressInterval) == 0) { + progressStream.println(getStats()); + } + } + } + + @Override + public void incrementDeletedLNCount() { + deletedLNCount++; + } + + private void tallyEntries(BIN bin, long[] binEntriesHistogram) { + int nEntries = bin.getNEntries(); + int nonDeletedEntries = 0; + for (int i = 0; i < nEntries; i++) { + /* KD and PD determine deletedness. */ + if (!bin.isEntryPendingDeleted(i) && + !bin.isEntryKnownDeleted(i)) { + nonDeletedEntries++; + } + } + + int bucket = (nonDeletedEntries * 100) / (bin.getMaxEntries() + 1); + bucket /= 10; + binEntriesHistogram[bucket]++; + } + + Set getINNodeIdsSeen() { + return inNodeIdsSeen; + } + + Set getBINNodeIdsSeen() { + return binNodeIdsSeen; + } + + long[] getINsByLevel() { + return insSeenByLevel; + } + + long[] getBINsByLevel() { + return binsSeenByLevel; + } + + long[] getBINEntriesHistogram() { + return binEntriesHistogram; + } + + long getLNCount() { + return lnCount; + } + + long getDeletedLNCount() { + return deletedLNCount; + } + + int getMainTreeMaxDepth() { + return mainTreeMaxDepth; + } + + public StatGroup getStats() { + StatGroup group = new StatGroup(GROUP_NAME, GROUP_DESC); + new LongStat(group, BTREE_IN_COUNT, getINNodeIdsSeen().size()); + new LongStat(group, BTREE_BIN_COUNT, getBINNodeIdsSeen().size()); + new LongStat(group, BTREE_LN_COUNT, getLNCount()); + new LongStat(group, BTREE_DELETED_LN_COUNT, getDeletedLNCount()); + new IntStat(group, BTREE_MAINTREE_MAXDEPTH, getMainTreeMaxDepth()); + new LongArrayStat(group, BTREE_INS_BYLEVEL, getINsByLevel()); + new LongArrayStat(group, BTREE_BINS_BYLEVEL, getBINsByLevel()); + new LongArrayStat(group, BTREE_BIN_ENTRIES_HISTOGRAM, + getBINEntriesHistogram()) { + @Override + protected String getFormattedValue() { + StringBuilder sb = new StringBuilder(); + sb.append("["); + if (array != null && array.length > 0) { + boolean first = true; + for (int i = 0; i < array.length; i++) { + if (array[i] > 0) { + if (!first) { + sb.append("; "); + } + + first = false; + int startPct = i * 10; + int endPct = (i + 1) * 10 - 1; + sb.append(startPct).append("-"); + sb.append(endPct).append("%: "); + sb.append(Stat.FORMAT.format(array[i])); + } + } + } + + sb.append("]"); + + return sb.toString(); + } + }; + + return group; + } +} diff --git a/src/com/sleepycat/je/utilint/StoppableThread.java b/src/com/sleepycat/je/utilint/StoppableThread.java new file mode 100644 index 0000000..66ee19d --- /dev/null +++ b/src/com/sleepycat/je/utilint/StoppableThread.java @@ -0,0 +1,492 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
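An aside on the bucket arithmetic in tallyEntries() above: a BIN's non-deleted entry count is reduced to a 0-9 decile of its capacity, and the maxEntries + 1 in the divisor keeps a completely full BIN at 99%, i.e. in bucket 9 rather than off the end of the ten-element histogram array. A self-contained check of that arithmetic (hypothetical class name):

public class BinHistogramBucketExample {

    // The same computation as tallyEntries(), in isolation.
    static int bucket(int nonDeletedEntries, int maxEntries) {
        int pct = (nonDeletedEntries * 100) / (maxEntries + 1);
        return pct / 10;
    }

    public static void main(String[] args) {
        System.out.println(bucket(0, 128));   // 0  (empty BIN)
        System.out.println(bucket(64, 128));  // 4  (about half full)
        System.out.println(bucket(128, 128)); // 9  (full, stays in range)
    }
}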
+ */ + +package com.sleepycat.je.utilint; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadMXBean; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentWedgedException; +import com.sleepycat.je.ExceptionListener; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * A StoppableThread is a daemon that obeys the following mandates: + * - it sets the daemon property for the thread + * - an uncaught exception handler is always registered + * - the thread registers with the JE exception listener mechanism. + * - its shutdown method can only be executed once. StoppableThreads are not + * required to implement shutdown() methods, because in some cases their + * shutdown processing must be coordinated by an owning, parent thread. + * + * StoppableThread is an alternative to the DaemonThread. It also assumes that + * the thread's run() method may be more complex than that of the work-queue, + * task oriented DaemonThread. + * + * A StoppableThread's run method should catch and handle all exceptions. By + * default, unhandled exceptions are considered programming errors, and + * invalidate the environment, but StoppableThreads may supply alternative + * uncaught exception handling. + * + * StoppableThreads usually are created with an EnvironmentImpl, but on + * occasion an environment may not be available (for components that can + * execute without an environment). In that case, the thread obviously does not + * invalidate the environment. + * + * Note that the StoppableThread.cleanup must be invoked upon, or soon after, + * thread exit. + */ +public abstract class StoppableThread extends Thread { + + /* The environment, if any, that's associated with this thread. */ + protected final EnvironmentImpl envImpl; + + /* + * Shutdown can only be executed once. The shutdown field protects against + * multiple invocations. + */ + private final AtomicBoolean shutdown = new AtomicBoolean(false); + + /* The exception (if any) that forced this node to shut down. */ + private Exception savedShutdownException = null; + + /* Total cpu time used by thread */ + private long totalCpuTime = -1; + + /* Total user time used by thread */ + private long totalUserTime = -1; + + /** + * The default wait period for an interrupted thread to exit as part of a + * hard shutdown. + */ + private static final int DEFAULT_INTERRUPT_WAIT_MS = 10 * 1000; + + /** + * The wait period for joining a thread in which shutdown is running. + * Use a large timeout since we want the shutdown to complete normally, + * if at all possible. 
+ */ + private static final int WAIT_FOR_SHUTDOWN_MS = + DEFAULT_INTERRUPT_WAIT_MS * 3; + + protected StoppableThread(final String threadName) { + this(null, null, null, threadName); + } + + protected StoppableThread(final EnvironmentImpl envImpl, + final String threadName) { + this(envImpl, null /* handler */, null /* runnable */,threadName); + } + + protected StoppableThread(final EnvironmentImpl envImpl, + final UncaughtExceptionHandler handler, + final String threadName) { + this(envImpl, handler, null /* runnable */, threadName); + } + + protected StoppableThread(final EnvironmentImpl envImpl, + final UncaughtExceptionHandler handler, + final Runnable runnable, + final String threadName) { + super(null, runnable, threadName); + this.envImpl = envImpl; + + /* + * Set the daemon property so this thread will not hang up the + * application. + */ + setDaemon(true); + + setUncaughtExceptionHandler + ((handler == null) ? new UncaughtHandler() : handler); + } + + /** + * @return a logger to use when logging uncaught exceptions. + */ + abstract protected Logger getLogger(); + + /** + * Returns the exception if any that provoked the shutdown + * + * @return the exception, or null if it was a normal shutdown + */ + public Exception getSavedShutdownException() { + return savedShutdownException; + } + + public void saveShutdownException(Exception shutdownException) { + savedShutdownException = shutdownException; + } + + public boolean isShutdown() { + return shutdown.get(); + } + + /** + * If the shutdown flag is false, set it to true and return false; in this + * case the caller should perform shutdown, including calling {@link + * #shutdownThread}. If the shutdown flag is true, wait for this thread to + * exit and return true; in this case the caller should not perform + * shutdown. + * + * When shutdownDone is initially called by thread X (including from the + * run method of the thread being shutdown), then a thread Y calling + * shutdownDone should simply return without performing shutdown (this is + * when shutdownDone returns true). In this case it is important that this + * method calls {@link #waitForExit} in thread Y to ensure that thread X + * really dies, or that an EnvironmentWedgedException is thrown if X does + * not die. In particular it is important that all JE threads have died and + * released their resources when Environment.close returns to the app + * thread, or that EWE is thrown if any JE threads have not died. This + * allows the app to reliably re-open the env, or exit the process if + * necessary. [#25648] + * + * Note than when thread X has sub-components and manages their threads, + * thread X's shutdown method will call shutdown for its managed threads. + * Waiting for exit of thread X will therefore wait for exit of its managed + * threads, assuming that all shutdown methods calls shutdownDone as + * described. + * + * @param logger the logger on which to log messages + * + * @return true if shutdown is already set. + */ + protected boolean shutdownDone(Logger logger) { + + if (shutdown.compareAndSet(false, true)) { + return false; + } + + waitForExit(logger); + return true; + } + + /** + * Must be invoked upon, or soon after, exit from the thread to perform + * any cleanup, and ensure that any allocated resources are freed. + */ + protected void cleanup() { + } + + /* + * A static method to handle the uncaught exception. This method + * can be called in other places, such as in FileManager. 
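Putting the constructor and shutdown contracts above together, here is a minimal sketch of a StoppableThread subclass. All names are hypothetical and the work loop is elided: run() polls a flag, initiateSoftShutdown() sets the flag and returns the join timeout, and the public shutdown method short-circuits through shutdownDone().

import java.util.logging.Logger;

import com.sleepycat.je.dbi.EnvironmentImpl;
import com.sleepycat.je.utilint.StoppableThread;

// Hypothetical sketch of a StoppableThread subclass.
public class SweeperThread extends StoppableThread {

    private volatile boolean stopRequested = false;

    public SweeperThread(EnvironmentImpl envImpl) {
        super(envImpl, "Sweeper");
    }

    @Override
    public void run() {
        try {
            while (!stopRequested) {
                /* ... one batch of work, handling all exceptions ... */
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    /* Interrupt is the hard-shutdown path; exit cleanly. */
                    break;
                }
            }
        } finally {
            cleanup();
        }
    }

    /* Called by shutdownThread(); makes run() exit on its own. */
    @Override
    protected int initiateSoftShutdown() {
        stopRequested = true;
        return 5000; /* shutdownThread() joins for up to 5 seconds. */
    }

    /* Public entry point; safe to call from multiple threads. */
    public void shutdown(Logger logger) {
        if (shutdownDone(logger)) {
            return; /* Another caller already performed shutdown. */
        }
        shutdownThread(logger);
    }

    @Override
    protected Logger getLogger() {
        return Logger.getLogger(SweeperThread.class.getName());
    }
}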
+ * + * When an uncaught exception occurs, log it, publish it to the + * exception handler, and invalidate the environment. + */ + public static void handleUncaughtException( + final Logger useLogger, + final EnvironmentImpl envImpl, + final Thread t, + final Throwable e) { + + if (useLogger != null) { + String envName = (envImpl == null)? "" : envImpl.getName(); + String message = envName + ":" + t.getName() + + " exited unexpectedly with exception " + e; + if (e != null) { + message += LoggerUtils.getStackTrace(e); + } + + if (envImpl != null) { + /* + * If we have an environment, log this to all three + * handlers. + */ + LoggerUtils.severe(useLogger, envImpl, message); + } else { + /* + * We don't have an environment, but at least log this + * to the console. + */ + useLogger.log(Level.SEVERE, message); + } + } + + + if (envImpl == null) { + return; + } + + /* + * If not already invalid, invalidate environment by creating an + * EnvironmentFailureException. + */ + if (envImpl.isValid()) { + + /* + * Create the exception to invalidate the environment, but do + * not throw it since the handle is invoked in some internal + * JVM thread and the exception is not meaningful to the + * invoker. + */ + @SuppressWarnings("unused") + EnvironmentFailureException unused = + new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.UNCAUGHT_EXCEPTION, + e); + } + + final ExceptionListener exceptionListener = + envImpl.getExceptionListener(); + + if (exceptionListener != null) { + exceptionListener.exceptionThrown( + DbInternal.makeExceptionEvent( + envImpl.getInvalidatingException(), t.getName())); + } + } + + /** + * An uncaught exception should invalidate the environment. Check if the + * environmentImpl is null, because there are a few cases where a + * StoppableThread is created for components that work both in replicated + * nodes and independently. + */ + private class UncaughtHandler implements UncaughtExceptionHandler { + + /** + * When an uncaught exception occurs, log it, publish it to the + * exception handler, and invalidate the environment. + */ + @Override + public void uncaughtException(Thread t, Throwable e) { + Logger useLogger = getLogger(); + handleUncaughtException(useLogger, envImpl, t, e); + } + } + + /** + * This method is invoked from another thread of control to shutdown this + * thread. The method tries shutting down the thread using a variety of + * techniques, starting with the gentler techniques in order to limit of + * stopping the thread on the overall process and proceeding to harsher + * techniques: + * + * 1) It first tries a "soft" shutdown by invoking + * initiateSoftShutdown(). This is the technique of choice. + * Each StoppableThread is expected to make provisions for a clean shutdown + * via this method. The techniques used to implement this method may vary + * based upon the specifics of the thread. + * + * 2) If that fails it interrupts the thread. + * + * 3) If the thread does not respond to the interrupt, it invalidates the + * environment. + * + * All Stoppable threads are expected to catch an interrupt, clean up and + * then exit. The cleanup may involve invalidation of the environment, if + * the thread is not in a position to handle the interrupt cleanly. + * + * If the method has to resort to step 3, it means that thread and other + * resources may not have been freed and it would be best to exit and + * restart the process itself to ensure they are freed. 
In this case an + * EnvironmentWedgedException is used to invalidate the env, and the EWE + * will be thrown when the app calls Environment.close. + * + * @param logger the logger on which to log messages + */ + public void shutdownThread(Logger logger) { + + /* + * Save resource usage, since it will not be available once the + * thread has exited. + */ + ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); + if (threadBean.isThreadCpuTimeSupported()) { + totalCpuTime = threadBean.getThreadCpuTime(getId()); + totalUserTime = threadBean.getThreadUserTime(getId()); + } else if (threadBean.isCurrentThreadCpuTimeSupported() && + Thread.currentThread() == this) { + totalCpuTime = threadBean.getCurrentThreadCpuTime(); + totalUserTime = threadBean.getCurrentThreadUserTime(); + } + + if (Thread.currentThread() == this) { + /* Shutdown was called from this thread's run method. */ + return; + } + + try { + LoggerUtils.info(logger, envImpl, + getName() + " soft shutdown initiated."); + + final int waitMs = initiateSoftShutdown(); + + /* + * Wait for a soft shutdown to take effect, the preferred method + * for thread shutdown. + */ + if (waitMs >= 0) { + join(waitMs); + } + + if (!isAlive()) { + LoggerUtils.fine(logger, envImpl, this + " has exited."); + return; + } + + LoggerUtils.warning( + logger, envImpl, + "Soft shutdown failed for thread:" + this + + " after waiting for " + waitMs + + "ms resorting to interrupt."); + + interrupt(); + + /* + * The thread must make provision to handle and exit on an + * interrupt. + */ + final long joinWaitTime = + (waitMs > 0) ? 2 * waitMs : DEFAULT_INTERRUPT_WAIT_MS; + + join(joinWaitTime); + + if (!isAlive()) { + LoggerUtils.warning(logger, envImpl, + this + " shutdown via interrupt."); + return; + } + + /* + * Failed to shutdown thread despite all attempts. It's + * possible that the thread has a bug and/or is unable to + * to get to an interruptible point. + */ + final String msg = this + + " shutdown via interrupt FAILED. " + + "Thread still alive despite waiting for " + + joinWaitTime + "ms."; + + LoggerUtils.severe(logger, envImpl, msg); + LoggerUtils.fullThreadDump(logger, envImpl, Level.SEVERE); + + if (envImpl != null) { + @SuppressWarnings("unused") + EnvironmentFailureException unused = + new EnvironmentWedgedException(envImpl, msg); + } + } catch (InterruptedException e1) { + LoggerUtils.warning( + logger, envImpl, + "Interrupted while shutting down thread:" + this); + } + } + + /** + * Used to wait for thread shutdown, when {@link #shutdownDone} returns + * true because it has been called by another thread. + */ + private void waitForExit(Logger logger) { + + assert shutdown.get(); + + if (Thread.currentThread() == this) { + /* Shutdown was called from this thread's run method. */ + return; + } + + try { + join(WAIT_FOR_SHUTDOWN_MS); + + if (!isAlive()) { + return; + } + + /* + * For some reason, shutdown has not finished. This is unlikely, + * but possible. As in shutdownThread, we try interrupting the + * thread before giving up. + */ + LoggerUtils.warning( + logger, envImpl, + "Soft shutdown failed for thread:" + this + + " after waiting for " + WAIT_FOR_SHUTDOWN_MS + + "ms, resorting to interrupt in wait-for-shutdown."); + + interrupt(); + join(WAIT_FOR_SHUTDOWN_MS); + + if (!isAlive()) { + return; + } + + /* + * Failed to shutdown thread despite all attempts. It's + * possible that the thread has a bug and/or is unable to + * to get to an interruptible point. 
+ */ + final String msg = this + + " shutdown via interrupt FAILED during wait-for-shutdown. " + + "Thread still alive despite waiting for " + + WAIT_FOR_SHUTDOWN_MS + "ms."; + + LoggerUtils.severe(logger, envImpl, msg); + LoggerUtils.fullThreadDump(logger, envImpl, Level.SEVERE); + + if (envImpl != null) { + @SuppressWarnings("unused") + EnvironmentFailureException unused = + new EnvironmentWedgedException(envImpl, msg); + } + } catch (InterruptedException e1) { + LoggerUtils.warning( + logger, envImpl, + "Interrupted during wait-for-shutdown:" + this); + } + } + + /** + * Threads that use shutdownThread() must define this method. It's invoked + * by shutdownThread as an attempt at a soft shutdown. + * + * This method makes provisions for this thread to exit on its own. The + * technique used to make the thread exit can vary based upon the nature of + * the service being provided by the thread. For example, the thread may be + * known to poll some shutdown flag on a periodic basis, or it may detect + * that a channel that it waits on has been closed by this method. + * + * @return the amount of time in ms that the shutdownThread method will + * wait for the thread to exit. A -ve value means that the method will not + * wait. A zero value means it will wait indefinitely. + */ + protected int initiateSoftShutdown() { + return -1; + } + + /** + * Returns the total cpu time associated with the thread, after the thread + * has been shutdown. + */ + public long getTotalCpuTime() { + return totalCpuTime; + } + + /** + * Returns the total cpu time associated with the thread, after the thread + * has been shutdown. + */ + public long getTotalUserTime() { + return totalUserTime; + } +} diff --git a/src/com/sleepycat/je/utilint/StoppableThreadFactory.java b/src/com/sleepycat/je/utilint/StoppableThreadFactory.java new file mode 100644 index 0000000..c08209f --- /dev/null +++ b/src/com/sleepycat/je/utilint/StoppableThreadFactory.java @@ -0,0 +1,82 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.concurrent.ThreadFactory; +import java.util.logging.Logger; + +import com.sleepycat.je.dbi.EnvironmentImpl; + +/** + * Create a thread factory that returns threads that are legitimate + * StoppableThreads. Like StoppableThreads, if an environment is provided, the + * threads will invalidate if an exception is not handled, and are registered + * with the exception listener.If a logger is provided, StoppableThreads log + * exception information. + * + * This factory is used in conjunction with the ExecutorService and + * ThreadExecutorPool models. + */ +public class StoppableThreadFactory implements ThreadFactory { + + private final String threadName; + private final Logger logger; + private final EnvironmentImpl envImpl; + + /** + * This kind of StoppableThreadFactory will cause invalidation if an + * unhandled exception occurs. 
+ */ + public StoppableThreadFactory(EnvironmentImpl envImpl, + String threadName, + Logger logger) { + this.threadName = threadName; + this.logger = logger; + this.envImpl = envImpl; + } + + /** + * This kind of StoppableThreadFactory will NOT cause invalidation if an + * unhandled exception occurs, because there is no environment provided. + */ + public StoppableThreadFactory(String threadName, Logger logger) { + this(null, threadName, logger); + } + + public Thread newThread(Runnable runnable) { + return new StoppablePoolThread(envImpl, runnable, threadName, logger); + } + + /* + * A fairly plain implementation of the abstract StoppableThread class, + * for use by the factory. + */ + private static class StoppablePoolThread extends StoppableThread { + private final Logger logger; + + StoppablePoolThread(EnvironmentImpl envImpl, + Runnable runnable, + String threadName, + Logger logger) { + super(envImpl, null, runnable, threadName); + this.logger = logger; + } + + @Override + protected Logger getLogger() { + return logger; + } + } +} + diff --git a/src/com/sleepycat/je/utilint/StringStat.java b/src/com/sleepycat/je/utilint/StringStat.java new file mode 100644 index 0000000..1016f8d --- /dev/null +++ b/src/com/sleepycat/je/utilint/StringStat.java @@ -0,0 +1,75 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * A stat that saves a string; a way to save general information for later + * display and access. + */ +public class StringStat extends Stat { + private static final long serialVersionUID = 1L; + + private String value; + + public StringStat(StatGroup group, + StatDefinition definition) { + super(group, definition); + } + + public StringStat(StatGroup group, + StatDefinition definition, + String initialValue) { + super(group, definition); + value = initialValue; + } + + @Override + public String get() { + return value; + } + + @Override + public void set(String newValue) { + value = newValue; + } + + @Override + public void add(Stat otherStat) { + value += otherStat.get(); + } + + @Override + public Stat computeInterval(Stat base) { + return copy(); + } + + @Override + public void negate() { + } + + @Override + public void clear() { + value = null; + } + + @Override + protected String getFormattedValue() { + return value; + } + + @Override + public boolean isNotSet() { + return (value == null); + } +} diff --git a/src/com/sleepycat/je/utilint/TestHook.java b/src/com/sleepycat/je/utilint/TestHook.java new file mode 100644 index 0000000..2ce1aac --- /dev/null +++ b/src/com/sleepycat/je/utilint/TestHook.java @@ -0,0 +1,44 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
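A usage sketch for the factory (the example class name is hypothetical): handing a StoppableThreadFactory to a JDK executor so that pool threads are daemons with the uncaught-exception behavior described above. With no environment supplied, the no-invalidation constructor is used.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;

import com.sleepycat.je.utilint.StoppableThreadFactory;

public class PoolExample {
    public static void main(String[] args) throws InterruptedException {
        Logger logger = Logger.getLogger(PoolExample.class.getName());

        /* No environment: exceptions are logged, nothing is invalidated. */
        ExecutorService pool = Executors.newFixedThreadPool(
            2, new StoppableThreadFactory("JEHelper", logger));

        pool.submit(
            () -> System.out.println("running in a StoppablePoolThread"));

        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}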
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.IOException; + +/** + * TestHook is used to induce testing behavior that can't be provoked + * externally. For example, unit tests may use hooks to throw IOExceptions, or + * to cause waiting behavior. + * + * To use this, a unit test should implement TestHook with a class that + * overrides the desired method. The desired code will have a method that + * allows the unit test to specify a hook, and will execute the hook if it is + * non-null. This should be done within an assert like so: + * + * assert TestHookExecute(myTestHook); + * + * See Tree.java for examples. + */ +public interface TestHook { + + public void hookSetup(); + + public void doIOHook() + throws IOException; + + public void doHook(); + + public void doHook(T obj); + + public T getHookValue(); +} diff --git a/src/com/sleepycat/je/utilint/TestHookAdapter.java b/src/com/sleepycat/je/utilint/TestHookAdapter.java new file mode 100644 index 0000000..48b86fa --- /dev/null +++ b/src/com/sleepycat/je/utilint/TestHookAdapter.java @@ -0,0 +1,44 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.IOException; + +public class TestHookAdapter implements TestHook { + + @Override + public void hookSetup() { + throw new UnsupportedOperationException(); + } + + @Override + public void doIOHook() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook() { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook(T obj) { + throw new UnsupportedOperationException(); + } + + @Override + public T getHookValue() { + throw new UnsupportedOperationException(); + } +} diff --git a/src/com/sleepycat/je/utilint/TestHookExecute.java b/src/com/sleepycat/je/utilint/TestHookExecute.java new file mode 100644 index 0000000..c768c61 --- /dev/null +++ b/src/com/sleepycat/je/utilint/TestHookExecute.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * Execute a test hook if set. 
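A sketch of the pattern the javadoc above describes, using TestHookExecute.doHookIfSet (defined just below); all other names here are hypothetical. The assert guard means the hook call is skipped at runtime unless assertions are enabled with -ea.

import com.sleepycat.je.utilint.TestHook;
import com.sleepycat.je.utilint.TestHookAdapter;
import com.sleepycat.je.utilint.TestHookExecute;

// Hypothetical production class with a test-only hook point.
class HookedComponent {

    private TestHook<Void> preWriteHook; /* set by tests only */

    public void setPreWriteHook(TestHook<Void> hook) {
        preWriteHook = hook;
    }

    public void write() {
        /* Runs the hook only under -ea; a no-op otherwise. */
        assert TestHookExecute.doHookIfSet(preWriteHook);
        /* ... the real write work would go here ... */
    }
}

// A test overrides just the method it needs via the adapter.
class CountingHook extends TestHookAdapter<Void> {
    int calls = 0;

    @Override
    public void doHook() {
        calls++;
    }
}

class HookDemo {
    public static void main(String[] args) {
        HookedComponent component = new HookedComponent();
        CountingHook hook = new CountingHook();
        component.setPreWriteHook(hook);
        component.write();
        System.out.println(hook.calls); /* 1 with -ea, 0 without */
    }
}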
This wrapper is used so that test hook execution + * can be packaged into a single statement that can be done within an assert + * statement. + */ +public class TestHookExecute { + + public static boolean doHookSetupIfSet(TestHook testHook) { + if (testHook != null) { + testHook.hookSetup(); + } + return true; + } + + public static boolean doHookIfSet(TestHook testHook) { + if (testHook != null) { + testHook.doHook(); + } + return true; + } + + public static boolean doHookIfSet(TestHook testHook, T obj) { + if (testHook != null) { + testHook.doHook(obj); + } + return true; + } +} diff --git a/src/com/sleepycat/je/utilint/Timestamp.java b/src/com/sleepycat/je/utilint/Timestamp.java new file mode 100644 index 0000000..b69b8d0 --- /dev/null +++ b/src/com/sleepycat/je/utilint/Timestamp.java @@ -0,0 +1,158 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +/** + * Duplicate of java.sql.Timestamp which keeps our implementation constant in + * case the java.sql.Timestamp implementation changes incompatibly. This way + * we can write it to disk and not worry about upgrading the log file. + */ +public class Timestamp extends java.util.Date { + + static final long serialVersionUID = 2745179027874758501L; + + private int nanos; + + public Timestamp(long time) { + super((time / 1000) * 1000); + nanos = (int) ((time % 1000) * 1000000); + if (nanos < 0) { + nanos = 1000000000 + nanos; + super.setTime(((time / 1000) - 1) * 1000); + } + } + + public long getTime() { + long time = super.getTime(); + return (time + (nanos / 1000000)); + } + + public String toString() { + + int year = super.getYear() + 1900; + int month = super.getMonth() + 1; + int day = super.getDate(); + int hour = super.getHours(); + int minute = super.getMinutes(); + int second = super.getSeconds(); + String yearString; + String monthString; + String dayString; + String hourString; + String minuteString; + String secondString; + String nanosString; + String zeros = "000000000"; + String yearZeros = "0000"; + StringBuffer timestampBuf; + + if (year < 1000) { + /* Add leading zeros. */ + yearString = "" + year; + yearString = yearZeros.substring(0, (4 - yearString.length())) + + yearString; + } else { + yearString = "" + year; + } + + if (month < 10) { + monthString = "0" + month; + } else { + monthString = Integer.toString(month); + } + + if (day < 10) { + dayString = "0" + day; + } else { + dayString = Integer.toString(day); + } + + if (hour < 10) { + hourString = "0" + hour; + } else { + hourString = Integer.toString(hour); + } + + if (minute < 10) { + minuteString = "0" + minute; + } else { + minuteString = Integer.toString(minute); + } + + if (second < 10) { + secondString = "0" + second; + } else { + secondString = Integer.toString(second); + } + + if (nanos == 0) { + nanosString = "0"; + } else { + nanosString = Integer.toString(nanos); + + /* Add leading zeros. */ + nanosString = zeros.substring(0, (9 - nanosString.length())) + + nanosString; + + /* Truncate trailing zeros. 
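An aside on the millis/nanos split performed by the Timestamp constructor above: the borrow for negative times is the subtle case, so here is a quick check of the arithmetic (hypothetical class name, assuming com.sleepycat.je.utilint.Timestamp is visible):

import com.sleepycat.je.utilint.Timestamp;

public class TimestampMathExample {
    public static void main(String[] args) {
        Timestamp t1 = new Timestamp(1500); // 1.5s after the epoch
        System.out.println(t1.getTime());   // 1500 (millis round-trip)

        Timestamp t2 = new Timestamp(-1);   // 1 ms before the epoch
        // Internally the time rounds down to -1000 ms with
        // nanos = 999000000, and getTime() reassembles
        // -1000 + 999 = -1.
        System.out.println(t2.getTime());   // -1
    }
}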
*/ + char[] nanosChar = new char[nanosString.length()]; + nanosString.getChars(0, nanosString.length(), nanosChar, 0); + int truncIndex = 8; + while (nanosChar[truncIndex] == '0') { + truncIndex--; + } + + nanosString = new String(nanosChar, 0, truncIndex + 1); + } + + /* Do a string buffer here instead. */ + timestampBuf = new StringBuffer(20 + nanosString.length()); + timestampBuf.append(yearString); + timestampBuf.append("-"); + timestampBuf.append(monthString); + timestampBuf.append("-"); + timestampBuf.append(dayString); + timestampBuf.append(" "); + timestampBuf.append(hourString); + timestampBuf.append(":"); + timestampBuf.append(minuteString); + timestampBuf.append(":"); + timestampBuf.append(secondString); + timestampBuf.append("."); + timestampBuf.append(nanosString); + + return (timestampBuf.toString()); + } + + public boolean equals(Timestamp ts) { + if (super.equals(ts)) { + if (nanos == ts.nanos) { + return true; + } else { + return false; + } + } else { + return false; + } + } + + public boolean equals(Object ts) { + if (ts instanceof Timestamp) { + return this.equals((Timestamp)ts); + } else { + return false; + } + } +} + diff --git a/src/com/sleepycat/je/utilint/TinyHashSet.java b/src/com/sleepycat/je/utilint/TinyHashSet.java new file mode 100644 index 0000000..1439c85 --- /dev/null +++ b/src/com/sleepycat/je/utilint/TinyHashSet.java @@ -0,0 +1,224 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.util.HashSet; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Set; + +import static com.sleepycat.je.EnvironmentFailureException.assertState; + +/** + * TinyHashSet is used to optimize (for speed, not space) the case where a + * HashSet generally holds one or two elements. This saves us the cost of + * creating the HashSet and related elements as well as call Object.hashCode(). + * It was designed for holding the cursors of a BIN, which are often no more + * than two in number. + * + * If (elem1 != null || elem2 != null), they are the only elements in the + * TinyHashSet. If (set != null) then only the set's elements are in the + * TinyHashSet. + * + * It should never be true that: + * (elem1 != null || elem2 != null) and (set != null). + * + * This class does not support adding null elements, and only supports a few + * of the methods in the Set interface. + */ +public class TinyHashSet implements Iterable { + + private Set set; + private T elem1; + private T elem2; + + /** + * Creates an empty set. + */ + public TinyHashSet() { + } + + /** + * Creates a set with one element. + */ + public TinyHashSet(T o) { + elem1 = o; + } + + /* + * Will return a fuzzy value if not under synchronized control. 
+ */ + public int size() { + if (elem1 != null && elem2 != null) { + return 2; + } + if (elem1 != null || elem2 != null) { + return 1; + } + if (set != null) { + return set.size(); + } + return 0; + } + + public boolean contains(T o) { + assertState(o != null); + assertState((elem1 == null && elem2 == null) || (set == null)); + if (set != null) { + return set.contains(o); + } + if (elem1 != null && (elem1 == o || elem1.equals(o))) { + return true; + } + if (elem2 != null && (elem2 == o || elem2.equals(o))) { + return true; + } + return false; + } + + public boolean remove(T o) { + assertState(o != null); + assertState((elem1 == null && elem2 == null) || (set == null)); + if (set != null) { + if (!set.remove(o)) { + return false; + } + /* + if (set.size() > 2) { + return true; + } + final Iterator iter = set.iterator(); + if (iter.hasNext()) { + elem1 = iter.next(); + if (iter.hasNext()) { + elem2 = iter.next(); + } + } + set = null; + */ + return true; + } + if (elem1 != null && (elem1 == o || elem1.equals(o))) { + elem1 = null; + return true; + } + if (elem2 != null && (elem2 == o || elem2.equals(o))) { + elem2 = null; + return true; + } + return false; + } + + public boolean add(T o) { + assertState(o != null); + assertState((elem1 == null && elem2 == null) || (set == null)); + if (set != null) { + return set.add(o); + } + if (elem1 != null && (elem1 == o || elem1.equals(o))) { + return false; + } + if (elem2 != null && (elem2 == o || elem2.equals(o))) { + return false; + } + if (elem1 == null) { + elem1 = o; + return true; + } + if (elem2 == null) { + elem2 = o; + return true; + } + set = new HashSet(5); + set.add(elem1); + set.add(elem2); + elem1 = null; + elem2 = null; + return set.add(o); + } + + public Set copy() { + assertState((elem1 == null && elem2 == null) || (set == null)); + if (set != null) { + return new HashSet(set); + } + final Set ret = new HashSet(); + if (elem1 != null) { + ret.add(elem1); + } + if (elem2 != null) { + ret.add(elem2); + } + return ret; + } + + public Iterator iterator() { + assertState((elem1 == null && elem2 == null) || (set == null)); + if (set != null) { + return set.iterator(); + } + return new TwoElementIterator(this, elem1, elem2); + } + + /* + * Iterator that returns only elem1 and elem2. + */ + private static class TwoElementIterator implements Iterator { + final TinyHashSet parent; + final T elem1; + final T elem2; + boolean returnedElem1; + boolean returnedElem2; + + TwoElementIterator(TinyHashSet parent, T elem1, T elem2) { + this.parent = parent; + this.elem1 = elem1; + this.elem2 = elem2; + returnedElem1 = (elem1 == null); + returnedElem2 = (elem2 == null); + } + + public boolean hasNext() { + return !returnedElem1 || !returnedElem2; + } + + public T next() { + if (!returnedElem1) { + returnedElem1 = true; + return elem1; + } + if (!returnedElem2) { + returnedElem2 = true; + return elem2; + } + throw new NoSuchElementException(); + } + + /** + * Examine elements in the reverse order they were returned, to remove + * the last returned element when both elements were returned. 
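+ *
+ * A minimal usage sketch (editor's illustration, not part of the
+ * original source):
+ *
+ *     TinyHashSet<String> set = new TinyHashSet<>("a");
+ *     set.add("b");
+ *     Iterator<String> it = set.iterator();
+ *     while (it.hasNext()) {
+ *         if ("a".equals(it.next())) {
+ *             it.remove(); // clears elem1 in the parent set
+ *         }
+ *     }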
+ */ + public void remove() { + if (returnedElem2 && elem2 != null) { + parent.elem2 = null; + return; + } + if (returnedElem1 && elem1 != null) { + parent.elem1 = null; + return; + } + assertState(false); + } + } +} diff --git a/src/com/sleepycat/je/utilint/TracerFormatter.java b/src/com/sleepycat/je/utilint/TracerFormatter.java new file mode 100644 index 0000000..4b597cb --- /dev/null +++ b/src/com/sleepycat/je/utilint/TracerFormatter.java @@ -0,0 +1,113 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.TimeZone; +import java.util.logging.Formatter; +import java.util.logging.LogRecord; + +/** + * Formatter for java.util.logging output. + */ +public class TracerFormatter extends Formatter { + + private static final String FORMAT = "yyyy-MM-dd HH:mm:ss.SSS z"; + private static final TimeZone TIMEZONE = TimeZone.getTimeZone("UTC"); + + private final Date date; + private final DateFormat formatter; + private String envName; + + public TracerFormatter() { + date = new Date(); + formatter = makeDateFormat(); + } + + public TracerFormatter(String envName) { + this(); + this.envName = envName; + } + + /** + * Return a formatted date for the specified time. Use this method for + * thread safety, since Date and DateFormat are not thread safe. + * + * @param millis the time in milliseconds + * @return the formatted date + */ + public synchronized String getDate(long millis) { + date.setTime(millis); + + return formatter.format(date); + } + + /** + * Format the log record in this form: + * + * @param record the log record to be formatted. + * @return a formatted log record + */ + @Override + public String format(LogRecord record) { + StringBuilder sb = new StringBuilder(); + + String dateVal = getDate(record.getMillis()); + sb.append(dateVal); + sb.append(" "); + sb.append(record.getLevel().getLocalizedName()); + appendEnvironmentName(sb); + sb.append(" "); + sb.append(formatMessage(record)); + sb.append("\n"); + + getThrown(record, sb); + + return sb.toString(); + } + + protected void appendEnvironmentName(StringBuilder sb) { + if (envName != null) { + sb.append(" [" + envName + "]"); + } + } + + protected void getThrown(LogRecord record, StringBuilder sb) { + if (record.getThrown() != null) { + try { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + record.getThrown().printStackTrace(pw); + pw.close(); + sb.append(sw.toString()); + } catch (Exception ex) { + /* Ignored. */ + } + } + } + + /** + * Return a DateFormat object that uses the standard format and the UTC + * timezone. 
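+ *
+ * A minimal usage sketch (editor's illustration):
+ *
+ *     DateFormat df = TracerFormatter.makeDateFormat();
+ *     String stamp = df.format(new Date()); // e.g. "2021-06-06 17:46:45.000 UTC"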
+ */ + public static DateFormat makeDateFormat() { + final DateFormat df = new SimpleDateFormat(FORMAT); + df.setTimeZone(TIMEZONE); + return df; + } +} diff --git a/src/com/sleepycat/je/utilint/VLSN.java b/src/com/sleepycat/je/utilint/VLSN.java new file mode 100644 index 0000000..d987445 --- /dev/null +++ b/src/com/sleepycat/je/utilint/VLSN.java @@ -0,0 +1,233 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.utilint; + +import java.io.Serializable; +import java.nio.ByteBuffer; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.Loggable; + +public class VLSN implements Loggable, Comparable, Serializable { + private static final long serialVersionUID = 1L; + + public static final int LOG_SIZE = 8; + + public static final int NULL_VLSN_SEQUENCE = -1; + public static final VLSN NULL_VLSN = new VLSN(NULL_VLSN_SEQUENCE); + public static final VLSN FIRST_VLSN = new VLSN(1); + + /* + * The distinguished value used to represent VLSN values that have not + * been set in log entry fields, because the field did not exist in that + * version of the log or in a non-HA commit/abort variant of a log entry. + */ + public static final int UNINITIALIZED_VLSN_SEQUENCE = 0; + + /* + * A replicated log entry is identified by a sequence id. We may change the + * VLSN implementation so it's not a first-class object, in order to reduce + * its in-memory footprint. In that case, the VLSN value would be a long, + * and this class would provide static utility methods. + */ + private long sequence; // sequence number + + public VLSN(long sequence) { + this.sequence = sequence; + } + + /** + * Constructor for VLSNs that are read from disk. + */ + public VLSN() { + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (!(obj instanceof VLSN)) { + return false; + } + + VLSN otherVLSN = (VLSN) obj; + return (otherVLSN.sequence == sequence); + } + + final public boolean equals(VLSN otherVLSN) { + return (otherVLSN != null) && (otherVLSN.sequence == sequence); + } + + @Override + public int hashCode() { + return Long.valueOf(sequence).hashCode(); + } + + public long getSequence() { + return sequence; + } + + public final boolean isNull() { + return sequence == NULL_VLSN.sequence; + } + + public static boolean isNull(long sequence) { + return sequence == NULL_VLSN.sequence; + } + + /** + * Return a VLSN which would follow this one. + */ + public VLSN getNext() { + return isNull() ? FIRST_VLSN : new VLSN(sequence + 1); + } + + /** + * Return a VLSN which would precede this one. + */ + public VLSN getPrev() { + return (isNull() || (sequence == 1)) ? + NULL_VLSN : + new VLSN(sequence - 1); + } + + /** + * Return true if this VLSN's sequence directly follows the "other" + * VLSN. This handles the case where "other" is a NULL_VLSN. 
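+ *
+ * For illustration (editor's note):
+ *
+ *     new VLSN(1).follows(VLSN.NULL_VLSN) // true
+ *     new VLSN(5).follows(new VLSN(4))    // true
+ *     new VLSN(6).follows(new VLSN(4))    // false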
+ */ + public boolean follows(VLSN other) { + return ((other.isNull() && sequence == 1) || + ((!other.isNull()) && + (other.getSequence() == (sequence - 1)))); + } + + /** + * Compares this VLSN's sequence with the specified VLSN's sequence for + * order. Returns a negative integer, zero, or a positive integer as this + * sequence is less than, equal to, or greater than the specified sequence. + */ + @Override + public int compareTo(VLSN other) { + + if ((sequence == NULL_VLSN.sequence) && + (other.sequence == NULL_VLSN.sequence)) { + return 0; + } + + if (sequence == NULL_VLSN.sequence) { + /* If "this" is null, the other VLSN is always greater. */ + return -1; + } + + if (other.sequence == NULL_VLSN.sequence) { + /* If the "other" is null, this VLSN is always greater. */ + return 1; + } + + long otherSequence = other.getSequence(); + if ((sequence - otherSequence) > 0) { + return 1; + } else if (sequence == otherSequence) { + return 0; + } else { + return -1; + } + } + + /** + * Returns the smaller of two VLSNS, ignoring NULL_VLSN values if one value + * is not NULL_VLSN. + * + * @param a a VLSN + * @param b another VLSN + * @return the smaller of {@code a} and {@code b}, ignoring NULL_VLSN + * unless both are NULL_VLSN + * @throws IllegalArgumentException if either argument is null + */ + public static VLSN min(final VLSN a, final VLSN b) { + if ((a == null) || (b == null)) { + throw new IllegalArgumentException( + "The arguments must not be null"); + } + if (a.isNull()) { + return b; + } else if (b.isNull()) { + return a; + } + return (a.compareTo(b) <= 0) ? a : b; + } + + /** + * @see Loggable#getLogSize + */ + @Override + public int getLogSize() { + return LOG_SIZE; + } + + /** + * @see Loggable#writeToLog + */ + @Override + public void writeToLog(ByteBuffer buffer) { + LogUtils.writeLong(buffer, sequence); + } + + /* + * Reading from a byte buffer + */ + + /** + * @see Loggable#readFromLog + */ + @Override + public void readFromLog(ByteBuffer buffer, int entryVersion) { + sequence = LogUtils.readLong(buffer); + } + + /** + * @see Loggable#dumpLog + */ + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + sb.append(""); + } + + /** + * @see Loggable#getTransactionId + */ + @Override + public long getTransactionId() { + return 0; + } + + /** + * @see Loggable#logicalEquals + */ + @Override + public boolean logicalEquals(Loggable other) { + + if (!(other instanceof VLSN)) { + return false; + } + + return sequence == ((VLSN) other).sequence; + } + + @Override + public String toString() { + return String.format("%,d", sequence); + } +} diff --git a/src/com/sleepycat/je/utilint/package-info.java b/src/com/sleepycat/je/utilint/package-info.java new file mode 100644 index 0000000..507ab30 --- /dev/null +++ b/src/com/sleepycat/je/utilint/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Misc utility classes, including some stat classes. 
+ */ +package com.sleepycat.je.utilint; diff --git a/src/com/sleepycat/persist/BasicCursor.java b/src/com/sleepycat/persist/BasicCursor.java new file mode 100644 index 0000000..401b574 --- /dev/null +++ b/src/com/sleepycat/persist/BasicCursor.java @@ -0,0 +1,389 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import java.util.Iterator; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.compat.DbCompat.OpReadOptions; +import com.sleepycat.compat.DbCompat.OpResult; +/* */ +import com.sleepycat.je.CacheMode; +/* */ +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationResult; +/* */ +import com.sleepycat.je.OperationStatus; +/* */ +import com.sleepycat.je.Put; +import com.sleepycat.je.ReadOptions; +import com.sleepycat.je.WriteOptions; +/* */ +import com.sleepycat.util.keyrange.RangeCursor; + +/** + * Implements EntityCursor and uses a ValueAdapter so that it can enumerate + * either keys or entities. + * + * @author Mark Hayes + */ +class BasicCursor implements EntityCursor { + + RangeCursor cursor; + ValueAdapter adapter; + boolean updateAllowed; + DatabaseEntry key; + DatabaseEntry pkey; + DatabaseEntry data; + + BasicCursor(RangeCursor cursor, + ValueAdapter adapter, + boolean updateAllowed) { + this.cursor = cursor; + this.adapter = adapter; + this.updateAllowed = updateAllowed; + key = adapter.initKey(); + pkey = adapter.initPKey(); + data = adapter.initData(); + } + + public V first() + throws DatabaseException { + + return first(null); + } + + public V first(LockMode lockMode) + throws DatabaseException { + + return returnValue( + cursor.getFirst(key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V last() + throws DatabaseException { + + return last(null); + } + + public V last(LockMode lockMode) + throws DatabaseException { + + return returnValue( + cursor.getLast(key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V next() + throws DatabaseException { + + return next(null); + } + + public V next(LockMode lockMode) + throws DatabaseException { + + return returnValue( + cursor.getNext(key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V nextDup() + throws DatabaseException { + + return nextDup(null); + } + + public V nextDup(LockMode lockMode) + throws DatabaseException { + + checkInitialized(); + return returnValue( + cursor.getNextDup(key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V nextNoDup() + throws DatabaseException { + + return nextNoDup(null); + } + + public V nextNoDup(LockMode lockMode) + throws DatabaseException { + + return returnValue( + cursor.getNextNoDup( + key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V prev() + throws DatabaseException { + + return prev(null); + } + + public V prev(LockMode lockMode) + throws DatabaseException { + + return returnValue( + cursor.getPrev(key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V 
prevDup() + throws DatabaseException { + + return prevDup(null); + } + + public V prevDup(LockMode lockMode) + throws DatabaseException { + + checkInitialized(); + return returnValue( + cursor.getPrevDup(key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V prevNoDup() + throws DatabaseException { + + return prevNoDup(null); + } + + public V prevNoDup(LockMode lockMode) + throws DatabaseException { + + return returnValue( + cursor.getPrevNoDup( + key, pkey, data, OpReadOptions.make(lockMode))); + } + + public V current() + throws DatabaseException { + + return current(null); + } + + public V current(LockMode lockMode) + throws DatabaseException { + + checkInitialized(); + return returnValue( + cursor.getCurrent(key, pkey, data, OpReadOptions.make(lockMode))); + } + + /* */ + public EntityResult get(Get getType, ReadOptions options) + throws DatabaseException { + + OpReadOptions opOptions = OpReadOptions.make(options); + + switch (getType) { + case CURRENT: + return returnResult( + cursor.getCurrent(key, pkey, data, opOptions)); + case FIRST: + return returnResult( + cursor.getFirst(key, pkey, data, opOptions)); + case LAST: + return returnResult( + cursor.getLast(key, pkey, data, opOptions)); + case NEXT: + return returnResult( + cursor.getNext(key, pkey, data, opOptions)); + case NEXT_DUP: + return returnResult( + cursor.getNextDup(key, pkey, data, opOptions)); + case NEXT_NO_DUP: + return returnResult( + cursor.getNextNoDup(key, pkey, data, opOptions)); + case PREV: + return returnResult( + cursor.getPrev(key, pkey, data, opOptions)); + case PREV_DUP: + return returnResult( + cursor.getPrevDup(key, pkey, data, opOptions)); + case PREV_NO_DUP: + return returnResult( + cursor.getPrevNoDup(key, pkey, data, opOptions)); + default: + throw new IllegalArgumentException( + "getType not allowed: " + getType); + } + } + /* */ + + public int count() + throws DatabaseException { + + checkInitialized(); + return cursor.count(); + } + + /* */ + public long countEstimate() + throws DatabaseException { + + checkInitialized(); + return cursor.getCursor().countEstimate(); + } + /* */ + + /* */ + /* for FUTURE use + public long skipNext(long maxCount) { + return skipNext(maxCount, null); + } + + public long skipNext(long maxCount, LockMode lockMode) { + checkInitialized(); + return cursor.getCursor().skipNext + (maxCount, BasicIndex.NO_RETURN_ENTRY, BasicIndex.NO_RETURN_ENTRY, + lockMode); + } + + public long skipPrev(long maxCount) { + return skipPrev(maxCount, null); + } + + public long skipPrev(long maxCount, LockMode lockMode) { + checkInitialized(); + return cursor.getCursor().skipPrev + (maxCount, BasicIndex.NO_RETURN_ENTRY, BasicIndex.NO_RETURN_ENTRY, + lockMode); + } + */ + /* */ + + public Iterator iterator() { + return iterator(null); + } + + public Iterator iterator(LockMode lockMode) { + return new BasicIterator(this, lockMode); + } + + public boolean update(V entity) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + return update(entity, null) != null; + } + /* */ + + if (!updateAllowed) { + throw new UnsupportedOperationException( + "Update not allowed on a secondary index"); + } + checkInitialized(); + adapter.valueToData(entity, data); + + return cursor.getCursor().putCurrent(data) == OperationStatus.SUCCESS; + } + + /* */ + public OperationResult update(V entity, WriteOptions options) + throws DatabaseException { + + if (!updateAllowed) { + throw new UnsupportedOperationException( + "Update not allowed on a secondary index"); + } + checkInitialized(); + 
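+ /* Editor's note: the entity is marshaled into the reusable data entry and then written back at the current cursor position by the put below. */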
adapter.valueToData(entity, data); + + return cursor.getCursor().put(null, data, Put.CURRENT, options); + } + /* */ + + public boolean delete() + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + return delete(null) != null; + } + /* */ + + checkInitialized(); + return cursor.getCursor().delete() == OperationStatus.SUCCESS; + } + + /* */ + public OperationResult delete(WriteOptions options) + throws DatabaseException { + + checkInitialized(); + return cursor.getCursor().delete(options); + } + /* */ + + public EntityCursor dup() + throws DatabaseException { + + return new BasicCursor(cursor.dup(true), adapter, updateAllowed); + } + + public void close() + throws DatabaseException { + + cursor.close(); + } + + /* */ + public void setCacheMode(CacheMode cacheMode) { + cursor.getCursor().setCacheMode(cacheMode); + } + /* */ + + /* */ + public CacheMode getCacheMode() { + return cursor.getCursor().getCacheMode(); + } + /* */ + + void checkInitialized() + throws IllegalStateException { + + if (!cursor.isInitialized()) { + throw new IllegalStateException + ("Cursor is not initialized at a valid position"); + } + } + + V returnValue(OpResult opResult) { + V value; + if (opResult.isSuccess()) { + value = adapter.entryToValue(key, pkey, data); + } else { + value = null; + } + /* Clear entries to save memory. */ + adapter.clearEntries(key, pkey, data); + return value; + } + + /* */ + EntityResult returnResult(OpResult opResult) { + V value = returnValue(opResult); + return (value != null) ? + new EntityResult<>(value, opResult.jeResult) : + null; + } + /* */ +} diff --git a/src/com/sleepycat/persist/BasicIndex.java b/src/com/sleepycat/persist/BasicIndex.java new file mode 100644 index 0000000..eb8f7f8 --- /dev/null +++ b/src/com/sleepycat/persist/BasicIndex.java @@ -0,0 +1,298 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +/* */ +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationResult; +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +/* */ +import com.sleepycat.je.WriteOptions; +/* */ +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.keyrange.RangeCursor; + +/** + * Implements EntityIndex using a ValueAdapter. This class is abstract and + * does not implement get()/map()/sortedMap() because it doesn't have access + * to the entity binding. 
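+ *
+ * A usage sketch of the methods implemented here (editor's illustration;
+ * PrimaryIndex is one concrete subclass, and Long/Employee are assumed
+ * example types):
+ *
+ *     PrimaryIndex<Long, Employee> index = ...
+ *     boolean present = index.contains(42L); // key probe; data is not fetched
+ *     long records = index.count();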
+ * + * @author Mark Hayes + */ +abstract class BasicIndex implements EntityIndex { + + static final DatabaseEntry NO_RETURN_ENTRY; + static { + NO_RETURN_ENTRY = new DatabaseEntry(); + NO_RETURN_ENTRY.setPartial(0, 0, true); + } + + Database db; + boolean transactional; + boolean sortedDups; + boolean locking; + boolean concurrentDB; + Class keyClass; + EntryBinding keyBinding; + KeyRange emptyRange; + ValueAdapter keyAdapter; + ValueAdapter entityAdapter; + + BasicIndex(Database db, + Class keyClass, + EntryBinding keyBinding, + ValueAdapter entityAdapter) + throws DatabaseException { + + this.db = db; + DatabaseConfig config = db.getConfig(); + transactional = config.getTransactional(); + sortedDups = config.getSortedDuplicates(); + locking = + DbCompat.getInitializeLocking(db.getEnvironment().getConfig()); + Environment env = db.getEnvironment(); + concurrentDB = DbCompat.getInitializeCDB(env.getConfig()); + this.keyClass = keyClass; + this.keyBinding = keyBinding; + this.entityAdapter = entityAdapter; + + emptyRange = new KeyRange(config.getBtreeComparator()); + keyAdapter = new KeyValueAdapter(keyClass, keyBinding); + } + + public Database getDatabase() { + return db; + } + + /* + * Of the EntityIndex methods only get()/map()/sortedMap() are not + * implemented here and therefore must be implemented by subclasses. + */ + + public boolean contains(K key) + throws DatabaseException { + + return contains(null, key, null); + } + + public boolean contains(Transaction txn, K key, LockMode lockMode) + throws DatabaseException { + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = NO_RETURN_ENTRY; + keyBinding.objectToEntry(key, keyEntry); + + OperationStatus status = db.get(txn, keyEntry, dataEntry, lockMode); + return (status == OperationStatus.SUCCESS); + } + + public long count() + throws DatabaseException { + + if (DbCompat.DATABASE_COUNT) { + return DbCompat.getDatabaseCount(db); + } else { + long count = 0; + DatabaseEntry key = NO_RETURN_ENTRY; + DatabaseEntry data = NO_RETURN_ENTRY; + CursorConfig cursorConfig = locking ? 
+ CursorConfig.READ_UNCOMMITTED : null; + Cursor cursor = db.openCursor(null, cursorConfig); + try { + OperationStatus status = cursor.getFirst(key, data, null); + while (status == OperationStatus.SUCCESS) { + if (sortedDups) { + count += cursor.count(); + } else { + count += 1; + } + status = cursor.getNextNoDup(key, data, null); + } + } finally { + cursor.close(); + } + return count; + } + } + + /* */ + + public long count(long memoryLimit) + throws DatabaseException { + + return db.count(memoryLimit); + } + + /* */ + + public boolean delete(K key) + throws DatabaseException { + + return delete(null, key); + } + + public boolean delete(Transaction txn, K key) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + return delete(txn, key, null) != null; + } + /* */ + + DatabaseEntry keyEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + OperationStatus status = db.delete(txn, keyEntry); + return (status == OperationStatus.SUCCESS); + } + + /* */ + public OperationResult delete(Transaction txn, K key, WriteOptions options) + throws DatabaseException { + + DatabaseEntry keyEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + return db.delete(txn, keyEntry, options); + } + /* */ + + public EntityCursor keys() + throws DatabaseException { + + return keys(null, null); + } + + public EntityCursor keys(Transaction txn, CursorConfig config) + throws DatabaseException { + + return cursor(txn, emptyRange, keyAdapter, config); + } + + public EntityCursor entities() + throws DatabaseException { + + return cursor(null, emptyRange, entityAdapter, null); + } + + public EntityCursor entities(Transaction txn, + CursorConfig config) + throws DatabaseException { + + return cursor(txn, emptyRange, entityAdapter, config); + } + + public EntityCursor keys(K fromKey, boolean fromInclusive, + K toKey, boolean toInclusive) + throws DatabaseException { + + return cursor(null, fromKey, fromInclusive, toKey, toInclusive, + keyAdapter, null); + } + + public EntityCursor keys(Transaction txn, + K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive, + CursorConfig config) + throws DatabaseException { + + return cursor(txn, fromKey, fromInclusive, toKey, toInclusive, + keyAdapter, config); + } + + public EntityCursor entities(K fromKey, boolean fromInclusive, + K toKey, boolean toInclusive) + throws DatabaseException { + + return cursor(null, fromKey, fromInclusive, toKey, toInclusive, + entityAdapter, null); + } + + public EntityCursor entities(Transaction txn, + K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive, + CursorConfig config) + throws DatabaseException { + + return cursor(txn, fromKey, fromInclusive, toKey, toInclusive, + entityAdapter, config); + } + + private EntityCursor cursor(Transaction txn, + K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive, + ValueAdapter adapter, + CursorConfig config) + throws DatabaseException { + + DatabaseEntry fromEntry = null; + if (fromKey != null) { + fromEntry = new DatabaseEntry(); + keyBinding.objectToEntry(fromKey, fromEntry); + } + DatabaseEntry toEntry = null; + if (toKey != null) { + toEntry = new DatabaseEntry(); + keyBinding.objectToEntry(toKey, toEntry); + } + KeyRange range = emptyRange.subRange + (fromEntry, fromInclusive, toEntry, toInclusive); + return cursor(txn, range, adapter, config); + } + + private EntityCursor cursor(Transaction txn, + KeyRange range, + ValueAdapter adapter, + CursorConfig config) + throws DatabaseException { + + Cursor cursor = 
db.openCursor(txn, config); + RangeCursor rangeCursor = + new RangeCursor(range, null/*pkRange*/, sortedDups, cursor); + return new BasicCursor(rangeCursor, adapter, isUpdateAllowed()); + } + + abstract boolean isUpdateAllowed(); + + /* */ + static void checkGetType(Get getType) { + + if (getType != Get.SEARCH) { + throw new IllegalArgumentException( + "getType not allowed: " + getType); + } + } + /* */ +} diff --git a/src/com/sleepycat/persist/BasicIterator.java b/src/com/sleepycat/persist/BasicIterator.java new file mode 100644 index 0000000..4af132b --- /dev/null +++ b/src/com/sleepycat/persist/BasicIterator.java @@ -0,0 +1,88 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import java.util.Iterator; +import java.util.NoSuchElementException; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockMode; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * Implements Iterator for an arbitrary EntityCursor. + * + * @author Mark Hayes + */ +class BasicIterator implements Iterator { + + private EntityCursor entityCursor; + private ForwardCursor forwardCursor; + private LockMode lockMode; + private V nextValue; + + /** + * An EntityCursor is given and the remove() method is supported. + */ + BasicIterator(EntityCursor entityCursor, LockMode lockMode) { + this.entityCursor = entityCursor; + this.forwardCursor = entityCursor; + this.lockMode = lockMode; + } + + /** + * A ForwardCursor is given and the remove() method is not supported. + */ + BasicIterator(ForwardCursor forwardCursor, LockMode lockMode) { + this.forwardCursor = forwardCursor; + this.lockMode = lockMode; + } + + public boolean hasNext() { + if (nextValue == null) { + try { + nextValue = forwardCursor.next(lockMode); + } catch (DatabaseException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + return nextValue != null; + } else { + return true; + } + } + + public V next() { + if (hasNext()) { + V v = nextValue; + nextValue = null; + return v; + } else { + throw new NoSuchElementException(); + } + } + + public void remove() { + if (entityCursor == null) { + throw new UnsupportedOperationException(); + } + try { + if (!entityCursor.delete()) { + throw new IllegalStateException + ("Record at cursor position is already deleted"); + } + } catch (DatabaseException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } +} diff --git a/src/com/sleepycat/persist/DataValueAdapter.java b/src/com/sleepycat/persist/DataValueAdapter.java new file mode 100644 index 0000000..220bb0f --- /dev/null +++ b/src/com/sleepycat/persist/DataValueAdapter.java @@ -0,0 +1,62 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * A ValueAdapter where the "value" is the data, although the data in this case + * is the primary key in a KeysIndex. + * + * @author Mark Hayes + */ +class DataValueAdapter implements ValueAdapter { + + private EntryBinding dataBinding; + + DataValueAdapter(Class keyClass, EntryBinding dataBinding) { + this.dataBinding = dataBinding; + } + + public DatabaseEntry initKey() { + return new DatabaseEntry(); + } + + public DatabaseEntry initPKey() { + return null; + } + + public DatabaseEntry initData() { + return new DatabaseEntry(); + } + + public void clearEntries(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + key.setData(null); + data.setData(null); + } + + public V entryToValue(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + return (V) dataBinding.entryToObject(data); + } + + public void valueToData(V value, DatabaseEntry data) { + throw new UnsupportedOperationException + ("Cannot change the data in a key-only index"); + } +} diff --git a/src/com/sleepycat/persist/DatabaseNamer.java b/src/com/sleepycat/persist/DatabaseNamer.java new file mode 100644 index 0000000..345f583 --- /dev/null +++ b/src/com/sleepycat/persist/DatabaseNamer.java @@ -0,0 +1,118 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.je.Database; // for javadoc + +/** + * + * @hidden + * + * Determines the file names to use for primary and secondary databases. + * + *

        Each {@link PrimaryIndex} and {@link SecondaryIndex} is represented + * internally as a Berkeley DB {@link Database}. The file names of primary and + * secondary indices must be unique within the environment, so that each index + * is stored in a separate database file.

        + * + *

        By default, the file names of primary and secondary databases are + * defined as follows.

        + * + *

        The syntax of a primary index database file name is:

        + *
           STORE_NAME-ENTITY_CLASS
        + *

Where STORE_NAME is the name parameter passed to {@link + * EntityStore#EntityStore EntityStore} and ENTITY_CLASS is the name of the class + * passed to {@link EntityStore#getPrimaryIndex getPrimaryIndex}.

        + * + *

        The syntax of a secondary index database file name is:

        + *
           STORE_NAME-ENTITY_CLASS-KEY_NAME
        + *

        Where KEY_NAME is the secondary key name passed to {@link + * EntityStore#getSecondaryIndex getSecondaryIndex}.
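+ * + *
+ * For instance (editor's illustration with hypothetical names), a store
+ * named "myStore" holding entity class com.example.Employee with secondary
+ * key "department" uses the database file names:
+ *
+ *    myStore-com.example.Employee
+ *    myStore-com.example.Employee-department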

        + * + *

The default naming described above is implemented by the built-in {@link + * DatabaseNamer#DEFAULT} object. An application may supply a custom {@link + * DatabaseNamer} to override the default naming scheme. For example, a + * custom namer could place all database files in a subdirectory with the name + * of the store. A custom namer could also be used to name files according to + * specific file system restrictions.

        + * + *

        The custom namer object must be an instance of the {@code DatabaseNamer} + * interface and is configured using {@link StoreConfig#setDatabaseNamer + * setDatabaseNamer}.
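+ * + *
+ * For example, a namer that mirrors the default scheme (editor's sketch,
+ * not part of the original source) could be installed like this:
+ *
+ *     DatabaseNamer namer = new DatabaseNamer() {
+ *         public String getFileName(String storeName,
+ *                                   String entityClassName,
+ *                                   String keyName) {
+ *             String name = storeName + '-' + entityClassName;
+ *             return (keyName != null) ? (name + '-' + keyName) : name;
+ *         }
+ *     };
+ *     StoreConfig config = new StoreConfig();
+ *     config.setDatabaseNamer(namer);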

        + * + *

        When copying or removing all databases in a store, there is one further + * consideration. There are two internal databases that must be kept with the + * other databases in the store in order for the store to be used. These + * contain the data formats and sequences for the store. Their entity class + * names are:

        + * + *
           com.sleepycat.persist.formats
        + *
           com.sleepycat.persist.sequences
        + * + *

With default database naming, databases with the following names will be + * present in each store.

        + * + *
           STORE_NAME-com.sleepycat.persist.formats
        + *
           STORE_NAME-com.sleepycat.persist.sequences
        + * + *

        These databases must normally be included with copies of other databases + * in the store. They should not be modified by the application.

+ */ +public interface DatabaseNamer { + + /** + * Returns the name of the file to be used to store the database for the + * given store, entity class and key. This method may not return null. + * + * @param storeName the name of the {@link EntityStore}. + * + * @param entityClassName the complete name of the entity class for a + * primary or secondary index. + * + * @param keyName the key name identifying a secondary index, or null for + * a primary index. + * + * @return the file name. + */ + public String getFileName(String storeName, + String entityClassName, + String keyName); + + /** + * The default database namer. + * + *

        The {@link #getFileName getFileName} method of this namer returns the + * {@code storeName}, {@code entityClassName} and {@code keyName} + * parameters as follows:

        + * + *
        +     * if (keyName != null) {
        +     *     return storeName + '-' + entityClassName + '-' + keyName;
        +     * } else {
        +     *     return storeName + '-' + entityClassName;
        +     * }
        + */ + public static final DatabaseNamer DEFAULT = new DatabaseNamer() { + + public String getFileName(String storeName, + String entityClassName, + String keyName) { + if (keyName != null) { + return storeName + '-' + entityClassName + '-' + keyName; + } else { + return storeName + '-' + entityClassName; + } + } + }; +} diff --git a/src/com/sleepycat/persist/EntityCursor.java b/src/com/sleepycat/persist/EntityCursor.java new file mode 100644 index 0000000..f68f75c --- /dev/null +++ b/src/com/sleepycat/persist/EntityCursor.java @@ -0,0 +1,1280 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import java.util.Iterator; + +/* */ +import com.sleepycat.je.CacheMode; +/* */ +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DuplicateDataException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationFailureException; +/* */ +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.ReadOptions; +/* */ +import com.sleepycat.je.Transaction; +/* */ +import com.sleepycat.je.WriteOptions; +/* */ +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKey; + +/** + * Traverses entity values or key values and allows deleting or updating the + * entity at the current cursor position. The value type (V) is either an + * entity class or a key class, depending on how the cursor was opened. + * + *

        {@code EntityCursor} objects are not thread-safe. Cursors + * should be opened, used and closed by a single thread.

        + * + *

        Cursors are opened using the {@link EntityIndex#keys} and {@link + * EntityIndex#entities} family of methods. These methods are available for + * objects of any class that implements {@link EntityIndex}: {@link + * PrimaryIndex}, {@link SecondaryIndex}, and the indices returned by {@link + * SecondaryIndex#keysIndex} and {@link SecondaryIndex#subIndex}. A {@link + * ForwardCursor}, which implements a subset of cursor operations, is also + * available via the {@link EntityJoin#keys} and {@link EntityJoin#entities} + * methods.

        + * + *

Values are always returned by a cursor in key order, where the key is + * defined by the underlying {@link EntityIndex}. For example, a cursor on a + * {@link SecondaryIndex} returns values ordered by secondary key, while a + * cursor on a {@link PrimaryIndex} or a {@link SecondaryIndex#subIndex} returns + * values ordered by primary key.

        + * + *

        WARNING: Cursors must always be closed to prevent resource leaks + * which could lead to the index becoming unusable or cause an + * OutOfMemoryError. To ensure that a cursor is closed in the + * face of exceptions, call {@link #close} in a finally block. For example, + * the following code traverses all Employee entities and closes the cursor + * whether or not an exception occurs:

        + * + *
        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
        + *     String department;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
        + * EntityStore store = ...
        + *
+ * {@code PrimaryIndex<Long, Employee>} primaryIndex =
        + *     store.getPrimaryIndex(Long.class, Employee.class);
        + *
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
        + * try {
        + *     for (Employee entity = cursor.first();
        + *                   entity != null;
        + *                   entity = cursor.next()) {
        + *         // Do something with the entity...
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
        + * + *

        Initializing the Cursor Position

        + * + *

        When it is opened, a cursor is not initially positioned on any value; in + * other words, it is uninitialized. Most methods in this interface initialize + * the cursor position but certain methods, for example, {@link #current} and + * {@link #delete}, throw {@link IllegalStateException} when called for an + * uninitialized cursor.

        + * + *

        Note that the {@link #next} and {@link #prev} methods return the first or + * last value respectively for an uninitialized cursor. This allows the loop + * in the example above to be rewritten as follows:

        + * + *
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
        + * try {
        + *     Employee entity;
        + *     while ((entity = cursor.next()) != null) {
        + *         // Do something with the entity...
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
        + * + *

        Cursors and Iterators

        + * + *

        The {@link #iterator} method can be used to return a standard Java {@code + * Iterator} that returns the same values that the cursor returns. For + * example:

        + * + *
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
        + * try {
+ *     {@code Iterator<Employee>} i = cursor.iterator();
        + *     while (i.hasNext()) {
        + *          Employee entity = i.next();
        + *         // Do something with the entity...
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
        + * + *

        The {@link Iterable} interface is also extended by {@link EntityCursor} + * to allow using the cursor as the target of a Java "foreach" statement:

        + * + *
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
        + * try {
        + *     for (Employee entity : cursor) {
        + *         // Do something with the entity...
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
        + * + *

The iterator uses the cursor directly, so any changes to the cursor + * position impact the iterator and vice versa. The iterator advances the + * cursor by calling {@link #next()} when {@link Iterator#hasNext} or {@link + * Iterator#next} is called. Because of this interaction, it is best not to + * mix the use of an {@code EntityCursor} {@code Iterator} with the + * {@code EntityCursor} traversal methods such as {@link #next()} on a single + * {@code EntityCursor} object.

        + * + *

        Key Ranges

        + * + *

        A key range may be specified when opening the cursor, to restrict the + * key range of the cursor to a subset of the complete range of keys in the + * index. A {@code fromKey} and/or {@code toKey} parameter may be specified + * when calling {@link EntityIndex#keys(Object,boolean,Object,boolean)} or + * {@link EntityIndex#entities(Object,boolean,Object,boolean)}. The key + * arguments may be specified as inclusive or exclusive values.

        + * + *

        Whenever a cursor with a key range is moved, the key range bounds will be + * checked, and the cursor will never be positioned outside the range. The + * {@link #first} cursor value is the first existing value in the range, and + * the {@link #last} cursor value is the last existing value in the range. For + * example, the following code traverses Employee entities with keys from 100 + * (inclusive) to 200 (exclusive):

        + * + *
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(100, true, 200, false);
        + * try {
        + *     for (Employee entity : cursor) {
        + *         // Do something with the entity...
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
        + * + *

        Duplicate Keys

        + * + *

        When using a cursor for a {@link SecondaryIndex}, the keys in the index + * may be non-unique (duplicates) if {@link SecondaryKey#relate} is {@link + * Relationship#MANY_TO_ONE MANY_TO_ONE} or {@link Relationship#MANY_TO_MANY + * MANY_TO_MANY}. For example, a {@code MANY_TO_ONE} {@code + * Employee.department} secondary key is non-unique because there are multiple + * Employee entities with the same department key value. The {@link #nextDup}, + * {@link #prevDup}, {@link #nextNoDup} and {@link #prevNoDup} methods may be + * used to control how non-unique keys are returned by the cursor.

        + * + *

        {@link #nextDup} and {@link #prevDup} return the next or previous value + * only if it has the same key as the current value, and null is returned when + * a different key is encountered. For example, these methods can be used to + * return all employees in a given department.

        + * + *

        {@link #nextNoDup} and {@link #prevNoDup} return the next or previous + * value with a unique key, skipping over values that have the same key. For + * example, these methods can be used to return the first employee in each + * department.

        + * + *

        For example, the following code will find the first employee in each + * department with {@link #nextNoDup} until it finds a department name that + * matches a particular regular expression. For each matching department it + * will find all employees in that department using {@link #nextDup}.

        + * + *
+ * {@code SecondaryIndex<String, Long, Employee>} secondaryIndex =
        + *     store.getSecondaryIndex(primaryIndex, String.class, "department");
        + *
        + * String regex = ...;
+ * {@code EntityCursor<Employee>} cursor = secondaryIndex.entities();
        + * try {
        + *     for (Employee entity = cursor.first();
        + *                   entity != null;
        + *                   entity = cursor.nextNoDup()) {
        + *         if (entity.department.matches(regex)) {
        + *             while (entity != null) {
        + *                 // Do something with the matching entities...
        + *                 entity = cursor.nextDup();
        + *             }
        + *         }
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
        + * + *

        Updating and Deleting Entities with a Cursor

        + * + *

        The {@link #update} and {@link #delete} methods operate on the entity at + * the current cursor position. Cursors on any type of index may be used to + * delete entities. For example, the following code deletes all employees in + * departments which have names that match a particular regular expression:

        + * + *
+ * {@code SecondaryIndex<String, Long, Employee>} secondaryIndex =
        + *     store.getSecondaryIndex(primaryIndex, String.class, "department");
        + *
        + * String regex = ...;
+ * {@code EntityCursor<Employee>} cursor = secondaryIndex.entities();
        + * try {
        + *     for (Employee entity = cursor.first();
        + *                   entity != null;
        + *                   entity = cursor.nextNoDup()) {
        + *         if (entity.department.matches(regex)) {
        + *             while (entity != null) {
        + *                 cursor.delete();
        + *                 entity = cursor.nextDup();
        + *             }
        + *         }
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
        + * + *

        Note that the cursor can be moved to the next (or previous) value after + * deleting the entity at the current position. This is an important property + * of cursors, since without it you would not be able to easily delete while + * processing multiple values with a cursor. A cursor positioned on a deleted + * entity is in a special state. In this state, {@link #current} will return + * null, {@link #delete} will return false, and {@link #update} will return + * false.
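+ * + *
+ * For illustration (editor's sketch):
+ *
+ *     Employee entity = cursor.first();
+ *     cursor.delete();                  // true: entity removed
+ *     assert cursor.current() == null;  // cursor is in the deleted state
+ *     assert !cursor.delete();          // false: already deleted
+ *     entity = cursor.next();           // moves on to the next entity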

        + * + *

        The {@link #update} method is supported only if the value type is an + * entity class (not a key class) and the underlying index is a {@link + * PrimaryIndex}; in other words, for a cursor returned by one of the {@link + * PrimaryIndex#entities} methods. For example, the following code changes all + * employee names to uppercase:

        + * + *
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
        + * try {
        + *     for (Employee entity = cursor.first();
        + *                   entity != null;
        + *                   entity = cursor.next()) {
        + *         entity.name = entity.name.toUpperCase();
        + *         cursor.update(entity);
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }
+ * + * @author Mark Hayes + */ +public interface EntityCursor<V> extends ForwardCursor<V> { + + /** + * Moves the cursor to the first value and returns it, or returns null if + * the cursor range is empty. + * + *

        {@link LockMode#DEFAULT} is used implicitly.

        + * + * @return the first value, or null if the cursor range is empty. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + V first() + throws DatabaseException; + + /** + * Moves the cursor to the first value and returns it, or returns null if + * the cursor range is empty. + * + * @param lockMode the lock mode to use for this operation, or null to + * use {@link LockMode#DEFAULT}. + * + * @return the first value, or null if the cursor range is empty. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + V first(LockMode lockMode) + throws DatabaseException; + + /** + * Moves the cursor to the last value and returns it, or returns null if + * the cursor range is empty. + * + *

        {@link LockMode#DEFAULT} is used implicitly.

+ * @return the last value, or null if the cursor range is empty.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V last()
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the last value and returns it, or returns null if
+ * the cursor range is empty.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the last value, or null if the cursor range is empty.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V last(LockMode lockMode)
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the next value and returns it, or returns null
+ * if there are no more values in the cursor range.  If the cursor is
+ * uninitialized, this method is equivalent to {@link #first}.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the next value, or null if there are no more values in the
+ * cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V next()
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the next value and returns it, or returns null
+ * if there are no more values in the cursor range.  If the cursor is
+ * uninitialized, this method is equivalent to {@link #first}.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the next value, or null if there are no more values in the
+ * cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V next(LockMode lockMode)
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the next value with the same key (duplicate) and
+ * returns it, or returns null if no more values are present for the key at
+ * the current position.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the next value with the same key, or null if no more values are
+ * present for the key at the current position.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V nextDup()
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the next value with the same key (duplicate) and
+ * returns it, or returns null if no more values are present for the key at
+ * the current position.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the next value with the same key, or null if no more values are
+ * present for the key at the current position.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V nextDup(LockMode lockMode)
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the next value with a different key and returns it,
+ * or returns null if there are no more unique keys in the cursor range.
+ * If the cursor is uninitialized, this method is equivalent to {@link
+ * #first}.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the next value with a different key, or null if there are no
+ * more unique keys in the cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V nextNoDup()
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the next value with a different key and returns it,
+ * or returns null if there are no more unique keys in the cursor range.
+ * If the cursor is uninitialized, this method is equivalent to {@link
+ * #first}.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the next value with a different key, or null if there are no
+ * more unique keys in the cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V nextNoDup(LockMode lockMode)
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the previous value and returns it, or returns null
+ * if there are no preceding values in the cursor range.  If the cursor is
+ * uninitialized, this method is equivalent to {@link #last}.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the previous value, or null if there are no preceding values in
+ * the cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V prev()
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the previous value and returns it, or returns null
+ * if there are no preceding values in the cursor range.  If the cursor is
+ * uninitialized, this method is equivalent to {@link #last}.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the previous value, or null if there are no preceding values in
+ * the cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V prev(LockMode lockMode)
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the previous value with the same key (duplicate) and
+ * returns it, or returns null if no preceding values are present for the
+ * key at the current position.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the previous value with the same key, or null if no preceding
+ * values are present for the key at the current position.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V prevDup()
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the previous value with the same key (duplicate) and
+ * returns it, or returns null if no preceding values are present for the
+ * key at the current position.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the previous value with the same key, or null if no preceding
+ * values are present for the key at the current position.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V prevDup(LockMode lockMode)
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the preceding value with a different key and returns
+ * it, or returns null if there are no preceding unique keys in the cursor
+ * range.  If the cursor is uninitialized, this method is equivalent to
+ * {@link #last}.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the previous value with a different key, or null if there are no
+ * preceding unique keys in the cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V prevNoDup()
+     throws DatabaseException;
+
+ /**
+ * Moves the cursor to the preceding value with a different key and returns
+ * it, or returns null if there are no preceding unique keys in the cursor
+ * range.  If the cursor is uninitialized, this method is equivalent to
+ * {@link #last}.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the previous value with a different key, or null if there are no
+ * preceding unique keys in the cursor range.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V prevNoDup(LockMode lockMode)
+     throws DatabaseException;
+
+ /**
+ * Returns the value at the cursor position, or null if the value at the
+ * cursor position has been deleted.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the value at the cursor position, or null if it has been
+ * deleted.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V current()
+     throws DatabaseException;
+
+ /**
+ * Returns the value at the cursor position, or null if the value at the
+ * cursor position has been deleted.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the value at the cursor position, or null if it has been
+ * deleted.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V current(LockMode lockMode)
+     throws DatabaseException;

+ /* */
+
+ /**
+ * Moves the cursor according to the specified {@link Get} type and returns
+ * the value at the updated position.
+ *
+ * <p>The following table lists each allowed operation.  Also specified is
+ * whether the cursor must be initialized (positioned on a value) before
+ * calling this method.  See the individual {@link Get} operations for more
+ * information.</p>
+ *
+ * <table border="1">
+ * <tr><th>Get operation</th><th>Description</th>
+ *     <th>Cursor position must be initialized?</th></tr>
+ * <tr><td>{@link Get#CURRENT}</td><td>Accesses the current value.</td>
+ *     <td>yes</td></tr>
+ * <tr><td>{@link Get#FIRST}</td><td>Finds the first value in the cursor
+ *     range.</td><td>no</td></tr>
+ * <tr><td>{@link Get#LAST}</td><td>Finds the last value in the cursor
+ *     range.</td><td>no</td></tr>
+ * <tr><td>{@link Get#NEXT}</td><td>Moves to the next value.</td>
+ *     <td>no**</td></tr>
+ * <tr><td>{@link Get#NEXT_DUP}</td><td>Moves to the next value with the
+ *     same key.</td><td>yes</td></tr>
+ * <tr><td>{@link Get#NEXT_NO_DUP}</td><td>Moves to the next value with a
+ *     different key.</td><td>no**</td></tr>
+ * <tr><td>{@link Get#PREV}</td><td>Moves to the previous value.</td>
+ *     <td>no**</td></tr>
+ * <tr><td>{@link Get#PREV_DUP}</td><td>Moves to the previous value with
+ *     the same key.</td><td>yes</td></tr>
+ * <tr><td>{@link Get#PREV_NO_DUP}</td><td>Moves to the previous value
+ *     with a different key.</td><td>no**</td></tr>
+ * </table>
+ *
+ * <p>** - For these 'next' and 'previous' operations the cursor may be
+ * uninitialized, in which case the cursor will be moved to the first or
+ * last value in the cursor range, respectively.</p>
+ *
+ * @param getType the Get operation type.  Must be one of the values listed
+ * above.
+ *
+ * @param options the ReadOptions, or null to use default options.
+ *
+ * @return the EntityResult, including the value at the new cursor
+ * position, or null if the requested value is not present in the cursor
+ * range.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ *
+ * @since 7.0
+ */
+ EntityResult<V> get(Get getType, ReadOptions options)
+     throws DatabaseException;
+ /* */

+ /**
+ * Returns the number of values (duplicates) for the key at the cursor
+ * position, or returns zero if all values for the key have been deleted.
+ * Returns one or zero if the underlying index has unique keys.
+ *
+ * <p>The cost of this method is directly proportional to the number of
+ * values.</p>
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the number of duplicates, or zero if all values for the current
+ * key have been deleted.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ int count()
+     throws DatabaseException;
+
+ /* */

+ /**
+ * Returns a rough estimate of the number of values (duplicates) for the
+ * key at the cursor position, or returns zero if all values for the key
+ * have been deleted.  Returns one or zero if the underlying index has
+ * unique keys.
+ *
+ * <p>If the underlying index has non-unique keys, a quick estimate of the
+ * number of values is computed using information in the Btree.  Because
+ * the Btree is unbalanced, in some cases the estimate may be off by a
+ * factor of two or more.  The estimate is accurate when the number of
+ * records is less than the configured {@link
+ * DatabaseConfig#setNodeMaxEntries NodeMaxEntries}.</p>
+ *
+ * <p>The cost of this method is fixed, rather than being proportional to
+ * the number of values.  Because its accuracy is variable, this method
+ * should normally be used when accuracy is not required, such as for query
+ * optimization, and a fixed cost operation is needed.  For example, this
+ * method is used internally for determining the index processing order in
+ * an {@link EntityJoin}.</p>
+ *
+ * @return an estimate of the count of the number of data items for the key
+ * to which the cursor refers.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ long countEstimate()
+     throws DatabaseException;
+ /* */
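+ /*
+  * Editor's sketch (not part of the original Javadoc): countEstimate() is
+  * a fixed-cost operation that may be off by a factor of two or more,
+  * while count() is exact but proportional to the number of duplicates,
+  * so the estimate suits coarse decisions.
+  *
+  *     long rough = cursor.countEstimate();
+  *     int exact = (rough < 1000) ? cursor.count() : -1; // 1000 is arbitrary
+  */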

+ /* */
+ /* for FUTURE use
+ * Moves the cursor forward by a number of values and returns the number
+ * moved, or returns zero if there are no values following the cursor
+ * position.
+ *
+ * <p>Without regard to performance, calling this method is equivalent to
+ * repeatedly calling {@link #next} with {@link LockMode#READ_UNCOMMITTED}
+ * to skip over the desired number of values, and then calling {@link
+ * #current} with the {@link LockMode#DEFAULT} parameter to return the
+ * final value.</p>
+ *
+ * <p>With regard to performance, this method is optimized to skip over
+ * values using a smaller number of Btree operations.  When there is no
+ * contention on the bottom internal nodes (BINs) and all BINs are in
+ * cache, the number of Btree operations is reduced by roughly two orders
+ * of magnitude, where the exact number depends on the {@link
+ * com.sleepycat.je.EnvironmentConfig#NODE_MAX_ENTRIES} setting.  When
+ * there is contention on BINs or fetching BINs is required, the scan is
+ * broken up into smaller operations to avoid blocking other threads for
+ * long time periods.</p>
+ *
+ * @param maxCount the maximum number of values to skip, i.e., the maximum
+ * number by which the cursor should be moved; must be greater than zero.
+ *
+ * @return the number of values skipped, i.e., the number by which the
+ * cursor has moved; if zero is returned, the cursor position is
+ * unchanged.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ long skipNext(long maxCount);
+ */
+ /* */
+
+ /* */
+ /* for FUTURE use
+ * Moves the cursor forward by a number of values and returns the number
+ * moved, or returns zero if there are no values following the cursor
+ * position.
+ *
+ * <p>Without regard to performance, calling this method is equivalent to
+ * repeatedly calling {@link #next} with {@link LockMode#READ_UNCOMMITTED}
+ * to skip over the desired number of values, and then calling {@link
+ * #current} with the {@code lockMode} parameter to return the final
+ * value.</p>
+ *
+ * <p>With regard to performance, this method is optimized to skip over
+ * values using a smaller number of Btree operations.  When there is no
+ * contention on the bottom internal nodes (BINs) and all BINs are in
+ * cache, the number of Btree operations is reduced by roughly two orders
+ * of magnitude, where the exact number depends on the {@link
+ * com.sleepycat.je.EnvironmentConfig#NODE_MAX_ENTRIES} setting.  When
+ * there is contention on BINs or fetching BINs is required, the scan is
+ * broken up into smaller operations to avoid blocking other threads for
+ * long time periods.</p>
+ *
+ * @param maxCount the maximum number of values to skip, i.e., the maximum
+ * number by which the cursor should be moved; must be greater than zero.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the number of values skipped, i.e., the number by which the
+ * cursor has moved; if zero is returned, the cursor position is
+ * unchanged.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ long skipNext(long maxCount, LockMode lockMode);
+ */
+ /* */
+
+ /* */
+ /* for FUTURE use
+ * Moves the cursor backward by a number of values and returns the number
+ * moved, or returns zero if there are no values preceding the cursor
+ * position.
+ *
+ * <p>Without regard to performance, calling this method is equivalent to
+ * repeatedly calling {@link #prev} with {@link LockMode#READ_UNCOMMITTED}
+ * to skip over the desired number of values, and then calling {@link
+ * #current} with the {@link LockMode#DEFAULT} parameter to return the
+ * final value.</p>
+ *
+ * <p>With regard to performance, this method is optimized to skip over
+ * values using a smaller number of Btree operations.  When there is no
+ * contention on the bottom internal nodes (BINs) and all BINs are in
+ * cache, the number of Btree operations is reduced by roughly two orders
+ * of magnitude, where the exact number depends on the {@link
+ * com.sleepycat.je.EnvironmentConfig#NODE_MAX_ENTRIES} setting.  When
+ * there is contention on BINs or fetching BINs is required, the scan is
+ * broken up into smaller operations to avoid blocking other threads for
+ * long time periods.</p>
+ *
+ * @param maxCount the maximum number of values to skip, i.e., the maximum
+ * number by which the cursor should be moved; must be greater than zero.
+ *
+ * @return the number of values skipped, i.e., the number by which the
+ * cursor has moved; if zero is returned, the cursor position is
+ * unchanged.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ long skipPrev(long maxCount);
+ */
+ /* */
+
+ /* */
+ /* for FUTURE use
+ * Moves the cursor backward by a number of values and returns the number
+ * moved, or returns zero if there are no values preceding the cursor
+ * position.
+ *
+ * <p>Without regard to performance, calling this method is equivalent to
+ * repeatedly calling {@link #prev} with {@link LockMode#READ_UNCOMMITTED}
+ * to skip over the desired number of values, and then calling {@link
+ * #current} with the {@code lockMode} parameter to return the final
+ * value.</p>
+ *
+ * <p>With regard to performance, this method is optimized to skip over
+ * values using a smaller number of Btree operations.  When there is no
+ * contention on the bottom internal nodes (BINs) and all BINs are in
+ * cache, the number of Btree operations is reduced by roughly two orders
+ * of magnitude, where the exact number depends on the {@link
+ * com.sleepycat.je.EnvironmentConfig#NODE_MAX_ENTRIES} setting.  When
+ * there is contention on BINs or fetching BINs is required, the scan is
+ * broken up into smaller operations to avoid blocking other threads for
+ * long time periods.</p>
+ *
+ * @param maxCount the maximum number of values to skip, i.e., the maximum
+ * number by which the cursor should be moved; must be greater than zero.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the number of values skipped, i.e., the number by which the
+ * cursor has moved; if zero is returned, the cursor position is
+ * unchanged.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ long skipPrev(long maxCount, LockMode lockMode);
+ */
+ /* */

+ /**
+ * Returns an iterator over the key range, starting with the value
+ * following the current position or at the first value if the cursor is
+ * uninitialized.
+ *
+ * <p>{@link LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @return the iterator.
+ */
+ Iterator<V> iterator();
+
+ /**
+ * Returns an iterator over the key range, starting with the value
+ * following the current position or at the first value if the cursor is
+ * uninitialized.
+ *
+ * @param lockMode the lock mode to use for all operations performed
+ * using the iterator, or null to use {@link LockMode#DEFAULT}.
+ *
+ * @return the iterator.
+ */
+ Iterator<V> iterator(LockMode lockMode);
+
+ /**
+ * Replaces the entity at the cursor position with the given entity.
+ *
+ * @param entity the entity to replace the entity at the current position.
+ *
+ * @return true if successful or false if the entity at the current
+ * position was previously deleted.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws UnsupportedOperationException if the index is read only or if
+ * the value type is not an entity type.
+ *
+ * @throws DuplicateDataException if the old and new data are not equal
+ * according to the configured duplicate comparator or default comparator.
+ *
+ * @throws OperationFailureException if one of the Write Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ boolean update(V entity)
+     throws DatabaseException;
+
+ /* */
+ /**
+ * Replaces the entity at the cursor position with the given entity,
+ * using a WriteOptions parameter and returning an OperationResult.
+ *
+ * @param entity the entity to replace the entity at the current position.
+ *
+ * @param options the WriteOptions, or null to use default options.
+ *
+ * @return the OperationResult if successful or null if the entity at the
+ * current position was previously deleted.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws UnsupportedOperationException if the index is read only or if
+ * the value type is not an entity type.
+ *
+ * @throws DuplicateDataException if the old and new data are not equal
+ * according to the configured duplicate comparator or default comparator.
+ *
+ * @throws OperationFailureException if one of the Write Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ *
+ * @since 7.0
+ */
+ OperationResult update(V entity, WriteOptions options)
+     throws DatabaseException;
+ /* */
+
+ /**
+ * Deletes the entity at the cursor position.
+ *
+ * @return true if successful or false if the entity at the current
+ * position has been deleted.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws UnsupportedOperationException if the index is read only.
+ *
+ * @throws OperationFailureException if one of the Write Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ boolean delete()
+     throws DatabaseException;
+
+ /* */
+ /**
+ * Deletes the entity at the cursor position, using a WriteOptions
+ * parameter and returning an OperationResult.
+ *
+ * @param options the WriteOptions, or null to use default options.
+ *
+ * @return the OperationResult if successful or null if the entity at the
+ * current position was previously deleted.
+ *
+ * @throws IllegalStateException if the cursor is uninitialized.
+ *
+ * @throws UnsupportedOperationException if the index is read only.
+ *
+ * @throws OperationFailureException if one of the Write Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ *
+ * @since 7.0
+ */
+ OperationResult delete(WriteOptions options)
+     throws DatabaseException;
+ /* */
+
+ /**
+ * Duplicates the cursor at the cursor position.  The returned cursor will
+ * be initially positioned at the same position as this cursor, and will
+ * inherit this cursor's {@link Transaction} and {@link CursorConfig}.
+ *
+ * @return the duplicated cursor.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ EntityCursor<V> dup()
+     throws DatabaseException;
+
+ /**
+ * Closes the cursor.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ void close()
+     throws DatabaseException;
+
+ /* */
+ /**
+ * Changes the {@code CacheMode} default used for subsequent operations
+ * performed using this cursor.  For a newly opened cursor, the default is
+ * {@link CacheMode#DEFAULT}.  Note that the default is always overridden
+ * by a non-null cache mode that is specified via {@link ReadOptions} or
+ * {@link WriteOptions}.
+ *
+ * @param cacheMode is the default {@code CacheMode} used for subsequent
+ * operations using this cursor, or null to configure the Database or
+ * Environment default.
+ *
+ * @see CacheMode
+ */
+ void setCacheMode(CacheMode cacheMode);
+ /* */
+
+ /* */
+ /**
+ * Returns the default {@code CacheMode} used for subsequent operations
+ * performed using this cursor.  If {@link #setCacheMode} has not been
+ * called with a non-null value, the configured Database or Environment
+ * default is returned.
+ *
+ * @return the {@code CacheMode} default used for subsequent operations
+ * using this cursor.
+ *
+ * @see CacheMode
+ */
+ CacheMode getCacheMode();
+ /* */
+}
diff --git a/src/com/sleepycat/persist/EntityIndex.java b/src/com/sleepycat/persist/EntityIndex.java
new file mode 100644
index 0000000..a624234
--- /dev/null
+++ b/src/com/sleepycat/persist/EntityIndex.java
@@ -0,0 +1,1216 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates.  All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist;
+
+import java.util.Map;
+import java.util.SortedMap;
+
+import com.sleepycat.collections.StoredMap;
+import com.sleepycat.collections.StoredSortedMap;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+/* */
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.Get;
+/* */
+import com.sleepycat.je.LockMode;
+/* */
+import com.sleepycat.je.OperationFailureException;
+import com.sleepycat.je.OperationResult;
+import com.sleepycat.je.ReadOptions;
+/* */
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.Transaction;
+/* */
+import com.sleepycat.je.WriteOptions;
+/* */

+/**
+ * The interface for accessing keys and entities via a primary or secondary
+ * index.
+ *
+ * <p>{@code EntityIndex} objects are thread-safe.  Multiple threads may
+ * safely call the methods of a shared {@code EntityIndex} object.</p>
+ *
+ * <p>An index is conceptually a map.  {key:value} mappings are stored in
+ * the index and accessed by key.  In fact, for interoperability with other
+ * libraries that use the standard Java {@link Map} or {@link SortedMap}
+ * interfaces, an {@code EntityIndex} may be accessed via these standard
+ * interfaces by calling the {@link #map} or {@link #sortedMap} methods (a
+ * usage sketch follows the list below).</p>
+ *
+ * <p>{@code EntityIndex} is an interface that is implemented by several
+ * classes in this package for different purposes.  Depending on the
+ * context, the key type (K) and value type (V) of the index take on
+ * different meanings.  The different classes that implement {@code
+ * EntityIndex} are:</p>
+ * <ul>
+ * <li>{@link PrimaryIndex} maps primary keys to entities.</li>
+ * <li>{@link SecondaryIndex} maps secondary keys to entities.</li>
+ * <li>{@link SecondaryIndex#keysIndex} maps secondary keys to primary
+ * keys.</li>
+ * <li>{@link SecondaryIndex#subIndex} maps primary keys to entities, for
+ * the subset of entities having a specified secondary key.</li>
+ * </ul>
+ *
+ * <p>In all cases, the index key type (K) is a primary or secondary key
+ * class.  The index value type (V) is an entity class in all cases except
+ * for a {@link SecondaryIndex#keysIndex}, when it is a primary key
+ * class.</p>

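+ *
+ * <p>(Editor's sketch, not part of the original Javadoc: the {@link #map}
+ * and {@link #sortedMap} views mentioned above can be used like any
+ * standard collection; {@code primaryIndex} is defined in the example that
+ * follows.)</p>
+ *
+ * <pre class="code">
+ * SortedMap&lt;Long, Employee&gt; map = primaryIndex.sortedMap();
+ * Employee first = map.get(map.firstKey());</pre>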
+ *
+ * <p>In the following example, an {@code Employee} entity with a {@code
+ * MANY_TO_ONE} secondary key is defined.</p>
+ *
+ * <pre class="code">
+ * {@literal @Entity}
+ * class Employee {
+ *
+ *     {@literal @PrimaryKey}
+ *     long id;
+ *
+ *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *     String department;
+ *
+ *     String name;
+ *
+ *     private Employee() {}
+ * }</pre>
+ *
+ * <p>Consider that we have stored the entities below:</p>
+ *
+ * <table border="1">
+ * <tr><th colspan="3">Entities</th></tr>
+ * <tr><th>ID</th><th>Department</th><th>Name</th></tr>
+ * <tr><td>1</td><td>Engineering</td><td>Jane Smith</td></tr>
+ * <tr><td>2</td><td>Sales</td><td>Joan Smith</td></tr>
+ * <tr><td>3</td><td>Engineering</td><td>John Smith</td></tr>
+ * <tr><td>4</td><td>Sales</td><td>Jim Smith</td></tr>
+ * </table>

+ *
+ * <p>{@link PrimaryIndex} maps primary keys to entities:</p>
+ *
+ * <pre class="code">
+ * {@code PrimaryIndex<Long, Employee>} primaryIndex =
+ *     store.getPrimaryIndex(Long.class, Employee.class);</pre>
+ *
+ * <table border="1">
+ * <tr><th colspan="2">primaryIndex</th></tr>
+ * <tr><th>Primary Key</th><th>Entity</th></tr>
+ * <tr><td>1</td><td>1 Engineering Jane Smith</td></tr>
+ * <tr><td>2</td><td>2 Sales Joan Smith</td></tr>
+ * <tr><td>3</td><td>3 Engineering John Smith</td></tr>
+ * <tr><td>4</td><td>4 Sales Jim Smith</td></tr>
+ * </table>

+ *
+ * <p>{@link SecondaryIndex} maps secondary keys to entities:</p>
+ *
+ * <pre class="code">
+ * {@code SecondaryIndex<String, Long, Employee>} secondaryIndex =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "department");</pre>
+ *
+ * <table border="1">
+ * <tr><th colspan="2">secondaryIndex</th></tr>
+ * <tr><th>Secondary Key</th><th>Entity</th></tr>
+ * <tr><td>Engineering</td><td>1 Engineering Jane Smith</td></tr>
+ * <tr><td>Engineering</td><td>3 Engineering John Smith</td></tr>
+ * <tr><td>Sales</td><td>2 Sales Joan Smith</td></tr>
+ * <tr><td>Sales</td><td>4 Sales Jim Smith</td></tr>
+ * </table>

+ *
+ * <p>{@link SecondaryIndex#keysIndex} maps secondary keys to primary
+ * keys:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<String, Long>} keysIndex = secondaryIndex.keysIndex();</pre>
+ *
+ * <table border="1">
+ * <tr><th colspan="2">keysIndex</th></tr>
+ * <tr><th>Secondary Key</th><th>Primary Key</th></tr>
+ * <tr><td>Engineering</td><td>1</td></tr>
+ * <tr><td>Engineering</td><td>3</td></tr>
+ * <tr><td>Sales</td><td>2</td></tr>
+ * <tr><td>Sales</td><td>4</td></tr>
+ * </table>

+ *
+ * <p>{@link SecondaryIndex#subIndex} maps primary keys to entities, for
+ * the subset of entities having a specified secondary key:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<Long, Employee>} subIndex =
+ *     secondaryIndex.subIndex("Engineering");</pre>
+ *
+ * <table border="1">
+ * <tr><th colspan="2">subIndex</th></tr>
+ * <tr><th>Primary Key</th><th>Entity</th></tr>
+ * <tr><td>1</td><td>1 Engineering Jane Smith</td></tr>
+ * <tr><td>3</td><td>3 Engineering John Smith</td></tr>
+ * </table>

+ *
+ * <h3>Accessing the Index</h3>
+ *
+ * <p>An {@code EntityIndex} provides a variety of methods for retrieving
+ * entities from an index.  It also provides methods for deleting entities.
+ * However, it does not provide methods for inserting and updating.  To
+ * insert and update entities, use the {@link PrimaryIndex#put} family of
+ * methods in the {@link PrimaryIndex} class.</p>
+ *
+ * <p>An {@code EntityIndex} supports two mechanisms for retrieving
+ * entities:</p>
+ * <ol>
+ * <li>The {@link #get} method returns a single value for a given key.  If
+ * there are multiple values with the same secondary key (duplicates), it
+ * returns the first entity in the duplicate set.</li>
+ * <li>An {@link EntityCursor} can be obtained using the {@link #keys} and
+ * {@link #entities} family of methods.  A cursor can be used to return all
+ * values in the index, including duplicates.  A cursor can also be used to
+ * return values within a specified range of keys.</li>
+ * </ol>
+ *
+ * <p>Using the example entities above, calling {@link #get} on the primary
+ * index will always return the employee with the given ID, or null if no
+ * such ID exists.  But calling {@link #get} on the secondary index will
+ * retrieve the first employee in the given department, which may not be
+ * very useful:</p>
+ *
+ * <pre class="code">
+ * Employee emp = primaryIndex.get(1);      // Returns by unique ID
+ * emp = secondaryIndex.get("Engineering"); // Returns first in department</pre>

+ *
+ * <p>Using a cursor, you can iterate through all duplicates in the
+ * secondary index:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = secondaryIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         if (entity.department.equals("Engineering")) {
+ *             // Do something with the entity...
+ *         }
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>But for a large database it is much more efficient to iterate over
+ * only those entities with the secondary key you're searching for.  This
+ * could be done by restricting a cursor to a range of keys:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor =
+ *     secondaryIndex.entities("Engineering", true, "Engineering", true);
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>

+ *
+ * <p>However, when you are interested only in the entities with a
+ * particular secondary key value, it is more convenient to use a
+ * sub-index:</p>
+ *
+ * <pre class="code">
+ * {@code EntityIndex<Long, Employee>} subIndex =
+ *     secondaryIndex.subIndex("Engineering");
+ * {@code EntityCursor<Employee>} cursor = subIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         // Do something with the entity...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>In addition to being more convenient than a cursor range, a sub-index
+ * allows retrieving by primary key:</p>
+ *
+ * <pre class="code">
+ * Employee emp = subIndex.get(1);</pre>
+ *
+ * <p>When using a sub-index, all operations performed on the sub-index are
+ * restricted to the single key that was specified when the sub-index was
+ * created.  For example, the following returns null because employee 2 is
+ * not in the Engineering department and therefore is not part of the
+ * sub-index:</p>
+ *
+ * <pre class="code">
+ * Employee emp = subIndex.get(2);</pre>

+ *
+ * <p>For more information on using cursors and cursor ranges, see {@link
+ * EntityCursor}.</p>
+ *
+ * <p>Note that when using an index, keys and values are stored and
+ * retrieved by value not by reference.  In other words, if an entity
+ * object is stored and then retrieved, or retrieved twice, each object
+ * will be a separate instance.  For example, in the code below the
+ * assertion will always fail.</p>
+ *
+ * <pre class="code">
+ * MyKey key = ...;
+ * MyEntity entity1 = index.get(key);
+ * MyEntity entity2 = index.get(key);
+ * assert entity1 == entity2; // always fails!</pre>

+ *
+ * <h3>Deleting from the Index</h3>
+ *
+ * <p>Any type of index may be used to delete entities with a specified key
+ * by calling {@link #delete}.  The important thing to keep in mind is that
+ * <em>all entities</em> with the specified key are deleted.  In a primary
+ * index, at most a single entity is deleted:</p>
+ *
+ * <pre class="code">
+ * primaryIndex.delete(1); // Deletes a single employee by unique ID</pre>
+ *
+ * <p>But in a secondary index, multiple entities may be deleted:</p>
+ *
+ * <pre class="code">
+ * secondaryIndex.delete("Engineering"); // Deletes all Engineering employees</pre>
+ *
+ * <p>This begs the question: How can a single entity be deleted without
+ * knowing its primary key?  The answer is to use cursors.  After locating
+ * an entity using a cursor, the entity can be deleted by calling {@link
+ * EntityCursor#delete}.</p>
+ *
+ * <h3>Transactions</h3>
+ *
+ * <p>Transactions can be used to provide standard ACID (Atomicity,
+ * Consistency, Isolation and Durability) guarantees when retrieving,
+ * storing and deleting entities.  This section provides a brief overview
+ * of how to use transactions with the Direct Persistence Layer.  For more
+ * information on using transactions, see <em>Writing Transactional
+ * Applications</em>.</p>
+ *
+ * <p>Transactions may be used only with a transactional {@link
+ * EntityStore}, which is one for which {@link StoreConfig#setTransactional
+ * StoreConfig.setTransactional(true)} has been called.  Likewise, a
+ * transactional store may only be used with a transactional {@link
+ * Environment}, which is one for which {@link
+ * EnvironmentConfig#setTransactional
+ * EnvironmentConfig.setTransactional(true)} has been called.  For
+ * example:</p>
+ *
+ * <pre class="code">
+ * EnvironmentConfig envConfig = new EnvironmentConfig();
+ * envConfig.setTransactional(true);
+ * envConfig.setAllowCreate(true);
+ * Environment env = new Environment(new File("/my/data"), envConfig);
+ *
+ * StoreConfig storeConfig = new StoreConfig();
+ * storeConfig.setTransactional(true);
+ * storeConfig.setAllowCreate(true);
+ * EntityStore store = new EntityStore(env, "myStore", storeConfig);</pre>
+ *
+ * <p>Transactions are represented by {@link Transaction} objects, which
+ * are part of the {@link com.sleepycat.je Base API}.  Transactions are
+ * created using the {@link Environment#beginTransaction
+ * Environment.beginTransaction} method.</p>
+ *
+ * <p>A transaction will include all operations for which the transaction
+ * object is passed as a method argument.  All retrieval, storage and
+ * deletion methods have an optional {@link Transaction} parameter for this
+ * purpose.  When a transaction is passed to a method that opens a cursor,
+ * all retrieval, storage and deletion operations performed using that
+ * cursor will be included in the transaction.</p>
+ *
+ * <p>A transaction may be committed by calling {@link Transaction#commit}
+ * or aborted by calling {@link Transaction#abort}.  For example, two
+ * employees may be deleted atomically with a transaction; in other words,
+ * either both are deleted or neither is deleted:</p>
+ *
+ * <pre class="code">
+ * Transaction txn = env.beginTransaction(null, null);
+ * try {
+ *     primaryIndex.delete(txn, 1);
+ *     primaryIndex.delete(txn, 2);
+ *     txn.commit();
+ *     txn = null;
+ * } finally {
+ *     if (txn != null) {
+ *         txn.abort();
+ *     }
+ * }</pre>
+ *
+ * <p><em>WARNING:</em> Transactions must always be committed or aborted to
+ * prevent resource leaks which could lead to the index becoming unusable
+ * or cause an {@code OutOfMemoryError}.  To ensure that a transaction is
+ * aborted in the face of exceptions, call {@link Transaction#abort} in a
+ * finally block.</p>
+ *
+ * <p>For a transactional store, storage and deletion operations are always
+ * transaction protected, whether or not a transaction is explicitly used.
+ * A null transaction argument means to perform the operation using
+ * auto-commit, or the implied thread transaction if an XAEnvironment is
+ * being used.  A transaction is automatically started as part of the
+ * operation and is automatically committed if the operation completes
+ * successfully.  The transaction is automatically aborted if an exception
+ * occurs during the operation, and the exception is re-thrown to the
+ * caller.  For example, each employee is deleted using an auto-commit
+ * transaction below, but it is possible that employee 1 will be deleted
+ * and employee 2 will not be deleted, if an error or crash occurs while
+ * deleting employee 2:</p>
+ *
+ * <pre class="code">
+ * primaryIndex.delete(null, 1);
+ * primaryIndex.delete(null, 2);</pre>
+ *
+ * <p>When retrieving entities, a null transaction argument means to
+ * perform the operation non-transactionally.  The operation is performed
+ * outside the scope of any transaction, without providing transactional
+ * ACID guarantees.  If an implied thread transaction is present (i.e. if
+ * an XAEnvironment is being used), that transaction is used.  When a
+ * non-transactional store is used, transactional ACID guarantees are also
+ * not provided.</p>
+ *
+ * <p>For non-transactional and auto-commit usage, overloaded signatures
+ * for retrieval, storage and deletion methods are provided to avoid having
+ * to pass a null transaction argument.  For example, {@link #delete} may
+ * be called instead of {@link #delete(Transaction,Object)}.  Thus, the
+ * following code is equivalent to the code above where null was passed for
+ * the transaction:</p>
+ *
+ * <pre class="code">
+ * primaryIndex.delete(1);
+ * primaryIndex.delete(2);</pre>
+ *
+ * <p>For retrieval methods the overloaded signatures also include an
+ * optional {@link LockMode} parameter, and overloaded signatures for
+ * opening cursors include an optional {@link CursorConfig} parameter.
+ * These parameters are described further below in the Locking and Lock
+ * Modes section.</p>
+ *
+ * <h3>Transactions and Cursors</h3>
+ *
+ * <p>There are two special considerations when using cursors with
+ * transactions.  First, for a transactional store, a non-null transaction
+ * must be passed to methods that open a cursor if that cursor will be used
+ * to delete or update entities.  Cursors do not perform auto-commit when a
+ * null transaction is explicitly passed or implied by the method
+ * signature.  For example, the following code will throw {@link
+ * DatabaseException} when the {@link EntityCursor#delete} method is
+ * called:</p>
+ *
+ * <pre class="code">
+ * // Does not work with a transactional store!
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities();
+ * try {
+ *     for (Employee entity : cursor) {
+ *         cursor.delete(); // Will throw DatabaseException.
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>Instead, the {@link #entities(Transaction,CursorConfig)} signature
+ * must be used and a non-null transaction must be passed:</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(txn, null);
+ * try {
+ *     for (Employee entity : cursor) {
+ *         cursor.delete();
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The second consideration is that error handling is more complex when
+ * using both transactions and cursors, for the following reasons:</p>
+ * <ol>
+ * <li>When an exception occurs, the transaction should be aborted.</li>
+ * <li>Cursors must be closed whether or not an exception occurs.</li>
+ * <li>Cursors must be closed before committing or aborting the
+ * transaction.</li>
+ * </ol>
+ *
+ * <p>For example:</p>
+ *
+ * <pre class="code">
+ * Transaction txn = env.beginTransaction(null, null);
+ * {@code EntityCursor<Employee>} cursor = null;
+ * try {
+ *     cursor = primaryIndex.entities(txn, null);
+ *     for (Employee entity : cursor) {
+ *         cursor.delete();
+ *     }
+ *     cursor.close();
+ *     cursor = null;
+ *     txn.commit();
+ *     txn = null;
+ * } finally {
+ *     if (cursor != null) {
+ *         cursor.close();
+ *     }
+ *     if (txn != null) {
+ *         txn.abort();
+ *     }
+ * }</pre>

+ *
+ * <h3>Locking and Lock Modes</h3>
+ *
+ * <p>This section provides a brief overview of locking and describes how
+ * lock modes are used with the Direct Persistence Layer.  For more
+ * information on locking, see <em>Writing Transactional
+ * Applications</em>.</p>
+ *
+ * <p>When using transactions, locks are normally acquired on each entity
+ * that is retrieved or stored.  The locks are used to isolate one
+ * transaction from another.  Locks are normally released only when the
+ * transaction is committed or aborted.</p>
+ *
+ * <p>When not using transactions, locks are also normally acquired on each
+ * entity that is retrieved or stored.  However, these locks are released
+ * when the operation is complete.  When using cursors, in order to provide
+ * <em>cursor stability</em> locks are held until the cursor is moved to a
+ * different entity or closed.</p>
+ *
+ * <p>This default locking behavior provides full transactional ACID
+ * guarantees and cursor stability.  However, application performance can
+ * sometimes be improved by compromising these guarantees.  As described in
+ * <em>Writing Transactional Applications</em>, the {@link LockMode} and
+ * {@link CursorConfig} parameters are two of the mechanisms that can be
+ * used to make compromises.</p>
+ *
+ * <p>For example, imagine that you need an approximate count of all
+ * entities matching certain criteria, and it is acceptable for entities to
+ * be changed by other threads or other transactions while performing this
+ * query.  {@link LockMode#READ_UNCOMMITTED} can be used to perform the
+ * retrievals without acquiring any locks.  This reduces memory
+ * consumption, does less processing, and improves concurrency.</p>
+ *
+ * <pre class="code">
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(txn, null);
+ * try {
+ *     Employee entity;
+ *     while ((entity = cursor.next(LockMode.READ_UNCOMMITTED)) != null) {
+ *         // Examine the entity and accumulate totals...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>The {@link LockMode} parameter specifies locking behavior on a
+ * per-operation basis.  If null or {@link LockMode#DEFAULT} is specified,
+ * the default lock mode is used.</p>
+ *
+ * <p>It is also possible to specify the default locking behavior for a
+ * cursor using {@link CursorConfig}.  The example below is equivalent to
+ * the example above:</p>
+ *
+ * <pre class="code">
+ * CursorConfig config = new CursorConfig();
+ * config.setReadUncommitted(true);
+ * {@code EntityCursor<Employee>} cursor = primaryIndex.entities(txn, config);
+ * try {
+ *     Employee entity;
+ *     while ((entity = cursor.next()) != null) {
+ *         // Examine the entity and accumulate totals...
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }</pre>
+ *
+ * <p>Note that {@code READ_UNCOMMITTED} can be used with a key cursor to
+ * reduce I/O, potentially providing significant performance benefits.  See
+ * Key Cursor Optimization with READ_UNCOMMITTED.</p>
+ *
+ * <p>The use of other lock modes, cursor configuration, and transaction
+ * configuration are discussed in <em>Writing Transactional
+ * Applications</em>.</p>

+ *
+ * <h3>Performing Transaction Retries</h3>
+ *
+ * <p>Lock conflict handling is another important topic discussed in
+ * <em>Writing Transactional Applications</em>.  To go along with that
+ * material, here we show a lock conflict handling loop in the context of
+ * the Direct Persistence Layer.  The example below shows deleting all
+ * entities in a primary index in a single transaction.  If a lock conflict
+ * occurs, the transaction is aborted and the operation is retried.</p>
+ *
+ * <p>This is a DPL version of the equivalent example code for the base
+ * API.</p>
+ *
+ * <p>The following example code illustrates the recommended approach.
+ * Note that the {@code Environment.beginTransaction} and {@code
+ * Transaction.commit} calls are intentionally inside the {@code try}
+ * block.  When using JE-HA, this will make it easy to add a {@code catch}
+ * for other exceptions that can be resolved by retrying the transaction,
+ * such as consistency exceptions.</p>
+ *
+ * <pre class="code">
+ * void doTransaction(final Environment env,
+ *                    final {@code PrimaryIndex<Long, Employee>} primaryIndex,
+ *                    final int maxTries)
+ *     throws DatabaseException, InterruptedException {
+ *
+ *     boolean success = false;
+ *     long sleepMillis = 0;
+ *     for (int i = 0; i < maxTries; i++) {
+ *         // Sleep before retrying.
+ *         if (sleepMillis != 0) {
+ *             Thread.sleep(sleepMillis);
+ *             sleepMillis = 0;
+ *         }
+ *         Transaction txn = null;
+ *         try {
+ *             txn = env.beginTransaction(null, null);
+ *             final {@code EntityCursor<Employee>} cursor =
+ *                 primaryIndex.entities(txn, null);
+ *             try {
+ *                 // INSERT APP-SPECIFIC CODE HERE:
+ *                 // Perform read and write operations, for example:
+ *                 for (Employee entity : cursor) {
+ *                     cursor.delete();
+ *                 }
+ *             } finally {
+ *                 cursor.close();
+ *             }
+ *             txn.commit();
+ *             success = true;
+ *             return;
+ *         } catch (LockConflictException e) {
+ *             sleepMillis = LOCK_CONFLICT_RETRY_SEC * 1000;
+ *             continue;
+ *         } finally {
+ *             if (!success) {
+ *                 if (txn != null) {
+ *                     txn.abort();
+ *                 }
+ *             }
+ *         }
+ *     }
+ *     // INSERT APP-SPECIFIC CODE HERE:
+ *     // Transaction failed, despite retries.
+ *     // Take some app-specific course of action.
+ * }</pre>

+ *
+ * <h3>Low Level Access</h3>
+ *
+ * <p>Each Direct Persistence Layer index is associated with an underlying
+ * {@link Database} or {@link SecondaryDatabase} defined in the {@link
+ * com.sleepycat.je Base API}.  At this level, an index is a Btree managed
+ * by the Berkeley DB Java Edition transactional storage engine.  Although
+ * you may never need to work at the {@code Base API} level, keep in mind
+ * that some types of performance tuning can be done by configuring the
+ * underlying databases.  See the {@link EntityStore} class for more
+ * information on database and sequence configuration.</p>
+ *
+ * <p>If you wish to access an index using the {@code Base API}, you may
+ * call the {@link PrimaryIndex#getDatabase} or {@link
+ * SecondaryIndex#getDatabase} method to get the underlying database.  To
+ * translate between entity or key objects and {@link DatabaseEntry}
+ * objects at this level, use the bindings returned by {@link
+ * PrimaryIndex#getEntityBinding}, {@link PrimaryIndex#getKeyBinding}, and
+ * {@link SecondaryIndex#getKeyBinding}.</p>

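+ *
+ * <p>(Editor's sketch, not part of the original Javadoc: base-API access
+ * via the bindings described above.  {@code keyEntry} and {@code
+ * dataEntry} are illustrative names.)</p>
+ *
+ * <pre class="code">
+ * Database db = primaryIndex.getDatabase();
+ * EntryBinding&lt;Long&gt; keyBinding = primaryIndex.getKeyBinding();
+ * EntityBinding&lt;Employee&gt; entityBinding = primaryIndex.getEntityBinding();
+ *
+ * DatabaseEntry keyEntry = new DatabaseEntry();
+ * DatabaseEntry dataEntry = new DatabaseEntry();
+ * keyBinding.objectToEntry(1L, keyEntry);
+ * if (db.get(null, keyEntry, dataEntry, LockMode.DEFAULT) ==
+ *         OperationStatus.SUCCESS) {
+ *     Employee emp = entityBinding.entryToObject(keyEntry, dataEntry);
+ * }</pre>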
+ *
+ * @author Mark Hayes
+ */
+public interface EntityIndex<K, V> {
+
+ /**
+ * Returns the underlying database for this index.
+ *
+ * @return the database.
+ */
+ Database getDatabase();
+
+ /**
+ * Checks for existence of a key in this index.
+ *
+ * <p>The operation will not be transaction protected, and {@link
+ * LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * <p>{@code READ_UNCOMMITTED} can be used with this method to reduce I/O,
+ * since the record data item will not be read.  This is the same benefit
+ * as described in Key Cursor Optimization with READ_UNCOMMITTED.</p>
+ *
+ * @param key the key to search for.
+ *
+ * @return whether the key exists in the index.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ boolean contains(K key)
+     throws DatabaseException;
+
+ /**
+ * Checks for existence of a key in this index.
+ *
+ * <p>{@code READ_UNCOMMITTED} can be used with this method to reduce I/O,
+ * since the record data item will not be read.  This is the same benefit
+ * as described in Key Cursor Optimization with READ_UNCOMMITTED.</p>
+ *
+ * @param txn the transaction used to protect this operation, or null
+ * if the operation should not be transaction protected.
+ *
+ * @param key the key to search for.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return whether the key exists in the index.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ boolean contains(Transaction txn, K key, LockMode lockMode)
+     throws DatabaseException;
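+ /*
+  * Editor's sketch (not part of the original Javadoc): a key-only
+  * existence check; with READ_UNCOMMITTED the record data item is not
+  * read, which is the I/O optimization noted above.
+  *
+  *     boolean exists =
+  *         primaryIndex.contains(null, 1L, LockMode.READ_UNCOMMITTED);
+  */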

+ /**
+ * Gets an entity via a key of this index.
+ *
+ * <p>The operation will not be transaction protected, and {@link
+ * LockMode#DEFAULT} is used implicitly.</p>
+ *
+ * @param key the key to search for.
+ *
+ * @return the value mapped to the given key, or null if the key is not
+ * present in the index.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V get(K key)
+     throws DatabaseException;
+
+ /**
+ * Gets an entity via a key of this index.
+ *
+ * @param txn the transaction used to protect this operation, or null
+ * if the operation should not be transaction protected.
+ *
+ * @param key the key to search for.
+ *
+ * @param lockMode the lock mode to use for this operation, or null to
+ * use {@link LockMode#DEFAULT}.
+ *
+ * @return the value mapped to the given key, or null if the key is not
+ * present in the index.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ */
+ V get(Transaction txn, K key, LockMode lockMode)
+     throws DatabaseException;
+
+ /* */
+ /**
+ * Gets an entity via a key of this index, using Get type and ReadOptions
+ * parameters, and returning an EntityResult.
+ *
+ * @param txn the transaction used to protect this operation, or null
+ * if the operation should not be transaction protected.
+ *
+ * @param key the key to search for.
+ *
+ * @param getType must be {@link Get#SEARCH}.
+ *
+ * @param options the ReadOptions, or null to use default options.
+ *
+ * @return the EntityResult, including the value mapped to the given key,
+ * or null if the key is not present in the index.
+ *
+ * @throws OperationFailureException if one of the Read Operation
+ * Failures occurs.
+ *
+ * @throws EnvironmentFailureException if an unexpected, internal or
+ * environment-wide failure occurs.
+ *
+ * @throws DatabaseException the base class for all BDB exceptions.
+ *
+ * @since 7.0
+ */
+ EntityResult<V> get(Transaction txn,
+                     K key,
+                     Get getType,
+                     ReadOptions options)
+     throws DatabaseException;
+ /* */
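+ /*
+  * Editor's sketch (not part of the original Javadoc): the Get.SEARCH form
+  * added in JE 7.0; EntityResult.value() is assumed, as in the base API.
+  *
+  *     EntityResult<Employee> result =
+  *         primaryIndex.get(null, 1L, Get.SEARCH, null);
+  *     Employee emp = (result != null) ? result.value() : null;
+  */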

        This operation is faster than obtaining a count by scanning the index + * manually, and will not perturb the current contents of the cache. + * However, the count is not guaranteed to be accurate if there are + * concurrent updates. Note that this method does scan a significant + * portion of the index and should be considered a fairly expensive + * operation.

        + * + *

This operation will disable deletion of log files by the JE log + * cleaner during its execution and will consume a certain amount of + * memory (but without affecting the memory that is available for the + * JE cache). To avoid excessive memory consumption (and a potential + * {@code OutOfMemoryError}) this method places an internal limit on + * its memory consumption. If this limit is reached, the method will + * still work properly, but its performance will degrade. To specify + * a different memory limit than the one used by this method, use the + * {@link EntityIndex#count(long)} method.

        + * + * + * + * @return the number of entities in this index. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + long count() + throws DatabaseException; + + /* */ + + /** + * Returns a non-transactional count of the entities in this index. + * + *

        This operation is faster than obtaining a count by scanning the index + * manually, and will not perturb the current contents of the cache. + * However, the count is not guaranteed to be accurate if there are + * concurrent updates. Note that this method does scan a significant + * portion of the index and should be considered a fairly expensive + * operation.

        + * + *

        This operation will disable deletion of log files by the JE log + * cleaner during its execution and will consume a certain amount of + * memory (but without affecting the memory that is available for the + * JE cache). To avoid excessive memory consumption (and a potential + * {@code OutOfMemoryError}) this method takes as input an upper bound + * on the memory it may consume. If this limit is reached, the method + * will still work properly, but its performance will degrade.
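A short sketch of the two count variants, assuming an open EntityIndex named index (the 10 MB limit is illustrative):

    // Count with the internal default memory limit.
    long total = index.count();

    // Count with a caller-supplied memory limit, in bytes.
    long totalCapped = index.count(10L * 1024 * 1024);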

        + * + * @return the number of entities in this index. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + long count(long memoryLimit) + throws DatabaseException; + + /* */ + + /** + * Deletes all entities with a given index key. + * + *

        Auto-commit is used implicitly if the store is transactional.
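A minimal sketch of both delete forms, assuming a transactional store with an EntityIndex<Long, Person> named index and its Environment named env:

    // Delete under an implicit auto-commit transaction.
    boolean deleted = index.delete(42L);

    // Delete under an explicit transaction.
    Transaction txn = env.beginTransaction(null, null);
    boolean deletedTxn = index.delete(txn, 42L);
    txn.commit();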

+ * + * @param key the key to search for. + * + * @return whether any entities were deleted. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + boolean delete(K key) + throws DatabaseException; + + /** + * Deletes all entities with a given index key. + * + * @param txn the transaction used to protect this operation, null to use + * auto-commit, or null if the store is non-transactional. + * + * @param key the key to search for. + * + * @return whether any entities were deleted. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + boolean delete(Transaction txn, K key) + throws DatabaseException; + + /* */ + /** + * Deletes all entities with a given index key, using a WriteOptions + * parameter and returning an OperationResult. + * + * @param txn the transaction used to protect this operation, null to use + * auto-commit, or null if the store is non-transactional. + * + * @param key the key to search for. + * + * @param options the WriteOptions, or null to use default options. + * + * @return the OperationResult if any entities were deleted, else null. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws DatabaseException the base class for all BDB exceptions. + * + * @since 7.0 + */ + OperationResult delete(Transaction txn, K key, WriteOptions options) + throws DatabaseException; + /* */ + + /** + * Opens a cursor for traversing all keys in this index. + * + *

        The operations performed with the cursor will not be transaction + * protected, and {@link CursorConfig#DEFAULT} is used implicitly. If the + * store is transactional, the cursor may not be used to update or delete + * entities.

        + * + * + *

Note that {@code READ_UNCOMMITTED} can be used with a key cursor to + * reduce I/O, potentially providing significant performance benefits. See + * Key Cursor Optimization with + * READ_UNCOMMITTED.
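A sketch of this optimization, assuming an EntityIndex<Long, Person> named index used non-transactionally:

    // Key-only scan; with READ_UNCOMMITTED, record data that is not
    // already in the JE cache is not read from disk.
    EntityCursor<Long> keys = index.keys(null, CursorConfig.READ_UNCOMMITTED);
    try {
        for (Long key : keys) {
            System.out.println(key);
        }
    } finally {
        keys.close();
    }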

        + * + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor keys() + throws DatabaseException; + + /** + * Opens a cursor for traversing all keys in this index. + * + * + *

Note that {@code READ_UNCOMMITTED} can be used with a key cursor to + * reduce I/O, potentially providing significant performance benefits. See + * Key Cursor Optimization with + * READ_UNCOMMITTED.

        + * + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the store is non-transactional, null must be specified. + * For a transactional store the transaction is optional for read-only + * access and required for read-write access. + * + * @param config the cursor configuration that determines the default lock + * mode used for all cursor operations, or null to implicitly use {@link + * CursorConfig#DEFAULT}. + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor keys(Transaction txn, CursorConfig config) + throws DatabaseException; + + /** + * Opens a cursor for traversing all entities in this index. + * + *

        The operations performed with the cursor will not be transaction + * protected, and {@link CursorConfig#DEFAULT} is used implicitly. If the + * store is transactional, the cursor may not be used to update or delete + * entities.

        + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor entities() + throws DatabaseException; + + /** + * Opens a cursor for traversing all entities in this index. + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the store is non-transactional, null must be specified. + * For a transactional store the transaction is optional for read-only + * access and required for read-write access. + * + * @param config the cursor configuration that determines the default lock + * mode used for all cursor operations, or null to implicitly use {@link + * CursorConfig#DEFAULT}. + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor entities(Transaction txn, + CursorConfig config) + throws DatabaseException; + + /** + * Opens a cursor for traversing keys in a key range. + * + *

        The operations performed with the cursor will not be transaction + * protected, and {@link CursorConfig#DEFAULT} is used implicitly. If the + * store is transactional, the cursor may not be used to update or delete + * entities.

        + * + * + *

Note that {@code READ_UNCOMMITTED} can be used with a key cursor to + * reduce I/O, potentially providing significant performance benefits. See + * Key Cursor Optimization with + * READ_UNCOMMITTED.

        + * + * + * @param fromKey is the lower bound of the key range, or null if the range + * has no lower bound. + * + * @param fromInclusive is true if keys greater than or equal to fromKey + * should be included in the key range, or false if only keys greater than + * fromKey should be included. + * + * @param toKey is the upper bound of the key range, or null if the range + * has no upper bound. + * + * @param toInclusive is true if keys less than or equal to toKey should be + * included in the key range, or false if only keys less than toKey should + * be included. + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor keys(K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive) + throws DatabaseException; + + /** + * Opens a cursor for traversing keys in a key range. + * + * + *

        Key Cursor Optimization with + * READ_UNCOMMITTED

        + * + *

        Using a key cursor potentially has a large performance benefit when + * the {@code READ_UNCOMMITTED} isolation mode is used. In this case, if + * the record data is not in the JE cache, it will not be read from disk. + * The performance benefit is potentially large because random access disk + * reads may be reduced. Examples are:

        + *
          + *
        • Scanning all records in key order, when the entity is not needed and + * {@code READ_UNCOMMITTED} isolation is acceptable.
+ *
        • Skipping over records quickly to perform approximate pagination with + * {@code READ_UNCOMMITTED} isolation.
+ *
        + * + *

        For other isolation modes ({@code READ_COMMITTED}, {@code + * REPEATABLE_READ} and {@code SERIALIZABLE}), the performance benefit of a + * key cursor is not as significant. In this case, the data item must be + * read into the JE cache if it is not already present, in order to lock + * the record. The only performance benefit is that the data will not be + * copied from the JE cache to the application's entry parameter, and will + * not be unmarshalled into an entity object.

        + * + *

        For information on specifying isolation modes, see {@link LockMode}, + * {@link CursorConfig} and {@link com.sleepycat.je.TransactionConfig}.
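A sketch of a bounded scan using the key-range form of this method, assuming an EntityIndex<String, Person> named index (the bounds are illustrative):

    // Iterate keys k with "A" <= k < "M", non-transactionally.
    EntityCursor<String> range = index.keys("A", true, "M", false);
    try {
        for (String key : range) {
            System.out.println(key);
        }
    } finally {
        range.close();
    }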

        + * + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the store is non-transactional, null must be specified. + * For a transactional store the transaction is optional for read-only + * access and required for read-write access. + * + * @param fromKey is the lower bound of the key range, or null if the range + * has no lower bound. + * + * @param fromInclusive is true if keys greater than or equal to fromKey + * should be included in the key range, or false if only keys greater than + * fromKey should be included. + * + * @param toKey is the upper bound of the key range, or null if the range + * has no upper bound. + * + * @param toInclusive is true if keys less than or equal to toKey should be + * included in the key range, or false if only keys less than toKey should + * be included. + * + * @param config the cursor configuration that determines the default lock + * mode used for all cursor operations, or null to implicitly use {@link + * CursorConfig#DEFAULT}. + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor keys(Transaction txn, + K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive, + CursorConfig config) + throws DatabaseException; + + /** + * Opens a cursor for traversing entities in a key range. + * + *

        The operations performed with the cursor will not be transaction + * protected, and {@link CursorConfig#DEFAULT} is used implicitly. If the + * store is transactional, the cursor may not be used to update or delete + * entities.

        + * + * @param fromKey is the lower bound of the key range, or null if the range + * has no lower bound. + * + * @param fromInclusive is true if keys greater than or equal to fromKey + * should be included in the key range, or false if only keys greater than + * fromKey should be included. + * + * @param toKey is the upper bound of the key range, or null if the range + * has no upper bound. + * + * @param toInclusive is true if keys less than or equal to toKey should be + * included in the key range, or false if only keys less than toKey should + * be included. + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor entities(K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive) + throws DatabaseException; + + /** + * Opens a cursor for traversing entities in a key range. + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the store is non-transactional, null must be specified. + * For a transactional store the transaction is optional for read-only + * access and required for read-write access. + * + * @param fromKey is the lower bound of the key range, or null if the range + * has no lower bound. + * + * @param fromInclusive is true if keys greater than or equal to fromKey + * should be included in the key range, or false if only keys greater than + * fromKey should be included. + * + * @param toKey is the upper bound of the key range, or null if the range + * has no upper bound. + * + * @param toInclusive is true if keys less than or equal to toKey should be + * included in the key range, or false if only keys less than toKey should + * be included. + * + * @param config the cursor configuration that determines the default lock + * mode used for all cursor operations, or null to implicitly use {@link + * CursorConfig#DEFAULT}. + * + * @return the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + EntityCursor entities(Transaction txn, + K fromKey, + boolean fromInclusive, + K toKey, + boolean toInclusive, + CursorConfig config) + throws DatabaseException; + + /** + * Returns a standard Java map based on this entity index. The {@link + * StoredMap} returned is defined by the {@linkplain + * com.sleepycat.collections Collections API}. Stored collections conform + * to the standard Java collections framework interface. + * + * @return the map. + */ + Map map(); + + /** + * Returns a standard Java sorted map based on this entity index. The + * {@link StoredSortedMap} returned is defined by the {@linkplain + * com.sleepycat.collections Collections API}. Stored collections conform + * to the standard Java collections framework interface. + * + * @return the map. + */ + SortedMap sortedMap(); +} diff --git a/src/com/sleepycat/persist/EntityJoin.java b/src/com/sleepycat/persist/EntityJoin.java new file mode 100644 index 0000000..5a5d959 --- /dev/null +++ b/src/com/sleepycat/persist/EntityJoin.java @@ -0,0 +1,385 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.EnvironmentFailureException; +/* */ +import com.sleepycat.je.JoinCursor; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; + +/** + * Performs an equality join on two or more secondary keys. + * + *

        {@code EntityJoin} objects are thread-safe. Multiple threads may safely + * call the methods of a shared {@code EntityJoin} object.

        + * + *

        An equality join is a match on all entities in a given primary index that + * have two or more specific secondary key values. Note that key ranges may + * not be matched by an equality join, only exact keys are matched.

        + * + *

        For example:

        + *
        + *  // Index declarations -- see package summary example.
        + *  //
+ *  {@literal PrimaryIndex<String, Person> personBySsn;}
+ *  {@literal SecondaryIndex<String, String, Person> personByParentSsn;}
+ *  {@literal SecondaryIndex<Long, String, Person> personByEmployerIds;}
        + *  Employer employer = ...;
        + *
        + *  // Match on all Person objects having parentSsn "111-11-1111" and also
        + *  // containing an employerId of employer.id.  In other words, match on all
        + *  // of Bob's children that work for a given employer.
        + *  //
+ *  {@literal EntityJoin<String, Person> join = new EntityJoin<String, Person>(personBySsn);}
        + *  join.addCondition(personByParentSsn, "111-11-1111");
        + *  join.addCondition(personByEmployerIds, employer.id);
        + *
        + *  // Perform the join operation by traversing the results with a cursor.
        + *  //
+ *  {@literal ForwardCursor<Person> results = join.entities();}
        + *  try {
        + *      for (Person person : results) {
        + *          System.out.println(person.ssn + ' ' + person.name);
        + *      }
        + *  } finally {
        + *      results.close();
        + *  }
        + * + * @author Mark Hayes + */ +public class EntityJoin { + + private PrimaryIndex primary; + private List conditions; + + /** + * Creates a join object for a given primary index. + * + * @param index the primary index on which the join will operate. + */ + public EntityJoin(PrimaryIndex index) { + primary = index; + conditions = new ArrayList(); + } + + /** + * Adds a secondary key condition to the equality join. Only entities + * having the given key value in the given secondary index will be returned + * by the join operation. + * + * @param index the secondary index containing the given key value. + * + * @param key the key value to match during the join. + * + * @param the secondary key class. + */ + public void addCondition(SecondaryIndex index, SK key) { + + /* Make key entry. */ + DatabaseEntry keyEntry = new DatabaseEntry(); + index.getKeyBinding().objectToEntry(key, keyEntry); + + /* Use keys database if available. */ + Database db = index.getKeysDatabase(); + if (db == null) { + db = index.getDatabase(); + } + + /* Add condition. */ + conditions.add(new Condition(db, keyEntry)); + } + + /** + * Opens a cursor that returns the entities qualifying for the join. The + * join operation is performed as the returned cursor is accessed. + * + *

        The operations performed with the cursor will not be transaction + * protected, and {@link CursorConfig#DEFAULT} is used implicitly.

        + * + * @return the cursor. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws IllegalStateException if less than two conditions were added. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public ForwardCursor entities() + throws DatabaseException { + + return entities(null, null); + } + + /** + * Opens a cursor that returns the entities qualifying for the join. The + * join operation is performed as the returned cursor is accessed. + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the store is non-transactional, null must be specified. + * For a transactional store the transaction is optional for read-only + * access and required for read-write access. + * + * @param config the cursor configuration that determines the default lock + * mode used for all cursor operations, or null to implicitly use {@link + * CursorConfig#DEFAULT}. + * + * @return the cursor. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws IllegalStateException if less than two conditions were added. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public ForwardCursor entities(Transaction txn, CursorConfig config) + throws DatabaseException { + + return new JoinForwardCursor(txn, config, false); + } + + /** + * Opens a cursor that returns the primary keys of entities qualifying for + * the join. The join operation is performed as the returned cursor is + * accessed. + * + *

        The operations performed with the cursor will not be transaction + * protected, and {@link CursorConfig#DEFAULT} is used implicitly.
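A sketch mirroring the entities example above, but returning only primary keys, assuming the join object built in the class example (Person keyed by a String ssn):

    // Iterate the primary keys of entities matching all join conditions.
    ForwardCursor<String> matches = join.keys();
    try {
        for (String ssn : matches) {
            System.out.println(ssn);
        }
    } finally {
        matches.close();
    }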

        + * + * @return the cursor. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws IllegalStateException if less than two conditions were added. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public ForwardCursor keys() + throws DatabaseException { + + return keys(null, null); + } + + /** + * Opens a cursor that returns the primary keys of entities qualifying for + * the join. The join operation is performed as the returned cursor is + * accessed. + * + * @param txn the transaction used to protect all operations performed with + * the cursor, or null if the operations should not be transaction + * protected. If the store is non-transactional, null must be specified. + * For a transactional store the transaction is optional for read-only + * access and required for read-write access. + * + * @param config the cursor configuration that determines the default lock + * mode used for all cursor operations, or null to implicitly use {@link + * CursorConfig#DEFAULT}. + * + * @return the cursor. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws IllegalStateException if less than two conditions were added. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public ForwardCursor keys(Transaction txn, CursorConfig config) + throws DatabaseException { + + return new JoinForwardCursor(txn, config, true); + } + + private static class Condition { + + private Database db; + private DatabaseEntry key; + + Condition(Database db, DatabaseEntry key) { + this.db = db; + this.key = key; + } + + Cursor openCursor(Transaction txn, CursorConfig config) + throws DatabaseException { + + OperationStatus status; + Cursor cursor = db.openCursor(txn, config); + try { + DatabaseEntry data = BasicIndex.NO_RETURN_ENTRY; + status = cursor.getSearchKey(key, data, null); + } catch (DatabaseException e) { + try { + cursor.close(); + } catch (DatabaseException ignored) {} + throw e; + } + if (status == OperationStatus.SUCCESS) { + return cursor; + } else { + cursor.close(); + return null; + } + } + } + + private class JoinForwardCursor implements ForwardCursor { + + private Cursor[] cursors; + private JoinCursor joinCursor; + private boolean doKeys; + + JoinForwardCursor(Transaction txn, CursorConfig config, boolean doKeys) + throws DatabaseException { + + this.doKeys = doKeys; + try { + cursors = new Cursor[conditions.size()]; + for (int i = 0; i < cursors.length; i += 1) { + Condition cond = conditions.get(i); + Cursor cursor = cond.openCursor(txn, config); + if (cursor == null) { + /* Leave joinCursor null. */ + doClose(null); + return; + } + cursors[i] = cursor; + } + joinCursor = primary.getDatabase().join(cursors, null); + } catch (DatabaseException e) { + /* doClose will throw e. 
*/ + doClose(e); + } + } + + public V next() + throws DatabaseException { + + return next(null); + } + + public V next(LockMode lockMode) + throws DatabaseException { + + if (joinCursor == null) { + return null; + } + if (doKeys) { + DatabaseEntry key = new DatabaseEntry(); + OperationStatus status = joinCursor.getNext(key, lockMode); + if (status == OperationStatus.SUCCESS) { + EntryBinding binding = primary.getKeyBinding(); + return (V) binding.entryToObject(key); + } + } else { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = + joinCursor.getNext(key, data, lockMode); + if (status == OperationStatus.SUCCESS) { + EntityBinding binding = primary.getEntityBinding(); + return (V) binding.entryToObject(key, data); + } + } + return null; + } + + public Iterator iterator() { + return iterator(null); + } + + public Iterator iterator(LockMode lockMode) { + return new BasicIterator(this, lockMode); + } + + public void close() + throws DatabaseException { + + doClose(null); + } + + private void doClose(DatabaseException firstException) + throws DatabaseException { + + if (joinCursor != null) { + try { + joinCursor.close(); + joinCursor = null; + } catch (DatabaseException e) { + if (firstException == null) { + firstException = e; + } + } + } + for (int i = 0; i < cursors.length; i += 1) { + Cursor cursor = cursors[i]; + if (cursor != null) { + try { + cursor.close(); + cursors[i] = null; + } catch (DatabaseException e) { + if (firstException == null) { + firstException = e; + } + } + } + } + if (firstException != null) { + throw firstException; + } + } + } +} diff --git a/src/com/sleepycat/persist/EntityResult.java b/src/com/sleepycat/persist/EntityResult.java new file mode 100644 index 0000000..3d8a1d7 --- /dev/null +++ b/src/com/sleepycat/persist/EntityResult.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.je.OperationResult; + +/** + * Used to return an entity value from a 'get' operation along with an + * OperationResult. If the operation fails, null is returned. If the operation + * succeeds and a non-null EntityResult is returned, the contained entity value + * and OperationResult are guaranteed to be non-null. + */ +public class EntityResult { + + private final V value; + private final OperationResult result; + + EntityResult(V value, OperationResult result) { + assert value != null; + assert result != null; + + this.value = value; + this.result = result; + } + + /** + * Returns the entity value resulting from the operation. + * + * @return the non-null entity value. + */ + public V value() { + return value; + } + + /** + * Returns the OperationResult resulting from the operation. + * + * @return the non-null OperationResult. 
+ */ + public OperationResult result() { + return result; + } +} diff --git a/src/com/sleepycat/persist/EntityStore.java b/src/com/sleepycat/persist/EntityStore.java new file mode 100644 index 0000000..e58697a --- /dev/null +++ b/src/com/sleepycat/persist/EntityStore.java @@ -0,0 +1,941 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import java.io.Closeable; +import java.util.Set; + +import com.sleepycat.je.Database; // for javadoc +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +/* */ +import com.sleepycat.je.EnvironmentFailureException ; // for javadoc +import com.sleepycat.je.OperationFailureException ; // for javadoc +/* */ +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.Sequence; +import com.sleepycat.je.SequenceConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.evolve.EvolveConfig; +import com.sleepycat.persist.evolve.EvolveStats; +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.impl.Store; +import com.sleepycat.persist.model.DeleteAction; +import com.sleepycat.persist.model.Entity; // for javadoc +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; + +/** + * A store for managing persistent entity objects. + * + *

        {@code EntityStore} objects are thread-safe. Multiple threads may safely + * call the methods of a shared {@code EntityStore} object.

        + * + *

        See the package + * summary example for an example of using an {@code EntityStore}.

        + * + *

        Before creating an EntityStore you must create an {@link + * Environment} object using the Berkeley DB engine API. The environment may + * contain any number of entity stores and their associated databases, as well + * as other databases not associated with an entity store.
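A minimal setup sketch; the home directory and store name are illustrative, and java.io.File plus the usual com.sleepycat.je and com.sleepycat.persist imports are assumed:

    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    envConfig.setTransactional(true);
    Environment env = new Environment(new File("/tmp/env-home"), envConfig);

    StoreConfig storeConfig = new StoreConfig();
    storeConfig.setAllowCreate(true);
    storeConfig.setTransactional(true);
    EntityStore store = new EntityStore(env, "myStore", storeConfig);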

        + * + *

        An entity store is based on an {@link EntityModel}: a data model which + * defines persistent classes (entity classes), primary keys, + * secondary keys, and relationships between entities. A primary index is + * created for each entity class. An associated secondary index is created for + * each secondary key. The {@link Entity}, {@link PrimaryKey} and {@link + * SecondaryKey} annotations may be used to define entities and keys.

        + * + *

        To use an EntityStore, first obtain {@link PrimaryIndex} and + * {@link SecondaryIndex} objects by calling {@link #getPrimaryIndex + * getPrimaryIndex} and {@link #getSecondaryIndex getSecondaryIndex}. Then use + * these indices to store and access entity records by key.
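A sketch of that usage, assuming a hypothetical Person entity keyed by a String ssn with a secondary key field parentSsn:

    PrimaryIndex<String, Person> personBySsn =
        store.getPrimaryIndex(String.class, Person.class);
    SecondaryIndex<String, String, Person> personByParentSsn =
        store.getSecondaryIndex(personBySsn, String.class, "parentSsn");

    personBySsn.put(new Person("111-11-1112", "Jane", "111-11-1111")); // hypothetical constructor
    Person jane = personBySsn.get("111-11-1112");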

        + * + *

        Although not normally needed, you can also use the entity store along + * with the {@link com.sleepycat.je Base API}. Methods in the {@link + * PrimaryIndex} and {@link SecondaryIndex} classes may be used to obtain + * databases and bindings. The databases may be used directly for accessing + * entity records. The bindings should be called explicitly to translate + * between {@link com.sleepycat.je.DatabaseEntry} objects and entity model + * objects.

        + * + *

        Each primary and secondary index is associated internally with a {@link + * Database}. With any of the above mentioned use cases, methods are provided + * that may be used for database performance tuning. The {@link + * #setPrimaryConfig setPrimaryConfig} and {@link #setSecondaryConfig + * setSecondaryConfig} methods may be called anytime before a database is + * opened via {@link #getPrimaryIndex getPrimaryIndex} or {@link + * #getSecondaryIndex getSecondaryIndex}. The {@link #setSequenceConfig + * setSequenceConfig} method may be called anytime before {@link #getSequence + * getSequence} is called or {@link #getPrimaryIndex getPrimaryIndex} is called + * for a primary index associated with that sequence.

        + * + * + *

        Database Names

        + * + *

        The database names of primary and secondary indices are designed to be + * unique within the environment and identifiable for debugging and use with + * tools such as {@link com.sleepycat.je.util.DbDump} and {@link + * com.sleepycat.je.util.DbLoad}.

        + * + *

        The syntax of a primary index database name is:

        + *
           persist#STORE_NAME#ENTITY_CLASS
        + *

Where STORE_NAME is the name parameter passed to {@link #EntityStore + * EntityStore} and ENTITY_CLASS is the name of the class passed to {@link + * #getPrimaryIndex getPrimaryIndex}.

        + * + *

        The syntax of a secondary index database name is:

        + *
           persist#STORE_NAME#ENTITY_CLASS#KEY_NAME
        + *

        Where KEY_NAME is the secondary key name passed to {@link + * #getSecondaryIndex getSecondaryIndex}.

        + * + *

        Although you should never have to construct these names manually, + * understanding their syntax is useful for several reasons:

        + *
          + *
        • Exception messages sometimes contain the database name, from which you + * can identify the entity class and secondary key.
+ *
        • If you create other databases in the same environment that are not + * part of an EntityStore, to avoid naming conflicts the other + * database names should not begin with "persist#".
+ *
        • If you are using {@link com.sleepycat.je.util.DbDump} or {@link + * com.sleepycat.je.util.DbLoad} to perform a backup or copy databases between + * environments, knowing the database names can be useful. Normally you will + * dump or load all database names starting with + * "persist#STORE_NAME#".
+ *
        + * + *

        If you are copying all databases in a store as mentioned in the last + * point above, there is one further consideration. There are two internal + * databases that must be kept with the other databases in the store in order + * for the store to be used. These contain the data formats and sequences for + * the store:

        + *
           persist#STORE_NAME#com.sleepycat.persist.formats
        + *
           persist#STORE_NAME#com.sleepycat.persist.sequences
        + *

        These databases must normally be included with copies of other databases + * in the store. They should not be modified by the application.

        + * + *

        For example, the following code snippet removes all databases for a given + * store in a single transaction.

        + *
        + *  Environment env = ...
        + *  EntityStore store = ...
        + *  Transaction txn = env.beginTransaction(null, null);
        + *  String prefix = "persist#" + store.getStoreName() + "#";
        + *  for (String dbName : env.getDatabaseNames()) {
        + *      if (dbName.startsWith(prefix)) {
        + *          env.removeDatabase(txn, dbName);
        + *      }
        + *  }
        + *  txn.commit();
        + * + * + * + * @author Mark Hayes + */ +public class EntityStore + /* */ + implements Closeable + /* */ + { + + private Store store; + + /** + * Opens an entity store in a given environment. + * + * @param env an open Berkeley DB Environment. + * + * @param storeName the name of the entity store within the given + * environment. An empty string is allowed. Named stores may be used to + * distinguish multiple sets of persistent entities for the same entity + * classes in a single environment. Underlying database names are prefixed + * with the store name. + * + * @param config the entity store configuration, or null to use default + * configuration properties. + * + * @throws StoreExistsException when the {@link + * StoreConfig#setExclusiveCreate ExclusiveCreate} configuration parameter + * is true and the store's internal catalog database already exists. + * + * @throws StoreNotFoundException when when the {@link + * StoreConfig#setAllowCreate AllowCreate} configuration parameter is false + * and the store's internal catalog database does not exist. + * + * @throws IncompatibleClassException if an incompatible class change has + * been made and mutations are not configured for handling the change. See + * {@link com.sleepycat.persist.evolve Class Evolution} for more + * information. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. If the store does not exist and the {@link + * StoreConfig#setAllowCreate AllowCreate} parameter is true, then one of + * the Write + * Operation Failures may also occur. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public EntityStore(Environment env, String storeName, StoreConfig config) + throws StoreExistsException, + StoreNotFoundException, + IncompatibleClassException, + DatabaseException { + + store = new Store(env, storeName, config, false /*rawAccess*/); + } + + /** + * Returns the environment associated with this store. + * + * @return the environment. + */ + public Environment getEnvironment() { + return store.getEnvironment(); + } + + /** + * Returns a copy of the entity store configuration. + * + * @return the config. + */ + public StoreConfig getConfig() { + return store.getConfig(); + } + + /** + * Returns the name of this store. + * + * @return the name. + */ + public String getStoreName() { + return store.getStoreName(); + } + + /* */ + /** + * Returns the names of all entity stores in the given environment. + * + * @return the store names. An empty set is returned if no stores are + * present. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public static Set getStoreNames(Environment env) + throws DatabaseException { + + return Store.getStoreNames(env); + } + /* */ + + /* */ + /** + * @hidden + * For internal use only. + */ + public boolean isReplicaUpgradeMode() { + return store.isReplicaUpgradeMode(); + } + /* */ + + /** + * Returns the current entity model for this store. The current model is + * derived from the configured entity model and the live entity class + * definitions. + * + * @return the model. 
+ */ + public EntityModel getModel() { + return store.getModel(); + } + + /** + * Returns the set of mutations that were configured when the store was + * opened, or if none were configured, the set of mutations that were + * configured and stored previously. + * + * @return the mutations. + */ + public Mutations getMutations() { + return store.getMutations(); + } + + /** + * Returns the primary index for a given entity class, opening it if + * necessary. + * + *

        If they are not already open, the primary and secondary databases for + * the entity class are created/opened together in a single internal + * transaction. When the secondary indices are opened, that can cascade to + * open other related primary indices.

        + * + * @param primaryKeyClass the class of the entity's primary key field, or + * the corresponding primitive wrapper class if the primary key field type + * is a primitive. + * + * @param entityClass the entity class for which to open the primary index. + * + * @param the primary key class. + * + * @param the entity class. + * + * @return the primary index. + * + * @throws IllegalArgumentException if the entity class or classes + * referenced by it are not persistent, or the primary key class does not + * match the entity's primary key field, or if metadata for the entity or + * primary key is invalid. + * + * + * @throws IndexNotAvailableException in a replicated environment if this + * Replica's persistent classes have been upgraded to define a new index, + * but the Master has not yet been upgraded. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. If the index does not exist and the {@link + * StoreConfig#setReadOnly ReadOnly} parameter is false, then one of the Write + * Operation Failures may also occur. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public PrimaryIndex + getPrimaryIndex(Class primaryKeyClass, Class entityClass) + throws DatabaseException { + + try { + return store.getPrimaryIndex + (primaryKeyClass, primaryKeyClass.getName(), + entityClass, entityClass.getName()); + } catch (IndexNotAvailableException e) { + if (!store.attemptRefresh()) { + throw e; + } + return store.getPrimaryIndex + (primaryKeyClass, primaryKeyClass.getName(), + entityClass, entityClass.getName()); + } + } + + /** + * Returns a secondary index for a given primary index and secondary key, + * opening it if necessary. + * + *

        NOTE: If the secondary key field is declared in a subclass + * of the entity class, use {@link #getSubclassIndex} instead.

        + * + *

        If a {@link SecondaryKey#relatedEntity} is used and the primary index + * for the related entity is not already open, it will be opened by this + * method. That will, in turn, open its secondary indices, which can + * cascade to open other primary indices.

        + * + * @param primaryIndex the primary index associated with the returned + * secondary index. The entity class of the primary index, or one of its + * superclasses, must contain a secondary key with the given secondary key + * class and key name. + * + * @param keyClass the class of the secondary key field, or the + * corresponding primitive wrapper class if the secondary key field type is + * a primitive. + * + * @param keyName the name of the secondary key field, or the {@link + * SecondaryKey#name} if this name annotation property was specified. + * + * @param the secondary key class. + * + * @param the primary key class. + * + * @param the entity class. + * + * @return the secondary index. + * + * @throws IllegalArgumentException if the entity class or one of its + * superclasses does not contain a key field of the given key class and key + * name, or if the metadata for the secondary key is invalid. + * + * + * @throws IndexNotAvailableException in a replicated environment if this + * Replica's persistent classes have been upgraded to define a new index, + * but the Master has not yet been upgraded. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. If the index does not exist and the {@link + * StoreConfig#setReadOnly ReadOnly} parameter is false, then one of the Write + * Operation Failures may also occur. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public SecondaryIndex + getSecondaryIndex(PrimaryIndex primaryIndex, + Class keyClass, + String keyName) + throws DatabaseException { + + try { + return store.getSecondaryIndex + (primaryIndex, primaryIndex.getEntityClass(), + primaryIndex.getEntityClass().getName(), + keyClass, keyClass.getName(), keyName); + } catch (IndexNotAvailableException e) { + if (!store.attemptRefresh()) { + throw e; + } + return store.getSecondaryIndex + (primaryIndex, primaryIndex.getEntityClass(), + primaryIndex.getEntityClass().getName(), + keyClass, keyClass.getName(), keyName); + } + } + + /** + * Returns a secondary index for a secondary key in an entity subclass, + * opening it if necessary. + * + *

        If a {@link SecondaryKey#relatedEntity} is used and the primary index + * for the related entity is not already open, it will be opened by this + * method. That will, in turn, open its secondary indices, which can + * cascade to open other primary indices.

        + * + * @param primaryIndex the primary index associated with the returned + * secondary index. The entity class of the primary index, or one of its + * superclasses, must contain a secondary key with the given secondary key + * class and key name. + * + * @param entitySubclass a subclass of the entity class for the primary + * index. The entity subclass must contain a secondary key with the given + * secondary key class and key name. + * + * @param keyClass the class of the secondary key field, or the + * corresponding primitive wrapper class if the secondary key field type is + * a primitive. + * + * @param keyName the name of the secondary key field, or the {@link + * SecondaryKey#name} if this name annotation property was specified. + * + * @param the secondary key class. + * + * @param the primary key class. + * + * @param the entity class. + * + * @param the entity sub-class. + * + * @return the secondary index. + * + * @throws IllegalArgumentException if the given entity subclass does not + * contain a key field of the given key class and key name, or if the + * metadata for the secondary key is invalid. + * + * + * @throws IndexNotAvailableException in a replicated environment if this + * Replica's persistent classes have been upgraded to define a new index, + * but the Master has not yet been upgraded. + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. If the index does not exist and the {@link + * StoreConfig#setReadOnly ReadOnly} parameter is false, then one of the Write + * Operation Failures may also occur. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public SecondaryIndex + getSubclassIndex(PrimaryIndex primaryIndex, + Class entitySubclass, + Class keyClass, + String keyName) + throws DatabaseException { + + /* Make subclass metadata available before getting the index. */ + getModel().getClassMetadata(entitySubclass.getName()); + + try { + return store.getSecondaryIndex + (primaryIndex, entitySubclass, + primaryIndex.getEntityClass().getName(), + keyClass, keyClass.getName(), keyName); + } catch (IndexNotAvailableException e) { + if (!store.attemptRefresh()) { + throw e; + } + return store.getSecondaryIndex + (primaryIndex, entitySubclass, + primaryIndex.getEntityClass().getName(), + keyClass, keyClass.getName(), keyName); + } + } + + /** + * Performs conversion of unevolved objects in order to reduce lazy + * conversion overhead. Evolution may be performed concurrently with + * normal access to the store. + * + *

        Conversion is performed one entity class at a time. An entity class + * is converted only if it has {@link Mutations} associated with it via + * {@link StoreConfig#setMutations StoreConfig.setMutations}.

        + * + *

        Conversion of an entity class is performed by reading each entity, + * converting it if necessary, and updating it if conversion was performed. + * When all instances of an entity class are converted, references to the + * appropriate {@link Mutations} are deleted. Therefore, if this method is + * called twice successfully without changing class definitions, the second + * call will do nothing.
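A sketch of an explicit conversion pass, assuming an open EntityStore named store:

    EvolveConfig evolveConfig = new EvolveConfig();
    EvolveStats stats = store.evolve(evolveConfig);
    System.out.println("entities read: " + stats.getNRead()
                       + ", converted: " + stats.getNConverted());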

        + * + * @param config the EvolveConfig. + * + * @return the EvolveStats. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + * + * @see com.sleepycat.persist.evolve Class Evolution + */ + public EvolveStats evolve(EvolveConfig config) + throws DatabaseException { + + return store.evolve(config); + } + + /** + * Deletes all instances of this entity class and its (non-entity) + * subclasses. + * + *

        The primary database for the given entity class will be truncated and + * all secondary databases will be removed. The primary and secondary + * databases associated with the entity class must not be open except by + * this store, since database truncation/removal is only possible when the + * database is not open.

        + * + *

        The primary and secondary databases for the entity class will be + * closed by this operation and the existing {@link PrimaryIndex} and + * {@link SecondaryIndex} objects will be invalidated. To access the + * indexes, the user must call {@link #getPrimaryIndex} and {@link + * #getSecondaryIndex} after this operation is complete.

        + * + *

        Auto-commit is used implicitly if the store is transactional.
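A sketch, assuming a hypothetical Person entity class; note that previously obtained index objects are invalidated:

    store.truncateClass(Person.class);

    // Re-obtain the invalidated indices before further use.
    PrimaryIndex<String, Person> personBySsn =
        store.getPrimaryIndex(String.class, Person.class);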

        + * + * @param entityClass the entity class whose instances are to be deleted. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void truncateClass(Class entityClass) + throws DatabaseException { + + store.truncateClass(null, entityClass); + } + + /** + * Deletes all instances of this entity class and its (non-entity) + * subclasses. + * + *

        The primary database for the given entity class will be truncated and + * all secondary databases will be removed. The primary and secondary + * databases associated with the entity class must not be open except by + * this store, since database truncation/removal is only possible when the + * database is not open.

        + * + *

        The primary and secondary databases for the entity class will be + * closed by this operation and the existing {@link PrimaryIndex} and + * {@link SecondaryIndex} objects will be invalidated. To access the + * indexes, the user must call {@link #getPrimaryIndex} and {@link + * #getSecondaryIndex} after this operation is complete.

        + * + * @param txn the transaction used to protect this operation, null to use + * auto-commit, or null if the store is non-transactional. + * + * @param entityClass the entity class whose instances are to be deleted. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void truncateClass(Transaction txn, Class entityClass) + throws DatabaseException { + + store.truncateClass(txn, entityClass); + } + + /* */ + /** + * Flushes each modified index to disk that was opened in deferred-write + * mode. + * + *

        All indexes are opened in deferred-write mode if true was passed to + * {@link StoreConfig#setDeferredWrite} for the store.

        + * + *

        Alternatively, individual databases may be configured for deferred + * write using {@link DatabaseConfig#setDeferredWrite} along with {@link + * #getPrimaryConfig} and {@link #setPrimaryConfig}. Caution should be + * used when configuring only some databases for deferred-write, since + * durability will be different for these databases than for other + * databases in the same store.

        + * + *

        This method is equivalent to calling {@link Database#sync} for each + * deferred-write index Database that is open for this store. + * + *

        Instead of calling this method, {@link Environment#sync} may be used. + * The difference is that this method will only flush the databases for + * this store, while {@link Environment#sync} will sync all deferred-write + * databases currently open for the environment and will also perform a + * full checkpoint. This method is therefore less expensive than a full + * sync of the environment.
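A sketch of the deferred-write pattern, assuming env is an open Environment:

    StoreConfig storeConfig = new StoreConfig();
    storeConfig.setAllowCreate(true);
    storeConfig.setDeferredWrite(true);
    EntityStore store = new EntityStore(env, "myStore", storeConfig);

    // ... perform writes through the store's indices ...

    store.sync(); // flushes only this store's deferred-write databases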

        + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void sync() + throws DatabaseException { + + store.sync(); + } + /* */ + + /** + * Closes the primary and secondary databases for the given entity class + * that were opened via this store. The caller must ensure that the + * primary and secondary indices for the entity class are no longer in + * use. + * + *

        The primary and secondary databases for the entity class will be + * closed by this operation and the existing {@link PrimaryIndex} and + * {@link SecondaryIndex} objects will be invalidated. To access the + * indexes, the user must call {@link #getPrimaryIndex} and {@link + * #getSecondaryIndex} after this operation is complete.

        + * + * @param entityClass the entity class whose databases are to be closed. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void closeClass(Class entityClass) + throws DatabaseException { + + store.closeClass(entityClass); + } + + /** + * Closes all databases and sequences that were opened via this store. The + * caller must ensure that no databases opened via this store are in use. + * + * + *

        WARNING: To prevent memory leaks, the application must call this + * method even when the Environment has become invalid. While this is not + * necessary for Database objects, it is necessary for EntityStore objects + * to prevent the accumulation of memory in the global DPL metadata cache. + * + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.
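A sketch of a shutdown sequence that observes both warnings, assuming store and env from the earlier sketches:

    try {
        // ... use the store ...
    } finally {
        store.close(); // required even if the Environment has become invalid
        env.close();
        store = null;  // drop references to the closed handles
        env = null;
    }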

        + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void close() + throws DatabaseException { + + store.close(); + } + + /** + * Returns a named sequence for using Berkeley DB engine API directly, + * opening it if necessary. + * + * @param name the sequence name, which is normally defined using the + * {@link PrimaryKey#sequence} annotation property. + * + * @return the open sequence for the given sequence name. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public Sequence getSequence(String name) + throws DatabaseException { + + return store.getSequence(name); + } + + /** + * Returns the default Berkeley DB engine API configuration for a named key + * sequence. + * + *

        The returned configuration is as follows. All other properties have + * default values.

        + *
          + *
        • The {@link SequenceConfig#setInitialValue InitialValue} is one.
+ *
        • The {@link SequenceConfig#setRange Range} minimum is one.
+ *
        • The {@link SequenceConfig#setCacheSize CacheSize} is 100.
+ *
        • {@link SequenceConfig#setAutoCommitNoSync AutoCommitNoSync} is + * true.
+ *
• {@link SequenceConfig#setAllowCreate AllowCreate} is set to the + * inverse of the store {@link StoreConfig#setReadOnly ReadOnly} + * setting.
+ *
        + * + * @param name the sequence name, which is normally defined using the + * {@link PrimaryKey#sequence} annotation property. + * + * @return the default configuration for the given sequence name. + */ + public SequenceConfig getSequenceConfig(String name) { + return store.getSequenceConfig(name); + } + + /** + * Configures a named key sequence using the Berkeley DB engine API. + * + *

        To be compatible with the entity model and the Direct Persistence + * Layer, the configuration should be retrieved using {@link + * #getSequenceConfig getSequenceConfig}, modified, and then passed to this + * method. The following configuration properties may not be changed:

        + *
          + *
        • {@link SequenceConfig#setExclusiveCreate ExclusiveCreate}
+ *
        + *

In addition, {@link SequenceConfig#setAllowCreate AllowCreate} must be + * the inverse of {@code ReadOnly}.

        + * + *

        If the range is changed to include the value zero, see {@link + * PrimaryKey} for restrictions.
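A sketch of the retrieve-modify-apply pattern described above, using a hypothetical sequence name:

    SequenceConfig seqConfig = store.getSequenceConfig("personId");
    seqConfig.setCacheSize(500); // illustrative cache size
    store.setSequenceConfig("personId", seqConfig);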

        + * + * @param name the sequence name, which is normally defined using the + * {@link PrimaryKey#sequence} annotation property. + * + * @param config the configuration to use for the given sequence name. + * + * @throws IllegalArgumentException if the configuration is incompatible + * with the entity model or the Direct Persistence Layer. + * + * @throws IllegalStateException if the sequence has already been opened. + */ + public void setSequenceConfig(String name, SequenceConfig config) { + store.setSequenceConfig(name, config); + } + + /** + * Returns the default primary database Berkeley DB engine API + * configuration for an entity class. + * + *

        The returned configuration is as follows. All other properties have + * default values.

        + *
          + *
+ * <ul>
+ * <li>{@link DatabaseConfig#setTransactional Transactional} is set to
+ * match {@link StoreConfig#setTransactional StoreConfig}.</li>
+ * <li>{@link DatabaseConfig#setAllowCreate AllowCreate} is set to the
+ * inverse of the store {@link StoreConfig#setReadOnly ReadOnly}
+ * setting.</li>
+ * <li>{@link DatabaseConfig#setReadOnly ReadOnly} is set to match
+ * {@link StoreConfig#setReadOnly StoreConfig}.</li>
+ * <li>{@link DatabaseConfig#setDeferredWrite DeferredWrite} is set to
+ * match {@link StoreConfig#setDeferredWrite StoreConfig}.</li>
+ * <li>{@link DatabaseConfig#setTemporary Temporary} is set to
+ * match {@link StoreConfig#setTemporary StoreConfig}.</li>
+ * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator} is set to
+ * an internal class if a key comparator is used.</li>
+ * </ul>
        + * + * @param entityClass the entity class identifying the primary database. + * + * @return the default configuration for the given entity class. + */ + public DatabaseConfig getPrimaryConfig(Class entityClass) { + return store.getPrimaryConfig(entityClass); + } + + /** + * Configures the primary database for an entity class using the Berkeley + * DB engine API. + * + *

        To be compatible with the entity model and the Direct Persistence + * Layer, the configuration should be retrieved using {@link + * #getPrimaryConfig getPrimaryConfig}, modified, and then passed to this + * method. The following configuration properties may not be changed:

        + *
          + *
+ * <ul>
+ * <li>{@link DatabaseConfig#setExclusiveCreate ExclusiveCreate}</li>
+ * <li>{@link DatabaseConfig#setSortedDuplicates SortedDuplicates}</li>
+ * <li>{@link DatabaseConfig#setTemporary Temporary}</li>
+ * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator}</li>
+ * </ul>
        + *

In addition, {@link DatabaseConfig#setAllowCreate AllowCreate} must be + * the inverse of {@code ReadOnly}.
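A minimal sketch of the same retrieve-modify-set pattern for a primary database, assuming a hypothetical {@code Employee} entity class and an open {@code store}; it must run before the primary index is opened.

    DatabaseConfig config = store.getPrimaryConfig(Employee.class);
    config.setDeferredWrite(true); // not among the constrained properties
    store.setPrimaryConfig(Employee.class, config);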

        + * + * @param entityClass the entity class identifying the primary database. + * + * @param config the configuration to use for the given entity class. + * + * @throws IllegalArgumentException if the configuration is incompatible + * with the entity model or the Direct Persistence Layer. + * + * @throws IllegalStateException if the database has already been opened. + */ + public void setPrimaryConfig(Class entityClass, DatabaseConfig config) { + store.setPrimaryConfig(entityClass, config); + } + + /** + * Returns the default secondary database Berkeley DB engine API + * configuration for an entity class and key name. + * + *

        The returned configuration is as follows. All other properties have + * default values.

        + *
          + *
+ * <ul>
+ * <li>{@link DatabaseConfig#setTransactional Transactional} is set to
+ * match the primary database.</li>
+ * <li>{@link DatabaseConfig#setAllowCreate AllowCreate} is set to the
+ * inverse of the primary database {@link DatabaseConfig#setReadOnly
+ * ReadOnly} setting.</li>
+ * <li>{@link DatabaseConfig#setReadOnly ReadOnly} is set to match
+ * the primary database.</li>
+ * <li>{@link DatabaseConfig#setDeferredWrite DeferredWrite} is set to
+ * match the primary database.</li>
+ * <li>{@link DatabaseConfig#setTemporary Temporary} is set to
+ * match {@link StoreConfig#setTemporary StoreConfig}.</li>
+ * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator} is set to
+ * an internal class if a key comparator is used.</li>
+ * <li>{@link DatabaseConfig#setSortedDuplicates SortedDuplicates} is set
+ * according to {@link SecondaryKey#relate}.</li>
+ * <li>{@link SecondaryConfig#setAllowPopulate AllowPopulate} is set to
+ * true when a secondary key is added to an existing primary index.</li>
+ * <li>{@link SecondaryConfig#setKeyCreator KeyCreator} or {@link
+ * SecondaryConfig#setMultiKeyCreator MultiKeyCreator} is set to an
+ * internal instance.</li>
+ * <li>{@link SecondaryConfig#setForeignMultiKeyNullifier
+ * ForeignMultiKeyNullifier} is set to an internal instance if {@link
+ * SecondaryKey#onRelatedEntityDelete} is {@link DeleteAction#NULLIFY}.</li>
+ * </ul>
        + * + * @param entityClass the entity class containing the given secondary key + * name. + * + * @param keyName the name of the secondary key field, or the {@link + * SecondaryKey#name} if this name annotation property was specified. + * + * @return the default configuration for the given secondary key. + */ + public SecondaryConfig getSecondaryConfig(Class entityClass, + String keyName) { + return store.getSecondaryConfig(entityClass, keyName); + } + + /** + * Configures a secondary database for an entity class and key name using + * the Berkeley DB engine API. + * + *

        To be compatible with the entity model and the Direct Persistence + * Layer, the configuration should be retrieved using {@link + * #getSecondaryConfig getSecondaryConfig}, modified, and then passed to + * this method. The following configuration properties may not be + * changed:

        + *
          + *
+ * <ul>
+ * <li>{@link DatabaseConfig#setExclusiveCreate ExclusiveCreate}</li>
+ * <li>{@link DatabaseConfig#setSortedDuplicates SortedDuplicates}</li>
+ * <li>{@link DatabaseConfig#setBtreeComparator BtreeComparator}</li>
+ * <li>{@link DatabaseConfig#setDuplicateComparator
+ * DuplicateComparator}</li>
+ * <li>{@link DatabaseConfig#setTemporary Temporary}</li>
+ * <li>{@link SecondaryConfig#setAllowPopulate AllowPopulate}</li>
+ * <li>{@link SecondaryConfig#setKeyCreator KeyCreator}</li>
+ * <li>{@link SecondaryConfig#setMultiKeyCreator MultiKeyCreator}</li>
+ * <li>{@link SecondaryConfig#setForeignKeyNullifier
+ * ForeignKeyNullifier}</li>
+ * <li>{@link SecondaryConfig#setForeignMultiKeyNullifier
+ * ForeignMultiKeyNullifier}</li>
+ * <li>{@link SecondaryConfig#setForeignKeyDeleteAction
+ * ForeignKeyDeleteAction}</li>
+ * <li>{@link SecondaryConfig#setForeignKeyDatabase
+ * ForeignKeyDatabase}</li>
+ * </ul>
        + *

In addition, {@link DatabaseConfig#setAllowCreate AllowCreate} must be + * the inverse of {@code ReadOnly}.
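A minimal sketch for a secondary database, assuming a hypothetical {@code Employee} class with a secondary key named "department"; again it must run before the index is opened.

    SecondaryConfig config = store.getSecondaryConfig(Employee.class, "department");
    config.setDeferredWrite(true); // a property that may be changed
    store.setSecondaryConfig(Employee.class, "department", config);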

        + * + * @param entityClass the entity class containing the given secondary key + * name. + * + * @param keyName the name of the secondary key field, or the {@link + * SecondaryKey#name} if this name annotation property was specified. + * + * @param config the configuration to use for the given secondary key. + * + * @throws IllegalArgumentException if the configuration is incompatible + * with the entity model or the Direct Persistence Layer. + * + * @throws IllegalStateException if the database has already been opened. + */ + public void setSecondaryConfig(Class entityClass, + String keyName, + SecondaryConfig config) { + store.setSecondaryConfig(entityClass, keyName, config); + } +} diff --git a/src/com/sleepycat/persist/EntityValueAdapter.java b/src/com/sleepycat/persist/EntityValueAdapter.java new file mode 100644 index 0000000..b973ceb --- /dev/null +++ b/src/com/sleepycat/persist/EntityValueAdapter.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * A ValueAdapter where the "value" is the entity. + * + * @author Mark Hayes + */ +class EntityValueAdapter implements ValueAdapter { + + private EntityBinding entityBinding; + private boolean isSecondary; + + EntityValueAdapter(Class entityClass, + EntityBinding entityBinding, + boolean isSecondary) { + this.entityBinding = entityBinding; + this.isSecondary = isSecondary; + } + + public DatabaseEntry initKey() { + return new DatabaseEntry(); + } + + public DatabaseEntry initPKey() { + return isSecondary ? (new DatabaseEntry()) : null; + } + + public DatabaseEntry initData() { + return new DatabaseEntry(); + } + + public void clearEntries(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + key.setData(null); + if (isSecondary) { + pkey.setData(null); + } + data.setData(null); + } + + public V entryToValue(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + return (V) entityBinding.entryToObject(isSecondary ? pkey : key, data); + } + + public void valueToData(V value, DatabaseEntry data) { + entityBinding.objectToData(value, data); + } +} diff --git a/src/com/sleepycat/persist/ForwardCursor.java b/src/com/sleepycat/persist/ForwardCursor.java new file mode 100644 index 0000000..548c87c --- /dev/null +++ b/src/com/sleepycat/persist/ForwardCursor.java @@ -0,0 +1,127 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist; + +import java.io.Closeable; +import java.util.Iterator; + +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.EnvironmentFailureException ; // for javadoc +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationFailureException ; // for javadoc +/* */ + +/** + * Cursor operations limited to traversing forward. See {@link EntityCursor} + * for general information on cursors. + * + *

        {@code ForwardCursor} objects are not thread-safe. Cursors + * should be opened, used and closed by a single thread.

        + * + *

        WARNING: Cursors must always be closed to prevent resource leaks + * which could lead to the index becoming unusable or cause an + * OutOfMemoryError. To ensure that a cursor is closed in the + * face of exceptions, close it in a finally block.
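A minimal sketch of the recommended pattern, assuming a hypothetical {@code primaryIndex}; {@code EntityCursor} extends {@code ForwardCursor}.

    EntityCursor<Employee> cursor = primaryIndex.entities();
    try {
        for (Employee entity = cursor.next(); entity != null; entity = cursor.next()) {
            // process the entity
        }
    } finally {
        cursor.close(); // runs even when an exception is thrown
    }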

+ * + * @author Mark Hayes + */ +public interface ForwardCursor<V> extends Iterable<V> + /* */ + , Closeable + /* */ + { + + /** + * Moves the cursor to the next value and returns it, or returns null + * if there are no more values in the cursor range. If the cursor is + * uninitialized, this method returns the first value. + * + *

        {@link LockMode#DEFAULT} is used implicitly.

        + * + * @return the next value, or null if there are no more values in the + * cursor range. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + V next() + throws DatabaseException; + + /** + * Moves the cursor to the next value and returns it, or returns null + * if there are no more values in the cursor range. If the cursor is + * uninitialized, this method returns the first value. + * + * @param lockMode the lock mode to use for this operation, or null to + * use {@link LockMode#DEFAULT}. + * + * @return the next value, or null if there are no more values in the + * cursor range. + * + * + * @throws OperationFailureException if one of the Read Operation + * Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + V next(LockMode lockMode) + throws DatabaseException; + + /** + * Returns an iterator over the key range, starting with the value + * following the current position or at the first value if the cursor is + * uninitialized. + * + *

        {@link LockMode#DEFAULT} is used implicitly.

        + * + * @return the iterator. + */ + Iterator iterator(); + + /** + * Returns an iterator over the key range, starting with the value + * following the current position or at the first value if the cursor is + * uninitialized. + * + * @param lockMode the lock mode to use for all operations performed + * using the iterator, or null to use {@link LockMode#DEFAULT}. + * + * @return the iterator. + */ + Iterator iterator(LockMode lockMode); + + /** + * Closes the cursor. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + void close() + throws DatabaseException; +} diff --git a/src/com/sleepycat/persist/IndexNotAvailableException.java b/src/com/sleepycat/persist/IndexNotAvailableException.java new file mode 100644 index 0000000..c07bba9 --- /dev/null +++ b/src/com/sleepycat/persist/IndexNotAvailableException.java @@ -0,0 +1,86 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown by the {@link EntityStore#getPrimaryIndex getPrimaryIndex}, {@link + * EntityStore#getSecondaryIndex getSecondaryIndex} and {@link + * EntityStore#getSubclassIndex getSubclassIndex} when an index has not yet + * been created. + * + * + * This exception can be thrown in two circumstances. + *
          + *
+ * <ol>
+ * <li>It can be thrown in a replicated environment when the Replica has been
+ * upgraded to contain new persistent classes that define a new primary or
+ * secondary index, but the Master has not yet been upgraded. The index does
+ * not exist because the Master has not yet been upgraded with the new
+ * classes. If the application is aware of when the Master is upgraded, it
+ * can wait for that to occur and then open the index. Or, the application
+ * may repeatedly try to open the index until it becomes available.</li>
+ * <li>It can be thrown when opening an environment read-only with new
+ * persistent classes that define a new primary or secondary index. The index
+ * does not exist because the environment has not yet been opened read-write
+ * with the new classes. When the index is created by a read-write
+ * application, the read-only application must close and re-open the
+ * environment in order to open the new index.</li>
+ * </ol>
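A sketch of the retry approach mentioned in the first case; the {@code store} variable, entity classes, and one-second interval are all assumptions.

    PrimaryIndex<Long, Employee> index = null;
    while (index == null) {
        try {
            index = store.getPrimaryIndex(Long.class, Employee.class);
        } catch (IndexNotAvailableException e) {
            try {
                Thread.sleep(1000); // wait for the Master to be upgraded
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                break;
            }
        }
    }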
        + * + * + * @author Mark Hayes + */ +public class IndexNotAvailableException extends OperationFailureException { + + private static final long serialVersionUID = 1L; + + /** + * For internal use only. + * + * @hidden + * + * + * @param message the message. + */ + public IndexNotAvailableException(String message) { + super(message); + } + + /* */ + + /** + * For internal use only. + * @hidden + */ + private IndexNotAvailableException(String message, + OperationFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new IndexNotAvailableException(msg, this); + } + + /* */ +} diff --git a/src/com/sleepycat/persist/KeySelector.java b/src/com/sleepycat/persist/KeySelector.java new file mode 100644 index 0000000..bda8e77 --- /dev/null +++ b/src/com/sleepycat/persist/KeySelector.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +/** + * This is package-private to hide it until we implemented unsorted access. + * + * Implemented to select keys to be returned by an unsorted {@code + * ForwardCursor}. + * + *

        The reason for implementing a selector, rather than filtering the objects + * returned by the {@link ForwardCursor}, is to improve performance when not + * all keys are to be processed. Keys are passed to this interface without + * retrieving record data or locking, so it is less expensive to return false + * from this method than to retrieve the object from the cursor.

+ * + * see EntityIndex#unsortedKeys + * see EntityIndex#unsortedEntities + * + * @author Mark Hayes + */ +interface KeySelector<K> { + + /** + * Returns whether a given key should be returned via the cursor. + * + *

This method should not assume that the given key is for a committed + * record, nor that the key will be returned via + * the cursor if this method returns true. The record for this key will + * not be locked until this method returns. If, when the record is locked, + * the record is found to be uncommitted or deleted, the key will not be + * returned via the cursor.
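For illustration only, a selector that accepts every other key might look like the sketch below; the even-key criterion is invented, and no record is read or locked when it runs.

    KeySelector<Long> evenKeys = new KeySelector<Long>() {
        public boolean selectKey(Long key) {
            return key % 2 == 0; // decided from the key alone
        }
    };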

        + */ + boolean selectKey(K key); +} diff --git a/src/com/sleepycat/persist/KeyValueAdapter.java b/src/com/sleepycat/persist/KeyValueAdapter.java new file mode 100644 index 0000000..22e1390 --- /dev/null +++ b/src/com/sleepycat/persist/KeyValueAdapter.java @@ -0,0 +1,61 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * A ValueAdapter where the "value" is the key (the primary key in a primary + * index or the secondary key in a secondary index). + * + * @author Mark Hayes + */ +class KeyValueAdapter implements ValueAdapter { + + private EntryBinding keyBinding; + + KeyValueAdapter(Class keyClass, EntryBinding keyBinding) { + this.keyBinding = keyBinding; + } + + public DatabaseEntry initKey() { + return new DatabaseEntry(); + } + + public DatabaseEntry initPKey() { + return null; + } + + public DatabaseEntry initData() { + return BasicIndex.NO_RETURN_ENTRY; + } + + public void clearEntries(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + key.setData(null); + } + + public V entryToValue(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + return (V) keyBinding.entryToObject(key); + } + + public void valueToData(V value, DatabaseEntry data) { + throw new UnsupportedOperationException + ("Cannot change the data in a key-only index"); + } +} diff --git a/src/com/sleepycat/persist/KeysIndex.java b/src/com/sleepycat/persist/KeysIndex.java new file mode 100644 index 0000000..9d77a70 --- /dev/null +++ b/src/com/sleepycat/persist/KeysIndex.java @@ -0,0 +1,138 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationResult; +/* */ +import com.sleepycat.je.OperationStatus; +/* */ +import com.sleepycat.je.ReadOptions; +/* */ +import com.sleepycat.je.Transaction; + +/** + * The EntityIndex returned by SecondaryIndex.keysIndex(). This index maps + * secondary key to primary key. In Berkeley DB internal terms, this is a + * secondary database that is opened without associating it with a primary. 
+ * + * @author Mark Hayes + */ +class KeysIndex extends BasicIndex { + + private EntryBinding pkeyBinding; + private SortedMap map; + + KeysIndex(Database db, + Class keyClass, + EntryBinding keyBinding, + Class pkeyClass, + EntryBinding pkeyBinding) + throws DatabaseException { + + super(db, keyClass, keyBinding, + new DataValueAdapter(pkeyClass, pkeyBinding)); + this.pkeyBinding = pkeyBinding; + } + + /* + * Of the EntityIndex methods only get()/map()/sortedMap() are implemented + * here. All other methods are implemented by BasicIndex. + */ + + public PK get(SK key) + throws DatabaseException { + + return get(null, key, null); + } + + public PK get(Transaction txn, SK key, LockMode lockMode) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + EntityResult result = get( + txn, key, Get.SEARCH, DbInternal.getReadOptions(lockMode)); + return result != null ? result.value() : null; + } + /* */ + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry pkeyEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + OperationStatus status = db.get(txn, keyEntry, pkeyEntry, lockMode); + + if (status == OperationStatus.SUCCESS) { + return (PK) pkeyBinding.entryToObject(pkeyEntry); + } else { + return null; + } + } + + /* */ + public EntityResult get(Transaction txn, + SK key, + Get getType, + ReadOptions options) + throws DatabaseException { + + checkGetType(getType); + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry pkeyEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + OperationResult result = db.get( + txn, keyEntry, pkeyEntry, getType, options); + + if (result != null) { + return new EntityResult<>( + (PK) pkeyBinding.entryToObject(pkeyEntry), + result); + } else { + return null; + } + } + /* */ + + public Map map() { + return sortedMap(); + } + + public synchronized SortedMap sortedMap() { + if (map == null) { + map = new StoredSortedMap(db, keyBinding, pkeyBinding, false); + } + return map; + } + + boolean isUpdateAllowed() { + return false; + } +} diff --git a/src/com/sleepycat/persist/PrimaryIndex.java b/src/com/sleepycat/persist/PrimaryIndex.java new file mode 100644 index 0000000..00c7c9c --- /dev/null +++ b/src/com/sleepycat/persist/PrimaryIndex.java @@ -0,0 +1,752 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist; + +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DbInternal; +/* */ +import com.sleepycat.je.Environment; +/* */ +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.OperationResult; +/* */ +import com.sleepycat.je.OperationStatus; +/* */ +import com.sleepycat.je.Put; +import com.sleepycat.je.ReadOptions; +/* */ +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +/* */ +import com.sleepycat.je.WriteOptions; +/* */ +import com.sleepycat.persist.impl.PersistEntityBinding; +import com.sleepycat.persist.impl.PersistKeyAssigner; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; + +/** + * The primary index for an entity class and its primary key. + * + *

        {@code PrimaryIndex} objects are thread-safe. Multiple threads may + * safely call the methods of a shared {@code PrimaryIndex} object.

        + * + *

        {@code PrimaryIndex} implements {@link EntityIndex} to map the primary + * key type (PK) to the entity type (E).

        + * + *

        The {@link Entity} annotation may be used to define an entity class and + * the {@link PrimaryKey} annotation may be used to define a primary key as + * shown in the following example.

        + * + *
        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     String name;
        + *
        + *     Employee(long id, String name) {
        + *         this.id = id;
        + *         this.name = name;
        + *     }
        + *
        + *     private Employee() {} // For bindings
        + * }
        + * + *

        To obtain the {@code PrimaryIndex} for a given entity class, call {@link + * EntityStore#getPrimaryIndex EntityStore.getPrimaryIndex}, passing the + * primary key class and the entity class. For example:

        + * + *
        + * EntityStore store = new EntityStore(...);
        + *
+ * {@code PrimaryIndex<Long, Employee>} primaryIndex =
        + *     store.getPrimaryIndex(Long.class, Employee.class);
        + * + *

        Note that {@code Long.class} is passed as the primary key class, but the + * primary key field has the primitive type {@code long}. When a primitive + * primary key field is used, the corresponding primitive wrapper class is used + * to access the primary index. For more information on key field types, see + * {@link PrimaryKey}.

        + * + *

        The {@code PrimaryIndex} provides the primary storage and access methods + * for the instances of a particular entity class. Entities are inserted and + * updated in the {@code PrimaryIndex} by calling a method in the family of + * {@link #put} methods. The {@link #put} method will insert the entity if no + * entity with the same primary key already exists. If an entity with the same + * primary key does exist, it will update the entity and return the existing + * (old) entity. For example:

        + * + *
        + * Employee oldEntity;
        + * oldEntity = primaryIndex.put(new Employee(1, "Jane Smith"));    // Inserts an entity
        + * assert oldEntity == null;
        + * oldEntity = primaryIndex.put(new Employee(2, "Joan Smith"));    // Inserts an entity
        + * assert oldEntity == null;
        + * oldEntity = primaryIndex.put(new Employee(2, "Joan M. Smith")); // Updates an entity
        + * assert oldEntity != null;
        + * + *

        The {@link #putNoReturn} method can be used to avoid the overhead of + * returning the existing entity, when the existing entity is not important to + * the application. The return type of {@link #putNoReturn} is void. For + * example:

        + * + *
        + * primaryIndex.putNoReturn(new Employee(1, "Jane Smith"));    // Inserts an entity
        + * primaryIndex.putNoReturn(new Employee(2, "Joan Smith"));    // Inserts an entity
        + * primaryIndex.putNoReturn(new Employee(2, "Joan M. Smith")); // Updates an entity
        + * + *

        The {@link #putNoOverwrite} method can be used to ensure that an existing + * entity is not overwritten. {@link #putNoOverwrite} returns true if the + * entity was inserted, or false if an existing entity exists and no action was + * taken. For example:

        + * + *
        + * boolean inserted;
        + * inserted = primaryIndex.putNoOverwrite(new Employee(1, "Jane Smith"));    // Inserts an entity
        + * assert inserted;
        + * inserted = primaryIndex.putNoOverwrite(new Employee(2, "Joan Smith"));    // Inserts an entity
        + * assert inserted;
        + * inserted = primaryIndex.putNoOverwrite(new Employee(2, "Joan M. Smith")); // No action was taken!
        + * assert !inserted;
        + * + *

        Primary key values must be unique, in other words, each instance of a + * given entity class must have a distinct primary key value. Rather than + * assigning the unique primary key values yourself, a sequence can be + * used to assign sequential integer values automatically, starting with the + * value 1 (one). A sequence is defined using the {@link PrimaryKey#sequence} + * annotation property. For example:

        + * + *
        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey(sequence="ID")}
        + *     long id;
        + *
        + *     String name;
        + *
        + *     Employee(String name) {
        + *         this.name = name;
        + *     }
        + *
        + *     private Employee() {} // For bindings
        + * }
        + * + *

        The name of the sequence used above is "ID". Any name can be used. If + * the same sequence name is used in more than one entity class, the sequence + * will be shared by those classes, in other words, a single sequence of + * integers will be used for all instances of those classes. See {@link + * PrimaryKey#sequence} for more information.

        + * + *

        Any method in the family of {@link #put} methods may be used to insert + * entities where the primary key is assigned from a sequence. When the {@link + * #put} method returns, the primary key field of the entity object will be set + * to the assigned key value. For example:

        + * + *
        + * Employee employee;
        + * employee = new Employee("Jane Smith");
        + * primaryIndex.putNoReturn(employee);    // Inserts an entity
        + * assert employee.id == 1;
        + * employee = new Employee("Joan Smith");
        + * primaryIndex.putNoReturn(employee);    // Inserts an entity
        + * assert employee.id == 2;
        + * + *

This raises the question: How do you update an existing entity without + * assigning a new primary key? The answer is that the {@link #put} methods + * assign a new key from the sequence only if the primary key field is + * zero or null (for reference types). If an entity with a non-zero and + * non-null key field is passed to a {@link #put} method, any existing entity + * with that primary key value will be updated.

        + * + *
        + * Employee employee;
        + * employee = new Employee("Jane Smith");
        + * primaryIndex.putNoReturn(employee);    // Inserts an entity
        + * assert employee.id == 1;
        + * employee = new Employee("Joan Smith");
        + * primaryIndex.putNoReturn(employee);    // Inserts an entity
        + * assert employee.id == 2;
        + * employee.name = "Joan M. Smith";
        + * primaryIndex.putNoReturn(employee);    // Updates an existing entity
        + * assert employee.id == 2;
        + * + *

        Since {@code PrimaryIndex} implements the {@link EntityIndex} interface, + * it shares the common index methods for retrieving and deleting entities, + * opening cursors and using transactions. See {@link EntityIndex} for more + * information on these topics.

        + * + *

        Note that when using an index, keys and values are stored and retrieved + * by value not by reference. In other words, if an entity object is stored + * and then retrieved, or retrieved twice, each object will be a separate + * instance. For example, in the code below the assertion will always + * fail.

        + *
        + * MyKey key = ...;
        + * MyEntity entity1 = new MyEntity(key, ...);
        + * index.put(entity1);
        + * MyEntity entity2 = index.get(key);
        + * assert entity1 == entity2; // always fails!
        + * 
+ * + * @author Mark Hayes + */ +public class PrimaryIndex<PK, E> extends BasicIndex<PK, E> { + + private Class<E> entityClass; + private EntityBinding<E> entityBinding; + private SortedMap<PK, E> map; + private PersistKeyAssigner keyAssigner; + + /** + * Creates a primary index without using an EntityStore. + * + *

        This constructor is not normally needed and is provided for + * applications that wish to use custom bindings along with the Direct + * Persistence Layer. Normally, {@link EntityStore#getPrimaryIndex + * getPrimaryIndex} is used instead.

        + * + *

        Note that when this constructor is used directly, primary keys cannot + * be automatically assigned from a sequence. The key assignment feature + * requires knowledge of the primary key field, which is only available if + * an EntityStore is used. Of course, primary keys may be + * assigned from a sequence manually before calling the put + * methods in this class.
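A hedged sketch of direct construction: {@code db} is assumed to be an open primary {@code Database}, and {@code myEntityBinding} is a hypothetical custom {@code EntityBinding<Employee>}.

    EntryBinding<Long> keyBinding = TupleBinding.getPrimitiveBinding(Long.class);
    PrimaryIndex<Long, Employee> index = new PrimaryIndex<Long, Employee>(
        db, Long.class, keyBinding, Employee.class, myEntityBinding);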

        + * + * @param database the primary database. + * + * @param keyClass the class of the primary key. + * + * @param keyBinding the binding to be used for primary keys. + * + * @param entityClass the class of the entities stored in this index. + * + * @param entityBinding the binding to be used for entities. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public PrimaryIndex(Database database, + Class keyClass, + EntryBinding keyBinding, + Class entityClass, + EntityBinding entityBinding) + throws DatabaseException { + + super(database, keyClass, keyBinding, + new EntityValueAdapter(entityClass, entityBinding, false)); + + this.entityClass = entityClass; + this.entityBinding = entityBinding; + + if (entityBinding instanceof PersistEntityBinding) { + keyAssigner = + ((PersistEntityBinding) entityBinding).getKeyAssigner(); + } + } + + /** + * Returns the primary key class for this index. + * + * @return the key class. + */ + public Class getKeyClass() { + return keyClass; + } + + /** + * Returns the primary key binding for this index. + * + * @return the key binding. + */ + public EntryBinding getKeyBinding() { + return keyBinding; + } + + /** + * Returns the entity class for this index. + * + * @return the entity class. + */ + public Class getEntityClass() { + return entityClass; + } + + /** + * Returns the entity binding for this index. + * + * @return the entity binding. + */ + public EntityBinding getEntityBinding() { + return entityBinding; + } + + /** + * Inserts an entity and returns null, or updates it if the primary key + * already exists and returns the existing entity. + * + *

        If a {@link PrimaryKey#sequence} is used and the primary key field of + * the given entity is null or zero, this method will assign the next value + * from the sequence to the primary key field of the given entity.

        + * + *

        Auto-commit is used implicitly if the store is transactional.

        + * + * @param entity the entity to be inserted or updated. + * + * @return the existing entity that was updated, or null if the entity was + * inserted. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public E put(E entity) + throws DatabaseException { + + return put(null, entity); + } + + /** + * Inserts an entity and returns null, or updates it if the primary key + * already exists and returns the existing entity. + * + *

        If a {@link PrimaryKey#sequence} is used and the primary key field of + * the given entity is null or zero, this method will assign the next value + * from the sequence to the primary key field of the given entity.

        + * + * @param txn the transaction used to protect this operation, null to use + * auto-commit, or null if the store is non-transactional. + * + * @param entity the entity to be inserted or updated. + * + * @return the existing entity that was updated, or null if the entity was + * inserted. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public E put(Transaction txn, E entity) + throws DatabaseException { + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + assignKey(entity, keyEntry); + + boolean autoCommit = false; + Environment env = db.getEnvironment(); + if (transactional && + txn == null && + DbCompat.getThreadTransaction(env) == null) { + txn = env.beginTransaction(null, getAutoCommitTransactionConfig()); + autoCommit = true; + } + + CursorConfig cursorConfig = null; + if (concurrentDB) { + cursorConfig = new CursorConfig(); + DbCompat.setWriteCursor(cursorConfig, true); + } + boolean failed = true; + Cursor cursor = db.openCursor(txn, cursorConfig); + LockMode lockMode = locking ? LockMode.RMW : null; + try { + while (true) { + OperationStatus status = + cursor.getSearchKey(keyEntry, dataEntry, lockMode); + if (status == OperationStatus.SUCCESS) { + E existing = + entityBinding.entryToObject(keyEntry, dataEntry); + entityBinding.objectToData(entity, dataEntry); + cursor.put(keyEntry, dataEntry); + failed = false; + return existing; + } else { + entityBinding.objectToData(entity, dataEntry); + status = cursor.putNoOverwrite(keyEntry, dataEntry); + if (status != OperationStatus.KEYEXIST) { + failed = false; + return null; + } + } + } + } finally { + cursor.close(); + if (autoCommit) { + if (failed) { + txn.abort(); + } else { + txn.commit(); + } + } + } + } + + /** + * Inserts an entity, or updates it if the primary key already exists (does + * not return the existing entity). This method may be used instead of + * {@link #put(Object)} to save the overhead of returning the existing + * entity. + * + *

        If a {@link PrimaryKey#sequence} is used and the primary key field of + * the given entity is null or zero, this method will assign the next value + * from the sequence to the primary key field of the given entity.

        + * + *

        Auto-commit is used implicitly if the store is transactional.

        + * + * @param entity the entity to be inserted or updated. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void putNoReturn(E entity) + throws DatabaseException { + + putNoReturn(null, entity); + } + + /** + * Inserts an entity, or updates it if the primary key already exists (does + * not return the existing entity). This method may be used instead of + * {@link #put(Transaction,Object)} to save the overhead of returning the + * existing entity. + * + *

        If a {@link PrimaryKey#sequence} is used and the primary key field of + * the given entity is null or zero, this method will assign the next value + * from the sequence to the primary key field of the given entity.

        + * + * @param txn the transaction used to protect this operation, null to use + * auto-commit, or null if the store is non-transactional. + * + * @param entity the entity to be inserted or updated. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void putNoReturn(Transaction txn, E entity) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + put(txn, entity, Put.OVERWRITE, null); + return; + } + /* */ + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + assignKey(entity, keyEntry); + entityBinding.objectToData(entity, dataEntry); + + db.put(txn, keyEntry, dataEntry); + } + + /** + * Inserts an entity and returns true, or returns false if the primary key + * already exists. + * + *

        If a {@link PrimaryKey#sequence} is used and the primary key field of + * the given entity is null or zero, this method will assign the next value + * from the sequence to the primary key field of the given entity.

        + * + *

        Auto-commit is used implicitly if the store is transactional.

        + * + * @param entity the entity to be inserted. + * + * @return true if the entity was inserted, or false if an entity with the + * same primary key is already present. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public boolean putNoOverwrite(E entity) + throws DatabaseException { + + return putNoOverwrite(null, entity); + } + + /** + * Inserts an entity and returns true, or returns false if the primary key + * already exists. + * + *

        If a {@link PrimaryKey#sequence} is used and the primary key field of + * the given entity is null or zero, this method will assign the next value + * from the sequence to the primary key field of the given entity.

        + * + * @param txn the transaction used to protect this operation, null to use + * auto-commit, or null if the store is non-transactional. + * + * @param entity the entity to be inserted. + * + * @return true if the entity was inserted, or false if an entity with the + * same primary key is already present. + * + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public boolean putNoOverwrite(Transaction txn, E entity) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + return put(txn, entity, Put.NO_OVERWRITE, null) != null; + } + /* */ + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + assignKey(entity, keyEntry); + entityBinding.objectToData(entity, dataEntry); + + OperationStatus status = db.putNoOverwrite(txn, keyEntry, dataEntry); + + return (status == OperationStatus.SUCCESS); + } + + /* */ + /** + * Inserts or updates an entity, using Put type and WriteOptions + * parameters, and returning an OperationResult. + * + *

        If a {@link PrimaryKey#sequence} is used and the primary key field of + * the given entity is null or zero, this method will assign the next value + * from the sequence to the primary key field of the given entity.
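A minimal sketch of an insert-if-absent call using this method; {@code txn} and the entity data are assumptions, and the {@code Employee} constructor is the one from the earlier examples.

    OperationResult result = primaryIndex.put(
        txn, new Employee(3, "Pat Jones"), Put.NO_OVERWRITE, null);
    if (result == null) {
        // an entity with primary key 3 already existed; nothing was written
    }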

        + * + * @param txn the transaction used to protect this operation, null to use + * auto-commit, or null if the store is non-transactional. + * + * @param entity the entity to be inserted. + * + * @param putType is {@link Put#OVERWRITE} or {@link Put#NO_OVERWRITE}. + * + * @param options the WriteOptions, or null to use default options. + * + * @return the OperationResult if the record is written, else null. If + * {@code Put.NO_OVERWRITE} is used, null is returned if an entity with the + * same primary key is already present. If {@code Put.OVERWRITE} is used, + * null is never returned. + * + * @throws OperationFailureException if one of the Write + * Operation Failures occurs. + * + * @throws EnvironmentFailureException if an unexpected, internal or + * environment-wide failure occurs. + * + * @throws DatabaseException the base class for all BDB exceptions. + * + * @since 7.0 + */ + public OperationResult put(Transaction txn, + E entity, + Put putType, + WriteOptions options) { + + if (putType != Put.OVERWRITE && putType != Put.NO_OVERWRITE) { + throw new IllegalArgumentException( + "putType not allowed: " + putType); + } + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + assignKey(entity, keyEntry); + entityBinding.objectToData(entity, dataEntry); + + return db.put(txn, keyEntry, dataEntry, putType, options); + } + /* */ + + /** + * If we are assigning primary keys from a sequence, assign the next key + * and set the primary key field. + */ + private void assignKey(E entity, DatabaseEntry keyEntry) + throws DatabaseException { + + if (keyAssigner != null) { + if (!keyAssigner.assignPrimaryKey(entity, keyEntry)) { + entityBinding.objectToKey(entity, keyEntry); + } + } else { + entityBinding.objectToKey(entity, keyEntry); + } + } + + /* + * Of the EntityIndex methods only get()/map()/sortedMap() are implemented + * here. All other methods are implemented by BasicIndex. + */ + + public E get(PK key) + throws DatabaseException { + + return get(null, key, null); + } + + public E get(Transaction txn, PK key, LockMode lockMode) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + EntityResult result = get( + txn, key, Get.SEARCH, DbInternal.getReadOptions(lockMode)); + return result != null ? result.value() : null; + } + /* */ + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + OperationStatus status = db.get(txn, keyEntry, dataEntry, lockMode); + + if (status == OperationStatus.SUCCESS) { + return makeEntity(key, keyEntry, dataEntry); + } else { + return null; + } + } + + /* */ + public EntityResult get(Transaction txn, + PK key, + Get getType, + ReadOptions options) + throws DatabaseException { + + checkGetType(getType); + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + OperationResult result = db.get( + txn, keyEntry, dataEntry, getType, options); + + if (result != null) { + return new EntityResult<>( + makeEntity(key, keyEntry, dataEntry), + result); + } else { + return null; + } + } + /* */ + + private E makeEntity(PK key, + DatabaseEntry keyEntry, + DatabaseEntry dataEntry) { + + return (entityBinding instanceof PersistEntityBinding) ? + (E)((PersistEntityBinding) entityBinding). 
+ entryToObjectWithPriKey(key, dataEntry) : + entityBinding.entryToObject(keyEntry, dataEntry); + } + + public Map map() { + return sortedMap(); + } + + public synchronized SortedMap sortedMap() { + if (map == null) { + map = new StoredSortedMap(db, keyBinding, entityBinding, true); + } + return map; + } + + /** + * + * @hidden + * + * For internal use only. + * + * Used for obtaining the auto-commit txn config from the store, which + * overrides this method to return it. + */ + /* */ + protected + /* */ + TransactionConfig getAutoCommitTransactionConfig() { + return null; + } + + boolean isUpdateAllowed() { + return true; + } +} diff --git a/src/com/sleepycat/persist/PrimaryKeyValueAdapter.java b/src/com/sleepycat/persist/PrimaryKeyValueAdapter.java new file mode 100644 index 0000000..846bdb9 --- /dev/null +++ b/src/com/sleepycat/persist/PrimaryKeyValueAdapter.java @@ -0,0 +1,61 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * A ValueAdapter where the "value" is the primary key. + * + * @author Mark Hayes + */ +class PrimaryKeyValueAdapter implements ValueAdapter { + + private EntryBinding keyBinding; + + PrimaryKeyValueAdapter(Class keyClass, EntryBinding keyBinding) { + this.keyBinding = keyBinding; + } + + public DatabaseEntry initKey() { + return new DatabaseEntry(); + } + + public DatabaseEntry initPKey() { + return new DatabaseEntry(); + } + + public DatabaseEntry initData() { + return BasicIndex.NO_RETURN_ENTRY; + } + + public void clearEntries(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + key.setData(null); + pkey.setData(null); + } + + public V entryToValue(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data) { + return (V) keyBinding.entryToObject(pkey); + } + + public void valueToData(V value, DatabaseEntry data) { + throw new UnsupportedOperationException + ("Cannot change the data in a key-only index"); + } +} diff --git a/src/com/sleepycat/persist/SecondaryIndex.java b/src/com/sleepycat/persist/SecondaryIndex.java new file mode 100644 index 0000000..cbe142a --- /dev/null +++ b/src/com/sleepycat/persist/SecondaryIndex.java @@ -0,0 +1,1051 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist; + +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationResult; +/* */ +import com.sleepycat.je.OperationStatus; +/* */ +import com.sleepycat.je.ReadOptions; +/* */ +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.persist.model.DeleteAction; +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKey; + +/** + * The secondary index for an entity class and a secondary key. + * + *

        {@code SecondaryIndex} objects are thread-safe. Multiple threads may + * safely call the methods of a shared {@code SecondaryIndex} object.

        + * + *

        {@code SecondaryIndex} implements {@link EntityIndex} to map the + * secondary key type (SK) to the entity type (E). In other words, entities + * are accessed by secondary key values.

        + * + *

        The {@link SecondaryKey} annotation may be used to define a secondary key + * as shown in the following example.

        + * + *
        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
        + *     String department;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + * + *

        Before obtaining a {@code SecondaryIndex}, the {@link PrimaryIndex} must + * be obtained for the entity class. To obtain the {@code SecondaryIndex} call + * {@link EntityStore#getSecondaryIndex EntityStore.getSecondaryIndex}, passing + * the primary index, the secondary key class and the secondary key name. For + * example:

        + * + *
        + * EntityStore store = new EntityStore(...);
        + *
+ * {@code PrimaryIndex<Long, Employee>} primaryIndex =
        + *     store.getPrimaryIndex(Long.class, Employee.class);
        + *
+ * {@code SecondaryIndex<String, Long, Employee>} secondaryIndex =
        + *     store.getSecondaryIndex(primaryIndex, String.class, "department");
        + * + *

        Since {@code SecondaryIndex} implements the {@link EntityIndex} + * interface, it shares the common index methods for retrieving and deleting + * entities, opening cursors and using transactions. See {@link EntityIndex} + * for more information on these topics.

        + * + *

        {@code SecondaryIndex} does not provide methods for inserting + * and updating entities. That must be done using the {@link + * PrimaryIndex}.

        + * + *

Note that a {@code SecondaryIndex} has three type parameters {@code <SK, PK, E>} or in the example {@code <String, Long, Employee>} while a {@link + * PrimaryIndex} has only two type parameters {@code <PK, E>} or {@code <Long, Employee>}. This is because a {@code SecondaryIndex} has an extra level of + * mapping: It maps from secondary key to primary key, and then from primary + * key to entity. For example, consider this entity:

        + * + *
        + * + * + *
+ * <table>
+ * <tr><th>ID</th><th>Department</th><th>Name</th></tr>
+ * <tr><td>1</td><td>Engineering</td><td>Jane Smith</td></tr>
+ * </table>
        + * + *

        The {@link PrimaryIndex} maps from id directly to the entity, or from + * primary key 1 to the "Jane Smith" entity in the example. The {@code + * SecondaryIndex} maps from department to id, or from secondary key + * "Engineering" to primary key 1 in the example, and then uses the {@code + * PrimaryIndex} to map from the primary key to the entity.

        + * + *

        Because of this extra type parameter and extra level of mapping, a {@code + * SecondaryIndex} can provide more than one mapping, or view, of the entities + * in the primary index. The main mapping of a {@code SecondaryIndex} is to + * map from secondary key (SK) to entity (E), or in the example, from the + * String department key to the Employee entity. The {@code SecondaryIndex} + * itself, by implementing {@code EntityIndex}, provides this + * mapping.

        + * + *

        The second mapping provided by {@code SecondaryIndex} is from secondary + * key (SK) to primary key (PK), or in the example, from the String department + * key to the Long id key. The {@link #keysIndex} method provides this + * mapping. When accessing the keys index, the primary key is returned rather + * than the entity. When only the primary key is needed and not the entire + * entity, using the keys index is less expensive than using the secondary + * index because the primary index does not have to be accessed.
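A small sketch of the keys index, reusing the hypothetical {@code employeeByDepartment} index (see the Many-to-One example below); only the primary key is fetched, not the entity.

    EntityIndex<String, Long> keys = employeeByDepartment.keysIndex();
    Long id = keys.get("Engineering"); // primary key only; no primary database access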

        + * + *

        The third mapping provided by {@code SecondaryIndex} is from primary key + * (PK) to entity (E), for the subset of entities having a given secondary key + * (SK). This mapping is provided by the {@link #subIndex} method. A + * sub-index is convenient when you are interested in working with the subset + * of entities having a particular secondary key value, for example, all + * employees in a given department.

        + * + *

        All three mappings, along with the mapping provided by the {@link + * PrimaryIndex}, are shown using example data in the {@link EntityIndex} + * interface documentation. See {@link EntityIndex} for more information.

        + * + *

        Note that when using an index, keys and values are stored and retrieved + * by value not by reference. In other words, if an entity object is stored + * and then retrieved, or retrieved twice, each object will be a separate + * instance. For example, in the code below the assertion will always + * fail.

        + *
        + * MyKey key = ...;
        + * MyEntity entity1 = index.get(key);
        + * MyEntity entity2 = index.get(key);
        + * assert entity1 == entity2; // always fails!
        + * 

        One-to-One Relationships

A {@link Relationship#ONE_TO_ONE ONE_TO_ONE} relationship, although less common than other types of relationships, is the simplest type of relationship. A single entity is related to a single secondary key value. For example:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=ONE_TO_ONE)}
        + *     String ssn;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
+ * {@code SecondaryIndex<String, Long, Employee>} employeeBySsn =
        + *     store.getSecondaryIndex(primaryIndex, String.class, "ssn");

With a {@link Relationship#ONE_TO_ONE ONE_TO_ONE} relationship, the secondary key must be unique; in other words, no two entities may have the same secondary key value. If an attempt is made to store an entity having the same secondary key value as another existing entity, a {@link DatabaseException} will be thrown.

Because the secondary key is unique, it is useful to look up entities by secondary key using {@link EntityIndex#get}. For example:

        + * Employee employee = employeeBySsn.get(mySsn);

        Many-to-One Relationships

A {@link Relationship#MANY_TO_ONE MANY_TO_ONE} relationship is the most common type of relationship. One or more entities are related to a single secondary key value. For example:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE)}
        + *     String department;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
+ * {@code SecondaryIndex<String, Long, Employee>} employeeByDepartment =
        + *     store.getSecondaryIndex(primaryIndex, String.class, "department");

With a {@link Relationship#MANY_TO_ONE MANY_TO_ONE} relationship, the secondary key is not required to be unique; in other words, more than one entity may have the same secondary key value. In this example, more than one employee may belong to the same department.

The most convenient way to access the employees in a given department is by using a sub-index. For example:

+ * {@code EntityIndex<Long, Employee>} subIndex = employeeByDepartment.subIndex(myDept);
+ * {@code EntityCursor<Employee>} cursor = subIndex.entities();
        + * try {
        + *     for (Employee entity : cursor) {
        + *         // Do something with the entity...
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }

        One-to-Many Relationships

In a {@link Relationship#ONE_TO_MANY ONE_TO_MANY} relationship, a single entity is related to one or more secondary key values. For example:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+ *     {@literal Set<String> emailAddresses = new HashSet<String>();}
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
+ * {@code SecondaryIndex<String, Long, Employee>} employeeByEmail =
        + *     store.getSecondaryIndex(primaryIndex, String.class, "emailAddresses");

With a {@link Relationship#ONE_TO_MANY ONE_TO_MANY} relationship, the secondary key must be unique; in other words, no two entities may have the same secondary key value. In this example, no two employees may have the same email address. If an attempt is made to store an entity having the same secondary key value as another existing entity, a {@link DatabaseException} will be thrown.

Because the secondary key is unique, it is useful to look up entities by secondary key using {@link EntityIndex#get}. For example:

        + * Employee employee = employeeByEmail.get(myEmailAddress);

The secondary key field for a {@link Relationship#ONE_TO_MANY ONE_TO_MANY} relationship must be an array or collection type. To access the email addresses of an employee, simply access the collection field directly. For example:

        + * Employee employee = primaryIndex.get(1); // Get the entity by primary key
        + * employee.emailAddresses.add(myNewEmail); // Add an email address
+ * primaryIndex.putNoReturn(employee);      // Update the entity

        Many-to-Many Relationships

In a {@link Relationship#MANY_TO_MANY MANY_TO_MANY} relationship, one or more entities are related to one or more secondary key values. For example:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_MANY)}
+ *     {@literal Set<String> organizations = new HashSet<String>();}
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
+ * {@code SecondaryIndex<String, Long, Employee>} employeeByOrganization =
        + *     store.getSecondaryIndex(primaryIndex, String.class, "organizations");

With a {@link Relationship#MANY_TO_MANY MANY_TO_MANY} relationship, the secondary key is not required to be unique; in other words, more than one entity may have the same secondary key value. In this example, more than one employee may belong to the same organization.

The most convenient way to access the employees in a given organization is by using a sub-index. For example:

+ * {@code EntityIndex<Long, Employee>} subIndex = employeeByOrganization.subIndex(myOrg);
+ * {@code EntityCursor<Employee>} cursor = subIndex.entities();
        + * try {
        + *     for (Employee entity : cursor) {
        + *         // Do something with the entity...
        + *     }
        + * } finally {
        + *     cursor.close();
        + * }

The secondary key field for a {@link Relationship#MANY_TO_MANY MANY_TO_MANY} relationship must be an array or collection type. To access the organizations of an employee, simply access the collection field directly. For example:

        + * Employee employee = primaryIndex.get(1); // Get the entity by primary key
        + * employee.organizations.remove(myOldOrg); // Remove an organization
+ * primaryIndex.putNoReturn(employee);      // Update the entity

        Foreign Key Constraints for Related Entities

In all the examples above the secondary key is treated only as a simple value, such as a {@code String} department field. In many cases, that is sufficient. But in other cases, you may wish to constrain the secondary keys of one entity class to be valid primary keys of another entity class. For example, a Department entity may also be defined:

        + * {@literal @Entity}
        + * class Department {
        + *
        + *     {@literal @PrimaryKey}
        + *     String name;
        + *
        + *     String missionStatement;
        + *
        + *     private Department() {}
        + * }

You may wish to constrain the department field values of the Employee class in the examples above to be valid primary keys of the Department entity class. In other words, you may wish to ensure that the department field of an Employee will always refer to a valid Department entity.

You can implement this constraint yourself by validating the department field before you store an Employee. For example:

+ * {@code PrimaryIndex<String, Department>} departmentIndex =
        + *     store.getPrimaryIndex(String.class, Department.class);
        + *
        + * void storeEmployee(Employee employee) throws DatabaseException {
        + *     if (departmentIndex.contains(employee.department)) {
        + *         primaryIndex.putNoReturn(employee);
        + *     } else {
        + *         throw new IllegalArgumentException("Department does not exist: " +
        + *                                            employee.department);
        + *     }
        + * }

Or, instead, you could define the Employee department field as a foreign key, and this validation will be done for you when you attempt to store the Employee entity. For example:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class)}
        + *     String department;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }

The {@code relatedEntity=Department.class} above defines the department field as a foreign key that refers to a Department entity. Whenever an Employee entity is stored, its department field value will be checked to ensure that a Department entity exists with that value as its primary key. If no such Department entity exists, then a {@link DatabaseException} is thrown, causing the transaction to be aborted (assuming that transactions are used).
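
For example, a minimal sketch of what this is equivalent to, assuming the departmentIndex and primaryIndex variables from the earlier validation example:

+ * // The explicit contains() check from the previous example is no longer
+ * // needed; the same validation now runs automatically on every store:
+ * primaryIndex.putNoReturn(employee); // throws DatabaseException if no such Department exists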

This raises the question: What happens when a Department entity is deleted while one or more Employee entities have department fields that refer to the deleted department's primary key? If the department were allowed to be deleted, the foreign key constraint for the Employee department field would be violated, because the Employee department field would refer to a department that does not exist.

By default, when this situation arises the system does not allow the department to be deleted. Instead, a {@link DatabaseException} is thrown, causing the transaction to be aborted. In this case, in order to delete a department, the department field of all Employee entities must first be updated to refer to a different existing department, or set to null. This is the responsibility of the application.
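
For example, a minimal sketch of deleting a department under this default behavior, assuming the indexes from the earlier examples and an existing "Operations" department:

+ * List<Long> ids = new ArrayList<Long>();
+ * EntityCursor<Long> keys = employeeByDepartment.subIndex("Engineering").keys();
+ * try {
+ *     for (Long id : keys) {
+ *         ids.add(id);
+ *     }
+ * } finally {
+ *     keys.close();
+ * }
+ * for (Long id : ids) {
+ *     Employee e = primaryIndex.get(id);
+ *     e.department = "Operations"; // reassign to an existing department (or set to null)
+ *     primaryIndex.putNoReturn(e);
+ * }
+ * departmentIndex.delete("Engineering"); // no Employee refers to it now, so this succeeds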

There are two additional ways of handling deletion of a Department entity. These alternatives are configured using the {@link SecondaryKey#onRelatedEntityDelete} annotation property. Setting this property to {@link DeleteAction#NULLIFY} causes the Employee department field to be automatically set to null when the department it refers to is deleted. This may or may not be desirable, depending on application policies. For example:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@code @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class,
        + *                                       onRelatedEntityDelete=NULLIFY)}
        + *     String department;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }

The {@link DeleteAction#CASCADE} value, on the other hand, causes the Employee entities to be automatically deleted when the department they refer to is deleted. This is probably not desirable in this particular example, but is useful for parent-child relationships. For example:

        + * {@literal @Entity}
        + * class Order {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     String description;
        + *
        + *     private Order() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class OrderItem {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@code @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Order.class,
        + *                                       onRelatedEntityDelete=CASCADE)}
        + *     long orderId;
        + *
        + *     String description;
        + *
        + *     private OrderItem() {}
        + * }

The OrderItem orderId field refers to its "parent" Order entity. When an Order entity is deleted, it may be useful to automatically delete its "child" OrderItem entities.


        For more information, see {@link SecondaryKey#onRelatedEntityDelete}.


        One-to-Many versus Many-to-One for Related Entities

When there is a conceptual Many-to-One relationship such as Employee to Department as illustrated in the examples above, the relationship may be implemented either as Many-to-One in the Employee class or as One-to-Many in the Department class.


        Here is the Many-to-One approach.

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class)}
        + *     String department;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class Department {
        + *
        + *     {@literal @PrimaryKey}
        + *     String name;
        + *
        + *     String missionStatement;
        + *
        + *     private Department() {}
        + * }

        And here is the One-to-Many approach.

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class Department {
        + *
        + *     {@literal @PrimaryKey}
        + *     String name;
        + *
        + *     String missionStatement;
        + *
        + *     {@literal @SecondaryKey(relate=ONE_TO_MANY, relatedEntity=Employee.class)}
+ *     {@literal Set<Long> employees = new HashSet<Long>();}
        + *
        + *     private Department() {}
        + * }

Which approach is best? The Many-to-One approach better handles large numbers of entities on the to-Many side of the relationship because it doesn't store a collection of keys as an entity field. With Many-to-One, a Btree is used to store the collection of keys, and the Btree can easily handle very large numbers of keys. With One-to-Many, each time a related key is added or removed, the entity on the One side of the relationship, along with the complete collection of related keys, must be updated. Therefore, if large numbers of keys may be stored per relationship, Many-to-One is recommended.

If the number of entities per relationship is not a concern, then you may wish to choose the approach that is most natural in your application data model. For example, if you think of a Department as containing employees and you wish to modify the Department object each time an employee is added or removed, then you may wish to store a collection of Employee keys in the Department object (One-to-Many).

Note that if you have a One-to-Many relationship and there is no related entity, then you don't have a choice -- you have to use One-to-Many because there is no entity on the to-Many side of the relationship where a Many-to-One key could be defined. An example is the Employee to email addresses relationship discussed above:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+ *     {@literal Set<String> emailAddresses = new HashSet<String>();}
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }

For the sake of argument, imagine that each employee has thousands of email addresses and employees frequently add and remove email addresses. To avoid the potential performance problems associated with updating the Employee entity every time an email address is added or removed, you could create an EmployeeEmailAddress entity and use a Many-to-One relationship as shown below:

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class EmployeeEmailAddress {
        + *
        + *     {@literal @PrimaryKey}
        + *     String emailAddress;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Employee.class)}
        + *     long employeeId;
        + *
        + *     private EmployeeEmailAddress() {}
        + * }

        Key Placement with Many-to-Many for Related Entities

As discussed in the section above, one drawback of a to-Many relationship (One-to-Many was discussed above and Many-to-Many is discussed here) is that it requires storing a collection of keys in an entity. Each time a key is added or removed, the containing entity must be updated. This has potential performance problems when there are large numbers of entities on the to-Many side of the relationship, in other words, when there are large numbers of keys in each secondary key field collection.

If you have a Many-to-Many relationship with a reasonably small number of entities on one side of the relationship and a large number of entities on the other side, you can avoid the potential performance problems by defining the secondary key field on the side with a small number of entities.

For example, in an Employee-to-Organization relationship, the number of organizations per employee will normally be reasonably small but the number of employees per organization may be very large. Therefore, to avoid potential performance problems, the secondary key field should be defined in the Employee class as shown below.

        + * {@literal @Entity}
        + * class Employee {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Organization.class)}
+ *     {@literal Set<String> organizations = new HashSet<String>();}
        + *
        + *     String name;
        + *
        + *     private Employee() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class Organization {
        + *
        + *     {@literal @PrimaryKey}
        + *     String name;
        + *
        + *     String description;
        + * }

If instead a {@code Set<Long> members} key had been defined in the Organization class, this set could potentially have a large number of elements and performance problems could result.


        Many-to-Many Versus a Relationship Entity

If you have a Many-to-Many relationship with a large number of entities on both sides of the relationship, you can avoid the potential performance problems by using a relationship entity. A relationship entity defines the relationship between two other entities using two Many-to-One relationships.

Imagine a relationship between cars and trucks indicating whenever a particular truck was passed on the road by a particular car. A given car may pass a large number of trucks and a given truck may be passed by a large number of cars. First look at a Many-to-Many relationship between these two entities:

        + * {@literal @Entity}
        + * class Car {
        + *
        + *     {@literal @PrimaryKey}
        + *     String licenseNumber;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Truck.class)}
+ *     {@literal Set<String> trucksPassed = new HashSet<String>();}
        + *
        + *     String color;
        + *
        + *     private Car() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class Truck {
        + *
        + *     {@literal @PrimaryKey}
        + *     String licenseNumber;
        + *
        + *     int tons;
        + *
        + *     private Truck() {}
        + * }

With the Many-to-Many approach above, the {@code trucksPassed} set could potentially have a large number of elements and performance problems could result.

To apply the relationship entity approach we define a new entity class named CarPassedTruck representing a single truck passed by a single car. We remove the secondary key from the Car class and use two secondary keys in the CarPassedTruck class instead.

        + * {@literal @Entity}
        + * class Car {
        + *
        + *     {@literal @PrimaryKey}
        + *     String licenseNumber;
        + *
        + *     String color;
        + *
        + *     private Car() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class Truck {
        + *
        + *     {@literal @PrimaryKey}
        + *     String licenseNumber;
        + *
        + *     int tons;
        + *
        + *     private Truck() {}
        + * }
        + *
        + * {@literal @Entity}
        + * class CarPassedTruck {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Car.class)}
        + *     String carLicense;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Truck.class)}
        + *     String truckLicense;
        + *
        + *     private CarPassedTruck() {}
        + * }

The CarPassedTruck entity can be used to access the relationship by car license or by truck license.
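
For example, a minimal sketch of both access paths (the cptIndex primary index variable and the license value are hypothetical):

+ * SecondaryIndex<String, Long, CarPassedTruck> byTruck =
+ *     store.getSecondaryIndex(cptIndex, String.class, "truckLicense");
+ *
+ * EntityCursor<CarPassedTruck> cursor =
+ *     byTruck.subIndex(myTruckLicense).entities(); // all passing events for one truck
+ * try {
+ *     for (CarPassedTruck cpt : cursor) {
+ *         // cpt.carLicense identifies a car that passed this truck
+ *     }
+ * } finally {
+ *     cursor.close();
+ * }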

You may use the relationship entity approach because of the potential performance problems mentioned above. Or, you may choose to use this approach in order to store other information about the relationship. For example, if for each car that passes a truck you wish to record how much faster the car was going than the truck, then a relationship entity is the logical place to store that property. In the example below the speedDifference property is added to the CarPassedTruck class.

        + * {@literal @Entity}
        + * class CarPassedTruck {
        + *
        + *     {@literal @PrimaryKey}
        + *     long id;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Car.class)}
        + *     String carLicense;
        + *
        + *     {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Truck.class)}
        + *     String truckLicense;
        + *
        + *     int speedDifference;
        + *
        + *     private CarPassedTruck() {}
        + * }

Be aware that the relationship entity approach adds overhead compared to Many-to-Many. There is one additional entity and one additional secondary key. These factors should be weighed against its advantages and the relevant application access patterns should be considered.

        + * + * @author Mark Hayes + */ +public class SecondaryIndex extends BasicIndex { + + private SecondaryDatabase secDb; + private Database keysDb; + private PrimaryIndex priIndex; + private EntityBinding entityBinding; + private EntityIndex keysIndex; + private SortedMap map; + + /** + * Creates a secondary index without using an EntityStore. + * When using an {@link EntityStore}, call {@link + * EntityStore#getSecondaryIndex getSecondaryIndex} instead. + * + *

This constructor is not normally needed and is provided for applications that wish to use custom bindings along with the Direct Persistence Layer. Normally, {@link EntityStore#getSecondaryIndex getSecondaryIndex} is used instead.

        + * + * @param database the secondary database used for all access other than + * via a {@link #keysIndex}. + * + * @param keysDatabase another handle on the secondary database, opened + * without association to the primary, and used only for access via a + * {@link #keysIndex}. If this argument is null and the {@link #keysIndex} + * method is called, then the keys database will be opened automatically; + * however, the user is then responsible for closing the keys database. To + * get the keys database in order to close it, call {@link + * #getKeysDatabase}. + * + * @param primaryIndex the primary index associated with this secondary + * index. + * + * @param secondaryKeyClass the class of the secondary key. + * + * @param secondaryKeyBinding the binding to be used for secondary keys. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public SecondaryIndex(SecondaryDatabase database, + Database keysDatabase, + PrimaryIndex primaryIndex, + Class secondaryKeyClass, + EntryBinding secondaryKeyBinding) + throws DatabaseException { + + super(database, secondaryKeyClass, secondaryKeyBinding, + new EntityValueAdapter(primaryIndex.getEntityClass(), + primaryIndex.getEntityBinding(), + true)); + secDb = database; + keysDb = keysDatabase; + priIndex = primaryIndex; + entityBinding = primaryIndex.getEntityBinding(); + } + + /** + * Returns the underlying secondary database for this index. + * + * @return the secondary database. + */ + @Override + public SecondaryDatabase getDatabase() { + return secDb; + } + + /** + * Returns the underlying secondary database that is not associated with + * the primary database and is used for the {@link #keysIndex}. + * + * @return the keys database. + */ + public Database getKeysDatabase() { + return keysDb; + } + + /** + * Returns the primary index associated with this secondary index. + * + * @return the primary index. + */ + public PrimaryIndex getPrimaryIndex() { + return priIndex; + } + + /** + * Returns the secondary key class for this index. + * + * @return the class. + */ + public Class getKeyClass() { + return keyClass; + } + + /** + * Returns the secondary key binding for the index. + * + * @return the key binding. + */ + public EntryBinding getKeyBinding() { + return keyBinding; + } + + /** + * Returns a read-only keys index that maps secondary key to primary key. + * When accessing the keys index, the primary key is returned rather than + * the entity. When only the primary key is needed and not the entire + * entity, using the keys index is less expensive than using the secondary + * index because the primary index does not have to be accessed. + * + *

Note the following in the unusual case that you are not using an EntityStore: This method will open the keys database, a second database handle for the secondary database, if it is not already open. In this case, if you are not using an EntityStore, then you are responsible for closing the database returned by {@link #getKeysDatabase} before closing the environment. If you are using an EntityStore, the keys database will be closed automatically by {@link EntityStore#close}.
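
For example, a minimal sketch of that responsibility when no EntityStore is used (variable names are hypothetical):

+ * EntityIndex<String, Long> keys = secondaryIndex.keysIndex(); // may open the keys database
+ * // ... use the keys index ...
+ * Database keysDb = secondaryIndex.getKeysDatabase();
+ * if (keysDb != null) {
+ *     keysDb.close(); // must be closed before the environment is closed
+ * }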

        + * + * @return the keys index. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public synchronized EntityIndex keysIndex() + throws DatabaseException { + + if (keysIndex == null) { + if (keysDb == null) { + DatabaseConfig config = secDb.getConfig(); + config.setReadOnly(true); + config.setAllowCreate(false); + config.setExclusiveCreate(false); + keysDb = DbCompat.openDatabase + (db.getEnvironment(), null /*txn*/, + DbCompat.getDatabaseFile(secDb), + secDb.getDatabaseName(), + config); + if (keysDb == null) { + throw new IllegalStateException + ("Could not open existing DB, file: " + + DbCompat.getDatabaseFile(secDb) + " name: " + + secDb.getDatabaseName()); + } + } + keysIndex = new KeysIndex + (keysDb, keyClass, keyBinding, + priIndex.getKeyClass(), priIndex.getKeyBinding()); + } + return keysIndex; + } + + /** + * Returns an index that maps primary key to entity for the subset of + * entities having a given secondary key (duplicates). A sub-index is + * convenient when you are interested in working with the subset of + * entities having a particular secondary key value. + * + *

When using a {@link Relationship#MANY_TO_ONE MANY_TO_ONE} or {@link Relationship#MANY_TO_MANY MANY_TO_MANY} secondary key, the sub-index represents the left (MANY) side of a relationship.

        + * + * @param key the secondary key that identifies the entities in the + * sub-index. + * + * @return the sub-index. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public EntityIndex subIndex(SK key) + throws DatabaseException { + + return new SubIndex(this, entityBinding, key); + } + + /* + * Of the EntityIndex methods only get()/map()/sortedMap() are implemented + * here. All other methods are implemented by BasicIndex. + */ + + public E get(SK key) + throws DatabaseException { + + return get(null, key, null); + } + + public E get(Transaction txn, SK key, LockMode lockMode) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + EntityResult result = get( + txn, key, Get.SEARCH, DbInternal.getReadOptions(lockMode)); + return result != null ? result.value() : null; + } + /* */ + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry pkeyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + OperationStatus status = + secDb.get(txn, keyEntry, pkeyEntry, dataEntry, lockMode); + + if (status == OperationStatus.SUCCESS) { + return entityBinding.entryToObject(pkeyEntry, dataEntry); + } else { + return null; + } + } + + /* */ + public EntityResult get(Transaction txn, + SK key, + Get getType, + ReadOptions options) + throws DatabaseException { + + checkGetType(getType); + + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry pkeyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + keyBinding.objectToEntry(key, keyEntry); + + OperationResult result = secDb.get( + txn, keyEntry, pkeyEntry, dataEntry, getType, options); + + if (result != null) { + return new EntityResult<>( + entityBinding.entryToObject(pkeyEntry, dataEntry), + result); + } else { + return null; + } + } + /* */ + + public Map map() { + return sortedMap(); + } + + public synchronized SortedMap sortedMap() { + if (map == null) { + map = new StoredSortedMap(db, keyBinding, entityBinding, true); + } + return map; + } + + /** + * + * @hidden + * + * For internal use only. + * + * Used for obtaining the auto-commit txn config from the store, which + * overrides this method to return it. + */ + /* */ + protected + /* */ + TransactionConfig getAutoCommitTransactionConfig() { + return null; + } + + boolean isUpdateAllowed() { + return false; + } +} diff --git a/src/com/sleepycat/persist/StoreConfig.java b/src/com/sleepycat/persist/StoreConfig.java new file mode 100644 index 0000000..e7fc326 --- /dev/null +++ b/src/com/sleepycat/persist/StoreConfig.java @@ -0,0 +1,567 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; // for javadoc +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.raw.RawStore; // for javadoc + +/** + * Configuration properties used with an {@link EntityStore} or {@link + * RawStore}. + * + *

{@code StoreConfig} objects are thread-safe. Multiple threads may safely call the methods of a shared {@code StoreConfig} object.

See the package summary example for an example of using a {@code StoreConfig}.
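
For example, a minimal sketch of a typical configuration (the env variable and store name are hypothetical):

+ * StoreConfig config = new StoreConfig();
+ * config.setAllowCreate(true);    // create the store if it does not exist
+ * config.setTransactional(true);  // requires a transactional environment
+ * EntityStore store = new EntityStore(env, "myStore", config);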

        + * + * @author Mark Hayes + */ +public class StoreConfig implements Cloneable { + + /** + * The default store configuration containing properties as if the + * configuration were constructed and not modified. + */ + public static final StoreConfig DEFAULT = new StoreConfig(); + + private boolean allowCreate; + private boolean exclusiveCreate; + private boolean transactional; + private boolean readOnly; + /* */ + private boolean replicated = true; + private boolean deferredWrite; + private boolean temporary; + /* */ + private boolean secondaryBulkLoad; + private EntityModel model; + private Mutations mutations; + private DatabaseNamer databaseNamer = DatabaseNamer.DEFAULT; + + /** + * Creates an entity store configuration object with default properties. + */ + public StoreConfig() { + } + + /** + * Returns a shallow copy of the configuration. + * + * @return the clone. + * + * @deprecated As of JE 4.0.13, replaced by {@link StoreConfig#clone()}. + */ + public StoreConfig cloneConfig() { + try { + return (StoreConfig) super.clone(); + } catch (CloneNotSupportedException cannotHappen) { + return null; + } + } + + /** + * Returns a shallow copy of the configuration. + */ + @Override + public StoreConfig clone() { + try { + return (StoreConfig) super.clone(); + } catch (CloneNotSupportedException cannotHappen) { + return null; + } + } + + /** + * Specifies whether creation of a new store is allowed. By default this + * property is false. + * + *

If this property is false and the internal store metadata database does not exist, {@link DatabaseException} will be thrown when the store is opened.

        + * + * @param allowCreate whether creation of a new store is allowed. + * + * @return 'this'. + */ + public StoreConfig setAllowCreate(boolean allowCreate) { + setAllowCreateVoid(allowCreate); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param allowCreate whether creation of a new store is allowed. + */ + public void setAllowCreateVoid(boolean allowCreate) { + this.allowCreate = allowCreate; + } + + /** + * Returns whether creation of a new store is allowed. + * + * @return whether creation of a new store is allowed. + */ + public boolean getAllowCreate() { + return allowCreate; + } + + /** + * Specifies whether opening an existing store is prohibited. By default + * this property is false. + * + *

If this property is true and the internal store metadata database already exists, {@link DatabaseException} will be thrown when the store is opened.

        + * + * @param exclusiveCreate whether opening an existing store is prohibited. + * + * @return 'this'. + */ + public StoreConfig setExclusiveCreate(boolean exclusiveCreate) { + setExclusiveCreateVoid(exclusiveCreate); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param exclusiveCreate whether opening an existing store is prohibited. + */ + public void setExclusiveCreateVoid(boolean exclusiveCreate) { + this.exclusiveCreate = exclusiveCreate; + } + + /** + * Returns whether opening an existing store is prohibited. + * + * @return whether opening an existing store is prohibited. + */ + public boolean getExclusiveCreate() { + return exclusiveCreate; + } + + /** + * Sets the transactional configuration property. By default this property + * is false. + * + *

This property is true to open all store indices for transactional access. True may not be specified if the environment is not also transactional.

        + * + * @param transactional whether the store is transactional. + * + * @return 'this'. + */ + public StoreConfig setTransactional(boolean transactional) { + setTransactionalVoid(transactional); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param transactional whether the store is transactional. + */ + public void setTransactionalVoid(boolean transactional) { + this.transactional = transactional; + } + + /** + * Returns the transactional configuration property. + * + * @return whether the store is transactional. + */ + public boolean getTransactional() { + return transactional; + } + + /** + * Sets the read-only configuration property. By default this property is + * false. + * + *

This property is true to open all store indices for read-only access, or false to open them for read-write access. False may not be specified if the environment is read-only.

        + * + * @param readOnly whether the store is read-only. + * + * @return 'this'. + */ + public StoreConfig setReadOnly(boolean readOnly) { + setReadOnlyVoid(readOnly); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param readOnly whether the store is read-only. + */ + public void setReadOnlyVoid(boolean readOnly) { + this.readOnly = readOnly; + } + + /** + * Returns the read-only configuration property. + * + * @return whether the store is read-only. + */ + public boolean getReadOnly() { + return readOnly; + } + + /* */ + /** + * Configures a store to be replicated or non-replicated, in a replicated + * Environment. By default this property is true, meaning that by default + * a store is replicated in a replicated Environment. + *

        + * In a non-replicated Environment, this property is ignored. All stores + * are non-replicated in a non-replicated Environment. + * + * @param replicated whether the store is replicated. + * + * @return 'this'. + * + * @see + * Non-replicated + * Databases in a Replicated Environment + */ + public StoreConfig setReplicated(boolean replicated) { + setReplicatedVoid(replicated); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + * + * @param replicated whether the store is replicated. + */ + public void setReplicatedVoid(boolean replicated) { + this.replicated = replicated; + } + + /** + * Returns the replicated property for the store. + *

        + * This method returns true by default. However, in a non-replicated + * Environment, this property is ignored. All stores are non-replicated + * in a non-replicated Environment. + * + * @return whether the store is replicated. + * + * @see #setReplicated + */ + public boolean getReplicated() { + return replicated; + } + + /** + * Sets the deferred-write configuration property. By default this + * property is false. + * + *

This property is true to open all store index databases for deferred-write access. True may not be specified if the store is transactional.

Deferred write stores avoid disk I/O and are not guaranteed to be persistent until {@link EntityStore#sync} or {@link Environment#sync} is called or the store is closed normally. This mode is particularly geared toward stores that frequently modify and delete data records. See the Getting Started Guide, Database chapter for a full description of the mode.
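
For example, a minimal sketch of a deferred-write store (the env variable and store name are hypothetical):

+ * StoreConfig config = new StoreConfig();
+ * config.setAllowCreate(true);
+ * config.setDeferredWrite(true);  // may not be combined with setTransactional(true)
+ * EntityStore store = new EntityStore(env, "scratchStore", config);
+ * // ... many inserts, updates and deletes ...
+ * store.sync();                   // flush deferred writes to disk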

        + * + * @param deferredWrite whether the store is deferred-write. + * + * @return 'this'. + * + * @see #setTransactional + */ + public StoreConfig setDeferredWrite(boolean deferredWrite) { + setDeferredWriteVoid(deferredWrite); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + * + * @param deferredWrite whether the store is deferred-write. + */ + public void setDeferredWriteVoid(boolean deferredWrite) { + this.deferredWrite = deferredWrite; + } + + /** + * Returns the deferred-write configuration property. + * + * @return whether the store is deferred-write. + */ + public boolean getDeferredWrite() { + return deferredWrite; + } + + /** + * Sets the temporary configuration property. By default this property is + * false. + * + *

This property is true to open all store databases as temporary databases. True may not be specified if the store is transactional.

Temporary stores avoid disk I/O and are not persistent -- they are deleted when the store is closed or after a crash. This mode is particularly geared toward in-memory stores. See the Getting Started Guide, Database chapter for a full description of the mode.

        + * + * @param temporary whether the store is temporary. + * + * @return 'this'. + * + * @see #setTransactional + */ + public StoreConfig setTemporary(boolean temporary) { + setTemporaryVoid(temporary); + return this; + } + + /** + * @hidden + * The void return setter for use by Bean editors. + * + * @param temporary whether the store is temporary. + */ + public void setTemporaryVoid(boolean temporary) { + this.temporary = temporary; + } + + /** + * Returns the temporary configuration property. + * + * @return whether the store is temporary. + */ + public boolean getTemporary() { + return temporary; + } + /* */ + + /** + * Sets the bulk-load-secondaries configuration property. By default this + * property is false. + * + *

This property is true to cause the initial creation of secondary indices to be performed as a bulk load. If this property is true and {@link EntityStore#getSecondaryIndex EntityStore.getSecondaryIndex} has never been called for a secondary index, that secondary index will not be created or written as records are written to the primary index. In addition, if that secondary index defines a foreign key constraint, the constraint will not be enforced.

The secondary index will be populated later when the {@code getSecondaryIndex} method is called for the first time for that index, or when the store is closed and re-opened with this property set to false and the primary index is obtained. In either case, the secondary index is populated by reading through the entire primary index and adding records to the secondary index as needed. While populating the secondary index, foreign key constraints will be enforced and an exception will be thrown if a constraint is violated.

When loading a primary index along with secondary indexes from a large input data set, configuring a bulk load of the secondary indexes is sometimes more performant than updating the secondary indexes each time the primary index is updated. The absence of foreign key constraints during the load also provides more flexibility.
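
For example, a minimal sketch of a bulk load (the env variable and store name are hypothetical; the Employee class is taken from the examples above):

+ * StoreConfig config = new StoreConfig();
+ * config.setAllowCreate(true);
+ * config.setSecondaryBulkLoad(true);
+ * EntityStore store = new EntityStore(env, "myStore", config);
+ * PrimaryIndex<Long, Employee> primaryIndex =
+ *     store.getPrimaryIndex(Long.class, Employee.class);
+ * // ... load the input data set via primaryIndex.putNoReturn(...) ...
+ * // The first call for each secondary index populates it in a single pass:
+ * SecondaryIndex<String, Long, Employee> byDepartment =
+ *     store.getSecondaryIndex(primaryIndex, String.class, "department");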

        + * + * @param secondaryBulkLoad whether bulk-load-secondaries is used. + * + * @return 'this'. + */ + public StoreConfig setSecondaryBulkLoad(boolean secondaryBulkLoad) { + setSecondaryBulkLoadVoid(secondaryBulkLoad); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param secondaryBulkLoad whether bulk-load-secondaries is used. + */ + public void setSecondaryBulkLoadVoid(boolean secondaryBulkLoad) { + this.secondaryBulkLoad = secondaryBulkLoad; + } + + /** + * Returns the bulk-load-secondaries configuration property. + * + * @return whether bulk-load-secondaries is used. + */ + public boolean getSecondaryBulkLoad() { + return secondaryBulkLoad; + } + + /** + * Sets the entity model that defines entity classes and index keys. + * + *

If null is specified or this method is not called, an {@link AnnotationModel} instance is used by default.

        + * + * @param model the EntityModel. + * + * @return 'this'. + */ + public StoreConfig setModel(EntityModel model) { + setModelVoid(model); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param model the EntityModel. + */ + public void setModelVoid(EntityModel model) { + this.model = model; + } + + /** + * Returns the entity model that defines entity classes and index keys. + * + * @return the EntityModel. + */ + public EntityModel getModel() { + return model; + } + + /** + * Configures mutations for performing lazy evolution of stored instances. + * Existing mutations for this store are not cleared, so the mutations + * required are only those changes that have been made since the store was + * last opened. Some new mutations may override existing specifications, + * and some may be supplemental. + * + *

If null is specified and the store already exists, the previously specified mutations are used. The mutations are stored persistently in serialized form.

Mutations must be available to handle all changes to classes that are incompatible with the class definitions known to this store. See {@link Mutations} and {@link com.sleepycat.persist.evolve Class Evolution} for more information.

If an incompatible class change has been made and mutations are not available for handling the change, {@link IncompatibleClassException} will be thrown when creating an {@link EntityStore}.
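
For example, a minimal sketch of supplying a class-renaming mutation (the class names and version number are hypothetical):

+ * Mutations mutations = new Mutations();
+ * mutations.addRenamer(new Renamer("com.example.Emp", 0, "com.example.Employee"));
+ *
+ * StoreConfig config = new StoreConfig();
+ * config.setMutations(mutations); // applied lazily as stored instances are read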

        + * + * @param mutations the Mutations. + * + * @return 'this'. + */ + public StoreConfig setMutations(Mutations mutations) { + setMutationsVoid(mutations); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param mutations the Mutations. + */ + public void setMutationsVoid(Mutations mutations) { + this.mutations = mutations; + } + + /** + * Returns the configured mutations for performing lazy evolution of stored + * instances. + * + * @return the Mutations. + */ + public Mutations getMutations() { + return mutations; + } + + /** + * + * @hidden + * + * Specifies the object reponsible for naming of files and databases. + * + * By default this property is {@link DatabaseNamer#DEFAULT}. + * + * @param databaseNamer the DatabaseNamer. + * + * @return 'this'. + * + * @throws NullPointerException if a null parameter value is passed. + */ + public StoreConfig setDatabaseNamer(DatabaseNamer databaseNamer) { + setDatabaseNamerVoid(databaseNamer); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param databaseNamer the DatabaseNamer. + */ + public void setDatabaseNamerVoid(DatabaseNamer databaseNamer) { + if (databaseNamer == null) { + throw new NullPointerException(); + } + this.databaseNamer = databaseNamer; + } + + /** + * + * @hidden + * + * Returns the object reponsible for naming of files and databases. + * + * @return the DatabaseNamer. + */ + public DatabaseNamer getDatabaseNamer() { + return databaseNamer; + } +} diff --git a/src/com/sleepycat/persist/StoreConfigBeanInfo.java b/src/com/sleepycat/persist/StoreConfigBeanInfo.java new file mode 100644 index 0000000..a7dcf66 --- /dev/null +++ b/src/com/sleepycat/persist/StoreConfigBeanInfo.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +public class StoreConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(StoreConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(StoreConfig.class); + } +} diff --git a/src/com/sleepycat/persist/StoreExistsException.java b/src/com/sleepycat/persist/StoreExistsException.java new file mode 100644 index 0000000..57e5f13 --- /dev/null +++ b/src/com/sleepycat/persist/StoreExistsException.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist; + +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown by the {@link EntityStore} constructor when the {@link + * StoreConfig#setExclusiveCreate ExclusiveCreate} configuration parameter is + * true and the store's internal catalog database already exists. + * + * @author Mark Hayes + */ +public class StoreExistsException extends OperationFailureException { + + private static final long serialVersionUID = 1; + + /** + * For internal use only. + * + * @hidden + * + */ + public StoreExistsException(String message) { + super(message); + } + + /* */ + + /** + * For internal use only. + * @hidden + */ + private StoreExistsException(String message, + OperationFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new StoreExistsException(msg, this); + } + + /* */ +} diff --git a/src/com/sleepycat/persist/StoreNotFoundException.java b/src/com/sleepycat/persist/StoreNotFoundException.java new file mode 100644 index 0000000..015344c --- /dev/null +++ b/src/com/sleepycat/persist/StoreNotFoundException.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.je.OperationFailureException; + +/** + * Thrown by the {@link EntityStore} constructor when the {@link + * StoreConfig#setAllowCreate AllowCreate} configuration parameter is false and + * the store's internal catalog database does not exist. + * + * @author Mark Hayes + */ +public class StoreNotFoundException extends OperationFailureException { + + private static final long serialVersionUID = 1895430616L; + + /** + * For internal use only. + * + * @hidden + * + */ + public StoreNotFoundException(String message) { + super(message); + } + + /* */ + + /** + * For internal use only. + * + * @hidden + * + */ + private StoreNotFoundException(String message, + OperationFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * + * @hidden + * + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new StoreNotFoundException(msg, this); + } + + /* */ +} diff --git a/src/com/sleepycat/persist/SubIndex.java b/src/com/sleepycat/persist/SubIndex.java new file mode 100644 index 0000000..e94a3d7 --- /dev/null +++ b/src/com/sleepycat/persist/SubIndex.java @@ -0,0 +1,422 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist; + +import java.util.Map; +import java.util.SortedMap; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.compat.DbCompat.OpResult; +import com.sleepycat.compat.DbCompat.OpWriteOptions; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DbInternal; +/* */ +import com.sleepycat.je.Environment; +/* */ +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.OperationResult; +/* */ +import com.sleepycat.je.OperationStatus; +/* */ +import com.sleepycat.je.ReadOptions; +/* */ +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.Transaction; +/* */ +import com.sleepycat.je.WriteOptions; +/* */ +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.keyrange.RangeCursor; + +/** + * The EntityIndex returned by SecondaryIndex.subIndex. A SubIndex, in JE + * internal terms, is a duplicates btree for a single key in the main btree. + * From the user's viewpoint, the keys are primary keys. This class implements + * that viewpoint. In general, getSearchBoth and getSearchBothRange are used + * where in a normal index getSearchKey and getSearchRange would be used. The + * main tree key is always implied, not passed as a parameter. + * + * @author Mark Hayes + */ +class SubIndex implements EntityIndex { + + private SecondaryIndex secIndex; + private SecondaryDatabase db; + private boolean transactional; + private boolean sortedDups; + private boolean locking; + private boolean concurrentDB; + private DatabaseEntry keyEntry; + private Object keyObject; + private KeyRange singleKeyRange; + private EntryBinding pkeyBinding; + private KeyRange emptyPKeyRange; + private EntityBinding entityBinding; + private ValueAdapter keyAdapter; + private ValueAdapter entityAdapter; + private SortedMap map; + + SubIndex(SecondaryIndex secIndex, + EntityBinding entityBinding, + SK key) + throws DatabaseException { + + this.secIndex = secIndex; + db = secIndex.getDatabase(); + transactional = secIndex.transactional; + sortedDups = secIndex.sortedDups; + locking = + DbCompat.getInitializeLocking(db.getEnvironment().getConfig()); + Environment env = db.getEnvironment(); + concurrentDB = DbCompat.getInitializeCDB(env.getConfig()); + keyObject = key; + keyEntry = new DatabaseEntry(); + secIndex.keyBinding.objectToEntry(key, keyEntry); + singleKeyRange = secIndex.emptyRange.subRange(keyEntry); + + PrimaryIndex priIndex = secIndex.getPrimaryIndex(); + pkeyBinding = priIndex.keyBinding; + emptyPKeyRange = priIndex.emptyRange; + this.entityBinding = entityBinding; + + keyAdapter = new PrimaryKeyValueAdapter + (priIndex.keyClass, priIndex.keyBinding); + entityAdapter = secIndex.entityAdapter; + } + + public Database getDatabase() { + return db; + } + + public boolean contains(PK key) + throws DatabaseException { + + return contains(null, key, null); + } + + public boolean contains(Transaction txn, PK key, LockMode lockMode) + throws DatabaseException { + + DatabaseEntry pkeyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = BasicIndex.NO_RETURN_ENTRY; + pkeyBinding.objectToEntry(key, pkeyEntry); + + OperationStatus status = + db.getSearchBoth(txn, keyEntry, 
pkeyEntry, dataEntry, lockMode); + return (status == OperationStatus.SUCCESS); + } + + public E get(PK key) + throws DatabaseException { + + return get(null, key, null); + } + + public E get(Transaction txn, PK key, LockMode lockMode) + throws DatabaseException { + + /* */ + if (DbCompat.IS_JE) { + EntityResult result = get( + txn, key, Get.SEARCH, DbInternal.getReadOptions(lockMode)); + return result != null ? result.value() : null; + } + /* */ + + DatabaseEntry pkeyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + pkeyBinding.objectToEntry(key, pkeyEntry); + + OperationStatus status = + db.getSearchBoth(txn, keyEntry, pkeyEntry, dataEntry, lockMode); + + if (status == OperationStatus.SUCCESS) { + return (E) entityBinding.entryToObject(pkeyEntry, dataEntry); + } else { + return null; + } + } + + /* */ + public EntityResult get(Transaction txn, + PK key, + Get getType, + ReadOptions options) + throws DatabaseException { + + BasicIndex.checkGetType(getType); + + DatabaseEntry pkeyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + pkeyBinding.objectToEntry(key, pkeyEntry); + + OperationResult result = db.get( + txn, keyEntry, pkeyEntry, dataEntry, Get.SEARCH_BOTH, options); + + if (result != null) { + return new EntityResult<>( + (E) entityBinding.entryToObject(pkeyEntry, dataEntry), + result); + } else { + return null; + } + } + /* */ + + public long count() + throws DatabaseException { + + CursorConfig cursorConfig = locking ? + CursorConfig.READ_UNCOMMITTED : null; + EntityCursor cursor = keys(null, cursorConfig); + try { + if (cursor.next() != null) { + return cursor.count(); + } else { + return 0; + } + } finally { + cursor.close(); + } + } + + /* */ + + public long count(long memoryLimit) + throws DatabaseException { + + return count(); + } + + /* */ + + public boolean delete(PK key) + throws DatabaseException { + + return delete(null, key); + } + + public boolean delete(Transaction txn, PK key) + throws DatabaseException { + + return deleteInternal(txn, key, OpWriteOptions.EMPTY).isSuccess(); + } + + /* */ + public OperationResult delete(Transaction txn, + PK key, + WriteOptions options) + throws DatabaseException { + + return deleteInternal(txn, key, OpWriteOptions.make(options)).jeResult; + } + /* */ + + private OpResult deleteInternal(Transaction txn, + PK key, + OpWriteOptions options) + throws DatabaseException { + + DatabaseEntry pkeyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = BasicIndex.NO_RETURN_ENTRY; + pkeyBinding.objectToEntry(key, pkeyEntry); + + boolean autoCommit = false; + Environment env = db.getEnvironment(); + if (transactional && + txn == null && + DbCompat.getThreadTransaction(env) == null) { + txn = env.beginTransaction + (null, secIndex.getAutoCommitTransactionConfig()); + autoCommit = true; + } + + boolean failed = true; + CursorConfig cursorConfig = null; + if (concurrentDB) { + cursorConfig = new CursorConfig(); + DbCompat.setWriteCursor(cursorConfig, true); + } + SecondaryCursor cursor = db.openSecondaryCursor(txn, cursorConfig); + try { + /* */ + if (DbCompat.IS_JE) { + ReadOptions readOptions; + if (options.jeOptions != null && + options.jeOptions.getCacheMode() != null) { + readOptions = new ReadOptions(); + readOptions.setLockMode(LockMode.RMW); + readOptions.setCacheMode(options.jeOptions.getCacheMode()); + } else { + readOptions = LockMode.RMW.toReadOptions(); + } + OperationResult result = cursor.get( + keyEntry, pkeyEntry, dataEntry, Get.SEARCH_BOTH, + readOptions); + if (result 
!= null) { + result = cursor.delete(options.jeOptions); + } + failed = false; + return OpResult.make(result); + } + /* */ + OperationStatus status = cursor.getSearchBoth + (keyEntry, pkeyEntry, dataEntry, + locking ? LockMode.RMW : null); + if (status == OperationStatus.SUCCESS) { + status = cursor.delete(); + } + failed = false; + return OpResult.make(status); + } finally { + cursor.close(); + if (autoCommit) { + if (failed) { + txn.abort(); + } else { + txn.commit(); + } + } + } + } + + public EntityCursor keys() + throws DatabaseException { + + return keys(null, null); + } + + public EntityCursor keys(Transaction txn, CursorConfig config) + throws DatabaseException { + + return cursor(txn, null, keyAdapter, config); + } + + public EntityCursor entities() + throws DatabaseException { + + return cursor(null, null, entityAdapter, null); + } + + public EntityCursor entities(Transaction txn, + CursorConfig config) + throws DatabaseException { + + return cursor(txn, null, entityAdapter, config); + } + + public EntityCursor keys(PK fromKey, + boolean fromInclusive, + PK toKey, + boolean toInclusive) + throws DatabaseException { + + return cursor(null, fromKey, fromInclusive, toKey, toInclusive, + keyAdapter, null); + } + + public EntityCursor keys(Transaction txn, + PK fromKey, + boolean fromInclusive, + PK toKey, + boolean toInclusive, + CursorConfig config) + throws DatabaseException { + + return cursor(txn, fromKey, fromInclusive, toKey, toInclusive, + keyAdapter, config); + } + + public EntityCursor entities(PK fromKey, + boolean fromInclusive, + PK toKey, + boolean toInclusive) + throws DatabaseException { + + return cursor(null, fromKey, fromInclusive, toKey, toInclusive, + entityAdapter, null); + } + + public EntityCursor entities(Transaction txn, + PK fromKey, + boolean fromInclusive, + PK toKey, + boolean toInclusive, + CursorConfig config) + throws DatabaseException { + + return cursor(txn, fromKey, fromInclusive, toKey, toInclusive, + entityAdapter, config); + } + + private EntityCursor cursor(Transaction txn, + PK fromKey, + boolean fromInclusive, + PK toKey, + boolean toInclusive, + ValueAdapter adapter, + CursorConfig config) + throws DatabaseException { + + DatabaseEntry fromEntry = null; + if (fromKey != null) { + fromEntry = new DatabaseEntry(); + pkeyBinding.objectToEntry(fromKey, fromEntry); + } + DatabaseEntry toEntry = null; + if (toKey != null) { + toEntry = new DatabaseEntry(); + pkeyBinding.objectToEntry(toKey, toEntry); + } + KeyRange pkeyRange = emptyPKeyRange.subRange + (fromEntry, fromInclusive, toEntry, toInclusive); + return cursor(txn, pkeyRange, adapter, config); + } + + private EntityCursor cursor(Transaction txn, + KeyRange pkeyRange, + ValueAdapter adapter, + CursorConfig config) + throws DatabaseException { + + Cursor cursor = db.openCursor(txn, config); + RangeCursor rangeCursor = + new RangeCursor(singleKeyRange, pkeyRange, sortedDups, cursor); + return new SubIndexCursor(rangeCursor, adapter); + } + + public Map map() { + return sortedMap(); + } + + public synchronized SortedMap sortedMap() { + if (map == null) { + map = (SortedMap) ((StoredSortedMap) secIndex.sortedMap()). + duplicatesMap(keyObject, pkeyBinding); + } + return map; + } +} diff --git a/src/com/sleepycat/persist/SubIndexCursor.java b/src/com/sleepycat/persist/SubIndexCursor.java new file mode 100644 index 0000000..606f4ad --- /dev/null +++ b/src/com/sleepycat/persist/SubIndexCursor.java @@ -0,0 +1,91 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.LockMode; +/* */ +import com.sleepycat.je.ReadOptions; +/* */ +import com.sleepycat.util.keyrange.RangeCursor; + +/** + * The cursor for a SubIndex treats Dup and NoDup operations specially because + * the SubIndex never has duplicates -- the keys are primary keys. So a + * next/prevDup operation always returns null, and a next/prevNoDup operation + * actually does next/prev. + * + * @author Mark Hayes + */ +class SubIndexCursor extends BasicCursor { + + SubIndexCursor(RangeCursor cursor, ValueAdapter adapter) { + super(cursor, adapter, false/*updateAllowed*/); + } + + @Override + public EntityCursor dup() + throws DatabaseException { + + return new SubIndexCursor(cursor.dup(true), adapter); + } + + @Override + public V nextDup(LockMode lockMode) { + checkInitialized(); + return null; + } + + @Override + public V nextNoDup(LockMode lockMode) + throws DatabaseException { + + return next(lockMode); + } + + @Override + public V prevDup(LockMode lockMode) { + checkInitialized(); + return null; + } + + @Override + public V prevNoDup(LockMode lockMode) + throws DatabaseException { + + return prev(lockMode); + } + + /* */ + public EntityResult get(Get getType, ReadOptions options) + throws DatabaseException { + + switch (getType) { + case NEXT_DUP: + return null; + case NEXT_NO_DUP: + return super.get(Get.NEXT, options); + case PREV_DUP: + return null; + case PREV_NO_DUP: + return super.get(Get.PREV, options); + default: + return super.get(getType, options); + } + } + /* */ +} diff --git a/src/com/sleepycat/persist/ValueAdapter.java b/src/com/sleepycat/persist/ValueAdapter.java new file mode 100644 index 0000000..550004a --- /dev/null +++ b/src/com/sleepycat/persist/ValueAdapter.java @@ -0,0 +1,71 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist; + +import com.sleepycat.je.DatabaseEntry; + +/** + * An adapter that translates between database entries (key, primary key, data) + * and a "value", which may be either the key, primary key, or entity. This + * interface is used to implement a generic index and cursor (BasicIndex and + * BasicCursor). If we didn't use this approach, we would need separate index + * and cursor implementations for each type of value that can be returned. In + * other words, this interface is used to reduce class explosion. + * + * @author Mark Hayes + */ +interface ValueAdapter { + + /** + * Creates a DatabaseEntry for the key or returns null if the key is not + * needed. 
+ */ + DatabaseEntry initKey(); + + /** + * Creates a DatabaseEntry for the primary key or returns null if the + * primary key is not needed. + */ + DatabaseEntry initPKey(); + + /** + * Creates a DatabaseEntry for the data or returns null if the data is not + * needed. BasicIndex.NO_RETURN_ENTRY may be returned if the data argument + * is required but we don't need it. + */ + DatabaseEntry initData(); + + /** + * Sets the data array of the given entries to null, based on knowledge of + * which entries are non-null and are not NO_RETURN_ENTRY. + */ + void clearEntries(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data); + + /** + * Returns the appropriate "value" (key, primary key, or entity) using the + * appropriate bindings for that purpose. + */ + V entryToValue(DatabaseEntry key, + DatabaseEntry pkey, + DatabaseEntry data); + + /** + * Converts an entity value to a data entry using an entity binding, or + * throws UnsupportedOperationException if this is not appropriate. Called + * by BasicCursor.update. + */ + void valueToData(V value, DatabaseEntry data); +} diff --git a/src/com/sleepycat/persist/evolve/Conversion.java b/src/com/sleepycat/persist/evolve/Conversion.java new file mode 100644 index 0000000..c2b9b3f --- /dev/null +++ b/src/com/sleepycat/persist/evolve/Conversion.java @@ -0,0 +1,445 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import java.io.Serializable; + +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawType; + +/** + * Converts an old version of an object value to conform to the current class + * or field definition. + * + *

        The {@code Conversion} interface is implemented by the user. A + * {@code Conversion} instance is passed to the {@link Converter#Converter} + * constructor.

        + * + *

        The {@code Conversion} interface extends {@link Serializable} and the + * {@code Conversion} instance is serialized for storage using standard Java + * serialization. Normally, the {@code Conversion} class should only have + * transient fields that are initialized in the {@link #initialize} method. + * While non-transient fields are allowed, care must be taken to only include + * fields that are serializable and will not pull in large amounts of data.
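For illustration, a minimal sketch of this pattern (the class name MyTransientStateConversion and the type name my.package.Address are hypothetical, and the conversion logic is elided): all working state is transient and rebuilt in initialize, so serializing the conversion stores essentially nothing.

import com.sleepycat.persist.evolve.Conversion;
import com.sleepycat.persist.model.EntityModel;
import com.sleepycat.persist.raw.RawType;

class MyTransientStateConversion implements Conversion {
    private static final long serialVersionUID = 1L;

    /* Rebuilt on every store open by initialize(); never serialized. */
    private transient RawType addressType;

    public void initialize(EntityModel model) {
        addressType = model.getRawType("my.package.Address");
    }

    public Object convert(Object fromValue) {
        /* Real conversion logic using addressType is elided here. */
        return fromValue;
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof MyTransientStateConversion;
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }
}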

        + * + *

        When a class conversion is specified, two special considerations + * apply:

        + *
          + *
1. A class conversion is only applied to instances of that class. The + * conversion will not be applied when the class appears as a + * superclass of the instance's class. In this case, a conversion for the + * instance's class must also be specified (see the sketch after this list).
+ *
2. Although field renaming (as well as all other changes) is handled by the + * conversion method, a field Renamer is still needed when a secondary key + * field is renamed and a field Deleter is still needed when a secondary key + * field is deleted. This is necessary for evolution of the metadata; + * specifically, if the key name changes the database must be renamed and if + * the key field is deleted the secondary database must be deleted.
+ *
        + * + *

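As a sketch of the first consideration above (the class names my.package.A and my.package.B and the conversion classes ConversionForA and ConversionForB are hypothetical; assume A extends B and B changed incompatibly in version 0):

Mutations mutations = new Mutations();

/* Applied only to instances whose concrete class is B. */
mutations.addConverter(new Converter("my.package.B", 0, new ConversionForB()));

/*
 * Also required: the conversion for B is not applied when B appears as the
 * superclass of an A instance, so A needs its own conversion.
 */
mutations.addConverter(new Converter("my.package.A", 0, new ConversionForA()));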
        The {@code Conversion} class must implement the standard equals method. + * See {@link #equals} for more information.

        + * + *

        Conversions of simple types are generally simple. For example, a {@code + * String} field that contains only integer values can be easily converted to + * an {@code int} field:

        + *
        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Persistent}
        + *  class Address {
        + *      String zipCode;
        + *      ...
        + *  }
        + *
        + *  // The new class.  A new version number must be assigned.
        + *  //
        + *  {@literal @Persistent(version=1)}
        + *  class Address {
        + *      int zipCode;
        + *      ...
        + *  }
        + *
        + *  // The conversion class.
        + *  //
        + *  class MyConversion1 implements Conversion {
        + *
        + *      public void initialize(EntityModel model) {
        + *          // No initialization needed.
        + *      }
        + *
        + *      public Object convert(Object fromValue) {
        + *          return Integer.valueOf((String) fromValue);
        + *      }
        + *
        + *      {@code @Override}
        + *      public boolean equals(Object o) {
        + *          return o instanceof MyConversion1;
        + *      }
        + *  }
        + *
        + *  // Create a field converter mutation.
        + *  //
        + *  Converter converter = new Converter(Address.class.getName(), 0,
        + *                                      "zipCode", new MyConversion1());
        + *
        + *  // Configure the converter as described {@link Mutations here}.
        + * + *

        A conversion may perform arbitrary transformations on an object. For + * example, a conversion may transform a single String address field into an + * Address object containing four fields for street, city, state and zip + * code.

        + *
        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Entity}
        + *  class Person {
        + *      String address;
        + *      ...
        + *  }
        + *
        + *  // The new class.  A new version number must be assigned.
        + *  //
        + *  {@literal @Entity(version=1)}
        + *  class Person {
        + *      Address address;
        + *      ...
        + *  }
        + *
        + *  // The new address class.
        + *  //
        + *  {@literal @Persistent}
        + *  class Address {
        + *      String street;
        + *      String city;
        + *      String state;
        + *      int zipCode;
        + *      ...
        + *  }
        + *
        + *  class MyConversion2 implements Conversion {
        + *      private transient RawType addressType;
        + *
        + *      public void initialize(EntityModel model) {
        + *          addressType = model.getRawType(Address.class.getName());
        + *      }
        + *
        + *      public Object convert(Object fromValue) {
        + *
        + *          // Parse the old address and populate the new address fields
        + *          //
        + *          String oldAddress = (String) fromValue;
+ *          {@literal Map<String, Object> addressValues = new HashMap<String, Object>();}
        + *          addressValues.put("street", parseStreet(oldAddress));
        + *          addressValues.put("city", parseCity(oldAddress));
        + *          addressValues.put("state", parseState(oldAddress));
        + *          addressValues.put("zipCode", parseZipCode(oldAddress));
        + *
        + *          // Return new raw Address object
        + *          //
        + *          return new RawObject(addressType, addressValues, null);
        + *      }
        + *
        + *      {@code @Override}
        + *      public boolean equals(Object o) {
        + *          return o instanceof MyConversion2;
        + *      }
        + *
        + *      private String parseStreet(String oldAddress) { ... }
        + *      private String parseCity(String oldAddress) { ... }
        + *      private String parseState(String oldAddress) { ... }
        + *      private Integer parseZipCode(String oldAddress) { ... }
        + *  }
        + *
        + *  // Create a field converter mutation.
        + *  //
        + *  Converter converter = new Converter(Person.class.getName(), 0,
        + *                                      "address", new MyConversion2());
        + *
        + *  // Configure the converter as described {@link Mutations here}.
        + * + *

        Note that when a conversion returns a {@link RawObject}, it must return + * it with a {@link RawType} that is current as defined by the current class + * definitions. The proper types can be obtained from the {@link EntityModel} + * in the conversion's {@link #initialize initialize} method.

        + * + *

        A variation on the example above is where several fields in a class + * (street, city, state and zipCode) are converted to a single field (address). + * In this case a class converter rather than a field converter is used.

        + * + *
        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Entity}
        + *  class Person {
        + *      String street;
        + *      String city;
        + *      String state;
        + *      int zipCode;
        + *      ...
        + *  }
        + *
        + *  // The new class.  A new version number must be assigned.
        + *  //
        + *  {@literal @Entity(version=1)}
        + *  class Person {
        + *      Address address;
        + *      ...
        + *  }
        + *
        + *  // The new address class.
        + *  //
        + *  {@literal @Persistent}
        + *  class Address {
        + *      String street;
        + *      String city;
        + *      String state;
        + *      int zipCode;
        + *      ...
        + *  }
        + *
        + *  class MyConversion3 implements Conversion {
        + *      private transient RawType newPersonType;
        + *      private transient RawType addressType;
        + *
        + *      public void initialize(EntityModel model) {
        + *          newPersonType = model.getRawType(Person.class.getName());
        + *          addressType = model.getRawType(Address.class.getName());
        + *      }
        + *
        + *      public Object convert(Object fromValue) {
        + *
        + *          // Get field value maps for old and new objects.
        + *          //
        + *          RawObject person = (RawObject) fromValue;
+ *          {@literal Map<String, Object> personValues = person.getValues();}
+ *          {@literal Map<String, Object> addressValues = new HashMap<String, Object>();}
        + *          RawObject address = new RawObject(addressType, addressValues, null);
        + *
        + *          // Remove the old address fields and insert the new one.
        + *          //
        + *          addressValues.put("street", personValues.remove("street"));
        + *          addressValues.put("city", personValues.remove("city"));
        + *          addressValues.put("state", personValues.remove("state"));
        + *          addressValues.put("zipCode", personValues.remove("zipCode"));
        + *          personValues.put("address", address);
        + *
        + *          return new RawObject(newPersonType, personValues, person.getSuper());
        + *      }
        + *
        + *      {@code @Override}
        + *      public boolean equals(Object o) {
        + *          return o instanceof MyConversion3;
        + *      }
        + *  }
        + *
        + *  // Create a class converter mutation.
        + *  //
        + *  Converter converter = new Converter(Person.class.getName(), 0,
        + *                                      new MyConversion3());
        + *
        + *  // Configure the converter as described {@link Mutations here}.
        + * + * + *

        A conversion can also handle changes to class hierarchies. For example, + * if a "name" field originally declared in class A is moved to its superclass + * B, a conversion can move the field value accordingly:

        + * + *
        + *  // The old classes.  Version 0 is implied.
        + *  //
        + *  {@literal @Persistent}
        + *  class A extends B {
        + *      String name;
        + *      ...
        + *  }
        + *  {@literal @Persistent}
        + *  abstract class B {
        + *      ...
        + *  }
        + *
        + *  // The new classes.  A new version number must be assigned.
        + *  //
        + *  {@literal @Persistent(version=1)}
        + *  class A extends B {
        + *      ...
        + *  }
        + *  {@literal @Persistent(version=1)}
        + *  abstract class B {
        + *      String name;
        + *      ...
        + *  }
        + *
        + *  class MyConversion4 implements Conversion {
        + *      private transient RawType newAType;
        + *      private transient RawType newBType;
        + *
        + *      public void initialize(EntityModel model) {
        + *          newAType = model.getRawType(A.class.getName());
        + *          newBType = model.getRawType(B.class.getName());
        + *      }
        + *
        + *      public Object convert(Object fromValue) {
        + *          RawObject oldA = (RawObject) fromValue;
        + *          RawObject oldB = oldA.getSuper();
+ *          {@literal Map<String, Object> aValues = oldA.getValues();}
+ *          {@literal Map<String, Object> bValues = oldB.getValues();}
        + *          bValues.put("name", aValues.remove("name"));
        + *          RawObject newB = new RawObject(newBType, bValues, oldB.getSuper());
        + *          RawObject newA = new RawObject(newAType, aValues, newB);
        + *          return newA;
        + *      }
        + *
        + *      {@code @Override}
        + *      public boolean equals(Object o) {
        + *          return o instanceof MyConversion4;
        + *      }
        + *  }
        + *
        + *  // Create a class converter mutation.
        + *  //
        + *  Converter converter = new Converter(A.class.getName(), 0,
        + *                                      new MyConversion4());
        + *
        + *  // Configure the converter as described {@link Mutations here}.
        + * + *

        A conversion may return an instance of a different class entirely, as + * long as it conforms to current class definitions and is the type expected + * in the given context (a subtype of the old type, or a type compatible with + * the new field type). For example, a field that is used to discriminate + * between two types of objects could be removed and replaced by two new + * subclasses:

        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Persistent}
        + *  class Pet {
        + *      boolean isCatNotDog;
        + *      ...
        + *  }
        + *
        + *  // The new classes.  A new version number must be assigned to the Pet class.
        + *  //
        + *  {@literal @Persistent(version=1)}
        + *  class Pet {
        + *      ...
        + *  }
        + *  {@literal @Persistent}
        + *  class Cat extends Pet {
        + *      ...
        + *  }
        + *  {@literal @Persistent}
        + *  class Dog extends Pet {
        + *      ...
        + *  }
        + *
        + *  class MyConversion5 implements Conversion {
        + *      private transient RawType newPetType;
        + *      private transient RawType dogType;
        + *      private transient RawType catType;
        + *
        + *      public void initialize(EntityModel model) {
        + *          newPetType = model.getRawType(Pet.class.getName());
        + *          dogType = model.getRawType(Dog.class.getName());
        + *          catType = model.getRawType(Cat.class.getName());
        + *      }
        + *
        + *      public Object convert(Object fromValue) {
        + *          RawObject pet = (RawObject) fromValue;
+ *          {@literal Map<String, Object> petValues = pet.getValues();}
        + *          Boolean isCat = (Boolean) petValues.remove("isCatNotDog");
        + *          RawObject newPet = new RawObject(newPetType, petValues,
        + *                                           pet.getSuper());
        + *          RawType newSubType = isCat ? catType : dogType;
        + *          return new RawObject(newSubType, Collections.emptyMap(), newPet);
        + *      }
        + *
        + *      {@code @Override}
        + *      public boolean equals(Object o) {
        + *          return o instanceof MyConversion5;
        + *      }
        + *  }
        + *
        + *  // Create a class converter mutation.
        + *  //
        + *  Converter converter = new Converter(Pet.class.getName(), 0,
        + *                                      new MyConversion5());
        + *
        + *  // Configure the converter as described {@link Mutations here}.
        + * + *

        The primary limitation of a conversion is that it may access at most a + * single entity instance at one time. Conversions involving multiple entities + * at once may be made by performing a store conversion.

        + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public interface Conversion extends Serializable { + + /** + * Initializes the conversion, allowing it to obtain raw type information + * from the entity model. + * + * @param model the EntityModel. + */ + void initialize(EntityModel model); + + /** + * Converts an old version of an object value to conform to the current + * class or field definition. + * + *

If a {@link RuntimeException} is thrown by this method, it will be + * thrown to the original caller. Similarly, an {@link + * IllegalArgumentException} will be thrown to the original caller if the + * object returned by this method does not conform to current class + * definitions.

        + * + *

        The class of the input and output object may be one of the simple + * types or {@link RawObject}. For primitive types, the primitive wrapper + * class is used.

+ * + * @param fromValue the object value being converted. The type of this + * value is defined by the old class version that is being converted. + * + * @return the converted object. The type of this value must conform to + * a current class definition. If this is a class conversion, it must + * be the current version of the class. If this is a field conversion, it + * must be of a type compatible with the current declared type of the + * field. + */ + Object convert(Object fromValue); + + /** + * The standard {@code equals} method that must be implemented by the + * conversion class. + * + *

When mutations are specified while opening a store, the specified and + * previously stored mutations are compared for equality. If they are + * equal, there is no need to replace the existing mutations in the stored + * catalog. To accurately determine equality, the conversion class must + * implement the {@code equals} method.

        + * + *

        If the {@code equals} method is not explicitly implemented by the + * conversion class or a superclass other than {@code Object}, {@code + * IllegalArgumentException} will be thrown when the store is opened.

        + * + *

        Normally whenever {@code equals} is implemented the {@code hashCode} + * method should also be implemented to support hash sets and maps. + * However, hash sets and maps containing Conversion objects + * are not used by the DPL and therefore the DPL does not require + * {@code hashCode} to be implemented.
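As a hedged illustration (the class ScaledIntConversion is hypothetical): a conversion that carries serializable configuration state should compare that state in equals, since two instances of the same class are otherwise not interchangeable; hashCode is included as good practice even though the DPL does not require it.

import com.sleepycat.persist.evolve.Conversion;
import com.sleepycat.persist.model.EntityModel;

class ScaledIntConversion implements Conversion {
    private static final long serialVersionUID = 1L;

    /* Small, serializable configuration state. */
    private final int factor;

    ScaledIntConversion(int factor) {
        this.factor = factor;
    }

    public void initialize(EntityModel model) {
        /* No raw type information is needed. */
    }

    public Object convert(Object fromValue) {
        return ((Integer) fromValue) * factor;
    }

    @Override
    public boolean equals(Object o) {
        /* Compare the configuration, not just the class. */
        return (o instanceof ScaledIntConversion) &&
               ((ScaledIntConversion) o).factor == factor;
    }

    @Override
    public int hashCode() {
        return factor;
    }
}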

        + */ + boolean equals(Object other); +} diff --git a/src/com/sleepycat/persist/evolve/Converter.java b/src/com/sleepycat/persist/evolve/Converter.java new file mode 100644 index 0000000..18367d0 --- /dev/null +++ b/src/com/sleepycat/persist/evolve/Converter.java @@ -0,0 +1,143 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import java.lang.reflect.Method; + +import com.sleepycat.compat.DbCompat; + +/** + * A mutation for converting an old version of an object value to conform to + * the current class or field definition. For example: + * + *
        + *  package my.package;
        + *
        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Entity}
        + *  class Person {
        + *      // ...
        + *  }
        + *
        + *  // The new class.  A new version number must be assigned.
        + *  //
        + *  {@literal @Entity(version=1)}
        + *  class Person {
        + *      // Incompatible changes were made here...
        + *  }
        + *
        + *  // Add a converter mutation.
        + *  //
        + *  Mutations mutations = new Mutations();
        + *
        + *  mutations.addConverter(new Converter(Person.class.getName(), 0,
        + *                                       new MyConversion()));
        + *
        + *  // Configure the mutations as described {@link Mutations here}.
        + * + *

        See {@link Conversion} for more information.

        + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class Converter extends Mutation { + + private static final long serialVersionUID = 4558176842096181863L; + + private Conversion conversion; + + /** + * Creates a mutation for converting all instances of the given class + * version to the current version of the class. + * + * @param className the class to which this mutation applies. + * @param classVersion the class version to which this mutation applies. + * @param conversion converter instance. + */ + public Converter(String className, + int classVersion, + Conversion conversion) { + this(className, classVersion, null, conversion); + } + + /** + * Creates a mutation for converting all values of the given field in the + * given class version to a type compatible with the current declared type + * of the field. + * + * @param declaringClassName the class to which this mutation applies. + * @param declaringClassVersion the class version to which this mutation + * applies. + * @param fieldName field name to which this mutation applies. + * @param conversion converter instance. + */ + public Converter(String declaringClassName, + int declaringClassVersion, + String fieldName, + Conversion conversion) { + super(declaringClassName, declaringClassVersion, fieldName); + this.conversion = conversion; + + /* Require explicit implementation of the equals method. */ + Class cls = conversion.getClass(); + try { + Method m = cls.getMethod("equals", Object.class); + if (m.getDeclaringClass() == Object.class) { + throw new IllegalArgumentException + ("Conversion class does not implement the equals method " + + "explicitly (Object.equals is not sufficient): " + + cls.getName()); + } + } catch (NoSuchMethodException e) { + throw DbCompat.unexpectedException(e); + } + } + + /** + * Returns the converter instance specified to the constructor. + * + * @return the converter instance. + */ + public Conversion getConversion() { + return conversion; + } + + /** + * Returns true if the conversion objects are equal in this object and + * given object, and if the {@link Mutation#equals} superclass method + * returns true. + */ + @Override + public boolean equals(Object other) { + if (other instanceof Converter) { + Converter o = (Converter) other; + return conversion.equals(o.conversion) && + super.equals(other); + } else { + return false; + } + } + + @Override + public int hashCode() { + return conversion.hashCode() + super.hashCode(); + } + + @Override + public String toString() { + return "[Converter " + super.toString() + + " Conversion: " + conversion + ']'; + } +} diff --git a/src/com/sleepycat/persist/evolve/DeletedClassException.java b/src/com/sleepycat/persist/evolve/DeletedClassException.java new file mode 100644 index 0000000..f198870 --- /dev/null +++ b/src/com/sleepycat/persist/evolve/DeletedClassException.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.evolve; + +import com.sleepycat.je.OperationFailureException; + +/** + * While reading from an index, an instance of a deleted class version was + * encountered. + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class DeletedClassException extends OperationFailureException { + + private static final long serialVersionUID = 518500929L; + + /** + * For internal use only. + * + * @hidden + * + */ + public DeletedClassException(String message) { + super(message); + } + + /* */ + + /** + * For internal use only. + * @hidden + */ + private DeletedClassException(String message, + OperationFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new DeletedClassException(msg, this); + } + + /* */ +} diff --git a/src/com/sleepycat/persist/evolve/Deleter.java b/src/com/sleepycat/persist/evolve/Deleter.java new file mode 100644 index 0000000..29b425b --- /dev/null +++ b/src/com/sleepycat/persist/evolve/Deleter.java @@ -0,0 +1,108 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +/** + * A mutation for deleting an entity class or field. + * + *

        WARNING: The data for the deleted class or field will be + * destroyed and will be recoverable only by restoring from backup. If you + * wish to convert the instance data to a different type or format, use a + * {@link Conversion} mutation instead.

        + * + *

        For example, to delete a field:

        + * + *
        + *  package my.package;
        + *
        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Entity}
        + *  class Person {
        + *      String name;
        + *      String favoriteColors;
        + *  }
        + *
        + *  // The new class.  A new version number must be assigned.
        + *  //
        + *  {@literal @Entity(version=1)}
        + *  class Person {
        + *      String name;
        + *  }
        + *
        + *  // Add the mutation for deleting a field.
        + *  //
        + *  Mutations mutations = new Mutations();
        + *
        + *  mutations.addDeleter(new Deleter(Person.class.getName(), 0,
+ *                                   "favoriteColors"));
        + *
        + *  // Configure the mutations as described {@link Mutations here}.
        + * + *

        To delete an entity class:

        + * + *
        + *  package my.package;
        + *
        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Entity}
        + *  class Statistics {
        + *      ...
        + *  }
        + *
        + *  // Add the mutation for deleting a class.
        + *  //
        + *  Mutations mutations = new Mutations();
        + *
        + *  mutations.addDeleter(new Deleter("my.package.Statistics", 0));
        + *
        + *  // Configure the mutations as described {@link Mutations here}.
        + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class Deleter extends Mutation { + + private static final long serialVersionUID = 446348511871654947L; + + /** + * Creates a mutation for deleting an entity class. + * + * @param className the class to which this mutation applies. + * @param classVersion the class version to which this mutation applies. + */ + public Deleter(String className, int classVersion) { + super(className, classVersion, null); + } + + /** + * Creates a mutation for deleting the given field from all instances of + * the given class version. + * + * @param declaringClass the class to which this mutation applies. + * @param declaringClassVersion the class version to which this mutation + * applies. + * @param fieldName field name to which this mutation applies. + */ + public Deleter(String declaringClass, int declaringClassVersion, + String fieldName) { + super(declaringClass, declaringClassVersion, fieldName); + } + + @Override + public String toString() { + return "[Deleter " + super.toString() + ']'; + } +} diff --git a/src/com/sleepycat/persist/evolve/EntityConverter.java b/src/com/sleepycat/persist/evolve/EntityConverter.java new file mode 100644 index 0000000..653dfe2 --- /dev/null +++ b/src/com/sleepycat/persist/evolve/EntityConverter.java @@ -0,0 +1,100 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +/** + * A subclass of Converter that allows specifying keys to be deleted. + * + *

        When a Converter is used with an entity class, secondary keys cannot be + * automatically deleted based on field deletion, because field Deleter objects + * are not used in conjunction with a Converter mutation. The EntityConverter + * can be used instead of a plain Converter to specify the key names to be + * deleted.

        + * + *

        It is not currently possible to rename or insert secondary keys when + * using a Converter mutation with an entity class.
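A usage sketch (the entity class my.package.Person, the conversion class MyPersonConversion, and the key names are hypothetical): the deleted key names passed to the EntityConverter are the names of the secondary keys whose fields the conversion removes.

Set<String> deletedKeys = new HashSet<String>();
deletedKeys.add("ssn");
deletedKeys.add("favoriteColor");

Mutations mutations = new Mutations();
mutations.addConverter(new EntityConverter(
    "my.package.Person", 0, new MyPersonConversion(), deletedKeys));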

        + * + * @see Converter + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class EntityConverter extends Converter { + + private static final long serialVersionUID = -988428985370593743L; + + private Set deletedKeys; + + /** + * Creates a mutation for converting all instances of the given entity + * class version to the current version of the class. + * + * @param entityClassName the entity class to which this mutation applies. + * @param classVersion the class version to which this mutation applies. + * @param conversion converter instance. + * @param deletedKeys the set of key names that are to be deleted. + */ + public EntityConverter(String entityClassName, + int classVersion, + Conversion conversion, + Set deletedKeys) { + super(entityClassName, classVersion, null, conversion); + + /* Eclipse objects to assigning with a ternary operator. */ + if (deletedKeys != null) { + this.deletedKeys = new HashSet(deletedKeys); + } else { + this.deletedKeys = Collections.emptySet(); + } + } + + /** + * Returns the set of key names that are to be deleted. + * + * @return the set of key names that are to be deleted. + */ + public Set getDeletedKeys() { + return Collections.unmodifiableSet(deletedKeys); + } + + /** + * Returns true if the deleted and renamed keys are equal in this object + * and given object, and if the {@link Converter#equals} superclass method + * returns true. + */ + @Override + public boolean equals(Object other) { + if (other instanceof EntityConverter) { + EntityConverter o = (EntityConverter) other; + return deletedKeys.equals(o.deletedKeys) && + super.equals(other); + } else { + return false; + } + } + + @Override + public int hashCode() { + return deletedKeys.hashCode() + super.hashCode(); + } + + @Override + public String toString() { + return "[EntityConverter " + super.toString() + + " DeletedKeys: " + deletedKeys + ']'; + } +} diff --git a/src/com/sleepycat/persist/evolve/EvolveConfig.java b/src/com/sleepycat/persist/evolve/EvolveConfig.java new file mode 100644 index 0000000..614bd62 --- /dev/null +++ b/src/com/sleepycat/persist/evolve/EvolveConfig.java @@ -0,0 +1,124 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import com.sleepycat.persist.EntityStore; + +/** + * Configuration properties for eager conversion of unevolved objects. This + * configuration is used with {@link EntityStore#evolve EntityStore.evolve}. + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class EvolveConfig implements Cloneable { + + private Set classesToEvolve; + private EvolveListener evolveListener; + + /** + * Creates an evolve configuration with default properties. + */ + public EvolveConfig() { + classesToEvolve = new HashSet(); + } + + /** + * Returns a shallow copy of the configuration. + * + * @return a shallow copy of the configuration. 
+ * + * @deprecated As of JE 4.0.13, replaced by {@link + * EvolveConfig#clone()}. + */ + public EvolveConfig cloneConfig() { + try { + return (EvolveConfig) super.clone(); + } catch (CloneNotSupportedException cannotHappen) { + return null; + } + } + + /** + * Returns a shallow copy of the configuration. + */ + @Override + public EvolveConfig clone() { + try { + return (EvolveConfig) super.clone(); + } catch (CloneNotSupportedException cannotHappen) { + return null; + } + } + + /** + * Adds an entity class for a primary index to be converted. If no classes + * are added, all indexes that require evolution will be converted. + * + * @param entityClass the entity class name. + * + * @return 'this'. + */ + public EvolveConfig addClassToEvolve(String entityClass) { + classesToEvolve.add(entityClass); + return this; + } + + /** + * Returns an unmodifiable set of the entity classes to be evolved. + * + * @return an unmodifiable set of the entity classes to be evolved. + */ + public Set getClassesToEvolve() { + return Collections.unmodifiableSet(classesToEvolve); + } + + /** + * Sets a progress listener that is notified each time an entity is read. + * + * @param listener the EvolveListener. + * + * @return 'this'. + */ + public EvolveConfig setEvolveListener(EvolveListener listener) { + setEvolveListenerVoid(listener); + return this; + } + + /** + * + * @hidden + * + * The void return setter for use by Bean editors. + * + * @param listener the EvolveListener. + */ + public void setEvolveListenerVoid(EvolveListener listener) { + this.evolveListener = listener; + } + + /** + * Returns the progress listener that is notified each time an entity is + * read. + * + * @return the EvolveListener. + */ + public EvolveListener getEvolveListener() { + return evolveListener; + } +} diff --git a/src/com/sleepycat/persist/evolve/EvolveConfigBeanInfo.java b/src/com/sleepycat/persist/evolve/EvolveConfigBeanInfo.java new file mode 100644 index 0000000..64366d8 --- /dev/null +++ b/src/com/sleepycat/persist/evolve/EvolveConfigBeanInfo.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import com.sleepycat.util.ConfigBeanInfoBase; + +import java.beans.BeanDescriptor; +import java.beans.PropertyDescriptor; + +public class EvolveConfigBeanInfo extends ConfigBeanInfoBase { + + @Override + public BeanDescriptor getBeanDescriptor() { + return getBdescriptor(EvolveConfig.class); + } + + @Override + public PropertyDescriptor[] getPropertyDescriptors() { + return getPdescriptor(EvolveConfig.class); + } +} diff --git a/src/com/sleepycat/persist/evolve/EvolveEvent.java b/src/com/sleepycat/persist/evolve/EvolveEvent.java new file mode 100644 index 0000000..b89dd63 --- /dev/null +++ b/src/com/sleepycat/persist/evolve/EvolveEvent.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +/** + * The event passed to the EvolveListener interface during eager entity + * evolution. + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class EvolveEvent { + + private EvolveStats stats; + private String entityClassName; + + EvolveEvent() { + this.stats = new EvolveStats(); + } + + void update(String entityClassName) { + this.entityClassName = entityClassName; + } + + /** + * The cumulative statistics gathered during eager evolution. + * + * @return the cumulative statistics. + */ + public EvolveStats getStats() { + return stats; + } + + /** + * The class name of the current entity class being converted. + * + * @return the class name. + */ + public String getEntityClassName() { + return entityClassName; + } +} diff --git a/src/com/sleepycat/persist/evolve/EvolveInternal.java b/src/com/sleepycat/persist/evolve/EvolveInternal.java new file mode 100644 index 0000000..47a8acd --- /dev/null +++ b/src/com/sleepycat/persist/evolve/EvolveInternal.java @@ -0,0 +1,50 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +/** + * + * @hidden + * + * Internal access class that should not be used by applications. + * + * @author Mark Hayes + */ +public class EvolveInternal { + + /** + * Internal access method that should not be used by applications. + * + * @return the EvolveEvent. + */ + public static EvolveEvent newEvent() { + return new EvolveEvent(); + } + + /** + * Internal access method that should not be used by applications. + * + * @param event the EvolveEvent. + * @param entityClassName the class name. + * @param nRead the number read. + * @param nConverted the number converted. + */ + public static void updateEvent(EvolveEvent event, + String entityClassName, + int nRead, + int nConverted) { + event.update(entityClassName); + event.getStats().add(nRead, nConverted); + } +} diff --git a/src/com/sleepycat/persist/evolve/EvolveListener.java b/src/com/sleepycat/persist/evolve/EvolveListener.java new file mode 100644 index 0000000..5b8056b --- /dev/null +++ b/src/com/sleepycat/persist/evolve/EvolveListener.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +/** + * The listener interface called during eager entity evolution. + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public interface EvolveListener { + + /** + * The listener method called during eager entity evolution. + * + * @param event the EvolveEvent. + * + * @return true to continue evolution or false to stop. + */ + boolean evolveProgress(EvolveEvent event); +} diff --git a/src/com/sleepycat/persist/evolve/EvolveStats.java b/src/com/sleepycat/persist/evolve/EvolveStats.java new file mode 100644 index 0000000..e878c6e --- /dev/null +++ b/src/com/sleepycat/persist/evolve/EvolveStats.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +/** + * Statistics accumulated during eager entity evolution. + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class EvolveStats { + + private int nRead; + private int nConverted; + + EvolveStats() { + } + + void add(int nRead, int nConverted) { + this.nRead += nRead; + this.nConverted += nConverted; + } + + /** + * The total number of entities read during eager evolution. + * + * @return the number of entities read. + */ + public int getNRead() { + return nRead; + } + + /** + * The total number of entities converted during eager evolution. + * + * @return the number of entities converted. + */ + public int getNConverted() { + return nConverted; + } +} diff --git a/src/com/sleepycat/persist/evolve/IncompatibleClassException.java b/src/com/sleepycat/persist/evolve/IncompatibleClassException.java new file mode 100644 index 0000000..867092a --- /dev/null +++ b/src/com/sleepycat/persist/evolve/IncompatibleClassException.java @@ -0,0 +1,66 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import com.sleepycat.je.OperationFailureException; + +/** + * A class has been changed incompatibly and no mutation has been configured to + * handle the change or a new class version number has not been assigned. + * + * + *

        In a replicated environment, this exception is also thrown when upgrading + * an application (persistent classes have been changed) and an upgraded node + * is elected Master before all of the Replica nodes have been upgraded. See + * Upgrading a Replication Group + * for more information.

        + * + * + * @see com.sleepycat.persist.EntityStore#EntityStore EntityStore.EntityStore + * @see com.sleepycat.persist.model.Entity#version + * @see com.sleepycat.persist.model.Persistent#version + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class IncompatibleClassException extends OperationFailureException { + + private static final long serialVersionUID = 2103957824L; + + public IncompatibleClassException(String message) { + super(message); + } + + /* */ + + /** + * For internal use only. + * @hidden + */ + private IncompatibleClassException(String message, + OperationFailureException cause) { + super(message, cause); + } + + /** + * For internal use only. + * @hidden + */ + @Override + public OperationFailureException wrapSelf(String msg) { + return new IncompatibleClassException(msg, this); + } + + /* */ +} diff --git a/src/com/sleepycat/persist/evolve/Mutation.java b/src/com/sleepycat/persist/evolve/Mutation.java new file mode 100644 index 0000000..ab03221 --- /dev/null +++ b/src/com/sleepycat/persist/evolve/Mutation.java @@ -0,0 +1,95 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import java.io.Serializable; + +/** + * The base class for all mutations. + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public abstract class Mutation implements Serializable { + + private static final long serialVersionUID = -8094431582953129268L; + + private String className; + private int classVersion; + private String fieldName; + + Mutation(String className, int classVersion, String fieldName) { + this.className = className; + this.classVersion = classVersion; + this.fieldName = fieldName; + } + + /** + * Returns the class to which this mutation applies. + * + * @return the class to which this mutation applies. + */ + public String getClassName() { + return className; + } + + /** + * Returns the class version to which this mutation applies. + * + * @return the class version to which this mutation applies. + */ + public int getClassVersion() { + return classVersion; + } + + /** + * Returns the field name to which this mutation applies, or null if this + * mutation applies to the class itself. + * + * @return the field name to which this mutation applies, or null. + */ + public String getFieldName() { + return fieldName; + } + + /** + * Returns true if the class name, class version and field name are equal + * in this object and given object. + */ + @Override + public boolean equals(Object other) { + if (other instanceof Mutation) { + Mutation o = (Mutation) other; + return className.equals(o.className) && + classVersion == o.classVersion && + ((fieldName != null) ? fieldName.equals(o.fieldName) + : (o.fieldName == null)); + } else { + return false; + } + } + + @Override + public int hashCode() { + return className.hashCode() + + classVersion + + ((fieldName != null) ? 
fieldName.hashCode() : 0); + } + + @Override + public String toString() { + return "Class: " + className + " Version: " + classVersion + + ((fieldName != null) ? (" Field: " + fieldName) : ""); + } +} diff --git a/src/com/sleepycat/persist/evolve/Mutations.java b/src/com/sleepycat/persist/evolve/Mutations.java new file mode 100644 index 0000000..f522c3a --- /dev/null +++ b/src/com/sleepycat/persist/evolve/Mutations.java @@ -0,0 +1,244 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +import java.io.Serializable; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.StoreConfig; + +/** + * A collection of mutations for configuring class evolution. + * + *

        Mutations are configured when a store is opened via {@link + * StoreConfig#setMutations StoreConfig.setMutations}. For example:

        + * + *
        + *  Mutations mutations = new Mutations();
        + *  // Add mutations...
        + *  StoreConfig config = new StoreConfig();
        + *  config.setMutations(mutations);
        + *  EntityStore store = new EntityStore(env, "myStore", config);
        + * + *

        Mutations cause data conversion to occur lazily as instances are read + * from the store. The {@link EntityStore#evolve EntityStore.evolve} method + * may also be used to perform eager conversion.

        + * + *

Not all incompatible class changes can be handled via mutations. For + * example, complex refactoring may require a transformation that manipulates + * multiple entity instances at once. Such changes are not possible with + * mutations but can be made by performing a store conversion.

        + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class Mutations implements Serializable { + + private static final long serialVersionUID = -1744401530444812916L; + + private Map renamers; + private Map deleters; + private Map converters; + + /** + * Creates an empty set of mutations. + */ + public Mutations() { + renamers = new HashMap(); + deleters = new HashMap(); + converters = new HashMap(); + } + + /** + * Returns true if no mutations are present. + * + * @return true if no mutations are present. + */ + public boolean isEmpty() { + return renamers.isEmpty() && + deleters.isEmpty() && + converters.isEmpty(); + } + + /** + * Adds a renamer mutation. + * + * @param renamer the Renamer. + */ + public void addRenamer(Renamer renamer) { + renamers.put(new Key(renamer), renamer); + } + + /** + * Returns the renamer mutation for the given class, version and field, or + * null if none exists. A null field name should be specified to get a + * class renamer. + * + * @param className the class name. + * + * @param classVersion the class version. + * + * @param fieldName the field name in the given class version. + * + * @return the Renamer, or null. + */ + public Renamer getRenamer(String className, + int classVersion, + String fieldName) { + return renamers.get(new Key(className, classVersion, fieldName)); + } + + /** + * Returns an unmodifiable collection of all renamer mutations. + * + * @return the renamers. + */ + public Collection getRenamers() { + return renamers.values(); + } + + /** + * Adds a deleter mutation. + * + * @param deleter the Deleter. + */ + public void addDeleter(Deleter deleter) { + deleters.put(new Key(deleter), deleter); + } + + /** + * Returns the deleter mutation for the given class, version and field, or + * null if none exists. A null field name should be specified to get a + * class deleter. + * + * @param className the class name. + * + * @param classVersion the class version. + * + * @param fieldName the field name. + * + * @return the Deleter, or null. + */ + public Deleter getDeleter(String className, + int classVersion, + String fieldName) { + return deleters.get(new Key(className, classVersion, fieldName)); + } + + /** + * Returns an unmodifiable collection of all deleter mutations. + * + * @return the deleters. + */ + public Collection getDeleters() { + return deleters.values(); + } + + /** + * Adds a converter mutation. + * + * @param converter the Converter. + */ + public void addConverter(Converter converter) { + converters.put(new Key(converter), converter); + } + + /** + * Returns the converter mutation for the given class, version and field, + * or null if none exists. A null field name should be specified to get a + * class converter. + * + * @param className the class name. + * + * @param classVersion the class version. + * + * @param fieldName the field name. + * + * @return the Converter, or null. + */ + public Converter getConverter(String className, + int classVersion, + String fieldName) { + return converters.get(new Key(className, classVersion, fieldName)); + } + + /** + * Returns an unmodifiable collection of all converter mutations. + * + * @return the converters. 
+ */ + public Collection getConverters() { + return converters.values(); + } + + private static class Key extends Mutation { + static final long serialVersionUID = 2793516787097085621L; + + Key(String className, int classVersion, String fieldName) { + super(className, classVersion, fieldName); + } + + Key(Mutation mutation) { + super(mutation.getClassName(), + mutation.getClassVersion(), + mutation.getFieldName()); + } + } + + /** + * Returns true if this collection has the same set of mutations as the + * given collection and all mutations are equal. + */ + @Override + public boolean equals(Object other) { + if (other instanceof Mutations) { + Mutations o = (Mutations) other; + return renamers.equals(o.renamers) && + deleters.equals(o.deleters) && + converters.equals(o.converters); + } else { + return false; + } + } + + @Override + public int hashCode() { + return renamers.hashCode() + + deleters.hashCode() + + converters.hashCode(); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + if (renamers.size() > 0) { + buf.append(renamers.values()); + } + if (deleters.size() > 0) { + buf.append(deleters.values()); + } + if (converters.size() > 0) { + buf.append(converters.values()); + } + if (buf.length() > 0) { + return buf.toString(); + } else { + return "[Empty Mutations]"; + } + } +} diff --git a/src/com/sleepycat/persist/evolve/Renamer.java b/src/com/sleepycat/persist/evolve/Renamer.java new file mode 100644 index 0000000..991a21a --- /dev/null +++ b/src/com/sleepycat/persist/evolve/Renamer.java @@ -0,0 +1,129 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.evolve; + +/** + * A mutation for renaming a class or field without changing the instance or + * field value. For example: + *
        + *  package my.package;
        + *
        + *  // The old class.  Version 0 is implied.
        + *  //
        + *  {@literal @Entity}
        + *  class Person {
        + *      String name;
        + *  }
        + *
        + *  // The new class.  A new version number must be assigned.
        + *  //
        + *  {@literal @Entity(version=1)}
        + *  class Human {
        + *      String fullName;
        + *  }
        + *
        + *  // Add the mutations.
        + *  //
        + *  Mutations mutations = new Mutations();
        + *
        + *  mutations.addRenamer(new Renamer("my.package.Person", 0,
        + *                                   Human.class.getName()));
        + *
        + *  mutations.addRenamer(new Renamer("my.package.Person", 0,
        + *                                   "name", "fullName"));
        + *
        + *  // Configure the mutations as described {@link Mutations here}.
        + * + * + *

        In a replicated environment, renaming an entity class or secondary key + * field may require handling the {@link + * com.sleepycat.je.rep.DatabasePreemptedException} during the upgrade process. + * See + * Upgrading a Replication Group + * for more information.

        + * + * + * @see com.sleepycat.persist.evolve Class Evolution + * @author Mark Hayes + */ +public class Renamer extends Mutation { + + private static final long serialVersionUID = 2238151684405810427L; + + private String newName; + + /** + * Creates a mutation for renaming the class of all instances of the given + * class version. + * + * @param fromClass the class to rename. + * @param fromVersion the class version to rename. + * @param toClass the new class name. + */ + public Renamer(String fromClass, int fromVersion, String toClass) { + super(fromClass, fromVersion, null); + newName = toClass; + } + + /** + * Creates a mutation for renaming the given field for all instances of the + * given class version. + * + * @param declaringClass the class to which this mutation applies. + * @param declaringClassVersion the class version to which this mutation + * applies. + * @param fromField field name in the given class version. + * @param toField the new field name. + */ + public Renamer(String declaringClass, int declaringClassVersion, + String fromField, String toField) { + super(declaringClass, declaringClassVersion, fromField); + newName = toField; + } + + /** + * Returns the new class or field name specified in the constructor. + * + * @return the new name. + */ + public String getNewName() { + return newName; + } + + /** + * Returns true if the new class name is equal in this object and given + * object, and if the {@link Mutation#equals} method returns true. + */ + @Override + public boolean equals(Object other) { + if (other instanceof Renamer) { + Renamer o = (Renamer) other; + return newName.equals(o.newName) && + super.equals(other); + } else { + return false; + } + } + + @Override + public int hashCode() { + return newName.hashCode() + super.hashCode(); + } + + @Override + public String toString() { + return "[Renamer " + super.toString() + + " NewName: " + newName + ']'; + } +} diff --git a/src/com/sleepycat/persist/evolve/package.html b/src/com/sleepycat/persist/evolve/package.html new file mode 100644 index 0000000..465b61a --- /dev/null +++ b/src/com/sleepycat/persist/evolve/package.html @@ -0,0 +1,402 @@ + + +Utilities for managing class evolution of persistent objects. + +

        Class Evolution

        + +

        For persistent data that is not short lived, changes to persistent classes +are almost inevitable. Some changes are compatible with existing types, and +data conversion for these changes is performed automatically and transparently. +Other changes are not compatible with existing types. Mutations can be used to +explicitly manage many types of incompatible changes.

        + +

        Not all incompatible class changes can be handled via mutations. For +example, complex refactoring may require a transformation that manipulates +multiple entity instances at once. Such changes are not possible with +mutations but can be made by performing a store +conversion.

        + +

        The different categories of type changes are described below.

        + +

        Key Field Changes

        + +

        Unlike entity data, key data is not versioned. Therefore, the physical key +format for an index is fixed once the index has been opened, and the changes +allowed for key fields are very limited. The only changes allowed for key +fields are:

        +
          +
• The name of a key field may be changed, as long as this change is
accompanied by a {@link com.sleepycat.persist.evolve.Renamer} mutation.
• A primitive type may be changed to its corresponding primitive wrapper
type. This is a compatible change.
• For primary key fields and fields of a composite key class, a primitive
wrapper type may be changed to its corresponding primitive type. This is
allowed because these key fields with reference types may never have null
values. This is a compatible change.
        + +

        Any other changes to a key field are incompatible and may be made only by +performing a store conversion.
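For illustration, the first allowed change above (a key field rename) might be
configured as follows. This is a minimal sketch; the store, class and field
names are hypothetical.

  // Version 0 of my.package.Person declared a key field "id" that is
  // renamed to "personId" in version 1.
  Mutations mutations = new Mutations();
  mutations.addRenamer(new Renamer("my.package.Person", 0,
                                   "id", "personId"));

  StoreConfig config = new StoreConfig();
  config.setMutations(mutations);
  EntityStore store = new EntityStore(env, "myStore", config);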

        + +

Key ordering, including the behavior of a custom {@link +java.lang.Comparable}, is also fixed, since keys are stored in order in the +index. The specifications for key ordering may not be changed, and the +developer is responsible for not changing the behavior of a {@code Comparable} +key class. WARNING: Changing the behavior of a {@code +Comparable} key class is likely to make the index unusable.

        + +

        Compatible Type Changes

        + +

        Entity data, unlike key data, is versioned. Therefore, some changes can be +made compatibly and other changes can be handled via mutations. Compatible +changes are defined below. To make a compatible class change, a mutation is +not required; however, the class version must be assigned a new (greater) +integer value.

        + +

Changes to a class hierarchy are compatible in some cases. A new class may +be inserted in the hierarchy. A class may be deleted from the hierarchy as +long as one of the following is true: 1) it contains no persistent fields, 2) +any persistent fields are deleted with field Deleter mutations, or 3) the class +is deleted with a class Deleter mutation. Classes in an existing hierarchy may +not be reordered compatibly, and fields may not be moved from one class to another +compatibly; for such changes a class Converter mutation is required.

        + +

        Changes to field types in entity class definitions are compatible when they +conform to the Java Language Specification definitions for Widening +Primitive Conversions and Widening +Reference Conversions. For example, a smaller integer +type may be changed to a larger integer type, and a reference type may be +changed to one of its supertypes. Automatic widening conversions are performed +as described in the Java Language Specification.
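For example, widening a non-key field requires no mutation; only the class
version must be incremented. A minimal sketch, assuming a hypothetical
Counter entity:

  // Version 0 declared:  int hits;
  {@literal @Entity(version=1)}
  class Counter {
      {@literal @PrimaryKey}
      String name;

      long hits;  // widened compatibly from int in version 0
  }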

        + +

        Primitive types may also be compatibly changed to their corresponding +primitive wrapper types, or to the wrapper type for a widened primitive type. +However, changing from a primitive wrapper type to a primitive type is not a +compatible change since existing null values could not be represented.

        + +

        Integer primitive types (byte, short, char, int, long) and their primitive +wrapper types may be compatibly changed to the BigInteger type.

        + +

Enum values may be added compatibly, but may not be deleted or renamed. As +long as new values are declared after existing values, the default sort order +for enum key fields will match the declaration order, i.e., the default sort +order will match the enum ordinal order. If a new value is inserted (declared +before an existing value), it will be sorted after all existing values but +before newly added values. However, these ordering rules are only guaranteed +for enums containing up to 631 values and only if existing values are not +reordered. If more than 631 values are declared or the declarations of +existing values are reordered, then the default sort order will be arbitrary +and will not match the declaration (ordinal) order.

        + +

        In addition, adding fields to a class is a compatible change. When a +persistent instance of a class is read that does not contain the new field, the +new field is initialized by the default constructor.
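For example (a sketch with a hypothetical entity), a field added in version 1
takes the value assigned by the default constructor, including field
initializers, when records written by version 0 are read:

  {@literal @Entity(version=1)}
  class Person {
      {@literal @PrimaryKey}
      String name;

      int age = -1;  // added in version 1; reads of version-0 records
                     // see the initializer value, -1
  }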

        + +

        All other changes to instance fields are considered incompatible. +Incompatible changes may be handled via mutations, as described next.

        + +

        Note that whenever a class is changed, either compatibly or incompatibly, a +new (higher) class version number must be assigned. See {@link +com.sleepycat.persist.model.Entity#version} and {@link +com.sleepycat.persist.model.Persistent#version} for information on assigning +class version numbers.

        + +

        Mutations

        + +

        There are three types of mutations: {@link +com.sleepycat.persist.evolve.Renamer}, {@link +com.sleepycat.persist.evolve.Deleter} and {@link +com.sleepycat.persist.evolve.Converter}.

        + +

        A class or field can be renamed using a {@link +com.sleepycat.persist.evolve.Renamer}. Renaming is not expensive, since it +does not involve conversion of instance data.

        + +

        A class or field can be deleted using a {@link +com.sleepycat.persist.evolve.Deleter}.

        +
          +
• Deleting an entity class causes removal of the primary and secondary
indices for the store, in other words, removal of all store entities for that
class and its subclasses. Removal is performed when the store is opened. A
{@link com.sleepycat.persist.evolve.Deleter} should be used for an entity class
in all of the following circumstances:
  • When removing the entity class itself.
  • When removing {@link com.sleepycat.persist.model.Entity} from the class
  to make it non-persistent.
  • When removing {@link com.sleepycat.persist.model.Entity} from the class
  and adding {@link com.sleepycat.persist.model.Persistent}, to use it as an
  embedded persistent class but not an entity class. The version of the class
  must be incremented in this case.
• Deleting a non-entity class does not itself cause deletion of instance
data, but is needed to inform the DPL that the deleted class will not be used.
Instances of the deleted class must be handled (discarded or converted to
another class) by {@link com.sleepycat.persist.evolve.Deleter} or {@link
com.sleepycat.persist.evolve.Converter} mutations for the field or enclosing
class that contains embedded instances of the deleted class. A {@link
com.sleepycat.persist.evolve.Deleter} should be used for a non-entity class in
all of the following circumstances:
  • When removing the persistent class itself.
  • When removing {@link com.sleepycat.persist.model.Persistent} from the
  class to make it non-persistent.
  • When removing {@link com.sleepycat.persist.model.Persistent} from the
  class and adding {@link com.sleepycat.persist.model.Entity}, to use it as an
  entity class but not an embedded persistent class. The version of the class
  must be incremented in this case.
• Deleting a field causes automatic conversion of the instances containing
that field, in order to discard the field values.
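For example, the deletions described above might be configured as follows
(a sketch; the class and field names are hypothetical):

  Mutations mutations = new Mutations();

  // Delete the field "obsoleteField" from version 0 of an entity class.
  mutations.addDeleter(new Deleter("my.package.Person", 0, "obsoleteField"));

  // Delete version 0 of a non-entity class that is no longer persistent.
  mutations.addDeleter(new Deleter("my.package.OldAddress", 0));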
        + +

        Other incompatible changes are handled by creating a {@link +com.sleepycat.persist.evolve.Converter} mutation and implementing a {@link +com.sleepycat.persist.evolve.Conversion#convert Conversion.convert} method that +manipulates the raw objects and/or simple values directly. The {@code convert} +method is passed an object of the old incompatible type and it returns an +object of a current type.

        + +

        Conversions can be specified in two ways: for specific fields or for all +instances of a class. A different {@link +com.sleepycat.persist.evolve.Converter} constructor is used in each case. +Field-specific conversions are used instead of class conversions when both are +applicable.
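For example, a field conversion is implemented with a {@link
com.sleepycat.persist.evolve.Conversion} and registered using the
field-specific {@link com.sleepycat.persist.evolve.Converter} constructor.
A minimal sketch, assuming a hypothetical version-0 field "zipCode" of type
int that is now declared as a String:

  class ZipCodeConversion implements Conversion {
      private static final long serialVersionUID = 1L;

      public void initialize(EntityModel model) {
          // No type lookups are needed for this simple value conversion.
      }

      public Object convert(Object fromValue) {
          // Receives the old int value; returns the new String value.
          return fromValue.toString();
      }

      {@literal @Override}
      public boolean equals(Object o) {
          return o instanceof ZipCodeConversion;
      }
  }

  Mutations mutations = new Mutations();
  mutations.addConverter(new Converter("my.package.Person", 0, "zipCode",
                                       new ZipCodeConversion()));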

        + +

Note that a class conversion may not be specified for an enum class. A +field conversion, or a class conversion for the class declaring the field, may +be used.

        + +

        Note that each mutation is applied to a specific class version number. The +class version must be explicitly specified in a mutation for two reasons:

        +
          +
1. This provides safety in the face of multiple unconverted versions of a
given type. Without a version, a single conversion method would have to handle
multiple input types, and would have to distinguish between them by examining
the data or type information.
2. This allows arbitrary changes to be made. For example, a series of name
changes may reuse a given name for more than one version. To identify the
specific type being converted or renamed, a version number is needed.
        +

        See {@link com.sleepycat.persist.model.Entity#version} and {@link +com.sleepycat.persist.model.Persistent#version} for information on assigning +class version numbers.

        + +

        Mutations are therefore responsible for converting each existing +incompatible class version to the current version as defined by a current class +definition. For example, consider that class-version A-1 is initially changed +to A-2 and a mutation is added for converting A-1 to A-2. If later changes in +version A-3 occur before converting all A-1 instances to version A-2, the +converter for A-1 will have to be changed. Instead of converting from A-1 to +A-2 it will need to convert from A-1 to A-3. In addition, a mutation +converting A-2 to A-3 will be needed.

        + +

        When a {@link com.sleepycat.persist.evolve.Converter} mutation applies to a +given object, other mutations that may apply to that object are not +automatically performed. It is the responsibility of the {@link +com.sleepycat.persist.evolve.Converter} to return an object that conforms to +the current class definition, including renaming fields and classes. If the +input object has nested objects or superclasses that also need conversion, the +converter must perform these nested conversions before returning the final +converted object. This rule avoids the complexity and potential errors that +could result if a converter mutation were automatically combined with other +mutations in an arbitrary manner.

        + +

        The {@link com.sleepycat.persist.EntityStore#evolve EntityStore.evolve} +method may optionally be used to ensure that all instances of an old class +version are converted to the current version.
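A minimal sketch of eager conversion, run after the store has been opened
with the appropriate mutations:

  EvolveConfig evolveConfig = new EvolveConfig();
  EvolveStats stats = store.evolve(evolveConfig);
  System.out.println("Entities read: " + stats.getNRead() +
                     ", converted: " + stats.getNConverted());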

        + +

        Other Metadata Changes

        + +

        When a class that happens to be an entity class is renamed, it remains an +entity class. When a field that happens to be a primary or +secondary key field is renamed, its metadata remains intact as well.

        + +

        When the {@link com.sleepycat.persist.model.SecondaryKey} annotation is +added to an existing field, a new index is created automatically. The +new index will be populated by reading the entire primary index when the +primary index is opened.
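For example (a sketch; the entity and field names are hypothetical),
annotating an existing field in a new class version causes the index to be
created and populated when the store is opened:

  {@literal @Entity(version=1)}
  class Person {
      {@literal @PrimaryKey}
      String name;

      // Existing field, newly annotated in version 1.
      {@literal @SecondaryKey(relate=Relationship.MANY_TO_ONE)}
      String department;
  }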

        + +

        When the {@link com.sleepycat.persist.model.SecondaryKey} annotation is +included with a new field, a new index is created automatically. The +new field is required to be a reference type (not a primitive) and must be +initialized to null (the default behavior) in the default constructor. +Entities will be indexed by the field when they are stored with a non-null key +value.

        + +

        When a field with the {@link com.sleepycat.persist.model.SecondaryKey} +annotation is deleted, or when the {@link +com.sleepycat.persist.model.SecondaryKey} annotation is removed from a field +without deleting it, the secondary index is removed (dropped). Removal occurs +when the store is opened.

        + +

        The {@link com.sleepycat.persist.model.SecondaryKey#relate +SecondaryKey.relate} property may NOT be changed. All other properties of a +{@link com.sleepycat.persist.model.SecondaryKey} may be changed, although +avoiding changes that cause foreign key integrity errors is the responsibility +of the application developer. For example, if the {@link +com.sleepycat.persist.model.SecondaryKey#relatedEntity} property is added but +not all existing secondary keys reference existing primary keys for the related +entity, foreign key integrity errors may occur.

        + +

        The {@link com.sleepycat.persist.model.PrimaryKey} annotation may NOT be +removed from a field in an entity class.

        + +

        The {@link com.sleepycat.persist.model.PrimaryKey#sequence} property may be +added, removed, or changed to a different name.

        + +

        The {@link com.sleepycat.persist.model.Persistent#proxyFor} property may NOT +be added, removed, or changed to a different class.

        + +

        Warnings on Testing and Backups

        + +

        The application developer is responsible for verifying that class evolution +works properly before deploying with a changed set of persistent classes. The +DPL will report errors when old class definitions cannot be evolved, for +example, when a mutation is missing. To test that no such errors will occur, +application test cases must include instances of all persistent classes.

        + +

        Converter mutations require special testing. Since the application +conversion method is allowed to return instances of any type, the DPL cannot +check that the proper type is returned until the data is accessed. To avoid +data access errors, application test cases must cover converter mutations for +all potential input and output types.

        + +

When secondary keys are dropped or entity classes are deleted, the +underlying databases are deleted and cannot be recovered from the store. This +takes place when the store is opened. It is strongly recommended that a backup +of the entire store be made before opening the store and causing class +evolution to proceed.

        + +

        Store Conversion

        + +

        When mutations are not sufficient for handling class changes, a full store +conversion may be performed. This is necessary for two particular types of +class changes:

        +
          +
• A change to a physical key format, for example, a change from type
{@code int} to type {@code long}.
• A conversion that involves multiple entities at once, for example,
combining two separate entity classes into a new single entity class.
        + +

        To perform a full store conversion, a program is written that performs the +following steps to copy the data from the old store to a new converted +store:

        +
          +
1. The old store is opened as a {@link com.sleepycat.persist.raw.RawStore} and
the new store is opened as an {@link com.sleepycat.persist.EntityStore}.
2. All entities are read from the old store. Entities are read using a {@link
com.sleepycat.persist.raw.RawStore} to allow access to entities for which no
compatible class exists.
3. The {@link com.sleepycat.persist.raw.RawObject} entities are then converted
to the format desired. Raw objects can be arbitrarily manipulated as needed.
The updated raw objects must conform to the new evolved class definitions.
4. The updated raw entities are converted to live objects by calling the
{@link com.sleepycat.persist.model.EntityModel#convertRawObject
EntityModel.convertRawObject} method of the new store. This method converts
raw objects obtained from a different store, as long as they conform to the new
evolved class definitions.
5. The new live objects are written to the new {@link
com.sleepycat.persist.EntityStore} using a {@link
com.sleepycat.persist.PrimaryIndex} as usual.
        + +

        To perform such a conversion, two separate stores must be open at once. +Both stores may be in the same {@link com.sleepycat.je.Environment}, if +desired, by giving them different store names. But since all data is being +rewritten, there are performance advantages to creating the new store in a new +fresh environment: the data will be compacted as it is written, and the old +store can be removed very quickly by deleting the old environment directory +after the conversion is complete.
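A minimal sketch of the conversion loop for the steps listed above, assuming
hypothetical environments, store names and a Person entity keyed by Long:

  RawStore oldStore = new RawStore(oldEnv, "myStore", null);
  StoreConfig newConfig = new StoreConfig();
  newConfig.setAllowCreate(true);
  EntityStore newStore = new EntityStore(newEnv, "myStore", newConfig);

  EntityModel newModel = newStore.getModel();
  PrimaryIndex<Long, Person> newIndex =
      newStore.getPrimaryIndex(Long.class, Person.class);
  PrimaryIndex<Object, RawObject> oldIndex =
      oldStore.getPrimaryIndex(Person.class.getName());

  EntityCursor<RawObject> entities = oldIndex.entities();
  try {
      for (RawObject raw : entities) {
          // Manipulate the raw object as needed; the result must conform
          // to the new evolved class definitions.
          Person person = (Person) newModel.convertRawObject(raw);
          newIndex.put(person);
      }
  } finally {
      entities.close();
  }
  oldStore.close();
  newStore.close();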

        + + + +

        Upgrading a Replication Group

        + +

        When changes to persistent classes are made in a {@link +com.sleepycat.je.rep.ReplicatedEnvironment}, special handling is necessary when +the application is upgraded on the nodes in the replication group. Upgraded +means that the application on a node is stopped, the updated application +classes are installed, and the application is started again.

        + +

        As usual in any sort of replication group upgrade, the Replica nodes must be +upgraded first and the Master node must be upgraded last. If an upgraded node +is elected Master before all of the Replica nodes have been upgraded, either +because of a user error or an unexpected failover, the {@link +com.sleepycat.persist.evolve.IncompatibleClassException} will be thrown.

        + +

        There are two considerations that must be taken into account during the +upgrade process: new indexes that are temporarily unavailable on a Replica, +and exceptions that result from renamed entity classes and secondary keys.

        + +

        Note that these considerations only apply when a hot upgrade is performed, +i.e., when the replication group will contain a mix of upgraded and +non-upgraded nodes. If all nodes in the group are first taken down and then +the nodes are upgraded and restarted, then no special considerations are +necessary and this documentation is not applicable.

        + +

        Defining New Indexes in a Replication Group

        + +

        When a new entity class is added, which defines a new {@code +PrimaryIndex}, or a new secondary key is added, which defines a new {@code +SecondaryIndex}, the indexes will not be immediately available on an upgraded +node. A new index will not be fully available (i.e., on every node) until all +the nodes have been upgraded, the index has been created (and populated, in the +case of a secondary index) on the Master node, and the index has been +replicated to each Replica node via the replication stream.

        + +

        When a node is first upgraded it will start out as a Replica node, and any +newly defined indexes will not be available. The application has two choices +for handling this condition.

        +
          +
1. An application may be able to coordinate among its nodes, by its own means,
to inform all nodes when an index has been created and populated on the Master.
Such an application can choose to access a new index only after it knows the
index is available. Such coordination is not directly supported by JE,
although a transaction with a {@link com.sleepycat.je.CommitToken} may be used
to simplify the coordination process.
2. An application may call {@link
com.sleepycat.persist.EntityStore#getPrimaryIndex getPrimaryIndex} or {@link
com.sleepycat.persist.EntityStore#getSecondaryIndex getSecondaryIndex} to
determine whether an index is available. An {@link
com.sleepycat.persist.IndexNotAvailableException} is thrown by these methods
when the index has not yet been created or when a secondary index is currently
being populated via the replication stream.
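A sketch of the probe described in the second choice above (the entity class
and key name are hypothetical):

  SecondaryIndex<String, Long, Person> byDept = null;
  try {
      byDept = store.getSecondaryIndex(primaryIndex, String.class,
                                       "department");
  } catch (IndexNotAvailableException e) {
      // The index has not yet been created and populated on the Master
      // and replicated to this node; retry later.
  }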
        + +

When an upgraded node is elected Master (this is typically near the end of +the upgrade process), it must call {@link +com.sleepycat.persist.EntityStore#getPrimaryIndex getPrimaryIndex} to create +each new primary index, and {@link +com.sleepycat.persist.EntityStore#getSecondaryIndex getSecondaryIndex} to +create and populate each new secondary index. A newly elected Master node that +was just upgraded should be prepared for a delay when {@link +com.sleepycat.persist.EntityStore#getSecondaryIndex getSecondaryIndex} is +called to create and populate a new secondary index.

        + +

        Renaming Entity Classes and Keys in a Replication Group

        + +

        When a DPL entity class or secondary key field is renamed by an application +using a {@link com.sleepycat.persist.evolve.Renamer} mutation, this will result +internally in the underlying database for that entity class or secondary key +being renamed. The actual renaming of the database first occurs on the +upgraded Master node and is then replicated to each Replica node.

        + +

        When the application on a Master or Replica node first accesses the store +after the database has been renamed, a {@link +com.sleepycat.je.rep.DatabasePreemptedException} will be thrown. When this +happens, the application must close any cursors and transactions that are open +for that store, and then close the store and reopen it.
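A sketch of that handling (the variable names are hypothetical):

  EntityCursor<Person> cursor = null;
  try {
      cursor = primaryIndex.entities();
      for (Person p : cursor) {
          // ... use p ...
      }
      cursor.close();
      cursor = null;
  } catch (DatabasePreemptedException e) {
      // The underlying database was renamed and preempted via the
      // replication stream: close open cursors and transactions for this
      // store, then close and reopen the store.
      if (cursor != null) {
          cursor.close();
      }
      store.close();
      store = new EntityStore(env, "myStore", config);
  }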

        + + + + + diff --git a/src/com/sleepycat/persist/impl/AbstractInput.java b/src/com/sleepycat/persist/impl/AbstractInput.java new file mode 100644 index 0000000..d8de0fa --- /dev/null +++ b/src/com/sleepycat/persist/impl/AbstractInput.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +/** + * Base class for EntityInput implementations. RecordInput cannot use this + * base class because it extends TupleInput, so it repeats the code here. + * + * @author Mark Hayes + */ +abstract class AbstractInput implements EntityInput { + + Catalog catalog; + boolean rawAccess; + + AbstractInput(Catalog catalog, boolean rawAccess) { + this.catalog = catalog; + this.rawAccess = rawAccess; + } + + public Catalog getCatalog() { + return catalog; + } + + public boolean isRawAccess() { + return rawAccess; + } + + public boolean setRawAccess(boolean rawAccessParam) { + boolean original = rawAccess; + rawAccess = rawAccessParam; + return original; + } +} diff --git a/src/com/sleepycat/persist/impl/Accessor.java b/src/com/sleepycat/persist/impl/Accessor.java new file mode 100644 index 0000000..45a4ef6 --- /dev/null +++ b/src/com/sleepycat/persist/impl/Accessor.java @@ -0,0 +1,277 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +/** + * Field binding operations implemented via reflection (ReflectionAccessor) or + * bytecode enhancement (EnhancedAccessor). + * + *

        Normally we read the set of all secondary key fields first and then the + * set of all non-key fields, reading each set in order of field name. But + * when reading an old format record we must account for the following + * class evolution conversions:

        + *
          + *
• Convert a field: pass value thru converter
• Widen a field type: pass value thru widener
• Add a field: don't read the new field
• Delete a field: skip the deleted field
• Rename a field: read field in a different order
        + *

        To support these operations, the methods for reading fields allow reading + * specific ranges of fields as well as all fields. For example, all fields + * up to a deleted field could be read, and then all fields from the following + * field onward.

        + * + * @author Mark Hayes + */ +interface Accessor { + + /** + * A large field value to use instead of Integer.MAX_VALUE, to work around + * Java JIT compiler bug when doing an (X <= Integer.MAX_VALUE) as would be + * done in readXxxKeyFields methods. + */ + final int MAX_FIELD_NUM = Integer.MAX_VALUE - 1; + + /** + * Creates a new instance of the target class using its default + * constructor. + */ + Object newInstance(); + + /** + * Creates a new one dimensional array of the given length, having the + * target class as its component type. + * + *

        Using a special method for a one dimensional array, which can be + * implemented by bytecode generation, is a compromise. We use reflection + * to create multidimensional arrays. We could in the future generate code + * to create arrays as they are encountered, if there is a need to avoid + * reflection for multidimensional arrays.

        + */ + Object newArray(int len); + + /** + * Returns whether the primary key field is null (for a reference type) or + * zero (for a primitive integer type). Null and zero are used as an + * indication that the key should be assigned from a sequence. + */ + boolean isPriKeyFieldNullOrZero(Object o); + + /** + * Writes the primary key field value to the given EntityOutput. + * + *

        To write a primary key with a reference type, this method must call + * EntityOutput.writeKeyObject.

        + * + * @param o is the object whose primary key field is to be written. + * + * @param output the output data to write to. + */ + void writePriKeyField(Object o, EntityOutput output) + throws RefreshException; + + /** + * Reads the primary key field value from the given EntityInput. + * + *

        To read a primary key with a reference type, this method must call + * EntityInput.readKeyObject.

        + * + * @param o is the object whose primary key field is to be read. + * + * @param input the input data to read from. + */ + void readPriKeyField(Object o, EntityInput input) + throws RefreshException; + + /** + * Writes all secondary key field values to the given EntityOutput, + * writing fields in super classes first and in name order within class. + * + * @param o is the object whose secondary key fields are to be written. + * + *

        If the primary key has a reference type, this method must call + * EntityOutput.registerPriKeyObject before writing any other fields.

        + * + * @param output the output data to write to. + */ + void writeSecKeyFields(Object o, EntityOutput output) + throws RefreshException; + + /** + * Reads a range of secondary key field values from the given EntityInput, + * reading fields in super classes first and in name order within class. + * + *

        If the primary key has a reference type, this method must call + * EntityInput.registerPriKeyObject before reading any other fields.

        + * + *

        To read all fields, pass -1 for superLevel, zero for startField and + * MAX_FIELD_NUM for endField. Fields from super classes are read + * first.

        + * + *

        To read a specific range of fields, pass a non-negative number for + * superLevel and the specific indices of the field range to be read in the + * class at that level.

        + * + * @param o is the object whose secondary key fields are to be read. + * + * @param input the input data to read from. + * + * @param startField the starting field index in the range of fields to + * read. To read all fields, the startField should be zero. + * + * @param endField the ending field index in the range of fields to read. + * To read all fields, the endField should be MAX_FIELD_NUM. + * + * @param superLevel is a non-negative number to read the fields of the + * class that is the Nth super instance; or a negative number to read + * fields in all classes. + */ + void readSecKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException; + + /** + * Writes all non-key field values to the given EntityOutput, writing + * fields in super classes first and in name order within class. + * + * @param o is the object whose non-key fields are to be written. + * + * @param output the output data to write to. + */ + void writeNonKeyFields(Object o, EntityOutput output) + throws RefreshException; + + /** + * Reads a range of non-key field values from the given EntityInput, + * reading fields in super classes first and in name order within class. + * + *

        To read all fields, pass -1 for superLevel, zero for startField and + * MAX_FIELD_NUM for endField. Fields from super classes are read + * first.

        + * + *

        To read a specific range of fields, pass a non-negative number for + * superLevel and the specific indices of the field range to be read in the + * class at that level.

        + * + * @param o is the object whose non-key fields are to be read. + * + * @param input the input data to read from. + * + * @param startField the starting field index in the range of fields to + * read. To read all fields, the startField should be zero. + * + * @param endField the ending field index in the range of fields to read. + * To read all fields, the endField should be MAX_FIELD_NUM. + * + * @param superLevel is a non-negative number to read the fields of the + * class that is the Nth super instance; or a negative number to read + * fields in all classes. + */ + void readNonKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException; + + /** + * Writes all composite key field values to the given EntityOutput, writing + * in declared field number order. + * + * @param o the composite key object whose fields are to be written. + * + * @param output the output data to write to. + */ + void writeCompositeKeyFields(Object o, EntityOutput output) + throws RefreshException; + + /** + * Reads all composite key field values from the given EntityInput, + * reading in declared field number order. + * + * @param o the composite key object whose fields are to be read. + * + * @param input the input data to read from. + */ + void readCompositeKeyFields(Object o, EntityInput input) + throws RefreshException; + + /** + * Returns the value of a given field, representing primitives as primitive + * wrapper objects. + * + * @param o is the object containing the key field. + * + * @param field is the field index. + * + * @param superLevel is a positive number to identify the field of the + * class that is the Nth super instance; or zero to identify the field in + * this class. + * + * @param isSecField is true for a secondary key field or false for a + * non-key field. + * + * @return the current field value, or null for a reference type field + * that is null. + */ + Object getField(Object o, + int field, + int superLevel, + boolean isSecField); + + /** + * Changes the value of a given field, representing primitives as primitive + * wrapper objects. + * + * @param o is the object containing the key field. + * + * @param field is the field index. + * + * @param superLevel is a positive number to identify the field of the + * class that is the Nth super instance; or zero to identify the field in + * this class. + * + * @param isSecField is true for a secondary key field or false for a + * non-key field. + * + * @param value is the new value of the field, or null to set a reference + * type field to null. + */ + void setField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value); + + /** + * Changes the value of the primary key field, representing primitives as + * primitive wrapper objects. + * + * @param o is the object containing the primary key field. + * + * @param field is the field index. + * + * @param superLevel is a positive number to identify the field of the + * class that is the Nth super instance; or zero to identify the field in + * this class. + * + * @param value is the new value of the field, or null to set a reference + * type field to null. + */ + void setPriField(Object o, Object value); +} diff --git a/src/com/sleepycat/persist/impl/Catalog.java b/src/com/sleepycat/persist/impl/Catalog.java new file mode 100644 index 0000000..1acd61c --- /dev/null +++ b/src/com/sleepycat/persist/impl/Catalog.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. 
All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.IdentityHashMap; +import java.util.Map; + +import com.sleepycat.persist.raw.RawObject; + +/** + * Catalog operation interface used by format classes. + * + * @see PersistCatalog + * @see SimpleCatalog + * @see ReadOnlyCatalog + * + * @author Mark Hayes + */ +interface Catalog { + + /* + * The catalog version is returned by getInitVersion and is the version of + * the serialized format classes loaded from the stored catalog. When a + * field is added, for example, the version can be checked to determine how + * to initialize the field in Format.initialize. + * + * -1: The version is considered to be -1 when reading the beta version of + * the catalog data. At this point no version field was stored, but we can + * distinguish the beta stored format. See PersistCatalog. + * + * 0: The first released version of the catalog data, after beta. At this + * point no version field was stored, but it is initialized to zero when + * the PersistCatalog.Data object is de-serialized. + * + * 1: Add the ComplexFormat.ConvertFieldReader.oldFieldNum field. [#15797] + */ + static final int BETA_VERSION = -1; + static final int CURRENT_VERSION = 1; + + /** + * See above. + */ + int getInitVersion(Format format, boolean forReader); + + /** + * Returns a format for a given ID, or throws an exception. This method is + * used when reading an object from the byte array format. + * + * @param expectStored is true if reading a record from a database, and + * therefore the format ID is expected to be stored also. If the format ID + * is not stored, a RefreshException is thrown. + * + * @throws IllegalStateException if the formatId does not correspond to a + * persistent class. This is an internal consistency error. + */ + Format getFormat(int formatId, boolean expectStored) + throws RefreshException; + + /** + * Returns a format for a given class, or throws an exception. This method + * is used when writing an object that was passed in by the user. + * + * @param checkEntitySubclassIndexes is true if we're expecting this format + * to be an entity subclass and therefore subclass secondary indexes should + * be opened. + * + * @throws IllegalArgumentException if the class is not persistent. This + * is a user error. + */ + Format getFormat(Class cls, boolean checkEntitySubclassIndexes) + throws RefreshException; + + /** + * Returns a format by class name. Unlike {@link + * #getFormat(Class,boolean)}, the format will not be created if it is not + * already known. 
+ */ + Format getFormat(String className); + + /** + * @see PersistCatalog#createFormat + */ + Format createFormat(String clsName, Map newFormats); + + /** + * @see PersistCatalog#createFormat + */ + Format createFormat(Class type, Map newFormats); + + /** + * @see PersistCatalog#isRawAccess + */ + boolean isRawAccess(); + + /** + * @see PersistCatalog#convertRawObject + */ + Object convertRawObject(RawObject o, IdentityHashMap converted) + throws RefreshException; + + /** + * @see PersistCatalog#resolveClass + */ + Class resolveClass(String clsName) + throws ClassNotFoundException; + + /** + * @see PersistCatalog#resolveKeyClass + */ + Class resolveKeyClass(String clsName); +} diff --git a/src/com/sleepycat/persist/impl/CollectionProxy.java b/src/com/sleepycat/persist/impl/CollectionProxy.java new file mode 100644 index 0000000..9456327 --- /dev/null +++ b/src/com/sleepycat/persist/impl/CollectionProxy.java @@ -0,0 +1,172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PersistentProxy; +import com.sleepycat.persist.raw.RawObject; + +/** + * Proxy for Collection types. 
+ * + * @author Mark Hayes + */ +@Persistent +abstract class CollectionProxy + implements PersistentProxy> { + + private E[] elements; + + protected CollectionProxy() {} + + public final void initializeProxy(Collection collection) { + elements = (E[]) new Object[collection.size()]; + int i = 0; + for (E element : collection) { + elements[i] = element; + i += 1; + } + } + + public final Collection convertProxy() { + Collection collection = newInstance(elements.length); + for (E element : elements) { + collection.add(element); + } + return collection; + } + + protected abstract Collection newInstance(int size); + + @Persistent(proxyFor=ArrayList.class) + static class ArrayListProxy extends CollectionProxy { + + protected ArrayListProxy() {} + + protected Collection newInstance(int size) { + return new ArrayList(size); + } + } + + @Persistent(proxyFor=LinkedList.class) + static class LinkedListProxy extends CollectionProxy { + + protected LinkedListProxy() {} + + protected Collection newInstance(int size) { + return new LinkedList(); + } + } + + @Persistent(proxyFor=HashSet.class) + static class HashSetProxy extends CollectionProxy { + + protected HashSetProxy() {} + + protected Collection newInstance(int size) { + return new HashSet(size); + } + } + + @Persistent(proxyFor=TreeSet.class) + static class TreeSetProxy extends CollectionProxy { + + protected TreeSetProxy() {} + + protected Collection newInstance(int size) { + return new TreeSet(); + } + } + + static Object[] getElements(RawObject collection) { + Object value = null; + while (value == null && collection != null) { + Map values = collection.getValues(); + if (values != null) { + value = values.get("elements"); + if (value == null) { + collection = collection.getSuper(); + } + } + } + if (value == null || !(value instanceof RawObject)) { + throw new IllegalStateException + ("Collection proxy for a secondary key field must " + + "contain a field named 'elements'"); + } + RawObject rawObj = (RawObject) value; + Format format = (Format) rawObj.getType(); + if (!format.isArray() || + format.getComponentType().getId() != Format.ID_OBJECT) { + throw new IllegalStateException + ("Collection proxy 'elements' field must be an Object array"); + } + return rawObj.getElements(); + } + + static void setElements(RawObject collection, Object[] elements) { + RawObject value = null; + while (value == null && collection != null) { + Map values = collection.getValues(); + if (values != null) { + value = (RawObject) values.get("elements"); + if (value != null) { + values.put("elements", + new RawObject(value.getType(), elements)); + } else { + collection = collection.getSuper(); + } + } + } + if (value == null) { + throw DbCompat.unexpectedState(); + } + } + + static void copyElements(RecordInput input, + Format format, + Format keyFormat, + Set results) + throws RefreshException { + + /* + * This could be optimized by traversing the byte format of the + * collection's elements array. 
+ */ + RawObject collection = (RawObject) format.newInstance(input, true); + collection = (RawObject) format.readObject(collection, input, true); + Object[] elements = getElements(collection); + if (elements != null) { + for (Object elem : elements) { + RecordOutput output = + new RecordOutput(input.getCatalog(), true); + output.writeKeyObject(elem, keyFormat); + DatabaseEntry entry = new DatabaseEntry(); + TupleBase.outputToEntry(output, entry); + results.add(entry); + } + } + } +} diff --git a/src/com/sleepycat/persist/impl/ComparatorCatalog.java b/src/com/sleepycat/persist/impl/ComparatorCatalog.java new file mode 100644 index 0000000..fac9e42 --- /dev/null +++ b/src/com/sleepycat/persist/impl/ComparatorCatalog.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.Map; + +/** + * Read-only catalog used by a PersistComparator to return simple formats plus + * reconstituted enum formats. + * + * @author Mark Hayes + */ +class ComparatorCatalog extends SimpleCatalog { + + private final Map formatMap; + + ComparatorCatalog(final ClassLoader classLoader, + final Map formatMap) { + super(classLoader); + this.formatMap = formatMap; + } + + public Format getFormat(final String className) { + if (formatMap != null) { + final Format f = formatMap.get(className); + if (f != null) { + return f; + } + } + return super.getFormat(className); + } +} diff --git a/src/com/sleepycat/persist/impl/ComplexFormat.java b/src/com/sleepycat/persist/impl/ComplexFormat.java new file mode 100644 index 0000000..063f9b6 --- /dev/null +++ b/src/com/sleepycat/persist/impl/ComplexFormat.java @@ -0,0 +1,2313 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.impl; + +import java.io.Serializable; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.evolve.Converter; +import com.sleepycat.persist.evolve.Deleter; +import com.sleepycat.persist.evolve.EntityConverter; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.evolve.Renamer; +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.DeleteAction; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.FieldMetadata; +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKeyMetadata; +import com.sleepycat.persist.raw.RawField; +import com.sleepycat.persist.raw.RawObject; + +/** + * Format for persistent complex classes that are not composite key classes. + * This includes entity classes and subclasses. + * + * @author Mark Hayes + */ +public class ComplexFormat extends Format { + + private static final long serialVersionUID = -2847843033590454917L; + + private ClassMetadata clsMeta; + private EntityMetadata entityMeta; + private FieldInfo priKeyField; + private List secKeyFields; + private List nonKeyFields; + private FieldReader secKeyFieldReader; + private FieldReader nonKeyFieldReader; + private Map oldToNewKeyMap; + private Map newToOldFieldMap; + private boolean evolveNeeded; + private transient Accessor objAccessor; + private transient Accessor rawAccessor; + private transient ComplexFormat entityFormat; + private transient Map secKeyAddresses; + private transient volatile Map rawFields; + private transient volatile FieldInfo[] rawInputFields; + private transient volatile int[] rawInputLevels; + private transient volatile int rawInputDepth; + + /** + * This field contains the names of secondary keys that are incorrectly + * ordered because, in an earlier version, we failed to set the dup + * comparator. This bug applies only when the primary key has a + * comparator. The bug was fixed by setting the dup comparator to the + * primary key comparator, for all new secondary databases. [#17252] + * + * A field containing an empty set signifies that no keys are incorrectly + * ordered, while a null field signifies that all keys are incorrect (when + * the primary key has a comparator). The field is assigned to an empty + * set when declared, so that it will be null only when a pre-fix version + * of the format is deserialized. (With Java serialization, when a field is + * added to a class and a previously serialized instance is deserialized, + * the new field will always be null). + * + * This field is used to determine when a dup comparator should be set. We + * cannot set the comparator for secondary databases created prior to the + * bug fix, since ordering cannot be changed for existing records. See + * isSecKeyIncorrectlyOrdered and setSecKeyCorrectlyOrdered. + * + * This field does not count in comparisons of formats during evolution. + * When the user wants to correct the ordering for an incorrectly ordered + * secondary database, she must delete the database but does not need to + * increment the class version. 
In other words, this is information about + * the database order but is not considered class metadata. + */ + private Set incorrectlyOrderedSecKeys = new HashSet(); + + /** + * In JE 5.0 we changed the format for String fields. Instead of treating + * the String as an object with a format ID embedded in the serialized + * bytes, we treat it as a primitive and do not include the format ID. + * This works well because a field declared to be type String cannot be + * used to store any other object, and because the String tuple format + * supports null values. + * + * A field containing false signifies that the old String format was used + * when the entity was written, while a true value signifies that the new + * String format was used. The field is assigned to true when declared, so + * that it will be false only when a pre-JE 5.0 version of the format is + * deserialized. (With Java serialization, when a boolean field is added to + * a class and a previously serialized instance is deserialized, the new + * field will always be false). + */ + private boolean newStringFormat = true; + + ComplexFormat(Catalog catalog, + Class cls, + ClassMetadata clsMeta, + EntityMetadata entityMeta) { + super(catalog, cls); + this.clsMeta = clsMeta; + this.entityMeta = entityMeta; + secKeyFields = new ArrayList(); + nonKeyFields = FieldInfo.getInstanceFields(cls, clsMeta); + + /* + * Validate primary key metadata and move primary key field from + * nonKeyFields to priKeyField. + */ + if (clsMeta.getPrimaryKey() != null) { + String fieldName = clsMeta.getPrimaryKey().getName(); + FieldInfo field = FieldInfo.getField(nonKeyFields, fieldName); + if (field == null) { + throw new IllegalArgumentException + ("Primary key field does not exist: " + + getClassName() + '.' + fieldName); + } + nonKeyFields.remove(field); + priKeyField = field; + } + + /* + * Validate secondary key metadata and move secondary key fields from + * nonKeyFields to secKeyFields. + */ + if (clsMeta.getSecondaryKeys() != null) { + for (SecondaryKeyMetadata secKeyMeta : + clsMeta.getSecondaryKeys().values()) { + String fieldName = secKeyMeta.getName(); + FieldInfo field = FieldInfo.getField(nonKeyFields, fieldName); + if (field == null) { + throw new IllegalArgumentException + ("Secondary key field does not exist: " + + getClassName() + '.' + fieldName); + } + Class fieldCls = field.getFieldClass(getCatalog()); + Relationship rel = secKeyMeta.getRelationship(); + if (rel == Relationship.ONE_TO_MANY || + rel == Relationship.MANY_TO_MANY) { + if (!PersistKeyCreator.isManyType(fieldCls)) { + throw new IllegalArgumentException + ("ONE_TO_MANY and MANY_TO_MANY keys must" + + " have an array or Collection type: " + + getClassName() + '.' + fieldName); + } + } else { + if (PersistKeyCreator.isManyType(fieldCls)) { + throw new IllegalArgumentException + ("ONE_TO_ONE and MANY_TO_ONE keys must not" + + " have an array or Collection type: " + + getClassName() + '.' + fieldName); + } + } + if (fieldCls.isPrimitive() && + secKeyMeta.getDeleteAction() == DeleteAction.NULLIFY) { + throw new IllegalArgumentException + ("NULLIFY may not be used with primitive fields: " + + getClassName() + '.' + fieldName); + } + nonKeyFields.remove(field); + secKeyFields.add(field); + } + } + + /* Sort each group of fields by name. 
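+         * Reflection returns declared fields in an unspecified order, so
+         * sorting by name gives the format a deterministic field order;
+         * the serialized record layout depends on this order being stable
+         * across JVMs.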
*/ + Collections.sort(secKeyFields); + Collections.sort(nonKeyFields); + } + + @Override + void migrateFromBeta(Map formatMap) { + super.migrateFromBeta(formatMap); + if (priKeyField != null) { + priKeyField.migrateFromBeta(formatMap); + } + for (FieldInfo field : secKeyFields) { + field.migrateFromBeta(formatMap); + } + for (FieldInfo field : nonKeyFields) { + field.migrateFromBeta(formatMap); + } + } + + /** + * Returns getSuperFormat cast to ComplexFormat. It is guaranteed that all + * super formats of a ComplexFormat are a ComplexFormat. + */ + ComplexFormat getComplexSuper() { + return (ComplexFormat) getSuperFormat(); + } + + /** + * Returns getLatestVersion cast to ComplexFormat. It is guaranteed that + * all versions of a ComplexFormat are a ComplexFormat. + */ + private ComplexFormat getComplexLatest() { + return (ComplexFormat) getLatestVersion(); + } + + FieldInfo getPriKeyFieldInfo() { + return priKeyField; + } + + String getPriKeyField() { + if (clsMeta.getPrimaryKey() != null) { + return clsMeta.getPrimaryKey().getName(); + } else { + return null; + } + } + + @Override + boolean isEntity() { + return clsMeta.isEntityClass(); + } + + @Override + boolean isModelClass() { + return true; + } + + @Override + public ClassMetadata getClassMetadata() { + return clsMeta; + } + + @Override + public EntityMetadata getEntityMetadata() { + return entityMeta; + } + + @Override + ComplexFormat getEntityFormat() { + if (isInitialized()) { + /* The transient entityFormat field is set by initialize(). */ + return entityFormat; + } else { + + /* + * If not initialized, the entity format can be found by traversing + * the super formats. However, this is only possible for an + * existing format which has its superFormat field set. + */ + if (isNew()) { + throw DbCompat.unexpectedState(toString()); + } + for (ComplexFormat format = this; + format != null; + format = format.getComplexSuper()) { + if (format.isEntity()) { + return format; + } + } + return null; + } + } + + @Override + void setEvolveNeeded(boolean needed) { + evolveNeeded = needed; + } + + @Override + boolean getEvolveNeeded() { + return evolveNeeded; + } + + @Override + boolean getNewStringFormat() { + if (getEntityFormat() == null) { + throw DbCompat.unexpectedState(); + } + return newStringFormat; + } + + @Override + public Map getFields() { + + /* + * Synchronization is not required since rawFields is immutable. If + * by chance we create two maps when two threads execute this block, no + * harm is done. But be sure to assign the rawFields field only after + * the map is fully populated. + */ + if (rawFields == null) { + Map map = new HashMap(); + if (priKeyField != null) { + map.put(priKeyField.getName(), priKeyField); + } + for (RawField field : secKeyFields) { + map.put(field.getName(), field); + } + for (RawField field : nonKeyFields) { + map.put(field.getName(), field); + } + rawFields = map; + } + return rawFields; + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + Class cls = getType(); + /* Collect field formats. */ + if (priKeyField != null) { + priKeyField.collectRelatedFormats(catalog, newFormats); + } + for (FieldInfo field : secKeyFields) { + field.collectRelatedFormats(catalog, newFormats); + } + for (FieldInfo field : nonKeyFields) { + field.collectRelatedFormats(catalog, newFormats); + } + /* Collect TO_MANY secondary key field element class formats. 
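+         * For ONE_TO_MANY and MANY_TO_MANY keys the key values are the
+         * elements of an array or Collection field, so the element class
+         * needs a format of its own even though it never appears as a
+         * declared field type.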
*/ + if (entityMeta != null) { + for (SecondaryKeyMetadata secKeyMeta : + entityMeta.getSecondaryKeys().values()) { + String elemClsName = secKeyMeta.getElementClassName(); + if (elemClsName != null) { + Class elemCls = catalog.resolveKeyClass(elemClsName); + catalog.createFormat(elemCls, newFormats); + } + } + } + /* Recursively collect superclass formats. */ + Class superCls = cls.getSuperclass(); + if (superCls != Object.class) { + Format superFormat = catalog.createFormat(superCls, newFormats); + if (!(superFormat instanceof ComplexFormat)) { + throw new IllegalArgumentException + ("The superclass of a complex type must not be a" + + " composite key class or a simple type class: " + + superCls.getName()); + } + } + /* Collect proxied format. */ + String proxiedClsName = clsMeta.getProxiedClassName(); + if (proxiedClsName != null) { + catalog.createFormat(proxiedClsName, newFormats); + } + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + + Class type = getType(); + boolean useEnhanced = false; + if (type != null) { + useEnhanced = EnhancedAccessor.isEnhanced(type); + } + /* Initialize all fields. */ + if (priKeyField != null) { + priKeyField.initialize(catalog, model, initVersion); + } + for (FieldInfo field : secKeyFields) { + field.initialize(catalog, model, initVersion); + } + for (FieldInfo field : nonKeyFields) { + field.initialize(catalog, model, initVersion); + } + /* Set the superclass format for a new (never initialized) format. */ + ComplexFormat superFormat = getComplexSuper(); + if (type != null && superFormat == null) { + Class superCls = type.getSuperclass(); + if (superCls != Object.class) { + superFormat = + (ComplexFormat) catalog.getFormat(superCls.getName()); + setSuperFormat(superFormat); + } + } + /* Initialize the superclass format and validate the super accessor. */ + if (superFormat != null) { + superFormat.initializeIfNeeded(catalog, model); + Accessor superAccessor = superFormat.objAccessor; + if (type != null && superAccessor != null) { + if (useEnhanced) { + if (!(superAccessor instanceof EnhancedAccessor)) { + throw new IllegalStateException + ("The superclass of an enhanced class must also " + + "be enhanced: " + getClassName() + + " extends " + superFormat.getClassName()); + } + } else { + if (!(superAccessor instanceof ReflectionAccessor)) { + throw new IllegalStateException + ("The superclass of an unenhanced class must " + + "not be enhanced: " + getClassName() + + " extends " + superFormat.getClassName()); + } + } + } + } + /* Find entity format, if any. */ + for (ComplexFormat format = this; + format != null; + format = format.getComplexSuper()) { + if (format.isEntity()) { + entityFormat = format; + break; + } + } + + /* + * Ensure that the current entity metadata is always referenced in + * order to return it to the user and to properly construct secondary + * key addresses. Secondary key metadata can change in an entity + * subclass or be created when a new subclass is used, but this will + * not cause evolution of the entity class; instead, the metadata is + * updated here. [#16467] + */ + if (isEntity() && isCurrentVersion()) { + entityMeta = model.getEntityMetadata(getClassName()); + } + + /* Disallow proxy class that extends an entity class. [#15950] */ + if (clsMeta.getProxiedClassName() != null && entityFormat != null) { + throw new IllegalArgumentException + ("A proxy may not be an entity: " + getClassName()); + } + /* Disallow primary keys on entity subclasses. 
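+         * All instances in a primary index share the key declared by the
+         * entity class, so a subclass may not introduce a second
+         * @PrimaryKey field.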
[#15757] */
+        if (entityFormat != null &&
+            entityFormat != this &&
+            priKeyField != null) {
+            throw new IllegalArgumentException
+                ("A PrimaryKey may not appear on an Entity subclass: " +
+                 getClassName() + " field: " + priKeyField.getName());
+        }
+        /* Create the accessors. */
+        if (type != null) {
+            if (useEnhanced) {
+                objAccessor = new EnhancedAccessor(catalog, type, this);
+            } else {
+                Accessor superObjAccessor =
+                    (superFormat != null) ? superFormat.objAccessor : null;
+                objAccessor = new ReflectionAccessor
+                    (catalog, type, superObjAccessor, priKeyField,
+                     secKeyFields, nonKeyFields);
+            }
+        }
+        Accessor superRawAccessor =
+            (superFormat != null) ? superFormat.rawAccessor : null;
+        rawAccessor = new RawAccessor
+            (this, superRawAccessor, priKeyField, secKeyFields, nonKeyFields);
+
+        /* Initialize secondary key field addresses. */
+        EntityMetadata latestEntityMeta = null;
+        if (entityFormat != null) {
+            latestEntityMeta =
+                entityFormat.getLatestVersion().getEntityMetadata();
+        }
+        if (latestEntityMeta != null) {
+            secKeyAddresses = new HashMap();
+            ComplexFormat thisLatest = getComplexLatest();
+            if (thisLatest != this) {
+                thisLatest.initializeIfNeeded(catalog, model);
+            }
+            nextKeyLoop:
+            for (SecondaryKeyMetadata secKeyMeta :
+                 latestEntityMeta.getSecondaryKeys().values()) {
+                String clsName = secKeyMeta.getDeclaringClassName();
+                String fieldName = secKeyMeta.getName();
+                int superLevel = 0;
+                for (ComplexFormat format = this;
+                     format != null;
+                     format = format.getComplexSuper()) {
+                    if (clsName.equals
+                        (format.getLatestVersion().getClassName())) {
+                        String useFieldName = null;
+                        if (format.newToOldFieldMap != null &&
+                            format.newToOldFieldMap.containsKey(fieldName)) {
+                            useFieldName =
+                                format.newToOldFieldMap.get(fieldName);
+                        } else {
+                            useFieldName = fieldName;
+                        }
+                        boolean isSecField;
+                        int fieldNum;
+                        FieldInfo info = FieldInfo.getField
+                            (format.secKeyFields, useFieldName);
+                        if (info != null) {
+                            isSecField = true;
+                            fieldNum = format.secKeyFields.indexOf(info);
+                        } else {
+                            isSecField = false;
+                            info = FieldInfo.getField
+                                (format.nonKeyFields, useFieldName);
+                            if (info == null) {
+                                /* Field not present in old format. */
+                                assert thisLatest != this;
+                                thisLatest.checkNewSecKeyInitializer
+                                    (secKeyMeta);
+                                continue nextKeyLoop;
+                            }
+                            fieldNum = format.nonKeyFields.indexOf(info);
+                        }
+                        FieldAddress addr = new FieldAddress
+                            (isSecField, fieldNum, superLevel, format,
+                             info.getType());
+                        secKeyAddresses.put(secKeyMeta.getKeyName(), addr);
+                    }
+                    superLevel += 1;
+                }
+            }
+        }
+    }
+
+    /**
+     * Checks that the type of a new secondary key is not a primitive and that
+     * the default constructor does not initialize it to a non-null value.
+     */
+    private void checkNewSecKeyInitializer(SecondaryKeyMetadata secKeyMeta) {
+        if (objAccessor != null) {
+
+            /*
+             * If this format represents an abstract class, we will not do the
+             * following check. When initializing this abstract class's
+             * subclass, which is not abstract, the newly added secondary key
+             * will be checked then.
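+             * (objAccessor.newInstance cannot instantiate an abstract
+             * class, so the check below must wait for a concrete subclass
+             * format.)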
[#19358] + */ + if (Modifier.isAbstract(this.getType().getModifiers())) { + return; + } + FieldAddress addr = secKeyAddresses.get(secKeyMeta.getKeyName()); + Object obj = objAccessor.newInstance(); + Object val = objAccessor.getField + (obj, addr.fieldNum, addr.superLevel, addr.isSecField); + if (val != null) { + if (addr.keyFormat.isPrimitive()) { + throw new IllegalArgumentException + ("For a new secondary key field the field type must " + + "not be a primitive -- class: " + + secKeyMeta.getDeclaringClassName() + " field: " + + secKeyMeta.getName()); + } else { + throw new IllegalArgumentException + ("For a new secondary key field the default " + + "constructor must not initialize the field to a " + + "non-null value -- class: " + + secKeyMeta.getDeclaringClassName() + " field: " + + secKeyMeta.getName()); + } + } + } + } + + private boolean nullOrEqual(Object o1, Object o2) { + if (o1 == null) { + return o2 == null; + } else { + return o1.equals(o2); + } + } + + @Override + Object newArray(int len) { + return objAccessor.newArray(len); + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) { + Accessor accessor = rawAccess ? rawAccessor : objAccessor; + return accessor.newInstance(); + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + Accessor accessor = rawAccess ? rawAccessor : objAccessor; + accessor.readSecKeyFields(o, input, 0, Accessor.MAX_FIELD_NUM, -1); + accessor.readNonKeyFields(o, input, 0, Accessor.MAX_FIELD_NUM, -1); + return o; + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException { + + Accessor accessor = rawAccess ? rawAccessor : objAccessor; + accessor.writeSecKeyFields(o, output); + accessor.writeNonKeyFields(o, output); + } + + @Override + Object convertRawObject(Catalog catalog, + boolean rawAccess, + RawObject rawObject, + IdentityHashMap converted) + throws RefreshException { + + /* + * Synchronization is not required since rawInputFields, rawInputLevels + * and rawInputDepth are immutable. If by chance we create duplicate + * values when two threads execute this block, no harm is done. But be + * sure to assign the fields only after the values are fully populated. + */ + FieldInfo[] fields = rawInputFields; + int[] levels = rawInputLevels; + int depth = rawInputDepth; + if (fields == null || levels == null || depth == 0) { + + /* + * The volatile fields are not yet set. Prepare to process the + * class hierarchy, storing class formats in order from the highest + * superclass down to the current class. + */ + depth = 0; + int nFields = 0; + for (ComplexFormat format = this; + format != null; + format = format.getComplexSuper()) { + nFields += format.getNFields(); + depth += 1; + } + ComplexFormat[] hierarchy = new ComplexFormat[depth]; + int level = depth; + for (ComplexFormat format = this; + format != null; + format = format.getComplexSuper()) { + level -= 1; + hierarchy[level] = format; + } + assert level == 0; + + /* Populate levels and fields in parallel. */ + levels = new int[nFields]; + fields = new FieldInfo[nFields]; + int index = 0; + + /* + * The primary key is the first field read/written. We use the + * first primary key field encountered going from this class upward + * in the class hierarchy. 
+ */ + if (getEntityFormat() != null) { + for (level = depth - 1; level >= 0; level -= 1) { + ComplexFormat format = hierarchy[level]; + if (format.priKeyField != null) { + levels[index] = level; + fields[index] = format.priKeyField; + index += 1; + break; + } + } + assert index == 1; + } + + /* + * Secondary key fields are read/written next, from the highest + * base class downward. + */ + for (level = 0; level < depth; level += 1) { + ComplexFormat format = hierarchy[level]; + for (FieldInfo field : format.secKeyFields) { + levels[index] = level; + fields[index] = field; + index += 1; + } + } + + /* + * Other fields are read/written last, from the highest base class + * downward. + */ + for (level = 0; level < depth; level += 1) { + ComplexFormat format = hierarchy[level]; + for (FieldInfo field : format.nonKeyFields) { + levels[index] = level; + fields[index] = field; + index += 1; + } + } + + /* We're finished -- update the volatile fields for next time. */ + assert index == fields.length; + rawInputFields = fields; + rawInputLevels = levels; + rawInputDepth = depth; + } + + /* + * Create an objects array that is parallel to the fields and levels + * arrays, but contains the RawObject for each slot from which the + * field value can be retrieved. The predetermined level for each + * field determines which RawObject in the instance hierarchy to use. + */ + RawObject[] objectsByLevel = new RawObject[depth]; + int level = depth; + for (RawObject raw = rawObject; raw != null; raw = raw.getSuper()) { + if (level == 0) { + throw new IllegalArgumentException + ("RawObject has too many superclasses: " + + rawObject.getType().getClassName()); + } + level -= 1; + objectsByLevel[level] = raw; + } + if (level > 0) { + throw new IllegalArgumentException + ("RawObject has too few superclasses: " + + rawObject.getType().getClassName()); + } + assert level == 0; + RawObject[] objects = new RawObject[fields.length]; + for (int i = 0; i < objects.length; i += 1) { + objects[i] = objectsByLevel[levels[i]]; + } + + /* Create the persistent object and convert all RawObject fields. */ + EntityInput in = new RawComplexInput + (catalog, rawAccess, converted, fields, objects); + Object o = newInstance(in, rawAccess); + converted.put(rawObject, o); + if (getEntityFormat() != null) { + readPriKey(o, in, rawAccess); + } + return readObject(o, in, rawAccess); + } + + @Override + boolean isPriKeyNullOrZero(Object o, boolean rawAccess) { + Accessor accessor = rawAccess ? rawAccessor : objAccessor; + return accessor.isPriKeyFieldNullOrZero(o); + } + + @Override + void writePriKey(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException { + + Accessor accessor = rawAccess ? rawAccessor : objAccessor; + accessor.writePriKeyField(o, output); + } + + @Override + public void readPriKey(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + Accessor accessor = rawAccess ? 
rawAccessor : objAccessor; + accessor.readPriKeyField(o, input); + } + + @Override + public String getOldKeyName(final String keyName) { + if (newToOldFieldMap != null && + newToOldFieldMap.containsKey(keyName)) { + return newToOldFieldMap.get(keyName); + } else { + return keyName; + } + } + + @Override + boolean nullifySecKey(Catalog catalog, + Object entity, + String keyName, + Object keyElement) { + if (secKeyAddresses == null) { + throw DbCompat.unexpectedState(); + } + FieldAddress addr = secKeyAddresses.get(keyName); + if (addr != null) { + Object oldVal = rawAccessor.getField + (entity, addr.fieldNum, addr.superLevel, addr.isSecField); + if (oldVal != null) { + if (keyElement != null) { + RawObject container = (RawObject) oldVal; + Object[] a1 = container.getElements(); + boolean isArray = (a1 != null); + if (!isArray) { + a1 = CollectionProxy.getElements(container); + } + if (a1 != null) { + for (int i = 0; i < a1.length; i += 1) { + if (keyElement.equals(a1[i])) { + int len = a1.length - 1; + Object[] a2 = new Object[len]; + System.arraycopy(a1, 0, a2, 0, i); + System.arraycopy(a1, i + 1, a2, i, len - i); + if (isArray) { + rawAccessor.setField + (entity, addr.fieldNum, + addr.superLevel, addr.isSecField, + new RawObject + (container.getType(), a2)); + } else { + CollectionProxy.setElements(container, a2); + } + return true; + } + } + } + return false; + } else { + rawAccessor.setField + (entity, addr.fieldNum, addr.superLevel, + addr.isSecField, null); + return true; + } + } else { + return false; + } + } else { + return false; + } + } + + @Override + void skipContents(RecordInput input) + throws RefreshException { + + skipToSecKeyField(input, Accessor.MAX_FIELD_NUM); + skipToNonKeyField(input, Accessor.MAX_FIELD_NUM); + } + + @Override + void copySecMultiKey(RecordInput input, Format keyFormat, Set results) + throws RefreshException { + + CollectionProxy.copyElements(input, this, keyFormat, results); + } + + @Override + Format skipToSecKey(RecordInput input, String keyName) + throws RefreshException { + + if (secKeyAddresses == null) { + throw DbCompat.unexpectedState(); + } + FieldAddress addr = secKeyAddresses.get(keyName); + if (addr != null) { + if (addr.isSecField) { + addr.clsFormat.skipToSecKeyField(input, addr.fieldNum); + } else { + skipToSecKeyField(input, Accessor.MAX_FIELD_NUM); + addr.clsFormat.skipToNonKeyField(input, addr.fieldNum); + } + return addr.keyFormat; + } else { + return null; + } + } + + private int getNFields() { + return ((priKeyField != null) ? 
1 : 0) + + secKeyFields.size() + + nonKeyFields.size(); + } + + private void skipToSecKeyField(RecordInput input, int toFieldNum) + throws RefreshException { + + ComplexFormat superFormat = getComplexSuper(); + if (superFormat != null) { + superFormat.skipToSecKeyField(input, Accessor.MAX_FIELD_NUM); + } + int maxNum = Math.min(secKeyFields.size(), toFieldNum); + for (int i = 0; i < maxNum; i += 1) { + input.skipField(secKeyFields.get(i).getType()); + } + } + + private void skipToNonKeyField(RecordInput input, int toFieldNum) + throws RefreshException { + + ComplexFormat superFormat = getComplexSuper(); + if (superFormat != null) { + superFormat.skipToNonKeyField(input, Accessor.MAX_FIELD_NUM); + } + int maxNum = Math.min(nonKeyFields.size(), toFieldNum); + for (int i = 0; i < maxNum; i += 1) { + input.skipField(nonKeyFields.get(i).getType()); + } + } + + private static class FieldAddress { + + boolean isSecField; + int fieldNum; + int superLevel; + ComplexFormat clsFormat; + Format keyFormat; + + FieldAddress(boolean isSecField, + int fieldNum, + int superLevel, + ComplexFormat clsFormat, + Format keyFormat) { + this.isSecField = isSecField; + this.fieldNum = fieldNum; + this.superLevel = superLevel; + this.clsFormat = clsFormat; + this.keyFormat = keyFormat; + } + } + + @Override + boolean evolve(Format newFormatParam, Evolver evolver) { + + /* Disallow evolution to a non-complex format. */ + if (!(newFormatParam instanceof ComplexFormat)) { + evolver.addMissingMutation + (this, newFormatParam, + "Converter is required when a complex type is changed " + + "to a simple type or enum type"); + return false; + } + ComplexFormat newFormat = (ComplexFormat) newFormatParam; + Mutations mutations = evolver.getMutations(); + boolean thisChanged = false; + boolean hierarchyChanged = false; + Map allKeyNameMap = new HashMap(); + + /* The Evolver has already ensured that entities evolve to entities. */ + assert isEntity() == newFormat.isEntity(); + assert isEntity() == (entityMeta != null); + assert newFormat.isEntity() == (newFormat.entityMeta != null); + + /* + * Keep track of the old and new entity class names for use in deleting + * and renaming secondary keys below. If the oldEntityClass is + * non-null this also signifies an entity class or subclass. Note that + * getEntityFormat cannot be called on a newly created format during + * evolution because its super format property is not yet initialized. + * [#16253] + */ + String oldEntityClass; + String newEntityClass; + if (isEntity()) { + oldEntityClass = getClassName(); + newEntityClass = newFormat.getClassName(); + } else { + oldEntityClass = null; + newEntityClass = null; + } + + /* + * Evolve all superclass formats, even when a deleted class appears in + * the hierarchy. This ensures that the super format's + * getLatestVersion/getComplexLatest method can be used accurately + * below. 
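+         *
+         * For reference, a sketch of how a user would register the class
+         * Converters that the loop below checks for (hypothetical class
+         * names; a Converter on a superclass must be accompanied by one on
+         * each of its subclasses):
+         *
+         *   Mutations m = new Mutations();
+         *   m.addConverter(new Converter("ex.Base", 1, conversion));
+         *   m.addConverter(new Converter("ex.Derived", 1, conversion));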
+         */
+        for (ComplexFormat oldSuper = getComplexSuper();
+             oldSuper != null;
+             oldSuper = oldSuper.getComplexSuper()) {
+            Converter converter = mutations.getConverter
+                (oldSuper.getClassName(), oldSuper.getVersion(), null);
+            if (converter != null) {
+                evolver.addMissingMutation
+                    (this, newFormatParam,
+                     "Converter is required for this subclass when a " +
+                     "Converter appears on its superclass: " + converter);
+                return false;
+            }
+            if (!evolver.evolveFormat(oldSuper)) {
+                return false;
+            }
+            if (!oldSuper.isCurrentVersion()) {
+                if (oldSuper.isDeleted()) {
+                    if (!oldSuper.evolveDeletedClass(evolver)) {
+                        return false;
+                    }
+                }
+                if (oldSuper.oldToNewKeyMap != null) {
+                    allKeyNameMap.putAll(oldSuper.oldToNewKeyMap);
+                }
+                hierarchyChanged = true;
+            }
+        }
+
+        /*
+         * Compare the old and new class hierarchies and decide whether each
+         * change is allowed or not:
+         * + Old deleted and removed superclass -- allowed
+         * + Old empty and removed superclass -- allowed
+         * + Old non-empty and removed superclass -- not allowed
+         * + Old superclass repositioned in the hierarchy -- not allowed
+         * + New inserted superclass -- allowed
+         */
+        Class newFormatCls = newFormat.getExistingType();
+        Class newSuper = newFormatCls;
+        List newLevels = new ArrayList();
+        int newLevel = 0;
+        newLevels.add(newLevel);
+
+        /*
+         * When this format has a new superclass, we treat it as a change to
+         * this format as well as to the superclass hierarchy.
+         */
+        if (getSuperFormat() == null) {
+            if (newFormatCls.getSuperclass() != Object.class) {
+                thisChanged = true;
+                hierarchyChanged = true;
+            }
+        } else {
+            if (!getSuperFormat().getLatestVersion().getClassName().equals
+                (newFormatCls.getSuperclass().getName())) {
+                thisChanged = true;
+                hierarchyChanged = true;
+            }
+        }
+
+        for (ComplexFormat oldSuper = getComplexSuper();
+             oldSuper != null;
+             oldSuper = oldSuper.getComplexSuper()) {
+
+            /* Find the matching superclass in the new hierarchy. */
+            String oldSuperName = oldSuper.getLatestVersion().getClassName();
+            Class foundNewSuper = null;
+            int tryNewLevel = newLevel;
+            for (Class newSuper2 = newSuper.getSuperclass();
+                 newSuper2 != Object.class;
+                 newSuper2 = newSuper2.getSuperclass()) {
+                tryNewLevel += 1;
+                if (oldSuperName.equals(newSuper2.getName())) {
+                    foundNewSuper = newSuper2;
+                    newLevel = tryNewLevel;
+                    if (oldSuper.isEntity()) {
+                        assert oldEntityClass == null;
+                        assert newEntityClass == null;
+                        oldEntityClass = oldSuper.getClassName();
+                        newEntityClass = foundNewSuper.getName();
+                    }
+                    break;
+                }
+            }
+
+            if (foundNewSuper != null) {
+
+                /*
+                 * We found the old superclass in the new hierarchy. Traverse
+                 * through the superclass formats that were skipped over above
+                 * when finding it.
+                 */
+                for (Class newSuper2 = newSuper.getSuperclass();
+                     newSuper2 != foundNewSuper;
+                     newSuper2 = newSuper2.getSuperclass()) {
+
+                    /*
+                     * The class hierarchy changed -- a new class was inserted.
+                     */
+                    hierarchyChanged = true;
+
+                    /*
+                     * Check that the new formats skipped over above are not at
+                     * a different position in the old hierarchy.
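+                     * For example, if the old hierarchy was C extends A
+                     * extends B and the new one is C extends B extends A,
+                     * then B has been repositioned and a class Converter is
+                     * required.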
+ */ + for (ComplexFormat oldSuper2 = oldSuper.getComplexSuper(); + oldSuper2 != null; + oldSuper2 = oldSuper2.getComplexSuper()) { + String oldSuper2Name = + oldSuper2.getLatestVersion().getClassName(); + if (oldSuper2Name.equals(newSuper2.getName())) { + evolver.addMissingMutation + (this, newFormatParam, + "Class Converter is required when a " + + "superclass is moved in the class " + + "hierarchy: " + newSuper2.getName()); + return false; + } + } + } + newSuper = foundNewSuper; + newLevels.add(newLevel); + } else { + + /* + * We did not find the old superclass in the new hierarchy. + * The class hierarchy changed, since an old class no longer + * appears. + */ + hierarchyChanged = true; + + /* Check that the old class can be safely removed. */ + if (!oldSuper.isDeleted()) { + ComplexFormat oldSuperLatest = + oldSuper.getComplexLatest(); + if (oldSuperLatest.getNFields() != 0) { + evolver.addMissingMutation + (this, newFormatParam, + "When a superclass is removed from the class " + + "hierarchy, the superclass or all of its " + + "persistent fields must be deleted with a " + + "Deleter: " + + oldSuperLatest.getClassName()); + return false; + } + } + + if (oldEntityClass != null && isCurrentVersion()) { + Map secKeys = + oldSuper.clsMeta.getSecondaryKeys(); + for (FieldInfo field : oldSuper.secKeyFields) { + SecondaryKeyMetadata meta = + getSecondaryKeyMetadataByFieldName + (secKeys, field.getName()); + assert meta != null; + allKeyNameMap.put(meta.getKeyName(), null); + } + } + + /* + * Add the DO_NOT_READ_ACCESSOR level to prevent an empty class + * (no persistent fields) from being read via the Accessor. + */ + newLevels.add(EvolveReader.DO_NOT_READ_ACCESSOR); + } + } + + /* Make FieldReaders for this format if needed. */ + int result = evolveAllFields(newFormat, evolver); + if (result == Evolver.EVOLVE_FAILURE) { + return false; + } + if (result == Evolver.EVOLVE_NEEDED) { + thisChanged = true; + } + if (oldToNewKeyMap != null) { + allKeyNameMap.putAll(oldToNewKeyMap); + } + + /* Require new version number if this class was changed. */ + if (thisChanged && + !evolver.checkUpdatedVersion + ("Changes to the fields or superclass were detected", this, + newFormat)) { + return false; + } + + /* Rename and delete the secondary databases. */ + if (allKeyNameMap.size() > 0 && + oldEntityClass != null && + newEntityClass != null && + isCurrentVersion()) { + for (Map.Entry entry : allKeyNameMap.entrySet()) { + String oldKeyName = entry.getKey(); + String newKeyName = entry.getValue(); + if (newKeyName != null) { + evolver.renameSecondaryDatabase + (oldEntityClass, newEntityClass, + oldKeyName, newKeyName); + } else { + evolver.deleteSecondaryDatabase + (oldEntityClass, oldKeyName); + } + } + } + + /* + * Use an EvolveReader if needed. + * + * We force evolution to occur if the old format did not use the new + * String format. We do not require the user to bump the version + * number, since the format change is internal. Note that we could + * optimize by only forcing evolution if this format may contain + * Strings. 
[#19247] + */ + if (hierarchyChanged || thisChanged || !newStringFormat) { + Reader reader = new EvolveReader(newLevels); + evolver.useEvolvedFormat(this, reader, newFormat); + } else { + evolver.useOldFormat(this, newFormat); + } + return true; + } + + @Override + boolean evolveMetadata(Format newFormatParam, + Converter converter, + Evolver evolver) { + assert !isDeleted(); + assert isEntity(); + assert newFormatParam.isEntity(); + ComplexFormat newFormat = (ComplexFormat) newFormatParam; + + if (!checkKeyTypeChange + (newFormat, entityMeta.getPrimaryKey(), + newFormat.entityMeta.getPrimaryKey(), "primary key", + evolver)) { + return false; + } + + Set deletedKeys; + if (converter instanceof EntityConverter) { + EntityConverter entityConverter = (EntityConverter) converter; + deletedKeys = entityConverter.getDeletedKeys(); + } else { + deletedKeys = Collections.emptySet(); + } + + Map oldSecondaryKeys = + entityMeta.getSecondaryKeys(); + Map newSecondaryKeys = + newFormat.entityMeta.getSecondaryKeys(); + Set insertedKeys = + new HashSet(newSecondaryKeys.keySet()); + + for (SecondaryKeyMetadata oldMeta : oldSecondaryKeys.values()) { + String keyName = oldMeta.getKeyName(); + if (deletedKeys.contains(keyName)) { + if (isCurrentVersion()) { + evolver.deleteSecondaryDatabase(getClassName(), keyName); + } + } else { + SecondaryKeyMetadata newMeta = newSecondaryKeys.get(keyName); + if (newMeta == null) { + evolver.addInvalidMutation + (this, newFormat, converter, + "Existing key not found in new entity metadata: " + + keyName); + return false; + } + insertedKeys.remove(keyName); + String keyLabel = "secondary key: " + keyName; + if (!checkKeyTypeChange + (newFormat, oldMeta, newMeta, keyLabel, evolver)) { + return false; + } + if (!checkSecKeyMetadata + (newFormat, oldMeta, newMeta, evolver)) { + return false; + } + } + } + + if (!insertedKeys.isEmpty()) { + evolver.addEvolveError + (this, newFormat, "Error", + "New keys " + insertedKeys + + " not allowed when using a Converter with an entity class"); + } + + return true; + } + + /** + * Checks that changes to secondary key metadata are legal. + */ + private boolean checkSecKeyMetadata(Format newFormat, + SecondaryKeyMetadata oldMeta, + SecondaryKeyMetadata newMeta, + Evolver evolver) { + if (oldMeta.getRelationship() != newMeta.getRelationship()) { + evolver.addEvolveError + (this, newFormat, + "Change detected in the relate attribute (Relationship) " + + "of a secondary key", + "Old key: " + oldMeta.getKeyName() + + " relate: " + oldMeta.getRelationship() + + " new key: " + newMeta.getKeyName() + + " relate: " + newMeta.getRelationship()); + return false; + } + return true; + } + + /** + * Checks that the type of a key field did not change, as known from + * metadata when a class conversion is used. 
+ */ + private boolean checkKeyTypeChange(Format newFormat, + FieldMetadata oldMeta, + FieldMetadata newMeta, + String keyLabel, + Evolver evolver) { + String oldClass = oldMeta.getClassName(); + String newClass = newMeta.getClassName(); + if (!oldClass.equals(newClass)) { + Format oldType = getCatalog().getFormat(oldClass); + Format newType = getCatalog().getFormat(newClass); + if (oldType == null || newType == null || + ((oldType.getWrapperFormat() == null || + oldType.getWrapperFormat().getId() != + newType.getId()) && + (newType.getWrapperFormat() == null || + newType.getWrapperFormat().getId() != + oldType.getId()))) { + evolver.addEvolveError + (this, newFormat, + "Type change detected for " + keyLabel, + "Old field type: " + oldClass + + " is not compatible with the new type: " + + newClass + + " old field: " + oldMeta.getName() + + " new field: " + newMeta.getName()); + return false; + } + } + return true; + } + + /** + * Special case for creating FieldReaders for a deleted class when it + * appears in the class hierarchy of its non-deleted subclass. + */ + private boolean evolveDeletedClass(Evolver evolver) { + assert isDeleted(); + if (secKeyFieldReader == null || nonKeyFieldReader == null) { + if (priKeyField != null && + getEntityFormat() != null && + !getEntityFormat().isDeleted()) { + evolver.addEvolveError + (this, this, + "Class containing primary key field was deleted ", + "Primary key is needed in an entity class hierarchy: " + + priKeyField.getName()); + return false; + } else { + secKeyFieldReader = new SkipFieldReader(0, secKeyFields); + nonKeyFieldReader = new SkipFieldReader(0, nonKeyFields); + return true; + } + } else { + return true; + } + } + + /** + * Creates a FieldReader for secondary key fields and non-key fields if + * necessary. Checks the primary key field if necessary. Does not evolve + * superclass format fields. + */ + private int evolveAllFields(ComplexFormat newFormat, Evolver evolver) { + + assert !isDeleted(); + secKeyFieldReader = null; + nonKeyFieldReader = null; + oldToNewKeyMap = null; + + /* Evolve primary key field. */ + boolean evolveFailure = false; + boolean localEvolveNeeded = false; + if (priKeyField != null) { + int result = evolver.evolveRequiredKeyField + (this, newFormat, priKeyField, newFormat.priKeyField); + if (result == Evolver.EVOLVE_FAILURE) { + evolveFailure = true; + } else if (result == Evolver.EVOLVE_NEEDED) { + localEvolveNeeded = true; + } + } + + /* Copy the incorrectlyOrderedSecKeys from old format to new format. */ + copyIncorrectlyOrderedSecKeys(newFormat); + + /* Evolve secondary key fields. */ + FieldReader reader = evolveFieldList + (secKeyFields, newFormat.secKeyFields, true, + newFormat.nonKeyFields, newFormat, evolver); + if (reader == FieldReader.EVOLVE_FAILURE) { + evolveFailure = true; + } else if (reader != null) { + localEvolveNeeded = true; + } + if (reader != FieldReader.EVOLVE_NEEDED) { + secKeyFieldReader = reader; + } + + /* Evolve non-key fields. */ + reader = evolveFieldList + (nonKeyFields, newFormat.nonKeyFields, false, + newFormat.secKeyFields, newFormat, evolver); + if (reader == FieldReader.EVOLVE_FAILURE) { + evolveFailure = true; + } else if (reader != null) { + localEvolveNeeded = true; + } + if (reader != FieldReader.EVOLVE_NEEDED) { + nonKeyFieldReader = reader; + } + + /* Return result. 
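+         * EVOLVE_FAILURE takes precedence over EVOLVE_NEEDED, which in
+         * turn takes precedence over EVOLVE_NONE.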
*/ + if (evolveFailure) { + return Evolver.EVOLVE_FAILURE; + } else if (localEvolveNeeded) { + return Evolver.EVOLVE_NEEDED; + } else { + return Evolver.EVOLVE_NONE; + } + } + + /** + * Returns a FieldReader that reads no fields. + * + * Instead of adding a DoNothingFieldReader class, we use a + * MultiFieldReader with an empty field list. We do not add a new + * FieldReader class to avoid changing the catalog format. [#15524] + */ + private FieldReader getDoNothingFieldReader() { + List emptyList = Collections.emptyList(); + return new MultiFieldReader(emptyList); + } + + /** + * Evolves a list of fields, either secondary key or non-key fields, for a + * single class format. + * + * @return a FieldReader if field evolution is needed, null if no evolution + * is needed, or FieldReader.EVOLVE_FAILURE if an evolution error occurs. + */ + private FieldReader evolveFieldList(List oldFields, + List newFields, + boolean isOldSecKeyField, + List otherNewFields, + ComplexFormat newFormat, + Evolver evolver) { + Mutations mutations = evolver.getMutations(); + boolean evolveFailure = false; + boolean localEvolveNeeded = false; + boolean readerNeeded = false; + List fieldReaders = new ArrayList(); + FieldReader currentReader = null; + int newFieldsMatched = 0; + + /* + * Add FieldReaders to the list in old field storage order, since that + * is the order in which field values must be read. + */ + fieldLoop: + for (int oldFieldIndex = 0; + oldFieldIndex < oldFields.size(); + oldFieldIndex += 1) { + + FieldInfo oldField = oldFields.get(oldFieldIndex); + String oldName = oldField.getName(); + SecondaryKeyMetadata oldMeta = null; + if (isOldSecKeyField) { + oldMeta = getSecondaryKeyMetadataByFieldName + (clsMeta.getSecondaryKeys(), oldName); + assert oldMeta != null; + } + + /* Get field mutations. */ + Renamer renamer = mutations.getRenamer + (getClassName(), getVersion(), oldName); + Deleter deleter = mutations.getDeleter + (getClassName(), getVersion(), oldName); + Converter converter = mutations.getConverter + (getClassName(), getVersion(), oldName); + if (deleter != null && (converter != null || renamer != null)) { + evolver.addInvalidMutation + (this, newFormat, deleter, + "Field Deleter is not allowed along with a Renamer or " + + "Converter for the same field: " + oldName); + evolveFailure = true; + continue fieldLoop; + } + + /* + * Match old and new field by name, taking into account the Renamer + * mutation. If the @SecondaryKey annotation was added or removed, + * the field will have moved from one of the two field lists to the + * other. + */ + String newName = (renamer != null) ? + renamer.getNewName() : oldName; + boolean nameChanged = false; + if (!oldName.equals(newName)) { + if (newToOldFieldMap == null) { + newToOldFieldMap = new HashMap(); + } + newToOldFieldMap.put(newName, oldName); + nameChanged = true; + } + int newFieldIndex = FieldInfo.getFieldIndex(newFields, newName); + FieldInfo newField = null; + boolean isNewSecKeyField = isOldSecKeyField; + if (newFieldIndex >= 0) { + newField = newFields.get(newFieldIndex); + + /* + * Change the key name in incorrectlyOrderedSecKeys of the new + * format. + */ + if (nameChanged && + newFormat.incorrectlyOrderedSecKeys != null && + newFormat.incorrectlyOrderedSecKeys.remove(oldName)) { + newFormat.incorrectlyOrderedSecKeys.add(newName); + } + + /* + * [#18961] If the order of the field has been changed, we will + * create a PlainFieldReader for it. 
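+                 * For example, if two fields swap declaration order between
+                 * versions, the stored values must still be read in the old
+                 * order and assigned through the new field indexes.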
+ */ + if (newFieldIndex != oldFieldIndex) { + localEvolveNeeded = true; + readerNeeded = true; + } + } else { + newFieldIndex = FieldInfo.getFieldIndex + (otherNewFields, newName); + if (newFieldIndex >= 0) { + newField = otherNewFields.get(newFieldIndex); + isNewSecKeyField = !isOldSecKeyField; + } + localEvolveNeeded = true; + readerNeeded = true; + + /* + * Remove the key in incorrectlyOrderedSecKeys of the new + * format. + */ + if (newFormat.incorrectlyOrderedSecKeys != null) { + newFormat.incorrectlyOrderedSecKeys.remove(oldName); + } + } + + /* Apply field Deleter and continue. */ + if (deleter != null) { + if (newField != null) { + evolver.addInvalidMutation + (this, newFormat, deleter, + "Field Deleter is not allowed when the persistent " + + "field is still present: " + oldName); + evolveFailure = true; + } + /* A SkipFieldReader can read multiple sequential fields. */ + if (currentReader instanceof SkipFieldReader && + currentReader.acceptField + (oldFieldIndex, newFieldIndex, isNewSecKeyField)) { + currentReader.addField(oldField); + } else { + currentReader = new SkipFieldReader + (oldFieldIndex, oldField); + fieldReaders.add(currentReader); + readerNeeded = true; + localEvolveNeeded = true; + } + if (isOldSecKeyField) { + if (oldToNewKeyMap == null) { + oldToNewKeyMap = new HashMap(); + } + oldToNewKeyMap.put(oldMeta.getKeyName(), null); + } + continue fieldLoop; + } else { + if (newField == null) { + evolver.addMissingMutation + (this, newFormat, + "Field is not present or not persistent: " + + oldName); + evolveFailure = true; + continue fieldLoop; + } + } + + /* + * The old field corresponds to a known new field, and no Deleter + * mutation applies. + */ + newFieldsMatched += 1; + + /* Get and process secondary key metadata changes. */ + SecondaryKeyMetadata newMeta = null; + if (isOldSecKeyField && isNewSecKeyField) { + newMeta = getSecondaryKeyMetadataByFieldName + (newFormat.clsMeta.getSecondaryKeys(), newName); + assert newMeta != null; + + /* Validate metadata changes. */ + if (!checkSecKeyMetadata + (newFormat, oldMeta, newMeta, evolver)) { + evolveFailure = true; + continue fieldLoop; + } + + /* + * Check for a renamed key and save the old-to-new mapping for + * use in renaming the secondary database and for key + * extraction. + */ + String oldKeyName = oldMeta.getKeyName(); + String newKeyName = newMeta.getKeyName(); + if (!oldKeyName.equals(newKeyName)) { + if (oldToNewKeyMap == null) { + oldToNewKeyMap = new HashMap(); + } + oldToNewKeyMap.put(oldName, newName); + localEvolveNeeded = true; + } + } else if (isOldSecKeyField && !isNewSecKeyField) { + if (oldToNewKeyMap == null) { + oldToNewKeyMap = new HashMap(); + } + oldToNewKeyMap.put(oldMeta.getKeyName(), null); + } + + /* Apply field Converter and continue. */ + if (converter != null) { + if (isOldSecKeyField) { + evolver.addInvalidMutation + (this, newFormat, converter, + "Field Converter is not allowed for secondary key " + + "fields: " + oldName); + evolveFailure = true; + } else { + currentReader = new ConvertFieldReader + (converter, oldFieldIndex, newFieldIndex, + isNewSecKeyField); + fieldReaders.add(currentReader); + readerNeeded = true; + localEvolveNeeded = true; + } + continue fieldLoop; + } + + /* + * Evolve the declared version of the field format and all versions + * more recent, and the formats for all of their subclasses. While + * we're at it, check to see if all possible classes are converted. 
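+             * A field declared with type T may actually contain an instance
+             * of any persistent subclass of T, so every such format must be
+             * evolvable before the field can be read.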
+ */ + boolean allClassesConverted = true; + Format oldFieldFormat = oldField.getType(); + for (Format formatVersion = oldFieldFormat.getLatestVersion(); + true; + formatVersion = formatVersion.getPreviousVersion()) { + assert formatVersion != null; + if (!evolver.evolveFormat(formatVersion)) { + evolveFailure = true; + continue fieldLoop; + } + if (!formatVersion.isNew() && + !evolver.isClassConverted(formatVersion)) { + allClassesConverted = false; + } + Set subclassFormats = + evolver.getSubclassFormats(formatVersion); + if (subclassFormats != null) { + for (Format format2 : subclassFormats) { + if (!evolver.evolveFormat(format2)) { + evolveFailure = true; + continue fieldLoop; + } + if (!format2.isNew() && + !evolver.isClassConverted(format2)) { + allClassesConverted = false; + } + } + } + if (formatVersion == oldFieldFormat) { + break; + } + } + + /* + * Check for compatible field types and apply a field widener if + * needed. If no widener is needed, fall through and apply a + * PlainFieldReader. + */ + Format oldLatestFormat = oldFieldFormat.getLatestVersion(); + Format newFieldFormat = newField.getType(); + if (oldLatestFormat.getClassName().equals + (newFieldFormat.getClassName()) && + !oldLatestFormat.isDeleted()) { + /* Formats are identical. Fall through. */ + } else if (allClassesConverted) { + /* All old classes will be converted. Fall through. */ + localEvolveNeeded = true; + } else if (WidenerInput.isWideningSupported + (oldLatestFormat, newFieldFormat, isOldSecKeyField)) { + /* Apply field widener and continue. */ + currentReader = new WidenFieldReader + (oldLatestFormat, newFieldFormat, newFieldIndex, + isNewSecKeyField); + fieldReaders.add(currentReader); + readerNeeded = true; + localEvolveNeeded = true; + continue fieldLoop; + } else { + boolean refWidened = false; + if (!newFieldFormat.isPrimitive() && + !oldLatestFormat.isPrimitive() && + !oldLatestFormat.isDeleted() && + !evolver.isClassConverted(oldLatestFormat)) { + Class oldCls = oldLatestFormat.getExistingType(); + Class newCls = newFieldFormat.getExistingType(); + if (newCls.isAssignableFrom(oldCls)) { + refWidened = true; + } + } + if (refWidened) { + /* A reference type has been widened. Fall through. */ + localEvolveNeeded = true; + } else { + /* Types are not compatible. */ + evolver.addMissingMutation + (this, newFormat, + "Old field type: " + oldLatestFormat.getClassName() + + " is not compatible with the new type: " + + newFieldFormat.getClassName() + + " for field: " + oldName); + evolveFailure = true; + continue fieldLoop; + } + } + + /* + * Old to new field conversion is not needed or is automatic. Read + * fields as if no evolution is needed. A PlainFieldReader can + * read multiple sequential fields. + */ + if (currentReader instanceof PlainFieldReader && + currentReader.acceptField + (oldFieldIndex, newFieldIndex, isNewSecKeyField)) { + currentReader.addField(oldField); + } else { + currentReader = new PlainFieldReader + (oldFieldIndex, newFieldIndex, isNewSecKeyField); + fieldReaders.add(currentReader); + } + } + + /* + * If there are new fields, then the old fields must be read using a + * reader, even if the old field list is empty. Using the accessor + * directly will read fields in the wrong order and will read fields + * that were moved between lists (when adding and dropping + * @SecondaryKey). 
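+         * In that case, if no FieldReader was created above, an empty
+         * MultiFieldReader (a do-nothing reader) is returned below so that
+         * the old fields are still read through the evolution path.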
[#15524] + */ + if (newFieldsMatched < newFields.size()) { + localEvolveNeeded = true; + readerNeeded = true; + } + + if (evolveFailure) { + return FieldReader.EVOLVE_FAILURE; + } else if (readerNeeded) { + if (fieldReaders.size() == 0) { + return getDoNothingFieldReader(); + } else if (fieldReaders.size() == 1) { + return fieldReaders.get(0); + } else { + return new MultiFieldReader(fieldReaders); + } + } else if (localEvolveNeeded) { + return FieldReader.EVOLVE_NEEDED; + } else { + return null; + } + } + + /** + * Base class for all FieldReader subclasses. A FieldReader reads one or + * more fields in the old format data, and may call the new format Accessor + * to set the field values. + */ + private static abstract class FieldReader implements Serializable { + + static final FieldReader EVOLVE_NEEDED = + new PlainFieldReader(0, 0, false); + static final FieldReader EVOLVE_FAILURE = + new PlainFieldReader(0, 0, false); + + private static final long serialVersionUID = 866041475399255164L; + + FieldReader() { + } + + void initialize(Catalog catalog, + int initVersion, + ComplexFormat oldParentFormat, + ComplexFormat newParentFormat, + boolean isOldSecKey) { + } + + boolean acceptField(int oldFieldIndex, + int newFieldIndex, + boolean isNewSecKeyField) { + return false; + } + + void addField(FieldInfo oldField) { + throw DbCompat.unexpectedState(); + } + + abstract void readFields(Object o, + EntityInput input, + Accessor accessor, + int superLevel) + throws RefreshException; + } + + /** + * Reads a continguous block of fields that have the same format in the old + * and new formats. + */ + private static class PlainFieldReader extends FieldReader { + + private static final long serialVersionUID = 1795593463439931402L; + + private int startField; + private int endField; + private boolean secKeyField; + private transient int endOldField; + + PlainFieldReader(int oldFieldIndex, + int newFieldIndex, + boolean isNewSecKeyField) { + endOldField = oldFieldIndex; + startField = newFieldIndex; + endField = newFieldIndex; + secKeyField = isNewSecKeyField; + } + + @Override + boolean acceptField(int oldFieldIndex, + int newFieldIndex, + boolean isNewSecKeyField) { + return oldFieldIndex == endOldField + 1 && + newFieldIndex == endField + 1 && + secKeyField == isNewSecKeyField; + } + + @Override + void addField(FieldInfo oldField) { + endField += 1; + endOldField += 1; + } + + @Override + final void readFields(Object o, + EntityInput input, + Accessor accessor, + int superLevel) + throws RefreshException { + + if (secKeyField) { + accessor.readSecKeyFields + (o, input, startField, endField, superLevel); + } else { + accessor.readNonKeyFields + (o, input, startField, endField, superLevel); + } + } + } + + /** + * Skips a continguous block of fields that exist in the old format but not + * in the new format. 
+ */ + private static class SkipFieldReader extends FieldReader { + + private static final long serialVersionUID = -3060281692155253098L; + + private List fieldFormats; + private transient int endField; + + SkipFieldReader(int startField, List fields) { + endField = startField + fields.size() - 1; + fieldFormats = new ArrayList(fields.size()); + for (FieldInfo field : fields) { + fieldFormats.add(field.getType()); + } + } + + SkipFieldReader(int startField, FieldInfo oldField) { + endField = startField; + fieldFormats = new ArrayList(); + fieldFormats.add(oldField.getType()); + } + + @Override + boolean acceptField(int oldFieldIndex, + int newFieldIndex, + boolean isNewSecKeyField) { + return oldFieldIndex == endField + 1; + } + + @Override + void addField(FieldInfo oldField) { + endField += 1; + fieldFormats.add(oldField.getType()); + } + + @Override + final void readFields(Object o, + EntityInput input, + Accessor accessor, + int superLevel) + throws RefreshException { + + for (Format format : fieldFormats) { + input.skipField(format); + } + } + } + + /** + * Converts a single field using a field Converter. + */ + private static class ConvertFieldReader extends FieldReader { + + private static final long serialVersionUID = 8736410481633998710L; + + private Converter converter; + private int oldFieldNum; + private int fieldNum; + private boolean secKeyField; + private transient Format oldFormat; + private transient Format newFormat; + + ConvertFieldReader(Converter converter, + int oldFieldIndex, + int newFieldIndex, + boolean isNewSecKeyField) { + this.converter = converter; + oldFieldNum = oldFieldIndex; + fieldNum = newFieldIndex; + secKeyField = isNewSecKeyField; + } + + @Override + void initialize(Catalog catalog, + int initVersion, + ComplexFormat oldParentFormat, + ComplexFormat newParentFormat, + boolean isOldSecKey) { + + /* + * The oldFieldNum field was added as part of a bug fix. If not + * present in this version of the catalog, we assume it is equal to + * the new field index. The code prior to the bug fix assumes the + * old and new fields have the same index. [#15797] + */ + if (initVersion < 1) { + oldFieldNum = fieldNum; + } + + if (isOldSecKey) { + oldFormat = + oldParentFormat.secKeyFields.get(oldFieldNum).getType(); + } else { + oldFormat = + oldParentFormat.nonKeyFields.get(oldFieldNum).getType(); + } + if (secKeyField) { + newFormat = + newParentFormat.secKeyFields.get(fieldNum).getType(); + } else { + newFormat = + newParentFormat.nonKeyFields.get(fieldNum).getType(); + } + } + + @Override + final void readFields(Object o, + EntityInput input, + Accessor accessor, + int superLevel) + throws RefreshException { + + /* Create and read the old format instance in raw mode. */ + boolean currentRawMode = input.setRawAccess(true); + Object value; + try { + if (oldFormat.isPrimitive()) { + value = input.readKeyObject(oldFormat); + } else if (oldFormat.getId() == Format.ID_STRING) { + value = input.readStringObject(); + } else { + value = input.readObject(); + } + } finally { + input.setRawAccess(currentRawMode); + } + + /* Convert the raw instance to the current format. */ + Catalog catalog = input.getCatalog(); + value = converter.getConversion().convert(value); + + /* Use a RawSingleInput to convert and type-check the value. 
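+             * RawSingleInput presents the single converted value as if it
+             * were the next field in the input, so the normal accessor
+             * read path can type-check and assign it.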
*/
+            EntityInput rawInput = new RawSingleInput
+                (catalog, currentRawMode, null, value, newFormat);
+
+            if (secKeyField) {
+                accessor.readSecKeyFields
+                    (o, rawInput, fieldNum, fieldNum, superLevel);
+            } else {
+                accessor.readNonKeyFields
+                    (o, rawInput, fieldNum, fieldNum, superLevel);
+            }
+        }
+    }
+
+    /**
+     * Widens a single field from its old format to a compatible wider
+     * format, using a WidenerInput.
+     */
+    private static class WidenFieldReader extends FieldReader {
+
+        private static final long serialVersionUID = -2054520670170407282L;
+
+        private int fromFormatId;
+        private int toFormatId;
+        private int fieldNum;
+        private boolean secKeyField;
+
+        WidenFieldReader(Format oldFormat,
+                         Format newFormat,
+                         int newFieldIndex,
+                         boolean isNewSecKeyField) {
+            fromFormatId = oldFormat.getId();
+            toFormatId = newFormat.getId();
+            fieldNum = newFieldIndex;
+            secKeyField = isNewSecKeyField;
+        }
+
+        @Override
+        final void readFields(Object o,
+                              EntityInput input,
+                              Accessor accessor,
+                              int superLevel)
+            throws RefreshException {
+
+            /* The Accessor reads the field value from a WidenerInput. */
+            EntityInput widenerInput = new WidenerInput
+                (input, fromFormatId, toFormatId);
+
+            if (secKeyField) {
+                accessor.readSecKeyFields
+                    (o, widenerInput, fieldNum, fieldNum, superLevel);
+            } else {
+                accessor.readNonKeyFields
+                    (o, widenerInput, fieldNum, fieldNum, superLevel);
+            }
+        }
+    }
+
+    /**
+     * A FieldReader composed of other FieldReaders, which it calls in
+     * sequence. Used when more than one FieldReader is needed for a list of
+     * fields.
+     */
+    private static class MultiFieldReader extends FieldReader {
+
+        private static final long serialVersionUID = -6035976787562441473L;
+
+        private List subReaders;
+
+        MultiFieldReader(List subReaders) {
+            this.subReaders = subReaders;
+        }
+
+        @Override
+        void initialize(Catalog catalog,
+                        int initVersion,
+                        ComplexFormat oldParentFormat,
+                        ComplexFormat newParentFormat,
+                        boolean isOldSecKey) {
+            for (FieldReader reader : subReaders) {
+                reader.initialize
+                    (catalog, initVersion, oldParentFormat, newParentFormat,
+                     isOldSecKey);
+            }
+        }
+
+        @Override
+        final void readFields(Object o,
+                              EntityInput input,
+                              Accessor accessor,
+                              int superLevel)
+            throws RefreshException {
+
+            for (FieldReader reader : subReaders) {
+                reader.readFields(o, input, accessor, superLevel);
+            }
+        }
+    }
+
+    /**
+     * The Reader for evolving ComplexFormat instances. Reads the old format
+     * data one class (one level in the class hierarchy) at a time. If an
+     * Accessor is used at a given level, the Accessor is used for the
+     * corresponding level in the new class hierarchy (classes may be
+     * inserted/deleted during evolution). At each level, a FieldReader is
+     * called to evolve the secondary key and non-key lists of fields.
+     */
+    private static class EvolveReader implements Reader {
+
+        static final int DO_NOT_READ_ACCESSOR = Integer.MAX_VALUE;
+
+        private static final long serialVersionUID = -1016140948306913283L;
+
+        private transient ComplexFormat newFormat;
+
+        /**
+         * oldHierarchy contains the formats of the old class hierarchy in most
+         * to least derived class order.
+         */
+        private transient ComplexFormat[] oldHierarchy;
+
+        /**
+         * newHierarchyLevels contains the corresponding level in the new
+         * hierarchy for each format in oldHierarchy. newHierarchyLevels is
+         * indexed by the oldHierarchy index.
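+         *
+         * For example, if the old hierarchy is [C, B, A] (most to least
+         * derived) and B was deleted with no remaining fields, the new
+         * hierarchy is [C, A] and newHierarchyLevels is
+         * [0, DO_NOT_READ_ACCESSOR, 1].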
+ */ + private int[] newHierarchyLevels; + + EvolveReader(List newHierarchyLevelsList) { + int oldDepth = newHierarchyLevelsList.size(); + newHierarchyLevels = new int[oldDepth]; + newHierarchyLevelsList.toArray(); + for (int i = 0; i < oldDepth; i += 1) { + newHierarchyLevels[i] = newHierarchyLevelsList.get(i); + } + } + + public void initializeReader(Catalog catalog, + EntityModel model, + int initVersion, + Format oldFormatParam) { + + ComplexFormat oldFormat = (ComplexFormat) oldFormatParam; + newFormat = oldFormat.getComplexLatest(); + newFormat.initializeIfNeeded(catalog, model); + + /* Create newHierarchy array. */ + int newDepth = 0; + for (Format format = newFormat; + format != null; + format = format.getSuperFormat()) { + newDepth += 1; + } + ComplexFormat[] newHierarchy = new ComplexFormat[newDepth]; + int level = 0; + for (ComplexFormat format = newFormat; + format != null; + format = format.getComplexSuper()) { + newHierarchy[level] = format; + level += 1; + } + assert level == newDepth; + + /* Create oldHierarchy array and initialize FieldReaders. */ + int oldDepth = newHierarchyLevels.length; + oldHierarchy = new ComplexFormat[oldDepth]; + level = 0; + for (ComplexFormat oldFormat2 = oldFormat; + oldFormat2 != null; + oldFormat2 = oldFormat2.getComplexSuper()) { + oldHierarchy[level] = oldFormat2; + int level2 = newHierarchyLevels[level]; + ComplexFormat newFormat2 = (level2 != DO_NOT_READ_ACCESSOR) ? + newHierarchy[level2] : null; + level += 1; + if (oldFormat2.secKeyFieldReader != null) { + oldFormat2.secKeyFieldReader.initialize + (catalog, initVersion, oldFormat2, newFormat2, true); + } + if (oldFormat2.nonKeyFieldReader != null) { + oldFormat2.nonKeyFieldReader.initialize + (catalog, initVersion, oldFormat2, newFormat2, false); + } + } + assert level == oldDepth; + } + + public Object newInstance(EntityInput input, boolean rawAccess) { + return newFormat.newInstance(input, rawAccess); + } + + public void readPriKey(Object o, + EntityInput input, + boolean rawAccess) + throws RefreshException { + + /* No conversion necessary for primary keys. */ + newFormat.readPriKey(o, input, rawAccess); + } + + public Object readObject(Object o, + EntityInput input, + boolean rawAccess) + throws RefreshException { + + /* Use the Accessor for the new format. */ + Accessor accessor = rawAccess ? newFormat.rawAccessor + : newFormat.objAccessor; + + /* Read old format fields from the top-most class downward. */ + int maxMinusOne = oldHierarchy.length - 1; + + /* Read secondary key fields with the adjusted superclass level. */ + for (int i = maxMinusOne; i >= 0; i -= 1) { + FieldReader reader = oldHierarchy[i].secKeyFieldReader; + int newLevel = newHierarchyLevels[i]; + if (reader != null) { + reader.readFields(o, input, accessor, newLevel); + } else if (newLevel != DO_NOT_READ_ACCESSOR) { + accessor.readSecKeyFields + (o, input, 0, Accessor.MAX_FIELD_NUM, newLevel); + } + } + + /* Read non-key fields with the adjusted superclass level. 
*/ + for (int i = maxMinusOne; i >= 0; i -= 1) { + FieldReader reader = oldHierarchy[i].nonKeyFieldReader; + int newLevel = newHierarchyLevels[i]; + if (reader != null) { + reader.readFields(o, input, accessor, newLevel); + } else if (newLevel != DO_NOT_READ_ACCESSOR) { + accessor.readNonKeyFields + (o, input, 0, Accessor.MAX_FIELD_NUM, newLevel); + } + } + return o; + } + + public Accessor getAccessor(boolean rawAccess) { + return newFormat.getAccessor(rawAccess); + } + } + + /** + * The secondary key metadata map (ClassMetadata.getSecondaryKeys) is keyed + * by key name, not field name. Key name can be different than field name + * when a @SecondaryKey name property is specified. To look up metadata + * by field name, we must do a linear search. Luckily, the number of keys + * per class is never very large. [#16819] + */ + static SecondaryKeyMetadata + getSecondaryKeyMetadataByFieldName(Map + secKeys, + String fieldName) { + for (SecondaryKeyMetadata meta : secKeys.values()) { + if (meta.getName().equals(fieldName)) { + return meta; + } + } + return null; + } + + /** + * Called when opening an existing secondary database that should have a + * dup comparator configured. If true is returned, then this secondary + * index may have been previously opened without a dup comparator set, and + * therefore no dup comparator should be set on this database. If false is + * returned, the dup comparator should be set by the caller. + */ + boolean isSecKeyIncorrectlyOrdered(String keyName) { + return incorrectlyOrderedSecKeys == null || + incorrectlyOrderedSecKeys.contains(keyName); + } + + /** + * Called when creating a new secondary database that should have a dup + * comparator configured. If true is returned, then this secondary index + * may have been previously opened without a dup comparator set; this + * method will update this format to indicate that the dup comparator is + * now allowed, and the caller should flush the catalog. If false is + * returned, the caller need not flush the catalog. + */ + boolean setSecKeyCorrectlyOrdered(String keyName) { + if (incorrectlyOrderedSecKeys != null) { + return incorrectlyOrderedSecKeys.remove(keyName); + } + incorrectlyOrderedSecKeys = new HashSet(); + assert entityMeta != null; + for (String name : entityMeta.getSecondaryKeys().keySet()) { + if (!name.equals(keyName)) { + incorrectlyOrderedSecKeys.add(name); + } + } + return true; + } + + /* + * Copy the incorrectlyOrderedSecKeys of old format to new format. Used + * during evolution. + */ + private void copyIncorrectlyOrderedSecKeys(ComplexFormat newFormat) { + /* Only copy from the latest version format. */ + if (this == this.getLatestVersion()) { + newFormat.incorrectlyOrderedSecKeys = + this.incorrectlyOrderedSecKeys == null ? + null : + new HashSet(this.incorrectlyOrderedSecKeys); + } + } + + /** + * For unit testing. + */ + public Set getIncorrectlyOrderedSecKeys() { + return incorrectlyOrderedSecKeys; + } + + @Override + public Accessor getAccessor(boolean rawAccess) { + return rawAccess ? rawAccessor : objAccessor; + } +} diff --git a/src/com/sleepycat/persist/impl/CompositeKeyFormat.java b/src/com/sleepycat/persist/impl/CompositeKeyFormat.java new file mode 100644 index 0000000..5e19cbd --- /dev/null +++ b/src/com/sleepycat/persist/impl/CompositeKeyFormat.java @@ -0,0 +1,364 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.FieldMetadata; +import com.sleepycat.persist.raw.RawField; +import com.sleepycat.persist.raw.RawObject; + +/** + * Format for a composite key class. + * + * This class is similar to ComplexFormat in that a composite key class and + * other complex classes have fields, and the Accessor interface is used to + * access those fields. Composite key classes are different in the following + * ways: + * + * - The superclass must be Object. No inheritance is allowed. + * + * - All instance fields must be annotated with @KeyField, which determines + * their order in the data bytes. + * + * - Although fields may be reference types (primitive wrappers or other simple + * reference types), they are stored as if they were primitives. No object + * format ID is stored, and the class of the object must be the declared + * classs of the field; i.e., no polymorphism is allowed for key fields. + * In other words, a composite key is stored as an ordinary tuple as defined + * in the com.sleepycat.bind.tuple package. This keeps the key small and + * gives it a well defined sort order. + * + * - If the key class implements Comparable, it is called by the Database + * btree comparator. It must therefore be available during JE recovery, + * before the store and catalog have been opened. To support this, this + * format can be constructed during recovery. A SimpleCatalog singleton + * instance is used to provide a catalog of simple types that is used by + * the composite key format. + * + * - When interacting with the Accessor, the composite key format treats the + * Accessor's non-key fields as its key fields. The Accessor's key fields + * are secondary keys, while the composite format's key fields are the + * component parts of a single key. + * + * @author Mark Hayes + */ +public class CompositeKeyFormat extends Format { + + private static final long serialVersionUID = 306843428409314630L; + + private ClassMetadata metadata; + private List fields; + private transient Accessor objAccessor; + private transient Accessor rawAccessor; + private transient volatile Map rawFields; + private transient volatile FieldInfo[] rawInputFields; + + static String[] getFieldNameArray(List list) { + int index = 0; + String[] a = new String[list.size()]; + for (FieldMetadata f : list) { + a[index] = f.getName(); + index += 1; + } + return a; + } + + /** + * Creates a new composite key format. + */ + CompositeKeyFormat(Catalog catalog, + Class cls, + ClassMetadata metadata, + List fieldMeta) { + this(catalog, cls, metadata, getFieldNameArray(fieldMeta)); + } + + /** + * Reconsistitues a composite key format after a PersistComparator is + * deserialized. 
+ */ + CompositeKeyFormat(Catalog catalog, Class cls, String[] fieldNames) { + this(catalog, cls, null /*metadata*/, fieldNames); + } + + private CompositeKeyFormat(Catalog catalog, + Class cls, + ClassMetadata metadata, + String[] fieldNames) { + super(catalog, cls); + this.metadata = metadata; + + /* Check that the superclass is Object. */ + Class superCls = cls.getSuperclass(); + if (superCls != Object.class) { + throw new IllegalArgumentException + ("Composite key class must be derived from Object: " + + cls.getName()); + } + + /* Populate fields list in fieldNames order. */ + List instanceFields = + FieldInfo.getInstanceFields(cls, metadata); + fields = new ArrayList(instanceFields.size()); + for (String fieldName : fieldNames) { + FieldInfo field = null; + for (FieldInfo tryField : instanceFields) { + if (fieldName.equals(tryField.getName())) { + field = tryField; + break; + } + } + if (field == null) { + throw new IllegalArgumentException + ("Composite key field is not an instance field: " + + getClassName() + '.' + fieldName); + } + fields.add(field); + instanceFields.remove(field); + Class fieldCls = field.getFieldClass(getCatalog()); + if (!SimpleCatalog.isSimpleType(fieldCls) && + !fieldCls.isEnum()) { + throw new IllegalArgumentException + ("Composite key field is not a simple type or enum: " + + getClassName() + '.' + fieldName); + } + } + if (instanceFields.size() > 0) { + throw new IllegalArgumentException + ("All composite key instance fields must be key fields: " + + getClassName() + '.' + instanceFields.get(0).getName()); + } + } + + List getFieldInfo() { + return fields; + } + + @Override + void migrateFromBeta(Map formatMap) { + super.migrateFromBeta(formatMap); + for (FieldInfo field : fields) { + field.migrateFromBeta(formatMap); + } + } + + @Override + boolean isModelClass() { + return true; + } + + @Override + public ClassMetadata getClassMetadata() { + if (metadata == null) { + throw DbCompat.unexpectedState(getClassName()); + } + return metadata; + } + + @Override + public Map getFields() { + + /* + * Lazily create the raw type information. Synchronization is not + * required since this object is immutable. If by chance we create two + * maps when two threads execute this block, no harm is done. But be + * sure to assign the rawFields field only after the map is fully + * populated. + */ + if (rawFields == null) { + Map map = new HashMap(); + for (RawField field : fields) { + map.put(field.getName(), field); + } + rawFields = map; + } + return rawFields; + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + /* Collect field formats. */ + for (FieldInfo field : fields) { + field.collectRelatedFormats(catalog, newFormats); + } + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + /* Initialize all fields. */ + for (FieldInfo field : fields) { + field.initialize(catalog, model, initVersion); + } + /* Create the accessor. */ + Class type = getType(); + if (type != null) { + if (EnhancedAccessor.isEnhanced(type)) { + objAccessor = new EnhancedAccessor(catalog, type, fields); + } else { + objAccessor = new ReflectionAccessor(catalog, type, fields); + } + } + rawAccessor = new RawAccessor(this, fields); + } + + @Override + Object newArray(int len) { + return objAccessor.newArray(len); + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) { + Accessor accessor = rawAccess ? 
rawAccessor : objAccessor; + return accessor.newInstance(); + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + Accessor accessor = rawAccess ? rawAccessor : objAccessor; + accessor.readCompositeKeyFields(o, input); + return o; + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException { + + Accessor accessor = rawAccess ? rawAccessor : objAccessor; + accessor.writeCompositeKeyFields(o, output); + } + + @Override + Object convertRawObject(Catalog catalog, + boolean rawAccess, + RawObject rawObject, + IdentityHashMap converted) + throws RefreshException { + + /* + * Synchronization is not required since rawInputFields is immutable. + * If by chance we create duplicate values when two threads execute + * this block, no harm is done. But be sure to assign the field only + * after the values are fully populated. + */ + FieldInfo[] myFields = rawInputFields; + if (myFields == null) { + myFields = new FieldInfo[fields.size()]; + fields.toArray(myFields); + rawInputFields = myFields; + } + if (rawObject.getSuper() != null) { + throw new IllegalArgumentException + ("RawObject has too many superclasses: " + + rawObject.getType().getClassName()); + } + RawObject[] objects = new RawObject[myFields.length]; + Arrays.fill(objects, rawObject); + EntityInput in = new RawComplexInput + (catalog, rawAccess, converted, myFields, objects); + Object o = newInstance(in, rawAccess); + converted.put(rawObject, o); + return readObject(o, in, rawAccess); + } + + @Override + void skipContents(RecordInput input) + throws RefreshException { + + int maxNum = fields.size(); + for (int i = 0; i < maxNum; i += 1) { + fields.get(i).getType().skipContents(input); + } + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + int maxNum = fields.size(); + for (int i = 0; i < maxNum; i += 1) { + fields.get(i).getType().copySecKey(input, output); + } + } + + @Override + Format getSequenceKeyFormat() { + if (fields.size() != 1) { + throw new IllegalArgumentException + ("A composite key class used with a sequence may contain " + + "only a single key field: " + getClassName()); + } + return fields.get(0).getType().getSequenceKeyFormat(); + } + + @Override + boolean evolve(Format newFormatParam, Evolver evolver) { + + /* Disallow evolution to a non-composite format. */ + if (!(newFormatParam instanceof CompositeKeyFormat)) { + evolver.addEvolveError + (this, newFormatParam, null, + "A composite key class may not be changed to a different " + + "type"); + return false; + } + CompositeKeyFormat newFormat = (CompositeKeyFormat) newFormatParam; + + /* Check for added or removed key fields. */ + if (fields.size() != newFormat.fields.size()) { + evolver.addEvolveError + (this, newFormat, + "Composite key class fields were added or removed ", + "Old fields: " + fields + + " new fields: " + newFormat.fields); + return false; + } + + /* Check for modified key fields. */ + boolean newVersion = false; + for (int i = 0; i < fields.size(); i += 1) { + int result = evolver.evolveRequiredKeyField + (this, newFormat, fields.get(i), + newFormat.fields.get(i)); + if (result == Evolver.EVOLVE_FAILURE) { + return false; + } + if (result == Evolver.EVOLVE_NEEDED) { + newVersion = true; + } + } + + /* + * We never need to use a custom reader because the physical key field + * formats never change. 
But we do create a new evolved format when + * a type changes (primitive <-> primitive wrapper) so that the new + * type information is correct. + */ + if (newVersion) { + evolver.useEvolvedFormat(this, newFormat, newFormat); + } else { + evolver.useOldFormat(this, newFormat); + } + return true; + } +} diff --git a/src/com/sleepycat/persist/impl/ConverterReader.java b/src/com/sleepycat/persist/impl/ConverterReader.java new file mode 100644 index 0000000..bd28613 --- /dev/null +++ b/src/com/sleepycat/persist/impl/ConverterReader.java @@ -0,0 +1,81 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.evolve.Converter; +import com.sleepycat.persist.raw.RawObject; + +/** + * Reader for invoking a class Converter mutation. + * + * @author Mark Hayes + */ +public class ConverterReader implements Reader { + + private static final long serialVersionUID = -305788321064984348L; + + private Converter converter; + private transient Format oldFormat; + + ConverterReader(Converter converter) { + this.converter = converter; + } + + public void initializeReader(Catalog catalog, + EntityModel model, + int initVersion, + Format oldFormat) { + this.oldFormat = oldFormat; + } + + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + /* Create the old format RawObject. */ + return oldFormat.newInstance(input, true); + } + + public void readPriKey(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + /* Read the old format RawObject's primary key. */ + oldFormat.readPriKey(o, input, true); + } + + public Object readObject(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + Catalog catalog = input.getCatalog(); + + /* Read the old format RawObject and convert it. */ + boolean currentRawMode = input.setRawAccess(true); + try { + o = oldFormat.readObject(o, input, true); + } finally { + input.setRawAccess(currentRawMode); + } + o = converter.getConversion().convert(o); + + /* Convert the current format RawObject to a live Object. */ + if (!rawAccess && o instanceof RawObject) { + o = catalog.convertRawObject((RawObject) o, null); + } + return o; + } + + public Accessor getAccessor(boolean rawAccess) { + return oldFormat.getAccessor(rawAccess); + } +} diff --git a/src/com/sleepycat/persist/impl/Enhanced.java b/src/com/sleepycat/persist/impl/Enhanced.java new file mode 100644 index 0000000..d25bdf5 --- /dev/null +++ b/src/com/sleepycat/persist/impl/Enhanced.java @@ -0,0 +1,175 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +/** + * Interface implemented by a persistent class via bytecode enhancement. + * + *
+ * <p>See {@link Accessor} for method documentation.  {@link EnhancedAccessor}
+ * implements Accessor and forwards all calls to methods in the Enhanced
+ * class.</p>
+ *
+ * <p>Each class that implements this interface (including its subclasses and
+ * superclasses except for Object) must also implement a static block that
+ * registers a prototype instance by calling
+ * EnhancedAccessor.registerClass.  Other instances are created from the
+ * prototype instance using {@link #bdbNewInstance}.</p>
+ *
+ * <pre>static { EnhancedAccessor.registerClass(Xxx.class.getName(), new Xxx()); }</pre>
+ *
+ * <p>An example of the generated code for reading and writing fields is shown
+ * below.</p>
+ *
+ * <pre>
        + *  private int f1;
        + *  private String f2;
        + *  private MyClass f3;
        + *
        + *  public void bdbWriteNonKeyFields(EntityOutput output) {
        + *
        + *      super.bdbWriteNonKeyFields(output);
        + *
        + *      output.writeInt(f1);
        + *      output.writeObject(f2, null);
        + *      output.writeObject(f3, null);
        + *  }
        + *
        + *  public void bdbReadNonKeyFields(EntityInput input,
        + *                                  int startField,
        + *                                  int endField,
        + *                                  int superLevel) {
        + *
        + *      if (superLevel != 0) {
        + *          super.bdbReadNonKeyFields(input, startField, endField,
        + *                                    superLevel - 1);
        + *      }
        + *      if (superLevel <= 0) {
        + *          switch (startField) {
        + *          case 0:
        + *              f1 = input.readInt();
        + *              if (endField == 0) break;
        + *          case 1:
        + *              f2 = (String) input.readObject();
        + *              if (endField == 1) break;
        + *          case 2:
        + *              f3 = (MyClass) input.readObject();
        + *          }
        + *      }
        + *  }
+ * </pre>
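+ *
+ * <p>A generated field getter for the same class might look as follows (an
+ * illustrative sketch, not verbatim enhancer output):</p>
+ *
+ * <pre>
+ *  public Object bdbGetField(Object o,
+ *                            int field,
+ *                            int superLevel,
+ *                            boolean isSecField) {
+ *      if (superLevel > 0) {
+ *          return super.bdbGetField(o, field, superLevel - 1, isSecField);
+ *      }
+ *      // This example class has only non-key fields, so isSecField is
+ *      // always false here.
+ *      switch (field) {
+ *      case 0: return f1;
+ *      case 1: return f2;
+ *      case 2: return f3;
+ *      default: return null;
+ *      }
+ *  }
+ * </pre>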
        + * + * @author Mark Hayes + */ +public interface Enhanced { + + /** + * @see Accessor#newInstance + */ + Object bdbNewInstance(); + + /** + * @see Accessor#newArray + */ + Object bdbNewArray(int len); + + /** + * Calls the super class method if this class does not contain the primary + * key field. + * + * @see Accessor#isPriKeyFieldNullOrZero + */ + boolean bdbIsPriKeyFieldNullOrZero(); + + /** + * Calls the super class method if this class does not contain the primary + * key field. + * + * @see Accessor#writePriKeyField + */ + void bdbWritePriKeyField(EntityOutput output, Format format) + throws RefreshException; + + /** + * Calls the super class method if this class does not contain the primary + * key field. + * + * @see Accessor#readPriKeyField + */ + void bdbReadPriKeyField(EntityInput input, Format format) + throws RefreshException; + + /** + * @see Accessor#writeSecKeyFields + */ + void bdbWriteSecKeyFields(EntityOutput output) + throws RefreshException; + + /** + * @see Accessor#readSecKeyFields + */ + void bdbReadSecKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException; + + /** + * @see Accessor#writeNonKeyFields + */ + void bdbWriteNonKeyFields(EntityOutput output) + throws RefreshException; + + /** + * @see Accessor#readNonKeyFields + */ + void bdbReadNonKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException; + + /** + * @see Accessor#writeCompositeKeyFields + */ + void bdbWriteCompositeKeyFields(EntityOutput output, Format[] formats) + throws RefreshException; + + /** + * @see Accessor#readCompositeKeyFields + */ + void bdbReadCompositeKeyFields(EntityInput input, Format[] formats) + throws RefreshException; + + /** + * @see Accessor#getField + */ + Object bdbGetField(Object o, + int field, + int superLevel, + boolean isSecField); + + /** + * @see Accessor#setField + */ + void bdbSetField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value); + + /** + * @see Accessor#setPriField + */ + void bdbSetPriField(Object o, Object value); +} diff --git a/src/com/sleepycat/persist/impl/EnhancedAccessor.java b/src/com/sleepycat/persist/impl/EnhancedAccessor.java new file mode 100644 index 0000000..e36736d --- /dev/null +++ b/src/com/sleepycat/persist/impl/EnhancedAccessor.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.Array; +import java.lang.reflect.Modifier; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.sleepycat.compat.DbCompat; + +/** + * Implements Accessor for a complex persistent class. + * + * @author Mark Hayes + */ +public class EnhancedAccessor implements Accessor { + + private static final Map classRegistry = + Collections.synchronizedMap(new HashMap()); + + /* Is public for unit tests. 
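+       Set with -DexpectEnhanced=true; isEnhanced then throws rather than
+       silently falling back to reflection when a class was not enhanced.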
*/ + public static final boolean EXPECT_ENHANCED = + "true".equals(System.getProperty("expectEnhanced")); + + private Enhanced prototype; + private Format priKeyFormat; + private Format[] compositeKeyFormats; + private Class type; + + /** + * Registers a prototype instance, and should be called during + * initialization of the prototype class. The prototype may be null for + * an abstract class. + */ + public static void registerClass(String className, Enhanced prototype) { + classRegistry.put(className, prototype); + } + + /** + * Returns whether a given class is a (registered) enhanced class. + */ + static boolean isEnhanced(Class type) { + boolean enhanced = classRegistry.containsKey(type.getName()); + if (!enhanced && EXPECT_ENHANCED) { + throw new IllegalStateException + ("Test was run with expectEnhanced=true but class " + + type.getName() + " is not enhanced"); + } + return enhanced; + } + + private EnhancedAccessor(Class type) { + this.type = type; + prototype = classRegistry.get(type.getName()); + assert prototype != null || Modifier.isAbstract(type.getModifiers()); + } + + /** + * Creates an accessor for a complex type. + */ + EnhancedAccessor(Catalog catalog, Class type, ComplexFormat format) { + this(type); + + /* + * Find the primary key format for this format or one of its superclass + * formats. + */ + ComplexFormat declaringFormat = format; + while (declaringFormat != null) { + FieldInfo priKeyField = declaringFormat.getPriKeyFieldInfo(); + if (priKeyField != null) { + priKeyFormat = catalog.getFormat(priKeyField.getClassName()); + break; + } else { + declaringFormat = declaringFormat.getComplexSuper(); + } + } + } + + /** + * Creates an accessor for a composite key type. + */ + EnhancedAccessor(Catalog catalog, Class type, List fieldInfos) { + this(type); + final int nFields = fieldInfos.size(); + compositeKeyFormats = new Format[nFields]; + for (int i = 0; i < nFields; i += 1) { + compositeKeyFormats[i] = + catalog.getFormat(fieldInfos.get(i).getClassName()); + } + } + + public Object newInstance() { + if (prototype == null) { + /* Abstract class -- internal error. */ + throw DbCompat.unexpectedState(); + } + return prototype.bdbNewInstance(); + } + + public Object newArray(int len) { + if (prototype == null) { + /* Abstract class -- use reflection for now. 
*/ + return Array.newInstance(type, len); + } + return prototype.bdbNewArray(len); + } + + public boolean isPriKeyFieldNullOrZero(Object o) { + if (priKeyFormat == null) { + throw DbCompat.unexpectedState + ("No primary key: " + o.getClass().getName()); + } + return ((Enhanced) o).bdbIsPriKeyFieldNullOrZero(); + } + + public void writePriKeyField(Object o, EntityOutput output) + throws RefreshException { + + if (priKeyFormat == null) { + throw DbCompat.unexpectedState + ("No primary key: " + o.getClass().getName()); + } + ((Enhanced) o).bdbWritePriKeyField(output, priKeyFormat); + } + + public void readPriKeyField(Object o, EntityInput input) + throws RefreshException { + + if (priKeyFormat == null) { + throw DbCompat.unexpectedState + ("No primary key: " + o.getClass().getName()); + } + ((Enhanced) o).bdbReadPriKeyField(input, priKeyFormat); + } + + public void writeSecKeyFields(Object o, EntityOutput output) + throws RefreshException { + + ((Enhanced) o).bdbWriteSecKeyFields(output); + } + + public void readSecKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + ((Enhanced) o).bdbReadSecKeyFields + (input, startField, endField, superLevel); + } + + public void writeNonKeyFields(Object o, EntityOutput output) + throws RefreshException { + + ((Enhanced) o).bdbWriteNonKeyFields(output); + } + + public void readNonKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + ((Enhanced) o).bdbReadNonKeyFields + (input, startField, endField, superLevel); + } + + public void writeCompositeKeyFields(Object o, EntityOutput output) + throws RefreshException { + + ((Enhanced) o).bdbWriteCompositeKeyFields(output, compositeKeyFormats); + } + + public void readCompositeKeyFields(Object o, EntityInput input) + throws RefreshException { + + ((Enhanced) o).bdbReadCompositeKeyFields(input, compositeKeyFormats); + } + + public Object getField(Object o, + int field, + int superLevel, + boolean isSecField) { + return ((Enhanced) o).bdbGetField(o, field, superLevel, isSecField); + } + + public void setField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value) { + ((Enhanced) o).bdbSetField(o, field, superLevel, isSecField, value); + } + + public void setPriField(Object o, Object value) { + ((Enhanced) o).bdbSetPriField(o, value); + } +} diff --git a/src/com/sleepycat/persist/impl/EntityInput.java b/src/com/sleepycat/persist/impl/EntityInput.java new file mode 100644 index 0000000..fc3bc57 --- /dev/null +++ b/src/com/sleepycat/persist/impl/EntityInput.java @@ -0,0 +1,130 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.math.BigDecimal; +import java.math.BigInteger; + +/** + * Used for reading object fields. + * + *
+ * <p>Unlike TupleInput, Strings are returned by {@link #readObject} when using
+ * this class.</p>
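+ *
+ * <p>For example (an illustrative sketch, mirroring the generated code shown
+ * for the Enhanced interface):</p>
+ *
+ * <pre>
+ *  void readFields(EntityInput input) throws RefreshException {
+ *      int f1 = input.readInt();
+ *      String f2 = (String) input.readObject();
+ *  }
+ * </pre>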
        + * + * @author Mark Hayes + */ +public interface EntityInput { + + /** + * Returns the Catalog associated with this input. + */ + Catalog getCatalog(); + + /** + * Return whether this input is in raw mode, i.e., whether it is returning + * raw instances. + */ + boolean isRawAccess(); + + /** + * Changes raw mode and returns the original mode, which is normally + * restored later. For temporarily changing the mode during a conversion. + */ + boolean setRawAccess(boolean rawAccessParam); + + /** + * Called via Accessor to read all fields with reference types, except for + * the primary key field and composite key fields (see readKeyObject + * below). + */ + Object readObject() + throws RefreshException; + + /** + * Called for a primary key field or a composite key field with a reference + * type. + * + *
+ * <p>For such key fields, no formatId is present nor can the object
+ * already be present in the visited object set.</p>
        + */ + Object readKeyObject(Format format) + throws RefreshException; + + /** + * Called for a String field, that is not a primary key field or a + * composite key field with a reference type. + * + *
+ * <p>For the new String format, no formatId is present nor can the object
+ * already be present in the visited object set.  For the old String
+ * format, this method simply calls readObject for compatibility.</p>
        + */ + Object readStringObject() + throws RefreshException; + + /** + * Called via Accessor.readSecKeyFields for a primary key field with a + * reference type. This method must be called before reading any other + * fields. + */ + void registerPriKeyObject(Object o); + + /** + * Called via Accessor.readSecKeyFields for a primary String key field. + * This method must be called before reading any other fields. + */ + void registerPriStringKeyObject(Object o); + + /** + * Called by ObjectArrayFormat and PrimitiveArrayFormat to read the array + * length. + */ + int readArrayLength(); + + /** + * Called by EnumFormat to read and return index of the enum constant. + */ + int readEnumConstant(String[] names); + + /** + * Called via PersistKeyCreator to skip fields prior to the secondary key + * field. Also called during class evolution so skip deleted fields. + */ + void skipField(Format declaredFormat) + throws RefreshException; + + /* The following methods are a subset of the methods in TupleInput. */ + + String readString() + throws RefreshException; + char readChar() + throws RefreshException; + boolean readBoolean() + throws RefreshException; + byte readByte() + throws RefreshException; + short readShort() + throws RefreshException; + int readInt() + throws RefreshException; + long readLong() + throws RefreshException; + float readSortedFloat() + throws RefreshException; + double readSortedDouble() + throws RefreshException; + BigInteger readBigInteger() + throws RefreshException; + BigDecimal readSortedBigDecimal() + throws RefreshException; +} diff --git a/src/com/sleepycat/persist/impl/EntityOutput.java b/src/com/sleepycat/persist/impl/EntityOutput.java new file mode 100644 index 0000000..8024e9d --- /dev/null +++ b/src/com/sleepycat/persist/impl/EntityOutput.java @@ -0,0 +1,83 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * Used for writing object fields. + * + *
+ * <p>Unlike TupleOutput, Strings should be passed to {@link #writeObject} when
+ * using this class.</p>
+ *
+ * <p>Note that currently there is only one implementation of EntityOutput:
+ * RecordOutput.  There is no RawObjectOutput implementation because we
+ * currently have no need to convert from persistent objects to RawObject
+ * instances.  The EntityOutput interface is only for symmetry with
+ * EntityInput and in case we need RawObjectOutput in the future.</p>
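+ *
+ * <p>For example (an illustrative sketch using the f1 and f2 fields from the
+ * Enhanced interface example):</p>
+ *
+ * <pre>
+ *  void writeFields(EntityOutput output) throws RefreshException {
+ *      output.writeInt(f1);
+ *      output.writeObject(f2, null); // f2 is a String
+ *  }
+ * </pre>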
        + * + * @author Mark Hayes + */ +public interface EntityOutput { + + /** + * Called via Accessor to write all fields with reference types, except for + * the primary key field and composite key fields (see writeKeyObject + * below). + */ + void writeObject(Object o, Format fieldFormat) + throws RefreshException; + + /** + * Called for a primary key field or composite key field with a reference + * type. + */ + void writeKeyObject(Object o, Format fieldFormat) + throws RefreshException; + + /** + * Called via Accessor.writeSecKeyFields for a primary key field with a + * reference type. This method must be called before writing any other + * fields. + */ + void registerPriKeyObject(Object o); + + /** + * Called by ObjectArrayFormat and PrimitiveArrayFormat to write the array + * length. + */ + void writeArrayLength(int length); + + /** + * Called by EnumFormat to write the given index of the enum constant. + */ + void writeEnumConstant(String[] names, int index); + + /* The following methods are a subset of the methods in TupleOutput. */ + + TupleOutput writeString(String val); + TupleOutput writeChar(int val); + TupleOutput writeBoolean(boolean val); + TupleOutput writeByte(int val); + TupleOutput writeShort(int val); + TupleOutput writeInt(int val); + TupleOutput writeLong(long val); + TupleOutput writeSortedFloat(float val); + TupleOutput writeSortedDouble(double val); + TupleOutput writeBigInteger(BigInteger val); + TupleOutput writeSortedBigDecimal(BigDecimal val); +} diff --git a/src/com/sleepycat/persist/impl/EnumFormat.java b/src/com/sleepycat/persist/impl/EnumFormat.java new file mode 100644 index 0000000..a155e90 --- /dev/null +++ b/src/com/sleepycat/persist/impl/EnumFormat.java @@ -0,0 +1,253 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.Array; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.raw.RawObject; + +/** + * Format for all enum types. + * + * In this class we resort to using reflection to allocate arrays of enums. + * If there is a need for it, reflection could be avoided in the future by + * generating code as new array formats are encountered. + * + * @author Mark Hayes + */ +public class EnumFormat extends Format { + + private static final long serialVersionUID = 1069833955604373538L; + + private String[] names; + private transient Object[] values; + + EnumFormat(Catalog catalog, Class type) { + super(catalog, type); + values = type.getEnumConstants(); + names = new String[values.length]; + for (int i = 0; i < names.length; i += 1) { + names[i] = ((Enum) values[i]).name(); + } + } + + /** + * For use in a deserialized CompositeKeyFormat. 
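+ * The enum constant values are resolved lazily in initValues, since getType
+ * may return null until the enum class itself is available.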
+ */ + EnumFormat(Catalog catalog, Class type, String[] enumData) { + super(catalog, type); + names = enumData; + } + + /** + * Returns data needed for serialization of a CompositeKeyFormat. + */ + String[] getFormatData() { + return names; + } + + @Override + public boolean isEnum() { + return true; + } + + @Override + public List getEnumConstants() { + return Arrays.asList(names); + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + if (values == null) { + initValues(); + } + } + + private void initValues() { + Class cls = getType(); + if (cls != null) { + values = new Object[names.length]; + for (int i = 0; i < names.length; i += 1) { + try { + values[i] = Enum.valueOf(cls, names[i]); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException + ("Deletion and renaming of enum values is not " + + "supported: " + names[i], e); + } + } + } + } + + @Override + Object newArray(int len) { + return Array.newInstance(getType(), len); + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) { + int index = input.readEnumConstant(names); + if (rawAccess) { + return new RawObject(this, names[index]); + } else { + return values[index]; + } + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) { + /* newInstance reads the value -- do nothing here. */ + return o; + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + if (rawAccess) { + String name = ((RawObject) o).getEnum(); + for (int i = 0; i < names.length; i += 1) { + if (names[i].equals(name)) { + output.writeEnumConstant(names, i); + return; + } + } + } else { + for (int i = 0; i < values.length; i += 1) { + if (o == values[i]) { + output.writeEnumConstant(names, i); + return; + } + } + } + throw DbCompat.unexpectedState("Bad enum: " + o); + } + + @Override + Object convertRawObject(Catalog catalog, + boolean rawAccess, + RawObject rawObject, + IdentityHashMap converted) { + String name = rawObject.getEnum(); + for (int i = 0; i < names.length; i += 1) { + if (names[i].equals(name)) { + Object o = values[i]; + converted.put(rawObject, o); + return o; + } + } + throw new IllegalArgumentException + ("Enum constant is not defined: " + name); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(input.getPackedIntByteLength()); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + int len = input.getPackedIntByteLength(); + output.writeFast + (input.getBufferBytes(), input.getBufferOffset(), len); + input.skipFast(len); + } + + @Override + boolean evolve(Format newFormatParam, Evolver evolver) { + if (!(newFormatParam instanceof EnumFormat)) { + evolver.addEvolveError + (this, newFormatParam, + "Incompatible enum type changed detected", + "An enum class may not be changed to a non-enum type"); + /* For future: + evolver.addMissingMutation + (this, newFormatParam, + "Converter is required when an enum class is changed to " + + "a non-enum type"); + */ + return false; + } + + final EnumFormat newFormat = (EnumFormat) newFormatParam; + + /* Return quickly if the enum was not changed at all. 
*/ + if (Arrays.equals(names, newFormat.names)) { + evolver.useOldFormat(this, newFormat); + return true; + } + + final List newNamesList = Arrays.asList(newFormat.names); + final Set newNamesSet = new HashSet(newNamesList); + final List oldNamesList = Arrays.asList(names); + + /* Deletion (or renaming, which appears as deletion) is not allowed. */ + if (!newNamesSet.containsAll(oldNamesList)) { + final Set oldNamesSet = new HashSet(oldNamesList); + oldNamesSet.removeAll(newNamesSet); + evolver.addEvolveError + (this, newFormat, + "Incompatible enum type changed detected", + "Enum values may not be removed: " + oldNamesSet); + } + + /* Use a List for additional names to preserve ordinal order. */ + final List additionalNamesList = + new ArrayList(newNamesList); + additionalNamesList.removeAll(oldNamesList); + final int nAdditionalNames = additionalNamesList.size(); + + /* + * If there are no aditional names, the new and old formats are + * equivalent. This is the case where only the declaration order was + * changed. + */ + if (nAdditionalNames == 0) { + evolver.useOldFormat(this, newFormat); + return true; + } + + /* + * Evolve the new format. It should use the old names array, but with + * any additional names appended. [#17140] + */ + final int nOldNames = names.length; + newFormat.names = new String[nOldNames + nAdditionalNames]; + System.arraycopy(names, 0, newFormat.names, 0, nOldNames); + for (int i = 0; i < nAdditionalNames; i += 1) { + newFormat.names[nOldNames + i] = additionalNamesList.get(i); + } + newFormat.initValues(); + + /* + * Because we never change the array index (stored integer value) for + * an enum value, the new format can read the values written by the old + * format (newFormat is used as the Reader in the 2nd param below). + */ + evolver.useEvolvedFormat(this, newFormat, newFormat); + return true; + } +} diff --git a/src/com/sleepycat/persist/impl/Evolver.java b/src/com/sleepycat/persist/impl/Evolver.java new file mode 100644 index 0000000..af2846d --- /dev/null +++ b/src/com/sleepycat/persist/impl/Evolver.java @@ -0,0 +1,773 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.ArrayList; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.evolve.Converter; +import com.sleepycat.persist.evolve.Deleter; +import com.sleepycat.persist.evolve.Mutation; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.evolve.Renamer; +import com.sleepycat.persist.model.SecondaryKeyMetadata; + +/** + * Evolves each old format that is still relevant if necessary, using Mutations + * to configure deleters, renamers, and converters. 
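+ *
+ * For example (an illustrative sketch of the public
+ * com.sleepycat.persist.evolve API; the class names and versions, and the
+ * storeConfig and myConversion variables, are hypothetical):
+ *
+ * <pre>
+ *  Mutations mutations = new Mutations();
+ *  mutations.addRenamer(new Renamer("pkg.OldName", 1, "pkg.NewName"));
+ *  mutations.addDeleter(new Deleter("pkg.UnusedClass", 1));
+ *  mutations.addConverter(new Converter("pkg.Changed", 2, myConversion));
+ *  storeConfig.setMutations(mutations);
+ * </pre>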
+ * + * @author Mark Hayes + */ +class Evolver { + + static final int EVOLVE_NONE = 0; + static final int EVOLVE_NEEDED = 1; + static final int EVOLVE_FAILURE = 2; + + private PersistCatalog catalog; + private String storePrefix; + private Mutations mutations; + private Map newFormats; + private boolean forceEvolution; + private boolean disallowClassChanges; + private boolean nestedFormatsChanged; + private Map changedFormats; + private StringBuilder errors; + private Set deleteDbs; + private Map renameDbs; + private Map renameFormats; + private Map evolvedFormats; + private List unprocessedFormats; + private Map> subclassMap; + + Evolver(PersistCatalog catalog, + String storePrefix, + Mutations mutations, + Map newFormats, + boolean forceEvolution, + boolean disallowClassChanges) { + this.catalog = catalog; + this.storePrefix = storePrefix; + this.mutations = mutations; + this.newFormats = newFormats; + this.forceEvolution = forceEvolution; + this.disallowClassChanges = disallowClassChanges; + changedFormats = new IdentityHashMap(); + errors = new StringBuilder(); + deleteDbs = new HashSet(); + renameDbs = new HashMap(); + renameFormats = new IdentityHashMap(); + evolvedFormats = new HashMap(); + unprocessedFormats = new ArrayList(); + subclassMap = catalog.getSubclassMap(); + } + + final Mutations getMutations() { + return mutations; + } + + /** + * Returns whether any formats were changed during evolution, and therefore + * need to be stored in the catalog. + */ + boolean areFormatsChanged() { + return !changedFormats.isEmpty(); + } + + /** + * Returns whether the given format was changed during evolution. + */ + boolean isFormatChanged(Format format) { + return changedFormats.containsKey(format); + } + + private void setFormatsChanged(Format oldFormat) { + checkClassChangesAllowed(oldFormat); + changedFormats.put(oldFormat, oldFormat); + nestedFormatsChanged = true; + /* PersistCatalog.expectNoClassChanges is true in unit tests only. */ + if (PersistCatalog.expectNoClassChanges) { + throw new IllegalStateException("expectNoClassChanges"); + } + } + + private void checkClassChangesAllowed(Format oldFormat) { + if (disallowClassChanges) { + throw new IllegalStateException + ("When performing an upgrade changes are not allowed " + + "but were made to: " + oldFormat.getClassName()); + } + } + + /** + * Returns the set of formats for a specific superclass format, or null if + * the superclass is not a complex type or has not subclasses. + */ + Set getSubclassFormats(Format superFormat) { + return subclassMap.get(superFormat); + } + + /** + * Returns an error string if any mutations are invalid or missing, or + * returns null otherwise. If non-null is returned, the store may not be + * opened. + */ + String getErrors() { + if (errors.length() > 0) { + return errors.toString() + "\n---\n" + + "(Note that when upgrading an application in a replicated " + + "environment, this exception may indicate that the Master " + + "was mistakenly upgraded before this Replica could be " + + "upgraded, and the solution is to upgrade this Replica.)"; + } else { + return null; + } + } + + /** + * Adds a newline and the given error. 
+ */ + private void addError(String error) { + if (errors.length() > 0) { + errors.append("\n---\n"); + } + errors.append(error); + } + + private String getClassVersionLabel(Format format, String prefix) { + if (format != null) { + return prefix + + " class: " + format.getClassName() + + " version: " + format.getVersion(); + } else { + return ""; + } + } + + /** + * Adds a specified error when no specific mutation is involved. + */ + void addEvolveError(Format oldFormat, + Format newFormat, + String scenario, + String error) { + checkClassChangesAllowed(oldFormat); + if (scenario == null) { + scenario = "Error"; + } + addError(scenario + " when evolving" + + getClassVersionLabel(oldFormat, "") + + getClassVersionLabel(newFormat, " to") + + " Error: " + error); + } + + /** + * Adds an error for an invalid mutation. + */ + void addInvalidMutation(Format oldFormat, + Format newFormat, + Mutation mutation, + String error) { + checkClassChangesAllowed(oldFormat); + addError("Invalid mutation: " + mutation + + getClassVersionLabel(oldFormat, " For") + + getClassVersionLabel(newFormat, " New") + + " Error: " + error); + } + + /** + * Adds an error for a missing mutation. + */ + void addMissingMutation(Format oldFormat, + Format newFormat, + String error) { + checkClassChangesAllowed(oldFormat); + addError("Mutation is missing to evolve" + + getClassVersionLabel(oldFormat, "") + + getClassVersionLabel(newFormat, " to") + + " Error: " + error); + } + + /** + * Called by PersistCatalog for all non-entity formats. + */ + void addNonEntityFormat(Format oldFormat) { + unprocessedFormats.add(oldFormat); + } + + /** + * Called by PersistCatalog after calling evolveFormat or + * addNonEntityFormat for all old formats. + * + * We do not require deletion of an unreferenced class for two + * reasons: 1) built-in proxy classes may not be referenced, 2) the + * user may wish to declare persistent classes that are not yet used. + */ + void finishEvolution() { + /* Process unreferenced classes. */ + for (Format oldFormat : unprocessedFormats) { + oldFormat.setUnused(true); + evolveFormat(oldFormat); + } + } + + /** + * Called by PersistCatalog for all entity formats, and by Format.evolve + * methods for all potentially referenced non-entity formats. + */ + boolean evolveFormat(Format oldFormat) { + if (oldFormat.isNew()) { + return true; + } + boolean result; + Format oldEntityFormat = oldFormat.getEntityFormat(); + boolean trackEntityChanges = oldEntityFormat != null; + boolean saveNestedFormatsChanged = nestedFormatsChanged; + if (trackEntityChanges) { + nestedFormatsChanged = false; + } + Integer oldFormatId = oldFormat.getId(); + if (evolvedFormats.containsKey(oldFormatId)) { + result = evolvedFormats.get(oldFormatId); + } else { + evolvedFormats.put(oldFormatId, true); + result = evolveFormatInternal(oldFormat); + evolvedFormats.put(oldFormatId, result); + } + if (oldFormat.getLatestVersion().isNew()) { + nestedFormatsChanged = true; + } + if (trackEntityChanges) { + if (nestedFormatsChanged) { + Format latest = oldEntityFormat.getLatestVersion(); + if (latest != null) { + latest.setEvolveNeeded(true); + } + } + nestedFormatsChanged = saveNestedFormatsChanged; + } + return result; + } + + /** + * Tries to evolve a given existing format to the current version of the + * class and returns false if an invalid mutation is encountered or the + * configured mutations are not sufficient. 
+ */ + private boolean evolveFormatInternal(Format oldFormat) { + + /* Predefined formats and deleted classes never need evolving. */ + if (Format.isPredefined(oldFormat) || oldFormat.isDeleted()) { + return true; + } + + /* Get class mutations. */ + String oldName = oldFormat.getClassName(); + int oldVersion = oldFormat.getVersion(); + Renamer renamer = mutations.getRenamer(oldName, oldVersion, null); + Deleter deleter = mutations.getDeleter(oldName, oldVersion, null); + Converter converter = + mutations.getConverter(oldName, oldVersion, null); + if (deleter != null && (converter != null || renamer != null)) { + addInvalidMutation + (oldFormat, null, deleter, + "Class Deleter not allowed along with a Renamer or " + + "Converter for the same class"); + return false; + } + + /* + * For determining the new name, arrays get special treatment. The + * component format is evolved in the process, and we disallow muations + * for arrays. + */ + String newName; + if (oldFormat.isArray()) { + if (deleter != null || converter != null || renamer != null) { + Mutation mutation = (deleter != null) ? deleter : + ((converter != null) ? converter : renamer); + addInvalidMutation + (oldFormat, null, mutation, + "Mutations not allowed for an array"); + return false; + } + Format compFormat = oldFormat.getComponentType(); + if (!evolveFormat(compFormat)) { + return false; + } + Format latest = compFormat.getLatestVersion(); + if (latest != compFormat) { + newName = (latest.isArray() ? "[" : "[L") + + latest.getClassName() + ';'; + } else { + newName = oldName; + } + } else { + newName = (renamer != null) ? renamer.getNewName() : oldName; + } + + /* Try to get the new class format. Save exception for later. */ + Format newFormat; + String newFormatException; + try { + Class newClass = catalog.resolveClass(newName); + try { + newFormat = catalog.createFormat(newClass, newFormats); + assert newFormat != oldFormat : newFormat.getClassName(); + newFormatException = null; + } catch (Exception e) { + newFormat = null; + newFormatException = e.toString(); + } + } catch (ClassNotFoundException e) { + newFormat = null; + newFormatException = e.toString(); + } + + if (newFormat != null) { + + /* + * If the old format is not the existing latest version and the new + * format is not an existing format, then we must evolve the latest + * old version to the new format first. We cannot evolve old + * format to a new format that may be discarded because it is equal + * to the latest existing format (which will remain the current + * version). + */ + if (oldFormat != oldFormat.getLatestVersion() && + newFormat.getPreviousVersion() == null) { + assert newFormats.containsValue(newFormat); + Format oldLatestFormat = oldFormat.getLatestVersion(); + if (!evolveFormat(oldLatestFormat)) { + return false; + } + if (oldLatestFormat == oldLatestFormat.getLatestVersion()) { + /* newFormat is no longer relevant [#21869]. */ + newFormats.remove(newFormat.getClassName()); + newFormat = oldLatestFormat; + } + } + + /* + * If the old format was previously evolved to the new format + * (which means the new format is actually an existing format), + * then there is nothing to do. This is the case where no class + * changes were made. + * + * However, if mutations were specified when opening the catalog + * that are different than the mutations last used, then we must + * force the re-evolution of all old formats. 
+ */ + if (!forceEvolution && + newFormat == oldFormat.getLatestVersion()) { + return true; + } + } + + /* Apply class Renamer and continue if successful. */ + if (renamer != null) { + if (!applyClassRenamer(renamer, oldFormat, newFormat)) { + return false; + } + } + + /* Apply class Converter and return. */ + if (converter != null) { + if (oldFormat.isEntity()) { + if (newFormat == null || !newFormat.isEntity()) { + addInvalidMutation + (oldFormat, newFormat, converter, + "Class converter not allowed for an entity class " + + "that is no longer present or not having an " + + "@Entity annotation"); + return false; + } + if (!oldFormat.evolveMetadata(newFormat, converter, this)) { + return false; + } + } + return applyConverter(converter, oldFormat, newFormat); + } + + /* Apply class Deleter and return. */ + boolean needDeleter = + (newFormat == null) || + (newFormat.isEntity() != oldFormat.isEntity()); + if (deleter != null) { + if (!needDeleter) { + addInvalidMutation + (oldFormat, newFormat, deleter, + "Class deleter not allowed when the class and its " + + "@Entity or @Persistent annotation is still present"); + return false; + } + return applyClassDeleter(deleter, oldFormat, newFormat); + } else { + if (needDeleter) { + if (newFormat == null) { + assert newFormatException != null; + /* FindBugs newFormat known to be null excluded. */ + addMissingMutation + (oldFormat, newFormat, newFormatException); + } else { + addMissingMutation + (oldFormat, newFormat, + "@Entity switched to/from @Persistent"); + } + return false; + } + } + + /* + * Class-level mutations have been applied. Now apply field mutations + * (for complex classes) or special conversions (enum conversions, for + * example) by calling the old format's evolve method. + */ + return oldFormat.evolve(newFormat, this); + } + + /** + * Use the old format and discard the new format. Called by + * Format.evolve when the old and new formats are identical. + */ + void useOldFormat(Format oldFormat, Format newFormat) { + Format renamedFormat = renameFormats.get(oldFormat); + if (renamedFormat != null) { + + /* + * The format was renamed but, because this method is called, we + * know that no other class changes were made. Use the new/renamed + * format as the reader. + */ + assert renamedFormat == newFormat; + useEvolvedFormat(oldFormat, renamedFormat, renamedFormat); + } else if (newFormat != null && + (oldFormat.getVersion() != newFormat.getVersion() || + !oldFormat.isCurrentVersion())) { + + /* + * If the user wants a new version number, but ther are no other + * changes, we will oblige. Or, if an attempt is being made to + * use an old version, then the following events happened and we + * must evolve the old format: + * 1) The (previously) latest version of the format was evolved + * because it is not equal to the live class version. Note that + * evolveFormatInternal always evolves the latest version first. + * 2) We are now attempting to evolve an older version of the same + * format, and it happens to be equal to the live class version. + * However, we're already committed to the new format, and we must + * evolve all versions. + * [#16467] + */ + useEvolvedFormat(oldFormat, newFormat, newFormat); + } else { + /* The new format is discarded. */ + catalog.useExistingFormat(oldFormat); + if (newFormat != null) { + newFormats.remove(newFormat.getClassName()); + } + } + } + + /** + * Install an evolver Reader in the old format. Called by Format.evolve + * when the old and new formats are not identical. 
+ */ + void useEvolvedFormat(Format oldFormat, + Reader evolveReader, + Format newFormat) { + oldFormat.setReader(evolveReader); + if (newFormat != null) { + oldFormat.setLatestVersion(newFormat); + } + setFormatsChanged(oldFormat); + } + + private boolean applyClassRenamer(Renamer renamer, + Format oldFormat, + Format newFormat) { + if (!checkUpdatedVersion(renamer, oldFormat, newFormat)) { + return false; + } + if (oldFormat.isEntity() && oldFormat.isCurrentVersion()) { + String newClassName = newFormat.getClassName(); + String oldClassName = oldFormat.getClassName(); + /* Queue the renaming of the primary and secondary databases. */ + renameDbs.put + (Store.makePriDbName(storePrefix, oldClassName), + Store.makePriDbName(storePrefix, newClassName)); + for (SecondaryKeyMetadata keyMeta : + oldFormat.getEntityMetadata().getSecondaryKeys().values()) { + String keyName = keyMeta.getKeyName(); + renameDbs.put + (Store.makeSecDbName(storePrefix, oldClassName, keyName), + Store.makeSecDbName(storePrefix, newClassName, keyName)); + } + } + + /* + * Link the old format to the renamed format so that we can detect the + * rename in useOldFormat. + */ + renameFormats.put(oldFormat, newFormat); + + setFormatsChanged(oldFormat); + return true; + } + + /** + * Called by ComplexFormat when a secondary key name is changed. + */ + void renameSecondaryDatabase(String oldEntityClass, + String newEntityClass, + String oldKeyName, + String newKeyName) { + renameDbs.put + (Store.makeSecDbName(storePrefix, oldEntityClass, oldKeyName), + Store.makeSecDbName(storePrefix, newEntityClass, newKeyName)); + } + + private boolean applyClassDeleter(Deleter deleter, + Format oldFormat, + Format newFormat) { + if (!checkUpdatedVersion(deleter, oldFormat, newFormat)) { + return false; + } + if (oldFormat.isEntity() && oldFormat.isCurrentVersion()) { + /* Queue the deletion of the primary and secondary databases. */ + String className = oldFormat.getClassName(); + deleteDbs.add(Store.makePriDbName(storePrefix, className)); + for (SecondaryKeyMetadata keyMeta : + oldFormat.getEntityMetadata().getSecondaryKeys().values()) { + deleteDbs.add(Store.makeSecDbName + (storePrefix, className, keyMeta.getKeyName())); + } + } + + /* + * Set the format to deleted last, so that the above test using + * isCurrentVersion works properly. + */ + oldFormat.setDeleted(true); + if (newFormat != null) { + oldFormat.setLatestVersion(newFormat); + } + + setFormatsChanged(oldFormat); + return true; + } + + /** + * Called by ComplexFormat when a secondary key is dropped. 
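+     *
+     * Illustrative note (the name layout is assumed from the behavior of
+     * Store.makePriDbName/makeSecDbName; store, class and key names are
+     * hypothetical): the database names queued for rename or deletion take
+     * the form
+     *
+     *   persist#MyStore#my.pkg.Employee          (primary database)
+     *   persist#MyStore#my.pkg.Employee#dept     (secondary for key "dept")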
+ */ + void deleteSecondaryDatabase(String oldEntityClass, String keyName) { + deleteDbs.add(Store.makeSecDbName + (storePrefix, oldEntityClass, keyName)); + } + + private boolean applyConverter(Converter converter, + Format oldFormat, + Format newFormat) { + if (!checkUpdatedVersion(converter, oldFormat, newFormat)) { + return false; + } + Reader reader = new ConverterReader(converter); + useEvolvedFormat(oldFormat, reader, newFormat); + return true; + } + + boolean isClassConverted(Format format) { + return format.getReader() instanceof ConverterReader; + } + + private boolean checkUpdatedVersion(Mutation mutation, + Format oldFormat, + Format newFormat) { + if (newFormat != null && + !oldFormat.isEnum() && + newFormat.getVersion() <= oldFormat.getVersion()) { + addInvalidMutation + (oldFormat, newFormat, mutation, + "A new higher version number must be assigned"); + return false; + } else { + return true; + } + } + + boolean checkUpdatedVersion(String scenario, + Format oldFormat, + Format newFormat) { + if (newFormat != null && + !oldFormat.isEnum() && + newFormat.getVersion() <= oldFormat.getVersion()) { + addEvolveError + (oldFormat, newFormat, scenario, + "A new higher version number must be assigned"); + return false; + } else { + return true; + } + } + + void renameAndRemoveDatabases(Store store, Transaction txn) + throws DatabaseException { + + for (String dbName : deleteDbs) { + String[] fileAndDbNames = store.parseDbName(dbName); + /* Do nothing if database does not exist. */ + DbCompat.removeDatabase + (store.getEnvironment(), txn, + fileAndDbNames[0], fileAndDbNames[1]); + } + + /* + * An importunate locker must be used to rename databases, since rename + * with Database handles open is not currently possible. This is the + * same sort of operation as performed by the HA replayer. If the + * evolution (and rename here) occurs in a replication group upgrade, + * this will cause DatabasePreemptedException the next time the + * database is accessed (via the store or otherwise). In a standalone + * environment, this won't happen because the database won't be open; + * evolve occurs before opening the database in this case. [#16655] + */ + boolean saveImportunate = false; + if (txn != null) { + saveImportunate = DbCompat.setImportunate(txn, true); + } + try { + for (Map.Entry entry : renameDbs.entrySet()) { + String oldName = entry.getKey(); + String newName = entry.getValue(); + String[] oldFileAndDbNames = store.parseDbName(oldName); + String[] newFileAndDbNames = store.parseDbName(newName); + /* Do nothing if database does not exist. */ + DbCompat.renameDatabase + (store.getEnvironment(), txn, + oldFileAndDbNames[0], oldFileAndDbNames[1], + newFileAndDbNames[0], newFileAndDbNames[1]); + } + } finally { + if (txn != null) { + DbCompat.setImportunate(txn, saveImportunate); + } + } + } + + /** + * Evolves a primary key field or composite key field. + */ + int evolveRequiredKeyField(Format oldParent, + Format newParent, + FieldInfo oldField, + FieldInfo newField) { + int result = EVOLVE_NONE; + String oldName = oldField.getName(); + final String FIELD_KIND = + "primary key field or composite key class field"; + final String FIELD_LABEL = + FIELD_KIND + ": " + oldName; + + if (newField == null) { + addMissingMutation + (oldParent, newParent, + "Field is missing and deletion is not allowed for a " + + FIELD_LABEL); + return EVOLVE_FAILURE; + } + + /* Check field mutations. Only a Renamer is allowed. 
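+         *
+         * Editorial illustration (hypothetical names): renaming a primary
+         * key field or a composite key class field is expressed with the
+         * four-argument field Renamer, e.g.
+         *
+         *   mutations.addRenamer(new Renamer("my.pkg.EmployeeKey", 0,
+         *                                    "ssn", "taxId"));
+         *
+         * Deleter and Converter mutations for such fields are rejected
+         * below.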
+         */
+        Deleter deleter = mutations.getDeleter
+            (oldParent.getClassName(), oldParent.getVersion(), oldName);
+        if (deleter != null) {
+            addInvalidMutation
+                (oldParent, newParent, deleter,
+                 "Deleter is not allowed for a " + FIELD_LABEL);
+            return EVOLVE_FAILURE;
+        }
+        Converter converter = mutations.getConverter
+            (oldParent.getClassName(), oldParent.getVersion(), oldName);
+        if (converter != null) {
+            addInvalidMutation
+                (oldParent, newParent, converter,
+                 "Converter is not allowed for a " + FIELD_LABEL);
+            return EVOLVE_FAILURE;
+        }
+        Renamer renamer = mutations.getRenamer
+            (oldParent.getClassName(), oldParent.getVersion(), oldName);
+        String newName = newField.getName();
+        if (renamer != null) {
+            if (!renamer.getNewName().equals(newName)) {
+                addInvalidMutation
+                    (oldParent, newParent, renamer,
+                     "Renamer is not mapped to the new field name: " +
+                     newName);
+                return EVOLVE_FAILURE;
+            }
+            result = EVOLVE_NEEDED;
+        } else {
+            if (!oldName.equals(newName)) {
+                addMissingMutation
+                    (oldParent, newParent,
+                     "Renamer is required when field name is changed from: " +
+                     oldName + " to: " + newName);
+                return EVOLVE_FAILURE;
+            }
+        }
+
+        /*
+         * Evolve the declared version of the field format.
+         */
+        Format oldFieldFormat = oldField.getType();
+        if (!evolveFormat(oldFieldFormat)) {
+            return EVOLVE_FAILURE;
+        }
+        Format oldLatestFormat = oldFieldFormat.getLatestVersion();
+        if (oldFieldFormat != oldLatestFormat) {
+            result = EVOLVE_NEEDED;
+        }
+        Format newFieldFormat = newField.getType();
+
+        if (oldLatestFormat.getClassName().equals
+            (newFieldFormat.getClassName())) {
+            /* Formats are identical. */
+            return result;
+        } else if ((oldLatestFormat.getWrapperFormat() != null &&
+                    oldLatestFormat.getWrapperFormat().getId() ==
+                    newFieldFormat.getId()) ||
+                   (newFieldFormat.getWrapperFormat() != null &&
+                    newFieldFormat.getWrapperFormat().getId() ==
+                    oldLatestFormat.getId())) {
+            /* Primitive <-> primitive wrapper type change. */
+            return EVOLVE_NEEDED;
+        } else {
+            /* Type was changed incompatibly. */
+            addEvolveError
+                (oldParent, newParent,
+                 "Type may not be changed for a " + FIELD_KIND,
+                 "Old field type: " + oldLatestFormat.getClassName() +
+                 " is not compatible with the new type: " +
+                 newFieldFormat.getClassName() +
+                 " for field: " + oldName);
+            return EVOLVE_FAILURE;
+        }
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/FieldInfo.java b/src/com/sleepycat/persist/impl/FieldInfo.java
new file mode 100644
index 0000000..2a0b22d
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/FieldInfo.java
@@ -0,0 +1,258 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.Serializable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawField;
+import com.sleepycat.persist.model.FieldMetadata;
+import com.sleepycat.persist.model.ClassMetadata;
+
+/**
+ * A field definition used by ComplexFormat and CompositeKeyFormat.
+ *
+ * <p>Note that the equals(), compareTo() and hashCode() methods only use the
+ * name field in this class.  Comparing two FieldInfo objects is only done
+ * when both are declared in the same class, so comparing the field name is
+ * sufficient.</p>
+ *
+ * @author Mark Hayes
+ */
+class FieldInfo implements RawField, Serializable, Comparable<FieldInfo> {
+
+    private static final long serialVersionUID = 2062721100372306296L;
+
+    /**
+     * Returns a list of all non-transient non-static fields that are declared
+     * in the given class.
+     */
+    static List<FieldInfo> getInstanceFields(Class cls,
+                                             ClassMetadata clsMeta) {
+        List<FieldInfo> fields = null;
+        if (clsMeta != null) {
+            Collection<FieldMetadata> persistentFields =
+                clsMeta.getPersistentFields();
+            if (persistentFields != null) {
+                fields = new ArrayList<FieldInfo>(persistentFields.size());
+                String clsName = cls.getName();
+                for (FieldMetadata fieldMeta : persistentFields) {
+                    if (!clsName.equals(fieldMeta.getDeclaringClassName())) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " must be declared in " + clsName);
+                    }
+                    Field field;
+                    try {
+                        field = cls.getDeclaredField(fieldMeta.getName());
+                    } catch (NoSuchFieldException e) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " is not declared in this class");
+                    }
+                    if (!field.getType().getName().equals
+                        (fieldMeta.getClassName())) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " must be of type " + field.getType().getName());
+                    }
+                    if (Modifier.isStatic(field.getModifiers())) {
+                        throw new IllegalArgumentException
+                            ("Persistent field " + fieldMeta +
+                             " may not be static");
+                    }
+                    fields.add(new FieldInfo(field));
+                }
+            }
+        }
+        if (fields == null) {
+            Field[] declaredFields = cls.getDeclaredFields();
+            fields = new ArrayList<FieldInfo>(declaredFields.length);
+            for (Field field : declaredFields) {
+                int mods = field.getModifiers();
+                if (!Modifier.isTransient(mods) && !Modifier.isStatic(mods)) {
+                    fields.add(new FieldInfo(field));
+                }
+            }
+        }
+        return fields;
+    }
+
+    static FieldInfo getField(List<FieldInfo> fields, String fieldName) {
+        int i = getFieldIndex(fields, fieldName);
+        if (i >= 0) {
+            return fields.get(i);
+        } else {
+            return null;
+        }
+    }
+
+    static int getFieldIndex(List<FieldInfo> fields, String fieldName) {
+        for (int i = 0; i < fields.size(); i += 1) {
+            FieldInfo field = fields.get(i);
+            if (fieldName.equals(field.getName())) {
+                return i;
+            }
+        }
+        return -1;
+    }
+
+    private String name;
+    private String className;
+    private Format format;
+    private transient Class cls;
+    private transient Field field;
+
+    private FieldInfo(Field field) {
+        name = field.getName();
+        cls = field.getType();
+        className = cls.getName();
+        this.field = field;
+    }
+
+    void collectRelatedFormats(Catalog catalog,
+                               Map<String, Format> newFormats) {
+
+        /*
+         * Prior to initialization we save the newly created format in the
+         * format field so that it can be used by class evolution.  But note
+         * that it may be replaced by the initialize method. [#16233]
+         */
+        format = catalog.createFormat(cls, newFormats);
+
+        /*
+         * If the created format is a NonPersistentFormat, and the field is a
+         * map or a collection, then the generic types of this field are
+         * ParameterizedTypes, e.g., Map<MyClass1, MyClass2>, so the formats
+         * of the generic types for this field, i.e., MyClass1 and MyClass2,
+         * will be created here. [#19377]
+         */
+        Class cls = field.getType();
+        if (format instanceof NonPersistentFormat &&
+            (java.util.Map.class.isAssignableFrom(cls) ||
+             java.util.Collection.class.isAssignableFrom(cls))) {
+            if (field != null &&
+                field.getGenericType() instanceof ParameterizedType) {
+                collectParameterizedTypeFormats(catalog, newFormats,
+                    (ParameterizedType)field.getGenericType());
+            }
+        }
+    }
+
+    /*
+     * Create formats for the parameterized types, e.g., will create formats
+     * for MyClass1 and MyClass2 when meeting Map<MyClass1, Set<MyClass2>>,
+     * where MyClass1 and MyClass2 are instances of java.lang.Class.
+     */
+    private void
+        collectParameterizedTypeFormats(Catalog catalog,
+                                        Map<String, Format> newFormats,
+                                        ParameterizedType parameType) {
+        Type[] types = parameType.getActualTypeArguments();
+        for (int i = 0; i < types.length; i++) {
+            if (types[i] instanceof ParameterizedType) {
+                collectParameterizedTypeFormats(catalog, newFormats,
+                    (ParameterizedType)types[i]);
+            } else if (types[i] instanceof Class) {
+
+                /*
+                 * Only use Catalog.createFormat to create the format for the
+                 * class which is an instance of java.lang.Class.
+                 */
+                catalog.createFormat((Class)types[i], newFormats);
+            }
+        }
+    }
+
+    void migrateFromBeta(Map<String, Format> formatMap) {
+        if (format == null) {
+            format = formatMap.get(className);
+            if (format == null) {
+                throw DbCompat.unexpectedState(className);
+            }
+        }
+    }
+
+    void initialize(Catalog catalog, EntityModel model, int initVersion) {
+
+        /*
+         * Reset the format if it was never initialized, which can occur when
+         * a new format instance was created during class evolution and
+         * discarded because nothing changed. [#16233]
+         *
+         * Note that the format field may be null when used in a composite key
+         * format used as a key comparator (via PersistComparator).  In that
+         * case (null format), we must not attempt to reset the format.
+         */
+        if (format != null && format.isNew()) {
+            format = catalog.getFormat(className);
+        }
+    }
+
+    Class getFieldClass(Catalog catalog) {
+        if (cls == null) {
+            try {
+                cls = catalog.resolveClass(className);
+            } catch (ClassNotFoundException e) {
+                throw DbCompat.unexpectedException(e);
+            }
+        }
+        return cls;
+    }
+
+    String getClassName() {
+        return className;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public Format getType() {
+        return format;
+    }
+
+    public int compareTo(FieldInfo o) {
+        return name.compareTo(o.name);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof FieldInfo) {
+            FieldInfo o = (FieldInfo) other;
+            return name.equals(o.name);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return name.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return "[Field name: " + name + " class: " + className + ']';
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Format.java b/src/com/sleepycat/persist/impl/Format.java
new file mode 100644
index 0000000..aefba3f
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Format.java
@@ -0,0 +1,1172 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.persist.impl; + +import java.io.Serializable; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.evolve.Converter; +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.FieldMetadata; +import com.sleepycat.persist.model.PrimaryKeyMetadata; +import com.sleepycat.persist.model.SecondaryKeyMetadata; +import com.sleepycat.persist.raw.RawField; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawType; + +/** + * The base class for all object formats. Formats are used to define the + * stored layout for all persistent classes, including simple types. + * + * The design documentation below describes the storage format for entities and + * its relationship to information stored per format in the catalog. + * + * Requirements + * ------------ + * + Provides EntityBinding for objects and EntryBinding for keys. + * + Provides SecondaryKeyCreator, SecondaryMultiKeyCreator and + * SecondaryMultiKeyNullifier (SecondaryKeyNullifier is redundant). + * + Works with reflection and bytecode enhancement. + * + For reflection only, works with any entity model not just annotations. + * + Bindings are usable independently of the persist API. + * + Performance is almost equivalent to hand coded tuple bindings. + * + Small performance penalty for compatible class changes (new fields, + * widening). + * + Secondary key create/nullify do not have to deserialize the entire record; + * in other words, store secondary keys at the start of the data. + * + * Class Format + * ------------ + * Every distinct class format is given a unique format ID. Class IDs are not + * equivalent to class version numbers (as in the version property of @Entity + * and @Persistent) because the format can change when the version number does + * not. Changes that cause a unique format ID to be assigned are: + * + * + Add field. + * + Widen field type. + * + Change primitive type to primitive wrapper class. + * + Add or drop secondary key. + * + Any incompatible class change. + * + * The last item, incompatible class changes, also correspond to a class + * version change. + * + * For each distinct class format the following information is conceptually + * stored in the catalog, keyed by format ID. + * + * - Class name + * - Class version number + * - Superclass format + * - Kind: simple, enum, complex, array + * - For kind == simple: + * - Primitive class + * - For kind == enum: + * - Array of constant names, sorted by name. + * - For kind == complex: + * - Primary key fieldInfo, or null if no primary key is declared + * - Array of secondary key fieldInfo, sorted by field name + * - Array of other fieldInfo, sorted by field name + * - For kind == array: + * - Component class format + * - Number of array dimensions + * - Other metadata for RawType + * + * Where fieldInfo is: + * - Field name + * - Field class + * - Other metadata for RawField + * + * Data Layout + * ----------- + * For each entity instance the data layout is as follows: + * + * instanceData: formatId keyFields... nonKeyFields... + * keyFields: fieldValue... + * nonKeyFields: fieldValue... + * + * The formatId is the (positive non-zero) ID of a class format, defined above. + * This is ID of the most derived class of the instance. 
It is stored as a + * packed integer. + * + * Following the format ID, zero or more sets of secondary key field values + * appear, followed by zero or more sets of other class field values. + * + * The keyFields are the sets of secondary key fields for each class in order + * of the highest superclass first. Within a class, fields are ordered by + * field name. + * + * The nonKeyFields are the sets of other non-key fields for each class in + * order of the highest superclass first. Within a class, fields are ordered + * by field name. + * + * A field value is: + * + * fieldValue: primitiveValue + * | nullId + * | instanceRef + * | instanceData + * | simpleValue + * | enumValue + * | arrayValue + * + * For a primitive type, a primitive value is used as defined for tuple + * bindings. For float and double, sorted float and sorted double tuple values + * are used. + * + * For a non-primitive type with a null value, a nullId is used that has a zero + * (illegal formatId) value. This includes String and other simple reference + * types. The formatId is stored as a packed integer, meaning that it is + * stored as a single zero byte. + * + * For a non-primitive type, an instanceRef is used for a non-null instance + * that appears earlier in the data byte array. An instanceRef is the negation + * of the byte offset of the instanceData that appears earlier. It is stored + * as a packed integer. + * + * The remaining rules apply only to reference types with non-null values that + * do not appear earlier in the data array. + * + * For an array type, an array formatId is used that identifies the component + * type and the number of array dimensions. This is followed by an array + * length (stored as a packed integer) and zero or more fieldValue elements. + * For an array with N+1 dimensions where N is greater than zero, the leftmost + * dimension is enumerated such that each fieldValue element is itself an array + * of N dimensions or null. + * + * arrayValue: formatId length fieldValue... + * + * For an enum type, an enumValue is used, consisting of a formatId that + * identifies the enum class and an enumIndex (stored as a packed integer) that + * identifies the constant name in the enum constant array of the enum class + * format: + * + * enumValue: formatId enumIndex + * + * For a simple type, a simpleValue is used. This consists of the formatId + * that identifies the class followed by the simple type value. For a + * primitive wrapper type the simple type value is the corresponding primitive, + * for a Date it is the milliseconds as a long primitive, and for BigInteger or + * BigDecimal it is a byte array as defined for tuple bindings of these types. + * + * simpleValue: formatId value + * + * For all other complex types, an instanceData is used, which is defined + * above. + * + * Secondary Keys + * -------------- + * For secondary key support we must account for writing and nullifying + * specific keys. Rather than instantiating the entity and then performing + * the secondary key operation, we strive to perform the secondary key + * operation directly on the byte format. + * + * To create a secondary key we skip over other fields and then copy the bytes + * of the embedded key. This approach is very efficient because a) the entity + * is not instantiated, and b) the secondary keys are stored at the beginning + * of the byte format and can be quickly read. 
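+ *
+ * As an editorial worked example (hypothetical entity; layout follows the
+ * rules above):
+ *
+ *   @Entity
+ *   class Employee {
+ *       @PrimaryKey
+ *       long id;
+ *       @SecondaryKey(relate=Relationship.MANY_TO_ONE)
+ *       String dept;
+ *       String name;
+ *   }
+ *
+ * The primary key (id) is stored in the key entry, not here.  The data entry
+ * is: formatId(Employee), then the secondary key field "dept", then the
+ * non-key field "name" -- so a key creator for "dept" reads just past the
+ * packed format ID and stops without touching "name".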
+ *
+ * To nullify we currently instantiate the raw entity, set the key field to null
+ * (or remove it from the array/collection), and convert the raw entity back to
+ * bytes.  Although the performance of this approach is not ideal because it
+ * requires serialization, it avoids the complexity of modifying the packed
+ * serialized format directly, adjusting references to key objects, etc.  Plus,
+ * when we nullify a key we are going to write the record, so the serialization
+ * overhead may not be significant.  For the record, I tried implementing
+ * nullification of the bytes directly and found it was much too complex.
+ *
+ * Lifecycle
+ * ---------
+ * Formats are managed by a Catalog class.  Simple formats are managed by
+ * SimpleCatalog, and are copied from the SimpleCatalog by PersistCatalog.
+ * Other formats are managed by PersistCatalog.  The lifecycle of a format
+ * instance is:
+ *
+ * - Constructed by the catalog when a format is requested for a Class
+ *   that currently has no associated format.
+ *
+ * - The catalog calls setId() and adds the format to its format list
+ *   (indexed by format id) and map (keyed by class name).
+ *
+ * - The catalog calls collectRelatedFormats(), where a format can create
+ *   additional formats that it needs, or that should also be persistent.
+ *
+ * - The catalog calls initializeIfNeeded(), which calls the initialize()
+ *   method of the format class.
+ *
+ * - initialize() should initialize any transient fields in the format.
+ *   initialize() can assume that all related formats are available in the
+ *   catalog.  It may call initializeIfNeeded() for those related formats, if
+ *   it needs to interact with an initialized related format; this does not
+ *   cause a cycle, because initializeIfNeeded() does nothing for an already
+ *   initialized format.
+ *
+ * - The catalog creates a group of related formats at one time, and then
+ *   writes its entire list of formats to the catalog DB as a single record.
+ *   This grouping reduces the number of writes.
+ *
+ * - When a catalog is opened, the list of existing formats is read.  After
+ *   a format is deserialized, its initializeIfNeeded() method is called.
+ *   setId() and collectRelatedFormats() are not called, since the ID and
+ *   related formats are stored in serialized fields.
+ *
+ * - There are two modes for opening an existing catalog: raw mode and normal
+ *   mode.  In raw mode, the old format is used regardless of whether it
+ *   matches the current class definition; in fact the class is not accessed
+ *   and does not need to be present.
+ *
+ * - In normal mode, for each existing format that is initialized, a new format
+ *   is also created based on the current class and metadata definition.  If
+ *   the two formats are equal, the new format is discarded.  If they are
+ *   unequal, the new format becomes the current format and the old format's
+ *   evolve() method is called.  evolve() is responsible for adjusting the
+ *   old format for class evolution.  Any number of non-current formats may
+ *   exist for a given class, and they are set up to evolve the single current
+ *   format for the class.
+ *
+ * @author Mark Hayes
+ */
+public abstract class Format implements Reader, RawType, Serializable {
+
+    private static final long serialVersionUID = 545633644568489850L;
+
+    /** Null reference.
*/ + static final int ID_NULL = 0; + /** Object */ + static final int ID_OBJECT = 1; + /** Boolean */ + static final int ID_BOOL = 2; + static final int ID_BOOL_W = 3; + /** Byte */ + static final int ID_BYTE = 4; + static final int ID_BYTE_W = 5; + /** Short */ + static final int ID_SHORT = 6; + static final int ID_SHORT_W = 7; + /** Integer */ + static final int ID_INT = 8; + static final int ID_INT_W = 9; + /** Long */ + static final int ID_LONG = 10; + static final int ID_LONG_W = 11; + /** Float */ + static final int ID_FLOAT = 12; + static final int ID_FLOAT_W = 13; + /** Double */ + static final int ID_DOUBLE = 14; + static final int ID_DOUBLE_W = 15; + /** Character */ + static final int ID_CHAR = 16; + static final int ID_CHAR_W = 17; + /** String */ + static final int ID_STRING = 18; + /** BigInteger */ + static final int ID_BIGINT = 19; + /** BigDecimal */ + static final int ID_BIGDEC = 20; + /** Date */ + static final int ID_DATE = 21; + /** Number */ + static final int ID_NUMBER = 22; + + /** First simple type. */ + static final int ID_SIMPLE_MIN = 2; + /** Last simple type. */ + static final int ID_SIMPLE_MAX = 21; + /** Last predefined ID, after which dynamic IDs are assigned. */ + static final int ID_PREDEFINED = 30; + + static boolean isPredefined(Format format) { + return format.getId() <= ID_PREDEFINED; + } + + private int id; + private String className; + private Reader reader; + private Format superFormat; + private Format latestFormat; + private Format previousFormat; + private Set supertypes; + private boolean deleted; + private boolean unused; + private transient Catalog catalog; + private transient Class type; + private transient Format proxiedFormat; + private transient boolean initialized; + + /** + * Creates a new format for a given class. + */ + Format(final Catalog catalog, final Class type) { + this(catalog, type.getName()); + this.type = type; + addSupertypes(); + } + + /** + * Creates a format for class evolution when no class may be present. + */ + Format(final Catalog catalog, final String className) { + assert catalog != null; + assert className != null; + this.catalog = catalog; + this.className = className; + latestFormat = this; + supertypes = new HashSet(); + } + + /** + * Special handling for JE 3.0.12 beta formats. + */ + void migrateFromBeta(Map formatMap) { + if (latestFormat == null) { + latestFormat = this; + } + } + + /** + * Initialize transient catalog field after deserialization. This must + * occur before any other usage. + */ + void initCatalog(final Catalog catalog) { + assert catalog != null; + this.catalog = catalog; + } + + final boolean isNew() { + return id == 0; + } + + final Catalog getCatalog() { + return catalog; + } + + /** + * Returns the format ID. + */ + public final int getId() { + return id; + } + + /** + * Called by the Catalog to set the format ID when a new format is added to + * the format list, before calling initializeIfNeeded(). + */ + final void setId(int id) { + this.id = id; + } + + /** + * Returns the class that this format represents. This method will return + * null in rawAccess mode, or for an unevolved format. + */ + final Class getType() { + return type; + } + + /** + * Called to get the type when it is known to exist for an uninitialized + * format. 
+ */ + final Class getExistingType() { + assert catalog != null; + if (type == null) { + try { + type = catalog.resolveClass(className); + } catch (ClassNotFoundException e) { + throw DbCompat.unexpectedException(e); + } + } + return type; + } + + /** + * Returns the object for reading objects of the latest format. For the + * latest version format, 'this' is returned. For prior version formats, a + * reader that converts this version to the latest version is returned. + */ + final Reader getReader() { + + /* + * For unit testing, record whether any un-evolved formats are + * encountered. + */ + if (this != reader) { + PersistCatalog.unevolvedFormatsEncountered = true; + } + + return reader; + } + + /** + * Changes the reader during format evolution. + */ + final void setReader(Reader reader) { + this.reader = reader; + } + + /** + * Returns the format of the superclass. + */ + final Format getSuperFormat() { + return superFormat; + } + + /** + * Called to set the format of the superclass during initialize(). + */ + final void setSuperFormat(Format superFormat) { + this.superFormat = superFormat; + } + + /** + * Returns the format that is proxied by this format. If non-null is + * returned, then this format is a PersistentProxy. + */ + final Format getProxiedFormat() { + return proxiedFormat; + } + + /** + * Called by ProxiedFormat to set the proxied format. + */ + final void setProxiedFormat(Format proxiedFormat) { + this.proxiedFormat = proxiedFormat; + } + + /** + * If this is the latest/evolved format, returns this; otherwise, returns + * the current version of this format. Note that this WILL return a + * format for a deleted class if the latest format happens to be deleted. + */ + final Format getLatestVersion() { + return latestFormat; + } + + /** + * Returns the previous version of this format in the linked list of + * versions, or null if this is the only version. + */ + public final Format getPreviousVersion() { + return previousFormat; + } + + /** + * Called by Evolver to set the latest format when this old format is + * evolved. + */ + final void setLatestVersion(Format newFormat) { + + /* + * If this old format is the former latest version, link it to the new + * latest version. This creates a singly linked list of versions + * starting with the latest. + */ + if (latestFormat == this) { + newFormat.previousFormat = this; + } + + latestFormat = newFormat; + } + + /** + * Returns whether the class for this format was deleted. + */ + public final boolean isDeleted() { + return deleted; + } + + /** + * Called by the Evolver when applying a Deleter mutation. + */ + final void setDeleted(boolean deleted) { + this.deleted = deleted; + } + + /** + * Called by the Evolver for a format that is never referenced. + */ + final void setUnused(boolean unused) { + this.unused = unused; + } + + /** + * Called by the Evolver with true when an entity format or any of its + * nested format were changed. Called by Store.evolve when an entity has + * been fully converted. Overridden by ComplexFormat. + */ + void setEvolveNeeded(boolean needed) { + throw DbCompat.unexpectedState(); + } + + /** + * Overridden by ComplexFormat. + */ + boolean getEvolveNeeded() { + throw DbCompat.unexpectedState(); + } + + /** + * For an entity format, returns whether the entity was written using the + * new String format. For a non-entity format, this method should not be + * called. + * + * Overridden by ComplexFormat. 
+ */ + boolean getNewStringFormat() { + throw DbCompat.unexpectedState(); + } + + final boolean isInitialized() { + return initialized; + } + + /** + * Called by the Catalog to initialize a format, and may also be called + * during initialize() for a related format to ensure that the related + * format is initialized. This latter case is allowed to support + * bidirectional dependencies. This method will do nothing if the format + * is already intialized. + */ + final void initializeIfNeeded(Catalog catalog, EntityModel model) { + assert catalog != null; + + if (!initialized) { + initialized = true; + this.catalog = catalog; + + /* Initialize objects serialized by an older Format class. */ + if (latestFormat == null) { + latestFormat = this; + } + if (reader == null) { + reader = this; + } + + /* + * The class is only guaranteed to be available in live (not raw) + * mode, for the current version of the format. + */ + if (type == null && + isCurrentVersion() && + (isSimple() || !catalog.isRawAccess())) { + getExistingType(); + } + + /* Perform subclass-specific initialization. */ + initialize(catalog, model, + catalog.getInitVersion(this, false /*forReader*/)); + reader.initializeReader + (catalog, model, + catalog.getInitVersion(this, true /*forReader*/), + this); + } + } + + /** + * Called to initialize a separate Reader implementation. This method is + * called when no separate Reader exists, and does nothing. + */ + public void initializeReader(Catalog catalog, + EntityModel model, + int initVersion, + Format oldFormat) { + } + + /** + * Adds all interfaces and superclasses to the supertypes set. + */ + private void addSupertypes() { + addInterfaces(type); + Class stype = type.getSuperclass(); + while (stype != null && stype != Object.class) { + supertypes.add(stype.getName()); + addInterfaces(stype); + stype = stype.getSuperclass(); + } + } + + /** + * Recursively adds interfaces to the supertypes set. + */ + private void addInterfaces(Class cls) { + Class[] interfaces = cls.getInterfaces(); + for (Class iface : interfaces) { + if (iface != Enhanced.class) { + supertypes.add(iface.getName()); + addInterfaces(iface); + } + } + } + + /** + * Certain formats (ProxiedFormat for example) prohibit nested fields that + * reference the parent object. [#15815] + */ + boolean areNestedRefsProhibited() { + return false; + } + + /* -- Start of RawType interface methods. -- */ + + public String getClassName() { + return className; + } + + public int getVersion() { + ClassMetadata meta = getClassMetadata(); + if (meta != null) { + return meta.getVersion(); + } else { + return 0; + } + } + + public Format getSuperType() { + return superFormat; + } + + /* -- RawType methods that are overridden as needed in subclasses. -- */ + + public boolean isSimple() { + return false; + } + + public boolean isPrimitive() { + return false; + } + + public boolean isEnum() { + return false; + } + + public List getEnumConstants() { + return null; + } + + public boolean isArray() { + return false; + } + + public int getDimensions() { + return 0; + } + + public Format getComponentType() { + return null; + } + + public Map getFields() { + return null; + } + + public ClassMetadata getClassMetadata() { + return null; + } + + public EntityMetadata getEntityMetadata() { + return null; + } + + /* -- End of RawType methods. -- */ + + /* -- Methods that may optionally be overridden by subclasses. 
-- */ + + /** + * Called by EntityOutput in rawAccess mode to determine whether an object + * type is allowed to be assigned to a given field type. + */ + boolean isAssignableTo(Format format) { + if (proxiedFormat != null) { + return proxiedFormat.isAssignableTo(format); + } else { + return format == this || + format.id == ID_OBJECT || + supertypes.contains(format.className); + } + } + + /** + * For primitive types only, returns their associated wrapper type. + */ + Format getWrapperFormat() { + return null; + } + + /** + * Returns whether this format class is an entity class. + */ + boolean isEntity() { + return false; + } + + /** + * Returns whether this class is present in the EntityModel. Returns false + * for a simple type, array type, or enum type. + */ + boolean isModelClass() { + return false; + } + + /** + * For an entity class or subclass, returns the base entity class; returns + * null in other cases. + */ + ComplexFormat getEntityFormat() { + return null; + } + + /** + * Called for an existing format that may not equal the current format for + * the same class. + * + *
<p>If this method returns true, then it must have determined one of two
+     * things:
+     * - that the old and new formats are equal, and it must have called
+     *   Evolver.useOldFormat; or
+     * - that the old format can be evolved to the new format, and it must
+     *   have called Evolver.useEvolvedFormat.</p>
+     *
+     * <p>If this method returns false, then it must have determined that the
+     * old format could not be evolved to the new format, and it must have
+     * called Evolver.addInvalidMutation, addMissingMutation or
+     * addEvolveError.</p>
        + */ + abstract boolean evolve(Format newFormat, Evolver evolver); + + /** + * Called when a Converter handles evolution of a class, but we may still + * need to evolve the metadata. + */ + boolean evolveMetadata(Format newFormat, + Converter converter, + Evolver evolver) { + return true; + } + + /** + * Returns whether this format is the current format for its class. If + * false is returned, this format is setup to evolve to the current format. + */ + final boolean isCurrentVersion() { + return latestFormat == this && !deleted; + } + + /** + * Returns whether this format has the same class as the given format, + * irrespective of version changes and renaming. + */ + final boolean isSameClass(Format other) { + return latestFormat == other.latestFormat; + } + + /* -- Abstract methods that must be implemented by subclasses. -- */ + + /** + * Initializes an uninitialized format, initializing its related formats + * (superclass formats and array component formats) first. + */ + abstract void initialize(Catalog catalog, + EntityModel model, + int initVersion); + + /** + * Calls catalog.createFormat for formats that this format depends on, or + * that should also be persistent. + */ + abstract void collectRelatedFormats(Catalog catalog, + Map newFormats); + + /* + * The remaining methods are used to read objects from data bytes via + * EntityInput, and to write objects as data bytes via EntityOutput. + * Ultimately these methods call methods in the Accessor interface to + * get/set fields in the object. Most methods have a rawAccess parameter + * that determines whether the object is a raw object or a real persistent + * object. + * + * The first group of methods are abstract and must be implemented by + * format classes. The second group have default implementations that + * throw UnsupportedOperationException and may optionally be overridden. + */ + + /** + * Creates an array of the format's class of the given length, as if + * Array.newInstance(getType(), len) were called. Formats implement this + * method for specific classes, or call the accessor, to avoid the + * reflection overhead of Array.newInstance. + */ + abstract Object newArray(int len); + + /** + * Creates a new instance of the target class using its default + * constructor. Normally this creates an empty object, and readObject() is + * called next to fill in the contents. This is done in two steps to allow + * the instance to be registered by EntityInput before reading the + * contents. This allows the fields in an object or a nested object to + * refer to the parent object in a graph. + * + * Alternatively, this method may read all or the first portion of the + * data, rather than that being done by readObject(). This is required for + * simple types and enums, where the object cannot be created without + * reading the data. In these cases, there is no possibility that the + * parent object will be referenced by the child object in the graph. It + * should not be done in other cases, or the graph references may not be + * maintained faithfully. + * + * Is public only in order to implement the Reader interface. Note that + * this method should only be called directly in raw conversion mode or + * during conversion of an old format. Normally it should be called via + * the getReader method and the Reader interface. 
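+     *
+     * Editorial illustration (hypothetical class) of why creation is split
+     * into two steps:
+     *
+     *   @Persistent
+     *   class Node { Node other; }
+     *
+     * Two Node instances that point at each other can be read because the
+     * first Node is registered by newInstance() before readObject() fills in
+     * its "other" field, so the nested reference can resolve back to the
+     * not-yet-complete parent.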
+ */ + public abstract Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException; + + /** + * Called after newInstance() to read the rest of the data bytes and fill + * in the object contents. If the object was read completely by + * newInstance(), this method does nothing. + * + * Is public only in order to implement the Reader interface. Note that + * this method should only be called directly in raw conversion mode or + * during conversion of an old format. Normally it should be called via + * the getReader method and the Reader interface. + */ + public abstract Object readObject(Object o, + EntityInput input, + boolean rawAccess) + throws RefreshException; + + /** + * Writes a given instance of the target class to the output data bytes. + * This is the complement of the newInstance()/readObject() pair. + */ + abstract void writeObject(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException; + + /** + * Skips over the object's contents, as if readObject() were called, but + * without returning an object. Used for extracting secondary key bytes + * without having to instantiate the object. For reference types, the + * format ID is read just before calling this method, so this method is + * responsible for skipping everything following the format ID. + */ + abstract void skipContents(RecordInput input) + throws RefreshException; + + /* -- More methods that may optionally be overridden by subclasses. -- */ + + /** + * When extracting a secondary key, called to skip over all fields up to + * the given secondary key field. Returns the format of the key field + * found, or null if the field is not present (nullified) in the object. + */ + Format skipToSecKey(RecordInput input, String keyName) + throws RefreshException { + + throw DbCompat.unexpectedState(toString()); + } + + /** + * Called after skipToSecKey() to copy the data bytes of a singular + * (XXX_TO_ONE) key field. + */ + void copySecKey(RecordInput input, RecordOutput output) { + throw DbCompat.unexpectedState(toString()); + } + + /** + * Called after skipToSecKey() to copy the data bytes of an array or + * collection (XXX_TO_MANY) key field. + */ + void copySecMultiKey(RecordInput input, Format keyFormat, Set results) + throws RefreshException { + + throw DbCompat.unexpectedState(toString()); + } + + /** + * Nullifies the given key field in the given RawObject -- rawAccess mode + * is implied. + */ + boolean nullifySecKey(Catalog catalog, + Object entity, + String keyName, + Object keyElement) { + throw DbCompat.unexpectedState(toString()); + } + + /** + * Returns whether the entity's primary key field is null or zero, as + * defined for primary keys that are assigned from a sequence. + */ + boolean isPriKeyNullOrZero(Object o, boolean rawAccess) { + throw DbCompat.unexpectedState(toString()); + } + + /** + * Gets the primary key field from the given object and writes it to the + * given output data bytes. This is a separate operation because the + * primary key data bytes are stored separately from the rest of the + * record. + */ + void writePriKey(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException { + + throw DbCompat.unexpectedState(toString()); + } + + /** + * Reads the primary key from the given input bytes and sets the primary + * key field in the given object. This is complement of writePriKey(). + * + * Is public only in order to implement the Reader interface. 
Note that + * this method should only be called directly in raw conversion mode or + * during conversion of an old format. Normally it should be called via + * the getReader method and the Reader interface. + */ + public void readPriKey(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + throw DbCompat.unexpectedState(toString()); + } + + /** + * For an entity class or subclass, returns the old key name for the given + * key name that has been renamed, or returns the given key name if it has + * not been renamed. + */ + public String getOldKeyName(final String keyName) { + throw DbCompat.unexpectedState(toString()); + } + + /** + * Validates and returns the simple integer key format for a sequence key + * associated with this format. + * + * For a composite key type, the format of the one and only field is + * returned. For a simple integer type, this format is returned. + * Otherwise (the default implementation), an IllegalArgumentException is + * thrown. + */ + Format getSequenceKeyFormat() { + throw new IllegalArgumentException + ("Type not allowed for sequence: " + getClassName()); + } + + /** + * Converts a RawObject to a current class object and adds the converted + * pair to the converted map. + */ + Object convertRawObject(Catalog catalog, + boolean rawAccess, + RawObject rawObject, + IdentityHashMap converted) + throws RefreshException { + + throw DbCompat.unexpectedState(toString()); + } + + /** + * Currently, only FBigDec will return true. It is a workaround for reading + * the BigDecimal data stored by BigDecimal proxy before je4.1. + */ + public boolean allowEvolveFromProxy() { + return false; + } + + public Accessor getAccessor(boolean rawAccess) { + return null; + } + + @Override + public String toString() { + final String INDENT = " "; + final String INDENT2 = INDENT + " "; + StringBuilder buf = new StringBuilder(500); + if (isSimple()) { + addTypeHeader(buf, "SimpleType"); + buf.append(" primitive=\""); + buf.append(isPrimitive()); + buf.append("\"/>\n"); + } else if (isEnum()) { + addTypeHeader(buf, "EnumType"); + buf.append(">\n"); + for (String constant : getEnumConstants()) { + buf.append(INDENT); + buf.append(""); + buf.append(constant); + buf.append("\n"); + } + buf.append("\n"); + } else if (isArray()) { + addTypeHeader(buf, "ArrayType"); + buf.append(" componentId=\""); + buf.append(getComponentType().getVersion()); + buf.append("\" componentClass=\""); + buf.append(getComponentType().getClassName()); + buf.append("\" dimensions=\""); + buf.append(getDimensions()); + buf.append("\"/>\n"); + } else { + addTypeHeader(buf, "ComplexType"); + Format superType = getSuperType(); + if (superType != null) { + buf.append(" superTypeId=\""); + buf.append(superType.getId()); + buf.append("\" superTypeClass=\""); + buf.append(superType.getClassName()); + buf.append('"'); + } + Format proxiedFormat = getProxiedFormat(); + if (proxiedFormat != null) { + buf.append(" proxiedTypeId=\""); + buf.append(proxiedFormat.getId()); + buf.append("\" proxiedTypeClass=\""); + buf.append(proxiedFormat.getClassName()); + buf.append('"'); + } + PrimaryKeyMetadata priMeta = null; + Map secondaryKeys = null; + List compositeKeyFields = null; + ClassMetadata clsMeta = getClassMetadata(); + if (clsMeta != null) { + compositeKeyFields = clsMeta.getCompositeKeyFields(); + priMeta = clsMeta.getPrimaryKey(); + secondaryKeys = clsMeta.getSecondaryKeys(); + } + buf.append(" kind=\""); + buf.append(isEntity() ? "entity" : + ((compositeKeyFields != null) ? 
"compositeKey" : + "persistent")); + buf.append("\">\n"); + Map fields = getFields(); + if (fields != null) { + for (RawField field : fields.values()) { + String name = field.getName(); + RawType type = field.getType(); + buf.append(INDENT); + buf.append("\n"); + } + EntityMetadata entMeta = getEntityMetadata(); + if (entMeta != null) { + buf.append(INDENT); + buf.append("\n"); + priMeta = entMeta.getPrimaryKey(); + if (priMeta != null) { + buf.append(INDENT2); + buf.append("\n"); + } + secondaryKeys = entMeta.getSecondaryKeys(); + if (secondaryKeys != null) { + for (SecondaryKeyMetadata secMeta : + secondaryKeys.values()) { + buf.append(INDENT2); + buf.append("\n"); + } + } + buf.append("\n"); + } + } + buf.append("\n"); + } + return buf.toString(); + } + + private void addTypeHeader(StringBuilder buf, String elemName) { + buf.append('<'); + buf.append(elemName); + buf.append(" id=\""); + buf.append(getId()); + buf.append("\" class=\""); + buf.append(getClassName()); + buf.append("\" version=\""); + buf.append(getVersion()); + buf.append('"'); + Format currVersion = getLatestVersion(); + if (currVersion != null) { + buf.append(" currentVersionId=\""); + buf.append(currVersion.getId()); + buf.append('"'); + } + Format prevVersion = getPreviousVersion(); + if (prevVersion != null) { + buf.append(" previousVersionId=\""); + buf.append(prevVersion.getId()); + buf.append('"'); + } + } +} diff --git a/src/com/sleepycat/persist/impl/KeyLocation.java b/src/com/sleepycat/persist/impl/KeyLocation.java new file mode 100644 index 0000000..d093e7f --- /dev/null +++ b/src/com/sleepycat/persist/impl/KeyLocation.java @@ -0,0 +1,31 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +/** + * Holder for the input and format of a key. Used when copying secondary keys. + * Returned by RecordInput.getKeyLocation(). + * + * @author Mark Hayes + */ +class KeyLocation { + + RecordInput input; + Format format; + + KeyLocation(RecordInput input, Format format) { + this.input = input; + this.format = format; + } +} diff --git a/src/com/sleepycat/persist/impl/MapProxy.java b/src/com/sleepycat/persist/impl/MapProxy.java new file mode 100644 index 0000000..e9bcdbf --- /dev/null +++ b/src/com/sleepycat/persist/impl/MapProxy.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.impl; + +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.TreeMap; + +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PersistentProxy; + +/** + * Proxy for a Map. + * + * @author Mark Hayes + */ +@Persistent +abstract class MapProxy implements PersistentProxy> { + + private K[] keys; + private V[] values; + + protected MapProxy() {} + + public final void initializeProxy(Map map) { + int size = map.size(); + keys = (K[]) new Object[size]; + values = (V[]) new Object[size]; + int i = 0; + for (Map.Entry entry : map.entrySet()) { + keys[i] = entry.getKey(); + values[i] = entry.getValue(); + i += 1; + } + } + + public final Map convertProxy() { + int size = values.length; + Map map = newInstance(size); + for (int i = 0; i < size; i += 1) { + map.put(keys[i], values[i]); + } + return map; + } + + protected abstract Map newInstance(int size); + + @Persistent(proxyFor=HashMap.class) + static class HashMapProxy extends MapProxy { + + protected HashMapProxy() {} + + protected Map newInstance(int size) { + return new HashMap(size); + } + } + + @Persistent(proxyFor=TreeMap.class) + static class TreeMapProxy extends MapProxy { + + protected TreeMapProxy() {} + + protected Map newInstance(int size) { + return new TreeMap(); + } + } + + @Persistent(proxyFor=LinkedHashMap.class) + static class LinkedHashMapProxy extends MapProxy { + + protected LinkedHashMapProxy() {} + + protected Map newInstance(int size) { + return new LinkedHashMap(); + } + } +} diff --git a/src/com/sleepycat/persist/impl/NonPersistentFormat.java b/src/com/sleepycat/persist/impl/NonPersistentFormat.java new file mode 100644 index 0000000..d626ac8 --- /dev/null +++ b/src/com/sleepycat/persist/impl/NonPersistentFormat.java @@ -0,0 +1,76 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.Array; +import java.util.Map; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.model.EntityModel; + +/** + * Format for a non-persistent class that is only used for declared field + * types and arrays. Currently used only for Object and interface types. 
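+ *
+ * Editorial example (hypothetical class): a field declared as
+ *
+ *   @Persistent
+ *   class Holder { Object any; }
+ *
+ * uses this format for the declared type Object, while the value actually
+ * stored supplies its own format at runtime (String, a @Persistent class,
+ * etc.).  newInstance below can therefore always throw: instances of the
+ * declared type itself are never deserialized.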
+ * + * @author Mark Hayes + */ +class NonPersistentFormat extends Format { + + private static final long serialVersionUID = -7488355830875148784L; + + NonPersistentFormat(Catalog catalog, Class type) { + super(catalog, type); + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + } + + @Override + Object newArray(int len) { + return Array.newInstance(getType(), len); + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) { + throw DbCompat.unexpectedState + ("Cannot instantiate non-persistent class: " + getClassName()); + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) { + throw DbCompat.unexpectedState(); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + throw DbCompat.unexpectedState(); + } + + @Override + void skipContents(RecordInput input) { + throw DbCompat.unexpectedState(); + } + + @Override + boolean evolve(Format newFormat, Evolver evolver) { + evolver.useOldFormat(this, newFormat); + return true; + } +} diff --git a/src/com/sleepycat/persist/impl/ObjectArrayFormat.java b/src/com/sleepycat/persist/impl/ObjectArrayFormat.java new file mode 100644 index 0000000..d4a53bf --- /dev/null +++ b/src/com/sleepycat/persist/impl/ObjectArrayFormat.java @@ -0,0 +1,228 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.Array; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.raw.RawObject; + +/** + * An array of objects having a specified number of dimensions. All + * multidimensional arrays are handled by this class, since even a primitive + * array of more than one dimension is an array of objects, where the component + * objects may be primitive arrays. The {@link PrimitiveArrayFormat} class + * handles primitive arrays of one dimension only. + * + * In this class, and {@link PrimitiveArrayFormat}, we resort to using + * reflection to allocate multidimensional arrays. If there is a need for it, + * reflection could be avoided in the future by generating code as new array + * formats are encountered. 
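+ *
+ * Editorial note: for a field of type String[][] the class name is
+ * "[[Ljava.lang.String;", so the constructor below counts two leading '['
+ * characters (nDimensions = 2), and the component format is that of
+ * String[] ("[Ljava.lang.String;").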
+ * + * @author Mark Hayes + */ +public class ObjectArrayFormat extends Format { + + private static final long serialVersionUID = 4317004346690441892L; + + private Format componentFormat; + private int nDimensions; + private transient Format useComponentFormat; + + ObjectArrayFormat(Catalog catalog, Class type) { + super(catalog, type); + String name = getClassName(); + for (nDimensions = 0; + name.charAt(nDimensions) == '['; + nDimensions += 1) { + } + } + + @Override + public boolean isArray() { + return true; + } + + @Override + public int getDimensions() { + return nDimensions; + } + + @Override + public Format getComponentType() { + return (useComponentFormat != null) ? + useComponentFormat : componentFormat; + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + Class cls = getType().getComponentType(); + catalog.createFormat(cls, newFormats); + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + /* Set the component format for a new (never initialized) format. */ + if (componentFormat == null) { + Class cls = getType().getComponentType(); + componentFormat = catalog.getFormat(cls.getName()); + } + useComponentFormat = componentFormat.getLatestVersion(); + } + + @Override + boolean isAssignableTo(Format format) { + if (super.isAssignableTo(format)) { + return true; + } + if (format instanceof ObjectArrayFormat) { + ObjectArrayFormat other = (ObjectArrayFormat) format; + if (useComponentFormat.isAssignableTo(other.useComponentFormat)) { + return true; + } + } + return false; + } + + @Override + Object newArray(int len) { + return Array.newInstance(getType(), len); + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) { + int len = input.readArrayLength(); + if (rawAccess) { + return new RawObject(this, new Object[len]); + } else { + return useComponentFormat.newArray(len); + } + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + Object[] a; + if (rawAccess) { + a = ((RawObject) o).getElements(); + } else { + a = (Object[]) o; + } + if (useComponentFormat.getId() == Format.ID_STRING) { + for (int i = 0; i < a.length; i += 1) { + a[i] = input.readStringObject(); + } + } else { + for (int i = 0; i < a.length; i += 1) { + a[i] = input.readObject(); + } + } + return o; + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException { + + Object[] a; + if (rawAccess) { + a = ((RawObject) o).getElements(); + } else { + a = (Object[]) o; + } + output.writeArrayLength(a.length); + if (useComponentFormat.getId() == Format.ID_STRING) { + for (int i = 0; i < a.length; i += 1) { + output.writeString((String)a[i]); + } + } else { + for (int i = 0; i < a.length; i += 1) { + output.writeObject(a[i], useComponentFormat); + } + } + } + + @Override + Object convertRawObject(Catalog catalog, + boolean rawAccess, + RawObject rawObject, + IdentityHashMap converted) + throws RefreshException { + + RawArrayInput input = new RawArrayInput + (catalog, rawAccess, converted, rawObject, useComponentFormat); + Object a = newInstance(input, rawAccess); + converted.put(rawObject, a); + return readObject(a, input, rawAccess); + } + + @Override + void skipContents(RecordInput input) + throws RefreshException { + + int len = input.readPackedInt(); + for (int i = 0; i < len; i += 1) { + input.skipField(useComponentFormat); + } + } + + @Override + void copySecMultiKey(RecordInput input, Format keyFormat, 
Set results) + throws RefreshException { + + int len = input.readPackedInt(); + for (int i = 0; i < len; i += 1) { + KeyLocation loc = input.getKeyLocation(useComponentFormat); + if (loc == null) { + throw new IllegalArgumentException + ("Secondary key values in array may not be null"); + } + if (loc.format != useComponentFormat) { + throw DbCompat.unexpectedState + (useComponentFormat.getClassName()); + } + int off1 = loc.input.getBufferOffset(); + useComponentFormat.skipContents(loc.input); + int off2 = loc.input.getBufferOffset(); + DatabaseEntry entry = new DatabaseEntry + (loc.input.getBufferBytes(), off1, off2 - off1); + results.add(entry); + } + } + + @Override + boolean evolve(Format newFormat, Evolver evolver) { + + /* + * When the class name of the component changes, we need a new format + * that references it. Otherwise, don't propogate changes from + * components upward to their arrays. + */ + Format latest = componentFormat.getLatestVersion(); + if (latest != componentFormat && + !latest.getClassName().equals(componentFormat.getClassName())) { + evolver.useEvolvedFormat(this, newFormat, newFormat); + } else { + evolver.useOldFormat(this, newFormat); + } + return true; + } +} diff --git a/src/com/sleepycat/persist/impl/PersistCatalog.java b/src/com/sleepycat/persist/impl/PersistCatalog.java new file mode 100644 index 0000000..42fb6cc --- /dev/null +++ b/src/com/sleepycat/persist/impl/PersistCatalog.java @@ -0,0 +1,1429 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.impl; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +/* */ +import com.sleepycat.je.rep.ReplicaWriteException; +/* */ +import com.sleepycat.persist.DatabaseNamer; +import com.sleepycat.persist.StoreExistsException; +import com.sleepycat.persist.StoreNotFoundException; +import com.sleepycat.persist.evolve.Converter; +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.evolve.Renamer; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.ModelInternal; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawType; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * The catalog of class formats for a store, along with its associated model + * and mutations. + * + * @author Mark Hayes + */ +public class PersistCatalog implements Catalog { + + private static final int MAX_TXN_RETRIES = 10; + + /** + * Key to Data record in the catalog database. In the JE 3.0.12 beta + * version the formatList record is stored under this key and is converted + * to a Data object when it is read. + */ + private static final byte[] DATA_KEY = getIntBytes(-1); + + /** + * Key to a JE 3.0.12 beta version mutations record in the catalog + * database. This record is no longer used because mutations are stored in + * the Data record and is deleted when the beta version is detected. + */ + private static final byte[] BETA_MUTATIONS_KEY = getIntBytes(-2); + + private static byte[] getIntBytes(int val) { + DatabaseEntry entry = new DatabaseEntry(); + IntegerBinding.intToEntry(val, entry); + assert entry.getSize() == 4 && entry.getData().length == 4; + return entry.getData(); + } + + /** + * Used by unit tests. + */ + public static boolean expectNoClassChanges; + public static boolean unevolvedFormatsEncountered; + + /** + * The object stored under DATA_KEY in the catalog database. + */ + private static class Data implements Serializable { + + static final long serialVersionUID = 7515058069137413261L; + + List formatList; + Mutations mutations; + int version; + } + + /** + * A list of all formats indexed by formatId. Element zero is unused and + * null, since IDs start at one; this avoids adjusting the ID to index the + * list. Some elements are null to account for predefined IDs that are not + * used. + * + *

<p>This field, like formatMap, is volatile because it is reassigned + * when dynamically adding new formats.  See {@link #addNewFormat}.</p>
        + */ + private volatile List formatList; + + /** + * A map of the current/live formats in formatList, indexed by class name. + * + *

<p>This field, like formatList, is volatile because it is reassigned + * when dynamically adding new formats.  See {@link #addNewFormat}.</p>
        + */ + private volatile Map formatMap; + + /** + * A map of the latest formats (includes deleted formats) in formatList, + * indexed by class name. + * + *

<p>This field, like formatMap, is volatile because it is reassigned + * when dynamically adding new formats.  See {@link #addNewFormat}.</p>
        + */ + private volatile Map latestFormatMap; + + /** + * A temporary map of proxied class name to proxy class name. Used during + * catalog creation, and then set to null. This map is used to force proxy + * formats to be created prior to proxied formats. [#14665] + */ + private Map proxyClassMap; + + private final Environment env; + private final boolean rawAccess; + private EntityModel model; + private StoredModel storedModel; + private Mutations mutations; + private final Database db; + private int openCount; + private boolean readOnly; + private final boolean transactional; + + /** + * If a Replica is upgraded, local in-memory evolution may take place prior + * to the Master being upgraded. In that case, the size of the formatList + * will be greater than nStoredFormats. In this case, the readOnly + * state field will be set to true. We must be sure not to write the + * metadata in this state. [#16655] + */ + private volatile int nStoredFormats; + + /** + * The Store is normally present but may be null in unit tests (for + * example, BindingTest). + */ + private final Store store; + + /** + * The Evolver and catalog Data are non-null during catalog initialization, + * and null otherwise. + */ + private Evolver initEvolver; + private Data initData; + + /** + * Creates a new catalog, opening the database and reading it from a given + * catalog database if it already exists. All predefined formats and + * formats for the given model are added. For modified classes, old + * formats are defined based on the rules for compatible class changes and + * the given mutations. If any format is changed or added, and the + * database is not read-only, write the initialized catalog to the + * database. + */ + public PersistCatalog(final Environment env, + final String storePrefix, + final String dbName, + final DatabaseConfig dbConfig, + final EntityModel modelParam, + final Mutations mutationsParam, + final boolean rawAccess, + final Store store) + throws StoreExistsException, + StoreNotFoundException, + IncompatibleClassException, + DatabaseException { + + this.env = env; + this.rawAccess = rawAccess; + this.store = store; + this.transactional = dbConfig.getTransactional(); + + /* store may be null for testing. */ + String[] fileAndDbNames = (store != null) ? + store.parseDbName(dbName) : + Store.parseDbName(dbName, DatabaseNamer.DEFAULT); + + /* + * Use a null (auto-commit) transaction for opening the database, so + * that the database is opened even if a ReplicaWriteException occurs + * when attempting to evolve the metadata. We will close the database + * if another exception occurs in the finally statement below. + */ + db = DbCompat.openDatabase(env, null /*txn*/, fileAndDbNames[0], + fileAndDbNames[1], dbConfig); + if (db == null) { + String dbNameMsg = store.getDbNameMessage(fileAndDbNames); + if (dbConfig.getExclusiveCreate()) { + throw new StoreExistsException + ("Catalog DB already exists and ExclusiveCreate=true, " + + dbNameMsg); + } else { + assert !dbConfig.getAllowCreate(); + throw new StoreNotFoundException + ("Catalog DB does not exist and AllowCreate=false, " + + dbNameMsg); + } + } + openCount = 1; + boolean success = false; + try { + initAndRetry(storePrefix, modelParam, mutationsParam); + success = true; + } finally { + if (!success) { + close(); + } + } + } + + /** + * Creates a new catalog when a Replica refresh occurs. 
Uses some + * information from the old catalog directly in the new catalog, but all + * formats are created from scratch and class evolution is attempted. + */ + PersistCatalog(final PersistCatalog oldCatalog, final String storePrefix) + throws DatabaseException { + + db = oldCatalog.db; + store = oldCatalog.store; + env = oldCatalog.env; + rawAccess = oldCatalog.rawAccess; + openCount = oldCatalog.openCount; + transactional = oldCatalog.transactional; + + initAndRetry(storePrefix, oldCatalog.model, oldCatalog.mutations); + } + + private void initAndRetry(final String storePrefix, + final EntityModel modelParam, + final Mutations mutationsParam) + throws DatabaseException { + + for (int i = 0;; i += 1) { + Transaction txn = null; + if (transactional && DbCompat.getThreadTransaction(env) == null) { + txn = + env.beginTransaction(null, store.getAutoCommitTxnConfig()); + } + boolean success = false; + try { + init(txn, storePrefix, modelParam, mutationsParam); + success = true; + return; + } catch (LockConflictException e) { + + /* + * It is very unlikely that two threads opening the same + * EntityStore will cause a lock conflict. However, because we + * read-modify-update the catalog record, + * LockPreemptedException must be handled in a replicated JE + * environment. Since LockPreemptedException is a + * LockConfictException, it is simplest to retry when any + * LockConfictException occurs. + */ + if (i >= MAX_TXN_RETRIES) { + throw e; + } + continue; + } finally { + + /* + * If the catalog is read-only we abort rather than commit, + * because a ReplicaWriteException may have occurred. + * ReplicaWriteException invalidates the transaction, and there + * are no writes to commit anyway. [#16655] + */ + if (txn != null) { + if (success && !isReadOnly()) { + txn.commit(); + } else { + txn.abort(); + } + } + } + } + } + + private void init(final Transaction txn, + final String storePrefix, + final EntityModel modelParam, + final Mutations mutationsParam) + throws DatabaseException { + + try { + initData = readData(txn); + mutations = initData.mutations; + if (mutations == null) { + mutations = new Mutations(); + } + + /* + * When the beta version is detected, force a re-write of the + * catalog and disallow class changes. This brings the catalog up + * to date so that evolution can proceed correctly from then on. + */ + boolean betaVersion = (initData.version == BETA_VERSION); + boolean needWrite = betaVersion; + boolean disallowClassChanges = betaVersion; + + /* + * Store the given mutations if they are different from the stored + * mutations, and force evolution to apply the new mutations. + */ + boolean forceEvolution = false; + if (mutationsParam != null && + !mutations.equals(mutationsParam)) { + mutations = mutationsParam; + needWrite = true; + forceEvolution = true; + } + + final ClassLoader envClassLoader = DbCompat.getClassLoader(env); + + /* Get the existing format list, or copy it from SimpleCatalog. */ + formatList = initData.formatList; + if (formatList == null) { + formatList = SimpleCatalog.getAllSimpleFormats(envClassLoader); + + /* + * Special cases: Object and Number are predefined but are not + * simple types. + */ + Format format = new NonPersistentFormat(this, Object.class); + format.setId(Format.ID_OBJECT); + formatList.set(Format.ID_OBJECT, format); + format = new NonPersistentFormat(this, Number.class); + format.setId(Format.ID_NUMBER); + formatList.set(Format.ID_NUMBER, format); + } else { + /* Pick up any new predefined simple types. 
*/ + if (SimpleCatalog.addMissingSimpleFormats(envClassLoader, + formatList)) { + needWrite = true; + } + nStoredFormats = formatList.size(); + } + + /* Initialize transient catalog field before further use. */ + for (Format format : formatList) { + if (format != null) { + format.initCatalog(this); + } + } + + /* Special handling for JE 3.0.12 beta formats. */ + if (betaVersion) { + Map formatMap = new HashMap(); + for (Format format : formatList) { + if (format != null) { + formatMap.put(format.getClassName(), format); + } + } + for (Format format : formatList) { + if (format != null) { + format.migrateFromBeta(formatMap); + } + } + } + + /* + * If we should not use the current model, initialize the stored + * model and return. + */ + formatMap = new HashMap(formatList.size()); + latestFormatMap = new HashMap(formatList.size()); + if (rawAccess) { + for (Format format : formatList) { + if (format != null) { + String name = format.getClassName(); + if (format.isCurrentVersion()) { + formatMap.put(name, format); + } + if (format == format.getLatestVersion()) { + latestFormatMap.put(name, format); + } + } + } + if (modelParam != null) { + model = modelParam; + storedModel = (StoredModel) modelParam; + } else { + storedModel = new StoredModel(this); + model = storedModel; + } + ModelInternal.setClassLoader(model, envClassLoader); + for (Format format : formatList) { + if (format != null) { + format.initializeIfNeeded(this, model); + } + } + initModelAndMutations(); + return; + } + + /* + * We are opening a store that uses the current model. Default to + * the AnnotationModel if no model is specified. + */ + if (modelParam != null) { + model = modelParam; + } else { + model = new AnnotationModel(); + } + ModelInternal.setClassLoader(model, envClassLoader); + storedModel = null; + + /* + * Add all predefined (simple) formats to the format map. The + * current version of other formats will be added below. + */ + for (int i = 0; i <= Format.ID_PREDEFINED; i += 1) { + Format simpleFormat = formatList.get(i); + if (simpleFormat != null) { + formatMap.put(simpleFormat.getClassName(), simpleFormat); + } + } + + /* + * Known classes are those explicitly registered by the user via + * the model, plus the predefined proxy classes. + */ + List knownClasses = + new ArrayList(model.getKnownClasses()); + /* Also adds the special classes, i.e., enum or array. [#19377] */ + knownClasses.addAll(model.getKnownSpecialClasses()); + addPredefinedProxies(knownClasses); + + /* + * Create a temporary map of proxied class name to proxy class + * name, using all known formats and classes. This map is used to + * force proxy formats to be created prior to proxied formats. + * [#14665] + */ + proxyClassMap = new HashMap(); + for (Format oldFormat : formatList) { + if (oldFormat == null || Format.isPredefined(oldFormat)) { + continue; + } + String oldName = oldFormat.getClassName(); + Renamer renamer = mutations.getRenamer + (oldName, oldFormat.getVersion(), null); + String newName = + (renamer != null) ? renamer.getNewName() : oldName; + addProxiedClass(newName, false /*isKnownClass*/); + } + for (String className : knownClasses) { + addProxiedClass(className, true /*isKnownClass*/); + } + + /* + * Add known formats from the model and the predefined proxies. + * In general, classes will not be present in an AnnotationModel + * until an instance is stored, in which case an old format exists. + * However, registered proxy classes are an exception and must be + * added in advance. 
And the user may choose to register new + * classes in advance. The more formats we define in advance, the + * less times we have to write to the catalog database. + */ + Map newFormats = new HashMap(); + for (String className : knownClasses) { + createFormat(className, newFormats); + } + + /* + * Perform class evolution for all old formats, and throw an + * exception that contains the messages for all of the errors in + * mutations or in the definition of new classes. + */ + initEvolver = new Evolver + (this, storePrefix, mutations, newFormats, forceEvolution, + disallowClassChanges); + for (Format oldFormat : formatList) { + if (oldFormat == null || Format.isPredefined(oldFormat)) { + continue; + } + if (oldFormat.isEntity()) { + initEvolver.evolveFormat(oldFormat); + } else { + initEvolver.addNonEntityFormat(oldFormat); + } + } + initEvolver.finishEvolution(); + String errors = initEvolver.getErrors(); + if (errors != null) { + throw new IncompatibleClassException(errors); + } + + /* + * Add the new formats remaining. New formats that are equal to + * old formats were removed from the newFormats map above. + */ + for (Format newFormat : newFormats.values()) { + addFormat(newFormat); + } + + /* Initialize all formats. */ + for (Format format : formatList) { + if (format != null) { + format.initializeIfNeeded(this, model); + if (format == format.getLatestVersion()) { + latestFormatMap.put(format.getClassName(), format); + } + } + } + + final boolean formatsChanged = + newFormats.size() > 0 || + initEvolver.areFormatsChanged(); + needWrite |= formatsChanged; + + /* For unit testing. */ + if (expectNoClassChanges && formatsChanged) { + throw new IllegalStateException + ("Unexpected changes " + + " newFormats.size=" + newFormats.size() + + " areFormatsChanged=" + initEvolver.areFormatsChanged()); + } + + readOnly = db.getConfig().getReadOnly(); + + /* Write the catalog if anything changed. */ + if (needWrite && !readOnly) { + + /* */ + try { + /* */ + + /* + * Only rename/remove databases if we are going to update + * the catalog to reflect those class changes. + */ + initEvolver.renameAndRemoveDatabases(store, txn); + + /* + * Note that we use the Data object that was read above, + * and the beta version determines whether to delete the + * old mutations record. + */ + initData.formatList = formatList; + initData.mutations = mutations; + writeData(txn, initData); + /* */ + } catch (ReplicaWriteException e) { + readOnly = true; + } + /* */ + } + initModelAndMutations(); + } finally { + + /* + * Fields needed only for the duration of this ctor and which + * should be null afterwards. + */ + proxyClassMap = null; + initData = null; + initEvolver = null; + } + } + + private void initModelAndMutations() { + + /* + * Give the model a reference to the catalog to fully initialize + * the model. Only then may we initialize the Converter mutations, + * which themselves may call model methods and expect the model to + * be fully initialized. 
+ */ + ModelInternal.setCatalog(model, this); + for (Converter converter : mutations.getConverters()) { + converter.getConversion().initialize(model); + } + } + + public void getEntityFormats(Collection entityFormats) { + for (Format format : formatMap.values()) { + if (format.isEntity()) { + entityFormats.add(format); + } + } + } + + private void addProxiedClass(String className, boolean isKnownClass) { + ClassMetadata metadata = model.getClassMetadata(className); + if (metadata != null) { + String proxiedClassName = metadata.getProxiedClassName(); + if (proxiedClassName != null) { + + /* + * If the class is a registered known class, need to check if + * registering proxy class is allowed or not. Currently, only + * SimpleType is not allowed to register a proxy class. + */ + if (isKnownClass) { + try { + Class type = resolveClass(proxiedClassName); + + /* + * Check if the proxied class is allowed to register a + * proxy class. If not, IllegalArgumentException will + * be thrown. + */ + if(!SimpleCatalog.allowRegisterProxy(type)) { + throw new IllegalArgumentException + ("Registering proxy is not allowed for " + + proxiedClassName + + ", which is a built-in simple type."); + } + } catch (ClassNotFoundException e) { + throw DbCompat.unexpectedState + ("Class does not exist: " + proxiedClassName); + } + } + proxyClassMap.put(proxiedClassName, className); + } + } + } + + private void addPredefinedProxies(List knownClasses) { + knownClasses.add(CollectionProxy.ArrayListProxy.class.getName()); + knownClasses.add(CollectionProxy.LinkedListProxy.class.getName()); + knownClasses.add(CollectionProxy.HashSetProxy.class.getName()); + knownClasses.add(CollectionProxy.TreeSetProxy.class.getName()); + knownClasses.add(MapProxy.HashMapProxy.class.getName()); + knownClasses.add(MapProxy.TreeMapProxy.class.getName()); + knownClasses.add(MapProxy.LinkedHashMapProxy.class.getName()); + } + + /** + * Returns a map from format to a set of its superclass formats. The + * format for simple types, enums and class Object are not included. Only + * complex types have superclass formats as defined by + * Format.getSuperFormat. + */ + Map> getSubclassMap() { + Map> subclassMap = + new HashMap>(); + for (Format format : formatList) { + if (format == null || Format.isPredefined(format)) { + continue; + } + Format superFormat = format.getSuperFormat(); + if (superFormat != null) { + Set subclass = subclassMap.get(superFormat); + if (subclass == null) { + subclass = new HashSet(); + subclassMap.put(superFormat, subclass); + } + subclass.add(format); + } + } + return subclassMap; + } + + /** + * Returns the model parameter, default model or stored model. + */ + public EntityModel getResolvedModel() { + return model; + } + + /** + * Increments the reference count for a catalog that is already open. + */ + public void openExisting() { + openCount += 1; + } + + /** + * Returns true if the user opened the store read-only, or we're running in + * Replica upgrade mode. + */ + public boolean isReadOnly() { + return readOnly; + } + + /** + * Decrements the reference count and closes the catalog DB when it reaches + * zero. Returns true if the database was closed or false if the reference + * count is still non-zero and the database was left open. 
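+     *
+     * <p>Hypothetical usage sketch (caller code is illustrative only):
+     * <pre>{@code
+     * catalog.openExisting();           // reference count: 2
+     * boolean closed = catalog.close(); // false, count drops back to 1
+     * closed = catalog.close();         // true, count is 0 and the DB closes
+     * }</pre>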
+ */ + public boolean close() + throws DatabaseException { + + if (openCount == 0) { + throw DbCompat.unexpectedState("Catalog is not open"); + } else { + openCount -= 1; + if (openCount == 0) { + db.close(); + return true; + } else { + return false; + } + } + } + + /** + * Returns the current merged mutations. + */ + public Mutations getMutations() { + return mutations; + } + + /** + * Convenience method that gets the class for the given class name and + * calls createFormat with the class object. + */ + public Format createFormat(String clsName, + Map newFormats) { + Class type; + try { + type = resolveClass(clsName); + } catch (ClassNotFoundException e) { + throw DbCompat.unexpectedState + ("Class does not exist: " + clsName); + } + return createFormat(type, newFormats); + } + + /** + * If the given class format is not already present in the given map and + * a format for this class name does not already exist, creates an + * uninitialized format, adds it to the map, and also collects related + * formats in the map. + */ + public Format createFormat(Class type, Map newFormats) { + /* Return a new or existing format for this class. */ + String className = type.getName(); + Format format = getFormatFromMap(type, newFormats); + if (format != null) { + return format; + } + format = getFormatFromMap(type, formatMap); + if (format != null) { + return format; + } + /* Simple types are predefined. */ + assert !SimpleCatalog.isSimpleType(type) : className; + + /* + * Although metadata is only needed for a complex type, call + * getClassMetadata for all types to support checks for illegal + * metadata on other types. + */ + ClassMetadata metadata = model.getClassMetadata(className); + /* Create format of the appropriate type. */ + String proxyClassName = null; + if (proxyClassMap != null) { + proxyClassName = proxyClassMap.get(className); + } + if (proxyClassName != null) { + format = new ProxiedFormat(this, type, proxyClassName); + } else if (type.isArray()) { + format = type.getComponentType().isPrimitive() ? + (new PrimitiveArrayFormat(this, type)) : + (new ObjectArrayFormat(this, type)); + } else if (type.isEnum()) { + format = new EnumFormat(this, type); + } else if (type.getEnclosingClass() != null && + type.getEnclosingClass().isEnum()) { + + /* + * If the type is an anonymous class of an enum class, the format + * which represents the enum class will be created. [#18357] + */ + format = new EnumFormat(this, type.getEnclosingClass()); + } else if (type == Object.class || type.isInterface()) { + format = new NonPersistentFormat(this, type); + } else { + if (metadata == null) { + throw new IllegalArgumentException + ("Class could not be loaded or is not persistent: " + + className); + } + if (metadata.getCompositeKeyFields() != null && + (metadata.getPrimaryKey() != null || + metadata.getSecondaryKeys() != null)) { + throw new IllegalArgumentException + ("A composite key class may not have primary or" + + " secondary key fields: " + type.getName()); + } + + /* + * Check for inner class before default constructor, to give a + * specific error message for each. 
+ */ + if (type.getEnclosingClass() != null && + !Modifier.isStatic(type.getModifiers())) { + throw new IllegalArgumentException + ("Inner classes not allowed: " + type.getName()); + } + try { + type.getDeclaredConstructor(); + } catch (NoSuchMethodException e) { + throw new IllegalArgumentException + ("No default constructor: " + type.getName(), e); + } + if (metadata.getCompositeKeyFields() != null) { + format = new CompositeKeyFormat + (this, type, metadata, + metadata.getCompositeKeyFields()); + } else { + EntityMetadata entityMetadata = + model.getEntityMetadata(className); + format = + new ComplexFormat(this, type, metadata, entityMetadata); + } + } + /* Collect new format along with any related new formats. */ + newFormats.put(className, format); + format.collectRelatedFormats(this, newFormats); + + return format; + } + + private Format getFormatFromMap(Class type, + Map formats) { + Format format = formats.get(type.getName()); + if (format != null) { + return format; + } else if (type.getEnclosingClass() != null && + type.getEnclosingClass().isEnum()) { + + /* + * If the type is an anonymous class of this enum class, the format + * which represents the enum class will be returned. [#18357] + */ + format = formats.get(type.getEnclosingClass().getName()); + if (format != null) { + return format; + } + } + return null; + } + + /** + * Adds a format and makes it the current format for the class. + */ + private void addFormat(Format format) { + addFormat(format, formatList, formatMap); + } + + /** + * Adds a format to the given the format collections, for use when + * dynamically adding formats. + */ + private void addFormat(Format format, + List list, + Map map) { + format.setId(list.size()); + list.add(format); + map.put(format.getClassName(), format); + } + + /** + * Installs an existing format when no evolution is needed, i.e, when the + * new and old formats are identical. + */ + void useExistingFormat(Format oldFormat) { + assert oldFormat.isCurrentVersion(); + formatMap.put(oldFormat.getClassName(), oldFormat); + } + + /** + * Returns a set of all persistent (non-simple type) class names. + */ + Set getModelClasses() { + Set classes = new HashSet(); + for (Format format : formatMap.values()) { + if (format.isModelClass()) { + classes.add(format.getClassName()); + } + } + return Collections.unmodifiableSet(classes); + } + + /** + * Returns all formats as RawTypes. + */ + public List getAllRawTypes() { + List list = new ArrayList(); + for (RawType type : formatList) { + if (type != null) { + list.add(type); + } + } + return Collections.unmodifiableList(list); + } + + /** + * When a format is intialized, this method is called to get the version + * of the serialized object to be initialized. See Catalog. + */ + public int getInitVersion(Format format, boolean forReader) { + + if (initData == null || initData.formatList == null || + format.getId() >= initData.formatList.size()) { + + /* + * For new formats, use the current version. If initData is null, + * the Catalog ctor is finished and the format must be new. If the + * ctor is in progress, the format is new if its ID is greater than + * the ID of all pre-existing formats. + */ + return Catalog.CURRENT_VERSION; + } else { + + /* + * Get the version of a pre-existing format during execution of the + * Catalog ctor. The initData field is non-null, but initEvolver + * may be null if the catalog is opened in raw mode. 
+ */ + assert initData != null; + + if (forReader) { + + /* + * Get the version of the evolution reader for a pre-existing + * format. Use the current version if the format changed + * during class evolution, otherwise use the stored version. + */ + return (initEvolver != null && + initEvolver.isFormatChanged(format)) ? + Catalog.CURRENT_VERSION : initData.version; + } else { + /* Always used the stored version for a pre-existing format. */ + return initData.version; + } + } + } + + public Format getFormat(final int formatId, final boolean expectStored) + throws RefreshException { + + if (formatId < 0) { + throw DbCompat.unexpectedState + ("Format ID " + formatId + " is negative," + + " may indicate data corruption."); + } + + /** + * If we're attempting to read a record containing a format ID that is + * greater than the maximum known stored format, then we refresh the + * formats from disk, expecting that the stored formats have been + * updated by the Master node. Note that format IDs greater than + * nStoredFormats may exist in the formatList, if evolution took place + * on this Replica in a read-only mode. Such formats are never written + * (Replicas do not write) and cannot be used for reading an existing + * record. [#16655] + * + * Do not perform this check if we did not get the format ID from a + * stored record (expectStored is false). For example, this would cause + * an erroneous RefreshException when this method is called during a + * convertRawObject operation, which calls this method to get a fresh + * copy of a format that may not be stored. [#18690] + */ + if (expectStored && formatId >= nStoredFormats) { + assert store != null; + throw new RefreshException(store, this, formatId); + } + + Format format = formatList.get(formatId); + if (format == null) { + throw DbCompat.unexpectedState + ("Format ID " + formatId + " has null format," + + " may indicate data corruption."); + } + + /* + * Currently we can't throw DeletedClassException because we should not + * do this if we're being called during a Conversion, and we don't have + * that state information available. + */ + /* + if (format.isDeleted()) { + throw new DeletedClassException + ("Class " + format.getClassName() + + " was deleted with a Deleter muation, format ID " + + formatId + '.'); + } + */ + + return format; + } + + /** + * Get a format for a given class, creating it if it does not exist. + * + *

<p>This method is called for top level entity instances by + * PersistEntityBinding.  When a new entity subclass format is added we + * call Store.checkEntitySubclassSecondaries to ensure that all secondary + * databases have been opened, before storing the entity.  We do this here + * while not holding a synchronization mutex, not in addNewFormat, to avoid + * deadlocks.  checkEntitySubclassSecondaries synchronizes on the Store. + * [#16399]</p>
+     *
+     * <p>Historical note: At one time we opened / created the secondary + * databases rather than requiring the user to open them, see [#15247]. + * Later we found this to be problematic since a user txn may have locked + * primary records, see [#16399].</p>
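+     *
+     * <p>Illustrative call sequence (a sketch, not a verbatim trace):
+     * <pre>{@code
+     * // PersistEntityBinding resolves the format before writing an entity:
+     * Format f = catalog.getFormat(entity.getClass(), true);
+     * // If a new entity subclass format was added, its secondary databases
+     * // were verified via Store.checkEntitySubclassSecondaries first.
+     * }</pre>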
        + */ + public Format getFormat(Class cls, boolean checkEntitySubclassIndexes) + throws RefreshException { + + Format format = formatMap.get(cls.getName()); + if (format == null) { + if (model != null) { + format = addNewFormat(cls); + /* Detect and handle new entity subclass. [#15247] */ + if (checkEntitySubclassIndexes && store != null) { + Format entityFormat = format.getEntityFormat(); + if (entityFormat != null && entityFormat != format) { + try { + store.checkEntitySubclassSecondaries + (entityFormat.getEntityMetadata(), + cls.getName()); + } catch (DatabaseException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } + } + } + if (format == null) { + throw new IllegalArgumentException + ("Class is not persistent: " + cls.getName()); + } + } + + return format; + } + + public Format getFormat(String className) { + return formatMap.get(className); + } + + public Format getLatestVersion(String className) { + return latestFormatMap.get(className); + } + + /** + * Returns the name of an entity class to be used to form the database + * name. Normally this is the same as the class name, but in replica + * upgrade mode it may be an earlier version of a renamed class. Returns + * null if there is no stored version of the class. [#16655] + */ + public String getDatabaseClassName(final String className) { + final Format format = getStoredFormat(className); + if (format == null) { + return null; + } + return format.getClassName(); + } + + /** + * Similar to getDatabaseClassName but instead handles an earlier version + * of a renamed key. [#16655] + */ + public String getDatabaseKeyName(final String className, + final String keyName) { + final Format format = getStoredFormat(className); + if (format == null) { + return null; + } + return format.getOldKeyName(keyName); + } + + private Format getStoredFormat(final String className) { + Format format = getFormat(className); + while (format != null && format.getId() >= nStoredFormats) { + format = format.getPreviousVersion(); + } + return format; + } + + /** + * Metadata needs refreshing when a Replica with stale metadata is elected + * master, and then a user write operation is attempted. [#16655] + */ + void checkWriteInReplicaUpgradeMode() + throws RefreshException { + + if (nStoredFormats < formatList.size()) { + throw new RefreshException(store, this, -1 /*formatId*/); + } + } + + /** + * For unit testing. + */ + boolean isReplicaUpgradeMode() { + return nStoredFormats < formatList.size(); + } + + /** + * Adds a format for a new class. Returns the format added for the given + * class, or throws an exception if the given class is not persistent. + * + *

<p>This method uses a copy-on-write technique to add new formats without + * impacting other threads.</p>
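+     *
+     * <p>The essence of the pattern, as a sketch (field names are from this
+     * class, but this is not the actual method body):
+     * <pre>{@code
+     * List<Format> copy = new ArrayList<Format>(formatList); // copy
+     * copy.add(newFormat);                                   // mutate the copy
+     * formatList = copy; // publish via the volatile field; readers always
+     *                    // see either the old or the new complete list
+     * }</pre>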
        + */ + private synchronized Format addNewFormat(Class cls) + throws RefreshException { + + /* + * After synchronizing, check whether another thread has added the + * format needed. Note that this is not the double-check technique + * because the formatMap field is volatile and is not itself checked + * for null. (The double-check technique is known to be flawed in + * Java.) + */ + Format format = getFormatFromMap(cls, formatMap); + if (format != null) { + return format; + } + + /* Copy the read-only format collections. */ + List newFormatList = new ArrayList(formatList); + Map newFormatMap = + new HashMap(formatMap); + Map newLatestFormatMap = + new HashMap(latestFormatMap); + + /* Add the new format and all related new formats. */ + Map newFormats = new HashMap(); + format = createFormat(cls, newFormats); + for (Format newFormat : newFormats.values()) { + addFormat(newFormat, newFormatList, newFormatMap); + } + + /* + * Initialize new formats using a read-only catalog because we can't + * update this catalog until after we store it (below). + */ + Catalog newFormatCatalog = new ReadOnlyCatalog + (ModelInternal.getClassLoader(model), newFormatList, newFormatMap); + for (Format newFormat : newFormats.values()) { + newFormat.initializeIfNeeded(newFormatCatalog, model); + newLatestFormatMap.put(newFormat.getClassName(), newFormat); + } + + /* + * Write the updated catalog using auto-commit, then assign the new + * collections. The database write must occur before the collections + * are used, since a format must be persistent before it can be + * referenced by a data record. + * + * In readOnly mode, which includes Replica upgrade mode, we should not + * attempt to write since we could be elected Master and write stale + * metadata. If ReplicaWriteException occurs then we transition to + * Replica upgrade mode in the same manner as in the init() method. + * This can happen when no schema change is made except for one or more + * new entity classes. The new entity class will not be detected by + * evolution (during init()) but will be detected here if the user + * calls getPrimaryIndex. [#16655] + */ + if (!readOnly) { + try { + Data newData = new Data(); + newData.formatList = newFormatList; + newData.mutations = mutations; + writeDataCheckStale(newData); + /* */ + } catch (ReplicaWriteException e) { + readOnly = true; + /* */ + } catch (DatabaseException e) { + throw RuntimeExceptionWrapper.wrapIfNeeded(e); + } + } + formatList = newFormatList; + formatMap = newFormatMap; + latestFormatMap = newLatestFormatMap; + + return format; + } + + /** + * Used to write the catalog when a format has been changed, for example, + * when Store.evolve has updated a Format's EvolveNeeded property. Uses + * auto-commit. + */ + public synchronized void flush(Transaction txn) + throws DatabaseException { + + Data newData = new Data(); + newData.formatList = formatList; + newData.mutations = mutations; + writeData(txn, newData); + } + + /** + * Returns the number of stored formats. + */ + int getNFormats() { + return nStoredFormats; + } + + /** + * Reads catalog Data, converting old versions as necessary. An empty + * Data object is returned if no catalog data currently exists. Null is + * never returned. 
+ */ + private Data readData(Transaction txn) + throws DatabaseException { + + Data oldData; + DatabaseEntry key = new DatabaseEntry(DATA_KEY); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = db.get(txn, key, data, null); + if (status == OperationStatus.SUCCESS) { + ByteArrayInputStream bais = new ByteArrayInputStream + (data.getData(), data.getOffset(), data.getSize()); + try { + ObjectInputStream ois = new ObjectInputStream(bais); + Object object = ois.readObject(); + assert ois.available() == 0; + if (object instanceof Data) { + oldData = (Data) object; + } else { + if (!(object instanceof List)) { + throw DbCompat.unexpectedState + (object.getClass().getName()); + } + oldData = new Data(); + oldData.formatList = (List) object; + oldData.version = BETA_VERSION; + } + return oldData; + } catch (ClassNotFoundException e) { + throw DbCompat.unexpectedException(e); + } catch (IOException e) { + throw DbCompat.unexpectedException(e); + } + } else { + oldData = new Data(); + oldData.version = Catalog.CURRENT_VERSION; + } + return oldData; + } + + /** + * Metadata needs refreshing when a Replica with stale metadata is elected + * master, and then a user write operation is attempted that also requires + * a metadata update. [#16655] + */ + boolean isMetadataStale(Transaction txn) + throws DatabaseException { + + Data oldData = readData(txn); + + return (oldData.formatList != null && + oldData.formatList.size() > nStoredFormats); + } + + /** + * Writes catalog Data after checking for stale metadata. + */ + private void writeDataCheckStale(Data newData) + throws DatabaseException, RefreshException { + + for (int i = 0;; i += 1) { + Transaction txn = null; + if (transactional && DbCompat.getThreadTransaction(env) == null) { + txn = + env.beginTransaction(null, store.getAutoCommitTxnConfig()); + } + boolean success = false; + try { + if (isMetadataStale(txn)) { + throw new RefreshException(store, this, -1 /*formatId*/); + } + writeData(txn, newData); + success = true; + return; + } catch (LockConflictException e) { + + /* + * A lock conflict should not occur because writes to the + * catalog DB are in synchronized methods. However, because we + * read-modify-update the catalog record, + * LockPreemptedException must be handled in a replicated JE + * environment. Since LockPreemptedException is a + * LockConfictException, it is simplest to retry when any + * LockConfictException occurs. + */ + if (i >= MAX_TXN_RETRIES) { + throw e; + } + continue; + } finally { + + /* + * If the catalog is read-only we abort rather than commit, + * because a ReplicaWriteException may have occurred. + * ReplicaWriteException invalidates the transaction, and there + * are no writes to commit anyway. [#16655] + */ + if (txn != null) { + if (success && !isReadOnly()) { + txn.commit(); + } else { + txn.abort(); + } + } + } + } + } + + /** + * Writes catalog Data. Does not check for stale metadata. + */ + private void writeData(Transaction txn, Data newData) + throws DatabaseException { + + /* Catalog data is written in the current version. 
*/ + boolean wasBetaVersion = (newData.version == BETA_VERSION); + newData.version = CURRENT_VERSION; + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(newData); + } catch (IOException e) { + throw DbCompat.unexpectedException(e); + } + DatabaseEntry key = new DatabaseEntry(DATA_KEY); + DatabaseEntry data = new DatabaseEntry(baos.toByteArray()); + db.put(txn, key, data); + + /* + * Delete the unused beta mutations record if we read the beta version + * record earlier. + */ + if (wasBetaVersion) { + key.setData(BETA_MUTATIONS_KEY); + db.delete(txn, key); + } + + nStoredFormats = newData.formatList.size(); + } + + public boolean isRawAccess() { + return rawAccess; + } + + public Object convertRawObject(RawObject o, IdentityHashMap converted) + throws RefreshException { + + Format format = (Format) o.getType(); + if (this == format.getCatalog()) { + /* Ensure a fresh format is used, in case of Replica refresh. */ + format = getFormat(format.getId(), false /*expectStored*/); + } else { + + /* + * Use the corresponding format in this catalog when the external + * raw object was created using a different catalog. Create the + * format if it does not already exist, for example, when this + * store is empty. [#16253]. + */ + String clsName = format.getClassName(); + Class cls; + try { + cls = resolveClass(clsName); + format = getFormat(cls, true /*checkEntitySubclassIndexes*/); + } catch (ClassNotFoundException e) { + format = null; + } + if (format == null) { + throw new IllegalArgumentException + ("External raw type not found: " + clsName); + } + } + Format proxiedFormat = format.getProxiedFormat(); + if (proxiedFormat != null) { + format = proxiedFormat; + } + if (converted == null) { + converted = new IdentityHashMap(); + } + return format.convertRawObject(this, false, o, converted); + } + + public Class resolveClass(String clsName) + throws ClassNotFoundException { + + return SimpleCatalog.resolveClass + (clsName, ModelInternal.getClassLoader(model)); + } + + public Class resolveKeyClass(String clsName) { + return SimpleCatalog.resolveKeyClass + (clsName, ModelInternal.getClassLoader(model)); + } +} diff --git a/src/com/sleepycat/persist/impl/PersistComparator.java b/src/com/sleepycat/persist/impl/PersistComparator.java new file mode 100644 index 0000000..4fc0352 --- /dev/null +++ b/src/com/sleepycat/persist/impl/PersistComparator.java @@ -0,0 +1,162 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.io.Serializable; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.compat.DbCompat; +/* */ +import com.sleepycat.je.DatabaseComparator; +/* */ + +/** + * The btree comparator for persistent key classes. The serialized form of + * this comparator is stored in the BDB JE database descriptor so that the + * comparator can be re-created during recovery. 
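+ *
+ * <p>A hedged sketch of the lifecycle described above (the driver code is
+ * illustrative; JE performs these steps internally during recovery):
+ * <pre>{@code
+ * PersistComparator c = readComparatorFromDescriptor(); // hypothetical step
+ * c.initialize(classLoader); // rebuilds the transient key binding
+ * int cmp = c.compare(keyBytes1, keyBytes2);
+ * }</pre>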
+ * + * @author Mark Hayes + */ +public class PersistComparator implements + /* */ + DatabaseComparator, + /* */ + Comparator, Serializable { + + private static final long serialVersionUID = 5221576538843355317L; + + private String keyClassName; + private String[] comositeFieldOrder; + private Map fieldFormatData; + private transient PersistKeyBinding binding; + + public PersistComparator(PersistKeyBinding binding) { + this.binding = binding; + /* Save info necessary to recreate binding during deserialization. */ + final CompositeKeyFormat format = + (CompositeKeyFormat) binding.keyFormat; + keyClassName = format.getClassName(); + comositeFieldOrder = CompositeKeyFormat.getFieldNameArray + (format.getClassMetadata().getCompositeKeyFields()); + /* Currently only enum formats have per-class data. */ + for (FieldInfo field : format.getFieldInfo()) { + Format fieldFormat = field.getType(); + if (fieldFormat.isEnum()) { + EnumFormat enumFormat = (EnumFormat) fieldFormat; + if (fieldFormatData == null) { + fieldFormatData = new HashMap(); + } + fieldFormatData.put(enumFormat.getClassName(), + enumFormat.getFormatData()); + } + } + } + + /** + * In BDB JE this method will be called after the comparator is + * deserialized, including during recovery. We must construct the binding + * here, without access to the stored catalog since recovery is not + * complete. + */ + public void initialize(ClassLoader loader) { + final Catalog catalog; + if (fieldFormatData == null) { + catalog = new ComparatorCatalog(loader, null); + } else { + final Map enumFormats = + new HashMap(); + catalog = new ComparatorCatalog(loader, enumFormats); + for (Map.Entry entry : + fieldFormatData.entrySet()) { + final String fldClassName = entry.getKey(); + final String[] enumNames = entry.getValue(); + final Class fldClass; + try { + fldClass = catalog.resolveClass(fldClassName); + } catch (ClassNotFoundException e) { + throw new IllegalStateException(e); + } + enumFormats.put(fldClassName, + new EnumFormat(catalog, fldClass, enumNames)); + } + for (Format fldFormat : enumFormats.values()) { + fldFormat.initializeIfNeeded(catalog, null /*model*/); + } + } + final Class keyClass; + try { + keyClass = catalog.resolveClass(keyClassName); + } catch (ClassNotFoundException e) { + throw new IllegalStateException(e); + } + binding = new PersistKeyBinding(catalog, keyClass, + comositeFieldOrder); + } + + public int compare(byte[] b1, byte[] b2) { + + /* + * In BDB JE, the binding is initialized by the initialize method. In + * BDB, the binding is intialized by the constructor. + */ + if (binding == null) { + throw DbCompat.unexpectedState("Not initialized"); + } + + try { + Comparable k1 = + (Comparable) binding.bytesToObject(b1, 0, b1.length); + Comparable k2 = + (Comparable) binding.bytesToObject(b2, 0, b2.length); + + return k1.compareTo(k2); + } catch (RefreshException e) { + + /* + * Refresh is not applicable to PersistComparator, which is used + * during recovery. All field formats used by the comparator are + * guaranteed to be predefined, because they must be primitive or + * primitive wrapper types. So they are always present in the + * catalog, and cannot change as the result of class evolution or + * replication. 
+ */ + throw DbCompat.unexpectedException(e); + } + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + b.append("[DPL comparator "); + b.append(" keyClassName = ").append(keyClassName); + b.append(" comositeFieldOrder = ["); + for (String s : comositeFieldOrder) { + b.append(s).append(','); + } + b.append(']'); + b.append(" fieldFormatData = {"); + for (Map.Entry entry : fieldFormatData.entrySet()) { + b.append(entry.getKey()).append(": ["); + for (String s : entry.getValue()) { + b.append(s).append(','); + } + b.append(']'); + } + b.append('}'); + b.append(']'); + return b.toString(); + } +} diff --git a/src/com/sleepycat/persist/impl/PersistEntityBinding.java b/src/com/sleepycat/persist/impl/PersistEntityBinding.java new file mode 100644 index 0000000..85eb205 --- /dev/null +++ b/src/com/sleepycat/persist/impl/PersistEntityBinding.java @@ -0,0 +1,324 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.persist.raw.RawObject; + +/** + * A persistence entity binding for a given entity class. + * + * @author Mark Hayes + */ +public class PersistEntityBinding implements EntityBinding { + + /* See Store.refresh for an explanation of the use of volatile fields. */ + volatile PersistCatalog catalog; + volatile Format entityFormat; + final boolean rawAccess; + PersistKeyAssigner keyAssigner; + + /** + * Creates a key binding for a given entity class. + */ + public PersistEntityBinding(final PersistCatalog catalogParam, + final String entityClassName, + final boolean rawAccess) { + catalog = catalogParam; + + try { + entityFormat = getOrCreateFormat(catalog, entityClassName, + rawAccess); + } catch (RefreshException e) { + /* Must assign catalog field in constructor. */ + catalog = e.refresh(); + try { + entityFormat = getOrCreateFormat(catalog, entityClassName, + rawAccess); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + if (!entityFormat.isEntity()) { + throw new IllegalArgumentException + ("Not an entity class: " + entityClassName); + } + this.rawAccess = rawAccess; + } + + public PersistKeyAssigner getKeyAssigner() { + return keyAssigner; + } + + public Object entryToObject(final DatabaseEntry key, + final DatabaseEntry data) { + try { + return entryToObjectInternal(key, null, data); + } catch (RefreshException e) { + e.refresh(); + try { + return entryToObjectInternal(key, null, data); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + /** + * This method will be used in PrimaryIndex.get, where the primary key is + * known to DPL. This method will force to call readEntityWithPriKey to + * directly assign primary key to the de-serialized object. 
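+     *
+     * <p>Hypothetical usage (sketch): PrimaryIndex.get already holds the key
+     * object, so the key bytes need not be deserialized a second time:
+     * <pre>{@code
+     * Object entity = binding.entryToObjectWithPriKey(priKeyObject, dataEntry);
+     * }</pre>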
+ */ + public Object entryToObjectWithPriKey(final Object priKey, + final DatabaseEntry data) { + try { + if (priKey == null) { + throw new + IllegalArgumentException("Primary key cannot be null."); + } + return entryToObjectInternal(null, priKey, data); + } catch (RefreshException e) { + e.refresh(); + try { + return entryToObjectInternal(null, priKey, data); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + /** + * This method is always called after reading an entity. If a refresh is + * needed, we detect that in PersistCatalog.getFormat(int). + */ + private Object entryToObjectInternal(final DatabaseEntry key, + final Object priKey, + final DatabaseEntry data) + throws RefreshException { + + return readEntity(catalog, key, priKey, data, rawAccess); + } + + /** + * Creates the instance, reads the entity key first to track visited + * entities correctly, then reads the data and returns the entity. + * + * This is a special case of EntityInput.readObject for a top level entity. + * Special treatments are: + * - The formatId must be >= 0; since this is the top level instance, it + * cannot refer to a visited object nor be a null reference. + * - The entity is not checked for existence in the visited object set; + * entities cannot be referenced by another entity. + * - Reader.readPriKey must be called prior to calling Reader.readObject. + */ + static Object readEntity(Catalog useCatalog, + DatabaseEntry key, + Object priKey, + DatabaseEntry data, + boolean rawAccess) + throws RefreshException { + + RecordInput dataInput = new RecordInput + (useCatalog, rawAccess, null, 0, + data.getData(), data.getOffset(), data.getSize()); + int initialOffset = dataInput.getBufferOffset(); + int formatId = dataInput.readPackedInt(); + Format format = useCatalog.getFormat(formatId, true /*expectStored*/); + dataInput.registerEntityFormat(format); + Reader reader = format.getReader(); + Object entity = reader.newInstance(dataInput, rawAccess); + if (priKey == null) { + /* If priKey is null, need to deserialize the primary key. */ + RecordInput keyInput = new RecordInput + (useCatalog, rawAccess, null, 0, + key.getData(), key.getOffset(), key.getSize()); + reader.readPriKey(entity, keyInput, rawAccess); + } else { + + /* + * If priKey is not null, directly assign it to the primary key + * field. [#19248] + */ + Accessor accessor = + reader.getAccessor(entity instanceof RawObject ? + true : + rawAccess); + if (accessor == null) { + accessor = format.getLatestVersion().getReader(). + getAccessor(entity instanceof RawObject ? + true : + rawAccess); + } + accessor.setPriField(entity, priKey); + } + dataInput.registerEntity(entity, initialOffset); + entity = reader.readObject(entity, dataInput, rawAccess); + return entity; + } + + public void objectToData(final Object entity, final DatabaseEntry data) { + try { + objectToDataInternal(entity, data); + } catch (RefreshException e) { + e.refresh(); + try { + objectToDataInternal(entity, data); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + /** + * This method is always called before writing an entity. If a refresh is + * needed, we detect that here. + */ + private void objectToDataInternal(final Object entity, + final DatabaseEntry data) + throws RefreshException { + + Format format = getValidFormat(entity); + /* Before a write, check whether a refresh is needed. 
[#16655] */ + catalog.checkWriteInReplicaUpgradeMode(); + writeEntity(format, catalog, entity, data, rawAccess); + } + + /** + * Writes the formatId and object, and returns the bytes. + * + * This is a special case of EntityOutput.writeObject for a top level + * entity. Special treatments are: + * - The entity may not be null. + * - The entity is not checked for existence in the visited object set; + * entities cannot be referenced by another entity. + */ + static void writeEntity(Format format, + Catalog catalog, + Object entity, + DatabaseEntry data, + boolean rawAccess) + throws RefreshException { + + RecordOutput output = new RecordOutput(catalog, rawAccess); + output.registerEntity(entity); + output.writePackedInt(format.getId()); + format.writeObject(entity, output, rawAccess); + TupleBase.outputToEntry(output, data); + } + + public void objectToKey(final Object entity, final DatabaseEntry key) { + try { + objectToKeyInternal(entity, key); + } catch (RefreshException e) { + e.refresh(); + try { + objectToKeyInternal(entity, key); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + private void objectToKeyInternal(final Object entity, + final DatabaseEntry key) + throws RefreshException { + + /* + * Write the primary key field as a special case since the output + * format is for a key binding, not entity data. + */ + Format format = getValidFormat(entity); + RecordOutput output = new RecordOutput(catalog, rawAccess); + + /* Write the primary key and return the bytes. */ + format.writePriKey(entity, output, rawAccess); + TupleBase.outputToEntry(output, key); + } + + /** + * Returns the format for the given entity and validates it, throwing an + * exception if it is invalid for this binding. + */ + private Format getValidFormat(Object entity) + throws RefreshException { + + /* A null entity is not allowed. */ + if (entity == null) { + throw new IllegalArgumentException("An entity may not be null"); + } + + /* + * Get the format. getFormat throws IllegalArgumentException if the + * class is not persistent. + */ + Format format; + if (rawAccess) { + if (!(entity instanceof RawObject)) { + throw new IllegalArgumentException + ("Entity must be a RawObject"); + } + format = (Format) ((RawObject) entity).getType(); + } else { + format = catalog.getFormat + (entity.getClass(), true /*checkEntitySubclassIndexes*/); + } + + /* Check that the entity class/subclass is valid for this binding. */ + if (format.getEntityFormat() != entityFormat) { + throw new IllegalArgumentException + ("The entity class (" + format.getClassName() + + ") must be this entity class or a subclass of it: " + + entityFormat.getClassName()); + } + + return format; + } + + /** + * Utility method for getting or creating a format as appropriate for + * bindings and key creators. + */ + static Format getOrCreateFormat(Catalog useCatalog, + String clsName, + boolean rawAccess) + throws RefreshException { + + if (rawAccess) { + Format format = useCatalog.getFormat(clsName); + if (format == null) { + throw new IllegalArgumentException + ("Not a persistent class: " + clsName); + } + return format; + } else { + Class cls = useCatalog.resolveKeyClass(clsName); + return useCatalog.getFormat(cls, + true /*checkEntitySubclassIndexes*/); + } + } + + /** + * See Store.refresh. 
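+     * Swaps in the new catalog and re-resolves the entity format by class
+     * name; the key assigner, if present, is refreshed as well.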
+ */ + void refresh(final PersistCatalog newCatalog) { + catalog = newCatalog; + entityFormat = newCatalog.getFormat(entityFormat.getClassName()); + if (keyAssigner != null) { + keyAssigner.refresh(newCatalog); + } + } +} diff --git a/src/com/sleepycat/persist/impl/PersistKeyAssigner.java b/src/com/sleepycat/persist/impl/PersistKeyAssigner.java new file mode 100644 index 0000000..033383c --- /dev/null +++ b/src/com/sleepycat/persist/impl/PersistKeyAssigner.java @@ -0,0 +1,101 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Sequence; + +/** + * Assigns primary keys from a Sequence. + * + * This class is used directly by PrimaryIndex, not via an interface. To avoid + * making a public interface, the PersistEntityBinding contains a reference to + * a PersistKeyAssigner, and the PrimaryIndex gets the key assigner from the + * binding. See the PrimaryIndex constructor for more information. + * + * @author Mark Hayes + */ +public class PersistKeyAssigner { + + /* See Store.refresh for an explanation of the use of volatile fields. */ + private volatile Catalog catalog; + private volatile Format keyFieldFormat; + private volatile Format entityFormat; + private final boolean rawAccess; + private final Sequence sequence; + + PersistKeyAssigner(PersistKeyBinding keyBinding, + PersistEntityBinding entityBinding, + Sequence sequence) { + catalog = keyBinding.catalog; + /* getSequenceKeyFormat will validate the field type for a sequence. */ + keyFieldFormat = keyBinding.keyFormat.getSequenceKeyFormat(); + entityFormat = entityBinding.entityFormat; + rawAccess = entityBinding.rawAccess; + this.sequence = sequence; + } + + public boolean assignPrimaryKey(Object entity, DatabaseEntry key) + throws DatabaseException { + + try { + return assignPrimaryKeyInternal(entity, key); + } catch (RefreshException e) { + e.refresh(); + try { + return assignPrimaryKeyInternal(entity, key); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + private boolean assignPrimaryKeyInternal(Object entity, DatabaseEntry key) + throws DatabaseException, RefreshException { + + /* + * The keyFieldFormat is the format of a simple integer field. For a + * composite key class it is the contained integer field. By writing + * the Long sequence value using that format, the output data can then + * be read to construct the actual key instance, whether it is a simple + * or composite key class, and assign it to the primary key field in + * the entity object. 
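+         *
+         * For example, given a hypothetical composite key class:
+         *
+         *    class SeqKey {       // annotated with @Persistent
+         *        long value;      // annotated with @KeyField(1)
+         *    }
+         *
+         * the Long returned by sequence.get is written using the format of
+         * the contained 'value' field, and readPriKey below reconstructs a
+         * SeqKey from those bytes and assigns it to the entity's key field.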
+ */ + if (entityFormat.isPriKeyNullOrZero(entity, rawAccess)) { + Long value = sequence.get(null, 1); + RecordOutput output = new RecordOutput(catalog, rawAccess); + keyFieldFormat.writeObject(value, output, rawAccess); + TupleBase.outputToEntry(output, key); + EntityInput input = new RecordInput + (catalog, rawAccess, null, 0, + key.getData(), key.getOffset(), key.getSize()); + entityFormat.getReader().readPriKey(entity, input, rawAccess); + return true; + } else { + return false; + } + } + + /** + * See Store.refresh. + */ + void refresh(final PersistCatalog newCatalog) { + catalog = newCatalog; + entityFormat = catalog.getFormat(entityFormat.getClassName()); + keyFieldFormat = catalog.getFormat(keyFieldFormat.getClassName()); + } +} diff --git a/src/com/sleepycat/persist/impl/PersistKeyBinding.java b/src/com/sleepycat/persist/impl/PersistKeyBinding.java new file mode 100644 index 0000000..40c5a0e --- /dev/null +++ b/src/com/sleepycat/persist/impl/PersistKeyBinding.java @@ -0,0 +1,154 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; + +/** + * A persistence key binding for a given key class. + * + * @author Mark Hayes + */ +public class PersistKeyBinding implements EntryBinding { + + /* See Store.refresh for an explanation of the use of volatile fields. */ + volatile Catalog catalog; + volatile Format keyFormat; + final boolean rawAccess; + + /** + * Creates a key binding for a given key class. + */ + public PersistKeyBinding(Catalog catalogParam, + String clsName, + boolean rawAccess) { + catalog = catalogParam; + try { + keyFormat = PersistEntityBinding.getOrCreateFormat + (catalog, clsName, rawAccess); + } catch (RefreshException e) { + /* Must assign catalog field in constructor. */ + catalog = e.refresh(); + try { + keyFormat = PersistEntityBinding.getOrCreateFormat + (catalog, clsName, rawAccess); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + if (!keyFormat.isSimple() && + !keyFormat.isEnum() && + !(keyFormat.getClassMetadata() != null && + keyFormat.getClassMetadata().getCompositeKeyFields() != null)) { + throw new IllegalArgumentException + ("Key class is not a simple type, an enum, or a composite " + + "key class (composite keys must include @KeyField " + + "annotations): " + + clsName); + } + this.rawAccess = rawAccess; + } + + /** + * Creates a key binding dynamically for use by PersistComparator. Formats + * are created from scratch rather than using a shared catalog. + */ + PersistKeyBinding(final Catalog catalog, + final Class cls, + final String[] compositeFieldOrder) { + this.catalog = catalog; + keyFormat = new CompositeKeyFormat(catalog, cls, compositeFieldOrder); + keyFormat.initializeIfNeeded(catalog, null /*model*/); + rawAccess = false; + } + + /** + * Binds bytes to an object for use by PersistComparator as well as + * entryToObject. 
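+     *
+     * A sketch of the comparator usage (illustrative only):
+     *
+     *    Comparable k1 = (Comparable) binding.bytesToObject(d1, 0, d1.length);
+     *    Comparable k2 = (Comparable) binding.bytesToObject(d2, 0, d2.length);
+     *    return k1.compareTo(k2);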
+ */ + Object bytesToObject(byte[] bytes, int offset, int length) + throws RefreshException { + + return readKey(keyFormat, catalog, bytes, offset, length, rawAccess); + } + + /** + * Binds bytes to an object for use by PersistComparator as well as + * entryToObject. + */ + static Object readKey(Format keyFormat, + Catalog catalog, + byte[] bytes, + int offset, + int length, + boolean rawAccess) + throws RefreshException { + + EntityInput input = new RecordInput + (catalog, rawAccess, null, 0, bytes, offset, length); + return input.readKeyObject(keyFormat); + } + + public Object entryToObject(DatabaseEntry entry) { + try { + return entryToObjectInternal(entry); + } catch (RefreshException e) { + e.refresh(); + try { + return entryToObjectInternal(entry); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + private Object entryToObjectInternal(DatabaseEntry entry) + throws RefreshException { + + return bytesToObject + (entry.getData(), entry.getOffset(), entry.getSize()); + } + + public void objectToEntry(Object object, DatabaseEntry entry) { + try { + objectToEntryInternal(object, entry); + } catch (RefreshException e) { + e.refresh(); + try { + objectToEntryInternal(object, entry); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + private void objectToEntryInternal(Object object, DatabaseEntry entry) + throws RefreshException { + + RecordOutput output = new RecordOutput(catalog, rawAccess); + output.writeKeyObject(object, keyFormat); + TupleBase.outputToEntry(output, entry); + } + + /** + * See Store.refresh. + */ + void refresh(final PersistCatalog newCatalog) { + catalog = newCatalog; + keyFormat = catalog.getFormat(keyFormat.getClassName()); + } +} diff --git a/src/com/sleepycat/persist/impl/PersistKeyCreator.java b/src/com/sleepycat/persist/impl/PersistKeyCreator.java new file mode 100644 index 0000000..b1c5bcd --- /dev/null +++ b/src/com/sleepycat/persist/impl/PersistKeyCreator.java @@ -0,0 +1,254 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.Collection; +import java.util.Set; + +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.ForeignMultiKeyNullifier; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.SecondaryMultiKeyCreator; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKeyMetadata; +import com.sleepycat.persist.raw.RawObject; + +/** + * A persistence secondary key creator/nullifier. This class always uses + * rawAccess=true to avoid depending on the presence of the proxy class. 
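+ *
+ * For context, a hypothetical entity whose keys this class would extract:
+ *
+ *    class Employee {          // annotated with @Entity
+ *        long id;              // @PrimaryKey
+ *        String department;    // @SecondaryKey(relate = MANY_TO_ONE)
+ *        String[] phones;      // @SecondaryKey(relate = ONE_TO_MANY)
+ *    }
+ *
+ * The MANY_TO_ONE key is extracted by createSecondaryKey, while the
+ * ONE_TO_MANY key (toMany == true) uses createSecondaryKeys to emit one
+ * key per array element.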
+ * + * @author Mark Hayes + */ +public class PersistKeyCreator implements SecondaryKeyCreator, + SecondaryMultiKeyCreator, + ForeignMultiKeyNullifier { + + static boolean isManyType(Class cls) { + return cls.isArray() || Collection.class.isAssignableFrom(cls); + } + + /* See Store.refresh for an explanation of the use of volatile fields. */ + private volatile Catalog catalog; + private volatile Format priKeyFormat; + private final String keyName; + private volatile Format keyFormat; + private final boolean toMany; + + /** + * Creates a key creator/nullifier for a given entity class and key name. + */ + public PersistKeyCreator(Catalog catalogParam, + EntityMetadata entityMeta, + String keyClassName, + SecondaryKeyMetadata secKeyMeta, + boolean rawAccess) { + catalog = catalogParam; + try { + getFormats(entityMeta, keyClassName, secKeyMeta, rawAccess); + } catch (RefreshException e) { + /* Must assign catalog field in constructor. */ + catalog = e.refresh(); + try { + getFormats(entityMeta, keyClassName, secKeyMeta, rawAccess); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + keyName = secKeyMeta.getKeyName(); + Relationship rel = secKeyMeta.getRelationship(); + toMany = (rel == Relationship.ONE_TO_MANY || + rel == Relationship.MANY_TO_MANY); + } + + private void getFormats(EntityMetadata entityMeta, + String keyClassName, + SecondaryKeyMetadata secKeyMeta, + boolean rawAccess) + throws RefreshException { + + priKeyFormat = PersistEntityBinding.getOrCreateFormat + (catalog, entityMeta.getPrimaryKey().getClassName(), rawAccess); + keyFormat = PersistEntityBinding.getOrCreateFormat + (catalog, keyClassName, rawAccess); + if (keyFormat == null) { + throw new IllegalArgumentException + ("Not a key class: " + keyClassName); + } + if (keyFormat.isPrimitive()) { + throw new IllegalArgumentException + ("Use a primitive wrapper class instead of class: " + + keyFormat.getClassName()); + } + } + + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + try { + return createSecondaryKeyInternal(secondary, key, data, result); + } catch (RefreshException e) { + e.refresh(); + try { + return createSecondaryKeyInternal(secondary, key, data, + result); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + private boolean createSecondaryKeyInternal(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) + throws RefreshException { + + if (toMany) { + throw DbCompat.unexpectedState(); + } + KeyLocation loc = moveToKey(key, data); + if (loc != null) { + RecordOutput output = new RecordOutput + (catalog, true /*rawAccess*/); + loc.format.copySecKey(loc.input, output); + TupleBase.outputToEntry(output, result); + return true; + } else { + /* Key field is not present or null. 
*/ + return false; + } + } + + public void createSecondaryKeys(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + Set results) { + try { + createSecondaryKeysInternal(secondary, key, data, results); + } catch (RefreshException e) { + e.refresh(); + try { + createSecondaryKeysInternal(secondary, key, data, results); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + private void createSecondaryKeysInternal(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + Set results) + throws RefreshException { + + if (!toMany) { + throw DbCompat.unexpectedState(); + } + KeyLocation loc = moveToKey(key, data); + if (loc != null) { + loc.format.copySecMultiKey(loc.input, keyFormat, results); + } + /* Else key field is not present or null. */ + } + + public boolean nullifyForeignKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry secKey) { + try { + return nullifyForeignKeyInternal(secondary, key, data, secKey); + } catch (RefreshException e) { + e.refresh(); + try { + return nullifyForeignKeyInternal(secondary, key, data, secKey); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + private boolean nullifyForeignKeyInternal(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry secKey) + throws RefreshException { + + /* Deserialize the entity and get its current class format. */ + RawObject entity = (RawObject) PersistEntityBinding.readEntity + (catalog, key, null, data, true /*rawAccess*/); + Format entityFormat = (Format) entity.getType(); + + /* + * Set the key to null. For a TO_MANY key, pass the key object to be + * removed from the array/collection. + */ + Object secKeyObject = null; + if (toMany) { + secKeyObject = PersistKeyBinding.readKey + (keyFormat, catalog, secKey.getData(), secKey.getOffset(), + secKey.getSize(), true /*rawAccess*/); + } + if (entityFormat.nullifySecKey + (catalog, entity, keyName, secKeyObject)) { + + /* + * Using the current format for the entity, serialize the modified + * entity back to the data entry. + */ + PersistEntityBinding.writeEntity + (entityFormat, catalog, entity, data, true /*rawAccess*/); + return true; + } else { + /* Key field is not present or null. */ + return false; + } + } + + /** + * Returns the location from which the secondary key field can be copied. + */ + private KeyLocation moveToKey(DatabaseEntry priKey, DatabaseEntry data) + throws RefreshException { + + RecordInput input = new RecordInput + (catalog, true /*rawAccess*/, priKey, priKeyFormat.getId(), + data.getData(), data.getOffset(), data.getSize()); + int formatId = input.readPackedInt(); + Format entityFormat = + catalog.getFormat(formatId, true /*expectStored*/); + input.registerEntityFormat(entityFormat); + Format fieldFormat = entityFormat.skipToSecKey(input, keyName); + if (fieldFormat != null) { + /* Returns null if key field is null. */ + return input.getKeyLocation(fieldFormat); + } else { + /* Key field is not present in this class. */ + return null; + } + } + + /** + * See Store.refresh. 
+ */ + void refresh(final PersistCatalog newCatalog) { + catalog = newCatalog; + keyFormat = catalog.getFormat(keyFormat.getClassName()); + priKeyFormat = catalog.getFormat(priKeyFormat.getClassName()); + } +} diff --git a/src/com/sleepycat/persist/impl/PrimitiveArrayFormat.java b/src/com/sleepycat/persist/impl/PrimitiveArrayFormat.java new file mode 100644 index 0000000..f668d11 --- /dev/null +++ b/src/com/sleepycat/persist/impl/PrimitiveArrayFormat.java @@ -0,0 +1,150 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.Array; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.raw.RawObject; + +/** + * An array of primitives having one dimension. Multidimensional arrays are + * handled by {@link ObjectArrayFormat}. + * + * @author Mark Hayes + */ +public class PrimitiveArrayFormat extends Format { + + private static final long serialVersionUID = 8285299924106073591L; + + private SimpleFormat componentFormat; + + PrimitiveArrayFormat(Catalog catalog, Class type) { + super(catalog, type); + } + + @Override + public boolean isArray() { + return true; + } + + @Override + public int getDimensions() { + return 1; + } + + @Override + public Format getComponentType() { + return componentFormat; + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + /* Component type is simple and simple type formats are predefined. */ + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + + /* + * getExistingType is allowed (to support raw mode) because primitive + * arrays are always available in Java. + */ + componentFormat = (SimpleFormat) + catalog.getFormat(getExistingType().getComponentType().getName()); + } + + @Override + Object newArray(int len) { + return Array.newInstance(getType(), len); + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + int len = input.readArrayLength(); + if (rawAccess) { + return new RawObject(this, new Object[len]); + } else { + return componentFormat.newPrimitiveArray(len, input); + } + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + if (rawAccess) { + Object[] a = ((RawObject) o).getElements(); + for (int i = 0; i < a.length; i += 1) { + a[i] = componentFormat.newInstance(input, true); + componentFormat.readObject(a[i], input, true); + } + } + /* Else, do nothing -- newInstance reads the value. 
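+           For non-raw access, newInstance has already called
+           componentFormat.newPrimitiveArray, which reads the entire array
+           contents from the input, so there is nothing more to consume here.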
*/ + return o; + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException { + + if (rawAccess) { + Object[] a = ((RawObject) o).getElements(); + output.writeArrayLength(a.length); + for (int i = 0; i < a.length; i += 1) { + componentFormat.writeObject(a[i], output, true); + } + } else { + componentFormat.writePrimitiveArray(o, output); + } + } + + @Override + Object convertRawObject(Catalog catalog, + boolean rawAccess, + RawObject rawObject, + IdentityHashMap converted) + throws RefreshException { + + RawArrayInput input = new RawArrayInput + (catalog, rawAccess, converted, rawObject, componentFormat); + Object a = newInstance(input, rawAccess); + converted.put(rawObject, a); + return readObject(a, input, rawAccess); + } + + @Override + void skipContents(RecordInput input) { + int len = input.readPackedInt(); + componentFormat.skipPrimitiveArray(len, input); + } + + @Override + void copySecMultiKey(RecordInput input, Format keyFormat, Set results) { + int len = input.readPackedInt(); + componentFormat.copySecMultiKeyPrimitiveArray(len, input, results); + } + + @Override + boolean evolve(Format newFormat, Evolver evolver) { + evolver.useOldFormat(this, newFormat); + return true; + } +} diff --git a/src/com/sleepycat/persist/impl/ProxiedFormat.java b/src/com/sleepycat/persist/impl/ProxiedFormat.java new file mode 100644 index 0000000..952f12e --- /dev/null +++ b/src/com/sleepycat/persist/impl/ProxiedFormat.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.Array; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.PersistentProxy; +import com.sleepycat.persist.raw.RawObject; + +/** + * Format for types proxied by a PersistentProxy. + * + * @author Mark Hayes + */ +public class ProxiedFormat extends Format { + + private static final long serialVersionUID = -1000032651995478768L; + + private Format proxyFormat; + private transient String proxyClassName; + + ProxiedFormat(Catalog catalog, Class proxiedType, String proxyClassName) { + super(catalog, proxiedType); + this.proxyClassName = proxyClassName; + } + + /** + * Returns the proxy class name. The proxyClassName field is non-null for + * a constructed object and null for a de-serialized object. Whenever the + * proxyClassName field is null (for a de-serialized object), the + * proxyFormat will be non-null. + */ + private String getProxyClassName() { + if (proxyClassName != null) { + return proxyClassName; + } else { + assert proxyFormat != null; + return proxyFormat.getClassName(); + } + } + + /** + * In the future if we implement container proxies, which support nested + * references to the container, then we will return false if this is a + * container proxy. 
[#15815] + */ + @Override + boolean areNestedRefsProhibited() { + return true; + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + /* Collect the proxy format. */ + assert proxyClassName != null; + catalog.createFormat(proxyClassName, newFormats); + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + /* Set the proxy format for a new (never initialized) format. */ + if (proxyFormat == null) { + assert proxyClassName != null; + proxyFormat = catalog.getFormat(proxyClassName); + } + /* Make the linkage from proxy format to proxied format. */ + proxyFormat.setProxiedFormat(this); + } + + @Override + Object newArray(int len) { + return Array.newInstance(getType(), len); + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + Reader reader = proxyFormat.getReader(); + if (rawAccess) { + return reader.newInstance(null, true); + } else { + + /* + * Note that the read object will not be a PersistentProxy if + * a class converter mutation is used. In this case, the reader + * will be ConverterReader. ConverterReader.readObject + * will call ProxiedFormat.convertRawObject, which will call + * PersistentProxy.convertProxy to convert the proxy. So we do not + * need another call to the convertProxy method. [#19312] + */ + Object o = reader.readObject(reader.newInstance(null, false), + input, false); + if (o instanceof PersistentProxy) { + o = ((PersistentProxy) o).convertProxy(); + } + return o; + } + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) + throws RefreshException { + + if (rawAccess) { + o = proxyFormat.getReader().readObject(o, input, true); + } + /* Else, do nothing here -- newInstance reads the value. */ + return o; + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) + throws RefreshException { + + if (rawAccess) { + proxyFormat.writeObject(o, output, true); + } else { + PersistentProxy proxy = + (PersistentProxy) proxyFormat.newInstance(null, false); + proxy.initializeProxy(o); + proxyFormat.writeObject(proxy, output, false); + } + } + + @Override + Object convertRawObject(Catalog catalog, + boolean rawAccess, + RawObject rawObject, + IdentityHashMap converted) + throws RefreshException { + + PersistentProxy proxy = (PersistentProxy) proxyFormat.convertRawObject + (catalog, rawAccess, rawObject, converted); + Object o = proxy.convertProxy(); + converted.put(rawObject, o); + return o; + } + + @Override + void skipContents(RecordInput input) + throws RefreshException { + + proxyFormat.skipContents(input); + } + + @Override + void copySecMultiKey(RecordInput input, Format keyFormat, Set results) + throws RefreshException { + + CollectionProxy.copyElements(input, this, keyFormat, results); + } + + @Override + boolean evolve(Format newFormatParam, Evolver evolver) { + if (!(newFormatParam instanceof ProxiedFormat)) { + + /* + * A workaround for reading the BigDecimal data stored by + * BigDecimal proxy before je4.1. + * + * The BigDecimal proxy has a proxied format for BigDecimal, which + * is a built-in SimpleType. We will evolve this ProxiedFormat of + * BigDecimal to the SimpleFormat. In other words, the conversion + * from a BigDecimal proxied format to a BigDecimal SimpleFormat is + * allowed, and the old format can be used as the reader of the old + * data. 
+ */ + if (newFormatParam.allowEvolveFromProxy()) { + evolver.useEvolvedFormat(this, this, newFormatParam); + return true; + } + evolver.addEvolveError + (this, newFormatParam, null, + "A proxied class may not be changed to a different type"); + return false; + } + ProxiedFormat newFormat = (ProxiedFormat) newFormatParam; + if (!evolver.evolveFormat(proxyFormat)) { + return false; + } + Format newProxyFormat = proxyFormat.getLatestVersion(); + if (!newProxyFormat.getClassName().equals + (newFormat.getProxyClassName())) { + evolver.addEvolveError + (this, newFormat, null, + "The proxy class for this type has been changed from: " + + newProxyFormat.getClassName() + " to: " + + newFormat.getProxyClassName()); + return false; + } + if (newProxyFormat != proxyFormat) { + evolver.useEvolvedFormat(this, this, newFormat); + } else { + evolver.useOldFormat(this, newFormat); + } + return true; + } +} diff --git a/src/com/sleepycat/persist/impl/RawAbstractInput.java b/src/com/sleepycat/persist/impl/RawAbstractInput.java new file mode 100644 index 0000000..a7cbd4b --- /dev/null +++ b/src/com/sleepycat/persist/impl/RawAbstractInput.java @@ -0,0 +1,252 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.IdentityHashMap; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.raw.RawObject; + +/** + * Base class for EntityInput implementations that type-check RawObject + * instances and convert them to regular persistent objects, via the + * Format.convertRawObject method. + * + * The subclass implements readNext which should call checkAndConvert before + * returning the final value. 
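+ *
+ * A minimal sketch of the expected readNext pattern (illustrative only):
+ *
+ *    Object readNext() throws RefreshException {
+ *        Object raw = ...;      // next raw value from the underlying source
+ *        Format declared = ...; // declared format for that position
+ *        return checkAndConvert(raw, declared);
+ *    }
+ *
+ * RawArrayInput, RawComplexInput and RawSingleInput below all follow this
+ * shape.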
+ * + * @author Mark Hayes + */ +abstract class RawAbstractInput extends AbstractInput { + + private IdentityHashMap converted; + + RawAbstractInput(Catalog catalog, + boolean rawAccess, + IdentityHashMap converted) { + super(catalog, rawAccess); + this.converted = converted; + } + + public Object readObject() + throws RefreshException { + + return readNext(); + } + + public Object readKeyObject(Format format) + throws RefreshException { + + return readNext(); + } + + public Object readStringObject() + throws RefreshException { + + return readNext(); + } + + public void registerPriKeyObject(Object o) { + } + + public void registerPriStringKeyObject(Object o) { + } + + public int readArrayLength() { + throw DbCompat.unexpectedState(); + } + + public int readEnumConstant(String[] names) { + throw DbCompat.unexpectedState(); + } + + public void skipField(Format declaredFormat) { + } + + abstract Object readNext() + throws RefreshException; + + Object checkAndConvert(Object o, Format declaredFormat) + throws RefreshException { + + if (o == null) { + if (declaredFormat.isPrimitive()) { + throw new IllegalArgumentException + ("A primitive type may not be null or missing: " + + declaredFormat.getClassName()); + } + } else if (declaredFormat.isSimple()) { + if (declaredFormat.isPrimitive()) { + if (o.getClass() != + declaredFormat.getWrapperFormat().getType()) { + throw new IllegalArgumentException + ("Raw value class: " + o.getClass().getName() + + " must be the wrapper class for a primitive type: " + + declaredFormat.getClassName()); + } + } else { + if (o.getClass() != declaredFormat.getType()) { + throw new IllegalArgumentException + ("Raw value class: " + o.getClass().getName() + + " must be the declared class for a simple type: " + + declaredFormat.getClassName()); + } + } + } else { + if (o instanceof RawObject) { + Object o2 = null; + if (!rawAccess) { + if (converted != null) { + o2 = converted.get(o); + } else { + converted = new IdentityHashMap(); + } + } + if (o2 != null) { + o = o2; + } else { + if (!rawAccess) { + o = catalog.convertRawObject((RawObject) o, converted); + } + } + } else { + if (!SimpleCatalog.isSimpleType(o.getClass())) { + throw new IllegalArgumentException + ("Raw value class: " + o.getClass().getName() + + " must be RawObject a simple type"); + } + } + if (rawAccess) { + checkRawType(catalog, o, declaredFormat); + } else { + if (!declaredFormat.getType().isAssignableFrom(o.getClass())) { + throw new IllegalArgumentException + ("Raw value class: " + o.getClass().getName() + + " is not assignable to type: " + + declaredFormat.getClassName()); + } + } + } + return o; + } + + static Format checkRawType(Catalog catalog, + Object o, + Format declaredFormat) + throws RefreshException { + + assert declaredFormat != null; + Format format; + if (o instanceof RawObject) { + format = (Format) ((RawObject) o).getType(); + /* Ensure a fresh format is used, in case of Replica refresh. 
*/ + format = catalog.getFormat(format.getId(), false /*expectStored*/); + } else { + format = catalog.getFormat(o.getClass(), + false /*checkEntitySubclassIndexes*/); + if (!format.isSimple() || format.isEnum()) { + throw new IllegalArgumentException + ("Not a RawObject or a non-enum simple type: " + + format.getClassName()); + } + } + if (!format.isAssignableTo(declaredFormat)) { + throw new IllegalArgumentException + ("Not a subtype of the field's declared class " + + declaredFormat.getClassName() + ": " + + format.getClassName()); + } + if (!format.isCurrentVersion()) { + throw new IllegalArgumentException + ("Raw type version is not current. Class: " + + format.getClassName() + " Version: " + + format.getVersion()); + } + Format proxiedFormat = format.getProxiedFormat(); + if (proxiedFormat != null) { + format = proxiedFormat; + } + return format; + } + + /* The following methods are a subset of the methods in TupleInput. */ + + public String readString() + throws RefreshException { + + return (String) readNext(); + } + + public char readChar() + throws RefreshException { + + return ((Character) readNext()).charValue(); + } + + public boolean readBoolean() + throws RefreshException { + + return ((Boolean) readNext()).booleanValue(); + } + + public byte readByte() + throws RefreshException { + + return ((Byte) readNext()).byteValue(); + } + + public short readShort() + throws RefreshException { + + return ((Short) readNext()).shortValue(); + } + + public int readInt() + throws RefreshException { + + return ((Integer) readNext()).intValue(); + } + + public long readLong() + throws RefreshException { + + return ((Long) readNext()).longValue(); + } + + public float readSortedFloat() + throws RefreshException { + + return ((Float) readNext()).floatValue(); + } + + public double readSortedDouble() + throws RefreshException { + + return ((Double) readNext()).doubleValue(); + } + + public BigDecimal readSortedBigDecimal() + throws RefreshException { + + return (BigDecimal) readNext(); + } + + public BigInteger readBigInteger() + throws RefreshException { + + return (BigInteger) readNext(); + } +} diff --git a/src/com/sleepycat/persist/impl/RawAccessor.java b/src/com/sleepycat/persist/impl/RawAccessor.java new file mode 100644 index 0000000..cbee0df --- /dev/null +++ b/src/com/sleepycat/persist/impl/RawAccessor.java @@ -0,0 +1,297 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.raw.RawObject; + +/** + * Implements Accessor for RawObject access. 
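+ *
+ * All field access goes through the RawObject's name-to-value map; getValue
+ * and setValue below reduce to (illustrative only):
+ *
+ *    ((RawObject) o).getValues().get(field.getName());
+ *    ((RawObject) o).getValues().put(field.getName(), val);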
+ * + * @author Mark Hayes + */ +class RawAccessor implements Accessor { + + private Format parentFormat; + private Accessor superAccessor; + private FieldInfo priKeyField; + private List secKeyFields; + private List nonKeyFields; + private boolean isCompositeKey; + + RawAccessor(Format parentFormat, + Accessor superAccessor, + FieldInfo priKeyField, + List secKeyFields, + List nonKeyFields) { + this.parentFormat = parentFormat; + this.superAccessor = superAccessor; + this.priKeyField = priKeyField; + this.secKeyFields = secKeyFields; + this.nonKeyFields = nonKeyFields; + } + + RawAccessor(Format parentFormat, + List nonKeyFields) { + this.parentFormat = parentFormat; + this.nonKeyFields = nonKeyFields; + secKeyFields = Collections.emptyList(); + isCompositeKey = true; + } + + public Object newInstance() { + RawObject superObject; + if (superAccessor != null) { + superObject = ((RawObject) superAccessor.newInstance()); + } else { + superObject = null; + } + return new RawObject + (parentFormat, new HashMap(), superObject); + } + + public Object newArray(int len) { + throw DbCompat.unexpectedState(); + } + + public boolean isPriKeyFieldNullOrZero(Object o) { + if (priKeyField != null) { + Object val = getValue(o, priKeyField); + Format format = priKeyField.getType(); + if (format.isPrimitive()) { + return ((Number) val).longValue() == 0L; + } else { + return val == null; + } + } else if (superAccessor != null) { + return superAccessor.isPriKeyFieldNullOrZero(getSuper(o)); + } else { + throw DbCompat.unexpectedState("No primary key field"); + } + } + + public void writePriKeyField(Object o, EntityOutput output) + throws RefreshException { + + if (priKeyField != null) { + Object val = getValue(o, priKeyField); + Format format = priKeyField.getType(); + output.writeKeyObject(val, format); + } else if (superAccessor != null) { + superAccessor.writePriKeyField(getSuper(o), output); + } else { + throw DbCompat.unexpectedState("No primary key field"); + } + } + + public void readPriKeyField(Object o, EntityInput input) + throws RefreshException { + + if (priKeyField != null) { + Format format = priKeyField.getType(); + Object val = input.readKeyObject(format); + setValue(o, priKeyField, val); + } else if (superAccessor != null) { + superAccessor.readPriKeyField(getSuper(o), input); + } else { + throw DbCompat.unexpectedState("No primary key field"); + } + } + + public void writeSecKeyFields(Object o, EntityOutput output) + throws RefreshException { + + if (priKeyField != null && + !priKeyField.getType().isPrimitive() && + priKeyField.getType().getId() != Format.ID_STRING) { + output.registerPriKeyObject(getValue(o, priKeyField)); + } + if (superAccessor != null) { + superAccessor.writeSecKeyFields(getSuper(o), output); + } + for (int i = 0; i < secKeyFields.size(); i += 1) { + writeField(o, secKeyFields.get(i), output); + } + } + + public void readSecKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + if (priKeyField != null && + !priKeyField.getType().isPrimitive() && + priKeyField.getType().getId() != Format.ID_STRING) { + input.registerPriKeyObject(getValue(o, priKeyField)); + } else if (priKeyField != null && + priKeyField.getType().getId() == Format.ID_STRING) { + input.registerPriStringKeyObject(getValue(o, priKeyField)); + } + if (superLevel != 0 && superAccessor != null) { + superAccessor.readSecKeyFields + (getSuper(o), input, startField, endField, superLevel - 1); + } else { + if (superLevel > 0) { + throw 
DbCompat.unexpectedState("Super class does not exist"); + } + } + if (superLevel <= 0) { + for (int i = startField; + i <= endField && i < secKeyFields.size(); + i += 1) { + readField(o, secKeyFields.get(i), input); + } + } + } + + public void writeNonKeyFields(Object o, EntityOutput output) + throws RefreshException { + + if (superAccessor != null) { + superAccessor.writeNonKeyFields(getSuper(o), output); + } + for (int i = 0; i < nonKeyFields.size(); i += 1) { + writeField(o, nonKeyFields.get(i), output); + } + } + + public void readNonKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + if (superLevel != 0 && superAccessor != null) { + superAccessor.readNonKeyFields + (getSuper(o), input, startField, endField, superLevel - 1); + } else { + if (superLevel > 0) { + throw DbCompat.unexpectedState("Super class does not exist"); + } + } + if (superLevel <= 0) { + for (int i = startField; + i <= endField && i < nonKeyFields.size(); + i += 1) { + readField(o, nonKeyFields.get(i), input); + } + } + } + + public void writeCompositeKeyFields(Object o, EntityOutput output) + throws RefreshException { + + for (int i = 0; i < nonKeyFields.size(); i += 1) { + writeField(o, nonKeyFields.get(i), output); + } + } + + public void readCompositeKeyFields(Object o, EntityInput input) + throws RefreshException { + + for (int i = 0; i < nonKeyFields.size(); i += 1) { + readField(o, nonKeyFields.get(i), input); + } + } + + public Object getField(Object o, + int field, + int superLevel, + boolean isSecField) { + if (superLevel > 0) { + return superAccessor.getField + (getSuper(o), field, superLevel - 1, isSecField); + } + FieldInfo fld = + isSecField ? secKeyFields.get(field) : nonKeyFields.get(field); + return getValue(o, fld); + } + + public void setField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value) { + if (superLevel > 0) { + superAccessor.setField + (getSuper(o), field, superLevel - 1, isSecField, value); + return; + } + FieldInfo fld = + isSecField ? 
secKeyFields.get(field) : nonKeyFields.get(field);
+        setValue(o, fld, value);
+    }
+
+    public void setPriField(Object o, Object value) {
+        if (priKeyField != null) {
+            setValue(o, priKeyField, value);
+        } else if (superAccessor != null) {
+            superAccessor.setPriField(getSuper(o), value);
+        } else {
+            throw DbCompat.unexpectedState("No primary key field");
+        }
+    }
+
+    private RawObject getSuper(Object o) {
+        return ((RawObject) o).getSuper();
+    }
+
+    private Object getValue(Object o, FieldInfo field) {
+        return ((RawObject) o).getValues().get(field.getName());
+    }
+
+    private void setValue(Object o, FieldInfo field, Object val) {
+        ((RawObject) o).getValues().put(field.getName(), val);
+    }
+
+    private void writeField(Object o, FieldInfo field, EntityOutput output)
+        throws RefreshException {
+
+        Object val = getValue(o, field);
+        Format format = field.getType();
+        if (isCompositeKey || format.isPrimitive()) {
+            output.writeKeyObject(val, format);
+        } else if (format.getId() == Format.ID_STRING) {
+            output.writeString((String) val);
+        } else {
+            output.writeObject(val, format);
+        }
+    }
+
+    private void readField(Object o, FieldInfo field, EntityInput input)
+        throws RefreshException {
+
+        Format format = field.getType();
+        Object val;
+        if (isCompositeKey || format.isPrimitive()) {
+            val = input.readKeyObject(format);
+        } else if (format.getId() == Format.ID_STRING) {
+            val = input.readStringObject();
+        } else {
+            val = input.readObject();
+        }
+        setValue(o, field, val);
+    }
+
+    public FieldInfo getPriKeyField() {
+        return priKeyField;
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/RawArrayInput.java b/src/com/sleepycat/persist/impl/RawArrayInput.java
new file mode 100644
index 0000000..48ecc55
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawArrayInput.java
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.impl;
+
+import com.sleepycat.persist.raw.RawObject;
+
+import java.util.IdentityHashMap;
+
+/**
+ * Extends RawAbstractInput to convert array (ObjectArrayFormat and
+ * PrimitiveArrayFormat) RawObject instances.
+ *
+ * @author Mark Hayes
+ */
+class RawArrayInput extends RawAbstractInput {
+
+    private Object[] array;
+    private int index;
+    private Format componentFormat;
+
+    RawArrayInput(Catalog catalog,
+                  boolean rawAccess,
+                  IdentityHashMap converted,
+                  RawObject raw,
+                  Format componentFormat) {
+        super(catalog, rawAccess, converted);
+        array = raw.getElements();
+        this.componentFormat = componentFormat;
+    }
+
+    @Override
+    public int readArrayLength() {
+        return array.length;
+    }
+
+    @Override
+    Object readNext()
+        throws RefreshException {
+
+        Object o = array[index++];
+        return checkAndConvert(o, componentFormat);
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/RawComplexInput.java b/src/com/sleepycat/persist/impl/RawComplexInput.java
new file mode 100644
index 0000000..eda85d7
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawComplexInput.java
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.impl;
+
+import com.sleepycat.persist.raw.RawObject;
+
+import java.util.IdentityHashMap;
+
+/**
+ * Extends RawAbstractInput to convert complex (ComplexFormat and
+ * CompositeKeyFormat) RawObject instances.
+ *
+ * @author Mark Hayes
+ */
+class RawComplexInput extends RawAbstractInput {
+
+    private FieldInfo[] fields;
+    private RawObject[] objects;
+    private int index;
+
+    RawComplexInput(Catalog catalog,
+                    boolean rawAccess,
+                    IdentityHashMap converted,
+                    FieldInfo[] fields,
+                    RawObject[] objects) {
+        super(catalog, rawAccess, converted);
+        this.fields = fields;
+        this.objects = objects;
+    }
+
+    @Override
+    Object readNext()
+        throws RefreshException {
+
+        RawObject raw = objects[index];
+        FieldInfo field = fields[index];
+        index += 1;
+        Format format = field.getType();
+        Object o = raw.getValues().get(field.getName());
+        return checkAndConvert(o, format);
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/RawSingleInput.java b/src/com/sleepycat/persist/impl/RawSingleInput.java
new file mode 100644
index 0000000..fab8998
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RawSingleInput.java
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+
+/**
+ * Extends RawAbstractInput to convert a single value against its declared
+ * format, rather than an array or complex RawObject instance.
+ *
+ * @author Mark Hayes
+ */
+class RawSingleInput extends RawAbstractInput {
+
+    private Object singleValue;
+    private Format declaredFormat;
+
+    RawSingleInput(Catalog catalog,
+                   boolean rawAccess,
+                   IdentityHashMap converted,
+                   Object singleValue,
+                   Format declaredFormat) {
+        super(catalog, rawAccess, converted);
+        this.singleValue = singleValue;
+        this.declaredFormat = declaredFormat;
+    }
+
+    @Override
+    Object readNext()
+        throws RefreshException {
+
+        return checkAndConvert(singleValue, declaredFormat);
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/ReadOnlyCatalog.java b/src/com/sleepycat/persist/impl/ReadOnlyCatalog.java
new file mode 100644
index 0000000..d12e6ed
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/ReadOnlyCatalog.java
@@ -0,0 +1,104 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.persist.raw.RawObject;
+
+/**
+ * Read-only catalog operations used when initializing new formats.  This
+ * catalog is used temporarily when the main catalog has not been updated yet,
+ * but the new formats need to do catalog lookups.
+ *
+ * @see PersistCatalog#addNewFormat
+ *
+ * @author Mark Hayes
+ */
+class ReadOnlyCatalog implements Catalog {
+
+    private final ClassLoader classLoader;
+    private List<Format> formatList;
+    private Map<String, Format> formatMap;
+
+    ReadOnlyCatalog(ClassLoader classLoader,
+                    List<Format> formatList,
+                    Map<String, Format> formatMap) {
+        this.classLoader = classLoader;
+        this.formatList = formatList;
+        this.formatMap = formatMap;
+    }
+
+    public int getInitVersion(Format format, boolean forReader) {
+        return Catalog.CURRENT_VERSION;
+    }
+
+    public Format getFormat(int formatId, boolean expectStored) {
+        try {
+            Format format = formatList.get(formatId);
+            if (format == null) {
+                throw DbCompat.unexpectedState
+                    ("Format does not exist: " + formatId);
+            }
+            return format;
+        } catch (NoSuchElementException e) {
+            throw DbCompat.unexpectedState
+                ("Format does not exist: " + formatId);
+        }
+    }
+
+    public Format getFormat(Class cls, boolean checkEntitySubclassIndexes) {
+        Format format = formatMap.get(cls.getName());
+        if (format == null) {
+            throw new IllegalArgumentException
+                ("Class is not persistent: " + cls.getName());
+        }
+        return format;
+    }
+
+    public Format getFormat(String className) {
+        return formatMap.get(className);
+    }
+
+    public Format createFormat(String clsName,
+                               Map<String, Format> newFormats) {
+        throw DbCompat.unexpectedState();
+    }
+
+    public Format createFormat(Class type, Map<String, Format> newFormats) {
+        throw DbCompat.unexpectedState();
+    }
+
+    public boolean isRawAccess() {
+        return false;
+    }
+
+    public Object convertRawObject(RawObject o, IdentityHashMap converted) {
+        throw DbCompat.unexpectedState();
+    }
+
+    public Class resolveClass(String clsName)
+        throws ClassNotFoundException {
+
+        return SimpleCatalog.resolveClass(clsName, classLoader);
+    }
+
+    public Class resolveKeyClass(String clsName) {
+        return SimpleCatalog.resolveKeyClass(clsName, classLoader);
+    }
+}
diff --git a/src/com/sleepycat/persist/impl/Reader.java b/src/com/sleepycat/persist/impl/Reader.java
new file mode 100644
index 0000000..36a5297
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/Reader.java
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.io.Serializable;
+
+import com.sleepycat.persist.model.EntityModel;
+
+/**
+ * Interface to the "read object" methods of the Format class.  For the
+ * latest version format, the Format object provides the implementation of
+ * these methods.  For an older version format, an evolver object implements
+ * this interface to convert from the old to the new format.
+ *
+ * See {@link Format} for a description of each method.
+ *
+ * @author Mark Hayes
+ */
+interface Reader extends Serializable {
+
+    void initializeReader(Catalog catalog,
+                          EntityModel model,
+                          int initVersion,
+                          Format oldFormat);
+
+    Object newInstance(EntityInput input, boolean rawAccess)
+        throws RefreshException;
+
+    void readPriKey(Object o, EntityInput input, boolean rawAccess)
+        throws RefreshException;
+
+    Object readObject(Object o, EntityInput input, boolean rawAccess)
+        throws RefreshException;
+
+    Accessor getAccessor(boolean rawAccess);
+}
diff --git a/src/com/sleepycat/persist/impl/RecordInput.java b/src/com/sleepycat/persist/impl/RecordInput.java
new file mode 100644
index 0000000..9244ebd
--- /dev/null
+++ b/src/com/sleepycat/persist/impl/RecordInput.java
@@ -0,0 +1,346 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.impl;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * Implements EntityInput to read record key-data pairs.  Extends TupleInput
+ * to implement the subset of TupleInput methods that are defined in the
+ * EntityInput interface.
+ *
+ * @author Mark Hayes
+ */
+class RecordInput extends TupleInput implements EntityInput {
+
+    /* Initial size of visited map. */
+    static final int VISITED_INIT_SIZE = 50;
+
+    /*
+     * Offset to indicate that the visited object is stored in the primary key
+     * byte array.
+     */
+    static final int PRI_KEY_VISITED_OFFSET = Integer.MAX_VALUE - 1;
+
+    /* Used by RecordOutput to prevent illegal nested references. */
+    static final int PROHIBIT_REF_OFFSET = Integer.MAX_VALUE - 2;
+
+    /* Used by RecordInput to prevent illegal nested references. */
+    static final Object PROHIBIT_REF_OBJECT = new Object();
+
+    static final String PROHIBIT_NESTED_REF_MSG =
+        "Cannot embed a reference to a proxied object in the proxy; for " +
+        "example, a collection may not be an element of the collection " +
+        "because collections are proxied";
+
+    private Catalog catalog;
+    private boolean rawAccess;
+    private Map visited;
+    private DatabaseEntry priKeyEntry;
+    private int priKeyFormatId;
+    private boolean newStringFormat;
+
+    /**
+     * Creates a new input with an empty/null visited map.
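+     *
+     * The visited map enables shared and cyclic references: when an object
+     * occurs again in the record, it is referenced as the negation of its
+     * first buffer offset minus one, in place of a format ID (see readObject
+     * below and RecordOutput.writeObject).  For example (illustrative), an
+     * object first read at offset 10 is later referenced as the packed
+     * int -11.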
+ */ + RecordInput(Catalog catalog, + boolean rawAccess, + DatabaseEntry priKeyEntry, + int priKeyFormatId, + byte[] buffer, + int offset, + int length) { + super(buffer, offset, length); + this.catalog = catalog; + this.rawAccess = rawAccess; + this.priKeyEntry = priKeyEntry; + this.priKeyFormatId = priKeyFormatId; + this.visited = new HashMap(VISITED_INIT_SIZE); + } + + /** + * Copy contructor where a new offset can be specified. + */ + private RecordInput(RecordInput other, int offset) { + this(other.catalog, other.rawAccess, other.priKeyEntry, + other.priKeyFormatId, other.buf, offset, other.len); + visited = other.visited; + } + + /** + * Copy contructor where a DatabaseEntry can be specified. + */ + private RecordInput(RecordInput other, DatabaseEntry entry) { + this(other.catalog, other.rawAccess, other.priKeyEntry, + other.priKeyFormatId, entry.getData(), entry.getOffset(), + entry.getSize()); + visited = other.visited; + } + + /** + * @see EntityInput#getCatalog + */ + public Catalog getCatalog() { + return catalog; + } + + /** + * @see EntityInput#isRawAccess + */ + public boolean isRawAccess() { + return rawAccess; + } + + /** + * @see EntityInput#setRawAccess + */ + public boolean setRawAccess(boolean rawAccessParam) { + boolean original = rawAccess; + rawAccess = rawAccessParam; + return original; + } + + /** + * @see EntityInput#readObject + */ + public Object readObject() + throws RefreshException { + + /* Save the current offset before reading the format ID. */ + Integer visitedOffset = off; + RecordInput useInput = this; + int formatId = readPackedInt(); + Object o = null; + + /* For a zero format ID, return a null instance. */ + if (formatId == Format.ID_NULL) { + return null; + } + + /* For a negative format ID, lookup an already visited instance. */ + if (formatId < 0) { + int offset = (-(formatId + 1)); + o = visited.get(offset); + if (o == RecordInput.PROHIBIT_REF_OBJECT) { + throw new IllegalArgumentException + (RecordInput.PROHIBIT_NESTED_REF_MSG); + } + if (o != null) { + /* Return a previously visited object. */ + return o; + } else { + + /* + * When reading starts from a non-zero offset, we may have to + * go back in the stream and read the referenced object. This + * happens when reading secondary key fields. + */ + visitedOffset = offset; + if (offset == RecordInput.PRI_KEY_VISITED_OFFSET) { + assert priKeyEntry != null && priKeyFormatId > 0; + useInput = new RecordInput(this, priKeyEntry); + formatId = priKeyFormatId; + } else { + useInput = new RecordInput(this, offset); + formatId = useInput.readPackedInt(); + } + } + } + + /* + * Add a visted object slot that prohibits nested references to this + * object during the call to Reader.newInstance below. The newInstance + * method is allowed to read nested fields (in which case + * Reader.readObject further below does nothing) under certain + * conditions, but under these conditions we do not support nested + * references to the parent object. [#15815] + */ + visited.put(visitedOffset, RecordInput.PROHIBIT_REF_OBJECT); + + /* Create the object using the format indicated. */ + Format format = catalog.getFormat(formatId, true /*expectStored*/); + Reader reader = format.getReader(); + o = reader.newInstance(useInput, rawAccess); + + /* + * Set the newly created object in the map of visited objects. This + * must be done before calling Reader.readObject, which allows the + * object to contain a reference to itself. + */ + visited.put(visitedOffset, o); + + /* + * Finish reading the object. 
Then replace it in the visited map in + * case a converted object is returned by readObject. + */ + Object o2 = reader.readObject(o, useInput, rawAccess); + if (o != o2) { + visited.put(visitedOffset, o2); + } + return o2; + } + + /** + * @see EntityInput#readKeyObject + */ + public Object readKeyObject(Format format) + throws RefreshException { + + /* Create and read the object using the given key format. */ + Reader reader = format.getReader(); + Object o = reader.newInstance(this, rawAccess); + return reader.readObject(o, this, rawAccess); + } + + /** + * @see EntityInput#readStringObject + */ + public Object readStringObject() + throws RefreshException { + + if (!newStringFormat) { + return readObject(); + } + return readString(); + } + + /** + * Called when copying secondary keys, for an input that is positioned on + * the secondary key field. Handles references to previously occurring + * objects, returning a different RecordInput than this one if appropriate. + */ + KeyLocation getKeyLocation(Format fieldFormat) + throws RefreshException { + + RecordInput input = this; + if (fieldFormat.getId() == Format.ID_STRING && newStringFormat) { + + /* + * In the new JE version, we do not store a format ID for String + * data, so we have to read the real String data to see whether it + * is null or not. [#19247] + */ + int len = input.getStringByteLength(); + String strKey = input.readString(); + input.skipFast(0 - len); + if (strKey == null) { + /* String key field is null. */ + return null; + } + } else if (!fieldFormat.isPrimitive()) { + int formatId = input.readPackedInt(); + if (formatId == Format.ID_NULL) { + /* Key field is null. */ + return null; + } + if (formatId < 0) { + int offset = (-(formatId + 1)); + if (offset == RecordInput.PRI_KEY_VISITED_OFFSET) { + assert priKeyEntry != null && priKeyFormatId > 0; + input = new RecordInput(this, priKeyEntry); + formatId = priKeyFormatId; + } else { + input = new RecordInput(this, offset); + formatId = input.readPackedInt(); + } + } + fieldFormat = catalog.getFormat(formatId, true /*expectStored*/); + } + /* Key field is non-null. */ + return new KeyLocation(input, fieldFormat); + } + + /** + * @see EntityInput#registerPriKeyObject + */ + public void registerPriKeyObject(Object o) { + + /* + * PRI_KEY_VISITED_OFFSET is used as the visited offset to indicate + * that the visited object is stored in the primary key byte array. + */ + visited.put(RecordInput.PRI_KEY_VISITED_OFFSET, o); + } + + /** + * @see EntityInput#registerPriKeyObject + */ + public void registerPriStringKeyObject(Object o) { + + /* + * In JE 5.0 and later, String is treated as a primitive type, so a + * String object does not need to be registered. But in earlier + * versions, Strings are treated as any other object and must be + * registered. [#19247] + */ + if (!newStringFormat) { + visited.put(RecordInput.PRI_KEY_VISITED_OFFSET, o); + } + } + + /** + * Registers the top level entity before reading it, to allow nested fields + * to reference their parent entity. [#17525] + */ + public void registerEntity(Object entity, int initialOffset) { + visited.put(initialOffset, entity); + } + + /** + * Registers the entity format before reading it, so that old-format String + * fields can be read properly. [#19247]
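The [#19247] change referenced throughout this class amounts to two layouts for stored String fields, which the following hedged sketch illustrates using the public TupleInput/TupleOutput bindings; the packed value 20 is an arbitrary stand-in for the real String format ID, which lives in Format.ID_STRING.

    import com.sleepycat.bind.tuple.TupleInput;
    import com.sleepycat.bind.tuple.TupleOutput;

    public class StringLayoutDemo {
        public static void main(String[] args) {
            // New-style layout ([#19247]): the string bytes only, no format ID.
            TupleOutput newStyle = new TupleOutput();
            newStyle.writeString("alpha");
            TupleInput in1 = new TupleInput(newStyle);
            System.out.println(in1.readString());     // -> alpha

            // Old-style layout: a packed format ID precedes the string bytes
            // (20 is an arbitrary stand-in, not the real Format.ID_STRING).
            TupleOutput oldStyle = new TupleOutput();
            oldStyle.writePackedInt(20);
            oldStyle.writeString("alpha");
            TupleInput in2 = new TupleInput(oldStyle);
            System.out.println(in2.readPackedInt());  // -> 20
            System.out.println(in2.readString());     // -> alpha
        }
    }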
*/ + public void registerEntityFormat(Format entityFormat) { + newStringFormat = entityFormat.getNewStringFormat(); + } + + /** + * @see EntityInput#skipField + */ + public void skipField(Format declaredFormat) + throws RefreshException { + + if (declaredFormat != null && declaredFormat.isPrimitive()) { + declaredFormat.skipContents(this); + } else if (declaredFormat != null && + declaredFormat.getId() == Format.ID_STRING && + newStringFormat) { + + /* + * In the new JE version, we treat String as a primitive and do + * not store a format ID for String data. [#19247] + */ + declaredFormat.skipContents(this); + } else { + int formatId = readPackedInt(); + if (formatId > 0) { + Format format = + catalog.getFormat(formatId, true /*expectStored*/); + format.skipContents(this); + } + } + } + + public int readArrayLength() { + return readPackedInt(); + } + + public int readEnumConstant(String[] names) { + return readPackedInt(); + } +} diff --git a/src/com/sleepycat/persist/impl/RecordOutput.java b/src/com/sleepycat/persist/impl/RecordOutput.java new file mode 100644 index 0000000..765a36d --- /dev/null +++ b/src/com/sleepycat/persist/impl/RecordOutput.java @@ -0,0 +1,196 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.IdentityHashMap; +import java.util.Map; + +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.persist.raw.RawObject; + +/** + * Implements EntityOutput to write record key-data pairs. Extends TupleOutput + * to implement the subset of TupleOutput methods that are defined in the + * EntityOutput interface. + * + * @author Mark Hayes + */ +class RecordOutput extends TupleOutput implements EntityOutput { + + private Catalog catalog; + private boolean rawAccess; + private Map visited; + + /** + * Creates a new output with an empty/null visited map. + */ + RecordOutput(Catalog catalog, boolean rawAccess) { + + super(); + this.catalog = catalog; + this.rawAccess = rawAccess; + this.visited = new IdentityHashMap(); + } + + /** + * @see EntityOutput#writeObject + */ + public void writeObject(Object o, Format fieldFormat) + throws RefreshException { + + /* For a null instance, write a zero format ID. */ + if (o == null) { + writePackedInt(Format.ID_NULL); + return; + } + + /* + * For an already visited instance, output a reference to it. The + * reference is the negation of the visited offset minus one. + */ + Integer offset = visited.get(o); + if (offset != null) { + if (offset == RecordInput.PROHIBIT_REF_OFFSET) { + throw new IllegalArgumentException + (RecordInput.PROHIBIT_NESTED_REF_MSG); + } else { + writePackedInt(-(offset + 1)); + return; + } + } + + /* + * Get and validate the format. Catalog.getFormat(Class) throws + * IllegalArgumentException if the class is not persistent. We don't + * need to check the fieldFormat (and it will be null) for non-raw + * access because field type checking is enforced by Java.
+ */ + Format format; + if (rawAccess) { + format = RawAbstractInput.checkRawType(catalog, o, fieldFormat); + } else { + + /* + * Do not attempt to open subclass indexes in case this is an + * embedded entity. We will detect that error below, but we must + * not fail first when attempting to open the secondaries. + */ + format = catalog.getFormat + (o.getClass(), false /*checkEntitySubclassIndexes*/); + } + if (format.getProxiedFormat() != null) { + throw new IllegalArgumentException + ("May not store proxy classes directly: " + + format.getClassName()); + } + /* Check for embedded entity classes and subclasses. */ + if (format.getEntityFormat() != null) { + throw new IllegalArgumentException + ("References to entities are not allowed: " + + o.getClass().getName()); + } + + /* + * Remember that we visited this instance. Certain formats + * (ProxiedFormat for example) prohibit nested fields that reference + * the parent object. [#15815] + */ + boolean prohibitNestedRefs = format.areNestedRefsProhibited(); + Integer visitedOffset = size(); + visited.put(o, prohibitNestedRefs ? RecordInput.PROHIBIT_REF_OFFSET : + visitedOffset); + + /* Finally, write the formatId and object value. */ + writePackedInt(format.getId()); + format.writeObject(o, this, rawAccess); + + /* Always allow references from siblings that follow. */ + if (prohibitNestedRefs) { + visited.put(o, visitedOffset); + } + } + + /** + * @see EntityOutput#writeKeyObject + */ + public void writeKeyObject(Object o, Format fieldFormat) + throws RefreshException { + + /* Key objects must not be null and must be of the declared class. */ + if (o == null) { + throw new IllegalArgumentException + ("Key field object may not be null"); + } + Format format; + if (rawAccess) { + if (o instanceof RawObject) { + format = (Format) ((RawObject) o).getType(); + } else { + format = catalog.getFormat + (o.getClass(), false /*checkEntitySubclassIndexes*/); + /* Expect primitive wrapper class in raw mode. */ + if (fieldFormat.isPrimitive()) { + fieldFormat = fieldFormat.getWrapperFormat(); + } + } + } else { + format = catalog.getFormat(o.getClass(), + false /*checkEntitySubclassIndexes*/); + } + if (fieldFormat != format) { + throw new IllegalArgumentException + ("The key field object class (" + o.getClass().getName() + + ") must be the field's declared class: " + + fieldFormat.getClassName()); + } + + /* Write the object value (no formatId is written for keys). */ + fieldFormat.writeObject(o, this, rawAccess); + } + + /** + * @see EntityOutput#registerPriKeyObject + */ + public void registerPriKeyObject(Object o) { + + /* + * PRI_KEY_VISITED_OFFSET is used as the visited offset to indicate + * that the visited object is stored in the primary key byte array. + */ + visited.put(o, RecordInput.PRI_KEY_VISITED_OFFSET); + } + + /** + * Registers the top level entity before writing it, to allow nested fields + * to reference their parent entity. 
[#17525] + */ + public void registerEntity(Object entity) { + assert size() == 0; + visited.put(entity, size()); + } + + /** + * @see EntityOutput#writeArrayLength + */ + public void writeArrayLength(int length) { + writePackedInt(length); + } + + /** + * @see EntityOutput#writeEnumConstant + */ + public void writeEnumConstant(String[] names, int index) { + writePackedInt(index); + } +} diff --git a/src/com/sleepycat/persist/impl/ReflectionAccessor.java b/src/com/sleepycat/persist/impl/ReflectionAccessor.java new file mode 100644 index 0000000..3a03a52 --- /dev/null +++ b/src/com/sleepycat/persist/impl/ReflectionAccessor.java @@ -0,0 +1,537 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.AccessibleObject; +import java.lang.reflect.Array; +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Modifier; +import java.util.List; + +import com.sleepycat.compat.DbCompat; + +/** + * Implements Accessor using reflection. + * + * @author Mark Hayes + */ +class ReflectionAccessor implements Accessor { + + private static final FieldAccess[] EMPTY_KEYS = {}; + + private Class type; + private Accessor superAccessor; + private Constructor constructor; + private FieldAccess priKey; + private FieldAccess[] secKeys; + private FieldAccess[] nonKeys; + + private ReflectionAccessor(Class type, Accessor superAccessor) { + this.type = type; + this.superAccessor = superAccessor; + try { + constructor = type.getDeclaredConstructor(); + } catch (NoSuchMethodException e) { + throw DbCompat.unexpectedState(type.getName()); + } + if (!Modifier.isPublic(type.getModifiers()) || + !Modifier.isPublic(constructor.getModifiers())) { + setAccessible(constructor, type.getName() + "()"); + } + } + + /** + * Creates an accessor for a complex type. + */ + ReflectionAccessor(Catalog catalog, + Class type, + Accessor superAccessor, + FieldInfo priKeyField, + List secKeyFields, + List nonKeyFields) { + this(type, superAccessor); + if (priKeyField != null) { + priKey = getField(catalog, priKeyField, + true /*isRequiredKeyField*/); + } else { + priKey = null; + } + if (secKeyFields.size() > 0) { + secKeys = getFields(catalog, secKeyFields, + false /*isRequiredKeyField*/); + } else { + secKeys = EMPTY_KEYS; + } + if (nonKeyFields.size() > 0) { + nonKeys = getFields(catalog, nonKeyFields, + false /*isRequiredKeyField*/); + } else { + nonKeys = EMPTY_KEYS; + } + } + + /** + * Creates an accessor for a composite key type. 
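For orientation, a hypothetical composite key class (not from this codebase) of the kind this constructor receives; its fields are all passed to getFields with isRequiredKeyField=true, so each one is accessed through KeyObjectAccess or PrimitiveAccess rather than plain ObjectAccess.

    import com.sleepycat.persist.model.KeyField;
    import com.sleepycat.persist.model.Persistent;

    // Hypothetical composite key: @KeyField gives the fields a fixed order,
    // and the reflection accessor treats all of them as required key fields.
    @Persistent
    class FullNameKey {
        @KeyField(1) String lastName;
        @KeyField(2) String firstName;
    }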
+ */ + ReflectionAccessor(Catalog catalog, + Class type, + List fieldInfos) { + this(type, null); + priKey = null; + secKeys = EMPTY_KEYS; + nonKeys = getFields(catalog, fieldInfos, true /*isRequiredKeyField*/); + } + + private FieldAccess[] getFields(Catalog catalog, + List fieldInfos, + boolean isRequiredKeyField) { + int index = 0; + FieldAccess[] fields = new FieldAccess[fieldInfos.size()]; + for (FieldInfo info : fieldInfos) { + fields[index] = getField(catalog, info, isRequiredKeyField); + index += 1; + } + return fields; + } + + private FieldAccess getField(Catalog catalog, + FieldInfo fieldInfo, + boolean isRequiredKeyField) { + Field field; + try { + field = type.getDeclaredField(fieldInfo.getName()); + } catch (NoSuchFieldException e) { + throw DbCompat.unexpectedException(e); + } + if (!Modifier.isPublic(type.getModifiers()) || + !Modifier.isPublic(field.getModifiers())) { + setAccessible(field, field.getName()); + } + Class fieldCls = field.getType(); + if (fieldCls.isPrimitive()) { + assert SimpleCatalog.isSimpleType(fieldCls); + return new PrimitiveAccess + (field, (SimpleFormat) catalog.getFormat(fieldCls.getName())); + } else if (isRequiredKeyField) { + Format format = catalog.getFormat(fieldInfo.getClassName()); + assert format != null; + return new KeyObjectAccess(field, format); + } else if (fieldCls == String.class) { + return new StringAccess(field); + } else { + return new ObjectAccess(field); + } + } + + private void setAccessible(AccessibleObject object, String memberName) { + try { + object.setAccessible(true); + } catch (SecurityException e) { + throw new IllegalStateException + ("Unable to access non-public member: " + + type.getName() + '.' + memberName + + ". Please configure the Java Security Manager setting: " + + " ReflectPermission suppressAccessChecks", e); + } + } + + public Object newInstance() { + try { + return constructor.newInstance(); + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } catch (InstantiationException e) { + throw DbCompat.unexpectedException(e); + } catch (InvocationTargetException e) { + throw DbCompat.unexpectedException(e); + } + } + + public Object newArray(int len) { + return Array.newInstance(type, len); + } + + public boolean isPriKeyFieldNullOrZero(Object o) { + try { + if (priKey != null) { + return priKey.isNullOrZero(o); + } else if (superAccessor != null) { + return superAccessor.isPriKeyFieldNullOrZero(o); + } else { + throw DbCompat.unexpectedState("No primary key field"); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void writePriKeyField(Object o, EntityOutput output) + throws RefreshException { + + try { + if (priKey != null) { + priKey.write(o, output); + } else if (superAccessor != null) { + superAccessor.writePriKeyField(o, output); + } else { + throw DbCompat.unexpectedState("No primary key field"); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void readPriKeyField(Object o, EntityInput input) + throws RefreshException { + + try { + if (priKey != null) { + priKey.read(o, input); + } else if (superAccessor != null) { + superAccessor.readPriKeyField(o, input); + } else { + throw DbCompat.unexpectedState("No primary key field"); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void writeSecKeyFields(Object o, EntityOutput output) + throws RefreshException { + + try { + + /* + * In JE 5.0, String is treated as primitive 
type, so String does + * not need to be registered. [#19247] + */ + if (priKey != null && !priKey.isPrimitive && !priKey.isString) { + output.registerPriKeyObject(priKey.field.get(o)); + } + if (superAccessor != null) { + superAccessor.writeSecKeyFields(o, output); + } + for (int i = 0; i < secKeys.length; i += 1) { + secKeys[i].write(o, output); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void readSecKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + try { + if (priKey != null && !priKey.isPrimitive && !priKey.isString) { + input.registerPriKeyObject(priKey.field.get(o)); + } else if (priKey != null && priKey.isString) { + input.registerPriStringKeyObject(priKey.field.get(o)); + } + if (superLevel != 0 && superAccessor != null) { + superAccessor.readSecKeyFields + (o, input, startField, endField, superLevel - 1); + } else { + if (superLevel > 0) { + throw DbCompat.unexpectedState + ("Superclass does not exist"); + } + } + if (superLevel <= 0) { + for (int i = startField; + i <= endField && i < secKeys.length; + i += 1) { + secKeys[i].read(o, input); + } + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void writeNonKeyFields(Object o, EntityOutput output) + throws RefreshException { + + try { + if (superAccessor != null) { + superAccessor.writeNonKeyFields(o, output); + } + for (int i = 0; i < nonKeys.length; i += 1) { + nonKeys[i].write(o, output); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void readNonKeyFields(Object o, + EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + try { + if (superLevel != 0 && superAccessor != null) { + superAccessor.readNonKeyFields + (o, input, startField, endField, superLevel - 1); + } else { + if (superLevel > 0) { + throw DbCompat.unexpectedState + ("Superclass does not exist"); + } + } + if (superLevel <= 0) { + for (int i = startField; + i <= endField && i < nonKeys.length; + i += 1) { + nonKeys[i].read(o, input); + } + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void writeCompositeKeyFields(Object o, EntityOutput output) + throws RefreshException { + + try { + for (int i = 0; i < nonKeys.length; i += 1) { + nonKeys[i].write(o, output); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void readCompositeKeyFields(Object o, EntityInput input) + throws RefreshException { + + try { + for (int i = 0; i < nonKeys.length; i += 1) { + nonKeys[i].read(o, input); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public Object getField(Object o, + int field, + int superLevel, + boolean isSecField) { + if (superLevel > 0) { + return superAccessor.getField + (o, field, superLevel - 1, isSecField); + } + try { + Field fld = + isSecField ? secKeys[field].field : nonKeys[field].field; + return fld.get(o); + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void setField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value) { + if (superLevel > 0) { + superAccessor.setField + (o, field, superLevel - 1, isSecField, value); + return; + } + try { + Field fld = + isSecField ? 
secKeys[field].field : nonKeys[field].field; + fld.set(o, value); + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + public void setPriField(Object o, Object value) { + try { + if (priKey != null) { + priKey.field.set(o, value); + } else if (superAccessor != null) { + superAccessor.setPriField(o, value); + } else { + throw DbCompat.unexpectedState("No primary key field"); + } + } catch (IllegalAccessException e) { + throw DbCompat.unexpectedException(e); + } + } + + /** + * Abstract base class for field access classes. + */ + private static abstract class FieldAccess { + + Field field; + boolean isPrimitive; + boolean isString = false; + + FieldAccess(Field field) { + this.field = field; + isPrimitive = field.getType().isPrimitive(); + isString = + field.getType().getName().equals(String.class.getName()); + } + + /** + * Writes a field. + */ + abstract void write(Object o, EntityOutput out) + throws IllegalAccessException, RefreshException; + + /** + * Reads a field. + */ + abstract void read(Object o, EntityInput in) + throws IllegalAccessException, RefreshException; + + /** + * Returns whether a field is null (for reference types) or zero (for + * primitive integer types). This implementation handles the reference + * types. + */ + boolean isNullOrZero(Object o) + throws IllegalAccessException { + + return field.get(o) == null; + } + } + + /** + * Access for fields with object types. + */ + private static class ObjectAccess extends FieldAccess { + + ObjectAccess(Field field) { + super(field); + } + + @Override + void write(Object o, EntityOutput out) + throws IllegalAccessException, RefreshException { + + out.writeObject(field.get(o), null); + } + + @Override + void read(Object o, EntityInput in) + throws IllegalAccessException, RefreshException { + + field.set(o, in.readObject()); + } + } + + /** + * Access for primary key fields and composite key fields with object + * types. + */ + private static class KeyObjectAccess extends FieldAccess { + + private Format format; + + KeyObjectAccess(Field field, Format format) { + super(field); + this.format = format; + } + + @Override + void write(Object o, EntityOutput out) + throws IllegalAccessException, RefreshException { + + out.writeKeyObject(field.get(o), format); + } + + @Override + void read(Object o, EntityInput in) + throws IllegalAccessException, RefreshException { + + field.set(o, in.readKeyObject(format)); + } + } + + /** + * Access for String fields that are not primary key fields or composite + * key fields with object types. + */ + private static class StringAccess extends FieldAccess { + StringAccess(Field field) { + super(field); + } + + @Override + void write(Object o, EntityOutput out) + throws IllegalAccessException, RefreshException { + + out.writeString((String) field.get(o)); + } + + @Override + void read(Object o, EntityInput in) + throws IllegalAccessException, RefreshException { + + field.set(o, in.readStringObject()); + } + } + + /** + * Access for fields with primitive types.
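One detail of the PrimitiveAccess class below is worth spelling out: its isNullOrZero override calls Field.getLong, and because getLong applies a widening conversion, that single call covers byte, short, int, char and long key fields. A standalone illustration (not JE code):

    import java.lang.reflect.Field;

    public class GetLongDemo {
        static class Holder { private int id; }    // id defaults to 0

        public static void main(String[] args) throws Exception {
            Field f = Holder.class.getDeclaredField("id");
            f.setAccessible(true);
            Holder h = new Holder();
            System.out.println(f.getLong(h) == 0); // true: "null or zero" key
            f.setInt(h, 7);
            System.out.println(f.getLong(h) == 0); // false
        }
    }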
+ */ + private static class PrimitiveAccess extends FieldAccess { + + private SimpleFormat format; + + PrimitiveAccess(Field field, SimpleFormat format) { + super(field); + this.format = format; + } + + @Override + void write(Object o, EntityOutput out) + throws IllegalAccessException { + + format.writePrimitiveField(o, out, field); + } + + @Override + void read(Object o, EntityInput in) + throws IllegalAccessException, RefreshException { + + format.readPrimitiveField(o, in, field); + } + + @Override + boolean isNullOrZero(Object o) + throws IllegalAccessException { + + return field.getLong(o) == 0; + } + } +} diff --git a/src/com/sleepycat/persist/impl/RefreshException.java b/src/com/sleepycat/persist/impl/RefreshException.java new file mode 100644 index 0000000..8c9c704 --- /dev/null +++ b/src/com/sleepycat/persist/impl/RefreshException.java @@ -0,0 +1,142 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +/** + * Thrown and handled internally when metadata must be refreshed on a Replica. + * + * There are several scenarios for refreshing DPL metadata: + * + * 1. Read entity record on Replica that has stale in-memory metadata. + * + * When an entity record that references new metadata (for example, a never + * before encountered class) is written on the Master, the metadata is + * written and replicated prior to writing and replicating the entity + * record. However, the Replica's in-memory cache of metadata (the + * PersistCatalog object) is not synchronously updated when the metadata is + * replicated. When the entity record that references the newly replicated + * metadata is read on the Replica, the DPL must refresh the in-memory + * metadata cache by reading it from the catalog database. + * + * Note that this scenario occurs even without class evolution/upgrade, for + * two reasons. First, the Master does not write all metadata at once; + * metadata is added to the catalog incrementally as new persistent classes + * are encountered. Second, even when all metadata is written initially by + * the Master, the Replica may read the catalog before the Master has + * completed metadata updates. + * + * Implementation: + * + PersistCatalog.getFormat(int) throws RefreshException when the given + * format ID is not in the in-memory catalog. + * + The binding method that is calling getFormat catches RefreshException, + * calls RefreshException.refresh to read the updated metadata, and + * retries the operation. + * + * Tests: + * c.s.je.rep.persist.test.UpgradeTest.testIncrementalMetadataChanges + * c.s.je.rep.persist.test.UpgradeTest.testUpgrade + * + * 2. Write entity record on Master that is in Replica Upgrade Mode. + * + * When a Replica is upgraded with new persistent classes (see + * evolve/package.html doc) the DPL will evolve the existing metadata and + * update the in-memory metadata cache (PersistCatalog object), but will not + * write the metadata; instead, it will enter Replica Upgrade Mode.
In this + * mode, the Replica will convert old format data to new format data as + * records are read, using the in-memory evolved metadata. This allows the + * Replica application to perform entity read operations with the new + * persistent classes, during the upgrade process. + * + * When this Replica is elected Master, the application will begin writing + * entity records. Note that the new metadata has not yet been written to + * the catalog database. In Replica Upgrade Mode, the current in-memory + * metadata cannot be written to disk, since the catalog database may be + * stale, i.e., it may have been updated by the Master after the Replica's + * in-memory metadata was evolved. Therefore, before the first entity + * record is written, the newly elected Master must read the latest + * metadata, perform evolution of the metadata again, write the metadata, + * and then write the entity record. + * + * Implementation: + * + The catalog enters Replica Upgrade Mode when a new or evolved format is + * added to the catalog, and a ReplicaWriteException occurs when + * attempting to write the metadata. Replica Upgrade Mode is defined as + * when the number of in-memory formats is greater than the number of + * stored formats. + * + Before an entity record is written, PersistEntityBinding.objectToData + * is called to convert the entity object to record data. + * + objectToData calls PersistCatalog.checkWriteInReplicaUpgradeMode, + * which throws RefreshException in Replica Upgrade Mode. + * + objectToData catches RefreshException, calls RefreshException.refresh + * to read the updated metadata, and retries the operation. + * + * Tests: + * c.s.je.rep.persist.test.UpgradeTest.testElectedMasterWithStaleMetadata + * c.s.je.rep.persist.test.UpgradeTest.testRefreshAfterFirstWrite + * c.s.je.rep.persist.test.UpgradeTest.testUpgrade + * + * 3. Write metadata on Master that is not in Replica Upgrade Mode. + * + * This third scenario is more unusual than the first two. It occurs when + * a Replica with stale metadata is elected Master, but is not in Replica + * Upgrade Mode. The new Master must refresh metadata before writing + * metadata. See the test case for more information. + * + * Implementation: + * + On a Master with stale metadata, the application tries to write an + * entity record that refers to a class that has not been encountered + * before. + * + Before the entity record is written, PersistEntityBinding.objectToData + * is called to convert the entity object to record data. + * + objectToData calls PersistCatalog.addNewFormat during serialization, + * which attempts to write metadata by calling writeDataCheckStale. + * + writeDataCheckStale reads the existing metadata and detects that it + * has changed since metadata was last read, and throws RefreshException. + * + objectToData catches RefreshException, calls RefreshException.refresh + * to read the updated metadata, and retries the operation. + * + * Tests: + * c.s.je.rep.persist.test.UpgradeTest.testRefreshBeforeWrite + */ +public class RefreshException extends Exception { + + private final Store store; + private final PersistCatalog catalog; + private final int formatId; + + RefreshException(final Store store, + final PersistCatalog catalog, + final int formatId) { + this.store = store; + this.catalog = catalog; + this.formatId = formatId; + } + + /** + * This method must be called to handle this exception in the binding + * methods, after the stack has unwound.
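Schematically, the catch/refresh/retry idiom described in the scenarios above looks like the following sketch; RetrySketch and convert are illustrative stand-ins, not the actual binding API.

    // Sketch of the catch/refresh/retry idiom; only RefreshException.refresh
    // is real JE API, the rest is an assumed stand-in for the binding method.
    abstract class RetrySketch {
        abstract Object convert(Object entity) throws RefreshException;

        Object convertWithRetry(Object entity) {
            try {
                return convert(entity);        // may throw RefreshException
            } catch (RefreshException e) {
                e.refresh();                   // re-read metadata from the catalog
                try {
                    return convert(entity);    // retry exactly once
                } catch (RefreshException e2) {
                    // A second failure suggests corruption, not stale metadata.
                    throw new IllegalStateException(e2);
                }
            }
        }
    }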
The binding methods should retry + * the operation once after calling this method. If the operation fails + * again, then corruption rather than stale metadata is the likely cause + * of the problem, and an exception will be thrown to that effect. + * [#16655] + */ + public PersistCatalog refresh() { + return store.refresh(catalog, formatId, this); + } + + @Override + public String getMessage() { + return "formatId=" + formatId; + } +} diff --git a/src/com/sleepycat/persist/impl/SimpleCatalog.java b/src/com/sleepycat/persist/impl/SimpleCatalog.java new file mode 100644 index 0000000..2716696 --- /dev/null +++ b/src/com/sleepycat/persist/impl/SimpleCatalog.java @@ -0,0 +1,276 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.util.ClassResolver; + +/** + * A static catalog containing simple types only. Once created, this catalog + * is immutable. + * + * For bindings accessed by a PersistComparator during recovery, the + * SimpleCatalog provides formats for all simple types. To reduce redundant + * format objects, the SimpleCatalog's formats are copied when creating a + * regular PersistCatalog. + * + * This class also contains utility methods for dealing with primitives. 
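Those utility methods exist because Class.forName cannot resolve primitive keywords such as "int". A minimal sketch of the two-step mapping used below (keyword to primitive class, then primitive class to wrapper for key lookups):

    import java.util.HashMap;
    import java.util.Map;

    public class KeywordDemo {
        public static void main(String[] args) {
            Map<String, Class<?>> keywordToPrimitive = new HashMap<>();
            keywordToPrimitive.put("int", Integer.TYPE);
            Map<Class<?>, Class<?>> primitiveToWrapper = new HashMap<>();
            primitiveToWrapper.put(Integer.TYPE, Integer.class);

            Class<?> cls = keywordToPrimitive.get("int");
            System.out.println(cls);                          // int
            System.out.println(primitiveToWrapper.get(cls));  // class java.lang.Integer
        }
    }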
+ * + * @author Mark Hayes + */ +public class SimpleCatalog implements Catalog { + + private static final Map keywordToPrimitive; + static { + keywordToPrimitive = new HashMap(8); + keywordToPrimitive.put("boolean", Boolean.TYPE); + keywordToPrimitive.put("char", Character.TYPE); + keywordToPrimitive.put("byte", Byte.TYPE); + keywordToPrimitive.put("short", Short.TYPE); + keywordToPrimitive.put("int", Integer.TYPE); + keywordToPrimitive.put("long", Long.TYPE); + keywordToPrimitive.put("float", Float.TYPE); + keywordToPrimitive.put("double", Double.TYPE); + } + + private static final Map primitiveTypeToWrapper; + static { + primitiveTypeToWrapper = new HashMap(8); + primitiveTypeToWrapper.put(Boolean.TYPE, Boolean.class); + primitiveTypeToWrapper.put(Character.TYPE, Character.class); + primitiveTypeToWrapper.put(Byte.TYPE, Byte.class); + primitiveTypeToWrapper.put(Short.TYPE, Short.class); + primitiveTypeToWrapper.put(Integer.TYPE, Integer.class); + primitiveTypeToWrapper.put(Long.TYPE, Long.class); + primitiveTypeToWrapper.put(Float.TYPE, Float.class); + primitiveTypeToWrapper.put(Double.TYPE, Double.class); + } + + private static final SimpleCatalog instance = new SimpleCatalog(null); + + static boolean isSimpleType(Class type) { + return instance.formatMap.containsKey(type.getName()); + } + + static Class primitiveToWrapper(Class type) { + Class cls = primitiveTypeToWrapper.get(type); + if (cls == null) { + throw DbCompat.unexpectedState(type.getName()); + } + return cls; + } + + public static Class resolveClass(String className, ClassLoader loader) + throws ClassNotFoundException { + + Class cls = keywordToPrimitive.get(className); + if (cls == null) { + cls = ClassResolver.resolveClass(className, loader); + } + return cls; + } + + public static Class resolveKeyClass(String className, ClassLoader loader) { + Class cls = keywordToPrimitive.get(className); + if (cls != null) { + cls = primitiveTypeToWrapper.get(cls); + } else { + try { + cls = ClassResolver.resolveClass(className, loader); + } catch (ClassNotFoundException e) { + throw new IllegalArgumentException + ("Key class not found: " + className); + } + } + return cls; + } + + public static String keyClassName(String className) { + Class cls = keywordToPrimitive.get(className); + if (cls != null) { + cls = primitiveTypeToWrapper.get(cls); + return cls.getName(); + } else { + return className; + } + } + + static List getAllSimpleFormats(ClassLoader loader) { + return new ArrayList(new SimpleCatalog(loader).formatList); + } + + static boolean addMissingSimpleFormats(ClassLoader loader, + List copyToList) { + boolean anyCopied = false; + SimpleCatalog tempCatalog = null; + for (int i = 0; i <= Format.ID_PREDEFINED; i += 1) { + final Format thisFormat = instance.formatList.get(i); + final Format otherFormat = copyToList.get(i); + if (thisFormat != null && otherFormat == null) { + assert thisFormat.getWrapperFormat() == null; + if (tempCatalog == null) { + tempCatalog = new SimpleCatalog(loader); + } + copyToList.set(i, tempCatalog.formatList.get(i)); + anyCopied = true; + } + } + return anyCopied; + } + + private final ClassLoader classLoader; + private final List formatList; + private final Map formatMap; + + SimpleCatalog(final ClassLoader classLoader) { + this.classLoader = classLoader; + + /* + * Reserve slots for all predefined IDs, so that the next ID assigned + * will be Format.ID_PREDEFINED plus one.
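The reservation pattern described here, sketched standalone; ID_PREDEFINED's real value lives in Format, so 30 below is only a stand-in.

    import java.util.ArrayList;
    import java.util.List;

    public class PreallocDemo {
        public static void main(String[] args) {
            final int ID_PREDEFINED = 30;              // stand-in value
            List<Object> formats = new ArrayList<>(ID_PREDEFINED * 2);
            for (int i = 0; i <= ID_PREDEFINED; i += 1) {
                formats.add(null);                     // reserve the slot
            }
            formats.set(7, "FInt");                    // safe: slot already exists
            System.out.println(formats.size());        // ID_PREDEFINED + 1
        }
    }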
+ */ + int initCapacity = Format.ID_PREDEFINED * 2; + formatList = new ArrayList(initCapacity); + formatMap = new HashMap(initCapacity); + + for (int i = 0; i <= Format.ID_PREDEFINED; i += 1) { + formatList.add(null); + } + + /* Initialize all predefined formats. */ + setFormat(Format.ID_BOOL, new SimpleFormat.FBool(this, true)); + setFormat(Format.ID_BOOL_W, new SimpleFormat.FBool(this, false)); + setFormat(Format.ID_BYTE, new SimpleFormat.FByte(this, true)); + setFormat(Format.ID_BYTE_W, new SimpleFormat.FByte(this, false)); + setFormat(Format.ID_SHORT, new SimpleFormat.FShort(this, true)); + setFormat(Format.ID_SHORT_W, new SimpleFormat.FShort(this, false)); + setFormat(Format.ID_INT, new SimpleFormat.FInt(this, true)); + setFormat(Format.ID_INT_W, new SimpleFormat.FInt(this, false)); + setFormat(Format.ID_LONG, new SimpleFormat.FLong(this, true)); + setFormat(Format.ID_LONG_W, new SimpleFormat.FLong(this, false)); + setFormat(Format.ID_FLOAT, new SimpleFormat.FFloat(this, true)); + setFormat(Format.ID_FLOAT_W, new SimpleFormat.FFloat(this, false)); + setFormat(Format.ID_DOUBLE, new SimpleFormat.FDouble(this, true)); + setFormat(Format.ID_DOUBLE_W, new SimpleFormat.FDouble(this, false)); + setFormat(Format.ID_CHAR, new SimpleFormat.FChar(this, true)); + setFormat(Format.ID_CHAR_W, new SimpleFormat.FChar(this, false)); + setFormat(Format.ID_STRING, new SimpleFormat.FString(this)); + setFormat(Format.ID_BIGINT, new SimpleFormat.FBigInt(this)); + setFormat(Format.ID_BIGDEC, new SimpleFormat.FBigDec(this)); + setFormat(Format.ID_DATE, new SimpleFormat.FDate(this)); + + /* Tell primitives about their wrapper class. */ + setWrapper(Format.ID_BOOL, Format.ID_BOOL_W); + setWrapper(Format.ID_BYTE, Format.ID_BYTE_W); + setWrapper(Format.ID_SHORT, Format.ID_SHORT_W); + setWrapper(Format.ID_INT, Format.ID_INT_W); + setWrapper(Format.ID_LONG, Format.ID_LONG_W); + setWrapper(Format.ID_FLOAT, Format.ID_FLOAT_W); + setWrapper(Format.ID_DOUBLE, Format.ID_DOUBLE_W); + setWrapper(Format.ID_CHAR, Format.ID_CHAR_W); + } + + /** + * Sets a format for which space in the formatList has been preallocated, + * and makes it the current format for the class. + */ + private void setFormat(int id, SimpleFormat format) { + format.setId(id); + format.initializeIfNeeded(this, null /*model*/); + formatList.set(id, format); + formatMap.put(format.getClassName(), format); + } + + /** + * Tells a primitive format about the format for its corresponding + * primitive wrapper class. 
+ */ + private void setWrapper(int primitiveId, int wrapperId) { + SimpleFormat primitiveFormat = formatList.get(primitiveId); + SimpleFormat wrapperFormat = formatList.get(wrapperId); + primitiveFormat.setWrapperFormat(wrapperFormat); + } + + public int getInitVersion(Format format, boolean forReader) { + return Catalog.CURRENT_VERSION; + } + + public Format getFormat(int formatId, boolean expectStored) { + Format format; + try { + format = formatList.get(formatId); + if (format == null) { + throw DbCompat.unexpectedState + ("Not a simple type: " + formatId); + } + return format; + } catch (NoSuchElementException e) { + throw DbCompat.unexpectedState + ("Not a simple type: " + formatId); + } + } + + public Format getFormat(Class cls, boolean checkEntitySubclassIndexes) { + Format format = formatMap.get(cls.getName()); + if (format == null) { + throw new IllegalArgumentException + ("Not a simple type: " + cls.getName()); + } + return format; + } + + public Format getFormat(String className) { + return formatMap.get(className); + } + + public Format createFormat(String clsName, + Map newFormats) { + throw DbCompat.unexpectedState(); + } + + public Format createFormat(Class type, Map newFormats) { + throw DbCompat.unexpectedState(); + } + + public boolean isRawAccess() { + return false; + } + + public Object convertRawObject(RawObject o, IdentityHashMap converted) { + throw DbCompat.unexpectedState(); + } + + public Class resolveClass(String clsName) + throws ClassNotFoundException { + + return SimpleCatalog.resolveClass(clsName, classLoader); + } + + public Class resolveKeyClass(String clsName) { + return SimpleCatalog.resolveKeyClass(clsName, classLoader); + } + + /* Registering proxy is not allowed for SimpleType. */ + public static boolean allowRegisterProxy(Class type) { + return !isSimpleType(type); + } +} diff --git a/src/com/sleepycat/persist/impl/SimpleFormat.java b/src/com/sleepycat/persist/impl/SimpleFormat.java new file mode 100644 index 0000000..465cabb --- /dev/null +++ b/src/com/sleepycat/persist/impl/SimpleFormat.java @@ -0,0 +1,939 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.lang.reflect.Field; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Date; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.persist.model.EntityModel; + +/** + * Format for simple types, including primitives. Additional methods are + * included to optimize the handling of primitives. Other classes such as + * PrimitiveArrayFormat and ReflectAccessor take advantage of these methods. 
+ * + * @author Mark Hayes + */ +public abstract class SimpleFormat extends Format { + + private static final long serialVersionUID = 4595245575868697702L; + + private final boolean primitive; + private SimpleFormat wrapperFormat; + + SimpleFormat(Catalog catalog, Class type, boolean primitive) { + super(catalog, type); + this.primitive = primitive; + } + + void setWrapperFormat(SimpleFormat wrapperFormat) { + this.wrapperFormat = wrapperFormat; + } + + @Override + Format getWrapperFormat() { + return wrapperFormat; + } + + @Override + public boolean isSimple() { + return true; + } + + @Override + public boolean isPrimitive() { + return primitive; + } + + @Override + void collectRelatedFormats(Catalog catalog, + Map newFormats) { + } + + @Override + void initialize(Catalog catalog, EntityModel model, int initVersion) { + } + + @Override + public Object readObject(Object o, EntityInput input, boolean rawAccess) { + /* newInstance reads the value -- do nothing here. */ + return o; + } + + @Override + boolean evolve(Format newFormat, Evolver evolver) { + evolver.useOldFormat(this, newFormat); + return true; + } + + /* -- Begin methods to be overridden by primitive formats only. -- */ + + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + throw DbCompat.unexpectedState(); + } + + void writePrimitiveArray(Object o, EntityOutput output) { + throw DbCompat.unexpectedState(); + } + + int getPrimitiveLength() { + throw DbCompat.unexpectedState(); + } + + /** + * @throws IllegalAccessException from subclasses. + */ + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + throw DbCompat.unexpectedState(); + } + + /** + * @throws IllegalAccessException from subclasses. + */ + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + throw DbCompat.unexpectedState(); + } + + /* -- End methods to be overridden by primitive formats only. -- */ + + void skipPrimitiveArray(int len, RecordInput input) { + input.skipFast(len * getPrimitiveLength()); + } + + void copySecMultiKeyPrimitiveArray(int len, + RecordInput input, + Set results) { + int primLen = getPrimitiveLength(); + for (int i = 0; i < len; i += 1) { + DatabaseEntry entry = new DatabaseEntry + (input.getBufferBytes(), input.getBufferOffset(), primLen); + results.add(entry); + input.skipFast(primLen); + } + } + + public static class FBool extends SimpleFormat { + + private static final long serialVersionUID = -7724949525068533451L; + + FBool(Catalog catalog, boolean primitive) { + super(catalog, primitive ? 
Boolean.TYPE : Boolean.class, + primitive); + } + + @Override + Object newArray(int len) { + return new Boolean[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Boolean.valueOf(input.readBoolean()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeBoolean(((Boolean) o).booleanValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(1); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast(input.readFast()); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + boolean[] a = new boolean[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readBoolean(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + boolean[] a = (boolean[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeBoolean(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 1; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setBoolean(o, input.readBoolean()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeBoolean(field.getBoolean(o)); + } + } + + public static class FByte extends SimpleFormat { + + private static final long serialVersionUID = 3651752958101447257L; + + FByte(Catalog catalog, boolean primitive) { + super(catalog, primitive ? Byte.TYPE : Byte.class, primitive); + } + + @Override + Object newArray(int len) { + return new Byte[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Byte.valueOf(input.readByte()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeByte(((Number) o).byteValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(1); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast(input.readFast()); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + byte[] a = new byte[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readByte(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + byte[] a = (byte[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeByte(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 1; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setByte(o, input.readByte()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeByte(field.getByte(o)); + } + + @Override + Format getSequenceKeyFormat() { + return this; + } + } + + public static class FShort extends SimpleFormat { + + private static final long serialVersionUID = -4909138198491785624L; + + FShort(Catalog catalog, boolean primitive) { + super(catalog, primitive ? 
Short.TYPE : Short.class, primitive); + } + + @Override + Object newArray(int len) { + return new Short[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Short.valueOf(input.readShort()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeShort(((Number) o).shortValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(2); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + short[] a = new short[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readShort(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + short[] a = (short[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeShort(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 2; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setShort(o, input.readShort()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeShort(field.getShort(o)); + } + + @Override + Format getSequenceKeyFormat() { + return this; + } + } + + public static class FInt extends SimpleFormat { + + private static final long serialVersionUID = 2695910006049980013L; + + FInt(Catalog catalog, boolean primitive) { + super(catalog, primitive ? Integer.TYPE : Integer.class, + primitive); + } + + @Override + Object newArray(int len) { + return new Integer[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Integer.valueOf(input.readInt()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeInt(((Number) o).intValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(4); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + int[] a = new int[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readInt(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + int[] a = (int[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeInt(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 4; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setInt(o, input.readInt()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeInt(field.getInt(o)); + } + + @Override + Format getSequenceKeyFormat() { + return this; + } + } + + public static class FLong extends SimpleFormat { + + private static final long serialVersionUID = 1872661106534776520L; + + FLong(Catalog catalog, boolean primitive) { + 
super(catalog, primitive ? Long.TYPE : Long.class, primitive); + } + + @Override + Object newArray(int len) { + return new Long[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Long.valueOf(input.readLong()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeLong(((Number) o).longValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(8); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast + (input.getBufferBytes(), input.getBufferOffset(), 8); + input.skipFast(8); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + long[] a = new long[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readLong(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + long[] a = (long[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeLong(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 8; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setLong(o, input.readLong()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeLong(field.getLong(o)); + } + + @Override + Format getSequenceKeyFormat() { + return this; + } + } + + public static class FFloat extends SimpleFormat { + + private static final long serialVersionUID = 1033413049495053602L; + + FFloat(Catalog catalog, boolean primitive) { + super(catalog, primitive ? 
Float.TYPE : Float.class, primitive); + } + + @Override + Object newArray(int len) { + return new Float[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Float.valueOf(input.readSortedFloat()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeSortedFloat(((Number) o).floatValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(4); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + float[] a = new float[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readSortedFloat(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + float[] a = (float[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeSortedFloat(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 4; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setFloat(o, input.readSortedFloat()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeSortedFloat(field.getFloat(o)); + } + } + + public static class FDouble extends SimpleFormat { + + private static final long serialVersionUID = 646904456811041423L; + + FDouble(Catalog catalog, boolean primitive) { + super(catalog, primitive ? 
Double.TYPE : Double.class, primitive); + } + + @Override + Object newArray(int len) { + return new Double[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Double.valueOf(input.readSortedDouble()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeSortedDouble(((Number) o).doubleValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(8); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast + (input.getBufferBytes(), input.getBufferOffset(), 8); + input.skipFast(8); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + double[] a = new double[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readSortedDouble(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + double[] a = (double[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeSortedDouble(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 8; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setDouble(o, input.readSortedDouble()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeSortedDouble(field.getDouble(o)); + } + } + + public static class FChar extends SimpleFormat { + + private static final long serialVersionUID = -7609118195770005374L; + + FChar(Catalog catalog, boolean primitive) { + super(catalog, primitive ? Character.TYPE : Character.class, + primitive); + } + + @Override + Object newArray(int len) { + return new Character[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return Character.valueOf(input.readChar()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeChar(((Character) o).charValue()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(2); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast(input.readFast()); + output.writeFast(input.readFast()); + } + + @Override + Object newPrimitiveArray(int len, EntityInput input) + throws RefreshException { + + char[] a = new char[len]; + for (int i = 0; i < len; i += 1) { + a[i] = input.readChar(); + } + return a; + } + + @Override + void writePrimitiveArray(Object o, EntityOutput output) { + char[] a = (char[]) o; + int len = a.length; + output.writeArrayLength(len); + for (int i = 0; i < len; i += 1) { + output.writeChar(a[i]); + } + } + + @Override + int getPrimitiveLength() { + return 2; + } + + @Override + void readPrimitiveField(Object o, EntityInput input, Field field) + throws IllegalAccessException, RefreshException { + + field.setChar(o, input.readChar()); + } + + @Override + void writePrimitiveField(Object o, EntityOutput output, Field field) + throws IllegalAccessException { + + output.writeChar(field.getChar(o)); + } + } + + public static class FString extends SimpleFormat { + + private static final long serialVersionUID = 5710392786480064612L; + + FString(Catalog catalog) { + super(catalog, String.class, false); + } + + @Override + Object newArray(int len) { + return new String[len]; + } + + @Override + 
public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return input.readString(); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeString((String) o); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(input.getStringByteLength()); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + int len = input.getStringByteLength(); + output.writeFast + (input.getBufferBytes(), input.getBufferOffset(), len); + input.skipFast(len); + } + } + + public static class FBigInt extends SimpleFormat { + + private static final long serialVersionUID = -5027098112507644563L; + + FBigInt(Catalog catalog) { + super(catalog, BigInteger.class, false); + } + + @Override + Object newArray(int len) { + return new BigInteger[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return input.readBigInteger(); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeBigInteger((BigInteger) o); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(input.getBigIntegerByteLength()); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + int len = input.getBigIntegerByteLength(); + output.writeFast + (input.getBufferBytes(), input.getBufferOffset(), len); + input.skipFast(len); + } + } + + public static class FBigDec extends SimpleFormat { + private static final long serialVersionUID = 6108874887143696463L; + + FBigDec(Catalog catalog) { + super(catalog, BigDecimal.class, false); + } + + @Override + Object newArray(int len) { + return new BigDecimal[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return input.readSortedBigDecimal(); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeSortedBigDecimal((BigDecimal) o); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(input.getSortedBigDecimalByteLength()); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + int len = input.getSortedBigDecimalByteLength(); + output.writeFast + (input.getBufferBytes(), input.getBufferOffset(), len); + input.skipFast(len); + } + + @Override + public boolean allowEvolveFromProxy() { + return true; + } + } + + public static class FDate extends SimpleFormat { + + private static final long serialVersionUID = -5665773229869034145L; + + FDate(Catalog catalog) { + super(catalog, Date.class, false); + } + + @Override + Object newArray(int len) { + return new Date[len]; + } + + @Override + public Object newInstance(EntityInput input, boolean rawAccess) + throws RefreshException { + + return new Date(input.readLong()); + } + + @Override + void writeObject(Object o, EntityOutput output, boolean rawAccess) { + output.writeLong(((Date) o).getTime()); + } + + @Override + void skipContents(RecordInput input) { + input.skipFast(8); + } + + @Override + void copySecKey(RecordInput input, RecordOutput output) { + output.writeFast + (input.getBufferBytes(), input.getBufferOffset(), 8); + input.skipFast(8); + } + } +} diff --git a/src/com/sleepycat/persist/impl/Store.java b/src/com/sleepycat/persist/impl/Store.java new file mode 100644 index 0000000..2b97e8b --- /dev/null +++ b/src/com/sleepycat/persist/impl/Store.java @@ -0,0 +1,1903 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its 
affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.WeakHashMap; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.Durability; +/* */ +import com.sleepycat.je.Environment; +import com.sleepycat.je.ForeignKeyDeleteAction; +/* */ +import com.sleepycat.je.LockConflictException; +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.Sequence; +import com.sleepycat.je.SequenceConfig; +/* */ +import com.sleepycat.je.SequenceExistsException; +import com.sleepycat.je.SequenceNotFoundException; +/* */ +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +/* */ +import com.sleepycat.je.rep.NoConsistencyRequiredPolicy; +/* */ +import com.sleepycat.persist.DatabaseNamer; +import com.sleepycat.persist.IndexNotAvailableException; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.StoreExistsException; +import com.sleepycat.persist.StoreNotFoundException; +import com.sleepycat.persist.evolve.EvolveConfig; +import com.sleepycat.persist.evolve.EvolveEvent; +import com.sleepycat.persist.evolve.EvolveInternal; +import com.sleepycat.persist.evolve.EvolveListener; +import com.sleepycat.persist.evolve.EvolveStats; +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.model.DeleteAction; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.PrimaryKeyMetadata; +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKeyMetadata; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * Base implementation for EntityStore and RawStore. The methods here + * correspond directly to those in EntityStore; see EntityStore documentation + * for details. 
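+ *
+ * <p>A minimal usage sketch (illustrative only; the environment directory,
+ * store name and config flags here are hypothetical). Applications normally
+ * reach this class indirectly, through EntityStore:</p>
+ *
+ * <pre>
+ *  EnvironmentConfig envConfig = new EnvironmentConfig();
+ *  envConfig.setAllowCreate(true);
+ *  envConfig.setTransactional(true);
+ *  Environment env = new Environment(new File("/tmp/env"), envConfig);
+ *
+ *  StoreConfig storeConfig = new StoreConfig();
+ *  storeConfig.setAllowCreate(true);
+ *  storeConfig.setTransactional(true);
+ *  EntityStore store = new EntityStore(env, "myStore", storeConfig);
+ * </pre>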
+ *
+ * @author Mark Hayes
+ */
+public class Store {
+
+    public static final String NAME_SEPARATOR = "#";
+    private static final String NAME_PREFIX = "persist" + NAME_SEPARATOR;
+    private static final String DB_NAME_PREFIX = "com.sleepycat.persist.";
+    private static final String CATALOG_DB = DB_NAME_PREFIX + "formats";
+    private static final String SEQUENCE_DB = DB_NAME_PREFIX + "sequences";
+
+    private static Map<Environment, Map<String, PersistCatalog>> catalogPool =
+        new WeakHashMap<Environment, Map<String, PersistCatalog>>();
+
+    /* For unit testing. */
+    private static SyncHook syncHook;
+    public static boolean expectFlush;
+
+    private final Environment env;
+    private final boolean rawAccess;
+    private volatile PersistCatalog catalog;
+    private EntityModel model;
+    private final StoreConfig storeConfig;
+    private final String storeName;
+    private final String storePrefix;
+    private final Map<String, InternalPrimaryIndex> priIndexMap;
+    private final Map<String, InternalSecondaryIndex> secIndexMap;
+    private final Map<String, DatabaseConfig> priConfigMap;
+    private final Map<String, SecondaryConfig> secConfigMap;
+    private final Map<String, PersistKeyBinding> keyBindingMap;
+    private Database sequenceDb;
+    private final Map<String, Sequence> sequenceMap;
+    private final Map<String, SequenceConfig> sequenceConfigMap;
+    private final IdentityHashMap<Database, Object> deferredWriteDatabases;
+    private final Map<String, Set<String>> inverseRelatedEntityMap;
+    private final TransactionConfig autoCommitTxnConfig;
+    private final TransactionConfig autoCommitNoWaitTxnConfig;
+
+    public Store(Environment env,
+                 String storeName,
+                 StoreConfig config,
+                 boolean rawAccess)
+        throws StoreExistsException,
+               StoreNotFoundException,
+               IncompatibleClassException,
+               DatabaseException {
+
+        this.env = env;
+        this.storeName = storeName;
+        this.rawAccess = rawAccess;
+
+        if (env == null || storeName == null) {
+            throw new NullPointerException
+                ("env and storeName parameters must not be null");
+        }
+
+        storeConfig = (config != null) ?
+            config.clone() :
+            StoreConfig.DEFAULT;
+
+        autoCommitTxnConfig = new TransactionConfig();
+        autoCommitNoWaitTxnConfig = new TransactionConfig();
+        autoCommitNoWaitTxnConfig.setNoWait(true);
+        /* */
+        if (!storeConfig.getReplicated()) {
+            final Durability envDurability = env.getConfig().getDurability();
+            configForNonRepDb(autoCommitTxnConfig, envDurability);
+            configForNonRepDb(autoCommitNoWaitTxnConfig, envDurability);
+        }
+        /* */
+
+        model = storeConfig.getModel();
+
+        storePrefix = NAME_PREFIX + storeName + NAME_SEPARATOR;
+        priIndexMap = new HashMap<String, InternalPrimaryIndex>();
+        secIndexMap = new HashMap<String, InternalSecondaryIndex>();
+        priConfigMap = new HashMap<String, DatabaseConfig>();
+        secConfigMap = new HashMap<String, SecondaryConfig>();
+        keyBindingMap = new HashMap<String, PersistKeyBinding>();
+        sequenceMap = new HashMap<String, Sequence>();
+        sequenceConfigMap = new HashMap<String, SequenceConfig>();
+        deferredWriteDatabases = new IdentityHashMap<Database, Object>();
+
+        if (rawAccess) {
+            /* Open a read-only catalog that uses the stored model. */
+            if (model != null) {
+                throw new IllegalArgumentException
+                    ("A model may not be specified when opening a RawStore");
+            }
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            /* */
+            dbConfig.setReplicated(storeConfig.getReplicated());
+            /* */
+            dbConfig.setReadOnly(true);
+            dbConfig.setTransactional
+                (storeConfig.getTransactional());
+            catalog = new PersistCatalog
+                (env, storePrefix, storePrefix + CATALOG_DB, dbConfig,
+                 null /*model*/, storeConfig.getMutations(), rawAccess, this);
+        } else {
+            /* Open the shared catalog that uses the current model. */
+            synchronized (catalogPool) {
+                Map<String, PersistCatalog> catalogMap =
+                    catalogPool.get(env);
+                if (catalogMap == null) {
+                    catalogMap = new HashMap<String, PersistCatalog>();
+                    catalogPool.put(env, catalogMap);
+                }
+                catalog = catalogMap.get(storeName);
+                if (catalog != null) {
+                    catalog.openExisting();
+                } else {
+                    DatabaseConfig dbConfig = new DatabaseConfig();
+                    dbConfig.setAllowCreate(storeConfig.getAllowCreate());
+                    dbConfig.setExclusiveCreate
+                        (storeConfig.getExclusiveCreate());
+                    /* */
+                    dbConfig.setTemporary(storeConfig.getTemporary());
+                    dbConfig.setReplicated(storeConfig.getReplicated());
+                    /* */
+                    dbConfig.setReadOnly(storeConfig.getReadOnly());
+                    dbConfig.setTransactional
+                        (storeConfig.getTransactional());
+                    DbCompat.setTypeBtree(dbConfig);
+                    catalog = new PersistCatalog
+                        (env, storePrefix, storePrefix + CATALOG_DB, dbConfig,
+                         model, storeConfig.getMutations(), rawAccess, this);
+                    catalogMap.put(storeName, catalog);
+                }
+            }
+        }
+
+        /*
+         * If there is no model parameter, use the default or stored model
+         * obtained from the catalog.
+         */
+        model = catalog.getResolvedModel();
+
+        /*
+         * For each existing entity with a relatedEntity reference, create an
+         * inverse map (back pointer) from the class named in the relatedEntity
+         * to the class containing the secondary key. This is used to open the
+         * class containing the secondary key whenever we open the
+         * relatedEntity class, to configure foreign key constraints. Note that
+         * we do not need to update this map as new primary indexes are
+         * created, because opening the new index will set up the foreign key
+         * constraints. [#15358]
+         */
+        inverseRelatedEntityMap = new HashMap<String, Set<String>>();
+        List<Format> entityFormats = new ArrayList<Format>();
+        catalog.getEntityFormats(entityFormats);
+        for (Format entityFormat : entityFormats) {
+            EntityMetadata entityMeta = entityFormat.getEntityMetadata();
+            for (SecondaryKeyMetadata secKeyMeta :
+                 entityMeta.getSecondaryKeys().values()) {
+                String relatedClsName = secKeyMeta.getRelatedEntity();
+                if (relatedClsName != null) {
+                    Set<String> inverseClassNames =
+                        inverseRelatedEntityMap.get(relatedClsName);
+                    if (inverseClassNames == null) {
+                        inverseClassNames = new HashSet<String>();
+                        inverseRelatedEntityMap.put
+                            (relatedClsName, inverseClassNames);
+                    }
+                    inverseClassNames.add(entityMeta.getClassName());
+                }
+            }
+        }
+    }
+
+    public Environment getEnvironment() {
+        return env;
+    }
+
+    public StoreConfig getConfig() {
+        return storeConfig.clone();
+    }
+
+    public String getStoreName() {
+        return storeName;
+    }
+
+    /* */
+    public static Set<String> getStoreNames(Environment env)
+        throws DatabaseException {
+
+        Set<String> set = new HashSet<String>();
+        for (Object o : env.getDatabaseNames()) {
+            String s = (String) o;
+            if (s.startsWith(NAME_PREFIX)) {
+                int start = NAME_PREFIX.length();
+                int end = s.indexOf(NAME_SEPARATOR, start);
+                set.add(s.substring(start, end));
+            }
+        }
+        return set;
+    }
+    /* */
+
+    /**
+     * For unit testing.
+     */
+    public boolean isReplicaUpgradeMode() {
+        return catalog.isReplicaUpgradeMode();
+    }
+
+    public EntityModel getModel() {
+        return model;
+    }
+
+    public Mutations getMutations() {
+        return catalog.getMutations();
+    }
+
+    /**
+     * A getPrimaryIndex with extra parameters for opening a raw store.
+     * primaryKeyClass and entityClass are used for generic typing; for a raw
+     * store, these should always be Object.class and RawObject.class.
+     * primaryKeyClassName is used for consistency checking and should be null
+     * for a raw store only. entityClassName is used to identify the store and
+     * may not be null.
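+     *
+     * For example (hypothetical entity class; typed applications normally go
+     * through EntityStore.getPrimaryIndex instead):
+     *
+     * <pre>
+     *  // typed access:
+     *  getPrimaryIndex(Long.class, "java.lang.Long",
+     *                  Person.class, "com.example.Person");
+     *  // raw access:
+     *  getPrimaryIndex(Object.class, null,
+     *                  RawObject.class, "com.example.Person");
+     * </pre>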
+ */ + public synchronized PrimaryIndex + getPrimaryIndex(Class primaryKeyClass, + String primaryKeyClassName, + Class entityClass, + String entityClassName) + throws DatabaseException, IndexNotAvailableException { + + assert (rawAccess && entityClass == RawObject.class) || + (!rawAccess && entityClass != RawObject.class); + assert (rawAccess && primaryKeyClassName == null) || + (!rawAccess && primaryKeyClassName != null); + + checkOpen(); + + InternalPrimaryIndex priIndex = + priIndexMap.get(entityClassName); + if (priIndex == null) { + + /* Check metadata. */ + EntityMetadata entityMeta = checkEntityClass(entityClassName); + PrimaryKeyMetadata priKeyMeta = entityMeta.getPrimaryKey(); + if (primaryKeyClassName == null) { + primaryKeyClassName = priKeyMeta.getClassName(); + } else { + String expectClsName = + SimpleCatalog.keyClassName(priKeyMeta.getClassName()); + if (!primaryKeyClassName.equals(expectClsName)) { + throw new IllegalArgumentException + ("Wrong primary key class: " + primaryKeyClassName + + " Correct class is: " + expectClsName); + } + } + + /* Create bindings. */ + PersistEntityBinding entityBinding = + new PersistEntityBinding(catalog, entityClassName, rawAccess); + PersistKeyBinding keyBinding = getKeyBinding(primaryKeyClassName); + + /* If not read-only, get the primary key sequence. */ + String seqName = priKeyMeta.getSequenceName(); + if (!storeConfig.getReadOnly() && seqName != null) { + entityBinding.keyAssigner = new PersistKeyAssigner + (keyBinding, entityBinding, getSequence(seqName)); + } + + /* + * Use a single transaction for opening the primary DB and its + * secondaries. If opening any secondary fails, abort the + * transaction and undo the changes to the state of the store. + * Also support undo if the store is non-transactional. + * + * Use a no-wait transaction to avoid blocking on a Replica while + * attempting to open an index that is currently being populated + * via the replication stream from the Master. + */ + Transaction txn = null; + DatabaseConfig dbConfig = getPrimaryConfig(entityMeta); + if (dbConfig.getTransactional() && + DbCompat.getThreadTransaction(env) == null) { + txn = env.beginTransaction(null, autoCommitNoWaitTxnConfig); + } + PrimaryOpenState priOpenState = + new PrimaryOpenState(entityClassName); + final boolean saveAllowCreate = dbConfig.getAllowCreate(); + boolean success = false; + try { + + /* + * The AllowCreate setting is false in read-only / Replica + * upgrade mode. In this mode new primaries are not available. + * They can be opened later when the upgrade is complete on the + * Master, by calling getSecondaryIndex. [#16655] + */ + if (catalog.isReadOnly()) { + dbConfig.setAllowCreate(false); + } + + /* + * Open the primary database. Account for database renaming + * by calling getDatabaseClassName. The dbClassName will be + * null if the format has not yet been stored. [#16655]. + */ + Database db = null; + final String dbClassName = + catalog.getDatabaseClassName(entityClassName); + if (dbClassName != null) { + final String[] fileAndDbNames = + parseDbName(storePrefix + dbClassName); + /* */ + try { + /* */ + db = DbCompat.openDatabase(env, txn, fileAndDbNames[0], + fileAndDbNames[1], + dbConfig); + /* */ + } catch (LockConflictException e) { + /* Treat this as if the database does not exist. 
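+                     * On a Replica the database may be locked while it is
+                     * being populated from the Master via the replication
+                     * stream (hence the no-wait transaction used above), so
+                     * a lock conflict here simply means the index is not yet
+                     * available.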
+                     */
+                    }
+                    /* */
+                }
+                if (db == null) {
+                    throw new IndexNotAvailableException
+                        ("PrimaryIndex not yet available on this Replica, " +
+                         "entity class: " + entityClassName);
+                }
+
+                priOpenState.addDatabase(db);
+
+                /* Create index object. */
+                priIndex = new InternalPrimaryIndex(db, primaryKeyClass,
+                                                    keyBinding, entityClass,
+                                                    entityBinding);
+
+                /* Update index and database maps. */
+                priIndexMap.put(entityClassName, priIndex);
+                if (DbCompat.getDeferredWrite(dbConfig)) {
+                    deferredWriteDatabases.put(db, null);
+                }
+
+                /* If not read-only, open all associated secondaries. */
+                if (!dbConfig.getReadOnly()) {
+                    openSecondaryIndexes(txn, entityMeta, priOpenState);
+
+                    /*
+                     * To enable foreign key constraints, also open all primary
+                     * indexes referring to this class via a relatedEntity
+                     * property in another entity. [#15358]
+                     */
+                    Set<String> inverseClassNames =
+                        inverseRelatedEntityMap.get(entityClassName);
+                    if (inverseClassNames != null) {
+                        for (String relatedClsName : inverseClassNames) {
+                            getRelatedIndex(relatedClsName);
+                        }
+                    }
+                }
+                success = true;
+            } finally {
+                dbConfig.setAllowCreate(saveAllowCreate);
+                if (success) {
+                    if (txn != null) {
+                        txn.commit();
+                    }
+                } else {
+                    if (txn != null) {
+                        txn.abort();
+                    }
+                    priOpenState.undoState();
+                }
+            }
+        }
+        return priIndex;
+    }
+
+    /**
+     * Holds state information about opening a primary index and its secondary
+     * indexes. Used to undo the state of this object if the transaction
+     * opening the primary and secondaries aborts. Also used to close all
+     * databases opened during this process for a non-transactional store.
+     */
+    private class PrimaryOpenState {
+
+        private String entityClassName;
+        private IdentityHashMap<Database, Object> databases;
+        private Set<String> secNames;
+
+        PrimaryOpenState(String entityClassName) {
+            this.entityClassName = entityClassName;
+            databases = new IdentityHashMap<Database, Object>();
+            secNames = new HashSet<String>();
+        }
+
+        /**
+         * Saves a database that was opened during this operation.
+         */
+        void addDatabase(Database db) {
+            databases.put(db, null);
+        }
+
+        /**
+         * Saves the name of a secondary index that was opened during this
+         * operation.
+         */
+        void addSecondaryName(String secName) {
+            secNames.add(secName);
+        }
+
+        /**
+         * Resets all state information and closes any databases opened when
+         * this operation fails. This method should be called for both
+         * transactional and non-transactional operation.
+         *
+         * For transactional operations on JE, we don't strictly need to close
+         * the databases since the transaction abort will do that. However,
+         * closing them is harmless on JE, and required for DB core.
+         */
+        void undoState() {
+            for (Database db : databases.keySet()) {
+                try {
+                    db.close();
+                } catch (Exception ignored) {
+                }
+            }
+            priIndexMap.remove(entityClassName);
+            for (String secName : secNames) {
+                secIndexMap.remove(secName);
+            }
+            for (Database db : databases.keySet()) {
+                deferredWriteDatabases.remove(db);
+            }
+        }
+    }
+
+    /**
+     * Opens a primary index related via a foreign key (relatedEntity).
+     * Related indexes are not opened in the same transaction used by the
+     * caller to open a primary or secondary. It is OK to leave the related
+     * index open when the caller's transaction aborts. It is only important
+     * to open a primary and its secondaries atomically.
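+     *
+     * For example (hypothetical classes): if an Employee entity declares
+     * {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Department.class)},
+     * opening the Employee primary index also opens the Department primary
+     * index so that the foreign key constraint can be enforced.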
+ */ + private PrimaryIndex getRelatedIndex(String relatedClsName) + throws DatabaseException { + + PrimaryIndex relatedIndex = priIndexMap.get(relatedClsName); + if (relatedIndex == null) { + EntityMetadata relatedEntityMeta = + checkEntityClass(relatedClsName); + Class relatedKeyCls; + String relatedKeyClsName; + Class relatedCls; + if (rawAccess) { + relatedCls = RawObject.class; + relatedKeyCls = Object.class; + relatedKeyClsName = null; + } else { + try { + relatedCls = catalog.resolveClass(relatedClsName); + } catch (ClassNotFoundException e) { + throw new IllegalArgumentException + ("Related entity class not found: " + + relatedClsName); + } + relatedKeyClsName = SimpleCatalog.keyClassName + (relatedEntityMeta.getPrimaryKey().getClassName()); + relatedKeyCls = catalog.resolveKeyClass(relatedKeyClsName); + } + + /* + * Cycles are prevented here by adding primary indexes to the + * priIndexMap as soon as they are created, before opening related + * indexes. + */ + relatedIndex = getPrimaryIndex + (relatedKeyCls, relatedKeyClsName, + relatedCls, relatedClsName); + } + return relatedIndex; + } + + /** + * A getSecondaryIndex with extra parameters for opening a raw store. + * keyClassName is used for consistency checking and should be null for a + * raw store only. + */ + public synchronized SecondaryIndex + getSecondaryIndex(PrimaryIndex primaryIndex, + Class entityClass, + String entityClassName, + Class keyClass, + String keyClassName, + String keyName) + throws DatabaseException, IndexNotAvailableException { + + assert (rawAccess && keyClassName == null) || + (!rawAccess && keyClassName != null); + + checkOpen(); + + EntityMetadata entityMeta = null; + SecondaryKeyMetadata secKeyMeta = null; + + /* Validate the subclass for a subclass index. */ + if (entityClass != primaryIndex.getEntityClass()) { + entityMeta = model.getEntityMetadata(entityClassName); + assert entityMeta != null; + secKeyMeta = checkSecKey(entityMeta, keyName); + String subclassName = entityClass.getName(); + String declaringClassName = secKeyMeta.getDeclaringClassName(); + if (!subclassName.equals(declaringClassName)) { + throw new IllegalArgumentException + ("Key for subclass " + subclassName + + " is declared in a different class: " + + makeSecName(declaringClassName, keyName)); + } + + /* + * Get/create the subclass format to ensure it is stored in the + * catalog, even if no instances of the subclass are stored. + * [#16399] + */ + try { + catalog.getFormat(entityClass, + false /*checkEntitySubclassIndexes*/); + } catch (RefreshException e) { + e.refresh(); + try { + catalog.getFormat(entityClass, + false /*checkEntitySubclassIndexes*/); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + /* + * Even though the primary is already open, we can't assume the + * secondary is open because we don't automatically open all + * secondaries when the primary is read-only. Use auto-commit (a null + * transaction) since we're opening only one database. + */ + String secName = makeSecName(entityClassName, keyName); + InternalSecondaryIndex secIndex = secIndexMap.get(secName); + if (secIndex == null) { + if (entityMeta == null) { + entityMeta = model.getEntityMetadata(entityClassName); + assert entityMeta != null; + } + if (secKeyMeta == null) { + secKeyMeta = checkSecKey(entityMeta, keyName); + } + + /* Check metadata. 
*/ + if (keyClassName == null) { + keyClassName = getSecKeyClass(secKeyMeta); + } else { + String expectClsName = getSecKeyClass(secKeyMeta); + if (!keyClassName.equals(expectClsName)) { + throw new IllegalArgumentException + ("Wrong secondary key class: " + keyClassName + + " Correct class is: " + expectClsName); + } + } + + /* + * Account for database renaming. The dbClassName or dbKeyName + * will be null if the format has not yet been stored. [#16655] + */ + final String dbClassName = + catalog.getDatabaseClassName(entityClassName); + final String dbKeyName = + catalog.getDatabaseKeyName(entityClassName, keyName); + if (dbClassName != null && dbKeyName != null) { + + /* + * Use a no-wait transaction to avoid blocking on a Replica + * while attempting to open an index that is currently being + * populated via the replication stream from the Master. + */ + Transaction txn = null; + if (getPrimaryConfig(entityMeta).getTransactional() && + DbCompat.getThreadTransaction(env) == null) { + txn = env.beginTransaction(null, + autoCommitNoWaitTxnConfig); + } + boolean success = false; + try { + + /* + * The doNotCreate param is true below in read-only / + * Replica upgrade mode. In this mode new secondaries are + * not available. They can be opened later when the + * upgrade is complete on the Master, by calling + * getSecondaryIndex. [#16655] + */ + secIndex = openSecondaryIndex + (txn, primaryIndex, entityClass, entityMeta, + keyClass, keyClassName, secKeyMeta, secName, + makeSecName(dbClassName, dbKeyName), + catalog.isReadOnly() /*doNotCreate*/, + null /*priOpenState*/); + success = true; + /* */ + } catch (LockConflictException e) { + /* Treat this as if the database does not exist. */ + /* */ + } finally { + if (success) { + if (txn != null) { + txn.commit(); + } + } else { + if (txn != null) { + txn.abort(); + } + } + } + } + if (secIndex == null) { + throw new IndexNotAvailableException + ("SecondaryIndex not yet available on this Replica, " + + "entity class: " + entityClassName + ", key name: " + + keyName); + } + } + return secIndex; + } + + /** + * Opens secondary indexes for a given primary index metadata. + */ + private void openSecondaryIndexes(Transaction txn, + EntityMetadata entityMeta, + PrimaryOpenState priOpenState) + throws DatabaseException { + + String entityClassName = entityMeta.getClassName(); + PrimaryIndex priIndex = + priIndexMap.get(entityClassName); + assert priIndex != null; + Class entityClass = priIndex.getEntityClass(); + + for (SecondaryKeyMetadata secKeyMeta : + entityMeta.getSecondaryKeys().values()) { + String keyName = secKeyMeta.getKeyName(); + String secName = makeSecName(entityClassName, keyName); + SecondaryIndex secIndex = + secIndexMap.get(secName); + if (secIndex == null) { + String keyClassName = getSecKeyClass(secKeyMeta); + /* RawMode: should not require class. */ + Class keyClass = catalog.resolveKeyClass(keyClassName); + + /* + * Account for database renaming. The dbClassName or dbKeyName + * will be null if the format has not yet been stored. [#16655] + */ + final String dbClassName = + catalog.getDatabaseClassName(entityClassName); + final String dbKeyName = + catalog.getDatabaseKeyName(entityClassName, keyName); + if (dbClassName != null && dbKeyName != null) { + + /* + * The doNotCreate param is true below in two cases: + * 1- When SecondaryBulkLoad=true, new secondaries are not + * created/populated until getSecondaryIndex is called. 
+                     * 2- In read-only / Replica upgrade mode, new secondaries
+                     *    are not opened when the primary is opened. They can
+                     *    be opened later when the upgrade is complete on the
+                     *    Master, by calling getSecondaryIndex. [#16655]
+                     */
+                    openSecondaryIndex
+                        (txn, priIndex, entityClass, entityMeta,
+                         keyClass, keyClassName, secKeyMeta,
+                         secName, makeSecName(dbClassName, dbKeyName),
+                         (storeConfig.getSecondaryBulkLoad() ||
+                          catalog.isReadOnly()) /*doNotCreate*/,
+                         priOpenState);
+                }
+            }
+        }
+    }
+
+    /**
+     * Opens a secondary index with a given transaction and adds it to the
+     * secIndexMap. We assume that the index is not already open.
+     */
+    private InternalSecondaryIndex
+        openSecondaryIndex(Transaction txn,
+                           PrimaryIndex primaryIndex,
+                           Class entityClass,
+                           EntityMetadata entityMeta,
+                           Class keyClass,
+                           String keyClassName,
+                           SecondaryKeyMetadata secKeyMeta,
+                           String secName,
+                           String dbSecName,
+                           boolean doNotCreate,
+                           PrimaryOpenState priOpenState)
+        throws DatabaseException {
+
+        assert !secIndexMap.containsKey(secName);
+        String[] fileAndDbNames = parseDbName(storePrefix + dbSecName);
+        SecondaryConfig config =
+            getSecondaryConfig(secName, entityMeta, keyClassName, secKeyMeta);
+        Database priDb = primaryIndex.getDatabase();
+        DatabaseConfig priConfig = priDb.getConfig();
+
+        String relatedClsName = secKeyMeta.getRelatedEntity();
+        if (relatedClsName != null) {
+            PrimaryIndex relatedIndex = getRelatedIndex(relatedClsName);
+            config.setForeignKeyDatabase(relatedIndex.getDatabase());
+        }
+
+        if (config.getTransactional() != priConfig.getTransactional() ||
+            DbCompat.getDeferredWrite(config) !=
+            DbCompat.getDeferredWrite(priConfig) ||
+            config.getReadOnly() != priConfig.getReadOnly()) {
+            throw new IllegalArgumentException
+                ("One of these properties was changed to be inconsistent" +
+                 " with the associated primary database: " +
+                 " Transactional, DeferredWrite, ReadOnly");
+        }
+
+        PersistKeyBinding keyBinding = getKeyBinding(keyClassName);
+
+        SecondaryDatabase db = openSecondaryDatabase
+            (txn, fileAndDbNames, primaryIndex,
+             secKeyMeta.getKeyName(), config, doNotCreate);
+        if (db == null) {
+            assert doNotCreate;
+            return null;
+        }
+
+        InternalSecondaryIndex secIndex =
+            new InternalSecondaryIndex(db, primaryIndex, keyClass, keyBinding,
+                                       getKeyCreator(config));
+
+        /* Update index and database maps. */
+        secIndexMap.put(secName, secIndex);
+        if (DbCompat.getDeferredWrite(config)) {
+            deferredWriteDatabases.put(db, null);
+        }
+        if (priOpenState != null) {
+            priOpenState.addDatabase(db);
+            priOpenState.addSecondaryName(secName);
+        }
+        return secIndex;
+    }
+
+    /**
+     * Opens a secondary database, setting AllowCreate, ExclusiveCreate and
+     * AllowPopulate appropriately. We either set all three of these params to
+     * true or all to false. This ensures that we only populate a database
+     * when it is created, never if it just happens to be empty. [#16399]
+     *
+     * We also handle correction of a bug in duplicate ordering. See
+     * ComplexFormat.incorrectlyOrderedSecKeys.
+     *
+     * @param doNotCreate is true when StoreConfig.getSecondaryBulkLoad is true
+     * and we are opening a secondary as a side effect of opening a primary,
+     * i.e., getSecondaryIndex is not being called. If doNotCreate is true and
+     * the database does not exist, we silently ignore the failure to create
+     * the DB and return null. When getSecondaryIndex is subsequently called,
+     * the secondary database will be created and populated from the primary --
+     * a bulk load.
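+     *
+     * A sketch of the bulk-load pattern this supports (store, class and key
+     * names are hypothetical):
+     *
+     * <pre>
+     *  StoreConfig config = new StoreConfig();
+     *  config.setSecondaryBulkLoad(true);
+     *  EntityStore store = new EntityStore(env, "myStore", config);
+     *  PrimaryIndex<Long, Person> pri =
+     *      store.getPrimaryIndex(Long.class, Person.class);
+     *  // ... insert many entities via pri.put ...
+     *  // Opening the secondary now creates and populates it in one pass:
+     *  SecondaryIndex<String, Long, Person> sec =
+     *      store.getSecondaryIndex(pri, String.class, "name");
+     * </pre>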
+     */
+    private SecondaryDatabase
+        openSecondaryDatabase(final Transaction txn,
+                              final String[] fileAndDbNames,
+                              final PrimaryIndex priIndex,
+                              final String keyName,
+                              final SecondaryConfig config,
+                              final boolean doNotCreate)
+        throws DatabaseException {
+
+        assert config.getAllowPopulate();
+        assert !config.getExclusiveCreate();
+        final Database priDb = priIndex.getDatabase();
+        final ComplexFormat entityFormat = (ComplexFormat)
+            ((PersistEntityBinding) priIndex.getEntityBinding()).entityFormat;
+        final boolean saveAllowCreate = config.getAllowCreate();
+        /* */
+        final boolean saveOverrideDuplicateComparator =
+            config.getOverrideDuplicateComparator();
+        /* */
+        final Comparator<byte[]> saveDupComparator =
+            config.getDuplicateComparator();
+        try {
+            if (doNotCreate) {
+                config.setAllowCreate(false);
+            }
+            /* First try creating a new database, populate if needed. */
+            if (config.getAllowCreate()) {
+                config.setExclusiveCreate(true);
+                /* AllowPopulate is true; comparators are set. */
+                final SecondaryDatabase db = DbCompat.openSecondaryDatabase
+                    (env, txn, fileAndDbNames[0], fileAndDbNames[1], priDb,
+                     config);
+                if (db != null) {
+                    /* For unit testing. */
+                    boolean doFlush = false;
+                    /* Update dup ordering bug info. [#17252] */
+                    if (config.getDuplicateComparator() != null &&
+                        entityFormat.setSecKeyCorrectlyOrdered(keyName)) {
+                        catalog.flush(txn);
+                        doFlush = true;
+                    }
+
+                    /*
+                     * expectFlush is false except when set by
+                     * SecondaryDupOrderTest.
+                     */
+                    assert !expectFlush || doFlush;
+
+                    return db;
+                }
+            }
+            /* Next try opening an existing database. */
+            config.setAllowCreate(false);
+            config.setAllowPopulate(false);
+            config.setExclusiveCreate(false);
+
+            /* Account for dup ordering bug. [#17252] */
+            if (config.getDuplicateComparator() != null &&
+                entityFormat.isSecKeyIncorrectlyOrdered(keyName)) {
+                /* */
+                config.setOverrideDuplicateComparator(false);
+                /* */
+                config.setDuplicateComparator((Comparator<byte[]>) null);
+            }
+            final SecondaryDatabase db = DbCompat.openSecondaryDatabase
+                (env, txn, fileAndDbNames[0], fileAndDbNames[1], priDb,
+                 config);
+            return db;
+        } finally {
+            config.setAllowPopulate(true);
+            config.setExclusiveCreate(false);
+            config.setAllowCreate(saveAllowCreate);
+            /* */
+            config.setOverrideDuplicateComparator
+                (saveOverrideDuplicateComparator);
+            /* */
+            config.setDuplicateComparator(saveDupComparator);
+        }
+    }
+
+    /**
+     * Checks that all secondary indexes defined in the given entity metadata
+     * are already open. This method is called when a new entity subclass is
+     * encountered, i.e., when an instance of that class is stored. [#16399]
+     *
+     * @throws IllegalArgumentException if a secondary is not open.
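+     *
+     * For example (hypothetical classes): if Manager extends the Employee
+     * entity class and declares its own secondary key, the application must
+     * call EntityModel.registerClass(Manager.class) before opening the
+     * store, or EntityStore.getSubclassIndex after opening it, before
+     * storing the first Manager instance.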
+     */
+    synchronized void
+        checkEntitySubclassSecondaries(final EntityMetadata entityMeta,
+                                       final String subclassName)
+        throws DatabaseException {
+
+        if (storeConfig.getSecondaryBulkLoad()) {
+            return;
+        }
+
+        final String entityClassName = entityMeta.getClassName();
+
+        for (final SecondaryKeyMetadata secKeyMeta :
+             entityMeta.getSecondaryKeys().values()) {
+            final String keyName = secKeyMeta.getKeyName();
+            final String secName = makeSecName(entityClassName, keyName);
+            if (!secIndexMap.containsKey(secName)) {
+                throw new IllegalArgumentException
+                    ("Entity subclasses defining a secondary key must be " +
+                     "registered by calling EntityModel.registerClass or " +
+                     "EntityStore.getSubclassIndex before storing an " +
+                     "instance of the subclass: " + subclassName);
+            }
+        }
+    }
+
+    /* */
+    public void sync()
+        throws DatabaseException {
+
+        List<Database> dbs = new ArrayList<Database>();
+        synchronized (this) {
+            dbs.addAll(deferredWriteDatabases.keySet());
+        }
+        for (Database db : dbs) {
+            db.sync();
+            /* Call hook for unit testing. */
+            if (syncHook != null) {
+                syncHook.onSync(db);
+            }
+        }
+    }
+    /* */
+
+    public void truncateClass(Class entityClass)
+        throws DatabaseException {
+
+        truncateClass(null, entityClass);
+    }
+
+    public synchronized void truncateClass(Transaction txn, Class entityClass)
+        throws DatabaseException {
+
+        checkOpen();
+        checkWriteAllowed();
+
+        /* Close primary and secondary databases. */
+        closeClass(entityClass);
+
+        String clsName = entityClass.getName();
+        EntityMetadata entityMeta = checkEntityClass(clsName);
+
+        boolean autoCommit = false;
+        if (storeConfig.getTransactional() &&
+            txn == null &&
+            DbCompat.getThreadTransaction(env) == null) {
+            txn = env.beginTransaction(null, autoCommitTxnConfig);
+            autoCommit = true;
+        }
+
+        /*
+         * Truncate the primary first and let any exceptions propagate
+         * upwards. Then remove each secondary, only throwing the first
+         * exception.
+         */
+        boolean success = false;
+        try {
+            boolean primaryExists =
+                truncateIfExists(txn, storePrefix + clsName);
+            if (primaryExists) {
+                DatabaseException firstException = null;
+                for (SecondaryKeyMetadata keyMeta :
+                     entityMeta.getSecondaryKeys().values()) {
+                    /* Ignore secondaries that do not exist. */
+                    try {
+                        removeIfExists
+                            (txn,
+                             storePrefix +
+                             makeSecName(clsName, keyMeta.getKeyName()));
+                    } catch (DatabaseException e) {
+                        if (firstException == null) {
+                            firstException = e;
+                        }
+                    }
+                }
+                if (firstException != null) {
+                    throw firstException;
+                }
+            }
+            success = true;
+        } finally {
+            if (autoCommit) {
+                if (success) {
+                    txn.commit();
+                } else {
+                    txn.abort();
+                }
+            }
+        }
+    }
+
+    private boolean truncateIfExists(Transaction txn, String dbName)
+        throws DatabaseException {
+
+        String[] fileAndDbNames = parseDbName(dbName);
+        return DbCompat.truncateDatabase
+            (env, txn, fileAndDbNames[0], fileAndDbNames[1]);
+    }
+
+    private boolean removeIfExists(Transaction txn, String dbName)
+        throws DatabaseException {
+
+        String[] fileAndDbNames = parseDbName(dbName);
+        return DbCompat.removeDatabase
+            (env, txn, fileAndDbNames[0], fileAndDbNames[1]);
+    }
+
+    public synchronized void closeClass(Class entityClass)
+        throws DatabaseException {
+
+        checkOpen();
+        String clsName = entityClass.getName();
+        EntityMetadata entityMeta = checkEntityClass(clsName);
+
+        PrimaryIndex priIndex = priIndexMap.get(clsName);
+        if (priIndex != null) {
+            /* Close the secondaries first.
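+             * A SecondaryDatabase refers to its associated primary, so the
+             * secondaries must be closed before the primary is closed.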
+             */
+            DatabaseException firstException = null;
+            for (SecondaryKeyMetadata keyMeta :
+                 entityMeta.getSecondaryKeys().values()) {
+
+                String secName = makeSecName(clsName, keyMeta.getKeyName());
+                SecondaryIndex secIndex = secIndexMap.get(secName);
+                if (secIndex != null) {
+                    Database db = secIndex.getDatabase();
+                    firstException = closeDb(db, firstException);
+                    firstException =
+                        closeDb(secIndex.getKeysDatabase(), firstException);
+                    secIndexMap.remove(secName);
+                    deferredWriteDatabases.remove(db);
+                }
+            }
+            /* Close the primary last. */
+            Database db = priIndex.getDatabase();
+            firstException = closeDb(db, firstException);
+            priIndexMap.remove(clsName);
+            deferredWriteDatabases.remove(db);
+
+            /* Throw the first exception encountered. */
+            if (firstException != null) {
+                throw firstException;
+            }
+        }
+    }
+
+    public synchronized void close()
+        throws DatabaseException {
+
+        if (catalog == null) {
+            return;
+        }
+
+        DatabaseException firstException = null;
+        try {
+            if (rawAccess) {
+                boolean allClosed = catalog.close();
+                assert allClosed;
+            } else {
+                synchronized (catalogPool) {
+                    Map<String, PersistCatalog> catalogMap =
+                        catalogPool.get(env);
+                    assert catalogMap != null;
+                    boolean removeFromCatalog = true;
+                    try {
+                        removeFromCatalog = catalog.close();
+                    } finally {
+                        /*
+                         * Remove it if the reference count goes to zero, or
+                         * when an exception is thrown while closing the db.
+                         */
+                        if (removeFromCatalog) {
+                            catalogMap.remove(storeName);
+                        }
+                    }
+                }
+            }
+            catalog = null;
+        } catch (DatabaseException e) {
+            if (firstException == null) {
+                firstException = e;
+            }
+        }
+        for (Sequence seq : sequenceMap.values()) {
+            try {
+                seq.close();
+            } catch (DatabaseException e) {
+                if (firstException == null) {
+                    firstException = e;
+                }
+            }
+        }
+        firstException = closeDb(sequenceDb, firstException);
+        for (SecondaryIndex index : secIndexMap.values()) {
+            firstException = closeDb(index.getDatabase(), firstException);
+            firstException = closeDb(index.getKeysDatabase(), firstException);
+        }
+        for (PrimaryIndex index : priIndexMap.values()) {
+            firstException = closeDb(index.getDatabase(), firstException);
+        }
+        if (firstException != null) {
+            throw firstException;
+        }
+    }
+
+    public synchronized Sequence getSequence(String name)
+        throws DatabaseException {
+
+        checkOpen();
+
+        if (storeConfig.getReadOnly()) {
+            throw new IllegalStateException("Store is read-only");
+        }
+
+        Sequence seq = sequenceMap.get(name);
+        if (seq == null) {
+            if (sequenceDb == null) {
+                String[] fileAndDbNames =
+                    parseDbName(storePrefix + SEQUENCE_DB);
+                DatabaseConfig dbConfig = new DatabaseConfig();
+                dbConfig.setTransactional(storeConfig.getTransactional());
+                dbConfig.setAllowCreate(true);
+                /* */
+                dbConfig.setReplicated(storeConfig.getReplicated());
+                dbConfig.setTemporary(storeConfig.getTemporary());
+                /* */
+                DbCompat.setTypeBtree(dbConfig);
+                sequenceDb = DbCompat.openDatabase
+                    (env, null /*txn*/, fileAndDbNames[0], fileAndDbNames[1],
+                     dbConfig);
+                assert sequenceDb != null;
+            }
+
+            DatabaseEntry entry = new DatabaseEntry();
+            StringBinding.stringToEntry(name, entry);
+            /* */
+            try {
+            /* */
+                seq = sequenceDb.openSequence(null /*txn*/, entry,
+                                              getSequenceConfig(name));
+            /* */
+            } catch (SequenceExistsException e) {
+                /* Should never happen, ExclusiveCreate is false. */
+                throw DbCompat.unexpectedException(e);
+            } catch (SequenceNotFoundException e) {
+                /* Should never happen, AllowCreate is true. */
+                throw DbCompat.unexpectedException(e);
+            }
+            /* */
+            sequenceMap.put(name, seq);
+        }
+        return seq;
+    }
+
+    public synchronized SequenceConfig getSequenceConfig(String name) {
+        checkOpen();
+        SequenceConfig config = sequenceConfigMap.get(name);
+        if (config == null) {
+            config = new SequenceConfig();
+            config.setInitialValue(1);
+            config.setRange(1, Long.MAX_VALUE);
+            config.setCacheSize(100);
+            config.setAutoCommitNoSync(true);
+            config.setAllowCreate(!storeConfig.getReadOnly());
+            sequenceConfigMap.put(name, config);
+        }
+        return config;
+    }
+
+    public synchronized void setSequenceConfig(String name,
+                                               SequenceConfig config) {
+        checkOpen();
+        if (config.getExclusiveCreate() ||
+            config.getAllowCreate() == storeConfig.getReadOnly()) {
+            throw new IllegalArgumentException
+                ("One of these properties was illegally changed: " +
+                 "AllowCreate, ExclusiveCreate");
+        }
+        if (sequenceMap.containsKey(name)) {
+            throw new IllegalStateException
+                ("Cannot set config after Sequence is open");
+        }
+        sequenceConfigMap.put(name, config);
+    }
+
+    public synchronized DatabaseConfig getPrimaryConfig(Class entityClass) {
+        checkOpen();
+        String clsName = entityClass.getName();
+        EntityMetadata meta = checkEntityClass(clsName);
+        return getPrimaryConfig(meta).cloneConfig();
+    }
+
+    private synchronized DatabaseConfig getPrimaryConfig(EntityMetadata meta) {
+        String clsName = meta.getClassName();
+        DatabaseConfig config = priConfigMap.get(clsName);
+        if (config == null) {
+            config = new DatabaseConfig();
+            config.setTransactional(storeConfig.getTransactional());
+            config.setAllowCreate(!storeConfig.getReadOnly());
+            config.setReadOnly(storeConfig.getReadOnly());
+            DbCompat.setTypeBtree(config);
+            /* */
+            config.setReplicated(storeConfig.getReplicated());
+            config.setTemporary(storeConfig.getTemporary());
+            config.setDeferredWrite(storeConfig.getDeferredWrite());
+            config.setOverrideBtreeComparator(true);
+            /* */
+            setBtreeComparator(config, meta.getPrimaryKey().getClassName());
+            priConfigMap.put(clsName, config);
+        }
+        return config;
+    }
+
+    public synchronized void setPrimaryConfig(Class entityClass,
+                                              DatabaseConfig config) {
+        checkOpen();
+        String clsName = entityClass.getName();
+        if (priIndexMap.containsKey(clsName)) {
+            throw new IllegalStateException
+                ("Cannot set config after DB is open");
+        }
+        EntityMetadata meta = checkEntityClass(clsName);
+        DatabaseConfig dbConfig = getPrimaryConfig(meta);
+        if (config.getExclusiveCreate() ||
+            config.getAllowCreate() == config.getReadOnly() ||
+            config.getSortedDuplicates() ||
+            /* */
+            config.getTemporary() != dbConfig.getTemporary() ||
+            /* */
+            config.getBtreeComparator() != dbConfig.getBtreeComparator()) {
+            throw new IllegalArgumentException
+                ("One of these properties was illegally changed: " +
+                 "AllowCreate, ExclusiveCreate, SortedDuplicates, Temporary, " +
+                 "or BtreeComparator");
+        }
+        if (!DbCompat.isTypeBtree(config)) {
+            throw new IllegalArgumentException("Only type BTREE allowed");
+        }
+        priConfigMap.put(clsName, config);
+    }
+
+    public synchronized SecondaryConfig getSecondaryConfig(Class entityClass,
+                                                           String keyName) {
+        checkOpen();
+        String entityClsName = entityClass.getName();
+        EntityMetadata entityMeta = checkEntityClass(entityClsName);
+        SecondaryKeyMetadata secKeyMeta = checkSecKey(entityMeta, keyName);
+        String keyClassName = getSecKeyClass(secKeyMeta);
+        String secName = makeSecName(entityClass.getName(), keyName);
+        return (SecondaryConfig) getSecondaryConfig
+            (secName, entityMeta, keyClassName, secKeyMeta).cloneConfig();
+ } + + private SecondaryConfig getSecondaryConfig(String secName, + EntityMetadata entityMeta, + String keyClassName, + SecondaryKeyMetadata + secKeyMeta) { + SecondaryConfig config = secConfigMap.get(secName); + if (config == null) { + /* Set common properties to match the primary DB. */ + DatabaseConfig priConfig = getPrimaryConfig(entityMeta); + config = new SecondaryConfig(); + config.setTransactional(priConfig.getTransactional()); + config.setAllowCreate(!priConfig.getReadOnly()); + config.setReadOnly(priConfig.getReadOnly()); + DbCompat.setTypeBtree(config); + /* */ + config.setReplicated(priConfig.getReplicated()); + config.setTemporary(priConfig.getTemporary()); + config.setDeferredWrite(priConfig.getDeferredWrite()); + config.setOverrideBtreeComparator(true); + config.setOverrideDuplicateComparator(true); + /* */ + /* Set secondary properties based on metadata. */ + config.setAllowPopulate(true); + Relationship rel = secKeyMeta.getRelationship(); + config.setSortedDuplicates(rel == Relationship.MANY_TO_ONE || + rel == Relationship.MANY_TO_MANY); + setBtreeComparator(config, keyClassName); + config.setDuplicateComparator(priConfig.getBtreeComparator()); + PersistKeyCreator keyCreator = new PersistKeyCreator + (catalog, entityMeta, keyClassName, secKeyMeta, rawAccess); + if (rel == Relationship.ONE_TO_MANY || + rel == Relationship.MANY_TO_MANY) { + config.setMultiKeyCreator(keyCreator); + } else { + config.setKeyCreator(keyCreator); + } + DeleteAction deleteAction = secKeyMeta.getDeleteAction(); + if (deleteAction != null) { + ForeignKeyDeleteAction baseDeleteAction; + switch (deleteAction) { + case ABORT: + baseDeleteAction = ForeignKeyDeleteAction.ABORT; + break; + case CASCADE: + baseDeleteAction = ForeignKeyDeleteAction.CASCADE; + break; + case NULLIFY: + baseDeleteAction = ForeignKeyDeleteAction.NULLIFY; + break; + default: + throw DbCompat.unexpectedState(deleteAction.toString()); + } + config.setForeignKeyDeleteAction(baseDeleteAction); + if (deleteAction == DeleteAction.NULLIFY) { + config.setForeignMultiKeyNullifier(keyCreator); + } + } + secConfigMap.put(secName, config); + } + return config; + } + + public synchronized void setSecondaryConfig(Class entityClass, + String keyName, + SecondaryConfig config) { + checkOpen(); + String entityClsName = entityClass.getName(); + EntityMetadata entityMeta = checkEntityClass(entityClsName); + SecondaryKeyMetadata secKeyMeta = checkSecKey(entityMeta, keyName); + String keyClassName = getSecKeyClass(secKeyMeta); + String secName = makeSecName(entityClass.getName(), keyName); + if (secIndexMap.containsKey(secName)) { + throw new IllegalStateException + ("Cannot set config after DB is open"); + } + SecondaryConfig dbConfig = + getSecondaryConfig(secName, entityMeta, keyClassName, secKeyMeta); + if (config.getExclusiveCreate() || + config.getAllowCreate() == config.getReadOnly() || + config.getSortedDuplicates() != dbConfig.getSortedDuplicates() || + config.getBtreeComparator() != dbConfig.getBtreeComparator() || + config.getDuplicateComparator() != null || + /* */ + config.getTemporary() != dbConfig.getTemporary() || + /* */ + config.getAllowPopulate() != dbConfig.getAllowPopulate() || + config.getKeyCreator() != dbConfig.getKeyCreator() || + config.getMultiKeyCreator() != dbConfig.getMultiKeyCreator() || + config.getForeignKeyNullifier() != + dbConfig.getForeignKeyNullifier() || + config.getForeignMultiKeyNullifier() != + dbConfig.getForeignMultiKeyNullifier() || + config.getForeignKeyDeleteAction() != + 
+                dbConfig.getForeignKeyDeleteAction() ||
+            config.getForeignKeyDatabase() != null) {
+            throw new IllegalArgumentException
+                ("One of these properties was illegally changed: " +
+                 " AllowCreate, ExclusiveCreate, SortedDuplicates," +
+                 " BtreeComparator, DuplicateComparator, Temporary," +
+                 " AllowPopulate, KeyCreator, MultiKeyCreator," +
+                 " ForeignKeyNullifier, ForeignMultiKeyNullifier," +
+                 " ForeignKeyDeleteAction, ForeignKeyDatabase");
+        }
+        if (!DbCompat.isTypeBtree(config)) {
+            throw new IllegalArgumentException("Only type BTREE allowed");
+        }
+        secConfigMap.put(secName, config);
+    }
+
+    private static String makeSecName(String entityClsName, String keyName) {
+        return entityClsName + NAME_SEPARATOR + keyName;
+    }
+
+    static String makePriDbName(String storePrefix, String entityClsName) {
+        return storePrefix + entityClsName;
+    }
+
+    static String makeSecDbName(String storePrefix,
+                                String entityClsName,
+                                String keyName) {
+        return storePrefix + makeSecName(entityClsName, keyName);
+    }
+
+    /**
+     * Parses a whole DB name and returns an array of 2 strings where element 0
+     * is the file name (always null for JE, always non-null for DB core) and
+     * element 1 is the logical DB name (always non-null for JE, may be null
+     * for DB core).
+     */
+    public String[] parseDbName(String wholeName) {
+        return parseDbName(wholeName, storeConfig.getDatabaseNamer());
+    }
+
+    /**
+     * Allows passing a namer to a static method for testing.
+     */
+    public static String[] parseDbName(String wholeName, DatabaseNamer namer) {
+        String[] result = new String[2];
+        if (DbCompat.SEPARATE_DATABASE_FILES) {
+            String[] splitName = wholeName.split(NAME_SEPARATOR);
+            assert splitName.length == 3 || splitName.length == 4 : wholeName;
+            assert splitName[0].equals("persist") : wholeName;
+            String storeName = splitName[1];
+            String clsName = splitName[2];
+            String keyName = (splitName.length > 3) ? splitName[3] : null;
+            result[0] = namer.getFileName(storeName, clsName, keyName);
+            result[1] = null;
+        } else {
+            result[0] = null;
+            result[1] = wholeName;
+        }
+        return result;
+    }
+
+    /**
+     * Creates a message identifying the database from the pair of strings
+     * returned by parseDbName.
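+     *
+     * For example, given the whole name
+     * persist#myStore#com.example.Person#name (hypothetical store and
+     * class), JE yields {null, wholeName}, while DB core with separate
+     * database files yields a file name chosen by the DatabaseNamer and a
+     * null logical name.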
+ */ + String getDbNameMessage(String[] names) { + if (DbCompat.SEPARATE_DATABASE_FILES) { + return "file: " + names[0]; + } else { + return "database: " + names[1]; + } + } + + private void checkOpen() { + if (catalog == null) { + throw new IllegalStateException("Store has been closed"); + } + } + + private void checkWriteAllowed() { + if (catalog.isReadOnly()) { + throw new IllegalStateException + ("Store is read-only or is operating as a Replica"); + } + } + + private EntityMetadata checkEntityClass(String clsName) { + EntityMetadata meta = model.getEntityMetadata(clsName); + if (meta == null) { + throw new IllegalArgumentException + ("Class could not be loaded or is not an entity class: " + + clsName); + } + return meta; + } + + private SecondaryKeyMetadata checkSecKey(EntityMetadata entityMeta, + String keyName) { + SecondaryKeyMetadata secKeyMeta = + entityMeta.getSecondaryKeys().get(keyName); + if (secKeyMeta == null) { + throw new IllegalArgumentException + ("Not a secondary key: " + + makeSecName(entityMeta.getClassName(), keyName)); + } + return secKeyMeta; + } + + private String getSecKeyClass(SecondaryKeyMetadata secKeyMeta) { + String clsName = secKeyMeta.getElementClassName(); + if (clsName == null) { + clsName = secKeyMeta.getClassName(); + } + return SimpleCatalog.keyClassName(clsName); + } + + private PersistKeyBinding getKeyBinding(String keyClassName) { + PersistKeyBinding binding = keyBindingMap.get(keyClassName); + if (binding == null) { + binding = new PersistKeyBinding(catalog, keyClassName, rawAccess); + keyBindingMap.put(keyClassName, binding); + } + return binding; + } + + private PersistKeyCreator getKeyCreator(final SecondaryConfig config) { + PersistKeyCreator keyCreator = + (PersistKeyCreator) config.getKeyCreator(); + if (keyCreator != null) { + return keyCreator; + } + keyCreator = (PersistKeyCreator) config.getMultiKeyCreator(); + assert keyCreator != null; + return keyCreator; + } + + private void setBtreeComparator(DatabaseConfig config, String clsName) { + if (!rawAccess) { + PersistKeyBinding binding = getKeyBinding(clsName); + Format format = binding.keyFormat; + if (format instanceof CompositeKeyFormat) { + Class keyClass = format.getType(); + if (Comparable.class.isAssignableFrom(keyClass)) { + config.setBtreeComparator(new PersistComparator(binding)); + } + } + } + } + + private DatabaseException closeDb(Database db, + DatabaseException firstException) { + if (db != null) { + try { + db.close(); + } catch (DatabaseException e) { + if (firstException == null) { + firstException = e; + } + } + } + return firstException; + } + + public EvolveStats evolve(EvolveConfig config) + throws DatabaseException { + + checkOpen(); + checkWriteAllowed(); + + /* + * Before starting, ensure that we are not in Replica Upgrade Mode and + * the catalog metadata is not stale. If this node is a Replica, a + * ReplicaWriteException will occur further below. + */ + if (catalog.isReplicaUpgradeMode() || catalog.isMetadataStale(null)) { + attemptRefresh(); + } + + /* To ensure consistency use a single catalog instance. 
[#16655] */ + final PersistCatalog useCatalog = catalog; + List toEvolve = new ArrayList(); + Set configToEvolve = config.getClassesToEvolve(); + if (configToEvolve.isEmpty()) { + useCatalog.getEntityFormats(toEvolve); + } else { + for (String name : configToEvolve) { + Format format = useCatalog.getFormat(name); + if (format == null) { + throw new IllegalArgumentException + ("Class to evolve is not persistent: " + name); + } + if (!format.isEntity()) { + throw new IllegalArgumentException + ("Class to evolve is not an entity class: " + name); + } + toEvolve.add(format); + } + } + + EvolveEvent event = EvolveInternal.newEvent(); + for (Format format : toEvolve) { + if (format.getEvolveNeeded()) { + evolveIndex(format, event, config.getEvolveListener()); + format.setEvolveNeeded(false); + useCatalog.flush(null); + } + } + + return event.getStats(); + } + + private void evolveIndex(Format format, + EvolveEvent event, + EvolveListener listener) + throws DatabaseException { + + /* We may make this configurable later. */ + final int WRITES_PER_TXN = 1; + + Class entityClass = format.getType(); + String entityClassName = format.getClassName(); + EntityMetadata meta = model.getEntityMetadata(entityClassName); + String keyClassName = meta.getPrimaryKey().getClassName(); + keyClassName = SimpleCatalog.keyClassName(keyClassName); + DatabaseConfig dbConfig = getPrimaryConfig(meta); + + PrimaryIndex index = getPrimaryIndex + (Object.class, keyClassName, entityClass, entityClassName); + Database db = index.getDatabase(); + + EntityBinding binding = index.getEntityBinding(); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + CursorConfig cursorConfig = null; + Transaction txn = null; + if (dbConfig.getTransactional()) { + txn = env.beginTransaction(null, autoCommitTxnConfig); + cursorConfig = CursorConfig.READ_COMMITTED; + } + + Cursor cursor = null; + int nWritten = 0; + try { + cursor = db.openCursor(txn, cursorConfig); + OperationStatus status = cursor.getFirst(key, data, null); + while (status == OperationStatus.SUCCESS) { + boolean oneWritten = false; + if (evolveNeeded(key, data, binding)) { + cursor.putCurrent(data); + oneWritten = true; + nWritten += 1; + } + /* Update event stats, even if no listener. [#17024] */ + EvolveInternal.updateEvent + (event, entityClassName, 1, oneWritten ? 1 : 0); + if (listener != null) { + if (!listener.evolveProgress(event)) { + break; + } + } + if (txn != null && nWritten >= WRITES_PER_TXN) { + cursor.close(); + cursor = null; + txn.commit(); + txn = null; + txn = env.beginTransaction(null, autoCommitTxnConfig); + cursor = db.openCursor(txn, cursorConfig); + DatabaseEntry saveKey = KeyRange.copy(key); + status = cursor.getSearchKeyRange(key, data, null); + if (status == OperationStatus.SUCCESS && + KeyRange.equalBytes(key, saveKey)) { + status = cursor.getNext(key, data, null); + } + } else { + status = cursor.getNext(key, data, null); + } + } + } finally { + if (cursor != null) { + cursor.close(); + } + if (txn != null) { + if (nWritten > 0) { + txn.commit(); + } else { + txn.abort(); + } + } + } + } + + /** + * Checks whether the given data is in the current format by translating it + * to/from an object. If true is returned, data is updated. 
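+     *
+     * Callers typically reach this through EntityStore.evolve; a minimal
+     * usage sketch (hypothetical class name):
+     *
+     * <pre>
+     *  EvolveConfig evolveConfig = new EvolveConfig();
+     *  evolveConfig.addClassToEvolve("com.example.Person");
+     *  EvolveStats stats = store.evolve(evolveConfig);
+     * </pre>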
+     */
+    private boolean evolveNeeded(DatabaseEntry key,
+                                 DatabaseEntry data,
+                                 EntityBinding binding) {
+        Object entity = binding.entryToObject(key, data);
+        DatabaseEntry newData = new DatabaseEntry();
+        binding.objectToData(entity, newData);
+        if (data.equals(newData)) {
+            return false;
+        } else {
+            byte[] bytes = newData.getData();
+            int off = newData.getOffset();
+            int size = newData.getSize();
+            data.setData(bytes, off, size);
+            return true;
+        }
+    }
+
+    /**
+     * For unit testing.
+     */
+    public static void setSyncHook(SyncHook hook) {
+        syncHook = hook;
+    }
+
+    /**
+     * For unit testing.
+     */
+    public interface SyncHook {
+        void onSync(Database db);
+    }
+
+    /**
+     * Attempts to refresh metadata and returns whether a refresh occurred.
+     * May be called when we expect that updated metadata may be available on
+     * disk, and if so could be used to satisfy the user's request. For
+     * example, if an index is requested and not available, we can try a
+     * refresh and then check for the index again.
+     */
+    public boolean attemptRefresh() {
+        final PersistCatalog oldCatalog = catalog;
+        final PersistCatalog newCatalog =
+            refresh(oldCatalog, -1 /*errorFormatId*/, null /*cause*/);
+        return oldCatalog != newCatalog;
+    }
+
+    /**
+     * Called via RefreshException.refresh when handling the RefreshException
+     * in the binding methods, when a Replica detects that its in-memory
+     * metadata is stale.
+     *
+     * During refresh, objects that are visible to the user must not be
+     * re-created, since the user may have a reference to them. The
+     * PersistCatalog is re-created by this method, and the additional objects
+     * listed below are refreshed without creating a new instance. The
+     * refresh() method of non-indented classes is called, and these methods
+     * forward the call to indented classes.
+     *
+     *  PersistCatalog
+     *      EntityModel
+     *  PrimaryIndex
+     *      PersistEntityBinding
+     *          PersistKeyAssigner
+     *  SecondaryIndex
+     *      PersistKeyCreator
+     *  PersistKeyBinding
+     *
+     * These objects have volatile catalog and format fields. When a refresh
+     * in one thread changes these fields, other threads should notice the
+     * changes ASAP. However, it is not necessary that all access to these
+     * fields is synchronized. It is OK for a mix of old and new fields to be
+     * used at any point in time. If an old object is used after a refresh,
+     * the need for a refresh may be detected, causing another call to this
+     * method. In most cases the redundant refresh will be avoided (see check
+     * below), but in some cases an extra unnecessary refresh may be performed.
+     * This is undesirable, but is not dangerous. Synchronization must be
+     * avoided to prevent blocking during read/write operations.
+     *
+     * [#16655]
+     */
+    synchronized PersistCatalog refresh(final PersistCatalog oldCatalog,
+                                        final int errorFormatId,
+                                        final RefreshException cause) {
+
+        /*
+         * While synchronized, check to see whether metadata has already been
+         * refreshed.
+         */
+        if (oldCatalog != catalog) {
+            /* Another thread refreshed the metadata -- nothing to do. */
+            return catalog;
+        }
+
+        /*
+         * First refresh the catalog information, then check that the new
+         * metadata contains the format ID we're interested in using.
+         */
+        try {
+            catalog = new PersistCatalog(oldCatalog, storePrefix);
+        } catch (DatabaseException e) {
+            throw RuntimeExceptionWrapper.wrapIfNeeded(e);
+        }
+
+        if (errorFormatId >= catalog.getNFormats()) {
+            /* Even with current metadata, the format is out of range. */
*/ + throw DbCompat.unexpectedException + ("Catalog could not be refreshed, may indicate corruption, " + + "errorFormatId=" + errorFormatId + " nFormats=" + + catalog.getNFormats() + ", .", cause); + } + + /* + * Finally refresh all other objects that directly reference catalog + * and format objects. + */ + for (InternalPrimaryIndex index : priIndexMap.values()) { + index.refresh(catalog); + } + for (InternalSecondaryIndex index : secIndexMap.values()) { + index.refresh(catalog); + } + for (PersistKeyBinding binding : keyBindingMap.values()) { + binding.refresh(catalog); + } + for (SecondaryConfig config : secConfigMap.values()) { + PersistKeyCreator keyCreator = getKeyCreator(config); + keyCreator.refresh(catalog); + } + + return catalog; + } + + private class InternalPrimaryIndex extends PrimaryIndex { + + private final PersistEntityBinding entityBinding; + + InternalPrimaryIndex(final Database database, + final Class keyClass, + final PersistKeyBinding keyBinding, + final Class entityClass, + final PersistEntityBinding entityBinding) + throws DatabaseException { + + super(database, keyClass, keyBinding, entityClass, entityBinding); + this.entityBinding = entityBinding; + } + + void refresh(final PersistCatalog newCatalog) { + entityBinding.refresh(newCatalog); + } + + /* */ + @Override + protected TransactionConfig getAutoCommitTransactionConfig() { + return autoCommitTxnConfig; + } + /* */ + } + + private class InternalSecondaryIndex + extends SecondaryIndex { + + private final PersistKeyCreator keyCreator; + + InternalSecondaryIndex(final SecondaryDatabase database, + final PrimaryIndex primaryIndex, + final Class secondaryKeyClass, + final PersistKeyBinding secondaryKeyBinding, + final PersistKeyCreator keyCreator) + throws DatabaseException { + + super(database, null /*keysDatabase*/, primaryIndex, + secondaryKeyClass, secondaryKeyBinding); + this.keyCreator = keyCreator; + } + + void refresh(final PersistCatalog newCatalog) { + keyCreator.refresh(newCatalog); + } + + /* */ + @Override + protected TransactionConfig getAutoCommitTransactionConfig() { + return autoCommitTxnConfig; + } + /* */ + } + + TransactionConfig getAutoCommitTxnConfig() { + return autoCommitTxnConfig; + } + + /* */ + /** + * Configures a TransactionConfig for auto-commit operations on a + * non-replicated transactional database on a replicated node. We use: + * - default SyncPolicy for the environment + * - ReplicaAckPolicy.NONE to avoid consistency checks on the Master + * - Consistency.NONE to avoid consistency checks on the Replica + * - localWrite=true to allow writing + */ + private static void configForNonRepDb(TransactionConfig config, + Durability envDurability) { + config.setDurability(new Durability( + envDurability.getLocalSync(), envDurability.getReplicaSync(), + Durability.ReplicaAckPolicy.NONE)); + config.setConsistencyPolicy( + NoConsistencyRequiredPolicy.NO_CONSISTENCY); + config.setLocalWrite(true); + } + /* */ +} diff --git a/src/com/sleepycat/persist/impl/StoredModel.java b/src/com/sleepycat/persist/impl/StoredModel.java new file mode 100644 index 0000000..524b682 --- /dev/null +++ b/src/com/sleepycat/persist/impl/StoredModel.java @@ -0,0 +1,73 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.util.Set; + +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.EntityModel; + +/** + * The EntityModel used when a RawStore is opened. The metadata and raw type + * information comes from the catalog directly, without using the current + * class definitions. + * + * @author Mark Hayes + */ +class StoredModel extends EntityModel { + + private volatile PersistCatalog catalog; + private volatile Set knownClasses; + + StoredModel(final PersistCatalog catalog) { + this.catalog = catalog; + } + + /** + * This method is used to initialize the model when catalog creation is + * complete, and reinitialize it when a Replica refresh occurs. + */ + @Override + protected void setCatalog(final PersistCatalog newCatalog) { + super.setCatalog(newCatalog); + this.catalog = newCatalog; + knownClasses = newCatalog.getModelClasses(); + } + + @Override + public ClassMetadata getClassMetadata(String className) { + ClassMetadata metadata = null; + Format format = catalog.getFormat(className); + if (format != null && format.isCurrentVersion()) { + metadata = format.getClassMetadata(); + } + return metadata; + } + + @Override + public EntityMetadata getEntityMetadata(String className) { + EntityMetadata metadata = null; + Format format = catalog.getFormat(className); + if (format != null && format.isCurrentVersion()) { + metadata = format.getEntityMetadata(); + } + return metadata; + } + + @Override + public Set getKnownClasses() { + return knownClasses; + } +} diff --git a/src/com/sleepycat/persist/impl/WidenerInput.java b/src/com/sleepycat/persist/impl/WidenerInput.java new file mode 100644 index 0000000..f3aedc4 --- /dev/null +++ b/src/com/sleepycat/persist/impl/WidenerInput.java @@ -0,0 +1,605 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.impl; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import com.sleepycat.compat.DbCompat; + +/** + * Widens a value returned by another input when any readXxx method is called. + * Used to cause an Accessor to read a widened value. 
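+ *
+ * For example (a hypothetical field evolution), if a non-key field declared
+ * as
+ *
+ *   private int count;
+ *
+ * is changed to
+ *
+ *   private long count;
+ *
+ * no Converter mutation is required: a WidenerInput wraps the underlying
+ * input so that the Accessor's readLong() call is satisfied from the stored
+ * int value (see readLong below).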
+ * + * For non-key fields we support all Java primitive widening: + * - byte to short, int, long, float, double, BigInteger and BigDecimal + * - short to int, long, float, double, BigInteger and BigDecimal + * - char to int, long, float, double, BigInteger and BigDecimal + * - int to long, float, double, BigInteger and BigDecimal + * - long to float, double, BigInteger and BigDecimal + * - float to double + * + * For non-key fields we also support: + * - Java reference widening + * - primitive to primitive wrapper + * - Java primitive widening to corresponding primitive wrappers + * - Java widening of primitive wrapper to primitive wrapper + * + * For secondary keys fields we ONLY support: + * - primitive to primitive wrapper + * + * But for primary keys and composite key fields we ONLY support: + * - primitive to primitive wrapper + * - primitive wrapper to primitive + * These conversions don't require any converter, since the stored format is + * not changed. A WidenerInput is not used for these changes. + * + * @author Mark Hayes + */ +class WidenerInput extends AbstractInput { + + private EntityInput input; + private int fromFormatId; + private int toFormatId; + + /** + * Returns whether widening is supported by this class. If false is + * returned by this method, then widening is disallowed and a field + * converter or deleter is necessary. + */ + static boolean isWideningSupported(Format fromFormat, + Format toFormat, + boolean isSecKeyField) { + int fromFormatId = fromFormat.getId(); + int toFormatId = toFormat.getId(); + + switch (fromFormatId) { + case Format.ID_BOOL: + switch (toFormatId) { + case Format.ID_BOOL_W: + return true; + default: + return false; + } + case Format.ID_BYTE: + switch (toFormatId) { + case Format.ID_BYTE_W: + return true; + case Format.ID_SHORT: + case Format.ID_SHORT_W: + case Format.ID_INT: + case Format.ID_INT_W: + case Format.ID_LONG: + case Format.ID_LONG_W: + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_BYTE_W: + switch (toFormatId) { + case Format.ID_SHORT_W: + case Format.ID_INT_W: + case Format.ID_LONG_W: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_SHORT: + switch (toFormatId) { + case Format.ID_SHORT_W: + return true; + case Format.ID_INT: + case Format.ID_INT_W: + case Format.ID_LONG: + case Format.ID_LONG_W: + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_SHORT_W: + switch (toFormatId) { + case Format.ID_INT_W: + case Format.ID_LONG_W: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_INT: + switch (toFormatId) { + case Format.ID_INT_W: + return true; + case Format.ID_LONG: + case Format.ID_LONG_W: + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_INT_W: + switch (toFormatId) { + case Format.ID_LONG_W: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_LONG: + switch (toFormatId) { + case 
Format.ID_LONG_W: + return true; + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_LONG_W: + switch (toFormatId) { + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_FLOAT: + switch (toFormatId) { + case Format.ID_FLOAT_W: + return true; + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + return !isSecKeyField; + default: + return false; + } + case Format.ID_FLOAT_W: + switch (toFormatId) { + case Format.ID_DOUBLE_W: + return !isSecKeyField; + default: + return false; + } + case Format.ID_DOUBLE: + switch (toFormatId) { + case Format.ID_DOUBLE_W: + return true; + default: + return false; + } + case Format.ID_CHAR: + switch (toFormatId) { + case Format.ID_CHAR_W: + return true; + case Format.ID_INT: + case Format.ID_INT_W: + case Format.ID_LONG: + case Format.ID_LONG_W: + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_CHAR_W: + switch (toFormatId) { + case Format.ID_INT_W: + case Format.ID_LONG_W: + case Format.ID_FLOAT_W: + case Format.ID_DOUBLE_W: + case Format.ID_BIGINT: + return !isSecKeyField; + default: + return false; + } + case Format.ID_STRING: + switch (toFormatId) { + case Format.ID_OBJECT: + return !isSecKeyField; + default: + return false; + } + default: + return false; + } + } + + WidenerInput(EntityInput input, int fromFormatId, int toFormatId) { + super(input.getCatalog(), input.isRawAccess()); + this.input = input; + this.fromFormatId = fromFormatId; + this.toFormatId = toFormatId; + } + + public void registerPriKeyObject(Object o) { + input.registerPriKeyObject(o); + } + + public void registerPriStringKeyObject(Object o) { + input.registerPriStringKeyObject(o); + } + + public int readArrayLength() { + throw DbCompat.unexpectedState(); + } + + public int readEnumConstant(String[] names) { + throw DbCompat.unexpectedState(); + } + + public void skipField(Format declaredFormat) { + throw DbCompat.unexpectedState(); + } + + public String readString() { + throw DbCompat.unexpectedState(); + } + + public Object readKeyObject(Format fromFormat) + throws RefreshException { + + return readObject(); + } + + public Object readObject() + throws RefreshException { + + switch (fromFormatId) { + case Format.ID_BOOL: + checkToFormat(Format.ID_BOOL_W); + return input.readBoolean(); + case Format.ID_BYTE: + return byteToObject(input.readByte()); + case Format.ID_BYTE_W: + Byte b = (Byte) input.readObject(); + return (b != null) ? byteToObject(b) : null; + case Format.ID_SHORT: + return shortToObject(input.readShort()); + case Format.ID_SHORT_W: + Short s = (Short) input.readObject(); + return (s != null) ? shortToObject(s) : null; + case Format.ID_INT: + return intToObject(input.readInt()); + case Format.ID_INT_W: + Integer i = (Integer) input.readObject(); + return (i != null) ? intToObject(i) : null; + case Format.ID_LONG: + return longToObject(input.readLong()); + case Format.ID_LONG_W: + Long l = (Long) input.readObject(); + return (l != null) ? longToObject(l) : null; + case Format.ID_FLOAT: + return floatToObject(input.readSortedFloat()); + case Format.ID_FLOAT_W: + Float f = (Float) input.readObject(); + return (f != null) ? 
floatToObject(f) : null; + case Format.ID_DOUBLE: + checkToFormat(Format.ID_DOUBLE_W); + return input.readSortedDouble(); + case Format.ID_CHAR: + return charToObject(input.readChar()); + case Format.ID_CHAR_W: + Character c = (Character) input.readObject(); + return (c != null) ? charToObject(c) : null; + case Format.ID_STRING: + checkToFormat(Format.ID_OBJECT); + return input.readStringObject(); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + private Object byteToObject(byte v) { + switch (toFormatId) { + case Format.ID_BYTE: + case Format.ID_BYTE_W: + return Byte.valueOf(v); + case Format.ID_SHORT: + case Format.ID_SHORT_W: + return Short.valueOf(v); + case Format.ID_INT: + case Format.ID_INT_W: + return Integer.valueOf(v); + case Format.ID_LONG: + case Format.ID_LONG_W: + return Long.valueOf(v); + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + return Float.valueOf(v); + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + return Double.valueOf(v); + case Format.ID_BIGINT: + return BigInteger.valueOf(v); + default: + throw DbCompat.unexpectedState(String.valueOf(toFormatId)); + } + } + + private Object shortToObject(short v) { + switch (toFormatId) { + case Format.ID_SHORT: + case Format.ID_SHORT_W: + return Short.valueOf(v); + case Format.ID_INT: + case Format.ID_INT_W: + return Integer.valueOf(v); + case Format.ID_LONG: + case Format.ID_LONG_W: + return Long.valueOf(v); + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + return Float.valueOf(v); + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + return Double.valueOf(v); + case Format.ID_BIGINT: + return BigInteger.valueOf(v); + default: + throw DbCompat.unexpectedState(String.valueOf(toFormatId)); + } + } + + private Object intToObject(int v) { + switch (toFormatId) { + case Format.ID_INT: + case Format.ID_INT_W: + return Integer.valueOf(v); + case Format.ID_LONG: + case Format.ID_LONG_W: + return Long.valueOf(v); + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + return Float.valueOf(v); + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + return Double.valueOf(v); + case Format.ID_BIGINT: + return BigInteger.valueOf(v); + default: + throw DbCompat.unexpectedState(String.valueOf(toFormatId)); + } + } + + private Object longToObject(long v) { + switch (toFormatId) { + case Format.ID_LONG: + case Format.ID_LONG_W: + return Long.valueOf(v); + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + return Float.valueOf(v); + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + return Double.valueOf(v); + case Format.ID_BIGINT: + return BigInteger.valueOf(v); + default: + throw DbCompat.unexpectedState(String.valueOf(toFormatId)); + } + } + + private Object floatToObject(float v) { + switch (toFormatId) { + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + return Float.valueOf(v); + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + return Double.valueOf(v); + default: + throw DbCompat.unexpectedState(String.valueOf(toFormatId)); + } + } + + private Object charToObject(char v) { + switch (toFormatId) { + case Format.ID_CHAR: + case Format.ID_CHAR_W: + return Character.valueOf(v); + case Format.ID_INT: + case Format.ID_INT_W: + return Integer.valueOf(v); + case Format.ID_LONG: + case Format.ID_LONG_W: + return Long.valueOf(v); + case Format.ID_FLOAT: + case Format.ID_FLOAT_W: + return Float.valueOf(v); + case Format.ID_DOUBLE: + case Format.ID_DOUBLE_W: + return Double.valueOf(v); + case Format.ID_BIGINT: + return BigInteger.valueOf(v); + default: + throw 
DbCompat.unexpectedState(String.valueOf(toFormatId)); + } + } + + public char readChar() { + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + + public boolean readBoolean() { + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + + public byte readByte() { + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + + public short readShort() + throws RefreshException { + + checkToFormat(Format.ID_SHORT); + switch (fromFormatId) { + case Format.ID_BYTE: + return input.readByte(); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + public int readInt() + throws RefreshException { + + checkToFormat(Format.ID_INT); + switch (fromFormatId) { + case Format.ID_BYTE: + return input.readByte(); + case Format.ID_SHORT: + return input.readShort(); + case Format.ID_CHAR: + return input.readChar(); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + public long readLong() + throws RefreshException { + + checkToFormat(Format.ID_LONG); + switch (fromFormatId) { + case Format.ID_BYTE: + return input.readByte(); + case Format.ID_SHORT: + return input.readShort(); + case Format.ID_INT: + return input.readInt(); + case Format.ID_CHAR: + return input.readChar(); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + public float readSortedFloat() + throws RefreshException { + + checkToFormat(Format.ID_FLOAT); + switch (fromFormatId) { + case Format.ID_BYTE: + return input.readByte(); + case Format.ID_SHORT: + return input.readShort(); + case Format.ID_INT: + return input.readInt(); + case Format.ID_LONG: + return input.readLong(); + case Format.ID_CHAR: + return input.readChar(); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + public double readSortedDouble() + throws RefreshException { + + checkToFormat(Format.ID_DOUBLE); + switch (fromFormatId) { + case Format.ID_BYTE: + return input.readByte(); + case Format.ID_SHORT: + return input.readShort(); + case Format.ID_INT: + return input.readInt(); + case Format.ID_LONG: + return input.readLong(); + case Format.ID_FLOAT: + return input.readSortedFloat(); + case Format.ID_CHAR: + return input.readChar(); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + public BigInteger readBigInteger() + throws RefreshException { + + checkToFormat(Format.ID_BIGINT); + switch (fromFormatId) { + case Format.ID_BYTE: + return BigInteger.valueOf(input.readByte()); + case Format.ID_SHORT: + return BigInteger.valueOf(input.readShort()); + case Format.ID_INT: + return BigInteger.valueOf(input.readInt()); + case Format.ID_LONG: + return BigInteger.valueOf(input.readLong()); + case Format.ID_CHAR: + return BigInteger.valueOf(input.readChar()); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + public BigDecimal readSortedBigDecimal() + throws RefreshException { + checkToFormat(Format.ID_BIGDEC); + switch (fromFormatId) { + case Format.ID_BYTE: + return BigDecimal.valueOf(input.readByte()); + case Format.ID_SHORT: + return BigDecimal.valueOf(input.readShort()); + case Format.ID_INT: + return BigDecimal.valueOf(input.readInt()); + case Format.ID_LONG: + return BigDecimal.valueOf(input.readLong()); + case Format.ID_CHAR: + return BigDecimal.valueOf(input.readChar()); + default: + throw DbCompat.unexpectedState(String.valueOf(fromFormatId)); + } + } + + private void checkToFormat(int id) { + if (toFormatId != id) { + throw 
DbCompat.unexpectedState(String.valueOf(toFormatId)); + } + } + + public Object readStringObject(){ + throw DbCompat.unexpectedState(); + } +} diff --git a/src/com/sleepycat/persist/impl/package-info.java b/src/com/sleepycat/persist/impl/package-info.java new file mode 100644 index 0000000..d7469d5 --- /dev/null +++ b/src/com/sleepycat/persist/impl/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Direct Persistence Layer (DPL) implementation. + */ +package com.sleepycat.persist.impl; diff --git a/src/com/sleepycat/persist/model/AnnotationModel.java b/src/com/sleepycat/persist/model/AnnotationModel.java new file mode 100644 index 0000000..0f5d98f --- /dev/null +++ b/src/com/sleepycat/persist/model/AnnotationModel.java @@ -0,0 +1,443 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; + +/** + * The default annotation-based entity model. An AnnotationModel + * is based on annotations that are specified for entity classes and their key + * fields. + * + *

+ * <p>{@code AnnotationModel} objects are thread-safe. Multiple threads may
+ * safely call the methods of a shared {@code AnnotationModel} object.</p>
+ *
+ * <p>The set of persistent classes in the annotation model is the set of all
+ * classes with the {@link Persistent} or {@link Entity} annotation.</p>
+ *
+ * <p>The annotations used to define persistent classes are: {@link Entity},
+ * {@link Persistent}, {@link PrimaryKey}, {@link SecondaryKey} and {@link
+ * KeyField}. A good starting point is {@link Entity}.</p>
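+ *
+ * <p>For example, a minimal annotated entity class (illustrative names
+ * only):</p>
+ *
+ *   @Entity
+ *   class Employee {
+ *
+ *       @PrimaryKey
+ *       long id;
+ *
+ *       @SecondaryKey(relate = Relationship.MANY_TO_ONE)
+ *       String department;
+ *
+ *       private Employee() {} // the DPL requires a default constructor
+ *   }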
        + * + * @author Mark Hayes + */ +public class AnnotationModel extends EntityModel { + + private static class EntityInfo { + PrimaryKeyMetadata priKey; + Map secKeys = + new HashMap(); + } + + private Map classMap; + private Map entityMap; + + /* + * This set records the special classes, i.e., enum and array type. + * [#19377] + */ + private Set registeredSpecialClasses; + + /** + * Constructs a model for annotated entity classes. + */ + public AnnotationModel() { + super(); + classMap = new HashMap(); + entityMap = new HashMap(); + registeredSpecialClasses = new HashSet(); + } + + /* EntityModel methods */ + + @Override + public synchronized Set getKnownClasses() { + return Collections.unmodifiableSet + (new HashSet(classMap.keySet())); + } + + @Override + public Set getKnownSpecialClasses() { + return Collections.unmodifiableSet(registeredSpecialClasses); + } + + @Override + public synchronized EntityMetadata getEntityMetadata(String className) { + /* Call getClassMetadata to collect metadata. */ + getClassMetadata(className); + /* Return the collected entity metadata. */ + EntityInfo info = entityMap.get(className); + if (info != null) { + return new EntityMetadata + (className, info.priKey, + Collections.unmodifiableMap(info.secKeys)); + } else { + return null; + } + } + + @Override + public synchronized ClassMetadata getClassMetadata(String className) { + ClassMetadata metadata = classMap.get(className); + if (metadata == null) { + Class type; + try { + type = resolveClass(className); + } catch (ClassNotFoundException e) { + return null; + } + + /* + * Adds enum or array types to registeredSpecialClasses set, and + * does not create metadata for them. [#19377] + */ + if (type.isEnum() || + type.isArray()) { + registeredSpecialClasses.add(className); + } + + /* Get class annotation. */ + Entity entity = type.getAnnotation(Entity.class); + Persistent persistent = type.getAnnotation(Persistent.class); + if (entity == null && persistent == null) { + return null; + } + if (type.isEnum() || + type.isInterface() || + type.isPrimitive()) { + throw new IllegalArgumentException + ("@Entity and @Persistent not allowed for enum, " + + "interface, or primitive type: " + type.getName()); + } + if (entity != null && persistent != null) { + throw new IllegalArgumentException + ("Both @Entity and @Persistent are not allowed: " + + type.getName()); + } + boolean isEntity; + int version; + String proxiedClassName; + if (entity != null) { + isEntity = true; + version = entity.version(); + proxiedClassName = null; + } else { + isEntity = false; + version = persistent.version(); + Class proxiedClass = persistent.proxyFor(); + proxiedClassName = (proxiedClass != void.class) ? + proxiedClass.getName() : null; + } + /* Get instance fields. */ + List fields = new ArrayList(); + boolean nonDefaultRules = getInstanceFields(fields, type); + Collection nonDefaultFields = null; + if (nonDefaultRules) { + nonDefaultFields = new ArrayList(fields.size()); + for (Field field : fields) { + nonDefaultFields.add(new FieldMetadata + (field.getName(), field.getType().getName(), + type.getName())); + } + nonDefaultFields = + Collections.unmodifiableCollection(nonDefaultFields); + } + /* Get the rest of the metadata and save it. */ + metadata = new ClassMetadata + (className, version, proxiedClassName, isEntity, + getPrimaryKey(type, fields), + getSecondaryKeys(type, fields), + getCompositeKeyFields(type, fields), + nonDefaultFields); + classMap.put(className, metadata); + /* Add any new information about entities. 
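+ *
+ * (The metadata just built may include secondary keys; note that
+ * getSecondaryKeys above derives the key class for ONE_TO_MANY and
+ * MANY_TO_MANY relationships from the field's element type, so such keys
+ * must be declared on an array or Collection field. Illustrative only:
+ *
+ *   @SecondaryKey(relate = Relationship.ONE_TO_MANY)
+ *   Set<String> emailAddresses; // String becomes the secondary key class
+ *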
*/ + updateEntityInfo(metadata); + } + return metadata; + } + + /** + * Fills in the fields array and returns true if the default rules for + * field persistence were overridden. + */ + private boolean getInstanceFields(List fields, Class type) { + boolean nonDefaultRules = false; + for (Field field : type.getDeclaredFields()) { + boolean notPersistent = + (field.getAnnotation(NotPersistent.class) != null); + boolean notTransient = + (field.getAnnotation(NotTransient.class) != null); + if (notPersistent && notTransient) { + throw new IllegalArgumentException + ("Both @NotTransient and @NotPersistent not allowed"); + } + if (notPersistent || notTransient) { + nonDefaultRules = true; + } + int mods = field.getModifiers(); + + if (!Modifier.isStatic(mods) && + !notPersistent && + (!Modifier.isTransient(mods) || notTransient)) { + /* Field is DPL persistent. */ + fields.add(field); + } else { + /* If non-persistent, no other annotations should be used. */ + if (field.getAnnotation(PrimaryKey.class) != null || + field.getAnnotation(SecondaryKey.class) != null || + field.getAnnotation(KeyField.class) != null) { + throw new IllegalArgumentException + ("@PrimaryKey, @SecondaryKey and @KeyField not " + + "allowed on non-persistent field"); + } + } + } + return nonDefaultRules; + } + + private PrimaryKeyMetadata getPrimaryKey(Class type, + List fields) { + Field foundField = null; + String sequence = null; + for (Field field : fields) { + PrimaryKey priKey = field.getAnnotation(PrimaryKey.class); + if (priKey != null) { + if (foundField != null) { + throw new IllegalArgumentException + ("Only one @PrimaryKey allowed: " + type.getName()); + } else { + foundField = field; + sequence = priKey.sequence(); + if (sequence.length() == 0) { + sequence = null; + } + } + } + } + if (foundField != null) { + return new PrimaryKeyMetadata + (foundField.getName(), foundField.getType().getName(), + type.getName(), sequence); + } else { + return null; + } + } + + private Map + getSecondaryKeys(Class type, List fields) { + + Map map = null; + for (Field field : fields) { + SecondaryKey secKey = field.getAnnotation(SecondaryKey.class); + if (secKey != null) { + Relationship rel = secKey.relate(); + String elemClassName = null; + if (rel == Relationship.ONE_TO_MANY || + rel == Relationship.MANY_TO_MANY) { + elemClassName = getElementClass(field); + } + String keyName = secKey.name(); + if (keyName.length() == 0) { + keyName = field.getName(); + } + Class relatedClass = secKey.relatedEntity(); + String relatedEntity = (relatedClass != void.class) ? + relatedClass.getName() : null; + DeleteAction deleteAction = (relatedEntity != null) ? + secKey.onRelatedEntityDelete() : null; + SecondaryKeyMetadata metadata = new SecondaryKeyMetadata + (field.getName(), field.getType().getName(), + type.getName(), elemClassName, keyName, rel, + relatedEntity, deleteAction); + if (map == null) { + map = new HashMap(); + } + if (map.put(keyName, metadata) != null) { + throw new IllegalArgumentException + ("Only one @SecondaryKey with the same name allowed: " + + type.getName() + '.' + keyName); + } + } + } + if (map != null) { + map = Collections.unmodifiableMap(map); + } + return map; + } + + private String getElementClass(Field field) { + Class cls = field.getType(); + if (cls.isArray()) { + return cls.getComponentType().getName(); + } + if (Collection.class.isAssignableFrom(cls)) { + Type[] typeArgs = null; + if (field.getGenericType() instanceof ParameterizedType) { + typeArgs = ((ParameterizedType) field.getGenericType()). 
+ getActualTypeArguments(); + } + if (typeArgs == null || + typeArgs.length != 1 || + !(typeArgs[0] instanceof Class)) { + throw new IllegalArgumentException + ("Collection typed secondary key field must have a" + + " single generic type argument and a wildcard or" + + " type bound is not allowed: " + + field.getDeclaringClass().getName() + '.' + + field.getName()); + } + return ((Class) typeArgs[0]).getName(); + } + throw new IllegalArgumentException + ("ONE_TO_MANY or MANY_TO_MANY secondary key field must have" + + " an array or Collection type: " + + field.getDeclaringClass().getName() + '.' + field.getName()); + } + + private List getCompositeKeyFields(Class type, + List fields) { + List list = null; + for (Field field : fields) { + KeyField keyField = field.getAnnotation(KeyField.class); + if (keyField != null) { + int value = keyField.value(); + if (value < 1 || value > fields.size()) { + throw new IllegalArgumentException + ("Unreasonable @KeyField index value " + value + + ": " + type.getName()); + } + if (list == null) { + list = new ArrayList(fields.size()); + } + if (value <= list.size() && list.get(value - 1) != null) { + throw new IllegalArgumentException + ("@KeyField index value " + value + + " is used more than once: " + type.getName()); + } + while (value > list.size()) { + list.add(null); + } + FieldMetadata metadata = new FieldMetadata + (field.getName(), field.getType().getName(), + type.getName()); + list.set(value - 1, metadata); + } + } + if (list != null) { + if (list.size() < fields.size()) { + throw new IllegalArgumentException + ("@KeyField is missing on one or more instance fields: " + + type.getName()); + } + for (int i = 0; i < list.size(); i += 1) { + if (list.get(i) == null) { + throw new IllegalArgumentException + ("@KeyField is missing for index value " + (i + 1) + + ": " + type.getName()); + } + } + } + if (list != null) { + list = Collections.unmodifiableList(list); + } + return list; + } + + /** + * Add newly discovered metadata to our stash of entity info. This info + * is maintained as it is discovered because it would be expensive to + * create it on demand -- all class metadata would have to be traversed. + */ + private void updateEntityInfo(ClassMetadata metadata) { + + /* + * Find out whether this class or its superclass is an entity. In the + * process, traverse all superclasses to load their metadata -- this + * will populate as much entity info as possible. + */ + String entityClass = null; + PrimaryKeyMetadata priKey = null; + Map secKeys = + new HashMap(); + for (ClassMetadata data = metadata; data != null;) { + if (data.isEntityClass()) { + if (entityClass != null) { + throw new IllegalArgumentException + ("An entity class may not be derived from another" + + " entity class: " + entityClass + + ' ' + data.getClassName()); + } + entityClass = data.getClassName(); + } + /* Save first primary key encountered. */ + if (priKey == null) { + priKey = data.getPrimaryKey(); + } + /* Save all secondary keys encountered by key name. */ + Map classSecKeys = + data.getSecondaryKeys(); + if (classSecKeys != null) { + for (SecondaryKeyMetadata secKey : classSecKeys.values()) { + secKeys.put(secKey.getKeyName(), secKey); + } + } + /* Load superclass metadata. 
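+ *
+ * Walking up the hierarchy enforces the class rules checked below: every
+ * superclass up to Object must itself be persistent, and an entity class
+ * may not extend another entity class. Illustrative only:
+ *
+ *   @Persistent
+ *   class Base { @PrimaryKey long id; } // keys may come from a superclass
+ *
+ *   @Entity
+ *   class Leaf extends Base {}          // OK
+ *
+ *   @Entity
+ *   class Bad extends Leaf {}           // rejected: entity extends entity
+ *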
*/ + Class cls; + try { + cls = resolveClass(data.getClassName()); + } catch (ClassNotFoundException e) { + throw DbCompat.unexpectedException(e); + } + cls = cls.getSuperclass(); + if (cls != Object.class) { + data = getClassMetadata(cls.getName()); + if (data == null) { + throw new IllegalArgumentException + ("Persistent class has non-persistent superclass: " + + cls.getName()); + } + } else { + data = null; + } + } + + /* Add primary and secondary key entity info. */ + if (entityClass != null) { + EntityInfo info = entityMap.get(entityClass); + if (info == null) { + info = new EntityInfo(); + entityMap.put(entityClass, info); + } + if (priKey == null) { + throw new IllegalArgumentException + ("Entity class has no primary key: " + entityClass); + } + info.priKey = priKey; + info.secKeys.putAll(secKeys); + } + } +} diff --git a/src/com/sleepycat/persist/model/BytecodeEnhancer.java b/src/com/sleepycat/persist/model/BytecodeEnhancer.java new file mode 100644 index 0000000..d03e019 --- /dev/null +++ b/src/com/sleepycat/persist/model/BytecodeEnhancer.java @@ -0,0 +1,1758 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static com.sleepycat.asm.Opcodes.AALOAD; +import static com.sleepycat.asm.Opcodes.ACC_ABSTRACT; +import static com.sleepycat.asm.Opcodes.ACC_PRIVATE; +import static com.sleepycat.asm.Opcodes.ACC_PUBLIC; +import static com.sleepycat.asm.Opcodes.ACC_STATIC; +import static com.sleepycat.asm.Opcodes.ACC_TRANSIENT; +import static com.sleepycat.asm.Opcodes.ACONST_NULL; +import static com.sleepycat.asm.Opcodes.ALOAD; +import static com.sleepycat.asm.Opcodes.ANEWARRAY; +import static com.sleepycat.asm.Opcodes.ARETURN; +import static com.sleepycat.asm.Opcodes.ASM4; +import static com.sleepycat.asm.Opcodes.BIPUSH; +import static com.sleepycat.asm.Opcodes.CHECKCAST; +import static com.sleepycat.asm.Opcodes.DCMPL; +import static com.sleepycat.asm.Opcodes.DCONST_0; +import static com.sleepycat.asm.Opcodes.DUP; +import static com.sleepycat.asm.Opcodes.FCMPL; +import static com.sleepycat.asm.Opcodes.FCONST_0; +import static com.sleepycat.asm.Opcodes.GETFIELD; +import static com.sleepycat.asm.Opcodes.GOTO; +import static com.sleepycat.asm.Opcodes.ICONST_0; +import static com.sleepycat.asm.Opcodes.ICONST_1; +import static com.sleepycat.asm.Opcodes.ICONST_2; +import static com.sleepycat.asm.Opcodes.ICONST_3; +import static com.sleepycat.asm.Opcodes.ICONST_4; +import static com.sleepycat.asm.Opcodes.ICONST_5; +import static com.sleepycat.asm.Opcodes.IFEQ; +import static com.sleepycat.asm.Opcodes.IFGT; +import static com.sleepycat.asm.Opcodes.IFLE; +import static com.sleepycat.asm.Opcodes.IFNE; +import static com.sleepycat.asm.Opcodes.IFNONNULL; +import static com.sleepycat.asm.Opcodes.IF_ICMPNE; +import static com.sleepycat.asm.Opcodes.ILOAD; +import static com.sleepycat.asm.Opcodes.INVOKEINTERFACE; +import static com.sleepycat.asm.Opcodes.INVOKESPECIAL; +import static com.sleepycat.asm.Opcodes.INVOKESTATIC; +import static com.sleepycat.asm.Opcodes.INVOKEVIRTUAL; +import static 
com.sleepycat.asm.Opcodes.IRETURN; +import static com.sleepycat.asm.Opcodes.ISUB; +import static com.sleepycat.asm.Opcodes.LCMP; +import static com.sleepycat.asm.Opcodes.LCONST_0; +import static com.sleepycat.asm.Opcodes.NEW; +import static com.sleepycat.asm.Opcodes.POP; +import static com.sleepycat.asm.Opcodes.PUTFIELD; +import static com.sleepycat.asm.Opcodes.RETURN; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.sleepycat.asm.AnnotationVisitor; +import com.sleepycat.asm.Attribute; +import com.sleepycat.asm.ClassVisitor; +import com.sleepycat.asm.FieldVisitor; +import com.sleepycat.asm.Label; +import com.sleepycat.asm.MethodVisitor; +import com.sleepycat.asm.Type; +import com.sleepycat.compat.DbCompat; + +/** + * An ASM ClassVisitor that examines a class, throws NotPersistentException if + * it is not persistent, or enhances it if it is persistent. A class is + * persistent if it contains the @Entity or @Persistent annotations. A + * resulting enhanced class implements the com.sleepycat.persist.impl.Enhanced + * interface. + * + *

+ * <p>NotPersistentException is thrown to abort the transformation in order to
+ * avoid making two passes over the class file (one to look for the annotations
+ * and another to enhance the bytecode) or outputting a class that isn't
+ * enhanced. By aborting the transformation as soon as we detect that the
+ * annotations are missing, we make only one partial pass for a non-persistent
+ * class.</p>
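+ *
+ * <p>Because only control flow matters, the exception is preallocated (see
+ * NOT_PERSISTENT below) so that no stack trace is filled in. A minimal
+ * sketch of the idiom, with hypothetical names:</p>
+ *
+ *   static final RuntimeException ABORT = new NotPersistentException();
+ *
+ *   public FieldVisitor visitField(int access, String name, ...) {
+ *       if (!isPersistent) {
+ *           throw ABORT; // stop ASM's single pass as early as possible
+ *       }
+ *       ...
+ *   }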
        + * + * @author Mark Hayes + */ +class BytecodeEnhancer extends ClassVisitor { + + /** Thrown when we determine that a class is not persistent. */ + @SuppressWarnings("serial") + static class NotPersistentException extends RuntimeException {} + + /** A static instance is used to avoid fillInStaceTrace overhead. */ + private static final NotPersistentException NOT_PERSISTENT = + new NotPersistentException(); + + private static final Map PRIMITIVE_WRAPPERS = + new HashMap(); + static { + PRIMITIVE_WRAPPERS.put(Boolean.class.getName(), Type.BOOLEAN); + PRIMITIVE_WRAPPERS.put(Character.class.getName(), Type.CHAR); + PRIMITIVE_WRAPPERS.put(Byte.class.getName(), Type.BYTE); + PRIMITIVE_WRAPPERS.put(Short.class.getName(), Type.SHORT); + PRIMITIVE_WRAPPERS.put(Integer.class.getName(), Type.INT); + PRIMITIVE_WRAPPERS.put(Long.class.getName(), Type.LONG); + PRIMITIVE_WRAPPERS.put(Float.class.getName(), Type.FLOAT); + PRIMITIVE_WRAPPERS.put(Double.class.getName(), Type.DOUBLE); + } + + private String className; + private String superclassName; + private boolean isPersistent; + private boolean isAbstract; + private boolean hasDefaultConstructor; + private boolean hasPersistentSuperclass; + private boolean isCompositeKey; + private FieldInfo priKeyField; + private List secKeyFields; + private List nonKeyFields; + private String staticBlockMethod; + + BytecodeEnhancer(ClassVisitor parentVisitor) { + super(ASM4, parentVisitor); + secKeyFields = new ArrayList(); + nonKeyFields = new ArrayList(); + } + + @Override + public void visit(int version, + int access, + String name, + String sig, + String superName, + String[] interfaces) { + className = name; + superclassName = superName; + final String ENHANCED = "com/sleepycat/persist/impl/Enhanced"; + if (containsString(interfaces, ENHANCED)) { + throw abort(); + } + interfaces = appendString(interfaces, ENHANCED); + isAbstract = ((access & ACC_ABSTRACT) != 0); + hasPersistentSuperclass = + (superName != null && !superName.equals("java/lang/Object")); + super.visit(version, access, name, sig, superName, interfaces); + } + + @Override + public void visitSource(String source, String debug) { + super.visitSource(source, debug); + } + + @Override + public AnnotationVisitor visitAnnotation(String desc, boolean visible) { + if (desc.equals("Lcom/sleepycat/persist/model/Entity;") || + desc.equals("Lcom/sleepycat/persist/model/Persistent;")) { + isPersistent = true; + } + return super.visitAnnotation(desc, visible); + } + + @Override + public FieldVisitor visitField(int access, + String name, + String desc, + String sig, + Object value) { + if (!isPersistent) { + throw abort(); + } + FieldVisitor ret = super.visitField(access, name, desc, sig, value); + if ((access & ACC_STATIC) == 0) { + FieldInfo info = new FieldInfo(ret, name, desc, + (access & ACC_TRANSIENT) != 0); + nonKeyFields.add(info); + ret = info; + } + return ret; + } + + @Override + public MethodVisitor visitMethod(int access, + String name, + String desc, + String sig, + String[] exceptions) { + if (!isPersistent) { + throw abort(); + } + if ("".equals(name) && "()V".equals(desc)) { + hasDefaultConstructor = true; + } + if ("".equals(name)) { + if (staticBlockMethod != null) { + throw DbCompat.unexpectedState(); + } + staticBlockMethod = "bdbExistingStaticBlock"; + return cv.visitMethod + (ACC_PRIVATE + ACC_STATIC, staticBlockMethod, "()V", null, + null); + } + return super.visitMethod(access, name, desc, sig, exceptions); + } + + @Override + public void visitEnd() { + if (!isPersistent || 
!hasDefaultConstructor) { + throw abort(); + } + /* Generate new code at the end of the class. */ + sortFields(); + genBdbNewInstance(); + genBdbNewArray(); + genBdbIsPriKeyFieldNullOrZero(); + genBdbWritePriKeyField(); + genBdbReadPriKeyField(); + genBdbWriteSecKeyFields(); + genBdbReadSecKeyFields(); + genBdbWriteNonKeyFields(); + genBdbReadNonKeyFields(); + genBdbWriteCompositeKeyFields(); + genBdbReadCompositeKeyFields(); + genBdbGetField(); + genBdbSetField(); + genBdbSetPriField(); + genStaticBlock(); + super.visitEnd(); + } + + private void sortFields() { + /* + System.out.println("AllFields: " + nonKeyFields); + //*/ + if (nonKeyFields.size() == 0) { + return; + } + isCompositeKey = true; + for (FieldInfo field : nonKeyFields) { + if (field.order == null) { + isCompositeKey = false; + } + } + if (isCompositeKey) { + Collections.sort(nonKeyFields, new Comparator() { + public int compare(FieldInfo f1, FieldInfo f2) { + return f1.order.value - f2.order.value; + } + }); + } else { + for (int i = 0; i < nonKeyFields.size();) { + FieldInfo field = nonKeyFields.get(i); + if (field.isTransient) { + nonKeyFields.remove(i); + } else if (field.isPriKey) { + if (priKeyField == null) { + priKeyField = field; + nonKeyFields.remove(i); + } + } else if (field.isSecKey) { + secKeyFields.add(field); + nonKeyFields.remove(i); + } else { + i += 1; + } + } + Comparator cmp = new Comparator() { + public int compare(FieldInfo f1, FieldInfo f2) { + return f1.name.compareTo(f2.name); + } + }; + Collections.sort(secKeyFields, cmp); + Collections.sort(nonKeyFields, cmp); + } + /* + System.out.println("PriKey: " + priKeyField); + System.out.println("SecKeys: " + secKeyFields); + System.out.println("NonKeys: " + nonKeyFields); + //*/ + } + + /** + * Outputs code in a static block to register the prototype instance: + * + * static { + * EnhancedAccessor.registerClass(TheClassName, new TheClass()); + * // or for an abstract class: + * EnhancedAccessor.registerClass(TheClassName, null); + * } + */ + private void genStaticBlock() { + MethodVisitor mv = + cv.visitMethod(ACC_STATIC, "", "()V", null, null); + mv.visitCode(); + if (staticBlockMethod != null) { + mv.visitMethodInsn + (INVOKESTATIC, className, staticBlockMethod, "()V"); + } + mv.visitLdcInsn(className.replace('/', '.')); + if (isAbstract) { + mv.visitInsn(ACONST_NULL); + } else { + mv.visitTypeInsn(NEW, className); + mv.visitInsn(DUP); + mv.visitMethodInsn(INVOKESPECIAL, className, "", "()V"); + } + mv.visitMethodInsn + (INVOKESTATIC, "com/sleepycat/persist/impl/EnhancedAccessor", + "registerClass", + "(Ljava/lang/String;Lcom/sleepycat/persist/impl/Enhanced;)V"); + mv.visitInsn(RETURN); + mv.visitMaxs(3, 0); + mv.visitEnd(); + } + + /** + * public Object bdbNewInstance() { + * return new TheClass(); + * // or if abstract: + * return null; + * } + */ + private void genBdbNewInstance() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbNewInstance", "()Ljava/lang/Object;", null, null); + mv.visitCode(); + if (isAbstract) { + mv.visitInsn(ACONST_NULL); + mv.visitInsn(ARETURN); + mv.visitMaxs(1, 1); + } else { + mv.visitTypeInsn(NEW, className); + mv.visitInsn(DUP); + mv.visitMethodInsn(INVOKESPECIAL, className, "", "()V"); + mv.visitInsn(ARETURN); + mv.visitMaxs(2, 1); + } + mv.visitEnd(); + } + + /** + * public Object bdbNewArray(int len) { + * return new TheClass[len]; + * // or if abstract: + * return null; + * } + */ + private void genBdbNewArray() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbNewArray", "(I)Ljava/lang/Object;", null, 
null); + mv.visitCode(); + if (isAbstract) { + mv.visitInsn(ACONST_NULL); + mv.visitInsn(ARETURN); + mv.visitMaxs(1, 2); + } else { + mv.visitVarInsn(ILOAD, 1); + mv.visitTypeInsn(ANEWARRAY, className); + mv.visitInsn(ARETURN); + mv.visitMaxs(1, 2); + mv.visitEnd(); + } + } + + /** + * public boolean bdbIsPriKeyFieldNullOrZero() { + * return theField == null; // or zero or false, as appropriate + * // or if no primary key but has superclass: + * return super.bdbIsPriKeyFieldNullOrZero(); + * } + */ + private void genBdbIsPriKeyFieldNullOrZero() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbIsPriKeyFieldNullOrZero", "()Z", null, null); + mv.visitCode(); + if (priKeyField != null) { + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, priKeyField.name, + priKeyField.type.getDescriptor()); + Label l0 = new Label(); + if (isRefType(priKeyField.type)) { + mv.visitJumpInsn(IFNONNULL, l0); + } else { + genBeforeCompareToZero(mv, priKeyField.type); + mv.visitJumpInsn(IFNE, l0); + } + mv.visitInsn(ICONST_1); + Label l1 = new Label(); + mv.visitJumpInsn(GOTO, l1); + mv.visitLabel(l0); + mv.visitInsn(ICONST_0); + mv.visitLabel(l1); + } else if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn + (INVOKESPECIAL, superclassName, "bdbIsPriKeyFieldNullOrZero", + "()Z"); + } else { + mv.visitInsn(ICONST_0); + } + mv.visitInsn(IRETURN); + mv.visitMaxs(1, 1); + mv.visitEnd(); + } + + /** + * public void bdbWritePriKeyField(EntityOutput output, Format format) { + * output.writeKeyObject(theField, format); + * // or + * output.writeInt(theField); // and other simple types + * // or if no primary key but has superclass: + * return super.bdbWritePriKeyField(output, format); + * } + */ + private void genBdbWritePriKeyField() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbWritePriKeyField", + "(Lcom/sleepycat/persist/impl/EntityOutput;" + + "Lcom/sleepycat/persist/impl/Format;)V", + null, null); + mv.visitCode(); + if (priKeyField != null) { + if (!genWriteSimpleKeyField(mv, priKeyField)) { + /* For a non-simple type, call EntityOutput.writeKeyObject. 
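+ *
+ * The instructions below build the operand stack for the equivalent Java
+ * call (locals: 0 = this, 1 = output, 2 = format; theField is the
+ * placeholder name from the method comment above):
+ *
+ *   output.writeKeyObject(this.theField, format);
+ *
+ * ALOAD 1 pushes output, ALOAD 0 plus GETFIELD push the key value, and
+ * ALOAD 2 pushes the format consumed by the INVOKEINTERFACE.
+ *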
*/ + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, priKeyField.name, + priKeyField.type.getDescriptor()); + mv.visitVarInsn(ALOAD, 2); + mv.visitMethodInsn + (INVOKEINTERFACE, + "com/sleepycat/persist/impl/EntityOutput", + "writeKeyObject", + "(Ljava/lang/Object;" + + "Lcom/sleepycat/persist/impl/Format;)V"); + } + } else if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 2); + mv.visitMethodInsn + (INVOKESPECIAL, superclassName, "bdbWritePriKeyField", + "(Lcom/sleepycat/persist/impl/EntityOutput;" + + "Lcom/sleepycat/persist/impl/Format;)V"); + } + mv.visitInsn(RETURN); + mv.visitMaxs(3, 3); + mv.visitEnd(); + } + + /** + * public void bdbReadPriKeyField(EntityInput input, Format format) { + * theField = (TheFieldClass) input.readKeyObject(format); + * // or + * theField = input.readInt(); // and other simple types + * // or if no primary key but has superclass: + * super.bdbReadPriKeyField(input, format); + * } + */ + private void genBdbReadPriKeyField() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbReadPriKeyField", + "(Lcom/sleepycat/persist/impl/EntityInput;" + + "Lcom/sleepycat/persist/impl/Format;)V", + null, null); + mv.visitCode(); + if (priKeyField != null) { + if (!genReadSimpleKeyField(mv, priKeyField)) { + /* For a non-simple type, call EntityInput.readKeyObject. */ + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 2); + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readKeyObject", + "(Lcom/sleepycat/persist/impl/Format;)" + + "Ljava/lang/Object;"); + mv.visitTypeInsn(CHECKCAST, getTypeInstName(priKeyField.type)); + mv.visitFieldInsn + (PUTFIELD, className, priKeyField.name, + priKeyField.type.getDescriptor()); + } + } else if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 2); + mv.visitMethodInsn + (INVOKESPECIAL, superclassName, "bdbReadPriKeyField", + "(Lcom/sleepycat/persist/impl/EntityInput;" + + "Lcom/sleepycat/persist/impl/Format;)V"); + } + mv.visitInsn(RETURN); + mv.visitMaxs(3, 3); + mv.visitEnd(); + } + + /** + * public void bdbWriteSecKeyFields(EntityOutput output) { + * output.registerPriKeyObject(priKeyField); // if an object + * super.bdbWriteSecKeyFields(EntityOutput output); // if has super + * output.writeInt(secKeyField1); + * output.writeObject(secKeyField2, null); + * // etc + * } + */ + private void genBdbWriteSecKeyFields() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbWriteSecKeyFields", + "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); + mv.visitCode(); + + /* + * In JE 5.0, String is treated as primitive type, so String does + * not need to be registered. 
[#19247] + */ + if (priKeyField != null && + isRefType(priKeyField.type) && + !priKeyField.isString) { + genRegisterPrimaryKey(mv, false); + } + if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitMethodInsn + (INVOKESPECIAL, superclassName, "bdbWriteSecKeyFields", + "(Lcom/sleepycat/persist/impl/EntityOutput;)V"); + } + for (FieldInfo field : secKeyFields) { + genWriteField(mv, field); + } + mv.visitInsn(RETURN); + mv.visitMaxs(2, 2); + mv.visitEnd(); + } + + /** + * public void bdbReadSecKeyFields(EntityInput input, + * int startField, + * int endField, + * int superLevel) { + * input.registerPriKeyObject(priKeyField); // if an object + * // if has super: + * if (superLevel != 0) { + * super.bdbReadSecKeyFields(..., superLevel - 1); + * } + * if (superLevel <= 0) { + * switch (startField) { + * case 0: + * secKeyField1 = input.readInt(); + * if (endField == 0) break; + * case 1: + * secKeyField2 = (String) input.readObject(); + * if (endField == 1) break; + * case 2: + * secKeyField3 = input.readInt(); + * } + * } + * } + */ + private void genBdbReadSecKeyFields() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbReadSecKeyFields", + "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); + mv.visitCode(); + if (priKeyField != null && + isRefType(priKeyField.type) && + !priKeyField.isString) { + genRegisterPrimaryKey(mv, true); + } else if (priKeyField != null && priKeyField.isString) { + genRegisterPrimaryStringKey(mv); + } + genReadSuperKeyFields(mv, true); + genReadFieldSwitch(mv, secKeyFields); + mv.visitInsn(RETURN); + mv.visitMaxs(5, 5); + mv.visitEnd(); + } + + /** + * output.registerPriKeyObject(priKeyField); + * // or + * input.registerPriKeyObject(priKeyField); + */ + private void genRegisterPrimaryKey(MethodVisitor mv, boolean input) { + String entityInputOrOutputClass = + input ? 
"com/sleepycat/persist/impl/EntityInput" + : "com/sleepycat/persist/impl/EntityOutput"; + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, priKeyField.name, + priKeyField.type.getDescriptor()); + mv.visitMethodInsn + (INVOKEINTERFACE, entityInputOrOutputClass, "registerPriKeyObject", + "(Ljava/lang/Object;)V"); + } + + /** + * input.registerPriStringKeyObject(priKeyField); + */ + private void genRegisterPrimaryStringKey(MethodVisitor mv) { + String entityInputOrOutputClass = + "com/sleepycat/persist/impl/EntityInput"; + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, priKeyField.name, + priKeyField.type.getDescriptor()); + mv.visitMethodInsn + (INVOKEINTERFACE, entityInputOrOutputClass, + "registerPriStringKeyObject", "(Ljava/lang/Object;)V"); + } + + /** + * public void bdbWriteNonKeyFields(EntityOutput output) { + * // like bdbWriteSecKeyFields but does not call registerPriKeyObject + * } + */ + private void genBdbWriteNonKeyFields() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbWriteNonKeyFields", + "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); + mv.visitCode(); + if (!isCompositeKey) { + if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitMethodInsn + (INVOKESPECIAL, superclassName, "bdbWriteNonKeyFields", + "(Lcom/sleepycat/persist/impl/EntityOutput;)V"); + } + for (FieldInfo field : nonKeyFields) { + genWriteField(mv, field); + } + } + mv.visitInsn(RETURN); + mv.visitMaxs(2, 2); + mv.visitEnd(); + } + + /** + * public void bdbReadNonKeyFields(EntityInput input, + * int startField, + * int endField, + * int superLevel) { + * // like bdbReadSecKeyFields but does not call registerPriKeyObject + * } + */ + private void genBdbReadNonKeyFields() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbReadNonKeyFields", + "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); + mv.visitCode(); + if (!isCompositeKey) { + genReadSuperKeyFields(mv, false); + genReadFieldSwitch(mv, nonKeyFields); + } + mv.visitInsn(RETURN); + mv.visitMaxs(5, 5); + mv.visitEnd(); + } + + /** + * public void bdbWriteCompositeKeyFields(EntityOutput output, + * Format[] formats) { + * output.writeInt(compositeKeyField1); + * output.writeKeyObject(compositeKeyField2, formats[1]); + * // etc + * } + */ + private void genBdbWriteCompositeKeyFields() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbWriteCompositeKeyFields", + "(Lcom/sleepycat/persist/impl/EntityOutput;" + + "[Lcom/sleepycat/persist/impl/Format;)V", + null, null); + mv.visitCode(); + if (isCompositeKey) { + for (int i = 0; i < nonKeyFields.size(); i += 1) { + FieldInfo field = nonKeyFields.get(i); + if (!genWriteSimpleKeyField(mv, field)) { + /* For a non-simple type, call writeKeyObject. 
*/ + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, field.name, + field.type.getDescriptor()); + mv.visitVarInsn(ALOAD, 2); + if (i <= Byte.MAX_VALUE) { + mv.visitIntInsn(BIPUSH, i); + } else { + mv.visitLdcInsn(new Integer(i)); + } + mv.visitInsn(AALOAD); + mv.visitMethodInsn + (INVOKEINTERFACE, + "com/sleepycat/persist/impl/EntityOutput", + "writeKeyObject", + "(Ljava/lang/Object;" + + "Lcom/sleepycat/persist/impl/Format;)V"); + } + } + } + mv.visitInsn(RETURN); + mv.visitMaxs(3, 3); + mv.visitEnd(); + } + + /** + * public void bdbReadCompositeKeyFields(EntityInput input, + * Format[] formats) { + * compositeKeyField1 = input.readInt(); + * compositeKeyField2 = input.readKeyObject(formats[1]); + * } + */ + private void genBdbReadCompositeKeyFields() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbReadCompositeKeyFields", + "(Lcom/sleepycat/persist/impl/EntityInput;" + + "[Lcom/sleepycat/persist/impl/Format;)V", + null, null); + mv.visitCode(); + if (isCompositeKey) { + for (int i = 0; i < nonKeyFields.size(); i += 1) { + FieldInfo field = nonKeyFields.get(i); + /* Ignore non-simple (illegal) types for composite key. */ + if (!genReadSimpleKeyField(mv, field)) { + /* For a non-simple type, call readKeyObject. */ + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 2); + if (i <= Byte.MAX_VALUE) { + mv.visitIntInsn(BIPUSH, i); + } else { + mv.visitLdcInsn(new Integer(i)); + } + mv.visitInsn(AALOAD); + mv.visitMethodInsn + (INVOKEINTERFACE, + "com/sleepycat/persist/impl/EntityInput", + "readKeyObject", + "(Lcom/sleepycat/persist/impl/Format;)" + + "Ljava/lang/Object;"); + mv.visitTypeInsn(CHECKCAST, getTypeInstName(field.type)); + mv.visitFieldInsn + (PUTFIELD, className, field.name, + field.type.getDescriptor()); + } + } + } + mv.visitInsn(RETURN); + mv.visitMaxs(5, 5); + mv.visitEnd(); + } + + /** + * output.writeInt(field); // and other primitives + * // or + * output.writeObject(field, null); + */ + private void genWriteField(MethodVisitor mv, FieldInfo field) { + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, field.name, field.type.getDescriptor()); + int sort = field.type.getSort(); + if (field.isString) { + + /* + * In JE 5.0, we treat String as primitive, and will not store + * format ID for String data. [#19247] + */ + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeString", + "(Ljava/lang/String;)Lcom/sleepycat/bind/tuple/TupleOutput;"); + mv.visitInsn(POP); + } else if (sort == Type.OBJECT || sort == Type.ARRAY) { + mv.visitInsn(ACONST_NULL); + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeObject", + "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V"); + } else { + genWritePrimitive(mv, sort); + } + } + + /** + * Generates writing of a simple type key field, or returns false if the + * key field is not a simple type (i.e., it is a composite key type). 
+ * + * output.writeInt(theField); // and other primitives + * // or + * output.writeInt(theField.intValue()); // and other simple types + * // or returns false + */ + private boolean genWriteSimpleKeyField(MethodVisitor mv, FieldInfo field) { + if (genWritePrimitiveField(mv, field)) { + return true; + } + String fieldClassName = field.type.getClassName(); + if (!isSimpleRefType(fieldClassName)) { + return false; + } + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, field.name, field.type.getDescriptor()); + Integer sort = PRIMITIVE_WRAPPERS.get(fieldClassName); + if (sort != null) { + genUnwrapPrimitive(mv, sort); + genWritePrimitive(mv, sort); + } else if (fieldClassName.equals(Date.class.getName())) { + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/util/Date", "getTime", "()J"); + genWritePrimitive(mv, Type.LONG); + } else if (fieldClassName.equals(String.class.getName())) { + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeString", + "(Ljava/lang/String;)Lcom/sleepycat/bind/tuple/TupleOutput;"); + mv.visitInsn(POP); + } else if (fieldClassName.equals(BigInteger.class.getName())) { + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeBigInteger", + "(Ljava/math/BigInteger;)Lcom/sleepycat/bind/tuple/TupleOutput;"); + mv.visitInsn(POP); + } else if (fieldClassName.equals(BigDecimal.class.getName())) { + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeSortedBigDecimal", + "(Ljava/math/BigDecimal;)Lcom/sleepycat/bind/tuple/TupleOutput;"); + mv.visitInsn(POP); + } else { + throw DbCompat.unexpectedState(fieldClassName); + } + return true; + } + + private boolean genWritePrimitiveField(MethodVisitor mv, FieldInfo field) { + int sort = field.type.getSort(); + if (sort == Type.OBJECT || sort == Type.ARRAY) { + return false; + } + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, field.name, field.type.getDescriptor()); + genWritePrimitive(mv, sort); + return true; + } + + /** + * // if has super: + * if (superLevel != 0) { + * super.bdbReadXxxKeyFields(..., superLevel - 1); + * } + */ + private void genReadSuperKeyFields(MethodVisitor mv, + boolean areSecKeyFields) { + if (hasPersistentSuperclass) { + Label next = new Label(); + mv.visitVarInsn(ILOAD, 4); + mv.visitJumpInsn(IFEQ, next); + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ILOAD, 2); + mv.visitVarInsn(ILOAD, 3); + mv.visitVarInsn(ILOAD, 4); + mv.visitInsn(ICONST_1); + mv.visitInsn(ISUB); + String name = areSecKeyFields ? "bdbReadSecKeyFields" + : "bdbReadNonKeyFields"; + mv.visitMethodInsn + (INVOKESPECIAL, superclassName, name, + "(Lcom/sleepycat/persist/impl/EntityInput;III)V"); + mv.visitLabel(next); + } + } + + /** + * public void bdbReadXxxKeyFields(EntityInput input, + * int startField, + * int endField, + * int superLevel) { + * // ... 
+ * if (superLevel <= 0) { + * switch (startField) { + * case 0: + * keyField1 = input.readInt(); + * if (endField == 0) break; + * case 1: + * keyField2 = (String) input.readObject(); + * if (endField == 1) break; + * case 2: + * keyField3 = input.readInt(); + * } + * } + */ + private void genReadFieldSwitch(MethodVisitor mv, List fields) { + int nFields = fields.size(); + if (nFields > 0) { + mv.visitVarInsn(ILOAD, 4); + Label pastSwitch = new Label(); + mv.visitJumpInsn(IFGT, pastSwitch); + Label[] labels = new Label[nFields]; + for (int i = 0; i < nFields; i += 1) { + labels[i] = new Label(); + } + mv.visitVarInsn(ILOAD, 2); + mv.visitTableSwitchInsn(0, nFields - 1, pastSwitch, labels); + for (int i = 0; i < nFields; i += 1) { + FieldInfo field = fields.get(i); + mv.visitLabel(labels[i]); + genReadField(mv, field); + if (i < nFields - 1) { + Label nextCase = labels[i + 1]; + mv.visitVarInsn(ILOAD, 3); + if (i == 0) { + mv.visitJumpInsn(IFNE, nextCase); + } else { + switch (i) { + case 1: + mv.visitInsn(ICONST_1); + break; + case 2: + mv.visitInsn(ICONST_2); + break; + case 3: + mv.visitInsn(ICONST_3); + break; + case 4: + mv.visitInsn(ICONST_4); + break; + case 5: + mv.visitInsn(ICONST_5); + break; + default: + mv.visitIntInsn(BIPUSH, i); + } + mv.visitJumpInsn(IF_ICMPNE, nextCase); + } + mv.visitJumpInsn(GOTO, pastSwitch); + } + } + mv.visitLabel(pastSwitch); + } + } + + /** + * field = input.readInt(); // and other primitives + * // or + * field = (FieldClass) input.readObject(); + */ + private void genReadField(MethodVisitor mv, FieldInfo field) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + if (field.isString) { + + /* + * In JE 5.0, we treat String as primitive, and will not store + * format ID for String data. [#19247] + */ + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readStringObject", "()Ljava/lang/Object;"); + mv.visitTypeInsn(CHECKCAST, getTypeInstName(field.type)); + } else if (isRefType(field.type)) { + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readObject", "()Ljava/lang/Object;"); + mv.visitTypeInsn(CHECKCAST, getTypeInstName(field.type)); + } else { + genReadPrimitive(mv, field.type.getSort()); + } + mv.visitFieldInsn + (PUTFIELD, className, field.name, field.type.getDescriptor()); + } + + /** + * Generates reading of a simple type key field, or returns false if the + * key field is not a simple type (i.e., it is a composite key type). + * + * field = input.readInt(); // and other primitives + * // or + * field = Integer.valueOf(input.readInt()); // and other simple types + * // or returns false + */ + private boolean genReadSimpleKeyField(MethodVisitor mv, FieldInfo field) { + if (genReadPrimitiveField(mv, field)) { + return true; + } + String fieldClassName = field.type.getClassName(); + if (!isSimpleRefType(fieldClassName)) { + return false; + } + Integer sort = PRIMITIVE_WRAPPERS.get(fieldClassName); + if (sort != null) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + genReadPrimitive(mv, sort); + genWrapPrimitive(mv, sort); + } else if (fieldClassName.equals(Date.class.getName())) { + /* Date is a special case because we use NEW instead of valueOf. 
*/ + mv.visitVarInsn(ALOAD, 0); + mv.visitTypeInsn(NEW, "java/util/Date"); + mv.visitInsn(DUP); + mv.visitVarInsn(ALOAD, 1); + genReadPrimitive(mv, Type.LONG); + mv.visitMethodInsn + (INVOKESPECIAL, "java/util/Date", "", "(J)V"); + } else if (fieldClassName.equals(String.class.getName())) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readString", "()Ljava/lang/String;"); + } else if (fieldClassName.equals(BigInteger.class.getName())) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readBigInteger", "()Ljava/math/BigInteger;"); + } else if (fieldClassName.equals(BigDecimal.class.getName())) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readSortedBigDecimal", "()Ljava/math/BigDecimal;"); + } else { + throw DbCompat.unexpectedState(fieldClassName); + } + mv.visitFieldInsn + (PUTFIELD, className, field.name, field.type.getDescriptor()); + return true; + } + + private boolean genReadPrimitiveField(MethodVisitor mv, FieldInfo field) { + int sort = field.type.getSort(); + if (sort == Type.OBJECT || sort == Type.ARRAY) { + return false; + } + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + genReadPrimitive(mv, sort); + mv.visitFieldInsn + (PUTFIELD, className, field.name, field.type.getDescriptor()); + return true; + } + + /** + * public Object bdbGetField(Object o, + * int field, + * int superLevel, + * boolean isSecField) { + * if (superLevel > 0) { + * // if has superclass: + * return super.bdbGetField + * (o, field, superLevel - 1, isSecField); + * } else if (isSecField) { + * switch (field) { + * case 0: + * return Integer.valueOf(f2); + * case 1: + * return f3; + * case 2: + * return f4; + * } + * } else { + * switch (field) { + * case 0: + * return Integer.valueOf(f5); + * case 1: + * return f6; + * case 2: + * return f7; + * } + * } + * return null; + * } + */ + private void genBdbGetField() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbGetField", + "(Ljava/lang/Object;IIZ)Ljava/lang/Object;", null, null); + mv.visitCode(); + mv.visitVarInsn(ILOAD, 3); + Label l0 = new Label(); + mv.visitJumpInsn(IFLE, l0); + Label l1 = new Label(); + if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ILOAD, 2); + mv.visitVarInsn(ILOAD, 3); + mv.visitInsn(ICONST_1); + mv.visitInsn(ISUB); + mv.visitVarInsn(ILOAD, 4); + mv.visitMethodInsn + (INVOKESPECIAL, className, "bdbGetField", + "(Ljava/lang/Object;IIZ)Ljava/lang/Object;"); + mv.visitInsn(ARETURN); + } else { + mv.visitJumpInsn(GOTO, l1); + } + mv.visitLabel(l0); + mv.visitVarInsn(ILOAD, 4); + Label l2 = new Label(); + mv.visitJumpInsn(IFEQ, l2); + genGetFieldSwitch(mv, secKeyFields, l1); + mv.visitLabel(l2); + genGetFieldSwitch(mv, nonKeyFields, l1); + mv.visitLabel(l1); + mv.visitInsn(ACONST_NULL); + mv.visitInsn(ARETURN); + mv.visitMaxs(1, 5); + mv.visitEnd(); + } + + /** + * mv.visitVarInsn(ILOAD, 2); + * Label l0 = new Label(); + * Label l1 = new Label(); + * Label l2 = new Label(); + * mv.visitTableSwitchInsn(0, 2, TheDefLabel, new Label[] { l0, l1, l2 }); + * mv.visitLabel(l0); + * mv.visitVarInsn(ALOAD, 0); + * mv.visitFieldInsn(GETFIELD, TheClassName, "f2", "I"); + * mv.visitMethodInsn(INVOKESTATIC, "java/lang/Integer", "valueOf", + * "(I)Ljava/lang/Integer;"); + * 
mv.visitInsn(ARETURN); + * mv.visitLabel(l1); + * mv.visitVarInsn(ALOAD, 0); + * mv.visitFieldInsn(GETFIELD, TheClassName, "f3", "Ljava/lang/String;"); + * mv.visitInsn(ARETURN); + * mv.visitLabel(l2); + * mv.visitVarInsn(ALOAD, 0); + * mv.visitFieldInsn(GETFIELD, TheClassName, "f4", "Ljava/lang/String;"); + * mv.visitInsn(ARETURN); + */ + private void genGetFieldSwitch(MethodVisitor mv, + List fields, + Label defaultLabel) { + int nFields = fields.size(); + if (nFields == 0) { + mv.visitJumpInsn(GOTO, defaultLabel); + return; + } + Label[] labels = new Label[nFields]; + for (int i = 0; i < nFields; i += 1) { + labels[i] = new Label(); + } + mv.visitVarInsn(ILOAD, 2); + mv.visitTableSwitchInsn(0, nFields - 1, defaultLabel, labels); + for (int i = 0; i < nFields; i += 1) { + FieldInfo field = fields.get(i); + mv.visitLabel(labels[i]); + mv.visitVarInsn(ALOAD, 0); + mv.visitFieldInsn + (GETFIELD, className, field.name, field.type.getDescriptor()); + if (!isRefType(field.type)) { + genWrapPrimitive(mv, field.type.getSort()); + } + mv.visitInsn(ARETURN); + } + } + + /** + * public void bdbSetField(Object o, + * int field, + * int superLevel, + * boolean isSecField, + * Object value) { + * if (superLevel > 0) { + * // if has superclass: + * super.bdbSetField + * (o, field, superLevel - 1, isSecField, value); + * } else if (isSecField) { + * switch (field) { + * case 0: + * f2 = ((Integer) value).intValue(); + * case 1: + * f3 = (String) value; + * case 2: + * f4 = (String) value; + * } + * } else { + * switch (field) { + * case 0: + * f5 = ((Integer) value).intValue(); + * case 1: + * f6 = (String) value; + * case 2: + * f7 = (String) value; + * } + * } + * } + */ + private void genBdbSetField() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbSetField", + "(Ljava/lang/Object;IIZLjava/lang/Object;)V", null, null); + mv.visitCode(); + mv.visitVarInsn(ILOAD, 3); + Label l0 = new Label(); + mv.visitJumpInsn(IFLE, l0); + if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ILOAD, 2); + mv.visitVarInsn(ILOAD, 3); + mv.visitInsn(ICONST_1); + mv.visitInsn(ISUB); + mv.visitVarInsn(ILOAD, 4); + mv.visitVarInsn(ALOAD, 5); + mv.visitMethodInsn + (INVOKESPECIAL, className, "bdbSetField", + "(Ljava/lang/Object;IIZLjava/lang/Object;)V"); + } + mv.visitInsn(RETURN); + mv.visitLabel(l0); + mv.visitVarInsn(ILOAD, 4); + Label l2 = new Label(); + mv.visitJumpInsn(IFEQ, l2); + Label l1 = new Label(); + genSetFieldSwitch(mv, secKeyFields, l1); + mv.visitLabel(l2); + genSetFieldSwitch(mv, nonKeyFields, l1); + mv.visitLabel(l1); + mv.visitInsn(RETURN); + mv.visitMaxs(2, 6); + mv.visitEnd(); + } + + /** + * public void bdbSetPriField(Object o, Object value) { + * if (priKeyField != null) { + * thisField = (TheFieldClass) value; + * } else if (super != null) { + * // if has superclass: + * super.bdbSetPriField(o, value) + * } + * } + */ + private void genBdbSetPriField() { + MethodVisitor mv = cv.visitMethod + (ACC_PUBLIC, "bdbSetPriField", + "(Ljava/lang/Object;Ljava/lang/Object;)V", null, null); + mv.visitCode(); + if (priKeyField != null) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 2); + if (isRefType(priKeyField.type)) { + mv.visitTypeInsn(CHECKCAST, getTypeInstName(priKeyField.type)); + } else { + int sort = priKeyField.type.getSort(); + mv.visitTypeInsn + (CHECKCAST, + getPrimitiveWrapperClass(sort).replace('.', '/')); + genUnwrapPrimitive(mv, sort); + } + mv.visitFieldInsn + (PUTFIELD, className, priKeyField.name, + 
priKeyField.type.getDescriptor()); + } else if (hasPersistentSuperclass) { + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 1); + mv.visitVarInsn(ALOAD, 2); + mv.visitMethodInsn + (INVOKESPECIAL, superclassName, "bdbSetPriField", + "(Ljava/lang/Object;Ljava/lang/Object;)V"); + } + mv.visitInsn(RETURN); + mv.visitMaxs(3, 3); + mv.visitEnd(); + } + + /** + * mv.visitVarInsn(ILOAD, 2); + * Label l0 = new Label(); + * Label l1 = new Label(); + * Label l2 = new Label(); + * mv.visitTableSwitchInsn(0, 2, TheDefLabel, new Label[] { l0, l1, l2 }); + * mv.visitLabel(l0); + * mv.visitVarInsn(ALOAD, 0); + * mv.visitVarInsn(ALOAD, 5); + * mv.visitTypeInsn(CHECKCAST, "java/lang/Integer"); + * mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Integer", "intValue", + * "()I"); + * mv.visitFieldInsn(PUTFIELD, TheClassName, "f2", "I"); + * mv.visitLabel(l1); + * mv.visitVarInsn(ALOAD, 0); + * mv.visitVarInsn(ALOAD, 5); + * mv.visitTypeInsn(CHECKCAST, "java/lang/String"); + * mv.visitFieldInsn(PUTFIELD, TheClassName, "f3", "Ljava/lang/String;"); + * mv.visitLabel(l2); + * mv.visitVarInsn(ALOAD, 0); + * mv.visitVarInsn(ALOAD, 5); + * mv.visitTypeInsn(CHECKCAST, "java/lang/String"); + * mv.visitFieldInsn(PUTFIELD, TheClassName, "f4", "Ljava/lang/String;"); + */ + private void genSetFieldSwitch(MethodVisitor mv, + List fields, + Label defaultLabel) { + int nFields = fields.size(); + if (nFields == 0) { + mv.visitJumpInsn(GOTO, defaultLabel); + return; + } + Label[] labels = new Label[nFields]; + for (int i = 0; i < nFields; i += 1) { + labels[i] = new Label(); + } + mv.visitVarInsn(ILOAD, 2); + mv.visitTableSwitchInsn(0, nFields - 1, defaultLabel, labels); + for (int i = 0; i < nFields; i += 1) { + FieldInfo field = fields.get(i); + mv.visitLabel(labels[i]); + mv.visitVarInsn(ALOAD, 0); + mv.visitVarInsn(ALOAD, 5); + if (isRefType(field.type)) { + mv.visitTypeInsn(CHECKCAST, getTypeInstName(field.type)); + } else { + int sort = field.type.getSort(); + mv.visitTypeInsn + (CHECKCAST, + getPrimitiveWrapperClass(sort).replace('.', '/')); + genUnwrapPrimitive(mv, sort); + } + mv.visitFieldInsn + (PUTFIELD, className, field.name, field.type.getDescriptor()); + mv.visitInsn(RETURN); + } + } + + private void genWritePrimitive(MethodVisitor mv, int sort) { + switch (sort) { + case Type.BOOLEAN: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeBoolean", "(Z)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + case Type.CHAR: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeChar", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + case Type.BYTE: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeByte", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + case Type.SHORT: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeShort", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + case Type.INT: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + case Type.LONG: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeLong", "(J)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + case Type.FLOAT: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeSortedFloat", + "(F)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + case Type.DOUBLE: + mv.visitMethodInsn + 
(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", + "writeSortedDouble", + "(D)Lcom/sleepycat/bind/tuple/TupleOutput;"); + break; + default: + throw DbCompat.unexpectedState(String.valueOf(sort)); + } + /* The write methods always return 'this' and we always discard it. */ + mv.visitInsn(POP); + } + + private void genReadPrimitive(MethodVisitor mv, int sort) { + switch (sort) { + case Type.BOOLEAN: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readBoolean", "()Z"); + break; + case Type.CHAR: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readChar", "()C"); + break; + case Type.BYTE: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readByte", "()B"); + break; + case Type.SHORT: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readShort", "()S"); + break; + case Type.INT: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readInt", "()I"); + break; + case Type.LONG: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readLong", "()J"); + break; + case Type.FLOAT: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readSortedFloat", "()F"); + break; + case Type.DOUBLE: + mv.visitMethodInsn + (INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", + "readSortedDouble", "()D"); + break; + default: + throw DbCompat.unexpectedState(String.valueOf(sort)); + } + } + + private void genWrapPrimitive(MethodVisitor mv, int sort) { + switch (sort) { + case Type.BOOLEAN: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Boolean", "valueOf", + "(Z)Ljava/lang/Boolean;"); + break; + case Type.CHAR: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Character", "valueOf", + "(C)Ljava/lang/Character;"); + break; + case Type.BYTE: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Byte", "valueOf", + "(B)Ljava/lang/Byte;"); + break; + case Type.SHORT: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Short", "valueOf", + "(S)Ljava/lang/Short;"); + break; + case Type.INT: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Integer", "valueOf", + "(I)Ljava/lang/Integer;"); + break; + case Type.LONG: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Long", "valueOf", + "(J)Ljava/lang/Long;"); + break; + case Type.FLOAT: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Float", "valueOf", + "(F)Ljava/lang/Float;"); + break; + case Type.DOUBLE: + mv.visitMethodInsn + (INVOKESTATIC, "java/lang/Double", "valueOf", + "(D)Ljava/lang/Double;"); + break; + default: + throw DbCompat.unexpectedState(String.valueOf(sort)); + } + } + + private void genUnwrapPrimitive(MethodVisitor mv, int sort) { + switch (sort) { + case Type.BOOLEAN: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Boolean", "booleanValue", "()Z"); + break; + case Type.CHAR: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Character", "charValue", "()C"); + break; + case Type.BYTE: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Byte", "byteValue", "()B"); + break; + case Type.SHORT: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Short", "shortValue", "()S"); + break; + case Type.INT: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Integer", "intValue", "()I"); + break; + case Type.LONG: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Long", "longValue", "()J"); + break; + case Type.FLOAT: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Float", "floatValue", "()F"); + break; + case 
Type.DOUBLE: + mv.visitMethodInsn + (INVOKEVIRTUAL, "java/lang/Double", "doubleValue", "()D"); + break; + default: + throw DbCompat.unexpectedState(String.valueOf(sort)); + } + } + + /** + * Returns the type name for a visitTypeInsn operand, which is the internal + * name for an object type and the descriptor for an array type. Must not + * be called for a non-reference type. + */ + private static String getTypeInstName(Type type) { + if (type.getSort() == Type.OBJECT) { + return type.getInternalName(); + } else if (type.getSort() == Type.ARRAY) { + return type.getDescriptor(); + } else { + throw DbCompat.unexpectedState(); + } + } + + /** + * Call this method before comparing a non-reference operand to zero as an + * int, for example, with IFNE, IFEQ, IFLT, etc. If the operand is a long, + * float or double, this method will compare it to zero and leave the + * result as an int operand. + */ + private static void genBeforeCompareToZero(MethodVisitor mv, Type type) { + switch (type.getSort()) { + case Type.LONG: + mv.visitInsn(LCONST_0); + mv.visitInsn(LCMP); + break; + case Type.FLOAT: + mv.visitInsn(FCONST_0); + mv.visitInsn(FCMPL); + break; + case Type.DOUBLE: + mv.visitInsn(DCONST_0); + mv.visitInsn(DCMPL); + break; + } + } + + /** + * Returns true if the given class is a primitive wrapper, Date or String. + */ + static boolean isSimpleRefType(String className) { + return (PRIMITIVE_WRAPPERS.containsKey(className) || + className.equals(BigInteger.class.getName()) || + className.equals(BigDecimal.class.getName()) || + className.equals(Date.class.getName()) || + className.equals(String.class.getName())); + } + + /** + * Returns the wrapper class for a primitive. + */ + static String getPrimitiveWrapperClass(int primitiveSort) { + for (Map.Entry entry : + PRIMITIVE_WRAPPERS.entrySet()) { + if (entry.getValue() == primitiveSort) { + return entry.getKey(); + } + } + throw DbCompat.unexpectedState(String.valueOf(primitiveSort)); + } + + /** + * Returns true if the given type is an object or array. + */ + private static boolean isRefType(Type type) { + int sort = type.getSort(); + return (sort == Type.OBJECT || sort == Type.ARRAY); + } + + /** + * Returns whether a string array contains a given string. + */ + private static boolean containsString(String[] a, String s) { + if (a != null) { + for (String t : a) { + if (s.equals(t)) { + return true; + } + } + } + return false; + } + + /** + * Appends a string to a string array. + */ + private static String[] appendString(String[] a, String s) { + if (a != null) { + int len = a.length; + String[] a2 = new String[len + 1]; + System.arraycopy(a, 0, a2, 0, len); + a2[len] = s; + return a2; + } else { + return new String[] { s }; + } + } + + /** + * Aborts the enhancement process when we determine that enhancement is + * unnecessary or not possible. 
+ */ + private NotPersistentException abort() { + return NOT_PERSISTENT; + } + + private static class FieldInfo extends FieldVisitor { + + FieldVisitor parent; + String name; + Type type; + OrderInfo order; + boolean isPriKey; + boolean isSecKey; + boolean isTransient; + boolean isString = false; + + FieldInfo(FieldVisitor parent, + String name, + String desc, + boolean isTransient) { + super(ASM4); + this.parent = parent; + this.name = name; + this.isTransient = isTransient; + type = Type.getType(desc); + if (type.getClassName().equals(String.class.getName())) { + isString = true; + } + } + + public AnnotationVisitor visitAnnotation(String desc, + boolean visible) { + AnnotationVisitor ret = parent.visitAnnotation(desc, visible); + if (desc.equals + ("Lcom/sleepycat/persist/model/KeyField;")) { + order = new OrderInfo(ret); + ret = order; + } else if (desc.equals + ("Lcom/sleepycat/persist/model/PrimaryKey;")) { + isPriKey = true; + } else if (desc.equals + ("Lcom/sleepycat/persist/model/SecondaryKey;")) { + isSecKey = true; + } else if (desc.equals + ("Lcom/sleepycat/persist/model/NotPersistent;")) { + isTransient = true; + } else if (desc.equals + ("Lcom/sleepycat/persist/model/NotTransient;")) { + isTransient = false; + } + return ret; + } + + public void visitAttribute(Attribute attr) { + parent.visitAttribute(attr); + } + + public void visitEnd() { + parent.visitEnd(); + } + + @Override + public String toString() { + String label; + if (isPriKey) { + label = "PrimaryKey"; + } else if (isSecKey) { + label = "SecondaryKey"; + } else if (order != null) { + label = "CompositeKeyField " + order.value; + } else { + label = "NonKeyField"; + } + return "[" + label + ' ' + name + ' ' + type + ']'; + } + } + + private static class OrderInfo extends AnnotationInfo { + + int value; + + OrderInfo(AnnotationVisitor parent) { + super(parent); + } + + @Override + public void visit(String name, Object value) { + if (name.equals("value")) { + this.value = (Integer) value; + } + parent.visit(name, value); + } + } + + private static abstract class AnnotationInfo extends AnnotationVisitor { + + AnnotationVisitor parent; + + AnnotationInfo(AnnotationVisitor parent) { + super(ASM4); + this.parent = parent; + } + + public void visit(String name, Object value) { + parent.visit(name, value); + } + + public AnnotationVisitor visitAnnotation(String name, String desc) { + return parent.visitAnnotation(name, desc); + } + + public AnnotationVisitor visitArray(String name) { + return parent.visitArray(name); + } + + public void visitEnum(String name, String desc, String value) { + parent.visitEnum(name, desc, value); + } + + public void visitEnd() { + parent.visitEnd(); + } + } +} diff --git a/src/com/sleepycat/persist/model/ClassEnhancer.java b/src/com/sleepycat/persist/model/ClassEnhancer.java new file mode 100644 index 0000000..888844b --- /dev/null +++ b/src/com/sleepycat/persist/model/ClassEnhancer.java @@ -0,0 +1,329 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.model; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.lang.instrument.ClassFileTransformer; +import java.lang.instrument.Instrumentation; +import java.security.ProtectionDomain; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.StringTokenizer; + +import com.sleepycat.asm.ClassReader; +import com.sleepycat.asm.ClassVisitor; +import com.sleepycat.asm.ClassWriter; + +/** + * Enhances the bytecode of persistent classes to provide efficient access to + * fields and constructors, and to avoid special security policy settings for + * accessing non-public members. Classes are enhanced if they are annotated + * with {@link Entity} or {@link Persistent}. + * + *

+ * <p>{@code ClassEnhancer} objects are thread-safe. Multiple threads may
+ * safely call the methods of a shared {@code ClassEnhancer} object.</p>
+ *
+ * <p>As described in the package summary, bytecode
+ * enhancement may be used either at runtime or offline (at build time).</p>
+ *
+ * <p>To use enhancement offline, this class may be used as a {@link #main
+ * main program}. It may also be used via an {@link ClassEnhancerTask ant
+ * task}.</p>
+ *
+ * <p>For enhancement at runtime, this class provides the low level support
+ * needed to transform class bytes during class loading. To configure runtime
+ * enhancement you may use one of the following approaches:</p>
+ * <ol>
+ * <li>The BDB {@code je-<version>.jar} or {@code db.jar} file may be used as
+ * an instrumentation agent as follows:
+ * <pre>{@literal java -javaagent:<jar-file>=enhance:packageNames ...}</pre>
+ * {@code packageNames} is a comma separated list of packages containing
+ * persistent classes. Sub-packages of these packages are also searched. If
+ * {@code packageNames} is omitted then all packages known to the current
+ * classloader are searched.
+ * <p>The "-v" option may be included in the comma separated list to print the
+ * name of each class that is enhanced.</p></li>
+ * <li>The {@link #enhance} method may be called to implement a class loader
+ * that performs enhancement. Using this approach, it is the developer's
+ * responsibility to implement and configure the class loader.</li>
+ * </ol>
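For the class loader approach, a minimal sketch follows. It is illustrative only: the loader class and its byte-reading helper are hypothetical, and only the {@code enhance} method is part of this class's API.

    class EnhancingClassLoader extends ClassLoader {
        private final ClassEnhancer enhancer = new ClassEnhancer();

        EnhancingClassLoader(ClassLoader parent) {
            super(parent);
        }

        @Override
        protected Class<?> findClass(String name) throws ClassNotFoundException {
            byte[] bytes = readClassBytes(name);
            /* enhance() returns null when the class needs no enhancement. */
            byte[] enhanced = enhancer.enhance(name, bytes);
            if (enhanced != null) {
                bytes = enhanced;
            }
            return defineClass(name, bytes, 0, bytes.length);
        }

        /* Hypothetical helper: reads the raw class file bytes from the
           parent loader's classpath. */
        private byte[] readClassBytes(String name) throws ClassNotFoundException {
            String path = name.replace('.', '/') + ".class";
            try (java.io.InputStream in = getParent().getResourceAsStream(path)) {
                if (in == null) {
                    throw new ClassNotFoundException(name);
                }
                java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
                byte[] buf = new byte[4096];
                for (int len; (len = in.read(buf)) > 0; ) {
                    out.write(buf, 0, len);
                }
                return out.toByteArray();
            } catch (java.io.IOException e) {
                throw new ClassNotFoundException(name, e);
            }
        }
    }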
        + * + * @author Mark Hayes + */ +public class ClassEnhancer implements ClassFileTransformer { + + private static final String AGENT_PREFIX = "enhance:"; + + private Set packagePrefixes; + private boolean verbose; + + /** + * Enhances classes in the directories specified. The class files are + * replaced when they are enhanced, without changing the file modification + * date. For example: + * + *
+ * <pre>
+ * java -cp je-<version>.jar com.sleepycat.persist.model.ClassEnhancer ./classes
+ * </pre>
+ *
+ * <p>The "-v" argument may be specified to print the name of each class
+ * file that is enhanced. The total number of class files enhanced will
+ * always be printed.</p>
        + * + * @param args one or more directories containing classes to be enhanced. + * Subdirectories of these directories will also be searched. Optionally, + * -v may be included to print the name of every class file enhanced. + * + * @throws Exception if a problem occurs. + */ + public static void main(String[] args) throws Exception { + try { + boolean verbose = false; + List fileList = new ArrayList(); + for (int i = 0; i < args.length; i += 1) { + String arg = args[i]; + if (arg.startsWith("-")) { + if ("-v".equals(args[i])) { + verbose = true; + } else { + throw new IllegalArgumentException + ("Unknown arg: " + arg); + } + } else { + fileList.add(new File(arg)); + } + } + ClassEnhancer enhancer = new ClassEnhancer(); + enhancer.setVerbose(verbose); + int nFiles = 0; + for (File file : fileList) { + nFiles += enhancer.enhanceFile(file); + } + if (nFiles > 0) { + System.out.println("Enhanced: " + nFiles + " files"); + } + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + + /** + * Enhances classes as specified by a JVM -javaagent argument. + * + * @param args see java.lang.instrument.Instrumentation. + * + * @param inst see java.lang.instrument.Instrumentation. + * + * @see java.lang.instrument.Instrumentation + */ + public static void premain(String args, Instrumentation inst) { + if (!args.startsWith(AGENT_PREFIX)) { + throw new IllegalArgumentException + ("Unknown javaagent args: " + args + + " Args must start with: \"" + AGENT_PREFIX + '"'); + } + args = args.substring(AGENT_PREFIX.length()); + Set packageNames = null; + boolean verbose = false; + if (args.length() > 0) { + packageNames = new HashSet(); + StringTokenizer tokens = new StringTokenizer(args, ","); + while (tokens.hasMoreTokens()) { + String token = tokens.nextToken(); + if (token.startsWith("-")) { + if (token.equals("-v")) { + verbose = true; + } else { + throw new IllegalArgumentException + ("Unknown javaagent arg: " + token); + } + } else { + packageNames.add(token); + } + } + } + ClassEnhancer enhancer = new ClassEnhancer(packageNames); + enhancer.setVerbose(verbose); + inst.addTransformer(enhancer); + } + + /** + * Creates a class enhancer that searches all packages. + */ + public ClassEnhancer() { + } + + /** + * Sets verbose mode. + * + *

+ * <p>True may be specified to print the name of each class file that is
+ * enhanced. This property is false by default.</p>
        + * + * @param verbose whether to use verbose mode. + */ + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } + + /** + * Gets verbose mode. + * + * @return whether to use verbose mode. + * + * @see #setVerbose + */ + public boolean getVerbose() { + return verbose; + } + + /** + * Creates a class enhancer that searches a given set of packages. + * + * @param packageNames a set of packages to search for persistent + * classes. Sub-packages of these packages are also searched. If empty or + * null, all packages known to the current classloader are searched. + */ + public ClassEnhancer(Set packageNames) { + if (packageNames != null) { + packagePrefixes = new HashSet(); + for (String name : packageNames) { + packagePrefixes.add(name + '.'); + } + } + } + + public byte[] transform(ClassLoader loader, + String className, + Class classBeingRedefined, + ProtectionDomain protectionDomain, + byte[] classfileBuffer) { + className = className.replace('/', '.'); + byte[] bytes = enhance(className, classfileBuffer); + if (verbose && bytes != null) { + System.out.println("Enhanced: " + className); + } + return bytes; + } + + /** + * Enhances the given class bytes if the class is annotated with {@link + * Entity} or {@link Persistent}. + * + * @param className the class name in binary format; for example, + * "my.package.MyClass$Name", or null if no filtering by class name + * should be performed. + * + * @param classBytes are the class file bytes to be enhanced. + * + * @return the enhanced bytes, or null if no enhancement was performed. + */ + public byte[] enhance(String className, byte[] classBytes) { + if (className != null && packagePrefixes != null) { + for (String prefix : packagePrefixes) { + if (className.startsWith(prefix)) { + return enhanceBytes(classBytes); + } + } + return null; + } else { + return enhanceBytes(classBytes); + } + } + + int enhanceFile(File file) + throws IOException { + + int nFiles = 0; + if (file.isDirectory()) { + String[] names = file.list(); + if (names != null) { + for (int i = 0; i < names.length; i += 1) { + nFiles += enhanceFile(new File(file, names[i])); + } + } + } else if (file.getName().endsWith(".class")) { + byte[] newBytes = enhanceBytes(readFile(file)); + if (newBytes != null) { + long modified = file.lastModified(); + writeFile(file, newBytes); + file.setLastModified(modified); + nFiles += 1; + if (verbose) { + System.out.println("Enhanced: " + file); + } + } + } + return nFiles; + } + + private byte[] readFile(File file) + throws IOException { + + byte[] bytes = new byte[(int) file.length()]; + FileInputStream in = new FileInputStream(file); + try { + in.read(bytes); + } finally { + in.close(); + } + return bytes; + } + + private void writeFile(File file, byte[] bytes) + throws IOException { + + FileOutputStream out = new FileOutputStream(file); + try { + out.write(bytes); + } finally { + out.close(); + } + } + + private byte[] enhanceBytes(byte[] bytes) { + + /* + * The writer is at the end of the visitor chain. Pass COMPUTE_FRAMES + * to calculate stack size, for safety. + */ + ClassWriter writer = new ClassWriter(ClassWriter.COMPUTE_FRAMES); + ClassVisitor visitor = writer; + + /* The enhancer is at the beginning of the visitor chain. */ + visitor = new BytecodeEnhancer(visitor); + + /* The reader processes the class and invokes the visitors. */ + ClassReader reader = new ClassReader(bytes); + try { + + /* + * Pass false for skipDebug since we are rewriting the class and + * should include all information. 
+ */ + reader.accept(visitor, 0); + return writer.toByteArray(); + } catch (BytecodeEnhancer.NotPersistentException e) { + /* The class is not persistent and should not be enhanced. */ + return null; + } + } +} diff --git a/src/com/sleepycat/persist/model/ClassEnhancerTask.java b/src/com/sleepycat/persist/model/ClassEnhancerTask.java new file mode 100644 index 0000000..03e5b73 --- /dev/null +++ b/src/com/sleepycat/persist/model/ClassEnhancerTask.java @@ -0,0 +1,114 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.tools.ant.BuildException; +import org.apache.tools.ant.DirectoryScanner; +import org.apache.tools.ant.Task; +import org.apache.tools.ant.types.FileSet; + +/** + * An {@code ant} task for running the {@link ClassEnhancer}. + * + *

+ * <p>{@code ClassEnhancerTask} objects are thread-safe. Multiple threads may
+ * safely call the methods of a shared {@code ClassEnhancerTask} object.</p>
+ *
+ * <p>Note that in the BDB Java Edition product, the {@code ClassEnhancerTask}
+ * class is included in {@code je-<version>.jar}. However, in the BDB
+ * (C-based) product, it is not included in {@code db.jar} because the build is
+ * not dependent on the Ant libraries. Therefore, in the BDB product, the
+ * application must compile the {@code
+ * java/src/com/sleepycat/persist/model/ClassEnhancerTask.java} source file and
+ * ensure that the compiled class is available to the Ant task. For example
+ * the following Ant task definitions could be used.</p>

+ * <p>For BDB Java Edition product:</p>
+ * <pre>
+ * {@literal <taskdef name="classenhancer" classname="com.sleepycat.persist.model.ClassEnhancerTask" classpath="lib/je-<version>.jar"/>}
+ * </pre>
+ *
+ * <p>For BDB (C-based Edition) product:</p>
+ * <pre>
+ * {@literal <taskdef name="classenhancer" classname="com.sleepycat.persist.model.ClassEnhancerTask" classpath="/path/to/compiled/classes"/>}
+ * </pre>

+ * <p>The class enhancer task element has no attributes. It may contain one or
+ * more nested {@code fileset} elements specifying the classes to be enhanced.
+ * The class files are replaced when they are enhanced, without changing the
+ * file modification date. For example:</p>
+ * <pre>
+ * {@literal <target name="enhance">}
+ *     {@literal <classenhancer verbose="on">}
+ *         {@literal <fileset dir="classes"/>}
+ *     {@literal </classenhancer>}
+ * {@literal </target>}
+ * </pre>

+ * <p>The verbose attribute may be specified as "true", "yes" or "on" (like
+ * other Ant boolean attributes) to print the name of each class file that is
+ * enhanced. The total number of class files enhanced will always be
+ * printed.</p>
        + * + * @author Mark Hayes + */ +public class ClassEnhancerTask extends Task { + + private List fileSets = new ArrayList(); + private boolean verbose; + + public void execute() throws BuildException { + if (fileSets.size() == 0) { + throw new BuildException("At least one fileset must be specified"); + } + try { + int nFiles = 0; + ClassEnhancer enhancer = new ClassEnhancer(); + enhancer.setVerbose(verbose); + for (FileSet fileSet : fileSets) { + DirectoryScanner scanner = + fileSet.getDirectoryScanner(getProject()); + String[] fileNames = scanner.getIncludedFiles(); + for (String fileName : fileNames) { + File file = new File(scanner.getBasedir(), fileName); + try { + nFiles += enhancer.enhanceFile(file); + } catch (IOException e) { + throw new BuildException(e); + } + } + } + if (nFiles > 0) { + System.out.println("Enhanced: " + nFiles + " files"); + } + } catch (RuntimeException e) { + e.printStackTrace(); + throw e; + } + } + + public void addConfiguredFileset(FileSet files) { + fileSets.add(files); + } + + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } +} diff --git a/src/com/sleepycat/persist/model/ClassMetadata.java b/src/com/sleepycat/persist/model/ClassMetadata.java new file mode 100644 index 0000000..303a6a0 --- /dev/null +++ b/src/com/sleepycat/persist/model/ClassMetadata.java @@ -0,0 +1,239 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import java.io.Serializable; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +/** + * The metadata for a persistent class. A persistent class may be specified + * with the {@link Entity} or {@link Persistent} annotation. + * + *

+ * <p>{@code ClassMetadata} objects are thread-safe. Multiple threads may
+ * safely call the methods of a shared {@code ClassMetadata} object.</p>
+ *
+ * <p>This and other metadata classes are classes rather than interfaces to
+ * allow adding properties to the model at a future date without causing
+ * incompatibilities. Any such property will be given a default value and
+ * its use will be optional.</p>
+ *
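For illustration, this metadata can be inspected through a store's model; a minimal sketch (the {@code store} variable and the {@code Pet} class are assumptions, not part of this file):

    EntityModel model = store.getModel();
    ClassMetadata meta = model.getClassMetadata(Pet.class.getName());
    if (meta != null) {
        System.out.println("version: " + meta.getVersion());
        System.out.println("entity class: " + meta.isEntityClass());
        System.out.println("primary key: " + meta.getPrimaryKey());
    }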
        + * + * @author Mark Hayes + */ +public class ClassMetadata implements Serializable { + + private static final long serialVersionUID = -2520207423701776679L; + + private String className; + private int version; + private String proxiedClassName; + private boolean entityClass; + private PrimaryKeyMetadata primaryKey; + private Map secondaryKeys; + private List compositeKeyFields; + private Collection persistentFields; + + /** + * Used by an {@code EntityModel} to construct persistent class metadata. + * The optional {@link #getPersistentFields} property will be set to null. + * + * @param className the class name. + * @param version the version. + * @param proxiedClassName the proxied class name. + * @param entityClass whether the class is an entity class. + * @param primaryKey the primary key metadata. + * @param secondaryKeys the secondary key metadata. + * @param compositeKeyFields the composite key field metadata. + */ + public ClassMetadata(String className, + int version, + String proxiedClassName, + boolean entityClass, + PrimaryKeyMetadata primaryKey, + Map secondaryKeys, + List compositeKeyFields) { + + this(className, version, proxiedClassName, entityClass, primaryKey, + secondaryKeys, compositeKeyFields, null /*persistentFields*/); + } + + /** + * Used by an {@code EntityModel} to construct persistent class metadata. + * + * @param className the class name. + * @param version the version. + * @param proxiedClassName the proxied class name. + * @param entityClass whether the class is an entity class. + * @param primaryKey the primary key metadata. + * @param secondaryKeys the secondary key metadata. + * @param compositeKeyFields the composite key field metadata. + * @param persistentFields the persistent field metadata. + */ + public ClassMetadata(String className, + int version, + String proxiedClassName, + boolean entityClass, + PrimaryKeyMetadata primaryKey, + Map secondaryKeys, + List compositeKeyFields, + Collection persistentFields) { + this.className = className; + this.version = version; + this.proxiedClassName = proxiedClassName; + this.entityClass = entityClass; + this.primaryKey = primaryKey; + this.secondaryKeys = secondaryKeys; + this.compositeKeyFields = compositeKeyFields; + this.persistentFields = persistentFields; + } + + /** + * Returns the name of the persistent class. + * + * @return the name of the persistent class. + */ + public String getClassName() { + return className; + } + + /** + * Returns the version of this persistent class. This may be specified + * using the {@link Entity#version} or {@link Persistent#version} + * annotation. + * + * @return the version of this persistent class. + */ + public int getVersion() { + return version; + } + + /** + * Returns the class name of the proxied class if this class is a {@link + * PersistentProxy}, or null otherwise. + * + * @return the class name of the proxied class, or null. + */ + public String getProxiedClassName() { + return proxiedClassName; + } + + /** + * Returns whether this class is an entity class. + * + * @return whether this class is an entity class. + */ + public boolean isEntityClass() { + return entityClass; + } + + /** + * Returns the primary key metadata for a key declared in this class, or + * null if none is declared. This may be specified using the {@link + * PrimaryKey} annotation. + * + * @return the primary key metadata, or null. 
+ */ + public PrimaryKeyMetadata getPrimaryKey() { + return primaryKey; + } + + /** + * Returns an unmodifiable map of key name (which may be different from + * field name) to secondary key metadata for all secondary keys declared in + * this class, or null if no secondary keys are declared in this class. + * This metadata may be specified using {@link SecondaryKey} annotations. + * + * @return the unmodifiable map, or null. + */ + public Map getSecondaryKeys() { + return secondaryKeys; + } + + /** + * Returns an unmodifiable list of metadata for the fields making up a + * composite key, or null if this is a not a composite key class. The + * order of the fields in the returned list determines their stored order + * and may be specified using the {@link KeyField} annotation. When the + * composite key class does not implement {@link Comparable}, the order of + * the fields is the relative sort order. + * + * @return the unmodifiable list, or null. + */ + public List getCompositeKeyFields() { + return compositeKeyFields; + } + + /** + * Returns an unmodifiable list of metadata for the persistent fields in + * this class, or null if the default rules for persistent fields should be + * used. All fields returned must be declared in this class and must be + * non-static. + * + *

+ * <p>By default (if null is returned) the persistent fields of a class
+ * will be all declared instance fields that are non-transient (are not
+ * declared with the {@code transient} keyword). The default rules
+ * may be overridden by an {@link EntityModel}. For example, the {@link
+ * AnnotationModel} overrides the default rules when the {@link
+ * NotPersistent} or {@link NotTransient} annotation is specified.</p>
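A sketch of how those two annotations change the defaults, using a hypothetical class (not part of this file):

    @Entity
    class Account {
        @PrimaryKey
        long id;

        /* A non-transient field excluded from the store by @NotPersistent. */
        @NotPersistent
        int cachedHashCode;

        /* A transient field that is stored anyway because of @NotTransient. */
        @NotTransient
        transient String auditNote;
    }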
        + * + * @return the unmodifiable list, or null. + */ + public Collection getPersistentFields() { + return persistentFields; + } + + @Override + public boolean equals(Object other) { + if (other instanceof ClassMetadata) { + ClassMetadata o = (ClassMetadata) other; + return version == o.version && + entityClass == o.entityClass && + nullOrEqual(className, o.className) && + nullOrEqual(proxiedClassName, o.proxiedClassName) && + nullOrEqual(primaryKey, o.primaryKey) && + nullOrEqual(secondaryKeys, o.secondaryKeys) && + nullOrEqual(compositeKeyFields, o.compositeKeyFields); + } else { + return false; + } + } + + @Override + public int hashCode() { + return version + + (entityClass ? 1 : 0) + + hashCode(className) + + hashCode(proxiedClassName) + + hashCode(primaryKey) + + hashCode(secondaryKeys) + + hashCode(compositeKeyFields); + } + + static boolean nullOrEqual(Object o1, Object o2) { + if (o1 == null) { + return o2 == null; + } else { + return o1.equals(o2); + } + } + + static int hashCode(Object o) { + if (o != null) { + return o.hashCode(); + } else { + return 0; + } + } +} diff --git a/src/com/sleepycat/persist/model/DeleteAction.java b/src/com/sleepycat/persist/model/DeleteAction.java new file mode 100644 index 0000000..91454fb --- /dev/null +++ b/src/com/sleepycat/persist/model/DeleteAction.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + + +/** + * Specifies the action to take when a related entity is deleted having a + * primary key value that exists as a secondary key value for this entity. + * This can be specified using a {@link SecondaryKey#onRelatedEntityDelete} + * annotation. + * + * @author Mark Hayes + */ +public enum DeleteAction { + + /** + * The default action, {@code ABORT}, means that an exception is thrown in + * order to abort the current transaction. + * + * On BDB JE, a {@link com.sleepycat.je.DeleteConstraintException} is + * thrown. + * + */ + ABORT, + + /** + * If {@code CASCADE} is specified, then this entity will be deleted also, + * which could in turn trigger further deletions, causing a cascading + * effect. + */ + CASCADE, + + /** + * If {@code NULLIFY} is specified, then the secondary key in this entity + * is set to null and this entity is updated. For a secondary key field + * that has an array or collection type, the array or collection element + * will be removed by this action. The secondary key field must have a + * reference (not a primitive) type in order to specify this action. + */ + NULLIFY; +} diff --git a/src/com/sleepycat/persist/model/Entity.java b/src/com/sleepycat/persist/model/Entity.java new file mode 100644 index 0000000..0ee91ea --- /dev/null +++ b/src/com/sleepycat/persist/model/Entity.java @@ -0,0 +1,262 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static java.lang.annotation.ElementType.TYPE; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.persist.evolve.Mutations; + +/** + * Indicates a persistent entity class. For each entity class, a {@link + * PrimaryIndex} can be used to store and access instances of that class. + * Optionally, one or more {@link SecondaryIndex} objects may be used to access + * entity instances by secondary key. + * + *

+ * <h3>Entity Subclasses and Superclasses</h3>
+ *
+ * <p>An entity class may have any number of subclasses and superclasses;
+ * however, none of these may themselves be entity classes (annotated with
+ * {@code Entity}).</p>
+ *
+ * <p>Entity superclasses (which must be annotated with {@code Persistent}, not
+ * {@code Entity}) are used to share common definitions among entity classes.
+ * Fields in an entity superclass may be defined as primary or secondary keys.
+ * For example, the following {@code BaseClass} defines the primary key for any
+ * number of entity classes, using a single sequence to assign primary key
+ * values that will be unique across all entity classes that use it. The
+ * entity class {@code Pet} extends the base class, implicitly defining a
+ * primary index.</p>
+ *
+ * <pre>
+ *  {@literal @Persistent}
+ *  class BaseClass {
+ *      {@literal @PrimaryKey(sequence="ID")}
+ *      long id;
+ *  }
+ *
+ *  {@literal @Entity}
+ *  class Pet extends BaseClass {
+ *      {@literal @SecondaryKey(relate=ONE_TO_ONE)}
+ *      String name;
+ *      float height;
+ *      float weight;
+ *  }
+ * </pre>
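For illustration, the indexes implied by these annotations could be obtained as follows, assuming an open {@code EntityStore} named {@code store} (the variable names are hypothetical):

    PrimaryIndex<Long, Pet> petById =
        store.getPrimaryIndex(Long.class, Pet.class);
    SecondaryIndex<String, Long, Pet> petByName =
        store.getSecondaryIndex(petById, String.class, "name");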
+ *
+ * <p>Entity subclasses (which must be annotated with {@code Persistent}, not
+ * {@code Entity}) are used to provide polymorphism within a single {@code
+ * PrimaryIndex}. Instances of the entity class and its subclasses are stored
+ * in the same {@code PrimaryIndex}. For example, the entity class {@code Pet}
+ * defines a primary index that will contain instances of it and its
+ * subclasses, including {@code Cat} which is defined below.</p>
+ *
+ * <p>Fields in an entity subclass may be defined as secondary keys, and such
+ * secondary keys can only be used to query instances of the subclass. For
+ * example, although the primary key ({@code id}) and secondary key ({@code
+ * name}) can be used to retrieve any {@code Pet} instance, the entity subclass
+ * {@code Cat} defines a secondary key ({@code finickyness}) that only applies
+ * to {@code Cat} instances. Querying by this key will never retrieve a {@code
+ * Dog} instance, if such a subclass existed, because a {@code Dog} instance
+ * will never contain a {@code finickyness} key.</p>
+ *
+ * <pre>
+ *  {@literal @Persistent}
+ *  class Cat extends Pet {
+ *      {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *      int finickyness;
+ *  }
+ * </pre>
+ *
+ * <p><strong>WARNING:</strong> Entity subclasses that define secondary keys
+ * must be registered prior to storing an instance of the class. This can be
+ * done in two ways:</p>
+ * <ol>
+ * <li>The {@link EntityModel#registerClass registerClass} method may be called
+ * to register the subclass before opening the entity store.</li>
+ * <li>The {@link EntityStore#getSubclassIndex getSubclassIndex} method may be
+ * called to implicitly register the subclass after opening the entity
+ * store.</li>
+ * </ol>
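A minimal sketch of both registration approaches, assuming an open {@code Environment} named {@code env} (the store name and variable names are hypothetical):

    /* Approach 1: register the subclass before opening the store. */
    EntityModel model = new AnnotationModel();
    model.registerClass(Cat.class);
    StoreConfig config = new StoreConfig();
    config.setAllowCreate(true);
    config.setModel(model);
    EntityStore store = new EntityStore(env, "PetStore", config);

    /* Approach 2: implicitly register the subclass after opening the store. */
    PrimaryIndex<Long, Pet> petById =
        store.getPrimaryIndex(Long.class, Pet.class);
    SecondaryIndex<Integer, Long, Pet> catByFinickyness =
        store.getSubclassIndex(petById, Cat.class, Integer.class, "finickyness");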

+ * <h3>Persistent Fields and Types</h3>
+ *
+ * <p>All non-transient instance fields of an entity class, as well as its
+ * superclasses and subclasses, are persistent. {@code static} and {@code
+ * transient} fields are not persistent. The persistent fields of a class may
+ * be {@code private}, package-private (default access), {@code protected} or
+ * {@code public}.</p>
+ *
+ * <p>It is worthwhile to note the reasons that object persistence is defined
+ * in terms of fields rather than properties (getters and setters). This
+ * allows business methods (getters and setters) to be defined independently of
+ * the persistent state of an object; for example, a setter method may perform
+ * validation that could not be performed if it were called during object
+ * deserialization. Similarly, this allows public methods to evolve somewhat
+ * independently of the (typically non-public) persistent fields.</p>
+ *
+ * <h3>Simple Types</h3>
+ *
+ * <p>Persistent types are divided into simple types, enum types, complex
+ * types, and array types. Simple types and enum types are single valued,
+ * while array types may contain multiple elements and complex types may
+ * contain one or more named fields.</p>
+ *
+ * <p>Simple types include:</p>
+ * <ul>
+ * <li>Java primitive types: {@code boolean, char, byte, short, int, long,
+ * float, double}</li>
+ * <li>The wrapper classes for Java primitive types</li>
+ * <li>{@link java.math.BigDecimal}</li>
+ * <li>{@link java.math.BigInteger}</li>
+ * <li>{@link java.lang.String}</li>
+ * <li>{@link java.util.Date}</li>
+ * </ul>

+ * <p>When null values are required (for optional key fields, for example),
+ * primitive wrapper classes must be used instead of primitive types.</p>
+ *
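For example, a hypothetical entity (not part of this file) whose secondary key may be absent must declare the field as {@code Integer} rather than {@code int}:

    @Entity
    class Employee {
        @PrimaryKey
        long id;

        /* May be null for employees without a department. */
        @SecondaryKey(relate = MANY_TO_ONE)
        Integer departmentId;
    }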

+ * <p>Simple types, enum types and array types do not require annotations to
+ * make them persistent.</p>
+ *

        Complex and Proxy Types

        + * + *

        Complex persistent classes must be annotated with {@link Entity} or + * {@link Persistent}, or must be proxied by a persistent proxy class + * (described below). This includes entity classes, subclasses and + * superclasses, and all other complex classes referenced via fields of these + * classes.

        + * + *

        All complex persistent classes must have a default constructor. The + * default constructor may be {@code private}, package-private (default + * access), {@code protected}, or {@code public}. Other constructors are + * allowed but are not used by the persistence mechanism.

        + * + *

        It is sometimes desirable to store instances of a type that is externally + * defined and cannot be annotated or does not have a default constructor; for + * example, a class defined in the Java standard libraries or a 3rd party + * library. In this case, a {@link PersistentProxy} class may be used to + * represent the stored values for the externally defined type. The proxy + * class itself must be annotated with {@link Persistent} like other persistent + * classes, and the {@link Persistent#proxyFor} property must be specified.

For convenience, built-in proxy classes are included for several common classes (listed below) in the Java library. If you wish, you may define your own {@link PersistentProxy} to override these built-in proxies.

• {@link java.util.HashSet}
• {@link java.util.TreeSet}
• {@link java.util.HashMap}
• {@link java.util.TreeMap}
• {@link java.util.ArrayList}
• {@link java.util.LinkedList}

Complex persistent types should in general be application-defined classes. This gives the application control over the persistent state and its evolution over time.

        Other Type Restrictions

Entity classes and subclasses may not be used in field declarations for persistent types. Fields of entity classes and subclasses must be simple types or non-entity persistent types (annotated with {@link Persistent} not with {@link Entity}).
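
As an illustration (hypothetical Customer and Order classes), a relationship to another entity is expressed by storing its primary key, optionally with {@link SecondaryKey#relatedEntity} to enforce a foreign key constraint, rather than by referencing the entity directly:

    @Entity
    class Customer {
        @PrimaryKey
        long id;
    }

    @Entity
    class Order {
        @PrimaryKey
        long id;

        // Customer customer;  // NOT ALLOWED: Customer is an entity class

        // Allowed: refer to the related entity by its primary key.
        @SecondaryKey(relate = Relationship.MANY_TO_ONE,
                      relatedEntity = Customer.class)
        long customerId;
    }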

Entity classes, subclasses and superclasses may be {@code abstract} and may implement arbitrary interfaces. Interfaces do not need to be annotated with {@link Persistent} in order to be used in a persistent class, since interfaces do not contain instance fields.

Persistent instances of static nested classes are allowed, but the nested class must be annotated with {@link Persistent} or {@link Entity}. Inner classes (non-static nested classes, including anonymous classes) are not currently allowed as persistent types.

Arrays of simple and persistent complex types are allowed as fields of persistent types. Arrays may be multidimensional. However, an array may not be stored as a top level instance in a primary index. Only instances of entity classes and subclasses may be top level instances in a primary index.


        Embedded Objects

As stated above, the embedded (or member) non-transient non-static fields of an entity class are themselves persistent and are stored along with their parent entity object. This allows embedded objects to be stored in an entity to an arbitrary depth.
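
For example (hypothetical classes), each level of embedding is simply another {@link Persistent} class referenced by a field:

    @Entity
    class Invoice {
        @PrimaryKey
        long number;
        LineItem item;     // embedded one level deep
    }

    @Persistent
    class LineItem {
        String description;
        Price price;       // embedded two levels deep, and so on
    }

    @Persistent
    class Price {
        String currency;
        long cents;
    }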

There is no arbitrary limit to the nesting depth of embedded objects within an entity; however, there is a practical limit. When an entity is marshalled, each level of nesting is implemented internally via recursive method calls. If the nesting depth is large enough, a {@code StackOverflowError} can occur. In practice, this has been observed with a nesting depth of 12,000, using the default Java stack size.

This restriction on the nesting depth of embedded objects does not apply to cyclic references, since these are handled specially as described below.


        Object Graphs

When an entity instance is stored, the graph of objects referenced via its fields is stored and retrieved as a graph. In other words, if a single instance is referenced by two or more fields when the entity is stored, the same will be true when the entity is retrieved.

When a reference to a particular object is stored as a member field inside that object or one of its embedded objects, this is called a cyclic reference. Because multiple references to a single object are stored as such, cycles are also represented correctly and do not cause infinite recursion or infinite processing loops. If an entity containing a cyclic reference is stored, the cyclic reference will be present when the entity is retrieved.
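
A sketch of a stored cycle (hypothetical Tree and Node classes); after the entity is retrieved, {@code root.children[0].parent == root} still holds:

    @Entity
    class Tree {
        @PrimaryKey
        long id;
        Node root;
    }

    @Persistent
    class Node {
        String label;
        Node parent;       // cyclic reference: each child points back to its parent
        Node[] children;
    }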

Note that the stored object graph is restricted in scope to a single entity instance. This is because each entity instance is stored separately. If two entities have a reference to the same object when stored, they will refer to two separate instances when the entities are retrieved.

        + * + * @see Persistent + * @see PrimaryKey + * @see SecondaryKey + * @see KeyField + * + * @author Mark Hayes + */ +@Documented @Retention(RUNTIME) @Target(TYPE) +public @interface Entity { + + /** + * Identifies a new version of a class when an incompatible class change + * has been made. Prior versions of a class are referred to by version + * number to perform class evolution and conversion using {@link + * Mutations}. + * + *

The first version of a class is version zero, if {@link #version} is not specified. When an incompatible class change is made, a version number must be assigned using {@link #version} that is higher than the previous version number for the class. If this is not done, an {@link IncompatibleClassException} will be thrown when the store is opened.
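
For example (hypothetical Person class), after an incompatible change such as replacing a field, a higher version must be declared; evolving previously stored instances additionally requires an appropriate {@link Mutations} entry:

    // Version 0 (the default) stored an incompatible field: int age;
    @Entity(version = 1)
    class Person {
        @PrimaryKey
        long id;
        String birthDate;  // replaces the old age field
    }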

        + * + * @return the version. + */ + int version() default 0; +} diff --git a/src/com/sleepycat/persist/model/EntityMetadata.java b/src/com/sleepycat/persist/model/EntityMetadata.java new file mode 100644 index 0000000..1a8c7ab --- /dev/null +++ b/src/com/sleepycat/persist/model/EntityMetadata.java @@ -0,0 +1,102 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import java.io.Serializable; +import java.util.Map; + +/** + * The metadata for a persistent entity class. An entity class may be + * specified with the {@link Entity} annotation. + * + *

{@code EntityMetadata} objects are thread-safe. Multiple threads may safely call the methods of a shared {@code EntityMetadata} object.

        + * + * @author Mark Hayes + */ +public class EntityMetadata implements Serializable { + + private static final long serialVersionUID = 4224509631681963159L; + + private String className; + private PrimaryKeyMetadata primaryKey; + private Map secondaryKeys; + + /** + * Used by an {@code EntityModel} to construct entity metadata. + * + * @param className the class name. + * @param primaryKey the primary key metadata. + * @param secondaryKeys the secondary key metadata. + */ + public EntityMetadata(String className, + PrimaryKeyMetadata primaryKey, + Map secondaryKeys) { + this.className = className; + this.primaryKey = primaryKey; + this.secondaryKeys = secondaryKeys; + } + + /** + * Returns the name of the entity class. + * + * @return the name of the entity class. + */ + public String getClassName() { + return className; + } + + /** + * Returns the primary key metadata for this entity. Note that the primary + * key field may be declared in this class or in a subclass. This metadata + * may be specified using the {@link PrimaryKey} annotation. + * + * @return the primary key metadata. + */ + public PrimaryKeyMetadata getPrimaryKey() { + return primaryKey; + } + + /** + * Returns an unmodifiable map of key name to secondary key metadata, or + * an empty map if no secondary keys are defined for this entity. The + * returned map contains a mapping for each secondary key of this entity, + * including secondary keys declared in subclasses and superclasses. This + * metadata may be specified using {@link SecondaryKey} annotations. + * + * @return the secondary key metadata. + */ + public Map getSecondaryKeys() { + return secondaryKeys; + } + + @Override + public boolean equals(Object other) { + if (other instanceof EntityMetadata) { + EntityMetadata o = (EntityMetadata) other; + return ClassMetadata.nullOrEqual(className, o.className) && + ClassMetadata.nullOrEqual(primaryKey, o.primaryKey) && + ClassMetadata.nullOrEqual(secondaryKeys, o.secondaryKeys); + } else { + return false; + } + } + + @Override + public int hashCode() { + return ClassMetadata.hashCode(className) + + ClassMetadata.hashCode(primaryKey) + + ClassMetadata.hashCode(secondaryKeys); + } +} diff --git a/src/com/sleepycat/persist/model/EntityModel.java b/src/com/sleepycat/persist/model/EntityModel.java new file mode 100644 index 0000000..39e8fc3 --- /dev/null +++ b/src/com/sleepycat/persist/model/EntityModel.java @@ -0,0 +1,398 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.model; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.impl.Format; +import com.sleepycat.persist.impl.PersistCatalog; +import com.sleepycat.persist.impl.RefreshException; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawType; +import com.sleepycat.util.ClassResolver; + +/** + * The base class for classes that provide entity model metadata. An {@link + * EntityModel} defines entity classes, primary keys, secondary keys, and + * relationships between entities. For each entity class that is part of the + * model, a single {@link PrimaryIndex} object and zero or more {@link + * SecondaryIndex} objects may be accessed via an {@link EntityStore}. + * + *

The built-in entity model, the {@link AnnotationModel}, is based on annotations that are added to entity classes and their key fields. Annotations are used in the examples in this package, and it is expected that annotations will normally be used; most readers should therefore skip to the {@link AnnotationModel} class. However, a custom entity model class may define its own metadata. This can be used to define entity classes and keys using mechanisms other than annotations.

A concrete entity model class should extend this class and implement the {@link #getClassMetadata}, {@link #getEntityMetadata} and {@link #getKnownClasses} methods.
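
A minimal sketch of such a subclass (the MapBackedModel name and its in-memory maps are illustrative, not part of this API):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    public class MapBackedModel extends EntityModel {
        private final Map<String, ClassMetadata> classMeta =
            new HashMap<String, ClassMetadata>();
        private final Map<String, EntityMetadata> entityMeta =
            new HashMap<String, EntityMetadata>();

        @Override
        public ClassMetadata getClassMetadata(String className) {
            return classMeta.get(className);
        }

        @Override
        public EntityMetadata getEntityMetadata(String className) {
            return entityMeta.get(className);
        }

        @Override
        public Set<String> getKnownClasses() {
            return Collections.unmodifiableSet(classMeta.keySet());
        }
    }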

This is an abstract class rather than an interface to allow adding capabilities to the model at a future date without causing incompatibilities. For example, a method may be added in the future for returning new information about the model and subclasses may override this method to return the new information. Any new methods will have default implementations that return default values, and the use of the new information will be optional.

        + * + * @author Mark Hayes + */ +public abstract class EntityModel { + + private volatile PersistCatalog catalog; + private ClassLoader classLoader; + + /** + * The default constructor for use by subclasses. + */ + protected EntityModel() { + } + + /** + * Returns whether the model is associated with an open store. + * + *

The {@link #registerClass} method may only be called when the model is not yet open. Certain other methods may only be called when the model is open:

• {@link #convertRawObject}
• {@link #getAllRawTypeVersions}
• {@link #getRawType}
• {@link #getRawTypeVersion}
        + * + * @return whether the model is associated with an open store. + */ + public final boolean isOpen() { + return catalog != null; + } + + /** + * Registers a persistent class, most importantly, a {@link + * PersistentProxy} class or entity subclass. Also registers an enum or + * array class. + * + *

Any persistent class, enum class or array may be registered in advance of using it, to avoid the overhead of updating the catalog database when an instance of the class is first stored. This method must be called in three cases:

1. to register all {@link PersistentProxy} classes, and
2. to register an entity subclass defining a secondary key, if {@link EntityStore#getSubclassIndex getSubclassIndex} is not called for the subclass, and
3. to register all new enum or array classes, if these enum or array classes are unknown to the DPL but will be used in a Converter mutation.

        For example:

    EntityModel model = new AnnotationModel();
    model.registerClass(MyProxy.class);
    model.registerClass(MyEntitySubclass.class);
    model.registerClass(MyEnum.class);
    model.registerClass(MyArray[].class);

    StoreConfig config = new StoreConfig();
    ...
    config.setModel(model);

    EntityStore store = new EntityStore(..., config);

This method must be called before opening a store based on this model.

        + * + * @param persistentClass the class to register. + * + * @throws IllegalStateException if this method is called for a model that + * is associated with an open store. + * + * @throws IllegalArgumentException if the given class is not persistent + * or has a different class loader than previously registered classes. + */ + public final void registerClass(Class persistentClass) { + if (catalog != null) { + throw new IllegalStateException("Store is already open"); + } else { + String className = persistentClass.getName(); + ClassMetadata meta = getClassMetadata(className); + if (meta == null && + !persistentClass.isEnum() && + !persistentClass.isArray()) { + throw new IllegalArgumentException + ("Class is not persistent, or is not an enum or array: " + + className); + } + } + } + + /** + * + * @hidden + * + * Internal access method that should not be used by applications. + * + * This method is used to initialize the model when catalog creation is + * complete, and reinitialize it when a Replica refresh occurs. See + * Store.refresh. + * + * @param newCatalog the catalog. + */ + protected void setCatalog(final PersistCatalog newCatalog) { + this.catalog = newCatalog; + } + + /** + * + * @hidden + * + * Internal access method that should not be used by applications. + * + * This method is called during EntityStore construction, before using the + * model. + */ + void setClassLoader(final ClassLoader loader) { + this.classLoader = loader; + } + + /** + * + * @hidden + * + * Internal access method that should not be used by applications. + */ + ClassLoader getClassLoader() { + return classLoader; + } + + /** + * Returns the metadata for a given persistent class name, including proxy + * classes and entity classes. + * + * @param className the class name. + * + * @return the metadata or null if the class is not persistent or does not + * exist. + */ + public abstract ClassMetadata getClassMetadata(String className); + + /** + * Returns the metadata for a given entity class name. + * + * @param className the class name. + * + * @return the metadata or null if the class is not an entity class or does + * not exist. + */ + public abstract EntityMetadata getEntityMetadata(String className); + + /** + * Returns the names of all known persistent classes. A type becomes known + * when an instance of the type is stored for the first time or metadata or + * type information is queried for a specific class name. + * + * @return an unmodifiable set of class names. + * + * @throws IllegalStateException if this method is called for a model that + * is not associated with an open store. + */ + public abstract Set getKnownClasses(); + + /** + * Returns the names of all known persistent enum and array classes that + * may be used to store persistent data. This differs from + * {@link #getKnownClasses}, which does not return enum and array classes + * because they have no metadata. + * + * @return an unmodifiable set of enum and array class names. + * + * @throws IllegalStateException if this method is called for a model that + * is not associated with an open store. + */ + public Set getKnownSpecialClasses() { + return Collections.emptySet(); + } + + /** + * Returns the type information for the current version of a given class, + * or null if the class is not currently persistent. + * + * @param className the name of the current version of the class. + * + * @return the RawType. + * + * @throws IllegalStateException if this method is called for a model that + * is not associated with an open store. 
+ */ + public final RawType getRawType(String className) { + if (catalog != null) { + return catalog.getFormat(className); + } else { + throw new IllegalStateException("Store is not open"); + } + } + + /** + * Returns the type information for a given version of a given class, + * or null if the given version of the class is unknown. + * + * @param className the name of the latest version of the class. + * + * @param version the desired version of the class. + * + * @return the RawType. + * + * @throws IllegalStateException if this method is called for a model that + * is not associated with an open store. + */ + public final RawType getRawTypeVersion(String className, int version) { + if (catalog != null) { + Format format = catalog.getLatestVersion(className); + while (format != null) { + if (version == format.getVersion()) { + return format; + } + format = format.getPreviousVersion(); + } + return null; + } else { + throw new IllegalStateException("Store is not open"); + } + } + + /** + * Returns all known versions of type information for a given class name, + * or null if no persistent version of the class is known. + * + * @param className the name of the latest version of the class. + * + * @return an unmodifiable list of types for the given class name in order + * from most recent to least recent. + * + * @throws IllegalStateException if this method is called for a model that + * is not associated with an open store. + */ + public final List getAllRawTypeVersions(String className) { + if (catalog != null) { + Format format = catalog.getLatestVersion(className); + if (format != null) { + List list = new ArrayList(); + while (format != null) { + list.add(format); + format = format.getPreviousVersion(); + } + return Collections.unmodifiableList(list); + } else { + return null; + } + } else { + throw new IllegalStateException("Store is not open"); + } + } + + /** + * Returns all versions of all known types. + * + * @return an unmodifiable list of types. + * + * @throws IllegalStateException if this method is called for a model that + * is not associated with an open store. + */ + public final List getAllRawTypes() { + if (catalog != null) { + return catalog.getAllRawTypes(); + } else { + throw new IllegalStateException("Store is not open"); + } + } + + /** + * Converts a given raw object to a live object according to the current + * class definitions. + * + *

The given raw object must conform to the current class definitions. However, the raw type ({@link RawObject#getType}) is allowed to be from a different store, as long as the class names and the value types match. This allows converting raw objects that are read from one store to live objects in another store, for example, in a conversion program.
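
A hedged sketch of such a conversion program (env, newStore, the store name and the Person class are illustrative, and error handling is omitted):

    RawStore oldStore = new RawStore(env, "old-store", null);
    PrimaryIndex<Object, RawObject> rawIndex =
        oldStore.getPrimaryIndex(Person.class.getName());

    EntityModel model = newStore.getModel();
    PrimaryIndex<Long, Person> liveIndex =
        newStore.getPrimaryIndex(Long.class, Person.class);

    EntityCursor<RawObject> cursor = rawIndex.entities();
    try {
        for (RawObject raw : cursor) {
            // Convert according to the current class definitions, then store.
            liveIndex.put((Person) model.convertRawObject(raw));
        }
    } finally {
        cursor.close();
    }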

        + * + * @param raw the RawObject. + * + * @return the live object. + */ + public final Object convertRawObject(RawObject raw) { + try { + return catalog.convertRawObject(raw, null); + } catch (RefreshException e) { + e.refresh(); + try { + return catalog.convertRawObject(raw, null); + } catch (RefreshException e2) { + throw DbCompat.unexpectedException(e2); + } + } + } + + /** + * Should be called by entity model implementations instead of calling + * Class.forName whenever loading an application class. This method honors + * the BDB JE environment's ClassLoader property and uses {@link + * ClassResolver} to implement the class loading policy. + * + * @param className the class name. + * + * @return the Class. + * + * @throws ClassNotFoundException if the class is not found. + */ + public Class resolveClass(String className) + throws ClassNotFoundException { + + return ClassResolver.resolveClass(className, classLoader); + } + + /** + * @param className the class name. + * + * @return the Class. + * + * @throws ClassNotFoundException if the class is not found. + * + * @deprecated use {@link #resolveClass} instead. This method does not + * use the environment's ClassLoader property. + */ + public static Class classForName(String className) + throws ClassNotFoundException { + + try { + return Class.forName + (className, true /*initialize*/, + Thread.currentThread().getContextClassLoader()); + } catch (ClassNotFoundException e) { + return Class.forName(className); + } + } +} diff --git a/src/com/sleepycat/persist/model/FieldMetadata.java b/src/com/sleepycat/persist/model/FieldMetadata.java new file mode 100644 index 0000000..170f225 --- /dev/null +++ b/src/com/sleepycat/persist/model/FieldMetadata.java @@ -0,0 +1,103 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import java.io.Serializable; + +/** + * The metadata for a key field. This class defines common properties for + * singular and composite key fields. + * + *

{@code FieldMetadata} objects are thread-safe. Multiple threads may safely call the methods of a shared {@code FieldMetadata} object.

        + * + * @author Mark Hayes + */ +public class FieldMetadata implements Serializable { + + private static final long serialVersionUID = -9037650229184174279L; + + private String name; + private String className; + private String declaringClassName; + + /** + * Used by an {@code EntityModel} to construct field metadata. + * + * @param name the field name. + * @param className the class name. + * @param declaringClassName the name of the class where the field is + * declared. + */ + public FieldMetadata(String name, + String className, + String declaringClassName) { + this.name = name; + this.className = className; + this.declaringClassName = declaringClassName; + } + + /** + * Returns the field name. + * + * @return the field name. + */ + public String getName() { + return name; + } + + /** + * Returns the class name of the field type. + * + * @return the class name. + */ + public String getClassName() { + return className; + } + + /** + * Returns the name of the class where the field is declared. + * + * @return the name of the class where the field is declared. + */ + public String getDeclaringClassName() { + return declaringClassName; + } + + @Override + public boolean equals(Object other) { + if (other instanceof FieldMetadata) { + FieldMetadata o = (FieldMetadata) other; + return ClassMetadata.nullOrEqual(name, o.name) && + ClassMetadata.nullOrEqual(className, o.className) && + ClassMetadata.nullOrEqual(declaringClassName, + o.declaringClassName); + } else { + return false; + } + } + + @Override + public int hashCode() { + return ClassMetadata.hashCode(name) + + ClassMetadata.hashCode(className) + + ClassMetadata.hashCode(declaringClassName); + } + + @Override + public String toString() { + return "[FieldMetadata name: " + name + " className: " + className + + " declaringClassName: " + declaringClassName + ']'; + } +} diff --git a/src/com/sleepycat/persist/model/KeyField.java b/src/com/sleepycat/persist/model/KeyField.java new file mode 100644 index 0000000..f4e4532 --- /dev/null +++ b/src/com/sleepycat/persist/model/KeyField.java @@ -0,0 +1,134 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static java.lang.annotation.ElementType.FIELD; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import com.sleepycat.je.Environment; + +/** + * Indicates the sorting position of a key field in a composite key class when + * the {@code Comparable} interface is not implemented. The {@code KeyField} + * integer value specifies the sort order of this field within the set of + * fields in the composite key. + * + *

If the field type of a {@link PrimaryKey} or {@link SecondaryKey} is a composite key class containing more than one key field, then a {@code KeyField} annotation must be present on each non-transient instance field of the composite key class. The {@code KeyField} value must be a number between one and the number of non-transient instance fields declared in the composite key class.

Note that a composite key class is a flat container for one or more simple type fields. All non-transient instance fields in the composite key class are key fields, and its superclass must be {@code Object}.


        For example:

    @Entity
    class Animal {
        @PrimaryKey
        Classification classification;
        ...
    }

    @Persistent
    class Classification {
        @KeyField(1) String kingdom;
        @KeyField(2) String phylum;
        @KeyField(3) String clazz;
        @KeyField(4) String order;
        @KeyField(5) String family;
        @KeyField(6) String genus;
        @KeyField(7) String species;
        @KeyField(8) String subspecies;
        ...
    }

This causes entities to be sorted first by {@code kingdom}, then by {@code phylum} within {@code kingdom}, and so on.

The fields in a composite key class may not be null.


        Custom Sort Order

To override the default sort order, a composite key class may implement the {@link Comparable} interface. This allows overriding the sort order and is therefore useful even when there is only one key field in the composite key class. For example, the following class sorts Strings using a Canadian collator:

    import java.text.Collator;
    import java.util.Locale;

    @Entity
    class Animal {
        ...
        @SecondaryKey(relate=ONE_TO_ONE)
        CollatedString canadianName;
        ...
    }

    @Persistent
    class CollatedString implements Comparable<CollatedString> {

        static Collator collator = Collator.getInstance(Locale.CANADA);

        @KeyField(1)
        String value;

        CollatedString(String value) { this.value = value; }

        private CollatedString() {}

        public int compareTo(CollatedString o) {
            return collator.compare(value, o.value);
        }
    }

Several important rules should be considered when implementing a custom comparison method. Failure to follow these rules may result in the primary or secondary index becoming unusable; in other words, the store will not be able to function.

1. The comparison method must always return the same result, given the same inputs. The behavior of the comparison method must not change over time.
2. A corollary to the first rule is that the behavior of the comparison method must not be dependent on state which may change over time. For example, if the above collation method used the default Java locale, and the default locale is changed, then the sort order will change.
3. The comparison method must not assume that it is called after the store has been opened. With Berkeley DB Java Edition, the comparison method is called during database recovery, which occurs in the {@link Environment} constructor.
4. The comparison method must not assume that it will only be called with keys that are currently present in the database. The comparison method will occasionally be called with deleted keys or with keys for records that were not part of a committed transaction.
        + * + * @author Mark Hayes + */ +@Documented @Retention(RUNTIME) @Target(FIELD) +public @interface KeyField { + + int value(); +} diff --git a/src/com/sleepycat/persist/model/ModelInternal.java b/src/com/sleepycat/persist/model/ModelInternal.java new file mode 100644 index 0000000..008397b --- /dev/null +++ b/src/com/sleepycat/persist/model/ModelInternal.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import com.sleepycat.persist.impl.PersistCatalog; + +/** + * + * @hidden + * + * Internal access class that should not be used by applications. + * + * @author Mark Hayes + */ +public class ModelInternal { + + /** + * Internal access method that should not be used by applications. + * + * @param model the EntityModel. + * @param catalog the PersistCatalog. + */ + public static void setCatalog(EntityModel model, PersistCatalog catalog) { + model.setCatalog(catalog); + } + + /** + * Internal access method that should not be used by applications. + * + * @param model the EntityModel. + * @param loader the ClassLoader. + */ + public static void setClassLoader(EntityModel model, ClassLoader loader) { + /* Do not overwrite loader with null value. */ + if (loader != null) { + model.setClassLoader(loader); + } + } + + /** + * Internal access method that should not be used by applications. + * + * @param model the EntityModel. + * @return the ClassLoader. + */ + public static ClassLoader getClassLoader(EntityModel model) { + return model.getClassLoader(); + } +} diff --git a/src/com/sleepycat/persist/model/NotPersistent.java b/src/com/sleepycat/persist/model/NotPersistent.java new file mode 100644 index 0000000..c5a61af --- /dev/null +++ b/src/com/sleepycat/persist/model/NotPersistent.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static java.lang.annotation.ElementType.FIELD; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +/** + * Overrides the default rules for field persistence and defines a field as + * being non-persistent even when it is not declared with the + * transient keyword. + * + *

By default, the persistent fields of a class are all declared instance fields that are non-transient (are not declared with the transient keyword). The default rules may be overridden by specifying the {@link NotPersistent} or {@link NotTransient} annotation.

For example, the following field is non-transient (persistent) with respect to Java serialization but is transient with respect to the DPL.

    @NotPersistent
    int myField;
        + * + * @see NotTransient + * @author Mark Hayes + */ +@Documented @Retention(RUNTIME) @Target(FIELD) +public @interface NotPersistent { +} diff --git a/src/com/sleepycat/persist/model/NotTransient.java b/src/com/sleepycat/persist/model/NotTransient.java new file mode 100644 index 0000000..0f601f0 --- /dev/null +++ b/src/com/sleepycat/persist/model/NotTransient.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static java.lang.annotation.ElementType.FIELD; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +/** + * Overrides the default rules for field persistence and defines a field as + * being persistent even when it is declared with the transient + * keyword. + * + *

By default, the persistent fields of a class are all declared instance fields that are non-transient (are not declared with the transient keyword). The default rules may be overridden by specifying the {@link NotPersistent} or {@link NotTransient} annotation.

For example, the following field is transient with respect to Java serialization but is persistent with respect to the DPL.

    @NotTransient
    transient int myField;
        + * + * @see NotPersistent + * @author Mark Hayes + */ +@Documented @Retention(RUNTIME) @Target(FIELD) +public @interface NotTransient { +} diff --git a/src/com/sleepycat/persist/model/Persistent.java b/src/com/sleepycat/persist/model/Persistent.java new file mode 100644 index 0000000..8582f8c --- /dev/null +++ b/src/com/sleepycat/persist/model/Persistent.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static java.lang.annotation.ElementType.TYPE; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +/** + * Identifies a persistent class that is not an {@link Entity} class or a + * simple type. + * + * @author Mark Hayes + */ +@Documented @Retention(RUNTIME) @Target(TYPE) +public @interface Persistent { + + /** + * Identifies a new version of a class when an incompatible class change + * has been made. + * + * @return the version. + * + * @see Entity#version + */ + int version() default 0; + + /** + * Specifies the class that is proxied by this {@link PersistentProxy} + * instance. + * + * @return the Class. + * + * @see PersistentProxy + */ + Class proxyFor() default void.class; +} diff --git a/src/com/sleepycat/persist/model/PersistentProxy.java b/src/com/sleepycat/persist/model/PersistentProxy.java new file mode 100644 index 0000000..238a94e --- /dev/null +++ b/src/com/sleepycat/persist/model/PersistentProxy.java @@ -0,0 +1,136 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import com.sleepycat.persist.evolve.Converter; // for javadoc +import com.sleepycat.persist.raw.RawStore; // for javadoc + +/** + * Implemented by a proxy class to represent the persistent state of a + * (non-persistent) proxied class. Normally classes that are outside the scope + * of the developer's control must be proxied since they cannot be annotated, + * and because it is desirable to insulate the stored format from changes to + * the instance fields of the proxied class. This is useful for classes in the + * standard Java libraries, for example. + * + *

{@code PersistentProxy} objects are not required to be thread-safe. A single thread will create and call the methods of a given {@code PersistentProxy} object.


        There are three requirements for a proxy class:

1. It must implement the PersistentProxy interface.
2. It must be specified as a persistent proxy class in the entity model. When using the {@link AnnotationModel}, a proxy class is indicated by the {@link Persistent} annotation with the {@link Persistent#proxyFor} property.
3. It must be explicitly registered by calling {@link EntityModel#registerClass} before opening the store.

In order to serialize an instance of the proxied class before it is stored, an instance of the proxy class is created. The proxied instance is then passed to the proxy's {@link #initializeProxy initializeProxy} method. When this method returns, the proxy instance contains the state of the proxied instance. The proxy instance is then serialized and stored in the same way as for any persistent object.

When an instance of the proxy object is deserialized after it is retrieved from storage, its {@link #convertProxy} method is called. The instance of the proxied class returned by this method is then returned as a field in the persistent instance.


        For example:

    import java.util.Locale;

    @Persistent(proxyFor=Locale.class)
    class LocaleProxy implements PersistentProxy<Locale> {

        String language;
        String country;
        String variant;

        private LocaleProxy() {}

        public void initializeProxy(Locale object) {
            language = object.getLanguage();
            country = object.getCountry();
            variant = object.getVariant();
        }

        public Locale convertProxy() {
            return new Locale(language, country, variant);
        }
    }

The above definition allows the {@code Locale} class to be used in any persistent class, for example:

    @Persistent
    class LocalizedText {
        String text;
        Locale locale;
    }

A proxied class may not be used as a superclass for a persistent class or entity class. For example, the following is not allowed.

    @Persistent
    class LocalizedText extends Locale { // NOT ALLOWED
        String text;
    }

A proxy for proxied class P does not handle instances of subclasses of P. To proxy a subclass of P, a separate proxy class is needed.

Several built-in proxy types are used implicitly. An application-defined proxy will be used instead of a built-in proxy, if both exist for the same proxied class.

With respect to class evolution, a proxy instance is no different than any other persistent instance. When using a {@link RawStore} or {@link Converter}, only the raw data of the proxy instance will be visible. Raw data for the proxied instance never exists.

Currently a proxied object may not contain a reference to itself. For simple proxied objects such as the Locale class shown above, this naturally won't occur. But for proxied objects that are containers -- the built-in Collection and Map classes for example -- this can occur if the container is added as an element of itself. This should be avoided. If an attempt to store such an object is made, an {@code IllegalArgumentException} will be thrown.


        Note that a proxy class may not be a subclass of an entity class.

        + * + * @author Mark Hayes + */ +public interface PersistentProxy { + + /** + * Copies the state of a given proxied class instance to this proxy + * instance. + * + * @param object the proxied class instance. + */ + void initializeProxy(T object); + + /** + * Returns a new proxied class instance to which the state of this proxy + * instance has been copied. + * + * @return the new proxied class instance. + */ + T convertProxy(); +} diff --git a/src/com/sleepycat/persist/model/PrimaryKey.java b/src/com/sleepycat/persist/model/PrimaryKey.java new file mode 100644 index 0000000..2b06a26 --- /dev/null +++ b/src/com/sleepycat/persist/model/PrimaryKey.java @@ -0,0 +1,182 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static java.lang.annotation.ElementType.FIELD; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; + +/** + * Indicates the primary key field of an entity class. The value of the + * primary key field is the unique identifier for the entity in a {@link + * PrimaryIndex}. + * + *

{@link PrimaryKey} may appear on at most one declared field per class.

Primary key values may be automatically assigned as sequential integers using a {@link #sequence}. In this case the type of the key field is restricted to a simple integer type.

A primary key field may not be null, unless it is being assigned from a sequence.


        Key Field Types

The type of a key field must be one of the following:

• Any of the simple types.
• An enum type.
• A composite key class containing one or more simple type or enum fields.

        Array types are not allowed.

When using a composite key class, each field of the composite key class must be annotated with {@link KeyField} to identify the storage order and default sort order. See {@link KeyField} for an example and more information on composite keys.


        Key Sort Order

Key field types, being simple types, have a well defined and reasonable default sort order, described below. This sort order is based on a storage encoding that allows a fast byte-by-byte comparison.

• All simple types except for {@code String} are encoded so that they are sorted as expected, that is, as if the {@link Comparable#compareTo} method of their class (or, for primitives, their wrapper class) is called.
• Strings are encoded as UTF-8 byte arrays. Zero (0x0000) character values are UTF encoded as non-zero values, and therefore embedded zeros in the string are supported. The sequence {@literal {0xC0,0x80}} is used to encode a zero character. This UTF encoding is the same one used by native Java UTF libraries. However, this encoding of zero does impact the lexicographical ordering, and zeros will not be sorted first (the natural order) or last. For all character values other than zero, the default UTF byte ordering is the same as the Unicode lexicographical character ordering.

When using a composite key class with more than one field, the sorting order among fields is determined by the {@link KeyField} annotations. To override the default sort order, you can use a composite key class that implements {@link Comparable}. This allows overriding the sort order and is therefore useful even when there is only one key field in the composite key class. See Custom Sort Order for more information on sorting of composite keys.


        Inherited Primary Key

If it does not appear on a declared field in the entity class, {@code PrimaryKey} must appear on a field of an entity superclass. In the following example, the primary key on the base class is used:

    @Persistent
    class BaseClass {
        @PrimaryKey
        long id;
        ...
    }
    @Entity
    class Employee extends BaseClass {
        // inherits id primary key
        ...
    }

If more than one class with {@code PrimaryKey} is present in a class hierarchy, the key in the most derived class is used. In this case, primary key fields in superclasses are "shadowed" and are not persistent. In the following example, the primary key in the base class is not used and is not persistent:

    @Persistent
    class BaseClass {
        @PrimaryKey
        long id;
        ...
    }
    @Entity
    class Employee extends BaseClass {
        // overrides id primary key
        @PrimaryKey
        String uuid;
        ...
    }

Note that a {@code PrimaryKey} is not allowed on entity subclasses. The following is illegal and will cause an {@code IllegalArgumentException} when trying to store an {@code Employee} instance:

    @Entity
    class Person {
        @PrimaryKey
        long id;
        ...
    }
    @Persistent
    class Employee extends Person {
        @PrimaryKey
        String uuid;
        ...
    }
        + * + * @author Mark Hayes + */ +@Documented @Retention(RUNTIME) @Target(FIELD) +public @interface PrimaryKey { + + /** + * The name of a sequence from which to assign primary key values + * automatically. If a non-empty string is specified, sequential integers + * will be assigned from the named sequence. + * + *

A single sequence may be used for more than one entity class by specifying the same sequence name for each {@code PrimaryKey}. For each named sequence, a {@link com.sleepycat.je.Sequence} will be used to assign key values. For more information on configuring sequences, see {@link EntityStore#setSequenceConfig EntityStore.setSequenceConfig}.

To use a sequence, the type of the key field must be a primitive integer type ({@code byte}, {@code short}, {@code int} or {@code long}) or the primitive wrapper class for one of these types. A composite key class may also be used to override sort order, but it may contain only a single key field, and this field must have one of the types previously mentioned.

When an entity with a primary key sequence is stored using one of the put methods in the {@link PrimaryIndex}, a new key will be assigned if the primary key field in the entity instance is null (for a reference type) or zero (for a primitive integer type). Specifying zero for a primitive integer key field is allowed because the initial value of the sequence is one (not zero) by default. If the sequence configuration is changed such that zero is part of the sequence, then the field type must be a primitive wrapper class and the field value must be null to cause a new key to be assigned.
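
For example (hypothetical Order class and orderById primary index):

    @Entity
    class Order {
        // Keys are assigned from the named sequence.
        @PrimaryKey(sequence = "orderIds")
        long id;
    }

    Order order = new Order();  // id is zero, so a key will be assigned
    orderById.put(order);       // assigns the next value from "orderIds"
    long assigned = order.id;   // the assigned key is now in the field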

When one of the put methods in the {@link PrimaryIndex} is called and a new key is assigned, the assigned value is returned to the caller via the key field of the entity object that is passed as a parameter.

        + * + * @return the sequence name or an empty string. + */ + String sequence() default ""; +} diff --git a/src/com/sleepycat/persist/model/PrimaryKeyMetadata.java b/src/com/sleepycat/persist/model/PrimaryKeyMetadata.java new file mode 100644 index 0000000..6543378 --- /dev/null +++ b/src/com/sleepycat/persist/model/PrimaryKeyMetadata.java @@ -0,0 +1,73 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +/** + * The metadata for a primary key field. A primary key may be specified with + * the {@link PrimaryKey} annotation. + * + *

{@code PrimaryKeyMetadata} objects are thread-safe. Multiple threads may safely call the methods of a shared {@code PrimaryKeyMetadata} object.

        + * + * @author Mark Hayes + */ +public class PrimaryKeyMetadata extends FieldMetadata { + + private static final long serialVersionUID = 2946863622972437018L; + + private String sequenceName; + + /** + * Used by an {@code EntityModel} to construct primary key metadata. + * + * @param name the field name. + * @param className the class name. + * @param declaringClassName the name of the class where the field is + * declared. + * @param sequenceName the sequence name. + */ + public PrimaryKeyMetadata(String name, + String className, + String declaringClassName, + String sequenceName) { + super(name, className, declaringClassName); + this.sequenceName = sequenceName; + } + + /** + * Returns the name of the sequence for assigning key values. This may be + * specified using the {@link PrimaryKey#sequence} annotation. + * + * @return the sequence name. + */ + public String getSequenceName() { + return sequenceName; + } + + @Override + public boolean equals(Object other) { + if (other instanceof PrimaryKeyMetadata) { + PrimaryKeyMetadata o = (PrimaryKeyMetadata) other; + return super.equals(o) && + ClassMetadata.nullOrEqual(sequenceName, o.sequenceName); + } else { + return false; + } + } + + @Override + public int hashCode() { + return super.hashCode() + ClassMetadata.hashCode(sequenceName); + } +} diff --git a/src/com/sleepycat/persist/model/Relationship.java b/src/com/sleepycat/persist/model/Relationship.java new file mode 100644 index 0000000..ce4b2d2 --- /dev/null +++ b/src/com/sleepycat/persist/model/Relationship.java @@ -0,0 +1,66 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +/** + * Defines the relationship between instances of the entity class and the + * secondary keys. This can be specified using a {@link SecondaryKey#relate} + * annotation. + * + * @author Mark Hayes + */ +public enum Relationship { + + /** + * Relates many entities to one secondary key. + * + *

The secondary index will have non-unique keys; in other words, duplicates will be allowed.

The secondary key field is singular; in other words, it may not be an array or collection type.

        + */ + MANY_TO_ONE, + + /** + * Relates one entity to many secondary keys. + * + *

The secondary index will have unique keys; in other words, duplicates will not be allowed.


        The secondary key field must be an array or collection type.

        + */ + ONE_TO_MANY, + + /** + * Relates many entities to many secondary keys. + * + *

The secondary index will have non-unique keys; in other words, duplicates will be allowed.


        The secondary key field must be an array or collection type.

        + */ + MANY_TO_MANY, + + /** + * Relates one entity to one secondary key. + * + *

The secondary index will have unique keys; in other words, duplicates will not be allowed.

The secondary key field is singular; in other words, it may not be an array or collection type.

        + */ + ONE_TO_ONE; +} diff --git a/src/com/sleepycat/persist/model/SecondaryKey.java b/src/com/sleepycat/persist/model/SecondaryKey.java new file mode 100644 index 0000000..ccf74a9 --- /dev/null +++ b/src/com/sleepycat/persist/model/SecondaryKey.java @@ -0,0 +1,228 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +import static java.lang.annotation.ElementType.FIELD; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; + +/** + * Indicates a secondary key field of an entity class. The value of the + * secondary key field is a unique or non-unique identifier for the entity and + * is accessed via a {@link com.sleepycat.persist.SecondaryIndex}. + * + *

+ * {@code SecondaryKey} may appear on any number of fields in an entity
+ * class, subclasses and superclasses.  For a secondary key field in the
+ * entity class or one of its superclasses, all entity instances will be
+ * indexed by that field (if it is non-null).  For a secondary key field in
+ * an entity subclass, only instances of that subclass will be indexed by
+ * that field (if it is non-null).
+ *
+ * If a secondary key field is null, the entity will not be indexed by that
+ * key.  In other words, the entity cannot be queried by that secondary key
+ * nor can the entity be found by iterating through the secondary index.
+ *
+ * For a given entity class and its superclasses and subclasses, no two
+ * secondary keys may have the same name.  By default, the field name
+ * identifies the secondary key and the secondary index for a given entity
+ * class.  {@link #name} may be specified to override this default.
+ *
+ * Using {@link #relate}, instances of the entity class are related to
+ * secondary keys in a many-to-one, one-to-many, many-to-many, or one-to-one
+ * relationship.  This required property specifies the cardinality of each
+ * side of the relationship.
+ *
+ * A secondary key may optionally be used to form a relationship with
+ * instances of another entity class using {@link #relatedEntity} and {@link
+ * #onRelatedEntityDelete}.  This establishes foreign key constraints
+ * for the secondary key.
+ *
+ * The secondary key field type must be a Set, Collection or array type when
+ * an x-to-many relationship is used, or a singular type when an x-to-one
+ * relationship is used; see {@link #relate}.
+ *
+ * The field type (or element type, when a Set, Collection or array type is
+ * used) of a secondary key field must follow the same rules as for a
+ * primary key type.  The key sort order is also the same.
+ *
+ * For a secondary key field with a collection type, a type parameter must
+ * be used to specify the element type.  For example, {@code
+ * Collection<String>} is allowed but a raw {@code Collection} is not.

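+ * For example, a minimal sketch of a non-unique secondary key (the class
+ * and field names here are illustrative, not part of the API):
+ *
+ *  {@literal @Entity}
+ *  class Employee {
+ *
+ *      {@literal @PrimaryKey}
+ *      long id;
+ *
+ *      {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+ *      String department; // many employees may share one department key
+ *
+ *      private Employee() {} // For bindings
+ *  }
+ *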
        + * + * @author Mark Hayes + */ +@Documented @Retention(RUNTIME) @Target(FIELD) +public @interface SecondaryKey { + + /** + * Defines the relationship between instances of the entity class and the + * secondary keys. + * + *

+     * The table below summarizes how to create all four variations of
+     * relationships.
+     *
+     * <table border="yes">
+     *     <tr><th>Relationship</th>
+     *         <th>Field type</th>
+     *         <th>Key type</th>
+     *         <th>Example</th></tr>
+     *     <tr><td>{@link Relationship#ONE_TO_ONE}</td>
+     *         <td>Singular</td>
+     *         <td>Unique</td>
+     *         <td>A person record with a unique social security number
+     *             key.</td></tr>
+     *     <tr><td>{@link Relationship#MANY_TO_ONE}</td>
+     *         <td>Singular</td>
+     *         <td>Duplicates</td>
+     *         <td>A person record with a non-unique employer key.</td></tr>
+     *     <tr><td>{@link Relationship#ONE_TO_MANY}</td>
+     *         <td>Set/Collection/array</td>
+     *         <td>Unique</td>
+     *         <td>A person record with multiple unique email address
+     *             keys.</td></tr>
+     *     <tr><td>{@link Relationship#MANY_TO_MANY}</td>
+     *         <td>Set/Collection/array</td>
+     *         <td>Duplicates</td>
+     *         <td>A person record with multiple non-unique organization
+     *             keys.</td></tr>
+     * </table>
+     *

+     * For a many-to-x relationship, the secondary index will have non-unique
+     * keys; in other words, duplicates will be allowed.  Conversely, for a
+     * one-to-x relationship, the secondary index will have unique keys.
+     *
+     * For an x-to-one relationship, the secondary key field is singular; in
+     * other words, it may not be a Set, Collection or array type.
+     * Conversely, for an x-to-many relationship, the secondary key field
+     * must be a Set, Collection or array type.  A collection type is any
+     * implementation of {@link java.util.Collection}.
+     *
+     * For an x-to-many relationship, the field type should normally be
+     * {@link java.util.Set} (or a subtype of this interface).  This
+     * accurately expresses the fact that an Entity may not have two
+     * identical secondary keys.  For flexibility, a {@link
+     * java.util.Collection} (or a subtype of this interface) or an array
+     * type may also be used.  In that case, any duplicate key values in the
+     * Collection or array are ignored.

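+     * For example, a sketch of the two field shapes (the field names here
+     * are illustrative, not part of the API):
+     *
+     *  {@literal @SecondaryKey(relate=MANY_TO_ONE)}
+     *  String department;  // singular field, non-unique keys
+     *
+     *  {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+     *  {@literal Set<String> emailAddresses = new HashSet<String>();}  // set field, unique keys
+     *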
        + * + * @return the Relationship. + */ + Relationship relate(); + + /** + * Specifies the entity to which this entity is related, for establishing + * foreign key constraints. Values of this secondary key will be + * constrained to the set of primary key values for the given entity class. + * + *

+     * The given class must be an entity class.  This class is called the
+     * related entity or foreign entity.
+     *
+     * When a related entity class is specified, a check (foreign key
+     * constraint) is made every time a new secondary key value is stored for
+     * this entity, and every time a related entity is deleted.
+     *
+     * Whenever a new secondary key value is stored for this entity, it is
+     * checked to ensure it exists as a primary key value of the related
+     * entity.  If it does not, an exception is thrown by the {@link
+     * PrimaryIndex} {@code put} method.  On BDB JE, a {@link
+     * com.sleepycat.je.ForeignConstraintException} will be thrown.
+     *
+     * Whenever a related entity is deleted and its primary key value exists
+     * as a secondary key value for this entity, the action is taken that is
+     * specified using the {@link #onRelatedEntityDelete} property.
+     *
+     * Together, these two checks guarantee that a secondary key value for
+     * this entity will always exist as a primary key value for the related
+     * entity.  Note, however, that a transactional store must be configured
+     * to guarantee this to be true in the face of a crash; see {@link
+     * StoreConfig#setTransactional}.

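+     * For example, a sketch of a foreign key constrained to existing
+     * {@code Employer} primary keys (the names here are illustrative):
+     *
+     *  {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Employer.class)}
+     *  Long employerId;  // must match an existing Employer primary key
+     *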
        + * + * @return the related entity class, or void.class if none is specified. + */ + Class relatedEntity() default void.class; + + /** + * Specifies the action to take when a related entity is deleted having a + * primary key value that exists as a secondary key value for this entity. + * + *

+     * Note: This property only applies when {@link #relatedEntity}
+     * is specified to define the related entity.
+     *
+     * The default action, {@link DeleteAction#ABORT ABORT}, means that an
+     * exception is thrown in order to abort the current transaction.  On
+     * BDB JE, a {@link com.sleepycat.je.DeleteConstraintException} is
+     * thrown.
+     *
+     * If {@link DeleteAction#CASCADE CASCADE} is specified, then this
+     * entity will be deleted also.  This in turn could trigger further
+     * deletions, causing a cascading effect.
+     *
+     * If {@link DeleteAction#NULLIFY NULLIFY} is specified, then the
+     * secondary key in this entity is set to null and this entity is
+     * updated.  If the key field type is singular, the field value is set to
+     * null; therefore, to specify {@code NULLIFY} for a singular key field
+     * type, a primitive wrapper type must be used instead of a primitive
+     * type.  If the key field type is an array or collection type, the key
+     * is deleted from the array (the array is resized) or from the
+     * collection (using {@link java.util.Collection#remove
+     * Collection.remove}).

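+     * For example, a sketch of a singular key that can be nullified (the
+     * names here are illustrative); note the wrapper type {@code Long}
+     * rather than the primitive {@code long}:
+     *
+     *  {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Employer.class,}
+     *  {@literal                onRelatedEntityDelete=NULLIFY)}
+     *  Long employerId;  // set to null when the related Employer is deleted
+     *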
        + * + * @return the DeleteAction, or {@link DeleteAction#ABORT} if none is + * specified. + */ + DeleteAction onRelatedEntityDelete() default DeleteAction.ABORT; + + /** + * Specifies the name of the key in order to use a name that is different + * than the field name. + * + *

+     * This is convenient when prefixes or suffixes are used on field names.
+     * For example:
+     *
+     *  class Person {
+     *      {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class, name="parentSsn")}
+     *      String m_parentSsn;
+     *  }
+     *
+     * It can also be used to uniquely name a key when multiple secondary
+     * keys for a single entity class have the same field name.  For example,
+     * an entity class and its subclass may both have a field named 'date',
+     * and both fields are used as secondary keys.  The {@code name} property
+     * can be specified for one or both fields to give each key a unique
+     * name.

        + * + * @return the key name that overrides the field name, or empty string if + * none is specified. + */ + String name() default ""; +} diff --git a/src/com/sleepycat/persist/model/SecondaryKeyMetadata.java b/src/com/sleepycat/persist/model/SecondaryKeyMetadata.java new file mode 100644 index 0000000..ef4a951 --- /dev/null +++ b/src/com/sleepycat/persist/model/SecondaryKeyMetadata.java @@ -0,0 +1,146 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.model; + +/** + * The metadata for a secondary key field. A secondary key may be specified + * with the {@link SecondaryKey} annotation. + * + *

        {@code SecondaryKeyMetadata} objects are thread-safe. Multiple threads + * may safely call the methods of a shared {@code SecondaryKeyMetadata} + * object.

        + * + * @author Mark Hayes + */ +public class SecondaryKeyMetadata extends FieldMetadata { + + private static final long serialVersionUID = 8118924993396722502L; + + private String keyName; + private Relationship relationship; + private String elementClassName; + private String relatedEntity; + private DeleteAction deleteAction; + + /** + * Used by an {@code EntityModel} to construct secondary key metadata. + * + * @param name the field name. + * @param className the class name. + * @param declaringClassName the name of the class where the field is + * declared. + * @param elementClassName the element class name. + * @param keyName the key name. + * @param relationship the Relationship. + * @param relatedEntity the class name of the related (foreign) entity. + * @param deleteAction the DeleteAction. + */ + public SecondaryKeyMetadata(String name, + String className, + String declaringClassName, + String elementClassName, + String keyName, + Relationship relationship, + String relatedEntity, + DeleteAction deleteAction) { + super(name, className, declaringClassName); + this.elementClassName = elementClassName; + this.keyName = keyName; + this.relationship = relationship; + this.relatedEntity = relatedEntity; + this.deleteAction = deleteAction; + } + + /** + * Returns the class name of the array or collection element for a {@link + * Relationship#ONE_TO_MANY ONE_TO_MANY} or {@link + * Relationship#MANY_TO_MANY MANY_TO_MANY} relationship, or null for a + * Relationship#ONE_TO_ONE ONE_TO_ONE} or {@link Relationship#MANY_TO_ONE + * MANY_TO_ONE} relationship. + * + * @return the element class name. + */ + public String getElementClassName() { + return elementClassName; + } + + /** + * Returns the key name, which may be different from the field name. + * + * @return the key name. + */ + public String getKeyName() { + return keyName; + } + + /** + * Returns the relationship between instances of the entity class and the + * secondary keys. This may be specified using the {@link + * SecondaryKey#relate} annotation. + * + * @return the Relationship. + */ + public Relationship getRelationship() { + return relationship; + } + + /** + * Returns the class name of the related (foreign) entity, for which + * foreign key constraints are specified using the {@link + * SecondaryKey#relatedEntity} annotation. + * + * @return the class name of the related (foreign) entity. + */ + public String getRelatedEntity() { + return relatedEntity; + } + + /** + * Returns the action to take when a related entity is deleted having a + * primary key value that exists as a secondary key value for this entity. + * This may be specified using the {@link + * SecondaryKey#onRelatedEntityDelete} annotation. + * + * @return the DeleteAction. 
+ */ + public DeleteAction getDeleteAction() { + return deleteAction; + } + + @Override + public boolean equals(Object other) { + if (other instanceof SecondaryKeyMetadata) { + SecondaryKeyMetadata o = (SecondaryKeyMetadata) other; + return super.equals(o) && + relationship == o.relationship && + ClassMetadata.nullOrEqual(deleteAction, o.deleteAction) && + ClassMetadata.nullOrEqual(keyName, o.keyName) && + ClassMetadata.nullOrEqual(elementClassName, + o.elementClassName) && + ClassMetadata.nullOrEqual(relatedEntity, o.relatedEntity); + } else { + return false; + } + } + + @Override + public int hashCode() { + return super.hashCode() + + relationship.hashCode() + + ClassMetadata.hashCode(deleteAction) + + ClassMetadata.hashCode(keyName) + + ClassMetadata.hashCode(elementClassName) + + ClassMetadata.hashCode(relatedEntity); + } +} diff --git a/src/com/sleepycat/persist/model/package.html b/src/com/sleepycat/persist/model/package.html new file mode 100644 index 0000000..f2d6f88 --- /dev/null +++ b/src/com/sleepycat/persist/model/package.html @@ -0,0 +1,5 @@ + + +Annotations for defining a persistent object model. + + diff --git a/src/com/sleepycat/persist/package.html b/src/com/sleepycat/persist/package.html new file mode 100644 index 0000000..1863b38 --- /dev/null +++ b/src/com/sleepycat/persist/package.html @@ -0,0 +1,605 @@ + + +The Direct Persistence Layer (DPL) adds a persistent object model to the +Berkeley DB transactional engine. + +

        Package Specification

        + + + +

        Introduction

        + +

        The Direct Persistence Layer (DPL) was designed to meet the following +requirements.

        +
          +
• A type-safe and convenient API is provided for accessing persistent
+objects.  The use of Java generic types, although optional, is fully
+exploited to provide type safety.  For example:
+
+{@literal PrimaryIndex<Long, Employer> employerById = ...;}
          +long employerId = ...;
          +Employer employer = employerById.get(employerId);
          +
+
        • All Java types are allowed to be persistent without requiring that they +implement special interfaces. Persistent fields may be {@code private}, +package-private (default access), {@code protected}, or {@code public}. No +hand-coding of bindings is required. However, each persistent class must have +a default constructor. For example: +
          +{@literal @Persistent}
          +class Address {
          +    String street;
          +    String city;
          +    String state;
          +    int zipCode;
          +    private Address() {}
          +}
          +
+
        • Bytecode enhancement provides fully optimized bindings that do not use Java +reflection.
+
        • It is easy to define primary and secondary keys. No external schema is +required and Java annotations may be used for defining all metadata. +Extensions may derive metadata from other sources. For example, the following +Employer class is defined as a persistent entity with a primary key field +{@code id} and the secondary key field {@code name}:
+
          +{@literal @Entity}
          +class Employer {
          +
          +    {@literal @PrimaryKey(sequence="ID")}
          +    long id;
          +
          +    {@literal @SecondaryKey(relate=ONE_TO_ONE)}
          +    String name;
          +
          +    Address address;
          +
          +    private Employer() {}
          +}
          +
        • Interoperability with external components is supported via the Java +collections framework. Any primary or secondary index can be accessed using a +standard java.util collection. For example: +
{@literal java.util.SortedMap<String, Employer> map = employerByName.sortedMap();}
          +
+
        • Class evolution is explicitly supported. Compatible changes (adding fields +and type widening) are performed automatically and transparently. For example, +without any special configuration a {@code street2} field may be added to the +{@code Address} class and the type of the {@code zipCode} field may be changed +from {@code int} to {@code long}: +
          +{@literal @Persistent}
          +class Address {
          +    String street;
          +    String street2;
          +    String city;
          +    String state;
          +    long zipCode;
          +    private Address() {}
          +}
+Many incompatible class changes, such as renaming fields or refactoring a
+single class, can be performed using {@link
+com.sleepycat.persist.evolve.Mutations Mutations}; a configuration sketch
+appears just after this list.  Mutations are automatically applied lazily as
+data is accessed, avoiding downtime to convert large databases during a
+software upgrade.
+

Complex refactoring involving multiple classes may be performed using a
+store conversion.  The DPL
+always provides access to your data via a {@code RawStore}, no matter what
+changes have been made to persistent classes.

          +
+
          +
        • The performance of the Berkeley DB transactional engine is not compromised. +Operations are internally mapped directly to the engine API, object bindings +are lightweight, and all engine tuning parameters are available. For example, +a "dirty read" may be performed using an optional {@link +com.sleepycat.je.LockMode LockMode} parameter: +
          Employer employer = employerByName.get(null, "Gizmo Inc", LockMode.READ_UNCOMMITTED);
          +For high performance applications, {@link com.sleepycat.je.DatabaseConfig +DatabaseConfig} parameters may be used to tune the performance of the Berkeley +DB engine. For example, the size of an internal Btree node can be specified +as follows: +
          +DatabaseConfig config = store.getPrimaryConfig(Employer.class);
          +config.setNodeMaxEntries(64);
          +store.setPrimaryConfig(config);
          +
+
        + +
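+As a sketch of how such a mutation might be configured (the class and field
+names here are illustrative, not taken from the examples above):
+
+Mutations mutations = new Mutations();
+mutations.addRenamer(new Renamer("my.package.Address", 0, "street2", "line2"));
+
+StoreConfig storeConfig = new StoreConfig();
+storeConfig.setMutations(mutations);
+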

        The Entity Model

        + +

        The DPL is intended for applications that represent persistent domain +objects using Java classes. An entity class is an ordinary Java class +that has a primary key and is stored and accessed using a primary index. It +may also have any number of secondary keys, and entities may be accessed by +secondary key using a secondary index.

        + +

        An entity class may be defined with the {@link +com.sleepycat.persist.model.Entity Entity} annotation. For each entity class, +its primary key may be defined using the {@link +com.sleepycat.persist.model.PrimaryKey PrimaryKey} annotation and any number of +secondary keys may be defined using the {@link +com.sleepycat.persist.model.SecondaryKey SecondaryKey} annotation.

        + +

        In the following example, the {@code Person.ssn} (social security number) +field is the primary key and the {@code Person.employerIds} field is a +many-to-many secondary key.

        +
        +{@literal @Entity}
        +class Person {
        +
        +    {@literal @PrimaryKey}
        +    String ssn;
        +
        +    String name;
        +    Address address;
        +
        +    {@literal @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Employer.class)}
+    {@literal Set<Long> employerIds = new HashSet<Long>();}
        +
        +    private Person() {} // For bindings
        +}
        + +

        A set of entity classes constitutes an entity model. In addition +to isolated entity classes, an entity model may contain relationships between +entities. Relationships may be defined using the {@link +com.sleepycat.persist.model.SecondaryKey SecondaryKey} annotation. +Many-to-one, one-to-many, many-to-many and one-to-one relationships are +supported, as well as foreign key constraints.

        + +

        In the example above, a relationship between the {@code Person} and {@code +Employer} entities is defined via the {@code Person.employerIds} field. The +{@code relatedEntity=Employer.class} annotation property establishes foreign +key constraints to guarantee that every element of the {@code employerIds} set +is a valid {@code Employer} primary key.

        + +

        For more information on the entity model, see the {@link +com.sleepycat.persist.model.AnnotationModel AnnotationModel} and the {@link +com.sleepycat.persist.model.Entity Entity} annotation.

        + +

        The root object in the DPL is the {@link com.sleepycat.persist.EntityStore +EntityStore}. An entity store manages any number of objects for each entity +class defined in the model. The store provides access to the primary and +secondary indices for each entity class, for example:

        + +
        +EntityStore store = new EntityStore(...);
        +
+{@literal PrimaryIndex<String, Person> personBySsn =}
        +    store.getPrimaryIndex(String.class, Person.class);
        + +
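+A secondary index is opened by giving the primary index, the key class and
+the key name; for example (a sketch using the Person entity shown above):
+
+{@literal SecondaryIndex<Long, String, Person> personByEmployerIds =}
+    store.getSecondaryIndex(personBySsn, Long.class, "employerIds");
+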

        A brief example

        + +

        The following example shows how to define an entity model and how to store +and access persistent objects. Exception handling is omitted for brevity.

        + +
        +import java.io.File;
        +import java.util.HashSet;
        +import java.util.Set;
        +
        +import com.sleepycat.je.DatabaseException;
        +import com.sleepycat.je.Environment;
        +import com.sleepycat.je.EnvironmentConfig;
        +import com.sleepycat.persist.EntityCursor;
        +import com.sleepycat.persist.EntityIndex;
        +import com.sleepycat.persist.EntityStore;
        +import com.sleepycat.persist.PrimaryIndex;
        +import com.sleepycat.persist.SecondaryIndex;
        +import com.sleepycat.persist.StoreConfig;
        +import com.sleepycat.persist.model.Entity;
        +import com.sleepycat.persist.model.Persistent;
        +import com.sleepycat.persist.model.PrimaryKey;
        +import com.sleepycat.persist.model.SecondaryKey;
        +import static com.sleepycat.persist.model.DeleteAction.NULLIFY;
        +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
        +import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY;
        +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
        +import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY;
        +
        +// An entity class.
        +//
        +{@literal @Entity}
        +class Person {
        +
        +    {@literal @PrimaryKey}
        +    String ssn;
        +
        +    String name;
        +    Address address;
        +
        +    {@literal @SecondaryKey(relate=MANY_TO_ONE, relatedEntity=Person.class)}
        +    String parentSsn;
        +
        +    {@literal @SecondaryKey(relate=ONE_TO_MANY)}
+    {@literal Set<String> emailAddresses = new HashSet<String>();}
        +
        +    {@code @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=Employer.class,
        +                                       onRelatedEntityDelete=NULLIFY)}
+    {@code Set<Long> employerIds = new HashSet<Long>();}
        +
        +    Person(String name, String ssn, String parentSsn) {
        +        this.name = name;
        +        this.ssn = ssn;
        +        this.parentSsn = parentSsn;
        +    }
        +
        +    private Person() {} // For bindings
        +}
        +
        +// Another entity class.
        +//
        +{@literal @Entity}
        +class Employer {
        +
        +    {@literal @PrimaryKey(sequence="ID")}
        +    long id;
        +
        +    {@literal @SecondaryKey(relate=ONE_TO_ONE)}
        +    String name;
        +
        +    Address address;
        +
        +    Employer(String name) {
        +        this.name = name;
        +    }
        +
        +    private Employer() {} // For bindings
        +}
        +
        +// A persistent class used in other classes.
        +//
        +{@literal @Persistent}
        +class Address {
        +    String street;
        +    String city;
        +    String state;
        +    int zipCode;
        +    private Address() {} // For bindings
        +}
        +
        +// The data accessor class for the entity model.
        +//
        +class PersonAccessor {
        +
        +    // Person accessors
        +    //
+    {@literal PrimaryIndex<String, Person> personBySsn;}
+    {@literal SecondaryIndex<String, String, Person> personByParentSsn;}
+    {@literal SecondaryIndex<String, String, Person> personByEmailAddresses;}
+    {@literal SecondaryIndex<Long, String, Person> personByEmployerIds;}
        +
        +    // Employer accessors
        +    //
+    {@literal PrimaryIndex<Long, Employer> employerById;}
+    {@literal SecondaryIndex<String, Long, Employer> employerByName;}
        +
        +    // Opens all primary and secondary indices.
        +    //
        +    public PersonAccessor(EntityStore store)
        +        throws DatabaseException {
        +
        +        personBySsn = store.getPrimaryIndex(
        +            String.class, Person.class);
        +
        +        personByParentSsn = store.getSecondaryIndex(
        +            personBySsn, String.class, "parentSsn");
        +
        +        personByEmailAddresses = store.getSecondaryIndex(
        +            personBySsn, String.class, "emailAddresses");
        +
        +        personByEmployerIds = store.getSecondaryIndex(
        +            personBySsn, Long.class, "employerIds");
        +
        +        employerById = store.getPrimaryIndex(
        +            Long.class, Employer.class);
        +
        +        employerByName = store.getSecondaryIndex(
        +            employerById, String.class, "name"); 
        +    }
        +}
        +
        +// Open a transactional Berkeley DB engine environment.
        +//
        +EnvironmentConfig envConfig = new EnvironmentConfig();
        +envConfig.setAllowCreate(true);
        +envConfig.setTransactional(true);
        +Environment env = new Environment(new File("/my/data"), envConfig);
        +
        +// Open a transactional entity store.
        +//
        +StoreConfig storeConfig = new StoreConfig();
        +storeConfig.setAllowCreate(true);
        +storeConfig.setTransactional(true);
        +EntityStore store = new EntityStore(env, "PersonStore", storeConfig);
        +
        +// Initialize the data access object.
        +//
        +PersonAccessor dao = new PersonAccessor(store);
        +
        +// Add a parent and two children using the Person primary index.  Specifying a
        +// non-null parentSsn adds the child Person to the sub-index of children for
        +// that parent key.
        +//
        +dao.personBySsn.put(new Person("Bob Smith", "111-11-1111", null));
        +dao.personBySsn.put(new Person("Mary Smith", "333-33-3333", "111-11-1111"));
        +dao.personBySsn.put(new Person("Jack Smith", "222-22-2222", "111-11-1111"));
        +
        +// Print the children of a parent using a sub-index and a cursor.
        +//
+{@literal EntityCursor<Person> children =}
        +    dao.personByParentSsn.subIndex("111-11-1111").entities();
        +try {
        +    for (Person child : children) {
        +        System.out.println(child.ssn + ' ' + child.name);
        +    }
        +} finally {
        +    children.close();
        +}
        +
        +// Get Bob by primary key using the primary index.
        +//
        +Person bob = dao.personBySsn.get("111-11-1111");
        +assert bob != null;
        +
        +// Create two employers.  Their primary keys are assigned from a sequence.
        +//
        +Employer gizmoInc = new Employer("Gizmo Inc");
        +Employer gadgetInc = new Employer("Gadget Inc");
        +dao.employerById.put(gizmoInc);
        +dao.employerById.put(gadgetInc);
        +
        +// Bob has two jobs and two email addresses.
        +//
        +bob.employerIds.add(gizmoInc.id);
        +bob.employerIds.add(gadgetInc.id);
        +bob.emailAddresses.add("bob@bob.com");
        +bob.emailAddresses.add("bob@gmail.com");
        +
        +// Update Bob's record.
        +//
        +dao.personBySsn.put(bob);
        +
        +// Bob can now be found by both email addresses.
        +//
        +bob = dao.personByEmailAddresses.get("bob@bob.com");
        +assert bob != null;
        +bob = dao.personByEmailAddresses.get("bob@gmail.com");
        +assert bob != null;
        +
        +// Bob can also be found as an employee of both employers.
        +//
+{@literal EntityIndex<String, Person> employees;}
        +employees = dao.personByEmployerIds.subIndex(gizmoInc.id);
        +assert employees.contains("111-11-1111");
        +employees = dao.personByEmployerIds.subIndex(gadgetInc.id);
        +assert employees.contains("111-11-1111");
        +
        +// When an employer is deleted, the onRelatedEntityDelete=NULLIFY for the
        +// employerIds key causes the deleted ID to be removed from Bob's employerIds.
        +//
        +dao.employerById.delete(gizmoInc.id);
        +bob = dao.personBySsn.get("111-11-1111");
        +assert !bob.employerIds.contains(gizmoInc.id);
        +
        +store.close();
        +env.close();
        +
        +

        The example illustrates several characteristics of the DPL:

        +
          +
        • Persistent data and keys are defined in terms of instance fields. For +brevity the example does not show getter and setter methods, although these +would normally exist to provide encapsulation. The DPL accesses fields during +object serialization and deserialization, rather than calling getter/setter +methods, leaving business methods free to enforce arbitrary validation rules. +For example: +
          +{@literal @Persistent}
          +public class ConstrainedValue {
          +
          +    private int min;
          +    private int max;
          +    private int value;
          +
          +    private ConstrainedValue() {} // For bindings
          +
          +    public ConstrainedValue(int min, int max) {
          +        this.min = min;
          +        this.max = max;
          +        value = min;
          +    }
          +
+    public void setValue(int value) {
          +        if (value < min || value > max) {
          +            throw new IllegalArgumentException("out of range");
          +        }
          +        this.value = value;
          +    }
          +}
          +
+The above {@code setValue} method would not work if it were called during
+object deserialization, since the order in which fields are set is arbitrary:
+the {@code min} and {@code max} fields may not yet have been set when {@code
+value} is set.
+
+
          +
        • The example creates a transactional store and therefore all operations are +transaction protected. Because no explicit transactions are used, auto-commit +is used implicitly. + +

          Explicit transactions may also be used to group multiple operations in a +single transaction, and all access methods have optional transaction +parameters. For example, the following two operations are performed atomically +in a transaction: +

          +Transaction txn = env.beginTransaction(null, null);
          +dao.employerById.put(txn, gizmoInc);
          +dao.employerById.put(txn, gadgetInc);
          +txn.commit();
          +
          +
+
        • To provide maximum performance, the DPL operations map directly to the +Btree operations of the Berkeley DB engine. Unlike other persistence +approaches, keys and indices are exposed for direct access and performance +tuning. +

          Queries are implemented by calling methods of the primary and secondary +indices. An {@link com.sleepycat.persist.EntityJoin EntityJoin} class is also +available for performing equality joins. For example, the following code +queries all of Bob's children that work for Gizmo Inc: +

+{@literal EntityJoin<String, Person> join = new EntityJoin<String, Person>(dao.personBySsn);}
          +
          +join.addCondition(dao.personByParentSsn, "111-11-1111");
          +join.addCondition(dao.personByEmployerIds, gizmoInc.id);
          +
+{@literal ForwardCursor<Person> results = join.entities();}
          +try {
          +    for (Person person : results) {
          +        System.out.println(person.ssn + ' ' + person.name);
          +    }
          +} finally {
          +    results.close();
          +}
          +
+
        • Object relationships are based on keys. When a {@code Person} with a given +employer ID in its {@code employerIds} set is stored, the {@code Person} object +becomes part of the collection of employees for that employer. This collection +of employees is accessed using a {@link +com.sleepycat.persist.SecondaryIndex#subIndex SecondaryIndex.subIndex} for the +employer ID, as shown below: +
+{@literal EntityCursor<Person> employees =}
          +    dao.personByEmployerIds.subIndex(gizmoInc.id).entities();
          +try {
          +    for (Person employee : employees) {
          +        System.out.println(employee.ssn + ' ' + employee.name);
          +    }
          +} finally {
          +    employees.close();
          +}
          +
+
        • Note that when Bob's employer is deleted in the example, the {@code Person} +object for Bob is refetched to see the change to its {@code employerIds}. This +is because objects are accessed by value, not by reference. In other words, no +object cache or "persistence context" is maintained by the DPL. The low level +caching of the embedded Berkeley DB engine, combined with lightweight object +bindings, provides maximum performance.
+
        + +

        Which API to use?

        + +

        The Berkeley DB engine has a {@link com.sleepycat.je Base API}, a {@link +com.sleepycat.collections Collections API} and a {@link com.sleepycat.persist +Direct Persistence Layer (DPL)}. Follow these guidelines if you are not sure +which API to use:

        +
          +
        • When Java classes are used to represent domain objects in an application, +the DPL is recommended. The more domain classes, the more value there is in +using annotations to define your schema.
+
          +
        • When porting an application between Berkeley DB and Berkeley DB Java +Edition, or when you've chosen not to use Java classes to represent domain +objects, then the Base API is recommended. You may also prefer to use this API +if you have very few domain classes.
+
          +
        • The Collections API is useful for interoperating with external components +because it conforms to the standard Java Collections Framework. It is +therefore useful in combination with both the Base API and the DPL. You may +prefer this API because it provides the familiar Java Collections +interface.
+
        + +

        Java 1.5 dependencies

        + +

        The DPL uses two features of Java 1.5: generic types and annotations. If +you wish to avoid using these two Java 1.5 features, the DPL provides options +for doing so.

        + +

        Generic Types

        + +

        Generic types are used to provide type safety, especially for the {@link +com.sleepycat.persist.PrimaryIndex PrimaryIndex}, {@link +com.sleepycat.persist.SecondaryIndex SecondaryIndex}, and {@link +com.sleepycat.persist.EntityCursor EntityCursor} classes. If you don't wish to +use generic types, you can simply not declare your index and cursor objects +using generic type parameters. This is the same as using the Java 1.5 +Collections Framework without using generic types.

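+For example, without generic type parameters an index may be declared using
+raw types and used with a cast (a sketch based on the Person entity above):
+
+PrimaryIndex personBySsn = store.getPrimaryIndex(String.class, Person.class);
+Person person = (Person) personBySsn.get("111-11-1111");
+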
        + +

        Annotations

        + +

        If you don't wish to use annotations, you can provide another source of +metadata by implementing an {@link com.sleepycat.persist.model.EntityModel +EntityModel} class. For example, naming conventions, static members, or an XML +configuration file might be used as a source of metadata. However, if you +don't use annotations then you won't be able to use bytecode enhancement, which +is described next.

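+For example, a custom model might be installed as follows ({@code
+MyXmlEntityModel} is a hypothetical EntityModel subclass written by the
+application, not part of the API):
+
+StoreConfig storeConfig = new StoreConfig();
+storeConfig.setModel(new MyXmlEntityModel("model.xml"));
+EntityStore store = new EntityStore(env, "PersonStore", storeConfig);
+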
        + +

        Bytecode Enhancement

        + +

        The persistent fields of a class may be private, package-private, protected +or public. The DPL can access persistent fields either by bytecode enhancement +or by reflection.

        + +

        Bytecode enhancement may be used to fully optimize binding performance and +to avoid the use of Java reflection. In applications that are CPU bound, +avoiding Java reflection can have a significant performance impact.

        + +

        Bytecode enhancement may be performed either at runtime or at build time +(offline). When enhancement is performed at runtime, persistent classes are +enhanced as they are loaded. When enhancement is performed offline, class +files are enhanced during a post-compilation step. + +Both a main program and an Ant task are provided for performing offline +enhancement. + +Enhanced classes are used to efficiently access all fields and default +constructors, including non-public members.

        + +

        See {@link com.sleepycat.persist.model.ClassEnhancer ClassEnhancer} for +bytecode enhancement configuration details.

        + +

        If bytecode enhancement is not used as described above, the DPL will use +reflection for accessing persistent fields and the default constructor. The +{@link java.lang.reflect.AccessibleObject#setAccessible +AccessibleObject.setAccessible} method is called by the DPL to enable access to +non-public fields and constructors. If you are running under a Java security +manager you must configure your security policy to allow the following +permission:

        + +

        {@code permission java.lang.reflect.ReflectPermission "suppressAccessChecks";} + +
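+For example, a sketch of a policy file entry granting this permission (a
+codeBase clause may be added to narrow the grant):
+
+grant {
+    permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+};
+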

        There are three cases where setting the above permission is not +required:

        +
          +
+1. If you are not running under a Java Security Manager, then access to
+non-public members via reflection is not restricted.  This is the default for
+J2SE.
+
+2. If all persistent fields and default constructors are {@code public} then
+they can be accessed via reflection without special permissions, even when
+running under a Java Security Manager.  However, declaring {@code public}
+instance fields is not recommended because it discourages encapsulation.
+
+3. If bytecode enhancement is used as described above, then reflection will
+not be used.
        + +

        It is well known that executing generated code is faster than reflection. +However, this performance difference may or may not impact a given application +since it may be overshadowed by other factors. Performance testing in a +realistic usage scenario is the best way to determine the impact. If you are +determined to avoid the use of reflection then option 3 above is +recommended.

        + + + diff --git a/src/com/sleepycat/persist/raw/RawField.java b/src/com/sleepycat/persist/raw/RawField.java new file mode 100644 index 0000000..eb7c146 --- /dev/null +++ b/src/com/sleepycat/persist/raw/RawField.java @@ -0,0 +1,40 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.raw; + +/** + * The definition of a field in a {@link RawType}. + * + *

        {@code RawField} objects are thread-safe. Multiple threads may safely + * call the methods of a shared {@code RawField} object.

        + * + * @author Mark Hayes + */ +public interface RawField { + + /** + * Returns the name of the field. + * + * @return the name of the field. + */ + String getName(); + + /** + * Returns the type of the field, without expanding parameterized types, + * or null if the type is an interface type or the Object class. + * + * @return the type of the field. + */ + RawType getType(); +} diff --git a/src/com/sleepycat/persist/raw/RawObject.java b/src/com/sleepycat/persist/raw/RawObject.java new file mode 100644 index 0000000..782f0b6 --- /dev/null +++ b/src/com/sleepycat/persist/raw/RawObject.java @@ -0,0 +1,336 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.raw; + +import java.util.Arrays; +import java.util.Map; +import java.util.TreeSet; + +import com.sleepycat.persist.evolve.Conversion; +import com.sleepycat.persist.model.EntityModel; + +/** + * A raw instance that can be used with a {@link RawStore} or {@link + * Conversion}. A RawObject is used to represent instances of + * complex types (persistent classes with fields), arrays, and enum values. It + * is not used to represent non-enum simple types, which are represented as + * simple objects. This includes primitives, which are represented as + * instances of their wrapper class. + * + *

        {@code RawObject} objects are thread-safe. Multiple threads may safely + * call the methods of a shared {@code RawObject} object.

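+ * For example, a sketch of building a raw instance by hand, assuming
+ * {@code rawType} is the {@link RawType} for a complex type with a single
+ * "name" field and an {@code Object} superclass:
+ *
+ *   Map<String, Object> values = new HashMap<String, Object>();
+ *   values.put("name", "Gizmo Inc");
+ *   RawObject object = new RawObject(rawType, values, null);
+ *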
        + * + * @author Mark Hayes + */ +public class RawObject { + + private static final String INDENT = " "; + + private RawType type; + private Map values; + private Object[] elements; + private String enumConstant; + private RawObject superObject; + + /** + * Creates a raw object with a given set of field values for a complex + * type. + * + * @param type the type of this raw object. + * + * @param values a map of field name to value for each declared field in + * the class, or null to create an empty map. Each value in the map is a + * {@link RawObject}, a simple + * type instance, or null. + * + * @param superObject the instance of the superclass, or null if the + * superclass is {@code Object}. + * + * @throws IllegalArgumentException if the type argument is an array type. + */ + public RawObject(RawType type, + Map values, + RawObject superObject) { + if (type == null || values == null) { + throw new NullPointerException(); + } + this.type = type; + this.values = values; + this.superObject = superObject; + } + + /** + * Creates a raw object with the given array elements for an array type. + * + * @param type the type of this raw object. + * + * @param elements an array of elements. Each element in the array is a + * {@link RawObject}, a simple + * type instance, or null. + * + * @throws IllegalArgumentException if the type argument is not an array + * type. + */ + public RawObject(RawType type, Object[] elements) { + if (type == null || elements == null) { + throw new NullPointerException(); + } + this.type = type; + this.elements = elements; + } + + /** + * Creates a raw object with the given enum value for an enum type. + * + * @param type the type of this raw object. + * + * @param enumConstant the String value of this enum constant; must be + * one of the Strings returned by {@link RawType#getEnumConstants}. + * + * @throws IllegalArgumentException if the type argument is not an array + * type. + */ + public RawObject(RawType type, String enumConstant) { + if (type == null || enumConstant == null) { + throw new NullPointerException(); + } + this.type = type; + this.enumConstant = enumConstant; + } + + /** + * Returns the raw type information for this raw object. + * + *

        Note that if this object is unevolved, the returned type may be + * different from the current type returned by {@link + * EntityModel#getRawType EntityModel.getRawType} for the same class name. + * This can only occur in a {@link Conversion#convert + * Conversion.convert}.

        + * + * @return the RawType. + */ + public RawType getType() { + return type; + } + + /** + * Returns a map of field name to value for a complex type, or null for an + * array type or an enum type. The map contains a String key for each + * declared field in the class. Each value in the map is a {@link + * RawObject}, a simple + * type instance, or null. + * + *

        There will be an entry in the map for every field declared in this + * type, as determined by {@link RawType#getFields} for the type returned + * by {@link #getType}. Values in the map may be null for fields with + * non-primitive types.

        + * + * @return the map of field name to value, or null. + */ + public Map getValues() { + return values; + } + + /** + * Returns the array of elements for an array type, or null for a complex + * type or an enum type. Each element in the array is a {@link RawObject}, + * a simple type instance, + * or null. + * + * @return the array of elements, or null. + */ + public Object[] getElements() { + return elements; + } + + /** + * Returns the enum constant String for an enum type, or null for a complex + * type or an array type. The String returned will be one of the Strings + * returned by {@link RawType#getEnumConstants}. + * + * @return the enum constant String, or null. + */ + public String getEnum() { + return enumConstant; + } + + /** + * Returns the instance of the superclass, or null if the superclass is + * {@code Object} or {@code Enum}. + * + * @return the instance of the superclass, or null. + */ + public RawObject getSuper() { + return superObject; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (!(other instanceof RawObject)) { + return false; + } + RawObject o = (RawObject) other; + if (type != o.type) { + return false; + } + if (!Arrays.deepEquals(elements, o.elements)) { + return false; + } + if (enumConstant != null) { + if (!enumConstant.equals(o.enumConstant)) { + return false; + } + } else { + if (o.enumConstant != null) { + return false; + } + } + if (values != null) { + if (!values.equals(o.values)) { + return false; + } + } else { + if (o.values != null) { + return false; + } + } + if (superObject != null) { + if (!superObject.equals(o.superObject)) { + return false; + } + } else { + if (o.superObject != null) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return System.identityHashCode(type) + + Arrays.deepHashCode(elements) + + (enumConstant != null ? enumConstant.hashCode() : 0) + + (values != null ? values.hashCode() : 0) + + (superObject != null ? superObject.hashCode() : 0); + } + + /** + * Returns an XML representation of the raw object. 
+ */ + @Override + public String toString() { + StringBuilder buf = new StringBuilder(500); + formatRawObject(buf, "", null, false); + return buf.toString(); + } + + private void formatRawObject(StringBuilder buf, + String indent, + String id, + boolean isSuper) { + if (type.isEnum()) { + buf.append(indent); + buf.append(""); + buf.append(enumConstant); + buf.append("\n"); + } else { + String indent2 = indent + INDENT; + String endTag; + buf.append(indent); + if (type.isArray()) { + buf.append("\n"); + + if (superObject != null) { + superObject.formatRawObject(buf, indent2, null, true); + } + if (type.isArray()) { + for (int i = 0; i < elements.length; i += 1) { + formatValue(buf, indent2, String.valueOf(i), elements[i]); + } + } else { + TreeSet keys = new TreeSet(values.keySet()); + for (String name : keys) { + formatValue(buf, indent2, name, values.get(name)); + } + } + buf.append(indent); + buf.append(endTag); + buf.append("\n"); + } + } + + private static void formatValue(StringBuilder buf, + String indent, + String id, + Object val) { + if (val == null) { + buf.append(indent); + buf.append("\n"); + } else if (val instanceof RawObject) { + ((RawObject) val).formatRawObject(buf, indent, id, false); + } else { + buf.append(indent); + buf.append(""); + buf.append(val.toString()); + buf.append("\n"); + } + } + + private static void formatId(StringBuilder buf, String id) { + if (id != null) { + if (Character.isDigit(id.charAt(0))) { + buf.append(" index=\""); + } else { + buf.append(" field=\""); + } + buf.append(id); + buf.append('"'); + } + } +} diff --git a/src/com/sleepycat/persist/raw/RawStore.java b/src/com/sleepycat/persist/raw/RawStore.java new file mode 100644 index 0000000..a48d4d0 --- /dev/null +++ b/src/com/sleepycat/persist/raw/RawStore.java @@ -0,0 +1,197 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.raw; + +import java.io.Closeable; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.StoreExistsException; +import com.sleepycat.persist.StoreNotFoundException; +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.impl.Store; +import com.sleepycat.persist.model.EntityModel; + +/** + * Provides access to the raw data in a store for use by general purpose tools. + * A RawStore provides access to stored entities without using + * entity classes or key classes. Keys are represented as simple type objects + * or, for composite keys, as {@link RawObject} instances, and entities are + * represented as {@link RawObject} instances. + * + *

        {@code RawStore} objects are thread-safe. Multiple threads may safely + * call the methods of a shared {@code RawStore} object.

+ *

        When using a {@code RawStore}, the current persistent class definitions + * are not used. Instead, the previously stored metadata and class definitions + * are used. This has several implications:

        + *
          + *
+ *   1. An {@code EntityModel} may not be specified using {@link
+ *      StoreConfig#setModel}.  In other words, the configured model must be
+ *      null (the default).
+ *
+ *   2. When storing entities, their format will not automatically be evolved
+ *      to the current class definition, even if the current class definition
+ *      has changed.
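+ *
+ * For example, a minimal sketch of raw access (the store name, class name
+ * and key value here are illustrative):
+ *
+ *   RawStore rawStore = new RawStore(env, "PersonStore", null);
+ *   PrimaryIndex<Object, RawObject> index =
+ *       rawStore.getPrimaryIndex("my.package.Person");
+ *   RawObject person = index.get("111-11-1111");
+ *   rawStore.close();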
        + * + * @author Mark Hayes + */ +public class RawStore + /* */ + implements Closeable + /* */ + { + + private Store store; + + /** + * Opens an entity store for raw data access. + * + * @param env an open Berkeley DB environment. + * + * @param storeName the name of the entity store within the given + * environment. + * + * @param config the store configuration, or null to use default + * configuration properties. + * + * @throws StoreNotFoundException when the {@link + * StoreConfig#setAllowCreate AllowCreate} configuration parameter is false + * and the store's internal catalog database does not exist. + * + * @throws IllegalArgumentException if the Environment is + * read-only and the config ReadOnly property is false. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public RawStore(Environment env, String storeName, StoreConfig config) + throws StoreNotFoundException, DatabaseException { + + try { + store = new Store(env, storeName, config, true /*rawAccess*/); + } catch (StoreExistsException e) { + /* Should never happen, ExclusiveCreate not used. */ + throw DbCompat.unexpectedException(e); + } catch (IncompatibleClassException e) { + /* Should never happen, evolution is not performed. */ + throw DbCompat.unexpectedException(e); + } + } + + /** + * Opens the primary index for a given entity class. + * + * @param entityClass the name of the entity class. + * + * @return the PrimaryIndex. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public PrimaryIndex getPrimaryIndex(String entityClass) + throws DatabaseException { + + return store.getPrimaryIndex + (Object.class, null, RawObject.class, entityClass); + } + + /** + * Opens the secondary index for a given entity class and secondary key + * name. + * + * @param entityClass the name of the entity class. + * + * @param keyName the secondary key name. + * + * @return the SecondaryIndex. + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public SecondaryIndex + getSecondaryIndex(String entityClass, String keyName) + throws DatabaseException { + + return store.getSecondaryIndex + (getPrimaryIndex(entityClass), RawObject.class, entityClass, + Object.class, null, keyName); + } + + /** + * Returns the environment associated with this store. + * + * @return the Environment. + */ + public Environment getEnvironment() { + return store.getEnvironment(); + } + + /** + * Returns a copy of the entity store configuration. + * + * @return the StoreConfig. + */ + public StoreConfig getConfig() { + return store.getConfig(); + } + + /** + * Returns the name of this store. + * + * @return the store name. + */ + public String getStoreName() { + return store.getStoreName(); + } + + /** + * Returns the last configured and stored entity model for this store. + * + * @return the EntityModel. + */ + public EntityModel getModel() { + return store.getModel(); + } + + /** + * Returns the set of mutations that were configured and stored previously. + * + * @return the Mutations. + */ + public Mutations getMutations() { + return store.getMutations(); + } + + /** + * Closes all databases and sequences that were opened by this model. No + * databases opened via this store may be in use. + * + *

        WARNING: To guard against memory leaks, the application should + * discard all references to the closed handle. While BDB makes an effort + * to discard references from closed objects to the allocated memory for an + * environment, this behavior is not guaranteed. The safe course of action + * for an application is to discard all references to closed BDB + * objects.

        + * + * @throws DatabaseException the base class for all BDB exceptions. + */ + public void close() + throws DatabaseException { + + store.close(); + } +} diff --git a/src/com/sleepycat/persist/raw/RawType.java b/src/com/sleepycat/persist/raw/RawType.java new file mode 100644 index 0000000..cbb34eb --- /dev/null +++ b/src/com/sleepycat/persist/raw/RawType.java @@ -0,0 +1,202 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.raw; + +import java.util.List; +import java.util.Map; + +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.Persistent; + +/** + * The type definition for a simple or complex persistent type, or an array + * of persistent types. + * + *

        {@code RawType} objects are thread-safe. Multiple threads may safely + * call the methods of a shared {@code RawType} object.

        + * + * @author Mark Hayes + */ +public interface RawType { + + /** + * Returns the class name for this type in the format specified by {@link + * Class#getName}. + * + *

        If this class currently exists (has not been removed or renamed) then + * the class name may be passed to {@link Class#forName} to get the current + * {@link Class} object. However, if this raw type is not the current + * version of the class, this type information may differ from that of the + * current {@link Class}.

        + * + * @return the class name. + */ + String getClassName(); + + /** + * Returns the class version for this type. For simple types, zero is + * always returned. + * + * @return the version. + * + * @see Entity#version + * @see Persistent#version + */ + int getVersion(); + + /** + * Returns the internal unique ID for this type. + * + * @return the ID. + */ + int getId(); + + /** + * Returns whether this is a + * simple type: + * primitive, primitive wrapper, BigInteger, BigDecimal, String or Date. + * + *

        If true is returned, {@link #isPrimitive} can be called for more + * information, and a raw value of this type is represented as a simple + * type object (not as a {@link RawObject}).

        + * + *

        If false is returned, this is a complex type, an array type (see + * {@link #isArray}), or an enum type, and a raw value of this type is + * represented as a {@link RawObject}.

        + * + * @return whether this is a simple type. + */ + boolean isSimple(); + + /** + * Returns whether this type is a Java primitive: char, byte, short, int, + * long, float or double. + * + *

        If true is returned, this is also a simple type. In other words, + * primitive types are a subset of simple types.

        + * + *

        If true is returned, a raw value of this type is represented as a + * non-null instance of the primitive type's wrapper class. For example, + * an int raw value is represented as an + * Integer.
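As a sketch, given a RawObject person read through the raw API (the "age" field is hypothetical):

    Map<String, Object> values = person.getValues();  // field name -> raw value
    int age = (Integer) values.get("age");            // an int field arrives as an Integer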

        + * + * @return whether this is a Java primitive. + */ + boolean isPrimitive(); + + /** + * Returns whether this is an enum type. + * + *

        If true is returned, a value of this type is a {@link RawObject} and + * the enum constant String is available via {@link RawObject#getEnum}.
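A sketch, assuming a hypothetical "status" enum field on a RawObject person:

    RawObject status = (RawObject) person.getValues().get("status");
    String constantName = status.getEnum();           // e.g. "ACTIVE"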

        + * + *

        If false is returned, then this is a complex type, an array type (see + * {@link #isArray}), or a simple type (see {@link #isSimple}).

+ * + * @return whether this is an enum type. + */ + boolean isEnum(); + + /** + * Returns an unmodifiable list of the names of the enum instances, or null + * if this is not an enum type. + * + * @return the list of enum names. + */ + List getEnumConstants(); + + /** + * Returns whether this is an array type. Raw value arrays are represented + * as {@link RawObject} instances. + * + *


        If true is returned, the array component type is returned by {@link + * #getComponentType} and the number of array dimensions is returned by + * {@link #getDimensions}.
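For example, a sketch that unwraps a hypothetical two-dimensional array type down to its element type:

    int dims = arrayType.getDimensions();     // 2 for a String[][] type
    RawType element = arrayType;
    while (element.isArray()) {
        element = element.getComponentType(); // strip one dimension per step
    }
    // element now describes java.lang.String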

        + * + *

        If false is returned, then this is a complex type, an enum type (see + * {@link #isEnum}), or a simple type (see {@link #isSimple}).

        + * + * @return whether this is an array type. + */ + boolean isArray(); + + /** + * Returns the number of array dimensions, or zero if this is not an array + * type. + * + * @return the number of array dimensions, or zero if this is not an array + * type. + */ + int getDimensions(); + + /** + * Returns the array component type, or null if this is not an array type. + * + * @return the array component type, or null if this is not an array type. + */ + RawType getComponentType(); + + /** + * Returns a map of field name to raw field for each non-static + * non-transient field declared in this class, or null if this is not a + * complex type (in other words, this is a simple type or an array type). + * + * @return a map of field name to raw field, or null. + */ + Map getFields(); + + /** + * Returns the type of the superclass, or null if the superclass is Object + * or this is not a complex type (in other words, this is a simple type or + * an array type). + * + * @return the type of the superclass, or null. + */ + RawType getSuperType(); + + /** + * Returns the original model class metadata used to create this class, or + * null if this is not a model class. + * + * @return the model class metadata, or null. + */ + ClassMetadata getClassMetadata(); + + /** + * Returns the original model entity metadata used to create this class, or + * null if this is not an entity class. + * + * @return the model entity metadata, or null. + */ + EntityMetadata getEntityMetadata(); + + /** + * Returns whether this type has been deleted using a class {@code Deleter} + * mutation. A deleted type may be returned by {@link + * com.sleepycat.persist.model.EntityModel#getRawTypeVersion + * EntityModel.getRawTypeVersion} or {@link + * com.sleepycat.persist.model.EntityModel#getAllRawTypeVersions + * EntityModel.getAllRawTypeVersions}. + * + * @return whether this type has been deleted. + */ + boolean isDeleted(); + + /** + * Returns an XML representation of the raw type. + */ + String toString(); +} diff --git a/src/com/sleepycat/persist/raw/package.html b/src/com/sleepycat/persist/raw/package.html new file mode 100644 index 0000000..ff02d24 --- /dev/null +++ b/src/com/sleepycat/persist/raw/package.html @@ -0,0 +1,5 @@ + + +Raw data access for general purpose tools and manual conversions. + + diff --git a/src/com/sleepycat/util/ClassResolver.java b/src/com/sleepycat/util/ClassResolver.java new file mode 100644 index 0000000..ad4f060 --- /dev/null +++ b/src/com/sleepycat/util/ClassResolver.java @@ -0,0 +1,142 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.ObjectStreamClass; + +/** + * Implements policies for loading user-supplied classes. The {@link + * #resolveClass} method should be used to load all user-supplied classes, and + * the {@link Stream} class should be used as a replacement for + * ObjectInputStream to deserialize instances of user-supplied classes. + *
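A rough usage sketch (the class name, loader, and input stream are hypothetical; loader may be null):

    Class<?> cls = ClassResolver.resolveClass("com.example.MyEntity", loader);
    ObjectInputStream ois = new ClassResolver.Stream(in, loader); // deserialization variant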

+ * The ClassLoader specified as a param should be the one configured using + * EnvironmentConfig.setClassLoader. This loader is used, if non-null. If the + * loader param is null, but a non-null thread-context loader is available, the + * latter is used. If the loader param and thread-context loader are both + * null, or if they fail to load a class by throwing ClassNotFoundException, + * then the default Java mechanisms for determining the class loader are used. + */ +public class ClassResolver { + + /** + * A specialized ObjectInputStream that supports use of a user-specified + * ClassLoader. + * + * If the loader param and thread-context loader are both null, or if they + * throw ClassNotFoundException, then ObjectInputStream.resolveClass is + * called, which has its own special rules for class loading. + */ + public static class Stream extends ObjectInputStream { + + private final ClassLoader classLoader; + + public Stream(InputStream in, ClassLoader classLoader) + throws IOException { + + super(in); + this.classLoader = classLoader; + } + + @Override + protected Class resolveClass(ObjectStreamClass desc) + throws IOException, ClassNotFoundException { + + ClassNotFoundException firstException = null; + if (classLoader != null) { + try { + return Class.forName(desc.getName(), false /*initialize*/, + classLoader); + } catch (ClassNotFoundException e) { + if (firstException == null) { + firstException = e; + } + } + } + final ClassLoader threadLoader = + Thread.currentThread().getContextClassLoader(); + if (threadLoader != null) { + try { + return Class.forName(desc.getName(), false /*initialize*/, + threadLoader); + } catch (ClassNotFoundException e) { + if (firstException == null) { + firstException = e; + } + } + } + try { + return super.resolveClass(desc); + } catch (ClassNotFoundException e) { + if (firstException == null) { + firstException = e; + } + } + throw firstException; + } + } + + /** + * A specialized Class.forName method that supports use of a user-specified + * ClassLoader. + * + * If the loader param and thread-context loader are both null, or if they + * throw ClassNotFoundException, then Class.forName is called and the + * "current loader" (the one used to load JE) will be used. + * + * @param className the class name. + * @param classLoader the ClassLoader. + * @return the Class. + * @throws ClassNotFoundException if the class is not found.
+ */ + public static Class resolveClass(String className, + ClassLoader classLoader) + throws ClassNotFoundException { + + ClassNotFoundException firstException = null; + if (classLoader != null) { + try { + return Class.forName(className, true /*initialize*/, + classLoader); + } catch (ClassNotFoundException e) { + if (firstException == null) { + firstException = e; + } + } + } + final ClassLoader threadLoader = + Thread.currentThread().getContextClassLoader(); + if (threadLoader != null) { + try { + return Class.forName(className, true /*initialize*/, + threadLoader); + } catch (ClassNotFoundException e) { + if (firstException == null) { + firstException = e; + } + } + } + try { + return Class.forName(className); + } catch (ClassNotFoundException e) { + if (firstException == null) { + firstException = e; + } + } + throw firstException; + } +} diff --git a/src/com/sleepycat/util/ConfigBeanInfoBase.java b/src/com/sleepycat/util/ConfigBeanInfoBase.java new file mode 100644 index 0000000..2b49543 --- /dev/null +++ b/src/com/sleepycat/util/ConfigBeanInfoBase.java @@ -0,0 +1,290 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +import java.beans.BeanDescriptor; +import java.beans.EventSetDescriptor; +import java.beans.IntrospectionException; +import java.beans.PropertyDescriptor; +import java.beans.SimpleBeanInfo; +import java.lang.reflect.Method; +import java.util.ArrayList; + +/* + * If someone adds a property in some FooConfig.java, + * (1) If the setter/getter methods are setFoo/getFoo, the name of the + * property should be "foo", which means the first letter of the property + * name should be lower case. + * (2) The setter method for this property setProperty should return "this", + * and a setPropertyVoid method which returns void must be added. + * The return type of the getter method should be the same as the + * parameter of the setter method. + * (3) The setter method and getter method must be added into + * FooConfigBeanInfo; + * (4) If for some of the setter methods in the FooConfig.java, setterVoid + * methods are not necessary, then add the name of such setter methods + * into the ArrayList ignoreMethods within the corresponding + * FooConfigBeanInfo.getPropertyDescriptors method. For example, + * setMaxSeedTestHook method in DiskOrderedCursorConfig.java is only used + * for unit tests, so "setMaxSeedTestHook" is added into ignoreMethods + * list within DiskOrderedCursorConfigBeanInfo.getPropertyDescriptors. + * + * + * If someone adds a new FooConfig.java, + * (1) The definition of setter/getter methods and the names of the properties + * should follow the rules described above. + * (2) There must be FooConfigBeanInfo.java. You can write it according to + * the current beaninfo classes. + * (3) "PackagePath.FooConfig" must be added into the unit test: + * com.sleepycat.je.ConfigBeanInfoTest. + * + * If someone removes an existing FooConfig.java, then "PackagePath.FooConfig" + * must be deleted in the unit test com.sleepycat.je.ConfigBeanInfoTest.
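A hypothetical FooConfig sketch that follows the rules above:

    public class FooConfig {
        private boolean foo;
        public FooConfig setFoo(boolean val) {  // chainable setter returns "this"
            this.foo = val;
            return this;
        }
        public void setFooVoid(boolean val) {   // void variant required by rule (2)
            this.foo = val;
        }
        public boolean getFoo() {               // return type matches the setter parameter
            return foo;
        }
    }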
+ */ +public class ConfigBeanInfoBase extends SimpleBeanInfo { + private static java.awt.Image iconColor16 = null; + private static java.awt.Image iconColor32 = null; + private static java.awt.Image iconMono16 = null; + private static java.awt.Image iconMono32 = null; + private static String iconNameC16 = null; + private static String iconNameC32 = null; + private static String iconNameM16 = null; + private static String iconNameM32 = null; + + private static final int defaultPropertyIndex = -1; + private static final int defaultEventIndex = -1; + + protected static ArrayList propertiesName = new ArrayList(); + protected static ArrayList + getterAndSetterMethods = new ArrayList(); + + protected static ArrayList ignoreMethods = new ArrayList(); + + /* + * Get the properties' information, including all the properties' names + * and their getter/setter methods. + */ + protected static void getPropertiesInfo(Class cls) { + propertiesName.clear(); + getterAndSetterMethods.clear(); + try { + + /* Get all of the public methods. */ + ArrayList allMethodNames = new ArrayList(); + Method[] methods = cls.getMethods(); + for (int i = 0; i < methods.length; i++) { + allMethodNames.add(methods[i].getName()); + } + for (int i = 0; i < allMethodNames.size(); i++) { + String name = allMethodNames.get(i); + String subName = name.substring(0, 3); + + /* If it is a setter method. */ + if (subName.equals("set")) { + if (isIgnoreMethods(name)) { + continue; + } + String propertyName = name.substring(3); + Method getterMethod = null; + try { + getterMethod = cls.getMethod("get" + propertyName); + } catch (NoSuchMethodException e) { + getterMethod = null; + } + if (getterMethod != null) { + getterAndSetterMethods.add("get" + propertyName); + getterAndSetterMethods.add(name + "Void"); + + /* + * Add the real property name into propertiesName. + * If the names of setter/getter methods are + * setFoo/getFoo, the name of the property should be + * "foo". + */ + propertiesName.add + (propertyName.substring(0, 1).toLowerCase() + + propertyName.substring(1)); + } + } + } + } catch (SecurityException e) { + e.printStackTrace(); + } + } + + private static boolean isIgnoreMethods(String methodName) { + for (int i = 0; i < ignoreMethods.size(); i++) { + if (ignoreMethods.get(i).equals(methodName)) { + return true; + } + } + return false; + } + + protected static PropertyDescriptor[] getPdescriptor(Class cls) { + getPropertiesInfo(cls); + final int propertyNum = propertiesName.size(); + assert propertyNum * 2 == getterAndSetterMethods.size(); + PropertyDescriptor[] properties = new PropertyDescriptor[propertyNum]; + try { + for (int i = 0, j = 0; i < propertyNum; i += 1, j += 2) { + properties[i] = new PropertyDescriptor + (propertiesName.get(i), cls, getterAndSetterMethods.get(j), + getterAndSetterMethods.get(j + 1)); + } + } catch (IntrospectionException e) { + e.printStackTrace(); + } + return properties; + } + + protected static BeanDescriptor getBdescriptor(Class cls) { + BeanDescriptor beanDescriptor = new BeanDescriptor(cls, null); + return beanDescriptor; + } + + /** + * Gets the bean's BeanDescriptors. + * + * @param cls the Class. + * + * @return BeanDescriptor describing the editable + * properties of this bean. May return null if the + * information should be obtained by automatic analysis. + */ + public BeanDescriptor getBeanDescriptor(Class cls) { + return null; + } + + /** + * Gets the bean's PropertyDescriptors. + * + * @param cls the Class.
+ * + * @return An array of PropertyDescriptors describing the editable + * properties supported by this bean. May return null if the + * information should be obtained by automatic analysis. + *

+ * If a property is indexed, then its entry in the result array will + * belong to the IndexedPropertyDescriptor subclass of PropertyDescriptor. + * A client of getPropertyDescriptors can use "instanceof" to check + * if a given PropertyDescriptor is an IndexedPropertyDescriptor. + */ + public PropertyDescriptor[] getPropertyDescriptors(Class cls) { + return null; + } + + /** + * Gets the bean's EventSetDescriptors. + * + * @return An array of EventSetDescriptors describing the kinds of + * events fired by this bean. May return null if the information + * should be obtained by automatic analysis. + */ + public EventSetDescriptor[] getEventSetDescriptors() { + EventSetDescriptor[] eventSets = new EventSetDescriptor[0]; + return eventSets; + } + + /** + * A bean may have a "default" property that is the property that will + * most commonly be initially chosen for update by humans who are + * customizing the bean. + * @return Index of default property in the PropertyDescriptor array + * returned by getPropertyDescriptors. + *

Returns -1 if there is no default property. + */ + public int getDefaultPropertyIndex() { + return defaultPropertyIndex; + } + + /** + * A bean may have a "default" event that is the event that will + * most commonly be used by humans when using the bean. + * @return Index of default event in the EventSetDescriptor array + * returned by getEventSetDescriptors. + *

        Returns -1 if there is no default event. + */ + public int getDefaultEventIndex() { + return defaultEventIndex; + } + + /** + * This method returns an image object that can be used to + * represent the bean in toolboxes, toolbars, etc. Icon images + * will typically be GIFs, but may in future include other formats. + *

        + * Beans aren't required to provide icons and may return null from + * this method. + *

+ * There are four possible flavors of icons (16x16 color, + * 32x32 color, 16x16 mono, 32x32 mono). If a bean chooses to only + * support a single icon, we recommend supporting 16x16 color. + *

        + * We recommend that icons have a "transparent" background + * so they can be rendered onto an existing background. + * + * @param iconKind The kind of icon requested. This should be + * one of the constant values ICON_COLOR_16x16, ICON_COLOR_32x32, + * ICON_MONO_16x16, or ICON_MONO_32x32. + * @return An image object representing the requested icon. May + * return null if no suitable icon is available. + */ + public java.awt.Image getIcon(int iconKind) { + switch (iconKind) { + case ICON_COLOR_16x16: + if (iconNameC16 == null) { + return null; + } else { + if (iconColor16 == null) { + iconColor16 = loadImage(iconNameC16); + } + return iconColor16; + } + + case ICON_COLOR_32x32: + if (iconNameC32 == null) { + return null; + } else { + if (iconColor32 == null) { + iconColor32 = loadImage(iconNameC32); + } + return iconColor32; + } + + case ICON_MONO_16x16: + if (iconNameM16 == null) { + return null; + } else { + if (iconMono16 == null) { + iconMono16 = loadImage(iconNameM16); + } + return iconMono16; + } + + case ICON_MONO_32x32: + if (iconNameM32 == null) { + return null; + } else { + if (iconMono32 == null) { + iconMono32 = loadImage(iconNameM32); + } + return iconMono32; + } + + default: + return null; + } + } +} diff --git a/src/com/sleepycat/util/ExceptionUnwrapper.java b/src/com/sleepycat/util/ExceptionUnwrapper.java new file mode 100644 index 0000000..4b3e962 --- /dev/null +++ b/src/com/sleepycat/util/ExceptionUnwrapper.java @@ -0,0 +1,73 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +/** + * Unwraps nested exceptions by calling the {@link + * ExceptionWrapper#getCause()} method for exceptions that implement the + * {@link ExceptionWrapper} interface. Does not currently support the Java 1.4 + * Throwable.getCause() method. + * + * @author Mark Hayes + */ +public class ExceptionUnwrapper { + + /** + * Unwraps an Exception and returns the underlying Exception, or throws an + * Error if the underlying Throwable is an Error. + * + * @param e is the Exception to unwrap. + * + * @return the underlying Exception. + * + * @throws Error if the underlying Throwable is an Error. + * + * @throws IllegalArgumentException if the underlying Throwable is not an + * Exception or an Error. + */ + public static Exception unwrap(Exception e) { + + Throwable t = unwrapAny(e); + if (t instanceof Exception) { + return (Exception) t; + } else if (t instanceof Error) { + throw (Error) t; + } else { + throw new IllegalArgumentException("Not Exception or Error: " + t); + } + } + + /** + * Unwraps an Exception and returns the underlying Throwable. + * + * @param e is the Exception to unwrap. + * + * @return the underlying Throwable. 
+ */ + public static Throwable unwrapAny(Throwable e) { + + while (true) { + if (e instanceof ExceptionWrapper) { + Throwable e2 = ((ExceptionWrapper) e).getCause(); + if (e2 == null) { + return e; + } else { + e = e2; + } + } else { + return e; + } + } + } +} diff --git a/src/com/sleepycat/util/ExceptionWrapper.java b/src/com/sleepycat/util/ExceptionWrapper.java new file mode 100644 index 0000000..9e7da65 --- /dev/null +++ b/src/com/sleepycat/util/ExceptionWrapper.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +/** + * Interface implemented by exceptions that can contain nested exceptions. + * + * @author Mark Hayes + */ +public interface ExceptionWrapper { + + /** + * Returns the nested exception or null if none is present. + * + * @return the nested exception or null if none is present. + * + * @deprecated replaced by {@link #getCause}. + */ + Throwable getDetail(); + + /** + * Returns the nested exception or null if none is present. + * + *
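A short unwrapping sketch (doDatabaseWork is a hypothetical operation that may throw a wrapper exception):

    try {
        doDatabaseWork();
    } catch (RuntimeExceptionWrapper wrapper) {
        Exception cause = ExceptionUnwrapper.unwrap(wrapper); // rethrows if the cause is an Error
        cause.printStackTrace();
    }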

This method is intentionally defined with the same signature as the + * java.lang.Throwable.getCause method in Java 1.4 and + * greater. By defining this method to return a nested exception, the Java + * 1.4 runtime will print the nested stack trace.

        + * + * @return the nested exception or null if none is present. + */ + Throwable getCause(); +} diff --git a/src/com/sleepycat/util/FastInputStream.java b/src/com/sleepycat/util/FastInputStream.java new file mode 100644 index 0000000..5c032c4 --- /dev/null +++ b/src/com/sleepycat/util/FastInputStream.java @@ -0,0 +1,218 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +import java.io.InputStream; + +/** + * A replacement for ByteArrayInputStream that does not synchronize every + * byte read. + * + *

        This class extends {@link InputStream} and its read() + * methods allow it to be used as a standard input stream. In addition, it + * provides readFast() methods that are not declared to throw + * IOException. IOException is never thrown by this + * class.

        + * + * @author Mark Hayes + */ +public class FastInputStream extends InputStream { + + protected int len; + protected int off; + protected int mark; + protected byte[] buf; + + /** + * Creates an input stream. + * + * @param buffer the data to read. + */ + public FastInputStream(byte[] buffer) { + + buf = buffer; + len = buffer.length; + } + + /** + * Creates an input stream. + * + * @param buffer the data to read. + * + * @param offset the byte offset at which to begin reading. + * + * @param length the number of bytes to read. + */ + public FastInputStream(byte[] buffer, int offset, int length) { + + buf = buffer; + off = offset; + len = offset + length; + } + + // --- begin ByteArrayInputStream compatible methods --- + + @Override + public int available() { + + return len - off; + } + + @Override + public boolean markSupported() { + + return true; + } + + @Override + public void mark(int readLimit) { + + mark = off; + } + + @Override + public void reset() { + + off = mark; + } + + @Override + public long skip(long count) { + + int myCount = (int) count; + if (myCount + off > len) { + myCount = len - off; + } + skipFast(myCount); + return myCount; + } + + @Override + public int read() { + return readFast(); + } + + @Override + public int read(byte[] toBuf) { + + return readFast(toBuf, 0, toBuf.length); + } + + @Override + public int read(byte[] toBuf, int offset, int length) { + + return readFast(toBuf, offset, length); + } + + // --- end ByteArrayInputStream compatible methods --- + + /** + * Equivalent to skip() but takes an int parameter instead of a + * long, and does not check whether the count given is larger than the + * number of remaining bytes. + * + * @param count the number of bytes to skip. + * + * @see #skip(long) + */ + public final void skipFast(int count) { + off += count; + } + + /** + * Equivalent to read() but does not throw + * IOException. + * + * @return the next byte of data, or -1 if at the end of the stream. + * + * @see #read() + */ + public final int readFast() { + + return (off < len) ? (buf[off++] & 0xff) : (-1); + } + + /** + * Equivalent to read(byte[]) but does not throw + * IOException. + + * @param toBuf the buffer into which the data is read. + * + * @return the number of bytes read, or -1 if at the end of the stream. + * + * @see #read(byte[]) + */ + public final int readFast(byte[] toBuf) { + + return readFast(toBuf, 0, toBuf.length); + } + + /** + * Equivalent to read(byte[],int,int) but does not throw + * IOException. + * + * @param toBuf the buffer into which the data is read. + * + * @param offset the start offset in array at which the data is written. + * + * @param length the maximum number of bytes to read. + * + * @return the number of bytes read, or -1 if at the end of the stream. + * + * @see #read(byte[],int,int) + */ + public final int readFast(byte[] toBuf, int offset, int length) { + + int avail = len - off; + if (avail <= 0) { + return -1; + } + if (length > avail) { + length = avail; + } + System.arraycopy(buf, off, toBuf, offset, length); + off += length; + return length; + } + + /** + * Returns the underlying data being read. + * + * @return the underlying data. + */ + public final byte[] getBufferBytes() { + + return buf; + } + + /** + * Returns the offset at which data is being read from the buffer. + * + * @return the offset at which data is being read. + */ + public final int getBufferOffset() { + + return off; + } + + /** + * Returns the end of the buffer being read. + * + * @return the end of the buffer. 
+ */ + public final int getBufferLength() { + + return len; + } +} diff --git a/src/com/sleepycat/util/FastOutputStream.java b/src/com/sleepycat/util/FastOutputStream.java new file mode 100644 index 0000000..3054566 --- /dev/null +++ b/src/com/sleepycat/util/FastOutputStream.java @@ -0,0 +1,300 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; + +/** + * A replacement for ByteArrayOutputStream that does not synchronize every + * byte written. + * + *

        This class extends {@link OutputStream} and its write() + * methods allow it to be used as a standard output stream. In addition, it + * provides writeFast() methods that are not declared to throw + * IOException. IOException is never thrown by this + * class.
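A round-trip sketch pairing this class with FastInputStream; note that none of these calls can throw IOException:

    FastOutputStream out = new FastOutputStream();
    out.writeFast(0x2A);                         // one byte
    out.writeFast(new byte[] { 1, 2, 3 });
    byte[] data = out.toByteArray();             // copies only the bytes written

    FastInputStream in = new FastInputStream(data);
    int first = in.readFast();                   // 0x2A; returns -1 at end of stream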

        + * + * @author Mark Hayes + */ +public class FastOutputStream extends OutputStream { + + /** + * The default initial size of the buffer if no initialSize parameter is + * specified. This constant is 100 bytes. + */ + public static final int DEFAULT_INIT_SIZE = 100; + + /** + * The default amount that the buffer is increased when it is full. This + * constant is zero, which means to double the current buffer size. + */ + public static final int DEFAULT_BUMP_SIZE = 0; + + private int len; + private int bumpLen; + private byte[] buf; + + /* + * We can return the same byte[] for 0 length arrays. + */ + private static byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0]; + + /** + * Creates an output stream with default sizes. + */ + public FastOutputStream() { + + initBuffer(DEFAULT_INIT_SIZE, DEFAULT_BUMP_SIZE); + } + + /** + * Creates an output stream with a default bump size and a given initial + * size. + * + * @param initialSize the initial size of the buffer. + */ + public FastOutputStream(int initialSize) { + + initBuffer(initialSize, DEFAULT_BUMP_SIZE); + } + + /** + * Creates an output stream with a given bump size and initial size. + * + * @param initialSize the initial size of the buffer. + * + * @param bumpSize the amount to increment the buffer. + */ + public FastOutputStream(int initialSize, int bumpSize) { + + initBuffer(initialSize, bumpSize); + } + + /** + * Creates an output stream with a given initial buffer and a default + * bump size. + * + * @param buffer the initial buffer; will be owned by this object. + */ + public FastOutputStream(byte[] buffer) { + + buf = buffer; + bumpLen = DEFAULT_BUMP_SIZE; + } + + /** + * Creates an output stream with a given initial buffer and a given + * bump size. + * + * @param buffer the initial buffer; will be owned by this object. + * + * @param bumpSize the amount to increment the buffer. If zero (the + * default), the current buffer size will be doubled when the buffer is + * full. + */ + public FastOutputStream(byte[] buffer, int bumpSize) { + + buf = buffer; + bumpLen = bumpSize; + } + + private void initBuffer(int bufferSize, int bumplength) { + buf = new byte[bufferSize]; + this.bumpLen = bumplength; + } + + // --- begin ByteArrayOutputStream compatible methods --- + + public int size() { + + return len; + } + + public void reset() { + + len = 0; + } + + @Override + public void write(int b) { + + writeFast(b); + } + + @Override + public void write(byte[] fromBuf) { + + writeFast(fromBuf); + } + + @Override + public void write(byte[] fromBuf, int offset, int length) { + + writeFast(fromBuf, offset, length); + } + + public void writeTo(OutputStream out) throws IOException { + + out.write(buf, 0, len); + } + + @Override + public String toString() { + + return new String(buf, 0, len); + } + + public String toString(String encoding) + throws UnsupportedEncodingException { + + return new String(buf, 0, len, encoding); + } + + public byte[] toByteArray() { + + if (len == 0) { + return ZERO_LENGTH_BYTE_ARRAY; + } + byte[] toBuf = new byte[len]; + System.arraycopy(buf, 0, toBuf, 0, len); + + return toBuf; + } + + // --- end ByteArrayOutputStream compatible methods --- + + /** + * Equivalent to write(int) but does not throw + * IOException. + * + * @param b the byte to write. + * + * @see #write(int) + */ + public final void writeFast(int b) { + + if (len + 1 > buf.length) + bump(1); + + buf[len++] = (byte) b; + } + + /** + * Equivalent to write(byte[]) but does not throw + * IOException. + * + * @param fromBuf the buffer to write. 
+ * + * @see #write(byte[]) + */ + public final void writeFast(byte[] fromBuf) { + + int needed = len + fromBuf.length - buf.length; + if (needed > 0) + bump(needed); + + System.arraycopy(fromBuf, 0, buf, len, fromBuf.length); + len += fromBuf.length; + } + + /** + * Equivalent to write(byte[],int,int) but does not throw + * IOException. + * + * @param fromBuf the buffer to write. + * + * @param offset the start offset in the buffer. + * + * @param length the number of bytes to write. + * + * @see #write(byte[],int,int) + */ + public final void writeFast(byte[] fromBuf, int offset, int length) { + + int needed = len + length - buf.length; + if (needed > 0) + bump(needed); + + System.arraycopy(fromBuf, offset, buf, len, length); + len += length; + } + + /** + * Returns the buffer owned by this object. + * + * @return the buffer. + */ + public byte[] getBufferBytes() { + + return buf; + } + + /** + * Returns the offset of the internal buffer. + * + * @return always zero currently. + */ + public int getBufferOffset() { + + return 0; + } + + /** + * Returns the length used in the internal buffer, i.e., the offset at + * which data will be written next. + * + * @return the buffer length. + */ + public int getBufferLength() { + + return len; + } + + /** + * Ensure that at least the given number of bytes are available in the + * internal buffer. + * + * @param sizeNeeded the number of bytes desired. + */ + public void makeSpace(int sizeNeeded) { + + int needed = len + sizeNeeded - buf.length; + if (needed > 0) + bump(needed); + } + + /** + * Skip the given number of bytes in the buffer. + * + * @param sizeAdded number of bytes to skip. + */ + public void addSize(int sizeAdded) { + + len += sizeAdded; + } + + private void bump(int needed) { + + /* Double the buffer if the bumpLen is zero. */ + int bump = (bumpLen > 0) ? bumpLen : buf.length; + + byte[] toBuf = new byte[buf.length + needed + bump]; + + System.arraycopy(buf, 0, toBuf, 0, len); + + buf = toBuf; + } +} diff --git a/src/com/sleepycat/util/IOExceptionWrapper.java b/src/com/sleepycat/util/IOExceptionWrapper.java new file mode 100644 index 0000000..d355f5c --- /dev/null +++ b/src/com/sleepycat/util/IOExceptionWrapper.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +import java.io.IOException; + +/** + * An IOException that can contain nested exceptions. + * + * @author Mark Hayes + */ +public class IOExceptionWrapper + extends IOException implements ExceptionWrapper { + + private static final long serialVersionUID = 753416466L; + + private Throwable e; + + public IOExceptionWrapper(Throwable e) { + + super(e.getMessage()); + this.e = e; + } + + /** + * @deprecated replaced by {@link #getCause}. 
+ */ + public Throwable getDetail() { + + return e; + } + + @Override + public Throwable getCause() { + + return e; + } +} diff --git a/src/com/sleepycat/util/PackedInteger.java b/src/com/sleepycat/util/PackedInteger.java new file mode 100644 index 0000000..74ca710 --- /dev/null +++ b/src/com/sleepycat/util/PackedInteger.java @@ -0,0 +1,1072 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +/** + * Static methods for reading and writing packed integers. + * + *

        Most applications should use the classes in the {@link + * com.sleepycat.bind.tuple} package rather than using this class directly.

        + * + * @see Integer Formats + */ +public class PackedInteger { + + /** + * The maximum number of bytes needed to store an int value (5). + */ + public static final int MAX_LENGTH = 5; + + /** + * The maximum number of bytes needed to store a long value (9). + */ + public static final int MAX_LONG_LENGTH = 9; + + /** + * Reads a packed integer at the given buffer offset and returns it. + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the integer that was read. + */ + public static int readInt(byte[] buf, int off) { + + boolean negative; + int byteLen; + + int b1 = buf[off++]; + if (b1 < -119) { + negative = true; + byteLen = -b1 - 119; + } else if (b1 > 119) { + negative = false; + byteLen = b1 - 119; + } else { + return b1; + } + + int value = buf[off++] & 0xFF; + if (byteLen > 1) { + value |= (buf[off++] & 0xFF) << 8; + if (byteLen > 2) { + value |= (buf[off++] & 0xFF) << 16; + if (byteLen > 3) { + value |= (buf[off++] & 0xFF) << 24; + } + } + } + + return negative ? (-value - 119) : (value + 119); + } + + /** + * Reads a packed long integer at the given buffer offset and returns it. + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the long integer that was read. + */ + public static long readLong(byte[] buf, int off) { + + boolean negative; + int byteLen; + + int b1 = buf[off++]; + if (b1 < -119) { + negative = true; + byteLen = -b1 - 119; + } else if (b1 > 119) { + negative = false; + byteLen = b1 - 119; + } else { + return b1; + } + + long value = buf[off++] & 0xFFL; + if (byteLen > 1) { + value |= (buf[off++] & 0xFFL) << 8; + if (byteLen > 2) { + value |= (buf[off++] & 0xFFL) << 16; + if (byteLen > 3) { + value |= (buf[off++] & 0xFFL) << 24; + if (byteLen > 4) { + value |= (buf[off++] & 0xFFL) << 32; + if (byteLen > 5) { + value |= (buf[off++] & 0xFFL) << 40; + if (byteLen > 6) { + value |= (buf[off++] & 0xFFL) << 48; + if (byteLen > 7) { + value |= (buf[off++] & 0xFFL) << 56; + } + } + } + } + } + } + } + + return negative ? (-value - 119) : (value + 119); + } + + /** + * Returns the number of bytes that would be read by {@link #readInt}. + * + *
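A round-trip sketch using the int variants defined in this class:

    byte[] buf = new byte[PackedInteger.MAX_LENGTH];    // 5 bytes always suffice for an int
    int next = PackedInteger.writeInt(buf, 0, 123456);  // offset past the bytes written
    int len = PackedInteger.getReadIntLength(buf, 0);   // equals next when reading from 0
    int value = PackedInteger.readInt(buf, 0);          // 123456 again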

        Because the length is stored in the first byte, this method may be + * called with only the first byte of the packed integer in the given + * buffer. This method only accesses one byte at the given offset.

        + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the number of bytes that would be read. + */ + public static int getReadIntLength(byte[] buf, int off) { + + int b1 = buf[off]; + if (b1 < -119) { + return -b1 - 119 + 1; + } else if (b1 > 119) { + return b1 - 119 + 1; + } else { + return 1; + } + } + + /** + * Returns the number of bytes that would be read by {@link #readLong}. + * + *

        Because the length is stored in the first byte, this method may be + * called with only the first byte of the packed integer in the given + * buffer. This method only accesses one byte at the given offset.

        + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the number of bytes that would be read. + */ + public static int getReadLongLength(byte[] buf, int off) { + + /* The length is stored in the same way for int and long. */ + return getReadIntLength(buf, off); + } + + /** + * Writes a packed integer starting at the given buffer offset and returns + * the next offset to be written. + * + * @param buf the buffer to write to. + * + * @param offset the offset in the buffer at which to start writing. + * + * @param value the integer to be written. + * + * @return the offset past the bytes written. + */ + public static int writeInt(byte[] buf, int offset, int value) { + + int byte1Off = offset; + boolean negative; + + if (value < -119) { + negative = true; + value = -value - 119; + } else if (value > 119) { + negative = false; + value = value - 119; + } else { + buf[offset++] = (byte) value; + return offset; + } + offset++; + + buf[offset++] = (byte) value; + if ((value & 0xFFFFFF00) == 0) { + buf[byte1Off] = negative ? (byte) -120 : (byte) 120; + return offset; + } + + buf[offset++] = (byte) (value >>> 8); + if ((value & 0xFFFF0000) == 0) { + buf[byte1Off] = negative ? (byte) -121 : (byte) 121; + return offset; + } + + buf[offset++] = (byte) (value >>> 16); + if ((value & 0xFF000000) == 0) { + buf[byte1Off] = negative ? (byte) -122 : (byte) 122; + return offset; + } + + buf[offset++] = (byte) (value >>> 24); + buf[byte1Off] = negative ? (byte) -123 : (byte) 123; + return offset; + } + + /** + * Writes a packed long integer starting at the given buffer offset and + * returns the next offset to be written. + * + * @param buf the buffer to write to. + * + * @param offset the offset in the buffer at which to start writing. + * + * @param value the long integer to be written. + * + * @return the offset past the bytes written. + */ + public static int writeLong(byte[] buf, int offset, long value) { + + int byte1Off = offset; + boolean negative; + + if (value < -119) { + negative = true; + value = -value - 119; + } else if (value > 119) { + negative = false; + value = value - 119; + } else { + buf[offset++] = (byte) value; + return offset; + } + offset++; + + buf[offset++] = (byte) value; + if ((value & 0xFFFFFFFFFFFFFF00L) == 0) { + buf[byte1Off] = negative ? (byte) -120 : (byte) 120; + return offset; + } + + buf[offset++] = (byte) (value >>> 8); + if ((value & 0xFFFFFFFFFFFF0000L) == 0) { + buf[byte1Off] = negative ? (byte) -121 : (byte) 121; + return offset; + } + + buf[offset++] = (byte) (value >>> 16); + if ((value & 0xFFFFFFFFFF000000L) == 0) { + buf[byte1Off] = negative ? (byte) -122 : (byte) 122; + return offset; + } + + buf[offset++] = (byte) (value >>> 24); + if ((value & 0xFFFFFFFF00000000L) == 0) { + buf[byte1Off] = negative ? (byte) -123 : (byte) 123; + return offset; + } + + buf[offset++] = (byte) (value >>> 32); + if ((value & 0xFFFFFF0000000000L) == 0) { + buf[byte1Off] = negative ? (byte) -124 : (byte) 124; + return offset; + } + + buf[offset++] = (byte) (value >>> 40); + if ((value & 0xFFFF000000000000L) == 0) { + buf[byte1Off] = negative ? (byte) -125 : (byte) 125; + return offset; + } + + buf[offset++] = (byte) (value >>> 48); + if ((value & 0xFF00000000000000L) == 0) { + buf[byte1Off] = negative ? (byte) -126 : (byte) 126; + return offset; + } + + buf[offset++] = (byte) (value >>> 56); + buf[byte1Off] = negative ? 
(byte) -127 : (byte) 127; + return offset; + } + + /** + * Returns the number of bytes that would be written by {@link #writeInt}. + * + * @param value the integer to be written. + * + * @return the number of bytes that would be used to write the given + * integer. + */ + public static int getWriteIntLength(int value) { + + if (value < -119) { + value = -value - 119; + } else if (value > 119) { + value = value - 119; + } else { + return 1; + } + + if ((value & 0xFFFFFF00) == 0) { + return 2; + } + if ((value & 0xFFFF0000) == 0) { + return 3; + } + if ((value & 0xFF000000) == 0) { + return 4; + } + return 5; + } + + /** + * Returns the number of bytes that would be written by {@link #writeLong}. + * + * @param value the long integer to be written. + * + * @return the number of bytes that would be used to write the given long + * integer. + */ + public static int getWriteLongLength(long value) { + + if (value < -119) { + value = -value - 119; + } else if (value > 119) { + value = value - 119; + } else { + return 1; + } + + if ((value & 0xFFFFFFFFFFFFFF00L) == 0) { + return 2; + } + if ((value & 0xFFFFFFFFFFFF0000L) == 0) { + return 3; + } + if ((value & 0xFFFFFFFFFF000000L) == 0) { + return 4; + } + if ((value & 0xFFFFFFFF00000000L) == 0) { + return 5; + } + if ((value & 0xFFFFFF0000000000L) == 0) { + return 6; + } + if ((value & 0xFFFF000000000000L) == 0) { + return 7; + } + if ((value & 0xFF00000000000000L) == 0) { + return 8; + } + return 9; + } + + /** + * Reads a sorted packed integer at the given buffer offset and returns it. + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the integer that was read. + */ + public static int readSortedInt(byte[] buf, int off) { + + int byteLen; + boolean negative; + + /* The first byte of the buf stores the length of the value part. */ + int b1 = buf[off++] & 0xff; + /* Adjust the byteLen to the real length of the value part. */ + if (b1 < 0x08) { + byteLen = 0x08 - b1; + negative = true; + } else if (b1 > 0xf7) { + byteLen = b1 - 0xf7; + negative = false; + } else { + return b1 - 127; + } + + /* + * The following bytes on the buf store the value as a big endian + * integer. We extract the significant bytes from the buf and put them + * into the value in big endian order. + */ + int value; + if (negative) { + value = 0xFFFFFFFF; + } else { + value = 0; + } + if (byteLen > 3) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 2) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 1) { + value = (value << 8) | (buf[off++] & 0xFF); + } + value = (value << 8) | (buf[off++] & 0xFF); + + /* + * After get the adjusted value, we have to adjust it back to the + * original value. + */ + if (negative) { + value -= 119; + } else { + value += 121; + } + return value; + } + + /** + * Reads a sorted packed long integer at the given buffer offset and + * returns it. + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the long integer that was read. + */ + public static long readSortedLong(byte[] buf, int off) { + + int byteLen; + boolean negative; + + /* The first byte of the buf stores the length of the value part. */ + int b1 = buf[off++] & 0xff; + /* Adjust the byteLen to the real length of the value part. 
*/ + if (b1 < 0x08) { + byteLen = 0x08 - b1; + negative = true; + } else if (b1 > 0xf7) { + byteLen = b1 - 0xf7; + negative = false; + } else { + return b1 - 127; + } + + /* + * The following bytes on the buf store the value as a big endian + * integer. We extract the significant bytes from the buf and put them + * into the value in big endian order. + */ + long value; + if (negative) { + value = 0xFFFFFFFFFFFFFFFFL; + } else { + value = 0; + } + if (byteLen > 7) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 6) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 5) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 4) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 3) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 2) { + value = (value << 8) | (buf[off++] & 0xFF); + } + if (byteLen > 1) { + value = (value << 8) | (buf[off++] & 0xFF); + } + value = (value << 8) | (buf[off++] & 0xFF); + + /* + * After obtaining the adjusted value, we have to adjust it back to the + * original value. + */ + if (negative) { + value -= 119; + } else { + value += 121; + } + return value; + } + + /** + * Returns the number of bytes that would be read by {@link + * #readSortedInt}. + * + *
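A sketch of the property the sorted format is designed for: comparing the encodings as unsigned bytes yields the same order as comparing the original integers.

    byte[] a = new byte[PackedInteger.MAX_LENGTH];
    byte[] b = new byte[PackedInteger.MAX_LENGTH];
    PackedInteger.writeSortedInt(a, 0, -5);
    PackedInteger.writeSortedInt(b, 0, 1000);
    // unsigned byte-by-byte comparison ranks a (-5) before b (1000)
    int back = PackedInteger.readSortedInt(a, 0);       // -5 again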

        Because the length is stored in the first byte, this method may be + * called with only the first byte of the packed integer in the given + * buffer. This method only accesses one byte at the given offset.

        + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the number of bytes that would be read. + */ + public static int getReadSortedIntLength(byte[] buf, int off) { + + /* The first byte of the buf stores the length of the value part. */ + int b1 = buf[off] & 0xff; + if (b1 < 0x08) { + return 1 + 0x08 - b1; + } + if (b1 > 0xf7) { + return 1 + b1 - 0xf7; + } + return 1; + } + + /** + * Returns the number of bytes that would be read by {@link + * #readSortedLong}. + * + *

        Because the length is stored in the first byte, this method may be + * called with only the first byte of the packed integer in the given + * buffer. This method only accesses one byte at the given offset.

        + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading. + * + * @return the number of bytes that would be read. + */ + public static int getReadSortedLongLength(byte[] buf, int off) { + + /* The length is stored in the same way for int and long. */ + return getReadSortedIntLength(buf, off); + } + + /** + * Writes a packed sorted integer starting at the given buffer offset and + * returns the next offset to be written. + * + * @param buf the buffer to write to. + * + * @param offset the offset in the buffer at which to start writing. + * + * @param value the integer to be written. + * + * @return the offset past the bytes written. + */ + public static int writeSortedInt(byte[] buf, int offset, int value) { + + /* + * Values in the inclusive range [-119,120] are stored in a single + * byte. For values outside that range, the first byte stores the + * number of additional bytes. The additional bytes store + * (value + 119 for negative and value - 121 for positive) as an + * unsigned big endian integer. + */ + int byte1Off = offset; + offset++; + if (value < -119) { + + /* + * If the value < -119, then first adjust the value by adding 119. + * Then the adjusted value is stored as an unsigned big endian + * integer. + */ + value += 119; + + /* + * Store the adjusted value as an unsigned big endian integer. + * For an negative integer, from left to right, the first + * significant byte is the byte which is not equal to 0xFF. Also + * please note that, because the adjusted value is stored in big + * endian integer, we extract the significant byte from left to + * right. + * + * In the left to right order, if the first byte of the adjusted + * value is a significant byte, it will be stored in the 2nd byte + * of the buf. Then we will look at the 2nd byte of the adjusted + * value to see if this byte is the significant byte, if yes, this + * byte will be stored in the 3rd byte of the buf, and the like. + */ + if ((value | 0x00FFFFFF) != 0xFFFFFFFF) { + buf[offset++] = (byte) (value >> 24); + } + if ((value | 0x0000FFFF) != 0xFFFFFFFF) { + buf[offset++] = (byte) (value >> 16); + } + if ((value | 0x000000FF) != 0xFFFFFFFF) { + buf[offset++] = (byte) (value >> 8); + } + buf[offset++] = (byte) value; + + /* + * valueLen is the length of the value part stored in buf. Because + * the first byte of buf is used to stored the length, we need + * to subtract one. + */ + int valueLen = offset - byte1Off - 1; + + /* + * The first byte stores the number of additional bytes. Here we + * store the result of 0x08 - valueLen, rather than directly store + * valueLen. The reason is to implement natural sort order for + * byte-by-byte comparison. + */ + buf[byte1Off] = (byte) (0x08 - valueLen); + } else if (value > 120) { + + /* + * If the value > 120, then first adjust the value by subtracting + * 121. Then the adjusted value is stored as an unsigned big endian + * integer. + */ + value -= 121; + + /* + * Store the adjusted value as an unsigned big endian integer. + * For a positive integer, from left to right, the first + * significant byte is the byte which is not equal to 0x00. + * + * In the left to right order, if the first byte of the adjusted + * value is a significant byte, it will be stored in the 2nd byte + * of the buf. Then we will look at the 2nd byte of the adjusted + * value to see if this byte is the significant byte, if yes, this + * byte will be stored in the 3rd byte of the buf, and the like. 
+ */ + if ((value & 0xFF000000) != 0) { + buf[offset++] = (byte) (value >> 24); + } + if ((value & 0xFFFF0000) != 0) { + buf[offset++] = (byte) (value >> 16); + } + if ((value & 0xFFFFFF00) != 0) { + buf[offset++] = (byte) (value >> 8); + } + buf[offset++] = (byte) value; + + /* + * valueLen is the length of the value part stored in buf. Because + * the first byte of buf is used to stored the length, we need to + * subtract one. + */ + int valueLen = offset - byte1Off - 1; + + /* + * The first byte stores the number of additional bytes. Here we + * store the result of 0xF7 + valueLen, rather than directly store + * valueLen. The reason is to implement natural sort order for + * byte-by-byte comparison. + */ + buf[byte1Off] = (byte) (0xF7 + valueLen); + } else { + + /* + * If -119 <= value <= 120, only one byte is needed to store the + * value. The stored value is the original value plus 127. + */ + buf[byte1Off] = (byte) (value + 127); + } + return offset; + } + + /** + * Writes a packed sorted long integer starting at the given buffer offset + * and returns the next offset to be written. + * + * @param buf the buffer to write to. + * + * @param offset the offset in the buffer at which to start writing. + * + * @param value the long integer to be written. + * + * @return the offset past the bytes written. + */ + public static int writeSortedLong(byte[] buf, int offset, long value) { + + /* + * Values in the inclusive range [-119,120] are stored in a single + * byte. For values outside that range, the first byte stores the + * number of additional bytes. The additional bytes store + * (value + 119 for negative and value - 121 for positive) as an + * unsigned big endian integer. + */ + int byte1Off = offset; + offset++; + if (value < -119) { + + /* + * If the value < -119, then first adjust the value by adding 119. + * Then the adjusted value is stored as an unsigned big endian + * integer. + */ + value += 119; + + /* + * Store the adjusted value as an unsigned big endian integer. + * For an negative integer, from left to right, the first + * significant byte is the byte which is not equal to 0xFF. Also + * please note that, because the adjusted value is stored in big + * endian integer, we extract the significant byte from left to + * right. + * + * In the left to right order, if the first byte of the adjusted + * value is a significant byte, it will be stored in the 2nd byte + * of the buf. Then we will look at the 2nd byte of the adjusted + * value to see if this byte is the significant byte, if yes, this + * byte will be stored in the 3rd byte of the buf, and the like. + */ + if ((value | 0x00FFFFFFFFFFFFFFL) != 0xFFFFFFFFFFFFFFFFL) { + buf[offset++] = (byte) (value >> 56); + } + if ((value | 0x0000FFFFFFFFFFFFL) != 0xFFFFFFFFFFFFFFFFL) { + buf[offset++] = (byte) (value >> 48); + } + if ((value | 0x000000FFFFFFFFFFL) != 0xFFFFFFFFFFFFFFFFL) { + buf[offset++] = (byte) (value >> 40); + } + if ((value | 0x00000000FFFFFFFFL) != 0xFFFFFFFFFFFFFFFFL) { + buf[offset++] = (byte) (value >> 32); + } + if ((value | 0x0000000000FFFFFFL) != 0xFFFFFFFFFFFFFFFFL) { + buf[offset++] = (byte) (value >> 24); + } + if ((value | 0x000000000000FFFFL) != 0xFFFFFFFFFFFFFFFFL) { + buf[offset++] = (byte) (value >> 16); + } + if ((value | 0x00000000000000FFL) != 0xFFFFFFFFFFFFFFFFL) { + buf[offset++] = (byte) (value >> 8); + } + buf[offset++] = (byte) value; + + /* + * valueLen is the length of the value part stored in buf. 
Because + * the first byte of buf is used to store the length, we need + * to subtract one. + */ + int valueLen = offset - byte1Off - 1; + + /* + * The first byte stores the number of additional bytes. Here we + * store the result of 0x08 - valueLen, rather than directly store + * valueLen. The reason is to implement natural sort order for + * byte-by-byte comparison. + */ + buf[byte1Off] = (byte) (0x08 - valueLen); + } else if (value > 120) { + + /* + * If the value > 120, then first adjust the value by subtracting + * 121. Then the adjusted value is stored as an unsigned big endian + * integer. + */ + value -= 121; + + /* + * Store the adjusted value as an unsigned big endian integer. + * For a positive integer, from left to right, the first + * significant byte is the byte which is not equal to 0x00. + * + * In the left to right order, if the first byte of the adjusted + * value is a significant byte, it will be stored in the 2nd byte + * of the buf. Then we will look at the 2nd byte of the adjusted + * value to see if this byte is the significant byte, if yes, this + * byte will be stored in the 3rd byte of the buf, and the like. + */ + if ((value & 0xFF00000000000000L) != 0L) { + buf[offset++] = (byte) (value >> 56); + } + if ((value & 0xFFFF000000000000L) != 0L) { + buf[offset++] = (byte) (value >> 48); + } + if ((value & 0xFFFFFF0000000000L) != 0L) { + buf[offset++] = (byte) (value >> 40); + } + if ((value & 0xFFFFFFFF00000000L) != 0L) { + buf[offset++] = (byte) (value >> 32); + } + if ((value & 0xFFFFFFFFFF000000L) != 0L) { + buf[offset++] = (byte) (value >> 24); + } + if ((value & 0xFFFFFFFFFFFF0000L) != 0L) { + buf[offset++] = (byte) (value >> 16); + } + if ((value & 0xFFFFFFFFFFFFFF00L) != 0L) { + buf[offset++] = (byte) (value >> 8); + } + buf[offset++] = (byte) value; + + /* + * valueLen is the length of the value part stored in buf. Because + * the first byte of buf is used to store the length, we need + * to subtract one. + */ + int valueLen = offset - byte1Off - 1; + + /* + * The first byte stores the number of additional bytes. Here we + * store the result of 0xF7 + valueLen, rather than directly store + * valueLen. The reason is to implement natural sort order for + * byte-by-byte comparison. + */ + buf[byte1Off] = (byte) (0xF7 + valueLen); + } else { + + /* + * If -119 <= value <= 120, only one byte is needed to store the + * value. The stored value is the original value plus 127. + */ + buf[byte1Off] = (byte) (value + 127); + } + return offset; + } + + /** + * Returns the number of bytes that would be written by {@link + * #writeSortedInt}. + * + * @param value the integer to be written. + * + * @return the number of bytes that would be used to write the given + * integer. + */ + public static int getWriteSortedIntLength(int value) { + + if (value < -119) { + /* Adjust the value. */ + value += 119; + + /* + * Find the leftmost significant byte of the adjusted value, and + * return the length accordingly. + */ + if ((value | 0x000000FF) == 0xFFFFFFFF) { + return 2; + } + if ((value | 0x0000FFFF) == 0xFFFFFFFF) { + return 3; + } + if ((value | 0x00FFFFFF) == 0xFFFFFFFF) { + return 4; + } + } else if (value > 120) { + /* Adjust the value. */ + value -= 121; + + /* + * Find the leftmost significant byte of the adjusted value, and + * return the length accordingly.
+ */ + if ((value & 0xFFFFFF00) == 0) { + return 2; + } + if ((value & 0xFFFF0000) == 0) { + return 3; + } + if ((value & 0xFF000000) == 0) { + return 4; + } + } else { + + /* + * If -119 <= value <= 120, only one byte is needed to store the + * value. + */ + return 1; + } + return 5; + } + + /** + * Returns the number of bytes that would be written by {@link + * #writeSortedLong}. + * + * @param value the long integer to be written. + * + * @return the number of bytes that would be used to write the given long + * integer. + */ + public static int getWriteSortedLongLength(long value) { + + if (value < -119) { + /* Adjust the value. */ + value += 119; + + /* + * Find the left most significant byte of the adjusted value, and + * return the length accordingly. + */ + if ((value | 0x00000000000000FFL) == 0xFFFFFFFFFFFFFFFFL) { + return 2; + } + if ((value | 0x000000000000FFFFL) == 0xFFFFFFFFFFFFFFFFL) { + return 3; + } + if ((value | 0x0000000000FFFFFFL) == 0xFFFFFFFFFFFFFFFFL) { + return 4; + } + if ((value | 0x00000000FFFFFFFFL) == 0xFFFFFFFFFFFFFFFFL) { + return 5; + } + if ((value | 0x000000FFFFFFFFFFL) == 0xFFFFFFFFFFFFFFFFL) { + return 6; + } + if ((value | 0x0000FFFFFFFFFFFFL) == 0xFFFFFFFFFFFFFFFFL) { + return 7; + } + if ((value | 0x00FFFFFFFFFFFFFFL) == 0xFFFFFFFFFFFFFFFFL) { + return 8; + } + } else if (value > 120) { + /* Adjust the value. */ + value -= 121; + + /* + * Find the left most significant byte of the adjusted value, and + * return the length accordingly. + */ + if ((value & 0xFFFFFFFFFFFFFF00L) == 0L) { + return 2; + } + if ((value & 0xFFFFFFFFFFFF0000L) == 0L) { + return 3; + } + if ((value & 0xFFFFFFFFFF000000L) == 0L) { + return 4; + } + if ((value & 0xFFFFFFFF00000000L) == 0L) { + return 5; + } + if ((value & 0xFFFFFF0000000000L) == 0L) { + return 6; + } + if ((value & 0xFFFF000000000000L) == 0L) { + return 7; + } + if ((value & 0xFF00000000000000L) == 0L) { + return 8; + } + } else { + + /* + * If -119 <= value <= 120, only one byte is needed to store the + * value. + */ + return 1; + } + return 9; + } + + /* */ + + /** + * @hidden + * Reads a reverse-packed integer ending at the given buffer offset and + * returns it. + * + * To get the length of a reverse-packed integer before reading, call + * {@link #getReadIntLength} passing the offset to the last byte. + * + * @param buf the buffer to read from. + * + * @param off the offset in the buffer at which to start reading, which is + * the index of the last byte of the integer in the buffer. + * + * @return the integer that was read. + */ + public static int readReverseInt(byte[] buf, int off) { + + boolean negative; + int byteLen; + + int b1 = buf[off]; + if (b1 < -119) { + negative = true; + byteLen = -b1 - 119; + } else if (b1 > 119) { + negative = false; + byteLen = b1 - 119; + } else { + return b1; + } + + int value = buf[--off] & 0xFF; + if (byteLen > 1) { + value |= (buf[--off] & 0xFF) << 8; + if (byteLen > 2) { + value |= (buf[--off] & 0xFF) << 16; + if (byteLen > 3) { + value |= (buf[--off] & 0xFF) << 24; + } + } + } + + return negative ? (-value - 119) : (value + 119); + } + + /** + * @hidden + * Writes a reverse-packed integer starting at the given buffer offset and + * returns the next offset to be written. + * + * To get the length of a reverse-packed integer before writing, call + * {@link #getWriteIntLength}. + * + * @param buf the buffer to write to. + * + * @param off the offset in the buffer at which to start writing, which + * will be the index of the first byte of the integer in the buffer. 
+ * + * @param value the integer to be written. + * + * @return the offset past the bytes written. + */ + public static int writeReverseInt(byte[] buf, int off, int value) { + + final boolean negative; + + final int nextOff = off + getWriteIntLength(value); + if (value < -119) { + negative = true; + value = -value - 119; + } else if (value > 119) { + negative = false; + value = value - 119; + } else { + buf[off] = (byte) value; + return off + 1; + } + final int byte1Off = nextOff - 1; + off = byte1Off; + + buf[--off] = (byte) value; + if ((value & 0xFFFFFF00) == 0) { + buf[byte1Off] = negative ? (byte) -120 : (byte) 120; + return nextOff; + } + + buf[--off] = (byte) (value >>> 8); + if ((value & 0xFFFF0000) == 0) { + buf[byte1Off] = negative ? (byte) -121 : (byte) 121; + return nextOff; + } + + buf[--off] = (byte) (value >>> 16); + if ((value & 0xFF000000) == 0) { + buf[byte1Off] = negative ? (byte) -122 : (byte) 122; + return nextOff; + } + + buf[--off] = (byte) (value >>> 24); + buf[byte1Off] = negative ? (byte) -123 : (byte) 123; + return nextOff; + } + + /* */ +} diff --git a/src/com/sleepycat/util/RuntimeExceptionWrapper.java b/src/com/sleepycat/util/RuntimeExceptionWrapper.java new file mode 100644 index 0000000..59ace18 --- /dev/null +++ b/src/com/sleepycat/util/RuntimeExceptionWrapper.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util; + +/** + * A RuntimeException that can contain nested exceptions. + * + * @author Mark Hayes + */ +public class RuntimeExceptionWrapper extends RuntimeException + implements ExceptionWrapper { + + /** + * Wraps the given exception if it is not a {@code RuntimeException}. + * + * @param e any exception. + * + * @return {@code e} if it is a {@code RuntimeException}, otherwise a + * {@code RuntimeExceptionWrapper} for {@code e}. + */ + public static RuntimeException wrapIfNeeded(Throwable e) { + if (e instanceof RuntimeException) { + return (RuntimeException) e; + } + return new RuntimeExceptionWrapper(e); + } + + private static final long serialVersionUID = 1106961350L; + + public RuntimeExceptionWrapper(Throwable e) { + + super(e); + } + + /** + * @deprecated replaced by {@link #getCause}. + */ + public Throwable getDetail() { + + return getCause(); + } +} diff --git a/src/com/sleepycat/util/UtfOps.java b/src/com/sleepycat/util/UtfOps.java new file mode 100644 index 0000000..4440fcd --- /dev/null +++ b/src/com/sleepycat/util/UtfOps.java @@ -0,0 +1,289 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.util; + +/** + * UTF operations with more flexibility than is provided by DataInput and + * DataOutput. + * + * @author Mark Hayes + */ +public class UtfOps { + + private static byte[] EMPTY_BYTES = {}; + private static String EMPTY_STRING = ""; + + /** + * Returns the byte length of a null terminated UTF string, not including + * the terminator. + * + * @param bytes the data containing the UTF string. + * + * @param offset the beginning of the string the measure. + * + * @throws IndexOutOfBoundsException if no zero terminator is found. + * + * @return the number of bytes. + */ + public static int getZeroTerminatedByteLength(byte[] bytes, int offset) + throws IndexOutOfBoundsException { + + int len = 0; + while (bytes[offset++] != 0) { + len++; + } + return len; + } + + /** + * Returns the byte length of the UTF string that would be created by + * converting the given characters to UTF. + * + * @param chars the characters that would be converted. + * + * @return the byte length of the equivalent UTF data. + */ + public static int getByteLength(char[] chars) { + + return getByteLength(chars, 0, chars.length); + } + + /** + * Returns the byte length of the UTF string that would be created by + * converting the given characters to UTF. + * + * @param chars the characters that would be converted. + * + * @param offset the first character to be converted. + * + * @param length the number of characters to be converted. + * + * @return the byte length of the equivalent UTF data. + */ + public static int getByteLength(char[] chars, int offset, int length) { + + int len = 0; + length += offset; + for (int i = offset; i < length; i++) { + int c = chars[i]; + if ((c >= 0x0001) && (c <= 0x007F)) { + len++; + } else if (c > 0x07FF) { + len += 3; + } else { + len += 2; + } + } + return len; + } + + /** + * Returns the number of characters represented by the given UTF string. + * + * @param bytes the UTF string. + * + * @return the number of characters. + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. + * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. + */ + public static int getCharLength(byte[] bytes) + throws IllegalArgumentException, IndexOutOfBoundsException { + + return getCharLength(bytes, 0, bytes.length); + } + + /** + * Returns the number of characters represented by the given UTF string. + * + * @param bytes the data containing the UTF string. + * + * @param offset the first byte to be converted. + * + * @param length the number of byte to be converted. + * + * @return the number of characters. + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. + * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. + */ + public static int getCharLength(byte[] bytes, int offset, int length) + throws IllegalArgumentException, IndexOutOfBoundsException { + + int charCount = 0; + length += offset; + while (offset < length) { + switch ((bytes[offset] & 0xff) >> 4) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + offset++; + break; + case 12: case 13: + offset += 2; + break; + case 14: + offset += 3; + break; + default: + throw new IllegalArgumentException(); + } + charCount++; + } + return charCount; + } + + /** + * Converts byte arrays into character arrays. 
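+     *
+     * <p>The format handled here is the modified UTF-8 encoding used by
+     * DataInput and DataOutput: code points U+0001 through U+007F occupy
+     * one byte, U+0000 and U+0080 through U+07FF occupy two bytes, and all
+     * other char values, including surrogate halves (which are encoded
+     * independently), occupy three bytes.</p>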
+ * + * @param bytes the source byte data to convert + * + * @param byteOffset the offset into the byte array at which + * to start the conversion + * + * @param chars the destination array + * + * @param charOffset the offset into chars at which to begin the copy + * + * @param len the amount of information to copy into chars + * + * @param isByteLen if true then len is a measure of bytes, otherwise + * len is a measure of characters + * + * @return the byte offset after converting the bytes. + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. + * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. + */ + public static int bytesToChars(byte[] bytes, int byteOffset, + char[] chars, int charOffset, + int len, boolean isByteLen) + throws IllegalArgumentException, IndexOutOfBoundsException { + + int char1, char2, char3; + len += isByteLen ? byteOffset : charOffset; + while ((isByteLen ? byteOffset : charOffset) < len) { + char1 = bytes[byteOffset++] & 0xff; + switch ((char1 & 0xff) >> 4) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + chars[charOffset++] = (char) char1; + break; + case 12: case 13: + char2 = bytes[byteOffset++]; + if ((char2 & 0xC0) != 0x80) { + throw new IllegalArgumentException(); + } + chars[charOffset++] = (char)(((char1 & 0x1F) << 6) | + (char2 & 0x3F)); + break; + case 14: + char2 = bytes[byteOffset++]; + char3 = bytes[byteOffset++]; + if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80)) + throw new IllegalArgumentException(); + chars[charOffset++] = (char)(((char1 & 0x0F) << 12) | + ((char2 & 0x3F) << 6) | + ((char3 & 0x3F) << 0)); + break; + default: + throw new IllegalArgumentException(); + } + } + return byteOffset; + } + + /** + * Converts character arrays into byte arrays. + * + * @param chars the source character data to convert + * + * @param charOffset the offset into the character array at which + * to start the conversion + * + * @param bytes the destination array + * + * @param byteOffset the offset into bytes at which to begin the copy + * + * @param charLength the length of characters to copy into bytes + */ + public static void charsToBytes(char[] chars, int charOffset, + byte[] bytes, int byteOffset, + int charLength) { + charLength += charOffset; + for (int i = charOffset; i < charLength; i++) { + int c = chars[i]; + if ((c >= 0x0001) && (c <= 0x007F)) { + bytes[byteOffset++] = (byte) c; + } else if (c > 0x07FF) { + bytes[byteOffset++] = (byte) (0xE0 | ((c >> 12) & 0x0F)); + bytes[byteOffset++] = (byte) (0x80 | ((c >> 6) & 0x3F)); + bytes[byteOffset++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } else { + bytes[byteOffset++] = (byte) (0xC0 | ((c >> 6) & 0x1F)); + bytes[byteOffset++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } + } + } + + /** + * Converts byte arrays into strings. + * + * @param bytes the source byte data to convert + * + * @param offset the offset into the byte array at which + * to start the conversion + * + * @param length the number of bytes to be converted. + * + * @return the string. + * + * @throws IndexOutOfBoundsException if a UTF character sequence at the end + * of the data is not complete. + * + * @throws IllegalArgumentException if an illegal UTF sequence is + * encountered. 
+ */ + public static String bytesToString(byte[] bytes, int offset, int length) + throws IllegalArgumentException, IndexOutOfBoundsException { + + if (length == 0) return EMPTY_STRING; + int charLen = UtfOps.getCharLength(bytes, offset, length); + char[] chars = new char[charLen]; + UtfOps.bytesToChars(bytes, offset, chars, 0, length, true); + return new String(chars, 0, charLen); + } + + /** + * Converts strings to byte arrays. + * + * @param string the string to convert. + * + * @return the UTF byte array. + */ + public static byte[] stringToBytes(String string) { + + if (string.length() == 0) return EMPTY_BYTES; + char[] chars = string.toCharArray(); + byte[] bytes = new byte[UtfOps.getByteLength(chars)]; + UtfOps.charsToBytes(chars, 0, bytes, 0, chars.length); + return bytes; + } +} diff --git a/src/com/sleepycat/util/keyrange/KeyRange.java b/src/com/sleepycat/util/keyrange/KeyRange.java new file mode 100644 index 0000000..49ac8c2 --- /dev/null +++ b/src/com/sleepycat/util/keyrange/KeyRange.java @@ -0,0 +1,356 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.keyrange; + +import java.util.Comparator; + +import com.sleepycat.je.DatabaseEntry; + +/** + * Encapsulates a key range for use with a RangeCursor. + */ +public class KeyRange { + + /* + * We can return the same byte[] for 0 length arrays. + */ + public static final byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0]; + + Comparator comparator; + DatabaseEntry beginKey; + DatabaseEntry endKey; + boolean singleKey; + boolean beginInclusive; + boolean endInclusive; + + /** + * Creates an unconstrained key range. + */ + public KeyRange(Comparator comparator) { + this.comparator = comparator; + } + + /** + * Creates a range for a single key. + */ + public KeyRange subRange(DatabaseEntry key) + throws KeyRangeException { + + if (!check(key)) { + throw new KeyRangeException("singleKey out of range"); + } + KeyRange range = new KeyRange(comparator); + range.beginKey = key; + range.endKey = key; + range.beginInclusive = true; + range.endInclusive = true; + range.singleKey = true; + return range; + } + + /** + * Creates a range that is the intersection of this range and the given + * range parameters. + */ + public KeyRange subRange(DatabaseEntry beginKey, boolean beginInclusive, + DatabaseEntry endKey, boolean endInclusive) + throws KeyRangeException { + + if (beginKey == null) { + beginKey = this.beginKey; + beginInclusive = this.beginInclusive; + } else if (!check(beginKey, beginInclusive)) { + throw new KeyRangeException("beginKey out of range"); + } + if (endKey == null) { + endKey = this.endKey; + endInclusive = this.endInclusive; + } else if (!check(endKey, endInclusive)) { + throw new KeyRangeException("endKey out of range"); + } + KeyRange range = new KeyRange(comparator); + range.beginKey = beginKey; + range.endKey = endKey; + range.beginInclusive = beginInclusive; + range.endInclusive = endInclusive; + return range; + } + + /** + * Returns whether this is a single-key range. 
+ */
+    public final boolean isSingleKey() {
+        return singleKey;
+    }
+
+    /**
+     * Returns the key of a single-key range, or null if not a single-key
+     * range.
+     */
+    public final DatabaseEntry getSingleKey() {
+
+        return singleKey ? beginKey : null;
+    }
+
+    /**
+     * Returns whether this range has a begin or end bound.
+     */
+    public final boolean hasBound() {
+
+        return endKey != null || beginKey != null;
+    }
+
+    /**
+     * Formats this range as a string for debugging.
+     */
+    @Override
+    public String toString() {
+
+        return "[KeyRange " + beginKey + ' ' + beginInclusive + ' ' +
+                              endKey + ' ' + endInclusive +
+                              (singleKey ? " single" : "") + ']';
+    }
+
+    /**
+     * Returns whether a given key is within range.
+     */
+    public boolean check(DatabaseEntry key) {
+
+        if (singleKey) {
+            return (compare(key, beginKey) == 0);
+        } else {
+            return checkBegin(key, true) && checkEnd(key, true);
+        }
+    }
+
+    /**
+     * Returns whether a given key is within range, using the given
+     * inclusive setting for both bounds.
+     */
+    public boolean check(DatabaseEntry key, boolean inclusive) {
+
+        if (singleKey) {
+            return (compare(key, beginKey) == 0);
+        } else {
+            return checkBegin(key, inclusive) && checkEnd(key, inclusive);
+        }
+    }
+
+    /**
+     * Returns whether the given key is within range with respect to the
+     * beginning of the range.
+     *

+     * <p>The inclusive parameter should be true for checking a key read
+     * from the database; this will require that the key is within range.
+     * When inclusive=false the key is allowed to be equal to the beginKey
+     * for the range; this is used for checking a new exclusive bound of a
+     * sub-range.</p>
+     *
+     * <p>Note that when inclusive=false and beginInclusive=true our check
+     * is not exactly correct, because in theory we should allow the key to
+     * be "one less" than the existing bound; however, checking for "one
+     * less" is impossible, so we do the best we can and test the bounds
+     * conservatively.</p>
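+     *
+     * <p>For example, given a range whose beginKey is {5}, using the
+     * default unsigned byte-by-byte comparison:</p>
+     * <pre>
+     *    beginInclusive=true:  checkBegin({5}, true)  returns true
+     *    beginInclusive=false: checkBegin({5}, true)  returns false
+     *    beginInclusive=false: checkBegin({5}, false) returns true
+     * </pre>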

        + */ + public boolean checkBegin(DatabaseEntry key, boolean inclusive) { + + if (beginKey == null) { + return true; + } else if (!beginInclusive && inclusive) { + return compare(key, beginKey) > 0; + } else { + return compare(key, beginKey) >= 0; + } + } + + /** + * Returns whether the given key is within range with respect to the + * end of the range. See checkBegin for details. + */ + public boolean checkEnd(DatabaseEntry key, boolean inclusive) { + + if (endKey == null) { + return true; + } else if (!endInclusive && inclusive) { + return compare(key, endKey) < 0; + } else { + return compare(key, endKey) <= 0; + } + } + + /** + * Compares two keys, using the user comparator if there is one. + */ + public int compare(DatabaseEntry key1, DatabaseEntry key2) { + + if (comparator != null) { + return comparator.compare(getByteArray(key1), getByteArray(key2)); + } else { + return compareBytes + (key1.getData(), key1.getOffset(), key1.getSize(), + key2.getData(), key2.getOffset(), key2.getSize()); + + } + } + + /** + * Copies a byte array. + */ + public static byte[] copyBytes(byte[] bytes) { + + byte[] a = new byte[bytes.length]; + System.arraycopy(bytes, 0, a, 0, a.length); + return a; + } + + /** + * Compares two keys as unsigned byte arrays, which is the default + * comparison used by JE/DB. + */ + public static int compareBytes(byte[] data1, int offset1, int size1, + byte[] data2, int offset2, int size2) { + + for (int i = 0; i < size1 && i < size2; i++) { + + int b1 = 0xFF & data1[offset1 + i]; + int b2 = 0xFF & data2[offset2 + i]; + if (b1 < b2) + return -1; + else if (b1 > b2) + return 1; + } + + if (size1 < size2) + return -1; + else if (size1 > size2) + return 1; + else + return 0; + } + + /** + * Compares two byte arrays for equality. + */ + public static boolean equalBytes(byte[] data1, int offset1, int size1, + byte[] data2, int offset2, int size2) { + if (size1 != size2) { + return false; + } + for (int i = 0; i < size1; i += 1) { + if (data1[i + offset1] != data2[i + offset2]) { + return false; + } + } + return true; + } + + /** + * Returns a copy of an entry. + */ + public static DatabaseEntry copy(DatabaseEntry from) { + return new DatabaseEntry(getByteArray(from)); + } + + /** + * Copies one entry to another. + */ + public static void copy(DatabaseEntry from, DatabaseEntry to) { + to.setData(getByteArray(from)); + to.setOffset(0); + } + + /** + * Returns an entry's byte array, copying it if the entry offset is + * non-zero. + */ + public static byte[] getByteArray(DatabaseEntry entry) { + return getByteArrayInternal(entry, Integer.MAX_VALUE); + } + + public static byte[] getByteArray(DatabaseEntry entry, int maxBytes) { + return getByteArrayInternal(entry, maxBytes); + } + + private static byte[] getByteArrayInternal(DatabaseEntry entry, + int maxBytes) { + + byte[] bytes = entry.getData(); + if (bytes == null) return null; + int size = Math.min(entry.getSize(), maxBytes); + byte[] data; + if (size == 0) { + data = ZERO_LENGTH_BYTE_ARRAY; + } else { + data = new byte[size]; + System.arraycopy(bytes, entry.getOffset(), data, 0, size); + } + return data; + } + + /** + * Returns the two DatabaseEntry objects have the same data value. 
+ */ + public static boolean equalBytes(DatabaseEntry e1, DatabaseEntry e2) { + + if (e1 == null && e2 == null) { + return true; + } + if (e1 == null || e2 == null) { + return false; + } + + byte[] d1 = e1.getData(); + byte[] d2 = e2.getData(); + int s1 = e1.getSize(); + int s2 = e2.getSize(); + int o1 = e1.getOffset(); + int o2 = e2.getOffset(); + + if (d1 == null && d2 == null) { + return true; + } + if (d1 == null || d2 == null) { + return false; + } + if (s1 != s2) { + return false; + } + for (int i = 0; i < s1; i += 1) { + if (d1[o1 + i] != d2[o2 + i]) { + return false; + } + } + return true; + } + + /** + * Converts the byte array of this thang to space-separated integers, + * and suffixed by the record number if applicable. + * + * @param dbt the thang to convert. + * + * @return the resulting string. + */ + public static String toString(DatabaseEntry dbt) { + + int len = dbt.getOffset() + dbt.getSize(); + StringBuilder buf = new StringBuilder(len * 2); + byte[] data = dbt.getData(); + for (int i = dbt.getOffset(); i < len; i++) { + String num = Integer.toHexString(data[i]); + if (num.length() < 2) buf.append('0'); + buf.append(num); + } + return buf.toString(); + } +} diff --git a/src/com/sleepycat/util/keyrange/KeyRangeException.java b/src/com/sleepycat/util/keyrange/KeyRangeException.java new file mode 100644 index 0000000..502ac72 --- /dev/null +++ b/src/com/sleepycat/util/keyrange/KeyRangeException.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.keyrange; + +/** + * An exception thrown when a key is out of range. + * + * @author Mark Hayes + */ +public class KeyRangeException extends IllegalArgumentException { + + private static final long serialVersionUID = 1048575489L; + + /** + * Creates a key range exception. + */ + public KeyRangeException(String msg) { + + super(msg); + } +} diff --git a/src/com/sleepycat/util/keyrange/RangeCursor.java b/src/com/sleepycat/util/keyrange/RangeCursor.java new file mode 100644 index 0000000..f872f9c --- /dev/null +++ b/src/com/sleepycat/util/keyrange/RangeCursor.java @@ -0,0 +1,1496 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.util.keyrange; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.compat.DbCompat.OpReadOptions; +import com.sleepycat.compat.DbCompat.OpResult; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.Get; +/* */ +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryCursor; + +/** + * A cursor-like interface that enforces a key range. The method signatures + * are actually those of SecondaryCursor, but the pKey parameter may be null. + * It was done this way to avoid doubling the number of methods. + * + *

+ * <p>This is not a fully general implementation of a range cursor and
+ * should not be used directly by applications; however, it may evolve into
+ * a generally useful range cursor some day.</p>
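+ *
+ * <p>A minimal usage sketch (the range, cursor and options values are
+ * assumed to have been created elsewhere):</p>
+ * <pre>
+ *    RangeCursor rc = new RangeCursor(range, null, sortedDups, cursor);
+ *    DatabaseEntry key = new DatabaseEntry();
+ *    DatabaseEntry data = new DatabaseEntry();
+ *    OpResult r = rc.getFirst(key, null, data, options);
+ *    while (r.isSuccess()) {
+ *        // key and data are guaranteed to be within the range
+ *        r = rc.getNext(key, null, data, options);
+ *    }
+ *    rc.close();
+ * </pre>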

        + * + * @author Mark Hayes + */ +public class RangeCursor implements Cloneable { + + /** + * The cursor and secondary cursor are the same object. The secCursor is + * null if the database is not a secondary database. + */ + private Cursor cursor; + private SecondaryCursor secCursor; + + /** + * The range is always non-null, but may be unbounded meaning that it is + * open and not used. + */ + private KeyRange range; + + /** + * The pkRange may be non-null only if the range is a single-key range + * and the cursor is a secondary cursor. It further restricts the range of + * primary keys in a secondary database. + */ + private KeyRange pkRange; + + /** + * If the DB supported sorted duplicates, then calling + * Cursor.getSearchBothRange is allowed. + */ + private boolean sortedDups; + + /** + * The privXxx entries are used only when the range is bounded. We read + * into these private entries to avoid modifying the caller's entry + * parameters in the case where we read successfully but the key is out of + * range. In that case we return NOTFOUND and we want to leave the entry + * parameters unchanged. + */ + private DatabaseEntry privKey; + private DatabaseEntry privPKey; + private DatabaseEntry privData; + + /** + * The initialized flag is set to true whenever we successfully position + * the cursor. It is used to implement the getNext/Prev logic for doing a + * getFirst/Last when the cursor is not initialized. We can't rely on + * Cursor to do that for us, since if we position the underlying cursor + * successfully but the key is out of range, we have no way to set the + * underlying cursor to uninitialized. A range cursor always starts in the + * uninitialized state. + */ + private boolean initialized; + + /** + * Creates a range cursor with a duplicate range. + */ + public RangeCursor(KeyRange range, + KeyRange pkRange, + boolean sortedDups, + Cursor cursor) { + if (pkRange != null && !range.singleKey) { + throw new IllegalArgumentException(); + } + this.range = range; + this.pkRange = pkRange; + this.sortedDups = sortedDups; + this.cursor = cursor; + init(); + if (pkRange != null && secCursor == null) { + throw new IllegalArgumentException(); + } + } + + /** + * Create a cloned range cursor. The caller must clone the underlying + * cursor before using this constructor, because cursor open/close is + * handled specially for CDS cursors outside this class. + */ + public RangeCursor dup(boolean samePosition) + throws DatabaseException { + + try { + RangeCursor c = (RangeCursor) super.clone(); + c.cursor = dupCursor(cursor, samePosition); + c.init(); + return c; + } catch (CloneNotSupportedException neverHappens) { + return null; + } + } + + /** + * Used for opening and duping (cloning). + */ + private void init() { + + if (cursor instanceof SecondaryCursor) { + secCursor = (SecondaryCursor) cursor; + } else { + secCursor = null; + } + + if (range.hasBound()) { + privKey = new DatabaseEntry(); + privPKey = new DatabaseEntry(); + privData = new DatabaseEntry(); + } else { + privKey = null; + privPKey = null; + privData = null; + } + } + + /** + * Returns whether the cursor is initialized at a valid position. + */ + public boolean isInitialized() { + return initialized; + } + + /** + * Returns the underlying cursor. Used for cloning. 
+ */
+    public Cursor getCursor() {
+        return cursor;
+    }
+
+    /**
+     * When an unbounded range is used, this method is called to use the
+     * caller's entry parameters directly, to avoid the extra step of
+     * copying between the private entries and the caller's entries.
+     */
+    private void setParams(DatabaseEntry key,
+                           DatabaseEntry pKey,
+                           DatabaseEntry data) {
+        privKey = key;
+        privPKey = pKey;
+        privData = data;
+    }
+
+    /**
+     * Dups the cursor, sets the cursor and secCursor fields to the duped
+     * cursor, and returns the old cursor. Always call endOperation in a
+     * finally clause after calling beginOperation.
+     *

+     * <p>If the returned cursor == the cursor field, the cursor is
+     * uninitialized and was not duped; this case is handled correctly by
+     * endOperation.</p>
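+     *
+     * <p>The calling pattern used throughout this class is:</p>
+     * <pre>
+     *    OpResult result = OpResult.FAILURE;
+     *    Cursor oldCursor = beginOperation();
+     *    try {
+     *        result = doGetNext(options);
+     *        // additional range checks may reset result to FAILURE
+     *    } finally {
+     *        endOperation(oldCursor, result, key, pKey, data);
+     *    }
+     * </pre>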

        + */ + private Cursor beginOperation() + throws DatabaseException { + + Cursor oldCursor = cursor; + if (initialized) { + cursor = dupCursor(cursor, true); + if (secCursor != null) { + secCursor = (SecondaryCursor) cursor; + } + } else { + return cursor; + } + return oldCursor; + } + + /** + * If the operation succeeded, leaves the duped cursor in place and closes + * the oldCursor. If the operation failed, moves the oldCursor back in + * place and closes the duped cursor. oldCursor may be null if + * beginOperation was not called, in cases where we don't need to dup + * the cursor. Always call endOperation when a successful operation ends, + * in order to set the initialized field. + */ + private void endOperation(Cursor oldCursor, + OpResult result, + DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data) + throws DatabaseException { + + if (result.isSuccess()) { + if (oldCursor != null && oldCursor != cursor) { + closeCursor(oldCursor); + } + if (key != null) { + swapData(key, privKey); + } + if (pKey != null && secCursor != null) { + swapData(pKey, privPKey); + } + if (data != null) { + swapData(data, privData); + } + initialized = true; + } else { + if (oldCursor != null && oldCursor != cursor) { + closeCursor(cursor); + cursor = oldCursor; + if (secCursor != null) { + secCursor = (SecondaryCursor) cursor; + } + } + } + } + + /** + * Swaps the contents of the two entries. Used to return entry data to + * the caller when the operation was successful. + */ + private static void swapData(DatabaseEntry e1, DatabaseEntry e2) { + + byte[] d1 = e1.getData(); + int o1 = e1.getOffset(); + int s1 = e1.getSize(); + + e1.setData(e2.getData(), e2.getOffset(), e2.getSize()); + e2.setData(d1, o1, s1); + } + + /** + * Shares the same byte array, offset and size between two entries. + * Used when copying the entry data is not necessary because it is known + * that the underlying operation will not modify the entry, for example, + * with getSearchKey. 
+ */ + private static void shareData(DatabaseEntry from, DatabaseEntry to) { + + if (from != null) { + to.setData(from.getData(), from.getOffset(), from.getSize()); + } + } + + public OpResult getFirst(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetFirst(options); + endOperation(null, result, null, null, null); + return result; + } + if (pkRange != null && pkRange.isSingleKey()) { + KeyRange.copy(range.beginKey, privKey); + KeyRange.copy(pkRange.beginKey, privPKey); + result = doGetSearchBoth(options); + endOperation(null, result, key, pKey, data); + return result; + } + if (pkRange != null) { + KeyRange.copy(range.beginKey, privKey); + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + if (pkRange.beginKey == null || !sortedDups) { + result = doGetSearchKey(options); + } else { + KeyRange.copy(pkRange.beginKey, privPKey); + result = doGetSearchBothRange(options); + if (result.isSuccess() && + !pkRange.beginInclusive && + pkRange.compare(privPKey, pkRange.beginKey) == 0) { + result = doGetNextDup(options); + } + } + if (result.isSuccess() && + !pkRange.check(privPKey)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } else if (range.singleKey) { + KeyRange.copy(range.beginKey, privKey); + result = doGetSearchKey(options); + endOperation(null, result, key, pKey, data); + } else { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + if (range.beginKey == null) { + result = doGetFirst(options); + } else { + KeyRange.copy(range.beginKey, privKey); + result = doGetSearchKeyRange(options); + if (result.isSuccess() && + !range.beginInclusive && + range.compare(privKey, range.beginKey) == 0) { + result = doGetNextNoDup(options); + } + } + if (result.isSuccess() && + !range.check(privKey)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + return result; + } + + /** + * This method will restart the operation when a key range is used and an + * insertion at the end of the key range is performed in another thread. + * The restarts are needed because a sequence of cursor movements is + * performed, and serializable isolation cannot be relied on to prevent + * insertions in other threads. Without the restarts, getLast could return + * NOTFOUND when keys in the range exist. This may only be an issue for JE + * since it uses record locking, while DB core uses page locking. + */ + public OpResult getLast(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result = OpResult.FAILURE; + + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetLast(options); + endOperation(null, result, null, null, null); + return result; + } + + Cursor oldCursor = beginOperation(); + try { + if (pkRange != null) { + result = getLastInPKeyRange(options); + + /* Final check on candidate key and pKey value. */ + if (result.isSuccess() && + !(range.check(privKey) && pkRange.check(privPKey))) { + result = OpResult.FAILURE; + } + } else { + result = getLastInKeyRange(options); + + /* Final check on candidate key value. 
*/ + if (result.isSuccess() && + !range.check(privKey)) { + result = OpResult.FAILURE; + } + } + + return result; + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + + /** + * Performs getLast operation when a main key range is specified but + * pkRange is null. Does everything but the final checks for key in range, + * i.e., when SUCCESS is returned the caller should do the final check. + */ + private OpResult getLastInKeyRange(OpReadOptions options) + throws DatabaseException { + + /* Without an endKey, getLast returns the candidate record. */ + if (range.endKey == null) { + return doGetLast(options); + } + + /* + * K stands for the main key at the cursor position in the comments + * below. + */ + while (true) { + KeyRange.copy(range.endKey, privKey); + OpResult result = doGetSearchKeyRange(options); + + if (result.isSuccess()) { + + /* Found K >= endKey. */ + if (range.endInclusive && + range.compare(range.endKey, privKey) == 0) { + + /* K == endKey and endKey is inclusive. */ + + if (!sortedDups) { + /* If dups are not configured, we're done. */ + return result; + } + + /* + * If there are dups, we're positioned at endKey's first + * dup and we want to move to its last dup. Move to the + * first dup for the next main key (getNextNoDup) and then + * the prev record. In the absence of insertions by other + * threads, the prev record is the last dup for endKey. + */ + result = doGetNextNoDup(options); + if (result.isSuccess()) { + + /* + * K > endKey. Move backward to the last dup for + * endKey. + */ + result = doGetPrev(options); + } else { + + /* + * endKey is the last main key in the DB. Its last dup + * is the last key in the DB. + */ + result = doGetLast(options); + } + } else { + + /* + * K > endKey or endKey is exclusive (and K >= endKey). In + * both cases, moving to the prev key finds the last key in + * the range, whether or not there are dups. + */ + result = doGetPrev(options); + } + } else { + + /* + * There are no keys >= endKey in the DB. The last key in the + * range is the last key in the DB. + */ + result = doGetLast(options); + } + + if (!result.isSuccess()) { + return result; + } + + if (!range.checkEnd(privKey, true)) { + + /* + * The last call above (getPrev or getLast) returned a key + * outside the endKey range. Another thread must have inserted + * this key. Start over. + */ + continue; + } + + return result; + } + } + + /** + * Performs getLast operation when both a main key range (which must be a + * single key range) and a pkRange are specified. Does everything but the + * final checks for key and pKey in range, i.e., when SUCCESS is returned + * the caller should do the final two checks. + */ + private OpResult getLastInPKeyRange(OpReadOptions options) + throws DatabaseException { + + /* We can do an exact search when range and pkRange are single keys. */ + if (pkRange.isSingleKey()) { + KeyRange.copy(range.beginKey, privKey); + KeyRange.copy(pkRange.beginKey, privPKey); + return doGetSearchBoth(options); + } + + /* + * When dups are not configured, getSearchKey for the main key returns + * the only possible candidate record. 
+ */ + if (!sortedDups) { + KeyRange.copy(range.beginKey, privKey); + return doGetSearchKey(options); + } + + /* + * K stands for the main key and D for the duplicate (data item) at the + * cursor position in the comments below + */ + while (true) { + + if (pkRange.endKey != null) { + + KeyRange.copy(range.beginKey, privKey); + KeyRange.copy(pkRange.endKey, privPKey); + OpResult result = doGetSearchBothRange(options); + + if (result.isSuccess()) { + + /* Found D >= endKey. */ + if (!pkRange.endInclusive || + pkRange.compare(pkRange.endKey, privPKey) != 0) { + + /* + * D > endKey or endKey is exclusive (and D >= endKey). + * In both cases, moving to the prev dup finds the last + * key in the range. + */ + result = doGetPrevDup(options); + + if (!result.isSuccess()) { + return result; + } + + if (!pkRange.checkEnd(privPKey, true)) { + + /* + * getPrevDup returned a key outside the endKey + * range. Another thread must have inserted this + * key. Start over. + */ + continue; + } + } + /* Else D == endKey and endKey is inclusive. */ + + return result; + } + /* Else there are no dups >= endKey. Fall through. */ + } + + /* + * We're here for one of two reasons: + * 1. pkRange.endKey == null. + * 2. There are no dups >= endKey for the main key (status + * returned by getSearchBothRange above was not SUCCESS). + * In both cases, the last dup in the range is the last dup for the + * main key. + */ + KeyRange.copy(range.beginKey, privKey); + OpResult result = doGetSearchKey(options); + + if (!result.isSuccess()) { + return result; + } + + /* + * K == the main key and D is its first dup. We want to move to its + * last dup. Move to the first dup for the next main key; + * (getNextNoDup) and then the prev record. In the absence of + * insertions by other threads, the prev record is the last dup for + * the main key. + */ + result = doGetNextNoDup(options); + + if (result.isSuccess()) { + + /* + * K > main key and D is its first dup. Move to the prev record + * which should be the last dup for the main key. + */ + result = doGetPrev(options); + } else { + + /* + * The main key specified is the last main key in the DB. Its + * last dup is the last record in the DB. + */ + result = doGetLast(options); + } + + if (!result.isSuccess()) { + return result; + } + + if (!range.checkEnd(privKey, true)) { + + /* + * The last call above (getPrev or getLast) returned a key + * outside the endKey range. Another thread must have inserted + * this key. Start over. 
+ */ + continue; + } + + return result; + } + } + + public OpResult getNext(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!initialized) { + return getFirst(key, pKey, data, options); + } + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetNext(options); + endOperation(null, result, null, null, null); + return result; + } + if (pkRange != null) { + if (pkRange.endKey == null) { + result = doGetNextDup(options); + endOperation(null, result, key, pKey, data); + } else { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetNextDup(options); + if (result.isSuccess() && + !pkRange.checkEnd(privPKey, true)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + } else if (range.singleKey) { + result = doGetNextDup(options); + endOperation(null, result, key, pKey, data); + } else { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetNext(options); + if (result.isSuccess() && + !range.check(privKey)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + return result; + } + + public OpResult getNextNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!initialized) { + return getFirst(key, pKey, data, options); + } + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetNextNoDup(options); + endOperation(null, result, null, null, null); + return result; + } + if (range.singleKey) { + result = OpResult.FAILURE; + } else { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetNextNoDup(options); + if (result.isSuccess() && + !range.check(privKey)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + return result; + } + + public OpResult getPrev(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!initialized) { + return getLast(key, pKey, data, options); + } + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetPrev(options); + endOperation(null, result, null, null, null); + return result; + } + if (pkRange != null) { + if (pkRange.beginKey == null) { + result = doGetPrevDup(options); + endOperation(null, result, key, pKey, data); + } else { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetPrevDup(options); + if (result.isSuccess() && + !pkRange.checkBegin(privPKey, true)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + } else if (range.singleKey) { + result = doGetPrevDup(options); + endOperation(null, result, key, pKey, data); + } else { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetPrev(options); + if (result.isSuccess() && + !range.check(privKey)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + return result; + } + + public OpResult getPrevNoDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!initialized) { + return getLast(key, pKey, data, options); + } + if 
(!range.hasBound()) { + setParams(key, pKey, data); + result = doGetPrevNoDup(options); + endOperation(null, result, null, null, null); + return result; + } + if (range.singleKey) { + result = OpResult.FAILURE; + } else { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetPrevNoDup(options); + if (result.isSuccess() && + !range.check(privKey)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } + return result; + } + + public OpResult getSearchKey(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetSearchKey(options); + endOperation(null, result, null, null, null); + return result; + } + if (!range.check(key)) { + result = OpResult.FAILURE; + } else if (pkRange != null) { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + shareData(key, privKey); + result = doGetSearchKey(options); + if (result.isSuccess() && + !pkRange.check(privPKey)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } else { + shareData(key, privKey); + result = doGetSearchKey(options); + endOperation(null, result, key, pKey, data); + } + return result; + } + + public OpResult getSearchBoth(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetSearchBoth(options); + endOperation(null, result, null, null, null); + return result; + } + if (!range.check(key) || + (pkRange != null && !pkRange.check(pKey))) { + result = OpResult.FAILURE; + } else { + shareData(key, privKey); + if (secCursor != null) { + shareData(pKey, privPKey); + } else { + shareData(data, privData); + } + result = doGetSearchBoth(options); + endOperation(null, result, key, pKey, data); + } + return result; + } + + public OpResult getSearchKeyRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result = OpResult.FAILURE; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetSearchKeyRange(options); + endOperation(null, result, null, null, null); + return result; + } + Cursor oldCursor = beginOperation(); + try { + shareData(key, privKey); + result = doGetSearchKeyRange(options); + if (result.isSuccess() && + (!range.check(privKey) || + (pkRange != null && !pkRange.check(pKey)))) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + return result; + } + + public OpResult getSearchBothRange(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result = OpResult.FAILURE; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetSearchBothRange(options); + endOperation(null, result, null, null, null); + return result; + } + Cursor oldCursor = beginOperation(); + try { + shareData(key, privKey); + if (secCursor != null) { + shareData(pKey, privPKey); + } else { + shareData(data, privData); + } + result = doGetSearchBothRange(options); + if (result.isSuccess() && + (!range.check(privKey) || + (pkRange != null && !pkRange.check(pKey)))) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, 
data); + } + return result; + } + + public OpResult getSearchRecordNumber(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + OpResult result; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetSearchRecordNumber(options); + endOperation(null, result, null, null, null); + return result; + } + if (!range.check(key)) { + result = OpResult.FAILURE; + } else { + shareData(key, privKey); + result = doGetSearchRecordNumber(options); + endOperation(null, result, key, pKey, data); + } + return result; + } + + public OpResult getNextDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + if (!initialized) { + throw new IllegalStateException("Cursor not initialized"); + } + OpResult result; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetNextDup(options); + endOperation(null, result, null, null, null); + } else if (pkRange != null && pkRange.endKey != null) { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetNextDup(options); + if (result.isSuccess() && + !pkRange.checkEnd(privPKey, true)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } else { + result = doGetNextDup(options); + endOperation(null, result, key, pKey, data); + } + return result; + } + + public OpResult getPrevDup(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + if (!initialized) { + throw new IllegalStateException("Cursor not initialized"); + } + OpResult result; + if (!range.hasBound()) { + setParams(key, pKey, data); + result = doGetPrevDup(options); + endOperation(null, result, null, null, null); + } else if (pkRange != null && pkRange.beginKey != null) { + result = OpResult.FAILURE; + Cursor oldCursor = beginOperation(); + try { + result = doGetPrevDup(options); + if (result.isSuccess() && + !pkRange.checkBegin(privPKey, true)) { + result = OpResult.FAILURE; + } + } finally { + endOperation(oldCursor, result, key, pKey, data); + } + } else { + result = doGetPrevDup(options); + endOperation(null, result, key, pKey, data); + } + return result; + } + + public OpResult getCurrent(DatabaseEntry key, + DatabaseEntry pKey, + DatabaseEntry data, + OpReadOptions options) + throws DatabaseException { + + if (!initialized) { + throw new IllegalStateException("Cursor not initialized"); + } + if (secCursor != null && pKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + key, pKey, data, Get.CURRENT, options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getCurrent(key, pKey, data, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get(key, data, Get.CURRENT, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getCurrent(key, data, options.getLockMode())); + } + } + + /* + * Pass-thru methods. 
+ */ + + public void close() + throws DatabaseException { + + closeCursor(cursor); + } + + public int count() + throws DatabaseException { + + return cursor.count(); + } + + public OperationStatus delete() + throws DatabaseException { + + return cursor.delete(); + } + + public OperationStatus put(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + + return cursor.put(key, data); + } + + public OperationStatus putNoOverwrite(DatabaseEntry key, + DatabaseEntry data) + throws DatabaseException { + + return cursor.putNoOverwrite(key, data); + } + + public OperationStatus putNoDupData(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + + return cursor.putNoDupData(key, data); + } + + public OperationStatus putCurrent(DatabaseEntry data) + throws DatabaseException { + + return cursor.putCurrent(data); + } + + public OperationStatus putAfter(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + + return DbCompat.putAfter(cursor, key, data); + } + + public OperationStatus putBefore(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + + return DbCompat.putBefore(cursor, key, data); + } + + private OpResult doGetFirst(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.FIRST, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getFirst( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.FIRST, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getFirst(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetLast(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.LAST, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getLast( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.LAST, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getLast(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetNext(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.NEXT, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getNext( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.NEXT, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getNext(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetNextDup(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.NEXT_DUP, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getNextDup( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.NEXT_DUP, 
options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getNextDup(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetNextNoDup(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.NEXT_NO_DUP, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getNextNoDup( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.NEXT_NO_DUP, + options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getNextNoDup(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetPrev(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.PREV, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getPrev( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.PREV, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getPrev(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetPrevDup(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.PREV_DUP, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getPrevDup( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.PREV_DUP, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getPrevDup(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetPrevNoDup(OpReadOptions options) + throws DatabaseException { + + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.PREV_NO_DUP, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getPrevNoDup( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.PREV_NO_DUP, + options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getPrevNoDup(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetSearchKey(OpReadOptions options) + throws DatabaseException { + + if (checkRecordNumber() && DbCompat.getRecordNumber(privKey) <= 0) { + return OpResult.FAILURE; + } + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.SEARCH, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getSearchKey( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.SEARCH, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getSearchKey(privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetSearchKeyRange(OpReadOptions options) + throws DatabaseException { + + if (checkRecordNumber() && 
DbCompat.getRecordNumber(privKey) <= 0) { + return OpResult.FAILURE; + } + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.SEARCH_GTE, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getSearchKeyRange( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.SEARCH_GTE, options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getSearchKeyRange( + privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetSearchBoth(OpReadOptions options) + throws DatabaseException { + + if (checkRecordNumber() && DbCompat.getRecordNumber(privKey) <= 0) { + return OpResult.FAILURE; + } + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.SEARCH_BOTH, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getSearchBoth( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.SEARCH_BOTH, + options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getSearchBoth( + privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetSearchBothRange(OpReadOptions options) + throws DatabaseException { + + if (checkRecordNumber() && DbCompat.getRecordNumber(privKey) <= 0) { + return OpResult.FAILURE; + } + if (secCursor != null && privPKey != null) { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + secCursor.get( + privKey, privPKey, privData, Get.SEARCH_BOTH_GTE, + options.jeOptions)); + } + /* */ + return OpResult.make( + secCursor.getSearchBothRange( + privKey, privPKey, privData, options.getLockMode())); + } else { + /* */ + if (DbCompat.IS_JE) { + return OpResult.make( + cursor.get( + privKey, privData, Get.SEARCH_BOTH_GTE, + options.jeOptions)); + } + /* */ + return OpResult.make( + cursor.getSearchBothRange( + privKey, privData, options.getLockMode())); + } + } + + private OpResult doGetSearchRecordNumber(OpReadOptions options) + throws DatabaseException { + + if (DbCompat.getRecordNumber(privKey) <= 0) { + return OpResult.FAILURE; + } + if (secCursor != null && privPKey != null) { + return OpResult.make( + DbCompat.getSearchRecordNumber( + secCursor, privKey, privPKey, privData, + options.getLockMode())); + } else { + return OpResult.make( + DbCompat.getSearchRecordNumber( + cursor, privKey, privData, options.getLockMode())); + } + } + + /* + * Protected methods for duping and closing cursors. These are overridden + * by the collections API to implement cursor pooling for CDS. + */ + + /** + * Dups the given cursor. + */ + protected Cursor dupCursor(Cursor cursor, boolean samePosition) + throws DatabaseException { + + return cursor.dup(samePosition); + } + + /** + * Closes the given cursor. + */ + protected void closeCursor(Cursor cursor) + throws DatabaseException { + + cursor.close(); + } + + /** + * If the database is a RECNO or QUEUE database, we know its keys are + * record numbers. We treat a non-positive record number as out of bounds, + * that is, we return NOTFOUND rather than throwing + * IllegalArgumentException as would happen if we passed a non-positive + * record number into the DB cursor. This behavior is required by the + * collections interface. 
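+ * <p>
+ * For example, a hypothetical subclass backed by a RECNO database (a
+ * sketch, not part of the original source) would simply enable the check:
+ * <pre>
+ *    protected boolean checkRecordNumber() {
+ *        return true; // keys are record numbers; non-positive means NOTFOUND
+ *    }
+ * </pre>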
+ */ + protected boolean checkRecordNumber() { + return false; + } +} diff --git a/src/com/sleepycat/util/keyrange/package-info.java b/src/com/sleepycat/util/keyrange/package-info.java new file mode 100644 index 0000000..550ec0c --- /dev/null +++ b/src/com/sleepycat/util/keyrange/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Key range cursor operations for use in collections API and DPL. + */ +package com.sleepycat.util.keyrange; diff --git a/src/com/sleepycat/util/package.html b/src/com/sleepycat/util/package.html new file mode 100644 index 0000000..6fd61de --- /dev/null +++ b/src/com/sleepycat/util/package.html @@ -0,0 +1,5 @@ + + +General utilities used throughout Berkeley DB. + + diff --git a/src/com/sleepycat/utilint/ActivityCounter.java b/src/com/sleepycat/utilint/ActivityCounter.java new file mode 100644 index 0000000..f565654 --- /dev/null +++ b/src/com/sleepycat/utilint/ActivityCounter.java @@ -0,0 +1,180 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.utilint; + +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; + +/** + * Tracks the number of operations begun, as a way of measuring level of + * activity. It can display thread dumps if the activity level + * reaches a specified ceiling. + */ +public class ActivityCounter { + + private final AtomicInteger activeCount; + private final AtomicBoolean threadDumpInProgress; + private volatile long lastThreadDumpTime; + private volatile int numCompletedDumps; + private final int activeThreshold; + private final int maxNumDumps; + private final AtomicInteger maxActivity; + + /* + * Thread dumps can only happen this many milliseconds apart, to avoid + * overwhelming the system. + */ + private final long requiredIntervalMillis; + + private final Logger logger; + + private final ExecutorService dumper; + + public ActivityCounter(int activeThreshold, + long requiredIntervalMillis, + int maxNumDumps, + Logger logger) { + + activeCount = new AtomicInteger(0); + threadDumpInProgress = new AtomicBoolean(false); + maxActivity = new AtomicInteger(0); + + this.activeThreshold = activeThreshold; + this.requiredIntervalMillis = requiredIntervalMillis; + this.maxNumDumps = maxNumDumps; + this.logger = logger; + + dumper = Executors.newSingleThreadExecutor(); + } + + /* An operation has started. 
*/ + public void start() { + int numActive = activeCount.incrementAndGet(); + int max = maxActivity.get(); + if (numActive > max) { + maxActivity.compareAndSet(max, numActive); + } + check(numActive); + } + + /* An operation has finished. */ + public void finish() { + activeCount.decrementAndGet(); + } + + public int getAndClearMaxActivity() { + return maxActivity.getAndSet(0); + } + + private boolean intervalIsTooShort() { + /* Don't do a thread dump if the last dump was too recent */ + long interval = System.currentTimeMillis() - lastThreadDumpTime; + return interval < requiredIntervalMillis; + } + + /** + * If the activity level is above a threshold, there is no other thread + * that is dumping now, and a dump hasn't happened for a while, dump + * thread stack traces. + */ + private void check(int numActive) { + + /* Activity is low, no need to do any dumps. */ + if (numActive <= activeThreshold) { + return; + } + + if (numCompletedDumps >= maxNumDumps) { + return; + } + + /* Don't do a thread dump if the last dump was too recent */ + if (intervalIsTooShort()) { + return; + } + + /* There's one in progress. */ + if (threadDumpInProgress.get()) { + return; + } + + /* + * Let's do a dump. The ExecutorService guarantees that all activity + * executes in a single thread, so further serialization and + * synchronization is handled there. + */ + dumper.execute(new GetStackTraces()); + } + + /** + * For unit test support. + */ + public int getNumCompletedDumps() { + return numCompletedDumps; + } + + private class GetStackTraces implements Runnable { + + public void run() { + + if (intervalIsTooShort()) { + return; + } + + if (!threadDumpInProgress.compareAndSet(false, true)) { + logger.warning("Unexpected: ActivityCounter stack trace " + + "dumper saw threadDumpInProgress flag set."); + return; + } + + try { + lastThreadDumpTime = System.currentTimeMillis(); + dumpThreads(); + numCompletedDumps++; + } finally { + boolean reset = threadDumpInProgress.compareAndSet(true, false); + assert reset : "ThreadDump should have been in progress"; + } + } + + private void dumpThreads() { + + int whichDump = numCompletedDumps; + + logger.info("[Dump " + whichDump + + " --Dumping stack traces for all threads]"); + + Map<Thread, StackTraceElement[]> stackTraces = + Thread.getAllStackTraces(); + + for (Map.Entry<Thread, StackTraceElement[]> stme : + stackTraces.entrySet()) { + if (stme.getKey() == Thread.currentThread()) { + continue; + } + logger.info(stme.getKey().toString()); + for (StackTraceElement ste : stme.getValue()) { + logger.info(" " + ste); + } + } + + logger.info("[Dump " + whichDump + " --Thread dump completed]"); + } + } +} + diff --git a/src/com/sleepycat/utilint/Latency.java b/src/com/sleepycat/utilint/Latency.java new file mode 100644 index 0000000..5c553dd --- /dev/null +++ b/src/com/sleepycat/utilint/Latency.java @@ -0,0 +1,223 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
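A minimal usage sketch for the ActivityCounter class above (not part of the original source; the threshold, interval, and dump-count values are arbitrary assumptions):

    import java.util.logging.Logger;

    ActivityCounter counter = new ActivityCounter(
        10,          // dump threads when more than 10 operations are active
        60 * 1000L,  // leave at least 60 seconds between dumps
        3,           // give up after 3 dumps
        Logger.getLogger("example"));

    counter.start();     // bracket each tracked operation
    try {
        // ... perform the operation ...
    } finally {
        counter.finish();
    }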
+ */ + +package com.sleepycat.utilint; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.text.DecimalFormat; + +/** + * A struct holding the min, max, avg, 95th, and 99th percentile measurements + * for the collection of values held in a LatencyStat. + */ +public class Latency implements Serializable, Cloneable { + private static final long serialVersionUID = 1L; + + private static final DecimalFormat FORMAT = + new DecimalFormat("###,###,###,###,###,###,###.##"); + + private int maxTrackedLatencyMillis; + private int min; + private int max; + private float avg; + private int totalOps; + private int percent95; + private int percent99; + + /* + * This field should be called requestsOverflow, but is left opsOverflow + * for serialization compatibility with JE 5.0.69 and earlier. + */ + private int opsOverflow; + + /* + * The totalRequests field was added in JE 5.0.70. When an object + * serialized by JE 5.0.69 or earlier is deserialized here, this field is + * initialized here to 0 by Java and then set equal to totalOps by + * readObject. Setting totalRequests to totalOps is accurate for + * single-op-per-request stats. It is inaccurate for + * multiple-op-per-request stats, but the best we can do with the + * information we have available. + */ + private int totalRequests; + + /** + * Creates a Latency with a maxTrackedLatencyMillis and all fields with + * zero values. + */ + public Latency(int maxTrackedLatencyMillis) { + this.maxTrackedLatencyMillis = maxTrackedLatencyMillis; + } + + public Latency(int maxTrackedLatencyMillis, + int minMillis, + int maxMillis, + float avg, + int totalOps, + int totalRequests, + int percent95, + int percent99, + int requestsOverflow) { + this.maxTrackedLatencyMillis = maxTrackedLatencyMillis; + this.min = minMillis; + this.max = maxMillis; + this.avg = avg; + this.totalOps = totalOps; + this.totalRequests = totalRequests; + this.percent95 = percent95; + this.percent99 = percent99; + this.opsOverflow = requestsOverflow; + } + + /* See totalRequests field. */ + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException { + + in.defaultReadObject(); + + if (totalRequests == 0) { + totalRequests = totalOps; + } + } + + @Override + public Latency clone() { + try { + return (Latency) super.clone(); + } catch (CloneNotSupportedException e) { + /* Should never happen. */ + throw new IllegalStateException(e); + } + } + + @Override + public String toString() { + if (totalOps == 0) { + return "No operations"; + } + + return "maxTrackedLatencyMillis=" + + FORMAT.format(maxTrackedLatencyMillis) + + " totalOps=" + FORMAT.format(totalOps) + + " totalReq=" + FORMAT.format(totalRequests) + + " reqOverflow=" + FORMAT.format(opsOverflow) + + " min=" + FORMAT.format(min) + + " max=" + FORMAT.format(max) + + " avg=" + FORMAT.format(avg) + + " 95%=" + FORMAT.format(percent95) + + " 99%=" + FORMAT.format(percent99); + } + + /** + * @return the number of operations recorded by this stat. + */ + public int getTotalOps() { + return totalOps; + } + + /** + * @return the number of requests recorded by this stat. 
+ */ + public int getTotalRequests() { + return totalRequests; + } + + /** + * @return the number of requests which exceed the max expected latency + */ + public int getRequestsOverflow() { + return opsOverflow; + } + + /** + * @return the max expected latency for this kind of operation + */ + public int getMaxTrackedLatencyMillis() { + return maxTrackedLatencyMillis; + } + + /** + * @return the fastest latency tracked + */ + public int getMin() { + return min; + } + + /** + * @return the slowest latency tracked + */ + public int getMax() { + return max; + } + + /** + * @return the average latency tracked + */ + public float getAvg() { + return avg; + } + + /** + * @return the 95th percentile latency tracked by the histogram + */ + public int get95thPercent() { + return percent95; + } + + /** + * @return the 99th percentile latency tracked by the histogram + */ + public int get99thPercent() { + return percent99; + } + + /** + * Add the measurements from "other" and recalculate the min, max, and + * average values. The 95th and 99th percentile are not recalculated, + * because the histogram from LatencyStat is not available, and those + * values can't be generated. + */ + public void rollup(Latency other) { + if (other == null || other.totalOps == 0 || other.totalRequests == 0) { + throw new IllegalStateException + ("Can't rollup a Latency that doesn't have any data"); + } + + if (maxTrackedLatencyMillis != other.maxTrackedLatencyMillis) { + throw new IllegalStateException + ("Can't rollup a Latency whose maxTrackedLatencyMillis is " + + "different"); + } + + if (min > other.min) { + min = other.min; + } + + if (max < other.max) { + max = other.max; + } + + avg = ((totalRequests * avg) + (other.totalRequests * other.avg)) / + (totalRequests + other.totalRequests); + + /* Clear out 95th and 99th. They have become invalid. */ + percent95 = 0; + percent99 = 0; + + totalOps += other.totalOps; + totalRequests += other.totalRequests; + opsOverflow += other.opsOverflow; + } +} diff --git a/src/com/sleepycat/utilint/LatencyStat.java b/src/com/sleepycat/utilint/LatencyStat.java new file mode 100644 index 0000000..22e459f --- /dev/null +++ b/src/com/sleepycat/utilint/LatencyStat.java @@ -0,0 +1,324 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.utilint; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerArray; +import java.util.concurrent.atomic.AtomicLong; + +/** + * A stat that keeps track of latency in milliseconds and presents average, + * min, max, 95th and 99th percentile values. + */ +public class LatencyStat implements Cloneable { + + private static final long serialVersionUID = 1L; + + /* + * The maximum tracked latency, in milliseconds. This is also the size of + * the configurable array which is used to save latencies. + */ + private final int maxTrackedLatencyMillis; + + private static class Values { + + /* The number of total operations that have been tracked. */ + final AtomicInteger numOps; + + /* The number of total requests that have been tracked. 
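To make the rollup arithmetic above concrete, here is a sketch with arbitrary values, assuming the nine-argument Latency constructor shown earlier (not part of the original source):

    Latency a = new Latency(1000, 1, 9, 4.0f, 300, 300, 8, 9, 0);
    Latency b = new Latency(1000, 2, 5, 2.0f, 100, 100, 4, 5, 0);
    a.rollup(b);
    // avg == (300 * 4.0 + 100 * 2.0) / 400 == 3.5f (request-weighted)
    // min == 1 and max == 9 (widened as needed); the 95th/99th values are
    // zeroed, since the histograms needed to recompute them are gone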
*/ + final AtomicInteger numRequests; + + /* The number of total nanoseconds that have been tracked. */ + final AtomicLong totalNanos; + + /* + * Array is indexed by latency in millis and elements contain the + * number of ops for that latency. + */ + final AtomicIntegerArray histogram; + + /* + * Min and max latency. They may both exceed maxTrackedLatencyMillis. + * A volatile int rather than an AtomicInteger is used because + * AtomicInteger has no min() or max() method, so there is no advantage + * to using it. + */ + volatile int minIncludingOverflow; + volatile int maxIncludingOverflow; + + /* Number of requests whose latency exceed maxTrackedLatencyMillis. */ + final AtomicInteger requestsOverflow; + + Values(final int maxTrackedLatencyMillis) { + histogram = new AtomicIntegerArray(maxTrackedLatencyMillis); + numOps = new AtomicInteger(); + numRequests = new AtomicInteger(); + requestsOverflow = new AtomicInteger(); + totalNanos = new AtomicLong(); + minIncludingOverflow = Integer.MAX_VALUE; + maxIncludingOverflow = 0; + } + } + + /* + * Contains the values tracked by set() and reported by calculate(). + * + * To clear the values, this field is assigned a new instance. This + * prevents uninitialized values when set() and clear() run concurrently. + * Methods that access the values (set and calculate) should assign + * trackedValues to a local var and perform all access using the local var, + * so that clear() will not impact the computation. + * + * Concurrent access by set() and calculate() is handled differently. The + * numOps and numRequests fields are incremented by set() last, and are + * checked first by calculate(). If numOps or numRequests is zero, + * calculate() will return an empty Latency object. If numOps and + * numRequests are non-zero, calculate() may still return latency values + * that are inconsistent, when set() runs concurrently. But at least + * calculate() won't return uninitialized latency values. Without + * synchronizing set(), this is the best we can do. Synchronizing set() + * might introduce contention during CRUD operations. + */ + private volatile Values trackedValues; + + private int saveMin; + private int saveMax; + private float saveAvg; + private int saveNumOps; + private int saveNumRequests; + private int save95; + private int save99; + private int saveRequestsOverflow; + + public LatencyStat(long maxTrackedLatencyMillis) { + this.maxTrackedLatencyMillis = (int) maxTrackedLatencyMillis; + clear(); + } + + public void clear() { + clearInternal(); + } + + /** + * Returns and clears the current stats. + */ + private synchronized Values clearInternal() { + final Values values = trackedValues; + + /* + * Create a new instance to support concurrent access. See {@link + * #trackedValues}. + */ + trackedValues = new Values(maxTrackedLatencyMillis); + + return values; + } + + /** + * Generate the min, max, avg, 95th and 99th percentile for the collected + * measurements. Do not clear the measurement collection. + */ + public Latency calculate() { + return calculate(false); + } + + /** + * Generate the min, max, avg, 95th and 99th percentile for the collected + * measurements, then clear the measurement collection. + */ + public Latency calculateAndClear() { + return calculate(true); + } + + /** + * Calculate may be called on a stat that is concurrently updating, so + * while it has to be thread safe, it's a bit inaccurate when there's + * concurrent activity. That tradeoff is made in order to avoid the cost of + * synchronization during the set() method. 
See {@link #trackedValues}. + */ + private synchronized Latency calculate(boolean clear) { + + /* + * Use a local var to support concurrent access. See {@link + * #trackedValues}. + */ + final Values values = clear ? clearInternal() : trackedValues; + + /* + * Check numOps and numRequests first and return an empty Latency if + * either one is zero. This ensures that we don't report partially + * computed values when they are zero. This works because the other + * values are calculated first by set(), and numOps and numRequests are + * incremented last. + */ + final int totalOps = values.numOps.get(); + final int totalRequests = values.numRequests.get(); + if (totalOps == 0 || totalRequests == 0) { + return new Latency(maxTrackedLatencyMillis); + } + + final long totalNanos = values.totalNanos.get(); + final int nOverflow = values.requestsOverflow.get(); + final int maxIncludingOverflow = values.maxIncludingOverflow; + final int minIncludingOverflow = values.minIncludingOverflow; + + final float avgMs = (float) ((totalNanos * 1e-6) / totalRequests); + + /* + * The 95% and 99% values will be -1 if there are no recorded latencies + * in the histogram. + */ + int percent95 = -1; + int percent99 = -1; + + /* + * Min/max can be inaccurate because of concurrent set() calls, i.e., + * values may be from a mixture of different set() calls. Bound the + * min/max to the average, so they are sensible. + */ + final int avgMsInt = Math.round(avgMs); + int max = Math.max(avgMsInt, maxIncludingOverflow); + int min = Math.min(avgMsInt, minIncludingOverflow); + + final int percent95Count; + final int percent99Count; + final int nTrackedRequests = totalRequests - nOverflow; + if (nTrackedRequests == 1) { + /* For one request, always include it in the 95% and 99%. */ + percent95Count = 1; + percent99Count = 1; + } else { + /* Otherwise truncate: never include the last/highest request. */ + percent95Count = (int) (nTrackedRequests * .95); + percent99Count = (int) (nTrackedRequests * .99); + } + + final int histogramLength = values.histogram.length(); + int numRequestsSeen = 0; + for (int latency = 0; latency < histogramLength; latency++) { + + final int count = values.histogram.get(latency); + + if (count == 0) { + continue; + } + + if (min > latency) { + min = latency; + } + + if (max < latency) { + max = latency; + } + + if (numRequestsSeen < percent95Count) { + percent95 = latency; + } + + if (numRequestsSeen < percent99Count) { + percent99 = latency; + } + + numRequestsSeen += count; + } + + saveMax = max; + saveMin = min; + saveAvg = avgMs; + saveNumOps = totalOps; + saveNumRequests = totalRequests; + save95 = percent95; + save99 = percent99; + saveRequestsOverflow = nOverflow; + + return new Latency(maxTrackedLatencyMillis, saveMin, saveMax, saveAvg, + saveNumOps, saveNumRequests, save95, save99, + saveRequestsOverflow); + } + + /** + * Record a single operation that took place in a request of "nanoLatency" + * nanos. + */ + public void set(long nanoLatency) { + set(1, nanoLatency); + } + + /** + * Record "numRecordedOps" (one or more) operations that took place in a + * single request of "nanoLatency" nanos. + */ + public void set(int numRecordedOps, long nanoLatency) { + + /* ignore negative values [#22466] */ + if (nanoLatency < 0) { + return; + } + + /* + * Use a local var to support concurrent access. See {@link + * #trackedValues}. + */ + final Values values = trackedValues; + + /* Round the latency to determine where to mark the histogram. 
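         * For example, an assumed latency of 1,499,999 ns maps to bucket
         * (1499999 + 500000) / 1000000 == 1, while 1,500,000 ns maps to
         * bucket 2: integer half-up rounding to the nearest millisecond.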
*/ + final int millisRounded = + (int) ((nanoLatency + (1000000L / 2)) / 1000000L); + + /* Record this latency. */ + if (millisRounded >= maxTrackedLatencyMillis) { + values.requestsOverflow.incrementAndGet(); + } else { + values.histogram.incrementAndGet(millisRounded); + } + + /* + * Update the min/max latency if necessary. This is not atomic, so we + * loop to account for lost updates. + */ + while (values.maxIncludingOverflow < millisRounded) { + values.maxIncludingOverflow = millisRounded; + } + while (values.minIncludingOverflow > millisRounded) { + values.minIncludingOverflow = millisRounded; + } + + /* + * Keep a count of latency that is precise enough to record + * sub-millisecond values. + */ + values.totalNanos.addAndGet(nanoLatency); + + /* + * Increment numOps and numRequests last so that calculate() won't use + * other uninitialized values when numOps or numRequests is zero. + */ + values.numOps.addAndGet(numRecordedOps); + values.numRequests.incrementAndGet(); + } + + public boolean isEmpty() { + return (trackedValues.numOps.get() == 0) || + (trackedValues.numRequests.get() == 0); + } + + @Override + public String toString() { + final Latency results = + new Latency(maxTrackedLatencyMillis, saveMin, saveMax, saveAvg, + saveNumRequests, saveNumOps, save95, save99, + saveRequestsOverflow); + return results.toString(); + } +} diff --git a/src/com/sleepycat/utilint/StatLogger.java b/src/com/sleepycat/utilint/StatLogger.java new file mode 100644 index 0000000..fdb85f2 --- /dev/null +++ b/src/com/sleepycat/utilint/StatLogger.java @@ -0,0 +1,276 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.utilint; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.PrintWriter; + +import com.sleepycat.je.EnvironmentFailureException; + +public class StatLogger { + private final File logFile; + private final String fileext; + private final String filename; + private final File logDir; + private int maxFileCount; + private int maxRowCount; + private String header = null; + private String lastVal = null; + private int currentRowCount; + + /** + * StatLogger is used to write to a log file that contains a header followed + * by a set of data rows. Parameters control the size and number of + * rotating log files used. For a rotating set of files, as each file + * reaches a given size limit, it is closed, rotated out, and a new + * file opened. The name of the log file is filename.fileext. Successively + * older files are named by adding "0", "1", "2", etc. into the file name. + * The format is filename.[version number].fileext. + * + * @param logdir Log file directory. + * @param filename Name of the log file. + * @param fileext Extension of the log file. + * @param filecount Maximum number of rotating log files to be saved. + * @param rowcount Maximum number of rows in a log file. + * + * @throws IOException if log file or directory cannot be accessed. 
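A minimal usage sketch for the LatencyStat class that closes above (not part of the original source; the values are arbitrary):

    LatencyStat stat = new LatencyStat(1000);   // track latencies up to 1000 ms

    long start = System.nanoTime();
    // ... perform one request containing two operations ...
    stat.set(2, System.nanoTime() - start);

    Latency snapshot = stat.calculateAndClear();  // summarize, then reset
    System.out.println(snapshot);                 // min/max/avg/95%/99% summary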
+ * @throws IllegalArgumentException if the log directory is not + * a directory or if the log file is not a file. + */ + public StatLogger(File logdir, + String filename, + String fileext, + int filecount, + int rowcount) throws IOException { + + logFile = new File(logdir.getAbsolutePath() + File.separator + + filename + "." + fileext); + this.maxFileCount = filecount - 1; + this.maxRowCount = rowcount; + this.filename = filename; + this.fileext = fileext; + this.logDir = logdir; + if (logFile.exists()) { + + if (!logFile.isFile()) { + throw new IllegalArgumentException( + "Statistic log file " + logFile.getAbsolutePath() + + " exists but is not a file."); + } + + header = getHeader(); + /* set current row count. */ + getLastRow(); + } + } + + /** + * Sets the maximum log file row count. + * + * @param rowcount The maximum number of rows per log file. + */ + public void setRowCount(int rowcount) { + maxRowCount = rowcount; + } + + /** + * Set the maximum number of log files to keep after rotation. + * + * @param filecount The maximum number of log files to keep. + */ + public void setFileCount(int filecount) { + filecount--; + if (maxFileCount > filecount) { + /* remove files that are greater than the new filecount */ + for (int i = maxFileCount; i > filecount; i--) { + File deleme = new File(formFn(i - 2)); + if (deleme.exists()) { + deleme.delete(); + } + } + } + maxFileCount = filecount; + } + + /** + * Sets the log file header. A new log file may be created if + * the header does not match the header in the existing file. + * + * @param val Header row data. + * + * @throws IOException if the log file cannot be accessed. + */ + public void setHeader(String val) throws IOException { + if (!val.equals(header)) { + if (header != null) { + /* file headers are different so rotate files */ + rotateFiles(); + } + currentRowCount++; + write(val); + header = val; + lastVal = null; + } + } + + /** + * log writes the string to the log file. + * + * @param val Value to write to the log. + * @throws IOException + */ + public void log(String val) throws IOException { + if (currentRowCount >= maxRowCount) + { + rotateFiles(); + currentRowCount++; + write(header); + } + currentRowCount++; + write(val); + lastVal = val; + + } + + /** + * logDelta writes the string if the string is different + * than the last written log record. The first column is + * ignored when checking for a difference (the current + * implementation has the time the record is logged as the + * first column). + * + * @param val value to write to the log. 
+ * @throws IOException + */ + public void logDelta(String val) throws IOException { + + if (header == null) { + throw EnvironmentFailureException.unexpectedState( + "Unexpected state setHeader not called before logDelta."); + } + if (lastVal == null) { + lastVal = getLastRow(); + } + String lastNoFirst = null; + if (lastVal != null) { + lastNoFirst = lastVal.substring(lastVal.indexOf(',') + 1); + } + if (!val.substring(val.indexOf(',') + 1).equals(lastNoFirst)) { + log(val); + } + } + + private String getHeader() throws IOException { + String header; + BufferedReader fr = null; + try { + fr = new BufferedReader(new FileReader(logFile)); + header = fr.readLine(); + } catch (FileNotFoundException e) { + throw new IOException( + "Error occurred accessing statistic log file " + + "FileNotFoundException " + + logFile.getAbsolutePath(), e); + } finally { + if (fr != null) { + try { + fr.close(); + } + catch (IOException e) { + /* eat exception */ + } + } + } + + return header; + } + + private String getLastRow() throws IOException { + String row; + BufferedReader fr = null; + currentRowCount = 0; + try { + fr = new BufferedReader(new FileReader(logFile)); + String prevrow = null; + while ((row = fr.readLine()) != null) { + currentRowCount++; + prevrow = row; + } + return prevrow; + + } catch (FileNotFoundException e) { + throw new IOException( + "Error occurred accessing statistic log file " + + "FileNotFoundException " + + logFile.getAbsolutePath(), e); + } finally { + if (fr != null) { + try { + fr.close(); + } + catch (IOException e) { + /* eat exception */ + } + } + } + } + + private String formFn(int version) { + if (version < 0) { + return logDir.getAbsolutePath() + File.separator + + filename + "." + fileext; + } else { + return logDir.getAbsolutePath() + File.separator + filename + + "." + version + "." + fileext; + } + } + + private void write(String val) throws IOException + { + PrintWriter ps = null; + try { + ps = new PrintWriter(new FileWriter(logFile, true)); + ps.println(val); + } catch (FileNotFoundException e) { + throw new IOException( + "Error occurred accessing statistic log file " + + "FileNotFoundException " + + logFile.getAbsolutePath(), e); + } finally { + if (ps != null) { + ps.flush(); + ps.close(); + } + } + } + + private void rotateFiles() { + File cf = new File(formFn(maxFileCount - 1)); + if (cf.exists()) { + cf.delete(); + } + for (int i = maxFileCount - 2; i >= -1; i--) { + cf = new File(formFn(i)); + if (cf.exists()) { + cf.renameTo(new File(formFn(i + 1))); + } + } + currentRowCount = 0; + } +} diff --git a/src/com/sleepycat/utilint/StatsTracker.java b/src/com/sleepycat/utilint/StatsTracker.java new file mode 100644 index 0000000..a856ed6 --- /dev/null +++ b/src/com/sleepycat/utilint/StatsTracker.java @@ -0,0 +1,127 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
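A usage sketch for the StatLogger class that closes above (the file names and limits are arbitrary assumptions, not part of the original source):

    StatLogger stats = new StatLogger(
        new File("/tmp"),    // log directory
        "je.stat", "csv",    // writes je.stat.csv, rotating to je.stat.0.csv, ...
        5,                   // keep at most 5 rotating files
        1000);               // rotate after 1000 rows

    stats.setHeader("time,opsPerSec");  // rotates first if the header changed
    stats.log("12:00:00,42");
    stats.logDelta("12:00:01,42");      // skipped: identical after the time column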
+ */ + +package com.sleepycat.utilint; + +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Logger; + +/** + * Maintain interval and cumulative stats for a given set of operations, as + * well as an ActivityCounter that generates thread dumps if operations take + * too long. The markStart and markFinish methods can be used to bracket each + * tracked operation. + */ +public class StatsTracker<T> { + + /* Latency stats. */ + private final Map<T, LatencyStat> intervalLatencies; + private final Map<T, LatencyStat> cumulativeLatencies; + + /* + * ActivityCounter tracks throughput and dumps thread stacktraces when + * throughput drops. + */ + private final ActivityCounter activityCounter; + + /** + * The logger is used for activity stack traces. + */ + public StatsTracker(T[] opTypes, + Logger stackTraceLogger, + int activeThreadThreshold, + long threadDumpIntervalMillis, + int threadDumpMax, + int maxTrackedLatencyMillis) { + + this.intervalLatencies = new HashMap<T, LatencyStat>(); + this.cumulativeLatencies = new HashMap<T, LatencyStat>(); + + for (T opType : opTypes) { + intervalLatencies.put + (opType, new LatencyStat(maxTrackedLatencyMillis)); + cumulativeLatencies.put + (opType, new LatencyStat(maxTrackedLatencyMillis)); + } + + activityCounter = new ActivityCounter(activeThreadThreshold, + threadDumpIntervalMillis, + threadDumpMax, + stackTraceLogger); + } + + /** + * Track the start of an operation. + * @return the value of System.nanoTime, for passing to markFinish. + */ + public long markStart() { + activityCounter.start(); + return System.nanoTime(); + } + + /** + * Track the end of an operation. + * @param startTime should be the value returned by the corresponding call + * to markStart + */ + public void markFinish(T opType, long startTime) { + markFinish(opType, startTime, 1); + } + + /** + * Track the end of an operation. + * @param startTime should be the value returned by the corresponding call + * to markStart + */ + public void markFinish(T opType, long startTime, int numOperations) { + try { + if (numOperations == 0) { + return; + } + + if (opType != null) { + long elapsed = System.nanoTime() - startTime; + intervalLatencies.get(opType).set(numOperations, elapsed); + cumulativeLatencies.get(opType).set(numOperations, elapsed); + } + } finally { + /* Must be invoked to clear the ActivityCounter stats. */ + activityCounter.finish(); + } + } + + /** + * Should be called after each interval latency stat collection, to reset + * for the next period's collection. + */ + public void clearLatency() { + for (Map.Entry<T, LatencyStat> e : intervalLatencies.entrySet()) { + e.getValue().clear(); + } + } + + public Map<T, LatencyStat> getIntervalLatency() { + return intervalLatencies; + } + + public Map<T, LatencyStat> getCumulativeLatency() { + return cumulativeLatencies; + } + + /** + * For unit test support. + */ + public int getNumCompletedDumps() { + return activityCounter.getNumCompletedDumps(); + } +} diff --git a/src/com/sleepycat/utilint/StringUtils.java b/src/com/sleepycat/utilint/StringUtils.java new file mode 100644 index 0000000..812dc42 --- /dev/null +++ b/src/com/sleepycat/utilint/StringUtils.java @@ -0,0 +1,154 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
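A usage sketch for the StatsTracker class above, assuming an application-defined operation enum (all numeric values are arbitrary; not part of the original source):

    enum Op { READ, WRITE }

    StatsTracker<Op> tracker = new StatsTracker<Op>(
        Op.values(), Logger.getLogger("example"),
        10,            // active-thread threshold for dumps
        60 * 1000L,    // minimum interval between dumps
        3,             // maximum number of dumps
        1000);         // maximum tracked latency in ms

    long start = tracker.markStart();
    // ... perform a read ...
    tracker.markFinish(Op.READ, start);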
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.utilint; + +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CharacterCodingException; + +public class StringUtils { + + private final static Charset ASCII = Charset.forName("US-ASCII"); + private final static Charset UTF8 = Charset.forName("UTF-8"); + + /** + * In all src and test code, the String(byte[], ...) constructor and + * String.getBytes method must always be passed a Charset, to avoid + * portability issues. Otherwise, portability issues will occur when + * running on a JVM platform with a non-western default charset, the + * EBCDIC encoding (on z/OS), etc. [#20967] + *

        + * In most cases, the UTF8 or ASCII charset should be used for portability. + * UTF8 should be used when any character may be represented. ASCII can be + * used when all characters are in the ASCII range. The default charset + * should only be used when handling user-input data directly, e.g., + * console input/output or user-visible files. + *

        + * Rather than passing the charset as a string (getBytes("UTF-8")), the + * Charset objects defined here should be passed (getBytes(UTF8)). Not + * only is using a Charset object slightly more efficient because it avoids + * a lookup, even more importantly it avoids having to clutter code with a + * catch for java.io.UnsupportedEncodingException, which should never be + * thrown for the "UTF-8" or "US-ASCII" charsets. + */ + public static byte[] toUTF8(String str) { + try { + return str.getBytes("UTF-8"); + } catch (UnsupportedEncodingException e) { + /* Should never happen. */ + throw new RuntimeException(e); + } + } + + /** + * @return a buffer with position set to 0 + */ + public static ByteBuffer toUTF8(CharBuffer chars) { + try { + final CharsetEncoder utf8Encoder = UTF8.newEncoder(); + return utf8Encoder.encode(chars); + } catch (CharacterCodingException e) { + // Should never happen. + throw new RuntimeException(e); + } + } + + public static String fromUTF8(byte[] bytes) { + try { + return new String(bytes, "UTF-8"); + } catch (UnsupportedEncodingException e) { + /* Should never happen. */ + throw new RuntimeException(e); + } + } + + public static String fromUTF8(byte[] bytes, int offset, int len) { + try { + return new String(bytes, offset, len, "UTF-8"); + } catch (UnsupportedEncodingException e) { + /* Should never happen. */ + throw new RuntimeException(e); + } + } + + /** + * @return a buffer with position set to 0 + */ + public static CharBuffer fromUTF8(ByteBuffer bytes) { + try { + final CharsetDecoder utf8Decoder = UTF8.newDecoder(); + return utf8Decoder.decode(bytes); + } catch (CharacterCodingException e) { + // Should never happen. + throw new RuntimeException(e); + } + } + + public static byte[] toASCII(String str) { + try { + return str.getBytes("US-ASCII"); + } catch (UnsupportedEncodingException e) { + /* Should never happen. */ + throw new RuntimeException(e); + } + } + + /** + * @return a buffer with position set to 0 + */ + public static ByteBuffer toASCII(CharBuffer chars) { + try { + final CharsetEncoder asciiEncoder = ASCII.newEncoder(); + return asciiEncoder.encode(chars); + } catch (CharacterCodingException e) { + // Should never happen. + throw new RuntimeException(e); + } + } + + public static String fromASCII(byte[] bytes) { + try { + return new String(bytes, "US-ASCII"); + } catch (UnsupportedEncodingException e) { + /* Should never happen. */ + throw new RuntimeException(e); + } + } + + public static String fromASCII(byte[] bytes, int offset, int len) { + try { + return new String(bytes, offset, len, "US-ASCII"); + } catch (UnsupportedEncodingException e) { + /* Should never happen. */ + throw new RuntimeException(e); + } + } + + /** + * @return a buffer with position set to 0 + */ + public static CharBuffer fromASCII(ByteBuffer bytes) { + try { + final CharsetDecoder asciiDecoder = ASCII.newDecoder(); + return asciiDecoder.decode(bytes); + } catch (CharacterCodingException e) { + // Should never happen. + throw new RuntimeException(e); + } + } +} diff --git a/src/com/sleepycat/utilint/package-info.java b/src/com/sleepycat/utilint/package-info.java new file mode 100644 index 0000000..0c4d9b2 --- /dev/null +++ b/src/com/sleepycat/utilint/package-info.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
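A round-trip sketch using the StringUtils helpers above (not part of the original source):

    byte[] utf8 = StringUtils.toUTF8("caf\u00e9");     // portable encode
    String back = StringUtils.fromUTF8(utf8);          // portable decode
    assert back.equals("caf\u00e9");

    ByteBuffer ascii = StringUtils.toASCII(CharBuffer.wrap("abc"));
    // the returned buffer's position is 0, ready for reading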
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +/** + * INTERNAL: Misc utility classes used by JE, BDB and NoSQL DB. + */ +package com.sleepycat.utilint; diff --git a/test/com/sleepycat/bind/serial/test/MarshalledObject.java b/test/com/sleepycat/bind/serial/test/MarshalledObject.java new file mode 100644 index 0000000..40f825a --- /dev/null +++ b/test/com/sleepycat/bind/serial/test/MarshalledObject.java @@ -0,0 +1,132 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial.test; + +import java.io.Serializable; + +import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * @author Mark Hayes + */ +@SuppressWarnings("serial") +public class MarshalledObject + implements Serializable, MarshalledTupleKeyEntity { + + private String data; + private transient String primaryKey; + private String indexKey1; + private String indexKey2; + + public MarshalledObject(String data, String primaryKey, + String indexKey1, String indexKey2) { + this.data = data; + this.primaryKey = primaryKey; + this.indexKey1 = indexKey1; + this.indexKey2 = indexKey2; + } + + public boolean equals(Object o) { + + try { + MarshalledObject other = (MarshalledObject) o; + + return this.data.equals(other.data) && + this.primaryKey.equals(other.primaryKey) && + this.indexKey1.equals(other.indexKey1) && + this.indexKey2.equals(other.indexKey2); + } catch (Exception e) { + return false; + } + } + + public String getData() { + + return data; + } + + public String getPrimaryKey() { + + return primaryKey; + } + + public String getIndexKey1() { + + return indexKey1; + } + + public String getIndexKey2() { + + return indexKey2; + } + + public int expectedKeyLength() { + + return primaryKey.length() + 1; + } + + public void marshalPrimaryKey(TupleOutput keyOutput) { + + keyOutput.writeString(primaryKey); + } + + public void unmarshalPrimaryKey(TupleInput keyInput) { + + primaryKey = keyInput.readString(); + } + + public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) { + + if ("1".equals(keyName)) { + if (indexKey1.length() > 0) { + keyOutput.writeString(indexKey1); + return true; + } else { + return false; + } + } else if ("2".equals(keyName)) { + if (indexKey2.length() > 0) { + keyOutput.writeString(indexKey2); + return true; + } else { + return false; + } + } else { + throw new IllegalArgumentException("Unknown keyName: " + keyName); + } + } + + public boolean nullifyForeignKey(String keyName) { + + if ("1".equals(keyName)) { + if (indexKey1.length() > 0) { + indexKey1 = ""; + return true; + } else { + return false; + } + } else if ("2".equals(keyName)) 
{ + if (indexKey2.length() > 0) { + indexKey2 = ""; + return true; + } else { + return false; + } + } else { + throw new IllegalArgumentException("Unknown keyName: " + keyName); + } + } +} diff --git a/test/com/sleepycat/bind/serial/test/NullClassCatalog.java b/test/com/sleepycat/bind/serial/test/NullClassCatalog.java new file mode 100644 index 0000000..89110ae --- /dev/null +++ b/test/com/sleepycat/bind/serial/test/NullClassCatalog.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.serial.test; + +import java.io.ObjectStreamClass; +import java.math.BigInteger; + +import com.sleepycat.bind.serial.ClassCatalog; + +/** + * NullCatalog is a dummy Catalog implementation that simply + * returns large (8 byte) class IDs so that ObjectOutput + * can be simulated when computing a serialized size. + * + * @author Mark Hayes + */ +class NullClassCatalog implements ClassCatalog { + + private long id = Long.MAX_VALUE; + + public void close() { + } + + public byte[] getClassID(ObjectStreamClass classFormat) { + return BigInteger.valueOf(id--).toByteArray(); + } + + public ObjectStreamClass getClassFormat(byte[] classID) { + return null; // ObjectInput not supported + } + + public ClassLoader getClassLoader() { + return null; + } +} diff --git a/test/com/sleepycat/bind/serial/test/SerialBindingTest.java b/test/com/sleepycat/bind/serial/test/SerialBindingTest.java new file mode 100644 index 0000000..aaf782d --- /dev/null +++ b/test/com/sleepycat/bind/serial/test/SerialBindingTest.java @@ -0,0 +1,313 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
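A sketch of the size-computation use case NullClassCatalog describes above, assuming the SerialOutput and FastOutputStream classes from this code base (the object variable is hypothetical; not part of the original source):

    FastOutputStream fo = new FastOutputStream();
    SerialOutput out = new SerialOutput(fo, new NullClassCatalog());
    out.writeObject(someSerializable);        // someSerializable is hypothetical
    int estimatedSize = fo.getBufferLength(); // bytes written so far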
+ */ + +package com.sleepycat.bind.serial.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.Serializable; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.SerialSerialBinding; +import com.sleepycat.bind.serial.TupleSerialMarshalledBinding; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.test.TestBase; + +/** + * @author Mark Hayes + */ +public class SerialBindingTest extends TestBase { + + private ClassCatalog catalog; + private DatabaseEntry buffer; + private DatabaseEntry keyBuffer; + + @Before + public void setUp() { + + catalog = new TestClassCatalog(); + buffer = new DatabaseEntry(); + keyBuffer = new DatabaseEntry(); + } + + @After + public void tearDown() { + + /* Ensure that GC can cleanup. */ + catalog = null; + buffer = null; + keyBuffer = null; + } + + private void primitiveBindingTest(Object val) { + + Class cls = val.getClass(); + SerialBinding binding = new SerialBinding(catalog, cls); + + binding.objectToEntry(val, buffer); + assertTrue(buffer.getSize() > 0); + + Object val2 = binding.entryToObject(buffer); + assertSame(cls, val2.getClass()); + assertEquals(val, val2); + + Object valWithWrongCls = (cls == String.class) + ? ((Object) new Integer(0)) : ((Object) new String("")); + try { + binding.objectToEntry(valWithWrongCls, buffer); + } catch (IllegalArgumentException expected) {} + } + + @Test + public void testPrimitiveBindings() { + + primitiveBindingTest("abc"); + primitiveBindingTest(new Character('a')); + primitiveBindingTest(new Boolean(true)); + primitiveBindingTest(new Byte((byte) 123)); + primitiveBindingTest(new Short((short) 123)); + primitiveBindingTest(new Integer(123)); + primitiveBindingTest(new Long(123)); + primitiveBindingTest(new Float(123.123)); + primitiveBindingTest(new Double(123.123)); + } + + @Test + public void testNullObjects() { + + SerialBinding binding = new SerialBinding(catalog, null); + buffer.setSize(0); + binding.objectToEntry(null, buffer); + assertTrue(buffer.getSize() > 0); + assertEquals(null, binding.entryToObject(buffer)); + } + + @Test + public void testSerialSerialBinding() { + + SerialBinding keyBinding = new SerialBinding(catalog, String.class); + SerialBinding valueBinding = new SerialBinding(catalog, String.class); + EntityBinding binding = new MySerialSerialBinding(keyBinding, + valueBinding); + + String val = "key#value?indexKey"; + binding.objectToData(val, buffer); + assertTrue(buffer.getSize() > 0); + binding.objectToKey(val, keyBuffer); + assertTrue(keyBuffer.getSize() > 0); + + Object result = binding.entryToObject(keyBuffer, buffer); + assertEquals(val, result); + } + + // also tests TupleSerialBinding since TupleSerialMarshalledBinding extends + // it + @Test + public void testTupleSerialMarshalledBinding() { + + SerialBinding valueBinding = new SerialBinding(catalog, + MarshalledObject.class); + EntityBinding binding = + new TupleSerialMarshalledBinding(valueBinding); + + MarshalledObject val = new MarshalledObject("abc", "primary", + "index1", "index2"); + binding.objectToData(val, buffer); + assertTrue(buffer.getSize() > 0); + binding.objectToKey(val, keyBuffer); + assertEquals(val.expectedKeyLength(), 
keyBuffer.getSize()); + + Object result = binding.entryToObject(keyBuffer, buffer); + assertTrue(result instanceof MarshalledObject); + val = (MarshalledObject) result; + assertEquals("abc", val.getData()); + assertEquals("primary", val.getPrimaryKey()); + assertEquals("index1", val.getIndexKey1()); + assertEquals("index2", val.getIndexKey2()); + } + + @Test + public void testBufferSize() { + + CaptureSizeBinding binding = + new CaptureSizeBinding(catalog, String.class); + + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertEquals(FastOutputStream.DEFAULT_INIT_SIZE, binding.bufSize); + + binding.setSerialBufferSize(1000); + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertEquals(1000, binding.bufSize); + } + + private static class CaptureSizeBinding extends SerialBinding { + + int bufSize; + + CaptureSizeBinding(ClassCatalog classCatalog, Class baseClass) { + super(classCatalog, baseClass); + } + + @Override + public FastOutputStream getSerialOutput(Object object) { + FastOutputStream fos = super.getSerialOutput(object); + bufSize = fos.getBufferBytes().length; + return fos; + } + } + + @Test + public void testBufferOverride() { + + FastOutputStream out = new FastOutputStream(10); + CachedOutputBinding binding = + new CachedOutputBinding(catalog, String.class, out); + + binding.used = false; + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertTrue(binding.used); + + binding.used = false; + binding.objectToEntry("aaaaaaaaaaaaaaaaaaaaaa", buffer); + assertEquals("aaaaaaaaaaaaaaaaaaaaaa", binding.entryToObject(buffer)); + assertTrue(binding.used); + + binding.used = false; + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertTrue(binding.used); + } + + private static class CachedOutputBinding extends SerialBinding { + + FastOutputStream out; + boolean used; + + CachedOutputBinding(ClassCatalog classCatalog, + Class baseClass, + FastOutputStream out) { + super(classCatalog, baseClass); + this.out = out; + } + + @Override + public FastOutputStream getSerialOutput(Object object) { + out.reset(); + used = true; + return out; + } + } + + private static class MySerialSerialBinding extends SerialSerialBinding { + + private MySerialSerialBinding(SerialBinding keyBinding, + SerialBinding valueBinding) { + + super(keyBinding, valueBinding); + } + + @Override + public Object entryToObject(Object keyInput, Object valueInput) { + + return "" + keyInput + '#' + valueInput; + } + + @Override + public Object objectToKey(Object object) { + + String s = (String) object; + int i = s.indexOf('#'); + if (i < 0 || i == s.length() - 1) { + throw new IllegalArgumentException(s); + } else { + return s.substring(0, i); + } + } + + @Override + public Object objectToData(Object object) { + + String s = (String) object; + int i = s.indexOf('#'); + if (i < 0 || i == s.length() - 1) { + throw new IllegalArgumentException(s); + } else { + return s.substring(i + 1); + } + } + } + + /** + * Tests that overriding SerialBinding.getClassLoader is possible. This is + * a crude test because to create a truly working class loader is a large + * undertaking. 
+ */ + @Test + public void testClassloaderOverride() { + DatabaseEntry entry = new DatabaseEntry(); + + SerialBinding binding = new CustomLoaderBinding + (catalog, null, new FailureClassLoader()); + + try { + binding.objectToEntry(new MyClass(), entry); + binding.entryToObject(entry); + fail(); + } catch (RuntimeException e) { + assertTrue(e.getMessage().startsWith("expect failure")); + } + } + + private static class CustomLoaderBinding extends SerialBinding { + + private final ClassLoader loader; + + CustomLoaderBinding(ClassCatalog classCatalog, + Class baseClass, + ClassLoader loader) { + + super(classCatalog, baseClass); + this.loader = loader; + } + + @Override + public ClassLoader getClassLoader() { + return loader; + } + } + + private static class FailureClassLoader extends ClassLoader { + + @Override + public Class loadClass(String name) { + throw new RuntimeException("expect failure: " + name); + } + } + + @SuppressWarnings("serial") + private static class MyClass implements Serializable { + } +} diff --git a/test/com/sleepycat/bind/serial/test/TestClassCatalog.java b/test/com/sleepycat/bind/serial/test/TestClassCatalog.java new file mode 100644 index 0000000..75d8e06 --- /dev/null +++ b/test/com/sleepycat/bind/serial/test/TestClassCatalog.java @@ -0,0 +1,74 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.bind.serial.test; + +import java.io.ObjectStreamClass; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; + +/** + * @author Mark Hayes + */ +public class TestClassCatalog implements ClassCatalog { + + private final Map idToDescMap = + new HashMap(); + private final Map nameToIdMap = + new HashMap(); + private int nextId = 1; + + public TestClassCatalog() { + } + + public void close() { + } + + public synchronized byte[] getClassID(ObjectStreamClass desc) { + String className = desc.getName(); + Integer intId = nameToIdMap.get(className); + if (intId == null) { + intId = nextId; + nextId += 1; + + idToDescMap.put(intId, desc); + nameToIdMap.put(className, intId); + } + DatabaseEntry entry = new DatabaseEntry(); + IntegerBinding.intToEntry(intId, entry); + return entry.getData(); + } + + public synchronized ObjectStreamClass getClassFormat(byte[] byteId) + throws DatabaseException { + + DatabaseEntry entry = new DatabaseEntry(); + entry.setData(byteId); + int intId = IntegerBinding.entryToInt(entry); + + ObjectStreamClass desc = (ObjectStreamClass) idToDescMap.get(intId); + if (desc == null) { + throw new RuntimeException("classID not found"); + } + return desc; + } + + public ClassLoader getClassLoader() { + return null; + } +} diff --git a/test/com/sleepycat/bind/test/BindingSpeedTest.java b/test/com/sleepycat/bind/test/BindingSpeedTest.java new file mode 100644 index 0000000..874acd1 --- /dev/null +++ b/test/com/sleepycat/bind/test/BindingSpeedTest.java @@ -0,0 +1,482 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
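The 4-byte class IDs handed out by TestClassCatalog above round-trip through IntegerBinding; a small sketch (not part of the original source):

    DatabaseEntry entry = new DatabaseEntry();
    IntegerBinding.intToEntry(42, entry);
    byte[] classID = entry.getData();                // what getClassID returns

    entry.setData(classID);
    int decoded = IntegerBinding.entryToInt(entry);  // 42, as in getClassFormat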
+ */ + +package com.sleepycat.bind.test; + +import static org.junit.Assert.assertTrue; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.io.OutputStreamWriter; +import java.io.Serializable; +import java.io.Writer; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.List; + +import javax.xml.parsers.SAXParserFactory; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +import com.sleepycat.bind.serial.SerialInput; +import com.sleepycat.bind.serial.SerialOutput; +import com.sleepycat.bind.serial.test.TestClassCatalog; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.util.FastInputStream; +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class BindingSpeedTest extends TestBase { + + static final String JAVA_UNSHARED = "java-unshared".intern(); + static final String JAVA_SHARED = "java-shared".intern(); + static final String JAVA_EXTERNALIZABLE = "java-externalizable".intern(); + static final String XML_SAX = "xml-sax".intern(); + static final String TUPLE = "tuple".intern(); + static final String REFLECT_METHOD = "reflectMethod".intern(); + static final String REFLECT_FIELD = "reflectField".intern(); + + static final int RUN_COUNT = 1000; + static final boolean VERBOSE = false; + + @Parameters + public static List genParams(){ + + return Arrays.asList(new Object[][]{{JAVA_UNSHARED}, {JAVA_SHARED}, + {JAVA_EXTERNALIZABLE}, {XML_SAX}, + {TUPLE}, {REFLECT_METHOD}, + {REFLECT_FIELD}}); + } + + private String command; + private FastOutputStream fo; + private TupleOutput to; + private TestClassCatalog jtc; + private byte[] buf; + private XMLReader parser; + private Method[] getters; + private Method[] setters; + private Field[] fields; + + public BindingSpeedTest(String name) { + + command = name; + customName = "BindingSpeedTest." 
+ name; + } + + @Test + public void runTest() + throws Exception { + + SharedTestUtils.printTestName(customName); + boolean isTuple = false; + boolean isReflectMethod = false; + boolean isReflectField = false; + boolean isXmlSax = false; + boolean isSerial = false; + boolean isShared = false; + boolean isExternalizable = false; + + if (command == TUPLE) { + isTuple = true; + } else if (command == REFLECT_METHOD) { + isReflectMethod = true; + } else if (command == REFLECT_FIELD) { + isReflectField = true; + } else if (command == XML_SAX) { + isXmlSax = true; + } else if (command == JAVA_UNSHARED) { + isSerial = true; + } else if (command == JAVA_SHARED) { + isSerial = true; + isShared = true; + } else if (command == JAVA_EXTERNALIZABLE) { + isSerial = true; + isShared = true; + isExternalizable = true; + } else { + throw new Exception("invalid command: " + command); + } + + // Do initialization + + if (isTuple) { + initTuple(); + } else if (isReflectMethod) { + initReflectMethod(); + } else if (isReflectField) { + initReflectField(); + } else if (isXmlSax) { + initXmlSax(); + } else if (isSerial) { + if (isShared) { + initSerialShared(); + } else { + initSerialUnshared(); + } + } + + // Prime the Java compiler + + int size = 0; + for (int i = 0; i < RUN_COUNT; i += 1) { + + if (isTuple) { + size = runTuple(); + } else if (isReflectMethod) { + size = runReflectMethod(); + } else if (isReflectField) { + size = runReflectField(); + } else if (isXmlSax) { + size = runXmlSax(); + } else if (isSerial) { + if (isShared) { + if (isExternalizable) { + size = runSerialExternalizable(); + } else { + size = runSerialShared(); + } + } else { + size = runSerialUnshared(); + } + } + } + + // Then run the timing tests + + long startTime = System.currentTimeMillis(); + + for (int i = 0; i < RUN_COUNT; i += 1) { + if (isTuple) { + size = runTuple(); + } else if (isReflectMethod) { + size = runReflectMethod(); + } else if (isReflectField) { + size = runReflectField(); + } else if (isXmlSax) { + size = runXmlSax(); + } else if (isSerial) { + if (isShared) { + if (isExternalizable) { + size = runSerialExternalizable(); + } else { + size = runSerialShared(); + } + } else { + size = runSerialUnshared(); + } + } + } + + long stopTime = System.currentTimeMillis(); + + assertTrue("data size too big", size < 250); + + if (VERBOSE) { + System.out.println(command); + System.out.println("data size: " + size); + System.out.println("run time: " + + ((stopTime - startTime) / (double) RUN_COUNT)); + } + } + + @After + public void tearDown() { + + /* Ensure that GC can cleanup. 
*/ + command = null; + fo = null; + to = null; + jtc = null; + buf = null; + parser = null; + } + + void initSerialUnshared() { + fo = new FastOutputStream(); + } + + int runSerialUnshared() + throws Exception { + + fo.reset(); + ObjectOutputStream oos = new ObjectOutputStream(fo); + oos.writeObject(new Data()); + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + ObjectInputStream ois = new ObjectInputStream(fi); + ois.readObject(); + return bytes.length; + } + + void initSerialShared() { + jtc = new TestClassCatalog(); + fo = new FastOutputStream(); + } + + int runSerialShared() + throws Exception { + + fo.reset(); + SerialOutput oos = new SerialOutput(fo, jtc); + oos.writeObject(new Data()); + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + SerialInput ois = new SerialInput(fi, jtc); + ois.readObject(); + return (bytes.length - SerialOutput.getStreamHeader().length); + } + + int runSerialExternalizable() + throws Exception { + + fo.reset(); + SerialOutput oos = new SerialOutput(fo, jtc); + oos.writeObject(new Data2()); + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + SerialInput ois = new SerialInput(fi, jtc); + ois.readObject(); + return (bytes.length - SerialOutput.getStreamHeader().length); + } + + void initTuple() { + buf = new byte[500]; + to = new TupleOutput(buf); + } + + int runTuple() { + to.reset(); + new Data().writeTuple(to); + + TupleInput ti = new TupleInput( + to.getBufferBytes(), to.getBufferOffset(), + to.getBufferLength()); + new Data().readTuple(ti); + + return to.getBufferLength(); + } + + void initReflectMethod() + throws Exception { + + initTuple(); + + Class cls = Data.class; + + getters = new Method[5]; + getters[0] = cls.getMethod("getField1", new Class[0]); + getters[1] = cls.getMethod("getField2", new Class[0]); + getters[2] = cls.getMethod("getField3", new Class[0]); + getters[3] = cls.getMethod("getField4", new Class[0]); + getters[4] = cls.getMethod("getField5", new Class[0]); + + setters = new Method[5]; + setters[0] = cls.getMethod("setField1", new Class[] {String.class}); + setters[1] = cls.getMethod("setField2", new Class[] {String.class}); + setters[2] = cls.getMethod("setField3", new Class[] {Integer.TYPE}); + setters[3] = cls.getMethod("setField4", new Class[] {Integer.TYPE}); + setters[4] = cls.getMethod("setField5", new Class[] {String.class}); + } + + int runReflectMethod() + throws Exception { + + to.reset(); + Data data = new Data(); + to.writeString((String) getters[0].invoke(data, (Object[]) null)); + to.writeString((String) getters[1].invoke(data, (Object[]) null)); + to.writeInt(((Integer) getters[2].invoke(data, (Object[]) null)).intValue()); + to.writeInt(((Integer) getters[3].invoke(data, (Object[]) null)).intValue()); + to.writeString((String) getters[4].invoke(data, (Object[]) null)); + + TupleInput ti = new TupleInput( + to.getBufferBytes(), to.getBufferOffset(), + to.getBufferLength()); + data = new Data(); + setters[0].invoke(data, new Object[] {ti.readString()}); + setters[1].invoke(data, new Object[] {ti.readString()}); + setters[2].invoke(data, new Object[] {new Integer(ti.readInt())}); + setters[3].invoke(data, new Object[] {new Integer(ti.readInt())}); + setters[4].invoke(data, new Object[] {ti.readString()}); + + return to.getBufferLength(); + } + + void initReflectField() + throws Exception { + + initTuple(); + + Class cls = Data.class; + + fields = new Field[5]; + fields[0] = cls.getField("field1"); + fields[1] = 
cls.getField("field2"); + fields[2] = cls.getField("field3"); + fields[3] = cls.getField("field4"); + fields[4] = cls.getField("field5"); + } + + int runReflectField() + throws Exception { + + to.reset(); + Data data = new Data(); + to.writeString((String) fields[0].get(data)); + to.writeString((String) fields[1].get(data)); + to.writeInt(((Integer) fields[2].get(data)).intValue()); + to.writeInt(((Integer) fields[3].get(data)).intValue()); + to.writeString((String) fields[4].get(data)); + + TupleInput ti = new TupleInput( + to.getBufferBytes(), to.getBufferOffset(), + to.getBufferLength()); + data = new Data(); + fields[0].set(data, ti.readString()); + fields[1].set(data, ti.readString()); + fields[2].set(data, new Integer(ti.readInt())); + fields[3].set(data, new Integer(ti.readInt())); + fields[4].set(data, ti.readString()); + + return to.getBufferLength(); + } + + void initXmlSax() + throws Exception { + + buf = new byte[500]; + fo = new FastOutputStream(); + SAXParserFactory saxFactory = SAXParserFactory.newInstance(); + saxFactory.setNamespaceAware(true); + parser = saxFactory.newSAXParser().getXMLReader(); + } + + int runXmlSax() + throws Exception { + + fo.reset(); + OutputStreamWriter writer = new OutputStreamWriter(fo, "UTF-8"); + new Data().writeXmlText(writer); + + byte[] bytes = fo.toByteArray(); + FastInputStream fi = new FastInputStream(bytes); + InputSource input = new InputSource(new InputStreamReader(fi, "UTF-8")); + parser.parse(input); + + //InputStreamReader reader = new InputStreamReader(fi); + //new Data().readXmlText(??); + + return bytes.length; + } + + static class Data2 extends Data implements Externalizable { + + public Data2() {} + + public void readExternal(ObjectInput in) + throws IOException { + + field1 = in.readUTF(); + field2 = in.readUTF(); + field3 = in.readInt(); + field4 = in.readInt(); + field5 = in.readUTF(); + } + + public void writeExternal(ObjectOutput out) + throws IOException { + + out.writeUTF(field1); + out.writeUTF(field2); + out.writeInt(field3); + out.writeInt(field4); + out.writeUTF(field5); + } + } + + @SuppressWarnings("serial") + static class Data implements Serializable { + + public String field1 = "field1"; + public String field2 = "field2"; + public int field3 = 333; + public int field4 = 444; + public String field5 = "field5"; + + public String getField1() { return field1; } + public String getField2() { return field2; } + public int getField3() { return field3; } + public int getField4() { return field4; } + public String getField5() { return field5; } + + public void setField1(String v) { field1 = v; } + public void setField2(String v) { field2 = v; } + public void setField3(int v) { field3 = v; } + public void setField4(int v) { field4 = v; } + public void setField5(String v) { field5 = v; } + + void readTuple(TupleInput _input) { + + field1 = _input.readString(); + field2 = _input.readString(); + field3 = _input.readInt(); + field4 = _input.readInt(); + field5 = _input.readString(); + } + + void writeTuple(TupleOutput _output) { + + _output.writeString(field1); + _output.writeString(field2); + _output.writeInt(field3); + _output.writeInt(field4); + _output.writeString(field5); + } + + void writeXmlText(Writer writer) throws IOException { + + writer.write(""); + writer.write(""); + writer.write(field1); + writer.write(""); + writer.write(field2); + writer.write(""); + writer.write(String.valueOf(field3)); + writer.write(""); + writer.write(String.valueOf(field4)); + writer.write(""); + writer.write(field5); + writer.write(""); + 
+            writer.flush();
+        }
+    }
+}
diff --git a/test/com/sleepycat/bind/tuple/test/MarshalledObject.java b/test/com/sleepycat/bind/tuple/test/MarshalledObject.java
new file mode 100644
index 0000000..05a3a0a
--- /dev/null
+++ b/test/com/sleepycat/bind/tuple/test/MarshalledObject.java
@@ -0,0 +1,141 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.bind.tuple.test;
+
+import com.sleepycat.bind.tuple.MarshalledTupleEntry;
+import com.sleepycat.bind.tuple.MarshalledTupleKeyEntity;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+
+/**
+ * @author Mark Hayes
+ */
+public class MarshalledObject
+    implements MarshalledTupleEntry, MarshalledTupleKeyEntity {
+
+    private String data;
+    private String primaryKey;
+    private String indexKey1;
+    private String indexKey2;
+
+    public MarshalledObject() {
+    }
+
+    MarshalledObject(String data, String primaryKey,
+                     String indexKey1, String indexKey2) {
+
+        this.data = data;
+        this.primaryKey = primaryKey;
+        this.indexKey1 = indexKey1;
+        this.indexKey2 = indexKey2;
+    }
+
+    String getData() {
+
+        return data;
+    }
+
+    String getPrimaryKey() {
+
+        return primaryKey;
+    }
+
+    String getIndexKey1() {
+
+        return indexKey1;
+    }
+
+    String getIndexKey2() {
+
+        return indexKey2;
+    }
+
+    int expectedDataLength() {
+
+        return data.length() + 1 +
+               indexKey1.length() + 1 +
+               indexKey2.length() + 1;
+    }
+
+    int expectedKeyLength() {
+
+        return primaryKey.length() + 1;
+    }
+
+    public void marshalEntry(TupleOutput dataOutput) {
+
+        dataOutput.writeString(data);
+        dataOutput.writeString(indexKey1);
+        dataOutput.writeString(indexKey2);
+    }
+
+    public void unmarshalEntry(TupleInput dataInput) {
+
+        data = dataInput.readString();
+        indexKey1 = dataInput.readString();
+        indexKey2 = dataInput.readString();
+    }
+
+    public void marshalPrimaryKey(TupleOutput keyOutput) {
+
+        keyOutput.writeString(primaryKey);
+    }
+
+    public void unmarshalPrimaryKey(TupleInput keyInput) {
+
+        primaryKey = keyInput.readString();
+    }
+
+    public boolean marshalSecondaryKey(String keyName, TupleOutput keyOutput) {
+
+        if ("1".equals(keyName)) {
+            if (indexKey1.length() > 0) {
+                keyOutput.writeString(indexKey1);
+                return true;
+            } else {
+                return false;
+            }
+        } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+                keyOutput.writeString(indexKey2);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new IllegalArgumentException("Unknown keyName: " + keyName);
+        }
+    }
+
+    public boolean nullifyForeignKey(String keyName) {
+
+        if ("1".equals(keyName)) {
+            if (indexKey1.length() > 0) {
+                indexKey1 = "";
+                return true;
+            } else {
+                return false;
+            }
+        } else if ("2".equals(keyName)) {
+            if (indexKey2.length() > 0) {
+                indexKey2 = "";
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            throw new IllegalArgumentException("Unknown keyName: " + keyName);
+        }
+    }
+}
diff --git a/test/com/sleepycat/bind/tuple/test/TupleBindingTest.java b/test/com/sleepycat/bind/tuple/test/TupleBindingTest.java
new file mode 100644
index 0000000..8a56c80
--- /dev/null
+++ 
b/test/com/sleepycat/bind/tuple/test/TupleBindingTest.java @@ -0,0 +1,486 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.tuple.BigDecimalBinding; +import com.sleepycat.bind.tuple.BigIntegerBinding; +import com.sleepycat.bind.tuple.BooleanBinding; +import com.sleepycat.bind.tuple.ByteBinding; +import com.sleepycat.bind.tuple.CharacterBinding; +import com.sleepycat.bind.tuple.DoubleBinding; +import com.sleepycat.bind.tuple.FloatBinding; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.bind.tuple.PackedIntegerBinding; +import com.sleepycat.bind.tuple.PackedLongBinding; +import com.sleepycat.bind.tuple.ShortBinding; +import com.sleepycat.bind.tuple.SortedBigDecimalBinding; +import com.sleepycat.bind.tuple.SortedDoubleBinding; +import com.sleepycat.bind.tuple.SortedFloatBinding; +import com.sleepycat.bind.tuple.SortedPackedIntegerBinding; +import com.sleepycat.bind.tuple.SortedPackedLongBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleInputBinding; +import com.sleepycat.bind.tuple.TupleMarshalledBinding; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.bind.tuple.TupleTupleMarshalledBinding; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.util.FastOutputStream; + +/** + * @author Mark Hayes + */ +public class TupleBindingTest { + + private DatabaseEntry buffer; + private DatabaseEntry keyBuffer; + + @Before + public void setUp() { + + buffer = new DatabaseEntry(); + keyBuffer = new DatabaseEntry(); + } + + @After + public void tearDown() { + + /* Ensure that GC can cleanup. */ + buffer = null; + keyBuffer = null; + } + + + private void primitiveBindingTest(Class primitiveCls, Class compareCls, + Object val, int byteSize) { + + TupleBinding binding = TupleBinding.getPrimitiveBinding(primitiveCls); + + /* Test standard object binding. */ + + binding.objectToEntry(val, buffer); + assertEquals(byteSize, buffer.getSize()); + + Object val2 = binding.entryToObject(buffer); + assertSame(compareCls, val2.getClass()); + assertEquals(val, val2); + + Object valWithWrongCls = (primitiveCls == String.class) + ? ((Object) new Integer(0)) : ((Object) new String("")); + try { + binding.objectToEntry(valWithWrongCls, buffer); + } + catch (ClassCastException expected) {} + + /* Test nested tuple binding. 
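+         * A binding that round-trips correctly on its own can still
+         * misbehave when embedded between other tuple fields, so
+         * forMoreCoverageTest writes a string, the bound value, and a
+         * second string into one TupleOutput and reads all three back,
+         * checking that the binding consumes exactly its own bytes.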
*/ + forMoreCoverageTest(binding, val); + } + + private void forMoreCoverageTest(TupleBinding val1,Object val2) { + + TupleOutput output = new TupleOutput(); + output.writeString("abc"); + val1.objectToEntry(val2, output); + output.writeString("xyz"); + + TupleInput input = new TupleInput(output); + assertEquals("abc", input.readString()); + Object val3 = val1.entryToObject(input); + assertEquals("xyz", input.readString()); + + assertEquals(0, input.available()); + assertSame(val2.getClass(), val3.getClass()); + assertEquals(val2, val3); + } + + @Test + public void testPrimitiveBindings() { + + primitiveBindingTest(String.class, String.class, + "abc", 4); + + primitiveBindingTest(Character.class, Character.class, + new Character('a'), 2); + primitiveBindingTest(Boolean.class, Boolean.class, + new Boolean(true), 1); + primitiveBindingTest(Byte.class, Byte.class, + new Byte((byte) 123), 1); + primitiveBindingTest(Short.class, Short.class, + new Short((short) 123), 2); + primitiveBindingTest(Integer.class, Integer.class, + new Integer(123), 4); + primitiveBindingTest(Long.class, Long.class, + new Long(123), 8); + primitiveBindingTest(Float.class, Float.class, + new Float(123.123), 4); + primitiveBindingTest(Double.class, Double.class, + new Double(123.123), 8); + + primitiveBindingTest(Character.TYPE, Character.class, + new Character('a'), 2); + primitiveBindingTest(Boolean.TYPE, Boolean.class, + new Boolean(true), 1); + primitiveBindingTest(Byte.TYPE, Byte.class, + new Byte((byte) 123), 1); + primitiveBindingTest(Short.TYPE, Short.class, + new Short((short) 123), 2); + primitiveBindingTest(Integer.TYPE, Integer.class, + new Integer(123), 4); + primitiveBindingTest(Long.TYPE, Long.class, + new Long(123), 8); + primitiveBindingTest(Float.TYPE, Float.class, + new Float(123.123), 4); + primitiveBindingTest(Double.TYPE, Double.class, + new Double(123.123), 8); + + DatabaseEntry entry = new DatabaseEntry(); + + StringBinding.stringToEntry("abc", entry); + assertEquals(4, entry.getData().length); + assertEquals("abc", StringBinding.entryToString(entry)); + + new StringBinding().objectToEntry("abc", entry); + assertEquals(4, entry.getData().length); + + StringBinding.stringToEntry(null, entry); + assertEquals(2, entry.getData().length); + assertEquals(null, StringBinding.entryToString(entry)); + + new StringBinding().objectToEntry(null, entry); + assertEquals(2, entry.getData().length); + + CharacterBinding.charToEntry('a', entry); + assertEquals(2, entry.getData().length); + assertEquals('a', CharacterBinding.entryToChar(entry)); + + new CharacterBinding().objectToEntry(new Character('a'), entry); + assertEquals(2, entry.getData().length); + + BooleanBinding.booleanToEntry(true, entry); + assertEquals(1, entry.getData().length); + assertEquals(true, BooleanBinding.entryToBoolean(entry)); + + new BooleanBinding().objectToEntry(Boolean.TRUE, entry); + assertEquals(1, entry.getData().length); + + ByteBinding.byteToEntry((byte) 123, entry); + assertEquals(1, entry.getData().length); + assertEquals((byte) 123, ByteBinding.entryToByte(entry)); + + ShortBinding.shortToEntry((short) 123, entry); + assertEquals(2, entry.getData().length); + assertEquals((short) 123, ShortBinding.entryToShort(entry)); + + new ByteBinding().objectToEntry(new Byte((byte) 123), entry); + assertEquals(1, entry.getData().length); + + IntegerBinding.intToEntry(123, entry); + assertEquals(4, entry.getData().length); + assertEquals(123, IntegerBinding.entryToInt(entry)); + + new IntegerBinding().objectToEntry(new Integer(123), 
entry); + assertEquals(4, entry.getData().length); + + LongBinding.longToEntry(123, entry); + assertEquals(8, entry.getData().length); + assertEquals(123, LongBinding.entryToLong(entry)); + + new LongBinding().objectToEntry(new Long(123), entry); + assertEquals(8, entry.getData().length); + + FloatBinding.floatToEntry((float) 123.123, entry); + assertEquals(4, entry.getData().length); + assertTrue(((float) 123.123) == FloatBinding.entryToFloat(entry)); + + new FloatBinding().objectToEntry(new Float((float) 123.123), entry); + assertEquals(4, entry.getData().length); + + DoubleBinding.doubleToEntry(123.123, entry); + assertEquals(8, entry.getData().length); + assertTrue(123.123 == DoubleBinding.entryToDouble(entry)); + + new DoubleBinding().objectToEntry(new Double(123.123), entry); + assertEquals(8, entry.getData().length); + + BigIntegerBinding.bigIntegerToEntry + (new BigInteger("1234567890123456"), entry); + assertEquals(9, entry.getData().length); + assertTrue((new BigInteger("1234567890123456")).equals + (BigIntegerBinding.entryToBigInteger(entry))); + + new BigIntegerBinding().objectToEntry + (new BigInteger("1234567890123456"), entry); + assertEquals(9, entry.getData().length); + forMoreCoverageTest(new BigIntegerBinding(), + new BigInteger("1234567890123456")); + + SortedFloatBinding.floatToEntry((float) 123.123, entry); + assertEquals(4, entry.getData().length); + assertTrue(((float) 123.123) == + SortedFloatBinding.entryToFloat(entry)); + + new SortedFloatBinding().objectToEntry + (new Float((float) 123.123), entry); + assertEquals(4, entry.getData().length); + forMoreCoverageTest(new SortedFloatBinding(), + new Float((float) 123.123)); + + SortedDoubleBinding.doubleToEntry(123.123, entry); + assertEquals(8, entry.getData().length); + assertTrue(123.123 == SortedDoubleBinding.entryToDouble(entry)); + + new SortedDoubleBinding().objectToEntry(new Double(123.123), entry); + assertEquals(8, entry.getData().length); + forMoreCoverageTest(new SortedDoubleBinding(), + new Double(123.123)); + + PackedIntegerBinding.intToEntry(1234, entry); + assertEquals(5, entry.getData().length); + assertTrue(1234 == PackedIntegerBinding.entryToInt(entry)); + + new PackedIntegerBinding().objectToEntry + (new Integer(1234), entry); + assertEquals(5, entry.getData().length); + forMoreCoverageTest(new PackedIntegerBinding(), + new Integer(1234)); + + PackedLongBinding.longToEntry(1234, entry); + assertEquals(9, entry.getData().length); + assertTrue(1234 == PackedLongBinding.entryToLong(entry)); + + new PackedLongBinding().objectToEntry(new Long(1234), entry); + assertEquals(9, entry.getData().length); + forMoreCoverageTest(new PackedLongBinding(), new Long(1234)); + + BigDecimalBinding.bigDecimalToEntry + (new BigDecimal("123456789.123456789"), entry); + assertEquals(5 * 2 + new BigDecimal("123456789.123456789"). 
+ unscaledValue().toByteArray().length, + entry.getData().length); + assertTrue((new BigDecimal("123456789.123456789")).equals + (BigDecimalBinding.entryToBigDecimal(entry))); + + SortedPackedIntegerBinding.intToEntry(1234, entry); + assertEquals(5, entry.getData().length); + assertTrue(1234 == SortedPackedIntegerBinding.entryToInt(entry)); + + new SortedPackedIntegerBinding().objectToEntry + (new Integer(1234), entry); + assertEquals(5, entry.getData().length); + forMoreCoverageTest(new SortedPackedIntegerBinding(), + new Integer(1234)); + + SortedPackedLongBinding.longToEntry(1234, entry); + assertEquals(9, entry.getData().length); + assertTrue(1234 == SortedPackedLongBinding.entryToLong(entry)); + + new SortedPackedLongBinding().objectToEntry(new Long(1234), entry); + assertEquals(9, entry.getData().length); + forMoreCoverageTest(new SortedPackedLongBinding(), new Long(1234)); + + BigDecimalBinding.bigDecimalToEntry + (new BigDecimal("123456789.123456789"), entry); + assertEquals(5 * 2 + new BigDecimal("123456789.123456789"). + unscaledValue().toByteArray().length, + entry.getData().length); + assertTrue((new BigDecimal("123456789.123456789")).equals + (BigDecimalBinding.entryToBigDecimal(entry))); + + new BigDecimalBinding().objectToEntry + (new BigDecimal("123456789.123456789"), entry); + assertEquals(5 * 2 + new BigDecimal("123456789.123456789"). + unscaledValue().toByteArray().length, + entry.getData().length); + forMoreCoverageTest(new BigDecimalBinding(), + new BigDecimal("123456789.123456789")); + + SortedBigDecimalBinding.bigDecimalToEntry + (new BigDecimal("123456789.123456"), entry); + assertEquals(1 + 5 + 5 * 2 + 1, entry.getData().length); + assertEquals(0,(new BigDecimal("123456789.123456")).compareTo + (SortedBigDecimalBinding.entryToBigDecimal(entry))); + + new SortedBigDecimalBinding().objectToEntry + (new BigDecimal("123456789.123456E100"), entry); + assertEquals(1 + 5 + 5 * 2 + 1, entry.getData().length); + forMoreCoverageTest(new SortedBigDecimalBinding(), + new BigDecimal("123456789.123456")); + } + + @Test + public void testTupleInputBinding() { + + EntryBinding binding = new TupleInputBinding(); + + TupleOutput out = new TupleOutput(); + out.writeString("abc"); + binding.objectToEntry(new TupleInput(out), buffer); + assertEquals(4, buffer.getSize()); + + Object result = binding.entryToObject(buffer); + assertTrue(result instanceof TupleInput); + TupleInput in = (TupleInput) result; + assertEquals("abc", in.readString()); + assertEquals(0, in.available()); + } + + // also tests TupleBinding since TupleMarshalledBinding extends it + @Test + public void testTupleMarshalledBinding() { + + EntryBinding binding = + new TupleMarshalledBinding(MarshalledObject.class); + + MarshalledObject val = new MarshalledObject("abc", "", "", ""); + binding.objectToEntry(val, buffer); + assertEquals(val.expectedDataLength(), buffer.getSize()); + + Object result = binding.entryToObject(buffer); + assertTrue(result instanceof MarshalledObject); + val = (MarshalledObject) result; + assertEquals("abc", val.getData()); + } + + // also tests TupleTupleBinding since TupleTupleMarshalledBinding extends + // it + @Test + public void testTupleTupleMarshalledBinding() { + + EntityBinding binding = + new TupleTupleMarshalledBinding(MarshalledObject.class); + + MarshalledObject val = new MarshalledObject("abc", "primary", + "index1", "index2"); + binding.objectToData(val, buffer); + assertEquals(val.expectedDataLength(), buffer.getSize()); + binding.objectToKey(val, keyBuffer); + 
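+        // An EntityBinding splits one entity across two database entries:
+        // objectToData fills the data entry (here the data string plus both
+        // index keys) and objectToKey fills the primary-key entry;
+        // entryToObject below recombines the two entries into one object.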
assertEquals(val.expectedKeyLength(), keyBuffer.getSize()); + + Object result = binding.entryToObject(keyBuffer, buffer); + assertTrue(result instanceof MarshalledObject); + val = (MarshalledObject) result; + assertEquals("abc", val.getData()); + assertEquals("primary", val.getPrimaryKey()); + assertEquals("index1", val.getIndexKey1()); + assertEquals("index2", val.getIndexKey2()); + } + + @Test + public void testBufferSize() { + + CaptureSizeBinding binding = new CaptureSizeBinding(); + + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertEquals(FastOutputStream.DEFAULT_INIT_SIZE, binding.bufSize); + + binding.setTupleBufferSize(1000); + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertEquals(1000, binding.bufSize); + } + + private class CaptureSizeBinding extends TupleBinding { + + int bufSize; + + CaptureSizeBinding() { + super(); + } + + @Override + public TupleOutput getTupleOutput(Object object) { + TupleOutput out = super.getTupleOutput(object); + bufSize = out.getBufferBytes().length; + return out; + } + + @Override + public Object entryToObject(TupleInput input) { + return input.readString(); + } + + @Override + public void objectToEntry(Object object, TupleOutput output) { + assertEquals(bufSize, output.getBufferBytes().length); + output.writeString((String) object); + } + } + + @Test + public void testBufferOverride() { + + TupleOutput out = new TupleOutput(new byte[10]); + CachedOutputBinding binding = new CachedOutputBinding(out); + + binding.used = false; + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertTrue(binding.used); + + binding.used = false; + binding.objectToEntry("aaaaaaaaaaaaaaaaaaaaaa", buffer); + assertEquals("aaaaaaaaaaaaaaaaaaaaaa", binding.entryToObject(buffer)); + assertTrue(binding.used); + + binding.used = false; + binding.objectToEntry("x", buffer); + assertEquals("x", binding.entryToObject(buffer)); + assertTrue(binding.used); + } + + private class CachedOutputBinding extends TupleBinding { + + TupleOutput out; + boolean used; + + CachedOutputBinding(TupleOutput out) { + super(); + this.out = out; + } + + @Override + public TupleOutput getTupleOutput(Object object) { + out.reset(); + used = true; + return out; + } + + @Override + public Object entryToObject(TupleInput input) { + return input.readString(); + } + + @Override + public void objectToEntry(Object object, TupleOutput output) { + assertSame(out, output); + output.writeString((String) object); + } + } +} diff --git a/test/com/sleepycat/bind/tuple/test/TupleFormatTest.java b/test/com/sleepycat/bind/tuple/test/TupleFormatTest.java new file mode 100644 index 0000000..5db8d01 --- /dev/null +++ b/test/com/sleepycat/bind/tuple/test/TupleFormatTest.java @@ -0,0 +1,1203 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.bind.tuple.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Arrays; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.DatabaseEntry; + +/** + * @author Mark Hayes + */ +public class TupleFormatTest { + + private TupleInput in; + private TupleOutput out; + private DatabaseEntry buffer; + + @Before + public void setUp() { + + buffer = new DatabaseEntry(); + out = new TupleOutput(); + } + + @After + public void tearDown() { + + /* Ensure that GC can cleanup. */ + in = null; + out = null; + buffer = null; + } + + private void copyOutputToInput() { + + TupleBinding.outputToEntry(out, buffer); + assertEquals(out.size(), buffer.getSize()); + in = TupleBinding.entryToInput(buffer); + assertEquals(in.available(), buffer.getSize()); + assertEquals(in.getBufferLength(), buffer.getSize()); + } + + private void stringTest(String val) { + + out.reset(); + out.writeString(val); + assertEquals(val.length() + 1, out.size()); // assume 1-byte chars + copyOutputToInput(); + assertEquals(val, in.readString()); + assertEquals(0, in.available()); + } + + @Test + public void testString() { + + stringTest(""); + stringTest("a"); + stringTest("abc"); + + out.reset(); + out.writeString("abc"); + out.writeString("defg"); + assertEquals(9, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString()); + assertEquals("defg", in.readString()); + assertEquals(0, in.available()); + + out.reset(); + out.writeString("abc"); + out.writeString("defg"); + out.writeString("hijkl"); + assertEquals(15, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString()); + assertEquals("defg", in.readString()); + assertEquals("hijkl", in.readString()); + assertEquals(0, in.available()); + } + + private void fixedStringTest(char[] val) { + + out.reset(); + out.writeString(val); + assertEquals(val.length, out.size()); // assume 1 byte chars + copyOutputToInput(); + char[] val2 = new char[val.length]; + in.readString(val2); + assertTrue(Arrays.equals(val, val2)); + assertEquals(0, in.available()); + in.reset(); + String val3 = in.readString(val.length); + assertTrue(Arrays.equals(val, val3.toCharArray())); + assertEquals(0, in.available()); + } + + @Test + public void testFixedString() { + + fixedStringTest(new char[0]); + fixedStringTest(new char[] {'a'}); + fixedStringTest(new char[] {'a', 'b', 'c'}); + + out.reset(); + out.writeString(new char[] {'a', 'b', 'c'}); + out.writeString(new char[] {'d', 'e', 'f', 'g'}); + assertEquals(7, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString(3)); + assertEquals("defg", in.readString(4)); + assertEquals(0, in.available()); + + out.reset(); + out.writeString(new char[] {'a', 'b', 'c'}); + out.writeString(new char[] {'d', 'e', 'f', 'g'}); + out.writeString(new char[] {'h', 'i', 'j', 'k', 'l'}); + assertEquals(12, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readString(3)); + assertEquals("defg", in.readString(4)); + assertEquals("hijkl", in.readString(5)); + assertEquals(0, in.available()); + } + + @Test + public void testNullString() { + + out.reset(); + out.writeString((String) null); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals(null, in.readString()); + 
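+        // The two-byte size asserted above suggests a null String occupies
+        // a reserved marker byte plus the usual zero terminator, keeping it
+        // distinct from "" (one byte) and readable back as null.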
assertEquals(0, in.available()); + + out.reset(); + out.writeString((String) null); + out.writeString("x"); + assertEquals(4, out.size()); + copyOutputToInput(); + assertEquals(null, in.readString()); + assertEquals(2, in.available()); + assertEquals("x", in.readString()); + assertEquals(0, in.available()); + + out.reset(); + out.writeString("x"); + out.writeString((String) null); + assertEquals(4, out.size()); + copyOutputToInput(); + assertEquals("x", in.readString()); + assertEquals(2, in.available()); + assertEquals(null, in.readString()); + assertEquals(0, in.available()); + + out.reset(); + out.writeString((String) null); + out.writeInt(123); + assertEquals(6, out.size()); + copyOutputToInput(); + assertEquals(null, in.readString()); + assertEquals(4, in.available()); + assertEquals(123, in.readInt()); + assertEquals(0, in.available()); + + out.reset(); + out.writeInt(123); + out.writeString((String) null); + assertEquals(6, out.size()); + copyOutputToInput(); + assertEquals(123, in.readInt()); + assertEquals(2, in.available()); + assertEquals(null, in.readString()); + assertEquals(0, in.available()); + } + + private void charsTest(char[] val) { + + for (int mode = 0; mode < 2; mode += 1) { + out.reset(); + switch (mode) { + case 0: out.writeChars(val); break; + case 1: out.writeChars(new String(val)); break; + default: throw new IllegalStateException(); + } + assertEquals(val.length * 2, out.size()); + copyOutputToInput(); + char[] val2 = new char[val.length]; + in.readChars(val2); + assertTrue(Arrays.equals(val, val2)); + assertEquals(0, in.available()); + in.reset(); + String val3 = in.readChars(val.length); + assertTrue(Arrays.equals(val, val3.toCharArray())); + assertEquals(0, in.available()); + } + } + + @Test + public void testChars() { + + charsTest(new char[0]); + charsTest(new char[] {'a'}); + charsTest(new char[] {'a', 'b', 'c'}); + + out.reset(); + out.writeChars("abc"); + out.writeChars("defg"); + assertEquals(7 * 2, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readChars(3)); + assertEquals("defg", in.readChars(4)); + assertEquals(0, in.available()); + + out.reset(); + out.writeChars("abc"); + out.writeChars("defg"); + out.writeChars("hijkl"); + assertEquals(12 * 2, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readChars(3)); + assertEquals("defg", in.readChars(4)); + assertEquals("hijkl", in.readChars(5)); + assertEquals(0, in.available()); + } + + private void bytesTest(char[] val) { + + char[] valBytes = new char[val.length]; + for (int i = 0; i < val.length; i += 1) + valBytes[i] = (char) (val[i] & 0xFF); + + for (int mode = 0; mode < 2; mode += 1) { + out.reset(); + switch (mode) { + case 0: out.writeBytes(val); break; + case 1: out.writeBytes(new String(val)); break; + default: throw new IllegalStateException(); + } + assertEquals(val.length, out.size()); + copyOutputToInput(); + char[] val2 = new char[val.length]; + in.readBytes(val2); + assertTrue(Arrays.equals(valBytes, val2)); + assertEquals(0, in.available()); + in.reset(); + String val3 = in.readBytes(val.length); + assertTrue(Arrays.equals(valBytes, val3.toCharArray())); + assertEquals(0, in.available()); + } + } + + @Test + public void testBytes() { + + bytesTest(new char[0]); + bytesTest(new char[] {'a'}); + bytesTest(new char[] {'a', 'b', 'c'}); + bytesTest(new char[] {0x7F00, 0x7FFF, 0xFF00, 0xFFFF}); + + out.reset(); + out.writeBytes("abc"); + out.writeBytes("defg"); + assertEquals(7, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readBytes(3)); + 
assertEquals("defg", in.readBytes(4)); + assertEquals(0, in.available()); + + out.reset(); + out.writeBytes("abc"); + out.writeBytes("defg"); + out.writeBytes("hijkl"); + assertEquals(12, out.size()); + copyOutputToInput(); + assertEquals("abc", in.readBytes(3)); + assertEquals("defg", in.readBytes(4)); + assertEquals("hijkl", in.readBytes(5)); + assertEquals(0, in.available()); + } + + private void booleanTest(boolean val) { + + out.reset(); + out.writeBoolean(val); + assertEquals(1, out.size()); + copyOutputToInput(); + assertEquals(val, in.readBoolean()); + assertEquals(0, in.available()); + } + + @Test + public void testBoolean() { + + booleanTest(true); + booleanTest(false); + + out.reset(); + out.writeBoolean(true); + out.writeBoolean(false); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals(true, in.readBoolean()); + assertEquals(false, in.readBoolean()); + assertEquals(0, in.available()); + + out.reset(); + out.writeBoolean(true); + out.writeBoolean(false); + out.writeBoolean(true); + assertEquals(3, out.size()); + copyOutputToInput(); + assertEquals(true, in.readBoolean()); + assertEquals(false, in.readBoolean()); + assertEquals(true, in.readBoolean()); + assertEquals(0, in.available()); + } + + private void unsignedByteTest(int val) { + + unsignedByteTest(val, val); + } + + private void unsignedByteTest(int val, int expected) { + + out.reset(); + out.writeUnsignedByte(val); + assertEquals(1, out.size()); + copyOutputToInput(); + assertEquals(expected, in.readUnsignedByte()); + } + + @Test + public void testUnsignedByte() { + + unsignedByteTest(0); + unsignedByteTest(1); + unsignedByteTest(254); + unsignedByteTest(255); + unsignedByteTest(256, 0); + unsignedByteTest(-1, 255); + unsignedByteTest(-2, 254); + unsignedByteTest(-255, 1); + + out.reset(); + out.writeUnsignedByte(0); + out.writeUnsignedByte(1); + out.writeUnsignedByte(255); + assertEquals(3, out.size()); + copyOutputToInput(); + assertEquals(0, in.readUnsignedByte()); + assertEquals(1, in.readUnsignedByte()); + assertEquals(255, in.readUnsignedByte()); + assertEquals(0, in.available()); + } + + private void unsignedShortTest(int val) { + + unsignedShortTest(val, val); + } + + private void unsignedShortTest(int val, int expected) { + + out.reset(); + out.writeUnsignedShort(val); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals(expected, in.readUnsignedShort()); + } + + @Test + public void testUnsignedShort() { + + unsignedShortTest(0); + unsignedShortTest(1); + unsignedShortTest(255); + unsignedShortTest(256); + unsignedShortTest(257); + unsignedShortTest(Short.MAX_VALUE - 1); + unsignedShortTest(Short.MAX_VALUE); + unsignedShortTest(Short.MAX_VALUE + 1); + unsignedShortTest(0xFFFF - 1); + unsignedShortTest(0xFFFF); + unsignedShortTest(0xFFFF + 1, 0); + unsignedShortTest(0x7FFF0000, 0); + unsignedShortTest(0xFFFF0000, 0); + unsignedShortTest(-1, 0xFFFF); + unsignedShortTest(-2, 0xFFFF - 1); + unsignedShortTest(-0xFFFF, 1); + + out.reset(); + out.writeUnsignedShort(0); + out.writeUnsignedShort(1); + out.writeUnsignedShort(0xFFFF); + assertEquals(6, out.size()); + copyOutputToInput(); + assertEquals(0, in.readUnsignedShort()); + assertEquals(1, in.readUnsignedShort()); + assertEquals(0xFFFF, in.readUnsignedShort()); + assertEquals(0, in.available()); + } + + private void unsignedIntTest(long val) { + + unsignedIntTest(val, val); + } + + private void unsignedIntTest(long val, long expected) { + + out.reset(); + out.writeUnsignedInt(val); + assertEquals(4, out.size()); + 
copyOutputToInput(); + assertEquals(expected, in.readUnsignedInt()); + } + + @Test + public void testUnsignedInt() { + + unsignedIntTest(0L); + unsignedIntTest(1L); + unsignedIntTest(255L); + unsignedIntTest(256L); + unsignedIntTest(257L); + unsignedIntTest(Short.MAX_VALUE - 1L); + unsignedIntTest(Short.MAX_VALUE); + unsignedIntTest(Short.MAX_VALUE + 1L); + unsignedIntTest(Integer.MAX_VALUE - 1L); + unsignedIntTest(Integer.MAX_VALUE); + unsignedIntTest(Integer.MAX_VALUE + 1L); + unsignedIntTest(0xFFFFFFFFL - 1L); + unsignedIntTest(0xFFFFFFFFL); + unsignedIntTest(0xFFFFFFFFL + 1L, 0L); + unsignedIntTest(0x7FFFFFFF00000000L, 0L); + unsignedIntTest(0xFFFFFFFF00000000L, 0L); + unsignedIntTest(-1, 0xFFFFFFFFL); + unsignedIntTest(-2, 0xFFFFFFFFL - 1L); + unsignedIntTest(-0xFFFFFFFFL, 1L); + + out.reset(); + out.writeUnsignedInt(0L); + out.writeUnsignedInt(1L); + out.writeUnsignedInt(0xFFFFFFFFL); + assertEquals(12, out.size()); + copyOutputToInput(); + assertEquals(0L, in.readUnsignedInt()); + assertEquals(1L, in.readUnsignedInt()); + assertEquals(0xFFFFFFFFL, in.readUnsignedInt()); + assertEquals(0L, in.available()); + } + + private void byteTest(int val) { + + out.reset(); + out.writeByte(val); + assertEquals(1, out.size()); + copyOutputToInput(); + assertEquals((byte) val, in.readByte()); + } + + @Test + public void testByte() { + + byteTest(0); + byteTest(1); + byteTest(-1); + byteTest(Byte.MAX_VALUE - 1); + byteTest(Byte.MAX_VALUE); + byteTest(Byte.MAX_VALUE + 1); + byteTest(Byte.MIN_VALUE + 1); + byteTest(Byte.MIN_VALUE); + byteTest(Byte.MIN_VALUE - 1); + byteTest(0x7F); + byteTest(0xFF); + byteTest(0x7FFF); + byteTest(0xFFFF); + byteTest(0x7FFFFFFF); + byteTest(0xFFFFFFFF); + + out.reset(); + out.writeByte(0); + out.writeByte(1); + out.writeByte(-1); + assertEquals(3, out.size()); + copyOutputToInput(); + assertEquals(0, in.readByte()); + assertEquals(1, in.readByte()); + assertEquals(-1, in.readByte()); + assertEquals(0, in.available()); + } + + private void shortTest(int val) { + + out.reset(); + out.writeShort(val); + assertEquals(2, out.size()); + copyOutputToInput(); + assertEquals((short) val, in.readShort()); + } + + @Test + public void testShort() { + + shortTest(0); + shortTest(1); + shortTest(-1); + shortTest(Short.MAX_VALUE - 1); + shortTest(Short.MAX_VALUE); + shortTest(Short.MAX_VALUE + 1); + shortTest(Short.MIN_VALUE + 1); + shortTest(Short.MIN_VALUE); + shortTest(Short.MIN_VALUE - 1); + shortTest(0x7F); + shortTest(0xFF); + shortTest(0x7FFF); + shortTest(0xFFFF); + shortTest(0x7FFFFFFF); + shortTest(0xFFFFFFFF); + + out.reset(); + out.writeShort(0); + out.writeShort(1); + out.writeShort(-1); + assertEquals(3 * 2, out.size()); + copyOutputToInput(); + assertEquals(0, in.readShort()); + assertEquals(1, in.readShort()); + assertEquals(-1, in.readShort()); + assertEquals(0, in.available()); + } + + private void intTest(int val) { + + out.reset(); + out.writeInt(val); + assertEquals(4, out.size()); + copyOutputToInput(); + assertEquals(val, in.readInt()); + } + + @Test + public void testInt() { + + intTest(0); + intTest(1); + intTest(-1); + intTest(Integer.MAX_VALUE - 1); + intTest(Integer.MAX_VALUE); + intTest(Integer.MAX_VALUE + 1); + intTest(Integer.MIN_VALUE + 1); + intTest(Integer.MIN_VALUE); + intTest(Integer.MIN_VALUE - 1); + intTest(0x7F); + intTest(0xFF); + intTest(0x7FFF); + intTest(0xFFFF); + intTest(0x7FFFFFFF); + intTest(0xFFFFFFFF); + + out.reset(); + out.writeInt(0); + out.writeInt(1); + out.writeInt(-1); + assertEquals(3 * 4, out.size()); + copyOutputToInput(); + 
assertEquals(0, in.readInt()); + assertEquals(1, in.readInt()); + assertEquals(-1, in.readInt()); + assertEquals(0, in.available()); + } + + private void longTest(long val) { + + out.reset(); + out.writeLong(val); + assertEquals(8, out.size()); + copyOutputToInput(); + assertEquals(val, in.readLong()); + } + + @Test + public void testLong() { + + longTest(0); + longTest(1); + longTest(-1); + longTest(Long.MAX_VALUE - 1); + longTest(Long.MAX_VALUE); + longTest(Long.MAX_VALUE + 1); + longTest(Long.MIN_VALUE + 1); + longTest(Long.MIN_VALUE); + longTest(Long.MIN_VALUE - 1); + longTest(0x7F); + longTest(0xFF); + longTest(0x7FFF); + longTest(0xFFFF); + longTest(0x7FFFFFFF); + longTest(0xFFFFFFFF); + longTest(0x7FFFFFFFFFFFFFFFL); + longTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeLong(0); + out.writeLong(1); + out.writeLong(-1); + assertEquals(3 * 8, out.size()); + copyOutputToInput(); + assertEquals(0, in.readLong()); + assertEquals(1, in.readLong()); + assertEquals(-1, in.readLong()); + assertEquals(0, in.available()); + } + + private void floatTest(double val) { + + out.reset(); + out.writeFloat((float) val); + assertEquals(4, out.size()); + copyOutputToInput(); + if (Double.isNaN(val)) { + assertTrue(Float.isNaN(in.readFloat())); + } else { + assertEquals((float) val, in.readFloat(), 0); + } + } + + @Test + public void testFloat() { + + floatTest(0); + floatTest(1); + floatTest(-1); + floatTest(1.0); + floatTest(0.1); + floatTest(-1.0); + floatTest(-0.1); + floatTest(Float.NaN); + floatTest(Float.NEGATIVE_INFINITY); + floatTest(Float.POSITIVE_INFINITY); + floatTest(Short.MAX_VALUE); + floatTest(Short.MIN_VALUE); + floatTest(Integer.MAX_VALUE); + floatTest(Integer.MIN_VALUE); + floatTest(Long.MAX_VALUE); + floatTest(Long.MIN_VALUE); + floatTest(Float.MAX_VALUE); + floatTest(Float.MAX_VALUE + 1); + floatTest(Float.MIN_VALUE + 1); + floatTest(Float.MIN_VALUE); + floatTest(Float.MIN_VALUE - 1); + floatTest(0x7F); + floatTest(0xFF); + floatTest(0x7FFF); + floatTest(0xFFFF); + floatTest(0x7FFFFFFF); + floatTest(0xFFFFFFFF); + floatTest(0x7FFFFFFFFFFFFFFFL); + floatTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeFloat(0); + out.writeFloat(1); + out.writeFloat(-1); + assertEquals(3 * 4, out.size()); + copyOutputToInput(); + assertEquals(0, in.readFloat(), 0); + assertEquals(1, in.readFloat(), 0); + assertEquals(-1, in.readFloat(), 0); + assertEquals(0, in.available(), 0); + } + + private void doubleTest(double val) { + + out.reset(); + out.writeDouble(val); + assertEquals(8, out.size()); + copyOutputToInput(); + if (Double.isNaN(val)) { + assertTrue(Double.isNaN(in.readDouble())); + } else { + assertEquals(val, in.readDouble(), 0); + } + } + + @Test + public void testDouble() { + + doubleTest(0); + doubleTest(1); + doubleTest(-1); + doubleTest(1.0); + doubleTest(0.1); + doubleTest(-1.0); + doubleTest(-0.1); + doubleTest(Double.NaN); + doubleTest(Double.NEGATIVE_INFINITY); + doubleTest(Double.POSITIVE_INFINITY); + doubleTest(Short.MAX_VALUE); + doubleTest(Short.MIN_VALUE); + doubleTest(Integer.MAX_VALUE); + doubleTest(Integer.MIN_VALUE); + doubleTest(Long.MAX_VALUE); + doubleTest(Long.MIN_VALUE); + doubleTest(Float.MAX_VALUE); + doubleTest(Float.MIN_VALUE); + doubleTest(Double.MAX_VALUE - 1); + doubleTest(Double.MAX_VALUE); + doubleTest(Double.MAX_VALUE + 1); + doubleTest(Double.MIN_VALUE + 1); + doubleTest(Double.MIN_VALUE); + doubleTest(Double.MIN_VALUE - 1); + doubleTest(0x7F); + doubleTest(0xFF); + doubleTest(0x7FFF); + doubleTest(0xFFFF); + doubleTest(0x7FFFFFFF); + 
doubleTest(0xFFFFFFFF); + doubleTest(0x7FFFFFFFFFFFFFFFL); + doubleTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeDouble(0); + out.writeDouble(1); + out.writeDouble(-1); + assertEquals(3 * 8, out.size()); + copyOutputToInput(); + assertEquals(0, in.readDouble(), 0); + assertEquals(1, in.readDouble(), 0); + assertEquals(-1, in.readDouble(), 0); + assertEquals(0, in.available(), 0); + } + + private void sortedFloatTest(double val) { + + out.reset(); + out.writeSortedFloat((float) val); + assertEquals(4, out.size()); + copyOutputToInput(); + if (Double.isNaN(val)) { + assertTrue(Float.isNaN(in.readSortedFloat())); + } else { + assertEquals((float) val, in.readSortedFloat(), 0); + } + } + + @Test + public void testSortedFloat() { + + sortedFloatTest(0); + sortedFloatTest(1); + sortedFloatTest(-1); + sortedFloatTest(1.0); + sortedFloatTest(0.1); + sortedFloatTest(-1.0); + sortedFloatTest(-0.1); + sortedFloatTest(Float.NaN); + sortedFloatTest(Float.NEGATIVE_INFINITY); + sortedFloatTest(Float.POSITIVE_INFINITY); + sortedFloatTest(Short.MAX_VALUE); + sortedFloatTest(Short.MIN_VALUE); + sortedFloatTest(Integer.MAX_VALUE); + sortedFloatTest(Integer.MIN_VALUE); + sortedFloatTest(Long.MAX_VALUE); + sortedFloatTest(Long.MIN_VALUE); + sortedFloatTest(Float.MAX_VALUE); + sortedFloatTest(Float.MAX_VALUE + 1); + sortedFloatTest(Float.MIN_VALUE + 1); + sortedFloatTest(Float.MIN_VALUE); + sortedFloatTest(Float.MIN_VALUE - 1); + sortedFloatTest(0x7F); + sortedFloatTest(0xFF); + sortedFloatTest(0x7FFF); + sortedFloatTest(0xFFFF); + sortedFloatTest(0x7FFFFFFF); + sortedFloatTest(0xFFFFFFFF); + sortedFloatTest(0x7FFFFFFFFFFFFFFFL); + sortedFloatTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeSortedFloat(0); + out.writeSortedFloat(1); + out.writeSortedFloat(-1); + assertEquals(3 * 4, out.size()); + copyOutputToInput(); + assertEquals(0, in.readSortedFloat(), 0); + assertEquals(1, in.readSortedFloat(), 0); + assertEquals(-1, in.readSortedFloat(), 0); + assertEquals(0, in.available(), 0); + } + + private void sortedDoubleTest(double val) { + + out.reset(); + out.writeSortedDouble(val); + assertEquals(8, out.size()); + copyOutputToInput(); + if (Double.isNaN(val)) { + assertTrue(Double.isNaN(in.readSortedDouble())); + } else { + assertEquals(val, in.readSortedDouble(), 0); + } + } + + @Test + public void testSortedDouble() { + + sortedDoubleTest(0); + sortedDoubleTest(1); + sortedDoubleTest(-1); + sortedDoubleTest(1.0); + sortedDoubleTest(0.1); + sortedDoubleTest(-1.0); + sortedDoubleTest(-0.1); + sortedDoubleTest(Double.NaN); + sortedDoubleTest(Double.NEGATIVE_INFINITY); + sortedDoubleTest(Double.POSITIVE_INFINITY); + sortedDoubleTest(Short.MAX_VALUE); + sortedDoubleTest(Short.MIN_VALUE); + sortedDoubleTest(Integer.MAX_VALUE); + sortedDoubleTest(Integer.MIN_VALUE); + sortedDoubleTest(Long.MAX_VALUE); + sortedDoubleTest(Long.MIN_VALUE); + sortedDoubleTest(Float.MAX_VALUE); + sortedDoubleTest(Float.MIN_VALUE); + sortedDoubleTest(Double.MAX_VALUE - 1); + sortedDoubleTest(Double.MAX_VALUE); + sortedDoubleTest(Double.MAX_VALUE + 1); + sortedDoubleTest(Double.MIN_VALUE + 1); + sortedDoubleTest(Double.MIN_VALUE); + sortedDoubleTest(Double.MIN_VALUE - 1); + sortedDoubleTest(0x7F); + sortedDoubleTest(0xFF); + sortedDoubleTest(0x7FFF); + sortedDoubleTest(0xFFFF); + sortedDoubleTest(0x7FFFFFFF); + sortedDoubleTest(0xFFFFFFFF); + sortedDoubleTest(0x7FFFFFFFFFFFFFFFL); + sortedDoubleTest(0xFFFFFFFFFFFFFFFFL); + + out.reset(); + out.writeSortedDouble(0); + out.writeSortedDouble(1); + out.writeSortedDouble(-1); + 
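+        // The sorted variants occupy the same 4 or 8 bytes as the plain
+        // float/double forms; they differ only in bit layout, which is
+        // transformed (conventionally by flipping the sign bit, and all
+        // bits for negatives) so that unsigned byte-wise comparison of the
+        // entries matches numeric order. Only size and round-trip behavior
+        // are checked here.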
assertEquals(3 * 8, out.size()); + copyOutputToInput(); + assertEquals(0, in.readSortedDouble(), 0); + assertEquals(1, in.readSortedDouble(), 0); + assertEquals(-1, in.readSortedDouble(), 0); + assertEquals(0, in.available(), 0); + } + + private void packedIntTest(int val, int size) { + + out.reset(); + out.writePackedInt(val); + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(size, in.getPackedIntByteLength()); + assertEquals(val, in.readPackedInt()); + } + + @Test + public void testPackedInt() { + + /* Exhaustive value testing is in PackedIntTest. */ + packedIntTest(119, 1); + packedIntTest(0xFFFF + 119, 3); + packedIntTest(Integer.MAX_VALUE, 5); + + out.reset(); + out.writePackedInt(119); + out.writePackedInt(0xFFFF + 119); + out.writePackedInt(Integer.MAX_VALUE); + assertEquals(1 + 3 + 5, out.size()); + copyOutputToInput(); + assertEquals(119, in.readPackedInt(), 0); + assertEquals(0xFFFF + 119, in.readPackedInt(), 0); + assertEquals(Integer.MAX_VALUE, in.readPackedInt(), 0); + assertEquals(0, in.available(), 0); + } + + private void packedLongTest(long val, int size) { + + out.reset(); + out.writePackedLong(val); + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(size, in.getPackedLongByteLength()); + assertEquals(val, in.readPackedLong()); + } + + @Test + public void testPackedLong() { + + /* Exhaustive value testing is in PackedIntTest. */ + packedLongTest(119, 1); + packedLongTest(0xFFFFFFFFL + 119, 5); + packedLongTest(Long.MAX_VALUE, 9); + + out.reset(); + out.writePackedLong(119); + out.writePackedLong(0xFFFFFFFFL + 119); + out.writePackedLong(Long.MAX_VALUE); + assertEquals(1 + 5 + 9, out.size()); + copyOutputToInput(); + assertEquals(119, in.readPackedLong(), 0); + assertEquals(0xFFFFFFFFL + 119, in.readPackedLong(), 0); + assertEquals(Long.MAX_VALUE, in.readPackedLong(), 0); + assertEquals(0, in.available(), 0); + } + + private void sortedPackedIntTest(int val, int size) { + + out.reset(); + out.writeSortedPackedInt(val); + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(size, in.getSortedPackedIntByteLength()); + assertEquals(val, in.readSortedPackedInt()); + } + + @Test + public void testSortedPackedInt() { + + /* Exhaustive value testing is in sortedPackedIntTest. 
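+         * The size expectations below trace the format's boundaries: values
+         * in roughly [-119, 120] fit in one byte, each further byte adds
+         * eight bits of payload, and Integer.MIN/MAX_VALUE hit the 5-byte
+         * worst case.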
*/ + sortedPackedIntTest(-1, 1); + sortedPackedIntTest(0, 1); + sortedPackedIntTest(1, 1); + sortedPackedIntTest(-119, 1); + sortedPackedIntTest(120, 1); + sortedPackedIntTest(121, 2); + sortedPackedIntTest(-120, 2); + sortedPackedIntTest(0xFF + 121, 2); + sortedPackedIntTest(0xFFFFFF00 - 119, 2); + sortedPackedIntTest(0xFF + 122, 3); + sortedPackedIntTest(0xFFFFFF00 - 120, 3); + sortedPackedIntTest(0xFFFF + 121, 3); + sortedPackedIntTest(0xFFFF0000 - 119, 3); + sortedPackedIntTest(0xFFFF + 122, 4); + sortedPackedIntTest(0xFFFF0000 - 120, 4); + sortedPackedIntTest(0xFFFFFF + 121, 4); + sortedPackedIntTest(0xFF000000 - 119, 4); + sortedPackedIntTest(0xFFFFFF + 122, 5); + sortedPackedIntTest(0xFF000000 - 120, 5); + sortedPackedIntTest(Integer.MAX_VALUE - 1, 5); + sortedPackedIntTest(Integer.MAX_VALUE, 5); + sortedPackedIntTest(Integer.MAX_VALUE + 1, 5); + sortedPackedIntTest(Integer.MIN_VALUE + 1, 5); + sortedPackedIntTest(Integer.MIN_VALUE, 5); + sortedPackedIntTest(Integer.MIN_VALUE - 1, 5); + + out.reset(); + out.writeSortedPackedInt(120); + out.writeSortedPackedInt(0xFFFF + 121); + out.writeSortedPackedInt(Integer.MAX_VALUE); + assertEquals(1 + 3 + 5, out.size()); + copyOutputToInput(); + assertEquals(120, in.readSortedPackedInt(), 0); + assertEquals(0xFFFF + 121, in.readSortedPackedInt(), 0); + assertEquals(Integer.MAX_VALUE, in.readSortedPackedInt(), 0); + assertEquals(0, in.available(), 0); + } + + private void sortedPackedLongTest(long val, int size) { + + out.reset(); + out.writeSortedPackedLong(val); + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(size, in.getSortedPackedLongByteLength()); + assertEquals(val, in.readSortedPackedLong()); + } + + @Test + public void testSortedPackedLong() { + + /* Exhaustive value testing is in sortedPackedLongTest. 
*/ + sortedPackedLongTest(-1L, 1); + sortedPackedLongTest(0L, 1); + sortedPackedLongTest(1L, 1); + sortedPackedLongTest(-119L, 1); + sortedPackedLongTest(120L, 1); + sortedPackedLongTest(121L, 2); + sortedPackedLongTest(-120L, 2); + sortedPackedLongTest(0xFFL + 121, 2); + sortedPackedLongTest(0xFFFFFFFFFFFFFF00L - 119, 2); + sortedPackedLongTest(0xFFL + 122, 3); + sortedPackedLongTest(0xFFFFFFFFFFFFFF00L - 120, 3); + sortedPackedLongTest(0xFFFFL + 121, 3); + sortedPackedLongTest(0xFFFFFFFFFFFF0000L - 119, 3); + sortedPackedLongTest(0xFFFFL + 122, 4); + sortedPackedLongTest(0xFFFFFFFFFFFF0000L - 120, 4); + sortedPackedLongTest(0xFFFFFFL + 121, 4); + sortedPackedLongTest(0xFFFFFFFFFF000000L - 119, 4); + sortedPackedLongTest(0xFFFFFFL + 122, 5); + sortedPackedLongTest(0xFFFFFFFFFF000000L - 120, 5); + sortedPackedLongTest(0xFFFFFFFFL + 121, 5); + sortedPackedLongTest(0xFFFFFFFF00000000L - 119, 5); + sortedPackedLongTest(0xFFFFFFFFL + 122, 6); + sortedPackedLongTest(0xFFFFFFFF00000000L - 120, 6); + sortedPackedLongTest(0xFFFFFFFFFFL + 121, 6); + sortedPackedLongTest(0xFFFFFF0000000000L - 119, 6); + sortedPackedLongTest(0xFFFFFFFFFFL + 122, 7); + sortedPackedLongTest(0xFFFFFF0000000000L - 120, 7); + sortedPackedLongTest(0xFFFFFFFFFFFFL + 121, 7); + sortedPackedLongTest(0xFFFF000000000000L - 119, 7); + sortedPackedLongTest(0xFFFFFFFFFFFFL + 122, 8); + sortedPackedLongTest(0xFFFF000000000000L - 120, 8); + sortedPackedLongTest(0xFFFFFFFFFFFFFFL + 121, 8); + sortedPackedLongTest(0xFF00000000000000L - 119, 8); + sortedPackedLongTest(Long.MAX_VALUE - 1, 9); + sortedPackedLongTest(Long.MAX_VALUE, 9); + sortedPackedLongTest(Long.MAX_VALUE + 1, 9); + sortedPackedLongTest(Long.MIN_VALUE + 1, 9); + sortedPackedLongTest(Long.MIN_VALUE, 9); + sortedPackedLongTest(Long.MIN_VALUE - 1, 9); + + out.reset(); + out.writeSortedPackedLong(120); + out.writeSortedPackedLong(0xFFFFFFL + 122); + out.writeSortedPackedLong(Long.MAX_VALUE); + assertEquals(1 + 5 + 9, out.size()); + copyOutputToInput(); + assertEquals(120L, in.readSortedPackedLong(), 0); + assertEquals(0xFFFFFFL + 122, in.readSortedPackedLong(), 0); + assertEquals(Long.MAX_VALUE, in.readSortedPackedLong(), 0); + assertEquals(0, in.available(), 0); + } + + private void bigIntegerTest(BigInteger val) { + + out.reset(); + out.writeBigInteger(val); + int size = val.toByteArray().length + 2; + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(size, in.getBigIntegerByteLength()); + assertEquals(val, in.readBigInteger()); + } + + @Test + public void testBigInteger() { + + /* Exhaustive value testing is in bigIntegerTest. 
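The expected size below is the length of the value's two's-complement byte array plus a two-byte length prefix, matching the size computation in bigIntegerTest.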
*/ + bigIntegerTest(BigInteger.valueOf(-1)); + bigIntegerTest(BigInteger.ZERO); + bigIntegerTest(BigInteger.ONE); + bigIntegerTest(BigInteger.TEN); + bigIntegerTest(BigInteger.valueOf(Long.MIN_VALUE)); + bigIntegerTest(BigInteger.valueOf(Long.MAX_VALUE)); + bigIntegerTest(new BigInteger("-11111111111111111111")); + bigIntegerTest(new BigInteger("11111111111111111111")); + + out.reset(); + out.writeBigInteger(BigInteger.ZERO); + out.writeBigInteger(BigInteger.valueOf(Long.MAX_VALUE)); + out.writeBigInteger(BigInteger.valueOf(Long.MIN_VALUE)); + int size = BigInteger.ZERO.toByteArray().length + 2 + + BigInteger.valueOf(Long.MAX_VALUE).toByteArray().length + 2 + + BigInteger.valueOf(Long.MIN_VALUE).toByteArray().length + 2; + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(BigInteger.ZERO, in.readBigInteger()); + assertEquals(BigInteger.valueOf(Long.MAX_VALUE), in.readBigInteger()); + assertEquals(BigInteger.valueOf(Long.MIN_VALUE), in.readBigInteger()); + assertEquals(0, in.available()); + } + + private void bigDecimalTest(BigDecimal val, int scaleLen) { + + out.reset(); + out.writeBigDecimal(val); + BigInteger unscaledVal = val.unscaledValue(); + int lenOfUnscaledValLen = 1; + int unscaledValLen = unscaledVal.toByteArray().length; + int size = scaleLen + lenOfUnscaledValLen + unscaledValLen; + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(size, in.getBigDecimalByteLength()); + assertEquals(val, in.readBigDecimal()); + } + + @Test + public void testBigDecimal() { + + /* Exhaustive value testing is in bigDecimalTest. */ + bigDecimalTest(new BigDecimal("0.0"), 1); + bigDecimalTest(new BigDecimal("0.123456789"), 1); + bigDecimalTest(new BigDecimal("-0.123456789"), 1); + bigDecimalTest(new BigDecimal("12300000000"), 1); + bigDecimalTest(new BigDecimal("-123456789123456789.123456789123456789"), + 1); + bigDecimalTest(new BigDecimal("123456789.123456789E700"), 3); + bigDecimalTest(new BigDecimal(BigInteger.valueOf(Long.MAX_VALUE), + Integer.MAX_VALUE), 5); + bigDecimalTest(new BigDecimal(BigInteger.valueOf(Long.MIN_VALUE), + Integer.MIN_VALUE), 5); + bigDecimalTest(new BigDecimal("-11111111111111111111"), 1); + bigDecimalTest(new BigDecimal("11111111111111111111"), 1); + + out.reset(); + BigDecimal bigDecimal1 = new BigDecimal("0.0"); + BigDecimal bigDecimal2 = new BigDecimal("123456789.123456789E700"); + BigDecimal bigDecimal3 = new + BigDecimal(BigInteger.valueOf(Long.MIN_VALUE), Integer.MIN_VALUE); + out.writeBigDecimal(bigDecimal1); + out.writeBigDecimal(bigDecimal2); + out.writeBigDecimal(bigDecimal3); + int size = bigDecimal1.unscaledValue().toByteArray().length + 1 + 1 + + bigDecimal2.unscaledValue().toByteArray().length + 1 + 3 + + bigDecimal3.unscaledValue().toByteArray().length + 1 + 5; + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(bigDecimal1, in.readBigDecimal()); + assertEquals(bigDecimal2, in.readBigDecimal()); + assertEquals(bigDecimal3, in.readBigDecimal()); + assertEquals(0, in.available()); + } + + private void sortedBigDecimalTest(BigDecimal val, int size) { + + out.reset(); + out.writeSortedBigDecimal(val); + assertEquals(size, out.size()); + copyOutputToInput(); + assertEquals(size, in.getSortedBigDecimalByteLength()); + + /* + * Because the precision of sorted BigDecimal cannot be preserved, we + * use compareTo rather than equals for the comparison. 
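+ * For example, new BigDecimal("1.0") and new BigDecimal("1.00") are unequal under equals because their scales differ, yet compareTo returns 0 for them.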
+ */ + assertEquals(0, val.compareTo(in.readSortedBigDecimal())); + } + + @Test + public void testSortedBigDecimal() { + + /* Exhaustive value testing is in sortedBigDecimalTest. */ + sortedBigDecimalTest(new BigDecimal("0"), 1 + 1 + 1 + 1); + sortedBigDecimalTest(new BigDecimal("-9876543219876"), + 1 + 1 + 5 + 5 + 1); + sortedBigDecimalTest(new BigDecimal("987654321987000"), + 1 + 1 + 5 + 5 + 1); + sortedBigDecimalTest(new BigDecimal("-987654321987.654321987"), + 1 + 1 + 5 + 5 + 5 + 1); + sortedBigDecimalTest(new BigDecimal("987654321.0000654"), + 1 + 1 + 5 + 3 + 1); + sortedBigDecimalTest(new BigDecimal("-0.9876543219876"), + 1 + 1 + 5 + 5 + 1); + sortedBigDecimalTest(new BigDecimal("9876.543210000000009876"), + 1 + 1 + 5 + 1 + 5 + 1); + sortedBigDecimalTest(new BigDecimal("0.0000987654321000"), + 1 + 1 + 5 + 1); + sortedBigDecimalTest(new BigDecimal("123456789.123456789E700"), + 1 + 3 + 5 + 5 + 1); + sortedBigDecimalTest(new BigDecimal("-123456789.123456789E-67000"), + 1 + 4 + 5 + 5 + 1); + + out.reset(); + BigDecimal bigDecimal1 = new BigDecimal("0"); + BigDecimal bigDecimal2 = new BigDecimal("-9876543219876"); + BigDecimal bigDecimal3 = new BigDecimal("-987654321987.654321987"); + out.writeSortedBigDecimal(bigDecimal1); + out.writeSortedBigDecimal(bigDecimal2); + out.writeSortedBigDecimal(bigDecimal3); + int size = 1 + 1 + 1 + 1 + + 1 + 1 + 5 + 5 + 1 + + 1 + 1 + 5 + 5 + 5 + 1; + assertEquals(size, out.size()); + copyOutputToInput(); + + /* + * Because the precision of sorted BigDecimal cannot be preserved, we + * use compareTo rather than equals for the comparison. + */ + assertEquals(0, bigDecimal1.compareTo(in.readSortedBigDecimal())); + assertEquals(0, bigDecimal2.compareTo(in.readSortedBigDecimal())); + assertEquals(0, bigDecimal3.compareTo(in.readSortedBigDecimal())); + assertEquals(0, in.available()); + } +} diff --git a/test/com/sleepycat/bind/tuple/test/TupleOrderingTest.java b/test/com/sleepycat/bind/tuple/test/TupleOrderingTest.java new file mode 100644 index 0000000..70f9185 --- /dev/null +++ b/test/com/sleepycat/bind/tuple/test/TupleOrderingTest.java @@ -0,0 +1,578 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.bind.tuple.test; + +import static org.junit.Assert.fail; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.TupleOutput; + +/** + * @author Mark Hayes + */ +public class TupleOrderingTest { + + private TupleOutput out; + private byte[] prevBuf; + + @Before + public void setUp() { + + out = new TupleOutput(); + prevBuf = null; + } + + @After + public void tearDown() { + + /* Ensure that GC can cleanup. */ + out = null; + prevBuf = null; + } + + /** + * Each tuple written must be strictly greater than (by comparison of + * bytes) the tuple written just before it. The check() method compares + * the bytes just written with the bytes captured by the previous call to + * check(). 
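+ * A typical call sequence from the tests below:
+ *   out.writeInt(1); check(0);  // records the baseline bytes
+ *   out.writeInt(2); check(1);  // passes only if the new bytes sort higher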
+ */ + private void check() { + + check(-1); + } + + private void check(int dataIndex) { + + byte[] buf = new byte[out.size()]; + System.arraycopy(out.getBufferBytes(), out.getBufferOffset(), + buf, 0, buf.length); + if (prevBuf != null) { + int errOffset = -1; + int len = Math.min(prevBuf.length, buf.length); + boolean areEqual = true; + for (int i = 0; i < len; i += 1) { + int val1 = prevBuf[i] & 0xFF; + int val2 = buf[i] & 0xFF; + if (val1 < val2) { + areEqual = false; + break; + } else if (val1 > val2) { + errOffset = i; + break; + } + } + if (areEqual) { + if (prevBuf.length < buf.length) { + areEqual = false; + } else if (prevBuf.length > buf.length) { + areEqual = false; + errOffset = buf.length + 1; + } + } + if (errOffset != -1 || areEqual) { + StringBuilder msg = new StringBuilder(); + if (errOffset != -1) { + msg.append("Left >= right at byte offset " + errOffset); + } else if (areEqual) { + msg.append("Bytes are equal"); + } else { + throw new IllegalStateException(); + } + msg.append("\nLeft hex bytes: "); + for (int i = 0; i < prevBuf.length; i += 1) { + msg.append(' '); + int val = prevBuf[i] & 0xFF; + if ((val & 0xF0) == 0) { + msg.append('0'); + } + msg.append(Integer.toHexString(val)); + } + msg.append("\nRight hex bytes:"); + for (int i = 0; i < buf.length; i += 1) { + msg.append(' '); + int val = buf[i] & 0xFF; + if ((val & 0xF0) == 0) { + msg.append('0'); + } + msg.append(Integer.toHexString(val)); + } + if (dataIndex >= 0) { + msg.append("\nData index: " + dataIndex); + } + fail(msg.toString()); + } + } + prevBuf = buf; + out.reset(); + } + + private void reset() { + + prevBuf = null; + out.reset(); + } + + @Test + public void testString() { + + final String[] DATA = { + "", "\u0001", "\u0002", + "A", "a", "ab", "b", "bb", "bba", + "c", "c\u0001", "d", + new String(new char[] { 0x7F }), + new String(new char[] { 0x7F, 0 }), + new String(new char[] { 0xFF }), + new String(new char[] { Character.MAX_VALUE }), + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeString(DATA[i]); + check(i); + } + reset(); + out.writeString("a"); + check(); + out.writeString("a"); + out.writeString(""); + check(); + out.writeString("a"); + out.writeString(""); + out.writeString("a"); + check(); + out.writeString("a"); + out.writeString("b"); + check(); + out.writeString("aa"); + check(); + out.writeString("b"); + check(); + } + + @Test + public void testFixedString() { + + final char[][] DATA = { + {}, {'a'}, {'a', 'b'}, {'b'}, {'b', 'b'}, {0x7F}, {0xFF}, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeString(DATA[i]); + check(i); + } + } + + @Test + public void testChars() { + + final char[][] DATA = { + {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'}, + {0x7F}, {0x7F, 0}, {0xFF}, {0xFF, 0}, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeChars(DATA[i]); + check(i); + } + } + + @Test + public void testBytes() { + + final char[][] DATA = { + {}, {0}, {'a'}, {'a', 0}, {'a', 'b'}, {'b'}, {'b', 'b'}, + {0x7F}, {0xFF}, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeBytes(DATA[i]); + check(i); + } + } + + @Test + public void testBoolean() { + + final boolean[] DATA = { + false, true + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeBoolean(DATA[i]); + check(i); + } + } + + @Test + public void testUnsignedByte() { + + final int[] DATA = { + 0, 1, 0x7F, 0xFF + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeUnsignedByte(DATA[i]); + check(i); + } + } + + @Test + public void testUnsignedShort() { + + final int[] DATA = { 
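+ // These values cross the one-byte (0xFF) and sign-bit (0x7FFF) boundaries.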
+ 0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeUnsignedShort(DATA[i]); + check(i); + } + } + + @Test + public void testUnsignedInt() { + + final long[] DATA = { + 0, 1, 0xFE, 0xFF, 0x800, 0x7FFF, 0xFFFF, 0x80000, + 0x7FFFFFFF, 0x80000000, 0xFFFFFFFF + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeUnsignedInt(DATA[i]); + check(i); + } + } + + @Test + public void testByte() { + + final byte[] DATA = { + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeByte(DATA[i]); + check(i); + } + } + + @Test + public void testShort() { + + final short[] DATA = { + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeShort(DATA[i]); + check(i); + } + } + + @Test + public void testInt() { + + final int[] DATA = { + Integer.MIN_VALUE, Integer.MIN_VALUE + 1, + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeInt(DATA[i]); + check(i); + } + } + + @Test + public void testLong() { + + final long[] DATA = { + Long.MIN_VALUE, Long.MIN_VALUE + 1, + Integer.MIN_VALUE, Integer.MIN_VALUE + 1, + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + Long.MAX_VALUE - 1, Long.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeLong(DATA[i]); + check(i); + } + } + + @Test + public void testFloat() { + + // Only positive floats and doubles are ordered deterministically + + final float[] DATA = { + 0, Float.MIN_VALUE, 2 * Float.MIN_VALUE, + (float) 0.01, (float) 0.02, (float) 0.99, + 1, (float) 1.01, (float) 1.02, (float) 1.99, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE, + Long.MAX_VALUE / 2, Long.MAX_VALUE, + Float.MAX_VALUE, + Float.POSITIVE_INFINITY, + Float.NaN, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeFloat(DATA[i]); + check(i); + } + } + + @Test + public void testDouble() { + + // Only positive floats and doubles are ordered deterministically + + final double[] DATA = { + 0, Double.MIN_VALUE, 2 * Double.MIN_VALUE, + 0.001, 0.002, 0.999, + 1, 1.001, 1.002, 1.999, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + Long.MAX_VALUE / 2, Long.MAX_VALUE, + Float.MAX_VALUE, Double.MAX_VALUE, + Double.POSITIVE_INFINITY, + Double.NaN, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeDouble(DATA[i]); + check(i); + } + } + + @Test + public void testSortedFloat() { + + final float[] DATA = { + Float.NEGATIVE_INFINITY, + (- Float.MAX_VALUE), + Long.MIN_VALUE, + Long.MIN_VALUE / 2, + Integer.MIN_VALUE, + Short.MIN_VALUE, + Short.MIN_VALUE + 1, + Byte.MIN_VALUE, + Byte.MIN_VALUE + 1, + (float) -1.99, + (float) -1.02, + (float) -1.01, + -1, + (float) -0.99, + (float) -0.02, + (float) -0.01, + 2 * (- Float.MIN_VALUE), + (- Float.MIN_VALUE), + 0, + Float.MIN_VALUE, + 2 * Float.MIN_VALUE, + (float) 0.01, + (float) 0.02, + (float) 0.99, + 1, + (float) 
1.01, + (float) 1.02, + (float) 1.99, + Byte.MAX_VALUE - 1, + Byte.MAX_VALUE, + Short.MAX_VALUE - 1, + Short.MAX_VALUE, + Integer.MAX_VALUE, + Long.MAX_VALUE / 2, + Long.MAX_VALUE, + Float.MAX_VALUE, + Float.POSITIVE_INFINITY, + Float.NaN, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeSortedFloat(DATA[i]); + check(i); + } + } + + @Test + public void testSortedDouble() { + + final double[] DATA = { + Double.NEGATIVE_INFINITY, + (- Double.MAX_VALUE), + (- Float.MAX_VALUE), + Long.MIN_VALUE, + Long.MIN_VALUE / 2, + Integer.MIN_VALUE, + Short.MIN_VALUE, + Short.MIN_VALUE + 1, + Byte.MIN_VALUE, + Byte.MIN_VALUE + 1, + -1.999, + -1.002, + -1.001, + -1, + -0.999, + -0.002, + -0.001, + 2 * (- Double.MIN_VALUE), + (- Double.MIN_VALUE), + 0, + Double.MIN_VALUE, + 2 * Double.MIN_VALUE, + 0.001, + 0.002, + 0.999, + 1, + 1.001, + 1.002, + 1.999, + Byte.MAX_VALUE - 1, + Byte.MAX_VALUE, + Short.MAX_VALUE - 1, + Short.MAX_VALUE, + Integer.MAX_VALUE - 1, + Integer.MAX_VALUE, + Long.MAX_VALUE / 2, + Long.MAX_VALUE, + Float.MAX_VALUE, + Double.MAX_VALUE, + Double.POSITIVE_INFINITY, + Double.NaN, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeSortedDouble(DATA[i]); + check(i); + } + } + + @Test + public void testPackedIntAndLong() { + /* Only packed int/long values from 0 to 630 are ordered correctly */ + for (int i = 0; i <= 630; i += 1) { + out.writePackedInt(i); + check(i); + } + reset(); + for (int i = 0; i <= 630; i += 1) { + out.writePackedLong(i); + check(i); + } + } + + @Test + public void testSortedPackedInt() { + final int[] DATA = { + Integer.MIN_VALUE, Integer.MIN_VALUE + 1, + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeSortedPackedInt(DATA[i]); + check(i); + } + } + + @Test + public void testSortedPackedLong() { + final long[] DATA = { + Long.MIN_VALUE, Long.MIN_VALUE + 1, + Integer.MIN_VALUE, Integer.MIN_VALUE + 1, + Short.MIN_VALUE, Short.MIN_VALUE + 1, + Byte.MIN_VALUE, Byte.MIN_VALUE + 1, + -1, 0, 1, + Byte.MAX_VALUE - 1, Byte.MAX_VALUE, + Short.MAX_VALUE - 1, Short.MAX_VALUE, + Integer.MAX_VALUE - 1, Integer.MAX_VALUE, + Long.MAX_VALUE - 1, Long.MAX_VALUE, + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeSortedPackedLong(DATA[i]); + check(i); + } + } + + @Test + public void testBigInteger() { + final BigInteger[] DATA = { + new BigInteger("-1111111111111111111111111"), + new BigInteger("-11111111111111111111"), + BigInteger.valueOf(Long.MIN_VALUE), + BigInteger.valueOf(Long.MIN_VALUE + 1), + BigInteger.valueOf(Integer.MIN_VALUE), + BigInteger.valueOf(Integer.MIN_VALUE + 1), + BigInteger.valueOf(Short.MIN_VALUE), + BigInteger.valueOf(Short.MIN_VALUE + 1), + BigInteger.valueOf(Byte.MIN_VALUE), + BigInteger.valueOf(Byte.MIN_VALUE + 1), + BigInteger.valueOf(-1), + BigInteger.ZERO, BigInteger.ONE, + BigInteger.valueOf(Byte.MAX_VALUE - 1), + BigInteger.valueOf(Byte.MAX_VALUE), + BigInteger.valueOf(Short.MAX_VALUE - 1), + BigInteger.valueOf(Short.MAX_VALUE), + BigInteger.valueOf(Integer.MAX_VALUE - 1), + BigInteger.valueOf(Integer.MAX_VALUE), + BigInteger.valueOf(Long.MAX_VALUE - 1), + BigInteger.valueOf(Long.MAX_VALUE), + new BigInteger("11111111111111111111"), + new BigInteger("1111111111111111111111111"), + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeBigInteger(DATA[i]); + check(i); + } + } + + @Test + public void 
testSortedBigDecimal() { + final BigDecimal[] DATA = { + new BigDecimal(BigInteger.valueOf(Long.MIN_VALUE), + Short.MIN_VALUE), + new BigDecimal("-9999999999999999999.9999999999999999999"), + new BigDecimal("-123456789.123456789"), + new BigDecimal("-0.9999999999999999999999999999999999999"), + new BigDecimal("-123456789.123456789E-700"), + new BigDecimal("-123456789.123456789E-6700"), + new BigDecimal(BigInteger.valueOf(Long.MIN_VALUE), + Short.MAX_VALUE), + new BigDecimal("0.0"), + new BigDecimal(BigInteger.valueOf(Long.MAX_VALUE), + Short.MAX_VALUE), + new BigDecimal("0.9999999999999999999999999999999999999"), + new BigDecimal("123456789.123456789"), + new BigDecimal("9999999999999999999.9999999999999999999"), + new BigDecimal("123456789.123456789E700"), + new BigDecimal("123456789.123456789E6700"), + new BigDecimal(BigInteger.valueOf(Long.MAX_VALUE), + Short.MIN_VALUE), + }; + for (int i = 0; i < DATA.length; i += 1) { + out.writeSortedBigDecimal(DATA[i]); + check(i); + } + } +} diff --git a/test/com/sleepycat/collections/KeyRangeTest.java b/test/com/sleepycat/collections/KeyRangeTest.java new file mode 100644 index 0000000..10ac7fc --- /dev/null +++ b/test/com/sleepycat/collections/KeyRangeTest.java @@ -0,0 +1,426 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Comparator; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.ByteArrayBinding; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.keyrange.KeyRange; +import com.sleepycat.util.keyrange.KeyRangeException; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * @author Mark Hayes + */ +public class KeyRangeTest extends TestBase { + + private static boolean VERBOSE = false; + + private static final byte FF = (byte) 0xFF; + + private static final byte[][] KEYS = { + /* 0 */ {1}, + /* 1 */ {FF}, + /* 2 */ {FF, 0}, + /* 3 */ {FF, 0x7F}, + /* 4 */ {FF, FF}, + /* 5 */ {FF, FF, 0}, + /* 6 */ {FF, FF, 0x7F}, + /* 7 */ {FF, FF, FF}, + }; + private static byte[][] EXTREME_KEY_BYTES = { + /* 0 */ {0}, + /* 1 */ {FF, FF, FF, FF}, + }; + + private Environment env; + private Database store; + private DataView view; + private DataCursor cursor; + + private void openDb(Comparator comparator) + throws Exception { + + File dir = SharedTestUtils.getNewDir(); + ByteArrayBinding dataBinding = new ByteArrayBinding(); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + DbCompat.setInitializeCache(envConfig, true); + 
env = new Environment(dir, envConfig); + DatabaseConfig dbConfig = new DatabaseConfig(); + DbCompat.setTypeBtree(dbConfig); + dbConfig.setAllowCreate(true); + if (comparator != null) { + DbCompat.setBtreeComparator(dbConfig, comparator); + } + store = DbCompat.testOpenDatabase + (env, null, "test.db", null, dbConfig); + view = new DataView(store, dataBinding, dataBinding, null, true, null); + } + + private void closeDb() + throws Exception { + + store.close(); + store = null; + env.close(); + env = null; + } + + @After + public void tearDown() { + try { + if (store != null) { + store.close(); + } + } catch (Exception e) { + System.out.println("Exception ignored during close: " + e); + } + try { + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Exception ignored during close: " + e); + } + /* Ensure that GC can cleanup. */ + env = null; + store = null; + view = null; + cursor = null; + } + + @Test + public void testScan() throws Exception { + openDb(null); + doScan(false); + closeDb(); + } + + @Test + public void testScanComparator() throws Exception { + openDb(new ReverseComparator()); + doScan(true); + closeDb(); + } + + private void doScan(boolean reversed) throws Exception { + + byte[][] keys = new byte[KEYS.length][]; + final int end = KEYS.length - 1; + cursor = new DataCursor(view, true); + for (int i = 0; i <= end; i++) { + keys[i] = KEYS[i]; + cursor.put(keys[i], KEYS[i], null, false); + } + cursor.close(); + byte[][] extremeKeys = new byte[EXTREME_KEY_BYTES.length][]; + for (int i = 0; i < extremeKeys.length; i++) { + extremeKeys[i] = EXTREME_KEY_BYTES[i]; + } + + // with empty range + + cursor = new DataCursor(view, false); + expectRange(KEYS, 0, end, reversed); + cursor.close(); + + // begin key only, inclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, keys[i], true, null, false, reversed); + expectRange(KEYS, i, end, reversed); + cursor.close(); + } + + // begin key only, exclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, keys[i], false, null, false, reversed); + expectRange(KEYS, i + 1, end, reversed); + cursor.close(); + } + + // end key only, inclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, null, false, keys[i], true, reversed); + expectRange(KEYS, 0, i, reversed); + cursor.close(); + } + + // end key only, exclusive + + for (int i = 0; i <= end; i++) { + cursor = newCursor(view, null, false, keys[i], false, reversed); + expectRange(KEYS, 0, i - 1, reversed); + cursor.close(); + } + + // begin and end keys, inclusive and exclusive + + for (int i = 0; i <= end; i++) { + for (int j = i; j <= end; j++) { + // begin inclusive, end inclusive + + cursor = newCursor(view, keys[i], true, keys[j], + true, reversed); + expectRange(KEYS, i, j, reversed); + cursor.close(); + + // begin inclusive, end exclusive + + cursor = newCursor(view, keys[i], true, keys[j], + false, reversed); + expectRange(KEYS, i, j - 1, reversed); + cursor.close(); + + // begin exclusive, end inclusive + + cursor = newCursor(view, keys[i], false, keys[j], + true, reversed); + expectRange(KEYS, i + 1, j, reversed); + cursor.close(); + + // begin exclusive, end exclusive + + cursor = newCursor(view, keys[i], false, keys[j], + false, reversed); + expectRange(KEYS, i + 1, j - 1, reversed); + cursor.close(); + } + } + + // single key range + + for (int i = 0; i <= end; i++) { + cursor = new DataCursor(view, false, keys[i]); + expectRange(KEYS, i, i, reversed); + cursor.close(); + } + + // start with lower 
extreme (before any existing key) + + cursor = newCursor(view, extremeKeys[0], true, null, false, reversed); + expectRange(KEYS, 0, end, reversed); + cursor.close(); + + // start with higher extreme (after any existing key) + + cursor = newCursor(view, null, false, extremeKeys[1], true, reversed); + expectRange(KEYS, 0, end, reversed); + cursor.close(); + } + + private DataCursor newCursor(DataView view, + Object beginKey, boolean beginInclusive, + Object endKey, boolean endInclusive, + boolean reversed) + throws Exception { + + if (reversed) { + return new DataCursor(view, false, + endKey, endInclusive, + beginKey, beginInclusive); + } else { + return new DataCursor(view, false, + beginKey, beginInclusive, + endKey, endInclusive); + } + } + + private void expectRange(byte[][] bytes, int first, int last, + boolean reversed) + throws DatabaseException { + + int i; + boolean init; + for (init = true, i = first;; i++, init = false) { + if (checkRange(bytes, first, last, i <= last, + reversed, !reversed, init, i)) { + break; + } + } + for (init = true, i = last;; i--, init = false) { + if (checkRange(bytes, first, last, i >= first, + reversed, reversed, init, i)) { + break; + } + } + } + + private boolean checkRange(byte[][] bytes, int first, int last, + boolean inRange, boolean reversed, + boolean forward, boolean init, + int i) + throws DatabaseException { + + OperationStatus s; + if (forward) { + if (init) { + s = cursor.getFirst(false); + } else { + s = cursor.getNext(false); + } + } else { + if (init) { + s = cursor.getLast(false); + } else { + s = cursor.getPrev(false); + } + } + + String msg = " " + (forward ? "next" : "prev") + " i=" + i + + " first=" + first + " last=" + last + + (reversed ? " reversed" : " not reversed"); + + // check that moving past ends doesn't move the cursor + if (s == OperationStatus.SUCCESS && i == first) { + OperationStatus s2 = reversed ? cursor.getNext(false) + : cursor.getPrev(false); + assertEquals(msg, OperationStatus.NOTFOUND, s2); + } + if (s == OperationStatus.SUCCESS && i == last) { + OperationStatus s2 = reversed ? cursor.getPrev(false) + : cursor.getNext(false); + assertEquals(msg, OperationStatus.NOTFOUND, s2); + } + + byte[] val = (s == OperationStatus.SUCCESS) + ? ((byte[]) cursor.getCurrentValue()) + : null; + + if (inRange) { + assertNotNull("RangeNotFound" + msg, val); + + if (!Arrays.equals(val, bytes[i])) { + printBytes(val); + printBytes(bytes[i]); + fail("RangeKeyNotEqual" + msg); + } + if (VERBOSE) { + System.out.println("GotRange" + msg); + } + return false; + } else { + assertEquals("RangeExceeded" + msg, OperationStatus.NOTFOUND, s); + return true; + } + } + + private void printBytes(byte[] bytes) { + + for (int i = 0; i < bytes.length; i += 1) { + System.out.print(Integer.toHexString(bytes[i] & 0xFF)); + System.out.print(' '); + } + System.out.println(); + } + + @Test + public void testSubRanges() { + + DatabaseEntry begin = new DatabaseEntry(); + DatabaseEntry begin2 = new DatabaseEntry(); + DatabaseEntry end = new DatabaseEntry(); + DatabaseEntry end2 = new DatabaseEntry(); + KeyRange range = new KeyRange(null); + KeyRange range2 = null; + + /* Base range [1, 2] */ + begin.setData(new byte[] { 1 }); + end.setData(new byte[] { 2 }); + range = range.subRange(begin, true, end, true); + + /* Subrange (0, 1] is invalid. 
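(Its begin key { 0 } lies outside the base range.)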
*/ + begin2.setData(new byte[] { 0 }); + end2.setData(new byte[] { 1 }); + try { + range2 = range.subRange(begin2, false, end2, true); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange [1, 3) is invalid. */ + begin2.setData(new byte[] { 1 }); + end2.setData(new byte[] { 3 }); + try { + range2 = range.subRange(begin2, true, end2, false); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange [2, 2] is valid. */ + begin2.setData(new byte[] { 2 }); + end2.setData(new byte[] { 2 }); + range2 = range.subRange(begin2, true, end2, true); + + /* Subrange [0, 1] is invalid. */ + begin2.setData(new byte[] { 0 }); + end2.setData(new byte[] { 1 }); + try { + range2 = range.subRange(begin2, true, end2, true); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange (0, 3] is invalid. */ + begin2.setData(new byte[] { 0 }); + end2.setData(new byte[] { 3 }); + try { + range2 = range.subRange(begin2, false, end2, true); + fail(); + } catch (KeyRangeException expected) {} + + /* Subrange [3, 3) is invalid. */ + begin2.setData(new byte[] { 3 }); + end2.setData(new byte[] { 3 }); + try { + range2 = range.subRange(begin2, true, end2, false); + fail(); + } catch (KeyRangeException expected) {} + } + + @SuppressWarnings("serial") + public static class ReverseComparator implements Comparator, + Serializable { + public int compare(byte[] d1, byte[] d2) { + int cmp = KeyRange.compareBytes(d1, 0, d1.length, + d2, 0, d2.length); + if (cmp < 0) { + return 1; + } else if (cmp > 0) { + return -1; + } else { + return 0; + } + } + } +} diff --git a/test/com/sleepycat/collections/test/CollectionTest.java b/test/com/sleepycat/collections/test/CollectionTest.java new file mode 100644 index 0000000..766b119 --- /dev/null +++ b/test/com/sleepycat/collections/test/CollectionTest.java @@ -0,0 +1,2990 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.concurrent.ConcurrentMap; +import java.util.regex.Pattern; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.collections.MapEntryParameter; +import com.sleepycat.collections.StoredCollection; +import com.sleepycat.collections.StoredCollections; +import com.sleepycat.collections.StoredContainer; +import com.sleepycat.collections.StoredEntrySet; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredKeySet; +import com.sleepycat.collections.StoredList; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.StoredSortedEntrySet; +import com.sleepycat.collections.StoredSortedKeySet; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredSortedValueSet; +import com.sleepycat.collections.StoredValueSet; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.util.ExceptionUnwrapper; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class CollectionTest extends TestBase { + + private static final int NONE = 0; + private static final int SUB = 1; + private static final int HEAD = 2; + private static final int TAIL = 3; + + /* + * For long tests we permute testStoredIterator to test both StoredIterator + * and BlockIterator. When testing BlockIterator, we permute the maxKey + * over the array values below. BlockIterator's block size is 10. So we + * test below the block size (6), at the block size (10), and above it (14 + * and 22). + */ + protected static final int DEFAULT_MAX_KEY = 6; + private static final int[] MAX_KEYS = {6, 10, 14, 22}; + + private boolean testStoredIterator; + private static int maxKey; /* Must be a multiple of 2. 
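With beginKey fixed at 1, an even maxKey gives the odd/even add and remove tests equal halves of the key range.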
*/ + protected static int beginKey = 1; + private static int endKey; + + private Environment env; + private Database store; + private Database index; + private final boolean isEntityBinding; + private final boolean isAutoCommit; + private TestStore testStore; + private String testName; + private final EntryBinding keyBinding; + private final EntryBinding valueBinding; + private final EntityBinding entityBinding; + private TransactionRunner readRunner; + private TransactionRunner writeRunner; + private TransactionRunner writeIterRunner; + private TestEnv testEnv; + + private StoredMap map; + private StoredMap imap; // insertable map (primary store for indexed map) + private StoredSortedMap smap; // sorted map (null or equal to map) + private StoredMap saveMap; + private StoredSortedMap saveSMap; + private int rangeType; + private StoredList list; + private StoredList ilist; // insertable list (primary store for index list) + private StoredList saveList; + private StoredKeySet keySet; + private StoredValueSet valueSet; + + @Parameters + public static List genParams() { + if (SharedTestUtils.runLongTests()){ + List list = baseParams(true, DEFAULT_MAX_KEY); + + for (int i : MAX_KEYS) + list.addAll(baseParams(false, i)); + + return list; + } + return baseParams(false, 6); + } + + private static List baseParams(boolean storedIter, + int maximumKey){ + + List list = new ArrayList(); + for (int i = 0; i < TestEnv.ALL.length; i += 1) { + for (int j = 0; j < TestStore.ALL.length; j += 1) { + for (int k = 0; k < 2; k += 1) { + boolean entityBinding = (k != 0); + + list.add(new Object[] {TestEnv.ALL[i], TestStore.ALL[j], + entityBinding, false, storedIter, maximumKey}); + + if (TestEnv.ALL[i].isTxnMode()) { + list.add(new Object[] + {TestEnv.ALL[i], TestStore.ALL[j], entityBinding, + true, storedIter, maximumKey}); + } + } + } + } + + return list; + } + + public CollectionTest(TestEnv testEnv, + TestStore testStore, + boolean isEntityBinding, + boolean isAutoCommit, + boolean storedIter, + int maxKey) { + + this.testEnv = testEnv; + this.testStore = testStore; + this.isEntityBinding = isEntityBinding; + this.isAutoCommit = isAutoCommit; + + keyBinding = testStore.getKeyBinding(); + valueBinding = testStore.getValueBinding(); + entityBinding = testStore.getEntityBinding(); + + setParams(storedIter, maxKey); + customName = testName; + } + + private void setParams(boolean storedIter, int maximumKey) { + + testStoredIterator = storedIter; + maxKey = maximumKey; + endKey = maximumKey; + + testName = testEnv.getName() + '-' + testStore.getName() + + (isEntityBinding ? "-entity" : "-value") + + (isAutoCommit ? "-autoCommit" : "") + + (testStoredIterator ? "-storedIter" : "") + + ((maxKey != DEFAULT_MAX_KEY) ? ("-maxKey-" + maxKey) : ""); + } + + + @Test + public void runTest() + throws Exception { + + try { + env = testEnv.open(testName); + // For testing auto-commit, use a normal (transactional) runner for + // all reading and for writing via an iterator, and a do-nothing + // runner for writing via collections; if auto-commit is tested, + // the per-collection auto-commit property will be set elsewhere. + // + TransactionRunner normalRunner = newTransactionRunner(env); + normalRunner.setAllowNestedTransactions( + DbCompat.NESTED_TRANSACTIONS); + TransactionRunner nullRunner = new NullTransactionRunner(env); + readRunner = nullRunner; + if (isAutoCommit) { + writeRunner = nullRunner; + writeIterRunner = testStoredIterator ? 
normalRunner + : nullRunner; + } else { + writeRunner = normalRunner; + writeIterRunner = normalRunner; + } + + store = testStore.open(env, "unindexed.db"); + testUnindexed(); + store.close(); + store = null; + + TestStore indexOf = testStore.getIndexOf(); + if (indexOf != null) { + store = indexOf.open(env, "indexed.db"); + index = testStore.openIndex(store, "index.db"); + testIndexed(); + index.close(); + index = null; + store.close(); + store = null; + } + env.close(); + env = null; + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } finally { + if (index != null) { + try { + index.close(); + } catch (Exception e) { + } + } + if (store != null) { + try { + store.close(); + } catch (Exception e) { + } + } + if (env != null) { + try { + env.close(); + } catch (Exception e) { + } + } + /* Ensure that GC can cleanup. */ + index = null; + store = null; + env = null; + readRunner = null; + writeRunner = null; + writeIterRunner = null; + map = null; + imap = null; + smap = null; + saveMap = null; + saveSMap = null; + list = null; + ilist = null; + saveList = null; + keySet = null; + valueSet = null; + testEnv = null; + testStore = null; + } + } + + /** + * Is overridden in XACollectionTest. + * @throws DatabaseException from subclasses. + */ + protected TransactionRunner newTransactionRunner(Environment env) + throws DatabaseException { + + return new TransactionRunner(env); + } + + void testCreation(StoredContainer cont, int expectSize) { + assertEquals(index != null, cont.isSecondary()); + assertEquals(testStore.isOrdered(), cont.isOrdered()); + assertEquals(testStore.areKeyRangesAllowed(), + cont.areKeyRangesAllowed()); + assertEquals(testStore.areKeysRenumbered(), cont.areKeysRenumbered()); + assertEquals(testStore.areDuplicatesAllowed(), + cont.areDuplicatesAllowed()); + assertEquals(testEnv.isTxnMode(), cont.isTransactional()); + assertEquals(expectSize, cont.size()); + } + + void testMapCreation(ConcurrentMap map) { + assertTrue(map.values() instanceof Set); + assertEquals(testStore.areKeyRangesAllowed(), + map.keySet() instanceof SortedSet); + assertEquals(testStore.areKeyRangesAllowed(), + map.entrySet() instanceof SortedSet); + assertEquals(testStore.areKeyRangesAllowed() && isEntityBinding, + map.values() instanceof SortedSet); + } + + void testUnindexed() + throws Exception { + + // create primary map + if (testStore.areKeyRangesAllowed()) { + if (isEntityBinding) { + smap = new StoredSortedMap(store, keyBinding, + entityBinding, + testStore.getKeyAssigner()); + valueSet = new StoredSortedValueSet(store, entityBinding, + true); + } else { + smap = new StoredSortedMap(store, keyBinding, + valueBinding, + testStore.getKeyAssigner()); + // sorted value set is not possible since key cannot be derived + // for performing subSet, etc. 
+ } + keySet = new StoredSortedKeySet(store, keyBinding, true); + map = smap; + } else { + if (isEntityBinding) { + map = new StoredMap(store, keyBinding, entityBinding, + testStore.getKeyAssigner()); + valueSet = new StoredValueSet(store, entityBinding, true); + } else { + map = new StoredMap(store, keyBinding, valueBinding, + testStore.getKeyAssigner()); + valueSet = new StoredValueSet(store, valueBinding, true); + } + smap = null; + keySet = new StoredKeySet(store, keyBinding, true); + } + imap = map; + + // create primary list + if (testStore.hasRecNumAccess()) { + if (isEntityBinding) { + ilist = new StoredList(store, entityBinding, + testStore.getKeyAssigner()); + } else { + ilist = new StoredList(store, valueBinding, + testStore.getKeyAssigner()); + } + list = ilist; + } else { + try { + if (isEntityBinding) { + ilist = new StoredList(store, entityBinding, + testStore.getKeyAssigner()); + } else { + ilist = new StoredList(store, valueBinding, + testStore.getKeyAssigner()); + } + fail(); + } catch (IllegalArgumentException expected) {} + } + + testCreation(map, 0); + if (list != null) { + testCreation(list, 0); + } + testMapCreation(map); + addAll(); + testAll(); + } + + void testIndexed() + throws Exception { + + // create primary map + if (isEntityBinding) { + map = new StoredMap(store, keyBinding, entityBinding, + testStore.getKeyAssigner()); + } else { + map = new StoredMap(store, keyBinding, valueBinding, + testStore.getKeyAssigner()); + } + imap = map; + smap = null; + // create primary list + if (testStore.hasRecNumAccess()) { + if (isEntityBinding) { + list = new StoredList(store, entityBinding, + testStore.getKeyAssigner()); + } else { + list = new StoredList(store, valueBinding, + testStore.getKeyAssigner()); + } + ilist = list; + } + + addAll(); + readAll(); + + // create indexed map (keySet/valueSet) + if (testStore.areKeyRangesAllowed()) { + if (isEntityBinding) { + map = smap = new StoredSortedMap(index, keyBinding, + entityBinding, true); + valueSet = new StoredSortedValueSet(index, entityBinding, + true); + } else { + map = smap = new StoredSortedMap(index, keyBinding, + valueBinding, true); + // sorted value set is not possible since key cannot be derived + // for performing subSet, etc. 
+ } + keySet = new StoredSortedKeySet(index, keyBinding, true); + } else { + if (isEntityBinding) { + map = new StoredMap(index, keyBinding, entityBinding, true); + valueSet = new StoredValueSet(index, entityBinding, true); + } else { + map = new StoredMap(index, keyBinding, valueBinding, true); + valueSet = new StoredValueSet(index, valueBinding, true); + } + smap = null; + keySet = new StoredKeySet(index, keyBinding, true); + } + + // create indexed list + if (testStore.hasRecNumAccess()) { + if (isEntityBinding) { + list = new StoredList(index, entityBinding, true); + } else { + list = new StoredList(index, valueBinding, true); + } + } else { + try { + if (isEntityBinding) { + list = new StoredList(index, entityBinding, true); + } else { + list = new StoredList(index, valueBinding, true); + } + fail(); + } catch (IllegalArgumentException expected) {} + } + + testCreation(map, maxKey); + testCreation((StoredContainer) map.values(), maxKey); + testCreation((StoredContainer) map.keySet(), maxKey); + testCreation((StoredContainer) map.entrySet(), maxKey); + if (list != null) { + testCreation(list, maxKey); + } + testMapCreation(map); + testAll(); + } + + void testAll() + throws Exception { + + checkKeySetAndValueSet(); + readAll(); + updateAll(); + readAll(); + if (!map.areKeysRenumbered()) { + removeOdd(); + readEven(); + addOdd(); + readAll(); + removeOddIter(); + readEven(); + if (imap.areDuplicatesAllowed()) { + addOddDup(); + } else { + addOdd(); + } + readAll(); + removeOddEntry(); + readEven(); + addOdd(); + readAll(); + if (isEntityBinding) { + removeOddEntity(); + readEven(); + addOddEntity(); + readAll(); + } + bulkOperations(); + } + if (isListAddAllowed()) { + removeOddList(); + readEvenList(); + addOddList(); + readAll(); + if (!isEntityBinding) { + removeOddListValue(); + readEvenList(); + addOddList(); + readAll(); + } + } + if (list != null) { + bulkListOperations(); + } else { + listOperationsNotAllowed(); + } + if (smap != null) { + readWriteRange(SUB, 1, 1); + readWriteRange(HEAD, 1, 1); + readWriteRange(SUB, 1, maxKey); + readWriteRange(HEAD, 1, maxKey); + readWriteRange(TAIL, 1, maxKey); + readWriteRange(SUB, 1, 3); + readWriteRange(HEAD, 1, 3); + readWriteRange(SUB, 2, 2); + readWriteRange(SUB, 2, maxKey); + readWriteRange(TAIL, 2, maxKey); + readWriteRange(SUB, maxKey, maxKey); + readWriteRange(TAIL, maxKey, maxKey); + readWriteRange(SUB, maxKey + 1, maxKey + 1); + readWriteRange(TAIL, maxKey + 1, maxKey + 1); + readWriteRange(SUB, 0, 0); + readWriteRange(HEAD, 0, 0); + } + updateAll(); + readAll(); + if (map.areDuplicatesAllowed()) { + readWriteDuplicates(); + readAll(); + } else { + duplicatesNotAllowed(); + readAll(); + } + if (testEnv.isCdbMode()) { + testCdbLocking(); + } + removeAll(); + if (!map.areKeysRenumbered()) { + testConcurrentMap(); + } + if (isListAddAllowed()) { + testIterAddList(); + clearAll(); + } + if (imap.areDuplicatesAllowed()) { + testIterAddDuplicates(); + clearAll(); + } + if (isListAddAllowed()) { + addAllList(); + readAll(); + removeAllList(); + } + appendAll(); + } + + void checkKeySetAndValueSet() { + + // use bulk operations to check that explicitly constructed + // keySet/valueSet are equivalent + assertEquals(keySet, imap.keySet()); + if (valueSet != null) { + assertEquals(valueSet, imap.values()); + } + } + + Iterator iterator(Collection storedCollection) { + + if (testStoredIterator) { + return ((StoredCollection) storedCollection).storedIterator(); + } else { + return storedCollection.iterator(); + } + } + + void addAll() + throws 
Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() { + assertTrue(imap.isEmpty()); + Iterator iter = iterator(imap.entrySet()); + try { + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + assertEquals(0, imap.keySet().toArray().length); + assertEquals(0, imap.keySet().toArray(new Object[0]).length); + assertEquals(0, imap.entrySet().toArray().length); + assertEquals(0, imap.entrySet().toArray(new Object[0]).length); + assertEquals(0, imap.values().toArray().length); + assertEquals(0, imap.values().toArray(new Object[0]).length); + + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(imap.get(key)); + assertTrue(!imap.keySet().contains(key)); + assertTrue(!imap.values().contains(val)); + assertNull(imap.put(key, val)); + assertEquals(val, imap.get(key)); + assertTrue(imap.keySet().contains(key)); + assertTrue(imap.values().contains(val)); + assertTrue(imap.duplicates(key).contains(val)); + if (!imap.areDuplicatesAllowed()) { + assertEquals(val, imap.put(key, val)); + } + checkDupsSize(1, imap.duplicates(key)); + } + assertTrue(!imap.isEmpty()); + } + }); + } + + void appendAll() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() { + assertTrue(imap.isEmpty()); + + TestKeyAssigner keyAssigner = testStore.getKeyAssigner(); + if (keyAssigner != null) { + keyAssigner.reset(); + } + + for (int i = beginKey; i <= endKey; i += 1) { + boolean useList = (i & 1) == 0; + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(imap.get(key)); + if (keyAssigner != null) { + if (useList && ilist != null) { + assertEquals(i - 1, ilist.append(val)); + } else { + assertEquals(key, imap.append(val)); + } + assertEquals(val, imap.get(key)); + } else { + Long recnoKey; + if (useList && ilist != null) { + recnoKey = new Long(ilist.append(val) + 1); + } else { + recnoKey = (Long) imap.append(val); + } + assertNotNull(recnoKey); + Object recnoVal; + if (isEntityBinding) { + recnoVal = makeEntity(recnoKey.intValue(), i); + } else { + recnoVal = val; + } + assertEquals(recnoVal, imap.get(recnoKey)); + } + } + } + }); + } + + void updateAll() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = makeVal(i); + if (!imap.areDuplicatesAllowed()) { + assertEquals(val, imap.put(key, val)); + } + if (isEntityBinding) { + assertTrue(!imap.values().add(val)); + } + checkDupsSize(1, imap.duplicates(key)); + if (ilist != null) { + int idx = i - 1; + assertEquals(val, ilist.set(idx, val)); + } + } + updateIter(map.entrySet()); + updateIter(map.values()); + if (beginKey <= endKey) { + ListIterator iter = (ListIterator) iterator(map.keySet()); + try { + assertNotNull(iter.next()); + iter.set(makeKey(beginKey)); + fail(); + } catch (UnsupportedOperationException e) { + } finally { + StoredIterator.close(iter); + } + } + if (list != null) { + updateIter(list); + } + } + }); + } + + void updateIter(final Collection coll) + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + Pattern suppressedError = Pattern.compile("BDB1004.*"); + Object oldErrHandler = DbCompat.getErrorHandler(env); + ListIterator iter = (ListIterator) iterator(coll); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Object obj = iter.next(); + if (map.isOrdered()) 
{ + assertEquals(i, intIter(coll, obj)); + } + if (index != null) { + try { + setValuePlusOne(iter, obj); + fail(); + } catch (UnsupportedOperationException e) {} + } else if + (((StoredCollection) coll).areDuplicatesOrdered()) { + DbCompat.suppressError(env, suppressedError); + try { + setValuePlusOne(iter, obj); + fail(); + } catch (RuntimeException e) { + Exception e2 = ExceptionUnwrapper.unwrap(e); + assertTrue(e2.getClass().getName(), + e2 instanceof IllegalArgumentException || + e2 instanceof DatabaseException); + } + DbCompat.setErrorHandler(env, oldErrHandler); + } else { + setValuePlusOne(iter, obj); + /* Ensure iterator position is correct. */ + if (map.isOrdered()) { + assertTrue(iter.hasPrevious()); + obj = iter.previous(); + assertEquals(i, intIter(coll, obj)); + assertTrue(iter.hasNext()); + obj = iter.next(); + assertEquals(i, intIter(coll, obj)); + } + } + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + } + }); + } + + void setValuePlusOne(ListIterator iter, Object obj) { + + if (obj instanceof Map.Entry) { + Map.Entry entry = (Map.Entry) obj; + Long key = (Long) entry.getKey(); + Object oldVal = entry.getValue(); + Object val = makeVal(key.intValue() + 1); + if (isEntityBinding) { + try { + // must fail on attempt to change the key via an entity + entry.setValue(val); + fail(); + } catch (IllegalArgumentException e) {} + val = makeEntity(key.intValue(), key.intValue() + 1); + } + entry.setValue(val); + assertEquals(val, entry.getValue()); + assertEquals(val, map.get(key)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + entry.setValue(oldVal); + assertEquals(oldVal, entry.getValue()); + assertEquals(oldVal, map.get(key)); + assertTrue(map.duplicates(key).contains(oldVal)); + checkDupsSize(1, map.duplicates(key)); + } else { + Object oldVal = obj; + Long key = makeKey(intVal(obj)); + Object val = makeVal(key.intValue() + 1); + if (isEntityBinding) { + try { + // must fail on attempt to change the key via an entity + iter.set(val); + fail(); + } catch (IllegalArgumentException e) {} + val = makeEntity(key.intValue(), key.intValue() + 1); + } + iter.set(val); + assertEquals(val, map.get(key)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + iter.set(oldVal); + assertEquals(oldVal, map.get(key)); + assertTrue(map.duplicates(key).contains(oldVal)); + checkDupsSize(1, map.duplicates(key)); + } + } + + void removeAll() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() { + assertTrue(!map.isEmpty()); + ListIterator iter = null; + try { + if (list != null) { + iter = (ListIterator) iterator(list); + } else { + iter = (ListIterator) iterator(map.values()); + } + iteratorSetAndRemoveNotAllowed(iter); + + Object val = iter.next(); + assertNotNull(val); + iter.remove(); + iteratorSetAndRemoveNotAllowed(iter); + + if (index == null) { + val = iter.next(); + assertNotNull(val); + iter.set(val); + + if (map.areDuplicatesAllowed()) { + iter.add(makeVal(intVal(val), intVal(val) + 1)); + iteratorSetAndRemoveNotAllowed(iter); + } + } + } finally { + StoredIterator.close(iter); + } + map.clear(); + assertTrue(map.isEmpty()); + assertTrue(map.entrySet().isEmpty()); + assertTrue(map.keySet().isEmpty()); + assertTrue(map.values().isEmpty()); + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = makeVal(i); + assertNull(map.get(key)); + assertTrue(!map.duplicates(key).contains(val)); + 
checkDupsSize(0, map.duplicates(key)); + } + } + }); + } + + void clearAll() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() { + map.clear(); + assertTrue(map.isEmpty()); + } + }); + } + + /** + * Tests that removing while iterating works properly, especially when + * removing everything in the key range or everything from some point to + * the end of the range. [#15858] + */ + void removeIter() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() { + ListIterator iter; + + /* Save contents. */ + HashMap savedMap = + new HashMap(map); + assertEquals(savedMap, map); + + /* Remove all moving forward. */ + iter = (ListIterator) iterator(map.keySet()); + try { + while (iter.hasNext()) { + assertNotNull(iter.next()); + iter.remove(); + } + assertTrue(!iter.hasNext()); + assertTrue(!iter.hasPrevious()); + assertTrue(map.isEmpty()); + } finally { + StoredIterator.close(iter); + } + + /* Restore contents. */ + imap.putAll(savedMap); + assertEquals(savedMap, map); + + /* Remove all moving backward. */ + iter = (ListIterator) iterator(map.keySet()); + try { + while (iter.hasNext()) { + assertNotNull(iter.next()); + } + while (iter.hasPrevious()) { + assertNotNull(iter.previous()); + iter.remove(); + } + assertTrue(map.toString(), !iter.hasNext()); + assertTrue(!iter.hasPrevious()); + assertTrue(map.isEmpty()); + } finally { + StoredIterator.close(iter); + } + + /* Restore contents. */ + imap.putAll(savedMap); + assertEquals(savedMap, map); + + int first = Math.max(1, beginKey); + int last = Math.min(maxKey, endKey); + + /* Skip N forward, remove all from that point forward. */ + for (int readTo = first + 1; readTo <= last; readTo += 1) { + iter = (ListIterator) iterator(map.keySet()); + try { + for (int i = first; i < readTo; i += 1) { + assertTrue(iter.hasNext()); + assertNotNull(iter.next()); + } + for (int i = readTo; i <= last; i += 1) { + assertTrue(iter.hasNext()); + assertNotNull(iter.next()); + iter.remove(); + } + assertTrue(!iter.hasNext()); + assertTrue(iter.hasPrevious()); + assertEquals(readTo - first, map.size()); + } finally { + StoredIterator.close(iter); + } + + /* Restore contents. */ + for (Map.Entry entry : savedMap.entrySet()) { + if (!imap.entrySet().contains(entry)) { + imap.put(entry.getKey(), entry.getValue()); + } + } + assertEquals(savedMap, map); + } + + /* Skip N backward, remove all from that point backward. */ + for (int readTo = last - 1; readTo >= first; readTo -= 1) { + iter = (ListIterator) iterator(map.keySet()); + try { + while (iter.hasNext()) { + assertNotNull(iter.next()); + } + for (int i = last; i > readTo; i -= 1) { + assertTrue(iter.hasPrevious()); + assertNotNull(iter.previous()); + } + for (int i = readTo; i >= first; i -= 1) { + assertTrue(iter.hasPrevious()); + assertNotNull(iter.previous()); + iter.remove(); + } + assertTrue(!iter.hasPrevious()); + assertTrue(iter.hasNext()); + assertEquals(last - readTo, map.size()); + } finally { + StoredIterator.close(iter); + } + + /* Restore contents. 
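Re-insert only the entries that the scan above removed.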
*/
+                    for (Map.Entry entry : savedMap.entrySet()) {
+                        if (!imap.entrySet().contains(entry)) {
+                            imap.put(entry.getKey(), entry.getValue());
+                        }
+                    }
+                    assertEquals(savedMap, map);
+                }
+            }
+        });
+    }
+
+    void iteratorSetAndRemoveNotAllowed(ListIterator i) {
+
+        try {
+            i.remove();
+            fail();
+        } catch (IllegalStateException e) {}
+
+        if (index == null) {
+            try {
+                Object val = makeVal(1);
+                i.set(val);
+                fail();
+            } catch (IllegalStateException e) {}
+        }
+    }
+
+    void removeOdd()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                boolean toggle = false;
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    toggle = !toggle;
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    if (toggle) {
+                        assertTrue(map.keySet().contains(key));
+                        assertTrue(map.keySet().remove(key));
+                        assertTrue(!map.keySet().contains(key));
+                    } else {
+                        assertTrue(map.containsValue(val));
+                        Object oldVal = map.remove(key);
+                        assertEquals(oldVal, val);
+                        assertTrue(!map.containsKey(key));
+                        assertTrue(!map.containsValue(val));
+                    }
+                    assertNull(map.get(key));
+                    assertTrue(!map.duplicates(key).contains(val));
+                    checkDupsSize(0, map.duplicates(key));
+                }
+            }
+        });
+    }
+
+    void removeOddEntity()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertTrue(map.values().contains(val));
+                    assertTrue(map.values().remove(val));
+                    assertTrue(!map.values().contains(val));
+                    assertNull(map.get(key));
+                    assertTrue(!map.duplicates(key).contains(val));
+                    checkDupsSize(0, map.duplicates(key));
+                }
+            }
+        });
+    }
+
+    void removeOddEntry()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = mapEntry(i);
+                    assertTrue(map.entrySet().contains(val));
+                    assertTrue(map.entrySet().remove(val));
+                    assertTrue(!map.entrySet().contains(val));
+                    assertNull(map.get(key));
+                }
+            }
+        });
+    }
+
+    void removeOddIter()
+        throws Exception {
+
+        writeIterRunner.run(new TransactionWorker() {
+            public void doWork() {
+                Iterator iter = iterator(map.keySet());
+                try {
+                    for (int i = beginKey; i <= endKey; i += 1) {
+                        assertTrue(iter.hasNext());
+                        Long key = (Long) iter.next();
+                        assertNotNull(key);
+                        if (map instanceof SortedMap) {
+                            assertEquals(makeKey(i), key);
+                        }
+                        if ((key.intValue() & 1) != 0) {
+                            iter.remove();
+                        }
+                    }
+                } finally {
+                    StoredIterator.close(iter);
+                }
+            }
+        });
+    }
+
+    void removeOddList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    // remove by index
+                    // (with entity binding, embedded keys in values are
+                    // being changed so we can't use values for comparison)
+                    int idx = (i - beginKey) / 2;
+                    Object val = makeVal(i);
+                    if (!isEntityBinding) {
+                        assertTrue(list.contains(val));
+                        assertEquals(val, list.get(idx));
+                        assertEquals(idx, list.indexOf(val));
+                    }
+                    assertNotNull(list.get(idx));
+                    if (isEntityBinding) {
+                        assertNotNull(list.remove(idx));
+                    } else {
+                        assertTrue(list.contains(val));
+                        assertEquals(val, list.remove(idx));
+                    }
+                    assertTrue(!list.remove(val));
+                    assertTrue(!list.contains(val));
+                    assertTrue(!val.equals(list.get(idx)));
+                }
+            }
+        });
+    }
+
+    void removeOddListValue()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    // for the non-entity case, remove by value
+                    // (with entity binding, embedded keys in values are
+                    // being changed so we can't use values for comparison)
+                    int idx = (i - beginKey) / 2;
+                    Object val = makeVal(i);
+                    assertTrue(list.contains(val));
+                    assertEquals(val, list.get(idx));
+                    assertEquals(idx, list.indexOf(val));
+                    assertTrue(list.remove(val));
+                    assertTrue(!list.remove(val));
+                    assertTrue(!list.contains(val));
+                    assertTrue(!val.equals(list.get(idx)));
+                }
+            }
+        });
+    }
+
+    void addOdd()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                // add using Map.put()
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    assertNull(imap.put(key, val));
+                    assertEquals(val, imap.get(key));
+                    assertTrue(imap.duplicates(key).contains(val));
+                    checkDupsSize(1, imap.duplicates(key));
+                    if (isEntityBinding) {
+                        assertTrue(!imap.values().add(val));
+                    }
+                    if (!imap.areDuplicatesAllowed()) {
+                        assertEquals(val, imap.put(key, val));
+                    }
+                }
+            }
+        });
+    }
+
+    void addOddEntity()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                // add using Map.values().add()
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    assertTrue(!imap.values().contains(val));
+                    assertTrue(imap.values().add(val));
+                    assertEquals(val, imap.get(key));
+                    assertTrue(imap.values().contains(val));
+                    assertTrue(imap.duplicates(key).contains(val));
+                    checkDupsSize(1, imap.duplicates(key));
+                    if (isEntityBinding) {
+                        assertTrue(!imap.values().add(val));
+                    }
+                }
+            }
+        });
+    }
+
+    void addOddDup()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                // add using Map.duplicates().add()
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    Long key = makeKey(i);
+                    Object val = makeVal(i);
+                    assertNull(imap.get(key));
+                    assertTrue(!imap.values().contains(val));
+                    assertTrue(imap.duplicates(key).add(val));
+                    assertEquals(val, imap.get(key));
+                    assertTrue(imap.values().contains(val));
+                    assertTrue(imap.duplicates(key).contains(val));
+                    checkDupsSize(1, imap.duplicates(key));
+                    assertTrue(!imap.duplicates(key).add(val));
+                    if (isEntityBinding) {
+                        assertTrue(!imap.values().add(val));
+                    }
+                }
+            }
+        });
+    }
+
+    void addOddList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                for (int i = beginKey; i <= endKey; i += 2) {
+                    int idx = i - beginKey;
+                    Object val = makeVal(i);
+                    assertTrue(!list.contains(val));
+                    assertTrue(!val.equals(list.get(idx)));
+                    list.add(idx, val);
+                    assertTrue(list.contains(val));
+                    assertEquals(val, list.get(idx));
+                }
+            }
+        });
+    }
+
+    void addAllList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    int idx = i - beginKey;
+                    Object val = makeVal(i);
+                    assertTrue(!list.contains(val));
+                    assertTrue(list.add(val));
+                    assertTrue(list.contains(val));
+                    assertEquals(val, list.get(idx));
+                }
+            }
+        });
+    }
+
+    void removeAllList()
+        throws Exception {
+
+        writeRunner.run(new TransactionWorker() {
+            public void doWork() {
+                assertTrue(!list.isEmpty());
+                list.clear();
+                assertTrue(list.isEmpty());
+                for (int i = beginKey; i <= endKey; i += 1) {
+                    int idx = i - beginKey;
+                    assertNull(list.get(idx));
+                }
+            }
+        });
+    }
+
+    /**
+     * Tests ConcurrentMap methods implemented by StoredMap. 
Starts with an + * empty DB and ends with an empty DB. [#16218] + */ + void testConcurrentMap() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() { + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = makeVal(i); + Object valPlusOne = makeVal(i, i + 1); + assertFalse(imap.containsKey(key)); + + assertNull(imap.putIfAbsent(key, val)); + assertEquals(val, imap.get(key)); + + assertEquals(val, imap.putIfAbsent(key, val)); + assertEquals(val, imap.get(key)); + + if (!imap.areDuplicatesAllowed()) { + assertEquals(val, imap.replace(key, valPlusOne)); + assertEquals(valPlusOne, imap.get(key)); + + assertEquals(valPlusOne, imap.replace(key, val)); + assertEquals(val, imap.get(key)); + + assertFalse(imap.replace(key, valPlusOne, val)); + assertEquals(val, imap.get(key)); + + assertTrue(imap.replace(key, val, valPlusOne)); + assertEquals(valPlusOne, imap.get(key)); + + assertTrue(imap.replace(key, valPlusOne, val)); + assertEquals(val, imap.get(key)); + } + + assertFalse(imap.remove(key, valPlusOne)); + assertTrue(imap.containsKey(key)); + + assertTrue(imap.remove(key, val)); + assertFalse(imap.containsKey(key)); + + assertNull(imap.replace(key, val)); + assertFalse(imap.containsKey(key)); + } + } + }); + } + + void testIterAddList() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() { + ListIterator i = (ListIterator) iterator(list); + try { + assertTrue(!i.hasNext()); + i.add(makeVal(3)); + assertTrue(!i.hasNext()); + assertTrue(i.hasPrevious()); + assertEquals(3, intVal(i.previous())); + + i.add(makeVal(1)); + assertTrue(i.hasPrevious()); + assertTrue(i.hasNext()); + assertEquals(1, intVal(i.previous())); + assertTrue(i.hasNext()); + assertEquals(1, intVal(i.next())); + assertTrue(i.hasNext()); + assertEquals(3, intVal(i.next())); + assertEquals(3, intVal(i.previous())); + + assertTrue(i.hasNext()); + i.add(makeVal(2)); + assertTrue(i.hasNext()); + assertTrue(i.hasPrevious()); + assertEquals(2, intVal(i.previous())); + assertTrue(i.hasNext()); + assertEquals(2, intVal(i.next())); + assertTrue(i.hasNext()); + assertEquals(3, intVal(i.next())); + + assertTrue(!i.hasNext()); + i.add(makeVal(4)); + i.add(makeVal(5)); + assertTrue(!i.hasNext()); + assertEquals(5, intVal(i.previous())); + assertEquals(4, intVal(i.previous())); + assertEquals(3, intVal(i.previous())); + assertEquals(2, intVal(i.previous())); + assertEquals(1, intVal(i.previous())); + assertTrue(!i.hasPrevious()); + } finally { + StoredIterator.close(i); + } + } + }); + } + + void testIterAddDuplicates() + throws Exception { + + writeIterRunner.run(new TransactionWorker() { + public void doWork() { + assertNull(imap.put(makeKey(1), makeVal(1))); + ListIterator i = + (ListIterator) iterator(imap.duplicates(makeKey(1))); + try { + if (imap.areDuplicatesOrdered()) { + i.add(makeVal(1, 4)); + i.add(makeVal(1, 2)); + i.add(makeVal(1, 3)); + while (i.hasPrevious()) i.previous(); + assertEquals(1, intVal(i.next())); + assertEquals(2, intVal(i.next())); + assertEquals(3, intVal(i.next())); + assertEquals(4, intVal(i.next())); + assertTrue(!i.hasNext()); + } else { + assertEquals(1, intVal(i.next())); + i.add(makeVal(1, 2)); + i.add(makeVal(1, 3)); + assertTrue(!i.hasNext()); + assertTrue(i.hasPrevious()); + assertEquals(3, intVal(i.previous())); + assertEquals(2, intVal(i.previous())); + assertEquals(1, intVal(i.previous())); + assertTrue(!i.hasPrevious()); + i.add(makeVal(1, 4)); + i.add(makeVal(1, 5)); + 
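// with unsorted duplicates, add() inserts at the cursor position, so the expected order is now 4, 5, 1, 2, 3 +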
assertTrue(i.hasNext()); + assertEquals(5, intVal(i.previous())); + assertEquals(4, intVal(i.previous())); + assertTrue(!i.hasPrevious()); + assertEquals(4, intVal(i.next())); + assertEquals(5, intVal(i.next())); + assertEquals(1, intVal(i.next())); + assertEquals(2, intVal(i.next())); + assertEquals(3, intVal(i.next())); + assertTrue(!i.hasNext()); + } + } finally { + StoredIterator.close(i); + } + } + }); + } + + void readAll() + throws Exception { + + readRunner.run(new TransactionWorker() { + public void doWork() { + // map + + assertNotNull(map.toString()); + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + Object val = map.get(key); + assertEquals(makeVal(i), val); + assertTrue(map.containsKey(key)); + assertTrue(map.containsValue(val)); + assertTrue(map.keySet().contains(key)); + assertTrue(map.values().contains(val)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + } + assertNull(map.get(makeKey(-1))); + assertNull(map.get(makeKey(0))); + assertNull(map.get(makeKey(beginKey - 1))); + assertNull(map.get(makeKey(endKey + 1))); + checkDupsSize(0, map.duplicates(makeKey(-1))); + checkDupsSize(0, map.duplicates(makeKey(0))); + checkDupsSize(0, map.duplicates(makeKey(beginKey - 1))); + checkDupsSize(0, map.duplicates(makeKey(endKey + 1))); + + // entrySet + + Set set = map.entrySet(); + assertNotNull(set.toString()); + assertEquals(beginKey > endKey, set.isEmpty()); + Iterator iter = iterator(set); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Map.Entry entry = (Map.Entry) iter.next(); + Long key = (Long) entry.getKey(); + Object val = entry.getValue(); + if (map instanceof SortedMap) { + assertEquals(intKey(key), i); + } + assertEquals(intKey(key), intVal(val)); + assertTrue(set.contains(entry)); + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + Map.Entry[] entries = + (Map.Entry[]) set.toArray(new Map.Entry[0]); + assertNotNull(entries); + assertEquals(endKey - beginKey + 1, entries.length); + for (int i = beginKey; i <= endKey; i += 1) { + Map.Entry entry = entries[i - beginKey]; + assertNotNull(entry); + if (map instanceof SortedMap) { + assertEquals(makeKey(i), entry.getKey()); + assertEquals(makeVal(i), entry.getValue()); + } + } + readIterator(set, iterator(set), beginKey, endKey); + if (smap != null) { + SortedSet sset = (SortedSet) set; + if (beginKey == 1 && endKey >= 1) { + readIterator(sset, + iterator(sset.subSet(mapEntry(1), + mapEntry(2))), + 1, 1); + } + if (beginKey <= 2 && endKey >= 2) { + readIterator(sset, + iterator(sset.subSet(mapEntry(2), + mapEntry(3))), + 2, 2); + } + if (beginKey <= endKey) { + readIterator(sset, + iterator(sset.subSet + (mapEntry(endKey), + mapEntry(endKey + 1))), + endKey, endKey); + } + if (isSubMap()) { + if (beginKey <= endKey) { + if (rangeType != TAIL) { + try { + sset.subSet(mapEntry(endKey + 1), + mapEntry(endKey + 2)); + fail(); + } catch (IllegalArgumentException e) {} + } + if (rangeType != HEAD) { + try { + sset.subSet(mapEntry(0), + mapEntry(1)); + fail(); + } catch (IllegalArgumentException e) {} + } + } + } else { + readIterator(sset, + iterator(sset.subSet + (mapEntry(endKey + 1), + mapEntry(endKey + 2))), + endKey, endKey - 1); + readIterator(sset, + iterator(sset.subSet(mapEntry(0), + mapEntry(1))), + 0, -1); + } + } + + // keySet + + set = map.keySet(); + assertNotNull(set.toString()); + assertEquals(beginKey > endKey, set.isEmpty()); + iter = iterator(set); + try { + for (int i = 
beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Long key = (Long) iter.next(); + assertTrue(set.contains(key)); + Object val = map.get(key); + if (map instanceof SortedMap) { + assertEquals(key, makeKey(i)); + } + assertEquals(intKey(key), intVal(val)); + } + assertTrue("" + beginKey + ' ' + endKey, !iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + Long[] keys = (Long[]) set.toArray(new Long[0]); + assertNotNull(keys); + assertEquals(endKey - beginKey + 1, keys.length); + for (int i = beginKey; i <= endKey; i += 1) { + Long key = keys[i - beginKey]; + assertNotNull(key); + if (map instanceof SortedMap) { + assertEquals(makeKey(i), key); + } + } + readIterator(set, iterator(set), beginKey, endKey); + + // values + + Collection coll = map.values(); + assertNotNull(coll.toString()); + assertEquals(beginKey > endKey, coll.isEmpty()); + iter = iterator(coll); + try { + for (int i = beginKey; i <= endKey; i += 1) { + assertTrue(iter.hasNext()); + Object val = iter.next(); + if (map instanceof SortedMap) { + assertEquals(makeVal(i), val); + } + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + Object[] values = coll.toArray(); + assertNotNull(values); + assertEquals(endKey - beginKey + 1, values.length); + for (int i = beginKey; i <= endKey; i += 1) { + Object val = values[i - beginKey]; + assertNotNull(val); + if (map instanceof SortedMap) { + assertEquals(makeVal(i), val); + } + } + readIterator(coll, iterator(coll), beginKey, endKey); + + // list + + if (list != null) { + assertNotNull(list.toString()); + assertEquals(beginKey > endKey, list.isEmpty()); + for (int i = beginKey; i <= endKey; i += 1) { + int idx = i - beginKey; + Object val = list.get(idx); + assertEquals(makeVal(i), val); + assertTrue(list.contains(val)); + assertEquals(idx, list.indexOf(val)); + assertEquals(idx, list.lastIndexOf(val)); + } + ListIterator li = (ListIterator) iterator(list); + try { + for (int i = beginKey; i <= endKey; i += 1) { + int idx = i - beginKey; + assertTrue(li.hasNext()); + assertEquals(idx, li.nextIndex()); + Object val = li.next(); + assertEquals(makeVal(i), val); + assertEquals(idx, li.previousIndex()); + } + assertTrue(!li.hasNext()); + } finally { + StoredIterator.close(li); + } + if (beginKey < endKey) { + li = list.listIterator(1); + try { + for (int i = beginKey + 1; i <= endKey; i += 1) { + int idx = i - beginKey; + assertTrue(li.hasNext()); + assertEquals(idx, li.nextIndex()); + Object val = li.next(); + assertEquals(makeVal(i), val); + assertEquals(idx, li.previousIndex()); + } + assertTrue(!li.hasNext()); + } finally { + StoredIterator.close(li); + } + } + values = list.toArray(); + assertNotNull(values); + assertEquals(endKey - beginKey + 1, values.length); + for (int i = beginKey; i <= endKey; i += 1) { + Object val = values[i - beginKey]; + assertNotNull(val); + assertEquals(makeVal(i), val); + } + readIterator(list, iterator(list), beginKey, endKey); + } + + // first/last + + if (smap != null) { + if (beginKey <= endKey && + beginKey >= 1 && beginKey <= maxKey) { + assertEquals(makeKey(beginKey), + smap.firstKey()); + assertEquals(makeKey(beginKey), + ((SortedSet) smap.keySet()).first()); + Object entry = ((SortedSet) smap.entrySet()).first(); + assertEquals(makeKey(beginKey), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(beginKey), + ((SortedSet) smap.values()).first()); + } + } else { + assertNull(smap.firstKey()); + assertNull(((SortedSet) 
smap.keySet()).first()); + assertNull(((SortedSet) smap.entrySet()).first()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).first()); + } + } + if (beginKey <= endKey && + endKey >= 1 && endKey <= maxKey) { + assertEquals(makeKey(endKey), + smap.lastKey()); + assertEquals(makeKey(endKey), + ((SortedSet) smap.keySet()).last()); + Object entry = ((SortedSet) smap.entrySet()).last(); + assertEquals(makeKey(endKey), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(endKey), + ((SortedSet) smap.values()).last()); + } + } else { + assertNull(smap.lastKey()); + assertNull(((SortedSet) smap.keySet()).last()); + assertNull(((SortedSet) smap.entrySet()).last()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).last()); + } + } + } + } + }); + } + + void readEven() + throws Exception { + + readRunner.run(new TransactionWorker() { + public void doWork() { + int readBegin = ((beginKey & 1) != 0) ? + (beginKey + 1) : beginKey; + int readEnd = ((endKey & 1) != 0) ? (endKey - 1) : endKey; + int readIncr = 2; + + // map + + for (int i = beginKey; i <= endKey; i += 1) { + Long key = makeKey(i); + if ((i & 1) == 0) { + Object val = map.get(key); + assertEquals(makeVal(i), val); + assertTrue(map.containsKey(key)); + assertTrue(map.containsValue(val)); + assertTrue(map.keySet().contains(key)); + assertTrue(map.values().contains(val)); + assertTrue(map.duplicates(key).contains(val)); + checkDupsSize(1, map.duplicates(key)); + } else { + Object val = makeVal(i); + assertTrue(!map.containsKey(key)); + assertTrue(!map.containsValue(val)); + assertTrue(!map.keySet().contains(key)); + assertTrue(!map.values().contains(val)); + assertTrue(!map.duplicates(key).contains(val)); + checkDupsSize(0, map.duplicates(key)); + } + } + + // entrySet + + Set set = map.entrySet(); + assertEquals(beginKey > endKey, set.isEmpty()); + Iterator iter = iterator(set); + try { + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + Map.Entry entry = (Map.Entry) iter.next(); + Long key = (Long) entry.getKey(); + Object val = entry.getValue(); + if (map instanceof SortedMap) { + assertEquals(intKey(key), i); + } + assertEquals(intKey(key), intVal(val)); + assertTrue(set.contains(entry)); + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + + // keySet + + set = map.keySet(); + assertEquals(beginKey > endKey, set.isEmpty()); + iter = iterator(set); + try { + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + Long key = (Long) iter.next(); + assertTrue(set.contains(key)); + Object val = map.get(key); + if (map instanceof SortedMap) { + assertEquals(key, makeKey(i)); + } + assertEquals(intKey(key), intVal(val)); + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + + // values + + Collection coll = map.values(); + assertEquals(beginKey > endKey, coll.isEmpty()); + iter = iterator(coll); + try { + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + Object val = iter.next(); + if (map instanceof SortedMap) { + assertEquals(makeVal(i), val); + } + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + + // list not used since keys may not be renumbered for this + // method to work in general + + // first/last + + if (smap != null) { + if (readBegin <= readEnd && + readBegin >= 1 && readBegin <= maxKey) { + 
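// after removing odd keys, firstKey()/first() must return the first surviving even key (readBegin) +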
assertEquals(makeKey(readBegin), + smap.firstKey()); + assertEquals(makeKey(readBegin), + ((SortedSet) smap.keySet()).first()); + Object entry = ((SortedSet) smap.entrySet()).first(); + assertEquals(makeKey(readBegin), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(readBegin), + ((SortedSet) smap.values()).first()); + } + } else { + assertNull(smap.firstKey()); + assertNull(((SortedSet) smap.keySet()).first()); + assertNull(((SortedSet) smap.entrySet()).first()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).first()); + } + } + if (readBegin <= readEnd && + readEnd >= 1 && readEnd <= maxKey) { + assertEquals(makeKey(readEnd), + smap.lastKey()); + assertEquals(makeKey(readEnd), + ((SortedSet) smap.keySet()).last()); + Object entry = ((SortedSet) smap.entrySet()).last(); + assertEquals(makeKey(readEnd), + ((Map.Entry) entry).getKey()); + if (smap.values() instanceof SortedSet) { + assertEquals(makeVal(readEnd), + ((SortedSet) smap.values()).last()); + } + } else { + assertNull(smap.lastKey()); + assertNull(((SortedSet) smap.keySet()).last()); + assertNull(((SortedSet) smap.entrySet()).last()); + if (smap.values() instanceof SortedSet) { + assertNull(((SortedSet) smap.values()).last()); + } + } + } + } + }); + } + + void readEvenList() + throws Exception { + + readRunner.run(new TransactionWorker() { + public void doWork() { + int readBegin = ((beginKey & 1) != 0) ? + (beginKey + 1) : beginKey; + int readEnd = ((endKey & 1) != 0) ? (endKey - 1) : endKey; + int readIncr = 2; + + assertEquals(beginKey > endKey, list.isEmpty()); + ListIterator iter = (ListIterator) iterator(list); + try { + int idx = 0; + for (int i = readBegin; i <= readEnd; i += readIncr) { + assertTrue(iter.hasNext()); + assertEquals(idx, iter.nextIndex()); + Object val = iter.next(); + assertEquals(idx, iter.previousIndex()); + if (isEntityBinding) { + assertEquals(i, intVal(val)); + } else { + assertEquals(makeVal(i), val); + } + idx += 1; + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + } + }); + } + + void readIterator(Collection coll, Iterator iter, + int beginValue, int endValue) { + + ListIterator li = (ListIterator) iter; + boolean isList = (coll instanceof List); + Iterator clone = null; + try { + // at beginning + assertTrue(!li.hasPrevious()); + assertTrue(!li.hasPrevious()); + try { li.previous(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(-1, li.previousIndex()); + } + if (endValue < beginValue) { + // is empty + assertTrue(!iter.hasNext()); + try { iter.next(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(Integer.MAX_VALUE, li.nextIndex()); + } + } + // loop thru all and collect in array + int[] values = new int[endValue - beginValue + 1]; + for (int i = beginValue; i <= endValue; i += 1) { + assertTrue(iter.hasNext()); + int idx = i - beginKey; + if (isList) { + assertEquals(idx, li.nextIndex()); + } + int value = intIter(coll, iter.next()); + if (isList) { + assertEquals(idx, li.previousIndex()); + } + values[i - beginValue] = value; + if (((StoredCollection) coll).isOrdered()) { + assertEquals(i, value); + } else { + assertTrue(value >= beginValue); + assertTrue(value <= endValue); + } + } + // at end + assertTrue(!iter.hasNext()); + try { iter.next(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(Integer.MAX_VALUE, li.nextIndex()); + } + // clone at same position + clone = StoredCollections.iterator(iter); + 
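// the clone is an independent cursor at the same position; reversing the original below must not move it +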
assertTrue(!clone.hasNext()); + // loop thru in reverse + for (int i = endValue; i >= beginValue; i -= 1) { + assertTrue(li.hasPrevious()); + int idx = i - beginKey; + if (isList) { + assertEquals(idx, li.previousIndex()); + } + int value = intIter(coll, li.previous()); + if (isList) { + assertEquals(idx, li.nextIndex()); + } + assertEquals(values[i - beginValue], value); + } + // clone should not have changed + assertTrue(!clone.hasNext()); + // at beginning + assertTrue(!li.hasPrevious()); + try { li.previous(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(-1, li.previousIndex()); + } + // loop thru with some back-and-forth + for (int i = beginValue; i <= endValue; i += 1) { + assertTrue(iter.hasNext()); + int idx = i - beginKey; + if (isList) { + assertEquals(idx, li.nextIndex()); + } + Object obj = iter.next(); + if (isList) { + assertEquals(idx, li.previousIndex()); + } + assertEquals(obj, li.previous()); + if (isList) { + assertEquals(idx, li.nextIndex()); + } + assertEquals(obj, iter.next()); + if (isList) { + assertEquals(idx, li.previousIndex()); + } + int value = intIter(coll, obj); + assertEquals(values[i - beginValue], value); + } + // at end + assertTrue(!iter.hasNext()); + try { iter.next(); } catch (NoSuchElementException e) {} + if (isList) { + assertEquals(Integer.MAX_VALUE, li.nextIndex()); + } + } finally { + StoredIterator.close(iter); + StoredIterator.close(clone); + } + } + + void bulkOperations() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() { + HashMap hmap = new HashMap(); + for (int i = Math.max(1, beginKey); + i <= Math.min(maxKey, endKey); + i += 1) { + hmap.put(makeKey(i), makeVal(i)); + } + assertEquals(hmap, map); + assertEquals(hmap.entrySet(), map.entrySet()); + assertEquals(hmap.keySet(), map.keySet()); + assertEquals(map.values(), hmap.values()); + + assertTrue(map.entrySet().containsAll(hmap.entrySet())); + assertTrue(map.keySet().containsAll(hmap.keySet())); + assertTrue(map.values().containsAll(hmap.values())); + + map.clear(); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertEquals(hmap, map); + + assertTrue(map.entrySet().removeAll(hmap.entrySet())); + assertTrue(map.entrySet().isEmpty()); + assertTrue(!map.entrySet().removeAll(hmap.entrySet())); + assertTrue(imap.entrySet().addAll(hmap.entrySet())); + assertTrue(map.entrySet().containsAll(hmap.entrySet())); + assertTrue(!imap.entrySet().addAll(hmap.entrySet())); + assertEquals(hmap, map); + + assertTrue(!map.entrySet().retainAll(hmap.entrySet())); + assertEquals(hmap, map); + assertTrue(map.entrySet().retainAll(Collections.EMPTY_SET)); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertEquals(hmap, map); + + assertTrue(map.values().removeAll(hmap.values())); + assertTrue(map.values().isEmpty()); + assertTrue(!map.values().removeAll(hmap.values())); + if (isEntityBinding) { + assertTrue(imap.values().addAll(hmap.values())); + assertTrue(map.values().containsAll(hmap.values())); + assertTrue(!imap.values().addAll(hmap.values())); + } else { + imap.putAll(hmap); + } + assertEquals(hmap, map); + + assertTrue(!map.values().retainAll(hmap.values())); + assertEquals(hmap, map); + assertTrue(map.values().retainAll(Collections.EMPTY_SET)); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertEquals(hmap, map); + + assertTrue(map.keySet().removeAll(hmap.keySet())); + assertTrue(map.keySet().isEmpty()); + assertTrue(!map.keySet().removeAll(hmap.keySet())); + assertTrue(imap.keySet().addAll(hmap.keySet())); + 
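// keys added via keySet() carry no usable values, hence the restore-to-non-null step below +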
assertTrue(imap.keySet().containsAll(hmap.keySet())); + if (index != null) { + assertTrue(map.keySet().isEmpty()); + } + assertTrue(!imap.keySet().addAll(hmap.keySet())); + // restore values to non-null + imap.keySet().removeAll(hmap.keySet()); + imap.putAll(hmap); + assertEquals(hmap, map); + + assertTrue(!map.keySet().retainAll(hmap.keySet())); + assertEquals(hmap, map); + assertTrue(map.keySet().retainAll(Collections.EMPTY_SET)); + assertTrue(map.isEmpty()); + imap.putAll(hmap); + assertEquals(hmap, map); + } + }); + } + + void bulkListOperations() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() { + ArrayList alist = new ArrayList(); + for (int i = beginKey; i <= endKey; i += 1) { + alist.add(makeVal(i)); + } + + assertEquals(alist, list); + assertTrue(list.containsAll(alist)); + + if (isListAddAllowed()) { + list.clear(); + assertTrue(list.isEmpty()); + assertTrue(ilist.addAll(alist)); + assertEquals(alist, list); + } + + assertTrue(!list.retainAll(alist)); + assertEquals(alist, list); + + if (isListAddAllowed()) { + assertTrue(list.retainAll(Collections.EMPTY_SET)); + assertTrue(list.isEmpty()); + assertTrue(ilist.addAll(alist)); + assertEquals(alist, list); + } + + if (isListAddAllowed() && !isEntityBinding) { + // deleting in a renumbered list with entity binding will + // change the values dynamically, making it very difficult + // to test + assertTrue(list.removeAll(alist)); + assertTrue(list.isEmpty()); + assertTrue(!list.removeAll(alist)); + assertTrue(ilist.addAll(alist)); + assertTrue(list.containsAll(alist)); + assertEquals(alist, list); + } + + if (isListAddAllowed() && !isEntityBinding) { + // addAll at an index is also very difficult to test with + // an entity binding + + // addAll at first index + ilist.addAll(beginKey, alist); + assertTrue(list.containsAll(alist)); + assertEquals(2 * alist.size(), countElements(list)); + for (int i = beginKey; i <= endKey; i += 1) + ilist.remove(beginKey); + assertEquals(alist, list); + + // addAll at last index + ilist.addAll(endKey, alist); + assertTrue(list.containsAll(alist)); + assertEquals(2 * alist.size(), countElements(list)); + for (int i = beginKey; i <= endKey; i += 1) + ilist.remove(endKey); + assertEquals(alist, list); + + // addAll in the middle + ilist.addAll(endKey - 1, alist); + assertTrue(list.containsAll(alist)); + assertEquals(2 * alist.size(), countElements(list)); + for (int i = beginKey; i <= endKey; i += 1) + ilist.remove(endKey - 1); + assertEquals(alist, list); + } + } + }); + } + + void readWriteRange(final int type, final int rangeBegin, + final int rangeEnd) + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + setRange(type, rangeBegin, rangeEnd); + createOutOfRange(rangeBegin, rangeEnd); + if (rangeType != TAIL) { + writeOutOfRange(new Long(rangeEnd + 1)); + } + if (rangeType != HEAD) { + writeOutOfRange(new Long(rangeBegin - 1)); + } + if (rangeBegin <= rangeEnd) { + updateAll(); + } + if (rangeBegin < rangeEnd && !map.areKeysRenumbered()) { + bulkOperations(); + removeIter(); + } + readAll(); + clearRange(); + } + }); + } + + void setRange(int type, int rangeBegin, int rangeEnd) { + + rangeType = type; + saveMap = map; + saveSMap = smap; + saveList = list; + int listBegin = rangeBegin - beginKey; + boolean canMakeSubList = (list != null && listBegin>= 0); + if (!canMakeSubList) { + list = null; + } + if (list != null) { + try { + list.subList(-1, 0); + fail(); + } catch (IndexOutOfBoundsException e) { } + } 
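+        // each case builds an equivalent range via subMap/headMap/tailMap and cross-checks the entrySet/keySet/values views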
+ switch (type) { + + case SUB: + smap = (StoredSortedMap) smap.subMap(makeKey(rangeBegin), + makeKey(rangeEnd + 1)); + if (canMakeSubList) { + list = (StoredList) list.subList(listBegin, + rangeEnd + 1 - beginKey); + } + // check for equivalent ranges + assertEquals(smap, + (saveSMap).subMap( + makeKey(rangeBegin), true, + makeKey(rangeEnd + 1), false)); + assertEquals(smap.entrySet(), + ((StoredSortedEntrySet) saveSMap.entrySet()).subSet( + mapEntry(rangeBegin), true, + mapEntry(rangeEnd + 1), false)); + assertEquals(smap.keySet(), + ((StoredSortedKeySet) saveSMap.keySet()).subSet( + makeKey(rangeBegin), true, + makeKey(rangeEnd + 1), false)); + if (smap.values() instanceof SortedSet) { + assertEquals(smap.values(), + ((StoredSortedValueSet) saveSMap.values()).subSet( + makeVal(rangeBegin), true, + makeVal(rangeEnd + 1), false)); + } + break; + case HEAD: + smap = (StoredSortedMap) smap.headMap(makeKey(rangeEnd + 1)); + if (canMakeSubList) { + list = (StoredList) list.subList(0, + rangeEnd + 1 - beginKey); + } + // check for equivalent ranges + assertEquals(smap, + (saveSMap).headMap( + makeKey(rangeEnd + 1), false)); + assertEquals(smap.entrySet(), + ((StoredSortedEntrySet) saveSMap.entrySet()).headSet( + mapEntry(rangeEnd + 1), false)); + assertEquals(smap.keySet(), + ((StoredSortedKeySet) saveSMap.keySet()).headSet( + makeKey(rangeEnd + 1), false)); + if (smap.values() instanceof SortedSet) { + assertEquals(smap.values(), + ((StoredSortedValueSet) saveSMap.values()).headSet( + makeVal(rangeEnd + 1), false)); + } + break; + case TAIL: + smap = (StoredSortedMap) smap.tailMap(makeKey(rangeBegin)); + if (canMakeSubList) { + list = (StoredList) list.subList(listBegin, + maxKey + 1 - beginKey); + } + // check for equivalent ranges + assertEquals(smap, + (saveSMap).tailMap( + makeKey(rangeBegin), true)); + assertEquals(smap.entrySet(), + ((StoredSortedEntrySet) saveSMap.entrySet()).tailSet( + mapEntry(rangeBegin), true)); + assertEquals(smap.keySet(), + ((StoredSortedKeySet) saveSMap.keySet()).tailSet( + makeKey(rangeBegin), true)); + if (smap.values() instanceof SortedSet) { + assertEquals(smap.values(), + ((StoredSortedValueSet) saveSMap.values()).tailSet( + makeVal(rangeBegin), true)); + } + break; + default: throw new RuntimeException(); + } + map = smap; + beginKey = rangeBegin; + if (rangeBegin < 1 || rangeEnd > maxKey) { + endKey = rangeBegin - 1; // force empty range for readAll() + } else { + endKey = rangeEnd; + } + } + + void clearRange() { + + rangeType = NONE; + beginKey = 1; + endKey = maxKey; + map = saveMap; + smap = saveSMap; + list = saveList; + } + + void createOutOfRange(int rangeBegin, int rangeEnd) { + // map + + if (rangeType != TAIL) { + try { + smap.subMap(makeKey(rangeBegin), makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + smap.headMap(makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + checkDupsSize(0, smap.duplicates(makeKey(rangeEnd + 2))); + } + if (rangeType != HEAD) { + try { + smap.subMap(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + smap.tailMap(makeKey(rangeBegin - 1)); + fail(); + } catch (IllegalArgumentException e) { } + checkDupsSize(0, smap.duplicates(makeKey(rangeBegin - 1))); + } + + // keySet + + if (rangeType != TAIL) { + SortedSet sset = (SortedSet) map.keySet(); + try { + sset.subSet(makeKey(rangeBegin), makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + 
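// a headSet bound beyond the current range must be rejected +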
sset.headSet(makeKey(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + iterator(sset.subSet(makeKey(rangeEnd + 1), + makeKey(rangeEnd + 2))); + fail(); + } catch (IllegalArgumentException e) { } + } + if (rangeType != HEAD) { + SortedSet sset = (SortedSet) map.keySet(); + try { + sset.subSet(makeKey(rangeBegin - 1), makeKey(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.tailSet(makeKey(rangeBegin - 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + iterator(sset.subSet(makeKey(rangeBegin - 1), + makeKey(rangeBegin))); + fail(); + } catch (IllegalArgumentException e) { } + } + + // entrySet + + if (rangeType != TAIL) { + SortedSet sset = (SortedSet) map.entrySet(); + try { + sset.subSet(mapEntry(rangeBegin), mapEntry(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.headSet(mapEntry(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + iterator(sset.subSet(mapEntry(rangeEnd + 1), + mapEntry(rangeEnd + 2))); + fail(); + } catch (IllegalArgumentException e) { } + } + if (rangeType != HEAD) { + SortedSet sset = (SortedSet) map.entrySet(); + try { + sset.subSet(mapEntry(rangeBegin - 1), mapEntry(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.tailSet(mapEntry(rangeBegin - 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + iterator(sset.subSet(mapEntry(rangeBegin - 1), + mapEntry(rangeBegin))); + fail(); + } catch (IllegalArgumentException e) { } + } + + // values + + if (map.values() instanceof SortedSet) { + SortedSet sset = (SortedSet) map.values(); + if (rangeType != TAIL) { + try { + sset.subSet(makeVal(rangeBegin), + makeVal(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.headSet(makeVal(rangeEnd + 2)); + fail(); + } catch (IllegalArgumentException e) { } + } + if (rangeType != HEAD) { + try { + sset.subSet(makeVal(rangeBegin - 1), + makeVal(rangeEnd + 1)); + fail(); + } catch (IllegalArgumentException e) { } + try { + sset.tailSet(makeVal(rangeBegin - 1)); + fail(); + } catch (IllegalArgumentException e) { } + } + } + + // list + + if (list != null) { + int size = rangeEnd - rangeBegin + 1; + try { + list.subList(0, size + 1); + fail(); + } catch (IndexOutOfBoundsException e) { } + try { + list.subList(-1, size); + fail(); + } catch (IndexOutOfBoundsException e) { } + try { + list.subList(2, 1); + fail(); + } catch (IndexOutOfBoundsException e) { } + try { + list.subList(size, size); + fail(); + } catch (IndexOutOfBoundsException e) { } + } + } + + void writeOutOfRange(Long badNewKey) { + try { + map.put(badNewKey, makeVal(badNewKey)); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.toString(), index == null); + } catch (UnsupportedOperationException e) { + assertTrue(index != null); + } + try { + map.keySet().add(badNewKey); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(index == null); + } catch (UnsupportedOperationException e) { + assertTrue(index != null); + } + try { + map.values().add(makeEntity(badNewKey)); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(isEntityBinding && index == null); + } catch (UnsupportedOperationException e) { + assertTrue(!(isEntityBinding && index == null)); + } + if (list != null) { + int i = badNewKey.intValue() - beginKey; + try { + list.set(i, makeVal(i)); + fail(); + } catch (IndexOutOfBoundsException e) { + assertTrue(index == null); + } catch 
(UnsupportedOperationException e) { + assertTrue(index != null); + } + try { + list.add(i, makeVal(badNewKey)); + fail(); + } catch (UnsupportedOperationException e) { + } + } + } + + void readWriteDuplicates() + throws Exception { + + writeRunner.run(new TransactionWorker() { + public void doWork() throws Exception { + if (index == null) { + readWritePrimaryDuplicates(beginKey); + readWritePrimaryDuplicates(beginKey + 1); + readWritePrimaryDuplicates(endKey); + readWritePrimaryDuplicates(endKey - 1); + } else { + readWriteIndexedDuplicates(beginKey); + readWriteIndexedDuplicates(beginKey + 1); + readWriteIndexedDuplicates(endKey); + readWriteIndexedDuplicates(endKey - 1); + } + } + }); + } + + void readWritePrimaryDuplicates(int i) + throws Exception { + + Collection dups; + // make duplicate values + final Long key = makeKey(i); + final Object[] values = new Object[5]; + for (int j = 0; j < values.length; j += 1) { + values[j] = isEntityBinding + ? makeEntity(i, i + j) + : makeVal(i + j); + } + // add duplicates + outerLoop: for (int writeMode = 0;; writeMode += 1) { + //System.out.println("write mode " + writeMode); + switch (writeMode) { + case 0: + case 1: { + // write with Map.put() + for (int j = 1; j < values.length; j += 1) { + map.put(key, values[j]); + } + break; + } + case 2: { + // write with Map.duplicates().add() + dups = map.duplicates(key); + for (int j = 1; j < values.length; j += 1) { + dups.add(values[j]); + } + break; + } + case 3: { + // write with Map.duplicates().iterator().add() + writeIterRunner.run(new TransactionWorker() { + public void doWork() { + Collection dups = map.duplicates(key); + Iterator iter = iterator(dups); + assertEquals(values[0], iter.next()); + assertTrue(!iter.hasNext()); + try { + for (int j = 1; j < values.length; j += 1) { + ((ListIterator) iter).add(values[j]); + } + } finally { + StoredIterator.close(iter); + } + } + }); + break; + } + case 4: { + // write with Map.values().add() + if (!isEntityBinding) { + continue; + } + Collection set = map.values(); + for (int j = 1; j < values.length; j += 1) { + set.add(values[j]); + } + break; + } + default: { + break outerLoop; + } + } + checkDupsSize(values.length, map.duplicates(key)); + // read duplicates + readDuplicates(i, key, values); + // remove duplicates + switch (writeMode) { + case 0: { + // remove with Map.remove() + checkDupsSize(values.length, map.duplicates(key)); + map.remove(key); // remove all values + checkDupsSize(0, map.duplicates(key)); + map.put(key, values[0]); // put back original value + checkDupsSize(1, map.duplicates(key)); + break; + } + case 1: { + // remove with Map.keySet().remove() + map.keySet().remove(key); // remove all values + map.put(key, values[0]); // put back original value + break; + } + case 2: { + // remove with Map.duplicates().clear() + dups = map.duplicates(key); + dups.clear(); // remove all values + dups.add(values[0]); // put back original value + break; + } + case 3: { + // remove with Map.duplicates().iterator().remove() + writeIterRunner.run(new TransactionWorker() { + public void doWork() { + Collection dups = map.duplicates(key); + Iterator iter = iterator(dups); + try { + for (int j = 0; j < values.length; j += 1) { + assertEquals(values[j], iter.next()); + if (j != 0) { + iter.remove(); + } + } + } finally { + StoredIterator.close(iter); + } + } + }); + break; + } + case 4: { + // remove with Map.values().remove() + if (!isEntityBinding) { + throw new IllegalStateException(); + } + Collection set = map.values(); + for (int j = 1; j < 
values.length; j += 1) { + set.remove(values[j]); + } + break; + } + default: throw new IllegalStateException(); + } + // verify that only original value is present + dups = map.duplicates(key); + assertTrue(dups.contains(values[0])); + for (int j = 1; j < values.length; j += 1) { + assertTrue(!dups.contains(values[j])); + } + checkDupsSize(1, dups); + } + } + + void readWriteIndexedDuplicates(int i) { + Object key = makeKey(i); + Object[] values = new Object[3]; + values[0] = makeVal(i); + for (int j = 1; j < values.length; j += 1) { + values[j] = isEntityBinding + ? makeEntity(endKey + j, i) + : makeVal(i); + } + // add duplicates + for (int j = 1; j < values.length; j += 1) { + imap.put(makeKey(endKey + j), values[j]); + } + // read duplicates + readDuplicates(i, key, values); + // remove duplicates + for (int j = 1; j < values.length; j += 1) { + imap.remove(makeKey(endKey + j)); + } + checkDupsSize(1, map.duplicates(key)); + } + + void readDuplicates(int i, Object key, Object[] values) { + + boolean isOrdered = map.isOrdered(); + Collection dups; + Iterator iter; + // read with Map.duplicates().iterator() + dups = map.duplicates(key); + checkDupsSize(values.length, dups); + iter = iterator(dups); + try { + for (int j = 0; j < values.length; j += 1) { + assertTrue(iter.hasNext()); + Object val = iter.next(); + assertEquals(values[j], val); + } + assertTrue(!iter.hasNext()); + } finally { + StoredIterator.close(iter); + } + // read with Map.values().iterator() + Collection clone = ((StoredCollection) map.values()).toList(); + iter = iterator(map.values()); + try { + for (int j = beginKey; j < i; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeVal(j))); + if (isOrdered) { + assertEquals(makeVal(j), val); + } + } + for (int j = 0; j < values.length; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(values[j])); + if (isOrdered) { + assertEquals(values[j], val); + } + } + for (int j = i + 1; j <= endKey; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeVal(j))); + if (isOrdered) { + assertEquals(makeVal(j), val); + } + } + assertTrue(!iter.hasNext()); + assertTrue(clone.isEmpty()); + } finally { + StoredIterator.close(iter); + } + // read with Map.entrySet().iterator() + clone = ((StoredCollection) map.entrySet()).toList(); + iter = iterator(map.entrySet()); + try { + for (int j = beginKey; j < i; j += 1) { + Map.Entry entry = (Map.Entry) iter.next(); + assertTrue(clone.remove(mapEntry(j))); + if (isOrdered) { + assertEquals(makeVal(j), entry.getValue()); + assertEquals(makeKey(j), entry.getKey()); + } + } + for (int j = 0; j < values.length; j += 1) { + Map.Entry entry = (Map.Entry) iter.next(); + assertTrue(clone.remove(mapEntry(makeKey(i), values[j]))); + if (isOrdered) { + assertEquals(values[j], entry.getValue()); + assertEquals(makeKey(i), entry.getKey()); + } + } + for (int j = i + 1; j <= endKey; j += 1) { + Map.Entry entry = (Map.Entry) iter.next(); + assertTrue(clone.remove(mapEntry(j))); + if (isOrdered) { + assertEquals(makeVal(j), entry.getValue()); + assertEquals(makeKey(j), entry.getKey()); + } + } + assertTrue(!iter.hasNext()); + assertTrue(clone.isEmpty()); + } finally { + StoredIterator.close(iter); + } + // read with Map.keySet().iterator() + clone = ((StoredCollection) map.keySet()).toList(); + iter = iterator(map.keySet()); + try { + for (int j = beginKey; j < i; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeKey(j))); + if (isOrdered) { + assertEquals(makeKey(j), val); + } + } + if (true) { + // 
only one key is iterated for all duplicates + Object val = iter.next(); + assertTrue(clone.remove(makeKey(i))); + if (isOrdered) { + assertEquals(makeKey(i), val); + } + } + for (int j = i + 1; j <= endKey; j += 1) { + Object val = iter.next(); + assertTrue(clone.remove(makeKey(j))); + if (isOrdered) { + assertEquals(makeKey(j), val); + } + } + assertTrue(!iter.hasNext()); + assertTrue(clone.isEmpty()); + } finally { + StoredIterator.close(iter); + } + } + + void duplicatesNotAllowed() { + + Collection dups = map.duplicates(makeKey(beginKey)); + try { + dups.add(makeVal(beginKey)); + fail(); + } catch (UnsupportedOperationException expected) { } + ListIterator iter = (ListIterator) iterator(dups); + try { + iter.add(makeVal(beginKey)); + fail(); + } catch (UnsupportedOperationException expected) { + } finally { + StoredIterator.close(iter); + } + } + + void listOperationsNotAllowed() { + + ListIterator iter = (ListIterator) iterator(map.values()); + try { + try { + iter.nextIndex(); + fail(); + } catch (UnsupportedOperationException expected) { } + try { + iter.previousIndex(); + fail(); + } catch (UnsupportedOperationException expected) { } + } finally { + StoredIterator.close(iter); + } + } + + void testCdbLocking() { + + Iterator readIterator; + Iterator writeIterator; + StoredKeySet set = (StoredKeySet) map.keySet(); + + // can open two CDB read cursors + readIterator = set.storedIterator(false); + try { + Iterator readIterator2 = set.storedIterator(false); + StoredIterator.close(readIterator2); + } finally { + StoredIterator.close(readIterator); + } + + // can open two CDB write cursors + writeIterator = set.storedIterator(true); + try { + Iterator writeIterator2 = set.storedIterator(true); + StoredIterator.close(writeIterator2); + } finally { + StoredIterator.close(writeIterator); + } + + // cannot open CDB write cursor when read cursor is open, + readIterator = set.storedIterator(false); + try { + writeIterator = set.storedIterator(true); + fail(); + StoredIterator.close(writeIterator); + } catch (IllegalStateException e) { + } finally { + StoredIterator.close(readIterator); + } + + if (index == null) { + // cannot put() with read cursor open + readIterator = set.storedIterator(false); + try { + map.put(makeKey(1), makeVal(1)); + fail(); + } catch (IllegalStateException e) { + } finally { + StoredIterator.close(readIterator); + } + + // cannot append() with write cursor open with RECNO/QUEUE only + writeIterator = set.storedIterator(true); + try { + if (testStore.isQueueOrRecno()) { + try { + map.append(makeVal(1)); + fail(); + } catch (IllegalStateException e) {} + } else { + map.append(makeVal(1)); + } + } finally { + StoredIterator.close(writeIterator); + } + } + } + + Object makeVal(int key) { + + if (isEntityBinding) { + return makeEntity(key); + } else { + return new Long(key + 100); + } + } + + Object makeVal(int key, int val) { + + if (isEntityBinding) { + return makeEntity(key, val); + } else { + return makeVal(val); + } + } + + Object makeEntity(int key, int val) { + + return new TestEntity(key, val + 100); + } + + int intVal(Object val) { + + if (isEntityBinding) { + return ((TestEntity) val).value - 100; + } else { + return ((Long) val).intValue() - 100; + } + } + + int intKey(Object key) { + + return ((Long) key).intValue(); + } + + Object makeVal(Long key) { + + return makeVal(key.intValue()); + } + + Object makeEntity(int key) { + + return makeEntity(key, key); + } + + Object makeEntity(Long key) { + + return makeEntity(key.intValue()); + } + + int intIter(Collection 
coll, Object value) { + + if (coll instanceof StoredKeySet) { + return intKey(value); + } else { + if (coll instanceof StoredEntrySet) { + value = ((Map.Entry) value).getValue(); + } + return intVal(value); + } + } + + Map.Entry mapEntry(Object key, Object val) { + + return new MapEntryParameter(key, val); + } + + Map.Entry mapEntry(int key) { + + return new MapEntryParameter(makeKey(key), makeVal(key)); + } + + Long makeKey(int key) { + + return new Long(key); + } + + boolean isSubMap() { + + return rangeType != NONE; + } + + void checkDupsSize(int expected, Collection coll) { + + assertEquals(expected, coll.size()); + if (coll instanceof StoredCollection) { + StoredIterator i = ((StoredCollection) coll).storedIterator(false); + try { + int actual = 0; + if (i.hasNext()) { + i.next(); + actual = i.count(); + } + assertEquals(expected, actual); + } finally { + StoredIterator.close(i); + } + } + } + + private boolean isListAddAllowed() { + + return list != null && testStore.isQueueOrRecno() && + list.areKeysRenumbered(); + } + + private int countElements(Collection coll) { + + int count = 0; + Iterator iter = iterator(coll); + try { + while (iter.hasNext()) { + iter.next(); + count += 1; + } + } finally { + StoredIterator.close(iter); + } + return count; + } +} diff --git a/test/com/sleepycat/collections/test/ForeignKeyTest.java b/test/com/sleepycat/collections/test/ForeignKeyTest.java new file mode 100644 index 0000000..f3e0632 --- /dev/null +++ b/test/com/sleepycat/collections/test/ForeignKeyTest.java @@ -0,0 +1,365 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialMarshalledKeyCreator; +import com.sleepycat.bind.serial.test.MarshalledObject; +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.collections.TupleSerialFactory; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DeleteConstraintException; +/* */ +import com.sleepycat.je.Environment; +/* */ +import com.sleepycat.je.ForeignConstraintException; +/* */ +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.util.ExceptionUnwrapper; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class ForeignKeyTest extends TestBase { + + private static final ForeignKeyDeleteAction[] ACTIONS = { + ForeignKeyDeleteAction.ABORT, + ForeignKeyDeleteAction.NULLIFY, + ForeignKeyDeleteAction.CASCADE, + }; + private static final String[] ACTION_LABELS = { + "ABORT", + "NULLIFY", + "CASCADE", + }; + + @Parameters + public static List genParams() { + List params = new ArrayList(); + for (TestEnv testEnv : TestEnv.ALL) { + int i = 0; + for (ForeignKeyDeleteAction action : ACTIONS) { + params.add(new Object[]{testEnv, action, ACTION_LABELS[i]}); + i ++; + } + } + + return params; + } + + private TestEnv testEnv; + private Environment env; + private StoredClassCatalog catalog; + private TupleSerialFactory factory; + private Database store1; + private Database store2; + private SecondaryDatabase index1; + private SecondaryDatabase index2; + private Map storeMap1; + private Map storeMap2; + private Map indexMap1; + private Map indexMap2; + private final ForeignKeyDeleteAction onDelete; + + public ForeignKeyTest(TestEnv testEnv, ForeignKeyDeleteAction onDelete, + String onDeleteLabel) { + + customName = + "ForeignKeyTest-" + testEnv.getName() + '-' + onDeleteLabel; + + this.testEnv = testEnv; + this.onDelete = onDelete; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + SharedTestUtils.printTestName(customName); + env = testEnv.open(customName); + createDatabase(); + } + + @After + public void tearDown() { + + try { + if (index1 != null) { + index1.close(); + } + if (index2 != null) { + index2.close(); + } + if (store1 != null) { + store1.close(); + } + if (store2 != null) { + store2.close(); + } + if (catalog != null) { + catalog.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } finally { + /* Ensure that GC can cleanup. 
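Nulling these references lets the environment and databases be reclaimed between parameterized runs.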
*/ + env = null; + testEnv = null; + catalog = null; + store1 = null; + store2 = null; + index1 = null; + index2 = null; + factory = null; + storeMap1 = null; + storeMap2 = null; + indexMap1 = null; + indexMap2 = null; + } + } + + @Test + public void runTest() + throws Exception { + + try { + createViews(); + writeAndRead(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } + + private void createDatabase() + throws Exception { + + catalog = new StoredClassCatalog(openDb("catalog.db")); + factory = new TupleSerialFactory(catalog); + assertSame(catalog, factory.getCatalog()); + + store1 = openDb("store1.db"); + store2 = openDb("store2.db"); + index1 = openSecondaryDb(factory, "1", store1, "index1.db", null); + index2 = openSecondaryDb(factory, "2", store2, "index2.db", store1); + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(testEnv.isTxnMode()); + config.setAllowCreate(true); + + return DbCompat.testOpenDatabase(env, null, file, null, config); + } + + private SecondaryDatabase openSecondaryDb(TupleSerialFactory factory, + String keyName, + Database primary, + String file, + Database foreignStore) + throws Exception { + + TupleSerialMarshalledKeyCreator keyCreator = + factory.getKeyCreator(MarshalledObject.class, keyName); + + SecondaryConfig secConfig = new SecondaryConfig(); + DbCompat.setTypeBtree(secConfig); + secConfig.setTransactional(testEnv.isTxnMode()); + secConfig.setAllowCreate(true); + secConfig.setKeyCreator(keyCreator); + if (foreignStore != null) { + secConfig.setForeignKeyDatabase(foreignStore); + secConfig.setForeignKeyDeleteAction(onDelete); + if (onDelete == ForeignKeyDeleteAction.NULLIFY) { + secConfig.setForeignKeyNullifier(keyCreator); + } + } + + return DbCompat.testOpenSecondaryDatabase + (env, null, file, null, primary, secConfig); + } + + private void createViews() { + storeMap1 = factory.newMap(store1, String.class, + MarshalledObject.class, true); + storeMap2 = factory.newMap(store2, String.class, + MarshalledObject.class, true); + indexMap1 = factory.newMap(index1, String.class, + MarshalledObject.class, true); + indexMap2 = factory.newMap(index2, String.class, + MarshalledObject.class, true); + } + + private void writeAndRead() + throws Exception { + + CurrentTransaction txn = CurrentTransaction.getInstance(env); + if (txn != null) { + txn.beginTransaction(null); + } + + MarshalledObject o1 = new MarshalledObject("data1", "pk1", "ik1", ""); + assertNull(storeMap1.put(null, o1)); + + assertEquals(o1, storeMap1.get("pk1")); + assertEquals(o1, indexMap1.get("ik1")); + + MarshalledObject o2 = new MarshalledObject("data2", "pk2", "", "pk1"); + assertNull(storeMap2.put(null, o2)); + + assertEquals(o2, storeMap2.get("pk2")); + assertEquals(o2, indexMap2.get("pk1")); + + if (txn != null) { + txn.commitTransaction(); + txn.beginTransaction(null); + } + + /* + * store1 contains o1 with primary key "pk1" and index key "ik1". + * + * store2 contains o2 with primary key "pk2" and foreign key "pk1", + * which is the primary key of store1. + */ + + if (onDelete == ForeignKeyDeleteAction.ABORT) { + + /* Test that we abort trying to delete a referenced key. 
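With ABORT, the delete must throw and the transaction is rolled back below.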
+             */
+
+            try {
+                storeMap1.remove("pk1");
+                fail();
+            /* */
+            } catch (DeleteConstraintException expected) {
+            /* */
+            } catch (RuntimeExceptionWrapper expected) {
+                assertTrue(expected.getCause() instanceof DatabaseException);
+                assertTrue(!DbCompat.NEW_JE_EXCEPTIONS);
+            }
+            if (txn != null) {
+                txn.abortTransaction();
+                txn.beginTransaction(null);
+            }
+
+            /* Test that we can put a record into store2 with a null foreign
+             * key value. */
+
+            o2 = new MarshalledObject("data2", "pk2", "", "");
+            assertNotNull(storeMap2.put(null, o2));
+            assertEquals(o2, storeMap2.get("pk2"));
+
+            /* The index2 record should have been deleted since the key was
+             * set to null above. */
+
+            assertNull(indexMap2.get("pk1"));
+
+            /* Test that now we can delete the record in store1, since it is
+             * no longer referenced. */
+
+            assertNotNull(storeMap1.remove("pk1"));
+            assertNull(storeMap1.get("pk1"));
+            assertNull(indexMap1.get("ik1"));
+
+        } else if (onDelete == ForeignKeyDeleteAction.NULLIFY) {
+
+            /* Delete the referenced key. */
+
+            assertNotNull(storeMap1.remove("pk1"));
+            assertNull(storeMap1.get("pk1"));
+            assertNull(indexMap1.get("ik1"));
+
+            /* The store2 record should still exist, but should have an empty
+             * secondary key since it was nullified. */
+
+            o2 = (MarshalledObject) storeMap2.get("pk2");
+            assertNotNull(o2);
+            assertEquals("data2", o2.getData());
+            assertEquals("pk2", o2.getPrimaryKey());
+            assertEquals("", o2.getIndexKey1());
+            assertEquals("", o2.getIndexKey2());
+
+        } else if (onDelete == ForeignKeyDeleteAction.CASCADE) {
+
+            /* Delete the referenced key. */
+
+            assertNotNull(storeMap1.remove("pk1"));
+            assertNull(storeMap1.get("pk1"));
+            assertNull(indexMap1.get("ik1"));
+
+            /* The store2 record should have been deleted also. */
+
+            assertNull(storeMap2.get("pk2"));
+            assertNull(indexMap2.get("pk1"));
+
+        } else {
+            throw new IllegalStateException();
+        }
+
+        /*
+         * Test that a foreign key value that is not present in the foreign
+         * store may not be used. "pk2" is not in store1 in this case.
+         */
+        assertNull(storeMap1.get("pk2"));
+        MarshalledObject o3 = new MarshalledObject("data3", "pk3", "", "pk2");
+        try {
+            storeMap2.put(null, o3);
+            fail();
+        /* */
+        } catch (ForeignConstraintException expected) {
+        /* */
+        } catch (RuntimeExceptionWrapper expected) {
+            assertTrue(expected.getCause() instanceof DatabaseException);
+            assertTrue(!DbCompat.NEW_JE_EXCEPTIONS);
+        }
+
+        if (txn != null) {
+            txn.abortTransaction();
+        }
+    }
+}
diff --git a/test/com/sleepycat/collections/test/IterDeadlockTest.java b/test/com/sleepycat/collections/test/IterDeadlockTest.java
new file mode 100644
index 0000000..457fbf3
--- /dev/null
+++ b/test/com/sleepycat/collections/test/IterDeadlockTest.java
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Iterator; +import java.util.ListIterator; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.ByteArrayBinding; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * Tests the fix for [#10516], where the StoredIterator constructor was not + * closing the cursor when an exception occurred. For example, a deadlock + * exception might occur if the constructor was unable to move the cursor to + * the first element. + * @author Mark Hayes + */ +public class IterDeadlockTest extends TestBase { + + private static final byte[] ONE = { 1 }; + + private Environment env; + private Database store1; + private Database store2; + private StoredSortedMap map1; + private StoredSortedMap map2; + private final ByteArrayBinding binding = new ByteArrayBinding(); + + @Before + public void setUp() + throws Exception { + + env = TestEnv.TXN.open("IterDeadlockTest"); + store1 = openDb("store1.db"); + store2 = openDb("store2.db"); + map1 = new StoredSortedMap(store1, binding, binding, true); + map2 = new StoredSortedMap(store2, binding, binding, true); + } + + @After + public void tearDown() { + + if (store1 != null) { + try { + store1.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (store2 != null) { + try { + store2.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + /* Allow GC of DB objects in the test case. */ + env = null; + store1 = null; + store2 = null; + map1 = null; + map2 = null; + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(true); + config.setAllowCreate(true); + + return DbCompat.testOpenDatabase(env, null, file, null, config); + } + + @Test + public void testIterDeadlock() + throws Exception { + + final Object parent = new Object(); + final Object child1 = new Object(); + final Object child2 = new Object(); + final TransactionRunner runner = new TransactionRunner(env); + runner.setMaxRetries(0); + + /* Write a record in each db. */ + runner.run(new TransactionWorker() { + public void doWork() { + assertNull(map1.put(ONE, ONE)); + assertNull(map2.put(ONE, ONE)); + } + }); + + /* + * A thread to open iterator 1, then wait to be notified, then open + * iterator 2. + */ + final Thread thread1 = new Thread(new Runnable() { + public void run() { + try { + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + synchronized (child1) { + ListIterator i1 = + (ListIterator) map1.values().iterator(); + i1.next(); + i1.set(ONE); /* Write lock. 
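+                                 * set() writes through the iterator, so
+                                 * this record stays write-locked until the
+                                 * enclosing transaction ends; that retained
+                                 * lock is what later forces the deadlock
+                                 * between the two threads.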
*/ + StoredIterator.close(i1); + synchronized (parent) { parent.notify(); } + child1.wait(); + Iterator i2 = map2.values().iterator(); + assertTrue(i2.hasNext()); + StoredIterator.close(i2); + } + } + }); + } catch (LockConflictException expected) { + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + } + }); + + /* + * A thread to open iterator 2, then wait to be notified, then open + * iterator 1. + */ + final Thread thread2 = new Thread(new Runnable() { + public void run() { + try { + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + synchronized (child2) { + ListIterator i2 = + (ListIterator) map2.values().iterator(); + i2.next(); + i2.set(ONE); /* Write lock. */ + StoredIterator.close(i2); + synchronized (parent) { parent.notify(); } + child2.wait(); + Iterator i1 = map1.values().iterator(); + assertTrue(i1.hasNext()); + StoredIterator.close(i1); + } + } + }); + } catch (LockConflictException expected) { + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + } + }); + + /* + * Open iterator 1 in thread 1, then iterator 2 in thread 2, then let + * the threads run to open the other iterators and cause a deadlock. + */ + synchronized (parent) { + thread1.start(); + parent.wait(); + thread2.start(); + parent.wait(); + synchronized (child1) { child1.notify(); } + synchronized (child2) { child2.notify(); } + thread1.join(); + thread2.join(); + } + + /* + * Before the fix for [#10516] we would get an exception indicating + * that cursors were not closed, when closing the stores below. + */ + store1.close(); + store1 = null; + store2.close(); + store2 = null; + env.close(); + env = null; + } +} diff --git a/test/com/sleepycat/collections/test/IterRepositionTest.java b/test/com/sleepycat/collections/test/IterRepositionTest.java new file mode 100644 index 0000000..6fa749c --- /dev/null +++ b/test/com/sleepycat/collections/test/IterRepositionTest.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; + +import java.util.Iterator; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.StoredValueSet; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * In general the BlockIterator (default iterator() of the collections API) + * is tested by CollectionTest. Additional testing for repositioning is here. + * The BlockIterator does not keep an open cursor or hold locks, it caches a + * block of records at a time. 
When reaching the end of the block it must + * reposition to the next record, which is a little tricky in JE and deserves + * special testing. + */ +public class IterRepositionTest extends TestBase { + + private Environment env; + + @Before + public void setUp() throws Exception { + super.setUp(); + env = TestEnv.TXN.open("IterRepositionTest"); + } + + @After + public void tearDown() throws Exception { + env.close(); + super.tearDown(); + } + + /** + * Tests a bug when the BlockIterator repositioned at an earlier key when + * the last key in the previous block was deleted. + */ + @Test + public void testDeleteLastKeyBug() throws Exception { + + final Database db = openDb("foo"); + + final EntryBinding binding = new IntegerBinding(); + + final StoredSortedMap map = new StoredSortedMap<>( + db, binding, binding, true); + + final StoredValueSet set = + (StoredValueSet) map.values(); + + /* + * Use a block size of 5 and fill one block. + */ + set.setIteratorBlockSize(5); + + for (int i = 1; i <= 5; i += 1) { + assertNull(map.put(0, i)); + } + + /* + * Move iterator to last (5th) record. + */ + final Iterator iter = set.iterator(); + for (Integer i = 1; i <= 5; i += 1) { + assertEquals(i, iter.next()); + } + + /* + * Delete the last (5th) record using a different iterator. + */ + final Iterator deleteIter = set.iterator(); + for (Integer i = 1; i <= 5; i += 1) { + assertEquals(i, deleteIter.next()); + } + deleteIter.remove(); + + /* + * Prior to the bug fix, hasNext below returned true because the + * DataCursor.repositionRange method positioned to an earlier record. + */ + assertFalse(iter.hasNext()); + + db.close(); + } + + private Database openDb(final String file) + throws Exception { + + final DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setSortedDuplicates(true); + config.setAllowCreate(true); + + return DbCompat.testOpenDatabase(env, null, file, null, config); + } +} diff --git a/test/com/sleepycat/collections/test/JoinTest.java b/test/com/sleepycat/collections/test/JoinTest.java new file mode 100644 index 0000000..3c23b83 --- /dev/null +++ b/test/com/sleepycat/collections/test/JoinTest.java @@ -0,0 +1,223 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.test.MarshalledObject; +import com.sleepycat.collections.StoredCollection; +import com.sleepycat.collections.StoredContainer; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.collections.TupleSerialFactory; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ +public class JoinTest extends TestBase + implements TransactionWorker { + + private static final String MATCH_DATA = "d4"; // matches both keys = "yes" + private static final String MATCH_KEY = "k4"; // matches both keys = "yes" + private static final String[] VALUES = {"yes", "yes"}; + + private Environment env; + private TransactionRunner runner; + private StoredClassCatalog catalog; + private TupleSerialFactory factory; + private Database store; + private SecondaryDatabase index1; + private SecondaryDatabase index2; + private StoredMap storeMap; + private StoredMap indexMap1; + private StoredMap indexMap2; + + public JoinTest() { + customName = "JoinTest"; + } + + @Before + public void setUp() + throws Exception { + + SharedTestUtils.printTestName(customName); + env = TestEnv.TXN.open(customName); + runner = new TransactionRunner(env); + createDatabase(); + } + + @After + public void tearDown() { + + try { + if (index1 != null) { + index1.close(); + } + if (index2 != null) { + index2.close(); + } + if (store != null) { + store.close(); + } + if (catalog != null) { + catalog.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } finally { + /* Ensure that GC can cleanup. 
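+              * The close order above also matters: the secondary databases
+              * are closed before their primary store, and the catalog and
+              * environment last, since a primary should not be closed while
+              * secondaries are still open on it.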
*/ + index1 = null; + index2 = null; + store = null; + catalog = null; + env = null; + runner = null; + factory = null; + storeMap = null; + indexMap1 = null; + indexMap2 = null; + } + } + + @Test + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() { + createViews(); + writeAndRead(); + } + + private void createDatabase() + throws Exception { + + catalog = new StoredClassCatalog(openDb("catalog.db")); + factory = new TupleSerialFactory(catalog); + assertSame(catalog, factory.getCatalog()); + + store = openDb("store.db"); + index1 = openSecondaryDb(store, "index1.db", "1"); + index2 = openSecondaryDb(store, "index2.db", "2"); + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(true); + config.setAllowCreate(true); + + return DbCompat.testOpenDatabase(env, null, file, null, config); + } + + private SecondaryDatabase openSecondaryDb(Database primary, + String file, + String keyName) + throws Exception { + + SecondaryConfig secConfig = new SecondaryConfig(); + DbCompat.setTypeBtree(secConfig); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + DbCompat.setSortedDuplicates(secConfig, true); + secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class, + keyName)); + + return DbCompat.testOpenSecondaryDatabase + (env, null, file, null, primary, secConfig); + } + + private void createViews() { + storeMap = factory.newMap(store, String.class, + MarshalledObject.class, true); + indexMap1 = factory.newMap(index1, String.class, + MarshalledObject.class, true); + indexMap2 = factory.newMap(index2, String.class, + MarshalledObject.class, true); + } + + private void writeAndRead() { + // write records: Data, PrimaryKey, IndexKey1, IndexKey2 + assertNull(storeMap.put(null, + new MarshalledObject("d1", "k1", "no", "yes"))); + assertNull(storeMap.put(null, + new MarshalledObject("d2", "k2", "no", "no"))); + assertNull(storeMap.put(null, + new MarshalledObject("d3", "k3", "no", "yes"))); + assertNull(storeMap.put(null, + new MarshalledObject("d4", "k4", "yes", "yes"))); + assertNull(storeMap.put(null, + new MarshalledObject("d5", "k5", "yes", "no"))); + + Object o; + Map.Entry e; + + // join values with index maps + o = doJoin((StoredCollection) storeMap.values()); + assertEquals(MATCH_DATA, ((MarshalledObject) o).getData()); + + // join keySet with index maps + o = doJoin((StoredCollection) storeMap.keySet()); + assertEquals(MATCH_KEY, o); + + // join entrySet with index maps + o = doJoin((StoredCollection) storeMap.entrySet()); + e = (Map.Entry) o; + assertEquals(MATCH_KEY, e.getKey()); + assertEquals(MATCH_DATA, ((MarshalledObject) e.getValue()).getData()); + } + + private Object doJoin(StoredCollection coll) { + + StoredContainer[] indices = { indexMap1, indexMap2 }; + StoredIterator i = coll.join(indices, VALUES, null); + try { + assertTrue(i.hasNext()); + Object result = i.next(); + assertNotNull(result); + assertFalse(i.hasNext()); + return result; + } finally { i.close(); } + } +} diff --git a/test/com/sleepycat/collections/test/NullTransactionRunner.java b/test/com/sleepycat/collections/test/NullTransactionRunner.java new file mode 100644 index 0000000..51f20bd --- /dev/null +++ b/test/com/sleepycat/collections/test/NullTransactionRunner.java @@ -0,0 +1,37 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.Environment; +import com.sleepycat.util.ExceptionUnwrapper; + +class NullTransactionRunner extends TransactionRunner { + + NullTransactionRunner(Environment env) { + + super(env); + } + + public void run(TransactionWorker worker) + throws Exception { + + try { + worker.doWork(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } +} diff --git a/test/com/sleepycat/collections/test/NullValueTest.java b/test/com/sleepycat/collections/test/NullValueTest.java new file mode 100644 index 0000000..f02d522 --- /dev/null +++ b/test/com/sleepycat/collections/test/NullValueTest.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.serial.ClassCatalog; +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.TupleSerialBinding; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * Unit test for [#19085]. The collections API supports storing and retrieving + * null values, as long as the value binding supports null values. 
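+ *
+ * For SerialBinding, null support is selected by constructing the binding
+ * with a null base class. A minimal sketch of the configuration exercised
+ * below (identifiers are illustrative only, not part of this test):
+ * <pre>
+ *   EntryBinding valBinding = new SerialBinding(catalog, null); // null baseClass
+ *   StoredMap map = new StoredMap(db, new IntegerBinding(), valBinding, true);
+ *   map.put(1, null);           // accepted
+ *   assert map.get(1) == null;  // read back as null
+ * </pre>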
+ */ +public class NullValueTest extends TestBase + implements TransactionWorker { + + private Environment env; + private ClassCatalog catalog; + private Database db; + private TransactionRunner runner; + + public NullValueTest() { + + customName = "NullValueTest"; + } + + @Before + public void setUp() + throws Exception { + + SharedTestUtils.printTestName(customName); + env = TestEnv.TXN.open(customName); + runner = new TransactionRunner(env); + open(); + } + + @After + public void tearDown() { + if (catalog != null) { + try { + catalog.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + if (db != null) { + try { + db.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + catalog = null; + db = null; + env = null; + } + + private void open() + throws Exception { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + DbCompat.setTypeBtree(dbConfig); + + Database catalogDb = DbCompat.testOpenDatabase(env, null, "catalog", + null, dbConfig); + catalog = new StoredClassCatalog(catalogDb); + + db = DbCompat.testOpenDatabase(env, null, "test", null, dbConfig); + } + + @Test + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() { + expectSuccessWithBindingThatDoesSupportNull(); + expectExceptionWithBindingThatDoesNotSupportNull(); + expectExceptionWithWithEntityBinding(); + } + + private void expectSuccessWithBindingThatDoesSupportNull() { + + /* Pass null for baseClass to support null values. */ + final EntryBinding dataBinding = + new SerialBinding(catalog, null /*baseClass*/); + + final StoredMap map = new StoredMap + (db, new IntegerBinding(), dataBinding, true); + + /* Store a null value.*/ + map.put(1, null); + + /* Get the null value. */ + assertNull(map.get(1)); + + for (String value : map.values()) { + assertNull(value); + } + + for (Map.Entry entry : map.entrySet()) { + assertEquals(Integer.valueOf(1), entry.getKey()); + assertNull(entry.getValue()); + } + + map.remove(1); + } + + private void expectExceptionWithBindingThatDoesNotSupportNull() { + + /* Pass non-null for baseClass, null values will not be allowed. 
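+         * With a non-null base class the binding cannot represent null, so
+         * the put(1, null) below is expected to fail with
+         * IllegalArgumentException.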
*/ + final EntryBinding dataBinding = + new SerialBinding(catalog, String.class /*baseClass*/); + + final StoredMap map = new StoredMap + (db, new IntegerBinding(), dataBinding, true); + + try { + map.put(1, null); + fail(); + } catch (IllegalArgumentException expected) { + } + } + + public void expectExceptionWithWithEntityBinding() { + + final EntityBinding entityBinding = + new MyEntityBinding(catalog); + + final StoredMap map = + new StoredMap + (db, new IntegerBinding(), entityBinding, true); + + try { + map.put(1, null); + fail(); + } catch (IllegalArgumentException expected) { + } + } + + static class MyEntity { + int key; + String data; + } + + static class MyEntityBinding extends TupleSerialBinding { + + MyEntityBinding(ClassCatalog catalog) { + super(catalog, String.class); + } + + public MyEntity entryToObject(TupleInput keyInput, String data) { + final MyEntity entity = new MyEntity(); + entity.key = keyInput.readInt(); + entity.data = data; + return entity; + } + + public void objectToKey(MyEntity entity, TupleOutput keyOutput) { + keyOutput.writeInt(entity.key); + } + + public String objectToData(MyEntity entity) { + return entity.data; + } + } +} diff --git a/test/com/sleepycat/collections/test/SecondaryDeadlockTest.java b/test/com/sleepycat/collections/test/SecondaryDeadlockTest.java new file mode 100644 index 0000000..13f5d4b --- /dev/null +++ b/test/com/sleepycat/collections/test/SecondaryDeadlockTest.java @@ -0,0 +1,202 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.Database; +import com.sleepycat.je.Environment; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.util.ExceptionUnwrapper; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * Tests whether secondary access can cause a self-deadlock when reading via a + * secondary because the collections API secondary implementation in DB 4.2 + * opens two cursors. Part of the problem in [#10516] was because the + * secondary get() was not done in a txn. This problem should not occur in DB + * 4.3 and JE -- an ordinary deadlock occurs instead and is detected. 
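+ *
+ * The two threads below acquire the same two locks in opposite order, the
+ * classic deadlock shape (sketch):
+ * <pre>
+ *   thread1 (txn):     lock primary,   then lock secondary  -- put()/remove()
+ *   thread2 (non-txn): lock secondary, then lock primary    -- get() via index
+ * </pre>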
+ * + * @author Mark Hayes + */ +public class SecondaryDeadlockTest extends TestBase { + + private static final Long N_ONE = new Long(1); + private static final Long N_101 = new Long(101); + private static final int N_ITERS = 20; + private static final int MAX_RETRIES = 1000; + + private Environment env; + private Database store; + private Database index; + private StoredSortedMap storeMap; + private StoredSortedMap indexMap; + private Exception exception; + + public SecondaryDeadlockTest() { + + customName = "SecondaryDeadlockTest"; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + env = TestEnv.TXN.open("SecondaryDeadlockTest"); + store = TestStore.BTREE_UNIQ.open(env, "store.db"); + index = TestStore.BTREE_UNIQ.openIndex(store, "index.db"); + storeMap = new StoredSortedMap(store, + TestStore.BTREE_UNIQ.getKeyBinding(), + TestStore.BTREE_UNIQ.getValueBinding(), + true); + indexMap = new StoredSortedMap(index, + TestStore.BTREE_UNIQ.getKeyBinding(), + TestStore.BTREE_UNIQ.getValueBinding(), + true); + } + + @After + public void tearDown() { + + if (index != null) { + try { + index.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (store != null) { + try { + store.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } + } + /* Allow GC of DB objects in the test case. */ + env = null; + store = null; + index = null; + storeMap = null; + indexMap = null; + } + + @Test + public void testSecondaryDeadlock() + throws Exception { + + final TransactionRunner runner = new TransactionRunner(env); + runner.setMaxRetries(MAX_RETRIES); + + /* + * This test deadlocks a lot at degree 3 serialization. In debugging + * this I discovered it was not due to phantom prevention per se but + * just to a change in timing. + */ + TransactionConfig txnConfig = new TransactionConfig(); + runner.setTransactionConfig(txnConfig); + + /* + * A thread to do put() and delete() via the primary, which will lock + * the primary first then the secondary. Uses transactions. + */ + final Thread thread1 = new Thread(new Runnable() { + public void run() { + try { + /* The TransactionRunner performs retries. */ + for (int i = 0; i < N_ITERS; i +=1 ) { + runner.run(new TransactionWorker() { + public void doWork() { + assertEquals(null, storeMap.put(N_ONE, N_101)); + } + }); + runner.run(new TransactionWorker() { + public void doWork() { + assertEquals(N_101, storeMap.remove(N_ONE)); + } + }); + } + } catch (Exception e) { + e.printStackTrace(); + exception = e; + } + } + }, "ThreadOne"); + + /* + * A thread to get() via the secondary, which will lock the secondary + * first then the primary. Does not use a transaction. + */ + final Thread thread2 = new Thread(new Runnable() { + public void run() { + try { + for (int i = 0; i < N_ITERS; i +=1 ) { + for (int j = 0; j < MAX_RETRIES; j += 1) { + try { + Object value = indexMap.get(N_ONE); + assertTrue(value == null || + N_101.equals(value)); + break; + } catch (Exception e) { + e = ExceptionUnwrapper.unwrap(e); + if (e instanceof LockConflictException) { + continue; /* Retry on deadlock. 
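+                                     * LockConflictException is the common
+                                     * JE base class for deadlock and lock
+                                     * timeout errors; retrying the read, up
+                                     * to MAX_RETRIES times here, is the
+                                     * standard recovery.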
*/ + } else { + throw e; + } + } + } + } + } catch (Exception e) { + e.printStackTrace(); + exception = e; + } + } + }, "ThreadTwo"); + + thread1.start(); + thread2.start(); + thread1.join(); + thread2.join(); + + index.close(); + index = null; + store.close(); + store = null; + env.close(); + env = null; + + if (exception != null) { + fail(exception.toString()); + } + } +} diff --git a/test/com/sleepycat/collections/test/TestDataBinding.java b/test/com/sleepycat/collections/test/TestDataBinding.java new file mode 100644 index 0000000..081d994 --- /dev/null +++ b/test/com/sleepycat/collections/test/TestDataBinding.java @@ -0,0 +1,38 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * @author Mark Hayes + */ +class TestDataBinding implements EntryBinding { + + public Object entryToObject(DatabaseEntry data) { + + if (data.getSize() != 1) { + throw new IllegalStateException("size=" + data.getSize()); + } + byte val = data.getData()[data.getOffset()]; + return new Long(val); + } + + public void objectToEntry(Object object, DatabaseEntry data) { + + byte val = ((Number) object).byteValue(); + data.setData(new byte[] { val }, 0, 1); + } +} diff --git a/test/com/sleepycat/collections/test/TestEntity.java b/test/com/sleepycat/collections/test/TestEntity.java new file mode 100644 index 0000000..08d431c --- /dev/null +++ b/test/com/sleepycat/collections/test/TestEntity.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +/** + * @author Mark Hayes + */ +class TestEntity { + + int key; + int value; + + TestEntity(int key, int value) { + + this.key = key; + this.value = value; + } + + public boolean equals(Object o) { + + try { + TestEntity e = (TestEntity) o; + return e.key == key && e.value == value; + } catch (ClassCastException e) { + return false; + } + } + + public int hashCode() { + + return key; + } + + public String toString() { + + return "[key " + key + " value " + value + ']'; + } +} diff --git a/test/com/sleepycat/collections/test/TestEntityBinding.java b/test/com/sleepycat/collections/test/TestEntityBinding.java new file mode 100644 index 0000000..0752836 --- /dev/null +++ b/test/com/sleepycat/collections/test/TestEntityBinding.java @@ -0,0 +1,68 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.je.DatabaseEntry; + +/** + * @author Mark Hayes + */ +class TestEntityBinding implements EntityBinding { + + private boolean isRecNum; + + TestEntityBinding(boolean isRecNum) { + + this.isRecNum = isRecNum; + } + + public Object entryToObject(DatabaseEntry key, DatabaseEntry value) { + + byte keyByte; + if (isRecNum) { + if (key.getSize() != 4) { + throw new IllegalStateException(); + } + keyByte = (byte) RecordNumberBinding.entryToRecordNumber(key); + } else { + if (key.getSize() != 1) { + throw new IllegalStateException(); + } + keyByte = key.getData()[key.getOffset()]; + } + if (value.getSize() != 1) { + throw new IllegalStateException(); + } + byte valByte = value.getData()[value.getOffset()]; + return new TestEntity(keyByte, valByte); + } + + public void objectToKey(Object object, DatabaseEntry key) { + + byte val = (byte) ((TestEntity) object).key; + if (isRecNum) { + RecordNumberBinding.recordNumberToEntry(val, key); + } else { + key.setData(new byte[] { val }, 0, 1); + } + } + + public void objectToData(Object object, DatabaseEntry value) { + + byte val = (byte) ((TestEntity) object).value; + value.setData(new byte[] { val }, 0, 1); + } +} diff --git a/test/com/sleepycat/collections/test/TestKeyAssigner.java b/test/com/sleepycat/collections/test/TestKeyAssigner.java new file mode 100644 index 0000000..9a85b2b --- /dev/null +++ b/test/com/sleepycat/collections/test/TestKeyAssigner.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.collections.PrimaryKeyAssigner; +import com.sleepycat.je.DatabaseEntry; + +/** + * @author Mark Hayes + */ +class TestKeyAssigner implements PrimaryKeyAssigner { + + private byte next = 1; + private final boolean isRecNum; + + TestKeyAssigner(boolean isRecNum) { + + this.isRecNum = isRecNum; + } + + public void assignKey(DatabaseEntry keyData) { + if (isRecNum) { + RecordNumberBinding.recordNumberToEntry(next, keyData); + } else { + keyData.setData(new byte[] { next }, 0, 1); + } + next += 1; + } + + void reset() { + + next = 1; + } +} diff --git a/test/com/sleepycat/collections/test/TestKeyCreator.java b/test/com/sleepycat/collections/test/TestKeyCreator.java new file mode 100644 index 0000000..e5fa96d --- /dev/null +++ b/test/com/sleepycat/collections/test/TestKeyCreator.java @@ -0,0 +1,61 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.collections.test;
+
+import com.sleepycat.bind.RecordNumberBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryKeyCreator;
+
+/**
+ * Unused until secondaries are available.
+ * @author Mark Hayes
+ */
+class TestKeyCreator implements SecondaryKeyCreator {
+
+    private final boolean isRecNum;
+
+    TestKeyCreator(boolean isRecNum) {
+
+        this.isRecNum = isRecNum;
+    }
+
+    public boolean createSecondaryKey(SecondaryDatabase db,
+                                      DatabaseEntry primaryKeyData,
+                                      DatabaseEntry valueData,
+                                      DatabaseEntry indexKeyData) {
+        if (valueData.getSize() == 0) {
+            return false;
+        }
+        if (valueData.getSize() != 1) {
+            throw new IllegalStateException();
+        }
+        byte val = valueData.getData()[valueData.getOffset()];
+        if (val == 0) {
+            return false; // fixed-len pad value
+        }
+        val -= 100;
+        if (isRecNum) {
+            RecordNumberBinding.recordNumberToEntry(val, indexKeyData);
+        } else {
+            indexKeyData.setData(new byte[] { val }, 0, 1);
+        }
+        return true;
+    }
+
+    public void clearIndexKey(DatabaseEntry valueData) {
+
+        throw new RuntimeException("not supported");
+    }
+}
diff --git a/test/com/sleepycat/collections/test/TestSR15721.java b/test/com/sleepycat/collections/test/TestSR15721.java
new file mode 100644
index 0000000..8e78e16
--- /dev/null
+++ b/test/com/sleepycat/collections/test/TestSR15721.java
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.collections.test;
+
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.collections.CurrentTransaction;
+import com.sleepycat.je.Environment;
+import com.sleepycat.util.test.TestBase;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * @author Chao Huang
+ */
+public class TestSR15721 extends TestBase {
+
+    private Environment env;
+    private CurrentTransaction currentTxn;
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        env = TestEnv.TXN.open("TestSR15721");
+        currentTxn = CurrentTransaction.getInstance(env);
+    }
+
+    @After
+    public void tearDown() {
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Exception e) {
+            System.out.println("Ignored exception during tearDown: " + e);
+        } finally {
+            /* Ensure that GC can cleanup. */
+            env = null;
+            currentTxn = null;
+        }
+    }
+
+    /**
+     * Tests that the CurrentTransaction instance is not reclaimed by GC
+     * while the attached environment is open.
[#15721] + */ + @Test + public void testSR15721Fix() + throws Exception { + + int hash = currentTxn.hashCode(); + int hash2 = -1; + + currentTxn = CurrentTransaction.getInstance(env); + hash2 = currentTxn.hashCode(); + assertTrue(hash == hash2); + + currentTxn.beginTransaction(null); + currentTxn = null; + hash2 = -1; + + for (int i = 0; i < 10; i += 1) { + byte[] x = null; + try { + x = new byte[Integer.MAX_VALUE - 1]; + fail(); + } catch (OutOfMemoryError expected) { + } + assertNull(x); + + System.gc(); + } + + currentTxn = CurrentTransaction.getInstance(env); + hash2 = currentTxn.hashCode(); + currentTxn.commitTransaction(); + + assertTrue(hash == hash2); + } +} diff --git a/test/com/sleepycat/collections/test/TestStore.java b/test/com/sleepycat/collections/test/TestStore.java new file mode 100644 index 0000000..6148d07 --- /dev/null +++ b/test/com/sleepycat/collections/test/TestStore.java @@ -0,0 +1,284 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.bind.EntityBinding; +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.bind.RecordNumberBinding; +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.SecondaryConfig; + +/** + * @author Mark Hayes + */ +class TestStore { + + static final TestKeyCreator BYTE_EXTRACTOR = new TestKeyCreator(false); + static final TestKeyCreator RECNO_EXTRACTOR = new TestKeyCreator(true); + static final EntryBinding VALUE_BINDING = new TestDataBinding(); + static final EntryBinding BYTE_KEY_BINDING = VALUE_BINDING; + static final EntryBinding RECNO_KEY_BINDING = new RecordNumberBinding(); + static final EntityBinding BYTE_ENTITY_BINDING = + new TestEntityBinding(false); + static final EntityBinding RECNO_ENTITY_BINDING = + new TestEntityBinding(true); + static final TestKeyAssigner BYTE_KEY_ASSIGNER = + new TestKeyAssigner(false); + static final TestKeyAssigner RECNO_KEY_ASSIGNER = + new TestKeyAssigner(true); + + static final TestStore BTREE_UNIQ; + static final TestStore BTREE_DUP; + static final TestStore BTREE_DUPSORT; + static final TestStore BTREE_RECNUM; + static final TestStore HASH_UNIQ; + static final TestStore HASH_DUP; + static final TestStore HASH_DUPSORT; + static final TestStore QUEUE; + static final TestStore RECNO; + static final TestStore RECNO_RENUM; + + static final TestStore[] ALL; + static { + List list = new ArrayList(); + SecondaryConfig config; + + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + BTREE_UNIQ = new TestStore("btree-uniq", config); + BTREE_UNIQ.indexOf = BTREE_UNIQ; + list.add(BTREE_UNIQ); + + if (DbCompat.INSERTION_ORDERED_DUPLICATES) { + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + DbCompat.setUnsortedDuplicates(config, true); + BTREE_DUP = new TestStore("btree-dup", config); + BTREE_DUP.indexOf = null; // 
indexes must use sorted dups + list.add(BTREE_DUP); + } else { + BTREE_DUP = null; + } + + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + DbCompat.setSortedDuplicates(config, true); + BTREE_DUPSORT = new TestStore("btree-dupsort", config); + BTREE_DUPSORT.indexOf = BTREE_UNIQ; + list.add(BTREE_DUPSORT); + + if (DbCompat.BTREE_RECNUM_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeBtree(config); + DbCompat.setBtreeRecordNumbers(config, true); + BTREE_RECNUM = new TestStore("btree-recnum", config); + BTREE_RECNUM.indexOf = BTREE_RECNUM; + list.add(BTREE_RECNUM); + } else { + BTREE_RECNUM = null; + } + + if (DbCompat.HASH_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeHash(config); + HASH_UNIQ = new TestStore("hash-uniq", config); + HASH_UNIQ.indexOf = HASH_UNIQ; + list.add(HASH_UNIQ); + + if (DbCompat.INSERTION_ORDERED_DUPLICATES) { + config = new SecondaryConfig(); + DbCompat.setTypeHash(config); + DbCompat.setUnsortedDuplicates(config, true); + HASH_DUP = new TestStore("hash-dup", config); + HASH_DUP.indexOf = null; // indexes must use sorted dups + list.add(HASH_DUP); + } else { + HASH_DUP = null; + } + + config = new SecondaryConfig(); + DbCompat.setTypeHash(config); + DbCompat.setSortedDuplicates(config, true); + HASH_DUPSORT = new TestStore("hash-dupsort", config); + HASH_DUPSORT.indexOf = HASH_UNIQ; + list.add(HASH_DUPSORT); + } else { + HASH_UNIQ = null; + HASH_DUP = null; + HASH_DUPSORT = null; + } + + if (DbCompat.QUEUE_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeQueue(config); + QUEUE = new TestStore("queue", config); + QUEUE.indexOf = QUEUE; + list.add(QUEUE); + } else { + QUEUE = null; + } + + if (DbCompat.RECNO_METHOD) { + config = new SecondaryConfig(); + DbCompat.setTypeRecno(config); + RECNO = new TestStore("recno", config); + RECNO.indexOf = RECNO; + list.add(RECNO); + + config = new SecondaryConfig(); + DbCompat.setTypeRecno(config); + DbCompat.setRenumbering(config, true); + RECNO_RENUM = new TestStore("recno-renum", config); + RECNO_RENUM.indexOf = null; // indexes must have stable keys + list.add(RECNO_RENUM); + } else { + RECNO = null; + RECNO_RENUM = null; + } + + ALL = new TestStore[list.size()]; + list.toArray(ALL); + } + + private String name; + private SecondaryConfig config; + private TestStore indexOf; + private boolean isRecNumFormat; + + private TestStore(String name, SecondaryConfig config) { + + this.name = name; + this.config = config; + + isRecNumFormat = isQueueOrRecno() || + (DbCompat.isTypeBtree(config) && + DbCompat.getBtreeRecordNumbers(config)); + } + + EntryBinding getValueBinding() { + + return VALUE_BINDING; + } + + EntryBinding getKeyBinding() { + + return isRecNumFormat ? RECNO_KEY_BINDING : BYTE_KEY_BINDING; + } + + EntityBinding getEntityBinding() { + + return isRecNumFormat ? 
RECNO_ENTITY_BINDING : BYTE_ENTITY_BINDING; + } + + TestKeyAssigner getKeyAssigner() { + + if (isQueueOrRecno()) { + return null; + } else { + if (isRecNumFormat) { + return RECNO_KEY_ASSIGNER; + } else { + return BYTE_KEY_ASSIGNER; + } + } + } + + String getName() { + + return name; + } + + boolean isOrdered() { + + return !DbCompat.isTypeHash(config); + } + + boolean isQueueOrRecno() { + + return DbCompat.isTypeQueue(config) || DbCompat.isTypeRecno(config); + } + + boolean areKeyRangesAllowed() { + return isOrdered() && !isQueueOrRecno(); + } + + boolean areDuplicatesAllowed() { + + return DbCompat.getSortedDuplicates(config) || + DbCompat.getUnsortedDuplicates(config); + } + + boolean hasRecNumAccess() { + + return isRecNumFormat; + } + + boolean areKeysRenumbered() { + + return hasRecNumAccess() && + (DbCompat.isTypeBtree(config) || + DbCompat.getRenumbering(config)); + } + + TestStore getIndexOf() { + + return DbCompat.SECONDARIES ? indexOf : null; + } + + Database open(Environment env, String fileName) + throws DatabaseException { + + int fixedLen = (isQueueOrRecno() ? 1 : 0); + return openDb(env, fileName, fixedLen, null); + } + + Database openIndex(Database primary, String fileName) + throws DatabaseException { + + int fixedLen = (isQueueOrRecno() ? 4 : 0); + config.setKeyCreator(isRecNumFormat ? RECNO_EXTRACTOR + : BYTE_EXTRACTOR); + Environment env = primary.getEnvironment(); + return openDb(env, fileName, fixedLen, primary); + } + + private Database openDb(Environment env, String fileName, int fixedLen, + Database primary) + throws DatabaseException { + + if (fixedLen > 0) { + DbCompat.setRecordLength(config, fixedLen); + DbCompat.setRecordPad(config, 0); + } else { + DbCompat.setRecordLength(config, 0); + } + config.setAllowCreate(true); + DbCompat.setReadUncommitted(config, true); + config.setTransactional(CurrentTransaction.getInstance(env) != null); + if (primary != null) { + return DbCompat.testOpenSecondaryDatabase + (env, null, fileName, null, primary, config); + } else { + return DbCompat.testOpenDatabase + (env, null, fileName, null, config); + } + } +} diff --git a/test/com/sleepycat/collections/test/TransactionTest.java b/test/com/sleepycat/collections/test/TransactionTest.java new file mode 100644 index 0000000..77c10c6 --- /dev/null +++ b/test/com/sleepycat/collections/test/TransactionTest.java @@ -0,0 +1,996 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.collections.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileNotFoundException; +import java.util.Iterator; +import java.util.List; +import java.util.SortedSet; +import java.util.concurrent.atomic.AtomicInteger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.collections.StoredCollections; +import com.sleepycat.collections.StoredContainer; +import com.sleepycat.collections.StoredIterator; +import com.sleepycat.collections.StoredList; +import com.sleepycat.collections.StoredSortedMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ +public class TransactionTest extends TestBase { + + private static final Long ONE = new Long(1); + private static final Long TWO = new Long(2); + private static final Long THREE = new Long(3); + + + private Environment env; + private CurrentTransaction currentTxn; + private Database store; + private StoredSortedMap map; + private TestStore testStore = TestStore.BTREE_UNIQ; + + public TransactionTest() { + + customName = "TransactionTest"; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + env = TestEnv.TXN.open("TransactionTests"); + currentTxn = CurrentTransaction.getInstance(env); + store = testStore.open(env, dbName(0)); + map = new StoredSortedMap(store, testStore.getKeyBinding(), + testStore.getValueBinding(), true); + } + + @After + public void tearDown() { + + try { + if (store != null) { + store.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } finally { + /* Ensure that GC can cleanup. 
*/ + store = null; + env = null; + currentTxn = null; + map = null; + testStore = null; + } + } + + private String dbName(int i) { + + return "txn-test-" + i; + } + + @Test + public void testGetters() + throws Exception { + + assertNotNull(env); + assertNotNull(currentTxn); + assertNull(currentTxn.getTransaction()); + + currentTxn.beginTransaction(null); + assertNotNull(currentTxn.getTransaction()); + currentTxn.commitTransaction(); + assertNull(currentTxn.getTransaction()); + + currentTxn.beginTransaction(null); + assertNotNull(currentTxn.getTransaction()); + currentTxn.abortTransaction(); + assertNull(currentTxn.getTransaction()); + + // read-uncommitted property should be inherited + + assertTrue(!isReadUncommitted(map)); + assertTrue(!isReadUncommitted(map.values())); + assertTrue(!isReadUncommitted(map.keySet())); + assertTrue(!isReadUncommitted(map.entrySet())); + + StoredSortedMap other = (StoredSortedMap) + StoredCollections.configuredMap + (map, CursorConfig.READ_UNCOMMITTED); + assertTrue(isReadUncommitted(other)); + assertTrue(isReadUncommitted(other.values())); + assertTrue(isReadUncommitted(other.keySet())); + assertTrue(isReadUncommitted(other.entrySet())); + assertTrue(!isReadUncommitted(map)); + assertTrue(!isReadUncommitted(map.values())); + assertTrue(!isReadUncommitted(map.keySet())); + assertTrue(!isReadUncommitted(map.entrySet())); + + // read-committed property should be inherited + + assertTrue(!isReadCommitted(map)); + assertTrue(!isReadCommitted(map.values())); + assertTrue(!isReadCommitted(map.keySet())); + assertTrue(!isReadCommitted(map.entrySet())); + + other = (StoredSortedMap) + StoredCollections.configuredMap + (map, CursorConfig.READ_COMMITTED); + assertTrue(isReadCommitted(other)); + assertTrue(isReadCommitted(other.values())); + assertTrue(isReadCommitted(other.keySet())); + assertTrue(isReadCommitted(other.entrySet())); + assertTrue(!isReadCommitted(map)); + assertTrue(!isReadCommitted(map.values())); + assertTrue(!isReadCommitted(map.keySet())); + assertTrue(!isReadCommitted(map.entrySet())); + } + + @Test + public void testTransactional() + throws Exception { + + // is transactional because DB_AUTO_COMMIT was passed to + // Database.open() + // + assertTrue(map.isTransactional()); + store.close(); + store = null; + + // is not transactional + // + DatabaseConfig dbConfig = new DatabaseConfig(); + DbCompat.setTypeBtree(dbConfig); + dbConfig.setAllowCreate(true); + Database db = DbCompat.testOpenDatabase + (env, null, dbName(1), null, dbConfig); + map = new StoredSortedMap(db, testStore.getKeyBinding(), + testStore.getValueBinding(), true); + assertTrue(!map.isTransactional()); + map.put(ONE, ONE); + readCheck(map, ONE, ONE); + db.close(); + + // is transactional + // + dbConfig.setTransactional(true); + currentTxn.beginTransaction(null); + db = DbCompat.testOpenDatabase + (env, currentTxn.getTransaction(), dbName(2), null, dbConfig); + currentTxn.commitTransaction(); + map = new StoredSortedMap(db, testStore.getKeyBinding(), + testStore.getValueBinding(), true); + assertTrue(map.isTransactional()); + currentTxn.beginTransaction(null); + map.put(ONE, ONE); + readCheck(map, ONE, ONE); + currentTxn.commitTransaction(); + db.close(); + } + + @Test + public void testExceptions() + throws Exception { + + try { + currentTxn.commitTransaction(); + fail(); + } catch (IllegalStateException expected) {} + + try { + currentTxn.abortTransaction(); + fail(); + } catch (IllegalStateException expected) {} + } + + @Test + public void testNested() + throws Exception { + 
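+        /*
+         * Sketch of the nesting protocol exercised below (illustrative,
+         * mirroring the assertions that follow): while a transaction is
+         * active, beginTransaction() starts a child, and commitTransaction()
+         * or abortTransaction() ends the innermost child and makes its
+         * parent current again:
+         *
+         *   Transaction t1 = currentTxn.beginTransaction(null); // parent
+         *   Transaction t2 = currentTxn.beginTransaction(null); // child
+         *   currentTxn.abortTransaction();  // ends t2; t1 is current
+         *   currentTxn.commitTransaction(); // ends t1; none current
+         */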
+ if (!DbCompat.NESTED_TRANSACTIONS) { + return; + } + assertNull(currentTxn.getTransaction()); + + Transaction txn1 = currentTxn.beginTransaction(null); + assertNotNull(txn1); + assertTrue(txn1 == currentTxn.getTransaction()); + + assertNull(map.get(ONE)); + assertNull(map.put(ONE, ONE)); + assertEquals(ONE, map.get(ONE)); + + Transaction txn2 = currentTxn.beginTransaction(null); + assertNotNull(txn2); + assertTrue(txn2 == currentTxn.getTransaction()); + assertTrue(txn1 != txn2); + + assertNull(map.put(TWO, TWO)); + assertEquals(TWO, map.get(TWO)); + + Transaction txn3 = currentTxn.beginTransaction(null); + assertNotNull(txn3); + assertTrue(txn3 == currentTxn.getTransaction()); + assertTrue(txn1 != txn2); + assertTrue(txn1 != txn3); + assertTrue(txn2 != txn3); + + assertNull(map.put(THREE, THREE)); + assertEquals(THREE, map.get(THREE)); + + Transaction txn = currentTxn.abortTransaction(); + assertTrue(txn == txn2); + assertTrue(txn == currentTxn.getTransaction()); + assertNull(map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + + txn3 = currentTxn.beginTransaction(null); + assertNotNull(txn3); + assertTrue(txn3 == currentTxn.getTransaction()); + assertTrue(txn1 != txn2); + assertTrue(txn1 != txn3); + assertTrue(txn2 != txn3); + + assertNull(map.put(THREE, THREE)); + assertEquals(THREE, map.get(THREE)); + + txn = currentTxn.commitTransaction(); + assertTrue(txn == txn2); + assertTrue(txn == currentTxn.getTransaction()); + assertEquals(THREE, map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + + txn = currentTxn.commitTransaction(); + assertTrue(txn == txn1); + assertTrue(txn == currentTxn.getTransaction()); + assertEquals(THREE, map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + + txn = currentTxn.commitTransaction(); + assertNull(txn); + assertNull(currentTxn.getTransaction()); + assertEquals(THREE, map.get(THREE)); + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + } + + @Test + public void testRunnerCommit() + throws Exception { + + commitTest(false); + } + + @Test + public void testExplicitCommit() + throws Exception { + + commitTest(true); + } + + private void commitTest(final boolean explicit) + throws Exception { + + final TransactionRunner runner = new TransactionRunner(env); + runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS); + + assertNull(currentTxn.getTransaction()); + + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn1 = currentTxn.getTransaction(); + assertNotNull(txn1); + assertNull(map.put(ONE, ONE)); + assertEquals(ONE, map.get(ONE)); + + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn2 = currentTxn.getTransaction(); + assertNotNull(txn2); + if (DbCompat.NESTED_TRANSACTIONS) { + assertTrue(txn1 != txn2); + } else { + assertTrue(txn1 == txn2); + } + assertNull(map.put(TWO, TWO)); + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + if (DbCompat.NESTED_TRANSACTIONS && explicit) { + currentTxn.commitTransaction(); + } + } + }); + + Transaction txn3 = currentTxn.getTransaction(); + assertSame(txn1, txn3); + + assertEquals(TWO, map.get(TWO)); + assertEquals(ONE, map.get(ONE)); + } + }); + + assertNull(currentTxn.getTransaction()); + } + + @Test + public void testRunnerAbort() + throws Exception { + + abortTest(false); + } + + @Test + public void testExplicitAbort() + throws Exception { + + abortTest(true); + } + + private void abortTest(final boolean explicit) + throws 
Exception { + + final TransactionRunner runner = new TransactionRunner(env); + runner.setAllowNestedTransactions(DbCompat.NESTED_TRANSACTIONS); + + assertNull(currentTxn.getTransaction()); + + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn1 = currentTxn.getTransaction(); + assertNotNull(txn1); + assertNull(map.put(ONE, ONE)); + assertEquals(ONE, map.get(ONE)); + + if (DbCompat.NESTED_TRANSACTIONS) { + try { + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + final Transaction txn2 = + currentTxn.getTransaction(); + assertNotNull(txn2); + assertTrue(txn1 != txn2); + assertNull(map.put(TWO, TWO)); + assertEquals(TWO, map.get(TWO)); + if (explicit) { + currentTxn.abortTransaction(); + } else { + throw new IllegalArgumentException( + "test-abort"); + } + } + }); + assertTrue(explicit); + } catch (IllegalArgumentException e) { + assertTrue(!explicit); + assertEquals("test-abort", e.getMessage()); + } + } + + Transaction txn3 = currentTxn.getTransaction(); + assertSame(txn1, txn3); + + assertEquals(ONE, map.get(ONE)); + assertNull(map.get(TWO)); + } + }); + + assertNull(currentTxn.getTransaction()); + } + + @Test + public void testReadCommittedCollection() + throws Exception { + + StoredSortedMap degree2Map = (StoredSortedMap) + StoredCollections.configuredSortedMap + (map, CursorConfig.READ_COMMITTED); + + // original map is not read-committed + assertTrue(!isReadCommitted(map)); + + // all read-committed containers are read-committed + assertTrue(isReadCommitted(degree2Map)); + assertTrue(isReadCommitted + (StoredCollections.configuredMap + (map, CursorConfig.READ_COMMITTED))); + assertTrue(isReadCommitted + (StoredCollections.configuredCollection + (map.values(), CursorConfig.READ_COMMITTED))); + assertTrue(isReadCommitted + (StoredCollections.configuredSet + (map.keySet(), CursorConfig.READ_COMMITTED))); + assertTrue(isReadCommitted + (StoredCollections.configuredSortedSet + ((SortedSet) map.keySet(), + CursorConfig.READ_COMMITTED))); + + if (DbCompat.RECNO_METHOD) { + // create a list just so we can call configuredList() + Database listStore = TestStore.RECNO_RENUM.open(env, "foo"); + List list = new StoredList(listStore, TestStore.VALUE_BINDING, + true); + assertTrue(isReadCommitted + (StoredCollections.configuredList + (list, CursorConfig.READ_COMMITTED))); + listStore.close(); + } + + map.put(ONE, ONE); + doReadCommitted(degree2Map, null); + } + + private static boolean isReadCommitted(Object container) { + StoredContainer storedContainer = (StoredContainer) container; + /* We can't use getReadCommitted until it is added to DB core. */ + return storedContainer.getCursorConfig() != null && + storedContainer.getCursorConfig().getReadCommitted(); + } + + @Test + public void testReadCommittedTransaction() + throws Exception { + + TransactionConfig config = new TransactionConfig(); + config.setReadCommitted(true); + doReadCommitted(map, config); + } + + private void doReadCommitted(final StoredSortedMap degree2Map, + TransactionConfig txnConfig) + throws Exception { + + map.put(ONE, ONE); + TransactionRunner runner = new TransactionRunner(env); + runner.setTransactionConfig(txnConfig); + assertNull(currentTxn.getTransaction()); + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertNotNull(currentTxn.getTransaction()); + + /* Do a read-committed get(); the lock is not retained. */
+ assertEquals(ONE, degree2Map.get(ONE)); + + /* + * If we were not using read-committed, the following write of + * key ONE with an auto-commit transaction would self-deadlock + * since two transactions in the same thread would be + * attempting to lock the same key, one for write and one for + * read. This test passes if we do not deadlock. + */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + testStore.getKeyBinding().objectToEntry(ONE, key); + testStore.getValueBinding().objectToEntry(TWO, value); + store.put(null, key, value); + } + }); + assertNull(currentTxn.getTransaction()); + } + + @Test + public void testReadUncommittedCollection() + throws Exception { + + StoredSortedMap dirtyMap = (StoredSortedMap) + StoredCollections.configuredSortedMap + (map, CursorConfig.READ_UNCOMMITTED); + + // original map is not read-uncommitted + assertTrue(!isReadUncommitted(map)); + + // all read-uncommitted containers are read-uncommitted + assertTrue(isReadUncommitted(dirtyMap)); + assertTrue(isReadUncommitted + (StoredCollections.configuredMap + (map, CursorConfig.READ_UNCOMMITTED))); + assertTrue(isReadUncommitted + (StoredCollections.configuredCollection + (map.values(), CursorConfig.READ_UNCOMMITTED))); + assertTrue(isReadUncommitted + (StoredCollections.configuredSet + (map.keySet(), CursorConfig.READ_UNCOMMITTED))); + assertTrue(isReadUncommitted + (StoredCollections.configuredSortedSet + ((SortedSet) map.keySet(), CursorConfig.READ_UNCOMMITTED))); + + if (DbCompat.RECNO_METHOD) { + // create a list just so we can call configuredList() + Database listStore = TestStore.RECNO_RENUM.open(env, "foo"); + List list = new StoredList(listStore, TestStore.VALUE_BINDING, + true); + assertTrue(isReadUncommitted + (StoredCollections.configuredList + (list, CursorConfig.READ_UNCOMMITTED))); + listStore.close(); + } + + doReadUncommitted(dirtyMap); + } + + private static boolean isReadUncommitted(Object container) { + StoredContainer storedContainer = (StoredContainer) container; + return storedContainer.getCursorConfig() != null && + storedContainer.getCursorConfig().getReadUncommitted(); + } + + @Test + public void testReadUncommittedTransaction() + throws Exception { + + TransactionRunner runner = new TransactionRunner(env); + TransactionConfig config = new TransactionConfig(); + config.setReadUncommitted(true); + runner.setTransactionConfig(config); + assertNull(currentTxn.getTransaction()); + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertNotNull(currentTxn.getTransaction()); + doReadUncommitted(map); + } + }); + assertNull(currentTxn.getTransaction()); + } + + /** + * Tests that the CurrentTransaction static WeakHashMap does indeed allow + * GC to reclaim the environment when it is closed. At one point this was + * not working because the value object in the map has a reference to the + * environment. This was fixed by wrapping the Environment in a + * WeakReference. [#15444] + * + * This test only succeeds intermittently, probably due to its reliance + * on the GC call. + */ + @Test + public void testCurrentTransactionGC() + throws Exception { + + /* + * This test can have indeterminate results because it depends on + * a finalize count, so it's not part of the default run. + */ + if (!SharedTestUtils.runLongTests()) { + return; + } + + final StringBuilder finalizedFlag = new StringBuilder(); + + class MyEnv extends Environment { + + /** + * @throws FileNotFoundException from DB core.
+ */ + MyEnv(File home, EnvironmentConfig config) + throws DatabaseException, FileNotFoundException { + + super(home, config); + } + + @Override + protected void finalize() { + finalizedFlag.append('.'); + } + } + + MyEnv myEnv = new MyEnv(env.getHome(), env.getConfig()); + CurrentTransaction myCurrTxn = CurrentTransaction.getInstance(myEnv); + + store.close(); + store = null; + map = null; + + env.close(); + env = null; + + myEnv.close(); + myEnv = null; + + myCurrTxn = null; + currentTxn = null; + + for (int i = 0; i < 10; i += 1) { + byte[] x = null; + try { + x = new byte[Integer.MAX_VALUE - 1]; + } catch (OutOfMemoryError expected) { + } + assertNull(x); + System.gc(); + } + + for (int i = 0; i < 10; i += 1) { + System.gc(); + } + + assertTrue(finalizedFlag.length() > 0); + } + + private synchronized void doReadUncommitted(StoredSortedMap dirtyMap) + throws Exception { + + // start thread one + ReadUncommittedThreadOne t1 = new ReadUncommittedThreadOne(env, this); + t1.start(); + wait(); + + // put ONE + synchronized (t1) { t1.notify(); } + wait(); + readCheck(dirtyMap, ONE, ONE); + assertTrue(!dirtyMap.isEmpty()); + + // abort ONE + synchronized (t1) { t1.notify(); } + t1.join(); + readCheck(dirtyMap, ONE, null); + assertTrue(dirtyMap.isEmpty()); + + // start thread two + ReadUncommittedThreadTwo t2 = new ReadUncommittedThreadTwo(env, this); + t2.start(); + wait(); + + // put TWO + synchronized (t2) { t2.notify(); } + wait(); + readCheck(dirtyMap, TWO, TWO); + assertTrue(!dirtyMap.isEmpty()); + + // commit TWO + synchronized (t2) { t2.notify(); } + t2.join(); + readCheck(dirtyMap, TWO, TWO); + assertTrue(!dirtyMap.isEmpty()); + } + + private static class ReadUncommittedThreadOne extends Thread { + + private final CurrentTransaction currentTxn; + private final TransactionTest parent; + private final StoredSortedMap map; + + private ReadUncommittedThreadOne(Environment env, + TransactionTest parent) { + + this.currentTxn = CurrentTransaction.getInstance(env); + this.parent = parent; + this.map = parent.map; + } + + @Override + public synchronized void run() { + + try { + assertNull(currentTxn.getTransaction()); + assertNotNull(currentTxn.beginTransaction(null)); + assertNotNull(currentTxn.getTransaction()); + readCheck(map, ONE, null); + synchronized (parent) { parent.notify(); } + wait(); + + // put ONE + assertNull(map.put(ONE, ONE)); + readCheck(map, ONE, ONE); + synchronized (parent) { parent.notify(); } + wait(); + + // abort ONE + assertNull(currentTxn.abortTransaction()); + assertNull(currentTxn.getTransaction()); + } catch (Exception e) { + throw new RuntimeExceptionWrapper(e); + } + } + } + + private static class ReadUncommittedThreadTwo extends Thread { + + private final Environment env; + private final CurrentTransaction currentTxn; + private final TransactionTest parent; + private final StoredSortedMap map; + + private ReadUncommittedThreadTwo(Environment env, + TransactionTest parent) { + + this.env = env; + this.currentTxn = CurrentTransaction.getInstance(env); + this.parent = parent; + this.map = parent.map; + } + + @Override + public synchronized void run() { + + try { + final TransactionRunner runner = new TransactionRunner(env); + final Object thread = this; + assertNull(currentTxn.getTransaction()); + + runner.run(new TransactionWorker() { + public void doWork() throws Exception { + assertNotNull(currentTxn.getTransaction()); + readCheck(map, TWO, null); + synchronized (parent) { parent.notify(); } + thread.wait(); + + // put TWO + assertNull(map.put(TWO, TWO)); + 
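+ // The put above is not yet committed; the parent thread can observe + // it only through its READ_UNCOMMITTED view of the map.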
readCheck(map, TWO, TWO); + synchronized (parent) { parent.notify(); } + thread.wait(); + + // commit TWO + } + }); + assertNull(currentTxn.getTransaction()); + } catch (Exception e) { + throw new RuntimeExceptionWrapper(e); + } + } + } + + private static void readCheck(StoredSortedMap checkMap, Object key, + Object expect) { + if (expect == null) { + assertNull(checkMap.get(key)); + assertTrue(checkMap.tailMap(key).isEmpty()); + assertTrue(!checkMap.tailMap(key).containsKey(key)); + assertTrue(!checkMap.keySet().contains(key)); + assertTrue(checkMap.duplicates(key).isEmpty()); + Iterator i = checkMap.keySet().iterator(); + try { + while (i.hasNext()) { + assertTrue(!key.equals(i.next())); + } + } finally { StoredIterator.close(i); } + } else { + assertEquals(expect, checkMap.get(key)); + assertEquals(expect, checkMap.tailMap(key).get(key)); + assertTrue(!checkMap.tailMap(key).isEmpty()); + assertTrue(checkMap.tailMap(key).containsKey(key)); + assertTrue(checkMap.keySet().contains(key)); + assertTrue(checkMap.values().contains(expect)); + assertTrue(!checkMap.duplicates(key).isEmpty()); + assertTrue(checkMap.duplicates(key).contains(expect)); + Iterator i = checkMap.keySet().iterator(); + try { + boolean found = false; + while (i.hasNext()) { + if (expect.equals(i.next())) { + found = true; + } + } + assertTrue(found); + } + finally { StoredIterator.close(i); } + } + } + + /** + * Tests transaction retries performed by TransactionRunner. + * + * This test is too sensitive to how lock conflict detection works on JE to + * work properly on DB core. + */ + /* */ + @Test + public void testRetry() + throws Exception { + + final AtomicInteger tries = new AtomicInteger(); + final AtomicInteger releaseLockAfterTries = new AtomicInteger(); + final Transaction txn1 = env.beginTransaction(null, null); + final Cursor txn1Cursor = + store.openCursor(txn1, CursorConfig.READ_COMMITTED); + final TransactionRunner runner = new TransactionRunner(env); + + final TransactionWorker worker = new TransactionWorker() { + public void doWork() throws Exception { + tries.getAndIncrement(); + if (releaseLockAfterTries.get() == tries.get()) { + /* With READ_COMMITTED, getNext releases the lock. */ + txn1Cursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null); + } + Transaction txn2 = currentTxn.getTransaction(); + assertNotNull(txn2); + txn2.setLockTimeout(10 * 1000); /* Speed up the test. */ + assertTrue(txn1 != txn2); + map.put(ONE, TWO); + } + }; + + /* Insert ONE and TWO with auto-commit. Leave no records locked. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + + testStore.getKeyBinding().objectToEntry(ONE, key); + testStore.getValueBinding().objectToEntry(ONE, value); + OperationStatus status = store.put(null, key, value); + assertSame(OperationStatus.SUCCESS, status); + + testStore.getKeyBinding().objectToEntry(TWO, key); + testStore.getValueBinding().objectToEntry(TWO, value); + status = store.put(null, key, value); + assertSame(OperationStatus.SUCCESS, status); + + /* + * Disable timeouts in txn1 so that the timeout will occur in txn2 (in + * TransactionRunner). + */ + txn1.setLockTimeout(0); + + /* + * Read ONE with txn1 and leave it locked. Expect the default number + * of retries and then a lock conflict.
+ */ + status = txn1Cursor.getFirst(key, value, null); + assertSame(OperationStatus.SUCCESS, status); + int expectTries = TransactionRunner.DEFAULT_MAX_RETRIES + 1; + releaseLockAfterTries.set(0); + tries.set(0); + try { + runner.run(worker); + fail(); + } catch (LockConflictException expected) { + } + assertEquals(expectTries, tries.get()); + + /* Same as above but use a custom number of retries. */ + status = txn1Cursor.getFirst(key, value, null); + assertSame(OperationStatus.SUCCESS, status); + expectTries = 5; + runner.setMaxRetries(expectTries - 1); + releaseLockAfterTries.set(0); + tries.set(0); + try { + runner.run(worker); + fail(); + } catch (LockConflictException expected) { + } + assertEquals(expectTries, tries.get()); + + /* + * In this variant the TransactionWorker will move the txn1Cursor + * forward to free the lock after 3 tries. The 4th try will succeed, + * so do not expect a lock conflict. + */ + status = txn1Cursor.getFirst(key, value, null); + assertSame(OperationStatus.SUCCESS, status); + expectTries = 3; + releaseLockAfterTries.set(expectTries); + tries.set(0); + runner.run(worker); + assertEquals(expectTries, tries.get()); + + /* Cleanup. */ + txn1Cursor.close(); + txn1.abort(); + } + /* */ + + /** + * Tests the exception handler hook, TransactionRunner.handleException, + * including custom retry limits. + * + * This test is too sensitive to how lock conflict detection works on JE to + * work properly on DB core. + */ + /* */ + @Test + public void testExceptionHandler() + throws Exception { + + class RetriesExceeded extends Exception {} + final int customMaxRetries = TransactionRunner.DEFAULT_MAX_RETRIES * 2; + + final TransactionRunner runner = new TransactionRunner(env) { + @Override + public int handleException(Exception e, + int retries, + int maxRetries) + throws Exception { + if (e instanceof LockConflictException) { + if (retries >= maxRetries) { + throw new RetriesExceeded(); + } else { + return customMaxRetries; + } + } else { + throw e; + } + } + }; + + final Transaction txn1 = env.beginTransaction(null, null); + final AtomicInteger tries = new AtomicInteger(); + + final TransactionWorker worker = new TransactionWorker() { + public void doWork() throws Exception { + tries.getAndIncrement(); + Transaction txn2 = currentTxn.getTransaction(); + assertNotNull(txn2); + txn2.setLockTimeout(10 * 1000); /* Speed up the test. */ + assertTrue(txn1 != txn2); + map.put(ONE, TWO); + } + }; + + /* Insert ONE with txn1. Leave the record locked. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + + testStore.getKeyBinding().objectToEntry(ONE, key); + testStore.getValueBinding().objectToEntry(ONE, value); + OperationStatus status = store.put(txn1, key, value); + assertSame(OperationStatus.SUCCESS, status); + + /* + * Disable timeouts in txn1 so that the timeout will occur in txn2 (in + * TransactionRunner). + */ + txn1.setLockTimeout(0); + + /* Expect the custom number of retries and the custom exception. */ + tries.set(0); + try { + runner.run(worker); + fail(); + } catch (RetriesExceeded expected) { + } + assertEquals(customMaxRetries + 1, tries.get()); + + /* Cleanup. */ + txn1.abort(); + } + /* */ +} diff --git a/test/com/sleepycat/collections/test/XACollectionTest.java b/test/com/sleepycat/collections/test/XACollectionTest.java new file mode 100644 index 0000000..1e4891c --- /dev/null +++ b/test/com/sleepycat/collections/test/XACollectionTest.java @@ -0,0 +1,141 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.collections.test; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import javax.transaction.xa.XAResource; + +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.XAEnvironment; +import com.sleepycat.je.log.LogUtils.XidImpl; +import com.sleepycat.util.ExceptionUnwrapper; +import com.sleepycat.util.test.TestEnv; +import com.sleepycat.utilint.StringUtils; + +/** + * Runs CollectionTest with special TestEnv and TransactionRunner objects to + * simulate XA transactions. + * + *
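+ * Each worker executes in an XAEnvironment using an explicit two-phase + * sequence: start(xid), doWork(), prepare(xid), end(xid), then commit or + * rollback (see XARunner below).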
+ * This test is currently JE-only and will not compile on DB core.
        + */ +public class XACollectionTest extends CollectionTest { + + @Parameters + public static List genParams() { + EnvironmentConfig config = new EnvironmentConfig(); + config.setTransactional(true); + TestEnv xaTestEnv = new XATestEnv(config); + List params = new ArrayList(); + for (int j = 0; j < TestStore.ALL.length; j += 1) { + for (int k = 0; k < 2; k += 1) { + boolean entityBinding = (k != 0); + params.add(new Object[] + {xaTestEnv, TestStore.ALL[j], entityBinding}); + } + } + + return params; + } + + public XACollectionTest(TestEnv testEnv, + TestStore testStore, + boolean isEntityBinding) { + super(testEnv, testStore, isEntityBinding, false /*isAutoCommit*/, + false, DEFAULT_MAX_KEY); + } + + @Override + protected TransactionRunner newTransactionRunner(Environment env) { + return new XARunner((XAEnvironment) env); + } + + private static class XATestEnv extends TestEnv { + + private XATestEnv(EnvironmentConfig config) { + super("XA", config); + } + + @Override + protected Environment newEnvironment(File dir, + EnvironmentConfig config) + throws DatabaseException { + + return new XAEnvironment(dir, config); + } + } + + private static class XARunner extends TransactionRunner { + + private final XAEnvironment xaEnv; + private static int sequence; + + private XARunner(XAEnvironment env) { + super(env); + xaEnv = env; + } + + @Override + public void run(TransactionWorker worker) + throws Exception { + + if (xaEnv.getThreadTransaction() == null) { + for (int i = 0;; i += 1) { + sequence += 1; + XidImpl xid = new XidImpl + (1, StringUtils.toUTF8(String.valueOf(sequence)), + null); + try { + xaEnv.start(xid, XAResource.TMNOFLAGS); + worker.doWork(); + int ret = xaEnv.prepare(xid); + xaEnv.end(xid, XAResource.TMSUCCESS); + if (ret != XAResource.XA_RDONLY) { + xaEnv.commit(xid, false); + } + return; + } catch (Exception e) { + e = ExceptionUnwrapper.unwrap(e); + try { + xaEnv.end(xid, XAResource.TMSUCCESS); + xaEnv.rollback(xid); + } catch (Exception e2) { + e2.printStackTrace(); + throw e; + } + if (i >= getMaxRetries() || + !(e instanceof LockConflictException)) { + throw e; + } + } + } + } else { /* Nested */ + try { + worker.doWork(); + } catch (Exception e) { + throw ExceptionUnwrapper.unwrap(e); + } + } + } + } +} diff --git a/test/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java b/test/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java new file mode 100644 index 0000000..f3cd6df --- /dev/null +++ b/test/com/sleepycat/collections/test/serial/CatalogCornerCaseTest.java @@ -0,0 +1,92 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.collections.test.serial; + +import static org.junit.Assert.fail; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ +public class CatalogCornerCaseTest extends TestBase { + + private Environment env; + + public CatalogCornerCaseTest() { + + customName = "CatalogCornerCaseTest"; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + SharedTestUtils.printTestName(customName); + env = TestEnv.BDB.open(customName); + } + + @After + public void tearDown() { + + try { + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } finally { + /* Ensure that GC can cleanup. */ + env = null; + } + } + + @Test + public void testReadOnlyEmptyCatalog() + throws Exception { + + String file = "catalog.db"; + + /* Create an empty database. */ + DatabaseConfig config = new DatabaseConfig(); + config.setAllowCreate(true); + DbCompat.setTypeBtree(config); + Database db = + DbCompat.testOpenDatabase(env, null, file, null, config); + db.close(); + + /* Open the empty database read-only. */ + config.setAllowCreate(false); + config.setReadOnly(true); + db = DbCompat.testOpenDatabase(env, null, file, null, config); + + /* Expect exception when creating the catalog. */ + try { + new StoredClassCatalog(db); + fail(); + } catch (RuntimeException e) { } + db.close(); + } +} diff --git a/test/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java new file mode 100644 index 0000000..6281f2c --- /dev/null +++ b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTest.java @@ -0,0 +1,193 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.collections.test.serial; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import java.io.File; +import java.io.ObjectStreamClass; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * Runs part two of the StoredClassCatalogTest. This part is run with the + * new/updated version of TestSerial in the classpath. It uses the + * environment and databases created by StoredClassCatalogTestInit. It + * verifies that it can read objects serialized using the old class format, + * and that it can create new objects with the new class format. + * + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class StoredClassCatalogTest extends TestBase + implements TransactionWorker { + + static final String CATALOG_FILE = "catalogtest-catalog.db"; + static final String STORE_FILE = "catalogtest-store.db"; + + @Parameters + public static List genParams() { + List params = new ArrayList(); + for (TestEnv testEnv : TestEnv.ALL) + params.add(new Object[]{testEnv}); + + return params; + } + + private TestEnv testEnv; + private Environment env; + private StoredClassCatalog catalog; + private StoredClassCatalog catalog2; + private Database store; + private Map map; + private TransactionRunner runner; + + public StoredClassCatalogTest(TestEnv testEnv) { + + this.testEnv = testEnv; + customName = makeTestName(testEnv); + } + + static String makeTestName(TestEnv testEnv) { + return "StoredClassCatalogTest-" + testEnv.getName(); + } + + @Before + public void setUp() + throws Exception { + + SharedTestUtils.printTestName(customName); + + /* + * Copy the environment generated by StoredClassCatalogTestInit in + * test dest dir, which is required to perform this test. 
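+ * (StoredClassCatalogTestInit must be run first, with the original + * version of TestSerial on the classpath, to create that environment.)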
+ */ + SharedTestUtils.copyDir( + new File(SharedTestUtils.getDestDir(), customName), + new File(SharedTestUtils.getTestDir(), customName)); + + env = testEnv.open(customName, false); + runner = new TransactionRunner(env); + + catalog = new StoredClassCatalog(openDb(CATALOG_FILE, false)); + catalog2 = new StoredClassCatalog(openDb("catalog2.db", true)); + + SerialBinding keyBinding = new SerialBinding(catalog, + String.class); + SerialBinding valueBinding = new SerialBinding(catalog, + TestSerial.class); + store = openDb(STORE_FILE, false); + + map = new StoredMap(store, keyBinding, valueBinding, true); + } + + private Database openDb(String file, boolean create) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(testEnv.isTxnMode()); + config.setAllowCreate(create); + + return DbCompat.testOpenDatabase(env, null, file, null, config); + } + + @After + public void tearDown() { + + try { + if (catalog != null) { + catalog.close(); + catalog.close(); // should have no effect + } + if (catalog2 != null) { + catalog2.close(); + } + if (store != null) { + store.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.err.println("Ignored exception during tearDown: "); + e.printStackTrace(); + } finally { + /* Ensure that GC can cleanup. */ + catalog = null; + catalog2 = null; + store = null; + env = null; + testEnv = null; + map = null; + runner = null; + } + } + + @Test + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() + throws Exception { + + TestSerial one = (TestSerial) map.get("one"); + TestSerial two = (TestSerial) map.get("two"); + assertNotNull(one); + assertNotNull(two); + assertEquals(one, two.getOther()); + assertNull(one.getStringField()); + assertNull(two.getStringField()); + + TestSerial three = new TestSerial(two); + assertNotNull(three.getStringField()); + map.put("three", three); + three = (TestSerial) map.get("three"); + assertEquals(two, three.getOther()); + + ObjectStreamClass desc = ObjectStreamClass.lookup(TestSerial.class); + + assertNotNull(catalog.getClassID(desc)); + assertNotNull(catalog.getClassID(desc)); + + // test with empty catalog + assertNotNull(catalog2.getClassID(desc)); + assertNotNull(catalog2.getClassID(desc)); + } +} diff --git a/test/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java new file mode 100644 index 0000000..545ff83 --- /dev/null +++ b/test/com/sleepycat/collections/test/serial/StoredClassCatalogTestInit.java @@ -0,0 +1,169 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.collections.test.serial; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.collections.StoredMap; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * Runs part one of the StoredClassCatalogTest. This part is run with the + * old/original version of TestSerial in the classpath. It creates a fresh + * environment and databases containing serialized versions of the old class. + * When StoredClassCatalogTest is run, it will read these objects from the + * database created here. + * + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class StoredClassCatalogTestInit extends TestBase + implements TransactionWorker { + + static final String CATALOG_FILE = StoredClassCatalogTest.CATALOG_FILE; + static final String STORE_FILE = StoredClassCatalogTest.STORE_FILE; + + @Parameters + public static List genParams() { + List params = new ArrayList(); + for (TestEnv testEnv : TestEnv.ALL) + params.add(new Object[]{testEnv}); + + return params; + } + + private TestEnv testEnv; + private Environment env; + private StoredClassCatalog catalog; + private Database store; + private Map map; + private TransactionRunner runner; + + public StoredClassCatalogTestInit(TestEnv testEnv) { + + this.testEnv = testEnv; + customName = StoredClassCatalogTest.makeTestName(testEnv); + } + + @Before + public void setUp() + throws Exception { + + SharedTestUtils.printTestName(customName); + env = testEnv.open(customName); + runner = new TransactionRunner(env); + + catalog = new StoredClassCatalog(openDb(CATALOG_FILE)); + + SerialBinding keyBinding = new SerialBinding(catalog, String.class); + SerialBinding valueBinding = + new SerialBinding(catalog, TestSerial.class); + store = openDb(STORE_FILE); + + map = new StoredMap(store, keyBinding, valueBinding, true); + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + DbCompat.setTypeBtree(config); + config.setTransactional(testEnv.isTxnMode()); + config.setAllowCreate(true); + + return DbCompat.testOpenDatabase(env, null, file, null, config); + } + + @After + public void tearDown() + throws Exception { + + try { + if (catalog != null) { + catalog.close(); + catalog.close(); // should have no effect + } + if (store != null) { + store.close(); + } + if (env != null) { + env.close(); + } + + /* + * Copy environment generated by this test to test dest dir. + * Since the environment is necessary for StoreClassCatalogTest. 
+ */ + SharedTestUtils.copyDir(testEnv.getDirectory(customName, false), + new File(SharedTestUtils.getDestDir(), customName)); + + } catch (Exception e) { + System.err.println("Ignored exception during tearDown: "); + e.printStackTrace(); + } finally { + /* Ensure that GC can cleanup. */ + catalog = null; + store = null; + env = null; + testEnv = null; + map = null; + runner = null; + } + } + + @Test + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() { + TestSerial one = new TestSerial(null); + TestSerial two = new TestSerial(one); + assertNull("Likely the classpath contains the wrong version of the" + + " TestSerial class, the 'original' version is required", + one.getStringField()); + assertNull(two.getStringField()); + map.put("one", one); + map.put("two", two); + one = (TestSerial) map.get("one"); + two = (TestSerial) map.get("two"); + assertEquals(one, two.getOther()); + assertNull(one.getStringField()); + assertNull(two.getStringField()); + } +} diff --git a/test/com/sleepycat/collections/test/serial/TestSerial.java b/test/com/sleepycat/collections/test/serial/TestSerial.java new file mode 100644 index 0000000..c03dd02 --- /dev/null +++ b/test/com/sleepycat/collections/test/serial/TestSerial.java @@ -0,0 +1,75 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.collections.test.serial; + +/** + * @see StoredClassCatalogTest + * @author Mark Hayes + */ +class TestSerial implements java.io.Serializable { + + static final long serialVersionUID = -3738980000390384920L; + + private int i = 123; + private TestSerial other; + + // The following field 's' was added after this class was compiled and + // serialized instances were saved in resource files. This allows testing + // that the original stored instances can be deserialized after changing + // the class. The serialVersionUID is needed for this according to Java + // serialization rules, and was generated with the serialver tool. + // + private String s = "string"; + + TestSerial(TestSerial other) { + + this.other = other; + } + + TestSerial getOther() { + + return other; + } + + int getIntField() { + + return i; + } + + String getStringField() { + + return s; // this returned null before field 's' was added. + } + + public boolean equals(Object object) { + + try { + TestSerial o = (TestSerial) object; + if ((o.other == null) ? (this.other != null) + : (!o.other.equals(this.other))) { + return false; + } + if (this.i != o.i) { + return false; + } + // the following test was not done before field 's' was added + if ((o.s == null) ? 
(this.s != null) + : (!o.s.equals(this.s))) { + return false; + } + return true; + } catch (ClassCastException e) { + return false; + } + } +} diff --git a/test/com/sleepycat/collections/test/serial/TestSerial.java.original b/test/com/sleepycat/collections/test/serial/TestSerial.java.original new file mode 100644 index 0000000..4d11d34 --- /dev/null +++ b/test/com/sleepycat/collections/test/serial/TestSerial.java.original @@ -0,0 +1,71 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved. + * + */ +package com.sleepycat.collections.test.serial; + +/** + * @see StoredClassCatalogTest + * @author Mark Hayes + */ +class TestSerial implements java.io.Serializable +{ + static final long serialVersionUID = -3738980000390384920L; + + private int i = 123; + private TestSerial other; + + // The following field 's' was added after this class was compiled and + // serialized instances were saved in resource files. This allows testing + // that the original stored instances can be deserialized after changing + // the class. The serialVersionUID is needed for this according to Java + // serialization rules, and was generated with the serialver tool. + // + //private String s = "string"; + + TestSerial(TestSerial other) + { + this.other = other; + } + + TestSerial getOther() + { + return other; + } + + int getIntField() + { + return i; + } + + String getStringField() + { + return null; // this returned null before field 's' was added. + } + + public boolean equals(Object object) + { + try + { + TestSerial o = (TestSerial) object; + if ((o.other == null) ? (this.other != null) + : (!o.other.equals(this.other))) + return false; + if (this.i != o.i) + return false; + // the following test was not done before field 's' was added + /* + if ((o.s == null) ? (this.s != null) + : (!o.s.equals(this.s))) + return false; + */ + return true; + } + catch (ClassCastException e) + { + return false; + } + } +} diff --git a/test/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java b/test/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java new file mode 100644 index 0000000..18f3962 --- /dev/null +++ b/test/com/sleepycat/collections/test/serial/TupleSerialFactoryTest.java @@ -0,0 +1,252 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.collections.test.serial; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.serial.test.MarshalledObject; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.collections.TupleSerialFactory; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ + +@RunWith(Parameterized.class) +public class TupleSerialFactoryTest extends TestBase + implements TransactionWorker { + + @Parameters + public static List genParams() { + List params = new ArrayList(); + for (TestEnv testEnv : TestEnv.ALL) { + for (int sorted = 0; sorted < 2; sorted += 1) { + params.add(new Object[]{testEnv, sorted != 0 }); + } + } + + return params; + } + + private TestEnv testEnv; + private Environment env; + private StoredClassCatalog catalog; + private TransactionRunner runner; + private TupleSerialFactory factory; + private Database store1; + private Database store2; + private SecondaryDatabase index1; + private SecondaryDatabase index2; + private final boolean isSorted; + private Map storeMap1; + private Map storeMap2; + private Map indexMap1; + private Map indexMap2; + + public TupleSerialFactoryTest(TestEnv testEnv, boolean isSorted) { + + + this.testEnv = testEnv; + this.isSorted = isSorted; + + String name = "TupleSerialFactoryTest-" + testEnv.getName(); + name += isSorted ? "-sorted" : "-unsorted"; + customName = name; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + SharedTestUtils.printTestName(customName); + env = testEnv.open(customName); + runner = new TransactionRunner(env); + + createDatabase(); + } + + @After + public void tearDown() { + + try { + if (index1 != null) { + index1.close(); + } + if (index2 != null) { + index2.close(); + } + if (store1 != null) { + store1.close(); + } + if (store2 != null) { + store2.close(); + } + if (catalog != null) { + catalog.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("Ignored exception during tearDown: " + e); + } finally { + /* Ensure that GC can cleanup. 
*/ + index1 = null; + index2 = null; + store1 = null; + store2 = null; + catalog = null; + env = null; + testEnv = null; + runner = null; + factory = null; + storeMap1 = null; + storeMap2 = null; + indexMap1 = null; + indexMap2 = null; + } + } + + @Test + public void runTest() + throws Exception { + + runner.run(this); + } + + public void doWork() { + createViews(); + writeAndRead(); + } + + private void createDatabase() + throws Exception { + + catalog = new StoredClassCatalog(openDb("catalog.db")); + factory = new TupleSerialFactory(catalog); + assertSame(catalog, factory.getCatalog()); + + store1 = openDb("store1.db"); + store2 = openDb("store2.db"); + index1 = openSecondaryDb(factory, "1", store1, "index1.db", null); + index2 = openSecondaryDb(factory, "2", store2, "index2.db", store1); + } + + private Database openDb(String file) + throws Exception { + + DatabaseConfig config = new DatabaseConfig(); + config.setTransactional(testEnv.isTxnMode()); + config.setAllowCreate(true); + DbCompat.setTypeBtree(config); + + return DbCompat.testOpenDatabase(env, null, file, null, config); + } + + private SecondaryDatabase openSecondaryDb(TupleSerialFactory factory, + String keyName, + Database primary, + String file, + Database foreignStore) + throws Exception { + + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(testEnv.isTxnMode()); + secConfig.setAllowCreate(true); + DbCompat.setTypeBtree(secConfig); + secConfig.setKeyCreator(factory.getKeyCreator(MarshalledObject.class, + keyName)); + if (foreignStore != null) { + secConfig.setForeignKeyDatabase(foreignStore); + secConfig.setForeignKeyDeleteAction( + ForeignKeyDeleteAction.CASCADE); + } + + return DbCompat.testOpenSecondaryDatabase + (env, null, file, null, primary, secConfig); + } + + private void createViews() { + if (isSorted) { + storeMap1 = factory.newSortedMap(store1, String.class, + MarshalledObject.class, true); + storeMap2 = factory.newSortedMap(store2, String.class, + MarshalledObject.class, true); + indexMap1 = factory.newSortedMap(index1, String.class, + MarshalledObject.class, true); + indexMap2 = factory.newSortedMap(index2, String.class, + MarshalledObject.class, true); + } else { + storeMap1 = factory.newMap(store1, String.class, + MarshalledObject.class, true); + storeMap2 = factory.newMap(store2, String.class, + MarshalledObject.class, true); + indexMap1 = factory.newMap(index1, String.class, + MarshalledObject.class, true); + indexMap2 = factory.newMap(index2, String.class, + MarshalledObject.class, true); + } + } + + private void writeAndRead() { + MarshalledObject o1 = new MarshalledObject("data1", "pk1", "ik1", ""); + assertNull(storeMap1.put(null, o1)); + + assertEquals(o1, storeMap1.get("pk1")); + assertEquals(o1, indexMap1.get("ik1")); + + MarshalledObject o2 = new MarshalledObject("data2", "pk2", "", "pk1"); + assertNull(storeMap2.put(null, o2)); + + assertEquals(o2, storeMap2.get("pk2")); + assertEquals(o2, indexMap2.get("pk1")); + + /* + * store1 contains o1 with primary key "pk1" and index key "ik1" + * store2 contains o2 with primary key "pk2" and foreign key "pk1" + * which is the primary key of store1 + */ + + storeMap1.remove("pk1"); + assertNull(storeMap1.get("pk1")); + assertNull(indexMap1.get("ik1")); + assertNull(storeMap2.get("pk2")); + assertNull(indexMap2.get("pk1")); + } +} diff --git a/test/com/sleepycat/je/ApiTest.java b/test/com/sleepycat/je/ApiTest.java new file mode 100644 index 0000000..2ad2fd5 --- /dev/null +++ b/test/com/sleepycat/je/ApiTest.java @@ -0,0 +1,36 @@ 
+/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.fail; + +import org.junit.Test; + +/** + * Test parameter handling for api methods. + */ +public class ApiTest { + + @Test + public void testBasic() { + try { + new Environment(null, null); + fail("Should get exception"); + } catch (IllegalArgumentException e) { + // expected exception + } catch (Exception e) { + fail("Shouldn't get other exception"); + } + } +} diff --git a/test/com/sleepycat/je/ClassLoaderTest.java b/test/com/sleepycat/je/ClassLoaderTest.java new file mode 100644 index 0000000..bb3ed75 --- /dev/null +++ b/test/com/sleepycat/je/ClassLoaderTest.java @@ -0,0 +1,311 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +import java.io.File; +import java.util.Comparator; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.serial.SerialBinding; +import com.sleepycat.bind.serial.StoredClassCatalog; +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.SimpleClassLoader; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Tests the Environment ClassLoader property. 
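+ * + * The loader is exercised both through the thread context class loader and + * through EnvironmentConfig.setClassLoader, for btree/duplicate comparators, + * serial bindings, and DPL entity classes.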
+ */ +public class ClassLoaderTest extends DualTestCase { + + public interface LoadedClass { + + Comparator getReverseComparator(); + + Class> getReverseComparatorClass(); + + Object getSerializableInstance(); + + void writeEntities(EntityStore store); + + void readEntities(EntityStore store); + } + + private static final String LOADED_CLASS_IMPL = + "com.sleepycat.je.LoadedClassImpl"; + + private final File envHome; + private Environment env; + private ClassLoader myLoader; + private LoadedClass loadedClass; + + public ClassLoaderTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + + final File classDir = new File(System.getProperty("testclassloader")); + final ClassLoader parentClassLoader = + Thread.currentThread().getContextClassLoader(); + myLoader = new SimpleClassLoader(parentClassLoader, classDir); + + final Class cls = + Class.forName(LOADED_CLASS_IMPL, true /*initialize*/, myLoader); + loadedClass = (LoadedClass) cls.newInstance(); + } + + abstract class WithThreadLoader { + + void exec() { + final ClassLoader save = + Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(myLoader); + try { + run(); + } finally { + Thread.currentThread().setContextClassLoader(save); + } + } + + abstract void run(); + } + + private void openEnv(boolean configLoader) { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setClassLoader(configLoader ? myLoader : null); + env = create(envHome, envConfig); + } + + private void closeEnv() { + close(env); + env = null; + } + + @Test + public void testDbComparatorWithThreadLoader() + throws Exception { + + new WithThreadLoader() { + void run() { + checkDbComparator(false /*configLoader*/); + } + }.exec(); + } + + @Test + public void testDbComparatorWithEnvLoader() + throws Exception { + + checkDbComparator(true /*configLoader*/); + } + + private void checkDbComparator(boolean configLoader) { + + /* Create new env and dbs. */ + + /* First DB using comparator stored by class name. */ + openEnv(configLoader); + Database db = openDbWithComparator(true /*cmpByClass*/); + insertReverseOrder(db); + readReverseOrder(db); + db.close(); + + /* Second DB using comparator stored as an instance. */ + db = openDbWithComparator(false /*cmpByClass*/); + insertReverseOrder(db); + readReverseOrder(db); + db.close(); + closeEnv(); + + /* Open existing env and dbs. */ + + /* First DB using comparator stored by class name. */ + openEnv(configLoader); + db = openDbWithComparator(true /*cmpByClass*/); + readReverseOrder(db); + db.close(); + + /* Second DB using comparator stored as an instance. */ + db = openDbWithComparator(false /*cmpByClass*/); + readReverseOrder(db); + db.close(); + closeEnv(); + } + + private Database openDbWithComparator(boolean cmpByClass) { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + if (cmpByClass) { + dbConfig.setBtreeComparator + (loadedClass.getReverseComparatorClass()); + dbConfig.setDuplicateComparator + (loadedClass.getReverseComparatorClass()); + } else { + dbConfig.setBtreeComparator + (loadedClass.getReverseComparator()); + dbConfig.setDuplicateComparator + (loadedClass.getReverseComparator()); + } + final String name = "testDB" + + (cmpByClass ? 
"CmpByClass" : "CmpByInstance"); + return env.openDatabase(null, name, dbConfig); + } + + private void insertReverseOrder(Database db) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + /* Insert non-dups. */ + IntegerBinding.intToEntry(1, data); + for (int i = 1; i <= 5; i += 1) { + IntegerBinding.intToEntry(i, key); + assertSame(OperationStatus.SUCCESS, + db.putNoOverwrite(null, key, data)); + } + + /* Insert dups. */ + IntegerBinding.intToEntry(5, key); + for (int i = 2; i <= 5; i += 1) { + IntegerBinding.intToEntry(i, data); + assertSame(OperationStatus.SUCCESS, + db.putNoDupData(null, key, data)); + } + } + + private void readReverseOrder(Database db) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + /* Read non-dups. */ + final Cursor c = db.openCursor(null, null); + for (int i = 5; i >= 1; i -= 1) { + assertSame(OperationStatus.SUCCESS, + c.getNextNoDup(key, data, LockMode.DEFAULT)); + assertEquals(i, IntegerBinding.entryToInt(key)); + } + assertSame(OperationStatus.NOTFOUND, + c.getNextNoDup(key, data, LockMode.DEFAULT)); + + /* Read dups. */ + assertSame(OperationStatus.SUCCESS, + c.getFirst(key, data, LockMode.DEFAULT)); + assertEquals(5, IntegerBinding.entryToInt(key)); + assertEquals(5, IntegerBinding.entryToInt(data)); + for (int i = 4; i >= 1; i -= 1) { + assertSame(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + assertEquals(5, IntegerBinding.entryToInt(key)); + assertEquals(i, IntegerBinding.entryToInt(data)); + } + + c.close(); + } + + @Test + public void testSerialBindingWithThreadLoader() + throws Exception { + + new WithThreadLoader() { + void run() { + checkSerialBinding(false /*configLoader*/); + } + }.exec(); + } + + @Test + public void testSerialBindingWithEnvLoader() + throws Exception { + + checkSerialBinding(true /*configLoader*/); + } + + private void checkSerialBinding(boolean configLoader) { + + openEnv(configLoader); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + final Database db = env.openDatabase(null, "catalog", dbConfig); + + final StoredClassCatalog catalog = new StoredClassCatalog(db); + final SerialBinding binding = new SerialBinding(catalog, null); + + final Object o = loadedClass.getSerializableInstance(); + final DatabaseEntry entry = new DatabaseEntry(); + + binding.objectToEntry(o, entry); + final Object o2 = binding.entryToObject(entry); + assertEquals(o, o2); + + db.close(); + closeEnv(); + } + + @Test + public void testDPLWithThreadLoader() + throws Exception { + + new WithThreadLoader() { + void run() { + checkDPL(false /*configLoader*/); + } + }.exec(); + } + + @Test + public void testDPLWithEnvLoader() + throws Exception { + + checkDPL(true /*configLoader*/); + } + + private void checkDPL(boolean configLoader) { + + openEnv(configLoader); + final StoreConfig config = new StoreConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + EntityStore store = new EntityStore(env, "foo", config); + loadedClass.writeEntities(store); + store.close(); + closeEnv(); + + openEnv(configLoader); + store = new EntityStore(env, "foo", config); + loadedClass.readEntities(store); + store.close(); + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/ConfigBeanInfoTest.java b/test/com/sleepycat/je/ConfigBeanInfoTest.java new file mode 100644 index 0000000..7922454 --- /dev/null +++ b/test/com/sleepycat/je/ConfigBeanInfoTest.java 
@@ -0,0 +1,292 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.fail;
+
+import java.beans.BeanInfo;
+import java.beans.IntrospectionException;
+import java.beans.Introspector;
+import java.beans.PropertyDescriptor;
+import java.lang.reflect.Method;
+
+import org.junit.Test;
+
+import com.sleepycat.util.test.TestBase;
+
+/*
+ * Test that the config classes follow the rules described in
+ * com.sleepycat.util.ConfigBeanInfoBase.
+ */
+public class ConfigBeanInfoTest extends TestBase {
+
+    /*
+     * The list of all the config classes. If you create a new config class,
+     * you will need to add it to this list.
+     */
+    private static String[] configClasses = {
+        "com.sleepycat.je.CheckpointConfig",
+        "com.sleepycat.je.CursorConfig",
+        "com.sleepycat.je.DatabaseConfig",
+        "com.sleepycat.je.DiskOrderedCursorConfig",
+        "com.sleepycat.je.EnvironmentConfig",
+        "com.sleepycat.je.EnvironmentMutableConfig",
+        "com.sleepycat.je.JoinConfig",
+        "com.sleepycat.je.PreloadConfig",
+        "com.sleepycat.je.SecondaryConfig",
+        "com.sleepycat.je.SequenceConfig",
+        "com.sleepycat.je.StatsConfig",
+        "com.sleepycat.je.TransactionConfig",
+        "com.sleepycat.je.VerifyConfig",
+        "com.sleepycat.je.dbi.ReplicatedDatabaseConfig",
+        "com.sleepycat.je.rep.NetworkRestoreConfig",
+        "com.sleepycat.je.rep.ReplicationBasicConfig",
+        "com.sleepycat.je.rep.ReplicationConfig",
+        "com.sleepycat.je.rep.ReplicationMutableConfig",
+        "com.sleepycat.je.rep.ReplicationNetworkConfig",
+        "com.sleepycat.je.rep.ReplicationSSLConfig",
+        "com.sleepycat.je.rep.util.ldiff.LDiffConfig",
+        "com.sleepycat.persist.evolve.EvolveConfig",
+        "com.sleepycat.persist.StoreConfig",
+        "com.sleepycat.je.rep.monitor.MonitorConfig",
+    };
+
+    /* The methods that are not included in the tests. */
+    private static String[] ignoreMethods = {
+        "com.sleepycat.je.DiskOrderedCursorConfig.setMaxSeedTestHook",
+    };
+
+    /*
+     * Test that a FooConfig.java has a related FooConfigBeanInfo.java, which
+     * records the setter/getter methods for all properties.
+     */
+    @Test
+    public void testBeanInfoExist() {
+        for (int i = 0; i < configClasses.length; i++) {
+            try {
+                Class configClass = Class.forName(configClasses[i]);
+
+                /* Get the public methods. */
+                Method[] methods = configClass.getMethods();
+                for (int j = 0; j < methods.length; j++) {
+                    String name = methods[j].getName();
+                    String subName = name.substring(0, 3);
+                    String propertyName = name.substring(3);
+
+                    /* If it is a setter method. */
+                    if (subName.equals("set")) {
+                        if (isIgnoreMethods(configClasses[i] + "." + name)) {
+                            continue;
+                        }
+                        String getterName = "get" + propertyName;
+                        try {
+
+                            /*
+                             * Check if there is a corresponding getter method.
+                             * This getter method cannot take any parameters,
+                             * which is consistent with the design patterns
+                             * for properties in JavaBeans. If not,
+                             * NoSuchMethodException will be thrown and caught.
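+                             *
+                             * (For example, a conforming pair for a
+                             * hypothetical int property "foo" would be a
+                             * one-argument setFoo(int) together with a
+                             * zero-argument getFoo() that returns int.)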
+                             */
+                            Method getter = configClass.getMethod(getterName);
+
+                            /*
+                             * According to Design Patterns for Properties
+                             * (refer to the Sun JavaBeans spec), the setter
+                             * method should take exactly one parameter, and
+                             * the type of that parameter should be the same
+                             * as the return type of the getter method.
+                             */
+                            if (methods[j].getParameterTypes().length != 1 ||
+                                !methods[j].getParameterTypes()[0].
+                                    equals(getter.getReturnType())) {
+                                break;
+                            }
+
+                            /*
+                             * If the property has setter and getter methods,
+                             * then we need to add a FooConfigBeanInfo class
+                             * for this FooConfig class. If the return type of
+                             * the setter method is not void, we will also
+                             * need to add a setter method called
+                             * setPropertyNameVoid with a void return type.
+                             */
+                            try {
+                                Class beanInfoClass = Class.forName(
+                                    configClasses[i] + "BeanInfo");
+                            } catch (ClassNotFoundException e) {
+
+                                /* No FooConfigBeanInfo class exists. */
+                                fail("Have not defined beanInfo class: " +
+                                     configClasses[i] + "BeanInfo");
+                            } catch (SecurityException e) {
+                                e.printStackTrace();
+                            }
+                            try {
+                                BeanInfo info = Introspector.
+                                    getBeanInfo(configClass);
+                                PropertyDescriptor[] descriptors =
+                                    info.getPropertyDescriptors();
+
+                                boolean ifFound = false;
+
+                                /*
+                                 * Check if the setter method of the property
+                                 * has been defined in the BeanInfo class.
+                                 */
+                                for (int k = 0; k < descriptors.length; ++k) {
+                                    String methodName = descriptors[k].
+                                        getWriteMethod().getName();
+
+                                    /*
+                                     * The name of the setter method can be
+                                     * setPropertyName or setPropertyNameVoid.
+                                     */
+                                    if (methodName.equals(name) ||
+                                        methodName.equals(name + "Void")) {
+                                        ifFound = true;
+                                        break;
+                                    }
+                                }
+
+                                if (!ifFound) {
+                                    fail("No setter method " + name +
+                                         "[Void] for " + configClasses[i] +
+                                         "BeanInfo");
+                                }
+                            } catch (IntrospectionException e) {
+                                e.printStackTrace();
+                            }
+                        } catch (NoSuchMethodException e) {
+
+                            /*
+                             * There is no corresponding getter method for
+                             * this property, so we do nothing.
+                             */
+                        }
+                    }
+                }
+            } catch (SecurityException e) {
+                e.printStackTrace();
+            } catch (ClassNotFoundException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    /*
+     * Test that, for each property, there are two setter methods: setFoo,
+     * which returns "this", and setFooVoid, which returns void.
+     */
+    @Test
+    public void testSetterReturnThisAndVoid() {
+        for (int i = 0; i < configClasses.length; i++) {
+            try {
+                Class configClass = Class.forName(configClasses[i]);
+                /* Get the public methods. */
+                Method[] methods = configClass.getMethods();
+                for (int j = 0; j < methods.length; j++) {
+                    String methodName = methods[j].getName();
+                    String subName = methodName.substring(0, 3);
+                    String propertyName = methodName.substring(3);
+                    Class returnType = methods[j].getReturnType();
+
+                    /* If it is a setter method. */
+                    if (subName.equals("set")) {
+                        if (isIgnoreMethods(configClasses[i] + "." +
+                                            methodName)) {
+                            continue;
+                        }
+                        String getterName = "get" + propertyName;
+                        try {
+
+                            /*
+                             * Check if there is a corresponding getter method.
+                             * This getter method cannot take any parameters,
+                             * which is consistent with the design patterns
+                             * for properties in JavaBeans. If not,
+                             * NoSuchMethodException will be thrown and caught.
+                             */
+                            Method getter = configClass.getMethod(getterName);
+
+                            /*
+                             * According to Design Patterns for Properties
+                             * (refer to the Sun JavaBeans spec), the setter
+                             * method should take exactly one parameter, and
+                             * the type of that parameter should be the same
+                             * as the return type of the getter method.
+                             */
+                            if (methods[j].getParameterTypes().length != 1 ||
+                                !methods[j].getParameterTypes()[0].
+                                    equals(getter.getReturnType())) {
+                                break;
+                            }
+
+                            /*
+                             * The setter method setPropertyName should return
+                             * "this".
+                             */
+                            assert returnType.isAssignableFrom(configClass) :
+                                "The setter method " +
+                                configClass.getName() + "." + methodName +
+                                " should return " + configClass.getName();
+
+                            /*
+                             * Check if there is a corresponding setter method
+                             * called setFooVoid, which returns void.
+                             */
+                            String setterVoidName = methodName + "Void";
+                            try {
+                                Method setterVoid =
+                                    configClass.getMethod(setterVoidName,
+                                        methods[j].getParameterTypes());
+                                returnType = setterVoid.getReturnType();
+                                assert returnType.toString().equals("void") :
+                                    "The setter method " +
+                                    configClass.getName() + "." +
+                                    setterVoidName + " should return " +
+                                    "void";
+                            } catch (NoSuchMethodException e) {
+                                fail("There should be a setter method " +
+                                     setterVoidName +
+                                     ", which should return void");
+                            }
+                        } catch (NoSuchMethodException e) {
+
+                            /*
+                             * If there is no corresponding getter method for
+                             * this property, then we do not regard it as a
+                             * valid property, which is consistent with the
+                             * JavaBeans design patterns. So we do nothing.
+                             */
+                        }
+                    }
+                }
+            } catch (ClassNotFoundException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    private boolean isIgnoreMethods(String methodName) {
+        for (int i = 0; i < ignoreMethods.length; i++) {
+            if (ignoreMethods[i].equals(methodName)) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
diff --git a/test/com/sleepycat/je/CursorEdgeTest.java b/test/com/sleepycat/je/CursorEdgeTest.java
new file mode 100644
index 0000000..89a1f57
--- /dev/null
+++ b/test/com/sleepycat/je/CursorEdgeTest.java
@@ -0,0 +1,729 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.latch.LatchSupport;
+import com.sleepycat.je.util.DualTestCase;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * Test edge cases in cursor traversal. In particular, look at duplicates and
+ * sets of keys interspersed with deletions.
+ */
+public class CursorEdgeTest extends DualTestCase {
+
+    private static final boolean DEBUG = false;
+    private Environment env;
+    private final File envHome;
+    private boolean operationStarted;
+
+    public CursorEdgeTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    public void initEnv()
+        throws DatabaseException {
+
+        /*
+         * Create an environment w/transactions and a max node size of 6.
+         * Be sure to disable the compressor; we want some holes in the
+         * tree.
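+         *
+         * (With NODE_MAX set to 6, each BIN holds at most six entries, so
+         * deletions left uncompressed leave empty slots, i.e. the holes
+         * these traversal tests step over.)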
+ */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam(EnvironmentParams.ENV_RUN_INCOMPRESSOR. + getName(), + "false"); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + } + + /** + * Insert a number of duplicates, making sure that the duplicate tree + * has multiple bins. Make sure that we can skip over the duplicates and + * find the right value. + */ + @Test + public void testSearchOnDuplicatesWithDeletions() + throws Throwable { + + initEnv(); + + Database myDb = null; + Cursor cursor = null; + try { + /* Set up a db */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + myDb = env.openDatabase(null, "foo", dbConfig); + + /* + * Insert k1/d1, then a duplicate range of k2/d1 -> k2/d15, then + * k3/d1. Now delete the beginning part of the duplicate range, + * trying to get more than a whole bin's worth (k2/d1 -> + * k2/d7). Because the compressor is not enabled, there will be a + * hole in the k2 range. While we're at it, delete k2/d10 - k2/d13 + * too, make sure we can traverse a hole in the middle of the + * duplicate range. + */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + myDb.put(null, key, data); // k1/d1 + key.setData(TestUtils.getTestArray(3)); + myDb.put(null, key, data); // k3/d1 + + /* insert k2 range */ + key.setData(TestUtils.getTestArray(2)); + for (int i = 1; i <= 15; i++) { + data.setData(TestUtils.getTestArray(i)); + myDb.put(null, key, data); + } + + /* Now delete k2/d1 -> k2/d7 */ + Transaction txn = + env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor = myDb.openCursor(txn, CursorConfig.DEFAULT); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, LockMode.DEFAULT)); + for (int i = 0; i < 7; i ++) { + assertEquals(OperationStatus.SUCCESS, cursor.delete()); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + } + + /* Also delete k2/d10 - k2/d13 */ + data.setData(TestUtils.getTestArray(10)); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBoth(key, data, LockMode.DEFAULT)); + for (int i = 0; i < 3; i ++) { + assertEquals(OperationStatus.SUCCESS, cursor.delete()); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + } + + /* Double check what's in the tree */ + if (DEBUG) { + Cursor checkCursor = myDb.openCursor(txn, + CursorConfig.DEFAULT); + while (checkCursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + System.out.println("key=" + + TestUtils.getTestVal(key.getData()) + + " data=" + + TestUtils.getTestVal(data.getData())); + } + checkCursor.close(); + } + cursor.close(); + cursor = null; + txn.commit(); + + if (DualTestCase.isReplicatedTest(getClass())) { + return; + } + + /* + * Now make sure we can find k2/d8 + */ + Cursor readCursor = myDb.openCursor(null, CursorConfig.DEFAULT); + key.setData(TestUtils.getTestArray(2)); + + /* Use key search */ + assertEquals(OperationStatus.SUCCESS, + readCursor.getSearchKey(key, data, LockMode.DEFAULT)); + assertEquals(2, TestUtils.getTestVal(key.getData())); + assertEquals(8, TestUtils.getTestVal(data.getData())); + + /* Use range search */ + assertEquals(OperationStatus.SUCCESS, + 
readCursor.getSearchKeyRange(key, data,
+                                                      LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /* Use search both */
+            data.setData(TestUtils.getTestArray(8));
+            assertEquals(OperationStatus.SUCCESS,
+                         readCursor.getSearchBoth(key, data,
+                                                  LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /* Use search both range, starting data at 8 */
+            data.setData(TestUtils.getTestArray(8));
+            assertEquals(OperationStatus.SUCCESS,
+                         readCursor.getSearchBothRange(key, data,
+                                                       LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /* Use search both range, starting at 1 */
+            data.setData(TestUtils.getTestArray(1));
+            assertEquals(OperationStatus.SUCCESS,
+                         readCursor.getSearchBothRange(key, data,
+                                                       LockMode.DEFAULT));
+            assertEquals(2, TestUtils.getTestVal(key.getData()));
+            assertEquals(8, TestUtils.getTestVal(data.getData()));
+
+            /*
+             * Make sure we can find k2/d13 with a range search.
+             */
+
+            /*
+             * Insert a set of duplicates, k5/d0 -> k5/d9, then delete all of
+             * them (but don't compress). Make sure no form of search ever
+             * finds them.
+             */
+            key.setData(TestUtils.getTestArray(5));
+            for (int i = 0; i < 10; i++) {
+                data.setData(TestUtils.getTestArray(i));
+                myDb.put(null, key, data);
+            }
+            myDb.delete(null, key); // delete all k5's
+
+            /* All searches on key 5 should fail */
+            assertFalse(readCursor.getSearchKey(key, data, LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS);
+            assertFalse(readCursor.getSearchKeyRange(key, data,
+                                                     LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS);
+            data.setData(TestUtils.getTestArray(0));
+            assertFalse(readCursor.getSearchBoth(key, data,
+                                                 LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS);
+            assertFalse(readCursor.getSearchBothRange(key, data,
+                                                      LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS);
+
+            /* All ranges on key 4 should also fail. */
+            key.setData(TestUtils.getTestArray(4));
+            assertFalse(readCursor.getSearchKeyRange(key, data,
+                                                     LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS);
+            assertFalse(readCursor.getSearchBothRange(key, data,
+                                                      LockMode.DEFAULT) ==
+                        OperationStatus.SUCCESS);
+
+            readCursor.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+            myDb.close();
+            close(env);
+        }
+    }
+
+    /**
+     * Test the case where we allow duplicates in the database, but don't
+     * actually insert a duplicate. So we have a single key/value pair and do
+     * a getSearchBothRange using that key and data-1 (i.e., we land on the
+     * key, but just before the data in the dup set, which isn't really a dup
+     * set since there's only one entry). getSearchBothRange should land on
+     * the key/value pair in this case. See SR #9248.
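+     *
+     * Concretely: with only K1/V1 in the database, getSearchBothRange(K1,
+     * V0) should return SUCCESS positioned on K1/V1 rather than NOTFOUND.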
+ */ + @Test + public void testSearchBothWithOneDuplicate() + throws Throwable { + + initEnv(); + + Database myDb = null; + Cursor cursor = null; + try { + if (DualTestCase.isReplicatedTest(getClass())) { + return; + } + + /* Set up a db */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + myDb = env.openDatabase(null, "foo", dbConfig); + + /* Put one record */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + myDb.put(null, key, data); + + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(0)); + cursor = myDb.openCursor(null, CursorConfig.DEFAULT); + OperationStatus status = + cursor.getSearchBothRange(key, data, LockMode.DEFAULT); + assertSame(status, OperationStatus.SUCCESS); + assertEquals(1, TestUtils.getTestVal(key.getData())); + assertEquals(1, TestUtils.getTestVal(data.getData())); + } finally { + if (cursor != null) { + cursor.close(); + } + + if (myDb != null) { + myDb.close(); + } + close(env); + } + } + + /** + * Tests a bug fix to CursorImpl.fetchCurrent [#11195]. + * + * T1 inserts K1-D1 and holds WRITE on K1-D1 (no dup tree yet) + * T2 calls getFirst and waits for READ on K1-D1 + * T1 inserts K1-D2 which creates the dup tree + * T1 commits, allowing T2 to proceed + * + * T2 is in the middle of CursorImpl.fetchCurrent, and assumes incorrectly + * that it has a lock on an LN in BIN; actually the LN was replaced by a + * DIN and a ClassCastException occurs. + */ + @Test + public void testGetCurrentDuringDupTreeCreation() + throws Throwable { + + initEnv(); + + /* Set up a db */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + final Database myDb = env.openDatabase(null, "foo", dbConfig); + + /* T1 inserts K1-D1. */ + Transaction t1 = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + myDb.put(t1, key, data); + + /* T2 calls getFirst. */ + JUnitThread thread = new JUnitThread("getFirst") { + @Override + public void testBody() + throws DatabaseException { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction t2 = env.beginTransaction(null, null); + operationStarted = true; + Cursor cursor = myDb.openCursor(t2, null); + OperationStatus status = cursor.getFirst(key, data, null); + assertEquals(1, TestUtils.getTestVal(key.getData())); + assertEquals(1, TestUtils.getTestVal(data.getData())); + assertEquals(OperationStatus.SUCCESS, status); + cursor.close(); + t2.commit(Durability.COMMIT_NO_SYNC); + } + }; + thread.start(); + while (!operationStarted) { + Thread.yield(); + } + Thread.sleep(10); + + /* T1 inserts K1-D2. */ + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(2)); + myDb.put(t1, key, data); + t1.commit(Durability.COMMIT_NO_SYNC); + + try { + thread.finishTest(); + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } + myDb.close(); + close(env); + } + + /** + * Tests a bug fix to CursorImpl.fetchCurrent [#11700] that caused + * ArrayIndexOutOfBoundsException. 
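+     *
+     * The scenario: every record in a dup tree is deleted and compressed
+     * away while a cursor remains positioned on the now-empty DBIN;
+     * getPrevNoDup must then return NOTFOUND instead of indexing past the
+     * end of the empty node.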
+ */ + @Test + public void testGetPrevNoDupWithEmptyTree() + throws Throwable { + + initEnv(); + + OperationStatus status; + + /* + * Set up a db + */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + Database myDb = env.openDatabase(null, "foo", dbConfig); + + /* + * Insert two sets of duplicates. + */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + myDb.put(null, key, data); + data.setData(TestUtils.getTestArray(2)); + myDb.put(null, key, data); + + key.setData(TestUtils.getTestArray(2)); + data.setData(TestUtils.getTestArray(1)); + myDb.put(null, key, data); + data.setData(TestUtils.getTestArray(2)); + myDb.put(null, key, data); + + /* + * Delete all duplicates with a cursor. + */ + Cursor cursor = myDb.openCursor(null, null); + while ((status = cursor.getNext(key, data, null)) == + OperationStatus.SUCCESS) { + cursor.delete(); + } + + /* + * Compress to empty the two DBINs. The BIN will not be deleted + * because a cursor is attached to it. This causes a cursor to be + * positioned on an empty DBIN, which brings out the bug. + */ + env.compress(); + + /* + * Before the bug fix, getPrevNoDup caused + * ArrayIndexOutOfBoundsException. + */ + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + cursor.close(); + myDb.close(); + close(env); + env = null; + } + + /* + * Check that non transactional cursors can't do update operations against + * a transactional database. + */ + @Test + public void testNonTxnalCursorNoUpdates() + throws Throwable { + + initEnv(); + + Database myDb = null; + SecondaryDatabase mySecDb = null; + Cursor cursor = null; + SecondaryCursor secCursor = null; + try { + /* Set up a db with a secondary, insert something. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + myDb = env.openDatabase(null, "foo", dbConfig); + + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setTransactional(true); + secConfig.setAllowCreate(true); + secConfig.setKeyCreator(new KeyCreator()); + mySecDb = env.openSecondaryDatabase(null, "fooSecDb", myDb, + secConfig); + + /* Insert something. */ + DatabaseEntry key = new DatabaseEntry(new byte[1]); + assertEquals(myDb.put(null, key, key), OperationStatus.SUCCESS); + + if (DualTestCase.isReplicatedTest(getClass())) { + return; + } + + /* Open a non-txnal cursor on the primary database. */ + cursor = myDb.openCursor(null, null); + DatabaseEntry data = new DatabaseEntry(); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + + /* All updates should be prohibited. */ + updatesShouldBeProhibited(cursor); + + /* Open a secondary non-txnal cursor. */ + secCursor = mySecDb.openSecondaryCursor(null, null); + assertEquals(OperationStatus.SUCCESS, + secCursor.getNext(key, data, LockMode.DEFAULT)); + + /* All updates should be prohibited. */ + updatesShouldBeProhibited(secCursor); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (secCursor != null) { + secCursor.close(); + } + + if (cursor != null) { + cursor.close(); + } + + if (mySecDb != null) { + mySecDb.close(); + } + + myDb.close(); + close(env); + } + } + + /* Updates should not be possible with this cursor. 
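+     * Each write operation (delete, put, putCurrent, putNoDupData,
+     * putNoOverwrite) must throw UnsupportedOperationException.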
+     */
+    private void updatesShouldBeProhibited(Cursor c) {
+        try {
+            c.delete();
+            fail("Should not be able to do a delete");
+        } catch (UnsupportedOperationException e) {
+            checkForCursorUpdateException(false, e);
+        }
+
+        DatabaseEntry key = new DatabaseEntry(new byte[0]);
+        DatabaseEntry data = new DatabaseEntry(new byte[0]);
+
+        try {
+            c.put(key, data);
+            fail("Should not be able to do a put");
+        } catch (UnsupportedOperationException e) {
+            checkForCursorUpdateException(c instanceof SecondaryCursor, e);
+        }
+
+        try {
+            c.putCurrent(data);
+            fail("Should not be able to do a putCurrent");
+        } catch (UnsupportedOperationException e) {
+            checkForCursorUpdateException(c instanceof SecondaryCursor, e);
+        }
+
+        try {
+            c.putNoDupData(key, data);
+            fail("Should not be able to do a putNoDupData");
+        } catch (UnsupportedOperationException e) {
+            checkForCursorUpdateException(c instanceof SecondaryCursor, e);
+        }
+
+        try {
+            c.putNoOverwrite(key, data);
+            fail("Should not be able to do a putNoOverwrite");
+        } catch (UnsupportedOperationException e) {
+            checkForCursorUpdateException(c instanceof SecondaryCursor, e);
+        }
+    }
+
+    private void checkForCursorUpdateException(boolean isSecUpdateError,
+                                               RuntimeException e) {
+
+        /*
+         * Check that it's a transaction or secondary problem. Crude, but
+         * necessary, since we don't want to add exception types.
+         */
+        String msg = e.getMessage();
+        if (isSecUpdateError) {
+            assertTrue(
+                msg, msg.startsWith("Operation not allowed on a secondary"));
+        } else {
+            assertTrue(msg, msg.contains("a Transaction was not supplied"));
+        }
+    }
+
+    private static class KeyCreator implements SecondaryKeyCreator {
+        public boolean createSecondaryKey(SecondaryDatabase secondaryDb,
+                                          DatabaseEntry keyEntry,
+                                          DatabaseEntry dataEntry,
+                                          DatabaseEntry resultEntry) {
+            resultEntry.setData(dataEntry.getData());
+            return true;
+        }
+    }
+
+    /**
+     * Tests that when a LockNotAvailableException is thrown as the result of
+     * a cursor operation, all latches are released properly. There are two
+     * cases corresponding to the two methods in CursorImpl --
+     * lockLNDeletedAllowed and lockDupCountLN, which lock leaf LNs and dup
+     * count LNs, respectively -- that handle locking and latching. These
+     * methods optimize by not releasing latches while obtaining a
+     * non-blocking lock. Prior to the fix for [#15142], these methods did
+     * not release latches when LockNotAvailableException was thrown, which
+     * can occur when a transaction is configured for "no wait".
+     */
+    @Test
+    public void testNoWaitLatchRelease()
+        throws Throwable {
+
+        initEnv();
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Open the database. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        Database db = env.openDatabase(null, "foo", dbConfig);
+
+        /* Insert record 1. */
+        key.setData(TestUtils.getTestArray(1));
+        data.setData(TestUtils.getTestArray(1));
+        db.put(null, key, data);
+
+        /*
+         * Open cursor1 with txn1 and lock record 1.
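+         * A plain getSearchBoth takes a read lock on the record, which is
+         * enough to make txn2's no-wait delete below fail immediately.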
*/ + Transaction txn1 = env.beginTransaction(null, null); + Cursor cursor1 = db.openCursor(txn1, null); + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + OperationStatus status = cursor1.getSearchBoth(key, data, null); + assertSame(status, OperationStatus.SUCCESS); + assertEquals(1, TestUtils.getTestVal(key.getData())); + assertEquals(1, TestUtils.getTestVal(data.getData())); + + /* Open cursor2 with no-wait txn2 and try to delete record 1. */ + TransactionConfig noWaitConfig = new TransactionConfig(); + noWaitConfig.setNoWait(true); + Transaction txn2 = env.beginTransaction(null, noWaitConfig); + Cursor cursor2 = db.openCursor(txn2, null); + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + status = cursor2.getSearchBoth(key, data, null); + assertSame(status, OperationStatus.SUCCESS); + assertEquals(1, TestUtils.getTestVal(key.getData())); + assertEquals(1, TestUtils.getTestVal(data.getData())); + try { + cursor2.delete(); + fail("Expected LockNotAvailableException"); + } catch (LockNotAvailableException expected) { + } + + /* + * Before the [#15142] bug fix, this could have failed. However, that + * failure was not reproducible because all callers of + * lockLNDeletedAllowed redundantly release the BIN latches. So this is + * just an extra check to ensure such a bug is never introduced. + */ + assertEquals(0, LatchSupport.nBtreeLatchesHeld()); + + /* Close cursors and txns to release locks. */ + cursor1.close(); + cursor2.close(); + txn1.commit(); + txn2.commit(); + + /* + * Since DupCountLNs are no longer used, the second portion of the test + * was deleted. + */ + + /* Close all. */ + db.close(); + close(env); + } + + /** + * Checks that an uncommitted deleted record will cause other txns that + * access it to block. This reproduces a bug [#22892] where other txns + * accessing the record would return NOTFOUND rather than blocking. + */ + @Test + public void testReadDeletedUncommitted() + throws Throwable { + + initEnv(); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + final Database db = env.openDatabase(null, "foo", dbConfig); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + /* Insert record 1. */ + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + db.put(null, key, data); + + /* Use txn1 to delete record 1. Leave txn1 open. */ + final Transaction txn1 = env.beginTransaction(null, null); + key.setData(TestUtils.getTestArray(1)); + OperationStatus status = db.delete(txn1, key); + assertSame(status, OperationStatus.SUCCESS); + + /* Try to read record 1 with no-wait txn2. Should fail. */ + final TransactionConfig txn2Config = new TransactionConfig(); + txn2Config.setNoWait(true); + final Transaction txn2 = env.beginTransaction(null, txn2Config); + try { + key.setData(TestUtils.getTestArray(1)); + status = db.get(txn2, key, data, null); + fail(status.toString()); + } catch (LockNotAvailableException expected) { + } + final Cursor cursor2 = db.openCursor(txn2, null); + try { + key.setData(TestUtils.getTestArray(1)); + status = cursor2.getSearchKey(key, data, null); + fail(status.toString()); + } catch (LockNotAvailableException expected) { + } + + /* Commit txn1. Then expect NOTFOUND when reading. 
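+         * (Once the deleting txn has committed, the record is simply gone,
+         * so readers get NOTFOUND instead of blocking on its lock.)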
*/ + txn1.commit(); + key.setData(TestUtils.getTestArray(1)); + status = db.get(txn2, key, data, null); + assertSame(status, OperationStatus.NOTFOUND); + key.setData(TestUtils.getTestArray(1)); + status = cursor2.getSearchKey(key, data, null); + assertSame(status, OperationStatus.NOTFOUND); + + cursor2.close(); + txn2.commit(); + db.close(); + close(env); + } +} diff --git a/test/com/sleepycat/je/CursorTest.java b/test/com/sleepycat/je/CursorTest.java new file mode 100644 index 0000000..ce58b8d --- /dev/null +++ b/test/com/sleepycat/je/CursorTest.java @@ -0,0 +1,1481 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Arrays; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.DbInternal.Search; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.utilint.StringUtils; + +import org.junit.Test; + +public class CursorTest extends DualTestCase { + private static final boolean DEBUG = false; + private static final int NUM_RECS = 257; + + /* + * Use a ridiculous value because we've seen extreme slowness on ocicat + * where dbperf is often running. 
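+     * (This value is used as the lock timeout for the blocked reader thread
+     * in the phantom tests below, so it waits for the writer rather than
+     * timing out.)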
+ */ + private static final long LOCK_TIMEOUT = 50000000L; + + private static final String DUPKEY = "DUPKEY"; + + private Environment env; + private Database db; + private PhantomTestConfiguration config; + + private File envHome; + + private volatile int sequence; + + public CursorTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testGetConfig() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + env = create(envHome, envConfig); + Transaction txn = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(txn, "testDB", dbConfig); + txn.commit(); + Cursor cursor = null; + Transaction txn1 = + env.beginTransaction(null, TransactionConfig.DEFAULT); + try { + cursor = db.openCursor(txn1, CursorConfig.DEFAULT); + CursorConfig config = cursor.getConfig(); + if (config == CursorConfig.DEFAULT) { + fail("didn't clone"); + } + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } finally { + if (cursor != null) { + cursor.close(); + } + txn1.abort(); + db.close(); + close(env); + env = null; + } + } + + /** + * Put some data in a database, take it out. Yank the file size down so we + * have many files. + */ + @Test + public void testBasic() + throws Throwable { + + try { + insertMultiDb(1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testMulti() + throws Throwable { + + try { + insertMultiDb(4); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Specifies a test configuration. This is just a struct for holding + * parameters to be passed down to threads in inner classes. + */ + class PhantomTestConfiguration { + String testName; + String thread1EntryToLock; + String thread1OpArg; + String thread2Start; + String expectedResult; + boolean doInsert; + boolean doGetNext; + boolean doCommit; + + PhantomTestConfiguration(String testName, + String thread1EntryToLock, + String thread1OpArg, + String thread2Start, + String expectedResult, + boolean doInsert, + boolean doGetNext, + boolean doCommit) { + this.testName = testName; + this.thread1EntryToLock = thread1EntryToLock; + this.thread1OpArg = thread1OpArg; + this.thread2Start = thread2Start; + this.expectedResult = expectedResult; + this.doInsert = doInsert; + this.doGetNext = doGetNext; + this.doCommit = doCommit; + } + } + + /** + * This series of tests sets up a simple 2 BIN tree with a specific set of + * elements (see setupDatabaseAndEnv()). It creates two threads. + * + * Thread 1 positions a cursor on an element on the edge of a BIN (either + * the last element on the left BIN or the first element on the right BIN). + * This locks that element. It throws control to thread 2. + * + * Thread 2 positions a cursor on the adjacent element on the other BIN + * (either the first element on the right BIN or the last element on the + * left BIN, resp.) It throws control to thread 1. After it signals + * thread 1 to continue, thread 2 does either a getNext or getPrev. This + * should block because thread 1 has the next/prev element locked. + * + * Thread 1 then waits a short time (250ms) so that thread 2 can execute + * the getNext/getPrev. 
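+     * (The code actually sleeps for 1000ms at this point; the exact delay
+     * only needs to be long enough for thread 2 to block on the lock.)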
Thread 1 then inserts or deletes the "phantom + * element" right in between the cursors that were set up in the previous + * two steps, sleeps a second, and either commits or aborts. + * + * Thread 2 will then return from the getNext/getPrev. The returned key + * from the getNext/getPrev is then verified. + * + * The Serializable isolation level is not used for either thread so as to + * allow phantoms; otherwise, this test would deadlock. + * + * These parameters are all configured through a PhantomTestConfiguration + * instance passed to phantomWorker which has the template for the steps + * described above. + */ + + /** + * Phantom test inserting and committing a phantom while doing a getNext. + */ + @Test + public void testPhantomInsertGetNextCommit() + throws Throwable { + + try { + phantomWorker + (new PhantomTestConfiguration + ("testPhantomInsertGetNextCommit", + "F", "D", "C", "D", + true, true, true)); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + + /** + * Phantom test inserting and aborting a phantom while doing a getNext. + */ + @Test + public void testPhantomInsertGetNextAbort() + throws Throwable { + + phantomWorker + (new PhantomTestConfiguration + ("testPhantomInsertGetNextAbort", + "F", "D", "C", "F", + true, true, false)); + } + + /** + * Phantom test inserting and committing a phantom while doing a getPrev. + */ + @Test + public void testPhantomInsertGetPrevCommit() + throws Throwable { + + phantomWorker + (new PhantomTestConfiguration + ("testPhantomInsertGetPrevCommit", + "C", "F", "G", "F", + true, false, true)); + } + + /** + * Phantom test inserting and aborting a phantom while doing a getPrev. + */ + @Test + public void testPhantomInsertGetPrevAbort() + throws Throwable { + + phantomWorker + (new PhantomTestConfiguration + ("testPhantomInsertGetPrevAbort", + "C", "F", "G", "C", + true, false, false)); + } + + /** + * Phantom test deleting and committing an edge element while doing a + * getNext. + */ + @Test + public void testPhantomDeleteGetNextCommit() + throws Throwable { + + phantomWorker + (new PhantomTestConfiguration + ("testPhantomDeleteGetNextCommit", + "F", "F", "C", "G", + false, true, true)); + } + + /** + * Phantom test deleting and aborting an edge element while doing a + * getNext. + */ + @Test + public void testPhantomDeleteGetNextAbort() + throws Throwable { + + phantomWorker + (new PhantomTestConfiguration + ("testPhantomDeleteGetNextAbort", + "F", "F", "C", "F", + false, true, false)); + } + + /** + * Phantom test deleting and committing an edge element while doing a + * getPrev. + */ + @Test + public void testPhantomDeleteGetPrevCommit() + throws Throwable { + + phantomWorker + (new PhantomTestConfiguration + ("testPhantomDeleteGetPrevCommit", + "F", "F", "G", "C", + false, false, true)); + } + + /** + * Phantom test deleting and aborting an edge element while doing a + * getPrev. + */ + @Test + public void testPhantomDeleteGetPrevAbort() + throws Throwable { + + phantomWorker + (new PhantomTestConfiguration + ("testPhantomDeleteGetPrevAbort", + "F", "F", "G", "F", + false, false, false)); + } + + /** + * Phantom Dup test inserting and committing a phantom while doing a + * getNext. 
+ */ + @Test + public void testPhantomDupInsertGetNextCommit() + throws Throwable { + + try { + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupInsertGetNextCommit", + "F", "D", "C", "D", + true, true, true)); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + + /** + * Phantom Dup test inserting and aborting a phantom while doing a getNext. + */ + @Test + public void testPhantomDupInsertGetNextAbort() + throws Throwable { + + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupInsertGetNextAbort", + "F", "D", "C", "F", + true, true, false)); + } + + /** + * Phantom Dup test inserting and committing a phantom while doing a + * getPrev. + */ + @Test + public void testPhantomDupInsertGetPrevCommit() + throws Throwable { + + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupInsertGetPrevCommit", + "C", "F", "G", "F", + true, false, true)); + } + + /** + * Phantom Dup test inserting and aborting a phantom while doing a getPrev. + */ + @Test + public void testPhantomDupInsertGetPrevAbort() + throws Throwable { + + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupInsertGetPrevAbort", + "C", "F", "G", "C", + true, false, false)); + } + + /** + * Phantom Dup test deleting and committing an edge element while doing a + * getNext. + */ + @Test + public void testPhantomDupDeleteGetNextCommit() + throws Throwable { + + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupDeleteGetNextCommit", + "F", "F", "C", "G", + false, true, true)); + } + + /** + * Phantom Dup test deleting and aborting an edge element while doing a + * getNext. + */ + @Test + public void testPhantomDupDeleteGetNextAbort() + throws Throwable { + + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupDeleteGetNextAbort", + "F", "F", "C", "F", + false, true, false)); + } + + /** + * Phantom Dup test deleting and committing an edge element while doing a + * getPrev. + */ + @Test + public void testPhantomDupDeleteGetPrevCommit() + throws Throwable { + + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupDeleteGetPrevCommit", + "F", "F", "G", "C", + false, false, true)); + } + + /** + * Phantom Dup test deleting and aborting an edge element while doing a + * getPrev. + */ + @Test + public void testPhantomDupDeleteGetPrevAbort() + throws Throwable { + + phantomDupWorker + (new PhantomTestConfiguration + ("testPhantomDupDeleteGetPrevAbort", + "F", "F", "G", "F", + false, false, false)); + } + + private void phantomWorker(PhantomTestConfiguration c) + throws Throwable { + + try { + this.config = c; + setupDatabaseAndEnv(false); + + if (config.doInsert && + !config.doGetNext) { + + Transaction txnDel = + env.beginTransaction(null, TransactionConfig.DEFAULT); + + /* + * Delete the first entry in the second bin so that we can + * reinsert it in tester1 and have it be the first entry in + * that bin. If we left F and then tried to insert something + * to the left of F, it would end up in the first bin. 
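+             * (setupDatabaseAndEnv builds a tree with A, B, and C in the
+             * first bin and F, G, H, and I in the second.)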
+ */ + assertEquals + (OperationStatus.SUCCESS, + db.delete(txnDel, + new DatabaseEntry(StringUtils.toUTF8("F")))); + txnDel.commit(); + } + + JUnitThread tester1 = + new JUnitThread(config.testName + "1") { + public void testBody() + throws Throwable { + + Cursor cursor = null; + try { + Transaction txn1 = + env.beginTransaction(null, null); + cursor = db.openCursor(txn1, CursorConfig.DEFAULT); + OperationStatus status = + cursor.getSearchKey + (new DatabaseEntry(StringUtils.toUTF8 + (config.thread1EntryToLock)), + new DatabaseEntry(), + LockMode.RMW); + assertEquals(OperationStatus.SUCCESS, status); + sequence++; // 0 -> 1 + + /* Wait for tester2 to position cursor. */ + while (sequence < 2) { + Thread.yield(); + } + + if (config.doInsert) { + status = db.put + (txn1, + new DatabaseEntry + (StringUtils.toUTF8(config.thread1OpArg)), + new DatabaseEntry(new byte[10])); + } else { + status = db.delete + (txn1, + new DatabaseEntry + (StringUtils.toUTF8(config.thread1OpArg))); + } + assertEquals(OperationStatus.SUCCESS, status); + sequence++; // 2 -> 3 + + /* + * Since we can't increment sequence when tester2 + * blocks on the getNext call, all we can do is + * bump sequence right before the getNext, and then + * wait a little in this thread for tester2 to + * block. + */ + try { + Thread.sleep(1000); + } catch (InterruptedException IE) { + } + + cursor.close(); + cursor = null; + if (config.doCommit) { + txn1.commit(); + } else { + txn1.abort(); + } + } catch (DatabaseException DBE) { + if (cursor != null) { + cursor.close(); + } + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread(config.testName + "2") { + public void testBody() + throws Throwable { + + Cursor cursor = null; + try { + Transaction txn2 = + env.beginTransaction(null, null); + txn2.setLockTimeout(LOCK_TIMEOUT); + cursor = db.openCursor(txn2, CursorConfig.DEFAULT); + + /* Wait for tester1 to position cursor. */ + while (sequence < 1) { + Thread.yield(); + } + + OperationStatus status = + cursor.getSearchKey + (new DatabaseEntry + (StringUtils.toUTF8(config.thread2Start)), + new DatabaseEntry(), + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + + sequence++; // 1 -> 2 + + /* Wait for tester1 to insert/delete. */ + while (sequence < 3) { + Thread.yield(); + } + + DatabaseEntry nextKey = new DatabaseEntry(); + try { + + /* + * This will block until tester1 above commits. 
+ */ + if (config.doGetNext) { + status = + cursor.getNext(nextKey, + new DatabaseEntry(), + LockMode.DEFAULT); + } else { + status = + cursor.getPrev(nextKey, + new DatabaseEntry(), + LockMode.DEFAULT); + } + } catch (DatabaseException DBE) { + System.out.println("t2 caught " + DBE); + } + assertEquals(3, sequence); + assertEquals(config.expectedResult, + StringUtils.fromUTF8 + (nextKey.getData())); + cursor.close(); + cursor = null; + txn2.commit(); + } catch (DatabaseException DBE) { + if (cursor != null) { + cursor.close(); + } + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + + tester1.finishTest(); + tester2.finishTest(); + } finally { + db.close(); + close(env); + env = null; + } + } + + private void phantomDupWorker(PhantomTestConfiguration c) + throws Throwable { + + Cursor cursor = null; + try { + this.config = c; + setupDatabaseAndEnv(true); + + if (config.doInsert && + !config.doGetNext) { + + Transaction txnDel = + env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor = db.openCursor(txnDel, CursorConfig.DEFAULT); + + /* + * Delete the first entry in the second bin so that we can + * reinsert it in tester1 and have it be the first entry in + * that bin. If we left F and then tried to insert something + * to the left of F, it would end up in the first bin. + */ + assertEquals(OperationStatus.SUCCESS, cursor.getSearchBoth + (new DatabaseEntry(StringUtils.toUTF8(DUPKEY)), + new DatabaseEntry(StringUtils.toUTF8("F")), + LockMode.DEFAULT)); + assertEquals(OperationStatus.SUCCESS, cursor.delete()); + cursor.close(); + cursor = null; + txnDel.commit(); + } + + JUnitThread tester1 = + new JUnitThread(config.testName + "1") { + public void testBody() + throws Throwable { + + Cursor cursor = null; + Cursor c = null; + try { + Transaction txn1 = + env.beginTransaction(null, null); + cursor = db.openCursor(txn1, CursorConfig.DEFAULT); + OperationStatus status = + cursor.getSearchBoth + (new DatabaseEntry(StringUtils.toUTF8(DUPKEY)), + new DatabaseEntry(StringUtils.toUTF8 + (config.thread1EntryToLock)), + LockMode.RMW); + assertEquals(OperationStatus.SUCCESS, status); + cursor.close(); + cursor = null; + sequence++; // 0 -> 1 + + /* Wait for tester2 to position cursor. */ + while (sequence < 2) { + Thread.yield(); + } + + if (config.doInsert) { + status = db.put + (txn1, + new DatabaseEntry + (StringUtils.toUTF8(DUPKEY)), + new DatabaseEntry + (StringUtils.toUTF8 + (config.thread1OpArg))); + } else { + c = db.openCursor(txn1, CursorConfig.DEFAULT); + assertEquals(OperationStatus.SUCCESS, + c.getSearchBoth + (new DatabaseEntry + (StringUtils.toUTF8(DUPKEY)), + new DatabaseEntry + (StringUtils.toUTF8 + (config.thread1OpArg)), + LockMode.DEFAULT)); + assertEquals(OperationStatus.SUCCESS, + c.delete()); + c.close(); + c = null; + } + assertEquals(OperationStatus.SUCCESS, status); + sequence++; // 2 -> 3 + + /* + * Since we can't increment sequence when tester2 + * blocks on the getNext call, all we can do is + * bump sequence right before the getNext, and then + * wait a little in this thread for tester2 to + * block. 
+ */ + try { + Thread.sleep(1000); + } catch (InterruptedException IE) { + } + + if (config.doCommit) { + txn1.commit(); + } else { + txn1.abort(); + } + } catch (DatabaseException DBE) { + if (cursor != null) { + cursor.close(); + } + if (c != null) { + c.close(); + } + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testPhantomInsert2") { + public void testBody() + throws Throwable { + + Cursor cursor = null; + try { + Transaction txn2 = + env.beginTransaction(null, null); + txn2.setLockTimeout(LOCK_TIMEOUT); + cursor = db.openCursor(txn2, CursorConfig.DEFAULT); + + /* Wait for tester1 to position cursor. */ + while (sequence < 1) { + Thread.yield(); + } + + OperationStatus status = + cursor.getSearchBoth + (new DatabaseEntry(StringUtils.toUTF8(DUPKEY)), + new DatabaseEntry + (StringUtils.toUTF8(config.thread2Start)), + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + + sequence++; // 1 -> 2 + + /* Wait for tester1 to insert/delete. */ + while (sequence < 3) { + Thread.yield(); + } + + DatabaseEntry nextKey = new DatabaseEntry(); + DatabaseEntry nextData = new DatabaseEntry(); + try { + + /* + * This will block until tester1 above commits. + */ + if (config.doGetNext) { + status = + cursor.getNextDup(nextKey, nextData, + LockMode.DEFAULT); + } else { + status = + cursor.getPrevDup(nextKey, nextData, + LockMode.DEFAULT); + } + } catch (DatabaseException DBE) { + System.out.println("t2 caught " + DBE); + } + assertEquals(3, sequence); + byte[] data = nextData.getData(); + assertEquals(config.expectedResult, + StringUtils.fromUTF8(data)); + cursor.close(); + cursor = null; + txn2.commit(); + } catch (DatabaseException DBE) { + if (cursor != null) { + cursor.close(); + } + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + + tester1.finishTest(); + tester2.finishTest(); + } finally { + if (cursor != null) { + cursor.close(); + } + db.close(); + close(env); + env = null; + } + } + + /** + * Sets up a small database with a tree containing 2 bins, one with A, B, + * and C, and the other with F, G, H, and I. + */ + private void setupDatabaseAndEnv(boolean writeAsDuplicateData) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + + /* RepeatableRead isolation is required by this test. 
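+         * (Serializable isolation would prevent the phantoms these tests
+         * deliberately create; see the phantom test comments above.)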
*/ + TestUtils.clearIsolationLevel(envConfig); + + DbInternal.disableParameterValidation(envConfig); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX_DUPTREE.getName(), + "6"); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + "1024"); + envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(), + "true"); + envConfig.setAllowCreate(true); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + env = create(envHome, envConfig); + Transaction txn = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(txn, "testDB", dbConfig); + + if (writeAsDuplicateData) { + writeDuplicateData(db, txn); + } else { + writeData(db, txn); + } + + txn.commit(); + } + + String[] dataStrings = { + "A", "B", "C", "F", "G", "H", "I" + }; + + private void writeData(Database db, Transaction txn) + throws DatabaseException { + + for (int i = 0; i < dataStrings.length; i++) { + db.put(txn, new DatabaseEntry(StringUtils.toUTF8(dataStrings[i])), + new DatabaseEntry(new byte[10])); + } + } + + private void writeDuplicateData(Database db, Transaction txn) + throws DatabaseException { + + for (int i = 0; i < dataStrings.length; i++) { + db.put(txn, new DatabaseEntry(StringUtils.toUTF8(DUPKEY)), + new DatabaseEntry(StringUtils.toUTF8(dataStrings[i]))); + } + } + + /** + * Insert data over many databases. + */ + private void insertMultiDb(int numDbs) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + + /* RepeatableRead isolation is required by this test. */ + TestUtils.clearIsolationLevel(envConfig); + + DbInternal.disableParameterValidation(envConfig); + envConfig.setTransactional(true); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), "1024"); + envConfig.setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "true"); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX_DUPTREE.getName(), "6"); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + envConfig.setAllowCreate(true); + Environment env = create(envHome, envConfig); + + Database[] myDb = new Database[numDbs]; + Cursor[] cursor = new Cursor[numDbs]; + Transaction txn = + env.beginTransaction(null, TransactionConfig.DEFAULT); + + /* In a non-replicated environment, the txn id should be positive. */ + assertTrue(txn.getId() > 0); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + for (int i = 0; i < numDbs; i++) { + myDb[i] = env.openDatabase(txn, "testDB" + i, dbConfig); + cursor[i] = myDb[i].openCursor(txn, CursorConfig.DEFAULT); + + /* + * In a non-replicated environment, the db id should be + * positive. + */ + DatabaseImpl dbImpl = DbInternal.getDbImpl(myDb[i]); + assertTrue(dbImpl.getId().getId() > 0); + } + + /* Insert data in a round robin fashion to spread over log. 
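+         * (Each pass writes one record to every database, so with the
+         * 1024-byte LOG_FILE_MAX set above, records from different databases
+         * end up interleaved across many small log files.)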
*/ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = NUM_RECS; i > 0; i--) { + for (int c = 0; c < numDbs; c++) { + key.setData(TestUtils.getTestArray(i + c)); + data.setData(TestUtils.getTestArray(i + c)); + if (DEBUG) { + System.out.println("i = " + i + + TestUtils.dumpByteArray(key.getData())); + } + cursor[c].put(key, data); + } + } + + for (int i = 0; i < numDbs; i++) { + cursor[i].close(); + myDb[i].close(); + } + txn.commit(); + + assertTrue(env.verify(null, System.err)); + close(env); + env = null; + + envConfig.setAllowCreate(false); + env = create(envHome, envConfig); + + /* + * Before running the verifier, run the cleaner to make sure it has + * completed. Otherwise, the cleaner will be running when we call + * verify, and open txns will be reported. + */ + env.cleanLog(); + + env.verify(null, System.err); + + /* Check each db in turn, using null transactions. */ + dbConfig.setTransactional(false); + dbConfig.setAllowCreate(false); + for (int d = 0; d < numDbs; d++) { + Database checkDb = env.openDatabase(null, "testDB" + d, + dbConfig); + Cursor myCursor = checkDb.openCursor(null, CursorConfig.DEFAULT); + + OperationStatus status = + myCursor.getFirst(key, data, LockMode.DEFAULT); + + int i = 1; + while (status == OperationStatus.SUCCESS) { + byte[] expectedKey = TestUtils.getTestArray(i + d); + byte[] expectedData = TestUtils.getTestArray(i + d); + + if (DEBUG) { + System.out.println("Database " + d + " Key " + i + + " expected = " + + TestUtils.dumpByteArray(expectedKey) + + " seen = " + + TestUtils.dumpByteArray(key.getData())); + } + + assertTrue("Database " + d + " Key " + i + " expected = " + + TestUtils.dumpByteArray(expectedKey) + + " seen = " + + TestUtils.dumpByteArray(key.getData()), + Arrays.equals(expectedKey, key.getData())); + assertTrue("Data " + i, Arrays.equals(expectedData, + data.getData())); + i++; + + status = myCursor.getNext(key, data, LockMode.DEFAULT); + } + myCursor.close(); + assertEquals("Number recs seen", NUM_RECS, i-1); + checkDb.close(); + } + close(env); + env = null; + } + + /** + * This is a rudimentary test of DbInternal.search, just to make sure we're + * passing parameters down correctly the RangeCursor. RangeCursor is tested + * thoroughly elsewhere. 
+ */ + @Test + public void testDbInternalSearch() { + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + final Environment env = create(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + final Database db = env.openDatabase(null, "testDB", dbConfig); + + insert(db, 1, 1); + insert(db, 3, 3); + insert(db, 5, 5); + + final Cursor cursor = db.openCursor(null, null); + + checkSearch(cursor, Search.GT, 0, 1); + checkSearch(cursor, Search.GTE, 0, 1); + checkSearch(cursor, Search.GT, 1, 3); + checkSearch(cursor, Search.GTE, 1, 1); + checkSearch(cursor, Search.GT, 2, 3); + checkSearch(cursor, Search.GTE, 2, 3); + checkSearch(cursor, Search.GT, 3, 5); + checkSearch(cursor, Search.GTE, 3, 3); + checkSearch(cursor, Search.GT, 4, 5); + checkSearch(cursor, Search.GTE, 4, 5); + checkSearch(cursor, Search.GT, 5, -1); + checkSearch(cursor, Search.GTE, 5, 5); + checkSearch(cursor, Search.GT, 6, -1); + checkSearch(cursor, Search.GTE, 6, -1); + + checkSearch(cursor, Search.LT, 0, -1); + checkSearch(cursor, Search.LTE, 0, -1); + checkSearch(cursor, Search.LT, 1, -1); + checkSearch(cursor, Search.LTE, 1, 1); + checkSearch(cursor, Search.LT, 2, 1); + checkSearch(cursor, Search.LTE, 2, 1); + checkSearch(cursor, Search.LT, 3, 1); + checkSearch(cursor, Search.LTE, 3, 3); + checkSearch(cursor, Search.LT, 4, 3); + checkSearch(cursor, Search.LTE, 4, 3); + checkSearch(cursor, Search.LT, 5, 3); + checkSearch(cursor, Search.LTE, 5, 5); + checkSearch(cursor, Search.LT, 6, 5); + checkSearch(cursor, Search.LTE, 6, 5); + + cursor.close(); + db.close(); + close(env); + } + + private void insert(Database db, int key, int data) { + final DatabaseEntry keyEntry = + new DatabaseEntry(new byte[] { (byte) key }); + final DatabaseEntry dataEntry = + new DatabaseEntry(new byte[] { (byte) data }); + + final OperationStatus status = db.put(null, keyEntry, dataEntry); + assertSame(OperationStatus.SUCCESS, status); + } + + private void checkSearch(Cursor cursor, + Search searchMode, + int searchKey, + int expectKey) { + + final DatabaseEntry key = new DatabaseEntry( + new byte[] { (byte) searchKey }); + + final DatabaseEntry data = new DatabaseEntry(); + + final OperationStatus status = DbInternal.search( + cursor, key, null, data, searchMode, (LockMode) null); + + if (expectKey < 0) { + assertSame(OperationStatus.NOTFOUND, status); + return; + } + + assertSame(OperationStatus.SUCCESS, status); + assertEquals(expectKey, key.getData()[0]); + assertEquals(expectKey, data.getData()[0]); + } + + /** + * This is a rudimentary test of DbInternal.searchBoth, just to make sure + * we're passing parameters down correctly the RangeCursor. RangeCursor is + * tested thoroughly elsewhere. 
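+     *
+     * All searches below stay within the duplicate set of secondary key 2,
+     * whose primary keys are {1, 3, 5}; an expectPKey of -1 means NOTFOUND
+     * is expected.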
+ */ + @Test + public void testDbInternalSearchBoth() { + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + final Environment env = create(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + final Database db = env.openDatabase(null, "testDB", dbConfig); + + final SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setAllowCreate(true); + secConfig.setTransactional(true); + secConfig.setSortedDuplicates(true); + secConfig.setKeyCreator(new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData(data.getData()); + return true; + } + }); + final SecondaryDatabase secDb = env.openSecondaryDatabase( + null, "testDupsDB", db, secConfig); + + insert(db, 0, 1); + insert(db, 1, 2); + insert(db, 3, 2); + insert(db, 5, 2); + + final SecondaryCursor cursor = secDb.openCursor(null, null); + + checkSearchBoth(cursor, Search.GT, 2, 0, 1); + checkSearchBoth(cursor, Search.GTE, 2, 0, 1); + checkSearchBoth(cursor, Search.GT, 2, 1, 3); + checkSearchBoth(cursor, Search.GTE, 2, 1, 1); + checkSearchBoth(cursor, Search.GT, 2, 2, 3); + checkSearchBoth(cursor, Search.GTE, 2, 2, 3); + checkSearchBoth(cursor, Search.GT, 2, 3, 5); + checkSearchBoth(cursor, Search.GTE, 2, 3, 3); + checkSearchBoth(cursor, Search.GT, 2, 4, 5); + checkSearchBoth(cursor, Search.GTE, 2, 4, 5); + checkSearchBoth(cursor, Search.GT, 2, 5, -1); + checkSearchBoth(cursor, Search.GTE, 2, 5, 5); + checkSearchBoth(cursor, Search.GT, 2, 6, -1); + checkSearchBoth(cursor, Search.GTE, 2, 6, -1); + + checkSearchBoth(cursor, Search.LT, 2, 0, -1); + checkSearchBoth(cursor, Search.LTE, 2, 0, -1); + checkSearchBoth(cursor, Search.LT, 2, 1, -1); + checkSearchBoth(cursor, Search.LTE, 2, 1, 1); + checkSearchBoth(cursor, Search.LT, 2, 2, 1); + checkSearchBoth(cursor, Search.LTE, 2, 2, 1); + checkSearchBoth(cursor, Search.LT, 2, 3, 1); + checkSearchBoth(cursor, Search.LTE, 2, 3, 3); + checkSearchBoth(cursor, Search.LT, 2, 4, 3); + checkSearchBoth(cursor, Search.LTE, 2, 4, 3); + checkSearchBoth(cursor, Search.LT, 2, 5, 3); + checkSearchBoth(cursor, Search.LTE, 2, 5, 5); + checkSearchBoth(cursor, Search.LT, 2, 6, 5); + checkSearchBoth(cursor, Search.LTE, 2, 6, 5); + + cursor.close(); + secDb.close(); + db.close(); + close(env); + } + + private void checkSearchBoth(Cursor cursor, + Search searchMode, + int searchKey, + int searchPKey, + int expectPKey) { + + final DatabaseEntry key = new DatabaseEntry( + new byte[] { (byte) searchKey }); + + final DatabaseEntry pKey = new DatabaseEntry( + new byte[] { (byte) searchPKey }); + + final DatabaseEntry data = new DatabaseEntry(); + + final OperationStatus status = DbInternal.searchBoth( + cursor, key, pKey, data, searchMode, (LockMode) null); + + if (expectPKey < 0) { + assertSame(OperationStatus.NOTFOUND, status); + return; + } + + assertSame(OperationStatus.SUCCESS, status); + assertEquals(expectPKey, pKey.getData()[0]); + assertEquals(searchKey, data.getData()[0]); + } + + /** + * Checks that Cursor.getSearchKeyRange (as well as internal range + * searches) works even when insertions occur while doing a getNextBin in + * the window where no latches are held. In particular there is a scenario + * where it did not work, if a split during getNextBin arranges things in + * a particular way. 
This is a very specific scenario and requires many
+     * insertions in the window, so it seems unlikely to occur in the wild.
+     * This test creates that scenario.
+     */
+    @Test
+    public void testInsertionDuringGetNextBinDuringRangeSearch() {
+
+        /*
+         * Disable daemons for predictability. Disable BIN deltas so we can
+         * compress away a deleted slot below (if a delta would be logged next,
+         * slots won't be compressed).
+         */
+        final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setDurability(Durability.COMMIT_NO_SYNC);
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false");
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_CLEANER, "false");
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_EVICTOR, "false");
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false");
+        envConfig.setConfigParam(
+            EnvironmentConfig.TREE_BIN_DELTA, "0");
+        env = create(envHome, envConfig);
+
+        final DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        final Database db = env.openDatabase(null, "testDB", dbConfig);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry value = new DatabaseEntry();
+        OperationStatus status;
+
+        /*
+         * Create a tree that contains:
+         *  A BIN with keys 0-63.
+         *  Additional BINs with keys 1000-1063.
+         *
+         * Keys 1000-1063 are inserted in reverse order to make sure the split
+         * occurs in the middle of the BIN (rather than a "special" split).
+         *
+         * Key 64 is deleted so that key 63 will be the last one in the BIN
+         * when a split occurs later on, in the hook method below.
+         */
+        for (int i = 0; i < 64; i += 1) {
+            insertRecord(db, i);
+        }
+        for (int i = 1063; i >= 1000; i -= 1) {
+            insertRecord(db, i);
+        }
+        insertRecord(db, 64);
+        deleteRecord(db, 64);
+        env.compress();
+
+        /*
+         * Set a hook that is called in the window where no INs are latched,
+         * while getNextBin runs as Cursor.searchInternal processes a range
+         * search for key 500. searchInternal first calls
+         * CursorImpl.searchAndPosition which lands on key 63. It then calls
+         * CursorImpl.getNext, which does the getNextBin and calls the hook.
+         */
+        DbInternal.getDbImpl(db).getTree().setGetParentINHook(
+            new TestHook() {
+                @Override
+                public void doHook() {
+                    /* Only process the first call to the hook. */
+                    DbInternal.getDbImpl(db).getTree().
+                        setGetParentINHook(null);
+                    /*
+                     * Cause a split, leaving keys 0-63 in the first BIN and
+                     * keys 64-129 in the second BIN.
+                     */
+                    for (int i = 64; i < 130; i += 1) {
+                        insertRecord(db, i);
+                    }
+                }
+                @Override public void hookSetup() { }
+                @Override public void doIOHook() { }
+                @Override public void doHook(Object obj) { }
+                @Override public Object getHookValue() { return null; }
+            }
+        );
+
+        /*
+         * Search for a key >= 500, which should find key 1000. But due to the
+         * bug, CursorImpl.getNext advances to key 64 in the second BIN, and
+         * returns it.
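+         * The assertions at the end of the test confirm the fix: the range
+         * search must not return key 64 from the newly created sibling BIN,
+         * but must continue on to key 1000.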
+ */ + IntegerBinding.intToEntry(500, key); + final Cursor c = db.openCursor(null, null); + status = c.getSearchKeyRange(key, value, null); + + c.close(); + db.close(); + close(env); + + assertSame(OperationStatus.SUCCESS, status); + assertEquals(1000, IntegerBinding.entryToInt(key)); + } + + private void insertRecord(Database db, int key) { + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(new byte[1]); + IntegerBinding.intToEntry(key, keyEntry); + final OperationStatus status = + db.putNoOverwrite(null, keyEntry, dataEntry); + assertSame(OperationStatus.SUCCESS, status); + } + + private void deleteRecord(Database db, int key) { + final DatabaseEntry keyEntry = new DatabaseEntry(); + IntegerBinding.intToEntry(key, keyEntry); + final OperationStatus status = db.delete(null, keyEntry); + assertSame(OperationStatus.SUCCESS, status); + } + + @Test + public void testGetStorageSize() { + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + env = create(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, "testDB", dbConfig); + + final DatabaseEntry key = new DatabaseEntry(new byte[1]); + final DatabaseEntry value = new DatabaseEntry(); + + Transaction t = env.beginTransaction(null, null); + Cursor c = db.openCursor(t, null); + + value.setData(new byte[100]); + OperationResult r = c.put(key, value, Put.OVERWRITE, null); + assertNotNull(r); + final int separateLN1 = DbInternal.getCursorImpl(c).getStorageSize(); + + r = c.get(key, value, Get.FIRST, null); + assertNotNull(r); + final int separateLN2 = DbInternal.getCursorImpl(c).getStorageSize(); + + value.setData(new byte[10]); + r = c.put(key, value, Put.OVERWRITE, null); + assertNotNull(r); + final int embeddedLN1 = DbInternal.getCursorImpl(c).getStorageSize(); + + r = c.get(key, value, Get.FIRST, null); + assertNotNull(r); + final int embeddedLN2 = DbInternal.getCursorImpl(c).getStorageSize(); + + c.close(); + t.commit(); + db.close(); + + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "testDBDups", dbConfig); + + t = env.beginTransaction(null, null); + c = db.openCursor(t, null); + + value.setData(new byte[10]); + r = c.put(key, value, Put.OVERWRITE, null); + assertNotNull(r); + final int duplicateLN1 = DbInternal.getCursorImpl(c).getStorageSize(); + + r = c.get(key, value, Get.FIRST, null); + assertNotNull(r); + final int duplicateLN2 = DbInternal.getCursorImpl(c).getStorageSize(); + + c.close(); + t.commit(); + db.close(); + + dbConfig.setSortedDuplicates(false); + db = env.openDatabase(null, "testDBPri", dbConfig); + + final SecondaryConfig sdbConfig = new SecondaryConfig(); + sdbConfig.setAllowCreate(true); + sdbConfig.setTransactional(true); + sdbConfig.setKeyCreator(new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData(key.getData()); + return true; + } + }); + final SecondaryDatabase sdb = env.openSecondaryDatabase( + null, "testDBSec", db, sdbConfig); + + t = env.beginTransaction(null, null); + c = db.openCursor(t, null); + SecondaryCursor sc = sdb.openCursor(t, null); + + value.setData(new byte[100]); + r = c.put(key, value, Put.OVERWRITE, null); + assertNotNull(r); + 
final int priRec1 = DbInternal.getCursorImpl(c).getStorageSize(); + + r = sc.get(key, value, Get.FIRST, null); + assertNotNull(r); + final int secEqPri1 = DbInternal.getCursorImpl(sc).getStorageSize(); + + r = sc.get(key, null, Get.FIRST, null); + assertNotNull(r); + final int secNePri1 = DbInternal.getCursorImpl(sc).getStorageSize(); + + sc.close(); + c.close(); + t.commit(); + sdb.close(); + db.close(); + + db = env.openDatabase(null, "testDBSec", dbConfig); + t = env.beginTransaction(null, null); + c = db.openCursor(t, null); + + r = c.get(key, value, Get.FIRST, null); + assertNotNull(r); + final int secNePri2 = DbInternal.getCursorImpl(c).getStorageSize(); + + /* Check whether embedded LNs are disabled in this test. */ + final boolean embeddedLNsConfigured = + DbInternal.getEnvironmentImpl(env).getMaxEmbeddedLN() >= 10; + + c.close(); + t.commit(); + db.close(); + + close(env); + + /* + * Exact sizes are checked below because these have been manually + * confirmed to be roughly the size expected. If something changes in + * the code above, the exact sizes below may need to be adjusted. + * + * The StorageSize.getStorageSize javadoc explains the large inaccuracy + * for the embedded LN and the smaller inaccuracy for the separate LN. + * The duplicate LN size is accurate, and this accuracy matters because + * the total size is small. + * + * The embedded LN size is 2 bytes larger than the duplicate LN size + * because PRI_SLOT_OVERHEAD - SEC_SLOT_OVERHEAD = 2. + */ + final boolean rep = isReplicatedTest(this.getClass()); + + /* Customer formula: 100 (data size) + 2 * 1 (key size) + 64 = 166 */ + assertEquals(rep ? 144 : 135, separateLN1); + assertEquals(separateLN1, separateLN2); + + if (embeddedLNsConfigured) { + /* Customer formula: 10 (data size) + 2 * 1 (key size) + 64 = 76 */ + assertEquals(31, embeddedLN1); + assertEquals(embeddedLN1, embeddedLN2); + } + + /* Customer formula: 10 (data size) + 1 (key size) + 12 = 23 */ + assertEquals(23, duplicateLN1); + assertEquals(duplicateLN1, duplicateLN2); + + /* Sec cursor returns pri size, but only when reading pri data. */ + assertEquals(priRec1, secEqPri1); + assertTrue(priRec1 > secNePri1); + assertEquals(secNePri1, secNePri2); + + /* + * We do not show the actual storage sizes here because we don't have a + * simple way to serialize a single slot. + */ + } +} diff --git a/test/com/sleepycat/je/DatabaseComparatorsTest.java b/test/com/sleepycat/je/DatabaseComparatorsTest.java new file mode 100644 index 0000000..708f1d7 --- /dev/null +++ b/test/com/sleepycat/je/DatabaseComparatorsTest.java @@ -0,0 +1,663 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.Comparator;
+
+import org.junit.Test;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.TupleBase;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.bind.tuple.TupleOutput;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.DualTestCase;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+
+public class DatabaseComparatorsTest extends DualTestCase {
+
+    private File envHome;
+    private Environment env;
+    private boolean DEBUG = false;
+
+    public DatabaseComparatorsTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    private void openEnv()
+        throws DatabaseException {
+
+        openEnv(false);
+    }
+
+    private void openEnv(boolean transactional)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(transactional);
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                                 "true");
+        /* Prevent compression. */
+        envConfig.setConfigParam("je.env.runINCompressor", "false");
+        envConfig.setConfigParam("je.env.runCheckpointer", "false");
+        envConfig.setConfigParam("je.env.runEvictor", "false");
+        envConfig.setConfigParam("je.env.runCleaner", "false");
+        env = create(envHome, envConfig);
+    }
+
+    private Database openDb
+        (boolean transactional,
+         boolean dups,
+         Class<? extends Comparator<byte[]>> btreeComparator,
+         Class<? extends Comparator<byte[]>> dupComparator)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        dbConfig.setTransactional(transactional);
+        if (btreeComparator != null) {
+            dbConfig.setBtreeComparator(btreeComparator);
+        }
+        if (dupComparator != null) {
+            dbConfig.setDuplicateComparator(dupComparator);
+        }
+        return env.openDatabase(null, "testDB", dbConfig);
+    }
+
+    @Test
+    public void testSR12517()
+        throws Exception {
+
+        openEnv();
+        Database db = openDb(false /*transactional*/, false /*dups*/,
+                             ReverseComparator.class, ReverseComparator.class);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Insert 5 items. */
+        for (int i = 0; i < 5; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+            /* Overwrite the data; dups are not configured, so this second
+               put replaces the record.
*/ + IntegerBinding.intToEntry(i * 2, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + read(db); + + db.close(); + close(env); + + openEnv(); + db = openDb(false /*transactional*/, false /*dups*/, + ReverseComparator.class, ReverseComparator.class); + + read(db); + db.close(); + close(env); + env = null; + } + + @Test + public void testDatabaseCompareKeysArgs() + throws Exception { + + openEnv(); + Database db = openDb(false /*transactional*/, false /*dups*/, + null, null); + + DatabaseEntry entry1 = new DatabaseEntry(); + DatabaseEntry entry2 = new DatabaseEntry(); + + try { + db.compareKeys(null, entry2); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareKeys(entry1, null); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareDuplicates(null, entry2); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareDuplicates(entry1, null); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + IntegerBinding.intToEntry(1, entry1); + + try { + db.compareKeys(entry1, entry2); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareKeys(entry2, entry1); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareDuplicates(entry1, entry2); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareDuplicates(entry2, entry1); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + entry1.setPartial(true); + IntegerBinding.intToEntry(1, entry2); + + try { + db.compareKeys(entry1, entry2); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareKeys(entry2, entry1); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareDuplicates(entry1, entry2); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + try { + db.compareDuplicates(entry2, entry1); + fail("should have thrown IAE"); + } catch (IllegalArgumentException IAE) { + } + + db.close(); + + try { + db.compareKeys(entry1, entry2); + fail("should have thrown ISE"); + } catch (IllegalStateException ISE) { + assertTrue(ISE.getMessage().contains("Database was closed")); + } + + try { + db.compareDuplicates(entry1, entry2); + fail("should have thrown ISE"); + } catch (IllegalStateException ISE) { + assertTrue(ISE.getMessage().contains("Database was closed")); + } + + close(env); + env = null; + } + + @Test + public void testSR16816DefaultComparator() + throws Exception { + + doTestSR16816(null, null, 1); + } + + @Test + public void testSR16816ReverseComparator() + throws Exception { + + doTestSR16816(ReverseComparator.class, ReverseComparator.class, -1); + } + + private void doTestSR16816(Class btreeComparator, + Class dupComparator, + int expectedSign) + throws Exception { + + openEnv(); + Database db = openDb(false /*transactional*/, false /*dups*/, + btreeComparator, dupComparator); + + DatabaseEntry entry1 = new DatabaseEntry(); + DatabaseEntry entry2 = new DatabaseEntry(); + + IntegerBinding.intToEntry(1, entry1); + IntegerBinding.intToEntry(2, entry2); + + assertEquals(expectedSign * -1, db.compareKeys(entry1, entry2)); + assertEquals(0, db.compareKeys(entry1, entry1)); + assertEquals(0, db.compareKeys(entry2, entry2)); + assertEquals(expectedSign * 1, 
db.compareKeys(entry2, entry1));
+
+        assertEquals(expectedSign * -1, db.compareDuplicates(entry1, entry2));
+        assertEquals(0, db.compareDuplicates(entry1, entry1));
+        assertEquals(0, db.compareDuplicates(entry2, entry2));
+        assertEquals(expectedSign * 1, db.compareDuplicates(entry2, entry1));
+
+        db.close();
+        close(env);
+        env = null;
+    }
+
+    private void read(Database db)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Iterate */
+        Cursor c = db.openCursor(null, null);
+        int expected = 4;
+        while (c.getNext(key, data, LockMode.DEFAULT) ==
+               OperationStatus.SUCCESS) {
+            assertEquals(expected, IntegerBinding.entryToInt(key));
+            expected--;
+            if (DEBUG) {
+                System.out.println("cursor: k=" +
+                                   IntegerBinding.entryToInt(key) +
+                                   " d=" +
+                                   IntegerBinding.entryToInt(data));
+            }
+        }
+        assertEquals(expected, -1);
+
+        c.close();
+
+        /* Retrieve 5 items */
+        for (int i = 0; i < 5; i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.get(null, key, data, LockMode.DEFAULT));
+            assertEquals(i, IntegerBinding.entryToInt(key));
+            assertEquals(i * 2, IntegerBinding.entryToInt(data));
+            if (DEBUG) {
+                System.out.println("k=" +
+                                   IntegerBinding.entryToInt(key) +
+                                   " d=" +
+                                   IntegerBinding.entryToInt(data));
+            }
+        }
+    }
+
+    public static class ReverseComparator implements Comparator<byte[]> {
+
+        public ReverseComparator() {
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+
+            DatabaseEntry arg1 = new DatabaseEntry(o1);
+            DatabaseEntry arg2 = new DatabaseEntry(o2);
+            int val1 = IntegerBinding.entryToInt(arg1);
+            int val2 = IntegerBinding.entryToInt(arg2);
+
+            if (val1 < val2) {
+                return 1;
+            } else if (val1 > val2) {
+                return -1;
+            } else {
+                return 0;
+            }
+        }
+    }
+
+    /**
+     * Checks that when reusing a slot and then aborting the transaction, the
+     * original data is restored, when using a btree comparator. [#15704]
+     *
+     * When using partial keys to reuse a slot with a different--but equal
+     * according to a custom comparator--key, a bug caused corruption of an
+     * existing record after an abort. The sequence for a non-duplicate
+     * database and a btree comparator that compares only the first integer in
+     * a two integer key is:
+     *
+     * 100 Insert LN key={0,0} txn 1
+     * 110 Commit txn 1
+     * 120 Delete LN key={0,0} txn 2
+     * 130 Insert LN key={0,1} txn 2
+     * 140 Abort txn 2
+     *
+     * When key {0,1} is inserted at LSN 130, it reuses the slot for {0,0}
+     * because these two keys are considered equal by the comparator. When txn
+     * 2 is aborted, it restores LSN 100 in the slot, but the key in the BIN
+     * stays {0,1}. Fetching the record after the abort gives key {0,1}.
+     */
+    @Test
+    public void testReuseSlotAbortPartialKey()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialKey(false /*runRecovery*/);
+    }
+
+    /**
+     * Same as testReuseSlotAbortPartialKey but runs recovery after the abort.
+     */
+    @Test
+    public void testReuseSlotRecoverPartialKey()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialKey(true /*runRecovery*/);
+    }
+
+    private void doTestReuseSlotPartialKey(boolean runRecovery)
+        throws DatabaseException {
+
+        openEnv(true /*transactional*/);
+        Database db = openDb
+            (true /*transactional*/, false /*dups*/,
+             Partial2PartComparator.class /*btreeComparator*/,
+             null /*dupComparator*/);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key={0,0}/data={0} using auto-commit.
*/
+        status = db.put(null, entry(0, 0), entry(0));
+        assertSame(OperationStatus.SUCCESS, status);
+        key = entry(0, 1);
+        data = entry(0);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0, 0);
+        check(data, 0);
+
+        /* Delete, insert key={0,1}/data={1}, abort. */
+        Transaction txn = env.beginTransaction(null, null);
+        status = db.delete(txn, entry(0, 1));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = db.get(txn, entry(0, 0), data, null);
+        assertSame(OperationStatus.NOTFOUND, status);
+        status = db.put(txn, entry(0, 1), entry(1));
+        assertSame(OperationStatus.SUCCESS, status);
+        key = entry(0, 0);
+        data = entry(1);
+        status = db.getSearchBoth(txn, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0, 1);
+        check(data, 1);
+        txn.abort();
+
+        if (runRecovery) {
+            db.close();
+            close(env);
+            env = null;
+            openEnv(true /*transactional*/);
+            db = openDb
+                (true /*transactional*/, false /*dups*/,
+                 Partial2PartComparator.class /*btreeComparator*/,
+                 null /*dupComparator*/);
+        }
+
+        /* Check that we rolled back to key={0,0}/data={0}. */
+        key = entry(0, 1);
+        data = entry(0);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0, 0);
+        check(data, 0);
+
+        db.close();
+        close(env);
+        env = null;
+    }
+
+    /**
+     * Same as testReuseSlotAbortPartialKey but for reuse of duplicate data
+     * slots. [#15704]
+     *
+     * The sequence for a duplicate database and a duplicate comparator that
+     * compares only the first integer in a two integer data value is:
+     *
+     * 100 Insert LN key={0}/data={0,0} txn 1
+     * 110 Insert LN key={0}/data={1,1} txn 1
+     * 120 Commit txn 1
+     * 130 Delete LN key={0}/data={0,0} txn 2
+     * 140 Insert LN key={0}/data={0,1} txn 2
+     * 150 Abort txn 2
+     *
+     * When data {0,1} is inserted at LSN 140, it reuses the slot for {0,0}
+     * because these two data values are considered equal by the comparator.
+     * When txn 2 is aborted, it restores LSN 100 in the slot, but the data in
+     * the DBIN stays {0,1}. Fetching the record after the abort gives data
+     * {0,1}.
+     */
+    @Test
+    public void testReuseSlotAbortPartialDup()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialDup(false /*runRecovery*/);
+    }
+
+    /**
+     * Same as testReuseSlotAbortPartialDup but runs recovery after the abort.
+     */
+    @Test
+    public void testReuseSlotRecoverPartialDup()
+        throws DatabaseException {
+
+        doTestReuseSlotPartialDup(true /*runRecovery*/);
+    }
+
+    private void doTestReuseSlotPartialDup(boolean runRecovery)
+        throws DatabaseException {
+
+        openEnv(true /*transactional*/);
+        Database db = openDb
+            (true /*transactional*/, true /*dups*/,
+             null /*btreeComparator*/,
+             Partial2PartComparator.class /*dupComparator*/);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert key={0}/data={0,0} and key={0}/data={1,1} in one txn. */
+        Transaction txn = env.beginTransaction(null, null);
+        status = db.put(txn, entry(0), entry(0, 0));
+        assertSame(OperationStatus.SUCCESS, status);
+        status = db.put(txn, entry(0), entry(1, 1));
+        assertSame(OperationStatus.SUCCESS, status);
+        txn.commit();
+        key = entry(0);
+        data = entry(0, 1);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 0);
+
+        /* Delete, insert key={0}/data={0,1}, abort.
*/
+        txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        key = entry(0);
+        data = entry(0, 1);
+        status = cursor.getSearchBoth(key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 0);
+        status = cursor.delete();
+        assertSame(OperationStatus.SUCCESS, status);
+        status = cursor.put(entry(0), entry(0, 1));
+        assertSame(OperationStatus.SUCCESS, status);
+        key = entry(0);
+        data = entry(0, 1);
+        status = cursor.getSearchBoth(key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 1);
+        cursor.close();
+        txn.abort();
+
+        if (runRecovery) {
+            db.close();
+            close(env);
+            env = null;
+            openEnv(true /*transactional*/);
+            db = openDb
+                (true /*transactional*/, true /*dups*/,
+                 null /*btreeComparator*/,
+                 Partial2PartComparator.class /*dupComparator*/);
+        }
+
+        /* Check that we rolled back to key={0}/data={0,0}. */
+        key = entry(0);
+        data = entry(0, 1);
+        status = db.getSearchBoth(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0);
+        check(data, 0, 0);
+
+        db.close();
+        close(env);
+        env = null;
+    }
+
+    /**
+     * In the past, we prohibited the case where dups are configured and the
+     * btree comparator does not compare all bytes of the key. With the old
+     * DBIN/DIN dup implementation, to support this would require maintaining
+     * the BIN slot and DIN/DBIN.dupKey fields to be transactionally correct.
+     * This would have been impractical since INs by design are
+     * non-transactional. [#15704]
+     *
+     * But with the two-part key dups implementation, the BIN slot is always
+     * transactionally correct, and this test now confirms this. [#19165]
+     */
+    @Test
+    public void testDupsWithPartialComparator()
+        throws DatabaseException {
+
+        openEnv(false /*transactional*/);
+        Database db = openDb
+            (false /*transactional*/, true /*dups*/,
+             Partial2PartComparator.class /*btreeComparator*/,
+             null /*dupComparator*/);
+
+        OperationStatus status;
+
+        /* Insert key={0,0}/data={0}. */
+        status = db.put(null, entry(0, 0), entry(0));
+        assertSame(OperationStatus.SUCCESS, status);
+
+        /* Update to key={0,1}/data={0}.
*/
+        status = db.put(null, entry(0, 1), entry(0));
+        assertSame(OperationStatus.SUCCESS, status);
+        DatabaseEntry key = entry(0, 0);
+        DatabaseEntry data = entry(0);
+        status = db.get(null, key, data, null);
+        assertSame(OperationStatus.SUCCESS, status);
+        check(key, 0, 1);
+        check(data, 0);
+
+        db.close();
+        close(env);
+        env = null;
+    }
+
+    private void check(DatabaseEntry entry, int p1) {
+        assertEquals(4, entry.getSize());
+        TupleInput input = TupleBase.entryToInput(entry);
+        assertEquals(p1, input.readInt());
+    }
+
+    private void check(DatabaseEntry entry, int p1, int p2) {
+        assertEquals(8, entry.getSize());
+        TupleInput input = TupleBase.entryToInput(entry);
+        assertEquals(p1, input.readInt());
+        assertEquals(p2, input.readInt());
+    }
+
+    /*
+    private void dump(Database db, Transaction txn)
+        throws DatabaseException {
+
+        System.out.println("-- dump --");
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+        Cursor c = db.openCursor(txn, null);
+        while (c.getNext(key, data, null) == OperationStatus.SUCCESS) {
+            TupleInput keyInput = TupleBase.entryToInput(key);
+            int keyP1 = keyInput.readInt();
+            int keyP2 = keyInput.readInt();
+            int dataVal = IntegerBinding.entryToInt(data);
+            System.out.println("keyP1=" + keyP1 +
+                               " keyP2=" + keyP2 +
+                               " dataVal=" + dataVal);
+        }
+        c.close();
+    }
+    */
+
+    private DatabaseEntry entry(int p1) {
+        DatabaseEntry entry = new DatabaseEntry();
+        TupleOutput output = new TupleOutput();
+        output.writeInt(p1);
+        TupleBase.outputToEntry(output, entry);
+        return entry;
+    }
+
+    private DatabaseEntry entry(int p1, int p2) {
+        DatabaseEntry entry = new DatabaseEntry();
+        TupleOutput output = new TupleOutput();
+        output.writeInt(p1);
+        output.writeInt(p2);
+        TupleBase.outputToEntry(output, entry);
+        return entry;
+    }
+
+    /**
+     * Writes two integers to the byte array.
+     */
+    private void make2PartEntry(int p1, int p2, DatabaseEntry entry) {
+        TupleOutput output = new TupleOutput();
+        output.writeInt(p1);
+        output.writeInt(p2);
+        TupleBase.outputToEntry(output, entry);
+    }
+
+    /**
+     * Compares only the first integer in the byte arrays.
+     */
+    public static class Partial2PartComparator
+        implements Comparator<byte[]>, PartialComparator {
+
+        public int compare(byte[] o1, byte[] o2) {
+            int val1 = new TupleInput(o1).readInt();
+            int val2 = new TupleInput(o2).readInt();
+            return val1 - val2;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/DatabaseConfigTest.java b/test/com/sleepycat/je/DatabaseConfigTest.java
new file mode 100644
index 0000000..8ca013b
--- /dev/null
+++ b/test/com/sleepycat/je/DatabaseConfigTest.java
@@ -0,0 +1,1103 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.Serializable; +import java.util.Comparator; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Basic database configuration testing. + */ +public class DatabaseConfigTest extends DualTestCase { + private static final boolean DEBUG = false; + + private final File envHome; + private Environment env; + + public DatabaseConfigTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Test that we can retrieve a database configuration and that it clones + * its configuration appropriately. + */ + @Test + public void testConfig() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + /* + * Make sure that the database keeps its own copy of the + * configuration object. + */ + DatabaseConfig dbConfigA = new DatabaseConfig(); + dbConfigA.setAllowCreate(true); + Database dbA = env.openDatabase(null, "foo", dbConfigA); + + /* Change the original dbConfig */ + dbConfigA.setAllowCreate(false); + DatabaseConfig getConfig1 = dbA.getConfig(); + assertEquals(true, getConfig1.getAllowCreate()); + assertEquals(false, getConfig1.getSortedDuplicates()); + + /* + * Change the retrieved config, ought to have no effect on what the + * Database is storing. + */ + getConfig1.setSortedDuplicates(true); + DatabaseConfig getConfig2 = dbA.getConfig(); + assertEquals(false, getConfig2.getSortedDuplicates()); + + dbA.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testConfigMatching() + throws Throwable { + + try { + /* DatabaseConfig matching. 
*/ + + DatabaseConfig confA = new DatabaseConfig(); + DatabaseConfig confB = new DatabaseConfig(); + + try { + confA.validate(confB); + } catch (Exception E) { + fail("expected valid match"); + } + + try { + confB.validate(confA); + } catch (Exception E) { + fail("expected valid match"); + } + + try { + confA.validate(null); // uses the DEFAULT config + } catch (Exception E) { + fail("expected valid match"); + } + + confA.setReadOnly(true); + try { + confA.validate(confB); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + + confA.setReadOnly(false); + confA.setSortedDuplicates(true); + try { + confA.validate(confB); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confA.setSortedDuplicates(false); + + confA.setOverrideBtreeComparator(true); + confA.setBtreeComparator(TestComparator.class); + confB.setOverrideBtreeComparator(true); + confB.setBtreeComparator(TestComparator2.class); + try { + confA.validate(confB); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confA.setBtreeComparator((Class) null); + confA.setOverrideBtreeComparator(false); + confB.setBtreeComparator((Class) null); + confB.setOverrideBtreeComparator(false); + + confA.setOverrideDuplicateComparator(true); + confA.setDuplicateComparator(TestComparator.class); + confB.setOverrideDuplicateComparator(true); + confB.setDuplicateComparator(TestComparator2.class); + try { + confA.validate(confB); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + + /* Same tests as above but for serialized comparators. */ + + confA.setOverrideBtreeComparator(true); + confA.setBtreeComparator(new TestSerialComparator()); + confB.setOverrideBtreeComparator(true); + confB.setBtreeComparator(new TestSerialComparator2()); + try { + confA.validate(confB); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confA.setBtreeComparator((Comparator) null); + confA.setOverrideBtreeComparator(false); + confB.setBtreeComparator((Comparator) null); + confB.setOverrideBtreeComparator(false); + + confA.setOverrideDuplicateComparator(true); + confA.setDuplicateComparator(new TestSerialComparator()); + confB.setOverrideDuplicateComparator(true); + confB.setDuplicateComparator(new TestSerialComparator2()); + try { + confA.validate(confB); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + + /* SecondaryConfig matching. 
*/ + + SecondaryConfig confC = new SecondaryConfig(); + SecondaryConfig confD = new SecondaryConfig(); + confC.setKeyCreator(new SecKeyCreator1()); + confD.setKeyCreator(new SecKeyCreator1()); + + try { + confC.validate(confD); + } catch (Exception E) { + E.printStackTrace(); + fail("expected valid match"); + } + + try { + confD.validate(confC); + } catch (Exception E) { + fail("expected valid match"); + } + + try { + confC.validate(null); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + + confD.setKeyCreator(new SecKeyCreator2()); + try { + confC.validate(confD); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confD.setKeyCreator(new SecKeyCreator1()); + + confD.setMultiKeyCreator(new SecMultiKeyCreator1()); + try { + confC.validate(confD); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confD.setMultiKeyCreator(null); + + confC.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY); + try { + confC.validate(confD); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confC.setForeignKeyDeleteAction(ForeignKeyDeleteAction.ABORT); + + confC.setForeignKeyNullifier(new ForeignKeyNullifier1()); + try { + confC.validate(confD); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confC.setForeignKeyNullifier(null); + + confC.setForeignMultiKeyNullifier(new ForeignMultiKeyNullifier1()); + try { + confC.validate(confD); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confC.setForeignMultiKeyNullifier(null); + + confC.setImmutableSecondaryKey(true); + try { + confC.validate(confD); + fail("expected exception"); + } catch (IllegalArgumentException E) { + // ok + } + confC.setImmutableSecondaryKey(false); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Make sure we can instantiate a comparator at the time it's set. + */ + @Test + public void testComparator() + throws Throwable { + + try { + /* Can't be instantiated, a nested class */ + try { + DatabaseConfig config = new DatabaseConfig(); + config.setBtreeComparator(BadComparator1.class); + fail("Comparator shouldn't be instantiated"); + } catch (IllegalArgumentException e) { + /* Expected. */ + if (DEBUG) { + System.out.println(e); + } + } + + /* No zero-parameter constructor */ + try { + DatabaseConfig config = new DatabaseConfig(); + config.setBtreeComparator(BadComparator2.class); + fail("Comparator shouldn't be instantiated"); + } catch (IllegalArgumentException e) { + /* Expected. */ + if (DEBUG) { + System.out.println(e); + } + } + + /* Can't be serialized, not serializable */ + try { + DatabaseConfig config = new DatabaseConfig(); + config.setBtreeComparator(new BadSerialComparator1()); + fail("Comparator shouldn't be instantiated"); + } catch (IllegalArgumentException e) { + /* Expected. */ + if (DEBUG) { + System.out.println(e); + } + } + + /* Can't be serialized, contains non-serializable field */ + try { + DatabaseConfig config = new DatabaseConfig(); + config.setBtreeComparator(new BadSerialComparator2()); + fail("Comparator shouldn't be instantiated"); + } catch (IllegalArgumentException e) { + /* Expected. 
*/ + if (DEBUG) { + System.out.println(e); + } + } + + /* Valid comparators */ + DatabaseConfig config = new DatabaseConfig(); + config.setBtreeComparator(TestComparator.class); + config.setBtreeComparator(TestComparator2.class); + config.setBtreeComparator(new TestSerialComparator()); + config.setBtreeComparator(new TestSerialComparator2()); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test that any conflicts between configuration object settings and the + * underlying impl object are detected. + */ + @Test + public void testConfigConflict() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + /* + * Test conflicts of duplicate allowed configuration. + */ + + /* 1a. Create allowing duplicates. */ + DatabaseConfig firstConfig = new DatabaseConfig(); + firstConfig.setAllowCreate(true); + firstConfig.setSortedDuplicates(true); + Database firstHandle = env.openDatabase(null, "fooDups", + firstConfig); + /* 1b. Try to open w/no duplicates. */ + DatabaseConfig secondConfig = new DatabaseConfig(); + secondConfig.setSortedDuplicates(false); + try { + env.openDatabase(null, "fooDups", secondConfig); + fail("Conflict in duplicates allowed should be detected."); + } catch (IllegalArgumentException expected) { + } + + firstHandle.close(); + env.removeDatabase(null, "fooDups"); + + /* 2a. Create dis-allowing duplicates. */ + firstConfig.setSortedDuplicates(false); + firstConfig.setKeyPrefixing(false); + firstHandle = env.openDatabase(null, "fooDups", firstConfig); + /* 2b. Try to open w/duplicates. */ + secondConfig.setSortedDuplicates(true); + try { + env.openDatabase(null, "fooDups", secondConfig); + fail("Conflict in duplicates allowed should be detected."); + } catch (IllegalArgumentException expected) { + } + firstHandle.close(); + + /* + * Test conflicts of read only. If the environment is read/write + * we should be able to open handles in read only or read/write + * mode. If the environment is readonly, the database handles + * must also be read only. + */ + DatabaseConfig readOnlyConfig = new DatabaseConfig(); + readOnlyConfig.setReadOnly(true); + Database roHandle = env.openDatabase(null, "fooDups", + readOnlyConfig); + roHandle.close(); + + /* Open the environment in read only mode. */ + close(env); + envConfig = TestUtils.initEnvConfig(); + envConfig.setReadOnly(true); + env = create(envHome, envConfig); + + /* Open a readOnly database handle, should succeed */ + roHandle = env.openDatabase(null, "fooDups", + readOnlyConfig); + roHandle.close(); + + /* Open a read/write database handle, should not succeed. */ + try { + env.openDatabase(null, "fooDups", null); + fail("Should not be able to open read/write"); + } catch (IllegalArgumentException expected) { + } + close(env); + + /* + * Check comparator changes. + */ + /* 1a. Open w/a null comparator */ + env = create(envHome, null); + firstConfig = new DatabaseConfig(); + firstConfig.setAllowCreate(true); + firstHandle = env.openDatabase(null, + "fooComparator", + firstConfig); + DatabaseConfig firstRetrievedConfig = firstHandle.getConfig(); + assertEquals(null, firstRetrievedConfig.getBtreeComparator()); + assertEquals(null, firstRetrievedConfig.getDuplicateComparator()); + + /* + * 1b. Open a db w/a different comparator, shouldn't take effect + * because override is not set. 
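+             * A stored comparator is replaced only when the corresponding
+             * setOverrideBtreeComparator/setOverrideDuplicateComparator
+             * flag is set to true.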
+             */
+            secondConfig = new DatabaseConfig();
+            Comparator<byte[]> btreeComparator = new TestComparator();
+            Comparator<byte[]> dupComparator = new TestComparator();
+            secondConfig.setBtreeComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 btreeComparator.getClass());
+            secondConfig.setDuplicateComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 dupComparator.getClass());
+            Database secondHandle =
+                env.openDatabase(null, "fooComparator", secondConfig);
+            DatabaseConfig retrievedConfig = secondHandle.getConfig();
+            assertEquals(null, retrievedConfig.getBtreeComparator());
+            assertEquals(null, retrievedConfig.getDuplicateComparator());
+            secondHandle.close();
+
+            /* Same as above but with a serialized comparator. */
+            secondConfig = new DatabaseConfig();
+            btreeComparator = new TestSerialComparator();
+            dupComparator = new TestSerialComparator();
+            secondConfig.setBtreeComparator(btreeComparator);
+            secondConfig.setDuplicateComparator(dupComparator);
+            secondHandle =
+                env.openDatabase(null, "fooComparator", secondConfig);
+            retrievedConfig = secondHandle.getConfig();
+            assertEquals(null, retrievedConfig.getBtreeComparator());
+            assertEquals(null, retrievedConfig.getDuplicateComparator());
+            secondHandle.close();
+
+            /*
+             * Test that updating the DatabaseConfig while there are open
+             * handles throws exceptions.
+             */
+            secondConfig.setOverrideBtreeComparator(true);
+            secondConfig.setOverrideDuplicateComparator(true);
+            btreeComparator = new TestComparator();
+            dupComparator = new TestComparator();
+            secondConfig.setBtreeComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 btreeComparator.getClass());
+            secondConfig.setDuplicateComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 dupComparator.getClass());
+            try {
+                secondHandle = env.openDatabase(null,
+                                                "fooComparator",
+                                                secondConfig);
+                fail("Expect exceptions here");
+            } catch (IllegalStateException e) {
+                /* Expected exception. */
+            } catch (Exception e) {
+                fail("Unexpected exception: " + e.getMessage());
+            }
+            secondHandle.close();
+
+            /*
+             * Opening a new database handle without DatabaseConfig changes
+             * should be valid.
+             */
+            try {
+                secondHandle = env.openDatabase(null,
+                                                "fooComparator",
+                                                firstConfig);
+            } catch (Exception e) {
+                fail("Unexpected exception: " + e.getMessage());
+            }
+
+            secondHandle.close();
+            firstHandle.close();
+            close(env);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            close(env);
+            throw t;
+        }
+    }
+
+    @Test
+    public void testIsTransactional()
+        throws Throwable {
+
+        try {
+            /* Open environment in transactional mode.*/
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            env = create(envHome, envConfig);
+
+            /* Create a db, open transactionally with implied auto-commit. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            Database myDb = env.openDatabase(null, "testDB", dbConfig);
+            assertTrue(myDb.isTransactional());
+            assertTrue(myDb.getConfig().getTransactional());
+            myDb.close();
+
+            /* Open an existing db, can open it non-transactionally. */
+            dbConfig.setTransactional(false);
+            myDb = env.openDatabase(null, "testDB", null);
+            assertFalse(myDb.isTransactional());
+            assertFalse(myDb.getConfig().getTransactional());
+            myDb.close();
+
+            /* Open another db, pass an explicit transaction.
*/ + dbConfig.setTransactional(true); + Transaction txn = env.beginTransaction(null, null); + myDb = env.openDatabase(txn, "testDB2", dbConfig); + assertTrue(myDb.isTransactional()); + assertTrue(myDb.getConfig().getTransactional()); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(0)); + data.setData(TestUtils.getTestArray(0)); + try { + myDb.put(null, key, data); + } catch (DatabaseException DBE) { + fail("didn't expect DatabaseException, implied autocommit"); + } + + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + try { + myDb.put(txn, key, data); + } catch (DatabaseException DBE) { + fail("didn't expect DatabaseException with txn passed"); + } + + try { + myDb.get(txn, key, data, LockMode.DEFAULT); + } catch (DatabaseException DBE) { + fail("didn't expect DatabaseException with txn passed"); + } + + txn.commit(); + + try { + myDb.get(null, key, data, LockMode.DEFAULT); + } catch (DatabaseException DBE) { + fail("didn't expect DatabaseException because no txn passed"); + } + + myDb.close(); + + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testOpenReadOnly() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + Transaction txn = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database myDb = env.openDatabase(txn, "testDB2", dbConfig); + + key.setData(TestUtils.getTestArray(0)); + data.setData(TestUtils.getTestArray(0)); + try { + myDb.put(txn, key, data); + } catch (DatabaseException DBE) { + fail("unexpected DatabaseException during put"); + } + + txn.commit(); + myDb.close(); + + dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setReadOnly(true); + txn = env.beginTransaction(null, null); + myDb = env.openDatabase(txn, "testDB2", dbConfig); + assertTrue(myDb.isTransactional()); + assertTrue(myDb.getConfig().getTransactional()); + + key.setData(TestUtils.getTestArray(0)); + data.setData(TestUtils.getTestArray(0)); + try { + myDb.put(txn, key, data); + fail("expected UnsupportedOperationException " + + "because open RDONLY"); + } catch (UnsupportedOperationException expected) { + } + + key.setData(TestUtils.getTestArray(0)); + data.setData(TestUtils.getTestArray(0)); + assertEquals(OperationStatus.SUCCESS, + myDb.get(txn, key, data, LockMode.DEFAULT)); + + Cursor cursor = myDb.openCursor(txn, null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(key, data, LockMode.DEFAULT)); + + try { + cursor.delete(); + fail("expected Exception from delete on RD_ONLY db"); + } catch (UnsupportedOperationException e) { + } + + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + try { + myDb.put(txn, key, data); + fail + ("expected UnsupportedOperationException because open RDONLY"); + } catch (UnsupportedOperationException expected) { + } + + cursor.close(); + txn.commit(); + myDb.close(); + + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test exclusive creation. 
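+     * With setExclusiveCreate(true), the open must create the database:
+     * the first open below succeeds, and a second open of the same name
+     * fails.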
+ */ + @Test + public void testExclusive() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + + /* + * Make sure that the database keeps its own copy of the + * configuration object. + */ + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + + /* Should succeed and create the database. */ + Database dbA = env.openDatabase(null, "foo", dbConfig); + dbA.close(); + + /* Should not succeed, because the database exists. */ + try { + env.openDatabase(null, "foo", dbConfig); + fail("Database already exists"); + } catch (DatabaseException e) { + } + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /* + * Test that changing the Btree comparator really writes it to disk. + */ + @Test + public void testConfigOverrideUpdateSR15743() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + /* + * Make sure that the database keeps its own copy of the + * configuration object. + */ + DatabaseConfig dbConfigA = new DatabaseConfig(); + dbConfigA.setOverrideBtreeComparator(false); + dbConfigA.setBtreeComparator(TestComparator.class); + dbConfigA.setAllowCreate(true); + Database dbA = env.openDatabase(null, "foo", dbConfigA); + + /* Change the original dbConfig */ + dbConfigA.setBtreeComparator(TestComparator2.class); + DatabaseConfig getConfig1 = dbA.getConfig(); + assertEquals(TestComparator.class, + getConfig1.getBtreeComparator().getClass()); + + /* + * Change the retrieved config, ought to have no effect on what the + * Database is storing. + */ + getConfig1.setBtreeComparator(TestComparator2.class); + DatabaseConfig getConfig2 = dbA.getConfig(); + assertEquals(TestComparator.class, + getConfig2.getBtreeComparator().getClass()); + + dbA.close(); + close(env); + + /* Ensure new comparator is written to disk. */ + envConfig = TestUtils.initEnvConfig(); + env = create(envHome, envConfig); + + dbConfigA = new DatabaseConfig(); + /* Change the comparator. */ + dbConfigA.setOverrideBtreeComparator(true); + dbConfigA.setBtreeComparator(TestComparator2.class); + dbA = env.openDatabase(null, "foo", dbConfigA); + + getConfig2 = dbA.getConfig(); + assertEquals(TestComparator2.class, + getConfig2.getBtreeComparator().getClass()); + + dbA.close(); + close(env); + + /* Read it back during recovery to ensure it was written. */ + envConfig = TestUtils.initEnvConfig(); + env = create(envHome, envConfig); + + dbConfigA = new DatabaseConfig(); + dbA = env.openDatabase(null, "foo", dbConfigA); + getConfig2 = dbA.getConfig(); + assertEquals(TestComparator2.class, + getConfig2.getBtreeComparator().getClass()); + + /* Create a root for the tree. */ + dbA.put(null, + new DatabaseEntry(new byte[1]), + new DatabaseEntry(new byte[1])); + + dbA.close(); + close(env); + + /* Change it to a third one when there is a root present. */ + envConfig = TestUtils.initEnvConfig(); + env = create(envHome, envConfig); + + dbConfigA = new DatabaseConfig(); + /* Change the comparator. 
*/ + dbConfigA.setOverrideBtreeComparator(true); + dbConfigA.setBtreeComparator(TestComparator3.class); + dbA = env.openDatabase(null, "foo", dbConfigA); + getConfig2 = dbA.getConfig(); + assertEquals(TestComparator3.class, + getConfig2.getBtreeComparator().getClass()); + dbA.close(); + close(env); + + /* Read it back during recovery to ensure it was written. */ + envConfig = TestUtils.initEnvConfig(); + env = create(envHome, envConfig); + + dbConfigA = new DatabaseConfig(); + dbA = env.openDatabase(null, "foo", dbConfigA); + getConfig2 = dbA.getConfig(); + assertEquals(TestComparator3.class, + getConfig2.getBtreeComparator().getClass()); + dbA.close(); + close(env); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /* Test mutable and persistent configurations. */ + @Test + public void testPersistentAndMutableConfigs() + throws Exception { + + final String dbName = "foo"; + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = create(envHome, envConfig); + + DbConfigManager configMgr = + DbInternal.getNonNullEnvImpl(env).getConfigManager(); + int defaultNodeMaxEntries = + configMgr.getInt(EnvironmentParams.NODE_MAX); + int defaultNodeDupTreeMaxEntries = + configMgr.getInt(EnvironmentParams.NODE_MAX_DUPTREE); + + /* Check the default node max entries setting. */ + assertEquals(defaultNodeMaxEntries, 128); + assertEquals(defaultNodeDupTreeMaxEntries, 128); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + /* Do updates on each persistent and mutable config. */ + + /* Check whether BtreeComparator setting is persisted. */ + dbConfig.setOverrideBtreeComparator(true); + dbConfig.setBtreeComparator(TestComparator.class); + DatabaseConfig newConfig = setAndGetDbConfig(env, dbConfig, dbName); + assertTrue(newConfig.getBtreeComparator() instanceof TestComparator); + + /* Check whether DuplicateComparator setting is persisted. */ + dbConfig.setOverrideDuplicateComparator(true); + dbConfig.setDuplicateComparator(new TestSerialComparator()); + newConfig = setAndGetDbConfig(env, dbConfig, dbName); + assertTrue(newConfig.getDuplicateComparator() instanceof + TestSerialComparator); + + /* Check whether KeyPrefixing setting is persisted. */ + dbConfig.setKeyPrefixing(true); + newConfig = setAndGetDbConfig(env, dbConfig, dbName); + assertTrue(newConfig.getKeyPrefixing()); + + /* Check whether NodeMaxEntries setting is persisted. */ + dbConfig.setNodeMaxEntries(512); + newConfig = setAndGetDbConfig(env, dbConfig, dbName); + assertTrue(newConfig.getNodeMaxEntries() == 512); + + close(env); + } + + /* + * This method will: + * 1. apply the modified DatabaseConfig to the database. + * 2. close the database and do a sync to make sure the new configuration + * is written to the log. + * 3. open the database with a useExisting config and return the current + * DatabaseConfig. + */ + private DatabaseConfig setAndGetDbConfig(Environment env, + DatabaseConfig dbConfig, + String dbName) + throws Exception { + + Database db = env.openDatabase(null, "foo", dbConfig); + db.close(); + + env.sync(); + + /* + * Open with the useExisting config to see what attributes have been + * persisted. 
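+     * (setUseExistingConfig(true) asks openDatabase to return the
+     * configuration already stored for the database rather than applying
+     * the supplied attributes.)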
+ */ + DatabaseConfig newConfig = new DatabaseConfig(); + newConfig.setReadOnly(true); + newConfig.setTransactional(true); + newConfig.setUseExistingConfig(true); + + db = env.openDatabase(null, dbName, newConfig); + newConfig = db.getConfig(); + db.close(); + + return newConfig; + } + + /* + * This Comparator can't be instantiated because it's private and not + * static. + */ + private class BadComparator1 implements Comparator { + public BadComparator1(int foo) { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + } + + /* + * This Comparator can't be instantiated because it doesn't have zero + * parameter constructor. + */ + public static class BadComparator2 implements Comparator { + public BadComparator2(int i) { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + } + + /* + * OK comparator for setting comparators. + */ + public static class TestComparator implements Comparator { + public TestComparator() { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + } + + /* + * OK comparator for setting comparators. + */ + public static class TestComparator2 implements Comparator { + public TestComparator2() { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + } + + /* + * OK comparator for setting comparators. + */ + public static class TestComparator3 implements Comparator { + public TestComparator3() { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + } + + /* + * This Comparator can't be serialized because it's not serializable. + */ + public class BadSerialComparator1 implements Comparator { + + public BadSerialComparator1() { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + } + + /* + * This Comparator can't be serialized because it contains a reference to + * an object that's not serializable. + */ + @SuppressWarnings("serial") + public class BadSerialComparator2 implements Comparator, + Serializable { + + private final BadSerialComparator1 o = new BadSerialComparator1(); + + public BadSerialComparator2() { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + } + + /* + * OK comparator for setting comparators -- private class, private + * constructor, and serializable fields are allowed. + */ + @SuppressWarnings("serial") + private static class TestSerialComparator + implements Comparator, Serializable { + + private final String s = "sss"; + + private TestSerialComparator() { + } + + public int compare(byte[] o1, byte[] o2) { + return 0; + } + + @Override + public boolean equals(Object other) { + TestSerialComparator o = (TestSerialComparator) other; + return s.equals(o.s); + } + } + + /* + * OK comparator for setting comparators. 
+
+    /*
+     * OK comparator for setting comparators.
+     */
+    @SuppressWarnings("serial")
+    public static class TestSerialComparator2
+        implements Comparator<byte[]>, Serializable {
+
+        public int compare(byte[] o1, byte[] o2) {
+            return 0;
+        }
+    }
+
+    public static class SecKeyCreator1 implements SecondaryKeyCreator {
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          DatabaseEntry result) {
+            return true;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (o == null) {
+                return false;
+            }
+            return (o.getClass() == getClass());
+        }
+    }
+
+    public static class SecKeyCreator2 implements SecondaryKeyCreator {
+        public boolean createSecondaryKey(SecondaryDatabase secondary,
+                                          DatabaseEntry key,
+                                          DatabaseEntry data,
+                                          DatabaseEntry result) {
+            return true;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (o == null) {
+                return false;
+            }
+            return (o.getClass() == getClass());
+        }
+    }
+
+    public static class SecMultiKeyCreator1
+        implements SecondaryMultiKeyCreator {
+        public void createSecondaryKeys(SecondaryDatabase secondary,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        Set<DatabaseEntry> results) {
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (o == null) {
+                return false;
+            }
+            return (o.getClass() == getClass());
+        }
+    }
+
+    public static class ForeignKeyNullifier1 implements ForeignKeyNullifier {
+        public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                         DatabaseEntry data) {
+            return true;
+        }
+    }
+
+    public static class ForeignMultiKeyNullifier1
+        implements ForeignMultiKeyNullifier {
+        public boolean nullifyForeignKey(SecondaryDatabase secondary,
+                                         DatabaseEntry key,
+                                         DatabaseEntry data,
+                                         DatabaseEntry secKey) {
+            return true;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/DatabaseEntryTest.java b/test/com/sleepycat/je/DatabaseEntryTest.java
new file mode 100644
index 0000000..a2f2e8e
--- /dev/null
+++ b/test/com/sleepycat/je/DatabaseEntryTest.java
@@ -0,0 +1,343 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.Arrays;
+
+import org.junit.Test;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.DualTestCase;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+
+public class DatabaseEntryTest extends DualTestCase {
+
+    private final File envHome;
+    private Environment env;
+    private Database db;
+
+    public DatabaseEntryTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @Test
+    public void testBasic() {
+        /* Constructor that takes a byte array.
*/ + int size = 10; + byte[] foo = new byte[size]; + byte val = 1; + Arrays.fill(foo, val); + + DatabaseEntry dbtA = new DatabaseEntry(foo); + assertEquals(foo.length, dbtA.getSize()); + assertTrue(Arrays.equals(foo, dbtA.getData())); + + /* Set the data to null */ + dbtA.setData(null); + assertEquals(0, dbtA.getSize()); + assertFalse(Arrays.equals(foo, dbtA.getData())); + + /* Constructor that sets the data later */ + DatabaseEntry dbtLater = new DatabaseEntry(); + assertTrue(dbtLater.getData() == null); + assertEquals(0, dbtLater.getSize()); + dbtLater.setData(foo); + assertTrue(Arrays.equals(foo, dbtLater.getData())); + + /* Set offset, then reset data and offset should be reset. */ + DatabaseEntry dbtOffset = new DatabaseEntry(foo, 1, 1); + assertEquals(1, dbtOffset.getOffset()); + assertEquals(1, dbtOffset.getSize()); + dbtOffset.setData(foo); + assertEquals(0, dbtOffset.getOffset()); + assertEquals(foo.length, dbtOffset.getSize()); + } + + @Test + public void testOffset() + throws DatabaseException { + + final int N_BYTES = 30; + + openDb(false); + + DatabaseEntry originalKey = new DatabaseEntry(new byte[N_BYTES]); + DatabaseEntry originalData = new DatabaseEntry(new byte[N_BYTES]); + for (int i = 0; i < N_BYTES; i++) { + originalKey.getData()[i] = (byte) i; + originalData.getData()[i] = (byte) i; + } + + originalKey.setSize(10); + originalKey.setOffset(10); + originalData.setSize(10); + originalData.setOffset(10); + + db.put(null, originalKey, originalData); + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, CursorConfig.DEFAULT); + + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT)); + + assertEquals(0, foundKey.getOffset()); + assertEquals(0, foundData.getOffset()); + assertEquals(10, foundKey.getSize()); + assertEquals(10, foundData.getSize()); + for (int i = 0; i < 10; i++) { + assertEquals(i + 10, foundKey.getData()[i]); + assertEquals(i + 10, foundData.getData()[i]); + } + + cursor.close(); + txn.commit(); + closeDb(); + } + + @Test + public void testPartial() + throws DatabaseException { + + openDb(false); + + DatabaseEntry originalKey = new DatabaseEntry(new byte[20]); + DatabaseEntry originalData = new DatabaseEntry(new byte[20]); + for (int i = 0; i < 20; i++) { + originalKey.getData()[i] = (byte) i; + originalData.getData()[i] = (byte) i; + } + + originalData.setPartial(true); + originalData.setPartialLength(10); + originalData.setPartialOffset(10); + + db.put(null, originalKey, originalData); + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, CursorConfig.DEFAULT); + + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT)); + + assertEquals(0, foundKey.getOffset()); + assertEquals(20, foundKey.getSize()); + for (int i = 0; i < 20; i++) { + assertEquals(i, foundKey.getData()[i]); + } + + assertEquals(0, foundData.getOffset()); + assertEquals(30, foundData.getSize()); + for (int i = 0; i < 10; i++) { + assertEquals(0, foundData.getData()[i]); + } + for (int i = 0; i < 20; i++) { + assertEquals(i, foundData.getData()[i + 10]); + } + + foundKey.setPartial(5, 10, true); + foundData.setPartial(5, 20, true); + + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT)); + 
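+        /*
+         * A partial read returns the window [offset, offset + length) of
+         * the stored record: at most 'length' bytes are copied into the
+         * entry, and the entry's own offset is reset to 0. The stored key
+         * is 20 bytes and the stored data 30 bytes here, so the key window
+         * (5, 10) and the data window (5, 20) yield the sizes and contents
+         * asserted below.
+         */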
+        assertEquals(0, foundKey.getOffset());
+        assertEquals(10, foundKey.getSize());
+        for (int i = 0; i < 10; i++) {
+            assertEquals(i + 5, foundKey.getData()[i]);
+        }
+
+        assertEquals(0, foundData.getOffset());
+        assertEquals(20, foundData.getSize());
+        for (int i = 0; i < 5; i++) {
+            assertEquals(0, foundData.getData()[i]);
+        }
+        for (int i = 0; i < 15; i++) {
+            assertEquals(i, foundData.getData()[i + 5]);
+        }
+
+        /* Check that a partial key is not allowed on put(). */
+
+        originalKey.setPartial(true);
+        originalKey.setPartialLength(10);
+        originalKey.setPartialOffset(10);
+
+        try {
+            db.put(null, originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            db.putNoOverwrite(null, originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            db.putNoDupData(null, originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+
+        try {
+            cursor.put(originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            cursor.putNoOverwrite(originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+        try {
+            cursor.putNoDupData(originalKey, originalData);
+            fail();
+        } catch (IllegalArgumentException expected) {}
+
+        cursor.close();
+        txn.commit();
+        closeDb();
+    }
+
+    @Test
+    public void testPartialCursorPuts()
+        throws DatabaseException {
+
+        openDb(false);
+
+        DatabaseEntry originalKey = new DatabaseEntry(new byte[20]);
+        DatabaseEntry originalData = new DatabaseEntry(new byte[20]);
+        for (int i = 0; i < 20; i++) {
+            originalKey.getData()[i] = (byte) i;
+            originalData.getData()[i] = (byte) i;
+        }
+
+        /* Put 20 bytes of key and data. */
+        db.put(null, originalKey, originalData);
+
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, CursorConfig.DEFAULT);
+
+        DatabaseEntry foundKey = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(foundKey, foundData,
+                                     LockMode.DEFAULT));
+
+        assertEquals(0, foundKey.getOffset());
+        assertEquals(20, foundKey.getSize());
+        for (int i = 0; i < 20; i++) {
+            assertEquals(i, foundKey.getData()[i]);
+        }
+
+        assertEquals(0, foundData.getOffset());
+        assertEquals(20, foundData.getSize());
+
+        for (int i = 0; i < 20; i++) {
+            assertEquals(i, foundData.getData()[i]);
+        }
+
+        for (int i = 0; i < 10; i++) {
+            foundData.getData()[i] = (byte) (i + 50);
+        }
+
+        foundData.setPartial(true);
+        foundData.setPartialLength(10);
+        foundData.setPartialOffset(10);
+
+        cursor.putCurrent(foundData);
+
+        foundData = new DatabaseEntry();
+
+        assertEquals(OperationStatus.SUCCESS,
+                     cursor.getFirst(foundKey, foundData,
+                                     LockMode.DEFAULT));
+        assertEquals(0, foundKey.getOffset());
+        assertEquals(20, foundKey.getSize());
+        assertEquals(0, foundData.getOffset());
+        assertEquals(30, foundData.getSize());
+        for (int i = 0; i < 10; i++) {
+            assertEquals(i, foundData.getData()[i]);
+            assertEquals(i + 50, foundData.getData()[i + 10]);
+            assertEquals(i + 10, foundData.getData()[i + 20]);
+        }
+
+        cursor.close();
+        txn.commit();
+        closeDb();
+    }
+
+    @Test
+    public void testToString() {
+        DatabaseEntry entry = new DatabaseEntry(new byte[] {1, 2, 3}, 1, 2);
+        String s1 = entry.toString();
+        entry.setPartial(3, 4, true);
+        String s2 = entry.toString();
+
+        /*
+         * Normally leave this disabled. Enable it to manually look at the
+         * toString output and ensure it is valid XML.
+ */ + if (false) { + System.out.println(s1); + System.out.println(s2); + } + } + + private void openDb(boolean dups) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + "1024"); + envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(), + "true"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(dups); + db = env.openDatabase(null, "testDB", dbConfig); + } + + private void closeDb() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + close(env); + env = null; + } + } +} diff --git a/test/com/sleepycat/je/DatabaseTest.java b/test/com/sleepycat/je/DatabaseTest.java new file mode 100644 index 0000000..a7dfdf0 --- /dev/null +++ b/test/com/sleepycat/je/DatabaseTest.java @@ -0,0 +1,2863 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BINS_BYLEVEL; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BIN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_BIN_ENTRIES_HISTOGRAM; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_DELETED_LN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_INS_BYLEVEL; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_IN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_LN_COUNT; +import static com.sleepycat.je.dbi.BTreeStatDefinition.BTREE_MAINTREE_MAXDEPTH; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Arrays; +import java.util.Locale; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.DatabasePreemptedException; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.JVMSystemUtils; +import 
com.sleepycat.je.utilint.LongArrayStat;
+import com.sleepycat.je.utilint.LongStat;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.util.test.SharedTestUtils;
+
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Test;
+
+/**
+ * Basic database operations, excluding configuration testing.
+ */
+public class DatabaseTest extends DualTestCase {
+    private static final boolean DEBUG = false;
+    private static final int NUM_RECS = 257;
+    private static final int NUM_DUPS = 10;
+
+    private final File envHome;
+    private Environment env;
+
+    private boolean runBtreeVerifier = true;
+    private boolean runINCompressor = true;
+
+    public DatabaseTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @After
+    public void tearDown()
+        throws Exception {
+
+        if (env != null) {
+            try {
+                env.close();
+            } finally {
+                env = null;
+            }
+        }
+        super.tearDown();
+    }
+
+    /**
+     * Make sure we can't create a transactional cursor on a
+     * non-transactional database.
+     */
+    @Test
+    public void testCursor()
+        throws Exception {
+
+        Environment txnalEnv = null;
+        Database nonTxnalDb = null;
+        Cursor txnalCursor = null;
+        Transaction txn = null;
+
+        try {
+
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            txnalEnv = new Environment(envHome, envConfig);
+
+            // Make a db and open it
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(false);
+            nonTxnalDb = txnalEnv.openDatabase(null, "testDB", dbConfig);
+
+            // We should not be able to open a txnal cursor.
+            txn = txnalEnv.beginTransaction(null, null);
+            try {
+                txnalCursor = nonTxnalDb.openCursor(txn, null);
+                fail("Opening a txnal cursor on a nontxnal db is invalid.");
+            } catch (IllegalArgumentException e) {
+                // expected
+            }
+        } finally {
+            if (txn != null) {
+                txn.abort();
+            }
+            if (txnalCursor != null) {
+                txnalCursor.close();
+            }
+            if (nonTxnalDb != null) {
+                nonTxnalDb.close();
+            }
+            if (txnalEnv != null) {
+                txnalEnv.close();
+            }
+        }
+    }
+
+    @Test
+    public void testWackyLocalesSR18504()
+        throws Throwable {
+
+        Locale currentLocale = Locale.getDefault();
+        Database myDb = null;
+        try {
+            Locale.setDefault(new Locale("tr", "TR"));
+            myDb = initEnvAndDb(true, true, false, true, false, null);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            Locale.setDefault(currentLocale);
+            /* initEnvAndDb may have thrown before myDb was assigned. */
+            if (myDb != null) {
+                myDb.close();
+            }
+            close(env);
+        }
+    }
+
+    @Test
+    public void testPutExisting()
+        throws Throwable {
+
+        try {
+            Database myDb = initEnvAndDb(true, true, false, true, false, null);
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            DatabaseEntry getData = new DatabaseEntry();
+
+            Transaction txn = env.beginTransaction(null, null);
+            OperationResult r;
+
+            for (int i = NUM_RECS; i > 0; i--) {
+
+                key.setData(TestUtils.getTestArray(i));
+                data.setData(TestUtils.getTestArray(i));
+
+                r = myDb.put(txn, key, data, Put.OVERWRITE, null);
+                assertNotNull(r);
+                assertFalse(r.isUpdate());
+
+                r = myDb.get(txn, key, getData, Get.SEARCH, null);
+                assertNotNull(r);
+                assertFalse(r.isUpdate());
+                assertTrue(data.equals(getData));
+
+                r = myDb.put(txn, key, data, Put.OVERWRITE, null);
+                assertNotNull(r);
+                assertTrue(r.isUpdate());
+
+                r = myDb.get(txn, key, getData, Get.SEARCH_BOTH, null);
+                assertNotNull(r);
+                assertFalse(r.isUpdate());
+                assertTrue(data.equals(getData));
+            }
+
+            txn.commit();
+            myDb.close();
+            close(env);
+        } catch (Throwable t) {
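+            /*
+             * Print the stack trace before rethrowing so the underlying
+             * cause is visible in the test output.
+             */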
t.printStackTrace(); + throw t; + } + } + + /* + * Test that zero length data always returns the same (static) byte[]. + */ + @Test + public void testZeroLengthData() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry getData = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + byte[] appZLBA = new byte[0]; + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(appZLBA); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + assertEquals(OperationStatus.SUCCESS, + myDb.get(txn, key, getData, LockMode.DEFAULT)); + assertFalse(getData.getData() == appZLBA); + assertTrue(getData.getData() == + LogUtils.ZERO_LENGTH_BYTE_ARRAY); + assertEquals(0, Key.compareKeys(data.getData(), + getData.getData(), null)); + } + txn.commit(); + myDb.close(); + close(env); + + /* + * Read back from the log. + */ + + myDb = initEnvAndDb(true, true, false, true, false, null); + key = new DatabaseEntry(); + data = new DatabaseEntry(); + getData = new DatabaseEntry(); + txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.get(txn, key, getData, LockMode.DEFAULT)); + assertTrue(getData.getData() == + LogUtils.ZERO_LENGTH_BYTE_ARRAY); + } + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDeleteAbort() + throws Throwable { + + try { + /* Init the Environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setTxnTimeout(5, TimeUnit.SECONDS); + envConfig.setLockTimeout(5, TimeUnit.SECONDS); + envConfig.setTxnSerializableIsolation(false); + + env = create(envHome, envConfig); + + /* Open a database and insert some data. 
*/ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database myDb = env.openDatabase(null, "testDB", dbConfig); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + txn.commit(); + int delkey = NUM_RECS/2; + DeleteIt deleteIt = new DeleteIt(delkey, env, myDb); + txn = env.beginTransaction(null, null); + key.setData(TestUtils.getTestArray(delkey)); + data.setData(TestUtils.getTestArray(delkey)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(txn, key)); + Thread t1 = new Thread(deleteIt); + t1.start(); + Thread.sleep(1000); + txn.abort(); + t1.join(); + assertEquals(OperationStatus.SUCCESS, + deleteIt.getResult()); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + + @Test + public void testDeleteNonDup() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry getData = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(txn, key)); + OperationStatus status = + myDb.get(txn, key, getData, LockMode.DEFAULT); + if (status != OperationStatus.KEYEMPTY && + status != OperationStatus.NOTFOUND) { + fail("invalid Database.get return: " + status); + } + assertEquals(OperationStatus.NOTFOUND, + myDb.delete(txn, key)); + } + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDeleteDup() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry getData = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + for (int j = 0; j < NUM_DUPS; j++) { + data.setData(TestUtils.getTestArray(i + j)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + } + txn.commit(); + + txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(txn, key)); + OperationStatus status = + myDb.get(txn, key, getData, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + assertEquals(OperationStatus.NOTFOUND, myDb.delete(txn, key)); + } + txn.commit(); + myDb.close(); + close(env); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /* Remove until 14264 is resolved. 
+ public void XXtestDeleteDupWithData() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry getData = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + for (int j = 0; j < NUM_DUPS; j++) { + data.setData(TestUtils.getTestArray(i + j)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + } + txn.commit(); + + txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + for (int j = 0; j < NUM_DUPS; j++) { + data.setData(TestUtils.getTestArray(i + j)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(txn, key, data)); + OperationStatus status = + myDb.getSearchBoth(txn, key, data, LockMode.DEFAULT); + if (status != OperationStatus.KEYEMPTY && + status != OperationStatus.NOTFOUND) { + fail("invalid Database.get return"); + } + assertEquals(OperationStatus.NOTFOUND, + myDb.delete(txn, key, data)); + } + } + txn.commit(); + myDb.close(); + env.close(); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + public void XXtestDeleteDupWithSingleRecord() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry getData = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + txn.commit(); + + txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(txn, key, data)); + OperationStatus status = + myDb.getSearchBoth(txn, key, data, LockMode.DEFAULT); + if (status != OperationStatus.KEYEMPTY && + status != OperationStatus.NOTFOUND) { + fail("invalid Database.get return"); + } + assertEquals(OperationStatus.NOTFOUND, + myDb.delete(txn, key, data)); + } + txn.commit(); + myDb.close(); + env.close(); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + */ + @Test + public void testPutDuplicate() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + data.setData(TestUtils.getTestArray(i * 2)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testPutNoDupData() + throws Throwable { + try { + Database myDb = initEnvAndDb(true, true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = 
env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.putNoDupData(txn, key, data)); + assertEquals(OperationStatus.KEYEXIST, + myDb.putNoDupData(txn, key, data)); + data.setData(TestUtils.getTestArray(i+1)); + assertEquals(OperationStatus.SUCCESS, + myDb.putNoDupData(txn, key, data)); + } + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testPutNoOverwriteInANoDupDb() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.putNoOverwrite(txn, key, data)); + assertEquals(OperationStatus.KEYEXIST, + myDb.putNoOverwrite(txn, key, data)); + } + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testPutNoOverwriteInADupDbTxn() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = NUM_RECS; i > 0; i--) { + Transaction txn1 = env.beginTransaction(null, null); + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.putNoOverwrite(txn1, key, data)); + assertEquals(OperationStatus.KEYEXIST, + myDb.putNoOverwrite(txn1, key, data)); + data.setData(TestUtils.getTestArray(i << 1)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn1, key, data)); + data.setData(TestUtils.getTestArray(i << 2)); + assertEquals(OperationStatus.KEYEXIST, + myDb.putNoOverwrite(txn1, key, data)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(txn1, key)); + assertEquals(OperationStatus.SUCCESS, + myDb.putNoOverwrite(txn1, key, data)); + txn1.commit(); + } + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testPutNoOverwriteInADupDbNoTxn() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, false, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.putNoOverwrite(null, key, data)); + assertEquals(OperationStatus.KEYEXIST, + myDb.putNoOverwrite(null, key, data)); + data.setData(TestUtils.getTestArray(i << 1)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(null, key, data)); + data.setData(TestUtils.getTestArray(i << 2)); + assertEquals(OperationStatus.KEYEXIST, + myDb.putNoOverwrite(null, key, data)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(null, key)); + assertEquals(OperationStatus.SUCCESS, + myDb.putNoOverwrite(null, key, data)); + } + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDatabaseCount() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, false, null); + DatabaseEntry key = 
new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + + long count = myDb.count(); + assertEquals(NUM_RECS, count); + + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDeferredWriteDatabaseCount() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, true, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(null, key, data)); + } + + long count = myDb.count(); + assertEquals(NUM_RECS, count); + + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testStat() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + + BtreeStats stat = (BtreeStats) + myDb.getStats(TestUtils.FAST_STATS); + + assertEquals(0, stat.getInternalNodeCount()); + assertEquals(0, stat.getBottomInternalNodeCount()); + assertEquals(0, stat.getLeafNodeCount()); + assertEquals(0, stat.getDeletedLeafNodeCount()); + assertEquals(0, stat.getMainTreeMaxDepth()); + + stat = (BtreeStats) myDb.getStats(null); + + assertEquals(15, stat.getInternalNodeCount()); + assertEquals(52, stat.getBottomInternalNodeCount()); + assertEquals(NUM_RECS, stat.getLeafNodeCount()); + assertEquals(0, stat.getDeletedLeafNodeCount()); + assertEquals(4, stat.getMainTreeMaxDepth()); + + long[] levelsTest = new long[]{ 12, 23, 34, 45, 56, + 67, 78, 89, 90, 0 }; + + StatGroup group1 = new StatGroup("test1", "test1"); + LongStat stat1 = new LongStat(group1, BTREE_BIN_COUNT, 20); + new LongStat(group1, BTREE_DELETED_LN_COUNT, 40); + LongStat stat2 = new LongStat(group1, BTREE_IN_COUNT, 60); + new LongStat(group1, BTREE_LN_COUNT, 80); + new IntStat(group1, BTREE_MAINTREE_MAXDEPTH, 5); + new LongArrayStat(group1, BTREE_INS_BYLEVEL, levelsTest); + new LongArrayStat(group1, BTREE_BINS_BYLEVEL, levelsTest); + new LongArrayStat(group1, BTREE_BIN_ENTRIES_HISTOGRAM, levelsTest); + + BtreeStats bts = new BtreeStats(); + bts.setDbImplStats(group1); + + assertEquals(20, bts.getBottomInternalNodeCount()); + assertEquals(40, bts.getDeletedLeafNodeCount()); + assertEquals(60, bts.getInternalNodeCount()); + assertEquals(80, bts.getLeafNodeCount()); + assertEquals(5, bts.getMainTreeMaxDepth()); + + for (int i = 0; i < levelsTest.length; i++) { + assertEquals(levelsTest[i], bts.getINsByLevel()[i]); + } + + for (int i = 0; i < levelsTest.length; i++) { + assertEquals(levelsTest[i], bts.getBINsByLevel()[i]); + } + + for (int i = 0; i < levelsTest.length; i++) { + assertEquals(levelsTest[i], bts.getBINEntriesHistogram()[i]); + } + + bts.toString(); + + stat1.set(0L); + stat2.set(0L); + + assertEquals(0, bts.getBottomInternalNodeCount()); 
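+            /*
+             * Note: stat1 and stat2 are live handles into group1, the
+             * StatGroup backing 'bts', so zeroing them above is immediately
+             * visible through the BtreeStats getters checked here.
+             */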
+ assertEquals(0, bts.getInternalNodeCount()); + bts.toString(); + + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDatabaseCountEmptyDB() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, false, null); + + long count = myDb.count(); + assertEquals(0, count); + + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDatabaseCountWithDeletedEntries() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, false, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + int deletedCount = 0; + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + if ((i % 5) == 0) { + myDb.delete(txn, key); + deletedCount++; + } + } + + long count = myDb.count(); + assertEquals(NUM_RECS - deletedCount, count); + + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testStatDups() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + for (int j = 0; j < 10; j++) { + data.setData(TestUtils.getTestArray(i + j)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + } + + BtreeStats stat = (BtreeStats) + myDb.getStats(TestUtils.FAST_STATS); + + assertEquals(0, stat.getInternalNodeCount()); + assertEquals(0, stat.getDuplicateInternalNodeCount()); + assertEquals(0, stat.getBottomInternalNodeCount()); + assertEquals(0, stat.getDuplicateBottomInternalNodeCount()); + assertEquals(0, stat.getLeafNodeCount()); + assertEquals(0, stat.getDeletedLeafNodeCount()); + assertEquals(0, stat.getDupCountLeafNodeCount()); + assertEquals(0, stat.getMainTreeMaxDepth()); + assertEquals(0, stat.getDuplicateTreeMaxDepth()); + + stat = (BtreeStats) myDb.getStats(null); + + assertEquals(383, stat.getInternalNodeCount()); + assertEquals(0, stat.getDuplicateInternalNodeCount()); + assertEquals(771, stat.getBottomInternalNodeCount()); + assertEquals(0, stat.getDuplicateBottomInternalNodeCount()); + assertEquals(2570, stat.getLeafNodeCount()); + assertEquals(0, stat.getDeletedLeafNodeCount()); + assertEquals(0, stat.getDupCountLeafNodeCount()); + assertEquals(7, stat.getMainTreeMaxDepth()); + assertEquals(0, stat.getDuplicateTreeMaxDepth()); + + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDatabaseCountDups() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + 
myDb.put(txn, key, data)); + for (int j = 0; j < 10; j++) { + data.setData(TestUtils.getTestArray(i + j)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + } + + long count = myDb.count(); + + assertEquals(2570, count); + + txn.commit(); + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDeferredWriteDatabaseCountDups() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true, true, true, true, true, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(null, key, data)); + for (int j = 0; j < 10; j++) { + data.setData(TestUtils.getTestArray(i + j)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(null, key, data)); + } + } + + long count = myDb.count(); + + assertEquals(2570, count); + + myDb.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testStatDeletes() + throws Throwable { + + deleteTestInternal(1, 2, 0, 2); + tearDown(); + deleteTestInternal(2, 2, 2, 2); + tearDown(); + deleteTestInternal(10, 2, 10, 10); + tearDown(); + deleteTestInternal(11, 2, 10, 12); + } + + private void deleteTestInternal(int numRecs, + int numDupRecs, + int expectedLNs, + int expectedDeletedLNs) + throws Throwable { + + runINCompressor = false; + TestUtils.removeLogFiles("Setup", envHome, false); + Database myDb = initEnvAndDb(true, true, true, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = numRecs; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + for (int j = 0; j < numDupRecs; j++) { + data.setData(TestUtils.getTestArray(i + j)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + } + + for (int i = numRecs; i > 0; i -= 2) { + key.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.delete(txn, key)); + } + + txn.commit(); + + BtreeStats stat = (BtreeStats) myDb.getStats(null); + + assertEquals(expectedLNs, stat.getLeafNodeCount()); + assertEquals(expectedDeletedLNs, stat.getDeletedLeafNodeCount()); + assertEquals(0, stat.getDupCountLeafNodeCount()); + + myDb.close(); + close(env); + } + + /** + * Test preload of all records into the main cache. + */ + @Test + public void testPreloadAllInCache() + throws Throwable { + + doPreloadAllInCache(false); + } + + /** + * Test preload of all records into cache, using an off-heap cache. + */ + @Test + public void testPreloadAllInCacheOffHeap() + throws Throwable { + + Assume.assumeTrue(!JVMSystemUtils.ZING_JVM); + + System.out.println("testPreloadAllInCacheOffHeap disabled [#25594]"); + +// doPreloadAllInCache(true); + } + + /** + * Inserts and preloads 100,000 records with key size 100, data size 100, + * no dups. LNs are not embedded. + * + * DbCacheSize requires: + * 3,158,477 env overhead + * + * 1. java DbCacheSize -records 100000 -key 100 -data 100 + * + * 14,567,896 Internal nodes only + * 29,584,536 Internal nodes and leaf nodes + * + * 2. 
java DbCacheSize -records 100000 -key 100 -data 100 \ + * -offheap -maincache 6558477 + * + * 6,558,477 = 3,400,000 + 3,158,477 + * + * 3,400,000 Internal nodes only: MAIN cache + * 8,828,128 Internal nodes only: OFF-HEAP cache + * 3,400,000 Internal nodes and leaf nodes: MAIN cache + * 21,671,816 Internal nodes and leaf nodes: OFF-HEAP cache + * + * 3. java -XX:-UseCompressedOops DbCacheSize \ # for Zing + * -records 100000 -key 100 -data 100 + * + * 16,157,464 Internal nodes only + * 33,368,152 Internal nodes and leaf nodes + */ + private void doPreloadAllInCache(final boolean useOffHeapCache) + throws Throwable { + + final int nRecs = 100000; + final long mainDataSize; + final long offHeapDataSize; + final long mainDataSizeNoLNs; + final long offHeapDataSizeNoLNs; + + if (JVMSystemUtils.ZING_JVM) { + mainDataSize = 34000000; + offHeapDataSize = 0; + mainDataSizeNoLNs = 17000000; + offHeapDataSizeNoLNs = 0; + } else if (useOffHeapCache) { + mainDataSize = 3400000; + offHeapDataSize = 21800000; + mainDataSizeNoLNs = 3400000; + offHeapDataSizeNoLNs = 8900000; + } else { + mainDataSize = 30000000; + offHeapDataSize = 0; + mainDataSizeNoLNs = 14600000; + offHeapDataSizeNoLNs = 0; + } + + /* Use full size cache to do insertions. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setOffHeapCacheSize(offHeapDataSize); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.STATS_COLLECT, "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + env = new Environment(envHome, envConfig); + + TestUtils.adjustCacheSize(env, mainDataSize); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + Database db = env.openDatabase(null, "foo", dbConfig); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + int nBins = insert100ByteTestData(db, nRecs); + verify100ByteTestData(db, nRecs); + + /* + * Re-open env with full cache sizes to hold all nodes. + */ + db.close(); + close(env); + env = null; + envConfig.setAllowCreate(false); + dbConfig.setAllowCreate(false); + envConfig.setCacheSize(mainDataSize); + envConfig.setOffHeapCacheSize(offHeapDataSize); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, "foo", dbConfig); + + TestUtils.adjustCacheSize(env, mainDataSize); + + /* Expect very little in cache. */ + EnvironmentStats stats = env.getStats(null); + assertEquals(0, stats.getOffHeapCachedBINs()); + assertEquals(0, stats.getOffHeapCachedLNs()); + assertTrue(stats.getNCachedBINs() < 10); + + /* Preload with LNs and expect everything in cache. */ + PreloadConfig preConfig = new PreloadConfig(); + preConfig.setLoadLNs(true); + db.preload(preConfig); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* Strip LNs and repeat preload. */ + mutateBINs(db, false /*mutateToDelta*/, true /*stripLNs*/); + db.preload(preConfig); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* Mutate BINs to BIN-deltas and repeat preload. 
*/ + mutateBINs(db, true /*mutateToDelta*/, false /*stripLNs*/); + db.preload(preConfig); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* Both mutate and strip LNs, and repeat preload. */ + mutateBINs(db, true /*mutateToDelta*/, true /*stripLNs*/); + db.preload(preConfig); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* Preload with everything already in cache. */ + db.preload(preConfig); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* Verify final result. */ + verify100ByteTestData(db, nRecs); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* + * Re-open env with "internal nodes only" cache sizes. + */ + db.close(); + close(env); + env = null; + envConfig.setAllowCreate(false); + dbConfig.setAllowCreate(false); + envConfig.setCacheSize(mainDataSizeNoLNs); + envConfig.setOffHeapCacheSize(offHeapDataSizeNoLNs); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, "foo", dbConfig); + + TestUtils.adjustCacheSize(env, mainDataSizeNoLNs); + + /* Expect very little in cache. */ + stats = env.getStats(StatsConfig.CLEAR); + assertEquals(0, stats.getOffHeapCachedBINs()); + assertEquals(0, stats.getOffHeapCachedLNs()); + assertTrue(stats.getNCachedBINs() < 10); + + /* Preload without LNs and expect everything except LNs in cache. */ + preConfig.setLoadLNs(false); + db.preload(preConfig); + expectCachedLNsAndBINs(db, 0, nBins); + + /* Mutate BINs to BIN-deltas and repeat preload. */ + mutateBINs(db, true /*mutateToDelta*/, false /*stripLNs*/); + db.preload(preConfig); + expectCachedLNsAndBINs(db, 0, nBins); + + /* Preload with BINs already in cache. */ + db.preload(preConfig); + expectCachedLNsAndBINs(db, 0, nBins); + + /* + * Mutate config to use full cache sizes to hold all nodes. + */ + envConfig.setCacheSize(mainDataSize); + envConfig.setOffHeapCacheSize(offHeapDataSize); + env.setMutableConfig(envConfig); + + /* Preload with LNs and expect everything in cache. */ + preConfig.setLoadLNs(true); + db.preload(preConfig); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* Preload with everything already in cache. */ + db.preload(preConfig); + expectCachedLNsAndBINs(db, nRecs, nBins); + + /* Verify final result. */ + verify100ByteTestData(db, nRecs); + expectCachedLNsAndBINs(db, nRecs, nBins); + + db.close(); + close(env); + } + + private int insert100ByteTestData(Database db, int nRecs) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + /* + * Count BINs as we insert. Using the nCachedBINs stats to count the + * BINs is unreliable because the LRU will cause some cold level 2 + * INs to be evicted from main, along with their off-heap children. 
+ */ + final Cursor cursor = db.openCursor(null, null); + BIN prevBin = null; + int nBins = 0; + for (int i = 0; i < nRecs; i += 1) { + final byte[] array = get100ByteTestArray(i); + key.setData(array); + data.setData(array); + final OperationResult result = cursor.put( + key, data, Put.NO_OVERWRITE, null); + assertNotNull(result); + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + if (prevBin != bin) { + prevBin = bin; + nBins += 1; + } + } + cursor.close(); + return nBins; + } + + private void verify100ByteTestData(Database db, int nRecs) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + final Cursor cursor = db.openCursor(null, null); + for (int i = 0; i < nRecs; i += 1) { + final OperationResult result = + cursor.get(key, data, Get.NEXT, null); + assertNotNull(result); + final byte[] array = get100ByteTestArray(i); + assertTrue(Arrays.equals(array, key.getData())); + assertTrue(Arrays.equals(array, data.getData())); + } + cursor.close(); + } + + private byte[] get100ByteTestArray(int i) { + final byte[] value = TestUtils.getTestArray(i); + final byte[] array = new byte[100]; + System.arraycopy(value, 0, array, 0, value.length); + return array; + } + + private void mutateBINs(Database db, + boolean mutateToDelta, + boolean stripLNs) { + + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + final OffHeapCache ohCache = dbImpl.getEnv().getOffHeapCache(); + + for (final IN in : dbImpl.getEnv().getInMemoryINs()) { + if (in.getDatabase() != dbImpl) { + continue; + } + in.latch(); + try { + if (in.isBIN()) { + final BIN bin = (BIN) in; + if (stripLNs) { + bin.evictLNs(); + } + if (mutateToDelta && bin.canMutateToBINDelta()) { + bin.mutateToBINDelta(); + } + } else { + for (int i = 0; i < in.getNEntries(); i += 1) { + if (in.getOffHeapBINId(i) < 0) { + continue; + } + if (stripLNs || mutateToDelta) { + ohCache.stripLNs(in, i); + } + if (mutateToDelta) { + ohCache.mutateToBINDelta(in, i); + } + } + } + } finally { + in.releaseLatch(); + } + } + } + + private void expectCachedLNsAndBINs( + final Database db, + final int nExpectedLNs, + final int nExpectedBINs) { + + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + final EnvironmentImpl envImpl = dbImpl.getEnv(); + final OffHeapCache ohCache = envImpl.getOffHeapCache(); + + int nL2INs = 0; + int nMainLNs = 0; + int nMainBINs = 0; + int nMainBINDeltas = 0; + int nOhLNs = 0; + int nOhBINs = 0; + int nOhBINDeltas = 0; + + for (final IN in : dbImpl.getEnv().getInMemoryINs()) { + + /* Be certain that this node is resident. */ + in.latchNoUpdateLRU(); + final boolean resident = in.getInListResident(); + in.releaseLatch(); + + if (!resident) { + System.out.println("Not resident"); + continue; + } + + /* Ensure there are no off-heap objects in other DBs. 
*/ + if (in.isBIN()) { + final BIN bin = (BIN) in; + for (int i = 0; i < bin.getNEntries(); i += 1) { + if (bin.getOffHeapLNId(i) != 0) { + assertSame(dbImpl, in.getDatabase()); + } + } + } else { + for (int i = 0; i < in.getNEntries(); i += 1) { + final int ohBinId = in.getOffHeapBINId(i); + if (ohBinId >= 0) { + assertSame(dbImpl, in.getDatabase()); + } + } + } + + if (in.getDatabase() != dbImpl) { + continue; + } + + if (in.isBIN()) { + nMainBINs += 1; + if (in.isBINDelta(false)) { + nMainBINDeltas += 1; + } + final BIN bin = (BIN) in; + for (int i = 0; i < bin.getNEntries(); i += 1) { + if (bin.getTarget(i) != null) { + nMainLNs += 1; + } + if (bin.getOffHeapLNId(i) != 0) { + nOhLNs += 1; + } + } + } else { + if (in.getNormalizedLevel() != 2) { + continue; + } + nL2INs += 1; + for (int i = 0; i < in.getNEntries(); i += 1) { + final int ohBinId = in.getOffHeapBINId(i); + if (ohBinId < 0) { + assertNotNull(in.getTarget(i)); + continue; + } + assertNull(in.getTarget(i)); + final BIN ohBin = ohCache.loadBIN(envImpl, ohBinId); + assertTrue(ohBin.getLastFullLsn() != DbLsn.NULL_LSN); + nOhBINs += 1; + if (ohBin.isBINDelta(false)) { + nOhBINDeltas += 1; + } + for (int j = 0; j < ohBin.getNEntries(); j += 1) { + if (ohBin.getOffHeapLNId(j) != 0) { + nOhLNs += 1; + } + } + } + } + } + + /* Recalculate using Btree as a double-check. */ + int nL2INs2 = 0; + int nMainBINs2 = 0; + int nMainBINDeltas2 = 0; + int nOhBINs2 = 0; + int nOhBINDeltas2 = 0; + + final IN rootIN = dbImpl.getTree().getResidentRootIN(false); + assertTrue(rootIN.getInListResident()); + assertEquals(3, rootIN.getNormalizedLevel()); + for (int i = 0; i < rootIN.getNEntries(); i += 1) { + final IN in = (IN) rootIN.getTarget(i); + assertNotNull(in); + assertTrue(in.getInListResident()); + nL2INs2 += 1; + for (int j = 0; j < in.getNEntries(); j += 1) { + final int ohBinId = in.getOffHeapBINId(j); + final BIN bin = (BIN) in.getTarget(j); + if (bin != null) { + assertTrue(bin.getInListResident()); + assertEquals(-1, ohBinId); + nMainBINs2 += 1; + if (bin.isBINDelta(false)) { + nMainBINDeltas2 += 1; + } + } else { + assertTrue(ohBinId >= 0); + final BIN ohBin = ohCache.loadBIN(envImpl, ohBinId); + assertTrue(ohBin.getLastFullLsn() != DbLsn.NULL_LSN); + nOhBINs2 += 1; + if (ohBin.isBINDelta(false)) { + nOhBINDeltas2 += 1; + } + } + } +// System.out.println(in2.getClass().getName()); + } + + final String msg = "Cache contents" + + " nL2INs=" + nL2INs + + " nL2INs2=" + nL2INs2 + + " mainBINs=" + nMainBINs + + " mainBINDeltas=" + nMainBINDeltas + + " mainBINs2=" + nMainBINs2 + + " mainBINDeltas2=" + nMainBINDeltas2 + + " offHeapBINs=" + nOhBINs + + " offHeapBINDeltas=" + nOhBINDeltas + + " offHeapBINs2=" + nOhBINs2 + + " offHeapBINDeltas2=" + nOhBINDeltas2 + + " mainLNs=" + nMainLNs + + " offHeapLNs=" + nOhLNs; + + EnvironmentStats stats = env.getStats(null); + try { + assertEquals(msg, nL2INs, nL2INs2); + assertEquals(msg, nOhBINs, nOhBINs2); + assertEquals(msg, nOhBINDeltas, nOhBINDeltas2); + assertEquals(msg, nMainBINs, nMainBINs2); + assertEquals(msg, nMainBINDeltas, nMainBINDeltas2); + assertEquals(msg, nOhBINs, stats.getOffHeapCachedBINs()); + assertEquals(msg, nOhBINDeltas, stats.getOffHeapCachedBINDeltas()); + assertEquals(msg, nOhLNs, stats.getOffHeapCachedLNs()); + } catch (Throwable e) { + System.out.println(stats); + throw e; + } + + assertEquals(msg, 0, nMainBINDeltas); + assertEquals(msg, 0, nOhBINDeltas); + assertEquals(msg, nExpectedLNs, nMainLNs + nOhLNs); + assertEquals(msg, nExpectedBINs, nMainBINs + nOhBINs); + } + + /** 
+ * Test the cache memory limit. + */ + @Test + public void testPreloadCacheMemoryLimit() + throws Throwable { + + /* Set up a test db */ + Database myDb = initEnvAndDb(false, true, false, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = 2500; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + + /* Recover the database, restart w/no evictor. */ + long postCreateMemUsage = env.getMemoryUsage(); + INList inlist = env.getNonNullEnvImpl().getInMemoryINs(); + long postCreateResidentNodes = inlist.getSize(); + txn.commit(); + myDb.close(); + close(env); + myDb = initEnvAndDb( + true, true, false, true, false, + MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING); + + /* + * Do two evictions, because the first eviction will only strip + * LNs. We need to actually evict BINS because preload only pulls in + * IN/BINs + */ + env.evictMemory(); // first eviction strips LNS. + env.evictMemory(); // second one will evict BINS + + long postEvictMemUsage = env.getMemoryUsage(); + inlist = env.getNonNullEnvImpl().getInMemoryINs(); // re-get inList + long postEvictResidentNodes = inlist.getSize(); + + /* Now preload, but not up to the full size of the db */ + PreloadConfig conf = new PreloadConfig(); + conf.setMaxBytes(92000); + PreloadStats stats = + myDb.preload(conf); /* Cache size is currently 92160. */ + + assertEquals(PreloadStatus.FILLED_CACHE, stats.getStatus()); + + long postPreloadMemUsage = env.getMemoryUsage(); + long postPreloadResidentNodes = inlist.getSize(); + + /* Now iterate to get everything back into memory */ + Cursor cursor = myDb.openCursor(null, null); + int count = 0; + OperationStatus status = cursor.getFirst(key, data, LockMode.DEFAULT); + while (status == OperationStatus.SUCCESS) { + count++; + status = cursor.getNext(key, data, LockMode.DEFAULT); + } + cursor.close(); + + long postIterationMemUsage = env.getMemoryUsage(); + long postIterationResidentNodes = inlist.getSize(); + + if (DEBUG) { + System.out.println("postCreateMemUsage: " + postCreateMemUsage); + System.out.println("postEvictMemUsage: " + postEvictMemUsage); + System.out.println("postPreloadMemUsage: " + postPreloadMemUsage); + System.out.println("postIterationMemUsage: " + + postIterationMemUsage); + System.out.println("postEvictResidentNodes: " + + postEvictResidentNodes); + System.out.println("postPreloadResidentNodes: " + + postPreloadResidentNodes); + System.out.println("postIterationResidentNodes: " + + postIterationResidentNodes); + System.out.println("postCreateResidentNodes: " + + postCreateResidentNodes); + } + + assertTrue(postEvictMemUsage < postCreateMemUsage); + assertTrue(postEvictMemUsage < postPreloadMemUsage); + assertTrue("postPreloadMemUsage=" + postPreloadMemUsage + + " postIterationMemUsage=" + postIterationMemUsage, + postPreloadMemUsage < postIterationMemUsage); + assertTrue(postIterationMemUsage <= postCreateMemUsage); + assertTrue(postEvictResidentNodes < postPreloadResidentNodes); + //assertEquals(postCreateResidentNodes, postIterationResidentNodes); + assertTrue(postCreateResidentNodes >= postIterationResidentNodes); + + VerifyConfig vcfg = new VerifyConfig(); + + vcfg.setPropagateExceptions(true); + vcfg.setAggressive(false); + vcfg.setPrintInfo(true); + vcfg.setShowProgressStream(System.out); + vcfg.setShowProgressInterval(5); + + assertEquals(true, 
vcfg.getPropagateExceptions()); + assertEquals(false, vcfg.getAggressive()); + assertEquals(true, vcfg.getPrintInfo()); + assertEquals(System.out.getClass(), + vcfg.getShowProgressStream().getClass()); + assertEquals(5, vcfg.getShowProgressInterval()); + vcfg.toString(); + + myDb.close(); + close(env); + } + + /** + * Test the internal memory limit. + */ + @Test + public void testPreloadInternalMemoryLimit() + throws Throwable { + + /* Set up a test db */ + Database myDb = initEnvAndDb(false, true, false, true, false, null); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + + for (int i = 2500; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, myDb.put(txn, key, data)); + } + + /* Recover the database, restart w/no evictor. */ + long postCreateMemUsage = env.getMemoryUsage(); + INList inlist = env.getNonNullEnvImpl().getInMemoryINs(); + long postCreateResidentNodes = inlist.getSize(); + + txn.commit(); + myDb.close(); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + boolean embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + close(env); + + /* + * Don't run the cleaner if embedded LNs. Otherwise, the cleaner + * admin overhead will tip the mem usage over the cache size, a + * situation that cannot be rectified by the env.evictMemory() calls + * below. As a result, the preload will become a noop and the assertion + * at the end of this test will fail. + */ + myDb = initEnvAndDb( + true, !embeddedLNs /*runCleaner*/, false, true, false, + MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING); + + long postReopenMemUsage = env.getMemoryUsage(); + inlist = env.getNonNullEnvImpl().getInMemoryINs(); // re-get inList + long postReopenResidentNodes = inlist.getSize(); + + // 26,624 + long minTreeMemUsage = + env.getNonNullEnvImpl().getMemoryBudget().getMinTreeMemoryUsage(); + long currTreeMemUsage = + env.getNonNullEnvImpl().getMemoryBudget().getTreeMemoryUsage(); + + /* + * Do two evictions, because the first eviction will only strip + * LNs. We need to actually evict BINS because preload only pulls in + * IN/BINs + */ + env.evictMemory(); // first eviction strips LNS. + env.evictMemory(); // second one will evict BINS + + long postEvictMemUsage = env.getMemoryUsage(); + inlist = env.getNonNullEnvImpl().getInMemoryINs(); // re-get inList + long postEvictResidentNodes = inlist.getSize(); + + /* Now preload, but not up to the full size of the db */ + PreloadConfig conf = new PreloadConfig(); + conf.setInternalMemoryLimit(9200); + + PreloadStats stats = myDb.preload(conf); /* Cache size is currently 92160. 
*/ + + if (DEBUG) { + System.out.println(); + System.out.println("postCreateResidentNodes = " + + postCreateResidentNodes); + System.out.println("postCreateMemUsage = " + + postCreateMemUsage); + + System.out.println(); + System.out.println("postReopenResidentNodes = " + + postReopenResidentNodes); + System.out.println("postReopenMemUsage = " + + postReopenMemUsage); + + System.out.println(); + System.out.println("postReopenMinTreeMemUsage = " + + minTreeMemUsage); + System.out.println("postReopenTreeMemUsage = " + + currTreeMemUsage); + + System.out.println(); + System.out.println("postEvictResidentNodes = " + + postEvictResidentNodes); + System.out.println("postEvictMemUsage = " + + postEvictMemUsage); + + System.out.println("NCountMemoryExceeded = " + + stats.getNCountMemoryExceeded()); + System.out.println("Stats : " + stats); + } + + assertTrue(stats.getNCountMemoryExceeded() > 0); + + myDb.close(); + close(env); + } + + @Test + public void testPreloadTimeLimit() + throws Throwable { + + /* Set up a test db */ + Database myDb = initEnvAndDb(false, true, false, true, false, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = 25000; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(new byte[1]); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + + /* Recover the database, restart w/no evictor. */ + long postCreateMemUsage = env.getMemoryUsage(); + INList inlist = env.getNonNullEnvImpl().getInMemoryINs(); + long postCreateResidentNodes = inlist.getSize(); + txn.commit(); + myDb.close(); + close(env); + myDb = initEnvAndDb(true, true, false, true, false, null); + + /* + * Do two evictions, because the first eviction will only strip + * LNs. We need to actually evict BINS because preload only pulls in + * IN/BINs + */ + env.evictMemory(); // first eviction strips LNS. 
+ env.evictMemory(); // second one will evict BINS + + long postEvictMemUsage = env.getMemoryUsage(); + inlist = env.getNonNullEnvImpl().getInMemoryINs(); // re-get inList + long postEvictResidentNodes = inlist.getSize(); + + /* Now preload, but not up to the full size of the db */ + PreloadConfig conf = new PreloadConfig(); + conf.setMaxMillisecs(50); + PreloadStats stats = myDb.preload(conf); + assertEquals(PreloadStatus.EXCEEDED_TIME, stats.getStatus()); + + long postPreloadMemUsage = env.getMemoryUsage(); + long postPreloadResidentNodes = inlist.getSize(); + + /* Now iterate to get everything back into memory */ + Cursor cursor = myDb.openCursor(null, null); + int count = 0; + OperationStatus status = cursor.getFirst(key, data, LockMode.DEFAULT); + while (status == OperationStatus.SUCCESS) { + count++; + status = cursor.getNext(key, data, LockMode.DEFAULT); + } + cursor.close(); + + long postIterationMemUsage = env.getMemoryUsage(); + long postIterationResidentNodes = inlist.getSize(); + + if (DEBUG) { + System.out.println("postCreateMemUsage: " + postCreateMemUsage); + System.out.println("postEvictMemUsage: " + postEvictMemUsage); + System.out.println("postPreloadMemUsage: " + postPreloadMemUsage); + System.out.println("postIterationMemUsage: " + + postIterationMemUsage); + System.out.println("postEvictResidentNodes: " + + postEvictResidentNodes); + System.out.println("postPreloadResidentNodes: " + + postPreloadResidentNodes); + System.out.println("postIterationResidentNodes: " + + postIterationResidentNodes); + System.out.println("postCreateResidentNodes: " + + postCreateResidentNodes); + } + + assertTrue(postEvictMemUsage < postCreateMemUsage); + assertTrue(postEvictMemUsage < postPreloadMemUsage); + assertTrue("postPreloadMemUsage=" + postPreloadMemUsage + + " postIterationMemUsage=" + postIterationMemUsage, + postPreloadMemUsage < postIterationMemUsage); + assertTrue(postIterationMemUsage <= postCreateMemUsage); + assertTrue(postEvictResidentNodes < postPreloadResidentNodes); + //assertEquals(postCreateResidentNodes, postIterationResidentNodes); + assertTrue(postCreateResidentNodes >= postIterationResidentNodes); + + myDb.close(); + close(env); + } + + @Test + public void testPreloadMultipleDatabases() + throws Throwable { + + /* Set up a test db */ + Database myDb1 = initEnvAndDb(false, true, false, true, false, null); + + /* Make a 2nd db and open it. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(false); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database myDb2 = env.openDatabase(null, "testDB2", dbConfig); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + for (int i = 25000; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(new byte[1]); + assertEquals(OperationStatus.SUCCESS, + myDb1.put(txn, key, data)); + assertEquals(OperationStatus.SUCCESS, + myDb2.put(txn, key, data)); + } + + /* Recover the database, restart w/no evictor. */ + long postCreateMemUsage = env.getMemoryUsage(); + INList inlist = env.getNonNullEnvImpl().getInMemoryINs(); + long postCreateResidentNodes = inlist.getSize(); + txn.commit(); + myDb2.close(); + myDb1.close(); + close(env); + myDb1 = initEnvAndDb(true, true, false, true, false, null); + /* Make a 2nd db and open it. 
*/ + dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(false); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + myDb2 = env.openDatabase(null, "testDB2", dbConfig); + + /* + * Do two evictions, because the first eviction will only strip + * LNs. We need to actually evict BINS because preload only pulls in + * IN/BINs + */ + env.evictMemory(); // first eviction strips LNS. + env.evictMemory(); // second one will evict BINS + + long postEvictMemUsage = env.getMemoryUsage(); + inlist = env.getNonNullEnvImpl().getInMemoryINs(); // re-get inList + long postEvictResidentNodes = inlist.getSize(); + + /* Now preload. */ + PreloadConfig conf = new PreloadConfig(); + PreloadStats stats = env.preload(new Database[] { myDb1, myDb2 }, conf); + + long postPreloadMemUsage = env.getMemoryUsage(); + long postPreloadResidentNodes = inlist.getSize(); + + env.evictMemory(); // first eviction strips LNS. + env.evictMemory(); // second one will evict BINS + + /* Now iterate to get everything back into memory */ + Cursor cursor = myDb1.openCursor(null, null); + int count = 0; + OperationStatus status = cursor.getFirst(key, data, LockMode.DEFAULT); + while (status == OperationStatus.SUCCESS) { + count++; + status = cursor.getNext(key, data, LockMode.DEFAULT); + } + cursor.close(); + + cursor = myDb2.openCursor(null, null); + count = 0; + status = cursor.getFirst(key, data, LockMode.DEFAULT); + while (status == OperationStatus.SUCCESS) { + count++; + status = cursor.getNext(key, data, LockMode.DEFAULT); + } + cursor.close(); + + long postIterationMemUsage = env.getMemoryUsage(); + long postIterationResidentNodes = inlist.getSize(); + + if (DEBUG) { + System.out.println("postCreateMemUsage: " + postCreateMemUsage); + System.out.println("postEvictMemUsage: " + postEvictMemUsage); + System.out.println("postPreloadMemUsage: " + postPreloadMemUsage); + System.out.println("postIterationMemUsage: " + + postIterationMemUsage); + System.out.println("postEvictResidentNodes: " + + postEvictResidentNodes); + System.out.println("postPreloadResidentNodes: " + + postPreloadResidentNodes); + System.out.println("postIterationResidentNodes: " + + postIterationResidentNodes); + System.out.println("postCreateResidentNodes: " + + postCreateResidentNodes); + } + + assertTrue(postEvictMemUsage < postCreateMemUsage); + assertTrue(postEvictMemUsage < postPreloadMemUsage); + assertTrue("postPreloadMemUsage=" + postPreloadMemUsage + + " postIterationMemUsage=" + postIterationMemUsage, + postPreloadMemUsage == postIterationMemUsage); + assertTrue(postIterationMemUsage <= postCreateMemUsage); + assertTrue(postEvictResidentNodes < postPreloadResidentNodes); + //assertEquals(postCreateResidentNodes, postIterationResidentNodes); + assertTrue(postCreateResidentNodes >= postIterationResidentNodes); + + myDb1.close(); + myDb2.close(); + close(env); + } + + @Test + public void testPreloadWithProgress() + throws Throwable { + + /* Set up a test db */ + Database myDb = initEnvAndDb(false, true, false, true, false, null); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + + for (int i = 2500; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(txn, key, data)); + } + + /* Recover the database, restart w/no evictor. 
+     */
+        long postCreateMemUsage = env.getMemoryUsage();
+        INList inlist = env.getNonNullEnvImpl().getInMemoryINs();
+        long postCreateResidentNodes = inlist.getSize();
+
+        txn.commit();
+        myDb.close();
+
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        boolean embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4);
+
+        close(env);
+
+        /*
+         * Don't run the cleaner if embedded LNs. Otherwise, the cleaner
+         * admin overhead will tip the mem usage over the cache size, a
+         * situation that cannot be rectified by the env.evictMemory() calls
+         * below. As a result, the preload will become a noop and the assertion
+         * at the end of this test will fail.
+         */
+        myDb = initEnvAndDb(
+            true, !embeddedLNs/*runCleaner*/, false, true, false,
+            MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING);
+
+        /*
+         * Do two evictions, because the first eviction will only strip
+         * LNs. We need to actually evict BINS because preload only pulls in
+         * IN/BINs
+         */
+        env.evictMemory(); // first eviction strips LNS.
+        env.evictMemory(); // second one will evict BINS
+
+        long postEvictMemUsage = env.getMemoryUsage();
+        inlist = env.getNonNullEnvImpl().getInMemoryINs(); // re-get inList
+        long postEvictResidentNodes = inlist.getSize();
+
+        /* Now preload, but not up to the full size of the db */
+        PreloadConfig conf = new PreloadConfig();
+
+        conf.setProgressListener(
+            new ProgressListener<PreloadConfig.Phases>() {
+                public boolean progress(PreloadConfig.Phases operation,
+                                        long n,
+                                        long total) {
+                    if (n % 10 == 0) {
+                        throw new RuntimeException("Stop it");
+                    }
+                    return true;
+                }
+            });
+        PreloadStats stats = null;
+        try {
+            stats = myDb.preload(conf);
+            fail("expected RE");
+        } catch (RuntimeException RE) {
+            // Expect RuntimeException
+        }
+
+        conf.setProgressListener(new ProgressListener<PreloadConfig.Phases>() {
+            public boolean progress(PreloadConfig.Phases operation,
+                                    long n,
+                                    long total) {
+                if (n % 10 == 0) {
+                    return false;
+                }
+                return true;
+            }
+        });
+        try {
+            stats = myDb.preload(conf);
+        } catch (RuntimeException RE) {
+            fail("unexpected RE");
+        }
+
+        assertEquals(PreloadStatus.USER_HALT_REQUEST, stats.getStatus());
+
+        myDb.close();
+        close(env);
+    }
+
+    /**
+     * Load the entire database with preload.
+     */
+    @Test
+    public void testPreloadEntireDatabase()
+        throws Throwable {
+
+        /* Create a test db with one record */
+        Database myDb = initEnvAndDb(false, true, false, false, false, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        key.setData(TestUtils.getTestArray(0));
+        data.setData(TestUtils.getTestArray(0));
+        assertEquals(OperationStatus.SUCCESS, myDb.put(null, key, data));
+
+        /* Close and reopen. */
+        myDb.close();
+        close(env);
+        myDb = initEnvAndDb(false, true, false, false, false, null);
+
+        /*
+         * Preload the entire database. In JE 2.0.54 this would cause a
+         * NullPointerException.
+         */
+        PreloadConfig conf = new PreloadConfig();
+        conf.setMaxBytes(100000);
+        myDb.preload(conf);
+
+        myDb.close();
+        close(env);
+    }
+
+    /**
+     * Test preload(N, 0) where N > cache size (throws IllArgException).
+     */
+    @Test
+    public void testPreloadBytesExceedsCache()
+        throws Throwable {
+
+        /* Create a test db with one record */
+        Database myDb =
+            initEnvAndDb(false, true, false, false, false, "100000");
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        key.setData(TestUtils.getTestArray(0));
+        data.setData(TestUtils.getTestArray(0));
+        assertEquals(OperationStatus.SUCCESS, myDb.put(null, key, data));
+
+        /* Close and reopen.
+         */
+        myDb.close();
+        close(env);
+        myDb = initEnvAndDb(false, true, false, false, false, "100000");
+
+        /* maxBytes > cache size. Should throw IllegalArgumentException. */
+        try {
+            PreloadConfig conf = new PreloadConfig();
+            conf.setMaxBytes(Integer.MAX_VALUE);
+            myDb.preload(conf);
+            fail("should have thrown IAE");
+        } catch (IllegalArgumentException IAE) {
+        }
+
+        myDb.close();
+        close(env);
+    }
+
+    @Test
+    public void testPreloadNoLNs()
+        throws Throwable {
+
+        Database myDb = initEnvAndDb(false, true, false, true, false, null);
+
+        final boolean embeddedLNs =
+            (DbInternal.getNonNullEnvImpl(env).getMaxEmbeddedLN() >= 1);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 1000; i > 0; i--) {
+            key.setData(TestUtils.getTestArray(i));
+            data.setData(new byte[1]);
+            assertEquals(OperationStatus.SUCCESS,
+                         myDb.put(null, key, data));
+        }
+
+        /* Do not load LNs. */
+        PreloadConfig conf = new PreloadConfig();
+        conf.setLoadLNs(false);
+        PreloadStats stats = myDb.preload(conf);
+        assertEquals(0, stats.getNLNsLoaded());
+        assertEquals(embeddedLNs ? 1000 : 0, stats.getNEmbeddedLNs());
+
+        /* Load LNs. */
+        conf.setLoadLNs(true);
+        stats = myDb.preload(conf);
+        assertEquals(embeddedLNs ? 0 : 1000, stats.getNLNsLoaded());
+        assertEquals(embeddedLNs ? 1000 : 0, stats.getNEmbeddedLNs());
+
+        myDb.close();
+        close(env);
+    }
+
+    /**
+     * Test preload with BIN-deltas in cache, for example, when BINs are
+     * mutated by the evictor, or after a crash and recovery. The latter is
+     * more likely, since preload is normally performed right after opening
+     * the Environment, which is how the bug was discovered [#24565].
+     */
+    @Test
+    public void testPreloadWithBINDeltasInCache()
+        throws Throwable {
+
+        /*
+         * Originally this test passed with both the log file corruption
+         * verification code and the Btree verification code enabled.
+         *
+         * After a sleep was added after each file check in the log file
+         * corruption code, errors such as "Failed adding new IN node=7
+         * dbIdentity=284751250... Existing IN node=7 dbIdentity=893504292"
+         * began to appear. The sleep delays the execution of the
+         * BtreeVerifier, which then reports the issue above.
+         *
+         * Because this test manipulates the INList directly and then does
+         * the preload, such reports are possible, so the BtreeVerifier is
+         * simply disabled for this test.
+         */
+        runBtreeVerifier = false;
+        Database myDb = initEnvAndDb(false, false, false, true, false, null);
+
+        final Random rnd = new Random(123);
+        int nRecs = 0;
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry();
+
+        /* Write using random keys to enable logging deltas. */
+        for (int i = 0; i < 500; i += 1) {
+            key.setData(TestUtils.getTestArray(rnd.nextInt(5000)));
+            data.setData(new byte[4]);
+
+            if (OperationStatus.SUCCESS ==
+                myDb.putNoOverwrite(null, key, data)) {
+                nRecs += 1;
+            }
+        }
+
+        /* The checkpoint will log some deltas. */
+        env.checkpoint(new CheckpointConfig().setForce(true));
+
+        /*
+         * To create deltas in cache, it is easier to mutate the BINs
+         * explicitly than to truncate the log before CkptEnd and recover.
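+         * (A BIN-delta keeps only a subset of the slots resident, so the
+         * preload below must fetch the full BIN versions from the log;
+         * this is the code path exercised by [#24565].)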
+         */
+        for (final IN in :
+             DbInternal.getNonNullEnvImpl(env).getInMemoryINs()) {
+
+            if (in.isBIN()) {
+                final BIN bin = (BIN) in;
+                bin.latch();
+                if (bin.canMutateToBINDelta()) {
+                    bin.mutateToBINDelta();
+                }
+                bin.releaseLatch();
+            }
+        }
+
+//        System.out.println(
+//            "nDeltas=" + env.getStats(null).getNCachedBINDeltas());
+
+        final PreloadConfig conf = new PreloadConfig();
+        conf.setLoadLNs(true);
+        final PreloadStats stats = myDb.preload(conf);
+
+        assertEquals(nRecs, stats.getNLNsLoaded() + stats.getNEmbeddedLNs());
+
+//        System.out.println("count " + myDb.count());
+
+        myDb.close();
+        close(env);
+    }
+
+    /**
+     * Tests a fix for a bug that caused eviction of previously cached nodes
+     * when scanning with CacheMode.UNCHANGED after a preload. [#24629]
+     */
+    @Test
+    public void testUseUnchangedModeAfterPreload()
+        throws Throwable {
+
+        /*
+         * This test case checks that the total amount of JE cache in use
+         * is the same before and after read operations performed with
+         * CacheMode.UNCHANGED. The BtreeVerifier would influence the cache
+         * size (for example, by traversing the NameDb and calling
+         * dbTree.getDb), so it is disabled for this test case.
+         */
+        runBtreeVerifier = false;
+
+        /* Create a test db with one record */
+        Database myDb = initEnvAndDb(false, false, false, true, false, null);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry();
+
+        final int nRecs = 10000;
+        data.setData(new byte[1000]);
+
+        final Transaction txn = env.beginTransaction(null, null);
+
+        for (int i = 0; i < nRecs; i += 1) {
+
+            IntegerBinding.intToEntry(i, key);
+
+            assertEquals(
+                OperationStatus.SUCCESS,
+                myDb.putNoOverwrite(txn, key, data));
+        }
+
+        txn.commit();
+
+        /*
+         * Test with and without loading LNs.
+         */
+        for (boolean loadLNs : new boolean[] { false, true }) {
+
+            myDb.close();
+            close(env);
+            myDb = initEnvAndDb(false, false, false, true, false, null);
+
+            myDb.preload(new PreloadConfig().setLoadLNs(loadLNs));
+
+            long cacheSizeBeforeScan = env.getStats(null).getCacheTotalBytes();
+
+            data.setPartial(!loadLNs);
+
+            try (final Cursor c = myDb.openCursor(null, null)) {
+                c.setCacheMode(CacheMode.UNCHANGED);
+                while (c.getNext(key, data, null) == OperationStatus.SUCCESS) {
+                }
+            }
+
+            long cacheSizeAfterScan = env.getStats(null).getCacheTotalBytes();
+
+            assertEquals(
+                "loadLNs " + loadLNs,
+                cacheSizeBeforeScan, cacheSizeAfterScan);
+        }
+
+        myDb.close();
+        close(env);
+    }
+
+    @Test
+    public void testDbClose()
+        throws Throwable {
+
+        /* Set up a test db */
+        Database myDb = initEnvAndDb(false, true, false, true, false, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Transaction txn = env.beginTransaction(null, null);
+        for (int i = 2500; i > 0; i--) {
+            key.setData(TestUtils.getTestArray(i));
+            data.setData(TestUtils.getTestArray(i));
+            assertEquals(OperationStatus.SUCCESS,
+                         myDb.put(txn, key, data));
+        }
+
+        /* Create a cursor, use it, then close db without closing cursor.
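+           Closing the database while a cursor is still open must fail
+           with IllegalStateException, as asserted below.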
*/ + Cursor cursor = myDb.openCursor(txn, null); + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(key, data, LockMode.DEFAULT)); + + try { + myDb.close(); + fail("didn't throw IllegalStateException for unclosed cursor"); + } catch (IllegalStateException e) { + } + + try { + txn.commit(); + fail("didn't throw IllegalStateException for uncommitted " + + "transaction"); + } catch (IllegalStateException e) { + } + + close(env); + } + + /** + * Checks that a DatabasePreemptedException is thrown after database handle + * has been forcibly closed by an HA database naming operation (rename, + * remove, truncate). [#17015] + */ + @Test + public void testDbPreempted() { + doDbPreempted(false /*useTxnForDbOpen*/, + false /*accessDbAfterPreempted*/); + + doDbPreempted(false /*useTxnForDbOpen*/, + true /*accessDbAfterPreempted*/); + + doDbPreempted(true /*useTxnForDbOpen*/, + false /*accessDbAfterPreempted*/); + + doDbPreempted(true /*useTxnForDbOpen*/, + true /*accessDbAfterPreempted*/); + } + + private void doDbPreempted(boolean useTxnForDbOpen, + boolean accessDbAfterPreempted) { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* DatabasePreemptedException is thrown only if replicated. */ + if (!envImpl.isReplicated()) { + env.close(); + return; + } + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + DatabaseEntry key = new DatabaseEntry(new byte[1]); + DatabaseEntry data = new DatabaseEntry(new byte[1]); + + /* Create databases and write one record. */ + Database db1 = env.openDatabase(null, "db1", dbConfig); + OperationStatus status = db1.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + db1.close(); + Database db2 = env.openDatabase(null, "db2", dbConfig); + status = db2.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + db2.close(); + + /* Open databases for reading. */ + Transaction txn = env.beginTransaction(null, null); + dbConfig.setAllowCreate(false); + db1 = env.openDatabase(useTxnForDbOpen ? txn : null, "db1", dbConfig); + db2 = env.openDatabase(useTxnForDbOpen ? txn : null, "db2", dbConfig); + + Cursor c1 = db1.openCursor(txn, null); + Cursor c2 = db2.openCursor(txn, null); + + /* Read one record in each. */ + status = c1.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + status = c2.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + + /* + * Use an importunate txn (also used by the HA replayer) to perform a + * removeDatabase, which will steal the database handle lock and + * invalidate the database handle. 
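+         * (An importunate txn does not wait on conflicting locks; it is
+         * allowed to steal them from other lockers, which is how replica
+         * replay preempts application transactions.)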
+ */ + Transaction importunateTxn = env.beginTransaction(null, null); + DbInternal.getTxn(importunateTxn).setImportunate(true); + env.removeDatabase(importunateTxn, "db1"); + importunateTxn.commit(); + + if (useTxnForDbOpen) { + try { + status = c2.getSearchKey(key, data, null); + fail(); + } catch (DatabasePreemptedException expected) { + assertSame(db1, expected.getDatabase()); + assertEquals("db1", expected.getDatabaseName()); + } + } else { + status = c2.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + } + + if (accessDbAfterPreempted) { + try { + status = c1.getSearchKey(key, data, null); + fail(); + } catch (DatabasePreemptedException expected) { + assertSame(db1, expected.getDatabase()); + assertEquals("db1", expected.getDatabaseName()); + } + try { + c1.dup(true); + fail(); + } catch (DatabasePreemptedException expected) { + assertSame(db1, expected.getDatabase()); + assertEquals("db1", expected.getDatabaseName()); + } + try { + status = db1.get(txn, key, data, null); + fail(); + } catch (DatabasePreemptedException expected) { + assertSame(db1, expected.getDatabase()); + assertEquals("db1", expected.getDatabaseName()); + } + try { + db1.openCursor(txn, null); + fail(); + } catch (DatabasePreemptedException expected) { + assertSame(db1, expected.getDatabase()); + assertEquals("db1", expected.getDatabaseName()); + } + } + + c1.close(); + c2.close(); + + if (useTxnForDbOpen || accessDbAfterPreempted) { + try { + txn.commit(); + } catch (DatabasePreemptedException expected) { + assertSame(db1, expected.getDatabase()); + assertEquals("db1", expected.getDatabaseName()); + } + txn.abort(); + } else { + txn.commit(); + } + db1.close(); + db2.close(); + + env.close(); + } + + /** + * Ensure that Database.close is allowed (no exception is thrown) after + * aborting the txn that opened the Database. + */ + @Test + public void testDbOpenAbortWithDbClose() { + doDbOpenAbort(true /*withDbClose*/); + } + + /** + * Ensure that Database.close is not required (lack of close does not cause + * a leak) after aborting the txn that opened the Database. + */ + @Test + public void testDbOpenAbortNoDbClose() { + doDbOpenAbort(false /*withDbClose*/); + } + + /** + * Opens (creates) a database with a txn, then aborts that txn. Optionally + * closes the database handle after the abort. 
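+     * The abort undoes the database creation, so in either variant no
+     * handle lock should remain behind.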
+ */ + private void doDbOpenAbort(boolean withDbClose) { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + env = new Environment(envHome, envConfig); + Transaction txn = env.beginTransaction(null, null); + Database db = env.openDatabase(txn, "testDB", dbConfig); + Cursor c = db.openCursor(txn, null); + OperationStatus status = c.put(new DatabaseEntry(new byte[1]), + new DatabaseEntry(new byte[1])); + assertSame(OperationStatus.SUCCESS, status); + + c.close(); + txn.abort(); + if (withDbClose) { + db.close(); + } + env.close(); + } + + @Test + public void testDbCloseUnopenedDb() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + Database myDb = new Database(env); + try { + myDb.close(); + } catch (DatabaseException DBE) { + fail("shouldn't catch DatabaseException for closing unopened db"); + } + env.close(); + } + + /** + * Test that open cursor isn't possible on a closed database. + */ + @Test + public void testOpenCursor() + throws DatabaseException { + + Database db = initEnvAndDb(true, true, false, true, false, null); + Cursor cursor = db.openCursor(null, null); + cursor.close(); + db.close(); + try { + db.openCursor(null, null); + fail("Should throw exception because database is closed"); + } catch (IllegalStateException expected) { + close(env); + } + } + + /** + * Test that openCursor throws IllegalStateException after invalidating and + * closing the environment. [#23083] + */ + @Test + public void testOpenCursorAfterEnvInvalidation() { + + final Database db = initEnvAndDb(true, true, false, true, false, null); + final Transaction txn = env.beginTransaction(null, null); + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.invalidate(EnvironmentFailureException.unexpectedState(envImpl)); + close(env); + try { + db.openCursor(txn, null); + fail("Should throw exception because env is closed"); + } catch (IllegalStateException expected) { + } + } + + @Test + public void testBufferOverflowingPut() + throws Throwable { + + try { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + //envConfig.setConfigParam("je.log.totalBufferBytes", "5000"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database myDb = env.openDatabase(null, "testDB", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[10000000]); + try { + key.setData(TestUtils.getTestArray(10)); + myDb.put(null, key, data); + } catch (DatabaseException DE) { + fail("unexpected DatabaseException"); + } + myDb.close(); + env.close(); + env = null; + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Check that the handle lock is not left behind when a non-transactional + * open of a primary DB fails while populating the secondary. 
[#15558] + * @throws Exception + */ + @Test + public void testFailedNonTxnDbOpen() + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + DatabaseConfig priConfig = new DatabaseConfig(); + priConfig.setAllowCreate(true); + Database priDb = env.openDatabase(null, "testDB", priConfig); + + priDb.put(null, new DatabaseEntry(new byte[1]), + new DatabaseEntry(new byte[2])); + + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setAllowCreate(true); + secConfig.setAllowPopulate(true); + /* Use priDb as foreign key DB for ease of testing. */ + secConfig.setForeignKeyDatabase(priDb); + secConfig.setKeyCreator(new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData + (data.getData(), data.getOffset(), data.getSize()); + return true; + } + }); + try { + env.openSecondaryDatabase(null, "testDB2", priDb, secConfig); + fail(); + } catch (DatabaseException e) { + /* Fails because [0,0] does not exist as a key in priDb. */ + assertTrue(e.toString(), + e.toString().indexOf("foreign key not allowed") > 0); + } + + priDb.close(); + env.close(); + env = null; + } + + EnvironmentConfig getEnvironmentConfig( + boolean dontRunEvictor, + boolean runCleaner, + boolean transactional, + String memSize) + throws IllegalArgumentException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(transactional); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_CHECK_LEAKS, "false"); + + envConfig.setConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES, "6"); + + envConfig.setConfigParam( + EnvironmentConfig.NODE_DUP_TREE_MAX_ENTRIES, "6"); + + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + + if (!runCleaner) { + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + } + + if (dontRunEvictor) { + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + + /* + * Don't let critical eviction run or it will interfere with the + * preload test. + */ + envConfig.setConfigParam( + EnvironmentConfig.EVICTOR_CRITICAL_PERCENTAGE, "500"); + } + + if (memSize != null) { + envConfig.setConfigParam(EnvironmentConfig.MAX_MEMORY, memSize); + } + + if (!runBtreeVerifier) { + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + } + + if (!runINCompressor) { + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + } + + envConfig.setAllowCreate(true); + return envConfig; + } + + /** + * Set up the environment and db. + */ + private Database initEnvAndDb( + boolean dontRunEvictor, + boolean runCleaner, + boolean allowDuplicates, + boolean transactional, + boolean deferredWrite, + String memSize) + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvironmentConfig(dontRunEvictor, + runCleaner, + transactional, + memSize); + env = create(envHome, envConfig); + + /* Make a db and open it. 
*/ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(allowDuplicates); + dbConfig.setAllowCreate(true); + if (!deferredWrite) { + dbConfig.setTransactional(transactional); + } + dbConfig.setDeferredWrite(deferredWrite); + Database myDb = env.openDatabase(null, "testDB", dbConfig); + return myDb; + } + + /** + * X'd out because this is expected to be used in the debugger to set + * specific breakpoints and step through in a synchronous manner. + */ + private Database pNOCDb; + + public void XXtestPutNoOverwriteConcurrently() + throws Throwable { + + pNOCDb = initEnvAndDb(true, true, true, true, false, null); + JUnitThread tester1 = + new JUnitThread("testNonBlocking1") { + @Override + public void testBody() { + try { + Transaction txn1 = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(1)); + OperationStatus status = + pNOCDb.putNoOverwrite(txn1, key, data); + txn1.commit(); + System.out.println("thread1: " + status); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testNonBlocking2") { + @Override + public void testBody() { + try { + Transaction txn2 = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(1)); + data.setData(TestUtils.getTestArray(2)); + OperationStatus status = + pNOCDb.putNoOverwrite(txn2, key, data); + txn2.commit(); + System.out.println("thread2: " + status); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + } + + /** + * Ensure that close/abort methods can be called without an exception when + * the Environment is invalid, if they were closed earlier. [#21264] + */ + @Test + public void testCloseWithInvalidEnv() { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = create(envHome, envConfig); + + Transaction txn = env.beginTransaction(null, null); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database db = env.openDatabase(txn, "testDB", dbConfig); + + Cursor cursor = db.openCursor(txn, null); + + cursor.close(); + db.close(); + txn.commit(); + + /* Invalidate env. */ + EnvironmentFailureException.unexpectedState(env.getNonNullEnvImpl()); + + try { + cursor.close(); + db.close(); + txn.abort(); + } catch (RuntimeException e) { + e.printStackTrace(); + fail("Close/abort with invalid/closed env shouldn't fail."); + } + + /* + * abnormalClose is used simply to avoid rep (dual) checks that don't + * work with an invalidated environment. 
+     */
+        abnormalClose(env);
+    }
+
+    class DeleteIt implements Runnable {
+        int key;
+        Database db;
+        Environment env;
+        OperationStatus retstat = null;
+        int waitEventPre;
+        int waitEventPost;
+        long waittime;
+
+        DeleteIt(int key,
+                 Environment env,
+                 Database db) {
+            this.key = key;
+            this.db = db;
+            this.env = env;
+        }
+
+        public void run() {
+            while (!doWork());
+        }
+
+        private boolean doWork() {
+            boolean done = false;
+            DatabaseEntry keye = new DatabaseEntry();
+            keye.setData(TestUtils.getTestArray(key));
+            while (!done) {
+                Transaction xact = env.beginTransaction(null, null);
+                try {
+                    retstat = db.delete(xact, keye);
+                } catch (LockConflictException e) {
+                    fail("deadlock occurred but was not expected.");
+                }
+
+                if (xact != null) {
+                    xact.commit();
+                    done = true;
+                }
+            }
+            return done;
+        }
+
+        public OperationStatus getResult() {
+            return retstat;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/DbHandleLockTest.java b/test/com/sleepycat/je/DbHandleLockTest.java
new file mode 100644
index 0000000..8162733
--- /dev/null
+++ b/test/com/sleepycat/je/DbHandleLockTest.java
@@ -0,0 +1,213 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import com.sleepycat.je.util.DualTestCase;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * BDB's transactional DDL operations (database creation, truncation,
+ * remove and rename) need special support through what we call "handle" locks.
+ *
+ * When a database is created, a write lock is taken. When the creation
+ * transaction is committed, that write lock should be turned into a read lock
+ * and should be transferred to the database handle.
+ *
+ * Note that when this test is run in HA mode, environment creation results in
+ * a different number of outstanding locks. An example of an HA-specific lock
+ * is the one taken for the RepGroupDb, which holds replication group
+ * information. Because of that, this test takes care to check for a relative
+ * number of locks, rather than an absolute number.
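+ *
+ * The tests below therefore snapshot LockStats before each operation and
+ * assert on the deltas, rather than on absolute counts.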
+ */
+public class DbHandleLockTest extends DualTestCase {
+    private File envHome;
+    private Environment env;
+
+    public DbHandleLockTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    public void openEnv() {
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        env = create(envHome, envConfig);
+    }
+
+    @Test
+    public void testOpenHandle()
+        throws Throwable {
+
+        try {
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            envConfig.setAllowCreate(true);
+            envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+            env = create(envHome, envConfig);
+            Transaction txnA =
+                env.beginTransaction(null, TransactionConfig.DEFAULT);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+
+            LockStats oldLockStat = env.getLockStats(null);
+
+            Database db = env.openDatabase(txnA, "foo", dbConfig);
+
+            /*
+             * At this point, we expect a write lock on the NameLN by txnA, and
+             * a read lock on the NameLN by the handle locker.
+             */
+            LockStats lockStat = env.getLockStats(null);
+            assertEquals(oldLockStat.getNTotalLocks() + 1,
+                         lockStat.getNTotalLocks());
+            assertEquals(oldLockStat.getNWriteLocks() + 1,
+                         lockStat.getNWriteLocks());
+            assertEquals(oldLockStat.getNReadLocks() + 1,
+                         lockStat.getNReadLocks());
+
+            txnA.commit();
+
+            lockStat = env.getLockStats(null);
+            assertEquals(oldLockStat.getNTotalLocks() + 1,
+                         lockStat.getNTotalLocks());
+            assertEquals(oldLockStat.getNWriteLocks(),
+                         lockStat.getNWriteLocks());
+            assertEquals(oldLockStat.getNReadLocks() + 1,
+                         lockStat.getNReadLocks());
+
+            /* Updating the root from another txn should be possible. */
+            insertData(10, db);
+            db.close();
+
+            lockStat = env.getLockStats(null);
+            assertEquals(oldLockStat.getNTotalLocks(),
+                         lockStat.getNTotalLocks());
+            assertEquals(oldLockStat.getNWriteLocks(),
+                         lockStat.getNWriteLocks());
+            assertEquals(oldLockStat.getNReadLocks(),
+                         lockStat.getNReadLocks());
+            close(env);
+            env = null;
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    @Test
+    public void testSR12068()
+        throws Throwable {
+
+        try {
+            openEnv();
+            Transaction txnA =
+                env.beginTransaction(null, TransactionConfig.DEFAULT);
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            Database db = env.openDatabase(txnA, "foo", dbConfig);
+            db.close();
+
+            dbConfig.setExclusiveCreate(true);
+            try {
+                db = env.openDatabase(txnA, "foo", dbConfig);
+                fail("should throw DatabaseException");
+            } catch (DatabaseException DE) {
+                /* Expected: database already exists. */
+            }
+            dbConfig.setAllowCreate(false);
+            dbConfig.setExclusiveCreate(false);
+            db = env.openDatabase(txnA, "foo", dbConfig);
+            db.close();
+            txnA.commit();
+            txnA = env.beginTransaction(null, TransactionConfig.DEFAULT);
+            env.removeDatabase(txnA, "foo");
+            txnA.commit();
+            close(env);
+            env = null;
+        } catch (Throwable T) {
+            T.printStackTrace();
+            throw T;
+        }
+    }
+
+    private void insertData(int numRecs, Database db)
+        throws Throwable {
+
+        for (int i = 0; i < numRecs; i++) {
+            DatabaseEntry key = new DatabaseEntry(TestUtils.getTestArray(i));
+            DatabaseEntry data = new DatabaseEntry(TestUtils.getTestArray(i));
+            assertEquals(OperationStatus.SUCCESS,
+                         db.put(null, key, data));
+        }
+    }
+
+    /**
+     * Ensures that handle locks are released on the old LSN when a NameLN is
+     * migrated by the cleaner. [#20617]
+     */
+    @Test
+    public void testReleaseHandleLocks() {
+
+        final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000000");
+        envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+        env = create(envHome, envConfig);
+
+        final int nDbs = 500;
+        final int dataSize = 100000;
+        final Database[] handles = new Database[nDbs];
+
+        final DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        final DatabaseEntry data = new DatabaseEntry(new byte[dataSize]);
+
+        for (int i = 0; i < nDbs; i += 1) {
+            final DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            handles[i] = env.openDatabase(null, "db" + i, dbConfig);
+            final Database db = handles[i];
+            OperationStatus status = db.put(null, key, data);
+            assertSame(OperationStatus.SUCCESS, status);
+            if (i < nDbs - 100) {
+                status = db.delete(null, key);
+                assertSame(OperationStatus.SUCCESS, status);
+            }
+        }
+        env.cleanLog();
+        env.checkpoint(new CheckpointConfig().setForce(true));
+
+        final EnvironmentStats stats = env.getStats(null);
+        assertTrue(String.valueOf(stats.getNReadLocks()),
+                   stats.getNReadLocks() < (nDbs * 2));
+        for (final Database db : handles) {
+            db.close();
+        }
+        close(env);
+    }
+}
diff --git a/test/com/sleepycat/je/DbTestProxy.java b/test/com/sleepycat/je/DbTestProxy.java
new file mode 100644
index 0000000..cf6f56d
--- /dev/null
+++ b/test/com/sleepycat/je/DbTestProxy.java
@@ -0,0 +1,30 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import com.sleepycat.je.dbi.CursorImpl;
+
+/**
+ * DbTestProxy is for internal use only. It serves to shelter methods that must
+ * be public to be used by JE unit tests but are not part of the public API
+ * available to applications.
+ */
+public class DbTestProxy {
+    /**
+     * Proxy to Cursor.getCursorImpl
+     */
+    public static CursorImpl dbcGetCursorImpl(Cursor dbc) {
+        return dbc.getCursorImpl();
+    }
+}
diff --git a/test/com/sleepycat/je/DirtyReadTest.java b/test/com/sleepycat/je/DirtyReadTest.java
new file mode 100644
index 0000000..9281db1
--- /dev/null
+++ b/test/com/sleepycat/je/DirtyReadTest.java
@@ -0,0 +1,216 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Arrays; + +import org.junit.Test; + +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Check that the Database and Cursor classes properly use read-uncommitted + * when specified. + */ +public class DirtyReadTest extends DualTestCase { + private final File envHome; + private Environment env; + + public DirtyReadTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testReadUncommitted() + throws Throwable { + + Database db = null; + Transaction txnA = null; + Cursor cursor = null; + try { + /* Make an environment, a db, insert a few records */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + /* Now open for real, insert a record */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + + StringDbt key = new StringDbt("key1"); + StringDbt data = new StringDbt("data1"); + txnA = env.beginTransaction(null, TransactionConfig.DEFAULT); + OperationStatus status = db.put(txnA, key, data); + assertEquals(OperationStatus.SUCCESS, status); + + /* + * txnA should have a write lock on this record. Now try to read it + * with read-uncommitted. + */ + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + Cursor nonTxnCursor = + db.openCursor(null, CursorConfig.DEFAULT); + try { + nonTxnCursor.getSearchKey + (key, foundData, LockMode.READ_UNCOMMITTED); + nonTxnCursor.getSearchKey + (key, foundData, LockMode.READ_UNCOMMITTED_ALL); + + /* + * Make sure we get a deadlock exception without + * read-uncommitted. + */ + try { + nonTxnCursor.getSearchKey + (key, foundData, LockMode.DEFAULT); + fail("Should throw LockConflict if non-txnl, " + + "non-readUnc."); + } catch (LockConflictException expected) { + } + } finally { + nonTxnCursor.close(); + } + + /* + * Make sure we get a deadlock exception without read-uncommitted. + */ + try { + db.get(null, key, foundData, LockMode.DEFAULT); + fail("Should deadlock"); + } catch (LockConflictException e) { + } + + /* + * Specify read-uncommitted as a lock mode. 
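+             * READ_UNCOMMITTED acquires no lock at all, which is why these
+             * reads succeed while txnA still holds the write lock on the
+             * record.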
+             */
+            status = db.get(null, key, foundData, LockMode.READ_UNCOMMITTED);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            status = db.get(
+                null, key, foundData, LockMode.READ_UNCOMMITTED_ALL);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            status = db.getSearchBoth
+                (null, key, data, LockMode.READ_UNCOMMITTED);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            status = db.getSearchBoth
+                (null, key, data, LockMode.READ_UNCOMMITTED_ALL);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            Transaction txn = null;
+            if (DualTestCase.isReplicatedTest(getClass())) {
+                txn = txnA;
+            }
+
+            cursor = db.openCursor(txn, CursorConfig.DEFAULT);
+
+            status = cursor.getFirst(
+                foundKey, foundData, LockMode.READ_UNCOMMITTED);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(key.getData(), foundKey.getData()));
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            status = cursor.getFirst(
+                foundKey, foundData, LockMode.READ_UNCOMMITTED_ALL);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(key.getData(), foundKey.getData()));
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            cursor.close();
+
+            /*
+             * Specify read-uncommitted through a read-uncommitted txn.
+             */
+            TransactionConfig txnConfig = new TransactionConfig();
+            txnConfig.setReadUncommitted(true);
+            Transaction readUncommittedTxn =
+                env.beginTransaction(null, txnConfig);
+
+            status = db.get
+                (readUncommittedTxn, key, foundData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            status = db.getSearchBoth
+                (readUncommittedTxn, key, data, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+
+            cursor = db.openCursor(readUncommittedTxn, CursorConfig.DEFAULT);
+            status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(key.getData(), foundKey.getData()));
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+            cursor.close();
+            readUncommittedTxn.abort();
+
+            /*
+             * Specify read-uncommitted through a read-uncommitted cursor
+             */
+            if (DualTestCase.isReplicatedTest(getClass())) {
+                txn = txnA;
+            } else {
+                txn = null;
+            }
+
+            CursorConfig cursorConfig = new CursorConfig();
+            cursorConfig.setReadUncommitted(true);
+            cursor = db.openCursor(txn, cursorConfig);
+            status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertTrue(Arrays.equals(key.getData(), foundKey.getData()));
+            assertTrue(Arrays.equals(data.getData(), foundData.getData()));
+
+            /*
+             * Open through the compatibility method; it should accept the
+             * dirty-read flag (but ignore it).
+             */
+            // Database compatDb = new Database(env);
+            // compatDb.open(null, null, "foo", DbConstants.DB_BTREE,
+            //               DbConstants.DB_DIRTY_READ, DbConstants.DB_UNKNOWN);
+            // compatDb.close();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+
+            if (txnA != null) {
+                txnA.abort();
+            }
+
+            if (db != null) {
+                db.close();
+            }
+            close(env);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/DupSlotReuseTest.java b/test/com/sleepycat/je/DupSlotReuseTest.java
new file mode 100644
index 0000000..aa2bd1c
--- /dev/null
+++ b/test/com/sleepycat/je/DupSlotReuseTest.java
@@ -0,0
+1,395 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Reproduces a bug [#18937] where the abortLsn is set incorrectly after a slot + * is reused, and verifies the fix. + * + * Problem scenario is as follows. LN-X-Y is used to mean an LN with key X and + * data Y. + * + * 1/100 LN-A-1, inserted, txn 1 + * 1/200 Commit, txn 1 + * 2/100 LN-A-1, deleted, txn 2 + * 2/200 LN-A-2, inserted, txn 2 + * 2/300 LN-A-3, inserted, txn 2 + * 2/400 Abort, txn 2 + * 2/500 DBIN with one entry, mainKey:A, dbinKey:2, data:1, lsn:1/100 + * + * The DBIN's key is wrong. It is 2 and should be 1. The symptoms are: + * + * + A user Btree lookup (e.g., getSearchBoth) on A-2 will find the record, + * but should not; the data returned is 1. + * + * + A lookup on A-1 will return NOTFOUND, but should return the record. + * + * + When log file 1 is cleaned, the cleaner will not migrate 1/100 because it + * can't find A-1 in the Btree and assumes it is obsolete. After the file + * is deleted, the user will get LogFileNotFound when accessing the record + * in the DBIN. + * + * In the user's log there is no FileSummaryLN indicating that LSN 1/100 is + * obsolete, implying that the cleaner did a Btree lookup and did not find A-1. + * + * Cause: The insertion of LN-A-2 incorrectly reuses the BIN slot for LN-A-1, + * because we don't check the data (we only check the key) when reusing a slot + * in a BIN, even though the database is configured for duplicates. + * + * After 1/100: + * BIN + * | + * LN-A-1 + * + * After 2/100: + * BIN + * | + * LN-A-1 (deleted) + * + * After 2/200: + * BIN + * | + * LN-A-2 + * + * After 2/300: + * BIN + * | + * DIN + * | + * DBIN + * / \ + * LN-A-2 LN-A-3 + * + * The problem in the last two pictures is that the DBIN slot for LN-A-2 has + * the wrong abortLsn: 1/100. + * + * After 2/400: + * BIN + * | + * DIN + * | + * DBIN + * | + * LN-A-1 (with wrong key: LN-A-2) + * + * Now we have the situation that causes the symptoms listed above. + * + * The problem only occurs if: + * + the prior committed version of the record is not deleted, + * + a txn deletes the sole member of a dup set, then adds at least two more + * members of that dup set with different data, then aborts. + * + * The fix is to create the dup tree at 2/300, rather than reusing the slot. + * The new rule is, in a duplicates database, a slot cannot be reused if the + * new data and old data are not equal. Unequal data means logically different + * records, and slot reuse isn't appropriate. 
+ * + * With the fix after 2/200: + * BIN + * | + * DIN + * | + * DBIN + * / \ + * LN-A-1 (deleted) LN-A-2 + * + * With the fix after 2/300: + * BIN + * | + * DIN + * | + * DBIN + * / \ \ + * LN-A-1 (deleted) LN-A-2 LN-A-3 + * + * With the fix after 2/400: + * BIN + * | + * DIN + * | + * DBIN + * | + * LN-A-1 (with correct key) + * + * I don't believe a problem occurs when the txn commits rather than aborts. + * The abortLsn will be wrong (with the bug), but that won't cause harm. The + * abortLsn is counted obsolete during commit, but that happens to be correct. + * + * And I don't believe a problem occurs when the prior version of the record is + * deleted, as below. + * + * 1/100 LN-A-1, deleted, txn 1 + * 1/200 Commit, txn 1 + * 2/200 LN-A-2, inserted, txn 2 + * 2/300 LN-A-3, inserted, txn 2 + * 2/400 Abort, txn 2 + * 2/500 DBIN with one entry, mainKey:A, dbinKey:2, lsn:1/100, KD:true + * + * With the bug, the end result is a slot with a DBIN key and LSN that should + * not go together. But since the KD flag will be set on the slot, we'll never + * try to use the LSN for a user operation. So I think it's safe. + * + * This last fact is leveraged in the bug fix. See testDiffTxnAbort, which + * tests the case above and references the relevant part of the bug fix. + */ +public class DupSlotReuseTest extends TestBase { + + private static final String DB_NAME = "foo"; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private File envHome; + private Environment env; + private Database db; + + public DupSlotReuseTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + try { + if (env != null) { + env.close(); + } + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + + db = null; + env = null; + envHome = null; + } + + private void openEnv() { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + config.setTransactional(true); + config.setAllowCreate(true); + /* Do not run the daemons. */ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + env = new Environment(envHome, config); + + openDb(); + } + + private void openDb() { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + private void closeEnv() { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + * Tests the failure reported by the user in [#18937]. + */ + @Test + public void testSameTxnAbort() { + + openEnv(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + OperationStatus status; + + /* Put {1,1}, auto-commit. */ + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(1, data); + status = db.putNoOverwrite(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Flip a couple files so file 0 (containing {1,1}) can be cleaned. 
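+           forceLogFileFlip starts a new log file, which makes the previous
+           file a candidate for cleaning.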
*/ + envImpl.forceLogFileFlip(); + envImpl.forceLogFileFlip(); + + /* + * Delete {1,1}, put {1,2} and {1,3}, then abort. With the bug, {1,2} + * will reuse the slot for {1,1}. This is incorrect. When the txn + * aborts, the undo of {1,2} will set the LSN in that slot back to the + * LSN of {1,1}. But the key in the DBIN will incorrectly be {2}, + * while it should be {1}. + */ + Transaction txn = env.beginTransaction(null, null); + status = db.delete(txn, key); + assertSame(OperationStatus.SUCCESS, status); + IntegerBinding.intToEntry(2, data); + status = db.putNoDupData(txn, key, data); + assertSame(OperationStatus.SUCCESS, status); + IntegerBinding.intToEntry(3, data); + status = db.putNoDupData(txn, key, data); + assertSame(OperationStatus.SUCCESS, status); + txn.abort(); + + /* + * Get first dup record for key {1}. In spite of the bug, the first + * dup for key {1} is returned correctly as {1,1}. With the bug, + * although the DBIN key is incorrectly {2}, the data returned is {1}. + */ + status = db.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(data)); + + /* + * Get dup record with key/data of {1,1}. The bug causes the following + * operation to return NOTFOUND, because it looks up {1,1} and does not + * find a DBIN key of {1}. + */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getSearchBoth(key, data, null); + /* Comment out assertions below to see LogFileNotFound. */ + assertSame(OperationStatus.SUCCESS, status); + assertEquals(1, cursor.count()); + cursor.close(); + + /* + * If the above assertions are commented out, the bug will cause a + * LogFileNotFound when file 0 is cleaned and deleted, and then we + * attempt to fetch {1,1}. With the bug, the cleaner will lookup {1,1} + * in the Btree (just as getSearchBoth does above) and it will not be + * found, so the LN will not be migrated. After file 0 is deleted, + * when we evict and explicitly fetch the LN with a cursor, the + * getCurrent call below will cause LogFileNotFound. + */ + envImpl.getCleaner(). + getFileSelector(). + injectFileForCleaning(new Long(0)); + long nCleaned = env.cleanLog(); + assertTrue(nCleaned > 0); + env.checkpoint(forceConfig); + cursor = db.openCursor(null, null); + status = cursor.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(data)); + DbInternal.getCursorImpl(cursor).evictLN(); + status = cursor.getCurrent(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + cursor.close(); + + closeEnv(); + } + + /** + * Checks that the failure reported in [#18937] does not occur when the + * prior committed version of the record is deleted. This did not fail, + * even before the bug fix. This confirms what the comment in Tree.insert + * says: + *-------------------- + * 2. The last committed version of the record is deleted. + * In this case it may be impossible to get the data + * (the prior version may be cleaned), so no comparison + * is possible. Fortunately, reusing a slot when the + * prior committed version is deleted won't cause a + * problem if the txn aborts. Even though the abortLsn + * may belong to a different dup key, the residual slot + * will have knownDeleted set, i.e., will be ignored. 
+ *-------------------- + */ + @Test + public void testDiffTxnAbort() { + + openEnv(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + OperationStatus status; + + /* Put and delete {1,1}, auto-commit. */ + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(1, data); + status = db.putNoOverwrite(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + status = db.delete(null, key); + assertSame(OperationStatus.SUCCESS, status); + + /* Flip a couple files so file 0 (containing {1,1}) can be cleaned. */ + envImpl.forceLogFileFlip(); + envImpl.forceLogFileFlip(); + + /* See testSameTxnAbort. */ + Transaction txn = env.beginTransaction(null, null); + IntegerBinding.intToEntry(2, data); + status = db.putNoDupData(txn, key, data); + assertSame(OperationStatus.SUCCESS, status); + IntegerBinding.intToEntry(3, data); + status = db.putNoDupData(txn, key, data); + assertSame(OperationStatus.SUCCESS, status); + txn.abort(); + + /* Confirm that we roll back to the deleted record. */ + status = db.get(null, key, data, null); + assertSame(OperationStatus.NOTFOUND, status); + + /* + * Confirm that the file containing the deleted record can be cleaned + * and deleted. + */ + envImpl.getCleaner(). + getFileSelector(). + injectFileForCleaning(new Long(0)); + long nCleaned = env.cleanLog(); + assertTrue(nCleaned > 0); + env.checkpoint(forceConfig); + Cursor cursor = db.openCursor(null, null); + status = cursor.getSearchKey(key, data, null); + assertSame(OperationStatus.NOTFOUND, status); + DbInternal.getCursorImpl(cursor).evictLN(); + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.NOTFOUND, status); + cursor.close(); + + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/EnvMultiSubDirTest.java b/test/com/sleepycat/je/EnvMultiSubDirTest.java new file mode 100644 index 0000000..3dc3d20 --- /dev/null +++ b/test/com/sleepycat/je/EnvMultiSubDirTest.java @@ -0,0 +1,207 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test multiple environment data directories. 
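+ *
+ * Log files are expected to be spread round-robin across the data
+ * subdirectories: with N_DATA_DIRS directories, log file number F should
+ * land in the subdirectory with index (F % N_DATA_DIRS) + 1, which
+ * doTestWork verifies for every .jdb file it finds.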
+ */ +public class EnvMultiSubDirTest extends TestBase { + private static final String DB_NAME = "testDb"; + private static final String keyPrefix = "herococo"; + private static final String dataValue = "abcdefghijklmnopqrstuvwxyz"; + + private final File envHome; + private final int N_DATA_DIRS = 3; + + public EnvMultiSubDirTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + TestUtils.createEnvHomeWithSubDir(envHome, N_DATA_DIRS); + } + + /* Test the basic CRUD operations with multiple data directories. */ + @Test + public void testSubDirBasic() + throws Throwable { + + doTestWork(false, false); + } + + /* Test deferred write with multiple data directories. */ + @Test + public void testSubDirDeferredWrite() + throws Throwable { + + doTestWork(true, false); + } + + /* Test transactional environment with multiple data directories. */ + @Test + public void testSubDirTransactional() + throws Throwable { + + doTestWork(false, true); + } + + private void doTestWork(boolean deferredWrite, boolean transactional) + throws Throwable { + + EnvironmentConfig envConfig = createEnvConfig(transactional); + Environment env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setDeferredWrite(deferredWrite); + dbConfig.setTransactional(transactional); + Database db = env.openDatabase(null, DB_NAME, dbConfig); + + /* Do updates on the database. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= 2000; i++) { + StringBinding.stringToEntry(keyPrefix + i, key); + StringBinding.stringToEntry(dataValue, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + for (int i = 1; i <= 1000; i++) { + StringBinding.stringToEntry(keyPrefix + i, key); + assertEquals(OperationStatus.SUCCESS, db.delete(null, key)); + } + + for (int i = 1001; i <= 2000; i++) { + StringBinding.stringToEntry(keyPrefix + i, key); + StringBinding.stringToEntry(dataValue + dataValue, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + /* Check the contents of the current database. */ + assertEquals(1000, db.count()); + checkDbContents(db, 1001, 2000); + db.close(); + env.close(); + + /* Make sure reopen is OK. */ + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, DB_NAME, dbConfig); + assertEquals(1000, db.count()); + checkDbContents(db, 1001, 2000); + db.close(); + env.close(); + + /* Check that log files in the sub directories are round-robin. 
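+ * The hex prefix of a .jdb file name is its file number F, and
+ * round-robin placement means F % N_DATA_DIRS must equal i - 1 for
+ * subdirectory i.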
*/ + for (int i = 1; i <= N_DATA_DIRS; i++) { + File subDir = new File(envHome, TestUtils.getSubDirName(i)); + File[] logFiles = subDir.listFiles(); + for (File logFile : logFiles) { + if (logFile.getName().endsWith("jdb") && logFile.isFile()) { + String fileNumber = logFile.getName().substring + (0, logFile.getName().indexOf(".")); + int number = + Integer.valueOf(Integer.parseInt(fileNumber, 16)); + assertTrue((number % N_DATA_DIRS) == (i - 1)); + } + } + } + } + + private EnvironmentConfig createEnvConfig(boolean transactional) { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(transactional); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + N_DATA_DIRS + ""); + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "10000"); + envConfig.setConfigParam(EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, + "20000"); + envConfig.setConfigParam(EnvironmentConfig.CLEANER_BYTES_INTERVAL, + "10000"); + return envConfig; + } + + /* Test that log files should stay in the correct sub directory. */ + @Test + public void testLogFilesDirCheck() + throws Throwable { + + /* Generating some log files. */ + doTestWork(false, false); + + /* Copy the log files from one sub directory to another. */ + File[] files = envHome.listFiles(); + String copySubName = null; + for (File file : files) { + if (file.isDirectory() && file.getName().startsWith("data")) { + if (copySubName == null) { + copySubName = file.getName(); + } else { + assertTrue(!copySubName.equals(file.getName())); + SharedTestUtils.copyFiles + (new File(envHome, copySubName), + new File(envHome, file.getName())); + break; + } + } + } + + try { + new Environment(envHome, createEnvConfig(false)); + fail("Expected to see exceptions."); + } catch (EnvironmentFailureException e) { + /* Expected exceptions. */ + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + } + + private void checkDbContents(Database db, int start, int end) + throws Exception { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + final Cursor c = db.openCursor(null, null); + for (int i = start; i <= end; i++) { + assertEquals("i=" + i, + OperationStatus.SUCCESS, + c.getNext(key, data, null)); + assertEquals(keyPrefix + i, StringBinding.entryToString(key)); + assertEquals(dataValue + dataValue, + StringBinding.entryToString(data)); + } + assertEquals(OperationStatus.NOTFOUND, c.getNext(key, data, null)); + c.close(); + } +} diff --git a/test/com/sleepycat/je/EnvironmentConfigTest.java b/test/com/sleepycat/je/EnvironmentConfigTest.java new file mode 100644 index 0000000..df5a11d --- /dev/null +++ b/test/com/sleepycat/je/EnvironmentConfigTest.java @@ -0,0 +1,245 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Properties;
+
+import org.junit.Test;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+public class EnvironmentConfigTest extends TestBase {
+ private final File envHome;
+
+ public EnvironmentConfigTest() {
+ envHome = SharedTestUtils.getTestDir();
+ }
+
+ /**
+ * Try out the validation in EnvironmentConfig.
+ */
+ @Test
+ public void testValidation() {
+
+ /*
+ * This validation should be successful.
+ */
+ Properties props = new Properties();
+ props.setProperty(EnvironmentConfig.TXN_TIMEOUT, "10000");
+ props.setProperty(EnvironmentConfig.TXN_DEADLOCK_STACK_TRACE,
+ "true");
+ new EnvironmentConfig(props); // Just instantiate a config object.
+
+ /*
+ * Should fail: we should throw because leftover.param is not
+ * a valid parameter.
+ */
+ props.clear();
+ props.setProperty("leftover.param", "foo");
+ checkEnvironmentConfigValidation(props);
+
+ /*
+ * Should fail: we should throw because je.lock.nLockTables is
+ * less than its minimum.
+ */
+ props.clear();
+ props.setProperty(EnvironmentConfig.LOCK_N_LOCK_TABLES, "0");
+ checkEnvironmentConfigValidation(props);
+
+ /*
+ * Should fail: we should throw because "xxx" is not a valid
+ * boolean value for je.txn.deadlockStackTrace.
+ */
+ props.clear();
+ props.setProperty(EnvironmentConfig.TXN_DEADLOCK_STACK_TRACE, "xxx");
+ checkEnvironmentConfigValidation(props);
+ }
+
+ /**
+ * Test single parameter setting.
+ */
+ @Test
+ public void testSingleParam()
+ throws Exception {
+
+ try {
+ EnvironmentConfig config = new EnvironmentConfig();
+ config.setConfigParam("foo", "7");
+ fail("Should fail because of invalid param name");
+ } catch (IllegalArgumentException e) {
+ // expected.
+ }
+
+ EnvironmentConfig config = new EnvironmentConfig();
+ config.setConfigParam(EnvironmentParams.MAX_MEMORY_PERCENT.getName(),
+ "81");
+ assertEquals(81, config.getCachePercent());
+ }
+
+ /*
+ * Test that a replicated config param is rejected when set on a
+ * standalone Environment.
+ */
+ @Test
+ public void testRepParam()
+ throws Exception {
+
+ /* The replicated property name and value. */
+ final String propName = "je.rep.maxMessageSize";
+ final String propValue = "1000000";
+
+ /*
+ * Setting this configuration through the EnvironmentConfig class
+ * should fail.
+ */
+ EnvironmentConfig envConfig = new EnvironmentConfig();
+ try {
+ envConfig.setConfigParam(propName, propValue);
+ fail("Should fail because it's a replicated parameter");
+ } catch (IllegalArgumentException e) {
+ /* Expected exception here. */
+ } catch (Exception e) {
+ fail("Unexpected exception: " + e);
+ }
+
+ /* Write the property into je.properties, saving its former contents. */
+ ArrayList formerLines = TestUtils.readWriteJEProperties
+ (envHome, propName + "=" + propValue + "\n");
+
+ /* Opening the Environment should nevertheless succeed. */
+ Environment env = null;
+ try {
+ envConfig.setAllowCreate(true);
+ env = new Environment(envHome, envConfig);
+ } catch (Exception e) {
+ fail("Unexpected exception: " + e);
+ }
+
+ /*
+ * Check that getting the value for je.rep.maxMessageSize will throw
+ * an exception.
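+ * A standalone environment does not register the replication params, so
+ * DbConfigManager.get() cannot resolve je.rep.maxMessageSize and is
+ * expected to throw IllegalArgumentException.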
+ */
+ try {
+ EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ envImpl.getConfigManager().get(propName);
+ fail("Expected to see an exception here.");
+ } catch (IllegalArgumentException e) {
+ /* Expected exception here. */
+ } catch (Exception e) {
+ fail("Unexpected exception: " + e);
+ } finally {
+ if (env != null) {
+ env.close();
+ }
+ }
+
+ TestUtils.reWriteJEProperties(envHome, formerLines);
+ }
+
+ @Test
+ public void testSerialize()
+ throws Exception {
+
+ final String nodeName = "env1";
+
+ EnvironmentConfig envConfig = new EnvironmentConfig();
+ /* Test the serialized fields of EnvironmentConfig. */
+ envConfig.setAllowCreate(true);
+ envConfig.setNodeName(nodeName);
+ /* Test the transient fields of EnvironmentConfig. */
+ envConfig.setTxnReadCommitted(true);
+ /* Test the serialized fields of EnvironmentMutableConfig. */
+ envConfig.setTransactional(true);
+ envConfig.setTxnNoSync(true);
+ envConfig.setCacheSize(100000);
+ envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER,
+ "false");
+ envConfig.setCacheMode(CacheMode.DEFAULT);
+ /* Test the transient fields in the EnvironmentMutableConfig. */
+ envConfig.setLoadPropertyFile(true);
+ envConfig.setExceptionListener(new ExceptionListener() {
+ public void exceptionThrown(ExceptionEvent event) {
+ }
+ });
+
+ EnvironmentConfig newConfig = (EnvironmentConfig)
+ TestUtils.serializeAndReadObject(envHome, envConfig);
+
+ assertTrue(newConfig != envConfig);
+ /* Check the serialized fields of EnvironmentConfig. */
+ assertEquals(newConfig.getAllowCreate(), envConfig.getAllowCreate());
+ assertEquals(newConfig.getNodeName(), nodeName);
+ /* Check the transient fields of EnvironmentConfig. */
+ assertFalse(newConfig.getCreateUP() == envConfig.getCreateUP());
+ assertNotSame
+ (newConfig.getCheckpointUP(), envConfig.getCheckpointUP());
+ assertNotSame
+ (newConfig.getTxnReadCommitted(), envConfig.getTxnReadCommitted());
+ /* Check the serialized fields of EnvironmentMutableConfig. */
+ assertEquals
+ (newConfig.getTransactional(), envConfig.getTransactional());
+ assertEquals(newConfig.getTxnNoSync(), envConfig.getTxnNoSync());
+ assertEquals(newConfig.getCacheSize(), envConfig.getCacheSize());
+ assertEquals(new DbConfigManager(newConfig).
+ get(EnvironmentConfig.ENV_RUN_CLEANER),
+ "false");
+ assertEquals(newConfig.getCacheMode(), envConfig.getCacheMode());
+ /* Check the transient fields of EnvironmentMutableConfig. */
+ assertFalse(newConfig.getLoadPropertyFile() ==
+ envConfig.getLoadPropertyFile());
+ assertFalse
+ (newConfig.getValidateParams() == envConfig.getValidateParams());
+ assertEquals(newConfig.getExceptionListener(), null);
+ }
+
+ @Test
+ public void testInconsistentParams()
+ throws Exception {
+
+ try {
+ EnvironmentConfig config = new EnvironmentConfig();
+ config.setAllowCreate(true);
+ config.setLocking(false);
+ config.setTransactional(true);
+ File envHome = SharedTestUtils.getTestDir();
+ new Environment(envHome, config);
+ fail("Should fail because of inconsistent param values");
+ } catch (IllegalArgumentException e) {
+ // expected.
+ }
+ }
+
+ /* Helper to catch expected exceptions. */
+ private void checkEnvironmentConfigValidation(Properties props) {
+ try {
+ new EnvironmentConfig(props);
+ fail("Should fail because of a parameter validation problem");
+ } catch (IllegalArgumentException e) {
+ // expected.
+ } + } +} diff --git a/test/com/sleepycat/je/EnvironmentStatTest.java b/test/com/sleepycat/je/EnvironmentStatTest.java new file mode 100644 index 0000000..4693a51 --- /dev/null +++ b/test/com/sleepycat/je/EnvironmentStatTest.java @@ -0,0 +1,413 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +public class EnvironmentStatTest extends TestBase { + + private final File envHome; + private static final String DB_NAME = "foo"; + + public EnvironmentStatTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Basic cache management stats. + */ + @Test + public void testCacheStats() + throws Exception { + + /* Init the Environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + Environment env = new Environment(envHome, envConfig); + + EnvironmentStats stat = env.getStats(TestUtils.FAST_STATS); + env.close(); + assertEquals(0, stat.getNCacheMiss()); + assertEquals(0, stat.getNNotResident()); + + // Try to open and close again, now that the environment exists + envConfig.setAllowCreate(false); + env = new Environment(envHome, envConfig); + + /* Open a database and insert some data. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, DB_NAME, dbConfig); + db.put(null, new DatabaseEntry(new byte[0]), + new DatabaseEntry(new byte[0])); + Transaction txn = env.beginTransaction(null, null); + db.put(txn, new DatabaseEntry(new byte[0]), + new DatabaseEntry(new byte[0])); + + /* Do the check. */ + stat = env.getStats(TestUtils.FAST_STATS); + MemoryBudget mb = + DbInternal.getNonNullEnvImpl(env).getMemoryBudget(); + + assertEquals(mb.getCacheMemoryUsage(), stat.getCacheTotalBytes()); + + /* + * The size of each log buffer is calculated by: + * mb.logBufferBudget/numBuffers, which is rounded down to the nearest + * integer. The stats count the precise capacity of the log + * buffers. Because of rounding down, the memory budget may be slightly + * > than the stats buffer bytes, but the difference shouldn't be + * greater than the numBuffers. 
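+ * As a worked example (hypothetical numbers, not measured from this
+ * environment): a 3,000,001-byte buffer budget split across 3 buffers
+ * yields 1,000,000 bytes per buffer; the stats then report 3,000,000 and
+ * the 1-byte shortfall is <= the number of buffers.
+ */
+
+ /*
+ * Hedged aside, not part of the original test: restate the floor
+ * division directly. perBufferBytes is a local name introduced here.
+ */
+ final long perBufferBytes =
+ mb.getLogBufferBudget() / stat.getNLogBuffers();
+ assertTrue(perBufferBytes * stat.getNLogBuffers() <=
+ mb.getLogBufferBudget());
+
+ /*
+ * The assertions below check the same bound using the stats.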
+ */
+ assertTrue((mb.getLogBufferBudget() - stat.getBufferBytes() <=
+ stat.getNLogBuffers()));
+ assertEquals(mb.getTreeMemoryUsage() + mb.getTreeAdminMemoryUsage(),
+ stat.getDataBytes());
+ assertEquals(mb.getLockMemoryUsage(), stat.getLockBytes());
+ assertEquals(mb.getAdminMemoryUsage(), stat.getAdminBytes());
+
+ assertTrue(stat.getBufferBytes() > 0);
+ assertTrue(stat.getDataBytes() > 0);
+ assertTrue(stat.getLockBytes() > 0);
+ assertTrue(stat.getAdminBytes() > 0);
+
+ /* Account for rounding down when calculating log buffer size. */
+ assertTrue(stat.getCacheTotalBytes() -
+ (stat.getBufferBytes() +
+ stat.getDataBytes() +
+ stat.getLockBytes() +
+ stat.getAdminBytes()) <= stat.getNLogBuffers());
+
+ assertTrue(stat.getNCacheMiss() > 10);
+ assertTrue(stat.getNNotResident() > 10);
+
+ /* Test deprecated getCacheDataBytes method. */
+ final EnvironmentStats finalStat = stat;
+ final long expectCacheDataBytes = mb.getCacheMemoryUsage() -
+ mb.getLogBufferBudget();
+ (new Runnable() {
+ @Deprecated
+ public void run() {
+ assertTrue((expectCacheDataBytes -
+ finalStat.getCacheDataBytes()) <=
+ finalStat.getNLogBuffers());
+ }
+ }).run();
+
+ txn.abort();
+ db.close();
+ env.close();
+ }
+
+ /**
+ * Test that fetching a LN larger than LOG_FAULT_READ_SIZE does not cause a
+ * repeat-fault-read, since the lastLoggedSize is known. Fetching large
+ * INs, however, will currently cause a repeat-fault-read.
+ */
+ @Test
+ public void testRepeatFaultReads()
+ throws Exception {
+
+ final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+ envConfig.setAllowCreate(true);
+ /* Use EVICT_LN to fetch LNs via Database.get(). */
+ envConfig.setCacheMode(CacheMode.EVICT_LN);
+ /* Disable the off-heap cache so evicted records are fetched from disk. */
+ envConfig.setOffHeapCacheSize(0);
+ /* Disable daemon threads for reliability. */
+ envConfig.setConfigParam(
+ EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false");
+ envConfig.setConfigParam(
+ EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false");
+ envConfig.setConfigParam(
+ EnvironmentConfig.ENV_RUN_CLEANER, "false");
+ envConfig.setConfigParam(
+ EnvironmentConfig.ENV_RUN_EVICTOR, "false");
+ envConfig.setConfigParam(
+ EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false");
+ envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+
+ final Environment env = new Environment(envHome, envConfig);
+
+ final int smallSize = 100;
+ final int bigSize = 100 * 1024;
+
+ final int readSize = Integer.parseInt(
+ env.getConfig().getConfigParam(
+ EnvironmentConfig.LOG_FAULT_READ_SIZE));
+
+ assertTrue(readSize < bigSize);
+ assertTrue(readSize > smallSize);
+
+ final DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setAllowCreate(true);
+ final Database db = env.openDatabase(null, DB_NAME, dbConfig);
+
+ final DatabaseEntry key = new DatabaseEntry(new byte[0]);
+ final DatabaseEntry data = new DatabaseEntry();
+
+ /*
+ * With embedded data, no fetch is needed.
+ */
+ data.setData(new byte[0]);
+ OperationResult result = db.put(null, key, data, Put.OVERWRITE, null);
+ assertNotNull(result);
+
+ clearLogBuffers(env);
+ env.getStats(StatsConfig.CLEAR);
+ result = db.get(null, key, data, Get.SEARCH, null);
+ assertNotNull(result);
+ EnvironmentStats stats = env.getStats(StatsConfig.CLEAR);
+
+ assertEquals(0, stats.getNLNsFetch());
+ assertEquals(0, stats.getNLNsFetchMiss());
+ assertEquals(0, stats.getNRepeatFaultReads());
+
+ /*
+ * With data smaller than read size, the fetch is not a repeat fault.
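+ * (A repeat-fault-read means the first read of LOG_FAULT_READ_SIZE
+ * bytes was too small to hold the whole log entry, forcing a second
+ * read once the true size is known.)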
+ */ + data.setData(new byte[smallSize]); + result = db.put(null, key, data, Put.OVERWRITE, null); + assertNotNull(result); + + clearLogBuffers(env); + env.getStats(StatsConfig.CLEAR); + result = db.get(null, key, data, Get.SEARCH, null); + assertNotNull(result); + stats = env.getStats(StatsConfig.CLEAR); + + assertEquals(1, stats.getNLNsFetch()); + assertEquals(1, stats.getNLNsFetchMiss()); + assertEquals(0, stats.getNRepeatFaultReads()); + + /* + * With data larger than read size, the fetch is still not a repeat + * fault because the lastLoggedSize is known for LNs. + */ + data.setData(new byte[bigSize]); + result = db.put(null, key, data, Put.OVERWRITE, null); + assertNotNull(result); + + clearLogBuffers(env); + env.getStats(StatsConfig.CLEAR); + result = db.get(null, key, data, Get.SEARCH, null); + assertNotNull(result); + stats = env.getStats(StatsConfig.CLEAR); + + assertEquals(1, stats.getNLNsFetch()); + assertEquals(1, stats.getNLNsFetchMiss()); + assertEquals(0, stats.getNRepeatFaultReads()); + + /* + * Fetching an IN larger than read size will cause a repeat fault + * because the lastLoggedSize is not known for INs. + */ + key.setData(new byte[bigSize]); + data.setData(new byte[0]); + result = db.put(null, key, data, Put.OVERWRITE, null); + assertNotNull(result); + + /* Flush BIN to disk, since a dirty BIN cannot be evicted. */ + env.checkpoint( + new CheckpointConfig(). + setMinimizeRecoveryTime(true). + setForce(true)); + + /* Use EVICT_BIN to evict the non-dirty BIN. */ + result = db.get( + null, key, data, Get.SEARCH, + new ReadOptions().setCacheMode(CacheMode.EVICT_BIN)); + assertNotNull(result); + + clearLogBuffers(env); + env.getStats(StatsConfig.CLEAR); + result = db.get(null, key, data, Get.SEARCH, null); + assertNotNull(result); + stats = env.getStats(StatsConfig.CLEAR); + + assertEquals(0, stats.getNLNsFetch()); + assertEquals(0, stats.getNLNsFetchMiss()); + assertEquals(1, stats.getNBINsFetch()); + assertEquals(1, stats.getNBINsFetchMiss()); + assertEquals(1, stats.getNRepeatFaultReads()); + + db.close(); + env.close(); + } + + /** + * Use internal APIs to clear the log buffer pool, to ensure that a fetch + * is performed from the file system. + */ + private void clearLogBuffers(final Environment env) { + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.getLogManager().resetPool(envImpl.getConfigManager()); + } + + /** + * Check stats to see if we correctly record nLogFsyncs (any fsync of the + * log) and nFSyncs(commit fsyncs) + */ + @Test + public void testFSyncStats() + throws Exception { + + /* The usual env and db setup */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + Environment env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + Database db = env.openDatabase(null, "foo", dbConfig); + DatabaseEntry value = new DatabaseEntry(); + Transaction txn = env.beginTransaction(null, null); + IntegerBinding.intToEntry(10, value); + db.put(txn, value, value); + + StatsConfig statConfig = new StatsConfig(); + statConfig.setClear(true); + /* Get a snapshot of the stats, for use as the starting point. */ + EnvironmentStats start = env.getStats(statConfig); + + /* + * The call to env.sync() provokes a ckpt, which does a group mgr type + * commit, so both getNFsyncs and nLogFSyncs are incremented. 
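+ * Recall the distinction under test: nFSyncs counts commit-style
+ * fsyncs issued via the fsync manager, while nLogFSyncs counts every
+ * fsync of the log, whatever the reason.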
+ */
+ env.sync();
+ EnvironmentStats postSync = env.getStats(statConfig);
+ assertEquals(1, postSync.getNFSyncs());
+ assertEquals(1, postSync.getNLogFSyncs());
+
+ /* Should be a transaction related fsync */
+ txn.commitSync();
+ EnvironmentStats postCommit = env.getStats(statConfig);
+ assertEquals(1, postCommit.getNFSyncs());
+ assertEquals(1, postCommit.getNLogFSyncs());
+
+ /* Should be a log fsync from the file flip, not a commit fsync */
+ DbInternal.getNonNullEnvImpl(env).forceLogFileFlip();
+ EnvironmentStats postFlip = env.getStats(statConfig);
+ assertEquals(0, postFlip.getNFSyncs());
+ assertEquals(1, postFlip.getNLogFSyncs());
+
+ /* Call API to test that a cast exception does not occur [#23060] */
+ postFlip.getAvgBatchManual();
+
+ db.close();
+ env.close();
+ }
+
+ /*
+ * Test that Database.sync() and Database.close() won't do an fsync if
+ * there are no updates on the database.
+ */
+ @Test
+ public void testDbFSyncs()
+ throws Exception {
+
+ EnvironmentConfig envConfig = new EnvironmentConfig();
+ envConfig.setAllowCreate(true);
+ envConfig.setTransactional(false);
+
+ Environment env = new Environment(envHome, envConfig);
+
+ /* Check the normal database. */
+ checkCloseFSyncs(env, false);
+
+ /* Flip to a new log file to make sure the next check will succeed. */
+ DbInternal.getNonNullEnvImpl(env).forceLogFileFlip();
+
+ /* Check the deferred-write database. */
+ checkCloseFSyncs(env, true);
+
+ env.close();
+ }
+
+ private void checkCloseFSyncs(Environment env, boolean deferredWrite)
+ throws Exception {
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setAllowCreate(true);
+ dbConfig.setDeferredWrite(deferredWrite);
+
+ Database[] dbs = new Database[1000];
+
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ for (int i = 0; i < 1000; i++) {
+ dbs[i] = env.openDatabase(null, "db" + i, dbConfig);
+ IntegerBinding.intToEntry(i, key);
+ StringBinding.stringToEntry("herococo", data);
+ dbs[i].put(null, key, data);
+ }
+
+ StatsConfig stConfig = new StatsConfig();
+ stConfig.setClear(true);
+
+ EnvironmentStats envStats = env.getStats(stConfig);
+ assertTrue(envStats.getNLogFSyncs() > 0);
+
+ env.sync();
+
+ envStats = env.getStats(stConfig);
+
+ /*
+ * The default log file size is 10M, so all 1000 records should be
+ * written to the same log file.
+ */
+ assertTrue(envStats.getNLogFSyncs() == 1);
+
+ if (deferredWrite) {
+ for (int i = 0; i < 1000; i++) {
+ dbs[i].sync();
+ }
+ envStats = env.getStats(stConfig);
+ assertTrue(envStats.getNLogFSyncs() == 0);
+ }
+
+ for (int i = 0; i < 1000; i++) {
+ dbs[i].close();
+ }
+
+ /*
+ * Whether or not the database is deferred-write, closing it causes
+ * no log fsyncs if no changes were made beforehand.
+ */
+ envStats = env.getStats(stConfig);
+ assertTrue(envStats.getNLogFSyncs() == 0);
+ }
+}
 diff --git a/test/com/sleepycat/je/EnvironmentTest.java b/test/com/sleepycat/je/EnvironmentTest.java new file mode 100644 index 0000000..ecfd20e --- /dev/null +++ b/test/com/sleepycat/je/EnvironmentTest.java @@ -0,0 +1,1810 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbType; +import com.sleepycat.je.dbi.EnvConfigObserver; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.latch.Latch; +import com.sleepycat.je.latch.LatchFactory; +import com.sleepycat.je.txn.LockInfo; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DaemonRunner; +import com.sleepycat.util.test.SharedTestUtils; + +import org.junit.Test; + +public class EnvironmentTest extends DualTestCase { + + private Environment env1; + private Environment env2; + private Environment env3; + private final File envHome; + + public EnvironmentTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Prints Java version as information for debugging. + */ + @Test + public void testDisplayJavaVersion() { + System.out.println( + "Java version: " + System.getProperty("java.version") + + " Vendor: " + System.getProperty("java.vendor")); + } + + /** + * Test open and close of an environment. + */ + @Test + public void testBasic() + throws Throwable { + + try { + assertEquals("Checking version", "7.5.11", + JEVersion.CURRENT_VERSION.getVersionString()); + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + /* Don't track detail with a tiny cache size. */ + envConfig.setConfigParam + (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam + (EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + close(env1); + + /* Try to open and close again, now that the environment exists. */ + envConfig.setAllowCreate(false); + env1 = create(envHome, envConfig); + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test creation of a reserved name fails. 
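+ * Internal databases such as the VLSN map use reserved names, and an
+ * attempt to create one via Environment.openDatabase must be rejected
+ * with IllegalArgumentException.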
+ */ + @Test + public void testNoCreateReservedNameDB() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + try { + env1.openDatabase(null, DbType.VLSN_MAP.getInternalName(), + dbConfig); + fail("expected DatabaseException since Environment not " + + "transactional"); + } catch (IllegalArgumentException IAE) { + } + + close(env1); + + /* Try to open and close again, now that the environment exists. */ + envConfig.setAllowCreate(false); + env1 = create(envHome, envConfig); + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test environment reference counting. + */ + @Test + public void testReferenceCounting() + throws Throwable { + + try { + + /* Create two environment handles on the same environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + /* Don't track detail with a tiny cache size. */ + envConfig.setConfigParam + (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + envConfig.setConfigParam + (EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + envConfig.setAllowCreate(false); + env2 = create(envHome, envConfig); + + assertEquals("DbEnvironments should be equal", + env1.getNonNullEnvImpl(), + env2.getNonNullEnvImpl()); + + /* Try to close one of them twice */ + EnvironmentImpl dbenv1 = env1.getNonNullEnvImpl(); + close(env1); + try { + close(env1); + } catch (DatabaseException DENOE) { + fail("Caught DatabaseException while re-closing " + + "an Environment."); + } + + /* + * Close both, open a third handle, should get a new + * EnvironmentImpl. + */ + close(env2); + env1 = create(envHome, envConfig); + assertTrue("EnvironmentImpl did not change", + dbenv1 != env1.getNonNullEnvImpl()); + try { + close(env2); + } catch (DatabaseException DENOE) { + fail("Caught DatabaseException while re-closing " + + "an Environment."); + } + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testTransactional() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + try { + env1.beginTransaction(null, null); + fail("should have thrown exception for non transactional "+ + " environment"); + } catch (UnsupportedOperationException expected) { + } + + String databaseName = "simpleDb"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + try { + env1.openDatabase(null, databaseName, dbConfig); + fail("expected IllegalArgumentException since Environment " + + " not transactional"); + } catch (IllegalArgumentException expected) { + } + + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Checks that Environment.flushLog writes buffered data to the log. + * [#19111] + */ + @Test + public void testFlushLog() { + + /* Open transactional environment. 
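+ * LOG_USE_WRITE_QUEUE is enabled so written data can sit in the file
+ * manager's write queue; flushLog is expected to drain that queue,
+ * which the hasQueuedWrites check below confirms.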
*/ + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + envConfig.setConfigParam(EnvironmentConfig.LOG_USE_WRITE_QUEUE, + "true"); + env1 = create(envHome, envConfig); + EnvironmentImpl envImpl = env1.getNonNullEnvImpl(); + final boolean isReplicated = envImpl.isReplicated(); + + /* Open transactional database. */ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setReplicated(true); + Database db1 = env1.openDatabase(null, "db1", dbConfig); + + /* Insert into database without flushing. */ + final DatabaseEntry key = new DatabaseEntry(new byte[10]); + final DatabaseEntry data = new DatabaseEntry(new byte[10]); + OperationStatus status = db1.putNoOverwrite(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Same for non-transactional database. */ + Database db2 = null; + if (!isReplicated) { + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(false); + dbConfig.setReplicated(false); + db2 = env1.openDatabase(null, "db2", dbConfig); + status = db2.putNoOverwrite(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + } + + /* Flush. */ + env1.flushLog(false /*fsync*/); + assertFalse(envImpl.getFileManager().hasQueuedWrites()); + env1.flushLog(true /*fsync*/); + + /* Crash, re-open and check. */ + abnormalClose(env1); + env1 = create(envHome, envConfig); + envImpl = env1.getNonNullEnvImpl(); + dbConfig.setTransactional(true); + dbConfig.setReplicated(true); + db1 = env1.openDatabase(null, "db1", dbConfig); + status = db1.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + if (!isReplicated) { + dbConfig.setTransactional(false); + dbConfig.setReplicated(false); + db2 = env1.openDatabase(null, "db2", dbConfig); + status = db2.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + } + + db1.close(); + if (!isReplicated) { + db2.close(); + } + close(env1); + } + + @Test + public void testReadOnly() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setReadOnly(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + String databaseName = "simpleDb"; + try { + env1.openDatabase(null, databaseName, dbConfig); + fail("expected DatabaseException since Environment is " + + "readonly"); + } catch (IllegalArgumentException expected) { + } + + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testReadOnlyDbNameOps() + throws DatabaseException { + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + /* Create read-write env and DB, insert one record, close. */ + { + final Environment envRW = create(envHome, envConfig); + Database db1 = envRW.openDatabase(null, "db1", dbConfig); + final DatabaseEntry dk = new DatabaseEntry(new byte[10]); + final DatabaseEntry dv = new DatabaseEntry(new byte[10]); + db1.put(null, dk,dv); + assertEquals(1, db1.count()); + db1.close(); + close(envRW); + } + + /* Open the env read-only. 
*/ + envConfig.setReadOnly(true); + envConfig.setAllowCreate(false); + final Environment envRO = create(envHome, envConfig); + assertTrue(envRO.getConfig().getReadOnly()); + + /* Check that truncate, remove and rename are not supported. */ + try { + envRO.truncateDatabase(null, "db1", true); + fail(); + } catch (UnsupportedOperationException expected) { + } + try { + envRO.removeDatabase(null, "db1"); + fail(); + } catch (UnsupportedOperationException expected) { + } + try { + envRO.renameDatabase(null, "db1", "db2"); + fail(); + } catch (UnsupportedOperationException expected) { + } + + /* Make sure the DB is still intact. */ + dbConfig.setReadOnly(true); + dbConfig.setAllowCreate(false); + Database db1 = envRO.openDatabase(null, "db1", dbConfig); + assertEquals(1, db1.count()); + db1.close(); + close(envRO); + } + + /* + * Tests memOnly mode with a home dir that does not exist. [#15255] + */ + @Test + public void testMemOnly() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setConfigParam + (EnvironmentParams.LOG_MEMORY_ONLY.getName(), "true"); + + File noHome = new File("fileDoesNotExist"); + assertTrue(!noHome.exists()); + env1 = create(noHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database db = env1.openDatabase(null, "foo", dbConfig); + + Transaction txn = env1.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + txn.commit(); + db.close(); + + close(env1); + assertTrue(!noHome.exists()); + } + + /** + * Tests that opening an environment after a clean close does not add to + * the log. + */ + @Test + public void testOpenWithoutCheckpoint() + throws Throwable { + + /* Open, close, open. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + close(env1); + env1 = create(envHome, null); + + /* Check that no checkpoint was performed. */ + EnvironmentStats stats = env1.getStats(null); + assertEquals(0, stats.getNCheckpoints()); + + close(env1); + env1 = null; + } + + /** + * Test environment configuration. + */ + @Test + @SuppressWarnings("deprecation") + public void testConfig() + throws Throwable { + + /* This tests assumes these props are immutable. */ + assertTrue(!isMutableConfig("je.lock.timeout")); + assertTrue(!isMutableConfig("je.env.isReadOnly")); + + try { + + /* + * Make sure that the environment keeps its own copy of the + * configuration object. + */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setReadOnly(true); + envConfig.setAllowCreate(true); + envConfig.setLockTimeout(7777); + env1 = create(envHome, envConfig); + + /* + * Change the environment config object, make sure the + * environment cloned a copy when it was opened. + */ + envConfig.setReadOnly(false); + EnvironmentConfig retrievedConfig1 = env1.getConfig(); + assertTrue(envConfig != retrievedConfig1); + assertEquals(true, retrievedConfig1.getReadOnly()); + assertEquals(true, retrievedConfig1.getAllowCreate()); + assertEquals(7000, retrievedConfig1.getLockTimeout()); + assertEquals + (7, retrievedConfig1.getLockTimeout(TimeUnit.MILLISECONDS)); + + /* + * Make sure that the environment returns a cloned config + * object when you call Environment.getConfig. 
+ */ + retrievedConfig1.setReadOnly(false); + EnvironmentConfig retrievedConfig2 = env1.getConfig(); + assertEquals(true, retrievedConfig2.getReadOnly()); + assertTrue(retrievedConfig1 != retrievedConfig2); + + /* + * Open a second environment handle, check that its attributes + * are available. + */ + env2 = create(envHome, null); + EnvironmentConfig retrievedConfig3 = env2.getConfig(); + assertEquals(true, retrievedConfig3.getReadOnly()); + assertEquals(7000, retrievedConfig3.getLockTimeout()); + + /* + * Open an environment handle on an existing environment with + * mismatching config params. + */ + try { + create(envHome, TestUtils.initEnvConfig()); + fail("Shouldn't open, config param has wrong number of params"); + } catch (IllegalArgumentException e) { + /* expected */ + } + + try { + envConfig.setLockTimeout(8888); + create(envHome, envConfig); + fail("Shouldn't open, cache size doesn't match"); + } catch (IllegalArgumentException e) { + /* expected */ + } + + /* + * Ditto for the mutable attributes. + */ + EnvironmentMutableConfig mutableConfig = + new EnvironmentMutableConfig(); + mutableConfig.setTxnNoSync(true); + env1.setMutableConfig(mutableConfig); + EnvironmentMutableConfig retrievedMutableConfig1 = + env1.getMutableConfig(); + assertTrue(mutableConfig != retrievedMutableConfig1); + retrievedMutableConfig1.setTxnNoSync(false); + EnvironmentMutableConfig retrievedMutableConfig2 = + env1.getMutableConfig(); + assertEquals(true, retrievedMutableConfig2.getTxnNoSync()); + assertTrue(retrievedMutableConfig1 != retrievedMutableConfig2); + + /* + * Plus check that mutables can be retrieved via the main config. + */ + EnvironmentConfig retrievedConfig4 = env1.getConfig(); + assertEquals(true, retrievedConfig4.getTxnNoSync()); + retrievedConfig4 = env2.getConfig(); + assertEquals(false, retrievedConfig4.getTxnNoSync()); + + /* + * Check that mutables can be passed to the ctor. + */ + EnvironmentConfig envConfig3 = env2.getConfig(); + assertEquals(false, envConfig3.getTxnNoSync()); + envConfig3.setTxnNoSync(true); + env3 = create(envHome, envConfig3); + EnvironmentMutableConfig retrievedMutableConfig3 = + env3.getMutableConfig(); + assertNotSame(envConfig3, retrievedMutableConfig3); + assertEquals(true, retrievedMutableConfig3.getTxnNoSync()); + close(env1); + close(env2); + close(env3); + env1 = null; + env2 = null; + env3 = null; + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test the semantics of env-wide mutable config properties. + */ + @Test + public void testMutableConfig() + throws DatabaseException { + + /* + * Note that during unit testing the shared je.properties is expected + * to be empty, so we don't test the application of je.properties here. + */ + final String P1 = EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(); + final String P2 = EnvironmentParams.ENV_RUN_CLEANER.getName(); + final String P3 = EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(); + + assertTrue(isMutableConfig(P1)); + assertTrue(isMutableConfig(P2)); + assertTrue(isMutableConfig(P3)); + + EnvironmentConfig config; + EnvironmentMutableConfig mconfig; + + /* + * Create env1, first handle. + * P1 defaults to true. + * P2 is set to true (the default). + * P3 is set to false (not the default). 
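+ * P1 is deliberately left unset here so that its default applies.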
+ */ + config = TestUtils.initEnvConfig(); + config.setAllowCreate(true); + config.setConfigParam(P2, "true"); + config.setConfigParam(P3, "false"); + env1 = create(envHome, config); + check3Params(env1, P1, "true", P2, "true", P3, "false"); + + MyObserver observer = new MyObserver(); + env1.getNonNullEnvImpl().addConfigObserver(observer); + assertEquals(0, observer.testAndReset()); + + /* + * Open env2, second handle, test that no mutable params can be + * overridden. + * P1 is set to false. + * P2 is set to false. + * P3 is set to true. + */ + config = TestUtils.initEnvConfig(); + config.setConfigParam(P1, "false"); + config.setConfigParam(P2, "false"); + config.setConfigParam(P3, "true"); + env2 = create(envHome, config); + assertEquals(0, observer.testAndReset()); + check3Params(env1, P1, "true", P2, "true", P3, "false"); + + /* + * Set mutable config explicitly. + */ + mconfig = env2.getMutableConfig(); + mconfig.setConfigParam(P1, "false"); + mconfig.setConfigParam(P2, "false"); + mconfig.setConfigParam(P3, "true"); + env2.setMutableConfig(mconfig); + assertEquals(1, observer.testAndReset()); + check3Params(env2, P1, "false", P2, "false", P3, "true"); + + close(env1); + env1 = null; + close(env2); + env2 = null; + } + + /** + * Checks that je.txn.deadlockStackTrace is mutable and takes effect. + */ + @Test + public void testTxnDeadlockStackTrace() + throws DatabaseException { + + String name = EnvironmentParams.TXN_DEADLOCK_STACK_TRACE.getName(); + assertTrue(isMutableConfig(name)); + + EnvironmentConfig config = TestUtils.initEnvConfig(); + config.setAllowCreate(true); + config.setConfigParam(name, "true"); + env1 = create(envHome, config); + assertTrue(LockInfo.getDeadlockStackTrace()); + + EnvironmentMutableConfig mconfig = env1.getMutableConfig(); + mconfig.setConfigParam(name, "false"); + env1.setMutableConfig(mconfig); + assertTrue(!LockInfo.getDeadlockStackTrace()); + + mconfig = env1.getMutableConfig(); + mconfig.setConfigParam(name, "true"); + env1.setMutableConfig(mconfig); + assertTrue(LockInfo.getDeadlockStackTrace()); + + close(env1); + env1 = null; + } + + /** + * Checks three config parameter values. + */ + private void check3Params(Environment env, + String p1, String v1, + String p2, String v2, + String p3, String v3) + throws DatabaseException { + + EnvironmentConfig config = env.getConfig(); + + assertEquals(v1, config.getConfigParam(p1)); + assertEquals(v2, config.getConfigParam(p2)); + assertEquals(v3, config.getConfigParam(p3)); + + EnvironmentMutableConfig mconfig = env.getMutableConfig(); + + assertEquals(v1, mconfig.getConfigParam(p1)); + assertEquals(v2, mconfig.getConfigParam(p2)); + assertEquals(v3, mconfig.getConfigParam(p3)); + } + + /** + * Returns whether a config parameter is mutable. + */ + private boolean isMutableConfig(String name) { + ConfigParam param = EnvironmentParams.SUPPORTED_PARAMS.get(name); + assert param != null; + return param.isMutable(); + } + + /** + * Observes config changes and remembers how many times it was called. + */ + private static class MyObserver implements EnvConfigObserver { + + private int count = 0; + + public void envConfigUpdate(DbConfigManager mgr, + EnvironmentMutableConfig ignore) { + count += 1; + } + + int testAndReset() { + int result = count; + count = 0; + return result; + } + } + + /** + * Make sure that config param loading follows the right precedence. 
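+ * The precedence relied on below: values in the environment home's
+ * je.properties override values set programmatically on
+ * EnvironmentConfig, which in turn override the built-in defaults.
+ * Hence je.log.numBuffers is set to 88 in code but is expected to come
+ * back as 200 from propTest/je.properties.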
+ */ + @Test + public void testParamLoading() + throws Throwable { + + File testEnvHome = null; + try { + + /* + * A je.properties file has been put into + * /propTest/je.properties + */ + StringBuilder testPropsEnv = new StringBuilder(); + testPropsEnv.append(SharedTestUtils.getTestDir().getParent()); + testPropsEnv.append(File.separatorChar); + testPropsEnv.append("propTest"); + testEnvHome = new File(testPropsEnv.toString()); + TestUtils.removeLogFiles("testParamLoading start", + testEnvHome, false); + + /* + * Set some configuration params programatically. Do not use + * TestUtils.initEnvConfig since we're counting properties. + */ + EnvironmentConfig appConfig = new EnvironmentConfig(); + appConfig.setConfigParam("je.log.numBuffers", "88"); + appConfig.setConfigParam + ("je.log.totalBufferBytes", + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING + 10); + appConfig.setConfigParam("je.txn.durability", + "sync,sync,simple_majority"); + appConfig.setAllowCreate(true); + + Environment appEnv = create(testEnvHome, appConfig); + EnvironmentConfig envConfig = appEnv.getConfig(); + + assertEquals(4, envConfig.getNumExplicitlySetParams()); + assertEquals("false", + envConfig.getConfigParam("je.env.recovery")); + assertEquals("7001", + envConfig.getConfigParam("je.log.totalBufferBytes")); + assertEquals("200", + envConfig.getConfigParam("je.log.numBuffers")); + assertEquals("NO_SYNC,NO_SYNC,NONE", + envConfig.getConfigParam("je.txn.durability")); + assertEquals(new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE), + envConfig.getDurability()); + appEnv.close(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + finally { + TestUtils.removeLogFiles("testParamLoadingEnd", + testEnvHome, false); + } + } + + @Test + public void testDbRename() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + String databaseName = "simpleDb"; + String newDatabaseName = "newSimpleDb"; + + /* Try to rename a non-existent db. */ + try { + env1.renameDatabase(null, databaseName, newDatabaseName); + fail("Rename on non-existent db should fail"); + } catch (DatabaseException e) { + /* expect exception */ + } + + /* Now create a test db. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database exampleDb = env1.openDatabase(null, databaseName, + dbConfig); + + Transaction txn = env1.beginTransaction(null, null); + Cursor cursor = exampleDb.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + txn.commit(); + exampleDb.close(); + + dbConfig.setAllowCreate(false); + env1.renameDatabase(null, databaseName, newDatabaseName); + exampleDb = env1.openDatabase(null, newDatabaseName, dbConfig); + + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env1.beginTransaction(null, null); + } else { + txn = null; + } + + cursor = exampleDb.openCursor(txn, null); + // XXX doSimpleVerification(cursor); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + /* Check debug name. 
*/ + DatabaseImpl dbImpl = DbInternal.getDbImpl(exampleDb); + assertEquals(newDatabaseName, dbImpl.getDebugName()); + exampleDb.close(); + try { + exampleDb = env1.openDatabase(null, databaseName, dbConfig); + fail("didn't get db not found exception"); + } catch (DatabaseNotFoundException expected) { + } + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDbRenameCommit() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + String databaseName = "simpleRenameCommitDb"; + String newDatabaseName = "newSimpleRenameCommitDb"; + + Transaction txn = env1.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database exampleDb = env1.openDatabase(txn, databaseName, + dbConfig); + + Cursor cursor = exampleDb.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + exampleDb.close(); + + dbConfig.setAllowCreate(false); + env1.renameDatabase(txn, databaseName, newDatabaseName); + exampleDb = env1.openDatabase(txn, newDatabaseName, dbConfig); + cursor = exampleDb.openCursor(txn, null); + cursor.close(); + exampleDb.close(); + try { + exampleDb = env1.openDatabase(txn, databaseName, dbConfig); + fail("didn't get db not found exception"); + } catch (DatabaseNotFoundException expected) { + } + txn.commit(); + + try { + exampleDb = env1.openDatabase(null, databaseName, null); + fail("didn't catch DatabaseException opening old name"); + } catch (DatabaseNotFoundException expected) { + } + try { + exampleDb = env1.openDatabase(null, newDatabaseName, null); + exampleDb.close(); + } catch (DatabaseException DBE) { + fail("caught unexpected exception"); + } + + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDbRenameAbort() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + /* Create a database. */ + String databaseName = "simpleRenameAbortDb"; + String newDatabaseName = "newSimpleRenameAbortDb"; + Transaction txn = env1.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database exampleDb = + env1.openDatabase(txn, databaseName, dbConfig); + + /* Put some data in, close the database, commit. */ + Cursor cursor = exampleDb.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + exampleDb.close(); + txn.commit(); + + /* + * Rename under another txn, shouldn't be able to open under the + * old name. 
+ */ + txn = env1.beginTransaction(null, null); + env1.renameDatabase(txn, databaseName, newDatabaseName); + dbConfig.setAllowCreate(false); + exampleDb = env1.openDatabase(txn, newDatabaseName, dbConfig); + cursor = exampleDb.openCursor(txn, null); + // XXX doSimpleVerification(cursor); + cursor.close(); + exampleDb.close(); + try { + exampleDb = env1.openDatabase(txn, databaseName, dbConfig); + fail("didn't get db not found exception"); + } catch (DatabaseNotFoundException expected) { + } + + /* + * Abort the rename, should be able to open under the old name with + * empty props (DB_CREATE not set) + */ + txn.abort(); + exampleDb = new Database(env1); + try { + exampleDb = env1.openDatabase(null, databaseName, null); + exampleDb.close(); + } catch (DatabaseException dbe) { + fail("caught DatabaseException opening old name: " + + dbe.getMessage()); + } + + /* Shouldn't be able to open under the new name. */ + try { + exampleDb = env1.openDatabase(null, newDatabaseName, null); + fail("didn't catch DatabaseException opening new name"); + } catch (DatabaseNotFoundException expected) { + } + + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDbRemove() + throws Throwable { + + doDbRemove(true, false); + } + + @Test + public void testDbRemoveReadCommitted() + throws Throwable { + + doDbRemove(true, true); + } + + @Test + public void testDbRemoveNonTxnl() + throws Throwable { + + doDbRemove(false, false); + } + + private void doDbRemove(boolean txnl, boolean readCommitted) + throws Throwable { + + try { + /* Set up an environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(txnl); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + String databaseName = "simpleDb"; + + /* Try to remove a non-existent db */ + try { + env1.removeDatabase(null, databaseName); + fail("Remove of non-existent db should fail"); + } catch (DatabaseNotFoundException expected) { + } + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(txnl); + dbConfig.setAllowCreate(true); + Transaction txn = null; + if (txnl && readCommitted) { + /* Create, close, then open with ReadCommitted. */ + Database db = env1.openDatabase(txn, databaseName, dbConfig); + db.close(); + dbConfig.setAllowCreate(false); + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setReadCommitted(true); + txn = env1.beginTransaction(null, txnConfig); + } + Database exampleDb = + env1.openDatabase(txn, databaseName, dbConfig); + if (txn != null) { + txn.commit(); + } + + txn = null; + if (txnl) { + txn = env1.beginTransaction(null, null); + } + Cursor cursor = exampleDb.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + /* Remove should fail because database is open. */ + try { + env1.removeDatabase(null, databaseName); + fail("didn't get db open exception"); + } catch (DatabaseException DBE) { + } + exampleDb.close(); + + env1.removeDatabase(null, databaseName); + + /* Remove should fail because database does not exist. */ + try { + exampleDb = env1.openDatabase(null, databaseName, null); + fail("did not catch db does not exist exception"); + } catch (DatabaseNotFoundException expected) { + } + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDbRemoveCommit() + throws Throwable { + + try { + /* Set up an environment. 
*/ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + /* Make a database. */ + String databaseName = "simpleDb"; + Transaction txn = env1.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database exampleDb = + env1.openDatabase(txn, databaseName, dbConfig); + + /* Insert and delete data in it. */ + Cursor cursor = exampleDb.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + + /* + * Try a remove without closing the open Database handle. Should + * get an exception. + */ + try { + env1.removeDatabase(txn, databaseName); + fail("didn't get db open exception"); + } catch (IllegalStateException e) { + } + exampleDb.close(); + + /* Do a remove, try to open again. */ + env1.removeDatabase(txn, databaseName); + try { + dbConfig.setAllowCreate(false); + exampleDb = env1.openDatabase(txn, databaseName, dbConfig); + fail("did not catch db does not exist exception"); + } catch (DatabaseNotFoundException expected) { + } + txn.commit(); + + /* Try to open, the db should have been removed. */ + try { + exampleDb = env1.openDatabase(null, databaseName, null); + fail("did not catch db does not exist exception"); + } catch (DatabaseNotFoundException expected) { + } + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDbRemoveAbort() + throws Throwable { + + try { + /* Set up an environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + + /* Create a database, commit. */ + String databaseName = "simpleDb"; + Transaction txn = env1.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database exampleDb = + env1.openDatabase(txn, databaseName, dbConfig); + txn.commit(); + + /* Start a new txn and put some data in the created db. */ + txn = env1.beginTransaction(null, null); + Cursor cursor = exampleDb.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + + /* + * Try to remove, we should get an exception because the db is + * open. + */ + try { + env1.removeDatabase(txn, databaseName); + fail("didn't get db open exception"); + } catch (DatabaseException DBE) { + } + exampleDb.close(); + + /* + * txn can only be aborted at this point since the removeDatabase() + * timed out. + */ + txn.abort(); + txn = env1.beginTransaction(null, null); + env1.removeDatabase(txn, databaseName); + + try { + dbConfig.setAllowCreate(false); + exampleDb = env1.openDatabase(txn, databaseName, dbConfig); + fail("did not catch db does not exist exception"); + } catch (DatabaseNotFoundException expected) { + } + + /* Abort, should rollback the db remove. */ + txn.abort(); + + try { + DatabaseConfig dbConfig2 = new DatabaseConfig(); + dbConfig2.setTransactional(true); + exampleDb = env1.openDatabase(null, databaseName, dbConfig2); + } catch (DatabaseException DBE) { + fail("db does not exist anymore after delete/abort"); + } + + exampleDb.close(); + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Provides general testing of getDatabaseNames. 
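+     * (The method returns the user database names as a List; checkDbNames
+     * below compares that list against the expected Set.)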
+     * Additionally verifies a
+     * fix for a bug that occurred when the first DB (lowest valued name) was
+     * removed or renamed prior to calling getDatabaseNames. An NPE occurred
+     * in this case if the compressor had not yet deleted the BIN entry for
+     * the removed/renamed name. [#13377]
+     */
+    @Test
+    public void testGetDatabaseNames()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+
+        /* Start with no databases. */
+        Set<String> dbNames = new HashSet<String>();
+        env1 = create(envHome, envConfig);
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Add DB1. */
+        dbNames.add("DB1");
+        Database db = env1.openDatabase(null, "DB1", dbConfig);
+        db.close();
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Add DB2. */
+        dbNames.add("DB2");
+        db = env1.openDatabase(null, "DB2", dbConfig);
+        db.close();
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Rename DB2 to DB3 (this caused NPE). */
+        dbNames.remove("DB2");
+        dbNames.add("DB3");
+        env1.renameDatabase(null, "DB2", "DB3");
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Rename DB1 to DB4. */
+        dbNames.remove("DB1");
+        dbNames.add("DB4");
+        env1.renameDatabase(null, "DB1", "DB4");
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Add DB0. */
+        dbNames.add("DB0");
+        db = env1.openDatabase(null, "DB0", dbConfig);
+        db.close();
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        /* Remove DB0 (this caused NPE). */
+        dbNames.remove("DB0");
+        env1.removeDatabase(null, "DB0");
+        checkDbNames(dbNames, env1.getDatabaseNames());
+
+        close(env1);
+        env1 = null;
+    }
+
+    /**
+     * Checks that the expected set of names equals the list of names returned
+     * from getDatabaseNames. A list can't be directly compared to a set using
+     * equals().
+     */
+    private void checkDbNames(Set<String> expected, List<String> actual) {
+        assertEquals(expected.size(), actual.size());
+        assertEquals(expected, new HashSet<String>(actual));
+    }
+
+    /*
+     * This little test case can only invoke the compressor, since the evictor,
+     * cleaner and checkpointer are all governed by utilization metrics and are
+     * tested elsewhere.
+     */
+    @Test
+    public void testDaemonManualInvocation()
+        throws Throwable {
+
+        try {
+            /* Set up an environment. 
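+             * The pattern under test, in outline: disable the daemon via
+             * ENV_RUN_INCOMPRESSOR=false, delete records so BINs queue up
+             * for compression, call env.compress() by hand, and confirm
+             * via EnvironmentStats that compressor work actually ran.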
*/ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + String testPropVal = "120000000"; + envConfig.setConfigParam + (EnvironmentParams.COMPRESSOR_WAKEUP_INTERVAL.getName(), + testPropVal); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.LOG_MEM_SIZE.getName(), "20000"); + envConfig.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + env1 = create(envHome, envConfig); + + String databaseName = "simpleDb"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database exampleDb = + env1.openDatabase(null, databaseName, dbConfig); + + Transaction txn = env1.beginTransaction(null, null); + Cursor cursor = exampleDb.openCursor(txn, null); + doSimpleCursorPutAndDelete(cursor, false); + cursor.close(); + txn.commit(); + exampleDb.close(); + EnvironmentStats envStats = env1.getStats(TestUtils.FAST_STATS); + env1.compress(); + + envStats = env1.getStats(TestUtils.FAST_STATS); + long compressorTotal = + envStats.getSplitBins() + + envStats.getDbClosedBins() + + envStats.getCursorsBins() + + envStats.getNonEmptyBins() + + envStats.getProcessedBins() + + envStats.getInCompQueueSize(); + assertTrue(compressorTotal > 0); + + close(env1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Tests that each daemon can be turned on and off dynamically. + */ + @Test + public void testDaemonRunPause() + throws DatabaseException, InterruptedException { + + final String[] runProps = { + EnvironmentParams.ENV_RUN_CLEANER.getName(), + EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), + EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), + }; + + EnvironmentConfig config = TestUtils.initEnvConfig(); + config.setAllowCreate(true); + + config.setConfigParam + (EnvironmentParams.MAX_MEMORY.getName(), + MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING); + /* Don't track detail with a tiny cache size. 
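+         * (CLEANER_TRACK_DETAIL keeps per-file utilization detail in the
+         * cache; with MAX_MEMORY forced to its minimum, presumably that
+         * detail would not fit in the memory budget.)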
*/ + config.setConfigParam + (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false"); + config.setConfigParam + (EnvironmentParams.CLEANER_BYTES_INTERVAL.getName(), + "100"); + config.setConfigParam + (EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL.getName(), + "100"); + config.setConfigParam + (EnvironmentParams.COMPRESSOR_WAKEUP_INTERVAL.getName(), + "1000000"); + config.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + config.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + setBoolConfigParams(config, runProps, + new boolean[] { false, false, false, false }); + + env1 = create(envHome, config); + EnvironmentImpl envImpl = env1.getNonNullEnvImpl(); + + final DaemonRunner[] daemons = { + envImpl.getCleaner(), + envImpl.getCheckpointer(), + envImpl.getINCompressor(), + }; + + //* + doTestDaemonRunPause(env1, daemons, runProps, + new boolean[] { false, false, false, false }); + doTestDaemonRunPause(env1, daemons, runProps, + new boolean[] { true, false, false, false }); + if (!envImpl.isNoLocking()) { + doTestDaemonRunPause(env1, daemons, runProps, + new boolean[] { false, true, false, false }); + } + //*/ + doTestDaemonRunPause(env1, daemons, runProps, + new boolean[] { false, false, true, false }); + //* + doTestDaemonRunPause(env1, daemons, runProps, + new boolean[] { false, false, false, true }); + doTestDaemonRunPause(env1, daemons, runProps, + new boolean[] { false, false, false, false }); + + //*/ + close(env1); + env1 = null; + } + + /** + * Tests a set of daemon on/off settings. + */ + private void doTestDaemonRunPause(Environment env, + DaemonRunner[] daemons, + String[] runProps, + boolean[] runValues) + throws DatabaseException, InterruptedException { + + /* Set daemon run properties. */ + EnvironmentMutableConfig config = env.getMutableConfig(); + setBoolConfigParams(config, runProps, runValues); + env.setMutableConfig(config); + + /* Allow previously running daemons to come to a stop. */ + for (int i = 0; i < 10; i += 1) { + Thread.yield(); + Thread.sleep(10); + } + + /* Get current wakeup counts. */ + int[] prevCounts = new int[daemons.length]; + for (int i = 0; i < prevCounts.length; i += 1) { + prevCounts[i] = daemons[i].getNWakeupRequests(); + } + + /* Write some data to wakeup the checkpointer, cleaner and evictor. */ + String dbName = "testDaemonRunPause"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + Database db = env1.openDatabase(null, dbName, dbConfig); + Cursor cursor = db.openCursor(null, null); + doSimpleCursorPutAndDelete(cursor, true); + cursor.close(); + db.close(); + + /* Sleep for a while to wakeup the compressor. */ + Thread.sleep(1000); + + /* Check that the expected daemons were woken. 
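+         * A daemon counts as woken when its getNWakeupRequests() value
+         * has advanced past the snapshot taken above; the assertion ties
+         * that to the run flag configured for this pass.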
*/ + for (int i = 0; i < prevCounts.length; i += 1) { + int currNWakeups = daemons[i].getNWakeupRequests(); + boolean woken = prevCounts[i] < currNWakeups; + assertEquals(daemons[i].getClass().getName() + + " prevNWakeups=" + prevCounts[i] + + " currNWakeups=" + currNWakeups, + runValues[i], woken); + } + } + + private void setBoolConfigParams(EnvironmentMutableConfig config, + String[] names, + boolean[] values) { + for (int i = 0; i < names.length; i += 1) { + config.setConfigParam(names[i], + Boolean.valueOf(values[i]).toString()); + } + } + + @Test + @SuppressWarnings("deprecation") + public void testExceptions() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + close(env1); + + /* Test for exceptions on closed environments via public APIs */ + + try { + env1.openDatabase(null, null, null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.openSecondaryDatabase(null, null, null, null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.removeDatabase(null, "name"); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.renameDatabase(null, "old", "new"); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.truncateDatabase(null, "name", false); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.removeDatabase(null, "name"); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.beginTransaction(null, null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.checkpoint(null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.sync(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.cleanLog(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.evictMemory(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.compress(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.getConfig(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.setMutableConfig(null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.getMutableConfig(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.getStats(null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.getLockStats(null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.getTransactionStats(null); + 
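+                // like every other call above, stats on a closed env
+                // must fail with IllegalStateException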
fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.getDatabaseNames(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.verify(null,null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.getThreadTransaction(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.setThreadTransaction(null); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + + try { + env1.checkOpen(); + fail("Expected IllegalStateException for op on closed env"); + } catch (IllegalStateException expected) { + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testClose() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env1 = create(envHome, envConfig); + close(env1); + + envConfig.setAllowCreate(false); + env1 = create(envHome, envConfig); + + /* Create a transaction to prevent the close from succeeding */ + env1.beginTransaction(null, null); + try { + close(env1); + fail("Expected IllegalStateException for open transactions"); + } catch (IllegalStateException expected) { + } + + try { + close(env1); + } catch (DatabaseException DENOE) { + fail("Caught DatabaseException while re-closing " + + "an Environment."); + } + + env1 = create(envHome, envConfig); + + String databaseName = "simpleDb"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + env1.openDatabase(null, databaseName, dbConfig); + env1.openDatabase(null, databaseName + "2", dbConfig); + try { + close(env1); + fail("Expected IllegalStateException for open dbs"); + } catch (IllegalStateException expected) { + } + try { + close(env1); + } catch (Exception e) { + fail("Caught DatabaseException while re-closing " + + "an Environment."); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + protected String[] simpleKeyStrings = { + "foo", "bar", "baz", "aaa", "fubar", + "foobar", "quux", "mumble", "froboy" }; + + protected String[] simpleDataStrings = { + "one", "two", "three", "four", "five", + "six", "seven", "eight", "nine" }; + + protected void doSimpleCursorPutAndDelete(Cursor cursor, boolean extras) + throws DatabaseException { + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + for (int i = 0; i < simpleKeyStrings.length; i++) { + foundKey.setString(simpleKeyStrings[i]); + foundData.setString(simpleDataStrings[i]); + OperationStatus status = + cursor.putNoOverwrite(foundKey, foundData); + if (status != OperationStatus.SUCCESS) { + fail("non-success return " + status); + } + /* Need to write some extra out to force eviction to run. 
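+             * (The 500 overwrites per key in the loop below generate enough
+             * log and cache churn to nudge the write-triggered daemons when
+             * they are enabled.)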
*/ + if (extras) { + for (int j = 0; j < 500; j++) { + foundData.setString(Integer.toString(j)); + status = cursor.put(foundKey, foundData); + if (status != OperationStatus.SUCCESS) { + fail("non-success return " + status); + } + } + } + } + + OperationStatus status = + cursor.getFirst(foundKey, foundData, LockMode.DEFAULT); + + while (status == OperationStatus.SUCCESS) { + cursor.delete(); + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + } + } + + protected void doSimpleVerification(Cursor cursor) + throws DatabaseException { + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + int count = 0; + OperationStatus status = cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + + while (status == OperationStatus.SUCCESS) { + count++; + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + } + assertEquals(simpleKeyStrings.length, count); + } + + /** + * Test that a latch timeout occurs. We manually confirm that full stack + * trace appears in the je.info log. + * + * This test is here rather than in LatchTest only because an + * EnvironmentImpl is needed to create a thread dump. + */ + @Test + public void testLatchTimeout() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setConfigParam( + EnvironmentParams.ENV_LATCH_TIMEOUT.getName(), "1 ms"); + + env1 = create(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env1); + + final Latch latch = LatchFactory.createExclusiveLatch( + envImpl, "test", false); + + final CountDownLatch latched = new CountDownLatch(1); + + Thread thread = new Thread() { + @Override + public void run() { + latch.acquireExclusive(); + latched.countDown(); + try { + Thread.sleep(10 * 1000); + } catch (InterruptedException expected) { + } + } + }; + + thread.start(); + + latched.await(1000, TimeUnit.MILLISECONDS); + + try { + latch.acquireExclusive(); + fail("Expected latch timeout"); + } catch (EnvironmentFailureException e) { + assertTrue( + e.getMessage(), e.getMessage().contains("Latch timeout")); + } finally { + thread.interrupt(); + } + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (env1 != null) { + try { + abnormalClose(env1); + } catch (Throwable expected) { + } + env1 = null; + } + } + } +} diff --git a/test/com/sleepycat/je/GetSearchBothRangeTest.java b/test/com/sleepycat/je/GetSearchBothRangeTest.java new file mode 100644 index 0000000..fb85e4a --- /dev/null +++ b/test/com/sleepycat/je/GetSearchBothRangeTest.java @@ -0,0 +1,502 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.util.Comparator; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Tests getSearchBothRange when searching for a key that doesn't exist. + * [#11119] + */ +public class GetSearchBothRangeTest extends DualTestCase { + + private File envHome; + private Environment env; + private Database db; + private boolean dups; + + public GetSearchBothRangeTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Open environment and database. + */ + private void openEnv() + throws DatabaseException { + + openEnvWithComparator(null); + } + + private void openEnvWithComparator(Class comparatorClass) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + //* + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + //*/ + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(dups); + dbConfig.setAllowCreate(true); + + dbConfig.setBtreeComparator(comparatorClass); + + db = env.openDatabase(null, "GetSearchBothRangeTest", dbConfig); + } + + /** + * Close environment and database. + */ + private void closeEnv() + throws DatabaseException { + + db.close(); + db = null; + close(env); + env = null; + } + + @Test + public void testSearchKeyRangeWithDupTree() + throws Exception { + + dups = true; + openEnv(); + + insert(1, 1); + insert(1, 2); + insert(3, 1); + + DatabaseEntry key = entry(2); + DatabaseEntry data = new DatabaseEntry(); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, val(key)); + assertEquals(1, val(data)); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testSearchBothWithNoDupTree() + throws Exception { + + dups = true; + openEnv(); + + insert(1, 1); + + DatabaseEntry key = entry(1); + DatabaseEntry data = entry(2); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBoth(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + + key = entry(1); + data = entry(1); + + cursor = db.openCursor(txn, null); + status = cursor.getSearchBoth(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, val(key)); + assertEquals(1, val(data)); + cursor.close(); + + key = entry(1); + data = entry(0); + + cursor = db.openCursor(txn, null); + status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, val(key)); + assertEquals(1, val(data)); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testSuccess() + throws DatabaseException { + + openEnv(); + insert(1, 1); + insert(3, 1); + if (dups) { + 
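+            // dups is false in testSuccess, so this branch is skipped;
+            // testSuccessDup (below) sets dups = true and exercises it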
insert(1, 2); + insert(3, 2); + } + + DatabaseEntry key = entry(3); + DatabaseEntry data = entry(0); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + data = entry(1); + status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, val(key)); + assertEquals(1, val(data)); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testSuccessDup() + throws DatabaseException { + + dups = true; + + openEnv(); + insert(1, 1); + insert(3, 1); + if (dups) { + insert(1, 2); + insert(3, 2); + } + + DatabaseEntry key = entry(3); + DatabaseEntry data = entry(0); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, val(key)); + assertEquals(1, val(data)); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testNotFound() + throws DatabaseException { + + openEnv(); + insert(1, 0); + if (dups) { + insert(1, 1); + } + + DatabaseEntry key = entry(2); + DatabaseEntry data = entry(0); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testNotFoundDup() + throws DatabaseException { + + dups = true; + testNotFound(); + } + + @Test + public void testSearchBefore() + throws DatabaseException { + + dups = true; + openEnv(); + insert(1, 0); + + DatabaseEntry key = entry(1); + DatabaseEntry data = entry(2); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testSearchBeforeDups() + throws DatabaseException { + + dups = true; + openEnv(); + insert(1, 1); + insert(1, 2); + + DatabaseEntry key = entry(1); + DatabaseEntry data = entry(0); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, val(key)); + assertEquals(1, val(data)); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + public static class NormalComparator implements Comparator { + + public NormalComparator() { + } + + public int compare(Object o1, Object o2) { + + DatabaseEntry arg1 = new DatabaseEntry((byte[]) o1); + DatabaseEntry arg2 = new DatabaseEntry((byte[]) o2); + int val1 = IntegerBinding.entryToInt(arg1); + int val2 = 
IntegerBinding.entryToInt(arg2); + + if (val1 < val2) { + return -1; + } else if (val1 > val2) { + return 1; + } else { + return 0; + } + } + } + + @Test + public void testSearchAfterDups() + throws DatabaseException { + + dups = true; + openEnv(); + insert(1, 0); + insert(1, 1); + insert(2, 0); + insert(2, 1); + + DatabaseEntry key = entry(1); + DatabaseEntry data = entry(2); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testSearchAfterDupsWithComparator() + throws DatabaseException { + + dups = true; + openEnvWithComparator(NormalComparator.class); + insert(1, 0); + insert(1, 1); + insert(2, 0); + insert(2, 1); + + DatabaseEntry key = entry(1); + DatabaseEntry data = entry(2); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + if (txn != null) { + txn.commit(); + } + + closeEnv(); + } + + @Test + public void testSearchAfterDeletedDup() + throws DatabaseException { + + dups = true; + openEnv(); + insert(1, 1); + insert(1, 2); + insert(1, 3); + + /* Delete {1,3} leaving {1,1} in dup tree. */ + Transaction txn = null; + txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + DatabaseEntry key = entry(1); + DatabaseEntry data = entry(3); + OperationStatus status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + cursor.delete(); + cursor.close(); + env.compress(); + + /* Search for {1,3} and expect NOTFOUND. */ + cursor = db.openCursor(txn, null); + key = entry(1); + data = entry(3); + status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + txn.commit(); + + closeEnv(); + } + + @Test + public void testSingleDatumBug() + throws DatabaseException { + + dups = true; + openEnv(); + insert(1, 1); + insert(2, 2); + + Transaction txn = null; + if (DualTestCase.isReplicatedTest(getClass())) { + txn = env.beginTransaction(null, null); + } + + /* Search for {1,2} and expect NOTFOUND. 
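+         * getSearchBothRange matches the key exactly, then positions at
+         * the first duplicate >= the given datum; key 1 holds only datum
+         * 1, so nothing >= 2 exists and the search fails.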
*/
+        Cursor cursor = db.openCursor(txn, null);
+        DatabaseEntry key = entry(1);
+        DatabaseEntry data = entry(2);
+        OperationStatus status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+        cursor.close();
+        if (txn != null) {
+            txn.commit();
+        }
+
+        closeEnv();
+    }
+
+    private int val(DatabaseEntry entry) {
+        return IntegerBinding.entryToInt(entry);
+    }
+
+    private DatabaseEntry entry(int val) {
+        DatabaseEntry entry = new DatabaseEntry();
+        IntegerBinding.intToEntry(val, entry);
+        return entry;
+    }
+
+    private void insert(int keyVal, int dataVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(keyVal, key);
+        IntegerBinding.intToEntry(dataVal, data);
+        OperationStatus status;
+        if (dups) {
+            status = db.putNoDupData(null, key, data);
+        } else {
+            status = db.putNoOverwrite(null, key, data);
+        }
+        assertEquals(OperationStatus.SUCCESS, status);
+    }
+}
diff --git a/test/com/sleepycat/je/InterruptTest.java b/test/com/sleepycat/je/InterruptTest.java
new file mode 100644
index 0000000..0d46bea
--- /dev/null
+++ b/test/com/sleepycat/je/InterruptTest.java
@@ -0,0 +1,198 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.fail;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+import com.sleepycat.utilint.StringUtils;
+
+/**
+ * @author Paul.Kendall@orionhealth.com
+ *
+ * This test throws thread interrupts while JE is doing I/O intensive
+ * work. When an interrupt is received during various NIO activities, NIO
+ * closes the underlying file descriptor. In this multi-threaded test, abruptly
+ * closing the file descriptor causes exceptions such as
+ * java.nio.channels.ClosedChannelException, because the uninterrupted thread
+ * may be in the middle of using that file.
+ *
+ * JE must convert all such exceptions to
+ * com.sleepycat.je.RunRecoveryException. 
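+ *
+ * A minimal sketch of what callers are expected to do with that
+ * exception (hypothetical handler, not part of this test):
+ *
+ *   try { db.put(txn, key, value); }
+ *   catch (RunRecoveryException e) {
+ *       // the environment is invalid: close it and re-open to recover
+ *   }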
+ */ +public class InterruptTest extends TestBase { + + private final File envHome; + private final int NUM_OPS = 1000; + private final int NUM_ITERATIONS = 1; + + public InterruptTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testInterruptHandling() + throws Exception { + + for (int i = 0; i < NUM_ITERATIONS; i++) { + interruptThreads(i); + } + } + + public void interruptThreads(int i) + throws Exception { + + // TestUtils.removeLogFiles("Loop", envHome, false); + Environment env = null; + Database db = null; + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false"); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, "testDB" + i, dbConfig); + + ActionThread putter = new ActionThread(env, db, 1) { + @Override + protected void doStuff(Database db, + Transaction txn, + DatabaseEntry key, + DatabaseEntry value) + throws DatabaseException { + db.put(txn, key, value); + } + }; + + ActionThread deleter = new ActionThread(env, db, 1) { + @Override + protected void doStuff(Database db, + Transaction txn, + DatabaseEntry key, + DatabaseEntry value) + throws DatabaseException { + db.delete(txn, key); + } + }; + + putter.start(); + Thread.sleep(1000); + + deleter.start(); + Thread.sleep(2000); + + /* + * Interrupting these threads will catch them in the middle of an + * NIO operation, expect a RunRecovery exception. + */ + putter.interrupt(); + deleter.interrupt(); + + putter.join(); + deleter.join(); + } finally { + try { + if (db != null) { + db.close(); + } + } catch (RunRecoveryException ok) { + + /* + * Expect a run recovery exception. Since it will be detected + * when we try to close the database, close the environment + * now so we can re-start in the same JVM. + */ + } catch (Throwable t) { + t.printStackTrace(); + fail("Should not see any other kind of exception. Iteration=" + + i); + } finally { + if (env != null) { + try { + env.close(); + env = null; + } catch (RunRecoveryException ignore) { + /* Sometimes the checkpointer can't close down. */ + } + } + } + } + } + + abstract class ActionThread extends Thread { + private final Environment env; + private final Database db; + private final int threadNumber; + + public ActionThread(Environment env, Database db, int threadNumber) { + this.env = env; + this.db = db; + this.threadNumber = threadNumber; + } + + @Override + public void run() { + int i=0; + Transaction txn = null; + try { + for (; i < NUM_OPS; i++) { + txn = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(); + key.setData + (StringUtils.toUTF8("" + threadNumber * 10000 + i)); + DatabaseEntry value = new DatabaseEntry(); + value.setData(new byte[8192]); + doStuff(db, txn, key, value); + Thread.sleep(10); + txn.commit(); + txn = null; + } + } catch (InterruptedException e) { + /* possible outcome. */ + } catch (RunRecoveryException e) { + /* possible outcome. */ + } catch (DatabaseException e) { + /* possible outcome. 
*/
+                //System.out.println("Put to " + i);
+                //e.printStackTrace();
+            } finally {
+                try {
+                    if (txn != null) {
+                        txn.abort();
+                    }
+                } catch (DatabaseException ignored) {
+                }
+            }
+        }
+
+        protected abstract void doStuff(Database db,
+                                        Transaction txn,
+                                        DatabaseEntry key,
+                                        DatabaseEntry value)
+            throws DatabaseException;
+    }
+}
diff --git a/test/com/sleepycat/je/LoadedClassImpl.java.original b/test/com/sleepycat/je/LoadedClassImpl.java.original
new file mode 100644
index 0000000..0f8e74d
--- /dev/null
+++ b/test/com/sleepycat/je/LoadedClassImpl.java.original
@@ -0,0 +1,185 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002, 2014 Oracle and/or its affiliates.  All rights reserved.
+ *
+ */
+
+package com.sleepycat.je;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.persist.EntityCursor;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+
+public class LoadedClassImpl
+    implements ClassLoaderTest.LoadedClass {
+
+    public Comparator<byte[]> getReverseComparator() {
+        return new ReverseComparator();
+    }
+
+    public Class<? extends Comparator<byte[]>> getReverseComparatorClass() {
+        return ReverseComparator.class;
+    }
+
+    public static class ReverseComparator
+        implements Comparator<byte[]>, Serializable {
+
+        private static final long serialVersionUID = 1L;
+
+        public int compare(byte[] o1, byte[] o2) {
+            return Key.compareKeys(o2, o1, null /*comparator*/);
+        }
+    }
+
+    public Object getSerializableInstance() {
+        return new SerialData();
+    }
+
+    private static class SerialData implements Serializable {
+
+        private static final long serialVersionUID = 1L;
+
+        private int x;
+        private int y;
+
+        private SerialData() {
+            x = 1;
+            y = 2;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (!(other instanceof SerialData)) {
+                return false;
+            }
+            final SerialData o = (SerialData) other;
+            return x == o.x && y == o.y;
+        }
+    }
+
+    @Persistent
+    static class KeyClass implements Comparable<KeyClass> {
+
+        @KeyField(1)
+        int i1;
+        @KeyField(2)
+        int i2;
+
+        KeyClass() {
+        }
+
+        KeyClass(int key, int data) {
+            this.i1 = key;
+            this.i2 = data;
+        }
+
+        public int compareTo(KeyClass o) {
+            int cmp = i2 - o.i2;
+            if (cmp != 0) {
+                return cmp;
+            }
+            cmp = i1 - o.i1;
+            if (cmp != 0) {
+                return cmp;
+            }
+            return 0;
+        }
+
+        @Override
+        public String toString() {
+            return "[KeyClass i1=" + i1 + " i2=" + i2 + "]";
+        }
+    }
+
+    @Entity
+    static class MyEntity {
+
+        @PrimaryKey
+        int key;
+
+        int data;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        KeyClass skey;
+
+        MyEntity() {
+        }
+
+        MyEntity(int key, int data) {
+            this.key = key;
+            this.data = data;
+            this.skey = new KeyClass(key, data);
+        }
+
+        @Override
+        public String toString() {
+            return "[MyEntity key=" + key + " data=" + data +
+                   " skey=" + skey + "]";
+        }
+    }
+
+    public void writeEntities(EntityStore store) {
+        final PrimaryIndex<Integer, MyEntity> index =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+        index.put(new MyEntity(0, 10));
+        index.put(new MyEntity(1, 11));
+        index.put(new MyEntity(2, 10));
+        index.put(new MyEntity(3, 11));
+        index.put(new MyEntity(4, 10));
+        index.put(new MyEntity(5, 11));
+    }
+
+    public void 
readEntities(EntityStore store) {
+
+        final PrimaryIndex<Integer, MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+        final SecondaryIndex<KeyClass, Integer, MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, KeyClass.class, "skey");
+
+        MyEntity e = priIndex.get(1);
+        check(e != null && e.key == 1);
+
+        e = secIndex.get(new KeyClass(1, 11));
+        if (!(e != null && e.key == 1)) {
+            System.out.println(e);
+        }
+        check(e != null && e.key == 1);
+
+        EntityCursor<MyEntity> cursor = priIndex.entities();
+        for (int i = 0; i <= 5; i += 1) {
+            e = cursor.next();
+            check(e.key == i);
+        }
+        e = cursor.next();
+        check(e == null);
+        cursor.close();
+
+        cursor = secIndex.entities();
+        for (int i : new int[] {0, 2, 4, 1, 3, 5}) {
+            e = cursor.next();
+            check(e.key == i);
+        }
+        e = cursor.next();
+        check(e == null);
+        cursor.close();
+    }
+
+    void check(boolean b) {
+        if (!b) {
+            throw new RuntimeException();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/MultiProcessWriteTest.java b/test/com/sleepycat/je/MultiProcessWriteTest.java
new file mode 100644
index 0000000..dcfa199
--- /dev/null
+++ b/test/com/sleepycat/je/MultiProcessWriteTest.java
@@ -0,0 +1,308 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.util.ArrayList;
+
+import com.sleepycat.je.junit.JUnitProcessThread;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+import org.junit.Test;
+
+/**
+ * [#16348] JE file handle leak when multi-process writing a same environment.
+ *
+ * This test creates two processes via two threads: one process opens the
+ * environment in read/write mode, and the other then tries to acquire the
+ * write lock on je.lck, to check whether a file handle leak exists.
+ */
+public class MultiProcessWriteTest extends TestBase {
+    private final File envHome;
+
+    public MultiProcessWriteTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @Test
+    public void testMultiEnvWrite() {
+        /* Initialize the environment. */
+        MainWrite.main
+            (new String[]{"-envHome", SharedTestUtils.getTestDir().getAbsolutePath(),
+             "-init", "-initSize", "1000"});
+
+        /* Command for process 1. */
+        String[] command1 = new String[8];
+        command1[0] = "com.sleepycat.je.MultiProcessWriteTest$MainWrite";
+        command1[1] = "-envHome";
+        command1[2] = SharedTestUtils.getTestDir().getPath();
+        command1[3] = "-write";
+        command1[4] = "-numOps";
+        command1[5] = "100000";
+        command1[6] = "-procNum";
+        command1[7] = "1";
+
+        /* Command for process 2. */
+        String[] command2 = new String[8];
+        command2[0] = command1[0];
+        command2[1] = command1[1];
+        command2[2] = command1[2];
+        command2[3] = command1[3];
+        command2[4] = command1[4];
+        command2[5] = "200000";
+        command2[6] = command1[6];
+        command2[7] = "2";
+
+        /* Create and start the two threads. 
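+         * (Each JUnitProcessThread appears to fork a separate JVM running
+         * MainWrite; process 2 should contend for the je.lck write lock
+         * while process 1 holds the environment open.)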
*/ + JUnitProcessThread thread1 = + new JUnitProcessThread("process1", command1); + JUnitProcessThread thread2 = + new JUnitProcessThread("process2", 40, null, command2); + + thread1.start(); + + try { + Thread.sleep(20); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + thread2.start(); + + /* End these two threads. */ + try { + thread1.finishTest(); + thread2.finishTest(); + } catch (Throwable t) { + System.err.println(t.toString()); + } + + /* Check whether the process throws out unexpected exceptions. */ + assertEquals(thread1.getExitVal(), 0); + assertEquals(thread2.getExitVal(), 0); + } + + /** + * Write records into the environment. + * + * It can run initialization and run in process 1 mode, process 2 mode + * as specified. + */ + static class MainWrite { + private static final int CACHE_LIMIT = 50000; + private int procNum; + + private final File envHome; + private ArrayList objectList = new ArrayList(); + + private PrimaryIndex objectBySid; + private Environment env; + private EntityStore store; + + public MainWrite(File envHome) { + this.envHome = envHome; + } + + public void setProcNum(int procNum) { + this.procNum = procNum; + } + + public boolean setup(boolean readOnly) { + boolean open = false; + try { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setReadOnly(readOnly); + envConfig.setAllowCreate(!readOnly); + + env = new Environment(envHome, envConfig); + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setReadOnly(readOnly); + storeConfig.setAllowCreate(!readOnly); + + store = new EntityStore(env, "EntityStore", storeConfig); + + objectBySid = + store.getPrimaryIndex(String.class, TestObject.class); + + open = true; + } catch (EnvironmentLockedException e) { + } catch (Exception e) { + e.printStackTrace(); + System.exit(-1); + } + + return open; + } + + public void init(int initSize) { + putNoTry(initSize); + } + + public void putNoTry(int numOps) { + setup(false); + try { + for (int i = 0; i < numOps; i++) { + TestObject object = new TestObject(); + String sId = new Integer(i).toString(); + object.setSid(sId); + object.setName("hero" + sId); + object.setCountry("China"); + objectBySid.putNoReturn(object); + } + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + + public void putWithTry(int numOps) { + try { + setup(true); + for (int i = 0; i < numOps; i++) { + TestObject object = new TestObject(); + String s = new Integer(i).toString(); + object.setSid(s); + object.setName("hero" + s); + object.setCountry("China"); + + objectList.add(object); + + if (objectList.size() >= CACHE_LIMIT) { + close(); + + boolean success = false; + while (!success) { + success = setup(false); + } + + for (int j = 0; j < objectList.size(); j++) { + objectBySid.putNoReturn(objectList.get(j)); + } + + close(); + setup(true); + objectList = new ArrayList(); + } + } + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + + public void close() { + if (store != null) { + try { + store.close(); + store = null; + } catch (Exception e) { + e.printStackTrace(); + } + } + + if (env != null) { + try { + env.close(); + env = null; + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + public static void usage() { + System.out.println("java MainWrite -envHome " + + "-init|-write -numOps -procNum <1|2>"); + System.exit(-1); + } + + public static void main(String args[]) { + if (!args[0].equals("-envHome")) { + usage(); + } + + MainWrite test = new MainWrite(new File(args[1])); + + if (args[2].equals("-init")) { + if 
(!args[3].equals("-initSize")) { + usage(); + } else { + test.init(new Integer(args[4])); + test.close(); + } + } else if (args[2].equals("-write")) { + if (!args[3].equals("-numOps")) { + usage(); + } else { + if (!(args[6].equals("1") || + args[6].equals("2"))) { + usage(); + } + + test.setProcNum(new Integer(args[6])); + + if (args[6].equals("1")) + test.putNoTry(new Integer(args[4])); + else + test.putWithTry(new Integer(args[4])); + + test.close(); + } + } else { + usage(); + } + } + } + + @Entity + static class TestObject { + @PrimaryKey + private String sid; + + private String name; + + private String country; + + public void setSid(String sid) { + this.sid = sid; + } + + public void setName(String name) { + this.name = name; + } + + public void setCountry(String country) { + this.country = country; + } + + public String getSid() { + return sid; + } + + public String getName() { + return name; + } + + public String getCountry() { + return country; + } + } +} diff --git a/test/com/sleepycat/je/ReadCommittedTest.java b/test/com/sleepycat/je/ReadCommittedTest.java new file mode 100644 index 0000000..35b8e13 --- /dev/null +++ b/test/com/sleepycat/je/ReadCommittedTest.java @@ -0,0 +1,541 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.txn.LockManager; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import org.junit.After; +import org.junit.Test; + +/** + * Tests the read-committed (degree 2) isolation level. + */ +public class ReadCommittedTest extends DualTestCase { + + private final File envHome; + private Environment env; + private Database db; + + public ReadCommittedTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + LockManager.afterLockHook = null; + + super.tearDown(); + + if (env != null) { + env.close(); + } + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + /* Control over isolation level is required by this test. 
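+         * Read-committed is requested three different ways below:
+         * TransactionConfig.setReadCommitted(true),
+         * CursorConfig.setReadCommitted(true), and per-operation
+         * LockMode.READ_COMMITTED.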
*/ + TestUtils.clearIsolationLevel(envConfig); + + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + +// envConfig.setConfigParam( +// EnvironmentConfig.LOCK_TIMEOUT, "" + Integer.MAX_VALUE); + + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int i = 100; i <= 200; i += 100) { + for (int j = 1; j <= 5; j += 1) { + IntegerBinding.intToEntry(i + j, key); + IntegerBinding.intToEntry(0, data); + db.put(null, key, data); + } + } + } + + private void close() + throws DatabaseException { + + db.close(); + db = null; + close(env); + env = null; + } + + @Test + public void testIllegalConfig() + throws DatabaseException { + + open(); + + CursorConfig cursConfig; + TransactionConfig txnConfig; + + /* Disallow transaction ReadCommitted and Serializable. */ + txnConfig = new TransactionConfig(); + txnConfig.setReadCommitted(true); + txnConfig.setSerializableIsolation(true); + try { + env.beginTransaction(null, txnConfig); + fail(); + } catch (IllegalArgumentException expected) {} + + /* Disallow transaction ReadCommitted and ReadUncommitted. */ + txnConfig = new TransactionConfig(); + txnConfig.setReadCommitted(true); + txnConfig.setReadUncommitted(true); + try { + env.beginTransaction(null, txnConfig); + fail(); + } catch (IllegalArgumentException expected) {} + + /* Disallow cursor ReadCommitted and ReadUncommitted. */ + cursConfig = new CursorConfig(); + cursConfig.setReadCommitted(true); + cursConfig.setReadUncommitted(true); + Transaction txn = env.beginTransaction(null, null); + try { + db.openCursor(txn, cursConfig); + fail(); + } catch (IllegalArgumentException expected) {} + txn.abort(); + + close(); + } + + @Test + public void testWithTransactionConfig() + throws DatabaseException { + + doTestWithTransactionConfig(false /*nonSticky*/); + } + + @Test + public void testNonCloningWithTransactionConfig() + throws DatabaseException { + + doTestWithTransactionConfig(true /*nonSticky*/); + } + + private void doTestWithTransactionConfig(boolean nonSticky) + throws DatabaseException { + + open(); + + TransactionConfig config = new TransactionConfig(); + config.setReadCommitted(true); + Transaction txn = env.beginTransaction(null, config); + Cursor cursor = db.openCursor( + txn, new CursorConfig().setNonSticky(nonSticky)); + + checkReadCommitted(cursor, 100, true, nonSticky); + + cursor.close(); + txn.commit(); + close(); + } + + @Test + public void testWithCursorConfig() + throws DatabaseException { + + doTestWithCursorConfig(false /*nonSticky*/); + } + + @Test + public void testNonCloningWithCursorConfig() + throws DatabaseException { + + doTestWithCursorConfig(true /*nonSticky*/); + } + + private void doTestWithCursorConfig(boolean nonSticky) { + + open(); + + Transaction txn = env.beginTransaction(null, null); + CursorConfig config = new CursorConfig(); + config.setReadCommitted(true); + config.setNonSticky(nonSticky); + Cursor cursor = db.openCursor(txn, config); + Cursor degree3Cursor = db.openCursor(txn, null); + + checkReadCommitted(cursor, 100, true, nonSticky); + checkReadCommitted(degree3Cursor, 200, false, nonSticky); + + degree3Cursor.close(); + cursor.close(); + txn.commit(); + close(); + } + + @Test + public void 
testWithLockMode()
        throws DatabaseException {
+
+        open();
+
+        Transaction txn = env.beginTransaction(null, null);
+
+        checkReadCommitted(txn, LockMode.READ_COMMITTED, 100, true);
+        checkReadCommitted(txn, null, 200, false);
+
+        txn.commit();
+        close();
+    }
+
+    /**
+     * Checks that the given cursor provides the given
+     * expectReadLocksAreReleased behavior.
+     */
+    private void checkReadCommitted(Cursor cursor,
+                                    int startKey,
+                                    boolean expectReadLocksAreReleased,
+                                    boolean nonSticky)
+        throws DatabaseException {
+
+        final EnvironmentStats baseStats = env.getStats(null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        class MyHook implements TestHook<Void> {
+            int maxReadLocks = 0;
+
+            @Override
+            public void doHook() {
+                maxReadLocks = Math.max(
+                    maxReadLocks, getNReadLocks(baseStats));
+            }
+
+            @Override
+            public void doHook(Void obj) {
+            }
+            @Override
+            public void hookSetup() {
+            }
+            @Override
+            public void doIOHook() throws IOException {
+            }
+            @Override
+            public Void getHookValue() {
+                return null;
+            }
+        }
+
+        MyHook hook = new MyHook();
+        LockManager.afterLockHook = hook;
+
+        /* Move to first record. */
+        checkNReadLocks(baseStats, 0);
+        IntegerBinding.intToEntry(startKey + 1, key);
+        OperationStatus status = cursor.getSearchKey(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Check read locks calling next/prev. [#23775] */
+        for (int i = 2; i <= 5; i += 1) {
+            status = cursor.getNext(key, data, null);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(startKey + i, IntegerBinding.entryToInt(key));
+            if (expectReadLocksAreReleased) {
+                /* Read locks are released as the cursor moves. */
+                checkNReadLocks(baseStats, 1);
+            } else {
+                /* Read locks are not released. */
+                checkNReadLocks(baseStats, i);
+            }
+        }
+        for (int i = 4; i >= 1; i -= 1) {
+            status = cursor.getPrev(key, data, null);
+            assertEquals(OperationStatus.SUCCESS, status);
+            assertEquals(startKey + i, IntegerBinding.entryToInt(key));
+            if (expectReadLocksAreReleased) {
+                /* Read locks are released as the cursor moves. */
+                checkNReadLocks(baseStats, 1);
+            } else {
+                /* Read locks are not released. */
+                checkNReadLocks(baseStats, 5);
+            }
+        }
+
+        /* Move to last key in range to normalize write lock checking. */
+        IntegerBinding.intToEntry(startKey + 5, key);
+        status = cursor.getSearchKey(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Check write locks calling put. */
+        checkNWriteLocks(baseStats, 0);
+        for (int i = 1; i <= 5; i += 1) {
+            IntegerBinding.intToEntry(startKey + i, key);
+            IntegerBinding.intToEntry(0, data);
+            cursor.put(key, data);
+            /* Write locks are not released. */
+            if (expectReadLocksAreReleased) {
+                /* A single new write lock, no upgrade. */
+                checkNWriteLocks(baseStats, i);
+            } else {
+                /* Upgraded lock plus new write lock. */
+                checkNWriteLocks(baseStats, i * 2);
+            }
+        }
+
+        /* All read locks were upgraded by the put() calls above. */
+        checkNReadLocks(baseStats, 0);
+
+        /*
+         * The max number of read locks held at one time is indicative of
+         * whether read-committed is used. Only one lock may be held when
+         * read-committed is used with a non-sticky cursor, since the
+         * non-sticky mode is intended to avoid deadlocks. [#23775]
+         */
+        if (!expectReadLocksAreReleased) {
+            /* All records are locked at once with repeatable read. */
+            assertEquals(5, hook.maxReadLocks);
+        } else if (nonSticky) {
+            /* Special case: one lock for read-committed and non-sticky. 
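+             * A non-sticky cursor gives up its current position before
+             * moving to the next record, so at most one read lock is held;
+             * the sticky case below briefly holds two while the clone
+             * advances.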
*/
+            assertEquals(1, hook.maxReadLocks);
+        } else {
+            /*
+             * With read-committed but without non-sticky, two locks are held
+             * temporarily during the movement from one record to the next.
+             */
+            assertEquals(2, hook.maxReadLocks);
+        }
+    }
+
+    /**
+     * Checks that the given lock mode provides the given
+     * expectReadLocksAreReleased behavior.
+     */
+    private void checkReadCommitted(Transaction txn,
+                                    LockMode lockMode,
+                                    int startKey,
+                                    boolean expectReadLocksAreReleased)
+        throws DatabaseException {
+
+        EnvironmentStats baseStats = env.getStats(null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Check read locks calling search. */
+        checkNReadLocks(baseStats, 0);
+        for (int i = 1; i <= 5; i += 1) {
+            IntegerBinding.intToEntry(startKey + i, key);
+            OperationStatus status = db.get(txn, key, data, lockMode);
+            assertEquals(OperationStatus.SUCCESS, status);
+            if (expectReadLocksAreReleased) {
+                /* Read locks are released when the cursor is closed. */
+                checkNReadLocks(baseStats, 0);
+            } else {
+                /* Read locks are not released. */
+                checkNReadLocks(baseStats, i);
+            }
+        }
+
+        /* Check write locks calling put. */
+        checkNWriteLocks(baseStats, 0);
+        for (int i = 1; i <= 5; i += 1) {
+            IntegerBinding.intToEntry(startKey + i, key);
+            IntegerBinding.intToEntry(0, data);
+            db.put(txn, key, data);
+            /* Write locks are not released. */
+            if (expectReadLocksAreReleased) {
+                /* A single new write lock, no upgrade. */
+                checkNWriteLocks(baseStats, i);
+            } else {
+                /* Upgraded lock plus new write lock. */
+                checkNWriteLocks(baseStats, i * 2);
+            }
+        }
+
+        /* All read locks were upgraded by the put() calls above. */
+        checkNReadLocks(baseStats, 0);
+    }
+
+    private void checkNReadLocks(EnvironmentStats baseStats,
+                                 int nReadLocksExpected) {
+        assertEquals(
+            "Read locks -- ", nReadLocksExpected, getNReadLocks(baseStats));
+    }
+
+    private void checkNWriteLocks(EnvironmentStats baseStats,
+                                  int nWriteLocksExpected) {
+        assertEquals(
+            "Write locks -- ", nWriteLocksExpected, getNWriteLocks(baseStats));
+    }
+
+    private int getNReadLocks(EnvironmentStats baseStats) {
+        EnvironmentStats stats = env.getStats(null);
+        return stats.getNReadLocks() - baseStats.getNReadLocks();
+    }
+
+    private int getNWriteLocks(EnvironmentStats baseStats) {
+        EnvironmentStats stats = env.getStats(null);
+        return stats.getNWriteLocks() - baseStats.getNWriteLocks();
+    }
+
+    /**
+     * Currently disabled because we haven't fixed the bug [#24453] that this
+     * test reproduces.
+     *
+     * To debug, uncomment code in open() that sets a large lock timeout.
+     */
+//    @Test
+    public void testRepeatableReadCombination() throws InterruptedException {
+
+        open();
+
+        final CountDownLatch t1Latch = new CountDownLatch(1);
+
+        /* T1 gets a read lock and holds it until t1Latch is ready. */
+
+        final Thread t1 = new Thread() {
+
+            @Override
+            public void run() {
+
+                final DatabaseEntry key = new DatabaseEntry();
+                final DatabaseEntry data = new DatabaseEntry();
+
+                try (final Cursor cursor = db.openCursor(null, null)) {
+
+                    final OperationStatus s = cursor.getFirst(key, data, null);
+                    assert (OperationStatus.SUCCESS == s);
+
+                    t1Latch.await();
+                } catch (InterruptedException e) {
+                    e.printStackTrace();
+                }
+            }
+        };
+
+        t1.start();
+
+        try {
+            /*
+             * Main thread gets a read-lock also. It is not the first owner.
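+             * Read locks are shared, so T1 and this txn own the lock
+             * concurrently; the scenario in [#24453] presumably depends on
+             * this txn not being the first owner.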
*/
+
+            final Transaction txn = env.beginTransaction(null, null);
+
+            final DatabaseEntry key = new DatabaseEntry();
+            final DatabaseEntry data = new DatabaseEntry();
+
+            printStats(0);
+
+            try (final Cursor cursor = db.openCursor(txn, null)) {
+
+                OperationStatus s = cursor.getFirst(key, data, null);
+                assert (OperationStatus.SUCCESS == s);
+            }
+
+            printStats(1);
+
+            /* T2 attempts to get a write lock, but blocks. */
+
+            final Thread t2 = new Thread() {
+
+                @Override
+                public void run() {
+
+                    final DatabaseEntry key = new DatabaseEntry();
+                    final DatabaseEntry data = new DatabaseEntry();
+
+                    try (final Cursor cursor = db.openCursor(null, null)) {
+
+                        final OperationStatus s =
+                            cursor.getFirst(key, data, LockMode.RMW);
+                        assert (OperationStatus.SUCCESS == s);
+
+                        printStats(4);
+                    }
+                }
+            };
+
+            t2.start();
+
+            try {
+                while (env.getStats(null).getNWaiters() == 0) {
+                    Thread.yield();
+                }
+
+                printStats(2);
+
+                /* Main thread gets read lock again using read-committed. */
+
+                try (final Cursor cursor =
+                         db.openCursor(txn, CursorConfig.READ_COMMITTED)) {
+
+                    final OperationStatus s = cursor.getFirst(key, data, null);
+                    assert (OperationStatus.SUCCESS == s);
+                }
+
+                printStats(3);
+
+                t1Latch.countDown();
+                t1.join(10);
+
+                txn.abort();
+                t2.join(10);
+
+                printStats(5);
+
+            } finally {
+                while (t2.isAlive()) {
+                    t2.interrupt();
+                    t2.join(10);
+                }
+            }
+        } finally {
+            while (t1.isAlive()) {
+                t1.interrupt();
+                t1.join(10);
+            }
+        }
+
+        close();
+    }
+
+    private void printStats(final int seq) {
+
+        final EnvironmentStats stats = env.getStats(null);
+
+        System.out.println("[" + seq +
+            "] write-locks = " + stats.getNWriteLocks() +
+            " read-locks = " + stats.getNReadLocks() +
+            " waiters = " + stats.getNWaiters());
+    }
+}
diff --git a/test/com/sleepycat/je/RunRecoveryFailureTest.java b/test/com/sleepycat/je/RunRecoveryFailureTest.java
new file mode 100644
index 0000000..ef58132
--- /dev/null
+++ b/test/com/sleepycat/je/RunRecoveryFailureTest.java
@@ -0,0 +1,174 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * @excludeDualMode
+ * This test does not run in Replication Dual Mode. There are several
+ * logistical issues.
+ *
+ * - It assumes that all log files are in the <envHome> directory, whereas
+ *   dual mode environments are in <envHome>/rep*
+ * - It attempts to set the log file size to 1024, which is overridden by the
+ *   dual mode framework.
+ *
+ * Since the test doesn't add any unique coverage to dual mode testing, it's
+ * not worth overcoming the logistical issues.
+ */ +public class RunRecoveryFailureTest extends TestBase { + + private Environment env; + private final File envHome; + + public RunRecoveryFailureTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + openEnv(); + } + + @After + public void tearDown() { + + /* + * Close down environments in case the unit test failed so that the log + * files can be removed. + */ + try { + if (env != null) { + env.close(); + env = null; + } + } catch (RunRecoveryException e) { + /* ok, the test hosed it. */ + return; + } catch (DatabaseException e) { + /* ok, the test closed it */ + } + } + + private void openEnv() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + + /* + * Run with tiny log buffers, so we can go to disk more (and see the + * checksum errors) + */ + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + envConfig.setConfigParam + (EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), "1024"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + } + + /* + * Corrupt an environment while open, make sure we get a + * RunRecoveryException. + */ + @Test + public void testInvalidateEnvMidStream() + throws Throwable { + + try { + /* Make a new db in this env and flush the file. */ + Transaction txn = + env.beginTransaction(null, TransactionConfig.DEFAULT); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(txn, "foo", dbConfig); + DatabaseEntry key = new DatabaseEntry(new byte[1000]); + DatabaseEntry data = new DatabaseEntry(new byte[1000]); + for (int i = 0; i < 100; i += 1) { + db.put(txn, key, data); + } + + env.getNonNullEnvImpl().getLogManager().flushSync(); + env.getNonNullEnvImpl().getFileManager().clear(); + + /* + * Corrupt each log file, then abort the txn. Aborting the txn + * results in an undo of each insert, which will provoke JE into + * reading the log a lot, and noticing the file corruption. Should + * get a checksum error, which should invalidate the environment. 
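+             * (The overwrite below starts at FileManager.firstLogEntryOffset(),
+             * so the file headers stay intact and the files still look like
+             * JE logs; the damage is then caught by log entry checksum
+             * validation during the undo reads.)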
+ */ + long currentFile = DbInternal.getNonNullEnvImpl(env) + .getFileManager() + .getCurrentFileNum(); + for (int fileNum = 0; fileNum <= currentFile; fileNum += 1) { + String logFileName = + FileManager.getFileName(fileNum, FileManager.JE_SUFFIX); + File file = new File(envHome, logFileName); + RandomAccessFile starterFile = + new RandomAccessFile(file, "rw"); + FileChannel channel = starterFile.getChannel(); + long fileSize = channel.size(); + if (fileSize > FileManager.firstLogEntryOffset()) { + ByteBuffer junkBuffer = ByteBuffer.allocate + ((int) fileSize - FileManager.firstLogEntryOffset()); + int written = channel.write + (junkBuffer, FileManager.firstLogEntryOffset()); + assertTrue(written > 0); + starterFile.close(); + } + } + + try { + txn.abort(); + fail("Should see a run recovery exception"); + } catch (RunRecoveryException e) { + } + + try { + env.getDatabaseNames(); + fail("Should see a run recovery exception again"); + } catch (RunRecoveryException e) { + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } +} diff --git a/test/com/sleepycat/je/StatCaptureTest.java b/test/com/sleepycat/je/StatCaptureTest.java new file mode 100644 index 0000000..bb910ed --- /dev/null +++ b/test/com/sleepycat/je/StatCaptureTest.java @@ -0,0 +1,912 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileFilter; +import java.io.FileReader; +import java.util.Map; +import java.util.SortedMap; +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironmentStats; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.StatCaptureRepDefinitions; +import com.sleepycat.je.statcap.EnvStatsLogger; +import com.sleepycat.je.statcap.StatCapture; +import com.sleepycat.je.statcap.StatCaptureDefinitions; +import com.sleepycat.je.statcap.StatFile; +import com.sleepycat.je.statcap.StatManager; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.LongMaxStat; +import com.sleepycat.je.utilint.LongMinStat; +import com.sleepycat.je.utilint.Stat; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +public class StatCaptureTest extends TestBase { + + private final File envHome; + private static final String DB_NAME = "foo"; + + public StatCaptureTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Custom Statistics. + */ + @Test + public void testCustomStats() throws Exception { + + long start; + final int DATACOUNT = 1025; + /* Init the Environment. 
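+         * (How the custom stats flow appears to work, as far as this test
+         * shows: the object passed to setCustomStats() is polled by the
+         * stat capture thread, and each name from getFieldNames() shows up
+         * in je.stat.csv under a Custom: prefix, e.g. Custom:putLatency
+         * below.)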
*/
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        Custom customStats = new Custom();
+        envConfig.setCustomStats(customStats);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(
+            EnvironmentParams.STATS_COLLECT_INTERVAL.getName(), "10 s");
+        envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+        Environment env = null;
+        Database db = null;
+        try {
+            env = new Environment(envHome, envConfig);
+
+            env.close();
+            env = null;
+
+            /* Try to open and close again, now that the environment exists */
+            envConfig.setAllowCreate(false);
+            env = new Environment(envHome, envConfig);
+
+            /* Open a database and insert some data. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            db = env.openDatabase(null, DB_NAME, dbConfig);
+            for (int i = 0; i < DATACOUNT; i++) {
+                byte[] val = Integer.valueOf(i).toString().getBytes();
+                start = System.currentTimeMillis();
+                db.put(null,
+                       new DatabaseEntry(val),
+                       new DatabaseEntry(val));
+                customStats.setPutLatency(System.currentTimeMillis() - start);
+            }
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        }
+
+        File statcsv =
+            new File(envHome.getAbsolutePath() + File.separator +
+                     "je.stat.csv");
+        Map<String, Long> values = StatFile.sumItUp(statcsv);
+        Long putCount = values.get("Op:priInsert");
+        Long customPutLatency = values.get("Custom:putLatency");
+        assertEquals(putCount.longValue(), DATACOUNT);
+        assertTrue(customPutLatency > 0);
+    }
+
+    /**
+     * Basic Statistics Capture.
+     */
+    @Test
+    public void testStatsCapture() throws Exception {
+
+        final int DATACOUNT = 9999;
+
+        long envCreationTime = 0;
+        SortedMap<String, Long> statmap;
+        File envStatFile = new File(envHome.getAbsolutePath() +
+            File.separator + EnvStatsLogger.STATFILENAME +
+            "." + EnvStatsLogger.STATFILEEXT);
+
+        /* remove any existing stats files. */
+        File envHome = SharedTestUtils.getTestDir();
+        FindFile ff = new FindFile(StatCapture.STATFILENAME);
+        File[] files = envHome.listFiles(ff);
+        for (File f : files) {
+            f.delete();
+        }
+
+        FindFile envff = new FindFile(EnvStatsLogger.STATFILENAME);
+        files = envHome.listFiles(envff);
+        for (File f : files) {
+            f.delete();
+        }
+
+        /* Init the Environment. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+
+        Environment env = new Environment(envHome, envConfig);
+        env.close();
+        env = null;
+        assertEquals("Number of rows in env stat file not expected.",
+                     getRowCount(envStatFile),
+                     2);
+
+        /* Try to open and close again, now that the environment exists */
+        envConfig.setAllowCreate(false);
+// envConfig.setTxnNoSync(true);
+        envConfig.setConfigParam(EnvironmentParams.
+                                 MAX_MEMORY.getName(),
+                                 "100000");
+        env = new Environment(envHome, envConfig);
+        assertEquals("Number of rows in env stat file not expected.",
+                     getRowCount(envStatFile),
+                     3);
+
+        /* Open a database and insert some data.
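+         * (The counts checked at the end rely on each db.put() below being
+         * recorded as Op:priInsert and each db.get() as Op:priSearch in the
+         * captured je.stat.csv; the two cursor positionings below count as
+         * Op:priPosition.)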
*/
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        Database db = env.openDatabase(null, DB_NAME, dbConfig);
+        /* populate */
+        for (int i = 0; i < DATACOUNT; i++) {
+            byte[] val = Integer.valueOf(i).toString().getBytes();
+            db.put(null,
+                   new DatabaseEntry(val),
+                   new DatabaseEntry(val));
+        }
+
+        for (int i = 0; i < DATACOUNT; i++) {
+            byte[] key = Integer.valueOf(i).toString().getBytes();
+            DatabaseEntry value = new DatabaseEntry();
+            OperationStatus os =
+                db.get(null,
+                       new DatabaseEntry(key),
+                       value,
+                       LockMode.DEFAULT);
+            assertSame(os, OperationStatus.SUCCESS);
+        }
+
+        DatabaseEntry value = new DatabaseEntry();
+        DatabaseEntry key = new DatabaseEntry();
+
+        Cursor corsair = db.openCursor(null, CursorConfig.DEFAULT);
+
+        assertSame(corsair.getPrev(key, value, null),
+                   OperationStatus.SUCCESS);
+        assertSame(corsair.getFirst(key, value, null),
+                   OperationStatus.SUCCESS);
+        corsair.close();
+        corsair = null;
+
+        EnvironmentStats es = env.getStats(TestUtils.FAST_STATS);
+        statmap = StatFile.getMap(es.getStatGroups());
+        envCreationTime = es.getEnvironmentCreationTime();
+
+        db.close();
+        env.close();
+
+        File statcsv =
+            new File(envHome.getAbsolutePath() + File.separator +
+                     "je.stat.csv");
+        Map<String, Long> values = StatFile.sumItUp(statcsv);
+
+        Long putCount = values.get("Op:priInsert");
+        Long getCount = values.get("Op:priSearch");
+        Long posCount = values.get("Op:priPosition");
+        assertEquals(DATACOUNT, putCount.longValue());
+        assertEquals(DATACOUNT, getCount.longValue());
+        assertEquals(2, posCount.longValue());
+        assertEquals(Long.valueOf(envCreationTime),
+                     values.get("Environment:environmentCreationTime"));
+
+        verify(values, "Stat File", statmap, "public getStats API");
+    }
+
+    /**
+     * Statistics Capture configuration test.
+     */
+    @Test
+    public void testChangeStatConfig() throws Exception {
+
+        final int DATACOUNT = 30;
+        int filecount = 0;
+        int prevrowcount = 0;
+        int rowcount = 0;
+        File currentStatFile;
+        File currentConfigStatFile;
+        EnvironmentMutableConfig mc;
+
+        /* remove any existing stats files. */
+        File envHome = SharedTestUtils.getTestDir();
+        FindFile ff = new FindFile(StatCapture.STATFILENAME);
+        File[] files = envHome.listFiles(ff);
+        for (File f : files) {
+            f.delete();
+        }
+        currentStatFile =
+            new File(envHome.getAbsolutePath() + File.separator +
+                     StatCapture.STATFILENAME + "." +
+                     StatCapture.STATFILEEXT);
+        currentConfigStatFile =
+            new File(envHome.getAbsolutePath() + File.separator +
+                     EnvStatsLogger.STATFILENAME + "." +
+                     EnvStatsLogger.STATFILEEXT);
+
+        /* Init the Environment.
*/ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam( + EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL.getName(), + "1000000000"); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentParams.STATS_COLLECT.getName(), + "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + Environment env = null; + Database db = null; + try { + env = new Environment(envHome, envConfig); + env.close(); + env = null; + + filecount = envHome.listFiles(ff).length; + assertSame("Number of stat files was expected to be zero.", + filecount, + 0); + + /* Try to open and close again, now that the environment exists */ + envConfig.setAllowCreate(false); + + envConfig.setConfigParam( + EnvironmentParams.STATS_COLLECT_INTERVAL.getName(), + "1 s"); + env = new Environment(envHome, envConfig); + + /* change config param to collect stats */ + mc = env.getMutableConfig(); + mc.setConfigParam(EnvironmentParams.STATS_COLLECT.getName(), + "true"); + env.setMutableConfig(mc); + + /* Open a database and insert some data. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + /* wait a second */ + Thread.sleep(1000); + if (i == 0) { + /* change configuration */ + Thread.sleep(1000); + mc = env.getMutableConfig(); + mc.setConfigParam( + EnvironmentParams.STATS_COLLECT_INTERVAL.getName(), + "10 s"); + env.setMutableConfig(mc); + Thread.sleep(5000); + /* check there was at least one file */ + filecount = envHome.listFiles(ff).length; + assertSame("No stats files found. 
", filecount, 1); + rowcount = getRowCount(currentStatFile); + } + } + prevrowcount = rowcount; + rowcount = getRowCount(currentStatFile); + assertSame( + "Expected number of rows in stat file is " + + "incorrect expected " + + (DATACOUNT / 10) + " have " + (rowcount - prevrowcount), + (rowcount - prevrowcount), + (DATACOUNT / 10)); + + /* turn stat capture off, then on to insure that works */ + mc = env.getMutableConfig(); + mc.setConfigParam(EnvironmentParams.STATS_COLLECT.getName(), + "false"); + env.setMutableConfig(mc); + + filecount = envHome.listFiles(ff).length; + + // turn back capture back on + mc = env.getMutableConfig(); + mc.setConfigParam(EnvironmentParams.STATS_COLLECT.getName(), + "true"); + env.setMutableConfig(mc); + + mc = env.getMutableConfig(); + mc.setConfigParam( + EnvironmentParams.STATS_COLLECT_INTERVAL.getName(), + "1 s"); + mc.setConfigParam( + EnvironmentParams.STATS_FILE_ROW_COUNT.getName(), + Integer.toString(DATACOUNT / 4)); + env.setMutableConfig(mc); + + for (int i = 0; i < DATACOUNT; i++) { + byte[] key = Integer.valueOf(i).toString().getBytes(); + DatabaseEntry value = new DatabaseEntry(); + OperationStatus os = + db.get(null, + new DatabaseEntry(key), + value, + LockMode.DEFAULT); + assertSame(os, OperationStatus.SUCCESS); + Thread.sleep(1000); + } + + assertTrue("Number of stat files did not increase " + + filecount + " have " + envHome.listFiles(ff).length, + filecount < envHome.listFiles(ff).length ); + + // check the environment config log + rowcount = getRowCount(currentConfigStatFile); + assertEquals("Number of rows in "+ + currentConfigStatFile.getAbsolutePath() + + "not expected.", + 6, rowcount); + db.close(); + db = null; + env.close(); + env = null; + + /* Test changing a non mutable parameter is logged */ + envConfig.setConfigParam( + EnvironmentParams.CHECKPOINTER_BYTES_INTERVAL.getName(), + "4000000"); + envConfig.setConfigParam(EnvironmentParams.STATS_COLLECT.getName(), + "true"); + env = new Environment(envHome, envConfig); + + // check the environment config log + rowcount = getRowCount(currentConfigStatFile); + assertEquals("Number of rows in "+ + currentConfigStatFile.getAbsolutePath() + + "not expected.", + 7, rowcount); + } finally { + if (db != null) { + db.close(); + } + if (env != null) { + env.close(); + } + } + + Map values = + StatFile.sumItUp(envHome, StatCapture.STATFILENAME); + + Long putCount = values.get("Op:priInsert"); + Long getCount = values.get("Op:priSearch"); + assertEquals(DATACOUNT, putCount.longValue()); + assertEquals(DATACOUNT, getCount.longValue()); + } + + + /** + * Statistics Capture test that moves the statistics + * file while stat capture is ongoing. + */ + @Test + public void testFileErrors() throws Exception { + + final int DATACOUNT = 10; + File jeLogFile; + EnvironmentMutableConfig mc; + + /* remove any existing stats files. */ + File envHome = SharedTestUtils.getTestDir(); + FindFile ff = new FindFile(StatCapture.STATFILENAME); + File[] files = envHome.listFiles(ff); + for (File f : files) { + f.delete(); + } + File statFileDir = + new File(envHome.getAbsolutePath() + File.separator + + "statsdir"); + jeLogFile = + new File(envHome.getAbsolutePath() + File.separator + + "je.info.0"); + + jeLogFile.delete(); + /* Init the Environment. 
*/
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentParams.STATS_COLLECT.getName(),
+                                 "true");
+        envConfig.setConfigParam(
+            EnvironmentParams.STATS_COLLECT_INTERVAL.getName(),
+            "1 s");
+        envConfig.setConfigParam(EnvironmentParams.STATS_FILE_DIRECTORY.getName(),
+                                 statFileDir.getAbsolutePath());
+        envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+        Environment env = null;
+        Database db = null;
+        try {
+            env = new Environment(envHome, envConfig);
+
+            /* Open a database and insert some data. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            db = env.openDatabase(null, DB_NAME, dbConfig);
+            /* populate */
+            for (int i = 0; i < DATACOUNT; i++) {
+                byte[] val = Integer.valueOf(i).toString().getBytes();
+                db.put(null,
+                       new DatabaseEntry(val),
+                       new DatabaseEntry(val));
+                /* wait a second */
+                Thread.sleep(1000);
+                statFileDir.renameTo(
+                    new File(statFileDir.getAbsolutePath() + "1"));
+            }
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        }
+        assertTrue("Expected at least one row in the je.info.0 file.",
+                   getRowCount(jeLogFile) >= 1);
+    }
+
+
+    /**
+     * Test to check if added statistics are projected to the stat file.
+     * A manual change to the StatCaptureDefinitions class is required if
+     * a new statistic is added and is to be projected.
+     */
+    @Test
+    public void testForMissingStats() throws Exception {
+
+        StatCaptureDefinitions sd = new StatCaptureDefinitions();
+        Map<String, Stat<?>> stats;
+        final int DATACOUNT = 10;
+
+        final StatsConfig fastconfig = new StatsConfig();
+        fastconfig.setFast(true);
+        /* Init the Environment. */
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+        Environment env = null;
+        Database db = null;
+        try {
+            env = new Environment(envHome, envConfig);
+            env.close();
+            env = null;
+
+            /* Try to open and close again, now that the environment exists */
+            envConfig.setAllowCreate(false);
+            env = new Environment(envHome, envConfig);
+
+            /* Open a database and insert some data.
*/
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            db = env.openDatabase(null, DB_NAME, dbConfig);
+            for (int i = 0; i < DATACOUNT; i++) {
+                byte[] val = Integer.valueOf(i).toString().getBytes();
+                db.put(null,
+                       new DatabaseEntry(val),
+                       new DatabaseEntry(val));
+            }
+            stats =
+                StatFile.getNameValueMap(
+                    env.getStats(fastconfig).getStatGroups());
+            for (Map.Entry<String, Stat<?>> entry : stats.entrySet()) {
+                String name = entry.getKey();
+                assertTrue("Statistic " + name +
+                           " returned but not captured.",
+                           sd.getDefinition(name) != null);
+
+                if (entry.getValue() instanceof LongMinStat) {
+                    assertTrue("Statistic " + name +
+                               " returned but not defined as a minStat.",
+                               findDef(name, StatCaptureDefinitions.minStats));
+                } else if (entry.getValue() instanceof LongMaxStat) {
+                    assertTrue("Statistic " + name +
+                               " returned but not defined as a maxStat.",
+                               findDef(name, StatCaptureDefinitions.maxStats));
+                }
+            }
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+            if (env != null) {
+                env.close();
+            }
+        }
+    }
+
+    @Test
+    public void testRepEnvStats() throws Exception {
+
+        final int DATACOUNT = 17;
+        RepEnvInfo[] repEnvInfo = null;
+        File [] repEnvHome = null;
+        FindFile ff = new FindFile(StatCapture.STATFILENAME);
+        FindFile envff = new FindFile(EnvStatsLogger.STATFILENAME);
+        Database db = null;
+        try {
+            EnvironmentConfig envConfig = RepTestUtils.createEnvConfig(
+                new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+                               Durability.SyncPolicy.WRITE_NO_SYNC,
+                               Durability.ReplicaAckPolicy.ALL));
+            envConfig.setConfigParam(
+                EnvironmentParams.STATS_COLLECT_INTERVAL.getName(), "1 s");
+            envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+            repEnvInfo = RepTestUtils.setupEnvInfos(
+                envHome, 2, envConfig, new ReplicationConfig());
+            repEnvHome = new File[repEnvInfo.length];
+            for (int i = 0; i < repEnvInfo.length; i++) {
+                repEnvHome[i] = repEnvInfo[i].getEnvHome();
+            }
+
+            /* remove any existing stats files. */
+            for (RepEnvInfo ri : repEnvInfo) {
+                File[] files = ri.getEnvHome().listFiles(ff);
+                for (File f : files) {
+                    f.delete();
+                }
+                files = ri.getEnvHome().listFiles(envff);
+                for (File f : files) {
+                    f.delete();
+                }
+            }
+            ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            db = master.openDatabase(null, DB_NAME, dbConfig);
+            ReplicatedEnvironment replica = repEnvInfo[1].getEnv();
+            final StatsConfig fastconfig = new StatsConfig();
+            fastconfig.setFast(true);
+            fastconfig.setClear(true);
+
+            /*
+             * Sleep to allow at least one row to be captured in stat file
+             * before doing work.
+ */ + Thread.sleep(1000); + ReplicatedEnvironmentStats rs = replica.getRepStats(fastconfig); + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + Thread.sleep(250); + rs = replica.getRepStats(null); + assertTrue("value not expected ", + rs.getReplayMaxCommitProcessingNanos() > 0); + final long replayTotalCommitLagMs = rs.getReplayTotalCommitLagMs(); + assertTrue("replayTotalCommitLagMs should be greater than zero," + + " was " + replayTotalCommitLagMs, + replayTotalCommitLagMs > 0); + } finally { + if (db != null) { + db.close(); + } + Thread.sleep(2000); + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + /* Check master statistics */ + File statcsv = + new File(repEnvHome[0].getAbsolutePath() + File.separator + + "je.stat.csv"); + Map values = StatFile.sumItUp(statcsv); + Long putCount = values.get("Op:priInsert"); + assertEquals(putCount.longValue(), DATACOUNT); + Long writtenMessages = values.get("BinaryProtocol:nMessagesWritten"); + assertTrue("BinaryProtocol:nMessagesWritten value not " + + "greater than zero.", + writtenMessages > 0); + + /* Check slave statistics. */ + statcsv = + new File(repEnvHome[1].getAbsolutePath() + File.separator + + "je.stat.csv"); + values = StatFile.sumItUp(statcsv); + Long readMessages = values.get("BinaryProtocol:nMessagesRead"); + assertTrue("BinaryProtocol:nMessagesRead value not greater than zero.", + readMessages > 0); + } + + @Test + public void testNoJoinRepEnvStats() throws Exception { + + RepEnvInfo[] repEnvInfo = null; + try { + File currentFile = + new File(envHome.getAbsolutePath() + + File.separator + EnvStatsLogger.STATFILENAME + + "." + EnvStatsLogger.STATFILEEXT); + currentFile.delete(); + + repEnvInfo = RepTestUtils.setupEnvInfos(envHome, 2); + ReplicatedEnvironment master = + new ReplicatedEnvironment(envHome, + repEnvInfo[0].getRepConfig(), + repEnvInfo[0].getEnvConfig()); + master.close(); + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + @Test + public void testMissingRepEnvStats() throws Exception { + + final int DATACOUNT = 17; + Map> stats; + RepEnvInfo[] repEnvInfo = null; + Database db = null; + File [] repEnvHome = null; + StatCaptureRepDefinitions rsd = new StatCaptureRepDefinitions(); + final StatsConfig fastconfig = new StatsConfig(); + fastconfig.setFast(true); + FindFile ff = new FindFile(StatCapture.STATFILENAME); + FindFile envff = new FindFile(EnvStatsLogger.STATFILENAME); + try { + EnvironmentConfig envConfig = RepTestUtils.createEnvConfig( + new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.ALL)); + envConfig.setConfigParam( + EnvironmentParams.STATS_COLLECT_INTERVAL.getName(), "1 s"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + repEnvInfo = RepTestUtils.setupEnvInfos( + envHome, 2, envConfig, new ReplicationConfig()); + repEnvHome = new File[repEnvInfo.length]; + for (int i = 0; i < repEnvInfo.length; i++) { + repEnvHome[i] = repEnvInfo[i].getEnvHome(); + } + + /* remove any existing stats files. 
*/
+            for (RepEnvInfo ri : repEnvInfo) {
+                File[] files = ri.getEnvHome().listFiles(ff);
+                for (File f : files) {
+                    f.delete();
+                }
+                files = ri.getEnvHome().listFiles(envff);
+                for (File f : files) {
+                    f.delete();
+                }
+            }
+            ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+            db = master.openDatabase(null, DB_NAME, dbConfig);
+            for (int i = 0; i < DATACOUNT; i++) {
+                byte[] val = Integer.valueOf(i).toString().getBytes();
+                db.put(null,
+                       new DatabaseEntry(val),
+                       new DatabaseEntry(val));
+            }
+            stats =
+                StatFile.getNameValueMap(
+                    master.getRepStats(fastconfig).getStatGroups());
+
+            for (Map.Entry<String, Stat<?>> entry : stats.entrySet()) {
+                String name = entry.getKey();
+
+                assertTrue("Statistic " + name +
+                           " returned but not captured from master.",
+                           rsd.getDefinition(name) != null);
+
+                if (entry.getValue() instanceof LongMinStat) {
+                    assertTrue("Statistic " + name +
+                               " returned but not defined as a minStat.",
+                               findDef(name,
+                                       StatCaptureRepDefinitions.minStats));
+                } else if (entry.getValue() instanceof LongMaxStat) {
+                    assertTrue("Statistic " + name +
+                               " returned but not defined as a maxStat.",
+                               findDef(name,
+                                       StatCaptureRepDefinitions.maxStats));
+                }
+            }
+
+            ReplicatedEnvironment replica = repEnvInfo[1].getEnv();
+            stats =
+                StatFile.getNameValueMap(
+                    replica.getRepStats(fastconfig).getStatGroups());
+
+
+            for (Map.Entry<String, Stat<?>> entry : stats.entrySet()) {
+                String name = entry.getKey();
+
+                assertTrue("Statistic " + name +
+                           " returned but not captured from replica.",
+                           rsd.getDefinition(name) != null);
+
+                if (entry.getValue() instanceof LongMinStat) {
+                    assertTrue("Statistic " + name +
+                               " returned but not defined as a minStat.",
+                               findDef(name,
+                                       StatCaptureRepDefinitions.minStats));
+                } else if (entry.getValue() instanceof LongMaxStat) {
+                    assertTrue("Statistic " + name +
+                               " returned but not defined as a maxStat.",
+                               findDef(name,
+                                       StatCaptureRepDefinitions.maxStats));
+                }
+            }
+
+            db.close();
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+            RepTestUtils.shutdownRepEnvs(repEnvInfo);
+        }
+    }
+
+    private void verify(Map<String, Long> m1,
+                        String m1desc,
+                        Map<String, Long> m2,
+                        String m2desc) {
+
+        final int ERROR_PERCENT = 5;
+        String[] statsToVerify = {
+            "Cache:adminBytes",
+            "Cache:dataBytes",
+            "Cache:nBINsFetch",
+            "Cache:nBINsFetchMiss",
+            "Cache:nCachedBINs",
+            "Cache:nINCompactKey",
+            "Cache:nINNoTarget",
+            "Cache:nINSparseTarget",
+            "Cache:nLNsFetch",
+            "Cache:nLNsFetchMiss",
+            "Cache:nUpperINsFetch",
+            "Environment:btreeRelatchesRequired",
+            "Environment:environmentCreationTime",
+            "I/O:bufferBytes",
+            "I/O:nCacheMiss",
+            "I/O:nFSyncRequests",
+            "I/O:nFSyncs",
+            "I/O:nLogBuffers",
+            "I/O:nLogFSyncs",
+            "I/O:nNotResident",
+            "I/O:nRandomReadBytes",
+            "I/O:nRandomReads",
+            "I/O:nSequentialReadBytes",
+            "I/O:nSequentialReads",
+            "I/O:nSequentialWriteBytes",
+            "I/O:nSequentialWrites",
+            "Op:priSearch",
+            "Op:priInsert",
+        };
+
+        for (String statname : statsToVerify) {
+            long v1 = m1.get(statname).longValue();
+            long v2 = m2.get(statname).longValue();
+            long av1 = Math.abs(v1);
+            long av2 = Math.abs(v2);
+            assertTrue(statname + " " +
+                       m1desc + " [" + v1 + "] not equal to " +
+                       m2desc + " [" + v2 + "]",
+                       (av1 - (av1 * ERROR_PERCENT * .01) <= av2) &&
+                       (av2 <= (av1 + (av1 * ERROR_PERCENT * .01))));
+        }
+    }
+
+    private boolean findDef(String name, StatManager.SDef[] definitions) {
+        String[] namePart = name.split(":");
+        for (StatManager.SDef def : definitions) {
+            if
(namePart[0].equals(def.getGroupName()) && + namePart[1].equals(def.getDefinition().getName())) { + return true; + } + } + return false; + } + + private class Custom implements CustomStats { + + private final AtomicLong putLatency = new AtomicLong(); + private final AtomicLong putCount = new AtomicLong(); + + public Custom() { + putLatency.set(0); + putCount.set(0); + } + + public void setPutLatency(long val) { + if (val == 0){ + val = 1; + } + putCount.incrementAndGet(); + putLatency.addAndGet(val); + } + + @Override + public String[] getFieldNames() { + return new String[]{"putLatency"}; + } + + @Override + public String[] getFieldValues() { + String[] retval = new String[1]; + if (putCount.get() != 0) { + long val = putLatency.get() / putCount.get(); + retval[0] = + Long.valueOf(val).toString(); + } else { + retval[0] = "0"; + } + return retval; + } + } + + private int getRowCount(File file) throws Exception { + + BufferedReader fr = null; + int currentRowCount = 0; + try { + fr = new BufferedReader(new FileReader(file)); + while (fr.readLine() != null) { + currentRowCount++; + } + } + finally { + if (fr != null) { + fr.close(); + } + } + return currentRowCount; + } + + class FindFile implements FileFilter { + + String fileprefix; + FindFile(String fileprefix) { + this.fileprefix = fileprefix; + } + + @Override + public boolean accept(File f) { + return f.getName().startsWith(fileprefix); + } + } +} diff --git a/test/com/sleepycat/je/TruncateTest.java b/test/com/sleepycat/je/TruncateTest.java new file mode 100644 index 0000000..5b486fe --- /dev/null +++ b/test/com/sleepycat/je/TruncateTest.java @@ -0,0 +1,711 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; + +import org.junit.After; +import org.junit.Test; + +/** + * Basic database operations, excluding configuration testing. + */ +public class TruncateTest extends DualTestCase { + private static final int NUM_RECS = 257; + private static final String DB_NAME = "testDb"; + + private File envHome; + private Environment env; + + public TruncateTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + if (env != null) { + try { + /* Close in case we hit an exception and didn't close. */ + close(env); + } finally { + /* For JUNIT, to reduce memory usage when run in a suite. 
*/ + env = null; + } + } + super.tearDown(); + } + + @Test + public void testEnvTruncateAbort() + throws Throwable { + + doTruncateAndAdd(true, // transactional + 256, // step1 num records + false, // step2 autocommit + 150, // step3 num records + true, // step4 abort + 0); // step5 num records + } + + @Test + public void testEnvTruncateCommit() + throws Throwable { + + doTruncateAndAdd(true, // transactional + 256, // step1 num records + false, // step2 autocommit + 150, // step3 num records + false, // step4 abort + 150); // step5 num records + } + + @Test + public void testEnvTruncateAutocommit() + throws Throwable { + + doTruncateAndAdd(true, // transactional + 256, // step1 num records + true, // step2 autocommit + 150, // step3 num records + false, // step4 abort + 150); // step5 num records + } + + @Test + public void testEnvTruncateNoFirstInsert() + throws Throwable { + + doTruncateAndAdd(true, // transactional + 0, // step1 num records + false, // step2 autocommit + 150, // step3 num records + false, // step4 abort + 150); // step5 num records + } + + @Test + public void testNoTxnEnvTruncateCommit() + throws Throwable { + + doTruncateAndAdd(false, // transactional + 256, // step1 num records + false, // step2 autocommit + 150, // step3 num records + false, // step4 abort + 150); // step5 num records + } + + @Test + public void testTruncateCommit() + throws Throwable { + + doTruncate(false, false); + } + + @Test + public void testTruncateCommitAutoTxn() + throws Throwable { + + doTruncate(false, true); + } + + @Test + public void testTruncateAbort() + throws Throwable { + + doTruncate(true, false); + } + + /* + * SR 10386, 11252. This used to deadlock, because the truncate did not + * use an AutoTxn on the new mapLN, and the put operations conflicted with + * the held write lock. 
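     * (With the fix, the new MapLN is written under an auto-txn, so later
     * put operations no longer collide with the write lock held by the
     * truncating txn; the test below simply verifies the truncate completes
     * without deadlocking.)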
+ */ + @Test + public void testWriteAfterTruncate() + throws Throwable { + + try { + Database myDb = initEnvAndDb(true); + + myDb.close(); + Transaction txn = env.beginTransaction(null, null); + long truncateCount = env.truncateDatabase(txn, DB_NAME, true); + assertEquals(0, truncateCount); + txn.commit(); + close(env); + env = null; + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testTruncateEmptyDeferredWriteDatabase() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(false); + envConfig.setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false"); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(false); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + dbConfig.setDeferredWrite(true); + Database myDb = env.openDatabase(null, DB_NAME, dbConfig); + myDb.close(); + long truncateCount; + truncateCount = env.truncateDatabase(null, DB_NAME, true); + assertEquals(0, truncateCount); + } catch (Throwable T) { + T.printStackTrace(); + throw T; + } + } + + @Test + public void testTruncateNoLocking() + throws Throwable { + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(false); + envConfig.setConfigParam + (EnvironmentConfig.ENV_IS_LOCKING, "false"); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(false); + dbConfig.setAllowCreate(true); + Database myDb = env.openDatabase(null, DB_NAME, dbConfig); + myDb.put(null, new DatabaseEntry(new byte[0]), + new DatabaseEntry(new byte[0])); + myDb.close(); + long truncateCount; + truncateCount = env.truncateDatabase(null, DB_NAME, true); + assertEquals(1, truncateCount); + } catch (Throwable T) { + T.printStackTrace(); + throw T; + } + } + + /** + * 1. Populate a database. + * 2. Truncate. + * 3. Commit or abort. + * 4. Check that database has the right amount of records. + */ + private void doTruncate(boolean abort, boolean useAutoTxn) + throws Throwable { + + try { + int numRecsAfterTruncate = + useAutoTxn ? 0 : ((abort) ? NUM_RECS : 0); + Database myDb = initEnvAndDb(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Populate database. */ + for (int i = NUM_RECS; i > 0; i--) { + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + assertEquals(OperationStatus.SUCCESS, + myDb.put(null, key, data)); + } + + long nInsBefore = env.getStats(null).getNCachedUpperINs(); + long nBinsBefore = env.getStats(null).getNCachedBINs(); + + /* Truncate, check the count, commit. */ + myDb.close(); + long truncateCount = 0; + if (useAutoTxn) { + truncateCount = env.truncateDatabase(null, DB_NAME, true); + } else { + Transaction txn = env.beginTransaction(null, null); + truncateCount = env.truncateDatabase(txn, DB_NAME, true); + + if (abort) { + txn.abort(); + } else { + txn.commit(); + } + } + + assertEquals(NUM_RECS, truncateCount); + + /* Ensure cached IN/BIN stats are decremented. [#22100] */ + if (!abort) { + long nInsAfter = env.getStats(null).getNCachedUpperINs(); + long nBinsAfter = env.getStats(null).getNCachedBINs(); + assertTrue(nInsAfter < nInsBefore); + assertTrue(nBinsAfter < nBinsBefore); + } + + /* Do a cursor read, make sure there's the right amount of data. 
*/
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setSortedDuplicates(true);
+            dbConfig.setTransactional(true);
+            myDb = env.openDatabase(null, DB_NAME, dbConfig);
+            int count = 0;
+            Transaction txn = null;
+            if (DualTestCase.isReplicatedTest(getClass())) {
+                txn = env.beginTransaction(null, null);
+            }
+            Cursor cursor = myDb.openCursor(txn, null);
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                count++;
+            }
+            assertEquals(numRecsAfterTruncate, count);
+            cursor.close();
+            if (txn != null) {
+                txn.commit();
+            }
+
+            /* Recover the database. */
+            myDb.close();
+            close(env);
+            myDb = initEnvAndDb(true);
+
+            /* Check data after recovery. */
+            count = 0;
+            if (DualTestCase.isReplicatedTest(getClass())) {
+                txn = env.beginTransaction(null, null);
+            }
+            cursor = myDb.openCursor(txn, null);
+            while (cursor.getNext(key, data, LockMode.DEFAULT) ==
+                   OperationStatus.SUCCESS) {
+                count++;
+            }
+            assertEquals(numRecsAfterTruncate, count);
+            cursor.close();
+            if (txn != null) {
+                txn.commit();
+            }
+            myDb.close();
+            close(env);
+            env = null;
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * This method can be configured to execute a number of these steps:
+     * 1. Populate a database with 0 or N records.
+     * 2. Truncate.
+     * 3. Add more records.
+     * 4. Abort or commit.
+     * 5. Check that the database has the right number of records.
+     */
+    private void doTruncateAndAdd(boolean transactional,
+                                  int step1NumRecs,
+                                  boolean step2AutoCommit,
+                                  int step3NumRecs,
+                                  boolean step4Abort,
+                                  int step5NumRecs)
+        throws Throwable {
+
+        String databaseName = "testdb";
+        try {
+            /* Use enough records to force a split. */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(transactional);
+            envConfig.setAllowCreate(true);
+            envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                     "6");
+            env = create(envHome, envConfig);
+
+            /* Make a db and open it. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(transactional);
+            dbConfig.setAllowCreate(true);
+            Database myDb = env.openDatabase(null, databaseName, dbConfig);
+
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            /* Populate database with step1NumRecs. */
+            Transaction txn = null;
+            if (transactional) {
+                txn = env.beginTransaction(null, null);
+            }
+            for (int i = 0; i < step1NumRecs; i++) {
+                IntegerBinding.intToEntry(i, key);
+                IntegerBinding.intToEntry(i, data);
+                assertEquals(OperationStatus.SUCCESS,
+                             myDb.put(txn, key, data));
+            }
+
+            myDb.close();
+
+            /* Truncate, possibly with autocommit. */
+            if (step2AutoCommit && transactional) {
+                txn.commit();
+                txn = null;
+            }
+
+            /*
+             * Before truncate, there should be four databases in the system:
+             * the testDb database, naming db, and two cleaner databases.
+             */
+            final int nInitDbs = 4;
+            countLNs(nInitDbs, nInitDbs);
+            long truncateCount = env.truncateDatabase(txn, databaseName, true);
+            assertEquals(step1NumRecs, truncateCount);
+
+            /*
+             * The naming tree should not have more entries, the
+             * mapping tree might have one more, depending on abort.
+             */
+            if (step2AutoCommit || !transactional) {
+                countLNs(nInitDbs, nInitDbs);
+            } else {
+                countLNs(nInitDbs, nInitDbs + 1);
+            }
+
+            /* Add more records.
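+             * (These puts run in the same txn that performed the truncate,
+             * when one is in use, so they land in the new, empty incarnation
+             * of the database; the checkCount(myDb, txn, 0) call below
+             * confirms the truncated db starts empty.)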
*/
+            myDb = env.openDatabase(txn, databaseName, dbConfig);
+            checkCount(myDb, txn, 0);
+            for (int i = 0; i < step3NumRecs; i++) {
+                IntegerBinding.intToEntry(i, key);
+                IntegerBinding.intToEntry(i, data);
+                assertEquals(OperationStatus.SUCCESS,
+                             myDb.put(txn, key, data));
+            }
+
+            checkCount(myDb, txn, step3NumRecs);
+            myDb.close();
+
+            if (txn != null) {
+                if (step4Abort) {
+                    txn.abort();
+                } else {
+                    txn.commit();
+
+                }
+            }
+            /* Now the mapping tree should only have one less entry. */
+            countLNs(nInitDbs, nInitDbs);
+
+            /* Do a cursor read, make sure there's the right amount of data. */
+            myDb = env.openDatabase(null, databaseName, dbConfig);
+            checkCount(myDb, null, step5NumRecs);
+            myDb.close();
+            close(env);
+
+            /* Check data after recovery. */
+            env = create(envHome, envConfig);
+            myDb = env.openDatabase(null, databaseName, dbConfig);
+            checkCount(myDb, null, step5NumRecs);
+            myDb.close();
+            close(env);
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test that truncateDatabase and removeDatabase can be called after
+     * replaying an LN in that database during recovery. This is to test a fix
+     * to a bug where truncateDatabase caused a hang because DbTree.releaseDb
+     * was not called by RecoveryUtilizationTracker. [#16329]
+     */
+    @Test
+    public void testTruncateAfterRecovery()
+        throws Throwable {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[10]);
+        DatabaseEntry data = new DatabaseEntry(new byte[10]);
+
+        Database db = initEnvAndDb(true);
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        /* Write a single record for recovery. */
+        OperationStatus status = db.put(null, key, data);
+        assertSame(OperationStatus.SUCCESS, status);
+
+        /* Close without a checkpoint and run recovery. */
+        db.close();
+        envImpl.abnormalClose();
+        envImpl = null;
+        env = null;
+        db = initEnvAndDb(true);
+
+        /* Ensure that truncateDatabase does not hang. */
+        db.close();
+        long truncateCount = env.truncateDatabase(null, DB_NAME, true);
+        assertEquals(1, truncateCount);
+
+        /* removeDatabase should also work. */
+        env.removeDatabase(null, DB_NAME);
+        assertTrue(!env.getDatabaseNames().contains(DB_NAME));
+
+        close(env);
+        env = null;
+    }
+
+    @Test
+    public void testTruncateRecoveryWithoutMapLNDeletion()
+        throws Throwable {
+
+        doTestRecoveryWithoutMapLNDeletion(false, true);
+    }
+
+    @Test
+    public void testTruncateRecoveryWithoutMapLNDeletionNonTxnal()
+        throws Throwable {
+
+        doTestRecoveryWithoutMapLNDeletion(false, false);
+    }
+
+    @Test
+    public void testRemoveRecoveryWithoutMapLNDeletion()
+        throws Throwable {
+
+        doTestRecoveryWithoutMapLNDeletion(true, true);
+    }
+
+    @Test
+    public void testRemoveRecoveryWithoutMapLNDeletionNonTxnal()
+        throws Throwable {
+
+        doTestRecoveryWithoutMapLNDeletion(true, false);
+    }
+
+    /**
+     * Test that the MapLN is deleted by recovery after a crash during the
+     * commit of truncateDatabase and removeDatabase. The crash is in the
+     * window between logging the txn Commit and deleting the MapLN. [#20816]
+     */
+    private void doTestRecoveryWithoutMapLNDeletion(boolean doRemove,
+                                                    boolean txnal)
+        throws Throwable {
+
+        /* Open db/env and get pre-crash info.
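+         * (The crash is simulated via the pendingDeleted hook installed
+         * below: it flushes the log and abnormally closes the env after the
+         * Commit entry is durable but before the MapLN is deleted; recovery
+         * is then expected to finish the deletion.)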
*/ + Database db = initEnvAndDb(txnal); + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + DatabaseId dbIdBeforeCrash = dbImpl.getId(); + final EnvironmentImpl envBeforeCrash = + DbInternal.getNonNullEnvImpl(env); + assertSame(envBeforeCrash.getDbTree().getDb(dbIdBeforeCrash), dbImpl); + envBeforeCrash.getDbTree().releaseDb(dbImpl); + + /* Thrown to abort truncate/remove operation after simulated crash. */ + class CrashException extends RuntimeException {} + + /* + * Install a hook that 'crashes' after the txn Commit is logged but + * before the MapLN deletion. We flush to log to make sure the + * NameLN and Commit are flushed, since they may not be for a NoSync + * txn or non-txnal use. + */ + dbImpl.setPendingDeletedHook(new TestHook() { + public void doHook() { + envBeforeCrash.getLogManager().flushSync(); + envBeforeCrash.abnormalClose(); + throw new CrashException(); + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + }); + + /* Write a record, then close and truncate/remove the database. */ + DatabaseEntry key = new DatabaseEntry(new byte[10]); + DatabaseEntry data = new DatabaseEntry(new byte[10]); + OperationStatus status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + db.close(); + try { + if (doRemove) { + env.removeDatabase(null, DB_NAME); + } else { + env.truncateDatabase(null, DB_NAME, false); + } + fail(); + } catch (CrashException expected) { + env = null; + } + + /* Recover after crash. */ + db = initEnvAndDb(true); + final EnvironmentImpl envAfterCrash = + DbInternal.getNonNullEnvImpl(env); + + /* New DB should have new MapLN, old MapLN should be deleted. */ + dbImpl = DbInternal.getDbImpl(db); + assertTrue(dbIdBeforeCrash != dbImpl.getId()); + DatabaseImpl oldDbImpl = + envAfterCrash.getDbTree().getDb(dbIdBeforeCrash); + assertTrue(oldDbImpl != dbImpl); + assertTrue("isDeleted=" + (oldDbImpl == null || oldDbImpl.isDeleted()), + (oldDbImpl == null) || + (oldDbImpl.isDeleted() && oldDbImpl.isDeleteFinished())); + envBeforeCrash.getDbTree().releaseDb(oldDbImpl); + + db.close(); + close(env); + env = null; + } + + /** + * Test that the MapLN is NOT deleted by recovery when a renameDatabase is + * replayed. [#21537] + */ + @Test + public void testRecoveryRenameMapLNDeletion() + throws Throwable { + + /* Open db/env. */ + final Database dbBefore = initEnvAndDb(true /*txnal*/); + + /* Write a record. */ + final DatabaseEntry key = new DatabaseEntry(new byte[10]); + final DatabaseEntry data = new DatabaseEntry(new byte[10]); + final OperationStatus status = dbBefore.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(1, dbBefore.count()); + + /* Close, rename, then rename back again. */ + dbBefore.close(); + final String newName = DB_NAME + "-renamed"; + env.renameDatabase(null, DB_NAME, newName); + env.renameDatabase(null, newName, DB_NAME); + + /* Crash. */ + final EnvironmentImpl envBefore = DbInternal.getNonNullEnvImpl(env); + envBefore.getLogManager().flushSync(); + envBefore.abnormalClose(); + + /* Recover after crash, open DB. */ + final Database dbAfter = initEnvAndDb(true); + + /* Verify that record is still present. 
*/ + final OperationStatus statusAfter = dbAfter.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, statusAfter); + assertEquals(1, dbAfter.count()); + + dbAfter.close(); + close(env); + env = null; + } + + /** + * Set up the environment and db. + */ + private Database initEnvAndDb(boolean isTransactional) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(isTransactional); + envConfig.setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + /* Make a db and open it. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setSortedDuplicates(true); + dbConfig.setAllowCreate(true); + Database myDb = env.openDatabase(null, DB_NAME, dbConfig); + return myDb; + } + + private void checkCount(Database db, Transaction txn, int expectedCount) + throws DatabaseException { + + Cursor cursor = db.openCursor(txn, null); + int count = 0; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) { + count++; + } + assertEquals(expectedCount, count); + cursor.close(); + } + + /** + * Use stats to count the number of LNs in the id and name mapping + * trees. It's not possible to use Cursor, and stats are easier to use + * than CursorImpl. This relies on the fact that the stats actually + * correctly account for deleted entries. + */ + private void countLNs(int expectNameLNs, + int expectMapLNs) + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* check number of LNs in the id mapping tree. */ + DatabaseImpl mapDbImpl = + envImpl.getDbTree().getDb(DbTree.ID_DB_ID); + // mapDbImpl.getTree().dump(); + BtreeStats mapStats = + (BtreeStats) mapDbImpl.stat(new StatsConfig()); + assertEquals(expectMapLNs, + (mapStats.getLeafNodeCount())); + + /* check number of LNs in the naming tree. */ + DatabaseImpl nameDbImpl = + envImpl.getDbTree().getDb(DbTree.NAME_DB_ID); + BtreeStats nameStats = + (BtreeStats) nameDbImpl.stat(new StatsConfig()); + assertEquals(expectNameLNs, + (nameStats.getLeafNodeCount())); + } +} diff --git a/test/com/sleepycat/je/cleaner/BackgroundIOTest.java b/test/com/sleepycat/je/cleaner/BackgroundIOTest.java new file mode 100644 index 0000000..e702073 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/BackgroundIOTest.java @@ -0,0 +1,284 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; + +@RunWith(Parameterized.class) +public class BackgroundIOTest extends CleanerTestBase { + + final static int FILE_SIZE = 1000000; + + private static CheckpointConfig forceConfig; + static { + forceConfig = new CheckpointConfig(); + forceConfig.setForce(true); + } + + private int readLimit; + private int writeLimit; + private int nSleeps; + + private boolean embeddedLNs = false; + + public BackgroundIOTest(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null ; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + @Test + public void testBackgroundIO1() + throws DatabaseException { + + openEnv(10, 10); + if (isCkptHighPriority()) { + doTest(93, 113); + } else { + if (embeddedLNs) { + doTest(240, 270); + } else { + doTest(186, 206); + } + } + } + + @Test + public void testBackgroundIO2() + throws DatabaseException { + + openEnv(10, 5); + if (isCkptHighPriority()) { + doTest(93, 113); + } else { + if (embeddedLNs) { + doTest(410, 440); + } else { + doTest(310, 330); + } + } + } + + @Test + public void testBackgroundIO3() + throws DatabaseException { + + openEnv(5, 10); + if (isCkptHighPriority()) { + doTest(167, 187); + } else { + if (embeddedLNs) { + doTest(310, 350); + } else { + doTest(259, 279); + } + } + } + + @Test + public void testBackgroundIO4() + throws DatabaseException { + + openEnv(5, 5); + if (isCkptHighPriority()) { + doTest(167, 187); + } else { + if (embeddedLNs) { + doTest(490, 520); + } else { + doTest(383, 403); + } + } + } + + private boolean isCkptHighPriority() + throws DatabaseException { + + return "true".equals(env.getConfig().getConfigParam + (EnvironmentParams.CHECKPOINTER_HIGH_PRIORITY.getName())); + } + + private void openEnv(int readLimit, int writeLimit) + throws DatabaseException { + + this.readLimit = readLimit; + this.writeLimit = writeLimit; + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + + envConfig.setConfigParam + (EnvironmentParams.LOG_BUFFER_MAX_SIZE.getName(), + Integer.toString(1024)); + + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(FILE_SIZE)); + + envConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "60"); + //* + 
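+        // (The two limits set below throttle background IO: after roughly
+        // that many reads or writes done on behalf of the cleaner or
+        // checkpointer, JE sleeps; each such sleep fires the hook that
+        // increments nSleeps in this test.)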
envConfig.setConfigParam + (EnvironmentParams.ENV_BACKGROUND_READ_LIMIT.getName(), + String.valueOf(readLimit)); + envConfig.setConfigParam + (EnvironmentParams.ENV_BACKGROUND_WRITE_LIMIT.getName(), + String.valueOf(writeLimit)); + + if (envMultiSubDir) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, DATA_DIRS + ""); + } + + //*/ + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + } + + private void doTest(int minSleeps, int maxSleeps) + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + envImpl.setBackgroundSleepHook( + new TestHook() { + public void doHook() { + nSleeps += 1; + assertEquals(0, LatchSupport.nBtreeLatchesHeld()); + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + }); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + Database db = env.openDatabase(null, "BackgroundIO", dbConfig); + + final int nFiles = 3; + final int keySize = 20; + final int dataSize = 10; + final int recSize = keySize + dataSize + 35 /* LN overhead */; + + /* 3 * (1,000,000 / 65) = 46,153 */ + final int nRecords = nFiles * (FILE_SIZE / recSize); + + /* + * Insert records first so we will have a sizeable checkpoint. Insert + * interleaved because sequential inserts flush the BINs, and we want + * to defer BIN flushing until the checkpoint. + */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[dataSize]); + for (int i = 0; i <= nRecords; i += 2) { + setKey(key, i, keySize); + db.put(null, key, data); + } + for (int i = 1; i <= nRecords; i += 2) { + setKey(key, i, keySize); + db.put(null, key, data); + } + + /* Perform a checkpoint to perform background writes. */ + env.checkpoint(forceConfig); + + /* Delete records so we will have a sizable cleaning. */ + for (int i = 0; i <= nRecords; i += 1) { + setKey(key, i, keySize); + db.delete(null, key); + } + + /* Perform cleaning to perform background reading. */ + env.checkpoint(forceConfig); + env.cleanLog(); + env.checkpoint(forceConfig); + + db.close(); + env.close(); + env = null; + + String msg; + msg = "readLimit=" + readLimit + + " writeLimit=" + writeLimit + + " minSleeps=" + minSleeps + + " maxSleeps=" + maxSleeps + + " actualSleeps=" + nSleeps; + //System.out.println(msg); + + //* + assertTrue(msg, nSleeps >= minSleeps && nSleeps <= maxSleeps); + //*/ + } + + /** + * Outputs an integer followed by pad bytes. + */ + private void setKey(DatabaseEntry entry, int val, int len) { + TupleOutput out = new TupleOutput(); + out.writeInt(val); + for (int i = 0; i < len - 4; i += 1) { + out.writeByte(0); + } + TupleBase.outputToEntry(out, entry); + } +} diff --git a/test/com/sleepycat/je/cleaner/CleanerTest.java b/test/com/sleepycat/je/cleaner/CleanerTest.java new file mode 100644 index 0000000..ec27e04 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/CleanerTest.java @@ -0,0 +1,2123 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.recovery.Checkpointer; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.FileSummaryLN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.LockType; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.utilint.StringUtils; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class CleanerTest extends CleanerTestBase { + + private static final int N_KEYS = 300; + private static final int N_KEY_BYTES = 10; + + /* + * Make the log file size small enough to allow cleaning, but large enough + * not to generate a lot of fsyncing at the log file boundaries. + */ + private static final int FILE_SIZE = 10000; + + protected Database db = null; + + private Database exampleDb; + + private boolean embeddedLNs = false; + + private static final CheckpointConfig FORCE_CONFIG = + new CheckpointConfig(); + static { + FORCE_CONFIG.setForce(true); + } + + private JUnitThread junitThread; + private volatile int synchronizer; + + public CleanerTest(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? 
"multi-sub-dir" : null ; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + private void initEnv(boolean createDb, boolean allowDups) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(FILE_SIZE)); + envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams.CLEANER_REMOVE.getName(), + "false"); + envConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam(EnvironmentParams.BIN_DELTA_PERCENT.getName(), + "75"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + String databaseName = "cleanerDb"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(createDb); + dbConfig.setSortedDuplicates(allowDups); + exampleDb = env.openDatabase(null, databaseName, dbConfig); + } + + @After + public void tearDown() + throws Exception { + + if (junitThread != null) { + junitThread.shutdown(); + junitThread = null; + } + + super.tearDown(); + exampleDb = null; + } + + private void closeEnv() + throws DatabaseException { + + if (exampleDb != null) { + exampleDb.close(); + exampleDb = null; + } + + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testCleanerNoDupes() + throws Throwable { + + initEnv(true, false); + try { + doCleanerTest(N_KEYS, 1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testCleanerWithDupes() + throws Throwable { + + initEnv(true, true); + try { + doCleanerTest(2, 500); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + private void doCleanerTest(int nKeys, int nDupsPerKey) + throws DatabaseException { + + EnvironmentImpl environment = + DbInternal.getNonNullEnvImpl(env); + FileManager fileManager = environment.getFileManager(); + Map> expectedMap = + new HashMap>(); + doLargePut(expectedMap, nKeys, nDupsPerKey, true); + Long lastNum = fileManager.getLastFileNum(); + + /* Read the data back. */ + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + Cursor cursor = exampleDb.openCursor(null, null); + + while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + } + + env.checkpoint(FORCE_CONFIG); + + for (int i = 0; i < (int) lastNum.longValue(); i++) { + + /* + * Force clean one file. Utilization-based cleaning won't + * work here, since utilization is over 90%. + */ + DbInternal.getNonNullEnvImpl(env). + getCleaner(). 
+ doClean(false, // cleanMultipleFiles + true); // forceCleaning + } + + EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); + assertTrue(stats.getNINsCleaned() > 0); + + cursor.close(); + closeEnv(); + + initEnv(false, (nDupsPerKey > 1)); + + checkData(expectedMap); + assertTrue(fileManager.getLastFileNum().longValue() > + lastNum.longValue()); + + closeEnv(); + } + + /** + * Ensure that INs are cleaned. + */ + @Test + public void testCleanInternalNodes() + throws DatabaseException { + + initEnv(true, true); + int nKeys = 200; + + EnvironmentImpl environment = + DbInternal.getNonNullEnvImpl(env); + FileManager fileManager = environment.getFileManager(); + /* Insert a lot of keys. ExpectedMap holds the expected data. */ + Map<String, Set<String>> expectedMap = + new HashMap<String, Set<String>>(); + doLargePut(expectedMap, nKeys, 1, true); + + /* Modify every other piece of data. */ + modifyData(expectedMap, 10, true); + checkData(expectedMap); + + /* Checkpoint */ + env.checkpoint(FORCE_CONFIG); + checkData(expectedMap); + + /* Modify every other piece of data. */ + modifyData(expectedMap, 10, true); + checkData(expectedMap); + + /* Checkpoint -- this should obsolete INs. */ + env.checkpoint(FORCE_CONFIG); + checkData(expectedMap); + + /* Clean */ + Long lastNum = fileManager.getLastFileNum(); + env.cleanLog(); + + /* Validate after cleaning. */ + checkData(expectedMap); + EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); + + /* Make sure we really cleaned something. */ + assertTrue(stats.getNINsCleaned() > 0); + assertTrue(stats.getNLNsCleaned() > 0); + + closeEnv(); + initEnv(false, true); + checkData(expectedMap); + assertTrue(fileManager.getLastFileNum().longValue() > + lastNum.longValue()); + + closeEnv(); + } + + /** + * See if we can clean in the middle of the file set. + */ + @Test + public void testCleanFileHole() + throws Throwable { + + initEnv(true, true); + + int nKeys = 20; // test ends up inserting 2*nKeys + int nDupsPerKey = 30; + + EnvironmentImpl environment = + DbInternal.getNonNullEnvImpl(env); + FileManager fileManager = environment.getFileManager(); + + /* Insert some non-dup data, modify, insert dup data. */ + Map<String, Set<String>> expectedMap = + new HashMap<String, Set<String>>(); + doLargePut(expectedMap, nKeys, 1, true); + modifyData(expectedMap, 10, true); + doLargePut(expectedMap, nKeys, nDupsPerKey, true); + checkData(expectedMap); + + /* + * Delete all the data, but abort. (Try to fill up the log + * with entries we don't need.) + */ + deleteData(expectedMap, false, false); + checkData(expectedMap); + + /* Do some more insertions, but abort them. */ + doLargePut(expectedMap, nKeys, nDupsPerKey, false); + checkData(expectedMap); + + /* Do some more insertions and commit them. */ + doLargePut(expectedMap, nKeys, nDupsPerKey, true); + checkData(expectedMap); + + /* Checkpoint */ + env.checkpoint(FORCE_CONFIG); + checkData(expectedMap); + + /* Clean */ + Long lastNum = fileManager.getLastFileNum(); + env.cleanLog(); + + /* Validate after cleaning. */ + checkData(expectedMap); + EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); + + /* Make sure we really cleaned something. */ + assertTrue(stats.getNINsCleaned() > 0); + assertTrue(stats.getNLNsCleaned() > 0); + + closeEnv(); + initEnv(false, true); + checkData(expectedMap); + assertTrue(fileManager.getLastFileNum().longValue() > + lastNum.longValue()); + + closeEnv(); + } + + /** + * Test for SR13191. This SR shows a problem where a MapLN is initialized + * with a DatabaseImpl that has a null EnvironmentImpl.
When the Database + * gets used, a NullPointerException occurs in the Cursor code which + * expects there to be an EnvironmentImpl present. The MapLN gets init'd + * by the Cleaner reading through a log file and encountering a MapLN which + * is not presently in the DbTree. As an efficiency, the Cleaner calls + * updateEntry on the BIN to try to insert the MapLN into the BIN so that + * it won't have to fetch it when it migrates the BIN. But this is bad + * since the MapLN has not been init'd properly. The fix was to ensure + * that the MapLN is init'd correctly by calling postFetchInit on it just + * prior to inserting it into the BIN. + * + * This test first creates an environment and two databases. The first + * database it just adds to the tree with no data. This will be the MapLN + * that eventually gets instantiated by the cleaner. The second database + * is used just to create a bunch of data that will get deleted so as to + * create a low utilization for one of the log files. Once the data for + * db2 is created, the log is flipped (so file 0 is the one with the MapLN + * for db1 in it), and the environment is closed and reopened. We insert + * more data into db2 until we have enough .jdb files that file 0 is + * attractive to the cleaner. Call the cleaner to have it instantiate the + * MapLN and then use the MapLN in a Database.get() call. + */ + @Test + public void testSR13191() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManager fileManager = + DbInternal.getNonNullEnvImpl(env).getFileManager(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db1 = + env.openDatabase(null, "db1", dbConfig); + + Database db2 = + env.openDatabase(null, "db2", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(1, key); + data.setData(new byte[100000]); + for (int i = 0; i < 50; i++) { + assertEquals(OperationStatus.SUCCESS, db2.put(null, key, data)); + } + db1.close(); + db2.close(); + assertEquals("Should have 0 as current file", 0L, + fileManager.getCurrentFileNum()); + envImpl.forceLogFileFlip(); + env.close(); + + env = new Environment(envHome, envConfig); + fileManager = DbInternal.getNonNullEnvImpl(env).getFileManager(); + assertEquals("Should have 1 as current file", 1L, + fileManager.getCurrentFileNum()); + + db2 = env.openDatabase(null, "db2", dbConfig); + + for (int i = 0; i < 250; i++) { + assertEquals(OperationStatus.SUCCESS, db2.put(null, key, data)); + } + + db2.close(); + env.cleanLog(); + db1 = env.openDatabase(null, "db1", dbConfig); + db1.get(null, key, data, null); + db1.close(); + env.close(); + } + + /** + * Tests that setting je.env.runCleaner=false stops the cleaner from + * processing more files even if the target minUtilization is not met + * [#15158]. 
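 + * + * In outline, the runtime toggle exercised below is just the mutable + * config round-trip (a sketch of the calls made in the test body; mc is + * a local name used only for illustration): + * + * EnvironmentMutableConfig mc = env.getMutableConfig(); + * mc.setConfigParam( + * EnvironmentParams.ENV_RUN_CLEANER.getName(), "true"); + * env.setMutableConfig(mc);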
+ */ + @Test + public void testCleanerStop() + throws Throwable { + + final int fileSize = 1000000; + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(fileSize)); + envConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "CleanerStop", dbConfig); + + DatabaseEntry key = new DatabaseEntry(new byte[1]); + DatabaseEntry data = new DatabaseEntry(new byte[fileSize]); + for (int i = 0; i <= 10; i += 1) { + db.put(null, key, data); + } + env.checkpoint(FORCE_CONFIG); + + EnvironmentStats stats = env.getStats(null); + assertEquals(0, stats.getNCleanerRuns()); + + envConfig = env.getConfig(); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "true"); + env.setMutableConfig(envConfig); + + int iter = 0; + while (stats.getNCleanerRuns() < 10) { + iter += 1; + if (iter == 20) { + + /* + * At one time the DaemonThread did not wakeup immediately in + * this test. A workaround was to add an item to the job queue + * in FileProcessor.wakeup. Later the job queue was removed + * and the DaemonThread.run() was fixed to wakeup immediately. + * This test verifies that the cleanup of the run() method + * works properly [#15267]. + */ + fail("Cleaner did not run after " + iter + " tries"); + } + Thread.yield(); + Thread.sleep(1000); + stats = env.getStats(null); + } + + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + env.setMutableConfig(envConfig); + + long prevNFiles = stats.getNCleanerRuns(); + + /* Do multiple updates to create obsolete records. */ + for (int i = 0; i <= 10; i++) { + db.put(null, key, data); + } + + /* Wait a while to see if cleaner starts to work. */ + Thread.sleep(1000); + + stats = env.getStats(null); + long currNFiles = stats.getNCleanerRuns(); + assertEquals("Expected no files cleaned, prevNFiles=" + prevNFiles + + ", currNFiles=" + currNFiles, + prevNFiles, currNFiles); + + db.close(); + env.close(); + } + + /** + * Tests that the FileSelector memory budget is subtracted when the + * environment is closed. Before the fix in SR [#16368], it was not. 
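 + * + * The leak check works because the test closes the environment via + * envImpl.close(false), i.e. with no final checkpoint: any FileSelector + * bytes that were never subtracted then show up in the internal + * memory-budget check performed during close.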
+ */ + @Test + public void testFileSelectorMemBudget() + throws Throwable { + + final int fileSize = 1000000; + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(fileSize)); + envConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + DatabaseEntry key = new DatabaseEntry(new byte[1]); + DatabaseEntry data = new DatabaseEntry(new byte[fileSize]); + for (int i = 0; i <= 10; i += 1) { + db.put(null, key, data); + } + env.checkpoint(FORCE_CONFIG); + + int nFiles = env.cleanLog(); + assertTrue(nFiles > 0); + + db.close(); + + /* + * To force the memory leak to be detected we have to close without a + * checkpoint. The checkpoint will finish processing all cleaned files + * and subtract them from the budget. But this should happen during + * close, even without a checkpoint. + */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.close(false /*doCheckpoint*/); + } + + /** + * Tests that the cleanLog cannot be called in a read-only environment. + * [#16368] + */ + @Test + public void testCleanLogReadOnly() + throws Throwable { + + /* Open read-write. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + env.close(); + env = null; + + /* Open read-only. */ + envConfig.setAllowCreate(false); + envConfig.setReadOnly(true); + env = new Environment(envHome, envConfig); + + /* Try cleanLog in a read-only env. */ + try { + env.cleanLog(); + fail(); + } catch (UnsupportedOperationException e) { + assertEquals + ("Log cleaning not allowed in a read-only or memory-only " + + "environment", e.getMessage()); + + } + } + + /** + * Tests that when a file being cleaned is deleted, we ignore the error and + * don't repeatedly try to clean it. This is happening when we mistakenly + * clean a file after it has been queued for deletion. The workaround is + * to catch LogFileNotFoundException in the cleaner and ignore the error. + * We're testing the workaround here by forcing cleaning of deleted files. 
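 + * + * Cleaning of already-deleted files is forced below with + * FileSelector.injectFileForCleaning, which queues a given file number + * as if the cleaner had selected it for cleaning.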
+ * [#15528] + */ + @Test + public void testUnexpectedFileDeletion() + throws DatabaseException { + + initEnv(true, false); + EnvironmentMutableConfig config = env.getMutableConfig(); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); + env.setMutableConfig(config); + + final EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + final Cleaner cleaner = envImpl.getCleaner(); + final FileSelector fileSelector = cleaner.getFileSelector(); + + Map<String, Set<String>> expectedMap = + new HashMap<String, Set<String>>(); + doLargePut(expectedMap, 1000, 1, true); + checkData(expectedMap); + + final long file1 = 0; + final long file2 = 1; + + for (int i = 0; i < 100; i += 1) { + modifyData(expectedMap, 1, true); + checkData(expectedMap); + fileSelector.injectFileForCleaning(new Long(file1)); + fileSelector.injectFileForCleaning(new Long(file2)); + assertTrue(fileSelector.getToBeCleanedFiles().contains(file1)); + assertTrue(fileSelector.getToBeCleanedFiles().contains(file2)); + while (env.cleanLog() > 0) {} + assertTrue(!fileSelector.getToBeCleanedFiles().contains(file1)); + assertTrue(!fileSelector.getToBeCleanedFiles().contains(file2)); + env.checkpoint(FORCE_CONFIG); + Map<Long, FileSummary> allFiles = envImpl.getUtilizationProfile(). + getFileSummaryMap(true /*includeTrackedFiles*/); + assertTrue(!allFiles.containsKey(file1)); + assertTrue(!allFiles.containsKey(file2)); + } + checkData(expectedMap); + + closeEnv(); + } + + /** + * Helper routine. Generates keys with random alpha values while data + * values are numbered sequentially. + */ + private void doLargePut(Map<String, Set<String>> expectedMap, + int nKeys, + int nDupsPerKey, + boolean commit) + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + for (int i = 0; i < nKeys; i++) { + byte[] key = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(key); + String keyString = StringUtils.fromUTF8(key); + + /* + * The data map is keyed by key value, and holds a hash + * map of all data values. + */ + Set<String> dataVals = new HashSet<String>(); + if (commit) { + expectedMap.put(keyString, dataVals); + } + for (int j = 0; j < nDupsPerKey; j++) { + String dataString = Integer.toString(j); + exampleDb.put(txn, + new StringDbt(keyString), + new StringDbt(dataString)); + dataVals.add(dataString); + } + } + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + } + + /** + * Increment each data value. + */ + private void modifyData(Map<String, Set<String>> expectedMap, + int increment, + boolean commit) + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + Cursor cursor = exampleDb.openCursor(txn, null); + OperationStatus status = cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + + boolean toggle = true; + while (status == OperationStatus.SUCCESS) { + if (toggle) { + + String foundKeyString = foundKey.getString(); + String foundDataString = foundData.getString(); + int newValue = Integer.parseInt(foundDataString) + increment; + String newDataString = Integer.toString(newValue); + + /* If committing, adjust the expected map.
*/ + if (commit) { + + Set<String> dataVals = expectedMap.get(foundKeyString); + if (dataVals == null) { + fail("Couldn't find " + + foundKeyString + "/" + foundDataString); + } else if (dataVals.contains(foundDataString)) { + dataVals.remove(foundDataString); + dataVals.add(newDataString); + } else { + fail("Couldn't find " + + foundKeyString + "/" + foundDataString); + } + } + + assertEquals(OperationStatus.SUCCESS, + cursor.delete()); + assertEquals(OperationStatus.SUCCESS, + cursor.put(foundKey, + new StringDbt(newDataString))); + toggle = false; + } else { + toggle = true; + } + + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + } + + cursor.close(); + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + } + + /** + * Delete data. + */ + private void deleteData(Map<String, Set<String>> expectedMap, + boolean everyOther, + boolean commit) + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + Cursor cursor = exampleDb.openCursor(txn, null); + OperationStatus status = cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + + boolean toggle = true; + while (status == OperationStatus.SUCCESS) { + if (toggle) { + + String foundKeyString = foundKey.getString(); + String foundDataString = foundData.getString(); + + /* If committing, adjust the expected map. */ + if (commit) { + + Set<String> dataVals = expectedMap.get(foundKeyString); + if (dataVals == null) { + fail("Couldn't find " + + foundKeyString + "/" + foundDataString); + } else if (dataVals.contains(foundDataString)) { + dataVals.remove(foundDataString); + if (dataVals.size() == 0) { + expectedMap.remove(foundKeyString); + } + } else { + fail("Couldn't find " + + foundKeyString + "/" + foundDataString); + } + } + + assertEquals(OperationStatus.SUCCESS, cursor.delete()); + } + + if (everyOther) { + toggle = !toggle; + } + + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + } + + cursor.close(); + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + } + + /** + * Check what's in the database against what's in the expected map. + */ + private void checkData(Map<String, Set<String>> expectedMap) + throws DatabaseException { + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + Cursor cursor = exampleDb.openCursor(null, null); + OperationStatus status = cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + + /* + * Make a copy of expectedMap so that we're free to delete out + * of the set of expected results when we verify. + * Also make a set of counts for each key value, to test count.
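 + * (cursor.count() returns the number of duplicates for the current + * key, which is why a per-key count map is built here.)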
+ */ + + Map<String, Set<String>> checkMap = new HashMap<String, Set<String>>(); + Map<String, Integer> countMap = new HashMap<String, Integer>(); + Iterator<Map.Entry<String, Set<String>>> iter = + expectedMap.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry<String, Set<String>> entry = iter.next(); + Set<String> copySet = new HashSet<String>(); + copySet.addAll(entry.getValue()); + checkMap.put(entry.getKey(), copySet); + countMap.put(entry.getKey(), new Integer(copySet.size())); + } + + while (status == OperationStatus.SUCCESS) { + String foundKeyString = foundKey.getString(); + String foundDataString = foundData.getString(); + + /* Check that the current value is in the check values map */ + Set<String> dataVals = checkMap.get(foundKeyString); + if (dataVals == null) { + fail("Couldn't find " + + foundKeyString + "/" + foundDataString); + } else if (dataVals.contains(foundDataString)) { + dataVals.remove(foundDataString); + if (dataVals.size() == 0) { + checkMap.remove(foundKeyString); + } + } else { + fail("Couldn't find " + + foundKeyString + "/" + + foundDataString + + " in data vals"); + } + + /* Check that the count is right. */ + int count = cursor.count(); + assertEquals(countMap.get(foundKeyString).intValue(), + count); + + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + } + + cursor.close(); + + if (checkMap.size() != 0) { + dumpExpected(checkMap); + fail("checkMapSize = " + checkMap.size()); + + } + assertEquals(0, checkMap.size()); + } + + private void dumpExpected(Map expectedMap) { + Iterator iter = expectedMap.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + String key = (String) entry.getKey(); + Iterator dataIter = ((Set) entry.getValue()).iterator(); + while (dataIter.hasNext()) { + System.out.println("key=" + key + + " data=" + (String) dataIter.next()); + } + } + } + + /** + * Tests that cleaner mutable configuration parameters can be changed and + * that the changes actually take effect. + */ + @Test + public void testMutableConfig() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + envConfig = env.getConfig(); + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + Cleaner cleaner = envImpl.getCleaner(); + MemoryBudget budget = envImpl.getMemoryBudget(); + String name; + String val; + + /* je.cleaner.minUtilization */ + name = EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(); + setParam(name, "33"); + assertEquals(33, cleaner.minUtilization); + + /* je.cleaner.minFileUtilization */ + name = EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName(); + setParam(name, "7"); + assertEquals(7, cleaner.minFileUtilization); + + /* je.cleaner.bytesInterval */ + name = EnvironmentParams.CLEANER_BYTES_INTERVAL.getName(); + setParam(name, "1000"); + assertEquals(1000, cleaner.cleanerBytesInterval); + + /* je.cleaner.deadlockRetry */ + name = EnvironmentParams.CLEANER_DEADLOCK_RETRY.getName(); + setParam(name, "7"); + assertEquals(7, cleaner.nDeadlockRetries); + + /* je.cleaner.lockTimeout */ + name = EnvironmentParams.CLEANER_LOCK_TIMEOUT.getName(); + setParam(name, "7000"); + assertEquals(7, cleaner.lockTimeout); + + /* je.cleaner.expunge */ + name = EnvironmentParams.CLEANER_REMOVE.getName(); + val = "false".equals(envConfig.getConfigParam(name)) ?
+ "true" : "false"; + setParam(name, val); + assertEquals(val.equals("true"), cleaner.expunge); + + /* je.cleaner.minAge */ + name = EnvironmentParams.CLEANER_MIN_AGE.getName(); + setParam(name, "7"); + assertEquals(7, cleaner.minAge); + + /* je.cleaner.readSize */ + name = EnvironmentParams.CLEANER_READ_SIZE.getName(); + setParam(name, "7777"); + assertEquals(7777, cleaner.readBufferSize); + + /* je.cleaner.detailMaxMemoryPercentage */ + name = EnvironmentParams.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE. + getName(); + setParam(name, "7"); + assertEquals((budget.getMaxMemory() * 7) / 100, + budget.getTrackerBudget()); + + /* je.cleaner.threads */ + name = EnvironmentParams.CLEANER_THREADS.getName(); + setParam(name, "7"); + assertEquals((envImpl.isNoLocking() ? 0 : 7), + countCleanerThreads()); + + env.close(); + env = null; + } + + /** + * Sets a mutable config param, checking that the given value is not + * already set and that it actually changes. + */ + private void setParam(String name, String val) + throws DatabaseException { + + EnvironmentMutableConfig config = env.getMutableConfig(); + String myVal = config.getConfigParam(name); + assertTrue(!val.equals(myVal)); + + config.setConfigParam(name, val); + env.setMutableConfig(config); + + config = env.getMutableConfig(); + myVal = config.getConfigParam(name); + assertTrue(val.equals(myVal)); + } + + /** + * Count the number of threads with the name "Cleaner#". + */ + private int countCleanerThreads() { + + Thread[] threads = new Thread[Thread.activeCount()]; + Thread.enumerate(threads); + + int count = 0; + for (int i = 0; i < threads.length; i += 1) { + if (threads[i] != null && + threads[i].getName().startsWith("Cleaner")) { + count += 1; + } + } + + return count; + } + + /** + * Checks that the memory budget is updated properly by the + * UtilizationTracker. Prior to a bug fix [#15505] amounts were added to + * the budget but not subtracted when two TrackedFileSummary objects were + * merged. Merging occurs when a local tracker is added to the global + * tracker. Local trackers are used during recovery, checkpoints, lazy + * compression, and reverse splits. + */ + @Test + public void testTrackerMemoryBudget() + throws DatabaseException { + + /* Open environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + /* Open database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + exampleDb = env.openDatabase(null, "foo", dbConfig); + + /* Insert data. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= 200; i += 1) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + exampleDb.put(null, key, data); + } + + /* Sav the admin budget baseline. */ + flushTrackedFiles(); + long admin = env.getStats(null).getAdminBytes(); + + /* + * Nothing becomes obsolete when inserting and no INs are logged, so + * the budget does not increase. 
In fact, if the new LN is embedded, + * it is recorded as immediately obsolete, but its offset is not + * tracked. + */ + IntegerBinding.intToEntry(201, key); + exampleDb.put(null, key, data); + assertEquals(admin, env.getStats(null).getAdminBytes()); + flushTrackedFiles(); + assertEquals(admin, env.getStats(null).getAdminBytes()); + + /* + * Update a record and expect the budget to increase because the old + * LN becomes obsolete. With embedded LNs, no increase occurs because + * the old LN has already been counted. + */ + exampleDb.put(null, key, data); + + if (embeddedLNs) { + assertEquals(admin, env.getStats(null).getAdminBytes()); + } else { + assertTrue(admin < env.getStats(null).getAdminBytes()); + } + + flushTrackedFiles(); + assertEquals(admin, env.getStats(null).getAdminBytes()); + + /* + * Delete all records and expect the budget to increase because LNs + * become obsolete. + */ + for (int i = 1; i <= 201; i += 1) { + IntegerBinding.intToEntry(i, key); + exampleDb.delete(null, key); + } + + if (embeddedLNs) { + assertEquals(admin, env.getStats(null).getAdminBytes()); + } else { + assertTrue(admin < env.getStats(null).getAdminBytes()); + } + + flushTrackedFiles(); + assertEquals(admin, env.getStats(null).getAdminBytes()); + + /* + * Compress and expect no change to the budget. Prior to the fix for + * [#15505] the assertion below failed because the baseline admin + * budget was not restored. + */ + env.compress(); + flushTrackedFiles(); + assertEquals(admin, env.getStats(null).getAdminBytes()); + + closeEnv(); + } + + /** + * Flushes all tracked files to subtract tracked info from the admin memory + * budget. + */ + private void flushTrackedFiles() + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + UtilizationTracker tracker = envImpl.getUtilizationTracker(); + UtilizationProfile profile = envImpl.getUtilizationProfile(); + + for (TrackedFileSummary summary : tracker.getTrackedFiles()) { + profile.flushFileSummary(summary); + } + } + + /** + * Tests that memory is budgeted correctly for FileSummaryLNs that are + * inserted and deleted after calling setTrackedSummary. The size of the + * FileSummaryLN changes during logging when setTrackedSummary is called, + * and this is accounted for specially in CursorImpl.finishInsert. [#15831] + */ + @Test + public void testFileSummaryLNMemoryUsage() + throws DatabaseException { + + /* Open environment, prevent concurrent access by daemons. 
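 + * Daemons would otherwise fetch and evict nodes concurrently and + * perturb the per-node memory sizes compared below.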
*/ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + UtilizationProfile up = envImpl.getUtilizationProfile(); + DatabaseImpl fileSummaryDb = up.getFileSummaryDb(); + MemoryBudget memBudget = envImpl.getMemoryBudget(); + + BasicLocker locker = null; + CursorImpl cursor = null; + try { + locker = BasicLocker.createBasicLocker(envImpl); + cursor = new CursorImpl(fileSummaryDb, locker); + + /* Get parent BIN. There should be only one BIN in the tree. */ + IN root = + fileSummaryDb.getTree().getRootIN(CacheMode.DEFAULT); + root.releaseLatch(); + assertEquals(1, root.getNEntries()); + BIN parent = (BIN) root.getTarget(0); + + /* Use an artificial FileSummaryLN with a tracked summary. */ + FileSummaryLN ln = new FileSummaryLN(new FileSummary()); + TrackedFileSummary tfs = new TrackedFileSummary + (envImpl.getUtilizationTracker(), 0 /*fileNum*/, + true /*trackDetail*/); + tfs.trackObsolete(0, true /*checkDupOffsets*/); + byte[] keyBytes = + FileSummaryLN.makeFullKey(0 /*fileNum*/, 123 /*sequence*/); + int keySize = MemoryBudget.byteArraySize(keyBytes.length); + + /* Perform insert after calling setTrackedSummary. */ + long oldSize = ln.getMemorySizeIncludedByParent(); + long oldParentSize = getAdjustedMemSize(parent, memBudget); + ln.setTrackedSummary(tfs); + boolean inserted = cursor.insertRecord( + keyBytes, ln, false, fileSummaryDb.getRepContext()); + assertTrue(inserted); + + cursor.latchBIN(); + assertTrue(cursor.isOnBIN(parent)); + ln.addExtraMarshaledMemorySize(parent); + cursor.releaseBIN(); + + long newSize = ln.getMemorySizeIncludedByParent(); + long newParentSize = getAdjustedMemSize(parent, memBudget); + + /* The size of the LN increases during logging. */ + assertEquals(newSize, + oldSize + + ln.getObsoleteOffsets().getExtraMemorySize()); + + /* The correct size is accounted for by the parent BIN. */ + assertEquals(newSize + keySize, newParentSize - oldParentSize); + + /* Correct size is subtracted during eviction. */ + oldParentSize = newParentSize; + cursor.evictLN(); + newParentSize = getAdjustedMemSize(parent, memBudget); + assertEquals(oldParentSize - newSize, newParentSize); + + /* Fetch a fresh FileSummaryLN before deleting it. */ + oldParentSize = newParentSize; + ln = (FileSummaryLN) cursor.lockAndGetCurrentLN(LockType.READ); + newSize = ln.getMemorySizeIncludedByParent(); + newParentSize = getAdjustedMemSize(parent, memBudget); + assertEquals(newSize, newParentSize - oldParentSize); + + /* Perform delete. */ + oldSize = newSize; + oldParentSize = newParentSize; + OperationResult result = cursor.deleteCurrentRecord( + fileSummaryDb.getRepContext()); + assertNotNull(result); + newSize = ln.getMemorySizeIncludedByParent(); + newParentSize = getAdjustedMemSize(parent, memBudget); + + /* Size changes during delete also, which performs eviction. 
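 + * (deleteCurrentRecord also evicts the LN, so the in-memory size + * shrinks by more than the obsolete-offsets overhead alone.)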
*/ + assertTrue(newSize < oldSize); + assertTrue(oldSize - newSize > + ln.getObsoleteOffsets().getExtraMemorySize()); + assertEquals(0 - oldSize, newParentSize - oldParentSize); + } finally { + if (cursor != null) { + cursor.releaseBIN(); + cursor.close(); + } + if (locker != null) { + locker.operationEnd(); + } + } + + TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/); + + /* Insert again, this time using the UtilizationProfile method. */ + FileSummaryLN ln = new FileSummaryLN(new FileSummary()); + TrackedFileSummary tfs = new TrackedFileSummary + (envImpl.getUtilizationTracker(), 0 /*fileNum*/, + true /*trackDetail*/); + tfs.trackObsolete(0, true/*checkDupOffsets*/); + ln.setTrackedSummary(tfs); + assertTrue(up.insertFileSummary(ln, 0 /*fileNum*/, 123 /*sequence*/)); + TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/); + + closeEnv(); + } + + /** + * Checks that log utilization is updated incrementally during the + * checkpoint rather than only when the highest dirty level in the Btree is + * flushed. This feature (incremental update) was added so that log + * cleaning is not delayed until the end of the checkpoint. [#16037] + */ + @Test + public void testUtilizationDuringCheckpoint() + throws DatabaseException { + + /* + * Use Database.sync of a deferred-write database to perform this test + * rather than a checkpoint, because the hook is called at a + * predictable place when only a single database is flushed. The + * implementation of Checkpointer.flushDirtyNodes is shared for + * Database.sync and checkpoint, so this tests both cases. + */ + final int FANOUT = 25; + final int N_KEYS = FANOUT * FANOUT * FANOUT; + + /* Open environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + /* Open ordinary non-transactional database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setNodeMaxEntries(FANOUT); + exampleDb = env.openDatabase(null, "foo", dbConfig); + + /* Clear stats. */ + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setClear(true); + env.getStats(statsConfig); + + /* Write to database to create a 3 level Btree. */ + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(new byte[4]); + for (int i = 0; i < N_KEYS; i += 1) { + LongBinding.longToEntry(i, keyEntry); + assertSame(OperationStatus.SUCCESS, + exampleDb.put(null, keyEntry, dataEntry)); + EnvironmentStats stats = env.getStats(statsConfig); + if (stats.getNEvictPasses() > 0) { + break; + } + } + + /* + * Sync and write an LN in each BIN to create a bunch of dirty INs + * that, when flushed again, will cause the prior versions to be + * obsolete. + */ + env.sync(); + for (int i = 0; i < N_KEYS; i += FANOUT) { + LongBinding.longToEntry(i, keyEntry); + assertSame(OperationStatus.SUCCESS, + exampleDb.put(null, keyEntry, dataEntry)); + } + + /* + * Close and re-open as a deferred-write DB so that we can call sync. + * The INs will remain dirty. 
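 + * (A deferred-write database does not log changes per operation; + * dirty nodes are written when Database.sync is called, which lets the + * test trigger the flush at a controlled point below.)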
+ */ + exampleDb.close(); + dbConfig = new DatabaseConfig(); + dbConfig.setDeferredWrite(true); + exampleDb = env.openDatabase(null, "foo", dbConfig); + + /* + * The test hook is called just before writing the highest dirty level + * in the Btree. At that point, utilization should be reduced if the + * incremental utilization update feature is working properly. Before + * adding this feature, utilization was not changed at this point. + */ + final int oldUtilization = getUtilization(); + final StringBuilder hookCalledFlag = new StringBuilder(); + + Checkpointer.setMaxFlushLevelHook( + new TestHook() { + + public void doHook() { + + hookCalledFlag.append(1); + + final int newUtilization = getUtilization(); + + int diff = (int) + ((100.0 * (oldUtilization - newUtilization)) / + oldUtilization); + + String msg = "oldUtilization=" + oldUtilization + + " newUtilization=" + newUtilization + + " diff = " + diff + "%"; + + if (embeddedLNs) { + assertTrue(msg, diff >= 6); + } else { + assertTrue(msg, diff >= 10); + } + + /* Don't call the test hook repeatedly. */ + Checkpointer.setMaxFlushLevelHook(null); + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + } + ); + exampleDb.sync(); + assertTrue(hookCalledFlag.length() > 0); + + /* While we're here, do a quick check of getCurrentMinUtilization. */ + final int lastKnownUtilization = + env.getStats(null).getCurrentMinUtilization(); + assertTrue(lastKnownUtilization > 0); + + closeEnv(); + } + + private int getUtilization() { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Map map = + envImpl.getUtilizationProfile().getFileSummaryMap(true); + FileSummary totals = new FileSummary(); + for (FileSummary summary : map.values()) { + totals.add(summary); + } + return FileSummary.utilization(totals.getObsoleteSize(), + totals.totalSize); + } + + /** + * Returns the memory size taken by the given IN, not including the target + * rep, which changes during eviction. + */ + private long getAdjustedMemSize(IN in, MemoryBudget memBudget) { + return getMemSize(in, memBudget) - + in.getTargets().calculateMemorySize(); + } + + /** + * Returns the memory size taken by the given IN and the tree memory usage. + */ + private long getMemSize(IN in, MemoryBudget memBudget) { + return memBudget.getTreeMemoryUsage() + + in.getInMemorySize() - + in.getBudgetedMemorySize(); + } + + /** + * Tests that dirtiness is logged upwards during a checkpoint, even if a + * node is evicted and refetched after being added to the checkpointer's + * dirty map, and before that entry in the dirty map is processed by the + * checkpointer. [#16523] + * + * Root INa + * / \ + * INb ... + * / + * INc + * / + * BINd + * + * The scenario that causes the bug is: + * + * 1) Prior to the final checkpoint, the cleaner processes a log file + * containing BINd. The cleaner marks BINd dirty so that it will be + * flushed prior to the end of the next checkpoint, at which point the file + * containing BINd will be deleted. The cleaner also calls + * setProhibitNextDelta on BINd to ensure that a full version will be + * logged. + * + * 2) At checkpoint start, BINd is added to the checkpoiner's dirty map. + * It so happens that INa is also dirty, perhaps as the result of a split, + * and added to the dirty map. 
The checkpointer's max flush level is 4. + * + * 3) The evictor flushes BINd and then its parent INc. Both are logged + * provisionally, since their level is less than 4, the checkpointer's max + * flush level. INb, the parent of INc, is dirty. + * + * 4) INc, along with BINd, is loaded back into the Btree as the result of + * reading an LN in BINd. INc and BINd are both non-dirty. INb, the + * parent of INc, is still dirty. + * + * 5) The checkpointer processes its reference to BINd in the dirty map. + * It finds that BINd is not dirty, so does not need to be logged. It + * attempts to add the parent, INc, to the dirty map in order to propagate + * changes upward. However, because INc is not dirty, it is not added to + * the dirty map -- this was the bug, it should be added even if not dirty. + * So as the result of this step, the checkpointer does no logging and does + * not add anything to the dirty map. + * + * 6) The checkpointer logs INa (it was dirty at the start of the + * checkpoint) and the checkpoint finishes. It deletes the cleaned log + * file that contains the original version of BINd. + * + * The key thing is that INb is now dirty and was not logged. It should + * have been logged as the result of being an ancestor of BINd, which was + * in the dirty map. Its parent INa was logged, but does not refer to the + * latest version of INb/INc/BINd. + * + * 7) Now we recover. INc and BINd, which were evicted during step (3), + * are not replayed because they are provisional -- they are lost. When a + * search for an LN in BINd is performed, we traverse down to the old + * version of BINd, which causes LogFileNotFound. + * + * The fix is to add INc to the dirty map at step (5), even though it is + * not dirty. When the reference to INc in the dirty map is processed we + * will not log INc, but we will add its parent INb to the dirty map. Then + * when the reference to INb is processed, it will be logged because it is + * dirty. Then INa is logged and refers to the latest version of + * INb/INc/BINd. + * + * This problem could only occur with a Btree of depth 4 or greater. + */ + @Test + public void testEvictionDuringCheckpoint() + throws DatabaseException { + + /* Use small fanout to create a deep tree. */ + final int FANOUT = 6; + final int N_KEYS = FANOUT * FANOUT * FANOUT; + + /* Open environment without interference of daemon threads. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + /* + * BtreeVerifier must be disabled for this test case. Otherwise it + * hits an IN-already-on-INList exception when calling cursor.getNext: + * cursor.getNext may call Tree.getNextBin, and adding the next BIN to + * the INList then throws. + * + * The reason is as follows. We have the in-memory tree shown below, + * and the exception happens when we try to reach BINe. + * simulateEviction removes BINd and INc from the INList, but it does + * not remove BINe.
+ * After the simulated eviction of BINd and INc, cursor.get/getNext + * will re-fetch INc and BINd; that works, because BINd and INc are no + * longer on the INList. But if we then move on to BINe, the slot + * target for BINe in the re-fetched INc is null, so BINe is fetched + * and attached to INc. INs use the nodeId for hashCode, equals and + * compareTo, so when the freshly fetched BINe is inserted into the + * ConcurrentMap INList.ins, the INList concludes that this IN is + * already present. + * Root INa + * / \ + * INb ... + * / + * INc + * / \ + * BINd BINe + */ + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + final EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + + /* Open database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setNodeMaxEntries(FANOUT); + exampleDb = env.openDatabase(null, "foo", dbConfig); + DatabaseImpl dbImpl = DbInternal.getDbImpl(exampleDb); + + /* Write to database to create a 4 level Btree. */ + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(new byte[0]); + int nRecords; + for (nRecords = 1;; nRecords += 1) { + LongBinding.longToEntry(nRecords, keyEntry); + assertSame(OperationStatus.SUCCESS, + exampleDb.put(null, keyEntry, dataEntry)); + if (nRecords % 10 == 0) { + int level = envImpl.getDbTree().getHighestLevel(dbImpl); + if ((level & IN.LEVEL_MASK) >= 4) { + break; + } + } + } + + /* Flush all dirty nodes. */ + env.sync(); + + /* Get BINd and its ancestors. Mark BINd and INa dirty. */ + final IN nodeINa = dbImpl.getTree().getRootIN(CacheMode.DEFAULT); + nodeINa.releaseLatch(); + final IN nodeINb = (IN) nodeINa.getTarget(0); + final IN nodeINc = (IN) nodeINb.getTarget(0); + final BIN nodeBINd = (BIN) nodeINc.getTarget(0); + assertNotNull(nodeBINd); + nodeINa.setDirty(true); + nodeBINd.setDirty(true); + + /* + * The test hook is called after creating the checkpoint dirty map and + * just before flushing dirty nodes. + */ + final StringBuilder hookCalledFlag = new StringBuilder(); + + Checkpointer.setBeforeFlushHook(new TestHook() { + public void doHook() { + hookCalledFlag.append(1); + /* Don't call the test hook repeatedly. */ + Checkpointer.setBeforeFlushHook(null); + try { + /* Evict BINd and INc. */ + simulateEviction(env, envImpl, nodeBINd, nodeINc); + simulateEviction(env, envImpl, nodeINc, nodeINb); + + /* + * Force BINd and INc to be loaded into cache by fetching + * the left-most record. + * + * Note that nodeINc and nodeBINd are different instances + * and are no longer in the Btree but we don't change these + * variables because they are final. They should not be + * used past this point. + */ + LongBinding.longToEntry(1, keyEntry); + assertSame(OperationStatus.SUCCESS, + exampleDb.get(null, keyEntry, dataEntry, null)); + } catch (DatabaseException e) { + throw new RuntimeException(e); + } + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + }); + env.checkpoint(FORCE_CONFIG); + assertTrue(hookCalledFlag.length() > 0); + assertTrue(!nodeINa.getDirty()); + assertTrue(!nodeINb.getDirty()); /* This failed before the bug fix.
*/ + + closeEnv(); + } + + /** + * Simulate eviction by logging this node, updating the LSN in its + * parent slot, setting the Node to null in the parent slot, and + * removing the IN from the INList. Logging is provisional. The + * parent is dirtied. May not be called unless this node is dirty and + * none of its children are dirty. Children may be resident. + */ + private void simulateEviction(Environment env, + EnvironmentImpl envImpl, + IN nodeToEvict, + IN parentNode) + throws DatabaseException { + + assertTrue("not dirty " + nodeToEvict.getNodeId(), + nodeToEvict.getDirty()); + assertTrue(!hasDirtyChildren(nodeToEvict)); + + parentNode.latch(); + + long lsn = TestUtils.logIN( + env, nodeToEvict, false /*allowDeltas*/, true /*provisional*/, + parentNode); + + int index; + for (index = 0;; index += 1) { + if (index >= parentNode.getNEntries()) { + fail(); + } + if (parentNode.getTarget(index) == nodeToEvict) { + break; + } + } + + nodeToEvict.latch(); + + envImpl.getEvictor().remove(nodeToEvict); + envImpl.getInMemoryINs().remove(nodeToEvict); + parentNode.recoverIN(index, null /*node*/, lsn, 0 /*lastLoggedSize*/); + + nodeToEvict.releaseLatch(); + parentNode.releaseLatch(); + } + + private boolean hasDirtyChildren(IN parent) { + for (int i = 0; i < parent.getNEntries(); i += 1) { + Node child = parent.getTarget(i); + if (child instanceof IN) { + IN in = (IN) child; + if (in.getDirty()) { + return true; + } + } + } + return false; + } + + @Test + public void testMultiCleaningBug() + throws DatabaseException { + + initEnv(true, false); + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final Cleaner cleaner = envImpl.getCleaner(); + + Map<String, Set<String>> expectedMap = + new HashMap<String, Set<String>>(); + doLargePut(expectedMap, 1000, 1, true); + modifyData(expectedMap, 1, true); + checkData(expectedMap); + + final TestHook hook = new TestHook() { + public void doHook() { + /* Signal that hook was called. */ + if (synchronizer != 99) { + synchronizer = 1; + } + /* Wait for signal to proceed with cleaning. */ + while (synchronizer != 2 && + synchronizer != 99 && + !Thread.interrupted()) { + Thread.yield(); + } + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() throws IOException { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + }; + + junitThread = new JUnitThread("TestMultiCleaningBug") { + public void testBody() + throws DatabaseException { + + try { + while (synchronizer != 99) { + /* Wait for initial state. */ + while (synchronizer != 0 && + synchronizer != 99 && + !Thread.interrupted()) { + Thread.yield(); + } + /* Clean with hook set, hook is called next. */ + cleaner.setFileChosenHook(hook); + env.cleanLog(); + /* Signal that cleaning is done. */ + if (synchronizer != 99) { + synchronizer = 3; + } + } + } catch (Throwable e) { + e.printStackTrace(); + } + } + }; + + /* Kick off thread above. */ + synchronizer = 0; + junitThread.start(); + + for (int i = 0; i < 100 && junitThread.isAlive(); i += 1) { + /* Wait for hook to be called when a file is chosen. */ + while (synchronizer != 1 && junitThread.isAlive()) { + Thread.yield(); + } + /* Allow the thread to clean the chosen file. */ + synchronizer = 2; + /* But immediately clean here, which could select the same file. */ + cleaner.setFileChosenHook(null); + env.cleanLog(); + /* Wait for both cleaner runs to finish.
*/
+ while (synchronizer != 3 && junitThread.isAlive()) {
+ Thread.yield();
+ }
+ /* Make more waste to be cleaned. */
+ modifyData(expectedMap, 1, true);
+ synchronizer = 0;
+ }
+
+ synchronizer = 99;
+
+ try {
+ junitThread.finishTest();
+ junitThread = null;
+ } catch (Throwable e) {
+ e.printStackTrace();
+ fail(e.toString());
+ }
+
+ closeEnv();
+ }
+
+ /**
+ * Ensures that LN migration is immediate. Lazy migration is no longer
+ * used, even if configured.
+ */
+ @Test
+ @SuppressWarnings("deprecation")
+ public void testCleanerLazyMigrationConfig()
+ throws DatabaseException {
+
+ /* Open environment without interference of daemon threads. */
+ EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+ DbInternal.disableParameterValidation(envConfig);
+ envConfig.setAllowCreate(true);
+ envConfig.setTransactional(true);
+ envConfig.setConfigParam
+ (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80");
+ envConfig.setConfigParam
+ (EnvironmentParams.LOG_FILE_MAX.getName(),
+ Integer.toString(FILE_SIZE));
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+
+ /* Configure lazy migration, even though it's deprecated. */
+ envConfig.setConfigParam
+ (EnvironmentConfig.CLEANER_LAZY_MIGRATION, "true");
+
+ if (envMultiSubDir) {
+ envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES,
+ DATA_DIRS + "");
+ }
+
+ env = new Environment(envHome, envConfig);
+ final EnvironmentImpl envImpl =
+ DbInternal.getNonNullEnvImpl(env);
+
+ /* Open database. */
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setAllowCreate(true);
+ dbConfig.setTransactional(true);
+ exampleDb = env.openDatabase(null, "foo", dbConfig);
+
+ /* Clear stats. */
+ StatsConfig clearStats = new StatsConfig();
+ clearStats.setClear(true);
+ env.getStats(clearStats);
+
+ /* Insert and update data. */
+ Map> expectedMap =
+ new HashMap>();
+ doLargePut(expectedMap, 1000, 1, true);
+ modifyData(expectedMap, 1, true);
+ checkData(expectedMap);
+
+ /* Clean. */
+ while (true) {
+ long files = env.cleanLog();
+ if (files == 0) {
+ break;
+ }
+ }
+
+ /* There should be no checkpoint or eviction. */
+ EnvironmentStats stats = env.getStats(null);
+ assertEquals(0, stats.getNEvictPasses());
+ assertEquals(0, stats.getNCheckpoints());
+ assertTrue(stats.getNCleanerRuns() > 0);
+
+ /* Clear stats. */
+ env.getStats(clearStats);
+
+ /* Flush all dirty nodes. */
+ env.sync();
+
+ stats = env.getStats(null);
+ assertEquals(0, stats.getNLNsMigrated());
+
+ closeEnv();
+ }
+
+ /**
+ * Checks that no fetch misses occur when deleting FileSummaryLNs.
+ */
+ @Test
+ public void testOptimizedFileSummaryLNDeletion() {
+
+ /* Open environment without interference of daemon threads.
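+ * Checkpointing, cleaning, compression and eviction must happen only
+ * when invoked explicitly below, so that the fetch-miss stat at the
+ * end reflects FileSummaryLN deletion alone.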
*/
+ EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+ DbInternal.disableParameterValidation(envConfig);
+ envConfig.setAllowCreate(true);
+ envConfig.setTransactional(true);
+ envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+ Integer.toString(FILE_SIZE));
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+ envConfig.setConfigParam
+ (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+ envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+
+ if (envMultiSubDir) {
+ envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES,
+ DATA_DIRS + "");
+ }
+
+ env = new Environment(envHome, envConfig);
+ final EnvironmentImpl envImpl =
+ DbInternal.getNonNullEnvImpl(env);
+
+ /* Open database. */
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setAllowCreate(true);
+ dbConfig.setTransactional(true);
+ exampleDb = env.openDatabase(null, "foo", dbConfig);
+
+ /* Insert and update data. */
+ Map> expectedMap =
+ new HashMap>();
+ doLargePut(expectedMap, 1000, 1, true);
+ modifyData(expectedMap, 1, false);
+ checkData(expectedMap);
+ deleteData(expectedMap, false, true);
+ checkData(expectedMap);
+
+ /* Clear stats. */
+ StatsConfig clearStats = new StatsConfig();
+ clearStats.setClear(true);
+ env.getStats(clearStats);
+
+ /* Clean. */
+ while (true) {
+ long files = env.cleanLog();
+ if (files == 0) {
+ break;
+ }
+ }
+
+ /* There should be cleaning but no checkpoint or eviction. */
+ EnvironmentStats stats = env.getStats(clearStats);
+ assertTrue(stats.getNCleanerRuns() > 0);
+ assertEquals(0, stats.getNEvictPasses());
+ assertEquals(0, stats.getNCheckpoints());
+
+ /*
+ * Flush all dirty nodes, which should delete the cleaned log files and
+ * their FileSummaryLNs and also update the related MapLNs.
+ */
+ env.sync();
+
+ /*
+ * Before FileSummaryLN deletion was optimized, there were 16 cache
+ * misses here: FileSummaryLNs are always evicted after being read or
+ * written, and they were then fetched again just to be deleted. Now
+ * that we can delete FileSummaryLNs without fetching, there should be
+ * no misses.
+ */
+ stats = env.getStats(clearStats);
+ assertEquals(0, stats.getNLNsFetchMiss());
+
+ closeEnv();
+ }
+
+ /**
+ * Ensure that LN migration does not cause the representation of INs to
+ * change when migration places an LN in the slot and then evicts it
+ * afterwards. The representation should remain "no target" (empty) if
+ * that was the BIN representation before the LN migration. Before the bug
+ * fix [#21734] we neglected to call BIN.compactMemory after the eviction.
+ * The fix is for the cleaner to call BIN.evictLN, rather than doing the
+ * eviction separately.
+ */
+ @Test
+ public void testCompactBINAfterMigrateLN() {
+
+ /* Open environment without interference of daemon threads.
*/ + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setConfigParam + (EnvironmentParams.TREE_MAX_EMBEDDED_LN.getName(), "0"); + envConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(FILE_SIZE)); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + env = new Environment(envHome, envConfig); + final EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + + /* Open database with CacheMode.EVICT_LN. */ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setCacheMode(CacheMode.EVICT_LN); + exampleDb = env.openDatabase(null, "foo", dbConfig); + + /* Clear stats. */ + final StatsConfig clearStats = new StatsConfig(); + clearStats.setClear(true); + env.getStats(clearStats); + + /* Insert and update data. */ + final Map> expectedMap = + new HashMap>(); + doLargePut(expectedMap, 1000, 1, true); + modifyData(expectedMap, 1, true); + checkData(expectedMap); + + EnvironmentStats stats = env.getStats(clearStats); + + /* There should be no checkpoint or eviction. */ + assertEquals(0, stats.getNEvictPasses()); + assertEquals(0, stats.getNCheckpoints()); + + /* + * Due to using EVICT_LN mode, the representation of most INs should be + * "no target" (empty). + */ + final long nNoTarget = stats.getNINNoTarget(); + assertTrue(stats.toString(), nNoTarget > stats.getNINSparseTarget()); + + /* Clean. */ + while (true) { + final long files = env.cleanLog(); + if (files == 0) { + break; + } + } + + stats = env.getStats(null); + + /* There should be no checkpoint or eviction. */ + assertEquals(0, stats.getNEvictPasses()); + assertEquals(0, stats.getNCheckpoints()); + + /* A bunch of LNs should have been migrated. */ + assertTrue(stats.getNCleanerRuns() > 0); + assertTrue(stats.getNLNsMigrated() > 100); + + /* + * Most importantly, LN migration should not cause the representation + * of INs to change -- most should still be "no target" (empty). + * [#21734] + * + * The reason that nNoTarget is reduced by one (one is subtracted from + * nNoTarget below) is apparently because a FileSummaryLN DB BIN has + * changed. + */ + final long nNoTarget2 = stats.getNINNoTarget(); + assertTrue("nNoTarget=" + nNoTarget + " nNoTarget2=" + nNoTarget2, + nNoTarget2 >= nNoTarget - 1); + + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/cleaner/CleanerTestBase.java b/test/com/sleepycat/je/cleaner/CleanerTestBase.java new file mode 100644 index 0000000..dbf4393 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/CleanerTestBase.java @@ -0,0 +1,71 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.cleaner;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.After;
+import org.junit.Before;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+public class CleanerTestBase extends TestBase {
+
+ protected final File envHome;
+ protected boolean envMultiSubDir;
+ protected Environment env;
+
+ protected static final int DATA_DIRS = 3;
+
+ public CleanerTestBase() {
+ envHome = SharedTestUtils.getTestDir();
+ }
+
+ public static List<Object[]> getEnv(boolean[] envMultiSubDirParams) {
+ List<Object[]> list = new ArrayList<Object[]>();
+ for (boolean env : envMultiSubDirParams)
+ list.add(new Object[] { env });
+
+ return list;
+ }
+
+ @Before
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+ if (envMultiSubDir) {
+ TestUtils.createEnvHomeWithSubDir(envHome, DATA_DIRS);
+ }
+ }
+
+ @After
+ public void tearDown()
+ throws Exception {
+
+ try {
+ if (env != null) {
+ env.close();
+ }
+ } catch (Throwable e) {
+ System.out.println("tearDown: " + e);
+ }
+ env = null;
+ }
+}
diff --git a/test/com/sleepycat/je/cleaner/CleanerTestUtils.java b/test/com/sleepycat/je/cleaner/CleanerTestUtils.java
new file mode 100644
index 0000000..ccfc466
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/CleanerTestUtils.java
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.cleaner;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.DbTestProxy;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * Package utilities.
+ */
+public class CleanerTestUtils {
+
+ /**
+ * Gets the file of the LSN at the cursor position, using internal methods.
+ */
+ static long getLogFile(Cursor cursor) {
+ CursorImpl impl = DbTestProxy.dbcGetCursorImpl(cursor);
+ BIN bin = impl.getBIN();
+ assertNotNull(bin);
+ int index = impl.getIndex();
+ long lsn = bin.getLsn(index);
+ assertTrue(lsn != DbLsn.NULL_LSN);
+ long file = DbLsn.getFileNumber(lsn);
+ return file;
+ }
+}
diff --git a/test/com/sleepycat/je/cleaner/DiskLimitTest.java b/test/com/sleepycat/je/cleaner/DiskLimitTest.java
new file mode 100644
index 0000000..c0f3350
--- /dev/null
+++ b/test/com/sleepycat/je/cleaner/DiskLimitTest.java
@@ -0,0 +1,649 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.cleaner;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.util.Collections;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DiskLimitException;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.Get;
+import com.sleepycat.je.Put;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.evictor.Evictor;
+import com.sleepycat.je.recovery.Checkpointer;
+import com.sleepycat.je.rep.InsufficientAcksException;
+import com.sleepycat.je.rep.InsufficientReplicasException;
+import com.sleepycat.je.rep.NoConsistencyRequiredPolicy;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.utilint.TestHookAdapter;
+import com.sleepycat.je.utilint.VLSN;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests the MAX_DISK and FREE_DISK limits and the available log size stat.
+ *
+ * Testing of concurrent activity (while hitting the disk limits) is done by
+ * the DiskLimitStress standalone test.
+ */
+public class DiskLimitTest extends RepTestBase {
+
+ private static final long FIVE_GB = 5L * 1024L * 1024L * 1024L;
+ private static final long TEN_GB = 2 * FIVE_GB;
+
+ private static final Durability ACK_ALL = new Durability(
+ Durability.SyncPolicy.WRITE_NO_SYNC,
+ Durability.SyncPolicy.WRITE_NO_SYNC,
+ Durability.ReplicaAckPolicy.ALL);
+
+ private static final Durability ACK_MAJORITY = new Durability(
+ Durability.SyncPolicy.WRITE_NO_SYNC,
+ Durability.SyncPolicy.WRITE_NO_SYNC,
+ Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+
+ private int nRecords = 100;
+
+ private Environment standaloneEnv;
+
+ @Override
+ @Before
+ public void setUp()
+ throws Exception {
+
+ groupSize = 3;
+ super.setUp();
+
+ for (int i = 0; i < groupSize; i += 1) {
+ final EnvironmentConfig envConfig = repEnvInfo[i].getEnvConfig();
+
+ /* Test requires explicit control over all operations.
*/ + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_VERIFIER, "false"); + + /* Disable shared cache to allow eviction testing. */ + envConfig.setConfigParam( + EnvironmentConfig.SHARED_CACHE, "false"); + } + } + + @Override + @After + public void tearDown() + throws Exception { + + Checkpointer.setBeforeFlushHook(null); + + if (standaloneEnv != null) { + try { + standaloneEnv.close(); + } catch (DiskLimitException e) { + /* Ignored. Disk limits may be set by test. */ + } finally { + standaloneEnv = null; + } + } + + super.tearDown(); + } + + /** + * Checks calculation of available log size with various input values. + */ + @Test + public void testAvailableLogSize() { + + standaloneEnv = new Environment(envRoot, repEnvInfo[0].getEnvConfig()); + + /* + * Use values from table in Cleaner.recalcLogSizeStats. + * + * params: freeDL, maxDL, diskFS, availableLS. + */ + checkAvailableSize( 5, 0, 20, 35); + checkAvailableSize(25, 0, 5, 0); + checkAvailableSize(30, 0, 5, -5); + checkAvailableSize( 5, 100, 20, 35); + checkAvailableSize(25, 100, 20, 15); + checkAvailableSize( 5, 80, 20, 20); + checkAvailableSize(25, 80, 20, 0); + checkAvailableSize(25, 200, 5, 0); + checkAvailableSize(25, 75, 20, -5); + checkAvailableSize(50, 80, 90, -25); + + /* + * While a disk limit + * is violated (availableLogSize is -5), make sure + * Environment.close throws DLE. + */ + try { + standaloneEnv.close(); + fail("DLE expected"); + } catch (DiskLimitException e) { + /* Expected. */ + } + } + + private void checkAvailableSize(final int freeDL, + final int maxDL, + final int diskFS, + final int availableLS) { + + /* Use values from table in Cleaner.recalcLogSizeStats. */ + final int activeLS = 50; + final int reservedLS = 25; + final int protectedLS = 5; + + final FileProtector.LogSizeStats logSizeStats = + new FileProtector.LogSizeStats( + activeLS, reservedLS, protectedLS, Collections.emptyMap()); + + standaloneEnv.setMutableConfig( + standaloneEnv.getMutableConfig(). + setMaxDisk(maxDL). + setConfigParam( + EnvironmentConfig.FREE_DISK, String.valueOf(freeDL))); + + final Cleaner cleaner = + DbInternal.getEnvironmentImpl(standaloneEnv).getCleaner(); + + cleaner.recalcLogSizeStats(logSizeStats, diskFS); + + final EnvironmentStats stats = standaloneEnv.getStats(null); + + assertEquals( + "freeDL=" + freeDL + " maxDL=" + maxDL + " diskFS=" + diskFS, + availableLS, stats.getAvailableLogSize()); + + if (availableLS > 0) { + assertNull( + cleaner.getDiskLimitViolation(), + cleaner.getDiskLimitViolation()); + } else { + assertNotNull( + cleaner.getDiskLimitMessage(), + cleaner.getDiskLimitViolation()); + + assertTrue( + "maxOverage=" + cleaner.getMaxDiskOverage() + + " freeShortage=" + cleaner.getFreeDiskShortage(), + cleaner.getMaxDiskOverage() >= 0 || + cleaner.getFreeDiskShortage() >= 0); + } + + assertEquals(activeLS + reservedLS, stats.getTotalLogSize()); + + assertEquals(maxDL, standaloneEnv.getConfig().getMaxDisk()); + } + + /** + * Checks that FREE_DISK is subtracted from MAX_DISK iff: + * - MAX_DISK > 10GB, or + * - FREE_DISK is explicitly specified. 
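+ *
+ * For example, with the 5GB FREE_DISK default: MAX_DISK=10GB leaves
+ * the full 10GB usable, while MAX_DISK=10GB+1 leaves (10GB+1) - 5GB.
+ * If FREE_DISK is set explicitly (say to 100), it is subtracted even
+ * from a small MAX_DISK, e.g. 1000 - 100, as the cases below show.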
+ */ + @Test + public void testFreeDiskSubtraction() { + + standaloneEnv = new Environment(envRoot, repEnvInfo[0].getEnvConfig()); + + /* + * FREE_DISK is not specified, so it is subtracted only if MAX_DISK + * is GT 10GB. Default FREE_DISK is 5GB. + */ + checkFreeDiskSubtraction(1000, FIVE_GB, false); + checkFreeDiskSubtraction(TEN_GB, FIVE_GB, false); + checkFreeDiskSubtraction(TEN_GB + 1, FIVE_GB, true); + + final long FREE_DISK = 100; + + standaloneEnv.setMutableConfig( + standaloneEnv.getMutableConfig(). + setConfigParam( + EnvironmentConfig.FREE_DISK, + String.valueOf(FREE_DISK))); + + + /* FREE_DISK is specified so it is always subtracted. */ + checkFreeDiskSubtraction(1000, FREE_DISK, true); + checkFreeDiskSubtraction(TEN_GB, FREE_DISK, true); + checkFreeDiskSubtraction(TEN_GB + 1, FREE_DISK, true); + } + + private void checkFreeDiskSubtraction(final long maxDisk, + final long freeDisk, + final boolean expectSubtracted) { + + /* + * Use maxDisk-1 value for activeLogSize so that if freeDisk is + * subtracted then availableLogSize will be zero and there will be + * a disk limit violation. + */ + final FileProtector.LogSizeStats logSizeStats = + new FileProtector.LogSizeStats( + maxDisk - 1, 0, 0, Collections.emptyMap()); + + standaloneEnv.setMutableConfig( + standaloneEnv.getMutableConfig().setMaxDisk(maxDisk)); + + final Cleaner cleaner = + DbInternal.getEnvironmentImpl(standaloneEnv).getCleaner(); + + cleaner.recalcLogSizeStats(logSizeStats, freeDisk + 1); + + if (expectSubtracted) { + assertNotNull( + cleaner.getDiskLimitMessage(), + cleaner.getDiskLimitViolation()); + } else { + assertNull( + cleaner.getDiskLimitViolation(), + cleaner.getDiskLimitViolation()); + } + } + + /** + * Checks that a disk limit violation will prevent write operations. + */ + @Test + public void testWritesProhibited() { + + RepTestUtils.joinGroup(repEnvInfo); + final RepEnvInfo masterInfo = repEnvInfo[0]; + write(masterInfo, 0, nRecords, ACK_MAJORITY); + RepTestUtils.syncGroup(repEnvInfo); + readAll(); + + /* + * Prohibit writes on master. + * No records are inserted on any node. + */ + prohibitWrites(masterInfo); + try { + insertOne(masterInfo, ACK_ALL); + fail("Expected DLE"); + } catch (DiskLimitException e) { + /* Expected. */ + } + readAll(); + + /* And then allowed again. */ + allowWrites(masterInfo); + insertOne(masterInfo, ACK_ALL); + readAll(); + + /* + * Prohibit writes on one replica. + * Expect writes allowed with ack-majority but not ack-all. + * Inserted records will only be present on two nodes. + */ + prohibitWrites(repEnvInfo[1]); + insertOne(masterInfo, ACK_MAJORITY); + read(masterInfo, nRecords); + read(repEnvInfo[2], nRecords); + read(repEnvInfo[1], nRecords - 1); + try { + insertOne(masterInfo, ACK_ALL); + fail("Expected IAE"); + } catch (InsufficientAcksException|InsufficientReplicasException e) { + /* Expected. */ + } + read(masterInfo, nRecords); + read(repEnvInfo[2], nRecords); + read(repEnvInfo[1], nRecords - 2); + + /* + * Prohibit writes on both replicas. + * Expect writes not allowed with ack-majority. + * Inserted record will only be present on the master. + */ + prohibitWrites(repEnvInfo[2]); + try { + insertOne(masterInfo, ACK_MAJORITY); + fail("Expected IAE"); + } catch (InsufficientAcksException|InsufficientReplicasException e) { + /* Expected. */ + } + read(masterInfo, nRecords); + read(repEnvInfo[2], nRecords - 1); + read(repEnvInfo[1], nRecords - 3); + + /* And then allowed again. 
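+ * Re-enabling writes on both replicas restores full durability, so an
+ * ack-all insert succeeds and every node sees all records again.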
*/ + allowWrites(repEnvInfo[1]); + allowWrites(repEnvInfo[2]); + insertOne(masterInfo, ACK_ALL); + readAll(); + + checkNodeEquality(); + } + + /** + * Tests that checkpoints and cleaner runs are prohibited while a disk + * limit is violated, and that eviction will not log dirty nodes. + */ + @Test + public void testCheckpointCleanEvict() { + + for (int i = 0; i < groupSize; i += 1) { + final EnvironmentConfig envConfig = repEnvInfo[i].getEnvConfig(); + + /* Use small files so we can clean with a small data set. */ + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, "1000"); + } + + RepTestUtils.joinGroup(repEnvInfo); + final RepEnvInfo masterInfo = repEnvInfo[0]; + + /* Test is not designed for non-default embedded LN param. */ + assumeTrue(masterInfo.getRepImpl().getMaxEmbeddedLN() >= 16); + + write(masterInfo, 0, nRecords, ACK_MAJORITY); + RepTestUtils.syncGroup(repEnvInfo); + readAll(); + + for (final RepEnvInfo info : repEnvInfo) { + /* + * Prohibit writes and expect that a checkpoint will throw + * DiskLimitException. Then allow writes and expect a complete + * checkpoint. + * + * Prohibit writes after first CkptStart has been logged. + */ + Checkpointer.setBeforeFlushHook(new TestHookAdapter() { + @Override + public void doHook() { + prohibitWrites(info); + } + }); + + final CheckpointConfig config = + new CheckpointConfig().setKBytes(1); + + final Environment env = info.getEnv(); + final long nCheckpoints = env.getStats(null).getNCheckpoints(); + + for (int i = 0; i < 3; i += 1) { + try { + env.checkpoint(config); + fail("Expected DLE"); + } catch (DiskLimitException e) { + /* Expected. */ + } + } + + /* nCheckpoints is incremented at start of checkpoint. */ + assertEquals( + nCheckpoints + 1, + env.getStats(null).getNCheckpoints()); + + allowWrites(info); + Checkpointer.setBeforeFlushHook(null); + env.checkpoint(config); + + assertEquals( + nCheckpoints + 2, + env.getStats(null).getNCheckpoints()); + + /* + * Prohibit writes and expect that a cleaner run will throw + * DiskLimitException. Then allow writes and expect a complete + * cleaner run. + */ + final int minUtil = env.getStats(null).getCurrentMinUtilization(); + assertTrue("expect <= 40 minUtil=" + minUtil, minUtil <= 40); + + final long nCleanerRuns = env.getStats(null).getNCleanerRuns(); + + /* Prohibit writes after first cleaner run has started. */ + final Cleaner cleaner = info.getRepImpl().getCleaner(); + cleaner.setFileChosenHook(new TestHookAdapter() { + @Override + public void doHook() { + prohibitWrites(info); + } + }); + + for (int i = 0; i < 3; i += 1) { + try { + env.cleanLog(); + fail("Expected DLE"); + } catch (DiskLimitException e) { + /* Expected. */ + } + } + + /* nCleanerRuns is incremented at start of cleaner run. */ + assertEquals( + nCleanerRuns + 1, + env.getStats(null).getNCleanerRuns()); + + cleaner.setFileChosenHook(null); + allowWrites(info); + env.cleanLog(); + + assertEquals( + nCleanerRuns + 2, + env.getStats(null).getNCleanerRuns()); + + /* + * Cleaning will have dirtied INs. Prohibit writes and expect that + * eviction will not log dirty nodes. Then allow writes and expect + * eviction of dirty nodes. 
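+ * (Evicting a dirty BIN requires logging it, which a disk limit
+ * violation forbids, so the node must remain in cache.)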
+ */ + prohibitWrites(info); + evictDirtyBINs(info, false /*expectEviction*/); + + allowWrites(info); + evictDirtyBINs(info, true /*expectEviction*/); + } + + readAll(); + checkNodeEquality(); + } + + private void insertOne(final RepEnvInfo info, + final Durability durability) { + try { + write(info, nRecords, 1, durability); + nRecords += 1; + } catch (InsufficientAcksException e) { + nRecords += 1; + throw e; + } + } + + private void write(final RepEnvInfo info, + final int startKey, + final int nKeys, + final Durability durability) { + + final ReplicatedEnvironment master = info.getEnv(); + final DatabaseEntry entry = new DatabaseEntry(); + + final Database db = master.openDatabase( + null, TEST_DB_NAME, + new DatabaseConfig().setTransactional(true).setAllowCreate(true)); + + final TransactionConfig txnConfig = new + TransactionConfig().setDurability(durability); + + final Transaction txn = master.beginTransaction(null, txnConfig); + try { + for (int i = startKey; i < startKey + nKeys; i += 1) { + IntegerBinding.intToEntry(i, entry); + db.put(txn, entry, entry, Put.OVERWRITE, null); + } + txn.commit(durability); + db.close(); + } catch (Throwable e) { + txn.abort(); + db.close(); + throw e; + } + } + + private void read(final RepEnvInfo info, + final int nKeys) { + + final TransactionConfig txnConfig = new TransactionConfig(). + setConsistencyPolicy(new NoConsistencyRequiredPolicy()). + setReadOnly(true); + + Transaction txn = info.getEnv().beginTransaction(null, txnConfig); + + final Database db = info.getEnv().openDatabase( + txn, TEST_DB_NAME, + new DatabaseConfig().setTransactional(true)); + + txn.commit(); + + txn = info.getEnv().beginTransaction(null, txnConfig); + + try { + try (final Cursor cursor = db.openCursor(txn, null)) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + int i = 0; + while (cursor.get(key, data, Get.NEXT, null) != null) { + assertEquals(i, IntegerBinding.entryToInt(key)); + assertEquals(i, IntegerBinding.entryToInt(data)); + i += 1; + } + + assertEquals(nKeys, i); + } + } finally { + txn.commit(); + } + + db.close(); + } + + private void readAll() { + for (final RepEnvInfo info : repEnvInfo) { + read(info, nRecords); + } + } + + private void prohibitWrites(final RepEnvInfo info) { + setMaxDisk(info, 1); + } + + private void allowWrites(final RepEnvInfo info) { + setMaxDisk(info, 0); + } + + private void setMaxDisk(final RepEnvInfo info, final long size) { + + final Environment env = info.getEnv(); + + env.setMutableConfig(env.getMutableConfig().setMaxDisk(size)); + + try { + info.getEnv().cleanLog(); + } catch (DiskLimitException e) { + /* Do nothing. */ + } + } + + private void checkNodeEquality() { + + final VLSN lastVLSN = + repEnvInfo[0].getRepImpl().getVLSNIndex().getRange().getLast(); + + try { + RepTestUtils.checkNodeEquality(lastVLSN, false, repEnvInfo); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + private void evictDirtyBINs(final RepEnvInfo info, + final boolean expectEviction) { + + final EnvironmentImpl envImpl = info.getRepImpl(); + final StatsConfig sConfig = new StatsConfig(); + int dirtyBins = 0; + + for (final IN in : envImpl.getInMemoryINs()) { + + if (!in.isBIN() || !in.getDirty()) { + continue; + } + + dirtyBins += 1; + + final long origEvicted = + envImpl.loadStats(sConfig).getNDirtyNodesEvicted(); + + in.latch(); + + /* Compress, strip LNs and compact. 
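+ * Each partialEviction call presumably performs the next available
+ * step, hence the two calls before evicting the BIN itself.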
*/ + in.partialEviction(); + in.partialEviction(); + + final long bytes = envImpl.getEvictor().doTestEvict( + in, Evictor.EvictionSource.MANUAL); + + final long evicted = + envImpl.loadStats(sConfig).getNDirtyNodesEvicted() - + origEvicted; + + if (expectEviction) { + assertTrue(bytes > 0); + assertEquals(1, evicted); + } else { + assertEquals(0, bytes); + assertEquals(0, evicted); + } + } + + assertTrue(dirtyBins > 0); + } +} diff --git a/test/com/sleepycat/je/cleaner/FileProtectorTest.java b/test/com/sleepycat/je/cleaner/FileProtectorTest.java new file mode 100644 index 0000000..b444b28 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/FileProtectorTest.java @@ -0,0 +1,979 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Arrays; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DiskOrderedCursor; +import com.sleepycat.je.DiskOrderedCursorConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.Get; +import com.sleepycat.je.Put; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.Feeder; +import com.sleepycat.je.rep.stream.FeederReplicaSyncup; +import com.sleepycat.je.rep.stream.ReplicaSyncupReader; +import com.sleepycat.je.rep.utilint.BinaryProtocol; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.util.DbBackup; +import com.sleepycat.je.util.DbSpace; +import com.sleepycat.je.utilint.TestHookAdapter; +import com.sleepycat.je.utilint.VLSN; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; + +/** + * Tests that deletion of cleaned/reserved log files is prohibited as + * appropriate: + * - during syncup + * - by feeders + * - during network restore, which uses DbBackup + * - during regular DbBackup + * - during Database.count + * - while a DiskOrderedCursor is open + */ +public class FileProtectorTest extends RepTestBase { 
+ + /* + * If: + * - file size is FILE_SIZE + * - no files are protected + * - there is no max disk limit + * - RECORDS are written with DATA_SIZE and UPDATES are performed + * - cleaning and checkpointing are performed + * Then: + * - at least RESERVED_FILES will be cleaned and reserved + * After that, if: + * - MAX_DISK is configured + * - cleaner is woken up + * Then: + * - total activeLogSize will be LTE ACTIVE_DATA_SIZE + * - VLSNS_REMAINING or less remain in the VLSNIndex range + * + * These values are approximate. If they become a little off due to other + * changes, they can be adjusted to the actual values produced in + * testBaselineCleaning. + */ + private static final int FILE_SIZE = 10 * 1000; + private static final int DATA_SIZE = 100; + private static final int RECORDS = 500; + private static final int UPDATES = 5; + private static final int RESERVED_FILES = 12; + private static final int VLSNS_REMAINING = (RECORDS * 2) + 200; + private static final int ACTIVE_DATA_SIZE = (RECORDS * 150 * 5) / 3; + private static final int ACTIVE_DATA_FILES = + (ACTIVE_DATA_SIZE / FILE_SIZE) + 1; + + @Rule + public TestName testName= new TestName(); + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + + for (int i = 0; i < groupSize; i += 1) { + final RepEnvInfo info = repEnvInfo[i]; + final EnvironmentConfig envConfig = info.getEnvConfig(); + final ReplicationConfig repConfig = info.getRepConfig(); + + /* Use small log files to quickly clean multiple files. */ + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, String.valueOf(FILE_SIZE)); + + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_MIN_AGE, "1"); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_MIN_UTILIZATION, "60"); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, "20"); + + /* Allow the VLSNIndex to become small, to delete more files. */ + repConfig.setConfigParam( + RepParams.MIN_VLSN_INDEX_SIZE.getName(), "10"); + + /* Test requires explicit control over all operations. */ + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_VERIFIER, "false"); + } + } + + @Override + @After + public void tearDown() + throws Exception { + + ReplicaSyncupReader.setFileGapHook(null); + FeederReplicaSyncup.setAfterSyncupStartedHook(null); + FeederReplicaSyncup.setAfterSyncupEndedHook(null); + Feeder.setInitialWriteMessageHook(null); + + super.tearDown(); + } + + /** + * Checks that test parameters cause cleaning, reserved files and deleted + * files, as expected. Also checks barren file deletion. + */ + @Test + public void testBaselineCleaning() { + + RepTestUtils.joinGroup(repEnvInfo); + final RepEnvInfo masterInfo = repEnvInfo[0]; + makeWaste(masterInfo); + + for (final RepEnvInfo info : repEnvInfo) { + makeReservedFiles(info); + expectFilesDeleted(info, 0); + } + + /* + * While we are here and the feeders are protecting one or more files + * in the VLSNIndex range, check barren file deletion. 
The barren files
+ * written and deleted by makeNonHAWaste will come after the files in
+ * the VLSNIndex range.
+ */
+ for (final RepEnvInfo info : repEnvInfo) {
+ makeNonHAWaste(info);
+ makeReservedFiles(info);
+ /* Allow two remaining reserved files due to checkpoints. */
+ expectFilesDeleted(info, 2);
+ }
+
+ /*
+ * Close one replica, write more data, then open the replica to check
+ * that the syncup and feeder readers can skip over the gap due to the
+ * deleted barren files.
+ */
+ final RepEnvInfo replicaInfo = repEnvInfo[2];
+ replicaInfo.closeEnv();
+ makeWaste(masterInfo);
+
+ class Hook extends TestHookAdapter<Long> {
+ private volatile boolean called = false;
+
+ @Override
+ public void doHook(Long prevFileNum) {
+ called = true;
+ }
+ }
+
+ final Hook hook = new Hook();
+ ReplicaSyncupReader.setFileGapHook(hook);
+ replicaInfo.openEnv();
+ assertTrue(hook.called);
+
+ verifyData();
+ }
+
+ /**
+ * Tests that files are protected on a master as the result of syncup and
+ * feeder position.
+ *
+ * 1. Create master, make waste and reserved files.
+ * 2. Add a replica, but pause after syncup begins.
+ * 3. Expect that no files can be deleted.
+ * 4. Allow syncup to finish but pause before feeder starts.
+ * 5. Expect that no files can be deleted.
+ * 6. Allow feeder to catch up halfway.
+ * 7. Expect only half the files can be deleted.
+ * 8. Allow feeder to completely catch up.
+ * 9. Expect the rest of the files can be deleted.
+ */
+ @Test
+ public void testSyncupAndFeeder()
+ throws Throwable {
+
+ /* Create group of 2. Make waste and reserved files on both nodes. */
+ RepTestUtils.joinGroup(Arrays.copyOf(repEnvInfo, 2));
+ final RepEnvInfo masterInfo = repEnvInfo[0];
+ final RepEnvInfo replicaInfo = repEnvInfo[1];
+ makeWaste(masterInfo);
+ makeReservedFiles(masterInfo);
+ makeReservedFiles(replicaInfo);
+
+ final VLSNIndex masterVlsnIndex =
+ masterInfo.getRepImpl().getVLSNIndex();
+
+ final VLSNRange initialRange = masterVlsnIndex.getRange();
+
+ class ExpectNoDeletions extends TestHookAdapter<Feeder> {
+ private volatile Throwable exception;
+
+ @Override
+ public void doHook(Feeder feeder) {
+ try {
+ final VLSNRange prevRange = masterVlsnIndex.getRange();
+
+ /*
+ * Syncup has just started, or has just finished but
+ * feeding has not started. No files can be deleted on the
+ * master.
+ */
+ final int deleted = deleteReservedFiles(masterInfo);
+ assertEquals(0, deleted);
+
+ final VLSNRange curRange = masterVlsnIndex.getRange();
+
+ assertEquals(
+ "initialRange=" + initialRange +
+ " curRange=" + curRange +
+ " prevRange=" + prevRange,
+ initialRange.getFirst(), curRange.getFirst());
+
+ /*
+ * While we're here, check that files can be deleted on the
+ * first replica, which is not impacted by this syncup.
+ */
+ expectFilesDeleted(replicaInfo, 0);
+
+ } catch (Throwable e) {
+ e.printStackTrace();
+ exception = e;
+ throw e;
+ }
+ }
+ }
+
+ final long halfwayMessages =
+ initialRange.getLast().getSequence() -
+ initialRange.getFirst().getSequence();
+
+ final long halfwayVlsn = (halfwayMessages / 2) - VLSNS_REMAINING;
+
+ final int someFiles = ACTIVE_DATA_FILES / 2;
+
+ class ExpectSomeDeletions
+ extends TestHookAdapter<BinaryProtocol.Message> {
+
+ private volatile Throwable exception;
+ private volatile int nMessages = 0;
+
+ @Override
+ public void doHook(BinaryProtocol.Message feeder) {
+
+ nMessages += 1;
+
+ if (nMessages != halfwayMessages) {
+ return;
+ }
+
+ /*
+ * After sending half the rep stream, expect that we can
+ * truncate the VLSNIndex and delete some files.
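+ * Roughly the first half of the VLSN range is no longer needed
+ * by this feeder, so the files covering it become unprotected.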
+ */ + try { + final VLSNRange prevRange = masterVlsnIndex.getRange(); + + final int deleted = deleteReservedFiles(masterInfo); + + assertTrue( + "expected=" + someFiles + + " actual=" + deleted + + " nMessages=" + nMessages, + deleted >= someFiles); + + final VLSNRange curRange = masterVlsnIndex.getRange(); + + assertTrue( + "expected=" + halfwayVlsn + + " initialRange=" + initialRange + + " curRange=" + curRange + + " prevRange=" + prevRange + + " nMessages=" + nMessages, + curRange.getFirst().getSequence() >= halfwayVlsn); + + } catch (Throwable e) { + e.printStackTrace(); + exception = e; + throw e; + } + } + } + + final ExpectNoDeletions expectNoDeletions = new ExpectNoDeletions(); + + final ExpectSomeDeletions expectSomeDeletions = + new ExpectSomeDeletions(); + + FeederReplicaSyncup.setAfterSyncupStartedHook(expectNoDeletions); + FeederReplicaSyncup.setAfterSyncupEndedHook(expectNoDeletions); + Feeder.setInitialWriteMessageHook(expectSomeDeletions); + + /* + * Add the third node (the second replica). Syncup and feeding of this + * node will result in calling the test hooks. + */ + RepTestUtils.joinGroup(repEnvInfo); + RepTestUtils.syncGroup(repEnvInfo); + + if (expectNoDeletions.exception != null) { + throw expectNoDeletions.exception; + } + + if (expectSomeDeletions.exception != null) { + throw expectSomeDeletions.exception; + } + + FeederReplicaSyncup.setAfterSyncupStartedHook(null); + FeederReplicaSyncup.setAfterSyncupEndedHook(null); + Feeder.setInitialWriteMessageHook(null); + + /* The rest of the files can now be deleted. */ + makeReservedFiles(repEnvInfo[2]); + for (final RepEnvInfo info : repEnvInfo) { + expectFilesDeleted(info, 0); + } + + verifyData(); + } + + /** + * Tests that network restore protects the active files on the server plus + * the two latest reserved files. + */ + @Test + public void testNetworkRestore() + throws Throwable { + + /* + * Create group of 2 and clean/delete files, so that syncup of the 3rd + * node will fail. + */ + RepTestUtils.joinGroup(Arrays.copyOf(repEnvInfo, 2)); + + final RepEnvInfo masterInfo = repEnvInfo[0]; + makeWaste(masterInfo); + makeReservedFiles(masterInfo); + expectFilesDeleted(masterInfo, 0); + + final RepEnvInfo replicaInfo = repEnvInfo[1]; + makeReservedFiles(replicaInfo); + + /* Get stats before network restore. */ + final EnvironmentStats masterStats = + masterInfo.getEnv().getStats(null); + + final EnvironmentStats replicaStats = + replicaInfo.getEnv().getStats(null); + + /* + * The replica (not the master) should be chosen as the server. + * The master is lower priority, when current VLSNs are roughly equal. + */ + final String expectNode = "Node2"; + + /* + * Two reserved files, plus the active files, should be restored. + * + * The original server active log size, plus the two reserved files, + * should roughly equal the total bytes restored. + */ + final int expectFiles = + replicaInfo.getRepImpl().getFileProtector().getNActiveFiles() + 2; + + final long expectBytes = + replicaStats.getActiveLogSize() + (2 * FILE_SIZE); + + class Hook extends TestHookAdapter { + private volatile Throwable exception; + private volatile int nFiles = 0; + private volatile long nBytes = 0; + + @Override + public void doHook(File file) { + try { + final FileManager fm = + replicaInfo.getRepImpl().getFileManager(); + + /* Convert client path to server path. 
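+ * The hook is given the client-side File; map it back through the
+ * server's FileManager so that file.length() below reads the file
+ * actually being served.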
*/ + file = new File( + fm.getFullFileName( + fm.getNumFromName(file.getName()))); + + nBytes += file.length(); + nFiles += 1; + + /* + * Make reserved files and delete files other than those + * that are protected because they are being restored. + * This checks that the network restore is protecting the + * files yet to be transferred, and that the VLSNIndex can + * advance at the same time. + * + * The number of reserved files will start at expectFiles + * and go down as files are transferred, since files are + * unprotected as they are transferred. Two files always + * remain reserved: one for the file just transferred and + * another is added as a fudge factor. + */ + makeWaste(masterInfo); + makeReservedFiles(replicaInfo); + expectFilesDeleted(replicaInfo, expectFiles - nFiles + 2); + + /* + final EnvironmentStats stats = + replicaInfo.getEnv().getStats(null); + + System.out.format( + "expect reserved %d true reserved %,d %n", + (expectFiles - nFiles + 2), + stats.getReservedLogSize()); + + System.out.println("thread=" + + Thread.currentThread().getName() + + " activeSize=" + stats.getActiveLogSize() + + " reservedSize=" + stats.getReservedLogSize() + + " totalSize=" + stats.getTotalLogSize() + + " protectedSize=" + stats.getProtectedLogSize() + + " protectedSizeMap=" + stats.getProtectedLogSizeMap()); + //*/ + } catch (Throwable e) { + exception = e; + } + } + } + + /* + * Add 3rd node to group, which will throw ILE. Then perform the + * network restore, which will call the hook. + */ + final NetworkRestore networkRestore = new NetworkRestore(); + final Hook hook = new Hook(); + + try { + RepTestUtils.joinGroup(repEnvInfo); + fail("Expected ILE"); + } catch (final InsufficientLogException ile) { + networkRestore.setInterruptHook(hook); + networkRestore.execute(ile, new NetworkRestoreConfig()); + } + + if (hook.exception != null) { + throw hook.exception; + } + + final String nodeName = networkRestore.getLogProvider().getName(); + + final String msg = "nodeName=" + nodeName + + " hook.nFiles=" + hook.nFiles + + " hook.nBytes=" + hook.nBytes + + " masterActive=" + masterStats.getActiveLogSize() + + " replicaActive=" + replicaStats.getActiveLogSize() + + " expectNode=" + expectNode + + " expectFiles=" + expectFiles + + " expectBytes=" + expectBytes; + +// System.out.println(msg); + + assertEquals(msg, expectNode, nodeName); + assertEquals(expectFiles, hook.nFiles); + assertTrue(msg, Math.abs(hook.nBytes - expectBytes) < 5000); + + /* The restored node can now be opened. */ + final RepEnvInfo restoredReplicaInfo = repEnvInfo[2]; + restoredReplicaInfo.openEnv(); + + verifyData(); + } + + /** + * Tests that regular DbBackup protects files in the backup set, and that + * protection can be removed as files are copied. + */ + @Test + public void testBackup() { + + RepTestUtils.joinGroup(repEnvInfo); + final RepEnvInfo masterInfo = repEnvInfo[0]; + makeWaste(masterInfo); + + for (final RepEnvInfo info : repEnvInfo) { + + /* Make a data set, delete the waste. */ + makeReservedFiles(info); + expectFilesDeleted(info, 0); + assertEquals(0, deleteReservedFiles(info)); + + /* The backup will protect all files currently active. */ + final DbBackup backup = new DbBackup(info.getEnv()); + backup.startBackup(); + final String[] files = backup.getLogFilesInBackupSet(); + + /* + * Create more waste which will make all files in the backup set + * reserved, but they cannot be deleted yet. 
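+ * They stay protected by the backup until removeFileProtection is
+ * called for each file in the copy loop below.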
+ */ + makeWaste(masterInfo); + makeReservedFiles(info); + expectFilesDeleted(info, files.length); + + final Set reservedFiles = info.getRepImpl(). + getFileProtector().getReservedFileInfo().second(); + + final FileManager fm = info.getRepImpl().getFileManager(); + + /* + * Simulate copying where protection is removed for each file after + * it is copied, and then it can be deleted (if it is reserved). + */ + int deleted = 0; + + for (final String file : files) { + + final File fileObj = new File(info.getEnvHome(), file); + final EnvironmentStats stats = info.getEnv().getStats(null); + + final String msg = "file=" + file + + " protected=" + stats.getProtectedLogSize() + + " protectedMap=" + stats.getProtectedLogSizeMap(); + + assertTrue(msg, fileObj.exists()); + backup.removeFileProtection(file); + + if (reservedFiles.contains(fm.getNumFromName(file))) { + assertEquals(msg, 1, deleteReservedFiles(info)); + assertFalse(msg, fileObj.exists()); + deleted += 1; + } else { + assertEquals(msg, 0, deleteReservedFiles(info)); + } + } + + /* At least some files should have been deleted. */ + final int someFiles = ACTIVE_DATA_FILES / 2; + assertTrue( + "expected=" + someFiles + " deleted=" + deleted, + deleted >= someFiles); + + backup.endBackup(); + expectFilesDeleted(info, 0); + } + } + + /** + * Tests that DiskOrderedCursor and Database.count protect all active + * files, including files created during the scan. The test only checks + * DiskOrderedCursor, and we assume protection works equally well for + * Database.count, since they both rely on DiskOrderedScanner to implement + * file protection. + */ + @Test + public void testDiskOrderedCursor() { + + RepTestUtils.joinGroup(repEnvInfo); + final RepEnvInfo masterInfo = repEnvInfo[0]; + makeWaste(masterInfo); + + for (final RepEnvInfo info : repEnvInfo) { + + /* Make a bunch of reserved files. */ + makeReservedFiles(info); + + /* + * The files currently reserved can be deleted even while the + * cursor is open. + */ + final Set reservedFiles = info.getRepImpl(). + getFileProtector().getReservedFileInfo().second(); + + /* Set queue size to one to cause DOS producer to block. */ + final DiskOrderedCursorConfig config = + new DiskOrderedCursorConfig(). + setQueueSize(1); + + /* + * Open the cursor, which will protect all active files, including + * those that become reserved while the cursor is open. + */ + final Database db = info.getEnv().openDatabase( + null, "test", + new DatabaseConfig().setTransactional(true)); + + final DiskOrderedCursor cursor = db.openCursor(config); + + /* + * Make more waste and reserved files, although these files cannot + * be deleted while the cursor is open. + */ + makeWaste(masterInfo); + makeReservedFiles(info); + + /* Only the files reserved earlier can be deleted. */ + final int deleted = deleteReservedFiles(info); + assertEquals(deleted, countDeletedFiles(info, reservedFiles)); + + /* At least some files should have been deleted. */ + final int someFiles = ACTIVE_DATA_FILES / 2; + assertTrue( + "expected=" + someFiles + " deleted=" + deleted, + deleted >= someFiles); + + /* + * After the cursor is closed, the rest of the files can be + * deleted. + */ + cursor.close(); + + /* + * Wait for DOS producer thread to stop and remove file protection. + */ + while (!DbInternal.getDiskOrderedCursorImpl(cursor). + isProcessorClosed()) { + try { + Thread.sleep(5); + } catch (Throwable e) { + /* Do nothing. 
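+ * An interrupted sleep is harmless here; just poll
+ * isProcessorClosed again.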
*/ + } + } + + expectFilesDeleted(info, 0); + db.close(); + } + } + + /** + * Generate waste that should later cause cleaning to generate at least + * RESERVED_FILES reserved files. + */ + private void makeWaste(final RepEnvInfo info) { + + final ReplicatedEnvironment master = info.getEnv(); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + final Database db = master.openDatabase(null, "test", dbConfig); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]); + + /* Insert records and then update them UPDATES*2 times. */ + for (int i = 0; i <= UPDATES * 2; i++) { + for (int j = 0; j < RECORDS; j++) { + IntegerBinding.intToEntry(j, key); + Arrays.fill(data.getData(), (byte) j); + db.put(null, key, data, Put.OVERWRITE, null); + } + } + + db.close(); + RepTestUtils.syncGroup(repEnvInfo); + } + + private void verifyData() { + + for (final RepEnvInfo info : repEnvInfo) { + final ReplicatedEnvironment env = info.getEnv(); + + final Database db = env.openDatabase( + null, "test", + new DatabaseConfig().setTransactional(true)); + + final Cursor cursor = db.openCursor(null, null); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + int i = 0; + while (cursor.get(key, data, Get.NEXT, null) != null) { + assertEquals(i, IntegerBinding.entryToInt(key)); + assertEquals(DATA_SIZE, data.getSize()); + for (int j = 0; j < DATA_SIZE; j += 1) { + assertEquals(data.getData()[j], (byte) i); + } + i += 1; + } + + assertEquals(RECORDS, i); + cursor.close(); + db.close(); + } + + final VLSN lastVLSN = + repEnvInfo[0].getRepImpl().getVLSNIndex().getRange().getLast(); + + try { + RepTestUtils.checkNodeEquality(lastVLSN, false, repEnvInfo); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + /** + * Performs cleaning and a checkpoint. Checks that at least RESERVED_FILES + * files are cleaned and made reserved. + */ + private void makeReservedFiles(final RepEnvInfo info) { + final Environment env = info.getEnv(); + + final int cleaned = env.cleanLog(); + verifyMetadata(info); + + String msg = "cleaned=" + cleaned; + assertTrue(msg, cleaned >= RESERVED_FILES); + + final int minUtil = + info.getRepImpl().getCleaner().getUtilizationCalculator(). + getPredictedMinUtilization(); + + assertTrue("minUtil=" + minUtil, minUtil >= 40); + + env.checkpoint(new CheckpointConfig().setForce(true)); + verifyMetadata(info); + + EnvironmentStats stats = env.getStats(null); + + final double reservedEst = + stats.getReservedLogSize() / (double) FILE_SIZE; + + final int reserved = + info.getRepImpl().getFileProtector().getNReservedFiles(); + + msg += " reservedEst=" + reservedEst + " reserved=" + reserved; + + assertTrue(msg, reserved >= reservedEst); + assertTrue(msg, reserved >= RESERVED_FILES); + } + + /** + * Sets the MAX_DISK limit temporarily to delete as many reserved files as + * possible. Returns the number of files that were deleted. + */ + private int deleteReservedFiles(final RepEnvInfo info) { + + final Environment env = info.getEnv(); + final EnvironmentStats stats = env.getStats(null); + + /* + * Set MAX_DISK to less than the active data set size to delete as + * many files as possible. 
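+ * ACTIVE_DATA_SIZE / 2 guarantees a violation, since the active data
+ * alone already exceeds the limit, so every unprotected reserved file
+ * is removed.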
+ */ + env.setMutableConfig( + env.getMutableConfig().setConfigParam( + EnvironmentConfig.MAX_DISK, + String.valueOf(ACTIVE_DATA_SIZE / 2))); + + /* + * Normally the cleaner will call manageDiskUsage periodically to + * delete reserved files. We call it here directly to avoid + * DiskLimitException and other side effects of attempting cleaning. + */ + info.getRepImpl().getCleaner().manageDiskUsage(); + + verifyMetadata(info); + + /* + * Remove MAX_DISK limit and call manageDiskUsage to reset usage + * violation state variables. + */ + env.setMutableConfig( + env.getMutableConfig().setConfigParam( + EnvironmentConfig.MAX_DISK, "0")); + + info.getRepImpl().getCleaner().manageDiskUsage(); + + verifyMetadata(info); + + return (int) (env.getStats(null).getNCleanerDeletions() - + stats.getNCleanerDeletions()); + } + + /** + * After deleting reserved files, expect ACTIVE_DATA_FILES or less + * remaining and VLSNS_REMAINING or less in the VLSNIndex. + * + * @param expectReservedFiles no more than this amount should remain + * reserved. + */ + private void expectFilesDeleted(final RepEnvInfo info, + final int expectReservedFiles) { + + deleteReservedFiles(info); + + final VLSNIndex vlsnIndex = info.getRepImpl().getVLSNIndex(); + final VLSNRange range = vlsnIndex.getRange(); + final long firstSeq = range.getLast().getSequence(); + final long lastSeq = range.getFirst().getSequence(); + + assertTrue( + "expected=" + VLSNS_REMAINING + " range=" + range, + firstSeq - lastSeq <= VLSNS_REMAINING); + + final EnvironmentStats stats = info.getEnv().getStats(null); + final long totalSize = stats.getTotalLogSize(); + final long reservedSize = stats.getReservedLogSize(); + + /* + * util * total == active + * total == active / util + */ + final int minUtil = + info.getRepImpl().getCleaner().getUtilizationCalculator(). + getCurrentMinUtilization(); + + final int expectTotal = ((ACTIVE_DATA_SIZE * 100) / minUtil); + final int expectReserved = expectReservedFiles * FILE_SIZE; + + final String msg = "totalSize=" + totalSize + + " reservedSize=" + reservedSize + + " expectTotal=" + expectTotal + + " expectReserved=" + expectReserved + + " minUtil=" + minUtil; + + assertTrue(msg, totalSize <= expectTotal); + assertTrue(msg, reservedSize <= expectReserved); + } + + /* + * For debugging. + */ + @SuppressWarnings("unused") + private void printDbSpace(final RepEnvInfo info, final String label) { + System.out.println(label); + DbSpace dbSpace = new DbSpace(info.getEnv(), false, false, false); + dbSpace.print(System.out); + } + + /** + * Generate waste that is not replicated, to create "barren files" (having + * no replicable entries). A non-replicated DB is used to create the waste. + */ + private void makeNonHAWaste(final RepEnvInfo info) { + + final ReplicatedEnvironment env = info.getEnv(); + + /* Create non-replicated DB. */ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setReplicated(false); + dbConfig.setAllowCreate(true); + + final Database db = env.openDatabase(null, "non-ha", dbConfig); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]); + + /* Insert/update/delete records. */ + for (int i = 0; i <= UPDATES; i++) { + for (int j = 0; j < RECORDS; j++) { + IntegerBinding.intToEntry(j, key); + db.put(null, key, data, Put.OVERWRITE, null); + db.delete(null, key, null); + } + } + + db.close(); + env.flushLog(false); + } + + /** + * Checks integrity of various metadata. 
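+ * Specifically: every VLSN in the index range must map to an existing
+ * file, the total log size must equal active plus reserved, and no
+ * protected size may exceed the total.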
+ */ + private void verifyMetadata(final RepEnvInfo info) { + + /* + * Check that all VLSNs in the VLSNIndex range are in existing files. + */ + final RepImpl repImpl = info.getRepImpl(); + final FileManager fm = repImpl.getFileManager(); + final VLSNIndex vlsnIndex = repImpl.getVLSNIndex(); + final VLSNRange range = vlsnIndex.getRange(); + + for (VLSN vlsn = range.getFirst(); + vlsn.compareTo(range.getLast()) <= 0; + vlsn = vlsn.getNext()) { + + final long file = vlsnIndex.getGTEFileNumber(vlsn); + assertEquals(file, vlsnIndex.getLTEFileNumber(vlsn)); + assertTrue("file=" + file, fm.isFileValid(file)); + } + + /* Sanity check of log size values. */ + final EnvironmentStats stats = info.getEnv().getStats(null); + assertEquals( + stats.getTotalLogSize(), + stats.getActiveLogSize() + stats.getReservedLogSize()); + final long protectedSize = stats.getProtectedLogSize(); + assertTrue(protectedSize <= stats.getTotalLogSize()); + + /* The largest protected map value should be LTE the total. */ + final Map map = stats.getProtectedLogSizeMap(); + final String msg = "protectedSize= " + protectedSize + " val="; + for (final Long val : map.values()) { + assertTrue(msg + val, val <= protectedSize); + } + } + + /** + * Returns the number of files in the given set that do not exist. + */ + private int countDeletedFiles(final RepEnvInfo info, + final Set files) { + + final FileManager fm = info.getRepImpl().getFileManager(); + int count = 0; + + for (final long file : files) { + if (!fm.isFileValid(file)) { + count += 1; + } + } + + return count; + } +} diff --git a/test/com/sleepycat/je/cleaner/FileSelectionTest.java b/test/com/sleepycat/je/cleaner/FileSelectionTest.java new file mode 100644 index 0000000..0cb7b7c --- /dev/null +++ b/test/com/sleepycat/je/cleaner/FileSelectionTest.java @@ -0,0 +1,1492 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DbTestProxy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class FileSelectionTest extends TestBase { + + private static final String DBNAME = "cleanerFileSelection"; + private static final int DATA_SIZE = 140; + private static final int FILE_SIZE = 4096 * 10; + private static final int INITIAL_FILES = 5; + private static final int INITIAL_FILES_TEMP = 1; + private static final int INITIAL_KEYS = 2000; + private static final int INITIAL_KEYS_DUPS = 5000; + private static final int INITIAL_KEYS_DUPS_TEMP = 10000; + private static final byte[] MAIN_KEY_FOR_DUPS = {0, 1, 2, 3, 4, 5}; + + private static final EnvironmentConfig envConfig = initConfig(); + private static final EnvironmentConfig highUtilizationConfig = + initConfig(); + private static final EnvironmentConfig steadyStateAutoConfig = + initConfig(); + private static final EnvironmentConfig noLogFileDeleteDetectConfig = + initConfig(); + static { + highUtilizationConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), + String.valueOf(90)); + + steadyStateAutoConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "true"); + + noLogFileDeleteDetectConfig.setConfigParam + (EnvironmentParams.LOG_DETECT_FILE_DELETE.getName(), "false"); + } + + static EnvironmentConfig initConfig() { + EnvironmentConfig config = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(config); + config.setTransactional(true); + config.setAllowCreate(true); + config.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(FILE_SIZE)); + config.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(), + "false"); + 
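/* The daemons are disabled below and the cleaner batch size is limited to one file, so each test drives cleaning explicitly and deterministically. */ +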
config.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), + "false"); + config.setConfigParam(EnvironmentParams.CLEANER_REMOVE.getName(), + "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.CLEANER_LOCK_TIMEOUT.getName(), "1"); + config.setConfigParam + (EnvironmentParams.CLEANER_MAX_BATCH_FILES.getName(), "1"); + return config; + } + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private File envHome; + private Environment env; + private EnvironmentImpl envImpl; + private Database db; + private JUnitThread junitThread; + private volatile int synchronizer; + private boolean dups; + private boolean deferredWrite; + private boolean temporary; + + /* The index is the file number, the value is the first key in the file. */ + private List firstKeysInFiles; + + /* Set of keys that should exist. */ + private Set existingKeys; + + @Parameters + public static List genParams() { + return Arrays.asList( + new Object[][] {{false, false}, {true, false}, {false,true}}); + } + + public FileSelectionTest(boolean deferredWrite, boolean temporary) { + envHome = SharedTestUtils.getTestDir(); + this.deferredWrite = deferredWrite; + this.temporary = temporary; + customName = deferredWrite ? ":deferredWrite" : + (temporary ? ":temporary" : ":txnl"); + } + + @After + public void tearDown() { + + if (junitThread != null) { + junitThread.shutdown(); + junitThread = null; + } + + try { + if (env != null) { + env.close(); + } + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + + db = null; + env = null; + envImpl = null; + envHome = null; + existingKeys = null; + firstKeysInFiles = null; + } + + private void openEnv() + throws DatabaseException { + + openEnv(envConfig); + } + + private void openEnv(EnvironmentConfig config) + throws DatabaseException { + + env = new Environment(envHome, config); + envImpl = DbInternal.getNonNullEnvImpl(env); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(!isDeferredWriteMode()); + dbConfig.setDeferredWrite(deferredWrite); + dbConfig.setTemporary(temporary); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + db = env.openDatabase(null, DBNAME, dbConfig); + } + + private void closeEnv() + throws DatabaseException { + + if (temporary) { + existingKeys.clear(); + } + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + * Tests that the test utilities work. + */ + @Test + public void testBaseline() + throws DatabaseException { + + int nCleaned; + + openEnv(); + writeData(); + verifyData(); + nCleaned = cleanRoutine(); + if (dups) { + /* + * For dup DBs, all LNs are immediately obsolete so we can't expect + * the same sort of behavior as we do otherwise. Most files are + * already obsolete because all LNs are. + */ + assertTrue(String.valueOf(nCleaned), + nCleaned >= INITIAL_FILES_TEMP / 2); + } else { + /* One file may be cleaned after writing, if a checkpoint occurs. */ + assertTrue(String.valueOf(nCleaned), nCleaned <= 1); + env.checkpoint(forceConfig); + nCleaned = cleanRoutine(); + /* One file may be cleaned after cleaning and checkpointing. 
*/ + assertTrue(String.valueOf(nCleaned), nCleaned <= 1); + } + closeEnv(); + openEnv(); + verifyData(); + nCleaned = cleanRoutine(); + if (dups) { + /* + * For non-temporary DBs, one file may be cleaned due to the + * checkpoint with no migrated LNs. For temporary DBs, everything + * was cleaned in the first phase above. + */ + assertTrue(String.valueOf(nCleaned), nCleaned <= 1); + } else if (temporary) { + /* Temp DBs are automatically deleted and cleaned. */ + assertTrue(String.valueOf(nCleaned), + nCleaned >= INITIAL_FILES_TEMP); + } else { + /* No files should be cleaned when no writing occurs. */ + assertEquals(0, nCleaned); + } + closeEnv(); + } + + @Test + public void testBaselineDups() + throws DatabaseException { + + dups = true; + testBaseline(); + } + + /** + * Tests that the expected files are selected for cleaning. + */ + @Test + public void testBasic() + throws DatabaseException { + + /* Test assumes that keys are written in order. */ + if (isDeferredWriteMode()) { + return; + } + + openEnv(); + writeData(); + verifyDeletedFiles(null); + + /* + * The first file should be the first to be cleaned because it has + * relatively few LNs. + */ + forceCleanOne(); + verifyDeletedFiles(new int[] {0}); + verifyData(); + + /* + * The rest of this test doesn't apply to dup DBs, since the LNs are + * immediately obsolete and we can't predict which files will contain + * the BINs for a given key range. + */ + if (dups) { + closeEnv(); + return; + } + + /* + * Delete most of the LNs in two middle files. They should be the next + * two files cleaned. + */ + int fileNum = INITIAL_FILES / 2; + int firstKey = firstKeysInFiles.get(fileNum); + int nextKey = firstKeysInFiles.get(fileNum + 1); + int count = nextKey - firstKey - 4; + deleteData(firstKey, count); + + fileNum += 1; + firstKey = firstKeysInFiles.get(fileNum); + nextKey = firstKeysInFiles.get(fileNum + 1); + count = nextKey - firstKey - 4; + deleteData(firstKey, count); + + forceCleanOne(); + forceCleanOne(); + verifyDeletedFiles(new int[] {0, fileNum - 1, fileNum}); + verifyData(); + + closeEnv(); + } + + @Test + public void testBasicDups() + throws DatabaseException { + + dups = true; + testBasic(); + } + + /* + * testCleaningMode, testTruncateDatabase, and testRemoveDatabase are + * not tested with dups=true because with duplicates the total utilization + * after calling writeData() is 47%, so cleaning will occur and the tests + * don't expect that. + */ + + /** + * Tests that routine cleaning does not clean when it should not. + */ + @Test + public void testCleaningMode() + throws DatabaseException { + + int nextFile = -1; + int nCleaned; + + /* + * Nothing is cleaned with routine cleaning, even after reopening the + * environment. + */ + openEnv(); + writeData(); + + nCleaned = cleanRoutine(); + assertEquals(0, nCleaned); + nextFile = getNextDeletedFile(nextFile); + assertTrue(nextFile == -1); + + verifyData(); + closeEnv(); + openEnv(); + verifyData(); + + nCleaned = cleanRoutine(); + if (temporary) { + assertTrue(String.valueOf(nCleaned), + nCleaned >= INITIAL_FILES_TEMP); + } else { + assertEquals(0, nCleaned); + nextFile = getNextDeletedFile(nextFile); + assertTrue(nextFile == -1); + } + + verifyData(); + + closeEnv(); + } + + /** + * Test retries after cleaning fails because an LN was write-locked. + */ + @Test + public void testRetry() + throws DatabaseException { + + /* Test assumes that keys are written in order.
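+ * (Deferred-write and temporary DBs are skipped because writeData does + * not populate firstKeysInFiles in those modes, so the key-to-file + * mapping relied on below is unavailable.)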
*/ + if (isDeferredWriteMode()) { + return; + } + + /* + * This test doesn't apply to dup DBs, since the LNs are immediately + * obsolete and locking the record has no impact on cleaning. + */ + if (dups) { + return; + } + + openEnv(highUtilizationConfig); + writeData(); + verifyData(); + + /* + * The first file is full of LNs. Delete all but the last record to + * cause it to be selected next for cleaning. + */ + int firstKey = firstKeysInFiles.get(1); + int nextKey = firstKeysInFiles.get(2); + int count = nextKey - firstKey - 1; + deleteData(firstKey, count); + verifyData(); + + /* Write-lock the last record to cause cleaning to fail. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + if (dups) { + key.setData(MAIN_KEY_FOR_DUPS); + data.setData(TestUtils.getTestArray(nextKey - 1)); + status = cursor.getSearchBoth(key, data, LockMode.RMW); + } else { + key.setData(TestUtils.getTestArray(nextKey - 1)); + status = cursor.getSearchKey(key, data, LockMode.RMW); + } + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + + /* Cleaning should fail. */ + forceCleanOne(); + verifyDeletedFiles(null); + forceCleanOne(); + verifyDeletedFiles(null); + + /* Release the write-lock. */ + cursor.close(); + txn.abort(); + verifyData(); + + /* Cleaning should succeed, with file 1 (possibly more) deleted. */ + forceCleanOne(); + assertEquals(1, getNextDeletedFile(0)); + verifyData(); + + closeEnv(); + } + + /** + * Tests that the je.cleaner.minFileUtilization property works as expected. + */ + @Test + public void testMinFileUtilization() + throws DatabaseException { + + /* Test assumes that keys are written in order. */ + if (isDeferredWriteMode()) { + return; + } + + /* Open with minUtilization=10 and minFileUtilization=0. */ + EnvironmentConfig myConfig = initConfig(); + myConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), + String.valueOf(10)); + myConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName(), + String.valueOf(0)); + openEnv(myConfig); + + /* Write data and delete two thirds of the LNs in the middle file. */ + writeData(); + verifyDeletedFiles(null); + int fileNum = INITIAL_FILES / 2; + int firstKey = firstKeysInFiles.get(fileNum); + int nextKey = firstKeysInFiles.get(fileNum + 1); + int count = ((nextKey - firstKey) * 2) / 3; + deleteData(firstKey, count); + + /* The file should not be deleted. */ + env.cleanLog(); + env.checkpoint(forceConfig); + verifyDeletedFiles(null); + + /* Change minFileUtilization=50 */ + myConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName(), + String.valueOf(50)); + env.setMutableConfig(myConfig); + + /* The file should now be deleted. */ + env.cleanLog(); + env.checkpoint(forceConfig); + verifyDeletedFiles(new int[] {fileNum}); + verifyData(); + + closeEnv(); + } + + private void printFiles(String msg) { + System.out.print(msg); + Long lastNum = envImpl.getFileManager().getLastFileNum(); + for (int i = 0; i <= (int) lastNum.longValue(); i += 1) { + String name = envImpl.getFileManager(). 
+ getFullFileName(i, FileManager.JE_SUFFIX); + if (new File(name).exists()) { + System.out.print(" " + i); + } + } + System.out.println(""); + } + + @Test + public void testRetryDups() + throws DatabaseException { + + dups = true; + testRetry(); + } + + /** + * Steady state should occur with normal (50% utilization) configuration + * and automatic checkpointing and cleaning. + */ + @Test + public void testSteadyStateAutomatic() + throws DatabaseException { + + doSteadyState(steadyStateAutoConfig, false, 13); + } + + @Test + public void testSteadyStateAutomaticDups() + throws DatabaseException { + + dups = true; + testSteadyStateAutomatic(); + } + + /** + * Steady state utilization with manual checkpointing and cleaning. + */ + @Test + public void testSteadyStateManual() + throws DatabaseException { + + doSteadyState(envConfig, true, + (deferredWrite | temporary) ? 20 : 13); + } + + @Test + public void testSteadyStateManualDups() + throws DatabaseException { + + dups = true; + testSteadyStateManual(); + } + + /** + * Steady state should occur when utilization is at the maximum. + */ + @Test + public void testSteadyStateHighUtilization() + throws DatabaseException { + + doSteadyState(highUtilizationConfig, true, + (deferredWrite | temporary) ? 12 : 9); + } + + @Test + public void testSteadyStateHighUtilizationDups() + throws DatabaseException { + + dups = true; + testSteadyStateHighUtilization(); + } + + /** + * Tests that we quickly reach a steady state of disk usage when updates + * are made but no net increase in data occurs. + * + * @param manualCleaning whether to run cleaning manually every + * iteration, or to rely on the cleaner thread. + * + * @param maxFileCount the maximum number of files allowed for this test. + */ + private void doSteadyState(EnvironmentConfig config, + boolean manualCleaning, + int maxFileCount) + throws DatabaseException { + + openEnv(config); + writeData(); + verifyData(); + + final int iterations = 100; + + for (int i = 0; i < iterations; i += 1) { + /* updateData flushes temp and deferredWrite DBs. */ + updateData(100, 100); + int cleaned = -1; + if (manualCleaning) { + cleaned = cleanRoutine(); + } else { + /* Need to delay a bit for the cleaner to keep up. */ + try { + Thread.sleep(25); + } catch (InterruptedException e) {} + } + + /* + * Checkpoints need to occur often for the cleaner to keep up + * and to delete files that were cleaned. + * + * Note that temp DBs are not checkpointed and we rely on eviction + * to flush obsolete information and cause cleaning. [#16928] + */ + env.checkpoint(forceConfig); + verifyData(); + int fileCount = + envImpl.getFileManager().getAllFileNumbers().length; + assertTrue(customName + + " fileCount=" + fileCount + + " maxFileCount=" + maxFileCount + + " iteration=" + i, + fileCount <= maxFileCount); + if (false) { + System.out.println("fileCount=" + fileCount + + " cleaned=" + cleaned); + } + } + closeEnv(); + } + + /** + * Tests basic file protection. + */ + @Test + public void testProtectedFileRange() + throws DatabaseException { + + /* Test assumes that keys are written in order. */ + if (isDeferredWriteMode()) { + return; + } + + openEnv(); + writeData(); + verifyDeletedFiles(null); + + /* Delete all records so that all INITIAL_FILES will be cleaned. */ + int lastFile = firstKeysInFiles.size() - 1; + int lastKey = firstKeysInFiles.get(lastFile); + deleteData(0, lastKey); + + /* Protect 4 ranges: {0-N}, {1-N}, {2-N}, {2-N}.
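+ * A sketch of the API exercised below (names as in FileProtector): + * ProtectedFileSet pfs = fileProtector.protectFileRange("label", n); + * ... // files n and above cannot be deleted + * fileProtector.removeFileProtection(pfs); + * Deletion can proceed only below the lowest start file of any range + * still in effect, which is what the steps below verify.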
*/ + FileProtector fileProtector = envImpl.getFileProtector(); + FileProtector.ProtectedFileSet pfs0 = + fileProtector.protectFileRange("test-0", 0); + FileProtector.ProtectedFileSet pfs1 = + fileProtector.protectFileRange("test-1", 1); + FileProtector.ProtectedFileSet pfs2a = + fileProtector.protectFileRange("test-2a", 2); + FileProtector.ProtectedFileSet pfs2b = + fileProtector.protectFileRange("test-2b", 2); + + /* No files should be deleted. */ + forceClean(lastFile); + verifyDeletedFiles(null); + verifyData(); + assertEquals(lastFile, fileProtector.getNReservedFiles()); + + /* + * Removing {1-N} will not cause any deletions, because {0-N} is still + * in effect. + */ + fileProtector.removeFileProtection(pfs1); + forceClean(lastFile); + verifyDeletedFiles(null); + verifyData(); + assertEquals(lastFile, fileProtector.getNReservedFiles()); + + /* Removing {0-N} will cause 0 and 1 to be deleted. */ + fileProtector.removeFileProtection(pfs0); + forceClean(lastFile); + verifyDeletedFiles(new int[] {0, 1}); + verifyData(); + assertEquals(lastFile - 2, fileProtector.getNReservedFiles()); + + /* + * Removing {2-N} will not cause more deletions because another {2-N} + * range is still in effect. + */ + fileProtector.removeFileProtection(pfs2a); + forceClean(lastFile); + verifyDeletedFiles(new int[] {0, 1}); + verifyData(); + assertEquals(lastFile - 2, fileProtector.getNReservedFiles()); + + /* Removing the 2nd {2-N} range causes all files to be deleted. */ + fileProtector.removeFileProtection(pfs2b); + forceClean(lastFile); + int[] allFiles = new int[lastFile]; + for (int i = 0; i < lastFile; i += 1) { + allFiles[i] = i; + } + verifyDeletedFiles(allFiles); + verifyData(); + assertEquals(0, fileProtector.getNReservedFiles()); + + closeEnv(); + } + + /** + * Tests that truncate causes cleaning. + */ + @Test + public void testTruncateDatabase() + throws DatabaseException { + + int nCleaned; + + openEnv(); + writeData(); + + nCleaned = cleanRoutine(); + assertEquals(0, nCleaned); + db.close(); + db = null; + + /* + * Temporary databases are removed when the database is closed, so + * don't call truncate explicitly. + */ + if (!temporary) { + env.truncateDatabase(null, DBNAME, false /* returnCount */); + } + + nCleaned = cleanRoutine(); + if (temporary) { + assertTrue(String.valueOf(nCleaned), + nCleaned >= INITIAL_FILES_TEMP - 1); + } else { + assertTrue(String.valueOf(nCleaned), + nCleaned >= INITIAL_FILES - 1); + } + + closeEnv(); + } + + /** + * Tests that remove causes cleaning. + */ + @Test + public void testRemoveDatabase() + throws DatabaseException { + + int nCleaned; + + openEnv(); + writeData(); + + String dbName = db.getDatabaseName(); + db.close(); + db = null; + + nCleaned = cleanRoutine(); + if (temporary) { + assertTrue(String.valueOf(nCleaned), + nCleaned >= INITIAL_FILES_TEMP - 1); + assertTrue(!env.getDatabaseNames().contains(dbName)); + } else { + assertEquals(0, nCleaned); + + env.removeDatabase(null, dbName); + nCleaned = cleanRoutine(); + assertTrue(String.valueOf(nCleaned), + nCleaned >= INITIAL_FILES - 1); + } + + closeEnv(); + } + + @Test + public void testForceCleanFiles() + throws DatabaseException { + + /* When the temp DB is closed many files will be cleaned. */ + if (temporary) { + return; + } + + /* No files force cleaned. 
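+ * Baseline: with CLEANER_FORCE_CLEAN_FILES unset, routine cleaning plus + * a forced checkpoint must not delete any files.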
*/ + EnvironmentConfig myConfig = initConfig(); + openEnv(myConfig); + writeData(); + verifyData(); + env.cleanLog(); + env.checkpoint(forceConfig); + verifyDeletedFiles(null); + + EnvironmentMutableConfig mutableConfig = env.getMutableConfig(); + + /* Force cleaning: 3 */ + mutableConfig.setConfigParam( + EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "3"); + env.setMutableConfig(mutableConfig); + forceCleanOne(); + verifyDeletedFiles(new int[] {3}); + + /* Force cleaning: 0 - 1 */ + mutableConfig.setConfigParam( + EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "0-1"); + env.setMutableConfig(mutableConfig); + forceCleanOne(); + forceCleanOne(); + verifyDeletedFiles(new int[] {0, 1, 3}); + + /* + * Clean file 2 using public cleanLog method -- forcing cleaning with + * an internal API should not be necessary. File is not deleted, + * however, because we don't do a checkpoint. + */ + mutableConfig.setConfigParam( + EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "2"); + env.setMutableConfig(mutableConfig); + int files = env.cleanLog(); + assertEquals(1, files); + verifyDeletedFiles(new int[] {0, 1, 3}); + + /* + * Try cleaning 2 again, should not get exception. + * Before a bug fix [#26326], an assertion fired in FileSelector. + */ + mutableConfig.setConfigParam( + EnvironmentConfig.CLEANER_FORCE_CLEAN_FILES, "2"); + env.setMutableConfig(mutableConfig); + files = env.cleanLog(); + assertEquals(0, files); + verifyDeletedFiles(new int[] {0, 1, 3}); + + /* Finally perform a checkpoint and file 2 should be deleted. */ + env.checkpoint(forceConfig); + verifyDeletedFiles(new int[] {0, 1, 2, 3}); + + closeEnv(); + } + + /** + * Checks that old version log files are upgraded when + * je.cleaner.upgradeToLogVersion is set. The version 5 log files to be + * upgraded in this test were created with MakeMigrationLogFiles. + */ + @Test + public void testLogVersionUpgrade() + throws DatabaseException, IOException { + + if (temporary) { + /* This test is not applicable. */ + return; + } + + /* Copy pre-created files 0 and 1, which are log version 5. */ + TestUtils.loadLog + (getClass(), "migrate_f0.jdb", envHome, "00000000.jdb"); + TestUtils.loadLog + (getClass(), "migrate_f1.jdb", envHome, "00000001.jdb"); + + /* + * Write several more files which are log version 6 or greater. To + * check whether these files are cleaned below we need to write more + * than 2 files (2 is the minimum age for cleaning). + */ + env = MakeMigrationLogFiles.openEnv(envHome, false /*allowCreate*/); + MakeMigrationLogFiles.makeMigrationLogFiles(env); + env.checkpoint(forceConfig); + MakeMigrationLogFiles.makeMigrationLogFiles(env); + env.checkpoint(forceConfig); + closeEnv(); + + /* With upgradeToLogVersion=0 no files should be cleaned. */ + openEnvWithUpgradeToLogVersion(0); + int nFiles = env.cleanLog(); + assertEquals(0, nFiles); + closeEnv(); + + /* With upgradeToLogVersion=5 no files should be cleaned. */ + openEnvWithUpgradeToLogVersion(5); + nFiles = env.cleanLog(); + assertEquals(0, nFiles); + closeEnv(); + + /* Upgrade log files to the current version, which is 6 or greater. */ + openEnvWithUpgradeToLogVersion(-1); // -1 means current version + + /* + * Clean one log file at a time so we can check that the backlog is + * not impacted by log file migration.
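+ * Each iteration should migrate and clean exactly one upgraded file + * while the reported backlog stays at zero.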
+ */ + for (int i = 0; i < 2; i += 1) { + boolean cleaned = env.cleanLogFile(); + assertTrue(cleaned); + EnvironmentStats stats = env.getStats(null); + assertEquals(0, stats.getCleanerBacklog()); + } + env.checkpoint(forceConfig); + verifyDeletedFiles(new int[] {0, 1}); + + /* No more files should be cleaned. */ + nFiles = env.cleanLog(); + assertEquals(0, nFiles); + closeEnv(); + + /* + * Force clean file 2 to ensure that it was not cleaned above because + * of its log version and not some other factor. + */ + EnvironmentConfig myConfig = initConfig(); + myConfig.setConfigParam + (EnvironmentParams.CLEANER_FORCE_CLEAN_FILES.getName(), "2"); + openEnv(myConfig); + nFiles = env.cleanLog(); + assertEquals(1, nFiles); + env.checkpoint(forceConfig); + verifyDeletedFiles(new int[] {0, 1, 2}); + + closeEnv(); + } + + private void openEnvWithUpgradeToLogVersion(int upgradeToLogVersion) + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setConfigParam + (EnvironmentParams.CLEANER_UPGRADE_TO_LOG_VERSION.getName(), + String.valueOf(upgradeToLogVersion)); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + } + + /** + * Tests that when cleaned files are deleted during a compression, the + * flushing of the local tracker does not transfer tracker information + * for the deleted files. [#15528] + * + * This test also checks that tracker information is not transferred to the + * MapLN's per-DB utilization information in DbFileSummaryMap. This was + * occurring in JE 3.3.74 and earlier, under the same circumstances as + * tested here (IN compression). [#16610] + */ + @Test + public void testCompressionBug() + throws DatabaseException { + + /* + * We need to compress deleted keys and count their utilization under + * an explicit compress() call. With deferred write, no utilization + * counting occurs until eviction/sync, and that would also do + * compression. + */ + if (isDeferredWriteMode()) { + return; + } + + EnvironmentConfig envConfig = initConfig(); + /* Disable compressor so we can compress explicitly. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + /* Ensure that we check for resurrected file leaks. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "true"); + openEnv(envConfig); + + /* Write and then delete all data. */ + writeData(); + for (Iterator i = existingKeys.iterator(); i.hasNext();) { + int nextKey = ((Integer) i.next()).intValue(); + DatabaseEntry key = + new DatabaseEntry(TestUtils.getTestArray(nextKey)); + OperationStatus status = db.delete(null, key); + assertSame(OperationStatus.SUCCESS, status); + } + + synchronizer = 0; + + /* Create thread that will do the compression. */ + junitThread = new JUnitThread("TestCompress") { + @Override + public void testBody() { + try { + /* compress() will call the test hook below. */ + env.compress(); + } catch (Throwable e) { + e.printStackTrace(); + } + } + }; + + /* + * Set a hook that is called by the INCompressor before it calls + * UtilizationProfile.flushLocalTracker.
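+ * The hook parks the compressor thread (synchronizer == 1) until the + * main thread has cleaned and checkpointed, after which the main thread + * sets synchronizer to 2 so that flushLocalTracker runs only after the + * cleaned files have been deleted.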
+ */ + envImpl.getINCompressor().setBeforeFlushTrackerHook(new TestHook() { + public void doHook() { + synchronizer = 1; + /* Wait for log cleaning to complete. */ + while (synchronizer < 2 && !Thread.interrupted()) { + Thread.yield(); + } + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + }); + + /* Kick off test in thread above. */ + junitThread.start(); + /* Wait for hook to be called at the end of compression. */ + while (synchronizer < 1) Thread.yield(); + /* Clean and checkpoint to delete cleaned files. */ + while (env.cleanLog() > 0) { } + env.checkpoint(forceConfig); + /* Allow test hook to return, so that flushLocalTracker is called. */ + synchronizer = 2; + + /* + * Before the fix [#15528], an assertion fired in + * BaseUtilizationTracker.getFileSummary when flushLocalTracker was + * called. This assertion fires if the file being tracked does not + * exist. The fix was to check for valid files in flushLocalTracker. + */ + try { + junitThread.finishTest(); + junitThread = null; + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } + + closeEnv(); + } + + /** + * Checks that DB utilization is repaired when damaged by JE 3.3.74 or + * earlier. Go to some trouble to create a DatabaseImpl with the repair + * done flag not set, and with a DB file summary for a deleted file. + * [#16610] + */ + @Test + public void testDbUtilizationRepair() + throws DatabaseException, IOException { + + openEnv(noLogFileDeleteDetectConfig); + writeData(); + forceCleanOne(); + verifyDeletedFiles(new int[] {0}); + + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + DbFileSummaryMap summaries = dbImpl.getDbFileSummaries(); + + /* New version DB does not need repair. */ + assertTrue(dbImpl.getUtilizationRepairDone()); + + /* Deleted file is absent from summary map. */ + assertNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/, + true /*checkResurrected*/, + envImpl.getFileManager())); + + /* + * Force addition of deleted file to summary map by creating a dummy + * file to prevent assertions from firing. + */ + File dummyFile = new File(env.getHome(), "00000000.jdb"); + assertTrue(dummyFile.createNewFile()); + assertNotNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/, + false /*checkResurrected*/, + envImpl.getFileManager())); + assertTrue(dummyFile.delete()); + + /* Now an entry in the summary map is there for a deleted file.. */ + assertNotNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/, + true /*checkResurrected*/, + envImpl.getFileManager())); + + /* Force the MapLN with the bad entry to be flushed. */ + dbImpl.setDirty(); + env.checkpoint(forceConfig); + closeEnv(); + + /* If the DB is temporary, we can't test it further. */ + if (temporary) { + return; + } + + /* + * When the DB is opened, the repair should not take place, because we + * did not clear the repair done flag above. + */ + openEnv(); + dbImpl = DbInternal.getDbImpl(db); + summaries = dbImpl.getDbFileSummaries(); + assertTrue(dbImpl.getUtilizationRepairDone()); + assertNotNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/, + true /*checkResurrected*/, + envImpl.getFileManager())); + + /* Clear the repair done flag and force the MapLN to be flushed. 
*/ + dbImpl.clearUtilizationRepairDone(); + dbImpl.setDirty(); + env.checkpoint(forceConfig); + closeEnv(); + + /* + * Since the repair done flag was cleared above, when the DB is opened, + * the repair should take place. + */ + openEnv(); + dbImpl = DbInternal.getDbImpl(db); + summaries = dbImpl.getDbFileSummaries(); + assertTrue(dbImpl.getUtilizationRepairDone()); + assertNull(summaries.get(0L /*fileNum*/, true /*adjustMemBudget*/, + true /*checkResurrected*/, + envImpl.getFileManager())); + closeEnv(); + } + + /** + * Force cleaning of N files. + */ + private void forceClean(int nFiles) + throws DatabaseException { + + for (int i = 0; i < nFiles; i += 1) { + envImpl.getCleaner().doClean(false, // cleanMultipleFiles + true); // forceCleaning + } + /* To force file deletion a checkpoint is necessary. */ + env.checkpoint(forceConfig); + } + + /** + * Force cleaning of one file. + */ + private void forceCleanOne() + throws DatabaseException { + + envImpl.getCleaner().doClean(false, // cleanMultipleFiles + true); // forceCleaning + /* To force file deletion a checkpoint is necessary. */ + env.checkpoint(forceConfig); + } + + /** + * Do routine cleaning just as normally done via the cleaner daemon, and + * return the number of files cleaned. + */ + private int cleanRoutine() + throws DatabaseException { + + return env.cleanLog(); + } + + /** + * Use transactions when not testing deferred write or temporary DBs. + */ + private boolean isDeferredWriteMode() { + return deferredWrite || temporary; + } + + /** + * Forces eviction when a temporary database is used, since otherwise data + * will not be flushed. + */ + private void forceEvictionIfTemporary() + throws DatabaseException { + + if (temporary) { + EnvironmentMutableConfig config = env.getMutableConfig(); + long saveSize = config.getCacheSize(); + config.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE * 2); + env.setMutableConfig(config); + env.evictMemory(); + config.setCacheSize(saveSize); + env.setMutableConfig(config); + } + } + + /** + * Writes data to create INITIAL_FILES number of files, storing the first + * key for each file in the firstKeysInFiles list. One extra file is + * actually created, to ensure that the firstActiveLSN is not in any of + * INITIAL_FILES files. + */ + private void writeData() + throws DatabaseException { + + int firstFile = + (int) envImpl.getFileManager().getLastFileNum().longValue(); + assertEquals(0, firstFile); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]); + existingKeys = new HashSet(); + + if (isDeferredWriteMode()) { + firstKeysInFiles = null; + + Cursor cursor = db.openCursor(null, null); + + int maxKey = dups ? + (temporary ? 
INITIAL_KEYS_DUPS_TEMP : INITIAL_KEYS_DUPS) : + INITIAL_KEYS; + for (int nextKey = 0; nextKey < maxKey; nextKey += 1) { + + OperationStatus status; + if (dups) { + key.setData(MAIN_KEY_FOR_DUPS); + data.setData(TestUtils.getTestArray(nextKey)); + status = cursor.putNoDupData(key, data); + } else { + key.setData(TestUtils.getTestArray(nextKey)); + data.setData(new byte[DATA_SIZE]); + status = cursor.putNoOverwrite(key, data); + } + + assertEquals(OperationStatus.SUCCESS, status); + existingKeys.add(new Integer(nextKey)); + } + + cursor.close(); + } else { + firstKeysInFiles = new ArrayList(); + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + int fileNum = -1; + + for (int nextKey = 0; fileNum < INITIAL_FILES; nextKey += 1) { + + OperationStatus status; + if (dups) { + key.setData(MAIN_KEY_FOR_DUPS); + data.setData(TestUtils.getTestArray(nextKey)); + status = cursor.putNoDupData(key, data); + } else { + key.setData(TestUtils.getTestArray(nextKey)); + data.setData(new byte[DATA_SIZE]); + status = cursor.putNoOverwrite(key, data); + } + + assertEquals(OperationStatus.SUCCESS, status); + existingKeys.add(new Integer(nextKey)); + + long lsn = getLsn(cursor); + if (DbLsn.getFileNumber(lsn) != fileNum) { + assertTrue(fileNum < DbLsn.getFileNumber(lsn)); + fileNum = (int) DbLsn.getFileNumber(lsn); + assertEquals(fileNum, firstKeysInFiles.size()); + firstKeysInFiles.add(nextKey); + } + } + //System.out.println("Num keys: " + existingKeys.size()); + + cursor.close(); + txn.commit(); + } + + forceEvictionIfTemporary(); + env.checkpoint(forceConfig); + + int lastFile = + (int) envImpl.getFileManager().getLastFileNum().longValue(); + if (temporary) { + assertTrue(String.valueOf(lastFile), + lastFile >= INITIAL_FILES_TEMP); + } else { + assertTrue(String.valueOf(lastFile), + lastFile >= INITIAL_FILES); + } + //System.out.println("last file " + lastFile); + } + + /** + * Deletes the specified keys. + */ + private void deleteData(int firstKey, int keyCount) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + Transaction txn = !isDeferredWriteMode() ? + env.beginTransaction(null, null) : null; + Cursor cursor = db.openCursor(txn, null); + + for (int i = 0; i < keyCount; i += 1) { + int nextKey = firstKey + i; + OperationStatus status; + if (dups) { + key.setData(MAIN_KEY_FOR_DUPS); + data.setData(TestUtils.getTestArray(nextKey)); + status = cursor.getSearchBoth(key, data, null); + } else { + key.setData(TestUtils.getTestArray(nextKey)); + status = cursor.getSearchKey(key, data, null); + } + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + existingKeys.remove(new Integer(nextKey)); + } + + cursor.close(); + if (txn != null) { + txn.commit(); + } + forceEvictionIfTemporary(); + } + + /** + * Updates the specified keys. + */ + private void updateData(int firstKey, int keyCount) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + Transaction txn = !isDeferredWriteMode() ? 
+ env.beginTransaction(null, null) : null; + Cursor cursor = db.openCursor(txn, null); + + for (int i = 0; i < keyCount; i += 1) { + int nextKey = firstKey + i; + OperationStatus status; + if (dups) { + key.setData(MAIN_KEY_FOR_DUPS); + data.setData(TestUtils.getTestArray(nextKey)); + status = cursor.getSearchBoth(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(MAIN_KEY_FOR_DUPS.length, key.getSize()); + assertEquals(nextKey, TestUtils.getTestVal(data.getData())); + } else { + key.setData(TestUtils.getTestArray(nextKey)); + status = cursor.getSearchKey(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(nextKey, TestUtils.getTestVal(key.getData())); + assertEquals(DATA_SIZE, data.getSize()); + } + status = cursor.putCurrent(data); + assertEquals(OperationStatus.SUCCESS, status); + } + + cursor.close(); + if (txn != null) { + txn.commit(); + } + + /* + * For deferred write and temp DBs, flush them to produce a situation + * comparable to other modes. + */ + forceEvictionIfTemporary(); + if (deferredWrite) { + db.sync(); + } + } + + /** + * Verifies that the data written by writeData can be read. + */ + private void verifyData() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + Transaction txn = !isDeferredWriteMode() ? + env.beginTransaction(null, null) : null; + Cursor cursor = db.openCursor(txn, null); + + for (Iterator i = existingKeys.iterator(); i.hasNext();) { + int nextKey = ((Integer) i.next()).intValue(); + OperationStatus status; + if (dups) { + key.setData(MAIN_KEY_FOR_DUPS); + data.setData(TestUtils.getTestArray(nextKey)); + status = cursor.getSearchBoth(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(MAIN_KEY_FOR_DUPS.length, key.getSize()); + assertEquals(nextKey, TestUtils.getTestVal(data.getData())); + } else { + key.setData(TestUtils.getTestArray(nextKey)); + status = cursor.getSearchKey(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(nextKey, TestUtils.getTestVal(key.getData())); + assertEquals(DATA_SIZE, data.getSize()); + } + } + + cursor.close(); + if (txn != null) { + txn.commit(); + } + } + + /** + * Checks that all log files exist except those specified. + */ + private void verifyDeletedFiles(int[] shouldNotExist) { + Long lastNum = envImpl.getFileManager().getLastFileNum(); + for (int i = 0; i <= (int) lastNum.longValue(); i += 1) { + boolean shouldExist = true; + if (shouldNotExist != null) { + for (int j = 0; j < shouldNotExist.length; j += 1) { + if (i == shouldNotExist[j]) { + shouldExist = false; + break; + } + } + } + String name = envImpl.getFileManager(). + getFullFileName(i, FileManager.JE_SUFFIX); + if (shouldExist != new File(name).exists()) { + fail("file=" + i + " shouldExist=" + shouldExist + + " filesThatShouldNotExist=" + + Arrays.toString(shouldNotExist) + + " filesThatDoExist=" + Arrays.toString( + envImpl.getFileManager().getAllFileNumbers())); + } + } + } + + /** + * Returns the first deleted file number or -1 if none. + */ + private int getNextDeletedFile(int afterFile) { + Long lastNum = envImpl.getFileManager().getLastFileNum(); + for (int i = afterFile + 1; i <= (int) lastNum.longValue(); i += 1) { + String name = envImpl.getFileManager(). + getFullFileName(i, FileManager.JE_SUFFIX); + if (!(new File(name).exists())) { + return i; + } + } + return -1; + } + + /** + * Gets the LSN at the cursor position, using internal methods. 
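+ * Usage sketch, as in writeData: the file number of the slot's LSN + * identifies the log file that owns the record: + * long lsn = getLsn(cursor); + * int file = (int) DbLsn.getFileNumber(lsn);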
+ */ + private long getLsn(Cursor cursor) { + CursorImpl impl = DbTestProxy.dbcGetCursorImpl(cursor); + BIN bin = impl.getBIN(); + int index = impl.getIndex(); + assertNotNull(bin); + assertTrue(index >= 0); + long lsn = bin.getLsn(index); + assertTrue(lsn != DbLsn.NULL_LSN); + return lsn; + } +} diff --git a/test/com/sleepycat/je/cleaner/INUtilizationTest.java b/test/com/sleepycat/je/cleaner/INUtilizationTest.java new file mode 100644 index 0000000..870c391 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/INUtilizationTest.java @@ -0,0 +1,1326 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.SearchFileReader; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; + +import org.junit.After; +import org.junit.Test; + +/** + * Test utilization counting of INs. + */ +public class INUtilizationTest extends CleanerTestBase { + + private static final String DB_NAME = "foo"; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private EnvironmentImpl envImpl; + private Database db; + private DatabaseImpl dbImpl; + private Transaction txn; + private Cursor cursor; + private boolean dups = false; + private DatabaseEntry keyEntry = new DatabaseEntry(); + private DatabaseEntry dataEntry = new DatabaseEntry(); + private boolean truncateOrRemoveDone; + + private boolean embeddedLNs = false; + + public INUtilizationTest() { + envMultiSubDir = false; + } + + @After + public void tearDown() + throws Exception { + + super.tearDown(); + envImpl = null; + db = null; + dbImpl = null; + txn = null; + cursor = null; + keyEntry = null; + dataEntry = null; + } + + /** + * Opens the environment and database. + */ + @SuppressWarnings("deprecation") + private void openEnv() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(config); + config.setTransactional(true); + config.setTxnNoSync(true); + config.setAllowCreate(true); + /* Do not run the daemons. 
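+ * If the cleaner, checkpointer, evictor or compressor ran in the + * background, they would log INs asynchronously and make the per-file + * obsolete counts checked by these tests nondeterministic.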
*/ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + /* Use a tiny log file size to write one node per file. */ + config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(64)); + /* With tiny files we can't log expiration profile records. */ + DbInternal.setCreateEP(config, false); + if (envMultiSubDir) { + config.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, config); + envImpl = DbInternal.getNonNullEnvImpl(env); + + /* Speed up test that uses lots of very small files. */ + envImpl.getFileManager().setSyncAtFileEnd(false); + + openDb(); + + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + } + + /** + * Opens the database. + */ + private void openDb() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + db = env.openDatabase(null, DB_NAME, dbConfig); + dbImpl = DbInternal.getDbImpl(db); + } + + private void closeEnv(boolean doCheckpoint) + throws DatabaseException { + + closeEnv(doCheckpoint, + true, // expectAccurateObsoleteLNCount + true); // expectAccurateObsoleteLNSize + } + + private void closeEnv(boolean doCheckpoint, + boolean expectAccurateObsoleteLNCount) + throws DatabaseException { + + closeEnv(doCheckpoint, + expectAccurateObsoleteLNCount, + expectAccurateObsoleteLNCount); + } + + /** + * Closes the environment and database. + * + * @param expectAccurateObsoleteLNCount should be false when a deleted LN + * is not counted properly by recovery because its parent INs were flushed + * and the obsolete LN was not found in the tree. + * + * @param expectAccurateObsoleteLNSize should be false when a tree walk is + * performed for truncate/remove or an abortLsn is counted by recovery. + */ + private void closeEnv(boolean doCheckpoint, + boolean expectAccurateObsoleteLNCount, + boolean expectAccurateObsoleteLNSize) + throws DatabaseException { + + /* + * We pass expectAccurateDbUtilization as false when + * truncateOrRemoveDone, because the database utilization info for that + * database is now gone. + */ + VerifyUtils.verifyUtilization + (envImpl, expectAccurateObsoleteLNCount, + expectAccurateObsoleteLNSize, + !truncateOrRemoveDone); // expectAccurateDbUtilization + + if (db != null) { + db.close(); + db = null; + dbImpl = null; + } + if (envImpl != null) { + envImpl.close(doCheckpoint); + envImpl = null; + env = null; + } + } + + /** + * Initial setup for all tests -- open env, put one record (or two for + * dups) and sync. + */ + private void openAndWriteDatabase() + throws DatabaseException { + + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Put one record. */ + IntegerBinding.intToEntry(0, keyEntry); + IntegerBinding.intToEntry(0, dataEntry); + cursor.put(keyEntry, dataEntry); + + /* Add a duplicate. */ + if (dups) { + IntegerBinding.intToEntry(1, dataEntry); + cursor.put(keyEntry, dataEntry); + } + + /* Commit the txn to avoid crossing the checkpoint boundary. */ + cursor.close(); + txn.commit(); + + /* Checkpoint to the root so nothing is dirty. */ + env.sync(); + + /* Open a txn and cursor for use by the test case. 
*/ + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* If we added a duplicate, move cursor back to the first record. */ + cursor.getFirst(keyEntry, dataEntry, null); + + /* Expect that BIN and parent IN files are not obsolete. */ + long binFile = getBINFile(cursor); + long inFile = getINFile(cursor); + expectObsolete(binFile, false); + expectObsolete(inFile, false); + } + + /** + * Tests that BIN and IN utilization counting works. + */ + @Test + public void testBasic() + throws DatabaseException { + + openAndWriteDatabase(); + + long binFile = getBINFile(cursor); + long inFile = getINFile(cursor); + + /* Update to make BIN dirty. */ + cursor.put(keyEntry, dataEntry); + + /* Checkpoint */ + env.checkpoint(forceConfig); + + /* After checkpoint, expect BIN and IN are obsolete. */ + expectObsolete(binFile, true); + expectObsolete(inFile, true); + assertTrue(binFile != getBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + + /* After second checkpoint, no changes. */ + env.checkpoint(forceConfig); + + /* Both BIN and IN are obsolete. */ + expectObsolete(binFile, true); + expectObsolete(inFile, true); + assertTrue(binFile != getBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + + /* Expect that new files are not obsolete. */ + long binFile2 = getBINFile(cursor); + long inFile2 = getINFile(cursor); + expectObsolete(binFile2, false); + expectObsolete(inFile2, false); + + cursor.close(); + txn.commit(); + closeEnv(true); + } + + /** + * Performs testBasic with duplicates. + */ + @Test + public void testBasicDup() + throws DatabaseException { + + dups = true; + testBasic(); + } + + /** + * Tests that BIN-delta utilization counting works. + */ + @Test + public void testBINDeltas() + throws DatabaseException { + + /* + * Insert 4 additional records so there will be 5 slots total. Then one + * modified slot (less than 25% of the total) will cause a delta to be + * logged. Two modified slots (more than 25% of the total) will cause + * a full version to be logged. + */ + openAndWriteDatabase(); + for (int i = 1; i <= 4; i += 1) { + IntegerBinding.intToEntry(i, keyEntry); + IntegerBinding.intToEntry(0, dataEntry); + db.put(txn, keyEntry, dataEntry); + } + env.sync(); + long fullBinFile = getFullBINFile(cursor); + long deltaBinFile = getDeltaBINFile(cursor); + long inFile = getINFile(cursor); + assertEquals(-1, deltaBinFile); + + /* Update first record to make one BIN slot dirty. */ + IntegerBinding.intToEntry(0, keyEntry); + cursor.put(keyEntry, dataEntry); + + /* Checkpoint, write BIN-delta. */ + env.checkpoint(forceConfig); + + /* After checkpoint, expect only IN is obsolete. */ + expectObsolete(fullBinFile, false); + expectObsolete(inFile, true); + assertTrue(fullBinFile == getFullBINFile(cursor)); + assertTrue(deltaBinFile != getDeltaBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + + /* After second checkpoint, no changes. */ + env.checkpoint(forceConfig); + expectObsolete(fullBinFile, false); + expectObsolete(inFile, true); + assertTrue(fullBinFile == getFullBINFile(cursor)); + assertTrue(deltaBinFile != getDeltaBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + + fullBinFile = getFullBINFile(cursor); + deltaBinFile = getDeltaBINFile(cursor); + inFile = getINFile(cursor); + assertTrue(deltaBinFile != -1); + + /* Update first record again, checkpoint to write another delta. */ + IntegerBinding.intToEntry(0, keyEntry); + cursor.put(keyEntry, dataEntry); + + /* After checkpoint, expect IN and first delta are obsolete. 
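+ * The new delta supersedes the old one, but the full BIN version is + * still needed to reconstruct the BIN, so it must remain non-obsolete.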
*/ + env.checkpoint(forceConfig); + expectObsolete(fullBinFile, false); + expectObsolete(deltaBinFile, true); + expectObsolete(inFile, true); + assertTrue(fullBinFile == getFullBINFile(cursor)); + assertTrue(deltaBinFile != getDeltaBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + + fullBinFile = getFullBINFile(cursor); + deltaBinFile = getDeltaBINFile(cursor); + inFile = getINFile(cursor); + assertTrue(deltaBinFile != -1); + + /* Update two records, checkpoint to write a full BIN version. */ + IntegerBinding.intToEntry(0, keyEntry); + cursor.put(keyEntry, dataEntry); + IntegerBinding.intToEntry(1, keyEntry); + cursor.put(keyEntry, dataEntry); + + /* After checkpoint, expect IN, full BIN, last delta are obsolete. */ + env.checkpoint(forceConfig); + expectObsolete(fullBinFile, true); + expectObsolete(deltaBinFile, true); + expectObsolete(inFile, true); + assertTrue(fullBinFile != getFullBINFile(cursor)); + assertTrue(deltaBinFile != getDeltaBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + assertEquals(-1, getDeltaBINFile(cursor)); + + /* Expect that new files are not obsolete. */ + long binFile2 = getBINFile(cursor); + long inFile2 = getINFile(cursor); + expectObsolete(binFile2, false); + expectObsolete(inFile2, false); + + cursor.close(); + txn.commit(); + closeEnv(true); + } + + /** + * Performs testBINDeltas with duplicates. + */ + @Test + public void testBINDeltasDup() + throws DatabaseException { + + dups = true; + testBINDeltas(); + } + + /** + * Similar to testBasic, but logs INs explicitly and performs recovery to + * ensure utilization recovery works. + */ + @Test + public void testRecovery() + throws DatabaseException { + + openAndWriteDatabase(); + long binFile = getBINFile(cursor); + long inFile = getINFile(cursor); + + /* Close normally and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(true); + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect BIN and IN files have not changed. */ + assertEquals(binFile, getBINFile(cursor)); + assertEquals(inFile, getINFile(cursor)); + expectObsolete(binFile, false); + expectObsolete(inFile, false); + + /* + * Log explicitly since we have no way to do a partial checkpoint. + * The BIN is logged provisionally and the IN non-provisionally. + */ + TestUtils.logBINAndIN(env, cursor); + + /* Expect to obsolete the BIN and IN. */ + expectObsolete(binFile, true); + expectObsolete(inFile, true); + assertTrue(binFile != getBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + + /* Save current BIN and IN files. */ + long binFile2 = getBINFile(cursor); + long inFile2 = getINFile(cursor); + expectObsolete(binFile2, false); + expectObsolete(inFile2, false); + + /* Shutdown without a checkpoint and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(false); + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Sync to make all INs non-dirty. */ + env.sync(); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect that recovery counts BIN and IN as obsolete. */ + expectObsolete(binFile, true); + expectObsolete(inFile, true); + assertTrue(binFile != getBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + + /* + * Even though it is provisional, expect that current BIN is not + * obsolete because it is not part of partial checkpoint. 
This is + * similar to what happens with a split. The current IN is not + * obsolete either (nor is it provisional). + */ + assertTrue(binFile2 == getBINFile(cursor)); + assertTrue(inFile2 == getINFile(cursor)); + expectObsolete(binFile2, false); + expectObsolete(inFile2, false); + + /* Update to make BIN dirty. */ + cursor.put(keyEntry, dataEntry); + + /* Check current BIN and IN files. */ + assertTrue(binFile2 == getBINFile(cursor)); + assertTrue(inFile2 == getINFile(cursor)); + expectObsolete(binFile2, false); + expectObsolete(inFile2, false); + + /* Close normally and reopen to cause checkpoint of dirty BIN/IN. */ + cursor.close(); + txn.commit(); + closeEnv(true); + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect BIN and IN were checkpointed during close. */ + assertTrue(binFile2 != getBINFile(cursor)); + assertTrue(inFile2 != getINFile(cursor)); + expectObsolete(binFile2, true); + expectObsolete(inFile2, true); + + /* After second checkpoint, no change. */ + env.checkpoint(forceConfig); + + /* Both BIN and IN are obsolete. */ + assertTrue(binFile2 != getBINFile(cursor)); + assertTrue(inFile2 != getINFile(cursor)); + expectObsolete(binFile2, true); + expectObsolete(inFile2, true); + + cursor.close(); + txn.commit(); + closeEnv(true); + } + + /** + * Performs testRecovery with duplicates. + */ + @Test + public void testRecoveryDup() + throws DatabaseException { + + dups = true; + testRecovery(); + } + + /** + * Similar to testRecovery, but tests BIN-deltas. + */ + @Test + public void testBINDeltaRecovery() + throws DatabaseException { + + /* + * Insert 4 additional records so there will be 5 slots total. Then one + * modified slot (less than 25% of the total) will cause a delta to be + * logged. Two modified slots (more than 25% of the total) will cause + * a full version to be logged. + */ + openAndWriteDatabase(); + for (int i = 1; i <= 4; i += 1) { + IntegerBinding.intToEntry(i, keyEntry); + IntegerBinding.intToEntry(0, dataEntry); + db.put(txn, keyEntry, dataEntry); + } + env.sync(); + long fullBinFile = getFullBINFile(cursor); + long deltaBinFile = getDeltaBINFile(cursor); + long inFile = getINFile(cursor); + assertEquals(-1, deltaBinFile); + + /* Close normally and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(true); + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect BIN and IN files have not changed. */ + assertEquals(fullBinFile, getFullBINFile(cursor)); + assertEquals(deltaBinFile, getDeltaBINFile(cursor)); + assertEquals(inFile, getINFile(cursor)); + expectObsolete(fullBinFile, false); + expectObsolete(inFile, false); + + /* Update first record to make one BIN slot dirty. */ + IntegerBinding.intToEntry(0, keyEntry); + cursor.put(keyEntry, dataEntry); + + /* + * Log explicitly since we have no way to do a partial checkpoint. + * The BIN-delta is logged provisionally and the IN non-provisionally. + */ + TestUtils.logBINAndIN(env, cursor, true /*allowDeltas*/); + + /* Expect to obsolete the IN but not the full BIN. */ + expectObsolete(fullBinFile, false); + expectObsolete(inFile, true); + assertEquals(fullBinFile, getFullBINFile(cursor)); + + /* Save current BIN-delta and IN files. 
*/ + long deltaBinFile2 = getDeltaBINFile(cursor); + long inFile2 = getINFile(cursor); + assertTrue(deltaBinFile != deltaBinFile2); + assertTrue(inFile != inFile2); + expectObsolete(deltaBinFile2, false); + expectObsolete(inFile2, false); + + /* Shutdown without a checkpoint and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(false, // doCheckpoint + true, // expectAccurateObsoleteLNCount + false); // expectAccurateObsoleteLNSize + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Sync to make all INs non-dirty. */ + env.sync(); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect that recovery counts only IN as obsolete. */ + expectObsolete(inFile, true); + expectObsolete(fullBinFile, false); + expectObsolete(deltaBinFile2, false); + expectObsolete(inFile2, false); + assertEquals(fullBinFile, getFullBINFile(cursor)); + assertEquals(deltaBinFile2, getDeltaBINFile(cursor)); + assertEquals(inFile2, getINFile(cursor)); + + /* Reset variables to current versions. */ + deltaBinFile = deltaBinFile2; + inFile = inFile2; + + /* Update same slot and write another delta. */ + IntegerBinding.intToEntry(0, keyEntry); + cursor.put(keyEntry, dataEntry); + TestUtils.logBINAndIN(env, cursor, true /*allowDeltas*/); + + /* Expect to obsolete the BIN-delta and IN, but not the full BIN. */ + expectObsolete(fullBinFile, false); + expectObsolete(deltaBinFile, true); + expectObsolete(inFile, true); + assertEquals(fullBinFile, getFullBINFile(cursor)); + + /* Save current BIN-delta and IN files. */ + deltaBinFile2 = getDeltaBINFile(cursor); + inFile2 = getINFile(cursor); + assertTrue(deltaBinFile != deltaBinFile2); + assertTrue(inFile != inFile2); + expectObsolete(deltaBinFile2, false); + expectObsolete(inFile2, false); + + /* Shutdown without a checkpoint and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(false, // doCheckpoint + true, // expectAccurateObsoleteLNCount + false); // expectAccurateObsoleteLNSize + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Sync to make all INs non-dirty. */ + env.sync(); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect that recovery counts only BIN-delta and IN as obsolete. */ + expectObsolete(fullBinFile, false); + expectObsolete(deltaBinFile, true); + expectObsolete(inFile, true); + expectObsolete(deltaBinFile2, false); + expectObsolete(inFile2, false); + assertEquals(fullBinFile, getFullBINFile(cursor)); + assertEquals(deltaBinFile2, getDeltaBINFile(cursor)); + assertEquals(inFile2, getINFile(cursor)); + + /* Reset variables to current versions. */ + deltaBinFile = deltaBinFile2; + inFile = inFile2; + + /* Update two records and write a full BIN version. */ + IntegerBinding.intToEntry(0, keyEntry); + cursor.put(keyEntry, dataEntry); + IntegerBinding.intToEntry(1, keyEntry); + cursor.put(keyEntry, dataEntry); + TestUtils.logBINAndIN(env, cursor, true /*allowDeltas*/); + + /* Expect to obsolete the full BIN, the BIN-delta and the IN. */ + expectObsolete(fullBinFile, true); + expectObsolete(deltaBinFile, true); + expectObsolete(inFile, true); + + /* Save current BIN, BIN-delta and IN files. 
*/ + long fullBinFile2 = getFullBINFile(cursor); + deltaBinFile2 = getDeltaBINFile(cursor); + inFile2 = getINFile(cursor); + assertTrue(fullBinFile != fullBinFile2); + assertTrue(deltaBinFile != deltaBinFile2); + assertTrue(inFile != inFile2); + assertEquals(DbLsn.NULL_LSN, deltaBinFile2); + expectObsolete(fullBinFile2, false); + expectObsolete(inFile2, false); + + /* Shutdown without a checkpoint and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(false, // doCheckpoint + true, // expectAccurateObsoleteLNCount + false); // expectAccurateObsoleteLNSize + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Sync to make all INs non-dirty. */ + env.sync(); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect that recovery counts BIN, BIN-delta and IN as obsolete. */ + expectObsolete(fullBinFile, true); + expectObsolete(deltaBinFile, true); + expectObsolete(inFile, true); + expectObsolete(fullBinFile2, false); + assertEquals(DbLsn.NULL_LSN, deltaBinFile2); + expectObsolete(inFile2, false); + assertEquals(deltaBinFile2, getDeltaBINFile(cursor)); + assertEquals(inFile2, getINFile(cursor)); + + cursor.close(); + txn.commit(); + closeEnv(false, // doCheckpoint + true, // expectAccurateObsoleteLNCount + false); // expectAccurateObsoleteLNSize + } + + /** + * Performs testRecovery with duplicates. + */ + @Test + public void testBINDeltaRecoveryDup() + throws DatabaseException { + + dups = true; + testBINDeltaRecovery(); + } + + /** + * Tests that in a partial checkpoint (CkptStart with no CkptEnd) all + * provisional INs are counted as obsolete. + */ + @Test + public void testPartialCheckpoint() + throws DatabaseException, IOException { + + openAndWriteDatabase(); + long binFile = getBINFile(cursor); + long inFile = getINFile(cursor); + + /* Close with partial checkpoint and reopen. */ + cursor.close(); + txn.commit(); + performPartialCheckpoint(true); // truncateUtilizationInfo + + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect BIN and IN files have not changed. */ + assertEquals(binFile, getBINFile(cursor)); + assertEquals(inFile, getINFile(cursor)); + expectObsolete(binFile, false); + expectObsolete(inFile, false); + + /* Update to make BIN dirty. */ + cursor.put(keyEntry, dataEntry); + + /* Force IN dirty so that BIN is logged provisionally. */ + TestUtils.getIN(TestUtils.getBIN(cursor)).setDirty(true); + + /* Check current BIN and IN files. */ + assertTrue(binFile == getBINFile(cursor)); + assertTrue(inFile == getINFile(cursor)); + expectObsolete(binFile, false); + expectObsolete(inFile, false); + + /* Close with partial checkpoint and reopen. */ + cursor.close(); + txn.commit(); + performPartialCheckpoint(true); // truncateUtilizationInfo + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect BIN and IN files are obsolete. */ + assertTrue(binFile != getBINFile(cursor)); + assertTrue(inFile != getINFile(cursor)); + expectObsolete(binFile, true); + expectObsolete(inFile, true); + + /* + * Expect that the current BIN is obsolete because it was provisional, + * and provisional nodes following CkptStart are counted obsolete + * even if that is sometimes incorrect. 
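+ * (The second half of this test demonstrates that imprecision: when the
+ * FileSummaryLNs are not truncated, the prior BIN file ends up counted
+ * obsolete twice.)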
The parent IN file is not + * obsolete because it is not provisional. + */ + long binFile2 = getBINFile(cursor); + long inFile2 = getINFile(cursor); + expectObsolete(binFile2, true); + expectObsolete(inFile2, false); + + /* + * Now repeat the test above but do not truncate the FileSummaryLNs. + * The counting will be accurate because the FileSummaryLNs override + * what is counted manually during recovery. + */ + + /* Update to make BIN dirty. */ + cursor.put(keyEntry, dataEntry); + + /* Close with partial checkpoint and reopen. */ + cursor.close(); + txn.commit(); + performPartialCheckpoint(false, // truncateUtilizationInfo + true, // expectAccurateObsoleteLNCount + false); // expectAccurateObsoleteLNSize + + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* The prior BIN file is now double-counted as obsolete. */ + assertTrue(binFile2 != getBINFile(cursor)); + assertTrue(inFile2 != getINFile(cursor)); + expectObsolete(binFile2, 2); + expectObsolete(inFile2, 1); + + /* Expect current BIN and IN files are not obsolete. */ + binFile2 = getBINFile(cursor); + inFile2 = getINFile(cursor); + expectObsolete(binFile2, false); + expectObsolete(inFile2, false); + + cursor.close(); + txn.commit(); + closeEnv(true, // doCheckpoint + true, // expectAccurateObsoleteLNCount + false); // expectAccurateObsoleteLNSize + } + + /** + * Performs testPartialCheckpoint with duplicates. + */ + @Test + public void testPartialCheckpointDup() + throws DatabaseException, IOException { + + dups = true; + testPartialCheckpoint(); + } + + /** + * Tests that deleting a subtree (by deleting the last LN in a BIN) is + * counted correctly. + */ + @Test + public void testDelete() + throws DatabaseException, IOException { + + openAndWriteDatabase(); + long binFile = getBINFile(cursor); + long inFile = getINFile(cursor); + + /* Close normally and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(true); + openEnv(); + txn = env.beginTransaction(null, null); + cursor = db.openCursor(txn, null); + + /* Position cursor to load BIN and IN. */ + cursor.getSearchKey(keyEntry, dataEntry, null); + + /* Expect BIN and IN are still not obsolete. */ + assertEquals(binFile, getBINFile(cursor)); + assertEquals(inFile, getINFile(cursor)); + expectObsolete(binFile, false); + expectObsolete(inFile, false); + + /* + * Add records until we move to the next BIN, so that the compressor + * would not need to delete the root in order to delete the BIN. + */ + if (dups) { + int dataVal = 1; + while (binFile == getBINFile(cursor)) { + dataVal += 1; + IntegerBinding.intToEntry(dataVal, dataEntry); + cursor.put(keyEntry, dataEntry); + } + } else { + int keyVal = 0; + while (binFile == getBINFile(cursor)) { + keyVal += 1; + IntegerBinding.intToEntry(keyVal, keyEntry); + cursor.put(keyEntry, dataEntry); + } + } + binFile = getBINFile(cursor); + inFile = getINFile(cursor); + + /* Delete all records in the last BIN. */ + while (binFile == getBINFile(cursor)) { + cursor.delete(); + cursor.getLast(keyEntry, dataEntry, null); + } + + /* Compressor daemon is not running -- they're not obsolete yet. */ + expectObsolete(binFile, false); + expectObsolete(inFile, false); + + /* Close cursor and compress. */ + cursor.close(); + txn.commit(); + env.compress(); + + /* Without a checkpoint, the deleted nodes are not obsolete yet. 
*/ + expectObsolete(binFile, false); + expectObsolete(inFile, false); + + /* + * Checkpoint to flush dirty INs and count pending obsolete LSNs. + * Then expect BIN and IN to be obsolete. + */ + env.checkpoint(forceConfig); + expectObsolete(binFile, true); + expectObsolete(inFile, true); + + /* Close with partial checkpoint and reopen. */ + performPartialCheckpoint(true); // truncateUtilizationInfo + openEnv(); + + /* + * Expect both files to be obsolete after recovery, because the + * FileSummaryLN and MapLN were written prior to the checkpoint during + * compression. + */ + expectObsolete(binFile, true); + expectObsolete(inFile, true); + + /* + * expectAccurateObsoleteLNCount is false because the deleted LN is not + * counted obsolete correctly as described in RecoveryManager + * redoUtilizationInfo. + */ + closeEnv(true, // doCheckpoint + false); // expectAccurateObsoleteLNCount + } + + /** + * Performs testDelete with duplicates. + */ + @Test + public void testDeleteDup() + throws DatabaseException, IOException { + + dups = true; + testDelete(); + } + + /** + * Tests that truncating a database is counted correctly. + * Tests recovery also. + */ + @Test + public void testTruncate() + throws DatabaseException, IOException { + + /* Expect inaccurate LN sizes only if we force a tree walk. */ + final boolean expectAccurateObsoleteLNSize = + !DatabaseImpl.forceTreeWalkForTruncateAndRemove; + + openAndWriteDatabase(); + long binFile = getBINFile(cursor); + long inFile = getINFile(cursor); + + /* Close normally and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(true, // doCheckpoint + true, // expectAccurateObsoleteLNCount + expectAccurateObsoleteLNSize); + openEnv(); + db.close(); + db = null; + /* Truncate. */ + txn = env.beginTransaction(null, null); + env.truncateDatabase(txn, DB_NAME, false /* returnCount */); + truncateOrRemoveDone = true; + txn.commit(); + + /* + * Expect BIN and IN are obsolete. Do not check DbFileSummary when we + * truncate/remove, since the old DatabaseImpl is gone. + */ + expectObsolete(binFile, true, false /*checkDbFileSummary*/); + expectObsolete(inFile, true, false /*checkDbFileSummary*/); + + /* Close with partial checkpoint and reopen. */ + performPartialCheckpoint(true, // truncateUtilizationInfo + true, // expectAccurateObsoleteLNCount + expectAccurateObsoleteLNSize); + openEnv(); + + /* Expect BIN and IN are counted obsolete during recovery. */ + expectObsolete(binFile, true, false /*checkDbFileSummary*/); + expectObsolete(inFile, true, false /*checkDbFileSummary*/); + + /* + * expectAccurateObsoleteLNSize is false because the size of the + * deleted NameLN is not counted during recovery, as with other + * abortLsns as described in RecoveryManager redoUtilizationInfo. + */ + closeEnv(true, // doCheckpoint + true, // expectAccurateObsoleteLNCount + false); // expectAccurateObsoleteLNSize + } + + /** + * Tests that truncating a database is counted correctly. + * Tests recovery also. + */ + @Test + public void testRemove() + throws DatabaseException, IOException { + + /* Expect inaccurate LN sizes only if we force a tree walk. */ + final boolean expectAccurateObsoleteLNSize = + !DatabaseImpl.forceTreeWalkForTruncateAndRemove; + + openAndWriteDatabase(); + long binFile = getBINFile(cursor); + long inFile = getINFile(cursor); + + /* Close normally and reopen. */ + cursor.close(); + txn.commit(); + closeEnv(true, // doCheckpoint + true, // expectAccurateObsoleteLNCount + expectAccurateObsoleteLNSize); + openEnv(); + + /* Remove. 
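+ * (The handle is closed before the remove because removeDatabase requires
+ * that all open handles for the database be closed first.)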
*/ + db.close(); + db = null; + txn = env.beginTransaction(null, null); + env.removeDatabase(txn, DB_NAME); + truncateOrRemoveDone = true; + txn.commit(); + + /* + * Expect BIN and IN are obsolete. Do not check DbFileSummary when we + * truncate/remove, since the old DatabaseImpl is gone. + */ + expectObsolete(binFile, true, false /*checkDbFileSummary*/); + expectObsolete(inFile, true, false /*checkDbFileSummary*/); + + /* Close with partial checkpoint and reopen. */ + performPartialCheckpoint(true, // truncateUtilizationInfo + true, // expectAccurateObsoleteLNCount + expectAccurateObsoleteLNSize); + openEnv(); + + /* Expect BIN and IN are counted obsolete during recovery. */ + expectObsolete(binFile, true, false /*checkDbFileSummary*/); + expectObsolete(inFile, true, false /*checkDbFileSummary*/); + + /* + * expectAccurateObsoleteLNCount is false because the deleted NameLN is + * not counted obsolete correctly as described in RecoveryManager + * redoUtilizationInfo. + */ + closeEnv(true, // doCheckpoint + false); // expectAccurateObsoleteLNCount + } + + /* + * The xxxForceTreeWalk tests set the DatabaseImpl + * forceTreeWalkForTruncateAndRemove field to true, which will force a walk + * of the tree to count utilization during truncate/remove, rather than + * using the per-database info. This is used to test the "old technique" + * for counting utilization, which is now used only if the database was + * created prior to log version 6. + */ + + @Test + public void testTruncateForceTreeWalk() + throws Exception { + + /* + * We cannot use forceTreeWalkForTruncateAndRemove with embedded LNs + * because we don't have the lastLoggedFile info in the BIN slots. + * This info is required by the SortedLSNTreeWalker used in + * DatabaseImpl.finishDeleteProcessing(). + */ + if (embeddedLNs) { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = true; + } + + try { + testTruncate(); + } finally { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = false; + } + } + + @Test + public void testRemoveForceTreeWalk() + throws Exception { + + /* + * We cannot use forceTreeWalkForTruncateAndRemove with embedded LNs + * because we don't have the lastLoggedFile info in the BIN slots. + * This info is required by the SortedLSNTreeWalker used in + * DatabaseImpl.finishDeleteProcessing(). + */ + if (embeddedLNs) { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = true; + } + + try { + testRemove(); + } finally { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = false; + } + } + + private void expectObsolete(long file, boolean obsolete) { + expectObsolete(file, obsolete, true /*checkDbFileSummary*/); + } + + private void expectObsolete(long file, + boolean obsolete, + boolean checkDbFileSummary) { + FileSummary fileSummary = getFileSummary(file); + assertEquals("totalINCount", + 1, fileSummary.totalINCount); + assertEquals("obsoleteINCount", + obsolete ? 1 : 0, fileSummary.obsoleteINCount); + + if (checkDbFileSummary) { + DbFileSummary dbFileSummary = getDbFileSummary(file); + assertEquals("db totalINCount", + 1, dbFileSummary.totalINCount); + assertEquals("db obsoleteINCount", + obsolete ? 
1 : 0, dbFileSummary.obsoleteINCount); + } + } + + private void expectObsolete(long file, int obsoleteCount) { + FileSummary fileSummary = getFileSummary(file); + assertEquals("totalINCount", + 1, fileSummary.totalINCount); + assertEquals("obsoleteINCount", + obsoleteCount, fileSummary.obsoleteINCount); + + DbFileSummary dbFileSummary = getDbFileSummary(file); + assertEquals("db totalINCount", + 1, dbFileSummary.totalINCount); + assertEquals("db obsoleteINCount", + obsoleteCount, dbFileSummary.obsoleteINCount); + } + + private long getINFile(Cursor cursor) + throws DatabaseException { + + IN in = TestUtils.getIN(TestUtils.getBIN(cursor)); + long lsn = in.getLastLoggedLsn(); + assertTrue(lsn != DbLsn.NULL_LSN); + return DbLsn.getFileNumber(lsn); + } + + private long getBINFile(Cursor cursor) { + long lsn = TestUtils.getBIN(cursor).getLastLoggedLsn(); + assertTrue(lsn != DbLsn.NULL_LSN); + return DbLsn.getFileNumber(lsn); + } + + private long getFullBINFile(Cursor cursor) { + long lsn = TestUtils.getBIN(cursor).getLastFullLsn(); + assertTrue(lsn != DbLsn.NULL_LSN); + return DbLsn.getFileNumber(lsn); + } + + private long getDeltaBINFile(Cursor cursor) { + long lsn = TestUtils.getBIN(cursor).getLastDeltaLsn(); + if (lsn == DbLsn.NULL_LSN) { + return -1; + } + return DbLsn.getFileNumber(lsn); + } + + /** + * Returns the utilization summary for a given log file. + */ + private FileSummary getFileSummary(long file) { + return envImpl.getUtilizationProfile() + .getFileSummaryMap(true) + .get(new Long(file)); + } + + /** + * Returns the per-database utilization summary for a given log file. + */ + private DbFileSummary getDbFileSummary(long file) { + return dbImpl.getDbFileSummary + (new Long(file), false /*willModify*/); + } + + private void performPartialCheckpoint(boolean truncateUtilizationInfo) + throws DatabaseException, IOException { + + performPartialCheckpoint(truncateUtilizationInfo, + true, // expectAccurateObsoleteLNCount + true); // expectAccurateObsoleteLNSize + } + + private void performPartialCheckpoint(boolean truncateUtilizationInfo, + boolean + expectAccurateObsoleteLNCount) + throws DatabaseException, IOException { + + performPartialCheckpoint(truncateUtilizationInfo, + expectAccurateObsoleteLNCount, + expectAccurateObsoleteLNCount); + } + + /** + * Performs a checkpoint and truncates the log before the last CkptEnd. If + * truncateUtilizationInfo is true, truncates before the FileSummaryLNs + * that appear at the end of the checkpoint. The environment should be + * open when this method is called, and it will be closed when it returns. + */ + private void performPartialCheckpoint + (boolean truncateUtilizationInfo, + boolean expectAccurateObsoleteLNCount, + boolean expectAccurateObsoleteLNSize) + throws DatabaseException, IOException { + + /* Do a normal checkpoint. */ + env.checkpoint(forceConfig); + long eofLsn = envImpl.getFileManager().getNextLsn(); + long lastLsn = envImpl.getFileManager().getLastUsedLsn(); + + /* Searching backward from end, find last CkptEnd. */ + SearchFileReader searcher = + new SearchFileReader(envImpl, 1000, false, lastLsn, eofLsn, + LogEntryType.LOG_CKPT_END); + assertTrue(searcher.readNextEntry()); + long ckptEnd = searcher.getLastLsn(); + long truncateLsn = ckptEnd; + + if (truncateUtilizationInfo) { + + /* Searching backward from CkptEnd, find last CkptStart. 
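+ * (The third SearchFileReader argument selects the scan direction: false,
+ * as here and in the CkptEnd search above, reads backward, while the MapLN
+ * search below passes true to read forward.)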
*/ + searcher = + new SearchFileReader(envImpl, 1000, false, ckptEnd, eofLsn, + LogEntryType.LOG_CKPT_START); + assertTrue(searcher.readNextEntry()); + long ckptStart = searcher.getLastLsn(); + + /* + * Searching forward from CkptStart, find first MapLN for a user + * database (ID GT 2), or if none, the last MapLN for a non-user + * database. MapLNs are written after writing root INs and before + * all FileSummaryLNs. This will find the position at which to + * truncate all MapLNs and FileSummaryLNs, but not INs below the + * mapping tree. + */ + searcher = new SearchFileReader(envImpl, 1000, true, + ckptStart, eofLsn, + LogEntryType.LOG_MAPLN); + while (searcher.readNextEntry()) { + MapLN mapLN = (MapLN) searcher.getLastObject(); + truncateLsn = searcher.getLastLsn(); + if (mapLN.getDatabase().getId().getId() > 2) { + break; + } + } + } + + /* + * Close without another checkpoint, although it doesn't matter since + * we would truncate before it. + */ + closeEnv(false, // doCheckpoint + expectAccurateObsoleteLNCount, + expectAccurateObsoleteLNSize); + + /* Truncate the log. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + EnvironmentImpl cmdEnv = + DbInternal.getNonNullEnvImpl(new Environment(envHome, envConfig)); + cmdEnv.getFileManager().truncateLog(DbLsn.getFileNumber(truncateLsn), + DbLsn.getFileOffset(truncateLsn)); + cmdEnv.abnormalClose(); + } +} diff --git a/test/com/sleepycat/je/cleaner/MakeMigrationLogFiles.java b/test/com/sleepycat/je/cleaner/MakeMigrationLogFiles.java new file mode 100644 index 0000000..87eab5a --- /dev/null +++ b/test/com/sleepycat/je/cleaner/MakeMigrationLogFiles.java @@ -0,0 +1,114 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.io.File; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; + +/** + * Creates two small log files with close to 100% utilization for use by + * FileSelectionTest.testLogVersionMigration. This main program is run with the + * arguments: -h HOME_DIRECTORY + * + * This program was used to create two log files (stored in CVS as + * migrate_f0.jdb and migrate_f1.jdb) running against JE 3.2.68, which writes + log version 5. 
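+ *
+ * For example (the home directory path is hypothetical and must already
+ * exist):
+ *
+ *   java com.sleepycat.je.cleaner.MakeMigrationLogFiles -h /tmp/migrate-env
+ *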
Testing with these files in testLogVersionUpgrade checks + * that these files are migrated when je.cleaner.migrateToLogVersion is set. + */ +public class MakeMigrationLogFiles { + + private static final int FILE_SIZE = 1000000; + + public static void main(String[] args) + throws DatabaseException { + + String homeDir = null; + for (int i = 0; i < args.length; i += 1) { + if (args[i].equals("-h")) { + i += 1; + homeDir = args[i]; + } else { + throw new IllegalArgumentException("Unknown arg: " + args[i]); + } + } + if (homeDir == null) { + throw new IllegalArgumentException("Missing -h arg"); + } + Environment env = openEnv(new File(homeDir), true /*allowCreate*/); + makeMigrationLogFiles(env); + env.close(); + } + + /** + * Opens an Environment with a small log file size. + */ + static Environment openEnv(File homeDir, boolean allowCreate) + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), + String.valueOf(FILE_SIZE)); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + return new Environment(homeDir, envConfig); + } + + /** + * Creates two log files. + */ + static void makeMigrationLogFiles(Environment env) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + int nextKey = 0; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Cursor c = db.openCursor(null, null); + OperationStatus status = c.getLast(key, data, null); + if (status == OperationStatus.SUCCESS) { + nextKey = IntegerBinding.entryToInt(key); + } + c.close(); + + byte[] dataBytes = new byte[1000]; + final int OVERHEAD = dataBytes.length + 100; + data.setData(dataBytes); + + for (int size = 0; size < FILE_SIZE * 2; size += OVERHEAD) { + nextKey += 1; + IntegerBinding.intToEntry(nextKey, key); + status = db.putNoOverwrite(null, key, data); + assert status == OperationStatus.SUCCESS; + } + + db.close(); + } +} diff --git a/test/com/sleepycat/je/cleaner/OffsetTest.java b/test/com/sleepycat/je/cleaner/OffsetTest.java new file mode 100644 index 0000000..f039b92 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/OffsetTest.java @@ -0,0 +1,113 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; + +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +/** + * Tests the OffsetList and PackedOffset classes. 
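+ *
+ * A minimal sketch of the round trip exercised by doOneTest below (the
+ * offset values are arbitrary examples): OffsetList.toArray preserves
+ * insertion order, while PackedOffsets.toArray returns the offsets sorted.
+ *
+ *   OffsetList list = new OffsetList();
+ *   list.add(0xfffeL, true);
+ *   list.add(2L, true);
+ *   long[] raw = list.toArray();        // { 0xfffe, 2 }, insertion order
+ *
+ *   PackedOffsets packed = new PackedOffsets();
+ *   packed.pack(raw);
+ *   long[] sorted = packed.toArray();   // { 2, 0xfffe }, sorted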
+ */ +public class OffsetTest extends TestBase { + + @Test + public void testOffsets() { + + doAllTest(new long[] { + 1, + 2, + 0xfffe, + 0xffff, + 0xfffff, + Integer.MAX_VALUE - 1, + Integer.MAX_VALUE, + + /* + * The following values don't work, which is probably a bug, but + * LSN offsets are not normally this large so the bug currently has + * little impact. + */ + //Integer.MAX_VALUE + 1L, + //Long.MAX_VALUE - 100L, + //Long.MAX_VALUE, + }); + } + + private void doAllTest(long[] offsets) { + + ArrayList list = list(offsets); + + doOneTest(offsets); + + Collections.reverse(list); + doOneTest(array(list)); + + Collections.shuffle(list); + doOneTest(array(list)); + } + + private void doOneTest(long[] offsets) { + + OffsetList list = new OffsetList(); + for (int i = 0; i < offsets.length; i += 1) { + list.add(offsets[i], true); + } + long[] array = list.toArray(); + assertTrue("array=\n" + dump(array) + " offsets=\n" + dump(offsets), + Arrays.equals(offsets, array)); + + long[] sorted = new long[array.length]; + System.arraycopy(array, 0, sorted, 0, array.length); + Arrays.sort(sorted); + + PackedOffsets packed = new PackedOffsets(); + packed.pack(array); + assertTrue(Arrays.equals(sorted, packed.toArray())); + } + + private ArrayList list(long[] array) { + + ArrayList list = new ArrayList(array.length); + for (int i = 0; i < array.length; i += 1) { + list.add(new Long(array[i])); + } + return list; + } + + private long[] array(ArrayList list) { + + long[] array = new long[list.size()]; + for (int i = 0; i < array.length; i += 1) { + array[i] = ((Long) list.get(i)).longValue(); + } + return array; + } + + private String dump(long[] array) { + + StringBuilder buf = new StringBuilder(array.length * 10); + for (int i = 0; i < array.length; i += 1) { + buf.append(Long.toString(array[i])); + buf.append(' '); + } + return buf.toString(); + } +} diff --git a/test/com/sleepycat/je/cleaner/RMWLockingTest.java b/test/com/sleepycat/je/cleaner/RMWLockingTest.java new file mode 100644 index 0000000..ef40d88 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/RMWLockingTest.java @@ -0,0 +1,194 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.List; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.TestUtils; + +/** + * Use LockMode.RMW and verify that the FileSummaryLNs accurately reflect only + * those LNs that have been made obsolete. + */ +@RunWith(Parameterized.class) +public class RMWLockingTest extends CleanerTestBase { + + private static final int NUM_RECS = 5; + + private Database db; + private DatabaseEntry key; + private DatabaseEntry data; + + public RMWLockingTest(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null ; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + @After + public void tearDown() + throws Exception { + + if (db != null) { + db.close(); + } + + super.tearDown(); + + db = null; + } + + @Test + public void testBasic() + throws DatabaseException { + + init(); + insertRecords(); + rmwModify(); + + UtilizationProfile up = + DbInternal.getNonNullEnvImpl(env).getUtilizationProfile(); + + /* + * Checkpoint the environment to flush all utilization tracking + * information before verifying. + */ + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + + assertTrue(up.verifyFileSummaryDatabase()); + } + + /** + * Tests that we can load a log file containing offsets that correspond to + * non-obsolete LNs. The bad log file was created using testBasic run + * against JE 2.0.54. It contains version 1 FSLNs, one of which has an + * offset which is not obsolete. + */ + @Test + public void testBadLog() + throws DatabaseException, IOException { + + /* Copy a log file with bad offsets to log file zero. */ + String resName = "rmw_bad_offsets.jdb"; + File destDir = envHome; + if (envMultiSubDir) { + destDir = new File(envHome, "data001"); + } + TestUtils.loadLog(getClass(), resName, destDir); + + /* Open the log we just copied. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(false); + envConfig.setReadOnly(true); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + + /* + * Verify the UP of the bad log. Prior to adding the code in + * FileSummaryLN.postFetchInit that discards version 1 offsets, this + * assertion failed. 
+ */ + UtilizationProfile up = + DbInternal.getNonNullEnvImpl(env).getUtilizationProfile(); + assertTrue(up.verifyFileSummaryDatabase()); + + env.close(); + env = null; + } + + private void init() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + /* Insert records. */ + private void insertRecords() + throws DatabaseException { + + key = new DatabaseEntry(); + data = new DatabaseEntry(); + + IntegerBinding.intToEntry(100, data); + + for (int i = 0; i < NUM_RECS; i++) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + } + + /* lock two records with RMW, only modify one. */ + private void rmwModify() + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + IntegerBinding.intToEntry(0, key); + assertEquals(OperationStatus.SUCCESS, + db.get(txn, key, data, LockMode.RMW)); + IntegerBinding.intToEntry(1, key); + assertEquals(OperationStatus.SUCCESS, + db.get(txn, key, data, LockMode.RMW)); + + IntegerBinding.intToEntry(200, data); + assertEquals(OperationStatus.SUCCESS, + db.put(txn, key, data)); + txn.commit(); + } +} diff --git a/test/com/sleepycat/je/cleaner/ReadOnlyLockingTest.java b/test/com/sleepycat/je/cleaner/ReadOnlyLockingTest.java new file mode 100644 index 0000000..f15985c --- /dev/null +++ b/test/com/sleepycat/je/cleaner/ReadOnlyLockingTest.java @@ -0,0 +1,314 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.junit.JUnitProcessThread.OutErrReader; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.util.test.SharedTestUtils; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Verifies that opening an environment read-only will prevent cleaned files + * from being deleted in a read-write environment. Uses the ReadOnlyProcess + * class to open the environment read-only in a separate process. + */ +@RunWith(Parameterized.class) +public class ReadOnlyLockingTest extends CleanerTestBase { + + private static final int FILE_SIZE = 4096; + private static final int READER_STARTUP_SECS = 30; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private EnvironmentImpl envImpl; + private Database db; + private Process readerProcess; + + private static File getProcessFile() { + return new File(System.getProperty(TestUtils.DEST_DIR), + "ReadOnlyProcessFile"); + } + + private static void deleteProcessFile() { + File file = getProcessFile(); + file.delete(); + assertTrue(!file.exists()); + } + + static void createProcessFile() + throws IOException { + + File file = getProcessFile(); + assertTrue(file.createNewFile()); + assertTrue(file.exists()); + } + + public ReadOnlyLockingTest(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? 
"multi-sub-dir" : null ; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + @After + public void tearDown() + throws Exception { + + deleteProcessFile(); + try { + stopReaderProcess(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + + super.tearDown(); + + db = null; + envImpl = null; + readerProcess = null; + } + + private void openEnv() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + envConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "80"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(FILE_SIZE)); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + if (envMultiSubDir) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, DATA_DIRS + ""); + } + + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "ReadOnlyLockingTest", dbConfig); + } + + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + * Tests that cleaned files are deleted when there is no reader process. + */ + @Test + public void testBaseline() + throws DatabaseException { + + openEnv(); + writeAndDeleteData(); + env.checkpoint(forceConfig); + + int nFilesCleaned = env.cleanLog(); + assertTrue(nFilesCleaned > 0); + assertTrue(listFiles(), !areAnyFilesDeleted()); + + /* Files are deleted during the checkpoint. */ + env.checkpoint(forceConfig); + assertTrue(listFiles(), areAnyFilesDeleted()); + + closeEnv(); + } + + /** + * Tests that cleaned files are not deleted when there is a reader process. + */ + @Test + public void testReadOnlyLocking() + throws Exception { + + openEnv(); + writeAndDeleteData(); + env.checkpoint(forceConfig); + int nFilesCleaned = env.cleanLog(); + assertTrue(nFilesCleaned > 0); + assertTrue(listFiles(), !areAnyFilesDeleted()); + + /* + * No files are deleted after cleaning when the reader process is + * running. + */ + startReaderProcess(); + env.cleanLog(); + env.checkpoint(forceConfig); + assertTrue(listFiles(), !areAnyFilesDeleted()); + + /* + * Files are deleted when a checkpoint occurs after the reader + * process stops. 
+ */ + stopReaderProcess(); + env.cleanLog(); + env.checkpoint(forceConfig); + assertTrue(listFiles(), areAnyFilesDeleted()); + + closeEnv(); + } + + private void writeAndDeleteData() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(new byte[1]); + DatabaseEntry data = new DatabaseEntry(new byte[FILE_SIZE]); + for (int i = 0; i < 5; i += 1) { + db.put(null, key, data); + } + } + + private boolean areAnyFilesDeleted() { + long lastNum = envImpl.getFileManager().getLastFileNum().longValue(); + for (long i = 0; i <= lastNum; i += 1) { + String name = envImpl.getFileManager().getFullFileName + (i, FileManager.JE_SUFFIX); + if (!(new File(name).exists())) { + return true; + } + } + return false; + } + + private String listFiles() { + StringBuilder builder = new StringBuilder(); + builder.append("Files:"); + final String[] names = envHome.list(); + if (names != null) { + for (String name : names) { + builder.append(' '); + builder.append(name); + } + } + return builder.toString(); + } + + private void startReaderProcess() + throws Exception { + + List cmd = new ArrayList<>(); + cmd.add("java"); + JVMSystemUtils.addZingJVMArgs(cmd); + + cmd.addAll(Arrays.asList( + "-cp", + System.getProperty("java.class.path"), + "-D" + SharedTestUtils.DEST_DIR + '=' + + SharedTestUtils.getDestDir(), + ReadOnlyProcess.class.getName(), + Boolean.toString(envMultiSubDir), + DATA_DIRS + "")); + + /* Start it and wait for it to open the environment. */ + readerProcess = new ProcessBuilder(cmd).start(); + InputStream error = readerProcess.getErrorStream(); + InputStream output = readerProcess.getInputStream(); + Thread err = + new Thread(new OutErrReader(error, false /*ignoreOutput*/)); + err.start(); + Thread out = + new Thread(new OutErrReader(output, false /*ignoreOutput*/)); + out.start(); + long startTime = System.currentTimeMillis(); + boolean running = false; + while (!running && + ((System.currentTimeMillis() - startTime) < + (READER_STARTUP_SECS * 1000))) { + if (getProcessFile().exists()) { + running = true; + } else { + Thread.sleep(10); + } + } + //printReaderStatus(); + assertTrue("ReadOnlyProcess did not start after " + + READER_STARTUP_SECS + " + secs", + running); + } + + private void stopReaderProcess() + throws Exception { + + if (readerProcess != null) { + readerProcess.destroy(); + readerProcess.waitFor(); + Thread.sleep(2000); + readerProcess = null; + } + } + + private void printReaderStatus() { + try { + int status = readerProcess.exitValue(); + System.out.println("Process status=" + status); + } catch (IllegalThreadStateException e) { + System.out.println("Process is still running"); + } + } +} diff --git a/test/com/sleepycat/je/cleaner/ReadOnlyProcess.java b/test/com/sleepycat/je/cleaner/ReadOnlyProcess.java new file mode 100644 index 0000000..ca21a31 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/ReadOnlyProcess.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + +import java.io.File; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * @see ReadOnlyLockingTest + */ +public class ReadOnlyProcess { + + public static void main(String[] args) { + + /* + * Don't write to System.out in this process because the parent + * process only reads System.err. + */ + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setReadOnly(true); + if (args[0].equals("true")) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, args[1]); + } + + File envHome = SharedTestUtils.getTestDir(); + Environment env = new Environment(envHome, envConfig); + + //System.err.println("Opened read-only: " + envHome); + //System.err.println(System.getProperty("java.class.path")); + + /* Notify the test that this process has opened the environment. */ + ReadOnlyLockingTest.createProcessFile(); + + /* Sleep until the parent process kills me. */ + Thread.sleep(Long.MAX_VALUE); + } catch (Exception e) { + + e.printStackTrace(System.err); + System.exit(1); + } + } +} diff --git a/test/com/sleepycat/je/cleaner/SR10553Test.java b/test/com/sleepycat/je/cleaner/SR10553Test.java new file mode 100644 index 0000000..8c167c2 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/SR10553Test.java @@ -0,0 +1,180 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; + +@RunWith(Parameterized.class) +public class SR10553Test extends CleanerTestBase { + + private static final String DB_NAME = "foo"; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private Database db; + + public SR10553Test(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null ; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + /** + * Opens the environment and database. 
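+ * (All daemons are disabled, a 1024-byte log file makes cleaning frequent,
+ * and a small cache forces eviction, keeping the test deterministic.)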
+ */ + private void openEnv() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(config); + config.setAllowCreate(true); + /* Do not run the daemons. */ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + /* Use a small log file size to make cleaning more frequent. */ + config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(1024)); + /* Use a small memory size to force eviction. */ + config.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(), + Integer.toString(1024 * 96)); + /* Don't track detail with a tiny cache size. */ + config.setConfigParam + (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false"); + config.setConfigParam(EnvironmentParams.NUM_LOG_BUFFERS.getName(), + Integer.toString(2)); + /* Set log buffers large enough for trace messages. */ + config.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(), + Integer.toString(7000)); + if (envMultiSubDir) { + config.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + env = new Environment(envHome, config); + + openDb(); + } + + /** + * Opens that database. + */ + private void openDb() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Closes the environment and database. + */ + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + */ + @Test + public void testSR10553() + throws DatabaseException { + + openEnv(); + + /* Put some duplicates, enough to fill a log file. */ + final int COUNT = 10; + DatabaseEntry key = new DatabaseEntry(TestUtils.getTestArray(0)); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < COUNT; i += 1) { + data.setData(TestUtils.getTestArray(i)); + db.put(null, key, data); + } + Cursor cursor = db.openCursor(null, null); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, null)); + assertEquals(COUNT, cursor.count()); + cursor.close(); + + /* Delete everything. Do not compress. */ + db.delete(null, key); + + /* Checkpoint and clean. */ + env.checkpoint(forceConfig); + int cleaned = env.cleanLog(); + assertTrue("cleaned=" + cleaned, cleaned > 0); + + /* Force eviction. */ + env.evictMemory(); + + /* Scan all values. */ + cursor = db.openCursor(null, null); + for (OperationStatus status = cursor.getFirst(key, data, null); + status == OperationStatus.SUCCESS; + status = cursor.getNext(key, data, null)) { + } + cursor.close(); + + /* + * Before the fix to 10553, while scanning over deleted records, a + * LogFileNotFoundException would occur when faulting in a deleted + * record, if the log file had been cleaned. This was because the + * cleaner was not setting knownDeleted for deleted records. 
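+ * (With the fix, the cleaner sets knownDeleted for such records, so the
+ * scan above skips the deleted slots instead of faulting in LNs from a
+ * cleaned file.)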
+ */ + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/cleaner/SR10597Test.java b/test/com/sleepycat/je/cleaner/SR10597Test.java new file mode 100644 index 0000000..6943295 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/SR10597Test.java @@ -0,0 +1,165 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; + +@RunWith(Parameterized.class) +public class SR10597Test extends CleanerTestBase { + + private static final String DB_NAME = "foo"; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private Database db; + + /** + * Opens the environment and database. + */ + private void openEnv() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(config); + config.setAllowCreate(true); + /* Do not run the daemons. */ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + /* Use a small log file size to make cleaning more frequent. */ + config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(1024)); + if (envMultiSubDir) { + config.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, config); + + openDb(); + } + + public SR10597Test(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null ; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + /** + * Opens that database. + */ + private void openDb() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Closes the environment and database. 
+ */ + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + */ + @Test + public void testSR10597() + throws DatabaseException { + + openEnv(); + + /* Put some duplicates, enough to fill a log file. */ + final int COUNT = 10; + DatabaseEntry key = new DatabaseEntry(TestUtils.getTestArray(0)); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < COUNT; i += 1) { + data.setData(TestUtils.getTestArray(i)); + db.put(null, key, data); + } + Cursor cursor = db.openCursor(null, null); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, null)); + assertEquals(COUNT, cursor.count()); + cursor.close(); + + /* Delete everything, then compress to delete the DIN. */ + db.delete(null, key); + env.compress(); + data.setData(TestUtils.getTestArray(0)); + + /* Add a single record, which will not create a DIN. */ + db.put(null, key, data); + + /* Checkpoint and clean. */ + env.checkpoint(forceConfig); + int cleaned = env.cleanLog(); + assertTrue("cleaned=" + cleaned, cleaned > 0); + + /* + * Before the fix to 10597, when cleaning the log we would be looking + * for an LN with containsDuplicates=true. We assumed that when we + * found the BIN entry, it must point to a DIN. But because we + * deleted and compressed above, the entry is actually an LN. This + * caused a ClassCastException at the bottom of + * Tree.getParentBINForChildLN. + */ + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/cleaner/SR12885Test.java b/test/com/sleepycat/je/cleaner/SR12885Test.java new file mode 100644 index 0000000..eefc56c --- /dev/null +++ b/test/com/sleepycat/je/cleaner/SR12885Test.java @@ -0,0 +1,271 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; + +/** + * Because LNs no longer have node IDs and we now lock the LSN, the specific + * bug this test was checking is no longer applicable. However, a similar + * situation could occur now, because the LSN changes not only during slot + * reuse but every time we log the LN. So we continue to run this test. 
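+ *
+ * The slot-reuse pattern at the core of the scenario, sketched with the
+ * same calls used in testSR12885 below (the cleaner migration that goes
+ * wrong runs between the put and the abort):
+ *
+ *   Transaction txn = env.beginTransaction(null, null);
+ *   db.delete(txn, key);                // delete the LN for key X
+ *   db.putNoOverwrite(txn, key, data);  // reuse the slot for a new LN
+ *   env.checkpoint(forceConfig);        // migration finds the slot locked
+ *   txn.abort();                        // restores the original LSN in the slot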
+ * + * Original description + * -------------------- + * Reproduces a problem found in SR12885 where we failed to migrate a pending + * LN if the slot was reused by an active transaction and that transaction was + * later aborted. + * + * This bug can manifest as a LogFileNotFoundException. However, there was another + * bug that caused this bug to manifest sometimes as a NOTFOUND return value. + * This secondary problem -- more sloppiness than a real bug -- was that the + * PendingDeleted flag was not cleared during an abort. If the PendingDeleted + * flag is set, the low level fetch method will return null rather than + * throwing a LogFileNotFoundException. This caused a NOTFOUND in some cases. + * + * The sequence that causes the bug is: + * + * 1) The cleaner processes a file containing LN-A (node A) for key X. Key X + * is a non-deleted LN. + * + * 2) The cleaner sets the migrate flag on the BIN entry for LN-A. + * + * 3) In transaction T-1, LN-A is deleted and replaced by LN-B with key X, + * reusing the same slot but assigning a new node ID. At this point both node + * IDs (LN-A and LN-B) are locked. + * + * 4) The cleaner (via a checkpoint or eviction that logs the BIN) tries to + * migrate LN-B, the current LN in the BIN, but finds it locked. It adds LN-B + * to the pending LN list. + * + * 5) T-1 aborts, putting the LSN of LN-A back into the BIN slot. + * + * 6) In transaction T-2, LN-A is deleted and replaced by LN-C with key X, + * reusing the same slot but assigning a new node ID. At this point both node + * IDs (LN-A and LN-C) are locked. + * + * 7) The cleaner (via a checkpoint or wakeup) processes the pending LN-B. It + * first gets a lock on node B, then does the tree lookup. It finds LN-C in + * the tree, but it doesn't notice that it has a different node ID than the + * node it locked. + * + * 8) The cleaner sees that LN-C is deleted, and therefore no migration is + * necessary -- this is incorrect. It removes LN-B from the pending list, + * allowing the cleaned file to be deleted. + * + * 9) T-2 aborts, putting the LSN of LN-A back into the BIN slot. + * + * 10) A fetch of key X will fail, since the file containing the LSN for LN-A + * has been deleted. If we didn't clear the PendingDeleted flag, this will + * cause a NOTFOUND error instead of a LogFileNotFoundException. + */ +@RunWith(Parameterized.class) +public class SR12885Test extends CleanerTestBase { + + private static final String DB_NAME = "foo"; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private Database db; + + public SR12885Test(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + /** + * Opens the environment and database. + */ + private void openEnv() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(config); + config.setTransactional(true); + config.setAllowCreate(true); + /* Do not run the daemons.
*/ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + /* Use a small log file size to make cleaning more frequent. */ + config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(1024)); + if (envMultiSubDir) { + config.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, config); + + openDb(); + } + + /** + * Opens the database. + */ + private void openDb() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Closes the environment and database. + */ + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testSR12885() + throws DatabaseException { + + openEnv(); + + final int COUNT = 10; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(TestUtils.getTestArray(0)); + OperationStatus status; + + /* Add some records, enough to fill a log file. */ + for (int i = 0; i < COUNT; i += 1) { + key.setData(TestUtils.getTestArray(i)); + status = db.putNoOverwrite(null, key, data); + assertEquals(OperationStatus.SUCCESS, status); + } + + /* + * Delete all but key 0, so the first file can be cleaned but key 0 + * will need to be migrated. + */ + for (int i = 1; i < COUNT; i += 1) { + key.setData(TestUtils.getTestArray(i)); + status = db.delete(null, key); + assertEquals(OperationStatus.SUCCESS, status); + } + + /* + * Checkpoint and clean to set the migrate flag for key 0. This must + * be done when key 0 is not locked, so that it will not be put onto + * the pending list yet. Below we cause it to be put onto the pending + * list with a different node ID. + */ + env.checkpoint(forceConfig); + int cleaned = env.cleanLog(); + assertTrue("cleaned=" + cleaned, cleaned > 0); + + /* + * Using a transaction, delete then insert key 0, reusing the slot. + * The insertion assigns a new node ID. Don't abort the transaction + * until after the cleaner migration is finished. + */ + Transaction txn = env.beginTransaction(null, null); + key.setData(TestUtils.getTestArray(0)); + status = db.delete(txn, key); + assertEquals(OperationStatus.SUCCESS, status); + status = db.putNoOverwrite(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + + /* + * Checkpoint again to perform LN migration. LN migration will not + * migrate key 0 because it is locked -- it will be put onto the + * pending list. But the LN put on the pending list will be the newly + * inserted node, which has a different node ID than the LN that needs + * to be migrated -- this is the first condition for the bug. + */ + env.checkpoint(forceConfig); + + /* + * Abort the transaction to revert to the original node ID for key 0. + * Then perform a delete with a new transaction. This makes the + * current LN for key 0 deleted.
+ */ + txn.abort(); + txn = env.beginTransaction(null, null); + key.setData(TestUtils.getTestArray(0)); + status = db.delete(txn, key); + assertEquals(OperationStatus.SUCCESS, status); + + /* + * The current state of key 0 is that the BIN contains a deleted LN, + * and that LN has a node ID that is different than the one in the + * pending LN list. This node is the one that needs to be migrated. + * + * Perform a checkpoint to cause pending LNs to be processed and then + * delete the cleaned file. When we process the pending LN, we'll lock + * the pending LN's node ID (the one we inserted and aborted), which is + * the wrong node ID. We'll then examine the current LN, find it + * deleted, and neglect to migrate the LN that needs to be migrated. + * The error is that we don't lock the node ID of the current LN. + * + * Then abort the delete transaction. That will revert the BIN entry + * to the node we failed to migrate. If we then try to fetch key 0, + * we'll get LogFileNotFoundException. + */ + env.checkpoint(forceConfig); + txn.abort(); + status = db.get(null, key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + + /* If we get this far without LogFileNotFoundException, it's fixed. */ + + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/cleaner/SR12978Test.java b/test/com/sleepycat/je/cleaner/SR12978Test.java new file mode 100644 index 0000000..8367b07 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/SR12978Test.java @@ -0,0 +1,201 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; + +/** + * Tests a fix to 12978, which was a ClassCastException in the following + * sequence. + * + * 1) An LN's BIN entry has the MIGRATE flag set. + * + * 2) Another LN with the same key is inserted (duplicates are allowed) and the + * first LN is moved to a dup tree. + * + * 3) The MIGRATE flag on the BIN entry is not cleared, and this entry now + * contains a DIN. + * + * 4) A split of the BIN occurs, logging the BIN with DIN entry. During a + * split we can't do migration, so we attempt to put the DIN onto the cleaner's + * pending list. We cast from DIN to LN, causing the exception. + * + * The fix was to clear the MIGRATE flag on the BIN entry at the time we update + * it to contain the DIN.
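Step 2 of that sequence, a single LN moving into a dup tree when a second datum arrives for its key, is visible through the public API. A minimal sketch, assuming an already-open, non-transactional Environment env; the database name is made up:

```java
// Sorted duplicates must be enabled for a dup tree to be possible.
DatabaseConfig dbConfig = new DatabaseConfig();
dbConfig.setAllowCreate(true);
dbConfig.setSortedDuplicates(true);
Database db = env.openDatabase(null, "dups", dbConfig);

DatabaseEntry key = new DatabaseEntry(new byte[] {1});
db.put(null, key, new DatabaseEntry(new byte[] {1}));          // single LN in the BIN slot
db.putNoDupData(null, key, new DatabaseEntry(new byte[] {2})); // second datum: the LN moves into a dup tree

Cursor cursor = db.openCursor(null, null);
DatabaseEntry data = new DatabaseEntry();
cursor.getSearchKey(key, data, null);
assert cursor.count() == 2;   // both values now live under the one key
cursor.close();
db.close();
```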
+ * + * This bug also left latches unreleased if a runtime exception occurred during + * a split, and that problem was fixed also. + */ +@RunWith(Parameterized.class) +public class SR12978Test extends CleanerTestBase { + + private static final String DB_NAME = "foo"; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private Database db; + + public SR12978Test(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + /** + * Opens the environment and database. + */ + private void open() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(config); + config.setAllowCreate(true); + /* Do not run the daemons. */ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + /* Configure to make cleaning more frequent. */ + config.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), "10240"); + config.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "90"); + if (envMultiSubDir) { + config.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + env = new Environment(envHome, config); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Closes the environment and database. + */ + private void close() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + * Tests the fix for [#12978]; see the class comment for the sequence that + * caused the ClassCastException during a BIN split. + */ + @Test + public void testSR12978() + throws DatabaseException { + + open(); + + final int COUNT = 800; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* + * Insert enough non-dup records to write a few log files. Delete + * every other so that cleaning will occur. Leave key space so we can + * insert below to cause splits. + */ + IntegerBinding.intToEntry(0, data); + for (int i = 0; i < COUNT; i += 4) { + + IntegerBinding.intToEntry(i + 0, key); + status = db.putNoOverwrite(null, key, data); + assertEquals(OperationStatus.SUCCESS, status); + + IntegerBinding.intToEntry(i + 1, key); + status = db.putNoOverwrite(null, key, data); + assertEquals(OperationStatus.SUCCESS, status); + + status = db.delete(null, key); + assertEquals(OperationStatus.SUCCESS, status); + } + + /* Clean to set the MIGRATE flag on some LN entries. */ + env.checkpoint(forceConfig); + int nCleaned = env.cleanLog(); + assertTrue(nCleaned > 0); + + /* Add dups to cause the LNs to be moved to a dup tree. */ + IntegerBinding.intToEntry(1, data); + for (int i = 0; i < COUNT; i += 4) { + + IntegerBinding.intToEntry(i + 0, key); + status = db.putNoDupData(null, key, data); + assertEquals(OperationStatus.SUCCESS, status); + } + + /* + * Insert more unique keys to cause BIN splits. Before the fix to + * 12978, a ClassCastException would occur during a split.
+ */ + IntegerBinding.intToEntry(0, data); + for (int i = 0; i < COUNT; i += 4) { + + IntegerBinding.intToEntry(i + 2, key); + status = db.putNoOverwrite(null, key, data); + assertEquals(OperationStatus.SUCCESS, status); + + IntegerBinding.intToEntry(i + 3, key); + status = db.putNoOverwrite(null, key, data); + assertEquals(OperationStatus.SUCCESS, status); + } + + close(); + } +} diff --git a/test/com/sleepycat/je/cleaner/SR13061Test.java b/test/com/sleepycat/je/cleaner/SR13061Test.java new file mode 100644 index 0000000..3a979be --- /dev/null +++ b/test/com/sleepycat/je/cleaner/SR13061Test.java @@ -0,0 +1,123 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.tree.FileSummaryLN; +import com.sleepycat.utilint.StringUtils; + +/** + * Tests that a FileSummaryLN with an old style string key can be read. When + * we relied solely on log entry version to determine whether an LN had a + * string key, we could fail when an old style LN was migrated by the cleaner. + * In that case the key was still a string key but the log entry version was + * updated to something greater than zero. See FileSummaryLN.hasStringKey for + * details of how we now guard against this. + */ +@RunWith(Parameterized.class) +public class SR13061Test extends CleanerTestBase { + + public SR13061Test(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null ; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + @Test + public void testSR13061() + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + if (envMultiSubDir) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + env = new Environment(envHome, envConfig); + + FileSummaryLN ln = new FileSummaryLN(new FileSummary()); + + /* + * All of these tests failed before checking that the byte array must + * be eight bytes for integer keys. 
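The ambiguity the comment above describes is easy to see by encoding both key forms directly. A small sketch using only standard Java plus the tuple binding the test itself uses; the file and sequence numbers are arbitrary examples:

```java
import java.nio.charset.StandardCharsets;
import com.sleepycat.bind.tuple.TupleOutput;

public class FileSummaryKeySketch {
    public static void main(String[] args) {
        // Old-style key: the file number as a decimal string, UTF-8 encoded.
        byte[] oldKey = String.valueOf(12345678).getBytes(StandardCharsets.UTF_8);

        // New-style key: file number plus sequence number, two unsigned ints.
        TupleOutput out = new TupleOutput();
        out.writeUnsignedInt(12345678L); // file number
        out.writeUnsignedInt(1L);        // sequence number
        byte[] newKey = out.toByteArray();

        // An eight-digit file number makes the old key exactly eight bytes,
        // the same length as every new-style key, so length alone cannot
        // tell the formats apart.
        System.out.println(oldKey.length + " / " + newKey.length); // 8 / 8

        // The tiebreaker: in an eight-byte old key, byte 4 is an ASCII digit,
        // while here it is the high-order byte of the sequence number.
        System.out.println(oldKey[4] + " / " + newKey[4]); // 53 / 0
    }
}
```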
+ */ + assertTrue(ln.hasStringKey(stringKey(0))); + assertTrue(ln.hasStringKey(stringKey(1))); + assertTrue(ln.hasStringKey(stringKey(12))); + assertTrue(ln.hasStringKey(stringKey(123))); + assertTrue(ln.hasStringKey(stringKey(1234))); + assertTrue(ln.hasStringKey(stringKey(12345))); + assertTrue(ln.hasStringKey(stringKey(123456))); + assertTrue(ln.hasStringKey(stringKey(1234567))); + assertTrue(ln.hasStringKey(stringKey(123456789))); + assertTrue(ln.hasStringKey(stringKey(1234567890))); + + /* + * These tests failed before checking that the first byte of the + * sequence number (in an eight byte key) must not be '0' to '9' for + * integer keys. + */ + assertTrue(ln.hasStringKey(stringKey(12345678))); + assertTrue(ln.hasStringKey(stringKey(12340000))); + + /* These tests are just for good measure. */ + assertTrue(!ln.hasStringKey(intKey(0, 1))); + assertTrue(!ln.hasStringKey(intKey(1, 1))); + assertTrue(!ln.hasStringKey(intKey(12, 1))); + assertTrue(!ln.hasStringKey(intKey(123, 1))); + assertTrue(!ln.hasStringKey(intKey(1234, 1))); + assertTrue(!ln.hasStringKey(intKey(12345, 1))); + assertTrue(!ln.hasStringKey(intKey(123456, 1))); + assertTrue(!ln.hasStringKey(intKey(1234567, 1))); + assertTrue(!ln.hasStringKey(intKey(12345678, 1))); + assertTrue(!ln.hasStringKey(intKey(123456789, 1))); + assertTrue(!ln.hasStringKey(intKey(1234567890, 1))); + } + + private byte[] stringKey(long fileNum) { + + try { + return StringUtils.toUTF8(String.valueOf(fileNum)); + } catch (Exception e) { + fail(e.toString()); + return null; + } + } + + private byte[] intKey(long fileNum, long seqNum) { + + TupleOutput out = new TupleOutput(); + out.writeUnsignedInt(fileNum); + out.writeUnsignedInt(seqNum); + return out.toByteArray(); + } +} diff --git a/test/com/sleepycat/je/cleaner/SR18567Test.java b/test/com/sleepycat/je/cleaner/SR18567Test.java new file mode 100644 index 0000000..96f4fa8 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/SR18567Test.java @@ -0,0 +1,202 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import java.util.List; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.TestUtils; + +/** + * Test concurrent checkpoint and Database.sync operations. + * + * Prior to the fix for [#18567], checkpoint and Database.sync both acquired a + * shared latch on the parent IN while logging a child IN. 
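The race the test provokes amounts to running a forced checkpoint and Database.sync at the same time. A minimal sketch with plain threads, assuming an open Environment env and a deferred-write Database db (Database.sync is only permitted for deferred-write databases); JUnitThread in the test is just a JE test helper:

```java
static void raceCheckpointAndSync(final Environment env, final Database db)
    throws InterruptedException {

    final CheckpointConfig force = new CheckpointConfig();
    force.setForce(true);

    // Both operations log dirty child BINs under shared parent INs.
    Thread checkpointer = new Thread(() -> env.checkpoint(force), "Checkpoint");
    Thread syncer = new Thread(db::sync, "Database.sync");

    checkpointer.start();
    syncer.start();
    checkpointer.join();
    syncer.join();
}
```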
During concurrent + * logging of provisional child BINs for the same parent IN, problems can occur + * when the utilization info in the parent IN is being updated by both threads. + * + * For example, the following calls could be concurrent in the two threads for + * the same parent IN. + * + * BIN.afterLog (two different BINs) + * |--IN.trackProvisionalObsolete (same parent) + * |-- PackedObsoleteInfo.copyObsoleteInfo (array copy) + * + * Concurrent array copies could corrupt the utilization info, and this could + * result in the exception reported by the OTN user later on, when the parent + * IN is logged and the obsolete info is copied to the UtilizationTracker: + * + * java.lang.IndexOutOfBoundsException + * at com.sleepycat.bind.tuple.TupleInput.readBoolean(TupleInput.java:186) + * at com.sleepycat.je.cleaner.PackedObsoleteInfo.countObsoleteInfo(PackedObsoleteInfo.java:60) + * at com.sleepycat.je.log.LogManager.serialLogInternal(LogManager.java:671) + * at com.sleepycat.je.log.SyncedLogManager.serialLog(SyncedLogManager.java:40) + * at com.sleepycat.je.log.LogManager.multiLog(LogManager.java:388) + * at com.sleepycat.je.recovery.Checkpointer.logSiblings(Checkpointer.java:1285) + * + * Although we have not reproduced this particular exception, the test case + * below, along with assertions in IN.beforeLog and afterLog, demonstrates that + * we do allow concurrent logging of two child BINs for the same parent IN. + * Since both threads are modifying the parent IN, it seems obvious that using + * a shared latch could cause more than one type of problem and should be + * disallowed. + */ +@RunWith(Parameterized.class) +public class SR18567Test extends CleanerTestBase { + + private static final String DB_NAME = "foo"; + + private static final CheckpointConfig forceConfig = new CheckpointConfig(); + static { + forceConfig.setForce(true); + } + + private Database db; + private JUnitThread junitThread1; + private JUnitThread junitThread2; + + public SR18567Test(boolean multiSubDir) { + envMultiSubDir = multiSubDir; + customName = envMultiSubDir ? "multi-sub-dir" : null; + } + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + @After + public void tearDown() + throws Exception { + + if (junitThread1 != null) { + junitThread1.shutdown(); + junitThread1 = null; + } + if (junitThread2 != null) { + junitThread2.shutdown(); + junitThread2 = null; + } + super.tearDown(); + db = null; + } + + /** + * Opens the environment and database. + */ + private void openEnv() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(config); + config.setAllowCreate(true); + /* Do not run the daemons. */ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + if (envMultiSubDir) { + config.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + env = new Environment(envHome, config); + + openDb(); + } + + /** + * Opens the database.
+ */ + private void openDb() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setDeferredWrite(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Closes the environment and database. + */ + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testSR18567() + throws Throwable { + + openEnv(); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[0]); + + for (int i = 0; i < 100; i += 1) { + for (int j = 0; j < 1000; j += 1) { + IntegerBinding.intToEntry(j, key); + db.put(null, key, data); + } + junitThread1 = new JUnitThread("Checkpoint") { + @Override + public void testBody() { + env.checkpoint(forceConfig); + } + }; + junitThread2 = new JUnitThread("Database.sync") { + @Override + public void testBody() { + db.sync(); + } + }; + junitThread1.start(); + junitThread2.start(); + junitThread1.finishTest(); + junitThread2.finishTest(); + } + + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/cleaner/TTLCleaningTest.java b/test/com/sleepycat/je/cleaner/TTLCleaningTest.java new file mode 100644 index 0000000..efaa65b --- /dev/null +++ b/test/com/sleepycat/je/cleaner/TTLCleaningTest.java @@ -0,0 +1,870 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Deque; +import java.util.LinkedList; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.Get; +import com.sleepycat.je.ProgressListener; +import com.sleepycat.je.Put; +import com.sleepycat.je.RecoveryProgress; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.test.TTLTest; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import junit.framework.Assert; + +/** + * Tests purging/cleaning of expired data. 
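The TTL tests below all hinge on records carrying an expiration time, which the public API attaches per write. A minimal sketch, assuming an open Database db; setTTL accepts only TimeUnit.HOURS or TimeUnit.DAYS, and the record expires on the hour boundary at or after now plus two hours:

```java
DatabaseEntry key = new DatabaseEntry(new byte[] {1});
DatabaseEntry data = new DatabaseEntry(new byte[32]);

WriteOptions options = new WriteOptions()
    .setTTL(2, TimeUnit.HOURS)
    .setUpdateTTL(true);   // also replace the TTL of an existing record

db.put(null, key, data, Put.OVERWRITE, options);
```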
+ */ +public class TTLCleaningTest extends TestBase { + + private final File envHome; + private Environment env; + private Database db; + private EnvironmentImpl envImpl; + + public TTLCleaningTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + TTL.setTimeTestHook(null); + if (env != null) { + try { + env.close(); + } finally { + env = null; + } + } + } + + private EnvironmentConfig createEnvConfig() { + + final EnvironmentConfig envConfig = new EnvironmentConfig(); + + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + + return envConfig; + } + + private void open() { + open(createEnvConfig()); + } + + private void open(final EnvironmentConfig envConfig) { + + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + db = env.openDatabase(null, "foo", dbConfig); + } + + private void close() { + db.close(); + env.close(); + } + + /** + * Check histogram with all TTLs in days, increasing then decreasing TTLs. + */ + @Test + public void testHistogram1() { + + final int[] ttls = new int[20]; + final TimeUnit units[] = new TimeUnit[20]; + + for (int i = 0; i < 10; i += 1) { + ttls[i] = i * 2; + units[i] = TimeUnit.DAYS; + } + + for (int i = 10; i < 20; i += 1) { + ttls[i] = (20 - i) * 3; + units[i] = TimeUnit.DAYS; + } + + checkHistogram(ttls, units); + } + + /** + * Check histogram with all TTLs in hours, decreasing then increasing TTLs. + */ + @Test + public void testHistogram2() { + + final int[] ttls = new int[20]; + final TimeUnit units[] = new TimeUnit[20]; + + for (int i = 0; i < 10; i += 1) { + ttls[i] = (10 - i) * 3; + units[i] = TimeUnit.HOURS; + } + + for (int i = 10; i < 20; i += 1) { + ttls[i] = i * 2; + units[i] = TimeUnit.HOURS; + } + + checkHistogram(ttls, units); + } + + /** + * Check histogram with a mixture of days and hours. + */ + @Test + public void testHistogram3() { + + final int[] ttls = new int[20]; + final TimeUnit units[] = new TimeUnit[20]; + + for (int i = 0; i < 20; i += 1) { + + final int ttl; + final TimeUnit unit; + if (i % 3 == 0) { + ttl = i * 3; + unit = TimeUnit.HOURS; + } else { + ttl = i; + unit = TimeUnit.DAYS; + } + + ttls[i] = ttl; + units[i] = unit; + } + + checkHistogram(ttls, units); + } + + /** + * Writes some data for each TTL given, in the order of the array, and + * confirms that the histogram is correct. + */ + private void checkHistogram(final int[] ttls, final TimeUnit units[]) { + + TTLTest.setFixedTimeHook(TTL.MILLIS_PER_HOUR); + + open(); + + final ExpirationProfile profile = envImpl.getExpirationProfile(); + final FileManager fileManager = envImpl.getFileManager(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + /* + * Before writing and measuring the change in LSN, we must ensure an + * insertion will not write the first BIN or cause a split. 
Do a put + * (with no TTL) to write the first BIN, and assert that we won't + * insert more records than will fit in one BIN. + */ + assertTrue(ttls.length < 120); + IntegerBinding.intToEntry(Integer.MAX_VALUE, key); + data.setData(new byte[0]); + db.put(null, key, data, Put.OVERWRITE, null); + + /* + * Write a record for each specified TTL, measuring its size. Keep + * track of the max expiration time and whether any units are in hours. + */ + final WriteOptions options = new WriteOptions().setUpdateTTL(true); + final int[] sizes = new int[ttls.length]; + long maxExpireTime = 0; + boolean anyHours = false; + + for (int i = 0; i < ttls.length; i += 1) { + + options.setTTL(ttls[i], units[i]); + + IntegerBinding.intToEntry(i, key); + data.setData(new byte[20 + i]); // Don't want embedded LNs. + + final Transaction txn = env.beginTransaction(null, null); + + final long offset0 = DbLsn.getFileOffset(fileManager.getNextLsn()); + db.put(txn, key, data, Put.OVERWRITE, options); + final long offset1 = DbLsn.getFileOffset(fileManager.getNextLsn()); + + txn.commit(); + + sizes[i] = (int) (offset1 - offset0); // size of LN alone + + maxExpireTime = Math.max( + maxExpireTime, getExpireTime(ttls[i], units[i])); + + if (units[i] == TimeUnit.HOURS) { + anyHours = true; + } + } + + /* Everything should be in one file. */ + assertEquals(0, fileManager.getCurrentFileNum()); + + /* Moving to a new file will update the expiration profile. */ + envImpl.forceLogFileFlip(); + + /* + * Use the cleaner to explicitly count expiration, to simulate what + * happens during two pass cleaning. + */ + final FileProcessor processor = envImpl.getCleaner().createProcessor(); + final ExpirationTracker tracker = processor.countExpiration(0); + + /* + * The interval at which data expires depends on whether any TTL units + * are in hours. + */ + final long ttlInterval = anyHours ? + TimeUnit.HOURS.toMillis(1) : TimeUnit.DAYS.toMillis(1); + + /* + * Check 5 minute intervals, in time order, starting with the current + * time and going a couple TTL intervals past the last expiration time. + */ + final long maxCheckTime = maxExpireTime + (2 * ttlInterval); + final long startCheckTime = TTL.currentSystemTime(); + final long checkInterval = TimeUnit.MINUTES.toMillis(5); + + for (long checkTime = startCheckTime; + checkTime <= maxCheckTime; + checkTime += checkInterval) { + + /* + * Check that our estimated expiration bytes agree with the + * expiration profile, and with the expiration tracker. + */ + profile.refresh(checkTime); + final int profileBytes = profile.getExpiredBytes(0 /*file*/); + final int trackerBytes = tracker.getExpiredBytes(checkTime); + + final int expiredBytes = + getExpiredBytes(ttls, units, sizes, checkTime); + + assertEquals(expiredBytes, profileBytes); + assertEquals(expiredBytes, trackerBytes); + + /* + * Check that ExpirationProfile.getExpiredBytes(file, time) returns + * gradually expired values for the current period. + */ + if (expiredBytes == 0) { + continue; + } + + final long intervalStartTime = + checkTime - (checkTime % ttlInterval); + + final long prevIntervalStart = intervalStartTime - ttlInterval; + + final int prevExpiredBytes = (prevIntervalStart < startCheckTime) ?
+ 0 : tracker.getExpiredBytes(prevIntervalStart); + + final int newlyExpiredBytes = expiredBytes - prevExpiredBytes; + final long newMs = checkTime - intervalStartTime; + + final long expectGradualBytes = prevExpiredBytes + + ((newlyExpiredBytes * newMs) / ttlInterval); + + final int gradualBytes = profile.getExpiredBytes( + 0 /*file*/, checkTime).second(); + + assertEquals(expectGradualBytes, gradualBytes); + } + + close(); + } + + /** + * Returns the first millisecond on which bytes with the given TTL will + * expire. + */ + private long getExpireTime(final int ttl, final TimeUnit unit) { + + if (ttl == 0) { + return 0; + } + + final long interval = unit.toMillis(1); + long time = TTL.currentSystemTime() + (ttl * interval); + + /* Round up to nearest interval. */ + final long remainder = time % interval; + if (remainder != 0) { + time = time - remainder + interval; + } + + /* Check that TTL methods agree. */ + assertEquals( + time, + TTL.expirationToSystemTime( + TTL.ttlToExpiration(ttl, unit), + unit == TimeUnit.HOURS)); + + return time; + } + + /** + * Returns the number of bytes that expire on or before the given system + * time. + */ + private int getExpiredBytes(final int[] ttls, + final TimeUnit units[], + final int[] sizes, + final long sysTime) { + int bytes = 0; + for (int i = 0; i < ttls.length; i += 1) { + final long expireTime = getExpireTime(ttls[i], units[i]); + if (expireTime != 0 && sysTime >= expireTime) { + bytes += sizes[i]; + } + } + return bytes; + } + + /** + * Checks that cleaning occurs when LNs expire. + * + * Also checks that no two-pass or revisal runs occur; because this test + * does not make LNs obsolete, there is little overlap between expired + * and obsolete data. + */ + @Test + public void testCleanLNs() { + + /* + * Use start time near the end of the hour period. If the beginning of + * the period were used, when two hours pass and all data expires at + * once, only a small portion would be considered expired due to + * gradual expiration. + */ + TTLTest.setFixedTimeHook(TTL.MILLIS_PER_HOUR - 100); + + final EnvironmentConfig envConfig = createEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(10 * 1024 * 1024)); + + open(envConfig); + + /* + * Write 10 files, using 1024 byte records so there are no embedded + * LNs, and giving all records a TTL. + */ + writeFiles(10, 4, 1024, 1); + + /* Almost no cleaning occurs before data expires. */ + int nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned <= 1); + + /* Advance time and most files are cleaned. */ + TTLTest.fixedSystemTime += 2 * TTL.MILLIS_PER_HOUR; + nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned >= 6); + + /* No overlap, so no two-pass or revisal runs. */ + final EnvironmentStats stats = env.getStats(null); + assertEquals(0, stats.getNCleanerTwoPassRuns()); + assertEquals(0, stats.getNCleanerRevisalRuns()); + assertEquals(nFilesCleaned, stats.getNCleanerRuns()); + + close(); + } + + /** + * Checks that cleaning occurs when BIN slots expire, using embedded LNs to + * remove LN expiration from the picture. + * + * We don't check for revisal or two-pass runs here because they are + * unlikely but difficult to predict. They are unlikely because with + * embedded LNs, normal cleaning, due to BINs made obsolete by checkpoints + * and splits, will often clean expired data as well. + */ + @Test + public void testCleanBINs() { + + /* Use start time near the end of the hour period.
See testCleanLNs. */ + TTLTest.setFixedTimeHook(TTL.MILLIS_PER_HOUR - 100); + + final EnvironmentConfig envConfig = createEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(10 * 1024 * 1024)); + + open(envConfig); + + if (DbInternal.getNonNullEnvImpl(env).getMaxEmbeddedLN() < 16) { + System.out.println( + "testCleanBINs not applicable when embedded LNs " + + "are disabled"); + close(); + return; + } + + /* + * Write 20 files, using 16 byte records to create embedded LNs, and + * giving all records a TTL. + */ + writeFiles(20, 4, 16, 1); + + /* Clean obsolete LNs, etc, to start with non-expired data. */ + env.cleanLog(); + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* Almost no further cleaning occurs before data expires. */ + int nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned <= 1); + + /* Advance time and several files are cleaned. */ + TTLTest.fixedSystemTime += 2 * TTL.MILLIS_PER_HOUR; + nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned >= 2); + + final EnvironmentStats stats = env.getStats(null); + assertTrue(stats.getNCleanerTwoPassRuns() <= stats.getNCleanerRuns()); + + close(); + } + + /** + * Checks that two-pass cleaning is used when: + * + expired and obsolete data partially overlap, and + * + max utilization per-file is over the two-pass threshold, but + * + the true utilization of the files is below the threshold. + * + * @see EnvironmentParams#CLEANER_TWO_PASS_GAP + * @see EnvironmentParams#CLEANER_TWO_PASS_THRESHOLD + */ + @Test + public void testTwoPassCleaning() { + + /* Use start time near the end of the hour period. See testCleanLNs. */ + TTLTest.setFixedTimeHook(TTL.MILLIS_PER_HOUR - 100); + + final EnvironmentConfig envConfig = createEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(10 * 1024 * 1024)); + + open(envConfig); + + /* Write 10 files, giving 1/2 records a TTL. */ + writeFiles(10, 4, 1024, 2); + + /* Almost no cleaning occurs before data expires. */ + int nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned <= 1); + + /* + * Delete 1/3 records explicitly, causing an expired/obsolete overlap, + * but leaving the per-file max utilization fairly high. + */ + deleteRecords(3); + + /* Advance time and most files are cleaned. */ + TTLTest.fixedSystemTime += 2 * TTL.MILLIS_PER_HOUR; + nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned >= 6); + + /* All runs are two-pass runs. */ + final EnvironmentStats stats = env.getStats(null); + Assert.assertTrue(stats.getNCleanerTwoPassRuns() >= 6); + assertEquals(stats.getNCleanerRuns(), stats.getNCleanerTwoPassRuns()); + assertEquals(0, stats.getNCleanerRevisalRuns()); + assertTrue(stats.getNCleanerTwoPassRuns() <= stats.getNCleanerRuns()); + + close(); + } + + /** + * Checks that a revisal run (not true cleaning) is used when: + * + expired and obsolete data mostly overlap, and + * + max utilization per-file is over the two-pass threshold, and + * + the true utilization of the files is also above the threshold. + * + * @see EnvironmentParams#CLEANER_TWO_PASS_GAP + * @see EnvironmentParams#CLEANER_TWO_PASS_THRESHOLD + */ + @Test + public void testRevisalCleaning() { + + /* Use start time near the end of the hour period. See testCleanLNs.
*/ + TTLTest.setFixedTimeHook(TTL.MILLIS_PER_HOUR - 100); + + final EnvironmentConfig envConfig = createEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(10 * 1024 * 1024)); + + open(envConfig); + + /* Write 10 files, giving 1/2 records a TTL. */ + writeFiles(10, 4, 1024, 2); + + /* Almost no cleaning occurs before data expires. */ + int nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned <= 1); + + /* + * Delete 1/2 records explicitly, causing an expired/obsolete overlap, + * but leaving the per-file max utilization fairly high. + */ + deleteRecords(2); + + /* Advance time and most files are cleaned. */ + TTLTest.fixedSystemTime += 2 * TTL.MILLIS_PER_HOUR; + nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned >= 6); + + /* Most runs are revisal runs. */ + EnvironmentStats stats = env.getStats(null); + assertTrue(stats.getNCleanerRevisalRuns() >= 6); + assertTrue(stats.getNCleanerTwoPassRuns() <= stats.getNCleanerRuns()); + + /* + * Now check that the same files can be cleaned again, if they become + * eligible. + */ + env.getStats(new StatsConfig().setClear(true)); + deleteRecords(1); + nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned >= 6); + stats = env.getStats(null); + assertTrue(stats.getNCleanerRuns() >= 6); + assertEquals(0, stats.getNCleanerRevisalRuns()); + + close(); + } + + /** + * Checks that cleaning is gradual rather than spiking on expiration time + * boundaries. Hours is used here but the same test applies to days and the + * histogram tests ensure that data expires gradually for days and hours. + * + * Cleaning will not itself occur gradually in this test, + * unfortunately, because the data in all files expires at the same + * time, so once utilization drops below 50 all files are cleaned. + * This is unlike the real world, where data will expire at different + * times. Therefore, we test gradual cleaning by looking at the + * predictedMinUtilization, which is used to drive cleaning. This should + * decrease gradually. + */ + @Test + public void testGradualExpiration() { + + /* + * Use start time on an hour boundary because we want to test gradual + * expiration from the start of the time period to the end. + */ + final long startTime = TTL.MILLIS_PER_DAY; + TTLTest.setFixedTimeHook(startTime); + + final EnvironmentConfig envConfig = createEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(10 * 1024 * 1024)); + + open(envConfig); + + /* Write 10 files, giving all records a TTL. */ + writeFiles(10, 4, 1024, 1); + + /* Almost no cleaning occurs before data expires. */ + int nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned <= 1); + + final UtilizationCalculator calculator = + envImpl.getCleaner().getUtilizationCalculator(); + + /* + * Collect predictedMinUtilization values as we bump the time by one + * minute and attempt cleaning. 
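The gradual-expiration expectation checked in checkHistogram above (expectGradualBytes), and mirrored by the predictedMinUtilization check here, is plain linear interpolation across the current TTL interval. A worked example with made-up figures:

```java
// Made-up figures, not taken from any test run:
final long ttlInterval = TimeUnit.HOURS.toMillis(1); // some TTL uses HOURS
final int prevExpiredBytes = 1000;  // expired as of the previous boundary
final int expiredBytes = 5000;      // expired once this interval completes
final long newMs = TimeUnit.MINUTES.toMillis(15);    // 15 min into the interval

final int newlyExpiredBytes = expiredBytes - prevExpiredBytes; // 4000
final long expectGradualBytes =
    prevExpiredBytes + ((newlyExpiredBytes * newMs) / ttlInterval);
// 1000 + (4000 * 15min) / 60min = 1000 + 1000 = 2000
```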
+ */ + final long checkStartTime = startTime + TimeUnit.HOURS.toMillis(1); + final long checkEndTime = checkStartTime + TimeUnit.HOURS.toMillis(1); + final long checkInterval = TimeUnit.MINUTES.toMillis(1); + final Deque<Integer> predictedUtils = new LinkedList<>(); + + for (long time = checkStartTime; + time <= checkEndTime; + time += checkInterval) { + + TTLTest.fixedSystemTime = time; + env.cleanLog(); + + predictedUtils.add(calculator.getPredictedMinUtilization()); + } + + /* + * Expect a wide spread of predictedMinUtilization values that are + * decreasing and fairly evenly spaced. + */ + final int firstUtil = predictedUtils.getFirst(); + final int lastUtil = predictedUtils.getLast(); + + assertTrue( + "firstUtil=" + firstUtil + " lastUtil=" + lastUtil, + firstUtil > 90 && lastUtil < 10); + + int prevUtil = predictedUtils.removeFirst(); + + for (final int util : predictedUtils) { + + final int decrease = prevUtil - util; + + assertTrue( + "util=" + util + " prevUtil=" + prevUtil, + decrease >= 0 && decrease < 5); + + prevUtil = util; + } + + close(); + } + + @Test + public void testExpirationDisabled() { + + /* Use start time near the end of the hour period. See testCleanLNs. */ + TTLTest.setFixedTimeHook(TTL.MILLIS_PER_HOUR - 100); + + final EnvironmentConfig envConfig = createEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(10 * 1024 * 1024)); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_EXPIRATION_ENABLED, "false"); + + open(envConfig); + + /* Write 10 files, giving all records a TTL. */ + writeFiles(10, 4, 1024, 1); + final long nRecords = db.count(); + + /* Almost no cleaning occurs before data expires. */ + int nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned <= 1); + + /* + * Advance time, but no files are cleaned because expiration is + * disabled, and all data is still readable. + */ + TTLTest.fixedSystemTime += 2 * TTL.MILLIS_PER_HOUR; + nFilesCleaned = env.cleanLog(); + assertEquals(0, nFilesCleaned); + + long count = 0; + + try (final Cursor cursor = db.openCursor(null, null)) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + while (cursor.get(key, data, Get.NEXT, null) != null) { + count += 1; + } + } + + assertEquals(nRecords, count); + + /* Enable expiration and expect cleaning and filtering of all data. */ + envConfig.setConfigParam( + EnvironmentConfig.ENV_EXPIRATION_ENABLED, "true"); + + env.setMutableConfig(envConfig); + + nFilesCleaned = env.cleanLog(); + assertTrue(String.valueOf(nFilesCleaned), nFilesCleaned >= 6); + + try (final Cursor cursor = db.openCursor(null, null)) { + if (cursor.get(null, null, Get.NEXT, null) != null) { + fail(); + } + } + + assertEquals(0, db.count()); + + close(); + } + + @Test + public void testProfileRecovery() { + + final EnvironmentConfig envConfig = createEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(1000000)); + + open(envConfig); + + /* Write 10 files, giving all records a TTL.
+ */ + writeFiles(10, 4, 1024, 1); + + close(); + + final AtomicBoolean phaseSeen = new AtomicBoolean(false); + final AtomicLong numSeen = new AtomicLong(0); + + envConfig.setRecoveryProgressListener( + new ProgressListener<RecoveryProgress>() { + @Override + public boolean progress( + RecoveryProgress phase, + long n, + long total) { + + if (phase == + RecoveryProgress.POPULATE_EXPIRATION_PROFILE) { + + phaseSeen.set(true); + numSeen.incrementAndGet(); + } + + return true; + } + } + ); + + open(envConfig); + + assertTrue(phaseSeen.get()); + assertEquals(1, numSeen.get()); + + close(); + } + + /** + * Inserts records with the given size until the given number of files are + * added. Every expireNth record is assigned a 1 hour TTL. + */ + private void writeFiles(final int nFiles, + final int keySize, + final int dataSize, + final int expireNth) { + + assert keySize >= 4; + + final byte[] keyBytes = new byte[keySize]; + final TupleOutput keyOut = new TupleOutput(keyBytes); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[dataSize]); + final WriteOptions options = new WriteOptions(); + + final FileManager fileManager = envImpl.getFileManager(); + final long prevLastFile = fileManager.getCurrentFileNum(); + + for (int i = 0;; i += 1) { + + if (fileManager.getCurrentFileNum() - prevLastFile >= nFiles) { + break; + } + + keyOut.reset(); + keyOut.writeInt(i); + key.setData(keyBytes); + + options.setTTL( + (i % expireNth == 0) ? 1 : 0, + TimeUnit.HOURS); + + db.put(null, key, data, Put.OVERWRITE, options); + } + } + + /** + * Deletes every deleteNth record. + */ + private void deleteRecords(final int deleteNth) { + + final Transaction txn = env.beginTransaction(null, null); + + try (final Cursor cursor = db.openCursor(txn, null)) { + + int counter = 0; + + while (cursor.get(null, null, Get.NEXT, null) != null) { + if (counter % deleteNth == 0) { + cursor.delete(null); + } + counter += 1; + } + } + + txn.commit(); + } +} diff --git a/test/com/sleepycat/je/cleaner/TruncateAndRemoveTest.java b/test/com/sleepycat/je/cleaner/TruncateAndRemoveTest.java new file mode 100644 index 0000000..0710eb1 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/TruncateAndRemoveTest.java @@ -0,0 +1,1455 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.nio.ByteBuffer; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.DumpFileReader; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.TestHook; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Test cleaning and utilization counting for database truncate and remove. + */ +@RunWith(Parameterized.class) +public class TruncateAndRemoveTest extends CleanerTestBase { + + private static final String DB_NAME1 = "foo"; + private static final String DB_NAME2 = "bar"; + private static final int RECORD_COUNT = 100; + + private static final CheckpointConfig FORCE_CHECKPOINT = + new CheckpointConfig(); + static { + FORCE_CHECKPOINT.setForce(true); + } + + private static final boolean DEBUG = false; + + private EnvironmentImpl envImpl; + private Database db; + private DatabaseImpl dbImpl; + private JUnitThread junitThread; + private boolean fetchObsoleteSize; + private boolean truncateOrRemoveDone; + private boolean dbEviction; + + private boolean embeddedLNs = false; + + @Parameters + public static List genParams() { + + return getEnv(new boolean[] {false, true}); + } + + public TruncateAndRemoveTest (boolean envMultiDir) { + envMultiSubDir = envMultiDir; + customName = (envMultiSubDir) ? "multi-sub-dir" : null; + } + + @After + public void tearDown() + throws Exception { + + if (junitThread != null) { + junitThread.shutdown(); + junitThread = null; + } + super.tearDown(); + db = null; + dbImpl = null; + envImpl = null; + } + + /** + * Opens the environment. + */ + private void openEnv(boolean transactional) + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + config.setTransactional(transactional); + config.setAllowCreate(true); + /* Do not run the daemons since they interfere with LN counting. 
*/ + config.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + config.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + config.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + /* Use small nodes to test the post-txn scanning. */ + config.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "10"); + config.setConfigParam + (EnvironmentParams.NODE_MAX_DUPTREE.getName(), "10"); + if (envMultiSubDir) { + config.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + } + + /* Use small files to ensure that there is cleaning. */ + config.setConfigParam("je.cleaner.minUtilization", "80"); + DbInternal.disableParameterValidation(config); + config.setConfigParam("je.log.fileMax", "4000"); + /* With tiny files we can't log expiration profile records. */ + DbInternal.setCreateEP(config, false); + + /* Obsolete LN size counting is optional per test. */ + if (fetchObsoleteSize) { + config.setConfigParam + (EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE.getName(), + "true"); + } + + env = new Environment(envHome, config); + envImpl = DbInternal.getNonNullEnvImpl(env); + + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + config = env.getConfig(); + dbEviction = config.getConfigParam + (EnvironmentParams.ENV_DB_EVICTION.getName()).equals("true"); + } + + /** + * Opens the database. + */ + private void openDb(Transaction useTxn, String dbName) + throws DatabaseException { + + openDb(useTxn, dbName, null /*cacheMode*/); + } + + private void openDb(Transaction useTxn, String dbName, CacheMode cacheMode) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + EnvironmentConfig envConfig = env.getConfig(); + dbConfig.setTransactional(envConfig.getTransactional()); + dbConfig.setAllowCreate(true); + dbConfig.setCacheMode(cacheMode); + db = env.openDatabase(useTxn, dbName, dbConfig); + dbImpl = DbInternal.getDbImpl(db); + } + + /** + * Closes the database. + */ + private void closeDb() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + dbImpl = null; + } + } + + /** + * Closes the environment and database. + */ + private void closeEnv() + throws DatabaseException { + + closeDb(); + + if (env != null) { + env.close(); + env = null; + envImpl = null; + } + } + + @Test + public void testTruncate() + throws Exception { + + doTestTruncate(false /*simulateCrash*/); + } + + @Test + public void testTruncateRecover() + throws Exception { + + doTestTruncate(true /*simulateCrash*/); + } + + /** + * Test that truncate generates the right number of obsolete LNs. + */ + private void doTestTruncate(boolean simulateCrash) + throws Exception { + + openEnv(true); + openDb(null, DB_NAME1); + + if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) { + closeEnv(); + return; + } + + writeAndCountRecords(null, RECORD_COUNT); + DatabaseImpl saveDb = dbImpl; + DatabaseId saveId = dbImpl.getId(); + closeDb(); + + Transaction txn = env.beginTransaction(null, null); + + truncate(txn, true); + + ObsoleteCounts beforeCommit = getObsoleteCounts(); + + /* + * The commit of the truncation creates 3 additional obsolete logrecs: + * prev MapLN + deleted MapLN + prev NameLN + */ + txn.commit(); + truncateOrRemoveDone = true; + + /* Make sure use count is decremented when we commit.
+ */ + assertDbInUse(saveDb, false); + openDb(null, DB_NAME1); + saveDb = dbImpl; + closeDb(); + assertDbInUse(saveDb, false); + + if (simulateCrash) { + envImpl.abnormalClose(); + envImpl = null; + env = null; + openEnv(true); + /* After recovery, expect that the record LNs are obsolete. */ + ObsoleteCounts afterCrash = getObsoleteCounts(); + + int obsolete = afterCrash.obsoleteLNs - beforeCommit.obsoleteLNs; + + if (embeddedLNs) { + assertEquals(3, obsolete); + } else { + assertTrue("obsolete=" + obsolete + " expected=" + RECORD_COUNT, + obsolete >= RECORD_COUNT); + } + } else { + int expectedObsLNs = (embeddedLNs ? 3 : RECORD_COUNT + 3); + verifyUtilization(beforeCommit, + expectedObsLNs, + 15); // 1 root, 2 INs, 12 BINs + } + + closeEnv(); + batchCleanAndVerify(saveId); + } + + /** + * Test that aborting truncate generates the right number of obsolete LNs. + */ + @Test + public void testTruncateAbort() + throws Exception { + + openEnv(true); + openDb(null, DB_NAME1); + + if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) { + closeEnv(); + return; + } + + writeAndCountRecords(null, RECORD_COUNT); + DatabaseImpl saveDb = dbImpl; + closeDb(); + + Transaction txn = env.beginTransaction(null, null); + truncate(txn, true); + ObsoleteCounts beforeAbort = getObsoleteCounts(); + txn.abort(); + + /* Make sure use count is decremented when we abort. */ + assertDbInUse(saveDb, false); + openDb(null, DB_NAME1); + saveDb = dbImpl; + closeDb(); + assertDbInUse(saveDb, false); + + /* + * The obsolete count should include the records inserted after + * the truncate. + */ + verifyUtilization(beforeAbort, + /* 1 new nameLN, 2 copies of MapLN for new db */ + 3, + 0); + + /* Reopen, db should be populated. */ + openDb(null, DB_NAME1); + assertEquals(RECORD_COUNT, countRecords(null)); + closeEnv(); + } + + /** + * Test that aborting truncate generates the right number of obsolete LNs. + */ + @Test + public void testTruncateRepopulateAbort() + throws Exception { + + openEnv(true); + openDb(null, DB_NAME1); + + if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) { + closeEnv(); + return; + } + + writeAndCountRecords(null, RECORD_COUNT); + + closeDb(); + + Transaction txn = env.beginTransaction(null, null); + + /* + * Truncation creates 2 additional logrecs: new MapLN and nameLN logrecs + * for the new db. + */ + truncate(txn, true); + + /* populate the database with some more records. */ + openDb(txn, DB_NAME1); + + writeAndCountRecords(txn, RECORD_COUNT/4); + + DatabaseImpl saveDb = dbImpl; + DatabaseId saveId = dbImpl.getId(); + closeDb(); + + ObsoleteCounts beforeAbort = getObsoleteCounts(); + + /* + * The abort generates one more MapLN logrec: for the deletion of the + * new DB. This logrec, together with the 2 logrecs generated by the + * truncate call above, are additional obsolete logrecs, not counted in + * beforeAbort. + */ + txn.abort(); + + /* + * We set truncateOrRemoveDone to true (meaning that per-DB utilization + * will not be verified) even though the txn was aborted, because the + * discarded new DatabaseImpl will not be counted, yet it includes INs + * and LNs from the operations above. + */ + truncateOrRemoveDone = true; + + /* Make sure use count is decremented when we abort. */ + assertDbInUse(saveDb, false); + openDb(null, DB_NAME1); + saveDb = dbImpl; + closeDb(); + assertDbInUse(saveDb, false); + + /* + * The obsolete count should include the records inserted after + * the truncate. + */ + int expectedObsLNs = (embeddedLNs ?
3 : RECORD_COUNT/4 + 3);
+
+ verifyUtilization(beforeAbort, expectedObsLNs, 5);
+
+ /* Reopen, db should be populated. */
+ openDb(null, DB_NAME1);
+ assertEquals(RECORD_COUNT, countRecords(null));
+
+ closeEnv();
+ batchCleanAndVerify(saveId);
+ }
+
+ @Test
+ public void testRemove()
+ throws Exception {
+
+ doTestRemove(false /*simulateCrash*/);
+ }
+
+ @Test
+ public void testRemoveRecover()
+ throws Exception {
+
+ doTestRemove(true /*simulateCrash*/);
+ }
+
+ /**
+ * Test that remove generates the right number of obsolete LNs.
+ */
+ private void doTestRemove(boolean simulateCrash)
+ throws Exception {
+
+ openEnv(true);
+ openDb(null, DB_NAME1);
+
+ if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) {
+ closeEnv();
+ return;
+ }
+
+ writeAndCountRecords(null, RECORD_COUNT);
+
+ DatabaseImpl saveDb = dbImpl;
+ DatabaseId saveId = dbImpl.getId();
+ closeDb();
+
+ Transaction txn = env.beginTransaction(null, null);
+
+ env.removeDatabase(txn, DB_NAME1);
+
+ ObsoleteCounts beforeCommit = getObsoleteCounts();
+
+ txn.commit();
+ truncateOrRemoveDone = true;
+
+ /* Make sure use count is decremented when we commit. */
+ assertDbInUse(saveDb, false);
+
+ if (simulateCrash) {
+ envImpl.abnormalClose();
+ envImpl = null;
+ env = null;
+ openEnv(true);
+
+ /* After recovery, expect that the record LNs are obsolete. */
+ ObsoleteCounts afterCrash = getObsoleteCounts();
+
+ int obsolete = afterCrash.obsoleteLNs - beforeCommit.obsoleteLNs;
+
+ if (embeddedLNs) {
+ assertEquals(3, obsolete);
+ } else {
+ assertTrue("obsolete=" + obsolete +
+ " expected=" + RECORD_COUNT,
+ obsolete >= RECORD_COUNT);
+ }
+ } else {
+
+ /* LNs + old NameLN, old MapLN, delete MapLN */
+ int expectedObsLNs = (embeddedLNs ? 3 : RECORD_COUNT + 3);
+
+ verifyUtilization(beforeCommit, expectedObsLNs, 15);
+ }
+
+ openDb(null, DB_NAME1);
+ assertEquals(0, countRecords(null));
+
+ closeEnv();
+ batchCleanAndVerify(saveId);
+ }
+
+ /**
+ * Test that a non-transactional remove generates the right number of
+ * obsolete LNs.
+ */
+ @Test
+ public void testNonTxnalRemove()
+ throws Exception {
+
+ openEnv(false);
+ openDb(null, DB_NAME1);
+
+ if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) {
+ closeEnv();
+ return;
+ }
+
+ writeAndCountRecords(null, RECORD_COUNT);
+ DatabaseImpl saveDb = dbImpl;
+ DatabaseId saveId = dbImpl.getId();
+ closeDb();
+ ObsoleteCounts beforeOperation = getObsoleteCounts();
+ env.removeDatabase(null, DB_NAME1);
+ truncateOrRemoveDone = true;
+
+ /* Make sure use count is decremented. */
+ assertDbInUse(saveDb, false);
+
+ /* LNs + new NameLN, old NameLN, old MapLN, delete MapLN */
+ int expectedObsLNs = (embeddedLNs ? 4 : RECORD_COUNT + 4);
+
+ verifyUtilization(beforeOperation, expectedObsLNs, 15);
+
+ openDb(null, DB_NAME1);
+ assertEquals(0, countRecords(null));
+
+ closeEnv();
+ batchCleanAndVerify(saveId);
+ }
+
+ /**
+ * Test that aborting remove generates the right number of obsolete LNs.
+ */
+ @Test
+ public void testRemoveAbort()
+ throws Exception {
+
+ /* Create database, populate, remove, abort the remove. */
+ openEnv(true);
+ openDb(null, DB_NAME1);
+
+ if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) {
+ closeEnv();
+ return;
+ }
+
+ writeAndCountRecords(null, RECORD_COUNT);
+ DatabaseImpl saveDb = dbImpl;
+ closeDb();
+ Transaction txn = env.beginTransaction(null, null);
+ env.removeDatabase(txn, DB_NAME1);
+ ObsoleteCounts beforeAbort = getObsoleteCounts();
+ txn.abort();
+
+ /* Make sure use count is decremented when we abort. 
*/
+ assertDbInUse(saveDb, false);
+
+ verifyUtilization(beforeAbort, 0, 0);
+
+ /* All records should be there. */
+ openDb(null, DB_NAME1);
+ assertEquals(RECORD_COUNT, countRecords(null));
+
+ closeEnv();
+
+ /*
+ * Batch clean and then check the record count again, just to make sure
+ * we don't lose any valid data.
+ */
+ openEnv(true);
+ while (env.cleanLog() > 0) {
+ }
+ CheckpointConfig force = new CheckpointConfig();
+ force.setForce(true);
+ env.checkpoint(force);
+ closeEnv();
+
+ openEnv(true);
+ openDb(null, DB_NAME1);
+ assertEquals(RECORD_COUNT, countRecords(null));
+ closeEnv();
+ }
+
+ /**
+ * The same as testRemoveNotResident but forces fetching of obsolete LNs
+ * in order to count their sizes accurately.
+ */
+ @Test
+ public void testRemoveNotResidentFetchObsoleteSize()
+ throws Exception {
+
+ fetchObsoleteSize = true;
+ testRemoveNotResident();
+ }
+
+ /**
+ * Test that we can properly account for a non-resident database.
+ */
+ @Test
+ public void testRemoveNotResident()
+ throws Exception {
+
+ /* Create a database, populate. */
+ openEnv(true);
+
+ /* Use EVICT_LN so that updates do not count obsolete size. */
+ openDb(null, DB_NAME1, CacheMode.EVICT_LN);
+
+ if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) {
+ closeEnv();
+ return;
+ }
+
+ writeAndCountRecords(null, RECORD_COUNT);
+ /* Updates will not count obsolete size. */
+ writeAndCountRecords(null, RECORD_COUNT);
+ DatabaseId saveId = DbInternal.getDbImpl(db).getId();
+ closeEnv();
+
+ /*
+ * Open the environment and remove the database. The
+ * database is not resident at all.
+ */
+ openEnv(true);
+ Transaction txn = env.beginTransaction(null, null);
+ env.removeDatabase(txn, DB_NAME1);
+ ObsoleteCounts beforeCommit = getObsoleteCounts();
+ txn.commit();
+ truncateOrRemoveDone = true;
+
+ /* LNs + old NameLN, old MapLN, delete MapLN */
+ int expectedObsLNs = (embeddedLNs ? 3 : RECORD_COUNT + 3);
+
+ verifyUtilization(beforeCommit,
+ expectedObsLNs,
+ /*
+ * 15 INs for data tree, plus 2 for FileSummaryDB
+ * split during tree walk.
+ */
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove ?
+ 17 : 15,
+ /* Records re-written + deleted + aborted LN. */
+ RECORD_COUNT + 2,
+ /* Records written twice. */
+ RECORD_COUNT * 2,
+ true /*expectAccurateObsoleteLNCount*/);
+
+ /* Check record count. */
+ openDb(null, DB_NAME1);
+ assertEquals(0, countRecords(null));
+
+ closeEnv();
+ batchCleanAndVerify(saveId);
+ }
+
+ /**
+ * The same as testRemovePartialResident but forces fetching of obsolete
+ * LNs in order to count their sizes accurately.
+ */
+ @Test
+ public void testRemovePartialResidentFetchObsoleteSize()
+ throws Exception {
+
+ fetchObsoleteSize = true;
+ testRemovePartialResident();
+ }
+
+ /**
+ * Test that we can properly account for a partially resident tree.
+ */
+ @Test
+ public void testRemovePartialResident()
+ throws Exception {
+
+ /* Create a database, populate. */
+ openEnv(true);
+ /* Use EVICT_LN so that updates do not count obsolete size. */
+ openDb(null, DB_NAME1, CacheMode.EVICT_LN);
+
+ if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) {
+ closeEnv();
+ return;
+ }
+
+ writeAndCountRecords(null, RECORD_COUNT);
+ /* Updates will not count obsolete size. */
+ writeAndCountRecords(null, RECORD_COUNT);
+ DatabaseId saveId = DbInternal.getDbImpl(db).getId();
+ closeEnv();
+
+ /*
+ * Open the environment and remove the database. Pull 1 BIN in. 
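+ * Reading the first record below faults in a single BIN, so the
+ * rest of the tree stays non-resident for the remove.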
+ */
+ openEnv(true);
+ openDb(null, DB_NAME1);
+ Cursor c = db.openCursor(null, null);
+ assertEquals(OperationStatus.SUCCESS,
+ c.getFirst(new DatabaseEntry(), new DatabaseEntry(),
+ LockMode.DEFAULT));
+ c.close();
+ DatabaseImpl saveDb = dbImpl;
+ closeDb();
+
+ Transaction txn = env.beginTransaction(null, null);
+ env.removeDatabase(txn, DB_NAME1);
+ ObsoleteCounts beforeCommit = getObsoleteCounts();
+ txn.commit();
+ truncateOrRemoveDone = true;
+
+ /* Make sure use count is decremented when we commit. */
+ assertDbInUse(saveDb, false);
+
+ /* LNs + old NameLN, old MapLN, delete MapLN */
+ int expectedObsLNs = (embeddedLNs ? 3 : RECORD_COUNT + 3);
+
+ verifyUtilization(beforeCommit,
+ expectedObsLNs,
+ /*
+ * 15 INs for data tree, plus 2 for FileSummaryDB
+ * split during tree walk.
+ */
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove ?
+ 17 : 15,
+ /* Records re-written + deleted + aborted LN. */
+ RECORD_COUNT + 2,
+ /* Records written twice. */
+ RECORD_COUNT * 2,
+ true /*expectAccurateObsoleteLNCount*/);
+
+ /* Check record count. */
+ openDb(null, DB_NAME1);
+ assertEquals(0, countRecords(null));
+
+ closeEnv();
+ batchCleanAndVerify(saveId);
+ }
+
+ /**
+ * Tests that a log file is not deleted by the cleaner when it contains
+ * entries in a database that is pending deletion.
+ */
+ @Test
+ public void testDBPendingDeletion()
+ throws DatabaseException, InterruptedException {
+
+ doDBPendingTest(RECORD_COUNT + 30, false /*deleteAll*/, 5);
+ }
+
+ /**
+ * Like testDBPendingDeletion but creates a scenario where only a single
+ * log file is cleaned, and that log file contains only known obsolete
+ * log entries. This reproduced a bug where we neglected to add pending
+ * deleted DBs to the cleaner's pending DB set if all entries in the log
+ * file were known to be obsolete. [#13333]
+ */
+ @Test
+ public void testObsoleteLogFile()
+ throws DatabaseException, InterruptedException {
+
+ doDBPendingTest(70, true /*deleteAll*/, 1);
+ }
+
+ private void doDBPendingTest(int recordCount,
+ boolean deleteAll,
+ int expectFilesCleaned)
+ throws DatabaseException, InterruptedException {
+
+ /* Create a database, populate, close. */
+ Set logFiles = new HashSet();
+ openEnv(true);
+ openDb(null, DB_NAME1);
+
+ if (embeddedLNs && DatabaseImpl.forceTreeWalkForTruncateAndRemove) {
+ closeEnv();
+ return;
+ }
+
+ writeAndMakeWaste(recordCount, logFiles, deleteAll);
+ int remainingRecordCount = deleteAll ? 0 : recordCount;
+ env.checkpoint(FORCE_CHECKPOINT);
+ ObsoleteCounts obsoleteCounts = getObsoleteCounts();
+ DatabaseImpl saveDb = dbImpl;
+ closeDb();
+ assertTrue(!saveDb.isDeleteFinished());
+ assertTrue(!saveDb.isDeleted());
+ assertDbInUse(saveDb, false);
+
+ /* Make sure that we wrote a full file's worth of LNs. */
+ assertTrue(logFiles.size() >= 2);
+ assertTrue(logFilesExist(logFiles));
+
+ /* Remove the database but do not commit yet. */
+ final Transaction txn = env.beginTransaction(null, null);
+ env.removeDatabase(txn, DB_NAME1);
+
+ /*
+ * The obsolete count should be <= 1 (for the NameLN).
+ *
+ * The reason for passing false for expectAccurateObsoleteLNCount is
+ * that the NameLN deletion is not committed. It is not yet counted
+ * obsolete by live utilization counting, but will be counted obsolete
+ * by the utilization recalculation utility, which assumes that
+ * transactions will commit. 
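+ * (Until then the two counting mechanisms can legitimately disagree.)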
[#22208] + */ + obsoleteCounts = verifyUtilization + (obsoleteCounts, 1, 0, 0, 0, + false /*expectAccurateObsoleteLNCount*/); + truncateOrRemoveDone = true; + + junitThread = new JUnitThread("Committer") { + @Override + public void testBody() { + try { + txn.commit(); + } catch (Throwable e) { + e.printStackTrace(); + } + } + }; + + /* + * Set a hook to cause the commit to block. The commit is done in a + * separate thread. The commit will set the DB state to pendingDeleted + * and will then wait for the hook to return. + */ + final Object lock = new Object(); + + saveDb.setPendingDeletedHook(new TestHook() { + public void doHook() { + synchronized (lock) { + try { + lock.notify(); + lock.wait(); + } catch (InterruptedException e) { + e.printStackTrace(); + throw new RuntimeException(e.toString()); + } + } + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + }); + + /* Start the committer thread; expect the pending deleted state. */ + synchronized (lock) { + junitThread.start(); + lock.wait(); + } + assertTrue(!saveDb.isDeleteFinished()); + assertTrue(saveDb.isDeleted()); + assertDbInUse(saveDb, true); + + /* Expect obsolete LNs: NameLN */ + obsoleteCounts = verifyUtilization(obsoleteCounts, 1, 0); + + /* The DB deletion is pending; the log file should still exist. */ + int filesCleaned = env.cleanLog(); + assertEquals(expectFilesCleaned, filesCleaned); + assertTrue(filesCleaned > 0); + env.checkpoint(FORCE_CHECKPOINT); + env.checkpoint(FORCE_CHECKPOINT); + assertTrue(logFilesExist(logFiles)); + + /* + * When the committer thread finishes, the DB deletion will be + * complete and the DB state will change to deleted. + */ + synchronized (lock) { + lock.notify(); + } + try { + junitThread.finishTest(); + junitThread = null; + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } + assertTrue(saveDb.isDeleteFinished()); + assertTrue(saveDb.isDeleted()); + assertDbInUse(saveDb, false); + + /* Expect obsolete LNs: recordCount + MapLN + FSLNs (apprx). */ + int expectedObsLNs = (embeddedLNs ? 0 : remainingRecordCount) + 8; + verifyUtilization(obsoleteCounts, expectedObsLNs, 0); + + /* The DB deletion is complete; the log file should be deleted. */ + env.checkpoint(FORCE_CHECKPOINT); + env.checkpoint(FORCE_CHECKPOINT); + assertTrue(!logFilesExist(logFiles)); + } + + /* + * The xxxForceTreeWalk tests set the DatabaseImpl + * forceTreeWalkForTruncateAndRemove field to true, which will force a walk + * of the tree to count utilization during truncate/remove, rather than + * using the per-database info. This is used to test the "old technique" + * for counting utilization, which is now used only if the database was + * created prior to log version 6. 
+ */
+ @Test
+ public void testTruncateForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testTruncate();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testTruncateAbortForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testTruncateAbort();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testTruncateRepopulateAbortForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testTruncateRepopulateAbort();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testRemoveForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testRemove();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testNonTxnalRemoveForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testNonTxnalRemove();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testRemoveAbortForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testRemoveAbort();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testRemoveNotResidentForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testRemoveNotResident();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testRemovePartialResidentForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testRemovePartialResident();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testDBPendingDeletionForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testDBPendingDeletion();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ @Test
+ public void testObsoleteLogFileForceTreeWalk()
+ throws Exception {
+
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = true;
+ try {
+ testObsoleteLogFile();
+ } finally {
+ DatabaseImpl.forceTreeWalkForTruncateAndRemove = false;
+ }
+ }
+
+ /**
+ * Tickles a bug that caused an NPE during recovery in the sequence:
+ * delete record, truncate DB, crash (close without checkpoint), and
+ * recover. [#16515]
+ */
+ @Test
+ public void testDeleteTruncateRecover()
+ throws DatabaseException {
+
+ /* Delete a record. */
+ openEnv(true);
+ openDb(null, DB_NAME1);
+ writeAndCountRecords(null, 1);
+ closeDb();
+
+ /* Truncate DB. */
+ Transaction txn = env.beginTransaction(null, null);
+ truncate(txn, false);
+ txn.commit();
+
+ /* Close without checkpoint. */
+ envImpl.close(false /*doCheckpoint*/);
+ envImpl = null;
+ env = null;
+
+ /* Recover -- the bug caused an NPE here. */
+ openEnv(true);
+ closeEnv();
+ }
+
+ private void writeAndCountRecords(Transaction txn, long count)
+ throws DatabaseException {
+
+ for (int i = 1; i <= count; i += 1) {
+ DatabaseEntry entry = new DatabaseEntry(TestUtils.getTestArray(i));
+
+ db.put(txn, entry, entry);
+ }
+
+ /* Insert and delete some records, insert and abort some records. 
*/
+ DatabaseEntry entry =
+ new DatabaseEntry(TestUtils.getTestArray((int)count+1));
+ db.put(txn, entry, entry);
+ db.delete(txn, entry);
+
+ EnvironmentConfig envConfig = env.getConfig();
+ if (envConfig.getTransactional()) {
+ entry = new DatabaseEntry(TestUtils.getTestArray(0));
+ Transaction txn2 = env.beginTransaction(null, null);
+ db.put(txn2, entry, entry);
+ txn2.abort();
+ txn2 = null;
+ }
+
+ assertEquals(count, countRecords(txn));
+ }
+
+ /**
+ * Writes the specified number of records to the db and checks the record
+ * count. Adds the numbers of the log files written to the logFilesWritten
+ * set.
+ *
+ * Makes waste (obsolete records): If doDelete=true, deletes records as
+ * they are added; otherwise does updates to produce obsolete records
+ * interleaved with non-obsolete records.
+ */
+ private void writeAndMakeWaste(long count,
+ Set logFilesWritten,
+ boolean doDelete)
+ throws DatabaseException {
+
+ Transaction txn = env.beginTransaction(null, null);
+ Cursor cursor = db.openCursor(txn, null);
+ for (int i = 0; i < count; i += 1) {
+ DatabaseEntry entry = new DatabaseEntry(TestUtils.getTestArray(i));
+ cursor.put(entry, entry);
+ /* Add log file written. */
+ long file = CleanerTestUtils.getLogFile(cursor);
+ logFilesWritten.add(new Long(file));
+ /* Make waste. */
+ if (!doDelete) {
+ cursor.put(entry, entry);
+ cursor.put(entry, entry);
+ }
+ }
+ if (doDelete) {
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ OperationStatus status;
+ for (status = cursor.getFirst(key, data, null);
+ status == OperationStatus.SUCCESS;
+ status = cursor.getNext(key, data, null)) {
+ /* Make waste. */
+ cursor.delete();
+ /* Add log file written. */
+ long file = CleanerTestUtils.getLogFile(cursor);
+ logFilesWritten.add(new Long(file));
+ }
+ }
+ cursor.close();
+ txn.commit();
+ assertEquals(doDelete ? 0 : count, countRecords(null));
+ }
+
+ /* Truncate database and check the count. */
+ private void truncate(Transaction useTxn, boolean getCount)
+ throws DatabaseException {
+
+ long nTruncated = env.truncateDatabase(useTxn, DB_NAME1, getCount);
+
+ if (getCount) {
+ assertEquals(RECORD_COUNT, nTruncated);
+ }
+
+ assertEquals(0, countRecords(useTxn));
+ }
+
+ /**
+ * Returns how many records are in the database.
+ */
+ private int countRecords(Transaction useTxn)
+ throws DatabaseException {
+
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ boolean opened = false;
+ if (db == null) {
+ openDb(useTxn, DB_NAME1);
+ opened = true;
+ }
+ Cursor cursor = db.openCursor(useTxn, null);
+ int count = 0;
+ try {
+ OperationStatus status = cursor.getFirst(key, data, null);
+ while (status == OperationStatus.SUCCESS) {
+ count += 1;
+ status = cursor.getNext(key, data, null);
+ }
+ } finally {
+ cursor.close();
+ }
+ if (opened) {
+ closeDb();
+ }
+ return count;
+ }
+
+ /**
+ * Returns the total obsolete node counts according to the
+ * UtilizationProfile and UtilizationTracker. 
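+ * (We assume the "true" argument to getFileSummaryMap folds the
+ * in-memory tracker counts into the per-file totals.)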
+ */
+ private ObsoleteCounts getObsoleteCounts() {
+ FileSummary[] files = envImpl.getUtilizationProfile()
+ .getFileSummaryMap(true)
+ .values().toArray(new FileSummary[0]);
+ int lnCount = 0;
+ int inCount = 0;
+ int lnSize = 0;
+ int lnSizeCounted = 0;
+ for (int i = 0; i < files.length; i += 1) {
+ lnCount += files[i].obsoleteLNCount;
+ inCount += files[i].obsoleteINCount;
+ lnSize += files[i].obsoleteLNSize;
+ lnSizeCounted += files[i].obsoleteLNSizeCounted;
+ }
+
+ return new ObsoleteCounts(lnCount, inCount, lnSize, lnSizeCounted);
+ }
+
+ private class ObsoleteCounts {
+ int obsoleteLNs;
+ int obsoleteINs;
+ int obsoleteLNSize;
+ int obsoleteLNSizeCounted;
+
+ ObsoleteCounts(int obsoleteLNs,
+ int obsoleteINs,
+ int obsoleteLNSize,
+ int obsoleteLNSizeCounted) {
+ this.obsoleteLNs = obsoleteLNs;
+ this.obsoleteINs = obsoleteINs;
+ this.obsoleteLNSize = obsoleteLNSize;
+ this.obsoleteLNSizeCounted = obsoleteLNSizeCounted;
+ }
+
+ @Override
+ public String toString() {
+ return "lns=" + obsoleteLNs + " ins=" + obsoleteINs +
+ " lnSize=" + obsoleteLNSize +
+ " lnSizeCounted=" + obsoleteLNSizeCounted;
+ }
+ }
+
+ private ObsoleteCounts verifyUtilization(ObsoleteCounts prev,
+ int expectedLNs,
+ int expectedINs)
+ throws DatabaseException {
+
+ return verifyUtilization(prev, expectedLNs, expectedINs, 0, 0,
+ true /*expectAccurateObsoleteLNCount*/);
+ }
+
+ /*
+ * Check obsolete counts. If the expected IN count is zero, don't
+ * check the obsolete IN count. Always check the obsolete LN count.
+ */
+ private ObsoleteCounts verifyUtilization(ObsoleteCounts prev,
+ int expectedLNs,
+ int expectedINs,
+ int expectLNsSizeNotCounted,
+ int minTotalLNsObsolete,
+ boolean expectAccurateObsoleteLNCount)
+ throws DatabaseException {
+
+ /*
+ * If we are not forcing a tree walk or we have explicitly configured
+ * fetchObsoleteSize, then the size of every LN should have been
+ * counted.
+ */
+ boolean expectAccurateObsoleteLNSize =
+ !DatabaseImpl.forceTreeWalkForTruncateAndRemove ||
+ fetchObsoleteSize;
+
+ /*
+ * Unless we are forcing the tree walk and not fetching to get
+ * obsolete size, the obsolete size is always counted.
+ */
+ if (fetchObsoleteSize ||
+ !DatabaseImpl.forceTreeWalkForTruncateAndRemove) {
+ expectLNsSizeNotCounted = 0;
+ }
+
+ ObsoleteCounts now = getObsoleteCounts();
+ String beforeAndAfter = "before: " + prev + " now: " + now;
+
+ final int newObsolete = now.obsoleteLNs - prev.obsoleteLNs;
+
+ assertEquals(beforeAndAfter, expectedLNs, newObsolete);
+
+ if (expectAccurateObsoleteLNSize) {
+ assertEquals(beforeAndAfter,
+ newObsolete + expectLNsSizeNotCounted,
+ now.obsoleteLNSizeCounted -
+ prev.obsoleteLNSizeCounted);
+ final int expectMinSize = minTotalLNsObsolete * 6 /*average*/;
+ assertTrue("expect min = " + expectMinSize +
+ " total size = " + now.obsoleteLNSize,
+ now.obsoleteLNSize > expectMinSize);
+ }
+
+ if (expectedINs > 0) {
+ assertEquals(beforeAndAfter, expectedINs,
+ now.obsoleteINs - prev.obsoleteINs);
+ }
+
+ /*
+ * We pass expectAccurateDbUtilization as false when
+ * truncateOrRemoveDone, because the database utilization info for that
+ * database is now gone.
+ */
+ VerifyUtils.verifyUtilization
+ (envImpl,
+ expectAccurateObsoleteLNCount,
+ expectAccurateObsoleteLNSize,
+ !truncateOrRemoveDone); // expectAccurateDbUtilization
+
+ return now;
+ }
+
+ /**
+ * Checks whether a given DB has a non-zero use count. Does nothing if
+ * je.dbEviction is not enabled, since reference counts are only maintained
+ * if that config parameter is enabled. 
+ */ + private void assertDbInUse(DatabaseImpl db, boolean inUse) { + if (dbEviction) { + assertEquals(inUse, db.isInUse()); + } + } + + /** + * Returns true if all files exist, or false if any file is deleted. + */ + private boolean logFilesExist(Set fileNumbers) { + + Iterator iter = fileNumbers.iterator(); + while (iter.hasNext()) { + long fileNum = ((Long) iter.next()).longValue(); + File file = new File(envImpl.getFileManager().getFullFileName + (fileNum, FileManager.JE_SUFFIX)); + if (!file.exists()) { + return false; + } + } + return true; + } + + /* + * Run batch cleaning and verify that there are no files with these + * log entries. + */ + private void batchCleanAndVerify(DatabaseId dbId) + throws Exception { + + /* + * Open the environment, flip the log files to reduce mixing of new + * records and old records and add more records to force the + * utilization level of the removed records down. + */ + openEnv(true); + openDb(null, DB_NAME2); + long lsn = envImpl.forceLogFileFlip(); + CheckpointConfig force = new CheckpointConfig(); + force.setForce(true); + env.checkpoint(force); + + writeAndCountRecords(null, RECORD_COUNT * 3); + env.checkpoint(force); + + closeDb(); + + /* Check log files, there should be entries with this database. */ + CheckReader checker = new CheckReader(envImpl, dbId, true); + while (checker.readNextEntry()) { + } + + if (DEBUG) { + System.out.println("entries for this db =" + checker.getCount()); + } + + assertTrue(checker.getCount() > 0); + + /* batch clean. */ + boolean anyCleaned = false; + while (env.cleanLog() > 0) { + anyCleaned = true; + } + + assertTrue(anyCleaned); + + if (anyCleaned) { + env.checkpoint(force); + } + + /* Check log files, there should be no entries with this database. */ + checker = new CheckReader(envImpl, dbId, false); + while (checker.readNextEntry()) { + } + + closeEnv(); + } + + class CheckReader extends DumpFileReader{ + + private final DatabaseId dbId; + private final boolean expectEntries; + private int count; + + /* + * @param databaseId we're looking for log entries for this database. + * @param expectEntries if false, there should be no log entries + * with this database id. If true, the log should have entries + * with this database id. + */ + CheckReader(EnvironmentImpl envImpl, + DatabaseId dbId, + boolean expectEntries) + throws DatabaseException { + + super(envImpl, 1000, DbLsn.NULL_LSN, DbLsn.NULL_LSN, + DbLsn.NULL_LSN, null, null, null, false, false, true); + this.dbId = dbId; + this.expectEntries = expectEntries; + } + + @Override + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + /* Figure out what kind of log entry this is */ + byte type = currentEntryHeader.getType(); + LogEntryType lastEntryType = LogEntryType.findType(type); + boolean isNode = lastEntryType.isNodeType(); + + /* Read the entry. 
*/ + LogEntry entry = lastEntryType.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + + long lsn = getLastLsn(); + if (isNode) { + boolean found = false; + if (entry instanceof INLogEntry) { + INLogEntry inEntry = (INLogEntry) entry; + found = dbId.equals(inEntry.getDbId()); + } else { + LNLogEntry lnEntry = (LNLogEntry) entry; + found = dbId.equals(lnEntry.getDbId()); + } + if (found) { + if (expectEntries) { + count++; + } else { + StringBuilder sb = new StringBuilder(); + entry.dumpEntry(sb, false); + fail("lsn=" + DbLsn.getNoFormatString(lsn) + + " dbId = " + dbId + + " entry= " + sb.toString()); + } + } + } + + return true; + } + + /* Num entries with this database id seen by reader. */ + int getCount() { + return count; + } + } +} diff --git a/test/com/sleepycat/je/cleaner/UtilizationTest.java b/test/com/sleepycat/je/cleaner/UtilizationTest.java new file mode 100644 index 0000000..6a2ae3a --- /dev/null +++ b/test/com/sleepycat/je/cleaner/UtilizationTest.java @@ -0,0 +1,1433 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.LogSource; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Test utilization counting of LNs. + */ +@RunWith(Parameterized.class) +public class UtilizationTest extends CleanerTestBase { + + private static final String DB_NAME = "foo"; + + private static final String OP_NONE = "op-none"; + private static final String OP_CHECKPOINT = "op-checkpoint"; + private static final String OP_RECOVER = "op-recover"; + //private static final String[] OPERATIONS = { OP_NONE, }; + //* + private static final String[] OPERATIONS = { OP_NONE, + OP_CHECKPOINT, + OP_RECOVER, + OP_RECOVER }; + //*/ + + /* + * Set fetchObsoleteSize=true only for the second OP_RECOVER test. 
+ * We check that OP_RECOVER works without fetching, but with fetching
+ * we check that all LN sizes are counted.
+ */
+ private static final boolean[] FETCH_OBSOLETE_SIZE = { false,
+ false,
+ false,
+ true };
+
+ private static final CheckpointConfig forceConfig = new CheckpointConfig();
+ static {
+ forceConfig.setForce(true);
+ }
+
+ private EnvironmentImpl envImpl;
+ private Database db;
+ private DatabaseImpl dbImpl;
+ private boolean dups = false;
+ private DatabaseEntry keyEntry = new DatabaseEntry();
+ private DatabaseEntry dataEntry = new DatabaseEntry();
+ private final String operation;
+ private long lastFileSeen;
+ private final boolean fetchObsoleteSize;
+ private boolean truncateOrRemoveDone;
+
+ private boolean embeddedLNs = false;
+
+ public UtilizationTest(String op, boolean fetch) {
+ this.operation = op;
+ this.fetchObsoleteSize = fetch;
+ customName = this.operation + (fetchObsoleteSize ? "fetch" : "");
+ }
+
+ @Parameters
+ public static List genParams() {
+ int i = 0;
+ List list = new ArrayList();
+ for (String operation : OPERATIONS) {
+ list.add(new Object[] {operation, FETCH_OBSOLETE_SIZE[i]});
+ i++;
+ }
+ return list;
+ }
+
+ @After
+ public void tearDown()
+ throws Exception {
+
+ super.tearDown();
+ db = null;
+ dbImpl = null;
+ envImpl = null;
+ keyEntry = null;
+ dataEntry = null;
+ }
+
+ /**
+ * Opens the environment and database.
+ */
+ private void openEnv()
+ throws DatabaseException {
+
+ EnvironmentConfig config = TestUtils.initEnvConfig();
+ DbInternal.disableParameterValidation(config);
+ config.setTransactional(true);
+ config.setTxnNoSync(true);
+ config.setAllowCreate(true);
+ /* Do not run the daemons. */
+ config.setConfigParam
+ (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+ config.setConfigParam
+ (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+ config.setConfigParam
+ (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+ config.setConfigParam
+ (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+ /* Use a tiny log file size to write one LN per file. */
+ config.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+ Integer.toString(64));
+ /* With tiny files we can't log expiration profile records. */
+ DbInternal.setCreateEP(config, false);
+
+ /* Obsolete LN size counting is optional per test. */
+ if (fetchObsoleteSize) {
+ config.setConfigParam
+ (EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE.getName(),
+ "true");
+ }
+
+ if (envMultiSubDir) {
+ config.setConfigParam
+ (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, DATA_DIRS + "");
+ }
+
+ env = new Environment(envHome, config);
+ envImpl = DbInternal.getNonNullEnvImpl(env);
+
+ /* Speed up test that uses lots of very small files. */
+ envImpl.getFileManager().setSyncAtFileEnd(false);
+
+ openDb();
+
+ embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4);
+ }
+
+ /**
+ * Opens the database.
+ */
+ private void openDb()
+ throws DatabaseException {
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setTransactional(true);
+ dbConfig.setAllowCreate(true);
+ dbConfig.setSortedDuplicates(dups);
+ db = env.openDatabase(null, DB_NAME, dbConfig);
+ dbImpl = DbInternal.getDbImpl(db);
+ }
+
+ /**
+ * Closes the environment and database.
+ */
+ private void closeEnv(boolean doCheckpoint)
+ throws DatabaseException {
+
+ /*
+ * We pass expectAccurateDbUtilization as false when
+ * truncateOrRemoveDone, because the database utilization info for that
+ * database is now gone. 
+ */ + VerifyUtils.verifyUtilization + (envImpl, + true, // expectAccurateObsoleteLNCount + expectObsoleteLNSizeCounted(), + !truncateOrRemoveDone); // expectAccurateDbUtilization + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + envImpl.close(doCheckpoint); + env = null; + } + } + + @Test + public void testReuseSlotAfterDelete() + throws DatabaseException { + + openEnv(); + + /* Insert and delete without compress to create a knownDeleted slot. */ + Transaction txn = env.beginTransaction(null, null); + long file0 = doPut(0, txn); + long file1 = doDelete(0, txn); + txn.commit(); + + /* Insert key 0 to reuse the knownDeleted slot. */ + txn = env.beginTransaction(null, null); + long file2 = doPut(0, txn); + /* Delete and insert to reuse deleted slot in same txn. */ + long file3 = doDelete(0, txn); + long file4 = doPut(0, txn); + txn.commit(); + performRecoveryOperation(); + + expectObsolete(file0, true); + expectObsolete(file1, true); + expectObsolete(file2, true); + expectObsolete(file3, true); + expectObsolete(file4, embeddedLNs); + + closeEnv(true); + } + + @Test + public void testReuseKnownDeletedSlot() + throws DatabaseException { + + openEnv(); + + /* Insert key 0 and abort to create a knownDeleted slot. */ + Transaction txn = env.beginTransaction(null, null); + long file0 = doPut(0, txn); + txn.abort(); + + /* Insert key 0 to reuse the knownDeleted slot. */ + txn = env.beginTransaction(null, null); + long file1 = doPut(0, txn); + txn.commit(); + performRecoveryOperation(); + + /* Verify that file0 is still obsolete. */ + expectObsolete(file0, true); + expectObsolete(file1, embeddedLNs); + + closeEnv(true); + } + + @Test + public void testReuseKnownDeletedSlotAbort() + throws DatabaseException { + + openEnv(); + + /* Insert key 0 and abort to create a knownDeleted slot. */ + Transaction txn = env.beginTransaction(null, null); + long file0 = doPut(0, txn); + txn.abort(); + + /* Insert key 0 to reuse the knownDeleted slot, and abort. */ + txn = env.beginTransaction(null, null); + long file1 = doPut(0, txn); + txn.abort(); + performRecoveryOperation(); + + /* Verify that file0 is still obsolete. */ + expectObsolete(file0, true); + expectObsolete(file1, true); + + closeEnv(true); + } + + @Test + public void testReuseKnownDeletedSlotDup() + throws DatabaseException { + + dups = true; + openEnv(); + + /* Insert two key 0 dups and checkpoint. */ + Transaction txn = env.beginTransaction(null, null); + long file0 = doPut(0, 0, txn); // 1st LN + long file1 = doPut(0, 1, txn); // 2nd LN + txn.commit(); + env.checkpoint(forceConfig); + + /* Insert {0, 2} and abort to create a knownDeleted slot. */ + txn = env.beginTransaction(null, null); + long file2 = doPut(0, 2, txn); // 3rd LN + txn.abort(); + + /* Insert {0, 2} to reuse the knownDeleted slot. */ + txn = env.beginTransaction(null, null); + long file3 = doPut(0, 2, txn); // 4th LN + txn.commit(); + performRecoveryOperation(); + + /* Verify that file2 is still obsolete and only counted once. */ + expectObsolete(file0, true); + expectObsolete(file1, true); + expectObsolete(file2, true); + expectObsolete(file3, true); + + closeEnv(true); + } + + @Test + public void testReuseKnownDeletedSlotDupAbort() + throws DatabaseException { + + dups = true; + openEnv(); + + /* Insert two key 0 dups and checkpoint. 
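+ The checkpoint bounds what a later OP_RECOVER pass must redo, so the
+ counts under test come mainly from the operations below.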
*/
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Insert {0, 2} and abort to create a knownDeleted slot. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doPut(0, 2, txn); // 3rd LN
+ txn.abort();
+
+ /* Insert {0, 2} to reuse the knownDeleted slot, then abort. */
+ txn = env.beginTransaction(null, null);
+ long file3 = doPut(0, 2, txn); // 4th LN
+ txn.abort();
+ performRecoveryOperation();
+
+ /* Verify that file2 is still obsolete and only counted once. */
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsert()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0. */
+ long file0 = doPut(0, true);
+ performRecoveryOperation();
+
+ /* Expect that the LN is not obsolete (unless embedded). */
+ FileSummary fileSummary = getFileSummary(file0);
+
+ assertEquals(1, fileSummary.totalLNCount);
+
+ if (embeddedLNs) {
+ assertEquals(1, fileSummary.obsoleteLNCount);
+ } else {
+ assertEquals(0, fileSummary.obsoleteLNCount);
+ }
+
+ DbFileSummary dbFileSummary = getDbFileSummary(file0);
+
+ assertEquals(1, dbFileSummary.totalLNCount);
+
+ if (embeddedLNs) {
+ assertEquals(1, dbFileSummary.obsoleteLNCount);
+ } else {
+ assertEquals(0, dbFileSummary.obsoleteLNCount);
+ }
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertAbort()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and abort. */
+ long file0 = doPut(0, false);
+ performRecoveryOperation();
+
+ /* Expect that LN is obsolete. */
+ FileSummary fileSummary = getFileSummary(file0);
+ assertEquals(1, fileSummary.totalLNCount);
+ assertEquals(1, fileSummary.obsoleteLNCount);
+
+ DbFileSummary dbFileSummary = getDbFileSummary(file0);
+ assertEquals(1, dbFileSummary.totalLNCount);
+ assertEquals(1, dbFileSummary.obsoleteLNCount);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertDup()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert key 0 and a dup. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn);
+ long file1 = doPut(0, 1, txn);
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true); // 1st LN
+ expectObsolete(file1, true); // 2nd LN
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertDupAbort()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert key 0 and a dup, then abort. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn);
+ long file2 = doPut(0, 1, txn);
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true); // 1st LN
+ expectObsolete(file2, true); // 2nd LN
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdate()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Update key 0. */
+ long file1 = doPut(0, true);
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, embeddedLNs);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateAbort()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Update key 0 and abort. 
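+ The aborted update's LN becomes obsolete while the pre-update version
+ stays live (unless LNs are embedded, in which case both are obsolete).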
*/
+ long file1 = doPut(0, false);
+ performRecoveryOperation();
+
+ expectObsolete(file0, embeddedLNs);
+ expectObsolete(file1, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateDup()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Update {0, 0}. */
+ txn = env.beginTransaction(null, null);
+ long file3 = doUpdate(0, 0, txn); // 3rd LN
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateDupAbort()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Update {0, 0} and abort. */
+ txn = env.beginTransaction(null, null);
+ long file3 = doUpdate(0, 0, txn); // 3rd LN
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testDelete()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Delete key 0. */
+ long file1 = doDelete(0, true);
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testDeleteAbort()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Delete key 0 and abort. */
+ long file1 = doDelete(0, false);
+ performRecoveryOperation();
+
+ expectObsolete(file0, embeddedLNs);
+ expectObsolete(file1, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testDeleteDup()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Delete {0, 0}. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doDelete(0, 0, txn); // 3rd LN
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testDeleteDupAbort()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Delete {0, 0} and abort. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doDelete(0, 0, txn); // 3rd LN
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertUpdate()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert and update key 0. 
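+ Both writes occur in one txn: the inserted version is obsolete at
+ commit, and the update stays live unless LNs are embedded.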
*/
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, txn);
+ long file1 = doPut(0, txn);
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, embeddedLNs);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertUpdateAbort()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert and update key 0, then abort. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, txn);
+ long file1 = doPut(0, txn);
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertUpdateDup()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Insert and update {0, 2}. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doPut(0, 2, txn); // 3rd LN
+ long file3 = doUpdate(0, 2, txn); // 4th LN
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertUpdateDupAbort()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Insert and update {0, 2}, then abort. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doPut(0, 2, txn); // 3rd LN
+ long file3 = doUpdate(0, 2, txn); // 4th LN
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertDelete()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert and delete key 0. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, txn);
+ long file1 = doDelete(0, txn);
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertDeleteAbort()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert and delete key 0, then abort. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, txn);
+ long file1 = doDelete(0, txn);
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertDeleteDup()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Insert and delete {0, 2}. 
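+ As in the other dup tests, all four LNs are expected to end up
+ obsolete whether the txn commits or aborts.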
*/
+ txn = env.beginTransaction(null, null);
+ long file2 = doPut(0, 2, txn); // 3rd LN
+ long file3 = doDelete(0, 2, txn); // 4th LN
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testInsertDeleteDupAbort()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Insert and delete {0, 2} and abort. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doPut(0, 2, txn); // 3rd LN
+ long file3 = doDelete(0, 2, txn); // 4th LN
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateUpdate()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Update key 0 twice. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file1 = doPut(0, txn);
+ long file2 = doPut(0, txn);
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, embeddedLNs);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateUpdateAbort()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Update key 0 twice and abort. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file1 = doPut(0, txn);
+ long file2 = doPut(0, txn);
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, embeddedLNs);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateUpdateDup()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Update {0, 1} twice. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doUpdate(0, 1, txn); // 3rd LN
+ long file3 = doUpdate(0, 1, txn); // 4th LN
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateUpdateDupAbort()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Update {0, 1} twice and abort. 
*/
+ txn = env.beginTransaction(null, null);
+ long file2 = doUpdate(0, 1, txn); // 3rd LN
+ long file3 = doUpdate(0, 1, txn); // 4th LN
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateDelete()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Update and delete key 0. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file1 = doPut(0, txn);
+ long file2 = doDelete(0, txn);
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateDeleteAbort()
+ throws DatabaseException {
+
+ openEnv();
+
+ /* Insert key 0 and checkpoint. */
+ long file0 = doPut(0, true);
+ env.checkpoint(forceConfig);
+
+ /* Update and delete key 0 and abort. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file1 = doPut(0, txn);
+ long file2 = doDelete(0, txn);
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, embeddedLNs);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateDeleteDup()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Update and delete {0, 1}. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doUpdate(0, 1, txn); // 3rd LN
+ long file3 = doDelete(0, 1, txn); // 4th LN
+ txn.commit();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testUpdateDeleteDupAbort()
+ throws DatabaseException {
+
+ dups = true;
+ openEnv();
+
+ /* Insert two key 0 dups and checkpoint. */
+ Transaction txn = env.beginTransaction(null, null);
+ long file0 = doPut(0, 0, txn); // 1st LN
+ long file1 = doPut(0, 1, txn); // 2nd LN
+ txn.commit();
+ env.checkpoint(forceConfig);
+
+ /* Update and delete {0, 1} and abort. */
+ txn = env.beginTransaction(null, null);
+ long file2 = doUpdate(0, 1, txn); // 3rd LN
+ long file3 = doDelete(0, 1, txn); // 4th LN
+ txn.abort();
+ performRecoveryOperation();
+
+ expectObsolete(file0, true);
+ expectObsolete(file1, true);
+ expectObsolete(file2, true);
+ expectObsolete(file3, true);
+
+ closeEnv(true);
+ }
+
+ @Test
+ public void testTruncate()
+ throws DatabaseException {
+
+ truncateOrRemove(true, true);
+ }
+
+ @Test
+ public void testTruncateAbort()
+ throws DatabaseException {
+
+ truncateOrRemove(true, false);
+ }
+
+ @Test
+ public void testRemove()
+ throws DatabaseException {
+
+ truncateOrRemove(false, true);
+ }
+
+ @Test
+ public void testRemoveAbort()
+ throws DatabaseException {
+
+ truncateOrRemove(false, false);
+ }
+
+ /**
+ * Truncates or removes the database, commits or aborts the txn, and
+ * checks the expected obsolete counts.
+ */
+ private void truncateOrRemove(boolean truncate, boolean commit)
+ throws DatabaseException {
+
+ openEnv();
+
+ /*
+ * We cannot use forceTreeWalkForTruncateAndRemove with embedded LNs
+ * because we don't have the lastLoggedFile info in the BIN slots. 
+ * This info is required by the SortedLSNTreeWalker used in + * DatabaseImpl.finishDeleteProcessing(). + */ + if (embeddedLNs) { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = false; + } + + /* Insert 3 keys and checkpoint. */ + Transaction txn = env.beginTransaction(null, null); + long file0 = doPut(0, txn); + long file1 = doPut(1, txn); + long file2 = doPut(2, txn); + txn.commit(); + env.checkpoint(forceConfig); + + /* Truncate. */ + txn = env.beginTransaction(null, null); + if (truncate) { + db.close(); + db = null; + long count = env.truncateDatabase(txn, DB_NAME, + true /* returnCount */); + assertEquals(3, count); + } else { + db.close(); + db = null; + env.removeDatabase(txn, DB_NAME); + } + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + truncateOrRemoveDone = true; + performRecoveryOperation(); + + /* + * Do not check DbFileSummary when we truncate/remove, since the old + * DatabaseImpl is gone. + */ + expectObsolete(file0, + commit || embeddedLNs, + !commit /*checkDbFileSummary*/); + + expectObsolete(file1, + commit || embeddedLNs, + !commit /*checkDbFileSummary*/); + + expectObsolete(file2, + commit || embeddedLNs, + !commit /*checkDbFileSummary*/); + + closeEnv(true); + } + + /* + * The xxxForceTreeWalk tests set the DatabaseImpl + * forceTreeWalkForTruncateAndRemove field to true, which will force a walk + * of the tree to count utilization during truncate/remove, rather than + * using the per-database info. This is used to test the "old technique" + * for counting utilization, which is now used only if the database was + * created prior to log version 6. + */ + + @Test + public void testTruncateForceTreeWalk() + throws Exception { + + DatabaseImpl.forceTreeWalkForTruncateAndRemove = true; + try { + testTruncate(); + } finally { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = false; + } + } + + @Test + public void testTruncateAbortForceTreeWalk() + throws Exception { + + DatabaseImpl.forceTreeWalkForTruncateAndRemove = true; + try { + testTruncateAbort(); + } finally { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = false; + } + } + + @Test + public void testRemoveForceTreeWalk() + throws Exception { + + DatabaseImpl.forceTreeWalkForTruncateAndRemove = true; + try { + testRemove(); + } finally { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = false; + } + } + + @Test + public void testRemoveAbortForceTreeWalk() + throws Exception { + + DatabaseImpl.forceTreeWalkForTruncateAndRemove = true; + try { + testRemoveAbort(); + } finally { + DatabaseImpl.forceTreeWalkForTruncateAndRemove = false; + } + } + + private void expectObsolete(long file, boolean obsolete) + throws DatabaseException { + + expectObsolete(file, obsolete, true /*checkDbFileSummary*/); + } + + private void expectObsolete(long file, + boolean obsolete, + boolean checkDbFileSummary) + throws DatabaseException { + + FileSummary fileSummary = getFileSummary(file); + assertEquals("totalLNCount", + 1, fileSummary.totalLNCount); + assertEquals("obsoleteLNCount", + obsolete ? 1 : 0, fileSummary.obsoleteLNCount); + + DbFileSummary dbFileSummary = getDbFileSummary(file); + if (checkDbFileSummary) { + assertEquals("db totalLNCount", + 1, dbFileSummary.totalLNCount); + assertEquals("db obsoleteLNCount", + obsolete ? 
1 : 0, dbFileSummary.obsoleteLNCount); + } + + if (obsolete) { + if (expectObsoleteLNSizeCounted()) { + assertTrue(fileSummary.obsoleteLNSize > 0); + assertEquals(1, fileSummary.obsoleteLNSizeCounted); + if (checkDbFileSummary) { + assertTrue(dbFileSummary.obsoleteLNSize > 0); + assertEquals(1, dbFileSummary.obsoleteLNSizeCounted); + } + } + /* If we counted the size, make sure it is the actual LN size. */ + if (expectObsoleteLNSizeCounted() && + fileSummary.obsoleteLNSize > 0) { + assertEquals(getLNSize(file), fileSummary.obsoleteLNSize); + } + if (checkDbFileSummary) { + if (expectObsoleteLNSizeCounted() && + dbFileSummary.obsoleteLNSize > 0) { + assertEquals(getLNSize(file), dbFileSummary.obsoleteLNSize); + } + assertEquals(fileSummary.obsoleteLNSize > 0, + dbFileSummary.obsoleteLNSize > 0); + } + } else { + assertEquals(0, fileSummary.obsoleteLNSize); + assertEquals(0, fileSummary.obsoleteLNSizeCounted); + if (checkDbFileSummary) { + assertEquals(0, dbFileSummary.obsoleteLNSize); + assertEquals(0, dbFileSummary.obsoleteLNSizeCounted); + } + } + } + + /** + * If an LN is obsolete, expect the size to be counted unless we ran + * recovery and we did NOT configure fetchObsoleteSize=true. In that + * case, the size may or may not be counted depending on how the redo + * or undo was processed during recovery. + */ + private boolean expectObsoleteLNSizeCounted() { + return fetchObsoleteSize || !OP_RECOVER.equals(operation); + } + + private long doPut(int key, boolean commit) + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + long file = doPut(key, txn); + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + return file; + } + + private long doPut(int key, Transaction txn) + throws DatabaseException { + + return doPut(key, key, txn); + } + + private long doPut(int key, int data, Transaction txn) + throws DatabaseException { + + Cursor cursor = db.openCursor(txn, null); + IntegerBinding.intToEntry(key, keyEntry); + IntegerBinding.intToEntry(data, dataEntry); + cursor.put(keyEntry, dataEntry); + long file = getFile(cursor); + cursor.close(); + return file; + } + + private long doUpdate(int key, int data, Transaction txn) + throws DatabaseException { + + Cursor cursor = db.openCursor(txn, null); + IntegerBinding.intToEntry(key, keyEntry); + IntegerBinding.intToEntry(data, dataEntry); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBoth(keyEntry, dataEntry, null)); + cursor.putCurrent(dataEntry); + long file = getFile(cursor); + cursor.close(); + return file; + } + + private long doDelete(int key, boolean commit) + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + long file = doDelete(key, txn); + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + return file; + } + + private long doDelete(int key, Transaction txn) + throws DatabaseException { + + Cursor cursor = db.openCursor(txn, null); + IntegerBinding.intToEntry(key, keyEntry); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(keyEntry, dataEntry, null)); + cursor.delete(); + long file = getFile(cursor); + cursor.close(); + return file; + } + + private long doDelete(int key, int data, Transaction txn) + throws DatabaseException { + + Cursor cursor = db.openCursor(txn, null); + IntegerBinding.intToEntry(key, keyEntry); + IntegerBinding.intToEntry(data, dataEntry); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBoth(keyEntry, dataEntry, null)); + cursor.delete(); + long file = getFile(cursor); + cursor.close(); + return 
file; + } + + /** + * Checkpoint, recover, or do neither, depending on the configured + * operation for this test. Always compress to count deleted LNs. + */ + private void performRecoveryOperation() + throws DatabaseException { + + if (OP_NONE.equals(operation)) { + /* Compress to count deleted LNs. */ + env.compress(); + } else if (OP_CHECKPOINT.equals(operation)) { + /* Compress before checkpointing to count deleted LNs. */ + env.compress(); + env.checkpoint(forceConfig); + } else if (OP_RECOVER.equals(operation)) { + closeEnv(false); + openEnv(); + /* Compress after recovery to count deleted LNs. */ + env.compress(); + } else { + assert false : operation; + } + } + + /** + * Gets the file of the LSN at the cursor position, using internal methods. + * Also check that the file number is greater than the last file returned, + * to ensure that we're filling a file every time we write. + */ + private long getFile(Cursor cursor) { + long file = CleanerTestUtils.getLogFile(cursor); + assert file > lastFileSeen; + lastFileSeen = file; + return file; + } + + /** + * Returns the utilization summary for a given log file. + */ + private FileSummary getFileSummary(long file) { + return envImpl.getUtilizationProfile() + .getFileSummaryMap(true) + .get(new Long(file)); + } + + /** + * Returns the per-database utilization summary for a given log file. + */ + private DbFileSummary getDbFileSummary(long file) { + return dbImpl.getDbFileSummary + (new Long(file), false /*willModify*/); + } + + /** + * Peek into the file to get the total size of the first entry past the + * file header, which is known to be the LN log entry. + */ + private int getLNSize(long file) + throws DatabaseException { + + try { + long offset = FileManager.firstLogEntryOffset(); + long lsn = DbLsn.makeLsn(file, offset); + LogManager lm = envImpl.getLogManager(); + LogSource src = lm.getLogSource(lsn); + ByteBuffer buf = src.getBytes(offset); + LogEntryHeader header = + new LogEntryHeader(buf, LogEntryType.LOG_VERSION, lsn); + int size = header.getItemSize(); + src.release(); + return size + header.getSize(); + } catch (IOException e) { + throw new RuntimeException(e); + } catch (ChecksumException e) { + throw new RuntimeException(e); + } + } +} diff --git a/test/com/sleepycat/je/cleaner/WakeupTest.java b/test/com/sleepycat/je/cleaner/WakeupTest.java new file mode 100644 index 0000000..4ae75e8 --- /dev/null +++ b/test/com/sleepycat/je/cleaner/WakeupTest.java @@ -0,0 +1,245 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.cleaner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Checks that the cleaner wakes up at certain times even when there is no + * logging: + * - startup + * - change to minUtilization + * - DB remove/truncate + * Also checks that checkpointing and file deletion occur when writing stops. + */ +public class WakeupTest extends TestBase { + + private static final int FILE_SIZE = 1000000; + private static final String DB_NAME = "WakeupTest"; + + private final File envHome; + private Environment env; + private Database db; + + public WakeupTest() { + envHome = SharedTestUtils.getTestDir(); + } + + private void open(final boolean runCleaner) { + + /* + * Use a cleaner/checkpointer byte interval that is much larger than + * the amount we will write. + */ + final String veryLargeWriteSize = String.valueOf(Long.MAX_VALUE); + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, Integer.toString(FILE_SIZE)); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_MIN_UTILIZATION, "50"); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, "0"); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_WAKEUP_INTERVAL, "1 s"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, runCleaner ? "true" : "false"); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_BYTES_INTERVAL, veryLargeWriteSize); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, veryLargeWriteSize); + env = new Environment(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + private void close() { + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + if (env != null) { + try { + env.close(); + } finally { + env = null; + } + } + } + + @Test + public void testCleanAtStartup() { + + open(false /*runCleaner*/); + writeFiles(0 /*nActive*/, 10 /*nObsolete*/); + close(); + + open(true /*runCleaner*/); + expectBackgroundCleaning(); + close(); + } + + @Test + public void testCleanAfterMinUtilizationChange() { + + open(true /*runCleaner*/); + writeFiles(4 /*nActive*/, 3 /*nObsolete*/); + expectNothingToClean(); + + final EnvironmentConfig envConfig = env.getConfig(); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_MIN_UTILIZATION, "90"); + env.setMutableConfig(envConfig); + + expectBackgroundCleaning(); + close(); + } + + /** + * Tests cleaner wakeup after writing stops. + * + * Only a small amount is logged by removeDatabase, which is not enough to + * motivate cleaning. 
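+ * (CLEANER_BYTES_INTERVAL is set to Long.MAX_VALUE in open(), so only the
+ * time-based CLEANER_WAKEUP_INTERVAL can trigger cleaning here.)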
As of JE 7.1 the cleaner wakes up periodically and + * cleans if writing has stopped but there was at least some writing since the + * last cleaner activation. + */ + @Test + public void testCleanAfterWritingStops() { + + open(true /*runCleaner*/); + writeFiles(5 /*nActive*/, 0 /*nObsolete*/); + expectNothingToClean(); + db.close(); + db = null; + + env.removeDatabase(null, DB_NAME); + expectBackgroundCleaning(); + close(); + } + + /** + * Tests cleaner wakeup and file deletion, which requires a checkpoint, + * after writing stops. + * + * Only a small amount is logged by truncateDatabase, which is not enough + * to motivate cleaning. As of JE 7.1 the cleaner wakes up periodically and + * cleans if writing has stopped but there was at least some writing since the + * last cleaner activation. + */ + @Test + public void testFileDeletionAfterWritingStops() { + open(true /*runCleaner*/); + writeFiles(5 /*nActive*/, 0 /*nObsolete*/); + expectNothingToClean(); + db.close(); + db = null; + + /* Clear nCheckpoints stat. */ + env.getStats(StatsConfig.CLEAR); + + final EnvironmentConfig envConfig = env.getConfig(); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "true"); + env.setMutableConfig(envConfig); + + env.truncateDatabase(null, DB_NAME, false); + expectBackgroundCleaning(); + expectBackgroundCheckpointAndFileDeletion(); + close(); + } + + private void expectNothingToClean() { + env.cleanLog(); + final EnvironmentStats stats = env.getStats(null); + final String msg = String.format("%d probes, %d non-probes", + stats.getNCleanerProbeRuns(), stats.getNCleanerRuns()); + assertEquals(msg, 0, + stats.getNCleanerRuns() - stats.getNCleanerProbeRuns()); + } + + private void expectBackgroundCleaning() { + final long endTime = System.currentTimeMillis() + (30 * 1000); + while (System.currentTimeMillis() < endTime) { + final EnvironmentStats stats = env.getStats(null); + if (stats.getNCleanerRuns() > 0) { + return; + } + } + close(); + fail("Cleaner did not run"); + } + + private void expectBackgroundCheckpointAndFileDeletion() { + final long endTime = System.currentTimeMillis() + (30 * 1000); + EnvironmentStats stats = null; + while (System.currentTimeMillis() < endTime) { + stats = env.getStats(null); + if (stats.getNCheckpoints() > 0 && + stats.getNCleanerDeletions() > 0) { + return; + } + } + close(); + fail("Checkpointer did not run or no files were deleted: " + stats); + } + + private void writeFiles(final int nActive, final int nObsolete) { + int key = 0; + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(new byte[FILE_SIZE]); + for (int i = 0; i < nActive; i += 1) { + IntegerBinding.intToEntry(key, keyEntry); + db.put(null, keyEntry, dataEntry); + key += 1; + } + IntegerBinding.intToEntry(key, keyEntry); + for (int i = 0; i <= nObsolete; i += 1) { + db.put(null, keyEntry, dataEntry); + } + env.checkpoint(new CheckpointConfig().setForce(true)); + } +} diff --git a/test/com/sleepycat/je/cleaner/migrate_f0.jdb b/test/com/sleepycat/je/cleaner/migrate_f0.jdb new file mode 100644 index 0000000..17f43df Binary files /dev/null and b/test/com/sleepycat/je/cleaner/migrate_f0.jdb differ diff --git a/test/com/sleepycat/je/cleaner/migrate_f1.jdb b/test/com/sleepycat/je/cleaner/migrate_f1.jdb new file mode 100644 index 0000000..6b16ee1 Binary files /dev/null and b/test/com/sleepycat/je/cleaner/migrate_f1.jdb differ diff --git a/test/com/sleepycat/je/cleaner/rmw_bad_offsets.jdb b/test/com/sleepycat/je/cleaner/rmw_bad_offsets.jdb new
file mode 100644 index 0000000..285e240 Binary files /dev/null and b/test/com/sleepycat/je/cleaner/rmw_bad_offsets.jdb differ diff --git a/test/com/sleepycat/je/config/EnvironmentParamsTest.java b/test/com/sleepycat/je/config/EnvironmentParamsTest.java new file mode 100644 index 0000000..569ac74 --- /dev/null +++ b/test/com/sleepycat/je/config/EnvironmentParamsTest.java @@ -0,0 +1,93 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.config; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.Test; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.util.test.TestBase; + +public class EnvironmentParamsTest extends TestBase { + + private IntConfigParam intParam = + new IntConfigParam("param.int", + new Integer(2), + new Integer(10), + new Integer(5), + false, // mutable + false);// for replication + + private LongConfigParam longParam = + new LongConfigParam("param.long", + new Long(2), + new Long(10), + new Long(5), + false, // mutable + false);// for replication + + private ConfigParam mvParam = + new ConfigParam("some.mv.param.#", null, true /* mutable */, + false /* for replication */); + + /** + * Test param validation. + */ + @Test + public void testValidation() { + assertTrue(mvParam.isMultiValueParam()); + + try { + new ConfigParam(null, "foo", false /* mutable */, + false /* for replication */); + fail("should disallow null name"); + } catch (EnvironmentFailureException e) { + // expected. + } + + /* Test bounds. These are all invalid and should fail */ + checkValidateParam(intParam, "1"); + checkValidateParam(intParam, "11"); + checkValidateParam(longParam, "1"); + checkValidateParam(longParam, "11"); + } + + /** + * Check that an invalid parameter isn't mistaken for a multivalue + * param. + */ + @Test + public void testInvalidVsMultiValue() { + try { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setConfigParam("je.maxMemory.stuff", "true"); + fail("Should throw exception"); + } catch (IllegalArgumentException IAE) { + // expected + } + } + + /* Helper to catch expected exceptions */ + private void checkValidateParam(ConfigParam param, String value) { + try { + param.validateValue(value); + fail("Should throw exception"); + } catch (IllegalArgumentException e) { + // expect this exception + } + } +} diff --git a/test/com/sleepycat/je/dbi/BINDeltaOperationTest.java b/test/com/sleepycat/je/dbi/BINDeltaOperationTest.java new file mode 100644 index 0000000..27ce6db --- /dev/null +++ b/test/com/sleepycat/je/dbi/BINDeltaOperationTest.java @@ -0,0 +1,496 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +import org.junit.Test; + +public class BINDeltaOperationTest extends DualTestCase { + + /* + * N_RECORDS is set to 110 and NODE_MAX_ENTRIES to 100. This results in + * a tree with 2 BINs, the first of which has 40 entries. + */ + private static final int N_RECORDS = 110; + + private final File envHome; + private Environment env; + private Database db; + private boolean runBtreeVerifier = true; + + public BINDeltaOperationTest() { + envHome = SharedTestUtils.getTestDir(); + } + + private void open() { + open(false); + } + + private void open(boolean deferredWrite) { + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES, "100"); + if (!runBtreeVerifier) { + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + } + env = create(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(!deferredWrite); + dbConfig.setCacheMode(CacheMode.EVICT_LN); + dbConfig.setDeferredWrite(deferredWrite); + db = env.openDatabase(null, "testDB", dbConfig); + } + + private void close() { + db.close(); + db = null; + env.close(); + env = null; + } + + @Test + public void testEviction() { + + runBtreeVerifier = false; + open(); + + /* Insert N_RECORDS records into 2 BINs */ + writeData(); + checkData(null); + + /* Flush BINs (checkpoint).
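Logging full versions now lets the updates below be logged as deltas.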
*/ + env.checkpoint(new CheckpointConfig().setForce(true)); + checkData(null); + + /* + * Update only enough records in the 1st BIN to make it a delta + * when it gets selected for eviction. + */ + writeDeltaFraction(); + + env.checkpoint(new CheckpointConfig().setForce(true)); + + + BIN bin = getFirstBIN(); + assertFalse(bin.isBINDelta(false)); + + EnvironmentStats stats = env.getStats(StatsConfig.CLEAR); + final long initialBINs = stats.getNCachedBINs(); + final long initialDeltas = stats.getNCachedBINDeltas(); + assertEquals(0, initialDeltas); + + /* + * Test partial eviction to mutate full BIN to BIN delta. + */ + mutateToDelta(bin); + stats = env.getStats(null); + assertEquals(initialBINs, stats.getNCachedBINs()); + assertEquals(initialDeltas + 1, stats.getNCachedBINDeltas()); + + Transaction txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + Cursor cursor1 = db.openCursor(txn, null); + Cursor cursor2 = db.openCursor(txn, null); + + /* + * Read 2 existing keys directly from the bin delta, using 2 different + * cursors; one doing an exact search and the other a range search. + */ + searchExistingKey(cursor1, 6, true); + searchExistingKey(cursor2, 10, false); + + assertTrue(bin.isBINDelta(false)); + + /* + * Update the record where cursor1 is positioned and make sure the + * bin is still a delta. + */ + updateCurrentRecord(cursor1, 20); + + assertTrue(bin.isBINDelta(false)); + + /* + * Now, read all records (checkData). This will mutate the delta to + * the full BIN, but only one fetch will be needed (nNotResident == 1). + */ + checkData(txn); + assertTrue(!bin.isBINDelta(false)); + assertSame(bin, getFirstBIN()); + stats = env.getStats(StatsConfig.CLEAR); + assertEquals(initialBINs, stats.getNCachedBINs()); + assertEquals(initialDeltas, stats.getNCachedBINDeltas()); + assertEquals(1, stats.getNFullBINsMiss()); + assertEquals(0, stats.getNBINDeltasFetchMiss()); + assertEquals(1, stats.getNNotResident()); + + /* + * Make sure that cursors 1 and 2 were adjusted correctly when the + * delta got mutated. + */ + confirmCurrentKey(cursor1, 6); + confirmCurrentKey(cursor2, 10); + + cursor1.close(); + cursor2.close(); + txn.commit(); + + /* + * Call evict() to mutate the BIN to a delta. + */ + mutateToDelta(bin); + assertTrue(bin.getInListResident()); + stats = env.getStats(null); + assertEquals(initialBINs, stats.getNCachedBINs()); + assertEquals(initialDeltas + 1, stats.getNCachedBINDeltas()); + + /* + * Delete a record from the bin delta + */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + + searchExistingKey(cursor1, 6, true); + assertTrue(bin.isBINDelta(false)); + assertEquals(OperationStatus.SUCCESS, cursor1.delete()); + + cursor1.close(); + txn.commit(); + + assertTrue(bin.isBINDelta(false)); + + /* + * Call evict(true) to evict the BIN completely (without explicitly + * forcing the eviction, the BIN will be put in the dirty LRU). Then + * reading the entries will require two fetches.
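+ * (one fetch for the BIN-delta and one for the full BIN it is based on).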
+ */ + evict(bin, true); + assertFalse(bin.getInListResident()); + stats = env.getStats(null); + assertEquals(initialBINs - 1, stats.getNCachedBINs()); + assertEquals(initialDeltas, stats.getNCachedBINDeltas()); + + BIN prevBin = bin; + bin = getFirstBIN(); + assertNotSame(prevBin, bin); + assertFalse(bin.isBINDelta(false)); + stats = env.getStats(StatsConfig.CLEAR); + assertEquals(initialBINs, stats.getNCachedBINs()); + assertEquals(initialDeltas, stats.getNCachedBINDeltas()); + if (stats.getOffHeapBINsLoaded() > 0) { + assertEquals(1, stats.getOffHeapBINsLoaded()); + } else { + assertEquals(1, stats.getNBINsFetchMiss()); + assertEquals(1, stats.getNBINDeltasFetchMiss()); + assertEquals(1, stats.getNFullBINsMiss()); + assertEquals(2, stats.getNNotResident()); + } + + /* + * Put back the record deleted above, so that checkData won't complain. + */ + insertRecord(6, 10); + + checkData(null); + stats = env.getStats(StatsConfig.CLEAR); + assertEquals(0, stats.getNBINsFetchMiss()); + assertEquals(0, stats.getNBINDeltasFetchMiss()); + assertEquals(0, stats.getNNotResident()); + + close(); + } + + @Test + public void testDbCount() { + + open(); + + writeData(0, 5000); + assertEquals(5000, db.count()); + checkData(null, 5000); + close(); + + open(); + assertEquals(5000, db.count()); + checkData(null, 5000); + close(); + } + + @Test + public void testTransitionToDeferredWrite() { + + /* + * Open in normal mode (not deferred-write) and write a BIN-delta. Then + * close the env in order to start the next step with an empty cache. + */ + open(false /*deferredWrite*/); + + writeData(); + checkData(null); + + env.checkpoint(new CheckpointConfig().setForce(true)); + checkData(null); + + writeDeltaFraction(); + env.checkpoint(new CheckpointConfig().setForce(true)); + + close(); + + /* + * Open in deferred-write mode and search for an existing key. Prior to + * the fix for [#25999], an assertion would fire during the cursor + * search, when fetching the BIN-delta, saying that BIN-deltas aren't + * allowed with deferred-write. + */ + open(true /*deferredWrite*/); + + Cursor cursor = db.openCursor(null, null); + searchExistingKey(cursor, 6, true); + + BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + assertFalse(bin.isBINDelta(false)); + + cursor.close(); + close(); + } + + private void writeData() { + writeData(0, N_RECORDS); + } + + private void writeDeltaFraction() { + /* Update records in slots 5 to 15 (inclusive) of the 1st BIN */ + writeData(5, N_RECORDS / 10); + } + + private void writeData(int startRecord, int nRecords) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = startRecord; i < nRecords + startRecord; i += 1) { + + key.setData(TestUtils.getTestArray(i)); + data.setData(TestUtils.getTestArray(i)); + + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + } + + private void insertRecord(int keyVal, int dataVal) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(keyVal)); + data.setData(TestUtils.getTestArray(dataVal)); + + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + + private void checkData(final Transaction txn) { + checkData(txn, N_RECORDS); + } + + /** + * Reads all keys, but does not read data to avoid changing the + * nNotResident stat.
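+ * The data entry is configured with setPartial(true) so that LNs are
+ * never fetched.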
+ */ + private void checkData(final Transaction txn, final int nRecords) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(txn, null); + + for (int i = 0; i < nRecords; i += 1) { + + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, null)); + + assertEquals(i, TestUtils.getTestVal(key.getData())); + } + + assertEquals(OperationStatus.NOTFOUND, + cursor.getNext(key, data, null)); + + cursor.close(); + } + + private void searchExistingKey( + final Cursor cursor, + final int keyVal, + boolean exactSearch) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + key.setData(TestUtils.getTestArray(keyVal)); + + data.setPartial(true); + + if (exactSearch) { + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, null)); + } else { + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKeyRange(key, data, null)); + } + + assertEquals(keyVal, TestUtils.getTestVal(key.getData())); + } + + private Cursor confirmCurrentKey(final Cursor cursor, int keyVal) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + assertEquals(OperationStatus.SUCCESS, + cursor.getCurrent(key, data, null)); + + assertEquals(keyVal, TestUtils.getTestVal(key.getData())); + + return cursor; + } + + private void updateCurrentRecord(final Cursor cursor, int dataVal) { + + final DatabaseEntry data = new DatabaseEntry(); + data.setData(TestUtils.getTestArray(dataVal)); + + assertEquals(OperationStatus.SUCCESS, cursor.putCurrent(data)); + + final DatabaseEntry key = new DatabaseEntry(); + data.setData(null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getCurrent(key, data, null)); + + assertEquals(dataVal, TestUtils.getTestVal(data.getData())); + } + + /** + * Reads first key and returns its BIN. Does not read data to avoid + * changing the nNotResident stat. + */ + private BIN getFirstBIN() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(null, null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(key, data, null)); + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + assertNotNull(bin); + return bin; + } + + private void mutateToDelta(BIN bin) { + + OffHeapCache ohCache = + DbInternal.getNonNullEnvImpl(env).getOffHeapCache(); + + if (!ohCache.isEnabled()) { + evict(bin, false); + assertEquals(1, env.getStats(null).getNNodesMutated()); + } else { + bin.latchNoUpdateLRU(); + bin.mutateToBINDelta(); + bin.releaseLatch(); + } + + assertTrue(bin.isBINDelta(false)); + assertTrue(bin.getInListResident()); + } + + /** + * Simulates eviction of the BIN as if it had been selected. This may do + * only partial eviction, if LNs are present or the BIN can be mutated to + * a delta. We expect that some memory will be reclaimed. + */ + private void evict(BIN bin, boolean force) { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Evictor evictor = envImpl.getEvictor(); + + final long memBefore = TestUtils.validateNodeMemUsage(envImpl, true); + + if (force) { + /* CACHEMODE eviction will not evict a dirty BIN.
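Syncing first makes the BIN clean so it can be evicted entirely.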
*/ + env.sync(); + } + + bin.latch(CacheMode.UNCHANGED); + + if (force) { + evictor.doTestEvict(bin, Evictor.EvictionSource.CACHEMODE); + } else { + evictor.doTestEvict(bin, Evictor.EvictionSource.MANUAL); + } + + final long memAfter = TestUtils.validateNodeMemUsage(envImpl, true); + + assertTrue(memAfter < memBefore); + } +} diff --git a/test/com/sleepycat/je/dbi/BINDeltaOpsTest.java b/test/com/sleepycat/je/dbi/BINDeltaOpsTest.java new file mode 100644 index 0000000..56c02ee --- /dev/null +++ b/test/com/sleepycat/je/dbi/BINDeltaOpsTest.java @@ -0,0 +1,881 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.BINDeltaBloomFilter; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +import org.junit.Test; + +public class BINDeltaOpsTest extends DualTestCase { + + /* + * N_RECORDS is set to 110 and NODE_MAX_ENTRIES to 100. This results in + * a tree with 2 BINs, the first of which has 50 entries.
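+ * (The 100-entry BIN splits during insertion, leaving roughly half the
+ * entries in each resulting BIN.)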
+ */ + private static final int N_RECORDS = 110; + + private final File envHome; + private Environment env; + private Database db; + private boolean dups; + + private boolean debug = false; + + public BINDeltaOpsTest() { + envHome = SharedTestUtils.getTestDir(); + } + + private void open(boolean dups) { + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES, "100"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = create(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setCacheMode(CacheMode.EVICT_LN); + + if (!dups) { + db = env.openDatabase(null, "testDB", dbConfig); + } else { + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "testDupsDB", dbConfig); + } + + this.dups = dups; + } + + private void close() { + db.close(); + db = null; + env.close(); + env = null; + } + + /* + * This is a test to make sure that when a full BIN is mutated to a delta, + * the delta is smaller than the full version (there used to be a bug + * where key prefixing was used in the full BIN, but not in the delta, and + * as a result, the delta could be larger than the full BIN). + */ + @Test + public void testMemory() { + + open(true); + + /* + * Create a long key to be used for all the records inserted in the + * dups db. + */ + byte[] keyBytes = new byte[50]; + for (int i = 0; i < 50; ++i) { + keyBytes[i] = 65; + } + + /* + * Do the record insertions. All records have the same, 50-byte-long + * key and a 1-byte-long data portion. Key prefixing is done, resulting + * in records that take only 3 bytes each. + */ + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < N_RECORDS; i += 1) { + + key.setData(keyBytes); + data.setData(new byte[] {(byte)i}); + + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + BIN bin1 = getFirstBIN(); + + assertTrue(bin1.hasKeyPrefix()); + + /* Flush BINs (checkpoint) */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* + * Update the max number of records in the 1st BIN to make it a delta + * when it gets selected for eviction. + */ + for (int i = 0; i < 12; i += 1) { + + key.setData(keyBytes); + data.setData(new byte[] {(byte)i}); + + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + assertTrue(!bin1.isBINDelta(false)); + + /* + * Mutate bin1 to a delta and make sure memory gets released. + */ + mutateToDelta(bin1); + assertTrue(bin1.getInListResident()); + + close(); + } + + @Test + public void testNoDups() { + + Transaction txn; + Cursor cursor1; + Cursor cursor2; + + open(false); + + writeData(); + checkData(null); + + BIN bin1 = getFirstBIN(); + BIN bin2 = getLastBIN(); + + /* + * The BIN split causes full versions of both BINs to be logged.
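+ * (a BIN-delta cannot express a structural change, so full versions are
+ * needed).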
After + * the split is done, 10 more records are inserted in the 2nd BIN, so + * that BIN has 10 entries marked dirty. During the checkpoint below, + * a delta will be logged for the 2nd BIN, so although after the + * checkpoint the BIN will be clean, it will still have 10 dirty + * entries. + */ + if (debug) { + System.out.println( + "1. BIN-1 has " + bin1.getNEntries() + " entries"); + System.out.println( + "1. BIN-1 has " + bin1.getNDeltas() + " dirty entries"); + System.out.println( + "1. BIN-2 has " + bin2.getNEntries() + " entries"); + System.out.println( + "1. BIN-2 has " + bin2.getNDeltas() + " dirty entries"); + } + + /* Flush BINs (checkpoint) */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* + * Update only enough records in the 1st BIN to make it a delta + * when it gets selected for eviction. Specifically, update records + * in slots 5 to 15 (inclusive). + */ + writeData(5, N_RECORDS / 10); + + if (debug) { + System.out.println( + "2. BIN-1 has " + bin1.getNDeltas() + " dirty entries"); + System.out.println( + "2. BIN-2 has " + bin2.getNDeltas() + " dirty entries"); + } + + assertTrue(!bin1.isBINDelta(false)); + assertTrue(!bin2.isBINDelta(false)); + + /* + * Mutate bin1 to a delta + */ + mutateToDelta(bin1); + assertTrue(bin1.getInListResident()); + + /* + * Read 2 existing keys directly from the bin delta, using 2 cursors: + * one doing an exact search and the other a range search. Make sure + * no mutation back to full bin occurs. + */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + cursor2 = db.openCursor(txn, null); + + searchKey(cursor1, 12, true/*exact*/, true/*exists*/); + searchKey(cursor2, 20, false/*range*/, true/*exists*/); + + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + confirmCurrentKey(cursor1, 12); + confirmCurrentKey(cursor2, 20); + + /* + * Delete the record where cursor1 is positioned and make sure no + * mutation back to full bin occurs. + */ + assertEquals(OperationStatus.SUCCESS, cursor1.delete()); + + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + + /* + * Update a record that is already in the bin1 delta via a put op + * and make sure no mutation back to full bin occurs. + */ + putRecord(cursor1, 14, 40); + assertTrue(bin1.isBINDelta(false)); + + /* + * Cause mutation to full bin by searching for an existing key that + * is not in the delta. Make sure that the 2 cursors are adjusted + * correctly. + */ + searchKey(cursor1, 2, true, true/*exists*/); + + assertTrue(!bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + confirmCurrentKey(cursor1, 2); + confirmCurrentKey(cursor2, 20); + + /* Mutation to full BIN should not clear dirty flag. */ + assertTrue(bin1.getDirty()); + + cursor1.close(); + cursor2.close(); + txn.commit(); + + /* + * Create a KD slot in bin1 by inserting a new record between two + * existing records, and then aborting the txn.
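+ * The abort does not remove the slot; it marks it known-deleted (KD), as
+ * the isEntryKnownDeleted assertion below confirms.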
+ */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + + putRecord(cursor1, 11, 11); + + CursorImpl cursor1Impl = DbInternal.getCursorImpl(cursor1); + assertTrue(bin1 == cursor1Impl.getBIN()); + int slotPos = cursor1Impl.getIndex(); + + if (debug) { + System.out.println("Cursor1 is on bin1 and slot " + slotPos); + } + + cursor1.close(); + txn.abort(); + + assertTrue(bin1.isEntryKnownDeleted(slotPos)); + assertTrue(bin1.getNEntries() == 51); + + /* Turn bin1 into a delta */ + mutateToDelta(bin1); + assertTrue(bin1.getInListResident()); + if (debug) { + System.out.println("BIN-1D has " + bin1.getNEntries() + " entries"); + } + assertTrue(bin1.getNEntries() == 12); + + /* + * Do a range search for key 11. This operation will first position + * (and register) a CursorImpl on the KD slot, and then getNext() will + * be called on that cursor. getNext() will mutate the delta to a full + * BIN, and the position of the cursor must be adjusted correctly. + */ + cursor1 = db.openCursor(null, null); + + searchKey(cursor1, 11, false, false/*exists*/); + + assertTrue(!bin1.isBINDelta(false)); + cursor1.close(); + + /* Turn bin1 into a delta */ + mutateToDelta(bin1); + + /* Mutate bin1 to full bin by doing a put of a non-existing key */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + putRecord(cursor1, 13, 13); + assertTrue(!bin1.isBINDelta(false)); + cursor1.close(); + txn.commit(); + + /* + * Mutate bin2 to a delta + */ + mutateToDelta(bin2); + assertTrue(bin2.getInListResident()); + + /* + * Do a db.count() with a cached BIN delta + */ + assertTrue(db.count() == N_RECORDS); + + assertTrue(bin2.isBINDelta(false)); + assertTrue(bin2.getInListResident()); + + /* + * 1. Update more records in bin1 so that next logrec will be a full bin. + * 2. Do a checkpoint to write the full bin1 to the log. Note that bin1 + * has 2 deleted slots that will be compressed away by logging it. + * 3. Update 1 record in bin1. + * 4. Mutate bin1 to a delta with one slot only. 
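+ * The delta retains a small capacity (getMaxEntries() == 12 below),
+ * leaving room for the blind puts that follow without mutating back to a
+ * full BIN.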
+ */ + writeData(15, 10); + assertTrue(!bin1.isBINDelta(false)); + assertTrue(bin1.getNEntries() == 52); + env.checkpoint(new CheckpointConfig().setForce(true)); + assertTrue(bin1.getNDeltas() == 0); + assertTrue(bin1.getNEntries() == 50); + writeData(5, 1); + mutateToDelta(bin1); + assertTrue(bin1.getNEntries() == 1); + assertTrue(bin1.getMaxEntries() == 12); + + /* Do 2 blind puts and 1 blind putNoOverwrite in the bin1 delta */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + putRecord(cursor1, 15, 15); + putRecord(cursor1, 17, 17); + insertRecord(cursor1, 21, 21); + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getNEntries() == 4); + cursor1.close(); + txn.commit(); + + /* Search for a non-existent key in bin1 delta */ + cursor1 = db.openCursor(null, null); + searchKey(cursor1, 111, true, false/*exists*/); + assertTrue(bin1.isBINDelta(false)); + cursor1.close(); + + /* Mutate bin1 to a full bin by doing an updating put */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + putRecord(cursor1, 16, 16); + assertTrue(!bin1.isBINDelta(false)); + assertTrue(bin1.getNEntries() == 53); + cursor1.close(); + txn.commit(); + + close(); + } + + @Test + public void testDups() { + + Transaction txn; + Cursor cursor1; + Cursor cursor2; + CursorImpl cursor1Impl; + CursorImpl cursor2Impl; + + open(true); + + writeData(); + checkData(null); + + BIN bin1 = getFirstBIN(); + BIN bin2 = getLastBIN(); + + /* Flush BINs (checkpoint) */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* + * Update only enough records in the 1st BIN to make it a delta + * when it gets selected for eviction. Specifically, update records + * in slots 5 to 15 (inclusive). + */ + writeData(5, N_RECORDS / 10); + + assertTrue(!bin1.isBINDelta(false)); + assertTrue(!bin2.isBINDelta(false)); + + /* + * Mutate bin1 to a delta + */ + mutateToDelta(bin1); + + /* + * Read 2 existing records directly from the bin delta, using 2 cursors: + * one doing an exact search and the other a range search. Make sure no + * mutation back to full bin occurs. + */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + cursor2 = db.openCursor(txn, null); + + searchRecord(cursor1, 12, 12, true/*exact*/, true/*exists*/); + searchRecord(cursor2, 20, 20, false/*exact*/, true/*exists*/); + + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + + /* + * Delete the record where cursor1 is positioned on and make sure no + * mutation back to full bin occurs. + */ + assertEquals(OperationStatus.SUCCESS, cursor1.delete()); + + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + + /* + * Cause mutation to full bin by searching for a key only. + */ + searchKey(cursor1, 14, true/*exact*/, true/*exists*/); + + assertTrue(!bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + confirmCurrentKey(cursor1, 14); + confirmCurrentKey(cursor2, 20); + + /* + * Put a duplicate record in bin1. Note: we are inserting record + * (20, 10) which compares greater than the existing (20, 20) + * record. 
+ */ + putRecord(cursor2, 20, 10); + + cursor1.close(); + cursor2.close(); + txn.commit(); + + /* + * Mutate bin1 to delta again + */ + mutateToDelta(bin1); + + /* + * Cause mutation to full bin via a getNextNoDup() call (which + * passes a non-null comparator to CursorImpl.searchRange()) + */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + cursor2 = db.openCursor(txn, null); + + searchRecord(cursor1, 20, 10, false/*exact*/, true/*exists*/); + searchRecord(cursor2, 20, 20, false/*exact*/, true/*exists*/); + + if (debug) { + cursor1Impl = DbInternal.getCursorImpl(cursor1); + int slot1Pos = cursor1Impl.getIndex(); + System.out.println("Cursor1 is on bin1 and slot " + slot1Pos); + + cursor2Impl = DbInternal.getCursorImpl(cursor2); + int slot2Pos = cursor2Impl.getIndex(); + System.out.println("Cursor2 is on bin1 and slot " + slot2Pos); + } + + assertTrue(bin1.isBINDelta(false)); + confirmCurrentKey(cursor1, 20); + + getNext(cursor1, true/*skipDups*/); + + assertTrue(!bin1.isBINDelta(false)); + confirmCurrentKey(cursor1, 22); + + cursor1.close(); + cursor2.close(); + txn.commit(); + + /* + * Mutate bin1 to delta again. Then do exact record searches for + * non-existing records and make sure no mutation to full bin. + */ + mutateToDelta(bin1); + cursor1 = db.openCursor(null, null); + searchRecord(cursor1, 21, 10, true/*exact*/, false/*exists*/); + searchRecord(cursor1, 20, 20000, true/*exact*/, false/*exists*/); + assertTrue(bin1.isBINDelta(false)); + + /* + * Mutate bin1 to full bin by searching for record that exists in the + * full bin, but not in the delta. + */ + searchRecord(cursor1, 32, 32, true/*exact*/, true/*exists*/); + assertTrue(!bin1.isBINDelta(false)); + cursor1.close(); + + /* + * Mutate bin1 to delta and then back to full bin again by doing an + * exact search for a key only. + */ + mutateToDelta(bin1); + cursor1 = db.openCursor(null, null); + searchKey(cursor1, 25, true/*exact*/, false/*exists*/); + assertTrue(!bin1.isBINDelta(false)); + cursor1.close(); + + /* + * 1. Update more records in bin1 so that next logrec will be a full bin. + * 2. Do a checkpoint to write the full bin1 to the log. Note that bin1 + * has 2 deleted slots that will be compressed away by logging it. + * 3. Update 1 record in bin1. + * 4. Mutate bin1 to a delta with one slot only. + */ + writeData(15, 10); + assertTrue(!bin1.isBINDelta(false)); + assertTrue(bin1.getNEntries() == 51); + env.checkpoint(new CheckpointConfig().setForce(true)); + assertTrue(bin1.getNDeltas() == 0); + assertTrue(bin1.getNEntries() == 50); + writeData(5, 1); + mutateToDelta(bin1); + assertTrue(bin1.getNEntries() == 1); + assertTrue(bin1.getMaxEntries() == 12); + + /* Do 3 blind puts in the bin1 delta */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + putRecord(cursor1, 15, 15); + putRecord(cursor1, 17, 17); + putRecord(cursor1, 21, 21); + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getNEntries() == 4); + cursor1.close(); + txn.commit(); + + /* Mutate bin1 to a full bin by doing a putNoOverwrite */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + insertRecord(cursor1, 23, 23); + assertTrue(!bin1.isBINDelta(false)); + assertTrue(bin1.getNEntries() == 54); + cursor1.close(); + txn.commit(); + + close(); + } + + /** + * Checks that getPrev works when moving backwards across BIN-deltas.
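+ * Each BIN except the last is mutated to a delta once the forward pass
+ * moves beyond it, so the backward scan must handle a delta at every BIN
+ * boundary.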
+ */ + @Test + public void testPrevBin() { + + final int nRecs = 1000; + + open(false); + writeData(0, nRecs); + env.sync(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + BIN prevBin = null; + + for (int i = 0; i < nRecs; i += 1) { + + assertEquals( + OperationStatus.SUCCESS, + cursor.getNext(key, data, null)); + + final CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + final BIN bin = cursorImpl.getBIN(); + + if (bin != prevBin) { + + assertEquals( + OperationStatus.SUCCESS, + cursor.putCurrent(data)); + + if (prevBin != null) { + mutateToDelta(prevBin); + } + + prevBin = bin; + } + } + + assertEquals( + OperationStatus.NOTFOUND, + cursor.getNext(key, data, null)); + + cursor.close(); + txn.commit(); + + cursor = db.openCursor(null, null); + + assertEquals( + OperationStatus.SUCCESS, + cursor.getLast(key, data, null)); + + for (int i = nRecs - 1; i >= 0; i -= 1) { + + assertEquals(2 * i, TestUtils.getTestVal(key.getData())); + + assertEquals( + (i > 0) ? OperationStatus.SUCCESS : OperationStatus.NOTFOUND, + cursor.getPrev(key, data, null)); + } + + cursor.close(); + + close(); + } + + @Test + public void testLargeBloomFilter() { + + int numKeys = 129; + int nbytes = BINDeltaBloomFilter.getByteSize(numKeys); + + byte[] bf = new byte[nbytes]; + + BINDeltaBloomFilter.HashContext hc = + new BINDeltaBloomFilter.HashContext(); + + for (int i = 0; i < numKeys; ++i) { + BINDeltaBloomFilter.add(bf, new byte[i], hc); + } + } + + private void writeData() { + writeData(0, N_RECORDS); + } + + private void writeData(int startRecord, int nRecords) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = startRecord; i < nRecords + startRecord; i += 1) { + + key.setData(TestUtils.getTestArray(2*i)); + data.setData(TestUtils.getTestArray(2*i)); + + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + } + + private void putRecord(Cursor cursor, int keyVal, int dataVal) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(keyVal)); + data.setData(TestUtils.getTestArray(dataVal)); + + assertEquals(OperationStatus.SUCCESS, cursor.put(key, data)); + } + + private void insertRecord(Cursor cursor, int keyVal, int dataVal) { + + OperationStatus status = OperationStatus.SUCCESS; + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(keyVal)); + data.setData(TestUtils.getTestArray(dataVal)); + + assertEquals(status, cursor.putNoOverwrite(key, data)); + } + + private void updateCurrentRecord(final Cursor cursor, int dataVal) { + + final DatabaseEntry data = new DatabaseEntry(); + data.setData(TestUtils.getTestArray(dataVal)); + + assertEquals(OperationStatus.SUCCESS, cursor.putCurrent(data)); + + final DatabaseEntry key = new DatabaseEntry(); + data.setData(null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getCurrent(key, data, null)); + + assertEquals(dataVal, TestUtils.getTestVal(data.getData())); + } + + private void searchKey( + final Cursor cursor, + final int keyVal, + boolean exactSearch, + boolean exists) { + + OperationStatus status = OperationStatus.SUCCESS; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + key.setData(TestUtils.getTestArray(keyVal)); 
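+ /* Use partial data so the LN is not fetched. */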
+ data.setPartial(true); + + if (exactSearch) { + if (!exists) { + status = OperationStatus.NOTFOUND; + } + assertEquals(status, cursor.getSearchKey(key, data, null)); + } else { + assertEquals(status, cursor.getSearchKeyRange(key, data, null)); + } + } + + private void searchRecord( + Cursor cursor, + int keyVal, + int dataVal, + boolean exactSearch, + boolean exists) { + + OperationStatus status = OperationStatus.SUCCESS; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + key.setData(TestUtils.getTestArray(keyVal)); + data.setData(TestUtils.getTestArray(dataVal)); + + if (exactSearch) { + if (!exists) { + status = OperationStatus.NOTFOUND; + } + assertEquals(status, cursor.getSearchBoth(key, data, null)); + } else { + assertEquals(status, cursor.getSearchBothRange(key, data, null)); + } + } + + private void getNext(Cursor cursor, boolean skipDups) { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + if (skipDups) { + assertEquals(OperationStatus.SUCCESS, + cursor.getNextNoDup(key, data, null)); + } else { + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, null)); + } + } + + private Cursor confirmCurrentKey(final Cursor cursor, int keyVal) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + assertEquals(OperationStatus.SUCCESS, + cursor.getCurrent(key, data, null)); + + assertEquals(keyVal, TestUtils.getTestVal(key.getData())); + + return cursor; + } + + private void checkData(final Transaction txn) { + checkData(txn, N_RECORDS); + } + + /** + * Reads all keys, but does not read data to avoid changing the + * nNotResident stat. + */ + private void checkData(final Transaction txn, final int nRecords) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(txn, null); + + for (int i = 0; i < nRecords; i += 1) { + + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, null)); + + assertEquals(2*i, TestUtils.getTestVal(key.getData())); + } + + assertEquals(OperationStatus.NOTFOUND, + cursor.getNext(key, data, null)); + + cursor.close(); + } + + /** + * Reads first key and returns its BIN. Does not read data to avoid + * changing the nNotResident stat. + */ + private BIN getFirstBIN() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(null, null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(key, data, null)); + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + assertNotNull(bin); + return bin; + } + + /** + * Reads last key and returns its BIN. Does not read data to avoid + * changing the nNotResident stat. 
+ */ + private BIN getLastBIN() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(null, null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getLast(key, data, null)); + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + assertNotNull(bin); + return bin; + } + + private void mutateToDelta(BIN bin) { + + bin.latchNoUpdateLRU(); + bin.mutateToBINDelta(); + bin.releaseLatch(); + + assertTrue(bin.isBINDelta(false)); + assertTrue(bin.getInListResident()); + } +} diff --git a/test/com/sleepycat/je/dbi/CodeCoverageTest.java b/test/com/sleepycat/je/dbi/CodeCoverageTest.java new file mode 100644 index 0000000..92dd14a --- /dev/null +++ b/test/com/sleepycat/je/dbi/CodeCoverageTest.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.StringDbt; + +/** + * Various unit tests for CursorImpl to enhance code coverage. + */ +public class CodeCoverageTest extends DbCursorTestBase { + + public CodeCoverageTest() { + super(); + } + + /** + * Test the internal CursorImpl.delete() deleted LN code. + */ + @Test + public void testDeleteDeleted() + throws Throwable { + + try { + initEnv(false); + doSimpleCursorPuts(); + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + OperationStatus status = cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + + cursor.delete(); + cursor.delete(); + + /* + * While we've got a cursor in hand, call CursorImpl.dumpToString() + */ + DbInternal.getCursorImpl(cursor).dumpToString(true); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } +} diff --git a/test/com/sleepycat/je/dbi/CompressedOopsDetectorTest.java b/test/com/sleepycat/je/dbi/CompressedOopsDetectorTest.java new file mode 100644 index 0000000..0d58c1a --- /dev/null +++ b/test/com/sleepycat/je/dbi/CompressedOopsDetectorTest.java @@ -0,0 +1,42 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.sleepycat.je.utilint.JVMSystemUtils; + +import org.junit.Test; + +public class CompressedOopsDetectorTest { + + /** + * There is no easy way to test that the detector has determined correctly + * if compressed OOPs are in use -- that requires out-of-band confirmation. + * Just test that the detector reports that it knows the answer. A test + * failure suggests that the CompressedOopsDetector class needs to be + * updated for the current platform. + */ + @Test + public void testDetector() { + if (JVMSystemUtils.ZING_JVM) { + assertNull("Zing result should be unknown", + CompressedOopsDetector.isEnabled()); + return; + } + assertNotNull("CompressedOopsDetector result is unknown", + CompressedOopsDetector.isEnabled()); + } +} diff --git a/test/com/sleepycat/je/dbi/DbConfigManagerTest.java b/test/com/sleepycat/je/dbi/DbConfigManagerTest.java new file mode 100644 index 0000000..2a8f1ea --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbConfigManagerTest.java @@ -0,0 +1,65 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.BooleanConfigParam; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.TestBase; + +public class DbConfigManagerTest extends TestBase { + + /** + * Test that parameter defaults work and that we can add and get + * parameters. + */ + @Test + public void testBasicParams() { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setCacheSize(2000); + DbConfigManager configManager = new DbConfigManager(envConfig); + + /** + * Longs: The config manager should return the value for an + * explicitly set param and the default for one not set. + * + */ + assertEquals(2000, + configManager.getLong(EnvironmentParams.MAX_MEMORY)); + assertEquals(EnvironmentParams.ENV_RECOVERY.getDefault(), + configManager.get(EnvironmentParams.ENV_RECOVERY)); + } + + /** + * Checks that leading and trailing whitespace is ignored when parsing a + * boolean.
[#22212] + */ + @Test + public void testBooleanWhitespace() { + String val = " TruE "; // has leading and trailing space + String name = EnvironmentConfig.SHARED_CACHE; // any boolean will do + BooleanConfigParam param = + (BooleanConfigParam) EnvironmentParams.SUPPORTED_PARAMS.get(name); + param.validateValue(val); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(name, val); + DbConfigManager configManager = new DbConfigManager(envConfig); + assertEquals(true, configManager.getBoolean(param)); + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorDeleteTest.java b/test/com/sleepycat/je/dbi/DbCursorDeleteTest.java new file mode 100644 index 0000000..c6fa342 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorDeleteTest.java @@ -0,0 +1,466 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Hashtable; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.StringDbt; + +/** + * Various unit tests for CursorImpl.delete(). + */ +public class DbCursorDeleteTest extends DbCursorTestBase { + + public DbCursorDeleteTest() { + super(); + } + + /** + * Put a small number of data items into the database in a specific order, + * delete all the ones beginning with 'f', and then make sure they were + * really deleted. + */ + @Test + public void testSimpleDelete() + throws DatabaseException { + + initEnv(false); + doSimpleCursorPuts(); + + int deletedEntries = 0; + DataWalker dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + try { + if (foundKey.charAt(0) == 'f') { + cursor.delete(); + deletedEntries++; + } + } catch (DatabaseException DBE) { + System.out.println("DBE " + DBE); + } + } + }; + dw.walkData(); + deletedEntries = dw.deletedEntries; + dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + assertTrue(foundKey.compareTo(prevKey) >= 0); + assertTrue(foundKey.charAt(0) != 'f'); + prevKey = foundKey; + } + }; + dw.walkData(); + assertTrue(dw.nEntries == simpleKeyStrings.length - deletedEntries); + } + + /** + * Put a small number of data items into the database in a specific order. + * For each one: delete, getCurrent (make sure failure), reinsert + * (success), delete (success). Once iterated through all of them, + * reinsert and make sure successful. 
+ */ + @Test + public void testSimpleDeleteInsert() + throws DatabaseException { + + initEnv(false); + doSimpleCursorPuts(); + DataWalker dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + try { + cursor.delete(); + deletedEntries++; + assertEquals(OperationStatus.KEYEMPTY, + cursor.getCurrent + (new StringDbt(), new StringDbt(), + LockMode.DEFAULT)); + StringDbt newKey = new StringDbt(foundKey); + StringDbt newData = new StringDbt(foundData); + assertEquals(OperationStatus.SUCCESS, + cursor2.putNoOverwrite(newKey, newData)); + assertEquals(OperationStatus.SUCCESS, + cursor2.delete()); + } catch (DatabaseException DBE) { + System.out.println("DBE " + DBE); + } + } + }; + + dw.walkData(); + doSimpleCursorPuts(); + + dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + assertEquals(foundData, + (String) simpleDataMap.get(foundKey)); + simpleDataMap.remove(foundKey); + } + }; + dw.walkData(); + assertTrue(simpleDataMap.size() == 0); + } + + /** + * Put a small number of data items into the database in a specific order. + * For each one: delete, getCurrent (make sure failure), reinsert + * (success), delete (success). Once iterated through all of them, + * reinsert and make sure successful. + */ + @Test + public void testSimpleDeletePutCurrent() + throws DatabaseException { + + initEnv(false); + doSimpleCursorPuts(); + DataWalker dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + try { + cursor.delete(); + deletedEntries++; + assertEquals(OperationStatus.KEYEMPTY, + cursor.getCurrent + (new StringDbt(), new StringDbt(), + LockMode.DEFAULT)); + StringDbt newData = new StringDbt(foundData); + assertEquals(OperationStatus.KEYEMPTY, + cursor.putCurrent(newData)); + } catch (DatabaseException DBE) { + System.out.println("DBE " + DBE); + } + } + }; + + dw.walkData(); + doSimpleCursorPuts(); + + dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + assertEquals(foundData, + (String) simpleDataMap.get(foundKey)); + simpleDataMap.remove(foundKey); + } + }; + dw.walkData(); + assertTrue(simpleDataMap.size() == 0); + } + + /** + * Similar to above test, but there was some question about whether this + * tests new functionality or not. Insert k1/d1 and d1/k1. Iterate + * through the data and delete k1/d1. Reinsert k1/d1 and make sure it + * inserts ok. + */ + @Test + public void testSimpleInsertDeleteInsert() + throws DatabaseException { + + initEnv(true); + StringDbt key = new StringDbt("k1"); + StringDbt data1 = new StringDbt("d1"); + + assertEquals(OperationStatus.SUCCESS, + putAndVerifyCursor(cursor, key, data1, true)); + assertEquals(OperationStatus.SUCCESS, + putAndVerifyCursor(cursor, data1, key, true)); + + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (foundKey.equals("k1")) { + if (cursor.delete() == OperationStatus.SUCCESS) { + deletedEntries++; + } + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + + assertEquals(OperationStatus.SUCCESS, + putAndVerifyCursor(cursor, key, data1, true)); + } + + /** + * Put a small number of data items into the database in a specific order, + * delete all of them and then make sure they were really deleted. 
+ */ + @Test + public void testSimpleDeleteAll() + throws DatabaseException { + + initEnv(false); + doSimpleCursorPuts(); + + int deletedEntries = 0; + DataWalker dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + try { + cursor.delete(); + deletedEntries++; + assertEquals(OperationStatus.KEYEMPTY, + cursor.getCurrent + (new StringDbt(), new StringDbt(), + LockMode.DEFAULT)); + } catch (DatabaseException DBE) { + System.out.println("DBE " + DBE); + } + } + }; + dw.walkData(); + deletedEntries = dw.deletedEntries; + dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + fail("didn't delete everything"); + } + }; + dw.walkData(); + assertTrue(dw.nEntries == 0); + assertTrue(simpleKeyStrings.length == deletedEntries); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order deleting anything that has 'F' as the second character. + * Iterate through the tree again and make sure they are all correctly + * deleted. + * @throws Exception + */ + @Test + public void testLargeDelete() + throws Exception { + + tearDown(); + for (int i = 0; i < N_ITERS; i++) { + setUp(); + initEnv(false); + doLargeDelete(); + tearDown(); + } + } + + /** + * Helper routine for above. + */ + private void doLargeDeleteAll() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + int n_keys = 2000; + doLargePut(dataMap, /* N_KEYS */ n_keys); + + int deletedEntries = 0; + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + cursor.delete(); + deletedEntries++; + assertEquals(OperationStatus.KEYEMPTY, + cursor.getCurrent + (new StringDbt(), new StringDbt(), + LockMode.DEFAULT)); + } + }; + dw.walkData(); + deletedEntries = dw.deletedEntries; + dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + fail("didn't delete everything"); + } + }; + dw.walkData(); + assertTrue(dw.nEntries == 0); + assertTrue(/* N_KEYS */ n_keys == deletedEntries); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order deleting all entries. Iterate through the tree again + * and make sure they are all correctly deleted. + * @throws Exception + */ + @Test + public void testLargeDeleteAll() + throws Exception { + + tearDown(); + for (int i = 0; i < N_ITERS; i++) { + setUp(); + initEnv(false); + doLargeDeleteAll(); + tearDown(); + } + } + + /** + * Helper routine for above. + */ + private void doLargeDelete() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + int deletedEntries = 0; + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + if (foundKey.charAt(1) == 'F') { + cursor.delete(); + deletedEntries++; + } + } + }; + dw.walkData(); + deletedEntries = dw.deletedEntries; + dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + assertTrue(foundKey.compareTo(prevKey) >= 0); + assertTrue(foundKey.charAt(1) != 'F'); + prevKey = foundKey; + } + }; + dw.walkData(); + assertTrue(dw.nEntries == N_KEYS - deletedEntries); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order deleting the first entry. Iterate through the tree + * again and make sure only the first entry is deleted. 
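+     *
+     * <p>Only the first record visited is removed; the walker callback is
+     * essentially:
+     * <pre>{@code
+     * if (deletedEntry == null) {  // true only on the first callback
+     *     deletedEntry = foundKey;
+     *     cursor.delete();
+     * }
+     * }</pre>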
+ * @throws Exception + */ + @Test + public void testLargeDeleteFirst() + throws Exception { + + tearDown(); + for (int i = 0; i < N_ITERS; i++) { + setUp(); + initEnv(false); + doLargeDeleteFirst(); + tearDown(); + } + } + + /** + * Helper routine for above. + */ + private void doLargeDeleteFirst() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + if (deletedEntry == null) { + deletedEntry = foundKey; + cursor.delete(); + } + } + }; + dw.walkData(); + + String deletedEntry = dw.deletedEntry; + + dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + assertFalse(deletedEntry.equals(foundKey)); + } + }; + dw.deletedEntry = deletedEntry; + dw.walkData(); + assertTrue(dw.nEntries == N_KEYS - 1); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order deleting the last entry. Iterate through the tree again + * and make sure only the last entry is deleted. + * @throws Exception + */ + @Test + public void testLargeDeleteLast() + throws Exception { + + tearDown(); + for (int i = 0; i < N_ITERS; i++) { + setUp(); + initEnv(false); + doLargeDeleteLast(); + tearDown(); + } + } + + /** + * Helper routine for above. + */ + private void doLargeDeleteLast() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new BackwardsDataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + if (deletedEntry == null) { + deletedEntry = foundKey; + cursor.delete(); + } + } + }; + dw.walkData(); + + String deletedEntry = dw.deletedEntry; + + dw = new BackwardsDataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + assertFalse(deletedEntry.equals(foundKey)); + } + }; + dw.deletedEntry = deletedEntry; + dw.walkData(); + assertTrue(dw.nEntries == N_KEYS - 1); + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorDupTest.java b/test/com/sleepycat/je/dbi/DbCursorDupTest.java new file mode 100644 index 0000000..b1f7887 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorDupTest.java @@ -0,0 +1,206 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.utilint.StringUtils; + +/** + * Various unit tests for CursorImpl.dup(). 
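+ *
+ * <p>In outline, the two flavors of {@code Cursor.dup} exercised here:
+ * <pre>{@code
+ * Cursor c2 = c1.dup(true);   // c2 starts at c1's current position
+ * Cursor c3 = c1.dup(false);  // c3 starts uninitialized, like a new cursor
+ * }</pre>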
+ */ +public class DbCursorDupTest extends DbCursorTestBase { + + public DbCursorDupTest() { + super(); + } + + @Test + public void testCursorDupAndCloseDb() + throws DatabaseException { + + initEnv(false); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database myDb = exampleEnv.openDatabase(null, "fooDb", dbConfig); + + myDb.put(null, new StringDbt("blah"), new StringDbt("blort")); + Cursor cursor = myDb.openCursor(null, null); + OperationStatus status = cursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + Cursor cursorDup = cursor.dup(true); + cursor.close(); + cursorDup.close(); + myDb.close(); + } + + @Test + public void testDupInitialized() + throws DatabaseException { + + /* Open db. */ + initEnv(false); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database myDb = exampleEnv.openDatabase(null, "fooDb", dbConfig); + + /* Open uninitialized cursor. */ + Cursor c1 = myDb.openCursor(null, null); + try { + c1.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null); + fail(); + } catch (IllegalStateException expected) {} + + /* Dup uninitialized cursor with samePosition=false. */ + Cursor c2 = c1.dup(false); + try { + c2.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null); + fail(); + } catch (IllegalStateException expected) {} + + /* Dup uninitialized cursor with samePosition=true. */ + Cursor c3 = c1.dup(true); + try { + c3.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null); + fail(); + } catch (IllegalStateException expected) {} + + /* Ensure dup'ed cursors are usable. */ + assertEquals(OperationStatus.SUCCESS, + c1.put(new DatabaseEntry(new byte[0]), + new DatabaseEntry(new byte[0]))); + assertEquals(OperationStatus.SUCCESS, + c2.getFirst(new DatabaseEntry(), new DatabaseEntry(), + null)); + assertEquals(OperationStatus.NOTFOUND, + c2.getNext(new DatabaseEntry(), new DatabaseEntry(), + null)); + assertEquals(OperationStatus.SUCCESS, + c3.getFirst(new DatabaseEntry(), new DatabaseEntry(), + null)); + assertEquals(OperationStatus.NOTFOUND, + c3.getNext(new DatabaseEntry(), new DatabaseEntry(), + null)); + + /* Close db. */ + c3.close(); + c2.close(); + c1.close(); + myDb.close(); + } + + /** + * Create some duplicate data. + * + * Pass 1, walk over the data and with each iteration, dup() the + * cursor at the same position. Ensure that the dup points to the + * same key/data pair. Advance the dup'd cursor and ensure that + * the data is different (key may be the same since it's a + * duplicate set). Then dup() the cursor without maintaining + * position. Ensure that getCurrent() throws a Cursor Not Init'd + * exception. + * + * Pass 2, iterate through the data, and dup the cursor in the + * same position. Advance the original cursor and ensure that the + * dup()'d points to the original data and the original cursor + * points at new data. 
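+     *
+     * <p>Pass 2's invariant, in sketch form:
+     * <pre>{@code
+     * Cursor c2 = cursor.dup(true);                  // pin the position
+     * cursor.getNext(key, data, LockMode.DEFAULT);   // advance original only
+     * c2.getCurrent(key2, data2, LockMode.DEFAULT);  // c2: the old record
+     * }</pre>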
+ */ + @Test + public void testCursorDupSamePosition() + throws DatabaseException { + + initEnv(true); + createRandomDuplicateData(null, false); + + DataWalker dw = new DataWalker(null) { + void perData(String foundKey, String foundData) + throws DatabaseException { + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + Cursor cursor2 = cursor.dup(true); + cursor2.getCurrent(keyDbt, dataDbt, LockMode.DEFAULT); + String c2Key = StringUtils.fromUTF8(keyDbt.getData()); + String c2Data = StringUtils.fromUTF8(dataDbt.getData()); + assertTrue(c2Key.equals(foundKey)); + assertTrue(c2Data.equals(foundData)); + if (cursor2.getNext(keyDbt, + dataDbt, + LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + /* Keys can be the same because we have duplicates. */ + /* + assertFalse(StringUtils.fromUTF8(keyDbt.getData()). + equals(foundKey)); + */ + assertFalse(StringUtils.fromUTF8(dataDbt.getData()). + equals(foundData)); + } + cursor2.close(); + try { + cursor2 = cursor.dup(false); + cursor2.getCurrent(keyDbt, dataDbt, LockMode.DEFAULT); + fail("didn't catch Cursor not initialized exception"); + } catch (IllegalStateException expected) { + } + cursor2.close(); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + + dw = new DataWalker(null) { + void perData(String foundKey, String foundData) + throws DatabaseException { + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + DatabaseEntry key2Dbt = new DatabaseEntry(); + DatabaseEntry data2Dbt = new DatabaseEntry(); + Cursor cursor2 = cursor.dup(true); + + OperationStatus status = + cursor.getNext(keyDbt, dataDbt, LockMode.DEFAULT); + + cursor2.getCurrent(key2Dbt, data2Dbt, LockMode.DEFAULT); + String c2Key = StringUtils.fromUTF8(key2Dbt.getData()); + String c2Data = StringUtils.fromUTF8(data2Dbt.getData()); + assertTrue(c2Key.equals(foundKey)); + assertTrue(c2Data.equals(foundData)); + if (status == OperationStatus.SUCCESS) { + assertFalse(StringUtils.fromUTF8(dataDbt.getData()). + equals(foundData)); + assertFalse(StringUtils.fromUTF8(dataDbt.getData()). + equals(c2Data)); + } + cursor2.close(); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorDuplicateDeleteTest.java b/test/com/sleepycat/je/dbi/DbCursorDuplicateDeleteTest.java new file mode 100644 index 0000000..2e0bba5 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorDuplicateDeleteTest.java @@ -0,0 +1,1112 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Hashtable; +import java.util.concurrent.atomic.AtomicInteger; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.utilint.StringUtils; + +import org.junit.Test; + +/** + * Various unit tests for CursorImpl using duplicates. + */ +public class DbCursorDuplicateDeleteTest extends DbCursorTestBase { + + private AtomicInteger sequence; + + public DbCursorDuplicateDeleteTest() { + super(); + } + + /** + * Create some simple duplicate data. Delete it all. Try to create + * it again. + */ + @Test + public void testSimpleDeleteInsert() + throws Exception { + + try { + initEnv(true); + doSimpleDuplicatePuts(); + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (prevKey.equals("")) { + prevKey = foundKey; + } + if (!prevKey.equals(foundKey)) { + deletedEntries = 0; + } + prevKey = foundKey; + if (cursor.delete() == OperationStatus.SUCCESS) { + deletedEntries++; + } + assertEquals(simpleKeyStrings.length - deletedEntries, + cursor.count()); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + doSimpleDuplicatePuts(); + + dw = new DataWalker(null); + dw.setIgnoreDataMap(true); + dw.walkData(); + assertEquals(simpleKeyStrings.length * simpleKeyStrings.length, + dw.nEntries); + closeEnv(); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + + @Test + public void testCountAfterDelete() + throws Throwable { + initEnv(true); + DatabaseEntry key = + new DatabaseEntry(new byte[] {(byte) 'n', + (byte) 'o', (byte) 0 }); + DatabaseEntry val1 = + new DatabaseEntry(new byte[] {(byte) 'k', + (byte) '1', (byte) 0 }); + DatabaseEntry val2 = + new DatabaseEntry(new byte[] {(byte) 'k', + (byte) '2', (byte) 0 }); + OperationStatus status = + exampleDb.putNoDupData(null, key, val1); + if (status != OperationStatus.SUCCESS) + throw new Exception("status on put 1=" + status); + status = exampleDb.putNoDupData(null, key, val2); + if (status != OperationStatus.SUCCESS) + throw new Exception("status on put 2=" + status); + + Cursor c = exampleDb.openCursor(null, null); + try { + status = c.getSearchKey(key, new DatabaseEntry(), + LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) + throw new Exception("status on search=" + status); + assertEquals(2, c.count()); + status = c.delete(); + if (status != OperationStatus.SUCCESS) + throw new Exception("err on del 1=" + status); + status = c.getNext(key, new DatabaseEntry(), LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) + throw new Exception("err on next=" + status); + status = c.delete(); + if (status != OperationStatus.SUCCESS) + throw new Exception("err on del 2=" + status); + assertEquals(0, c.count()); + } finally { + c.close(); + } + + status = exampleDb.putNoDupData(null, key, val1); + if (status != OperationStatus.SUCCESS) + throw new Exception("err on put 3=" + status); + + c = exampleDb.openCursor(null, null); + try { + status = + c.getSearchKey(key, 
new DatabaseEntry(), LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) + throw new Exception("err on search=" + status); + assertEquals(1, c.count()); + } finally { + c.close(); + } + } + + @Test + public void testDuplicateDeletionAll() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + createRandomDuplicateData(10, 1000, dataMap, false, false); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + + foundKey + "/" + foundData); + } + + if (ht.get(foundData) != null) { + ht.remove(foundData); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } else { + fail("didn't find " + foundKey + "/" + foundData); + } + + /* Make sure keys are ascending/descending. */ + assertTrue(foundKey.compareTo(prevKey) >= 0); + + /* + * Make sure duplicate items within key are asc/desc. + */ + if (prevKey.equals(foundKey)) { + if (duplicateComparisonFunction == null) { + assertTrue(foundData.compareTo(prevData) >= 0); + } else { + assertTrue + (duplicateComparisonFunction.compare + (StringUtils.toUTF8(foundData), + StringUtils.toUTF8(prevData)) >= 0); + } + prevData = foundData; + } else { + prevData = ""; + } + + prevKey = foundKey; + assertTrue(cursor.delete() == OperationStatus.SUCCESS); + assertEquals(ht.size(), cursor.count()); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dataMap.size() == 0); + + dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + fail("data found after deletion: " + + foundKey + "/" + foundData); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateDeletionAssorted() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + Hashtable deletedDataMap = new Hashtable(); + createRandomDuplicateData(10, 1000, dataMap, false, false); + + /* Use the DataWalker.addedData field for a deleted Data Map. */ + DataWalker dw = new DataWalker(dataMap, deletedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + + foundKey + "/" + foundData); + } + + /* Make sure keys are ascending/descending. */ + assertTrue(foundKey.compareTo(prevKey) >= 0); + + /* + * Make sure duplicate items within key are asc/desc. 
+ */ + if (prevKey.equals(foundKey)) { + if (duplicateComparisonFunction == null) { + assertTrue(foundData.compareTo(prevData) >= 0); + } else { + assertTrue + (duplicateComparisonFunction.compare + (StringUtils.toUTF8(foundData), + StringUtils.toUTF8(prevData)) >= 0); + } + prevData = foundData; + } else { + prevData = ""; + } + + prevKey = foundKey; + if (rnd.nextInt(10) < 8) { + Hashtable delht = + (Hashtable) addedDataMap.get(foundKey); + if (delht == null) { + delht = new Hashtable(); + addedDataMap.put(foundKey, delht); + } + delht.put(foundData, foundData); + assertTrue(cursor.delete() == + OperationStatus.SUCCESS); + + if (ht.get(foundData) == null) { + fail("didn't find " + + foundKey + "/" + foundData); + } + ht.remove(foundData); + assertEquals(ht.size(), cursor.count()); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + + dw = new DataWalker(dataMap, deletedDataMap) { + @Override + void perData(String foundKey, String foundData) { + Hashtable delht = + (Hashtable) addedDataMap.get(foundKey); + if (delht != null && + delht.get(foundData) != null) { + fail("found deleted entry for " + + foundKey + "/" + foundData); + } + + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("couldn't find hashtable for " + foundKey); + } + if (ht.get(foundData) == null) { + fail("couldn't find entry for " + + foundKey + "/" + foundData); + } + ht.remove(foundData); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dataMap.size() == 0); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateDeletionAssortedSR15375() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + Hashtable deletedDataMap = new Hashtable(); + createRandomDuplicateData(10, 1000, dataMap, false, false); + + /* Use the DataWalker.addedData field for a deleted Data Map. */ + DataWalker dw = new DataWalker(dataMap, deletedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + + foundKey + "/" + foundData); + } + + /* Make sure keys are ascending/descending. */ + assertTrue(foundKey.compareTo(prevKey) >= 0); + + /* + * Make sure duplicate items within key are asc/desc. + */ + if (prevKey.equals(foundKey)) { + if (duplicateComparisonFunction == null) { + assertTrue(foundData.compareTo(prevData) >= 0); + } else { + assertTrue + (duplicateComparisonFunction.compare + (StringUtils.toUTF8(foundData), + StringUtils.toUTF8(prevData)) >= 0); + } + prevData = foundData; + } else { + prevData = ""; + } + + prevKey = foundKey; + if (rnd.nextInt(10) < 8) { + Hashtable delht = + (Hashtable) addedDataMap.get(foundKey); + if (delht == null) { + delht = new Hashtable(); + addedDataMap.put(foundKey, delht); + } + delht.put(foundData, foundData); + assertTrue(cursor.delete() == + OperationStatus.SUCCESS); + + if (ht.get(foundData) == null) { + fail("didn't find " + + foundKey + "/" + foundData); + } + ht.remove(foundData); + assertEquals(ht.size(), cursor.count()); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + + /* + * Add back in a duplicate for each one deleted. 
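+                         * Under the default byte-order comparator the new
+                         * datum, which shares the deleted datum's prefix and
+                         * is one byte longer, sorts immediately after it.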
+ */ + String newDupData = foundData + "x"; + StringDbt newDupDBT = + new StringDbt(newDupData); + assertTrue + (putAndVerifyCursor + (cursor, + new StringDbt(foundKey), + newDupDBT, true) == + OperationStatus.SUCCESS); + ht.put(newDupData, newDupData); + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + + dw = new DataWalker(dataMap, deletedDataMap) { + @Override + void perData(String foundKey, String foundData) { + Hashtable delht = + (Hashtable) addedDataMap.get(foundKey); + if (delht != null && + delht.get(foundData) != null) { + fail("found deleted entry for " + + foundKey + "/" + foundData); + } + + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("couldn't find hashtable for " + foundKey); + } + if (ht.get(foundData) == null) { + fail("couldn't find entry for " + + foundKey + "/" + foundData); + } + ht.remove(foundData); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dataMap.size() == 0); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateDeleteFirst() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + Hashtable deletedDataMap = new Hashtable(); + createRandomDuplicateData(-10, 10, dataMap, false, false); + + /* Use the DataWalker.addedData field for a deleted Data Map. */ + DataWalker dw = new DataWalker(dataMap, deletedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + + foundKey + "/" + foundData); + } + + /* Make sure keys are ascending/descending. */ + assertTrue(foundKey.compareTo(prevKey) >= 0); + + /* + * Make sure duplicate items within key are asc/desc. 
+ */ + if (prevKey.equals(foundKey)) { + if (duplicateComparisonFunction == null) { + assertTrue(foundData.compareTo(prevData) >= 0); + } else { + assertTrue + (duplicateComparisonFunction.compare + (StringUtils.toUTF8(foundData), + StringUtils.toUTF8(prevData)) >= 0); + } + prevData = foundData; + } else { + prevData = ""; + if (cursor.count() > 1) { + Hashtable delht = + (Hashtable) addedDataMap.get(foundKey); + if (delht == null) { + delht = new Hashtable(); + addedDataMap.put(foundKey, delht); + } + delht.put(foundData, foundData); + assertTrue(cursor.delete() == + OperationStatus.SUCCESS); + + if (ht.get(foundData) == null) { + fail("didn't find " + + foundKey + "/" + foundData); + } + ht.remove(foundData); + assertEquals(ht.size(), cursor.count()); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } + } + + prevKey = foundKey; + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + + dw = new DataWalker(dataMap, deletedDataMap) { + @Override + void perData(String foundKey, String foundData) { + Hashtable delht = + (Hashtable) addedDataMap.get(foundKey); + if (delht != null && + delht.get(foundData) != null) { + fail("found deleted entry for " + + foundKey + "/" + foundData); + } + + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("couldn't find hashtable for " + foundKey); + } + if (ht.get(foundData) == null) { + fail("couldn't find entry for " + + foundKey + "/" + foundData); + } + ht.remove(foundData); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dataMap.size() == 0); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Similar to above test, but there was some question about whether + * this tests new functionality or not. Insert k1/d1 and d1/k1. + * Iterate through the data and delete k1/d1. Reinsert k1/d1 and + * make sure it inserts ok. + */ + @Test + public void testSimpleSingleElementDupTree() + throws DatabaseException { + + initEnv(true); + StringDbt key = new StringDbt("k1"); + StringDbt data1 = new StringDbt("d1"); + StringDbt data2 = new StringDbt("d2"); + + assertEquals(OperationStatus.SUCCESS, + putAndVerifyCursor(cursor, key, data1, true)); + assertEquals(OperationStatus.SUCCESS, + putAndVerifyCursor(cursor, key, data2, true)); + + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (foundKey.equals("k1") && deletedEntries == 0) { + if (cursor.delete() == OperationStatus.SUCCESS) { + deletedEntries++; + } + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + + dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) { + deletedEntries++; + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + + assertEquals(1, dw.deletedEntries); + } + + @Test + public void testEmptyNodes() + throws Throwable { + + initEnv(true); + synchronized (DbInternal.getNonNullEnvImpl(exampleEnv). 
+ getINCompressor()) { + writeEmptyNodeData(); + + exampleEnv.compress(); + + Cursor cursor = exampleDb.openCursor(null, null); + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + OperationStatus status = cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + cursor.close(); + assertEquals(OperationStatus.SUCCESS, status); + } + } + + @Test + public void testDeletedReplaySR8984() + throws DatabaseException { + + initEnvTransactional(true); + Transaction txn = exampleEnv.beginTransaction(null, null); + Cursor c = exampleDb.openCursor(txn, null); + c.put(simpleKeys[0], simpleData[0]); + c.delete(); + for (int i = 1; i < 3; i++) { + c.put(simpleKeys[0], simpleData[i]); + } + c.close(); + txn.abort(); + txn = exampleEnv.beginTransaction(null, null); + c = exampleDb.openCursor(txn, null); + assertEquals(OperationStatus.NOTFOUND, + c.getFirst(new DatabaseEntry(), + new DatabaseEntry(), + LockMode.DEFAULT)); + c.close(); + txn.commit(); + } + + @Test + public void testDuplicateDeadlockSR9885() + throws DatabaseException { + + initEnvTransactional(true); + Transaction txn = exampleEnv.beginTransaction(null, null); + Cursor c = exampleDb.openCursor(txn, null); + for (int i = 0; i < simpleKeyStrings.length; i++) { + c.put(simpleKeys[0], simpleData[i]); + } + c.close(); + txn.commit(); + sequence = new AtomicInteger(0); + + JUnitThread tester1 = + new JUnitThread("testDuplicateDeadlock1") { + @Override + public void testBody() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn1 = exampleEnv.beginTransaction(null, null); + Cursor cursor1 = exampleDb.openCursor(txn1, null); + try { + cursor1.getFirst(key, data, LockMode.DEFAULT); + sequence.incrementAndGet(); + while (sequence.get() < 2) { + Thread.yield(); + } + cursor1.delete(); + sequence.incrementAndGet(); + while (sequence.get() < 4) { + Thread.yield(); + } + + } catch (LockConflictException e) { + } finally { + cursor1.close(); + txn1.abort(); + sequence.set(4); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testDuplicateDeadlock2") { + @Override + public void testBody() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn2 = exampleEnv.beginTransaction(null, null); + Cursor cursor2 = exampleDb.openCursor(txn2, null); + try { + while (sequence.get() < 1) { + Thread.yield(); + } + cursor2.getLast(key, data, LockMode.DEFAULT); + sequence.incrementAndGet(); + //cursor2.put(key, + //new DatabaseEntry(StringUtils.toUTF8("d1d1d1"))); + cursor2.delete(); + sequence.incrementAndGet(); + while (sequence.get() < 4) { + Thread.yield(); + } + + } catch (LockConflictException e) { + } finally { + cursor2.close(); + txn2.abort(); + sequence.set(4); + } + } + }; + + try { + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + DatabaseImpl dbImpl = DbInternal.getDbImpl(exampleDb); + dbImpl.verify(new VerifyConfig()); + } catch (Throwable T) { + fail("testDuplicateDeadlock caught: " + T); + } + } + + @Test + public void testSR9992() + throws DatabaseException { + + initEnvTransactional(true); + Transaction txn = exampleEnv.beginTransaction(null, null); + Cursor c = exampleDb.openCursor(txn, null); + for (int i = 1; i < simpleKeys.length; i++) { + c.put(simpleKeys[0], simpleData[i]); + } + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + c.getCurrent(key, data, 
LockMode.DEFAULT); + c.delete(); + assertEquals(OperationStatus.KEYEMPTY, + c.putCurrent + (new DatabaseEntry(StringUtils.toUTF8("aaaa")))); + c.close(); + txn.commit(); + } + + @Test + public void testSR9900() + throws DatabaseException { + + initEnvTransactional(true); + Transaction txn = exampleEnv.beginTransaction(null, null); + Cursor c = exampleDb.openCursor(txn, null); + c.put(simpleKeys[0], simpleData[0]); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + c.getCurrent(key, data, LockMode.DEFAULT); + c.delete(); + assertEquals(OperationStatus.KEYEMPTY, + c.putCurrent + (new DatabaseEntry(StringUtils.toUTF8("aaaa")))); + c.close(); + txn.commit(); + } + + private void put(int data, int key) + throws DatabaseException { + + byte[] keyBytes = new byte[1]; + keyBytes[0] = (byte) (key & 0xff); + DatabaseEntry keyDbt = new DatabaseEntry(keyBytes); + + byte[] dataBytes = new byte[1]; + if (data == -1) { + dataBytes = new byte[0]; + } else { + dataBytes[0] = (byte) (data & 0xff); + } + DatabaseEntry dataDbt = new DatabaseEntry(dataBytes); + + OperationStatus status = exampleDb.put(null, keyDbt, dataDbt); + if (status != OperationStatus.SUCCESS) { + System.out.println("db.put returned " + status + + " for key " + key + "/" + data); + } + } + + private void del(int key) + throws DatabaseException { + + byte[] keyBytes = new byte[1]; + keyBytes[0] = (byte) (key & 0xff); + DatabaseEntry keyDbt = new DatabaseEntry(keyBytes); + + OperationStatus status = exampleDb.delete(null, keyDbt); + if (status != OperationStatus.SUCCESS) { + System.out.println("db.del returned " + status + + " for key " + key); + } + } + + private void delBoth(int key, int data) + throws DatabaseException { + + byte[] keyBytes = new byte[1]; + keyBytes[0] = (byte) (key & 0xff); + DatabaseEntry keyDbt = new DatabaseEntry(keyBytes); + + byte[] dataBytes = new byte[1]; + dataBytes[0] = (byte) (data & 0xff); + DatabaseEntry dataDbt = new DatabaseEntry(dataBytes); + + Cursor cursor = exampleDb.openCursor(null, null); + OperationStatus status = + cursor.getSearchBoth(keyDbt, dataDbt, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + System.out.println("getSearchBoth returned " + status + + " for key " + key + "/" + data); + } + + status = cursor.delete(); + if (status != OperationStatus.SUCCESS) { + System.out.println("Dbc.delete returned " + status + + " for key " + key + "/" + data); + } + cursor.close(); + } + + private void writeEmptyNodeData() + throws DatabaseException { + + put(101, 1); + put(102, 2); + put(103, 3); + put(104, 4); + put(105, 5); + put(106, 6); + del(1); + del(3); + del(5); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(3); + del(5); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(3); + del(5); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(102, 2); + put(104, 4); + put(106, 6); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(102, 2); + put(104, 4); + put(106, 6); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(102, 2); + put(104, 4); + put(106, 6); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(102, 2); + put(104, 4); + put(106, 6); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(102, 2); + put(104, 4); + put(106, 6); 
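+
+        /*
+         * The fill-then-empty cycle continues below: each round deletes
+         * every record so its BINs are left empty before being repopulated.
+         * testEmptyNodes runs this method and then compresses the
+         * environment.
+         */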
+ put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(-1, 2); + put(-1, 4); + put(-1, 6); + put(-1, 1); + put(-1, 3); + put(-1, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(102, 2); + put(104, 4); + put(106, 6); + put(101, 1); + put(103, 3); + put(105, 5); + del(1); + del(2); + del(3); + del(4); + del(5); + del(6); + put(102, 2); + put(104, 4); + put(106, 6); + put(101, 1); + put(103, 3); + put(105, 5); + put(102, 1); + put(103, 1); + put(104, 1); + put(105, 1); + delBoth(1, 101); + delBoth(1, 102); + delBoth(1, 103); + delBoth(1, 104); + delBoth(1, 105); + put(101, 1); + put(102, 1); + put(103, 1); + put(104, 1); + put(105, 1); + delBoth(1, 101); + delBoth(1, 102); + delBoth(1, 103); + delBoth(1, 104); + delBoth(1, 105); + put(101, 1); + put(102, 1); + put(103, 1); + put(104, 1); + put(105, 1); + delBoth(1, 101); + delBoth(1, 102); + delBoth(1, 103); + delBoth(1, 104); + delBoth(1, 105); + put(101, 1); + put(102, 1); + put(103, 1); + put(104, 1); + put(105, 1); + delBoth(1, 102); + delBoth(1, 103); + delBoth(1, 104); + delBoth(1, 105); + put(103, 2); + put(104, 2); + put(105, 2); + put(106, 2); + delBoth(2, 102); + delBoth(2, 103); + delBoth(2, 104); + delBoth(2, 105); + delBoth(2, 106); + put(102, 2); + put(103, 2); + put(104, 2); + put(105, 2); + put(106, 2); + delBoth(2, 102); + delBoth(2, 103); + delBoth(2, 104); + delBoth(2, 105); + delBoth(2, 106); + put(102, 2); + put(103, 2); + put(104, 2); + put(105, 2); + put(106, 2); + delBoth(2, 102); + delBoth(2, 103); + delBoth(2, 104); + delBoth(2, 105); + delBoth(2, 106); + put(102, 2); + put(103, 2); + put(104, 2); + put(105, 2); + put(106, 2); + delBoth(2, 102); + delBoth(2, 103); + delBoth(2, 104); + delBoth(2, 105); + delBoth(2, 106); + put(107, 6); + put(108, 6); + put(109, 6); + put(110, 6); + delBoth(6, 106); + delBoth(6, 107); + delBoth(6, 108); + delBoth(6, 109); + delBoth(6, 110); + put(106, 6); + put(107, 6); + put(108, 6); + put(109, 6); + put(110, 6); + delBoth(6, 106); + delBoth(6, 107); + delBoth(6, 108); + delBoth(6, 109); + delBoth(6, 110); + put(106, 6); + put(107, 6); + put(108, 6); + put(109, 6); + put(110, 6); + delBoth(6, 106); + delBoth(6, 107); + delBoth(6, 108); + delBoth(6, 109); + delBoth(6, 110); + put(106, 6); + put(107, 6); + put(108, 6); + put(109, 6); + put(110, 6); + delBoth(6, 107); + delBoth(6, 108); + delBoth(6, 109); + delBoth(6, 110); + put(106, 5); + put(107, 5); + put(108, 5); + put(109, 5); + delBoth(5, 105); + delBoth(5, 106); + delBoth(5, 107); + delBoth(5, 108); + delBoth(5, 109); + put(105, 5); + put(106, 5); + put(107, 5); + put(108, 5); + put(109, 5); + delBoth(5, 105); + delBoth(5, 106); + delBoth(5, 107); + delBoth(5, 108); + delBoth(5, 109); + put(105, 5); + put(106, 5); + put(107, 5); + put(108, 5); + put(109, 5); + delBoth(5, 105); + delBoth(5, 106); + delBoth(5, 107); + delBoth(5, 108); + delBoth(5, 109); + put(105, 5); + put(106, 5); + put(107, 5); + put(108, 5); + put(109, 5); + delBoth(5, 106); + delBoth(5, 107); + delBoth(5, 108); + delBoth(5, 109); + delBoth(1, 101); + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorDuplicateTest.java b/test/com/sleepycat/je/dbi/DbCursorDuplicateTest.java new file mode 100644 index 0000000..77d77a0 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorDuplicateTest.java @@ -0,0 +1,1049 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.Serializable; +import java.util.Comparator; +import java.util.Hashtable; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.PartialComparator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.utilint.StringUtils; + +import org.junit.Test; + +/** + * Various unit tests for CursorImpl using duplicates. + */ +public class DbCursorDuplicateTest extends DbCursorTestBase { + + public DbCursorDuplicateTest() { + super(); + } + + /** + * Rudimentary insert/retrieve test. Walk over the results forwards. + */ + @Test + public void testDuplicateCreationForward() + throws Throwable { + + initEnv(true); + try { + doDuplicateTest(true, false); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Same as testDuplicateCreationForward except uses keylast. + */ + @Test + public void testDuplicateCreationForwardKeyLast() + throws Throwable { + + initEnv(true); + try { + doDuplicateTest(true, true); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Rudimentary insert/retrieve test. Walk over the results backwards. + */ + @Test + public void testDuplicateCreationBackwards() + throws Throwable { + + initEnv(true); + try { + doDuplicateTest(false, false); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Insert N_KEYS data items into a tree. Set a btreeComparison function. + * Iterate through the tree in ascending order. Ensure that the elements + * are returned in ascending order. + */ + @Test + public void testLargeGetForwardTraverseWithNormalComparisonFunction() + throws Throwable { + + try { + tearDown(); + duplicateComparisonFunction = duplicateComparator; + setUp(); + initEnv(true); + doDuplicateTest(true, false); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Insert N_KEYS data items into a tree. Set a reverse order + * btreeComparison function. Iterate through the tree in ascending order. + * Ensure that the elements are returned in ascending order. + */ + @Test + public void testLargeGetForwardTraverseWithReverseComparisonFunction() + throws Throwable { + + try { + tearDown(); + duplicateComparisonFunction = reverseDuplicateComparator; + setUp(); + initEnv(true); + doDuplicateTest(false, false); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Put a bunch of data items into the database in a specific order and + * ensure that when read back that we can't putNoDupData without receiving + * an error return code. 
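+     *
+     * <p>In short, for every record already present:
+     * <pre>{@code
+     * cursor.putNoDupData(key, data);  // returns KEYEXIST, not SUCCESS
+     * }</pre>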
+ */ + @Test + public void testPutNoDupData() + throws Throwable { + + try { + initEnv(true); + createRandomDuplicateData(null, false); + + DataWalker dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + assertEquals + (OperationStatus.KEYEXIST, + cursor.putNoDupData(new StringDbt(foundKey), + new StringDbt(foundData))); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testPutNoDupData2() + throws Throwable { + + try { + initEnv(true); + for (int i = 0; i < simpleKeyStrings.length; i++) { + OperationStatus status = + cursor.putNoDupData(new StringDbt("oneKey"), + new StringDbt(simpleDataStrings[i])); + assertEquals(OperationStatus.SUCCESS, status); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testAbortDuplicateTreeCreation() + throws Throwable { + + try { + initEnvTransactional(true); + Transaction txn = exampleEnv.beginTransaction(null, null); + Cursor c = exampleDb.openCursor(txn, null); + OperationStatus status = + c.put(new StringDbt("oneKey"), + new StringDbt("firstData")); + assertEquals(OperationStatus.SUCCESS, status); + c.close(); + txn.commit(); + txn = exampleEnv.beginTransaction(null, null); + c = exampleDb.openCursor(txn, null); + status = + c.put(new StringDbt("oneKey"), + new StringDbt("secondData")); + assertEquals(OperationStatus.SUCCESS, status); + c.close(); + txn.abort(); + txn = exampleEnv.beginTransaction(null, null); + c = exampleDb.openCursor(txn, null); + DatabaseEntry keyRet = new DatabaseEntry(); + DatabaseEntry dataRet = new DatabaseEntry(); + assertEquals(OperationStatus.SUCCESS, + c.getFirst(keyRet, dataRet, LockMode.DEFAULT)); + assertEquals(1, c.count()); + assertEquals(OperationStatus.NOTFOUND, + c.getNext(keyRet, dataRet, LockMode.DEFAULT)); + c.close(); + txn.commit(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Create the usual random duplicate data. Iterate back over it calling + * count at each element. Make sure the number of duplicates returned for + * a particular key is N_DUPLICATE_PER_KEY. Note that this is somewhat + * inefficient, but cautious, in that it calls count for every duplicate + * returned, rather than just once for each unique key returned. 
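+     *
+     * <p>From any position within a key's duplicate set:
+     * <pre>{@code
+     * // count() is position-independent within the set:
+     * assertEquals(N_COUNT_DUPLICATES_PER_KEY, cursor.count());
+     * }</pre>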
+ */ + @Test + public void testDuplicateCount() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + + createRandomDuplicateData(N_COUNT_TOP_KEYS, + N_COUNT_DUPLICATES_PER_KEY, + dataMap, false, true); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + assertEquals(N_COUNT_DUPLICATES_PER_KEY, + cursor.count()); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertEquals(N_COUNT_DUPLICATES_PER_KEY, dw.nEntries); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateDuplicates() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + + String keyString = "aaaa"; + String dataString = "d1d1"; + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + keyDbt.setData(StringUtils.toUTF8(keyString)); + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + assertTrue(cursor.put(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + dataString = "d2d2"; + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + assertTrue(cursor.put(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dw.nEntries == 2); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateDuplicatesWithComparators() //cwl + throws Throwable { + + try { + tearDown(); + duplicateComparisonFunction = invocationCountingComparator; + btreeComparisonFunction = invocationCountingComparator; + invocationCountingComparator.setInvocationCount(0); + setUp(); + runBtreeVerifier = false; + initEnv(true); + + String keyString = "aaaa"; + String dataString = "d1d1"; + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + keyDbt.setData(StringUtils.toUTF8(keyString)); + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.put(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.put(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + + InvocationCountingBtreeComparator bTreeICC = + (InvocationCountingBtreeComparator) + (exampleDb.getConfig().getBtreeComparator()); + + InvocationCountingBtreeComparator dupICC = + (InvocationCountingBtreeComparator) + (exampleDb.getConfig().getDuplicateComparator()); + + /* + * Key and data are combined internally, so both comparators are + * called twice. 
+ */ + assertEquals(2, bTreeICC.getInvocationCount()); + assertEquals(2, dupICC.getInvocationCount()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateReplacement() + throws Throwable { + + try { + initEnv(true); + String keyString = "aaaa"; + String dataString = "d1d1"; + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + keyDbt.setData(StringUtils.toUTF8(keyString)); + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + dataString = "d2d2"; + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + StringDbt dataDbt = new StringDbt(); + dataDbt.setString(foundData); + assertEquals(OperationStatus.SUCCESS, + cursor.putCurrent(dataDbt)); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dw.nEntries == 2); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateReplacementFailure() + throws Throwable { + + try { + initEnv(true); + String keyString = "aaaa"; + String dataString = "d1d1"; + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + keyDbt.setData(StringUtils.toUTF8(keyString)); + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + dataString = "d2d2"; + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) { + StringDbt dataDbt = new StringDbt(); + dataDbt.setString("blort"); + try { + cursor.putCurrent(dataDbt); + fail("didn't catch DatabaseException"); + } catch (DatabaseException DBE) { + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dw.nEntries == 2); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicateReplacementFailure1Dup() + throws Throwable { + + try { + initEnv(true); + String keyString = "aaaa"; + String dataString = "d1d1"; + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + keyDbt.setData(StringUtils.toUTF8(keyString)); + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) { + StringDbt dataDbt = new StringDbt(); + dataDbt.setString("blort"); + try { + cursor.putCurrent(dataDbt); + fail("didn't catch DatabaseException"); + } catch (DatabaseException DBE) { + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dw.nEntries == 1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * When using a 
duplicate comparator that does not compare all bytes, + * attempting to change the data for a duplicate data item should work when + * a byte not compared is changed. [#15527] [#15704] + */ + @Test + public void testDuplicateReplacementFailureWithComparisonFunction1() + throws Throwable { + + try { + tearDown(); + duplicateComparisonFunction = truncatedComparator; + setUp(); + initEnv(true); + String keyString = "aaaa"; + String dataString = "d1d1"; + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + keyDbt.setData(StringUtils.toUTF8(keyString)); + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + dataString = "d2d2"; + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + StringDbt dataDbt = new StringDbt(); + StringBuilder sb = new StringBuilder(foundData); + sb.replace(3, 4, "3"); + dataDbt.setString(sb.toString()); + try { + cursor.putCurrent(dataDbt); + } catch (DatabaseException e) { + fail(e.toString()); + } + StringDbt keyDbt = new StringDbt(); + assertSame(OperationStatus.SUCCESS, + cursor.getCurrent(keyDbt, dataDbt, null)); + assertEquals(foundKey, keyDbt.getString()); + assertEquals(sb.toString(), dataDbt.getString()); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * When using a duplicate comparator that compares all bytes, attempting to + * change the data for a duplicate data item should cause an error. 
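+     * <p>Sketch: the comparator sees the changed bytes, so the new datum is
+     * a different duplicate and the replacement must be refused:
+     * <pre>{@code
+     * cursor.putCurrent(changedData);  // throws DatabaseException
+     * }</pre>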
+ * [#15527] + */ + @Test + public void testDuplicateReplacementFailureWithComparisonFunction2() + throws Throwable { + + try { + tearDown(); + duplicateComparisonFunction = truncatedComparator; + setUp(); + initEnv(true); + + String keyString = "aaaa"; + String dataString = "d1d1"; + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + keyDbt.setData(StringUtils.toUTF8(keyString)); + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + dataString = "d2d2"; + dataDbt.setData(StringUtils.toUTF8(dataString)); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) == + OperationStatus.SUCCESS); + assertTrue(cursor.putNoDupData(keyDbt, dataDbt) != + OperationStatus.SUCCESS); + DataWalker dw = new DataWalker(null) { + @Override + void perData(String foundKey, String foundData) { + StringDbt dataDbt = new StringDbt(); + StringBuilder sb = new StringBuilder(foundData); + sb.replace(2, 2, "3"); + sb.setLength(4); + dataDbt.setString(sb.toString()); + try { + cursor.putCurrent(dataDbt); + fail("didn't catch DatabaseException"); + } catch (DatabaseException DBE) { + } + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dw.nEntries == 2); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + private void doDuplicateTest(boolean forward, boolean useKeyLast) + throws Throwable { + + Hashtable dataMap = new Hashtable(); + createRandomDuplicateData(dataMap, useKeyLast); + + DataWalker dw; + if (forward) { + dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + foundKey + "/" + + foundData); + } + + if (ht.get(foundData) != null) { + ht.remove(foundData); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } else { + fail("didn't find " + foundKey + "/" + foundData); + } + + assertTrue(foundKey.compareTo(prevKey) >= 0); + + if (prevKey.equals(foundKey)) { + if (duplicateComparisonFunction == null) { + assertTrue(foundData.compareTo(prevData) >= 0); + } else { + assertTrue + (duplicateComparisonFunction.compare + (StringUtils.toUTF8(foundData), + StringUtils.toUTF8(prevData)) >= 0); + } + prevData = foundData; + } else { + prevData = ""; + } + + prevKey = foundKey; + } + }; + } else { + dw = new BackwardsDataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + foundKey + "/" + + foundData); + } + + if (ht.get(foundData) != null) { + ht.remove(foundData); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } else { + fail("didn't find " + foundKey + "/" + foundData); + } + + if (!prevKey.equals("")) { + assertTrue(foundKey.compareTo(prevKey) <= 0); + } + + if (prevKey.equals(foundKey)) { + if (!prevData.equals("")) { + if (duplicateComparisonFunction == null) { + assertTrue + (foundData.compareTo(prevData) <= 0); + } else { + assertTrue + (duplicateComparisonFunction.compare + (StringUtils.toUTF8(foundData), + StringUtils.toUTF8(prevData)) <= 0); + } + } + prevData = foundData; + } else { + prevData = ""; + } + + prevKey = foundKey; + } + }; + } + dw.setIgnoreDataMap(true); + dw.walkData(); + assertTrue(dataMap.size() == 0); + } + + /** + * Create a bunch of random duplicate data. 
Iterate over it using + * getNextDup until the end of the dup set. At end of set, handleEndOfSet + * is called to do a getNext onto the next dup set. Verify that ascending + * order is maintained and that we reach end of set the proper number of + * times. + */ + @Test + public void testGetNextDup() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + + createRandomDuplicateData(dataMap, false); + + DataWalker dw = new DupDataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + + foundKey + "/" + foundData); + } + + if (ht.get(foundData) != null) { + ht.remove(foundData); + if (ht.size() == 0) { + dataMap.remove(foundKey); + } + } else { + fail("didn't find " + foundKey + "/" + foundData); + } + + assertTrue(foundKey.compareTo(prevKey) >= 0); + + if (prevKey.equals(foundKey)) { + if (duplicateComparisonFunction == null) { + assertTrue(foundData.compareTo(prevData) >= 0); + } else { + assertTrue + (duplicateComparisonFunction.compare + (StringUtils.toUTF8(foundData), + StringUtils.toUTF8(prevData)) >= 0); + } + prevData = foundData; + } else { + prevData = ""; + } + + prevKey = foundKey; + } + + @Override + OperationStatus handleEndOfSet(OperationStatus status) + throws DatabaseException { + + String foundKeyString = foundKey.getString(); + Hashtable ht = (Hashtable) dataMap.get(foundKeyString); + assertNull(ht); + return cursor.getNext(foundKey, foundData, + LockMode.DEFAULT); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertEquals(N_TOP_LEVEL_KEYS, dw.nHandleEndOfSet); + assertTrue(dataMap.size() == 0); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Create a bunch of random duplicate data. Iterate over it using + * getNextDup until the end of the dup set. At end of set, handleEndOfSet + * is called to do a getNext onto the next dup set. Verify that descending + * order is maintained and that we reach end of set the proper number of + * times. 
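+ *
+ * As a minimal sketch (assuming an open cursor on a sorted-duplicates
+ * database), the backwards walk has this shape:
+ * <pre>
+ * OperationStatus s = cursor.getLast(key, data, LockMode.DEFAULT);
+ * while (s == OperationStatus.SUCCESS) {
+ *     s = cursor.getPrevDup(key, data, LockMode.DEFAULT);
+ *     if (s == OperationStatus.NOTFOUND) {
+ *         // end of this dup set: step to the previous key's last dup
+ *         s = cursor.getPrev(key, data, LockMode.DEFAULT);
+ *     }
+ * }
+ * </pre>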
+ */
+ @Test
+ public void testGetPrevDup()
+ throws Throwable {
+
+ try {
+ initEnv(true);
+ Hashtable dataMap = new Hashtable();
+
+ createRandomDuplicateData(dataMap, false);
+
+ DataWalker dw = new BackwardsDupDataWalker(dataMap) {
+ @Override
+ void perData(String foundKey, String foundData) {
+ Hashtable ht = (Hashtable) dataMap.get(foundKey);
+ if (ht == null) {
+ fail("didn't find ht " +
+ foundKey + "/" + foundData);
+ }
+
+ if (ht.get(foundData) != null) {
+ ht.remove(foundData);
+ if (ht.size() == 0) {
+ dataMap.remove(foundKey);
+ }
+ } else {
+ fail("didn't find " + foundKey + "/" + foundData);
+ }
+
+ if (!prevKey.equals("")) {
+ assertTrue(foundKey.compareTo(prevKey) <= 0);
+ }
+
+ if (prevKey.equals(foundKey)) {
+ if (!prevData.equals("")) {
+ if (duplicateComparisonFunction == null) {
+ assertTrue(foundData.compareTo
+ (prevData) <= 0);
+ } else {
+ assertTrue
+ (duplicateComparisonFunction.compare
+ (StringUtils.toUTF8(foundData),
+ StringUtils.toUTF8(prevData)) <= 0);
+ }
+ }
+ prevData = foundData;
+ } else {
+ prevData = "";
+ }
+
+ prevKey = foundKey;
+ }
+
+ @Override
+ OperationStatus handleEndOfSet(OperationStatus status)
+ throws DatabaseException {
+
+ String foundKeyString = foundKey.getString();
+ Hashtable ht = (Hashtable) dataMap.get(foundKeyString);
+ assertNull(ht);
+ return cursor.getPrev(foundKey, foundData,
+ LockMode.DEFAULT);
+ }
+ };
+ dw.setIgnoreDataMap(true);
+ dw.walkData();
+ assertEquals(N_TOP_LEVEL_KEYS, dw.nHandleEndOfSet);
+ assertTrue(dataMap.size() == 0);
+ } catch (Throwable t) {
+ t.printStackTrace();
+ throw t;
+ }
+ }
+
+ /**
+ * Create a bunch of random duplicate data. Iterate over it using
+ * getNextNoDup until the end of the top level set. Verify that
+ * ascending order is maintained and that we see the proper
+ * number of top-level keys.
+ */
+ @Test
+ public void testGetNextNoDup()
+ throws Throwable {
+
+ try {
+ initEnv(true);
+ Hashtable dataMap = new Hashtable();
+
+ createRandomDuplicateData(dataMap, false);
+
+ DataWalker dw = new NoDupDataWalker(dataMap) {
+ @Override
+ void perData(String foundKey, String foundData) {
+ Hashtable ht = (Hashtable) dataMap.get(foundKey);
+ if (ht == null) {
+ fail("didn't find ht " +
+ foundKey + "/" + foundData);
+ }
+
+ if (ht.get(foundData) != null) {
+ dataMap.remove(foundKey);
+ } else {
+ fail("saw " +
+ foundKey + "/" + foundData + " twice.");
+ }
+
+ assertTrue(foundKey.compareTo(prevKey) > 0);
+ prevKey = foundKey;
+ }
+ };
+ dw.setIgnoreDataMap(true);
+ dw.walkData();
+ assertEquals(N_TOP_LEVEL_KEYS, dw.nEntries);
+ assertTrue(dataMap.size() == 0);
+ } catch (Throwable t) {
+ t.printStackTrace();
+ throw t;
+ }
+ }
+
+ /**
+ * Create a bunch of random duplicate data. Iterate over it using
+ * getPrevNoDup until the beginning of the top level set. Verify that
+ * descending order is maintained and that we see the proper number of
+ * top-level keys.
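+ *
+ * Sketch of the descending no-dup walk (at most one entry per distinct
+ * key, assuming an open cursor):
+ * <pre>
+ * OperationStatus s = cursor.getLast(key, data, LockMode.DEFAULT);
+ * while (s == OperationStatus.SUCCESS) {
+ *     s = cursor.getPrevNoDup(key, data, LockMode.DEFAULT);
+ * }
+ * </pre>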
+ */ + @Test + public void testGetPrevNoDup() + throws Throwable { + + try { + initEnv(true); + Hashtable dataMap = new Hashtable(); + + createRandomDuplicateData(dataMap, false); + + DataWalker dw = new NoDupBackwardsDataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + Hashtable ht = (Hashtable) dataMap.get(foundKey); + if (ht == null) { + fail("didn't find ht " + + foundKey + "/" + foundData); + } + + if (ht.get(foundData) != null) { + dataMap.remove(foundKey); + } else { + fail("saw " + + foundKey + "/" + foundData + " twice."); + } + + if (!prevKey.equals("")) { + assertTrue(foundKey.compareTo(prevKey) < 0); + } + prevKey = foundKey; + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + assertEquals(N_TOP_LEVEL_KEYS, dw.nEntries); + assertTrue(dataMap.size() == 0); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testIllegalDuplicateCreation() + throws Throwable { + + try { + initEnv(false); + Hashtable dataMap = new Hashtable(); + + try { + createRandomDuplicateData(dataMap, false); + fail("didn't throw DuplicateEntryException"); + } catch (DuplicateEntryException DEE) { + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Just use the BtreeComparator that's already available. + */ + private static Comparator duplicateComparator = + new DuplicateAscendingComparator(); + + private static Comparator reverseDuplicateComparator = + new DuplicateReverseComparator(); + + private static InvocationCountingBtreeComparator + invocationCountingComparator = + new InvocationCountingBtreeComparator(); + + @SuppressWarnings("serial") + public static class DuplicateAscendingComparator + extends BtreeComparator { + + public DuplicateAscendingComparator() { + super(); + } + } + + @SuppressWarnings("serial") + public static class DuplicateReverseComparator + extends ReverseBtreeComparator { + + public DuplicateReverseComparator() { + super(); + } + } + + @SuppressWarnings("serial") + public static class InvocationCountingBtreeComparator + extends BtreeComparator { + + private int invocationCount = 0; + + @Override + public int compare(Object o1, Object o2) { + invocationCount++; + return super.compare(o1, o2); + } + + public int getInvocationCount() { + return invocationCount; + } + + public void setInvocationCount(int invocationCount) { + this.invocationCount = invocationCount; + } + } + + /* + * A special comparator that only looks at the first length-1 bytes of data + * so that the last byte can be changed without affecting "equality". Use + * this for putCurrent tests of duplicates. + */ + private static Comparator truncatedComparator = new TruncatedComparator(); + + @SuppressWarnings("serial") + private static class TruncatedComparator implements Comparator, + PartialComparator, + Serializable { + protected TruncatedComparator() { + } + + public int compare(Object o1, Object o2) { + byte[] arg1; + byte[] arg2; + arg1 = (byte[]) o1; + arg2 = (byte[]) o2; + int a1Len = arg1.length - 1; + int a2Len = arg2.length - 1; + + int limit = Math.min(a1Len, a2Len); + + for (int i = 0; i < limit; i++) { + byte b1 = arg1[i]; + byte b2 = arg2[i]; + if (b1 == b2) { + continue; + } else { + /* + * Remember, bytes are signed, so convert to + * shorts so that we effectively do an unsigned + * byte comparison. 
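+ *
+ * An equivalent, more common idiom masks straight to int:
+ * int u1 = b1 & 0xFF; int u2 = b2 & 0xFF; return u1 - u2;
+ * the short/0x80 steps below produce the same ordering.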
+ */ + short s1 = (short) (b1 & 0x7F); + short s2 = (short) (b2 & 0x7F); + if (b1 < 0) { + s1 |= 0x80; + } + if (b2 < 0) { + s2 |= 0x80; + } + return (s1 - s2); + } + } + + return (a1Len - a2Len); + } + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorDuplicateValidationTest.java b/test/com/sleepycat/je/dbi/DbCursorDuplicateValidationTest.java new file mode 100644 index 0000000..f151a63 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorDuplicateValidationTest.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertFalse; + +import java.util.Enumeration; +import java.util.Hashtable; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbTestProxy; +import com.sleepycat.je.tree.BIN; + +public class DbCursorDuplicateValidationTest extends DbCursorTestBase { + + public DbCursorDuplicateValidationTest() { + super(); + } + + @Test + public void testValidateCursors() + throws Throwable { + + initEnv(true); + Hashtable dataMap = new Hashtable(); + createRandomDuplicateData(10, 1000, dataMap, false, false); + + Hashtable bins = new Hashtable(); + + DataWalker dw = new DataWalker(bins) { + void perData(String foundKey, String foundData) + throws DatabaseException { + CursorImpl cursorImpl = + DbTestProxy.dbcGetCursorImpl(cursor); + BIN lastBin = cursorImpl.getBIN(); + if (rnd.nextInt(10) < 8) { + cursor.delete(); + } + dataMap.put(lastBin, lastBin); + } + }; + dw.setIgnoreDataMap(true); + dw.walkData(); + dw.close(); + Enumeration e = bins.keys(); + while (e.hasMoreElements()) { + BIN b = (BIN) e.nextElement(); + assertFalse(b.getCursorSet().size() > 0); + } + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorSearchTest.java b/test/com/sleepycat/je/dbi/DbCursorSearchTest.java new file mode 100644 index 0000000..fec0339 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorSearchTest.java @@ -0,0 +1,314 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Enumeration; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.Map; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.utilint.StringUtils; + +/** + * Test cursor getSearch* + */ +public class DbCursorSearchTest extends DbCursorTestBase { + + public DbCursorSearchTest() { + super(); + } + + /** + * Put a small number of data items into the database + * then make sure we can retrieve them with getSearchKey. + */ + @Test + public void testSimpleSearchKey() + throws DatabaseException { + initEnv(false); + doSimpleCursorPuts(); + verify(simpleDataMap, false); + } + + /** + * Put a small number of data items into the database + * then make sure we can retrieve them with getSearchKey. + * Delete them, and make sure we can't search for them anymore. + */ + @Test + public void testSimpleDeleteAndSearchKey() + throws DatabaseException { + + initEnv(false); + doSimpleCursorPuts(); + verify(simpleDataMap, true); + } + + /** + * Put a large number of data items into the database, + * then make sure we can retrieve them with getSearchKey. + */ + @Test + public void testLargeSearchKey() + throws DatabaseException { + + initEnv(false); + Hashtable expectedData = new Hashtable(); + doLargePut(expectedData, N_KEYS); + verify(expectedData, false); + } + + /** + * Put a large number of data items into the database, + * then make sure we can retrieve them with getSearchKey. + */ + @Test + public void testLargeDeleteAndSearchKey() + throws DatabaseException { + + initEnv(false); + Hashtable expectedData = new Hashtable(); + doLargePut(expectedData, N_KEYS); + verify(expectedData, true); + } + + @Test + public void testLargeSearchKeyDuplicates() + throws DatabaseException { + + initEnv(true); + Hashtable expectedData = new Hashtable(); + createRandomDuplicateData(expectedData, false); + + verifyDuplicates(expectedData); + } + + /** + * Put a small number of data items into the database + * then make sure we can retrieve them with getSearchKey. + * See [#9337]. + */ + @Test + public void testSimpleSearchBothWithPartialDbt() + throws DatabaseException { + + initEnv(false); + doSimpleCursorPuts(); + DatabaseEntry key = new DatabaseEntry(StringUtils.toUTF8("bar")); + DatabaseEntry data = new DatabaseEntry(new byte[100]); + data.setSize(3); + System.arraycopy(StringUtils.toUTF8("two"), 0, data.getData(), 0, 3); + OperationStatus status = + cursor2.getSearchBoth(key, data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + } + + @Test + public void testGetSearchBothNoDuplicatesAllowedSR9522() + throws DatabaseException { + + initEnv(false); + doSimpleCursorPuts(); + DatabaseEntry key = new DatabaseEntry(StringUtils.toUTF8("bar")); + DatabaseEntry data = new DatabaseEntry(StringUtils.toUTF8("two")); + OperationStatus status = + cursor2.getSearchBoth(key, data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + } + + /** + * Make sure the database contains the set of data we put in. 
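+ *
+ * Note on the SET_RANGE probes below: decrementing the last byte of a
+ * key yields a probe that sorts just below the original, so
+ * getSearchKeyRange should land back on the original entry (the
+ * smallest key >= the probe), assuming no other key falls in between:
+ * <pre>
+ * byte[] kb = testKey.getData();
+ * kb[kb.length - 1]--;  // probe sorts just below the real key
+ * status = cursor2.getSearchKeyRange(testKey, testData,
+ *                                    LockMode.DEFAULT);
+ * </pre>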
+ */ + private void verify(Hashtable expectedData, boolean doDelete) + throws DatabaseException { + + Iterator iter = expectedData.entrySet().iterator(); + StringDbt testKey = new StringDbt(); + StringDbt testData = new StringDbt(); + + // Iterate over the expected values. + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + testKey.setString((String) entry.getKey()); + + // search for the expected values using SET. + OperationStatus status = cursor2.getSearchKey(testKey, testData, + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals((String) entry.getValue(), testData.getString()); + assertEquals((String) entry.getKey(), testKey.getString()); + + // check that getCurrent returns the same thing. + status = cursor2.getCurrent(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals((String) entry.getValue(), testData.getString()); + assertEquals((String) entry.getKey(), testKey.getString()); + + if (doDelete) { + // Delete the entry and make sure that getSearchKey doesn't + // return it again. + status = cursor2.delete(); + assertEquals(OperationStatus.SUCCESS, status); + + // search for the expected values using SET. + status = + cursor2.getSearchKey(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + // search for the expected values using SET_BOTH. + status = + cursor2.getSearchBoth(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + // search for the expected values using SET_RANGE - should + // give 0 except if this is the last key in the tree, in which + // case DB_NOTFOUND. It should never be DB_KEYEMPTY. + // It would be nice to be definite about the expected + // status, but to do that we have to know whether this is the + // highest key in the set, which we don't currently track. + status = cursor2.getSearchKeyRange + (testKey, testData, LockMode.DEFAULT); + assertTrue(status == OperationStatus.SUCCESS || + status == OperationStatus.NOTFOUND); + } else { + // search for the expected values using SET_BOTH. + status = + cursor2.getSearchBoth(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals((String) entry.getValue(), testData.getString()); + assertEquals((String) entry.getKey(), testKey.getString()); + + // check that getCurrent returns the same thing. + status = + cursor2.getCurrent(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals((String) entry.getValue(), testData.getString()); + assertEquals((String) entry.getKey(), testKey.getString()); + + // check that SET_RANGE works as expected for exact keys + status = cursor2.getSearchKeyRange + (testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals((String) entry.getValue(), testData.getString()); + assertEquals((String) entry.getKey(), testKey.getString()); + + // search for the expected values using SET_RANGE. + byte[] keyBytes = testKey.getData(); + keyBytes[keyBytes.length - 1]--; + status = cursor2.getSearchKeyRange + (testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals((String) entry.getValue(), testData.getString()); + assertEquals((String) entry.getKey(), testKey.getString()); + + // check that getCurrent returns the same thing. 
+ status = + cursor2.getCurrent(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals((String) entry.getValue(), testData.getString()); + assertEquals((String) entry.getKey(), testKey.getString()); + } + } + } + + private void verifyDuplicates(Hashtable expectedData) + throws DatabaseException { + + Enumeration iter = expectedData.keys(); + StringDbt testKey = new StringDbt(); + StringDbt testData = new StringDbt(); + + // Iterate over the expected values. + while (iter.hasMoreElements()) { + String key = (String) iter.nextElement(); + testKey.setString(key); + + // search for the expected values using SET. + OperationStatus status = cursor2.getSearchKey(testKey, testData, + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(key, testKey.getString()); + String dataString = testData.getString(); + + // check that getCurrent returns the same thing. + status = cursor2.getCurrent(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(dataString, testData.getString()); + assertEquals(key, testKey.getString()); + + // search for the expected values using SET_RANGE. + byte[] keyBytes = testKey.getData(); + keyBytes[keyBytes.length - 1]--; + status = + cursor2.getSearchKeyRange(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(dataString, testData.getString()); + assertEquals(key, testKey.getString()); + + // check that getCurrent returns the same thing. + status = cursor2.getCurrent(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(dataString, testData.getString()); + assertEquals(key, testKey.getString()); + + Hashtable ht = (Hashtable) expectedData.get(key); + + Enumeration iter2 = ht.keys(); + while (iter2.hasMoreElements()) { + String expectedDataString = (String) iter2.nextElement(); + testData.setString(expectedDataString); + + // search for the expected values using SET_BOTH. + status = + cursor2.getSearchBoth(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(expectedDataString, testData.getString()); + assertEquals(key, testKey.getString()); + + // check that getCurrent returns the same thing. + status = + cursor2.getCurrent(testKey, testData, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(expectedDataString, testData.getString()); + assertEquals(key, testKey.getString()); + + // search for the expected values using SET_RANGE_BOTH. + byte[] dataBytes = testData.getData(); + dataBytes[dataBytes.length - 1]--; + status = cursor2.getSearchBothRange(testKey, testData, + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(key, testKey.getString()); + assertEquals(expectedDataString, testData.getString()); + + // check that getCurrent returns the same thing. + status = cursor2.getCurrent(testKey, testData, + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(expectedDataString, testData.getString()); + assertEquals(key, testKey.getString()); + } + } + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorTest.java b/test/com/sleepycat/je/dbi/DbCursorTest.java new file mode 100644 index 0000000..0f8c766 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorTest.java @@ -0,0 +1,1494 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Hashtable; +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.utilint.StringUtils; + +/** + * Various unit tests for CursorImpl. + */ +@RunWith(Parameterized.class) +public class DbCursorTest extends DbCursorTestBase { + + @Parameters + public static List genParams() { + + return Arrays.asList(new Object[][]{{false}, {true}}); + } + + public DbCursorTest(boolean keyPrefixing) { + super(); + this.keyPrefixing = keyPrefixing; + customName = (keyPrefixing) ? "keyPrefixing" : ""; + } + + private boolean alreadyTornDown = false; + + /** + * Put a small number of data items into the database in a specific order + * and ensure that they read back in ascending order. + */ + @Test + public void testSimpleGetPut() + throws Throwable { + + try { + initEnv(false); + doSimpleCursorPuts(); + + DataWalker dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + assertTrue(foundKey.compareTo(prevKey) >= 0); + prevKey = foundKey; + } + }; + dw.walkData(); + assertTrue(dw.nEntries == simpleKeyStrings.length); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test the internal Cursor.advanceCursor() entrypoint. + */ + @Test + public void testCursorAdvance() + throws Throwable { + + try { + initEnv(false); + doSimpleCursorPuts(); + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + String prevKey = ""; + + OperationStatus status = cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + + /* + * Advance forward and then back to the first. Rest of scan + * should be as normal. + */ + DbInternal.advanceCursor(cursor, foundKey, foundData); + DbInternal.retrieveNext + (cursor, foundKey, foundData, LockMode.DEFAULT, GetMode.PREV); + int nEntries = 0; + while (status == OperationStatus.SUCCESS) { + String foundKeyString = foundKey.getString(); + String foundDataString = foundData.getString(); + + assertTrue(foundKeyString.compareTo(prevKey) >= 0); + prevKey = foundKeyString; + nEntries++; + + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + } + + assertTrue(nEntries == simpleKeyStrings.length); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Put a small number of data items into the database in a specific order + * and ensure that they read back in descending order. 
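+ *
+ * A descending scan is getLast() followed by getPrev() until NOTFOUND;
+ * minimal sketch against the cursor API:
+ * <pre>
+ * OperationStatus s = cursor.getLast(key, data, LockMode.DEFAULT);
+ * while (s == OperationStatus.SUCCESS) {
+ *     s = cursor.getPrev(key, data, LockMode.DEFAULT);
+ * }
+ * </pre>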
+ */ + @Test + public void testSimpleGetPutBackwards() + throws Throwable { + + try { + initEnv(false); + doSimpleCursorPuts(); + + DataWalker dw = new BackwardsDataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + if (!prevKey.equals("")) { + assertTrue(foundKey.compareTo(prevKey) <= 0); + } + prevKey = foundKey; + } + + @Override + OperationStatus getFirst(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + }; + dw.walkData(); + assertTrue(dw.nEntries == simpleKeyStrings.length); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Put a small number of data items into the database in a specific order + * and ensure that they read back in descending order. When "quux" is + * found, insert "fub" into the database and make sure that it is also read + * back in the cursor. + */ + @Test + public void testSimpleGetPut2() + throws Throwable { + + try { + initEnv(false); + doSimpleGetPut2("quux", "fub"); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + public void doSimpleGetPut2(String whenFoundDoInsert, + String newKey) + throws DatabaseException { + + doSimpleCursorPuts(); + + DataWalker dw = + new BackwardsDataWalker(whenFoundDoInsert, newKey, simpleDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (foundKey.equals(whenFoundDoInsert)) { + putAndVerifyCursor(cursor2, new StringDbt(newKey), + new StringDbt("ten"), true); + simpleDataMap.put(newKey, "ten"); + } + } + + @Override + OperationStatus getFirst(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + }; + dw.walkData(); + assertTrue(dw.nEntries == simpleKeyStrings.length + 1); + } + + /** + * Iterate through each of the keys in the list of "simple keys". For each + * one, create the database afresh, iterate through the entries in + * ascending order, and when the key being tested is found, insert the next + * highest key into the database. Make sure that the key just inserted is + * retrieved during the cursor walk. Lather, rinse, repeat. + */ + @Test + public void testSimpleGetPutNextKeyForwardTraverse() + throws Throwable { + + try { + tearDown(); + for (int i = 0; i < simpleKeyStrings.length; i++) { + setUp(); + initEnv(false); + doSimpleGetPut(true, + simpleKeyStrings[i], + nextKey(simpleKeyStrings[i]), + 1); + tearDown(); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Iterate through each of the keys in the list of "simple keys". For each + * one, create the database afresh, iterate through the entries in + * ascending order, and when the key being tested is found, insert the next + * lowest key into the database. Make sure that the key just inserted is + * not retrieved during the cursor walk. Lather, rinse, repeat. 
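+ *
+ * The insertion is made with a second cursor while the first keeps
+ * scanning; a key that sorts behind the scan position is simply never
+ * visited:
+ * <pre>
+ * cursor2.put(new StringDbt(prevKey(foundKey)), new StringDbt("ten"));
+ * // the forward walk with cursor continues and never returns the
+ * // newly inserted key
+ * </pre>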
+ */ + @Test + public void testSimpleGetPutPrevKeyForwardTraverse() + throws Throwable { + + try { + tearDown(); + for (int i = 0; i < simpleKeyStrings.length; i++) { + setUp(); + initEnv(false); + doSimpleGetPut(true, simpleKeyStrings[i], + prevKey(simpleKeyStrings[i]), 0); + tearDown(); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Iterate through each of the keys in the list of "simple keys". For each + * one, create the database afresh, iterate through the entries in + * descending order, and when the key being tested is found, insert the + * next lowest key into the database. Make sure that the key just inserted + * is retrieved during the cursor walk. Lather, rinse, repeat. + */ + @Test + public void testSimpleGetPutPrevKeyBackwardsTraverse() { + try { + tearDown(); + for (int i = 0; i < simpleKeyStrings.length; i++) { + setUp(); + initEnv(false); + doSimpleGetPut(false, simpleKeyStrings[i], + prevKey(simpleKeyStrings[i]), 1); + tearDown(); + } + } catch (Throwable t) { + t.printStackTrace(); + } + } + + /** + * Iterate through each of the keys in the list of "simple keys". For each + * one, create the database afresh, iterate through the entries in + * descending order, and when the key being tested is found, insert the + * next highest key into the database. Make sure that the key just + * inserted is not retrieved during the cursor walk. Lather, rinse, + * repeat. + */ + @Test + public void testSimpleGetPutNextKeyBackwardsTraverse() + throws Throwable { + + try { + tearDown(); + for (int i = 0; i < simpleKeyStrings.length; i++) { + setUp(); + initEnv(false); + doSimpleGetPut(true, simpleKeyStrings[i], + prevKey(simpleKeyStrings[i]), 0); + tearDown(); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for the above four tests. + */ + private void doSimpleGetPut(boolean forward, + String whenFoundDoInsert, + String newKey, + int additionalEntries) + throws DatabaseException { + + doSimpleCursorPuts(); + + DataWalker dw; + if (forward) { + dw = new DataWalker(whenFoundDoInsert, newKey, simpleDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (foundKey.equals(whenFoundDoInsert)) { + putAndVerifyCursor(cursor2, new StringDbt(newKey), + new StringDbt("ten"), true); + simpleDataMap.put(newKey, "ten"); + } + } + }; + } else { + dw = new BackwardsDataWalker(whenFoundDoInsert, + newKey, + simpleDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (foundKey.equals(whenFoundDoInsert)) { + putAndVerifyCursor(cursor2, new StringDbt(newKey), + new StringDbt("ten"), true); + simpleDataMap.put(newKey, "ten"); + } + } + + @Override + OperationStatus getFirst(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + }; + } + dw.walkData(); + assertEquals(simpleKeyStrings.length + additionalEntries, dw.nEntries); + } + + /** + * Put a small number of data items into the database in a specific order + * and ensure that they read back in descending order. Replace the data + * portion for each one, then read back again and make sure it was replaced + * properly. 
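+ *
+ * The replacement itself is a single putCurrent, which rewrites the
+ * data portion while leaving the key untouched:
+ * <pre>
+ * cursor.putCurrent(new StringDbt(foundData + "x"));
+ * </pre>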
+ */ + @Test + public void testSimpleReplace() + throws Throwable { + + try { + initEnv(false); + doSimpleReplace(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + public void doSimpleReplace() + throws DatabaseException { + + doSimpleCursorPuts(); + + DataWalker dw = + new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + String newData = foundData + "x"; + cursor.putCurrent(new StringDbt(newData)); + simpleDataMap.put(foundKey, newData); + } + }; + dw.walkData(); + dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + assertTrue(foundData.equals(simpleDataMap.get(foundKey))); + } + }; + dw.walkData(); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order. After each element is retrieved, insert the next + * lowest key into the tree. Ensure that the element just inserted is not + * returned by the cursor. Ensure that the elements are returned in + * ascending order. Lather, rinse, repeat. + */ + @Test + public void testLargeGetPutPrevKeyForwardTraverse() + throws Throwable { + + try { + initEnv(false); + doLargeGetPutPrevKeyForwardTraverse(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. + */ + private void doLargeGetPutPrevKeyForwardTraverse() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + assertTrue(foundKey.compareTo(prevKey) >= 0); + putAndVerifyCursor(cursor2, + new StringDbt(prevKey(foundKey)), + new StringDbt + (Integer.toString(dataMap.size() + + nEntries)), + true); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + assertTrue(dw.nEntries == N_KEYS); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree + * in ascending order. Ensure that count() always returns 1 for each + * data item returned. + */ + @Test + public void testLargeCount() + throws Throwable { + + try { + initEnv(false); + doLargeCount(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. 
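+ *
+ * Cursor.count() reports the number of data items for the key at the
+ * cursor position, so in a database without duplicates it is always 1:
+ * <pre>
+ * assertTrue(cursor.count() == 1);
+ * </pre>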
+ */ + private void doLargeCount() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + assertTrue(cursor.count() == 1); + assertTrue(foundKey.compareTo(prevKey) >= 0); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + assertTrue(dw.nEntries == N_KEYS); + } + + public void xxtestGetPerf() + throws Throwable { + + try { + initEnv(false); + final int N = 50000; + int count = 0; + doLargePutPerf(N); + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + OperationStatus status; + status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT); + + while (status == OperationStatus.SUCCESS) { + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + count++; + } + + assertTrue(count == N); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Insert a bunch of key/data pairs. Read them back and replace each of + * the data. Read the pairs back again and make sure the replace did the + * right thing. + */ + @Test + public void testLargeReplace() + throws Throwable { + + try { + initEnv(false); + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + String newData = foundData + "x"; + cursor.putCurrent(new StringDbt(newData)); + dataMap.put(foundKey, newData); + } + }; + dw.walkData(); + dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + assertTrue(foundData.equals(dataMap.get(foundKey))); + dataMap.remove(foundKey); + } + }; + dw.walkData(); + assertTrue(dw.nEntries == N_KEYS); + assertTrue(dataMap.size() == 0); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * descending order. After each element is retrieved, insert the next + * highest key into the tree. Ensure that the element just inserted is not + * returned by the cursor. Ensure that the elements are returned in + * descending order. Lather, rinse, repeat. + */ + @Test + public void testLargeGetPutNextKeyBackwardsTraverse() + throws Throwable { + + try { + tearDown(); + for (int i = 0; i < N_ITERS; i++) { + setUp(); + initEnv(false); + doLargeGetPutNextKeyBackwardsTraverse(); + tearDown(); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. 
+ */ + private void doLargeGetPutNextKeyBackwardsTraverse() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new BackwardsDataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (!prevKey.equals("")) { + assertTrue(foundKey.compareTo(prevKey) <= 0); + } + putAndVerifyCursor(cursor2, + new StringDbt(nextKey(foundKey)), + new StringDbt + (Integer.toString(dataMap.size() + + nEntries)), + true); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } + + @Override + OperationStatus getFirst(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + assertTrue(dw.nEntries == N_KEYS); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order. After each element is retrieved, insert the next + * highest key into the tree. Ensure that the element just inserted is + * returned by the cursor. Ensure that the elements are returned in + * ascending order. Lather, rinse, repeat. + */ + @Test + public void testLargeGetPutNextKeyForwardTraverse() + throws Throwable { + + try { + initEnv(false); + doLargeGetPutNextKeyForwardTraverse(N_KEYS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. + */ + private void doLargeGetPutNextKeyForwardTraverse(int nKeys) + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + Hashtable addedDataMap = new Hashtable(); + doLargePut(dataMap, nKeys); + + DataWalker dw = new DataWalker(dataMap, addedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + assertTrue(foundKey.compareTo(prevKey) >= 0); + if (addedDataMap.get(foundKey) == null) { + String newKey = nextKey(foundKey); + String newData = + Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + addedDataMap.put(newKey, newData); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } else { + addedDataMap.remove(foundKey); + } + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + if (addedDataMap.size() > 0) { + System.out.println(addedDataMap); + fail("addedDataMap still has entries"); + } + assertTrue(dw.nEntries == nKeys * 2); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * descending order. After each element is retrieved, insert the next + * lowest key into the tree. Ensure that the element just inserted is + * returned by the cursor. Ensure that the elements are returned in + * descending order. Lather, rinse, repeat. + */ + @Test + public void testLargeGetPutPrevKeyBackwardsTraverse() + throws Throwable { + + try { + initEnv(false); + doLargeGetPutPrevKeyBackwardsTraverse(N_KEYS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. 
+ */ + private void doLargeGetPutPrevKeyBackwardsTraverse(int nKeys) + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + Hashtable addedDataMap = new Hashtable(); + doLargePut(dataMap, nKeys); + + DataWalker dw = new BackwardsDataWalker(dataMap, addedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (!prevKey.equals("")) { + assertTrue(foundKey.compareTo(prevKey) <= 0); + } + if (addedDataMap.get(foundKey) == null) { + String newKey = prevKey(foundKey); + String newData = + Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + addedDataMap.put(newKey, newData); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } else { + addedDataMap.remove(foundKey); + } + } + + @Override + OperationStatus getFirst(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + if (addedDataMap.size() > 0) { + System.out.println(addedDataMap); + fail("addedDataMap still has entries"); + } + assertTrue(dw.nEntries == nKeys * 2); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order. After each element is retrieved, insert the next + * highest and next lowest keys into the tree. Ensure that the next + * highest element just inserted is returned by the cursor. Ensure that + * the elements are returned in ascending order. Lather, rinse, repeat. + */ + @Test + public void testLargeGetPutBothKeyForwardTraverse() + throws Throwable { + + try { + initEnv(false); + doLargeGetPutBothKeyForwardTraverse(N_KEYS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. + */ + private void doLargeGetPutBothKeyForwardTraverse(int nKeys) + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + Hashtable addedDataMap = new Hashtable(); + doLargePut(dataMap, nKeys); + + DataWalker dw = new DataWalker(dataMap, addedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + assertTrue(foundKey.compareTo(prevKey) >= 0); + if (addedDataMap.get(foundKey) == null) { + String newKey = nextKey(foundKey); + String newData = + Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + addedDataMap.put(newKey, newData); + newKey = prevKey(foundKey); + newData = Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } else { + addedDataMap.remove(foundKey); + } + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + if (addedDataMap.size() > 0) { + System.out.println(addedDataMap); + fail("addedDataMap still has entries"); + } + assertTrue(dw.nEntries == nKeys * 2); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * descending order. 
After each element is retrieved, insert the next + * highest and next lowest keys into the tree. Ensure that the next lowest + * element just inserted is returned by the cursor. Ensure that the + * elements are returned in descending order. Lather, rinse, repeat. + */ + @Test + public void testLargeGetPutBothKeyBackwardsTraverse() + throws Throwable { + + try { + initEnv(false); + doLargeGetPutBothKeyBackwardsTraverse(N_KEYS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. + */ + private void doLargeGetPutBothKeyBackwardsTraverse(int nKeys) + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + Hashtable addedDataMap = new Hashtable(); + doLargePut(dataMap, nKeys); + + DataWalker dw = new BackwardsDataWalker(dataMap, addedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (!prevKey.equals("")) { + assertTrue(foundKey.compareTo(prevKey) <= 0); + } + if (addedDataMap.get(foundKey) == null) { + String newKey = nextKey(foundKey); + String newData = + Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + newKey = prevKey(foundKey); + newData = Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + addedDataMap.put(newKey, newData); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } else { + addedDataMap.remove(foundKey); + } + } + + @Override + OperationStatus getFirst(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + if (addedDataMap.size() > 0) { + System.out.println(addedDataMap); + fail("addedDataMap still has entries"); + } + assertTrue(dw.nEntries == nKeys * 2); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * ascending order. After each element is retrieved, insert a new random + * key/data pair into the tree. Ensure that the element just inserted is + * returned by the cursor if it is greater than the current element. + * Ensure that the elements are returned in ascending order. Lather, + * rinse, repeat. + */ + @Test + public void testLargeGetPutRandomKeyForwardTraverse() + throws Throwable { + + try { + initEnv(false); + doLargeGetPutRandomKeyForwardTraverse(N_KEYS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. 
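+ *
+ * The visibility rule exercised here: a record inserted mid-scan is
+ * returned by the walking cursor only if it still lies ahead of the
+ * cursor in the scan direction:
+ * <pre>
+ * cursor2.put(new StringDbt(newKey), new StringDbt(newData));
+ * if (newKey.compareTo(foundKey) > 0) {
+ *     // a forward walk will encounter newKey later
+ *     extraVisibleEntries++;
+ * }
+ * </pre>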
+ */ + private void doLargeGetPutRandomKeyForwardTraverse(int nKeys) + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + Hashtable addedDataMap = new Hashtable(); + doLargePut(dataMap, nKeys); + + DataWalker dw = new DataWalker(dataMap, addedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + assertTrue(foundKey.compareTo(prevKey) >= 0); + byte[] key = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(key); + String newKey = StringUtils.fromUTF8(key); + String newData = + Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + if (newKey.compareTo(foundKey) > 0) { + addedDataMap.put(newKey, newData); + extraVisibleEntries++; + } + if (addedDataMap.get(foundKey) == null) { + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } else { + if (addedDataMap.remove(foundKey) == null) { + fail(foundKey + " not found in either datamap"); + } + } + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + if (addedDataMap.size() > 0) { + System.out.println(addedDataMap); + fail("addedDataMap still has entries"); + } + assertTrue(dw.nEntries == nKeys + dw.extraVisibleEntries); + } + + /** + * Insert N_KEYS data items into a tree. Iterate through the tree in + * descending order. After each element is retrieved, insert a new random + * key/data pair into the tree. Ensure that the element just inserted is + * returned by the cursor if it is less than the current element. Ensure + * that the elements are returned in descending order. Lather, rinse, + * repeat. + */ + @Test + public void testLargeGetPutRandomKeyBackwardsTraverse() + throws Throwable { + + try { + initEnv(false); + doLargeGetPutRandomKeyBackwardsTraverse(N_KEYS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. 
+ */ + private void doLargeGetPutRandomKeyBackwardsTraverse(int nKeys) + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + Hashtable addedDataMap = new Hashtable(); + doLargePut(dataMap, nKeys); + + DataWalker dw = new BackwardsDataWalker(dataMap, addedDataMap) { + @Override + void perData(String foundKey, String foundData) + throws DatabaseException { + + if (!prevKey.equals("")) { + assertTrue(foundKey.compareTo(prevKey) <= 0); + } + byte[] key = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(key); + String newKey = StringUtils.fromUTF8(key); + String newData = + Integer.toString(dataMap.size() + nEntries); + putAndVerifyCursor(cursor2, + new StringDbt(newKey), + new StringDbt(newData), + true); + if (newKey.compareTo(foundKey) < 0) { + addedDataMap.put(newKey, newData); + extraVisibleEntries++; + } + if (addedDataMap.get(foundKey) == null) { + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } else { + if (addedDataMap.remove(foundKey) == null) { + fail(foundKey + " not found in either datamap"); + } + } + } + + @Override + OperationStatus getFirst(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, + StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + if (addedDataMap.size() > 0) { + System.out.println(addedDataMap); + fail("addedDataMap still has entries"); + } + assertTrue(dw.nEntries == nKeys + dw.extraVisibleEntries); + } + + /** + * Insert N_KEYS data items into a tree. Set a btreeComparison function. + * Iterate through the tree in ascending order. Ensure that the elements + * are returned in ascending order. + */ + @Test + public void testLargeGetForwardTraverseWithNormalComparisonFunction() + throws Throwable { + + try { + tearDown(); + btreeComparisonFunction = btreeComparator; + setUp(); + initEnv(false); + doLargeGetForwardTraverseWithNormalComparisonFunction(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. + */ + private void doLargeGetForwardTraverseWithNormalComparisonFunction() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + assertTrue(foundKey.compareTo(prevKey) >= 0); + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + assertTrue(dw.nEntries == N_KEYS); + } + + /** + * Insert N_KEYS data items into a tree. Set a reverse order + * btreeComparison function. Iterate through the tree in ascending order. + * Ensure that the elements are returned in ascending order. + */ + @Test + public void testLargeGetForwardTraverseWithReverseComparisonFunction() + throws Throwable { + + try { + tearDown(); + btreeComparisonFunction = reverseBtreeComparator; + setUp(); + initEnv(false); + doLargeGetForwardTraverseWithReverseComparisonFunction(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper routine for above. 
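+ *
+ * The reverse ordering is installed before the database is created
+ * (see initEnvInternal in DbCursorTestBase); sketch:
+ * <pre>
+ * DatabaseConfig dbConfig = new DatabaseConfig();
+ * dbConfig.setBtreeComparator(reverseBtreeComparator);
+ * </pre>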
+ */ + private void doLargeGetForwardTraverseWithReverseComparisonFunction() + throws DatabaseException { + + Hashtable dataMap = new Hashtable(); + doLargePut(dataMap, N_KEYS); + + DataWalker dw = new DataWalker(dataMap) { + @Override + void perData(String foundKey, String foundData) { + if (prevKey.length() != 0) { + assertTrue(foundKey.compareTo(prevKey) <= 0); + } + prevKey = foundKey; + assertTrue(dataMap.get(foundKey) != null); + dataMap.remove(foundKey); + } + }; + dw.walkData(); + if (dataMap.size() > 0) { + fail("dataMap still has entries"); + } + assertTrue(dw.nEntries == N_KEYS); + } + + @Test + public void testNullKeyAndDataParams() + throws Throwable { + + try { + initEnv(false); + /* Put some data so that we can get a cursor. */ + doSimpleCursorPuts(); + + DataWalker dw = new DataWalker(simpleDataMap) { + @Override + void perData(String foundKey, String foundData) { + + /* getCurrent() */ + try { + cursor.getCurrent( + new StringDbt(""), null, + LockMode.DEFAULT); + } catch (Throwable IAE) { + fail("null data is allowed"); + } + + try { + cursor.getCurrent( + null, new StringDbt(""), + LockMode.DEFAULT); + } catch (Throwable IAE) { + fail("null key is allowed"); + } + + /* getFirst() */ + try { + Cursor c = cursor.dup(true); + c.getFirst( + new StringDbt(""), null, + LockMode.DEFAULT); + c.close(); + } catch (Throwable IAE) { + fail("null data is allowed"); + } + + try { + Cursor c = cursor.dup(true); + c.getFirst( + null, new StringDbt(""), + LockMode.DEFAULT); + c.close(); + } catch (Throwable IAE) { + fail("null key is allowed"); + } + + /* getNext(), getPrev, getNextDup, + getNextNoDup, getPrevNoDup */ + try { + Cursor c = cursor.dup(true); + c.getNext( + new StringDbt(""), + null, LockMode.DEFAULT); + c.close(); + } catch (Throwable IAE) { + fail("null data is allowed"); + } + + try { + Cursor c = cursor.dup(true); + c.getNext( + null, new StringDbt(""), + LockMode.DEFAULT); + c.close(); + } catch (Throwable IAE) { + fail("null key is allowed"); + } + + /* putXXX() */ + try { + cursor.put(new StringDbt(""), null); + fail("didn't throw IllegalArgumentException"); + } catch (IllegalArgumentException IAE) { + } catch (DatabaseException DBE) { + fail("threw DatabaseException not " + + "IllegalArgumentException"); + } + + try { + cursor.put(null, new StringDbt("")); + fail("didn't throw IllegalArgumentException"); + } catch (IllegalArgumentException IAE) { + } catch (DatabaseException DBE) { + fail("threw DatabaseException not " + + "IllegalArgumentException"); + } + } + }; + dw.walkData(); + assertTrue(dw.nEntries == simpleKeyStrings.length); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testCursorOutOfBoundsBackwards() + throws Throwable { + + try { + initEnv(false); + doSimpleCursorPuts(); + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + OperationStatus status; + status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT); + + assertEquals(OperationStatus.SUCCESS, status); + assertEquals("aaa", foundKey.getString()); + assertEquals("four", foundData.getString()); + + status = cursor.getPrev(foundKey, foundData, LockMode.DEFAULT); + + assertEquals(OperationStatus.NOTFOUND, status); + + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + + assertEquals(OperationStatus.SUCCESS, status); + assertEquals("bar", foundKey.getString()); + assertEquals("two", foundData.getString()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void 
testCursorOutOfBoundsForwards() + throws Throwable { + + try { + initEnv(false); + doSimpleCursorPuts(); + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + OperationStatus status; + status = cursor.getLast(foundKey, foundData, LockMode.DEFAULT); + + assertEquals(OperationStatus.SUCCESS, status); + assertEquals("quux", foundKey.getString()); + assertEquals("seven", foundData.getString()); + + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + status = cursor.getPrev(foundKey, foundData, LockMode.DEFAULT); + + assertEquals(OperationStatus.SUCCESS, status); + assertEquals("mumble", foundKey.getString()); + assertEquals("eight", foundData.getString()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testTwiceClosedCursor() + throws Throwable { + + try { + initEnv(false); + doSimpleCursorPuts(); + Cursor cursor = exampleDb.openCursor(null, null); + cursor.close(); + try { + cursor.close(); + } catch (Exception e) { + fail("Caught Exception while re-closing a Cursor."); + } + + try { + cursor.put + (new StringDbt("bogus"), new StringDbt("thingy")); + fail("Expected IllegalStateException for re-use of cursor"); + } catch (IllegalStateException DBE) { + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testTreeSplittingWithDeletedIdKey() + throws Throwable { + + treeSplittingWithDeletedIdKeyWorker(); + } + + @Test + public void testTreeSplittingWithDeletedIdKeyWithUserComparison() + throws Throwable { + + tearDown(); + btreeComparisonFunction = btreeComparator; + setUp(); + treeSplittingWithDeletedIdKeyWorker(); + } + + static private Comparator btreeComparator = new BtreeComparator(); + + static private Comparator reverseBtreeComparator = + new ReverseBtreeComparator(); + + private void treeSplittingWithDeletedIdKeyWorker() + throws Throwable { + + initEnv(false); + StringDbt data = new StringDbt("data"); + + Cursor cursor = exampleDb.openCursor(null, null); + cursor.put(new StringDbt("AGPFX"), data); + cursor.put(new StringDbt("AHHHH"), data); + cursor.put(new StringDbt("AIIII"), data); + cursor.put(new StringDbt("AAAAA"), data); + cursor.put(new StringDbt("AABBB"), data); + cursor.put(new StringDbt("AACCC"), data); + cursor.close(); + exampleDb.delete(null, new StringDbt("AGPFX")); + exampleEnv.compress(); + cursor = exampleDb.openCursor(null, null); + cursor.put(new StringDbt("AAAAB"), data); + cursor.put(new StringDbt("AAAAC"), data); + cursor.close(); + validateDatabase(); + } +} diff --git a/test/com/sleepycat/je/dbi/DbCursorTestBase.java b/test/com/sleepycat/je/dbi/DbCursorTestBase.java new file mode 100644 index 0000000..d40d0ed --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbCursorTestBase.java @@ -0,0 +1,773 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.Serializable; +import java.util.Comparator; +import java.util.Enumeration; +import java.util.Hashtable; +import java.util.Random; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DbTestProxy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +import org.junit.After; + +/** + * Various unit tests for CursorImpl. + */ +public class DbCursorTestBase extends TestBase { + protected File envHome; + protected Cursor cursor; + protected Cursor cursor2; + protected Database exampleDb; + protected Environment exampleEnv; + protected Hashtable simpleDataMap; + protected Comparator btreeComparisonFunction = null; + protected Comparator duplicateComparisonFunction = null; + protected StringDbt[] simpleKeys; + protected StringDbt[] simpleData; + protected boolean duplicatesAllowed; + protected boolean keyPrefixing; + + protected static final int N_KEY_BYTES = 10; + protected static final int N_ITERS = 2; + protected static final int N_KEYS = 5000; + protected static final int N_TOP_LEVEL_KEYS = 10; + protected static final int N_DUPLICATES_PER_KEY = 2500; + protected static final int N_COUNT_DUPLICATES_PER_KEY = 500; + protected static final int N_COUNT_TOP_KEYS = 1; + + protected static int dbCnt = 0; + protected boolean runBtreeVerifier = true; + + public DbCursorTestBase() { + envHome = SharedTestUtils.getTestDir(); + } + + protected void initEnv(boolean duplicatesAllowed) + throws DatabaseException { + + initEnvInternal(duplicatesAllowed, false); + } + + protected void initEnvTransactional(boolean duplicatesAllowed) + throws DatabaseException { + + initEnvInternal(duplicatesAllowed, true); + } + + private void initEnvInternal(boolean duplicatesAllowed, + boolean transactionalDatabase) + throws DatabaseException { + + this.duplicatesAllowed = duplicatesAllowed; + + /* Set up sample data. */ + int nKeys = simpleKeyStrings.length; + simpleKeys = new StringDbt[nKeys]; + simpleData = new StringDbt[nKeys]; + for (int i = 0; i < nKeys; i++) { + simpleKeys[i] = new StringDbt(simpleKeyStrings[i]); + simpleData[i] = new StringDbt(simpleDataStrings[i]); + } + + /* Set up an environment. 
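+ * (Note: the config below deliberately uses a tiny NODE_MAX of 6 and a
+ * 16 MB cache so that even small data sets build multi-level Btrees and
+ * exercise split and compression paths.)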
*/ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(), + new Long(1 << 24).toString()); + envConfig.setAllowCreate(true); + if (!runBtreeVerifier) { + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + } + exampleEnv = new Environment(envHome, envConfig); + + /* Set up a database. */ + String databaseName = "simpleDb" + dbCnt++; + DatabaseConfig dbConfig = new DatabaseConfig(); + if (btreeComparisonFunction != null) { + dbConfig.setBtreeComparator(btreeComparisonFunction); + } + if (duplicateComparisonFunction != null) { + dbConfig.setDuplicateComparator(duplicateComparisonFunction); + } + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(duplicatesAllowed); + dbConfig.setTransactional(transactionalDatabase); + /* Prefixing cannot be false when sortedDuplicates is true. */ + dbConfig.setKeyPrefixing(keyPrefixing || duplicatesAllowed); + exampleDb = exampleEnv.openDatabase(null, databaseName, dbConfig); + + /* Set up cursors. */ + cursor = exampleDb.openCursor(null, null); + cursor2 = exampleDb.openCursor(null, null); + simpleDataMap = new Hashtable(); + } + + void closeEnv() { + simpleKeys = null; + simpleData = null; + simpleDataMap = null; + + try { + if (cursor != null) { + cursor.close(); + cursor = null; + } + } catch (DatabaseException ignore) { + } + + try { + if (cursor2 != null) { + cursor2.close(); + cursor2 = null; + } + } catch (DatabaseException ignore) { + /* Same as above. */ + } + + try { + if (exampleDb != null) { + exampleDb.close(); + exampleDb = null; + } + } catch (Exception ignore) { + } + + try { + if (exampleEnv != null) { + exampleEnv.close(); + exampleEnv = null; + } + } catch (Exception ignore) { + + /* + * Ignore this exception. It's caused by us calling + * tearDown() within the test. Each tearDown() call + * forces the database closed. So when the call from + * junit comes along, it's already closed. + */ + } + } + + @After + public void tearDown() { + closeEnv(); + } + + protected String[] simpleKeyStrings = { + "foo", "bar", "baz", "aaa", "fubar", + "foobar", "quux", "mumble", "froboy" }; + + protected String[] simpleDataStrings = { + "one", "two", "three", "four", "five", + "six", "seven", "eight", "nine" }; + + protected void doSimpleCursorPuts() + throws DatabaseException { + + for (int i = 0; i < simpleKeyStrings.length; i++) { + putAndVerifyCursor(cursor, simpleKeys[i], simpleData[i], true); + simpleDataMap.put(simpleKeyStrings[i], simpleDataStrings[i]); + } + } + + /** + * A class that performs cursor walking. The walkData method iterates + * over all data in the database and calls the "perData()" method on + * each data item. The perData() method is expected to be overridden + * by the user. + */ + protected class DataWalker { + String prevKey = ""; + String prevData = ""; + int nEntries = 0; + int deletedEntries = 0; + int extraVisibleEntries = 0; + int expectReadLocks = 1; + protected int nHandleEndOfSet = 0; + String whenFoundDoInsert; + String newKey; + String deletedEntry = null; + Hashtable dataMap; + Hashtable addedDataMap; + Random rnd = new Random(); + /* True if the datamap processing should not happen in the walkData + routine. 
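+ * Subclasses that track visited records themselves can set this to
+ * skip the per-record map assertions. Typical DataWalker usage, as an
+ * illustrative sketch (the anonymous-subclass pattern used by the
+ * tests above):
+ *
+ *   DataWalker dw = new DataWalker(dataMap) {
+ *       @Override
+ *       void perData(String foundKey, String foundData) {
+ *           // examine, assert on, or mutate each record here
+ *       }
+ *   };
+ *   dw.walkData();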
*/ + boolean ignoreDataMap = false; + + DataWalker(Hashtable dataMap) { + this.dataMap = dataMap; + this.addedDataMap = null; + } + + DataWalker(Hashtable dataMap, + Hashtable addedDataMap) { + this.dataMap = dataMap; + this.addedDataMap = addedDataMap; + } + + DataWalker() { + this.dataMap = simpleDataMap; + this.addedDataMap = null; + } + + DataWalker(String whenFoundDoInsert, + String newKey, + Hashtable dataMap) { + this.whenFoundDoInsert = whenFoundDoInsert; + this.newKey = newKey; + this.dataMap = dataMap; + this.addedDataMap = null; + } + + void setIgnoreDataMap(boolean ignoreDataMap) { + this.ignoreDataMap = ignoreDataMap; + } + + OperationStatus getFirst(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getFirst(foundKey, foundData, + LockMode.DEFAULT); + } + + OperationStatus getData(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getNext(foundKey, foundData, + LockMode.DEFAULT); + } + + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + void walkData() + throws DatabaseException { + + /* get some data back */ + OperationStatus status = getFirst(foundKey, foundData); + + while (status == OperationStatus.SUCCESS) { + String foundKeyString = foundKey.getString(); + String foundDataString = foundData.getString(); + + if (!ignoreDataMap) { + if (dataMap.get(foundKeyString) != null) { + assertEquals(dataMap.get(foundKeyString), + foundDataString); + } else if (addedDataMap != null && + addedDataMap.get(foundKeyString) != null) { + assertEquals(addedDataMap.get(foundKeyString), + foundDataString); + } else { + fail("didn't find key in either map (" + + foundKeyString + + ")"); + } + } + + StatGroup stats = + DbTestProxy.dbcGetCursorImpl(cursor).getLockStats(); + assertEquals(expectReadLocks, stats.getInt(LOCK_READ_LOCKS)); + assertEquals(0, stats.getInt(LOCK_WRITE_LOCKS)); + perData(foundKeyString, foundDataString); + nEntries++; + status = getData(foundKey, foundData); + if (status != OperationStatus.SUCCESS) { + nHandleEndOfSet++; + status = handleEndOfSet(status); + } + } + TestUtils.validateNodeMemUsage( + DbInternal.getNonNullEnvImpl(exampleEnv), false); + } + + /** + * @throws DatabaseException from subclasses. + */ + void perData(String foundKey, String foundData) + throws DatabaseException { + + /* to be overridden */ + } + + /** + * @throws DatabaseException from subclasses. 
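+ * Called by walkData() when getData() stops returning SUCCESS; a
+ * subclass may reposition the cursor and return SUCCESS to resume the
+ * walk, while this default implementation simply ends it.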
+ */ + OperationStatus handleEndOfSet(OperationStatus status) + throws DatabaseException { + + return status; + } + + void close() + throws DatabaseException { + + cursor.close(); + } + } + + protected class BackwardsDataWalker extends DataWalker { + BackwardsDataWalker(Hashtable dataMap) { + super(dataMap); + } + + BackwardsDataWalker(Hashtable dataMap, + Hashtable addedDataMap) { + super(dataMap, addedDataMap); + } + + BackwardsDataWalker(String whenFoundDoInsert, + String newKey, + Hashtable dataMap) { + super(whenFoundDoInsert, newKey, dataMap); + } + + @Override + OperationStatus getFirst(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getLast(foundKey, foundData, + LockMode.DEFAULT); + } + + @Override + OperationStatus getData(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getPrev(foundKey, foundData, + LockMode.DEFAULT); + } + } + + protected class DupDataWalker extends DataWalker { + DupDataWalker(Hashtable dataMap) { + super(dataMap); + } + + DupDataWalker(Hashtable dataMap, + Hashtable addedDataMap) { + super(dataMap, addedDataMap); + } + + DupDataWalker(String whenFoundDoInsert, + String newKey, + Hashtable dataMap) { + super(whenFoundDoInsert, newKey, dataMap); + } + + @Override + OperationStatus getData(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getNextDup(foundKey, foundData, + LockMode.DEFAULT); + } + } + + protected class BackwardsDupDataWalker extends BackwardsDataWalker { + BackwardsDupDataWalker(Hashtable dataMap) { + super(dataMap); + } + + BackwardsDupDataWalker(Hashtable dataMap, + Hashtable addedDataMap) { + super(dataMap, addedDataMap); + } + + BackwardsDupDataWalker(String whenFoundDoInsert, + String newKey, + Hashtable dataMap) { + super(whenFoundDoInsert, newKey, dataMap); + } + + @Override + OperationStatus getData(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getPrevDup(foundKey, foundData, + LockMode.DEFAULT); + } + } + + protected class NoDupDataWalker extends DataWalker { + NoDupDataWalker(Hashtable dataMap) { + super(dataMap); + } + + NoDupDataWalker(Hashtable dataMap, + Hashtable addedDataMap) { + super(dataMap, addedDataMap); + } + + NoDupDataWalker(String whenFoundDoInsert, + String newKey, + Hashtable dataMap) { + super(whenFoundDoInsert, newKey, dataMap); + } + + @Override + OperationStatus getData(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getNextNoDup(foundKey, foundData, + LockMode.DEFAULT); + } + } + + protected class NoDupBackwardsDataWalker extends BackwardsDataWalker { + NoDupBackwardsDataWalker(Hashtable dataMap) { + super(dataMap); + } + + NoDupBackwardsDataWalker(Hashtable dataMap, + Hashtable addedDataMap) { + super(dataMap, addedDataMap); + } + + NoDupBackwardsDataWalker(String whenFoundDoInsert, + String newKey, + Hashtable dataMap) { + super(whenFoundDoInsert, newKey, dataMap); + } + + @Override + OperationStatus getData(StringDbt foundKey, StringDbt foundData) + throws DatabaseException { + + return cursor.getPrevNoDup(foundKey, foundData, + LockMode.DEFAULT); + } + } + + /** + * Construct the next highest key. + */ + protected String nextKey(String key) { + byte[] sb = StringUtils.toUTF8(key); + sb[sb.length - 1]++; + return StringUtils.fromUTF8(sb); + } + + /** + * Construct the next lowest key. 
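+ * Mirrors nextKey() by decrementing the last UTF-8 byte of the key,
+ * e.g. prevKey("abd") == "abc"; assumes a non-empty key whose last
+ * byte does not underflow.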
+ */ + protected String prevKey(String key) { + byte[] sb = StringUtils.toUTF8(key); + sb[sb.length - 1]--; + return StringUtils.fromUTF8(sb); + } + + /** + * Helper routine for testLargeXXX routines. + */ + protected void doLargePut(Hashtable dataMap, int nKeys) + throws DatabaseException { + + for (int i = 0; i < nKeys; i++) { + byte[] key = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(key); + String keyString = StringUtils.fromUTF8(key); + String dataString = Integer.toString(i); + putAndVerifyCursor(cursor, new StringDbt(keyString), + new StringDbt(dataString), true); + if (dataMap != null) { + dataMap.put(keyString, dataString); + } + } + } + + /** + * Helper routine for testLargeXXX routines. + */ + protected void doLargePutPerf(int nKeys) + throws DatabaseException { + + byte[][] keys = new byte[nKeys][]; + for (int i = 0; i < nKeys; i++) { + byte[] key = new byte[20]; + keys[i] = key; + TestUtils.generateRandomAlphaBytes(key); + String keyString = StringUtils.fromUTF8(key); + byte[] dataBytes = new byte[120]; + TestUtils.generateRandomAlphaBytes(dataBytes); + String dataString = StringUtils.fromUTF8(dataBytes); + putAndVerifyCursor(cursor, new StringDbt(keyString), + new StringDbt(dataString), true); + } + } + + /** + * Create some simple duplicate data. + */ + protected void doSimpleDuplicatePuts() + throws DatabaseException { + + for (int i = 0; i < simpleKeyStrings.length; i++) { + for (int j = 0; j < simpleKeyStrings.length; j++) { + putAndVerifyCursor(cursor, simpleKeys[i], simpleData[j], true); + } + } + } + + /** + * Create a tree with N_TOP_LEVEL_KEYS keys and N_DUPLICATES_PER_KEY + * data items per key. + * + * @param dataMap A Hashtable of hashtables. This routine adds entries + * to the top level hash for each key created. Secondary hashes contain + * the duplicate data items for each key in the top level hash. + * + * @param putVariant a boolean for varying the way the data is put with the + * cursor, currently unused.. + */ + protected void createRandomDuplicateData(Hashtable dataMap, + boolean putVariant) + throws DatabaseException { + + createRandomDuplicateData(N_TOP_LEVEL_KEYS, + N_DUPLICATES_PER_KEY, + dataMap, + putVariant, + false); + } + + /** + * Create a tree with a given number of keys and nDup + * data items per key. + * + * @param nTopKeys the number of top level keys to create. If negative, + * create that number of top level keys with dupes underneath and the + * same number of top level keys without any dupes. + * + * @param nDup The number of duplicates to create in the duplicate subtree. + * + * @param dataMap A Hashtable of hashtables. This routine adds entries + * to the top level hash for each key created. Secondary hashes contain + * the duplicate data items for each key in the top level hash. + * + * @param putVariant a boolean for varying the way the data is put with the + * cursor, currently unused.. 
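+ *
+ * @param verifyCount if true, assert that cursor.count() equals the
+ * number of duplicates inserted so far for the current key.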
+ */ + protected void createRandomDuplicateData(int nTopKeys, + int nDup, + Hashtable dataMap, + boolean putVariant, + boolean verifyCount) + throws DatabaseException { + + boolean createSomeNonDupes = false; + if (nTopKeys < 0) { + nTopKeys = Math.abs(nTopKeys); + nTopKeys <<= 1; + createSomeNonDupes = true; + } + + byte[][] keys = new byte[nTopKeys][]; + for (int i = 0; i < nTopKeys; i++) { + byte[] key = new byte[N_KEY_BYTES]; + keys[i] = key; + TestUtils.generateRandomAlphaBytes(key); + String keyString = StringUtils.fromUTF8(key); + Hashtable ht = new Hashtable(); + if (dataMap != null) { + dataMap.put(keyString, ht); + } + int nDupesThisTime = nDup; + if (createSomeNonDupes && (i % 2) == 0) { + nDupesThisTime = 1; + } + for (int j = 1; j <= nDupesThisTime; j++) { + byte[] data = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(data); + OperationStatus status = + putAndVerifyCursor(cursor, new StringDbt(keyString), + new StringDbt(data), putVariant); + + if (verifyCount) { + assertTrue(cursor.count() == j); + } + + if (status != OperationStatus.SUCCESS) { + throw new DuplicateEntryException + ("Duplicate Entry"); + } + String dataString = StringUtils.fromUTF8(data); + ht.put(dataString, dataString); + } + } + } + + /** + * Debugging routine. Iterate through the transient hashtable of + * key/data pairs and ensure that each key can be retrieved from + * the tree. + */ + protected void verifyEntries(Hashtable dataMap) + throws DatabaseException { + + Tree tree = DbInternal.getDbImpl(exampleDb).getTree(); + Enumeration e = dataMap.keys(); + while (e.hasMoreElements()) { + String key = (String) e.nextElement(); + if (!retrieveData(tree, StringUtils.toUTF8(key))) { + System.out.println("Couldn't find: " + key); + } + } + } + + /* Throw assertion if the database is not valid. */ + protected void validateDatabase() + throws DatabaseException { + + DatabaseImpl dbImpl = DbInternal.getDbImpl(exampleDb); + dbImpl.verify(new VerifyConfig()); + } + + /** + * Helper routine for above. 
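+ * Searches the Btree for the given key and reports whether the target
+ * BIN contains a slot for it; the BIN latch is released and the latch
+ * count re-checked before returning.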
+ */ + protected boolean retrieveData(Tree tree, byte[] key) + throws DatabaseException { + + TestUtils.checkLatchCount(); + Node n = tree.search(key, Tree.SearchType.NORMAL, null, + CacheMode.DEFAULT, null /*keyComparator*/); + if (!(n instanceof BIN)) { + fail("search didn't return a BIN for key: " + key); + } + BIN bin = (BIN) n; + try { + int index = bin.findEntry(key, false, true); + if (index == -1) { + return false; + } + return true; + } finally { + bin.releaseLatch(); + TestUtils.checkLatchCount(); + } + } + + protected OperationStatus putAndVerifyCursor(Cursor cursor, + StringDbt key, + StringDbt data, + boolean putVariant) + throws DatabaseException { + + OperationStatus status; + if (duplicatesAllowed) { + status = cursor.putNoDupData(key, data); + } else { + status = cursor.putNoOverwrite(key, data); + } + + if (status == OperationStatus.SUCCESS) { + StringDbt keyCheck = new StringDbt(); + StringDbt dataCheck = new StringDbt(); + + assertEquals(OperationStatus.SUCCESS, cursor.getCurrent + (keyCheck, dataCheck, LockMode.DEFAULT)); + assertEquals(key.getString(), keyCheck.getString()); + assertEquals(data.getString(), dataCheck.getString()); + } + + return status; + } + + @SuppressWarnings("serial") + protected static class BtreeComparator implements Comparator, + Serializable { + protected boolean ascendingComparison = true; + + protected BtreeComparator() { + } + + public int compare(Object o1, Object o2) { + byte[] arg1; + byte[] arg2; + if (ascendingComparison) { + arg1 = (byte[]) o1; + arg2 = (byte[]) o2; + } else { + arg1 = (byte[]) o2; + arg2 = (byte[]) o1; + } + int a1Len = arg1.length; + int a2Len = arg2.length; + + int limit = Math.min(a1Len, a2Len); + + for (int i = 0; i < limit; i++) { + byte b1 = arg1[i]; + byte b2 = arg2[i]; + if (b1 == b2) { + continue; + } else { + /* Remember, bytes are signed, so convert to shorts so that + we effectively do an unsigned byte comparison. */ + short s1 = (short) (b1 & 0x7F); + short s2 = (short) (b2 & 0x7F); + if (b1 < 0) { + s1 |= 0x80; + } + if (b2 < 0) { + s2 |= 0x80; + } + return (s1 - s2); + } + } + + return (a1Len - a2Len); + } + } + + @SuppressWarnings("serial") + protected static class ReverseBtreeComparator extends BtreeComparator { + protected ReverseBtreeComparator() { + ascendingComparison = false; + } + } +} diff --git a/test/com/sleepycat/je/dbi/DbEnvPoolTest.java b/test/com/sleepycat/je/dbi/DbEnvPoolTest.java new file mode 100644 index 0000000..2e61eaf --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbEnvPoolTest.java @@ -0,0 +1,239 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.concurrent.CountDownLatch; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHookAdapter; +import com.sleepycat.util.test.TestBase; + +public class DbEnvPoolTest extends TestBase { + private static final String dbName = "testDb"; + private static final String envHomeBName = "envB"; + + private final File envHomeA; + private final File envHomeB; + + public DbEnvPoolTest() { + envHomeA = new File(System.getProperty(TestUtils.DEST_DIR)); + envHomeB = new File(envHomeA, envHomeBName); + } + + @Before + public void setUp() + throws Exception { + + TestUtils.removeLogFiles("Setup", envHomeA, false); + if (envHomeB.exists()) { + clearEnvHomeB(); + } else { + envHomeB.mkdir(); + } + super.setUp(); + } + + @After + public void tearDown() { + TestUtils.removeLogFiles("TearDown", envHomeA, false); + clearEnvHomeB(); + } + + private void clearEnvHomeB() { + File[] logFiles = envHomeB.listFiles(); + for (File logFile : logFiles) { + assertTrue(logFile.delete()); + } + } + + @Test + public void testCanonicalEnvironmentName () + throws Throwable { + + try { + /* Create an environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + Environment envA = new Environment(envHomeA, envConfig); + + /* Look in the environment pool with the relative path name. */ + File file2 = new File("build/test/classes"); + assertTrue(DbEnvPool.getInstance().isOpen(file2)); + + envA.close(); + + } catch (Throwable t) { + /* Dump stack trace before trying to tear down. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Test that SharedCache Environments really shares cache. + */ + @Test + public void testSharedCacheEnv() + throws Throwable { + + OpenEnvThread threadA = null; + OpenEnvThread threadB = null; + try { + CountDownLatch awaitLatch = new CountDownLatch(1); + AwaitHook hook = new AwaitHook(awaitLatch); + + threadA = new OpenEnvThread("threadA", envHomeA, hook, 10); + threadB = new OpenEnvThread("threadB", envHomeB, hook, 1000); + + threadA.start(); + threadB.start(); + + /* + * Make sure that all two threads have finished the first + * synchronization block. + */ + while (DbEnvPool.getInstance().getEnvImpls().size() != 2) { + } + + /* Count down the latch so that Environment creation is done. */ + awaitLatch.countDown(); + + threadA.finishTest(); + threadB.finishTest(); + + Environment envA = threadA.getEnv(); + Environment envB = threadB.getEnv(); + + /* Check the two Environments using the same SharedEvictor. */ + assertTrue(DbInternal.getNonNullEnvImpl(envA).getEvictor() == + DbInternal.getNonNullEnvImpl(envB).getEvictor()); + + StatsConfig stConfig = new StatsConfig(); + stConfig.setFast(true); + assertTrue(envA.getConfig().getCacheSize() == + envB.getConfig().getCacheSize()); + /* Check the shared cache total bytes are the same. 
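+ * (Both handles should report one combined figure for the shared pool,
+ * not a per-environment slice.)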
*/ + assertTrue(envA.getStats(stConfig).getSharedCacheTotalBytes() == + envB.getStats(stConfig).getSharedCacheTotalBytes()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (threadA != null) { + threadA.closeEnv(); + } + + if (threadB != null) { + threadB.closeEnv(); + } + } + } + + /* Thread used to opening two environments. */ + private static class OpenEnvThread extends JUnitThread { + private final File envHome; + private final AwaitHook awaitHook; + private final int dbSize; + private Environment env; + private Database db; + + public OpenEnvThread(String threadName, + File envHome, + AwaitHook awaitHook, + int dbSize) { + super(threadName); + this.envHome = envHome; + this.awaitHook = awaitHook; + this.dbSize = dbSize; + } + + @Override + public void testBody() + throws Throwable { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setSharedCache(true); + + DbEnvPool.getInstance().setBeforeFinishInitHook(awaitHook); + + env = new Environment(envHome, envConfig); + + /* + * Write different data on different environments to check the + * shared cache total bytes. + */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, dbName, dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= dbSize; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo", data); + db.put(null, key, data); + } + } + + public Environment getEnv() { + return env; + } + + public void closeEnv() { + if (db != null) { + db.close(); + } + + if (env != null) { + env.close(); + } + } + } + + private static class AwaitHook extends TestHookAdapter { + private final CountDownLatch awaitLatch; + + public AwaitHook(CountDownLatch awaitLatch) { + this.awaitLatch = awaitLatch; + } + + @Override + public void doHook(EnvironmentImpl unused) { + try { + awaitLatch.await(); + } catch (InterruptedException e) { + /* should never happen */ + } + } + } +} + diff --git a/test/com/sleepycat/je/dbi/DbTreeTest.java b/test/com/sleepycat/je/dbi/DbTreeTest.java new file mode 100644 index 0000000..33fabe5 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DbTreeTest.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +public class DbTreeTest extends DualTestCase { + private final File envHome; + + public DbTreeTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testDbLookup() throws Throwable { + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + Environment env = create(envHome, envConfig); + + // Make two databases + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database dbHandleAbcd = env.openDatabase(null, "abcd", dbConfig); + Database dbHandleXyz = env.openDatabase(null, "xyz", dbConfig); + + // Can we get them back? + dbConfig.setAllowCreate(false); + Database newAbcdHandle = env.openDatabase(null, "abcd", dbConfig); + Database newXyzHandle = env.openDatabase(null, "xyz", dbConfig); + + dbHandleAbcd.close(); + dbHandleXyz.close(); + newAbcdHandle.close(); + newXyzHandle.close(); + close(env); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } +} diff --git a/test/com/sleepycat/je/dbi/DeleteUpdateWithoutReadTest.java b/test/com/sleepycat/je/dbi/DeleteUpdateWithoutReadTest.java new file mode 100644 index 0000000..ecbdf21 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DeleteUpdateWithoutReadTest.java @@ -0,0 +1,221 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.trigger.TestBase.DBT; +import com.sleepycat.je.trigger.Trigger; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Checks that deletions and updates can be performed without reading the old + * record, when it is not in cache. 
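+ *
+ * The behavior under test: a delete, or a put that replaces the whole
+ * record, can be performed "blind" by writing a new log entry without
+ * first fetching the old LN, so the nLNsFetch stat should stay at
+ * zero. A partial DatabaseEntry is the exception, since splicing the
+ * new byte range requires the old record contents.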
+ */ +public class DeleteUpdateWithoutReadTest extends DualTestCase { + + private static final int NUM_RECORDS = 5; + private static final String DB_NAME = "foo"; + private static final StatsConfig CLEAR_STATS; + static { + CLEAR_STATS = new StatsConfig(); + CLEAR_STATS.setClear(true); + } + private final File envHome; + private boolean dups; + private Environment env; + private Database db; + private final boolean isSerializable = + "serializable".equals(System.getProperty("isolationLevel")); + + public DeleteUpdateWithoutReadTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + try { + super.tearDown(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + env = null; + db = null; + } + + /* + * Delete/update not currently optimized to avoid fetching for dup DBs. + */ + @Test + public void testNoReadDups() { + dups = true; + testNoFetch(); + } + + /* Test that delete and updates don't need to fetch. */ + @Test + public void testNoFetch() { + open(false); + env.getStats(CLEAR_STATS); + EnvironmentStats stats; + + /* Insert */ + writeData(false, false /*update*/); + stats = env.getStats(CLEAR_STATS); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + assertEquals(0, stats.getNLNsFetch()); + + /* Update */ + writeData(true, false /*update*/); + stats = env.getStats(CLEAR_STATS); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + assertEquals(0, stats.getNLNsFetch()); + + /* Delete */ + deleteData(); + stats = env.getStats(CLEAR_STATS); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + assertEquals(0, stats.getNLNsFetch()); + + /* Compress */ + env.compress(); + stats = env.getStats(CLEAR_STATS); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + /* Compressor does one fetch for MapLN. */ + assertEquals(1, stats.getNLNsFetch()); + + /* Truncate the database. */ + db.close(); + stats = env.getStats(CLEAR_STATS); + env.truncateDatabase(null, DB_NAME, false); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + assertEquals(0, stats.getNLNsFetch()); + db = null; + close(); + } + + /* + * Test the cases where updates and deletes are required to fetch. + */ + @Test + public void testFetch() + throws Throwable { + + open(false); + env.getStats(CLEAR_STATS); + EnvironmentStats stats; + + /* Insert */ + writeData(false, false); + stats = env.getStats(CLEAR_STATS); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + assertEquals(0, stats.getNLNsFetch()); + + /* Update with partial DatabaseEntry will fetch. */ + writeData(true, true); + stats = env.getStats(CLEAR_STATS); + assertEquals(5, TestUtils.getNLNsLoaded(stats)); + assertEquals(5, stats.getNLNsFetch()); + close(); + + /* Configuring triggers will require fetching. 
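+ * (Presumably because trigger callbacks must be given the old record,
+ * each of the 5 updates fetches its LN, which the assertions below
+ * verify.)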
*/ + open(true); + env.getStats(CLEAR_STATS); + writeData(true, false); + stats = env.getStats(CLEAR_STATS); + assertEquals(5, TestUtils.getNLNsLoaded(stats)); + assertTrue(stats.getNLNsFetch() >= 5); + close(); + } + + private void open(boolean useTriggers) { + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setCacheMode(CacheMode.EVICT_LN); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = create(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + if (useTriggers) { + List triggers = + new LinkedList(Arrays.asList((Trigger) new DBT("t1"), + (Trigger) new DBT("t2"))); + dbConfig.setTriggers(triggers); + dbConfig.setOverrideTriggers(true); + } + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + private void close() { + if (db != null) { + db.close(); + } + close(env); + } + + private void writeData(boolean update, boolean partial) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[1000]); + if (partial) { + data.setPartial(10, 100, true); + } + for (int i = 0; i < NUM_RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + final OperationStatus status; + if (update) { + status = db.put(null, key, data); + } else { + status = db.putNoOverwrite(null, key, data); + } + assertSame(OperationStatus.SUCCESS, status); + } + } + + private void deleteData() { + final DatabaseEntry key = new DatabaseEntry(); + for (int i = 0; i < NUM_RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + final OperationStatus status = db.delete(null, key); + assertSame(OperationStatus.SUCCESS, status); + } + } +} diff --git a/test/com/sleepycat/je/dbi/DiskOrderedScanTest.java b/test/com/sleepycat/je/dbi/DiskOrderedScanTest.java new file mode 100644 index 0000000..c7e3026 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DiskOrderedScanTest.java @@ -0,0 +1,1610 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DiskOrderedCursor; +import com.sleepycat.je.DiskOrderedCursorConfig; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.ForwardCursor; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Assume; +import org.junit.Test; + +public class DiskOrderedScanTest extends TestBase { + + class DOSTestHook implements TestHook { + + int counter = 0; + + @Override + public void hookSetup() { + counter = 0; + } + + @Override + public void doIOHook() { + } + + @Override + public void doHook() { + ++counter; + } + + @Override + public void doHook(Integer obj) { + } + + @Override + public Integer getHookValue() { + return counter; + } + }; + + class EvictionHook implements TestHook { + + DiskOrderedScanner dos; + + EvictionHook(DiskOrderedScanner dos) { + this.dos = dos; + } + + @Override + public void hookSetup() { + } + + @Override + public void doIOHook() { + } + + @Override + public void doHook() { + dos.evictBinRefs(); + } + + @Override + public void doHook(Integer obj) { + } + + @Override + public Integer getHookValue() { + return 0; + } + }; + + private static final int N_RECS = 10000; + private static final int ONE_MB = 1 << 20; + + private final File envHome; + private Environment env; + private EnvironmentImpl envImpl; + + boolean embeddedLNs; + + private Database[] dbs; + + private int numDBs = 5; + + public DiskOrderedScanTest() { + envHome = SharedTestUtils.getTestDir(); + + dbs = new Database[numDBs]; + } + + @Test + public void testScanArgChecks() + throws Throwable { + + System.out.println("Running test testScanArgChecks"); + + open(false, CacheMode.DEFAULT, 0); + + writeData(1/*nDBs*/, false, N_RECS); + + ForwardCursor dos = dbs[0].openCursor(new DiskOrderedCursorConfig()); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* lockMode must be null, DEFAULT or READ_UNCOMMITTED. 
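+ * A disk-ordered scan takes no per-record locks, so lock modes that
+ * imply locking are rejected. A minimal legal read loop, as an
+ * illustrative sketch (passing null behaves like LockMode.DEFAULT
+ * here):
+ *
+ *   DatabaseEntry k = new DatabaseEntry();
+ *   DatabaseEntry d = new DatabaseEntry();
+ *   while (dos.getNext(k, d, null) == OperationStatus.SUCCESS) {
+ *       // consume k and d in disk order
+ *   }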
*/ + try { + dos.getNext(key, data, LockMode.READ_COMMITTED); + fail("expected IllegalArgumentException"); + } catch (IllegalArgumentException IAE) { + // expected + } + + dos.close(); + try { + dos.close(); + } catch (IllegalStateException ISE) { + fail("unexpected IllegalStateException"); + } + close(); + } + + @Test + public void testScanPermutations() + throws Throwable { + + System.out.println("Running test testScanPermutations"); + + for (final int nDBs : new int[] { numDBs, 1 }) { + + for (final boolean dups : new boolean[] { false, true }) { + + for (final int nRecs : new int[] { 0, N_RECS }) { + + for (final CacheMode cacheMode : + new CacheMode[] { CacheMode.DEFAULT, + CacheMode.EVICT_LN, + CacheMode.EVICT_BIN }) { + + for (int i = 0; i < 3; i += 1) { + final boolean keysOnly; + final boolean countOnly; + switch (i) { + case 0: + keysOnly = false; + countOnly = false; + break; + case 1: + keysOnly = true; + countOnly = false; + break; + case 2: + keysOnly = true; + countOnly = true; + break; + default: + throw new IllegalStateException(); + } + + for (final long memoryLimit : + new long[] { Long.MAX_VALUE, ONE_MB}) { + + for (final long lsnBatchSize : + new long[] { Long.MAX_VALUE, 100 }) { + + TestUtils.removeFiles( + "Setup", envHome, + FileManager.JE_SUFFIX); + + try { + doScan(nDBs, dups, nRecs, cacheMode, + keysOnly, countOnly, + memoryLimit, lsnBatchSize); + } catch (AssertionError | + RuntimeException e) { + /* Wrap with context info. */ + throw new RuntimeException( + "scan failed with" + + " dups=" + dups + + " nRecs=" + nRecs + + " cacheMode=" + cacheMode + + " keysOnly=" + keysOnly + + " memoryLimit=" + memoryLimit + + " lsnBatchSize=" + lsnBatchSize, + e); + } + } + } + } + } + } + } + } + } + + /** + * Checks that a 3 level (or larger) Btree can be scanned. + */ + @Test + public void testLargeScan() + throws Throwable { + + System.out.println("Running test testLargeScan"); + + doScan(1/*nDBs*/, false /*dups*/, 5 * 1000 * 1000, CacheMode.DEFAULT, + false /*keysOnly*/, false /*countOnly*/, 10L * ONE_MB, + Long.MAX_VALUE); + } + + @Test + public void testLowMemoryLargeCount() + throws Throwable { + + System.out.println("Running test testLowMemoryLargeCount"); + + doScan(1/*nDBs*/, false /*dups*/, 100 * 1000, CacheMode.EVICT_BIN, + true /*keysOnly*/, true /*countOnly*/, ONE_MB, 50); + } + + private void doScan( + final int nDBs, + final boolean dups, + final int nRecs, + final CacheMode cacheMode, + final boolean keysOnly, + final boolean countOnly, + final long memoryLimit, + final long lsnBatchSize) + throws Throwable { + + open(dups, cacheMode, 0); + + writeData(nDBs, dups, nRecs); + + DiskOrderedCursorConfig dosConfig = new DiskOrderedCursorConfig(); + dosConfig.setKeysOnly(keysOnly); + dosConfig.setCountOnly(countOnly); + dosConfig.setInternalMemoryLimit(memoryLimit); + dosConfig.setLSNBatchSize(lsnBatchSize); + + DiskOrderedCursor dos; + + if (nDBs == 1) { + dos = dbs[0].openCursor(dosConfig); + } else { + dos = env.openDiskOrderedCursor(dbs, dosConfig); + } + + int cnt = 0; + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + int expectedCnt = nDBs * (dups ? 
(nRecs * 2) : nRecs); + + BitSet seenKeys = new BitSet(expectedCnt); + + while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) { + + int k1; + if (countOnly) { + k1 = 0; + } else { + k1 = entryToInt(key); + } + + int d1; + if (keysOnly) { + assertNull(data.getData()); + d1 = 0; + } else { + d1 = entryToInt(data); + } + + if (dups) { + if (!keysOnly) { + boolean v1 = (k1 == (d1 * -1)); + boolean v2 = (d1 == (-1 * (k1 + nRecs + nRecs))); + assertTrue(v1 ^ v2); + } + } else { + if (!keysOnly) { + assertEquals(k1, (d1 * -1)); + } + } + + if (!countOnly) { + seenKeys.set(k1); + } + cnt++; + } + + assertEquals(cnt, expectedCnt); + + if (!countOnly) { + assertEquals(seenKeys.cardinality(), nRecs * nDBs); + } + + /* [#21282] getNext should return NOTFOUND if called again. */ + assertEquals(dos.getNext(key, data, null), OperationStatus.NOTFOUND); + dos.close(); + close(); + + /* + System.out.println("iters " + + DbInternal.getDiskOrderedCursorImpl(dos). + getNScannerIterations() + ' ' + + getName()); + */ + } + + @Test + public void testInterruptedDiskOrderedScan() + throws Throwable { + + System.out.println("Running test testInterruptedDiskOrderedScan"); + + open(false, CacheMode.DEFAULT, 0); + + writeData(1, false, N_RECS); + + ForwardCursor dos = dbs[0].openCursor(new DiskOrderedCursorConfig()); + + int cnt = 0; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + assertTrue(dos.getNext(key, data, null) == OperationStatus.SUCCESS); + + assertEquals(dos.getDatabase(), dbs[0]); + + int k1 = entryToInt(key); + int d1 = entryToInt(data); + assertTrue(k1 == (d1 * -1)); + + DatabaseEntry key2 = new DatabaseEntry(); + DatabaseEntry data2 = new DatabaseEntry(); + + assertTrue(dos.getCurrent(key2, data2, null) == + OperationStatus.SUCCESS); + + int k2 = entryToInt(key2); + int d2 = entryToInt(data2); + assertTrue(k1 == k2 && d1 == d2); + + dos.close(); + + try { + dos.getCurrent(key2, data2, null); + fail("expected IllegalStateException from getCurrent"); + } catch (IllegalStateException ISE) { + // expected + } + + try { + dos.getNext(key2, data2, null); + fail("expected IllegalStateException from getNext"); + } catch (IllegalStateException ISE) { + // expected + } + + close(); + } + + /* + * Test that a delete of the record that the DiskOrderedCursor is pointing + * to doesn't affect the DOS. 
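+ * Records are served from state captured by the scanner (its queue and
+ * accumulated LSNs), so a concurrent delete through a regular Cursor
+ * does not disturb what getCurrent()/getNext() return.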
+ */ + @Test + public void testDeleteOneDuringScan() + throws Throwable { + + System.out.println("Running test testDeleteOneDuringScan"); + + open(false, CacheMode.DEFAULT, 0); + + writeData(1, false, N_RECS); + + ForwardCursor dos = dbs[0].openCursor(new DiskOrderedCursorConfig()); + Cursor cursor = dbs[0].openCursor(null, null); + int cnt = 0; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key2 = new DatabaseEntry(); + DatabaseEntry data2 = new DatabaseEntry(); + + assertTrue(dos.getNext(key, data, null) == OperationStatus.SUCCESS); + + int k1 = entryToInt(key); + int d1 = entryToInt(data); + + assertTrue(k1 == (d1 * -1)); + + assertTrue(dos.getCurrent(key2, data2, null) == + OperationStatus.SUCCESS); + + int k2 = entryToInt(key2); + int d2 = entryToInt(data2); + assertTrue(k1 == k2 && d1 == d2); + + assertTrue(cursor.getSearchKey(key, data, null) == + OperationStatus.SUCCESS); + + cursor.delete(); + assertTrue(dos.getCurrent(key2, data2, null) == + OperationStatus.SUCCESS); + + k2 = entryToInt(key2); + d2 = entryToInt(data2); + + dos.close(); + cursor.close(); + close(); + } + + /** + * Checks that a consumer thread performing deletions does not cause a + * deadlock. This failed prior to the use of DiskOrderedScanner. [#21667] + */ + @Test + public void testDeleteAllDuringScan() + throws Throwable { + + System.out.println("Running test testDeleteAllDuringScan"); + + open(false, CacheMode.DEFAULT, 0); + + writeData(1, false, N_RECS); + + DiskOrderedCursorConfig config = new DiskOrderedCursorConfig(); + config.setQueueSize(10).setLSNBatchSize(10); + + DiskOrderedCursor dos = dbs[0].openCursor(config); + DiskOrderedCursorImpl dosImpl = + DbInternal.getDiskOrderedCursorImpl(dos); + + Cursor cursor = dbs[0].openCursor(null, null); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > 0) { } + + for (int cnt = 0; cnt < N_RECS; cnt += 1) { + + assertSame(OperationStatus.SUCCESS, dos.getNext(key, data, null)); + + int k1 = entryToInt(key); + int d1 = entryToInt(data); + assertEquals(k1, d1 * -1); + + assertSame(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, LockMode.RMW)); + + assertEquals(k1, entryToInt(key)); + assertEquals(d1, entryToInt(data)); + assertSame(OperationStatus.SUCCESS, cursor.delete()); + } + + assertSame(OperationStatus.NOTFOUND, cursor.getFirst(key, data, null)); + + dos.close(); + cursor.close(); + close(); + } + + @Test + public void testBlockedProducerKeysOnly() throws Throwable { + System.out.println("Running test testBlockedProducerKeysOnly"); + testBlockedProducer(true, 10, 128 * 2); + } + + @Test + public void testBlockedProducerKeysAndData() throws Throwable { + System.out.println("Running test testBlockedProducerKeysAndData"); + testBlockedProducer(false, 100, 10); + } + + public void testBlockedProducer( + boolean keysonly, + int lsnBatchSize, + int queueSize) throws Throwable { + + /* Cache size sensitivity makes Zing support very difficult. */ + Assume.assumeTrue(!JVMSystemUtils.ZING_JVM); + + /* + * Use a small cache so that not all the full bins fit in it. + * + * This test is sensitive to cache sizes and isn't important to run + * with an off-heap cache. 
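+ *
+ * The scan runs as a bounded producer/consumer pipeline: the producer
+ * fills a queue of at most queueSize entries and blocks while it is
+ * full. Each numbered test below fills the queue, mutates the database
+ * through a regular Cursor, then drains the DOS and checks exactly
+ * which records are still returned.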
+ */ + open(false, CacheMode.DEFAULT, ONE_MB/5, false /*allowOffHeapCache*/); + + /* Load the initial set of 10,000 records */ + writeData(1, false, N_RECS); + + DiskOrderedCursorConfig config = new DiskOrderedCursorConfig(); + config.setQueueSize(queueSize); + config.setLSNBatchSize(lsnBatchSize); + config.setKeysOnly(keysonly); + //config.setDebug(true); + + DiskOrderedCursor dos = dbs[0].openCursor(config); + + DiskOrderedCursorImpl dosImpl = + DbInternal.getDiskOrderedCursorImpl(dos); + + DOSTestHook hook = new DOSTestHook(); + dosImpl.getScanner().setTestHook1(hook); + + Cursor cursor = dbs[0].openCursor(null, null); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + int minFreeSlots = (keysonly ? 127 : 0); + + /* + * Test 1 + */ + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + int freeSlots = dosImpl.freeQueueSlots(); + long numLsns = dosImpl.getNumLsns(); + + //System.out.println( + // "freeSlots = " + freeSlots + " numLsns = " + numLsns); + + /* Delete all the records */ + while (cursor.getNext(key, data, LockMode.RMW) == + OperationStatus.SUCCESS) { + + assertSame(OperationStatus.SUCCESS, cursor.delete()); + } + + assertSame(OperationStatus.NOTFOUND, cursor.getFirst(key, data, null)); + + cursor.close(); + env.compress(); + + /* + * The dos cursor should return the records that were already in + * the queue, plus the records from any lsns accumulated before + * the records were deleted. These lsns correspond to bins not + * found in the cache during phase 1. + */ + int cnt = 0; + + while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) { + + if (!keysonly) { + int k1 = entryToInt(key); + int d1 = entryToInt(data); + assertEquals(k1, d1 * -1); + } + + ++cnt; + } + + if (embeddedLNs) { + assertEquals(queueSize - freeSlots + numLsns * 127, cnt); + assertEquals(2, hook.getHookValue().intValue()); + } else { + assertEquals(0, hook.getHookValue().intValue()); + assertEquals((keysonly ? 1461 : 191), cnt); + } + + dos.close(); + + /* + * Test 2 + */ + + //System.out.println("TEST 2 \n"); + + /* Reload the records */ + writeData(1, false, N_RECS); + + cursor = dbs[0].openCursor(null, null); + + dos = dbs[0].openCursor(config); + dosImpl = DbInternal.getDiskOrderedCursorImpl(dos); + + dosImpl.getScanner().setTestHook1(hook); + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + freeSlots = dosImpl.freeQueueSlots(); + + /* Delete all the records except for the last 300 */ + cnt = 0; + + while (cursor.getNext(key, data, LockMode.RMW) == + OperationStatus.SUCCESS) { + + assertSame(OperationStatus.SUCCESS, cursor.delete()); + + ++cnt; + if (cnt >= (N_RECS - 300)) { + break; + } + } + + cursor.close(); + env.compress(); + + /* + * The dos cursor should return the records that were already in + * the queue plus the last 300 records. + */ + cnt = 0; + + while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) { + + if (!keysonly) { + int k1 = entryToInt(key); + int d1 = entryToInt(data); + assertEquals(k1, d1 * -1); + } + + ++cnt; + } + + if (embeddedLNs) { + assertEquals(keysonly ? 618 : (queueSize - freeSlots + 300), cnt); + assertEquals(keysonly ? 4 : 2, hook.getHookValue().intValue()); + } else { + assertEquals(0, hook.getHookValue().intValue()); + assertEquals((keysonly ? 
1461 : 191) + 300, cnt); + } + + dos.close(); + + /* + * Test 3 + */ + + //System.out.println("TEST 3 \n"); + + dos = dbs[0].openCursor(config); + dosImpl = DbInternal.getDiskOrderedCursorImpl(dos); + + dosImpl.getScanner().setTestHook1(hook); + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + freeSlots = dosImpl.freeQueueSlots(); + + /* Reload 1000 records, starting with record 1000 */ + writeData(dbs[0], false, 1000, 1000); + + /* + * The dos cursor should return the last 300 records. + */ + cnt = 0; + + while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) { + + if (!keysonly) { + int k1 = entryToInt(key); + int d1 = entryToInt(data); + assertEquals(k1, d1 * -1); + } + + ++cnt; + } + + assertEquals(300, cnt); + + if (embeddedLNs) { + assertEquals(keysonly ? 4 : 3, hook.getHookValue().intValue()); + } + + dos.close(); + + /* + * Test 4 + */ + + //System.out.println("TEST 4 \n"); + + dos = dbs[0].openCursor(config); + dosImpl = DbInternal.getDiskOrderedCursorImpl(dos); + + dosImpl.getScanner().setTestHook1(hook); + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + freeSlots = dosImpl.freeQueueSlots(); + + /* Reload the first 1000 records */ + writeData(dbs[0], false, 1000, 0); + + /* Load 20000 new records */ + writeData(dbs[0], false, 20000, N_RECS); + + cnt = 0; + + while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) { + + if (!keysonly) { + int k1 = entryToInt(key); + int d1 = entryToInt(data); + assertEquals(k1, d1 * -1); + } + + ++cnt; + } + + assertEquals(21300, cnt); + + if (embeddedLNs) { + assertEquals(5, hook.getHookValue().intValue()); + } + + dos.close(); + + /* + * Test 5 + */ + + //System.out.println("TEST 5 \n"); + + dos = dbs[0].openCursor(config); + dosImpl = DbInternal.getDiskOrderedCursorImpl(dos); + + //dosImpl.getScanner().debug = true; + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + dos.close(); + + synchronized (this) { + wait(2000); + } + + assertTrue(dosImpl.isProcessorClosed()); + + close(); + } + + @Test + public void testBlockedProducerMultiDBInternal1() throws Throwable { + System.out.println("Running test testBlockedProducerMultiDBInternal1"); + testBlockedProducerMultiDBInternal(false, false); + } + + @Test + public void testBlockedProducerMultiDBInternal2() throws Throwable { + System.out.println("Running test testBlockedProducerMultiDBInternal2"); + testBlockedProducerMultiDBInternal(true, false); + } + + @Test + public void testBlockedProducerMultiDBInternal3() throws Throwable { + System.out.println("Running test testBlockedProducerMultiDBInternal3"); + testBlockedProducerMultiDBInternal(false, true); + } + + @Test + public void testBlockedProducerMultiDBInternal4() throws Throwable { + System.out.println("Running test testBlockedProducerMultiDBInternal4"); + testBlockedProducerMultiDBInternal(true, true); + } + + public void testBlockedProducerMultiDBInternal( + boolean serialScan, + boolean keysonly) + throws Throwable { + + /* + * This test is sensitive to cache sizes and isn't important to run + * with an off-heap cache. 
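+ *
+ * This variant scans dbs[0..2] through a single DOS;
+ * setSerialDBScan(serialScan) chooses between consuming the databases
+ * one after another and interleaving their records, which affects how
+ * far the producer gets before the queue fills (see the Test 4 counts
+ * below).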
+ */ + open(false, CacheMode.DEFAULT, 5*ONE_MB, false /*allowOffHeapCache*/); + + /* + * Load the initial data: + * DB0: record keys 0 to 9,999 (10,000 records) + * DB1: record keys 10,000 to 29,999 (20,000 records) + * DB2: record keys 30,000 to 59,999 (30,000 records) + */ + writeData(dbs[0], false, N_RECS, 0); + writeData(dbs[1], false, 2 * N_RECS, N_RECS); + writeData(dbs[2], false, 3 * N_RECS, 3 * N_RECS); + + int queueSize = 30000; + + DiskOrderedCursorConfig config = new DiskOrderedCursorConfig(); + config.setQueueSize(queueSize); + config.setLSNBatchSize(1000); + config.setSerialDBScan(serialScan); + config.setKeysOnly(keysonly); + + DiskOrderedCursor dos = env.openDiskOrderedCursor(dbs, config); + + DiskOrderedCursorImpl dosImpl = + DbInternal.getDiskOrderedCursorImpl(dos); + + DOSTestHook hook = new DOSTestHook(); + dosImpl.getScanner().setTestHook1(hook); + + Cursor cursor0 = dbs[0].openCursor(null, null); + Cursor cursor1 = dbs[1].openCursor(null, null); + Cursor cursor2 = dbs[2].openCursor(null, null); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + int minFreeSlots = (keysonly ? 128 : 0); + + /* + * Test 1 + */ + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + int freeSlots = dosImpl.freeQueueSlots(); + + /* Delete all the records from all DBs */ + while (cursor0.getNext(key, data, LockMode.RMW) == + OperationStatus.SUCCESS) { + + assertSame(OperationStatus.SUCCESS, cursor0.delete()); + } + + while (cursor1.getNext(key, data, LockMode.RMW) == + OperationStatus.SUCCESS) { + + assertSame(OperationStatus.SUCCESS, cursor1.delete()); + } + + while (cursor2.getNext(key, data, LockMode.RMW) == + OperationStatus.SUCCESS) { + + assertSame(OperationStatus.SUCCESS, cursor2.delete()); + } + + assertSame(OperationStatus.NOTFOUND, cursor0.getFirst(key, data, null)); + assertSame(OperationStatus.NOTFOUND, cursor1.getFirst(key, data, null)); + assertSame(OperationStatus.NOTFOUND, cursor2.getFirst(key, data, null)); + + cursor0.close(); + cursor1.close(); + cursor2.close(); + env.compress(); + + /* + * The dos cursor should return the records that were already in + * the queue. + */ + int cnt = 0; + + while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) { + + if (!keysonly) { + int k1 = entryToInt(key); + int d1 = entryToInt(data); + assertEquals(k1, d1 * -1); + } + + ++cnt; + } + + assertEquals(queueSize - freeSlots, cnt); + + dos.close(); + + /* + * Test 2 + */ + + queueSize = 9000; + config.setQueueSize(queueSize); + + /* Reload the records */ + writeData(dbs[0], false, N_RECS, 0); + writeData(dbs[1], false, 2 * N_RECS, N_RECS); + writeData(dbs[2], false, 3 * N_RECS, 3 * N_RECS); + + cursor0 = dbs[0].openCursor(null, null); + cursor1 = dbs[1].openCursor(null, null); + cursor2 = dbs[2].openCursor(null, null); + + dos = env.openDiskOrderedCursor(dbs, config); + dosImpl = DbInternal.getDiskOrderedCursorImpl(dos); + + /* Loop until queue is full. */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + freeSlots = dosImpl.freeQueueSlots(); + + /* + * Delete all the records except for the last 100 in DB0, + * the last 200 in DB1, and the last 300 in DB2. 
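+ * (100 + 200 + 300 = 600 survivors, which is why the drain below
+ * expects queueSize - freeSlots + 600 entries.)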
+         */
+        cnt = 0;
+
+        while (cursor0.getNext(key, data, LockMode.RMW) ==
+               OperationStatus.SUCCESS) {
+
+            assertSame(OperationStatus.SUCCESS, cursor0.delete());
+
+            ++cnt;
+            if (cnt >= (N_RECS - 100)) {
+                break;
+            }
+        }
+
+        cnt = 0;
+
+        while (cursor1.getNext(key, data, LockMode.RMW) ==
+               OperationStatus.SUCCESS) {
+
+            assertSame(OperationStatus.SUCCESS, cursor1.delete());
+
+            ++cnt;
+            if (cnt >= (2*N_RECS - 200)) {
+                break;
+            }
+        }
+
+        cnt = 0;
+
+        while (cursor2.getNext(key, data, LockMode.RMW) ==
+               OperationStatus.SUCCESS) {
+
+            assertSame(OperationStatus.SUCCESS, cursor2.delete());
+
+            ++cnt;
+            if (cnt >= (3*N_RECS - 300)) {
+                break;
+            }
+        }
+
+        cursor0.close();
+        cursor1.close();
+        cursor2.close();
+        env.compress();
+
+        /*
+         * The dos cursor should return the records that were already in
+         * the queue plus the last 600 records.
+         */
+        cnt = 0;
+
+        while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) {
+            ++cnt;
+        }
+
+        assertEquals(queueSize - freeSlots + 600, cnt);
+
+        dos.close();
+
+        /*
+         * Test 3
+         */
+
+        /*
+         * At this point, the 3 DBs contain 600 records as follows:
+         * DB0: record keys 9,900 to 9,999 (100 records)
+         * DB1: record keys 29,800 to 29,999 (200 records)
+         * DB2: record keys 59,700 to 59,999 (300 records)
+         */
+
+        queueSize = 400;
+        config.setQueueSize(queueSize);
+
+        dos = env.openDiskOrderedCursor(dbs, config);
+        dosImpl = DbInternal.getDiskOrderedCursorImpl(dos);
+
+        /* Loop until queue is full. */
+        while (dosImpl.freeQueueSlots() > minFreeSlots) { }
+
+        synchronized (this) {
+            wait(1000);
+        }
+
+        freeSlots = dosImpl.freeQueueSlots();
+
+        /*
+         * Reload 1000 records in each DB. The records are inserted "behind"
+         * the current position of the DOS and should not be returned by DOS.
+         */
+        writeData(dbs[0], false, 1000, 1000);
+        writeData(dbs[1], false, 1000, 11000);
+        writeData(dbs[2], false, 1000, 31000);
+
+        /*
+         * The dos cursor should return the last 600 records.
+         */
+        cnt = 0;
+
+        while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) {
+
+            if (!keysonly) {
+                int k1 = entryToInt(key);
+                int d1 = entryToInt(data);
+                assertEquals(k1, d1 * -1);
+            }
+
+            ++cnt;
+        }
+
+        assertEquals(600, cnt);
+
+        dos.close();
+
+        /*
+         * Test 4
+         */
+
+        /*
+         * At this point, the 3 DBs contain 3600 records as follows:
+         * DB0: record keys 1,000 to 1,999 and 9,900 to 9,999 (1100 recs)
+         * DB1: record keys 11,000 to 11,999 and 29,800 to 29,999 (1200 recs)
+         * DB2: record keys 31,000 to 31,999 and 59,700 to 59,999 (1300 recs)
+         */
+
+        dos = env.openDiskOrderedCursor(dbs, config);
+        dosImpl = DbInternal.getDiskOrderedCursorImpl(dos);
+
+        /* Loop until queue is full. */
+        while (dosImpl.freeQueueSlots() > minFreeSlots) { }
+
+        synchronized (this) {
+            wait(1000);
+        }
+
+        freeSlots = dosImpl.freeQueueSlots();
+
+        /*
+         * Reload the first 1000 records in each DB. Note that with
+         * a serial scan, the queue is full with 400 records from DB0
+         * only, so the DOS will pick up the 2000 records inserted in
+         * DB1 and DB2.
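+         *
+         * (Count check against the asserts below: the DBs hold 3,600
+         * records, plus 3 * 20,000 new records inserted ahead of the
+         * scan, giving 63,600 for the interleaved scan; the serial scan
+         * also picks up the 2 * 1,000 records reloaded into DB1 and DB2,
+         * giving 65,600.)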
+         */
+        writeData(dbs[0], false, 1000, 0);
+        writeData(dbs[1], false, 1000, 10000);
+        writeData(dbs[2], false, 1000, 30000);
+
+        /* Load 20000 new records in each DB */
+        writeData(dbs[0], false, 20000, N_RECS);
+        writeData(dbs[1], false, 20000, 3 * N_RECS);
+        writeData(dbs[2], false, 20000, 6 * N_RECS);
+
+        cnt = 0;
+
+        while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) {
+
+            if (!keysonly) {
+                int k1 = entryToInt(key);
+                int d1 = entryToInt(data);
+                assertEquals(k1, d1 * -1);
+            }
+
+            ++cnt;
+        }
+
+        if (serialScan) {
+            assertEquals(65600, cnt);
+        } else {
+            assertEquals(63600, cnt);
+        }
+
+        dos.close();
+
+        close();
+    }
+
+    @Test
+    public void testCleanDeltasNoEviction() throws Throwable {
+        System.out.println("Running test testCleanDeltasNoEviction");
+        doTestDeltas(10000, 128 * 2, true, false);
+    }
+
+    @Test
+    public void testCleanDeltasEviction() throws Throwable {
+        System.out.println("Running test testCleanDeltasEviction");
+        doTestDeltas(10000, 128 * 2, true, true);
+    }
+
+    @Test
+    public void testDirtyDeltasNoEviction() throws Throwable {
+        /* Cache size sensitivity makes Zing support very difficult. */
+        Assume.assumeTrue(!JVMSystemUtils.ZING_JVM);
+        System.out.println("Running test testDirtyDeltasNoEviction");
+        doTestDeltas(10000, 128 * 2, false, false);
+    }
+
+    @Test
+    public void testDirtyDeltasEviction() throws Throwable {
+        /* Cache size sensitivity makes Zing support very difficult. */
+        Assume.assumeTrue(!JVMSystemUtils.ZING_JVM);
+        System.out.println("Running test testDirtyDeltasEviction");
+        doTestDeltas(10000, 128 * 2, false, true);
+    }
+
+    public void doTestDeltas(
+        int memoryLimit,
+        int queueSize,
+        boolean doCkpt,
+        boolean allowEviction) throws Throwable {
+
+        boolean debug = false;
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        int nRecs = 3 * N_RECS;
+
+        /*
+         * Use a small cache so that not all the full bins fit in it.
+         */
+        int cacheSize = ONE_MB / 5;
+        open(false, CacheMode.EVICT_LN, cacheSize);
+
+        EnvironmentMutableConfig envConfig = env.getConfig();
+        boolean useOffHeapCache = envConfig.getOffHeapCacheSize() > 0;
+
+        if (useOffHeapCache) {
+            int halfSize = cacheSize / 2;
+
+            envConfig = envConfig.
+                setCacheSize(halfSize).
+                setOffHeapCacheSize(halfSize);
+
+            env.setMutableConfig(envConfig);
+        }
+
+        /*
+         * Disable all sources of eviction except CACHEMODE (i.e.,
+         * Evictor.doEvictOneIN() will still evict the given IN).
+         */
+        Evictor evictor = envImpl.getEvictor();
+        evictor.setEnabled(allowEviction);
+
+        if (!allowEviction && useOffHeapCache) {
+            envConfig = envConfig.setOffHeapCacheSize(1024 * 1024 * 1024);
+            env.setMutableConfig(envConfig);
+        }
+
+        /*
+         * Load the initial set of 30,000 records. The record keys are even
+         * numbers. Given that eviction has been disabled, the JE cache stores
+         * all the BINs in the env as full BINs.
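+         *
+         * (Starting from full BINs matters here: the test then dirties
+         * one record per BIN and mutates each BIN to a delta, forcing
+         * the disk-ordered scan through its delta-handling path.)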
+ */ + putEvenRecords(1, nRecs); + + EnvironmentStats stats = env.getStats(null); + long nDeltas = stats.getNCachedBINDeltas(); + long nBins = stats.getNCachedBINs(); + long nOhDeltas = stats.getOffHeapCachedBINDeltas(); + long nOhBins = stats.getOffHeapCachedBINs(); + assert(nDeltas == 0); + + if (debug) { + System.out.println("Found " + nBins + " bins in main cache"); + System.out.println("Found " + nDeltas + " deltas in main cache"); + System.out.println("Found " + nOhBins + " bins off-heap"); + System.out.println("Found " + nOhDeltas + " deltas off-heap"); + } + + DiskOrderedCursorConfig dosConfig = new DiskOrderedCursorConfig(); + dosConfig.setQueueSize(queueSize); + dosConfig.setInternalMemoryLimit(memoryLimit); + dosConfig.setKeysOnly(true); + dosConfig.setDebug(debug); + + DiskOrderedCursor dos = dbs[0].openCursor(dosConfig); + + DiskOrderedCursorImpl dosImpl = DbInternal.getDiskOrderedCursorImpl(dos); + DiskOrderedScanner scanner = dosImpl.getScanner(); + + /* + * Create a non-sticky cursor so that we can have a stable CursorImpl + * to use below. + */ + CursorConfig config = new CursorConfig(); + config.setNonSticky(true); + + Cursor cursor = dbs[0].openCursor(null, config); + CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + + int minFreeSlots = 127; + + /* + * Test 1 + */ + + /* + * Loop until queue is full. The dos producer fills the queue and + * blocks during phase 1 (after processing 2 full bins). + */ + while (dosImpl.freeQueueSlots() > minFreeSlots) { } + + synchronized (this) { + wait(1000); + } + + /* + * Create deltas by updating 1 record in each bin and then explicitly + * calling mutateToBINDelta on each bin. + */ + List bins = new ArrayList(); + BIN bin = null; + + while (cursor.getNext(key, data, LockMode.RMW) == + OperationStatus.SUCCESS) { + + if (bin == null) { + bin = cursorImpl.getBIN(); + } else if (bin != cursorImpl.getBIN()) { + bin = cursorImpl.getBIN(); + bins.add(bin); + assertSame(OperationStatus.SUCCESS, cursor.putCurrent(data)); + } + } + + for (BIN bin2 : bins) { + bin2.latch(); + if (bin2.getInListResident() && bin2.canMutateToBINDelta()) { + bin2.mutateToBINDelta(); + } + bin2.releaseLatch(); + } + bins.clear(); + + cursor.close(); + + /* Mutate off-heap BINs to deltas also. */ + final DatabaseImpl dbImpl = DbInternal.getDbImpl(dbs[0]); + + for (IN in : envImpl.getInMemoryINs()) { + + if (in.getNormalizedLevel() != 2 || + in.getDatabase() != dbImpl) { + continue; + } + + for (int i = 0; i < in.getNEntries(); i += 1) { + in.latchNoUpdateLRU(); + if (in.getOffHeapBINId(i) >= 0) { + envImpl.getOffHeapCache().stripLNs(in, i); + envImpl.getOffHeapCache().mutateToBINDelta(in, i); + } + in.releaseLatch(); + } + } + + if (doCkpt) { + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + } + + if (debug) { + stats = env.getStats(null); + nDeltas = stats.getNCachedBINDeltas(); + nBins = stats.getNCachedBINs(); + nOhDeltas = stats.getOffHeapCachedBINDeltas(); + nOhBins = stats.getOffHeapCachedBINs(); + System.out.println("Found " + nBins + " bins in main cache"); + System.out.println("Found " + nDeltas + " deltas in main cache"); + System.out.println("Found " + nOhBins + " bins off-heap"); + System.out.println("Found " + nOhDeltas + " deltas off-heap"); + } + + /* + * Create a test hook and register it with the DOS producer. The hook + * will be executed after phase 1 and before phase 2. 
For each bin
+         * delta on which a WeakBinRef was created during phase 1, the hook
+         * will evict the bin and clear the WeakReference on it. So, during
+         * phase 2, all of these bins will have to be deferred. The total
+         * (approximate) memory that will be needed to store these deltas is
+         * greater than the DOS budget, and as a result, more than one
+         * subsequent iteration will be needed to process the deferred bins.
+         */
+        EvictionHook hook = new EvictionHook(scanner);
+        scanner.setEvictionHook(hook);
+
+        /*
+         * The dos cursor should return all the records.
+         */
+        int cnt = 0;
+
+        while (dos.getNext(key, data, null) == OperationStatus.SUCCESS) {
+            ++cnt;
+
+            if (allowEviction && cnt % 10 == 0) {
+                env.evictMemory();
+            }
+        }
+
+        assertEquals(3*N_RECS, cnt);
+
+        int nIter = scanner.getNIterations();
+
+        //System.out.println("num iterations = " + nIter);
+
+        if (useOffHeapCache) {
+            if (doCkpt) {
+                assertTrue(nIter > 33);
+            } else {
+                if (allowEviction) {
+                    assertEquals((embeddedLNs ? 37 : 31), nIter);
+                } else {
+                    assertEquals(embeddedLNs ? 8 : 9, nIter);
+                }
+            }
+        } else {
+            if (doCkpt) {
+                assertTrue(nIter > 34);
+            } else {
+                if (allowEviction) {
+                    assertEquals((12), nIter);
+                } else {
+                    assertEquals((8), nIter);
+                }
+            }
+        }
+
+        dos.close();
+
+        close();
+    }
+
+    private void putEvenRecords(int startRecord, int nRecords) {
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry();
+
+        CursorConfig config = new CursorConfig();
+        config.setNonSticky(true);
+
+        Cursor cursor = dbs[0].openCursor(null, config);
+
+        for (int i = startRecord; i < nRecords + startRecord; i += 1) {
+
+            key.setData(TestUtils.getTestArray(2*i));
+            data.setData(TestUtils.getTestArray(2*i));
+
+            assertEquals(OperationStatus.SUCCESS, cursor.put(key, data));
+        }
+
+        cursor.close();
+    }
+
+    private void open(
+        final boolean allowDuplicates,
+        final CacheMode cacheMode,
+        final long cacheSize)
+        throws Exception {
+
+        open(allowDuplicates, cacheMode, cacheSize, true /*allowOffHeapCache*/);
+    }
+
+    private void open(
+        final boolean allowDuplicates,
+        final CacheMode cacheMode,
+        final long cacheSize,
+        final boolean allowOffHeapCache)
+        throws Exception {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+
+        if (cacheSize > 0) {
+            envConfig.setCacheSize(cacheSize);
+        }
+
+        if (!allowOffHeapCache) {
+            /* Override what was set by initEnvConfig. */
+            envConfig.setOffHeapCacheSize(0);
+        }
+
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_EVICTOR, "false");
+
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false");
+
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_CLEANER, "false");
+
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false");
+
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false");
+
+        envConfig.setConfigParam(
+            EnvironmentConfig.DOS_PRODUCER_QUEUE_TIMEOUT, "2 seconds");
+
+        /*
+         * Why disable BtreeVerifier?
+         *
+         * For example, with BtreeVerifier enabled, the assertion at line 685
+         * in TEST 2 of com.sleepycat.je.
+         * dbi.DiskOrderedScanTest.testBlockedProducerKeysOnly,
+         * assertEquals(keysonly ? 618 : (queueSize - freeSlots + 300), cnt),
+         * encounters an AssertionFailedError:
+         * expected:<618> but was:<491>.
+         *
+         * The reason is as follows.
+         *
+         * Without BtreeVerifier, the diskOrderedCursor will queue
+         * BIN 87 (in-memory, 64 entries) and BIN 88 (in-memory, 127 entries).
+         * Then lsnAcc.add(binLsn) accumulates BIN 89 (NOT in-memory,
+         * 127 entries) for later processing during phase 2.
Finally, when
+         * checking BIN 90, it will find that the remaining capacity is not
+         * enough. So it will wait. During this process, the test code deletes
+         * many records. When using diskOrderedCursor.getNext to retrieve
+         * records, it will get the queued records and the accumulated records,
+         * i.e. 64 + 127 + 127 = 318. Adding the 300 records left after the
+         * deletions, the final count is 618.
+         *
+         * With BtreeVerifier, the diskOrderedCursor will queue
+         * BIN 87 (in-memory, 64 entries) and BIN 88 (in-memory, 127 entries).
+         * Now BIN 89 (127 entries) is in-memory because BtreeVerifier may
+         * cause this BIN to be fetched to cache. But, when checking BIN 89,
+         * it will find that the remaining capacity is not
+         * enough. So it will wait. During this process, the test code deletes
+         * many records. When using diskOrderedCursor.getNext to retrieve
+         * records, it will only get the queued records,
+         * i.e. 64 + 127 = 191. Adding the 300 records left after the
+         * deletions, the final count is 491.
+         *
+         * BtreeVerifier can influence the count in some other test cases as
+         * well, so it is disabled for all the test cases in this test.
+         *
+         * TODO: This may hide some real errors, so further testing may be
+         * needed when time allows.
+         */
+        envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+
+        envConfig.setTransactional(false);
+        envConfig.setAllowCreate(true);
+
+        env = new Environment(envHome, envConfig);
+
+        envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        embeddedLNs = (envImpl.getMaxEmbeddedLN() > 0);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setExclusiveCreate(false);
+        dbConfig.setTransactional(false);
+        dbConfig.setSortedDuplicates(allowDuplicates);
+        dbConfig.setCacheMode(cacheMode);
+
+        for (int i = 0; i < numDBs; ++i) {
+            dbs[i] = env.openDatabase(null, "testDb" + i, dbConfig);
+        }
+    }
+
+    private void writeData(
+        int nDBs,
+        boolean dups,
+        int nRecs) {
+
+        for (int i = 0; i < nDBs; ++i) {
+            writeData(dbs[i], dups, nRecs, nRecs * i);
+        }
+    }
+
+    private void writeData(Database db, boolean dups, int nRecs) {
+        writeData(db, dups, nRecs, 0);
+    }
+
+    private void writeData(
+        Database db,
+        boolean dups,
+        int nRecs,
+        int start) {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        for (int i = start; i < start + nRecs; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i * -1, data);
+
+            assertEquals(OperationStatus.SUCCESS,
+                         db.putNoOverwrite(null, key, data));
+
+            if (dups) {
+                IntegerBinding.intToEntry(-1 * (i + nRecs + nRecs), data);
+
+                assertEquals(OperationStatus.SUCCESS,
+                             db.putNoDupData(null, key, data));
+            }
+        }
+
+        /*
+         * If the scanned data set is large enough, a checkpoint may be needed
+         * to ensure all expected records are scanned. It seems that a
+         * checkpoint is needed on some machines but not others, probably
+         * because the checkpointer thread gets more or less time. Therefore,
+         * to make the test more reliable we always do a checkpoint here.
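+         *
+         * (A plausible explanation, not verified here: the disk-ordered
+         * scanner reads nodes from the log by LSN, so records living only
+         * in dirty, not-yet-flushed nodes could otherwise be missed when
+         * the checkpointer lags behind.)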
+ */ + env.checkpoint(new CheckpointConfig().setForce(true)); + } + + private int entryToInt(DatabaseEntry entry) { + assertEquals(4, entry.getSize()); + return IntegerBinding.entryToInt(entry); + } + + private void close() + throws Exception { + + for (int i = 0; i < numDBs; ++i) { + if (dbs[i] != null) { + dbs[i].close(); + dbs[i] = null; + } + } + + if (env != null) { + env.close(); + env = null; + } + } +} diff --git a/test/com/sleepycat/je/dbi/DuplicateEntryException.java b/test/com/sleepycat/je/dbi/DuplicateEntryException.java new file mode 100644 index 0000000..ec1b622 --- /dev/null +++ b/test/com/sleepycat/je/dbi/DuplicateEntryException.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +/** + * Exception to indicate that an entry is already present in a node. + */ +@SuppressWarnings("serial") +class DuplicateEntryException extends RuntimeException { + + DuplicateEntryException() { + super(); + } + + DuplicateEntryException(String message) { + super(message); + } +} diff --git a/test/com/sleepycat/je/dbi/EmbeddedOpsTest.java b/test/com/sleepycat/je/dbi/EmbeddedOpsTest.java new file mode 100644 index 0000000..0fbfc18 --- /dev/null +++ b/test/com/sleepycat/je/dbi/EmbeddedOpsTest.java @@ -0,0 +1,606 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.evictor.Evictor; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +public class EmbeddedOpsTest extends DualTestCase { + + /* + * N_RECORDS is set to 110 and NODE_MAX_ENTRIES to 100. This results in + * a tree with 2 BINs, the first of which has 50 entries. 
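+     *
+     * (The arithmetic: the first 100 inserts fill a single BIN; the next
+     * insert splits it into two BINs of roughly 50 entries each, and the
+     * remaining inserts land in the second BIN, leaving 50 and 60.)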
+ */ + private static final int N_RECORDS = 110; + + private final File envHome; + private Environment env; + private Database db; + private boolean dups; + + EnvironmentConfig envConfig; + DatabaseConfig dbConfig; + + private boolean debug = true; + + public EmbeddedOpsTest() { + + envHome = SharedTestUtils.getTestDir(); + + envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + envConfig.setConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES, "100"); + + /* + * evict() method needs to force MANUAL eviction, but this is defeated + * in dual/HA mode due to the min tree memory check when a shared cache + * is used, so we disable the shared cache here as a workaround. + */ + envConfig.setSharedCache(false); + + envConfig.setConfigParam(EnvironmentConfig.TREE_MAX_EMBEDDED_LN, "20"); + + dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setCacheMode(CacheMode.EVICT_LN); + } + + private boolean open(boolean dups) { + + env = create(envHome, envConfig); + + if (!dups) { + db = env.openDatabase(null, "testDB", dbConfig); + } else { + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "testDupsDB", dbConfig); + } + + this.dups = dups; + + /* TREE_MAX_EMBEDDED_LN may be overridden in a je.properties file. */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + if (envImpl.getMaxEmbeddedLN() != 20) { + System.out.println( + "Skipping EmbeddedOpsTest because TREE_MAX_EMBEDDED_LN has " + + "been overridden."); + close(); + return false; + } + + return true; + } + + private void close() { + db.close(); + db = null; + close(env); + env = null; + } + + @Test + public void testNoDups() { + + Transaction txn; + Cursor cursor1; + Cursor cursor2; + + if (!open(false)) { + return; // embedded LNs are disabled + } + + writeData(); + + BIN bin1 = getFirstBIN(); + BIN bin2 = getLastBIN(); + + assertEquals(50, bin1.getNumEmbeddedLNs()); + assertEquals(60, bin2.getNumEmbeddedLNs()); + + /* + * The BIN split causes full version of both BINs to be logged. After + * the split is done, 10 more records are inserted in the 2nd BIN, so + * that BIN has 10 entries marked dirty. During the checkpoint below, + * a delta will be logged for the 2nd BIN, so although after the + * checkpoint the BIN will be clean, it will still have 10 dirty + * entries. + */ + if (debug) { + System.out.println( + "1. BIN-1 has " + bin1.getNEntries() + " entries"); + System.out.println( + "1. BIN-1 has " + bin1.getNDeltas() + " dirty entries"); + System.out.println( + "1. BIN-2 has " + bin2.getNEntries() + " entries"); + System.out.println( + "1. BIN-2 has " + bin2.getNDeltas() + " dirty entries"); + } + + /* Flush BINs (checkpoint) */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* + * Update only enough records in the 1st BIN to make it a delta + * when it gets selected for eviction. Specifically, update records + * in slots 5 to 15 (inclusive). 
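+         *
+         * (That is 11 dirty slots out of 50. A BIN can be mutated to a
+         * delta only while its dirty-slot fraction stays below the
+         * bin-delta limit, 25% by default assuming je.tree.binDelta is
+         * unchanged, and 11/50 = 22% qualifies.)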
+ */ + writeData(5, N_RECORDS / 10); + + if (debug) { + System.out.println( + "2. BIN-1 has " + bin1.getNDeltas() + " dirty entries"); + System.out.println( + "2. BIN-2 has " + bin2.getNDeltas() + " dirty entries"); + } + + assertTrue(!bin1.isBINDelta(false)); + assertTrue(!bin2.isBINDelta(false)); + + /* + * Mutate bin1 to a delta. Make sure all slots have embedded data + * and use the compact key rep. + */ + mutateToDelta(bin1); + assertEquals(bin1.getNEntries(), bin1.getNumEmbeddedLNs()); + assertTrue(bin1.getKeyVals().accountsForKeyByteMemUsage()); + + /* + * Read keys 12 and 20 directly from the bin delta, using 2 cursors: + * one doing an exact search and the other a range search. Make sure no + * mutation back to full bin occurs and the embedded data is correct. + */ + txn = env.beginTransaction(null, TransactionConfig.DEFAULT); + cursor1 = db.openCursor(txn, null); + cursor2 = db.openCursor(txn, null); + + searchKey(cursor1, 12, true/*exact*/, true/*exist*/); + searchKey(cursor2, 20, false/*range*/, true/*exists*/); + + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + + confirmCurrentData(cursor1, 12); + confirmCurrentData(cursor2, 20); + + /* + * Delete record 12-12. + */ + assertEquals(OperationStatus.SUCCESS, cursor1.delete()); + + /* + * Update record 20-20 to 20-21. The op keeps the data embedded. + */ + putRecord(cursor2, 20, 21); + confirmCurrentData(cursor2, 21); + assertEquals(bin1.getNEntries(), bin1.getNumEmbeddedLNs()); + + /* + * Re-insert record 12-14 (slot reuse). The op keeps the data embedded. + */ + putRecord(cursor1, 12, 14); + confirmCurrentData(cursor1, 14); + assertEquals(bin1.getNEntries(), bin1.getNumEmbeddedLNs()); + + /* + * Update record 12-14 to 12-25. The op causes the key rep to switch + * from compact to default. + */ + putRecord(cursor1, 12, 25); + assertEquals(bin1.getNEntries(), bin1.getNumEmbeddedLNs()); + assertTrue(!bin1.getKeyVals().accountsForKeyByteMemUsage()); + confirmCurrentData(cursor1, 25); + + /* + * Update record 120 in a way that makes it non-embedded. Then update + * it again into a zero-data record. + */ + putLargeRecord(cursor2, 120, 120); + assertEquals(bin2.getNEntries()-1, bin2.getNumEmbeddedLNs()); + + env.checkpoint(new CheckpointConfig().setForce(true)); + + putZeroRecord(cursor2, 120); + assertEquals(bin2.getNEntries(), bin2.getNumEmbeddedLNs()); + confirmCurrentKey(cursor2, 120); + + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* + * Abort the txn and make sure everything went back to pre-txn state. + */ + cursor1.close(); + cursor2.close(); + txn.abort(); + + cursor1 = db.openCursor(null, null); + + searchKey(cursor1, 12, true/*exact*/, true/*exist*/); + confirmCurrentData(cursor1, 12); + + searchKey(cursor1, 20, true/*exact*/, true/*exist*/); + confirmCurrentData(cursor1, 20); + + assertEquals(bin1.getNEntries(), bin1.getNumEmbeddedLNs()); + assertEquals(bin2.getNEntries(), bin2.getNumEmbeddedLNs()); + assertTrue(bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + + checkKeys(0, 50); + checkKeys(100, 50); + + cursor1.close(); + + + /* + * Cause mutation of bin1 to full bin by searching for an existing + * key that is not in the delta. Make sure all entries stay embedded + * and readable. 
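+         *
+         * (Key 0 exists in the full BIN but is not among the dirty slots
+         * captured in the delta, so an exact search for it cannot be
+         * satisfied from the delta alone and triggers the mutation.)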
+ */ + cursor1 = db.openCursor(null, null); + + searchKey(cursor1, 0, true/*exactSearch*/, true/*exists*/); + + assertTrue(!bin1.isBINDelta(false)); + assertTrue(bin1.getInListResident()); + assertEquals(bin1.getNEntries(), bin1.getNumEmbeddedLNs()); + + checkKeys(0, 50); + + cursor1.close(); + + /* + * Crash and recover. + */ + abnormalClose(env); + open(false); + + checkKeys(0, 50); + checkKeys(100, 50); + + bin1 = getFirstBIN(); + bin2 = getLastBIN(); + assertEquals(bin1.getNEntries(), bin1.getNumEmbeddedLNs()); + assertEquals(bin2.getNEntries(), bin2.getNumEmbeddedLNs()); + + close(); + } + + private void writeData() { + writeData(0, N_RECORDS); + } + + private void writeData(int startRecord, int nRecords) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = startRecord; i < nRecords + startRecord; i += 1) { + + key.setData(TestUtils.getTestArray(2*i, false)); + data.setData(TestUtils.getTestArray(2*i, false)); + + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + } + + private void putRecord(Cursor cursor, int keyVal, int dataVal) { + + final DatabaseEntry key = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(keyVal)); + final DatabaseEntry data = new DatabaseEntry(); + data.setData(TestUtils.getTestArray(dataVal)); + + assertEquals(OperationStatus.SUCCESS, cursor.put(key, data)); + } + + private void putLargeRecord(Cursor cursor, int keyVal, int dataVal) { + + byte[] dataBytes = new byte[60]; + byte[] tmp = TestUtils.getTestArray(dataVal); + System.arraycopy(tmp, 0, dataBytes, 0, tmp.length); + + final DatabaseEntry data = new DatabaseEntry(); + data.setData(dataBytes); + final DatabaseEntry key = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(keyVal)); + + assertEquals(OperationStatus.SUCCESS, cursor.put(key, data)); + } + + private void putZeroRecord(Cursor cursor, int keyVal) { + + final DatabaseEntry key = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(keyVal)); + final DatabaseEntry data = new DatabaseEntry(); + data.setData(new byte[0]); + + assertEquals(OperationStatus.SUCCESS, cursor.put(key, data)); + } + + private void insertRecord( + Cursor cursor, + int keyVal, + int dataVal, + boolean large) { + + OperationStatus status = OperationStatus.SUCCESS; + + final DatabaseEntry key = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(keyVal)); + + byte[] dataBytes; + + if (large) { + dataBytes = new byte[60]; + byte[] tmp = TestUtils.getTestArray(dataVal); + System.arraycopy(tmp, 0, dataBytes, 0, tmp.length); + } else { + dataBytes = TestUtils.getTestArray(dataVal); + } + + final DatabaseEntry data = new DatabaseEntry(); + data.setData(dataBytes); + + assertEquals(status, cursor.putNoOverwrite(key, data)); + } + + private void updateCurrentRecord(final Cursor cursor, int dataVal) { + + final DatabaseEntry data = new DatabaseEntry(); + data.setData(TestUtils.getTestArray(dataVal)); + + assertEquals(OperationStatus.SUCCESS, cursor.putCurrent(data)); + + final DatabaseEntry key = new DatabaseEntry(); + data.setData(null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getCurrent(key, data, null)); + + assertEquals(dataVal, TestUtils.getTestVal(data.getData())); + } + + private void searchKey( + final Cursor cursor, + final int keyVal, + boolean exactSearch, + boolean exists) { + + OperationStatus status = OperationStatus.SUCCESS; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + 
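+        /*
+         * Only existence is being checked here, so partial data retrieval
+         * is requested below to avoid materializing the record data.
+         */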
key.setData(TestUtils.getTestArray(keyVal)); + data.setPartial(true); + + if (exactSearch) { + if (!exists) { + status = OperationStatus.NOTFOUND; + } + assertEquals(status, cursor.getSearchKey(key, data, null)); + } else { + assertEquals(status, cursor.getSearchKeyRange(key, data, null)); + } + } + + private void searchRecord( + Cursor cursor, + int keyVal, + int dataVal, + boolean exactSearch, + boolean exists) { + + OperationStatus status = OperationStatus.SUCCESS; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + key.setData(TestUtils.getTestArray(keyVal)); + data.setData(TestUtils.getTestArray(dataVal)); + + if (exactSearch) { + if (!exists) { + status = OperationStatus.NOTFOUND; + } + assertEquals(status, cursor.getSearchBoth(key, data, null)); + } else { + assertEquals(status, cursor.getSearchBothRange(key, data, null)); + } + } + + private void getNext(Cursor cursor, boolean skipDups) { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + if (skipDups) { + assertEquals(OperationStatus.SUCCESS, + cursor.getNextNoDup(key, data, null)); + } else { + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, null)); + } + } + + private Cursor confirmCurrentKey(final Cursor cursor, int keyVal) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + assertEquals(OperationStatus.SUCCESS, + cursor.getCurrent(key, data, null)); + + assertEquals(keyVal, TestUtils.getTestVal(key.getData())); + + return cursor; + } + + private Cursor confirmCurrentData(final Cursor cursor, int dataVal) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + assertEquals(OperationStatus.SUCCESS, + cursor.getCurrent(key, data, null)); + + assertEquals(dataVal, TestUtils.getTestVal(data.getData())); + + return cursor; + } + + private void checkKeys(final int startKeyVal, final int nRecords) { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(null, null); + + searchKey(cursor, startKeyVal, true, true); + + for (int i = 1; i < nRecords; i += 1) { + + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, null)); + + assertEquals(2 * i + startKeyVal, + TestUtils.getTestVal(key.getData())); + } + + cursor.close(); + } + + /** + * Reads first key and returns its BIN. Does not read data to avoid + * changing the nNotResident stat. + */ + private BIN getFirstBIN() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(null, null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(key, data, null)); + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + assertNotNull(bin); + return bin; + } + + /** + * Reads last key and returns its BIN. Does not read data to avoid + * changing the nNotResident stat. 
+ */ + private BIN getLastBIN() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + final Cursor cursor = db.openCursor(null, null); + + assertEquals(OperationStatus.SUCCESS, + cursor.getLast(key, data, null)); + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + assertNotNull(bin); + return bin; + } + + private void mutateToDelta(BIN bin) { + + OffHeapCache ohCache = + DbInternal.getNonNullEnvImpl(env).getOffHeapCache(); + + if (!ohCache.isEnabled()) { + evict(bin, false); + } else { + bin.latchNoUpdateLRU(); + bin.mutateToBINDelta(); + bin.releaseLatch(); + } + assertTrue(bin.isBINDelta(false)); + assertTrue(bin.getInListResident()); + } + + /** + * Simulated eviction of the BIN, if it were selected. This may only do + * partial eviction, if LNs are present or it can be mutated to a delta. + * We expect that some memory will be reclaimed. + */ + private void evict(BIN bin, boolean force) { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Evictor evictor = envImpl.getEvictor(); + + final long memBefore = TestUtils.validateNodeMemUsage(envImpl, true); + + bin.latch(CacheMode.UNCHANGED); + + if (force) { + evictor.doTestEvict(bin, Evictor.EvictionSource.CACHEMODE); + } else { + evictor.doTestEvict(bin, Evictor.EvictionSource.MANUAL); + } + + bin.updateMemoryBudget(); + + final long memAfter = TestUtils.validateNodeMemUsage(envImpl, true); + + assertTrue(memAfter < memBefore); + } +} diff --git a/test/com/sleepycat/je/dbi/INListTest.java b/test/com/sleepycat/je/dbi/INListTest.java new file mode 100644 index 0000000..6d9b30e --- /dev/null +++ b/test/com/sleepycat/je/dbi/INListTest.java @@ -0,0 +1,466 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.dbi;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.Iterator;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.tree.IN;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+public class INListTest extends TestBase {
+    private static String DB_NAME = "INListTestDb";
+    private File envHome;
+    private volatile int sequencer = 0;
+    private Environment env;
+    private EnvironmentImpl envImpl;
+    private Database db;
+    private DatabaseImpl dbImpl;
+
+    public INListTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        sequencer = 0;
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER,
+                                 "false");
+        envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER,
+                                 "false");
+        envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false");
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+        dbImpl = DbInternal.getDbImpl(db);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+        }
+        if (env != null) {
+            env.close();
+        }
+        db = null;
+        dbImpl = null;
+        env = null;
+        envImpl = null;
+    }
+
+    @After
+    public void tearDown() {
+
+        try {
+            close();
+        } catch (Exception e) {
+            System.out.println("During tearDown: " + e);
+        }
+        envHome = null;
+    }
+
+    /**
+     * This test was originally written when the INList had a major and minor
+     * latch. It was used to test the addition of INs holding the minor latch
+     * while another thread holds the major latch. Now that we're using
+     * ConcurrentHashMap this type of testing is not important, but I've left
+     * the test in place (without the latching) since it does exercise the
+     * INList API a little.
+     */
+    @Test
+    public void testConcurrentAdditions()
+        throws Throwable {
+
+        final INList inList1 = new INList(envImpl);
+        inList1.enable();
+
+        JUnitThread tester1 =
+            new JUnitThread("testConcurrentAdditions-Thread1") {
+                @Override
+                public void testBody() {
+
+                    try {
+                        /* Create two initial elements. */
+                        for (int i = 0; i < 2; i++) {
+                            IN in = new IN(dbImpl, null, 1, 1);
+                            inList1.add(in);
+                        }
+
+                        /*
+                         * Wait for tester2 to try to acquire the
+                         * minor latch.
+                         */
+                        sequencer = 1;
+                        while (sequencer <= 1) {
+                            Thread.yield();
+                        }
+
+                        /*
+                         * Sequencer is now 2. There should be three elements
+                         * in the list right now because thread 2 added a third
+                         * one.
+                         */
+                        int count = 0;
+                        Iterator iter = inList1.iterator();
+                        while (iter.hasNext()) {
+                            iter.next();
+                            count++;
+                        }
+
+                        assertEquals(3, count);
+
+                        /*
+                         * Allow thread2 to run again. It will
+                         * add another element and throw control
+                         * back to thread 1.
+ */ + sequencer++; // now it's 3 + while (sequencer <= 3) { + Thread.yield(); + } + + /* + * Check that the entry added by tester2 was really + * added. + */ + count = 0; + iter = inList1.iterator(); + while (iter.hasNext()) { + iter.next(); + count++; + } + + assertEquals(4, count); + } catch (Throwable T) { + T.printStackTrace(System.out); + fail("Thread 1 caught some Throwable: " + T); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testConcurrentAdditions-Thread2") { + @Override + public void testBody() { + + try { + /* Wait for tester1 to start */ + while (sequencer < 1) { + Thread.yield(); + } + + assertEquals(1, sequencer); + + inList1.add(new IN(dbImpl, null, 1, 1)); + sequencer++; + + /* Sequencer is now 2. */ + + while (sequencer < 3) { + Thread.yield(); + } + + assertEquals(3, sequencer); + /* Add one more element. */ + inList1.add(new IN(dbImpl, null, 1, 1)); + sequencer++; + } catch (Throwable T) { + T.printStackTrace(System.out); + fail("Thread 2 caught some Throwable: " + T); + } + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + } + + /* + * Variations of this loop are used in the following tests to simulate the + * INList memory budget recalculation that is performed by the same loop + * construct in DirtyINMap.selectDirtyINsForCheckpoint. + * + * inList.memRecalcBegin(); + * boolean completed = false; + * try { + * for (IN in : inList) { + * inList.memRecalcIterate(in); + * } + * completed = true; + * } finally { + * inList.memRecalcEnd(completed); + * } + */ + + /** + * Scenario #1: IN size is unchanged during the iteration + * begin + * iterate -- add total IN size, mark processed + * end + */ + @Test + public void testMemBudgetReset1() + throws DatabaseException { + + INList inList = envImpl.getInMemoryINs(); + MemoryBudget mb = envImpl.getMemoryBudget(); + + long origTreeMem = getActualTreeMemoryUsage(mb, inList); + inList.memRecalcBegin(); + boolean completed = false; + try { + for (IN in : inList) { + inList.memRecalcIterate(in); + } + completed = true; + } finally { + inList.memRecalcEnd(completed); + } + assertEquals(origTreeMem, mb.getTreeMemoryUsage()); + + close(); + } + + /** + * Scenario #2: IN size is updated during the iteration + * begin + * update -- do not add delta because IN is not yet processed + * iterate -- add total IN size, mark processed + * update -- do add delta because IN was already processed + * end + */ + @Test + public void testMemBudgetReset2() + throws DatabaseException { + + INList inList = envImpl.getInMemoryINs(); + MemoryBudget mb = envImpl.getMemoryBudget(); + + /* + * Size changes must be greater than IN.ACCUMULATED_LIMIT to be + * counted in the budget, and byte array lengths should be a multiple + * of 4 to give predictable sizes, since array sizes are allowed in + * multiples of 4. + */ + final int SIZE = IN.ACCUMULATED_LIMIT + 100; + DatabaseEntry key = new DatabaseEntry(new byte[1]); + db.put(null, key, new DatabaseEntry(new byte[SIZE * 1])); + + /* Test increasing size. */ + long origTreeMem = getActualTreeMemoryUsage(mb, inList); + inList.memRecalcBegin(); + boolean completed = false; + try { + db.put(null, key, new DatabaseEntry(new byte[SIZE * 2])); + for (IN in : inList) { + inList.memRecalcIterate(in); + } + db.put(null, key, new DatabaseEntry(new byte[SIZE * 3])); + completed = true; + } finally { + inList.memRecalcEnd(completed); + } + assertEquals(origTreeMem + SIZE * 2, mb.getTreeMemoryUsage()); + + /* Test decreasing size. 
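+         * This mirrors the increasing case: the record shrinks from
+         * SIZE * 3 back to SIZE * 1, a delta of -2 * SIZE, so the budget
+         * should land back on origTreeMem.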
*/ + inList.memRecalcBegin(); + completed = false; + try { + db.put(null, key, new DatabaseEntry(new byte[SIZE * 2])); + for (IN in : inList) { + inList.memRecalcIterate(in); + } + db.put(null, key, new DatabaseEntry(new byte[SIZE * 1])); + completed = true; + } finally { + inList.memRecalcEnd(completed); + } + assertEquals(origTreeMem, mb.getTreeMemoryUsage()); + + close(); + } + + /** + * Scenario #3: IN is added during the iteration but not iterated + * begin + * add -- add IN size, mark processed + * end + */ + @Test + public void testMemBudgetReset3() + throws DatabaseException { + + INList inList = envImpl.getInMemoryINs(); + MemoryBudget mb = envImpl.getMemoryBudget(); + + IN newIn = new IN(dbImpl, null, 1, 1); + long size = newIn.getBudgetedMemorySize(); + + long origTreeMem = getActualTreeMemoryUsage(mb, inList); + inList.memRecalcBegin(); + boolean completed = false; + try { + for (IN in : inList) { + inList.memRecalcIterate(in); + } + inList.add(newIn); + completed = true; + } finally { + inList.memRecalcEnd(completed); + } + assertEquals(origTreeMem + size, mb.getTreeMemoryUsage()); + + close(); + } + + /** + * Scenario #4: IN is added during the iteration and is iterated + * begin + * add -- add IN size, mark processed + * iterate -- do not add size because IN was already processed + * end + */ + @Test + public void testMemBudgetReset4() + throws DatabaseException { + + INList inList = envImpl.getInMemoryINs(); + MemoryBudget mb = envImpl.getMemoryBudget(); + + IN newIn = new IN(dbImpl, null, 1, 1); + long size = newIn.getBudgetedMemorySize(); + + long origTreeMem = getActualTreeMemoryUsage(mb, inList); + inList.memRecalcBegin(); + boolean completed = false; + try { + inList.add(newIn); + for (IN in : inList) { + inList.memRecalcIterate(in); + } + completed = true; + } finally { + inList.memRecalcEnd(completed); + } + assertEquals(origTreeMem + size, mb.getTreeMemoryUsage()); + + close(); + } + + /** + * Scenario #5: IN is removed during the iteration but not iterated + * begin + * remove -- do not add delta because IN is not yet processed + * end + */ + @Test + public void testMemBudgetReset5() + throws DatabaseException { + + INList inList = envImpl.getInMemoryINs(); + MemoryBudget mb = envImpl.getMemoryBudget(); + + IN oldIn = inList.iterator().next(); + long size = oldIn.getInMemorySize(); + + long origTreeMem = getActualTreeMemoryUsage(mb, inList); + inList.memRecalcBegin(); + boolean completed = false; + try { + inList.remove(oldIn); + for (IN in : inList) { + inList.memRecalcIterate(in); + } + completed = true; + } finally { + inList.memRecalcEnd(completed); + } + assertEquals(origTreeMem - size, mb.getTreeMemoryUsage()); + + close(); + } + + /** + * Scenario #6: IN is removed during the iteration and is iterated + * begin + * iterate -- add total IN size, mark processed + * remove -- add delta because IN was already processed + * end + */ + @Test + public void testMemBudgetReset6() + throws DatabaseException { + + INList inList = envImpl.getInMemoryINs(); + MemoryBudget mb = envImpl.getMemoryBudget(); + + IN oldIn = inList.iterator().next(); + long size = oldIn.getInMemorySize(); + + long origTreeMem = getActualTreeMemoryUsage(mb, inList); + inList.memRecalcBegin(); + boolean completed = false; + try { + for (IN in : inList) { + inList.memRecalcIterate(in); + } + inList.remove(oldIn); + completed = true; + } finally { + inList.memRecalcEnd(completed); + } + assertEquals(origTreeMem - size, mb.getTreeMemoryUsage()); + + close(); + } + + private long 
getActualTreeMemoryUsage(final MemoryBudget mb, + final INList inList) { + long actual = 0; + long budgeted = 0; + for (IN in : inList) { + budgeted += in.getBudgetedMemorySize(); + actual += in.getInMemorySize(); + } + assertEquals(budgeted, mb.getTreeMemoryUsage()); + return actual; + } +} diff --git a/test/com/sleepycat/je/dbi/MemoryBudgetTest.java b/test/com/sleepycat/je/dbi/MemoryBudgetTest.java new file mode 100644 index 0000000..06d4d09 --- /dev/null +++ b/test/com/sleepycat/je/dbi/MemoryBudgetTest.java @@ -0,0 +1,115 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +public class MemoryBudgetTest extends TestBase { + private final File envHome; + + public MemoryBudgetTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testDefaults() + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + Environment env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + MemoryBudget testBudget = envImpl.getMemoryBudget(); + + /* + System.out.println("max= " + testBudget.getMaxMemory()); + System.out.println("log= " + testBudget.getLogBufferBudget()); + System.out.println("thresh= " + testBudget.getEvictorCheckThreshold()); + */ + + assertTrue(testBudget.getMaxMemory() > 0); + assertTrue(testBudget.getLogBufferBudget() > 0); + + assertTrue(testBudget.getMaxMemory() <= + JVMSystemUtils.getRuntimeMaxMemory()); + + env.close(); + } + + /* Verify that the proportionally based setting works. */ + @Test + public void testCacheSizing() + throws Exception { + + long jvmMemory = JVMSystemUtils.getRuntimeMaxMemory(); + + /* + * Runtime.maxMemory() may return Long.MAX_VALUE if there is no + * inherent limit. + */ + if (jvmMemory == Long.MAX_VALUE) { + jvmMemory = 1 << 26; + } + + /* The default cache size ought to be percentage based. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + Environment env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + long percentConfig = envImpl.getConfigManager(). 
+ getInt(EnvironmentParams.MAX_MEMORY_PERCENT); + + EnvironmentConfig c = env.getConfig(); + long expectedMem = (jvmMemory * percentConfig) / 100; + assertEquals(expectedMem, c.getCacheSize()); + assertEquals(expectedMem, envImpl.getMemoryBudget().getMaxMemory()); + env.close(); + + /* Try setting the percentage.*/ + expectedMem = (jvmMemory * 30) / 100; + envConfig = TestUtils.initEnvConfig(); + envConfig.setCachePercent(30); + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + c = env.getConfig(); + assertEquals(expectedMem, c.getCacheSize()); + assertEquals(expectedMem, envImpl.getMemoryBudget().getMaxMemory()); + env.close(); + + /* Try overriding */ + envConfig = TestUtils.initEnvConfig(); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE + 10); + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + c = env.getConfig(); + assertEquals(MemoryBudget.MIN_MAX_MEMORY_SIZE + 10, c.getCacheSize()); + assertEquals(MemoryBudget.MIN_MAX_MEMORY_SIZE + 10, + envImpl.getMemoryBudget().getMaxMemory()); + env.close(); + } +} diff --git a/test/com/sleepycat/je/dbi/NullCursor.java b/test/com/sleepycat/je/dbi/NullCursor.java new file mode 100644 index 0000000..cb98983 --- /dev/null +++ b/test/com/sleepycat/je/dbi/NullCursor.java @@ -0,0 +1,34 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.txn.Locker; + +/** + * A NullCursor is used as a no-op object by tree unit tests, which + * wish to speak directly to Tree methods. + */ +public class NullCursor extends CursorImpl { + /** + * Cursor constructor. + */ + public NullCursor(DatabaseImpl database, Locker txn) { + super(database, txn); + } + + @Override + void addCursor(BIN bin) {} + @Override + void addCursor() {} +} diff --git a/test/com/sleepycat/je/dbi/SR12641.java b/test/com/sleepycat/je/dbi/SR12641.java new file mode 100644 index 0000000..a4c3456 --- /dev/null +++ b/test/com/sleepycat/je/dbi/SR12641.java @@ -0,0 +1,201 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.dbi;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+
+import org.junit.After;
+import org.junit.Test;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * This reproduces the bug described in SR [#12641], also related to
+ * SR [#9543].
+ *
+ * Note that although this is a JUnit test case, it is not run as part of the
+ * JUnit test suite. It takes a long time, and when it fails it hangs.
+ * Therefore, it was only used for debugging and is not intended to be a
+ * regression test.
+ *
+ * For some reason the bug was not reproducible with a simple main program,
+ * which is why a JUnit test was used.
+ */
+public class SR12641 extends TestBase {
+
+    /* Use small NODE_MAX to cause lots of splits. */
+    private static final int NODE_MAX = 6;
+
+    private final File envHome;
+    private Environment env;
+    private Database db;
+    private boolean dups;
+    private boolean writerStopped;
+
+    public SR12641() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @After
+    public void tearDown() {
+
+        if (env != null) {
+            try {
+                env.close();
+            } catch (Exception e) {
+                System.err.println("TearDown: " + e);
+            }
+        }
+        env = null;
+        db = null;
+    }
+
+    @Test
+    public void testSplitsWithScansDups()
+        throws Throwable {
+
+        dups = true;
+        testSplitsWithScans();
+    }
+
+    @Test
+    public void testSplitsWithScans()
+        throws Throwable {
+
+        open();
+
+        /* Cause splits in the last BIN. */
+        JUnitThread writer = new JUnitThread("writer") {
+            @Override
+            public void testBody() {
+                try {
+                    DatabaseEntry key = new DatabaseEntry(new byte[1]);
+                    DatabaseEntry data = new DatabaseEntry(new byte[1]);
+                    OperationStatus status;
+
+                    Cursor cursor = db.openCursor(null, null);
+
+                    for (int i = 0; i < 100000; i += 1) {
+                        IntegerBinding.intToEntry(i, dups ? data : key);
+                        if (dups) {
+                            status = cursor.putNoDupData(key, data);
+                        } else {
+                            status = cursor.putNoOverwrite(key, data);
+                        }
+                        assertEquals(OperationStatus.SUCCESS, status);
+
+                        if (i % 5000 == 0) {
+                            System.out.println("Iteration: " + i);
+                        }
+                    }
+
+                    cursor.close();
+                    writerStopped = true;
+
+                } catch (Exception e) {
+                    try {
+                        FileOutputStream os =
+                            new FileOutputStream(new File("./err.txt"));
+                        e.printStackTrace(new PrintStream(os));
+                        os.close();
+                    } catch (IOException ignored) {}
+                    System.exit(1);
+                }
+            }
+        };
+
+        /* Move repeatedly from the last BIN to the prior BIN.
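+         * Repeatedly crossing the boundary between the last two BINs while
+         * the writer splits the last BIN is what exercises the race from
+         * SR [#12641]; when the bug is present the test hangs rather than
+         * failing cleanly.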
*/ + JUnitThread reader = new JUnitThread("reader") { + @Override + public void testBody() { + try { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + CursorConfig cursorConfig = new CursorConfig(); + cursorConfig.setReadUncommitted(true); + Cursor cursor = db.openCursor(null, cursorConfig); + + while (!writerStopped) { + cursor.getLast(key, data, null); + for (int i = 0; i <= NODE_MAX; i += 1) { + cursor.getPrev(key, data, null); + } + } + + cursor.close(); + + } catch (Exception e) { + try { + FileOutputStream os = + new FileOutputStream(new File("./err.txt")); + e.printStackTrace(new PrintStream(os)); + os.close(); + } catch (IOException ignored) {} + System.exit(1); + } + } + }; + + writer.start(); + reader.start(); + writer.finishTest(); + reader.finishTest(); + + close(); + System.out.println("SUCCESS"); + } + + private void open() + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), String.valueOf(NODE_MAX)); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setSortedDuplicates(dups); + db = env.openDatabase(null, "testDb", dbConfig); + } + + private void close() + throws Exception { + + db.close(); + db = null; + env.close(); + env = null; + } +} diff --git a/test/com/sleepycat/je/dbi/SortedLSNTreeWalkerTest.java b/test/com/sleepycat/je/dbi/SortedLSNTreeWalkerTest.java new file mode 100644 index 0000000..700f889 --- /dev/null +++ b/test/com/sleepycat/je/dbi/SortedLSNTreeWalkerTest.java @@ -0,0 +1,424 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.SortedLSNTreeWalker.TreeNodeProcessor; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.Node; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; + +public class SortedLSNTreeWalkerTest extends DualTestCase { + private static boolean DEBUG = false; + + /* Use small NODE_MAX to cause lots of splits. 
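+     * With NODE_MAX = 6, even the 30-record data set used here spreads
+     * over several BINs and more than one tree level, so the sorted-LSN
+     * walker has real internal nodes to traverse.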
*/ + private static final int NODE_MAX = 6; + private static final int N_RECS = 30; + + private final File envHome; + private Environment env; + private Database db; + + public SortedLSNTreeWalkerTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testNoDupsLoadLNs() + throws Throwable { + + doTestNoDups(true); + } + + @Test + public void testNoDupsNoLoadLNs() + throws Throwable { + + doTestNoDups(false); + } + + private void doTestNoDups(boolean loadLNs) + throws Throwable { + + open(false); + writeData(false); + if (DEBUG) { + System.out.println("***************"); + DbInternal.getDbImpl(db).getTree().dump(); + } + BtreeStats stats = (BtreeStats) db.getStats(null); + close(); + if (DEBUG) { + System.out.println("***************"); + } + open(false); + readData(); + if (DEBUG) { + DbInternal.getDbImpl(db).getTree().dump(); + System.out.println("***************"); + } + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + db.close(); + db = null; + assertEquals(N_RECS, walkTree(dbImpl, stats, loadLNs)); + close(); + } + + @Test + public void testNoDupsDupsAllowed() + throws Throwable { + + open(true); + writeData(false); + if (DEBUG) { + System.out.println("***************"); + DbInternal.getDbImpl(db).getTree().dump(); + } + BtreeStats stats = (BtreeStats) db.getStats(null); + close(); + if (DEBUG) { + System.out.println("***************"); + } + open(true); + if (DEBUG) { + DbInternal.getDbImpl(db).getTree().dump(); + System.out.println("***************"); + } + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + db.close(); + db = null; + assertEquals(N_RECS, walkTree(dbImpl, stats, true)); + close(); + } + + @Test + public void testDups() + throws Throwable { + + doTestDups(true); + } + + @Test + public void testDupsNoLoadLNs() + throws Throwable { + + doTestDups(false); + } + + private void doTestDups(boolean loadLNs) + throws Throwable { + + open(true); + writeData(true); + BtreeStats stats = (BtreeStats) db.getStats(null); + close(); + open(true); + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + db.close(); + db = null; + assertEquals(N_RECS * 2, walkTree(dbImpl, stats, loadLNs)); + assertEquals(N_RECS * 2, walkTree(dbImpl, stats, loadLNs, loadLNs)); + close(); + } + + @Test + public void testPendingDeleted() + throws Throwable { + + open(true); + int numRecs = writeDataWithDeletes(); + BtreeStats stats = (BtreeStats) db.getStats(null); + close(); + open(true); + readData(); + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + db.close(); + db = null; + assertEquals(numRecs, walkTree(dbImpl, stats, true)); + close(); + } + + private void open(boolean allowDuplicates) + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), String.valueOf(NODE_MAX)); + /* + envConfig.setConfigParam + (EnvironmentParams.MAX_MEMORY.getName(), "10000000"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + */ + + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(false); + dbConfig.setTransactional(true); + 
dbConfig.setSortedDuplicates(allowDuplicates);
+        /* Use EVICT_LN to test loading LNs via SLTW. */
+        dbConfig.setCacheMode(CacheMode.EVICT_LN);
+        db = env.openDatabase(null, "testDb", dbConfig);
+    }
+
+    private void writeData(boolean dups)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < N_RECS; i++) {
+            IntegerBinding.intToEntry(i, key);
+            data.setData(new byte[1000]);
+            assertEquals(db.put(null, key, data),
+                         OperationStatus.SUCCESS);
+            if (dups) {
+                IntegerBinding.intToEntry(i + N_RECS + N_RECS, data);
+                assertEquals(db.put(null, key, data),
+                             OperationStatus.SUCCESS);
+            }
+        }
+    }
+
+    private int writeDataWithDeletes()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        int numInserted = 0;
+
+        data.setData(new byte[10]);
+
+        for (int i = 0; i < N_RECS; i++) {
+            IntegerBinding.intToEntry(i, key);
+            Transaction txn = env.beginTransaction(null, null);
+            assertEquals(db.put(txn, key, data),
+                         OperationStatus.SUCCESS);
+            boolean deleted = false;
+            if ((i % 2) == 0) {
+                assertEquals(db.delete(txn, key),
+                             OperationStatus.SUCCESS);
+                deleted = true;
+            }
+            if ((i % 3) == 0) {
+                txn.abort();
+            } else {
+                txn.commit();
+                if (!deleted) {
+                    numInserted++;
+                }
+            }
+        }
+        return numInserted;
+    }
+
+    private void readData()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        IntegerBinding.intToEntry(N_RECS - 1, key);
+        assertEquals(db.get(null, key, data, LockMode.DEFAULT),
+                     OperationStatus.SUCCESS);
+    }
+
+    private int walkTree(final DatabaseImpl dbImpl,
+                         final BtreeStats stats,
+                         final boolean loadLNNodes) {
+        return walkTree(dbImpl, stats, loadLNNodes, false);
+    }
+
+    /* Return the number of keys seen in all BINs.
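+     * Note that the SortedLSNTreeWalker visits nodes in LSN (log) order
+     * rather than key order, so only node counts are asserted, never
+     * ordering. The counts are cross-checked against the BtreeStats
+     * captured before the environment was reopened, and the root IN is
+     * counted separately because its LSN is the walker's starting point
+     * and is not passed to the callback.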
*/ + private int walkTree(final DatabaseImpl dbImpl, + final BtreeStats stats, + final boolean loadLNNodes, + final boolean loadDupLNNodes) { + + final boolean useOffHeapCache = + dbImpl.getEnv().getOffHeapCache().isEnabled(); + + TestingTreeNodeProcessor tnp = new TestingTreeNodeProcessor() { + @Override + public void processLSN(long childLSN, + LogEntryType childType, + Node node, + byte[] lnKey, + int lastLoggedSize) { + if (DEBUG) { + System.out.println + (childType + " " + DbLsn.toString(childLSN)); + } + + if (childType.equals(LogEntryType.LOG_DBIN)) { + dbinCount++; + assertNull(lnKey); + assertNotNull(node); + } else if (childType.equals(LogEntryType.LOG_BIN)) { + binCount++; + assertNull(lnKey); + assertNotNull(node); + } else if (childType.equals(LogEntryType.LOG_DIN)) { + dinCount++; + assertNull(lnKey); + assertNotNull(node); + } else if (childType.equals(LogEntryType.LOG_IN)) { + inCount++; + assertNull(lnKey); + assertNotNull(node); + } else if (childType.isUserLNType()) { + entryCount++; + assertNotNull(lnKey); + if (dbImpl.getSortedDuplicates()) { + if (loadDupLNNodes) { + assertNotNull(node); + } else if (!useOffHeapCache) { + assertNull(node); + } + } else { + if (loadLNNodes) { + assertNotNull(node); + } else if (!useOffHeapCache) { + assertNull(node); + } + } + } else if (childType.equals(LogEntryType.LOG_DUPCOUNTLN)) { + dupLNCount++; + assertNotNull(lnKey); + assertNotNull(node); + } else { + throw new RuntimeException + ("unknown entry type: " + childType); + } + } + + public void processDupCount(long ignore) { + } + }; + + SortedLSNTreeWalker walker = + new SortedLSNTreeWalker + (new DatabaseImpl[] { dbImpl }, + false /*setDbState*/, + new long[] { dbImpl.getTree().getRootLsn() }, + tnp, null /*savedExceptions*/, null); + + walker.accumulateLNs = loadLNNodes; + walker.accumulateDupLNs = loadDupLNNodes; + + walker.walk(); + + if (DEBUG) { + System.out.println(stats); + } + + /* Add one since the root LSN is not passed to the walker. */ + assertEquals(stats.getInternalNodeCount(), tnp.inCount + 1); + assertEquals(stats.getBottomInternalNodeCount(), tnp.binCount); + assertEquals(stats.getDuplicateInternalNodeCount(), tnp.dinCount); + assertEquals(stats.getDuplicateBottomInternalNodeCount(), + tnp.dbinCount); + assertEquals(stats.getLeafNodeCount(), tnp.entryCount); + assertEquals(stats.getDupCountLeafNodeCount(), tnp.dupLNCount); + if (DEBUG) { + System.out.println("INs: " + tnp.inCount); + System.out.println("BINs: " + tnp.binCount); + System.out.println("DINs: " + tnp.dinCount); + System.out.println("DBINs: " + tnp.dbinCount); + System.out.println("entries: " + tnp.entryCount); + System.out.println("dupLN: " + tnp.dupLNCount); + } + + return tnp.entryCount; + } + + private static class TestingTreeNodeProcessor + implements TreeNodeProcessor { + + int binCount = 0; + int dbinCount = 0; + int dinCount = 0; + int inCount = 0; + int entryCount = 0; + int dupLNCount = 0; + + public void processLSN(long childLSN, + LogEntryType childType, + Node ignore, + byte[] ignore2, + int ignore3) { + throw new RuntimeException("override me please"); + } + + public void processDirtyDeletedLN(long childLsn, LN ln, byte[] lnKey) { + /* Do nothing. 
*/ + } + + public void processDupCount(int ignore) { + throw new RuntimeException("override me please"); + } + + public void noteMemoryExceeded() { + } + } + + private void close() + throws Exception { + + if (db != null) { + db.close(); + db = null; + } + + if (env != null) { + close(env); + env = null; + } + } +} diff --git a/test/com/sleepycat/je/dbi/StartupTrackerTest.java b/test/com/sleepycat/je/dbi/StartupTrackerTest.java new file mode 100644 index 0000000..30e620c --- /dev/null +++ b/test/com/sleepycat/je/dbi/StartupTrackerTest.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static org.junit.Assert.fail; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.PrintStream; +import java.util.logging.Logger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Basic verification of environment startup tracking. Verification is really + * via inspection. + */ +public class StartupTrackerTest extends TestBase { + + private Environment env; + private final File envHome; + + public StartupTrackerTest() { + envHome = new File(System.getProperty(TestUtils.DEST_DIR)); + } + + @Before + public void setUp() + throws Exception { + + TestUtils.removeLogFiles("Setup", envHome, false); + super.setUp(); + } + + @After + public void tearDown() { + + /* + * Close down environments in case the unit test failed so that the log + * files can be removed. 
+ */ + try { + if (env != null) { + env.close(); + env = null; + } + } catch (Exception e) { + e.printStackTrace(); + return; + } + + // TestUtils.removeLogFiles("TearDown", envHome, false); + } + + /* + */ + @Test + public void testEnvRecovery() { + + Logger logger = LoggerUtils.getLoggerFixedPrefix(this.getClass(), + "test"); + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintStream p = new PrintStream(baos); + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam("je.env.startupThreshold", "0"); + env = new Environment(envHome, envConfig); + env.printStartupInfo(p); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + DatabaseEntry key = new DatabaseEntry(new byte[1000]); + DatabaseEntry data = new DatabaseEntry(new byte[1000]); + for (int i = 0; i < 10; i += 1) { + db.put(null, key, data); + } + db.close(); + env.close(); + + env = new Environment(envHome, envConfig); + env.printStartupInfo(p); + logger.fine(baos.toString()); + env.close(); + env = null; + } catch (Exception e) { + fail("This test succeeds as long as the printing of the report " + + "does not cause a problem. Any exception is a failure. " ); + } + } +} + diff --git a/test/com/sleepycat/je/dbi/UncontendedLockTest.java b/test/com/sleepycat/je/dbi/UncontendedLockTest.java new file mode 100644 index 0000000..4b9bb15 --- /dev/null +++ b/test/com/sleepycat/je/dbi/UncontendedLockTest.java @@ -0,0 +1,276 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.dbi; + +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Checks that optimized uncontended locks (see CursorImpl.lockLN) are taken + * during delete/update operations for a non-duplicates DB. For a duplicates + * DB, ensure that uncontended locks are not taken, since this is not fully + * implemented and would be unreliable. 
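+ *
+ * "Uncontended" here means that the first lock request for the record is
+ * the write lock taken by the operation itself. A minimal sketch of the
+ * pattern being measured (illustration only; see doUncontended for the
+ * real assertions):
+ *
+ *   Transaction txn = env.beginTransaction(null, null);
+ *   db.putNoOverwrite(txn, key, data); // expect 1 request, 1 write lock
+ *   txn.commit();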
+ */ +public class UncontendedLockTest extends DualTestCase { + + private static final StatsConfig CLEAR_STATS; + static { + CLEAR_STATS = new StatsConfig(); + CLEAR_STATS.setClear(true); + } + private final File envHome; + private Environment env; + private Database db; + private boolean isSerializable; + + public UncontendedLockTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + try { + super.tearDown(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + env = null; + db = null; + } + + @Test + public void testUncontended() { + doUncontended(false); + } + + @Test + public void testUncontendedDups() { + doUncontended(true); + } + + private void doUncontended(boolean dups) { + open(dups); + env.getStats(CLEAR_STATS); + + /* + * NOTE: Number of contended and dup DB requests are expected based on + * testing, not on any specific requirement. If we can reduce these + * over time, great, just update the test. + * + * The critical thing is that the number of requests and write locks is + * exactly one for the non-dup, non-contended case. + */ + + /* Insert */ + Transaction txn = env.beginTransaction(null, null); + writeData(dups, txn, false /*update*/); + checkLocks( + txn, + dups ? 2 : 1 /*nRequests*/, + dups ? 2 : 1 /*nWriteLocks*/); + txn.commit(); + + /* Update */ + txn = env.beginTransaction(null, null); + writeData(dups, txn, true /*update*/); + checkLocks( + txn, + dups ? 2 : 1 /*nRequests*/, + dups ? 2 : 1 /*nWriteLocks*/); + txn.commit(); + + /* Delete */ + txn = env.beginTransaction(null, null); + deleteData(dups, txn); + checkLocks( + txn, + dups ? 3 : (isSerializable ? 3 : 1) /*nRequests*/, + dups ? 2 : (isSerializable ? 2 : 1) /*nWriteLocks*/); + txn.commit(); + + close(); + } + + @Test + public void testContended() { + doContended(false); + } + + @Test + public void testContendedDups() { + doContended(true); + } + + private void doContended(boolean dups) { + open(dups); + env.getStats(CLEAR_STATS); + + /* + * NOTE: Number of contended and dup DB requests are expected based on + * testing, not on any specific requirement. If we can reduce these + * over time, great, just update the test. + */ + + /* Insert - no way to have contention on a new slot. */ + writeData(dups, null, false /*update*/); + + /* Simulate contended locking by reading first. */ + + /* Update */ + Transaction txn = env.beginTransaction(null, null); + readData(dups, txn); + writeData(dups, txn, true /*update*/); + checkLocks( + txn, + dups ? (isSerializable ? 6 : 6) : (isSerializable ? 4 : 4) + /*nRequests*/, + dups ? 3 : 2 /*nWriteLocks*/); + txn.commit(); + + /* Delete */ + txn = env.beginTransaction(null, null); + readData(dups, txn); + deleteData(dups, txn); + checkLocks( + txn, + dups ? 4 : (isSerializable ? 4 : 3) /*nRequests*/, + dups ? 
2 : 2 /*nWriteLocks*/); + txn.commit(); + + close(); + } + + private void checkLocks(Transaction txn, int nRequests, int nWriteLocks) { + final EnvironmentStats envStats = env.getStats(CLEAR_STATS); + assertEquals(nRequests, envStats.getNRequests()); + assertEquals(0, envStats.getNWaits()); + StatGroup lockerStats = DbInternal.getLocker(txn).collectStats(); + assertEquals(0, lockerStats.getInt(LOCK_READ_LOCKS)); + assertEquals(nWriteLocks, lockerStats.getInt(LOCK_WRITE_LOCKS)); + } + + private void open(boolean dups) { + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + /* Don't run daemons, so they don't lock unexpectedly. */ + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = create(envHome, envConfig); + + isSerializable = env.getConfig().getTxnSerializableIsolation(); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void close() { + db.close(); + close(env); + } + + private void writeData(boolean dups, Transaction txn, boolean update) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(1, data); + OperationStatus status; + /* putNoOverwrite not used for dups because it does extra locking. 
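+         * (Presumably the existence check for the key issues additional
+         * lock requests in a duplicates DB, which would skew the request
+         * counts this test asserts.)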
*/ + if (update || dups) { + status = db.put(txn, key, data); + } else { + status = db.putNoOverwrite(txn, key, data); + } + assertSame(OperationStatus.SUCCESS, status); + if (!dups) { + return; + } + IntegerBinding.intToEntry(2, data); + if (update) { + status = db.put(txn, key, data); + } else { + status = db.putNoDupData(txn, key, data); + } + assertSame(OperationStatus.SUCCESS, status); + } + + private void deleteData(boolean dups, Transaction txn) { + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(1, key); + if (!dups) { + final OperationStatus status = db.delete(txn, key); + assertSame(OperationStatus.SUCCESS, status); + return; + } + final DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(2, data); + final Cursor cursor = db.openCursor(txn, null); + OperationStatus status = cursor.getSearchBoth(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertSame(OperationStatus.SUCCESS, status); + cursor.close(); + } + + private void readData(boolean dups, Transaction txn) { + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(1, key); + if (!dups) { + final OperationStatus status = + db.get(txn, key, new DatabaseEntry(), null); + assertSame(OperationStatus.SUCCESS, status); + return; + } + final DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(2, data); + final OperationStatus status = db.getSearchBoth(txn, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + } +} diff --git a/test/com/sleepycat/je/evictor/BackgroundEvictionTest.java b/test/com/sleepycat/je/evictor/BackgroundEvictionTest.java new file mode 100644 index 0000000..9620b6e --- /dev/null +++ b/test/com/sleepycat/je/evictor/BackgroundEvictionTest.java @@ -0,0 +1,330 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test that the background eviction threadpool can appropriately handle + * maintaining the memory budget. Do basic eviction, but turn the critical + * percentage way up so that the app threads do no eviction, leaving the + * work to the eviction pool. + * Disable daemon threads (which do in-line eviction) so they are not + * available to help with the eviction. 
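+ *
+ * The pool-only behavior is arranged in openOne() via:
+ *
+ *   envConfig.setConfigParam
+ *       (EnvironmentParams.EVICTOR_CRITICAL_PERCENTAGE.getName(), "900");
+ *
+ * Application threads evict only above the critical threshold, and at 900%
+ * of the budget that point is never reached, so all eviction must come from
+ * the background pool (verified by checkStatsConsistency).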
+ */ +public class BackgroundEvictionTest extends TestBase { + + private static final int N_ENVS = 5; + private static final int ONE_MB = 1 << 20; + private static final int ENV_CACHE_SIZE = ONE_MB; + private static final int TOTAL_CACHE_SIZE = N_ENVS * ENV_CACHE_SIZE; + + private static final int MIN_DATA_SIZE = 50 * 1024; + private static final int ENTRY_DATA_SIZE = 500; + private static final String TEST_PREFIX = "BackgroundEvictionTest_"; + private static final StatsConfig CLEAR_CONFIG = new StatsConfig(); + static { + CLEAR_CONFIG.setClear(true); + } + + private File envHome; + private File[] dirs; + private Environment[] envs; + private Database[] dbs; + private boolean sharedCache = true; + + public BackgroundEvictionTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + dirs = new File[N_ENVS]; + envs = new Environment[N_ENVS]; + dbs = new Database[N_ENVS]; + + for (int i = 0; i < N_ENVS; i += 1) { + dirs[i] = new File(envHome, TEST_PREFIX + i); + dirs[i].mkdir(); + assertTrue(dirs[i].isDirectory()); + } + } + + @Override + @After + public void tearDown() { + + for (int i = 0; i < N_ENVS; i += 1) { + if (dbs[i] != null) { + try { + dbs[i].close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + dbs[i] = null; + } + if (envs[i] != null) { + try { + envs[i].close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + envs[i] = null; + } + } + envHome = null; + dirs = null; + envs = null; + dbs = null; + } + + @Test + public void testBaseline() + throws DatabaseException { + + /* Open all DBs in the same environment. */ + final int N_DBS = N_ENVS; + sharedCache = false; + openOne(0); + DatabaseConfig dbConfig = dbs[0].getConfig(); + for (int i = 1; i < N_DBS; i += 1) { + dbs[i] = envs[0].openDatabase(null, "foo" + i, dbConfig); + } + + for (int i = 0; i < N_DBS; i += 1) { + write(i, ENV_CACHE_SIZE * 2); + } + + for (int repeat = 0; repeat < 50; repeat += 1) { + + /* Read all DBs evenly. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + boolean done = false; + for (int i = 0; !done; i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_DBS; j += 1) { + if (dbs[j].get(null, key, data, null) != + OperationStatus.SUCCESS) { + done = true; + } + } + } + } + + for (int i = 1; i < N_DBS; i += 1) { + dbs[i].close(); + dbs[i] = null; + } + + closeOne(0); + } + + /** + * Checks that the background pool works correctly with shared environments. + */ + @Test + public void testOpenClose() + throws DatabaseException { + + openAll(); + int nRecs = 0; + for (int i = 0; i < N_ENVS; i += 1) { + int n = write(i, TOTAL_CACHE_SIZE); + if (nRecs < n) { + nRecs = n; + } + } + + closeAll(); + openAll(); + readEvenly(nRecs); + /* Close only one. */ + for (int i = 0; i < N_ENVS; i += 1) { + closeOne(i); + readEvenly(nRecs); + openOne(i); + readEvenly(nRecs); + } + /* Close all but one. 
*/ + for (int i = 0; i < N_ENVS; i += 1) { + for (int j = 0; j < N_ENVS; j += 1) { + if (j != i) { + closeOne(j); + } + } + readEvenly(nRecs); + for (int j = 0; j < N_ENVS; j += 1) { + if (j != i) { + openOne(j); + } + } + readEvenly(nRecs); + } + closeAll(); + } + + private void openAll() + throws DatabaseException { + + for (int i = 0; i < N_ENVS; i += 1) { + openOne(i); + } + } + + private void openOne(int i) + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setSharedCache(sharedCache); + envConfig.setCacheSize(TOTAL_CACHE_SIZE); + envConfig.setConfigParam(EnvironmentConfig.TREE_MIN_MEMORY, + String.valueOf(MIN_DATA_SIZE)); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + envConfig.setConfigParam + (EnvironmentParams.EVICTOR_CRITICAL_PERCENTAGE.getName(), "900"); + + envConfig.setConfigParam(EnvironmentParams.STATS_COLLECT.getName(), + "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + envs[i] = new Environment(dirs[i], envConfig); + /* Clear the eviction stats calculated during recovery. */ + envs[i].getStats(CLEAR_CONFIG); + + dbs[i] = envs[i].openDatabase(null, "foo", dbConfig); + } + + private void closeAll() + throws DatabaseException { + + for (int i = 0; i < N_ENVS; i += 1) { + closeOne(i); + } + } + + private void closeOne(int i) + throws DatabaseException { + + if (dbs[i] != null) { + dbs[i].close(); + dbs[i] = null; + } + + if (envs[i] != null) { + envs[i].close(); + envs[i] = null; + } + + /* Check stats only after system is quiescent. */ + checkStatsConsistency(); + } + + /** + * Writes enough records in the given envIndex environment to cause at + * least minSizeToWrite bytes to be used in the cache. + */ + private int write(int envIndex, int minSizeToWrite) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]); + int i; + for (i = 0; i < minSizeToWrite / ENTRY_DATA_SIZE; i += 1) { + IntegerBinding.intToEntry(i, key); + dbs[envIndex].put(null, key, data); + } + return i; + } + + /** + * Reads alternating records from each env, reading all records from each + * env. Checks that all environments use roughly equal portions of the + * cache. + */ + private void readEvenly(int nRecs) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Repeat reads twice to give the LRU a fighting chance. */ + for (int repeat = 0; repeat < 2; repeat += 1) { + for (int i = 0; i < nRecs; i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_ENVS; j += 1) { + if (dbs[j] != null) { + dbs[j].get(null, key, data, null); + } + } + } + } + } + + /** + * Checks that all eviction was done with the background pool. 
+ */ + private void checkStatsConsistency() + throws DatabaseException { + + EnvironmentStats stats = null; + + for (int i = 0; i < N_ENVS; i += 1) { + if (envs[i] != null) { + stats = envs[i].getStats(null); + assertEquals(0, stats.getNBytesEvictedCritical()); + assertEquals(0, stats.getNBytesEvictedCacheMode()); + assertEquals(0, stats.getNBytesEvictedManual()); + assertTrue(stats.getNBytesEvictedEvictorThread() > 0); + assertTrue("cacheTotalBytes=" + stats.getCacheTotalBytes() + + " maxMem=" + TOTAL_CACHE_SIZE, + stats.getCacheTotalBytes() < TOTAL_CACHE_SIZE); + } + } + } +} diff --git a/test/com/sleepycat/je/evictor/CacheModeTest.java b/test/com/sleepycat/je/evictor/CacheModeTest.java new file mode 100644 index 0000000..040bd1a --- /dev/null +++ b/test/com/sleepycat/je/evictor/CacheModeTest.java @@ -0,0 +1,1057 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.Get; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Put; +import com.sleepycat.je.ReadOptions; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +@RunWith(Parameterized.class) +public class CacheModeTest extends TestBase { + + /* Records occupy three BINs. 
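+     * Keys FIRST_REC..LAST_REC give 8 records, and with NODE_MAX = 5 the
+     * resulting splits leave three BINs under the root IN, as open()
+     * verifies via root.getNEntries().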
*/ + private static final int FIRST_REC = 0; + private static final int LAST_REC = 7; + private static final int NODE_MAX = 5; + + private File envHome; + private Environment env; + private Database db; + private IN root; + private BIN[] bins; + private DatabaseEntry[] keys; + private boolean resetOnFailure; + private CursorConfig cursorConfig; + + private boolean embeddedLNs = false; + private boolean useOffHeapCache = false; + + @Parameterized.Parameters + public static List genParams() { + final List params = new ArrayList(); + /*0*/ params.add( + new Object[] { Boolean.FALSE, Boolean.FALSE, Boolean.FALSE }); + /*1*/ params.add( + new Object[] { Boolean.FALSE, Boolean.FALSE, Boolean.TRUE }); + /*2*/ params.add( + new Object[] { Boolean.FALSE, Boolean.TRUE, Boolean.FALSE }); + /*3*/ params.add( + new Object[] { Boolean.FALSE, Boolean.TRUE, Boolean.TRUE }); + /*4*/ params.add( + new Object[] { Boolean.TRUE, Boolean.FALSE, Boolean.FALSE }); + /*5*/ params.add( + new Object[] { Boolean.TRUE, Boolean.FALSE, Boolean.TRUE }); + /*6*/ params.add( + new Object[] { Boolean.TRUE, Boolean.TRUE, Boolean.FALSE }); + /*7*/ params.add( + new Object[] { Boolean.TRUE, Boolean.TRUE, Boolean.TRUE }); + return params; + } + + public CacheModeTest(final boolean resetOnFailure, + final boolean embeddedLNs, + final boolean useOffHeapCache) { + + envHome = SharedTestUtils.getTestDir(); + + this.resetOnFailure = resetOnFailure; + this.embeddedLNs = embeddedLNs; + this.useOffHeapCache = useOffHeapCache; + + cursorConfig = + resetOnFailure ? + (new CursorConfig().setNonSticky(true)) : + null; + } + + @After + public void tearDown() { + + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + } + + try { + TestUtils.removeLogFiles("TearDown", envHome, false); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + envHome = null; + env = null; + db = null; + root = null; + bins = null; + keys = null; + } + + private void open() { + + /* Open env, disable all daemon threads. */ + final EnvironmentConfig envConfig = new EnvironmentConfig(); + + envConfig.setAllowCreate(true); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.STATS_COLLECT, "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + envConfig.setOffHeapCacheSize(useOffHeapCache ? 10000000 : 0); + /* Use one LRU list for easier testing. */ + envConfig.setConfigParam(EnvironmentConfig.EVICTOR_N_LRU_LISTS, "1"); + + /* Force embedded LN setting. We use 1 byte data values. */ + envConfig.setConfigParam( + EnvironmentConfig.TREE_MAX_EMBEDDED_LN, + embeddedLNs ? "1" : "0"); + + env = new Environment(envHome, envConfig); + + /* TREE_MAX_EMBEDDED_LN may be overridden in a je.properties file. */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 1); + + /* Open db. 
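+         * When embeddedLNs is true, TREE_MAX_EMBEDDED_LN = 1 (set above)
+         * combined with the one-byte data values written below means every
+         * LN is stored inside its BIN slot, so getTarget() is expected to
+         * return null for LN slots throughout these tests.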
*/ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setNodeMaxEntries(NODE_MAX); + db = env.openDatabase(null, "foo", dbConfig); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + for (int i = FIRST_REC; i <= LAST_REC; i += 1) { + IntegerBinding.intToEntry(i, key); + db.put(null, key, data); + } + + /* Sync to flush log buffer and make BINs non-dirty. */ + env.sync(); + + /* Get root/parent IN in this two level tree. */ + root = DbInternal.getDbImpl(db). + getTree().getRootIN(CacheMode.UNCHANGED); + root.releaseLatch(); + assertEquals(root.toString(), 3, root.getNEntries()); + + /* Get BINs and first key in each BIN. */ + bins = new BIN[3]; + keys = new DatabaseEntry[3]; + for (int i = 0; i < 3; i += 1) { + bins[i] = (BIN) root.getTarget(i); + keys[i] = new DatabaseEntry(); + keys[i].setData(bins[i].getKey(0)); + //System.out.println("key " + i + ": " + + //IntegerBinding.entryToInt(keys[i])); + } + } + + private void close() { + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + private void setMode(CacheMode mode) { + EnvironmentMutableConfig envConfig = env.getMutableConfig(); + envConfig.setCacheMode(mode); + env.setMutableConfig(envConfig); + } + + /** + * Configure a tiny cache size and set a trap that fires an assertion when + * eviction occurs. This is used for testing EVICT_BIN and MAKE_COLD, + * which should never cause critical eviction. + */ + private void setEvictionTrap() { + + EnvironmentMutableConfig envConfig = env.getMutableConfig(); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + env.setMutableConfig(envConfig); + + /* Fill the cache artificially. */ + DbInternal.getNonNullEnvImpl(env).getMemoryBudget(). + updateAdminMemoryUsage(MemoryBudget.MIN_MAX_MEMORY_SIZE); + + class MyHook implements TestHook { + public Boolean getHookValue() { + fail("Eviction should not occur in EVICT_BIN mode"); + return false; /* For compiler, will never happen. */ + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void doHook() { + throw new UnsupportedOperationException(); + } + public void doHook(Boolean obj) { + throw new UnsupportedOperationException(); + } + } + + DbInternal.getNonNullEnvImpl(env).getEvictor().setRunnableHook + (new MyHook()); + } + + private void clearEvictionTrap() { + DbInternal.getNonNullEnvImpl(env).getEvictor().setRunnableHook(null); + + /* Bump cache size back up to a reasonable amount. */ + EnvironmentMutableConfig envConfig = env.getMutableConfig(); + envConfig.setCacheSize(64 * 1024 * 1024); + env.setMutableConfig(envConfig); + + DbInternal.getNonNullEnvImpl(env).getMemoryBudget(). 
+ updateAdminMemoryUsage(0 - MemoryBudget.MIN_MAX_MEMORY_SIZE); + } + + private void readFirstAndLastRecord() { + final DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + status = db.get(null, keys[0], data, null); + assertSame(OperationStatus.SUCCESS, status); + status = db.get(null, keys[2], data, null); + assertSame(OperationStatus.SUCCESS, status); + } + + private void writeFirstAndLastRecord() { + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + OperationStatus status; + status = db.put(null, keys[0], data); + assertSame(OperationStatus.SUCCESS, status); + status = db.put(null, keys[2], data); + assertSame(OperationStatus.SUCCESS, status); + } + + private List getLRUList() { + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + return envImpl.getEvictor().getPri1LRUList(); + } + + private List getMRUList() { + final List list = getLRUList(); + Collections.reverse(list); + return list; + } + + private void assertLRU(BIN... bins) { + final List list = getLRUList(); + for (int i = 0; i < bins.length; i += 1) { + assertTrue(i < list.size()); + assertSame(list.get(i), bins[i]); + } + } + + private void assertMRU(BIN... bins) { + + final List list = getMRUList(); + + for (int i = 0; i < bins.length; i += 1) { + + assertTrue("" + i, i < list.size()); + + assertSame( + "expect-node=" + bins[i].getNodeId() + + "actual-node=" + list.get(i).getNodeId(), + bins[i], list.get(i)); + } + } + + @Test + public void testMode_DEFAULT() { + open(); + setMode(CacheMode.DEFAULT); + doTestMode_DEFAULT(); + close(); + } + + /** + * CacheMode.DEFAULT assigns next generation to BIN and all ancestors, does + * not evict. + */ + private void doTestMode_DEFAULT() { + readFirstAndLastRecord(); + + /* MRU order: last BIN, first BIN. */ + assertMRU(bins[2], bins[0]); + + /* BINs should not be evicted. */ + assertNotNull(root.getTarget(0)); + assertNotNull(root.getTarget(1)); + assertNotNull(root.getTarget(2)); + + /* LNs should not be evicted. */ + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + assertNull(bins[1].getTarget(0)); + assertNull(bins[2].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + assertNotNull(bins[1].getTarget(0)); + assertNotNull(bins[2].getTarget(0)); + } + } + + @Test + public void testMode_UNCHANGED() { + open(); + setMode(CacheMode.UNCHANGED); + doTestMode_UNCHANGED(); + close(); + } + + /** + * CacheMode.UNCHANGED does not change generations, does not evict. + */ + private void doTestMode_UNCHANGED() { + List mruList = getMRUList(); + + readFirstAndLastRecord(); + + /* MRU order should not have changed. */ + assertEquals(mruList, getMRUList()); + + /* BINs should not be evicted. */ + assertNotNull(root.getTarget(0)); + assertNotNull(root.getTarget(1)); + assertNotNull(root.getTarget(2)); + assertTrue(!root.getTarget(0).isBINDelta(false)); + assertTrue(!root.getTarget(1).isBINDelta(false)); + assertTrue(!root.getTarget(2).isBINDelta(false)); + + /* LNs should not be evicted. */ + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + assertNull(bins[1].getTarget(0)); + assertNull(bins[2].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + assertNotNull(bins[1].getTarget(0)); + assertNotNull(bins[2].getTarget(0)); + } + + /* Everything is resident, no eviction should occur with UNCHANGED. */ + checkEvictionWithCursorScan( + CacheMode.UNCHANGED, + false /*expectLNEviction*/, + false /*expectBINEviction*/); + + /* And LRU list should not change. 
         */
+        assertEquals(mruList, getMRUList());
+
+        /* Evict all LNs with EVICT_LN. */
+        checkEvictionWithCursorScan(
+            CacheMode.EVICT_LN,
+            true /*expectLNEviction*/,
+            false /*expectBINEviction*/);
+
+        mruList = getMRUList();
+
+        /* Now LNs should be evicted using UNCHANGED. */
+        checkEvictionWithCursorScan(
+            CacheMode.UNCHANGED,
+            true /*expectLNEviction*/,
+            false /*expectBINEviction*/);
+
+        /* But LRU list should not change. */
+        assertEquals(mruList, getMRUList());
+
+        /* Evict all BINs using EVICT_BIN. */
+        checkEvictionWithCursorScan(
+            CacheMode.EVICT_BIN,
+            true /*expectLNEviction*/,
+            true /*expectBINEviction*/);
+
+        /* Now BINs should be evicted using UNCHANGED. */
+        checkEvictionWithCursorScan(
+            CacheMode.UNCHANGED,
+            true /*expectLNEviction*/,
+            true /*expectBINEviction*/);
+    }
+
+    private void checkEvictionWithCursorScan(CacheMode mode,
+                                             boolean expectLNEviction,
+                                             boolean expectBINEviction) {
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry();
+
+        final Cursor cursor = db.openCursor(null, null);
+        cursor.setCacheMode(mode);
+
+        BIN prevBin = null;
+        int prevIndex = -1;
+
+        while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) {
+
+            final CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor);
+            final BIN currBin = cursorImpl.getBIN();
+            final int currIndex = cursorImpl.getIndex();
+
+            if (embeddedLNs) {
+                assertNull(currBin.getTarget(currIndex));
+            } else {
+                assertNotNull(currBin.getTarget(currIndex));
+            }
+
+            if (currBin != prevBin && prevBin != null) {
+                assertEquals(expectBINEviction, !prevBin.getInListResident());
+            }
+
+            if (!embeddedLNs && prevIndex >= 0) {
+                if (expectLNEviction || expectBINEviction) {
+                    assertNull(prevBin.getTarget(prevIndex));
+                } else {
+                    assertNotNull(prevBin.getTarget(prevIndex));
+                }
+            }
+
+            prevBin = currBin;
+            prevIndex = currIndex;
+        }
+
+        cursor.close();
+
+        if (prevBin != null) {
+            assertEquals(expectBINEviction, !prevBin.getInListResident());
+        }
+
+        if (!embeddedLNs && prevIndex >= 0) {
+            if (expectLNEviction || expectBINEviction) {
+                assertNull(prevBin.getTarget(prevIndex));
+            } else {
+                assertNotNull(prevBin.getTarget(prevIndex));
+            }
+        }
+    }
+
+    /**
+     * CacheMode.KEEP_HOT is deprecated and behaves the same as DEFAULT.
+     */
+    @Test
+    public void testMode_KEEP_HOT() {
+        open();
+        setMode(CacheMode.KEEP_HOT);
+        doTestMode_DEFAULT();
+        close();
+    }
+
+    /**
+     * CacheMode.MAKE_COLD is deprecated and behaves the same as UNCHANGED.
+     */
+    @Test
+    public void testMode_MAKE_COLD() {
+        open();
+        setMode(CacheMode.MAKE_COLD);
+        doTestMode_UNCHANGED();
+        close();
+    }
+
+    /**
+     * CacheMode.EVICT_LN assigns min generation to the BIN but not to its
+     * ancestors; it evicts the LN, but does not evict the BIN.
+     */
+    @Test
+    public void testMode_EVICT_LN() {
+        open();
+
+        setMode(CacheMode.EVICT_LN);
+
+        readFirstAndLastRecord();
+
+        /* MRU order: last BIN, first BIN. */
+        assertMRU(bins[2], bins[0]);
+
+        /* BINs should not be evicted. */
+        assertNotNull(root.getTarget(0));
+        assertNotNull(root.getTarget(1));
+        assertNotNull(root.getTarget(2));
+        assertTrue(!root.getTarget(0).isBINDelta(false));
+        assertTrue(!root.getTarget(1).isBINDelta(false));
+        assertTrue(!root.getTarget(2).isBINDelta(false));
+
+        /* LNs should be evicted.
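+         * Only the LNs actually accessed are evicted: the first and last
+         * BINs were read above, so their LNs are gone, while the middle
+         * BIN's LN stays resident unless LNs are embedded (in which case
+         * there is no separate LN to evict).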
*/ + assertNull(bins[0].getTarget(0)); + if (embeddedLNs) { + assertNull(bins[1].getTarget(0)); + } else { + assertNotNull(bins[1].getTarget(0)); + } + assertNull(bins[2].getTarget(0)); + + close(); + } + + /** + * CacheMode.EVICT_BIN does not change generation of BIN ancestors, evicts + * BIN (and its LNs). + */ + @Test + public void testMode_EVICT_BIN() { + open(); + + setMode(CacheMode.EVICT_BIN); + + setEvictionTrap(); + + readFirstAndLastRecord(); + + /* BINs should be evicted. */ + assertNull(root.getTarget(0)); + assertNotNull(root.getTarget(1)); + assertNull(root.getTarget(2)); + + clearEvictionTrap(); + + /* Dirty first and last BIN. */ + writeFirstAndLastRecord(); + + bins[0] = (BIN) root.getTarget(0); + bins[1] = (BIN) root.getTarget(1); + bins[2] = (BIN) root.getTarget(2); + + /* Dirty BINs should be evicted only when using an off-heap cache. */ + if (useOffHeapCache) { + assertNull(bins[0]); + assertNotNull(bins[1]); + assertNull(bins[2]); + } else { + assertNotNull(bins[0]); + assertNotNull(bins[1]); + assertNotNull(bins[2]); + } + + /* MRU order: last BIN, first BIN. */ + if (!useOffHeapCache) { + assertMRU(bins[2], bins[0]); + } + + /* Get rid of the dirtiness we created above. */ + env.sync(); + + /* Scanning with EVICT_BIN should evict all BINs. */ + checkEvictionWithCursorScan( + CacheMode.EVICT_BIN, + true /*expectLNEviction*/, + true /*expectBINEviction*/); + + /* + * All LNs should be evicted when accessing a dirty BIN. + */ + if (!embeddedLNs) { + + /* Load all BINs and LNs into cache. */ + checkEvictionWithCursorScan( + CacheMode.DEFAULT, + false /*expectLNEviction*/, + false /*expectBINEviction*/); + + bins[0] = (BIN) root.getTarget(0); + bins[1] = (BIN) root.getTarget(1); + bins[2] = (BIN) root.getTarget(2); + + assertNotNull(bins[0]); + assertNotNull(bins[1]); + assertNotNull(bins[2]); + + /* Access/dirty first and last BIN with EVICT_BIN. */ + writeFirstAndLastRecord(); + + if (useOffHeapCache) { + assertNull(root.getTarget(0)); + assertSame(bins[1], root.getTarget(1)); + assertNull(root.getTarget(2)); + } else { + assertSame(bins[0], root.getTarget(0)); + assertSame(bins[1], root.getTarget(1)); + assertSame(bins[2], root.getTarget(2)); + + /* Check that first and last BIN have no LNs. */ + for (final BIN bin : new BIN[] { bins[0], bins[2] }) { + for (int i = 0; i < bin.getNEntries(); i += 1) { + assertNull(bin.getTarget(i)); + } + } + } + + /* Check that middle BIN still has resident LNs. */ + final BIN bin = bins[1]; + for (int i = 0; i < bin.getNEntries(); i += 1) { + assertNotNull(bin.getTarget(i)); + } + } + + close(); + } + + /** + * CacheMode.EVICT_LN does not evict the LN when two consecutive Cursor + * operations end up on the same record. + */ + @Test + public void testEvictLnOnlyWhenMovingAway() { + open(); + + setMode(CacheMode.EVICT_LN); + + Cursor cursor = db.openCursor(null, cursorConfig); + assertSame(CacheMode.EVICT_LN, cursor.getCacheMode()); + + /* + * Examine the NNotResident stat to ensure that a node is not evicted + * and then fetched by a single operation that doesn't move the cursor. + */ + final StatsConfig clearStats = new StatsConfig(); + clearStats.setClear(true); + EnvironmentStats stats = env.getStats(clearStats); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Find 1st record resident. 
*/ + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + } + stats = env.getStats(clearStats); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + + /* Find 2nd record resident, evict 1st. */ + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + if (embeddedLNs) { + assertNull(bins[0].getTarget(1)); + } else { + assertNotNull(bins[0].getTarget(1)); + } + assertNull(bins[0].getTarget(0)); + stats = env.getStats(clearStats); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + + /* Fetch 1st, evict 2nd. */ + status = cursor.getPrev(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + } + assertNull(bins[0].getTarget(1)); + stats = env.getStats(clearStats); + if (embeddedLNs) { + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + } else { + assertEquals(1, TestUtils.getNLNsLoaded(stats)); + } + + /* Fetch 2nd, evict 1st. */ + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertNull(bins[0].getTarget(0)); + if (embeddedLNs) { + assertNull(bins[0].getTarget(1)); + } else { + assertNotNull(bins[0].getTarget(1)); + } + stats = env.getStats(clearStats); + if (embeddedLNs) { + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + } else { + assertEquals(1, TestUtils.getNLNsLoaded(stats)); + } + + /* Fetch 1st, evict 2nd. */ + status = cursor.getPrev(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + } + assertNull(bins[0].getTarget(1)); + stats = env.getStats(clearStats); + if (embeddedLNs) { + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + } else { + assertEquals(1, TestUtils.getNLNsLoaded(stats)); + } + + /* + * With a non-sticky cursor, if we attempt an operation that may move + * the cursor, we will always evict the LN because there is no dup + * cursor to compare with, to see if the position has changed. This is + * an expected drawback of using a non-sticky cursor. + */ + final int expectFetchWithoutPositionChange = resetOnFailure ? 1 : 0; + + /* No fetch needed to access 1st again. */ + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + } + stats = env.getStats(clearStats); + + if (embeddedLNs) { + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + } else { + assertEquals( + expectFetchWithoutPositionChange, + TestUtils.getNLNsLoaded(stats)); + } + + /* + * No fetch needed to access 1st again. Note that no fetch occurs here + * even with a non-sticky cursor, because getCurrent cannot move the + * cursor. + */ + status = cursor.getCurrent(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + } + stats = env.getStats(clearStats); + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + + /* No fetch needed to access 1st again. 
*/ + status = cursor.getSearchKey(keys[0], data, null); + assertSame(OperationStatus.SUCCESS, status); + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + } else { + assertNotNull(bins[0].getTarget(0)); + } + stats = env.getStats(clearStats); + if (embeddedLNs) { + assertNull(bins[0].getTarget(0)); + } else { + assertEquals( + expectFetchWithoutPositionChange, + TestUtils.getNLNsLoaded(stats)); + } + + cursor.close(); + close(); + } + + /** + * CacheMode.EVICT_BIN does not evict the BIN when two consecutive Cursor + * operations end up on the same BIN. If we stay on the same BIN but move + * to a new LN, only the LN is evicted. If we stay on the same LN, neither + * LN nor BIN is evicted. + */ + @Test + public void testEvictBinOnlyWhenMovingAway() { + open(); + + setMode(CacheMode.EVICT_BIN); + setEvictionTrap(); + + Cursor cursor = db.openCursor(null, cursorConfig); + assertSame(CacheMode.EVICT_BIN, cursor.getCacheMode()); + + /* + * Examine the NNotResident stat to ensure that a node is not evicted + * and then fetched by a single operation that doesn't move the cursor. + */ + final StatsConfig clearStats = new StatsConfig(); + clearStats.setClear(true); + EnvironmentStats stats = env.getStats(clearStats); + + final int firstKeyInSecondBin = IntegerBinding.entryToInt(keys[1]); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Find records in 1st BIN resident. */ + for (int i = FIRST_REC; i < firstKeyInSecondBin; i += 1) { + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(i, IntegerBinding.entryToInt(key)); + assertSame(bins[0], DbInternal.getCursorImpl(cursor).getBIN()); + assertSame(bins[0], root.getTarget(0)); + if (embeddedLNs) { + assertNull(bins[0].getTarget(i)); + } else { + assertNotNull(bins[0].getTarget(i)); + } + stats = env.getStats(clearStats); + assertEquals(0, stats.getNNotResident()); + /* Find prior LN evicted. */ + if (i > 0) { + assertNull(bins[0].getTarget(i - 1)); + } + } + + /* Move to 2nd BIN, find resident. */ + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(bins[1], DbInternal.getCursorImpl(cursor).getBIN()); + assertSame(bins[1], root.getTarget(1)); + if (embeddedLNs) { + assertNull(bins[1].getTarget(0)); + } else { + assertNotNull(bins[1].getTarget(0)); + } + stats = env.getStats(clearStats); + assertEquals(0, stats.getNNotResident()); + /* Find prior BIN evicted. */ + assertNull(root.getTarget(0)); + + /* Move back to 1st BIN, fetch BIN and LN. */ + status = cursor.getPrev(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertNotNull(root.getTarget(0)); + assertNotSame(bins[0], root.getTarget(0)); + bins[0] = (BIN) root.getTarget(0); + if (embeddedLNs) { + assertNull(bins[0].getTarget(firstKeyInSecondBin - 1)); + } else { + assertNotNull(bins[0].getTarget(firstKeyInSecondBin - 1)); + } + assertEquals(firstKeyInSecondBin - 1, IntegerBinding.entryToInt(key)); + stats = env.getStats(clearStats); + if (embeddedLNs) { + assertEquals(0, TestUtils.getNLNsLoaded(stats)); + assertEquals(1, TestUtils.getNBINsLoaded(stats)); + } else { + assertEquals(1, TestUtils.getNLNsLoaded(stats)); + assertEquals(1, TestUtils.getNBINsLoaded(stats)); + } + /* Find next BIN evicted. */ + assertNull(root.getTarget(1)); + + /* When not moving the cursor, nothing is evicted. 
*/ + status = cursor.getCurrent(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + stats = env.getStats(clearStats); + assertEquals(0, stats.getNNotResident()); + + cursor.close(); + clearEvictionTrap(); + close(); + } + + /** + * CacheMode can be set via the Environment, Database and Cursor + * properties. Database CacheMode overrides Environment CacheMode. Cursor + * CacheMode overrides Database and Environment CacheMode. + */ + @Test + public void testModeProperties() { + open(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Env property is not overridden. */ + setMode(CacheMode.KEEP_HOT); + Cursor cursor = db.openCursor(null, cursorConfig); + assertSame(CacheMode.KEEP_HOT, cursor.getCacheMode()); + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.KEEP_HOT, + DbInternal.getCursorImpl(cursor).getCacheMode()); + + /* Then overridden by cursor. */ + cursor.setCacheMode(CacheMode.EVICT_LN); + assertSame(CacheMode.EVICT_LN, cursor.getCacheMode()); + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.EVICT_LN, + DbInternal.getCursorImpl(cursor).getCacheMode()); + + /* Then overridden by ReadOptions. */ + OperationResult result = cursor.get( + key, data, Get.SEARCH, + new ReadOptions().setCacheMode(CacheMode.EVICT_BIN)); + assertNotNull(result); + assertSame(CacheMode.EVICT_BIN, + DbInternal.getCursorImpl(cursor).getCacheMode()); + + /* Cursor default was not changed. */ + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.EVICT_LN, + DbInternal.getCursorImpl(cursor).getCacheMode()); + + /* Then overridden by WriteOptions. */ + result = cursor.put( + key, data, Put.OVERWRITE, + new WriteOptions().setCacheMode(CacheMode.EVICT_BIN)); + assertNotNull(result); + assertSame(CacheMode.EVICT_BIN, + DbInternal.getCursorImpl(cursor).getCacheMode()); + + /* Cursor default was not changed. */ + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.EVICT_LN, + DbInternal.getCursorImpl(cursor).getCacheMode()); + cursor.close(); + + /* Env property does not apply to internal databases. */ + DbTree dbTree = DbInternal.getNonNullEnvImpl(env).getDbTree(); + DatabaseImpl dbImpl = dbTree.getDb(DbTree.ID_DB_ID); + BasicLocker locker = + BasicLocker.createBasicLocker(DbInternal.getNonNullEnvImpl(env)); + cursor = DbInternal.makeCursor(dbImpl, locker, null); + assertSame(CacheMode.DEFAULT, cursor.getCacheMode()); + assertSame(CacheMode.DEFAULT, + DbInternal.getCursorImpl(cursor).getCacheMode()); + cursor.getFirst(new DatabaseEntry(), new DatabaseEntry(), null); + assertSame(CacheMode.DEFAULT, cursor.getCacheMode()); + assertSame(CacheMode.DEFAULT, + DbInternal.getCursorImpl(cursor).getCacheMode()); + cursor.close(); + locker.operationEnd(); + dbTree.releaseDb(dbImpl); + + /* Env property overridden by db property. 
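+         * Effective precedence exercised by this test, highest first (a
+         * null at any level falls through to the next):
+         *
+         *   ReadOptions/WriteOptions > Cursor > Database > Environment
+         *   > CacheMode.DEFAULT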
*/ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setCacheMode(CacheMode.MAKE_COLD); + Database db2 = env.openDatabase(null, "foo2", dbConfig); + cursor = db2.openCursor(null, cursorConfig); + assertSame(CacheMode.MAKE_COLD, cursor.getCacheMode()); + key.setData(new byte[1]); + data.setData(new byte[1]); + status = cursor.put(key, data); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.MAKE_COLD, + DbInternal.getCursorImpl(cursor).getCacheMode()); + + /* Then overridden by cursor. */ + cursor.setCacheMode(CacheMode.EVICT_LN); + assertSame(CacheMode.EVICT_LN, cursor.getCacheMode()); + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.EVICT_LN, + DbInternal.getCursorImpl(cursor).getCacheMode()); + cursor.close(); + + /* Opening another handle on the db will override the property. */ + dbConfig.setCacheMode(CacheMode.DEFAULT); + Database db3 = env.openDatabase(null, "foo2", dbConfig); + cursor = db3.openCursor(null, cursorConfig); + assertSame(CacheMode.DEFAULT, cursor.getCacheMode()); + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.DEFAULT, + DbInternal.getCursorImpl(cursor).getCacheMode()); + cursor.close(); + + /* Open another handle, set mode to null, close immediately. */ + dbConfig.setCacheMode(null); + Database db4 = env.openDatabase(null, "foo2", dbConfig); + db4.close(); + /* Env default is now used. */ + cursor = db.openCursor(null, cursorConfig); + assertSame(CacheMode.KEEP_HOT, cursor.getCacheMode()); + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.KEEP_HOT, + DbInternal.getCursorImpl(cursor).getCacheMode()); + cursor.close(); + + /* Set env property to null, DEFAULT is then used. */ + setMode(null); + cursor = db3.openCursor(null, cursorConfig); + assertSame(CacheMode.DEFAULT, cursor.getCacheMode()); + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertSame(CacheMode.DEFAULT, + DbInternal.getCursorImpl(cursor).getCacheMode()); + cursor.close(); + + db3.close(); + db2.close(); + close(); + } +} diff --git a/test/com/sleepycat/je/evictor/EvictActionTest.java b/test/com/sleepycat/je/evictor/EvictActionTest.java new file mode 100644 index 0000000..be02c2f --- /dev/null +++ b/test/com/sleepycat/je/evictor/EvictActionTest.java @@ -0,0 +1,997 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * This test exercises the act of eviction and determines whether the + * expected nodes have been evicted properly. + */ +public class EvictActionTest extends TestBase { + + private static final boolean DEBUG = false; + private static final int NUM_KEYS = 60; + private static final int NUM_DUPS = 50; + private static final int BIG_CACHE_SIZE = 500000; + private static final int SMALL_CACHE_SIZE = (int) + MemoryBudget.MIN_MAX_MEMORY_SIZE; + + private File envHome = null; + private Environment env = null; + private Database db = null; + private int actualLNs = 0; + private int actualINs = 0; + + public EvictActionTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + IN.ACCUMULATED_LIMIT = 0; + Txn.ACCUMULATED_LIMIT = 0; + super.setUp(); + } + + @After + public void tearDown() { + + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + } + envHome = null; + env = null; + db = null; + } + + @Test + public void testEvict() + throws Throwable { + + doEvict(50, SMALL_CACHE_SIZE, true); + } + + @Test + public void testNoNeedToEvict() + throws Throwable { + + doEvict(80, BIG_CACHE_SIZE, false); + } + + /** + * Evict in very controlled circumstances. Check that we first strip + * BINs and later evict BINs. + */ + private void doEvict(int floor, + int maxMem, + boolean shouldEvict) + throws Throwable { + + openEnv(floor, maxMem); + insertData(NUM_KEYS); + + /* Evict once after insert. */ + evictAndCheck(shouldEvict, NUM_KEYS); + + /* Evict again after verification. */ + evictAndCheck(shouldEvict, NUM_KEYS); + + closeEnv(); + } + + @Test + public void testSetCacheSize() + throws DatabaseException { + + /* Start with large cache size. */ + openEnv(80, BIG_CACHE_SIZE); + EnvironmentMutableConfig config = env.getMutableConfig(); + insertData(NUM_KEYS); + + /* No need to evict. */ + verifyData(NUM_KEYS); + evictAndCheck(false, NUM_KEYS); + + /* Set small cache size. */ + config.setCacheSize(SMALL_CACHE_SIZE); + env.setMutableConfig(config); + + /* Expect eviction.
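+ *
+ * The resize idiom exercised here, in isolation (the sizes are this
+ * test's own constants):
+ *
+ *   EnvironmentMutableConfig config = env.getMutableConfig();
+ *   config.setCacheSize(SMALL_CACHE_SIZE);  // bytes, applied live
+ *   env.setMutableConfig(config);
+ *   env.evictMemory();                      // trim to the new budget
+ *
+ * No environment restart is needed; the next eviction pass honors the
+ * new budget, which evictAndCheck verifies below.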
*/ + verifyData(NUM_KEYS); + evictAndCheck(true, NUM_KEYS); + + /* Set large cache size. */ + config.setCacheSize(BIG_CACHE_SIZE); + env.setMutableConfig(config); + + /* Expect no eviction. */ + verifyData(NUM_KEYS); + evictAndCheck(false, NUM_KEYS); + + closeEnv(); + } + + @Test + public void testSetCachePercent() + throws DatabaseException { + + int nKeys = NUM_KEYS * 500; + + /* Start with large cache size. */ + openEnv(80, BIG_CACHE_SIZE); + EnvironmentMutableConfig config = env.getMutableConfig(); + config.setCacheSize(0); + config.setCachePercent(90); + env.setMutableConfig(config); + insertData(nKeys); + + /* No need to evict. */ + verifyData(nKeys); + evictAndCheck(false, nKeys); + + /* Set small cache percent. */ + config.setCacheSize(0); + config.setCachePercent(1); + env.setMutableConfig(config); + + /* Expect eviction. */ + verifyData(nKeys); + evictAndCheck(true, nKeys); + + /* Set large cache percent. */ + config.setCacheSize(0); + config.setCachePercent(90); + env.setMutableConfig(config); + + /* Expect no eviction. */ + verifyData(nKeys); + evictAndCheck(false, nKeys); + + closeEnv(); + } + + @Test + public void testThreadedCacheSizeChanges() + throws DatabaseException { + + final int N_ITERS = 10; + openEnv(80, BIG_CACHE_SIZE); + insertData(NUM_KEYS); + + JUnitThread writer = new JUnitThread("Writer") { + @Override + public void testBody() + throws DatabaseException { + for (int i = 0; i < N_ITERS; i += 1) { + env.evictMemory(); + /* insertData will update if data exists. */ + insertData(NUM_KEYS); + env.evictMemory(); + EnvironmentMutableConfig config = env.getMutableConfig(); + config.setCacheSize(SMALL_CACHE_SIZE); + env.setMutableConfig(config); + } + } + }; + + JUnitThread reader = new JUnitThread("Reader") { + @Override + public void testBody() + throws DatabaseException { + for (int i = 0; i < N_ITERS; i += 1) { + env.evictMemory(); + verifyData(NUM_KEYS); + env.evictMemory(); + EnvironmentMutableConfig config = env.getMutableConfig(); + config.setCacheSize(BIG_CACHE_SIZE); + env.setMutableConfig(config); + } + } + }; + + writer.start(); + reader.start(); + + try { + writer.finishTest(); + } catch (Throwable e) { + try { + reader.finishTest(); + } catch (Throwable ignore) { } + e.printStackTrace(); + fail(e.toString()); + } + + try { + reader.finishTest(); + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } + + closeEnv(); + } + + @Test + public void testSmallCacheSettings() + throws DatabaseException { + + /* + * With a cache size > 600 KB, the min tree usage should be the default + * value. + */ + openEnv(0, 1200 * 1024); + EnvironmentMutableConfig config = env.getMutableConfig(); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + MemoryBudget mb = envImpl.getMemoryBudget(); + assertEquals(500 * 1024, mb.getMinTreeMemoryUsage()); + + /* + * With a cache size > 1000 KB, evict bytes may be > 500 KB but we + * should not evict over half the cache size. + */ + putLargeData(1200, 1024); + + EnvironmentStats stats = env.getStats(null); + + env.evictMemory(); + stats = env.getStats(null); + assertTrue(stats.getCacheTotalBytes() >= 1200 * 1024 / 2); + + /* + * With a cache size of 500 KB, the min tree usage should be the amount + * available in the cache after the buffer bytes are subtracted. 
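+ *
+ * In other words, the expected value asserted below is:
+ *
+ *   minTreeMemory = cacheSize - bufferBytes
+ *                 = 500 * 1024 - stats.getBufferBytes()
+ *
+ * because the default 500 KB floor no longer fits once log buffer
+ * memory is set aside, whereas the 1200 KB cache above could hold the
+ * default floor outright.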
+ */ + config.setCacheSize(500 * 1024); + env.setMutableConfig(config); + stats = env.getStats(null); + assertEquals(500 * 1024 - stats.getBufferBytes(), + mb.getMinTreeMemoryUsage()); + + /* + * With a cache size of 500 KB, evict bytes may be < 500 KB but we + * should not evict over half the cache size. + */ + putLargeData(500, 1024); + env.evictMemory(); + stats = env.getStats(null); + assertTrue(stats.getCacheTotalBytes() >= 500 * 1024 / 2); + + /* + * Even when using a large amount of non-tree memory, the tree memory + * usage should not go below the minimum. + */ + mb.updateAdminMemoryUsage(500 * 1024); + env.evictMemory(); + stats = env.getStats(null); + long treeBytes = stats.getDataBytes() + + 50 * 1024 /* larger than any LN or IN */; + assertTrue(treeBytes >= mb.getMinTreeMemoryUsage()); + mb.updateAdminMemoryUsage(-(500 * 1024)); + + /* Allow changing the min tree usage explicitly. */ + config.setCacheSize(500 * 1024); + config.setConfigParam("je.tree.minMemory", String.valueOf(50 * 1024)); + env.setMutableConfig(config); + assertEquals(50 * 1024, mb.getMinTreeMemoryUsage()); + + /* The min tree usage may not be larger than the cache. */ + config.setCacheSize(500 * 1024); + config.setConfigParam("je.tree.minMemory", String.valueOf(900 * 1024)); + env.setMutableConfig(config); + stats = env.getStats(null); + assertEquals(500 * 1024 - stats.getBufferBytes(), + mb.getMinTreeMemoryUsage()); + + closeEnv(); + } + + /** + * We now allow eviction of the root IN of a DB, whether the DB is closed + * or not. Check that basic root eviction works. [#13415] + */ + @Test + public void testRootINEviction() + throws DatabaseException { + + DatabaseEntry entry = new DatabaseEntry(new byte[1]); + OperationStatus status; + + openEnv(80, SMALL_CACHE_SIZE); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db1 = env.openDatabase(null, "db1", dbConfig); + + /* Root starts out null. */ + assertTrue(!isRootResident(db1)); + /* It is created when we insert the first record. */ + status = db1.put(null, entry, entry); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(db1)); + /* It is evicted when necessary. */ + forceEviction(); + assertTrue(!isRootResident(db1)); + /* And fetched again when needed. */ + status = db1.get(null, entry, entry, null); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(db1)); + + /* Deferred write DBs work in the same way. */ + dbConfig.setDeferredWrite(true); + Database db2 = env.openDatabase(null, "db2", dbConfig); + status = db2.put(null, entry, entry); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(db2)); + /* It is evicted when necessary. */ + forceEviction(); + assertTrue(!isRootResident(db2)); + /* And fetched again when needed. */ + status = db2.get(null, entry, entry, null); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(db2)); + /* Deferred-write eviction also works when the root is not dirty. */ + db2.sync(); + forceEviction(); + assertTrue(!isRootResident(db2)); + + db2.close(); + db1.close(); + closeEnv(); + } + + /** + * We now allow eviction of the MapLN and higher level INs in the DB + * mapping tree when DBs are closed. Check that basic mapping tree IN + * eviction works. 
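+ *
+ * The residency probe used throughout these tests reduces to this
+ * internal-API sketch (isRootResident is this file's own helper,
+ * defined further below):
+ *
+ *   boolean resident = DbInternal.getDbImpl(db)
+ *                                .getTree()
+ *                                .isRootResident();
+ *
+ * MapLN eviction is verified the same way, but by counting nodes in
+ * the mapping tree rooted at DbTree.ID_DB_ID via checkMappingTree.
+ *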
[#13415] + */ + @Test + public void testMappingTreeEviction() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + DatabaseEntry entry = new DatabaseEntry(new byte[1]); + OperationStatus status; + + EnvironmentConfig envConfig = + getEnvConfig(80, SMALL_CACHE_SIZE, false /*readonly*/); + /* Can't use cleaner/expiration DBs in this sensitive test. */ + DbInternal.setCreateUP(envConfig, false); + DbInternal.setCreateEP(envConfig, false); + openEnv(envConfig); + + /* Baseline mapping tree LNs and INs. */ + final int baseLNs = 1; // Test DB + final int baseINs = 2; // Root IN and BIN + checkMappingTree(baseLNs, baseINs); + forceEviction(); + checkMappingTree(baseLNs, baseINs); + + /* + * Create enough DBs to fill up a BIN in the mapping DB. NODE_MAX is + * configured to be 4 in this test. There are already 2 DBs open. + */ + final int nDbs = 4; + Database[] dbs = new Database[nDbs]; + for (int i = 0; i < nDbs; i += 1) { + dbs[i] = env.openDatabase(null, "db" + i, dbConfig); + status = dbs[i].put(null, entry, entry); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(dbs[i])); + } + final int openLNs = baseLNs + nDbs; // Add 1 MapLN per open DB + final int openINs = baseINs + 1; // Add 1 BIN in the mapping tree + checkMappingTree(openLNs, openINs); + forceEviction(); + checkMappingTree(openLNs, openINs); + + /* Close DBs and force eviction. */ + for (int i = 0; i < nDbs; i += 1) { + dbs[i].close(); + } + forceEviction(); + checkMappingTree(baseLNs, baseINs); + + /* Re-open the DBs, opening each DB twice. */ + Database[] dbs2 = new Database[nDbs]; + for (int i = 0; i < nDbs; i += 1) { + dbs[i] = env.openDatabase(null, "db" + i, dbConfig); + dbs2[i] = env.openDatabase(null, "db" + i, dbConfig); + } + checkMappingTree(openLNs, openINs); + forceEviction(); + checkMappingTree(openLNs, openINs); + + /* Close one handle only, MapLN eviction should not occur. */ + for (int i = 0; i < nDbs; i += 1) { + dbs[i].close(); + } + forceEviction(); + checkMappingTree(openLNs, openINs); + + /* Close the other handles, eviction should occur. */ + for (int i = 0; i < nDbs; i += 1) { + dbs2[i].close(); + } + forceEviction(); + checkMappingTree(baseLNs, baseINs); + + closeEnv(); + } + + /** + * Checks that cleaning performs memory budget calculations correctly for + * evicted databases (non-resident MapLNs). [#21686] + */ + public void testCleaningAfterMappingTreeEviction() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + DatabaseEntry key = new DatabaseEntry(new byte[1000]); + DatabaseEntry data = new DatabaseEntry(new byte[10000]); + OperationStatus status; + + EnvironmentConfig envConfig = + getEnvConfig(0, SMALL_CACHE_SIZE, false /*readonly*/); + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(1024 * 1024)); + envConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, + "75"); + openEnv(envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* + * Create enough DBs to fill 10 or more files and close them so + * they'll be evicted below.
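+ *
+ * Only closed databases are eligible: a DB with any open handle keeps
+ * its MapLN pinned in cache. A sketch of the preconditions relied on
+ * here (ENV_DB_EVICTION is already enabled by getEnvConfig):
+ *
+ *   envConfig.setConfigParam(
+ *       EnvironmentParams.ENV_DB_EVICTION.getName(), "true");
+ *   // ... open DBs, write, then close every handle ...
+ *   forceEviction();  // MapLNs of closed DBs may now be evicted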
+ */ + final int nDbs = 1000; + for (int i = 0; i < nDbs; i += 1) { + Database db = env.openDatabase(null, "db" + i, dbConfig); + status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + status = db.delete(null, key); + assertSame(OperationStatus.SUCCESS, status); + db.close(); + } + env.checkpoint(new CheckpointConfig().setForce(true)); + forceEviction(); + + /* + * Clean and checkpoint repeatedly to create a scenario where a + * non-resident MapLN is migrated by the cleaner [#21686]. With only a + * single call to cleanLog, the MapLN will be resident due to the + * nearby LNs for that database, since the cleaner does read-ahead + * during LN processing. + * + * Prior to the bug fix, TestUtils.validateNodeMemUsage (called by + * forceEviction below) would report a mismatch in the tree admin + * memory usage. + */ + for (int i = 0; i < 10; i += 1) { + env.cleanLog(); + env.checkpoint(new CheckpointConfig().setForce(true)); + forceEviction(); + } + + /* Final check for good measure. */ + TestUtils.validateNodeMemUsage(envImpl, true); + + closeEnv(); + } + + /** + * Checks that a dirty root IN is not evicted in a read-only environment. + * [#16368] + */ + @Test + public void testReadOnlyRootINEviction() + throws DatabaseException { + + OperationStatus status; + + openEnv(80, SMALL_CACHE_SIZE); + + /* Big record will be used to force eviction. */ + DatabaseEntry bigRecordKey = new DatabaseEntry(new byte[1]); + status = db.put(null, bigRecordKey, + new DatabaseEntry(new byte[BIG_CACHE_SIZE])); + assertSame(OperationStatus.SUCCESS, status); + + /* Open DB1 and insert a record to create the root IN. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db1 = env.openDatabase(null, "db1", dbConfig); + + DatabaseEntry smallRecordKey = new DatabaseEntry(new byte[1]); + DatabaseEntry smallData = new DatabaseEntry(new byte[1]); + status = db1.put(null, smallRecordKey, smallData); + assertSame(OperationStatus.SUCCESS, status); + + /* Close environment and re-open it read-only. */ + db1.close(); + closeEnv(); + + EnvironmentConfig envConfig = + getEnvConfig(80, SMALL_CACHE_SIZE, true /*readOnly*/); + openEnv(envConfig); + + dbConfig.setReadOnly(true); + dbConfig.setAllowCreate(false); + db1 = env.openDatabase(null, "db1", dbConfig); + + /* Load a record to load the root IN. */ + status = db1.get(null, smallRecordKey, new DatabaseEntry(), null); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(db1)); + + /* + * Set the root dirty to prevent eviction. In real life, this can only + * be done by recovery in a read-only environment, but that's very + * difficult to simulate precisely. + */ + IN rootIN = DbInternal.getDbImpl(db1). + getTree(). + getRootIN(CacheMode.DEFAULT); + rootIN.setDirty(true); + rootIN.releaseLatch(); + + /* Root should not be evicted while dirty. */ + forceReadOnlyEviction(bigRecordKey); + assertTrue(isRootResident(db1)); + forceReadOnlyEviction(bigRecordKey); + assertTrue(isRootResident(db1)); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Evictor evictor = envImpl.getEvictor(); + + /* When made non-dirty, it can be evicted. */ + rootIN.setDirty(false); + evictor.addBack(rootIN); + + forceReadOnlyEviction(bigRecordKey); + assertTrue(!isRootResident(db1)); + + db1.close(); + closeEnv(); + } + + /** + * Check that opening a database in a transaction and then aborting the + * transaction will decrement the database use count. 
[#13415] + */ + @Test + public void testAbortOpen() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentParams. + ENV_DB_EVICTION.getName(), "true"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + + /* Abort the txn used to open a database. */ + Transaction txn = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database db1 = env.openDatabase(txn, "db1", dbConfig); + DatabaseImpl saveDb = DbInternal.getDbImpl(db1); + txn.abort(); + + /* DB should not be in use and does not have to be closed. */ + assertEquals(false, saveDb.isInUse()); + + /* + * Environment.close will not throw an exception, even though the DB + * has not been closed. The abort took care of cleaning up the handle. + */ + closeEnv(); + + /* + * Try a non-transactional DB open that throws an exception because we + * create it exclusively and it already exists. The use count should + * be decremented. + */ + env = new Environment(envHome, envConfig); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setTransactional(false); + db1 = env.openDatabase(null, "db1", dbConfig); + saveDb = DbInternal.getDbImpl(db1); + try { + env.openDatabase(null, "db1", dbConfig); + fail(); + } catch (DatabaseException e) { + assertTrue(e.getMessage().indexOf("already exists") >= 0); + } + db1.close(); + assertEquals(false, saveDb.isInUse()); + + /* + * Try a non-transactional DB open that throws an exception because we + * change the duplicatesAllowed setting. The use count should be + * decremented. + */ + dbConfig.setSortedDuplicates(true); + dbConfig.setExclusiveCreate(false); + try { + env.openDatabase(null, "db1", dbConfig); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().indexOf("sortedDuplicates") >= 0); + } + assertEquals(false, saveDb.isInUse()); + + closeEnv(); + } + + /** + * Check for the expected number of nodes in the mapping DB. + */ + private void checkMappingTree(int expectLNs, int expectINs) + throws DatabaseException { + + IN root = DbInternal.getNonNullEnvImpl(env). + getDbTree().getDb(DbTree.ID_DB_ID).getTree(). + getRootIN(CacheMode.UNCHANGED); + actualLNs = 0; + actualINs = 0; + countMappingTree(root); + root.releaseLatch(); + assertEquals("LNs", expectLNs, actualLNs); + assertEquals("INs", expectINs, actualINs); + } + + private void countMappingTree(IN parent) { + actualINs += 1; + for (int i = 0; i < parent.getNEntries(); i += 1) { + if (parent.getTarget(i) != null) { + if (parent.getTarget(i) instanceof IN) { + countMappingTree((IN) parent.getTarget(i)); + } else { + actualLNs += 1; + } + } + } + } + + /** + * Returns whether the root IN is currently resident for the given DB. + */ + private boolean isRootResident(Database dbParam) { + return DbInternal.getDbImpl(dbParam) + .getTree() + .isRootResident(); + } + + /** + * Force eviction by inserting a large record in the pre-opened DB. + */ + private void forceEviction() + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + OperationStatus status; + + /* + * Repeat twice to cause a 2nd pass over the INList. The second pass + * evicts BINs that were only stripped of LNs in the first pass. 
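+ *
+ * Conceptually:
+ *
+ *   env.evictMemory();  // pass 1: strips LNs out of the BINs
+ *   env.evictMemory();  // pass 2: evicts the now-stripped BINs
+ *
+ * A single pass would leave the stripped BINs resident, so residency
+ * assertions made after only one pass could observe stale state.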
+ */ + for (int i = 0; i < 2; i += 1) { + Cursor c = db.openCursor(null, null); + status = c.put(new DatabaseEntry(new byte[1]), + new DatabaseEntry(new byte[BIG_CACHE_SIZE])); + assertSame(OperationStatus.SUCCESS, status); + + /* + * Evict while cursor pins LN memory, to ensure eviction of other + * DB INs, including the DB root. When lruOnly=false, root IN + * eviction may not occur unless a cursor is used to pin the LN. + */ + env.evictMemory(); + + status = c.delete(); + assertSame(OperationStatus.SUCCESS, status); + + c.close(); + } + + TestUtils.validateNodeMemUsage(envImpl, true); + } + + /** + * Force eviction by reading a large record. + */ + private void forceReadOnlyEviction(DatabaseEntry key) + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + OperationStatus status; + + /* + * Repeat twice to cause a 2nd pass over the INList. The second pass + * evicts BINs that were only stripped of LNs in the first pass. + */ + for (int i = 0; i < 2; i += 1) { + Cursor c = db.openCursor(null, null); + status = c.getSearchKey(key, new DatabaseEntry(), null); + assertSame(OperationStatus.SUCCESS, status); + + /* + * Evict while cursor pins LN memory, to ensure eviction of other + * DB INs, including the DB root. When lruOnly=false, root IN + * eviction may not occur unless a cursor is used to pin the LN. + */ + env.evictMemory(); + + c.close(); + } + + TestUtils.validateNodeMemUsage(envImpl, true); + } + + /** + * Open an environment and database. + */ + private void openEnv(int floor, + int maxMem) + throws DatabaseException { + + EnvironmentConfig envConfig = + getEnvConfig(floor, maxMem, false /*readonly*/); + openEnv(envConfig); + } + + /** + * Open an environment and database. + */ + private EnvironmentConfig getEnvConfig(int floor, + int maxMem, + boolean readOnly) { + /* Convert floor percentage into bytes. */ + long evictBytes = 0; + if (floor > 0) { + evictBytes = maxMem - ((maxMem * floor) / 100); + } + + /* Make a non-txnal env w/no daemons and small nodes. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(!readOnly); + envConfig.setReadOnly(readOnly); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + if (evictBytes > 0) { + envConfig.setConfigParam(EnvironmentParams. + EVICTOR_EVICT_BYTES.getName(), + (new Long(evictBytes)).toString()); + } + envConfig.setConfigParam(EnvironmentParams. + MAX_MEMORY.getName(), + new Integer(maxMem).toString()); + /* Don't track detail with a tiny cache size. */ + envConfig.setConfigParam + (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam(EnvironmentParams.NUM_LOG_BUFFERS.getName(), + "2"); + /* Enable DB (MapLN) eviction for eviction tests. */ + envConfig.setConfigParam(EnvironmentParams. + ENV_DB_EVICTION.getName(), "true"); + + /* + * Disable critical eviction, we want to test under controlled + * circumstances. + */ + envConfig.setConfigParam(EnvironmentParams. 
+ EVICTOR_CRITICAL_PERCENTAGE.getName(), + "1000"); + + /* Make small nodes */ + envConfig.setConfigParam(EnvironmentParams. + NODE_MAX.getName(), "4"); + envConfig.setConfigParam(EnvironmentParams. + NODE_MAX_DUPTREE.getName(), "4"); + + return envConfig; + } + + private void openEnv(EnvironmentConfig envConfig) + throws DatabaseException { + + env = new Environment(envHome, envConfig); + boolean readOnly = envConfig.getReadOnly(); + + /* Open database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(!readOnly); + dbConfig.setReadOnly(readOnly); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + private void insertData(int nKeys) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < nKeys; i++) { + + IntegerBinding.intToEntry(i, key); + + if ((i % 5) == 0) { + for (int j = 10; j < (NUM_DUPS + 10); j++) { + IntegerBinding.intToEntry(j, data); + db.put(null, key, data); + } + } else { + IntegerBinding.intToEntry(i+1, data); + db.put(null, key, data); + } + } + } + + private void putLargeData(int nKeys, int dataSize) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[dataSize]); + for (int i = 0; i < nKeys; i++) { + IntegerBinding.intToEntry(i, key); + db.put(null, key, data); + } + } + + private void verifyData(int nKeys) + throws DatabaseException { + + /* Full scan of data, make sure we can bring everything back in. */ + Cursor cursor = db.openCursor(null, null); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + + for (int i = 0; i < nKeys; i++) { + if ((i % 5) ==0) { + for (int j = 10; j < (NUM_DUPS + 10); j++) { + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + assertEquals(i, IntegerBinding.entryToInt(key)); + assertEquals(j, IntegerBinding.entryToInt(data)); + } + } else { + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + assertEquals(i, IntegerBinding.entryToInt(key)); + assertEquals(i+1, IntegerBinding.entryToInt(data)); + } + } + + assertEquals(OperationStatus.NOTFOUND, + cursor.getNext(key, data, LockMode.DEFAULT)); + cursor.close(); + } + + private void evictAndCheck(boolean shouldEvict, int nKeys) + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + MemoryBudget mb = envImpl.getMemoryBudget(); + + /* + * The following batches are run in a single evictMemory() call: + * 1st eviction will strip DBINs. 
+ * 2nd will evict DBINs + * 3rd will evict DINs + * 4th will strip BINs + * 5th will evict BINs + * 6th will evict INs + * 7th will evict INs + */ + long preEvictMem = mb.getCacheMemoryUsage(); + TestUtils.validateNodeMemUsage(envImpl, true); + env.evictMemory(); + long postEvictMem = mb.getCacheMemoryUsage(); + + TestUtils.validateNodeMemUsage(envImpl, true); + if (DEBUG) { + System.out.println("preEvict=" + preEvictMem + + " postEvict=" + postEvictMem); + } + + if (shouldEvict) { + assertTrue("preEvict=" + preEvictMem + + " postEvict=" + postEvictMem + + " maxMem=" + mb.getMaxMemory(), + (preEvictMem > postEvictMem)); + } else { + assertTrue("preEvict=" + preEvictMem + + " postEvict=" + postEvictMem, + (preEvictMem == postEvictMem)); + } + + verifyData(nKeys); + TestUtils.validateNodeMemUsage(envImpl, true); + } +} diff --git a/test/com/sleepycat/je/evictor/EvictNNodesStatsTest.java b/test/com/sleepycat/je/evictor/EvictNNodesStatsTest.java new file mode 100644 index 0000000..2adb2c6 --- /dev/null +++ b/test/com/sleepycat/je/evictor/EvictNNodesStatsTest.java @@ -0,0 +1,381 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * This test exercises the act of eviction and determines whether the + * expected nodes have been evicted properly.
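+ *
+ * The stat arithmetic used throughout assumes that root-node evictions
+ * are a subset of all node evictions, i.e.:
+ *
+ *   long nonRootEvicted = stats.getNNodesEvicted()
+ *                       - stats.getNRootNodesEvicted();
+ *
+ * so every "Evicted BINs" assertion compares against that difference.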
+ */ +public class EvictNNodesStatsTest extends TestBase { + + private static final boolean DEBUG = false; + private static final int BIG_CACHE_SIZE = 500000; + private static final int SMALL_CACHE_SIZE = (int) + MemoryBudget.MIN_MAX_MEMORY_SIZE; + private final StatGroup placeholderMBStats = + new StatGroup("placeholder", ""); + + private File envHome = null; + private Environment env = null; + private Database db = null; + private int actualLNs = 0; + private int actualINs = 0; + + public EvictNNodesStatsTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + IN.ACCUMULATED_LIMIT = 0; + Txn.ACCUMULATED_LIMIT = 0; + } + + @After + public void tearDown() { + + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + } + envHome = null; + env = null; + db = null; + } + + /** + * Checks that the counter of evicted MapLNs in the DB mapping tree and + * the counter of evicted BINs work for a regular DB. [#13415] + */ + @Test + public void testRegularDB() + throws DatabaseException { + + /* Initialize an environment and open a test DB. */ + openEnv(80, SMALL_CACHE_SIZE); + + EnvironmentStats stats; + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setClear(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + DatabaseEntry entry = new DatabaseEntry(new byte[1]); + OperationStatus status; + + /* Baseline mapping tree LNs and INs. */ + final int baseLNs = 1; // Test DB + final int baseINs = 2; // Root IN and BIN + checkMappingTree(baseLNs, baseINs); + + /* + * Create enough DBs to fill up a BIN in the mapping DB. NODE_MAX is + * configured to be 4 in this test. There are already 2 DBs open. + */ + final int nDbs = 4; + Database[] dbs = new Database[nDbs]; + for (int i = 0; i < nDbs; i += 1) { + dbs[i] = env.openDatabase(null, "db" + i, dbConfig); + status = dbs[i].put(null, entry, entry); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(dbs[i])); + } + checkMappingTree(baseLNs + nDbs /*Add 1 MapLN per open DB*/, + baseINs + 1 /*Add 1 BIN in the mapping tree*/); + + /* Close DBs and force eviction. */ + for (int i = 0; i < nDbs; i += 1) { + dbs[i].close(); + } + + forceEviction(); + /* Load Stats. */ + stats = env.getStats(statsConfig); + + assertEquals("Evicted MapLNs", + nDbs, + stats.getNRootNodesEvicted()); + assertEquals("Evicted BINs", + nDbs + 3, // 2 BINs for Name DB, 1 for Mapping DB, + stats.getNNodesEvicted() - stats.getNRootNodesEvicted()); + checkMappingTree(baseLNs, baseINs); + + /* + * Sneak in some testing of the stat getter calls. The actual + * value we're comparing to is not that important, just update them + * if the test changes by printing System.out.println(stats) and + * setting appropriate comparison values. This is a way to make + * sure the getter works.
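+ *
+ * The clearing pattern that makes these deltas meaningful:
+ *
+ *   StatsConfig statsConfig = new StatsConfig();
+ *   statsConfig.setClear(true);
+ *   EnvironmentStats stats = env.getStats(statsConfig); // read, then zero
+ *
+ * Each getStats call therefore reports only the activity since the
+ * previous call, never cumulative totals.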
+ */ + assertEquals(0, stats.getNBytesEvictedCacheMode()); + assertEquals(0, stats.getNBytesEvictedEvictorThread()); + assertTrue(stats.getNBytesEvictedCritical() > 0); + assertTrue(stats.getNBytesEvictedManual() == 0); + + assertEquals(11, stats.getNNodesEvicted()); + + assertTrue(stats.getNBINsFetch() > 0); + assertEquals(0, stats.getNBINsFetchMiss()); + assertEquals(0, stats.getNUpperINsFetch()); + assertEquals(0, stats.getNUpperINsFetchMiss()); + assertEquals(0, stats.getNThreadUnavailable()); + assertTrue(stats.getNLNsFetch() > 0); + assertEquals(0, stats.getNLNsFetchMiss()); + assertTrue(stats.getNCachedBINs() > 0); + assertTrue(stats.getNCachedUpperINs() > 0); + + closeEnv(); + } + + /** + * Checks that the counter of evicted MapLNs in the DB mapping tree and + * the counter of evicted BINs work for a deferred write DB. + * [#13415] + */ + @Test + public void testDeferredWriteDB() + throws DatabaseException { + + /* Initialize an environment and open a test DB. */ + openEnv(80, SMALL_CACHE_SIZE); + + EnvironmentStats stats; + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setClear(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + DatabaseEntry entry = new DatabaseEntry(new byte[1]); + OperationStatus status; + + /* Baseline mapping tree LNs and INs. */ + final int baseLNs = 1; // Test DB + final int baseINs = 2; // Root IN and BIN + + checkMappingTree(baseLNs, baseINs); + + /* Deferred write DBs work in the same way. */ + dbConfig.setDeferredWrite(true); + Database db2 = env.openDatabase(null, "db2", dbConfig); + status = db2.put(null, entry, entry); + assertSame(OperationStatus.SUCCESS, status); + assertTrue(isRootResident(db2)); + checkMappingTree(baseLNs + 1, baseINs); // Deferred Write DB. + + /* Root eviction is allowed, even when the root is dirty. */ + forceEviction(); + /* Load Stats. */ + stats = env.getStats(statsConfig); + assertEquals("Evicted MapLNs", + 1, // Test DB + stats.getNRootNodesEvicted()); + assertEquals("Evicted BINs", + 2, // 1 BIN for Name DB, 1 for Deferred Write DB. + stats.getNNodesEvicted() - stats.getNRootNodesEvicted()); + assertTrue(!isRootResident(db2)); + checkMappingTree(baseLNs + 1, baseINs); // Deferred Write DB. + + db2.sync(); + forceEviction(); + /* Load Stats. */ + stats = env.getStats(statsConfig); + assertEquals("Evicted MapLNs", + 1, // Root eviction. + stats.getNRootNodesEvicted()); + assertEquals("Evicted BINs", + 0, + stats.getNNodesEvicted() - stats.getNRootNodesEvicted()); + assertTrue(!isRootResident(db2)); + checkMappingTree(baseLNs + 1, baseINs); // Deferred Write DB. + + db2.close(); + forceEviction(); + /* Load Stats. */ + stats = env.getStats(statsConfig); + assertEquals("Evicted MapLNs", + 1, // Root eviction. + stats.getNRootNodesEvicted()); + assertEquals("Evicted BINs", + 0, + stats.getNNodesEvicted() - stats.getNRootNodesEvicted()); + + checkMappingTree(baseLNs, baseINs); + + closeEnv(); + } + + private void forceEviction() + throws DatabaseException { + + OperationStatus status; + + /* + * Repeat twice to cause a 2nd pass over the INList. The second pass + * evicts BINs that were only stripped of LNs in the first pass. + */ + for (int i = 0; i < 2; i += 1) { + /* Fill up the cache to trigger eviction. */ + status = db.put(null, new DatabaseEntry(new byte[1]), + new DatabaseEntry(new byte[BIG_CACHE_SIZE])); + assertSame(OperationStatus.SUCCESS, status); + + /* Do a manual eviction call.
*/ + env.evictMemory(); + + status = db.delete(null, new DatabaseEntry(new byte[1])); + assertSame(OperationStatus.SUCCESS, status); + } + } + + /** + * Check for the expected number of nodes in the mapping DB. + */ + private void checkMappingTree(int expectLNs, int expectINs) + throws DatabaseException { + + IN root = DbInternal.getNonNullEnvImpl(env). + getDbTree().getDb(DbTree.ID_DB_ID).getTree(). + getRootIN(CacheMode.UNCHANGED); + actualLNs = 0; + actualINs = 0; + countMappingTree(root); + root.releaseLatch(); + assertEquals("LNs", expectLNs, actualLNs); + assertEquals("INs", expectINs, actualINs); + } + + private void countMappingTree(IN parent) { + actualINs += 1; + for (int i = 0; i < parent.getNEntries(); i += 1) { + if (parent.getTarget(i) != null) { + if (parent.getTarget(i) instanceof IN) { + countMappingTree((IN) parent.getTarget(i)); + } else { + actualLNs += 1; + } + } + } + } + + /** + * Returns whether the root IN is currently resident for the given DB. + */ + private boolean isRootResident(Database dbParam) { + return DbInternal.getDbImpl(dbParam). + getTree(). + isRootResident(); + } + + /** + * Open an environment and database. + */ + private void openEnv(int floor, + int maxMem) + throws DatabaseException { + + /* Convert floor percentage into bytes. */ + long evictBytes = maxMem - ((maxMem * floor) / 100); + + /* Make a non-txnal env w/no daemons and small nodes. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + EVICTOR_EVICT_BYTES.getName(), + (new Long(evictBytes)).toString()); + envConfig.setConfigParam(EnvironmentParams. + MAX_MEMORY.getName(), + new Integer(maxMem).toString()); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + /* Enable DB (MapLN) eviction for eviction tests. */ + envConfig.setConfigParam(EnvironmentParams. + ENV_DB_EVICTION.getName(), "true"); + /* Can't use expiration/cleaner DBs in this sensitive test. */ + DbInternal.setCreateEP(envConfig, false); + DbInternal.setCreateUP(envConfig, false); + + /* Make small nodes */ + envConfig.setConfigParam(EnvironmentParams. + NODE_MAX.getName(), "4"); + envConfig.setConfigParam(EnvironmentParams. + NODE_MAX_DUPTREE.getName(), "4"); + + env = new Environment(envHome, envConfig); + + /* Open a database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } +} diff --git a/test/com/sleepycat/je/evictor/EvictSelectionTest.java b/test/com/sleepycat/je/evictor/EvictSelectionTest.java new file mode 100644 index 0000000..3a6c193 --- /dev/null +++ b/test/com/sleepycat/je/evictor/EvictSelectionTest.java @@ -0,0 +1,239 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class EvictSelectionTest extends TestBase { + private final File envHome; + private final int scanSize = 5; + private Environment env; + private EnvironmentImpl envImpl; + + public EvictSelectionTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Override + @After + public void tearDown() { + + try { + if (env != null) { + env.close(); + } + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + env = null; + envImpl = null; + } + + static class EvictProfile implements TestHook { + /* Keep a list of candidate nodes. */ + private final List candidates = new ArrayList(); + + /* Remember that this node was targeted. */ + public void doHook(IN target) { + candidates.add(Long.valueOf(target.getNodeId())); + } + + public List getCandidates() { + return candidates; + } + + public void hookSetup() { + candidates.clear(); + } + + public void doIOHook() {}; + public void doHook() {} + + public IN getHookValue() { + return null; + }; + } + + /* + * We might call evict on an empty INList if the cache is set very low + * at recovery time. + */ + @Test + public void testEmptyINList() + throws Throwable { + + /* Create an environment, database, and insert some data. */ + initialize(true); + + env.close(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + env = new Environment(envHome, envConfig); + env.close(); + env = null; + } + + /* + * Create an environment, database, and insert some data. + */ + private void initialize(boolean makeDatabase) + throws DatabaseException { + + /* Environment */ + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_EVICTOR.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CLEANER.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CHECKPOINTER.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams. 
+ ENV_RUN_INCOMPRESSOR.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams. + NODE_MAX.getName(), "4"); + + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + + if (makeDatabase) { + /* Database */ + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + /* Insert enough keys to get an odd number of nodes. */ + + DatabaseEntry keyAndData = new DatabaseEntry(); + for (int i = 0; i < 110; i++) { + IntegerBinding.intToEntry(i, keyAndData); + db.put(null, keyAndData, keyAndData); + } + + db.close(); + } + } + + /** + * Tests a fix for an eviction bug that could cause an OOME in a read-only + * environment. [#17590] + * + * Before the bug fix, a dirty IN prevented eviction from working if the + * dirty IN was returned by Evictor.selectIN repeatedly, only to be rejected + * by Evictor.evictIN because it was dirty. A dirty IN was considered as a + * target and sometimes selected by selectIN as a way to avoid an infinite + * loop when all INs are dirty. This is unnecessary, since a condition was + * added to cause the selectIN loop to terminate when all INs in the INList + * have been iterated. Now, with the fix, a dirty IN in a read-only + * environment is never considered as a target or returned by selectIN. + * + * The OOME was reproduced with a simple test that uses a cursor to iterate + * through 100k records, each 100k in size, in a read-only environment with + * a 16m heap. However, reproducing the problem in a fast-running unit + * test is very difficult. Instead, since the code change only impacts a + * read-only environment, this unit test only ensures that the fix does not + * cause an infinite loop when all nodes are dirty. + */ + @Test + public void testReadOnlyAllDirty() + throws Throwable { + + /* Create an environment, database, and insert some data. */ + initialize(true /*makeDatabase*/); + env.close(); + env = null; + envImpl = null; + + /* Open the environment read-only. */ + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setReadOnly(true); + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + + /* Load everything into cache. */ + { + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + final Database db = env.openDatabase(null, "foo", dbConfig); + final Cursor cursor = db.openCursor(null, null); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = cursor.getFirst(key, data, null); + while (status == OperationStatus.SUCCESS) { + status = cursor.getNext(key, data, null); + } + cursor.close(); + db.close(); + } + + /* Artificially make all nodes dirty in a read-only environment. */ + for (IN in : envImpl.getInMemoryINs()) { + in.setDirty(true); + } + + /* + * Force an eviction. No nodes will be selected for an eviction, + * because all nodes are dirty. If the (nIterated < maxNodesToIterate) + * condition is removed from the selectIN loop, an infinite loop will + * occur.
+ */ + final EnvironmentMutableConfig mutableConfig = env.getMutableConfig(); + mutableConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + env.setMutableConfig(mutableConfig); + final StatsConfig clearStats = new StatsConfig(); + clearStats.setClear(true); + EnvironmentStats stats = env.getStats(clearStats); + env.evictMemory(); + stats = env.getStats(clearStats); + assertEquals(0, stats.getNNodesSelected()); + + env.close(); + env = null; + envImpl = null; + } +} diff --git a/test/com/sleepycat/je/evictor/EvictionThreadPoolTest.java b/test/com/sleepycat/je/evictor/EvictionThreadPoolTest.java new file mode 100644 index 0000000..a6f901c --- /dev/null +++ b/test/com/sleepycat/je/evictor/EvictionThreadPoolTest.java @@ -0,0 +1,180 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.concurrent.ThreadPoolExecutor; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Tests the state of the background eviction thread pool. + */ +public class EvictionThreadPoolTest extends TestBase { + private static final String DB_NAME = "testDB"; + private static final int ONE_MB = 1 << 20; + private static final int MIN_DATA_SIZE = 50 * 1024; + private static final int ENTRY_DATA_SIZE = 500; + + private final File envHome; + private Environment env; + private Database db; + + public EvictionThreadPoolTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + if (db != null) { + db.close(); + } + + if (env != null) { + env.close(); + } + db = null; + env = null; + } + + @Test + public void testPoolState() + throws Exception { + + final int corePoolSize = 3; + + openEnv(corePoolSize); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + ThreadPoolExecutor pool = envImpl.getEvictor().getThreadPool(); + DbConfigManager configManager = envImpl.getConfigManager(); + + /* Check that the configurations for the pool are applied. */ + assertEquals + (configManager.getInt(EnvironmentParams.EVICTOR_CORE_THREADS), + pool.getCorePoolSize()); + assertEquals + (configManager.getInt(EnvironmentParams.EVICTOR_MAX_THREADS), + pool.getMaximumPoolSize()); + + /* Invoke the eviction threads.
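+ *
+ * The pool is sized by two mutable parameters, sketched here (the
+ * string names correspond to EnvironmentParams.EVICTOR_CORE_THREADS
+ * and EVICTOR_MAX_THREADS; only coreThreads is set by this test):
+ *
+ *   envConfig.setConfigParam("je.evictor.coreThreads", "3");
+ *   envConfig.setConfigParam("je.evictor.maxThreads", "10");
+ *
+ * alert() queues work on the pool, growing it one thread at a time up
+ * to the core size, as asserted below.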
*/ + for (int i = 1; i <= corePoolSize; i++) { + envImpl.getEvictor().alert(); + assertEquals(pool.getPoolSize(), i); + } + + /* Do a checkpoint to invoke daemon eviction. */ + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + + /* The pool size shouldn't change because no eviction should happen. */ + assertEquals(pool.getPoolSize(), corePoolSize); + + /* Do database operations to invoke critical eviction. */ + int records = write(ONE_MB * 3); + readEvenly(records); + + /* Do checkpoint to invoke daemon eviction. */ + env.checkpoint(ckptConfig); + + /* Because of heavy eviction work, the pool should be full. */ + assertEquals(pool.getPoolSize(), pool.getMaximumPoolSize()); + + EnvironmentStats stats = env.getStats(null); + /* There should be some threads rejected by the pool. */ + assertTrue(stats.getNThreadUnavailable() > 0); + + /* + * Most of the eviction should be done by the pool threads and by + * critical eviction. + */ + assertTrue(stats.getNBytesEvictedCritical() > 0 || + stats.getNBytesEvictedEvictorThread() > 0); + } + + /* Open the Environment and database for this test. */ + private void openEnv(int corePoolSize) + throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setCacheSize(ONE_MB); + envConfig.setConfigParam("je.tree.minMemory", + String.valueOf(MIN_DATA_SIZE)); + /* Configure the core pool threads. */ + envConfig.setConfigParam("je.evictor.coreThreads", + String.valueOf(corePoolSize)); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Writes enough records to cause at least minSizeToWrite bytes to be + * used in the cache. + */ + private int write(int minSizeToWrite) + throws Exception { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]); + int i; + for (i = 0; i < minSizeToWrite / ENTRY_DATA_SIZE; i += 1) { + IntegerBinding.intToEntry(i, key); + db.put(null, key, data); + } + return i; + } + + /** + * Reads all records in key order, repeating the scan twice to give + * the LRU a fighting chance. + */ + private void readEvenly(int nRecs) + throws Exception { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Repeat reads twice to give the LRU a fighting chance. */ + for (int repeat = 0; repeat < 2; repeat += 1) { + for (int i = 0; i < nRecs; i += 1) { + IntegerBinding.intToEntry(i, key); + db.get(null, key, data, null); + } + } + } +} diff --git a/test/com/sleepycat/je/evictor/LRUTest.java b/test/com/sleepycat/je/evictor/LRUTest.java new file mode 100644 index 0000000..60c61c9 --- /dev/null +++ b/test/com/sleepycat/je/evictor/LRUTest.java @@ -0,0 +1,423 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.Arrays; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Tests that the LRU algorithm is accurate. + */ +public class LRUTest extends TestBase { + + private static final int N_DBS = 5; + private static final int ONE_MB = 1 << 20; + private static final int DB_CACHE_SIZE = ONE_MB; + private static final int ENV_CACHE_SIZE = N_DBS * DB_CACHE_SIZE; + private static final int MIN_DATA_SIZE = 50 * 1024; + private static final int LRU_ACCURACY_PCT = 70; + private static final int ENTRY_DATA_SIZE = 500; + + private File envHome; + private Environment env; + private Database[] dbs = new Database[N_DBS]; + + public LRUTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + } + envHome = null; + env = null; + dbs = null; + } + + private void open() { + open(null); + } + + private void open(CacheMode cacheMode) { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setCacheSize(ENV_CACHE_SIZE); + envConfig.setCacheMode(cacheMode); + envConfig.setConfigParam( + EnvironmentConfig.TREE_MIN_MEMORY, String.valueOf(MIN_DATA_SIZE)); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + + env = new Environment(envHome, envConfig); + + TestUtils.adjustCacheSizeForOffHeapCache(env); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + for (int i = 0; i < dbs.length; i += 1) { + dbs[i] = env.openDatabase(null, "foo-" + i, dbConfig); + } + } + + private void close() { + for (int i = 0; i < N_DBS; i += 1) { + if (dbs[i] != null) { + dbs[i].close(); + dbs[i] = null; + } + } + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testBaseline() { + open(CacheMode.DEFAULT); + doTestBaseline(); + close(); + } + + private void doTestBaseline() { + + for (int i = 0; i < N_DBS; i += 1) { + write(dbs[i], DB_CACHE_SIZE); + } + long[] results = new long[100]; + for (int repeat = 0; repeat < 100; repeat += 1) { + + /* Read all DBs evenly. 
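+ *
+ * getDatabaseCacheBytes (used below) is a private helper of this
+ * test; a plausible sketch of such a measurement, assuming the
+ * internal IN.getInMemorySize() and IN.getDatabase() APIs:
+ *
+ *   long bytes = 0;
+ *   DatabaseImpl dbImpl = DbInternal.getDbImpl(db);
+ *   INList ins = DbInternal.getNonNullEnvImpl(env).getInMemoryINs();
+ *   for (IN in : ins) {
+ *       if (in.getDatabase() == dbImpl) {
+ *           bytes += in.getInMemorySize();  // budgeted size of each node
+ *       }
+ *   }
+ *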
*/ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + boolean done = false; + for (int i = 0; !done; i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_DBS; j += 1) { + if (dbs[j].get(null, key, data, null) != + OperationStatus.SUCCESS) { + done = true; + } + } + } + + /* + * Check that each DB uses approximately equal portions of the + * cache. + */ + StringBuilder buf = new StringBuilder(); + long low = Long.MAX_VALUE; + long high = 0; + for (int i = 0; i < N_DBS; i += 1) { + long val = getDatabaseCacheBytes(dbs[i], true); + buf.append(" db=" + i + " bytes=" + val); + if (low > val) { + low = val; + } + if (high < val) { + high = val; + } + } + long pct = (low * 100) / high; + if (repeat > 75) { + assertTrue( + "failed repeat=" + repeat + " " + + " with pct=" + pct + buf, + pct >= LRU_ACCURACY_PCT); + } + results[repeat] = pct; + } + Arrays.sort(results); +// System.out.println(Arrays.toString(results)); + } + + @Test + public void testCacheMode_KEEP_HOT() { + open(CacheMode.KEEP_HOT); + doTestBaseline(); + close(); + } + + @Test + public void testCacheMode_UNCHANGED() { + + open(); + doTestCacheMode_UNCHANGED(CacheMode.UNCHANGED); + close(); + } + + private void doTestCacheMode_UNCHANGED(CacheMode cacheMode) { + for (int i = 0; i < N_DBS; i += 1) { + write(dbs[i], DB_CACHE_SIZE); + } + long[] results = new long[100]; + for (int repeat = 0; repeat < 100; repeat += 1) { + + /* Read all DBs evenly. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + boolean done = false; + Cursor[] cursors = new Cursor[N_DBS]; + for (int j = 0; j < N_DBS; j++) { + cursors[j] = dbs[j].openCursor(null, null); + } + cursors[0].setCacheMode(cacheMode); + cursors[1].setCacheMode(cacheMode); + cursors[2].setCacheMode(CacheMode.DEFAULT); + cursors[3].setCacheMode(CacheMode.DEFAULT); + cursors[4].setCacheMode(CacheMode.DEFAULT); + for (int i = 0; !done; i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_DBS; j += 1) { + if (cursors[j].getSearchKey(key, data, null) != + OperationStatus.SUCCESS) { + done = true; + } + } + } + + for (int j = 0; j < N_DBS; j++) { + cursors[j].close(); + } + + /* + * Check that db[0] and db[1] use less than the other three. + */ + StringBuilder buf = new StringBuilder(); + long[] dbBytes = new long[N_DBS]; + for (int i = 0; i < N_DBS; i += 1) { + dbBytes[i] = getDatabaseCacheBytes(dbs[i], true); + buf.append(" db=" + i + " bytes=" + dbBytes[i]); + } +// System.out.println(buf); + assertTrue(dbBytes[0] < dbBytes[2]); + assertTrue(dbBytes[0] < dbBytes[3]); + assertTrue(dbBytes[0] < dbBytes[4]); + assertTrue(dbBytes[1] < dbBytes[2]); + assertTrue(dbBytes[1] < dbBytes[3]); + assertTrue(dbBytes[1] < dbBytes[4]); + } + Arrays.sort(results); + //System.out.println(Arrays.toString(results)); + } + + @Test + public void testCacheMode_MAKE_COLD() { + open(); + doTestCacheMode_UNCHANGED(CacheMode.MAKE_COLD); + close(); + } + + @Test + public void testCacheMode_EVICT_LN() { + + open(); + for (int i = 0; i < N_DBS; i += 1) { + write(dbs[i], DB_CACHE_SIZE); + } + long[] results = new long[100]; + for (int repeat = 0; repeat < 100; repeat += 1) { + + /* Read all DBs evenly. 
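As doTestCacheMode_UNCHANGED above illustrates, CacheMode is a per-cursor setting, which is what lets a single environment pit two modes against each other. A minimal usage sketch (variable names hypothetical):

    Cursor cold = db.openCursor(null, null);
    cold.setCacheMode(CacheMode.UNCHANGED); // reads leave LRU state as-is
    Cursor hot = db.openCursor(null, null);
    hot.setCacheMode(CacheMode.DEFAULT);    // reads make nodes recently used
    /* After identical reads through both cursors, data touched only via
       "cold" is the first to go under cache pressure. */
    cold.close();
    hot.close();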
*/ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + boolean done = false; + Cursor[] cursors = new Cursor[N_DBS]; + for (int j = 0; j < N_DBS; j++) { + cursors[j] = dbs[j].openCursor(null, null); + } + cursors[0].setCacheMode(CacheMode.EVICT_LN); + cursors[1].setCacheMode(CacheMode.EVICT_LN); + cursors[2].setCacheMode(CacheMode.UNCHANGED); + cursors[3].setCacheMode(CacheMode.UNCHANGED); + cursors[4].setCacheMode(CacheMode.UNCHANGED); + for (int i = 0; !done; i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_DBS; j += 1) { + if (cursors[j].getSearchKey(key, data, null) != + OperationStatus.SUCCESS) { + done = true; + } + } + } + + for (int j = 0; j < N_DBS; j++) { + cursors[j].close(); + } + + /* + * Check that db[0] and db[1] use less than the other three. + */ + StringBuilder buf = new StringBuilder(); + long[] dbBytes = new long[N_DBS]; + for (int i = 0; i < N_DBS; i += 1) { + dbBytes[i] = getDatabaseCacheBytes(dbs[i], false); + buf.append(" db=" + i + " bytes=" + dbBytes[i]); + } + assertTrue(dbBytes[0] < dbBytes[2]); + assertTrue(dbBytes[0] < dbBytes[3]); + assertTrue(dbBytes[0] < dbBytes[4]); + assertTrue(dbBytes[1] < dbBytes[2]); + assertTrue(dbBytes[1] < dbBytes[3]); + assertTrue(dbBytes[1] < dbBytes[4]); + //System.out.println(buf); + } + Arrays.sort(results); + //System.out.println(Arrays.toString(results)); + + close(); + } + + @Test + public void testCacheMode_EVICT_BIN() { + + open(); + for (int i = 0; i < N_DBS; i += 1) { + write(dbs[i], DB_CACHE_SIZE); + } + long[] results = new long[100]; + for (int repeat = 0; repeat < 100; repeat += 1) { + + /* Read all DBs evenly. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + boolean done = false; + Cursor[] cursors = new Cursor[N_DBS]; + for (int j = 0; j < N_DBS; j++) { + cursors[j] = dbs[j].openCursor(null, null); + } + cursors[0].setCacheMode(CacheMode.EVICT_BIN); + cursors[1].setCacheMode(CacheMode.EVICT_BIN); + cursors[2].setCacheMode(CacheMode.EVICT_LN); + cursors[3].setCacheMode(CacheMode.EVICT_LN); + cursors[4].setCacheMode(CacheMode.EVICT_LN); + for (int i = 0; !done; i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_DBS; j += 1) { + if (cursors[j].getSearchKey(key, data, null) != + OperationStatus.SUCCESS) { + done = true; + } + } + } + + for (int j = 0; j < N_DBS; j++) { + cursors[j].close(); + } + + /* + * Check that db[0] and db[1] use less than the other three. + */ + final StringBuilder buf = new StringBuilder("repeat="); + buf.append(repeat); + final long[] dbBytes = new long[N_DBS]; + for (int i = 0; i < N_DBS; i += 1) { + dbBytes[i] = getDatabaseCacheBytes(dbs[i], false); + buf.append(" db=").append(i).append(" bytes="). 
+ append(dbBytes[i]); + } + final String msg = buf.toString(); + + assertTrue(msg, dbBytes[0] < dbBytes[2]); + assertTrue(msg, dbBytes[0] < dbBytes[3]); + assertTrue(msg, dbBytes[0] < dbBytes[4]); + assertTrue(msg, dbBytes[1] < dbBytes[2]); + assertTrue(msg, dbBytes[1] < dbBytes[3]); + assertTrue(msg, dbBytes[1] < dbBytes[4]); + //System.out.println(buf); + } + Arrays.sort(results); + //System.out.println(Arrays.toString(results)); + + close(); + } + + private long getDatabaseCacheBytes(Database db, boolean addOffHeap) { + + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + final OffHeapCache ohCache = dbImpl.getEnv().getOffHeapCache(); + final INList ins = dbImpl.getEnv().getInMemoryINs(); + + long total = 0; + + for (final IN in : ins) { + if (in.getDatabase() != dbImpl) { + continue; + } + + total += in.getInMemorySize(); + + if (addOffHeap) { + total += ohCache.getINSize(in); + } + } + + return total; + } + + /** + * Writes enough records to the given database to cause at + * least minSizeToWrite bytes to be used in the cache. + */ + private int write(Database db, int minSizeToWrite) { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]); + int i; + for (i = 0; i < minSizeToWrite / ENTRY_DATA_SIZE; i += 1) { + IntegerBinding.intToEntry(i, key); + db.put(null, key, data); + } + return i; + } +} diff --git a/test/com/sleepycat/je/evictor/MeasureOffHeapMemory.java b/test/com/sleepycat/je/evictor/MeasureOffHeapMemory.java new file mode 100644 index 0000000..7c71260 --- /dev/null +++ b/test/com/sleepycat/je/evictor/MeasureOffHeapMemory.java @@ -0,0 +1,336 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Random; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.je.util.TestUtils; + +/** + * Measure the amount of actual (RSS) memory used by the off-heap allocator. + * + * Usage: + * java com.sleepycat.je.evictor.MeasureOffHeapMemory \ + * number-of-blocks + * min-block-size + * max-block-size + * number-of-threads + * create-new-threads (true or false) + * + * Allocates a given number of blocks. Then it loops forever freeing and + * allocating every other block. Each malloc uses a random size between the + * given min/max sizes (if equal, the size is fixed). + * + * Blocks allocated by one thread are freed by another, to mimic JE off-heap + * cache usage. Half the blocks are allocated at the beginning and never freed, + * to mimic hot data that stays in cache. + * + * If create-new-threads is true, each time number-of-blocks is allocated, all + * threads are discarded and new threads are created.
This is a worst case + * scenario for malloc's per-thread allocation pools, which can cause behavior + * that looks like a memory leak when blocks outlive the thread that allocated + * them. + * + * The Java heap must be large enough to hold an array of block pointers, each + * of which uses 8 bytes, so 8 * number-of-blocks. + * + * Example run to use around 220 GB: + * java -Xmx4g -cp test.jar com.sleepycat.je.evictor.MeasureOffHeapMemory \ + * 400000000 40 1040 4 false + * + * Output includes the estimated space used by off-heap blocks (JE's cache + * usage statistic) and the actual memory usage calculated by subtracting the + * initial RSS from the current RSS. Ops/s is also included, where one op is + * an alloc and a free; however, each op is just an alloc during ramp-up. + * + * It runs forever, with output every number-of-blocks ops. To stop it, kill + * the process. + */ +public class MeasureOffHeapMemory { + + private static final DateFormat DATE_FORMAT = + new SimpleDateFormat("MM-dd HH:mm:ss.SSS"); + private static final Date DATE = new Date(); + + public static void main(final String[] args) { + try { + new MeasureOffHeapMemory(args).runTest(); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + private final long[] ids; + private final int minSize; + private final int range; + private final int nThreads; + private final int blocksPerThread; + private final OffHeapAllocator allocator; + private final String[] psCommand; + private final Random rnd; + private final boolean createNewThreads; + private CyclicBarrier barrier; + + private MeasureOffHeapMemory(final String[] args) throws Exception { + + ids = new long[Integer.parseInt(args[0])]; + minSize = Integer.parseInt(args[1]); + final int maxSize = Integer.parseInt(args[2]); + createNewThreads = Boolean.parseBoolean(args[4]); + range = maxSize - minSize; + nThreads = Integer.parseInt(args[3]); + blocksPerThread = ids.length / nThreads; + + if (ids.length % nThreads != 0) { + throw new IllegalArgumentException( + "Number of blocks not evenly divisible by number of threads"); + } + + final OffHeapAllocatorFactory factory = new OffHeapAllocatorFactory(); + allocator = factory.getDefaultAllocator(); + + psCommand = new String[] { "ps", "o", "rss=,vsz=", "" + getPid() }; + rnd = new Random(123); + } + + private void runTest() throws Exception { + + /* Causes the initial RSS size to include the heap size. */ + fillTheHeap(); + + final long[] startRssAndVsz = new long[2]; + final long[] endRssAndVsz = new long[2]; + getRssAndVsz(startRssAndVsz); + + System.out.format( + "Initial RSS: %,d VSZ: %,d\n", + startRssAndVsz[0], startRssAndVsz[1]); + + final AtomicLong startTime = new AtomicLong(0L); + final AtomicBoolean rampUp = new AtomicBoolean(true); + + final Runner[] threads = new Runner[nThreads]; + + barrier = new CyclicBarrier(nThreads, new Runnable() { + @Override + public void run() { + try { + final long endTime = System.currentTimeMillis(); + getRssAndVsz(endRssAndVsz); + + final long rate = + (ids.length * (1000L / 2)) / + (endTime - startTime.get()); + + System.out.format( + "%s Estimate: %,d RSS: %,d VSZ: %,d Ops/s: %,d %s\n", + getDate(endTime), + allocator.getUsedBytes(), + endRssAndVsz[0] - startRssAndVsz[0], + endRssAndVsz[1] - startRssAndVsz[1], + rate, + rampUp.get() ? 
"(ramp-up)" : ""); + + rampUp.set(false); + + if (createNewThreads) { + barrier.reset(); + startTime.set(System.currentTimeMillis()); + + for (int i = 0; i < nThreads; i += 1) { + threads[i] = new Runner( + threads[i].getRangeNumber(), false /*doInit*/); + threads[i].start(); + } + } else { + for (int i = 0; i < nThreads; i += 1) { + threads[i].bumpRange(); + } + startTime.set(System.currentTimeMillis()); + barrier.reset(); + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + }); + + startTime.set(System.currentTimeMillis()); + + for (int i = 0; i < nThreads; i += 1) { + threads[i] = new Runner(i, true /*doInit*/); + threads[i].start(); + } + + Thread.sleep(Long.MAX_VALUE); + } + + private static synchronized String getDate(long time) { + DATE.setTime(time); + return DATE_FORMAT.format(DATE); + } + + class Runner extends Thread { + + final boolean doInit; + int rangeNum; + int startBlock; + int endBlock; + + Runner(int num, boolean doInit) { + this.doInit = doInit; + rangeNum = num; + startBlock = rangeNum * blocksPerThread; + endBlock = startBlock + blocksPerThread; + } + + int getRangeNumber() { + return rangeNum; + } + + void bumpRange() { + + rangeNum += 1; + + if (rangeNum == nThreads) { + rangeNum = 0; + } + + startBlock = rangeNum * blocksPerThread; + endBlock = startBlock + blocksPerThread; + } + + @Override + public void run() { + + try { + if (doInit) { + for (int i = startBlock; i < endBlock; i += 1) { + + final int size = + (range == 0) ? minSize : (minSize + rnd.nextInt(range)); + + + try { + ids[i] = allocator.allocate(size); + } catch (Throwable e) { + System.err.println("Unable to allocate block " + i); + throw e; + } + } + + try { + barrier.await(); + } catch (BrokenBarrierException ignore) { + } + + if (createNewThreads) { + return; + } + } + + while (true) { + + for (int i = startBlock; i < endBlock; i += 2) { + + allocator.free(ids[i]); + + final int size = (range == 0) ? 
+ minSize : (minSize + rnd.nextInt(range)); + + try { + ids[i] = allocator.allocate(size); + } catch (Throwable e) { + System.err.println("Unable to allocate block " + i); + throw e; + } + } + + try { + barrier.await(); + } catch (BrokenBarrierException ignore) { + } + + if (createNewThreads) { + return; + } + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + } + + private static void fillTheHeap() { + Throwable e = null; + while (true) { + try { + e = new Throwable(e); + } catch (OutOfMemoryError e1) { + return; + } + } + } + + private void getRssAndVsz(long[] rssAndVsz) throws Exception { + + final ProcessBuilder pBuilder = new ProcessBuilder(psCommand); + final Process process = pBuilder.start(); + + final BufferedReader reader = new BufferedReader( + new InputStreamReader(process.getInputStream())); + + String result = ""; + for (String line = reader.readLine(); line != null; + line = reader.readLine()) { + result += line; + } + + result = result.trim(); + final int sep = result.indexOf(" "); + final String rss = result.substring(0, sep); + final String vsz = result.substring(sep + 1); + + try { + rssAndVsz[0] = Long.parseLong(rss) * 1024; + rssAndVsz[1] = Long.parseLong(vsz) * 1024; + } catch (NumberFormatException e) { + throw new RuntimeException(result, e); + } + } + + private static int getPid() throws Exception { + + final int pid = TestUtils.getPid(MeasureOffHeapMemory.class.getName()); + + if (pid < 0) { + throw new RuntimeException("Couldn't get my PID"); + } + + return pid; + } +} diff --git a/test/com/sleepycat/je/evictor/OffHeapAllocatorTest.java b/test/com/sleepycat/je/evictor/OffHeapAllocatorTest.java new file mode 100644 index 0000000..5a635cb --- /dev/null +++ b/test/com/sleepycat/je/evictor/OffHeapAllocatorTest.java @@ -0,0 +1,54 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
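The getRssAndVsz helper above shells out to ps because the JVM has no portable view of its resident set size. A condensed version of the same idea, assuming a Unix-style ps that prints the two columns, in kilobytes, on a single line:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    static long[] rssAndVsz(long pid) throws Exception {
        Process p = new ProcessBuilder(
            "ps", "o", "rss=,vsz=", String.valueOf(pid)).start();
        try (BufferedReader r = new BufferedReader(
                 new InputStreamReader(p.getInputStream()))) {
            String[] cols = r.readLine().trim().split("\\s+");
            return new long[] {
                Long.parseLong(cols[0]) * 1024,   // RSS in bytes
                Long.parseLong(cols[1]) * 1024 }; // VSZ in bytes
        }
    }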
+ */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; + +import com.sleepycat.util.test.TestBase; +import org.junit.Test; + +/** + */ +public class OffHeapAllocatorTest extends TestBase { + + @Test + public void testBasic() throws Exception { + + final OffHeapAllocatorFactory factory = new OffHeapAllocatorFactory(); + final OffHeapAllocator allocator = factory.getDefaultAllocator(); + + long memId = allocator.allocate(100); + assertTrue(memId != 0); + assertEquals(100, allocator.size(memId)); + + byte[] buf = new byte[100]; + byte[] buf2 = new byte[100]; + + Arrays.fill(buf, (byte) 1); + allocator.copy(memId, 0, buf, 0, 100); + Arrays.fill(buf2, (byte) 0); + assertTrue(Arrays.equals(buf, buf2)); + + Arrays.fill(buf, (byte) 1); + allocator.copy(buf, 0, memId, 0, 100); + Arrays.fill(buf2, (byte) 0); + allocator.copy(memId, 0, buf2, 0, 100); + assertTrue(Arrays.equals(buf, buf2)); + + allocator.free(memId); + } +} diff --git a/test/com/sleepycat/je/evictor/OffHeapCacheTest.java b/test/com/sleepycat/je/evictor/OffHeapCacheTest.java new file mode 100644 index 0000000..161ad08 --- /dev/null +++ b/test/com/sleepycat/je/evictor/OffHeapCacheTest.java @@ -0,0 +1,689 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
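testBasic above pins down the allocator contract: a block handle is a long, freshly allocated memory reads back zero-filled, and every handle must be freed exactly once. The life cycle it exercises, condensed:

    OffHeapAllocatorFactory factory = new OffHeapAllocatorFactory();
    OffHeapAllocator allocator = factory.getDefaultAllocator();

    long memId = allocator.allocate(100);       // handle to a 100-byte block
    byte[] heapBuf = new byte[100];
    allocator.copy(memId, 0, heapBuf, 0, 100);  // off-heap -> heap (all zeros)
    allocator.copy(heapBuf, 0, memId, 0, 100);  // heap -> off-heap
    allocator.free(memId);                      // exactly once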
+ */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.Put; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.entry.INLogEntry; +import com.sleepycat.je.test.TTLTest; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Needs testing (not executed in coverage report): + * - enable checksums and run unit tests + * - loadBINIfLsnMatches, evictBINIfLsnMatch + */ +public class OffHeapCacheTest extends TestBase { + + private File envHome; + private Environment env; + private Database db; + private OffHeapCache ohCache; + private OffHeapAllocator allocator; + + public OffHeapCacheTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + @Override + public void tearDown() { + + try { + if (env != null) { + env.close(); + } + } finally { + env = null; + db = null; + ohCache = null; + allocator = null; + TTL.setTimeTestHook(null); + } + + TestUtils.removeLogFiles("TearDown", envHome, false); + } + + private void open() { + open(false); + } + + private void open(final boolean transactional) { + + final EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(transactional); + envConfig.setOffHeapCacheSize(1024 * 1024); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + env = new Environment(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(transactional); + + db = env.openDatabase(null, "foo", dbConfig); + + ohCache = DbInternal.getNonNullEnvImpl(env).getOffHeapCache(); + + allocator = ohCache.getAllocator(); + } + + private void close() { + db.close(); + env.close(); + } + + @Test + public void testBINSerialization() throws Exception { + + open(); + + final BIN bin = new BIN( + DbInternal.getDbImpl(db), + new byte[] { 1, 2, 3 }, + 128, IN.BIN_LEVEL); + + /* Avoid assertions setting LN memIds. 
*/ + bin.setOffHeapLruId(1); + + bin.latch(); + try { + checkBINSerialization(bin); + checkBINSerialization(bin, 0); + checkBINSerialization(bin, 100); + checkBINSerialization(bin, 0, 0); + checkBINSerialization(bin, 100, 0); + checkBINSerialization(bin, 0, 101); + checkBINSerialization(bin, 100, 101); + checkBINSerialization(bin, 0, 0, 0); + checkBINSerialization(bin, 0, 0, 102); + checkBINSerialization(bin, 0, 101, 0); + checkBINSerialization(bin, 0, 101, 102); + checkBINSerialization(bin, 100, 0, 0); + checkBINSerialization(bin, 100, 0, 102); + checkBINSerialization(bin, 100, 101, 0); + checkBINSerialization(bin, 100, 101, 102); + checkBINSerialization(bin, 0, 0, 0, 0); + checkBINSerialization(bin, 0, 0, 0, 103); + checkBINSerialization(bin, 0, 0, 102, 0); + checkBINSerialization(bin, 0, 0, 102, 103); + checkBINSerialization(bin, 100, 101, 0, 0); + checkBINSerialization(bin, 100, 101, 0, 103); + checkBINSerialization(bin, 100, 101, 102, 0); + checkBINSerialization(bin, 100, 101, 102, 103); + } finally { + bin.releaseLatch(); + } + + close(); + } + + private void checkBINSerialization(BIN bin, int... memIds) { + + assertTrue(memIds.length >= bin.getNEntries()); + + for (int i = 0; i < memIds.length; i += 1) { + + if (i >= bin.getNEntries()) { + assertTrue(bin.insertEntry(null, new byte[] {(byte) i}, 0)); + } + + bin.setOffHeapLNId(i, memIds[i]); + } + + final long memId = ohCache.serializeBIN(bin, bin.isBINDelta()); + final byte[] bytes = new byte[allocator.size(memId)]; + allocator.copy(memId, 0, bytes, 0, bytes.length); + allocator.free(memId); + + final BIN bin2 = ohCache.materializeBIN(bin.getEnv(), bytes); + bin2.setDatabase(DbInternal.getDbImpl(db)); + + /* Avoid assertions setting LN memIds. */ + bin2.setOffHeapLruId(1); + + bin2.latch(); + try { + assertEquals(bin.isBINDelta(), bin2.isBINDelta()); + assertEquals(bin.getNEntries(), bin2.getNEntries()); + + for (int i = 0; i < bin.getNEntries(); i += 1) { + assertEquals(bin.getOffHeapLNId(i), bin2.getOffHeapLNId(i)); + } + + /* + * We don't bother to check all BIN fields, since writeToLog and + * readFromLog are used for serialization and tested in many places. + */ + } finally { + bin2.releaseLatch(); + } + } + + /** + * Makes a call to each getter to make sure it doesn't throw an exception. + */ + @Test + public void testStatGetters() throws Exception { + + open(); + + final EnvironmentStats stats = env.getStats(null); + + stats.getOffHeapAllocFailures(); + stats.getOffHeapAllocOverflows(); + stats.getOffHeapThreadUnavailable(); + stats.getOffHeapNodesTargeted(); + stats.getOffHeapCriticalNodesTargeted(); + stats.getOffHeapNodesEvicted(); + stats.getOffHeapDirtyNodesEvicted(); + stats.getOffHeapNodesStripped(); + stats.getOffHeapNodesMutated(); + stats.getOffHeapNodesSkipped(); + stats.getOffHeapLNsEvicted(); + stats.getOffHeapLNsLoaded(); + stats.getOffHeapLNsStored(); + stats.getOffHeapBINsLoaded(); + stats.getOffHeapBINsStored(); + stats.getOffHeapCachedLNs(); + stats.getOffHeapCachedBINs(); + stats.getOffHeapCachedBINDeltas(); + stats.getOffHeapTotalBytes(); + stats.getOffHeapTotalBlocks(); + stats.getOffHeapLRUSize(); + + close(); + } + + /** + * Verifies the phases of eviction of a BIN in main cache with off-heap + * LNs. + */ + @Test + public void testMainBINEviction() { + + open(); + + long bytes; + + /* + * BIN starts with 10 off-heap entries. 
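checkBINSerialization above round-trips a BIN through the off-heap cache's serialized form. The core of that round trip, condensed from the helper:

    long memId = ohCache.serializeBIN(bin, bin.isBINDelta()); // BIN -> off-heap
    byte[] bytes = new byte[allocator.size(memId)];
    allocator.copy(memId, 0, bytes, 0, bytes.length);         // off-heap -> heap
    allocator.free(memId);
    BIN copy = ohCache.materializeBIN(bin.getEnv(), bytes);   // bytes -> BIN
    /* "copy" must agree with "bin" on delta-ness, entry count and
       per-slot off-heap LN ids. */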
+ */ + final BIN bin = createMainCacheBIN(); + assertEquals(10, bin.getNEntries()); + + /* + * First eviction: 5 expired LNs are evicted; + * expired slots are removed except for one dirty slot. + */ + bytes = ohCache.testEvictMainBIN(bin); + assertTrue(bytes > 0); + assertEquals(6, bin.getNEntries()); + + assertEquals(0, bin.getOffHeapLNId(0)); + + for (int i = 1; i < 6; i += 1) { + assertTrue(bin.getOffHeapLNId(i) != 0); + } + + /* + * Second eviction: remaining LNs are evicted; + * the one expired dirty slot remains. + */ + bytes = ohCache.testEvictMainBIN(bin); + assertTrue(bytes > 0); + assertEquals(6, bin.getNEntries()); + assertTrue(bin.getOffHeapLruId() < 0); + + for (int i = 0; i < 6; i += 1) { + assertEquals(0, bin.getOffHeapLNId(i)); + } + + close(); + } + + /** + * Verifies the phases of eviction of an off-heap BIN. + */ + @Test + public void testOffHeapBINEviction() { + + open(); + + final IN parent = createOffHeapBIN(); + long bytes; + BIN bin; + + /* + * BIN starts with 10 off-heap entries. + */ + bin = materializeBIN(parent); + assertNotNull(bin); + assertEquals(10, bin.getNEntries()); + + /* + * First eviction: 5 expired LNs are evicted; + * expired slots are removed except for one dirty slot. + */ + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes > 0); + bin = materializeBIN(parent); + assertNotNull(bin); + assertFalse(bin.isBINDelta(false)); + assertEquals(6, bin.getNEntries()); + + assertEquals(0, bin.getOffHeapLNId(0)); + + for (int i = 1; i < 6; i += 1) { + assertTrue(bin.getOffHeapLNId(i) != 0); + } + + /* + * Second eviction: remaining LNs are evicted; + * the one expired dirty slot remains. + */ + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes > 0); + bin = materializeBIN(parent); + assertNotNull(bin); + assertFalse(bin.isBINDelta(false)); + assertEquals(6, bin.getNEntries()); + + for (int i = 0; i < 6; i += 1) { + assertEquals(0, bin.getOffHeapLNId(i)); + } + + /* + * Third eviction: mutate to delta with a single slot. + */ + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes > 0); + bin = materializeBIN(parent); + assertNotNull(bin); + assertTrue(bin.isBINDelta(false)); + assertEquals(1, bin.getNEntries()); + + /* + * Fourth eviction: move from pri1 to pri2 LRU, because its dirty. + */ + assertFalse(parent.isOffHeapBINPri2(0)); + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes == 0); + bin = materializeBIN(parent); + assertNotNull(bin); + assertTrue(bin.isBINDelta(false)); + assertEquals(1, bin.getNEntries()); + assertTrue(parent.isOffHeapBINPri2(0)); + + /* + * Fifth eviction: evict the off-heap BIN entirely. + */ + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes > 0); + bin = materializeBIN(parent); + assertNull(bin); + + close(); + } + + /** + * Tests a bug fix where we were freeing an off-heap LN twice, when it + * expired but was still locked. The scenario is: + * + * 1. LN with an expiration time is locked. + * 2. LN is moved off-heap. + * 3. The LN's parent BIN is moved off-heap. + * 4. The LN's expiration time passes. + * 5. Off-heap evictor processes BIN. It frees the expired LN, but cannot + * compress the BIN slot, since the record is locked. The serialized + * BIN is mistakenly not updated, so it still has the reference to the + * LN that was freed. The BIN is re-serialized, but only if a slot was + * compressed, and this didn't happen. + * 6. Off-heap evictor processes BIN again. 
This time it tries to free the + * LN that was previously freed, resulting in a JVM crash. + * + * The fix is to always re-serialize the BIN when an expired LN was freed, + * even if its slot cannot be compressed due to a lock. + */ + @Test + public void testLockedExpiredLNInOffHeapBIN() { + + open(true /*transactional*/); + + /* Used a fixed time for expiring records. */ + TTLTest.setFixedTimeHook(System.currentTimeMillis()); + + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(0, key); + final DatabaseEntry data = new DatabaseEntry(new byte[100]); + + final WriteOptions options = new WriteOptions(). + setCacheMode(CacheMode.EVICT_BIN). + setTTL(1, TimeUnit.HOURS); + + final Transaction txn = env.beginTransaction(null, null); + final Cursor cursor = db.openCursor(txn, null); + + final OperationResult result = + cursor.put(key, data, Put.NO_OVERWRITE, options); + + assertNotNull(result); + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + final IN parent = bin.getParent(); + assertEquals(1, parent.getNEntries()); + + cursor.close(); + assertNull(parent.getTarget(0)); + + /* Make the record expire. */ + TTLTest.fixedSystemTime += TTL.MILLIS_PER_HOUR * 2; + + /* + * First eviction will evict expired LN, but cannot compress the slot + * because of the record lock. + */ + long bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes > 0); + + /* + * Second eviction, prior to the bug fix, would mistakenly evict the + * same expired LN and crash. We release the lock here before evicting, + * but the crash would have occurred without releasing it. By releasing + * the lock, we can compress and free the slot without evicting the + * entire BIN. + */ + txn.commit(); + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes > 0); + + /* + * Third time: moves it to the priority 2 LRU. + */ + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertEquals(0, bytes); + + /* + * Fourth time: evicts the entire BIN. + */ + bytes = ohCache.testEvictOffHeapBIN(parent, 0); + assertTrue(bytes > 0); + + close(); + } + + /** + * Tests the following compression scenario, which caused a "double free". + * This was fixed by always considering off-heap BINs stale when a main + * cache version is present. + * + * 1. BIN is off-heap and contains an LN in an expired slot. + * 2. BIN will be loaded into main. + * 3. After materializing, but before calling OffHeapCache.postBINLoad, + * we compress the BIN, which removes the expired slot and frees the + * off-heap LN. + * 4. Because postBINLoad had not been called, the BIN's IN_OFFHEAP_BIT was + * not set before compressing, and BIN.setOffHeapLNId(idx, 0) did not + * set the BIN's IN_OFFHEAP_STALE_BIT. + * 5. Later, when the off-heap evictor tries to strip LNs, it attempts to + * free an already freed block. + * + * Note that another, similar compression scenario, would have caused a + * lost deletion: + * + * 1. BIN is dirty and is both on-heap and off-heap. + * 2. Compression removes a dirty slot and sets ProhibitNextDelta. + * 3. Main BIN is evicted, but is not re-copied off-heap, because off-heap + * BIN is not stale. + * 4. Checkpoint logs the off-heap BIN as a delta, losing the deletion. + * + * This was not observed, because (we think) it is difficult to create + * condition 1. This is because of another bug, where when loading a dirty + * off-heap BIN, the off-heap version was immediately made stale when + * postBINLoad called BIN.setDirty. 
This is also resolved by the new + * approach where always consider off-heap BINs stale when a main cache + * version is present. + */ + @Test + public void testCompressDuringLoad() { + + open(); + + /* Used a fixed time for expiring records. */ + TTLTest.setFixedTimeHook(System.currentTimeMillis()); + + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(0, key); + final DatabaseEntry data = new DatabaseEntry(new byte[100]); + + final WriteOptions options = new WriteOptions(). + setCacheMode(CacheMode.EVICT_BIN). + setTTL(1, TimeUnit.HOURS); + + final Cursor cursor = db.openCursor(null, null); + + final OperationResult result = + cursor.put(key, data, Put.NO_OVERWRITE, options); + + assertNotNull(result); + + BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + final IN parent = bin.getParent(); + assertEquals(1, parent.getNEntries()); + + env.sync(); + + cursor.close(); + assertNull(parent.getTarget(0)); + + /* Make the record expire. */ + TTLTest.fixedSystemTime += TTL.MILLIS_PER_HOUR * 2; + + parent.latchNoUpdateLRU(); + bin = (BIN) parent.loadIN(0, CacheMode.UNCHANGED); + parent.releaseLatch(); + + final Evictor evictor = + DbInternal.getNonNullEnvImpl(env).getEvictor(); + + bin.latchNoUpdateLRU(); + evictor.doTestEvict(bin, Evictor.EvictionSource.MANUAL); + + /* This caused a double-free, before the bug fix. */ + ohCache.testEvictOffHeapBIN(parent, 0); + + close(); + } + + /** + * Tests a fix to a bug where the ProhibitNextDelta flag was not honored + * when logging an off-heap BIN, and the BIN was re-materialized in order + * to compress expired slots. [#24973] + */ + @Test + public void testProhibitNextDeltaBug() { + + open(); + + final IN parent = createOffHeapBIN(true /*prohibitNextDelta*/); + + parent.latchNoUpdateLRU(); + + final INLogEntry entry = + ohCache.createBINLogEntryForCheckpoint(parent, 0); + + /* This failed prior to the bug fix. */ + assertFalse(entry.isBINDelta()); + + entry.getMainItem().releaseLatch(); + parent.releaseLatch(); + + close(); + } + + private BIN materializeBIN(final IN parent) { + + parent.latchNoUpdateLRU(); + final byte[] bytes = ohCache.getBINBytes(parent, 0); + parent.releaseLatch(); + + if (bytes == null) { + return null; + } + + return ohCache.materializeBIN( + DbInternal.getNonNullEnvImpl(env), bytes); + } + + /** + * Calls createBIN and then moves it off-heap. + * + * @return the parent, which will only have one slot containing the + * off-heap BIN. + */ + private IN createOffHeapBIN(final boolean prohibitNextDelta) { + final IN parent = createBIN(CacheMode.EVICT_BIN, prohibitNextDelta); + assertNull(parent.getTarget(0)); + assertTrue(parent.getOffHeapBINId(0) >= 0); + return parent; + } + + private IN createOffHeapBIN() { + return createOffHeapBIN(false /*prohibitNextDelta*/); + } + + /** + * Calls createBIN and moves its LNs off-heap. + */ + private BIN createMainCacheBIN() { + final IN parent = createBIN( + CacheMode.EVICT_LN, false /*prohibitNextDelta*/); + final BIN bin = (BIN) parent.getTarget(0); + assertNotNull(bin); + assertTrue(bin.getOffHeapLruId() >= 0); + assertTrue(parent.getOffHeapBINId(0) < 0); + return bin; + } + + /** + * Creates a BIN with: + * - 10 LNs + * - the first 5 LNs are expired + * - all LNs are moved off-heap if cacheMode is EVICT_LN, or the entire + * BIN is moved off-heap if it is EVICT_BIN + * - only one slot (an expired slot) is dirty, and therefore the BIN can + * be mutated to a delta + * + * @return the parent, which will only have one slot. 
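Several of these tests expire records deterministically rather than waiting for wall-clock time to pass. The pattern, condensed from the tests above:

    TTLTest.setFixedTimeHook(System.currentTimeMillis()); // freeze "now"
    /* ... write records with setTTL(1, TimeUnit.HOURS) ... */
    TTLTest.fixedSystemTime += TTL.MILLIS_PER_HOUR * 2;   // jump two hours ahead
    /* ... eviction and compression now see those records as expired ... */
    TTL.setTimeTestHook(null);                            // restore the real clock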
+ */ + private IN createBIN(final CacheMode cacheMode, + final boolean prohibitNextDelta) { + + /* Used a fixed time for expiring records. */ + TTLTest.setFixedTimeHook(System.currentTimeMillis()); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[100]); + + final WriteOptions options = + new WriteOptions().setCacheMode(cacheMode); + + final Cursor cursor = db.openCursor(null, null); + OperationResult result; + BIN bin = null; + + for (int i = 0; i < 10; i += 1) { + + options.setTTL((i < 5) ? 1 : 0, TimeUnit.HOURS); + IntegerBinding.intToEntry(i, key); + + result = cursor.put(key, data, Put.NO_OVERWRITE, options); + assertNotNull(result); + + final BIN cursorBin = DbInternal.getCursorImpl(cursor).getBIN(); + + if (bin == null) { + bin = cursorBin; + } else { + assertSame(bin, cursorBin); + } + } + + /* + * Checkpoint and dirty one record, so that a delta should be logged + * next. + */ + env.checkpoint(new CheckpointConfig().setForce(true)); + IntegerBinding.intToEntry(0, key); + result = db.put(null, key, data, Put.OVERWRITE, options); + assertNotNull(result); + + bin.latchNoUpdateLRU(); + assertTrue(bin.shouldLogDelta()); + bin.setProhibitNextDelta(prohibitNextDelta); + bin.releaseLatch(); + + cursor.close(); + + /* Make the 5 records expire. */ + TTLTest.fixedSystemTime += TTL.MILLIS_PER_HOUR * 2; + + final IN parent = bin.getParent(); + assertEquals(1, parent.getNEntries()); + return parent; + } +} diff --git a/test/com/sleepycat/je/evictor/SharedCacheTest.java b/test/com/sleepycat/je/evictor/SharedCacheTest.java new file mode 100644 index 0000000..bafbedb --- /dev/null +++ b/test/com/sleepycat/je/evictor/SharedCacheTest.java @@ -0,0 +1,800 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.evictor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Tests the shared cache feature enabled via Environment.setSharedCache(true). 
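For reference, the shared cache is enabled through EnvironmentConfig (the javadoc's Environment.setSharedCache is shorthand). A minimal sketch of two environments sharing one pool (directory names hypothetical):

    EnvironmentConfig config = new EnvironmentConfig();
    config.setAllowCreate(true);
    config.setSharedCache(true);
    config.setCacheSize(5 << 20);  // size of the shared pool, not per-env
    Environment envA = new Environment(new File("envA"), config);
    Environment envB = new Environment(new File("envB"), config);
    /* Both handles now report the same getSharedCacheTotalBytes(),
       and getNSharedCacheEnvironments() == 2. */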
+ */ +public class SharedCacheTest extends TestBase { + + private static final int N_ENVS = 5; + private static final int ONE_MB = 1 << 20; + private static final int ENV_DATA_SIZE = ONE_MB; + private static final int TOTAL_DATA_SIZE = N_ENVS * ENV_DATA_SIZE; + private static final int LOG_BUFFER_SIZE = (ENV_DATA_SIZE * 7) / 100; + private static final int MIN_DATA_SIZE = 50 * 1024; + private static final int LRU_ACCURACY_PCT = 60; + private static final int ENTRY_DATA_SIZE = 500; + private static final String TEST_PREFIX = "SharedCacheTest_"; + private static final StatsConfig CLEAR_CONFIG = new StatsConfig(); + + static { + CLEAR_CONFIG.setClear(true); + } + + private File envHome; + private File[] dirs; + private Environment[] envs; + private Database[] dbs; + private boolean sharedCache = true; + private boolean offHeapCache = false; + private long actualTotalCacheSize; + + public SharedCacheTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + dirs = new File[N_ENVS]; + envs = new Environment[N_ENVS]; + dbs = new Database[N_ENVS]; + + for (int i = 0; i < N_ENVS; i += 1) { + dirs[i] = new File(envHome, TEST_PREFIX + i); + dirs[i].mkdir(); + assertTrue(dirs[i].isDirectory()); + } + + IN.ACCUMULATED_LIMIT = 0; + } + + @Override + @After + public void tearDown() { + for (int i = 0; i < N_ENVS; i += 1) { + if (dbs[i] != null) { + try { + dbs[i].close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + dbs[i] = null; + } + if (envs[i] != null) { + try { + envs[i].close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + envs[i] = null; + } + } + envHome = null; + dirs = null; + envs = null; + dbs = null; + + IN.ACCUMULATED_LIMIT = IN.ACCUMULATED_LIMIT_DEFAULT; + } + + @Test + public void testBaselineOffHeap() { + offHeapCache = true; + testBaseline(); + } + + @Test + public void testBaseline() { + + /* Open all DBs in the same environment. */ + final int N_DBS = N_ENVS; + sharedCache = false; + openOne(0); + DatabaseConfig dbConfig = dbs[0].getConfig(); + for (int i = 1; i < N_DBS; i += 1) { + dbs[i] = envs[0].openDatabase(null, "foo" + i, dbConfig); + } + for (int i = 0; i < N_DBS; i += 1) { + write(i, ENV_DATA_SIZE); + } + + envs[0].sync(); // So the separate LRU dirty list isn't used + + for (int iter = 0; iter < 50; iter += 1) { + + /* Read all DBs evenly. */ + for (int repeat = 0; repeat < 2; repeat += 1) { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + boolean done = false; + for (int i = 0; !done; i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_DBS; j += 1) { + if (dbs[j].get(null, key, data, null) != + OperationStatus.SUCCESS) { + done = true; + } + } + } + } + + /* + * Check that each DB uses approximately equal portions of the + * cache. 
+ */ + StringBuilder buf = new StringBuilder(); + long low = Long.MAX_VALUE; + long high = 0; + for (int i = 0; i < N_DBS; i += 1) { + long val = getDatabaseCacheBytes(dbs[i], false); + buf.append(" db=").append(i).append(" bytes=").append(val); + if (low > val) { + low = val; + } + if (high < val) { + high = val; + } + } + + final long pct = (low * 100) / high; + +// System.out.println("Baseline LRU accuracy pct=" + pct + buf); + + if (iter > 25) { + assertTrue( + "failed with pct=" + pct + buf, + pct >= LRU_ACCURACY_PCT); + } + } + + for (int i = 1; i < N_DBS; i += 1) { + dbs[i].close(); + dbs[i] = null; + } + closeOne(0); + } + + private long getDatabaseCacheBytes(Database db, boolean offHeapOnly) { + + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + + final OffHeapCache ohCache = dbImpl.getEnv().getOffHeapCache(); + + long total = 0; + + for (IN in : dbImpl.getEnv().getInMemoryINs()) { + + if (in.getDatabase() != dbImpl) { + continue; + } + + if (!offHeapOnly) { + total += in.getInMemorySize(); + } + + if (offHeapCache) { + total += ohCache.getINSize(in); + } + } + + return total; + } + + @Test + public void testWriteOneEnvAtATimeOffHeap() { + offHeapCache = true; + testWriteOneEnvAtATime(); + } + + /** + * Writes to each env one at a time, writing enough data in each env to fill + * the entire cache. Each env in turn takes up a large majority of the + * cache. + */ + @Test + public void testWriteOneEnvAtATime() { + + final int SMALL_DATA_SIZE = MIN_DATA_SIZE + (20 * 1024); + final int SMALL_TOTAL_SIZE = SMALL_DATA_SIZE + LOG_BUFFER_SIZE; + final int BIG_TOTAL_SIZE = + ENV_DATA_SIZE - ((N_ENVS - 1) * SMALL_TOTAL_SIZE); + + openAll(); + + for (int i = 0; i < N_ENVS; i += 1) { + write(i, TOTAL_DATA_SIZE); + + final long sharedTotal = getSharedCacheTotal(i); + final long localTotal = getLocalCacheTotal(i); + + String msg = "env=" + i + + " total=" + localTotal + + " shared=" + sharedTotal; + + assertTrue(msg, sharedTotal >= BIG_TOTAL_SIZE); + assertTrue(msg, localTotal >= BIG_TOTAL_SIZE); + } + + closeAll(); + } + + private long getLocalCacheTotal(final int i) { + return getLocalCacheTotal(i, null); + } + + private long getLocalCacheTotal(final int i, final StringBuilder buf) { + + final EnvironmentStats stats = envs[i].getStats(null); + + final long mainSize = stats.getCacheTotalBytes(); + + final long offHeapSize = offHeapCache ? + getDatabaseCacheBytes(dbs[i], true) : 0; + + final long total = mainSize + offHeapSize; + + if (buf != null) { + buf.append("\nenv=").append(i); + buf.append(" main=").append(mainSize); + buf.append(" offHeap=").append(offHeapSize); + buf.append(" total=").append(total); + } + + return total; + } + + private long getSharedCacheTotal(final int i) { + final EnvironmentStats stats = envs[i].getStats(null); + long size = stats.getSharedCacheTotalBytes(); + if (offHeapCache) { + size += stats.getOffHeapTotalBytes(); + } + return size; + } + + @Test + public void testWriteAllEnvsEvenlyOffHeap() { + offHeapCache = true; + testWriteAllEnvsEvenly(); + } + + /** + * Writes alternating records to each env, writing enough data to fill the + * entire cache. Each env takes up roughly equal portions of the cache. 
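The two helpers above distinguish the stat views these assertions depend on: the local total is one environment's share of the pool, while the shared total is the pool itself and reads the same from every handle. In miniature:

    EnvironmentStats stats = envs[0].getStats(null);
    long localBytes  = stats.getCacheTotalBytes();        // this env's share
    long sharedBytes = stats.getSharedCacheTotalBytes();  // the whole pool
    assert localBytes <= sharedBytes;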
+ */ + @Test + public void testWriteAllEnvsEvenly() { + + openAll(); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]); + for (int i = 0; i < 2 * (ENV_DATA_SIZE / ENTRY_DATA_SIZE); i += 1) { + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_ENVS; j += 1) { + dbs[j].put(null, key, data); + } + checkStatsConsistency(); + } + checkEvenCacheUsage(); + closeAll(); + } + + @Test + public void testOpenCloseOffHeap() { + offHeapCache = true; + testOpenClose(); + } + + /** + * Checks that the cache usage changes appropriately as environments are + * opened and closed. + */ + @Test + public void testOpenClose() { + + openAll(); + int nRecs = 0; + for (int i = 0; i < N_ENVS; i += 1) { + int n = write(i, TOTAL_DATA_SIZE); + if (nRecs < n) { + nRecs = n; + } + } + closeAll(); + openAll(); + readEvenly(nRecs); + /* Close only one. */ + for (int i = 0; i < N_ENVS; i += 1) { + closeOne(i); + readEvenly(nRecs); + openOne(i); + readEvenly(nRecs); + } + /* Close all but one. */ + for (int i = 0; i < N_ENVS; i += 1) { + for (int j = 0; j < N_ENVS; j += 1) { + if (j != i) { + closeOne(j); + } + } + readEvenly(nRecs); + for (int j = 0; j < N_ENVS; j += 1) { + if (j != i) { + openOne(j); + } + } + readEvenly(nRecs); + } + closeAll(); + } + + @Test + public void testHotnessOffHeap() { + offHeapCache = true; + testHotness(); + } + + /** + * Checks that an environment with hot data uses more of the cache. + */ + @Test + public void testHotness() { + + final int HOT_CACHE_SIZE = (int) (1.5 * ENV_DATA_SIZE); + + openAll(); + + int nRecs = Integer.MAX_VALUE; + + for (int i = 0; i < N_ENVS; i += 1) { + int n = write(i, TOTAL_DATA_SIZE); + if (nRecs > n) { + nRecs = n; + } + envs[i].sync(); + } + + readEvenly(nRecs); + + /* Keep one env "hot". */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < N_ENVS; i += 1) { + for (int j = 0; j < N_ENVS; j += 1) { + for (int k = 0; k < nRecs; k += 1) { + IntegerBinding.intToEntry(k, key); + dbs[i].get(null, key, data, null); + dbs[j].get(null, key, data, null); + } + checkStatsConsistency(); + + if (getLocalCacheTotal(i) < HOT_CACHE_SIZE || + getLocalCacheTotal(j) < HOT_CACHE_SIZE) { + + EnvironmentStats iStats = envs[i].getStats(null); + EnvironmentStats jStats = envs[j].getStats(null); + + StringBuilder msg = new StringBuilder(); + msg.append("Hot cache size is below " + HOT_CACHE_SIZE + + " for env " + i + " or " + j); + for (int k = 0; k < N_ENVS; k += 1) { + msg.append("\n**** ENV " + k + " ****\n"); + msg.append(envs[k].getStats(null)); + } + fail(msg.toString()); + } + } + } + closeAll(); + } + + /** + * Tests changing the cache size. + */ + @Test + public void testMutateCacheSize() { + + final int HALF_DATA_SIZE = TOTAL_DATA_SIZE / 2; + openAll(); + + int nRecs = 0; + for (int i = 0; i < N_ENVS; i += 1) { + int n = write(i, ENV_DATA_SIZE); + if (nRecs < n) { + nRecs = n; + } + } + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(envs[0]); + DbConfigManager configManager = envImpl.getConfigManager(); + + long evictBytes = + configManager.getLong(EnvironmentParams.EVICTOR_EVICT_BYTES); + + long nodeMaxEntries = + configManager.getInt(EnvironmentParams.NODE_MAX); + + long maxLNBytesPerBIN = + (nodeMaxEntries - 1) * + (MemoryBudget.LN_OVERHEAD + + MemoryBudget.byteArraySize(ENTRY_DATA_SIZE)); + + long maxFreeMem = (evictBytes * 2) + maxLNBytesPerBIN; + + /* Full cache size. 
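The maxFreeMem bound computed above is derived rather than guessed: the cache may legitimately be short by up to two eviction batches plus one BIN's worth of LNs. A worked sketch with stand-in numbers (the overhead constants here are hypothetical; the test reads the real values from MemoryBudget and the config manager):

    long evictBytes       = 10 * 1024;  // EVICTOR_EVICT_BYTES, as configured below
    long nodeMaxEntries   = 128;        // NODE_MAX
    long lnOverhead       = 200;        // stand-in for MemoryBudget.LN_OVERHEAD
    long dataBytes        = 520;        // stand-in for byteArraySize(ENTRY_DATA_SIZE)
    long maxLNBytesPerBIN = (nodeMaxEntries - 1) * (lnOverhead + dataBytes);
    long maxFreeMem       = (evictBytes * 2) + maxLNBytesPerBIN;
    /* The cache counts as full when
       Math.abs(configuredSize - memConsumed) < maxFreeMem. */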
*/ + readEvenly(nRecs); + + long memConsumed = envs[0].getStats(null).getSharedCacheTotalBytes(); + +// System.out.println( +// "Mem consumed = " + memConsumed + +// " free mem = " + (actualTotalCacheSize - memConsumed) + +// " max free allowed = " + maxFreeMem); + + assertTrue(Math.abs(actualTotalCacheSize - memConsumed) < maxFreeMem); + + /* Halve cache size. */ + EnvironmentMutableConfig config = envs[0].getMutableConfig(); + config.setCacheSize(HALF_DATA_SIZE); + envs[0].setMutableConfig(config); + final long actualHalfSize = + TestUtils.adjustSharedCacheSize(envs, HALF_DATA_SIZE); + + readEvenly(nRecs); + + memConsumed = envs[0].getStats(null).getSharedCacheTotalBytes(); + +// System.out.println( +// "Mem consumed = " + memConsumed + +// " free mem = " + (actualHalfSize - memConsumed) + +// " max free allowed = " + maxFreeMem); + + assertTrue(Math.abs(actualHalfSize - memConsumed) < maxFreeMem); + + /* Full cache size. */ + config = envs[0].getMutableConfig(); + config.setCacheSize(TOTAL_DATA_SIZE); + envs[0].setMutableConfig(config); + actualTotalCacheSize = + TestUtils.adjustSharedCacheSize(envs, TOTAL_DATA_SIZE); + + readEvenly(nRecs); + + memConsumed = envs[0].getStats(null).getSharedCacheTotalBytes(); + +// System.out.println( +// "Mem consumed = " + memConsumed + +// " free mem = " + (actualTotalCacheSize - memConsumed) + +// " max free allowed = " + maxFreeMem); + + assertTrue(Math.abs(actualTotalCacheSize - memConsumed) < maxFreeMem); + + closeAll(); + } + + private void openAll() { + + for (int i = 0; i < N_ENVS; i += 1) { + openOne(i); + } + } + + private void openOne(int i) { + + IN.ACCUMULATED_LIMIT = 0; + + final long mainSize; + final long offHeapSize; + if (offHeapCache) { + mainSize = ONE_MB; + offHeapSize = TOTAL_DATA_SIZE; + } else { + mainSize = TOTAL_DATA_SIZE; + offHeapSize = 0; + } + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setSharedCache(sharedCache); + envConfig.setCacheSize(mainSize); + envConfig.setOffHeapCacheSize(offHeapSize); + envConfig.setConfigParam( + EnvironmentConfig.TREE_MIN_MEMORY, String.valueOf(MIN_DATA_SIZE)); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.STATS_COLLECT, "false"); + envConfig.setConfigParam( + EnvironmentConfig.EVICTOR_EVICT_BYTES, "10240"); + envConfig.setConfigParam( + EnvironmentConfig.OFFHEAP_EVICT_BYTES, "10240"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + /* + * Because the evictors each have multiple LRU lists per LRUSet, the + * accuracy of the LRU varies too much to be predictable in this test, + * especially due to outliers on some machines. Use a single LRU list + * per LRUSet. 
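testMutateCacheSize above relies on the cache size being mutable at runtime. The mutation itself is three lines; any handle sharing the pool may perform it, and the new size applies to all sharing environments:

    EnvironmentMutableConfig mutable = env.getMutableConfig();
    mutable.setCacheSize(TOTAL_DATA_SIZE / 2); // halve the (shared) cache
    env.setMutableConfig(mutable);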
+ */ + envConfig.setConfigParam( + EnvironmentConfig.OFFHEAP_N_LRU_LISTS, "1"); + envConfig.setConfigParam( + EnvironmentConfig.EVICTOR_N_LRU_LISTS, "1"); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + envs[i] = new Environment(dirs[i], envConfig); + dbs[i] = envs[i].openDatabase(null, "foo", dbConfig); + + if (offHeapCache) { + actualTotalCacheSize = + TestUtils.adjustSharedCacheSize(envs, ONE_MB) + + TOTAL_DATA_SIZE; + } else { + actualTotalCacheSize = + TestUtils.adjustSharedCacheSize(envs, TOTAL_DATA_SIZE); + } + } + + private void closeAll() { + for (int i = 0; i < N_ENVS; i += 1) { + closeOne(i); + } + } + + private void closeOne(int i) { + if (dbs[i] != null) { + dbs[i].close(); + dbs[i] = null; + } + if (envs[i] != null) { + envs[i].close(); + envs[i] = null; + } + } + + /** + * Writes enough records in the given envIndex environment to cause at + * least minSizeToWrite bytes to be used in the cache. + */ + private int write(int envIndex, int minSizeToWrite) { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[ENTRY_DATA_SIZE]); + int i; + for (i = 0; i < minSizeToWrite / ENTRY_DATA_SIZE; i += 1) { + IntegerBinding.intToEntry(i, key); + dbs[envIndex].put(null, key, data); + } + checkStatsConsistency(); + return i; + } + + /** + * Reads alternating records from each env, reading all records from each + * env. Checks that all environments use roughly equal portions of the + * cache. + */ + private void readEvenly(int nRecs) { + + /* + EnvironmentImpl firstEnvImpl = DbInternal.getNonNullEnvImpl(envs[0]); + LRUEvictor evictor = (LRUEvictor)firstEnvImpl.getEvictor(); + + ArrayList statsList = + new ArrayList(N_ENVS); + + for (int i = 0; i < N_ENVS; i += 1) { + statsList.add(new LRUEvictor.LRUDebugStats()); + } + */ + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Repeat reads twice to give the LRU a fighting chance. 
*/ + for (int repeat = 0; repeat < 2; repeat += 1) { + + /* + for (int k = 0; k < N_ENVS; k += 1) { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(envs[k]); + System.out.println("Before-read LRU stats for env " + k); + evictor.getPri1LRUStats(envImpl, statsList.get(k)); + System.out.println("MIXED: " + statsList.get(k)); + evictor.getPri2LRUStats(envImpl, statsList.get(k)); + System.out.println("DIRTY: " + statsList.get(k)); + System.out.println(""); + } + */ + + for (int i = 0; i < nRecs; i += 1) { + + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < N_ENVS; j += 1) { + if (dbs[j] != null) { + dbs[j].get(null, key, data, null); + } + } + + /* + if (i % 512 == 0 || i == 1600) { + for (int k = 0; k < N_ENVS; k += 1) { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(envs[k]); + System.out.println("LRU stats for env " + k + + " at record " + i); + evictor.getPri1LRUStats(envImpl, statsList.get(k)); + System.out.println("MIXED: " + statsList.get(k)); + evictor.getPri2LRUStats(envImpl, statsList.get(k)); + System.out.println("DIRTY: " + statsList.get(k)); + System.out.println(""); + } + } + */ + checkStatsConsistency(); + } + } + + /* + for (int i = 0; i < N_ENVS; i += 1) { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(envs[i]); + System.out.println("After-read LRU stats for env " + i); + evictor.getPri1LRUStats(envImpl, statsList.get(i)); + System.out.println("MIXED: " + statsList.get(i)); + evictor.getPri2LRUStats(envImpl, statsList.get(i)); + System.out.println("DIRTY: " + statsList.get(i)); + System.out.println(""); + } + */ + + checkEvenCacheUsage(); + } + + /** + * Checks that each env uses approximately equal portions of the cache. + * How equal the portions are depends on the accuracy of the LRU. + */ + private void checkEvenCacheUsage() { + + final StringBuilder buf = new StringBuilder(); + + long low = Long.MAX_VALUE; + long high = 0; + + for (int i = 0; i < N_ENVS; i += 1) { + if (envs[i] == null) { + continue; + } + + final long val = getLocalCacheTotal(i, buf); + + if (low > val) { + low = val; + } + if (high < val) { + high = val; + } + } + + long pct = (low * 100) / high; + if (pct < LRU_ACCURACY_PCT) { + fail("failed with pct=" + pct + buf); + } +// System.out.println("readEven LRU accuracy pct=" + pct + buf); + } + + /** + * Checks that the sum of all env cache usages is the total cache usage, + * and other self-consistency checks. 
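checkStatsConsistency below enforces two invariants: every open environment sees the same shared total, and the per-environment shares sum to exactly that total. In isolation (openEnvs is a hypothetical list of the live handles):

    long localSum = 0;
    long sharedTotal = -1;
    for (Environment e : openEnvs) {
        EnvironmentStats s = e.getStats(null);
        if (sharedTotal == -1) {
            sharedTotal = s.getSharedCacheTotalBytes();
        } else {
            assert sharedTotal == s.getSharedCacheTotalBytes(); // same pool view
        }
        localSum += s.getCacheTotalBytes();
    }
    assert localSum == sharedTotal; // shares sum to the pool total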
+ */ + private void checkStatsConsistency() { + + if (!sharedCache) { + return; + } + + long localTotal = 0; + long sharedTotal = -1; + int nShared = 0; + EnvironmentStats stats = null; + + for (int i = 0; i < N_ENVS; i += 1) { + if (envs[i] != null) { + envs[i].evictMemory(); + } + } + + for (int i = 0; i < N_ENVS; i += 1) { + if (envs[i] == null) { + continue; + } + + if (sharedTotal == -1) { + sharedTotal = getSharedCacheTotal(i); + } else { + assertEquals(sharedTotal, getSharedCacheTotal(i)); + } + + final long local = getLocalCacheTotal(i); + + localTotal += local; + +// System.out.println( +// "Env= " + i + +// " localTotal= " + localTotal + +// " localTotal=" + local + +// " shared=" + sharedTotal); + + nShared += 1; + + if (stats == null) { + stats = envs[i].getStats(null); + } + } + + assertEquals(nShared, stats.getNSharedCacheEnvironments()); + assertEquals(sharedTotal, localTotal); + + final long expectMax = + actualTotalCacheSize + (actualTotalCacheSize / 10); + + assertTrue( + "sharedTotal= " + sharedTotal + + " expectMax= " + expectMax, + sharedTotal < expectMax); + } +} diff --git a/test/com/sleepycat/je/incomp/EmptyBINTest.java b/test/com/sleepycat/je/incomp/EmptyBINTest.java new file mode 100644 index 0000000..b4ac022 --- /dev/null +++ b/test/com/sleepycat/je/incomp/EmptyBINTest.java @@ -0,0 +1,449 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.incomp; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test that searches and cursor traversals execute correctly in the face of + * a BIN with 0 entries, and with tree pruning at key points. + */ +@RunWith(Parameterized.class) +public class EmptyBINTest extends TestBase { + private static final boolean DEBUG = false; + + private static final byte DEFAULT_VAL = 100; + private final File envHome; + private Environment env; + private Database db; + + private final boolean useDups; + private final boolean doPruningAtCursorLevel; + private final boolean doPruningAtTreeLevel; + + /* + * Run all tests in four combinations, using dups, and invoking bin + * pruning. 
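A note on the count: genParams below actually yields six parameter combinations, not four (two dup settings times the three pruning settings that remain once the cursor-and-tree case is excluded). Each Object[] it returns becomes one constructor invocation under JUnit's Parameterized runner, so every @Test method runs once per combination. A condensed, hypothetical example of the mechanism (ExampleTest is not part of this suite):

    import java.util.Arrays;
    import java.util.List;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class ExampleTest {
        @Parameters
        public static List<Object[]> params() {
            return Arrays.asList(new Object[] {true}, new Object[] {false});
        }

        private final boolean flag;

        public ExampleTest(boolean flag) {
            this.flag = flag;
        }

        @Test
        public void check() { /* runs twice: flag=true, then flag=false */ }
    }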
+ */ + @Parameters + public static List genParams() { + List list = new ArrayList(); + boolean[] combo = new boolean[] {true, false}; + + for (boolean dup : combo) { + for (boolean pruneCursor : combo) { + for (boolean pruneTree : combo) { + if (!(pruneCursor && pruneTree)) { + list.add(new Object[] {dup, pruneCursor, pruneTree}); + } + } + } + } + return list; + } + + public EmptyBINTest(boolean dup, boolean pruneCursor, boolean pruneTree) { + this.useDups = dup; + this.doPruningAtCursorLevel = pruneCursor; + this.doPruningAtTreeLevel = pruneTree; + if (DEBUG) { + System.out.println("useDups=" + useDups + + " doPruningAtCursorLevel=" + + doPruningAtCursorLevel + + " doPruningAtTreeLevel=" + + doPruningAtTreeLevel); + } + this.envHome = SharedTestUtils.getTestDir(); + customName = (useDups ? "DUPS" : "!DUPS") + + "_" + + (doPruningAtCursorLevel ? "CURSORPRUNE" : "!CURSORPRUNE") + + "_" + + (doPruningAtTreeLevel ? "TREEPRUNE" : "!TREEPRUNE"); + } + + @After + public void tearDown() + throws Exception { + + if (db != null) { + try { + db.close(); + } catch (DatabaseException ignore) { + } + } + + if (env != null) { + try { + env.close(); + } catch (DatabaseException ignore) { + } + } + env = null; + db = null; + } + + /* Non-dupes scans across an empty BIN. */ + @Test + public void testScanFromEndOfFirstBin() + throws DatabaseException { + + /* + * Tree holds <0,1> <2,3,4> <8,9,10>. + * | + * fwd scan starts --- -+ + * Fwd scan starting at 4. Expect 4, 8, 9, 10 + */ + doScanAcrossEmptyBin(true, // forward + (byte) 4, // start + new byte[] {4,8,9,10}); // expected + } + + @Test + public void testScanFromLeftSideOfEmptyBin() + throws DatabaseException { + + /* + * Tree holds <0,1> <2,3,4> <8,9,10>. + * | + * scan starts -------------+ + * Fwd scan starting at 5 (deleted). Expect 8, 9, 10 + */ + doScanAcrossEmptyBin(true, // forward + (byte) 5, // start + new byte[] {8,9,10}); // expected + } + + @Test + public void testScanFromRightSideOfEmptyBin() + throws DatabaseException { + + /* + * Tree holds <0,1> <2,3,4> <8,9,10>. + * | + * backwards scan starts ------+ + * Backwards scan starting at 7 (deleted). Expect 8,4,3,2,1,0 + */ + doScanAcrossEmptyBin(false, // backwards + (byte) 7, // start + new byte[] {8,4,3,2,1,0}); // expected + } + + @Test + public void testScanFromBeginningOfLastBin() + throws DatabaseException { + + /* + * Tree holds <0,1> <2,3,4> <8,9,10>. + * | + * backwards scan starts -----------+ + */ + doScanAcrossEmptyBin(false, // backwards + (byte) 8, // start + new byte[] {8,4,3,2,1,0}); // expected vals + } + + @Test + public void testScanForward() + throws DatabaseException { + + /* + * Tree holds <0,1> <2,3,4> <8,9,10>. + * Fwd scan starting with first. Expect 0, 1, 2, 4, 8, 9, 10. + */ + doScanAcrossEmptyBin(true, // forward + (byte) -1, + new byte[] {0,1,2,3,4,8,9,10}); + } + + @Test + public void testScanBackwards() + throws DatabaseException { + + /* + * Tree holds <0,1> <2,3,4> <8,9,10>. + * Bwd scan starting with last. 10 -> 0 + */ + doScanAcrossEmptyBin(false, // backwards + (byte) -1, + new byte[] {10,9,8,4,3,2,1,0}); + } + + /** + * Scan over an empty BIN that is in the middle of the tree. [#11778] + * The tree holds values from 0 - 10. Values 5, 6, 7 have been deleted. + * @param forward indicates use getNext(). + * @param startKey >= 0 indicates do getSearchKeyRange to init cursor. 
+ * @param expectVals are the elements to expect find + */ + private void doScanAcrossEmptyBin(boolean forward, + byte startKey, + byte[] expectVals) + throws DatabaseException { + + int deleteStartVal = 5; + int deleteEndVal = 7; + openAndInitEmptyMiddleBIN(deleteStartVal, deleteEndVal); + + if (DEBUG) { + DbInternal.getDbImpl(db).getTree().dump(); + } + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* + * Position a cursor and check that we get the expected values. + */ + int cnt = 0; + Cursor cursor = db.openCursor(null, null); + CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + + if (doPruningAtCursorLevel) { + cursorImpl.setTestHook(new PruningHook(env)); + } + + if (doPruningAtTreeLevel) { + DbInternal.getDbImpl(db).getTree(). + setSearchHook(new PruningHook(env)); + } + + int expectIndex = 0; + if (startKey < 0) { + if (forward) { + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(key, data, null)); + } else { + assertEquals(OperationStatus.SUCCESS, + cursor.getLast(key, data, null)); + } + } else { + if (useDups) { + key.setData(new byte[] {DEFAULT_VAL}); + data.setData(new byte[] {startKey}); + } else { + key.setData(new byte[] { startKey }); + } + + if ((startKey >= deleteStartVal) && + (startKey <= deleteEndVal)) { + /* Test range query. */ + if (useDups) { + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBothRange(key, data, null)); + } else { + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKeyRange(key, data, null)); + } + } else { + /* Test from getSearchKey(). */ + if (useDups) { + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBoth(key, data, null)); + } else { + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, null)); + } + } + } + + OperationStatus status; + do { + cnt++; + + /* check value. */ + if (DEBUG) { + System.out.println("=>key=" + key.getData()[0] + + " data=" + data.getData()[0]); + } + if (useDups) { + assertEquals(expectVals[expectIndex++], data.getData()[0]); + } else { + assertEquals(expectVals[expectIndex++], key.getData()[0]); + } + + if (forward) { + status = cursor.getNext(key, data, null); + } else { + status = cursor.getPrev(key, data, null); + } + } while (status == OperationStatus.SUCCESS); + + assertEquals(expectVals.length, cnt); + cursor.close(); + closeEnv(); + } + + /** + * Create a tree with: + * IN + * / \ + * IN IN + * / \ / \ + * BIN1 BIN2 BIN3 BIN4 + * + * where BIN1 has values 0,1 + * BIN2 has valus 2,3,4 + * BIN3 has valus 5,6,7 + * BIN4 has valus 8,9,10 + * Depending on configuration, the entries in BIN2 or BIN3 + */ + private void openAndInitEmptyMiddleBIN(int deleteStartVal, + int deleteEndVal) + throws DatabaseException { + + openEnv(false, "4"); + DatabaseEntry data = new DatabaseEntry(); + data.setData(new byte[] {DEFAULT_VAL}); + DatabaseEntry key = new DatabaseEntry(); + key.setData(new byte[] {DEFAULT_VAL}); + + /* Create four BINs */ + OperationStatus status; + for (int i = 0; i < 11; i++) { + if (useDups) { + data = new DatabaseEntry(new byte[] { (byte) i }); + } else { + key = new DatabaseEntry(new byte[] { (byte) i }); + } + status = db.put(null, key, data); + assertEquals(OperationStatus.SUCCESS, status); + } + + /* Empty out one of the middle ones. 
*/ + if (useDups) { + Cursor cursor = db.openCursor(null, null); + data = new DatabaseEntry(new byte[] { (byte) deleteStartVal }); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBoth(key, data, LockMode.DEFAULT)); + for (int i = deleteStartVal; i <= deleteEndVal; i++) { + assertEquals(OperationStatus.SUCCESS, + cursor.delete()); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + } + cursor.close(); + } else { + for (int i = deleteStartVal; i <= deleteEndVal; i++) { + key = new DatabaseEntry(new byte[] { (byte) i }); + status = db.delete(null, key); + assertEquals(OperationStatus.SUCCESS, status); + } + } + + CheckpointConfig config = new CheckpointConfig(); + config.setForce(true); + env.checkpoint(config); + } + + /** + * Opens the environment and db. + */ + private void openEnv(boolean transactional, String nodeMax) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(transactional); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "true"); + if (nodeMax != null) { + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), nodeMax); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX_DUPTREE.getName(), nodeMax); + } + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + /* Make a db and open it. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(transactional); + dbConfig.setSortedDuplicates(useDups); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "testDB", dbConfig); + } + + /** + * Closes the db and environment. + */ + private void closeEnv() + throws DatabaseException { + + db.close(); + db = null; + env.close(); + env = null; + } + + private static class PruningHook implements TestHook { + Environment env; + + PruningHook(Environment env) { + this.env = env; + } + + public void doHook() { + DbInternal.getNonNullEnvImpl(env).getINCompressor(). + wakeup(); + Thread.yield(); + try { + Thread.sleep(100); + } catch (Throwable T) { + } + } + + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + + public void doIOHook() { + throw new UnsupportedOperationException(); + } + + public void hookSetup() { + throw new UnsupportedOperationException(); + } + + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + } +} diff --git a/test/com/sleepycat/je/incomp/INCompressorTest.java b/test/com/sleepycat/je/incomp/INCompressorTest.java new file mode 100644 index 0000000..3fe16c7 --- /dev/null +++ b/test/com/sleepycat/je/incomp/INCompressorTest.java @@ -0,0 +1,1297 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.incomp; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.evictor.Evictor.EvictionSource; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Test that BIN compression occurs in the various ways it is supposed to. + *

+ * These are:
+ * <ul>
+ * <li>transactional and non-transactional delete,</li>
+ * <li>delete duplicates and non-duplicates,</li>
+ * <li>removal of empty sub-trees (duplicates and non-duplicates),</li>
+ * <li>compression of BIN for deleted DIN subtree.</li>
+ * <li>removal of empty BIN after deleting a DIN subtree.</li>
+ * <li>undo causes compression of inserted LN during abort and recovery,</li>
+ * <li>redo causes compression of deleted LN during recovery,</li>
+ * </ul>
+ *
+ * Also test that compression retries occur after we attempt to compress but
+ * cannot because:
+ * <ul>
+ * <li>cursors are open on the BIN when the compressor dequeues them,</li>
+ * <li>cursors are open when attempting to delete a sub-tree (dup and non-dup
+ * are two separate code paths).</li>
+ * <li>a deleted key is locked during compression (NOT TESTED - this is very
+ * difficult to reproduce),</li>
+ * </ul>
+ *
+ * Possible problem: When we attempt to delete a subtree because the BIN is
+ * empty, we give up when NodeNotEmptyException is thrown by the search.
+ * However, this is thrown not only when entries have been added but also when
+ * there are cursors on the BIN; it seems like we should retry in the latter
+ * case. Or is it impossible to have a cursor on an empty BIN?
+ *
+ * We do not test here the last ditch effort to compress to make room in
+ * IN.insertEntry1; that should never happen in theory, so I don't think it
+ * is worthwhile to try to reproduce it.
+ *
+ * Note that when this test is run in replicated mode, for some reason
+ * there are deleted FileSummaryDB LNs, causing BINs in the compressor queue,
+ * and this throws off the test assertions. The brute force workaround here is
+ * to call env.compress() one extra time in replicated mode.
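+ *
+ * The pattern most of these tests exercise, as a sketch (no single test
+ * verbatim):
+ * <pre>
+ *   cursor.delete();   // slot marked deleted; compression not yet possible
+ *   cursor.close();    // cursor no longer pins the BIN
+ *   txn.commit();      // lock on the deleted key is released
+ *   env.compress();    // the compressor can now remove the slot
+ * </pre>
+ * With BIN-deltas enabled, lazy compression happens on a full BIN log
+ * instead: bin.setProhibitNextDelta(true); env.sync();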
        + */ +public class INCompressorTest extends DualTestCase { + + private static final CheckpointConfig forceConfig; + static { + forceConfig = new CheckpointConfig(); + forceConfig.setForce(true); + } + private final File envHome; + private Environment env; + private Database db; + private IN in; + private BIN bin; + /* Use high keys since we fill the first BIN with low keys. */ + private DatabaseEntry entry0 = new DatabaseEntry(new byte[] {0}); + private DatabaseEntry entry1 = new DatabaseEntry(new byte[] {1}); + private DatabaseEntry entry2 = new DatabaseEntry(new byte[] {2}); + private DatabaseEntry keyFound = new DatabaseEntry(); + private DatabaseEntry dataFound = new DatabaseEntry(); + + public INCompressorTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + super.tearDown(); + + if (env != null) { + env.close(); + } + } + + @Test + public void testDeleteTransactional() + throws DatabaseException { + + /* Transactional no-dups, 2 keys. */ + openAndInit(true, false); + OperationStatus status; + + /* Cursor appears on BIN. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Closing the cursor without commit does not compress. */ + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Commit without calling compress does not compress. */ + txn.commit(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 1, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testDeleteTransactionalWithBinDeltas() + throws DatabaseException { + + /* Transactional no-dups, 2 keys, binDeltas. */ + openAndInit(true, false, true); + OperationStatus status; + + /* Cursor appears on BIN. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 2, 1); + + /* + * Sync so that next operation dirties one slot only, which should be + * logged as a delta. + */ + env.sync(); + if (isReplicatedTest(getClass())) { + env.compress(); + } + checkINCompQueueSize(0); + + /* Compression does not occur when a cursor or txn is open. */ + status = cursor.delete(); + checkINCompQueueSize(0); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + checkINCompQueueSize(0); + + /* Even with txn closed, compression is not queued. */ + txn.commit(); + checkBinEntriesAndCursors(bin, 2, 0); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + checkINCompQueueSize(0); + + /* Lazy compression finally occurs during a full sync (no delta). */ + bin.setProhibitNextDelta(true); + env.sync(); + checkBinEntriesAndCursors(bin, 1, 0); + checkINCompQueueSize(0); + + /* Should be no change in parent nodes. 
*/ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + /** + * Ensures that deleted slots are re-inserted into a BIN when applying a + * delta. [#20737] + */ + @Test + public void testDeleteEvictFetchWithBinDeltas() + throws DatabaseException { + + /* Transactional no-dups, 2 keys, binDeltas. */ + openAndInit(true, false, true); + OperationStatus status; + + /* + * Sync so that next operation dirties one slot only, which should be + * logged as a delta. + */ + env.sync(); + if (isReplicatedTest(getClass())) { + env.compress(); + } + checkINCompQueueSize(0); + + /* Delete and hold lock with txn. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + status = cursor.putNoOverwrite(entry2, entry0); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + cursor.close(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Evict BIN which should log delta. */ + bin.latch(CacheMode.UNCHANGED); + DbInternal.getNonNullEnvImpl(env).getEvictor().doTestEvict + (bin, EvictionSource.CACHEMODE); + + /* Fetch BIN which should not delete slot when applying delta. */ + cursor = db.openCursor(txn, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(entry0, keyFound); + cursor.close(); + initInternalNodes(); + checkBinEntriesAndCursors(bin, 3, 0); + + txn.commit(); + + closeEnv(); + } + + @Test + public void testDeleteNonTransactional() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. */ + openAndInit(false, false); + OperationStatus status; + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Closing the cursor without calling compress does not compress. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 1, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testDeleteNonTransactionalWithBinDeltas() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys, binDeltas. */ + openAndInit(false, false, true); + OperationStatus status; + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 2, 1); + + /* + * Sync so that next operation dirties one slot only, which should be + * logged as a delta. + */ + env.sync(); + checkINCompQueueSize(0); + + /* Compression does not occur when a cursor is open. */ + status = cursor.delete(); + checkINCompQueueSize(0); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + checkINCompQueueSize(0); + + /* Even with cursor closed, compression is not queued. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 2, 0); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + checkINCompQueueSize(0); + + /* Lazy compression finally occurs during a full sync (no delta). 
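+ * Forcing a full BIN log via setProhibitNextDelta means the sync cannot
+ * write a delta, so the deleted slot is compressed away (2 entries -> 1).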
*/ + bin.setProhibitNextDelta(true); + env.sync(); + checkBinEntriesAndCursors(bin, 1, 0); + checkINCompQueueSize(0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testDeleteDuplicate() + throws DatabaseException { + + /* Non-transactional dups, 3 two-part keys. */ + openAndInit(false, true); + OperationStatus status; + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Closing the cursor without calling compress does not compress. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + checkBinEntriesAndCursors(bin, 2, 0); + + closeEnv(); + } + + @Test + public void testRemoveEmptyBIN() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. */ + openAndInit(false, false); + OperationStatus status; + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.getNext(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Closing the cursor without calling compress does not compress. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 0, 0); + + /* BIN is empty so parent entry should be gone also. */ + assertEquals(1, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testRemoveEmptyBINWithBinDeltas() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys, binDeltas. */ + openAndInit(false, false, true); + OperationStatus status; + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 2, 1); + + /* + * Sync so that next operation dirties one slot only, which should be + * logged as a delta. + */ + env.sync(); + checkINCompQueueSize(0); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + checkINCompQueueSize(0); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + checkINCompQueueSize(0); + + /* Move to next. */ + status = cursor.getNext(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + + /* + * A second deletion will be queued, since we should not log a delta. + * But it is not queued until cursor moves/closes, i.e., releases its + * locks. The same thing would apply to a txn commit. 
+ */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + checkINCompQueueSize(0); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + checkINCompQueueSize(0); + + /* Closing the cursor will queue the compression. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 2, 0); + checkINCompQueueSize(1); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 0, 0); + checkINCompQueueSize(0); + + /* BIN is empty so parent entry should be gone also. */ + assertEquals(1, in.getNEntries()); + + closeEnv(); + } + + /** + * DBINs are no longer used, but this test is retained for good measure. + */ + @Test + public void testRemoveEmptyDBIN() + throws DatabaseException { + + /* Non-transactional dups, 3 two-part keys. */ + openAndInit(false, true); + OperationStatus status; + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.getNext(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Closing the cursor without calling compress does not compress. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 1, 0); + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + /** + * DBINs are no longer used, but this test is retained for good measure. + */ + @Test + public void testRemoveEmptyDBINandBIN() + throws DatabaseException { + + /* Non-transactional dups, 3 two-part keys. */ + openAndInit(false, true); + OperationStatus status; + + /* Delete key 1, cursor appears on BIN, no compression yet. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getSearchKey(entry1, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Move cursor to 1st dup, cursor moves to BIN, no compresion yet. */ + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Delete the duplicates for key 0, no compression yet. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.getNext(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Closing the cursor without calling compress does not compress. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Finally compress can compress. */ + env.compress(); + + checkBinEntriesAndCursors(bin, 0, 0); + checkBinEntriesAndCursors(bin, 0, 0); + + /* BIN is empty so parent entry should be gone also. */ + assertEquals(1, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testAbortInsert() + throws DatabaseException { + + /* Transactional no-dups, 2 keys. */ + openAndInit(true, false); + + /* Add key 2, cursor appears on BIN. 
*/ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + cursor.put(entry2, entry0); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Closing the cursor without abort does not compress. */ + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Abort without calling compress does not compress. */ + txn.abort(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testAbortInsertWithBinDeltas() + throws DatabaseException { + + /* Transactional no-dups, 2 keys, binDeltas. */ + openAndInit(true, false, true); + + /* + * Sync so that next operation dirties one slot only, which should be + * logged as a delta. + */ + env.sync(); + if (isReplicatedTest(getClass())) { + env.compress(); + } + checkINCompQueueSize(0); + + /* Add key 2, cursor appears on BIN. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + cursor.put(entry2, entry0); + checkBinEntriesAndCursors(bin, 3, 1); + checkINCompQueueSize(0); + + /* Closing the cursor without abort does not compress. */ + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 0); + checkINCompQueueSize(0); + + /* + * Undo will not queue compression because BIN-delta should be logged. + */ + txn.abort(); + checkBinEntriesAndCursors(bin, 3, 0); + checkINCompQueueSize(0); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 0); + checkINCompQueueSize(0); + + /* Lazy compression finally occurs during a full sync (no delta). */ + bin.setProhibitNextDelta(true); + env.sync(); + checkBinEntriesAndCursors(bin, 2, 0); + checkINCompQueueSize(0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + /** + * DBINs are no longer used, but this test is retained for good measure. + */ + @Test + public void testAbortInsertDuplicate() + throws DatabaseException { + + /* Transactional dups, 3 two-part keys. */ + openAndInit(true, true); + + /* Add datum 2 for key 0, cursor appears on BIN. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + cursor.put(entry0, entry2); + checkBinEntriesAndCursors(bin, 4, 1); + + /* Closing the cursor without abort does not compress. */ + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 4, 0); + + /* Abort without calling compress does not compress. */ + txn.abort(); + checkBinEntriesAndCursors(bin, 4, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testRollBackInsert() + throws DatabaseException { + + /* Transactional no-dups, 2 keys. */ + openAndInit(true, false); + + /* Add key 2, cursor appears on BIN. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + cursor.put(entry2, entry0); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Closing the cursor without abort does not compress. */ + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Checkpoint to preserve internal nodes through recovery. */ + env.checkpoint(forceConfig); + + /* Abort without calling compress does not compress. 
*/ + txn.abort(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* + * Shutdown and reopen to run recovery. The checkpoint will compress. + */ + db.close(); + closeNoCheckpoint(env); + env = null; + openEnv(true, false); + initInternalNodes(); + + /* + * In replicated tests, we expect 64 BINs. In non-replicated tests, + * there should be 2. + */ + if (isReplicatedTest(getClass())) { + checkBinEntriesAndCursors(bin, 64, 0); + } else { + checkBinEntriesAndCursors(bin, 3, 0); + } + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + + closeEnv(); + } + + @Test + public void testRollBackInsertWithBinDeltas() + throws DatabaseException { + + /* Transactional no-dups, 2 keys, binDeltas. */ + openAndInit(true, false, true); + + /* + * Sync so that next operation dirties one slot only, which should be + * logged as a delta. + */ + env.sync(); + if (isReplicatedTest(getClass())) { + env.compress(); + } + checkINCompQueueSize(0); + + /* Add key 2, cursor appears on BIN. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + cursor.put(entry2, entry0); + checkBinEntriesAndCursors(bin, 3, 1); + checkINCompQueueSize(0); + + /* Closing the cursor without abort does not compress. */ + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 0); + checkINCompQueueSize(0); + + /* Checkpoint to preserve internal nodes through recovery. */ + env.checkpoint(forceConfig); + + /* + * Undo will not queue compression because BIN-delta should be logged. + */ + txn.abort(); + checkBinEntriesAndCursors(bin, 3, 0); + checkINCompQueueSize(0); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 0); + checkINCompQueueSize(0); + + /* + * Shutdown and reopen to run recovery. The checkpoint will not + * compress because deltas are logged. + */ + db.close(); + closeNoCheckpoint(env); + env = null; + openEnv(true, false, true); + initInternalNodes(); + + if (isReplicatedTest(getClass())) { + checkBinEntriesAndCursors(bin, 64, 0); + } else { + checkBinEntriesAndCursors(bin, 3, 0); + checkINCompQueueSize(0); + } + + /* Lazy compression finally occurs during a full sync (no delta). */ + bin.setDirty(true); + bin.setProhibitNextDelta(true); + env.sync(); + checkBinEntriesAndCursors(bin, 2, 0); + if (!isReplicatedTest(getClass())) { + checkINCompQueueSize(0); + } + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + /** + * DBINs are no longer used, but this test is retained for good measure. + */ + @Test + public void testRollBackInsertDuplicate() + throws DatabaseException { + + /* Transactional dups, 3 two-keys. */ + openAndInit(true, true); + + /* Add datum 2 for key 0, cursor appears on BIN. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + cursor.put(entry0, entry2); + checkBinEntriesAndCursors(bin, 4, 1); + + /* Closing the cursor without abort does not compress. */ + cursor.close(); + env.compress(); + checkBinEntriesAndCursors(bin, 4, 0); + + /* Checkpoint to preserve internal nodes through recovery. */ + env.checkpoint(forceConfig); + + /* Abort without calling compress does not compress. */ + txn.abort(); + checkBinEntriesAndCursors(bin, 4, 0); + + /* + * Shutdown and reopen to run recovery. The checkpoint will not + * compress because deltas are logged. 
+ */ + db.close(); + closeNoCheckpoint(env); + env = null; + openEnv(true, true); + initInternalNodes(); + + /* + * In replicated tests, we expect 64 BINs. In non-replicated tests, + * there should be 2. + */ + if (isReplicatedTest(getClass())) { + checkBinEntriesAndCursors(bin, 64, 0); + } else { + checkBinEntriesAndCursors(bin, 4, 0); + } + + /* Lazy compression finally occurs during a full sync (no delta). */ + bin.setDirty(true); + bin.setProhibitNextDelta(true); + env.sync(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + @Test + public void testRollForwardDelete() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. */ + openAndInit(false, false); + OperationStatus status; + + /* Checkpoint to preserve internal nodes through recovery. */ + env.checkpoint(forceConfig); + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 2, 1); + + /* Closing the cursor without calling compress does not compress. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* + * Shutdown and reopen to run recovery. The checkpoint will compress. + */ + db.close(); + closeNoCheckpoint(env); + openEnv(false, false); + initInternalNodes(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 1, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + + closeEnv(); + } + + /** + * DBINs are no longer used, but this test is retained for good measure. + */ + @Test + public void testRollForwardDeleteDuplicate() + throws DatabaseException { + + /* Non-transactional dups, 3 two-part keys. */ + openAndInit(false, true); + OperationStatus status; + + /* Checkpoint to preserve internal nodes through recovery. */ + env.checkpoint(forceConfig); + + /* Cursor appears on BIN. */ + Cursor cursor = db.openCursor(null, null); + status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Delete without closing the cursor does not compress. */ + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + checkBinEntriesAndCursors(bin, 3, 1); + + /* Closing the cursor without calling compress does not compress. */ + cursor.close(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* + * Shutdown and reopen to run recovery. The checkpoint will compress. + */ + db.close(); + closeNoCheckpoint(env); + openEnv(false, true); + initInternalNodes(); + checkBinEntriesAndCursors(bin, 3, 0); + + /* Finally compress can compress. */ + env.compress(); + checkBinEntriesAndCursors(bin, 2, 0); + + /* Should be no change in parent nodes. */ + assertEquals(2, in.getNEntries()); + checkBinEntriesAndCursors(bin, 2, 0); + + closeEnv(); + } + + /** + * Test that we can handle cases where lazy compression runs first, but the + * daemon handles pruning. Testing against BINs. + */ + @Test + public void testLazyPruning() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. 
*/ + openAndInit(false, false); + + deleteAndLazyCompress(false); + + /* Now compress, empty BIN should disappear. */ + env.compress(); + checkINCompQueueSize(0); + assertEquals(1, in.getNEntries()); + + closeEnv(); + } + + /** + * Test that we can handle cases where lazy compression runs first, but the + * daemon handles pruning. Testing against DBINs. [#11778] + * DBINs are no longer used, but this test is retained for good measure. + */ + @Test + public void testLazyPruningDups() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. */ + openAndInit(false, true); + + deleteAndLazyCompress(true); + + /* Now compress, empty BIN should disappear. */ + env.compress(); + /* Compress again. Empty BIN should disappear. */ + env.compress(); + checkINCompQueueSize(0); + assertEquals(1, in.getNEntries()); + + closeEnv(); + } + + /** + * Scan over an empty DBIN. [#11778] + * + * WARNING: This test no longer tests the situation it originally intended, + * since DBINs and DBINs are obsolete, but it is left intact for posterity. + */ + @Test + public void testEmptyInitialDBINScan() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. */ + openAndInit(false, true); + + deleteAndLazyCompress(true); + + /* + * Have IN with two entries, first entry is BIN with 1 entry. That + * entry is DIN with 1 entry. That entry is a DBIN with 0 entries. + * Position the cursor at the first entry so that we move over that + * zero-entry DBIN. + */ + Cursor cursor = db.openCursor(null, null); + OperationStatus status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + assertTrue(keyFound.getData()[0] == 64); + cursor.close(); + closeEnv(); + } + + /** + * Scan over an empty BIN. This looks very similar to + * com.sleepycat.je.test.SR11297Test. [#11778] + */ + @Test + public void testEmptyInitialBINScan() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. */ + openAndInit(false, false); + + deleteAndLazyCompress(false); + + /* + * Have IN with two entries, first entry is BIN with 0 entries. + * Position the cursor at the first entry so that we move over that + * zero-entry BIN. + */ + Cursor cursor = db.openCursor(null, null); + OperationStatus status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + assertTrue(keyFound.getData()[0] == 64); + cursor.close(); + closeEnv(); + } + + /** + * Test that we can handle cases where lazy compression runs first, but the + * daemon handles pruning. + */ + @Test + public void testNodeNotEmpty() + throws DatabaseException { + + /* Non-transactional no-dups, 2 keys. */ + openAndInit(false, false); + + deleteAndLazyCompress(false); + + /* + * We now have an entry on the compressor queue, but let's re-insert a + * value to make pruning hit the NodeNotEmptyException case. + */ + assertEquals(OperationStatus.SUCCESS, db.put(null, entry0, entry0)); + checkBinEntriesAndCursors(bin, 1, 0); + + env.compress(); + assertEquals(2, in.getNEntries()); + checkINCompQueueSize(0); + + closeEnv(); + } + + /* Todo: Check cursor movement across an empty bin. */ + + /* Delete all records from the first bin and invoke lazy compression. */ + private void deleteAndLazyCompress(boolean doDups) + throws DatabaseException { + + /* Position the cursor at the first BIN and delete both keys. 
*/ + Cursor cursor = db.openCursor(null, null); + OperationStatus status = cursor.getFirst(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + checkBinEntriesAndCursors(bin, doDups ? 3 : 2, 1); + + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.getNext(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + if (doDups) { + status = cursor.getNext(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + } + cursor.close(); + + /* + * Do lazy compression, leaving behind an empty BIN. + */ + checkINCompQueueSize(1); + env.checkpoint(forceConfig); + checkBinEntriesAndCursors(bin, 0, 0); + + /* BIN is empty but tree pruning hasn't happened. */ + assertEquals(2, in.getNEntries()); + checkINCompQueueSize(1); + } + + /** + * Checks for expected entry and cursor counts on the given BIN. + */ + private void checkBinEntriesAndCursors(BIN checkBin, + int nEntries, + int nCursors) { + assertEquals("nEntries", nEntries, checkBin.getNEntries()); + assertEquals("nCursors", nCursors, checkBin.nCursors()); + } + + /** + * Check expected size of the INCompressor queue. + */ + private void checkINCompQueueSize(int expected) { + assertEquals(expected, + DbInternal.getNonNullEnvImpl(env).getINCompressorQueueSize()); + } + + private void openAndInit(boolean transactional, boolean dups) { + openAndInit(transactional, dups, false /*binDeltas*/); + } + + /** + * Opens the environment and db and writes 2 records (3 if dups are used). + * + *

+ * <p>Without dups: {0,0}, {1,0}. This gives two LNs in the BIN.</p>
+ *
+ * <p>With dups: {0,0}, {0,1}, {1,0}. This gives three LNs in the BIN.</p>
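+ *
+ * <p>As a sketch, the equivalent put calls (key0/key1 stand for the
+ * single-byte keys 0 and 1; entry0/entry1 are the test's one-byte data
+ * entries):</p>
+ * <pre>
+ *   db.put(null, key0, entry0);               // {0,0}
+ *   db.put(null, key1, entry0);               // {1,0}
+ *   if (dups) { db.put(null, key0, entry1); } // {0,1}
+ * </pre>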
        + */ + private void openAndInit(boolean transactional, + boolean dups, + boolean binDeltas) + throws DatabaseException { + + openEnv(transactional, dups, binDeltas); + + /* + * We need at least 2 BINs, otherwise empty BINs won't be deleted. So + * we add keys until the BIN splits, then delete everything in the + * first BIN except the first two keys. Those are the keys we'll use + * for testing, and are key values 0 and 1. + */ + BIN firstBin = null; + OperationStatus status; + + for (int i = 0;; i += 1) { + DatabaseEntry key = new DatabaseEntry(new byte[] { (byte) i }); + status = db.put(null, key, entry0); + assertEquals(OperationStatus.SUCCESS, status); + + Cursor cursor = db.openCursor(null, null); + + status = cursor.getLast(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + + BIN b = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + if (firstBin == null) { + firstBin = b; + } else if (firstBin != b) { + /* Now delete all but the first two keys in the first BIN. */ + while (firstBin.getNEntries() > 2) { + cursor = db.openCursor(null, null); + keyFound.setData(entry2.getData()); + status = + cursor.getSearchKeyRange(keyFound, dataFound, null); + assertEquals(OperationStatus.SUCCESS, status); + cursor.close(); + status = db.delete(null, keyFound); + assertEquals(OperationStatus.SUCCESS, status); + env.compress(); + } + break; + } + } + + /* Write dup records. */ + if (dups) { + status = db.put(null, entry0, entry1); + assertEquals(OperationStatus.SUCCESS, status); + } + + /* Set in, bin. */ + initInternalNodes(); + assertSame(bin, firstBin); + + /* Check that all tree nodes are populated. */ + assertEquals(2, in.getNEntries()); + checkBinEntriesAndCursors(bin, dups ? 3 : 2, 0); + } + + /** + * Initialize IN, BIN. + */ + private void initInternalNodes() + throws DatabaseException { + + /* Find the BIN. */ + Cursor cursor = db.openCursor(null, null); + OperationStatus status = + cursor.getFirst(keyFound, dataFound, LockMode.READ_UNCOMMITTED); + assertEquals(OperationStatus.SUCCESS, status); + bin = DbInternal.getCursorImpl(cursor).getBIN(); + cursor.close(); + + /* Find the IN parent of the BIN. */ + bin.latch(); + in = DbInternal.getDbImpl(db).getTree().getParentINForChildIN( + bin, false, /*useTargetLevel*/ + true, /*doFetch*/ CacheMode.DEFAULT).parent; + assertNotNull(in); + in.releaseLatch(); + } + + private void openEnv(boolean transactional, boolean dups) { + openEnv(transactional, dups, false /*binDeltas*/); + } + + /** + * Opens the environment and db. + */ + private void openEnv(boolean transactional, + boolean dups, + boolean binDeltas) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(transactional); + envConfig.setConfigParam + (EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + if (binDeltas) { + /* Enable deltas when only 1/2 records are modified. */ + envConfig.setConfigParam + (EnvironmentConfig.TREE_BIN_DELTA, "60"); + } + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = create(envHome, envConfig); + + /* Make a db and open it. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(transactional); + dbConfig.setSortedDuplicates(dups); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "testDB", dbConfig); + } + + /** + * Closes the db and environment. 
+ */ + private void closeEnv() + throws DatabaseException { + + db.close(); + db = null; + close(env); + env = null; + } +} diff --git a/test/com/sleepycat/je/je.properties b/test/com/sleepycat/je/je.properties new file mode 100644 index 0000000..cc52c90 --- /dev/null +++ b/test/com/sleepycat/je/je.properties @@ -0,0 +1,4 @@ +je.env.recovery = false +je.log.totalBufferBytes=7001 +je.log.numBuffers=200 +je.txn.durability=no_sync,no_sync,none \ No newline at end of file diff --git a/test/com/sleepycat/je/jmx/JEApplicationMBean.java b/test/com/sleepycat/je/jmx/JEApplicationMBean.java new file mode 100644 index 0000000..2f214b7 --- /dev/null +++ b/test/com/sleepycat/je/jmx/JEApplicationMBean.java @@ -0,0 +1,328 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import java.io.File; +import java.lang.reflect.Constructor; +import java.util.List; + +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.AttributeNotFoundException; +import javax.management.DynamicMBean; +import javax.management.InvalidAttributeValueException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanConstructorInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanNotificationInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanParameterInfo; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; + +/** + * JEApplicationMBean is an example of how a JE application can incorporate JE + * monitoring into its existing MBean. It may be installed as is, or used as a + * starting point for building a MBean which includes JE support. + *

+ * <p>
+ * JE management is divided between the JEApplicationMBean class and
+ * JEMBeanHelper class. JEApplicationMBean contains an instance of
+ * JEMBeanHelper, which knows about JE attributes, operations and
+ * notifications. JEApplicationMBean itself has the responsibility of
+ * configuring, opening and closing the JE environment along with any other
+ * resources used by the application, and maintains a
+ * com.sleepycat.je.Environment handle.
+ * <p>
+ * The approach taken for accessing the environment is an application specific
+ * choice. Some of the salient considerations are:
+ * <ul>
+ * <li>Applications may open one or many Environment objects per process
+ * against a given environment.</li>
+ * <li>All Environment handles reference the same underlying JE environment
+ * implementation object.</li>
+ * <li>The first Environment object instantiated in the process does the real
+ * work of configuring and opening the environment. Follow-on instantiations of
+ * Environment merely increment a reference count. Likewise,
+ * Environment.close() only does real work when it's called by the last
+ * Environment object in the process.</li>
+ * </ul>
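+ *
+ * Installing the MBean is plain JMX; for example (a sketch, with an
+ * illustrative home directory and object name):
+ * <pre>
+ *   MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+ *   server.registerMBean(
+ *       new JEApplicationMBean("/path/to/envHome"),
+ *       new ObjectName("com.sleepycat.je:type=JEApplication"));
+ * </pre>
+ *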
        + * Another MBean approach for environment access can be seen in + * com.sleepycat.je.jmx.JEMonitor. That MBean does not take responsibility for + * opening and closing environments, and can only operate against already-open + * environments. + */ + +public class JEApplicationMBean implements DynamicMBean { + + private static final String DESCRIPTION = + "A MBean for an application which uses JE. Provides open and close " + + "operations which configure and open a JE environment as part of the "+ + "applications's resources. Also supports general JE monitoring."; + + private MBeanInfo mbeanInfo; // this MBean's visible interface. + private JEMBeanHelper jeHelper; // gets JE management interface + private Environment targetEnv; // saved environment handle + + /** + * This MBean provides an open operation to open the JE environment. + */ + public static final String OP_OPEN = "openJE"; + + /** + * This MBean provides a close operation to release the JE environment. + * Note that environments must be closed to release resources. + */ + public static final String OP_CLOSE = "closeJE"; + + /** + * Instantiate a JEApplicationMBean + * + * @param environmentHome home directory of the target JE environment. + */ + public JEApplicationMBean(String environmentHome) { + + File environmentDirectory = new File(environmentHome); + jeHelper = new JEMBeanHelper(environmentDirectory, true); + resetMBeanInfo(); + } + + /** + * @see DynamicMBean#getAttribute + */ + public Object getAttribute(String attributeName) + throws AttributeNotFoundException, + MBeanException { + + return jeHelper.getAttribute(targetEnv, attributeName); + } + + /** + * @see DynamicMBean#setAttribute + */ + public void setAttribute(Attribute attribute) + throws AttributeNotFoundException, + InvalidAttributeValueException { + + jeHelper.setAttribute(targetEnv, attribute); + } + + /** + * @see DynamicMBean#getAttributes + */ + public AttributeList getAttributes(String[] attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("Attributes cannot be null"); + } + + /* Get each requested attribute. */ + AttributeList results = new AttributeList(); + for (int i = 0; i < attributes.length; i++) { + try { + String name = attributes[i]; + Object value = jeHelper.getAttribute(targetEnv, name); + results.add(new Attribute(name, value)); + } catch (Exception e) { + e.printStackTrace(); + } + } + return results; + } + + /** + * @see DynamicMBean#setAttributes + */ + public AttributeList setAttributes(AttributeList attributes) { + + /* Sanity checking. */ + if (attributes == null) { + throw new IllegalArgumentException("attribute list can't be null"); + } + + /* Set each attribute specified. */ + AttributeList results = new AttributeList(); + for (int i = 0; i < attributes.size(); i++) { + Attribute attr = (Attribute) attributes.get(i); + try { + /* Set new value. */ + jeHelper.setAttribute(targetEnv, attr); + + /* + * Add the name and new value to the result list. Be sure + * to ask the MBean for the new value, rather than simply + * using attr.getValue(), because the new value may not + * be same if it is modified according to the JE + * implementation. 
+ */ + String name = attr.getName(); + Object newValue = jeHelper.getAttribute(targetEnv, name); + results.add(new Attribute(name, newValue)); + } catch (Exception e) { + e.printStackTrace(); + } + } + return results; + } + + /** + * @see DynamicMBean#invoke + */ + public Object invoke(String actionName, + Object[] params, + String[] signature) + throws MBeanException { + + Object result = null; + + if (actionName == null) { + throw new IllegalArgumentException("actionName cannot be null"); + } + + if (actionName.equals(OP_OPEN)) { + openEnvironment(); + return null; + } else if (actionName.equals(OP_CLOSE)) { + closeEnvironment(); + return null; + } else { + result = jeHelper.invoke(targetEnv, actionName, params, signature); + } + + return result; + } + + /** + * @see DynamicMBean#getMBeanInfo + */ + public MBeanInfo getMBeanInfo() { + return mbeanInfo; + } + + /** + * Create the available management interface for this environment. + * The attributes and operations available vary according to + * environment configuration. + * + */ + private synchronized void resetMBeanInfo() { + + /* + * Get JE attributes, operation and notification information + * from JEMBeanHelper. An application may choose to add functionality + * of its own when constructing the MBeanInfo. + */ + + /* Attributes. */ + List attributeList = jeHelper.getAttributeList(targetEnv); + MBeanAttributeInfo[] attributeInfo = + new MBeanAttributeInfo[attributeList.size()]; + attributeList.toArray(attributeInfo); + + /* Constructors. */ + Constructor[] constructors = this.getClass().getConstructors(); + MBeanConstructorInfo[] constructorInfo = + new MBeanConstructorInfo[constructors.length]; + for (int i = 0; i < constructors.length; i++) { + constructorInfo[i] = + new MBeanConstructorInfo(this.getClass().getName(), + constructors[i]); + } + + /* Operations. */ + + /* + * Get the list of operations available from the jeHelper. Then add + * an open and close operation. + */ + List operationList = jeHelper.getOperationList(targetEnv); + if (targetEnv == null) { + operationList.add( + new MBeanOperationInfo(OP_OPEN, + "Configure and open the JE environment.", + new MBeanParameterInfo[0], // no params + "java.lang.Boolean", + MBeanOperationInfo.ACTION_INFO)); + } else { + operationList.add( + new MBeanOperationInfo(OP_CLOSE, + "Close the JE environment.", + new MBeanParameterInfo[0], // no params + "void", + MBeanOperationInfo.ACTION_INFO)); + } + + MBeanOperationInfo[] operationInfo = + new MBeanOperationInfo[operationList.size()]; + operationList.toArray(operationInfo); + + /* Notifications. */ + MBeanNotificationInfo[] notificationInfo = + jeHelper.getNotificationInfo(targetEnv); + + /* Generate the MBean description. */ + mbeanInfo = new MBeanInfo(this.getClass().getName(), + DESCRIPTION, + attributeInfo, + constructorInfo, + operationInfo, + notificationInfo); + } + + /** + * Open a JE environment using the configuration specified through + * MBean attributes and recorded within the JEMBeanHelper. + */ + private void openEnvironment() + throws MBeanException { + + try { + if (targetEnv == null) { + /* + * The environment configuration has been set through + * mbean attributes managed by the JEMBeanHelper. + */ + targetEnv = + new Environment(jeHelper.getEnvironmentHome(), + jeHelper.getEnvironmentOpenConfig()); + resetMBeanInfo(); + } + } catch (DatabaseException e) { + throw new MBeanException(e); + } + } + + /** + * Release the environment handle contained within the MBean to properly + * release resources. 
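+ *
+ * <p>Typically reached via a JMX invoke of the OP_CLOSE operation, e.g.
+ * (sketch): server.invoke(name, "closeJE", new Object[0],
+ * new String[0]);</p>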
+ */ + private void closeEnvironment() + throws MBeanException { + + try { + if (targetEnv != null) { + targetEnv.close(); + targetEnv = null; + resetMBeanInfo(); + } + } catch (DatabaseException e) { + throw new MBeanException(e); + } + } +} diff --git a/test/com/sleepycat/je/jmx/JEDiagnosticsTest.java b/test/com/sleepycat/je/jmx/JEDiagnosticsTest.java new file mode 100644 index 0000000..7cedfd3 --- /dev/null +++ b/test/com/sleepycat/je/jmx/JEDiagnosticsTest.java @@ -0,0 +1,260 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.lang.reflect.Method; +import java.util.logging.Handler; +import java.util.logging.Level; + +import javax.management.Attribute; +import javax.management.DynamicMBean; +import javax.management.MBeanInfo; +import javax.management.MBeanOperationInfo; + +import org.junit.Test; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * @excludeDualMode + * + * Instantiate and exercise the JEDiagnostics. + */ +public class JEDiagnosticsTest extends TestBase { + + private static final boolean DEBUG = false; + private File envHome; + + public JEDiagnosticsTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Test JEDiagnostics' attribute getters. + */ + @Test + public void testGetters() + throws Throwable { + + Environment env = null; + try { + if (!this.getClass().getName().contains("rep")) { + env = openEnv(); + DynamicMBean mbean = createMBean(env); + MBeanTestUtils.validateGetters(mbean, 2, DEBUG); + env.close(); + } + + env = openEnv(); + String environmentDir = env.getHome().getPath(); + DynamicMBean mbean = createMBean(env); + MBeanTestUtils.validateGetters(mbean, 2, DEBUG); + env.close(); + + MBeanTestUtils.checkForNoOpenHandles(environmentDir); + } catch (Throwable t) { + t.printStackTrace(); + if (env != null) { + env.close(); + } + throw t; + } + } + + /* Create a DynamicMBean using a standalone or replicated Environment. */ + protected DynamicMBean createMBean(Environment env) { + return new JEDiagnostics(env); + } + + /** + * Test JEDiagnostics' attribute setters. + */ + @Test + public void testSetters() + throws Throwable { + + Environment env = null; + try { + env = openEnv(); + String environmentDir = env.getHome().getPath(); + + DynamicMBean mbean = createMBean(env); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Class envImplClass = envImpl.getClass(); + + /* Test setting ConsoleHandler's level. */ + Method getConsoleHandler = + envImplClass.getMethod("getConsoleHandler", (Class[]) null); + checkAttribute(env, + mbean, + getConsoleHandler, + "consoleHandlerLevel", + "OFF"); + + /* Test setting FileHandler's level. 
+             */
+            Method getFileHandler =
+                envImplClass.getMethod("getFileHandler", (Class[]) null);
+            checkAttribute(env,
+                           mbean,
+                           getFileHandler,
+                           "fileHandlerLevel",
+                           "OFF");
+
+            env.close();
+            MBeanTestUtils.checkForNoOpenHandles(environmentDir);
+        } catch (Throwable t) {
+            t.printStackTrace();
+
+            if (env != null) {
+                env.close();
+            }
+
+            throw t;
+        }
+    }
+
+    /**
+     * Test the invocation of JEDiagnostics' operations.
+     */
+    @Test
+    public void testOperations()
+        throws Throwable {
+
+        Environment env = null;
+        try {
+            env = openEnv();
+            String environmentDir = env.getHome().getPath();
+            DynamicMBean mbean = createMBean(env);
+
+            /*
+             * RepJEDiagnostics has three operations, while JEDiagnostics
+             * lacks the getRepStats operation.
+             */
+            validateOperations(mbean, env, 1);
+            env.close();
+
+            MBeanTestUtils.checkForNoOpenHandles(environmentDir);
+        } catch (Throwable t) {
+            t.printStackTrace();
+
+            if (env != null) {
+                env.close();
+            }
+
+            throw t;
+        }
+    }
+
+    /* Verify the correctness of JEDiagnostics' operations. */
+    private void validateOperations(DynamicMBean mbean,
+                                    Environment env,
+                                    int numExpectedOperations)
+        throws Throwable {
+
+        MBeanTestUtils.checkOpNum(mbean, numExpectedOperations, DEBUG);
+
+        MBeanInfo info = mbean.getMBeanInfo();
+        MBeanOperationInfo[] ops = info.getOperations();
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        for (int i = 0; i < ops.length; i++) {
+            String opName = ops[i].getName();
+            if (opName.equals("resetLoggerLevel")) {
+
+                /*
+                 * If this method is invoked by RepJEDiagnostics, the logger
+                 * name should contain RepImpl, not EnvironmentImpl.
+                 */
+                Object[] params = new Object[] {"EnvironmentImpl", "OFF"};
+                if (this.getClass().getName().contains("rep")) {
+                    params = new Object[] {"RepImpl", "OFF"};
+                }
+                Object result = mbean.invoke
+                    (opName, params,
+                     new String[] {"java.lang.String", "java.lang.String"});
+                assertEquals(Level.OFF, envImpl.getLogger().getLevel());
+                assertTrue(result == null);
+            } else {
+
+                /*
+                 * Check the correctness of the getRepStats operation, which
+                 * exists only in RepJEDiagnostics.
+                 */
+                if (this.getClass().getName().contains("rep")) {
+                    Object result = mbean.invoke(opName, null, null);
+                    assertTrue(result instanceof String);
+                    MBeanTestUtils.checkObjectType
+                        ("Operation", opName, ops[i].getReturnType(), result);
+                }
+            }
+        }
+    }
+
+    /* Test this MBean's serialization. */
+    @Test
+    public void testSerializable()
+        throws Throwable {
+
+        Environment env = openEnv();
+
+        DynamicMBean mbean = createMBean(env);
+        MBeanTestUtils.doTestSerializable(mbean);
+
+        env.close();
+    }
+
+    /* Check this MBean's attributes. */
+    private void checkAttribute(Environment env,
+                                DynamicMBean mbean,
+                                Method getMethod,
+                                String attributeName,
+                                Object newValue)
+        throws Exception {
+
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        Object result = getMethod.invoke(envImpl, (Object[]) null);
+        assertTrue(!result.toString().equals(newValue.toString()));
+
+        mbean.setAttribute(new Attribute(attributeName, newValue));
+
+        envImpl = DbInternal.getNonNullEnvImpl(env);
+        Handler handler = (Handler) getMethod.invoke(envImpl, (Object[]) null);
+        assertEquals(newValue.toString(), handler.getLevel().toString());
+
+        Object mbeanNewValue = mbean.getAttribute(attributeName);
+        assertEquals(newValue.toString(), mbeanNewValue.toString());
+    }
+
+    /*
+     * Helper to open an environment.
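+     * The configuration is deliberately minimal. A replicated variant of
+     * this test would be expected to override openEnv (and createMBean) to
+     * exercise a replicated environment instead.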
+ */ + protected Environment openEnv() + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + return new Environment(envHome, envConfig); + } +} diff --git a/test/com/sleepycat/je/jmx/JEMonitorTest.java b/test/com/sleepycat/je/jmx/JEMonitorTest.java new file mode 100644 index 0000000..78265da --- /dev/null +++ b/test/com/sleepycat/je/jmx/JEMonitorTest.java @@ -0,0 +1,275 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.lang.reflect.Method; + +import javax.management.Attribute; +import javax.management.DynamicMBean; +import javax.management.MBeanException; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * @excludeDualMode + * + * Instantiate and exercise the JEMonitor. + */ +public class JEMonitorTest extends TestBase { + + private static final boolean DEBUG = false; + private File envHome; + + public JEMonitorTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Test JEMonitor's attributes getters. + */ + @Test + public void testGetters() + throws Throwable { + + Environment env = null; + try { + env = openEnv(true); + String environmentDir = env.getHome().getPath(); + DynamicMBean mbean = createMBean(env); + MBeanTestUtils.validateGetters(mbean, 8, DEBUG); // see the change. + env.close(); + + /* + * Replicated Environment must be transactional, so RepJEMonitor + * can't open an Environment with non-transactional. + */ + if (!this.getClass().getName().contains("rep")) { + env = openEnv(false); + mbean = createMBean(env); + MBeanTestUtils.validateGetters(mbean, 6, DEBUG); + env.close(); + } + + MBeanTestUtils.checkForNoOpenHandles(environmentDir); + } catch (Throwable t) { + t.printStackTrace(); + if (env != null) { + env.close(); + } + + throw t; + } + } + + /* + * Create a DynamicMBean with the assigned standalone or replicated + * Environment. + */ + protected DynamicMBean createMBean(Environment env) { + return new JEMonitor(env); + } + + /** + * Test JEMonitor's attributes setters. + */ + @Test + public void testSetters() + throws Throwable { + + Environment env = null; + try { + /* Mimic an application by opening an environment. */ + env = openEnv(true); + String environmentDir = env.getHome().getPath(); + + /* Open an Mbean and set the Environment home. */ + DynamicMBean mbean = createMBean(env); + + /* + * Try setting different attributes. Check against the + * initial value, and the value after setting. 
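+             *
+             * Each check below amounts to a plain JMX attribute write; the
+             * first one, for example, is roughly equivalent to:
+             *
+             *    mbean.setAttribute(
+             *        new Attribute(JEMBeanHelper.ATT_CACHE_SIZE,
+             *                      new Long(100000)));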
+             */
+            EnvironmentConfig config = env.getConfig();
+            Class configClass = config.getClass();
+
+            Method getCacheSize =
+                configClass.getMethod("getCacheSize", (Class[]) null);
+            checkAttribute(env,
+                           mbean,
+                           getCacheSize,
+                           JEMBeanHelper.ATT_CACHE_SIZE,
+                           new Long(100000)); // new value
+
+            Method getCachePercent =
+                configClass.getMethod("getCachePercent", (Class[]) null);
+            checkAttribute(env,
+                           mbean,
+                           getCachePercent,
+                           JEMBeanHelper.ATT_CACHE_PERCENT,
+                           new Integer(10));
+            env.close();
+
+            MBeanTestUtils.checkForNoOpenHandles(environmentDir);
+        } catch (Throwable t) {
+            t.printStackTrace();
+
+            if (env != null) {
+                env.close();
+            }
+
+            throw t;
+        }
+    }
+
+    /**
+     * Test the correctness of JEMonitor's operation invocations.
+     */
+    @Test
+    public void testOperations()
+        throws Throwable {
+
+        Environment env = null;
+        try {
+            env = openEnv(true);
+            String environmentDir = env.getHome().getPath();
+            DynamicMBean mbean = createMBean(env);
+            int opNum = 0;
+            if (!this.getClass().getName().contains("rep")) {
+                opNum = 8;
+            } else {
+                opNum = 10;
+            }
+            MBeanTestUtils.validateMBeanOperations
+                (mbean, opNum, true, null, null, DEBUG);
+
+            /*
+             * Getting database stats against a non-existent db ought to
+             * throw an exception.
+             */
+            try {
+                MBeanTestUtils.validateMBeanOperations
+                    (mbean, opNum, true, "bozo", null, DEBUG);
+                fail("Should not have run stats on a non-existent db");
+            } catch (MBeanException expected) {
+                // ignore
+            }
+
+            /*
+             * Make sure the vanilla db open within the helper can open
+             * a db created with a non-default configuration.
+             */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            Database db = env.openDatabase(null, "bozo", dbConfig);
+
+            /* Insert a record. */
+            DatabaseEntry entry = new DatabaseEntry();
+            IntegerBinding.intToEntry(1, entry);
+            db.put(null, entry, entry);
+
+            MBeanTestUtils.validateMBeanOperations
+                (mbean, opNum, true, "bozo", new String[] {"bozo"}, DEBUG);
+            db.close();
+            env.close();
+
+            /*
+             * A replicated Environment must be transactional, so we can't
+             * test RepJEMonitor by opening a non-transactional Environment.
+             */
+            if (!this.getClass().getName().contains("rep")) {
+                env = openEnv(false);
+                mbean = createMBean(env);
+                MBeanTestUtils.validateMBeanOperations
+                    (mbean, 7, true, null, null, DEBUG);
+                env.close();
+            }
+
+            MBeanTestUtils.checkForNoOpenHandles(environmentDir);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            if (env != null) {
+                env.close();
+            }
+            throw t;
+        }
+    }
+
+    /* Check the correctness of JEMonitor's attributes. */
+    private void checkAttribute(Environment env,
+                                DynamicMBean mbean,
+                                Method configMethod,
+                                String attributeName,
+                                Object newValue)
+        throws Exception {
+
+        /* Check starting value. */
+        EnvironmentConfig config = env.getConfig();
+        Object result = configMethod.invoke(config, (Object[]) null);
+        assertTrue(!result.toString().equals(newValue.toString()));
+
+        /* Set through the mbean. */
+        mbean.setAttribute(new Attribute(attributeName, newValue));
+
+        /* Check the present environment config. */
+        config = env.getConfig();
+        assertEquals(newValue.toString(),
+                     configMethod.invoke(config, (Object[]) null).toString());
+
+        /* Check through the mbean. */
+        Object mbeanNewValue = mbean.getAttribute(attributeName);
+        assertEquals(newValue.toString(), mbeanNewValue.toString());
+    }
+
+    /**
+     * Checks that all parameters and return values are Serializable to
+     * support JMX over RMI.
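+     *
+     * In sketch form, the underlying check (see
+     * MBeanTestUtils.doTestSerializable and checkObjectType) is a plain
+     * serialization round trip for each declared type:
+     *
+     *    ObjectOutputStream oos =
+     *        new ObjectOutputStream(new ByteArrayOutputStream());
+     *    oos.writeObject(value);   // must not throw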
+ */ + @Test + public void testSerializable() + throws Exception { + + /* Create and close the environment. */ + Environment env = openEnv(true); + + /* Test without an open environment. */ + DynamicMBean mbean = createMBean(env); + MBeanTestUtils.doTestSerializable(mbean); + + env.close(); + } + + /* + * Helper to open an environment. + */ + protected Environment openEnv(boolean openTransactionally) + throws Exception { + + return MBeanTestUtils.openTxnalEnv(openTransactionally, envHome); + } +} diff --git a/test/com/sleepycat/je/jmx/MBeanTest.java b/test/com/sleepycat/je/jmx/MBeanTest.java new file mode 100644 index 0000000..e5cf0ea --- /dev/null +++ b/test/com/sleepycat/je/jmx/MBeanTest.java @@ -0,0 +1,118 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import java.io.File; + +import javax.management.DynamicMBean; + +import junit.framework.TestCase; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.util.TestUtils; + +/** + * @excludeDualMode + * + * Instantiate and exercise the JEMBeanHelper. + */ +public class MBeanTest extends TestCase { + + private static final boolean DEBUG = false; + private File envHome; + private String environmentDir; + + public MBeanTest() { + environmentDir = System.getProperty(TestUtils.DEST_DIR); + envHome = new File(environmentDir); + } + + public void setUp() { + + TestUtils.removeLogFiles("Setup", envHome, false); + } + + public void tearDown() + throws Exception { + + TestUtils.removeLogFiles("tearDown", envHome, true); + } + + /** + * MBean which can configure and open an environment. + */ + public void testOpenableBean() + throws Throwable { + + Environment env = null; + try { + /* Environment is not open, and we can open. */ + env = MBeanTestUtils.openTxnalEnv(false, envHome); + env.close(); + + DynamicMBean mbean = new JEApplicationMBean(environmentDir); + MBeanTestUtils.validateGetters(mbean, 5, DEBUG); + MBeanTestUtils.validateMBeanOperations + (mbean, 1, false, null, null, DEBUG); + + /* Open the environment. */ + mbean.invoke(JEApplicationMBean.OP_OPEN, null, null); + + MBeanTestUtils.validateGetters(mbean, 7, DEBUG); + MBeanTestUtils.validateMBeanOperations + (mbean, 7, true, null, null, DEBUG); + + /* + * The last call to validateOperations ended up closing the + * environment. + */ + MBeanTestUtils.validateGetters(mbean, 5, DEBUG); + MBeanTestUtils.validateMBeanOperations + (mbean, 1, false, null, null, DEBUG); + + /* Should be no open handles. */ + MBeanTestUtils.checkForNoOpenHandles(environmentDir); + } catch (Throwable t) { + t.printStackTrace(); + + if (env != null) { + env.close(); + } + throw t; + } + } + + /** + * Checks that all parameters and return values are Serializable to + * support JMX over RMI. + */ + public void testSerializable() + throws Exception { + + /* Create and close the environment. */ + Environment env = MBeanTestUtils.openTxnalEnv(false, envHome); + env.close(); + + /* Test without an open environment. 
*/ + DynamicMBean mbean = new JEApplicationMBean(environmentDir); + MBeanTestUtils.doTestSerializable(mbean); + + /* Test with an open environment. */ + mbean.invoke(JEApplicationMBean.OP_OPEN, null, null); + MBeanTestUtils.doTestSerializable(mbean); + + /* Close. */ + mbean.invoke(JEApplicationMBean.OP_CLOSE, null, null); + } +} diff --git a/test/com/sleepycat/je/jmx/MBeanTestUtils.java b/test/com/sleepycat/je/jmx/MBeanTestUtils.java new file mode 100644 index 0000000..7a54323 --- /dev/null +++ b/test/com/sleepycat/je/jmx/MBeanTestUtils.java @@ -0,0 +1,265 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.jmx; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.util.Arrays; +import java.util.List; +import javax.management.DynamicMBean; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanConstructorInfo; +import javax.management.MBeanInfo; +import javax.management.MBeanNotificationInfo; +import javax.management.MBeanOperationInfo; +import javax.management.MBeanParameterInfo; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.util.TestUtils; +import junit.framework.TestCase; + +/** + * A utility class for testing JE MBeans. + */ +public class MBeanTestUtils extends TestCase { + public static void validateGetters(DynamicMBean mbean, + int numExpectedAttributes, + boolean DEBUG) + throws Throwable { + + MBeanInfo info = mbean.getMBeanInfo(); + + MBeanAttributeInfo[] attrs = info.getAttributes(); + + /* test getters. */ + int attributesWithValues = 0; + for (int i = 0; i < attrs.length; i++) { + String name = attrs[i].getName(); + Object result = mbean.getAttribute(name); + if (DEBUG) { + System.out.println("Attribute " + i + + " name=" + name + + " result=" + result); + } + if (result != null) { + attributesWithValues++; + checkObjectType + ("Attribute", name, attrs[i].getType(), result); + } + } + + assertEquals(numExpectedAttributes, attributesWithValues); + } + + /* + * Check that there are the expected number of operations. + */ + public static void checkOpNum(DynamicMBean mbean, + int numExpectedOperations, + boolean DEBUG) + throws Throwable { + + MBeanInfo info = mbean.getMBeanInfo(); + + MBeanOperationInfo[] ops = info.getOperations(); + if (DEBUG) { + for (int i = 0; i < ops.length; i++) { + System.out.println("op: " + ops[i].getName()); + } + } + assertEquals(numExpectedOperations, ops.length); + } + + /** + * Checks that all types for the given mbean are serializable. 
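+     *
+     * Concretely, it walks the MBeanInfo and applies checkSerializable to
+     * attribute types, operation return and parameter types, constructor
+     * parameter types, and notification types.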
+ */ + public static void doTestSerializable(DynamicMBean mbean) { + + MBeanInfo info = mbean.getMBeanInfo(); + + MBeanAttributeInfo[] attrs = info.getAttributes(); + for (int i = 0; i < attrs.length; i++) { + checkSerializable + ("Attribute", attrs[i].getName(), attrs[i].getType()); + } + + MBeanOperationInfo[] ops = info.getOperations(); + for (int i = 0; i < ops.length; i += 1) { + checkSerializable + ("Operation", + ops[i].getName() + " return type", + ops[i].getReturnType()); + MBeanParameterInfo[] params = ops[i].getSignature(); + for (int j = 0; j < params.length; j += 1) { + checkSerializable + ("Operation", + ops[i].getName() + " parameter " + j, + params[j].getType()); + } + } + + MBeanConstructorInfo[] ctors = info.getConstructors(); + for (int i = 0; i < ctors.length; i++) { + MBeanParameterInfo[] params = ctors[i].getSignature(); + for (int j = 0; j < params.length; j += 1) { + checkSerializable + ("Constructor", + ctors[i].getName() + " parameter " + j, + params[j].getType()); + } + } + + MBeanNotificationInfo[] notifs = info.getNotifications(); + for (int i = 0; i < notifs.length; i++) { + String[] types = notifs[i].getNotifTypes(); + for (int j = 0; j < types.length; j += 1) { + checkSerializable + ("Notification", notifs[i].getName(), types[j]); + } + } + } + + /** + * Checks that a given type is serializable. + */ + private static void checkSerializable(String identifier, + String name, + String type) { + + if ("void".equals(type)) { + return; + } + + String msg = identifier + ' ' + name + " is type " + type; + try { + Class cls = Class.forName(type); + if (!Serializable.class.isAssignableFrom(cls)) { + fail(msg + " -- not Serializable"); + } + } catch (Exception e) { + fail(msg + " -- " + e); + } + } + + /** + * Checks that an object (parameter or return value) is of the type + * specified in the BeanInfo. + */ + public static void checkObjectType(String identifier, + String name, + String type, + Object object) { + String msg = identifier + ' ' + name + " is type " + type; + if ("void".equals(type)) { + assertNull(msg + "-- should be null", object); + return; + } + try { + Class cls = Class.forName(type); + assertTrue + (msg + " -- object class is " + object.getClass().getName(), + cls.isAssignableFrom(object.getClass())); + } catch (Exception e) { + fail(msg + " -- " + e); + } + + /* + * The true test of serializable is to serialize. This checks the a + * elements of a list, for example. + */ + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(object); + } catch (Exception e) { + fail(msg + " -- " + e); + } + } + + public static void checkForNoOpenHandles(String environmentDir) { + assertFalse(DbEnvPool.getInstance().isOpen(new File(environmentDir))); + } + + public static Environment openTxnalEnv(boolean isTransactional, + File envHome) + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(isTransactional); + + return new Environment(envHome, envConfig); + } + + /* + * Check that there are the expected number of operations. If specified, + * invoke and check the operations of JEMonitor and JEApplicationMBean. 
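+     *
+     * For example, JEMonitorTest uses it along these lines (the operation
+     * count and database name are that test's own expectations):
+     *
+     *    MBeanTestUtils.validateMBeanOperations(
+     *        mbean, 8, true, "bozo", new String[] {"bozo"}, false);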
+ */ + public static void validateMBeanOperations(DynamicMBean mbean, + int numExpectedOperations, + boolean tryInvoke, + String databaseName, + String[] expectedDatabases, + boolean DEBUG) + throws Throwable { + + checkOpNum(mbean, numExpectedOperations, DEBUG); + + MBeanInfo info = mbean.getMBeanInfo(); + MBeanOperationInfo[] ops = info.getOperations(); + + if (tryInvoke) { + for (int i = 0; i < ops.length; i++) { + String opName = ops[i].getName(); + + /* Try the per-database operations if specified. */ + if ((databaseName != null) && + opName.equals(JEMBeanHelper.OP_DB_STAT)) { + /* Invoke with the name of the database. */ + Object result = mbean.invoke + (opName, + new Object[] {null, null, databaseName}, + null); + assertTrue(result instanceof String); + checkObjectType + ("Operation", opName, ops[i].getReturnType(), result); + } + + if ((expectedDatabases != null) && + opName.equals(JEMBeanHelper.OP_DB_NAMES)) { + Object result = mbean.invoke(opName, null, null); + List names = (List) result; + assertTrue(Arrays.equals(expectedDatabases, + names.toArray())); + checkObjectType + ("Operation", opName, ops[i].getReturnType(), result); + } + + /* + * Also invoke all operations with null params, to sanity + * check. + */ + Object result = mbean.invoke(opName, null, null); + if (result != null) { + checkObjectType + ("Operation", opName, ops[i].getReturnType(), result); + } + } + } + } +} diff --git a/test/com/sleepycat/je/junit/JUnitMethodThread.java b/test/com/sleepycat/je/junit/JUnitMethodThread.java new file mode 100644 index 0000000..756f392 --- /dev/null +++ b/test/com/sleepycat/je/junit/JUnitMethodThread.java @@ -0,0 +1,53 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.junit; + +import java.lang.reflect.Method; + +/** + * A JUnitThread whose testBody calls a given TestCase method. + */ +public class JUnitMethodThread extends JUnitThread { + + private Object testCase; + private Method method; + private Object param; + + public JUnitMethodThread(String threadName, String methodName, + Object testCase) + throws NoSuchMethodException { + + this(threadName, methodName, testCase, null); + } + + public JUnitMethodThread(String threadName, String methodName, + Object testCase, Object param) + throws NoSuchMethodException { + + super(threadName); + this.testCase = testCase; + this.param = param; + method = testCase.getClass().getMethod(methodName, new Class[0]); + } + + public void testBody() + throws Exception { + + if (param != null) { + method.invoke(testCase, new Object[] { param }); + } else { + method.invoke(testCase, new Object[0]); + } + } +} diff --git a/test/com/sleepycat/je/junit/JUnitProcessThread.java b/test/com/sleepycat/je/junit/JUnitProcessThread.java new file mode 100644 index 0000000..f40ab0e --- /dev/null +++ b/test/com/sleepycat/je/junit/JUnitProcessThread.java @@ -0,0 +1,157 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.junit;
+
+import java.io.BufferedReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import com.sleepycat.je.utilint.JVMSystemUtils;
+
+/**
+ * [#16348] JE file handle leak when multi-process writing a same environment.
+ *
+ * This thread class supports multi-process tests: each JUnitProcessThread
+ * spawns one child JVM process, so a test only needs to supply the command
+ * line parameters for that process.
+ */
+public class JUnitProcessThread extends JUnitThread {
+    private final List<String> cmd;
+
+    /*
+     * Records the return value of the process. If 0, the process finished
+     * successfully; if not, the process failed.
+     */
+    private int exitVal = 0;
+
+    /* If true, don't print out the standard output in this process. */
+    private final boolean suppressOutput;
+
+    /**
+     * Pass the process name and command line to the constructor.
+     */
+    public JUnitProcessThread(String threadName, String[] parameters) {
+        this(threadName, 0, null, parameters, false);
+    }
+
+    public JUnitProcessThread(String threadName,
+                              int heapMB,
+                              String[] jvmParams,
+                              String[] parameters) {
+        this(threadName, heapMB, jvmParams, parameters, false);
+    }
+
+    public JUnitProcessThread(String threadName,
+                              int heapMB,
+                              String[] jvmParams,
+                              String[] parameters,
+                              boolean suppressOutput) {
+        super(threadName);
+
+        this.suppressOutput = suppressOutput;
+
+        if (jvmParams == null) {
+            jvmParams = new String[0];
+        }
+
+        cmd = new ArrayList<>();
+
+        /* Use javaw.exe on Windows, where the path separator is not ":". */
+        cmd.add(System.getProperty("java.home") +
+                System.getProperty("file.separator") + "bin" +
+                System.getProperty("file.separator") + "java" +
+                (System.getProperty("path.separator").equals(":") ?
+                 "" : "w.exe"));
+
+        if (heapMB != 0) {
+            heapMB = Math.max(heapMB, JVMSystemUtils.MIN_HEAP_MB);
+            cmd.add("-Xmx" + heapMB + "m");
+        }
+
+        JVMSystemUtils.addZingJVMArgs(cmd);
+
+        cmd.addAll(Arrays.asList(jvmParams));
+
+        cmd.add("-cp");
+        cmd.add(
+            "." + System.getProperty("path.separator") +
+            System.getProperty("java.class.path"));
+
+        cmd.addAll(Arrays.asList(parameters));
+    }
+
+    /** Generate a process for this thread. */
+    public void testBody() {
+        try {
+            Process proc = new ProcessBuilder(cmd).start();
+
+            InputStream error = proc.getErrorStream();
+            InputStream output = proc.getInputStream();
+
+            Thread err = new Thread(new OutErrReader(error));
+            err.start();
+
+            if (!suppressOutput) {
+                Thread out = new Thread(new OutErrReader(output));
+                out.start();
+            }
+
+            exitVal = proc.waitFor();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+
+    /** Return the exit value of the created process to the main thread. */
+    public int getExitVal() {
+        return exitVal;
+    }
+
+    /**
+     * A runnable that drains a process's output or error stream and echoes
+     * it to System.err, so the child process cannot block on a full pipe.
+ */ + public static class OutErrReader implements Runnable { + final InputStream is; + final boolean ignoreOutput; + + OutErrReader(InputStream is) { + this.is = is; + this.ignoreOutput = false; + } + + public OutErrReader(InputStream is, boolean ignoreOutput) { + this.is = is; + this.ignoreOutput = ignoreOutput; + } + + public void run() { + try { + BufferedReader in = + new BufferedReader(new InputStreamReader(is)); + String temp; + while((temp = in.readLine()) != null) { + if (!ignoreOutput) { + System.err.println(temp); + } + } + is.close(); + } catch (Exception e) { + if (!ignoreOutput) { + e.printStackTrace(); + } + } + } + } +} diff --git a/test/com/sleepycat/je/junit/JUnitThread.java b/test/com/sleepycat/je/junit/JUnitThread.java new file mode 100644 index 0000000..7fc5460 --- /dev/null +++ b/test/com/sleepycat/je/junit/JUnitThread.java @@ -0,0 +1,130 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.junit; + +import junit.framework.Assert; + +/** + * JUnitThread is a utility class that allows JUnit assertions to be + * run in other threads. A JUnit assertion thrown from a + * thread other than the invoking one can not be caught by JUnit. + * This class allows these AssertionFailedErrors to be caught and + * passed back to the original thread. + *

        + * To use, create a JUnitThread and override the testBody() method with + * the test code. Then call doTest() on the thread to run the test + * and re-throw any assertion failures that were thrown by the + * subthread. + *

        + * Example: + *

+    public void testEquality() {
+        JUnitThread tester =
+            new JUnitThread("testEquality") {
+                public void testBody() {
+                    int one = 1;
+                    assertTrue(one == 1);
+                }
+            };
+        tester.doTest();
+    }
        + * 
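+ *
+ * If the thread is started explicitly with start() rather than doTest(),
+ * call finishTest() afterward to join it and re-throw any failure it
+ * recorded.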
        + */ +public class JUnitThread extends Thread { + private Throwable errorReturn; + + /** + * Construct a new JUnitThread. + */ + public JUnitThread(String name) { + super(name); + } + + @Override + public void run() { + try { + testBody(); + } catch (Throwable T) { + errorReturn = T; + } + } + + /** + * Method that is to be overridden by the user. Code should be + * the guts of the test. assertXXXX() methods may be called in + * this method. + * @throws Throwable in subclasses. + */ + public void testBody() + throws Throwable { + } + + /** + * This method should be called after the JUnitThread has been + * constructed to cause the actual test to be run and any failures + * to be returned. + */ + public void doTest() + throws Throwable { + + start(); + finishTest(); + } + + /** + * This method should be called after the JUnitThread has been + * started to cause the test to report any failures. + */ + public void finishTest() + throws Throwable { + + try { + join(); + } catch (InterruptedException IE) { + Assert.fail("caught unexpected InterruptedException"); + } + if (errorReturn != null) { + throw new RuntimeException( + "Test failed in JUnitThread, see nested exception.\n", + errorReturn); + } + } + + /** + * Attempt to kill a thread that's still running due to a test problem. + * Intended to be called during the test tearDown. In other cases, call + * {@link #finishTest()} instead. + */ + @SuppressWarnings("deprecation") + public void shutdown() { + + final long maxTime = System.currentTimeMillis() + (30 * 1000); + + while (isAlive() && + System.currentTimeMillis() < maxTime) { + interrupt(); + yield(); + } + + if (isAlive()) { + /* Although unsafe, it's best to stop the thread in a test. */ + stop(); + } + } + + @Override + public String toString() { + return ""; + } +} diff --git a/test/com/sleepycat/je/latch/LatchTest.java b/test/com/sleepycat/je/latch/LatchTest.java new file mode 100644 index 0000000..a0a37f2 --- /dev/null +++ b/test/com/sleepycat/je/latch/LatchTest.java @@ -0,0 +1,442 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.latch; + +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_CONTENTION; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_SUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NOWAIT_UNSUCCESS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_NO_WAITERS; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_RELEASES; +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_SELF_OWNED; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.concurrent.atomic.AtomicBoolean; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.util.test.TestBase; + +public class LatchTest extends TestBase { + private Latch latch1 = null; + private Latch latch2 = null; + private JUnitThread tester1 = null; + private JUnitThread tester2 = null; + + static private final boolean DEBUG = false; + + private void debugMsg(String message) { + if (DEBUG) { + System.out.println(Thread.currentThread().toString() + + " " + message); + } + } + + private void createExclusiveLatches() { + latch1 = LatchFactory.createExclusiveLatch( + LatchFactory.createTestLatchContext("LatchTest-latch1"), + true /*collectStats*/); + latch2 = LatchFactory.createExclusiveLatch( + LatchFactory.createTestLatchContext("LatchTest-latch2"), + true /*collectStats*/); + } + + @After + public void tearDown() { + latch1 = null; + latch2 = null; + } + + @Test + public void testDebugOutput() { + + /* Stupid test solely for the sake of code coverage. */ + createExclusiveLatches(); + + /* Acquire a latch. */ + latch1.acquireExclusive(); + + LatchSupport.btreeLatchesHeldToString(); + } + + @Test + public void testAcquireAndReacquire() + throws Throwable { + + createExclusiveLatches(); + JUnitThread tester = + new JUnitThread("testAcquireAndReacquire") { + @Override + public void testBody() { + + /* Acquire a latch. */ + latch1.acquireExclusive(); + + /* Try to acquire it again -- should fail. */ + try { + latch1.acquireExclusive(); + fail("didn't catch UNEXPECTED_STATE"); + } catch (EnvironmentFailureException expected) { + assertSame(EnvironmentFailureReason. + UNEXPECTED_STATE, expected.getReason()); + assertTrue + (latch1.getStats().getInt + (LATCH_SELF_OWNED) == 1); + } + + /* Release it. */ + latch1.release(); + + /* Release it again -- should fail. */ + try { + latch1.release(); + fail("didn't catch UNEXPECTED_STATE"); + } catch (EnvironmentFailureException expected) { + assertSame(EnvironmentFailureReason. + UNEXPECTED_STATE, expected.getReason()); + } + } + }; + + tester.doTest(); + } + + @Test + public void testAcquireAndReacquireShared() + throws Throwable { + + final SharedLatch latch = LatchFactory.createSharedLatch( + LatchFactory.createTestLatchContext("LatchTest-latch2"), false); + + JUnitThread tester = + new JUnitThread("testAcquireAndReacquireShared") { + @Override + public void testBody() { + + /* Acquire a shared latch. */ + latch.acquireShared(); + assert latch.isOwner(); + + /* Acquire it again -- should fail. 
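+                 *
+                 * Even a shared latch is non-reentrant here: a second
+                 * acquireShared by the owning thread fails with
+                 * UNEXPECTED_STATE rather than being counted.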
*/ + try { + latch.acquireShared(); + fail("didn't catch UNEXPECTED_STATE"); + } catch (EnvironmentFailureException expected) { + assertSame(EnvironmentFailureReason.UNEXPECTED_STATE, + expected.getReason()); + } + assert latch.isOwner(); + + /* Release it. */ + latch.release(); + + /* Release it again -- should fail. */ + try { + latch.release(); + fail("didn't catch UNEXPECTED_STATE"); + } catch (EnvironmentFailureException e) { + assertSame(EnvironmentFailureReason. + UNEXPECTED_STATE, e.getReason()); + } + } + }; + + tester.doTest(); + } + + /* + * Do a million acquire/release pairs. The junit output will tell us how + * long it took. + */ + @Test + public void testAcquireReleasePerformance() + throws Throwable { + + createExclusiveLatches(); + JUnitThread tester = + new JUnitThread("testAcquireReleasePerformance") { + @Override + public void testBody() { + final int N_PERF_TESTS = 1000000; + for (int i = 0; i < N_PERF_TESTS; i++) { + /* Acquire a latch */ + latch1.acquireExclusive(); + /* Release it. */ + latch1.release(); + } + StatGroup stats = latch1.getStats(); + stats.toString(); + assertTrue(stats.getInt(LATCH_NO_WAITERS) == N_PERF_TESTS); + assertTrue(stats.getInt(LATCH_RELEASES) == N_PERF_TESTS); + } + }; + + tester.doTest(); + } + + /* Test latch waiting. */ + + @Test + public void testWait() + throws Throwable { + + createExclusiveLatches(); + for (int i = 0; i < 10; i++) { + doTestWait(); + } + } + + private int nAcquiresWithContention = 0; + + public void doTestWait() + throws Throwable { + + final AtomicBoolean t1Acquired = new AtomicBoolean(false); + + tester1 = + new JUnitThread("testWait-Thread1") { + @Override + public void testBody() { + /* Acquire a latch. */ + latch1.acquireExclusive(); + t1Acquired.set(true); + + /* Wait for tester2 to try to acquire the latch. */ + while (latch1.getNWaiters() == 0) { + Thread.yield(); + } + + latch1.release(); + } + }; + + tester2 = + new JUnitThread("testWait-Thread2") { + @Override + public void testBody() { + /* Wait for tester1 to start. */ + while (!t1Acquired.get()) { + Thread.yield(); + } + + /* Acquire a latch. */ + latch1.acquireExclusive(); + + assertTrue(latch1.getStats().getInt(LATCH_CONTENTION) + == ++nAcquiresWithContention); + + /* Release it. */ + latch1.release(); + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + } + + @Test + public void testAcquireNoWait() + throws Throwable { + + final AtomicBoolean t1Acquired = new AtomicBoolean(false); + final AtomicBoolean t2TryAcquire = new AtomicBoolean(false); + final AtomicBoolean t1Released = new AtomicBoolean(false); + + createExclusiveLatches(); + tester1 = + new JUnitThread("testWait-Thread1") { + @Override + public void testBody() { + /* Acquire a latch. */ + debugMsg("Acquiring Latch"); + latch1.acquireExclusive(); + t1Acquired.set(true); + + /* Wait for tester2 to try to acquire the latch. */ + debugMsg("Waiting for other thread"); + while (!t2TryAcquire.get()) { + Thread.yield(); + } + + debugMsg("Releasing the latch"); + latch1.release(); + t1Released.set(true); + } + }; + + tester2 = + new JUnitThread("testWait-Thread2") { + @Override + public void testBody() { + /* Wait for tester1 to start. */ + debugMsg("Waiting for T1 to acquire latch"); + while (!t1Acquired.get()) { + Thread.yield(); + } + + /* + * Attempt Acquire with no wait -- should fail since + * tester1 has it. 
+ */ + debugMsg("Acquiring no wait"); + assertFalse(latch1.acquireExclusiveNoWait()); + assertTrue(latch1.getStats().getInt + (LATCH_NOWAIT_UNSUCCESS) == 1); + t2TryAcquire.set(true); + + debugMsg("Waiting for T1 to release latch"); + while (!t1Released.get()) { + Thread.yield(); + } + + /* + * Attempt Acquire with no wait -- should succeed now that + * tester1 is done. + */ + debugMsg("Acquiring no wait - 2"); + assertTrue(latch1.acquireExclusiveNoWait()); + assertTrue(latch1.getStats().getInt + (LATCH_NOWAIT_SUCCESS) == 1); + + /* + * Attempt Acquire with no wait again -- should throw + * exception since we already have it. + */ + debugMsg("Acquiring no wait - 3"); + try { + latch1.acquireExclusiveNoWait(); + fail("didn't throw UNEXPECTED_STATE"); + } catch (EnvironmentFailureException expected) { + assertSame(EnvironmentFailureReason. + UNEXPECTED_STATE, expected.getReason()); + } + + /* Release it. */ + debugMsg("releasing the latch"); + latch1.release(); + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + } + + /* State for testMultipleWaiters. */ + private final int N_WAITERS = 5; + + /* A JUnitThread that holds the waiter number. */ + private class MultiWaiterTestThread extends JUnitThread { + private final int waiterNumber; + public MultiWaiterTestThread(String name, int waiterNumber) { + super(name); + this.waiterNumber = waiterNumber; + } + } + + @Test + public void testMultipleWaiters() + throws Throwable { + + createExclusiveLatches(); + + JUnitThread[] waiterThreads = new JUnitThread[N_WAITERS]; + + final AtomicBoolean t1Acquired = new AtomicBoolean(false); + + tester1 = + new JUnitThread("testWait-Thread1") { + @Override + public void testBody() { + + /* Acquire a latch. */ + debugMsg("About to acquire latch"); + latch1.acquireExclusive(); + debugMsg("acquired latch"); + t1Acquired.set(true); + + /* + * Wait for all other testers to be waiting on the latch. + */ + while (latch1.getNWaiters() < N_WAITERS) { + Thread.yield(); + } + + debugMsg("About to release latch"); + latch1.release(); + } + }; + + for (int i = 0; i < N_WAITERS; i++) { + waiterThreads[i] = + new MultiWaiterTestThread("testWait-Waiter" + i, i) { + @Override + public void testBody() { + + int waiterNumber = + ((MultiWaiterTestThread) + Thread.currentThread()).waiterNumber; + + /* Wait for tester1 to start. */ + debugMsg("Waiting for main to acquire latch"); + while (!t1Acquired.get()) { + Thread.yield(); + } + + /* + * Wait until it's our turn to try to acquire the + * latch. + */ + debugMsg("Waiting for our turn to acquire latch"); + while (latch1.getNWaiters() < waiterNumber) { + Thread.yield(); + } + + /* Try to acquire the latch */ + debugMsg("About to acquire latch"); + latch1.acquireExclusive(); + + debugMsg("getNWaiters: " + latch1.getNWaiters()); + assertTrue(latch1.getNWaiters() == + (N_WAITERS - waiterNumber - 1)); + + /* Release it. */ + debugMsg("About to release latch"); + latch1.release(); + } + }; + } + + tester1.start(); + + for (int i = 0; i < N_WAITERS; i++) { + waiterThreads[i].start(); + } + + tester1.finishTest(); + for (int i = 0; i < N_WAITERS; i++) { + waiterThreads[i].finishTest(); + } + } +} diff --git a/test/com/sleepycat/je/log/BufferPoolReadLatchTest.java b/test/com/sleepycat/je/log/BufferPoolReadLatchTest.java new file mode 100644 index 0000000..97fccee --- /dev/null +++ b/test/com/sleepycat/je/log/BufferPoolReadLatchTest.java @@ -0,0 +1,112 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static com.sleepycat.je.latch.LatchStatDefinition.LATCH_RELEASES; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Checks that we don't have to latch the buffer pool to check for LSNs that + * are outside the pool's range. This is important to avoid read contention. + * [#19642] + */ +public class BufferPoolReadLatchTest extends DualTestCase { + private File envHome; + private Environment env; + + public BufferPoolReadLatchTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testBufferPoolReadLatch() + throws Throwable { + + /* Open env with tiny cache to cause cache misses. */ + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setCacheSize(1 << 20); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + env = create(envHome, envConfig); + + /* Open db. */ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + final Database db = env.openDatabase(null, "foo", dbConfig); + + /* Write enough data to cycle twice through log buffers. */ + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[1024]); + final int logBufMemSize = (1 << 20) * 3; // 3 MB + final int nRecords = (logBufMemSize * 2) / 1024; + for (int i = 0; i < nRecords; i += 1) { + IntegerBinding.intToEntry(i, key); + final OperationStatus status = db.putNoOverwrite(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + } + + /* Get and clear buffer pool latch stats. */ + final StatGroup latchStats = DbInternal.getNonNullEnvImpl(env). + getLogManager(). + getBufferPoolLatchStats(); + latchStats.clear(); + + /* Read first half of records, which should not be in log buffers. */ + for (int i = 0; i < (nRecords / 2); i += 1) { + IntegerBinding.intToEntry(i, key); + final OperationStatus status = db.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + } + + /* + * Check for a small number of latches. Check release count because it + * is simple (there are many acquire counts). Before the enhancement + * [#19642], there were around 3,000 latches. 
After the enhancement + * there were only 2 latches, but this number could be variable. + */ + final int nLatches = latchStats.getInt(LATCH_RELEASES); + assertTrue(String.valueOf(nLatches), nLatches < 10); + + /* Close all. */ + db.close(); + close(env); + } +} diff --git a/test/com/sleepycat/je/log/FSyncManagerTest.java b/test/com/sleepycat/je/log/FSyncManagerTest.java new file mode 100644 index 0000000..784d920 --- /dev/null +++ b/test/com/sleepycat/je/log/FSyncManagerTest.java @@ -0,0 +1,277 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Exercise the synchronization aspects of the sync manager. + */ +public class FSyncManagerTest extends TestBase { + private final File envHome; + + public FSyncManagerTest() { + super(); + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testBasic() + throws Throwable{ + + Environment env = null; + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentConfig.LOG_FSYNC_TIMEOUT, + "50000000"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + WaitVal waitVal = new WaitVal(0); + + FSyncManager syncManager = + new TestSyncManager(DbInternal.getNonNullEnvImpl(env), + waitVal); + JUnitThread t1 = new TestSyncThread(syncManager); + JUnitThread t2 = new TestSyncThread(syncManager); + JUnitThread t3 = new TestSyncThread(syncManager); + t1.start(); + t2.start(); + t3.start(); + + /* Wait for all threads to request a sync, so they form a group.*/ + Thread.sleep(500); + + /* Free thread 1. */ + synchronized (waitVal) { + waitVal.value = 1; + waitVal.notify(); + } + + t1.join(); + t2.join(); + t3.join(); + + /* + * All three threads ask for fsyncs. + * 2 do fsyncs -- the initial leader, and the leader of the + * waiting group of 2. + * The last thread gets a free ride. + */ + assertEquals(3, syncManager.getNFSyncRequests()); + assertEquals(2, syncManager.getNFSyncs()); + assertEquals(0, syncManager.getNTimeouts()); + } finally { + if (env != null) { + env.close(); + } + } + } + + /* This test class waits for an object instead of executing a sync. + * This way, we can manipulate grouping behavior. 
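+     *
+     * While the leader blocks inside executeFSync, later requesters queue
+     * up behind it; releasing waitVal lets that leader finish, and the
+     * queued group then elects its own leader. This is why testBasic
+     * expects 3 fsync requests but only 2 executed fsyncs.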
+ */ + class TestSyncManager extends FSyncManager { + private final WaitVal waitVal; + TestSyncManager(EnvironmentImpl env, WaitVal waitVal) { + super(env); + this.waitVal = waitVal; + } + @Override + protected void executeFSync() { + try { + synchronized (waitVal) { + if (waitVal.value < 1) { + waitVal.wait(); + } + } + } catch (InterruptedException e) { + // woken up. + } + } + } + + class TestSyncThread extends JUnitThread { + private final FSyncManager syncManager; + TestSyncThread(FSyncManager syncManager) { + super("syncThread"); + this.syncManager = syncManager; + } + + @Override + public void testBody() + throws Throwable { + syncManager.flushAndSync(true); + } + } + + class WaitVal { + public int value; + + WaitVal(int value) { + this.value = value; + } + } + + private final int SIM_THREADS = 10; + private final int SIM_ITERS = 50; + private final int SIM_MAX_EXECUTE_MS = 1000; + + /** + * Simulates fsync by maintaining a map of entries that represent writes. + * Before calling FSyncManager.fsync an entry is added to the map, and we + * expect it to be removed from the map when fsync returns. The overridden + * executeFSync method removes all entries from the map. + */ + @Test + public void testSimulatedFsync() { + + Environment env = null; + JUnitThread[] threads = null; + try { + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + final SimSyncManager syncManager = + new SimSyncManager(DbInternal.getNonNullEnvImpl(env)); + + threads = new JUnitThread[SIM_THREADS]; + for (int i = 0; i < SIM_THREADS; i += 1) { + threads[i] = new SimSyncThread(syncManager, i); + } + for (int i = 0; i < SIM_THREADS; i += 1) { + threads[i].start(); + } + for (int i = 0; i < SIM_THREADS; i += 1) { + threads[i].finishTest(); + } + threads = null; + env.close(); + env = null; + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } finally { + if (threads != null) { + for (int i = 0; i < SIM_THREADS; i += 1) { + final Thread thread = threads[i]; + if (thread != null) { + while (thread.isAlive()) { + thread.interrupt(); + Thread.yield(); + } + } + } + } + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + System.out.println("After failure: " + e); + } + } + } + } + + class SimSyncThread extends JUnitThread { + + private final SimSyncManager syncManager; + + SimSyncThread(SimSyncManager syncManager, int i) { + super("SimSyncThread-" + i); + this.syncManager = syncManager; + } + + @Override + + public void testBody() { + for (int i = 0; i < SIM_ITERS && !syncManager.failed(); i += 1) { + final int entry = syncManager.addEntry(); + syncManager.flushAndSync(true); + syncManager.expectDone(entry); + } + } + } + + class SimSyncManager extends FSyncManager { + + private final Map entries; + private final AtomicInteger nextEntry; + private final Random rnd; + private volatile boolean failure; + + SimSyncManager(EnvironmentImpl env) { + super(env); + entries = + Collections.synchronizedMap(new HashMap()); + nextEntry = new AtomicInteger(1); + rnd = new Random(123); + failure = false; + } + + boolean failed() { + return failure; + } + + int addEntry() { + final int entry = nextEntry.getAndIncrement(); + entries.put(entry, entry); + return entry; + } + + void expectDone(int entry) { + if (entries.containsKey(entry)) { + failure = true; + fail("found entry: " + entry); + } + } + + @Override + protected void executeFSync() { + try { + Thread.currentThread().sleep(getNextSleepMs()); + } 
catch (InterruptedException e) { + throw new RuntimeException(e); + } + entries.clear(); + } + + private long getNextSleepMs() { + synchronized (rnd) { + return rnd.nextInt(SIM_MAX_EXECUTE_MS) + 1; + } + } + } +} diff --git a/test/com/sleepycat/je/log/FileEdgeCaseTest.java b/test/com/sleepycat/je/log/FileEdgeCaseTest.java new file mode 100644 index 0000000..ecd7c0f --- /dev/null +++ b/test/com/sleepycat/je/log/FileEdgeCaseTest.java @@ -0,0 +1,300 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; + +public class FileEdgeCaseTest extends TestBase { + + private final File envHome; + private Environment env; + + public FileEdgeCaseTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + + if (env != null) { + try { + env.close(); + } finally { + env = null; + } + } + } + + /** + * SR #15133 + * Create a JE environment with a single log file and a checksum + * exception in the second entry in the log file. + * + * When an application attempts to open this JE environment, JE truncates + * the log file at the point before the bad checksum, because it assumes + * that bad entries at the end of the log are the result of incompletely + * flushed writes from the last environment use. However, the truncated + * log doesn't have a valid environment root, so JE complains and asks the + * application to move aside the existing log file (via the exception + * message). The resulting environment has a single log file, with + * a single valid entry, which is the file header. + * + * Any subsequent attempts to open the environment should also fail at the + * same point. In the error case reported by this SR, we didn't handle this + * single log file/single file header case right, and subsequent opens + * first truncated before the file header, leaving a 0 length log, and + * then proceeded to write error trace messages into the log. This + * resulted in a log file with no file header, (but with trace messages) + * and any following opens got unpredictable errors like + * ClassCastExceptions and BufferUnderflows. + * + * The correct situation is to continue to get the same exception. 
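+     *
+     * In other words, every reopen attempt below must keep failing with an
+     * EnvironmentFailureException whose reason is LOG_INTEGRITY.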
+     */
+    @Test
+    public void testPostChecksumError()
+        throws IOException {
+
+        EnvironmentConfig config = new EnvironmentConfig();
+        config.setAllowCreate(true);
+        config.setConfigParam(EnvironmentConfig.ENV_RUN_VERIFIER, "false");
+        env = new Environment(envHome, config);
+
+        env.close();
+        env = null;
+
+        /* Intentionally corrupt the second entry. */
+        corruptSecondEntry();
+
+        /*
+         * Attempts to open the environment should fail with an
+         * EnvironmentFailureException.
+         */
+        for (int i = 0; i < 3; i += 1) {
+            try {
+                env = new Environment(envHome, config);
+            } catch (EnvironmentFailureException expected) {
+                assertSame(EnvironmentFailureReason.LOG_INTEGRITY,
+                           expected.getReason());
+            }
+        }
+    }
+
+    /**
+     * [#18307]
+     * Suppose we have LSN 1000, and the log entry there has a checksum
+     * exception.
+     *
+     * Case 1. If we manage to read past LSN 1000, but then hit a second
+     *         checksum exception, return false and truncate the log at the
+     *         first exception.
+     * Case 2. If we manage to read past LSN 1000, and do not see any
+     *         checksum exceptions, and do not see any commits, return false
+     *         and truncate the log.
+     * Case 3. If we manage to read past LSN 1000, and do not see any
+     *         checksum exceptions, but do see a txn commit, return true and
+     *         throw EnvironmentFailureException.
+     *
+     * Note that we must guarantee that, for case 3, the environment open
+     * process throws EFE. Otherwise the envImpl would stay in the DbEnvPool
+     * forever, the next test case would silently reuse it, and that test's
+     * DataVerifier would then throw a confusing exception, making the later
+     * failure very difficult to debug.
+     */
+    @Test
+    public void testFindCommittedTxn()
+        throws IOException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_CLEANER, "false");
+        envConfig.setConfigParam(
+            EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false");
+        envConfig.setConfigParam(
+            EnvironmentConfig.HALT_ON_COMMIT_AFTER_CHECKSUMEXCEPTION,
+            "true");
+        env = new Environment(envHome, envConfig);
+
+        long[] offsets = writeTwoLNs();
+        long lnOffset1 = offsets[0];
+        long lnOffset2 = offsets[1];
+        long postCommitOffset = offsets[2];
+
+        /*
+         * Case 2: Intentionally pollute the entry checksum after all
+         * committed txns, so no committed txn will be found.
+         */
+        polluteEntryChecksum(postCommitOffset);
+
+        /*
+         * When doing recovery, no committed txn will be found, so the
+         * recovery process just truncates the log file at the bad log point.
+         */
+        env = new Environment(envHome, envConfig);
+        env.close();
+        env = null;
+
+        /*
+         * Case 3: Intentionally pollute the entry checksum before the
+         * committed txn.
+         *
+         * When the reader meets the first checksum error, it will step
+         * forward to look for the committed txn. After finding the committed
+         * txn, EnvironmentFailureException will be thrown and the recovery
+         * process will be stopped.
+         */
+        polluteEntryChecksum(lnOffset1);
+
+        /*
+         * The next attempt to open the environment should fail with an
+         * EnvironmentFailureException.
+         */
+        try {
+
+            /*
+             * When doing recovery, one committed txn will be found, so
+             * EnvironmentFailureException will be thrown.
+ */ + env = new Environment(envHome, envConfig); + fail("Should have caught exception while finding committed txn"); + } catch (EnvironmentFailureException expected) { + assertSame(EnvironmentFailureReason.FOUND_COMMITTED_TXN, + expected.getReason()); + }
+ + /* + * Case 1: Intentionally pollute two entries' checksums before the + * committed txn. + * + * When the reader meets the first checksum error, it will step forward + * to look for the committed txn. Before finding any committed txn, if + * the reader meets another checksum error, it will stop the search + * and just truncate the log file at the first checksum error spot. + */ + polluteEntryChecksum(lnOffset1); + polluteEntryChecksum(lnOffset2); + + /* + * When doing recovery, no committed txn will be found, so the recovery + * process simply truncates the log file at the corrupted log entry. + */ + env = new Environment(envHome, envConfig); + env.close(); + env = null; + }
+ + /** + * Writes two LNs in a txn, and closes the env. + * @return an array of [lnOffset1, lnOffset2, postCommitOffset]. + */ + private long[] writeTwoLNs() { + + FileManager fm = DbInternal.getEnvironmentImpl(env).getFileManager(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database db = env.openDatabase(null, "testDB", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(0, key); + IntegerBinding.intToEntry(0, data); + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + + long lnOffset1 = DbLsn.getFileOffset(fm.getNextLsn()); + cursor.put(key, data); + long lnOffset2 = DbLsn.getFileOffset(fm.getNextLsn()); + cursor.put(key, data); + + cursor.close(); + txn.commit(); + long postCommitOffset = DbLsn.getFileOffset(fm.getNextLsn()); + + db.close(); + env.close(); + env = null; + + return new long[] { lnOffset1, lnOffset2, postCommitOffset }; + }
+ + /** + * Write junk into the second log entry, after the file header. + */ + private void corruptSecondEntry() + throws IOException { + + writeToFirstFile(FileManager.firstLogEntryOffset(), new byte[20]); + } + + /** + * Pollute a specified log entry's checksum. + */ + private void polluteEntryChecksum(long entryOffset) + throws IOException { + + /* + * We only want to pollute the checksum, so four bytes of junk are + * enough. + */ + writeToFirstFile(entryOffset, new byte[4]); + } + + private void writeToFirstFile(long entryOffset, byte[] junk) + throws IOException { + + File firstFile = new File( + envHome, FileManager.getFileName(0, FileManager.JE_SUFFIX)); + + try (RandomAccessFile file = new RandomAccessFile( + firstFile, + FileManager.FileMode.READWRITE_MODE.getModeValue())) { + + file.seek(entryOffset); + file.write(junk); + + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } +} diff --git a/test/com/sleepycat/je/log/FileManagerMultiDataDirTest.java b/test/com/sleepycat/je/log/FileManagerMultiDataDirTest.java new file mode 100644 index 0000000..40fe4f7 --- /dev/null +++ b/test/com/sleepycat/je/log/FileManagerMultiDataDirTest.java @@ -0,0 +1,289 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * File Manager + */ +public class FileManagerMultiDataDirTest extends TestBase { + + private static int FILE_SIZE = 120; + + private static int N_DATA_DIRS = 3; + + private Environment env; + private FileManager fileManager; + private final File envHome; + + public FileManagerMultiDataDirTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws IOException, DatabaseException { + + if (fileManager != null) { + fileManager.clear(); + fileManager.close(); + } + } + + private void createEnvAndFileManager() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + new Integer(FILE_SIZE).toString()); + /* Yank the cache size way down. */ + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_CACHE_SIZE.getName(), "3"); + envConfig.setAllowCreate(true); + + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), + N_DATA_DIRS + ""); + for (int i = 1; i <= N_DATA_DIRS; i += 1) { + new File(envHome, "data00" + i).mkdir(); + } + + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* Make a standalone file manager for this test. */ + envImpl.close(); + envImpl.open(); /* Just sets state to OPEN. */ + fileManager = new FileManager(envImpl, envHome, false); + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. 
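+ * + * (For reference, the layout created above with N_DATA_DIRS = 3 is,
+ * roughly: envHome/data001, envHome/data002 and envHome/data003, with
+ * the NNNNNNNN.jdb log files spread across the three data
+ * directories.)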
+ */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + } + + @Test + public void testMultipleDataDirs1() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + /* Create 000, 001, 002, 003, expect failure because of 000 */ + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), + N_DATA_DIRS + ""); + for (int i = 1; i <= N_DATA_DIRS; i += 1) { + new File(envHome, "data00" + i).mkdir(); + } + new File(envHome, "data000").mkdir(); + + try { + env = new Environment(envHome, envConfig); + fail("expected too many dirs exception"); + } catch (EnvironmentFailureException EFE) { + } + + File dataDir = new File(envHome, "data000"); + TestUtils.removeFiles + ("TearDown", dataDir, FileManager.JE_SUFFIX); + dataDir.delete(); + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. + */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + } + + @Test + public void testMultipleDataDirs2() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + /* Create 001, 002, 004, expect failure because 003 doesn't exist */ + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), + N_DATA_DIRS + ""); + new File(envHome, "data001").mkdir(); + new File(envHome, "data002").mkdir(); + new File(envHome, "data004").mkdir(); + + try { + env = new Environment(envHome, envConfig); + fail("expected too many dirs exception"); + } catch (EnvironmentFailureException EFE) { + } + + File dataDir = new File(envHome, "data004"); + TestUtils.removeFiles + ("TearDown", dataDir, FileManager.JE_SUFFIX); + dataDir.delete(); + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. + */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + } + + @Test + public void testMultipleDataDirs3() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + /* Create 001, 002, expect failure because 003 doesn't exist */ + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), + N_DATA_DIRS + ""); + new File(envHome, "data001").mkdir(); + new File(envHome, "data002").mkdir(); + + try { + env = new Environment(envHome, envConfig); + fail("expected too many dirs exception"); + } catch (EnvironmentFailureException EFE) { + } + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. 
+ */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + }
+ + @Test + public void testMultipleDataDirs4() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + /* Create 001, 002, 003, expect failure because 003 is a file */ + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), + N_DATA_DIRS + ""); + new File(envHome, "data001").mkdir(); + new File(envHome, "data002").mkdir(); + new File(envHome, "data003").createNewFile(); + + try { + env = new Environment(envHome, envConfig); + fail("expected too many dirs exception"); + } catch (EnvironmentFailureException EFE) { + } + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. + */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + }
+ + @Test + public void testMultipleDataDirs5() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + /* Create 001, 002, xxx, expect failure because xxx is not NNN */ + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), + N_DATA_DIRS + ""); + new File(envHome, "data001").mkdir(); + new File(envHome, "data002").mkdir(); + new File(envHome, "dataxxx").mkdir(); + + try { + env = new Environment(envHome, envConfig); + fail("expected too many dirs exception"); + } catch (EnvironmentFailureException EFE) { + } + + File dataDir = new File(envHome, "dataxxx"); + TestUtils.removeFiles + ("TearDown", dataDir, FileManager.JE_SUFFIX); + dataDir.delete(); + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. + */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + }
+ + @Test + public void testMultipleDataDirs6() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + /* + * Create 001, 002, 003 while configuring zero data directories; + * expect failure because data dirs exist that are not configured. + */ + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), "0"); + new File(envHome, "data001").mkdir(); + new File(envHome, "data002").mkdir(); + new File(envHome, "data003").mkdir(); + + try { + env = new Environment(envHome, envConfig); + fail("expected too many dirs exception"); + } catch (EnvironmentFailureException EFE) { + } + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. + */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + } +} diff --git a/test/com/sleepycat/je/log/FileManagerTest.java b/test/com/sleepycat/je/log/FileManagerTest.java new file mode 100644 index 0000000..eaeb9ef --- /dev/null +++ b/test/com/sleepycat/je/log/FileManagerTest.java @@ -0,0 +1,634 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.Set; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +import org.junit.After; +import org.junit.Test; + +/** + * File Manager + */ +public class FileManagerTest extends TestBase { + + private static int FILE_SIZE = 120; + + private Environment env; + private FileManager fileManager; + private final File envHome; + + public FileManagerTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Override + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + /* Disable daemons to rule out their interference. */ + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + new Integer(FILE_SIZE).toString()); + /* Yank the cache size way down. */ + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_CACHE_SIZE.getName(), "3"); + envConfig.setAllowCreate(true); + + /* + * The following TestUtils.removeFiles will delete files manually, + * so for this series of tests, we disable log file deletion detect. + */ + envConfig.setConfigParam + (EnvironmentParams.LOG_DETECT_FILE_DELETE.getName(), "false"); + + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* Make a standalone file manager for this test. */ + envImpl.close(); + envImpl.open(); /* Just sets state to OPEN. */ + fileManager = new FileManager(envImpl, envHome, false); + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. + */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + } + + @After + public void tearDown() + throws IOException, DatabaseException { + + if (fileManager != null) { + fileManager.clear(); + fileManager.close(); + } + } + + /** + * Test LSN administration. 
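+ * + * For orientation: an LSN packs a file number and a byte offset into
+ * a single long, so e.g. DbLsn.makeLsn(1, hdrSize + 40) below names
+ * the entry at offset hdrSize + 40 of file 00000001.jdb. The diagram
+ * in the test body is the ground truth for the expected offsets.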
+ */ + + @Test + public void testLsnBumping() + throws Exception { + + /* + We are adding these entries: + +----+------+---------+--------+ + file 0: |hdr | 30 | 50 |empty | + +----+------+---------+--------+ + 0 hdr hdr+30 hdr+80 99 + + +----+--------+-------+-------+-----+-------+ + file 1: |hdr | 40 | 20 | 10 | 5 | empty | + +----+--------+-------+-------+-----+-------+ + 0 hdr hdr+40 hdr+60 hdr+70 hdr+75 + + +-----+-----+--------+ + file 2: | hdr | 75 | empty | + +-----+-----+--------+ + 0 hdr hdr+75 + + +-----+-------------------------------+ + file 3: | hdr | 125 | + +-----+-------------------------------+ + 0 hdr + + +-----+-----+------+-----+--------------+ + file 4: | hdr | 10 | 20 | 30 | empty + +-----+-----+------+-----+--------------+ + 0 hdr hdr+10 hdr+30 + + */ + + try { + /* Should start out at LSN 0. */ + + /* "add" some entries to the log. */ + long hdrSize = FileManager.firstLogEntryOffset(); + + long prevOffset = bumpLsn(30L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(0, hdrSize), + fileManager.getLastUsedLsn()); + /* prev entry. */ + assertEquals(0, prevOffset); + + prevOffset = bumpLsn(50L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(0, (hdrSize + 30)), + fileManager.getLastUsedLsn()); + assertEquals(hdrSize, prevOffset); + + /* bump over to a file 1. */ + prevOffset = bumpLsn(40L); + /* item placed here. */ + assertEquals(DbLsn.makeLsn(1, hdrSize), + fileManager.getLastUsedLsn()); + assertEquals(0, prevOffset); + + prevOffset = bumpLsn(20L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(1,(hdrSize+40)), + fileManager.getLastUsedLsn()); + assertEquals(hdrSize, prevOffset); + + prevOffset = bumpLsn(10L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(1,(hdrSize+60)), + fileManager.getLastUsedLsn()); + assertEquals(hdrSize+40, prevOffset); + + prevOffset = bumpLsn(5L); + /* item placed here. */ + assertEquals(DbLsn.makeLsn(1,(hdrSize+70)), + fileManager.getLastUsedLsn()); + assertEquals(hdrSize+60, prevOffset); + + /* bump over to file 2. */ + prevOffset = bumpLsn(75L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(2, hdrSize), + fileManager.getLastUsedLsn()); + assertEquals(0, prevOffset); + + /* Ask for something bigger than a file: bump over to file 3. */ + prevOffset = bumpLsn(125L); + /* item placed here. */ + assertEquals(DbLsn.makeLsn(3, hdrSize), + fileManager.getLastUsedLsn()); + assertEquals(0, prevOffset); + + /* bump over to file 4. */ + prevOffset = bumpLsn(10L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(4, hdrSize), + fileManager.getLastUsedLsn()); + assertEquals(0, prevOffset); + + prevOffset = bumpLsn(20L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(4, (hdrSize+10)), + fileManager.getLastUsedLsn()); + assertEquals(hdrSize, prevOffset); + + prevOffset = bumpLsn(30L); + /* Item placed here. */ + assertEquals(DbLsn.makeLsn(4, (hdrSize+30)), + fileManager.getLastUsedLsn()); + assertEquals((hdrSize+10), prevOffset); + + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + + /** + * Test initializing the last position in the logs. + */ + @Test + public void testSetLastPosition() { + + /* + * Pretend that the last file is file 79. + */ + fileManager.setLastPosition(// next available LSN + DbLsn.makeLsn(79L, 88L), + DbLsn.makeLsn(79L, 77), + 66L); + + /* Put an entry down, should fit within file 79. 
*/ + long prevOffset = bumpLsn(11L); + assertEquals(DbLsn.makeLsn(79L, 88L), fileManager.getLastUsedLsn()); + assertEquals(77L, prevOffset); + + /* Put another entry in, should go to the next file. */ + prevOffset = bumpLsn(22L); + assertEquals(DbLsn.makeLsn(80L, FileManager.firstLogEntryOffset()), + fileManager.getLastUsedLsn()); + assertEquals(0, prevOffset); + } + + /** + * Test log file naming. + */ + @Test + public void testFileNameFormat() { + String filePrefix = envHome + File.separator; + assertEquals(filePrefix + "00000001.jdb", + fileManager.getFullFileNames(1L)[0]); + assertEquals(filePrefix + "0000007b.jdb", + fileManager.getFullFileNames(123L)[0]); + } + + private long bumpLsn(long size) { + return FileManagerTestUtils.bumpLsn(fileManager, size); + } + + /** + * Test log file creation. + */ + @Test + public void testFileCreation() + throws ChecksumException, IOException, DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE); + FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE); + + String[] jeFiles = fileManager.listFileNames(FileManager.JE_SUFFIXES); + + assertEquals("Should have two files", 2, jeFiles.length); + + /* Make a fake files with confusing names. */ + File fakeFile1 = new File(envHome, "00000abx.jdb"); + File fakeFile2 = new File(envHome, "10.10.jdb"); + fakeFile1.createNewFile(); + fakeFile2.createNewFile(); + + jeFiles = fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertEquals("Should have two files", 2, jeFiles.length); + + /* Open the two existing log files. */ + FileHandle file0Handle = fileManager.getFileHandle(0L); + FileHandle file1Handle = fileManager.getFileHandle(1L); + + jeFiles = fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertEquals("Should have two files", 2, jeFiles.length); + file0Handle.release(); + file1Handle.release(); + + /* Empty the cache and get them again. */ + fileManager.clear(); + file0Handle = fileManager.getFileHandle(0L); + file1Handle = fileManager.getFileHandle(1L); + + jeFiles = fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertEquals("Should have two files", 2, jeFiles.length); + file0Handle.close(); + file1Handle.close(); + file0Handle.release(); + file1Handle.release(); + + fakeFile1.delete(); + fakeFile2.delete(); + } + + /** + * Make sure we can find the last file. + */ + @Test + public void testLastFile() + throws IOException, DatabaseException { + + /* There shouldn't be any files here anymore. */ + String[] jeFiles = fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertTrue(jeFiles.length == 0); + + /* No files exist, should get null. */ + assertNull("No last file", fileManager.getLastFileNum()); + + /* Create some files, ask for the largest. */ + File fakeFile1 = new File(envHome, "108.cif"); + fakeFile1.createNewFile(); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE); + FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE); + FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE); + + assertEquals("Should have 2 as last file", 2L, + fileManager.getLastFileNum().longValue()); + fakeFile1.delete(); + } + + /** + * Make sure we can find the next file in a set of files. + */ + @Test + public void testFollowingFile() + throws IOException { + + /* There shouldn't be any files here anymore. 
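+ * (After the cleanup in setUp the directory is empty; the sparse
+ * file set {00000001, 00000006, 00000009}.jdb created below then
+ * exercises the gap handling of getFollowingFileNum in both
+ * directions.)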
*/ + String[] jeFiles = fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertTrue(jeFiles.length == 0); + + /* No files exist, should get null. */ + assertNull("No last file", fileManager.getFollowingFileNum(0, true)); + assertNull("No last file", fileManager.getFollowingFileNum(0, false)); + assertNull("No last file", fileManager.getFollowingFileNum(1, true)); + assertNull("No last file", fileManager.getFollowingFileNum(-1, false)); + + /* Create some files. */ + File okFile1 = new File(envHome, "00000001.jdb"); + okFile1.createNewFile(); + + File fakeFile3 = new File(envHome, "003.jdb"); + fakeFile3.createNewFile(); + + File okFile6 = new File(envHome, "00000006.jdb"); + okFile6.createNewFile(); + + File okFile9 = new File(envHome, "00000009.jdb"); + okFile9.createNewFile(); + + /* Test forward */ + assertEquals("Should get 6 next", 6L, + fileManager.getFollowingFileNum(2, true).longValue()); + assertEquals("Should get 9 next, testing non-existent file", 9L, + fileManager.getFollowingFileNum(8, true).longValue()); + assertNull("Should get null next", + fileManager.getFollowingFileNum(9, true)); + assertNull("Should get null next", + fileManager.getFollowingFileNum(10, true)); + + /* Test prev */ + assertEquals("Should get 6 next, testing non-existent file", 6L, + fileManager.getFollowingFileNum(8, false).longValue()); + assertEquals("Should get 6 next", 6L, + fileManager.getFollowingFileNum(9, false).longValue()); + assertNull("Should get null next", + fileManager.getFollowingFileNum(1, false)); + assertNull("Should get null next", + fileManager.getFollowingFileNum(0, false)); + + okFile1.delete(); + fakeFile3.delete(); + okFile6.delete(); + okFile9.delete(); + } + + /** + * See if we can catch a file with an invalid header. + */ + @Test + public void testBadHeader() + throws IOException, DatabaseException { + + /* First try a bad environment r/w. */ + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManager test = + new FileManager(envImpl, new File("xxyy"), true); + fail("expect creation of " + test + "to fail."); + } catch (IllegalArgumentException e) { + /* should throw */ + } + + /* Next try a bad environment r/o. */ + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManager test = + new FileManager(envImpl, new File("xxyy"), false); + fail("expect creation of " + test + "to fail."); + } catch (IllegalArgumentException e) { + /* should throw */ + } + + /* Now create a file, but mess up the header. 
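+ * (Two junk bytes written at the very start of the file are enough
+ * to break the file header checksum, so the getFileHandle call below
+ * is expected to throw ChecksumException.)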
*/ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE); + + byte[] badData = new byte[]{1,1}; + RandomAccessFile file0 = + new RandomAccessFile + (fileManager.getFullFileName(0, FileManager.JE_SUFFIX), + FileManager.FileMode.READWRITE_MODE.getModeValue()); + file0.write(badData); + file0.close(); + fileManager.clear(); + + try { + fileManager.getFileHandle(0L); + fail("expect to catch a checksum error"); + } catch (ChecksumException e) { + } + } + + @Test + public void testTruncatedHeader() + throws IOException, DatabaseException { + + /* Create a log file */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManagerTestUtils.createLogFile(fileManager, envImpl, FILE_SIZE); + + /* Truncate the header */ + RandomAccessFile file0 = + new RandomAccessFile + (fileManager.getFullFileName(0, FileManager.JE_SUFFIX), + FileManager.FileMode.READWRITE_MODE.getModeValue()); + file0.getChannel().truncate(FileManager.firstLogEntryOffset()/2); + file0.close(); + + try { + fileManager.getFileHandle(0); + fail("Expected ChecksumException"); + } catch (ChecksumException e) { + } + } + + /** + * Test the file cache. + */ + @Test + public void testCache() + throws Throwable { + + try { + + /* + * Make five log files. The file descriptor cache should be empty. + */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManagerTestUtils.createLogFile + (fileManager, envImpl, FILE_SIZE); + FileManagerTestUtils.createLogFile + (fileManager, envImpl, FILE_SIZE); + FileManagerTestUtils.createLogFile + (fileManager, envImpl, FILE_SIZE); + FileManagerTestUtils.createLogFile + (fileManager, envImpl, FILE_SIZE); + FileManagerTestUtils.createLogFile + (fileManager, envImpl, FILE_SIZE); + + Long f0 = new Long(0L); + Long f1 = new Long(1L); + Long f2 = new Long(2L); + Long f3 = new Long(3L); + Long f4 = new Long(4L); + + Set keySet = fileManager.getCacheKeys(); + assertEquals("should have 0 keys", 0, keySet.size()); + + /* + * Get file descriptors for three files, expect 3 handles in the + * cache. + */ + FileHandle f0Handle = fileManager.getFileHandle(0); + FileHandle f1Handle = fileManager.getFileHandle(1); + FileHandle f2Handle = fileManager.getFileHandle(2); + keySet = fileManager.getCacheKeys(); + assertEquals("should have 3 keys", 3, keySet.size()); + assertTrue(keySet.contains(f0)); + assertTrue(keySet.contains(f1)); + assertTrue(keySet.contains(f2)); + + /* + * Ask for a fourth handle, the cache should grow even though it + * was set to 3 as a starting size, because all handles are + * locked. Do it within another thread, otherwise we'll get a + * latch-already-held exception when we test the other handles in + * the cache. The other thread will get the handle and then release + * it. + */ + CachingThread otherThread = new CachingThread(fileManager, 3); + otherThread.start(); + otherThread.join(); + + keySet = fileManager.getCacheKeys(); + assertEquals("should have 4 keys", 4, keySet.size()); + assertTrue(keySet.contains(f0)); + assertTrue(keySet.contains(f1)); + assertTrue(keySet.contains(f2)); + assertTrue(keySet.contains(f3)); + + /* + * Now ask for another file. The cache should not grow, because no + * handles are locked and there's room to evict one. 
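+ * (One possible outcome, depending on which unpinned handle is
+ * evicted, is a cache of {f1, f2, f3, f4}; the assertions below only
+ * pin down the size and the presence of f4.)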
+ */ + f0Handle.release(); + f1Handle.release(); + f2Handle.release(); + FileHandle f4Handle = fileManager.getFileHandle(4); + keySet = fileManager.getCacheKeys(); + assertEquals("should have 4 keys", 4, keySet.size()); + assertTrue(keySet.contains(f4)); + + f4Handle.release(); + + /* Clearing should release all file descriptors. */ + fileManager.clear(); + assertEquals("should have 0 keys", 0, + fileManager.getCacheKeys().size()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testFlipFile() + throws Throwable { + + /* + * The setUp() method opens a standalone FileManager, but in this test + * case we need a regular Environment. On Windows, we can't lock the + * file range twice in FileManager.lockEnvironment, so we must close + * the standalone FileManager here before opening a regular + * environment. + */ + fileManager.clear(); + fileManager.close(); + fileManager = null; + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + Environment env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManager fileManager = envImpl.getFileManager(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database exampleDb = + env.openDatabase(null, "simpleDb", dbConfig); + + assertEquals("Should have 0 as current file", 0L, + fileManager.getCurrentFileNum()); + long flipLsn = envImpl.forceLogFileFlip(); + assertEquals("LSN should be 1 post-flip", 1L, + DbLsn.getFileNumber(flipLsn)); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(StringUtils.toUTF8("key")); + data.setData(StringUtils.toUTF8("data")); + exampleDb.put(null, key, data); + assertEquals("Should have 1 as last file", 1L, + fileManager.getCurrentFileNum()); + exampleDb.close(); + env.close(); + } + + class CachingThread extends Thread { + private final FileManager fManager; + private final long fileNum; + + private FileHandle handle; + + CachingThread(FileManager fileManager, long fileNum) { + this.fManager = fileManager; + this.fileNum = fileNum; + } + + @Override + public void run() { + try { + handle = fManager.getFileHandle(fileNum); + handle.release(); + } catch (Exception e) { + fail(e.getMessage()); + } + } + + FileHandle getHandle() { + return handle; + } + } +} diff --git a/test/com/sleepycat/je/log/FileManagerTestUtils.java b/test/com/sleepycat/je/log/FileManagerTestUtils.java new file mode 100644 index 0000000..7766e21 --- /dev/null +++ b/test/com/sleepycat/je/log/FileManagerTestUtils.java @@ -0,0 +1,49 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import java.io.IOException; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; + +class FileManagerTestUtils { + + static long bumpLsn(FileManager fileManager, long size) { + + boolean flippedFile = fileManager.shouldFlipFile(size); + + return fileManager.advanceLsn( + fileManager.calculateNextLsn(flippedFile), + size, flippedFile); + }
+ + static void createLogFile(FileManager fileManager, + EnvironmentImpl envImpl, + long logFileSize) + throws DatabaseException, IOException { + + LogBuffer logBuffer = new LogBuffer(50, envImpl); + logBuffer.latchForWrite(); + logBuffer.getDataBuffer().flip(); + long size = logFileSize - FileManager.firstLogEntryOffset(); + boolean flippedFile = fileManager.shouldFlipFile(size); + long lsn = fileManager.calculateNextLsn(flippedFile); + fileManager.advanceLsn(lsn, size, flippedFile); + logBuffer.registerLsn(lsn); + fileManager.writeLogBuffer(logBuffer, true); + logBuffer.release(); + fileManager.syncLogEndAndFinishFile(); + } +} diff --git a/test/com/sleepycat/je/log/FileReaderBufferingTest.java b/test/com/sleepycat/je/log/FileReaderBufferingTest.java new file mode 100644 index 0000000..8fdc583 --- /dev/null +++ b/test/com/sleepycat/je/log/FileReaderBufferingTest.java @@ -0,0 +1,175 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */
+ +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils;
+ +/** + * Check our ability to adjust the file reader buffer size. + */ +public class FileReaderBufferingTest extends TestBase { + + private final File envHome; + private Environment env; + private EnvironmentImpl envImpl; + private ArrayList<Long> expectedLsns; + private ArrayList<String> expectedVals; + + public FileReaderBufferingTest() { + envHome = SharedTestUtils.getTestDir(); + }
+ + /** + * Should overflow once and then grow. + */ + @Test + public void testBasic() + throws Exception { + + readLog(1050, // starting size of object in entry + 0, // object growth increment + 100, // starting read buffer size + "3000", // max read buffer size + 0); // expected number of overflows. + }
+ + /** + * Cannot grow past the maximum buffer size, so the reads overflow + * repeatedly.
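+ * (Each 2000-byte entry exceeds the 1000-byte maximum, so all 10
+ * reads overflow, matching the expected overflow count below.)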
+ */ + @Test + public void testCantGrow() + throws Exception { + + readLog(2000, // starting size of object in entry + 0, // object growth increment + 100, // starting read buffer size + "1000", // max read buffer size + 10); // expected number of overflows. + }
+ + /** + * Should overflow, grow, and then reach the max. + */ + @Test + public void testReachMax() + throws Exception { + + readLog(1000, // size of object in entry + 1000, // object growth increment + 100, // starting read buffer size + "3500", // max read buffer size + 7); // expected number of overflows. + }
+ + private void readLog(int entrySize, + int entrySizeIncrement, + int readBufferSize, + String bufferMaxSize, + int expectedOverflows) + throws Exception { + + try { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.LOG_ITERATOR_MAX_SIZE.getName(), + bufferMaxSize); + env = new Environment(envHome, envConfig); + + envImpl = DbInternal.getNonNullEnvImpl(env); + + /* Make a log file */ + createLogFile(10, entrySize, entrySizeIncrement); + SearchFileReader reader = + new SearchFileReader(envImpl, + readBufferSize, + true, + DbLsn.longToLsn + (expectedLsns.get(0)), + DbLsn.NULL_LSN, + LogEntryType.LOG_TRACE); + + Iterator<Long> lsnIter = expectedLsns.iterator(); + Iterator<String> valIter = expectedVals.iterator(); + while (reader.readNextEntry()) { + Trace rec = (Trace) reader.getLastObject(); + assertTrue(lsnIter.hasNext()); + assertEquals(reader.getLastLsn(), + DbLsn.longToLsn(lsnIter.next())); + assertEquals(valIter.next(), rec.getMessage()); + } + assertEquals(10, reader.getNumRead()); + assertEquals(expectedOverflows, reader.getNRepeatIteratorReads()); + + } catch (Exception e) { + e.printStackTrace(); + throw e; + } finally { + env.close(); + } + }
+ + /** + * Write a logfile of entries, and put the entries that we expect to + * read into a list for later verification. + */ + private void createLogFile(int numItems, int size, int sizeIncrement) + throws IOException, DatabaseException { + + LogManager logManager = envImpl.getLogManager(); + expectedLsns = new ArrayList<Long>(); + expectedVals = new ArrayList<String>(); + + for (int i = 0; i < numItems; i++) { + /* Add a debug record just to be filler. */ + int recordSize = size + (i * sizeIncrement); + byte[] filler = new byte[recordSize]; + Arrays.fill(filler, (byte)i); + String val = StringUtils.fromUTF8(filler); + + Trace rec = new Trace(val); + long lsn = rec.trace(envImpl, rec); + expectedLsns.add(new Long(lsn)); + expectedVals.add(val); + } + + logManager.flushSync(); + envImpl.getFileManager().clear(); + } +} diff --git a/test/com/sleepycat/je/log/FileReaderTest.java b/test/com/sleepycat/je/log/FileReaderTest.java new file mode 100644 index 0000000..1ab8fca --- /dev/null +++ b/test/com/sleepycat/je/log/FileReaderTest.java @@ -0,0 +1,166 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.nio.ByteBuffer; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Test edge cases for file reading. + */ +public class FileReaderTest extends DualTestCase { + + private final File envHome; + + public FileReaderTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /* + * Check that we can handle the case when we are reading forward + * with other than the LastFileReader, and the last file exists but is + * 0 length. This case came up when a run of MemoryStress was killed off, + * and we then attempted to read it with DbPrintLog. + */ + @Test + public void testEmptyExtraFile() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + Environment env = create(envHome, envConfig); + + try { + /* Make an environment. */ + env.sync(); + + /* Add an extra, 0 length file */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + File newFile = new File(envHome, "00000001.jdb"); + newFile.createNewFile(); + + INFileReader reader = new INFileReader(envImpl, + 1000, + DbLsn.NULL_LSN, + DbLsn.NULL_LSN, + false, + DbLsn.NULL_LSN, + DbLsn.NULL_LSN, + null); + while (reader.readNextEntry()) { + } + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + close(env); + } + } + + /** + * Check that we can read a log with various non-default parameters set. + * (This test is currently only exercising one, je.log.checksumRead) + * @throws DatabaseException + * @throws EnvironmentLockedException + */ + @Test + public void testNonDefaultParams() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + /* Set non-default params. 
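+ * (je.log.checksumRead=false turns off checksum validation when
+ * reading; the TestReader defined below must still be able to walk
+ * every entry of the log cleanly.)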
*/ + envConfig.setConfigParam("je.log.checksumRead", "false"); + + Environment env = create(envHome, envConfig); + Database db = null; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + try { + db = env.openDatabase(null, "foo", dbConfig); + DatabaseEntry entry = new DatabaseEntry(); + for (int i = 0; i < 10; i++) { + IntegerBinding.intToEntry(i, entry); + assertEquals(OperationStatus.SUCCESS, + db.put(null, entry, entry)); + } + + env.sync(); + + TestReader reader = + new TestReader(DbInternal.getNonNullEnvImpl(env)); + while (reader.readNextEntry()) { + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (db != null) { + db.close(); + } + close(env); + } + } + + private static class TestReader extends FileReader { + + public TestReader(EnvironmentImpl envImpl) + throws Exception { + + super(envImpl, 1024 /* readBufferSize*/, true /* forward */, + 0L, null /* singleFileNumber */, + DbLsn.NULL_LSN /* endOfFileLsn */, + DbLsn.NULL_LSN /* finishLsn */); + + } + + @Override + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + LogEntryType type = + LogEntryType.findType(currentEntryHeader.getType()); + LogEntry entry = type.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + return true; + } + } +} diff --git a/test/com/sleepycat/je/log/INFileReaderTest.java b/test/com/sleepycat/je/log/INFileReaderTest.java new file mode 100644 index 0000000..2792c21 --- /dev/null +++ b/test/com/sleepycat/je/log/INFileReaderTest.java @@ -0,0 +1,436 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.cleaner.RecoveryUtilizationTracker; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test;
+ +public class INFileReaderTest extends TestBase { + + private static final boolean DEBUG = false; + + private final File envHome; + private Environment env; + + /* + * Need a handle onto the true environment in order to create + * a reader. + */ + private EnvironmentImpl envImpl; + private Database db; + private long maxNodeId; + private List<CheckInfo> checkList; + + public INFileReaderTest() { + super(); + envHome = SharedTestUtils.getTestDir(); + Key.DUMP_TYPE = DumpType.BINARY; + }
+ + @Before + public void setUp() + throws Exception { + + /* + * Note that we use the official Environment class to make the + * environment, so that everything is set up, but we then go a + * backdoor route to get to the underlying EnvironmentImpl class + * so that we don't require that the Environment.getDbEnvironment + * method be unnecessarily public. + */ + super.setUp(); + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam + (EnvironmentParams.BIN_DELTA_PERCENT.getName(), "75"); + envConfig.setAllowCreate(true); + + /* Disable noisy cleaner database usage. */ + DbInternal.setCreateEP(envConfig, false); + DbInternal.setCreateUP(envConfig, false); + DbInternal.setCheckpointUP(envConfig, false); + /* Don't run the cleaner without a UtilizationProfile. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + + env = new Environment(envHome, envConfig); + + envImpl = DbInternal.getNonNullEnvImpl(env); + }
+ + @After + public void tearDown() + throws DatabaseException { + + envImpl = null; + env.close(); + } + + /** + * Test reading an environment with no log file. + */ + @Test + public void testNoFile() + throws DatabaseException { + + /* No log file is created at all; the reader should find no entries.
*/ + INFileReader reader = new INFileReader + (envImpl, 1000, DbLsn.NULL_LSN, DbLsn.NULL_LSN, false, + DbLsn.NULL_LSN, DbLsn.NULL_LSN, null); + reader.addTargetType(LogEntryType.LOG_IN); + reader.addTargetType(LogEntryType.LOG_BIN); + reader.addTargetType(LogEntryType.LOG_IN_DELETE_INFO); + + int count = 0; + while (reader.readNextEntry()) { + count += 1; + } + assertEquals("Empty file should not have entries", 0, count); + } + + /** + * Run with an empty file + */ + @Test + public void testEmpty() + throws IOException, DatabaseException { + + /* Make a log file with a valid header, but no data. */ + FileManager fileManager = envImpl.getFileManager(); + FileManagerTestUtils.bumpLsn(fileManager, 1000000); + FileManagerTestUtils.createLogFile(fileManager, envImpl, 10000); + fileManager.clear(); + + INFileReader reader = new INFileReader + (envImpl, 1000, DbLsn.NULL_LSN, DbLsn.NULL_LSN, false, + DbLsn.NULL_LSN, DbLsn.NULL_LSN, null); + reader.addTargetType(LogEntryType.LOG_IN); + reader.addTargetType(LogEntryType.LOG_BIN); + reader.addTargetType(LogEntryType.LOG_IN_DELETE_INFO); + + int count = 0; + while (reader.readNextEntry()) { + count += 1; + } + assertEquals("Empty file should not have entries", 0, count); + } + + /** + * Run with defaults, read whole log + */ + @Test + public void testBasic() + throws IOException, DatabaseException { + + DbConfigManager cm = envImpl.getConfigManager(); + doTest(50, + cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE), + 0, + false); + } + + /** + * Run with very small buffers and track IDs + */ + @Test + public void testTracking() + throws IOException, DatabaseException { + + doTest(50, // num iterations + 10, // tiny buffer + 0, // start lsn index + true); // track ids + } + + /** + * Start in the middle of the file + */ + @Test + public void testMiddleStart() + throws IOException, DatabaseException { + + doTest(50, 100, 40, true); + } + + private void doTest(int numIters, + int bufferSize, + int startLsnIndex, + boolean trackIds) + throws IOException, DatabaseException { + + /* Fill up a fake log file. */ + createLogFile(numIters); + + /* Decide where to start. */ + long startLsn = DbLsn.NULL_LSN; + int checkIndex = 0; + if (startLsnIndex >= 0) { + startLsn = checkList.get(startLsnIndex).lsn; + checkIndex = startLsnIndex; + } + + /* Use an empty utilization map for testing tracking. */ + RecoveryUtilizationTracker tracker = trackIds ? + (new RecoveryUtilizationTracker(envImpl)) : null; + + INFileReader reader = + new INFileReader(envImpl, bufferSize, startLsn, DbLsn.NULL_LSN, + trackIds, DbLsn.NULL_LSN, + DbLsn.NULL_LSN, tracker); + reader.addTargetType(LogEntryType.LOG_IN); + reader.addTargetType(LogEntryType.LOG_BIN); + reader.addTargetType(LogEntryType.LOG_BIN_DELTA); + reader.addTargetType(LogEntryType.LOG_IN_DELETE_INFO); + + /* Read. */ + checkLogFile(reader, checkIndex, trackIds); + } + + /** + * Write a logfile of entries, then read the end + */ + private void createLogFile(int numIters) + throws IOException, DatabaseException { + + /* + * Create a log file full of INs, BIN-deltas and Debug Records + */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + LogManager logManager = envImpl.getLogManager(); + maxNodeId = 0; + + checkList = new ArrayList(); + + for (int i = 0; i < numIters; i++) { + /* Add a debug record. */ + Trace rec = new Trace("Hello there, rec " + (i + 1)); + rec.trace(envImpl, rec); + + /* Create, log, and save an IN. 
*/ + byte[] data = new byte[i + 1]; + Arrays.fill(data, (byte) (i + 1)); + + byte[] key = new byte[i + 1]; + Arrays.fill(key, (byte) (i + 1)); + + IN in = new IN(DbInternal.getDbImpl(db), key, 5, 10); + in.latch(CacheMode.UNCHANGED); + long lsn = in.log(); + checkList.add(new CheckInfo(lsn, in)); + + if (DEBUG) { + System.out.println("LSN " + i + " = " + lsn); + System.out.println("IN " + i + " = " + in.getNodeId()); + } + + /* Add other types of INs. */ + BIN bin = new BIN(DbInternal.getDbImpl(db), key, 2, 1); + bin.latch(CacheMode.UNCHANGED); + lsn = bin.log(); + checkList.add(new CheckInfo(lsn, bin)); + + /* Add provisional entries, which should get ignored. */ + bin.log( + false /*allowDeltas*/, true /*isProvisional*/, + false /*backgroundIO*/, in); + + bin.releaseLatch(); + + /* Add a LN, to stress the node tracking. */ + LN ln = LN.makeLN(envImpl, data); + ln.log( + envImpl, DbInternal.getDbImpl(db), + null /*locker*/, null /*writeLockInfo*/, + false /*newEmbeddedLN*/, key, + 0 /*newExpiration*/, false /*newExpirationInHours*/, + false /*currEmbeddedLN*/, DbLsn.NULL_LSN /*currLsn*/, + 0 /*currSize*/, true /*isInsertion*/, + false, ReplicationContext.NO_REPLICATE); + + /* + * Add an BIN-delta. Generate it by making the first, full version + * provisional so the test doesn't pick it up, and then log a + * delta. + */ + BIN binDeltaBin = + new BIN(DbInternal.getDbImpl(db), key, 10, 1); + maxNodeId = binDeltaBin.getNodeId(); + binDeltaBin.latch(); + + assertTrue(binDeltaBin.insertEntry(null, key, DbLsn.makeLsn(0, 0))); + + binDeltaBin.log( + false /*allowDeltas*/, true /*isProvisional*/, + false /*backgroundIO*/, in); + + /* Modify the bin with one entry so there can be a delta. */ + + byte[] keyBuf2 = new byte[2]; + Arrays.fill(keyBuf2, (byte) (i + 2)); + + assertTrue(binDeltaBin.insertEntry( + null, keyBuf2, DbLsn.makeLsn(100, 101))); + + binDeltaBin.log( + true /*allowDeltas*/, false /*isProvisional*/, + false /*backgroundIO*/, in); + lsn = binDeltaBin.getLastLoggedLsn(); + if (DEBUG) { + System.out.println("delta =" + binDeltaBin.getNodeId() + + " at LSN " + lsn); + } + checkList.add(new CheckInfo(lsn, binDeltaBin)); + + binDeltaBin.releaseLatch(); + + in.releaseLatch(); + } + + /* Flush the log, files. */ + logManager.flushSync(); + envImpl.getFileManager().clear(); + } + + private void checkLogFile(INFileReader reader, + int checkIndex, + boolean checkMaxNodeId) + throws DatabaseException { + + try { + /* Read all the INs. */ + int i = checkIndex; + + while (reader.readNextEntry()) { + if (DEBUG) { + System.out.println("i = " + + i + + " reader.isDeleteInfo=" + + reader.isDeleteInfo() + + " LSN = " + + reader.getLastLsn()); + } + + CheckInfo check = checkList.get(i); + + /* + * When comparing the check data against the data from the + * log, make the dirty bits match so that they compare + * equal. + */ + IN inFromLog = reader.getIN(DbInternal.getDbImpl(db)); + + inFromLog.setDatabase(DbInternal.getDbImpl(db)); + + inFromLog.latch(CacheMode.UNCHANGED); + + if (inFromLog.isBINDelta()) { + inFromLog.mutateToFullBIN(false /*leaveFreeSlot*/); + } + + inFromLog.setDirty(true); + inFromLog.releaseLatch(); + + IN testIN = check.in; + testIN.latch(CacheMode.UNCHANGED); + testIN.setDirty(true); + testIN.releaseLatch(); + + /* + * Only check the INs we created in the test. (The others + * are from the map db. + */ + if (reader.getDatabaseId(). 
+ equals(DbInternal.getDbImpl(db).getId())) { + // The IN should match + String inFromLogString = inFromLog.toString(); + String testINString = testIN.toString(); + if (DEBUG) { + System.out.println("testIN=" + testINString); + System.out.println("inFromLog=" + inFromLogString); + } + + assertEquals("IN " + + inFromLog.getNodeId() + + " at index " + + i + + " should match.\nTestIN=" + + testIN + + "\nLogIN=" + + inFromLog, + testINString, + inFromLogString); + } + + /* The LSN should match. */ + assertEquals + ("LSN " + i + " should match", + check.lsn, + reader.getLastLsn()); + + i++; + } + assertEquals(i, checkList.size()); + if (checkMaxNodeId) { + assertEquals(maxNodeId, reader.getMaxNodeId()); + } + } finally { + db.close(); + } + } + + private class CheckInfo { + long lsn; + IN in; + + CheckInfo(long lsn, IN in) { + this.lsn = lsn; + this.in = in; + } + } +} diff --git a/test/com/sleepycat/je/log/IOExceptionTest.java b/test/com/sleepycat/je/log/IOExceptionTest.java new file mode 100644 index 0000000..f03150a --- /dev/null +++ b/test/com/sleepycat/je/log/IOExceptionTest.java @@ -0,0 +1,299 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.nio.ByteBuffer; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +public class IOExceptionTest extends TestBase { + + private Environment env; + private Database db; + private final File envHome; + + public IOExceptionTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws DatabaseException { + + if (db != null) { + db.close(); + } + + if (env != null) { + env.close(); + } + } + + @Test + public void testLogBufferOverflowAbortNoDupes() { + doLogBufferOverflowTest(false, false); + } + + @Test + public void testLogBufferOverflowCommitNoDupes() { + doLogBufferOverflowTest(true, false); + } + + @Test + public void testLogBufferOverflowAbortDupes() { + doLogBufferOverflowTest(false, true); + } + + @Test + public void testLogBufferOverflowCommitDupes() { + doLogBufferOverflowTest(true, true); + } + + private void doLogBufferOverflowTest(boolean abort, boolean dupes) { + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + 
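/* A 100 KB cache plus the 100 KB record written below should be enough to overflow the log buffer, which is the point of this test. */ +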
envConfig.setCacheSize(100000); + env = new Environment(envHome, envConfig); + + String databaseName = "ioexceptiondb"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, databaseName, dbConfig); + + Transaction txn = env.beginTransaction(null, null); + DatabaseEntry oneKey = + (dupes ? + new DatabaseEntry(StringUtils.toUTF8("2")) : + new DatabaseEntry(StringUtils.toUTF8("1"))); + DatabaseEntry oneData = + new DatabaseEntry(new byte[10]); + DatabaseEntry twoKey = + new DatabaseEntry(StringUtils.toUTF8("2")); + DatabaseEntry twoData = + new DatabaseEntry(new byte[100000]); + if (dupes) { + DatabaseEntry temp = oneKey; + oneKey = oneData; + oneData = temp; + temp = twoKey; + twoKey = twoData; + twoData = temp; + } + + try { + assertTrue(db.put(txn, oneKey, oneData) == + OperationStatus.SUCCESS); + db.put(txn, twoKey, twoData); + } catch (DatabaseException DE) { + fail("unexpected DatabaseException"); + } + + /* Read back the data and make sure it all looks ok. */ + try { + assertTrue(db.get(txn, oneKey, oneData, null) == + OperationStatus.SUCCESS); + assertTrue(oneData.getData().length == (dupes ? 1 : 10)); + } catch (DatabaseException DE) { + fail("unexpected DatabaseException"); + } + + try { + assertTrue(db.get(txn, twoKey, twoData, null) == + OperationStatus.SUCCESS); + } catch (DatabaseException DE) { + fail("unexpected DatabaseException"); + } + + try { + if (abort) { + txn.abort(); + } else { + txn.commit(); + } + } catch (DatabaseException DE) { + fail("unexpected DatabaseException"); + } + + /* Read back the data and make sure it all looks ok. */ + try { + assertTrue(db.get(null, oneKey, oneData, null) == + (abort ? + OperationStatus.NOTFOUND : + OperationStatus.SUCCESS)); + assertTrue(oneData.getData().length == (dupes ? 1 : 10)); + } catch (DatabaseException DE) { + fail("unexpected DatabaseException"); + } + + try { + assertTrue(db.get(null, twoKey, twoData, null) == + (abort ? + OperationStatus.NOTFOUND : + OperationStatus.SUCCESS)); + } catch (DatabaseException DE) { + fail("unexpected DatabaseException"); + } + + } catch (Exception E) { + E.printStackTrace(); + } + } + + @Test + public void testIOExceptionDuringFileFlippingWrite() { + doIOExceptionDuringFileFlippingWrite(8, 33, 2); + } + + private void doIOExceptionDuringFileFlippingWrite(int numIterations, + int exceptionStartWrite, + int exceptionWriteCount) { + try { + EnvironmentConfig envConfig = new EnvironmentConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setConfigParam("je.log.fileMax", "1000"); + envConfig.setConfigParam("je.log.bufferSize", "1025"); + envConfig.setConfigParam("je.env.runCheckpointer", "false"); + envConfig.setConfigParam("je.env.runCleaner", "false"); + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + + /* + * Put one record into the database so it gets populated w/INs and + * LNs, and we can fake out the RMW commits used below. 
+ */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(5, key); + IntegerBinding.intToEntry(5, data); + db.put(null, key, data); + + /* + * Now generate trace and commit log entries. The trace records + * aren't forced out, but the commit records are forced. + */ + FileManager.WRITE_COUNT = 0; + FileManager.THROW_ON_WRITE = true; + FileManager.STOP_ON_WRITE_COUNT = exceptionStartWrite; + FileManager.N_BAD_WRITES = exceptionWriteCount; + for (int i = 0; i < numIterations; i++) { + + try { + /* Generate a non-forced record. */ + if (i == (numIterations - 1)) { + + /* + * On the last iteration, write a record that is large + * enough to force a file flip (i.e. an fsync which + * succeeds) followed by the large write (which doesn't + * succeed due to an IOException). In [#15754] the + * large write fails on Out Of Disk Space, rolling back + * the savedLSN to the previous file, even though the + * file has flipped. The subsequent write ends up in + * the flipped file, but at the offset of the older + * file (leaving a hole in the new flipped file). + */ + Trace.trace(envImpl, + i + "/" + FileManager.WRITE_COUNT + + " " + new String(new byte[2000])); + } else { + Trace.trace(envImpl, + i + "/" + FileManager.WRITE_COUNT + + " " + "xx"); + } + } catch (IllegalStateException ISE) { + /* Eat exception thrown by TraceLogHandler. */ + } + + /* + * Generate a forced record by calling commit. Since RMW + * transactions that didn't actually do a write won't log a + * commit record, do an addLogInfo to trick the txn into + * logging a commit. + */ + Transaction txn = env.beginTransaction(null, null); + db.get(txn, key, data, LockMode.RMW); + DbInternal.getTxn(txn).addLogInfo(DbLsn.makeLsn(3, 3)); + txn.commit(); + } + db.close(); + + /* + * Verify that the log files are ok and have no checksum errors. + */ + FileReader reader = + new FileReader(DbInternal.getNonNullEnvImpl(env), + 4096, true, 0, null, DbLsn.NULL_LSN, + DbLsn.NULL_LSN) { + @Override + protected boolean processEntry(ByteBuffer entryBuffer) { + entryBuffer.position(entryBuffer.position() + + currentEntryHeader.getItemSize()); + return true; + } + }; + + DbInternal.getNonNullEnvImpl(env).getLogManager().flushSync(); + + while (reader.readNextEntry()) { + } + + /* Make sure the reader really did scan the files. */ + assert (DbLsn.getFileNumber(reader.getLastLsn()) == 3) : + DbLsn.toString(reader.getLastLsn()); + + env.close(); + env = null; + db = null; + } catch (Throwable T) { + T.printStackTrace(); + } finally { + FileManager.STOP_ON_WRITE_COUNT = Long.MAX_VALUE; + FileManager.N_BAD_WRITES = Long.MAX_VALUE; + } + } +} diff --git a/test/com/sleepycat/je/log/InvisibleTest.java b/test/com/sleepycat/je/log/InvisibleTest.java new file mode 100644 index 0000000..aa40d7c --- /dev/null +++ b/test/com/sleepycat/je/log/InvisibleTest.java @@ -0,0 +1,349 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileNotFoundException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.util.DbVerifyLog; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Test that we can set invisible bits in the log, and that we can ignore them + * appropriately. Excluded from dual mode testing, because we are manipulating + * the log files explicitly. + */ +public class InvisibleTest extends TestBase { + + private static boolean verbose = Boolean.getBoolean("verbose"); + private final File envHome; + + public InvisibleTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * @throws FileNotFoundException + */ + @Test + public void testBasic() + throws FileNotFoundException { + + final String filler = "--------------------------------------------"; + + Environment env = setupEnvironment(); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + List allData = new ArrayList(); + Map> invisibleEntriesByFile = + new HashMap>(); + try { + LogManager logManager = envImpl.getLogManager(); + + /* + * Setup test data. Insert a number of records, then make all the + * even entries invisible. + */ + long currentFile = -1; + List invisibleLsns = null; + for (int i = 0; i < 50; i++) { + Trace t = new Trace("debug " + filler + i); + long lsn = t.trace(envImpl, t); + boolean isInvisible = true; + if ((i % 2) == 0) { + if (currentFile != DbLsn.getFileNumber(lsn)) { + currentFile = DbLsn.getFileNumber(lsn); + invisibleLsns = new ArrayList(); + invisibleEntriesByFile.put(currentFile, invisibleLsns); + } + invisibleLsns.add(lsn); + } else { + isInvisible = false; + } + allData.add(new TestInfo(lsn, t, isInvisible)); + } + + /* + * We want to run this test on multiple files, so make sure that + * the invisible entries occupy at least three distinct files. + */ + assertTrue("size=" + invisibleEntriesByFile.size(), + invisibleEntriesByFile.size() > 3); + + if (verbose) { + for (TestInfo info : allData) { + System.out.println(info); + } + System.out.println("------------------------"); + } + + /* + * Invisibility marking only works on log entries that are + * on disk. Flush everything to disk. + */ + logManager.flushSync(); + + /* Make the specified set of entries invisible */ + makeInvisible(envImpl, invisibleEntriesByFile); + + /* Check that only the right tracer log entries are visible */ + scanOnlyVisible(envImpl, allData); + + /* Check that we can read both visible and invisible log entries. */ + scanAll(envImpl, allData, false /* onlyReadVisible */); + + /* + * Check that we can fetch invisible log entries through the log + * manager. This kind of fetch is done when executing rollbacks and + * constructing the txn chain. 
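+             * (fetchInvisible() below exercises both paths: reading via
+             * logManager.getLogEntryAllowInvisible() succeeds, while a plain
+             * logManager.getLogEntry() fails its checksum check and surfaces
+             * as an EnvironmentFailureException with reason LOG_INTEGRITY.)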
+ * We want the fetch to go disk and not to fetch from the log + * buffers, so write some more stuff to the log so that all + * invisible entries are flushed out of the log buffers. + */ + for (int i = 0; i < 50; i++) { + Trace t = new Trace("debug " + filler + i); + t.trace(envImpl, t); + } + + fetchInvisible(envImpl, allData); + + try { + DbVerifyLog verifier = new DbVerifyLog(env); + verifier.verifyAll(); + } catch (Exception e) { + e.printStackTrace(); + fail("Don't expect exceptions here."); + } + } finally { + env.close(); + env = null; + } + } + + private Environment setupEnvironment() { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + /* + * Turn off cleaning so that it doesn't interfere with + * what is in the log. + */ + DbInternal.disableParameterValidation(envConfig); + + /* + * Use uniformly small log files, to make the invisible bit cross + * files. + */ + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000"); + envConfig.setConfigParam("je.env.runCleaner", "false"); + + /* + * When we test fetching of invisible log entries, we want to make sure + * we fetch from the file, and not from the un-modified, non-invisible + * entry in the log buffer. Because of that, make log buffers small. + */ + envConfig.setConfigParam("je.log.bufferSize", "1024"); + + return new Environment(envHome, envConfig); + } + + private void makeInvisible(EnvironmentImpl envImpl, + Map> invisibleEntries) { + + FileManager fileManager = envImpl.getFileManager(); + for (Map.Entry> entry : invisibleEntries.entrySet()) { + fileManager.makeInvisible(entry.getKey(), entry.getValue()); + } + + fileManager.force(invisibleEntries.keySet()); + } + + private void scanOnlyVisible(EnvironmentImpl envImpl, + List allData) { + scanAll(envImpl, allData, true /* onlyReadVisible */); + } + + private void scanAll(EnvironmentImpl envImpl, + List allData, + boolean onlyReadVisible) { + + /* + * Make a list of expected Trace entries. If we are skipping invisible + * entries, only include the visible ones. + */ + List expectedData = new ArrayList(); + for (TestInfo info : allData) { + if (info.isInvisible) { + if (!onlyReadVisible) { + expectedData.add(info.trace); + } + } else { + expectedData.add(info.trace); + } + } + + TestReader reader = new TestReader(envImpl, onlyReadVisible); + int i = (onlyReadVisible)? 1 : 0; + while (reader.readNextEntry()) { + assertEquals(allData.get(i).isInvisible, + reader.isInvisible()); + i += (onlyReadVisible) ? 2 : 1; + } + assertEquals(expectedData, reader.getTraces()); + } + + /** + * Check that invisible log entries throw a checksum exception by default, + * and that they can be read only through the special log manager method + * that expects and fixes invisibility. + * @throws FileNotFoundException + */ + private void fetchInvisible(EnvironmentImpl envImpl, + List allData) + throws FileNotFoundException { + + LogManager logManager = envImpl.getLogManager(); + for (TestInfo info : allData) { + + if (!info.isInvisible) { + continue; + } + + SingleItemEntry okEntry = (SingleItemEntry) + logManager.getLogEntryAllowInvisible(info.lsn).getEntry(); + assertEquals(info.trace, okEntry.getMainItem()); + + try { + logManager.getLogEntry(info.lsn); + fail("Should have thrown exception for " + info); + } catch(EnvironmentFailureException expected) { + assertEquals(EnvironmentFailureReason.LOG_INTEGRITY, + expected.getReason()); + } + } + } + + /* + * Struct to package together test information. 
+ */ + private static class TestInfo { + final long lsn; + final Trace trace; + final boolean isInvisible; + + TestInfo(long lsn, Trace trace, boolean isInvisible) { + this.lsn = lsn; + this.trace = trace; + this.isInvisible = isInvisible; + } + + @Override + public String toString() { + return DbLsn.getNoFormatString(lsn) + + (isInvisible ? " INVISIBLE " : " visible ") + trace; + } + } + + /* + * A FileReader that can read visible or invisible entries, upon command. + */ + private static class TestReader extends FileReader { + + private final boolean readVisible; + private final LogEntry entry; + + private List tracers; + private Trace currentTrace; + + public TestReader(EnvironmentImpl envImpl, boolean readVisible) { + + super(envImpl, + 1024 /* readBufferSize*/, + true /* forward */, + 0L, + null /* singleFileNumber */, + DbLsn.NULL_LSN /* endOfFileLsn */, + DbLsn.NULL_LSN /* finishLsn */); + this.readVisible = readVisible; + this.entry = LogEntryType.LOG_TRACE.getSharedLogEntry(); + tracers = new ArrayList(); + } + + /** + * @return true if this reader should process this entry, or just + * skip over it. + * @throws DatabaseException from subclasses. + */ + @Override + protected boolean isTargetEntry() + throws DatabaseException { + + if (readVisible && currentEntryHeader.isInvisible()) { + return false; + } + + if (currentEntryHeader.getType() == + LogEntryType.LOG_TRACE.getTypeNum()) { + return true; + } + + return false; + } + + @Override + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + currentTrace = (Trace) entry.getMainItem(); + tracers.add(currentTrace); + return true; + } + + public List getTraces() { + return tracers; + } + + boolean isInvisible() { + return currentEntryHeader.isInvisible(); + } + + Trace getCurrentTrace() { + return currentTrace; + } + } +} diff --git a/test/com/sleepycat/je/log/LNFileReaderTest.java b/test/com/sleepycat/je/log/LNFileReaderTest.java new file mode 100644 index 0000000..151dda0 --- /dev/null +++ b/test/com/sleepycat/je/log/LNFileReaderTest.java @@ -0,0 +1,515 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.WriteLockInfo; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test the LNFileReader + */ +public class LNFileReaderTest extends TestBase { + static private final boolean DEBUG = false; + + private final File envHome; + private Environment env; + private EnvironmentImpl envImpl; + private Database db; + private List checkList; + + public LNFileReaderTest() { + super(); + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws DatabaseException { + + /* + * Note that we use the official Environment class to make the + * environment, so that everything is set up, but we then go a backdoor + * route to get to the underlying EnvironmentImpl class so that we + * don't require that the Environment.getDbEnvironment method be + * unnecessarily public. + */ + TestUtils.removeLogFiles("Setup", envHome, false); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), "1024"); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + + envImpl = DbInternal.getNonNullEnvImpl(env); + } + + @After + public void tearDown() + throws DatabaseException { + + envImpl = null; + env.close(); + } + + /** + * Test no log file + */ + @Test + public void testNoFile() + throws DatabaseException { + + /* Make a log file with a valid header, but no data. */ + LNFileReader reader = + new LNFileReader(envImpl, + 1000, // read buffer size + DbLsn.NULL_LSN, // start lsn + true, // redo + DbLsn.NULL_LSN, // end of file lsn + DbLsn.NULL_LSN, // finish lsn + null, // single file + DbLsn.NULL_LSN); // ckpt end lsn + addUserLNTargetTypes(reader); + assertFalse("Empty file should not have entries", + reader.readNextEntry()); + } + + private void addUserLNTargetTypes(LNFileReader reader) { + for (LogEntryType entryType : LogEntryType.getAllTypes()) { + if (entryType.isUserLNType() && entryType.isTransactional()) { + reader.addTargetType(entryType); + } + } + } + + /** + * Run with an empty file. 
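+      * Unlike testNoFile above, a log file exists here but holds only the
+      * file header, so the reader should again report no target entries.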
+ */ + @Test + public void testEmpty() + throws IOException, DatabaseException { + + /* Make a log file with a valid header, but no data. */ + FileManager fileManager = envImpl.getFileManager(); + FileManagerTestUtils.createLogFile(fileManager, envImpl, 1000); + fileManager.clear(); + + LNFileReader reader = + new LNFileReader(envImpl, + 1000, // read buffer size + DbLsn.NULL_LSN, // start lsn + true, // redo + DbLsn.NULL_LSN, // end of file lsn + DbLsn.NULL_LSN, // finish lsn + null, // single file + DbLsn.NULL_LSN); // ckpt end lsn + addUserLNTargetTypes(reader); + assertFalse("Empty file should not have entries", + reader.readNextEntry()); + } + + /** + * Run with defaults, read whole log for redo, going forwards. + */ + @Test + public void testBasicRedo() + throws Throwable { + + try { + DbConfigManager cm = envImpl.getConfigManager(); + doTest(50, + cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE), + 0, + false, + true); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Run with defaults, read whole log for undo, going backwards. + */ + @Test + public void testBasicUndo() + throws Throwable { + + try { + DbConfigManager cm = envImpl.getConfigManager(); + doTest(50, + cm.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE), + 0, + false, + false); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Run with very small read buffer for redo, and track LNs. + */ + @Test + public void testSmallBuffersRedo() + throws IOException, DatabaseException { + + doTest(50, 10, 0, true, true); + } + + /** + * Run with very small read buffer for undo and track LNs. + */ + @Test + public void testSmallBuffersUndo() + throws IOException, DatabaseException { + + doTest(50, 10, 0, true, false); + } + + /** + * Run with medium buffers for redo. + */ + @Test + public void testMedBuffersRedo() + throws IOException, DatabaseException { + + doTest(50, 100, 0, false, true); + } + + /** + * Run with medium buffers for undo. + */ + @Test + public void testMedBuffersUndo() + throws IOException, DatabaseException { + + doTest(50, 100, 0, false, false); + } + + /** + * Start in the middle of the file for redo. + */ + @Test + public void testMiddleStartRedo() + throws IOException, DatabaseException { + + doTest(50, 100, 20, true, true); + } + + /** + * Start in the middle of the file for undo. + */ + @Test + public void testMiddleStartUndo() + throws IOException, DatabaseException { + + doTest(50, 100, 20, true, false); + } + + /** + * Create a log file, create the reader, read the log file + * @param numIters each iteration makes 3 log entries (debug record, ln + * and mapLN + * @param bufferSize to pass to reader + * @param checkIndex where in the test data to start + * @param trackLNs true if we're tracking LNS, false if we're tracking + * mapLNs + */ + private void doTest(int numIters, + int bufferSize, + int checkIndex, + boolean trackLNs, + boolean redo) + throws IOException, DatabaseException { + + checkList = new ArrayList(); + + /* Fill up a fake log file. */ + long endOfFileLsn = createLogFile(numIters, trackLNs, redo); + + if (DEBUG) { + System.out.println("eofLsn = " + endOfFileLsn); + } + + /* Decide where to start. */ + long startLsn = DbLsn.NULL_LSN; + long finishLsn = DbLsn.NULL_LSN; + if (redo) { + startLsn = checkList.get(checkIndex).lsn; + } else { + /* Going backwards. Start at last check entry. 
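+                * finishLsn (set just below to the check entry at checkIndex)
+                * is where the backward scan stops.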
+             */
+            int lastEntryIdx = checkList.size() - 1;
+            startLsn = checkList.get(lastEntryIdx).lsn;
+            finishLsn = checkList.get(checkIndex).lsn;
+        }
+
+        LNFileReader reader =
+            new LNFileReader(envImpl, bufferSize, startLsn, redo, endOfFileLsn,
+                             finishLsn, null, DbLsn.NULL_LSN);
+        if (trackLNs) {
+            addUserLNTargetTypes(reader);
+        } else {
+            reader.addTargetType(LogEntryType.LOG_MAPLN);
+        }
+
+        if (!redo) {
+            reader.addTargetType(LogEntryType.LOG_TXN_COMMIT);
+        }
+
+        /* Read. */
+        checkLogFile(reader, checkIndex, redo);
+    }
+
+    /**
+     * Write a log file of entries and put the entries that we expect to
+     * read into a list for later verification.
+     * @return end of file LSN.
+     */
+    private long createLogFile(int numIters, boolean trackLNs, boolean redo)
+        throws IOException, DatabaseException {
+
+        /*
+         * Create a log file full of LNs, DeletedDupLNs, MapLNs and Debug
+         * Records.
+         */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, "foo", dbConfig);
+        LogManager logManager = envImpl.getLogManager();
+        DatabaseImpl dbImpl = DbInternal.getDbImpl(db);
+        DatabaseImpl mapDbImpl = envImpl.getDbTree().getDb(DbTree.ID_DB_ID);
+
+        long lsn;
+        Txn userTxn = Txn.createLocalTxn(envImpl, new TransactionConfig());
+        long txnId = userTxn.getId();
+
+        for (int i = 0; i < numIters; i++) {
+            /* Add a debug record just to be filler. */
+            Trace rec = new Trace("Hello there, rec " + (i+1));
+            rec.trace(envImpl, rec);
+
+            /* Make a transactional LN; we expect it to be there. */
+            byte[] data = new byte[i+1];
+            Arrays.fill(data, (byte)(i+1));
+            LN ln = LN.makeLN(envImpl, data);
+            byte[] key = new byte[i+1];
+            Arrays.fill(key, (byte)(i+10));
+
+            /*
+             * Log an LN. If we're tracking LNs, add it to the verification
+             * list.
+             */
+            lsn = ln.log(
+                envImpl, dbImpl,
+                userTxn, new WriteLockInfo(),
+                false, key,
+                0 /*newExpiration*/, false /*newExpirationInHours*/,
+                false, DbLsn.NULL_LSN, 0,
+                true /*isInsertion*/,
+                false, ReplicationContext.NO_REPLICATE).lsn;
+
+            if (trackLNs) {
+                checkList.add(new CheckInfo(lsn, ln, key, txnId));
+            }
+
+            /*
+             * Make a non-transactional LN. It shouldn't get picked up by the
+             * reader.
+             */
+            data = Arrays.copyOf(data, data.length);
+            LN nonTxnalLN = LN.makeLN(envImpl, data);
+            nonTxnalLN.log(
+                envImpl, dbImpl, null, null,
+                false, key,
+                0 /*newExpiration*/, false /*newExpirationInHours*/,
+                false, DbLsn.NULL_LSN, 0,
+                true /*isInsertion*/,
+                false, ReplicationContext.NO_REPLICATE);
+
+            /* Add a MapLN. */
+            MapLN mapLN = new MapLN(dbImpl);
+
+            lsn = mapLN.log(
+                envImpl, mapDbImpl, null, null,
+                false, key,
+                0 /*newExpiration*/, false /*newExpirationInHours*/,
+                false, DbLsn.NULL_LSN, 0,
+                true /*isInsertion*/,
+                false, ReplicationContext.NO_REPLICATE).lsn;
+
+            if (!trackLNs) {
+                checkList.add(new CheckInfo(lsn, mapLN, key, 0));
+            }
+        }
+
+        long commitLsn = userTxn.commit(Durability.COMMIT_SYNC);
+
+        /* We only expect commit entries to be read in undo passes. */
+        if (!redo) {
+            checkList.add(new CheckInfo(commitLsn, null, null, txnId));
+        }
+
+        /* Make a marker log entry to pose as the end of file. */
+        Trace rec = new Trace("Pretend this is off the file");
+        long lastLsn = rec.trace(envImpl, rec);
+        db.close();
+        logManager.flushSync();
+        envImpl.getFileManager().clear();
+        return lastLsn;
+    }
+
+    private void checkLogFile(LNFileReader reader,
+                              int checkIndex,
+                              boolean redo)
+        throws DatabaseException {
+
+        LN lnFromLog;
+        byte[] keyFromLog;
+
+        /* Read all the LNs. */
+        int i;
+        if (redo) {
+            /* Start where indicated.
*/ + i = checkIndex; + } else { + /* start at the end. */ + i = checkList.size() - 1; + } + + while (reader.readNextEntry()) { + + CheckInfo expected = checkList.get(i); + + /* Check LSN. */ + assertEquals("LSN " + i + " expected " + + DbLsn.getNoFormatString(expected.lsn) + + " but read " + + DbLsn.getNoFormatString(reader.getLastLsn()), + expected.lsn, + reader.getLastLsn()); + + if (reader.isLN()) { + + /* Check the LN. */ + LNLogEntry lnEntry = reader.getLNLogEntry(); + lnEntry.postFetchInit(false /*isDupDb*/); + lnFromLog = lnEntry.getLN(); + LN expectedLN = expected.ln; + assertEquals("Should be the same type of object", + expectedLN.getClass(), + lnFromLog.getClass()); + + if (DEBUG) { + if (!expectedLN.toString().equals(lnFromLog.toString())) { + System.out.println("expected = " + + expectedLN.toString()+ + "lnFromLog = " + + lnFromLog.toString()); + } + } + + /* + * Don't expect MapLNs to be equal, since they change as + * logging occurs and utilization info changes. + */ + if (!(expectedLN instanceof MapLN)) { + assertEquals("LN " + i + " should match", + expectedLN.toString(), + lnFromLog.toString()); + } + + /* Check the key. */ + keyFromLog = lnEntry.getKey(); + byte[] expectedKey = expected.key; + if (DEBUG) { + if (!Arrays.equals(expectedKey, keyFromLog)) { + System.out.println("expectedKey=" + expectedKey + + " logKey=" + keyFromLog); + } + } + + assertTrue("Key " + i + " should match", + Arrays.equals(expectedKey, keyFromLog)); + + if (expected.txnId != 0) { + assertEquals(expected.txnId, + reader.getTxnId().longValue()); + } + + } else { + /* Should be a txn commit record. */ + assertEquals(expected.txnId, + reader.getTxnCommitId()); + } + + if (redo) { + i++; + } else { + i--; + } + } + int expectedCount = checkList.size() - checkIndex; + assertEquals(expectedCount, reader.getNumRead()); + } + + private class CheckInfo { + long lsn; + LN ln; + byte[] key; + long txnId; + + CheckInfo(long lsn, LN ln, byte[] key, long txnId) { + this.lsn = lsn; + this.ln = ln; + this.key = key; + this.txnId = txnId; + } + } +} diff --git a/test/com/sleepycat/je/log/LastFileReaderTest.java b/test/com/sleepycat/je/log/LastFileReaderTest.java new file mode 100644 index 0000000..1a70eaa --- /dev/null +++ b/test/com/sleepycat/je/log/LastFileReaderTest.java @@ -0,0 +1,601 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.AbortLogEntry; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.txn.TxnAbort; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; + +public class LastFileReaderTest extends TestBase { + + private DbConfigManager configManager; + private FileManager fileManager; + private LogManager logManager; + private final File envHome; + private Environment env; + + public LastFileReaderTest() { + super(); + envHome = SharedTestUtils.getTestDir(); + } + + @Override + @After + public void tearDown() { + + /* + * Pass false to skip checkpoint, since the file manager may hold + * an open file that we've trashed in the tests, so we don't want to + * write to it here. + */ + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.close(false); + } catch (DatabaseException e) { + } + + } + + /* Create an environment, using the default log file size. */ + private void initEnv() + throws Exception { + + initEnv(null); + } + + /* Create an environment, specifying the log file size. */ + private void initEnv(String logFileSize) + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + + /* Don't run daemons; we do some abrupt shutdowns. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "6"); + if (logFileSize != null) { + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), logFileSize); + } + + /* Disable noisy cleaner database usage. */ + DbInternal.setCreateEP(envConfig, false); + DbInternal.setCreateUP(envConfig, false); + DbInternal.setCheckpointUP(envConfig, false); + + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + env = new Environment(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + configManager = envImpl.getConfigManager(); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + } + + /** + * Run with an empty file that has a file header but no log entries. + */ + @Test + public void testEmptyAtEnd() + throws Throwable { + + initEnv(); + + /* + * Make a log file with a valid header, but no data. 
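+          * The only entry the reader sees below should then be the header
+          * itself, which is why the last LSN is asserted to be at file
+          * offset 0.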
+         */
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 100);
+        fileManager.clear();
+
+        LastFileReader reader = new LastFileReader(envImpl, 1000);
+        assertTrue(reader.readNextEntry());
+        assertEquals(0, DbLsn.getFileOffset(reader.getLastLsn()));
+    }
+
+    /**
+     * Run with an empty, 0 length file at the end. This has caused a
+     * BufferUnderflowException. [#SR 12631]
+     */
+    @Test
+    public void testLastFileEmpty()
+        throws Throwable {
+
+        initEnv("1000");
+        int numIters = 10;
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        List<Loggable> testObjs = new ArrayList<Loggable>();
+        List<Long> testLsns = new ArrayList<Long>();
+
+        /*
+         * Create a log with one or more files. Use only Trace objects so we
+         * can iterate through the entire log.
+         */
+        for (int i = 0; i < numIters; i++) {
+            /* Add a debug record. */
+            Trace msg = new Trace("Hello there, rec " + (i+1));
+            testObjs.add(msg);
+            testLsns.add(new Long(Trace.trace(envImpl, msg)));
+        }
+
+        /* Flush the log files. */
+        logManager.flushSync();
+        fileManager.clear();
+
+        int lastFileNum = fileManager.getAllFileNumbers().length - 1;
+
+        /*
+         * Create an extra, totally empty file.
+         */
+        fileManager.syncLogEnd();
+        fileManager.clear();
+        String emptyLastFile =
+            fileManager.getFullFileName(lastFileNum+1, FileManager.JE_SUFFIX);
+
+        RandomAccessFile file =
+            new RandomAccessFile(emptyLastFile, FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+        file.close();
+
+        assertTrue(fileManager.getAllFileNumbers().length >= 2);
+
+        /*
+         * Try a LastFileReader. It should give us an end-of-log position in
+         * the penultimate file.
+         */
+        LastFileReader reader = new LastFileReader(envImpl, 1000);
+        while (reader.readNextEntry()) {
+        }
+
+        /*
+         * The reader should be positioned at the last, valid file, skipping
+         * this 0 length file.
+         */
+        assertEquals("lastValid=" + DbLsn.toString(reader.getLastValidLsn()),
+                     lastFileNum,
+                     DbLsn.getFileNumber(reader.getLastValidLsn()));
+        assertEquals(lastFileNum, DbLsn.getFileNumber(reader.getEndOfLog()));
+    }
+
+    /**
+     * Corrupt the file header of the one and only log file.
+     */
+    @Test
+    public void testBadFileHeader()
+        throws Throwable {
+
+        initEnv();
+
+        /*
+         * Handle a log file that has data and a bad header. First corrupt the
+         * existing log file. We will not be able to establish the log end,
+         * but we won't throw away the file because it has data.
+         */
+        long lastFileNum = fileManager.getLastFileNum().longValue();
+        String lastFile =
+            fileManager.getFullFileName(lastFileNum,
+                                        FileManager.JE_SUFFIX);
+
+        RandomAccessFile file =
+            new RandomAccessFile(lastFile, FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+
+        file.seek(15);
+        file.writeBytes("putting more junk in, mess up header");
+        file.close();
+
+        /*
+         * We should see an exception on this one, because we made a file that
+         * looks like it has a bad header and bad data.
+         */
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        try {
+            LastFileReader reader = new LastFileReader(envImpl, 1000);
+            fail("Should see exception when creating " + reader);
+        } catch (EnvironmentFailureException e) {
+            assertSame(EnvironmentFailureReason.
+                       LOG_CHECKSUM, e.getReason());
+            /* Eat exception, expected. */
+        }
+
+        /*
+         * Now make a bad file header, but one that is less than the size of a
+         * file header. This file ought to get moved aside.
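+         * JE renames such a file with a .bad suffix; the assertions below
+         * look for 00000000.bad and then 00000000.bad.1.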
+ */ + file = new RandomAccessFile(lastFile, "rw"); + file.getChannel().truncate(0); + file.writeBytes("bad"); + file.close(); + + LastFileReader reader = new LastFileReader(envImpl, 1000); + /* Nothing comes back from reader. */ + assertFalse(reader.readNextEntry()); + File movedFile = new File(envHome, "00000000.bad"); + assertTrue(movedFile.exists()); + + /* Try a few more times, we ought to keep moving the file. */ + file = new RandomAccessFile(lastFile, "rw"); + file.getChannel().truncate(0); + file.writeBytes("bad"); + file.close(); + + reader = new LastFileReader(envImpl, 1000); + assertTrue(movedFile.exists()); + File movedFile1 = new File(envHome, "00000000.bad.1"); + assertTrue(movedFile1.exists()); + } + + /** + * Run with defaults. + */ + @Test + public void testBasic() + throws Throwable { + + initEnv(); + int numIters = 50; + List testObjs = new ArrayList(); + List testLsns = new ArrayList(); + + fillLogFile(numIters, testLsns, testObjs); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + LastFileReader reader = + new LastFileReader(envImpl, + configManager.getInt + (EnvironmentParams.LOG_ITERATOR_READ_SIZE)); + + checkLogEnd(reader, numIters, testLsns, testObjs); + } + + /** + * Run with very small read buffer. + */ + @Test + public void testSmallBuffers() + throws Throwable { + + initEnv(); + int numIters = 50; + List testObjs = new ArrayList(); + List testLsns = new ArrayList(); + + fillLogFile(numIters, testLsns, testObjs); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + LastFileReader reader = new LastFileReader(envImpl, 10); + checkLogEnd(reader, numIters, testLsns, testObjs); + } + + /** + * Run with medium buffers. + */ + @Test + public void testMedBuffers() + throws Throwable { + + initEnv(); + int numIters = 50; + List testObjs = new ArrayList(); + List testLsns = new ArrayList(); + + fillLogFile(numIters, testLsns, testObjs); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + LastFileReader reader = new LastFileReader(envImpl, 100); + checkLogEnd(reader, numIters, testLsns, testObjs); + } + + /** + * Put junk at the end of the file. + */ + @Test + public void testJunk() + throws Throwable { + + initEnv(); + + int numIters = 50; + List testObjs = new ArrayList(); + List testLsns = new ArrayList(); + + /* Write junk into the end of the file. */ + fillLogFile(numIters, testLsns, testObjs); + long lastFileNum = fileManager.getLastFileNum().longValue(); + String lastFile = + fileManager.getFullFileName(lastFileNum, + FileManager.JE_SUFFIX); + + RandomAccessFile file = + new RandomAccessFile(lastFile, FileManager.FileMode. + READWRITE_MODE.getModeValue()); + file.seek(file.length()); + file.writeBytes("hello, some junk"); + file.close(); + + /* Read. */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + LastFileReader reader = new LastFileReader(envImpl, 100); + checkLogEnd(reader, numIters, testLsns, testObjs); + } + + /** + * Make a log, then make a few extra files at the end, one empty, one with + * a bad file header. + */ + @Test + public void testExtraEmpty() + throws Throwable { + + initEnv(); + int numIters = 50; + List testObjs = new ArrayList(); + List testLsns = new ArrayList(); + int defaultBufferSize = + configManager.getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE); + + /* + * Make a valid log with data, then put a couple of extra files after + * it. Make the file numbers non-consecutive. We should have three log + * files. 
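+         * (Each pair of bumpLsn calls below skips the next LSN ahead by
+         * 100,000,000 at a time, which is what makes the new file numbers
+         * non-consecutive.)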
+         */
+
+        /* Create a log. */
+        fillLogFile(numIters, testLsns, testObjs);
+
+        /* First empty log file -- header, no data. */
+        FileManagerTestUtils.bumpLsn(fileManager, 100000000);
+        FileManagerTestUtils.bumpLsn(fileManager, 100000000);
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 10);
+
+        /* Second empty log file -- header, no data. */
+        FileManagerTestUtils.bumpLsn(fileManager, 100000000);
+        FileManagerTestUtils.bumpLsn(fileManager, 100000000);
+        FileManagerTestUtils.createLogFile(fileManager, envImpl, 10);
+
+        assertEquals(3, fileManager.getAllFileNumbers().length);
+
+        /*
+         * Corrupt the last empty file and then search for the correct last
+         * file.
+         */
+        long lastFileNum = fileManager.getLastFileNum().longValue();
+        String lastFile =
+            fileManager.getFullFileName(lastFileNum,
+                                        FileManager.JE_SUFFIX);
+        RandomAccessFile file =
+            new RandomAccessFile(lastFile, FileManager.FileMode.
+                                 READWRITE_MODE.getModeValue());
+        file.getChannel().truncate(10);
+        file.close();
+        fileManager.clear();
+
+        /*
+         * Make a reader and read the log. After the reader returns, we should
+         * only have 2 log files.
+         */
+        LastFileReader reader = new LastFileReader(envImpl,
+                                                   defaultBufferSize);
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+        assertEquals(2, fileManager.getAllFileNumbers().length);
+
+        /*
+         * Corrupt the now "last" empty file and try again. This is actually
+         * the first empty file we made.
+         */
+        lastFileNum = fileManager.getLastFileNum().longValue();
+        lastFile = fileManager.getFullFileName(lastFileNum,
+                                               FileManager.JE_SUFFIX);
+        file = new RandomAccessFile(lastFile, FileManager.FileMode.
+                                    READWRITE_MODE.getModeValue());
+        file.getChannel().truncate(10);
+        file.close();
+
+        /*
+         * Validate that we have the right number of log entries, and only one
+         * valid log file.
+         */
+        reader = new LastFileReader(envImpl, defaultBufferSize);
+        checkLogEnd(reader, numIters, testLsns, testObjs);
+        assertEquals(1, fileManager.getAllFileNumbers().length);
+    }
+
+    /**
+     * Create a marker file that has a RestoreRequired entry, and make
+     * sure that the last file reader detects it.
+     */
+    @Test
+    public void testRestoreMarkerFile()
+        throws Throwable {
+
+        initEnv();
+
+        /*
+         * Make a marker log file.
+         */
+        RestoreMarker marker = new RestoreMarker(fileManager, logManager);
+        Properties props = new Properties();
+        props.setProperty("prop1", "1");
+        props.setProperty("prop2", "2");
+        marker.createMarkerFile(RestoreRequired.FailureType.NETWORK_RESTORE,
+                                props);
+
+        /*
+         * We expect the marker file to have a file header, followed by
+         * the RestoreRequired entry.
+         */
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        LastFileReader reader = new LastFileReader(envImpl, 1000);
+        assertTrue(reader.readNextEntry());
+        LogEntryType type = reader.getEntryType();
+        assertEquals(LogEntryType.LOG_FILE_HEADER, type);
+
+        assertTrue(reader.readNextEntry());
+        type = reader.getEntryType();
+        assertEquals(LogEntryType.LOG_RESTORE_REQUIRED, type);
+
+        RestoreRequired rr = reader.getRestoreRequired();
+        Properties rrProps = rr.getProperties();
+        assertTrue(rrProps.entrySet().containsAll(props.entrySet()));
+    }
+
+    /**
+     * Write a log file of entries; callers then read back the end of the log.
+     */
+    private void fillLogFile(int numIters,
+                             List<Long> testLsns,
+                             List<Loggable> testObjs)
+        throws Throwable {
+
+        /*
+         * Create a log file full of Trace records and txn aborts.
+ */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + for (int i = 0; i < numIters; i++) { + /* Add a debug record. */ + Trace msg = new Trace("Hello there, rec " + (i+1)); + testObjs.add(msg); + testLsns.add(new Long(Trace.trace(envImpl, msg))); + + /* Add a txn abort */ + TxnAbort abort = new TxnAbort(10L, 200L, + 1234567 /* masterNodeId */, + 1 /* DTVLSN */); + final AbortLogEntry entry = new AbortLogEntry(abort); + testObjs.add(abort); + testLsns.add(new Long(logManager.log + (entry, + ReplicationContext.NO_REPLICATE))); + } + + /* Flush the log, files. */ + logManager.flushSync(); + fileManager.clear(); + } + + /** + * Use the LastFileReader to check this file, see if the log end is set + * right. + */ + private void checkLogEnd(LastFileReader reader, + int numIters, + List testLsns, + List testObjs) + throws Throwable { + + reader.setTargetType(LogEntryType.LOG_DBTREE); + reader.setTargetType(LogEntryType.LOG_TXN_COMMIT); + reader.setTargetType(LogEntryType.LOG_TXN_ABORT); + reader.setTargetType(LogEntryType.LOG_TRACE); + + for (LogEntryType entryType : LogEntryType.getAllTypes()) { + if (entryType.isUserLNType()) { + reader.setTargetType(entryType); + } + } + + /* Now ask the LastFileReader to read it back. */ + while (reader.readNextEntry()) { + } + + /* Truncate the file. */ + reader.setEndOfFile(); + + /* + * In non-replicated environments, we should see numIters * 2 + 4 + * entries (the extra 4 are the root, debug records, checkpoints and + * file header written by reocovery. + */ + assertEquals("should have seen this many entries", numIters * 2 + 4, + reader.getNumRead()); + + /* Check last used LSN. */ + int numLsns = testLsns.size(); + long lastLsn = DbLsn.longToLsn(testLsns.get(numLsns - 1)); + assertEquals("last LSN", lastLsn, reader.getLastLsn()); + + /* Check last offset. */ + assertEquals("prev offset", DbLsn.getFileOffset(lastLsn), + reader.getPrevOffset()); + + /* Check next available LSN. */ + int lastSize = + testObjs.get(testObjs.size() - 1).getLogSize(); + + long entryHeaderSize = LogEntryHeader.MIN_HEADER_SIZE; + assertEquals("next available", + DbLsn.makeLsn(DbLsn.getFileNumber(lastLsn), + DbLsn.getFileOffset(lastLsn) + + entryHeaderSize + lastSize), + reader.getEndOfLog()); + + /* The log should be truncated to just the right size. */ + FileHandle handle = fileManager.getFileHandle(0L); + RandomAccessFile file = handle.getFile(); + assertEquals(DbLsn.getFileOffset(reader.getEndOfLog()), + file.getChannel().size()); + handle.release(); + fileManager.clear(); + + /* Check the last tracked LSNs. */ + assertTrue(reader.getLastSeen(LogEntryType.LOG_DBTREE) != + DbLsn.NULL_LSN); + assertTrue(reader.getLastSeen(LogEntryType.LOG_IN) == DbLsn.NULL_LSN); + + for (LogEntryType entryType : LogEntryType.getAllTypes()) { + if (entryType.isUserLNType()) { + assertTrue(reader.getLastSeen(entryType) == DbLsn.NULL_LSN); + } + } + + assertEquals(reader.getLastSeen(LogEntryType.LOG_TRACE), + DbLsn.longToLsn(testLsns.get(numLsns - 2))); + assertEquals(reader.getLastSeen(LogEntryType.LOG_TXN_ABORT), + lastLsn); + } +} diff --git a/test/com/sleepycat/je/log/LogBufferPoolTest.java b/test/com/sleepycat/je/log/LogBufferPoolTest.java new file mode 100644 index 0000000..1517aeb --- /dev/null +++ b/test/com/sleepycat/je/log/LogBufferPoolTest.java @@ -0,0 +1,305 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; + +public class LogBufferPoolTest extends TestBase { + + Environment env; + Database db; + EnvironmentImpl envImpl; + FileManager fileManager; + File envHome; + LogBufferPool bufPool; + + public LogBufferPoolTest() { + super(); + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + bufPool = null; + if (fileManager != null) { + fileManager.clear(); + fileManager.close(); + } + } + + /** + * Make sure that we'll add more buffers as needed. + */ + @Test + public void testGrowBuffers() + throws Throwable { + + try { + + setupEnv(true, true); + + /* + * Each buffer can only hold 2 items. Put enough test items in to + * get seven buffers. + */ + List lsns = new ArrayList(); + for (int i = 0; i < 14; i++) { + long lsn = insertData(bufPool, (byte) (i + 1)); + lsns.add(new Long(lsn)); + } + + /* + * Check that the bufPool knows where each LSN lives and that the + * fetched buffer does hold this item. + */ + LogBuffer logBuf; + ByteBuffer b; + for (int i = 0; i < 14; i++) { + + /* + * For each test LSN, ask the bufpool for the logbuffer that + * houses it. + */ + long testLsn = DbLsn.longToLsn(lsns.get(i)); + logBuf = bufPool.getReadBufferByLsn(testLsn); + assertNotNull(logBuf); + + /* Here's the expected data. */ + byte[] expected = new byte[10]; + Arrays.fill(expected, (byte)(i+1)); + + /* Here's the data in the log buffer. */ + byte[] logData = new byte[10]; + b = logBuf.getDataBuffer(); + long firstLsnInBuf = logBuf.getFirstLsn(); + b.position((int) (DbLsn.getFileOffset(testLsn) - + DbLsn.getFileOffset(firstLsnInBuf))); + logBuf.getDataBuffer().get(logData); + + /* They'd better be equal. */ + assertTrue(Arrays.equals(logData, expected)); + logBuf.release(); + } + + /* + * This LSN shouldn't be in the buffers, it's less than any + * buffered item. + */ + assertNull(bufPool.getReadBufferByLsn(DbLsn.makeLsn(0,10))); + + /* + * This LSN is illegal to ask for, it's greater than any registered + * LSN. 
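+             * In both the too-small and too-large cases the pool simply
+             * returns null rather than throwing, so assertNull suffices.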
+ */ + assertNull("LSN too big", + bufPool.getReadBufferByLsn(DbLsn.makeLsn(10, 141))); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper to insert fake data. + * @return LSN registered for this fake data + */ + private long insertData(LogBufferPool bufPool, + byte value) + throws IOException, DatabaseException { + + byte[] data = new byte[10]; + Arrays.fill(data, value); + boolean flippedFile = fileManager.shouldFlipFile(data.length); + long lsn = fileManager.calculateNextLsn(flippedFile); + LogBuffer logBuf = bufPool.getWriteBuffer(data.length, flippedFile); + fileManager.advanceLsn(lsn, data.length, flippedFile); + logBuf.latchForWrite(); + logBuf.getDataBuffer().put(data); + logBuf.registerLsn(lsn); + logBuf.release(); + bufPool.bumpCurrent(0); + bufPool.writeDirty(true); + return lsn; + } + + /** + * Test buffer flushes. + */ + @Test + public void testBufferFlush() + throws Throwable { + + try { + setupEnv(false, false); + assertFalse("There should be no files", fileManager.filesExist()); + + fileManager.VERIFY_CHECKSUMS = false; + + /* + * Each buffer can only hold 2 items. Put enough test items in to + * get five buffers. + */ + for (int i = 0; i < 9; i++) { + insertData(bufPool, (byte) (i+1)); + } + fileManager.syncLogEnd(); + + /* We should see two files exist. */ + String[] fileNames = + fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertEquals("Should be 2 files", 2, fileNames.length); + + /* Read the files. */ + if (false) { + ByteBuffer dataBuffer = ByteBuffer.allocate(100); + FileHandle file0 = fileManager.getFileHandle(0L); + RandomAccessFile file = file0.getFile(); + FileChannel channel = file.getChannel(); + int bytesRead = channel.read(dataBuffer, + FileManager.firstLogEntryOffset()); + dataBuffer.flip(); + assertEquals("Check bytes read", 50, bytesRead); + assertEquals("Check size of file", 50, dataBuffer.limit()); + file.close(); + FileHandle file1 = fileManager.getFileHandle(1L); + file = file1.getFile(); + channel = file.getChannel(); + bytesRead = channel.read(dataBuffer, + FileManager.firstLogEntryOffset()); + dataBuffer.flip(); + assertEquals("Check bytes read", 40, bytesRead); + assertEquals("Check size of file", 40, dataBuffer.limit()); + file0.release(); + file1.release(); + } + } catch (Throwable e) { + e.printStackTrace(); + throw e; + } + } + + @Test + public void testTemporaryBuffers() + throws Exception { + + final int KEY_SIZE = 10; + final int DATA_SIZE = 1000000; + + tempBufferInitEnvInternal + ("0", MemoryBudget.MIN_MAX_MEMORY_SIZE_STRING); + DatabaseEntry key = new DatabaseEntry(new byte[KEY_SIZE]); + DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]); + db.put(null, key, data); + db.close(); + env.close(); + } + + private void tempBufferInitEnvInternal(String buffSize, String cacheSize) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + if (!buffSize.equals("0")) { + envConfig.setConfigParam("je.log.totalBufferBytes", buffSize); + } + + if (!cacheSize.equals("0")) { + envConfig.setConfigParam("je.maxMemory", cacheSize); + } + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, "InsertAndDelete", dbConfig); + } + + private void setupEnv(boolean inMemory, boolean detectLogDelete) + throws Exception { + + 
EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam( + EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam( + EnvironmentParams.LOG_FILE_MAX.getName(), "90"); + envConfig.setConfigParam( + EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + envConfig.setAllowCreate(true); + if (inMemory) { + /* Make the bufPool grow some buffers. Disable writing. */ + envConfig.setConfigParam( + EnvironmentParams.LOG_MEMORY_ONLY.getName(), "true"); + } + + if (!detectLogDelete) { + envConfig.setConfigParam( + EnvironmentParams.LOG_DETECT_FILE_DELETE.getName(), "false"); + } + + + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + + /* Make a standalone file manager for this test. */ + envImpl.close(); + envImpl.open(); /* Just sets state to OPEN. */ + fileManager = new FileManager(envImpl, envHome, false); + bufPool = new LogBufferPool(fileManager, envImpl); + + /* + * Remove any files after the environment is created again! We want to + * remove the files made by recovery, so we can test the file manager + * in controlled cases. + */ + TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); + } +} diff --git a/test/com/sleepycat/je/log/LogEntryTest.java b/test/com/sleepycat/je/log/LogEntryTest.java new file mode 100644 index 0000000..be32eeb --- /dev/null +++ b/test/com/sleepycat/je/log/LogEntryTest.java @@ -0,0 +1,102 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Collection; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.ReplicableLogEntry; +import com.sleepycat.util.test.TestBase; + +public class LogEntryTest extends TestBase { + + @Test + public void testEquality() + throws DatabaseException { + + byte testTypeNum = LogEntryType.LOG_IN.getTypeNum(); + + /* Look it up by type */ + LogEntryType foundType = LogEntryType.findType(testTypeNum); + assertEquals(foundType, LogEntryType.LOG_IN); + assertTrue(foundType.getSharedLogEntry() instanceof + com.sleepycat.je.log.entry.INLogEntry); + + /* Look it up by type */ + foundType = LogEntryType.findType(testTypeNum); + assertEquals(foundType, LogEntryType.LOG_IN); + assertTrue(foundType.getSharedLogEntry() instanceof + com.sleepycat.je.log.entry.INLogEntry); + + /* Get a new entry object */ + LogEntry sharedEntry = foundType.getSharedLogEntry(); + LogEntry newEntry = foundType.getNewLogEntry(); + + assertTrue(sharedEntry != newEntry); + } + + /** + * See {@link ReplicableLogEntry#getEmbeddedLoggables()}. 
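+     *
+     * <p>The invariant verified below: an entry's last format change must be
+     * at least as recent as that of every loggable embedded in it,
+     * recursively; otherwise verifyLastFormatChange fails the test.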
+ */ + @Test + public void testLastFormatChange() throws Exception { + for (final LogEntryType type : LogEntryType.getAllTypes()) { + final LogEntry entry = type.getSharedLogEntry(); + if (!(entry instanceof ReplicableLogEntry)) { + continue; + } + final ReplicableLogEntry repEntry = (ReplicableLogEntry) entry; + verifyLastFormatChange( + repEntry.getClass().getName(), repEntry.getLastFormatChange(), + repEntry.getEmbeddedLoggables()); + } + } + + private void verifyLastFormatChange( + final String entryClassName, + final int entryLastFormatChange, + final Collection embeddedLoggables) + throws Exception { + + assertNotNull(embeddedLoggables); + + if (embeddedLoggables.size() == 0) { + return; + } + + for (final VersionedWriteLoggable child : embeddedLoggables) { + + final int childLastFormatChange = child.getLastFormatChange(); + + if (childLastFormatChange > entryLastFormatChange) { + fail(String.format( + "Embedded %s version %d is GT entry %s version %d", + child.getClass().getName(), childLastFormatChange, + entryClassName, entryLastFormatChange)); + } + + verifyLastFormatChange( + entryClassName, entryLastFormatChange, + child.getEmbeddedLoggables()); + } + } +} diff --git a/test/com/sleepycat/je/log/LogFileGapTest.java b/test/com/sleepycat/je/log/LogFileGapTest.java new file mode 100644 index 0000000..48fdad8 --- /dev/null +++ b/test/com/sleepycat/je/log/LogFileGapTest.java @@ -0,0 +1,197 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/* + * Test whether JE can correctly find the FileHeader.lastEntryInPrevFileOffset + * when the FileManager.truncateLog is invoked, also test that JE can throw out + * an EnvironmentFailureException rather than hang when a log file gap is + * detected while reading backwards during recovery, see SR [#19463]. + */ +public class LogFileGapTest extends TestBase { + private static final String DB_NAME = "testDb"; + private final File envHome; + private Environment env; + + public LogFileGapTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + + if (env != null) { + env.close(); + } + } + + /* + * Test the case that we can recover after truncating some log entries on + * an invalidated Environment, it will truncate to the FileHeader. 
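+      * With truncateFileHeader=true, createLogFileGapAndRecover reads only
+      * one entry before truncating, so the truncation point is the file
+      * header itself.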
+ */ + @Test + public void testLogFileAllTruncate() + throws Throwable { + + createEnvAndData(); + createLogFileGapAndRecover(true, true, false); + } + + /* + * Test the case that we can recover after truncating some log entries on + * an invalidated Environment, it won't truncate the FileHeader. + */ + @Test + public void testLogFileNotTruncateAll() + throws Throwable { + + createEnvAndData(); + createLogFileGapAndRecover(true, false, false); + } + + /* + * Test that an EnvironmentFailureException will be thrown if a log file + * gap is detected during the recovery. + */ + @Test + public void testLogFileGapRecover() + throws Throwable { + + createEnvAndData(); + createLogFileGapAndRecover(false, false, true); + } + + /* Create some data in the databases. */ + private void createEnvAndData() { + env = new Environment(envHome, createEnvConfig()); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database db = env.openDatabase(null, DB_NAME, dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[200]); + for (int i = 1; i <= 50; i++) { + IntegerBinding.intToEntry(i, key); + assertTrue(OperationStatus.SUCCESS == db.put(null, key, data)); + } + db.close(); + } + + private EnvironmentConfig createEnvConfig() { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + /* Disable all daemon threads. */ + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam + (EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam + (EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "2000"); + + return envConfig; + } + + /* + * Truncate the log file to create a log file gap and see if we correctly + * throw out an EnvironmentFailureException. + */ + private void createLogFileGapAndRecover(boolean invalidateEnvironment, + boolean truncateFileHeader, + boolean setLogFileEnd) + throws Throwable { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + int readBufferSize = envImpl.getConfigManager().getInt + (EnvironmentParams.LOG_ITERATOR_READ_SIZE); + LastFileReader fileReader = + new LastFileReader(envImpl, readBufferSize); + + /* + * If we want to truncate to the FileHeader, the reader will only read + * once, otherwise, it will read two entries. + */ + int threshold = (truncateFileHeader ? 1 : 2); + int counter = 0; + while (fileReader.readNextEntry()) { + counter++; + if (counter == threshold) { + break; + } + } + + /* Calculate the lsn that we want to truncate to. */ + long truncatedLsn = fileReader.getLastLsn(); + + if (invalidateEnvironment) { + envImpl.invalidate(EnvironmentFailureException.unexpectedState + ("Invalidate Environment for testing")); + envImpl.abnormalClose(); + } + + if (setLogFileEnd) { + envImpl.getFileManager().truncateSingleFile + (DbLsn.getFileNumber(truncatedLsn), + DbLsn.getFileOffset(truncatedLsn)); + } else { + envImpl.getFileManager().truncateLog + (DbLsn.getFileNumber(truncatedLsn), + DbLsn.getFileOffset(truncatedLsn)); + } + + /* Do a log file flip. */ + if (!invalidateEnvironment) { + envImpl.forceLogFileFlip(); + envImpl.abnormalClose(); + } + + /* Recover the Environment. 
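+ * Expected outcome: recovery succeeds when truncateLog was used to
+ * remove the tail (no gap remains), and throws
+ * EnvironmentFailureException when truncateSingleFile left a gap
+ * (setLogFileEnd == true).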
*/ + try { + env = new Environment(envHome, createEnvConfig()); + env.close(); + } catch (EnvironmentFailureException e) { + /* Expected exceptions if a log file gap is detected. */ + assertTrue(e.getMessage(), setLogFileEnd); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } +} diff --git a/test/com/sleepycat/je/log/LogFlusherTest.java b/test/com/sleepycat/je/log/LogFlusherTest.java new file mode 100644 index 0000000..da05adf --- /dev/null +++ b/test/com/sleepycat/je/log/LogFlusherTest.java @@ -0,0 +1,556 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.TimerTask; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Test that the LogFlusher works as we expect. + */ +public class LogFlusherTest extends TestBase { + + private static final String DB_NAME = "testDb"; + private static final String DATA_VALUE = "herococo"; + + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + private Environment env; + + public LogFlusherTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * Tests the basic configuration of LogFlusher, using deprecated HA params. + * + * This is an older test case, added prior to moving the LogFlusher into + * standalone JE. It continues to use the deprecated params for additional + * compatibility testing. + */ + @Test + @SuppressWarnings("deprecation") + public void testHAConfigOld() throws IOException { + + /* + * Open env with flushSync interval set to 30s using deprecated + * LOG_FLUSH_TASK_INTERVAL. The flushNoSync interval will have its + * default value. 
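+ *
+ * For comparison, a sketch of the same setting via the newer
+ * standalone param (exercised by testConfig() below):
+ *
+ *   envConfig.setConfigParam(
+ *       EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL, "30 s");
+ *
+ * LOG_FLUSH_NO_SYNC_INTERVAL keeps its 5 s default.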
+ */ + EnvironmentConfig envConfig = + RepTestUtils.createEnvConfig(Durability.COMMIT_NO_SYNC); + + ReplicationConfig repConfig = new ReplicationConfig(); + + repConfig.setConfigParam( + ReplicationMutableConfig.LOG_FLUSH_TASK_INTERVAL, "30 s"); + + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, 3, envConfig, repConfig); + + RepTestUtils.joinGroup(repEnvInfo); + assertTrue(repEnvInfo[0].isMaster()); + + TimerTask[] oldSyncTasks = new TimerTask[repEnvInfo.length]; + TimerTask[] oldNoSyncTasks = new TimerTask[repEnvInfo.length]; + + for (int i = 0; i < repEnvInfo.length; i++) { + + LogFlusher flusher = repEnvInfo[i].getRepImpl().getLogFlusher(); + assertNotNull(flusher); + + assertNotNull(flusher.getFlushSyncTask()); + assertNotNull(flusher.getFlushNoSyncTask()); + assertEquals(30000, flusher.getFlushSyncInterval()); + assertEquals(5000, flusher.getFlushNoSyncInterval()); + + oldSyncTasks[i] = flusher.getFlushSyncTask(); + oldNoSyncTasks[i] = flusher.getFlushNoSyncTask(); + } + + /* + * Mutate flushSync interval to 50s using LOG_FLUSH_TASK_INTERVAL. + */ + repConfig.setConfigParam( + ReplicationMutableConfig.LOG_FLUSH_TASK_INTERVAL, "50 s"); + + for (RepEnvInfo element : repEnvInfo) { + element.getEnv().setRepMutableConfig(repConfig); + } + + for (int i = 0; i < repEnvInfo.length; i++) { + + LogFlusher flusher = repEnvInfo[i].getRepImpl().getLogFlusher(); + assertNotNull(flusher); + + assertNotNull(flusher.getFlushSyncTask()); + assertNotNull(flusher.getFlushNoSyncTask()); + assertNotSame(flusher.getFlushSyncTask(), oldSyncTasks[i]); + assertNotSame(flusher.getFlushNoSyncTask(), oldNoSyncTasks[i]); + assertEquals(50000, flusher.getFlushSyncInterval()); + assertEquals(5000, flusher.getFlushNoSyncInterval()); + } + + /* + * Disable both intervals using deprecated RUN_LOG_FLUSH_TASK. + */ + repConfig.setConfigParam( + ReplicationMutableConfig.RUN_LOG_FLUSH_TASK, "false"); + + for (RepEnvInfo element : repEnvInfo) { + element.getEnv().setRepMutableConfig(repConfig); + } + + for (RepEnvInfo info : repEnvInfo) { + + LogFlusher flusher = info.getRepImpl().getLogFlusher(); + assertNotNull(flusher); + + assertNull(flusher.getFlushSyncTask()); + assertNull(flusher.getFlushNoSyncTask()); + assertEquals(0, flusher.getFlushSyncInterval()); + assertEquals(0, flusher.getFlushNoSyncInterval()); + } + + RepTestUtils.shutdownRepEnvs(repEnvInfo); + RepTestUtils.removeRepEnvironments(envRoot); + + /* + * Open new env using deprecated RUN_LOG_FLUSH_TASK. + */ + repConfig.setConfigParam( + ReplicationConfig.RUN_LOG_FLUSH_TASK, "false"); + + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, 3, envConfig, repConfig); + + RepTestUtils.joinGroup(repEnvInfo); + + for (RepEnvInfo info : repEnvInfo) { + + LogFlusher flusher = info.getRepImpl().getLogFlusher(); + assertNotNull(flusher); + + assertNull(flusher.getFlushSyncTask()); + assertNull(flusher.getFlushNoSyncTask()); + assertEquals(0, flusher.getFlushSyncInterval()); + assertEquals(0, flusher.getFlushNoSyncInterval()); + } + + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + /** + * Tests config via EnvironmentConfig, not using old HA params. 
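+ * <p>
+ * Per the assertions below, the defaults are a 20 s flushSync
+ * interval and a 5 s flushNoSync interval; each interval can be
+ * overridden independently of the other.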
+ */ + @Test + public void testConfig() throws IOException { + + ReplicationConfig repConfig = new ReplicationConfig(); + + checkConfig(null, null, repConfig, 20 * 1000, 5 * 1000); + + checkConfig("7 ms", null, repConfig, 7, 5 * 1000); + + checkConfig(null, "7 ms", repConfig, 20 * 1000, 7); + + checkConfig("3 ms", "4 ms", repConfig, 3, 4); + } + + /** + * Tests use of old HA config params, and combination with new params. + */ + @Test + @SuppressWarnings("deprecation") + public void testConfigCompatibility() throws IOException { + + /* + * HA flush interval param is used, but is illegal when the standalone + * param is also specified. + */ + ReplicationConfig repConfig = + new ReplicationConfig().setConfigParam( + ReplicationMutableConfig.LOG_FLUSH_TASK_INTERVAL, "7 s"); + + EnvironmentConfig envConfig = + RepTestUtils.createEnvConfig(Durability.COMMIT_NO_SYNC); + + checkHAConfig(envConfig, repConfig, 7 * 1000, 5 * 1000); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL, "8 ms"); + + try { + checkHAConfig(envConfig, repConfig, 0, 0); + fail(); + } catch (IllegalArgumentException e) { + /* Expected */ + } + + /* + * HA flushing param may be set to false and will disable flushing, but + * is illegal if a standalone param is also specified. + */ + repConfig = new ReplicationConfig().setConfigParam( + ReplicationMutableConfig.RUN_LOG_FLUSH_TASK, "false"); + + envConfig = RepTestUtils.createEnvConfig(Durability.COMMIT_NO_SYNC); + + checkHAConfig(envConfig, repConfig, 0, 0); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL, "8 ms"); + + try { + checkHAConfig(envConfig, repConfig, 0, 0); + fail(); + } catch (IllegalArgumentException e) { + /* Expected */ + } + + envConfig = RepTestUtils.createEnvConfig(Durability.COMMIT_NO_SYNC); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL, "8 ms"); + + try { + checkHAConfig(envConfig, repConfig, 0, 0); + fail(); + } catch (IllegalArgumentException e) { + /* Expected */ + } + } + + private void checkConfig( + String syncParam, + String noSyncParam, + ReplicationConfig repConfig, + int flushSyncInterval, + int flushNoSyncInterval) + throws IOException { + + EnvironmentConfig envConfig = + RepTestUtils.createEnvConfig(Durability.COMMIT_NO_SYNC); + + if (syncParam != null) { + envConfig.setConfigParam( + EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL, syncParam); + } + + if (noSyncParam != null) { + envConfig.setConfigParam( + EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL, noSyncParam); + } + + checkStandaloneConfig( + envConfig, flushSyncInterval, flushNoSyncInterval); + + checkHAConfig( + envConfig, repConfig, flushSyncInterval, flushNoSyncInterval); + } + + private void checkStandaloneConfig( + EnvironmentConfig envConfig, + int flushSyncInterval, + int flushNoSyncInterval) + throws IOException { + + env = new Environment(envRoot, envConfig); + + expectFlushIntervals(env, flushSyncInterval, flushNoSyncInterval); + + env.close(); + env = null; + } + + private void checkHAConfig( + EnvironmentConfig envConfig, + ReplicationConfig repConfig, + int flushSyncInterval, + int flushNoSyncInterval) + throws IOException { + + SharedTestUtils.cleanUpTestDir(envRoot); + RepTestUtils.removeRepEnvironments(envRoot); + + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, 3, envConfig, repConfig); + + RepTestUtils.joinGroup(repEnvInfo); + assertTrue(repEnvInfo[0].isMaster()); + + for (RepEnvInfo info : repEnvInfo) { + + expectFlushIntervals( + info.getEnv(), flushSyncInterval, flushNoSyncInterval); + 
} + + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + private void expectFlushIntervals( + Environment env, + int flushSyncInterval, + int flushNoSyncInterval) { + + LogFlusher flusher = + DbInternal.getEnvironmentImpl(env).getLogFlusher(); + + assertNotNull(flusher); + + assertEquals( + flushSyncInterval, flusher.getFlushSyncInterval()); + + assertEquals( + flushNoSyncInterval, flusher.getFlushNoSyncInterval()); + } + + @Test + public void testFlushSync() + throws IOException, InterruptedException { + + checkLogFlush(true /*fsync*/, true /*expectFlush*/); + } + + @Test + public void testNoFlushSync() + throws IOException, InterruptedException { + + checkLogFlush(true /*fsync*/, false /*expectFlush*/); + } + + @Test + public void testFlushNoSync() + throws IOException, InterruptedException { + + checkLogFlush(false /*fsync*/, true /*expectFlush*/); + } + + @Test + public void testNoFlushNoSync() + throws IOException, InterruptedException { + + checkLogFlush(false /*fsync*/, false /*expectFlush*/); + } + + /** + * @param fsync is true to test the flushSync task, or false to test the + * flushNoSync task. + * + * @param expectFlush If true, checks that the LogFlushTask does flush the + * dirty data to the log, and it can be read after crash. If false, checks + * that the LogFlushTask does not flush the updates before the crash; no + * data may be written to the disk. + */ + @SuppressWarnings("null") + private void checkLogFlush( + boolean fsync, + boolean expectFlush) + throws IOException, InterruptedException { + + /* + * When we expect a flush, use a small value for the sync (or noSync) + * interval, and disable the noSync (or sync) interval. Use a large + * interval when we will not expect a flush. + */ + createRepEnvInfo(expectFlush ? "5 s" : "20 s", fsync); + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + long startTime = System.currentTimeMillis(); + + StatsConfig stConfig = new StatsConfig(); + stConfig.setClear(true); + + /* Flush the existed dirty data before we do writes. */ + for (RepEnvInfo element : repEnvInfo) { + element.getEnv().sync(); + element.getEnv().getStats(stConfig); + } + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + Database db = master.openDatabase(null, DB_NAME, dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= 100; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry(DATA_VALUE, data); + db.put(null, key, data); + } + + assertTrue(System.currentTimeMillis() - startTime < 5000); + + Thread.sleep(8000); // Add 3s to ensure timer fires + + long endTime = System.currentTimeMillis(); + + for (RepEnvInfo element : repEnvInfo) { + + final long fsyncCount = + element.getEnv().getStats(null).getNLogFSyncs(); + + final LogFlusher flusher = element.getRepImpl().getLogFlusher(); + + final LogFlusher.FlushTask task = fsync ? + flusher.getFlushSyncTask() : flusher.getFlushNoSyncTask(); + + final int flushCount = task.getFlushCount(); + final long execTime = task.scheduledExecutionTime(); + + if (expectFlush) { + assertTrue(flushCount > 0); + assertTrue(execTime > startTime); + assertTrue(execTime < endTime); + if (fsync) { + assertTrue(fsyncCount > 0); + } else { + assertEquals(0, fsyncCount); + } + } else { + assertEquals(0, flushCount); + assertTrue(execTime < startTime); + assertEquals(0, fsyncCount); + } + } + + /* Close the replicas without doing a checkpoint. 
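+ * abnormalClose() simulates a crash: nothing extra is flushed on the
+ * way down, so the read-only reopen below sees the updates only if a
+ * flush task had already run.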
*/ + File[] envHomes = new File[3]; + for (int i = 0; i < repEnvInfo.length; i++) { + envHomes[i] = repEnvInfo[i].getEnvHome(); + repEnvInfo[i].getRepImpl().abnormalClose(); + } + + /* + * Open a read only standalone Environment on each node to see whether + * the data has been flushed to the disk. + */ + EnvironmentConfig newConfig = new EnvironmentConfig(); + newConfig.setAllowCreate(false); + newConfig.setReadOnly(true); + newConfig.setTransactional(true); + + for (int i = 0; i < repEnvInfo.length; i++) { + Environment env = new Environment(envHomes[i], newConfig); + + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(true); + + db = null; + try { + db = env.openDatabase(null, DB_NAME, dbConfig); + } catch (DatabaseNotFoundException e) { + + /* + * If the system crashes before the flush, the database is + * not synced to the disk, so this database can't be found + * at all, it's expected. + */ + assertFalse(expectFlush); + } + + if (expectFlush) { + assertEquals(100l, db.count()); + for (int index = 1; index <= 100; index++) { + IntegerBinding.intToEntry(index, key); + OperationStatus status = db.get(null, key, data, null); + if (expectFlush) { + assertSame(OperationStatus.SUCCESS, status); + assertEquals(DATA_VALUE, + StringBinding.entryToString(data)); + } + } + } + + if (db != null) { + db.close(); + } + env.close(); + } + } + + /** + * Uses the given interval as the flush interval for the Sync (or NoSync if + * fsync is false) timer, and disable the NoSync (or Sync) timer. + */ + private void createRepEnvInfo(String interval, boolean fsync) + throws IOException { + + /* + * Set a large buffer size and disable checkpointing, so the data in + * the buffer will only be flushed by the LogFlushTask. + */ + EnvironmentConfig envConfig = + RepTestUtils.createEnvConfig(Durability.COMMIT_NO_SYNC); + + envConfig.setConfigParam( + EnvironmentConfig.MAX_MEMORY, "20000000"); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_TOTAL_BUFFER_BYTES, "120000000"); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_NUM_BUFFERS, "4"); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + + /* Configure the log flush task. */ + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FLUSH_SYNC_INTERVAL, + fsync ? interval : "0"); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FLUSH_NO_SYNC_INTERVAL, + fsync ? "0" : interval); + + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, 3, envConfig, null); + } +} diff --git a/test/com/sleepycat/je/log/LogManagerTest.java b/test/com/sleepycat/je/log/LogManagerTest.java new file mode 100644 index 0000000..11a0590 --- /dev/null +++ b/test/com/sleepycat/je/log/LogManagerTest.java @@ -0,0 +1,1071 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.TraceLogEntry; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +import org.junit.Test; + +/** + * Test basic log management. + */ +public class LogManagerTest extends TestBase { + + static private final boolean DEBUG = false; + + private FileManager fileManager; + private LogManager logManager; + private final File envHome; + private Environment env; + + public LogManagerTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Log and retrieve objects, with log in memory + */ + @Test + public void testBasicInMemory() + throws IOException, ChecksumException, DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), "1000"); + turnOffDaemons(envConfig); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + logAndRetrieve(envImpl); + env.close(); + } + + @Test + public void testFlushItemLargerThanBufferSize() + throws Throwable { + + JUnitThread junitThread = null; + try { + /* Open env with small buffer size. */ + final int bufSize = 1024; + final String bufSizeStr = String.valueOf(bufSize); + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.LOG_BUFFER_SIZE, + bufSizeStr); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + assertEquals(bufSizeStr, + env.getConfig().getConfigParam + (EnvironmentConfig.LOG_BUFFER_SIZE)); + + /* Write a Trace to initialize FileManager.endOfLogRWFile. */ + final Trace smTrace = new Trace("small"); + Trace.trace(envImpl, smTrace); + logManager.flushNoSync(); + + /* Make a Trace entry that is larger than buffer size. 
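+ * The loop below grows bigString until it reaches bufSize (1024), so
+ * the resulting Trace entry cannot fit within a single log buffer.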
*/ + final StringBuilder bigString = new StringBuilder(bufSize * 2); + while (bigString.length() < bufSize) { + bigString.append + ("12345679890123456798901234567989012345679890"); + } + final Trace trace = new Trace(bigString.toString()); + + /* Use a separate thread to simulate an fsync. */ + final CountDownLatch step1 = new CountDownLatch(1); + junitThread = new JUnitThread + ("LogManagerTest.testFlushItemLargerThanBufferSize") { + @Override + public void testBody() + throws Throwable { + + fileManager.testWriteQueueLock(); + step1.countDown(); + + /* + * Sleep long enough to let logForceFlush() get to the + * point that it checks the fsync lock. But we have to + * release the lock or it will deadlock. + */ + Thread.sleep(1000); + fileManager.testWriteQueueUnlock(); + } + }; + + /* + * Flush log buffer and write queue, then log the big trace entry. + * Before the bug fix [#20717], the write queue was not flushed + * when the trace was logged. + */ + logManager.flushNoSync(); + assertFalse(fileManager.hasQueuedWrites()); + junitThread.start(); + step1.await(); + logManager.logForceFlush(new TraceLogEntry(trace), + false /*fsyncRequired*/, + ReplicationContext.NO_REPLICATE); + assertFalse(fileManager.hasQueuedWrites()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (junitThread != null) { + junitThread.shutdown(); + junitThread = null; + } + env.close(); + } + } + + /** + * Log and retrieve objects, with log completely flushed to disk + */ + @Test + public void testBasicOnDisk() + throws Throwable { + + try { + + /* + * Force the buffers and files to be small. The log buffer is + * actually too small, will have to grow dynamically. Each file + * only holds one test item (each test item is 50 bytes). + */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam( + EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam( + EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + envConfig.setConfigParam( + EnvironmentParams.LOG_FILE_MAX.getName(), "79"); + envConfig.setConfigParam( + EnvironmentParams.NODE_MAX.getName(), "6"); + + /* Disable noisy cleaner database usage. */ + DbInternal.setCreateEP(envConfig, false); + DbInternal.setCreateUP(envConfig, false); + DbInternal.setCheckpointUP(envConfig, false); + /* Don't run the cleaner without a UtilizationProfile. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + + /* + * Don't run any daemons, those emit trace messages and other log + * entries and mess up our accounting. + */ + turnOffDaemons(envConfig); + envConfig.setAllowCreate(true); + + /* + * Recreate the file manager and log manager w/different configs. + */ + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + + logAndRetrieve(envImpl); + + /* + * Expect 10 je files, 7 to hold logged records, 1 to hold root, no + * recovery messages, 2 for checkpoint records + */ + String[] names = fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertEquals("Should be 10 files on disk", 10, names.length); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + env.close(); + } + } + + /** + * Log and retrieve objects, with some of log flushed to disk, some of log + * in memory. 
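+ * <p>
+ * Tiny log buffers and a 64-byte file max are used so that some
+ * entries reach disk while others are still in the buffer pool when
+ * they are read back.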
+ */ + @Test + public void testComboDiskMemory() + throws Throwable { + + try { + + /* + * Force the buffers and files to be small. The log buffer is + * actually too small, will have to grow dynamically. Each file + * only holds one test item (each test item is 50 bytes) + */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam + (EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + "64"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + + /* Disable noisy cleaner database usage. */ + DbInternal.setCreateEP(envConfig, false); + DbInternal.setCreateUP(envConfig, false); + DbInternal.setCheckpointUP(envConfig, false); + /* Don't run the cleaner without a UtilizationProfile. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + + /* + * Don't run the cleaner or the checkpointer daemons, those create + * more log entries and mess up our accounting + */ + turnOffDaemons(envConfig); + envConfig.setAllowCreate(true); + + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + + logAndRetrieve(envImpl); + + /* + * Expect 10 JE files: + * root + * ckptstart + * ckptend + * trace trace + * trace trace + * trace trace + * trace trace + * trace trace + * trace trace + * trace trace + * + * This is based on a manual perusal of the log files and their + * contents. Changes in the sizes of log entries can throw this + * test off, and require that a check and a change to the assertion + * value. + */ + String[] names = fileManager.listFileNames(FileManager.JE_SUFFIXES); + assertEquals("Should be 10 files on disk", 10, names.length); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + env.close(); + } + } + + /** + * Log and retrieve objects, with some of log flushed to disk, some + * of log in memory. Force the read buffer to be very small + */ + @Test + public void testFaultingIn() + throws Throwable { + + try { + + /* + * Force the buffers and files to be small. The log buffer is + * actually too small, will have to grow dynamically. We read in 32 + * byte chunks, will have to re-read only holds one test item (each + * test item is 50 bytes) + */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam + (EnvironmentParams.LOG_MEM_SIZE.getName(), + EnvironmentParams.LOG_MEM_SIZE_MIN_STRING); + envConfig.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), "2"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), "200"); + envConfig.setConfigParam + (EnvironmentParams.LOG_FAULT_READ_SIZE.getName(), "32"); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + logAndRetrieve(envImpl); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + env.close(); + } + } + + /** + * Log several objects, retrieve them. 
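+ * <p>
+ * The round trip exercised throughout, sketched with names from this
+ * class:
+ * <pre>{@code
+ * long lsn = Trace.trace(envImpl, rec);        // append, returns LSN
+ * assert rec.equals(logManager.getEntry(lsn)); // fetch by LSN
+ * }</pre>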
+ */ + private void logAndRetrieve(EnvironmentImpl envImpl) + throws IOException, ChecksumException, DatabaseException { + + /* Make test loggable objects. */ + List testRecs = new ArrayList(); + for (int i = 0; i < 10; i++) { + testRecs.add(new Trace("Hello there, rec " + (i+1))); + } + + /* Log three of them, remember their LSNs. */ + List testLsns = new ArrayList(); + + for (int i = 0; i < 3; i++) { + long lsn = Trace.trace(envImpl, testRecs.get(i)); + if (DEBUG) { + System.out.println("i = " + i + " test LSN: file = " + + DbLsn.getFileNumber(lsn) + + " offset = " + + DbLsn.getFileOffset(lsn)); + } + testLsns.add(new Long(lsn)); + } + + /* Ask for them back, out of order. */ + assertEquals(testRecs.get(2), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(2)))); + assertEquals(testRecs.get(0), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(0)))); + assertEquals(testRecs.get(1), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(1)))); + + /* Intersperse logging and getting. */ + testLsns.add + (new Long(Trace.trace(envImpl, testRecs.get(3)))); + testLsns.add + (new Long(Trace.trace(envImpl, testRecs.get(4)))); + + assertEquals(testRecs.get(2), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(2)))); + assertEquals(testRecs.get(4), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(4)))); + + /* Intersperse logging and getting. */ + testLsns.add + (new Long(Trace.trace(envImpl, testRecs.get(5)))); + testLsns.add + (new Long(Trace.trace(envImpl, testRecs.get(6)))); + testLsns.add + (new Long(Trace.trace(envImpl, testRecs.get(7)))); + + assertEquals(testRecs.get(7), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(7)))); + assertEquals(testRecs.get(0), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(0)))); + assertEquals(testRecs.get(6), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(6)))); + + /* + * Check that we can retrieve log entries as byte buffers, and get the + * correct object back. Used by replication. + */ + long lsn = testLsns.get(7).longValue(); + ByteBuffer buffer = getByteBufferFromLog(envImpl, lsn); + + HeaderAndEntry contents = readHeaderAndEntry(buffer, null /*envImpl*/); + + assertEquals(testRecs.get(7), + contents.entry.getMainItem()); + assertEquals(LogEntryType.LOG_TRACE.getTypeNum(), + contents.header.getType()); + assertEquals(LogEntryType.LOG_VERSION, + contents.header.getVersion()); + } + + private void turnOffDaemons(EnvironmentConfig envConfig) { + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_CLEANER.getName(), + "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), + "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_EVICTOR.getName(), + "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), + "false"); + } + + /** + * Log a few items, then hit exceptions. Make sure LSN state is correctly + * maintained and that items logged after the exceptions are at the correct + * locations on disk. 
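+ * <p>
+ * The injected failure is an attempt to log an item larger than the
+ * log buffer; before the fix for SR #12638, such a failure left the
+ * LSN state unrestored and subsequent reads hung.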
+ */ + @Test + public void testExceptions() + throws Throwable { + + int logBufferSize = ((int) EnvironmentParams.LOG_MEM_SIZE_MIN) / 3; + int numLogBuffers = 5; + int logBufferMemSize = logBufferSize * numLogBuffers; + int logFileMax = 1000; + int okCounter = 0; + + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(), + new Integer(logBufferMemSize).toString()); + envConfig.setConfigParam + (EnvironmentParams.NUM_LOG_BUFFERS.getName(), + new Integer(numLogBuffers).toString()); + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_MAX.getName(), + new Integer(logFileMax).toString()); + envConfig.setConfigParam( + EnvironmentParams.NODE_MAX.getName(), "6"); + + /* Disable noisy cleaner database usage. */ + DbInternal.setCreateEP(envConfig, false); + DbInternal.setCreateUP(envConfig, false); + DbInternal.setCheckpointUP(envConfig, false); + /* Don't run the cleaner without a UtilizationProfile. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + + /* + * Don't run any daemons, those emit trace messages and other log + * entries and mess up our accounting. + */ + turnOffDaemons(envConfig); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + + /* Keep track of items logged and their LSNs. */ + ArrayList testRecs = new ArrayList(); + ArrayList testLsns = new ArrayList(); + + /* + * Intersperse: + * - log successfully + * - log w/failure because the item doesn't fit in the log buffer + * - have I/O failures writing out the log + * Verify that all expected items can be read. Some will come + * from the log buffer pool. + * Then close and re-open the environment, to verify that + * all log items are faulted from disk + */ + + /* Successful log. */ + addOkayItem(envImpl, okCounter++, + testRecs, testLsns, logBufferSize); + + /* Item that's too big for the log buffers. */ + attemptTooBigItem(envImpl, logBufferSize, testRecs, testLsns); + + /* Successful log. */ + addOkayItem(envImpl, okCounter++, + testRecs, testLsns, logBufferSize); + + /* + * This verify read the items from the log buffers. Note before SR + * #12638 existed (LSN state not restored properly after exception + * because of too-small log buffer), this verify hung. + */ + verifyOkayItems(logManager, testRecs, testLsns, true); + + /* More successful logs, along with a few too-big items. */ + for (;okCounter < 23; okCounter++) { + addOkayItem(envImpl, okCounter, testRecs, + testLsns, logBufferSize); + + if ((okCounter % 4) == 0) { + attemptTooBigItem(envImpl, logBufferSize, + testRecs, testLsns); + } + /* + * If we verify in the loop, sometimes we'll read from disk and + * sometimes from the log buffer pool. + */ + verifyOkayItems(logManager, testRecs, testLsns, true); + } + + /* + * Test the case where we flip files and write the old write buffer + * out before we try getting a log buffer for the new item. We need + * to + * + * - hit a log-too-small exceptin + * - right after, we need to log an item that is small enough + * to fit in the log buffer but big enough to require that + * we flip to a new file. 
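+ *
+ * The assertion just below confirms the setup: less than one log
+ * buffer of space remains in the current file, so the next
+ * normal-sized item must force a file flip.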
+ */ + long nextLsn = fileManager.getNextLsn(); + long fileOffset = DbLsn.getFileOffset(nextLsn); + + assertTrue((logFileMax - fileOffset ) < logBufferSize); + attemptTooBigItem(envImpl, logBufferSize, testRecs, testLsns); + addOkayItem(envImpl, okCounter++, + testRecs, testLsns, logBufferSize, + ((int)(logFileMax - fileOffset))); + verifyOkayItems(logManager, testRecs, testLsns, true); + + /* + * Finally, close this environment and re-open, and read all + * expected items from disk. + */ + env.close(); + envConfig.setAllowCreate(false); + env = new Environment(envHome, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + logManager = envImpl.getLogManager(); + verifyOkayItems(logManager, testRecs, testLsns, false); + + /* Check that we read these items off disk. */ + EnvironmentStats stats = env.getStats(null); + assertTrue(stats.getEndOfLog() > 0); + assertTrue("nNotResident=" + stats.getNNotResident() + + " nRecs=" + testRecs.size(), + stats.getNNotResident() >= testRecs.size()); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + env.close(); + } + } + + private void addOkayItem(EnvironmentImpl envImpl, + int tag, + List testRecs, + List testLsns, + int logBufferSize, + int fillerLen) + throws DatabaseException { + + String filler = StringUtils.fromUTF8(new byte[fillerLen]); + Trace t = new Trace("okay" + filler + tag ); + assertTrue(logBufferSize > t.getLogSize()); + testRecs.add(t); + long lsn = Trace.trace(envImpl, t); + testLsns.add(new Long(lsn)); + } + + private void addOkayItem(EnvironmentImpl envImpl, + int tag, + List testRecs, + List testLsns, + int logBufferSize) + throws DatabaseException { + + addOkayItem(envImpl, tag, testRecs, testLsns, logBufferSize, 0); + } + + private void attemptTooBigItem(EnvironmentImpl envImpl, + int logBufferSize, + Trace big, + List testRecs, + List testLsns) { + assertTrue(big.getLogSize() > logBufferSize); + + try { + long lsn = Trace.trace(envImpl, big); + testLsns.add(new Long(lsn)); + testRecs.add(big); + } catch (DatabaseException expected) { + fail("Should not have hit exception."); + } + } + + private void attemptTooBigItem(EnvironmentImpl envImpl, + int logBufferSize, + List testRecs, + List testLsns) { + String stuff = "12345679890123456798901234567989012345679890"; + while (stuff.length() < EnvironmentParams.LOG_MEM_SIZE_MIN) { + stuff += stuff; + } + Trace t = new Trace(stuff); + attemptTooBigItem(envImpl, logBufferSize, t, testRecs, testLsns); + } + + @SuppressWarnings("hiding") + private void verifyOkayItems(LogManager logManager, + ArrayList testRecs, + ArrayList testLsns, + boolean checkOrder) + throws IOException, DatabaseException { + + /* read forwards. */ + for (int i = 0; i < testRecs.size(); i++) { + assertEquals(testRecs.get(i), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(i)))); + + } + + /* Make sure LSNs are adjacent */ + assertEquals(testLsns.size(), testRecs.size()); + + if (checkOrder) { + + /* + * TODO: sometimes an ioexception entry will make it into the write + * buffer, and sometimes it won't. It depends on whether the IO + * exception was thrown when before or after the logabble item was + * written into the buffer. I haven't figure out yet how to tell + * the difference, so for now, we don't check order in the portion + * of the test that issues IO exceptions. 
+ */ + for (int i = 1; i < testLsns.size(); i++) { + + long lsn = testLsns.get(i).longValue(); + long lsnFile = DbLsn.getFileNumber(lsn); + long lsnOffset = DbLsn.getFileOffset(lsn); + long prevLsn = testLsns.get(i-1).longValue(); + long prevFile = DbLsn.getFileNumber(prevLsn); + long prevOffset = DbLsn.getFileOffset(prevLsn); + if (prevFile == lsnFile) { + assertEquals("item " + i + "prev = " + + DbLsn.toString(prevLsn) + + " current=" + DbLsn.toString(lsn), + (testRecs.get(i-1).getLogSize() + + LogEntryHeader.MIN_HEADER_SIZE), + lsnOffset - prevOffset); + } else { + assertEquals(prevFile+1, lsnFile); + assertEquals(FileManager.firstLogEntryOffset(), + lsnOffset); + } + } + } + + /* read backwards. */ + for (int i = testRecs.size() - 1; i > -1; i--) { + assertEquals(testRecs.get(i), + logManager.getEntry + (DbLsn.longToLsn(testLsns.get(i)))); + + } + } + + /** + * Convenience method for marshalling a header and log entry + * out of a byte buffer read directly out of the log. + * @throws DatabaseException + */ + private static HeaderAndEntry readHeaderAndEntry(ByteBuffer bytesFromLog, + EnvironmentImpl envImpl) + throws ChecksumException { + + HeaderAndEntry ret = new HeaderAndEntry(); + ret.header = new LogEntryHeader( + bytesFromLog, LogEntryType.LOG_VERSION, DbLsn.NULL_LSN); + ret.header.readVariablePortion(bytesFromLog); + + ret.entry = + LogEntryType.findType(ret.header.getType()).getNewLogEntry(); + + ret.entry.readEntry(envImpl, ret.header, bytesFromLog); + return ret; + } + + private static class HeaderAndEntry { + public LogEntryHeader header; + public LogEntry entry; + + /* Get an HeaderAndEntry from readHeaderAndEntry */ + private HeaderAndEntry() { + } + + public boolean logicalEquals(HeaderAndEntry other) { + return (header.logicalEqualsIgnoreVersion(other.header) && + entry.logicalEquals(other.entry)); + } + + @Override + public String toString() { + return header.toString() + ' ' + entry; + } + } + + /** + * Ensure that a ChecksumException is thrown when any portion of a log + * entry is corrupted, e.g., by a disk failure. This tests ensures that + * no other exception is thrown when validating the entry, e.g., via an + * assertion. + * + * There is one exception of a corrupted entry that is intentionally + * allowed: If the corruption does nothing more than toggle the invisible + * bit, then we do not consider this a checksum error. This is extremely + * unlikely to occur, and is tolerated to allow toggling the invisible bit + * in the log entry with a single atomic write of a single byte. + */ + @Test + public void testEntryChecksum() { + + /* Open env. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffDaemons(envConfig); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + logManager = envImpl.getLogManager(); + + /* Check file header, then next entry. */ + int len = doChecksumTest(envImpl, DbLsn.makeLsn(0, 0)); + doChecksumTest(envImpl, DbLsn.makeLsn(0, len)); + + env.close(); + } + + private int doChecksumTest(final EnvironmentImpl envImpl, final long lsn) { + + final ByteBuffer byteBuf = getByteBufferFromLog(envImpl, lsn); + final int byteLen = byteBuf.capacity(); + + /* Expect no exception with unmodified buffer. */ + try { + verifyChecksum(envImpl, lsn, byteBuf); + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + + /* Expect failure. 
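+ * Each single-byte mutation below (zeroing, setting to 0xFF,
+ * incrementing, decrementing, and toggling every individual bit) must
+ * raise a ChecksumException; the one tolerated case, toggling only
+ * the invisible bit, is handled inside expectChecksumFailure().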
*/ + for (int i = 0; i < byteLen; i += 1) { + int oldVal = byteBuf.get(i) & 0xFF; + int newVal; + /* Replace byte with 0. */ + newVal = (oldVal == 0) ? 1 : 0; + expectChecksumFailure(envImpl, lsn, byteBuf, i, oldVal, newVal); + /* Replace byte with 0xFF. */ + newVal = (oldVal == 0xFF) ? 0xF7 : 0xFF; + expectChecksumFailure(envImpl, lsn, byteBuf, i, oldVal, newVal); + /* Increment byte value. */ + newVal = oldVal + 1; + expectChecksumFailure(envImpl, lsn, byteBuf, i, oldVal, newVal); + /* Decrement byte value. */ + newVal = oldVal - 1; + expectChecksumFailure(envImpl, lsn, byteBuf, i, oldVal, newVal); + /* Set individual bits. */ + for (int j = 0; j < 8; j += 1) { + int flag = 1 << j; + if ((oldVal & flag) == 0) { + newVal = oldVal | flag; + expectChecksumFailure(envImpl, lsn, byteBuf, i, oldVal, + newVal); + } + } + /* Clear individual bits. */ + for (int j = 0; j < 8; j += 1) { + int flag = 1 << j; + if ((oldVal & flag) != 0) { + newVal = oldVal & ~flag; + expectChecksumFailure(envImpl, lsn, byteBuf, i, oldVal, + newVal); + } + } + } + + return byteBuf.remaining(); + } + + private void expectChecksumFailure(final EnvironmentImpl envImpl, + final long lsn, + final ByteBuffer byteBuf, + final int bufIndex, + final int oldValue, + final int newValue) { + /* Replace buffer value. */ + byteBuf.put(bufIndex, (byte) newValue); + + /* Expect checksum exception. */ + try { + verifyChecksum(envImpl, lsn, byteBuf); + fail("Expected ChecksumException"); + } catch (ChecksumException e) { + /* Expected. */ + } catch (EnvironmentFailureException e) { + if (bufIndex == LogEntryHeader.FLAGS_OFFSET && + newValue == LogEntryHeader.makeInvisible((byte) oldValue)) { + /* Expected when only the invisible bit is toggled. */ + assertEquals(EnvironmentFailureReason.LOG_INTEGRITY, + e.getReason()); + } else { + /* Not expected. */ + throw e; + } + } + + /* Restore buffer value. */ + byteBuf.put(bufIndex, (byte) oldValue); + } + + private void verifyChecksum(final EnvironmentImpl envImpl, + final long lsn, + final ByteBuffer byteBuf) + throws ChecksumException { + + LogBuffer logBuf = new LogBuffer(byteBuf.capacity(), envImpl); + logBuf.latchForWrite(); + logBuf.getDataBuffer().put(byteBuf); + byteBuf.rewind(); + logBuf.getDataBuffer().rewind(); + logBuf.registerLsn(lsn); + logManager.getLogEntryFromLogSource( + lsn, 0, logBuf, false /*invisibleReadAllowed*/); + logBuf.release(); + } + + /** + * Return a ByteBuffer holding the log entry at this LSN. The log entry + * must begin at position 0, to mimic the marshalledBuffer used in + * serialLogInternal(). + * + * @param lsn location of entry in log + * @return log entry that embodies all the objects in the log entry + */ + private ByteBuffer getByteBufferFromLog(final EnvironmentImpl envImpl, + final long lsn) + throws DatabaseException { + + /* Fail loudly if the environment is invalid. */ + envImpl.checkIfInvalid(); + + /* + * Get a log source for the log entry which provides an abstraction + * that hides whether the entry is in a buffer or on disk. Will + * register as a reader for the buffer or the file, which will take a + * latch if necessary. + */ + LogSource logSource = null; + try { + logSource = envImpl.getLogManager().getLogSource(lsn); + + /* + * Read the log entry header into a byte buffer. This assumes + * that the minimum size of this byte buffer (determined by + * je.log.faultReadSize) is always >= the maximum log entry header. 
+ */ + long fileOffset = DbLsn.getFileOffset(lsn); + ByteBuffer entryBuffer = logSource.getBytes(fileOffset); + int startingPosition = entryBuffer.position(); + int amountRemaining = entryBuffer.remaining(); + assert (amountRemaining >= LogEntryHeader.MAX_HEADER_SIZE); + + /* Read the header, find out how large this buffer needs to be */ + LogEntryHeader header = new LogEntryHeader( + entryBuffer, logSource.getLogVersion(), lsn); + int totalSize = header.getSize() + header.getItemSize(); + + /* + * The log entry must be positioned at the start of the returned + * buffer, to mimic the normal logging path. + */ + entryBuffer.position(startingPosition); + ByteBuffer singleEntryBuffer = ByteBuffer.allocate(totalSize); + entryBuffer.limit(startingPosition + totalSize); + singleEntryBuffer.put(entryBuffer); + singleEntryBuffer.position(0); + return singleEntryBuffer; + } catch (FileNotFoundException e) { + throw new EnvironmentFailureException + (envImpl, + EnvironmentFailureReason.LOG_FILE_NOT_FOUND, e); + } catch (ChecksumException e) { + throw new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.LOG_CHECKSUM, e); + } finally { + logSource.release(); + } + } + + /** + * In {@link LogManager#getLogEntry}, {@link + * LogManager#getLogEntryFromLogSource} may throw ChecksumException. This + * means that some data corruption happens. But the data corruption may + * be persistent or transient. {@link LogManager#getLogEntry} contains code + * to distinguish this. This test is just to check whether data corruption + * can be correctly distinguished as persistent or transient. + *

+ * When data corruption is detected,
+ * <ul>
+ * <li>if the data is read through {@link FileSource}, then the corruption
+ * is considered as persistent.</li>
+ * <li>if the data is read through {@link LogBuffer}, then
+ * <ul>
+ * <li>if the logBuffer has not been flushed, then the corruption is
+ * considered as transient.</li>
+ * <li>if the logBuffer has already been flushed, then the corruption is
+ * considered as persistent.</li>
+ * </ul>
+ * </li>
+ * </ul>
      • + * + * The following three test cases test the above three situations. + */ + @Test + public void testChecksumExReasonLogBufferTransient() + throws FileNotFoundException{ + testChecksumExReasonInternal("transient"); + } + + @Test + public void testChecksumExReasonLogBufferPersistent() + throws FileNotFoundException{ + testChecksumExReasonInternal("logbufferPersistent"); + } + + @Test + public void testChecksumExReasonFileSource() + throws FileNotFoundException{ + testChecksumExReasonInternal("filesource"); + } + + public void testChecksumExReasonInternal(String mode) + throws FileNotFoundException{ + + /* Open env. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffDaemons(envConfig); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + logManager = envImpl.getLogManager(); + + /* Write a small trace to generate a logBuffer. */ + Trace smallTrace = new Trace("generate logbuffer"); + long lsn = Trace.trace(envImpl, smallTrace); + + /* Crash the logbuffer. */ + try { + LogBuffer logBuffer = (LogBuffer) logManager.getLogSource(lsn); + ByteBuffer byteBuf = logBuffer.getBytes(DbLsn.getFileOffset(lsn)); + int oldVal = byteBuf.get(0) & 0xFF; + int newVal = (oldVal == 0) ? 1 : 0; + byteBuf.put(0, (byte) newVal); + logBuffer.release(); + } catch (ChecksumException ce) { + fail("Should not throw ChecksumException"); + } + + if (mode.equals("transient")) { + /* + * Read the record. + * + * Now it should read from the logbuffer and at the same time, the + * logbuffer has not been flushed. So data corruption should happen + * and this is a transient corruption. + */ + try { + logManager.getEntry(lsn); + fail("Should find data corruption"); + } catch (EnvironmentFailureException efe) { + assertEquals(efe.getMessage().contains( + "Corruption detected in log buffer, " + + "but was not written to disk"), true); + assertEquals(efe.isCorrupted(), false); + } + } else if (mode.equals("logbufferPersistent")) { + /* + * Flush the logbuffer to the log file. + */ + logManager.flushSync(); + /* + * Read the record. + * + * Now it should read from the logbuffer and at the same time, the + * logbuffer has been flushed. So data corruption should happen + * and this is a persistent corruption. + */ + try { + logManager.getEntry(lsn); + fail("Should find data corruption"); + } catch (EnvironmentFailureException efe) { + assertEquals(efe.getMessage().contains( + EnvironmentFailureReason.LOG_CHECKSUM.toString()), true); + assertEquals(efe.isCorrupted(), true); + } + } else if (mode.equals("filesource")) { + logManager.resetPool(envImpl.getConfigManager()); + /* + * Read the record. + * + * Now it should read from the FileSource. So data corruption should + * happen and this is a persistent corruption. + */ + try { + logManager.getEntry(lsn); + fail("Should find data corruption"); + } catch (EnvironmentFailureException efe) { + efe.printStackTrace(); + assertEquals(efe.getMessage().contains( + EnvironmentFailureReason.LOG_CHECKSUM.toString()), true); + assertEquals(efe.isCorrupted(), true); + } + } + env.close(); + } +} diff --git a/test/com/sleepycat/je/log/LogUtilsTest.java b/test/com/sleepycat/je/log/LogUtilsTest.java new file mode 100644 index 0000000..8c735b4 --- /dev/null +++ b/test/com/sleepycat/je/log/LogUtilsTest.java @@ -0,0 +1,172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Calendar; + +import org.junit.Test; + +import com.sleepycat.je.utilint.Timestamp; +import com.sleepycat.util.test.TestBase; + +/** + * Test basic marshalling utilities + */ +public class LogUtilsTest extends TestBase { + + @Test + public void testMarshalling() { + ByteBuffer dest = ByteBuffer.allocate(100); + + // unsigned ints + long unsignedData = 10; + dest.clear(); + LogUtils.writeUnsignedInt(dest, unsignedData); + assertEquals(LogUtils.UNSIGNED_INT_BYTES, dest.position()); + dest.flip(); + assertEquals(unsignedData, LogUtils.readUnsignedInt(dest)); + + unsignedData = 49249249L; + dest.clear(); + LogUtils.writeUnsignedInt(dest, unsignedData); + assertEquals(LogUtils.UNSIGNED_INT_BYTES, dest.position()); + dest.flip(); + assertEquals(unsignedData, LogUtils.readUnsignedInt(dest)); + + // ints + int intData = -1021; + dest.clear(); + LogUtils.writeInt(dest, intData); + assertEquals(LogUtils.INT_BYTES, dest.position()); + dest.flip(); + assertEquals(intData, LogUtils.readInt(dest)); + + intData = 257; + dest.clear(); + LogUtils.writeInt(dest, intData); + assertEquals(LogUtils.INT_BYTES, dest.position()); + dest.flip(); + assertEquals(intData, LogUtils.readInt(dest)); + + // longs + long longData = -1021; + dest.clear(); + LogUtils.writeLong(dest, longData); + assertEquals(LogUtils.LONG_BYTES, dest.position()); + dest.flip(); + assertEquals(longData, LogUtils.readLong(dest)); + + // byte arrays + byte[] byteData = new byte[] {1,2,3,4,5,6,7,8,9,10,11,12}; + dest.clear(); + LogUtils.writeByteArray(dest, byteData); + assertEquals(LogUtils.getPackedIntLogSize(12) + 12, dest.position()); + dest.flip(); + assertTrue(Arrays.equals(byteData, + LogUtils.readByteArray(dest, + false/*unpacked*/))); + + // Strings + String stringData = "Hello world!"; + dest.clear(); + LogUtils.writeString(dest, stringData); + assertEquals(LogUtils.getPackedIntLogSize(12) + 12, dest.position()); + dest.flip(); + assertEquals(stringData, + LogUtils.readString + (dest, false/*unpacked*/, LogEntryType.LOG_VERSION)); + + // String with multi-byte char, a Euro sign represented as 3 UTF bytes + String multiByteData = "Hello Euro!\u20ac"; + dest.clear(); + LogUtils.writeString(dest, multiByteData); + assertEquals(LogUtils.getPackedIntLogSize(14) + 14, dest.position()); + dest.flip(); + assertEquals(multiByteData, + LogUtils.readString( + dest, false/*unpacked*/, LogEntryType.LOG_VERSION)); + + // Timestamps + Timestamp timestampData = + new Timestamp(Calendar.getInstance().getTimeInMillis()); + dest.clear(); + LogUtils.writeTimestamp(dest, timestampData); + assertEquals(LogUtils.getTimestampLogSize(timestampData), + dest.position()); + dest.flip(); + assertEquals(timestampData, LogUtils.readTimestamp(dest, false)); + + // Booleans + boolean boolData = true; + dest.clear(); + LogUtils.writeBoolean(dest, boolData); + assertEquals(1, dest.position()); + dest.flip(); + assertEquals(boolData, 
LogUtils.readBoolean(dest)); + + testPacked(dest); + } + + private void testPacked(ByteBuffer dest) { + + // packed ints + int intValue = 119; + dest.clear(); + LogUtils.writePackedInt(dest, intValue); + assertEquals(1, dest.position()); + dest.flip(); + assertEquals(intValue, LogUtils.readPackedInt(dest)); + + intValue = 0xFFFF + 119; + dest.clear(); + LogUtils.writePackedInt(dest, intValue); + assertEquals(3, dest.position()); + dest.flip(); + assertEquals(intValue, LogUtils.readPackedInt(dest)); + + intValue = Integer.MAX_VALUE; + dest.clear(); + LogUtils.writePackedInt(dest, intValue); + assertEquals(5, dest.position()); + dest.flip(); + assertEquals(intValue, LogUtils.readPackedInt(dest)); + + // packed longs + long longValue = 119; + dest.clear(); + LogUtils.writePackedLong(dest, longValue); + assertEquals(1, dest.position()); + dest.flip(); + assertEquals(longValue, LogUtils.readPackedLong(dest)); + + longValue = 0xFFFFFFFFL + 119; + dest.clear(); + LogUtils.writePackedLong(dest, longValue); + assertEquals(5, dest.position()); + dest.flip(); + assertEquals(longValue, LogUtils.readPackedLong(dest)); + + longValue = Long.MAX_VALUE; + dest.clear(); + LogUtils.writePackedLong(dest, longValue); + assertEquals(9, dest.position()); + dest.flip(); + assertEquals(longValue, LogUtils.readPackedLong(dest)); + } +} diff --git a/test/com/sleepycat/je/log/LoggableTest.java b/test/com/sleepycat/je/log/LoggableTest.java new file mode 100644 index 0000000..38478e5 --- /dev/null +++ b/test/com/sleepycat/je/log/LoggableTest.java @@ -0,0 +1,419 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.nio.ByteBuffer; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; + +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.cleaner.FileSummary; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.RestoreRequired; +import com.sleepycat.je.recovery.CheckpointEnd; +import com.sleepycat.je.recovery.CheckpointStart; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.FileSummaryLN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.MapLN; +import com.sleepycat.je.tree.NameLN; +import com.sleepycat.je.txn.RollbackEnd; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.txn.TxnAbort; +import com.sleepycat.je.txn.TxnCommit; +import com.sleepycat.je.txn.TxnPrepare; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Matchpoint; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Check that every loggable object can be read in and out of a buffer + */ +public class LoggableTest extends TestBase { + + static final boolean verbose = Boolean.getBoolean("verbose"); + + // private DocumentBuilder builder; + private Environment env; + private final File envHome; + private DatabaseImpl database; + + public LoggableTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + } + + @Override + @After + public void tearDown() + throws DatabaseException { + + env.close(); + } + + @Test + public void testEntryData() + throws Throwable { + + try { + ByteBuffer buffer = ByteBuffer.allocate(1000); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + database = new DatabaseImpl(null, + "foo", new DatabaseId(1), + envImpl, new DatabaseConfig()); + + /* + * For each loggable object, can we write the entry data out? + */ + + /* + * Trace records. 
+ */ + Trace dMsg = new Trace("Hello there"); + writeAndRead(buffer, LogEntryType.LOG_TRACE, dMsg, new Trace()); + + /* + * LNs + */ + String data = "abcdef"; + LN ln = LN.makeLN(envImpl, StringUtils.toUTF8(data)); + LN lnFromLog = new LN(); + writeAndRead(buffer, LogEntryType.LOG_INS_LN, ln, lnFromLog); + assertTrue(LogEntryType.LOG_INS_LN.marshallOutsideLatch()); + + FileSummaryLN fsLN = new FileSummaryLN(new FileSummary()); + FileSummaryLN fsLNFromLog = new FileSummaryLN(); + writeAndRead(buffer, LogEntryType.LOG_FILESUMMARYLN, + fsLN, fsLNFromLog); + assertFalse( + LogEntryType.LOG_FILESUMMARYLN.marshallOutsideLatch()); + + /* + * INs + */ + IN in = new IN(database, + new byte[] {1,0,1,0}, + 7, 5); + in.latch(); + in.insertEntry(null, new byte[] {1,0,1,0}, DbLsn.makeLsn(12, 200)); + in.insertEntry(null, new byte[] {1,1,1,0}, DbLsn.makeLsn(29, 300)); + in.insertEntry(null, new byte[] {0,0,1,0}, DbLsn.makeLsn(35, 400)); + + /* Write it. */ + IN inFromLog = new IN(); + inFromLog.setDatabase(database); + inFromLog.latch(); + writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog); + inFromLog.releaseLatch(); + in.releaseLatch(); + + /* + * IN - long form + */ + in = new IN(database, + new byte[] {1,0,1,0}, + 7, 5); + in.latch(); + in.insertEntry(null, new byte[] {1,0,1,0}, DbLsn.makeLsn(12, 200)); + + in.insertEntry(null, new byte[] {1,1,1,0}, DbLsn.makeLsn(29, 300)); + + in.insertEntry( + null, new byte[] {0,0,1,0}, DbLsn.makeLsn(1235, 400)); + + in.insertEntry( + null, new byte[] {0,0,1,0}, DbLsn.makeLsn(0xFFFFFFF0L, 400)); + + /* Write it. */ + inFromLog = new IN(); + inFromLog.setDatabase(database); + inFromLog.latch(); + writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog); + inFromLog.releaseLatch(); + in.releaseLatch(); + + /* + * BINs + */ + BIN bin = new BIN(database, + new byte[] {3,2,1}, + 8, 5); + bin.latch(); + + bin.insertEntry( + null, new byte[] {1,0,1,0}, DbLsn.makeLsn(212, 200)); + + bin.insertEntry( + null, new byte[] {1,1,1,0}, DbLsn.makeLsn(229, 300)); + + bin.insertEntry( + null, new byte[] {0,0,1,0}, DbLsn.makeLsn(235, 400)); + + BIN binFromLog = new BIN(); + binFromLog.setDatabase(database); + binFromLog.latch(); + writeAndRead(buffer, LogEntryType.LOG_BIN, bin, binFromLog); + binFromLog.releaseLatch(); + bin.releaseLatch(); + + /* + * Root + */ + DbTree dbTree = new DbTree(envImpl, + false /*replicationIntended*/, + false /*preserveVLSN*/); + DbTree dbTreeFromLog = new DbTree(); + writeAndRead + (buffer, LogEntryType.LOG_DBTREE, dbTree, dbTreeFromLog); + dbTree.close(); + + /* + * MapLN + */ + MapLN mapLn = new MapLN(database); + MapLN mapLnFromLog = new MapLN(); + writeAndRead(buffer, LogEntryType.LOG_MAPLN, mapLn, mapLnFromLog); + + /* + * NameLN + */ + NameLN nameLn = new NameLN(database.getId()); + NameLN nameLnFromLog = new NameLN(); + writeAndRead( + buffer, LogEntryType.LOG_NAMELN_TRANSACTIONAL, + nameLn, nameLnFromLog); + + /* + * UserTxn + */ + + /* + * Disabled for now because these txns don't compare equal, + * because one has a name of "main" and the other has a name of + * null because it was read from the log. 
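The LSNs built here with DbLsn.makeLsn(file, offset) combine a file number and a byte offset into a single long. A minimal sketch, assuming the conventional split of the high 32 bits for the file number and the low 32 bits for the offset (note that the test's 0xFFFFFFF0L file number still fits an unsigned 32-bit field); the real DbLsn may differ in detail:

public class LsnSketch {

    // Assumption: high 32 bits hold the file number, low 32 bits the offset.
    static long makeLsn(long fileNumber, long fileOffset) {
        return (fileNumber << 32) | (fileOffset & 0xFFFFFFFFL);
    }

    static long getFileNumber(long lsn) {
        return lsn >>> 32;
    }

    static long getFileOffset(long lsn) {
        return lsn & 0xFFFFFFFFL;
    }

    public static void main(String[] args) {
        long lsn = makeLsn(0xFFFFFFF0L, 400);
        System.out.println(getFileNumber(lsn)); // 4294967280
        System.out.println(getFileOffset(lsn)); // 400
    }
}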
+ + Txn txn = new Txn(envImpl, new TransactionConfig()); + Txn txnFromLog = new Txn(); + writeAndRead(buffer, LogEntryType.TXN_COMMIT, txn, txnFromLog); + txn.commit(); + */ + + /* + * TxnCommit + */ + TxnCommit commit = new TxnCommit(111, DbLsn.makeLsn(10, 10), + 179 /* masterNodeId */, + 1 /* DTVLSN */); + TxnCommit commitFromLog = new TxnCommit(); + writeAndRead(buffer, LogEntryType.LOG_TXN_COMMIT, commit, + commitFromLog); + + /* + * TxnAbort + */ + TxnAbort abort = new TxnAbort(111, DbLsn.makeLsn(11, 11), + 7654321 /* masterNodeId*/, + 1 /* DTVLSN */); + TxnAbort abortFromLog = new TxnAbort(); + writeAndRead(buffer, LogEntryType.LOG_TXN_ABORT, + abort, abortFromLog); + + /* + * TxnPrepare + */ + byte[] gid = new byte[64]; + byte[] bqual = new byte[64]; + TxnPrepare prepare = + new TxnPrepare(111, new LogUtils.XidImpl(1, gid, bqual)); + TxnPrepare prepareFromLog = new TxnPrepare(); + writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, + prepareFromLog); + + prepare = + new TxnPrepare(111, new LogUtils.XidImpl(1, null, bqual)); + prepareFromLog = new TxnPrepare(); + writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, + prepare, prepareFromLog); + + prepare = + new TxnPrepare(111, new LogUtils.XidImpl(1, gid, null)); + prepareFromLog = new TxnPrepare(); + writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, + prepare, prepareFromLog); + + /* + * Checkpoint start + */ + CheckpointStart start = new CheckpointStart(177, "test"); + CheckpointStart startFromLog = new CheckpointStart(); + writeAndRead(buffer, LogEntryType.LOG_CKPT_START, + start, startFromLog); + + /* + * Checkpoint end + */ + CheckpointEnd end = new CheckpointEnd + ("test", + DbLsn.makeLsn(20, 55), + envImpl.getRootLsn(), + envImpl.getTxnManager().getFirstActiveLsn(), + envImpl.getNodeSequence().getLastLocalNodeId(), + envImpl.getNodeSequence().getLastReplicatedNodeId(), + envImpl.getDbTree().getLastLocalDbId(), + envImpl.getDbTree().getLastReplicatedDbId(), + envImpl.getTxnManager().getLastLocalTxnId(), + envImpl.getTxnManager().getLastReplicatedTxnId(), + 177, + true /*cleanerFilesToDelete*/); + CheckpointEnd endFromLog = new CheckpointEnd(); + writeAndRead(buffer, LogEntryType.LOG_CKPT_END, end, endFromLog); + + /** + * RollbackStart + */ + Set activeTxnIds = new HashSet(); + activeTxnIds.add(1999L); + activeTxnIds.add(2999L); + RollbackStart rs = new RollbackStart(new VLSN(1001), + 99, + activeTxnIds); + RollbackStart rsFromLog = new RollbackStart(); + writeAndRead(buffer, LogEntryType.LOG_ROLLBACK_START, rs, + rsFromLog); + + /** + * RollbackEnd + */ + RollbackEnd re = new RollbackEnd(39L, 79L); + RollbackEnd reFromLog = new RollbackEnd(); + writeAndRead(buffer, LogEntryType.LOG_ROLLBACK_END, re, + reFromLog); + + /** + * Matchpoint + */ + Matchpoint matchpoint = new Matchpoint(5); + Matchpoint matchpointFromLog = new Matchpoint(); + writeAndRead(buffer, LogEntryType.LOG_ROLLBACK_END, matchpoint, + matchpointFromLog); + + /** + * RestoreRequired + */ + /* It doesn't matter what the properties are */ + Properties props = new Properties(); + props.setProperty("foo", "bar"); + props.setProperty("apple", "tree"); + RestoreRequired rr = + new RestoreRequired(RestoreRequired.FailureType.NETWORK_RESTORE, + props); + + RestoreRequired rrFromLog = new RestoreRequired(); + writeAndRead(buffer, LogEntryType.LOG_RESTORE_REQUIRED, + rr, rrFromLog); + + /* + * Mimic what happens when the environment is closed. 
+ */ + database.releaseTreeAdminMemory(); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Helper which takes a dbLoggable, writes it, reads it back and + * checks for equality and size + */ + private void writeAndRead(ByteBuffer buffer, + LogEntryType entryType, + Loggable orig, + Loggable fromLog) + throws Exception { + + /* Write it. */ + buffer.clear(); + orig.writeToLog(buffer); + + /* Check the log size. */ + buffer.flip(); + assertEquals(buffer.limit(), orig.getLogSize()); + + /* + * Read it and compare sizes. Note that we assume we're testing + * objects that are readable and writable to the log. + */ + fromLog.readFromLog(buffer, LogEntryType.LOG_VERSION); + assertEquals(orig.getLogSize(), fromLog.getLogSize()); + + assertEquals("We should have read the whole buffer for " + + fromLog.getClass().getName(), + buffer.limit(), buffer.position()); + + /* Compare contents. */ + StringBuilder sb1 = new StringBuilder(); + StringBuilder sb2 = new StringBuilder(); + orig.dumpLog(sb1, true); + fromLog.dumpLog(sb2, true); + + if (verbose) { + System.out.println("sb1 = " + sb1.toString()); + System.out.println("sb2 = " + sb2.toString()); + } + assertEquals("Not equals for " + + fromLog.getClass().getName(), + sb1.toString(), sb2.toString()); + + /* Validate that the dump string is valid XML. */ + // builder = factory.newDocumentBuilder(); + // builder.parse(""); + // sb1.toString()+ + } +} diff --git a/test/com/sleepycat/je/log/TestUtilLogReader.java b/test/com/sleepycat/je/log/TestUtilLogReader.java new file mode 100644 index 0000000..37596df --- /dev/null +++ b/test/com/sleepycat/je/log/TestUtilLogReader.java @@ -0,0 +1,85 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import java.nio.ByteBuffer; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.utilint.DbLsn; + +/** + * Instantiates all log entries using the shared log entry instances. 
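The writeAndRead helper above encodes the whole round-trip discipline: clear the buffer, write, flip, check the declared size against the bytes actually produced, read back, and confirm the entry was fully consumed before comparing dumps. A self-contained sketch of the same discipline with a toy type (ToyLoggable is a stand-in, not JE's Loggable interface):

import java.nio.ByteBuffer;

/** Toy stand-in for the Loggable pattern; not JE's interface. */
interface ToyLoggable {
    int getLogSize();
    void writeToLog(ByteBuffer buf);
    void readFromLog(ByteBuffer buf);
    String dump();
}

public class RoundTripSketch {

    /** Mirrors the write/flip/read/compare discipline of writeAndRead. */
    static void check(ByteBuffer buf, ToyLoggable orig, ToyLoggable fromLog) {
        buf.clear();
        orig.writeToLog(buf);
        buf.flip();
        if (buf.limit() != orig.getLogSize()) {
            throw new IllegalStateException("declared size mismatch");
        }
        fromLog.readFromLog(buf);
        if (buf.position() != buf.limit()) {
            throw new IllegalStateException("entry not fully consumed");
        }
        if (!orig.dump().equals(fromLog.dump())) {
            throw new IllegalStateException("contents differ");
        }
    }

    public static void main(String[] args) {
        // An int wrapped as a loggable, for illustration only.
        class IntLoggable implements ToyLoggable {
            int value;
            IntLoggable(int v) { value = v; }
            public int getLogSize() { return 4; }
            public void writeToLog(ByteBuffer b) { b.putInt(value); }
            public void readFromLog(ByteBuffer b) { value = b.getInt(); }
            public String dump() { return "<int v=\"" + value + "\"/>"; }
        }
        check(ByteBuffer.allocate(16), new IntLoggable(42), new IntLoggable(0));
        System.out.println("round trip ok");
    }
}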
+ */ +public class TestUtilLogReader extends FileReader { + + private LogEntryType entryType; + private LogEntry entry; + + public TestUtilLogReader(EnvironmentImpl env) + throws DatabaseException { + + super(env, + 4096, + true, + DbLsn.NULL_LSN, + null, + DbLsn.NULL_LSN, + DbLsn.NULL_LSN); + } + + public TestUtilLogReader(EnvironmentImpl env, + int readBufferSize, + boolean forward, + long startLsn, + Long singleFileNumber, + long endOfFileLsn, + long finishLsn) + throws DatabaseException { + + super(env, + readBufferSize, + forward, + startLsn, + singleFileNumber, + endOfFileLsn, + finishLsn); + } + + public LogEntryType getEntryType() { + return entryType; + } + + public int getEntryVersion() { + return currentEntryHeader.getVersion(); + } + + public LogEntry getEntry() { + return entry; + } + + protected boolean isTargetEntry() { + return true; + } + + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + entryType = LogEntryType.findType(currentEntryHeader.getType()); + entry = entryType.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + return true; + } +} diff --git a/test/com/sleepycat/je/log/WriteQueueTest.java b/test/com/sleepycat/je/log/WriteQueueTest.java new file mode 100644 index 0000000..f93052c --- /dev/null +++ b/test/com/sleepycat/je/log/WriteQueueTest.java @@ -0,0 +1,297 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.log; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.nio.ByteBuffer; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.log.FileManager.LogEndFileDescriptor; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test File Manager write queue + */ +public class WriteQueueTest extends TestBase { + + static private int FILE_SIZE = 120; + + private Environment env; + private FileManager fileManager; + private final File envHome; + + public WriteQueueTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + /* Remove files to start with a clean slate. */ + super.setUp(); + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + new Integer(FILE_SIZE).toString()); + /* Yank the cache size way down. 
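A typical use of this reader is a forward scan over every entry in the environment's log, as the version tests below do. A minimal sketch, assuming an already-open EnvironmentImpl and using only the calls shown in this import:

import com.sleepycat.je.dbi.EnvironmentImpl;
import com.sleepycat.je.log.TestUtilLogReader;

public class ScanAllEntries {

    /** Print every entry's type and on-disk version, oldest first. */
    static void dumpTypes(EnvironmentImpl envImpl) throws Exception {
        TestUtilLogReader reader = new TestUtilLogReader(envImpl);
        while (reader.readNextEntry()) {   // advances one entry at a time
            System.out.println(reader.getEntryType().toString()
                               + '/' + reader.getEntryVersion());
        }
    }
}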
*/ + envConfig.setConfigParam + (EnvironmentParams.LOG_FILE_CACHE_SIZE.getName(), "3"); + envConfig.setAllowCreate(true); + + env = new Environment(envHome, envConfig); + fileManager = DbInternal.getNonNullEnvImpl(env).getFileManager(); + } + + @After + public void tearDown() + throws DatabaseException { + + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testReadFromEmptyWriteQueue() { + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + ByteBuffer bb = ByteBuffer.allocate(100); + assertFalse(lefd.checkWriteCache(bb, 0, 0)); + } + } + + @Test + public void testReadFromWriteQueueWithDifferentFileNum() { + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(1); + ByteBuffer bb = ByteBuffer.allocate(100); + assertFalse(lefd.checkWriteCache(bb, 0, 0)); + } + } + + @Test + public void testReadFromWriteQueueExactMatch() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(5); + assertTrue(lefd.checkWriteCache(bb, 0, 0)); + bb.position(0); + for (int i = 0; i < 5; i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueSubset() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(3); + assertTrue(lefd.checkWriteCache(bb, 0, 0)); + bb.position(0); + for (int i = 0; i < bb.limit(); i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueSubsetOffset() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(3); + assertTrue(lefd.checkWriteCache(bb, 2, 0)); + bb.position(0); + for (int i = 2; i < bb.limit() + 2; i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueSubsetUnderflow() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(4); + assertTrue(lefd.checkWriteCache(bb, 2, 0)); + /* Ensure that buffer was reset to where it belongs. 
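The tests above pin down the contract of checkWriteCache: it fills the caller's buffer from queued-but-unwritten bytes starting at the requested file offset, stops at the first byte not present in the queue, and leaves the buffer position at the last byte found. A self-contained toy model of just that contract (the real LogEndFileDescriptor also tracks file numbers, flushing, and locking):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class WriteQueueSketch {

    private static final class Pending {
        final long offset;
        final byte[] data;
        Pending(long offset, byte[] data) { this.offset = offset; this.data = data; }
    }

    private final List<Pending> queue = new ArrayList<>();

    void enqueueWrite(long fileOffset, byte[] data) {
        queue.add(new Pending(fileOffset, data));
    }

    /** Fill buf from fileOffset; buf.position() ends at the bytes actually found. */
    boolean checkWriteCache(ByteBuffer buf, long fileOffset) {
        int i = 0;
        while (i < buf.limit()) {
            Byte b = byteAt(fileOffset + i);
            if (b == null) {
                break;                     // gap: nothing queued for this position
            }
            buf.put(i, b);
            i++;
        }
        buf.position(i);
        return i > 0;
    }

    private Byte byteAt(long pos) {
        for (Pending p : queue) {
            if (pos >= p.offset && pos < p.offset + p.data.length) {
                return p.data[(int) (pos - p.offset)];
            }
        }
        return null;
    }

    public static void main(String[] args) {
        WriteQueueSketch q = new WriteQueueSketch();
        q.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 });
        ByteBuffer bb = ByteBuffer.allocate(100);
        bb.limit(4);
        // Underflow case from the test: only bytes 2..4 exist, position ends at 3.
        System.out.println(q.checkWriteCache(bb, 2) + " pos=" + bb.position());
    }
}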
*/ + assertEquals(bb.position(), 3); + bb.flip(); + for (int i = 2; i < 2 + bb.limit(); i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueSubsetOverflow() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + lefd.enqueueWrite(0, new byte[] { 5, 6, 7, 8, 9 }, 5, 0, 5); + + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(4); + assertTrue(lefd.checkWriteCache(bb, 2, 0)); + bb.position(0); + for (int i = 2; i < bb.limit() + 2; i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueSubsetOverflow2() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + lefd.enqueueWrite(0, new byte[] { 5, 6, 7, 8, 9 }, 5, 0, 5); + + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(8); + assertTrue(lefd.checkWriteCache(bb, 2, 0)); + bb.position(0); + for (int i = 2; i < bb.limit() + 2; i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueMultipleEntries() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + lefd.enqueueWrite(0, new byte[] { 5, 6, 7, 8, 9 }, 5, 0, 5); + lefd.enqueueWrite(0, new byte[] { 10, 11, 12, 13, 14 }, 10, 0, 5); + + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(9); + assertTrue(lefd.checkWriteCache(bb, 2, 0)); + bb.position(0); + for (int i = 2; i < bb.limit() + 2; i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueLast2EntriesOnly() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + lefd.enqueueWrite(0, new byte[] { 5, 6, 7, 8, 9 }, 5, 0, 5); + lefd.enqueueWrite(0, new byte[] { 10, 11, 12, 13, 14 }, 10, 0, 5); + + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(9); + assertTrue(lefd.checkWriteCache(bb, 6, 0)); + bb.position(0); + for (int i = 6; i < bb.limit() + 6; i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } + + @Test + public void testReadFromWriteQueueLastEntryOnly() + throws Exception { + + if (fileManager.getUseWriteQueue()) { + LogEndFileDescriptor lefd = + fileManager.new LogEndFileDescriptor(); + lefd.setQueueFileNum(0); + lefd.enqueueWrite(0, new byte[] { 0, 1, 2, 3, 4 }, 0, 0, 5); + lefd.enqueueWrite(0, new byte[] { 5, 6, 7, 8, 9 }, 5, 0, 5); + lefd.enqueueWrite(0, new byte[] { 10, 11, 12, 13, 14 }, 10, 0, 5); + + ByteBuffer bb = ByteBuffer.allocate(100); + bb.limit(5); + assertTrue(lefd.checkWriteCache(bb, 10, 0)); + bb.position(0); + for (int i = 10; i < bb.limit() + 10; i++) { + byte b = bb.get(); + assertTrue(b == i); + } + } + } +} diff --git a/test/com/sleepycat/je/logversion/LogEntryVersionTest.java b/test/com/sleepycat/je/logversion/LogEntryVersionTest.java new file mode 100644 index 0000000..ab8edc8 --- /dev/null +++ b/test/com/sleepycat/je/logversion/LogEntryVersionTest.java @@ -0,0 +1,385 @@ +/*- + * Copyright (C) 2002, 2017, 
Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.logversion; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.LineNumberReader; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.TestUtilLogReader; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; + +/** + * Tests that prior versions of log entries can be read. This test is used in + * conjunction with MakeLogEntryVersionData, a main program that was used once + * to generate log files named je-x.y.z.jdb, where x.y.z is the version of JE + * used to create the log. When a new test log file is created with + * MakeLogEntryVersionData, add a new test_x_y_z() method to this class. + * + * @see MakeLogEntryVersionData + */ +public class LogEntryVersionTest extends TestBase { + + private File envHome; + private Environment env; + private Database db1; + private Database db2; + + public LogEntryVersionTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + + try { + if (env != null) { + env.close(); + } + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + + envHome = null; + env = null; + db1 = null; + db2 = null; + } + + private void openEnv(String jeVersion, boolean readOnly) + throws DatabaseException, IOException { + + /* Copy log file resource to log file zero. */ + String resName = "je-" + jeVersion + ".jdb"; + TestUtils.loadLog(getClass(), resName, envHome); + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(false); + envConfig.setReadOnly(readOnly); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + + /* Validate mem usage with daemons disabled, then enable them. 
*/ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/); + envConfig = env.getConfig(); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "true"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "true"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "true"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "true"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "true"); + env.setMutableConfig(envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(readOnly); + dbConfig.setSortedDuplicates(true); + db1 = env.openDatabase(null, Utils.DB1_NAME, dbConfig); + db2 = env.openDatabase(null, Utils.DB2_NAME, dbConfig); + } + + private void closeEnv() + throws DatabaseException { + + db1.close(); + db1 = null; + db2.close(); + db2 = null; + env.close(); + env = null; + } + + //@Test + public void test_1_5_4() + throws DatabaseException, IOException { + + doTest("1.5.4"); + } + + //@Test + public void test_1_7_0() + throws DatabaseException, IOException { + + doTest("1.7.0"); + } + + /** + * JE 2.0: FileHeader version 3. + */ + @Test + public void test_2_0_0() + throws DatabaseException, IOException { + + doTest("2.0.0"); + } + + /** + * JE 3.0.12: FileHeader version 4. + */ + @Test + public void test_3_0_12() + throws DatabaseException, IOException { + + /* + * The test was not run until JE 3.1.25, but no format changes were + * made between 3.0.12 and 3.1.25. + */ + doTest("3.1.25"); + } + + /** + * JE 3.2.79: FileHeader version 5. Version 5 was actually introduced in + * 3.2.22 + */ + @Test + public void test_3_2_79() + throws DatabaseException, IOException { + + doTest("3.2.79"); + } + + /** + * JE 3.3.78: FileHeader version 5. Version 5 was actually introduced in + * 3.2.22 + */ + @Test + public void test_3_3_78() + throws DatabaseException, IOException { + + doTest("3.3.78"); + } + + @Test + public void test_4_0_51() + throws DatabaseException, IOException { + + doTest("4.0.51"); + } + + @Test + public void test_5_0_39() + throws DatabaseException, IOException { + + doTest("5.0.39"); + } + + @Test + public void test_6_0_13() + throws DatabaseException, IOException { + + doTest("6.0.13"); + } + + @Test + public void test_6_2_12() + throws DatabaseException, IOException { + + doTest("6.2.12"); + } + + @Test + public void test_6_4_14() + throws DatabaseException, IOException { + + doTest("6.4.14"); + } + + @Test + public void test_7_0_6() + throws DatabaseException, IOException { + + doTest("7.0.6"); + } + + @Test + public void test_7_1_9() + throws DatabaseException, IOException { + + doTest("7.1.9"); + } + + private void doTest(String jeVersion) + throws DatabaseException, IOException { + + openEnv(jeVersion, true /*readOnly*/); + + VerifyConfig verifyConfig = new VerifyConfig(); + verifyConfig.setAggressive(true); + assertTrue(env.verify(verifyConfig, System.err)); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Database 1 is empty because the txn was aborted. 
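The openEnv pattern above (open with all daemons off, validate, then flip the mutable ENV_RUN_* parameters back on) can be factored into a small helper. A sketch using only the calls shown above:

import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;

public class DaemonToggle {

    /**
     * Fetch the live config, flip the mutable ENV_RUN_* parameters, and
     * hand the config back to the environment, as openEnv does above.
     */
    static void setDaemons(Environment env, boolean run) {
        String value = Boolean.toString(run);
        EnvironmentConfig config = env.getConfig();
        config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, value);
        config.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, value);
        config.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, value);
        config.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, value);
        env.setMutableConfig(config);
    }
}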
*/ + Cursor cursor = db1.openCursor(null, null); + try { + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + } finally { + cursor.close(); + } + + /* Database 2 has one record: {3, 0} */ + cursor = db2.openCursor(null, null); + try { + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, Utils.value(key)); + assertEquals(0, Utils.value(data)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + } finally { + cursor.close(); + } + + /* + * Database 3 should have one record (99,79) that was explicitly + * committed. We only added this commit record and test case when + * implementing JE 3.3, and only went to the trouble of backporting the + * MakeLogEntryVersionData to file version 5. (It's just an additional + * test, it should be fine for earlier versions.) + */ + if (!((jeVersion.startsWith("1")) || + (jeVersion.startsWith("2")) || + (jeVersion.startsWith("3.1")))) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + Database db3 = env.openDatabase(null, Utils.DB3_NAME, dbConfig); + + cursor = db3.openCursor(null, null); + try { + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(99, Utils.value(key)); + assertEquals(79, Utils.value(data)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + } finally { + cursor.close(); + db3.close(); + } + } + + /* + * Verify log entry types using a log reader. Read both full and + * partial items. + */ + String resName = "je-" + jeVersion + ".txt"; + LineNumberReader textReader = new LineNumberReader + (new InputStreamReader(getClass().getResourceAsStream(resName))); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + TestUtilLogReader logReader = new TestUtilLogReader(envImpl); + + String expectedType = textReader.readLine(); + while (expectedType != null) { + /* Read the full item. */ + assertTrue(logReader.readNextEntry()); + String foundType = logReader.getEntryType().toString(); + assertEquals + ("At line " + textReader.getLineNumber(), + expectedType.substring(0, expectedType.indexOf('/')), + foundType); + + assertEquals + ("At line " + textReader.getLineNumber(), + expectedType.substring(0, expectedType.indexOf('/')), + foundType); + + expectedType = textReader.readLine(); + } + assertTrue("This test should be sure to read some lines", + textReader.getLineNumber() > 0); + assertFalse("No more expected entries after line " + + textReader.getLineNumber() + " but found: " + + logReader.getEntry(), + logReader.readNextEntry()); + + assertTrue(env.verify(verifyConfig, System.err)); + closeEnv(); + + /* + * Do enough inserts to cause a split and perform some other write + * operations for good measure. + */ + openEnv(jeVersion, false /*readOnly*/); + for (int i = -127; i < 127; i += 1) { + status = db2.put(null, Utils.entry(i), Utils.entry(0)); + assertEquals(OperationStatus.SUCCESS, status); + } + /* Do updates. */ + for (int i = -127; i < 127; i += 1) { + status = db2.put(null, Utils.entry(i), Utils.entry(1)); + assertEquals(OperationStatus.SUCCESS, status); + } + /* Do deletes. */ + for (int i = -127; i < 127; i += 1) { + status = db2.delete(null, Utils.entry(i)); + assertEquals(OperationStatus.SUCCESS, status); + } + /* Same for duplicates. 
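The summary files consumed here contain one "Type/version" line per log entry, the format written by MakeLogEntryVersionData below (e.g. "FileSummaryLN/3"). The verification loop above compares only the type half before the '/'; a version-aware check could parse both halves like this hypothetical helper, shown for illustration:

public class SummaryLine {

    final String type;
    final int version;

    SummaryLine(String line) {
        int slash = line.indexOf('/');   // format: toStringNoVersion() + '/' + version
        type = line.substring(0, slash);
        version = Integer.parseInt(line.substring(slash + 1));
    }

    public static void main(String[] args) {
        SummaryLine l = new SummaryLine("FileSummaryLN/3");
        System.out.println(l.type + " @ version " + l.version); // FileSummaryLN @ version 3
    }
}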
*/ + for (int i = -127; i < 127; i += 1) { + status = db2.put(null, Utils.entry(0), Utils.entry(i)); + assertEquals(OperationStatus.SUCCESS, status); + } + for (int i = -127; i < 127; i += 1) { + status = db2.put(null, Utils.entry(0), Utils.entry(i)); + assertEquals(OperationStatus.SUCCESS, status); + } + cursor = db2.openCursor(null, null); + try { + status = cursor.getFirst(key, data, null); + while (status == OperationStatus.SUCCESS) { + status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + status = cursor.getNext(key, data, null); + } + } finally { + cursor.close(); + } + + assertTrue(env.verify(verifyConfig, System.err)); + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/logversion/LogHeaderVersionTest.java b/test/com/sleepycat/je/logversion/LogHeaderVersionTest.java new file mode 100644 index 0000000..3ec205d --- /dev/null +++ b/test/com/sleepycat/je/logversion/LogHeaderVersionTest.java @@ -0,0 +1,107 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.logversion; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.VersionMismatchException; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Tests log file header versioning. This test is used in conjunction with + * MakeLogHeaderVersionData, a main program that was used once to generate two + * log files with maximum and minimum valued header version numbers. + * + * @see MakeLogHeaderVersionData + */ +public class LogHeaderVersionTest extends TestBase { + + private File envHome; + + public LogHeaderVersionTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + + envHome = null; + } + + /** + * Tests that an exception is thrown when a log header is read with a newer + * version than the current version. The maxversion.jdb log file is loaded + * as a resource by this test and written as a regular log file. When the + * environment is opened, we expect a VersionMismatchException. + */ + @Test + public void testGreaterVersionNotAllowed() + throws IOException { + + TestUtils.loadLog(getClass(), Utils.MAX_VERSION_NAME, envHome); + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(false); + envConfig.setTransactional(true); + + try { + Environment env = new Environment(envHome, envConfig); + try { + env.close(); + } catch (Exception ignore) {} + } catch (VersionMismatchException e) { + /* Got VersionMismatchException as expected. */ + return; + } + fail("Expected VersionMismatchException"); + } + + /** + * Tests that when a file is opened with a lesser version than the current + * version, a new log file is started for writing new log entries. 
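Both tests in this class rely on TestUtils.loadLog to copy a bundled .jdb resource into the environment home as log file zero. A minimal sketch of what such a helper plausibly does, assuming the first log file is named 00000000.jdb as described in MakeLogHeaderVersionData below; the actual TestUtils implementation may differ:

import java.io.File;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class LoadLogSketch {

    /** Copy a bundled .jdb resource into envHome as the first log file. */
    static void loadLog(Class<?> cls, String resName, File envHome)
        throws Exception {

        File logFile = new File(envHome, "00000000.jdb");
        try (InputStream in = cls.getResourceAsStream(resName)) {
            if (in == null) {
                throw new IllegalArgumentException("no such resource: " + resName);
            }
            Files.copy(in, logFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
        }
    }
}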
This is + * important so that the new header version is written even if no new log + * file is needed. If the new version were not written, an older version + * of JE would not recognize that there had been a version change. + */ + @Test + public void testLesserVersionNotUpdated() + throws DatabaseException, IOException { + + TestUtils.loadLog(getClass(), Utils.MIN_VERSION_NAME, envHome); + File logFile = new File(envHome, TestUtils.LOG_FILE_NAME); + long origFileSize = logFile.length(); + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(false); + envConfig.setTransactional(true); + + Environment env = new Environment(envHome, envConfig); + env.sync(); + env.close(); + + assertEquals(origFileSize, logFile.length()); + } +} diff --git a/test/com/sleepycat/je/logversion/MakeLogEntryVersionData.java b/test/com/sleepycat/je/logversion/MakeLogEntryVersionData.java new file mode 100644 index 0000000..5d0efe8 --- /dev/null +++ b/test/com/sleepycat/je/logversion/MakeLogEntryVersionData.java @@ -0,0 +1,315 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.logversion; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.PrintWriter; +import java.util.HashSet; +import java.util.Set; +import javax.transaction.xa.XAResource; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.XAEnvironment; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogUtils.XidImpl; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.TestUtilLogReader; +import com.sleepycat.je.log.entry.EmptyLogEntry; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.MatchpointLogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.txn.RollbackEnd; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.Matchpoint; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.utilint.StringUtils; + +/** + * This standalone command line program generates log files named je-x.y.z.jdb + * and je-x.y.z.txt, where x.y.z is the version of JE used to run the program. + * This program needs to be run for the current version of JE when we release + * a new major version of JE. It does not need to be run again for older + * versions of JE, unless it is changed to generate new types of log entries + * and we need to verify those log entries for all versions of JE. 
In that
+ * case the LogEntryVersionTest may also need to be changed.
+ *
+ * <p>Run this program with the desired version of JE in the classpath and pass
+ * a home directory as the single command line argument. After running this
+ * program move the je-x.y.z.* files to the directory of this source package.
+ * When adding je-x.y.z.jdb to CVS make sure to use -kb since it is a binary
+ * file.</p>
+ *
+ * <p>This program can be run using the logversiondata ant target.</p>
        + * + * @see LogEntryVersionTest + */ +public class MakeLogEntryVersionData { + + /* Minimum child entries per BIN. */ + private static int N_ENTRIES = 4; + + private MakeLogEntryVersionData() { + } + + public static void main(String[] args) + throws Exception { + + if (args.length != 1) { + throw new Exception("Home directory arg is required."); + } + + File homeDir = new File(args[0]); + File logFile = new File(homeDir, TestUtils.LOG_FILE_NAME); + File renamedLogFile = new File(homeDir, "je-" + + JEVersion.CURRENT_VERSION.getNumericVersionString() + ".jdb"); + File summaryFile = new File(homeDir, "je-" + + JEVersion.CURRENT_VERSION.getNumericVersionString() + ".txt"); + + if (logFile.exists()) { + throw new Exception("Home directory (" + homeDir + + ") must be empty of log files."); + } + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + /* Make as small a log as possible to save space in CVS. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + /* Use a 100 MB log file size to ensure only one file is written. */ + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(100 * (1 << 20))); + /* Force BIN-delta. */ + envConfig.setConfigParam + (EnvironmentParams.BIN_DELTA_PERCENT.getName(), + Integer.toString(75)); + /* Ensure that we create two BINs with N_ENTRIES LNs. */ + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), + Integer.toString(N_ENTRIES)); + + CheckpointConfig forceCheckpoint = new CheckpointConfig(); + forceCheckpoint.setForce(true); + + XAEnvironment env = new XAEnvironment(homeDir, envConfig); + + /* + * Make two shadow database. Database 1 is transactional and has + * aborts, database 2 is not transactional. + */ + for (int i = 0; i < 2; i += 1) { + boolean transactional = (i == 0); + String dbName = transactional ? Utils.DB1_NAME : Utils.DB2_NAME; + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(transactional); + dbConfig.setSortedDuplicates(true); + Database db = env.openDatabase(null, dbName, dbConfig); + + Transaction txn = null; + if (transactional) { + txn = env.beginTransaction(null, null); + } + + for (int j = 0; j < N_ENTRIES; j += 1) { + db.put(txn, Utils.entry(j), Utils.entry(0)); + } + db.put(txn, Utils.entry(0), Utils.entry(1)); + /* Update. */ + db.put(txn, Utils.entry(0), Utils.entry(1)); + + /* Must checkpoint to generate BIN-deltas. */ + env.checkpoint(forceCheckpoint); + + /* Delete everything but the last LN to cause IN deletion. */ + for (int j = 0; j < N_ENTRIES - 1; j += 1) { + db.delete(txn, Utils.entry(j)); + } + + if (transactional) { + txn.abort(); + } + + db.close(); + } + + /* Compress twice to delete DBIN, DIN, BIN, IN. 
*/ + env.compress(); + env.compress(); + + /* DB2 was not aborted and will contain: {3, 0} */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(true); + dbConfig.setSortedDuplicates(true); + Database db = env.openDatabase(null, Utils.DB2_NAME, dbConfig); + Cursor cursor = db.openCursor(null, null); + try { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = cursor.getFirst(key, data, null); + if (status != OperationStatus.SUCCESS) { + throw new Exception("Expected SUCCESS but got: " + status); + } + if (Utils.value(key) != 3 || Utils.value(data) != 0) { + throw new Exception("Expected {3,0} but got: {" + + Utils.value(key) + ',' + + Utils.value(data) + '}'); + } + } finally { + cursor.close(); + } + db.close(); + + /* + * Make database 3, which is transactional and has some explicit + * transaction commit record. + */ + dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Transaction txn = env.beginTransaction(null, null); + db = env.openDatabase(null, Utils.DB3_NAME, dbConfig); + OperationStatus status = db.put(txn, Utils.entry(99), Utils.entry(79)); + assert status == OperationStatus.SUCCESS: "status=" + status; + db.close(); + txn.commit(); + + /* + * Generate an XA txn Prepare. The transaction must be non-empty in + * order to actually log the Prepare. + */ + XidImpl xid = + new XidImpl(1, StringUtils.toUTF8("MakeLogEntryVersionData"), null); + env.start(xid, XAResource.TMNOFLAGS); + /* Re-write the existing {3,0} record. */ + dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(false); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, Utils.DB2_NAME, dbConfig); + db.put(null, Utils.entry(3), Utils.entry(0)); + db.close(); + env.prepare(xid); + env.rollback(xid); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* Log a RollbackStart entry. */ + LogEntry entry = + SingleItemEntry.create(LogEntryType.LOG_ROLLBACK_START, + new RollbackStart(VLSN.NULL_VLSN, + DbLsn.NULL_LSN, + new HashSet())); + envImpl.getLogManager().log(entry, ReplicationContext.NO_REPLICATE); + + /* Log a RollbackEnd entry. */ + entry = SingleItemEntry.create(LogEntryType.LOG_ROLLBACK_END, + new RollbackEnd(DbLsn.NULL_LSN, + DbLsn.NULL_LSN)); + envImpl.getLogManager().log(entry, ReplicationContext.NO_REPLICATE); + + /* Log a Matchpoint entry. */ + entry = new MatchpointLogEntry(new Matchpoint(1)); + envImpl.getLogManager().log(entry, ReplicationContext.NO_REPLICATE); + + /* Log an ImmutableFile entry. */ + entry = SingleItemEntry.create(LogEntryType.LOG_IMMUTABLE_FILE, + new EmptyLogEntry()); + envImpl.getLogManager().log(entry, ReplicationContext.NO_REPLICATE); + + env.close(); + + /* + * Get the set of all log entry types we expect to output. We exclude + * several types: + * - MapLN_TX: MapLN (non-transactional) is now used instead. + * - INDelete: root compression is no longer used. + * - LN, LN_TX: deprecated and replaced by LN_INS/UPD/DEL, etc. + * - DelDupLN, DelDupLN_TX, DupCountLN, DupCountLN_TX, DIN, DBIN, + * DupBINDelta, INDupDelete: deprecated, dup tree is no longer used. 
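The prepare step above shows the one subtlety of generating a Prepare record: the XA transaction must contain at least one write, or nothing is logged. The same flow, condensed into a fragment using only the calls shown above (the gtrid string is arbitrary):

import javax.transaction.xa.XAResource;

import com.sleepycat.je.XAEnvironment;
import com.sleepycat.je.log.LogUtils.XidImpl;
import com.sleepycat.utilint.StringUtils;

public class PrepareSketch {

    /** Produce a Prepare log record; the XA txn must do real work first. */
    static void logPrepare(XAEnvironment env) throws Exception {
        XidImpl xid = new XidImpl(1, StringUtils.toUTF8("example-gtrid"), null);
        env.start(xid, XAResource.TMNOFLAGS);
        // ... perform at least one write under this xid, as above ...
        env.prepare(xid);
        env.rollback(xid);   // or commit; either ends the prepared txn
    }
}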
+ */ + Set expectedTypes = LogEntryType.getAllTypes(); + expectedTypes.remove(LogEntryType.LOG_MAPLN_TRANSACTIONAL); + expectedTypes.remove(LogEntryType.LOG_IN_DELETE_INFO); + expectedTypes.remove(LogEntryType.LOG_OLD_LN); // LN + expectedTypes.remove(LogEntryType.LOG_OLD_LN_TRANSACTIONAL); // LN_TX + expectedTypes.remove(LogEntryType.LOG_DEL_DUPLN); + expectedTypes.remove(LogEntryType.LOG_DEL_DUPLN_TRANSACTIONAL); + expectedTypes.remove(LogEntryType.LOG_DUPCOUNTLN); + expectedTypes.remove(LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL); + expectedTypes.remove(LogEntryType.LOG_DIN); + expectedTypes.remove(LogEntryType.LOG_DBIN); + expectedTypes.remove(LogEntryType.LOG_OLD_DUP_BIN_DELTA); + expectedTypes.remove(LogEntryType.LOG_IN_DUPDELETE_INFO); + expectedTypes.remove(LogEntryType.LOG_OLD_BIN_DELTA); + + /* Open read-only and write all LogEntryType names to a text file. */ + envConfig.setReadOnly(true); + Environment env2 = new Environment(homeDir, envConfig); + PrintWriter writer = new PrintWriter + (new BufferedOutputStream(new FileOutputStream(summaryFile))); + TestUtilLogReader reader = new TestUtilLogReader + (DbInternal.getNonNullEnvImpl(env2)); + while (reader.readNextEntry()) { + LogEntryType type = reader.getEntryType(); + writer.println(type.toStringNoVersion() + '/' + + reader.getEntryVersion()); + expectedTypes.remove(type); + } + writer.close(); + env2.close(); + + if (expectedTypes.size() > 0) { + throw new Exception("Types not output: " + expectedTypes); + } + + if (!logFile.exists()) { + throw new Exception("What happened to: " + logFile); + } + + if (!logFile.renameTo(renamedLogFile)) { + throw new Exception + ("Could not rename: " + logFile + " to " + renamedLogFile); + } + + System.out.println("Created: " + renamedLogFile); + System.out.println("Created: " + summaryFile); + } +} diff --git a/test/com/sleepycat/je/logversion/MakeLogHeaderVersionData.java b/test/com/sleepycat/je/logversion/MakeLogHeaderVersionData.java new file mode 100644 index 0000000..bb9ef92 --- /dev/null +++ b/test/com/sleepycat/je/logversion/MakeLogHeaderVersionData.java @@ -0,0 +1,80 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.logversion; + +import java.io.File; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; + +/** + * This standalone command line program creates a single 00000000.jdb log file. + * It was used to generate maxversion.jdb and minversion.jdb, and although it + * may never need to be used again, below are instructions. + * + *
+ * <p>Before running this program change LogEntryType.LOG_VERSION to
+ * Integer.MAX_VALUE or one temporarily, just for creating a file with the
+ * maximum or minimum version number. A single command line argument is
+ * required for the home directory. After running this program rename the
+ * 00000000.jdb file to maxversion.jdb or minversion.jdb in the directory
+ * of this source package. When adding it to CVS make sure to use -kb since it
+ * is a binary file. Don't forget to change LogEntryType.LOG_VERSION back to
+ * the correct value.</p>
        + * + * @see LogHeaderVersionTest + */ +public class MakeLogHeaderVersionData { + + private MakeLogHeaderVersionData() { + } + + public static void main(String[] args) + throws Exception { + + if (args.length != 1) { + throw new Exception("Home directory arg is required."); + } + + File homeDir = new File(args[0]); + File logFile = new File(homeDir, TestUtils.LOG_FILE_NAME); + + if (logFile.exists()) { + throw new Exception("Home directory must be empty of log files."); + } + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + /* Make as small a log as possible to save space in CVS. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + + Environment env = new Environment(homeDir, envConfig); + env.close(); + + if (!logFile.exists()) { + throw new Exception("Home directory does not contain: " + logFile); + } + + System.out.println("Sucessfully created: " + logFile); + } +} diff --git a/test/com/sleepycat/je/logversion/Utils.java b/test/com/sleepycat/je/logversion/Utils.java new file mode 100644 index 0000000..946918e --- /dev/null +++ b/test/com/sleepycat/je/logversion/Utils.java @@ -0,0 +1,40 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.logversion; + +import com.sleepycat.je.DatabaseEntry; + +public class Utils { + + static final String DB1_NAME = "database1"; + static final String DB2_NAME = "database2"; + static final String DB3_NAME = "database3"; + static final String MIN_VERSION_NAME = "minversion.jdb"; + static final String MAX_VERSION_NAME = "maxversion.jdb"; + + static DatabaseEntry entry(int val) { + + byte[] data = new byte[] { (byte) val }; + return new DatabaseEntry(data); + } + + static int value(DatabaseEntry entry) { + + byte[] data = entry.getData(); + if (data.length != 1) { + throw new IllegalStateException("len=" + data.length); + } + return data[0]; + } +} diff --git a/test/com/sleepycat/je/logversion/je-1.5.4.jdb b/test/com/sleepycat/je/logversion/je-1.5.4.jdb new file mode 100644 index 0000000..db91014 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-1.5.4.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-1.5.4.txt b/test/com/sleepycat/je/logversion/je-1.5.4.txt new file mode 100644 index 0000000..b4e5edc --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-1.5.4.txt @@ -0,0 +1,186 @@ +FileHeader/0 +Trace/0 +DbTree/0 +CkptStart/0 +IN/0 +BIN/0 +NameLN_TX/0 +IN/0 +BIN/0 +MapLN_TX/0 +Commit/0 +Commit/0 +IN/0 +BIN/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +Trace/0 +NameLN_TX/0 +MapLN_TX/0 +Commit/0 +Commit/0 +IN/0 +BIN/0 +LN_TX/0 +LN_TX/0 +LN_TX/0 +LN_TX/0 +BIN/0 +BIN/0 +IN/0 +DupCountLN/0 +DBIN/0 +DIN/0 +DupCountLN_TX/0 +LN_TX/0 +CkptStart/0 +DupBINDelta/0 +DIN/0 +BIN/0 +BINDelta/0 +BIN/0 +IN/0 +DbTree/0 +IN/0 +MapLN_TX/0 +Commit/0 +IN/0 +MapLN_TX/0 +Commit/0 +BIN/0 +IN/0 +DbTree/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +LN_TX/0 +LN_TX/0 +Abort/0 +NameLN/0 +MapLN_TX/0 +Commit/0 +IN/0 +BIN/0 +LN/0 +LN/0 +LN/0 +LN/0 +BIN/0 +BIN/0 +IN/0 +DupCountLN/0 +DBIN/0 +DIN/0 +DupCountLN/0 +LN/0 +CkptStart/0 +DupBINDelta/0 +DBIN/0 +DIN/0 +DIN/0 +BIN/0 +BIN/0 +BIN/0 +BINDelta/0 +BINDelta/0 +IN/0 +MapLN_TX/0 +Commit/0 +IN/0 +MapLN_TX/0 +Commit/0 +IN/0 +MapLN_TX/0 +Commit/0 +BIN/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +DelDupLN/0 +DupCountLN/0 +DelDupLN/0 +DupCountLN/0 +LN/0 +LN/0 +INDelete/0 +INDelete/0 +CkptStart/0 +BIN/0 +BIN/0 +BIN/0 +BINDelta/0 +IN/0 +MapLN_TX/0 +Commit/0 +IN/0 +MapLN_TX/0 +Commit/0 +IN/0 +MapLN_TX/0 +Commit/0 +IN/0 +DbTree/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +CkptStart/0 +BIN/0 +BIN/0 +IN/0 +DbTree/0 +IN/0 +MapLN_TX/0 +Commit/0 +BIN/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +Trace/0 +CkptStart/0 +BIN/0 +IN/0 +MapLN_TX/0 +Commit/0 +IN/0 +DbTree/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +CkptStart/0 +BIN/0 +IN/0 +MapLN_TX/0 +Commit/0 +BIN/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 diff --git a/test/com/sleepycat/je/logversion/je-1.7.0.jdb b/test/com/sleepycat/je/logversion/je-1.7.0.jdb new file mode 100644 index 0000000..cf433b1 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-1.7.0.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-1.7.0.txt b/test/com/sleepycat/je/logversion/je-1.7.0.txt new file mode 100644 index 0000000..ac89401 --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-1.7.0.txt @@ -0,0 +1,189 @@ +FileHeader/0 +Trace/0 +DbTree/0 +CkptStart/0 +IN/1 +BIN/1 +NameLN_TX/0 +IN/1 +BIN/1 +MapLN_TX/0 +Commit/0 +Commit/0 +IN/1 +BIN/1 
+FileSummaryLN/0 +Trace/0 +CkptEnd/0 +Trace/0 +NameLN_TX/0 +MapLN_TX/0 +Commit/0 +Commit/0 +IN/1 +BIN/1 +LN_TX/0 +LN_TX/0 +LN_TX/0 +LN_TX/0 +BIN/1 +BIN/1 +IN/1 +DupCountLN/0 +DBIN/1 +DIN/1 +DupCountLN_TX/0 +LN_TX/0 +CkptStart/0 +DupBINDelta/0 +DIN/1 +BIN/1 +BINDelta/0 +BIN/1 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +DbTree/0 +BIN/1 +IN/1 +DbTree/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +LN_TX/0 +LN_TX/0 +Abort/0 +NameLN/0 +MapLN_TX/0 +Commit/0 +IN/1 +BIN/1 +LN/0 +LN/0 +LN/0 +LN/0 +BIN/1 +BIN/1 +IN/1 +DupCountLN/0 +DBIN/1 +DIN/1 +DupCountLN/0 +LN/0 +CkptStart/0 +DupBINDelta/0 +DBIN/1 +DIN/1 +DIN/1 +BINDelta/0 +BIN/1 +BINDelta/0 +BIN/1 +BIN/1 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +MapLN_TX/0 +Commit/0 +BIN/1 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +DelDupLN/0 +DupCountLN/0 +DelDupLN/0 +DupCountLN/0 +LN/0 +LN/0 +INDupDelete/0 +FileSummaryLN/0 +INDupDelete/0 +FileSummaryLN/0 +INDelete/0 +FileSummaryLN/0 +CkptStart/0 +BIN/1 +BINDelta/0 +BIN/1 +BIN/1 +BIN/1 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +DbTree/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +Trace/0 +CkptStart/0 +BIN/1 +BIN/1 +IN/1 +DbTree/0 +IN/1 +MapLN_TX/0 +Commit/0 +BIN/1 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +Trace/0 +CkptStart/0 +BIN/1 +IN/1 +MapLN_TX/0 +Commit/0 +IN/1 +DbTree/0 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 +CkptStart/0 +BIN/1 +IN/1 +MapLN_TX/0 +Commit/0 +BIN/1 +FileSummaryLN/0 +Trace/0 +CkptEnd/0 diff --git a/test/com/sleepycat/je/logversion/je-2.0.0.jdb b/test/com/sleepycat/je/logversion/je-2.0.0.jdb new file mode 100644 index 0000000..305cb80 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-2.0.0.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-2.0.0.txt b/test/com/sleepycat/je/logversion/je-2.0.0.txt new file mode 100644 index 0000000..141b08b --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-2.0.0.txt @@ -0,0 +1,149 @@ +FileHeader/0 +Trace/0 +DbTree/1 +CkptStart/0 +IN/2 +BIN/2 +NameLN_TX/0 +IN/2 +BIN/2 +MapLN_TX/1 +Commit/0 +Commit/0 +IN/2 +BIN/2 +FileSummaryLN/1 +Trace/0 +CkptEnd/0 +Trace/0 +NameLN_TX/0 +MapLN_TX/1 +Commit/0 +Commit/0 +IN/2 +BIN/2 +LN_TX/0 +LN_TX/0 +LN_TX/0 +LN_TX/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN_TX/0 +LN_TX/0 +CkptStart/0 +DupBINDelta/0 +DIN/2 +BIN/2 +BINDelta/0 +BIN/2 +IN/2 +MapLN_TX/1 +Commit/0 +IN/2 +MapLN_TX/1 +Commit/0 +IN/2 +DbTree/1 +BIN/2 +IN/2 +DbTree/1 +FileSummaryLN/1 +Trace/0 +CkptEnd/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +LN_TX/0 +LN_TX/0 +Abort/0 +NameLN/0 +MapLN_TX/1 +Commit/0 +IN/2 +BIN/2 +LN/0 +LN/0 +LN/0 +LN/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN/0 +LN/0 +CkptStart/0 +DupBINDelta/0 +DBIN/2 +DIN/2 +DIN/2 +BIN/2 +BINDelta/0 +BINDelta/0 +BIN/2 +BINDelta/0 +IN/2 +MapLN_TX/1 +Commit/0 +IN/2 +MapLN_TX/1 +Commit/0 +BINDelta/0 +FileSummaryLN/1 +Trace/0 +CkptEnd/0 +DelDupLN/0 +DupCountLN/0 +DelDupLN/0 +DupCountLN/0 +LN/0 +LN/0 +INDupDelete/0 +INDelete/0 +INDupDelete/0 +INDelete/0 +FileSummaryLN/1 +Prepare/0 +CkptStart/0 +BIN/2 +BINDelta/0 +BIN/2 +BINDelta/0 +IN/2 +MapLN_TX/1 +Commit/0 +IN/2 +MapLN_TX/1 +Commit/0 +BINDelta/0 +BIN/2 +BIN/2 +IN/2 +FileSummaryLN/1 +Trace/0 +CkptEnd/0 +MapLN_TX/1 +Commit/0 +CkptStart/0 +BIN/2 +BIN/2 +IN/2 +DbTree/1 +IN/2 
+MapLN_TX/1 +Commit/0 +BIN/2 +IN/2 +DbTree/1 +FileSummaryLN/1 +CkptEnd/0 +CkptStart/0 +BIN/2 +FileSummaryLN/1 +CkptEnd/0 diff --git a/test/com/sleepycat/je/logversion/je-3.1.25.jdb b/test/com/sleepycat/je/logversion/je-3.1.25.jdb new file mode 100644 index 0000000..aeb088e Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-3.1.25.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-3.1.25.txt b/test/com/sleepycat/je/logversion/je-3.1.25.txt new file mode 100644 index 0000000..f06ab06 --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-3.1.25.txt @@ -0,0 +1,149 @@ +FileHeader/0 +Trace/0 +DbTree/1 +BIN/2 +IN/2 +NameLN_TX/0 +BIN/2 +IN/2 +MapLN/2 +Commit/0 +CkptStart/0 +BIN/2 +IN/2 +DbTree/1 +BIN/2 +IN/2 +DbTree/1 +BIN/2 +IN/2 +FileSummaryLN/2 +Trace/0 +CkptEnd/0 +Trace/0 +NameLN_TX/0 +MapLN/2 +Commit/0 +BIN/2 +IN/2 +LN_TX/0 +LN_TX/0 +LN_TX/0 +LN_TX/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN_TX/0 +LN_TX/0 +CkptStart/0 +DupBINDelta/0 +DIN/2 +BINDelta/0 +BIN/2 +BINDelta/0 +IN/2 +MapLN/2 +IN/2 +MapLN/2 +BIN/2 +FileSummaryLN/2 +Trace/0 +CkptEnd/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +LN_TX/0 +LN_TX/0 +Abort/0 +NameLN/0 +MapLN/2 +BIN/2 +IN/2 +LN/0 +LN/0 +LN/0 +LN/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN/0 +LN/0 +CkptStart/0 +DupBINDelta/0 +DBIN/2 +DIN/2 +DIN/2 +BINDelta/0 +BIN/2 +BIN/2 +BINDelta/0 +BINDelta/0 +IN/2 +MapLN/2 +IN/2 +MapLN/2 +BINDelta/0 +IN/2 +DbTree/1 +FileSummaryLN/2 +Trace/0 +CkptEnd/0 +DelDupLN/0 +DupCountLN/0 +DelDupLN/0 +DupCountLN/0 +LN/0 +LN/0 +INDupDelete/0 +IN/2 +MapLN/2 +INDupDelete/0 +FileSummaryLN/2 +INDelete/0 +MapLN/2 +IN/2 +MapLN/2 +BIN/2 +BIN/2 +IN/2 +FileSummaryLN/2 +Prepare/0 +CkptStart/0 +BIN/2 +BIN/2 +BIN/2 +IN/2 +MapLN/2 +IN/2 +DbTree/1 +IN/2 +MapLN/2 +BIN/2 +FileSummaryLN/2 +Trace/0 +CkptEnd/0 +CkptStart/0 +BIN/2 +IN/2 +MapLN/2 +IN/2 +DbTree/1 +FileSummaryLN/2 +CkptEnd/0 +CkptStart/0 +BIN/2 +IN/2 +MapLN/2 +BIN/2 +BIN/2 +BIN/2 +IN/2 +FileSummaryLN/2 +CkptEnd/0 diff --git a/test/com/sleepycat/je/logversion/je-3.2.22.jdb b/test/com/sleepycat/je/logversion/je-3.2.22.jdb new file mode 100644 index 0000000..1969689 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-3.2.22.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-3.2.22.txt b/test/com/sleepycat/je/logversion/je-3.2.22.txt new file mode 100644 index 0000000..33bec20 --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-3.2.22.txt @@ -0,0 +1,151 @@ +FileHeader/0 +Trace/0 +DbTree/1 +BIN/2 +IN/2 +NameLN_TX/0 +BIN/2 +IN/2 +MapLN/2 +Commit/0 +CkptStart/0 +BIN/2 +IN/2 +DbTree/1 +BIN/2 +IN/2 +DbTree/1 +BIN/2 +IN/2 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +Trace/0 +NameLN_TX/0 +MapLN/2 +Commit/0 +BIN/2 +IN/2 +LN_TX/0 +LN_TX/0 +LN_TX/0 +LN_TX/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN_TX/0 +LN_TX/0 +CkptStart/0 +DupBINDelta/0 +DIN/2 +BINDelta/0 +BIN/2 +BINDelta/0 +IN/2 +MapLN/2 +IN/2 +MapLN/2 +BIN/2 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +LN_TX/0 +LN_TX/0 +Abort/0 +NameLN/0 +MapLN/2 +BIN/2 +IN/2 +LN/0 +LN/0 +LN/0 +LN/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN/0 +LN/0 +CkptStart/0 +DupBINDelta/0 +DBIN/2 +DIN/2 +DIN/2 +BIN/2 +BINDelta/0 +BIN/2 +BINDelta/0 +BINDelta/0 +IN/2 +MapLN/2 +IN/2 +MapLN/2 +BINDelta/0 +IN/2 +DbTree/1 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +DelDupLN/0 +DupCountLN/0 +DelDupLN/0 +DupCountLN/0 +LN/0 +LN/0 +INDupDelete/0 +IN/2 +MapLN/2 +INDupDelete/0 +FileSummaryLN/3 +INDelete/0 +MapLN/2 
+IN/2 +MapLN/2 +BIN/2 +BIN/2 +IN/2 +FileSummaryLN/3 +LN_TX/0 +Prepare/0 +Abort/0 +CkptStart/0 +BIN/2 +BIN/2 +BIN/2 +IN/2 +MapLN/2 +IN/2 +DbTree/1 +IN/2 +MapLN/2 +BIN/2 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +CkptStart/0 +BIN/2 +IN/2 +MapLN/2 +IN/2 +DbTree/1 +FileSummaryLN/3 +CkptEnd/0 +CkptStart/0 +BIN/2 +IN/2 +MapLN/2 +BIN/2 +BIN/2 +BIN/2 +IN/2 +FileSummaryLN/3 +CkptEnd/0 diff --git a/test/com/sleepycat/je/logversion/je-3.2.79.jdb b/test/com/sleepycat/je/logversion/je-3.2.79.jdb new file mode 100644 index 0000000..19116a8 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-3.2.79.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-3.2.79.txt b/test/com/sleepycat/je/logversion/je-3.2.79.txt new file mode 100644 index 0000000..6194092 --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-3.2.79.txt @@ -0,0 +1,164 @@ +FileHeader/0 +Trace/0 +DbTree/1 +BIN/2 +IN/2 +NameLN_TX/0 +BIN/2 +IN/2 +MapLN/2 +Commit/0 +CkptStart/0 +BIN/2 +IN/2 +DbTree/1 +BIN/2 +IN/2 +DbTree/1 +BIN/2 +IN/2 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +Trace/0 +NameLN_TX/0 +MapLN/2 +Commit/0 +BIN/2 +IN/2 +LN_TX/0 +LN_TX/0 +LN_TX/0 +LN_TX/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN_TX/0 +LN_TX/0 +CkptStart/0 +DupBINDelta/0 +DIN/2 +BINDelta/0 +BIN/2 +BINDelta/0 +IN/2 +MapLN/2 +IN/2 +MapLN/2 +BIN/2 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +DelDupLN_TX/0 +DupCountLN_TX/0 +LN_TX/0 +LN_TX/0 +Abort/0 +NameLN/0 +MapLN/2 +BIN/2 +IN/2 +LN/0 +LN/0 +LN/0 +LN/0 +BIN/2 +BIN/2 +IN/2 +DupCountLN/0 +DBIN/2 +DIN/2 +DupCountLN/0 +LN/0 +CkptStart/0 +DupBINDelta/0 +DBIN/2 +DIN/2 +DIN/2 +BIN/2 +BINDelta/0 +BIN/2 +BINDelta/0 +BINDelta/0 +IN/2 +MapLN/2 +IN/2 +MapLN/2 +BINDelta/0 +IN/2 +DbTree/1 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +DelDupLN/0 +DupCountLN/0 +DelDupLN/0 +DupCountLN/0 +LN/0 +LN/0 +INDupDelete/0 +IN/2 +MapLN/2 +INDupDelete/0 +FileSummaryLN/3 +INDelete/0 +MapLN/2 +IN/2 +MapLN/2 +BIN/2 +BIN/2 +IN/2 +FileSummaryLN/3 +NameLN_TX/0 +MapLN/2 +Commit/0 +BIN/2 +IN/2 +LN_TX/0 +Commit/0 +LN_TX/0 +Prepare/0 +Abort/0 +CkptStart/0 +BIN/2 +BIN/2 +BIN/2 +BIN/2 +IN/2 +MapLN/2 +IN/2 +MapLN/2 +IN/2 +DbTree/1 +IN/2 +MapLN/2 +BIN/2 +FileSummaryLN/3 +Trace/0 +CkptEnd/0 +BIN/2 +BIN/2 +IN/2 +CkptStart/0 +BIN/2 +IN/2 +MapLN/2 +IN/2 +DbTree/1 +FileSummaryLN/3 +CkptEnd/0 +CkptStart/0 +BIN/2 +IN/2 +MapLN/2 +BIN/2 +BIN/2 +BIN/2 +IN/2 +FileSummaryLN/3 +CkptEnd/0 diff --git a/test/com/sleepycat/je/logversion/je-3.3.78.jdb b/test/com/sleepycat/je/logversion/je-3.3.78.jdb new file mode 100644 index 0000000..8114814 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-3.3.78.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-3.3.78.txt b/test/com/sleepycat/je/logversion/je-3.3.78.txt new file mode 100644 index 0000000..6205a43 --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-3.3.78.txt @@ -0,0 +1,171 @@ +FileHeader/6 +Trace/6 +DbTree/6 +BIN/6 +IN/6 +NameLN_TX/6 +BIN/6 +IN/6 +MapLN/6 +Commit/6 +CkptStart/6 +BIN/6 +IN/6 +DbTree/6 +BIN/6 +IN/6 +DbTree/6 +BIN/6 +IN/6 +FileSummaryLN/6 +Trace/6 +CkptEnd/6 +Trace/6 +NameLN_TX/6 +MapLN/6 +Commit/6 +BIN/6 +IN/6 +LN_TX/6 +LN_TX/6 +LN_TX/6 +LN_TX/6 +BIN/6 +BIN/6 +IN/6 +DupCountLN/6 +DBIN/6 +DIN/6 +DupCountLN_TX/6 +LN_TX/6 +CkptStart/6 +DupBINDelta/6 +DIN/6 +BINDelta/6 +BIN/6 +IN/6 +MapLN/6 +BINDelta/6 +IN/6 +MapLN/6 +BIN/6 +DbTree/6 +FileSummaryLN/6 +Trace/6 +CkptEnd/6 +DelDupLN_TX/6 +DupCountLN_TX/6 +DelDupLN_TX/6 +DupCountLN_TX/6 +LN_TX/6 +LN_TX/6 +Abort/6 +NameLN/6 +MapLN/6 +BIN/6 +IN/6 +LN/6 +LN/6 +LN/6 +LN/6 +BIN/6 +BIN/6 +IN/6 
+DupCountLN/6 +DBIN/6 +DIN/6 +DupCountLN/6 +LN/6 +CkptStart/6 +DupBINDelta/6 +DIN/6 +DBIN/6 +DIN/6 +BIN/6 +BINDelta/6 +BINDelta/6 +BINDelta/6 +BIN/6 +IN/6 +MapLN/6 +BINDelta/6 +IN/6 +DbTree/6 +MapLN/6 +MapLN/6 +FileSummaryLN/6 +Trace/6 +CkptEnd/6 +DelDupLN/6 +DupCountLN/6 +DelDupLN/6 +DupCountLN/6 +LN/6 +LN/6 +IN/6 +MapLN/6 +INDupDelete/6 +INDupDelete/6 +IN/6 +MapLN/6 +FileSummaryLN/6 +MapLN/6 +MapLN/6 +INDelete/6 +MapLN/6 +BIN/6 +BIN/6 +IN/6 +FileSummaryLN/6 +MapLN/6 +NameLN_TX/6 +MapLN/6 +Commit/6 +BIN/6 +IN/6 +LN_TX/6 +Commit/6 +LN_TX/6 +Prepare/6 +Abort/6 +CkptStart/6 +BIN/6 +BIN/6 +BIN/6 +IN/6 +MapLN/6 +BIN/6 +IN/6 +MapLN/6 +BIN/6 +MapLN/6 +DbTree/6 +FileSummaryLN/6 +Trace/6 +CkptEnd/6 +BIN/6 +BIN/6 +IN/6 +CkptStart/6 +BIN/6 +IN/6 +DbTree/6 +IN/6 +MapLN/6 +BIN/6 +IN/6 +DbTree/6 +MapLN/6 +FileSummaryLN/6 +CkptEnd/6 +CkptStart/6 +BIN/6 +IN/6 +MapLN/6 +BIN/6 +DbTree/6 +BIN/6 +BIN/6 +IN/6 +FileSummaryLN/6 +CkptEnd/6 diff --git a/test/com/sleepycat/je/logversion/je-4.0.51.jdb b/test/com/sleepycat/je/logversion/je-4.0.51.jdb new file mode 100644 index 0000000..a43b894 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-4.0.51.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-4.0.51.txt b/test/com/sleepycat/je/logversion/je-4.0.51.txt new file mode 100644 index 0000000..a9be58b --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-4.0.51.txt @@ -0,0 +1,167 @@ +FileHeader/7 +DbTree/7 +BIN/7 +IN/7 +NameLN_TX/7 +BIN/7 +IN/7 +MapLN/7 +Commit/7 +CkptStart/7 +BIN/7 +IN/7 +DbTree/7 +BIN/7 +IN/7 +DbTree/7 +BIN/7 +IN/7 +FileSummaryLN/7 +CkptEnd/7 +NameLN_TX/7 +MapLN/7 +Commit/7 +BIN/7 +IN/7 +LN_TX/7 +LN_TX/7 +LN_TX/7 +LN_TX/7 +BIN/7 +BIN/7 +IN/7 +DupCountLN/7 +DBIN/7 +DIN/7 +DupCountLN_TX/7 +LN_TX/7 +CkptStart/7 +DupBINDelta/7 +DIN/7 +BINDelta/7 +BIN/7 +IN/7 +MapLN/7 +BINDelta/7 +IN/7 +MapLN/7 +BIN/7 +DbTree/7 +FileSummaryLN/7 +CkptEnd/7 +DelDupLN_TX/7 +DupCountLN_TX/7 +DelDupLN_TX/7 +DupCountLN_TX/7 +DelDupLN_TX/7 +DelDupLN_TX/7 +Abort/7 +NameLN/7 +MapLN/7 +BIN/7 +IN/7 +LN/7 +LN/7 +LN/7 +LN/7 +BIN/7 +BIN/7 +IN/7 +DupCountLN/7 +DBIN/7 +DIN/7 +DupCountLN/7 +LN/7 +CkptStart/7 +DupBINDelta/7 +DIN/7 +DBIN/7 +DIN/7 +BIN/7 +BINDelta/7 +BINDelta/7 +BINDelta/7 +BIN/7 +IN/7 +MapLN/7 +BINDelta/7 +IN/7 +DbTree/7 +MapLN/7 +MapLN/7 +FileSummaryLN/7 +CkptEnd/7 +DelDupLN/7 +DupCountLN/7 +DelDupLN/7 +DupCountLN/7 +DelDupLN/7 +DelDupLN/7 +IN/7 +MapLN/7 +INDupDelete/7 +INDupDelete/7 +IN/7 +MapLN/7 +FileSummaryLN/7 +MapLN/7 +MapLN/7 +NameLN_TX/7 +MapLN/7 +Commit/7 +BIN/7 +IN/7 +LN_TX/7 +Commit/7 +LN_TX/7 +Prepare/7 +Abort/7 +RollbackStart/7 +RollbackEnd/7 +Matchpoint/7 +Trace/7 +CkptStart/7 +BIN/7 +BIN/7 +BIN/7 +IN/7 +MapLN/7 +BIN/7 +BIN/7 +BIN/7 +MapLN/7 +MapLN/7 +DbTree/7 +BIN/7 +BIN/7 +IN/7 +FileSummaryLN/7 +CkptEnd/7 +Trace/7 +BIN/7 +BIN/7 +IN/7 +CkptStart/7 +BIN/7 +IN/7 +DbTree/7 +IN/7 +MapLN/7 +IN/7 +MapLN/7 +BIN/7 +BIN/7 +IN/7 +DbTree/7 +MapLN/7 +FileSummaryLN/7 +CkptEnd/7 +Trace/7 +CkptStart/7 +BIN/7 +IN/7 +MapLN/7 +BIN/7 +DbTree/7 +FileSummaryLN/7 +CkptEnd/7 diff --git a/test/com/sleepycat/je/logversion/je-5.0.39.jdb b/test/com/sleepycat/je/logversion/je-5.0.39.jdb new file mode 100644 index 0000000..b93915d Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-5.0.39.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-5.0.39.txt b/test/com/sleepycat/je/logversion/je-5.0.39.txt new file mode 100644 index 0000000..61263f4 --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-5.0.39.txt @@ -0,0 +1,132 @@ +FileHeader/8 +DbTree/8 +BIN/8 +IN/8 +NameLN_TX/8 +BIN/8 +IN/8 
+MapLN/8 +Commit/8 +CkptStart/8 +BIN/8 +IN/8 +DbTree/8 +BIN/8 +IN/8 +DbTree/8 +BIN/8 +IN/8 +FileSummaryLN/8 +CkptEnd/8 +NameLN_TX/8 +MapLN/8 +Commit/8 +BIN/8 +IN/8 +INS_LN_TX/8 +INS_LN_TX/8 +INS_LN_TX/8 +INS_LN_TX/8 +BIN/8 +BIN/8 +IN/8 +INS_LN_TX/8 +UPD_LN_TX/8 +CkptStart/8 +BINDelta/8 +IN/8 +DbTree/8 +BIN/8 +IN/8 +MapLN/8 +BINDelta/8 +IN/8 +MapLN/8 +BIN/8 +IN/8 +DbTree/8 +FileSummaryLN/8 +CkptEnd/8 +DEL_LN_TX/8 +DEL_LN_TX/8 +DEL_LN_TX/8 +DEL_LN_TX/8 +Abort/8 +NameLN/8 +MapLN/8 +BIN/8 +IN/8 +INS_LN/8 +INS_LN/8 +INS_LN/8 +INS_LN/8 +BIN/8 +BIN/8 +IN/8 +INS_LN/8 +UPD_LN/8 +CkptStart/8 +BINDelta/8 +IN/8 +DbTree/8 +BINDelta/8 +IN/8 +MapLN/8 +BIN/8 +BIN/8 +IN/8 +MapLN/8 +BINDelta/8 +IN/8 +MapLN/8 +BIN/8 +IN/8 +DbTree/8 +FileSummaryLN/8 +CkptEnd/8 +DEL_LN/8 +DEL_LN/8 +DEL_LN/8 +DEL_LN/8 +IN/8 +MapLN/8 +IN/8 +MapLN/8 +FileSummaryLN/8 +MapLN/8 +MapLN/8 +NameLN_TX/8 +MapLN/8 +Commit/8 +BIN/8 +IN/8 +INS_LN_TX/8 +Commit/8 +UPD_LN_TX/8 +Prepare/8 +Abort/8 +RollbackStart/8 +RollbackEnd/8 +Matchpoint/8 +Trace/8 +CkptStart/8 +BINDelta/8 +IN/8 +DbTree/8 +BINDelta/8 +IN/8 +MapLN/8 +BIN/8 +IN/8 +MapLN/8 +BIN/8 +IN/8 +MapLN/8 +BIN/8 +IN/8 +DbTree/8 +BIN/8 +BIN/8 +IN/8 +FileSummaryLN/8 +CkptEnd/8 diff --git a/test/com/sleepycat/je/logversion/je-6.0.13.jdb b/test/com/sleepycat/je/logversion/je-6.0.13.jdb new file mode 100644 index 0000000..968b77f Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-6.0.13.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-6.0.13.txt b/test/com/sleepycat/je/logversion/je-6.0.13.txt new file mode 100644 index 0000000..2e56f4f --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-6.0.13.txt @@ -0,0 +1,132 @@ +FileHeader/9 +DbTree/9 +BIN/9 +IN/9 +NameLN_TX/9 +BIN/9 +IN/9 +MapLN/9 +Commit/9 +CkptStart/9 +BIN/9 +IN/9 +DbTree/9 +BIN/9 +IN/9 +DbTree/9 +BIN/9 +IN/9 +FileSummaryLN/9 +CkptEnd/9 +NameLN_TX/9 +MapLN/9 +Commit/9 +BIN/9 +IN/9 +INS_LN_TX/9 +INS_LN_TX/9 +INS_LN_TX/9 +INS_LN_TX/9 +BIN/9 +BIN/9 +IN/9 +INS_LN_TX/9 +UPD_LN_TX/9 +CkptStart/9 +NewBINDelta/9 +IN/9 +DbTree/9 +BIN/9 +IN/9 +MapLN/9 +NewBINDelta/9 +IN/9 +MapLN/9 +BIN/9 +IN/9 +DbTree/9 +FileSummaryLN/9 +CkptEnd/9 +DEL_LN_TX/9 +DEL_LN_TX/9 +DEL_LN_TX/9 +DEL_LN_TX/9 +Abort/9 +NameLN/9 +MapLN/9 +BIN/9 +IN/9 +INS_LN/9 +INS_LN/9 +INS_LN/9 +INS_LN/9 +BIN/9 +BIN/9 +IN/9 +INS_LN/9 +UPD_LN/9 +CkptStart/9 +NewBINDelta/9 +IN/9 +DbTree/9 +NewBINDelta/9 +IN/9 +MapLN/9 +BIN/9 +BIN/9 +IN/9 +MapLN/9 +NewBINDelta/9 +IN/9 +MapLN/9 +BIN/9 +IN/9 +DbTree/9 +FileSummaryLN/9 +CkptEnd/9 +DEL_LN/9 +DEL_LN/9 +DEL_LN/9 +DEL_LN/9 +IN/9 +MapLN/9 +IN/9 +MapLN/9 +FileSummaryLN/9 +MapLN/9 +MapLN/9 +NameLN_TX/9 +MapLN/9 +Commit/9 +BIN/9 +IN/9 +INS_LN_TX/9 +Commit/9 +UPD_LN_TX/9 +Prepare/9 +Abort/9 +RollbackStart/9 +RollbackEnd/9 +Matchpoint/9 +Trace/9 +CkptStart/9 +NewBINDelta/9 +IN/9 +DbTree/9 +NewBINDelta/9 +IN/9 +MapLN/9 +BIN/9 +IN/9 +MapLN/9 +BIN/9 +IN/9 +MapLN/9 +BIN/9 +IN/9 +DbTree/9 +BIN/9 +BIN/9 +IN/9 +FileSummaryLN/9 +CkptEnd/9 diff --git a/test/com/sleepycat/je/logversion/je-6.2.12.jdb b/test/com/sleepycat/je/logversion/je-6.2.12.jdb new file mode 100644 index 0000000..d1b8d40 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-6.2.12.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-6.2.12.txt b/test/com/sleepycat/je/logversion/je-6.2.12.txt new file mode 100644 index 0000000..369671d --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-6.2.12.txt @@ -0,0 +1,132 @@ +FileHeader/10 +DbTree/10 +BIN/10 +IN/10 +NameLN_TX/10 +BIN/10 +IN/10 +MapLN/10 +Commit/10 +CkptStart/10 +BIN/10 +IN/10 +DbTree/10 
+BIN/10 +IN/10 +DbTree/10 +BIN/10 +IN/10 +FileSummaryLN/10 +CkptEnd/10 +NameLN_TX/10 +MapLN/10 +Commit/10 +BIN/10 +IN/10 +INS_LN_TX/10 +INS_LN_TX/10 +INS_LN_TX/10 +INS_LN_TX/10 +BIN/10 +BIN/10 +IN/10 +INS_LN_TX/10 +UPD_LN_TX/10 +CkptStart/10 +NewBINDelta/10 +IN/10 +DbTree/10 +BIN/10 +IN/10 +MapLN/10 +NewBINDelta/10 +IN/10 +MapLN/10 +BIN/10 +IN/10 +DbTree/10 +FileSummaryLN/10 +CkptEnd/10 +DEL_LN_TX/10 +DEL_LN_TX/10 +DEL_LN_TX/10 +DEL_LN_TX/10 +Abort/10 +NameLN/10 +MapLN/10 +BIN/10 +IN/10 +INS_LN/10 +INS_LN/10 +INS_LN/10 +INS_LN/10 +BIN/10 +BIN/10 +IN/10 +INS_LN/10 +UPD_LN/10 +CkptStart/10 +NewBINDelta/10 +IN/10 +DbTree/10 +NewBINDelta/10 +IN/10 +MapLN/10 +BIN/10 +BIN/10 +IN/10 +MapLN/10 +NewBINDelta/10 +IN/10 +MapLN/10 +BIN/10 +IN/10 +DbTree/10 +FileSummaryLN/10 +CkptEnd/10 +DEL_LN/10 +DEL_LN/10 +DEL_LN/10 +DEL_LN/10 +IN/10 +MapLN/10 +IN/10 +MapLN/10 +FileSummaryLN/10 +MapLN/10 +MapLN/10 +NameLN_TX/10 +MapLN/10 +Commit/10 +BIN/10 +IN/10 +INS_LN_TX/10 +Commit/10 +UPD_LN_TX/10 +Prepare/10 +Abort/10 +RollbackStart/10 +RollbackEnd/10 +Matchpoint/10 +Trace/10 +CkptStart/10 +NewBINDelta/10 +IN/10 +DbTree/10 +NewBINDelta/10 +IN/10 +MapLN/10 +BIN/10 +IN/10 +MapLN/10 +BIN/10 +IN/10 +MapLN/10 +BIN/10 +IN/10 +DbTree/10 +BIN/10 +BIN/10 +IN/10 +FileSummaryLN/10 +CkptEnd/10 diff --git a/test/com/sleepycat/je/logversion/je-6.4.14.jdb b/test/com/sleepycat/je/logversion/je-6.4.14.jdb new file mode 100644 index 0000000..3f44d09 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-6.4.14.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-6.4.14.txt b/test/com/sleepycat/je/logversion/je-6.4.14.txt new file mode 100644 index 0000000..b5cbcaf --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-6.4.14.txt @@ -0,0 +1,131 @@ +FileHeader/11 +DbTree/11 +BIN/11 +IN/11 +NameLN_TX/11 +BIN/11 +IN/11 +MapLN/11 +Commit/11 +CkptStart/11 +BIN/11 +IN/11 +DbTree/11 +BIN/11 +IN/11 +DbTree/11 +BIN/11 +IN/11 +FileSummaryLN/11 +CkptEnd/11 +NameLN_TX/11 +MapLN/11 +Commit/11 +BIN/11 +IN/11 +INS_LN_TX/11 +INS_LN_TX/11 +INS_LN_TX/11 +INS_LN_TX/11 +BIN/11 +BIN/11 +IN/11 +INS_LN_TX/11 +UPD_LN_TX/11 +CkptStart/11 +NewBINDelta/11 +IN/11 +DbTree/11 +BIN/11 +IN/11 +MapLN/11 +NewBINDelta/11 +IN/11 +MapLN/11 +BIN/11 +IN/11 +DbTree/11 +FileSummaryLN/11 +CkptEnd/11 +DEL_LN_TX/11 +DEL_LN_TX/11 +DEL_LN_TX/11 +DEL_LN_TX/11 +Abort/11 +NameLN/11 +MapLN/11 +BIN/11 +IN/11 +INS_LN/11 +INS_LN/11 +INS_LN/11 +INS_LN/11 +BIN/11 +BIN/11 +IN/11 +INS_LN/11 +UPD_LN/11 +CkptStart/11 +BIN/11 +BIN/11 +IN/11 +MapLN/11 +NewBINDelta/11 +IN/11 +DbTree/11 +NewBINDelta/11 +IN/11 +MapLN/11 +NewBINDelta/11 +IN/11 +MapLN/11 +BIN/11 +IN/11 +DbTree/11 +FileSummaryLN/11 +CkptEnd/11 +DEL_LN/11 +DEL_LN/11 +DEL_LN/11 +DEL_LN/11 +IN/11 +IN/11 +FileSummaryLN/11 +MapLN/11 +MapLN/11 +NameLN_TX/11 +MapLN/11 +Commit/11 +BIN/11 +IN/11 +INS_LN_TX/11 +Commit/11 +UPD_LN_TX/11 +Prepare/11 +Abort/11 +RollbackStart/11 +RollbackEnd/11 +Matchpoint/11 +ImmutableFile/11 +Trace/11 +CkptStart/11 +BIN/11 +IN/11 +MapLN/11 +NewBINDelta/11 +IN/11 +DbTree/11 +NewBINDelta/11 +IN/11 +MapLN/11 +BIN/11 +IN/11 +MapLN/11 +BIN/11 +IN/11 +DbTree/11 +BIN/11 +BIN/11 +IN/11 +FileSummaryLN/11 +CkptEnd/11 diff --git a/test/com/sleepycat/je/logversion/je-7.0.6.jdb b/test/com/sleepycat/je/logversion/je-7.0.6.jdb new file mode 100644 index 0000000..a400e88 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-7.0.6.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-7.0.6.txt b/test/com/sleepycat/je/logversion/je-7.0.6.txt new file mode 100644 index 0000000..53d55d7 --- 
/dev/null +++ b/test/com/sleepycat/je/logversion/je-7.0.6.txt @@ -0,0 +1,140 @@ +FileHeader/12 +DbTree/12 +BIN/12 +IN/12 +NameLN_TX/12 +BIN/12 +IN/12 +MapLN/12 +Commit/12 +NameLN_TX/12 +MapLN/12 +Commit/12 +CkptStart/12 +BIN/12 +IN/12 +DbTree/12 +BIN/12 +IN/12 +DbTree/12 +BIN/12 +IN/12 +FileSummaryLN/12 +CkptEnd/12 +NameLN_TX/12 +MapLN/12 +Commit/12 +BIN/12 +IN/12 +INS_LN_TX/12 +INS_LN_TX/12 +INS_LN_TX/12 +INS_LN_TX/12 +BIN/12 +BIN/12 +IN/12 +INS_LN_TX/12 +UPD_LN_TX/12 +CkptStart/12 +NewBINDelta/12 +IN/12 +DbTree/12 +BIN/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +DbTree/12 +FileSummaryLN/12 +CkptEnd/12 +DEL_LN_TX/12 +DEL_LN_TX/12 +DEL_LN_TX/12 +DEL_LN_TX/12 +Abort/12 +NameLN/12 +MapLN/12 +BIN/12 +IN/12 +INS_LN/12 +INS_LN/12 +INS_LN/12 +INS_LN/12 +BIN/12 +BIN/12 +IN/12 +INS_LN/12 +UPD_LN/12 +CkptStart/12 +BIN/12 +BIN/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +DbTree/12 +NewBINDelta/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +DbTree/12 +FileSummaryLN/12 +CkptEnd/12 +DEL_LN/12 +DEL_LN/12 +DEL_LN/12 +DEL_LN/12 +IN/12 +IN/12 +FileSummaryLN/12 +MapLN/12 +MapLN/12 +BIN/12 +BIN/12 +IN/12 +NameLN_TX/12 +BIN/12 +BIN/12 +IN/12 +MapLN/12 +Commit/12 +BIN/12 +IN/12 +INS_LN_TX/12 +Commit/12 +UPD_LN_TX/12 +Prepare/12 +Abort/12 +RollbackStart/12 +RollbackEnd/12 +Matchpoint/12 +ImmutableFile/12 +Trace/12 +CkptStart/12 +BIN/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +DbTree/12 +BIN/12 +IN/12 +MapLN/12 +NewBINDelta/12 +IN/12 +DbTree/12 +BIN/12 +BIN/12 +IN/12 +FileSummaryLN/12 +CkptEnd/12 diff --git a/test/com/sleepycat/je/logversion/je-7.1.9.jdb b/test/com/sleepycat/je/logversion/je-7.1.9.jdb new file mode 100644 index 0000000..7169cf4 Binary files /dev/null and b/test/com/sleepycat/je/logversion/je-7.1.9.jdb differ diff --git a/test/com/sleepycat/je/logversion/je-7.1.9.txt b/test/com/sleepycat/je/logversion/je-7.1.9.txt new file mode 100644 index 0000000..4404443 --- /dev/null +++ b/test/com/sleepycat/je/logversion/je-7.1.9.txt @@ -0,0 +1,140 @@ +FileHeader/13 +DbTree/13 +BIN/13 +IN/13 +NameLN_TX/13 +BIN/13 +IN/13 +MapLN/13 +Commit/13 +NameLN_TX/13 +MapLN/13 +Commit/13 +CkptStart/13 +BIN/13 +IN/13 +DbTree/13 +BIN/13 +IN/13 +DbTree/13 +BIN/13 +IN/13 +FileSummaryLN/13 +CkptEnd/13 +NameLN_TX/13 +MapLN/13 +Commit/13 +BIN/13 +IN/13 +INS_LN_TX/13 +INS_LN_TX/13 +INS_LN_TX/13 +INS_LN_TX/13 +BIN/13 +BIN/13 +IN/13 +INS_LN_TX/13 +UPD_LN_TX/13 +CkptStart/13 +NewBINDelta/13 +IN/13 +DbTree/13 +BIN/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +DbTree/13 +FileSummaryLN/13 +CkptEnd/13 +DEL_LN_TX/13 +DEL_LN_TX/13 +DEL_LN_TX/13 +DEL_LN_TX/13 +Abort/13 +NameLN/13 +MapLN/13 +BIN/13 +IN/13 +INS_LN/13 +INS_LN/13 +INS_LN/13 +INS_LN/13 +BIN/13 +BIN/13 +IN/13 +INS_LN/13 +UPD_LN/13 +CkptStart/13 +BIN/13 +BIN/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +DbTree/13 +NewBINDelta/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +DbTree/13 +FileSummaryLN/13 +CkptEnd/13 +DEL_LN/13 +DEL_LN/13 +DEL_LN/13 +DEL_LN/13 +IN/13 +IN/13 +FileSummaryLN/13 +MapLN/13 +MapLN/13 +BIN/13 +BIN/13 +IN/13 +NameLN_TX/13 +BIN/13 +BIN/13 +IN/13 +MapLN/13 +Commit/13 +BIN/13 +IN/13 +INS_LN_TX/13 +Commit/13 +UPD_LN_TX/13 +Prepare/13 +Abort/13 +RollbackStart/13 +RollbackEnd/13 +Matchpoint/13 +ImmutableFile/13 +Trace/13 +CkptStart/13 +BIN/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +DbTree/13 +BIN/13 +IN/13 +MapLN/13 +NewBINDelta/13 +IN/13 +DbTree/13 +BIN/13 +BIN/13 +IN/13 
+FileSummaryLN/13 +CkptEnd/13 diff --git a/test/com/sleepycat/je/logversion/maxversion.jdb b/test/com/sleepycat/je/logversion/maxversion.jdb new file mode 100644 index 0000000..84a2cb5 Binary files /dev/null and b/test/com/sleepycat/je/logversion/maxversion.jdb differ diff --git a/test/com/sleepycat/je/logversion/minversion.jdb b/test/com/sleepycat/je/logversion/minversion.jdb new file mode 100644 index 0000000..75abd9e Binary files /dev/null and b/test/com/sleepycat/je/logversion/minversion.jdb differ diff --git a/test/com/sleepycat/je/recovery/CheckBINDeltaTest.java b/test/com/sleepycat/je/recovery/CheckBINDeltaTest.java new file mode 100644 index 0000000..baa369b --- /dev/null +++ b/test/com/sleepycat/je/recovery/CheckBINDeltaTest.java @@ -0,0 +1,166 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.util.TestUtils; +import org.junit.Test; + +public class CheckBINDeltaTest extends CheckBase { + + private static final String DB_NAME = "simpleDB"; + private static final boolean DEBUG = false; + + /** + * SR #11123 + * Make sure that BIN-deltas are applied only to non-deleted nodes. + */ + @Test + public void testBINDelta() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); + envConfig.setConfigParam(EnvironmentParams.BIN_DELTA_PERCENT.getName(), + "75"); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + EnvironmentConfig restartConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(restartConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); + + testOneCase(DB_NAME, envConfig, dbConfig, + new TestGenerator() { + void generateData(Database db) + throws DatabaseException { + + addData(db); + } + }, + restartConfig, + new DatabaseConfig()); + } + + /** + * This test checks for the bug described in SR11123. If an IN and its + * child-subtree is deleted, an INDeleteInfo is written to the + * log. If there is a BIN-delta in the log for a BIN-child of the + * removed subtree (i.e. compressed), then recovery will apply it to the + * compressed IN. Since the IN has no data in * it, that is not + * necessarily a problem. However, reinstantiating the obsolete IN + * may cause a parent IN to split which is not allowed during IN + * recovery. 
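+     *
+     * (Illustration only, with hypothetical names delta and tree -- not
+     * JE's actual recovery code. The invariant this test exercises amounts
+     * to applying a replayed BIN-delta only when its target BIN still
+     * exists in the recovered tree:
+     * <pre>
+     *    BIN target = tree.findBin(delta.getTargetNodeId());
+     *    if (target != null) {
+     *        delta.applyTo(target);  // live BIN: apply the delta
+     *    }                           // else: BIN was compressed away; skip
+     * </pre>)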
+ * + * Here's the case: + * + * | + * IN1 + * +---------------------------------+ + * | | + * IN2 IN6 + * / | / | \ + * BIN3 BIN4 BIN7 BIN8 BIN9 + * + * IN2 and the subtree below are compressed away. During recovery + * replay, after the pass where INs and INDeleteINfos are + * processed, the in-memory tree looks like this: + * + * IN1 + * | + * IN6 + * / | \ + * BIN7 BIN8 BIN9 + * + * However, let's assume that BIN-deltas were written for + * BIN3, BIN4, BIN5 within the recovery part of the log, before the + * subtree was compressed. We'll replay those BIN-deltas in the + * following pass, and in the faulty implementation, they cause + * the ghosts of BIN3, BIN4 to be resurrected and applied to + * IN6. Let's assume that the max node size is 4 -- we won't be + * able to connect BIN3, BIN4 because IN6 doesn't have the + * capacity, and we don't expect to have to do splits. + */ + private void addData(Database db) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Populate a tree so there are 3 levels. */ + for (int i = 0; i < 140; i += 10) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + + Tree tree = DbInternal.getDbImpl(db).getTree(); + com.sleepycat.je.tree.Key.DUMP_TYPE = + com.sleepycat.je.tree.Key.DumpType.BINARY; + if (DEBUG) { + tree.dump(); + } + + /* + * Update a key on the BIN3 and a key on BIN4, to create reason for + * a BIN-delta. Force a BIN-delta for BIN3 and BIN4 out to the log. + */ + IntegerBinding.intToEntry(0, key); + IntegerBinding.intToEntry(100, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + IntegerBinding.intToEntry(20, key); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + + BIN bin = tree.getFirstNode(CacheMode.DEFAULT); + bin.log(true, false, false, null); + bin = tree.getNextBin(bin, CacheMode.DEFAULT); + bin.log(true, false, false, null); + bin.releaseLatch(); + + /* + * Delete all of left hand side of the tree, so that the subtree root + * headed by IN2 is compressed. + */ + for (int i = 0; i < 50; i+=10) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, db.delete(null, key)); + } + + /* force a compression */ + env.compress(); + if (DEBUG) { + tree.dump(); + } + } +} diff --git a/test/com/sleepycat/je/recovery/CheckBase.java b/test/com/sleepycat/je/recovery/CheckBase.java new file mode 100644 index 0000000..38587f5 --- /dev/null +++ b/test/com/sleepycat/je/recovery/CheckBase.java @@ -0,0 +1,515 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.recovery;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.junit.After;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.VerifyConfig;
+import com.sleepycat.je.cleaner.VerifyUtils;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.log.Trace;
+import com.sleepycat.je.recovery.stepwise.EntryTrackerReader;
+import com.sleepycat.je.recovery.stepwise.LogEntryInfo;
+import com.sleepycat.je.recovery.stepwise.TestData;
+import com.sleepycat.je.utilint.CmdUtil;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+public class CheckBase extends TestBase {
+
+    private static final boolean DEBUG = false;
+    private HashSet<TestData> expected;
+    private Set<TestData> found;
+
+    File envHome;
+    Environment env;
+
+    private List<LogEntryInfo> logDescription;
+    private long stepwiseStartLsn;
+
+    private boolean checkLsns = true;
+
+    public CheckBase() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @After
+    public void tearDown() {
+        if (env != null) {
+            try {
+                env.close();
+                env = null;
+            } catch (Exception ignore) {
+            }
+        }
+    }
+
+    /**
+     * Create an environment, generate data, record the expected values.
+     * Then close the environment and recover, and check that the expected
+     * values are there.
+     */
+    protected void testOneCase(String dbName,
+                               EnvironmentConfig startEnvConfig,
+                               DatabaseConfig startDbConfig,
+                               TestGenerator testGen,
+                               EnvironmentConfig validateEnvConfig,
+                               DatabaseConfig validateDbConfig)
+        throws Throwable {
+
+        try {
+            /* Create an environment. */
+            env = new Environment(envHome, startEnvConfig);
+            Database db = env.openDatabase(null, dbName, startDbConfig);
+
+            /* Generate test data. */
+            testGen.generateData(db);
+
+            /* Scan the database to save what values we should have. */
+            loadExpectedData(db);
+
+            /* Check for overlap between the tree and utilization profile. */
+            if (checkLsns) {
+                VerifyUtils.checkLsns(db);
+            }
+
+            /* Close w/out checkpointing. */
+            db.close();
+            DbInternal.getNonNullEnvImpl(env).close(false);
+            env = null;
+
+            if (testGen.generateLogDescription) {
+                makeLogDescription();
+            }
+
+            tryRecovery(validateEnvConfig,
+                        validateDbConfig,
+                        dbName,
+                        expected);
+        } catch (Throwable t) {
+            /* Dump stack trace before trying to tear down. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /* Run recovery and validation twice. */
+    private void tryRecovery(EnvironmentConfig validateEnvConfig,
+                             DatabaseConfig validateDbConfig,
+                             String dbName,
+                             HashSet<TestData> useExpected)
+        throws DatabaseException {
+
+        /* Recover and load what's in the database post-recovery. */
+        recoverAndLoadData(validateEnvConfig,
+                           validateDbConfig,
+                           dbName);
+
+        /* Check the pre and post recovery data. */
+        if (useExpected == null) {
+            useExpected = expected;
+        }
+        validate(useExpected);
+
+        /* Repeat the recovery and validation. */
+        recoverAndLoadData(validateEnvConfig,
+                           validateDbConfig,
+                           dbName);
+
+        validate(useExpected);
+    }
+
+    void setCheckLsns(boolean checkLsns) {
+        this.checkLsns = checkLsns;
+    }
+
+    /**
+     * Call this method to set the start of the stepwise loop. The stepwise
+     * testing will begin at this point in the log.
+     */
+    void setStepwiseStart() {
+
+        /*
+         * Put a tracing message both for debugging assistance, and also
+         * to force the truncation to start at this log entry, since we're
+         * getting the last used LSN.
+         */
+        Trace.trace(DbInternal.getNonNullEnvImpl(env), "StepwiseStart");
+        FileManager fileManager =
+            DbInternal.getNonNullEnvImpl(env).getFileManager();
+        stepwiseStartLsn = fileManager.getLastUsedLsn();
+    }
+
+    void stepwiseLoop(String dbName,
+                      EnvironmentConfig validateEnvConfig,
+                      DatabaseConfig validateDbConfig,
+                      HashSet<TestData> useExpected,
+                      int startingIteration)
+        throws DatabaseException, IOException {
+
+        assertTrue(logDescription.size() > 0);
+        saveLogFiles(envHome);
+
+        /* txnId -> set of TestData records for that txn. */
+        Map<Long, Set<TestData>> newUncommittedRecords =
+            new HashMap<Long, Set<TestData>>();
+        Map<Long, Set<TestData>> deletedUncommittedRecords =
+            new HashMap<Long, Set<TestData>>();
+
+        /* Now run recovery repeatedly, truncating at different locations. */
+        String status = null;
+        try {
+
+            /*
+             * Some tests are not working with starting at 0. As a workaround,
+             * start at another iteration.
+             */
+            for (int i = startingIteration; i < logDescription.size(); i++) {
+
+                /* Find out where to truncate. */
+                LogEntryInfo info = logDescription.get(i);
+                long lsn = info.getLsn();
+
+                if (lsn == 0) {
+                    continue;
+                }
+
+                status = "Iteration " + i + " out of " +
+                    logDescription.size() + " truncate at 0x" +
+                    DbLsn.getNoFormatString(lsn);
+
+                if (DEBUG) {
+                    System.out.println(status);
+                }
+
+                /* copy files back. */
+                resetLogFiles(envHome);
+
+                /* truncate */
+                truncateAtOffset(envHome, lsn);
+
+                /* recover */
+                tryRecovery(validateEnvConfig, validateDbConfig,
+                            dbName, useExpected);
+
+                /* Adjust the set of expected records for the next iteration. */
+                info.updateExpectedSet(useExpected, newUncommittedRecords,
+                                       deletedUncommittedRecords);
+            }
+        } catch (Error e) {
+            System.err.println("Failure at step: " + status);
+            throw e;
+        }
+    }
+
+    protected void turnOffEnvDaemons(EnvironmentConfig envConfig) {
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_CHECKPOINTER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.
+                                 ENV_RUN_INCOMPRESSOR.getName(),
+                                 "false");
+    }
+
+    /**
+     * Re-open the environment and load all data present, to compare to the
+     * data set of expected values.
+     */
+    protected void recoverAndLoadData(EnvironmentConfig envConfig,
+                                      DatabaseConfig dbConfig,
+                                      String dbName)
+        throws DatabaseException {
+
+        env = new Environment(envHome, envConfig);
+        Database db = env.openDatabase(null, dbName, dbConfig);
+
+        /* Check for overlap between the tree and utilization profile. */
+        if (checkLsns) {
+            VerifyUtils.checkLsns(db);
+        }
+
+        found = new HashSet<TestData>();
+
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        try {
+            while (cursor.getNext(key, data, null) ==
+                   OperationStatus.SUCCESS) {
+                TestData t = new TestData(key, data);
+                if (DEBUG) {
+                    System.out.println("found k=" +
+                                       IntegerBinding.entryToInt(key) +
+                                       " d=" +
+                                       IntegerBinding.entryToInt(data));
+                }
+                found.add(t);
+            }
+        }
+        finally {
+            cursor.close();
+        }
+
+        db.close();
+
+        assertTrue(env.verify(new VerifyConfig(), System.out));
+        env.close();
+    }
+
+    /*
+     * The found and expected data sets need to match exactly after recovery.
+     */
+    @SuppressWarnings("unchecked") // clone() returns Object
+    void validate(HashSet<TestData> expected) {
+        Set<TestData> useExpected = (Set<TestData>) expected.clone();
+
+        if (useExpected.size() != found.size()) {
+            System.err.println("expected---------");
+            dumpHashSet(useExpected);
+            System.err.println("actual---------");
+            dumpHashSet(found);
+            assertEquals("expected and found set sizes don't match",
+                         useExpected.size(), found.size());
+        }
+
+        Iterator<TestData> foundIter = found.iterator();
+        while (foundIter.hasNext()) {
+            TestData t = foundIter.next();
+            assertTrue("Missing " + t + " from the expected set",
+                       useExpected.remove(t));
+        }
+
+        if (useExpected.size() != 0) {
+            System.err.println("remaining---------");
+            dumpHashSet(useExpected);
+            assertEquals(
+                "Expected has " + useExpected.size() + " items remaining",
+                0, useExpected.size());
+        }
+    }
+
+    protected void putTestData(Database db,
+                               DatabaseEntry key,
+                               DatabaseEntry data)
+        throws DatabaseException {
+
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+    }
+
+    private void loadExpectedData(Database db)
+        throws DatabaseException {
+
+        expected = new HashSet<TestData>();
+
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        try {
+            while (cursor.getNext(key, data, null) ==
+                   OperationStatus.SUCCESS) {
+                if (DEBUG) {
+                    System.out.println("expect k=" +
+                                       IntegerBinding.entryToInt(key) +
+                                       " d=" +
+                                       IntegerBinding.entryToInt(data));
+                }
+                TestData t = new TestData(key, data);
+                expected.add(t);
+            }
+        }
+        finally {
+            cursor.close();
+        }
+    }
+
+    void dumpData(Database db)
+        throws DatabaseException {
+
+        Cursor cursor = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        int i = 0;
+        try {
+            while (cursor.getNext(key, data, null) ==
+                   OperationStatus.SUCCESS) {
+                TestData t = new TestData(key, data);
+                System.out.println(t);
+                i++;
+            }
+        }
+        finally {
+            cursor.close();
+        }
+        System.out.println("scanned=" + i);
+    }
+
+    private void dumpHashSet(Set<TestData> expected) {
+        Iterator<TestData> iter =
+            new TreeSet<TestData>(expected).iterator();
+        System.err.println("size=" + expected.size());
+        while (iter.hasNext()) {
+            System.err.println(iter.next());
+        }
+    }
+
+    private void makeLogDescription()
+        throws DatabaseException {
+
+        EnvironmentImpl cmdEnvImpl =
+            CmdUtil.makeUtilityEnvironment(envHome, false);
+        logDescription = new ArrayList<LogEntryInfo>();
+
+        try {
+            EntryTrackerReader reader =
+                new EntryTrackerReader(cmdEnvImpl,
+                                       stepwiseStartLsn,
+                                       logDescription);
+            while (reader.readNextEntry()) {
+            }
+        } finally {
+            cmdEnvImpl.close();
+        }
+
+        if (DEBUG) {
+            Iterator<LogEntryInfo> iter = logDescription.iterator();
+            while (iter.hasNext()) {
+                LogEntryInfo entryInfo = iter.next();
+                System.out.println(entryInfo);
+            }
+        }
+    }
+
+    /**
+     * 
Truncate the log at the specified offset. + */ + private void truncateAtOffset(File envHome, long lsn) + throws DatabaseException, IOException { + + EnvironmentImpl cmdEnvImpl = + CmdUtil.makeUtilityEnvironment(envHome, false); + + cmdEnvImpl.getFileManager().truncateLog(DbLsn.getFileNumber(lsn), + DbLsn.getFileOffset(lsn)); + + cmdEnvImpl.close(); + } + + /* Copy all .jdb files to .jdb_save for stepwise processing. */ + private void saveLogFiles(File envHome) + throws IOException { + + String[] suffix = new String[] {".jdb"}; + String[] fileNames = FileManager.listFiles(envHome, suffix, false); + + for (int i = 0; i < fileNames.length; i++) { + File src = new File(envHome, fileNames[i]); + File dst = new File(envHome, fileNames[i]+ "_save"); + copy(src, dst); + } + } + + /* Copy all .jdb_save file back to ._jdb */ + private void resetLogFiles(File envHome) + throws IOException { + String[] suffix = new String[] {".jdb_save"}; + String[] fileNames = FileManager.listFiles(envHome, suffix, false); + + for (int i = 0; i < fileNames.length; i++) { + String srcName = fileNames[i]; + int end = srcName.indexOf("_save"); + String dstName = srcName.substring(0, end); + copy(new File(envHome, srcName), new File(envHome, dstName)); + } + } + + private void copy(File src, File dst) + throws IOException { + + InputStream in = new FileInputStream(src); + OutputStream out = new FileOutputStream(dst); + + // Transfer bytes from in to out + byte[] buf = new byte[1024]; + int len; + while ((len = in.read(buf)) > 0) { + out.write(buf, 0, len); + } + in.close(); + out.close(); + } + + /* + * Each unit test overrides the generateData method. Don't make this + * abstract, because we may want different unit tests to call different + * flavors of generateData(), and we want a default implementation for each + * flavor. + * + * A unit test may also specify an implementation for truncateLog. When + * that happens, the truncation is done before the first recovery. + */ + protected class TestGenerator { + + /* If true, generate a log description to use in stepwise testing. */ + boolean generateLogDescription; + + public TestGenerator() { + } + + public TestGenerator(boolean generateLogDescription) { + this.generateLogDescription = generateLogDescription; + } + + /** + * @throws Exception in subclasses. + */ + void generateData(Database db) + throws Exception { + } + } +} diff --git a/test/com/sleepycat/je/recovery/CheckNewRootTest.java b/test/com/sleepycat/je/recovery/CheckNewRootTest.java new file mode 100644 index 0000000..5f869d2 --- /dev/null +++ b/test/com/sleepycat/je/recovery/CheckNewRootTest.java @@ -0,0 +1,431 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.util.HashSet; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.recovery.stepwise.TestData; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; + +/** + * Test situations where a new root is created + */ +public class CheckNewRootTest extends CheckBase { + + private static final boolean DEBUG = false; + private static final String DB_NAME = "simpleDB"; + + private final boolean useDups = false; + private static CheckpointConfig FORCE_CONFIG = new CheckpointConfig(); + static { + FORCE_CONFIG.setForce(true); + } + + /** + * Create a tree, make sure the root changes and is logged + * before any checkpointing. The bug found in [#13897] was this: + * + * 100 BIN a + * 110 RootIN b + * 120 MapLN points to root IN at 110 + * 130 RootIN b written as part of compression + * 140 ckpt start + * 150 ckpt end + * + * Since the compression was writing a root IN w/out updating the mapLN, + * the obsolete root at 110 was recovered instead of newer rootIN at 130. + */ + @Test + public void testWrittenByCompression() + throws Throwable { + + EnvironmentConfig envConfig = setupEnvConfig(); + DatabaseConfig dbConfig = setupDbConfig(); + + /* Run the full test case w/out truncating the log. */ + testOneCase(DB_NAME, envConfig, dbConfig, + new TestGenerator(true /* generate log description. */){ + @Override + void generateData(Database db) + throws DatabaseException { + setupWrittenByCompression(db); + } + }, + envConfig, dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each log entry. + * Our baseline expected set is empty -- no records expected. + */ + HashSet currentExpected = new HashSet(); + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + + /** + * Create a populated tree, delete all records, then begin to insert again. + */ + private void setupWrittenByCompression(Database db) + throws DatabaseException { + setStepwiseStart(); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Populate a tree so it grows to 2 levels, with 2 BINs. */ + for (int i = 0; i < 10; i ++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After inserts"); + env.checkpoint(FORCE_CONFIG); + if (DEBUG) { + System.out.println(db.getStats(new StatsConfig())); + } + + /* Now delete all of 1 BIN. */ + for (int i = 0; i < 5; i ++) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, db.delete(null, key)); + } + + /* Compress, removing a BIN. 
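+         * The compressor prunes the BIN emptied by the deletes above; in
+         * the buggy case described in [#13897], this rewrote the root IN
+         * without updating the MapLN that points to it.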
*/ + env.compress(); + if (DEBUG) { + System.out.println("After compress"); + System.out.println(db.getStats(new StatsConfig())); + } + + /* Checkpoint again. */ + env.checkpoint(FORCE_CONFIG); + } + + /** + * Create a tree, make sure the root changes and is logged + * before any checkpointing. The bug found in [#13897] was this: + * + * 110 RootIN b + * 120 MapLN points to root IN at 110 + * 130 BINb split + * 140 RootIN b written as part of split + * 150 ckpt start + * 160 ckpt end + * + * Since the compression was writing a root IN w/out updating the mapLN, + * the obsolete root at 110 was recovered instead of newer rootIN at 130. + */ + @Test + public void testWrittenBySplit() + throws Throwable { + + EnvironmentConfig envConfig = setupEnvConfig(); + DatabaseConfig dbConfig = setupDbConfig(); + + /* Run the full test case w/out truncating the log. */ + testOneCase(DB_NAME, envConfig, dbConfig, + new TestGenerator(true /* generate log description. */){ + @Override + void generateData(Database db) + throws DatabaseException { + setupWrittenBySplits(db); + } + }, + envConfig, dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each log entry. + * Our baseline expected set is empty -- no records expected. + */ + HashSet currentExpected = new HashSet(); + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + + /** + */ + private void setupWrittenBySplits(Database db) + throws DatabaseException { + setStepwiseStart(); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Create a tree and checkpoint. */ + IntegerBinding.intToEntry(0, key); + IntegerBinding.intToEntry(0, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + env.checkpoint(FORCE_CONFIG); + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After creation"); + + /* Populate a tree so it splits. */ + for (int i = 1; i < 6; i ++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After inserts"); + env.checkpoint(FORCE_CONFIG); + } + + /* + * Scenario from [#13897]: tree is created. Log looks like this + * provisional BIN + * root IN + * checkpoint start + * LN is logged but not yet attached to BIN + * checkpoint end + * BIN is dirtied, but is not part of checkpoint, because dirtying wasn't + * seen + * In this case, getParentForBIN hangs, because there is no root. + * This test is for debugging only, because it's not really possible to + * run a real checkpoint in the small window when the bin is not dirty. + * Attempts to run a checkpoint programmatically result in failing the + * assert that no latches are held when the inlist latch is taken. + * Instead, we do this pseudo checkpoint, to make the hang reproduce. But + * this test will still fail even with the fixed code because the fix + * now causes the rootIN to get re-logged, and the pseudo checkpoint + * doesn't do that logging. + */ + public void xxtestCreateNewTree() // This test for debugging only + throws Throwable { + + EnvironmentConfig envConfig = setupEnvConfig(); + DatabaseConfig dbConfig = setupDbConfig(); + + /* Run the full test case w/out truncating the log. */ + testOneCase(DB_NAME, envConfig, dbConfig, + new TestGenerator(true /* generate log description. 
*/){ + @Override + void generateData(Database db) + throws DatabaseException { + setupCreateNewTree(db); + } + }, + envConfig, dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each log entry. + * Our baseline expected set is empty -- no records expected. + */ + HashSet currentExpected = new HashSet(); + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + + /** + * Create a populated tree, delete all records, then begin to insert again. + */ + private void setupCreateNewTree(Database db) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + TestHook ckptHook = new CheckpointHook(env); + DbInternal.getDbImpl(db).getTree().setCkptHook(ckptHook); + + env.checkpoint(FORCE_CONFIG); + + /* + * Create in the log + * provisional BIN, IN, ckpt start, LN + */ + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(1, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + /* + * Force a checkpoint into the log. Use another thread, lest the asserts + * about held latches take effect. + */ + private static class CheckpointHook implements TestHook { + private final Environment env; + + CheckpointHook(Environment env) { + this.env = env; + } + + public void doHook() { + try { + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + SingleItemEntry startEntry = + SingleItemEntry.create(LogEntryType.LOG_CKPT_START, + new CheckpointStart(100, "test")); + long checkpointStart = envImpl.getLogManager().log + (startEntry, + ReplicationContext.NO_REPLICATE); + CheckpointEnd ckptEnd = new CheckpointEnd + ("test", + checkpointStart, + envImpl.getRootLsn(), + envImpl.getTxnManager().getFirstActiveLsn(), + envImpl.getNodeSequence().getLastLocalNodeId(), + envImpl.getNodeSequence().getLastReplicatedNodeId(), + envImpl.getDbTree().getLastLocalDbId(), + envImpl.getDbTree().getLastReplicatedDbId(), + envImpl.getTxnManager().getLastLocalTxnId(), + envImpl.getTxnManager().getLastReplicatedTxnId(), + 100, + true /*cleanedFilesToDelete*/); + SingleItemEntry endEntry = + SingleItemEntry.create(LogEntryType.LOG_CKPT_END, ckptEnd); + envImpl.getLogManager().logForceFlush + (endEntry, + true, // fsyncRequired + ReplicationContext.NO_REPLICATE); + } catch (DatabaseException e) { + fail(e.getMessage()); + } + } + + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + + public void doIOHook() { + throw new UnsupportedOperationException(); + } + + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + } + + /** + * Make sure eviction doesn't evict roots. If it did, we'd need to + * log the mapLN to be sure that recovery is correct. + */ + @Test + public void testChangeAndEvictRoot() + throws Throwable { + + EnvironmentConfig envConfig = setupEnvConfig(); + DatabaseConfig dbConfig = setupDbConfig(); + + /* Run the full test case w/out truncating the log. */ + testOneCase(DB_NAME, envConfig, dbConfig, + new TestGenerator(true /* generate log description. */){ + @Override + void generateData(Database db) + throws DatabaseException { + setupEvictedRoot(db); + } + }, + envConfig, dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each log entry. + * Our baseline expected set is empty -- no records expected. 
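+         * (Concretely, stepwiseLoop in CheckBase restores the saved .jdb
+         * files, truncates the log at the i'th entry, re-runs recovery, and
+         * compares the surviving records against the expected set.)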
+     */
+        HashSet<TestData> currentExpected = new HashSet<TestData>();
+        stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0);
+    }
+
+    /**
+     * Create a populated tree, add one more record so the INs are dirtied,
+     * then evict and checkpoint so that new versions of the INs are logged.
+     */
+    private void setupEvictedRoot(Database db)
+        throws DatabaseException {
+        setStepwiseStart();
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        /* Populate a tree so it grows to 2 levels, with 2 BINs. */
+        for (int i = 0; i < 10; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+        }
+
+        Trace.trace(DbInternal.getNonNullEnvImpl(env), "After inserts");
+        env.checkpoint(FORCE_CONFIG);
+
+        /*
+         * Add another record so that the eviction below will log
+         * different versions of the IN nodes.
+         */
+        IntegerBinding.intToEntry(10, key);
+        IntegerBinding.intToEntry(10, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
+
+        /* Evict. */
+        TestHook<Boolean> evictHook = new TestHook<Boolean>() {
+            public void doIOHook() {
+                throw new UnsupportedOperationException();
+            }
+            public void doHook() {
+                throw new UnsupportedOperationException();
+            }
+            public Boolean getHookValue() {
+                return Boolean.TRUE;
+            }
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+            public void doHook(Boolean obj) {
+                throw new UnsupportedOperationException();
+            }
+        };
+        DbInternal.getNonNullEnvImpl(env).getEvictor().
+            setRunnableHook(evictHook);
+        env.evictMemory();
+
+        /* Checkpoint again. */
+        env.checkpoint(FORCE_CONFIG);
+    }
+
+    private EnvironmentConfig setupEnvConfig() {
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        turnOffEnvDaemons(envConfig);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 "4");
+        envConfig.setAllowCreate(true);
+        return envConfig;
+    }
+
+    private DatabaseConfig setupDbConfig() {
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setSortedDuplicates(useDups);
+        dbConfig.setAllowCreate(true);
+        return dbConfig;
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/CheckReverseSplitsTest.java b/test/com/sleepycat/je/recovery/CheckReverseSplitsTest.java
new file mode 100644
index 0000000..ac52d8c
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/CheckReverseSplitsTest.java
@@ -0,0 +1,307 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; + +import java.util.HashSet; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.recovery.stepwise.TestData; +import com.sleepycat.je.util.TestUtils; + +/* + * Exercise reverse splits (deletes of subtrees). Add a comprehensive + * "stepwise" approach, where we run the test repeatedly, truncating the log + * at each log entry point. At recovery, we check that we have all expected + * values. In particular, this approach was required to reproduce SR [#13501], + * which only failed if the log was broken off at a given point, between + * the logging of an IN and the update of a mapln. + */ +public class CheckReverseSplitsTest extends CheckBase { + + private static final String DB_NAME = "simpleDB"; + + private int max = 12; + private boolean useDups; + private static CheckpointConfig FORCE_CONFIG = new CheckpointConfig(); + static { + FORCE_CONFIG.setForce(true); + } + + /** + * SR #13501 + * Reverse splits require the same upward propagation as regular splits, + * to avoid logging inconsistent versions of ancestor INs. + */ + @Test + public void testReverseSplit() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "4"); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(useDups); + dbConfig.setAllowCreate(true); + + /* Run the full test case w/out truncating the log. */ + testOneCase(DB_NAME, envConfig, dbConfig, + new TestGenerator(true /* generate log description */){ + void generateData(Database db) + throws DatabaseException { + setupReverseSplit(db); + } + }, + envConfig, dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each + * log entry. + */ + + /* Establish the base set of records we expect. */ + HashSet currentExpected = new HashSet(); + DatabaseEntry keyEntry = new DatabaseEntry(); + DatabaseEntry dataEntry = new DatabaseEntry(); + for (int i = 2; i < max; i++) { + if (useDups) { + IntegerBinding.intToEntry(0, keyEntry); + } else { + IntegerBinding.intToEntry(i, keyEntry); + } + IntegerBinding.intToEntry(i, dataEntry); + currentExpected.add(new TestData(keyEntry, dataEntry)); + } + + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + + @Test + public void testReverseSplitDups() + throws Throwable { + + useDups = true; + testReverseSplit(); + } + + /** + * Create this: + *

+ * <pre>
+
+                         INa                        level 3
+                   /           \
+                INb            INc                  level 2
+             /   |    \        /  \
+           BINs BINt  BINu   BINv  BINw             level 1
+     * </pre>
+ *
        + * First provoke an IN compression which removes BINs, and then + * provoke a split of BINw which results in propagating the change + * all the way up the tree. The bug therefore created a version of INa + * on disk which did not include the removal of BINs. + */ + private void setupReverseSplit(Database db) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Populate a tree so it grows to 3 levels. */ + for (int i = 0; i < max; i ++) { + if (useDups) { + IntegerBinding.intToEntry(0, key); + } else { + IntegerBinding.intToEntry(i, key); + } + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + /* Empty out the leftmost bin */ + Cursor c = db.openCursor(null, null); + try { + assertEquals(OperationStatus.SUCCESS, c.getFirst(key, data, + LockMode.DEFAULT)); + assertEquals(OperationStatus.SUCCESS, c.delete()); + assertEquals(OperationStatus.SUCCESS, + c.getFirst(key, data, LockMode.DEFAULT)); + assertEquals(OperationStatus.SUCCESS, c.delete()); + } finally { + c.close(); + } + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After deletes"); + + /* For log description start. */ + setStepwiseStart(); + + /* + * Checkpoint so that the deleted lns are not replayed, and recovery + * relies on INs. + */ + env.checkpoint(FORCE_CONFIG); + + /* Now remove the empty BIN. */ + env.compress(); + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After compress"); + + /* + * Add enough keys to split the level 2 IN on the right hand side. + * This makes an INa which still references the obsolete BINs. + * Truncate the log before the mapLN which refers to the new INa, + * else the case will not fail, because recovery will first apply the + * new INa, and then apply the INDelete of BINs. We want this case + * to apply the INDelete of BINs, and then follow with a splicing in + * of the new root. + */ + for (int i = max; i < max+13; i ++) { + if (useDups) { + IntegerBinding.intToEntry(0, key); + } else { + IntegerBinding.intToEntry(i, key); + } + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After data setup"); + + } + + /** + * Create a tree, remove it all, replace with new records. + */ + @Test + public void testCompleteRemoval() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "4"); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setSortedDuplicates(useDups); + dbConfig.setAllowCreate(true); + + /* Run the full test case w/out truncating the log. */ + testOneCase(DB_NAME, envConfig, dbConfig, + new TestGenerator(true /* generate log description. */){ + void generateData(Database db) + throws DatabaseException { + setupCompleteRemoval(db); + } + }, + envConfig, dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each log entry. + * Our baseline expected set is empty -- no records expected. + */ + HashSet currentExpected = new HashSet(); + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + + @Test + public void testCompleteRemovalDups() + throws Throwable { + + useDups = true; + testCompleteRemoval(); + } + + /** + * Create a populated tree, delete all records, then begin to insert again. 
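+ * As with the other cases in this class, the stepwise loop then replays
+ * this sequence with the log truncated at every entry boundary. A
+ * minimal sketch of that harness (the helper names here are
+ * illustrative, not the actual CheckBase API):
+ * <pre>
+ *   for (long offset : everyLogEntryBoundary) {
+ *       truncateLogAt(offset);            // hypothetical helper
+ *       recoverEnvironment();             // hypothetical helper
+ *       assertEquals(expectedRecordsAt(offset), readAllRecords());
+ *   }
+ * </pre>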
+ */ + private void setupCompleteRemoval(Database db) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Populate a tree so it grows to 3 levels. */ + for (int i = 0; i < max; i ++) { + if (useDups) { + IntegerBinding.intToEntry(0, key); + } else { + IntegerBinding.intToEntry(i, key); + } + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After inserts"); + + /* Now delete it all. */ + Cursor c = db.openCursor(null, null); + try { + int count = 0; + while (c.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + assertEquals(OperationStatus.SUCCESS, c.delete()); + count++; + } + } finally { + c.close(); + } + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After deletes"); + + /* For log description start. */ + setStepwiseStart(); + + /* Checkpoint before, so we don't simply replay all the deleted LNs */ + env.checkpoint(FORCE_CONFIG); + + /* Compress, and make sure the subtree was removed. */ + env.compress(); + BtreeStats stats = (BtreeStats) db.getStats(new StatsConfig()); + if (useDups) { + assertEquals(0, stats.getDuplicateInternalNodeCount()); + } else { + assertEquals(1, stats.getBottomInternalNodeCount()); + } + + /* Insert new data. */ + for (int i = max*2; i < ((max*2) +5); i ++) { + if (useDups) { + IntegerBinding.intToEntry(0, key); + } else { + IntegerBinding.intToEntry(i, key); + } + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + } +} diff --git a/test/com/sleepycat/je/recovery/CheckSplitAuntTest.java b/test/com/sleepycat/je/recovery/CheckSplitAuntTest.java new file mode 100644 index 0000000..e7db356 --- /dev/null +++ b/test/com/sleepycat/je/recovery/CheckSplitAuntTest.java @@ -0,0 +1,155 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; + +import java.util.HashSet; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.recovery.stepwise.TestData; +import com.sleepycat.je.util.TestUtils; + +/** + * The split aunt problem [#14424] is described in LevelRecorder. + * Also see [#23990] and [#24663]. 
+ */ +public class CheckSplitAuntTest extends CheckBase { + + private static final String DB_NAME = "simpleDB"; + + @Test + public void testSplitAunt() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "4"); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + EnvironmentConfig restartConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "4"); + envConfig.setTransactional(true); + + testOneCase(DB_NAME, + envConfig, + dbConfig, + new TestGenerator(true){ + void generateData(Database db) + throws DatabaseException { + setupSplitData(db); + } + }, + restartConfig, + new DatabaseConfig()); + + /* + * Now run the test in a stepwise loop, truncate after each + * log entry. We start the steps before the inserts, so the base + * expected set is empty. + */ + HashSet currentExpected = new HashSet(); + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + + private void setupSplitData(Database db) + throws DatabaseException { + + setStepwiseStart(); + + int max = 26; + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Populate a tree so it grows to 4 levels, then checkpoint. */ + for (int i = 0; i < max; i ++) { + IntegerBinding.intToEntry(i*10, key); + IntegerBinding.intToEntry(i*10, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + CheckpointConfig ckptConfig = new CheckpointConfig(); + Trace.trace(DbInternal.getNonNullEnvImpl(env), "First sync"); + env.sync(); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "Second sync"); + env.sync(); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "Third sync"); + env.sync(); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "Fourth sync"); + env.sync(); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "Fifth sync"); + env.sync(); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "Sync6"); + env.sync(); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "After sync"); + + /* + * Add a key to dirty the left hand branch. 4 levels are needed to + * create the problem scenario, because the single key added here, + * followed by a checkpoint, will always cause at least 2 levels to be + * logged -- that's the smallest maxFlushLevel for any checkpoint. And + * we must not dirty the root, so 3 levels is not enough. + */ + IntegerBinding.intToEntry(5, key); + IntegerBinding.intToEntry(5, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + Trace.trace + (DbInternal.getNonNullEnvImpl(env), "After single key insert"); + + /* + * A normal checkpoint should log the BIN and its parent IN, but no + * higher than level 2. The level 3 parent will be left dirty, but + * level 4 (the root) will not be dirtied. + */ + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "before split"); + + /* Add enough keys to split the right hand branch. 
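+        (With NODE_MAX set to 4, the seven keys 260..266 inserted below
+        overflow the rightmost BIN, and the resulting split must
+        propagate into the dirty level 3 parent left behind by the
+        checkpoint above.)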
*/ + for (int i = max*10; i < max*10 + 7; i ++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "after split"); + } +} diff --git a/test/com/sleepycat/je/recovery/CheckSplitsTest.java b/test/com/sleepycat/je/recovery/CheckSplitsTest.java new file mode 100644 index 0000000..f0db2fe --- /dev/null +++ b/test/com/sleepycat/je/recovery/CheckSplitsTest.java @@ -0,0 +1,392 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; + +import java.util.HashSet; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.recovery.stepwise.TestData; +import com.sleepycat.je.util.TestUtils; + +public class CheckSplitsTest extends CheckBase { + + private static final String DB_NAME = "simpleDB"; + private boolean useDups; + + /** + * Test basic inserts. + */ + @Test + public void testBasicInsert() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "4"); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(useDups); + + DatabaseConfig validateDbConfig = new DatabaseConfig(); + validateDbConfig.setSortedDuplicates(useDups); + + testOneCase(DB_NAME, + envConfig, + dbConfig, + new TestGenerator(true /* generate log description */){ + void generateData(Database db) + throws DatabaseException { + + setupBasicInsertData(db); + } + }, + envConfig, + validateDbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each + * log entry. We start the steps before the inserts, so the base + * expected set is empty. + */ + HashSet currentExpected = new HashSet(); + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + + @Test + public void testBasicInsertDups() + throws Throwable { + + useDups = true; + testBasicInsert(); + } + + private void setupBasicInsertData(Database db) + throws DatabaseException { + + setStepwiseStart(); + + /* If using dups, create several dup trees. 
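+        (The 21 inserts below with key i%3 build three duplicate trees of
+        seven records each; with NODE_MAX set to 4, each dup tree must
+        split at least once.)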
*/ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < 21; i++) { + if (useDups) { + IntegerBinding.intToEntry(i%3, key); + } else { + IntegerBinding.intToEntry(i, key); + } + IntegerBinding.intToEntry(i, data); + db.put(null, key, data); + } + } + + /** + * SR #10715 + * Splits must propagate up the tree at split time to avoid logging + * inconsistent versions of ancestor INs. + */ + @Test + public void testSplitPropagation() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + EnvironmentConfig restartConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + envConfig.setTransactional(true); + + testOneCase(DB_NAME, + envConfig, + dbConfig, + new TestGenerator(true){ + void generateData(Database db) + throws DatabaseException { + + setupSplitData(db); + } + }, + restartConfig, + new DatabaseConfig()); + + /* + * Now run the test in a stepwise loop, truncate after each + * log entry. We start the steps before the inserts, so the base + * expected set is empty. + */ + HashSet currentExpected = new HashSet(); + if (TestUtils.runLongTests()) { + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + } + + private void setupSplitData(Database db) + throws DatabaseException { + + setStepwiseStart(); + + int max = 120; + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Populate a tree so it grows to 4 levels, then checkpoint. */ + + for (int i = 0; i < max; i ++) { + IntegerBinding.intToEntry(i*10, key); + IntegerBinding.intToEntry(i*10, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + + /* Add enough keys to split the left hand branch again. */ + for (int i = 50; i < 100; i+=2) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + /* Add enough keys to split the right hand branch. */ + for (int i = 630; i < 700; i ++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "before split"); + + /* Add enough keys to split the left hand branch again. */ + for (int i = 58; i < 75; i++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + } + + /** + * [#13435] Checks that a DIN can be replayed with a full BIN parent. + * When a DIN is replayed, it may already be present in the parent BIN. + * Before fixing this bug, we searched without allowing splits and then + * called IN.insertEntry, which would throw InconsistentNodeException if + * the BIN was full. We now search with splits allowed, which avoids the + * exception; however, it causes a split when one is not needed. 
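+ * Roughly, the replay logic changed as follows (a sketch only; the
+ * search-mode names are illustrative, not the actual RecoveryManager
+ * code):
+ * <pre>
+ *   // before the fix:
+ *   BIN bin = tree.search(key, NO_SPLITS_ALLOWED);
+ *   bin.insertEntry(din);  // InconsistentNodeException if bin is full
+ *
+ *   // after the fix:
+ *   BIN bin = tree.search(key, SPLITS_ALLOWED); // may split needlessly
+ *   bin.insertEntry(din);
+ * </pre>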
+ * + * Note that an alternate fix would be to revert to an earlier version of + * RecoveryManager.replaceOrInsertDuplicateRoot (differences are between + * version 1.184 and 1.185). The older version searches for an existing + * entry, and then inserts if necessary. This would avoid the extra split. + * However, we had to search with splits allowed anyway to fix another + * problem -- see testBINSplitDuringDeletedDINReplay. + */ + @Test + public void testBINSplitDuringDINReplay() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + + testOneCase(DB_NAME, + envConfig, + dbConfig, + new TestGenerator(true){ + void generateData(Database db) + throws DatabaseException { + + setupBINSplitDuringDINReplay(db); + } + }, + envConfig, + dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each + * log entry. We start the steps before the inserts, so the base + * expected set is empty. + */ + HashSet currentExpected = new HashSet(); + if (TestUtils.runLongTests()) { + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + } + + /** + * Fill a BIN with entries, with a DIN in the first entry; then force the + * BIN to be flushed, as might occur via eviction or checkpointing. + */ + private void setupBINSplitDuringDINReplay(Database db) + throws DatabaseException { + + setStepwiseStart(); + + final int max = 128; + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(0, data); + assertEquals(OperationStatus.SUCCESS, + db.putNoOverwrite(null, key, data)); + IntegerBinding.intToEntry(1, data); + assertEquals(OperationStatus.SUCCESS, + db.putNoDupData(null, key, data)); + + Cursor cursor = db.openCursor(null, null); + + for (int i = 2; i <= max; i ++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(0, data); + assertEquals(OperationStatus.SUCCESS, + cursor.putNoOverwrite(key, data)); + } + + TestUtils.logBINAndIN(env, cursor); + + cursor.close(); + } + + /** + * [#13435] Checks that recovering a DIN causes a BIN split when needed. + * This occurs when a DIN has been deleted and subsequently the BIN is + * filled. The DIN and the INDupDelete will be be replayed; we will insert + * the DIN and then delete it. In order to insert it, we may need to split + * the BIN. The sequence is: + * + * LN-a + * (DupCountLN/) DIN (/DBIN/DupCountLN) + * LN-b + * DelDupLN-a (/DupCountLN) + * DelDupLN-b (/DupCountLN) + * INDupDelete compress + * LN-c/etc to fill the BIN + * BIN + * + * LN-a and LN-b are dups (same key). After being compressed away, the + * BIN is filled completely and flushed by the evictor or checkpointer. + * + * During recovery, when we replay the DIN and need to insert it into the + * full BIN, therefore we need to split. Before the bug fix, we did not + * search with splits allowed, and got an InconsistentNodeException. 
+ */ + @Test + public void testBINSplitDuringDeletedDINReplay() + throws Throwable { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + turnOffEnvDaemons(envConfig); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + + testOneCase(DB_NAME, + envConfig, + dbConfig, + new TestGenerator(true){ + void generateData(Database db) + throws DatabaseException { + + setupBINSplitDuringDeletedDINReplay(db); + } + }, + envConfig, + dbConfig); + + /* + * Now run the test in a stepwise loop, truncate after each + * log entry. We start the steps before the inserts, so the base + * expected set is empty. + */ + HashSet currentExpected = new HashSet(); + if (TestUtils.runLongTests()) { + stepwiseLoop(DB_NAME, envConfig, dbConfig, currentExpected, 0); + } + } + + /** + * Insert two dups, delete them, and compress to free the BIN entry; + * then fill the BIN with LNs and flush the BIN. + */ + private void setupBINSplitDuringDeletedDINReplay(Database db) + throws DatabaseException { + + setStepwiseStart(); + + int max = 128; + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + IntegerBinding.intToEntry(0, key); + IntegerBinding.intToEntry(0, data); + assertEquals(OperationStatus.SUCCESS, + db.putNoOverwrite(null, key, data)); + IntegerBinding.intToEntry(1, data); + assertEquals(OperationStatus.SUCCESS, + db.putNoDupData(null, key, data)); + + assertEquals(OperationStatus.SUCCESS, + db.delete(null, key)); + + env.compress(); + + Cursor cursor = db.openCursor(null, null); + + for (int i = 1; i <= max; i ++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(0, data); + assertEquals(OperationStatus.SUCCESS, + cursor.putNoOverwrite(key, data)); + } + + TestUtils.logBINAndIN(env, cursor); + + cursor.close(); + } +} diff --git a/test/com/sleepycat/je/recovery/CheckpointActivationTest.java b/test/com/sleepycat/je/recovery/CheckpointActivationTest.java new file mode 100644 index 0000000..bd12025 --- /dev/null +++ b/test/com/sleepycat/je/recovery/CheckpointActivationTest.java @@ -0,0 +1,290 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class CheckpointActivationTest extends TestBase { + + private final File envHome; + + public CheckpointActivationTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Write elements to the log, check that the right number of + * checkpoints ran. + */ + @Test + public void testLogSizeBasedCheckpoints() + throws Exception { + + final int CKPT_INTERVAL = 5000; + final int TRACER_OVERHEAD = 26; + final int N_TRACES = 100; + final int N_CHECKPOINTS = 10; + final int WAIT_FOR_CHECKPOINT_SECS = 10; + final int FILE_SIZE = 20000000; + + /* Init trace message with hyphens. */ + assertEquals(0, CKPT_INTERVAL % N_TRACES); + int msgBytesPerTrace = (CKPT_INTERVAL / N_TRACES) - TRACER_OVERHEAD; + StringBuilder traceBuf = new StringBuilder(); + for (int i = 0; i < msgBytesPerTrace; i += 1) { + traceBuf.append('-'); + } + String traceMsg = traceBuf.toString(); + + Environment env = null; + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentParams. + CHECKPOINTER_BYTES_INTERVAL.getName(), + String.valueOf(CKPT_INTERVAL)); + + /* + * This test needs to control exactly how much goes into the log, + * so disable daemons. + */ + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CLEANER.getName(), "false"); + env = new Environment(envHome, envConfig); + + /* + * Get a first reading on number of checkpoints run. Read once + * to clear, then read again. + */ + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setFast(true); + statsConfig.setClear(true); + EnvironmentStats stats = env.getStats(statsConfig); // clear stats + + stats = env.getStats(statsConfig); // read again + assertEquals(0, stats.getNCheckpoints()); + long lastCkptEnd = stats.getLastCheckpointEnd(); + + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + Thread ckptThread = envImpl.getCheckpointer().getThread(); + + /* Run several checkpoints to ensure they occur as expected. */ + for (int i = 0; i < N_CHECKPOINTS; i += 1) { + + /* Wait for checkpointer thread to go to wait state. */ + while (true) { + Thread.State state = ckptThread.getState(); + if (state == Thread.State.WAITING || + state == Thread.State.TIMED_WAITING) { + break; + } + } + + env.getStats(statsConfig); // Clear stats + + /* + * Write enough to prompt a checkpoint. 20% extra bytes are + * written to be sure that we exceed the checkpoint interval. 
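+             * For the constants above that works out as follows: each
+             * trace occupies CKPT_INTERVAL/N_TRACES = 50 bytes (24
+             * bytes of hyphens plus the 26 byte trace overhead), and
+             * the loop below keeps writing until at least
+             * 5000 + 1000 = 6000 bytes have been logged.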
+ */ + long lastLsn = envImpl.getFileManager().getNextLsn(); + while (DbLsn.getNoCleaningDistance + (lastLsn, envImpl.getFileManager().getNextLsn(), + FILE_SIZE) < CKPT_INTERVAL + + ((CKPT_INTERVAL * 2)/10)) { + Trace.trace(envImpl, traceMsg); + } + + /* + * Wait for a checkpoint to start (if the test succeeds it will + * start right away). We take advantage of the fact that the + * NCheckpoints stat is set at the start of a checkpoint. + */ + long startTime = System.currentTimeMillis(); + boolean started = false; + while (!started && + (System.currentTimeMillis() - startTime < + WAIT_FOR_CHECKPOINT_SECS * 1000)) { + Thread.yield(); + Thread.sleep(1); + stats = env.getStats(statsConfig); + if (stats.getNCheckpoints() > 0) { + started = true; + } + } + assertTrue("Checkpoint " + i + " did not start after " + + WAIT_FOR_CHECKPOINT_SECS + " seconds", + started); + + /* + * Wait for the checkpointer daemon to do its work. We do not + * want to continue writing until the checkpoint is complete, + * because the amount of data we write is calculated to be the + * correct amount in between checkpoints. We know the + * checkpoint is finished when the LastCheckpointEnd LSN + * changes. + */ + while (true) { + Thread.yield(); + Thread.sleep(1); + stats = env.getStats(statsConfig); + if (lastCkptEnd != stats.getLastCheckpointEnd()) { + lastCkptEnd = stats.getLastCheckpointEnd(); + break; + } + } + } + } catch (Exception e) { + + /* + * print stack trace now, else it gets subsumed in exceptions + * caused by difficulty in removing log files. + */ + e.printStackTrace(); + throw e; + } finally { + if (env != null) { + env.close(); + } + } + } + + /* Test programmatic call to checkpoint. */ + @Test + public void testApiCalls() + throws Exception { + + Environment env = null; + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentParams. + CHECKPOINTER_BYTES_INTERVAL.getName(), + "1000"); + + /* + * Try disabling stat capture thread to see if erroneous failures + * go away. We're doing a checkpoint, yet the nCheckoints stat is + * sometimes returned as zero. See comment on stat capture below. + */ + envConfig.setConfigParam(EnvironmentConfig.STATS_COLLECT, "false"); + + /* Disable all daemons */ + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam(EnvironmentParams. + ENV_RUN_CHECKPOINTER.getName(), "false"); + env = new Environment(envHome, envConfig); + + /* + * Get a first reading on number of checkpoints run. Read once + * to clear, then read again. + */ + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setFast(true); + statsConfig.setClear(true); + EnvironmentStats stats = env.getStats(statsConfig); // clear stats + + stats = env.getStats(statsConfig); // read again + assertEquals(0, stats.getNCheckpoints()); + + /* + * From the last checkpoint start LSN, there should be the + * checkpoint end log entry and a trace message. These take 196 + * bytes. + */ + CheckpointConfig checkpointConfig = new CheckpointConfig(); + + /* Should not cause a checkpoint, too little growth. 
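+            (Per the comment above, only about 196 bytes -- the
+            checkpoint end entry plus a trace message -- have been
+            logged, well under the 1 KB threshold that setKBytes(1)
+            establishes.)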
*/ + checkpointConfig.setKBytes(1); + env.checkpoint(checkpointConfig); + stats = env.getStats(statsConfig); // read again + assertEquals(0, stats.getNCheckpoints()); + + /* Fill up the log, there should be a checkpoint. */ + String filler = "123456789012345678901245678901234567890123456789"; + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + for (int i = 0; i < 20; i++) { + Trace.trace(envImpl, filler); + } + env.checkpoint(checkpointConfig); + stats = env.getStats(statsConfig); // read again + /* Following is sometimes 0. Try disabling stat capture above. */ + assertEquals(1, stats.getNCheckpoints()); + + /* Try time based, should not checkpoint. */ + checkpointConfig.setKBytes(0); + checkpointConfig.setMinutes(1); + env.checkpoint(checkpointConfig); + stats = env.getStats(statsConfig); // read again + assertEquals(0, stats.getNCheckpoints()); + + /* + * Sleep, enough time has passed for a checkpoint, but nothing was + * written to the log. + */ + Thread.sleep(1000); + env.checkpoint(checkpointConfig); + stats = env.getStats(statsConfig); // read again + assertEquals(0, stats.getNCheckpoints()); + + /* Log something, now try a checkpoint. */ + Trace.trace(envImpl, filler); + env.checkpoint(checkpointConfig); + stats = env.getStats(statsConfig); // read again + // TODO: make this test more timing independent. Sometimes + // the assertion will fail. + // assertEquals(1, stats.getNCheckpoints()); + + } catch (Exception e) { + /* + * print stack trace now, else it gets subsumed in exceptions + * caused by difficulty in removing log files. + */ + e.printStackTrace(); + throw e; + } finally { + if (env != null) { + env.close(); + } + } + } +} diff --git a/test/com/sleepycat/je/recovery/DbConfigUpdateRecoveryTest.java b/test/com/sleepycat/je/recovery/DbConfigUpdateRecoveryTest.java new file mode 100644 index 0000000..0e1e1bb --- /dev/null +++ b/test/com/sleepycat/je/recovery/DbConfigUpdateRecoveryTest.java @@ -0,0 +1,197 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LNFileReader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.log.entry.DbOperationType; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.NameLNLogEntry; +import com.sleepycat.je.log.entry.TraceLogEntry; +import com.sleepycat.je.util.DbTruncateLog; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class DbConfigUpdateRecoveryTest extends TestBase { + private static final String DB_NAME = "testDb"; + + private final File envHome; + private Environment env; + private Database db; + + public DbConfigUpdateRecoveryTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Override + @After + public void tearDown() { + + try { + if (db != null) { + db.close(); + } + if (env != null) { + env.close(); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + /* Test in a transactional Environment. */ + @Test + public void testTransactional() + throws Exception { + + doTest(true); + } + + /* Test in a non-transactional Environment. */ + @Test + public void testNonTransactional() + throws Exception { + + doTest(false); + } + + /* + * This test is exercising the following recovery scenario, discussed in SR + * [#18262]. An update of a database configuration results in the logging + * of a NameLN, followed by a MapLN. Since MapLNs are always + * non-transactional, there is nothing that links the NameLN and the MapLN. + * If the MapLN is not flushed to disk, and the NameLN alone is within the + * recovery processing part of the log, we expect recovery to succeed, and + * that the configuration change will not be persisted. + * + * This test will do the following things: + * 1. Open a database and do database config updates. + * 2. Write a tracer entry right after the updates so that the file is + * flipped, and we can be sure that there is no checkpoint after the + * logged MapLN, and that the NameLN will be in the recovery period. + * 3. Use the LNFileReader to read the updated NameLNLogEntry, and calculate + * the lsn of the entry right after the NameLNLogEntry. + * 4. Close the environment without doing a checkpoint. + * 5. Use DbTruncateLog to truncate log entries after the updated + * NameLNLogEntry. + * 6. Open the Environment again to see if the database config has updated. + */ + private void doTest(boolean transactional) + throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(transactional); + + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(transactional); + + /* Open a database. */ + db = env.openDatabase(null, DB_NAME, dbConfig); + db.close(); + + /* Update the DatabaseConfig. 
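+        (Reopening with setNodeMaxEntries(512) is the step that logs the
+        UPDATE_CONFIG NameLN followed by the non-transactional MapLN;
+        the truncation below cuts the log immediately after that NameLN.)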
*/ + dbConfig.setNodeMaxEntries(512); + db = env.openDatabase(null, DB_NAME, dbConfig); + assertEquals(512, db.getConfig().getNodeMaxEntries()); + db.close(); + + /* Flush the updated NameLN and MapLN. */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + LogManager logManager = envImpl.getLogManager(); + Trace tracer = new Trace("test message"); + LogEntry tracerEntry = new TraceLogEntry(tracer); + logManager.logForceFlush + (tracerEntry, false, ReplicationContext.NO_REPLICATE); + + /* Use FileReader to get the start lsn for the deleted entry. */ + LNFileReader reader = new LNFileReader(envImpl, + 1000, + DbLsn.NULL_LSN, + true, + DbLsn.NULL_LSN, + DbLsn.NULL_LSN, + null, + DbLsn.NULL_LSN); + reader.addTargetType(LogEntryType.LOG_NAMELN_TRANSACTIONAL); + reader.addTargetType(LogEntryType.LOG_NAMELN); + + /* Get the truncation start lsn. */ + long deleteLsn = 0; + while (reader.readNextEntry()) { + NameLNLogEntry entry = (NameLNLogEntry) reader.getLNLogEntry(); + if (entry.getOperationType() == DbOperationType.UPDATE_CONFIG) { + deleteLsn = reader.getLastLsn() + reader.getLastEntrySize(); + } + } + + assertTrue(deleteLsn > 0); + + /* Close the Environment without doing a checkpoint. */ + envImpl.close(false); + + /* If not delete, the updated config can be recovered. */ + env = new Environment(envHome, envConfig); + + dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(transactional); + dbConfig.setUseExistingConfig(true); + + db = env.openDatabase(null, DB_NAME, dbConfig); + assertEquals(512, db.getConfig().getNodeMaxEntries()); + + /* + * Close the database and Environment, because the DbTruncateLog needs + * to open an Environment. + */ + db.close(); + env.close(); + + /* Use DbTruncateLog deletes entries right after the updated NameLN. */ + DbTruncateLog truncate = new DbTruncateLog(); + truncate.truncateLog(envHome, + DbLsn.getFileNumber(deleteLsn), + DbLsn.getFileOffset(deleteLsn)); + + /* Open the Environment and database to see the updates are lost. */ + env = new Environment(envHome, envConfig); + + dbConfig = new DatabaseConfig(); + dbConfig.setUseExistingConfig(true); + + db = env.openDatabase(null, DB_NAME, dbConfig); + assertTrue(db.getConfig().getNodeMaxEntries() != 512); + } +} diff --git a/test/com/sleepycat/je/recovery/LNSlotReuseTest.java b/test/com/sleepycat/je/recovery/LNSlotReuseTest.java new file mode 100644 index 0000000..d4a1c4d --- /dev/null +++ b/test/com/sleepycat/je/recovery/LNSlotReuseTest.java @@ -0,0 +1,164 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test recovery redo of a LN, when the redo provokes slot reuse. + */ +public class LNSlotReuseTest extends TestBase { + private final File envHome; + + public LNSlotReuseTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * This test was motivated by SR [#17770], which had to do with the + * fact recovery redos were not appropriately clearing the known deleted + * and pending deleted fields in the BIN. When a slot is reused, those + * bits must be initialized properly so the LN does not erroneously seem + * be deleted. + * + * This unit test is trying to generate the following log sequence: + * 100 LNA (key xxx) is inserted + * 110 txn commit for insert of LNA + * 120 LNA is deleted + * 125 checkpoint start + * 130 BIN for key xxx, pending deleted bit for LNA is set, slot + * points to lsn 120. + * 135 checkpoint end + * 140 txn commit for delete of LNA (in memory, BIN's known deleted bit + is set, but it's not set in the log) + * 150 LNB (key xxx) is inserted, goes into slot for LNA. + * 160 txn commit for LNB. + * + * The goal is to provoke a recovery that runs from lsn 125->160. LNB is + * committed, but goes into a slot previously occupied by LNA. Since LNB's + * pending deleted state is incorrectly set, a call to Database.count() + * skips over the slot. + */ + @Test + public void testLNSlotReuse() + throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_WRITE_NO_SYNC); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + Environment env = null; + Database db = null; + + try { + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, "testDB", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(1024, key); + StringBinding.stringToEntry("herococo", data); + + /* + * Insert and delete a record, so our BIN will have a slot with + * pending deleted set. + */ + Transaction txn = env.beginTransaction(null, null); + db.put(txn, key, data); // insert record A + txn.commit(); + txn = env.beginTransaction(null, null); + db.delete(txn, key); // delete record A + + /* Checkpoint to flush our target BIN out to disk. */ + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + + /* + * Committing the deletion after the checkpoint means the BIN will + * go out with Pending Deleted set. If we commit before the + * checkpoint, the BIN will be compressed, the slot will be + * deleted, and we won't exercise slot reuse. 
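+             * In terms of the log sequence in the method comment above,
+             * the checkpoint just taken is entries 125/135 and this
+             * commit is entry 140: the Known Deleted bit is set only in
+             * memory, never in the logged BIN.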
+ */ + txn.commit(); + + /* Insert record B and reuse the slot previously held by A */ + txn = env.beginTransaction(null, null); + db.put(txn, key, data); + txn.commit(); + db.close(); + + /* Simulate a crash. */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.close(false); + + /* Do a recovery */ + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, "testDB", dbConfig); + + /* + * Compare counts obtained via a cursor traveral to a count from + * Database.count() The expected value is 1. + */ + Cursor cursor = db.openCursor(null, null); + int counter = 0; + while (OperationStatus.SUCCESS == + cursor.getNext(key, data, null)) { + counter++; + } + cursor.close(); + + /* + * We expect the count to be 1, and we expect the two methods to + * be equal. + */ + assertEquals(1, counter); + assertEquals(counter, db.count()); + } finally { + if (db != null) { + db.close(); + } + + if (env != null) { + env.close(); + } + } + } +} diff --git a/test/com/sleepycat/je/recovery/Level2SplitBugTest.java b/test/com/sleepycat/je/recovery/Level2SplitBugTest.java new file mode 100644 index 0000000..1859f35 --- /dev/null +++ b/test/com/sleepycat/je/recovery/Level2SplitBugTest.java @@ -0,0 +1,331 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Reproduces a problem to do with checkpointing and splits, and tests a fix. + * The problem was introduced with the off-heap implementation, although JE was + * never shipped with this bug. + * + * With the off-heap cache, when iterating over the INList to create the + * checkpoint dirty set, we must select dirty BINs via their parent rather + * than when they're encountered during the iteration. (This is explained in + * DirtyINMap.selectForCheckpoint.) When a level 2 is split, the references to + * half of the BINs go into the new sibling. 
Those dirty BINs weren't being + * counted if the split happened in the middle of the INList iteration and the + * new sibling did not happen to be iterated (ConcurrentHashMap doesn't + * guarantee that newly added elements will be seen by an in-progress + * iteration). The fix is to add the dirty BINs to the dirty set during the + * split via the DirtyINMap.coordinateSplitWithCheckpoint method. + */ +public class Level2SplitBugTest extends TestBase { + + private static final int NODE_MAX = 30; + private static final int N_RECORDS = 10 * NODE_MAX * NODE_MAX; + + private final File envHome; + private Environment env; + private Database db; + + public Level2SplitBugTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + try { + if (env != null) { + env.close(); + } + } finally { + env = null; + db = null; + } + } + + private void open() { + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam( + EnvironmentConfig.NODE_MAX_ENTRIES, String.valueOf(NODE_MAX)); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + env = new Environment(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void close() { + db.close(); + db = null; + env.close(); + env = null; + } + + private void write() { + + final DatabaseEntry key = new DatabaseEntry(); + + for (int i = 0; i < N_RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + final OperationStatus status = db.put(null, key, key); + assertSame(OperationStatus.SUCCESS, status); + } + } + + private void verify() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + int expectKey = 0; + + try (final Cursor cursor = db.openCursor(null, null)) { + + while (cursor.getNext(key, data, null) == + OperationStatus.SUCCESS) { + + assertEquals(expectKey, IntegerBinding.entryToInt(key)); + assertEquals(expectKey, IntegerBinding.entryToInt(data)); + + expectKey += 1; + } + } + } + + @Test + public void testLevel2SplitBug() { + + open(); + write(); + verify(); + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + + write(); + final int inListSize = envImpl.getInMemoryINs().getSize(); + + /* + * After the write() call above, all BINs are dirty. Collect the level + * 2 INs in our DB and their dirty BIN children. 
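+         * With N_RECORDS = 10 * 30 * 30 = 9000 and NODE_MAX = 30, the
+         * tree holds roughly 300 BINs under roughly 10 level 2 INs,
+         * which is why the assertions below expect at least 5 level 2
+         * INs, each with at least 2 dirty BIN children.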
+ */ + final List>> list = new ArrayList<>(); + + for (final IN in : envImpl.getInMemoryINs()) { + + if (in.getDatabase() != dbImpl) { + continue; + } + if (in.getNormalizedLevel() != 2) { + continue; + } + + final List dirtyBins = new ArrayList<>(); + list.add(new Pair<>(in, dirtyBins)); + + for (int i = 0; i < in.getNEntries(); i += 1) { + final BIN bin = (BIN) in.getTarget(i); + if (bin.getDirty()) { + dirtyBins.add(bin); + } + } + } + + assertTrue(list.size() >= 5); + + for (final Pair> pair : list) { + final List dirtyBins = pair.second(); + assertTrue(dirtyBins.size() >= 2); + } + + final Set newSiblings = new HashSet<>(); + + /* + * Hook is called after examining each IN during dirty map creation. + * We must do the splits after the iteration starts, so that the INs + * added to the INList by the splits (or at least some of them) are not + * seen in the iteration. + * + * When we do the splits we add all new siblings to newSiblings. The + * INs seen by the the iteration are removed from this set. At the end, + * the set should be non-empty for the test to be valid. + */ + class MyHook implements TestHook { + int nIters = 0; + boolean didSplits = false; + + public void doHook(IN inIterated) { + + nIters += 1; + + if (didSplits) { + newSiblings.remove(inIterated); + return; + } + + if (nIters < inListSize / 2) { + return; + } + + didSplits = true; + + /* + * Do splits in a separate thread to attempt to make the INs + * added by the split NOT appear to the checkpoint's INList + * iteration. This has been unreliable. + */ + final Thread splitThread = new Thread() { + @Override + public void run() { + for (final Pair> pair : list) { + + final IN child = pair.first(); + child.latch(CacheMode.UNCHANGED); + + final IN parent = child.latchParent(); + final int index = parent.getKnownChildIndex(child); + + assertTrue(parent.isRoot()); + + final IN newSibling = child.split( + parent, index, null /*grandParent*/, NODE_MAX); + + newSiblings.add(newSibling); + + child.releaseLatch(); + parent.releaseLatch(); + } + } + }; + + try { + splitThread.start(); + splitThread.join(30 * 1000); + final boolean completed = !splitThread.isAlive(); + while (splitThread.isAlive()) { + splitThread.interrupt(); + } + assertTrue(completed); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + +// System.out.println( +// "newSiblings initial size=" + newSiblings.size()); + } + + /* Unused methods. */ + public void doHook() { + throw new UnsupportedOperationException(); + } + public IN getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + }; + + final MyHook hook = new MyHook(); + + Checkpointer.examineINForCheckpointHook = hook; + try { + env.checkpoint(new CheckpointConfig().setForce(true)); + } finally { + Checkpointer.examineINForCheckpointHook = null; + } + +// System.out.println( +// "newSiblings final size=" + newSiblings.size()); + + assertTrue(hook.didSplits); + + if (newSiblings.isEmpty()) { + System.out.println( + "Unable to create conditions for test. ConcurrentHashMap " + + "(INList) iteration can't be relied on to return or not " + + "return items added during the iteration."); + close(); + return; + } + + /* + * All the BINs that were dirty before the checkpoint must be non-dirty + * now. Before the bug fix, some dirty BINs were not logged by the + * checkpoint, and the assertion below fired. 
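+         * With the fix, DirtyINMap.coordinateSplitWithCheckpoint (see the
+         * class comment) adds the new siblings' dirty children to the
+         * dirty set during the split, so every BIN collected above should
+         * have been logged and marked clean by the checkpoint.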
+ */ + for (final Pair> pair : list) { + final List dirtyBins = pair.second(); + for (final BIN bin : dirtyBins) { + assertFalse(bin.getDirty()); + } + } + + verify(); + close(); + + open(); + verify(); + close(); + } +} diff --git a/test/com/sleepycat/je/recovery/MultiEnvTest.java b/test/com/sleepycat/je/recovery/MultiEnvTest.java new file mode 100644 index 0000000..a69e25a --- /dev/null +++ b/test/com/sleepycat/je/recovery/MultiEnvTest.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class MultiEnvTest extends TestBase { + + private final File envHome1; + private final File envHome2; + + public MultiEnvTest() { + envHome1 = SharedTestUtils.getTestDir(); + envHome2 = new File(envHome1, + "propTest"); + } + + @Test + public void testNodeIdsAfterRecovery() { + + /* + * TODO: replace this test which previously checked that the node + * id sequence shared among environments was correct with a test + * that checks all sequences, including replicated ones. This + * change is appropriate because the node id sequence is no longer + * a static field. + */ + } +} diff --git a/test/com/sleepycat/je/recovery/Recovery2PCTest.java b/test/com/sleepycat/je/recovery/Recovery2PCTest.java new file mode 100644 index 0000000..e252c12 --- /dev/null +++ b/test/com/sleepycat/je/recovery/Recovery2PCTest.java @@ -0,0 +1,624 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import javax.transaction.xa.XAException; +import javax.transaction.xa.XAResource; +import javax.transaction.xa.Xid; + +import org.junit.Assume; +import org.junit.experimental.theories.DataPoint; +import org.junit.experimental.theories.Theories; +import org.junit.experimental.theories.Theory; +import org.junit.runner.RunWith; + +import com.sleepycat.je.Transaction; +import com.sleepycat.je.XAEnvironment; +import com.sleepycat.je.log.LogUtils.XidImpl; +import com.sleepycat.utilint.StringUtils; + +@RunWith(Theories.class) +public class Recovery2PCTest extends RecoveryTestBase { + private boolean explicitTxn; + private boolean commit; + private boolean recover; + + /* We only need to test XARecoveryAPI for implicit and explicit. 
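+       (With the two boolean @DataPoints below, each three-argument
+       @Theory runs against all 2^3 = 8 combinations; the
+       Assume.assumeTrue calls inside the theories silently skip the
+       combinations that a given test does not need.)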
*/ + @DataPoint + public static boolean enable = true; + + @DataPoint + public static boolean disable = false; + + private String opName() { + StringBuilder sb = new StringBuilder(); + + if (explicitTxn) { + sb.append("Exp"); + } else { + sb.append("Imp"); + } + + sb.append("-"); + + if (commit) { + sb.append("C"); + } else { + sb.append("A"); + } + + sb.append("-"); + + if (recover) { + sb.append("Rec"); + } else { + sb.append("No Rec"); + } + + return sb.toString(); + } + + @Theory + public void testDetectUnfinishedXATxns(boolean implicit, + boolean commitFlag, + boolean recoverFlag) + throws Throwable { + + Assume.assumeTrue(implicit); + Assume.assumeTrue(commitFlag); + Assume.assumeTrue(recoverFlag); + + explicitTxn = implicit; + commit = commitFlag; + recover = recoverFlag; + + customName = opName(); + + System.out.println("TestCase: Recovery2PCTest-" + + "testDetectUnfinishedXATxns-" + customName); + + createXAEnvAndDbs(1 << 20, false/*runCheckpointerDaemon*/, 1, false); + XAEnvironment xaEnv = (XAEnvironment) env; + int numRecs = 2; + + try { + Map> expectedData = + new HashMap>(); + + /* Insert all the data. */ + XidImpl xid = + new XidImpl(1, StringUtils.toUTF8("TwoPCTest1"), null); + Transaction txn = env.beginTransaction(null, null); + xaEnv.setXATransaction(xid, txn); + insertData(txn, 0, numRecs, expectedData, 1, commit, 1); + xaEnv.prepare(xid); + txn.commit(); + dbs[0].close(); + /* Close with a still-open XA Txn. */ + try { + xaEnv.close(); + fail("expected IllegalStateException"); + } catch (IllegalStateException DE) { + if (!DE.getMessage().contains("There is")) { + DE.printStackTrace(System.out); + fail("expected open XA message, but got " + + DE.getMessage()); + } + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Theory + public void testBasic(boolean implicit, + boolean commitFlag, + boolean recoverFlag) + throws Throwable { + + explicitTxn = implicit; + commit = commitFlag; + recover = recoverFlag; + + customName = opName(); + + System.out.println("TestCase: Recovery2PCTest-testBasic-" + customName); + + createXAEnvAndDbs(1 << 20, false/*runCheckpointerDaemon*/, + NUM_DBS, !recover); + XAEnvironment xaEnv = (XAEnvironment) env; + int numRecs = NUM_RECS * 3; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert all the data. */ + XidImpl xid = + new XidImpl(1, StringUtils.toUTF8("TwoPCTest1"), null); + Transaction txn = null; + if (explicitTxn) { + txn = env.beginTransaction(null, null); + xaEnv.setXATransaction(xid, txn); + } else { + xaEnv.start(xid, XAResource.TMNOFLAGS); + } + insertData(txn, 0, numRecs - 1, expectedData, 1, commit, NUM_DBS); + if (!explicitTxn) { + xaEnv.end(xid, XAResource.TMSUCCESS); + } + + xaEnv.prepare(xid); + + if (recover) { + closeEnv(); + xaRecoverOnly(); + xaEnv = (XAEnvironment) env; + } + + if (commit) { + xaEnv.commit(xid, false); + } else { + xaEnv.rollback(xid); + } + + if (recover) { + verifyData(expectedData, commit, NUM_DBS); + forceCloseEnvOnly(); + } else { + closeEnv(); + } + xaRecoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + /* Print stacktrace before trying to clean up files. 
*/ + t.printStackTrace(); + throw t; + } + } + + @Theory + public void testXARecoverAPI(boolean implicit, + boolean commitFlag, + boolean recoverFlag) + throws Throwable { + + Assume.assumeTrue(commitFlag); + Assume.assumeTrue(recoverFlag); + explicitTxn = implicit; + commit = commitFlag; + recover = recoverFlag; + + customName = opName(); + + System.out.println("TestCase: Recovery2PCTest-testXARecoverAPI-" + + customName); + + createXAEnvAndDbs(1 << 20, false/*runCheckpointerDaemon*/, + NUM_DBS << 1, false); + final XAEnvironment xaEnv = (XAEnvironment) env; + final int numRecs = NUM_RECS * 3; + + try { + /* Set up an repository of expected data. */ + final Map> expectedData1 = + new HashMap>(); + + final Map> expectedData2 = + new HashMap>(); + + /* Insert all the data. */ + final Transaction txn1 = + (explicitTxn ? + env.beginTransaction(null, null) : + null); + final Transaction txn2 = + (explicitTxn ? + env.beginTransaction(null, null) : + null); + final XidImpl xid1 = + new XidImpl(1, StringUtils.toUTF8("TwoPCTest1"), null); + final XidImpl xid2 = + new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + + Thread thread1 = new Thread() { + @Override + public void run() { + try { + if (explicitTxn) { + xaEnv.setXATransaction(xid1, txn1); + } else { + xaEnv.start(xid1, XAResource.TMNOFLAGS); + } + Thread.yield(); + insertData(txn1, 0, numRecs - 1, expectedData1, 1, + true, 0, NUM_DBS); + Thread.yield(); + if (!explicitTxn) { + xaEnv.end(xid1, XAResource.TMSUCCESS); + } + Thread.yield(); + } catch (Exception E) { + fail("unexpected: " + E); + } + } + }; + + Thread thread2 = new Thread() { + @Override + public void run() { + try { + if (explicitTxn) { + xaEnv.setXATransaction(xid2, txn2); + } else { + xaEnv.start(xid2, XAResource.TMNOFLAGS); + } + Thread.yield(); + insertData(txn2, numRecs, numRecs << 1, + expectedData2, 1, false, NUM_DBS, + NUM_DBS << 1); + Thread.yield(); + if (!explicitTxn) { + xaEnv.end(xid2, XAResource.TMSUCCESS); + } + Thread.yield(); + } catch (Exception E) { + fail("unexpected: " + E); + } + } + }; + + thread1.start(); + thread2.start(); + thread1.join(); + thread2.join(); + + xaEnv.prepare(xid1); + try { + xaEnv.prepare(xid1); + fail("should have thrown XID has already been registered"); + } catch (XAException XAE) { + // xid1 has already been registered. 
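+ // (Note that the test deliberately does not pin down which
+ // XAException error code a duplicate prepare() produces; it only
+ // verifies that preparing the same Xid twice is rejected.)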
+            }
+            xaEnv.prepare(xid2);
+
+            XAEnvironment xaEnv2 = xaEnv;
+            Xid[] unfinishedXAXids = xaEnv2.recover(0);
+            assertTrue(unfinishedXAXids.length == 2);
+            boolean sawXid1 = false;
+            boolean sawXid2 = false;
+            for (int i = 0; i < 2; i++) {
+                if (unfinishedXAXids[i].equals(xid1)) {
+                    if (sawXid1) {
+                        fail("saw Xid1 twice");
+                    }
+                    sawXid1 = true;
+                }
+                if (unfinishedXAXids[i].equals(xid2)) {
+                    if (sawXid2) {
+                        fail("saw Xid2 twice");
+                    }
+                    sawXid2 = true;
+                }
+            }
+            assertTrue(sawXid1 && sawXid2);
+
+            for (int ii = 0; ii < 4; ii++) {
+                forceCloseEnvOnly();
+                xaEnv2 = (XAEnvironment) env;
+                xaRecoverOnly();
+                xaEnv2 = (XAEnvironment) env;
+
+                unfinishedXAXids = xaEnv2.recover(0);
+                assertTrue(unfinishedXAXids.length == 2);
+                sawXid1 = false;
+                sawXid2 = false;
+                for (int i = 0; i < 2; i++) {
+                    if (unfinishedXAXids[i].equals(xid1)) {
+                        if (sawXid1) {
+                            fail("saw Xid1 twice");
+                        }
+                        sawXid1 = true;
+                    }
+                    if (unfinishedXAXids[i].equals(xid2)) {
+                        if (sawXid2) {
+                            fail("saw Xid2 twice");
+                        }
+                        sawXid2 = true;
+                    }
+                }
+                assertTrue(sawXid1 && sawXid2);
+            }
+
+            xaEnv2 = (XAEnvironment) env;
+            xaEnv2.getXATransaction(xid1);
+            xaEnv2.getXATransaction(xid2);
+            xaEnv2.commit(xid1, false);
+            xaEnv2.rollback(xid2);
+            verifyData(expectedData1, false, 0, NUM_DBS);
+            verifyData(expectedData2, false, NUM_DBS, NUM_DBS << 1);
+            forceCloseEnvOnly();
+            xaRecoverOnly();
+            verifyData(expectedData1, false, 0, NUM_DBS);
+            verifyData(expectedData2, false, NUM_DBS, NUM_DBS << 1);
+        } catch (Throwable t) {
+            /* Print stacktrace before trying to clean up files. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    @Theory
+    public void testXARecoverArgCheck(boolean implicit,
+                                      boolean commitFlag,
+                                      boolean recoverFlag)
+        throws Throwable {
+
+        Assume.assumeTrue(implicit);
+        Assume.assumeTrue(commitFlag);
+        Assume.assumeTrue(recoverFlag);
+
+        explicitTxn = implicit;
+        commit = commitFlag;
+        recover = recoverFlag;
+
+        customName = opName();
+
+        System.out.println("TestCase: Recovery2PCTest-testXARecoverArgCheck-" +
+                           customName);
+
+        createXAEnvAndDbs(1 << 20, false/*runCheckpointerDaemon*/,
+                          NUM_DBS, false);
+        XAEnvironment xaEnv = (XAEnvironment) env;
+
+        try {
+            XidImpl xid =
+                new XidImpl(1, StringUtils.toUTF8("TwoPCTest1"), null);
+
+            /* Check that only one of TMJOIN and TMRESUME can be set. */
+            try {
+                xaEnv.start(xid, XAResource.TMJOIN | XAResource.TMRESUME);
+                fail("Expected XAException(XAException.XAER_INVAL)");
+            } catch (XAException XAE) {
+                /* Expect this. */
+                assertTrue(XAE.errorCode == XAException.XAER_INVAL);
+            }
+
+            /*
+             * Check that a flag value that is invalid for start() (a bogus
+             * TMSUSPEND) is also rejected with XAER_INVAL.
+             */
+            try {
+                xaEnv.start(xid, XAResource.TMSUSPEND);
+                fail("Expected XAException(XAException.XAER_INVAL)");
+            } catch (XAException XAE) {
+                /* Expect this. */
+                assertTrue(XAE.errorCode == XAException.XAER_INVAL);
+            }
+
+            xaEnv.start(xid, XAResource.TMNOFLAGS);
+            try {
+                xaEnv.start(xid, XAResource.TMNOFLAGS);
+                fail("Expected XAER_DUPID");
+            } catch (XAException XAE) {
+                /* Expect this. */
+                assertTrue(XAE.errorCode == XAException.XAER_DUPID);
+            }
+            xaEnv.end(xid, XAResource.TMSUCCESS);
+
+            /*
+             * Check that JOIN with a non-existent association throws NOTA.
+             */
+            try {
+                xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null);
+                xaEnv.start(xid, XAResource.TMJOIN);
+                fail("Expected XAER_NOTA");
+            } catch (XAException XAE) {
+                /* Expect this. */
+                assertTrue(XAE.errorCode == XAException.XAER_NOTA);
+            }
+
+            /*
+             * Check that RESUME with a non-existent association throws NOTA.
+ */ + try { + xaEnv.start(xid, XAResource.TMRESUME); + fail("Expected XAER_NOTA"); + } catch (XAException XAE) { + /* Expect this. */ + assertTrue(XAE.errorCode == XAException.XAER_NOTA); + } + + /* + * Check that start(JOIN) from a thread that is already associated + * throws XAER_PROTO. + */ + Xid xid2 = new XidImpl(1, StringUtils.toUTF8("TwoPCTest3"), null); + xaEnv.start(xid2, XAResource.TMNOFLAGS); + xaEnv.end(xid2, XAResource.TMSUCCESS); + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + xaEnv.start(xid, XAResource.TMNOFLAGS); + try { + xaEnv.start(xid2, XAResource.TMJOIN); + fail("Expected XAER_PROTO"); + } catch (XAException XAE) { + /* Expect this. */ + assertTrue(XAE.errorCode == XAException.XAER_PROTO); + } + + /* + * Check that start(RESUME) for an xid that is not suspended throws + * XAER_PROTO. + */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + xaEnv.start(xid, XAResource.TMRESUME); + fail("Expected XAER_PROTO"); + } catch (XAException XAE) { + /* Expect this. */ + assertTrue(XAE.errorCode == XAException.XAER_PROTO); + } + + /* + * Check that end(TMFAIL | TMSUCCESS) throws XAER_INVAL. + */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + xaEnv.end(xid, XAResource.TMFAIL | XAResource.TMSUCCESS); + fail("Expected XAER_INVAL"); + } catch (XAException XAE) { + /* Expect this. */ + assertTrue(XAE.errorCode == XAException.XAER_INVAL); + } + + /* + * Check that end(TMFAIL | TMSUSPEND) throws XAER_INVAL. + */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + xaEnv.end(xid, XAResource.TMFAIL | XAResource.TMSUSPEND); + fail("Expected XAER_INVAL"); + } catch (XAException XAE) { + /* Expect this. */ + assertTrue(XAE.errorCode == XAException.XAER_INVAL); + } + + /* + * Check that end(TMSUCCESS | TMSUSPEND) throws XAER_INVAL. + */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + xaEnv.end(xid, XAResource.TMSUCCESS | XAResource.TMSUSPEND); + fail("Expected XAER_INVAL"); + } catch (XAException XAE) { + /* Expect this. */ + assertTrue(XAE.errorCode == XAException.XAER_INVAL); + } + + /* + * Check that end(TMSUSPEND) actually works. + */ + Xid xid4 = new XidImpl(1, StringUtils.toUTF8("TwoPCTest4"), null); + xaEnv.start(xid4, XAResource.TMNOFLAGS); + Transaction txn4 = xaEnv.getThreadTransaction(); + assertTrue(txn4 != null); + xaEnv.end(xid4, XAResource.TMSUSPEND); + assertTrue(xaEnv.getThreadTransaction() == null); + Xid xid5 = new XidImpl(1, StringUtils.toUTF8("TwoPCTest5"), null); + xaEnv.start(xid5, XAResource.TMNOFLAGS); + Transaction txn5 = xaEnv.getThreadTransaction(); + xaEnv.end(xid5, XAResource.TMSUSPEND); + assertTrue(xaEnv.getThreadTransaction() == null); + xaEnv.start(xid4, XAResource.TMRESUME); + assertTrue(xaEnv.getThreadTransaction().equals(txn4)); + xaEnv.end(xid4, XAResource.TMSUCCESS); + xaEnv.start(xid5, XAResource.TMRESUME); + assertTrue(xaEnv.getThreadTransaction().equals(txn5)); + xaEnv.end(xid5, XAResource.TMSUCCESS); + + /* + * Check TMFAIL. + */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest6"), null); + xaEnv.start(xid, XAResource.TMNOFLAGS); + xaEnv.end(xid, XAResource.TMFAIL); + xaEnv.commit(xid, false); + fail("Expected XA_RBROLLBACK"); + } catch (XAException XAE) { + /* Expect this. */ + assertTrue(XAE.errorCode == XAException.XA_RBROLLBACK); + } + xaEnv.rollback(xid); + + /* + * Check TMSUCCESS. 
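+ *
+ * (Summary of the argument checks in this method, flag combination ->
+ * expected XAException.errorCode:
+ *
+ *   start(TMJOIN | TMRESUME)             -> XAER_INVAL
+ *   start(TMSUSPEND)                     -> XAER_INVAL
+ *   start(), Xid already started         -> XAER_DUPID
+ *   start(TMJOIN/TMRESUME), unknown Xid  -> XAER_NOTA
+ *   start(TMJOIN), thread associated     -> XAER_PROTO
+ *   start(TMRESUME), not suspended       -> XAER_PROTO
+ *   end() with two flags combined        -> XAER_INVAL
+ *   end(TMFAIL), then commit()           -> XA_RBROLLBACK
+ * )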
+ */ + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest6"), null); + xaEnv.start(xid, XAResource.TMNOFLAGS); + xaEnv.end(xid, XAResource.TMSUCCESS); + xaEnv.commit(xid, false); + + /* + * Check start(); end(SUSPEND); end(SUCCESS). This is a case that + * JBoss causes to happen. It should succeed. + */ + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest7"), null); + xaEnv.start(xid, XAResource.TMNOFLAGS); + xaEnv.end(xid, XAResource.TMSUSPEND); + xaEnv.end(xid, XAResource.TMSUCCESS); + xaEnv.commit(xid, false); + + /* + * Check end(SUSPEND, SUCCESS, FAIL) with no start() call. + * This should fail. + */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest8"), null); + xaEnv.end(xid, XAResource.TMSUSPEND); + fail("Expected XAER_NOTA"); + } catch (XAException XAE) { + assertTrue(XAE.errorCode == XAException.XAER_NOTA); + } + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest9"), null); + xaEnv.end(xid, XAResource.TMSUCCESS); + fail("Expected XAER_NOTA"); + } catch (XAException XAE) { + assertTrue(XAE.errorCode == XAException.XAER_NOTA); + } + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest10"), null); + xaEnv.end(xid, XAResource.TMFAIL); + fail("Expected XAER_NOTA"); + } catch (XAException XAE) { + assertTrue(XAE.errorCode == XAException.XAER_NOTA); + } + + /* Check end(NOFLAGS), should fail. */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest11"), null); + xaEnv.start(xid, XAResource.TMNOFLAGS); + xaEnv.end(xid, XAResource.TMNOFLAGS); + fail("Expected XAER_INVAL"); + } catch (XAException XAE) { + assertTrue(XAE.errorCode == XAException.XAER_INVAL); + } + + /* Check end(SUSPEND), end(SUSPEND), should fail. */ + try { + xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest12"), null); + xaEnv.start(xid, XAResource.TMNOFLAGS); + xaEnv.end(xid, XAResource.TMSUSPEND); + xaEnv.end(xid, XAResource.TMSUSPEND); + fail("Expected XAER_PROTO"); + } catch (XAException XAE) { + assertTrue(XAE.errorCode == XAException.XAER_PROTO); + } + + forceCloseEnvOnly(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } +} diff --git a/test/com/sleepycat/je/recovery/RecoveryAbortTest.java b/test/com/sleepycat/je/recovery/RecoveryAbortTest.java new file mode 100644 index 0000000..f06d03c --- /dev/null +++ b/test/com/sleepycat/je/recovery/RecoveryAbortTest.java @@ -0,0 +1,709 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; + +public class RecoveryAbortTest extends RecoveryTestBase { + private static final boolean DEBUG = false; + + public RecoveryAbortTest() { + super(true); + } + + /** + * Insert data into several dbs, then abort. + */ + @Test + public void testBasic() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = NUM_RECS * 3; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert all the data. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs - 1, expectedData, 1, false, NUM_DBS); + txn.abort(); + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + /* Print stacktrace before trying to clean up files. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Test insert/abort with no duplicates. + */ + @Test + public void testInserts() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + EnvironmentImpl realEnv = DbInternal.getNonNullEnvImpl(env); + + int N = NUM_RECS; + + if (DEBUG) { + System.out.println(""); + } + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert 0 - N and commit. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, N - 1, expectedData, 1, true, NUM_DBS); + txn.commit(); + verifyData(expectedData, false, NUM_DBS); + + /* Insert N - 3N and abort. */ + txn = env.beginTransaction(null, null); + insertData(txn, N, (3 * N) - 1, expectedData, 1, false, NUM_DBS); + txn.abort(); + verifyData(expectedData, false, NUM_DBS); + + /* + * Wait for the incompressor queue to be processed, so we force the + * recovery to run w/IN delete replays. + */ + while (realEnv.getINCompressorQueueSize() > 0) { + Thread.sleep(10000); + } + + /* Insert 2N - 4N and commit. */ + txn = env.beginTransaction(null, null); + insertData(txn, (2 * N), (4 * N) - 1, expectedData, 1, true, + NUM_DBS); + txn.commit(); + verifyData(expectedData, false, NUM_DBS); + + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + + } catch (Throwable t) { + /* Print stacktrace before trying to clean up files. */ + t.printStackTrace(); + throw t; + } finally { + if (DEBUG) { + System.out.println(""); + } + } + } + + @Test + public void testMix() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + + int numRecs = NUM_RECS; + int numDups = 10; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert data without duplicates. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS); + + /* Insert more with duplicates, commit. 
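+ *
+ * (Reading guide, since every test here leans on the same helper:
+ * the apparent RecoveryTestBase convention is
+ *
+ *   insertData(txn, startKey, endKey, expectedData,
+ *              dupsPerKey, expectCommit, numDbs);
+ *
+ * where expectCommit says whether to record the new records in
+ * expectedData, so work done by a txn the test will abort stays out
+ * of the expected set that recovery is verified against.)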
*/ + insertData(txn, numRecs+1, (2*numRecs), expectedData, + numDups, true, NUM_DBS); + txn.commit(); + + /* Delete all and abort. */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, true, false, NUM_DBS); + txn.abort(); + + /* Delete every other and commit. */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, false, true, NUM_DBS); + txn.commit(); + + /* Modify some and abort. */ + txn = env.beginTransaction(null, null); + modifyData(txn, numRecs, expectedData, 3, false, NUM_DBS); + txn.abort(); + + /* Modify some and commit. */ + txn = env.beginTransaction(null, null); + modifyData(txn, numRecs/2, expectedData, 2, true, NUM_DBS); + txn.commit(); + + if (DEBUG) { + dumpData(NUM_DBS); + dumpExpected(expectedData); + com.sleepycat.je.tree.Key.DUMP_TYPE = + com.sleepycat.je.tree.Key.DumpType.BINARY; + DbInternal.getDbImpl(dbs[0]).getTree().dump(); + } + TestUtils.validateNodeMemUsage + (DbInternal.getNonNullEnvImpl(env), + false); + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + // print stacktrace before trying to clean up files + t.printStackTrace(); + throw t; + } + } + + @Test + public void testSR13726() + throws Throwable { + + int numDbs = 1; + + createEnvAndDbs(1 << 20, true, numDbs); + + try { + /* + * Insert data without duplicates, commit. This gets us a + * DupCountLN. + */ + Transaction txn = env.beginTransaction(null, null); + Cursor c = dbs[0].openCursor(txn, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + byte[] keyData = TestUtils.getTestArray(0); + byte[] dataData = TestUtils.byteArrayCopy(keyData); + key.setData(keyData); + data.setData(dataData); + for (int i = 0; i < 3; i++) { + data.setData(TestUtils.getTestArray(i)); + assertEquals("insert some dups", + c.put(key, data), + OperationStatus.SUCCESS); + } + c.close(); + txn.commit(); + + /* This gets us a DelDupLN in the slot in the BIN. */ + txn = env.beginTransaction(null, null); + assertEquals("delete initial dups", + dbs[0].delete(txn, key), + OperationStatus.SUCCESS); + txn.commit(); + + /* This gets the dup tree cleaned up. */ + env.compress(); + + /* Gets the BIN written out with knownDeleted=true. */ + closeEnv(); + recoverOnly(); + createDbs(null, numDbs); + + /* + * Tree now has a BIN referring to a DelDupLN. Add duplicates, + * and abort. + */ + txn = env.beginTransaction(null, null); + c = dbs[0].openCursor(txn, null); + for (int i = 0; i < 3; i++) { + data.setData(TestUtils.getTestArray(i)); + assertEquals("insert later dups", + c.put(key, data), + OperationStatus.SUCCESS); + } + c.close(); + txn.abort(); + + /* + * Now add duplicates again and commit. 
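+ *
+ * (The scan below cross-checks two views of the duplicate set:
+ * Cursor.count() returns the number of data items for the key at the
+ * cursor position, which must match the number of records visited by
+ * the full getNext() walk.)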
+ */ + txn = env.beginTransaction(null, null); + c = dbs[0].openCursor(txn, null); + for (int i = 0; i < 3; i++) { + data.setData(TestUtils.getTestArray(i)); + assertEquals("insert later dups", + c.put(key, data), + OperationStatus.SUCCESS); + } + c.close(); + txn.commit(); + + txn = env.beginTransaction(null, null); + c = dbs[0].openCursor(txn, null); + int count = 0; + while (c.getNext(key, data, null) == OperationStatus.SUCCESS) { + count++; + } + c.getSearchKey(key, data, null); + assertEquals("scanned count == count()", count, c.count()); + c.close(); + txn.commit(); + closeEnv(); + } catch (Throwable t) { + // print stacktrace before trying to clean up files + t.printStackTrace(); + throw t; + } + } + + /* + * Test the sequence where we have an existing record in the + * database; then in a separate transaction we delete that data + * and reinsert it and then abort that transaction. During the + * undo, the insert will be undone first (by deleting the record + * and setting knownDeleted true in the ChildReference); the + * deletion will be undone second by adding the record back into + * the database. The entry needs to be present in the BIN when we + * add it back in. But the compressor may be running at the same + * time and compress the entry out between the deletion and + * re-insertion making the entry disappear from the BIN. This is + * prevented by a lock being taken by the compressor on the LN, + * even if the LN is "knownDeleted". [#9465] + */ + @Test + public void testSR9465Part1() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = NUM_RECS; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert data without duplicates. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS); + txn.commit(); + + /* Delete all and abort. */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, true, false, NUM_DBS); + insertData(txn, 0, numRecs, expectedData, 1, false, NUM_DBS); + txn.abort(); + + txn = env.beginTransaction(null, null); + verifyData(expectedData, NUM_DBS); + txn.commit(); + + if (DEBUG) { + dumpData(NUM_DBS); + dumpExpected(expectedData); + com.sleepycat.je.tree.Key.DUMP_TYPE = + com.sleepycat.je.tree.Key.DumpType.BINARY; + DbInternal.getDbImpl(dbs[0]).getTree().dump(); + } + + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + /* Print stacktrace before trying to clean up files. */ + t.printStackTrace(); + throw t; + } + } + + @Test + public void testSR9465Part2() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = NUM_RECS; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert data without duplicates. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS); + txn.commit(); + + /* Delete all and abort. 
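+ *
+ * (Timeline exercised by these two tests, per the [#9465] discussion
+ * above -- on abort, operations are undone in reverse order:
+ *
+ *   committed:  record R exists
+ *   txn:        delete R, then re-insert R
+ *   abort:      undo the insert -> slot marked knownDeleted
+ *               undo the delete -> R restored into the same BIN slot
+ *
+ * The slot must survive any concurrent compression between the two
+ * undo steps, which the compressor's lock on the LN guarantees.)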
*/ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, true, false, NUM_DBS); + insertData(txn, 0, numRecs, expectedData, 1, false, NUM_DBS); + deleteData(txn, expectedData, true, false, NUM_DBS); + txn.abort(); + + if (DEBUG) { + dumpData(NUM_DBS); + dumpExpected(expectedData); + com.sleepycat.je.tree.Key.DUMP_TYPE = + com.sleepycat.je.tree.Key.DumpType.BINARY; + DbInternal.getDbImpl(dbs[0]).getTree().dump(); + } + + txn = env.beginTransaction(null, null); + verifyData(expectedData, NUM_DBS); + txn.commit(); + + if (DEBUG) { + dumpData(NUM_DBS); + dumpExpected(expectedData); + com.sleepycat.je.tree.Key.DUMP_TYPE = + com.sleepycat.je.tree.Key.DumpType.BINARY; + DbInternal.getDbImpl(dbs[0]).getTree().dump(); + } + + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + /* Print stacktrace before trying to clean up files. */ + t.printStackTrace(); + throw t; + } + } + + @Test + public void testSR9752Part1() + throws Throwable { + + createEnvAndDbs(1 << 20, false, NUM_DBS); + int numRecs = NUM_RECS; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert data without duplicates. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS); + txn.commit(); + + /* + * txn1 just puts a piece of data out to a database that won't + * be seen by deleteData or insertData. The idea is to hold + * the transaction open across the env.sync() so that firstActive + * comes before ckptStart. + */ + Transaction txn1 = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(new byte[] { 1, 2, 3, 4 }); + DatabaseEntry data = new DatabaseEntry(new byte[] { 4, 3, 2, 1 }); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(false); + dbConfig.setTransactional(true); + Database otherDb = env.openDatabase(txn1, "extradb", dbConfig); + otherDb.put(txn1, key, data); + + /* Delete all and abort. */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, false, false, NUM_DBS); + txn.abort(); + + /* Delete all and commit. */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, false, true, NUM_DBS); + txn.commit(); + + env.sync(); /* env.checkpoint does not seem to be sufficient. */ + txn1.commit(); + otherDb.close(); + + closeEnv(); + + if (DEBUG) { + dumpData(NUM_DBS); + dumpExpected(expectedData); + com.sleepycat.je.tree.Key.DUMP_TYPE = + com.sleepycat.je.tree.Key.DumpType.BINARY; + DbInternal.getDbImpl(dbs[0]).getTree().dump(); + } + + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + /* Print stacktrace before trying to clean up files. */ + t.printStackTrace(); + throw t; + } + } + + @Test + public void testSR9752Part2() + throws Throwable { + + createEnvAndDbs(1 << 20, false, NUM_DBS); + DbInternal.getNonNullEnvImpl(env).getCleaner().shutdown(); + int numRecs = NUM_RECS; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert data without duplicates. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS); + txn.commit(); + + /* + * txn1 just puts a piece of data out to a database that won't + * be seen by deleteData or insertData. The idea is to hold + * the transaction open across the env.sync() so that firstActive + * comes before ckptStart. 
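+ *
+ * (Resulting log layout -- recovery's undo pass must begin at
+ * firstActive, which now precedes the checkpoint interval:
+ *
+ *   ... txn1 begin (firstActive) ... ckptStart ... ckptEnd ...
+ *       txn1 commit ... env close
+ * )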
+ */ + Transaction txn1 = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(new byte[] { 1, 2, 3, 4 }); + DatabaseEntry data = new DatabaseEntry(new byte[] { 4, 3, 2, 1 }); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(false); + dbConfig.setTransactional(true); + Database otherDb = env.openDatabase(txn1, "extradb", dbConfig); + otherDb.put(txn1, key, data); + + /* Delete all and abort. */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, false, false, NUM_DBS); + txn.abort(); + + /* Delete all and commit. */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, false, true, NUM_DBS); + txn.commit(); + + env.sync(); /* env.checkpoint does not seem to be sufficient. */ + txn1.commit(); + otherDb.close(); + + closeEnv(); + + if (DEBUG) { + dumpData(NUM_DBS); + dumpExpected(expectedData); + com.sleepycat.je.tree.Key.DUMP_TYPE = + com.sleepycat.je.tree.Key.DumpType.BINARY; + DbInternal.getDbImpl(dbs[0]).getTree().dump(); + } + + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + /* Print stacktrace before trying to clean up files. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Insert dbs, commit some, abort some. To do: add db remove, rename. + */ + @Test + public void testDbCreateRemove() + throws Throwable { + + createEnv(1 << 20, true); + int N1 = 10; + int N2 = 50; + int N3 = 60; + int N4 = 70; + int N5 = 100; + + String dbName1 = "foo"; + String dbName2 = "bar"; + + try { + /* Make Dbs, abort */ + Transaction txn = env.beginTransaction(null, null); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + for (int i = 0; i < N2; i++) { + env.openDatabase(txn, dbName1 + i, dbConfig); + } + txn.abort(); + + /* All dbs should not exist */ + checkForNoDb(dbName1, 0, N2); + + /* Make more dbs, overlapping with some of the aborted set. */ + txn = env.beginTransaction(null, null); + for (int i = N1; i < N5; i++) { + Database db = env.openDatabase(txn, dbName1 + i, dbConfig); + db.close(); + } + txn.commit(); + + /* + * Dbs 0 - N1-1 shouldn't exist + * Dbs N1 - N5 should exist + */ + checkForNoDb(dbName1, 0, N1); + checkForDb(dbName1, N1, N5); + + /* Close and recover */ + env.close(); + + EnvironmentConfig envConfig1 = TestUtils.initEnvConfig(); + envConfig1.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig1.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(), + new Long(1 << 24).toString()); + envConfig1.setTransactional(true); + envConfig1.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + env = new Environment(envHome, envConfig1); + + /* + * Dbs 0 - N1-1 shouldn't exist + * Dbs N1 - N5 should exist + */ + checkForNoDb(dbName1, 0, N1); + checkForDb(dbName1, N1, N5); + + /* Remove some dbs, abort */ + txn = env.beginTransaction(null, null); + for (int i = N2; i < N3; i++) { + env.removeDatabase(txn, dbName1+i); + } + txn.abort(); + + /* Remove some dbs, abort -- use READ_COMMITTED [#23821]. 
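+ *
+ * (Same aborted removes as the block above, but under read-committed
+ * isolation, where read locks are released as each read completes;
+ * per [#23821] the abort must restore the databases under either
+ * isolation level.)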
*/ + txn = env.beginTransaction( + null, new TransactionConfig().setReadCommitted(true)); + for (int i = N3; i < N4; i++) { + env.removeDatabase(txn, dbName1+i); + } + txn.abort(); + + /* Remove some dbs, commit */ + txn = env.beginTransaction(null, null); + for (int i = N3; i < N4; i++) { + env.removeDatabase(txn, dbName1+i); + } + txn.commit(); + + /* + * Dbs 0 - N1-1 should not exist + * Dbs N1 - N3-1 should exist + * Dbs N3 - N4-1 should not exist + * Dbs N4 - N5-1 should exist + */ + checkForNoDb(dbName1, 0, N1); + checkForDb(dbName1, N1, N3); + checkForNoDb(dbName1, N3, N4); + checkForDb(dbName1, N4, N5); + + /* Close and recover */ + env.close(); + env = new Environment(envHome, envConfig1); + + /* + * Dbs 0 - N1-1 should not exist + * Dbs N1 - N3-1 should exist + * Dbs N3 - N4-1 should not exist + * Dbs N4 - N5-1 should exist + */ + checkForNoDb(dbName1, 0, N1); + checkForDb(dbName1, N1, N3); + checkForNoDb(dbName1, N3, N4); + checkForDb(dbName1, N4, N5); + + /* Rename some dbs, abort */ + txn = env.beginTransaction(null, null); + for (int i = N1; i < N3; i++) { + env.renameDatabase + (txn, dbName1+i, dbName2+i); + } + txn.abort(); + + /* Remove some dbs, commit */ + txn = env.beginTransaction(null, null); + for (int i = N2; i < N3; i++) { + env.renameDatabase + (txn, dbName1+i, dbName2+i); + } + txn.commit(); + + /* + * Dbs 0 - N1-1 should not exist + * Dbs N1 - N2-1 should exist with old name + * Dbs N2 - N3-1 should exist with new name + * Dbs N3 - N4 should not exist + * Dbs N4 - N5-1 should exist with old name + */ + checkForNoDb(dbName1, 0, N1); + checkForDb(dbName1, N1, N2); + checkForDb(dbName2, N2, N3); + checkForNoDb(dbName1, N3, N4); + checkForDb(dbName1, N4, N5); + } catch (Throwable t) { + /* print stacktrace before trying to clean up files. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Fail if any db from start - (end -1) exists + */ + private void checkForNoDb(String dbName, int start, int end) { + /* Dbs start - end -1 shouldn't exist */ + for (int i = start; i < end; i++) { + try { + env.openDatabase(null, dbName + i, null); + fail(DB_NAME + i + " shouldn't exist"); + } catch (DatabaseException e) { + } + } + } + + /** + * Fail if any db from start - (end -1) doesn't exist + */ + private void checkForDb(String dbName, int start, int end) { + /* Dbs start - end -1 should exist. */ + for (int i = start; i < end; i++) { + try { + Database checkDb = env.openDatabase(null, dbName + i, null); + checkDb.close(); + } catch (DatabaseException e) { + fail(e.getMessage()); + } + } + } +} diff --git a/test/com/sleepycat/je/recovery/RecoveryCheckpointTest.java b/test/com/sleepycat/je/recovery/RecoveryCheckpointTest.java new file mode 100644 index 0000000..1f26c2e --- /dev/null +++ b/test/com/sleepycat/je/recovery/RecoveryCheckpointTest.java @@ -0,0 +1,415 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.DbTree; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.TestUtils; + +import org.junit.Test; + +public class RecoveryCheckpointTest extends RecoveryTestBase { + + volatile int sequence = 0; + + @Override + public void setExtraProperties() { + + /* + * Make sure that the environments in this unit test always run with + * checkpointing off, so we can call it explicitly. + */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + } + + /** + * Run checkpoints on empty dbs. + */ + @Test + public void testEmptyCheckpoint() + throws Throwable { + + createEnvAndDbs(1 << 20, false, NUM_DBS); + + try { + + /* + * Run checkpoint on empty environment. Should be the second one + * run, the first was run by recovery when the environment was + * opened. + */ + env.checkpoint(forceConfig); + EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); + assertEquals(2, stats.getNCheckpoints()); + assertEquals(2, stats.getLastCheckpointId()); + + /* Shutdown, recover. */ + Map> expectedData = + new HashMap>(); + + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); // 2 checkpoints + + /* Another checkpoint. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + env.checkpoint(forceConfig); + stats = env.getStats(TestUtils.FAST_STATS); + + assertEquals(1, stats.getNCheckpoints()); + assertEquals(5, stats.getLastCheckpointId()); + + /* Shutdown, recover. */ + env.close(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Run checkpoints on empty dbs. + */ + @Test + public void testNoCheckpointOnOpenSR11861() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + + try { + + EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS); + assertEquals(1, stats.getNCheckpoints()); + assertEquals(1, stats.getLastCheckpointId()); + + /* Shutdown, recover. 
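+ *
+ * (What the stats assertions in this test pin down: opening an
+ * environment writes a checkpoint only when recovery actually
+ * replayed work, so a reopen after a clean close shows
+ * getNCheckpoints() == 0 and an unchanged getLastCheckpointId().)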
*/ + Map> expectedData = + new HashMap>(); + + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, 1, expectedData, 1, true, NUM_DBS); + txn.commit(); + closeEnv(); // closes without a checkpoint + recoverAndVerify(expectedData, NUM_DBS); // 2 checkpoints + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + stats = env.getStats(TestUtils.FAST_STATS); + assertEquals(0, stats.getNCheckpoints()); + assertEquals(3, stats.getLastCheckpointId()); + env.close(); + env = new Environment(envHome, envConfig); + stats = env.getStats(TestUtils.FAST_STATS); + assertEquals(0, stats.getNCheckpoints()); + assertEquals(3, stats.getLastCheckpointId()); + + /* Shutdown, recover. */ + env.close(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test checkpoints that end up using BIN-deltas -- the recovery must work. + */ + @Test + public void testBinDelta() + throws Throwable { + + createEnvAndDbs(1 << 20, false, NUM_DBS); + + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setClear(true); + + CheckpointConfig forceConfig = new CheckpointConfig(); + forceConfig.setForce(true); + + try { + + /* + * Insert 4 records (nodeMax is 6), checkpoint, then insert 1 + * record. The 1 record insertion will qualify for a delta, + * because the threshold percentage is 25%, and 25% of 4 is 1. + */ + int numRecs = 4; + Map> expectedData = + new HashMap>(); + + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs, expectedData, 1, true, NUM_DBS); + env.checkpoint(forceConfig); + insertData(txn, numRecs+1, numRecs+1, expectedData, + 1, true, NUM_DBS); + txn.commit(); + + /* + * This next checkpoint will end up using a BIN-delta to log the + * last inserted record. It will have practically nothing but the + * root in the checkpoint. + */ + EnvironmentStats stats = env.getStats(statsConfig); + env.checkpoint(forceConfig); + stats = env.getStats(statsConfig); + assertTrue(stats.getNDeltaINFlush() > 0); + + /* Shutdown, recover from a checkpoint that uses BIN-deltas. */ + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test the rollback of transactions that are active during a checkpoint. + */ + @Test + public void testActiveWhileCheckpointing() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + + try { + int numRecs = 1; + Map> expectedData = + new HashMap>(); + + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs, expectedData, 1, false, NUM_DBS); + + /* Now run a checkpoint while this operation hasn't finished. */ + env.checkpoint(forceConfig); + txn.abort(); + + /* Shutdown, recover. 
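+ *
+ * (The checkpoint above flushed INs containing the open txn's
+ * uncommitted insert; recovery must roll that insert back out of the
+ * tree it rebuilds from the checkpoint. expectedData was built with
+ * expectCommit=false, so verification expects none of those records.)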
*/ + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testSR11293() + throws Throwable { + + createEnv(1 << 20, false); + + Transaction dbTxn = env.beginTransaction(null, null); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final DbTree dbTree = envImpl.getDbTree(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + final Database db = env.openDatabase(dbTxn, "foo", dbConfig); + dbTxn.commit(); + final Transaction txn = env.beginTransaction(null, null); + sequence = 0; + + /** + * The sequence between the two tester threads is: + * + * tester2: write 1/1 into the database. This causes the initial tree + * to be created (IN/BIN/LN). Flush that out to the disk with a full + * checkpoint. Signal tester1 and wait. + * + * tester1: Lock the MapLN for "foo" db. Signal tester2 and wait. + * + * tester2: Add 2/2 to the tree which causes the BIN to be dirtied. + * Signal tester1 to continue, perform a full checkpoint which will + * causes the root IN to be dirtied and flushed. DbTree.modifyDbRoot + * will block on the MapLN lock held by tester1. + * + * tester1: while tester2 is blocking on the MapLN lock, this thread is + * sleeping. When it wakes up, it releases the MapLN lock by aborting + * the transaction. + * + * tester2: modifyDbRoot finally acquires the write lock on foo-db's + * MapLN write lock, performs the update to the DbTree and returns from + * the sync(). + */ + JUnitThread tester1 = + new JUnitThread("testSR11293DbTreeLocker") { + @Override + public void testBody() { + try { + /* Wait for tester2. */ + while (sequence < 1) { + Thread.yield(); + } + + /* Lock the MapLN for the database. */ + DatabaseId fooId = + DbInternal.getDbImpl(db).getId(); + DatabaseImpl fooDb = dbTree.getDb(fooId, 500000L); + assert fooDb != null; + + sequence++; + + /* Wait for tester2. 
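+ *
+ * (sequence == 3 means tester2 has dirtied the BIN with record 2/2
+ * and entered its second env.sync(); that checkpoint will block in
+ * DbTree.modifyDbRoot on the MapLN lock held by this thread until
+ * the abort below releases it.)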
*/ + while (sequence < 3) { + Thread.yield(); + } + + try { + Thread.sleep(3000); + } catch (Exception E) { + } + + try { + txn.abort(); + db.close(); + env.close(); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("unexpected exception: " + DBE); + } + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testSR11293DbWriter") { + @Override + public void testBody() { + try { + DatabaseEntry key = + new DatabaseEntry(new byte[] { 1 }); + DatabaseEntry data = + new DatabaseEntry(new byte[] { 1 }); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data)); + env.sync(); + + sequence++; + while (sequence < 2) { + Thread.yield(); + } + + key.setData(new byte[] { 2 }); + data.setData(new byte[] { 2 }); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data)); + sequence++; + env.sync(); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("unexpected exception: " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + + EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig(); + + recoveryConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + recoveryConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + recoveryConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + + env = new Environment(envHome, recoveryConfig); + dbConfig.setAllowCreate(false); + dbConfig.setTransactional(false); + Database db2 = env.openDatabase(null, "foo", dbConfig); + Cursor c = db2.openCursor(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + assertEquals(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + assertEquals((key.getData())[0], 1); + assertEquals((data.getData())[0], 1); + + assertEquals(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + assertEquals((key.getData())[0], 2); + assertEquals((data.getData())[0], 2); + assertEquals(OperationStatus.NOTFOUND, + c.getNext(key, data, LockMode.DEFAULT)); + + c.close(); + db2.close(); + env.close(); + } + + /* + * See what happens if someone calls checkpoint on a read only environment. + */ + @Test + public void testReadOnlyCheckpoint() + throws DatabaseException { + /* Create an environment, close. */ + EnvironmentConfig c = TestUtils.initEnvConfig(); + c.setAllowCreate(true); + Environment e = new Environment(envHome, c); + e.close(); + + /* Now open read only. */ + c.setAllowCreate(false); + c.setReadOnly(true); + e = new Environment(envHome, c); + try { + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + e.checkpoint(ckptConfig); + } finally { + e.close(); + } + } +} diff --git a/test/com/sleepycat/je/recovery/RecoveryDeleteTest.java b/test/com/sleepycat/je/recovery/RecoveryDeleteTest.java new file mode 100644 index 0000000..0f2548d --- /dev/null +++ b/test/com/sleepycat/je/recovery/RecoveryDeleteTest.java @@ -0,0 +1,81 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Test;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+
+public class RecoveryDeleteTest extends RecoveryTestBase {
+
+    @Override
+    protected void setExtraProperties() {
+        envConfig.setConfigParam(
+            EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(),
+            "false");
+    }
+
+    /* Make sure that we can recover after the entire tree is compressed away. */
+    @Test
+    public void testDeleteAllAndCompress()
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, false, NUM_DBS);
+        int numRecs = 10;
+
+        try {
+            // Set up a repository of expected data.
+            Map> expectedData =
+                new HashMap>();
+
+            // Insert all the data.
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /*
+             * Do two checkpoints here so that the INs that make up this new
+             * tree are not in the redo part of the log.
+             */
+            CheckpointConfig ckptConfig = new CheckpointConfig();
+            ckptConfig.setForce(true);
+            env.checkpoint(ckptConfig);
+            env.checkpoint(ckptConfig);
+            txn = env.beginTransaction(null, null);
+            insertData(txn, numRecs, numRecs + 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /* Delete all. */
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, true, NUM_DBS);
+            txn.commit();
+
+            /* This will remove the root. */
+            env.compress();
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            // Print stacktrace before trying to clean up files.
+            t.printStackTrace();
+            throw t;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryDeltaTest.java b/test/com/sleepycat/je/recovery/RecoveryDeltaTest.java
new file mode 100644
index 0000000..865bd52
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryDeltaTest.java
@@ -0,0 +1,379 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.evictor.Evictor.EvictionSource; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.txn.BasicLocker; + +/** + * Exercise delta BIN logging. + */ +public class RecoveryDeltaTest extends RecoveryTestBase { + + /** + * The recovery delta tests set extra properties. + */ + @Override + public void setExtraProperties() { + /* Always run with delta logging cranked up. */ + envConfig.setConfigParam + (EnvironmentParams.BIN_DELTA_PERCENT.getName(), "75"); + + /* + * Make sure that the environments in this unit test always + * run with checkpointing off, so we can call it explicitly. + */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + + /* + * Make sure that the environments in this unit test always + * run with the compressor off, so we get known results + */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + } + + /** + * Test the interaction of compression and deltas. After a compress, + * the next entry must be a full one. + */ + @Test + public void testCompress() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = 20; + try { + /* Set up an repository of expected data */ + Map> expectedData = + new HashMap>(); + + /* insert all the data */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS); + txn.commit(); + + /* delete every other record */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, false, true, NUM_DBS); + txn.commit(); + + /* Ask the compressor to run. */ + env.compress(); + + /* force a checkpoint, should avoid deltas.. */ + env.checkpoint(forceConfig); + + closeEnv(); + + recoverAndVerify(expectedData, NUM_DBS); + + } catch (Throwable t) { + /* print stacktrace before trying to clean up files */ + t.printStackTrace(); + throw t; + } + } + + /** + * Test a recovery that processes deltas. + */ + @Test + public void testRecoveryDelta() + throws Throwable { + + treeFanout = 16; + + createEnvAndDbs(1 << 20, true, NUM_DBS); + + try { + /* Set up a repository of expected data */ + Map> expectedData = + new HashMap>(); + + /* + * Force a checkpoint, to flush a full version of the BIN + * to disk, so the next checkpoint can cause deltas + */ + env.checkpoint(forceConfig); + + /* + * Use repeatable random sequence so that the number of deltas is + * consistent. + */ + Random rng = new Random(1); + + /* insert data */ + int numRecs = 80; + Transaction txn = env.beginTransaction(null, null); + insertRandomData(txn, rng, numRecs, expectedData, 1, false, 0, 1); + txn.commit(); + + /* + * Take a checkpoint. 
This causes a number of BIN-deltas to be + * logged. + */ + env.getStats(new StatsConfig().setClear(true)); + env.checkpoint(forceConfig); + EnvironmentStats envStats = env.getStats(null); + assertTrue(envStats.getNDeltaINFlush() > 5); + + /* insert data */ + numRecs = 20; + txn = env.beginTransaction(null, null); + insertRandomData(txn, rng, numRecs, expectedData, 1, false, 0, 1); + txn.commit(); + + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + + } catch (Throwable t) { + /* print stacktrace before trying to clean up files */ + t.printStackTrace(); + throw t; + } + } + + /** + * This test checks that reconstituting the bin deals properly with + * the knownDeleted flag + * insert key1, abort, checkpoint, -- after abort, childref KD = true; + * insert key1, commit, -- after commit, childref KD = false + * delete key1, abort, -- BinDelta should have KD = false + * checkpoint to write deltas, + * recover. verify key1 is present. -- reconstituteBIN should make KD=false + */ + @Test + public void testKnownDeleted() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = 20; + try { + + /* Set up a repository of expected data */ + Map> expectedData = + new HashMap>(); + + /* Insert data and abort. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs - 1, expectedData, 1, false, NUM_DBS); + + /* + * Add cursors to pin down BINs. Otherwise the checkpoint that + * follows will compress away all the values. + */ + Cursor[][] cursors = new Cursor[NUM_DBS][numRecs]; + addCursors(cursors); + txn.abort(); + + /* + * Force a checkpoint, to flush a full version of the BIN + * to disk, so the next checkpoint can cause deltas. + * These checkpointed BINS have known deleted flags set. + */ + env.checkpoint(forceConfig); + removeCursors(cursors); + + /* + * Insert every other data value, makes some known deleted flags + * false. + */ + txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs - 1, expectedData, 1, + true, true, NUM_DBS); + txn.commit(); + + /* Delete data and abort, keeps known delete flag true */ + txn = env.beginTransaction(null, null); + deleteData(txn, expectedData, true, false, NUM_DBS); + txn.abort(); + + /* This checkpoint should write deltas. */ + cursors = new Cursor[NUM_DBS][numRecs/2]; + addCursors(cursors); + env.getStats(new StatsConfig().setClear(true)); + env.checkpoint(forceConfig); + EnvironmentStats envStats = env.getStats(null); + assertTrue(envStats.getNDeltaINFlush() > 0); + removeCursors(cursors); + + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + + } catch (Throwable t) { + /* print stacktrace before trying to clean up files */ + t.printStackTrace(); + throw t; + } + } + + /** + * Checks that BIN-deltas written by eviction (non-provisionally) are + * applied properly by recovery. + */ + @Test + public void testEvictedDelta() + throws Throwable { + + createEnv(1 << 20, false /*runCheckpointerDaemon*/); + int numRecs = 200; + int numDbs = 30; + try { + /* Set up a repository of expected data */ + Map> expectedData = + new HashMap>(); + + env.checkpoint(forceConfig); + + createDbs(null /*txn*/, numDbs); + + /* + * Force eviction to write out non-provisional MapLN deltas. Must + * close DBs in order to evict MapLNs. We verified manually that + * MapLN deltas are written by evictAllBins. [#21401] + */ + closeDbs(); + evictAllBins(DbInternal.getNonNullEnvImpl(env). + getDbTree().getIdDatabaseImpl()); + + createDbs(null /*txn*/, numDbs); + + /* Insert data. 
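+ *
+ * (In this test it is eviction, not a checkpoint, that writes the
+ * BIN-deltas, and the evictor logs them non-provisionally; recovery
+ * must fetch the last full BIN and apply the delta on top of it while
+ * rebuilding the tree. [#21401])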
*/ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs - 1, expectedData, 1, true, numDbs); + txn.commit(); + + /* + * Force eviction to write out non-provisional deltas for app DBs. + * We verified manually that the deltas are written by + * evictAllBins. [#21401] + */ + for (Database db : dbs) { + evictAllBins(db); + } + + closeEnv(); + recoverAndVerify(expectedData, numDbs); + + } catch (Throwable t) { + /* print stacktrace before trying to clean up files */ + t.printStackTrace(); + throw t; + } + } + + private void evictAllBins(Database db) { + final Cursor cursor = db.openCursor(null, null); + final List bins = collectAllBins(cursor); + cursor.close(); + /* Must close cursor before evicting. */ + for (BIN bin : bins) { + evictBin(bin); + } + } + + private void evictAllBins(DatabaseImpl dbImpl) { + final BasicLocker locker = + BasicLocker.createBasicLocker(DbInternal.getNonNullEnvImpl(env)); + final Cursor cursor = DbInternal.makeCursor(dbImpl, locker, null); + final List bins = collectAllBins(cursor); + cursor.close(); + locker.operationEnd(); + /* Must close cursor before evicting. */ + for (BIN bin : bins) { + evictBin(bin); + } + } + + private List collectAllBins(Cursor cursor) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(0, 0, true); + final List bins = new ArrayList(); + BIN prevBin = null; + while (cursor.getNext(key, data, LockMode.READ_UNCOMMITTED) == + OperationStatus.SUCCESS) { + final BIN thisBin = DbInternal.getCursorImpl(cursor).getBIN(); + if (prevBin != thisBin) { + prevBin = thisBin; + bins.add(thisBin); + } + } + return bins; + } + + private void evictBin(BIN bin) { + bin.latch(CacheMode.UNCHANGED); + DbInternal.getNonNullEnvImpl(env).getEvictor().doTestEvict + (bin, EvictionSource.CACHEMODE); + } + + /* Add cursors on each value to prevent compression. */ + private void addCursors(Cursor[][] cursors) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Pin each record with a cursor. */ + for (int d = 0; d < NUM_DBS; d++) { + for (int i = 0; i < cursors[d].length; i++) { + cursors[d][i] = dbs[d].openCursor(null, null); + + for (int j = 0; j < i; j++) { + OperationStatus status = + cursors[d][i].getNext(key, data, + LockMode.READ_UNCOMMITTED); + assertEquals(OperationStatus.SUCCESS, status); + } + } + } + } + + private void removeCursors(Cursor[][] cursors) + throws DatabaseException { + for (int d = 0; d < NUM_DBS; d++) { + for (int i = 0; i < cursors[d].length; i++) { + cursors[d][i].close(); + } + } + } +} diff --git a/test/com/sleepycat/je/recovery/RecoveryDuplicatesTest.java b/test/com/sleepycat/je/recovery/RecoveryDuplicatesTest.java new file mode 100644 index 0000000..a66664e --- /dev/null +++ b/test/com/sleepycat/je/recovery/RecoveryDuplicatesTest.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.Transaction; + +public class RecoveryDuplicatesTest extends RecoveryTestBase { + + @Test + public void testDuplicates() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = 10; + int numDups = N_DUPLICATES_PER_KEY; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert all the data. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs - 1, expectedData, + numDups, true, NUM_DBS); + txn.commit(); + closeEnv(); + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicatesWithDeletion() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = 10; + int nDups = N_DUPLICATES_PER_KEY; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert all the data. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs -1, expectedData, nDups, true, NUM_DBS); + + /* Delete all the even records. */ + deleteData(txn, expectedData, false, true, NUM_DBS); + txn.commit(); + + /* Modify all the records. */ + // modifyData(expectedData); + + closeEnv(); + + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDuplicatesWithAllDeleted() + throws Throwable { + + createEnvAndDbs(1 << 20, true, NUM_DBS); + int numRecs = 10; + int nDups = N_DUPLICATES_PER_KEY; + + try { + /* Set up an repository of expected data. */ + Map> expectedData = + new HashMap>(); + + /* Insert all the data. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, numRecs - 1, expectedData, nDups, + true, NUM_DBS); + + /* Delete all data. */ + deleteData(txn, expectedData, true, true, NUM_DBS); + txn.commit(); + + /* Modify all the records. */ + // modifyData(expectedData); + closeEnv(); + + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } +} diff --git a/test/com/sleepycat/je/recovery/RecoveryEdgeTest.java b/test/com/sleepycat/je/recovery/RecoveryEdgeTest.java new file mode 100644 index 0000000..3a9b7d7 --- /dev/null +++ b/test/com/sleepycat/je/recovery/RecoveryEdgeTest.java @@ -0,0 +1,525 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.NodeSequence; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.SearchFileReader; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; + +import org.junit.Test; + +public class RecoveryEdgeTest extends RecoveryTestBase { + + @Test + public void testNoLogFiles() + throws Throwable { + + /* Creating an environment runs recovery. */ + Environment env = null; + try { + EnvironmentConfig noFileConfig = TestUtils.initEnvConfig(); + /* Don't checkpoint utilization info for this test. */ + DbInternal.setCheckpointUP(noFileConfig, false); + noFileConfig.setConfigParam + (EnvironmentParams.LOG_MEMORY_ONLY.getName(), "true"); + noFileConfig.setTransactional(true); + noFileConfig.setAllowCreate(true); + + env = new Environment(envHome, noFileConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + List dbList = envImpl.getDbTree().getDbNames(); + assertEquals("no dbs exist", 0, dbList.size()); + + /* Fake a shutdown/startup. */ + env.close(); + env = new Environment(envHome, noFileConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + dbList = envImpl.getDbTree().getDbNames(); + assertEquals("no dbs exist", 0, dbList.size()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (env != null) + env.close(); + } + } + + /** + * Test setting of the database ids in recovery. + */ + @Test + public void testDbId() + throws Throwable { + + Transaction createTxn = null; + try { + + /* + * Create an environment and three databases. The first four + * ids are allocated to the name db, id db and 2 cleaner dbs. + */ + EnvironmentConfig createConfig = TestUtils.initEnvConfig(); + createConfig.setTransactional(true); + createConfig.setAllowCreate(true); + createConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + env = new Environment(envHome, createConfig); + + final int nInitDbs = 5; + int numStartDbs = 1; + createTxn = env.beginTransaction(null, null); + + /* Check id of each db. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + for (int i = 0; i < numStartDbs; i++) { + Database anotherDb = env.openDatabase(createTxn, "foo" + i, + dbConfig); + assertEquals( + i + nInitDbs, + DbInternal.getDbImpl(anotherDb).getId().getId()); + anotherDb.close(); + } + createTxn.commit(); + env.close(); + + /* + * Go through a set of open, creates, and closes. Check id after + * recovery. 
+ */
+            EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+            envConfig.setTransactional(true);
+            createTxn = null;
+            for (int i = numStartDbs; i < numStartDbs + 3; i++) {
+                env = new Environment(envHome, envConfig);
+
+                createTxn = env.beginTransaction(null, null);
+                Database anotherDb = env.openDatabase(createTxn, "foo" + i,
+                                                      dbConfig);
+                assertEquals(
+                    i + nInitDbs,
+                    DbInternal.getDbImpl(anotherDb).getId().getId());
+                anotherDb.close();
+                createTxn.commit();
+                env.close();
+            }
+        } catch (Throwable t) {
+            if (createTxn != null) {
+                createTxn.abort();
+            }
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test setting the node ids in recovery.
+     */
+    @Test
+    public void testNodeId()
+        throws Throwable {
+
+        try {
+            /* Create an environment and databases. */
+            createEnvAndDbs(1024, true, NUM_DBS);
+            Map<TestData, Set<TestData>> expectedData =
+                new HashMap<TestData, Set<TestData>>();
+
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, 4, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            /* Find the largest node id that has been allocated. */
+            EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+            DatabaseImpl dbImpl = DbInternal.getDbImpl(dbs[0]);
+            NodeSequence nodeSequence = envImpl.getNodeSequence();
+            long maxSeenNodeId = nodeSequence.getLastLocalNodeId();
+
+            /* Close the environment, then recover. */
+            closeEnv();
+            EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
+            recoveryConfig.setConfigParam(
+                EnvironmentParams.NODE_MAX.getName(), "6");
+            recoveryConfig.setConfigParam(
+                EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                "false");
+            /* Don't checkpoint utilization info for this test. */
+            DbInternal.setCheckpointUP(recoveryConfig, false);
+            env = new Environment(envHome, recoveryConfig);
+            IN in = new IN(dbImpl, new byte[0], 1, 1);
+
+            /* Recovery should have initialized the next node id to use. */
+            assertTrue("maxSeenNodeId=" + maxSeenNodeId +
+                       " in=" + in.getNodeId(),
+                       maxSeenNodeId < in.getNodeId());
+            maxSeenNodeId = nodeSequence.getLastLocalNodeId();
+            assertEquals(NodeSequence.FIRST_REPLICATED_NODE_ID + 1,
+                         nodeSequence.getLastReplicatedNodeId());
+
+            /*
+             * One more time -- this recovery will get the node id off the
+             * checkpoint of the environment close. This checkpoint records
+             * the fact that the node id was bumped forward by the create of
+             * the IN above.
+             */
+            env.close();
+            env = new Environment(envHome, recoveryConfig);
+            in = new IN(dbImpl, new byte[0], 1, 1);
+            /*
+             * The environment re-opening will increment the node id
+             * several times because of the EOF node id.
+             */
+            assertTrue(maxSeenNodeId < in.getNodeId());
+            assertEquals(NodeSequence.FIRST_REPLICATED_NODE_ID + 1,
+                         nodeSequence.getLastReplicatedNodeId());
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test setting the txn id.
+     */
+    @Test
+    public void testTxnId()
+        throws Throwable {
+
+        try {
+            /* Create an environment and databases. */
+            createEnvAndDbs(1024, true, NUM_DBS);
+            Map<TestData, Set<TestData>> expectedData =
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Make txns before and after a checkpoint. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, 4, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+            env.checkpoint(forceConfig);
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 5, 6, expectedData, 1, false, NUM_DBS);
+
+            /* Find the largest txn id that has been allocated. */
+            long maxTxnId = txn.getId();
+            txn.abort();
+
+            /* Close the environment, then recover.
+ */
+            closeEnv();
+
+            EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig();
+            recoveryConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+            recoveryConfig.setTransactional(true);
+            env = new Environment(envHome, recoveryConfig);
+
+            /*
+             * Check that the next txn id is larger than the last seen.
+             * A few txn ids were eaten by AutoTxns during recovery; do
+             * a basic check that we didn't eat more than 11.
+             */
+            txn = env.beginTransaction(null, null);
+            createDbs(txn, NUM_DBS);
+            assertTrue(maxTxnId < txn.getId());
+            assertTrue((txn.getId() - maxTxnId) < 11);
+
+            /*
+             * Do something with this txn so a node with its value shows up in
+             * the log.
+             */
+            insertData(txn, 7, 8, expectedData, 1, false, NUM_DBS);
+            long secondMaxTxnId = txn.getId();
+            txn.abort();
+
+            /*
+             * One more time -- this recovery will get the txn id off the
+             * checkpoint of the second environment creation.
+             */
+            closeEnv();
+            env = new Environment(envHome, recoveryConfig);
+            txn = env.beginTransaction(null, null);
+            assertTrue(secondMaxTxnId < txn.getId());
+            assertTrue((txn.getId() - secondMaxTxnId) < 10);
+            txn.abort();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test writing a non-transactional db in a transactional environment.
+     * Make sure we can recover.
+     */
+    @Test
+    public void testNonTxnalDb()
+        throws Throwable {
+
+        createEnv(1024, false);
+        try {
+
+            /*
+             * Create a database, write into it non-txnally. Should be
+             * allowed.
+             */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            Database dbA = env.openDatabase(null, "NotTxnal", dbConfig);
+
+            DatabaseEntry key = new StringDbt("foo");
+            DatabaseEntry data = new StringDbt("bar");
+            dbA.put(null, key, data);
+
+            /*
+             * Close and recover -- the database should still be there
+             * because we're shutting down clean.
+             */
+            dbA.close();
+            env.close();
+            createEnv(1024, false);
+
+            dbA = env.openDatabase(null, "NotTxnal", null);
+            dbA.close();
+
+            /*
+             * Create a database, auto commit. Then write a record.
+             * The database should exist after recovery.
+             */
+            dbConfig.setTransactional(true);
+            Database dbB = env.openDatabase(null, "Txnal", dbConfig);
+            dbB.close();
+            dbB = env.openDatabase(null, "Txnal", null);
+            dbB.put(null, key, data);
+            dbB.close();
+            env.close();
+
+            /*
+             * Recover. We should see the database. We may or may not see
+             * the records.
+             */
+            createEnv(1024, false);
+            List<String> dbNames = env.getDatabaseNames();
+            assertEquals(2, dbNames.size());
+            assertEquals("Txnal", dbNames.get(1));
+            assertEquals("NotTxnal", dbNames.get(0));
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Test that we can recover with a bad checksum.
+     */
+    @Test
+    public void testBadChecksum()
+        throws Throwable {
+
+        try {
+            /* Create an environment and databases. */
+            createEnvAndDbs(2048, false, 1);
+            Map<TestData, Set<TestData>> expectedData =
+                new HashMap<TestData, Set<TestData>>();
+
+            /* Make txns before and after a checkpoint. */
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, 4, expectedData, 1, true, 1);
+            txn.commit();
+            env.checkpoint(forceConfig);
+
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 5, 6, expectedData, 1, true, 1);
+            txn.commit();
+
+            txn = env.beginTransaction(null, null);
+            insertData(txn, 7, 8, expectedData, 1, false, 1);
+
+            /* Close the environment, then recover. */
+            closeEnv();
+
+            /* Write some 0's into the last file.
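+             * Every JE log entry carries a checksum over its contents, so
+             * overwriting even a few bytes of the newest entry makes that
+             * entry unreadable. Recovery is expected to treat the log as
+             * ending just before the corrupted entry rather than failing
+             * outright; writeBadStuffInLastFile() below does the overwrite.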
*/ + writeBadStuffInLastFile(); + + recoverAndVerify(expectedData, 1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Another bad checksum test. Make sure that there is no checkpoint in the + * last file so that this recovery will have to read backwards into the + * previous file. Also recover in read/only mode to make sure we don't + * process the bad portion of the log. + */ + @Test + public void testBadChecksumReadOnlyReadPastLastFile() + throws Throwable { + + try { + /* Create an environment and databases. */ + createEnvAndDbs(500, false, 1); + Map> expectedData = + new HashMap>(); + + /* Commit some data, checkpoint. */ + Transaction txn = env.beginTransaction(null, null); + insertData(txn, 0, 4, expectedData, 1, true, 1); + txn.commit(); + env.checkpoint(forceConfig); + + /* + * Remember how many files we have, so we know where the last + * checkpoint is. + */ + String[] suffixes = new String[] {FileManager.JE_SUFFIX}; + String[] fileList = + FileManager.listFiles(envHome, suffixes, false); + int startingNumFiles = fileList.length; + + /* Now add enough non-committed data to add more files. */ + txn = env.beginTransaction(null, null); + insertData(txn, 7, 50, expectedData, 1, false, 1); + + /* Close the environment, then recover. */ + closeEnv(); + + /* Make sure that we added on files after the checkpoint. */ + fileList = FileManager.listFiles(envHome, suffixes, false); + assertTrue(fileList.length > startingNumFiles); + + /* Write some 0's into the last file. */ + writeBadStuffInLastFile(); + + recoverROAndVerify(expectedData, 1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + private void writeBadStuffInLastFile() + throws IOException { + + String[] files = + FileManager.listFiles(envHome, + new String[] {FileManager.JE_SUFFIX}, + false); + File lastFile = new File(envHome, files[files.length - 1]); + RandomAccessFile rw = new RandomAccessFile(lastFile, "rw"); + + rw.seek(rw.length() - 10); + rw.writeBytes("000000"); + rw.close(); + } + + /** + * Test that we can recover with no checkpoint end + */ + @Test + public void testNoCheckpointEnd() + throws Exception { + + /* Create a new environment */ + EnvironmentConfig createConfig = TestUtils.initEnvConfig(); + createConfig.setTransactional(true); + createConfig.setAllowCreate(true); + env = new Environment(envHome, createConfig); + + /* + * In order to prevent following write, we need to validate + * the environment first and then call FileManager.truncateLog. + * See ReplicaFeederSyncup.setupHardRecovery. + */ + EnvironmentFailureException.unexpectedException( + DbInternal.getNonNullEnvImpl(env), + "Just simulate EFE.", + new Exception()); + + /* Truncate before the first ckpt end. */ + truncateAtEntry(LogEntryType.LOG_CKPT_END); + env.close(); + + /* Check that we can recover. */ + createConfig.setAllowCreate(false); + env = new Environment(envHome, createConfig); + env.close(); + } + + /** + * Truncate the log so it doesn't include the first incidence of this + * log entry type. + */ + private void truncateAtEntry(LogEntryType entryType) + throws Exception { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* + * Find the first given log entry type and truncate the file so it + * doesn't include that entry. 
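+         * An LSN here is a long that packs a file number together with an
+         * offset within that file, so once the entry is found, its LSN gives
+         * the truncation point directly (this mirrors the call made at the
+         * end of this method):
+         *
+         *   envImpl.getFileManager().truncateLog(
+         *       DbLsn.getFileNumber(targetLsn),
+         *       DbLsn.getFileOffset(targetLsn));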
+ */ + SearchFileReader reader = + new SearchFileReader(envImpl, + 1000, // readBufferSize + true, // forward + 0, // startLSN + DbLsn.NULL_LSN, // endLSN + entryType); + + long targetLsn = 0; + if (reader.readNextEntry()) { + targetLsn = reader.getLastLsn(); + } else { + fail("There should be some kind of " + entryType + " in the log."); + } + + assertTrue(targetLsn != 0); + envImpl.getFileManager().truncateLog(DbLsn.getFileNumber(targetLsn), + DbLsn.getFileOffset(targetLsn)); + } +} diff --git a/test/com/sleepycat/je/recovery/RecoveryTest.java b/test/com/sleepycat/je/recovery/RecoveryTest.java new file mode 100644 index 0000000..f879ef6 --- /dev/null +++ b/test/com/sleepycat/je/recovery/RecoveryTest.java @@ -0,0 +1,314 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; + +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.utilint.StringUtils; + +public class RecoveryTest extends RecoveryTestBase { + + /** + * Basic insert, delete data. + */ + @Test + public void testBasic() + throws Throwable { + + doBasic(true); + } + + /** + * Basic insert, delete data with BtreeComparator + */ + @Test + public void testBasicRecoveryWithBtreeComparator() + throws Throwable { + + btreeComparisonFunction = new BtreeComparator(true); + doBasic(true); + } + + /** + * Test that put(OVERWRITE) works correctly with duplicates. 
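+     *
+     * In a sorted-duplicates database, put() adds a new duplicate for each
+     * distinct data value, while putting a key/data pair that already exists
+     * succeeds without adding anything. The test below therefore calls
+     * db.put() with data3 twice but records it in expectedData only once.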
+ */ + @Test + public void testDuplicateOverwrite() + throws Throwable { + + createEnvAndDbs(1 << 10, false, NUM_DBS); + try { + Map> expectedData = + new HashMap>(); + + Transaction txn = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(StringUtils.toUTF8("aaaaa")); + DatabaseEntry data1 = + new DatabaseEntry(StringUtils.toUTF8("dddddddddd")); + DatabaseEntry data2 = + new DatabaseEntry(StringUtils.toUTF8("eeeeeeeeee")); + DatabaseEntry data3 = + new DatabaseEntry(StringUtils.toUTF8("ffffffffff")); + Database db = dbs[0]; + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data1)); + addExpectedData(expectedData, 0, key, data1, true); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data2)); + addExpectedData(expectedData, 0, key, data2, true); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data3)); + addExpectedData(expectedData, 0, key, data3, true); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data3)); + txn.commit(); + closeEnv(); + + recoverAndVerify(expectedData, NUM_DBS); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Basic insert, delete data. + */ + @Test + public void testBasicFewerCheckpoints() + throws Throwable { + + doBasic(false); + } + + @Test + public void testSR8984Part1() + throws Throwable { + + doTestSR8984Work(true); + } + + @Test + public void testSR8984Part2() + throws Throwable { + + doTestSR8984Work(false); + } + + private void doTestSR8984Work(boolean sameKey) + throws DatabaseException { + + final int NUM_EXTRA_DUPS = 150; + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + /* Make an environment and open it */ + envConfig.setTransactional(false); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), + "false"); + + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(false); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + Database db = env.openDatabase(null, "testSR8984db", dbConfig); + + DatabaseEntry key = new DatabaseEntry(StringUtils.toUTF8("k1")); + DatabaseEntry data = new DatabaseEntry(StringUtils.toUTF8("d1")); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + assertEquals(OperationStatus.SUCCESS, db.delete(null, key)); + + if (!sameKey) { + data.setData(StringUtils.toUTF8("d2")); + } + /* Cause a dup tree of some depth to be created. */ + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + for (int i = 3; i < NUM_EXTRA_DUPS; i++) { + data.setData(StringUtils.toUTF8("d" + i)); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + data.setData(StringUtils.toUTF8("d1")); + + Cursor c = db.openCursor(null, null); + assertEquals(OperationStatus.SUCCESS, + c.getFirst(key, data, LockMode.DEFAULT)); + + c.close(); + db.close(); + + /* Force an abrupt close so there is no checkpoint at the end. 
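+         * (closeEnv() bottoms out in forceCloseEnvOnly(), which calls
+         * DbInternal.getNonNullEnvImpl(env).close(false), i.e. close without
+         * a final checkpoint, so the re-open below must rebuild the
+         * duplicates tree purely by replaying the log.)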
+ */
+        closeEnv();
+        env = new Environment(envHome, envConfig);
+        db = env.openDatabase(null, "testSR8984db", dbConfig);
+        c = db.openCursor(null, null);
+        assertEquals(OperationStatus.SUCCESS,
+                     c.getFirst(key, data, LockMode.DEFAULT));
+        assertEquals(NUM_EXTRA_DUPS - 2, c.count());
+        c.close();
+        db.close();
+        env.close();
+    }
+
+    /**
+     * Insert data, delete data into several dbs.
+     */
+    public void doBasic(boolean runCheckpointerDaemon)
+        throws Throwable {
+
+        createEnvAndDbs(1 << 20, runCheckpointerDaemon, NUM_DBS);
+        int numRecs = NUM_RECS;
+
+        try {
+            // Set up a repository of expected data
+            Map<TestData, Set<TestData>> expectedData =
+                new HashMap<TestData, Set<TestData>>();
+
+            // insert all the data
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            // delete all the even records
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, false, true, NUM_DBS);
+            txn.commit();
+
+            // modify all the records
+            txn = env.beginTransaction(null, null);
+            modifyData(txn, NUM_RECS/2, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            closeEnv();
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Insert data, delete all data into several dbs.
+     */
+    @Test
+    public void testBasicDeleteAll()
+        throws Throwable {
+
+        createEnvAndDbs(1024, true, NUM_DBS);
+        int numRecs = NUM_RECS;
+        try {
+            // Set up a repository of expected data
+            Map<TestData, Set<TestData>> expectedData =
+                new HashMap<TestData, Set<TestData>>();
+
+            // insert all the data
+            Transaction txn = env.beginTransaction(null, null);
+            insertData(txn, 0, numRecs - 1, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            // modify half the records
+            txn = env.beginTransaction(null, null);
+            modifyData(txn, numRecs/2, expectedData, 1, true, NUM_DBS);
+            txn.commit();
+
+            // delete all the records
+            txn = env.beginTransaction(null, null);
+            deleteData(txn, expectedData, true, true, NUM_DBS);
+            txn.commit();
+
+            closeEnv();
+
+            recoverAndVerify(expectedData, NUM_DBS);
+        } catch (Throwable t) {
+            // print stacktrace before trying to clean up files
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    protected static class BtreeComparator implements Comparator<byte[]> {
+        protected boolean ascendingComparison = true;
+
+        public BtreeComparator() {
+        }
+
+        protected BtreeComparator(boolean ascendingComparison) {
+            this.ascendingComparison = ascendingComparison;
+        }
+
+        public int compare(byte[] o1, byte[] o2) {
+            byte[] arg1;
+            byte[] arg2;
+            if (ascendingComparison) {
+                arg1 = o1;
+                arg2 = o2;
+            } else {
+                arg1 = o2;
+                arg2 = o1;
+            }
+            int a1Len = arg1.length;
+            int a2Len = arg2.length;
+
+            int limit = Math.min(a1Len, a2Len);
+
+            for (int i = 0; i < limit; i++) {
+                byte b1 = arg1[i];
+                byte b2 = arg2[i];
+                if (b1 == b2) {
+                    continue;
+                } else {
+                    /* Remember, bytes are signed, so convert to shorts so
+                       that we effectively do an unsigned byte comparison. */
+                    short s1 = (short) (b1 & 0x7F);
+                    short s2 = (short) (b2 & 0x7F);
+                    if (b1 < 0) {
+                        s1 |= 0x80;
+                    }
+                    if (b2 < 0) {
+                        s2 |= 0x80;
+                    }
+                    return (s1 - s2);
+                }
+            }
+
+            return (a1Len - a2Len);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/RecoveryTestBase.java b/test/com/sleepycat/je/recovery/RecoveryTestBase.java
new file mode 100644
index 0000000..dc26535
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/RecoveryTestBase.java
@@ -0,0 +1,952 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates.
All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.XAEnvironment; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.StartupTracker; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; + +public class RecoveryTestBase extends TestBase { + private static final boolean DEBUG = false; + + protected static final int NUM_RECS = 257; + protected static final int N_DUPLICATES_PER_KEY = 28; + protected static final int NUM_DBS = 3; + protected static final int TREE_FANOUT = 6; + + protected static final String DB_NAME = "testDb"; + + protected File envHome; + protected Environment env; + protected Database[] dbs; + protected EnvironmentConfig envConfig; + protected CheckpointConfig forceConfig; + protected Comparator btreeComparisonFunction = null; + + protected int treeFanout; + + public RecoveryTestBase() { + init(); + } + + public RecoveryTestBase(boolean reduceMemory) { + init(); + envConfig.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(), + new Long(1 << 24).toString()); + } + + private void init() { + envHome = SharedTestUtils.getTestDir(); + Key.DUMP_TYPE = DumpType.BINARY; + envConfig = TestUtils.initEnvConfig(); + forceConfig = new CheckpointConfig(); + forceConfig.setForce(true); + treeFanout = TREE_FANOUT; + } + + + @After + public void tearDown() { + + if (env != null) { + try { + env.close(); + } catch (RuntimeException E) { + } + } + env = null; + dbs = null; + envConfig = null; + forceConfig = null; + } + + /** + * Make an environment and databases, commit the db creation by default. + * Running with or without the checkpoint daemon changes how recovery is + * exercised. 
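+     *
+     * A typical subclass test is shaped like this (a sketch using this
+     * class's own helpers, not a verbatim excerpt from any one test):
+     *
+     *   createEnvAndDbs(1 << 20, true, NUM_DBS);
+     *   Map<TestData, Set<TestData>> expected =
+     *       new HashMap<TestData, Set<TestData>>();
+     *   Transaction txn = env.beginTransaction(null, null);
+     *   insertData(txn, 0, NUM_RECS - 1, expected, 1, true, NUM_DBS);
+     *   txn.commit();
+     *   closeEnv();                          // no final checkpoint
+     *   recoverAndVerify(expected, NUM_DBS); // re-open runs recovery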
+ */
+    protected void createEnv(int fileSize, boolean runCheckpointDaemon)
+        throws DatabaseException {
+
+        createEnvInternal(fileSize, runCheckpointDaemon, false, false);
+    }
+
+    private void createEnvInternal(int fileSize,
+                                   boolean runCheckpointDaemon,
+                                   boolean createXAEnv,
+                                   boolean checkLeaks)
+        throws DatabaseException {
+
+        /* Make an environment and open it. */
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC));
+        envConfig.
+            setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(),
+                           Integer.toString(fileSize));
+        envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(),
+                                 (checkLeaks ? "true" : "false"));
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(),
+                                 Integer.toString(treeFanout));
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(),
+                                 "false");
+        envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(),
+                                 "false");
+
+        if (!runCheckpointDaemon) {
+            envConfig.setConfigParam
+                (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        }
+        setExtraProperties();
+        if (createXAEnv) {
+            env = new XAEnvironment(envHome, envConfig);
+        } else {
+            env = new Environment(envHome, envConfig);
+        }
+    }
+
+    /**
+     * Overridden by the subclass.
+     * @throws DatabaseException from subclasses.
+     */
+    protected void setExtraProperties()
+        throws DatabaseException {
+    }
+
+    /**
+     * Open/create databases.
+     */
+    protected void createDbs(Transaction txn, int numDbs)
+        throws DatabaseException {
+
+        /* Make a db and open it. */
+        dbs = new Database[numDbs];
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        if (btreeComparisonFunction != null) {
+            dbConfig.setBtreeComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 btreeComparisonFunction.getClass());
+        }
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        for (int i = 0; i < numDbs; i++) {
+            dbs[i] = env.openDatabase(txn, DB_NAME + i, dbConfig);
+        }
+    }
+
+    protected void closeDbs() {
+        if (dbs == null) {
+            return;
+        }
+        for (Database db : dbs) {
+            if (db != null) {
+                db.close();
+            }
+        }
+        dbs = null;
+    }
+
+    /**
+     * Make an environment and databases.
+     */
+    protected void createEnvAndDbs(int fileSize,
+                                   boolean runCheckpointerDaemon,
+                                   int numDbs)
+        throws DatabaseException {
+
+        createEnvAndDbsInternal(fileSize, runCheckpointerDaemon,
+                                numDbs, false, false);
+    }
+
+    protected void createXAEnvAndDbs(int fileSize,
+                                     boolean runCheckpointerDaemon,
+                                     int numDbs,
+                                     boolean checkLeaks)
+        throws DatabaseException {
+
+        createEnvAndDbsInternal(fileSize, runCheckpointerDaemon,
+                                numDbs, true, checkLeaks);
+    }
+
+    protected void createEnvAndDbsInternal(int fileSize,
+                                           boolean runCheckpointerDaemon,
+                                           int numDbs,
+                                           boolean createXAEnv,
+                                           boolean checkLeaks)
+        throws DatabaseException {
+
+        createEnvInternal(fileSize, runCheckpointerDaemon,
+                          createXAEnv, checkLeaks);
+        Transaction txn = env.beginTransaction(null, null);
+        createDbs(txn, numDbs);
+        txn.commit();
+    }
+
+    /**
+     * Throw away the environment so the next open will cause a recovery.
+     */
+    protected void closeEnv()
+        throws DatabaseException {
+
+        TestUtils.validateNodeMemUsage(DbInternal.getNonNullEnvImpl(env),
+                                       false);
+
+        /* Close the environment.
*/ + closeDbs(); + forceCloseEnvOnly(); + } + + /* Force the environment to be closed even if with outstanding handles.*/ + protected void forceCloseEnvOnly() + throws DatabaseException { + + /* Close w/out checkpointing, in order to exercise recovery better.*/ + DbInternal.getNonNullEnvImpl(env).close(false); + env = null; + } + + /* + * Recover the databases and check the data. Return a list of the + * RecoveryInfos generated by each recovery. + */ + protected List recoverAndVerify(Map> expectedData, + int numDbs) + throws DatabaseException { + + return recoverAndVerifyInternal(expectedData, numDbs, + false, // XA + false); // readOnly + } + + protected List + recoverROAndVerify(Map> expectedData, + int numDbs) + throws DatabaseException { + + return recoverAndVerifyInternal(expectedData, numDbs, + false, // XA + true); // readOnly + } + + /* + * Recover the databases and check the data. Return a list of the + * RecoveryInfos generated by each recovery. + */ + protected List + xaRecoverAndVerify(Map> expectedData, + int numDbs) + throws DatabaseException { + + return recoverAndVerifyInternal(expectedData, + numDbs, + true, // XA + false); // readOnly + } + + private List + recoverAndVerifyInternal(Map> expectedData, + int numDbs, + boolean createXAEnv, + boolean readOnlyMode) + throws DatabaseException { + + List infoList = + recoverOnlyInternal(createXAEnv, readOnlyMode); + verifyData(expectedData, numDbs); + TestUtils.validateNodeMemUsage(DbInternal.getNonNullEnvImpl(env), + false); + /* Run verify again. */ + DbInternal.getNonNullEnvImpl(env).close(false); + env = new Environment(envHome, getRecoveryConfig(readOnlyMode)); + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + infoList.add(envImpl.getStartupTracker()); + verifyData(expectedData, numDbs); + TestUtils.validateNodeMemUsage(envImpl, false); + env.close(); + return infoList; + } + + private EnvironmentConfig getRecoveryConfig(boolean readOnlyMode) { + EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig(); + recoveryConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "6"); + recoveryConfig.setConfigParam + (EnvironmentParams.MAX_MEMORY.getName(), + String.valueOf(10L << 24)); + recoveryConfig.setReadOnly(readOnlyMode); + + /* + * Don't run checkLeaks, because verify is running while the system is + * not quiescent. The other daemons are running. + */ + recoveryConfig.setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false"); + recoveryConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + recoveryConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + recoveryConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + + recoveryConfig.setTransactional(true); + return recoveryConfig; + } + + protected List recoverOnly() + throws DatabaseException { + + return recoverOnlyInternal(false, // XA + false); // read only + } + + protected List xaRecoverOnly() + throws DatabaseException { + + return recoverOnlyInternal(true, // XA + false); // read only + } + + private List + recoverOnlyInternal(boolean createXAEnv, + boolean readOnlyMode) + throws DatabaseException { + + List infoList = new ArrayList(); + + /* Open it again, which will run recovery. 
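+         * Simply constructing the Environment (or XAEnvironment) over an
+         * existing home directory is what triggers recovery; the
+         * StartupTracker captured just below records what that recovery
+         * did, so callers can assert against it.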
+ */
+        if (createXAEnv) {
+            env = new XAEnvironment(envHome, getRecoveryConfig(readOnlyMode));
+        } else {
+            env = new Environment(envHome, getRecoveryConfig(readOnlyMode));
+        }
+        TestUtils.validateNodeMemUsage(DbInternal.getNonNullEnvImpl(env),
+                                       false);
+
+        infoList.add
+            (DbInternal.getNonNullEnvImpl(env).getStartupTracker());
+
+        return infoList;
+    }
+
+    /**
+     * Compare the data in the databases against the data in the expected
+     * data set.
+     */
+    protected void verifyData(Map<TestData, Set<TestData>> expectedData,
+                              int numDbs)
+        throws DatabaseException {
+
+        verifyData(expectedData, true, numDbs);
+    }
+
+    /**
+     * Compare the data in the databases against the data in the expected
+     * data set.
+     */
+    protected void verifyData(Map<TestData, Set<TestData>> expectedData,
+                              boolean checkInList,
+                              int numDbs)
+        throws DatabaseException {
+
+        verifyData(expectedData, checkInList, 0, numDbs);
+    }
+
+    @SuppressWarnings("unchecked")
+    protected void verifyData(Map<TestData, Set<TestData>> expectedData,
+                              boolean checkInList,
+                              int startDb,
+                              int endDb)
+        throws DatabaseException {
+
+        /* Run verify. */
+        if (checkInList) {
+            assertTrue(env.verify(null, System.err));
+        } else {
+            assertTrue(env.verify(null, System.err));
+        }
+
+        /*
+         * Get a deep copy of expected data (cloning the data sets, not the
+         * items within dataSet, since verifyData will remove items, and we
+         * need to keep the expectedData set intact because we call verify
+         * repeatedly).
+         */
+        Map<TestData, Set<TestData>> useData =
+            new HashMap<TestData, Set<TestData>>();
+
+        Iterator<Map.Entry<TestData, Set<TestData>>> iter =
+            expectedData.entrySet().iterator();
+
+        while (iter.hasNext()) {
+            Map.Entry<TestData, Set<TestData>> entry = iter.next();
+            Set<TestData> clone =
+                (Set<TestData>) ((HashSet<TestData>) entry.getValue()).clone();
+            useData.put(entry.getKey(), clone);
+        }
+
+        /* Generate an expected count map. */
+        Map<TestData, Integer> countMap = generateCountMap(expectedData);
+
+        /* Check each db in turn. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        if (btreeComparisonFunction != null) {
+            dbConfig.setBtreeComparator
+                ((Class<? extends Comparator<byte[]>>)
+                 btreeComparisonFunction.getClass());
+        }
+        dbConfig.setTransactional(env.getConfig().getTransactional());
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setReadOnly(true);
+        for (int d = startDb; d < endDb; d++) {
+            Database checkDb = env.openDatabase(null, DB_NAME + d,
+                                                dbConfig);
+            Cursor myCursor = checkDb.openCursor(null, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            OperationStatus status =
+                myCursor.getFirst(key, data, LockMode.DEFAULT);
+            DbInternal.getNonNullEnvImpl(env).verifyCursors();
+            int numSeen = 0;
+
+            while (status == OperationStatus.SUCCESS) {
+
+                /* The key should have been in the expected data set. */
+                removeExpectedData(useData, d, key, data, true);
+
+                /* The count should be right. */
+                int count = myCursor.count();
+                assertEquals("Count not right for key " +
+                             TestUtils.dumpByteArray(key.getData()),
+                             getExpectedCount(countMap, d, key), count);
+
+                status = myCursor.getNext(key, data, LockMode.DEFAULT);
+                numSeen++;
+            }
+
+            myCursor.close();
+
+            /* Should be nothing left in the expected data map. */
+            if (DEBUG) {
+                System.out.println("Finished db" + d + " numSeen=" + numSeen);
+                dumpExpected(useData);
+            }
+            checkDb.close();
+        }
+
+        assertEquals(0, useData.size());
+    }
+
+    /**
+     * Process the expected data map to generate expected counts. For each
+     * database, make a map of key value to count.
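+     * For example, if db 0 holds key k with three duplicates, the result
+     * maps TestData(0, k) to Integer 3; verifyData() then asserts that
+     * Cursor.count() at that key returns the same number.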
+ */ + private Map + generateCountMap(Map> expectedData) { + + Map countMap = new HashMap(); + + Iterator> iter = expectedData.values().iterator(); + while (iter.hasNext()) { + Set dataSet = iter.next(); + Iterator dataIter = dataSet.iterator(); + while (dataIter.hasNext()) { + TestData t = dataIter.next(); + TestData countKey = new TestData(t.dbNum, t.key); + Integer count = countMap.get(countKey); + if (count == null) { + countMap.put(countKey, new Integer(1)); + } else { + countMap.put(countKey, new Integer(count.intValue()+1)); + } + } + } + return countMap; + } + + /** + * @return the expected count value for a given key in a given db. + */ + private int getExpectedCount(Map countMap, + int whichDb, + DatabaseEntry key) { + return countMap.get(new TestData(whichDb, key.getData())).intValue(); + } + + /** + * Insert data over many databases. + */ + protected void insertData(Transaction txn, + int startVal, + int endVal, + Map> expectedData, + int nDuplicatesPerKey, + boolean addToExpectedData, + int numDbs) + throws DatabaseException { + + insertData(txn, startVal, endVal, expectedData, + nDuplicatesPerKey, false, addToExpectedData, + 0, numDbs); + } + + protected void insertData(Transaction txn, + int startVal, + int endVal, + Map> expectedData, + int nDuplicatesPerKey, + boolean addToExpectedData, + int startDb, + int endDb) + throws DatabaseException { + + insertData(txn, startVal, endVal, expectedData, + nDuplicatesPerKey, false, addToExpectedData, + startDb, endDb); + } + + /** + * Insert data over many databases. + * + * @param toggle if true, insert every other value. + */ + protected void insertData(Transaction txn, + int startVal, + int endVal, + Map> expectedData, + int nDuplicatesPerKey, + boolean toggle, + boolean addToExpectedData, + int numDbs) + throws DatabaseException { + + insertData(txn, startVal, endVal, expectedData, nDuplicatesPerKey, + toggle, addToExpectedData, 0, numDbs); + } + + /** + * Insert data over many databases. + * + * @param toggle if true, insert every other value. + */ + protected void insertData(Transaction txn, + int startVal, + int endVal, + Map> expectedData, + int nDuplicatesPerKey, + boolean toggle, + boolean addToExpectedData, + int startDb, + int endDb) + throws DatabaseException { + + Cursor[] cursors = getCursors(txn, startDb, endDb); + + /* Make sure this test inserts something! */ + assertTrue(endVal - startVal > -1); + + /* Are we inserting in an ascending or descending way? */ + int incVal = (toggle) ? 2 : 1; + if (startVal < endVal) { + for (int i = startVal; i <= endVal; i += incVal) { + insertOneRecord(cursors, i, expectedData, + nDuplicatesPerKey, addToExpectedData); + } + } else { + for (int i = startVal; i >= endVal; i -= incVal) { + insertOneRecord(cursors, i, expectedData, + nDuplicatesPerKey, addToExpectedData); + } + } + + for (int i = 0; i < cursors.length; i++) { + cursors[i].close(); + } + } + + /** + * Insert data over many databases. 
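+     *
+     * Keys come from rng.nextInt(), so a repeated key simply gains more
+     * duplicates. When evictBINs is true, each cursor is put in
+     * CacheMode.EVICT_BIN so BINs are evicted from the cache as the cursors
+     * move off of them.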
+ */ + protected void insertRandomData(Transaction txn, + Random rng, + int numRecs, + Map> expectedData, + int nDuplicatesPerKey, + boolean evictBINs, + int startDb, + int endDb) + throws DatabaseException { + + Cursor[] cursors = getCursors(txn, startDb, endDb); + + if (evictBINs) { + for (int i = 0; i < cursors.length; i += 1) { + cursors[i].setCacheMode(CacheMode.EVICT_BIN); + } + } + + for (int i = 0; i < numRecs; i += 1) { + int keyVal = rng.nextInt(); + insertOneRecord(cursors, keyVal, expectedData, + nDuplicatesPerKey, true); + } + + for (int i = 0; i < cursors.length; i++) { + cursors[i].close(); + } + } + + /** + * Add to the set of expected results. ExpectedData is keyed by a TestData + * object that wraps db number and key, and points to sets of TestData + * objects that wrap db number, key, and data. + */ + protected void addExpectedData(Map> expectedData, + int dbNum, + DatabaseEntry key, + DatabaseEntry data, + boolean expectCommit) { + if (expectCommit) { + TestData keyTestData = new TestData(dbNum, key, null); + Set dataSet = expectedData.get(keyTestData); + if (dataSet == null) { + dataSet = new HashSet(); + expectedData.put(keyTestData, dataSet); + } + + dataSet.add(new TestData(dbNum, key, data)); + } + } + + /** + * Remove from the set of expected results. + */ + private void removeExpectedData(Map> expectedData, + int dbNum, + DatabaseEntry key, + DatabaseEntry data, + boolean expectCommit) { + if (expectCommit) { + TestData keyTestData = new TestData(dbNum, key, null); + Set dataSet = expectedData.get(keyTestData); + assertTrue("Should be a data set for " + keyTestData, + (dataSet != null)); + assertTrue("Should be able to remove key " + key + + " from expected data ", + dataSet.remove(new TestData(dbNum, key, data))); + if (dataSet.size() == 0) { + expectedData.remove(keyTestData); + } + } + } + + /** + * @return a set of cursors for the test databases. + */ + private Cursor[] getCursors(Transaction txn, int startDb, int endDb) + throws DatabaseException { + + Cursor[] cursors = new Cursor[endDb - startDb]; + for (int i = 0; i < cursors.length; i++) { + cursors[i] = dbs[startDb + i].openCursor(txn, null); + } + return cursors; + } + + /** + * Insert the given record into all databases. + */ + private void insertOneRecord(Cursor[] cursors, + int val, + Map> expectedData, + int nDuplicatesPerKey, + boolean expectCommit) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int c = 0; c < cursors.length; c++) { + + int testVal = val + c; + byte[] keyData = TestUtils.getTestArray(testVal); + byte[] dataData = TestUtils.byteArrayCopy(keyData); + key.setData(keyData); + + for (int d = 0; d < nDuplicatesPerKey; d++) { + dataData = TestUtils.byteArrayCopy(dataData); + dataData[1]++; + data.setData(dataData); + + //System.out.println("Inserting record with key: " + testVal); + + assertEquals("Insertion of key " + + TestUtils.dumpByteArray(keyData), + OperationStatus.SUCCESS, + cursors[c].putNoDupData(key, data)); + + addExpectedData(expectedData, c, key, data, expectCommit); + } + } + } + + /** + * Delete either every other or all data. 
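+     *
+     * When 'all' is false, the toggle flag alternates, deleting the first
+     * record, skipping the second, and so on; when 'all' is true the flag
+     * stays set and every record is deleted:
+     *
+     *   all == false:  delete, skip, delete, skip, ...
+     *   all == true:   delete, delete, delete, ...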
+ */ + protected void deleteData(Transaction txn, + Map> expectedData, + boolean all, + boolean expectCommit, + int numDbs) + throws DatabaseException { + + Cursor[] cursors = getCursors(txn, 0, numDbs); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int d = 0; d < cursors.length; d++) { + OperationStatus status = + cursors[d].getFirst(key, data, LockMode.DEFAULT); + boolean toggle = true; + int deleteCount = 0; + while (status == OperationStatus.SUCCESS) { + if (toggle) { + removeExpectedData(expectedData, d, key, data, + expectCommit); + assertEquals(OperationStatus.SUCCESS, cursors[d].delete()); + deleteCount++; + toggle = all; + } else { + toggle = true; + } + status = cursors[d].getNext(key, data, LockMode.DEFAULT); + } + /* Make sure the test deletes something! */ + assertTrue(deleteCount > 0); + } + + for (int i = 0; i < cursors.length; i++) { + cursors[i].close(); + } + } + + /** + * Modify data + * @param txn owning txn + * @param endVal end point of the modification range + * @param expectedData store of expected values for verification at end + * @param increment used to modify the data. + * @param expectCommit if true, reflect change in expected map. Sometimes + * we don't want to do this because we plan to make the txn abort. + */ + protected void modifyData(Transaction txn, + int endVal, + Map> expectedData, + int increment, + boolean expectCommit, + int numDbs) + throws DatabaseException { + + Cursor[] cursors = getCursors(txn, 0, numDbs); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int d = 0; d < cursors.length; d++) { + + /* Position cursor at the start value. */ + OperationStatus status = + cursors[d].getFirst(key, data, LockMode.DEFAULT); + + /* For each record within the range, change the data. */ + int modCount = 0; + int keyVal = TestUtils.getTestVal(key.getData()); + + while ((status == OperationStatus.SUCCESS) && (keyVal <= endVal)) { + + /* Change the data. */ + removeExpectedData(expectedData, d, key, data, expectCommit); + data.setData(TestUtils.getTestArray(keyVal + increment)); + cursors[d].delete(); + cursors[d].put(key, data); + addExpectedData(expectedData, d, key, data, expectCommit); + modCount++; + + status = cursors[d].getNext(key, data, LockMode.DEFAULT); + + if (status == OperationStatus.SUCCESS) { + keyVal = TestUtils.getTestVal(key.getData()); + } + } + /* Make sure we modify something! 
*/ + assertTrue(modCount > 0); + } + + for (int i = 0; i < cursors.length; i++) { + cursors[i].close(); + } + } + + /** + * Print the contents of the databases out for debugging + */ + protected void dumpData(int numDbs) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + if (btreeComparisonFunction != null) { + dbConfig.setBtreeComparator + ((Class>) + btreeComparisonFunction.getClass()); + } + dbConfig.setSortedDuplicates(true); + dbConfig.setTransactional(true); + for (int d = 0; d < numDbs; d++) { + Database checkDb = env.openDatabase(null, DB_NAME + d, dbConfig); + Cursor myCursor = checkDb.openCursor(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + OperationStatus status = + myCursor.getFirst(key, data, LockMode.DEFAULT); + while (status == OperationStatus.SUCCESS) { + System.out.println("Database " + d + + " seen = " + + /* + new String(key.getData(), UTF8) + + "/" + + new String(data.getData(), UTF8)); + */ + TestUtils.dumpByteArray(key.getData()) + + "/" + + TestUtils.dumpByteArray(data.getData())); + status = myCursor.getNext(key, data, LockMode.DEFAULT); + } + myCursor.close(); + } + } + + /** + * Print the contents of the expected map for debugging. + */ + protected void dumpExpected(Map> expectedData) { + System.out.println("Expected = " ); + Iterator> iter = expectedData.values().iterator(); + while (iter.hasNext()) { + Set dataSet = iter.next(); + Iterator dataIter = dataSet.iterator(); + while (dataIter.hasNext()) { + TestData t = dataIter.next(); + System.out.println(t); + } + } + } + + protected class TestData { + public int dbNum; + public byte[] key; + public byte[] data; + + TestData(int dbNum, DatabaseEntry keyDbt, DatabaseEntry dataDbt) { + this.dbNum = dbNum; + key = keyDbt.getData(); + if (dataDbt == null) { + dataDbt = new DatabaseEntry(); + dataDbt.setData(new byte[1]); + } + data = dataDbt.getData(); + } + + TestData(int dbNum, byte[] key) { + this.dbNum = dbNum; + this.key = key; + } + + @Override + public boolean equals(Object o ) { + if (this == o) + return true; + if (!(o instanceof TestData)) + return false; + + TestData other = (TestData) o; + if ((dbNum == other.dbNum) && + Arrays.equals(key, other.key) && + Arrays.equals(data, other.data)) { + return true; + } else + return false; + } + + @Override + public String toString() { + if (data == null) { + return "db=" + dbNum + + " k=" + TestUtils.dumpByteArray(key); + } else { + return "db=" + dbNum + + " k=" + TestUtils.dumpByteArray(key) + + " d=" + TestUtils.dumpByteArray(data); + } + } + + @Override + public int hashCode() { + return toString().hashCode(); + } + } +} diff --git a/test/com/sleepycat/je/recovery/Rollback2PCTest.java b/test/com/sleepycat/je/recovery/Rollback2PCTest.java new file mode 100644 index 0000000..38221e5 --- /dev/null +++ b/test/com/sleepycat/je/recovery/Rollback2PCTest.java @@ -0,0 +1,203 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.recovery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import java.io.File; + +import javax.transaction.xa.XAException; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.XAEnvironment; +import com.sleepycat.je.log.LogUtils.XidImpl; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +public class Rollback2PCTest extends TestBase { + private final File envHome; + + public Rollback2PCTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * Test that getXATransaction does not return a prepared txn. + */ + @Test + public void testSR16375() + throws DatabaseException, XAException { + + /* Setup environment. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + XAEnvironment xaEnv = new XAEnvironment(envHome, envConfig); + + /* Setup database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = xaEnv.openDatabase(null, "foo", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(1, key); + + /* + * Start an XA transaction and add a record. Then crash the + * environment. + */ + XidImpl xid = new XidImpl(1, StringUtils.toUTF8("FooTxn"), null); + Transaction preCrashTxn = xaEnv.beginTransaction(null, null); + xaEnv.setXATransaction(xid, preCrashTxn); + IntegerBinding.intToEntry(99, data); + assertEquals(OperationStatus.SUCCESS, db.put(preCrashTxn, key, data)); + db.close(); + xaEnv.prepare(xid); + xaEnv.sync(); + + /* Crash */ + DbInternal.getNonNullEnvImpl(xaEnv).abnormalClose(); + xaEnv = null; + + /* Recover */ + envConfig.setAllowCreate(false); + xaEnv = new XAEnvironment(envHome, envConfig); + + /* Ensure that getXATransaction returns null. */ + Transaction resumedTxn = xaEnv.getXATransaction(xid); + assertNull(resumedTxn); + + /* Rollback. */ + xaEnv.rollback(xid); + DbInternal.getNonNullEnvImpl(xaEnv).abnormalClose(); + } + + /** + * Verifies a bug fix to a problem that occurs when aborting a prepared txn + * after recovery. During recovery, we were counting the old version of an + * LN as obsolete when replaying the prepared txn LN. But if that txn + * aborts later, the old version becomes active. The fix is to use inexact + * counting. [#17022] + */ + @Test + public void testLogCleanAfterRollbackPrepared() + throws DatabaseException, XAException { + + /* + * Setup environment. + * + * We intentionally do not disable the checkpointer daemon to add + * variability to the test. This variability found a checkpointer bug + * in the past. 
[#20270] + */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, + "90"); + XAEnvironment xaEnv = new XAEnvironment(envHome, envConfig); + + /* Setup database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = xaEnv.openDatabase(null, "foo", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(99, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + DbInternal.getNonNullEnvImpl(xaEnv).forceLogFileFlip(); + DbInternal.getNonNullEnvImpl(xaEnv).forceLogFileFlip(); + DbInternal.getNonNullEnvImpl(xaEnv).forceLogFileFlip(); + + /* + * Start an XA transaction and add a record. Then crash the + * environment. + */ + XidImpl xid = new XidImpl(1, StringUtils.toUTF8("FooTxn"), null); + Transaction preCrashTxn = xaEnv.beginTransaction(null, null); + xaEnv.setXATransaction(xid, preCrashTxn); + IntegerBinding.intToEntry(100, data); + assertEquals(OperationStatus.SUCCESS, db.put(preCrashTxn, key, data)); + db.close(); + xaEnv.prepare(xid); + DbInternal.getNonNullEnvImpl(xaEnv).getLogManager().flushSync(); + + /* Crash */ + DbInternal.getNonNullEnvImpl(xaEnv).abnormalClose(); + xaEnv = null; + + /* Recover */ + envConfig.setAllowCreate(false); + xaEnv = new XAEnvironment(envHome, envConfig); + + /* Rollback. */ + xaEnv.rollback(xid); + + /* Force log cleaning. */ + CheckpointConfig force = new CheckpointConfig(); + force.setForce(true); + xaEnv.checkpoint(force); + xaEnv.cleanLog(); + xaEnv.checkpoint(force); + + /* Close and re-open, ensure we can read the original record. */ + xaEnv.close(); + xaEnv = new XAEnvironment(envHome, envConfig); + db = xaEnv.openDatabase(null, "foo", dbConfig); + /* Before the fix, the get() caused a LogFileNotFound. */ + assertEquals(OperationStatus.SUCCESS, db.get(null, key, data, null)); + /* BEGIN debugging code. */ + if (99 != IntegerBinding.entryToInt(data)) { + String entryTypes = null; + String txnIds = null; + long startLsn = DbLsn.NULL_LSN; + long endLsn = DbLsn.NULL_LSN; + boolean verbose = true; + boolean stats = false; + boolean csvFormat = false; + boolean repEntriesOnly = false; + boolean forwards = true; + String customDumpReaderClass = null; + new com.sleepycat.je.util.DbPrintLog().dump + (envHome, entryTypes, txnIds, startLsn, endLsn, + verbose, stats, repEntriesOnly, csvFormat, forwards, false, + customDumpReaderClass); + } + /* END debugging code. */ + assertEquals(99, IntegerBinding.entryToInt(data)); + db.close(); + xaEnv.close(); + } +} diff --git a/test/com/sleepycat/je/recovery/RollbackTrackerTest.java b/test/com/sleepycat/je/recovery/RollbackTrackerTest.java new file mode 100644 index 0000000..11b96bf --- /dev/null +++ b/test/com/sleepycat/je/recovery/RollbackTrackerTest.java @@ -0,0 +1,643 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.recovery; + +import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.recovery.RollbackTracker.RollbackPeriod; +import com.sleepycat.je.txn.RollbackEnd; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test that the rollback periods in a log are correctly created. This test + * should not be dual mode, because it sets up very specific cases, and does + * not create a replication stream at all. + * + * The test infrastructure can take the declaration of a pseudo log, and then + * process it accordingly. Each test case is therefore a mini-log excerpt. + * + */ + +/* To add: --- ---------- ------------- + new VisibleLN (100L), + new VisibleLN (200L), + new RBStart (300L, 200L), + new InvisibleLN(310L), + new RBEnd (400L, 200L, 300L) +*/ + +public class RollbackTrackerTest extends TestBase { + private static boolean verbose = Boolean.getBoolean("verbose"); + + private final File envHome; + + public RollbackTrackerTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * This valid log has a four level nested rollback period + */ + @Test + public void testGoodNestedLogs() { + + LogRecord[] testLog = new LogRecord[] { + /* + * This is a pseudo log. 
+             * lsn          txn    match  rollback
+             *               id    point  start
+             *------------  ---    ---    -----  ----- */
+            new VisibleLN  (10, -500 ),
+            new Commit     (11, -500 ),
+            new VisibleLN  (12, -501 ),
+
+            /* rollback period from lsn 20 -> 22 */
+            new Abort      (20, -503 ),
+            new InvisibleLN(21, -504 ),
+            new RBStart    (22, 20 ),
+
+            new VisibleLN  (30, -504 ),
+            new VisibleLN  (31, -504 ),
+
+            /* A: second rollback period, outermost, from lsn 40 -> 52 */
+            /* B: nested rollback period from lsn 40 -> 50 */
+            new Abort      (40, -505 ),
+            new InvisibleLN(41, -506 ),
+            /* C: third nested rollback period from lsn 42 -> 48 */
+            new Abort      (42, -600 ),
+            /* D: most nested rollback period from lsn 43 -> 45 */
+            new Abort      (43, -600 ),
+            new InvisibleLN(44, -506 ),
+            new RBStart    (45, 43 ),
+            new RBEnd      (46, 43, 45 ),  // D
+
+            new InvisibleLN(47, -507 ),
+            new RBStart    (48, 42 ),      // C
+            new InvisibleLN(49, -508 ),
+            new RBStart    (50, 40 ),      // B
+            new InvisibleLN(51, -509 ),
+            new RBStart    (52, 40 ),
+            new RBEnd      (53, 40, 52 ),  // A
+
+            /* rollback period from lsn 70 -> 73 */
+            new Commit     (70, -504 ),
+            new InvisibleLN(71, -600 ),
+            new InvisibleLN(72, -600 ),
+            new RBStart    (73, 70 ),
+            new RBEnd      (74, 70, 73 )
+        };
+
+        /*
+         * Each RollbackStart should have a set of txn ids that were
+         * active when the rollback started.
+         */
+        ((RBStart) testLog[5]).addActiveTxnIds(new Long[] {-504L});
+        ((RBStart) testLog[13]).addActiveTxnIds(new Long[] {-506L});
+        ((RBStart) testLog[16]).addActiveTxnIds(new Long[] {-507L});
+        ((RBStart) testLog[18]).addActiveTxnIds(new Long[] {-506L, -508L});
+        ((RBStart) testLog[20]).addActiveTxnIds(new Long[] {-509L});
+        ((RBStart) testLog[25]).addActiveTxnIds(new Long[] {-600L});
+
+        /* This is what we expect to be in the tracker. */
+        List<RollbackPeriod> expected = new ArrayList<RollbackPeriod>();
+        expected.add(new RollbackPeriod(70, 73, 74, 9));
+        expected.add(new RollbackPeriod(40, 52, 53, 9));
+        expected.add(new RollbackPeriod(20, 22, -1, 9));
+
+        /* Process the test data with a tracker. */
+        runTest(testLog, expected, 9 /* checkpoint start */);
+    }
+
+    /**
+     * This valid log has multiple rollback periods, with and without a
+     * rollback end.
+     */
+    @Test
+    public void testGoodLogs() {
+
+        LogRecord[] testLog = new LogRecord[] {
+            /*
+             * This is a pseudo log.
+             * lsn          txn    match  rollback
+             *               id    point  start
+             *------------  ---    ---    -----  ----- */
+            new VisibleLN  (10, -500 ),
+
+            /* rollback period from lsn 20 -> 22 */
+            new Abort      (20, -500 ),
+            new InvisibleLN(21, -501 ),
+            new RBStart    (22, 20 ),
+            new RBEnd      (23, 20, 22 ),
+
+            new VisibleLN  (30, -502 ),
+
+            /* rollback period from lsn 40 -> 48 */
+            new Commit     (40, -502 ),
+            new VisibleLN  (45, -503 ),
+            new Abort      (46, -503 ),
+            new InvisibleLN(47, -504 ),
+            new RBStart    (48, 40 ),
+
+            new VisibleLN  (50, -504 ),
+            new VisibleLN  (60, -504 ),
+
+            /* rollback period from lsn 70 -> 92 */
+            new Commit     (70, -504 ),
+            new InvisibleLN(72, -505 ),
+
+            /* rest of rollback period from lsn 70 -> 92 */
+            new InvisibleLN(90, -506 ),
+            new RBStart    (92, 70 ),
+            new RBEnd      (94, 70, 92 )
+        };
+
+        /*
+         * Each RollbackStart should have a set of txn ids that were
+         * active when the rollback started.
+         */
+        ((RBStart) testLog[3]).addActiveTxnIds(new Long[] {-501L});
+        ((RBStart) testLog[10]).addActiveTxnIds(new Long[] {-504L});
+        ((RBStart) testLog[16]).addActiveTxnIds(new Long[] {-505L, -506L});
+
+        /* This is what we expect to be in the tracker.
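+         * The RollbackPeriod arguments appear to be (matchpoint LSN,
+         * rollback start LSN, rollback end LSN or -1 when there is no
+         * RollbackEnd, checkpoint start LSN), as inferred from the pseudo
+         * log above: this period begins at the Commit at lsn 70 and is
+         * closed by the RBStart at 92 and the RBEnd at 94.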
+         */
+        List<RollbackPeriod> expected = new ArrayList<RollbackPeriod>();
+        expected.add(new RollbackPeriod(70, 92, 94, 9));
+        expected.add(new RollbackPeriod(40, 48, -1, 9));
+        expected.add(new RollbackPeriod(20, 22, 23, 9));
+
+        /* Process the test data with a tracker. */
+        runTest(testLog, expected, 9 /* checkpoint start */);
+    }
+
+    /**
+     * This valid log has rollback periods before checkpoint start.
+     */
+    @Test
+    public void testGoodCkptStart() {
+
+        LogRecord[] testLog = new LogRecord[] {
+            /*
+             * This is a pseudo log.
+             * lsn          txn    match  rollback
+             *               id    point  start
+             *------------  ---    ---    -----  ----- */
+            new VisibleLN  (10, -500 ),
+
+            /* rollback period from lsn 20 -> 22 */
+            new Abort      (20, -500 ),
+            new AlreadyRBLN(21, -501 ),
+            new RBStart    (22, 20 ),
+            new RBEnd      (23, 20, 22 ),
+            new VisibleLN  (30, -502 )
+        };
+
+        /*
+         * Each RollbackStart should have a set of txn ids that were
+         * active when the rollback started.
+         */
+        ((RBStart) testLog[3]).addActiveTxnIds(new Long[] {-501L});
+
+        /* This is what we expect to be in the tracker. */
+        List<RollbackPeriod> expected = new ArrayList<RollbackPeriod>();
+        expected.add(new RollbackPeriod(20, 22, 23, 40));
+
+        /* Process the test data with a tracker. */
+        runTest(testLog, expected, 40 /* checkpoint start. */);
+
+        /* Change the log so that the rollback period has no rollback end. */
+        testLog = new LogRecord[] {
+            /*
+             * This is a pseudo log.
+             * lsn          txn    match  rollback
+             *               id    point  start
+             *------------  ---    ---    -----  ----- */
+            new VisibleLN  (10, -500 ),
+
+            /* rollback period from lsn 20 -> 22 */
+            new Abort      (20, -500 ),
+            new InvisibleLN(21, -501 ),
+            new RBStart    (22, 20 ),
+            new VisibleLN  (30, -502 )
+        };
+
+        /*
+         * Each RollbackStart should have a set of txn ids that were
+         * active when the rollback started.
+         */
+        ((RBStart) testLog[3]).addActiveTxnIds(new Long[] {-501L});
+
+        /* This is what we expect to be in the tracker. */
+        expected = new ArrayList<RollbackPeriod>();
+        expected.add(new RollbackPeriod(20, 22, -1, 40));
+
+        /* Process the test data with a tracker. */
+        runTest(testLog, expected, 40L /* checkpoint start. */);
+    }
+
+    /**
+     * Bad log - two rollback periods intersect.
+     */
+    @Test
+    public void testBadIntersection() {
+
+        LogRecord[] testLog = new LogRecord[] {
+            /*
+             * lsn          txn    match  rollback
+             *               id    point  start
+             *------------  ---    ---    -----  ----- */
+            new VisibleLN  (10, -500),
+            new Abort      (11, -500),
+            new InvisibleLN(12, -501),
+            new Commit     (13, -501),
+            new InvisibleLN(14, -502),
+            new RBStart    (15, 11),
+            new RBEnd      (16, 11, 15L),
+            new InvisibleLN(17, -503),
+            new RBStart    (18, 13),
+            new VisibleLN  (19),
+        };
+        ((RBStart) testLog[5]).addActiveTxnIds(new Long[] {-501L, -502L});
+        ((RBStart) testLog[8]).addActiveTxnIds(new Long[] {-502L, -503L});
+        expectConstructionFailure(testLog);
+    }
+
+    /**
+     * Bad log - a commit entry is in the rollback period.
+     */
+    @Test
+    public void testBadCommitInRollbackPeriod() {
+
+        LogRecord[] testLog = new LogRecord[] {
+            /*
+             * lsn          txn    match  rollback
+             *               id    point  start
+             *------------  ---    ---    -----  ----- */
+            new VisibleLN  (10, -500),
+            new Abort      (11, -501),
+            new InvisibleLN(12, -500),
+            new Commit     (13, -502),
+            new InvisibleLN(14, -503),
+            new RBStart    (15, 11),
+            new RBEnd      (16, 11, 15L),
+        };
+
+        ((RBStart) testLog[5]).addActiveTxnIds(new Long[] {-500L, -503L});
+        expectConstructionFailure(testLog);
+    }
+
+    /*
+     * Bad log - an LN falls in between the RBStart and the RBEnd.
+     */
+
+    // TBW
+
+    /**********************************************************************
+      Methods for processing test data
+     *********************************************************************/
+
+    /**
+     * All test logs are exercised in a way that mimics recovery. The log is
+     * - read backwards and constructed (undoLNs w/MapLNs)
+     * - then read forwards (redoLN)
+     * - then read backwards (undoLN for non-mapLNs)
+     */
+    private void runTest(LogRecord[] testLog,
+                         List<RollbackPeriod> expected,
+                         long checkpointStart) {
+        Environment env = createEnvironment();
+        RollbackTracker tracker =
+            new RollbackTracker(DbInternal.getNonNullEnvImpl(env));
+        tracker.setCheckpointStart(checkpointStart);
+        try {
+            firstConstructionPass(tracker, testLog);
+        } finally {
+            env.close();
+        }
+
+        /* Check that the rollback periods are as expected. */
+        assertEquals(expected, tracker.getPeriodList());
+
+        backwardPass(tracker, testLog);
+    }
+
+    /**
+     * Check that this log fails the construction stage.
+     */
+    private void expectConstructionFailure(LogRecord[] testLog) {
+        Environment env = createEnvironment();
+        RollbackTracker tracker =
+            new RollbackTracker(DbInternal.getNonNullEnvImpl(env));
+        try {
+            firstConstructionPass(tracker, testLog);
+            fail("Should have failed");
+        } catch (EnvironmentFailureException expected) {
+            assertEquals(EnvironmentFailureReason.LOG_INTEGRITY,
+                         expected.getReason());
+            if (verbose) {
+                expected.printStackTrace();
+            }
+        } finally {
+            env.close();
+        }
+    }
+
+    private Environment createEnvironment() {
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        return new Environment(envHome, envConfig);
+    }
+
+    /*
+     * Mimic the first recovery pass, where we scan the log backwards
+     * and create the rollback tracker.
+     */
+    private void firstConstructionPass(RollbackTracker tracker,
+                                       LogRecord[] testData) {
+
+        tracker.setFirstPass(true);
+        RollbackTracker.Scanner scanner = tracker.getScanner();
+        for (int i = testData.length - 1; i >= 0; i--) {
+            LogRecord rec = testData[i];
+            if (verbose) {
+                System.out.println("first pass " + rec);
+            }
+            rec.doConstructionStep(tracker);
+            rec.checkContains(scanner, tracker);
+        }
+    }
+
+    /* Mimic a later backward scan, after the rollback tracker is created. */
+    private void backwardPass(RollbackTracker tracker,
+                              LogRecord[] testData) {
+
+        tracker.setFirstPass(false);
+        RollbackTracker.Scanner scanner = tracker.getScanner();
+        for (int i = testData.length - 1; i >= 0; i--) {
+            LogRecord rec = testData[i];
+            if (verbose) {
+                System.out.println("backward pass " + rec);
+            }
+            rec.checkContains(scanner, tracker);
+        }
+    }
+
+    /*************************************************************
+     * LogRecords and their subclasses represent the log entries that will
+     * be passed to the RollbackTracker in recovery.
+     *************************************************************/
+
+    abstract static class LogRecord {
+        final long ownLSN;
+
+        LogRecord(long ownLSN) {
+            this.ownLSN = ownLSN;
+        }
+
+        void doConstructionStep(RollbackTracker tracker) {
+            /*
+             * Nothing to do by default; most log records are not registered
+             * with the tracker.
+             */
+        }
+
+        void checkContains(RollbackTracker.Scanner scanner,
+                           RollbackTracker tracker) {
+            /* do nothing, containment is only checked for LNs. */
+        }
+
+        void checkNeedsRollback(RollbackTracker.Scanner scanner,
+                                RollbackTracker tracker) {
+            /* do nothing, rollback checking is only checked for LNs.
+             */
+        }
+
+        @Override
+        public String toString() {
+            return getName() + " at lsn " + ownLSN + "[" +
+                DbLsn.getNoFormatString(ownLSN) + "]";
+        }
+
+        abstract String getName();
+    }
+
+    abstract static class TxnLogRecord extends LogRecord {
+        final long txnId;
+
+        TxnLogRecord(long lsn, long txnId) {
+            super(lsn);
+            this.txnId = txnId;
+        }
+
+        /*
+         * Don't bother with a txn; this test data is meant to fail before
+         * a txn id is needed. Saves on spec'ing the test data.
+         */
+        TxnLogRecord(long lsn) {
+            super(lsn);
+            txnId = -1;
+        }
+
+        @Override
+        public String toString() {
+            return super.toString() + " txnId=" + txnId;
+        }
+    }
+
+    /** A LN that is visible */
+    static class VisibleLN extends TxnLogRecord {
+
+        VisibleLN(long ownLSN, long txnId) {
+            super(ownLSN, txnId);
+        }
+
+        VisibleLN(long ownLSN) {
+            super(ownLSN);
+        }
+
+        @Override
+        void checkContains(RollbackTracker.Scanner scanner,
+                           RollbackTracker tracker) {
+            assertFalse("contains check for " + this + "\n tracker=" + tracker,
+                        scanner.positionAndCheck(ownLSN, txnId));
+        }
+
+        @Override
+        String getName() {
+            return "VisibleLN";
+        }
+    }
+
+    /** A LN that is in a rollback period, and is invisible */
+    static class InvisibleLN extends TxnLogRecord {
+        InvisibleLN(long ownLSN, long txnId) {
+            super(ownLSN, txnId);
+        }
+
+        InvisibleLN(long ownLSN) {
+            super(ownLSN);
+        }
+
+        @Override
+        void checkContains(RollbackTracker.Scanner scanner,
+                           RollbackTracker tracker) {
+            assertTrue("contains check for " + this + "\n tracker=" + tracker,
+                       scanner.positionAndCheck(ownLSN, txnId));
+            assertTrue("needsRollback check for " + this + "\n tracker=" +
+                       tracker, scanner.needsRollback());
+        }
+
+        @Override
+        String getName() {
+            return "InvisibleLN";
+        }
+    }
+
+    /**
+     * A LN that is in a rollback period, and is invisible, but is already
+     * rolled back.
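+     *
+     * Such a record lies inside a rollback period that was closed by a
+     * RollbackEnd, so the scanner should report that the record is inside
+     * the period (positionAndCheck returns true) but does not need to be
+     * rolled back again (needsRollback returns false), as checkContains
+     * below asserts.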
+     */
+    static class AlreadyRBLN extends InvisibleLN {
+        AlreadyRBLN(long ownLSN, long txnId) {
+            super(ownLSN, txnId);
+        }
+
+        AlreadyRBLN(long ownLSN) {
+            super(ownLSN);
+        }
+
+        @Override
+        void checkContains(RollbackTracker.Scanner scanner,
+                           RollbackTracker tracker) {
+            assertTrue("contains check for " + this + "\n tracker=" + tracker,
+                       scanner.positionAndCheck(ownLSN, txnId));
+            assertFalse("needsRollback check for " + this + "\n tracker=" +
+                        tracker, scanner.needsRollback());
+        }
+
+        @Override
+        String getName() {
+            return "AlreadyRBLN";
+        }
+    }
+
+    static class Abort extends TxnLogRecord {
+        Abort(long ownLSN, long txnId) {
+            super(ownLSN, txnId);
+        }
+
+        Abort(long ownLSN) {
+            super(ownLSN);
+        }
+
+        @Override
+        String getName() {
+            return "Abort";
+        }
+    }
+
+    static class Commit extends TxnLogRecord {
+        Commit(long ownLSN, long txnId) {
+            super(ownLSN, txnId);
+        }
+
+        Commit(long ownLSN) {
+            super(ownLSN);
+        }
+
+        @Override
+        void doConstructionStep(RollbackTracker tracker) {
+            tracker.checkCommit(ownLSN, txnId);
+        }
+
+        @Override
+        String getName() {
+            return "Commit";
+        }
+    }
+
+    /** A RollbackStart */
+    static class RBStart extends LogRecord {
+        final long matchpointLSN;
+        Set<Long> activeTxnIds;
+
+        RBStart(long ownLSN, long matchpointLSN) {
+            super(ownLSN);
+            this.matchpointLSN = matchpointLSN;
+            activeTxnIds = new HashSet<Long>();
+        }
+
+        void addActiveTxnIds(Long[] txnIds) {
+            for (Long id : txnIds) {
+                activeTxnIds.add(id);
+            }
+        }
+
+        @Override
+        void doConstructionStep(RollbackTracker tracker) {
+            tracker.register(new RollbackStart(NULL_VLSN, matchpointLSN,
+                                               activeTxnIds),
+                             ownLSN);
+        }
+
+        @Override
+        String getName() {
+            return "RBStart";
+        }
+    }
+
+    /** A RollbackEnd */
+    static class RBEnd extends LogRecord {
+        final long matchpointLSN;
+        final long rollbackStartLSN;
+
+        RBEnd(long ownLSN, long matchpointLSN, long rollbackStartLSN) {
+            super(ownLSN);
+            this.matchpointLSN = matchpointLSN;
+            this.rollbackStartLSN = rollbackStartLSN;
+        }
+
+        @Override
+        void doConstructionStep(RollbackTracker tracker) {
+            tracker.register(new RollbackEnd(matchpointLSN, rollbackStartLSN),
+                             ownLSN);
+        }
+
+        @Override
+        String getName() {
+            return "RBEnd";
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/CommitEntry.java b/test/com/sleepycat/je/recovery/stepwise/CommitEntry.java
new file mode 100644
index 0000000..e95fac0
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/CommitEntry.java
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+/*
+ * A Commit entry signals that some records should be moved from the
+ * not-yet-committed sets to the expected set.
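+ *
+ * A minimal usage sketch (hypothetical names; it assumes the harness has
+ * been building the per-txn maps while scanning the log):
+ *
+ *   Set<TestData> expected = new HashSet<TestData>();
+ *   Map<Long, Set<TestData>> inserted =
+ *       new HashMap<Long, Set<TestData>>();
+ *   Map<Long, Set<TestData>> deleted =
+ *       new HashMap<Long, Set<TestData>>();
+ *   inserted.put(-500L, insertedByTxn);  // records written by txn -500
+ *   new CommitEntry(commitLsn, -500).updateExpectedSet(
+ *       expected, inserted, deleted);
+ *   // expected now contains every record in insertedByTxn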
+ */
+public class CommitEntry extends LogEntryInfo {
+    private long txnId;
+
+    CommitEntry(long lsn, long txnId) {
+        super(lsn, 0, 0);
+        this.txnId = txnId;
+    }
+
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        Long mapKey = new Long(txnId);
+
+        /* Add any new records to the expected set. */
+        Set<TestData> records = newUncommittedRecords.get(mapKey);
+        if (records != null) {
+            Iterator<TestData> iter = records.iterator();
+            while (iter.hasNext()) {
+                useExpected.add(iter.next());
+            }
+        }
+
+        /* Remove any deleted records from the expected set. */
+        records = deletedUncommittedRecords.get(mapKey);
+        if (records != null) {
+            Iterator<TestData> iter = records.iterator();
+            while (iter.hasNext()) {
+                useExpected.remove(iter.next());
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/EntryTrackerReader.java b/test/com/sleepycat/je/recovery/stepwise/EntryTrackerReader.java
new file mode 100644
index 0000000..6f8e07c
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/EntryTrackerReader.java
@@ -0,0 +1,163 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.FileReader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.entry.LNLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.txn.TxnCommit;
+import com.sleepycat.je.utilint.DbLsn;
+
+/**
+ * EntryTrackerReader collects a list of EntryInfo describing all log entries
+ * in the truncated portion of a log. It lets the test know where to do a log
+ * truncation and remembers whether an inserted or deleted record was seen, in
+ * order to update the test's set of expected records.
+ */
+public class EntryTrackerReader extends FileReader {
+
+    /*
+     * entryInfo is a list with one element for each entry in the truncated
+     * area of the log.
+     */
+    private List<LogEntryInfo> entryInfo;
+    private DatabaseEntry dbt = new DatabaseEntry();
+    private LogEntry useLogEntry;
+    private LogEntryType useLogEntryType;
+    private boolean isCommit;
+
+    /**
+     * Create this reader to start at a given LSN.
+     */
+    public EntryTrackerReader(EnvironmentImpl env,
+                              long startLsn,
+                              List<LogEntryInfo> entryInfo)
+        throws DatabaseException {
+
+        super(env, 2000, true, startLsn, null,
+              -1, DbLsn.NULL_LSN);
+
+        this.entryInfo = entryInfo;
+    }
+
+    /**
+     * @return true if this is a targeted entry that should be processed.
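+     *
+     * Only user LNs and txn commit entries are targeted for full
+     * processing. For every other entry type just the LSN is recorded,
+     * via a plain LogEntryInfo, since that is all the truncation logic
+     * needs.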
+     */
+    protected boolean isTargetEntry() {
+        LogEntryType entryType =
+            LogEntryType.findType(currentEntryHeader.getType());
+        isCommit = false;
+        boolean targeted = true;
+
+        useLogEntryType = null;
+
+        if (entryType.isUserLNType()) {
+            useLogEntryType = entryType;
+        } else if (entryType == LogEntryType.LOG_TXN_COMMIT) {
+            useLogEntryType = LogEntryType.LOG_TXN_COMMIT;
+            isCommit = true;
+        } else {
+
+            /*
+             * Just make note; there is no need to process the entry, and
+             * nothing to record besides the LSN. Note that the offset has
+             * not been bumped by the FileReader, so use nextEntryOffset.
+             */
+            entryInfo.add
+                (new LogEntryInfo(DbLsn.makeLsn(window.currentFileNum(),
+                                                nextEntryOffset), 0, 0));
+            targeted = false;
+        }
+
+        if (useLogEntryType != null) {
+            useLogEntry = useLogEntryType.getSharedLogEntry();
+        }
+        return targeted;
+    }
+
+    /**
+     * This log entry has data which affects the expected set of records.
+     * We need to save each LSN and determine whether the value of the
+     * log entry should affect the expected set of records. For
+     * non-transactional entries, the expected set is affected right away.
+     * For transactional entries, we defer updates of the expected set until
+     * a commit is seen.
+     */
+    protected boolean processEntry(ByteBuffer entryBuffer)
+        throws DatabaseException {
+
+        /*
+         * Note that the offset has been bumped, so use currentEntryOffset
+         * for the LSN.
+         */
+        long lsn = DbLsn.makeLsn(window.currentFileNum(), currentEntryOffset);
+        useLogEntry.readEntry(envImpl, currentEntryHeader, entryBuffer);
+
+        boolean isTxnal = useLogEntryType.isTransactional();
+        long txnId = useLogEntry.getTransactionId();
+
+        if (isCommit) {
+
+            /*
+             * The txn id in a single item log entry is embedded within
+             * the item.
+             */
+            txnId = ((TxnCommit) useLogEntry.getMainItem()).getId();
+            entryInfo.add(new CommitEntry(lsn, txnId));
+        } else {
+            final LNLogEntry<?> lnLogEntry = (LNLogEntry<?>) useLogEntry;
+            final boolean isDupDb =
+                (lnLogEntry.getUnconvertedKeyLength() != 4);
+            lnLogEntry.postFetchInit(isDupDb);
+            final DatabaseEntry keyEntry = new DatabaseEntry();
+            final DatabaseEntry dataEntry = new DatabaseEntry();
+            lnLogEntry.getUserKeyData(keyEntry, dataEntry);
+            final int keyValue = IntegerBinding.entryToInt(keyEntry);
+            final int dataValue;
+            final boolean deleted = lnLogEntry.isDeleted();
+            if (deleted) {
+                dataValue = -1;
+            } else {
+                dataValue = IntegerBinding.entryToInt(dataEntry);
+            }
+
+            if (deleted) {
+                if (isTxnal) {
+                    entryInfo.add(new TxnalDeletedEntry(lsn, keyValue,
+                                                        dataValue, txnId));
+                } else {
+                    entryInfo.add(new NonTxnalDeletedEntry(lsn, keyValue,
+                                                           dataValue));
+                }
+            } else {
+                if (isTxnal) {
+                    entryInfo.add(new TxnalEntry(lsn, keyValue, dataValue,
+                                                 txnId));
+                } else {
+                    entryInfo.add(new NonTxnalEntry(lsn, keyValue, dataValue));
+                }
+            }
+        }
+
+        return true;
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/LogEntryInfo.java b/test/com/sleepycat/je/recovery/stepwise/LogEntryInfo.java
new file mode 100644
index 0000000..d400608
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/LogEntryInfo.java
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.je.utilint.DbLsn;
+
+/*
+ * A LogEntryInfo supports stepwise recovery testing, where the log is
+ * systematically truncated and recovery is executed. At each point in a log,
+ * there is a set of records that we expect to see. The LogEntryInfo
+ * encapsulates enough information about the current log entry so we can
+ * update the expected set accordingly.
+ */
+
+public class LogEntryInfo {
+    private long lsn;
+    int key;
+    int data;
+
+    LogEntryInfo(long lsn,
+                 int key,
+                 int data) {
+        this.lsn = lsn;
+        this.key = key;
+        this.data = data;
+    }
+
+    /*
+     * Implement this accordingly. For example, a LogEntryInfo which
+     * represents a non-txnal LN record would add that key/data to the
+     * expected set. A txnal delete LN record would delete the record
+     * from the expected set at commit.
+     *
+     * The default action is that the expected set is not changed.
+     */
+    public void updateExpectedSet
+        (Set<TestData> expectedSet,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {}
+
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("type=").append(this.getClass().getName());
+        sb.append(" lsn=").append(DbLsn.getNoFormatString(lsn));
+        sb.append(" key=").append(key);
+        sb.append(" data=").append(data);
+        return sb.toString();
+    }
+
+    public long getLsn() {
+        return lsn;
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/NonTxnalDeletedEntry.java b/test/com/sleepycat/je/recovery/stepwise/NonTxnalDeletedEntry.java
new file mode 100644
index 0000000..fe13b21
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/NonTxnalDeletedEntry.java
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+
+/*
+ * A non-transactional deleted log entry removes the matching record from
+ * the expected set right away.
+ */
+
+class NonTxnalDeletedEntry extends LogEntryInfo {
+    NonTxnalDeletedEntry(long lsn,
+                         int key,
+                         int data) {
+        super(lsn, key, data);
+    }
+
+    /* Delete this item from the expected set. */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        Iterator<TestData> iter = useExpected.iterator();
+        while (iter.hasNext()) {
+            TestData setItem = iter.next();
+            int keyValInSet = IntegerBinding.entryToInt(setItem.getKey());
+            if (keyValInSet == key) {
+                if (data == -1) {
+                    /* non-dup case, remove the matching key.
+                     */
+                    iter.remove();
+                    break;
+                } else {
+                    int dataValInSet =
+                        IntegerBinding.entryToInt(setItem.getData());
+                    if (dataValInSet == data) {
+                        iter.remove();
+                        break;
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/NonTxnalEntry.java b/test/com/sleepycat/je/recovery/stepwise/NonTxnalEntry.java
new file mode 100644
index 0000000..d90a8b1
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/NonTxnalEntry.java
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/*
+ * A non-transactional log entry should add itself to the expected set.
+ */
+
+public class NonTxnalEntry extends LogEntryInfo {
+    NonTxnalEntry(long lsn,
+                  int key,
+                  int data) {
+        super(lsn, key, data);
+    }
+
+    /* Add this record to the expected set right away; there is no commit
+     * to wait for. */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+
+        useExpected.add(new TestData(keyEntry, dataEntry));
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/TestData.java b/test/com/sleepycat/je/recovery/stepwise/TestData.java
new file mode 100644
index 0000000..50814f2
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/TestData.java
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.Arrays;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/**
+ * Wrapper class that encapsulates a record in a database used for recovery
+ * testing.
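+ *
+ * Equality is byte-wise on the copied key and data, e.g. (hypothetical
+ * values):
+ *
+ *   DatabaseEntry k = new DatabaseEntry();
+ *   DatabaseEntry d = new DatabaseEntry();
+ *   IntegerBinding.intToEntry(1, k);
+ *   IntegerBinding.intToEntry(99, d);
+ *   boolean same = new TestData(k, d).equals(new TestData(k, d)); // true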
+ */
+public class TestData implements Comparable<TestData> {
+    private DatabaseEntry key;
+    private DatabaseEntry data;
+
+    public TestData(DatabaseEntry key, DatabaseEntry data) {
+        this.key = new DatabaseEntry(key.getData());
+        this.data = new DatabaseEntry(data.getData());
+    }
+
+    public boolean equals(Object o) {
+        if (this == o)
+            return true;
+        if (!(o instanceof TestData))
+            return false;
+
+        TestData other = (TestData) o;
+        if (Arrays.equals(key.getData(), other.key.getData()) &&
+            Arrays.equals(data.getData(), other.data.getData())) {
+            return true;
+        } else
+            return false;
+    }
+
+    public String toString() {
+        return " k=" + IntegerBinding.entryToInt(key) +
+               " d=" + IntegerBinding.entryToInt(data);
+    }
+
+    public int hashCode() {
+        return toString().hashCode();
+    }
+
+    public DatabaseEntry getKey() {
+        return key;
+    }
+
+    public DatabaseEntry getData() {
+        return data;
+    }
+
+    /** TODO: do any recovery tests use a custom comparator? */
+    @Override
+    public int compareTo(TestData o) {
+        final int key1 = IntegerBinding.entryToInt(key);
+        final int key2 = IntegerBinding.entryToInt(o.key);
+        final int keyCmp = Integer.compare(key1, key2);
+        if (keyCmp != 0) {
+            return keyCmp;
+        }
+        final int data1 = IntegerBinding.entryToInt(data);
+        final int data2 = IntegerBinding.entryToInt(o.data);
+        return Integer.compare(data1, data2);
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/TxnalDeletedEntry.java b/test/com/sleepycat/je/recovery/stepwise/TxnalDeletedEntry.java
new file mode 100644
index 0000000..c34029b
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/TxnalDeletedEntry.java
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/*
+ * A transactional deleted log entry puts itself into the not-yet-committed
+ * deleted set; the record is removed from the expected set only when its
+ * commit is seen.
+ */
+
+public class TxnalDeletedEntry extends LogEntryInfo {
+    private long txnId;
+
+    TxnalDeletedEntry(long lsn,
+                      int key,
+                      int data,
+                      long txnId) {
+        super(lsn, key, data);
+        this.txnId = txnId;
+    }
+
+    /* Implement this accordingly. For example, a LogEntryInfo which
+     * represents a non-txnal LN record would add that key/data to the
+     * expected set. A txnal delete LN record would delete the record
+     * from the expected set at commit time.
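+     *
+     * Here that means the key/data pair is parked in
+     * deletedUncommittedRecords under this entry's txn id; if a
+     * CommitEntry for that txn is processed later, the record is removed
+     * from the expected set at that point, otherwise the delete never
+     * takes effect.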
+     */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+
+        Long mapKey = new Long(txnId);
+        Set<TestData> records = deletedUncommittedRecords.get(mapKey);
+        if (records == null) {
+            records = new HashSet<TestData>();
+            deletedUncommittedRecords.put(mapKey, records);
+        }
+        records.add(new TestData(keyEntry, dataEntry));
+    }
+}
diff --git a/test/com/sleepycat/je/recovery/stepwise/TxnalEntry.java b/test/com/sleepycat/je/recovery/stepwise/TxnalEntry.java
new file mode 100644
index 0000000..a7a81b1
--- /dev/null
+++ b/test/com/sleepycat/je/recovery/stepwise/TxnalEntry.java
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.recovery.stepwise;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.DatabaseEntry;
+
+/*
+ * A transactional log entry puts itself into the not-yet-committed set;
+ * it moves to the expected set when its commit is seen.
+ */
+
+public class TxnalEntry extends LogEntryInfo {
+    private long txnId;
+
+    TxnalEntry(long lsn,
+               int key,
+               int data,
+               long txnId) {
+        super(lsn, key, data);
+        this.txnId = txnId;
+    }
+
+    /* Park this record in the not-yet-committed insert set, keyed by its
+     * txn id; a later CommitEntry moves it to the expected set. */
+    @Override
+    public void updateExpectedSet
+        (Set<TestData> useExpected,
+         Map<Long, Set<TestData>> newUncommittedRecords,
+         Map<Long, Set<TestData>> deletedUncommittedRecords) {
+
+        DatabaseEntry keyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        IntegerBinding.intToEntry(key, keyEntry);
+        IntegerBinding.intToEntry(data, dataEntry);
+
+        Long mapKey = new Long(txnId);
+        Set<TestData> records = newUncommittedRecords.get(mapKey);
+        if (records == null) {
+            records = new HashSet<TestData>();
+            newUncommittedRecords.put(mapKey, records);
+        }
+        records.add(new TestData(keyEntry, dataEntry));
+    }
+}
diff --git a/test/com/sleepycat/je/rep.properties b/test/com/sleepycat/je/rep.properties
new file mode 100644
index 0000000..35c5061
--- /dev/null
+++ b/test/com/sleepycat/je/rep.properties
@@ -0,0 +1,6 @@
+je.log.totalBufferBytes=7001
+je.log.numBuffers=200
+je.rep.node.foo=address:localhost:3000,isPeer:true
+je.rep.node.bar=address:localhost:3001,isPeer:false
+je.rep.node.baz=address:localhost:3002,isPeer:false
+je.rep.local.address = 127.0.0.1:9999
diff --git a/test/com/sleepycat/je/rep/CheckAccessTest.java b/test/com/sleepycat/je/rep/CheckAccessTest.java
new file mode 100644
index 0000000..334fe5f
--- /dev/null
+++ b/test/com/sleepycat/je/rep/CheckAccessTest.java
@@ -0,0 +1,156 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.Iterator;
+import java.util.Properties;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Check various modes of database access, including with/without SSL.
+ */
+public class CheckAccessTest extends TestBase {
+
+    private File envRoot;
+    private File[] envHomes;
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        envRoot = SharedTestUtils.getTestDir();
+        envHomes = RepTestUtils.makeRepEnvDirs(envRoot, 2);
+        super.setUp();
+    }
+
+    /**
+     * Sanity check that access works with no SSL configured.
+     */
+    @Test
+    public void testBasicConfig()
+        throws Exception {
+
+        checkAccess(null);
+    }
+
+    /**
+     * Test that SSL works.
+     */
+    @Test
+    public void testSSLOnlyConfig()
+        throws Exception {
+
+        Properties props = new Properties();
+        setBasicSSLProperties(props);
+
+        checkAccess(props);
+    }
+
+    /**
+     * Set the basic SSL properties. These rely on the build.xml configuration
+     * that copies keystore and truststore files to the test environment.
+     */
+    public void setBasicSSLProperties(Properties props)
+        throws Exception {
+
+        RepTestUtils.setUnitTestSSLProperties(props);
+    }
+
+    /**
+     * Check whether a particular access configuration works.
+     *
+     * @param extraProperties properties to be appended to the standard
+     * property file
+     */
+    private void checkAccess(Properties extraProperties)
+        throws Exception {
+
+        String propString = "\n";
+        if (extraProperties != null) {
+            Iterator<String> piter =
+                extraProperties.stringPropertyNames().iterator();
+            while (piter.hasNext()) {
+                String key = piter.next();
+                String value = extraProperties.getProperty(key);
+                propString = propString + key + " = " + value + "\n";
+            }
+        }
+
+        TestUtils.readWriteJEProperties(envHomes[0], propString);
+        TestUtils.readWriteJEProperties(envHomes[1], propString);
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+
+        ReplicatedEnvironment master = null;
+        ReplicatedEnvironment replica = null;
+
+        /* Create the ReplicationConfig for master and replica. */
+        ReplicationConfig masterConfig = RepTestUtils.createRepConfig(1);
+        masterConfig.setDesignatedPrimary(true);
+        masterConfig.setHelperHosts(masterConfig.getNodeHostPort());
+
+        ReplicationConfig replicaConfig = RepTestUtils.createRepConfig(2);
+        replicaConfig.setHelperHosts(masterConfig.getNodeHostPort());
+
+        /*
+         * Attempt to create the master with the specified EnvironmentConfig.
+ */ + master = new ReplicatedEnvironment( + envHomes[0], masterConfig, envConfig); + + /* Check the specified EnvironmentConfig on the replica. */ + replica = new ReplicatedEnvironment( + envHomes[1], replicaConfig, envConfig); + + assertTrue(master != null); + assertTrue(replica != null); + + /* + * If the specified EnvironmentConfig is correct, wait for + * replication initialization to finish. + */ + while (replica.getState() != ReplicatedEnvironment.State.REPLICA) { + Thread.sleep(1000); + } + + /* Make sure the test runs on both master and replica. */ + assertTrue(master.getState().isMaster()); + assertTrue(!replica.getState().isMaster()); + + /* Close the replica and master. */ + replica.close(); + master.close(); + } +} diff --git a/test/com/sleepycat/je/rep/CheckConfigTest.java b/test/com/sleepycat/je/rep/CheckConfigTest.java new file mode 100644 index 0000000..d706fd7 --- /dev/null +++ b/test/com/sleepycat/je/rep/CheckConfigTest.java @@ -0,0 +1,668 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.logging.Logger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseExistsException; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Check that both master and replica nodes catch invalid environment and + * database configurations. + */ +public class CheckConfigTest extends TestBase { + + private static final String originalSkipHelperHostResolution = + System.getProperty(RepParams.SKIP_HELPER_HOST_RESOLUTION, "false"); + + private final Logger logger = + LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + + private File envRoot; + private File[] envHomes; + private boolean useCorruptHelperHost = false; + private boolean useLoopbackAddresses = true; + + @Override + @Before + public void setUp() + throws Exception { + + envRoot = SharedTestUtils.getTestDir(); + envHomes = RepTestUtils.makeRepEnvDirs(envRoot, 2); + super.setUp(); + } + + @Override + @After + public void tearDown() + throws Exception { + + super.tearDown(); + System.setProperty(RepParams.SKIP_HELPER_HOST_RESOLUTION, + originalSkipHelperHostResolution); + } + + /** + * Replicated environments do not support non transactional mode. 
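+     *
+     * For example, a config built like this (a sketch of what the test
+     * below exercises) is expected to be rejected when used to open a
+     * ReplicatedEnvironment:
+     *
+     *   EnvironmentConfig config = new EnvironmentConfig();
+     *   config.setAllowCreate(true);
+     *   config.setTransactional(false);  // invalid for replication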
+     */
+    @Test
+    public void testEnvNonTransactionalConfig()
+        throws Exception {
+
+        EnvironmentConfig config = createConfig();
+        config.setTransactional(false);
+        expectRejection(config);
+    }
+
+    /**
+     * A configuration of transactional + noLocking is invalid.
+     */
+    @Test
+    public void testEnvNoLockingConfig()
+        throws Exception {
+
+        EnvironmentConfig config = createConfig();
+        config.setLocking(false);
+        expectRejection(config);
+    }
+
+    /**
+     * ReadOnly = true should be rejected.
+     *
+     * Since setting an environment read only is only possible when the
+     * Environment already exists, this test first creates a normal
+     * Environment and then attempts to reopen it with a read only
+     * configuration.
+     */
+    @Test
+    public void testEnvReadOnlyConfig()
+        throws Exception {
+
+        EnvironmentConfig config = createConfig();
+        expectAcceptance(config);
+        config.setReadOnly(true);
+        expectRejection(config);
+    }
+
+    /**
+     * AllowCreate = false should be accepted.
+     *
+     * Since setting environment allowCreate to false is only possible when an
+     * Environment exists, this test creates a normal Environment and then
+     * reopens it with an allowCreate=false configuration.
+     */
+    @Test
+    public void testEnvAllowCreateFalseConfig()
+        throws Exception {
+
+        EnvironmentConfig config = createConfig();
+        expectAcceptance(config);
+        config.setAllowCreate(false);
+        expectAcceptance(config);
+    }
+
+    /**
+     * SharedCache = true should be accepted.
+     */
+    @Test
+    public void testEnvSharedCacheConfig()
+        throws Exception {
+
+        EnvironmentConfig config = createConfig();
+        config.setSharedCache(true);
+        expectAcceptance(config);
+    }
+
+    /**
+     * Serializable isolation = true should be accepted.
+     */
+    @Test
+    public void testEnvSerializableConfig()
+        throws Exception {
+
+        EnvironmentConfig config = createConfig();
+        config.setTxnSerializableIsolation(true);
+        expectAcceptance(config);
+    }
+
+    /**
+     * Check the handling of an unresolvable helper host.
+     */
+    @Test
+    public void testBadHelperHost()
+        throws Exception {
+        EnvironmentConfig config = createConfig();
+        useCorruptHelperHost = true;
+        System.setProperty(RepParams.SKIP_HELPER_HOST_RESOLUTION, "true");
+
+        /*
+         * The replica should fail because of the mix of a loopback address
+         * for the node and a non-resolvable address in the helpers.
+         */
+        checkEnvConfig(config,
+                       false /* masterInvalid */,
+                       true /* replicaInvalid */);
+
+        /*
+         * Remove any existing environment files to avoid problems when
+         * changing from loopback to local host names.
+         */
+        RepTestUtils.removeRepEnv(envHomes[0]);
+        RepTestUtils.removeRepEnv(envHomes[1]);
+
+        /*
+         * If the local host isn't a loopback address, arrange to use that
+         * address and expect the non-resolvable host to be OK because it is
+         * also a non-loopback address. Just skip if the local host is a
+         * loopback address.
+         */
+        if (!InetAddress.getLocalHost().isLoopbackAddress()) {
+            useLoopbackAddresses = false;
+            expectAcceptance(config);
+        }
+
+        /*
+         * If unknown host names are not resolvable to IP addresses, then test
+         * that the configuration fails when checking for unresolvable helper
+         * addresses. On some systems, unknown host names are redirected to IP
+         * addresses, so skip this test in that case.
+ */ + boolean unknownHostnamesAreResolved = false; + try { + InetAddress.getByName("unknownhostfoobar"); + unknownHostnamesAreResolved = true; + } catch (UnknownHostException e) { + } + if (!unknownHostnamesAreResolved) { + System.setProperty(RepParams.SKIP_HELPER_HOST_RESOLUTION, "false"); + expectRejection(config); + } + useCorruptHelperHost = false; + useLoopbackAddresses = true; + } + + /** + * Return a new transactional EnvironmentConfig for test use. + */ + private EnvironmentConfig createConfig() { + EnvironmentConfig config = new EnvironmentConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + + return config; + } + + /** + * Return a new transactional DatabaseConfig for test use. + */ + private DatabaseConfig createDbConfig() { + DatabaseConfig config = new DatabaseConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + + return config; + } + + /** + * Wrap checkEnvConfig in this method to make the intent of the test + * obvious. + */ + private void expectAcceptance(EnvironmentConfig envConfig) + throws Exception { + + checkEnvConfig(envConfig, + false /* masterInvalid */, + false /* replicaInvalid */); + } + + /** + * Wrap checkEnvConfig in this method to make the intent of the test + * obvious. + */ + private void expectRejection(EnvironmentConfig envConfig) + throws Exception { + + checkEnvConfig(envConfig, + true /* masterInvalid */, + true /* replicaInvalid */); + } + + /** + * Check whether an EnvironmentConfig is valid. + * + * @param envConfig the EnvironmentConfig to check + * @param masterInvalid whether creating master should fail + * @param replicaInvalid whether creating replica should fail + */ + private void checkEnvConfig(EnvironmentConfig envConfig, + boolean masterInvalid, + boolean replicaInvalid) + throws Exception { + + /* + * masterFail and replicaFail are true if the master or replica + * environment creation failed. + */ + boolean masterFail = false; + boolean replicaFail = false; + + ReplicatedEnvironment master = null; + ReplicatedEnvironment replica = null; + + /* Create the ReplicationConfig for master and replica. 
+         */
+        ReplicationConfig masterConfig = RepTestUtils.createRepConfig(1);
+        masterConfig.setDesignatedPrimary(true);
+
+        /*
+         * Use non-loopback addresses if requested, so that we can test
+         * unresolvable host names without a mix of loopback and non-loopback
+         * addresses.
+         */
+        final String localHost = InetAddress.getLocalHost().getHostName();
+        if (!useLoopbackAddresses) {
+            masterConfig.setNodeHostPort(
+                localHost + ":" + masterConfig.getNodePort());
+        }
+        masterConfig.setHelperHosts(masterConfig.getNodeHostPort());
+        ReplicationConfig replicaConfig = RepTestUtils.createRepConfig(2);
+        if (!useLoopbackAddresses) {
+            replicaConfig.setNodeHostPort(
+                localHost + ":" + replicaConfig.getNodePort());
+        }
+        if (useCorruptHelperHost) {
+            try {
+                replicaConfig.setHelperHosts(masterConfig.getNodeHostPort() +
+                                             ",unknownhostfoobar:1111");
+            } catch (IllegalArgumentException e) {
+                masterFail = true;
+                logger.info("Unresolvable helper: " + e);
+            }
+            final boolean checkHelperHostResolution =
+                !Boolean.getBoolean(RepParams.SKIP_HELPER_HOST_RESOLUTION);
+            assertEquals(checkHelperHostResolution, masterFail);
+
+            /*
+             * If creating the configuration failed because of an unresolvable
+             * helper host, then there is nothing more to test.
+             */
+            if (checkHelperHostResolution) {
+                return;
+            }
+        } else {
+            replicaConfig.setHelperHosts(masterConfig.getNodeHostPort());
+        }
+
+        /*
+         * Attempt to create the master with the specified EnvironmentConfig.
+         */
+        try {
+            master = new ReplicatedEnvironment(envHomes[0],
+                                               masterConfig,
+                                               envConfig);
+        } catch (IllegalArgumentException e) {
+            logger.info("Create master: " + e);
+            masterFail = true;
+        }
+
+        /*
+         * If the master was expected to fail, the environment opened for the
+         * replica in the following steps would actually try to become the
+         * master. Since the test needs to exercise both a master and a
+         * replica, create a valid master here.
+         */
+        if (masterInvalid) {
+            EnvironmentConfig okConfig =
+                RepTestUtils.createEnvConfig(RepTestUtils.DEFAULT_DURABILITY);
+            master = new ReplicatedEnvironment(envHomes[0], masterConfig,
+                                               okConfig);
+        }
+
+        /* Check the specified EnvironmentConfig on the replica. */
+        try {
+            replica = new ReplicatedEnvironment(envHomes[1],
+                                                replicaConfig,
+                                                envConfig);
+        } catch (IllegalArgumentException e) {
+            logger.info("Create replica: " + e);
+            replicaFail = true;
+        }
+
+        /* Check whether the master and replica creations are as expected. */
+        assertEquals("masterFail", masterInvalid, masterFail);
+        assertEquals("replicaFail", replicaInvalid, replicaFail);
+
+        /*
+         * If the replica is expected to fail, close the master and return.
+         */
+        if (replicaInvalid) {
+            if (master != null) {
+                assertTrue(master.getState().isMaster());
+                master.close();
+            }
+
+            return;
+        }
+
+        if (master != null && replica != null) {
+            /*
+             * If the specified EnvironmentConfig is correct, wait for
+             * replication initialization to finish.
+             */
+            while (replica.getState() != ReplicatedEnvironment.State.REPLICA) {
+                Thread.sleep(1000);
+            }
+
+            /* Make sure the test runs on both master and replica. */
+            assertTrue(master.getState().isMaster());
+            assertTrue(!replica.getState().isMaster());
+
+            /* Close the replica and master. */
+            replica.close();
+            master.close();
+        }
+    }
+
+    /**
+     * AllowCreate = false should be accepted.
+     *
+     * Setting allowCreate to false is only possible when the database already
+     * exists. Because of that, this test first creates a database and then
+     * reopens it with an allowCreate = false configuration.
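+     *
+     * A sketch of the reopen step (assuming the database "test" was
+     * already created in a replicated environment env):
+     *
+     *   DatabaseConfig dbConfig = createDbConfig();
+     *   dbConfig.setAllowCreate(false);
+     *   Database db = env.openDatabase(null, "test", dbConfig); // accepted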
+     */
+    @Test
+    public void testDbAllowCreateFalseConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        expectDbAcceptance(dbConfig, true);
+        dbConfig.setAllowCreate(false);
+        expectDbAcceptance(dbConfig, false);
+    }
+
+    /**
+     * Replicated databases do not support non-transactional mode.
+     */
+    @Test
+    public void testDbNonTransactionalConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setTransactional(false);
+        expectDbRejection(dbConfig, false);
+    }
+
+    /**
+     * A database configuration of transactional + deferredWrite is invalid.
+     */
+    @Test
+    public void testDbDeferredWriteConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setDeferredWrite(true);
+        expectDbRejection(dbConfig, false);
+    }
+
+    /**
+     * A database configuration of transactional + temporary is invalid.
+     */
+    @Test
+    public void testDbTemporaryConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setTemporary(true);
+        expectDbRejection(dbConfig, false);
+    }
+
+    /**
+     * ExclusiveCreate = true should be accepted on the master.
+     *
+     * Setting exclusiveCreate is expected to fail on the replica. When a
+     * database is created on the master, replication creates the same
+     * database on the replica, so when the replica itself tries to create
+     * the database it finds that the database already exists. With
+     * exclusiveCreate = true the replica therefore throws a
+     * DatabaseExistsException. The check for this is done within the logic
+     * for expectDbAcceptance.
+     */
+    @Test
+    public void testDbExclusiveCreateConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setExclusiveCreate(true);
+        expectDbAcceptance(dbConfig, true);
+    }
+
+    /**
+     * KeyPrefixing = true should be accepted.
+     */
+    @Test
+    public void testDbKeyPrefixingConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setKeyPrefixing(true);
+        expectDbAcceptance(dbConfig, false);
+    }
+
+    /**
+     * ReadOnly = true should be accepted.
+     *
+     * Opening a database read only is only possible when the database
+     * exists, so this test first creates a database and then reopens it with
+     * a read only configuration.
+     */
+    @Test
+    public void testDbReadOnlyConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        expectDbAcceptance(dbConfig, true);
+        dbConfig.setReadOnly(true);
+        expectDbAcceptance(dbConfig, false);
+    }
+
+    /**
+     * SortedDuplicates = true should be accepted.
+     */
+    @Test
+    public void testDbSortedDuplicatesConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setSortedDuplicates(true);
+        expectDbAcceptance(dbConfig, false);
+    }
+
+    /**
+     * OverrideBtreeComparator = true should be accepted.
+     */
+    @Test
+    public void testDbOverrideBtreeComparatorConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setOverrideBtreeComparator(true);
+        expectDbAcceptance(dbConfig, false);
+    }
+
+    /**
+     * OverrideDuplicateComparator = true should be accepted.
+     */
+    @Test
+    public void testDbOverrideDuplicateComparatorConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        dbConfig.setOverrideDuplicateComparator(true);
+        expectDbAcceptance(dbConfig, false);
+    }
+
+    /**
+     * UseExistingConfig = true should be accepted.
+     *
+     * UseExistingConfig is only possible when the database exists, so this
+     * test first creates a database and then reopens it with the
+     * useExistingConfig configuration.
+     */
+    @Test
+    public void testDbUseExistingConfig()
+        throws Exception {
+
+        DatabaseConfig dbConfig = createDbConfig();
+        expectDbAcceptance(dbConfig, true);
+        dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setUseExistingConfig(true);
+        dbConfig.setReadOnly(true);
+        expectDbAcceptance(dbConfig, false);
+    }
+
+    /**
+     * Wrap checkDbConfig in this method to make the intent of the test
+     * obvious.
+     */
+    private void expectDbAcceptance(DatabaseConfig dbConfig, boolean doSync)
+        throws Exception {
+
+        checkDbConfig(dbConfig, false /* isInvalid */, doSync);
+    }
+
+    /**
+     * Wrap checkDbConfig in this method to make the intent of the test
+     * obvious.
+     */
+    private void expectDbRejection(DatabaseConfig dbConfig, boolean doSync)
+        throws Exception {
+
+        checkDbConfig(dbConfig, true /* isInvalid */, doSync);
+    }
+
+    /**
+     * Checks whether a database configuration is valid.
+     *
+     * @param dbConfig The DatabaseConfig to check.
+     * @param isInvalid if true, dbConfig represents an invalid configuration
+     * and we expect database creation to fail.
+     * @param doSync If true, the test should do a group sync after creating
+     * the database on the master.
+     */
+    public void checkDbConfig(DatabaseConfig dbConfig,
+                              boolean isInvalid,
+                              boolean doSync)
+        throws Exception {
+
+        /*
+         * masterFail and replicaFail are true if the master or replica
+         * database creation failed.
+         */
+        boolean masterFail = false;
+        boolean replicaFail = false;
+
+        /* Create an array of replicated environments and join the group. */
+        RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2);
+        repEnvInfo[0].getRepConfig().setDesignatedPrimary(true);
+        RepTestUtils.joinGroup(repEnvInfo);
+
+        /* Create the database with the specified configuration on master. */
+        Database masterDb = null;
+        try {
+            masterDb = repEnvInfo[0].getEnv().openDatabase(null, "test",
+                                                           dbConfig);
+        } catch (IllegalArgumentException e) {
+            masterFail = true;
+        }
+
+        /*
+         * The test does a group sync when the tested configuration needs to
+         * create a real database first.
+         *
+         * If a group sync isn't done, the replica would incorrectly try to
+         * create the database, since it hasn't seen it yet. Because write
+         * operations on the replica are forbidden, the test would then fail
+         * unexpectedly.
+         */
+        if (doSync) {
+            RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+        }
+
+        /* Open the database with the specified configuration on replica. */
+        Database replicaDb = null;
+        try {
+            replicaDb = repEnvInfo[1].getEnv().openDatabase(null, "test",
+                                                            dbConfig);
+        } catch (IllegalArgumentException e) {
+            replicaFail = true;
+        } catch (ReplicaWriteException e) {
+            /*
+             * A ReplicaWriteException here means the replica tried to create
+             * the database, and replicas do not allow create operations;
+             * for this test that outcome is considered valid.
+             */
+        } catch (DatabaseExistsException e) {
+            replicaFail = true;
+        }
+
+        /* Check the validity here. */
+        if (isInvalid) {
+            assertTrue(masterFail && replicaFail);
+        } else {
+
+            /*
+             * The exclusiveCreate config is checked explicitly here, because
+             * it has different master/replica behavior.
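+             * On the master the database is genuinely new, so creation
+             * succeeds; on the replica the same database has already
+             * arrived through replication, so exclusive creation fails
+             * with DatabaseExistsException. Hence masterFail is expected
+             * to be false and replicaFail true below.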
+             */
+            if (dbConfig.getExclusiveCreate()) {
+                assertFalse(masterFail);
+                assertTrue(replicaFail);
+            } else {
+                assertFalse(masterFail || replicaFail);
+            }
+        }
+
+        /* Shut down the databases and environments. */
+        if (masterDb != null) {
+            masterDb.close();
+        }
+
+        if (replicaDb != null) {
+            replicaDb.close();
+        }
+
+        RepTestUtils.shutdownRepEnvs(repEnvInfo);
+    }
+}
diff --git a/test/com/sleepycat/je/rep/CommitPointConsistencyPolicyTest.java b/test/com/sleepycat/je/rep/CommitPointConsistencyPolicyTest.java
new file mode 100644
index 0000000..5d2ed4a
--- /dev/null
+++ b/test/com/sleepycat/je/rep/CommitPointConsistencyPolicyTest.java
@@ -0,0 +1,175 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.CommitToken;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+
+public class CommitPointConsistencyPolicyTest extends RepTestBase {
+
+    @Override
+    @Before
+    public void setUp()
+        throws Exception {
+
+        groupSize = 2;
+        super.setUp();
+
+        /* Add a secondary. */
+        repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1);
+        repEnvInfo[repEnvInfo.length-1].getRepConfig().setNodeType(
+            NodeType.SECONDARY);
+    }
+
+    @Test
+    public void testCommitPointConsistencyOnOpen() {
+        ReplicatedEnvironment menv = repEnvInfo[0].openEnv();
+        CommitToken token = populateDB(menv, TEST_DB_NAME, 10);
+        CommitPointConsistencyPolicy cp =
+            new CommitPointConsistencyPolicy(token, 100, TimeUnit.SECONDS);
+        for (int i = 1; i < repEnvInfo.length; i++) {
+            ReplicatedEnvironment renv = repEnvInfo[i].openEnv(cp);
+            /* Verify that the database is available on the replica. */
+            Database rdb = renv.openDatabase(null, TEST_DB_NAME, dbconfig);
+            rdb.close();
+        }
+    }
+
+    @Test
+    public void testVLSNConsistencyJoinGroup()
+        throws UnknownMasterException,
+               DatabaseException,
+               InterruptedException {
+
+        createGroup();
+        leaveGroupAllButMaster();
+        ReplicatedEnvironment masterRep = repEnvInfo[0].getEnv();
+
+        /* Populate just the master.
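+         * The returned CommitToken anchors cp1: a replica transaction
+         * configured with cp1 must not begin until the replica has
+         * replayed the log at least through that commit. A sketch of the
+         * pattern used below:
+         *
+         *   TransactionConfig tc = new TransactionConfig();
+         *   tc.setConsistencyPolicy(cp1);
+         *   Transaction txn = replica.beginTransaction(null, tc);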
*/ + CommitToken commitToken = populateDB(masterRep, TEST_DB_NAME, 100); + CommitPointConsistencyPolicy cp1 = + new CommitPointConsistencyPolicy(commitToken, 1, TimeUnit.SECONDS); + + final int failTimeout = 2000; + final int passTimeout = 5000; + final StatsConfig statsConf = new StatsConfig().setClear(true); + TxnThread[] txnThreads = new TxnThread[repEnvInfo.length]; + + for (int i = 1; i < repEnvInfo.length; i++) { + ReplicatedEnvironment replica = repEnvInfo[i].openEnv(); + + // In sync to the commit point + TransactionConfig tc = new TransactionConfig(); + tc.setConsistencyPolicy(cp1); + Transaction txn = replica.beginTransaction(null, tc); + txn.commit(); + + CommitToken futureCommitToken = + new CommitToken(commitToken.getRepenvUUID(), + commitToken.getVLSN() + 100); + + tc.setConsistencyPolicy( + new CommitPointConsistencyPolicy( + futureCommitToken, failTimeout, TimeUnit.MILLISECONDS)); + long start = System.currentTimeMillis(); + try { + txn = null; + // Unable to reach consistency, timeout. + txn = replica.beginTransaction(null, tc); + txn.abort(); + fail("Exception expected"); + } catch (ReplicaConsistencyException rce) { + long policyTimeout = rce.getConsistencyPolicy().getTimeout( + TimeUnit.MILLISECONDS); + assertTrue(policyTimeout <= + (System.currentTimeMillis() - start)); + } + + // reset statistics + replica.getRepStats(statsConf); + + // Have a replica transaction actually wait + tc.setConsistencyPolicy( + new CommitPointConsistencyPolicy( + futureCommitToken, passTimeout, TimeUnit.MILLISECONDS)); + TxnThread txnThread = new TxnThread(replica, tc); + txnThreads[i] = txnThread; + txnThread.start(); + Thread.yield(); // give the other threads a chance to block + } + + // Advance the master + populateDB(masterRep, TEST_DB_NAME, 100, 100); + + for (int i = 1; i < repEnvInfo.length; i++) { + ReplicatedEnvironment replica = repEnvInfo[i].getEnv(); + TxnThread txnThread = txnThreads[i]; + txnThread.join(passTimeout); + assertTrue(!txnThread.isAlive()); + assertNull("i=" + i + ": Exception: " + txnThread.testException, + txnThread.testException); + ReplicatedEnvironmentStats stats = replica.getRepStats(statsConf); + assertEquals(1, stats.getTrackerVLSNConsistencyWaits()); + + // Test with a commit token which is in the past replica does not + // need to wait. + + TransactionConfig tc = new TransactionConfig(); + tc.setConsistencyPolicy(cp1); + Transaction txn = replica.beginTransaction(null, tc); + stats = replica.getRepStats(statsConf.setClear(true)); + assertEquals(0, stats.getTrackerVLSNConsistencyWaits()); + txn.commit(); + } + } + + class TxnThread extends Thread { + final ReplicatedEnvironment replicator; + final TransactionConfig tc; + Exception testException = null; + + TxnThread(ReplicatedEnvironment replicator, TransactionConfig tc) { + this.replicator = replicator; + this.tc = tc; + } + + @Override + public void run() { + try { + Transaction txn = replicator.beginTransaction(null, tc); + txn.commit(); + } catch (Exception e) { + testException = e; + e.printStackTrace(); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/ConversionTest.java b/test/com/sleepycat/je/rep/ConversionTest.java new file mode 100644 index 0000000..ac10218 --- /dev/null +++ b/test/com/sleepycat/je/rep/ConversionTest.java @@ -0,0 +1,120 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Standalone environments can be converted once to be replicated environments. + * Replicated environments can't be opened in standalone mode. + */ +public class ConversionTest extends TestBase { + + private final File envRoot; + + public ConversionTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * Check that an environment which is opened for replication cannot be + * re-opened as a standalone environment in r/w mode + */ + @Test + public void testNoStandaloneReopen() + throws DatabaseException, IOException { + + RepEnvInfo[] repEnvInfo = initialOpenWithReplication(); + + /* Try to re-open standalone r/w, should fail. */ + try { + EnvironmentConfig reopenConfig = new EnvironmentConfig(); + reopenConfig.setTransactional(true); + @SuppressWarnings("unused") + Environment unused = new Environment(repEnvInfo[0].getEnvHome(), + reopenConfig); + fail("Should have thrown an exception."); + } catch (UnsupportedOperationException ignore) { + /* throw a more specific exception? */ + } + } + + /** + * Check that an environment which is opened for replication can + * also be opened as a standalone r/o environment. 
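+ *
+ * A minimal sketch of the read-only reopen verified below, assuming
+ * envHome is the replicated environment's home directory:
+ *
+ *   EnvironmentConfig ec = new EnvironmentConfig();
+ *   ec.setTransactional(true);
+ *   ec.setReadOnly(true);
+ *   Environment env = new Environment(envHome, ec); // succeeds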
+ */ + @Test + public void testStandaloneRO() + throws DatabaseException, IOException { + + RepEnvInfo[] repEnvInfo = initialOpenWithReplication(); + + /* Try to re-open standalone r/o, should succeed */ + try { + EnvironmentConfig reopenConfig = new EnvironmentConfig(); + reopenConfig.setTransactional(true); + reopenConfig.setReadOnly(true); + Environment env = new Environment(repEnvInfo[0].getEnvHome(), + reopenConfig); + env.close(); + } catch (DatabaseException e) { + fail("Should be successful" + e); + } + } + + @Test + public void testStandaloneUtility() + throws DatabaseException, IOException { + + RepEnvInfo[] repEnvInfo = initialOpenWithReplication(); + + /* Try to re-open as a read/only utility, should succeed */ + try { + EnvironmentConfig reopenConfig = new EnvironmentConfig(); + reopenConfig.setTransactional(true); + reopenConfig.setReadOnly(true); + EnvironmentImpl envImpl = + CmdUtil.makeUtilityEnvironment(repEnvInfo[0].getEnvHome(), + true /* readOnly */); + envImpl.close(); + } catch (DatabaseException e) { + fail("Should be successful" + e); + } + } + + private RepEnvInfo[] initialOpenWithReplication() + throws DatabaseException, IOException { + + RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + RepTestUtils.joinGroup(repEnvInfo); + for (RepEnvInfo repi : repEnvInfo) { + repi.getEnv().close(); + } + return repEnvInfo; + } +} diff --git a/test/com/sleepycat/je/rep/DatabaseOperationTest.java b/test/com/sleepycat/je/rep/DatabaseOperationTest.java new file mode 100644 index 0000000..89808ca --- /dev/null +++ b/test/com/sleepycat/je/rep/DatabaseOperationTest.java @@ -0,0 +1,1035 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.dual.trigger.InvokeTest.RDBT; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.trigger.Trigger; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Check that database operations are properly replicated. + */ +public class DatabaseOperationTest extends TestBase { + + private final File envRoot; + private final String[] dbNames = new String[] {"DbA", "DbB"}; + private RepEnvInfo[] repEnvInfo; + private Map expectedResults; + private final boolean verbose = Boolean.getBoolean("verbose"); + + public DatabaseOperationTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * Check that master->replica replication of database operations work. + */ + @Test + public void testBasic() + throws Exception { + + expectedResults = new HashMap(); + + try { + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + execDatabaseOperations(master); + checkEquality(repEnvInfo); + + doMoreDatabaseOperations(master, repEnvInfo); + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /* Test whether database configure changes are replayed on replicas. */ + @Test + public void testDatabaseConfigUpdates() + throws Exception { + + try { + /* Open the ReplicatedEnvironments. */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + ReplicatedEnvironment replica = repEnvInfo[1].getEnv(); + + assertTrue(master.getState().isMaster()); + assertTrue(replica.getState().isReplica()); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + /* Open a database on the master. */ + Database db = master.openDatabase(null, "testDb", dbConfig); + db.close(); + + /* Override database properties. 
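+ * The setOverride* flags are what allow new values to replace the
+ * properties already stored persistently for the database; without
+ * them, the comparators, triggers and other settings passed in here
+ * would be ignored in favor of the stored ones, as the non-override
+ * case below verifies.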
*/
+ dbConfig.setOverrideBtreeComparator(true);
+ dbConfig.setOverrideDuplicateComparator(true);
+ dbConfig.setOverrideTriggers(true);
+ dbConfig.setBtreeComparator(new FooComparator());
+ dbConfig.setDuplicateComparator(new BarComparator());
+ dbConfig.setNodeMaxEntries(512);
+ dbConfig.setKeyPrefixing(true);
+
+ /* Set trigger properties. */
+ List<Trigger> triggers = new LinkedList<Trigger>
+ (Arrays.asList((Trigger) new RDBT("t1"),
+ (Trigger) new RDBT("t2")));
+ dbConfig.setTriggers(triggers);
+
+ db = master.openDatabase(null, "testDb", dbConfig);
+ assertTrue
+ (db.getConfig().getBtreeComparator() instanceof FooComparator);
+ assertTrue(db.getConfig().getDuplicateComparator()
+ instanceof BarComparator);
+ assertTrue(db.getConfig().getNodeMaxEntries() == 512);
+ assertTrue(db.getConfig().getKeyPrefixing());
+ db.close();
+
+ /*
+ * Don't override a database BtreeComparator and make sure the
+ * BtreeComparator doesn't change.
+ */
+ dbConfig.setOverrideBtreeComparator(false);
+ dbConfig.setBtreeComparator(new BarComparator());
+ db = master.openDatabase(null, "testDb", dbConfig);
+ assertTrue
+ (db.getConfig().getBtreeComparator() instanceof FooComparator);
+ assertFalse
+ (db.getConfig().getBtreeComparator() instanceof BarComparator);
+ insertData(db);
+ db.close();
+
+ /* Do a sync to make sure that all replicated entries are replayed. */
+ VLSN vlsn = RepTestUtils.syncGroupToLastCommit(repEnvInfo,
+ repEnvInfo.length);
+ RepTestUtils.checkNodeEquality(vlsn, false, repEnvInfo);
+
+ /*
+ * Open the database on the replica and make sure its
+ * BtreeComparator is set.
+ */
+ dbConfig = new DatabaseConfig();
+ dbConfig.setTransactional(true);
+ dbConfig.setUseExistingConfig(true);
+ db = replica.openDatabase(null, "testDb", dbConfig);
+
+ /*
+ * Check that configuration changes made on the master are
+ * replayed on the replica.
+ */
+ assertTrue
+ (db.getConfig().getBtreeComparator() instanceof FooComparator);
+ assertTrue(db.getConfig().getDuplicateComparator()
+ instanceof BarComparator);
+ assertTrue(db.getConfig().getNodeMaxEntries() == 512);
+ assertTrue(db.getConfig().getKeyPrefixing());
+ assertTrue(db.getConfig().getTriggers().size() == 2);
+ for (Trigger trigger : db.getConfig().getTriggers()) {
+ assertTrue(trigger instanceof RDBT);
+ }
+
+ db.close();
+ } finally {
+ RepTestUtils.shutdownRepEnvs(repEnvInfo);
+ }
+ }
+
+ /*
+ * Test updating a database config on the master while the same database
+ * is being read on the replica.
+ */
+ @Test
+ public void testMasterUpdateWhileReplicaReading()
+ throws Exception {
+
+ try {
+ /* Construct the replication group. */
+ repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2);
+ ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+ ReplicatedEnvironment replica = repEnvInfo[1].getEnv();
+
+ assertTrue(master.getState().isMaster());
+ assertTrue(replica.getState().isReplica());
+
+ DatabaseConfig dbConfig = new DatabaseConfig();
+ dbConfig.setAllowCreate(true);
+ dbConfig.setTransactional(true);
+
+ /* Open a database on the master and write some data. */
+ Database db = master.openDatabase(null, "testDb", dbConfig);
+ insertData(db);
+ db.close();
+
+ /*
+ * Opening the database with a changed config on the replica is
+ * expected to fail, because applying the change requires a write
+ * operation.
+ */
+ Database replicaDb = null;
+ try {
+ DatabaseConfig repConfig = dbConfig.clone();
+ repConfig.setNodeMaxEntries(512);
+ replicaDb = replica.openDatabase(null, "testDb", repConfig);
+ fail("Expected exception here.");
+ } catch (ReplicaWriteException e) {
+ /* Expected exception. */
+ } catch (Exception e) {
+ fail("Unexpected exception: " + e);
+ }
+
+ /*
+ * Open the database on the replica with no database config
+ * changes, and start a reading thread on the replica.
+ */
+ replicaDb = replica.openDatabase(null, "testDb", dbConfig);
+ CountDownLatch start = new CountDownLatch(1);
+ CountDownLatch end = new CountDownLatch(1);
+ ReplicaReadingThread thread =
+ new ReplicaReadingThread(start, end, replicaDb);
+ thread.start();
+
+ /* Make sure the replica reading thread has done some work. */
+ start.await();
+
+ /*
+ * Change the database config on the master; replaying the change
+ * causes a DatabasePreemptedException on the replica's open
+ * database handle.
+ */
+ dbConfig.setNodeMaxEntries(512);
+ db = master.openDatabase(null, "testDb", dbConfig);
+ db.close();
+
+ RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+
+ /* End the reading thread. */
+ thread.setExit(true);
+ end.await();
+ assertTrue(thread.getException());
+
+ /*
+ * Because of the DatabasePreemptedException, the underlying
+ * DatabaseImpl has been invalidated; close the stale handle.
+ */
+ replicaDb.close();
+
+ /* Open the database on the replica again, using the existing
+ * config. */
+ dbConfig = new DatabaseConfig();
+ dbConfig.setTransactional(true);
+ dbConfig.setUseExistingConfig(true);
+ replicaDb = replica.openDatabase(null, "testDb", dbConfig);
+ assertEquals(512, replicaDb.getConfig().getNodeMaxEntries());
+ replicaDb.close();
+ } finally {
+ RepTestUtils.shutdownRepEnvs(repEnvInfo);
+ }
+ }
+
+ /**
+ * Check that master->replica replication of database operations works,
+ * and also verify that a replica has logged enough information to act
+ * as the master later on.
+ */
+ @Test
+ public void testCascade()
+ throws Exception {
+
+ expectedResults = new HashMap();
+
+ try {
+ repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 5);
+
+ /* Open all the replicated environments and select a master. */
+ ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+ /* Shut down a replica. */
+ for (RepEnvInfo repInfo : repEnvInfo) {
+ if (repInfo.getEnv().getState().isReplica()) {
+ repInfo.closeEnv();
+ break;
+ }
+ }
+
+ /* Record the former master id. */
+ int formerMasterId = RepInternal.getNodeId(master);
+ /* Do some database work. */
+ execDatabaseOperations(master);
+ /* Sync the replicators and shut down the master. */
+ checkEquality(RepTestUtils.getOpenRepEnvs(repEnvInfo));
+ for (RepEnvInfo repInfo: repEnvInfo) {
+ if (repInfo.getEnv() != null &&
+ repInfo.getEnv().getState().isMaster()) {
+ repInfo.closeEnv();
+ break;
+ }
+ }
+
+ /* Find out the new master for those open replicators. */
+ master = RepTestUtils.openRepEnvsJoin(repEnvInfo);
+ /* Make sure the master is not the former one. */
+ assertTrue(formerMasterId != RepInternal.getNodeId(master));
+ doMoreDatabaseOperations(master,
+ RepTestUtils.getOpenRepEnvs(repEnvInfo));
+
+ /* Re-open closed replicators and check the node equality. */
+ master = RepTestUtils.joinGroup(repEnvInfo);
+ /* Verify the new master is different from the first master.
*/ + assertTrue(formerMasterId != RepInternal.getNodeId(master)); + assertEquals(RepTestUtils.getOpenRepEnvs(repEnvInfo).length, + repEnvInfo.length); + checkEquality(repEnvInfo); + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /** + * Check that ReplicaWriteException occurs when doing a DB name operation + * (rename, remove, truncate) on a replica, but that it can later be done + * if that node is elected master. Previously a bug [#22394] prevented the + * operation on the master because a use count on the database was not + * decremented when the ReplicaWriteException was thrown. + */ + @Test + public void testDbNameOpReplicaWriteException() + throws Exception { + + expectedResults = new HashMap(); + + try { + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + int formerMasterId = RepInternal.getNodeId(master); + + /* Do some database work and sync the replicators. */ + execDatabaseOperations(master); + checkEquality(RepTestUtils.getOpenRepEnvs(repEnvInfo)); + + /* Try DB ops on replicas -- should get ReplicaWriteException. */ + for (RepEnvInfo repInfo: repEnvInfo) { + if (repInfo.getEnv().getState().isMaster()) { + continue; + } + try { + doMoreDatabaseOperations(repInfo.getEnv(), repEnvInfo); + fail(); + } catch (ReplicaWriteException expected) { + } + } + + /* Shutdown the master. */ + for (RepEnvInfo repInfo: repEnvInfo) { + if (repInfo.getEnv() != null && + repInfo.getEnv().getState().isMaster()) { + repInfo.closeEnv(); + break; + } + } + + /* Find out the new master for those open replicators. */ + master = RepTestUtils.openRepEnvsJoin(repEnvInfo); + assertTrue(formerMasterId != RepInternal.getNodeId(master)); + + /* + * DB ops should succeed on former replica. Before the bug fix + * [#22394] this operation looped forever. + */ + doMoreDatabaseOperations(master, + RepTestUtils.getOpenRepEnvs(repEnvInfo)); + + /* Re-open closed replicators and check the node equality. */ + master = RepTestUtils.joinGroup(repEnvInfo); + assertEquals(RepTestUtils.getOpenRepEnvs(repEnvInfo).length, + repEnvInfo.length); + checkEquality(repEnvInfo); + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + @Entity + static class MyEntity { + @PrimaryKey(sequence="id") + int key; + String data; + @SecondaryKey(relate=MANY_TO_ONE) + int skey = 1; + } + + /** + * Check that local (non-replicated) databases are not replicated. Also + * check that creating a replicated database on a replica is prohibited. + * [#20543] + */ + @Test + public void testLocalDatabases() + throws Exception { + + expectedResults = new HashMap(); + + boolean success = false; + + try { + final int nEnvs = 3; + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, nEnvs); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + final StoreConfig storeConfig = new StoreConfig(); + dbConfig.setAllowCreate(true); + storeConfig.setAllowCreate(true); + + /* + * Ensure that a replicated database cannot be created on a + * replica. 
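+ *
+ * The expected failure mode depends on the transactional setting, as
+ * the loop below checks. A minimal sketch, assuming a replica handle
+ * env:
+ *
+ *   dbConfig.setTransactional(true);
+ *   env.openDatabase(null, "anotherRepDb", dbConfig);
+ *   // throws ReplicaWriteException
+ *   dbConfig.setTransactional(false);
+ *   env.openDatabase(null, "anotherRepDb", dbConfig);
+ *   // throws IllegalArgumentException
+ */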
+ */ + dbConfig.setReplicated(true); + storeConfig.setReplicated(true); + + for (int i = 0; i < nEnvs; i += 1) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + if (env.getState().isReplica()) { + for (final boolean txnl : new boolean[] {false, true}) { + dbConfig.setTransactional(txnl); + storeConfig.setTransactional(txnl); + try { + env.openDatabase(null, "anotherRepDb", dbConfig); + fail(); + } catch (ReplicaWriteException expected) { + assertTrue(txnl); + } catch (IllegalArgumentException expected) { + assertFalse(txnl); + } + try { + new EntityStore(env, "anotherRepStore", + storeConfig); + } catch (ReplicaWriteException expected) { + assertTrue(txnl); + } catch (IllegalArgumentException expected) { + assertFalse(txnl); + } + } + } + } + + /* + * Create a different local DB on each node and write a record with + * a different data value. + */ + final Database[] txnlDbs = new Database[nEnvs]; + final Database[] nonTxnlDbs = new Database[nEnvs]; + final Database[] dwDbs = new Database[nEnvs]; + final Database[] tempDbs = new Database[nEnvs]; + final EntityStore[] txnlStores = new EntityStore[nEnvs]; + final EntityStore[] nonTxnlStores = new EntityStore[nEnvs]; + final EntityStore[] dwStores = new EntityStore[nEnvs]; + final EntityStore[] tempStores = new EntityStore[nEnvs]; + + dbConfig.setReplicated(false); + dbConfig.setTransactional(false); + storeConfig.setReplicated(false); + storeConfig.setTransactional(false); + + createLocalDbs("txnl", + txnlDbs, + dbConfig.clone().setTransactional(true), + txnlStores, + storeConfig.clone().setTransactional(true)); + createLocalDbs("nonTxnl", + nonTxnlDbs, + dbConfig, + nonTxnlStores, + storeConfig); + createLocalDbs("dw", + dwDbs, + dbConfig.clone().setDeferredWrite(true), + dwStores, + storeConfig.clone().setDeferredWrite(true)); + createLocalDbs("temp", + tempDbs, + dbConfig.clone().setTemporary(true), + tempStores, + storeConfig.clone().setTemporary(true)); + + /* Test abort. */ + checkLocalDbAbort(txnlDbs); + + /* + * Ensure that no records are replicated by checking that there is + * only the one expected record on each node. + */ + RepTestUtils.syncGroupToLastCommit(repEnvInfo, nEnvs); + checkAndRemoveLocalDbs("txnl", txnlDbs, txnlStores); + checkAndRemoveLocalDbs("nonTxnl", nonTxnlDbs, nonTxnlStores); + checkAndRemoveLocalDbs("dw", dwDbs, dwStores); + checkAndRemoveLocalDbs("temp", tempDbs, tempStores); + + /* Ensure that all databases (removed above) do not exist. */ + RepTestUtils.syncGroupToLastCommit(repEnvInfo, nEnvs); + for (int i = 0; i < nEnvs; i += 1) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + assertEquals(0, env.getDatabaseNames().size()); + } + + /* After removing local databases, all nodes should be equal. */ + checkEquality(repEnvInfo); + RepTestUtils.shutdownRepEnvs(repEnvInfo); + success = true; + } finally { + if (!success) { + try { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } catch (Throwable e) { + /* Do not preempt in-flight exception. */ + System.out.println("Shutdown error while another " + + "exception is in flight: " + e); + } + } + } + } + + /** + * Create local DBs and write a single record in each with a unique key + * for each env. 
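+ * The key call is setReplicated(false) (and its StoreConfig
+ * equivalent), which keeps the database out of the replication
+ * stream entirely. A minimal sketch, assuming any node's handle env:
+ *
+ *   DatabaseConfig c = new DatabaseConfig();
+ *   c.setAllowCreate(true);
+ *   c.setReplicated(false); // local to this node
+ *   Database db = env.openDatabase(null, "localDb", c);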
+ */ + private void createLocalDbs(final String namePrefix, + final Database[] localDbs, + final DatabaseConfig dbConfig, + final EntityStore[] localStores, + final StoreConfig storeConfig) { + + final int nEnvs = repEnvInfo.length; + assertEquals(nEnvs, localDbs.length); + assertEquals(nEnvs, localStores.length); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < nEnvs; i += 1) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + + final String dbName = namePrefix + "Db"; + final Database db = env.openDatabase(null, dbName, dbConfig); + localDbs[i] = db; + assertTrue(!db.getConfig().getReplicated()); + + key.setData(new byte[] { (byte) i }); + data.setData(new byte[] { (byte) i }); + OperationStatus status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + status = db.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(i, data.getData()[0]); + + final String storeName = namePrefix + "Store"; + final EntityStore store = + new EntityStore(env, storeName, storeConfig); + localStores[i] = store; + assertTrue(!store.getConfig().getReplicated()); + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, MyEntity.class); + assertTrue(!index.getDatabase().getConfig().getReplicated()); + + MyEntity entity = new MyEntity(); + entity.data = String.valueOf(i); + index.put(entity); + assertEquals(1, entity.key); + entity = index.get(1); + assertNotNull(entity); + assertEquals(1, entity.key); + assertEquals(String.valueOf(i), entity.data); + } + } + + /** + * Checks that undo works for a non-replicated txnal db on a replica. + * There was a bug where the undo databases were not updated properly, and + * although this seems to have had no impact except in rare corner cases, + * we check here that undo works for good measure. [#22875] + */ + private void checkLocalDbAbort(final Database[] localDbs) { + final int nEnvs = repEnvInfo.length; + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < nEnvs; i += 1) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + final Transaction txn = env.beginTransaction( + null, new TransactionConfig().setLocalWrite(true)); + final Database db = localDbs[i]; + + /* Update. */ + key.setData(new byte[] { (byte) i }); + data.setData(new byte[] { (byte) (i + 1) }); + OperationStatus status = db.put(txn, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Insert. */ + key.setData(new byte[] { (byte) (i + 1) }); + data.setData(new byte[] { (byte) i }); + status = db.putNoOverwrite(txn, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Abort. */ + txn.abort(); + + /* Check that update was undone. */ + key.setData(new byte[] { (byte) i }); + status = db.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(i, data.getData()[0]); + + /* Check that insertion was undone. */ + key.setData(new byte[] { (byte) (i + 1) }); + status = db.get(null, key, data, null); + assertSame(OperationStatus.NOTFOUND, status); + } + } + + /** + * Check local DBs and then remove them. 
+ */ + private void checkAndRemoveLocalDbs(final String namePrefix, + final Database[] localDbs, + final EntityStore[] localStores) { + + final int nEnvs = repEnvInfo.length; + assertEquals(nEnvs, localDbs.length); + assertEquals(nEnvs, localStores.length); + final boolean isTemp = "temp".equals(namePrefix); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < nEnvs; i += 1) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + + final String dbName = namePrefix + "Db"; + final Database db = localDbs[i]; + assertEquals(1, db.count()); + key.setData(new byte[] { (byte) i }); + OperationStatus status = db.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(i, data.getData()[0]); + db.close(); + if (!isTemp) { + String newName = "new." + dbName; + env.renameDatabase(null, dbName, newName); + env.truncateDatabase(null, newName, false); + env.removeDatabase(null, newName); + } + + final String storeName = namePrefix + "Store"; + final EntityStore store = localStores[i]; + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, MyEntity.class); + assertEquals(1, index.count()); + MyEntity entity = index.get(1); + assertNotNull(entity); + assertEquals(1, entity.key); + assertEquals(String.valueOf(i), entity.data); + store.close(); + if (!isTemp) { + for (final String name : env.getDatabaseNames()) { + if (name.startsWith("persist#" + storeName)) { + String newName = "new." + name; + env.renameDatabase(null, name, newName); + env.truncateDatabase(null, newName, false); + env.removeDatabase(null, newName); + } + } + } + } + } + + /** + * Check that with a local (non-replicated) EntityStore, auto-commit + * transactions do not check replication consistency. [#20543] + */ + @Test + public void testLocalStoreNoConsistency() + throws IOException { + + /* Register custom consistency policy format while quiescent. */ + RepUtils.addConsistencyPolicyFormat + (RepTestUtils.AlwaysFail.NAME, + new RepTestUtils.AlwaysFailFormat()); + + /* Open with max durabity and AlwaysFail consistency. */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + for (RepEnvInfo rei : repEnvInfo) { + rei.getEnvConfig().setDurability + (RepTestUtils.SYNC_SYNC_ALL_DURABILITY); + rei.getRepConfig().setConsistencyPolicy + (new RepTestUtils.AlwaysFail()); + } + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + final String repStoreName = "repStore"; + final String localStoreName = "localStore"; + + /* On master, create replicated store and write a record. */ + final StoreConfig repStoreConfig = new StoreConfig(); + repStoreConfig.setTransactional(true); + repStoreConfig.setAllowCreate(true); + final EntityStore repStore = + new EntityStore(master, repStoreName, repStoreConfig); + final PrimaryIndex repIndex = + repStore.getPrimaryIndex(Integer.class, MyEntity.class); + MyEntity entity = new MyEntity(); + entity.data = "aaa"; + repIndex.put(entity); + assertEquals(1, entity.key); + + /* On replica, create local store and write/read/delete/truncate. 
*/ + ReplicatedEnvironment replica = null; + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() != master) { + replica = info.getEnv(); + break; + } + } + final StoreConfig localStoreConfig = new StoreConfig(); + localStoreConfig.setTransactional(true); + localStoreConfig.setAllowCreate(true); + localStoreConfig.setReplicated(false); + final EntityStore localStore = + new EntityStore(replica, localStoreName, localStoreConfig); + final PrimaryIndex localIndex = + localStore.getPrimaryIndex(Integer.class, MyEntity.class); + entity = new MyEntity(); + entity.data = "aaa"; + localIndex.put(entity); + assertEquals(1, entity.key); + entity = localIndex.get(1); + assertNotNull(entity); + assertEquals(1, entity.key); + assertEquals("aaa", entity.data); + final SecondaryIndex localSecIndex = + localStore.getSecondaryIndex(localIndex, Integer.class, "skey"); + entity = localSecIndex.get(1); + assertNotNull(entity); + assertEquals(1, entity.key); + assertEquals("aaa", entity.data); + final boolean deleted = localIndex.delete(1); + assertTrue(deleted); + localStore.truncateClass(MyEntity.class); + + localStore.close(); + repStore.close(); + + /* + * Check that auto-commit DB name operations can be done using the base + * API, which has a special auto-commit mechanism that is different + * from the one used in the DPL (tested above). + */ + for (String dbName : replica.getDatabaseNames()) { + if (dbName.startsWith("persist#" + localStoreName)) { + /* Truncate */ + replica.truncateDatabase(null, dbName, false); + final String newName = dbName + ".new"; + /* Rename */ + replica.renameDatabase(null, dbName, newName); + try { + replica.renameDatabase(null, dbName, newName); + fail(); + } catch (DatabaseNotFoundException expected) { + } + try { + replica.truncateDatabase(null, dbName, false); + fail(); + } catch (DatabaseNotFoundException expected) { + } + /* Remove */ + replica.removeDatabase(null, newName); + try { + replica.removeDatabase(null, newName); + fail(); + } catch (DatabaseNotFoundException expected) { + } + } + } + + /* We're done. */ + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + /* Truncate, rename and remove databases on replicators. */ + private void doMoreDatabaseOperations(ReplicatedEnvironment master, + RepEnvInfo[] repInfoArray) + throws Exception { + + for (String dbName : dbNames) { + truncateDatabases(master, dbName, repInfoArray); + master.renameDatabase(null, dbName, "new" + dbName); + checkEquality(repInfoArray); + master.removeDatabase(null, "new" + dbName); + checkEquality(repInfoArray); + } + } + + /** + * Execute a variety of database operations on this node. + */ + @SuppressWarnings("unchecked") + private void execDatabaseOperations(ReplicatedEnvironment env) + throws Exception { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(false); + + /* Make a vanilla database and add some records. */ + Database db = env.openDatabase(null, dbNames[0], dbConfig); + insertData(db); + expectedResults.put(dbNames[0], + new TestDb(db.getConfig(), db.count())); + db.close(); + + /* Make a database with comparators */ + dbConfig.setBtreeComparator(new FooComparator()); + dbConfig.setDuplicateComparator + ((Class>) + Class.forName("com.sleepycat.je.rep." 
+ + "DatabaseOperationTest$BarComparator")); + db = env.openDatabase(null, dbNames[1], dbConfig); + expectedResults.put(dbNames[1], + new TestDb(db.getConfig(), db.count())); + db.close(); + } + + /* Insert some data for truncation verfication. */ + private void insertData(Database db) + throws Exception { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < 10; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo", data); + db.put(null, key, data); + } + } + + /* + * Truncate the database on the master and check whether the db.count + * is 0 after truncation. + */ + private void truncateDatabases(ReplicatedEnvironment master, + String dbName, + RepEnvInfo[] repInfoArray) + throws Exception { + + /* Check the correction of db.count before truncation. */ + long expectedCount = expectedResults.get(dbName).count; + DatabaseConfig dbConfig = + expectedResults.get(dbName).dbConfig.cloneConfig(); + checkCount(repInfoArray, dbName, dbConfig, expectedCount); + + /* Truncate the database and do the check. */ + master.truncateDatabase(null, dbName, true); + /* Do the sync so that the replicators do the truncation. */ + RepTestUtils.syncGroupToLastCommit(repInfoArray, repInfoArray.length); + checkCount(repInfoArray, dbName, dbConfig, 0); + checkEquality(repInfoArray); + } + + /* Check that the number of records in the database is correct */ + private void checkCount(RepEnvInfo[] repInfoArray, + String dbName, + DatabaseConfig dbConfig, + long dbCount) + throws Exception { + + for (RepEnvInfo repInfo : repInfoArray) { + Database db = + repInfo.getEnv().openDatabase(null, dbName, dbConfig); + assertEquals(dbCount, db.count()); + db.close(); + } + } + + private void checkEquality(RepEnvInfo[] repInfoArray) + throws Exception { + + VLSN vlsn = RepTestUtils.syncGroupToLastCommit(repInfoArray, + repInfoArray.length); + RepTestUtils.checkNodeEquality(vlsn, verbose, repInfoArray); + } + + /** + * Keep track of the database name and other characteristics, to + * be used in validating data. + */ + static class TestDb { + DatabaseConfig dbConfig; + long count; + + TestDb(DatabaseConfig dbConfig, long count) { + this.dbConfig = dbConfig.cloneConfig(); + this.count = count; + } + } + + /** + * A placeholder comparator class, just for testing whether comparators + * replicate properly. + */ + @SuppressWarnings("serial") + public static class FooComparator implements Comparator, + Serializable { + + public FooComparator() { + } + + public int compare(@SuppressWarnings("unused") byte[] o1, + @SuppressWarnings("unused") byte[] o2) { + /* No need to really fill in. */ + return 0; + } + } + + /** + * A placeholder comparator class, just for testing whether comparators + * replicate properly. + */ + @SuppressWarnings("serial") + public static class BarComparator implements Comparator, + Serializable { + public BarComparator() { + } + + public int compare(@SuppressWarnings("unused") byte[] arg0, + @SuppressWarnings("unused") byte[] arg1) { + /* No need to really fill in. */ + return 0; + } + } + + /* A thread doing reads on the replica. 
*/
+ private class ReplicaReadingThread extends Thread {
+ private CountDownLatch start;
+ private CountDownLatch end;
+ private Database db;
+ private boolean exit = false;
+ private boolean getException = false;
+
+ public ReplicaReadingThread(CountDownLatch start,
+ CountDownLatch end,
+ Database db) {
+ this.start = start;
+ this.end = end;
+ this.db = db;
+ }
+
+ @Override
+ public void run() {
+ try {
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry data = new DatabaseEntry();
+ while (true) {
+ for (int i = 0; i < 10; i++) {
+ IntegerBinding.intToEntry(i, key);
+ try {
+ db.get(null, key, data, null);
+ } catch (DatabasePreemptedException e) {
+
+ /*
+ * DatabasePreemptedException is expected if the
+ * db.get() is invoked while JE is preempting this
+ * database.
+ */
+ getException = true;
+ } catch (NullPointerException e) {
+
+ /*
+ * NullPointerException is expected if db.get() is
+ * invoked after the preemption of this database
+ * has finished.
+ */
+ getException = true;
+ }
+
+ if (!getException) {
+ assertEquals
+ ("herococo",
+ StringBinding.entryToString(data));
+ }
+ }
+
+ if (start.getCount() > 0) {
+ start.countDown();
+ }
+
+ if (exit && getException) {
+ break;
+ }
+ }
+ } finally {
+ /* Let the main thread go on. */
+ end.countDown();
+ }
+ }
+
+ /* Exit the thread. */
+ public void setExit(boolean exit) {
+ this.exit = exit;
+ }
+
+ public boolean getException() {
+ return getException;
+ }
+ }
+}
diff --git a/test/com/sleepycat/je/rep/ElectableGroupSizeOverrideTest.java b/test/com/sleepycat/je/rep/ElectableGroupSizeOverrideTest.java
new file mode 100644
index 0000000..782e928
--- /dev/null
+++ b/test/com/sleepycat/je/rep/ElectableGroupSizeOverrideTest.java
@@ -0,0 +1,264 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Durability.ReplicaAckPolicy;
+import com.sleepycat.je.Durability.SyncPolicy;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+public class ElectableGroupSizeOverrideTest extends RepTestBase {
+
+ /*
+ * Verify that elections can be held, and writes performed by a minority
+ * with an override in place.
+ */
+ @Test
+ public void testBasic()
+ throws InterruptedException {
+ createGroup();
+ /* Shut down the entire group. */
+ closeNodes(repEnvInfo);
+
+ /* Verify that the node cannot come up on its own. */
+ RepEnvInfo ri0 = repEnvInfo[0];
+ setEnvSetupTimeout("2 s");
+ try {
+ ri0.openEnv();
+ fail("UnknownMasterException expected.");
+ } catch (UnknownMasterException ume) {
+ /* Expected. */
+ }
+ /* Restore the timeout.
*/ + setEnvSetupTimeout(RepParams.ENV_SETUP_TIMEOUT.getDefault()); + + startGroupWithOverride(1); + + /* Resume normal operations, eliminate override */ + setElectableGroupSize(0, repEnvInfo); + RepEnvInfo mi = restartNodes(repEnvInfo); + assertNotNull(mi); + closeNodes(repEnvInfo); + } + + /** + * tests a 5 node group, with the master failing as part of the majority + * of the nodes being lost. + * + * 1) Shutdown nodes n1-n3, including n1 the master + * 2) Verify no master amongst the remaining + * 3) Set override + * 4) Verify master emerges and write transactions can be committed. + * 5) Remove override + * 6) Bring up down nodes n1-n3 -- group is in normal working order + */ + @Test + public void testMasterDownOverride() throws InterruptedException { + createGroup(); + assertTrue(repEnvInfo[0].getEnv().getState().isMaster()); + + /* Shutdown a simple majority, including the Master. */ + final int simpleMajority = (repEnvInfo.length + 1) / 2; + RepEnvInfo[] downNodes = copyArray(repEnvInfo, 0, simpleMajority); + RepEnvInfo[] activeNodes = + copyArray(repEnvInfo, simpleMajority, + repEnvInfo.length - simpleMajority); + closeNodes(downNodes); + + for (RepEnvInfo ri : activeNodes) { + /* No master amongst the remaining nodes. */ + assertTrue(!ri.getEnv().getState().isMaster()); + } + setMutableElectableGroupSize(simpleMajority-1, activeNodes); + + RepEnvInfo master = + findMasterAndWaitForReplicas(10000, activeNodes.length - 1, + activeNodes); + + /* They should now be able to conclude an election. */ + assertTrue(master != null); + + /* Write should succeed without exceptions with the override */ + tryWrite(findMaster(activeNodes).getEnv(), "dbok"); + + /* Bring up a down node, restoring a simple majority of active nodes */ + ReplicatedEnvironment renv0 = downNodes[0].openEnv(); + assertTrue(renv0.getState().isReplica()); + + /* Restore normal functioning. */ + setMutableElectableGroupSize(0, activeNodes); + + /* Bring up the rest of the nodes. */ + restartNodes(copyArray(downNodes, 1, downNodes.length - 1)); + } + + /* Copy a part of an array to a new array. */ + private RepEnvInfo[] copyArray(RepEnvInfo[] nodes, + int srcStart, + int copyLength) { + RepEnvInfo[] newNodes = new RepEnvInfo[copyLength]; + System.arraycopy(nodes, srcStart, newNodes, 0, copyLength); + + return newNodes; + } + + /** + * tests a 5 node group, with the master being retained when the majority + * of the nodes is lost. + * + * 1) Shutdown nodes n3-n5, n1 is the master and is alive. + * 2) Verify that master can no longer commit transactions + * 3) Set override + * 4) Verify that write transactions can be committed + * 5) Remove override + * 6) Bring up down nodes n3-n5 -- group is in normal working order + */ + @Test + public void testMasterUpOverride() throws InterruptedException { + createGroup(); + assertTrue(repEnvInfo[0].getEnv().getState().isMaster()); + + /* Shutdown a simple majority, excluding the Master. 
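+ * For the 5-node group used by this test, (5 + 1) / 2 = 3 nodes are
+ * shut down, leaving the master plus one replica. The override of
+ * simpleMajority - 1 = 2 applied below shrinks the electable group
+ * so that those two nodes can again satisfy SIMPLE_MAJORITY
+ * durability.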
*/ + final int simpleMajority = (repEnvInfo.length + 1) / 2; + RepEnvInfo[] downNodes = + copyArray(repEnvInfo, simpleMajority - 1, + repEnvInfo.length - simpleMajority + 1); + RepEnvInfo[] activeNodes = copyArray(repEnvInfo, 0, + simpleMajority - 1); + closeNodes(downNodes); + + assertTrue(repEnvInfo[0].getEnv().getState().isMaster()); + + /* Write should fail without the override */ + try { + tryWrite(findMaster(activeNodes).getEnv(), "dbfail"); + fail("Exception expected"); + } catch (InsufficientAcksException iae) { + // ok + } catch (InsufficientReplicasException ire) { + // ok + } + setMutableElectableGroupSize(simpleMajority-1, activeNodes); + + /* Write should succeed without exceptions with the override */ + tryWrite(findMaster(activeNodes).getEnv(), "dbok"); + + /* Bring up a down node, restoring a simple majority of active nodes */ + ReplicatedEnvironment renv = downNodes[0].openEnv(); + assertTrue(renv.getState().isReplica()); + + /* Restore normal functioning. */ + setMutableElectableGroupSize(0, activeNodes); + + /* Bring up the rest of the nodes. */ + restartNodes(copyArray(downNodes, 1, downNodes.length - 1)); + } + + private void startGroupWithOverride(int override) + throws InterruptedException { + + /* Now Try bringing up just one node using override */ + setElectableGroupSize(override, repEnvInfo); + + RepEnvInfo[] activeNodes = copyArray(repEnvInfo, 0, override); + RepEnvInfo mi = restartNodes(activeNodes); + assertNotNull(mi); + + ReplicatedEnvironment menv = mi.getEnv(); + /* Write must succeed without exceptions. */ + tryWrite(menv, "dbok" + override); + /* + * It should be possible for the other nodes to find the master + * and join. + */ + for (int i=override; i < repEnvInfo.length; i++) { + repEnvInfo[i].openEnv(); + assertTrue(repEnvInfo[i].getEnv().getState().isReplica()); + } + /* The master should be unchanged */ + assertTrue(menv.getState().isMaster()); + closeNodes(repEnvInfo); + } + + /* + * Attempt write operation by creating a database. Caller knows whether or + * not to expect an exception. + */ + private void tryWrite(ReplicatedEnvironment repEnv, String dbName) { + Database db = null; + + try { + Transaction txn = repEnv.beginTransaction(null, null); + db = repEnv.openDatabase(txn, dbName, dbconfig); + txn.commit(new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.SIMPLE_MAJORITY)); + } finally { + if (db != null) { + /* Close database even in presence of exceptions. */ + db.close(); + } + } + } + + /** + * Sets the electable group size in the configuration associated with each + * of the nodes. + * + * @param override the override value + * @param nodes the configs where the override is to be applied + */ + void setElectableGroupSize(int override, RepEnvInfo... nodes) { + for (RepEnvInfo ri : nodes) { + ri.getRepConfig().setElectableGroupSizeOverride(override); + } + } + + /** + * Sets the electable group size mutable associated with an open + * environment handle. + * + * @param override the override value + * @param nodes the nodes where the override is to be applied + */ + void setMutableElectableGroupSize(int override, RepEnvInfo... nodes) { + for (RepEnvInfo ri : nodes) { + ReplicationConfig mutableConfig = ri.getEnv().getRepConfig(); + mutableConfig.setElectableGroupSizeOverride(override); + ri.getEnv().setRepMutableConfig(mutableConfig); + } + } + + /** + * Sets the setup timeout associated with all the nodes in the test. 
+ * + * @param duration the amount of time to wait + */ + private void setEnvSetupTimeout(String duration) { + for (RepEnvInfo ri : repEnvInfo) { + ri.getRepConfig().setConfigParam + (RepParams.ENV_SETUP_TIMEOUT.getName(), duration); + } + } +} diff --git a/test/com/sleepycat/je/rep/ExceptionIdiomsTest.java b/test/com/sleepycat/je/rep/ExceptionIdiomsTest.java new file mode 100644 index 0000000..7380bf9 --- /dev/null +++ b/test/com/sleepycat/je/rep/ExceptionIdiomsTest.java @@ -0,0 +1,313 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.util.test.TestBase; + +/** + * Dummy junit test captures illustrative code for HA exception handling + * idioms. + */ +public class ExceptionIdiomsTest extends TestBase { + + /* + * The number of seconds to wait between retries when a sufficient + * number of replicas are not available for a transaction. + */ + private static final int INSUFFICIENT_REPLICA_RETRY_SEC = 1; + + /* Amount of time to wait after a lock conflict. */ + private static final int LOCK_CONFLICT_RETRY_SEC = 1; + + /* Amount of time to wait to let a replica catch up before retrying. */ + private static final int CONSISTENCY_RETRY_SEC = 1; + + private static final int STATE_CHANGE_RETRY_SEC = 1; + private static final int ENVIRONMENT_RETRY_SEC = 1; + + /* The maximum number of times to retry handle creation. */ + private static final int REP_HANDLE_RETRY_MAX = 10; + + /* The maximum number of times to retry the transaction. */ + static final int TRANSACTION_RETRY_MAX = 10; + + /* Exists only to satisfy junit. */ + @Test + public void testNULL() { + } + + /** + * The method illustrates the outermost loop used to maintain a valid + * environment handle. It handles all exceptions that invalidate the + * environment, and creates a new environment handle so that the + * application can recover and continue to make progress in the face of + * errors that might result in environment invalidation. + * + * @param envHome the directory containing the replicated environment + * @param repConfig the replication configuration + * @param envConfig the environment configuration + * + * @throws InterruptedException if interrupted + */ + void environmentLoop(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig) + throws InterruptedException { + + while (true) { + ReplicatedEnvironment repEnv = null; + try { + repEnv = getEnvironmentHandle(envHome, repConfig, envConfig); + transactionDispatchLoop(repEnv); + } catch (InsufficientLogException insufficientLog) { + /* Restore the log files from another node in the group. 
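+ * InsufficientLogException means this node is too far behind to
+ * catch up from the replication stream because the log files it
+ * needs are no longer available from the feeder; NetworkRestore
+ * copies the needed files from a current member of the group, after
+ * which the environment handle can be recreated and the loop
+ * retried.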
*/ + NetworkRestore networkRestore = new NetworkRestore(); + networkRestore.execute(insufficientLog, + new NetworkRestoreConfig()); + continue; + } catch (RollbackException rollback) { + + /* + * Any transient state that is dependent on the environment + * must be re-synchronized to account for transactions that + * may have been rolled back. + */ + continue; + } finally { + if (repEnv != null) { + repEnv.close(); + } + } + } + } + + /** + * Creates the replicated environment handle and returns it. It will retry + * indefinitely if a master could not be established because a sufficient + * number of nodes were not available, or there were networking issues, + * etc. + * + * @return the newly created replicated environment handle + * + * @throws InterruptedException if the operation was interrupted + */ + ReplicatedEnvironment getEnvironmentHandle(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig) + throws InterruptedException { + + /* + * In this example we retry REP_HANDLE_RETRY_MAX times, but a + * production HA application may retry indefinitely. + */ + for (int i = 0; i < REP_HANDLE_RETRY_MAX; i++) { + try { + return + new ReplicatedEnvironment(envHome, repConfig, envConfig); + } catch (UnknownMasterException unknownMaster) { + + /* + * Indicates there is a group level problem: insufficient nodes + * for an election, network connectivity issues, etc. Wait and + * retry to allow the problem to be resolved. + */ + + /* + * INSERT APP-SPECIFIC CODE HERE: Application would typically + * log this issue, page a sysadmin, etc., to get the underlying + * system issues resolved and retry. + */ + Thread.sleep(ENVIRONMENT_RETRY_SEC * 1000); + continue; + } + } + + /* + * INSERT APP-SPECIFIC CODE HERE: For example, an applications may + * throw an exception or retry indefinitely in the above loop. + */ + throw new IllegalStateException("Unable to open handle after " + + REP_HANDLE_RETRY_MAX + " tries"); + } + + /** + * Initiates transactions based upon the state of the node. + *

+ * The code in the method is single threaded, but the dispatch could
+ * also be used to start threads to perform transactional work.
+ *
+ * @param repEnv the replicated environment
+ *
+ * @throws InterruptedException
+ */
+ private void transactionDispatchLoop(ReplicatedEnvironment repEnv)
+ throws InterruptedException {
+
+ while (true) {
+ final State state = repEnv.getState();
+ if (state.isUnknown()) {
+
+ /*
+ * Typically means there is a group level problem, insufficient
+ * nodes for an election, network connectivity issues, etc.
+ * Wait and retry to allow the problem to be resolved.
+ */
+ Thread.sleep(STATE_CHANGE_RETRY_SEC * 1000);
+ continue;
+ } else if (state.isDetached()) {
+
+ /*
+ * The node is no longer in communication with the group.
+ * Reopen the environment, so the application can resume.
+ */
+ return;
+ }
+
+ /*
+ * The node is in the master or replica state. Assumes that a
+ * higher level has sent it the appropriate type (read or write)
+ * of transaction workload. If that isn't the case, the handler
+ * for ReplicaWriteException will cause it to be redirected to the
+ * correct node using an application-specific mechanism.
+ */
+ boolean readOnly = false; // Set based upon app-specific knowledge
+ doTransaction(repEnv, readOnly);
+ }
+ }
+
+ /**
+ * Illustrates the handling of exceptions thrown in the process of
+ * creating and executing a transaction. It retries the transaction if the
+ * exception indicates that a retry is warranted.
+ *
+ * @param repEnv the replicated transactional environment
+ * @param readOnly determines whether the transaction to be run is read
+ * only.
+ * @throws InterruptedException
+ * @throws InsufficientLogException environment invalidating exception
+ * handled at the outer level.
+ * @throws RollbackException environment invalidation exception handled at
+ * the outer level.
+ */
+ private void doTransaction(ReplicatedEnvironment repEnv,
+ boolean readOnly)
+ throws InterruptedException,
+ InsufficientLogException,
+ RollbackException {
+
+ boolean success = false;
+ long sleepMillis = 0;
+ final TransactionConfig txnConfig = readOnly ?
+ new TransactionConfig().setDurability(Durability.READ_ONLY_TXN) :
+ null;
+
+ for (int i = 0; i < TRANSACTION_RETRY_MAX; i++) {
+ /* Sleep before retrying. */
+ if (sleepMillis != 0) {
+ Thread.sleep(sleepMillis);
+ sleepMillis = 0;
+ }
+ Transaction txn = null;
+ try {
+ txn = repEnv.beginTransaction(null, txnConfig);
+
+ /* INSERT APP-SPECIFIC CODE HERE: Do transactional work. */
+
+ txn.commit();
+ success = true;
+ return;
+ } catch (InsufficientReplicasException insufficientReplicas) {
+
+ /*
+ * Give replicas a chance to contact this master, in case they
+ * have not had a chance to do so following an election.
+ */
+ sleepMillis = INSUFFICIENT_REPLICA_RETRY_SEC * 1000;
+ continue;
+ } catch (InsufficientAcksException insufficientAcks) {
+
+ /*
+ * Transaction has been committed at this node. The other
+ * acknowledgments may be late in arriving, or may never arrive
+ * because the replica just went down.
+ */
+
+ /*
+ * INSERT APP-SPECIFIC CODE HERE: For example, repeat
+ * idempotent changes to ensure they went through.
+ *
+ * Note that 'success' is false at this point, although some
+ * applications may consider the transaction to be complete.
+ */
+ txn = null;
+ return;
+ } catch (ReplicaWriteException replicaWrite) {
+
+ /*
+ * Attempted a modification while in the Replica state.
+ * + * INSERT APP-SPECIFIC CODE HERE: Cannot accomplish the changes + * on this node, redirect the write to the new master and retry + * the transaction there. This could be done by forwarding the + * request to the master here, or by returning an error to the + * requester and retrying the request at a higher level. + */ + return; + } catch (LockConflictException lockConflict) { + + /* + * Retry the transaction. Note that LockConflictException + * covers the HA LockPreemptedException. + */ + sleepMillis = LOCK_CONFLICT_RETRY_SEC * 1000; + continue; + } catch (ReplicaConsistencyException replicaConsistency) { + + /* + * Retry the transaction. The timeout associated with the + * ReplicaConsistencyPolicy may need to be relaxed if it's too + * stringent. + */ + sleepMillis = CONSISTENCY_RETRY_SEC * 1000; + continue; + } finally { + + if (!success) { + if (txn != null) { + txn.abort(); + } + + /* + * INSERT APP-SPECIFIC CODE HERE: Perform any app-specific + * cleanup. + */ + } + } + } + + /* + * INSERT APP-SPECIFIC CODE HERE: Transaction failed, despite retries. + * Take some app-specific course of action. + */ + } +} diff --git a/test/com/sleepycat/je/rep/ExternalNodeTypeTest.java b/test/com/sleepycat/je/rep/ExternalNodeTypeTest.java new file mode 100644 index 0000000..cdacafe --- /dev/null +++ b/test/com/sleepycat/je/rep/ExternalNodeTypeTest.java @@ -0,0 +1,207 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep;
+
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.RepNodeImpl;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.util.ReplicationGroupAdmin;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.VLSN;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.logging.Logger;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Unit test for the EXTERNAL node type.
+ */
+public class ExternalNodeTypeTest extends RepTestBase {
+
+    /* test db */
+    private final String dbName = "SUBSCRIPTION_UNIT_TEST_DB";
+    private final int startKey = 1;
+    /* test db with 10k keys */
+    private final int numKeys = 1024*10;
+    private final List<Integer> keys = new ArrayList<>();
+
+    /* a rep group with 1 master, 2 replicas */
+    private final int numReplicas = 2;
+    private final int numDataNodes = 1 + numReplicas;
+    private final int groupSize = 1 + numReplicas;
+    private Logger logger;
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        logger = LoggerUtils.getLoggerFixedPrefix(getClass(),
+                                                  "ExternalNodeTypeTest");
+    }
+
+    @Override
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+    }
+
+    /* Test that an external node can handshake and sync up with the feeder. */
+    @Test
+    public void testExternalNodeType() throws Exception {
+
+        /* create and verify a replication group */
+        prepareTestEnv();
+
+        ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv();
+        /* populate some data and verify */
+        populateDataAndVerify(masterEnv);
+
+        final MockClientNode mockClientNode =
+            new MockClientNode(NodeType.EXTERNAL, masterEnv, logger);
+
+        /* handshake with feeder */
+        mockClientNode.handshakeWithFeeder();
+
+        /* sync up with feeder */
+        final VLSN startVLSN = mockClientNode.syncupWithFeeder(VLSN.FIRST_VLSN);
+
+        /* verify */
+        assertEquals("Mismatch start vlsn", VLSN.FIRST_VLSN, startVLSN);
+
+        /* receive 10K keys */
+        mockClientNode.consumeMsgLoop(numKeys);
+        assertEquals("Unexpected number of received keys",
+                     numKeys, mockClientNode.getReceivedMsgs().size());
+
+        mockClientNode.shutdown();
+    }
+
+    /* Test that an external node is correctly dumped by
+       ReplicationGroupAdmin. */
+    @Test
+    public void testDumpExternalNode() throws Exception {
+
+        /* create and verify a replication group */
+        prepareTestEnv();
+
+        ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv();
+        /* populate some data and verify */
+        populateDataAndVerify(masterEnv);
+
+        final MockClientNode mockClientNode =
+            new MockClientNode(NodeType.EXTERNAL, masterEnv, logger);
+
+        /* handshake with feeder */
+        mockClientNode.handshakeWithFeeder();
+
+        /* sync up with feeder */
+        mockClientNode.syncupWithFeeder(VLSN.FIRST_VLSN);
+
+        /* get an instance of rep group admin */
+        final ReplicationNetworkConfig repNetConfig =
+            RepTestUtils.readRepNetConfig();
+        final ReplicationGroupAdmin repGroupAdmin =
+            repNetConfig.getChannelType().isEmpty() ?
+            new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                                      masterEnv.getRepConfig()
+                                               .getHelperSockets())
+            :
+            new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                                      masterEnv.getRepConfig()
+                                               .getHelperSockets(),
+                                      repNetConfig);
+
+        final RepGroupImpl repGroupImpl = repGroupAdmin.getGroup()
+                                                       .getRepGroupImpl();
+        final Set<RepNodeImpl> result = repGroupImpl.getExternalMembers();
+        assertEquals("Expect only one external node.", 1, result.size());
+        final RepNodeImpl node = result.iterator().next();
+        assertEquals("Node name mismatch", mockClientNode.nodeName,
+                     node.getName());
+        assertEquals("Node type mismatch", NodeType.EXTERNAL, node.getType());
+
+        mockClientNode.shutdown();
+    }
+
+    /* Create a test env and verify it is in good shape */
+    private void prepareTestEnv() throws InterruptedException {
+
+        createGroup(numDataNodes);
+
+        for (int i = 0; i < numDataNodes; i++) {
+            final ReplicatedEnvironment env = repEnvInfo[i].getEnv();
+            final int targetGroupSize = groupSize;
+
+            ReplicationGroup group = null;
+            for (int j = 0; j < 100; j++) {
+                group = env.getGroup();
+                if (group.getNodes().size() == targetGroupSize) {
+                    break;
+                }
+                /* Wait for the replica to catch up. */
+                Thread.sleep(1000);
+            }
+            assertEquals("Nodes", targetGroupSize, group.getNodes().size());
+            assertEquals(RepTestUtils.TEST_REP_GROUP_NAME, group.getName());
+            for (int ii = 0; ii < targetGroupSize; ii++) {
+                RepTestUtils.RepEnvInfo rinfo = repEnvInfo[ii];
+                final ReplicationConfig repConfig = rinfo.getRepConfig();
+                ReplicationNode member =
+                    group.getMember(repConfig.getNodeName());
+                assertNotNull("Member", member);
+                assertEquals(repConfig.getNodeName(), member.getName());
+                assertEquals(repConfig.getNodeType(), member.getType());
+                assertEquals(repConfig.getNodeSocketAddress(),
+                             member.getSocketAddress());
+            }
+
+            /* verify data nodes */
+            final Set<ReplicationNode> dataNodes = group.getDataNodes();
+            for (final ReplicationNode n : dataNodes) {
+                assertEquals(NodeType.ELECTABLE, n.getType());
+            }
+            logger.info("data nodes verified");
+        }
+    }
+
+    /* Populate data into test db and verify */
+    private void populateDataAndVerify(ReplicatedEnvironment masterEnv) {
+        createTestData();
+        populateDB(masterEnv, dbName, keys);
+        readDB(masterEnv, dbName, startKey, numKeys);
+        logger.info(numKeys + " records (start key: " +
+                    startKey + ") have been populated into db " +
+                    dbName + " and verified");
+    }
+
+    /* Create a list of (k, v) pairs for testing */
+    private void createTestData() {
+        for (int i = startKey; i < startKey + numKeys; i++) {
+            keys.add(i);
+        }
+    }
+
+}
diff --git a/test/com/sleepycat/je/rep/GroupCommitTest.java b/test/com/sleepycat/je/rep/GroupCommitTest.java
new file mode 100644
index 0000000..8c83712
--- /dev/null
+++ b/test/com/sleepycat/je/rep/GroupCommitTest.java
@@ -0,0 +1,196 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+
+/**
+ * Test for group commit functionality.
+ */
+public class GroupCommitTest extends RepTestBase {
+
+    @Override
+    @Before
+    public void setUp()
+        throws Exception {
+
+        /* Need just one replica for this test. */
+        groupSize = 2;
+
+        super.setUp();
+    }
+
+    /**
+     * Verify that group commits can be initiated by either exceeding the
+     * time interval, or the group commit size.
+     */
+    @Test
+    public void testBasic()
+        throws InterruptedException {
+
+        /* Use a very generous full second for the group commit interval. */
+        final long intervalNs = TimeUnit.SECONDS.toNanos(1);
+        final int maxGroupCommit = 4;
+
+        initGroupCommitConfig(intervalNs, maxGroupCommit);
+
+        createGroup();
+        State state = repEnvInfo[0].getEnv().getState();
+        assertEquals(State.MASTER, state);
+        ReplicatedEnvironment menv = repEnvInfo[0].getEnv();
+        ReplicatedEnvironment renv = repEnvInfo[1].getEnv();
+
+        long startNs = System.nanoTime();
+        final StatsConfig statsConfig = new StatsConfig().setClear(true);
+
+        /* Clear and discard stats. */
+        renv.getRepStats(statsConfig);
+
+        /* Just a single write. */
+        doWrites(menv, 1);
+
+        ReplicatedEnvironmentStats rstats = renv.getRepStats(statsConfig);
+
+        /* Verify that the group commit was the result of a timeout. */
+        assertTrue((System.nanoTime() - startNs) > intervalNs);
+
+        assertEquals(1, rstats.getNReplayGroupCommitTxns());
+        assertEquals(1, rstats.getNReplayGroupCommits());
+        assertEquals(1, rstats.getNReplayGroupCommitTimeouts());
+        assertEquals(0, rstats.getNReplayCommitSyncs());
+        assertEquals(1, rstats.getNReplayCommitNoSyncs());
+
+        /* Now force an exact group commit size overflow. */
+        doWrites(menv, maxGroupCommit);
+        rstats = renv.getRepStats(statsConfig);
+
+        assertEquals(maxGroupCommit, rstats.getNReplayGroupCommitTxns());
+        assertEquals(1, rstats.getNReplayGroupCommits());
+        assertEquals(0, rstats.getNReplayGroupCommitTimeouts());
+        assertEquals(0, rstats.getNReplayCommitSyncs());
+        assertEquals(maxGroupCommit, rstats.getNReplayCommitNoSyncs());
+
+        /* Group commit size + 1 timeout txn */
+        doWrites(menv, maxGroupCommit + 1);
+        rstats = renv.getRepStats(statsConfig);
+
+        assertEquals(maxGroupCommit + 1, rstats.getNReplayGroupCommitTxns());
+        assertEquals(2, rstats.getNReplayGroupCommits());
+        assertEquals(1, rstats.getNReplayGroupCommitTimeouts());
+        assertEquals(0, rstats.getNReplayCommitSyncs());
+        assertEquals(maxGroupCommit + 1, rstats.getNReplayCommitNoSyncs());
+    }
+
+    private void initGroupCommitConfig(final long intervalNs,
+                                       final int maxGroupCommit)
+        throws IllegalArgumentException {
+
+        for (int i = 0; i < groupSize; i++) {
+            repEnvInfo[i].getRepConfig().
+                setConfigParam(ReplicationConfig.REPLICA_GROUP_COMMIT_INTERVAL,
+                               intervalNs + " ns");
+            repEnvInfo[i].getRepConfig().
+                setConfigParam(ReplicationConfig.REPLICA_MAX_GROUP_COMMIT,
+                               Integer.toString(maxGroupCommit));
+        }
+    }
+
+    /**
+     * Verify that group commits can be turned off.
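+     * Setting ReplicationConfig.REPLICA_MAX_GROUP_COMMIT to 0 disables
+     * grouping on the replica entirely, so each replayed commit should be
+     * a plain sync commit:
+     *
+     *   repConfig.setConfigParam(
+     *       ReplicationConfig.REPLICA_MAX_GROUP_COMMIT, "0");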
+     */
+    @Test
+    public void testGroupCommitOff()
+        throws InterruptedException {
+
+        /* Turn off group commits on the replica. */
+        initGroupCommitConfig(Integer.MAX_VALUE, 0);
+
+        createGroup();
+        State state = repEnvInfo[0].getEnv().getState();
+        assertEquals(State.MASTER, state);
+        ReplicatedEnvironment menv = repEnvInfo[0].getEnv();
+        ReplicatedEnvironment renv = repEnvInfo[1].getEnv();
+
+        final StatsConfig statsConfig = new StatsConfig().setClear(true);
+
+        /* Clear and discard stats. */
+        renv.getRepStats(statsConfig);
+
+        /* Just a single write. */
+        doWrites(menv, 1);
+
+        ReplicatedEnvironmentStats rstats = renv.getRepStats(statsConfig);
+
+        assertEquals(0, rstats.getNReplayGroupCommitTxns());
+        assertEquals(0, rstats.getNReplayGroupCommits());
+        assertEquals(0, rstats.getNReplayGroupCommitTimeouts());
+        assertEquals(1, rstats.getNReplayCommitSyncs());
+        assertEquals(0, rstats.getNReplayCommitNoSyncs());
+    }
+
+    void doWrites(ReplicatedEnvironment menv, int count)
+        throws InterruptedException {
+
+        final WriteThread[] wt = new WriteThread[count];
+
+        for (int i = 0; i < count; i++) {
+            wt[i] = new WriteThread(menv);
+            wt[i].start();
+        }
+
+        for (int i = 0; i < count; i++) {
+            wt[i].join(60000);
+        }
+    }
+
+    /* Used as the basis for producing unique db names. */
+    private static AtomicInteger dbId = new AtomicInteger(0);
+
+    /**
+     * Thread used to create concurrent updates amenable to group commits.
+     */
+    private class WriteThread extends Thread {
+        ReplicatedEnvironment menv;
+
+        WriteThread(ReplicatedEnvironment menv) {
+            super();
+            this.menv = menv;
+        }
+
+        @Override
+        public void run() {
+            final TransactionConfig mtc = new TransactionConfig();
+            mtc.setDurability(RepTestUtils.SYNC_SYNC_ALL_DURABILITY);
+            Transaction mt = menv.beginTransaction(null, mtc);
+            Database db = menv.openDatabase(mt,
+                                            "testDB" + dbId.incrementAndGet(),
+                                            dbconfig);
+            mt.commit();
+            db.close();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/HandshakeTest.java b/test/com/sleepycat/je/rep/HandshakeTest.java
new file mode 100644
index 0000000..14de23f
--- /dev/null
+++ b/test/com/sleepycat/je/rep/HandshakeTest.java
@@ -0,0 +1,323 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.Feeder; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.stream.FeederReplicaHandshake; +import com.sleepycat.je.rep.stream.ReplicaFeederHandshake; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class HandshakeTest extends TestBase { + + private final File envRoot; + private final int groupSize = 4; + + private ReplicatedEnvironment master = null; + private RepNode masterNode = null; + + RepEnvInfo[] repEnvInfo = null; + RepEnvInfo replicaEnvInfo = null; + + public HandshakeTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, groupSize); + + /* + * Disable stat collection to avoid NPE in testDup. It constructs a + * Feeder() with a null outputThread field, and this causes an NPE when + * getProtocolStats is called from the stat collector, which depends on + * timing. Outside of this particular test the outputThread field is + * non-null and final, so checking for null in Feeder is undesirable. + */ + repEnvInfo[0].getEnvConfig().setConfigParam( + EnvironmentConfig.STATS_COLLECT, "false"); + + master = repEnvInfo[0].openEnv(); + replicaEnvInfo = repEnvInfo[1]; + State state = master.getState(); + masterNode = RepInternal.getNonNullRepImpl(master).getRepNode(); + assertEquals(ReplicatedEnvironment.State.MASTER, state); + } + + @Override + @After + public void tearDown() + throws Exception { + + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + /** + * Test error handling on a version mismatch + */ + @Test + public void testProtocolVersionMismatch() + throws Throwable { + + /* Hack the version number for the test */ + try { + ReplicaFeederHandshake.setTestProtocolVersion(Integer.MIN_VALUE); + checkForException + (EnvironmentFailureReason.PROTOCOL_VERSION_MISMATCH); + } finally { + /* Restore the default version */ + ReplicaFeederHandshake.setTestProtocolVersion(0); + } + } + + /** + * Test feeder older log version than replica. Version 13 introduces + * dtvlsn commits and so changes the txn commit/abort record formats. + */ + @Test + public void testLogVersionFeeder12Replica13() + throws Throwable { + + try { + ReplicaFeederHandshake.setTestLogVersion(13); + FeederReplicaHandshake.setTestLogVersion(12); + checkSuccess(); + } finally { + ReplicaFeederHandshake.setTestLogVersion(0); + } + } + + /** + * Test feeder newer log version than replica. Version 13 introduces + * dtvlsn commits and so changes the txn commit/abort record formats. 
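+     * The handshake is expected to succeed, with the feeder down-converting
+     * replicated entries to the replica's older format (the same downgrade
+     * support exercised by testOneOlderLogVersion9 below).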
+     */
+    @Test
+    public void testLogVersionFeeder13Replica12()
+        throws Throwable {
+
+        try {
+            ReplicaFeederHandshake.setTestLogVersion(12);
+            FeederReplicaHandshake.setTestLogVersion(13);
+            checkSuccess();
+        } finally {
+            ReplicaFeederHandshake.setTestLogVersion(0);
+        }
+    }
+
+    /**
+     * Test feeder with log version 8 and replica one version older: older
+     * replica is not supported until feeder version 9.
+     */
+    @Test
+    public void testOneOlderLogVersion8()
+        throws Throwable {
+
+        try {
+            ReplicaFeederHandshake.setTestLogVersion(7);
+            FeederReplicaHandshake.setTestLogVersion(8);
+            checkForException(EnvironmentFailureReason.HANDSHAKE_ERROR);
+        } finally {
+            ReplicaFeederHandshake.setTestLogVersion(0);
+            FeederReplicaHandshake.setTestLogVersion(0);
+        }
+    }
+
+    /**
+     * Test feeder with log version 9 and replica log version 8: this downgrade
+     * is supported.
+     */
+    @Test
+    public void testOneOlderLogVersion9()
+        throws Throwable {
+
+        try {
+            ReplicaFeederHandshake.setTestLogVersion(8);
+            FeederReplicaHandshake.setTestLogVersion(9);
+            checkSuccess();
+        } finally {
+            ReplicaFeederHandshake.setTestLogVersion(0);
+            FeederReplicaHandshake.setTestLogVersion(0);
+        }
+    }
+
+    /**
+     * Test that the handshake accounts for the fact that the highest replica
+     * log entry version may not be the current log version.
+     */
+    @Test
+    public void testLogVersionFeederHighestReplicableVersion()
+        throws Throwable {
+
+        int highestFeederVersion = LogEntryType.LOG_VERSION_HIGHEST_REPLICABLE;
+        int lowestReplicaVersion;
+        if (highestFeederVersion >=
+            LogEntryType.LOG_VERSION_REPLICATE_OLDER) {
+            /* Feeder can downgrade by one or more versions */
+            lowestReplicaVersion = highestFeederVersion - 1;
+        } else {
+            /* No downgrades */
+            lowestReplicaVersion = highestFeederVersion;
+        }
+        try {
+            ReplicaFeederHandshake.setTestLogVersion(lowestReplicaVersion);
+            checkSuccess();
+        } finally {
+            ReplicaFeederHandshake.setTestLogVersion(0);
+        }
+    }
+
+    /**
+     * Test error handling when there is a duplicate replica node.
+     */
+    @Test
+    public void testDup()
+        throws Exception {
+
+        /* Introduce a fake feeder in the map. */
+        masterNode.feederManager().putFeeder(replicaEnvInfo.getRepConfig().
+                                             getNodeName(), new Feeder());
+        checkForException(EnvironmentFailureReason.HANDSHAKE_ERROR);
+    }
+
+    @Test
+    public void testReplicaLeadingClockSkew()
+        throws Exception {
+
+        int delta = (int) replicaEnvInfo.getRepConfig().getMaxClockDelta
+            (TimeUnit.MILLISECONDS);
+        try {
+            RepImpl.setSkewMs(delta + 10);
+            checkForException(EnvironmentFailureReason.HANDSHAKE_ERROR);
+        } finally {
+            RepImpl.setSkewMs(0);
+        }
+    }
+
+    @Test
+    public void testReplicaLaggingClockSkew()
+        throws Exception {
+
+        int delta = (int) replicaEnvInfo.getRepConfig().getMaxClockDelta
+            (TimeUnit.MILLISECONDS);
+        try {
+            RepImpl.setSkewMs(-(delta + 10));
+            checkForException(EnvironmentFailureReason.HANDSHAKE_ERROR);
+        } finally {
+            RepImpl.setSkewMs(0);
+        }
+    }
+
+    @Test
+    public void testDuplicateSocket()
+        throws Exception {
+
+        ReplicatedEnvironment renv2 = repEnvInfo[1].openEnv();
+        ReplicatedEnvironment renv3 = repEnvInfo[2].openEnv();
+        renv3.close();
+        try {
+            ReplicationConfig config = repEnvInfo[3].getRepConfig();
+            config.setNodeHostPort(repEnvInfo[2].getRepConfig().
+ getNodeHostPort()); + ReplicatedEnvironment renv4 = repEnvInfo[3].openEnv(); + renv4.close(); + fail("Expected exception"); + } catch (EnvironmentFailureException e) { + assertEquals(EnvironmentFailureReason.HANDSHAKE_ERROR, + e.getReason()); + } catch (Exception e) { + fail ("Wrong exception type " + e); + } + renv2.close(); + } + + @Test + public void testConflictingPort() + throws Exception { + + /* Establish the node in the rep group db. */ + replicaEnvInfo.openEnv(); + replicaEnvInfo.closeEnv(); + + ReplicationConfig config = replicaEnvInfo.getRepConfig(); + config.setNodeHostPort(config.getNodeHostname() + ":" + 8888 ); + + checkForException(EnvironmentFailureReason.HANDSHAKE_ERROR); + } + + @Test + public void testConflictingType() + throws Exception { + + /* Establish the node in the rep group db. */ + replicaEnvInfo.openEnv(); + replicaEnvInfo.closeEnv(); + + ReplicationConfig config = replicaEnvInfo.getRepConfig(); + config.setNodeType(NodeType.MONITOR); + + checkForException(EnvironmentFailureReason.HANDSHAKE_ERROR); + } + + @Test + public void testBadGroupOnReopen() + throws Exception { + + /* Establish the node in the rep group db. */ + replicaEnvInfo.openEnv(); + replicaEnvInfo.closeEnv(); + + ReplicationConfig config = replicaEnvInfo.getRepConfig(); + config.setGroupName("BAD"); + checkForException(EnvironmentFailureReason.UNEXPECTED_STATE); + } + + private void checkForException(EnvironmentFailureReason reason) { + try { + replicaEnvInfo.openEnv(); + replicaEnvInfo.closeEnv(); + fail("expected exception"); + } catch (EnvironmentFailureException e) { + assertEquals(reason, e.getReason()); + } + } + + private void checkSuccess() { + replicaEnvInfo.openEnv(); + replicaEnvInfo.closeEnv(); + } +} diff --git a/test/com/sleepycat/je/rep/HardRecoveryTest.java b/test/com/sleepycat/je/rep/HardRecoveryTest.java new file mode 100644 index 0000000..b0b4cb6 --- /dev/null +++ b/test/com/sleepycat/je/rep/HardRecoveryTest.java @@ -0,0 +1,621 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.impl.RepImplStatDefinition; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup.TestHook; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.WaitForMasterListener; +import com.sleepycat.je.util.DbTruncateLog; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Check the rollback past a commit or abort. + */ +public class HardRecoveryTest extends TestBase { + private final boolean verbose = Boolean.getBoolean("verbose"); + private static final String DB_NAME = "testDb"; + private final File envRoot; + private final Logger logger; + + public HardRecoveryTest() { + envRoot = SharedTestUtils.getTestDir(); + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + } + + /** + * HardRecovery as invoked via the ReplicatedEnvironment constructor. + * Mimics the case where A was the master, has a commit in its log but that + * commit has not been propagated. A goes down, new master = B. When A + * comes up, it has to roll back its commit and do a hard recovery. This + * flavor of hard recovery is executed silently within the + * ReplicatedEnvironment constructor. + */ + @Test + public void testHardRecoveryNoLimit() + throws Exception { + + doHardRecovery(false, false, false); + } + + /** + * Same as {@link #testHardRecoveryNoLimit} but sets the rollback limit + * to cause a RollbackProhibitedException. + */ + @Test + public void testHardRecoveryWithLimit() + throws Exception { + + doHardRecovery(true, false, false); + } + + /** + * Same as {@link #testHardRecoveryNoLimit} but disables hard rollbacks + * to cause a RollbackProhibitedException. + */ + @Test + public void testHardRecoveryDisabled() + throws Exception { + + doHardRecovery(false, true, false); + } + + /* + * Test that a hard recovery which requires truncation in the penultimate + * log file works, and that hard recovery processing does not mistakenly + * cause a gap in the log file. + * + * This bug results in a recovery hang. [#19463] + */ + @Test + public void testMultipleLogFilesHardRecovery() + throws Exception { + + doHardRecovery(false, false, true); + } + + /** + * If setRollbackLimit or disableHardRollback, we expect to get a + * RollbackProhibitedException and to have to manually truncate the + * environment. + * + * @param setRollbackLimit if true, set TXN_ROLLBACK_LIMIT to zero and + * falsify the DTVLSN info to cause RollbackProhibitedException. 
+     *
+     * @param disableHardRollback if true, set TXN_ROLLBACK_DISABLED to true
+     * to cause RollbackProhibitedException.
+     *
+     * @param txnMultipleLogFiles make the rollback txn span multiple files.
+     */
+    private void doHardRecovery(boolean setRollbackLimit,
+                                boolean disableHardRollback,
+                                boolean txnMultipleLogFiles)
+        throws Exception {
+
+        assertFalse(setRollbackLimit && disableHardRollback);
+
+        RepEnvInfo[] repEnvInfo = null;
+        Database db = null;
+        int numNodes = 3;
+        try {
+            EnvironmentConfig envConfig = RepTestUtils.createEnvConfig
+                (RepTestUtils.SYNC_SYNC_NONE_DURABILITY);
+
+            /*
+             * Set a small log file size to make sure the rolled-back
+             * transaction's log entries span multiple log files.
+             */
+            if (txnMultipleLogFiles) {
+                DbInternal.disableParameterValidation(envConfig);
+                envConfig.setConfigParam
+                    (EnvironmentConfig.LOG_FILE_MAX, "2000");
+            }
+
+            ReplicationConfig repConfig = new ReplicationConfig();
+
+            if (setRollbackLimit) {
+                repConfig.setConfigParam(
+                    ReplicationConfig.TXN_ROLLBACK_LIMIT, "0");
+            }
+
+            if (disableHardRollback) {
+                repConfig.setConfigParam(
+                    ReplicationConfig.TXN_ROLLBACK_DISABLED, "true");
+            }
+
+            repEnvInfo = RepTestUtils.setupEnvInfos
+                (envRoot, numNodes, envConfig, repConfig);
+
+            ReplicatedEnvironment master = repEnvInfo[0].openEnv();
+            assert master != null;
+
+            db = createDb(master);
+
+            logger.info("Created db on master");
+            CommitToken commitToken = doInsert(master, db, 1, 1);
+            CommitPointConsistencyPolicy cp =
+                new CommitPointConsistencyPolicy(commitToken, 1000,
+                                                 TimeUnit.SECONDS);
+            for (int i = 1; i < numNodes; i++) {
+                repEnvInfo[i].openEnv(cp);
+            }
+
+            /* Sync the group to make sure all replicas start up. */
+            RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+
+            /*
+             * Shut down all replicas so that they don't see the next
+             * commit.
+             */
+            for (int i = 1; i < numNodes; i++) {
+                logger.info("shut down replica " +
+                            repEnvInfo[i].getEnv().getNodeName());
+                repEnvInfo[i].closeEnv();
+            }
+
+            /*
+             * Do work on the sole node, which is the master, then close it.
+             * This work was committed, and will have to be rolled back later
+             * on.
+             */
+            logger.info("do master only insert");
+
+            if (txnMultipleLogFiles) {
+
+                /*
+                 * A large transaction writes log entries in multiple log
+                 * files.
+                 */
+                DbInternal.getNonNullEnvImpl(master).forceLogFileFlip();
+                final DatabaseEntry key = new DatabaseEntry();
+                final DatabaseEntry data = new DatabaseEntry(new byte[200]);
+                Transaction txn = master.beginTransaction(null, null);
+                for (int i = 1; i <= 10; i++) {
+                    IntegerBinding.intToEntry(i, key);
+                    db.put(txn, key, data);
+                }
+                txn.commit();
+                commitToken = txn.getCommitToken();
+            } else {
+                commitToken = doInsert(master, db, 2, 5);
+                checkExists(master, db, 1, 2, 3, 4, 5);
+            }
+
+            db.close();
+            db = null;
+
+            /*
+             * If setRollbackLimit is true, pretend that the preceding
+             * transactions were made durable by a surreptitious update of
+             * the DTVLSN used to determine hard rollback limits.
+             *
+             * Note that we do not need to do this when disableHardRollback is
+             * true, because rollback is prohibited regardless of whether the
+             * rolled back txns are considered durable.
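+             *
+             * (The DTVLSN tracks the VLSN of the most recent transaction
+             * known to be durable at a simple majority of nodes; hard
+             * rollbacks past it are what TXN_ROLLBACK_LIMIT and
+             * TXN_ROLLBACK_DISABLED guard against.)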
+             */
+            if (setRollbackLimit) {
+                final RepNode masterRepNode =
+                    master.getNonNullRepImpl().getRepNode();
+
+                final long commitTokenVLSN = commitToken.getVLSN();
+                masterRepNode.updateDTVLSN(commitTokenVLSN);
+
+                new PollCondition(100, 5000) {
+
+                    @Override
+                    protected boolean condition() {
+                        /*
+                         * Wait for the Null transaction to ensure that the
+                         * DTVLSN has been made persistent.
+                         */
+                        return masterRepNode.getCurrentTxnEndVLSN().
+                            getSequence() > commitTokenVLSN;
+                    }
+                }.await();
+            }
+
+            if (txnMultipleLogFiles) {
+                /* Close the Environment without doing a checkpoint. */
+                repEnvInfo[0].abnormalCloseEnv();
+            } else {
+                repEnvInfo[0].closeEnv();
+            }
+
+            /*
+             * Restart the group, make it do some other work which the
+             * original master, which is down, won't see.
+             */
+            logger.info("restart group");
+            master = RepTestUtils.restartGroup(repEnvInfo[1], repEnvInfo[2]);
+
+            logger.info("group came up, new master = " + master.getNodeName());
+            db = openDb(master);
+            commitToken = doInsert(master, db, 10, 15);
+            checkNotThere(master, db, 2, 3, 4, 5);
+            checkExists(master, db, 1, 10, 11, 12, 13, 14, 15);
+
+            /*
+             * When we restart the master, it should transparently do a hard
+             * recovery.
+             */
+            logger.info("restart old master");
+            ReplicatedEnvironment oldMaster = null;
+            try {
+                repEnvInfo[0].openEnv
+                    (new CommitPointConsistencyPolicy(commitToken, 1000,
+                                                      TimeUnit.SECONDS));
+                assertFalse(setRollbackLimit);
+                assertFalse(disableHardRollback);
+                oldMaster = repEnvInfo[0].getEnv();
+                assertTrue(RepInternal.getNonNullRepImpl(oldMaster).
+                           getNodeStats().
+                           getBoolean(RepImplStatDefinition.HARD_RECOVERY));
+                logger.info
+                    (RepInternal.getNonNullRepImpl(oldMaster).getNodeStats().
+                     getString(RepImplStatDefinition.HARD_RECOVERY_INFO));
+            } catch (RollbackProhibitedException e) {
+
+                /*
+                 * If setRollbackLimit is set, we should get this exception
+                 * with directions on how to truncate the log. If the
+                 * limit was not set, the truncation should have been done
+                 * by JE already.
+                 */
+                assertTrue(setRollbackLimit || disableHardRollback);
+                assertEquals(0, e.getTruncationFileNumber());
+                assertTrue(e.getEarliestTransactionId() != 0);
+                assertTrue(e.getEarliestTransactionCommitTime() != null);
+
+                /*
+                 * Very test dependent; it should be at least 2657, but may be
+                 * larger if some internal replicated transaction commits.
+                 * A change in log entry sizes could change this value. This
+                 * should be set to the value of the matchpoint.
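+                 *
+                 * DbTruncateLog below truncates the log at the file/offset
+                 * reported by the exception, after which the environment
+                 * can be reopened.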
+ */ + assertTrue(e.getTruncationFileOffset() >= 2657); + + DbTruncateLog truncator = new DbTruncateLog(); + truncator.truncateLog(repEnvInfo[0].getEnvHome(), + e.getTruncationFileNumber(), + e.getTruncationFileOffset()); + repEnvInfo[0].openEnv + (new CommitPointConsistencyPolicy(commitToken, 1000, + TimeUnit.SECONDS)); + oldMaster = repEnvInfo[0].getEnv(); + } + + Database replicaDb = openDb(oldMaster); + checkNotThere(oldMaster, replicaDb, 2, 3, 4, 5); + replicaDb.close(); + + VLSN commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, + numNodes); + RepTestUtils.checkNodeEquality(commitVLSN, verbose, repEnvInfo); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } finally { + if (db != null) { + db.close(); + db = null; + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + private Database createDb(ReplicatedEnvironment master) { + return openDb(master, true); + } + + private Database openDb(ReplicatedEnvironment master) { + return openDb(master, false); + } + + private Database openDb(ReplicatedEnvironment master, boolean allowCreate) { + + Transaction txn = master.beginTransaction(null,null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(allowCreate); + dbConfig.setTransactional(true); + Database db = master.openDatabase(txn, DB_NAME, dbConfig); + txn.commit(); + return db; + } + + /** + * @return the commit token for the last txn used in the insert + */ + private CommitToken doInsert(ReplicatedEnvironment master, + Database db, + int startVal, + int endVal) { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = null; + + for (int i = startVal; i <= endVal; i++) { + txn = master.beginTransaction(null, null); + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + assertEquals(OperationStatus.SUCCESS, db.put(txn, key, data)); + if (verbose) { + System.out.println("insert " + i); + } + txn.commit(); + } + + return (txn == null) ? null: txn.getCommitToken(); + } + + /** + * Assert that these values are IN the database. + */ + private void checkExists(ReplicatedEnvironment node, + Database db, + int ... values) { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + if (verbose) { + System.err.println("Entering checkThere: node=" + + node.getNodeName()); + } + + for (int i : values) { + IntegerBinding.intToEntry(i, key); + if (verbose) { + System.err.println("checkThere: node=" + node.getNodeName() + + " " + i); + } + assertEquals(OperationStatus.SUCCESS, + db.get(null, key, data, LockMode.DEFAULT)); + assertEquals(i, IntegerBinding.entryToInt(data)); + } + } + + /** + * Assert that these values are NOT IN the database. + */ + private void checkNotThere(ReplicatedEnvironment node, + Database db, + int ... values) { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int i : values) { + IntegerBinding.intToEntry(i, key); + if (verbose) { + System.out.println("checkNotThere: node=" + node.getNodeName() + + " " + i); + } + assertEquals("for key " + i, OperationStatus.NOTFOUND, + db.get(null, key, data, LockMode.DEFAULT)); + } + } + + /** + * HardRecovery as invoked on a live replica. Can only occur with network + * partitioning. + * + * Suppose we have nodes A,B,C. A is the master. C is partitioned, and + * therefore misses part of the replication stream. Then, through a series + * of timing accidents, C wins mastership over A and B. 
A and B then have
+     * to discover the problem during a syncup, and throw hard recovery
+     * exceptions.
+     */
+    @Test
+    public void testHardRecoveryDeadHandle()
+        throws Throwable {
+
+        RepEnvInfo[] repEnvInfo = null;
+        Database db = null;
+        int numNodes = 3;
+        try {
+            repEnvInfo = RepTestUtils.setupEnvInfos
+                (envRoot, numNodes, RepTestUtils.SYNC_SYNC_NONE_DURABILITY);
+
+            /*
+             * Start the master first, to ensure that it is the master, and
+             * then start the rest of the group.
+             */
+            ReplicatedEnvironment master = repEnvInfo[0].openEnv();
+            assert master != null;
+
+            db = createDb(master);
+            CommitToken commitToken = doInsert(master, db, 1, 1);
+            CommitPointConsistencyPolicy cp =
+                new CommitPointConsistencyPolicy(commitToken, 1000,
+                                                 TimeUnit.SECONDS);
+
+            for (int i = 1; i < numNodes; i++) {
+                repEnvInfo[i].openEnv(cp);
+            }
+
+            /*
+             * After node1 and node2 join, make sure that their presence in the
+             * rep group db is propagated before we do a forceMaster. When a
+             * node calls for an election, it must have its own id available to
+             * itself from the rep group db on disk. If it doesn't, it will
+             * send an election request with an illegal node id. In real life,
+             * this can never happen, because a node that does not have its own
+             * id won't win mastership, since others will be ahead of it.
+             */
+            commitToken = doInsert(master, db, 2, 2);
+            TransactionConfig txnConfig = new TransactionConfig();
+            txnConfig.setConsistencyPolicy
+                (new CommitPointConsistencyPolicy(commitToken, 1000,
+                                                  TimeUnit.SECONDS));
+
+            for (int i = 1; i < numNodes; i++) {
+                Transaction txn =
+                    repEnvInfo[i].getEnv().beginTransaction(null, txnConfig);
+                txn.commit();
+            }
+
+            /*
+             * Mimic a network partition by forcing one replica to not see the
+             * incoming LNs. Do some work that the last replica doesn't see,
+             * and then force the laggard to become the master. This will
+             * create a case where the current master has to do a hard
+             * recovery.
+             */
+            int lastIndex = numNodes - 1;
+            WaitForMasterListener masterWaiter = new WaitForMasterListener();
+            ReplicatedEnvironment forcedMaster = repEnvInfo[lastIndex].getEnv();
+            forcedMaster.setStateChangeListener(masterWaiter);
+            RepNode lastRepNode = repEnvInfo[lastIndex].getRepNode();
+            lastRepNode.replica().setDontProcessStream();
+
+            commitToken = doInsert(master, db, 3, 4);
+            db.close();
+            db = null;
+            logger.info("Before force");
+
+            /*
+             * WaitForNodeX are latches that will help us know when nodes 1
+             * and 2 have finished syncing up with node 3, after forcing node
+             * 3 to become a master. There will be two syncup attempts,
+             * because after the first syncup attempt, we do an election to
+             * make sure that we are running with the most optimal master.
+             */
+            CountDownLatch waitForNode1 =
+                setupWaitForSyncup(repEnvInfo[0].getEnv(), 2);
+            CountDownLatch waitForNode2 =
+                setupWaitForSyncup(repEnvInfo[1].getEnv(), 2);
+
+            /*
+             * Make node3 the master. Make sure that it did not see the
+             * work done while it was in its fake network partitioned state.
+             */
+            lastRepNode.forceMaster(true);
+            logger.info("After force");
+            masterWaiter.awaitMastership();
+
+            db = openDb(forcedMaster);
+            checkNotThere(forcedMaster, db, 3, 4);
+            checkExists(forcedMaster, db, 1, 2);
+            db.close();
+
+            /*
+             * At this point, nodes 1 and 2 should have thrown a
+             * RollbackException. Both will become invalid.
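+             *
+             * checkForHardRecovery() below waits for the syncup latches,
+             * verifies the RollbackException, and closes the invalidated
+             * handles so that the nodes can be restarted.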
+ */ + checkForHardRecovery(waitForNode1, repEnvInfo[0]); + checkForHardRecovery(waitForNode2, repEnvInfo[1]); + + /* + * Restart the group, make it do some other work and check + * that the group has identical contents. + */ + logger.info("restarting nodes which did hard recovery"); + RepTestUtils.restartReplicas(repEnvInfo[0]); + RepTestUtils.restartReplicas(repEnvInfo[1]); + logger.info("sync group"); + VLSN commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, + numNodes); + + logger.info("run check"); + RepTestUtils.checkNodeEquality(commitVLSN, verbose, repEnvInfo); + } catch (Throwable e) { + e.printStackTrace(); + throw e; + } finally { + if (db != null) { + db.close(); + db = null; + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /** + * Wait until a replica/feeder syncup has been tried numSyncupAttempt times + * on this node. + */ + private CountDownLatch setupWaitForSyncup + (final ReplicatedEnvironment master, int numSyncupAttempts) { + final CountDownLatch waiter = new CountDownLatch(numSyncupAttempts); + + TestHook syncupFinished = new TestHook() { + @Override + public void doHook() throws InterruptedException { + logger.info("----syncup countdown for " + + master.getNodeName() + " latch=" + waiter); + waiter.countDown(); + } + }; + + RepInternal.getNonNullRepImpl(master).getRepNode(). + replica().setReplicaFeederSyncupHook(syncupFinished); + return waiter; + } + + /** + * Make sure that this node has thrown a RollbackException. + */ + private void checkForHardRecovery(CountDownLatch syncupFinished, + RepEnvInfo envInfo) + throws Throwable { + + syncupFinished.await(); + logger.info(envInfo.getEnv().getNodeName() + " becomes replica"); + + try { + ReplicatedEnvironment.State state = envInfo.getEnv().getState(); + fail("Should have seen rollback exception, got state of " + + state); + } catch (RollbackException expected) { + assertTrue(expected.getEarliestTransactionId() != 0); + assertTrue(expected.getEarliestTransactionCommitTime() != null); + logger.info("expected = " + expected.toString()); + envInfo.closeEnv(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } +} diff --git a/test/com/sleepycat/je/rep/JoinGroupTest.java b/test/com/sleepycat/je/rep/JoinGroupTest.java new file mode 100644 index 0000000..fc9d6c8 --- /dev/null +++ b/test/com/sleepycat/je/rep/JoinGroupTest.java @@ -0,0 +1,297 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; + +public class JoinGroupTest extends RepTestBase { + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + + /* Add a secondary node */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + repEnvInfo[repEnvInfo.length - 1].getRepConfig().setNodeType( + NodeType.SECONDARY); + } + + /** + * Simulates the scenario where an entire group goes down and is restarted. + */ + @Test + public void testAllJoinLeaveJoinGroup() + throws DatabaseException, + InterruptedException { + + createGroup(); + ReplicatedEnvironment masterRep = repEnvInfo[0].getEnv(); + populateDB(masterRep, TEST_DB_NAME, 100); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Shutdown the entire group. */ + closeNodes(repEnvInfo); + + /* + * Restart the group, using a longer join wait time to allow the + * secondary to query the primaries a second time after the election is + * complete. See RepNode.MASTER_QUERY_INTERVAL. + */ + final long masterQueryInterval = 10000; + restartNodes(JOIN_WAIT_TIME + masterQueryInterval, repEnvInfo); + } + + // Tests repeated opens of the same environment + @Test + public void testRepeatedOpen() + throws UnknownMasterException, DatabaseException { + + /* All nodes have joined. */ + createGroup(); + + /* Already joined, rejoin master. */ + State state = repEnvInfo[0].getEnv().getState(); + assertEquals(State.MASTER, state); + + /* Already joined, rejoin replica, by creating another handle. */ + ReplicatedEnvironment r1Handle = new ReplicatedEnvironment + (repEnvInfo[1].getEnvHome(), + repEnvInfo[1].getRepConfig(), + repEnvInfo[1].getEnvConfig()); + state = r1Handle.getState(); + assertEquals(State.REPLICA, state); + r1Handle.close(); + } + + @Test + public void testDefaultJoinGroup() + throws UnknownMasterException, + DatabaseException { + + createGroup(); + ReplicatedEnvironment masterRep = repEnvInfo[0].getEnv(); + assertEquals(State.MASTER, masterRep.getState()); + leaveGroupAllButMaster(); + /* Populates just the master. */ + CommitToken ct = populateDB(masterRep, TEST_DB_NAME, 100); + + /* Replicas should have caught up when they re-open their handles. */ + for (RepEnvInfo ri : repEnvInfo) { + ReplicatedEnvironment rep = + (ri.getEnv() == null) ? ri.openEnv() : ri.getEnv(); + VLSN repVLSN = RepInternal.getNonNullRepImpl(rep). 
+                getVLSNIndex().getRange().getLast();
+            assertTrue(new VLSN(ct.getVLSN()).compareTo(repVLSN) <= 0);
+        }
+    }
+
+    @Test
+    public void testDefaultJoinGroupHelper()
+        throws UnknownMasterException,
+               DatabaseException {
+
+        for (int i = 0; i < repEnvInfo.length; i++) {
+            RepEnvInfo ri = repEnvInfo[i];
+            if ((i + 1) == repEnvInfo.length) {
+                /* Use a non-master helper for the last replicator. */
+                ReplicationConfig config =
+                    RepTestUtils.createRepConfig((short) (i + 1));
+                String hpPairs = "";
+                /* Skip the master; use all the other nodes. */
+                for (int j = 1; j < i; j++) {
+                    hpPairs +=
+                        "," + repEnvInfo[j].getRepConfig().getNodeHostPort();
+                }
+                hpPairs = hpPairs.substring(1);
+                config.setHelperHosts(hpPairs);
+                File envHome = ri.getEnvHome();
+                ri = repEnvInfo[i] =
+                    new RepEnvInfo(envHome,
+                                   config,
+                                   RepTestUtils.createEnvConfig
+                                   (Durability.COMMIT_SYNC));
+            }
+            ri.openEnv();
+            State state = ri.getEnv().getState();
+            assertEquals((i == 0) ? State.MASTER : State.REPLICA, state);
+        }
+    }
+
+    @Test
+    public void testTimeConsistencyJoinGroup()
+        throws UnknownMasterException,
+               DatabaseException {
+
+        createGroup();
+        ReplicatedEnvironment masterRep = repEnvInfo[0].getEnv();
+        assertEquals(State.MASTER, masterRep.getState());
+
+        leaveGroupAllButMaster();
+        /* Populates just the master. */
+        populateDB(masterRep, TEST_DB_NAME, 1000);
+
+        repEnvInfo[1].openEnv
+            (new TimeConsistencyPolicy(1, TimeUnit.MILLISECONDS,
+                                       RepTestUtils.MINUTE_MS,
+                                       TimeUnit.MILLISECONDS));
+        ReplicatedEnvironmentStats stats =
+            repEnvInfo[1].getEnv().getRepStats(StatsConfig.DEFAULT);
+
+        assertEquals(1, stats.getTrackerLagConsistencyWaits());
+        assertTrue(stats.getTrackerLagConsistencyWaitMs() > 0);
+    }
+
+    @Test
+    public void testVLSNConsistencyJoinGroup()
+        throws UnknownMasterException,
+               DatabaseException,
+               InterruptedException {
+
+        createGroup();
+        ReplicatedEnvironment masterRep = repEnvInfo[0].getEnv();
+        assertEquals(State.MASTER, masterRep.getState());
+        leaveGroupAllButMaster();
+        /* Populates just the master. */
+        populateDB(masterRep, TEST_DB_NAME, 100);
+        UUID uuid =
+            RepInternal.getNonNullRepImpl(masterRep).getRepNode().getUUID();
+        long masterVLSN = RepInternal.getNonNullRepImpl(masterRep).
+            getVLSNIndex().getRange().getLast().
+            getSequence() + 2 /* 1 new entry + txn commit record */;
+
+        JoinCommitThread jt =
+            new JoinCommitThread(new CommitToken(uuid, masterVLSN),
+                                 repEnvInfo[1]);
+        jt.start();
+        Thread.sleep(5000);
+        /*
+         * Supply the vlsn it's waiting for. The record count MUST sync up
+         * with the expected masterVLSN.
+         */
+        populateDB(masterRep, TEST_DB_NAME, 1);
+        jt.join(JOIN_WAIT_TIME);
+
+        assertTrue(!jt.isAlive());
+        assertNull("Join thread exception", jt.testException);
+    }
+
+    /*
+     * Test that a replica using the jdb files copied from the master can join
+     * the group.
+     */
+    @Test
+    public void testCopyEnvJoin()
+        throws Throwable {
+
+        createGroup(1);
+        assertTrue(repEnvInfo[0].isMaster());
+
+        /* Create some data on the master. */
+        populateDB(repEnvInfo[0].getEnv(), "testDB", 1000);
+
+        /* Close the Environment before copy. */
+        repEnvInfo[0].closeEnv();
+
+        /* First check that there are no jdb files in the second replica. */
+        File repEnvHome = repEnvInfo[1].getEnvHome();
+        File[] envFiles = repEnvHome.listFiles();
+        for (File envFile : envFiles) {
+            if (envFile.getName().contains(".jdb")) {
+                throw new IllegalStateException
+                    ("Replica home should not contain any jdb files");
+            }
+        }
+
+        /* Copy the jdb files from the master to the replica.
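+         * A node started from a copy of the master's files joins with the
+         * data already in place and only needs to replay the tail of the
+         * replication stream from the feeder.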
*/ + SharedTestUtils.copyFiles(repEnvInfo[0].getEnvHome(), + repEnvInfo[1].getEnvHome()); + + /* Reopen the master. */ + repEnvInfo[0].openEnv(); + assertTrue(repEnvInfo[0].isMaster()); + + /* Open the replica. */ + repEnvInfo[1].openEnv(); + assertTrue(repEnvInfo[1].isReplica()); + + /* Read the data to make sure data is correctly copied. */ + Database db = + repEnvInfo[1].getEnv().openDatabase(null, "testDB", dbconfig); + for (int i = 0; i < 1000; i++) { + IntegerBinding.intToEntry(i, key); + db.get(null, key, data, null); + assertEquals(i, (int) LongBinding.entryToLong(data)); + } + db.close(); + } + + /* Utility thread for joining group. */ + class JoinCommitThread extends Thread { + final RepEnvInfo replicator; + final CommitToken commitToken; + Exception testException = null; + + JoinCommitThread(CommitToken commitToken, RepEnvInfo replicator) { + this.commitToken = commitToken; + this.replicator = replicator; + } + + @Override + public void run() { + try { + ReplicatedEnvironment repenv= replicator.openEnv + (new CommitPointConsistencyPolicy(commitToken, + RepTestUtils.MINUTE_MS, + TimeUnit.MILLISECONDS)); + assertEquals(ReplicatedEnvironment.State.REPLICA, + repenv.getState()); + ReplicatedEnvironmentStats stats = + replicator.getEnv().getRepStats(StatsConfig.DEFAULT); + + assertEquals(1, stats.getTrackerVLSNConsistencyWaits()); + assertTrue(stats.getTrackerVLSNConsistencyWaitMs() > 0); + } catch (UnknownMasterException e) { + testException = e; + throw new RuntimeException(e); + } catch (DatabaseException e) { + testException = e; + throw new RuntimeException(e); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/JoinGroupTimeoutsTest.java b/test/com/sleepycat/je/rep/JoinGroupTimeoutsTest.java new file mode 100644 index 0000000..b70de80 --- /dev/null +++ b/test/com/sleepycat/je/rep/JoinGroupTimeoutsTest.java @@ -0,0 +1,206 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup.TestHook; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.WaitForListener; + +public class JoinGroupTimeoutsTest extends RepTestBase { + + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + } + + @After + public void tearDown() + throws Exception { + + ReplicaFeederSyncup.setGlobalSyncupEndHook(null); + super.tearDown(); + } + + /* Test for UnknownMasterException when the setup timeout is exceeded. 
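+     * With all other nodes down there is no election quorum, so openEnv()
+     * cannot locate a master before ENV_SETUP_TIMEOUT expires.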
     */
+    @Test
+    public void testSetupTimeout() {
+
+        createGroup();
+        final RepEnvInfo riMaster = repEnvInfo[0];
+        assertEquals(State.MASTER, riMaster.getEnv().getState());
+        leaveGroupAllButMaster();
+        riMaster.closeEnv();
+
+        // Can't hold elections without at least two nodes, so time out
+        try {
+            ReplicationConfig config = riMaster.getRepConfig();
+            config.setConfigParam
+                (RepParams.ENV_SETUP_TIMEOUT.getName(), "3 s");
+            State status = riMaster.openEnv().getState();
+            fail("Joined group in state: " + status);
+        } catch (UnknownMasterException e) {
+            // Expected exception
+        }
+    }
+
+    /*
+     * Test for an unknown state handle when the unknown state timeout is
+     * exceeded.
+     */
+    @Test
+    public void testUnknownStateTimeout()
+        throws InterruptedException {
+
+        createGroup();
+        final RepEnvInfo ri0 = repEnvInfo[0];
+        closeNodes(repEnvInfo);
+
+        // Can't hold elections without at least two nodes, so time out
+        final ReplicationConfig config = ri0.getRepConfig();
+        config.setConfigParam
+            (RepParams.ENV_SETUP_TIMEOUT.getName(), "5 s");
+
+        config.setConfigParam
+            (RepParams.ENV_UNKNOWN_STATE_TIMEOUT.getName(), "1 s");
+
+        long startMs = System.currentTimeMillis();
+        assertEquals(State.UNKNOWN, ri0.openEnv().getState());
+
+        /* Check that we waited for the "1 sec" timeout. */
+        assertTrue((System.currentTimeMillis() - startMs) >= 1000);
+
+        /*
+         * Transition out of unknown state by starting up another node to
+         * establish a quorum for elections.
+         */
+        repEnvInfo[1].openEnv();
+        assertTrue(repEnvInfo[1].getEnv().getState().isActive());
+
+        final WaitForListener listener =
+            new WaitForListener(State.MASTER, State.REPLICA);
+        repEnvInfo[0].getEnv().setStateChangeListener(listener);
+
+        boolean success = listener.await();
+        assertTrue("State:" + repEnvInfo[0].getEnv().getState(), success);
+    }
+
+    /**
+     * Verify that a long syncup which exceeds the UNKNOWN_STATE_TIMEOUT
+     * but is less than ENV_SETUP_TIMEOUT succeeds.
+     */
+    @Test
+    public void testUnknownStateTimeoutAndProceed() {
+        createGroup();
+        final RepEnvInfo ri3 = repEnvInfo[0];
+        ri3.closeEnv();
+
+        final ReplicationConfig config = ri3.getRepConfig();
+        config.setConfigParam
+            (RepParams.ENV_SETUP_TIMEOUT.getName(), "10 s");
+
+        config.setConfigParam
+            (RepParams.ENV_UNKNOWN_STATE_TIMEOUT.getName(), "5 s");
+
+        /*
+         * Simulate the syncup delay. It must be larger than the unknown state
+         * timeout, but less than the generous setup timeout.
+         */
+        final int syncupStallMs = 1500;
+        stallSyncup(syncupStallMs);
+
+        final long startMs = System.currentTimeMillis();
+
+        /* No exceptions expected. */
+        ri3.openEnv();
+        assertTrue((System.currentTimeMillis() - startMs) >= syncupStallMs);
+        assertTrue(ri3.getEnv().getState().isReplica());
+    }
+
+    /**
+     * Verify that exceeding the setup timeout results in an exception.
+     */
+    @Test
+    public void testEnvSetupTimeoutExceeded() {
+        createGroup();
+        final RepEnvInfo ri3 = repEnvInfo[0];
+        ri3.closeEnv();
+
+        final ReplicationConfig config = ri3.getRepConfig();
+        config.setConfigParam
+            (RepParams.ENV_SETUP_TIMEOUT.getName(), "6 s");
+
+        config.setConfigParam
+            (RepParams.ENV_UNKNOWN_STATE_TIMEOUT.getName(), "5 s");
+
+        final int syncupStallMs = 100000;
+        stallSyncup(syncupStallMs);
+        final long startMs = System.currentTimeMillis();
+        try {
+            ri3.openEnv();
+            fail("Expected ReplicaConsistencyException");
+        } catch (ReplicaConsistencyException expected) {
+            /* Expected exception.
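+             * The stalled syncup prevents setup from completing within
+             * ENV_SETUP_TIMEOUT; a master was located but could not be
+             * synced up with in time, so the failure surfaces as a
+             * consistency exception rather than UnknownMasterException.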
             */
+        }
+        assertTrue((System.currentTimeMillis() - startMs) >= 1000);
+    }
+
+    /**
+     * Puts a hook in place to stall the syncup for the designated time
+     * period. It's the caller's responsibility to clear the hook if
+     * necessary.
+     */
+    private void stallSyncup(final int syncupStallMs) {
+        final TestHook syncupEndHook = new TestHook() {
+            public void doHook() throws InterruptedException {
+                Thread.sleep(syncupStallMs);
+            }
+        };
+        ReplicaFeederSyncup.setGlobalSyncupEndHook(syncupEndHook);
+    }
+
+    /**
+     * Test for IAE on invalid configurations.
+     */
+    @Test
+    public void testIllegalTimeoutArg() {
+        final RepEnvInfo ri0 = repEnvInfo[0];
+        final ReplicationConfig config = ri0.getRepConfig();
+        config.setConfigParam
+            (RepParams.ENV_SETUP_TIMEOUT.getName(), "60 s");
+
+        /* Election timeout larger than setup value. */
+        config.setConfigParam
+            (RepParams.ENV_UNKNOWN_STATE_TIMEOUT.getName(), "61 s");
+        try {
+            ri0.openEnv();
+            fail("Expected IllegalArgumentException");
+        } catch (IllegalArgumentException iae) {
+            // Expected exception
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/LocalWriteTxnTest.java b/test/com/sleepycat/je/rep/LocalWriteTxnTest.java
new file mode 100644
index 0000000..c9a6f89
--- /dev/null
+++ b/test/com/sleepycat/je/rep/LocalWriteTxnTest.java
@@ -0,0 +1,631 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.After;
+import org.junit.Test;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CommitToken;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.ReplicaConsistencyPolicy;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+
+/**
+ * Tests combinations of local-write and read-only transaction config settings.
+ * Ensures that read/write of local/replicated DBs, as well as ack and
+ * consistency policies, are enforced as specified in the API.
+ */
+public class LocalWriteTxnTest extends RepTestBase {
+
+    private static final int N_RECORDS = 10;
+
+    private static class Case {
+        /* Following fields identify the test case. */
+        final boolean localWrite;
+        final boolean readOnly;
+        /* Following fields determine the expected behavior.
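+         * localWriteAllowed/repWriteAllowed say whether writes to local and
+         * replicated DBs are expected to succeed under this configuration,
+         * and acksEnforced whether commit acknowledgments are enforced.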
+         */
+        final boolean localWriteAllowed;
+        final boolean repWriteAllowed;
+        final boolean acksEnforced;
+
+        Case(final boolean localWrite,
+             final boolean readOnly,
+             final boolean localWriteAllowed,
+             final boolean repWriteAllowed,
+             final boolean acksEnforced) {
+            this.localWrite = localWrite;
+            this.readOnly = readOnly;
+            this.localWriteAllowed = localWriteAllowed;
+            this.repWriteAllowed = repWriteAllowed;
+            this.acksEnforced = acksEnforced;
+        }
+
+        @Override
+        public String toString() {
+            return "localWrite=" + localWrite +
+                " readOnly=" + readOnly +
+                " localWriteAllowed=" + localWriteAllowed +
+                " repWriteAllowed=" + repWriteAllowed +
+                " acksEnforced=" + acksEnforced;
+        }
+    }
+
+    private static Case[] CASES = new Case[] {
+        /*
+         * localWrite
+         * readOnly
+         * localWriteAllowed
+         * repWriteAllowed
+         * acksEnforced
+         */
+        new Case(false, false, false, true, true),
+        new Case(false, true, false, false, false),
+        new Case(true, false, true, false, false),
+    };
+
+    ReplicatedEnvironment master;
+    Database masterDb;
+    Database[] repDbs;
+    Database[] localNonTxnalDbs;
+    Database[] localTxnalDbs;
+
+    public LocalWriteTxnTest() {
+        groupSize = 3;
+        repDbs = new Database[groupSize];
+        localNonTxnalDbs = new Database[groupSize];
+        localTxnalDbs = new Database[groupSize];
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+
+        /* Prevent OOME in a long test suite. */
+        master = null;
+        masterDb = null;
+        repDbs = null;
+        localNonTxnalDbs = null;
+        localTxnalDbs = null;
+    }
+
+    /**
+     * Expect exception when both localWrite and readOnly are set to true.
+     */
+    @Test
+    public void testIllegalConfig() {
+        final TransactionConfig txnConfig = new TransactionConfig();
+        txnConfig.setLocalWrite(true);
+        try {
+            txnConfig.setReadOnly(true);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            assertEquals(false, txnConfig.getReadOnly());
+        }
+
+        /* Should fail setting params in either order. */
+        txnConfig.setLocalWrite(false);
+        txnConfig.setReadOnly(true);
+        try {
+            txnConfig.setLocalWrite(true);
+            fail();
+        } catch (IllegalArgumentException expected) {
+            assertEquals(false, txnConfig.getLocalWrite());
+        }
+    }
+
+    @Test
+    public void testReadAndWrite() {
+
+        openAndPopulateDBs();
+
+        int n = 0;
+
+        /*
+         * Checks for auto-commit writes and non-txnal reads.
+         */
+        try {
+            for (n = 0; n < groupSize; n += 1) {
+
+                /*
+                 * Auto-commit writes are always allowed (other than to a rep
+                 * DB on a replica, which is handled by checkWriteAllowed).
+                 */
+                checkWriteAllowed(
+                    repDbs[n], null, true);
+                checkWriteAllowed(
+                    localTxnalDbs[n], null, true);
+                checkWriteAllowed(
+                    localNonTxnalDbs[n], null, true);
+
+                /* Reads are always allowed. */
+                readRecordsNonTxnal(repDbs[n]);
+                readRecordsNonTxnal(localTxnalDbs[n]);
+                readRecordsNonTxnal(localNonTxnalDbs[n]);
+            }
+
+            /* Sync group before checking consistency enforcement. */
+            syncGroupToLastCommit();
+
+            for (n = 0; n < groupSize; n += 1) {
+
+                /* Consistency is only enforced on replicas. */
+                final boolean consistencyEnforced = (n > 0);
+
+                /*
+                 * Consistency of non-txnal reads is always enforced for a rep
+                 * DB, and never enforced for a local DB.
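+                 *
+                 * Added note: for the user txns exercised further below, the
+                 * policy instead comes from the TransactionConfig; a hedged
+                 * sketch using the public API, illustrative only:
+                 *
+                 *   txnConfig.setConsistencyPolicy(
+                 *       new TimeConsistencyPolicy(
+                 *           1, TimeUnit.SECONDS, 5, TimeUnit.SECONDS));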
+                 */
+                checkConsistencyEnforced(
+                    repDbs[n], null, consistencyEnforced);
+                checkConsistencyEnforced(
+                    localTxnalDbs[n], null, false);
+                checkConsistencyEnforced(
+                    localNonTxnalDbs[n], null, false);
+            }
+        } catch (final Throwable e) {
+            throw new RuntimeException(
+                String.valueOf(repEnvInfo[n].getEnv().getState()) +
+                " NonTxnal", e);
+        }
+
+        /*
+         * Checks for user txn writes and reads.
+         */
+        for (final Case c : CASES) {
+            try {
+                final TransactionConfig txnConfig =
+                    makeTxnConfig(c.localWrite, c.readOnly);
+
+                for (n = 0; n < groupSize; n += 1) {
+
+                    /*
+                     * Whether writes are allowed depends on whether
+                     * local-write or read-only is configured.
+                     */
+                    checkWriteAllowed(
+                        repDbs[n], txnConfig, c.repWriteAllowed);
+                    checkWriteAllowed(
+                        localTxnalDbs[n], txnConfig, c.localWriteAllowed);
+
+                    /* Reads are always allowed. */
+                    readRecords(repDbs[n], txnConfig);
+                    readRecords(localTxnalDbs[n], txnConfig);
+                }
+
+                /* Sync group before checking consistency enforcement. */
+                syncGroupToLastCommit();
+
+                for (n = 0; n < groupSize; n += 1) {
+
+                    /* Consistency is only enforced on replicas. */
+                    final boolean consistencyEnforced = (n > 0);
+
+                    /* Consistency is always enforced for user txns. */
+                    checkConsistencyEnforced(
+                        repDbs[n], txnConfig, consistencyEnforced);
+                    checkConsistencyEnforced(
+                        repDbs[n], null, consistencyEnforced);
+                    checkConsistencyEnforced(
+                        localTxnalDbs[n], txnConfig, consistencyEnforced);
+                }
+            } catch (final Throwable e) {
+                throw new RuntimeException(
+                    String.valueOf(repEnvInfo[n].getEnv().getState()) +
+                    " Case: " + c, e);
+            }
+        }
+
+        /*
+         * Stop one replica to check ack enforcement with ack policy ALL.
+         */
+        final int stopNode = groupSize - 1;
+        repDbs[stopNode].close();
+        localTxnalDbs[stopNode].close();
+        localNonTxnalDbs[stopNode].close();
+        repEnvInfo[stopNode].getEnv().close();
+
+        /*
+         * Ack enforcement checks for auto-commit writes.
+         */
+        try {
+            for (n = 0; n < stopNode; n += 1) {
+
+                /*
+                 * Acks are enforced only for rep DBs on the master. (If
+                 * writeAllowed is false, then of course acks cannot be
+                 * enforced.)
+                 */
+                if (n == 0) {
+                    checkAcksEnforced(
+                        repDbs[n], null, true /*acksEnforced*/,
+                        true /*writeAllowed*/);
+                } else {
+                    checkAcksEnforced(
+                        repDbs[n], null, false /*acksEnforced*/,
+                        false /*writeAllowed*/);
+                }
+                checkAcksEnforced(
+                    localTxnalDbs[n], null, false /*acksEnforced*/,
+                    true /*writeAllowed*/);
+                checkAcksEnforced(
+                    localNonTxnalDbs[n], null, false /*acksEnforced*/,
+                    true /*writeAllowed*/);
+            }
+        } catch (final Throwable e) {
+            throw new RuntimeException(
+                String.valueOf(repEnvInfo[n].getEnv().getState()) +
+                " NonTxnal", e);
+        }
+
+        /*
+         * Ack enforcement checks for user txn writes.
+         */
+        for (final Case c : CASES) {
+            try {
+                final TransactionConfig txnConfig =
+                    makeTxnConfig(c.localWrite, c.readOnly);
+
+                /*
+                 * Whether acks are enforced depends on whether local-write
+                 * is configured. (If writeAllowed is false, then of course
+                 * acks cannot be enforced.)
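+                 *
+                 * Added note: ack enforcement flows from the txn Durability;
+                 * checkAcksEnforced below installs the equivalent of this
+                 * sketch, shown here for reference:
+                 *
+                 *   txnConfig.setDurability(new Durability(
+                 *       Durability.SyncPolicy.NO_SYNC,
+                 *       Durability.SyncPolicy.NO_SYNC,
+                 *       Durability.ReplicaAckPolicy.ALL));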
+ */ + for (n = 0; n < stopNode; n += 1) { + final boolean acksEnforced = c.acksEnforced && (n == 0); + + checkAcksEnforced( + repDbs[n], txnConfig, acksEnforced, + c.repWriteAllowed && (n == 0)); + checkAcksEnforced( + localTxnalDbs[n], txnConfig, acksEnforced, + c.localWriteAllowed); + } + } catch (final Throwable e) { + throw new RuntimeException( + String.valueOf(repEnvInfo[n].getEnv().getState()) + + " Case: " + c, e); + } + } + + for (n = 0; n < stopNode; n += 1) { + repDbs[n].close(); + localTxnalDbs[n].close(); + localNonTxnalDbs[n].close(); + } + } + + private TransactionConfig makeTxnConfig(final boolean localWrite, + final boolean readOnly) { + + final TransactionConfig txnConfig = new TransactionConfig(); + + txnConfig.setLocalWrite(localWrite); + txnConfig.setReadOnly(readOnly); + + assertEquals(localWrite, txnConfig.getLocalWrite()); + assertEquals(readOnly, txnConfig.getReadOnly()); + + return txnConfig; + } + + /** + * Open and populate test DBs using auto-commit writes, which works without + * specifying local-write. This, plus using non-transactional reads, also + * provides more test coverage. + */ + private void openAndPopulateDBs() { + + /* Start rep group. */ + master = RepTestUtils.joinGroup(repEnvInfo); + assertSame(master, repEnvInfo[0].getEnv()); + + /* Open/populate/read local DBs on all nodes. */ + for (int i = 0; i < groupSize; i += 1) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + + localTxnalDbs[i] = openDb( + env, false /*replicated*/, true /*txnal*/); + + writeRecordsAutoCommit(localTxnalDbs[i]); + readRecordsNonTxnal(localTxnalDbs[i]); + + localNonTxnalDbs[i] = openDb( + env, false /*replicated*/, false /*txnal*/); + + writeRecordsAutoCommit(localNonTxnalDbs[i]); + readRecordsNonTxnal(localNonTxnalDbs[i]); + } + + /* Open/populate/read master DB. */ + masterDb = openDb(master, true /*replicated*/, true /*txnal*/); + repDbs[0] = masterDb; + + writeRecordsAutoCommit(masterDb); + readRecordsNonTxnal(masterDb); + + /* Must sync group before reading from replicas. */ + syncGroupToLastCommit(); + + /* Open/read replica DBs. */ + for (int i = 1; i < groupSize; i += 1) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + repDbs[i] = openDb(env, true /*replicated*/, true /*txnal*/); + readRecordsNonTxnal(repDbs[i]); + } + } + + private Database openDb(final ReplicatedEnvironment env, + final boolean replicated, + final boolean txnal) { + + final DatabaseConfig config = new DatabaseConfig(); + config.setAllowCreate(true); + config.setReplicated(replicated); + config.setTransactional(txnal); + + final String dbName = "test-" + + (replicated ? "rep" : "nonRep") + "-" + + (txnal ? "txnal" : "nonTxnal"); + + return env.openDatabase(null, dbName, config); + } + + /** + * Tries to write and checks behavior against the writeAllowed param value. + * + * @param txnConfig is non-null if an explicit txn should be used. Is null + * to write with a null txn (auto-commit) and when the db is non-txnal. 
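+     *
+     * Added note: a write to a replicated DB on a replica surfaces as
+     * ReplicaWriteException, which the catch clause below treats as a
+     * separate case from the UnsupportedOperationException raised by the
+     * local-write/read-only checks.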
+ */ + private void checkWriteAllowed(final Database db, + final TransactionConfig txnConfig, + final boolean writeAllowed) { + final ReplicatedEnvironment env = + (ReplicatedEnvironment) db.getEnvironment(); + + try { + if (txnConfig != null) { + writeRecords(db, txnConfig); + } else { + writeRecordsAutoCommit(db); + } + assertTrue("Write was allowed", writeAllowed); + } catch (UnsupportedOperationException e) { + assertFalse( + "Write was not allowed - " + e.getMessage(), writeAllowed); + } catch (ReplicaWriteException e) { + assertTrue( + "Expected UnsupportedOperation but got ReplicaWrite", + writeAllowed); + assertTrue(db.getConfig().getReplicated()); + assertEquals(ReplicatedEnvironment.State.REPLICA, env.getState()); + } + } + + /** + * Called with one replica down, so committing with ReplicaAckPolicy.ALL + * should always fail. + * + * @param txnConfig is non-null if an explicit txn should be used. + * Is null to use a null txn (auto-commit) and when the db is non-txnal. + */ + private void checkAcksEnforced(final Database db, + final TransactionConfig txnConfig, + final boolean acksEnforced, + final boolean writeAllowed) { + + final ReplicatedEnvironment env = + (ReplicatedEnvironment) db.getEnvironment(); + + final Durability ackPolicyAll = new Durability( + Durability.SyncPolicy.NO_SYNC, + Durability.SyncPolicy.NO_SYNC, + Durability.ReplicaAckPolicy.ALL); + + final Durability saveDurability; + + if (txnConfig != null) { + saveDurability = txnConfig.getDurability(); + txnConfig.setDurability(ackPolicyAll); + } else { + final EnvironmentConfig envConfig = env.getConfig(); + saveDurability = envConfig.getDurability(); + envConfig.setDurability(ackPolicyAll); + env.setMutableConfig(envConfig); + } + + try { + if (txnConfig != null) { + if (writeAllowed) { + writeRecords(db, txnConfig); + } else { + readRecords(db, txnConfig); + } + } else { + if (writeAllowed) { + writeRecordsAutoCommit(db); + } else { + readRecordsNonTxnal(db); + } + } + assertFalse("Acks were not enforced", acksEnforced); + } catch (InsufficientReplicasException e) { + assertTrue("Acks were enforced", acksEnforced); + } finally { + if (txnConfig != null) { + txnConfig.setDurability(saveDurability); + } else { + final EnvironmentConfig envConfig = env.getConfig(); + envConfig.setDurability(saveDurability); + env.setMutableConfig(envConfig); + } + } + } + + private void writeRecordsAutoCommit(final Database db) { + writeRecords(db, (Transaction) null); + } + + private CommitToken writeRecords(final Database db, + final TransactionConfig txnConfig) { + assertNotNull(txnConfig); + final Environment env = db.getEnvironment(); + final Transaction txn = env.beginTransaction(null, txnConfig); + try { + writeRecords(db, txn); + txn.commit(); + return txn.getCommitToken(); + } catch (final Throwable e) { + txn.abort(); + throw e; + } + } + + private void writeRecords(final Database db, final Transaction txn) { + + /* Cannot use a cursor to write to a txnal DB with no txn. */ + final Cursor cursor = + (!db.getConfig().getTransactional() || (txn != null)) ? 
+ db.openCursor(txn, null) : + null; + + try { + for (int i = 0; i < N_RECORDS; i++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + final OperationStatus status; + if ((cursor == null) || ((i & 1) == 0)) { + status = db.put(txn, key, data); + } else { + status = cursor.put(key, data); + } + assertSame(OperationStatus.SUCCESS, status); + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + } + + /** + * Tries to read with an impossible consistency policy that can never be + * satisfied. + * + * @param txnConfig is non-null if an explicit txn should be used. + * Is null to read non-transactionally and when the db is non-txnal. + */ + private void checkConsistencyEnforced( + final Database db, + final TransactionConfig txnConfig, + final boolean consistencyEnforced) { + + final ReplicatedEnvironment env = + (ReplicatedEnvironment) db.getEnvironment(); + + final RepImpl repImpl = RepInternal.getNonNullRepImpl(env); + final RepNode repNode = repImpl.getRepNode(); + + final CommitToken impossibleCommitToken = new CommitToken( + repNode.getUUID(), + repNode.getCurrentTxnEndVLSN().getSequence() + 100); + + final ReplicaConsistencyPolicy impossiblePolicy = + new CommitPointConsistencyPolicy( + impossibleCommitToken, 1, MILLISECONDS); + + final ReplicaConsistencyPolicy savePolicy; + + if (txnConfig != null) { + savePolicy = txnConfig.getConsistencyPolicy(); + txnConfig.setConsistencyPolicy(impossiblePolicy); + } else { + savePolicy = repImpl.getDefaultConsistencyPolicy(); + repImpl.setDefaultConsistencyPolicy(impossiblePolicy); + } + + try { + if (txnConfig != null) { + readRecords(db, txnConfig); + } else { + readRecordsNonTxnal(db); + } + assertFalse("Consistency was not enforced", consistencyEnforced); + } catch (ReplicaConsistencyException e) { + assertTrue("Consistency was enforced", consistencyEnforced); + } finally { + if (txnConfig != null) { + txnConfig.setConsistencyPolicy(savePolicy); + } else { + repImpl.setDefaultConsistencyPolicy(savePolicy); + } + } + } + + private void readRecordsNonTxnal(final Database db) { + readRecords(db, (Transaction) null); + } + + private void readRecords(final Database db, + final TransactionConfig txnConfig) { + final Environment env = db.getEnvironment(); + final Transaction txn = env.beginTransaction(null, txnConfig); + readRecords(db, txn); + txn.commit(); + } + + private void readRecords(final Database db, final Transaction txn) { + for (int i = 0; i < N_RECORDS; i++) { + IntegerBinding.intToEntry(i, key); + final OperationStatus status = db.get(txn, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(i, IntegerBinding.entryToInt(data)); + } + int i = 0; + final Cursor cursor = db.openCursor(txn, null); + while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) { + assertEquals(i, IntegerBinding.entryToInt(key)); + assertEquals(i, IntegerBinding.entryToInt(data)); + i += 1; + } + assertEquals(N_RECORDS, i); + cursor.close(); + } + + private void syncGroupToLastCommit() { + try { + RepTestUtils.syncGroupToLastCommit(repEnvInfo, groupSize); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } +} diff --git a/test/com/sleepycat/je/rep/LogRewriteWarningTest.java b/test/com/sleepycat/je/rep/LogRewriteWarningTest.java new file mode 100644 index 0000000..4fc45db --- /dev/null +++ b/test/com/sleepycat/je/rep/LogRewriteWarningTest.java @@ -0,0 +1,175 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+/**
+ * Simple tests for the log rewrite notification feature.
+ */
+public class LogRewriteWarningTest extends RepTestBase {
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        groupSize = 3;
+        super.setUp();
+    }
+
+    /**
+     * Verifies that if there has been no hard recovery, we don't get any
+     * spurious notification.
+     */
+    @Test
+    public void testUsuallyQuiet() throws Exception {
+        checkRollbacks(false);
+    }
+
+    /**
+     * Verifies that hard recovery generates the desired notification callback.
+     */
+    @Test
+    public void testNotification() throws Exception {
+        checkRollbacks(true);
+    }
+
+    /**
+     * @param hard whether to do hard recovery or not.
+     */
+    private void checkRollbacks(boolean hard) throws Exception {
+        createGroup();
+        ReplicatedEnvironment env = repEnvInfo[0].getEnv();
+        Database db = env.openDatabase(null, TEST_DB_NAME, dbconfig);
+
+        boolean left = false;
+        if (hard) {
+            leaveGroupAllButMaster();
+            left = true;
+
+            /*
+             * Write a transaction at the master, at a time when the other two
+             * nodes are not running, so they won't receive the update.
+             */
+            Transaction txn =
+                env.beginTransaction(null, RepTestUtils.SYNC_SYNC_NONE_TC);
+            db.put(txn, key, data);
+            txn.commit();
+        } else {
+            db.put(null, key, data);
+        }
+        db.close();
+
+        closeNodes(repEnvInfo[0]);
+        if (left) {
+            /*
+             * Restart the other two nodes, after shutting down the master.
+             * The other two nodes will elect a new master between them, and
+             * this new master will lack the transaction we created above.
+             */
+            restartNodes(repEnvInfo[1], repEnvInfo[2]);
+        } else {
+            /*
+             * Wait for a new master to be elected, because we want to make
+             * sure the old master doesn't become master again when we restart
+             * it.
+             */
+            final RepEnvInfo node2 = repEnvInfo[1];
+            final RepEnvInfo node3 = repEnvInfo[2];
+            RepTestUtils.awaitCondition(new Callable() {
+                    public Boolean call() {
+                        return node2.isMaster() || node3.isMaster();
+                    }
+                });
+        }
+
+        /*
+         * Restart the former master. It will sync with the new master, which
+         * will result in a rollback of a committed transaction.
+         */
+        final boolean[] warned = new boolean[1];
+        repEnvInfo[0].getRepConfig().setLogFileRewriteListener
+            (new LogFileRewriteListener() {
+                public void rewriteLogFiles(Set files) {
+                    warned[0] = true;
+                }
+            });
+        restartNodes(repEnvInfo[0]);
+        if (hard) {
+            assertTrue(warned[0]);
+        } else {
+            /*
+             * We haven't done a hard recovery, so the callback should not
+             * have been invoked.
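+             *
+             * Added note, for reference: the registration pattern used above,
+             * with the documented Set<File> parameter type (the raw Set in
+             * this test is just a shortcut):
+             *
+             *   repConfig.setLogFileRewriteListener(
+             *       new LogFileRewriteListener() {
+             *           public void rewriteLogFiles(Set<File> files) {
+             *               // back up the named files before they are
+             *               // overwritten by the rollback
+             *           }
+             *       });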
+ */ + assertFalse(warned[0]); + } + } + + /** + * Verifies that an exception that occurs in the user's callback results in + * failure to open the environment, and is preserved as the "cause" + * reported back to the caller. + */ + @Test + public void testException() throws Exception { + createGroup(); + ReplicatedEnvironment env = repEnvInfo[0].getEnv(); + Database db = env.openDatabase(null, TEST_DB_NAME, dbconfig); + + /* + * Use the same technique as other tests in this class to produce a + * hard recovery scenario. + */ + leaveGroupAllButMaster(); + Transaction txn = + env.beginTransaction(null, RepTestUtils.SYNC_SYNC_NONE_TC); + db.put(txn, key, data); + txn.commit(); + db.close(); + + closeNodes(repEnvInfo[0]); + restartNodes(repEnvInfo[1], repEnvInfo[2]); + + final RuntimeException problem = + new RuntimeException("application problem in callback"); + repEnvInfo[0].getRepConfig().setLogFileRewriteListener + (new LogFileRewriteListener() { + public void rewriteLogFiles(Set files) { + throw problem; + } + }); + try { + repEnvInfo[0].openEnv(); + } catch (EnvironmentFailureException e) { + assertSame(problem, e.getCause()); + } + } +} diff --git a/test/com/sleepycat/je/rep/MasterChangeTest.java b/test/com/sleepycat/je/rep/MasterChangeTest.java new file mode 100644 index 0000000..40114c4 --- /dev/null +++ b/test/com/sleepycat/je/rep/MasterChangeTest.java @@ -0,0 +1,621 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.WaitForMasterListener; +import com.sleepycat.je.rep.utilint.WaitForReplicaListener; +import com.sleepycat.je.util.FileHandler; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test what happens when a master node loses mastership and becomes a replica. 
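+ *
+ * Added note: applications typically observe such transitions through a
+ * StateChangeListener; a minimal sketch, illustrative only and not part of
+ * the original test:
+ *
+ *   env.setStateChangeListener(new StateChangeListener() {
+ *       public void stateChange(StateChangeEvent event) {
+ *           System.out.println("Node state is now " + event.getState());
+ *       }
+ *   });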
+ */ +public class MasterChangeTest extends TestBase { + + private final boolean verbose = Boolean.getBoolean("verbose"); + private static final String DB_NAME = "testDb"; + private final File envRoot; + private final Logger logger; + + public MasterChangeTest() { + envRoot = SharedTestUtils.getTestDir(); + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + FileHandler.STIFLE_DEFAULT_ERROR_MANAGER = true; + } + + /** + * Checks that a master node that becomes a replica functions + * properly as a replica. Preexisting transactions will be closed and any + * attempt to do more writes, or to commit or abort old transactions will + * result in a ReplicaWriteException. + */ + @Test + public void testMasterBecomesReplica() + throws Exception { + + RepEnvInfo[] repEnvInfo = null; + Database db = null; + int numNodes = 3; + Transaction oldMasterIncompleteTxn1 = null; + Transaction oldMasterIncompleteTxn3 = null; + Transaction newMasterIncompleteTxn5 = null; + try { + repEnvInfo = RepTestUtils.setupEnvInfos + (envRoot, numNodes, RepTestUtils.SYNC_SYNC_NONE_DURABILITY); + + /* + * Start the master first, to ensure that it is the master, and + * then start the rest of the group. + */ + ReplicatedEnvironment firstMaster = repEnvInfo[0].openEnv(); + assert firstMaster != null; + + db = createDb(firstMaster); + db.close(); + oldMasterIncompleteTxn1 = doInsert(firstMaster, 1, false); + + for (int i = 1; i < numNodes; i++) { + repEnvInfo[i].openEnv(); + } + + /* + * After node1 and node2 join, make sure that their presence in the + * rep group db is propagated before we do a forceMaster, by doing + * a consistent read. When a node calls for an election, it must + * have its own id available to itself from the rep group db on + * disk. If it doesn't, it will send an election request with an + * illegal node id. In real life, this can never happen, because a + * node that does not have its own id won't win mastership, since + * others will be ahead of it. + */ + Transaction oldMasterTxn2 = doInsert(firstMaster, 2, true); + makeReplicasConsistent(oldMasterTxn2, repEnvInfo[1], repEnvInfo[2]); + + /* + * Mimic a network partition by forcing one replica to become the + * master. + */ + int lastIndex = numNodes - 1; + WaitForMasterListener masterWaiter = new WaitForMasterListener(); + ReplicatedEnvironment forcedMaster = repEnvInfo[lastIndex].getEnv(); + forcedMaster.setStateChangeListener(masterWaiter); + RepNode lastRepNode = repEnvInfo[lastIndex].getRepNode(); + + WaitForReplicaListener replicaWaiter = new WaitForReplicaListener(); + firstMaster.setStateChangeListener(replicaWaiter); + + /* + * Write record 3 and do one last incomplete transaction on node1, + * the current master, used to test that the transaction will later + * be aborted after mastership transfer. + */ + oldMasterIncompleteTxn3 = doInsert(firstMaster, 3, false); + + /* + * Make node3 the master + */ + lastRepNode.forceMaster(true); + masterWaiter.awaitMastership(); + + /* + * Write record 4 on the new master, node3. Insert and commit on + * the new master. + */ + doInsert(forcedMaster, 4, true); + + /* + * We expect the old master to have become a replica, and for the + * same environment handle to still be valid. Use the old handle + * to be sure it's still valid. The old transactions should be + * aborted. 
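+             *
+             * Added note: per the class comment, trying to write with or
+             * commit one of those stale txns would raise a
+             * ReplicaWriteException; calling abort() on an already-aborted
+             * txn remains legal, which the finally block below relies on.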
+ */ + replicaWaiter.awaitReplica(); + assertEquals(State.REPLICA, firstMaster.getState()); + assertEquals(Transaction.State.ABORTED, + oldMasterIncompleteTxn1.getState()); + assertEquals(Transaction.State.ABORTED, + oldMasterIncompleteTxn3.getState()); + + /* + * Sync up the group. We should now see records 2 and 4 on all + * nodes, but records 1 and 3 were not committed, and should be + * aborted. + */ + logger.info("sync group"); + VLSN commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, + numNodes); + + /* + * Make sure we can do a transactional cursor scan of the data on + * all nodes. This will ensure that we don't have any dangling + * locks. + */ + scanData(repEnvInfo, 2, 4); + + logger.info("run check"); + + RepTestUtils.checkNodeEquality(commitVLSN, verbose, repEnvInfo); + + /* + * Now return mastership back to node 1, and make sure it can serve + * as master. + */ + newMasterIncompleteTxn5 = doInsert(forcedMaster, 5, false); + RepTestUtils.syncGroup(repEnvInfo); + WaitForMasterListener oldMasterWaiter = + new WaitForMasterListener(); + firstMaster.setStateChangeListener(oldMasterWaiter); + + /* Make it possible to check that node3 has become a replica again*/ + WaitForReplicaListener newReplicaWaiter = + new WaitForReplicaListener(); + forcedMaster.setStateChangeListener(newReplicaWaiter); + + logger.info("returning mastership to first node"); + ReplicationMutableConfig config = new ReplicationMutableConfig(); + config.setNodePriority(2); + firstMaster.setRepMutableConfig(config); + repEnvInfo[0].getRepNode().forceMaster(true); + logger.info("wait for transition of mastership"); + oldMasterWaiter.awaitMastership(); + newReplicaWaiter.awaitReplica(); + logger.info("transition done"); + assertEquals(Transaction.State.ABORTED, + newMasterIncompleteTxn5.getState()); + + /* Insert and commit on the old, original master */ + doInsert(firstMaster, 6, true); + commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, + numNodes); + scanData(repEnvInfo, 2, 4, 6); + } catch (Exception e) { + logger.info("Unexpected failure"); + e.printStackTrace(); + throw e; + } finally { + if (oldMasterIncompleteTxn1 != null) { + /* Should still be valid to call abort */ + oldMasterIncompleteTxn1.abort(); + } + + if (oldMasterIncompleteTxn3 != null) { + /* Should still be valid to call abort */ + oldMasterIncompleteTxn3.abort(); + } + + if (newMasterIncompleteTxn5 != null) { + /* Should still be valid to call abort */ + newMasterIncompleteTxn5.abort(); + } + + if (db != null) { + db.close(); + db = null; + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /** + * Move mastership around a group by feigning network partitions. + * @throws Throwable + */ + @Test + public void testTransitions() + throws Throwable { + + RepEnvInfo[] repEnvInfo = null; + Database db = null; + int numNodes = 3; + + repEnvInfo = RepTestUtils.setupEnvInfos + (envRoot, numNodes, RepTestUtils.SYNC_SYNC_NONE_DURABILITY); + + /* + * Start the master first, to ensure that it is the master, and + * then start the rest of the group. + */ + ReplicatedEnvironment firstMaster = repEnvInfo[0].openEnv(); + assert firstMaster != null; + + db = createDb(firstMaster); + db.close(); + + for (int i = 1; i < numNodes; i++) { + repEnvInfo[i].openEnv(); + } + + Set expectedValues = new HashSet(); + + int numSwitches = 5; + int firstIndex = 0; + AtomicInteger useVal = new AtomicInteger(); + /* + * Do a round of switching masters, by mimicking network partitions. 
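+         * (Added note: mastership is moved with RepNode.forceMaster plus a
+         * temporary node-priority bump; see startTxnAndSwitch below.)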
+ * Check that open txns are aborted if they have writes, and left open + * if they only did reads. + */ + while (numSwitches-- > 0) { + WorkGenerator generator = new WorkGenerator(expectedValues, + useVal); + int targetIndex = (firstIndex == 2) ? 0 : firstIndex + 1; + logger.fine("==> Switching from " + firstIndex + " to " + + targetIndex + " starting with record " + useVal.get()); + startTxnAndSwitch(repEnvInfo, firstIndex, targetIndex, generator); + firstIndex = targetIndex; + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + private void startTxnAndSwitch(RepEnvInfo[] repEnvInfo, + int currentIndex, + int targetIndex, + WorkGenerator generator) + throws DatabaseException, InterruptedException { + + ReplicatedEnvironment currentMaster = + repEnvInfo[currentIndex].getEnv(); + + try { + /* + * Do one last incomplete transaction on the master, used to test + * that the transaction will later be aborted after mastership + * transfer. + */ + generator.doWorkOnOldMaster(currentMaster); + + /* + * Mimic a network partition by forcing a replica to become the + * new master. Keep the old master alive; don't shoot it, because + * we want to test master->replica transitions. + */ + WaitForMasterListener masterWaiter = new WaitForMasterListener(); + WaitForReplicaListener replicaWaiter = + new WaitForReplicaListener(); + ReplicatedEnvironment targetMaster = + repEnvInfo[targetIndex].getEnv(); + targetMaster.setStateChangeListener(masterWaiter); + currentMaster.setStateChangeListener(replicaWaiter); + RepNode targetRepNode = repEnvInfo[targetIndex].getRepNode(); + ReplicationMutableConfig config = new ReplicationMutableConfig(); + config.setNodePriority(2); + targetMaster.setRepMutableConfig(config); + targetRepNode.forceMaster(true); + masterWaiter.awaitMastership(); + replicaWaiter.awaitReplica(); + + /* Revert the priority back */ + config.setNodePriority(1); + targetMaster.setRepMutableConfig(config); + + /* Insert and commit on the new master. */ + generator.doWorkOnNewMaster(targetMaster); + + /* + * Sync up the group before checking that it holds committed + * records only. + */ + logger.info("sync group"); + VLSN commitVLSN = + RepTestUtils.syncGroupToLastCommit(repEnvInfo, + repEnvInfo.length); + /* + * Make sure we can do a transactional cursor scan of the data on + * all nodes. This will ensure that we don't have any dangling + * locks. + */ + generator.scanData(repEnvInfo); + logger.info("run check"); + RepTestUtils.checkNodeEquality(commitVLSN, verbose, repEnvInfo); + } finally { + /* + * Check that all the non-committed transactions from the old + * master are not usable anymore. + */ + generator.assertIncompleteTxnsInvalidated(); + + /* In this test, there should be no recoveries. TODO, better + test? */ + generator.assertNoRestarts(repEnvInfo); + + /* Close off the old transactions */ + generator.abortIncompleteTxns(); + } + } + + /** + * Do a transactional cursor scan and check each node to see if the + * expected records are there. Using a transactional cursor means we'll + * check for dangling locks. If a lock is left over, it will cause a + * deadlock. + */ + private void scanData(RepEnvInfo[] repEnvInfo, int... 
expected) {
+        for (RepEnvInfo info : repEnvInfo) {
+
+            Database db = openDb(info.getEnv());
+            Cursor c = db.openCursor(null, CursorConfig.READ_COMMITTED);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            for (int val : expected) {
+                assertEquals(OperationStatus.SUCCESS,
+                             c.getNext(key, data, null));
+                assertEquals(val, IntegerBinding.entryToInt(key));
+            }
+
+            /* Assert that there are no other records */
+            assertEquals(OperationStatus.NOTFOUND, c.getNext(key, data, null));
+
+            c.close();
+            db.close();
+        }
+    }
+
+    private Database createDb(ReplicatedEnvironment master) {
+        return openDb(master, true);
+    }
+
+    private Database openDb(ReplicatedEnvironment master) {
+        return openDb(master, false);
+    }
+
+    private Database openDb(ReplicatedEnvironment master, boolean allowCreate) {
+
+        Transaction txn = master.beginTransaction(null, null);
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(allowCreate);
+        dbConfig.setTransactional(true);
+        Database db = master.openDatabase(txn, DB_NAME, dbConfig);
+        txn.commit();
+        return db;
+    }
+
+    /**
+     * @return the transaction for the unfinished txn
+     */
+    private Transaction doInsert(ReplicatedEnvironment master,
+                                 int val,
+                                 boolean doCommit) {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Transaction txn = null;
+
+        Database db = openDb(master);
+        txn = master.beginTransaction(null, null);
+        IntegerBinding.intToEntry(val, key);
+        IntegerBinding.intToEntry(val, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(txn, key, data));
+
+        if (doCommit) {
+            txn.commit();
+        }
+
+        db.close();
+
+        return txn;
+    }
+
+    /** Just do an insert; transactions are managed outside this method. */
+    private void doInsert(ReplicatedEnvironment master,
+                          final Transaction txn,
+                          final int val) {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        Database db = openDb(master);
+        IntegerBinding.intToEntry(val, key);
+        IntegerBinding.intToEntry(val, data);
+        assertEquals(OperationStatus.SUCCESS, db.put(txn, key, data));
+        db.close();
+    }
+
+    /**
+     * Ensure that the specified nodes are consistent with the specified
+     * transaction.
+     */
+    private void makeReplicasConsistent(Transaction targetTxn,
+                                        RepEnvInfo... repEnvInfo) {
+        TransactionConfig txnConfig = new TransactionConfig();
+        txnConfig.setConsistencyPolicy(new CommitPointConsistencyPolicy
+                                       (targetTxn.getCommitToken(),
+                                        1000,
+                                        TimeUnit.SECONDS));
+
+        for (RepEnvInfo info : repEnvInfo) {
+
+            /*
+             * Open a read transaction that forces the replicas to become
+             * consistent.
+             */
+            Transaction txn = info.getEnv().beginTransaction(null, txnConfig);
+            txn.commit();
+        }
+    }
+
+    /**
+     * Generates a different work load for each test iteration. The pattern is
+     * - bring up a group, nodeA is the master
+     * - call doWorkOnOldMaster() to execute a variety of committed and
+     *   uncommitted transactions on nodeA
+     * - force the mastership of the group to nodeB, leaving nodeA up
+     * - call doWorkOnNewMaster on nodeB
+     * - sync up the group
+     * - call scanData to make sure all nodes have the proper set of
+     *   committed data, and don't see any uncommitted data
+     * - make sure that the transactions used from doWorkOnOldMaster are
+     *   invalid, and that the application can no longer use them.
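+     *
+     * (Added note: within startTxnAndSwitch the per-iteration order is
+     * doWorkOnOldMaster, forceMaster, doWorkOnNewMaster, scanData,
+     * assertIncompleteTxnsInvalidated and finally abortIncompleteTxns.)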
+ */ + public class WorkGenerator { + + protected final Set oldMasterIncompleteTxns = + new HashSet(); + protected final Set oldMasterUnusedTxns = + new HashSet(); + protected final Set committedValues; + protected final AtomicInteger useVal; + private Cursor oldMasterCursor; + private Database oldMasterDb; + + WorkGenerator(Set committedValues, AtomicInteger useVal) { + this.committedValues = committedValues; + this.useVal = useVal; + } + + public void assertNoRestarts(RepEnvInfo[] repEnvInfo) { + for (RepEnvInfo rInfo : repEnvInfo) { + ReplicatedEnvironment.State state = rInfo.getEnv().getState(); + assert (state == State.MASTER) || (state == State.REPLICA) : + "state is unexpectedly " + state; + } + } + + public void doWorkOnOldMaster(ReplicatedEnvironment master) { + + Transaction t1 = master.beginTransaction(null, null); + Transaction t2 = master.beginTransaction(null, null); + Transaction tUnused = master.beginTransaction(null, null); + + /* not committed */ + doInsert(master, t1, useVal.incrementAndGet()); + + int recordVal = useVal.incrementAndGet(); + doInsert(master, t2, recordVal); + committedValues.add(recordVal); + + /* not committed */ + doInsert(master, t1, useVal.incrementAndGet()); + + recordVal = useVal.incrementAndGet(); + doInsert(master, t2, recordVal); + committedValues.add(recordVal); + + oldMasterIncompleteTxns.add(t1); + assertEquals("txn " + tUnused.getId(), 0, + DbInternal.getTxn(tUnused).getWriteLockIds().size()); + assertEquals("txn " + tUnused.getId(), 0, + DbInternal.getTxn(tUnused).getReadLockIds().size()); + oldMasterUnusedTxns.add(tUnused); + t2.commit(); + + /* Get read locks */ + oldMasterDb = openDb(master); + oldMasterCursor = + oldMasterDb.openCursor(t1, CursorConfig.READ_COMMITTED); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + while (oldMasterCursor.getNext(key, data, null) == + OperationStatus.SUCCESS) { + logger.fine("scanning " + IntegerBinding.entryToInt(key)); + } + } + + public void doWorkOnNewMaster(ReplicatedEnvironment currentMaster) { + Transaction t1 = currentMaster.beginTransaction(null, null); + int recordVal = useVal.incrementAndGet(); + doInsert(currentMaster, t1, recordVal); + committedValues.add(recordVal); + t1.commit(); + } + + public void scanData(RepEnvInfo[] repEnvInfo) { + + for (RepEnvInfo info: repEnvInfo) { + Database db = openDb(info.getEnv()); + Cursor c = db.openCursor(null, CursorConfig.READ_COMMITTED); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + Set results = new HashSet(); + while (c.getNext(key, data, null) == OperationStatus.SUCCESS) { + results.add(IntegerBinding.entryToInt(key)); + } + + c.close(); + db.close(); + + logger.fine("Node " + info.getEnv().getNodeName() + + " contains " + results); + assert results.containsAll(committedValues) : + "Results do not contain all committed values. " + + "results=" + results + " committed=" + committedValues; + + assert committedValues.containsAll(results) : + "CommittedValues do not contain all results. 
" + + "results=" + results + " committed=" + committedValues; + } + } + + public void assertIncompleteTxnsInvalidated() { + for (Transaction txn : oldMasterIncompleteTxns) { + assert !txn.isValid() : txn.toString() + " " + txn.getState(); + } + + for (Transaction txn : oldMasterUnusedTxns) { + assert txn.isValid() : txn.toString() + " " + txn.getState(); + } + } + + public void abortIncompleteTxns() { + for (Transaction txn : oldMasterIncompleteTxns) { + txn.abort(); + } + + for (Transaction txn : oldMasterUnusedTxns) { + txn.abort(); + } + + if (oldMasterCursor != null) { + oldMasterCursor.close(); + } + + if (oldMasterDb != null) { + oldMasterDb.close(); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/MockClientNode.java b/test/com/sleepycat/je/rep/MockClientNode.java new file mode 100644 index 0000000..9fb13e6 --- /dev/null +++ b/test/com/sleepycat/je/rep/MockClientNode.java @@ -0,0 +1,261 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + + +package com.sleepycat.je.rep; + +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Timer; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.ChannelTimeoutTask; +import com.sleepycat.je.rep.impl.node.FeederManager; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.stream.BaseProtocol.EntryRequestType; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.stream.ReplicaFeederHandshake; +import com.sleepycat.je.rep.stream.ReplicaFeederHandshakeConfig; +import com.sleepycat.je.rep.stream.SubscriberFeederSyncup; +import com.sleepycat.je.rep.subscription.SubscriptionConfig; +import com.sleepycat.je.rep.utilint.BinaryProtocol; +import com.sleepycat.je.rep.utilint.NamedChannel; +import com.sleepycat.je.rep.utilint.NamedChannelWithTimeout; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.utilint.InternalException; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * A mock client node that can be created to a given node type, used in test + * only. 
+ */ +class MockClientNode { + + public final String nodeName = "MockClientNode"; + + private final Logger logger; + private final SubscriptionConfig config; + private final NodeType nodeType; + private final RepImpl repImpl; + + /* communication channel between subscriber and feeder */ + private NamedChannelWithTimeout namedChannel; + /* task to register channel with timeout */ + private ChannelTimeoutTask channelTimeoutTask; + /* protocol used to communicate with feeder */ + private Protocol protocol; + /* received msgs */ + private List receivedMsgs; + + MockClientNode(NodeType nodeType, ReplicatedEnvironment env, Logger logger) + throws Exception { + + this.nodeType = nodeType; + this.logger = logger; + + repImpl = RepInternal.getNonNullRepImpl(env); + config = createConfig(env, true); + receivedMsgs = new ArrayList<>(); + protocol = null; + namedChannel = null; + } + + void handshakeWithFeeder() throws Exception { + + openChannel(); + ReplicaFeederHandshake handshake = + new ReplicaFeederHandshake( + new MockClientNodeFeederHandshakeConfig()); + protocol = handshake.execute(); + + } + + VLSN syncupWithFeeder(VLSN reqVLSN) { + final SubscriberFeederSyncup syncup = + new SubscriberFeederSyncup(namedChannel, protocol, + config.getFeederFilter(), + repImpl, + EntryRequestType.DEFAULT, + logger); + return syncup.execute(reqVLSN); + } + + void consumeMsgLoop(long expected) + throws InternalException, IOException { + + long counter = 0; + while (counter < expected) { + final BinaryProtocol.Message message = protocol.read(namedChannel); + if ((message == null)) { + return; + } + + final BinaryProtocol.MessageOp messageOp = message.getOp(); + /* ignore heartbeat in mock client */ + if (messageOp != Protocol.HEARTBEAT) { + if (messageOp == Protocol.SHUTDOWN_REQUEST) { + throw new InternalException("Receive shutdown msg from " + + "feeder " + message); + } else { + /* a regular data entry message */ + receivedMsgs.add(message); + counter++; + } + } + } + } + + void shutdown() { + RepUtils.shutdownChannel(namedChannel); + if (channelTimeoutTask != null) { + channelTimeoutTask.cancel(); + } + } + + List getReceivedMsgs() { + return receivedMsgs; + } + + /* Open a data channel to feeder */ + private NamedChannel openChannel() throws Exception { + + if (repImpl == null) { + throw new IllegalStateException("Replication env is unavailable."); + } + + DataChannelFactory.ConnectOptions connectOpts = + new DataChannelFactory + .ConnectOptions() + .setTcpNoDelay(config.TCP_NO_DELAY) + .setReceiveBufferSize(config.getReceiveBufferSize()) + .setOpenTimeout((int) config + .getStreamOpenTimeout(TimeUnit.MILLISECONDS)) + .setBlocking(config.BLOCKING_MODE_CHANNEL); + + final DataChannel channel = + RepUtils.openBlockingChannel(config.getInetSocketAddress(), + repImpl.getChannelFactory(), + connectOpts); + + ServiceDispatcher.doServiceHandshake(channel, + FeederManager.FEEDER_SERVICE); + final int timeoutMs = repImpl.getConfigManager(). 
+ getDuration(RepParams.PRE_HEARTBEAT_TIMEOUT); + + channelTimeoutTask = new ChannelTimeoutTask(new Timer(true)); + namedChannel = + new NamedChannelWithTimeout(repImpl, logger, channelTimeoutTask, + channel, timeoutMs); + + return namedChannel; + } + + /* Create a subscription configuration */ + private SubscriptionConfig createConfig(ReplicatedEnvironment masterEnv, + boolean useGroupUUID) + throws Exception { + + final String home = "./subhome/"; + final String subNodeName = "test-mockclient-node"; + final String nodeHostPortPair = "localhost:6001"; + + String feederNode; + int feederPort; + String groupName; + + final File envRoot = SharedTestUtils.getTestDir(); + final File subHome = new File(envRoot.getAbsolutePath() + + File.separator + home); + if (!subHome.exists()) { + if (!subHome.mkdir()) { + fail("unable to create test dir, fail the test"); + } + } + + ReplicationGroup group = masterEnv.getGroup(); + ReplicationNode member = group.getMember(masterEnv.getNodeName()); + feederNode = member.getHostName(); + feederPort = member.getPort(); + groupName = group.getName(); + + UUID uuid; + if (useGroupUUID) { + uuid = group.getRepGroupImpl().getUUID(); + } else { + uuid = null; + } + + final String feederHostPortPair = feederNode + ":" + feederPort; + return new SubscriptionConfig(subNodeName, subHome.getAbsolutePath(), + nodeHostPortPair, feederHostPortPair, + groupName, uuid, nodeType); + } + + /*-----------------------------------*/ + /*- Inner Classes -*/ + /*-----------------------------------*/ + private class MockClientNodeFeederHandshakeConfig + implements ReplicaFeederHandshakeConfig { + + MockClientNodeFeederHandshakeConfig() { + } + + public RepImpl getRepImpl() { + return repImpl; + } + + public NameIdPair getNameIdPair() { + return new NameIdPair(nodeName); + } + + public RepUtils.Clock getClock() { + return new RepUtils.Clock(RepImpl.getClockSkewMs()); + } + + public NodeType getNodeType() { + return nodeType; + } + + public NamedChannel getNamedChannel() { + return namedChannel; + } + + /* create a group impl from group name and group uuid */ + public RepGroupImpl getGroup() { + + RepGroupImpl repGroupImpl = new RepGroupImpl( + config.getGroupName(), + true, /* unknown group uuid */ + repImpl.getCurrentJEVersion()); + + /* use uuid if specified, otherwise unknown uuid will be used */ + if (config.getGroupUUID() != null) { + repGroupImpl.setUUID(config.getGroupUUID()); + } + return repGroupImpl; + } + } +} diff --git a/test/com/sleepycat/je/rep/MultiProcessOpenEnvTest.java b/test/com/sleepycat/je/rep/MultiProcessOpenEnvTest.java new file mode 100644 index 0000000..321d71a --- /dev/null +++ b/test/com/sleepycat/je/rep/MultiProcessOpenEnvTest.java @@ -0,0 +1,886 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.util.ArrayList;
+import java.util.Properties;
+
+import org.junit.Test;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentLockedException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.junit.JUnitProcessThread;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Every replication node manages a single replicated JE environment
+ * directory. The environment follows the usual regulations governing a JE
+ * environment; namely, only a single read/write process can access the
+ * environment at a single point in time.
+ *
+ * In this unit test, exercise the rules by opening a single replicated
+ * environment from two processes. One process will open a
+ * ReplicatedEnvironment handle. The other will open a standalone
+ * environment. The expected results are:
+ *
+ * Env handle\Open         Open for write    Open for read
+ * ======================  ================  ===============
+ * ReplicatedEnvironment   OK.               IllegalArgEx
+ *
+ * Environment             IllegalArgEx      OK. Verify that we are
+ *                                           seeing a snapshot in the
+ *                                           presence of ongoing changes
+ *                                           (in another process) at a
+ *                                           master and Replica, and that
+ *                                           a reopen of the handle
+ *                                           updates the snapshot.
+ */
+public class MultiProcessOpenEnvTest extends TestBase {
+
+    private final File envRoot;
+    private final File masterEnvHome;
+    private final File replicaEnvHome;
+    private final File lockFile;
+    private static final int MAX_RETRIES = 20;
+    private static final String sleepInterval = "5000";
+    private static final String DB_NAME = "testDB";
+    private static final String LOCK_FILE_NAME = "filelocks.txt";
+
+    /* Name of the process which opens a ReplicatedEnvironment. */
+    private static final String repProcessName = "repProcess";
+    /* Name of the process which opens a standalone Environment. */
+    private static final String envProcessName = "envProcess";
+
+    public MultiProcessOpenEnvTest()
+        throws Exception {
+
+        envRoot = SharedTestUtils.getTestDir();
+        /* Make rep0 the environment home. */
+        File[] envHomes = RepTestUtils.makeRepEnvDirs(envRoot, 2);
+        masterEnvHome = envHomes[0];
+        replicaEnvHome = envHomes[1];
+        lockFile = new File(envRoot, LOCK_FILE_NAME);
+    }
+
+    /*
+     * Test the following case:
+     * 1. Start a process, p1, which opens a replicated Environment on
+     *    envHome, then sleeps.
+     * 2. Start a new process, p2, which opens a r/w standalone Environment
+     *    on the same envHome.
+     * 3. p2 should get an EnvironmentLockedException and exit with value 4.
+     * 4. p1 should exit with value 0.
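+     *
+     * Added note: the conflict here is the standard JE single-writer rule;
+     * the second process hits EnvironmentLockedException from the
+     * Environment constructor, along the lines of this sketch (rwConfig is
+     * a hypothetical writable EnvironmentConfig):
+     *
+     *   try {
+     *       Environment second = new Environment(envHome, rwConfig);
+     *   } catch (EnvironmentLockedException expected) {
+     *       System.exit(4); // matches EnvProcess below
+     *   }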
+ */ + @Test + public void testEnvWriteOnRepEnv() { + JUnitProcessThread repThread = getRepProcess(true); + JUnitProcessThread envThread = getEnvProcess(false, false); + + startThreads(repThread, envThread, 2000); + + checkExitVals(repThread, 0, envThread, 4); + } + + /* + * Test the following case: + * 1. Start a process, p1, which opens a replicated Environment on + * envHome, then sleeps. + * 2. Start a new process, p2, which opens a read only standalone + * Environment on the same envHome. + * 3. Both p1 and p2 should exit normally with value 0. + */ + @Test + public void testEnvReadOnRepEnv() { + JUnitProcessThread repThread = getRepProcess(true); + JUnitProcessThread envThread = getEnvProcess(true, false); + + startThreads(repThread, envThread, 2000); + + checkExitVals(repThread, 0, envThread, 0); + } + + /* + * Test the following case: + * 1. Start a process, p1, which opens a r/w standalone Environment on + * envHome, then sleeps. + * 2. Start a new process, p2, which opens a replicated Environment on + * the same envHome. + * 3. p2 should get an EnvironmentLockedException and exit with value 1. + * 4. p1 should exit with value 0. + */ + @Test + public void testRepEnvOnEnvWrite() { + JUnitProcessThread repThread = getRepProcess(false); + JUnitProcessThread envThread = getEnvProcess(false, true); + + startThreads(envThread, repThread, 500); + + checkExitVals(repThread, 1, envThread, 0); + } + + /* + * Test the following case: + * 1. Start a process, p1, which opens a read only standalone Environment + * on envHome, then sleeps. + * 2. Start a new process, p2, which opens a replicated Environment on + * the same envHome. + * 3. p2 should get an UnsupportedOperationException and exit with + * value 2. + * 4. p1 should exit with value 0. + */ + @Test + public void testRepEnvOnEnvRead() { + testRepEnvOnEnvWrite(); + + JUnitProcessThread repThread = getRepProcess(false); + JUnitProcessThread envThread = getEnvProcess(true, true); + + startThreads(envThread, repThread, 500); + + checkExitVals(repThread, 2, envThread, 0); + } + + /* + * Test the following case: + * 1. Start a process, p1, which opens a replicated Environment on + * envHome, then sleeps. + * 2. Start a new process, p2, which opens a replicated Environment on + * the same envHome. + * 3. p2 should get an EnvironmentLockedException and exit with value 1. + * 4. p1 should exit with value 0. + */ + @Test + public void testRepEnvOnRepEnv() { + JUnitProcessThread repThread1 = getRepProcess(true); + JUnitProcessThread repThread2 = getRepProcess(false); + + startThreads(repThread1, repThread2, 2000); + + checkExitVals(repThread1, 0, repThread2, 1); + } + + /* + * Test the following case: + * 1. Start a process, p1, which opens a r/w standalone Environment on + * envHome, then sleeps. + * 2. Start a new process, p2, which opens a r/w standalone Environment + * on the same envHome. + * 3. p2 should get an EnvironmentLockedException and exit with value 4. + * 4. p1 should exit with value 0. + */ + @Test + public void testEnvWriteOnEnvWrite() { + JUnitProcessThread envThread1 = getEnvProcess(false, true); + JUnitProcessThread envThread2 = getEnvProcess(false, false); + + startThreads(envThread1, envThread2, 500); + + checkExitVals(envThread1, 0, envThread2, 4); + } + + /* + * Test the following case: + * 1. Start a process, p1, which opens a r/w standalone Environment on + * envHome, then sleeps. + * 2. Start a new process, p2, which opens a read only standalone + * Environment on the same envHome. + * 3. 
Both p1 and p2 should exit with value 0.
+     */
+    @Test
+    public void testEnvWriteOnEnvRead() {
+        JUnitProcessThread envThread1 = getEnvProcess(false, true);
+        JUnitProcessThread envThread2 = getEnvProcess(true, false);
+
+        startThreads(envThread1, envThread2, 2000);
+
+        checkExitVals(envThread1, 0, envThread2, 0);
+    }
+
+    /*
+     * Test the following case:
+     * 1. Start a process, p1, which opens a read only standalone Environment
+     *    on envHome, then sleeps.
+     * 2. Start a new process, p2, which opens a read only standalone
+     *    Environment on the same envHome.
+     * 3. Both p1 and p2 should exit with value 0.
+     */
+    @Test
+    public void testEnvReadOnEnvRead()
+        throws Throwable {
+
+        if (readPreserveRecordVersionProperty()) {
+            return;
+        }
+
+        testEnvWriteOnEnvRead();
+
+        /* Write some data. */
+        JUnitProcessThread writeThread = getEnvProcess(false, true, "-1");
+        writeThread.start();
+        writeThread.finishTest();
+        assertTrue(writeThread.getExitVal() == 0);
+
+        JUnitProcessThread envThread1 = getEnvProcess(true, true);
+        JUnitProcessThread envThread2 = getEnvProcess(true, false);
+
+        startThreads(envThread1, envThread2, 500);
+
+        checkExitVals(envThread1, 0, envThread2, 0);
+    }
+
+    private boolean readPreserveRecordVersionProperty()
+        throws Exception {
+
+        FileInputStream fis =
+            new FileInputStream(new File(envRoot, "je.properties"));
+        Properties jeProperties = new Properties();
+        jeProperties.load(fis);
+
+        return new Boolean(jeProperties.getProperty
+                           (RepParams.PRESERVE_RECORD_VERSION.getName()));
+    }
+
+    /*
+     * Test the following case:
+     * 1. Start process p1:
+     *    1. It will get FileLock A and FileLock C at the beginning to make
+     *       sure it gets these two locks before p2.
+     *    2. Then it will start a new replication group with two replicas,
+     *       and do some inserts.
+     *    3. Sync all the nodes and do a node equality check.
+     *    4. Release lock A.
+     *    5. Try to get FileLock B.
+     * 2. Start process p2 right after starting p1:
+     *    1. It will get FileLock B at the beginning to make sure p2 gets
+     *       lock B before p1 does.
+     *    2. Try to get lock A; if it gets A, which means there is some data
+     *       on the replicas, then open two read only standalone Environments
+     *       on the two replicas.
+     *    3. Release lock A and lock B.
+     *    4. Read the records and do a compare between replicas, also check
+     *       to see if the values are expected.
+     *    5. Try to get FileLock C.
+     * 3. When p1 gets FileLock B, it continues:
+     *    1. When p1 gets FileLock B, it knows p2 has read a snapshot, so it
+     *       can do further updates.
+     *    2. Do updates and make sure the replicas have the same data.
+     *    3. Release FileLock C.
+     *    4. Exit the process.
+     * 4. When p2 gets FileLock C, it continues:
+     *    1. When p2 gets FileLock C, it knows p1 has finished updates, then
+     *       it does reads on the replicas.
+     *    2. Do the compare.
+     *    3. Release FileLock C and exit.
+     * 5. The two processes should exit successfully with value 0.
+     */
+    @Test
+    public void testEnvReadSnapshotOnRepEnv() {
+        /* Start the process which starts a writing replication group. */
+        String[] repCommand = new String[5];
+        repCommand[0] =
+            "com.sleepycat.je.rep.MultiProcessOpenEnvTest$RepGroupWriteProcess";
+        repCommand[1] = envRoot.getAbsolutePath();
+        repCommand[2] = DB_NAME;
+        /* Make the process sleep for a while to make sure p2 gets lock B. */
+        repCommand[3] = "1000";
+        repCommand[4] = lockFile.getAbsolutePath();
+        JUnitProcessThread p1 =
+            new JUnitProcessThread(repProcessName, repCommand);
+
+        /* Start the process which starts reading Environments on replicas.
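+         * (Added note: EnvReadRepGroupProcess, which coordinates with p1
+         * through file locks A, B and C in the shared filelocks.txt
+         * described above.)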
*/ + String[] envCommand = new String[5]; + envCommand[0] = "com.sleepycat.je.rep.MultiProcessOpenEnvTest$" + + "EnvReadRepGroupProcess"; + envCommand[1] = masterEnvHome.getAbsolutePath(); + envCommand[2] = replicaEnvHome.getAbsolutePath(); + envCommand[3] = DB_NAME; + envCommand[4] = lockFile.getAbsolutePath(); + JUnitProcessThread p2 = + new JUnitProcessThread(envProcessName, envCommand); + + startThreads(p1, p2, 300); + + checkExitVals(p1, 0, p2, 0); + } + + /* Start these two processes. */ + private void startThreads(JUnitProcessThread thread1, + JUnitProcessThread thread2, + long sleepTime) { + thread1.start(); + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + e.printStackTrace(); + } + thread2.start(); + } + + /* Create a process which opens a replicated Environment. */ + private JUnitProcessThread getRepProcess(boolean sleep) { + String[] repCommand = new String[3]; + repCommand[0] = + "com.sleepycat.je.rep.MultiProcessOpenEnvTest$RepEnvProcess"; + repCommand[1] = masterEnvHome.getAbsolutePath(); + repCommand[2] = (sleep ? sleepInterval : "0"); + + return new JUnitProcessThread(repProcessName, repCommand); + } + + /* Create a process which opens a standalone Environment. */ + private JUnitProcessThread getEnvProcess(boolean readOnly, + boolean sleep) { + return getEnvProcess(readOnly, sleep, sleepInterval); + } + + private JUnitProcessThread getEnvProcess(boolean readOnly, + boolean sleep, + String sleepTime) { + String[] envCommand = new String[4]; + envCommand[0] = + "com.sleepycat.je.rep.MultiProcessOpenEnvTest$EnvProcess"; + envCommand[1] = masterEnvHome.getAbsolutePath(); + envCommand[2] = (readOnly ? "true" : "false"); + envCommand[3] = (sleep ? sleepTime : "0"); + + return new JUnitProcessThread(envProcessName, envCommand); + } + + /* Check the exit value of processes. */ + private void checkExitVals(JUnitProcessThread thread1, + int exitVal1, + JUnitProcessThread thread2, + int exitVal2) { + /* End these threads. */ + try { + thread1.finishTest(); + thread2.finishTest(); + } catch (Throwable t) { + System.err.println(t.toString()); + } + + /* Check whether the processes exit with expected values. */ + assertEquals(thread1.getExitVal(), exitVal1); + assertEquals(thread2.getExitVal(), exitVal2); + } + + /** + * Open a ReplicatedEnvironment depends on the configuration. + */ + static class RepEnvProcess { + private final File envHome; + + /* + * Sleep interval for waiting a ReplicatedEnvironment to open on this + * ReplicatedEnvironment. + */ + private final long sleepTime; + private RepEnvInfo repEnvInfo; + + public RepEnvProcess(File envHome, long sleepTime) { + this.envHome = envHome; + this.sleepTime = sleepTime; + } + + public void openEnv() { + try { + Durability durability = + new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.ALL); + EnvironmentConfig envConfig = + RepTestUtils.createEnvConfig(durability); + ReplicationConfig repConfig = + RepTestUtils.createRepConfig(1); + repEnvInfo = RepTestUtils.setupEnvInfo + (envHome, envConfig, repConfig, null); + repEnvInfo.openEnv(); + Thread.sleep(sleepTime); + } catch (EnvironmentLockedException e) { + + /* + * Exit the process with value 1, don't print out the exception + * since it's expected. + */ + System.exit(1); + } catch (UnsupportedOperationException e) { + + /* + * Exit the process with value 2, don't print out the exception + * since it's expected. 
+                 *
+                 * Note: this exception is thrown because we can't open a
+                 * replicated Environment on an existing standalone
+                 * Environment.
+                 */
+                System.exit(2);
+            } catch (Exception e) {
+                /* Dump unexpected exceptions, exit the process with value 3. */
+                e.printStackTrace();
+                System.exit(3);
+            } finally {
+                /* Guard against a failure before repEnvInfo is assigned. */
+                if (repEnvInfo != null && repEnvInfo.getEnv() != null) {
+                    repEnvInfo.closeEnv();
+                }
+            }
+        }
+
+        public static void main(String args[]) {
+            RepEnvProcess thread =
+                new RepEnvProcess(new File(args[0]), Long.parseLong(args[1]));
+            thread.openEnv();
+        }
+    }
+
+    /**
+     * Open a standalone Environment, specifying the configuration.
+     */
+    static class EnvProcess {
+        private final File envHome;
+        private final boolean readOnly;
+
+        /*
+         * Sleep interval that keeps this Environment open while another
+         * process tries to open an Environment on the same envHome.
+         */
+        private final long sleepTime;
+        private Environment env;
+
+        public EnvProcess(File envHome, boolean readOnly, long sleepTime) {
+            this.envHome = envHome;
+            this.readOnly = readOnly;
+            this.sleepTime = sleepTime;
+        }
+
+        public void openEnv() {
+            try {
+                EnvironmentConfig envConfig = new EnvironmentConfig();
+                envConfig.setReadOnly(readOnly);
+                envConfig.setAllowCreate(!readOnly);
+
+                env = new Environment(envHome, envConfig);
+                if (sleepTime < 0) {
+                    DatabaseConfig dbConfig = new DatabaseConfig();
+                    dbConfig.setAllowCreate(!readOnly);
+                    Database db = env.openDatabase(null, "testDB", dbConfig);
+
+                    DatabaseEntry key = new DatabaseEntry();
+                    DatabaseEntry data = new DatabaseEntry();
+                    for (int i = 1; i <= 50; i++) {
+                        IntegerBinding.intToEntry(i, key);
+                        StringBinding.stringToEntry("herococo", data);
+                        db.put(null, key, data);
+                    }
+                    db.close();
+                } else {
+                    Thread.sleep(sleepTime);
+                }
+            } catch (EnvironmentLockedException e) {
+
+                /*
+                 * Exit the process with value 4, the exception is expected
+                 * in this case, don't dump it out.
+                 */
+                System.exit(4);
+            } catch (Exception e) {
+                /* Dump unexpected exception, exit the process with value 5. */
+                e.printStackTrace();
+                System.exit(5);
+            } finally {
+                if (env != null) {
+                    env.close();
+                }
+            }
+        }
+
+        public static void main(String args[]) {
+            EnvProcess process = new EnvProcess(new File(args[0]),
+                                                Boolean.parseBoolean(args[1]),
+                                                Long.parseLong(args[2]));
+            process.openEnv();
+        }
+    }
+
+    /* Close a RandomAccessFile. */
+    private static void closeLockFile(RandomAccessFile lockFile) {
+        if (lockFile != null) {
+            try {
+                lockFile.close();
+            } catch (IOException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    /* Get a FileLock, retrying once a second up to MAX_RETRIES times. */
+    private static FileLock getLockWithReTry(FileChannel channel,
+                                             long position,
+                                             long size)
+        throws Exception {
+
+        int retries = 0;
+        FileLock lock = channel.tryLock(position, size, false);
+
+        while (lock == null && retries <= MAX_RETRIES) {
+            Thread.sleep(1000);
+            lock = channel.tryLock(position, size, false);
+            retries++;
+        }
+
+        if (lock == null) {
+            System.err.println("Can't get a FileLock within " +
+                               MAX_RETRIES + " seconds.");
+            System.exit(6);
+        }
+
+        return lock;
+    }
+
+    /**
+     * Open a replication group and do some work.
+     */
+    static class RepGroupWriteProcess {
+        private final File envRoot;
+        private RandomAccessFile lockFile;
+        private final long sleepTime;
+        private final String dbName;
+        private RepEnvInfo[] repEnvInfo;
+
+        public RepGroupWriteProcess(File envRoot,
+                                    String dbName,
+                                    long sleepTime,
+                                    RandomAccessFile lockFile) {
+            this.envRoot = envRoot;
+            this.dbName = dbName;
+            this.sleepTime = sleepTime;
+            this.lockFile = lockFile;
+        }
+
+        public void run() {
+            try {
+                /* Get FileLocks.
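+                 * Locks A, B and C are byte-range locks at positions 1, 2
+                 * and 3 of the shared lock file; they carry no data and only
+                 * sequence the two processes. As an aside (illustration
+                 * only), java.nio offers both blocking and non-blocking
+                 * acquisition:
+                 *
+                 *   FileChannel ch = lockFile.getChannel();
+                 *   FileLock a = ch.lock(1, 1, false);     // blocks
+                 *   FileLock b = ch.tryLock(2, 1, false);  // null if held
+                 *
+                 * getLockWithReTry above builds its polling loop on the
+                 * non-blocking form.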
*/
+                FileChannel channel = lockFile.getChannel();
+                FileLock lockA = channel.lock(1, 1, false);
+                FileLock lockC = channel.lock(3, 1, false);
+
+                ReplicatedEnvironment master = getMaster();
+                doWork(master, dbName, 1);
+                /* Release lock A so that the read process can do reads. */
+                lockA.release();
+
+                /* Make sure the read process gets lock B before this process. */
+                Thread.sleep(sleepTime);
+
+                /*
+                 * Getting lock B means the read process has finished
+                 * reading, so do updates.
+                 */
+                FileLock lockB = getLockWithReTry(channel, 2, 1);
+                doWork(master, dbName, 101);
+
+                /* Release lock B and lock C. */
+                lockB.release();
+                lockC.release();
+            } catch (Exception e) {
+                /* Dump exceptions and exit with value 7. */
+                e.printStackTrace();
+                System.exit(7);
+            } finally {
+                RepTestUtils.shutdownRepEnvs(repEnvInfo);
+                closeLockFile(lockFile);
+            }
+        }
+
+        /* Start a replication group with 2 nodes and return the master. */
+        private ReplicatedEnvironment getMaster()
+            throws Exception {
+
+            Durability durability =
+                new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+                               Durability.SyncPolicy.WRITE_NO_SYNC,
+                               Durability.ReplicaAckPolicy.ALL);
+            EnvironmentConfig envConfig =
+                RepTestUtils.createEnvConfig(durability);
+            repEnvInfo =
+                RepTestUtils.setupEnvInfos(envRoot, 2, envConfig);
+
+            return RepTestUtils.joinGroup(repEnvInfo);
+        }
+
+        /* Insert 100 records beginning with beginKey. */
+        private void doWork(Environment master, String dbName, int beginKey)
+            throws Exception {
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+
+            /* Insert/Update the records of the database. */
+            Database db = master.openDatabase(null, dbName, dbConfig);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+
+            for (int i = 0; i < 100; i++) {
+                IntegerBinding.intToEntry(beginKey + i, key);
+                StringBinding.stringToEntry("herococo", data);
+                db.put(null, key, data);
+            }
+            db.close();
+
+            /*
+             * Do a sync at the end of the stage to make sure master and
+             * replica have the same data set.
+             */
+            VLSN commitVLSN =
+                RepTestUtils.syncGroupToLastCommit(repEnvInfo,
+                                                   repEnvInfo.length);
+            RepTestUtils.checkNodeEquality(commitVLSN, false, repEnvInfo);
+        }
+
+        public static void main(String args[]) {
+            try {
+                RandomAccessFile lockFile =
+                    new RandomAccessFile(args[3], "rw");
+                RepGroupWriteProcess process =
+                    new RepGroupWriteProcess(new File(args[0]), args[1],
+                                             Long.parseLong(args[2]),
+                                             lockFile);
+                process.run();
+            } catch (IOException e) {
+                e.printStackTrace();
+                System.exit(8);
+            }
+        }
+    }
+
+    /**
+     * Open read only standalone Environments on the replicated nodes and
+     * compare their contents.
+     */
+    static class EnvReadRepGroupProcess {
+        private final File masterEnvHome;
+        private final File replicaEnvHome;
+        private RandomAccessFile lockFile;
+        private final String dbName;
+        private final ArrayList<TestObject> prevMasterRecords =
+            new ArrayList<TestObject>();
+        private final ArrayList<TestObject> prevReplicaRecords =
+            new ArrayList<TestObject>();
+        private final ArrayList<TestObject> newMasterRecords =
+            new ArrayList<TestObject>();
+        private final ArrayList<TestObject> newReplicaRecords =
+            new ArrayList<TestObject>();
+        private Environment master;
+        private Environment replica;
+
+        public EnvReadRepGroupProcess(File masterEnvHome,
+                                      File replicaEnvHome,
+                                      String dbName,
+                                      RandomAccessFile lockFile) {
+            this.masterEnvHome = masterEnvHome;
+            this.replicaEnvHome = replicaEnvHome;
+            this.dbName = dbName;
+            this.lockFile = lockFile;
+        }
+
+        public void run() {
+            try {
+                FileChannel channel = lockFile.getChannel();
+                /* Get lock B so that the write process waits.
*/
+                FileLock lockB = channel.lock(2, 1, false);
+
+                /*
+                 * Getting lock A means the write process has finished the
+                 * first phase.
+                 */
+                FileLock lockA = getLockWithReTry(channel, 1, 1);
+                openEnvironments();
+
+                /* Release lock A and B so that the write process can continue. */
+                lockB.release();
+                lockA.release();
+
+                /* Read records and check the node equality. */
+                readRecords
+                    (master, prevMasterRecords, replica, prevReplicaRecords);
+                doEqualityCompare(prevMasterRecords, prevReplicaRecords, 100);
+                closeEnvironments();
+
+                /*
+                 * Getting lock C means the second phase of the write process
+                 * has finished.
+                 */
+                FileLock lockC = getLockWithReTry(channel, 3, 1);
+                /* Reopen and read records, then do the compare. */
+                openEnvironments();
+                readRecords
+                    (master, newMasterRecords, replica, newReplicaRecords);
+                doEqualityCompare(newMasterRecords, newReplicaRecords, 200);
+
+                /* Do compare between two snapshots. */
+                doDiffCompare(prevMasterRecords, newMasterRecords);
+                doDiffCompare(prevReplicaRecords, newReplicaRecords);
+                lockC.release();
+            } catch (Exception e) {
+                /* Dump exceptions and exit the process with value 9. */
+                e.printStackTrace();
+                System.exit(9);
+            } finally {
+                closeEnvironments();
+                closeLockFile(lockFile);
+            }
+        }
+
+        /* Open read only standalone Environments on the replicated nodes. */
+        private void openEnvironments()
+            throws Exception {
+
+            EnvironmentConfig envConfig = new EnvironmentConfig();
+            envConfig.setReadOnly(true);
+            envConfig.setAllowCreate(false);
+
+            master = new Environment(masterEnvHome, envConfig);
+            replica = new Environment(replicaEnvHome, envConfig);
+        }
+
+        /* Close the Environments after finishing reading operations. */
+        private void closeEnvironments() {
+            if (master != null) {
+                master.close();
+            }
+            if (replica != null) {
+                replica.close();
+            }
+        }
+
+        /* Read records from these two Environments. */
+        private void readRecords(Environment masterEnv,
+                                 ArrayList<TestObject> masterData,
+                                 Environment replicaEnv,
+                                 ArrayList<TestObject> replicaData)
+            throws Exception {
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(false);
+            dbConfig.setReadOnly(true);
+
+            doRead(masterEnv, dbConfig, masterData);
+            doRead(replicaEnv, dbConfig, replicaData);
+        }
+
+        /* Do the real reading work. */
+        private void doRead(Environment env,
+                            DatabaseConfig dbConfig,
+                            ArrayList<TestObject> list)
+            throws Exception {
+
+            Database db = env.openDatabase(null, dbName, dbConfig);
+            Cursor cursor = db.openCursor(null, null);
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            while (OperationStatus.SUCCESS ==
+                   cursor.getNext(key, data, null)) {
+                list.add(new TestObject(IntegerBinding.entryToInt(key),
+                                        StringBinding.entryToString(data)));
+            }
+            cursor.close();
+            db.close();
+        }
+
+        /* Compare master and replica data; they are expected to be the same. */
+        private void doEqualityCompare(ArrayList<TestObject> masterData,
+                                       ArrayList<TestObject> replicaData,
+                                       int expectedSize) {
+            assertEquals(masterData.size(), replicaData.size());
+            for (int i = 0; i < masterData.size(); i++) {
+                assertEquals(masterData.get(i), replicaData.get(i));
+            }
+            assertEquals(masterData.size(), expectedSize);
+            for (int i = 0; i < expectedSize; i++) {
+                TestObject object = new TestObject(i + 1, "herococo");
+                assertEquals(masterData.get(i), object);
+                assertEquals(replicaData.get(i), object);
+            }
+        }
+
+        /* Compare the two snapshots; they should differ.
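+         * The first snapshot holds the 100 records written in phase one
+         * (keys 1-100); the second holds 200 records because phase two
+         * added keys 101-200. doDiffCompare checks both properties: the new
+         * snapshot is twice the size of the old one, and the new key at
+         * offset i + 100 is exactly 100 greater than the old key at
+         * offset i.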
*/
+        private void doDiffCompare(ArrayList<TestObject> prevData,
+                                   ArrayList<TestObject> newData) {
+            assertEquals(newData.size(), prevData.size() * 2);
+            for (int i = 0; i < prevData.size(); i++) {
+                assertEquals(prevData.get(i), newData.get(i));
+            }
+            for (int i = 0; i < prevData.size(); i++) {
+                assertEquals(newData.get(i + 100).getKey() - 100,
+                             prevData.get(i).getKey());
+            }
+        }
+
+        static class TestObject {
+            private final int key;
+            private final String name;
+
+            public TestObject(int key, String name) {
+                this.key = key;
+                this.name = name;
+            }
+
+            public int getKey() {
+                return key;
+            }
+
+            public String getName() {
+                return name;
+            }
+
+            @Override
+            public boolean equals(Object obj) {
+                if (!(obj instanceof TestObject)) {
+                    return false;
+                }
+
+                TestObject tObj = (TestObject) obj;
+                return tObj.getKey() == key && tObj.getName().equals(name);
+            }
+
+            /* Keep the equals/hashCode contract for use in collections. */
+            @Override
+            public int hashCode() {
+                return key + name.hashCode();
+            }
+        }
+
+        public static void main(String args[]) {
+            try {
+                RandomAccessFile lockFile = new RandomAccessFile(args[3], "rw");
+                EnvReadRepGroupProcess process =
+                    new EnvReadRepGroupProcess(new File(args[0]),
+                                               new File(args[1]),
+                                               args[2],
+                                               lockFile);
+                process.run();
+            } catch (IOException e) {
+                e.printStackTrace();
+                System.exit(8);
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/NodePriorityTest.java b/test/com/sleepycat/je/rep/NodePriorityTest.java
new file mode 100644
index 0000000..5e167d0
--- /dev/null
+++ b/test/com/sleepycat/je/rep/NodePriorityTest.java
@@ -0,0 +1,214 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+public class NodePriorityTest extends RepTestBase {
+
+    /*
+     * Check that a NZ prio node is elected master even when there are other
+     * zero prio nodes with more current log files.
+     */
+    @Test
+    public void testNZObsoleteLogfiles() throws InterruptedException {
+
+        createGroup();
+        closeNodes(repEnvInfo);
+
+        /* Only the last node has non-zero priority. */
+        for (int i = 0; i <= repEnvInfo.length - 2; i++) {
+            repEnvInfo[i].getRepConfig().setNodePriority(0);
+        }
+
+        /* Last node should always be elected the master. */
+        RepEnvInfo minfo = restartNodes(repEnvInfo);
+        assertEquals(1, minfo.getRepConfig().getNodePriority());
+        assertEquals(repEnvInfo[repEnvInfo.length-1].getRepNode().getNodeId(),
+                     minfo.getRepNode().getNodeId());
+
+        /* Shut down the second-to-last node.
*/ + final int secondLast = repEnvInfo.length - 2; + final RepEnvInfo secondLastEnv = repEnvInfo[secondLast]; + secondLastEnv.closeEnv(); + /* It will come up with a NZ prio. */ + secondLastEnv.getRepConfig().setNodePriority(1); + + /* Make changes, obsoleting the logs on secondLast since it's down */ + ReplicatedEnvironment menv = minfo.getEnv(); + Transaction txn = menv.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + /* + * Make updates to ensure that some zero prio nodes have more up to + * date files relative to secondLastEnv. + */ + Database db1 = menv.openDatabase(txn, "db1", dbConfig); + txn.commit(); /* commit with simple majority. */ + db1.close(); + + /* Now shutdown the only node with NZ priority, the current master */ + minfo.closeEnv(); + + /* Bring up the NZ prio node with the obsolete logs. */ + secondLastEnv.openEnv(); + assertTrue(secondLastEnv.getEnv().getState().isMaster()); + + for (int i = 0; i < secondLast; i++) { + /* Explicitly close invalid (RollbackException) environments. */ + repEnvInfo[i].closeEnv(); + } + } + + /* Simple API test. */ + @Test + public void testPriorityBasic() { + ReplicationConfig repConfig = new ReplicationConfig(); + int prio = repConfig.getNodePriority(); + assertEquals(1, prio); /* Verify default priority. */ + repConfig.setNodePriority(++prio); + assertEquals(prio, repConfig.getNodePriority()); + + try { + repConfig.setNodePriority(-1); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + // Expected + } + } + + /* + * Test failure/restart of a solitary NZ prio node in a group. + */ + @Test + public void testNZFailoverAndRestore() throws InterruptedException { + + createGroup(); + closeNodes(repEnvInfo); + + final int last = repEnvInfo.length - 1; + + /* Only the last node has non-zero priority */ + for (int i = 0; i < last; i++) { + repEnvInfo[i].getRepConfig().setNodePriority(0); + } + + /* Last node should always be elected the master */ + RepEnvInfo minfo = restartNodes(repEnvInfo); + assertTrue(minfo.getRepConfig().getNodePriority() > 0); + + ReplicatedEnvironment listenEnv = repEnvInfo[0].getEnv(); + ElectionListener listener = new ElectionListener(); + listenEnv.setStateChangeListener(listener); + + /* Now shutdown the only node with NZ priority. */ + minfo.closeEnv(); + + /* We should not be able to conclude an election. */ + boolean ok = listener.electionLatch.await(10, TimeUnit.SECONDS); + assertFalse(ok); + + /* + * Now bring the NZ prio node back up again, so it's selected the + * master, since it's the only choice. + */ + minfo.openEnv(); + ok = listener.electionLatch.await(10, TimeUnit.SECONDS); + assertTrue(ok); + } + + + /** + * test that elections only pick nodes with NZ priority. Kill masters + * checking each new master that's elected to make sure it has NZ + * priority. + */ + @Test + public void testOnlyNZMasters() throws InterruptedException { + + createGroup(); + closeNodes(repEnvInfo); + + final int majority = (repEnvInfo.length/2) - 1; + + /* Set less than a simple majority of nodes to have prio zero. 
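+         * For a group of size N a simple majority is N/2 + 1, so demoting
+         * (N/2) - 1 nodes always leaves enough non-zero priority candidates
+         * to conclude each election; if N were 5, say, one node is demoted
+         * and four electable candidates remain.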
*/
+        for (int i = 0; i < majority; i++) {
+            repEnvInfo[i].getRepConfig().setNodePriority(0);
+        }
+
+        /* Last node should always be elected the master. */
+        RepEnvInfo minfo = restartNodes(repEnvInfo);
+        assertTrue(minfo.getRepConfig().getNodePriority() > 0);
+
+        for (int i = 0; i < majority; i++) {
+            minfo = findMaster(repEnvInfo);
+            assertNotNull(minfo);
+            assertTrue(minfo.getRepConfig().getNodePriority() > 0);
+
+            // Wait for a new master to emerge.
+            ReplicatedEnvironment listenEnv = repEnvInfo[0].getEnv();
+            ElectionListener listener = new ElectionListener();
+            listenEnv.setStateChangeListener(listener);
+            minfo.closeEnv();
+            /* Verify that the election has been concluded. */
+            boolean ok = listener.electionLatch.await(10, TimeUnit.SECONDS);
+            assertTrue(ok);
+        }
+    }
+
+    /* Listen for an election to be concluded. */
+    class ElectionListener implements StateChangeListener {
+
+        final CountDownLatch electionLatch;
+        State prevState;
+        State newState;
+
+        ElectionListener() {
+            electionLatch = new CountDownLatch(1);
+        }
+
+        public void stateChange(StateChangeEvent stateChangeEvent)
+            throws RuntimeException {
+            if (prevState == null) {
+                prevState = stateChangeEvent.getState();
+                /* Ignore the first immediate synchronous callback. */
+                return;
+            }
+
+            newState = stateChangeEvent.getState();
+
+            if (newState.isMaster() || newState.isReplica()) {
+                electionLatch.countDown();
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/ParamTest.java b/test/com/sleepycat/je/rep/ParamTest.java
new file mode 100644
index 0000000..aeb4094
--- /dev/null
+++ b/test/com/sleepycat/je/rep/ParamTest.java
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.Properties;
+
+import org.junit.Test;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Test setting and retrieving of replication configurations. Should test
+ * - mutable configurations
+ * - that the static fields in ReplicatorParams are loaded properly.
+ *
+ * TBW - test is incomplete.
+ *
+ * Test setting and retrieving of replication params. Make sure we can
+ * parse the special format of the je.rep.node.* param, and that we
+ * give params specified in files precedence over params specified
+ * programmatically.
+ */
+public class ParamTest extends TestBase {
+
+    private final File envRoot;
+
+    public ParamTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    /**
+     * THIS TESTCASE should go first in this file, before a replicator is
+     * instantiated in this JVM, to ensure that an application can instantiate
+     * a ReplicationConfig before instantiating a replicated environment.
+     * ReplicationConfig references statics from ReplicatorParams, and we have
+     * to make sure they are loaded properly.
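+     *
+     * For reference, the two ways of supplying a replication param that
+     * this class exercises look roughly like this (values are examples
+     * only):
+     *
+     *   // programmatically, on the config object
+     *   ReplicationConfig rc = new ReplicationConfig();
+     *   rc.setConfigParam("je.rep.groupName", "TestGroup");
+     *
+     *   // or declaratively, as a line in the environment's je.properties
+     *   // je.rep.groupName=TestGroup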
+ */ + @Test + public void testConfigSetting() { + ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setConfigParam("je.rep.groupName", "TestGroup"); + } + + /** + * Make sure that this valid property can be set through both a file and + * through a configuration instance. + */ + private void verifySuccess(String paramName, String value) { + try { + Properties props = new Properties(); + props.put(paramName, value); + DbConfigManager.validateProperties(props, false, null); + } catch (Exception E) { + E.printStackTrace(); + fail("Unexpected exception: " + E); + } + + try { + ReplicationConfig goodConfig = new ReplicationConfig(); + goodConfig.setConfigParam(paramName, value); + } catch (Exception E) { + E.printStackTrace(); + fail("Unexpected exception: " + E); + } + + try { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setConfigParam(paramName, value); + fail(paramName + " should have been rejected"); + } catch (IllegalArgumentException expected) { + } + } + + /** + * Make sure that this invalid property will be caught when set through + * either a file, or a configuration instance. + */ + void verifyFailure(String paramName, String badValue) { + try { + Properties props = new Properties(); + props.put(paramName, badValue); + DbConfigManager.validateProperties(props, false, null); + fail("Bad value: " + badValue+ " not detected."); + } catch (IllegalArgumentException expected) { + } + + try { + ReplicationConfig badConfig = new ReplicationConfig(); + badConfig.setConfigParam(paramName, badValue); + fail("Bad value: " + badValue+ " not detected."); + } catch (IllegalArgumentException expected) { + } + } + + @Test + public void testGroupName() { + verifySuccess(RepParams.GROUP_NAME.getName(), "SleepycatGroup1"); + verifyFailure(RepParams.GROUP_NAME.getName(), + "Sleepycat Group 1"); + } + + @Test + public void testNodeType() { + verifySuccess(RepParams.NODE_TYPE.getName(), "ELECTABLE"); + verifySuccess(RepParams.NODE_TYPE.getName(), "MONITOR"); + verifySuccess(RepParams.NODE_TYPE.getName(), "SECONDARY"); + verifyFailure(RepParams.NODE_TYPE.getName(), "NOT-A-NODE-TYPE"); + } +} diff --git a/test/com/sleepycat/je/rep/PerDbReplicationTest.java b/test/com/sleepycat/je/rep/PerDbReplicationTest.java new file mode 100644 index 0000000..4939aa5 --- /dev/null +++ b/test/com/sleepycat/je/rep/PerDbReplicationTest.java @@ -0,0 +1,161 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Make sure the unadvertised per-db replication config setting works. 
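+ * The setting in question is DatabaseConfig.setReplicated(). A sketch of the
+ * intended usage in a replicated environment (illustrative only):
+ *
+ *   DatabaseConfig config = new DatabaseConfig();
+ *   config.setAllowCreate(true);
+ *   config.setTransactional(true);
+ *   config.setReplicated(false);  // opt this database out of replication
+ *
+ * The commented-out tests below show how this was exercised against a live
+ * replication group.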
+ */ +public class PerDbReplicationTest extends TestBase { + + private static final String TEST_DB = "testdb"; + private final File envRoot; + private Environment env; + private Database db; + + public PerDbReplicationTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * A database in a replicated environment should replicate by default. + */ + @Test + public void testDefault() { +// Replicator[] replicators = RepTestUtils.startGroup(envRoot, +// 1, +// false /* verbose */); +// try { +// env = replicators[0].getEnvironment(); +// DatabaseConfig config = new DatabaseConfig(); +// config.setAllowCreate(true); +// config.setTransactional(true); +// +// validate(config, true /* replicated */); +// } finally { +// if (db != null) { +// db.close(); +// } +// +// for (Replicator rep: replicators) { +// rep.close(); +// } +// } + } + + /** + * Check that a database in a replicated environment which is configured to + * not replicate is properly saved. + * (Not a public feature yet). + */ + @Test + public void testNotReplicated() { +// Replicator[] replicators = RepTestUtils.startGroup(envRoot, +// 1, +// false /* verbose*/); +// try { +// env = replicators[0].getEnvironment(); +// DatabaseConfig config = new DatabaseConfig(); +// config.setAllowCreate(true); +// config.setTransactional(true); +// config.setReplicated(false); +// +// validate(config, false /* replicated */); +// } finally { +// if (db != null) { +// db.close(); +// } +// +// for (Replicator rep: replicators) { +// rep.close(); +// } +// } + } + + /** + * A database in a standalone environment should not be replicated. + */ + @Test + public void testStandalone() + throws DatabaseException { + + try { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envRoot, envConfig); + DatabaseConfig config = new DatabaseConfig(); + config.setAllowCreate(true); + + validate(config, false /* replicated */); + } finally { + if (db != null) { + db.close(); + } + + if (env != null) { + env.close(); + } + } + } + + /* + * Check that the notReplicate attribute is properly immutable and + * persistent. + */ + private void validate(DatabaseConfig config, + boolean replicated) + throws DatabaseException { + + /* Create the database -- is its config what we expect? */ + db = env.openDatabase(null, TEST_DB, config); + DatabaseConfig inUseConfig = db.getConfig(); + assertEquals(replicated, inUseConfig.getReplicated()); + + /* Close, re-open. */ + db.close(); + db = null; + db = env.openDatabase(null, TEST_DB, inUseConfig); + assertEquals(replicated, db.getConfig().getReplicated()); + + /* + * Close, re-open w/inappropriate value for the replicated bit. This is + * only checked for replicated environments. + */ + db.close(); + db = null; + if (DbInternal.getNonNullEnvImpl(env).isReplicated()) { + inUseConfig.setReplicated(!replicated); + try { + db = env.openDatabase(null, TEST_DB, inUseConfig); + fail("Should have caught config mismatch"); + } catch (IllegalArgumentException expected) { + } + } + } +} diff --git a/test/com/sleepycat/je/rep/RecoveryUtilizationTest.java b/test/com/sleepycat/je/rep/RecoveryUtilizationTest.java new file mode 100644 index 0000000..a188eb8 --- /dev/null +++ b/test/com/sleepycat/je/rep/RecoveryUtilizationTest.java @@ -0,0 +1,155 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.IOException; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.cleaner.VerifyUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class RecoveryUtilizationTest extends TestBase { + + private final File envRoot; + + public RecoveryUtilizationTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * DupCountLNs are strange hybrid beasts. They are not replicated, because + * they are incremented as a side effect of applying LNs. At recovery time, + * any DupCountLNs that are part of a replicated txn must be recovered and + * resurrected just like other replicated LNs though. + * + * This test is trying to create a log has a DupCountLN followed by its DIN + * parent, like this: + * + * 100 DupCountLN for uncommitted, replicated txn + * 200 DIN that refers to DupCountLN + * + * where the log entries are within a checkpoint interval that will be + * processed at recovery. The bug is that DupCountLN, which is not a + * replicated log entry, was being undone by recovery because it was not + * committed. Then it was redone by recovery because it is in a replicated + * txn. Although the logical outcome was correct -- the DIN parent + * continued to point to DupCountLN 100, the utilization was wrong. + * + * @throws InterruptedException + */ + @Test + public void testDupCountRecoverySR17879() + throws IOException, InterruptedException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Make a two node group. */ + RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + RepEnvInfo replicaInfo = null; + ReplicatedEnvironment replica = null; + for (RepEnvInfo info: repEnvInfo) { + if (info.getEnv() != master) { + replicaInfo = info; + replica = replicaInfo.getEnv(); + } + } + + Database db = openDb(master); + Transaction txnA = null; + try { + /* Create a dup tree of k=1/d=1, k=1/d=2, and commit it. */ + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(1, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + IntegerBinding.intToEntry(2, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + + /* + * Now begin what will be an uncommitted txn, that will result in + * the update of the dup tree, thereby logging a DupCountLN to the + * log. 
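+             * (The put of a third duplicate for key 1 below raises the
+             * duplicate count from 2 to 3, which is what logs the
+             * DupCountLN.)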
+             */
+            txnA = master.beginTransaction(null, null);
+            IntegerBinding.intToEntry(3, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(txnA, key, data));
+
+            /*
+             * Now ensure that the replica receives the changes from txnA. To
+             * do that, make and commit an unrelated change, purely as a way of
+             * ensuring that the replica proceeds to a known spot in the
+             * replication stream.
+             */
+            Transaction txnB = master.beginTransaction(null, null);
+            IntegerBinding.intToEntry(10, key);
+            IntegerBinding.intToEntry(10, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(txnB, key, data));
+            txnB.commit();
+            RepTestUtils.syncGroupToLastCommit(repEnvInfo, 2);
+
+            /*
+             * Do a checkpoint on the replica to ensure that the DIN parent of
+             * the problem DupCountLN goes to disk.
+             */
+            CheckpointConfig ckptConfig = new CheckpointConfig();
+            ckptConfig.setForce(true);
+            replica.checkpoint(ckptConfig);
+
+            /* Crash the replica, then recover it. */
+            replicaInfo.abnormalCloseEnv();
+            replicaInfo.openEnv();
+
+            /*
+             * Check that the utilization offsets match those in the tree.
+             * When the bug is in effect, this will fail.
+             */
+            Database repDb = openDb(replicaInfo.getEnv());
+            try {
+                VerifyUtils.checkLsns(repDb);
+            } finally {
+                repDb.close();
+            }
+
+        } finally {
+            /* txnA is null if the test failed before the txn was begun. */
+            if (txnA != null) {
+                txnA.abort();
+            }
+            db.close();
+            RepTestUtils.shutdownRepEnvs(repEnvInfo);
+        }
+    }
+
+    private Database openDb(Environment env) {
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(true);
+
+        return env.openDatabase(null, "foo", dbConfig);
+    }
+}
diff --git a/test/com/sleepycat/je/rep/RepEnvMultiSubDirTest.java b/test/com/sleepycat/je/rep/RepEnvMultiSubDirTest.java
new file mode 100644
index 0000000..42544dd
--- /dev/null
+++ b/test/com/sleepycat/je/rep/RepEnvMultiSubDirTest.java
@@ -0,0 +1,312 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.concurrent.TimeUnit;
+
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.CommitToken;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.rep.impl.RepImplStatDefinition;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.util.DbTruncateLog;
+import com.sleepycat.je.utilint.PollCondition;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+import org.junit.Test;
+
+/**
+ * Test that a replication group with je.log.nDataDirectories enabled works
+ * correctly. It tests basic operations and hard recovery; NetworkBackup is
+ * tested in com.sleepycat.je.rep.impl.networkRestore.NetworkBackupTest.
+ */ +public class RepEnvMultiSubDirTest extends TestBase { + private static final String DB_NAME = "testDb"; + private static final String keyPrefix = "herococo"; + private static final String dataValue = "abcdefghijklmnopqrstuvwxyz"; + + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + + public RepEnvMultiSubDirTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + private EnvironmentConfig createEnvConfig(boolean noAckDurability) { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + if (noAckDurability) { + envConfig.setDurability(RepTestUtils.SYNC_SYNC_NONE_DURABILITY); + } + + /* + * Configure a small log file size so that the log files can spread in + * the sub directories. + */ + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, + "10000"); + + /* + * Configure a small checkpointer and cleaner interval bytes, so that + * checkpointer and cleaner can be invoked more frequently to do the + * cleaning work. + */ + envConfig.setConfigParam(EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, + "20000"); + envConfig.setConfigParam(EnvironmentConfig.CLEANER_BYTES_INTERVAL, + "10000"); + + return envConfig; + } + + private DatabaseConfig createDbConfig() { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + return dbConfig; + } + + /* + * Test the basic database operations on both master and replicas. + */ + @Test + public void testRepBasic() + throws Throwable { + + try { + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, 3, createEnvConfig(false)); + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + checkNodeStates(0); + + /* + * Do enough updates to make sure log files spread to all sub + * directories. + */ + doUpdatesOnMaster(master); + + /* Sync group to make sure records are replayed on the replicas. */ + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Check that records can be read correctly on replicas. */ + for (int i = 1; i < repEnvInfo.length; i++) { + checkContents(repEnvInfo[i].getEnv(), 1001, 2000, + dataValue + dataValue); + } + + assertTrue(repEnvInfo[1].getEnv().getState().isReplica()); + + assertTrue(repEnvInfo[2].getEnv().getState().isReplica()); + repEnvInfo[0].closeEnv(); + + /* Make sure the mastership has changed. */ + boolean rn3rn2IsMaster = new PollCondition(10, 60000) { + + @Override + protected boolean condition() { + return (repEnvInfo[2].isMaster() || + repEnvInfo[1].isMaster()); + } + }.await(); + + assertTrue(rn3rn2IsMaster); + + /* Reopen the former master node -- it will be a replica */ + repEnvInfo[0].openEnv(); + assertTrue(repEnvInfo[0].isReplica()); + + /* Check the contents on the former master. */ + checkContents + (repEnvInfo[0].getEnv(), 1001, 2000, dataValue + dataValue); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + private void checkNodeStates(int masterIndex) { + for (int i = 0; i < repEnvInfo.length; i++) { + if (i == masterIndex) { + assertTrue(repEnvInfo[i].isMaster()); + } else { + assertTrue(repEnvInfo[i].isReplica()); + } + } + } + + private void doUpdatesOnMaster(ReplicatedEnvironment master) + throws Exception { + + Database db = master.openDatabase(null, DB_NAME, createDbConfig()); + + /* Insert data. */ + insertData(db, null, 1, 2000, dataValue); + + /* Delete data. 
*/
+        DatabaseEntry key = new DatabaseEntry();
+        for (int i = 1; i <= 100; i++) {
+            Transaction txn = master.beginTransaction(null, null);
+            for (int j = 1; j <= 10; j++) {
+                StringBinding.stringToEntry(keyPrefix + (i * 10 + j), key);
+                assertEquals(OperationStatus.SUCCESS, db.delete(txn, key));
+            }
+            txn.commit();
+        }
+
+        /* Update data. */
+        insertData(db, null, 1001, 2000, dataValue + dataValue);
+        db.close();
+    }
+
+    private void insertData(Database db,
+                            Transaction txn,
+                            int start,
+                            int end,
+                            String value)
+        throws Exception {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = start; i <= end; i++) {
+            StringBinding.stringToEntry(keyPrefix + i, key);
+            StringBinding.stringToEntry(value, data);
+            assertEquals(OperationStatus.SUCCESS, db.put(txn, key, data));
+        }
+    }
+
+    private void checkContents(ReplicatedEnvironment repEnv,
+                               int start,
+                               int end,
+                               String value)
+        throws Exception {
+
+        Database db = repEnv.openDatabase(null, DB_NAME, createDbConfig());
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = start; i <= end; i++) {
+            StringBinding.stringToEntry(keyPrefix + i, key);
+            assertEquals(OperationStatus.SUCCESS,
+                         db.get(null, key, data, null));
+            assertEquals(StringBinding.entryToString(data), value);
+        }
+        db.close();
+    }
+
+    /*
+     * Test that hard recovery works correctly on a replica with multiple
+     * sub directories.
+     */
+    @Test
+    public void testHardRecovery()
+        throws Throwable {
+
+        try {
+            /* Expect RollbackProhibitedException when hard recovery happens. */
+            ReplicationConfig repConfig = new ReplicationConfig();
+            repConfig.setConfigParam
+                (ReplicationConfig.TXN_ROLLBACK_LIMIT, "0");
+
+            /* Start the whole replication group. */
+            repEnvInfo = RepTestUtils.setupEnvInfos(envRoot,
+                                                    3,
+                                                    createEnvConfig(true),
+                                                    repConfig);
+
+            ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+            checkNodeStates(0);
+
+            Database db = master.openDatabase(null, DB_NAME, createDbConfig());
+
+            RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+
+            /* Shut down replicas so that they don't see the commit. */
+            for (int i = 1; i < repEnvInfo.length; i++) {
+                repEnvInfo[i].closeEnv();
+            }
+
+            /* Insert data only on the master, then shut down the master. */
+            insertData(db, null, 1, 10, dataValue);
+            db.close();
+            checkContents(master, 1, 10, dataValue);
+            repEnvInfo[0].closeEnv();
+
+            /*
+             * Restart the replicas, and do some work so that they have
+             * different data from the former master, to cause a hard
+             * recovery.
+             */
+            master = RepTestUtils.restartGroup(repEnvInfo[1], repEnvInfo[2]);
+            db = master.openDatabase(null, DB_NAME, createDbConfig());
+
+            Transaction txn = master.beginTransaction(null, null);
+            insertData(db, txn, 101, 110, dataValue);
+            txn.commit();
+            CommitToken token = txn.getCommitToken();
+            db.close();
+            checkContents(master, 101, 110, dataValue);
+
+            /* Restart the old master, expecting hard recovery. */
+            try {
+                repEnvInfo[0].openEnv
+                    (new CommitPointConsistencyPolicy(token, 1000,
+                                                      TimeUnit.SECONDS));
+                assertTrue(
+                    RepInternal.getNonNullRepImpl(repEnvInfo[0].getEnv()).
+                        getNodeStats().
+                        getBoolean(RepImplStatDefinition.HARD_RECOVERY));
+            } catch (RollbackProhibitedException e) {

+                /*
+                 * Expected exception; truncate the unmatched log on the old
+                 * master.
+ */ + DbTruncateLog truncator = new DbTruncateLog(); + truncator.truncateLog(repEnvInfo[0].getEnvHome(), + e.getTruncationFileNumber(), + e.getTruncationFileOffset()); + + /* Reopen the old master after truncation. */ + repEnvInfo[0].openEnv + (new CommitPointConsistencyPolicy(token, 1000, + TimeUnit.SECONDS)); + } + assertTrue(repEnvInfo[0].isReplica()); + /* Check that old master has the newest log. */ + checkContents(repEnvInfo[0].getEnv(), 101, 110, dataValue); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } +} diff --git a/test/com/sleepycat/je/rep/RepGroupAdminTest.java b/test/com/sleepycat/je/rep/RepGroupAdminTest.java new file mode 100644 index 0000000..4e0c09c --- /dev/null +++ b/test/com/sleepycat/je/rep/RepGroupAdminTest.java @@ -0,0 +1,228 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep; + +import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.net.InetSocketAddress; +import java.util.HashSet; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.util.ReplicationGroupAdmin; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +public class RepGroupAdminTest extends RepTestBase { + + @Test + public void testRemoveMember() { + createGroup(groupSize); + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + assertTrue(master.getState().isMaster()); + + RepEnvInfo rmMember = repEnvInfo[repEnvInfo.length-1]; + Set helperSockets = + rmMember.getRepImpl().getHelperSockets(); + final String rmName = rmMember.getRepNode().getNodeName(); + rmMember.closeEnv(); + + ReplicationGroupAdmin groupAdmin = + new ReplicationGroupAdmin + (RepTestUtils.TEST_REP_GROUP_NAME, helperSockets, + RepTestUtils.readRepNetConfig()); + assertEquals(groupSize, + master.getGroup().getElectableNodes().size()); + groupAdmin.removeMember(rmName); + assertEquals(groupSize-1, + master.getGroup().getElectableNodes().size()); + + try { + rmMember.openEnv(); + fail("Expected exception"); + } catch (EnvironmentFailureException e) { + assertEquals(EnvironmentFailureReason.HANDSHAKE_ERROR, + e.getReason()); + } + + /* Exception tests. We currently allow either IAE or EFE. */ + try { + groupAdmin.removeMember("unknown node"); + fail("Expected exception"); + } catch (MemberNotFoundException e) { + // Expected. 
+ } + + try { + groupAdmin.removeMember(rmName); + fail("Expected exception"); + } catch (MemberNotFoundException e) { + // Expected. + } + + try { + groupAdmin.removeMember(master.getNodeName()); + fail("Expected exception"); + } catch (MasterStateException e) { + // Expected. + } + } + + @Test + public void testDeleteMember() { + createGroup(groupSize); + final ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + assertTrue(master.getState().isMaster()); + + final RepEnvInfo delMember = repEnvInfo[repEnvInfo.length-1]; + final Set helperSockets = + delMember.getRepImpl().getHelperSockets(); + final String delName = delMember.getRepNode().getNodeName(); + delMember.closeEnv(); + + final ReplicationGroupAdmin groupAdmin = new ReplicationGroupAdmin( + RepTestUtils.TEST_REP_GROUP_NAME, helperSockets, + RepTestUtils.readRepNetConfig()); + assertEquals(groupSize, + master.getGroup().getElectableNodes().size()); + groupAdmin.deleteMember(delName); + assertEquals(groupSize-1, + master.getGroup().getElectableNodes().size()); + + /* The deleted member automatically rejoins when reopened */ + delMember.openEnv(); + + /* Exception tests. */ + try { + groupAdmin.deleteMember("unknown node"); + fail("Expected exception"); + } catch (MemberNotFoundException e) { + // Expected. + } + + try { + groupAdmin.deleteMember(delName); + fail("Expected exception"); + } catch (EnvironmentFailureException e) { + // Expected. + } + + delMember.closeEnv(); + groupAdmin.deleteMember(delName); + + try { + groupAdmin.deleteMember(delName); + fail("Expected exception"); + } catch (MemberNotFoundException e) { + // Expected. + } + + try { + groupAdmin.deleteMember(master.getNodeName()); + fail("Expected exception"); + } catch (MasterStateException e) { + // Expected. + } + } + + @Test + public void testAddMonitor() + throws DatabaseException, InterruptedException { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + RepImpl lastImpl = RepInternal.getNonNullRepImpl( + repEnvInfo[repEnvInfo.length-1].getEnv()); + + Set helperSockets = + new HashSet(); + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + helperSockets.add(RepInternal.getNonNullRepImpl(rep).getSocket()); + } + + DbConfigManager lastConfigMgr = lastImpl.getConfigManager(); + ReplicationGroupAdmin groupAdmin = + new ReplicationGroupAdmin(lastConfigMgr.get(GROUP_NAME), + helperSockets, + RepTestUtils.readRepNetConfig()); + int lastId = lastImpl.getNodeId(); + final short monitorId = (short)(lastId+1); + + RepNodeImpl monitorNode = + new RepNodeImpl(new NameIdPair("monitor" + monitorId, + monitorId), + NodeType.MONITOR, + lastImpl.getHostName(), + lastImpl.getPort()+1, + null); + groupAdmin.ensureMonitor(monitorNode); + + /* Second ensure should not result in errors. */ + groupAdmin.ensureMonitor(monitorNode); + + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + assertTrue(master.getState().isMaster()); + /* All nodes should know about the new monitor. 
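+         * A similar membership check can be made through the public API; a
+         * sketch (illustrative only, and assuming the groupAdmin instance
+         * from above):
+         *
+         *   ReplicationGroup group = groupAdmin.getGroup();
+         *   for (ReplicationNode n : group.getMonitorNodes()) {
+         *       System.out.println(n.getName());
+         *   }
+         *
+         * The assertions below go through the internal RepGroupImpl
+         * representation instead.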
*/ + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + RepGroupImpl repGroup = + RepInternal.getNonNullRepImpl(rep).getRepNode().getGroup(); + RepNodeImpl monitor = repGroup.getMember(monitorId); + assertNotNull(monitor); + assertTrue(monitorNode.equivalent(monitor)); + } + + /* Catch incorrect use of an existing non-monitor node name */ + RepNodeImpl badMonitorNode = + new RepNodeImpl( + new NameIdPair(repEnvInfo[1].getRepConfig().getNodeName()), + NodeType.MONITOR, + lastImpl.getHostName(), + lastImpl.getPort(), + null); + try { + groupAdmin.ensureMonitor(badMonitorNode); + fail("expected exception"); + } catch (DatabaseException e) { + assertTrue(true); + } + + /* test exception from adding a non-monitor node. */ + badMonitorNode = + new RepNodeImpl(new NameIdPair("monitor" + monitorId, monitorId), + NodeType.ELECTABLE, + lastImpl.getHostName(), + lastImpl.getPort(), + null); + try { + groupAdmin.ensureMonitor(badMonitorNode); + fail("expected exception"); + } catch (EnvironmentFailureException e) { + assertTrue(true); + } + } +} diff --git a/test/com/sleepycat/je/rep/RepIDSequenceTest.java b/test/com/sleepycat/je/rep/RepIDSequenceTest.java new file mode 100644 index 0000000..2736663 --- /dev/null +++ b/test/com/sleepycat/je/rep/RepIDSequenceTest.java @@ -0,0 +1,182 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +public class RepIDSequenceTest extends TestBase { + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + private final int DB_NUM = 5; + private final int DB_SIZE = 100; + private final String DB_NAME = "test"; + private final boolean verbose = Boolean.getBoolean("verbose"); + + public RepIDSequenceTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /* Verify that id generation is in sequence after recovery. */ + @Test + public void testIDSequenceAfterRecovery() + throws Exception { + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1); + /* Get configurations so that they can be used when reopened.*/ + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + ReplicationConfig repConfig = master.getRepConfig(); + EnvironmentConfig envConfig = master.getConfig(); + File envHome = master.getHome(); + /* Do some operations. */ + doOperations(master, 1, DB_NUM); + /* Do a sync to make sure the ids are written to the log. 
*/
+        master.sync();
+        RepImpl repImpl = RepInternal.getNonNullRepImpl(master);
+        long lastRepNodeId =
+            repImpl.getNodeSequence().getLastReplicatedNodeId();
+        long lastDbId = repImpl.getDbTree().getLastReplicatedDbId();
+        long lastTxnId = repImpl.getTxnManager().getLastReplicatedTxnId();
+        /* Make sure that replicated ids are negative. */
+        assertTrue(lastRepNodeId < 0);
+        assertTrue(lastDbId < 0);
+        assertTrue(lastTxnId < 0);
+        master.close();
+
+        master = new ReplicatedEnvironment(envHome, repConfig, envConfig);
+        assertTrue(master.getState().isMaster());
+        repImpl = RepInternal.getNonNullRepImpl(master);
+
+        /*
+         * The node id, db id and txn id should be the same as before, since
+         * the test doesn't do any operations during this time.
+         */
+        assertEquals(lastRepNodeId,
+                     repImpl.getNodeSequence().getLastReplicatedNodeId());
+        assertEquals(lastDbId,
+                     repImpl.getDbTree().getLastReplicatedDbId());
+        assertEquals(lastTxnId,
+                     repImpl.getTxnManager().getLastReplicatedTxnId());
+        doOperations(master, DB_NUM + 1, DB_NUM + 1);
+
+        /*
+         * The db id and txn id should be smaller than before, since the test
+         * creates a new database; the db id should be 1 smaller than before.
+         */
+        assertTrue
+            (repImpl.getDbTree().getLastReplicatedDbId() == lastDbId - 1);
+        assertTrue
+            (repImpl.getTxnManager().getLastReplicatedTxnId() < lastTxnId);
+        master.close();
+    }
+
+    /* Verify that ids are in sequence after fail over. */
+    @Test
+    public void testIDSequenceAfterFailOver()
+        throws Exception {
+
+        repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3);
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        doOperations(master, 1, DB_NUM);
+        master.sync();
+        RepImpl repImpl = RepInternal.getNonNullRepImpl(master);
+        long lastRepNodeId =
+            repImpl.getNodeSequence().getLastReplicatedNodeId();
+        long lastDbId = repImpl.getDbTree().getLastReplicatedDbId();
+        long lastTxnId = repImpl.getTxnManager().getLastReplicatedTxnId();
+        /* Make sure that replicated ids are negative. */
+        assertTrue(lastRepNodeId < 0);
+        assertTrue(lastDbId < 0);
+        assertTrue(lastTxnId < 0);
+        checkEquality(repEnvInfo);
+        master.close();
+
+        master = RepTestUtils.openRepEnvsJoin(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+        repImpl = RepInternal.getNonNullRepImpl(master);
+        assertEquals(lastRepNodeId,
+                     repImpl.getNodeSequence().getLastReplicatedNodeId());
+        assertEquals(lastDbId,
+                     repImpl.getDbTree().getLastReplicatedDbId());
+
+        /*
+         * The replication group needs to do elections to select a master, so
+         * the txn id should be different from before.
+         */
+        assertTrue
+            (repImpl.getTxnManager().getLastReplicatedTxnId() <= lastTxnId);
+        /* Create a new database.
*/ + doOperations(master, DB_NUM + 1, DB_NUM + 1); + assertTrue + (repImpl.getDbTree().getLastReplicatedDbId() == lastDbId - 1); + assertTrue + (repImpl.getTxnManager().getLastReplicatedTxnId() < lastTxnId); + checkEquality(RepTestUtils.getOpenRepEnvs(repEnvInfo)); + } + + private void doOperations(ReplicatedEnvironment master, + int beginId, + int endId) + throws Exception { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(false); + + for (int i = beginId; i <= endId; i++) { + Database db = master.openDatabase + (null, DB_NAME + new Integer(i).toString(), dbConfig); + insertData(db); + db.close(); + } + } + + private void insertData(Database db) + throws Exception { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= DB_SIZE; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo", data); + db.put(null, key, data); + } + } + + private void checkEquality(RepEnvInfo[] repInfoArray) + throws Exception { + + VLSN vlsn = RepTestUtils.syncGroupToLastCommit(repInfoArray, + repInfoArray.length); + RepTestUtils.checkNodeEquality(vlsn, verbose, repInfoArray); + } +} diff --git a/test/com/sleepycat/je/rep/RepPreloadTest.java b/test/com/sleepycat/je/rep/RepPreloadTest.java new file mode 100644 index 0000000..49381de --- /dev/null +++ b/test/com/sleepycat/je/rep/RepPreloadTest.java @@ -0,0 +1,358 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Logger; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.MASTER; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.REPLICA; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.UNKNOWN; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Database; +import com.sleepycat.je.Environment; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.PreloadStats; +import com.sleepycat.je.PreloadStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; + +/** + * Test using the {@link Environment#preload} method with a replicated + * environment. Because preload acquires an exclusive latch on the btree for + * every database, it prevents the replay operation on the replica from making + * progress, causing the heartbeat to be delayed and, depending on the length + * of the delay, resulting in the replica being dropped from the replication + * group. + */ +public class RepPreloadTest extends RepTestBase { + + /** + * The number of entries in the database. Use a larger value to produce a + * preload time longer than the feeder timeout, to test that the preload + * disrupts the feeder in that case. The value 400000 works for this + * purpose on some platforms. + */ + private static final int NUM_ENTRIES = + Integer.getInteger("test.num_entries", 100000); + + /** + * The feeder timeout. Use a shorter value to permit the preload time to + * be longer than the feeder timeout. 2000 is the minimum, and is a good + * choice for this purpose. + */ + private static final long FEEDER_TIMEOUT_MS = + Long.getLong("test.feeder_timeout_ms", 10000); + + /** + * The preload timeout. Increase this time to allow the preload time to + * exceed the feeder timeout. + */ + private static final long PRELOAD_TIMEOUT_MS = + Long.getLong("test.preload_timeout_ms", 5000); + + /** The timeout for the replica to sync up. */ + private static final long REPLICA_SYNCUP_MS = + Long.getLong("test.replica_syncup_ms", 120000); + + /** The timeout to wait for events. */ + private static final long EVENT_TIMEOUT_MS = + Long.getLong("test.event_timeout_ms", 10000); + + /** The timeout to wait for replication to begin. */ + private static final long REPLICATION_START_TIMEOUT_MS = + Long.getLong("test.replication_start_timeout_ms", 60000); + + private static final Logger logger = + LoggerUtils.getLoggerFixedPrefix(RepPreloadTest.class, "Test"); + + /** + * Test that preloading on a replica causes the feeder to timeout. 
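+ * + * The thresholds above are read with Integer.getInteger and Long.getLong, + * so they can be overridden on the command line. For example (an + * illustration only, using the values suggested in the comments above): + * + * java -Dtest.num_entries=400000 -Dtest.feeder_timeout_ms=2000 ... + * + * should make the preload outlast the feeder timeout on some platforms.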
+ */ + @Test + public void testPreloadEnvironmentReplica() throws Exception { + + logger.info("Parameters:" + + "\n test.num_entries=" + NUM_ENTRIES + + "\n test.feeder_timeout_ms=" + FEEDER_TIMEOUT_MS + + "\n test.preload_timeout_ms=" + PRELOAD_TIMEOUT_MS + + "\n test.replica_syncup_ms=" + REPLICA_SYNCUP_MS + + "\n test.event_timeout_ms=" + EVENT_TIMEOUT_MS + + "\n test.replication_start_timeout_ms=" + + REPLICATION_START_TIMEOUT_MS); + + logger.info("Setting feeder timeout"); + + for (final RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setConfigParam( + ReplicationConfig.FEEDER_TIMEOUT, + FEEDER_TIMEOUT_MS + " ms"); + } + + createGroup(3); + + final RepEnvInfo master = repEnvInfo[0]; + final RepEnvInfo replica = repEnvInfo[2]; + + logger.info("Populating database"); + CommitToken populateToken = null; + + for (int i = 0; i < NUM_ENTRIES; i += 1000) { + populateToken = populateDB(master.getEnv(), TEST_DB_NAME, i, 1000); + } + + logger.info("Wait for replica to sync up"); + + Transaction txn = replica.getEnv().beginTransaction( + null, + new TransactionConfig().setConsistencyPolicy( + new CommitPointConsistencyPolicy( + populateToken, REPLICA_SYNCUP_MS, MILLISECONDS))); + + txn.commit(); + + /* Close the replica so that we can preload a fresh environment */ + repEnvInfo[2].closeEnv(); + logger.info("Closed replica"); + + /* + * Update existing entries in the database. This creates a replication + * stream, but also means that the preload will have plenty of data to + * load from the existing entries. + */ + final AtomicBoolean done = new AtomicBoolean(false); + + final Thread updateThread = new Thread() { + @Override + public void run() { + final Database db = master.getEnv().openDatabase( + null, TEST_DB_NAME, dbconfig); + try { + int i = 0; + logger.info("Starting updates"); + while (!done.get()) { + final Transaction txn2 = + master.getEnv().beginTransaction( + null, RepTestUtils.SYNC_SYNC_NONE_TC); + IntegerBinding.intToEntry(i % NUM_ENTRIES, key); + LongBinding.longToEntry(i, data); + db.put(txn2, key, data); + txn2.commit(); + } + } finally { + db.close(); + } + } + }; + + updateThread.start(); + + /* + * Don't wait for consistency, to make sure the preload has work to do. 
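+ * (If the open waited for consistency, replay would have largely caught + * up before the preload ran and the two would barely overlap; opening + * with NO_CONSISTENCY lets the handle return while replay is still busy.)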
+ */ + final ReplicatedEnvironment replicaEnv = + replica.openEnv(NoConsistencyRequiredPolicy.NO_CONSISTENCY); + logger.info("Opened replica"); + + /* Track node state changes */ + final TestStateChangeListener[] listeners = + new TestStateChangeListener[3]; + + for (int i = 0; i < 3; i++) { + final RepEnvInfo info = repEnvInfo[i]; + final TestStateChangeListener listener = + new TestStateChangeListener(info.getEnv().getNodeName()); + listeners[i] = listener; + info.getEnv().setStateChangeListener(listener); + } + + /* Clear initial events */ + listeners[0].awaitState(MASTER, EVENT_TIMEOUT_MS, MILLISECONDS); + listeners[1].awaitState(REPLICA, EVENT_TIMEOUT_MS, MILLISECONDS); + listeners[2].awaitState(REPLICA, EVENT_TIMEOUT_MS, MILLISECONDS); + + /* Track replication */ + final CountDownLatch started = new CountDownLatch(1); + + final StatsConfig statsConfig = + new StatsConfig().setFast(true).setClear(true); + + final Thread statsThread = new Thread() { + @Override + public void run() { + while (true) { + try { + final ReplicatedEnvironmentStats stats = + replicaEnv.getRepStats(statsConfig); + if (stats == null) { + break; + } + final long nReplayLNs = stats.getNReplayLNs(); + if (nReplayLNs > 0 && started.getCount() > 0) { + started.countDown(); + } + logger.fine("nReplayLNs: " + stats.getNReplayLNs()); + Thread.sleep(1000); + } catch (InterruptedException e) { + } catch (IllegalStateException e) { + break; + } + } + } + }; + statsThread.setDaemon(true); + statsThread.start(); + + Database db = null; + try { + logger.info("Opening replica DB"); + txn = replicaEnv.beginTransaction( + null, + /* + * Don't wait for consistency, to make sure the preload has + * work to do + */ + new TransactionConfig() + .setConsistencyPolicy( + NoConsistencyRequiredPolicy.NO_CONSISTENCY)); + db = replicaEnv.openDatabase(txn, TEST_DB_NAME, dbconfig); + txn.commit(); + + started.await(REPLICATION_START_TIMEOUT_MS, MILLISECONDS); + logger.info("Replication started"); + + logger.info("Starting preload"); + final PreloadConfig preloadConfig = + new PreloadConfig() + /* Loading the LNs takes more time -- good! */ + .setLoadLNs(true) + /* Specify timeout */ + .setMaxMillisecs(PRELOAD_TIMEOUT_MS); + final long startPreload = System.currentTimeMillis(); + final PreloadStats preloadStats = + replicaEnv.preload(new Database[] { db }, preloadConfig); + final long preloadTime = System.currentTimeMillis() - startPreload; + + assertEquals(PreloadStatus.SUCCESS, preloadStats.getStatus()); + + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(master.getEnv()); + boolean embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + if (embeddedLNs) { + assertTrue("nEmbeddedLNs > " + NUM_ENTRIES/2, + preloadStats.getNEmbeddedLNs() > NUM_ENTRIES/2); + } else { + assertTrue("nLNsLoaded > " + NUM_ENTRIES/2, + preloadStats.getNLNsLoaded() > NUM_ENTRIES/2); + } + + logger.info("Finished preload in " + preloadTime + " ms: " + + preloadStats); + + /* + * If the preload took longer than the feeder timeout, then the + * preload should cause the feeder to timeout and disconnect the + * replica. Otherwise, there should be no disruption to the + * feeder. 
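+ * (The feeder timeout governs how long the master waits for heartbeat + * responses; per the class javadoc, the exclusive btree latch held by the + * preload is what stalls the replica's replay, and with it the + * heartbeats.)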
*/ + if (preloadTime > FEEDER_TIMEOUT_MS) { + listeners[2].awaitState( + UNKNOWN, EVENT_TIMEOUT_MS, MILLISECONDS); + logger.info("Received expected event with state " + UNKNOWN); + } else { + listeners[2].awaitNoEvent(EVENT_TIMEOUT_MS, MILLISECONDS); + logger.info("Received no unexpected events"); + } + + } finally { + done.set(true); + updateThread.join(); + if (db != null) { + db.close(); + } + } + } + + /** A listener that queues and prints events. */ + private static class TestStateChangeListener + implements StateChangeListener { + + private final String node; + + final BlockingQueue<StateChangeEvent> events = + new LinkedBlockingQueue<StateChangeEvent>(); + + TestStateChangeListener(final String node) { + this.node = node; + } + + @Override + public void stateChange(final StateChangeEvent stateChangeEvent) { + events.add(stateChangeEvent); + String master; + try { + master = stateChangeEvent.getMasterNodeName(); + } catch (IllegalStateException e) { + master = "(none)"; + } + logger.info("State change event: " + + "node:" + node + + ", master:" + master + + ", state:" + stateChangeEvent.getState()); + } + + /** Wait for an event with the specified state. */ + void awaitState(final ReplicatedEnvironment.State state, + final long time, + final TimeUnit timeUnit) + throws InterruptedException { + + final StateChangeEvent event = events.poll(time, timeUnit); + assertNotNull("Expected state " + state + ", but got no event", + event); + assertEquals(state, event.getState()); + } + + /** Wait to confirm that no event is delivered. */ + void awaitNoEvent(final long time, final TimeUnit timeUnit) + throws InterruptedException { + + final StateChangeEvent event = events.poll(time, timeUnit); + assertNull("Expected no event", event); + } + } +} diff --git a/test/com/sleepycat/je/rep/ReplicatedEnvironmentStatsTest.java b/test/com/sleepycat/je/rep/ReplicatedEnvironmentStatsTest.java new file mode 100644 index 0000000..475a9e6 --- /dev/null +++ b/test/com/sleepycat/je/rep/ReplicatedEnvironmentStatsTest.java @@ -0,0 +1,94 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import org.junit.Test; + +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +public class ReplicatedEnvironmentStatsTest extends RepTestBase { + + // TODO: more detailed tests should check for expected stat return values + // under simulated conditions. + + /** + * Exercise every public entry point on master and replica stats. 
+ */ + @Test + public void testBasic() { + createGroup(); + + for (RepEnvInfo ri : repEnvInfo) { + ReplicatedEnvironment rep = ri.getEnv(); + final ReplicatedEnvironmentStats repStats = rep.getRepStats(null); + invokeAllAccessors(repStats); + } + } + + /** + * Simply exercise the code path + */ + private void invokeAllAccessors(ReplicatedEnvironmentStats stats) { + stats.getAckWaitMs(); + stats.getLastCommitTimestamp(); + stats.getLastCommitVLSN(); + stats.getNFeedersCreated(); + stats.getNFeedersShutdown(); + stats.getNMaxReplicaLag(); + stats.getNMaxReplicaLagName(); + stats.getNProtocolBytesRead(); + stats.getNProtocolBytesWritten(); + stats.getNProtocolMessagesRead(); + stats.getNProtocolMessagesWritten(); + stats.getNReplayAborts(); + stats.getNReplayCommitAcks(); + stats.getNReplayCommitNoSyncs(); + stats.getNReplayCommitSyncs(); + stats.getNReplayCommitWriteNoSyncs(); + stats.getNReplayCommits(); + stats.getNReplayGroupCommitMaxExceeded(); + stats.getNReplayGroupCommitTimeouts(); + stats.getNReplayGroupCommits(); + stats.getNReplayLNs(); + stats.getNReplayNameLNs(); + stats.getNTxnsAcked(); + stats.getNTxnsNotAcked(); + stats.getProtocolBytesReadRate(); + stats.getProtocolBytesWriteRate(); + stats.getProtocolMessageReadRate(); + stats.getProtocolMessageWriteRate(); + stats.getProtocolReadNanos(); + stats.getProtocolWriteNanos(); + stats.getReplayElapsedTxnTime(); + stats.getReplayLatestCommitLagMs(); + stats.getReplayMaxCommitProcessingNanos(); + stats.getReplayMinCommitProcessingNanos(); + stats.getReplayTotalCommitLagMs(); + stats.getReplayTotalCommitProcessingNanos(); + stats.getReplicaDelayMap(); + stats.getReplicaLastCommitTimestampMap(); + stats.getReplicaLastCommitVLSNMap(); + stats.getReplicaVLSNLagMap(); + stats.getReplicaVLSNRateMap(); + stats.getStatGroups(); + stats.getTips(); + stats.getTotalTxnMs(); + stats.getTrackerLagConsistencyWaitMs(); + stats.getTrackerLagConsistencyWaits(); + stats.getTrackerVLSNConsistencyWaitMs(); + stats.getTrackerVLSNConsistencyWaits(); + stats.getVLSNRate(); + } +} diff --git a/test/com/sleepycat/je/rep/ReplicatedEnvironmentTest.java b/test/com/sleepycat/je/rep/ReplicatedEnvironmentTest.java new file mode 100644 index 0000000..c15e87a --- /dev/null +++ b/test/com/sleepycat/je/rep/ReplicatedEnvironmentTest.java @@ -0,0 +1,974 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.Properties; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.ProgressListener; +import com.sleepycat.je.RecoveryProgress; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepParams.ChannelTypeConfigParam; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.net.PasswordSource; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup.TestHook; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.utilint.PropUtil; + +import org.junit.Test; + +/* + * TODO: + * 1) Test null argument for repConfig + * + */ +/** + * Check ReplicatedEnvironment-specific functionality; environment-specific + * functionality is covered via the DualTest infrastructure. + */ +public class ReplicatedEnvironmentTest extends RepTestBase { + + private static final String DEFAULT_NODEHOST = "localhost:5000"; + + /** + * Test to validate the code fragments contained in the javdoc for the + * ReplicatedEnvironment class, or to illustrate statements made there. + */ + @Test + public void testClassJavadoc() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(); + ReplicationConfig repEnvConfig = getRepEnvConfig(); + + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + repEnv.close(); + + repEnv = new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + RepImpl repImpl = repEnv.getNonNullRepImpl(); + assertTrue(repImpl != null); + + repEnv.close(); + /* It's ok to check after it's closed. */ + try { + repEnv.getState(); + fail("expected exception"); + } catch (IllegalStateException e) { + /* Expected. */ + } + } + + /** + * This is literally the snippet of code used as an + * startup example. Test here to make sure it compiles. 
+ */ + @Test + public void testExample() { + File envHome = new File("."); + try { + /******* begin example *************/ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + // Identify the node + ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setGroupName("PlanetaryRepGroup"); + repConfig.setNodeName("mercury"); + repConfig.setNodeHostPort("mercury.acme.com:5001"); + + // This is the first node, so its helper is itself + repConfig.setHelperHosts("mercury.acme.com:5001"); + + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envHome, repConfig, envConfig); + /******* end example *************/ + repEnv.close(); + } catch (IllegalArgumentException expected) { + } + + try { + /******* begin example *************/ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + // Identify the node + ReplicationConfig repConfig = + new ReplicationConfig("PlanetaryRepGroup", "Jupiter", + "jupiter.acme.com:5002"); + + // Use the node at mercury.acme.com:5001 as a helper to find the + // rest of the group. + repConfig.setHelperHosts("mercury.acme.com:5001"); + + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envHome, repConfig, envConfig); + + /******* end example *************/ + repEnv.close(); + } catch (IllegalArgumentException expected) { + } + } + + private EnvironmentConfig getEnvConfig() { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + return envConfig; + } + + private ReplicationConfig getRepEnvConfig() { + /* ** DO NOT ** use localhost in javadoc. */ + ReplicationConfig repEnvConfig = + new ReplicationConfig("ExampleGroup", "node1", DEFAULT_NODEHOST); + + /* Configure it to be the master. */ + repEnvConfig.setHelperHosts(repEnvConfig.getNodeHostPort()); + return repEnvConfig; + } + + @Test + public void testJoinGroupJavadoc() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(); + ReplicationConfig repEnvConfig = getRepEnvConfig(); + + ReplicatedEnvironment repEnv1 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + assertEquals(ReplicatedEnvironment.State.MASTER, repEnv1.getState()); + + ReplicatedEnvironment repEnv2 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + assertEquals(ReplicatedEnvironment.State.MASTER, repEnv2.getState()); + + repEnv1.close(); + repEnv2.close(); + } + + + /** + * Verify that an EFE will result in some other node being elected, even + * if the EFE environment has not been closed. + */ + @Test + public void testEFE() throws InterruptedException { + createGroup(); + final String masterName = repEnvInfo[0].getEnv().getNodeName(); + final CountDownLatch masterChangeLatch = new CountDownLatch(1); + + StateChangeListener listener = new StateChangeListener() { + + @Override + public void stateChange(StateChangeEvent sce) + throws RuntimeException { + + if (sce.getState().isActive() && + !sce.getMasterNodeName().equals(masterName)) { + masterChangeLatch.countDown(); + } + } + }; + + repEnvInfo[1].getEnv().setStateChangeListener(listener); + + /* Fail the master. */ + @SuppressWarnings("unused") + EnvironmentFailureException efe = + new EnvironmentFailureException(repEnvInfo[0].getRepImpl(), + EnvironmentFailureReason. + INSUFFICIENT_LOG); + /* Validate that master has moved on. 
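The test evidently relies on the EnvironmentFailureException constructor itself invalidating the environment passed to it, so the election should be triggered even though the EFE environment has not been closed.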
*/ + boolean ok = masterChangeLatch.await(10, TimeUnit.SECONDS); + + try { + assertTrue(ok); + } finally { + /* Now close the EFE environment */ + repEnvInfo[0].getEnv().close(); + } + } + + /** + * Verify exceptions resulting from timeouts due to slow syncups, or + * because the Replica was too far behind and could not catch up in the + * requisite time. + */ + @Test + public void testRepEnvTimeout() + throws DatabaseException { + + createGroup(); + repEnvInfo[2].closeEnv(); + populateDB(repEnvInfo[0].getEnv(), "db", 10); + + /* Get past syncup for replica consistency exception. */ + repEnvInfo[2].getRepConfig().setConfigParam + (RepParams.ENV_SETUP_TIMEOUT.getName(), "1000 ms"); + + TestHook syncupEndHook = new TestHook() { + @Override + public void doHook() throws InterruptedException { + Thread.sleep(Integer.MAX_VALUE); + } + }; + + ReplicaFeederSyncup.setGlobalSyncupEndHook(syncupEndHook); + + /* Syncup driven exception. */ + try { + repEnvInfo[2].openEnv(); + } catch (ReplicaConsistencyException ume) { + /* Expected exception. */ + } finally { + ReplicaFeederSyncup.setGlobalSyncupEndHook(null); + } + + repEnvInfo[2].getRepConfig().setConfigParam + (RepParams.TEST_REPLICA_DELAY.getName(), + Integer.toString(Integer.MAX_VALUE)); + repEnvInfo[2].getRepConfig().setConfigParam + (ReplicationConfig.ENV_CONSISTENCY_TIMEOUT, "1000 ms"); + try { + repEnvInfo[2].openEnv(); + } catch (ReplicaConsistencyException ume) { + /* Expected exception. */ + } + } + + /* + * Ensure that default consistency policy can be overridden in the handle. + */ + @Test + public void testRepEnvConfig() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(); + + ReplicationConfig repEnvConfig = getRepEnvConfig(); + + ReplicatedEnvironment repEnv1 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + /* Verify that default is used. */ + ReplicationConfig repConfig1 = repEnv1.getRepConfig(); + assertEquals(RepUtils.getReplicaConsistencyPolicy + (RepParams.CONSISTENCY_POLICY.getDefault()), + repConfig1.getConsistencyPolicy()); + + /* Override the policy in the handle. */ + repEnvConfig.setConsistencyPolicy + (NoConsistencyRequiredPolicy.NO_CONSISTENCY); + + ReplicatedEnvironment repEnv2 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + ReplicationConfig repConfig2 = repEnv2.getRepConfig(); + /* New handle should have new policy. */ + assertEquals(NoConsistencyRequiredPolicy.NO_CONSISTENCY, + repConfig2.getConsistencyPolicy()); + + /* Old handle should retain the old default policy. */ + assertEquals(RepUtils.getReplicaConsistencyPolicy + (RepParams.CONSISTENCY_POLICY.getDefault()), + repConfig1.getConsistencyPolicy()); + + /* Default should be retained for new handles. */ + repEnvConfig = new ReplicationConfig(); + repEnvConfig.setGroupName("ExampleGroup"); + repEnvConfig.setNodeName("node1"); + repEnvConfig.setNodeHostPort(DEFAULT_NODEHOST); + ReplicatedEnvironment repEnv3 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + ReplicationConfig repConfig3 = repEnv3.getRepConfig(); + assertEquals(RepUtils.getReplicaConsistencyPolicy + (RepParams.CONSISTENCY_POLICY.getDefault()), + repConfig3.getConsistencyPolicy()); + + repEnv1.close(); + repEnv2.close(); + repEnv3.close(); + } + + /* + * Ensure that channel configuration is set up properly. 
In particular, + * test that a ReplicationNetworkConfig object that is passed to a + * ReplicatedEnvironment constructor (indirectly) is retained through the + * RepEnvImpl construction process unless a channelType override says + * otherwise. + */ + @Test + public void testRepEnvNetConfig() + throws DatabaseException { + + Properties homeNetProps = RepTestUtils.readNetProps(); + String homeNetChanType = homeNetProps.getProperty("je.rep.channelType"); + + EnvironmentConfig envConfig = getEnvConfig(); + + ReplicationConfig repEnvConfig = getRepEnvConfig(); + + ReplicatedEnvironment repEnv1 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + /* Verify that default is used if no file property specified. */ + ReplicationConfig repConfig1 = repEnv1.getRepConfig(); + + if (homeNetChanType == null) { + /* No je.properties override */ + assertEquals(RepParams.CHANNEL_TYPE.getDefault(), + repConfig1.getRepNetConfig().getChannelType()); + + } else { + assertEquals(homeNetChanType, + repConfig1.getRepNetConfig().getChannelType()); + } + + /* Try setting to basic in the config. */ + repEnvConfig.setRepNetConfig(new ReplicationBasicConfig()); + + ReplicatedEnvironment repEnv2 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + ReplicationConfig repConfig2 = repEnv2.getRepConfig(); + + if (homeNetChanType == null) { + /* No je.properties override */ + assertEquals(ChannelTypeConfigParam.BASIC, + repConfig2.getRepNetConfig().getChannelType()); + + } else { + assertEquals(homeNetChanType, + repConfig2.getRepNetConfig().getChannelType()); + } + + /* Try setting to ssl in the config. */ + Properties sslProps = new Properties(); + RepTestUtils.setUnitTestSSLProperties(sslProps); + + /* + * Remove the channelType property to verify that it gets + * set automatically based on the created type. + */ + sslProps.remove(ReplicationNetworkConfig.CHANNEL_TYPE); + + ReplicationSSLConfig repSSLConfig = new ReplicationSSLConfig(sslProps); + PasswordSource pwdSrc = + new PasswordSource() { + @Override + public char[] getPassword() { + return new char[0]; + } + }; + repSSLConfig.setSSLKeyStorePasswordSource(pwdSrc); + repEnvConfig.setRepNetConfig(repSSLConfig); + + ReplicatedEnvironment repEnv3 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + ReplicationConfig repConfig3 = repEnv3.getRepConfig(); + + if (homeNetChanType == null) { + /* No je.properties override */ + assertEquals(ChannelTypeConfigParam.SSL, + repConfig3.getRepNetConfig().getChannelType()); + ReplicationSSLConfig envRepSSLConfig3 = + (ReplicationSSLConfig) repConfig3.getRepNetConfig(); + assertEquals(pwdSrc, + envRepSSLConfig3.getSSLKeyStorePasswordSource()); + + } else { + assertEquals(homeNetChanType, + repConfig3.getRepNetConfig().getChannelType()); + } + + repEnv1.close(); + repEnv2.close(); + repEnv3.close(); + } + + @Test + public void testRepEnvMutableConfig() { + final EnvironmentConfig envConfig = getEnvConfig(); + final ReplicationConfig repEnvConfig = getRepEnvConfig(); + final ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + /* Test mutable change to REPLAY and HELPER_HOST config parameters. */ + final ReplicationMutableConfig mutableConfig = + repEnv.getRepMutableConfig(); + + final int defaultHandles = + Integer.parseInt(RepParams.REPLAY_MAX_OPEN_DB_HANDLES. + getDefault()); + + assertEquals(defaultHandles, + ((RepImpl)DbInternal.getNonNullEnvImpl(repEnv)). 
+ getRepNode().getReplica().getDbCache().getMaxEntries()); + + mutableConfig.setConfigParam(ReplicationMutableConfig. + REPLAY_MAX_OPEN_DB_HANDLES, + Integer.toString(defaultHandles + 1)); + + final int defaultTimeoutMs = PropUtil.parseDuration( + RepParams.REPLAY_DB_HANDLE_TIMEOUT.getDefault()); + + assertEquals(defaultTimeoutMs, + ((RepImpl)DbInternal.getNonNullEnvImpl(repEnv)). + getRepNode().getReplica().getDbCache().getTimeoutMs()); + + mutableConfig.setConfigParam(ReplicationMutableConfig. + REPLAY_DB_HANDLE_TIMEOUT, + Integer.toString(defaultTimeoutMs + 1) + + " ms"); + + repEnv.setRepMutableConfig(mutableConfig); + + assertEquals(defaultHandles + 1, + ((RepImpl)DbInternal.getNonNullEnvImpl(repEnv)). + getRepNode().getReplica().getDbCache().getMaxEntries()); + + + assertEquals(defaultTimeoutMs + 1, + ((RepImpl)DbInternal.getNonNullEnvImpl(repEnv)). + getRepNode().getReplica().getDbCache().getTimeoutMs()); + + /* Check the current value of helper hosts */ + String currentHelperHosts = mutableConfig.getHelperHosts(); + assertEquals(DEFAULT_NODEHOST, currentHelperHosts); + + /* Set a new helper host value */ + mutableConfig.setHelperHosts("localhost:13100"); + repEnv.setRepMutableConfig(mutableConfig); + assertEquals("localhost:13100", + RepInternal.getNonNullRepImpl(repEnv). + getConfigManager().get(RepParams.HELPER_HOSTS)); + repEnv.close(); + } + + /* + * Verify that configuring BIND_INADDR_ANY results in use of a wildcard + * address by the ServiceDispatcher. + */ + @Test + public void testRepInaddrAnyConfig() { + final EnvironmentConfig envConfig = getEnvConfig(); + final ReplicationConfig repEnvConfig = getRepEnvConfig(); + + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + /* Verify specific address */ + assertTrue(!repEnv.getNonNullRepImpl().getRepNode(). + getServiceDispatcher().getSocketBoundAddress(). + isAnyLocalAddress()); + repEnv.close(); + + repEnvConfig.setConfigParam(ReplicationConfig.BIND_INADDR_ANY, + "true"); + + repEnv = new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + /* Verify that it's a wildcard */ + assertTrue(repEnv.getNonNullRepImpl().getRepNode(). + getServiceDispatcher().getSocketBoundAddress(). + isAnyLocalAddress()); + + repEnv.close(); + } + + /* + * Ensure that only a r/o standalone Environment can open on a closed + * replicated Environment home directory. + */ + @Test + public void testEnvOpenOnRepEnv() + throws DatabaseException { + + final EnvironmentConfig envConfig = getEnvConfig(); + final ReplicationConfig repEnvConfig = getRepEnvConfig(); + final DatabaseConfig dbConfig = getDbConfig(); + + final ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + Database db = repEnv.openDatabase(null, "db1", dbConfig); + final DatabaseEntry dk = new DatabaseEntry(new byte[10]); + final DatabaseEntry dv = new DatabaseEntry(new byte[10]); + OperationStatus stat = db.put(null, dk, dv); + assertEquals(OperationStatus.SUCCESS, stat); + db.close(); + repEnv.close(); + + try { + Environment env = new Environment(envRoot, envConfig); + env.close(); + fail("expected exception"); + } catch (UnsupportedOperationException e) { + /* Expected. */ + } + envConfig.setReadOnly(true); + dbConfig.setReadOnly(true); + + /* Open the replicated environment as read-only. Should be OK. 
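Both the environment and database configs were switched to read-only above, which is what makes this standalone open legal.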
*/ + Environment env = new Environment(envRoot, envConfig); + Transaction txn = env.beginTransaction(null, null); + db = env.openDatabase(txn, "db1", dbConfig); + stat = db.get(txn, dk, dv, LockMode.READ_COMMITTED); + assertEquals(OperationStatus.SUCCESS, stat); + + db.close(); + txn.commit(); + env.close(); + } + + /* + * Check that JE throws an UnsupportedOperationException if we open a r/w + * standalone Environment on an opened replicated Environment home + * directory. + */ + @Test + public void testOpenEnvOnAliveRepEnv() + throws DatabaseException { + + final EnvironmentConfig envConfig = getEnvConfig(); + final ReplicationConfig repConfig = getRepEnvConfig(); + + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envRoot, repConfig, envConfig); + + Environment env = null; + try { + env = new Environment(envRoot, envConfig); + env.close(); + fail("expected exception"); + } catch (UnsupportedOperationException e) { + /* Expected */ + } + + envConfig.setReadOnly(true); + + try { + env = new Environment(envRoot, envConfig); + env.close(); + fail("expected exception"); + } catch (IllegalArgumentException e) { + + /* + * Expect IllegalArgumentException since the ReplicatedEnvironment + * that this Environment opens is not read only. + */ + } + + repEnv.close(); + } + + @Test + public void testRepEnvUsingEnvHandle() + throws DatabaseException { + + final EnvironmentConfig envConfig = getEnvConfig(); + final DatabaseConfig dbConfig = getDbConfig(); + final DatabaseEntry dk = new DatabaseEntry(new byte[10]); + final DatabaseEntry dv = new DatabaseEntry(new byte[10]); + + { + final ReplicationConfig repEnvConfig = getRepEnvConfig(); + final ReplicatedEnvironment repEnv1 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + final Database db = repEnv1.openDatabase(null, "db1", dbConfig); + + OperationStatus stat = db.put(null, dk, dv); + assertEquals(OperationStatus.SUCCESS, stat); + db.close(); + repEnv1.close(); + } + + envConfig.setReadOnly(true); + final Environment env = new Environment(envRoot, envConfig); + dbConfig.setReadOnly(true); + + final Transaction t1 = env.beginTransaction(null, null); + final Database db = env.openDatabase(null, "db1", dbConfig); + + try { + /* Read operations ok. */ + OperationStatus stat = db.get(t1, dk, dv, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, stat); + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + t1.commit(); + + /* + * Iterate over all update operations that must fail, using auto and + * explicit commit. 
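+ * (In the inner loop below a null Transaction exercises the auto-commit + * path, while the non-null one exercises explicit commit/abort.)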
+ */ + for (TryOp op : new TryOp[] { + new TryOp(UnsupportedOperationException.class) { + @Override + void exec(Transaction t) + throws DatabaseException { + + db.put(t, dk, dv); + } + }, + + new TryOp(UnsupportedOperationException.class) { + @Override + void exec(Transaction t) + throws DatabaseException { + + db.delete(t, dk); + } + }, + + new TryOp(IllegalArgumentException.class) { + @Override + void exec(Transaction t) + throws DatabaseException { + + env.openDatabase(t, "db2", dbConfig); + } + }, + + new TryOp(UnsupportedOperationException.class) { + @Override + void exec(Transaction t) + throws DatabaseException { + + env.truncateDatabase(t, "db1", false); + } + }, + + new TryOp(UnsupportedOperationException.class) { + @Override + void exec(Transaction t) + throws DatabaseException { + + env.renameDatabase(t, "db1", "db2"); + } + }, + + new TryOp(UnsupportedOperationException.class) { + @Override + void exec(Transaction t) + throws DatabaseException { + + env.removeDatabase(t, "db1"); + } + }}) { + for (final Transaction t : new Transaction[] { + env.beginTransaction(null, null), null}) { + try { + op.exec(t); + fail("expected exception"); + } catch (RuntimeException e) { + if (!op.expectedException.equals(e.getClass())) { + e.printStackTrace(); + fail("unexpected exception." + + "Expected: " + op.expectedException + + "Threw: " + e.getClass()); + } + if (t != null) { + t.abort(); + continue; + } + } + if (t != null) { + t.commit(); + } + } + } + db.close(); + env.close(); + } + + /* + * Ensure that an exception is thrown when we open a replicated env, put + * some data in it, close the env, and then open a r/o replicated env. + */ + @Test + public void testReadOnlyRepEnvUsingEnvHandleSR17643() + throws DatabaseException { + + final EnvironmentConfig envConfig = getEnvConfig(); + final DatabaseConfig dbConfig = getDbConfig(); + final DatabaseEntry dk = new DatabaseEntry(new byte[10]); + final DatabaseEntry dv = new DatabaseEntry(new byte[10]); + + { + final ReplicationConfig repEnvConfig = getRepEnvConfig(); + final ReplicatedEnvironment repEnv1 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + + final Database db = repEnv1.openDatabase(null, "db1", dbConfig); + + OperationStatus stat = db.put(null, dk, dv); + assertEquals(OperationStatus.SUCCESS, stat); + db.close(); + repEnv1.close(); + } + + try { + final ReplicationConfig repEnvConfig = getRepEnvConfig(); + envConfig.setReadOnly(true); + final ReplicatedEnvironment repEnv1 = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + repEnv1.close(); + fail("expected an exception"); + } catch (IllegalArgumentException IAE) { + /* Expected. */ + } + } + + /** + * Check basic progress listener functionality. + */ + @Test + public void testRecoveryProgressListener() + throws DatabaseException { + + final EnvironmentConfig envConfig = getEnvConfig(); + final DatabaseConfig dbConfig = getDbConfig(); + final DatabaseEntry dk = new DatabaseEntry(new byte[10]); + final DatabaseEntry dv = new DatabaseEntry(new byte[10]); + + /* Check the phases seen by the recovery of a new environment. 
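TestProgress, defined at the end of this class, asserts that the RecoveryProgress phases arrive in exactly the order listed.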
*/ + doOperations(master, DB_NUM + 1, DB_NUM + 1); + assertTrue + (repImpl.getDbTree().getLastReplicatedDbId() == lastDbId - 1); + assertTrue + (repImpl.getTxnManager().getLastReplicatedTxnId() < lastTxnId); + checkEquality(RepTestUtils.getOpenRepEnvs(repEnvInfo)); + }
+ */ + public void xtestEnvConfigRequirements() + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + + ReplicationConfig repEnvConfig = + new ReplicationConfig("ExampleGroup", "node1", "localhost:5000"); + ReplicatedEnvironment repEnv = null; + try { + repEnv = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + repEnv.close(); + fail("expected exception saying env is not transactional"); + } catch (IllegalArgumentException e) { + /* Expected. */ + } + } + + /** + * Test setting ENV_UNKNOWN_STATE_TIMEOUT and checking that individual + * nodes can come up by themselves in the UNKNOWN state. + */ + @Test + public void testOpenUnknown() + throws DatabaseException { + + /* Try a SECONDARY node */ + repEnvInfo[repEnvInfo.length - 1].getRepConfig().setNodeType( + NodeType.SECONDARY); + + createGroup(); + closeNodes(repEnvInfo); + for (final RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setConfigParam( + ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "200 ms"); + info.openEnv(); + assertEquals(ReplicatedEnvironment.State.UNKNOWN, + info.getEnv().getState()); + info.closeEnv(); + } + } + + /** + * Verifies that a node can join the group, even in the absence of an + * initial quorum thus preventing the master from updating the rep group + * db, by retrying until a quorum eventually becomes available. + */ + @Test + public void testJoin() throws Exception { + /* Speed the test along. */ + setRepConfigParam(RepParams.INSUFFICIENT_REPLICAS_TIMEOUT, "1 s"); + /* Set to avoid UnknownMasterException in case test is slow. */ + setRepConfigParam(RepParams.ENV_SETUP_TIMEOUT, "1000 s"); + createGroup(3); + + /* Create insufficient replicas, so node 4 cannot join. */ + repEnvInfo[1].closeEnv(); + repEnvInfo[2].closeEnv(); + + /* + * Node 4 will keep trying to join and will fail until a quorum is + * established. + */ + final EnvOpenThread asyncOpen = new EnvOpenThread(repEnvInfo[3]); + asyncOpen.start(); + + asyncOpen.join(5000); + + /* Verify that the open is still being tried. */ + assertTrue(asyncOpen.isAlive()); + + /* Restore Quorum. */ + repEnvInfo[1].openEnv(); + repEnvInfo[2].openEnv(); + + /* Wait for env open to complete successfully. */ + asyncOpen.join(60000); + assertTrue(!asyncOpen.isAlive()); + assertTrue(asyncOpen.testException == null); + } +} diff --git a/test/com/sleepycat/je/rep/ReplicatedTransactionTest.java b/test/com/sleepycat/je/rep/ReplicatedTransactionTest.java new file mode 100644 index 0000000..6d4f67d --- /dev/null +++ b/test/com/sleepycat/je/rep/ReplicatedTransactionTest.java @@ -0,0 +1,828 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.DurabilityQuorum; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class ReplicatedTransactionTest extends TestBase { + + /* Convenience constants depicting variations in durability */ + static private final Durability SYNC_SYNC_ALL = + new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.ALL); + + static private final Durability SYNC_SYNC_QUORUM = + new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.SIMPLE_MAJORITY); + + static private final Durability SYNC_SYNC_NONE = + new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.NONE); + + private final File envRoot; + /* min group size must be three */ + private final int groupSize = 3; + + /* The replicators used for each test. */ + RepEnvInfo[] repEnvInfo = null; + DatabaseConfig dbconfig; + final DatabaseEntry key = new DatabaseEntry(new byte[]{1}); + final DatabaseEntry data = new DatabaseEntry(new byte[]{100}); + + public ReplicatedTransactionTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + dbconfig = new DatabaseConfig(); + dbconfig.setAllowCreate(true); + dbconfig.setTransactional(true); + dbconfig.setSortedDuplicates(false); + + super.setUp(); + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, groupSize, + SYNC_SYNC_ALL); + /* + * Avoid commits from group updates, that can result in assertion + * failures in verifyReplicaStats. 
+ */ + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + } + + @Override + @After + public void tearDown() { + try { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } catch (Throwable t) { + t.printStackTrace(); + } + } + + @SuppressWarnings("unused") + /* For future tests */ + private void waitForReplicaConnections(final ReplicatedEnvironment master) + throws DatabaseException { + + assertTrue(master.getState().isMaster()); + Environment env = master; + TransactionConfig tc = new TransactionConfig(); + tc.setDurability(SYNC_SYNC_ALL); + Transaction td = env.beginTransaction(null, tc); + td.commit(); + } + + @Test + public void testAutoCommitDatabaseCreation() + throws UnknownMasterException, + DatabaseException, + InterruptedException { + + ReplicatedEnvironment master = repEnvInfo[0].openEnv(); + State status = master.getState(); + assertEquals(status, State.MASTER); + /* Create via auto txn. */ + Database mdb = master.openDatabase(null, "randomDB", dbconfig); + + /* Replicate the database. */ + ReplicatedEnvironment replica = repEnvInfo[1].openEnv(); + status = replica.getState(); + assertEquals(status, State.REPLICA); + try { + Database db = replica.openDatabase(null, "randomDB", dbconfig); + db.close(); + mdb.close(); + } catch (Exception e) { + fail("Unexpected exception"); + e.printStackTrace(); + } + VLSN commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, 2); + RepTestUtils.checkNodeEquality(master, replica, commitVLSN, false); + } + + @Test + public void testReadonlyTxnBasic() + throws DatabaseException { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + final Environment menv = master; + RepEnvInfo replicaInfo = findAReplica(repEnvInfo); + createEmptyDB(menv); + + replicaInfo.closeEnv(); + final TransactionConfig mtc = new TransactionConfig(); + mtc.setDurability(SYNC_SYNC_QUORUM); + final DatabaseEntry keyEntry = new DatabaseEntry(); + IntegerBinding.intToEntry(1, keyEntry); + long lastTime = 0; + for (int i=0; i < 100; i++) { + Transaction mt = menv.beginTransaction(null, mtc); + Database db = menv.openDatabase(mt, "testDB", dbconfig); + IntegerBinding.intToEntry(i, keyEntry); + DatabaseEntry value = new DatabaseEntry(); + lastTime = System.currentTimeMillis(); + LongBinding.longToEntry(lastTime, value); + db.put(mt, keyEntry, value); + mt.commit(); + db.close(); + } + State state = replicaInfo.openEnv().getState(); + /* Slow down the replay on the replica, so the transaction waits. */ + RepImpl repImpl = RepInternal.getNonNullRepImpl(replicaInfo.getEnv()); + repImpl.getRepNode().replica().setTestDelayMs(1); + assertEquals(state, State.REPLICA); + + final Environment renv = replicaInfo.getEnv(); + final TransactionConfig rtc = new TransactionConfig(); + /* Ignore the lag */ + rtc.setConsistencyPolicy + (new TimeConsistencyPolicy(Integer.MAX_VALUE, + TimeUnit.MILLISECONDS, 0, null)); + + Transaction rt = renv.beginTransaction(null, rtc); + + Database rdb = renv.openDatabase(rt, "testDB", dbconfig); + + rt.commit(); + /* Consistent within 2ms of master. 
*/ + rtc.setConsistencyPolicy + (new TimeConsistencyPolicy(2, TimeUnit.MILLISECONDS, + RepTestUtils.MINUTE_MS, + TimeUnit.MILLISECONDS)); + rt = renv.beginTransaction(null, rtc); + DatabaseEntry val= new DatabaseEntry(); + OperationStatus status = + rdb.get(rt, keyEntry, val, LockMode.READ_COMMITTED); + assertEquals(OperationStatus.SUCCESS, status); + long entryTime = LongBinding.entryToLong(val); + assertEquals(lastTime, entryTime); + rt.commit(); + rdb.close(); + } + + /** + * Tests transaction begin on the master to make sure that the transaction + * scope is only entered if the current Ack policy can be satisfied. + */ + @Test + public void testMasterTxnBegin() + throws DatabaseException { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + final Environment env = master; + final TransactionConfig tc = new TransactionConfig(); + + ExpectNoException noException = new ExpectNoException() { + @Override + void test() + throws DatabaseException { + + t = env.beginTransaction(null, tc); + } + }; + + class ExpectInsufficientReplicasException + extends ExpectException { + final int requiredNodeCount; + ExpectInsufficientReplicasException(int requiredNodeCount) { + super(InsufficientReplicasException.class); + this.requiredNodeCount = requiredNodeCount; + } + @Override + void test() + throws DatabaseException { + + t = env.beginTransaction(null, tc); + } + @Override + void checkException(InsufficientReplicasException e) { + assertEquals("Required node count", + requiredNodeCount, e.getRequiredNodeCount()); + } + } + + tc.setDurability(SYNC_SYNC_ALL); + noException.exec(); + + shutdownAReplica(master, repEnvInfo); + /* Timeout with database exception for Ack all with missing replica. */ + new ExpectInsufficientReplicasException(3).exec(); + + tc.setDurability(SYNC_SYNC_QUORUM); + /* No exception with one less replica since we still have a quorum. */ + noException.exec(); + + DurabilityQuorum dq = RepInternal.getNonNullRepImpl(master). + getRepNode().getDurabilityQuorum(); + final int quorumReplicas = dq.getCurrentRequiredAckCount + (Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); + int liveReplicas = groupSize - 2 /* master + shutdown replica */; + + /* Shut them down until we cross the quorum threshold. */ + while (liveReplicas-- >= quorumReplicas) { + shutdownAReplica(master, repEnvInfo); + } + + /* Timeout due to lack of quorum. */ + new ExpectInsufficientReplicasException(2).exec(); + + /* No Acks -- no worries. */ + tc.setDurability(SYNC_SYNC_NONE); + noException.exec(); + } + + /** + * Test auto commit operations. They are all positive tests. 
+ */ + @Test + public void testAutoTransactions() + throws DatabaseException { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + final Environment env = master; + new ExpectNoException() { + @Override + void test() + throws DatabaseException { + + db = env.openDatabase(null, "testDB", dbconfig); + db.put(null, key, data); + DatabaseEntry val = new DatabaseEntry(); + OperationStatus status = + db.get(null, key, val, LockMode.READ_COMMITTED); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(data, val); + } + }.exec(); + } + + @Test + public void testReplicaAckPolicy() + throws UnknownMasterException, + DatabaseException { + + final ReplicatedEnvironment master = + RepTestUtils.joinGroup(repEnvInfo); + final Environment env = master; + final int repNodes = groupSize - 1; + + createEmptyDB(env); + resetReplicaStats(repEnvInfo); + new ExpectNoException() { + @Override + void test() + throws DatabaseException { + + TransactionConfig tc = new TransactionConfig(); + tc.setDurability(SYNC_SYNC_ALL); + t = env.beginTransaction(null, tc); + db = env.openDatabase(t, "testDB", dbconfig); + /* No changes, so it does not call for a replica commit. */ + t.commit(SYNC_SYNC_ALL); + tc.setDurability(SYNC_SYNC_ALL); + t = env.beginTransaction(null, tc); + db.put(t, key, data); + t.commit(SYNC_SYNC_ALL); + t = null; + /* Verify that all the replicas Ack'd the commit and synced. */ + int replicas = verifyReplicaStats(new long[] {1, 1, 1, 0, 0}); + assertEquals(repNodes, replicas); + } + }.exec(); + + resetReplicaStats(repEnvInfo); + + DurabilityQuorum dq = RepInternal.getNonNullRepImpl(master). + getRepNode().getDurabilityQuorum(); + final int quorumReplicas = dq.getCurrentRequiredAckCount + (Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); + + new ExpectNoException() { + @Override + void test() + throws DatabaseException { + + TransactionConfig tc = new TransactionConfig(); + tc.setDurability(SYNC_SYNC_ALL); + t = env.beginTransaction(null, tc); + db = env.openDatabase(t, "testDB", dbconfig); + /* No changes, so it does not call for a replica commit. */ + t.commit(SYNC_SYNC_ALL); + shutdownAReplica(master, repEnvInfo); + tc.setDurability(SYNC_SYNC_QUORUM); + t = env.beginTransaction(null, tc); + db.put(t, key, data); + t.commit(SYNC_SYNC_QUORUM); + t = null; + /* Verify that the replicas Ack'd the commit and synced. */ + int replicas = verifyReplicaStats(new long[] {1, 1, 1, 0, 0}); + assertTrue(replicas >= quorumReplicas); + } + }.exec(); + + int liveReplicas = repNodes - 1 /* master + shutdown replica */; + + /* Shut them down until we cross the quorum threshold. */ + while (liveReplicas-- >= quorumReplicas) { + shutdownAReplica(master, repEnvInfo); + } + + resetReplicaStats(repEnvInfo); + new ExpectNoException() { + @Override + void test() + throws DatabaseException { + + TransactionConfig tc = new TransactionConfig(); + tc.setDurability(SYNC_SYNC_NONE); + t = env.beginTransaction(null, tc); + db = env.openDatabase(t, "testDB", dbconfig); + /* No changes, so it does not call for a replica commit */ + t.commit(SYNC_SYNC_NONE); + tc.setDurability(SYNC_SYNC_NONE); + t = env.beginTransaction(null, tc); + db.put(t, key, data); + t.commit(SYNC_SYNC_NONE); + t = null; + /* We did not wait for any acks. */ + } + }.exec(); + } + + /* + * Simple test to create a database and make some changes on a master + * with an explicit commit ACK policy. 
+ */ + @Test + public void testReplicaCommitDurability() + throws UnknownMasterException, + DatabaseException { + + final ReplicatedEnvironment master = + RepTestUtils.joinGroup(repEnvInfo); + final Environment env = master; + int repNodes = groupSize - 1; + final Durability[] durabilityTest = new Durability[] { + new Durability(SyncPolicy.SYNC, SyncPolicy.SYNC, + ReplicaAckPolicy.ALL), + new Durability(SyncPolicy.SYNC, SyncPolicy.NO_SYNC, + ReplicaAckPolicy.ALL), + new Durability(SyncPolicy.SYNC, SyncPolicy.WRITE_NO_SYNC, + ReplicaAckPolicy.ALL) + }; + + /* The expected commit statistics, for the above durability config. */ + long[][] statistics = { {1, 1, 1, 0, 0}, + {1, 1, 0, 1, 0}, + {1, 1, 0, 0, 1}}; + createEmptyDB(env); + for (int i=0; i < durabilityTest.length; i++) { + resetReplicaStats(repEnvInfo); + final int testNo = i; + new ExpectNoException() { + @Override + void test() + throws DatabaseException { + + t = env.beginTransaction(null, null); + db = env.openDatabase(t, "testDB", dbconfig); + /* No changes, so it does not call for a replica commit. */ + t.commit(durabilityTest[testNo]); + t = env.beginTransaction(null, null); + db.put(t, key, data); + + /* + * A modification requiring acknowledgment from the + * replicas. + */ + t.commit(durabilityTest[testNo]); + t = null; + } + }.exec(); + /* Verify that all the replicas Ack'd the commit and synced. */ + int replicas = verifyReplicaStats(statistics[i]); + assertEquals(repNodes, replicas); + } + + /* Verify that the committed value was available on the Replica. */ + RepEnvInfo replicaInfo = findAReplica(repEnvInfo); + final Environment renv = replicaInfo.getEnv(); + try { + Transaction rt = renv.beginTransaction(null, null); + Database replicaDb = renv.openDatabase(rt, "testDB", dbconfig); + DatabaseEntry val = new DatabaseEntry(); + OperationStatus status = + replicaDb.get(rt, key, val, LockMode.READ_COMMITTED); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(data, val); + rt.commit(); + replicaDb.close(); + } catch (Throwable e) { + e.printStackTrace(); + fail("Unexpected exception"); + } + + /* Repeat for a Quorum. */ + + resetReplicaStats(repEnvInfo); + new ExpectNoException() { + @Override + void test() + throws DatabaseException { + + t = env.beginTransaction(null, null); + db = env.openDatabase(t, "testDB", dbconfig); + t.commit(SYNC_SYNC_ALL); + t = env.beginTransaction(null, null); + shutdownAReplica(master, repEnvInfo); + db.put(t, key, data); + t.commit(SYNC_SYNC_QUORUM); + t = null; + } + }.exec(); + } + + /* + * A very basic test to ensure that "write" operations are disallowed on + * the replica db. + */ + /* + * TODO: need a more comprehensive test enumerating every type of write + * operation on the Env and database. Is there an easy way to do this? 
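+ *
+ * A reasonable enumeration would cover at least Database.put,
+ * putNoOverwrite, putNoDupData and delete, as well as
+ * Environment.truncateDatabase, removeDatabase and renameDatabase, each
+ * of which should throw ReplicaWriteException when attempted on a replica.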
+ */ + @Test + public void testReplicaReadonlyTransaction() + throws DatabaseException { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + { /* Create a database for use in subsequent tests */ + Environment env = master; + try { + Transaction t = env.beginTransaction(null, null); + Database testDb = env.openDatabase(t, "testDB", dbconfig); + t.commit(SYNC_SYNC_ALL); + testDb.close(); + assertTrue(true); + } catch (Throwable e) { + e.printStackTrace(); + fail("Unexpected exception"); + } + } + + RepEnvInfo replicaInfo = findAReplica(repEnvInfo); + final Environment renv = replicaInfo.getEnv(); + new ExpectException(ReplicaWriteException.class) + { + @Override + void test() + throws DatabaseException { + + t = renv.beginTransaction(null, null); + db = renv.openDatabase(t, "testDB", dbconfig); + db.put(t, key, data); + } + }.exec(); + + new ExpectException(ReplicaWriteException.class) + { + @Override + void test() + throws DatabaseException { + + t = renv.beginTransaction(null, null); + db = renv.openDatabase(t, "testDBRep", dbconfig); + } + }.exec(); + + /* + * A delete operation is tested specially below. At one time a bug + * in LSN locking would allow a delete on a replica, due to a problem + * with the uncontended lock optimization (see CursorImpl.lockLN). + * + * In one case (encountered in stress testing and reproduced here), a + * NPE was thrown when logging the deletion, and the VLSNIndex + * attempted to bump the VLSN. + * + * In another case (encountered in McStress), the delete was logged + * successfully because the node transitioned to Master. A + * ReplicaWriteException was thrown after that, which caused an abort, + * which threw an NPE because the undoDatabases field was not + * initialized in Txn, because no write lock was taken. + */ + final Transaction t = master.beginTransaction(null, null); + final Database testDb = master.openDatabase(t, "testDB", dbconfig); + final OperationStatus status = testDb.putNoOverwrite(t, key, data); + assertSame(OperationStatus.SUCCESS, status); + t.commit(SYNC_SYNC_ALL); + testDb.close(); + + new ExpectException(ReplicaWriteException.class) + { + @Override + void test() + throws DatabaseException { + + t = renv.beginTransaction(null, null); + db = renv.openDatabase(t, "testDB", dbconfig); + db.delete(t, key); + } + }.exec(); + } + + @Test + public void testTxnCommitException() + throws UnknownMasterException, + DatabaseException { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + Environment env = master; + TransactionConfig tc = new TransactionConfig(); + tc.setDurability(SYNC_SYNC_ALL); + Transaction td = env.beginTransaction(null, tc); + td.commit(); + Database db = null; + Transaction t = null; + try { + t = env.beginTransaction(null, null); + shutdownAReplica(master, repEnvInfo); + db = env.openDatabase(t, "testDB", dbconfig); + + /* + * Should fail with ALL policy in place and a missing replica in + * the preLogCommitHook. + */ + t.commit(SYNC_SYNC_ALL); + fail("expected CommitException"); + } catch (InsufficientReplicasException e) { + if (t != null) { + t.abort(); + } + if (db != null) { + db.close(); + } + /* Make sure we get to this point successfully */ + assertTrue(true); + } catch (Throwable e) { + e.printStackTrace(); + fail("Unexpected exception"); + } + } + + /* Utility methods below. */ + + /* + * Create an empty database for test purposes. 
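+ * The creation is committed with SYNC_SYNC_ALL so that every replica has
+ * the database before the caller starts making assertions against it.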
*/
+    private Database createEmptyDB(final Environment env)
+        throws DatabaseException {
+
+        ExpectNoException ene =
+            new ExpectNoException() {
+                @Override
+                void test()
+                    throws DatabaseException {
+
+                    t = env.beginTransaction(null, null);
+                    db = env.openDatabase(t, "testDB", dbconfig);
+                    t.commit(SYNC_SYNC_ALL);
+                    t = null;
+                }
+            };
+        ene.exec();
+        return ene.db;
+    }
+
+    /*
+     * Shut down one of the replicas and wait for the Master to shut down its
+     * associated feeder.
+     */
+    private ReplicatedEnvironment
+        shutdownAReplica(ReplicatedEnvironment master,
+                         RepEnvInfo[] replicators)
+        throws DatabaseException {
+
+        RepNode masterRepNode =
+            RepInternal.getNonNullRepImpl(master).getRepNode();
+        int replicaCount =
+            masterRepNode.feederManager().activeReplicas().size();
+        final RepEnvInfo shutdownReplicaInfo = findAReplica(replicators);
+        assertNotNull(shutdownReplicaInfo);
+        shutdownReplicaInfo.getEnv().close();
+
+        /* Wait for feeder to recognize it's gone. */
+        for (int i=0; i < 60; i++) {
+            int currReplicaCount =
+                masterRepNode.feederManager().activeReplicas().size();
+            if (currReplicaCount == replicaCount) {
+                try {
+                    Thread.sleep(1000);
+                } catch (InterruptedException e) {
+                    fail("unexpected interrupt exception");
+                }
+            } else {
+                /* The feeder has noticed the departure; stop waiting. */
+                break;
+            }
+        }
+        assertTrue
+            (masterRepNode.feederManager().activeReplicas().size() <
+             replicaCount);
+
+        return null;
+    }
+
+    /**
+     * Select one of the active replicas and return it.
+     */
+    private RepEnvInfo findAReplica(RepEnvInfo[] replicators)
+        throws DatabaseException {
+
+        for (RepEnvInfo repi : replicators) {
+            ReplicatedEnvironment replicator = repi.getEnv();
+            if (!replicator.isValid() ||
+                replicator.getState().isMaster()) {
+                continue;
+            }
+            return repi;
+        }
+        return null;
+    }
+
+    /**
+     * Resets the statistics associated with a Replica
+     * @param replicators
+     * @throws DatabaseException
+     */
+    private void resetReplicaStats(RepEnvInfo[] replicators)
+        throws DatabaseException {
+
+        for (RepEnvInfo repi : replicators) {
+            ReplicatedEnvironment replicator = repi.getEnv();
+            if ((replicator == null) ||
+                !replicator.isValid() ||
+                replicator.getState().isMaster()) {
+                continue;
+            }
+            RepInternal.getNonNullRepImpl(replicator).getReplay().resetStats();
+        }
+    }
+
+    private int verifyReplicaStats(long[] expected)
+        throws DatabaseException {
+
+        int replicas = 0;
+        for (RepEnvInfo repi : repEnvInfo) {
+            ReplicatedEnvironment replicator = repi.getEnv();
+
+            if (!replicator.isValid() ||
+                replicator.getState().isMaster()) {
+                continue;
+            }
+            replicas++;
+            ReplicatedEnvironmentStats actual =
+                replicator.getRepStats(StatsConfig.DEFAULT);
+            assertEquals(expected[0], actual.getNReplayCommits());
+            assertEquals(expected[1], actual.getNReplayCommitAcks());
+
+            assertEquals(expected[2],
+                         actual.getNReplayCommitSyncs() +
+                         actual.getNReplayGroupCommitTxns());
+            assertEquals(expected[3],
+                         actual.getNReplayCommitNoSyncs() -
+                         actual.getNReplayGroupCommitTxns());
+            assertEquals(expected[4], actual.getNReplayCommitWriteNoSyncs());
+        }
+
+        return replicas;
+    }
+
+    /*
+     * Helper classes for exception testing.
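+     *
+     * Typical usage, modeled on the tests above: subclass anonymously, do
+     * the work in test(), and let exec() verify the outcome and release the
+     * transaction and database handles:
+     *
+     *    new ExpectException<ReplicaWriteException>(
+     *            ReplicaWriteException.class) {
+     *        @Override
+     *        void test() throws DatabaseException {
+     *            t = renv.beginTransaction(null, null);
+     *            db = renv.openDatabase(t, "testDB", dbconfig);
+     *            db.put(t, key, data);
+     *        }
+     *    }.exec();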
+     */
+    private abstract class ExpectException<T extends Throwable> {
+        private final Class<T> exceptionClass;
+        Transaction t = null;
+        Database db = null;
+
+        ExpectException(Class<T> exceptionClass) {
+            this.exceptionClass = exceptionClass;
+        }
+
+        abstract void test() throws Throwable;
+
+        void exec()
+            throws DatabaseException {
+
+            try {
+                test();
+                try {
+                    if (t != null) {
+                        t.abort();
+                    }
+                    t = null;
+                } catch (Exception ae) {
+                    ae.printStackTrace();
+                    fail("Spurious exception");
+                }
+                fail("Exception expected");
+            } catch (Throwable th) {
+                if (!exceptionClass.isInstance(th)) {
+                    th.printStackTrace(System.err);
+                    fail("unexpected exception");
+                }
+                checkException(exceptionClass.cast(th));
+            } finally {
+                if (t != null) {
+                    t.abort();
+                }
+                if (db != null) {
+                    db.close();
+                }
+                t = null;
+                db = null;
+            }
+        }
+
+        void checkException(T th) { }
+    }
+
+    private abstract class ExpectNoException {
+        Transaction t = null;
+        Database db = null;
+        abstract void test() throws Throwable;
+
+        void exec()
+            throws DatabaseException {
+
+            try {
+                test();
+                if (t != null) {
+                    t.commit();
+                }
+                t = null;
+            } catch (Throwable th) {
+                th.printStackTrace(System.err);
+                fail("unexpected exception");
+            } finally {
+                if (t != null) {
+                    t.abort();
+                }
+                if (db != null) {
+                    db.close();
+                }
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/ReplicationConfigTest.java b/test/com/sleepycat/je/rep/ReplicationConfigTest.java
new file mode 100644
index 0000000..391c2e7
--- /dev/null
+++ b/test/com/sleepycat/je/rep/ReplicationConfigTest.java
@@ -0,0 +1,193 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.CommitToken;
+import com.sleepycat.je.ReplicaConsistencyPolicy;
+import com.sleepycat.je.rep.impl.PointConsistencyPolicy;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+public class ReplicationConfigTest extends TestBase {
+
+    ReplicationConfig repConfig;
+
+    @Override
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        repConfig = new ReplicationConfig();
+    }
+
+    // TODO: need tests for every entrypoint
+
+    @Test
+    public void testConsistency() {
+
+        ReplicaConsistencyPolicy policy =
+            new TimeConsistencyPolicy(100, TimeUnit.MILLISECONDS,
+                                      1, TimeUnit.SECONDS);
+        repConfig.setConsistencyPolicy(policy);
+        assertEquals(policy, repConfig.getConsistencyPolicy());
+
+        policy = NoConsistencyRequiredPolicy.NO_CONSISTENCY;
+        repConfig.setConsistencyPolicy(policy);
+        assertEquals(policy, repConfig.getConsistencyPolicy());
+
+        try {
+            policy =
+                new CommitPointConsistencyPolicy
+                (new CommitToken(new UUID(0, 0), 0), 0, null);
+            repConfig.setConsistencyPolicy(policy);
+            fail("Exception expected");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+
+        try {
+            policy = new PointConsistencyPolicy(VLSN.NULL_VLSN);
+            repConfig.setConsistencyPolicy(policy);
+            fail("Exception expected");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+
+        try {
+            repConfig.setConfigParam
+                (RepParams.CONSISTENCY_POLICY.getName(),
+                 "badPolicy");
+            fail("Exception expected");
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
+    }
+
+    @Test
+    public void testHelperHosts() {
+        /* Correct configs */
+        repConfig.setHelperHosts("localhost");
+        Set<InetSocketAddress> helperSockets = repConfig.getHelperSockets();
+        assertEquals(1, helperSockets.size());
+        assertEquals(Integer.parseInt(RepParams.DEFAULT_PORT.getDefault()),
+                     helperSockets.iterator().next().getPort());
+
+        repConfig.setHelperHosts("localhost:6000");
+        helperSockets = repConfig.getHelperSockets();
+        assertEquals(1, helperSockets.size());
+        assertEquals(6000, helperSockets.iterator().next().getPort());
+
+        repConfig.setHelperHosts("localhost:6000,localhost:6001");
+        helperSockets = repConfig.getHelperSockets();
+        assertEquals(2, helperSockets.size());
+
+        /* Incorrect configs */
+        /*
+         * It would be nice if this were an effective test, but because various
+         * ISPs will not actually let their DNS servers return an unknown
+         * host, we can't rely on this failing.
+        try {
+            repConfig.setHelperHosts("unknownhost");
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+        */
+        try {
+            repConfig.setHelperHosts("localhost:80");
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+        try {
+            repConfig.setHelperHosts("localhost:xyz");
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+
+        try {
+            repConfig.setHelperHosts(":6000");
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+    }
+
+    @Test
+    public void testMinRetainedVLSNs() {
+
+        /* Boundary conditions */
+        repConfig.setConfigParam(RepParams.MIN_RETAINED_VLSNS.getName(), "0");
+
+        repConfig.setConfigParam(RepParams.MIN_RETAINED_VLSNS.getName(),
+                                 Integer.toString(Integer.MAX_VALUE));
+
+        /* Routine */
+        repConfig.setConfigParam(RepParams.MIN_RETAINED_VLSNS.getName(),
+                                 "100");
+
+        try {
+            repConfig.setConfigParam(RepParams.MIN_RETAINED_VLSNS.getName(),
+                                     "-1");
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+    }
+
+    @Test
+    public void testSerialize()
+        throws Throwable {
+
+        ReplicationConfig repConfig = new ReplicationConfig();
+        /* Test the serialized fields in ReplicationMutableConfig, props. */
+        repConfig.setNodeName("node1");
+        repConfig.setGroupName("group");
+        repConfig.setNodeHostPort("localhost:5001");
+        /* Test the serialized fields in ReplicationConfig. */
+        repConfig.setAllowConvert(true);
+
+        File envHome = SharedTestUtils.getTestDir();
+        ReplicationConfig newConfig = (ReplicationConfig)
+            TestUtils.serializeAndReadObject(envHome, repConfig);
+
+        assertTrue(newConfig != repConfig);
+        assertEquals(newConfig.getNodeName(), "node1");
+        assertEquals(newConfig.getGroupName(), "group");
+        assertEquals(newConfig.getNodeHostPort(), "localhost:5001");
+        assertFalse
+            (newConfig.getValidateParams() == repConfig.getValidateParams());
+        assertTrue
+            (newConfig.getAllowConvert() == repConfig.getAllowConvert());
+    }
+}
diff --git a/test/com/sleepycat/je/rep/ReplicationGroupTest.java b/test/com/sleepycat/je/rep/ReplicationGroupTest.java
new file mode 100644
index 0000000..29b6d8a
--- /dev/null
+++ b/test/com/sleepycat/je/rep/ReplicationGroupTest.java
@@ -0,0 +1,138 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.hamcrest.core.AnyOf.anyOf;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+
+import static com.sleepycat.je.rep.ReplicatedEnvironment.State.MASTER;
+
+import java.util.Set;
+
+import org.junit.Test;
+
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.monitor.Monitor;
+import com.sleepycat.je.rep.monitor.MonitorConfig;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+public class ReplicationGroupTest extends RepTestBase {
+
+    @SuppressWarnings("null")
+    @Test
+    public void testBasic()
+        throws InterruptedException {
+
+        final int dataNodeSize = groupSize - 1;
+        final int electableNodeSize = groupSize - 2;
+        final int persistentNodeSize = groupSize - 1;
+
+        final ReplicationConfig sConfig =
+            repEnvInfo[groupSize-2].getRepConfig();
+        sConfig.setNodeType(NodeType.SECONDARY);
+
+        createGroup(dataNodeSize);
+
+        ReplicationConfig rConfig = repEnvInfo[groupSize-1].getRepConfig();
+        /* RepNetConfig needs to come from an open environment */
+        ReplicationConfig r0Config = repEnvInfo[0].getEnv().getRepConfig();
+        rConfig.setNodeType(NodeType.MONITOR);
+        MonitorConfig monConfig = new MonitorConfig();
+        monConfig.setNodeName(rConfig.getNodeName());
+        monConfig.setGroupName(rConfig.getGroupName());
+        monConfig.setNodeHostPort(rConfig.getNodeHostPort());
+        monConfig.setHelperHosts(rConfig.getHelperHosts());
+        monConfig.setRepNetConfig(r0Config.getRepNetConfig());
+
+
+        new Monitor(monConfig).register();
+
+        for (int i=0; i < dataNodeSize; i++) {
+            final ReplicatedEnvironment env = repEnvInfo[i].getEnv();
+            final boolean isMaster = (env.getState() == MASTER);
+            final int targetGroupSize =
+                isMaster ? groupSize : persistentNodeSize;
+            ReplicationGroup group = null;
+            for (int j=0; j < 100; j++) {
+                group = env.getGroup();
+                if (group.getNodes().size() == targetGroupSize) {
+                    break;
+                }
+                /* Wait for the replica to catch up. */
+                Thread.sleep(1000);
+            }
+            assertEquals("Nodes", targetGroupSize, group.getNodes().size());
+            assertEquals(RepTestUtils.TEST_REP_GROUP_NAME, group.getName());
+            logger.info(group.toString());
+
+            for (RepEnvInfo rinfo : repEnvInfo) {
+                final ReplicationConfig repConfig = rinfo.getRepConfig();
+                ReplicationNode member =
+                    group.getMember(repConfig.getNodeName());
+                if (!isMaster &&
+                    repConfig.getNodeType().isSecondary()) {
+                    assertNull("Member", member);
+                } else {
+                    assertNotNull("Member", member);
+                    assertEquals(repConfig.getNodeName(), member.getName());
+                    assertEquals(repConfig.getNodeType(), member.getType());
+                    assertEquals(repConfig.getNodeSocketAddress(),
+                                 member.getSocketAddress());
+                }
+            }
+
+            final Set<ReplicationNode> electableNodes =
+                group.getElectableNodes();
+            for (final ReplicationNode n : electableNodes) {
+                assertEquals(NodeType.ELECTABLE, n.getType());
+            }
+            assertEquals("Electable nodes",
+                         electableNodeSize, electableNodes.size());
+
+            final Set<ReplicationNode> monitorNodes = group.getMonitorNodes();
+            for (final ReplicationNode n : monitorNodes) {
+                assertEquals(NodeType.MONITOR, n.getType());
+            }
+            assertEquals("Monitor nodes", 1, monitorNodes.size());
+
+            final Set<ReplicationNode> secondaryNodes =
+                group.getSecondaryNodes();
+            for (final ReplicationNode n : secondaryNodes) {
+                assertEquals(NodeType.SECONDARY, n.getType());
+            }
+            assertEquals("Secondary nodes",
+                         isMaster ? 1 : 0,
+                         secondaryNodes.size());
+
+            final Set<ReplicationNode> dataNodes = group.getDataNodes();
+            for (final ReplicationNode n : dataNodes) {
+                if (isMaster) {
+                    assertThat(n.getType(),
+                               anyOf(is(NodeType.ELECTABLE),
+                                     is(NodeType.SECONDARY)));
+                } else {
+                    assertEquals(NodeType.ELECTABLE, n.getType());
+                }
+            }
+            assertEquals("Data nodes",
+                         isMaster ? dataNodeSize : electableNodeSize,
+                         dataNodes.size());
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/ReplicationNetworkConfigTest.java b/test/com/sleepycat/je/rep/ReplicationNetworkConfigTest.java
new file mode 100644
index 0000000..d238445
--- /dev/null
+++ b/test/com/sleepycat/je/rep/ReplicationNetworkConfigTest.java
@@ -0,0 +1,1233 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_FILE;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD_CLASS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD_PARAMS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_TYPE;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_CLIENT_KEY_ALIAS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_SERVER_KEY_ALIAS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_TRUSTSTORE_FILE;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_TRUSTSTORE_TYPE;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_CIPHER_SUITES;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_PROTOCOLS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_AUTHENTICATOR;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_AUTHENTICATOR_CLASS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_AUTHENTICATOR_PARAMS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_HOST_VERIFIER;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_HOST_VERIFIER_CLASS;
+import static
+    com.sleepycat.je.rep.ReplicationSSLConfig.SSL_HOST_VERIFIER_PARAMS;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.channels.SocketChannel;
+import java.util.Properties;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.rep.impl.RepParams.ChannelTypeConfigParam;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.net.InstanceParams;
+import com.sleepycat.je.rep.net.PasswordSource;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder;
+import com.sleepycat.je.rep.utilint.net.SSLChannelFactory;
+import
com.sleepycat.je.rep.utilint.net.SSLMirrorAuthenticator; +import com.sleepycat.je.rep.utilint.net.SSLMirrorHostVerifier; +import com.sleepycat.je.rep.utilint.net.SimpleChannelFactory; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class ReplicationNetworkConfigTest extends TestBase { + + private Properties stdProps; + + @Before + public void setup() { + stdProps = new Properties(); + RepTestUtils.setUnitTestSSLProperties(stdProps); + } + + @Test + public void testChannelType() { + /* default constructor initializes to "basic" */ + ReplicationNetworkConfig defRnc = + ReplicationNetworkConfig.createDefault(); + assertEquals(ChannelTypeConfigParam.BASIC, defRnc.getChannelType()); + + /* property constructor initializes to "basic" */ + Properties props = new Properties(); + ReplicationNetworkConfig empRnc = + ReplicationNetworkConfig.create(props); + assertEquals(ChannelTypeConfigParam.BASIC, empRnc.getChannelType()); + + /* Use property constructor to set to a value */ + props.setProperty(ReplicationNetworkConfig.CHANNEL_TYPE, + ChannelTypeConfigParam.BASIC); + ReplicationNetworkConfig rnc = + ReplicationNetworkConfig.create(props); + assertEquals(rnc.getChannelType(), ChannelTypeConfigParam.BASIC); + + /* Make sure other valid types work */ + ReplicationNetworkConfig rsc = new ReplicationSSLConfig(); + assertEquals(rsc.getChannelType(), ChannelTypeConfigParam.SSL); + + /* Make sure invalid values are rejected */ + props.setProperty(ReplicationNetworkConfig.CHANNEL_TYPE, "xyz"); + try { + ReplicationNetworkConfig.create(props); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testLogName() { + final String testLogName = "RNC"; + + /* default constructor initializes to empty */ + ReplicationNetworkConfig defRnc = + ReplicationNetworkConfig.createDefault(); + assertEmpty(defRnc.getLogName()); + + /* property constructor initializes to empty */ + Properties props = new Properties(); + ReplicationNetworkConfig empRnc = + ReplicationNetworkConfig.create(props); + assertEmpty(empRnc.getLogName()); + + /* Use property constructor to set to a value */ + props.setProperty(ReplicationNetworkConfig.CHANNEL_LOG_NAME, + testLogName); + ReplicationNetworkConfig rnc = + ReplicationNetworkConfig.create(props); + assertEquals(rnc.getLogName(), testLogName); + + /* Make sure we can clear it */ + rnc.setLogName(""); + assertEmpty(rnc.getLogName()); + + /* Make sure we can set it */ + rnc.setLogName(testLogName); + assertEquals(rnc.getLogName(), testLogName); + } + + @Test + public void testDCFactoryClass() { + + /* default constructor initializes to "" */ + ReplicationNetworkConfig defRnc = + ReplicationNetworkConfig.createDefault(); + assertEmpty(defRnc.getChannelFactoryClass()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationNetworkConfig empRnc = + ReplicationNetworkConfig.create(props); + assertEmpty(empRnc.getChannelFactoryClass()); + + final String dummyClass = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(ReplicationNetworkConfig.CHANNEL_FACTORY_CLASS, + dummyClass); + ReplicationNetworkConfig rnc = + ReplicationNetworkConfig.create(props); + assertEquals(rnc.getChannelFactoryClass(), dummyClass); + + /* Make sure we can clear it */ + rnc.setChannelFactoryClass(""); + assertEmpty(rnc.getChannelFactoryClass()); + + /* Make sure we can set it */ + 
rnc.setChannelFactoryClass(dummyClass); + assertEquals(rnc.getChannelFactoryClass(), dummyClass); + + } + + @Test + public void testDCFactoryParams() { + + /* default constructor initializes to "" */ + ReplicationNetworkConfig defRnc = + ReplicationNetworkConfig.createDefault(); + assertEmpty(defRnc.getChannelFactoryParams()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationNetworkConfig empRnc = + ReplicationNetworkConfig.create(props); + assertEmpty(empRnc.getChannelFactoryParams()); + + final String dummyParams = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(ReplicationNetworkConfig.CHANNEL_FACTORY_PARAMS, + dummyParams); + ReplicationNetworkConfig rnc = ReplicationNetworkConfig.create(props); + assertEquals(rnc.getChannelFactoryParams(), dummyParams); + + /* Make sure we can clear it */ + rnc.setChannelFactoryParams(""); + assertEmpty(rnc.getChannelFactoryParams()); + + /* Make sure we can set it */ + rnc.setChannelFactoryParams(dummyParams); + assertEquals(rnc.getChannelFactoryParams(), dummyParams); + + } + + @Test + public void testSSLKeyStore() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLKeyStore()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLKeyStore()); + + final String dummyKS = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_KEYSTORE_FILE, dummyKS); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLKeyStore(), dummyKS); + + /* Make sure we can clear it */ + rsc.setSSLKeyStore(""); + assertEmpty(rsc.getSSLKeyStore()); + + /* Make sure we can set it */ + rsc.setSSLKeyStore(dummyKS); + assertEquals(rsc.getSSLKeyStore(), dummyKS); + + } + + @Test + public void testSSLKeyStoreType() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLKeyStoreType()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLKeyStoreType()); + + final String dummyType = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_KEYSTORE_TYPE, dummyType); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLKeyStoreType(), dummyType); + + /* Make sure we can clear it */ + rsc.setSSLKeyStoreType(""); + assertEmpty(rsc.getSSLKeyStoreType()); + + /* Make sure we can set it */ + rsc.setSSLKeyStoreType(dummyType); + assertEquals(rsc.getSSLKeyStoreType(), dummyType); + + } + + @Test + public void testSSLKeyStorePassword() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLKeyStorePassword()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLKeyStorePassword()); + + final String dummyPassword = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_KEYSTORE_PASSWORD, dummyPassword); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLKeyStorePassword(), dummyPassword); + + /* Make sure we 
can clear it */ + rsc.setSSLKeyStorePassword(""); + assertEmpty(rsc.getSSLKeyStorePassword()); + + /* Make sure we can set it */ + rsc.setSSLKeyStorePassword(dummyPassword); + assertEquals(rsc.getSSLKeyStorePassword(), dummyPassword); + + } + + @Test + public void testSSLKeyStorePasswordClass() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLKeyStorePasswordClass()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLKeyStorePasswordClass()); + + final String dummyPasswordClass = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_KEYSTORE_PASSWORD_CLASS, dummyPasswordClass); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLKeyStorePasswordClass(), dummyPasswordClass); + + /* Make sure we can clear it */ + rsc.setSSLKeyStorePasswordClass(""); + assertEmpty(rsc.getSSLKeyStorePasswordClass()); + + /* Make sure we can set it */ + rsc.setSSLKeyStorePasswordClass(dummyPasswordClass); + assertEquals(rsc.getSSLKeyStorePasswordClass(), dummyPasswordClass); + + } + + @Test + public void testSSLKeyStorePasswordParams() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLKeyStorePasswordParams()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLKeyStorePasswordParams()); + + final String dummyPasswordParams = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_KEYSTORE_PASSWORD_PARAMS, dummyPasswordParams); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLKeyStorePasswordParams(), dummyPasswordParams); + + /* Make sure we can clear it */ + rsc.setSSLKeyStorePasswordParams(""); + assertEmpty(rsc.getSSLKeyStorePasswordParams()); + + /* Make sure we can set it */ + rsc.setSSLKeyStorePasswordParams(dummyPasswordParams); + assertEquals(rsc.getSSLKeyStorePasswordParams(), dummyPasswordParams); + + } + + @Test + public void testSSLServerKeyAlias() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLServerKeyAlias()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLServerKeyAlias()); + + final String dummyAlias = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_SERVER_KEY_ALIAS, dummyAlias); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLServerKeyAlias(), dummyAlias); + + /* Make sure we can clear it */ + rsc.setSSLServerKeyAlias(""); + assertEmpty(rsc.getSSLServerKeyAlias()); + + /* Make sure we can set it */ + rsc.setSSLServerKeyAlias(dummyAlias); + assertEquals(rsc.getSSLServerKeyAlias(), dummyAlias); + + } + + @Test + public void testSSLClientKeyAlias() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLClientKeyAlias()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new 
ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLClientKeyAlias()); + + final String dummyAlias = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_CLIENT_KEY_ALIAS, dummyAlias); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLClientKeyAlias(), dummyAlias); + + /* Make sure we can clear it */ + rsc.setSSLClientKeyAlias(""); + assertEmpty(rsc.getSSLClientKeyAlias()); + + /* Make sure we can set it */ + rsc.setSSLClientKeyAlias(dummyAlias); + assertEquals(rsc.getSSLClientKeyAlias(), dummyAlias); + + } + + @Test + public void testSSLTrustStore() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLTrustStore()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLTrustStore()); + + final String dummyTS = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_TRUSTSTORE_FILE, dummyTS); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLTrustStore(), dummyTS); + + /* Make sure we can clear it */ + rsc.setSSLTrustStore(""); + assertEmpty(rsc.getSSLTrustStore()); + + /* Make sure we can set it */ + rsc.setSSLTrustStore(dummyTS); + assertEquals(rsc.getSSLTrustStore(), dummyTS); + + } + + @Test + public void testSSLTrustStoreType() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLTrustStoreType()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLTrustStoreType()); + + final String dummyType = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_TRUSTSTORE_TYPE, dummyType); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLTrustStoreType(), dummyType); + + /* Make sure we can clear it */ + rsc.setSSLTrustStoreType(""); + assertEmpty(rsc.getSSLTrustStoreType()); + + /* Make sure we can set it */ + rsc.setSSLTrustStoreType(dummyType); + assertEquals(rsc.getSSLTrustStoreType(), dummyType); + + } + + @Test + public void testSSLCipherSuites() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLCipherSuites()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLCipherSuites()); + + final String dummySuites = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_CIPHER_SUITES, dummySuites); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLCipherSuites(), dummySuites); + + /* Make sure we can clear it */ + rsc.setSSLCipherSuites(""); + assertEmpty(rsc.getSSLCipherSuites()); + + /* Make sure we can set it */ + rsc.setSSLCipherSuites(dummySuites); + assertEquals(rsc.getSSLCipherSuites(), dummySuites); + + } + + @Test + public void testSSLProtocols() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLProtocols()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + 
ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLProtocols()); + + final String dummyProtocols = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_PROTOCOLS, dummyProtocols); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLProtocols(), dummyProtocols); + + /* Make sure we can clear it */ + rsc.setSSLProtocols(""); + assertEmpty(rsc.getSSLProtocols()); + + /* Make sure we can set it */ + rsc.setSSLProtocols(dummyProtocols); + assertEquals(rsc.getSSLProtocols(), dummyProtocols); + + } + + @Test + public void testSSLAuthenticator() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLAuthenticator()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLAuthenticator()); + + final String mirrorAuthenticator = "mirror"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_AUTHENTICATOR, mirrorAuthenticator); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLAuthenticator(), mirrorAuthenticator); + + /* Make sure we can clear it */ + rsc.setSSLAuthenticator(""); + assertEmpty(rsc.getSSLAuthenticator()); + + /* Make sure we can set it */ + rsc.setSSLAuthenticator(mirrorAuthenticator); + assertEquals(rsc.getSSLAuthenticator(), mirrorAuthenticator); + + /* Check that dnmatch works */ + final String dnmatchAuthenticator = "dnmatch(foo)"; + rsc.setSSLAuthenticator(dnmatchAuthenticator); + assertEquals(rsc.getSSLAuthenticator(), dnmatchAuthenticator); + + /* Check that invalid dnmatch is signaled */ + try { + rsc.setSSLAuthenticator("dnmatch(foo"); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + + /* Check that invalid name is signaled */ + try { + rsc.setSSLAuthenticator("foo"); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testSSLAuthenticatorClass() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLAuthenticatorClass()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLAuthenticatorClass()); + + final String dummyAuthenticatorClass = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_AUTHENTICATOR_CLASS, dummyAuthenticatorClass); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLAuthenticatorClass(), dummyAuthenticatorClass); + + /* Make sure we can clear it */ + rsc.setSSLAuthenticatorClass(""); + assertEmpty(rsc.getSSLAuthenticatorClass()); + + /* Make sure we can set it */ + rsc.setSSLAuthenticatorClass(dummyAuthenticatorClass); + assertEquals(rsc.getSSLAuthenticatorClass(), dummyAuthenticatorClass); + + } + + @Test + public void testSSLAuthenticatorParams() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLAuthenticatorParams()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLAuthenticatorParams()); + + final String dummyParams 
= "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_AUTHENTICATOR_PARAMS, dummyParams); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLAuthenticatorParams(), dummyParams); + + /* Make sure we can clear it */ + rsc.setSSLAuthenticatorParams(""); + assertEmpty(rsc.getSSLAuthenticatorParams()); + + /* Make sure we can set it */ + rsc.setSSLAuthenticatorParams(dummyParams); + assertEquals(rsc.getSSLAuthenticatorParams(), dummyParams); + + } + + @Test + public void testSSLAuthenticatorConflict() { + + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE)); + rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD)); + rsc.setSSLAuthenticator("dnmatch(foo)"); + rsc.setSSLHostVerifierClass(SSLMirrorAuthenticator.class.getName()); + + /* Make sure that conflict is detected */ + try { + DataChannelFactoryBuilder.construct(rsc); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testSSLHostVerifier() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLHostVerifier()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLHostVerifier()); + + final String mirrorVerifier = "mirror"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_HOST_VERIFIER, mirrorVerifier); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLHostVerifier(), mirrorVerifier); + + /* Make sure we can clear it */ + rsc.setSSLHostVerifier(""); + assertEmpty(rsc.getSSLHostVerifier()); + + /* Make sure we can set it */ + rsc.setSSLHostVerifier(mirrorVerifier); + assertEquals(rsc.getSSLHostVerifier(), mirrorVerifier); + + /* Check that other options work */ + final String dnmatchVerifier = "dnmatch(foo)"; + rsc.setSSLHostVerifier(dnmatchVerifier); + assertEquals(rsc.getSSLHostVerifier(), dnmatchVerifier); + + final String hostnameVerifier = "hostname"; + rsc.setSSLHostVerifier(hostnameVerifier); + assertEquals(rsc.getSSLHostVerifier(), hostnameVerifier); + + /* Make sure that invalid choices are detected */ + try { + rsc.setSSLHostVerifier("foo"); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + + /* Make sure that invalid dnmatch syntax is detected */ + try { + rsc.setSSLHostVerifier("dnmatch(foo"); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testSSLHostVerifierClass() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLHostVerifierClass()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLHostVerifierClass()); + + final String dummyClass = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_HOST_VERIFIER_CLASS, dummyClass); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLHostVerifierClass(), dummyClass); + + /* Make sure we can clear it */ + rsc.setSSLHostVerifierClass(""); + assertEmpty(rsc.getSSLHostVerifierClass()); + + /* Make sure we can set it */ + rsc.setSSLHostVerifierClass(dummyClass); + 
assertEquals(rsc.getSSLHostVerifierClass(), dummyClass); + + } + + @Test + public void testSSLHostVerifierParams() { + + /* default constructor initializes to "" */ + ReplicationSSLConfig defRsc = new ReplicationSSLConfig(); + assertEmpty(defRsc.getSSLHostVerifierParams()); + + /* property constructor initializes to "" */ + Properties props = new Properties(); + ReplicationSSLConfig empRsc = new ReplicationSSLConfig(props); + assertEmpty(empRsc.getSSLHostVerifierParams()); + + final String dummyParams = "xyz"; + + /* Use property constructor to set to a value */ + props.setProperty(SSL_HOST_VERIFIER_PARAMS, dummyParams); + ReplicationSSLConfig rsc = new ReplicationSSLConfig(props); + assertEquals(rsc.getSSLHostVerifierParams(), dummyParams); + + /* Make sure we can clear it */ + rsc.setSSLHostVerifierParams(""); + assertEmpty(rsc.getSSLHostVerifierParams()); + + /* Make sure we can set it */ + rsc.setSSLHostVerifierParams(dummyParams); + assertEquals(rsc.getSSLHostVerifierParams(), dummyParams); + + } + + @Test + public void testSSLHostVerifierConflict() { + + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE)); + rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD)); + rsc.setSSLHostVerifier("dnmatch(foo)"); + rsc.setSSLHostVerifierClass(SSLMirrorHostVerifier.class.getName()); + + /* Make sure that conflict is detected */ + try { + DataChannelFactoryBuilder.construct(rsc); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testSetConfigParam() { + + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + assertEmpty(rsc.getSSLHostVerifierParams()); + + final String dummyParams = "xyz"; + rsc.setConfigParam(SSL_HOST_VERIFIER_PARAMS, dummyParams); + assertEquals(rsc.getSSLHostVerifierParams(), dummyParams); + + rsc.setConfigParam(SSL_HOST_VERIFIER_PARAMS, ""); + assertEmpty(rsc.getSSLHostVerifierParams()); + } + + @Test + public void testBasicFactory() { + + ReplicationNetworkConfig rnc = new ReplicationBasicConfig(); + DataChannelFactory factory = + DataChannelFactoryBuilder.construct(rnc); + assertTrue(factory instanceof SimpleChannelFactory); + } + + @Test + public void testBasicFactoryDefault() { + + ReplicationNetworkConfig rnc = ReplicationNetworkConfig.createDefault(); + DataChannelFactory factory = + DataChannelFactoryBuilder.construct(rnc); + assertTrue(factory instanceof SimpleChannelFactory); + } + + @Test + public void testSSLFactory() { + + ReplicationNetworkConfig rnc = new ReplicationSSLConfig(); + DataChannelFactory factory = + DataChannelFactoryBuilder.construct(rnc); + assertTrue(factory instanceof SSLChannelFactory); + } + + @Test + public void testSSLConfigNoKSPW() { + + /* Keystore without a password */ + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE)); + + try { + DataChannelFactoryBuilder.construct(rsc); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testSSLConfigBadKSPW() { + + /* Keystore with the wrong password */ + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE)); + rsc.setSSLKeyStorePassword(stdProps.getProperty( + SSL_KEYSTORE_PASSWORD) + "XXX"); + + try { + DataChannelFactoryBuilder.construct(rsc); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testSSLConfigKSPWSourceNoCtor() { 
+
+        /*
+         * password source class doesn't have a ctor with the expected
+         * signature.
+         */
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE));
+        rsc.setSSLKeyStorePasswordClass(String.class.getName());
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
+            fail("Expected exception");
+        } catch (IllegalArgumentException iae) {
+        }
+    }
+
+    @Test
+    public void testSSLConfigKSPWSourceNotImplemented() {
+
+        /*
+         * password source class doesn't implement PasswordSource
+         */
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE));
+        rsc.setSSLKeyStorePasswordClass(DummyFactory.class.getName());
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
+            fail("Expected exception");
+        } catch (IllegalArgumentException iae) {
+        }
+    }
+
+    @Test
+    public void testSSLConfigBadKSNotExist() {
+
+        /* Keystore does not exist */
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore("/tmp/ThisFileShouldNotExist");
+        rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD));
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+        }
+    }
+
+    @Test
+    public void testSSLConfigBadKSNotKS() throws IOException {
+
+        /* "Keystore" is not a keystore */
+
+        File bogusKS = makeBogusKeyStore();
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore(bogusKS.getPath());
+        rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD));
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+        }
+    }
+
+    @Test
+    public void testSSLConfigBadTSNotExist() {
+
+        /* Truststore does not exist */
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE));
+        rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD));
+        rsc.setSSLTrustStore("/tmp/ThisFileShouldNotExist");
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+        }
+    }
+
+    @Test
+    public void testSSLConfigBadTSNotTS() throws IOException {
+
+        /* "truststore" is not a truststore */
+
+        File bogusTS = makeBogusKeyStore();
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE));
+        rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD));
+        rsc.setSSLTrustStore(bogusTS.getPath());
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+        }
+    }
+
+    @Test
+    public void testSSLConfigBadCiphers() {
+
+        /* No valid cipher suites */
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE));
+        rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD));
+        rsc.setSSLCipherSuites("BlackMagic");
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
+            fail("expected exception");
+        } catch (IllegalArgumentException iae) {
+        }
+    }
+
+    @Test
+    public void testSSLConfigBadProtocols() {
+
+        /* No valid protocols */
+        ReplicationSSLConfig rsc = new ReplicationSSLConfig();
+        rsc.setSSLKeyStore(stdProps.getProperty(SSL_KEYSTORE_FILE));
+        rsc.setSSLKeyStorePassword(stdProps.getProperty(SSL_KEYSTORE_PASSWORD));
+        rsc.setSSLProtocols("TLSv9");
+
+        try {
+            DataChannelFactoryBuilder.construct(rsc);
fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testCustomFactory() { + + ReplicationBasicConfig rbc = new ReplicationBasicConfig(); + final String constructParams = "abc"; + + rbc.setChannelFactoryClass(DummyFactory.class.getName()); + rbc.setChannelFactoryParams(constructParams); + + DataChannelFactory factory = DataChannelFactoryBuilder.construct(rbc); + DummyFactory constructFactory = (DummyFactory) factory; + assertEquals(constructFactory.getParams(), constructParams); + } + + @Test + public void testBadCustomFactoryNoCtor() { + + ReplicationBasicConfig rbc = new ReplicationBasicConfig(); + + /* + * factory class doesn't have a ctor with the expected signature. + */ + rbc.setChannelFactoryClass(String.class.getName()); + + try { + DataChannelFactoryBuilder.construct(rbc); + fail("Expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testBadCustomFactoryNotImplemented() { + + ReplicationBasicConfig rbc = new ReplicationBasicConfig(); + + /* + * factory class doesn't implement DataChannelFactory + */ + rbc.setChannelFactoryClass(DummySource.class.getName()); + + try { + DataChannelFactoryBuilder.construct(rbc); + fail("Expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + public void testBasicClone() { + + /* build the initial config */ + ReplicationBasicConfig rbc = new ReplicationBasicConfig(); + final String constructParams = "abc"; + + /* set a representative sample of properties */ + rbc.setChannelFactoryClass(DummyFactory.class.getName()); + rbc.setChannelFactoryParams(constructParams); + DataChannelFactory factory = DataChannelFactoryBuilder.construct(rbc); + + /* make a clone */ + ReplicationBasicConfig copyRbc = rbc.clone(); + + /* clone should produce a distinct object */ + assertFalse(copyRbc == rbc); + + /* Check one of the properties */ + assertEquals(constructParams, copyRbc.getChannelFactoryParams()); + } + + public void testSSLClone() { + + /* build the initial config */ + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + final String constructParams = "abc"; + final String pass = "hello"; + PasswordSource pwSource = new DummySource(pass); + + /* set a representative sample of properties */ + rsc.setChannelFactoryClass(DummyFactory.class.getName()); + rsc.setChannelFactoryParams(constructParams); + rsc.setSSLKeyStorePasswordSource(pwSource); + DataChannelFactory factory = DataChannelFactoryBuilder.construct(rsc); + + /* make a clone */ + ReplicationSSLConfig copyRsc = rsc.clone(); + + /* clone should produce a distinct object */ + assertFalse(copyRsc == rsc); + + /* Check one of the properties */ + assertEquals(constructParams, copyRsc.getChannelFactoryParams()); + + /* The password source should be kept */ + assertTrue(copyRsc.getSSLKeyStorePasswordSource() == pwSource); + } + + public void testApplyRNP() { + + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + Properties props = new Properties(); + final String inputParams = "abc"; + + props.setProperty(SSL_HOST_VERIFIER_PARAMS, inputParams); + rsc.applyRepNetProperties(props); + String outputParams = rsc.getSSLHostVerifierParams(); + assertEquals(inputParams, outputParams); + } + + public void testApplyRNPReject() { + + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + Properties props = new Properties(); + final String inputParams = "abc"; + final String badProp = "not.a.property"; + final String repProp = ReplicationConfig.NODE_NAME; + + 
props.setProperty(SSL_HOST_VERIFIER_PARAMS, inputParams); + props.setProperty(badProp, badProp); + props.setProperty(repProp, repProp); + rsc.applyRepNetProperties(props); + + Properties rscProps = rsc.getProps(); + + String outputParams = rsc.getSSLHostVerifierParams(); + assertEquals(inputParams, outputParams); + assertNull(rscProps.getProperty(badProp)); + assertNull(rscProps.getProperty(repProp)); + } + + @Test + public void testSerializeBasic() + throws Throwable { + + ReplicationBasicConfig rbc = new ReplicationBasicConfig(); + final String constructParams = "abc"; + + /* set a representative sample of properties */ + rbc.setChannelFactoryClass(DummyFactory.class.getName()); + rbc.setChannelFactoryParams(constructParams); + + File envHome = SharedTestUtils.getTestDir(); + ReplicationBasicConfig newRbc = (ReplicationBasicConfig) + TestUtils.serializeAndReadObject(envHome, rbc); + + /* clone should produce a distinct object */ + assertFalse(newRbc == rbc); + + /* Check one of the properties */ + assertEquals(constructParams, newRbc.getChannelFactoryParams()); + } + + @Test + public void testSerializeSSL() + throws Throwable { + + ReplicationSSLConfig rsc = new ReplicationSSLConfig(); + final String constructParams = "abc"; + final String pass = "hello"; + PasswordSource pwSource = new DummySource(pass); + + /* set a representative sample of properties */ + rsc.setChannelFactoryClass(DummyFactory.class.getName()); + rsc.setChannelFactoryParams(constructParams); + rsc.setSSLKeyStorePasswordSource(pwSource); + + File envHome = SharedTestUtils.getTestDir(); + ReplicationSSLConfig newRsc = (ReplicationSSLConfig) + TestUtils.serializeAndReadObject(envHome, rsc); + + /* clone should produce a distinct object */ + assertFalse(newRsc == rsc); + + /* Check one of the properties */ + assertEquals(constructParams, newRsc.getChannelFactoryParams()); + + /* The password source should be discarded */ + assertNull(newRsc.getSSLKeyStorePasswordSource()); + } + + File makeBogusKeyStore() throws IOException { + final File testDir = SharedTestUtils.getTestDir(); + final File bogusKS = new File(testDir.getPath(), "NotAKeyStore"); + final FileOutputStream fos = new FileOutputStream(bogusKS); + final byte[] someData = new byte[1000]; + fos.write(someData); + fos.close(); + return bogusKS; + } + + + public static class DummyFactory implements DataChannelFactory { + private final String params; + + public DummyFactory(String param) { + this.params = param; + } + + public DummyFactory(InstanceParams params) { + this.params = params.getClassParams(); + } + + public String getParams() { + return params; + } + + @Override + public DataChannel acceptChannel(SocketChannel socketChannel) { + return null; + } + + @Override + public DataChannel connect(InetSocketAddress addr, + ConnectOptions connectOptions) { + return null; + } + } + + public static class DummySource implements PasswordSource { + private final String password; + + public DummySource(String pass) { + password = pass; + } + + public DummySource(InstanceParams params) { + password = params.getClassParams(); + } + + public String getPasswordString() { + return password; + } + + @Override + public char[] getPassword() { + return password.toCharArray(); + } + } + + private void assertEmpty(String value) { + assertTrue("".equals(value)); + } +} diff --git a/test/com/sleepycat/je/rep/ReplicationRateStatsTest.java b/test/com/sleepycat/je/rep/ReplicationRateStatsTest.java new file mode 100644 index 0000000..2670316 --- /dev/null +++ 
b/test/com/sleepycat/je/rep/ReplicationRateStatsTest.java @@ -0,0 +1,535 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static com.sleepycat.je.rep.NoConsistencyRequiredPolicy.NO_CONSISTENCY; +import static com.sleepycat.util.test.GreaterThan.greaterThan; +import static org.hamcrest.CoreMatchers.not; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.hamcrest.Matcher; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.TestHookAdapter; + +/** Test statistics that are used to measure replication rates. */ +public class ReplicationRateStatsTest extends RepTestBase { + + private UpdateThread updateThread; + private RepEnvInfo masterInfo; + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + + /* Add a secondary node */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + + /* + * Use a shorter interval for collecting statistics, to better test any + * anomalies that might introduce and to make it easier to audit the + * output + */ + for (RepEnvInfo i : repEnvInfo) { + i.getEnvConfig().setConfigParam( + EnvironmentConfig.STATS_COLLECT_INTERVAL, "5 s"); + } + } + + @Override + @After + public void tearDown() + throws Exception { + + Replica.setInitialReplayHook(null); + if (updateThread != null) { + updateThread.shutdown(); + } + super.tearDown(); + } + + /** The list of all test stages. */ + final List stages = new ArrayList<>(); + + /* Define test stages */ + + /** + * Reach a steady master VLSN rate of 100 VLSNs/second. Note that even + * though we are generating VLSNs at this rate in the application, the rate + * will sometimes be higher because of meta data VLSNs. 
+ */ + final Stage STARTUP = new Stage("STARTUP") { + @Override + void init() { + createGroup(); + assertEquals(State.MASTER, repEnvInfo[0].getEnv().getState()); + masterInfo = findMaster(repEnvInfo); + assertSame(repEnvInfo[0], masterInfo); + updateThread = new UpdateThread(masterInfo.getEnv()); + updateThread.start(); + } + + /** Wait for node 2's VLSN rate to reach 100 VLSNs/second. */ + @Override + boolean nextIsLast(int i) { + ReplicatedEnvironmentStats stats = + masterInfo.getEnv().getRepStats(StatsConfig.DEFAULT); + Long node2VLSNRate = stats.getReplicaVLSNRateMap().get("Node2"); + return (node2VLSNRate != null) && + (node2VLSNRate > 80*60) && + (node2VLSNRate < 120*60); + } + }; + + /** Run at the 100 VLSNs/second rate. */ + final Stage ALL = new Stage("ALL") { + @Override + boolean nextIsLast(int i) { return i == 4; } + }; + + /** Close Node 2. */ + final Stage NODE2_CLOSED = new Stage("NODE2_CLOSED") { + @Override + void init() { repEnvInfo[1].closeEnv(); } + @Override + boolean nextIsLast(int i) { return i == 4; } + }; + + /** Open Node 2, inserting a delay, and let it catch up. */ + final Stage NODE2_CATCHUP = new Stage("NODE2_CATCHUP") { + + /** Reopen node 2 with a delay */ + @Override + void init() { + Replica.setInitialReplayHook(new DelayHook(100, 100)); + repEnvInfo[1].openEnv(NO_CONSISTENCY); + } + + /** Wait for node 2's VLSN rate to reach 100 VLSNs/second */ + @Override + boolean nextIsLast(int i) { + ReplicatedEnvironmentStats stats = + masterInfo.getEnv().getRepStats(StatsConfig.DEFAULT); + Long node2VLSNRate = stats.getReplicaVLSNRateMap().get("Node2"); + return (node2VLSNRate != null) && + (node2VLSNRate > 80*60) && + (node2VLSNRate < 120*60); + } + }; + + /** Increase Node 2's delay. */ + final Stage NODE2_SLOWDOWN = new Stage("NODE2_SLOWDOWN") { + @Override + void init() { + Replica.setInitialReplayHook(new DelayHook(1000, 100)); + } + }; + + /** Reduce Node 2's delay and let it catch up. */ + final Stage NODE2_CATCHUP2 = new Stage("NODE2_CATCHUP2") { + @Override + void init() { + Replica.setInitialReplayHook(new DelayHook(100, 100)); + } + + /** Wait for Node 2's VLSN rate to reach 100 VLSNs/second. */ + @Override + boolean nextIsLast(int i) { + ReplicatedEnvironmentStats stats = + masterInfo.getEnv().getRepStats(StatsConfig.DEFAULT); + Long node2VLSNRate = stats.getReplicaVLSNRateMap().get("Node2"); + return (node2VLSNRate != null) && + (node2VLSNRate > 80*60) && + (node2VLSNRate < 120*60); + } + }; + + /** + * Stop updates, and clear stats after shutting down the update thread, to + * make sure that the VLSN rate will be zero when measured next. 
+ */ + final Stage NO_UPDATES = new Stage("NO_UPDATES") { + @Override + void init() throws InterruptedException { + updateThread.shutdown(); + masterInfo.getEnv().getRepStats(StatsConfig.CLEAR); + } + }; + + /** A test stage */ + class Stage { + private final String name; + Stage(String name) { + this.name = name; + stages.add(this); + } + + /** Run the test */ + void run() throws InterruptedException { + logger.fine(this + ": Start"); + init(); + boolean nextIsLast = false; + int i; + + /* Give up after 60 iterations or seconds */ + for (i = 1; i <= 60; i++) { + boolean last = nextIsLast; + Thread.sleep(1000); + if (runIter(i, last)) { + nextIsLast = true; + } + if (last) { + logger.info(this + ": Complete after " + i + + " iterations"); + return; + } + } + fail(this + " was not complete after " + i + " iterations"); + } + + /** Before running the test */ + void init() throws InterruptedException { } + + /** + * Run an iteration of the test and return if the next iteration should + * be the last one. + */ + boolean runIter(int i, boolean last) { + for (RepEnvInfo info : repEnvInfo) { + ReplicatedEnvironment env = info.getEnv(); + if (env == null) { + continue; + } + ReplicatedEnvironmentStats stats = env.getRepStats( + last ? StatsConfig.CLEAR : StatsConfig.DEFAULT); + ReplicationRateStatsTest.this.checkStats( + this, env, stats, i); + } + return last || nextIsLast(i); + } + + /** Should the next iteration be the last one? */ + boolean nextIsLast(int i) { + return i == 9; + } + + @Override + public String toString() { + return "Stage " + name; + } + } + + /** Run the various test stages and check statistics. */ + @Test + public void testStats() + throws Exception { + + for (Stage stage : stages) { + stage.run(); + } + } + + private static class DelayHook extends TestHookAdapter { + private final long delay; + private final int every; + private int count; + DelayHook(long delay, int every) { + this.delay = delay; + this.every = every; + } + @Override + public void doHook(Message m) { + if (count > 0) { + count--; + return; + } + count = every; + if (delay > 0) { + try { + Thread.sleep(delay); + } catch (InterruptedException ignored) { + } + } + } + } + + /** Check statistics for the specified stage. 
*/ + private void checkStats(Stage stage, + ReplicatedEnvironment env, + ReplicatedEnvironmentStats stats, + int i) { + + long lastCommitVLSN = stats.getLastCommitVLSN(); + long lastCommitTimestamp = stats.getLastCommitTimestamp(); + long vlsnRate = stats.getVLSNRate(); + Map replicaDelayMap = stats.getReplicaDelayMap(); + Map replicaLastCommitTimestampMap = + stats.getReplicaLastCommitTimestampMap(); + Map replicaLastCommitVLSNMap = + stats.getReplicaLastCommitVLSNMap(); + Map replicaVLSNLagMap = stats.getReplicaVLSNLagMap(); + Map replicaVLSNRateMap = + stats.getReplicaVLSNRateMap(); + + String iter = stage + "(iter=" + i + ")"; + + /* None of these stats should have contents for a non-master */ + if (!env.getState().isMaster()) { + assertEquals("lastCommitVLSN", 0, lastCommitVLSN, 0); + assertEquals("lastCommitTimestamp", 0, lastCommitTimestamp, 0); + assertEquals("vlsnRate", 0, vlsnRate, 0); + assertIsEmpty("replicaDelayMap", replicaDelayMap); + assertIsEmpty("replicaLastCommitTimestampMap", + replicaLastCommitTimestampMap); + assertIsEmpty("replicaLastCommitVLSNMap", + replicaLastCommitVLSNMap); + assertIsEmpty("replicaVLSNLagMap", replicaVLSNLagMap); + assertIsEmpty("replicaVLSNRateMap", replicaVLSNRateMap); + return; + } + + logger.fine( + String.format( + "Replication stats for master: %s\n" + + " lastCommitVLSN: %d\n" + + " lastCommitTimestamp: %d\n" + + " vlsnRate: %d\n" + + " replicaDelayMap: %s\n" + + " replicaLastCommitTimestampMap: %s\n" + + " replicaLastCommitVLSNMap: %s\n" + + " replicaVLSNLagMap: %s\n" + + " replicaVLSNRateMap: %s", + iter, + lastCommitVLSN, + lastCommitTimestamp, + vlsnRate, + formatLongStats(replicaDelayMap), + formatLongStats(replicaLastCommitTimestampMap), + formatLongStats(replicaLastCommitVLSNMap), + formatLongStats(replicaVLSNLagMap), + formatLongStats(replicaVLSNRateMap))); + + assertThat(iter + ": lastCommitVLSN", lastCommitVLSN, greaterThan(0)); + + assertThat(iter + ": lastCommitTimestamp", lastCommitTimestamp, + greaterThan(0)); + + if (stage == NO_UPDATES) { + checkEquals(iter + ": vlsnRate", 0, vlsnRate, 0); + } else if (stage != STARTUP) { + checkEquals(iter + ": vlsnRate", 100*60, vlsnRate, 20*60); + } + + if (stage == NODE2_CLOSED) { + + /* No entries for Node 2 when it is closed */ + assertFalse(iter + ": replicaDelayMap contains entry for" + + " closed Node2", + replicaDelayMap.containsKey("Node2")); + assertFalse(iter + ": replicaLastCommitTimestampMap contains" + + " entry for closed Node2", + replicaLastCommitTimestampMap.containsKey("Node2")); + assertFalse(iter + ": replicaLastCommitVLSNMap contains entry" + + " for closed Node2", + replicaLastCommitVLSNMap.containsKey("Node2")); + assertFalse(iter + ": replicaVLSNLagMap contains entry for" + + " closed Node2", + replicaVLSNLagMap.containsKey("Node2")); + assertFalse(iter + ": replicaVLSNRateMap contains entry for" + + " closed Node2", + replicaVLSNRateMap.containsKey("Node2")); + } + + assertThatAllValues(iter + ": replicaLastCommitVLSNMap", + replicaLastCommitVLSNMap, + greaterThan(0)); + + /* + * Note that the master stats are collected after the replica ones + * so the order of collection should not result in the master VLSNs + * or timestamps being earlier than the replica ones + */ + assertThatAllValues(iter + ": replicaLastCommitVLSNMap", + replicaLastCommitVLSNMap, + not(greaterThan(lastCommitVLSN))); + assertThatAllValues(iter + ": replicaLastCommitTimestampMap", + replicaLastCommitTimestampMap, greaterThan(0)); + assertThatAllValues(iter + ": 
replicaLastCommitTimestampMap", + replicaLastCommitTimestampMap, + not(greaterThan(lastCommitTimestamp))); + + if (stage == NO_UPDATES) { + for (Entry e : replicaVLSNRateMap.entrySet()) { + checkEquals(iter + ": replicaVLSNRateMap " + e.getKey(), + 0, e.getValue(), 20*60); + } + } else if ((stage != NODE2_CATCHUP) && + (stage != NODE2_CATCHUP2)) { + for (Entry e : replicaVLSNRateMap.entrySet()) { + checkEquals(iter + ": replicaVLSNRateMap " + e.getKey(), + 100*60, e.getValue(), 20*60); + } + } + } + + private static void assertIsEmpty(String msg, Map map) { + if (!map.isEmpty()) { + fail(msg + " not empty: " + map); + } + } + + private static void assertThatAllValues( + String mapName, Map map, Matcher matcher) { + for (Entry entry : map.entrySet()) { + assertThat(mapName + ", key " + entry.getKey(), + entry.getValue(), matcher); + } + } + + /** + * Like the assertion check, but only logs if the value was not expected. + * Use this for checks for typical values where the check is not dependable + * enough to be used as a success criterion. + */ + private void checkEquals(String msg, + double expected, + double actual, + double delta) { + if ((actual < (expected - delta)) || (actual > (expected + delta))) { + logger.warning(msg + " expected:<" + expected + ">, was:<" + + actual + ">"); + } + } + + private static String formatLongStats(Map map) { + if (map.isEmpty()) { + return "empty"; + } + final StringBuilder sb = new StringBuilder(); + boolean first = true; + for (final Entry entry : map.entrySet()) { + if (!first) { + sb.append(", "); + } else { + first = false; + } + sb.append(entry); + } + return sb.toString(); + } + + /** Create 100 VLSNs per second */ + private class UpdateThread extends Thread { + private final long period = 100; + private final Environment env; + private volatile boolean shutdown; + volatile Throwable exception; + + UpdateThread(Environment env) { + this.env = env; + } + + void shutdown() + throws InterruptedException { + + shutdown = true; + join(1000); + if (exception != null) { + throw new RuntimeException("Unexpected exception: " + exception, + exception); + } + assertFalse("isAlive", isAlive()); + } + + public void run() { + final Database db; + Transaction txn1 = env.beginTransaction(null, null); + try { + db = env.openDatabase(txn1, TEST_DB_NAME, dbconfig); + txn1.commit(); + txn1 = null; + } finally { + if (txn1 != null) { + txn1.abort(); + } + } + try { + TransactionConfig tc = new TransactionConfig().setDurability( + Durability.COMMIT_NO_SYNC); + long next = System.currentTimeMillis() + period; + while (!shutdown) { + int k = 0; + for (int i = 0; i < 5; i++) { + Transaction txn = env.beginTransaction(null, tc); + try { + for (int j = 0; j < 1; j++) { + IntegerBinding.intToEntry(k, key); + LongBinding.longToEntry(k, data); + db.put(txn, key, data); + k++; + } + txn.commit(); + txn = null; + } finally { + if (txn != null) { + txn.abort(); + } + } + } + final long wait = next - System.currentTimeMillis(); + next += period; + if (wait > 0) { + Thread.sleep(wait); + } + } + } catch (Throwable t) { + exception = t; + } finally { + db.close(); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/SecondaryNodeTest.java b/test/com/sleepycat/je/rep/SecondaryNodeTest.java new file mode 100644 index 0000000..bd24d03 --- /dev/null +++ b/test/com/sleepycat/je/rep/SecondaryNodeTest.java @@ -0,0 +1,515 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import static com.sleepycat.je.rep.impl.RepParams.COMMIT_TO_NETWORK; +import static com.sleepycat.je.rep.impl.RepParams.TEST_JE_VERSION; +import static com.sleepycat.je.rep.impl.RepParams.TEST_REPLICA_DELAY; + +import java.util.Arrays; + +import org.junit.Test; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.QuorumPolicy; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.Feeder; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.TestHookAdapter; +import com.sleepycat.je.utilint.WaitTestHook; + +/** Specific tests for secondary nodes. */ +public class SecondaryNodeTest extends RepTestBase { + + public SecondaryNodeTest() { + + /* Use a smaller group size, since we'll be adding secondaries */ + groupSize = 3; + } + + /* Tests */ + + /** Add a secondary to a group, shutdown secondary, and restart. */ + @Test + public void testJoinLeaveJoin() + throws Exception { + + /* Create initial group */ + createGroup(); + + /* Add the secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo.openEnv(); + + /* Check that the node ID is assigned */ + int nodeId = secondaryInfo.getRepNode().getNodeId(); + assertTrue("Node ID should be non-negative: " + nodeId, nodeId > 0); + + /* Populate and wait for replication to complete */ + final ReplicatedEnvironment masterRep = repEnvInfo[0].getEnv(); + populateDB(masterRep, 100); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Close and reopen the secondary */ + secondaryInfo.closeEnv(); + secondaryInfo.openEnv(); + nodeId = secondaryInfo.getRepNode().getNodeId(); + assertTrue("Node ID should be non-negative: " + nodeId, nodeId > 0); + } + + /** Add a secondary to a group, then shutdown and restart all nodes. 
*/ + @Test + public void testJoinLeaveAllJoinAll() + throws Exception { + + /* Create initial group */ + createGroup(); + + /* Add the secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo.openEnv(); + + /* Check that the node ID is assigned */ + int nodeId = secondaryInfo.getRepNode().getNodeId(); + assertTrue("Node ID should be non-negative: " + nodeId, nodeId > 0); + + /* Populate and wait for replication to complete */ + final ReplicatedEnvironment masterRep = repEnvInfo[0].getEnv(); + populateDB(masterRep, 100); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Close all members */ + closeNodes(repEnvInfo); + + /* + * Reopen all members, using a longer join wait time to allow the + * secondary to query the primaries a second time after the election is + * complete. See RepNode.MASTER_QUERY_INTERVAL. + */ + final long masterQueryInterval = 10000; + restartNodes(JOIN_WAIT_TIME + masterQueryInterval, repEnvInfo); + + nodeId = secondaryInfo.getRepNode().getNodeId(); + assertTrue("Node ID should be non-negative: " + nodeId, nodeId > 0); + } + + /** + * Add a secondary node to a group and force a new master, to test that the + * secondary node can successfully replicate with the new master. [#22980] + */ + @Test + public void testSecondaryChangeMaster() + throws Exception { + + /* Create initial group */ + createGroup(); + + /* Add secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo.openEnv(); + + /* Restart master */ + final RepEnvInfo masterInfo1 = findMasterWait(5000, repEnvInfo); + masterInfo1.closeEnv(); + masterInfo1.openEnv(); + + /* Find new master */ + final RepEnvInfo masterInfo2 = findMasterWait(5000, repEnvInfo); + assertTrue("Shouldn't have same master", masterInfo1 != masterInfo2); + + /* Write and await propagation */ + populateDB(masterInfo2.getEnv(), 100); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + } + + /** Test election and durability quorums. */ + @Test + public void testQuorums() + throws Exception { + + /* Create initial group with three members */ + createGroup(3); + + /* Add a secondary node */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo.openEnv(); + + /* + * Close one primary and confirm that quorum is maintained. Note that + * 2/3 is a majority if the secondary is not counted, but 2/4 is not, + * so closing one primary should confirm that the secondary is not + * being included in the quorum count. + */ + repEnvInfo[0].closeEnv(); + ReplicatedEnvironment master = + findMasterAndWaitForReplicas(5000, 1, repEnvInfo).getEnv(); + logger.info("2/3 plus secondary, master " + master.getNodeName()); + populateDB(master, TEST_DB_NAME, 0, 100, + new TransactionConfig().setDurability( + RepTestUtils.DEFAULT_DURABILITY)); + + /* Close the secondary and confirm again. 
*/ + secondaryInfo.closeEnv(); + master = findMasterWait(5000, repEnvInfo).getEnv(); + logger.info("2/3 without secondary, master " + master.getNodeName()); + populateDB(master, TEST_DB_NAME, 100, 100, + new TransactionConfig().setDurability( + RepTestUtils.DEFAULT_DURABILITY)); + + /* + * Open the secondary and close another primary to confirm that the + * secondary is not being counted. + */ + secondaryInfo.openEnv(); + repEnvInfo[1].closeEnv(); + master = findMasterWait(5000, repEnvInfo).getEnv(); + logger.info("1/3 with secondary, master " + master.getNodeName()); + try { + populateDB(master, TEST_DB_NAME, 200, 100, + new TransactionConfig().setDurability( + RepTestUtils.DEFAULT_DURABILITY)); + fail("Expected exception"); + } catch (InsufficientReplicasException e) { + logger.info("Got expected exception: " + e); + } + + /* + * Reopen the last closed primary but introduce a long delay for + * acknowledgments so that its acknowledgment isn't counted, to + * confirm that the secondary is not providing an acknowledgment. + */ + repEnvInfo[1].getRepConfig().setConfigParam( + TEST_REPLICA_DELAY.getName(), "10000"); + repEnvInfo[1].openEnv(); + master = findMasterWait(5000, repEnvInfo).getEnv(); + logger.info("2/3 with delay and secondary, master " + + master.getNodeName()); + try { + populateDB(master, TEST_DB_NAME, 300, 100, + new TransactionConfig().setDurability( + RepTestUtils.DEFAULT_DURABILITY)); + fail("Expected exception"); + } catch (InsufficientAcksException e) { + logger.info("Got expected exception: " + e); + } + repEnvInfo[1].getRepConfig().setConfigParam( + TEST_REPLICA_DELAY.getName(), "0"); + + /* + * Close and restart using commitToNetwork, to test that separate code + * path. + */ + closeNodes(repEnvInfo); + for (final RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setConfigParam( + COMMIT_TO_NETWORK.getName(), "true"); + } + restartNodes(repEnvInfo[1], repEnvInfo[2]); + RepTestUtils.syncGroupToLastCommit( + new RepEnvInfo[] { repEnvInfo[1], repEnvInfo[2] }, 2); + secondaryInfo.openEnv(); + RepEnvInfo masterInfo = findMasterWait(5000, repEnvInfo); + master = masterInfo.getEnv(); + RepEnvInfo replicaInfo = (masterInfo == repEnvInfo[1]) ? + repEnvInfo[2] : + repEnvInfo[1]; + Feeder replicaFeeder = masterInfo.getRepNode().feederManager(). + getFeeder(replicaInfo.getEnv().getNodeName()); + WaitTestHook writeMessageHook = new WaitTestHook(); + logger.info("2/3 with commitToNetwork, delay, and secondary, master " + + master.getNodeName()); + try { + replicaFeeder.setWriteMessageHook(writeMessageHook); + populateDB(master, TEST_DB_NAME, 300, 100, + new TransactionConfig().setDurability( + RepTestUtils.DEFAULT_DURABILITY)); + fail("Expected exception"); + } catch (InsufficientAcksException e) { + logger.info("Got expected exception: " + e); + } finally { + writeMessageHook.stopWaiting(); + replicaFeeder.setWriteMessageHook(null); + } + for (final RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setConfigParam( + COMMIT_TO_NETWORK.getName(), "false"); + } + + /* + * Close and restart primaries, and test operations with quorum=ALL to + * make sure it does not include the secondary. 
+ */ + closeNodes(repEnvInfo); + final RepEnvInfo[] primaries = Arrays.copyOf(repEnvInfo, groupSize); + for (final RepEnvInfo primary : primaries) { + primary.setInitialElectionPolicy(QuorumPolicy.ALL); + } + master = restartNodes(primaries).getEnv(); + logger.info("ALL without secondary, master " + master.getNodeName()); + populateDB(master, TEST_DB_NAME, 0, 100, + new TransactionConfig().setDurability( + RepTestUtils.SYNC_SYNC_ALL_DURABILITY)); + } + + /** + * Test that a lagging secondary node triggers the N_MAX_REPLICA_LAG_NAME + * statistic. + */ + @Test + public void testSecondaryLag() + throws Exception { + + /* Create group with secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + createGroup(); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Clear and discard stats. */ + final RepEnvInfo masterInfo = findMaster(repEnvInfo); + final StatsConfig statsConfig = new StatsConfig().setClear(true); + masterInfo.getEnv().getRepStats(statsConfig); + + /* Configure a message delay for the secondary */ + final String secondaryName = secondaryInfo.getEnv().getNodeName(); + final Feeder secondaryFeeder = + masterInfo.getRepNode().feederManager().getFeeder(secondaryName); + try { + secondaryFeeder.setWriteMessageHook( + new TestHookAdapter() { + @Override + public void doHook(Message msg) { + try { + Thread.sleep(10); + } catch (InterruptedException e) { + } + } + }); + + /* Populate */ + populateDB(masterInfo.getEnv(), 100); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + } finally { + secondaryFeeder.setWriteMessageHook(null); + } + + /* Check stats */ + final ReplicatedEnvironmentStats stats = + masterInfo.getEnv().getRepStats(statsConfig); + assertEquals("Secondary node should be the slowest", + secondaryName, stats.getNMaxReplicaLagName()); + } + + /** + * Test setting ENV_UNKNOWN_STATE_TIMEOUT to permit reading from a + * disconnected secondary. + */ + @Test + public void testReadDisconnectedSecondary() + throws Exception { + + /* Create group with secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + createGroup(); + + /* Populate */ + final ReplicatedEnvironment master = findMaster(repEnvInfo).getEnv(); + populateDB(master, 100); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Close all nodes */ + closeNodes(repEnvInfo); + + /* Start up secondary in unknown state */ + secondaryInfo.getRepConfig() + .setConfigParam(ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, + "200 ms"); + secondaryInfo.openEnv(); + + /* Read from secondary */ + readDB(secondaryInfo.getEnv(), 100); + } + + /** Convert between ELECTABLE and SECONDARY node types. */ + @Test + public void testConvertNodeType() + throws Exception { + + createGroup(); + + /* Convert ELECTABLE to SECONDARY */ + repEnvInfo[1].closeEnv(); + repEnvInfo[1].getRepConfig().setNodeType(NodeType.SECONDARY); + try { + repEnvInfo[1].openEnv(); + fail("Convert ELECTABLE to SECONDARY should throw" + + " EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Convert ELECTABLE to SECONDARY: " + e); + } + + /* Convert SECONDARY to ELECTABLE. 
*/ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo.openEnv(); + secondaryInfo.closeEnv(); + secondaryInfo.getRepConfig().setNodeType(NodeType.ELECTABLE); + + /* Should succeed */ + secondaryInfo.openEnv(); + final ReplicationGroup group = secondaryInfo.getEnv().getGroup(); + assertEquals("Group size", + repEnvInfo.length, + group.getElectableNodes().size()); + } + + /** + * Test creating a secondary when not all replicas are up to the current + * version, or they are offline. + */ + @Test + public void testCheckCompatible() + throws Exception { + + /* Start replicas using old version that doesn't support secondaries */ + final JEVersion rgV3 = RepGroupImpl.FORMAT_VERSION_3_JE_VERSION; + final JEVersion oldVersion = + new JEVersion(String.format("%d.%d.%d", + rgV3.getMajor(), + rgV3.getMinor(), + rgV3.getPatch() - 1)); + final RepEnvInfo secondary; + setJEVersion(oldVersion, repEnvInfo); + + createGroup(); + + /* Add secondary configuration */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + secondary = repEnvInfo[repEnvInfo.length - 1]; + secondary.getRepConfig().setNodeType(NodeType.SECONDARY); + + /* Attempt to create with all nodes up */ + try { + secondary.openEnv(); + fail("Create secondary before upgrade should fail with" + + " EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Create secondary before upgrade: " + e); + } + + /* + * Shutdown nodes, and bring up a quorum, which should upgrade to the + * latest version, but need all nodes upgraded for this to succeed. + */ + closeNodes(repEnvInfo); + + setJEVersion(rgV3, repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[1]); + try { + secondary.openEnv(); + fail("Create secondary after partial upgrade should fail with" + + " EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Create secondary after partial upgrade: " + e); + } + + /* Bring up the remaining node */ + restartNodes(repEnvInfo[2]); + + /* Bring up the secondary */ + setJEVersion(rgV3, secondary); + secondary.openEnv(); + } + + /** Test that a secondary node can be a helper. */ + @Test + public void testHelper() + throws Exception { + + /* Create initial group */ + createGroup(); + + /* Add a secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo.openEnv(); + + /* Use the secondary as a helper when creating an electable node */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo electableInfo = repEnvInfo[repEnvInfo.length - 1]; + electableInfo.openEnv(secondaryInfo); + + /* Use the secondary as a helper when creating another secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo2 = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo2.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo2.openEnv(secondaryInfo); + } + + /** + * Test that it is possible to add a secondary node to a replication group + * that has lost quorum. 
+     */
+    @Test
+    public void testAddSecondaryWithNonAuthoritativeMaster()
+        throws Exception {
+
+        /* Create initial group */
+        createGroup();
+
+        /* Lose quorum */
+        repEnvInfo[1].closeEnv();
+        repEnvInfo[2].closeEnv();
+
+        /* Add a secondary */
+        repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1);
+        final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1];
+        secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY);
+        secondaryInfo.openEnv();
+    }
+
+    /* Utilities */
+
+    /** Set the JE version for the specified nodes. */
+    private void setJEVersion(final JEVersion jeVersion,
+                              final RepEnvInfo... nodes) {
+        for (final RepEnvInfo node : nodes) {
+            node.getRepConfig().setConfigParam(
+                TEST_JE_VERSION.getName(), jeVersion.toString());
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/SerializationTest.java b/test/com/sleepycat/je/rep/SerializationTest.java
new file mode 100644
index 0000000..bad155e
--- /dev/null
+++ b/test/com/sleepycat/je/rep/SerializationTest.java
@@ -0,0 +1,83 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.NotSerializableException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Map;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.serializecompatibility.SerializeUtils;
+
+/**
+ * Verify that all classes marked as serializable can be serialized and
+ * deserialized.
+ */
+public class SerializationTest {
+
+    @Before
+    public void setUp() throws Exception {
+    }
+
+    @After
+    public void tearDown() throws Exception {
+    }
+
+    /**
+     * Verifies that the classes identified by
+     * SerializeUtils.getSerializedSet() can be serialized and deserialized.
+     *
+     * The test does not currently verify that structural equality is preserved
+     * across serialization/deserialization.
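+     *
+     * If such a check is added later, the round trip might look like this
+     * (a sketch; it assumes the serialized classes implement equals):
+     * <pre>
+     *   final Object o2 = in.readObject();
+     *   assertEquals(o1, o2);
+     * </pre>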
+ */ + @Test + public void test() + throws IOException, ClassNotFoundException { + + for (Map.Entry entry : + SerializeUtils.getSerializedSet().entrySet()) { + final String className = entry.getKey(); + + try { + final ByteArrayOutputStream baos = + new ByteArrayOutputStream(1024); + final ObjectOutputStream out = new ObjectOutputStream(baos); + final Object o1 = entry.getValue(); + out.writeObject(o1); + out.close(); + + final ByteArrayInputStream bais = + new ByteArrayInputStream(baos.toByteArray()); + final ObjectInputStream in = new ObjectInputStream(bais); + @SuppressWarnings("unused") + Object o2 = in.readObject(); + in.close(); + + // Equality checking -- a future SR + } catch (NotSerializableException nse) { + nse.printStackTrace(System.err); + fail(className + " " + nse.getMessage()); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/StateChangeListenerTest.java b/test/com/sleepycat/je/rep/StateChangeListenerTest.java new file mode 100644 index 0000000..ba4d2a1 --- /dev/null +++ b/test/com/sleepycat/je/rep/StateChangeListenerTest.java @@ -0,0 +1,301 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep; + +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.DETACHED; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.MASTER; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.REPLICA; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.UNKNOWN; +import static java.util.logging.Level.INFO; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.junit.Test; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.utilint.Timestamp; + +public class StateChangeListenerTest extends RepTestBase { + + private volatile CountDownLatch listenerLatch = null; + + /* + * Verify that a ReplicaStateException is correctly associated with the + * state change event that established it as such. + */ + @Test + public void testEventIdentity() { + ReplicatedEnvironment rep0 = repEnvInfo[0].openEnv(); + rep0.setStateChangeListener(new PassiveListener(rep0)); + + ReplicatedEnvironment rep1 = repEnvInfo[1].openEnv(); + rep1.setStateChangeListener(new PassiveListener(rep1)); + assertTrue(rep1.getState().isReplica()); + try { + rep1.openDatabase(null,"db", dbconfig); + fail("expected exception"); + } catch (ReplicaWriteException e) { + final PassiveListener passiveListener = + (PassiveListener)rep1.getStateChangeListener(); + assertEquals(e.getEvent(), passiveListener.currentEvent); + } + } + + /* + * Verify that an exception leaking out of a listener invalidates the + * environment. 
+ */ + @Test + public void testExceptionInStateChangeNotifier() { + ReplicatedEnvironment rep = repEnvInfo[0].openEnv(); + BadListener listener = new BadListener(); + try { + rep.setStateChangeListener(listener); + fail("Expected exception"); + } catch (EnvironmentFailureException e) { + assertTrue(e.getCause() instanceof NullPointerException); + assertTrue(!rep.isValid()); + } + repEnvInfo[0].closeEnv(); + } + + @Test + public void testListenerReplacement() { + ReplicatedEnvironment rep = repEnvInfo[0].openEnv(); + + final Listener listener1 = new Listener(rep); + rep.setStateChangeListener(listener1); + assertEquals(listener1, rep.getStateChangeListener()); + final Listener listener2 = new Listener(rep); + rep.setStateChangeListener(listener2); + assertEquals(listener2, rep.getStateChangeListener()); + repEnvInfo[0].closeEnv(); + } + + @Test + public void testBasic() + throws Exception { + List listeners = new LinkedList(); + + /* Verify that initial notification is always sent. */ + for (int i=0; i < repEnvInfo.length; i++) { + ReplicatedEnvironment rep = repEnvInfo[i].openEnv(); + State state = rep.getState(); + State expectedState = (i == 0) ? MASTER : REPLICA; + assertEquals(expectedState, state); + Listener listener = new Listener(rep); + listeners.add(listener); + rep.setStateChangeListener(listener); + /* Check that there was an immediate callback. */ + assertEquals(1, listener.events.size()); + StateChangeEvent event = listener.events.get(0); + assertEquals(expectedState, event.getState()); + assertEquals(repEnvInfo[0].getRepConfig().getNodeName(), + event.getMasterNodeName()); + listener.events.clear(); + } + + /* + * Verify that notifications are sent on master transitions. 2 + * transitions per node, except for the node being shutdown. + */ + listenerLatch = new CountDownLatch(repEnvInfo.length*2); + repEnvInfo[0].closeEnv(); + /* Wait 60s to ensure events can be delivered */ + awaitEvents(listenerLatch, 60, TimeUnit.SECONDS, + listeners.get(0).events, 2, UNKNOWN, DETACHED); + + int masterIndex = -1; + for (int i=1; i < repEnvInfo.length; i++) { + /* Verify state transitions: UNKNOWN [MASTER | REPLICA] */ + assertEquals(2, listeners.get(i).events.size()); + + final State handleState = repEnvInfo[i].getEnv().getState(); + assertEquals(UNKNOWN, listeners.get(i).events.get(0).getState()); + assertEquals(handleState, + listeners.get(i).events.get(1).getState()); + if (handleState == MASTER) { + masterIndex = i; + } + } + assertTrue(masterIndex > 0); + + /* Verify that notifications are sent on close. */ + for (int i=1; i < repEnvInfo.length; i++) { + listeners.get(i).events.clear(); + int numExpectedEvents = (masterIndex==i) ? 2 : 1; + listenerLatch = new CountDownLatch(numExpectedEvents); + repEnvInfo[i].closeEnv(); + /* Wait 60s to ensure events can be delivered */ + awaitEvents(listenerLatch, 60, TimeUnit.SECONDS, + listeners.get(i).events, numExpectedEvents); + } + } + + /** + * Test state changes when establishing a secondary node, having it lose + * contact with the master, and then shutting it down. 
+ */ + @Test + public void testSecondary() + throws Exception { + + /* Set up environment with a secondary replica */ + ReplicatedEnvironment rep0 = repEnvInfo[0].openEnv(); + repEnvInfo[1].getRepConfig().setNodeType(NodeType.SECONDARY); + ReplicatedEnvironment rep1 = repEnvInfo[1].openEnv(); + + /* Listen for as many as three state events */ + listenerLatch = new CountDownLatch(3); + Listener listener = new Listener(rep1); + rep1.setStateChangeListener(listener); + + /* Close master, then replica */ + repEnvInfo[0].closeEnv(); + repEnvInfo[1].closeEnv(); + + /* Check expected states */ + /* Wait 60s to ensure events can be delivered */ + /* + * There should be either two or three events: REPLICA, UNKNOWN + * (optional), DETACHED. The UNKNOWN event is optional because it + * depends on the timing of closing the environments. It will be + * generated only if the secondary notices the loss of the master + * before it is closed down. + */ + listenerLatch.await(60, TimeUnit.SECONDS); + if (listener.events.size() == 2) { + awaitEvents(listenerLatch, 0, TimeUnit.SECONDS, + listener.events, 2, REPLICA, DETACHED); + } else { + awaitEvents(listenerLatch, 0, TimeUnit.SECONDS, + listener.events, 3, REPLICA, UNKNOWN, DETACHED); + } + } + + /** + * Assert that the count down latch reaches zero in the specified amount + * of time, and confirm that the expected number of events were delivered. + * If expectedStates are specified, check that the delivered events have + * the expected states. + */ + void awaitEvents(CountDownLatch latch, + long time, + TimeUnit timeUnit, + List events, + int numExpectedEvents, + State... expectedStates) + throws InterruptedException { + + final long start = System.currentTimeMillis(); + latch.await(time, timeUnit); + if (events.size() < numExpectedEvents) { + fail("Expected " + numExpectedEvents + " events, found " + + events.size() + ": " + describeEvents(events)); + } + + if ((expectedStates != null) && (expectedStates.length > 0)) { + assertEquals("Number of expected states", numExpectedEvents, + expectedStates.length); + for (int i = 0; i < numExpectedEvents; i++) { + if (!expectedStates[i].equals(events.get(i).getState())) { + fail("Expected event " + i + " state " + + expectedStates[i] + ", found " + + events.get(i).getState() + ", for events: " + + describeEvents(events)); + } + } + } + if (logger.isLoggable(INFO)) { + logger.info("Received awaited events" + + ", startTime: " + new Timestamp(start) + + ", events: " + describeEvents(events)); + } + } + + private String describeEvents(final List events) { + final StringBuilder sb = new StringBuilder(); + boolean first = true; + for (final StateChangeEvent event : events) { + if (!first) { + sb.append(", "); + } else { + first = false; + } + sb.append("StateChangeEvent["); + sb.append("state=").append(event.getState()); + sb.append(", eventTime="); + sb.append(new Timestamp(event.getEventTime())); + sb.append("]"); + } + return sb.toString(); + } + + class Listener implements StateChangeListener { + + final ReplicatedEnvironment rep; + final List events = + Collections.synchronizedList(new LinkedList()); + + public Listener(ReplicatedEnvironment rep) { + this.rep = rep; + } + + @Override + public void stateChange(StateChangeEvent stateChangeEvent) { + events.add(stateChangeEvent); + if (listenerLatch != null) { + listenerLatch.countDown(); + } + } + } + + /* Always throw an exception upon notification. 
+     */
+    class BadListener implements StateChangeListener {
+
+        @Override
+        public void stateChange
+            (@SuppressWarnings("unused") StateChangeEvent stateChangeEvent) {
+
+            throw new NullPointerException("Test exception");
+        }
+    }
+
+    /**
+     * A passive listener that simply remembers the last event.
+     */
+    class PassiveListener implements StateChangeListener {
+
+        final ReplicatedEnvironment rep;
+        volatile StateChangeEvent currentEvent = null;
+
+        public PassiveListener(ReplicatedEnvironment rep) {
+            this.rep = rep;
+        }
+
+        @Override
+        public void stateChange(StateChangeEvent stateChangeEvent) {
+            currentEvent = stateChangeEvent;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/StoredClassCatalogTest.java b/test/com/sleepycat/je/rep/StoredClassCatalogTest.java
new file mode 100644
index 0000000..e68d6c3
--- /dev/null
+++ b/test/com/sleepycat/je/rep/StoredClassCatalogTest.java
@@ -0,0 +1,89 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import com.sleepycat.bind.serial.StoredClassCatalog;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+public class StoredClassCatalogTest extends TestBase {
+    private static final String dbName = "catalogDb";
+    private final File envRoot;
+    private RepEnvInfo[] repEnvInfo;
+
+    public StoredClassCatalogTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    /*
+     * Test that opening the StoredClassCatalog on the replicas, after the
+     * database used to store the ClassCatalog has been created, doesn't
+     * throw a ReplicaWriteException. See SR 18938.
+     */
+    @Test
+    public void testOpenClassCatalogOnReplicas()
+        throws Exception {
+
+        repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3);
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+
+        Database catalogDb = master.openDatabase(null, dbName, dbConfig);
+        StoredClassCatalog catalog = new StoredClassCatalog(catalogDb);
+
+        /*
+         * Sync the whole group to make sure the database has been created on
+         * the replicas.
+         */
+        RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+
+        /* Check we can open the catalog db on the replicas. */
+        Database repCatalogDb = null;
+        try {
+            repCatalogDb =
+                repEnvInfo[1].getEnv().openDatabase(null, dbName, dbConfig);
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("unexpected exception: " + e);
+        }
+
+        /* Check no exceptions thrown while opening the StoredClassCatalog.
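+         * Once the catalog database already exists and is populated, the
+         * StoredClassCatalog constructor should only need to read it, so no
+         * write (and hence no ReplicaWriteException) is expected on the
+         * replica; this is the regression covered by SR 18938.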
+         */
+        try {
+            catalog = new StoredClassCatalog(repCatalogDb);
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("unexpected exception: " + e);
+        }
+
+        catalogDb.close();
+        repCatalogDb.close();
+
+        RepTestUtils.shutdownRepEnvs(repEnvInfo);
+    }
+}
diff --git a/test/com/sleepycat/je/rep/UnknownStateReplicaTest.java b/test/com/sleepycat/je/rep/UnknownStateReplicaTest.java
new file mode 100644
index 0000000..454c607
--- /dev/null
+++ b/test/com/sleepycat/je/rep/UnknownStateReplicaTest.java
@@ -0,0 +1,182 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.rep.utilint.ValidStateListener;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Check that a replica in the Unknown state can serve read operations.
+ */
+public class UnknownStateReplicaTest extends TestBase {
+
+    private final File envRoot;
+    private static final String DB_NAME = "testDB";
+    private RepEnvInfo[] repEnvInfo;
+
+    public UnknownStateReplicaTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    /**
+     * Check that a replica works in the Unknown state.
+     */
+    @Test
+    public void testBasic()
+        throws Throwable {
+
+        try {
+            repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3);
+            ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+            /* Open a new database on the master. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+
+            Database db = master.openDatabase(null, DB_NAME, dbConfig);
+
+            /* Insert some data. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            for (int i = 1; i <= 10; i++) {
+                IntegerBinding.intToEntry(i, key);
+                StringBinding.stringToEntry("herococo" + i, data);
+                db.put(null, key, data);
+            }
+            db.close();
+
+            /* Shut down all the replicas. */
+            for (int i = repEnvInfo.length - 1; i >= 0; i--) {
+                repEnvInfo[i].closeEnv();
+            }
+
+            /*
+             * Configure the replica to work in the Unknown state and use
+             * NoConsistencyRequiredPolicy for reads.
+             */
+            repEnvInfo[0].getRepConfig().setConfigParam
+                (ReplicationConfig.ALLOW_UNKNOWN_STATE_ENV_OPEN, "true");
+            repEnvInfo[0].getRepConfig().setConsistencyPolicy
+                (new NoConsistencyRequiredPolicy());
+
+            /* Reopen the replica, and make sure its state is Unknown. */
+            repEnvInfo[0].openEnv();
+            assertTrue(repEnvInfo[0].isUnknown());
+
+            /* Read the database to make sure the content is correct.
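+             * Reads can be satisfied locally here because the handle was
+             * opened with ALLOW_UNKNOWN_STATE_ENV_OPEN and a
+             * NoConsistencyRequiredPolicy, so no master contact is needed
+             * to meet the consistency requirement.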
*/ + db = repEnvInfo[0].getEnv().openDatabase(null, DB_NAME, dbConfig); + for (int i = 1; i <= 10; i++) { + IntegerBinding.intToEntry(i, key); + db.get(null, key, data, null); + assertEquals + ("herococo" + i, StringBinding.entryToString(data)); + } + + /* + * Do some write and expect to see ReplicaWriteException, because + * it's not a master. + */ + try { + IntegerBinding.intToEntry(11, key); + StringBinding.stringToEntry("herococo11", data); + db.put(null, key, data); + fail("Expect to see exceptions."); + } catch (ReplicaWriteException e) { + /* Expected exceptions. */ + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + db.close(); + + /* Configure the state change listener to repEnvInfo[0]. */ + ValidStateListener stateListener = new ValidStateListener(); + repEnvInfo[0].getEnv().setStateChangeListener(stateListener); + + /* + * Open another replica and check to see whether the state has + * changed, because this is a three nodes group. + */ + repEnvInfo[1].openEnv(); + assertTrue(repEnvInfo[1].isMaster() || repEnvInfo[1].isReplica()); + + /* Make sure that the state of repEnvInfo[0] has changed. */ + stateListener.awaitValidState(); + assertTrue(repEnvInfo[0].isMaster() || repEnvInfo[0].isReplica()); + + /* Find out the current master. */ + int masterIndex = -1; + for (int i = 0; i <= 1; i++) { + if (repEnvInfo[i].isMaster()) { + masterIndex = i; + break; + } + } + assertTrue(masterIndex != -1); + + /* Open the database again and do some inserts. */ + master = repEnvInfo[masterIndex].getEnv(); + db = master.openDatabase(null, DB_NAME, dbConfig); + + for (int i = 11; i <= 20; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo" + i, data); + db.put(null, key, data); + } + db.close(); + + /* Open the rest replica and we're sure it should be a replica. */ + repEnvInfo[2].openEnv(); + assertTrue(repEnvInfo[2].isReplica()); + + /* + * Open the database on all replicas and make sure the database + * contents are correct. + */ + for (int i = 0; i < repEnvInfo.length; i++) { + db = repEnvInfo[i].getEnv().openDatabase + (null, DB_NAME, dbConfig); + for (int j = 1; j <= 20; j++) { + IntegerBinding.intToEntry(j, key); + db.get(null, key, data, null); + assertEquals + ("herococo" + j, StringBinding.entryToString(data)); + } + db.close(); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } +} diff --git a/test/com/sleepycat/je/rep/UnresolvedHelperHostTest.java b/test/com/sleepycat/je/rep/UnresolvedHelperHostTest.java new file mode 100644 index 0000000..82d6abd --- /dev/null +++ b/test/com/sleepycat/je/rep/UnresolvedHelperHostTest.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep; + +import static com.sleepycat.je.rep.impl.RepParams.SKIP_HELPER_HOST_RESOLUTION; +import static org.hamcrest.core.IsNull.nullValue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeThat; + +import java.io.File; +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.LocalAliasNameService; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +/** + * Test using a replicated environment with a helper host whose host name + * becomes not resolvable in DNS. + */ +public class UnresolvedHelperHostTest extends RepTestBase { + + private static final String dnsHostPrefix = + "hostname-from-LocalAliasNameService-"; + private static final String originalSkipHelperHostResolution = + System.getProperty(SKIP_HELPER_HOST_RESOLUTION, "false"); + private int[] originalDNSCachePolicy; + + @BeforeClass + public static void setUpClass() { + + /* + * Print a message if skipping helper host resolution has been + * suppressed during testing, so that we can confirm that the + * non-default setting was in effect during testing + */ + if (Boolean.valueOf(originalSkipHelperHostResolution)) { + System.err.println(SKIP_HELPER_HOST_RESOLUTION + " = true"); + } + } + + @Override + @Before + public void setUp() + throws Exception { + + RepTestUtils.removeRepEnvironments(envRoot); + + originalDNSCachePolicy = LocalAliasNameService.setDNSCachePolicy(0, 0); + + /* Confirm that DNS providers were installed properly */ + assertEquals("sun.net.spi.nameservice.provider.1", + "default", + System.getProperty("sun.net.spi.nameservice.provider.1")); + assertEquals("sun.net.spi.nameservice.provider.2", + "dns,localalias", + System.getProperty("sun.net.spi.nameservice.provider.2")); + } + + @Override + @After + public void tearDown() + throws Exception { + + super.tearDown(); + LocalAliasNameService.clearAllAliases(); + + LocalAliasNameService.setDNSCachePolicy( + originalDNSCachePolicy[0], originalDNSCachePolicy[1]); + + System.setProperty(SKIP_HELPER_HOST_RESOLUTION, + originalSkipHelperHostResolution); + } + + /** + * Test starting nodes when one of their helper hosts has a hostname that + * is not resolvable. 
[#23120] + */ + @Test + public void testBasic() + throws Exception { + + try { + InetAddress.getByName("this-is-an-unknown-hostname"); + assumeThat("Skip when running on systems that resolve unknown" + + " hostnames", + nullValue()); + } catch (UnknownHostException e) { + } + + /* Start up the cluster */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + /* + * Set LOG_FILE_MAX so that RepTestUtils.setupEnvInfo does not disable + * parameter validation, since validation is what notices the + * unresolvable helper host + */ + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000000"); + + File[] dirs = RepTestUtils.makeRepEnvDirs(envRoot, groupSize); + repEnvInfo = new RepEnvInfo[groupSize]; + for (int i = 0; i < groupSize; i++) { + EnvironmentConfig ec = envConfig.clone(); + ReplicationConfig rc = RepTestUtils.createRepConfig(i + 1); + + /* + * Use made-up DNS names for each node so we can test later what + * happens if we remove them from DNS + */ + String hostname = dnsHostPrefix + i; + LocalAliasNameService.addAlias(hostname); + String hostPort = rc.getNodeHostPort(); + rc.setNodeHostPort(hostPort.replaceAll("localhost", hostname)); + + repEnvInfo[i] = + RepTestUtils.setupEnvInfo(dirs[i], ec, rc, repEnvInfo[0]); + } + + /* Use the first two nodes as the helpers for the last three */ + String helpers = repEnvInfo[0].getRepConfig().getNodeHostPort() + + "," + + repEnvInfo[1].getRepConfig().getNodeHostPort(); + for (int i = 2; i < 5; i++) { + repEnvInfo[i].getRepConfig().setHelperHosts(helpers); + } + + RepTestUtils.joinGroup(repEnvInfo); + RepEnvInfo masterInfo = findMaster(repEnvInfo); + + /* Stop a replica that is a helper and remove its DNS name */ + int replicaIndex = 1; + RepEnvInfo replicaInfo = repEnvInfo[1]; + if (masterInfo == replicaInfo) { + replicaIndex = 0; + replicaInfo = repEnvInfo[0]; + } + String replicaName = dnsHostPrefix + replicaIndex; + replicaInfo.closeEnv(); + LocalAliasNameService.removeAlias(replicaName); + + /* Confirm that reopening the node whose DNS entry is missing fails */ + try { + replicaInfo.openEnv(); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + logger.info("Got expected exception: " + e); + } + + /* Stop the remaining nodes */ + RepEnvInfo[] remainingRepEnvInfo = new RepEnvInfo[groupSize - 1]; + int j = 0; + for (int i = 0; i < groupSize; i++) { + if (i != replicaIndex) { + remainingRepEnvInfo[j++] = repEnvInfo[i]; + } + } + RepTestUtils.shutdownRepEnvs(remainingRepEnvInfo); + + /* + * Check that nodes 2 through 4 cannot start by default because of + * hostname validation for their helper hosts + */ + System.setProperty(SKIP_HELPER_HOST_RESOLUTION, "false"); + for (int i = 2; i < 5; i++) { + try { + repEnvInfo[i].openEnv(); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + logger.info("Got expected exception: " + e); + } + } + + /* + * Set the system property to skip helper host resolution, and confirm + * that the group starts successfully + */ + System.setProperty(SKIP_HELPER_HOST_RESOLUTION, "true"); + RepTestUtils.restartGroup(remainingRepEnvInfo); + + /* + * Close the current master and wait for a new master to be chosen, to + * make sure that the bad hostname doesn't interfere with elections + */ + RepEnvInfo prevMasterInfo = findMasterWait(0, repEnvInfo); + prevMasterInfo.closeEnv(); + masterInfo = findMasterWait(30000, repEnvInfo); + assertFalse(prevMasterInfo + " should not 
equal " + masterInfo, + prevMasterInfo.equals(masterInfo)); + prevMasterInfo.openEnv(); + } +} diff --git a/test/com/sleepycat/je/rep/arb/ArbiterTest.java b/test/com/sleepycat/je/rep/arb/ArbiterTest.java new file mode 100644 index 0000000..c511a39 --- /dev/null +++ b/test/com/sleepycat/je/rep/arb/ArbiterTest.java @@ -0,0 +1,2122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.arb; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileReader; +import java.io.PrintStream; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationGroup; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.ReplicationSSLConfig; +import com.sleepycat.je.rep.arbiter.Arbiter; +import com.sleepycat.je.rep.arbiter.ArbiterConfig; +import com.sleepycat.je.rep.arbiter.ArbiterMutableConfig; +import com.sleepycat.je.rep.arbiter.ArbiterStats; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.rep.monitor.GroupChangeEvent; +import com.sleepycat.je.rep.monitor.JoinGroupEvent; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.monitor.Monitor; +import com.sleepycat.je.rep.monitor.MonitorChangeListener; +import com.sleepycat.je.rep.monitor.MonitorConfig; +import com.sleepycat.je.rep.monitor.NewMasterEvent; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.util.DbGroupAdmin; +import com.sleepycat.je.rep.util.DbPing; +import com.sleepycat.je.rep.util.ReplicationGroupAdmin; 
+import com.sleepycat.je.rep.utilint.HostPortPair;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.BinaryProtocol.Message;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.TestHookAdapter;
+
+public class ArbiterTest extends RepTestBase {
+
+    private final int portOffset = 20;
+    private volatile int phase;
+    private boolean useRepNetConfig;
+    private Arbiter arb;
+    private Arbiter arb2;
+    private Monitor monitor;
+
+    private final static long fiveSec = 5 * 1000;
+    private final static long tenSec = 10 * 1000;
+    private final static long UNKN_TIMEOUT = tenSec;
+    private final static int MAX_PHASE_WAITS = 30;
+    private final static int PHASE_ERROR = -1;
+    private final static long ARB_SETUP_TIMEOUT = 60 * 1000;
+
+    @Override
+    public void setUp() throws Exception {
+        groupSize = 4;
+        super.setUp();
+        phase = 0;
+
+        /*
+         * To enable fine logging:
+         * Logger parent = Logger.getLogger("com.sleepycat.je");
+         * parent.setLevel(Level.FINE);
+         */
+        ReplicationNetworkConfig repNetConfig =
+            RepTestUtils.readRepNetConfig();
+        useRepNetConfig = !repNetConfig.getChannelType().isEmpty();
+        arb = null;
+        arb2 = null;
+        monitor = null;
+    }
+
+    /**
+     * @throws Exception in subclasses.
+     */
+    @Override
+    @After
+    public void tearDown()
+        throws Exception {
+        Replica.setInitialReplayHook(null);
+        phase = Integer.MAX_VALUE;
+        if (arb != null) {
+            arb.shutdown();
+        }
+        if (arb2 != null) {
+            arb2.shutdown();
+        }
+        if (monitor != null) {
+            monitor.shutdown();
+        }
+        super.tearDown();
+    }
+
+    /**
+     * Test to ensure an exception is thrown if the arbiter home (envHome)
+     * parameter is null, and again if the arbiter home directory does not
+     * exist.
+     *
+     * @throws DatabaseException
+     */
+    @Test
+    public void testNegative()
+        throws DatabaseException {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (arbHome.exists()) {
+            arbHome.delete();
+        }
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        boolean gotErrors = false;
+
+        try {
+            try {
+                arb = getReadyArbiter(ac);
+            } catch (IllegalArgumentException notFound) {
+                gotErrors = true;
+            }
+            if (!gotErrors) {
+                fail("Test should have failed due to null arg.");
+            }
+
+            gotErrors = false;
+            ac.setArbiterHome(arbHome.getAbsolutePath());
+            try {
+                arb = getReadyArbiter(ac);
+            } catch (IllegalArgumentException e) {
+                gotErrors = true;
+            }
+            if (!gotErrors) {
+                fail("Test should have failed due to missing directory.");
+            }
+            arbHome.mkdir();
+        } finally {
+            if (arb != null) {
+                arb.shutdown();
+            }
+        }
+    }
+
+    /**
+     * Test to make sure that an UnknownMasterException is
+     * thrown if an arbiter cannot communicate with the group.
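+     *
+     * <p>For reference, a sketch of the timeout knob this test relies on
+     * (the one-second value below is this test's choice, not a default):
+     * <pre>{@code
+     * ArbiterConfig ac = new ArbiterConfig();
+     * // Give up quickly if no master can be contacted in the group.
+     * ac.setUnknownStateTimeout(1000, TimeUnit.MILLISECONDS);
+     * }</pre>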
+     * @throws DatabaseException
+     */
+    @Test
+    public void testTimeout()
+        throws DatabaseException {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(1000, TimeUnit.MILLISECONDS);
+        boolean gotUnMEx = false;
+        try {
+            arb = getReadyArbiter(ac);
+        } catch (Exception e) {
+            gotUnMEx = true;
+        }
+        assert(gotUnMEx);
+    }
+
+    /**
+     * Test to ensure that the Arbiter acks transactions when the Replica
+     * node is down.  Also tests that an ack durability of ALL does not
+     * succeed in a group of two rep nodes plus an Arbiter when only the
+     * master and the Arbiter are up.
+     * This test brings up two rep nodes and an arbiter node, adds data,
+     * then stops the replica node.  The group now consists of the master
+     * and arbiter.  More data is added.
+     * @throws Exception
+     */
+    @Test
+    public void testReplicaDown()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        Properties props = new Properties();
+        props.setProperty(ReplicationConfig.GROUP_NAME, rc.getGroupName());
+        props.setProperty(ReplicationConfig.NODE_NAME, nodeName);
+        props.setProperty(ReplicationMutableConfig.HELPER_HOSTS,
+                          rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        props.setProperty(ReplicationConfig.NODE_HOST_PORT, nodeHostPort);
+        props.setProperty(ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "5 s");
+        ArbiterConfig ac = new ArbiterConfig(props);
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(2);
+                waitForPhase(1);
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+                leaveGroupAllButMaster();
+                populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null);
+                /* Should not be able to do a write with ALL durability. */
+                TransactionConfig txncnf = new TransactionConfig();
+                Durability d =
+                    new Durability(SyncPolicy.NO_SYNC,     // localSync
+                                   SyncPolicy.NO_SYNC,     // replicaSync
+                                   ReplicaAckPolicy.ALL);  // replicaAck
+                txncnf.setDurability(d);
+                boolean gotException = false;
+                try {
+                    populateDB(repEnvInfo[0].getEnv(), "db", 21, 10, txncnf);
+                } catch (InsufficientReplicasException e) {
+                    gotException = true;
+                }
+                if (!gotException) {
+                    fail("Insertion with an ack durability of " +
+                         "ALL should have failed.");
+                }
+                phase++;
+            }
+        });
+
+        /* Starting the arbiter before any group member is up must fail. */
+        boolean gotException = false;
+        try {
+            arb = getReadyArbiter(ac);
+        } catch (Exception e) {
+            gotException = true;
+        }
+        if (!gotException) {
+            fail("Starting the Arbiter should have failed" +
+                 " because no members of the rep group are up.");
+        }
+        testthread.start();
+        arb = getReadyArbiter(ac);
+        phase++;
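+        /*
+         * The worker thread is now exercising writes against a bare
+         * master-plus-arbiter quorum.  A hedged reference sketch of the
+         * durability the arbiter *can* ack there (as opposed to the ALL
+         * policy exercised above):
+         *
+         *     TransactionConfig cfg = new TransactionConfig();
+         *     cfg.setDurability(new Durability(
+         *         SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC,
+         *         ReplicaAckPolicy.SIMPLE_MAJORITY));
+         */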
+        waitForPhase(2);
+        arb.shutdown();
+    }
+
+    /*
+     * Test to ensure that multiple threads inserting data while the replica
+     * is stopped still succeed.  After the replica is stopped, the Master
+     * and Arbiter handle the write requests.
+     */
+    @Test
+    public void testReplicaDownActiveXact()
+        throws Exception {
+        String dbname = "db";
+        int nInserters = 30;
+        Thread[] insertThreads = new Thread[nInserters];
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = rc.getNodeHostname() + ":" + port;
+
+        Properties props = new Properties();
+        props.setProperty(ReplicationConfig.GROUP_NAME, rc.getGroupName());
+        props.setProperty(ReplicationConfig.NODE_NAME, nodeName);
+        props.setProperty(ReplicationMutableConfig.HELPER_HOSTS,
+                          rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        props.setProperty(ReplicationConfig.NODE_HOST_PORT, nodeHostPort);
+        props.setProperty(ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "5 s");
+        ArbiterConfig ac = new ArbiterConfig(props);
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        createGroup(2);
+
+        arb = getReadyArbiter(ac);
+
+        populateDB(repEnvInfo[0].getEnv(), dbname, 0, 10, null);
+
+        /* Do some DML with master and arbiter. */
+        for (int i = 0; i < nInserters; i++) {
+            insertThreads[i] =
+                new Thread(new InsertRunnable(repEnvInfo[0].getEnv(),
+                                              dbname,
+                                              1,
+                                              1000 * i,
+                                              10));
+        }
+
+        for (int i = 0; i < nInserters; i++) {
+            insertThreads[i].start();
+        }
+        Thread.sleep(1 * 1000);
+        leaveGroupAllButMaster();
+        Thread.sleep(1 * 1000);
+
+        int startKey = 11;
+        int nRecords = 100;
+        Environment env = repEnvInfo[0].getEnv();
+        Transaction txn = null;
+        Database db = null;
+        try {
+            db = env.openDatabase(txn, dbname, dbconfig);
+            txn = env.beginTransaction(null, null);
+            for (int i = 0; i < nRecords; i++) {
+                IntegerBinding.intToEntry(startKey + i, key);
+                LongBinding.longToEntry(i, data);
+                db.put(txn, key, data);
+            }
+            txn.commit();
+            txn = null;
+        } finally {
+            if (txn != null) {
+                txn.abort();
+            }
+            if (db != null) {
+                db.close();
+            }
+        }
+        phase++;
+        waitForPhase(nInserters + 1);
+        arb.shutdown();
+    }
+
+    /**
+     * This tests that a slow-responding ack from a replica will not
+     * cause an InsufficientAcksException to be thrown if the Arbiter
+     * has acked the transaction.
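+     *
+     * <p>Sketch of the knob that makes the replica ack arrive "late"
+     * relative to the master's patience (same call as the body below):
+     * <pre>{@code
+     * repConfig.setReplicaAckTimeout(1, TimeUnit.SECONDS);
+     * // ...while a test hook delays each replica COMMIT by ~1.1s, so only
+     * // the arbiter's ack arrives in time.
+     * }</pre>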
+ */ + @Test + public void testSlowReplica() throws Exception { + String dbname = "db"; + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_" + rc.getNodeName(); + String nodeHostPort = rc.getNodeHostname() + ":" + port; + + Properties props = new Properties(); + props.setProperty(ReplicationConfig.GROUP_NAME, rc.getGroupName()); + props.setProperty(ReplicationConfig.NODE_NAME, nodeName); + props.setProperty(ReplicationMutableConfig.HELPER_HOSTS, + rc.getNodeHostPort() + "," + rc2.getNodeHostPort()); + props.setProperty(ReplicationConfig.NODE_HOST_PORT, nodeHostPort); + props.setProperty(ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "5 s" ); + ArbiterConfig ac = new ArbiterConfig(props); + File arbHome = + new File(envRoot.getAbsolutePath() + File.separator + "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + AtomicInteger delayFlag = new AtomicInteger(0); + + Replica.setInitialReplayHook(new DelayCommit(1100, delayFlag)); + int firstn = 2; + for (int i = 0; i < firstn; i++) { + ReplicationConfig repcfg = repEnvInfo[i].getRepConfig(); + repcfg.setReplicaAckTimeout(1, TimeUnit.SECONDS); + ReplicatedEnvironment rep = repEnvInfo[i].openEnv(); + State state = rep.getState(); + assertEquals((i == 0) ? State.MASTER : State.REPLICA, state); + } + + arb = getReadyArbiter(ac); + + delayFlag.incrementAndGet(); + + populateDB(repEnvInfo[0].getEnv(), dbname, 0, 10, null ); + + arb.shutdown(); + } + + /** + * Test that two Arbiters can not join the same + * replication group. + * This test brings up two rep nodes and an arbiter node. + * Adds data + * A second arbiter node is attempted to be booted. This should + * fail since there is an arbiter already in the rep group. 
+     * @throws Exception
+     */
+    @Test
+    public void testTwoArbiters()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(2);
+                waitForPhase(1);
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+                phase++;
+                waitForPhase(3);
+            }
+        });
+        testthread.start();
+        arb = getReadyArbiter(ac);
+        phase++;
+        waitForPhase(2);
+        arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb1");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        port++;
+        nodeHostPort = (rc.getNodeHostname() + ":" + port);
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName("arbiter_2");
+        arb2 = null;
+        boolean gotException = false;
+        try {
+            arb2 = getReadyArbiter(ac);
+        } catch (EnvironmentFailureException e) {
+            gotException = true;
+        } finally {
+            if (arb2 != null) {
+                arb2.shutdown();
+            }
+            phase++;
+        }
+        if (!gotException) {
+            fail("Second Arbiter should not " +
+                 "be allowed to be part of the group.");
+        }
+
+        arb.shutdown();
+    }
+
+    /**
+     * Test that two Arbiters cannot be booted using the same
+     * directory at the same time.
+     * This test brings up two rep nodes and an arbiter node, then
+     * attempts to bring up another arbiter using the same arbiter
+     * directory.
+     * Note that this test exercises code running in the same JVM: the
+     * check matters because the underlying envImpl is cached, and we
+     * verify that sharing it is illegal for arbiters.
+     * Separate processes are required to test the file locking code.
+     * @throws Exception
+     */
+    @Test
+    public void testArbsSameDir()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(2);
+                waitForPhase(1);
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+                phase++;
+                waitForPhase(3);
+            }
+        });
+        testthread.start();
+        arb = getReadyArbiter(ac);
+        phase++;
+        waitForPhase(2);
+        /* Configure so as to attempt to boot in the same directory. */
+        port++;
+        nodeHostPort = (rc.getNodeHostname() + ":" + port);
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName("arbiter_2");
+        arb2 = null;
+        boolean gotError = false;
+        try {
+            arb2 = getReadyArbiter(ac);
+        } catch (Exception e) {
+            gotError = true;
+        } finally {
+            phase++;
+            if (arb2 != null) {
+                arb2.shutdown();
+            }
+        }
+
+        if (!gotError) {
+            fail("Starting an arbiter in a directory where another " +
+                 "arbiter is running should have failed.");
+        }
+        arb.shutdown();
+    }
+
+    /**
+     * Test that a rep group consisting of a master, replica and arbiter
+     * retains write availability when the master becomes unavailable.
+     * This test brings up two rep nodes and an arbiter node.
+     * Adds data, stops the master node, and adds more data.
+     * @throws Exception
+     */
+    @Test
+    public void testMasterDown()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(2);
+                waitForPhase(1);
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+                closeMaster();
+                waitForMaster(repEnvInfo[1]);
+                populateDB(repEnvInfo[1].getEnv(), "db", 11, 10, null);
+                phase++;
+            }
+        });
+        testthread.start();
+        arb = getReadyArbiter(ac);
+        waitForReplica(arb, ARB_SETUP_TIMEOUT);
+        phase++;
+        waitForPhase(2);
+        arb.shutdown();
+    }
+
+    /**
+     * Test that the Arbiter prevents a node from becoming
+     * Master because its VLSN is lower than the Arbiter's.
+     * This test brings up two rep nodes and an arbiter node.
+     * Adds data, stops the replica node, adds more data,
+ * stops the master + * starts the node that was the Replica + * waits a bit to allow time for node to attempt an election + * start the node that was master + * check that this node is the master + * add more data + * @throws Exception + */ + @Test + public void testOneMaster() + throws Exception { + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + Thread testthread = new Thread(new Runnable() { + @Override + public void run() { + createGroup(2); + waitForPhase(1); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null ); + leaveGroupAllButMaster(); + populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null); + closeMaster(); + + restartNodesNoWaitForReady(repEnvInfo[1]); + + try { + Thread.sleep(fiveSec); + } catch (Exception e) { + + } + + /* the replica should not be able to become master */ + RepEnvInfo master = findMaster(repEnvInfo[1]); + assert(master == null); + + try { + restartNodes(tenSec, repEnvInfo[0]); + } catch (InterruptedException e) { + fail("error restarting node1."); + } + try { + Thread.sleep(tenSec); + } catch (Exception e) { + + } + master = findMaster(repEnvInfo[0]); + assert(master != null); + + populateDB(repEnvInfo[0].getEnv(), "db", 21, 10, null); + phase++; + } + + }); + testthread.start(); + arb = getReadyArbiter(ac); + phase++; + waitForPhase(2); + arb.shutdown(); + } + + /** + * Test booting the Arbiter first followed by two + * Replication nodes. The initial replication group + * is empty. 
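+     *
+     * <p>Sketch of the boot order exercised here (ArbiterRunnable is the
+     * helper class defined near the end of this file):
+     * <pre>{@code
+     * ArbiterRunnable arun = new ArbiterRunnable(ac);
+     * new Thread(arun).start(); // blocks in getReadyArbiter until a master exists
+     * createGroup(2);           // now boot the rep nodes
+     * }</pre>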
+     * @throws Exception
+     */
+    @Test
+    public void testBootOrderArbFirst()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(2);
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+                phase++;
+            }
+        });
+
+        ArbiterRunnable arun = new ArbiterRunnable(ac);
+        Thread arbThread = new Thread(arun);
+        arbThread.start();
+        try {
+            Thread.sleep(fiveSec);
+        } catch (InterruptedException e) {
+            fail("Thread sleep.");
+        }
+        try {
+            testthread.start();
+            waitForPhase(1);
+        } finally {
+            arun.shutdown();
+        }
+        arbThread.join();
+    }
+
+    /**
+     * Test that a rep group with an Arbiter and one Rep Node
+     * provides write availability.  Also tests that another
+     * rep node can be added to the group and that this node
+     * can become the master if the other (master) rep node is not
+     * available.
+     * Tests group database initialization and boot order with the Arbiter.
+     * Start Arbiter
+     * Start Node0
+     * Add data
+     * Start Node1
+     * Add data
+     * Stop Node0 - Arbiter and Node1 will allow Node1 to be elected master
+     * Add data
+     * @throws Exception
+     */
+    @Test
+    public void testBootOrderOneRepTwoRep()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(1);
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+
+                restartNodesNoWaitForReady(repEnvInfo[1]);
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (Exception e) {
+                    /* Ignored. */
+                }
+                populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null);
+                closeMaster();
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (Exception e) {
+                    /* Ignored. */
+                }
+                populateDB(repEnvInfo[1].getEnv(), "db", 21, 10, null);
+
+                phase++;
+            }
+        });
+
+        ArbiterRunnable arun = new ArbiterRunnable(ac);
+        Thread arbThread = new Thread(arun);
+        arbThread.start();
+        try {
+            Thread.sleep(fiveSec);
+        } catch (InterruptedException e) {
+            fail("Thread sleep.");
+        }
+        try
{ + testthread.start(); + waitForPhase(1); + } finally { + arun.shutdown(); + } + arbThread.join(); + } + + /** + * This test uses the arbiter to flip flop the Master between two + * rep nodes. + * This test brings up two rep nodes and an arbiter node. + * Adds data + * stops the master + * Add more data + * Start the old master + * add more data + * stop current master + * add more data. + * @throws Exception + */ + @Test + public void testFlipMaster() + throws Exception { + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + Thread testthread = new Thread(new Runnable() { + @Override + public void run() { + createGroup(2); + waitForPhase(1); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null ); + closeMaster(); + /* wait to allow for other node to become master */ + try { + Thread.sleep(fiveSec); + } catch (InterruptedException e) { + fail("Thread sleep."); + } + RepEnvInfo master = findMaster(repEnvInfo[1]); + assert(master != null); + + populateDB(repEnvInfo[1].getEnv(), "db", 11, 10, null); + + /* Restart the original master */ + try { + restartNodes(fiveSec, repEnvInfo[0]); + } catch (InterruptedException e) { + fail("error restarting node1."); + } + + master = findMaster(repEnvInfo[1]); + assert(master != null); + + /* Stop the current master */ + closeMaster(); + /* wait to allow for other node to become master */ + try { + Thread.sleep(fiveSec); + } catch (InterruptedException e) { + fail("Thread sleep."); + } + master = findMaster(repEnvInfo[0]); + assert(master != null); + populateDB(repEnvInfo[0].getEnv(), "db", 21, 10, null); + /* Restart the original master */ + try { + restartNodes(fiveSec, repEnvInfo[1]); + } catch (InterruptedException e) { + fail("error restarting node1."); + } + populateDB(repEnvInfo[0].getEnv(), "db", 31, 10, null); + + phase++; + } + }); + + testthread.start(); + arb = getReadyArbiter(ac); + phase++; + waitForPhase(2); + arb.shutdown(); + } + + /** + * This test exercises the situation when a rep node is added to + * a group consisting of two rep nodes and an arbiter. Also + * tests that the Arbiter will prevent rep nodes from becoming + * Master if the other rep nodes are not current. This is done + * when the rep group consists of three rep nodes and an arbiter. + * + * This test brings up two rep nodes and an arbiter node. + * Adds data (2 rep/1 arb rep size 2) + * stops the replica + * Add more data (1 rep/1 arb rep size 2) + * Stop the master + * Bring up old replica. + * boot new replica node + * check that there is no master + * (due to arb prevention) (2 rep/ 1 arb rep size 3) + * bring up old master. 
+     * add more data (3 rep/1 arb, rep size 3)
+     * stop arbiter
+     * add more data (3 rep, rep size 3)
+     * stop rep node 1
+     * add more data (2 rep, rep size 3)
+     * @throws Exception
+     */
+    @Test
+    public void testQuad()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(2);
+                waitForPhase(1);
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+                leaveGroupAllButMaster();
+                populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null);
+                closeMaster();
+                try {
+                    restartNodesNoWaitForReady(repEnvInfo[1]);
+                    restartNodesNoWaitForReady(repEnvInfo[2]);
+                } catch (Exception e) {
+                    fail("test failed because node could not be started. " +
+                         e);
+                }
+                /* Wait to allow another node to become master. */
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                RepEnvInfo master = findMaster(repEnvInfo[1]);
+                assert(master == null);
+                master = findMaster(repEnvInfo[2]);
+                assert(master == null);
+                /* Restart the original master. */
+                try {
+                    restartNodes(fiveSec, repEnvInfo[0]);
+                } catch (InterruptedException e) {
+                    fail("error restarting node1.");
+                }
+                populateDB(repEnvInfo[0].getEnv(), "db", 21, 10, null);
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                phase++;
+                /* Wait for the arbiter to be shut down. */
+                waitForPhase(3);
+                populateDB(repEnvInfo[0].getEnv(), "db", 31, 10, null);
+                closeNodes(repEnvInfo[1]);
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                populateDB(repEnvInfo[0].getEnv(), "db", 41, 10, null);
+                phase++;
+            }
+        });
+
+        testthread.start();
+        arb = getReadyArbiter(ac);
+        phase++;
+        waitForPhase(2);
+        arb.shutdown();
+        try {
+            Thread.sleep(fiveSec);
+        } catch (InterruptedException e) {
+            fail("Thread sleep.");
+        }
+        phase++;
+        waitForPhase(4);
+    }
+
+    /**
+     * Tests that the Arbiter is an election participant when the
+     * replication group consists of three rep nodes and an Arbiter.
+     * See below for the various rep node/arbiter combinations exercised
+     * in the three rep node/arbiter configuration.
+     *
+     * Creates group of 3 rep nodes. (3 rep, rep size 3)
+     * Create arbiter node. (3 rep/1 arb, rep size 3)
+     * Add data
+     * Stop all rep nodes.
+     * Stop arbiter node.
+     * Start nodes 0 and 1. (2 rep)
+     * Check to make sure there is no master
+     * Start rep node 2 (3 rep)
+     * Check to see there is a master.
+     * add data
+     * stop all rep nodes
+     * Start arbiter
+     * start rep nodes 0 and 1 (2 rep/1 arb)
+     * make sure there is a master
+     * Add data
+     * stop arbiter (2 rep)
+     * Add data
+     * start arbiter
+     * stop replica (1 rep/1 arb)
+     * attempt to insert data;
+     * check for an InsufficientReplicasException.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testQuadElection()
+        throws Exception {
+        ReplicationConfig rc = repEnvInfo[0].getRepConfig();
+        ReplicationConfig rc2 = repEnvInfo[1].getRepConfig();
+        int port = rc2.getNodePort() + portOffset;
+        String nodeName = "arbiter_" + rc.getNodeName();
+        String nodeHostPort = (rc.getNodeHostname() + ":" + port);
+
+        ArbiterConfig ac = new ArbiterConfig();
+        File arbHome =
+            new File(envRoot.getAbsolutePath() + File.separator + "arb");
+        if (!arbHome.exists()) {
+            arbHome.mkdir();
+        }
+        ac.setArbiterHome(arbHome.getAbsolutePath());
+        ac.setGroupName(rc.getGroupName());
+        ac.setHelperHosts(rc.getNodeHostPort() + "," + rc2.getNodeHostPort());
+        ac.setNodeHostPort(nodeHostPort);
+        ac.setNodeName(nodeName);
+        ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        Thread testthread = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                createGroup(3);
+                waitForPhase(1);
+                populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null);
+                closeNodes(repEnvInfo[0], repEnvInfo[1], repEnvInfo[2]);
+                phase++;
+                waitForPhase(3);
+                try {
+                    restartNodesNoWaitForReady(repEnvInfo[0]);
+                    restartNodesNoWaitForReady(repEnvInfo[1]);
+                } catch (Exception e) {
+                    fail("test failed because node could not be started. " +
+                         e);
+                }
+                /* Wait to allow a node to become master. */
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                RepEnvInfo master = findMaster(repEnvInfo[0]);
+                assert(master == null);
+                master = findMaster(repEnvInfo[1]);
+                assert(master == null);
+
+                try {
+                    restartNodesNoWaitForReady(repEnvInfo[2]);
+                } catch (Exception e) {
+                    fail("test failed because node could not be started. " +
+                         e);
+                }
+                /* Wait to allow a node to become master. */
+                try {
+                    Thread.sleep(fiveSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                master =
+                    findMaster(repEnvInfo[0], repEnvInfo[1], repEnvInfo[2]);
+                assert(master != null);
+                populateDB(master.getEnv(), "db", 11, 10, null);
+                closeNodes(repEnvInfo[0], repEnvInfo[1], repEnvInfo[2]);
+                phase++;
+                waitForPhase(5);
+                /* Write with two rep nodes and an arbiter. */
+                try {
+                    restartNodesNoWaitForReady(repEnvInfo[0]);
+                    restartNodesNoWaitForReady(repEnvInfo[1]);
+                } catch (Exception e) {
+                    fail("test failed because node could not be started. " +
+                         e);
+                }
+                /* Wait to allow a node to become master. */
+                try {
+                    Thread.sleep(tenSec);
+                } catch (InterruptedException e) {
+                    fail("Thread sleep.");
+                }
+                master = findMaster(repEnvInfo[0], repEnvInfo[1]);
+                assert(master != null);
+                populateDB(master.getEnv(), "db", 21, 10, null);
+                phase++;
+                waitForPhase(7);
+                populateDB(master.getEnv(), "db", 31, 10, null);
+                phase++;
+                waitForPhase(9);
+                leaveGroupAllButMaster();
+                boolean gotISRException = false;
+                try {
+                    populateDB(master.getEnv(), "db", 41, 10, null);
+                } catch (InsufficientReplicasException e) {
+                    gotISRException = true;
+                }
+                assertTrue(gotISRException);
+                phase++;
+            }
+        });
+
+        testthread.start();
+        arb = getReadyArbiter(ac);
+        phase++;
+        waitForPhase(2);
+        arb.shutdown();
+        phase++;
+        waitForPhase(4);
+        phase++;
+        arb = getReadyArbiter(ac);
+        waitForPhase(6);
+        arb.shutdown();
+        phase++;
+        waitForPhase(8);
+        arb = getReadyArbiter(ac);
+        phase++;
+        waitForPhase(10);
+        arb.shutdown();
+    }
+
+    /**
+     * Tests that ping works on the Arbiter.
+     * This test brings up two rep nodes and an arbiter node.
+     * Adds data, stops the replica node, and adds more data.
+     * Uses DbPing to send a ping request to the Arbiter.
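+     *
+     * <p>Reference sketch of the DbPing invocation (flags as used in the
+     * body below; the host and port values here are illustrative):
+     * <pre>{@code
+     * DbPing.main(new String[] {
+     *     "-groupName", "TestGroup",       // assumed name, for illustration
+     *     "-nodeName", "arbiter_Node1",
+     *     "-nodeHost", "localhost:5021",
+     *     "-socketTimeout", "5000" });
+     * }</pre>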
+ * + * @throws Exception + */ + @Test + public void testPing() + throws Exception { + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + Thread testthread = new Thread(new Runnable() { + @Override + public void run() { + createGroup(2); + waitForPhase(1); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null ); + leaveGroupAllButMaster(); + populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null); + phase++; + waitForPhase(3); + } + }); + testthread.start(); + arb = getReadyArbiter(ac); + phase++; + waitForPhase(2); + String[] args = new String[] { + "-groupName", RepTestUtils.TEST_REP_GROUP_NAME, + "-nodeName", nodeName, + "-nodeHost", nodeHostPort, + "-socketTimeout", "5000" }; + + /* Ping the node. */ + PrintStream original = System.out; + try { + /* Avoid polluting the test output. */ + System.setOut(new PrintStream(new ByteArrayOutputStream())); + + DbPing.main(args); + + } catch (Exception e) { + fail("Unexpected exception: " + LoggerUtils.getStackTrace(e)); + } finally { + System.setOut(original); + } + phase++; + arb.shutdown(); + } + + /** + * Test Arbiter Monitor events are fired and that the + * correct transactions are being acked by the + * Arbiter. Checks using Arbiter statistics. + * + * @throws Exception + */ + @Test + public void testGroupAckAndReJoin() + throws Exception { + int nInserters = 30; + Thread[] insertThreads = new Thread[nInserters]; + + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String monName = "monitor_" + rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + port++; + MonitorConfig mc = new MonitorConfig(); + mc.setGroupName(rc.getGroupName()); + mc.setNodeName(monName); + mc.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + mc.setNodeHostPort(rc.getNodeHostname() + ":" + port); + TestListener listenUp = new TestListener(); + createGroup(2); + + /* + * Do a check to insure that the monitor + * event is sent by the arbiter. 
+ */ + monitor = new Monitor(mc); + monitor.register(); + monitor.startListener(listenUp); + arb = getReadyArbiter(ac); + Thread.sleep(500); + ArrayList events = listenUp.getEvents(); + if (events.size() == 0 ) { + fail("did not get Arbiter join event."); + } + + Object lastEvent = events.get(events.size() - 1); + if (!(lastEvent instanceof GroupChangeEvent || + lastEvent instanceof JoinGroupEvent)) { + fail("did not get Arbiter join group event." + lastEvent); + } + String eventNodeName; + if (lastEvent instanceof GroupChangeEvent) { + eventNodeName = ((GroupChangeEvent)lastEvent).getNodeName(); + } else { + eventNodeName = ((JoinGroupEvent)lastEvent).getNodeName(); + } + if (!eventNodeName.equals(ac.getNodeName())) { + fail("did not get Arbiter join group event. "+ + "Node name is not Arbiter." + lastEvent); + } + + /* all nodes up */ + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null); + + ArbiterStats stats = arb.getStats(new StatsConfig()); + String masterName = repEnvInfo[0].getRepConfig().getNodeName(); + assertTrue(stats.getMaster().startsWith(masterName)); + assertTrue( + stats.getState().equals( + ReplicatedEnvironment.State.REPLICA.toString())); + leaveGroupAllButMaster(); + stats = arb.getStats(new StatsConfig()); + long preInsertAckCount = stats.getAcks(); + + /* Do some DML with master and arbiter */ + for (int i = 0; i < nInserters; i++) { + insertThreads[i] = + new Thread( + new PopulateDeleteRunnable( + repEnvInfo[0].getEnv(), + 1, 1000 * i, + 10)); + } + + for (int i = 0; i < nInserters; i++) { + insertThreads[i].start(); + } + phase++; + waitForPhase(nInserters + 1); + + /* Do some statistics checking */ + stats = arb.getStats(new StatsConfig()); + long ackCount = stats.getAcks(); + assertEquals( nInserters * 20, ackCount - preInsertAckCount); + assertTrue(stats.getVLSN() >= stats.getDTVLSN()); + assertTrue(stats.getWrites() >= stats.getFSyncs()); + + StatsConfig sc = new StatsConfig(); + sc.setClear(true); + stats = arb.getStats(sc); + assertEquals(ackCount, stats.getAcks()); + assertTrue(stats.getWrites() > 0); + stats = arb.getStats(new StatsConfig()); + assertEquals(0, stats.getAcks()); + assertTrue(stats.getWrites() == 0); + + restartNodes(fiveSec, repEnvInfo[1]); + + /* do dml with all nodes up again */ + populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null); + closeMaster(); + waitForMaster(repEnvInfo[1]); + + /* + * Do dml after the original master was closed and the + * replica was elected the new master. + */ + populateDB(repEnvInfo[1].getEnv(), "db", 21, 10, null); + arb.shutdown(); + restartNodes(fiveSec, repEnvInfo[0]); + Thread.sleep(500); + + /* check for the arbiter leave group event */ + if (!lookForEvent(ac.getNodeName(), + LeaveReason.NORMAL_SHUTDOWN, + listenUp.getEvents())) { + fail("Did not get expected Arbiter shutdown monitor event."); + } + + /* + * do dml with the two rep nodes without the arbiter. + */ + populateDB(repEnvInfo[1].getEnv(), "db", 31, 10, null); + } + + /** + * Tests that an Arbiter cannot be started if there + * is an Arbiter with the same configuration already + * running. + * This test exercises the code when two arbiters are + * started with the same configuration on the same + * jvm. An error is expected on the second attempt to + * start the arbiter. 
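+     *
+     * <p>Sketched expectation (the exception type is the one the body
+     * catches; construction goes through the getReadyArbiter helper
+     * defined later in this file):
+     * <pre>{@code
+     * arb  = getReadyArbiter(ac); // succeeds
+     * arb2 = getReadyArbiter(ac); // throws UnsupportedOperationException
+     * }</pre>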
+ */ + @Test + public void multiArbs() { + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + createGroup(2); + arb = getReadyArbiter(ac); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null); + Exception e = null; + arb2 = null; + try { + arb2 = getReadyArbiter(ac); + } catch (UnsupportedOperationException ex) { + e = ex; + } finally { + if (arb2 != null) { + arb2.shutdown(); + arb2 = null; + } + } + if (e == null) { + fail("The second arbiter creation should have failed but didn't."); + } + arb.shutdown(); + + arb2 = getReadyArbiter(ac); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null); + arb2.shutdown(); + } + + /** + * Tests online rep group configuration changes. + * The replication group is altered by adding and + * removing replication and arbiter nodes. + * + * starts with rep0,rep1 and arbiter. + * shutdown rep1 and remove from group. + * boot new node rep2 + * add rep2 node to arbiter helper hosts + * shutdown rep1 + * check that rep2 has become master. + * + * @throws Exception + */ + @Test + public void testReconfigureRG() + throws Exception { + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + createGroup(2); + arb = getReadyArbiter(ac); + /* Construct a DbGroupAdmin instance. */ + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + DbGroupAdmin dbAdmin = + useRepNetConfig ? + new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + master.getRepConfig().getHelperSockets(), + RepTestUtils.readRepNetConfig()) : + new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + master.getRepConfig().getHelperSockets()); + + /* Basic ReplicationGroup test with Arbiter */ + ReplicationGroupAdmin repGroupAdmin = + useRepNetConfig ? 
+ new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + master.getRepConfig().getHelperSockets(), + RepTestUtils.readRepNetConfig()) : + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + master.getRepConfig().getHelperSockets()); + ReplicationGroup rg = repGroupAdmin.getGroup(); + Set arbNodes = rg.getArbiterNodes(); + assertTrue(arbNodes.size() == 1); + + + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null); + /* stop replica node */ + leaveGroupAllButMaster(); + dbAdmin.removeMember(repEnvInfo[1].getRepConfig().getNodeName()); + + /* boot new replica node */ + restartNodes(fiveSec, repEnvInfo[2]); + ReplicationConfig rc3 = repEnvInfo[2].getRepConfig(); + ArbiterMutableConfig amc = arb.getArbiterMutableConfig(); + amc.setHelperHosts(rc.getNodeHostPort() + "," + rc3.getNodeHostPort()); + arb.setArbiterMutableConfig(amc); + populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null); + + /* stop master and wait for new node to become elected */ + closeMaster(); + RepEnvInfo newmaster = waitForMaster(repEnvInfo[2]); + populateDB(repEnvInfo[2].getEnv(), "db", 21, 10, null); + arb.shutdown(); + restartNodes(fiveSec, repEnvInfo[0]); + newmaster = waitForMaster(repEnvInfo[2]); + populateDB(newmaster.getEnv(), "db", 21, 10, null); + dbAdmin.removeMember(ac.getNodeName()); + + boolean removeFailed = false; + try { + dbAdmin.removeMember(ac.getNodeName()); + } catch (Exception e) { + removeFailed = true; + } + if (!removeFailed) { + fail("the remove of the arbiter should have failed."); + } + } + + + /** + * Tests that the Arbiter can be used as the port in order to + * create a ReplicationGroupAdmin object. + * + * @throws Exception + */ + @Test + public void testRepGroupAdmin() + throws Exception { + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + createGroup(2); + arb = getReadyArbiter(ac); + + Set arbAddr = HostPortPair.getSockets(nodeHostPort); + + /* Construct a DbGroupAdmin instance. */ + DbGroupAdmin dbAdmin = + useRepNetConfig ? + new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + arbAddr, + RepTestUtils.readRepNetConfig()) : + new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + arbAddr); + + /* Basic ReplicationGroup test with Arbiter */ + ReplicationGroupAdmin repGroupAdmin = + useRepNetConfig ? + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + arbAddr, + RepTestUtils.readRepNetConfig()) : + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + arbAddr); + ReplicationGroup rg = repGroupAdmin.getGroup(); + Set arbNodes = rg.getArbiterNodes(); + assertTrue(arbNodes.size() == 1); + leaveGroupAllButMaster(); + dbAdmin.removeMember(repEnvInfo[1].getRepConfig().getNodeName()); + } + + /** + * Tests Arbiter configured with SSL. 
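+     *
+     * <p>Sketch of how the same SSL transport settings are shared (calls
+     * as used in the body; sslProps comes from the RepTestUtils test
+     * helpers, and the variable names here are illustrative):
+     * <pre>{@code
+     * ReplicationSSLConfig ssl = new ReplicationSSLConfig(sslProps);
+     * repConfig.setRepNetConfig(ssl);     // each rep node
+     * arbiterConfig.setRepNetConfig(ssl); // and the arbiter
+     * }</pre>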
+ * + */ + @Test + public void testArbiterSSL() + throws Exception { + + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + + /* Try setting to ssl in the config. */ + Properties sslProps = new Properties(); + RepTestUtils.setUnitTestSSLProperties(sslProps); + ReplicationSSLConfig repSSLConfig = new ReplicationSSLConfig(sslProps); + repSSLConfig.setSSLKeyStorePasswordSource(null); + rc.setRepNetConfig(repSSLConfig); + rc2.setRepNetConfig(repSSLConfig); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(tenSec, TimeUnit.MILLISECONDS); + ac.setRepNetConfig(repSSLConfig); + createGroup(2); + arb = getReadyArbiter(ac); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null); + /* stop master and wait for new node to become elected */ + closeMaster(); + RepEnvInfo newmaster = waitForMaster(repEnvInfo[1]); + populateDB(newmaster.getEnv(), "db", 21, 10, null); + restartNodes(repEnvInfo[0]); + populateDB(newmaster.getEnv(), "db", 31, 10, null); + arb.shutdown(); + populateDB(newmaster.getEnv(), "db", 41, 10, null); + } + + /** + * Test Arbiter logging is mutable. + * @throws Exception + */ + @Test + public void testArbiterLogging() + throws Exception { + ReplicationConfig rc = repEnvInfo[0].getRepConfig(); + ReplicationConfig rc2 = repEnvInfo[1].getRepConfig(); + int port = rc2.getNodePort() + portOffset; + String nodeName = "arbiter_"+rc.getNodeName(); + String nodeHostPort = + (rc.getNodeHostname()+ ":" + port); + + ArbiterConfig ac = new ArbiterConfig(); + File arbHome = + new File(envRoot.getAbsolutePath()+File.separator+ "arb"); + if (!arbHome.exists()) { + arbHome.mkdir(); + } + ac.setArbiterHome(arbHome.getAbsolutePath()); + ac.setGroupName(rc.getGroupName()); + ac.setHelperHosts(rc.getNodeHostPort()+","+rc2.getNodeHostPort()); + ac.setNodeHostPort(nodeHostPort); + ac.setNodeName(nodeName); + ac.setUnknownStateTimeout(UNKN_TIMEOUT, TimeUnit.MILLISECONDS); + + Thread testthread = new Thread(new Runnable() { + @Override + public void run() { + createGroup(2); + waitForPhase(1); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null ); + leaveGroupAllButMaster(); + populateDB(repEnvInfo[0].getEnv(), "db", 11, 10, null); + phase++; + } + }); + + try { + Logger parent = Logger.getLogger("com.sleepycat.je"); + parent.setLevel(Level.FINE); + testthread.start(); + arb = getReadyArbiter(ac); + ArbiterMutableConfig amc = arb.getArbiterMutableConfig(); + amc.setFileLoggingLevel(Level.FINE.getName()); + arb.setArbiterMutableConfig(amc); + phase++; + waitForPhase(2); + } finally { + arb.shutdown(); + } + File logFile = + new File(arbHome.getAbsolutePath() + File.separator + + "je.info.0"); + if (!checkFile(logFile, "FINE")) { + fail("The je.info.0 file does not have FINE logging messages"); + } + } + + private boolean checkFile(File in, + String valueToLookFor) + throws Exception { + boolean found = false; + String curRow; + BufferedReader fr = new BufferedReader(new FileReader(in)); + try { + while ((curRow = fr.readLine()) != 
null) { + if (curRow.indexOf(valueToLookFor) >= 0) { + found = true; + break; + } + } + } finally { + fr.close(); + } + return found; + } + + private boolean lookForEvent(String nodeName, + LeaveReason reason, + ArrayList events) { + for (int i = events.size(); i > 0; i--) { + if (events.get(i - 1) instanceof LeaveGroupEvent) { + LeaveGroupEvent lge = (LeaveGroupEvent)events.get(i - 1); + if (nodeName.equals(lge.getNodeName()) && + reason.equals(lge.getLeaveReason())) { + return true; + } + } + } + return false; + } + + private void waitForReplica(Arbiter an, long timeout) + throws RuntimeException { + long startTime = System.currentTimeMillis(); + while (System.currentTimeMillis() < startTime + timeout) { + State anState = an.getState(); + if (anState == State.REPLICA || anState == State.DETACHED) { + return; + } + try { + Thread.sleep(500); + } catch (InterruptedException e) { + + } + } + throw new RuntimeException("Timed out waiting for Arbiter " + + "to become a replica."); + } + + private Arbiter getReadyArbiter(ArbiterConfig ac) { + Arbiter arb = new Arbiter(ac); + try { + waitForReplica(arb, ARB_SETUP_TIMEOUT); + } catch (RuntimeException e) { + arb.shutdown(); + throw e; + } + return arb; + } + + class TestListener implements MonitorChangeListener { + private final ArrayList events = new ArrayList(); + @Override + public void notify(NewMasterEvent newMasterEvent) { + events.add(newMasterEvent); + } + @Override + public void notify(GroupChangeEvent groupChangeEvent) { + events.add(groupChangeEvent); + } + + @Override + public void notify(JoinGroupEvent joinGroupEvent) { + events.add(joinGroupEvent); + } + + @Override + public void notify(LeaveGroupEvent leaveGroupEvent) { + events.add(leaveGroupEvent); + } + + public ArrayList getEvents() { + return new ArrayList(events); + } + } + + class PopulateDeleteRunnable implements Runnable { + private final int preCondition; + private final int startVal; + private final int nVals; + private final ReplicatedEnvironment repEnv; + private final TransactionConfig txnCfg; + + PopulateDeleteRunnable(ReplicatedEnvironment repEnv, + int preCondition, + int startVal, + int nVals) { + this.preCondition = preCondition; + this.startVal = startVal; + this.nVals = nVals; + this.repEnv = repEnv; + txnCfg = new TransactionConfig(); + Durability d = + new Durability(SyncPolicy.NO_SYNC, // localSync + SyncPolicy.NO_SYNC, // replicaSync + ReplicaAckPolicy.SIMPLE_MAJORITY); + txnCfg.setDurability(d); + } + + @Override + public void run() { + waitForPhase(preCondition); + + for (int i = 0; i < 20; i++) { + boolean done = false; + while(!done) { + try { + if ( (i % 2) == 0) { + populateDB(repEnv, "db", startVal, nVals, txnCfg); + } else { + deleteDB(repEnv, "db", startVal, nVals, txnCfg); + } + } catch (LockConflictException timeout) { + continue; + } + done = true; + } + } + phase++; + } + } + + class InsertRunnable implements Runnable { + private final int postCondition; + private final int startVal; + private final int nVals; + private final ReplicatedEnvironment repEnv; + private final TransactionConfig txnCfg; + private final String dbname; + + InsertRunnable(ReplicatedEnvironment repEnv, + String dbname, + int postCondition, + int startVal, + int nVals) { + this.postCondition = postCondition; + this.startVal = startVal; + this.nVals = nVals; + this.repEnv = repEnv; + this.dbname = dbname; + txnCfg = new TransactionConfig(); + Durability d = + new Durability(SyncPolicy.NO_SYNC, // localSync + SyncPolicy.NO_SYNC, // replicaSync + 
ReplicaAckPolicy.SIMPLE_MAJORITY); + txnCfg.setDurability(d); + } + + @Override + public void run() { + int insertCount = 0; + Database db = null; + Transaction txn = null; + DatabaseEntry key = new DatabaseEntry(new byte[]{1}); + try { + db = repEnv.openDatabase(null, dbname, dbconfig); + boolean done = false; + while(!done) { + insertCount++; + txn = repEnv.beginTransaction(null, null); + int startKey = insertCount % nVals + startVal; + IntegerBinding.intToEntry(startKey, key); + LongBinding.longToEntry(insertCount, data); + db.put(txn, key, data); + txn.commit(); + txn = null; + if (phase >= postCondition) { + done = true; + } + } + } catch (DatabaseException de) { + fail("Insert thread got exception "+de); + } + finally { + if (txn != null) { + txn.abort(); + } + if (db != null) { + db.close(); + } + } + phase++; + } + } + + class ArbiterRunnable implements Runnable { + ArbiterConfig ac; + Arbiter arbiter; + volatile boolean shutdown = false; + ArbiterRunnable(ArbiterConfig ac) { + this.ac = ac; + } + @Override + public void run() { + arbiter = getReadyArbiter(ac); + while (!shutdown) { + try { + Thread.sleep(500); + } catch (Exception e) { + } + } + } + + public void shutdown() { + if (arbiter != null) { + arbiter.shutdown(); + arbiter = null; + shutdown = true; + } + } + + } + + private void waitForPhase(int phaseToWaitFor) { + if (phase == PHASE_ERROR) { + throw new RuntimeException("Thread encountered phase error."); + } + int numberOfWaits = 0; + while (phase < phaseToWaitFor) { + try { + Thread.sleep(1000); + if (phase == PHASE_ERROR) { + throw new RuntimeException( + "Thread encountered phase error."); + } + numberOfWaits++; + if (numberOfWaits > MAX_PHASE_WAITS) { + phase = PHASE_ERROR; + throw new RuntimeException("Test failed due to phase " + + "timeout. Waiting for phase " + phaseToWaitFor + + " current phase " + phase +"." + + LoggerUtils.getStackTrace()); + } + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted sleep."); + } + } + } + + private RepEnvInfo waitForMaster(RepEnvInfo... nodes) { + if (phase == PHASE_ERROR) { + throw new RuntimeException( + "Thread encountered phase error waiting for master."); + } + int numberOfWaits = 0; + while (true) { + for (RepEnvInfo repi : nodes) { + if (State.MASTER.equals(repi.getEnv().getState())) { + return repi; + } + } + + numberOfWaits++; + if (numberOfWaits > MAX_PHASE_WAITS) { + phase = PHASE_ERROR; + throw new RuntimeException("Test failed due to timeout " + + "waiting for a master." 
+ LoggerUtils.getStackTrace()); + } + + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted sleep."); + } + } + } + + private void closeMaster() + throws DatabaseException { + + for (RepEnvInfo repi : repEnvInfo) { + if (repi.getEnv() == null) { + continue; + } + if (!repi.getEnv().isValid()) { + continue; + } + if (State.MASTER.equals(repi.getEnv().getState())) { + repi.closeEnv(); + } + } + } + + protected CommitToken deleteDB(ReplicatedEnvironment rep, + String dbName, + int startKey, + int nRecords, + TransactionConfig txnConfig) + throws DatabaseException { + + Environment env = rep; + Database db = null; + boolean done = false; + Transaction txn = env.beginTransaction(null, txnConfig); + try { + db = env.openDatabase(txn, dbName, dbconfig); + txn.commit(); + txn = null; + txn = env.beginTransaction(null, txnConfig); + for (int i = 0; i < nRecords; i++) { + IntegerBinding.intToEntry(startKey + i, key); + db.delete(txn, key); + } + txn.commit(); + done = true; + return txn.getCommitToken(); + } finally { + if (txn != null && !done) { + txn.abort(); + } + if (db != null) { + db.close(); + } + } + } + + class DelayCommit extends TestHookAdapter { + private final long delayTime; + private final AtomicInteger flag; + + DelayCommit(long delayTime, + AtomicInteger flag) { + this.delayTime = delayTime; + this.flag = flag; + } + + @Override + public void doHook(Message m) { + if (flag.get() == 1 && m.getOp() == Protocol.COMMIT) { + try { + Thread.sleep(delayTime); + } catch (Exception e) + { + } + } + } + } + +} diff --git a/test/com/sleepycat/je/rep/dual/ClassLoaderTest.java b/test/com/sleepycat/je/rep/dual/ClassLoaderTest.java new file mode 100644 index 0000000..07327e0 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/ClassLoaderTest.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class ClassLoaderTest extends com.sleepycat.je.ClassLoaderTest { +} diff --git a/test/com/sleepycat/je/rep/dual/CursorEdgeTest.java b/test/com/sleepycat/je/rep/dual/CursorEdgeTest.java new file mode 100644 index 0000000..fde6b85 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/CursorEdgeTest.java @@ -0,0 +1,22 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class CursorEdgeTest extends com.sleepycat.je.CursorEdgeTest { + + /* Database in this test case is set non-transactional. 
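+     *
+     * An illustrative sketch follows (hypothetical helper, not part of the
+     * original test): replicated environments require transactional
+     * databases, which is why the non-transactional base tests in these
+     * dual classes are disabled.
+     */
+    private void nonTransactionalSketch(
+        com.sleepycat.je.rep.ReplicatedEnvironment env) {
+        com.sleepycat.je.DatabaseConfig cfg =
+            new com.sleepycat.je.DatabaseConfig();
+        cfg.setAllowCreate(true);
+        cfg.setTransactional(false);
+        /*
+         * A non-transactional open such as env.openDatabase(null, "db",
+         * cfg) is expected to be rejected in a replicated environment,
+         * hence the empty override below.
+         */
+    }
+
+    /* The base test is non-transactional, so it is disabled here.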
*/ + @Override + public void testGetPrevNoDupWithEmptyTree() { + } +} diff --git a/test/com/sleepycat/je/rep/dual/CursorTest.java b/test/com/sleepycat/je/rep/dual/CursorTest.java new file mode 100644 index 0000000..efd3f96 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/CursorTest.java @@ -0,0 +1,27 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class CursorTest extends com.sleepycat.je.CursorTest { + + /* Database in this test case is set non-transactional. */ + @Override + public void testBasic() { + } + + /* Database in this test case is set non-transactional. */ + @Override + public void testMulti() { + } +} diff --git a/test/com/sleepycat/je/rep/dual/DatabaseComparatorsTest.java b/test/com/sleepycat/je/rep/dual/DatabaseComparatorsTest.java new file mode 100644 index 0000000..5bb694c --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/DatabaseComparatorsTest.java @@ -0,0 +1,42 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class DatabaseComparatorsTest + extends com.sleepycat.je.DatabaseComparatorsTest { + + /* Following test cases are setting non-transactional. */ + @Override + public void testSR12517() { + } + + @Override + public void testDupsWithPartialComparator() { + } + + @Override + public void testDatabaseCompareKeysArgs() + throws Exception { + } + + @Override + public void testSR16816DefaultComparator() + throws Exception { + } + + @Override + public void testSR16816ReverseComparator() + throws Exception { + } +} diff --git a/test/com/sleepycat/je/rep/dual/DatabaseConfigTest.java b/test/com/sleepycat/je/rep/dual/DatabaseConfigTest.java new file mode 100644 index 0000000..5780e98 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/DatabaseConfigTest.java @@ -0,0 +1,42 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class DatabaseConfigTest extends com.sleepycat.je.DatabaseConfigTest { + + /* Environment in this test case is set non-transactional. 
*/ + @Override + public void testConfig() { + } + + /* Environment in this test case is set non-transactional. */ + @Override + public void testConfigConflict() { + } + + /* Database in this test case is set non-transactional. */ + @Override + public void testIsTransactional() { + } + + /* Environment in this test case is set non-transactional. */ + @Override + public void testExclusive() { + } + + /* Environment in this test case is set non-transactional. */ + @Override + public void testConfigOverrideUpdateSR15743() { + } +} diff --git a/test/com/sleepycat/je/rep/dual/DatabaseEntryTest.java b/test/com/sleepycat/je/rep/dual/DatabaseEntryTest.java new file mode 100644 index 0000000..bf4aa0e --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/DatabaseEntryTest.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class DatabaseEntryTest extends com.sleepycat.je.DatabaseEntryTest { +} diff --git a/test/com/sleepycat/je/rep/dual/DatabaseTest.java b/test/com/sleepycat/je/rep/dual/DatabaseTest.java new file mode 100644 index 0000000..1e34254 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/DatabaseTest.java @@ -0,0 +1,83 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class DatabaseTest extends com.sleepycat.je.DatabaseTest { + + /* Non-transactional tests. */ + + @Override + public void testOpenCursor() { + } + + @Override + public void testOpenCursorAfterEnvInvalidation() { + } + + @Override + public void testPreloadBytesExceedsCache() { + } + + @Override + public void testPreloadEntireDatabase() { + } + + @Override + public void testPutNoOverwriteInADupDbNoTxn() { + } + + @Override + public void testDeferredWriteDatabaseCount() { + } + + @Override + public void testDeferredWriteDatabaseCountDups() { + } + + /* + * Replication disturbs the cache due to the presence of the feeder and as + * a consequence invalidates the assumptions underlying the test. 
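+ *
+ * As an illustrative sketch (hypothetical helper, not part of the
+ * original test), the style of assumption the feeder invalidates is a
+ * deterministic fetch count after a preload:
+ */
+    private void fetchCountSketch(com.sleepycat.je.Environment env) {
+        com.sleepycat.je.EnvironmentStats stats = env.getStats(null);
+        /*
+         * Standalone, this count is determined by the test's own
+         * operations; with a feeder reading the log and cache on the
+         * master, it varies from run to run.
+         */
+        long lnFetches = stats.getNLNsFetch();
+    }
+
+    /* Cache-sensitive preload tests, disabled for HA: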
+ */ + @Override + public void testPreloadAllInCache() { + } + + @Override + public void testPreloadAllInCacheOffHeap() { + } + + @Override + public void testPreloadCacheMemoryLimit() { + } + + @Override + public void testPreloadMultipleDatabases() { + } + + @Override + public void testPreloadTimeLimit() { + } + + @Override + public void testPreloadInternalMemoryLimit() { + } + + @Override + public void testPreloadWithProgress() { + } + + @Override + public void testDbPreempted() { + } +} diff --git a/test/com/sleepycat/je/rep/dual/DbHandleLockTest.java b/test/com/sleepycat/je/rep/dual/DbHandleLockTest.java new file mode 100644 index 0000000..6073448 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/DbHandleLockTest.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class DbHandleLockTest extends com.sleepycat.je.DbHandleLockTest { +} diff --git a/test/com/sleepycat/je/rep/dual/DirtyReadTest.java b/test/com/sleepycat/je/rep/dual/DirtyReadTest.java new file mode 100644 index 0000000..8dbfcd2 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/DirtyReadTest.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class DirtyReadTest extends com.sleepycat.je.DirtyReadTest { +} diff --git a/test/com/sleepycat/je/rep/dual/EnvironmentTest.java b/test/com/sleepycat/je/rep/dual/EnvironmentTest.java new file mode 100644 index 0000000..1957900 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/EnvironmentTest.java @@ -0,0 +1,121 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class EnvironmentTest extends com.sleepycat.je.EnvironmentTest { + + /* In following test cases, Environment is set non-transactional. 
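+     * An illustrative sketch follows (hypothetical helper, not part of
+     * the original test): a ReplicatedEnvironment must be opened
+     * transactional, so the non-transactional variants are disabled.
+     */
+    private com.sleepycat.je.EnvironmentConfig replicatedEnvConfigSketch() {
+        com.sleepycat.je.EnvironmentConfig ec =
+            new com.sleepycat.je.EnvironmentConfig();
+        ec.setAllowCreate(true);
+        /* Required for replication; non-transactional HA is unsupported. */
+        ec.setTransactional(true);
+        return ec;
+    }
+
+    /* Non-transactional environment tests, disabled for HA: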
+ */
+    @Override
+    public void testReadOnly() {
+    }
+
+    @Override
+    public void testTransactional() {
+    }
+
+    @Override
+    public void testOpenWithoutCheckpoint() {
+    }
+
+    @Override
+    public void testMutableConfig() {
+    }
+
+    @Override
+    public void testTxnDeadlockStackTrace() {
+    }
+
+    @Override
+    public void testParamLoading() {
+    }
+
+    @Override
+    public void testConfig() {
+    }
+
+    @Override
+    public void testGetDatabaseNames() {
+    }
+
+    @Override
+    public void testDaemonRunPause() {
+    }
+
+    /* In the following test cases, the Database is set non-transactional. */
+    @Override
+    public void testDbRenameCommit() {
+    }
+
+    @Override
+    public void testDbRenameAbort() {
+    }
+
+    @Override
+    public void testDbRemove() {
+    }
+
+    @Override
+    public void testDbRemoveReadCommitted() {
+    }
+
+    @Override
+    public void testDbRemoveNonTxnl() {
+    }
+
+    @Override
+    public void testDbRemoveCommit() {
+    }
+
+    /*
+     * The following two test cases try to re-close the same environment or
+     * database. That currently fails under HA, so they are overridden for
+     * now; this may be fixed in the future.
+     */
+    @Override
+    public void testClose() {
+    }
+
+    @Override
+    public void testExceptions() {
+    }
+
+    /*
+     * This test case tries to open two environments on the same directory,
+     * which currently fails under HA, so it is overridden for now; this
+     * may be fixed in the future.
+     */
+    @Override
+    public void testReferenceCounting() {
+    }
+
+    /*
+     * This test opens an environment read-only, so it is not applicable to
+     * ReplicatedEnvironments (which cannot be opened read-only).
+     */
+    @Override
+    public void testReadOnlyDbNameOps() {
+    }
+
+    /* HA does not support in-memory-only environments. */
+    @Override
+    public void testMemOnly() {
+    }
+
+    /*
+     * The 1ms latch timeout used in the test sometimes expires during an HA
+     * join group. It isn't important to test this separately for HA.
+     */
+    @Override
+    public void testLatchTimeout() {
+    }
+}
diff --git a/test/com/sleepycat/je/rep/dual/GetSearchBothRangeTest.java b/test/com/sleepycat/je/rep/dual/GetSearchBothRangeTest.java
new file mode 100644
index 0000000..0f392d2
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/GetSearchBothRangeTest.java
@@ -0,0 +1,18 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.dual;
+
+public class GetSearchBothRangeTest
+    extends com.sleepycat.je.GetSearchBothRangeTest {
+}
diff --git a/test/com/sleepycat/je/rep/dual/ReadCommittedTest.java b/test/com/sleepycat/je/rep/dual/ReadCommittedTest.java
new file mode 100644
index 0000000..3c7c58d
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/ReadCommittedTest.java
@@ -0,0 +1,17 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class ReadCommittedTest extends com.sleepycat.je.ReadCommittedTest { +} diff --git a/test/com/sleepycat/je/rep/dual/SecondaryTest.java b/test/com/sleepycat/je/rep/dual/SecondaryTest.java new file mode 100644 index 0000000..de32223 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/SecondaryTest.java @@ -0,0 +1,56 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +public class SecondaryTest extends com.sleepycat.je.test.SecondaryTest { + + public SecondaryTest(String type, + boolean multiKey, + boolean customAssociation, + boolean resetOnFailure){ + super(type, multiKey, customAssociation, resetOnFailure); + } + + @Parameters + public static List genParams() { + return paramsHelper(true); + } + + /** + * Test is based on EnvironmentStats.getNLNsFetch which returns varied + * results when replication is enabled, presumably because the feeder is + * fetching. + */ + @Override + public void testImmutableSecondaryKey() { + } + + /** + * Same issue as testImmutableSecondaryKey. + */ + @Override + public void testExtractFromPrimaryKeyOnly() { + } + + /** + * Same issue as testImmutableSecondaryKey. + */ + @Override + public void testImmutableSecondaryKeyAndExtractFromPrimaryKeyOnly() { + } +} diff --git a/test/com/sleepycat/je/rep/dual/TruncateTest.java b/test/com/sleepycat/je/rep/dual/TruncateTest.java new file mode 100644 index 0000000..217be43 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/TruncateTest.java @@ -0,0 +1,93 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual; + +public class TruncateTest extends com.sleepycat.je.TruncateTest { + + // TODO: relies on exact standalone LN counts. Rep introduces additional + // LNs. 
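+
+    /*
+     * An illustrative sketch (hypothetical names and values, not part of
+     * the original source) of why exact-count assertions break here:
+     * replication writes additional internal LNs (e.g., rep group
+     * metadata), so a count captured standalone no longer matches.
+     */
+    private void exactCountSketch(long standaloneLnCount,
+                                  long replicatedLnCount) {
+        /*
+         * Standalone: assertEquals(standaloneLnCount, replicatedLnCount)
+         * would hold. Replicated: replicatedLnCount exceeds the standalone
+         * count, so the base tests below are disabled rather than
+         * loosened.
+         */
+    }
+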
+ @Override + public void testEnvTruncateAbort() { + } + + @Override + public void testEnvTruncateCommit() { + } + + @Override + public void testEnvTruncateAutocommit() { + } + + @Override + public void testEnvTruncateNoFirstInsert() { + } + + // Skip since it's non-transactional + @Override + public void testNoTxnEnvTruncateCommit() { + } + + @Override + public void testTruncateCommit() { + } + + @Override + public void testTruncateCommitAutoTxn() { + } + + @Override + public void testTruncateEmptyDeferredWriteDatabase() { + } + + // TODO: Complex setup -- replicators not shutdown resulting in an + // attempt to rebind to an already bound socket address + @Override + public void testTruncateAfterRecovery() { + } + + /* Non-transactional access is not supported by HA. */ + @Override + public void testTruncateNoLocking() { + } + + /* Calls EnvironmentImpl.abnormalShutdown. */ + @Override + public void testTruncateRecoveryWithoutMapLNDeletion() + throws Throwable { + } + + /* Calls EnvironmentImpl.abnormalShutdown. */ + @Override + public void testTruncateRecoveryWithoutMapLNDeletionNonTxnal() + throws Throwable { + } + + /* Calls EnvironmentImpl.abnormalShutdown. */ + @Override + public void testRemoveRecoveryWithoutMapLNDeletion() + throws Throwable { + } + + /* Calls EnvironmentImpl.abnormalShutdown. */ + @Override + public void testRemoveRecoveryWithoutMapLNDeletionNonTxnal() + throws Throwable { + } + + /* Calls EnvironmentImpl.abnormalShutdown. */ + @Override + public void testRecoveryRenameMapLNDeletion() + throws Throwable { + } +} diff --git a/test/com/sleepycat/je/rep/dual/dbi/DbTreeTest.java b/test/com/sleepycat/je/rep/dual/dbi/DbTreeTest.java new file mode 100644 index 0000000..83ff737 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/dbi/DbTreeTest.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.dbi; + +public class DbTreeTest extends com.sleepycat.je.dbi.DbTreeTest { +} diff --git a/test/com/sleepycat/je/rep/dual/dbi/EmbeddedOpsTest.java b/test/com/sleepycat/je/rep/dual/dbi/EmbeddedOpsTest.java new file mode 100644 index 0000000..fdff334 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/dbi/EmbeddedOpsTest.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.dual.dbi; + +public class EmbeddedOpsTest extends com.sleepycat.je.dbi.EmbeddedOpsTest { +} diff --git a/test/com/sleepycat/je/rep/dual/dbi/SortedLSNTreeWalkerTest.java b/test/com/sleepycat/je/rep/dual/dbi/SortedLSNTreeWalkerTest.java new file mode 100644 index 0000000..aac4c00 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/dbi/SortedLSNTreeWalkerTest.java @@ -0,0 +1,18 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.dbi; + +public class SortedLSNTreeWalkerTest + extends com.sleepycat.je.dbi.SortedLSNTreeWalkerTest { +} diff --git a/test/com/sleepycat/je/rep/dual/incomp/INCompressorTest.java b/test/com/sleepycat/je/rep/dual/incomp/INCompressorTest.java new file mode 100644 index 0000000..3a6084c --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/incomp/INCompressorTest.java @@ -0,0 +1,95 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.incomp; + + +public class INCompressorTest + extends com.sleepycat.je.incomp.INCompressorTest { + + /* The following test cases are non-transactional. */ + @Override + public void testDeleteTransactional() { + } + + @Override + public void testDeleteNonTransactional() { + } + + @Override + public void testDeleteNonTransactionalWithBinDeltas() { + } + + @Override + public void testDeleteDuplicate() { + } + + @Override + public void testRemoveEmptyBIN() { + } + + @Override + public void testRemoveEmptyBINWithBinDeltas() { + } + + @Override + public void testRemoveEmptyDBIN() { + } + + @Override + public void testRemoveEmptyDBINandBIN() { + } + + @Override + public void testRollForwardDelete() { + } + + @Override + public void testRollForwardDeleteDuplicate() { + } + + @Override + public void testLazyPruning() { + } + + @Override + public void testLazyPruningDups() { + } + + @Override + public void testEmptyInitialDBINScan() { + } + + @Override + public void testEmptyInitialBINScan() { + } + + @Override + public void testNodeNotEmpty() { + } + + @Override + public void testAbortInsert() { + } + + @Override + public void testAbortInsertDuplicate() { + } + + @Override + public void testRollBackInsert() { + } + + @Override + public void testRollBackInsertDuplicate() { + } +} diff --git a/test/com/sleepycat/je/rep/dual/log/FileReaderTest.java b/test/com/sleepycat/je/rep/dual/log/FileReaderTest.java new file mode 100644 index 0000000..1706054 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/log/FileReaderTest.java @@ -0,0 +1,21 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.log; + +public class FileReaderTest extends com.sleepycat.je.log.FileReaderTest { + + public FileReaderTest() { + super(); + } +} diff --git a/test/com/sleepycat/je/rep/dual/persist/test/ForeignKeyTest.java b/test/com/sleepycat/je/rep/dual/persist/test/ForeignKeyTest.java new file mode 100644 index 0000000..1f4e431 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/persist/test/ForeignKeyTest.java @@ -0,0 +1,33 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.persist.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.persist.model.DeleteAction; + +public class ForeignKeyTest extends com.sleepycat.persist.test.ForeignKeyTest { + + public ForeignKeyTest(String type, DeleteAction action, String label, + String useClassLabel) { + super(type, action, label, useClassLabel); + } + + @Parameters + public static List genParams() { + return paramsHelper(true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/persist/test/IndexTest.java b/test/com/sleepycat/je/rep/dual/persist/test/IndexTest.java new file mode 100644 index 0000000..07dce42 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/persist/test/IndexTest.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.persist.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +public class IndexTest extends com.sleepycat.persist.test.IndexTest { + + public IndexTest(String type) { + super(type); + } + + @Parameters + public static List genParams() { + return getTxnParams(null, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/persist/test/JoinTest.java b/test/com/sleepycat/je/rep/dual/persist/test/JoinTest.java new file mode 100644 index 0000000..b257064 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/persist/test/JoinTest.java @@ -0,0 +1,30 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.persist.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + + +public class JoinTest extends com.sleepycat.persist.test.JoinTest { + + public JoinTest(String type) { + super(type); + } + + @Parameters + public static List genParams() { + return getTxnParams(null, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/persist/test/NegativeTest.java b/test/com/sleepycat/je/rep/dual/persist/test/NegativeTest.java new file mode 100644 index 0000000..9b2f20c --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/persist/test/NegativeTest.java @@ -0,0 +1,29 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.persist.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +public class NegativeTest extends com.sleepycat.persist.test.NegativeTest { + + public NegativeTest(String type) { + super(type); + } + + @Parameters + public static List genParams() { + return getTxnParams(null, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/persist/test/OperationTest.java b/test/com/sleepycat/je/rep/dual/persist/test/OperationTest.java new file mode 100644 index 0000000..8f25cc7 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/persist/test/OperationTest.java @@ -0,0 +1,30 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.persist.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + + +public class OperationTest extends com.sleepycat.persist.test.OperationTest { + + public OperationTest(String type) { + super(type); + } + + @Parameters + public static List genParams() { + return getTxnParams(null, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/persist/test/SequenceTest.java b/test/com/sleepycat/je/rep/dual/persist/test/SequenceTest.java new file mode 100644 index 0000000..f2d9322 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/persist/test/SequenceTest.java @@ -0,0 +1,16 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.persist.test; + +public class SequenceTest extends com.sleepycat.persist.test.SequenceTest { +} diff --git a/test/com/sleepycat/je/rep/dual/persist/test/SubclassIndexTest.java b/test/com/sleepycat/je/rep/dual/persist/test/SubclassIndexTest.java new file mode 100644 index 0000000..692f268 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/persist/test/SubclassIndexTest.java @@ -0,0 +1,17 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.persist.test; + +public class SubclassIndexTest + extends com.sleepycat.persist.test.SubclassIndexTest { +} diff --git a/test/com/sleepycat/je/rep/dual/test/AtomicPutTest.java b/test/com/sleepycat/je/rep/dual/test/AtomicPutTest.java new file mode 100644 index 0000000..f34feab --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/AtomicPutTest.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.util.test.TxnTestCase; + +public class AtomicPutTest extends com.sleepycat.je.test.AtomicPutTest { + + public AtomicPutTest(String txnType) { + super(txnType); + } + + @Parameters + public static List genParams() { + return getTxnParams(new String[] {TxnTestCase.TXN_USER}, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/ForeignKeyTest.java b/test/com/sleepycat/je/rep/dual/test/ForeignKeyTest.java new file mode 100644 index 0000000..c32814a --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/ForeignKeyTest.java @@ -0,0 +1,30 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +public class ForeignKeyTest extends com.sleepycat.je.test.ForeignKeyTest { + + public ForeignKeyTest(String type, boolean multiKey) { + super(type, multiKey); + } + + @Parameters + public static List genParams() { + return paramsHelper(true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/InternalCursorTest.java b/test/com/sleepycat/je/rep/dual/test/InternalCursorTest.java new file mode 100644 index 0000000..b769638 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/InternalCursorTest.java @@ -0,0 +1,31 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +public class InternalCursorTest + extends com.sleepycat.je.test.InternalCursorTest { + + public InternalCursorTest(String type) { + super(type); + } + + @Parameters + public static List genParams() { + return getTxnParams(null, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/JoinTest.java b/test/com/sleepycat/je/rep/dual/test/JoinTest.java new file mode 100644 index 0000000..fd4d716 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/JoinTest.java @@ -0,0 +1,30 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +public class JoinTest extends com.sleepycat.je.test.JoinTest { + + public JoinTest(String type, boolean multiKey) { + super(type, multiKey); + } + + @Parameters + public static List genParams() { + return paramsHelper(true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/PhantomRestartTest.java b/test/com/sleepycat/je/rep/dual/test/PhantomRestartTest.java new file mode 100644 index 0000000..a0b4e86 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/PhantomRestartTest.java @@ -0,0 +1,22 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.dual.test; + +public class PhantomRestartTest + extends com.sleepycat.je.test.PhantomRestartTest { + + public PhantomRestartTest(Spec spec, Boolean dups) { + super(spec, dups); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/PhantomTest.java b/test/com/sleepycat/je/rep/dual/test/PhantomTest.java new file mode 100644 index 0000000..832cabc --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/PhantomTest.java @@ -0,0 +1,23 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.test; + +import com.sleepycat.je.TransactionConfig; + +public class PhantomTest extends com.sleepycat.je.test.PhantomTest { + + public PhantomTest(TransactionConfig txnConfig) { + super(txnConfig); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/SecondaryDirtyReadTest.java b/test/com/sleepycat/je/rep/dual/test/SecondaryDirtyReadTest.java new file mode 100644 index 0000000..b94f7cc --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/SecondaryDirtyReadTest.java @@ -0,0 +1,36 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +public class SecondaryDirtyReadTest extends + com.sleepycat.je.test.SecondaryDirtyReadTest { + + public SecondaryDirtyReadTest( + String type, + boolean multiKey, + boolean duplication, + boolean dirtyReadAll) { + + super(type, multiKey, duplication, dirtyReadAll); + } + + @Parameters + public static List genParams() { + return paramsHelper(true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/SequenceTest.java b/test/com/sleepycat/je/rep/dual/test/SequenceTest.java new file mode 100644 index 0000000..3947058 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/SequenceTest.java @@ -0,0 +1,30 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + + +public class SequenceTest extends com.sleepycat.je.test.SequenceTest { + + public SequenceTest(String type) { + super(type); + } + @Parameters + public static List genParams() { + return getTxnParams(null, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/test/TTLTest.java b/test/com/sleepycat/je/rep/dual/test/TTLTest.java new file mode 100644 index 0000000..5dae0d1 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/TTLTest.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized; + +public class TTLTest extends com.sleepycat.je.test.TTLTest { + + @Parameterized.Parameters + public static List genParams() { + return getTxnParams(null, true /*rep*/); + } + + public TTLTest(String type){ + super(type); + } + + /* + * Causes a data mismatch when comparing nodes, because the test only + * compresses BINs on the master node and the time is set artificially. + */ + @Override + public void testCompression() {} + + /* + * LNs are not purged as expected with replication, because .jdb files are + * protected from deletion for various reasons, e.g., a temporary replica + * lag. + */ + @Override + public void testPurgedLNs() {} + + /* + * Same as for testPurgedLNs. + */ + @Override + public void testPurgedSlots() {} +} diff --git a/test/com/sleepycat/je/rep/dual/test/ToManyTest.java b/test/com/sleepycat/je/rep/dual/test/ToManyTest.java new file mode 100644 index 0000000..dec74a2 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/test/ToManyTest.java @@ -0,0 +1,33 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.dual.test; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.util.test.TxnTestCase; + +public class ToManyTest extends com.sleepycat.je.test.ToManyTest { + + public ToManyTest(String type) { + super(type); + } + + @Parameters + public static List genParams() { + return getTxnParams( + new String[] {TxnTestCase.TXN_USER, TxnTestCase.TXN_AUTO}, true); + } +} diff --git a/test/com/sleepycat/je/rep/dual/tree/KeyPrefixTest.java b/test/com/sleepycat/je/rep/dual/tree/KeyPrefixTest.java new file mode 100644 index 0000000..6ece9ef --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/tree/KeyPrefixTest.java @@ -0,0 +1,16 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.dual.tree;
+
+public class KeyPrefixTest extends com.sleepycat.je.tree.KeyPrefixTest {
+}
diff --git a/test/com/sleepycat/je/rep/dual/tree/MemorySizeTest.java b/test/com/sleepycat/je/rep/dual/tree/MemorySizeTest.java
new file mode 100644
index 0000000..6ffd9b1
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/tree/MemorySizeTest.java
@@ -0,0 +1,24 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.dual.tree;
+
+public class MemorySizeTest extends com.sleepycat.je.tree.MemorySizeTest {
+
+    /*
+     * This test changes the KeyPrefix on the master, but not on the
+     * replicas, so it would fail in replicated mode; disable it.
+     */
+    @Override
+    public void testKeyPrefixChange() {
+    }
+}
diff --git a/test/com/sleepycat/je/rep/dual/tree/SR13034Test.java b/test/com/sleepycat/je/rep/dual/tree/SR13034Test.java
new file mode 100644
index 0000000..4997496
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/tree/SR13034Test.java
@@ -0,0 +1,16 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.dual.tree;
+
+public class SR13034Test extends com.sleepycat.je.tree.SR13034Test {
+}
diff --git a/test/com/sleepycat/je/rep/dual/tree/SplitRace_SR11144Test.java b/test/com/sleepycat/je/rep/dual/tree/SplitRace_SR11144Test.java
new file mode 100644
index 0000000..805af3e
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/tree/SplitRace_SR11144Test.java
@@ -0,0 +1,17 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ +package com.sleepycat.je.rep.dual.tree; + +public class SplitRace_SR11144Test + extends com.sleepycat.je.tree.SplitRace_SR11144Test { +} diff --git a/test/com/sleepycat/je/rep/dual/tree/SplitTest.java b/test/com/sleepycat/je/rep/dual/tree/SplitTest.java new file mode 100644 index 0000000..c34e0b4 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/tree/SplitTest.java @@ -0,0 +1,16 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.tree; + +public class SplitTest extends com.sleepycat.je.tree.SplitTest { +} diff --git a/test/com/sleepycat/je/rep/dual/trigger/ConfigTest.java b/test/com/sleepycat/je/rep/dual/trigger/ConfigTest.java new file mode 100644 index 0000000..150fc4d --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/trigger/ConfigTest.java @@ -0,0 +1,60 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.trigger; + +import static org.junit.Assert.fail; + +import java.util.Arrays; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.trigger.Trigger; + +public class ConfigTest extends com.sleepycat.je.trigger.ConfigTest { + + Environment env; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + env = create(envRoot, envConfig); + } + + @After + public void tearDown() + throws Exception { + + close(env); + super.tearDown(); + } + + @Test + public void testTriggerConfigOnEnvOpen() { + dbConfig.setTriggers(Arrays.asList((Trigger) new InvokeTest.DBT("t1"), + (Trigger) new InvokeTest.DBT("t2"))); + + /* Implementing ReplicatedDatabaseTrigger (RDBT) is expected. */ + try { + env.openDatabase(null, "db1", dbConfig).close(); + fail("IAE expected"); + } catch (IllegalArgumentException iae) { + // expected + } + + } +} diff --git a/test/com/sleepycat/je/rep/dual/trigger/InvokeTest.java b/test/com/sleepycat/je/rep/dual/trigger/InvokeTest.java new file mode 100644 index 0000000..33a4041 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/trigger/InvokeTest.java @@ -0,0 +1,260 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+package com.sleepycat.je.rep.dual.trigger;
+
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.junit.Before;
+
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.rep.util.RepEnvWrapper;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.trigger.ReplicatedDatabaseTrigger;
+import com.sleepycat.je.trigger.TestBase;
+import com.sleepycat.je.trigger.Trigger;
+
+/**
+ * Replicated version of the trigger unit test. The test repeats the trigger
+ * tests, but now across the replication group, ensuring that the triggers
+ * are invoked at every node in the replication group.
+ */
+public class InvokeTest extends com.sleepycat.je.trigger.InvokeTest {
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        nNodes = ((RepEnvWrapper)getWrapper()).getRepEnvInfo(envRoot).length;
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+    }
+
+    @Override
+    public void testAddRemoveTriggerExistindDbTrans() {
+        // TODO: Need equivalent tests once #18262 is fixed
+    }
+
+    @Override
+    public void testAddRemoveTriggerExistindDbAuto() {
+        // TODO: Need equivalent tests once #18262 is fixed
+    }
+
+    /*
+     * All verifyXXX methods are overridden to ensure that the entire
+     * replication group is in sync before verifying that the triggers have
+     * been invoked.
+     */
+    @Override
+    protected void verifyOpen(final int nCreate, int nOpen) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyOpen(nCreate, nOpen);
+    }
+
+    @Override
+    protected void verifyClose(@SuppressWarnings("unused") final int nClose) {
+        /* Closes are asynchronous on the Replica. */
+    }
+
+    @Override
+    protected void verifyRename(final String newName,
+                                final int nRename) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyRename(newName, nRename);
+    }
+
+    @Override
+    protected void verifyTruncate(final int nTruncate) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyTruncate(nTruncate);
+    }
+
+    @Override
+    protected void verifyRemove(final int nRemove) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyRemove(nRemove);
+    }
+
+    @Override
+    protected void verifyCommit(final int nCommit) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyCommit(nCommit);
+    }
+
+    @Override
+    protected void verifyAbort(final int nAbort) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyAbort(nAbort);
+    }
+
+    @Override
+    protected void verifyDelete(final int nDelete,
+                                final DatabaseEntry key,
+                                final DatabaseEntry oldData) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyDelete(nDelete, key, oldData);
+
+    }
+
+    @Override
+    protected void verifyPut(final int nPut,
+                             final DatabaseEntry key,
+                             final DatabaseEntry newData,
+                             final DatabaseEntry oldData) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyPut(nPut, key, newData, oldData);
+    }
+
+    @Override
+    protected void verifyAddTrigger(final int nAddTrigger) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyAddTrigger(nAddTrigger);
+    }
+
+    @Override
+    protected void verifyRemoveTrigger(final int nRemoveTrigger) {
+        ((RepEnvWrapper)getWrapper()).syncGroup(envRoot);
+        super.verifyRemoveTrigger(nRemoveTrigger);
+    }
+
+    @Override
+    protected TransactionConfig getTransactionConfig() {
+        return RepTestUtils.SYNC_SYNC_ALL_TC;
+    }
+
+    @Override
+    protected List getTriggers() {
+        return new
+            LinkedList<Trigger>(Arrays.asList((Trigger) new RDBT("t1"),
+                                              (Trigger) new RDBT("t2")));
+    }
+
+    @Override
+    protected List<Trigger> getTransientTriggers() {
+        return new LinkedList<Trigger>(Arrays.asList((Trigger) new TRDBT("tt1"),
+                                                     (Trigger) new TRDBT("tt2")));
+    }
+
+    @Override
+    protected List<Trigger> getTriggersPlusOne() {
+        List<Trigger> triggers = getTriggers();
+        triggers.add(new InvokeTest.RDBT("t3"));
+        return triggers;
+    }
+
+    @SuppressWarnings("unused")
+    public static class RDBT extends TestBase.DBT
+        implements ReplicatedDatabaseTrigger {
+
+        private static final long serialVersionUID = 1L;
+
+        public RDBT(String name) {
+            super(name);
+        }
+
+        /* Awaits syncup unit tests. */
+
+        public void repeatCreate(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatDelete(Transaction txn, DatabaseEntry key) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatPut(Transaction txn,
+                              DatabaseEntry key,
+                              DatabaseEntry newData) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatRemove(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatRename(Transaction txn, String newName) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatTransaction(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatTruncate(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatAddTrigger(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatRemoveTrigger(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+    }
+
+    @SuppressWarnings("unused")
+    public static class TRDBT extends TestBase.TDBT
+        implements ReplicatedDatabaseTrigger {
+
+        public TRDBT(String name) {
+            super(name);
+        }
+
+        /* Awaits syncup unit tests. */
+
+        public void repeatCreate(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatDelete(Transaction txn, DatabaseEntry key) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatPut(Transaction txn,
+                              DatabaseEntry key,
+                              DatabaseEntry newData) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatRemove(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatRename(Transaction txn, String newName) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatTransaction(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatTruncate(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatAddTrigger(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+
+        public void repeatRemoveTrigger(Transaction txn) {
+            // TODO Auto-generated method stub
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/dual/txn/LockManagerTest.java b/test/com/sleepycat/je/rep/dual/txn/LockManagerTest.java
new file mode 100644
index 0000000..6af2b4e
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/txn/LockManagerTest.java
@@ -0,0 +1,16 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ +package com.sleepycat.je.rep.dual.txn; + +public class LockManagerTest extends com.sleepycat.je.txn.LockManagerTest { +} diff --git a/test/com/sleepycat/je/rep/dual/txn/LockTest.java b/test/com/sleepycat/je/rep/dual/txn/LockTest.java new file mode 100644 index 0000000..02e4549 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/txn/LockTest.java @@ -0,0 +1,16 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.txn; + +public class LockTest extends com.sleepycat.je.txn.LockTest { +} diff --git a/test/com/sleepycat/je/rep/dual/txn/TxnFSyncTest.java b/test/com/sleepycat/je/rep/dual/txn/TxnFSyncTest.java new file mode 100644 index 0000000..6ab6f52 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/txn/TxnFSyncTest.java @@ -0,0 +1,47 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.dual.txn; + +import java.util.List; + +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.util.test.TxnTestCase; + +public class TxnFSyncTest extends com.sleepycat.je.txn.TxnFSyncTest { + + // TODO: Low level environment manipulation. Env not being closed. Multiple + // active environment handles to the same environment. + + public TxnFSyncTest(String type) { + super(type); + } + + @Parameters + public static List genParams() { + return getTxnParams( + new String[] {TxnTestCase.TXN_USER, TxnTestCase.TXN_AUTO}, true); + } + + /* junit.framework.AssertionFailedError: Address already in use + at junit.framework.Assert.fail(Assert.java:47) + at com.sleepycat.je.rep.RepEnvWrapper.create(RepEnvWrapper.java:60) + at com.sleepycat.je.DualTestCase.create(DualTestCase.java:63) + at com.sleepycat.je.txn.TxnFSyncTest.testFSyncButNoClose(TxnFSyncTest.java:105) + ... + + */ + @Override + public void testFSyncButNoClose() { + } +} diff --git a/test/com/sleepycat/je/rep/dual/txn/TxnMemoryTest.java b/test/com/sleepycat/je/rep/dual/txn/TxnMemoryTest.java new file mode 100644 index 0000000..5d1e119 --- /dev/null +++ b/test/com/sleepycat/je/rep/dual/txn/TxnMemoryTest.java @@ -0,0 +1,34 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.dual.txn;
+
+import java.util.List;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+@RunWith(Parameterized.class)
+public class TxnMemoryTest extends com.sleepycat.je.txn.TxnMemoryTest {
+
+    public TxnMemoryTest(String testMode, String eMode) {
+        super(testMode, eMode);
+    }
+
+    @Parameters
+    public static List<Object[]> genParams() {
+
+        return paramsHelper(true);
+    }
+}
diff --git a/test/com/sleepycat/je/rep/dual/txn/TxnTest.java b/test/com/sleepycat/je/rep/dual/txn/TxnTest.java
new file mode 100644
index 0000000..b1ab19e
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/txn/TxnTest.java
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.dual.txn;
+
+public class TxnTest extends com.sleepycat.je.txn.TxnTest {
+
+    @Override
+    public void testBasicLocking()
+        throws Throwable {
+    }
+
+    /*
+     * This test case is excluded because it uses the deprecated durability
+     * API, which is prohibited in dual mode tests.
+     */
+    @Override
+    public void testSyncCombo()
+        throws Throwable {
+    }
+
+    /**
+     * Excluded because it opens and closes the environment several times and
+     * the rep utils don't behave well under these conditions.
+     */
+    @Override
+    public void testPossiblyCommittedState() {
+    }
+}
diff --git a/test/com/sleepycat/je/rep/dual/txn/TxnTimeoutTest.java b/test/com/sleepycat/je/rep/dual/txn/TxnTimeoutTest.java
new file mode 100644
index 0000000..3d76c00
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/txn/TxnTimeoutTest.java
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.dual.txn;
+
+public class TxnTimeoutTest extends com.sleepycat.je.txn.TxnTimeoutTest {
+
+    /*
+     * The following unit tests are excluded because they intentionally
+     * provoke exceptions and handle them accordingly. That special
+     * handling is not available on the replica side, and would cause a
+     * replica failure.
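+     *
+     * For illustration only (a hypothetical sketch, not one of the excluded
+     * tests), the base-class tests follow roughly this shape, which is what
+     * breaks when the exception surfaces on a replica:
+     *
+     * <pre>
+     * Transaction txn = env.beginTransaction(null, null);
+     * txn.setTxnTimeout(1, TimeUnit.MILLISECONDS);  // provoke a timeout
+     * try {
+     *     db.put(txn, key, data);                   // may block on a lock
+     *     fail("expected a timeout");
+     * } catch (LockConflictException expected) {
+     *     txn.abort();                              // the special handling
+     * }
+     * </pre>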
+     */
+    @Override
+    public void testTxnTimeout() {
+    }
+
+    @Override
+    public void testPerTxnTimeout() {
+    }
+
+    @Override
+    public void testEnvTxnTimeout() {
+    }
+
+    @Override
+    public void testEnvNoLockTimeout() {
+    }
+
+    @Override
+    public void testPerLockTimeout() {
+    }
+
+    @Override
+    public void testEnvLockTimeout() {
+    }
+
+    @Override
+    public void testPerLockerTimeout() {
+    }
+
+    @Override
+    public void testReadCommittedTxnTimeout() {
+    }
+
+    @Override
+    public void testReadCommittedLockTimeout() {
+    }
+
+    @Override
+    public void testSerializableTxnTimeout() {
+    }
+
+    @Override
+    public void testSerializableLockTimeout() {
+    }
+}
diff --git a/test/com/sleepycat/je/rep/dual/util/VerifyLogTest.java b/test/com/sleepycat/je/rep/dual/util/VerifyLogTest.java
new file mode 100644
index 0000000..c8e396e
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dual/util/VerifyLogTest.java
@@ -0,0 +1,16 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.dual.util;
+
+public class VerifyLogTest extends com.sleepycat.je.util.VerifyLogTest {
+}
diff --git a/test/com/sleepycat/je/rep/dupconvert/RepDupConvertTest.java b/test/com/sleepycat/je/rep/dupconvert/RepDupConvertTest.java
new file mode 100644
index 0000000..1000923
--- /dev/null
+++ b/test/com/sleepycat/je/rep/dupconvert/RepDupConvertTest.java
@@ -0,0 +1,252 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.dupconvert;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.Properties;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * JE 5.0 changes the representation of duplicated entries, and this change
+ * requires that no duplicated entries exist in the last recovery interval
+ * of the logs created by JE 4.1.
+ *
+ * To achieve this goal, JE 4.1 needs to invoke DbRepPreUpgrade_4_2 to assure
+ * that no duplicated entries exist in the last recovery interval.
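+ * (Background: in JE 4.1, duplicates were stored in nested DIN/DBIN
+ * subtrees; JE 5.0 instead folds the data into a two-part key in the BIN,
+ * which is why the last recovery interval must be free of the old
+ * representation before conversion.)
+ *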
+ * The goal of the test is to make sure JE 5.0 can read the logs correctly
+ * if we perform the upgrade on the original log; see SR 19165 for more
+ * details.
+ */
+public class RepDupConvertTest extends TestBase {
+    private static final int groupSize = 3;
+    private static final String dbName = "testDB";
+    private final File envRoot;
+    private RepEnvInfo[] repEnvInfo;
+
+    public RepDupConvertTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        System.setProperty(RepParams.SKIP_NODENAME_VALIDATION, "true");
+        super.setUp();
+    }
+
+    @Test
+    public void testLogWithSingletonLN()
+        throws Throwable {
+
+        doNormalTest("singletonLN", 0, 100, 0, 0);
+    }
+
+    @Test
+    public void testLogWithDIN()
+        throws Throwable {
+
+        doNormalTest("din", 0, 10, 0, 10);
+    }
+
+    @Test
+    public void testLogWithDeletedLNCommit()
+        throws Throwable {
+
+        doNormalTest("deletedLNCommit", 0, 10, 5, 10);
+    }
+
+    @Test
+    public void testLogWithDeleteLNNoCommit()
+        throws Throwable {
+
+        doNormalTest("deletedLNNoCommit", 0, 10, 0, 10);
+    }
+
+    @Test
+    public void testLogWithMixIN()
+        throws Throwable {
+
+        doNormalTest("mixIN", 0, 10, 3, 7);
+    }
+
+    public void doNormalTest(String logName,
+                             int outerStart,
+                             int outerEnd,
+                             int innerStart,
+                             int innerEnd)
+        throws Throwable {
+
+        if (readPreserveRecordVersionProperty()) {
+            return;
+        }
+
+        loadLogFiles(logName);
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4");
+
+        /* Start the group. */
+        repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, groupSize, envConfig);
+
+        adjustNodeNames();
+
+        ReplicatedEnvironment master =
+            RepTestUtils.restartGroup(true, repEnvInfo);
+
+        /* Create databases on the master. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(true);
+        dbConfig.setTransactional(true);
+
+        /* Check the database content on all replicas. */
+        for (int i = 0; i < repEnvInfo.length; i++) {
+            Database db = repEnvInfo[i].getEnv().
+                openDatabase(null, dbName, dbConfig);
+            checkDatabaseContents
+                (db, outerStart, outerEnd, innerStart, innerEnd);
+            db.close();
+        }
+
+        /* Do some updates on the database. */
+        Database db = master.openDatabase(null, dbName, dbConfig);
+        updateDatabase(db);
+
+        db.close();
+
+        RepTestUtils.shutdownRepEnvs(repEnvInfo);
+    }
+
+    /**
+     * Modifies the rep configuration to match the older convention of allowing
+     * blank space in node names. This avoids the need to regenerate old
+     * version log files to match the new, more restrictive naming rules
+     * [#21407], but also, more significantly, gives us an easy way to test the
+     * override (RepParams.SKIP_NODENAME_VALIDATION).
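+     *
+     * For example (illustrative values), after the adjustment below each
+     * node carries an embedded space in its name, which the current
+     * validation rules would otherwise reject:
+     *
+     * <pre>
+     * repEnvInfo[0].getRepConfig().setNodeName("Node 1");
+     * repEnvInfo[1].getRepConfig().setNodeName("Node 2");
+     * </pre>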
+     */
+    private void adjustNodeNames() {
+        for (int i = 0; i < repEnvInfo.length; i++) {
+            int nodeId = i + 1;
+            repEnvInfo[i].getRepConfig().setNodeName("Node " + nodeId);
+        }
+    }
+
+    private boolean readPreserveRecordVersionProperty()
+        throws Exception {
+
+        /* Use try-with-resources so the stream is closed on all paths. */
+        try (FileInputStream fis =
+            new FileInputStream(new File(envRoot, "je.properties"))) {
+
+            Properties jeProperties = new Properties();
+            jeProperties.load(fis);
+
+            return Boolean.parseBoolean(jeProperties.getProperty
+                (RepParams.PRESERVE_RECORD_VERSION.getName()));
+        }
+    }
+
+    private void checkDatabaseContents(Database db,
+                                       int outerStart,
+                                       int outerEnd,
+                                       int innerStart,
+                                       int innerEnd)
+        throws Throwable {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        Cursor cursor = db.openCursor(null, null);
+        try {
+            for (int i = outerStart; i < outerEnd; i++) {
+                if (innerStart == innerEnd) {
+                    cursor.getNext(key, data, null);
+                    assertEquals(key, makeEntry(i));
+                    assertEquals(data, makeEntry(i));
+                    continue;
+                }
+                for (int j = innerStart; j < innerEnd; j++) {
+                    cursor.getNext(key, data, null);
+                    assertEquals(key, makeEntry(i));
+                    assertEquals(data, makeEntry(j * 10 + i));
+                }
+            }
+        } finally {
+            cursor.close();
+        }
+    }
+
+    private void loadLogFiles(String logName)
+        throws Throwable {
+
+        /* Create the environment home if it doesn't exist. */
+        for (int i = 0; i < groupSize; i++) {
+            File envDir = new File(envRoot, "rep" + i);
+            if (!envDir.exists()) {
+                envDir.mkdir();
+            }
+        }
+
+        for (int i = 0; i < groupSize; i++) {
+            String resName = "je-4.1.7_" + logName + "_" + i + ".jdb";
+            TestUtils.loadLog
+                (getClass(), resName, new File(envRoot, "rep" + i));
+        }
+    }
+
+    private void updateDatabase(Database db) {
+        /* Insert a non-duplicated record. */
+        DatabaseEntry key = makeEntry(100);
+        DatabaseEntry data = makeEntry(100);
+        OperationStatus status = db.putNoOverwrite(null, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Insert duplicated data. */
+        key = makeEntry(5);
+        data = makeEntry(101);
+        status = db.putNoOverwrite(null, key, data);
+        assertEquals(OperationStatus.KEYEXIST, status);
+        status = db.put(null, key, data);
+        assertEquals(OperationStatus.SUCCESS, status);
+
+        /* Delete a duplicated record.
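+         * (Database.delete removes the key together with all of its
+         * duplicates, which is why the subsequent get expects NOTFOUND.)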
*/ + status = db.delete(null, makeEntry(6)); + assertEquals(OperationStatus.SUCCESS, status); + status = db.get(null, makeEntry(6), data, null); + assertEquals(OperationStatus.NOTFOUND, status); + } + + private DatabaseEntry makeEntry(int val) { + byte[] data = new byte[] { (byte) val }; + return new DatabaseEntry(data); + } +} diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_0.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_0.jdb new file mode 100644 index 0000000..6f2e269 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_0.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_1.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_1.jdb new file mode 100644 index 0000000..3bc3762 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_1.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_2.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_2.jdb new file mode 100644 index 0000000..e7f0ec0 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNCommit_2.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_0.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_0.jdb new file mode 100644 index 0000000..86e25d0 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_0.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_1.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_1.jdb new file mode 100644 index 0000000..ae163ca Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_1.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_2.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_2.jdb new file mode 100644 index 0000000..fcff763 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_deletedLNNoCommit_2.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_0.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_0.jdb new file mode 100644 index 0000000..e68d5d5 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_0.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_1.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_1.jdb new file mode 100644 index 0000000..942f650 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_1.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_2.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_2.jdb new file mode 100644 index 0000000..473bdf0 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_din_2.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_0.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_0.jdb new file mode 100644 index 0000000..17d5c3b Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_0.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_1.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_1.jdb new file mode 100644 index 0000000..b8b8c32 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_1.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_2.jdb 
b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_2.jdb new file mode 100644 index 0000000..86070bf Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_mixIN_2.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_0.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_0.jdb new file mode 100644 index 0000000..2fdbd87 Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_0.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_1.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_1.jdb new file mode 100644 index 0000000..75ba89a Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_1.jdb differ diff --git a/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_2.jdb b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_2.jdb new file mode 100644 index 0000000..82b31aa Binary files /dev/null and b/test/com/sleepycat/je/rep/dupconvert/je-4.1.7_singletonLN_2.jdb differ diff --git a/test/com/sleepycat/je/rep/elections/AcceptorTest.java b/test/com/sleepycat/je/rep/elections/AcceptorTest.java new file mode 100644 index 0000000..31eca0b --- /dev/null +++ b/test/com/sleepycat/je/rep/elections/AcceptorTest.java @@ -0,0 +1,191 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.HashSet; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Protocol.Accept; +import com.sleepycat.je.rep.elections.Protocol.Propose; +import com.sleepycat.je.rep.elections.Protocol.StringValue; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.util.test.TestBase; + +/** + * Tests the Acceptor Protocol for the correct responses to Propose and Accept + * messages, based on the Paxos protocol. 
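+ *
+ * As a simplified sketch (not the actual Acceptor implementation), the
+ * phase 1/2 behavior these tests exercise looks like:
+ *
+ * <pre>
+ * // Phase 1: promise only proposals higher than any promised so far.
+ * return (proposal.compareTo(promised) > 0) ? PROMISE : REJECT;
+ * // Phase 2: accept only if no higher proposal has since been promised.
+ * return (proposal.compareTo(promised) >= 0) ? ACCEPTED : REJECT;
+ * </pre>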
+ */
+public class AcceptorTest extends TestBase {
+
+    Protocol protocol;
+    Acceptor acceptor;
+    DataChannelFactory channelFactory;
+
+    TimebasedProposalGenerator proposalGenerator =
+        new TimebasedProposalGenerator();
+
+    @Override
+    @Before
+    public void setUp() {
+        channelFactory =
+            DataChannelFactoryBuilder.construct(
+                RepTestUtils.readRepNetConfig());
+
+        Acceptor.SuggestionGenerator suggestionGenerator =
+            new Acceptor.SuggestionGenerator() {
+
+                @Override
+                public Value get(Proposal proposal) {
+                    return new StringValue("VALUE");
+                }
+
+                @Override
+                public Ranking getRanking(Proposal proposal) {
+                    return new Ranking(100, 0);
+                }
+            };
+        protocol = new Protocol
+            (TimebasedProposalGenerator.getParser(),
+             MasterValue.getParser(),
+             "TestGroup",
+             new NameIdPair("n1", 1),
+             null,
+             channelFactory);
+        protocol.updateNodeIds(new HashSet<Integer>
+                               (Arrays.asList(Integer.valueOf(1))));
+        RepNode rn = new RepNode(new NameIdPair("n0", 0)) {
+            @Override
+            public int getElectionPriority() {
+                return 1;
+            }
+        };
+        ElectionsConfig ac = new RepElectionsConfig(rn);
+        acceptor = new Acceptor
+            (protocol,
+             ac,
+             suggestionGenerator);
+    }
+
+    @Override
+    @After
+    public void tearDown() {
+        acceptor = null;
+    }
+
+    void checkPropose(Proposal pn, Protocol.MessageOp checkOp) {
+        Propose prop = protocol.new Propose(pn);
+        ResponseMessage prom1 = acceptor.process(prop);
+
+        assertEquals(checkOp, prom1.getOp());
+    }
+
+    void checkAccept(Proposal pn, Value v, Protocol.MessageOp checkOp) {
+        Accept a = protocol.new Accept(pn, v);
+        ResponseMessage ad = acceptor.process(a);
+        assertEquals(checkOp, ad.getOp());
+    }
+
+    @Test
+    public void testAcceptor() {
+        Proposal pn0 = proposalGenerator.nextProposal();
+        Proposal pn1 = proposalGenerator.nextProposal();
+
+        /* Proposal numbers should be in ascending order. */
+        assertTrue(pn1.compareTo(pn0) > 0);
+
+        checkPropose(pn1, protocol.PROMISE);
+
+        /* A lower-numbered proposal should be rejected. */
+        checkPropose(pn0, protocol.REJECT);
+
+        Value v = new StringValue("VALUE");
+        checkAccept(pn1, v, protocol.ACCEPTED);
+
+        /* .. and continue to be rejected after the acceptance. */
+        checkPropose(pn0, protocol.REJECT);
+
+        /* .. higher proposals should still be accepted. */
+        Proposal pn2 = proposalGenerator.nextProposal();
+        assertTrue(pn2.compareTo(pn1) > 0);
+        checkPropose(pn2, protocol.PROMISE);
+        checkAccept(pn2, v, protocol.ACCEPTED);
+
+        /* .. and ones lower than the promised proposal are rejected.
+         */
+        checkAccept(pn0, v, protocol.REJECT);
+        checkAccept(pn1, v, protocol.REJECT);
+    }
+
+    private class RepElectionsConfig implements ElectionsConfig {
+
+        private final RepNode repNode;
+        private String groupName;
+
+        public RepElectionsConfig(RepNode repNode) {
+            this.repNode = repNode;
+
+            if (repNode.getRepImpl() == null) {
+                /* when used for unit testing */
+                return;
+            }
+            groupName =
+                repNode.getRepImpl().getConfigManager().get(GROUP_NAME);
+        }
+
+        /**
+         * Used for testing only.
+         * @param groupName the group name to report from getGroupName()
+         */
+        public void setGroupName(String groupName) {
+            this.groupName = groupName;
+        }
+        public String getGroupName() {
+            return groupName;
+        }
+        public NameIdPair getNameIdPair() {
+            return repNode.getNameIdPair();
+        }
+        public ServiceDispatcher getServiceDispatcher() {
+            return repNode.getServiceDispatcher();
+        }
+        public int getElectionPriority() {
+            return repNode.getElectionPriority();
+        }
+        public int getLogVersion() {
+            return repNode.getLogVersion();
+        }
+        public RepImpl getRepImpl() {
+            return repNode.getRepImpl();
+        }
+        public RepNode getRepNode() {
+            return repNode;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/elections/ElectionWithLogVersionTest.java b/test/com/sleepycat/je/rep/elections/ElectionWithLogVersionTest.java
new file mode 100644
index 0000000..51ea2da
--- /dev/null
+++ b/test/com/sleepycat/je/rep/elections/ElectionWithLogVersionTest.java
@@ -0,0 +1,223 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.elections;
+
+import static com.sleepycat.je.Durability.ReplicaAckPolicy.ALL;
+import static com.sleepycat.je.Durability.SyncPolicy.NO_SYNC;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+/**
+ * Tests for the influence of log version on the outcome of elections.
+ * Generally the node(s) with the oldest log versions should be preferred,
+ * because an old-version replica downstream of a newer master may not know how
+ * to process newer log record types. But if a group-majority of nodes is at
+ * the later version, it's OK to forsake any other nodes at older versions,
+ * because although the older replicas might choke and die, the group as a
+ * whole survives (relying on the majority).
+ */
+public class ElectionWithLogVersionTest extends RepTestBase {
+    /*
+     * Check that JE HA can support more than two log formats during an
+     * upgrade.
+     */
+    @Test
+    public void testMultiVersions()
+        throws Exception {
+
+        createGroup();
+
+        /* Set a different log version on each node.
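+         * (Node i is assigned version i, so once the master, node 0, is
+         * shut down, repEnvInfo[1] holds the lowest remaining version and
+         * is the expected election winner below.)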
*/ + for (int i = 0; i < repEnvInfo.length; i++) { + repEnvInfo[i].getRepNode().setVersion(i); + } + + /* Make sure we're really getting at least 3 different log versions. */ + assertTrue(repEnvInfo.length > 2); + + /* + * Shut down the master, in order to provoke an election. + */ + RepEnvInfo master = repEnvInfo[0]; + assertTrue(master.isMaster()); + master.closeEnv(); + + /* + * The node with the lowest log version should be elected as the + * master, which is repEnvInfo[1]. + */ + awaitElectionResult(1); + + /* + * Restart the closed node, just so as to leave the group in the + * healthy state that's expected by tearDown(). + */ + repEnvInfo[0].openEnv(); + } + + private int awaitElectionResult(int ... nodes) + throws InterruptedException { + + long deadline = System.currentTimeMillis() + 4000; + while (System.currentTimeMillis() < deadline) { + for (int i : nodes) { + if (repEnvInfo[i].isMaster()) { + return i; + } + } + Thread.sleep(100); + } + fail("no election winner emerged from expected set"); + return -1; // not reached + } + + /** + * Check that election results comply with the rules about nodes' log + * versions for JE 5. + */ + @Test + public void testLogVersionSensitivityJe5() + throws Exception { + + /* + * Set the log version that supports replication in the previous format + * to LOG_VERSION + 1, so that the current version requires enforcing + * rules involving log versions. + */ + RankingProposer.testLogVersionReplicatePrevious = + LogEntryType.LOG_VERSION + 1; + try { + testLogVersionSensitivityInternal(true); + } finally { + RankingProposer.testLogVersionReplicatePrevious = 0; + } + } + + /** + * Check that election results are not sensitive to log versions for + * releases greater than JE 5. + */ + @Test + public void testLogVersionSensitivity() + throws Exception { + + /* + * Set the log version that supports replication in the previous format + * to be LOG_VERSION, so that the current version does not require + * enforcing rules involving log versions. + */ + RankingProposer.testLogVersionReplicatePrevious = + LogEntryType.LOG_VERSION; + try { + testLogVersionSensitivityInternal(false); + } finally { + RankingProposer.testLogVersionReplicatePrevious = 0; + } + } + + private void testLogVersionSensitivityInternal(final boolean je5) + throws Exception { + + createGroup(); + + /* + * Set the log version of the first four replicas to a lower version, + * then shutdown the master in order to provoke an election. In the + * normal case, the 5th replica has the largest port number and will be + * elected master, but in this case, and only when using log versions + * for JE 5 and earlier, it will still be a replica because it has the + * largest version. + */ + for (int i = 0; i < 4; i++) { + repEnvInfo[i].getRepNode().setVersion(LogEntryType.LOG_VERSION - 1); + } + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + RepEnvInfo master = repEnvInfo[0]; + master.closeEnv(); + int newMasterIndex = awaitElectionResult(1, 2, 3, 4); + if (je5) { + assertTrue(newMasterIndex != 4); + } + + master = repEnvInfo[newMasterIndex]; + + /* + * Now try setting a majority of the group to the higher log version. + * In this case, it is considered OK to elect one of the higher-version + * nodes, effectively abandoning the laggards. + * + * repEnvInfo[4] is still already at the higher log version. We know + * [0] is a replica, since we're just now restarting it. 
Need to find + * one more node other than the current master to have its log version + * set and participate in the next election: try "1", but if that + * happens to be the previous election winner then use "2" instead. + */ + repEnvInfo[0].openEnv(); + repEnvInfo[0].getRepNode().setVersion(LogEntryType.LOG_VERSION); + int otherReplica = newMasterIndex == 1 ? 2 : 1; + repEnvInfo[otherReplica].getRepNode().setVersion( + LogEntryType.LOG_VERSION); + + /* + * Make sure all replicas are caught up with the master, to avoid + * having differing VLSNs influence the outcome of the following + * election. + */ + ReplicatedEnvironment masterEnv = master.getEnv(); + + /* + * Ensure all replicas have had a chance to join before using the + * durability ALL commit to avoid an IRE. + */ + assertEquals(master, + findMasterAndWaitForReplicas(60000, + repEnvInfo.length -1, + repEnvInfo)); + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + @SuppressWarnings("hiding") + DatabaseEntry key = new DatabaseEntry(new byte[1]); + DatabaseEntry value = new DatabaseEntry(new byte[1]); + TransactionConfig tc = + new TransactionConfig().setDurability + (new Durability(NO_SYNC, NO_SYNC, ALL)); + Transaction txn = masterEnv.beginTransaction(null, tc); + db.put(txn, key, value); + txn.commit(); + db.close(); + + master.closeEnv(); + if (je5) { + awaitElectionResult(0, otherReplica, 4); + } else { + awaitElectionResult(0, 1, 2, 3, 4); + } + + /* As usual, leave in a clean state, just to placate tearDown(). */ + master.openEnv(); // not really still master at this point + } +} diff --git a/test/com/sleepycat/je/rep/elections/ElectionsTest.java b/test/com/sleepycat/je/rep/elections/ElectionsTest.java new file mode 100644 index 0000000..55d4315 --- /dev/null +++ b/test/com/sleepycat/je/rep/elections/ElectionsTest.java @@ -0,0 +1,502 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.elections;
+
+import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE1_NO_NON_ZERO_PRIO;
+import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PHASE1_NO_QUORUM;
+import static com.sleepycat.je.rep.elections.ProposerStatDefinition.PROMISE_COUNT;
+import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.rep.QuorumPolicy;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.arbitration.Arbiter;
+import com.sleepycat.je.rep.elections.Acceptor.SuggestionGenerator;
+import com.sleepycat.je.rep.elections.Proposer.Proposal;
+import com.sleepycat.je.rep.elections.Protocol.Value;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.RepNodeImpl;
+import com.sleepycat.je.rep.impl.node.ElectionQuorum;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher;
+import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Tests for elections as a whole.
+ */
+public class ElectionsTest extends TestBase {
+
+    /* Number of nodes in the test */
+    private static final int nodes = 3;
+    private static final int monitors = 1;
+    private int nretries;
+
+    private final Object notificationsLock = new Object();
+    private int listenerNotifications = 0;
+
+    private final ReplicationConfig repConfig[] =
+        new ReplicationConfig[nodes + 1];
+    // private Monitor monitor;
+    private boolean monitorInvoked = false;
+
+    private final List<Elections> electionNodes = new LinkedList<Elections>();
+    private MasterValue winningValue = null;
+
+    /* Latch to ensure that all required listeners have made it through. */
+    CountDownLatch listenerLatch;
+
+    private RepGroupImpl repGroup = null;
+
+    @Override
+    @Before
+    public void setUp() throws IOException {
+        repGroup = RepTestUtils.createTestRepGroup(nodes, monitors);
+        for (RepNodeImpl rn : repGroup.getAllElectableMembers()) {
+            ReplicationConfig config = new ReplicationConfig();
+            config.setRepNetConfig(
+                ReplicationNetworkConfig.create(
+                    RepTestUtils.readNetProps()));
+            repConfig[rn.getNodeId()] = config;
+            config.setNodeName(rn.getName());
+            config.setNodeHostPort(rn.getHostName() + ":" + rn.getPort());
+        }
+    }
+
+    @Override
+    @After
+    public void tearDown() throws Exception {
+        if ((electionNodes != null) && (electionNodes.size() > 0)) {
+            electionNodes.get(0).shutdownAcceptorsLearners(
+                repGroup.getAllAcceptorSockets(),
+                repGroup.getAllHelperSockets());
+
+            for (Elections node : electionNodes) {
+                node.getServiceDispatcher().shutdown();
+            }
+        }
+    }
+
+    /**
+     * Simulates the startup of the first "n" nodes. If < n nodes are started,
+     * the others simulate being down.
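+     * For example, startReplicationNodes(2, 3) starts two nodes of a
+     * three-node group, leaving the third one down.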
+ * + * @param nstart nodes to start up + * @param groupSize the size of the group + * @throws IOException + */ + public void startReplicationNodes(final int nstart, + final int groupSize, + final boolean testPriority) + throws IOException { + + for (short nodeNum = 1; nodeNum <= nstart; nodeNum++) { + RepNode rn = newRepNode(groupSize, nodeNum, testPriority); + RepElectionsConfig ec = new RepElectionsConfig(rn); + ec.setGroupName("TEST_GROUP"); + Elections elections = + new Elections(ec, + new TestListener(), + newSuggestionGenerator(nodeNum, testPriority)); + elections.getRepNode().getServiceDispatcher().start(); + elections.startLearner(); + elections.participate(); + electionNodes.add(elections); + elections.updateRepGroup(repGroup); + } + + // Start up the Monitor as well. + + /* + InetSocketAddress monitorSocket = + repGroup.getMonitors().iterator().next().getLearnerSocket(); + monitor = new Monitor(repConfig[1].getGroupName(), + monitorSocket, + repGroup); + monitor.setMonitorChangeListener(new MonitorChangeListener() { + @Override + public void replicationChange( + MonitorChangeEvent monitorChangeEvent) { + monitorInvoked = true; + assertEquals(winningValue.getMasterNodeId(), + ((NewMasterEvent) monitorChangeEvent).getMasterId()); + } + }); + monitor.startMonitor(); + */ + } + + public void startReplicationNodes(final int nstart, + final int groupSize) + throws IOException { + startReplicationNodes(nstart, groupSize, false); + } + + private RepNode newRepNode(final int groupSize, + final short nodeNum, + final boolean testPriority) + throws IOException { + + final DataChannelFactory channelFactory = + DataChannelFactoryBuilder.construct( + repConfig[nodeNum].getRepNetConfig(), + repConfig[nodeNum].getNodeName()); + final ServiceDispatcher serviceDispatcher = + new ServiceDispatcher(repConfig[nodeNum].getNodeSocketAddress(), + channelFactory); + + return new RepNode(new NameIdPair(repConfig[nodeNum].getNodeName(), + nodeNum), + serviceDispatcher) { + @Override + public ElectionQuorum getElectionQuorum() { + return new ElectionQuorum() { + + @Override + public boolean haveQuorum(QuorumPolicy quorumPolicy, + int votes) { + return votes >= quorumPolicy.quorumSize(groupSize); + } + }; + } + + @Override + public int getElectionPriority() { + return testPriority ? (groupSize - nodeNum + 1) : + repConfig[nodeNum].getNodePriority(); + } + + /** + * This faked out test node never really does arbitration, but + * needs to be able to return an Arbiter instance + * for election retries. + */ + @Override + public Arbiter getArbiter() { + return new Arbiter(null) { + @Override + public synchronized boolean activateArbitration() { + return false; + } + }; + } + }; + } + + private SuggestionGenerator newSuggestionGenerator( + final short nodeNum, + final boolean testPriority) { + return new Acceptor.SuggestionGenerator() { + @Override + public Value get(Proposal proposal) { + return new MasterValue("testhost", 9999, + new NameIdPair("n" + nodeNum, + nodeNum)); + } + + @Override + public Ranking getRanking(Proposal proposal) { + return new Ranking(testPriority ? 
+                    1000L : nodeNum * 10L, 0);
+            }
+        };
+    }
+
+    public void startReplicationNodes(int nstart)
+        throws IOException {
+        startReplicationNodes(nstart, nstart);
+    }
+
+    class TestListener implements Learner.Listener {
+
+        @Override
+        public void notify(Proposal proposal, Value value) {
+            synchronized (notificationsLock) {
+                listenerNotifications++;
+            }
+            assertEquals(winningValue, value);
+            listenerLatch.countDown();
+        }
+    }
+
+    private Elections setupAndRunElection(QuorumPolicy qpolicy,
+                                          int activeNodes,
+                                          int groupSize)
+        throws IOException, InterruptedException {
+
+        /* Start all of them. */
+        startReplicationNodes(activeNodes, groupSize);
+        winningValue = new MasterValue("testhost", 9999,
+                                       new NameIdPair("n" + (activeNodes),
+                                                      (activeNodes)));
+        return runElection(qpolicy, activeNodes);
+    }
+
+    private Elections setupAndRunElection(int activeNodes) throws IOException,
+        InterruptedException {
+        return setupAndRunElection(QuorumPolicy.SIMPLE_MAJORITY,
+                                   activeNodes,
+                                   activeNodes);
+    }
+
+    private Elections setupAndRunElection(int activeNodes, int groupSize)
+        throws IOException, InterruptedException {
+        return setupAndRunElection(QuorumPolicy.SIMPLE_MAJORITY,
+                                   activeNodes,
+                                   groupSize);
+    }
+
+    private Elections runElection(QuorumPolicy qpolicy, int activeNodes)
+        throws InterruptedException {
+        listenerNotifications = 0;
+        monitorInvoked = false;
+        nretries = 2;
+        listenerLatch = new CountDownLatch(activeNodes);
+        /* Initiate an election on the first node. */
+        Elections testElections = electionNodes.iterator().next();
+
+        testElections.initiateElection(repGroup, qpolicy, nretries);
+        /* Ensure that the Proposer has finished. */
+        testElections.waitForElection();
+        return testElections;
+    }
+
+    private Elections runElection(int activeNodes)
+        throws InterruptedException {
+
+        return runElection(QuorumPolicy.SIMPLE_MAJORITY, activeNodes);
+    }
+
+    /**
+     * Simulates the presence of a simple majority, but with zero-priority
+     * nodes in the mix.
+     */
+    @Test
+    public void testBasicZeroPrio() throws InterruptedException,
+        IOException {
+
+        /* Elections with a mix of zero and non-zero prio nodes. */
+        final int majority = (nodes / 2);
+        /* Have the first < majority nodes be zero prio. */
+
+        for (int i = 1; i < nodes; i++) {
+            repConfig[i].setNodePriority(0);
+        }
+        setupAndRunElection(nodes);
+        listenerLatch.await();
+        assertEquals(nodes, listenerNotifications);
+
+        /* Now remove all non-zero prio nodes and try to hold an election. */
+
+        // Now remove one node and the elections should give up after
+        // retries have expired.
+        electionNodes.get(nodes - 1).getAcceptor().shutdown();
+        Elections testElections = runElection(nodes);
+        /* No successful elections, hence no notification. */
+        assertEquals(0, listenerNotifications);
+
+        /* Ensure that all retries were due to lack of a Quorum. */
+        assertEquals
+            (nretries,
+             testElections.getStats().getInt(PHASE1_NO_NON_ZERO_PRIO));
+    }
+
+    /**
+     * Tests a basic election with everything being normal.
+     */
+    @Test
+    public void testBasicAllNodes()
+        throws InterruptedException, IOException {
+
+        /* Start all of them. */
+        setupAndRunElection(nodes);
+        listenerLatch.await();
+
+        assertEquals(nodes, listenerNotifications);
+        // assertTrue(monitorInvoked);
+        runElection(nodes);
+        listenerLatch.await();
+        assertEquals(nodes, listenerNotifications);
+        assertFalse(monitorInvoked);
+    }
+
+    @Test
+    public void testBasicAllPrioNodes()
+        throws InterruptedException, IOException {
+
+        /* Start all of them.
*/ + startReplicationNodes(nodes, nodes, true); + winningValue = new MasterValue("testhost", 9999, + new NameIdPair("n1", 1)); + runElection(QuorumPolicy.SIMPLE_MAJORITY, nodes); + listenerLatch.await(); + + assertEquals(nodes, listenerNotifications); + // assertTrue(monitorInvoked); + runElection(nodes); + listenerLatch.await(); + assertEquals(nodes, listenerNotifications); + assertFalse(monitorInvoked); + } + + /** + * Simulates one node never having come up. + */ + @Test + public void testBasicAllButOneNode() throws InterruptedException, + IOException { + + /* + * Simulate one node down at startup, but sufficient nodes for a quorum. + */ + setupAndRunElection(nodes - 1); + listenerLatch.await(); + assertEquals(nodes - 1, listenerNotifications); + // assertTrue(monitorInvoked); + } + + /** + * Tests a basic election with one node having crashed. + */ + @Test + public void testBasicOneNodeCrash() throws InterruptedException, + IOException { + /* Start all of them. */ + Elections testElections = setupAndRunElection(nodes); + listenerLatch.await(); + + assertEquals(nodes, listenerNotifications); + // assertTrue(monitorInvoked); + assertEquals(nodes, testElections.getStats().getInt(PROMISE_COUNT)); + electionNodes.get(0).getAcceptor().shutdown(); + testElections = runElection(nodes); + listenerLatch.await(); + /* The listener should have still obtained a notification. */ + assertEquals(nodes, listenerNotifications); + /* Master unchanged so monitor not invoked */ + assertFalse(monitorInvoked); + assertEquals(nodes - 1, testElections.getStats().getInt(PROMISE_COUNT)); + } + + /** + * Tests a QuorumPolicy of ALL. + */ + @Test + public void testQuorumPolicyAll() throws InterruptedException, IOException { + + /* Start all of them. */ + Elections testElections = + setupAndRunElection(QuorumPolicy.ALL, nodes, nodes); + listenerLatch.await(); + + assertEquals(nodes, listenerNotifications); + // assertTrue(monitorInvoked); + assertEquals(nodes, testElections.getStats().getInt(PROMISE_COUNT)); + + // Now remove one node and the elections should give up after + // retries have expired. + electionNodes.get(0).getAcceptor().shutdown(); + testElections = runElection(QuorumPolicy.ALL, nodes); + + assertEquals(0, listenerNotifications); + assertFalse(monitorInvoked); + + /* Ensure that all retries were due to lack of a Quorum. */ + assertEquals + (nretries, testElections.getStats().getInt(PHASE1_NO_QUORUM)); + } + + /** + * Tests the case where a quorum could not be reached. + * + * @throws IOException + * @throws InterruptedException + */ + @Test + public void testNoQuorum() throws IOException, InterruptedException { + + Elections testElections = setupAndRunElection(nodes / 2, nodes); + + /* + * No listeners were invoked so don't wait for a latch. No quorum, + * therefore no listener invocations. + */ + assertEquals(0, listenerNotifications); + assertFalse(monitorInvoked); + /* No listeners were invoked. */ + assertEquals(nodes / 2, listenerLatch.getCount()); + /* Ensure that all retries were due to lack of a Quorum. 
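+         * (With nodes == 3, a simple majority needs two promises, but only
+         * nodes / 2 == 1 node is active here, so phase 1 can never reach a
+         * quorum.)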
*/ + assertEquals + (nretries, testElections.getStats().getInt(PHASE1_NO_QUORUM)); + } + + private class RepElectionsConfig implements ElectionsConfig { + + private final RepNode repNode; + private String groupName; + + public RepElectionsConfig(RepNode repNode) { + this.repNode = repNode; + + if (repNode.getRepImpl() == null) { + /* when used for unit testing */ + return; + } + groupName = + repNode.getRepImpl().getConfigManager().get(GROUP_NAME); + } + + /** + * used for testing only + * @param groupName + */ + public void setGroupName(String groupName) { + this.groupName = groupName; + } + public String getGroupName() { + return groupName; + } + public NameIdPair getNameIdPair() { + return repNode.getNameIdPair(); + } + public ServiceDispatcher getServiceDispatcher() { + return repNode.getServiceDispatcher(); + } + public int getElectionPriority() { + return repNode.getElectionPriority(); + } + public int getLogVersion() { + return repNode.getLogVersion(); + } + public RepImpl getRepImpl() { + return repNode.getRepImpl(); + } + public RepNode getRepNode() { + return repNode; + } + } +} diff --git a/test/com/sleepycat/je/rep/elections/JoinerElectionTest.java b/test/com/sleepycat/je/rep/elections/JoinerElectionTest.java new file mode 100644 index 0000000..2cd176a --- /dev/null +++ b/test/com/sleepycat/je/rep/elections/JoinerElectionTest.java @@ -0,0 +1,226 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.elections; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.impl.node.Replica; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.TestHookAdapter; + +public class JoinerElectionTest extends RepTestBase { + private static final int GROUP_SIZE = 3; + private static final int INITIAL_NODES = 2; + private static final int N_TXN = 100; + private static final long WAIT_LIMIT = 60000; + private RepImpl repImpl; + volatile private RepNode repNode; + + @Override + @Before + public void setUp() throws Exception { + groupSize = GROUP_SIZE; + super.setUp(); + } + + @Override + @After + public void tearDown() throws Exception { + Replica.setInitialReplayHook(null); + DbEnvPool.getInstance().setBeforeFinishInitHook(null); + super.tearDown(); + } + + /** Reproduces 21915. */ + @Test + public void testPartialGroupDB() throws Exception { + createGroup(2); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment env = master.getEnv(); + + Database db = env.openDatabase(null, TEST_DB_NAME, dbconfig); + TransactionConfig tc = new TransactionConfig(); + Durability d = + new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + tc.setDurability(d); + for (int count = 0; count < N_TXN; count++) { + Transaction txn = env.beginTransaction(null, tc); + IntegerBinding.intToEntry(count, key); + LongBinding.longToEntry(count, data); + db.put(txn, key, data); + txn.commit(); + } + db.close(); + + CountDownLatch latch = new CountDownLatch(1); + RepImplRetriever rir = new RepImplRetriever(latch); + DbEnvPool.getInstance().setBeforeFinishInitHook(rir); + Replica.setInitialReplayHook(new HalfBacklogSink(latch)); + RepEnvInfo replica = repEnvInfo[2]; + try { + replica.openEnv(); + fail("should have failed initial open"); + } catch (EnvironmentFailureException e) { + if (!(e.getCause() instanceof MyTestException)) { + throw e; + } + } + Replica.setInitialReplayHook(null); + DbEnvPool.getInstance().setBeforeFinishInitHook(null); + + /* + * Now that we've done all this preparation (getting Node3's copy of + * the GroupDB only partially applied), start Node3 again, but without + * being able to reach the master. Ensure that it doesn't start an + * election. 
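+         *
+         * (With only a partially applied GroupDB, the node cannot know the
+         * true group composition, so any election it initiated might win
+         * with a bogus quorum; the assertion below therefore checks that
+         * the election count stays at zero.)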
+         */
+        logger.info("preparation complete, will now open env");
+        RepTestUtils.disableServices(master);
+        String node2hostPort = repEnvInfo[1].getRepConfig().getNodeHostPort();
+        replica.getRepConfig().setHelperHosts(node2hostPort);
+
+        try {
+            replica.openEnv();
+            fail("expected UnknownMasterException");
+        } catch (UnknownMasterException e) {
+            // expected
+        }
+
+        assertEquals(0, repNode.getElections().getElectionCount());
+    }
+
+    class RepImplRetriever extends TestHookAdapter<EnvironmentImpl> {
+        private final CountDownLatch latch;
+
+        RepImplRetriever(CountDownLatch latch) {
+            this.latch = latch;
+        }
+
+        @Override
+        public void doHook(EnvironmentImpl envImpl) {
+            repImpl = (RepImpl)envImpl;
+            latch.countDown();
+        }
+    }
+
+    class HalfBacklogSink extends TestHookAdapter<Message> {
+        private final CountDownLatch latch;
+        private int txnCount;
+
+        HalfBacklogSink(CountDownLatch latch) {
+            this.latch = latch;
+        }
+
+        @Override
+        public void doHook(Message m) {
+
+            /*
+             * Before our initial attempt to add Node3 to the group, the group
+             * did a few initial internal/system transactions, including
+             * GroupDB updates for the first two nodes, followed by 100 user
+             * txns. By counting to 100 here we expect we'll be somewhere
+             * within the series of user txns (undoubtedly near the end). The
+             * idea is, we want to stop after the point where the two initial
+             * nodes have been added, but before we see the addition of Node3.
+             */
+            if (m.getOp() == Protocol.COMMIT && ++txnCount == N_TXN) {
+
+                // wait until we see the desired group composition (happens in
+                // another thread)
+
+                // flush the log, and then die
+
+                /*
+                 * Wait until repImpl has been established in the user thread,
+                 * though by the time we get to having seen 100 commits that
+                 * should long since have happened.
+                 */
+                try {
+                    latch.await(WAIT_LIMIT, TimeUnit.MILLISECONDS);
+                } catch (Exception e) {
+                    // Should never happen, and if it does the test is
+                    // hopeless. In fact we might as well get an NPE on
+                    // repImpl.
+                    e.printStackTrace();
+                }
+                repNode = repImpl.getRepNode();
+
+                /*
+                 * Wait for the expected changes to the GroupDB to have been
+                 * applied by the Replay thread. Again, by the time we get to
+                 * 100 commits that has probably already happened long ago.
+                 */
+                try {
+                    RepTestUtils.awaitCondition(new Callable<Boolean>() {
+                        public Boolean call() {
+                            RepGroupImpl group =
+                                repImpl.getRepNode().getGroup();
+                            if (group == null ||
+                                group.getElectableMembers().size() <
+                                INITIAL_NODES) {
+                                return false;
+                            }
+                            return true;
+                        }
+                    });
+                } catch (Exception e) {
+                    // Shouldn't happen (doesn't include JUnit Assertion
+                    // failure).
+                    e.printStackTrace();
+                }
+                repImpl.flushLog(false);
+                throw new MyTestException();
+            }
+        }
+    }
+
+    @SuppressWarnings("serial")
+    static class MyTestException extends DatabaseException {
+        MyTestException() {
+            super("testing");
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/elections/ProtocolFailureTest.java b/test/com/sleepycat/je/rep/elections/ProtocolFailureTest.java
new file mode 100644
index 0000000..a198365
--- /dev/null
+++ b/test/com/sleepycat/je/rep/elections/ProtocolFailureTest.java
@@ -0,0 +1,273 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import static com.sleepycat.je.rep.impl.TextProtocol.SEPARATOR; +import static com.sleepycat.je.rep.impl.TextProtocol.SEPARATOR_REGEXP; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.TextProtocol; +import com.sleepycat.je.rep.impl.TextProtocol.Message; +import com.sleepycat.je.rep.impl.TextProtocol.RequestMessage; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.TextProtocol.TOKENS; +import com.sleepycat.je.utilint.TestHook; + +/** + * Check rep node resilience when there are various types of protocol message + * format corruption or semantic inconsistencies. The intent here is to confine + * the failure as much as possible and prevent the environment itself from + * failing. The tests here are pretty basic for the most part. We can augment + * them as we find more scenarios of interest. + */ +public class ProtocolFailureTest extends RepTestBase { + + @Override + @Before + public void setUp() throws Exception { + groupSize = 3; + super.setUp(); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + } + + @Test + public void testBadVersionReq() throws InterruptedException { + testInternal(TOKENS.VERSION_TOKEN, RequestMessage.class); + } + + @Test + public void testBadVersionResp() throws InterruptedException { + testInternal(TOKENS.VERSION_TOKEN, ResponseMessage.class); + } + + @Test + public void testBadNameReq() throws InterruptedException { + testInternal(TOKENS.NAME_TOKEN, RequestMessage.class); + } + + @Test + public void testBadNameResp() throws InterruptedException { + testInternal(TOKENS.NAME_TOKEN, ResponseMessage.class); + } + + @Test + public void testBadId() throws InterruptedException { + testInternal(TOKENS.ID_TOKEN, RequestMessage.class); + } + + @Test + public void testBadIdResp() throws InterruptedException { + testInternal(TOKENS.ID_TOKEN, ResponseMessage.class); + } + + @Test + public void testBadOp() throws InterruptedException { + testInternal(TOKENS.OP_TOKEN, RequestMessage.class); + } + + @Test + public void testBadOpResp() throws InterruptedException { + testInternal(TOKENS.OP_TOKEN, ResponseMessage.class); + } + + @Test + public void testBadPayloadRequest() { + /* + * Future: Need custom tests and custom code for bad payload requests. + */ + // testInternal(TOKENS.FIRST_PAYLOAD_TOKEN, RequestMessage.class); + } + + @Test + public void testBadPayloadResp() throws InterruptedException { + testInternal(TOKENS.FIRST_PAYLOAD_TOKEN, ResponseMessage.class); + } + + /** + * Tests damage to a specific field as identified by the message token for + * a specific type of message. 
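+     * For instance (an editorial illustration; the token values and the
+     * pipe separator are hypothetical stand-ins for TextProtocol.SEPARATOR):
+     * <pre>
+     *   original message: "2|node1|1|PROPOSE|payload"
+     *   munged op token:  "2|node1|1|badPROPOSE|payload"
+     * </pre>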
+ * + * @param testToken the token to be munged + * @param testMessageType the type of messages (request/response) to be + * munged. + */ + private void testInternal(TOKENS testToken, + Class testMessageType) + throws InterruptedException { + createGroup(3); + closeNodes(repEnvInfo); /* Close all nodes to eliminate masters */ + + repEnvInfo[0].getRepConfig(). + setConfigParam("je.rep.envUnknownStateTimeout", "1 ms"); + repEnvInfo[0].openEnv(); + + final Protocol protocol = + repEnvInfo[0].getRepNode().getElections().getProtocol(); + + final Set modOps = protocol.getOps(testMessageType); + + final SerDeHook corruptOpHook = new SerDeHook(testToken, modOps); + TextProtocol.setSerDeHook(corruptOpHook); + repEnvInfo[2].getRepConfig(). + setConfigParam("je.rep.envUnknownStateTimeout", "5 s"); + repEnvInfo[2].openEnv(); + + /* Verify that hook was called by the test. */ + assertTrue(corruptOpHook.count > 0); + + /* + * Verify that nodes are up and standing, that is, not DETACHED and + * there's no master or replica since the election messages were munged. + */ + assertEquals(State.UNKNOWN, repEnvInfo[2].getEnv().getState()); + assertEquals(State.UNKNOWN, repEnvInfo[0].getEnv().getState()); + + /* Start sending valid messages. */ + TextProtocol.setSerDeHook(null); + + assertTrue(findMasterWait(60000, repEnvInfo[0], repEnvInfo[2]) + != null); + } + + /** + * The Hook that mungs messages. Note that since the hook is invoked from + * a method that's re-entrant, its methods must be re-entrant as well, + */ + private class SerDeHook implements TestHook { + final TOKENS testToken; + + /* Number of times messages were actually modified. */ + volatile int count; + + /* + * Messages with this op will be modified at the token identified by + * testToken. + */ + final Set modOps; + + /* + * Use ThreadLocal to make the doHook and getValue methods re-entrant + */ + final ThreadLocal messageLine = new ThreadLocal<>(); + + + /** + * Hook constructor + * @param testToken the token that must be munged within the message + * @param modOps the ops associated with messages that should be + * munged. All other messages are left intact. + */ + public SerDeHook(TOKENS testToken, + Set modOps) { + super(); + this.testToken = testToken; + this.modOps = Collections.unmodifiableSet(modOps); + } + + @Override + public void hookSetup() { + throw new UnsupportedOperationException("Method not implemented: " + + "hookSetup"); + } + + @Override + public void doIOHook() throws IOException { + throw new UnsupportedOperationException("Method not implemented: " + + "doIOHook"); + } + + @Override + public void doHook() { + throw new UnsupportedOperationException("Method not implemented: " + + "doHook"); + } + + @Override + public void doHook(String origMessage) { + + if (origMessage == null) { + messageLine.set(null); + return; + } + + final String[] tokens = origMessage.split(SEPARATOR_REGEXP); + if (testToken.ordinal() >= tokens.length) { + /* The messages does not have any payload. */ + messageLine.set(origMessage); + return; + } + + final String opToken = tokens[TOKENS.OP_TOKEN.ordinal()]; + if (!modOps.contains(opToken)) { + /* Not in the set of modifiable messages */ + messageLine.set(origMessage); + return; + } + + /* + * Modify the token. The message format is: + * |||| + */ + count++; + + final String token = tokens[testToken.ordinal()]; + + final String leadSeparator = + (testToken.ordinal() > 0) ? SEPARATOR : ""; + final String trailingSeparator = + (testToken.ordinal() == (tokens.length - 1)) ? 
+ "" : SEPARATOR ; + final String badToken = + leadSeparator + "bad" + token + trailingSeparator; + + String newMessage = origMessage. + replace(leadSeparator + token + trailingSeparator, badToken); + assertTrue(!messageLine.equals(origMessage)); + + if (testToken.equals(TOKENS.FIRST_PAYLOAD_TOKEN)) { + /* Mung the rest of the payload. */ + int payloadStart = newMessage.indexOf(badToken); + newMessage = + newMessage.substring(0, payloadStart) + leadSeparator + + "badPayload"; + } + messageLine.set(newMessage); + } + + @Override + public String getHookValue() { + try { + return messageLine.get(); + } finally { + messageLine.remove(); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/elections/ProtocolTest.java b/test/com/sleepycat/je/rep/elections/ProtocolTest.java new file mode 100644 index 0000000..9f5aae5 --- /dev/null +++ b/test/com/sleepycat/je/rep/elections/ProtocolTest.java @@ -0,0 +1,394 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.elections; + +import static org.junit.Assert.assertEquals; + +import java.util.Arrays; +import java.util.HashSet; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.elections.Acceptor.SuggestionGenerator.Ranking; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Proposer.ProposalParser; +import com.sleepycat.je.rep.elections.Protocol.Promise; +import com.sleepycat.je.rep.elections.Protocol.StringValue; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.elections.Protocol.ValueParser; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.TextProtocol; +import com.sleepycat.je.rep.impl.TextProtocol.InvalidMessageException; +import com.sleepycat.je.rep.impl.TextProtocol.Message; +import com.sleepycat.je.rep.impl.TextProtocolTestBase; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; + +public class ProtocolTest extends TextProtocolTestBase { + + private TestProtocol protocol = null; + private DataChannelFactory channelFactory; + + @Override + @Before + public void setUp() + throws Exception { + + channelFactory = + DataChannelFactoryBuilder.construct( + RepTestUtils.readRepNetConfig(), GROUP_NAME); + + protocol = new TestProtocol(TimebasedProposalGenerator.getParser(), + new ValueParser() { + @Override + public Value parse(String wireFormat) { + if ("".equals(wireFormat)) { + return null; + } + return new StringValue(wireFormat); + + } + }, + GROUP_NAME, + new NameIdPair(NODE_NAME, 1), + null, + channelFactory); + protocol.updateNodeIds(new HashSet + (Arrays.asList(new Integer(1)))); + } + + @Override + @After + public void tearDown() { + protocol = null; + } + + @Override + protected Message[] createMessages() { + TimebasedProposalGenerator 
proposalGenerator = + new TimebasedProposalGenerator(); + Proposal proposal = proposalGenerator.nextProposal(); + Value value = new Protocol.StringValue("test1"); + Value svalue = new Protocol.StringValue("test2"); + Message[] messages = new Message[] { + protocol.new Propose(proposal), + protocol.new Accept(proposal, value), + protocol.new Result(proposal, value), + protocol.new Shutdown(), + protocol.new MasterQuery(), + + protocol.new Reject(proposal), + protocol.new Promise(proposal, value, svalue, + new Ranking(100, 101), 1, + LogEntryType.LOG_VERSION, + JEVersion.CURRENT_VERSION), + protocol.new Accepted(proposal, value), + protocol.new MasterQueryResponse(proposal, value) + }; + + return messages; + } + + @Override + protected TextProtocol getProtocol() { + return protocol; + } + + @Test + public void testPromiseCompatibility() throws InvalidMessageException { + TimebasedProposalGenerator proposalGenerator = + new TimebasedProposalGenerator(); + Proposal proposal = proposalGenerator.nextProposal(); + Value value = new Protocol.StringValue("test1"); + Value svalue = new Protocol.StringValue("test2"); + Promise prom = + protocol.new Promise(proposal, value, svalue,new + Ranking(100, 101), 1, + LogEntryType.LOG_VERSION, + JEVersion.CURRENT_VERSION); + assertEquals(101, prom.getSuggestionRanking().minor); + + final String wireFormatNew = prom.wireFormat(); + + int tieBreaker = wireFormatNew.lastIndexOf(TextProtocol.SEPARATOR); + final String wireFormatOld = wireFormatNew.substring(0, tieBreaker); + + /* Simulate new node reading old Promise format. */ + Promise prom2 = (Promise)protocol.parse(wireFormatOld); + + assertEquals(Ranking.UNINITIALIZED.major, + prom2.getSuggestionRanking().minor); + + TestProtocol.OldPromise oldProm = protocol.new + OldPromise(proposal, value, svalue, 100, 1, + LogEntryType.LOG_VERSION, + JEVersion.CURRENT_VERSION); + + /* Simulate old node reading old and new promise formats. */ + protocol.replacePromise(); + + assertEquals(oldProm.wireFormat(), wireFormatOld); + TestProtocol.OldPromise oldProm1 = + (TestProtocol.OldPromise)protocol.parse(wireFormatOld); + TestProtocol.OldPromise oldProm2 = + (TestProtocol.OldPromise)protocol.parse(wireFormatNew); + + /* verify they check out equal. */ + assertEquals(oldProm1, oldProm2); + } + + + /** + * Subclass of Protocol to facilitate compatibility testing with the + * old definition of Promise. + */ + class TestProtocol extends Protocol { + + /* An instance of ProposalParser used to de-serialize proposals */ + private final ProposalParser proposalParser; + + /* An instance of ValueParser used to de-serialize values */ + private final ValueParser valueParser; + + public TestProtocol(ProposalParser proposalParser, + ValueParser valueParser, + String groupName, + NameIdPair nameIdPair, + RepImpl repImpl, + DataChannelFactory channelFactory) { + super(proposalParser, valueParser, groupName, nameIdPair, + repImpl, channelFactory); + this.proposalParser = proposalParser; + this.valueParser = valueParser; + } + + /** + * Replace the message associated with "PR", so it's the old one for + * compatibility testing. + */ + protected void replacePromise() { + replaceOp("PR", new MessageOp("PR", OldPromise.class)); + } + + /** + * Old Promise response message. It's sent in response to a Propose + * message. + * + * The code here has been copied as is from its previous version to + * facilitate testing. 
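+     *
+     * Relative to the current Promise, the new wire format appends exactly
+     * one trailing field (the ranking tie-breaker), which is why
+     * testPromiseCompatibility can recover the old format by truncating at
+     * the last separator (lines restated from that test, not new API):
+     * <pre>
+     *   String wireFormatNew = prom.wireFormat();
+     *   int tieBreaker = wireFormatNew.lastIndexOf(TextProtocol.SEPARATOR);
+     *   String wireFormatOld = wireFormatNew.substring(0, tieBreaker);
+     * </pre>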
+ */ + public class OldPromise extends ResponseMessage { + private Proposal highestProposal = null; + private Value acceptedValue = null; + private Value suggestion = null; + private long suggestionWeight = Long.MIN_VALUE; + private final int priority; + private int logVersion; + private JEVersion jeVersion; + + public OldPromise(Proposal highestProposal, + Value value, + Value suggestion, + long suggestionWeight, + int priority, + int logVersion, + JEVersion jeVersion) { + this.highestProposal = highestProposal; + this.acceptedValue = value; + this.suggestion = suggestion; + this.suggestionWeight = suggestionWeight; + this.priority = priority; + this.logVersion = logVersion; + this.jeVersion = jeVersion; + } + + public OldPromise(String responseLine, String[] tokens) + throws InvalidMessageException { + + super(responseLine, tokens); + highestProposal = proposalParser.parse(nextPayloadToken()); + acceptedValue = valueParser.parse(nextPayloadToken()); + suggestion = valueParser.parse(nextPayloadToken()); + String weight = nextPayloadToken(); + suggestionWeight = + "".equals(weight) ? + Long.MIN_VALUE : + Long.parseLong(weight); + priority = Integer.parseInt(nextPayloadToken()); + if (getMajorVersionNumber(sendVersion) > 1) { + logVersion = Integer.parseInt(nextPayloadToken()); + jeVersion = new JEVersion(nextPayloadToken()); + } + } + + @Override + public MessageOp getOp() { + return PROMISE; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + getOuterType().hashCode(); + result = prime * result + + ((acceptedValue == null) ? 0 : acceptedValue.hashCode()); + result = prime + * result + + ((highestProposal == null) ? 0 + : highestProposal.hashCode()); + result = prime * result + priority; + result = prime * result + + ((suggestion == null) ? 0 : suggestion.hashCode()); + result = prime * result + + (int) (suggestionWeight ^ (suggestionWeight >>> 32)); + + if (getMajorVersionNumber(sendVersion) > 1) { + result += prime* result + logVersion + jeVersion.hashCode(); + } + + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!super.equals(obj)) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + OldPromise other = (OldPromise) obj; + if (!getOuterType().equals(other.getOuterType())) { + return false; + } + + if (acceptedValue == null) { + if (other.acceptedValue != null) { + return false; + } + } else if (!acceptedValue.equals(other.acceptedValue)) { + return false; + } + + if (highestProposal == null) { + if (other.highestProposal != null) { + return false; + } + } else if (!highestProposal.equals(other.highestProposal)) { + return false; + } + + if (priority != other.priority) { + return false; + } + + if (getMajorVersionNumber(sendVersion) > 1) { + if (logVersion != other.logVersion) { + return false; + } + + if (jeVersion.compareTo(other.jeVersion) != 0) { + return false; + } + } + + if (suggestion == null) { + if (other.suggestion != null) { + return false; + } + } else if (!suggestion.equals(other.suggestion)) { + return false; + } + + if (suggestionWeight != other.suggestionWeight) { + return false; + } + + return true; + } + + @Override + public String wireFormat() { + String line = + wireFormatPrefix() + + SEPARATOR + + ((highestProposal != null) ? + highestProposal.wireFormat() : + "") + + SEPARATOR + + ((acceptedValue != null) ? acceptedValue.wireFormat() : "") + + SEPARATOR + + ((suggestion != null) ? 
suggestion.wireFormat() : "") + + SEPARATOR + + ((suggestionWeight == Long.MIN_VALUE) ? + "" : + Long.toString(suggestionWeight)) + + SEPARATOR + + priority; + + if (getMajorVersionNumber(sendVersion) > 1) { + line += SEPARATOR + logVersion + SEPARATOR + + jeVersion.toString(); + } + + return line; + } + + Proposal getHighestProposal() { + return highestProposal; + } + + Value getAcceptedValue() { + return acceptedValue; + } + + Value getSuggestion() { + return suggestion; + } + + long getSuggestionRanking() { + return suggestionWeight; + } + + int getPriority() { + return priority; + } + + int getLogVersion() { + return logVersion; + } + + JEVersion getJEVersion() { + return jeVersion; + } + + private Protocol getOuterType() { + return TestProtocol.this; + } + } + + } +} diff --git a/test/com/sleepycat/je/rep/elections/RankingProposerTest.java b/test/com/sleepycat/je/rep/elections/RankingProposerTest.java new file mode 100644 index 0000000..03c89bd --- /dev/null +++ b/test/com/sleepycat/je/rep/elections/RankingProposerTest.java @@ -0,0 +1,295 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import static org.junit.Assert.assertEquals; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.elections.Acceptor.SuggestionGenerator.Ranking; +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.elections.Protocol.Promise; +import com.sleepycat.je.rep.elections.Protocol.Value; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.util.test.TestBase; + +/** Test the RankingProposer class. 
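+ *
+ * Each case hand-builds Promise responses and feeds them to
+ * choosePhase2Value; a null node name stands in for an arbiter. A typical
+ * assertion (taken from the tests below) looks like:
+ * <pre>
+ *   assertEquals(NODE_NAME,
+ *                choosePhase2Value(promise(NODE_NAME, 100),
+ *                                  promise(null, 100)));
+ * </pre>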
*/ +public class RankingProposerTest extends TestBase { + + private static final String GROUP_NAME = "group1"; + private static final String NODE_NAME = "node1"; + private static final int NODE_ID = 42; + private static final TimebasedProposalGenerator proposalGenerator = + new TimebasedProposalGenerator(); + private static final InetSocketAddress socketAddress = + new InetSocketAddress("localhost", 5000); + + private final Proposal proposal = proposalGenerator.nextProposal(); + private ServiceDispatcher serviceDispatcher; + private RankingProposer proposer; + private Protocol protocol; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + + /* Set up facilities for creating proposals and promises */ + serviceDispatcher = new ServiceDispatcher( + socketAddress, null, /* repImpl */ null /* channelFactory */); + final NameIdPair nameIdPair = new NameIdPair(NODE_NAME, NODE_ID); + final ElectionsConfig ec = new ElectionsConfig() { + @Override + public String getGroupName() { return GROUP_NAME; } + @Override + public NameIdPair getNameIdPair() { return nameIdPair; } + @Override + public ServiceDispatcher getServiceDispatcher() { + return serviceDispatcher; + } + @Override + public int getElectionPriority() { return 0; } + @Override + public int getLogVersion() { return -1; } + @Override + public RepImpl getRepImpl() { return null; } + @Override + public RepNode getRepNode() { return null; } + }; + final Elections elections = new Elections( + ec, null /* testListener */, null /* suggestionGenerator */); + proposer = new RankingProposer(elections, nameIdPair); + protocol = elections.getProtocol(); + protocol.updateNodeIds(new HashSet(Arrays.asList(NODE_ID))); + } + + @Override + @After + public void tearDown() throws Exception { + super.setUp(); + if (serviceDispatcher != null) { + serviceDispatcher.shutdown(); + } + } + + /* Tests */ + + @Test + public void testPhase2TwoNodes() { + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(NODE_NAME, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(NODE_NAME, 200))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 200), + promise(NODE_NAME, 100))); + } + + @Test + public void testPhase2ThreeNodes() { + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(NODE_NAME, 100), + promise(NODE_NAME, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(NODE_NAME, 200), + promise(NODE_NAME, 300))); + } + + @Test + public void testPhase2ArbOneNode() { + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(null, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(null, 100), + promise(NODE_NAME, 100))); + assertEquals(null, + choosePhase2Value(promise(NODE_NAME, 100), + promise(null, 200))); + assertEquals(null, + choosePhase2Value(promise(null, 200), + promise(NODE_NAME, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 200), + promise(null, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(null, 100), + promise(NODE_NAME, 200))); + } + + /** + * Arbiter should be ignored if there are two nodes, even if the arbiter + * has a higher DTVLSN. 
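+     * For example, given promises of (node, 100), (node, 200) and
+     * (arbiter, 300), an electable node must still be chosen even though
+     * the arbiter carries the highest ranking.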
+ */ + @Test + public void testPhase2ArbTwoNodes() { + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(NODE_NAME, 100), + promise(null, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(null, 100), + promise(NODE_NAME, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(null, 100), + promise(NODE_NAME, 100), + promise(NODE_NAME, 100))); + + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(NODE_NAME, 200), + promise(null, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(null, 100), + promise(NODE_NAME, 200))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(null, 100), + promise(NODE_NAME, 100), + promise(NODE_NAME, 200))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 200), + promise(NODE_NAME, 100), + promise(null, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 200), + promise(null, 100), + promise(NODE_NAME, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(null, 100), + promise(NODE_NAME, 200), + promise(NODE_NAME, 100))); + + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(NODE_NAME, 200), + promise(null, 300))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(null, 300), + promise(NODE_NAME, 200))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(null, 300), + promise(NODE_NAME, 100), + promise(NODE_NAME, 200))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 200), + promise(NODE_NAME, 100), + promise(null, 300))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 200), + promise(null, 300), + promise(NODE_NAME, 100))); + assertEquals(NODE_NAME, + choosePhase2Value(promise(null, 300), + promise(NODE_NAME, 200), + promise(NODE_NAME, 100))); + } + + /** Both arbiters should be ignored. */ + @Test + public void testPhase2TwoArbs() { + assertEquals(NODE_NAME, + choosePhase2Value(promise(NODE_NAME, 100), + promise(null, 300), + promise(null, 400), + promise(NODE_NAME, 200))); + } + + /* Utilities */ + + private String choosePhase2Value(Promise... promises) { + final List exchangeList = new ArrayList<>(); + for (final Promise p : promises) { + exchangeList.add(messageExchange(p)); + } + final Set exchanges = sortedSet(exchangeList); + final Value result = proposer.choosePhase2Value(exchanges); + if (result instanceof MasterValue) { + return ((MasterValue) result).getNodeName(); + } + return null; + } + + /** + * Create a promise with the specified node as the suggested master and the + * DTVLSN for ranking. Specify a null nodeName for a promise from an + * arbiter. + */ + private Promise promise(String nodeName, long dtvlsn) { + return protocol.new Promise(proposal, + null /* value */, + masterValue(nodeName), /* suggestion */ + new Ranking(dtvlsn, 0), + 1, /* priority*/ + 0, /* logVersion */ + JEVersion.CURRENT_VERSION); + } + + private MessageExchange messageExchange(Promise promise) { + final MessageExchange msgExchange = protocol.new MessageExchange( + socketAddress /* target */, "service1", + protocol.new Propose(proposal)); + msgExchange.setResponseMessage(promise); + return msgExchange; + } + + /** + * Create a master value for a node, or for an arbiter if nodeName is + * null. + */ + private static MasterValue masterValue(String nodeName) { + return new MasterValue(nodeName, 5000, + (nodeName == null) ? 
+ NameIdPair.NULL : + new NameIdPair(nodeName, NODE_ID)); + } + + /** Create a set with the elements and order specified by a list. */ + private static SortedSet sortedSet(final List list) { + final SortedSet set = new TreeSet<>( + new Comparator() { + @Override + public int compare(E x, E y) { + return list.indexOf(x) - list.indexOf(y); + } + }); + for (E e : list) { + set.add(e); + } + return set; + } +} diff --git a/test/com/sleepycat/je/rep/elections/VLSNFreezeLatchTest.java b/test/com/sleepycat/je/rep/elections/VLSNFreezeLatchTest.java new file mode 100644 index 0000000..5709e9e --- /dev/null +++ b/test/com/sleepycat/je/rep/elections/VLSNFreezeLatchTest.java @@ -0,0 +1,85 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.elections; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.elections.Proposer.Proposal; +import com.sleepycat.je.rep.impl.node.CommitFreezeLatch; + +public class VLSNFreezeLatchTest { + + private CommitFreezeLatch latch = new CommitFreezeLatch(); + /* A sequential series of proposals */ + private Proposal p1, p2, p3; + + @Before + public void setUp() + throws Exception { + + latch = new CommitFreezeLatch(); + latch.setTimeOut(10 /* ms */); + TimebasedProposalGenerator pg = new TimebasedProposalGenerator(1); + p1 = pg.nextProposal(); + p2 = pg.nextProposal(); + p3 = pg.nextProposal(); + } + + @Test + public void testTimeout() + throws InterruptedException { + + latch.freeze(p2); + // Earlier event does not release waiters + latch.vlsnEvent(p1); + + assertFalse(latch.awaitThaw()); + assertEquals(1, latch.getAwaitTimeoutCount()); + } + + @Test + public void testElection() + throws InterruptedException { + + latch.freeze(p2); + latch.vlsnEvent(p2); + assertTrue(latch.awaitThaw()); + assertEquals(1, latch.getAwaitElectionCount()); + } + + @Test + public void testNewerElection() + throws InterruptedException { + + latch.freeze(p2); + latch.vlsnEvent(p3); + assertTrue(latch.awaitThaw()); + assertEquals(1, latch.getAwaitElectionCount()); + } + + @Test + public void testNoFreeze() + throws InterruptedException { + + latch.vlsnEvent(p1); + + assertFalse(latch.awaitThaw()); + assertEquals(0, latch.getAwaitTimeoutCount()); + } +} diff --git a/test/com/sleepycat/je/rep/impl/DTVLSNTest.java b/test/com/sleepycat/je/rep/impl/DTVLSNTest.java new file mode 100644 index 0000000..f631cbd --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/DTVLSNTest.java @@ -0,0 +1,440 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.je.utilint.PropUtil; +import com.sleepycat.je.utilint.VLSN; + +/** + * Test the properties associated with the durable vlsn. + */ +public class DTVLSNTest extends RepTestBase { + + @Override + @Before + public void setUp() throws Exception { + groupSize = 3; + super.setUp(); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + } + + /** + * Verify that the DTVLSN becomes consistent across the shard and persists + * across shutdown. + */ + @Test + public void testDTVLSNPersistence() throws InterruptedException { + createGroup(); + final long qvlsn1 = + repEnvInfo[0].getRepNode().getCurrentTxnEndVLSN().getSequence(); + awaitDTVLSNQuiesce(qvlsn1); + + closeNodes(repEnvInfo); + + final RepEnvInfo minfo = restartNodes(repEnvInfo); + final long qvlsn2 = + minfo.getRepNode().getCurrentTxnEndVLSN().getSequence(); + awaitDTVLSNQuiesce(qvlsn1); + + /* Cannot go backwards */ + assertTrue(qvlsn2 >= qvlsn1); + } + + /** + * Check the values of the durable transaction VLSN at strategic times to + * make sure it's tracking changes as expected. + */ + @Test + public void testDTVLSN() throws InterruptedException { + + createGroup(); + + long dtvlsn = repEnvInfo[0].getRepNode().getDTVLSN(); + assertTrue(!VLSN.isNull(dtvlsn)); + + final CommitToken ct = + populateDB(repEnvInfo[0].getEnv(), "db1", 1, 10, + RepTestUtils.SYNC_SYNC_ALL_TC); + + dtvlsn = repEnvInfo[0].getRepNode().getDTVLSN(); + + /* + * The in-memory DTVLSN must be current as a result of ALL acks. A + * pollCondition is needed here, since the DTVLSN is updated + * asynchronously wrt the transaction commit above. + */ + boolean pass = new PollCondition(1, 5000) { + + @Override + protected boolean condition() { + return repEnvInfo[0].getRepNode().getDTVLSN() == ct.getVLSN(); + } + + }.await(); + assertTrue(pass); + + final CommitToken ct2 = + populateDB(repEnvInfo[0].getEnv(), "db2", 1, 10, + RepTestUtils.WNSYNC_NONE_TC); + + /* + * No acks to update DTVLSN. Ensure that the null transaction is + * created to update the DTVLSN. 
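+         *
+         * (Editorial note: the null transaction is a master-generated
+         * commit that exists only to advance the DTVLSN; the check below
+         * expects it at the VLSN immediately following ct2, that is,
+         * ct2.getVLSN() + 1.)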
+ */ + awaitDTVLSNQuiesce(ct2.getVLSN()); + + /* Verify that no spurious null commits are being generated. */ + final ReplicationConfig repConfig = + repEnvInfo[0].getEnv().getRepConfig(); + final long feederManagerPollTimeout = + PropUtil.parseDuration(repConfig.getConfigParam + (RepParams. + FEEDER_MANAGER_POLL_TIMEOUT.getName())); + + Thread.sleep(10 * feederManagerPollTimeout); + + /* Calculate the VLSN of the null commit which immediately follows.*/ + final long nullCommitVLSN = ct2.getVLSN() + 1; + assertEquals(nullCommitVLSN, + repEnvInfo[0].getRepNode().getCurrentTxnEndVLSN(). + getSequence()); + } + + /** + * Ensure that a log that is created by a master and is replayed to a + * replica can in turn be used to feed a subsequent replica when it becomes + * the master. In this a rep group is grown to a size of 3 with the master + * moving to each subsequent new node, after it has initialized itself from + * a previous copy of the stream. DTVLSNs are checked during replay to + * ensure they observe their sequencing invariants during replay. + */ + @Test + public void testMultiGenerationalStream() throws InterruptedException { + ReplicatedEnvironment n1 = repEnvInfo[0].openEnv(); + + populateDB(n1, 100); + + /* Reads first generation stream to become current. */ + ReplicatedEnvironment n2 = repEnvInfo[1].openEnv(); + + populateDB(n1, 100); + + forceMaster(1); + + populateDB(n2, 100); + + /* Reads second generation stream to become current. */ + ReplicatedEnvironment n3 = repEnvInfo[2].openEnv(); + + populateDB(n2, 100); + + forceMaster(2); + + populateDB(n3, 100); + } + + /** + * Verify that DTVLSN(commitVLSN) == commitVLSN for a RG consisting of a + * single durable node. + */ + @Test + public void testDoesNotNeedAcks() { + ReplicatedEnvironment n1 = repEnvInfo[0].openEnv(); + + CommitToken ct = populateDB(n1, 10); + + assertEquals(ct.getVLSN(), repEnvInfo[0].getRepNode().getAnyDTVLSN()); + + ct = populateDB(n1, 10); + assertEquals(ct.getVLSN(), repEnvInfo[0].getRepNode().getAnyDTVLSN()); + } + + @Test + public void testSimulatePreDTVLSNGroup() throws InterruptedException { + RepImpl.setSimulatePreDTVLSNMaster(true); + + createGroup(); + + populateDB(repEnvInfo[0].getEnv(), 1); + populateDB(repEnvInfo[0].getEnv(), 1); + + /* + * Verify that all nodes have zero dtvlsn values. + */ + for (RepEnvInfo element : repEnvInfo) { + assertEquals(VLSN.UNINITIALIZED_VLSN_SEQUENCE, + element.getRepNode().getAnyDTVLSN()); + } + + closeNodes(repEnvInfo); + + /* Revert back to postDTVLSN operation. */ + RepImpl.setSimulatePreDTVLSNMaster(false); + + /* The new nodes should be able to hold an election and proceed. */ + RepEnvInfo newMaster = restartNodes(repEnvInfo[0], repEnvInfo[1]); + + populateDB(newMaster.getEnv(), 1); + + /* + * Now bring up the third node, it should see a pre to post dtvlsn + * transition in the HA stream. + */ + repEnvInfo[2].openEnv(); + + CommitToken ct = populateDB(newMaster.getEnv(), 1); + + /* Non-zero DTVLSN advancing as expected. */ + awaitDTVLSNQuiesce(ct.getVLSN()); + } + + /** + * Write concurrently to the environment relying on the checks in HA replay + * to catch any DTVLSN sequences that are invalid. + */ + @Test + public void testConcurrentDTVLSequence() + throws InterruptedException { + + + createGroup(); + + final ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + + /* Create parallel workload. 
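+         *
+         * Thirty writer threads share one AtomicInteger key counter and
+         * commit independent transactions; the test makes no direct DTVLSN
+         * assertions here, relying instead on the replay-time checks that
+         * reject out-of-sequence DTVLSNs on the replicas.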
*/ + + final int poolThreads = 30; + ExecutorService requestPool = Executors.newFixedThreadPool(poolThreads); + + populateDB(master, 1); + + for (RepEnvInfo r : repEnvInfo) { + assertTrue(r.getEnv().getNodeName(), r.getEnv().isValid()); + } + + final Database db = master.openDatabase(null, TEST_DB_NAME, dbconfig); + final AtomicInteger count = new AtomicInteger(poolThreads * 100); + final AtomicReference exception = new AtomicReference<>(); + + for (int i=0; i < poolThreads; i++) { + requestPool. + submit(new Callable() { + + @Override + public Long call() throws Exception { + final DatabaseEntry key1 = new DatabaseEntry(); + final DatabaseEntry value = new DatabaseEntry(); + LongBinding.longToEntry(1, value); + + int keyId; + Transaction txn = null; + while (((keyId = count.decrementAndGet()) > 0) && + (exception.get() == null)) { + try { + txn = master.beginTransaction(null, null); + IntegerBinding.intToEntry(keyId, key1); + db.put(txn, key1, value); + txn.commit(); + } catch(Throwable e) { + if (txn != null) { + txn.abort(); + } + exception.compareAndSet(null, e); + } + } + return 0l; + } + }); + } + requestPool.shutdown(); + assertTrue(requestPool.awaitTermination(60, TimeUnit.SECONDS)); + db.close(); + + assertNull(exception.get()); + } + + + /** + * Verify that we roll back non-durable transactions upon startup without + * a RollbackProhibitedException. + * + * The HardRecoveryTest is used to test the converse, that is, that a + * RollBackProhibitedException is thrown when one is warranted. + */ + @Test + public void testRollbackNonDurable() throws InterruptedException { + + createGroup(); + + /* Shutdown replicas, leave master intact. */ + closeNodes(repEnvInfo[1], repEnvInfo[2]); + int rollbackTxns = + 2 * Integer.parseInt(RepParams.TXN_ROLLBACK_LIMIT.getDefault()); + + /* Created non-durable transactions on node 1. */ + for (int i=0; i < rollbackTxns; i++) { + populateDB(repEnvInfo[0].getEnv(), "db1", 1, 1, + RepTestUtils.WNSYNC_NONE_TC); + } + + /* shutdown the master. */ + repEnvInfo[0].closeEnv(); + + /* restart nodes 2 and 3 with a new master */ + RepEnvInfo masterInfo = restartNodes(repEnvInfo[1], repEnvInfo[2]); + for (int i=0; i < rollbackTxns; i++) { + populateDB(masterInfo.getEnv(), "db2", 1, 1); + } + + /* Open and rollback without any exceptions. */ + repEnvInfo[0].openEnv(); + } + + + /** + * Verify that in the case of a DTVLSN tie during elections, the one node + * with the most advanced VLSN wins to minimize the chance of rollbacks. + */ + @Test + public void testDTVLSNRanking() + throws InterruptedException { + + createGroup(); + + /* + * Run a statistically significant number of iterations to ensure that + * the master is being picked for a good reason. + */ + for (int n=0; n < repEnvInfo.length; n++) { + + /* Vary the choice of master in the group. */ + + final int n1 = n; + int n2 = (n1 + 1) % repEnvInfo.length; + int n3 = (n2 + 1) % repEnvInfo.length; + + /* Start with the right master. */ + forceMaster(n1); + + for (int i=0; i < 2; i++) { + final long qvlsn1 = + repEnvInfo[n1].getRepNode().getCurrentTxnEndVLSN().getSequence(); + awaitDTVLSNQuiesce(qvlsn1); + repEnvInfo[n2].closeEnv(); + repEnvInfo[n3].closeEnv(); + + /* + * Created non-durable transactions on node 1 to give it the + * most advanced VLSN. + */ + for (int j=0; j < 2; j++) { + populateDB(repEnvInfo[n1].getEnv(), "db1", 1, 1, + RepTestUtils.WNSYNC_NONE_TC); + } + + /* shutdown the master, all nodes are now down. 
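+             *
+             * At this point all the nodes carry the same quiesced DTVLSN,
+             * but only n1 holds the extra non-durable commits, so its more
+             * advanced VLSN should break the tie when n1 and n2 restart.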
*/ + repEnvInfo[n1].closeEnv(); + + /* + * Hold back node 3, since we don't want n2 and n3 forming + * election quorum. + */ + restartNodes(repEnvInfo[n1], repEnvInfo[n2]); + assertTrue(repEnvInfo[n1].getEnv().getState().isMaster()); + repEnvInfo[n3].openEnv(); + } + } + } + + private void forceMaster(final int n1) throws InterruptedException { + repEnvInfo[n1].getRepImpl().getRepNode().forceMaster(true); + assertTrue(new PollCondition(10, 30000) { + + @Override + protected boolean condition() { + return repEnvInfo[n1].getEnv().getState().isMaster(); + } + }.await()); + repEnvInfo[n1].getRepImpl().getRepNode().forceMaster(false); + } + + /** + * Returns after waiting for the DTVLSN across the entire shard to advance + * past quiesceVLSN + */ + private void awaitDTVLSNQuiesce(long quiesceVLSN) { + final AwaitDTVLSN qcond = + new AwaitDTVLSN(100, 30 * 1000, quiesceVLSN); + + assertTrue(qcond.await()); + } + + /** + * Utility class to check for a DTVLSN to advance past some expected value + */ + private class AwaitDTVLSN extends PollCondition { + + final long targetVLSN; + + public AwaitDTVLSN(long checkPeriodMs, long timeoutMs, + long targetVLSN) { + super(checkPeriodMs, timeoutMs); + this.targetVLSN = targetVLSN; + } + + @Override + protected boolean condition() { + for (RepEnvInfo rfi : repEnvInfo) { + final long dtvlsn = rfi.getRepNode().getDTVLSN(); + + if (dtvlsn >= targetVLSN) { + continue; + } + return false; + } + return true; + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/DynamicGroupTest.java b/test/com/sleepycat/je/rep/impl/DynamicGroupTest.java new file mode 100644 index 0000000..162723d --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/DynamicGroupTest.java @@ -0,0 +1,504 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.MasterStateException; +import com.sleepycat.je.rep.MemberActiveException; +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.txn.MasterTxn.MasterTxnFactory; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.util.TestUtils; + +public class DynamicGroupTest extends RepTestBase { + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 5; + super.setUp(); + } + + @Test + public void testRemoveMemberExceptions() { + createGroup(2); + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + assertTrue(master.getState().isMaster()); + + RepNode masterRep = repEnvInfo[0].getRepNode(); + try { + masterRep.removeMember(master.getNodeName()); + fail("Exception expected."); + } catch (MasterStateException e) { + // Expected + } + + try { + masterRep.removeMember("unknown node foobar"); + fail("Exception expected."); + } catch (MemberNotFoundException e) { + // Expected + } + + masterRep.removeMember(repEnvInfo[1].getRepNode().getNodeName()); + try { + masterRep.removeMember(repEnvInfo[1].getRepNode().getNodeName()); + fail("Exception expected."); + } catch (MemberNotFoundException e) { + // Expected + } + repEnvInfo[1].closeEnv(); + } + + @Test + public void testDeleteMemberExceptions() { + createGroup(2); + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + assertTrue(master.getState().isMaster()); + + RepNode masterRep = repEnvInfo[0].getRepNode(); + try { + masterRep.removeMember(master.getNodeName(), true); + fail("Exception expected."); + } catch (MasterStateException e) { + // Expected + } + + try { + masterRep.removeMember("unknown node foobar", true); + fail("Exception expected."); + } catch (MemberNotFoundException e) { + // Expected + } + + final String delName = repEnvInfo[1].getRepNode().getNodeName(); + try { + masterRep.removeMember(delName, true); + fail("Exception expected."); + } catch 
(MemberActiveException e) { + // Expected + } + + repEnvInfo[1].closeEnv(); + masterRep.removeMember(delName, true); + + try { + masterRep.removeMember(delName, true); + fail("Exception expected."); + } catch (MemberNotFoundException e) { + // Expected + } + } + + /* + * Tests internal node removal APIs. + */ + @Test + public void testRemoveMember() { + createGroup(groupSize); + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + assertTrue(master.getState().isMaster()); + + RepNode masterRep = repEnvInfo[0].getRepNode(); + + /* Reduce the group size all the way down to one. */ + for (int i = 1; i < groupSize; i++) { + assertTrue(repEnvInfo[i].getEnv().isValid()); + masterRep.removeMember(repEnvInfo[i].getEnv().getNodeName()); + assertEquals((groupSize-i), + masterRep.getGroup().getElectableGroupSize()); + } + + /* Close the replica handles*/ + for (int i = groupSize-1; i > 0; i--) { + repEnvInfo[i].closeEnv(); + } + + /* Attempting to re-open them with the same node names should fail. */ + for (int i = 1; i < groupSize; i++) { + try { + repEnvInfo[i].openEnv(); + fail("Exception expected"); + } catch (EnvironmentFailureException e) { + /* Expected, the master should reject the attempt. */ + assertEquals(EnvironmentFailureReason.HANDSHAKE_ERROR, + e.getReason()); + } + } + + /* Doing the same but with different node names should be ok. */ + for (int i = 1; i < groupSize; i++) { + final RepEnvInfo ri = repEnvInfo[i]; + final ReplicationConfig repConfig = ri.getRepConfig(); + TestUtils.removeLogFiles("RemoveRepEnvironments", + ri.getEnvHome(), + false); + + repConfig.setNodeName("ReplaceNode_" + i); + ri.openEnv(); + assertEquals(i+1, masterRep.getGroup().getElectableGroupSize()); + } + master.close(); + } + + /* + * Tests internal node deletion APIs. + */ + @Test + public void testDeleteMember() { + createGroup(groupSize); + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + assertTrue(master.getState().isMaster()); + + RepNode masterRep = repEnvInfo[0].getRepNode(); + + /* Reduce the group size all the way down to one. */ + for (int i = 1; i < groupSize; i++) { + assertTrue(repEnvInfo[i].getEnv().isValid()); + final String delName = repEnvInfo[i].getEnv().getNodeName(); + repEnvInfo[i].closeEnv(); + masterRep.removeMember(delName, true); + assertEquals((groupSize-i), + masterRep.getGroup().getElectableGroupSize()); + } + + /* + * Attempting to re-open them with the same node names should succeed + */ + for (int i = 1; i < groupSize; i++) { + repEnvInfo[i].openEnv(); + } + } + + /* + * Verifies that an InsufficientAcksException is not thrown if the group + * size changes while a transaction commit is waiting for acknowledgments. + */ + @Test + public void testMemberRemoveAckInteraction() { + testMemberRemoveAckInteraction(false); + } + + /* Same but deleting the members. */ + @Test + public void testDeleteRemoveAckInteraction() { + testMemberRemoveAckInteraction(true); + } + + private void testMemberRemoveAckInteraction(final boolean delete) { + createGroup(groupSize); + Transaction txn; + Database db; + try { + MasterTxn.setFactory(new TxnFactory(delete)); + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + + txn = master.beginTransaction(null, null); + /* Write to the environment. 
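+             *
+             * The TxnFactory installed above substitutes a TestMasterTxn
+             * whose preLogCommitHook (defined at the bottom of this class)
+             * removes members mid-commit, so this commit must succeed
+             * against the recomputed, smaller simple majority rather than
+             * throw InsufficientAcksException.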
*/ + db = master.openDatabase(txn, "random", dbconfig); + db.close(); + txn.commit(); + } catch (InsufficientAcksException e) { + fail ("No exception expected."); + } finally { + MasterTxn.setFactory(null); + } + } + + @Test + public void testNoQuorum() + throws DatabaseException, + InterruptedException { + + for (int i=0; i < 3; i++) { + ReplicatedEnvironment rep = repEnvInfo[i].openEnv(); + State state = rep.getState(); + assertEquals((i == 0) ? State.MASTER : State.REPLICA, state); + } + RepTestUtils.syncGroupToLastCommit(repEnvInfo, 3); + repEnvInfo[1].closeEnv(); + repEnvInfo[2].closeEnv(); + + // A new node joining in the absence of a quorum must fail + try { + repEnvInfo[3].openEnv(); + fail("Expected exception"); + } catch (UnknownMasterException e) { + /* Expected. */ + } + } + + /* Start the master (the helper node) first */ + @Test + public void testGroupCreateMasterFirst() + throws DatabaseException { + + for (int i=0; i < repEnvInfo.length; i++) { + ReplicatedEnvironment rep = repEnvInfo[i].openEnv(); + State state = rep.getState(); + assertEquals((i == 0) ? State.MASTER : State.REPLICA, state); + RepNode repNode = RepInternal.getNonNullRepImpl(rep).getRepNode(); + /* No elections, helper nodes or members queried for master. */ + assertEquals(0, repNode.getElections().getElectionCount()); + } + } + + /* + * Start the master (the helper node) last, so the other nodes have to + * wait and retry until the helper node comes up. + */ + @Test + public void testGroupCreateMasterLast() + throws DatabaseException, + InterruptedException { + + RepNodeThread threads[] = new RepNodeThread[repEnvInfo.length]; + + /* Start up non-masters, they should wait */ + for (int i=1; i < repEnvInfo.length; i++) { + threads[i]=new RepNodeThread(i); + threads[i].start(); + } + + State state = repEnvInfo[0].openEnv().getState(); + assertEquals(State.MASTER, state); + + for (int i=1; i < repEnvInfo.length; i++) { + threads[i].join(30000); + assertTrue(!threads[i].isAlive()); + assertNull(threads[i].te); + } + } + + /** + * Test that a timeout in the feeder while attempting to read the group + * database because other feeders have it write locked causes the feeder + * (and replica) to fail, but allows the master to continue operating. 
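+     * The contention is simulated by holding a write lock on the group
+     * entry (lines restated from the test body below, not new API):
+     * <pre>
+     *   Cursor cursor = DbInternal.makeCursor(groupDbImpl, txn,
+     *                                         new CursorConfig());
+     *   cursor.getNext(RepGroupDB.groupKeyEntry, value, LockMode.RMW);
+     *   Thread.sleep(600); // outlast the default 500 ms read timeout
+     * </pre>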
+ * [#23822] + */ + @Test + public void testJoinGroupReadGroupTimeout() + throws DatabaseException, InterruptedException { + + /* Start first node as master */ + ReplicatedEnvironment repEnv = repEnvInfo[0].openEnv(); + assertEquals("Master node state", State.MASTER, repEnv.getState()); + + RepImpl repImpl = RepInternal.getNonNullRepImpl(repEnv); + + for (int i = 1; i <= 2; i++) { + + /* Get a write lock on the RepGroupDB */ + final MasterTxn txn = new MasterTxn( + repImpl, + new TransactionConfig().setDurability( + new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.SIMPLE_MAJORITY)), + repImpl.getNameIdPair()); + final DatabaseImpl groupDbImpl = repImpl.getGroupDb(); + final DatabaseEntry value = new DatabaseEntry(); + final Cursor cursor = + DbInternal.makeCursor(groupDbImpl, txn, new CursorConfig()); + final OperationStatus status = cursor.getNext( + RepGroupDB.groupKeyEntry, value, LockMode.RMW); + assertEquals(i + ": Lock group result", + OperationStatus.SUCCESS, status); + + /* Wait longer than the default 500 ms read timeout */ + Thread.sleep(600); + + /* Test both electable and secondary nodes */ + if (i == 2) { + repEnvInfo[i].getRepConfig().setNodeType(NodeType.SECONDARY); + } + + /* Create a thread that attempts to join another environment */ + RepNodeThread repNodeThread = new RepNodeThread(i, i != 1); + repNodeThread.start(); + + /* Wait for attempt to complete */ + repNodeThread.join(30000); + assertEquals("RN thread alive", false, repNodeThread.isAlive()); + + if (i == 1) { + + /* Join attempt should fail for primary */ + assertNotNull("Expected RN thread exception", + repNodeThread.te); + + /* Release write lock on RepGroupDB */ + cursor.close(); + txn.abort(); + + /* Second join attempt should succeed */ + repNodeThread = new RepNodeThread(1); + repNodeThread.start(); + repNodeThread.join(30000); + assertEquals("RN thread alive", + false, repNodeThread.isAlive()); + assertEquals("RN thread exception", null, repNodeThread.te); + } else { + + /* Join attempt should succeed for secondary */ + assertEquals("RN thread exception", null, repNodeThread.te); + + /* Release write lock on RepGroupDB */ + cursor.close(); + txn.abort(); + } + } + } + + private class RepNodeThread extends Thread { + private final int id; + private final boolean printStackTrace; + volatile Throwable te; + + RepNodeThread(int id) { + this(id, false); + } + + RepNodeThread(int id, boolean printStackTrace) { + this.id = id; + this.printStackTrace = printStackTrace; + } + + @Override + public void run() { + + try { + repEnvInfo[id].openEnv().getState(); + } catch (Throwable e) { + te = e; + if (printStackTrace) { + te.printStackTrace(); + } + } + } + } + + /* + * Factory for producing test MasterTxns + */ + private class TxnFactory implements MasterTxnFactory { + final boolean delete; + final Thread thread = Thread.currentThread(); + + TxnFactory(final boolean delete) { + this.delete = delete; + } + + @Override + public MasterTxn create(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair) { + if (Thread.currentThread() != thread) { + return new MasterTxn(envImpl, config, nameIdPair); + } + return new TestMasterTxn(envImpl, config, nameIdPair, delete); + } + + @Override + public MasterTxn createNullTxn(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair) { + + return new MasterTxn(envImpl, config, nameIdPair) { + @Override + protected boolean updateLoggedForTxn() { + return true; + } + }; + } + } + + private class TestMasterTxn extends 
MasterTxn { + private final boolean delete; + + public TestMasterTxn(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair, + boolean delete) + throws DatabaseException { + + super(envImpl, config, nameIdPair); + this.delete = delete; + } + + @Override + protected void preLogCommitHook() { + super.preLogCommitHook(); + RepNode rmMasterNode = repEnvInfo[0].getRepNode(); + int size = rmMasterNode.getGroup().getAllElectableMembers().size(); + int delNodes = ((size & 1) == 1) ? 2 : 1; + int closeNodeIndex = (size - delNodes) - 1; + + /* + * The loop below simulates the concurrent removal of a node while + * a transaction is in progress. It deletes a sufficient number of + * nodes so as to get a lower simple nodes to get to a new lower + * simple majority. + */ + for (int i= repEnvInfo.length-1; delNodes-- > 0; i--) { + repEnvInfo[i].closeEnv(); + rmMasterNode.removeMember( + repEnvInfo[i].getRepConfig().getNodeName(), delete); + } + + /* + * Shut down an additional undeleted Replica to provoke a + * lack of acks based on the old simple majority. + */ + repEnvInfo[closeNodeIndex].closeEnv(); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/GroupDbAckFailureTest.java b/test/com/sleepycat/je/rep/impl/GroupDbAckFailureTest.java new file mode 100644 index 0000000..4276af0 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/GroupDbAckFailureTest.java @@ -0,0 +1,275 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.net.InetSocketAddress; +import java.util.HashSet; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.util.ReplicationGroupAdmin; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +/** + * Tests to ensure that a group can eventually recover from ack failures that + * occur during GroupDB update operations, and that conditions that might lead + * to loss of "durably committed and replicated" transactions cannot arise. + */ +public class GroupDbAckFailureTest extends RepTestBase { + + /** + * Exercises the scenario where addition of a new node originally fails, + * and is later retried. 
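+     * The first failure is induced by muting both replicas so the GroupDB
+     * update cannot be acknowledged (calls restated from the test body):
+     * <pre>
+     *   repEnvInfo[1].getRepNode().replica().setDontProcessStream();
+     *   repEnvInfo[2].getRepNode().replica().setDontProcessStream();
+     *   // the join times out; restart the replicas, then retry openEnv()
+     * </pre>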
+ */ + @Test + public void testRetriedCreation() throws Exception { + createGroup(3); + repEnvInfo[1].getRepNode().replica().setDontProcessStream(); + repEnvInfo[2].getRepNode().replica().setDontProcessStream(); + try { + repEnvInfo[3].openEnv(); + fail("expected first attempt to fail"); + } catch (UnknownMasterException e) { + // expected + } + assertNull(repEnvInfo[3].getEnv()); + closeNodes(repEnvInfo[1], repEnvInfo[2]); + restartNodes(repEnvInfo[1], repEnvInfo[2]); + repEnvInfo[3].openEnv(); + } + + /** + * Exercises a scenario in which the application ignores the failure of an + * updateAddress() operation, and tries to start the moved node anyway. + * Applications shouldn't do this, but it's good to make sure that the + * system retains its integrity anyway. + */ + @Test + public void testIgnoreUpdateFailure() throws Exception { + int size = 3; + createGroup(size); + RepEnvInfo master = repEnvInfo[0]; + // Database db = + // master.getEnv().openDatabase(null, TEST_DB_NAME, dbconfig); + RepEnvInfo replica = repEnvInfo[1]; + RepEnvInfo mover = repEnvInfo[size-1]; + mover.closeEnv(); + + replica.getRepNode().replica().setDontProcessStream(); + Set helpers = new HashSet(); + helpers.add(master.getRepConfig().getNodeSocketAddress()); + ReplicationGroupAdmin rga = + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + helpers, + RepTestUtils.readRepNetConfig()); + ReplicationConfig rc = mover.getRepConfig(); + int newPort = rc.getNodePort() + 1732; + try { + logger.info("try first update operation"); + rga.updateAddress(rc.getNodeName(), "localhost", newPort); + fail("should have failed"); + } catch (Exception e) { + logger.info("test sees " + e.getClass()); + // expected (modulo exception type) + } + Thread.sleep(25000); // wait for master's retry loop to give up + replica.closeEnv(); + rc.setNodeHostPort("localhost:" + newPort); + + /* Imagine the clumsy application ignores previous failure. */ + logger.info("will restart the moved node"); + try { + mover.openEnv(); + fail("expected restart of moved node to fail"); + } catch (UnknownMasterException e) { + logger.info("restart of the moved node failed as expected"); + // expected + } + + /* + * This test is designed to demonstrate bug fix #21095. Originally, + * the above step failed. Now, you could ask, is it necessarily bad + * that the node is allowed to connect? What's really critical is that + * the master doesn't then think it has a quorum for authoritative + * majority commits. Originally, even that didn't work right: in other + * words, a default db.put() operation would succeed at this point (the + * commented-out code below). Of course with the fix in place it is + * impossible to even try this step. + */ + + // try { + // db.put(null, key, data); + // fail(""); + // db.close(); + } + + /** + * Exercises the scenario where the master tries an updateAddress() + * operation that has already been completed previously. This isn't + * completely silly: it could happen if the application tried to do the + * update once, but failed to get the response message indicating that it + * worked; the application would have to retry in order to be sure. + *
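+     * A sketch of the idempotent retry being verified, paraphrasing the
+     * test body below:
+     * <pre>{@code
+     * rga.updateAddress(nodeName, "localhost", newPort);
+     * // pretend the OK response was lost in transit; the identical
+     * // operation is retried and must succeed
+     * rga.updateAddress(nodeName, "localhost", newPort);
+     * }</pre>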

        + * As simple as this is, it didn't work before bug fix #21095. + */ + @Test + public void testRedundantUpdateAddressOp() throws Exception { + int size = 3; + createGroup(size); + RepEnvInfo master = repEnvInfo[0]; + RepEnvInfo mover = repEnvInfo[size-1]; + closeNodes(mover); + + Set helpers = new HashSet(); + helpers.add(master.getRepConfig().getNodeSocketAddress()); + ReplicationGroupAdmin rga = + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + helpers, + RepTestUtils.readRepNetConfig()); + + ReplicationConfig rc = mover.getRepConfig(); + String nodeName = rc.getNodeName(); + int newPort = rc.getNodePort() + 1732; + rga.updateAddress(nodeName, "localhost", newPort); + + /* Pretend we lost the response to the above operation, so retry it. */ + rga.updateAddress(nodeName, "localhost", newPort); + + rc.setNodeHostPort(rc.getNodeHostname() + ":" + newPort); + restartNodes(mover); + } + + /** + * Ensure that if an unfortunately timed network partition occurs around + * the time of an updateAddress() operation, we avoid allowing two masters + * to emerge both thinking they're authoritative. There will be two + * masters (which is bad), but one of them will reject the Feeder + * connection from the zombie replica, so at least it won't become + * authoritative. + */ + @Test + public void testZombie() throws Exception { + + /* + * Super-class has set up 5 RepEnvInfo objects. We'll use 4 of them, + * even though our true group size is only 3. We'll use the 4th one as + * the new, moved incarnation of the node whose address is to be + * changed. In order to do this, we'll have to fake out some of the + * configuration information in this 4th slot. + */ + int realGroupSize = 3; + createGroup(realGroupSize); + RepEnvInfo node1 = repEnvInfo[0]; + RepEnvInfo node2 = repEnvInfo[1]; + + RepEnvInfo master = node1; + + /* The (original incarnation of the) node to be moved. */ + RepEnvInfo mover = repEnvInfo[2]; + + /* The new incarnation, at the new network address. */ + RepEnvInfo moved = repEnvInfo[3]; + + closeNodes(mover); + + Set helpers = new HashSet(); + helpers.add(master.getRepConfig().getNodeSocketAddress()); + ReplicationGroupAdmin rga = + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + helpers, + RepTestUtils.readRepNetConfig()); + + /* + * "Move" the node to a new port address. The new port is the one + * reserved in the as-yet-unused "moved" installation, so we must tell + * the group to use this new port. + */ + String nodeName = mover.getRepConfig().getNodeName(); + int newPort = moved.getRepConfig().getNodePort(); + rga.updateAddress(nodeName, "localhost", newPort); + + /* + * Take the pre-existing node name and poke it into the new + * installation. (By default it was assigned a fourth, distinct node + * name during set-up.) + */ + moved.getRepConfig().setNodeName(nodeName); + restartNodes(moved); + + /* + * Now imagine a network partition occurs: node1 can only reach the + * new, duly authorized incarnation of node3. There's nothing wrong + * with that. + */ + closeNodes(repEnvInfo); + restartNodes(node1, moved); + + /* + * On the other side of the partition, node2 can only reach the old, + * obsolete incarnation of node3. Ideally that old node3 should never + * have been allowed to restart. But let's make sure that if it were + * to start, no harm would come. + * + * It would be nice if we could use the usual restartNodes() method + * here. But that method throws an AssertionError if either node + * fails to fully come up. 
Here's what we expect to happen here:
+         *
+         * - both nodes start to come up
+         * - an election is held, and node2 wins; we know this because node3
+         *   had to be down in order to allow the updateAddress() operation to
+         *   proceed, so it didn't see the associated GroupDB update; node2
+         *   must have received it, because the master required the ack from it
+         *   in order to confirm the operation.
+         *
+         *   Note that if this hadn't been the case we would be in trouble,
+         *   because node3 would have no reason to reject a Feeder connection
+         *   from node2.
+         * - node3 tries to establish a Feeder connection to master node2, but
+         *   this is rejected because the obsolete network address no longer
+         *   matches.
+         */
+        closeNodes(repEnvInfo);
+        EnvOpenThread t2 = new EnvOpenThread(node2);
+        t2.start();
+        EnvOpenThread t3 = new EnvOpenThread(mover);
+        t3.start();
+        long waitTime = 2 * 60 * 1000;
+        t2.join(waitTime);
+        assertFalse(t2.isAlive());
+        if (t2.testException != null) {
+            t2.testException.printStackTrace();
+            fail("expected non-authoritative master failed to restart");
+        }
+        t3.join(waitTime);
+        assertFalse(t3.isAlive());
+        assertTrue(t3.testException instanceof EnvironmentFailureException);
+
+        final ReplicatedEnvironment env2 = node2.getEnv();
+        assertNotNull(env2);
+        assertTrue(env2.isValid());
+        assertTrue(env2.getState().isMaster());
+    }
+}
diff --git a/test/com/sleepycat/je/rep/impl/GroupServiceTest.java b/test/com/sleepycat/je/rep/impl/GroupServiceTest.java
new file mode 100644
index 0000000..1509b3d
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/GroupServiceTest.java
@@ -0,0 +1,171 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertSame; + +import java.net.InetSocketAddress; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepGroupProtocol.EnsureOK; +import com.sleepycat.je.rep.impl.RepGroupProtocol.Fail; +import com.sleepycat.je.rep.impl.RepGroupProtocol.FailReason; +import com.sleepycat.je.rep.impl.RepGroupProtocol.GroupResponse; +import com.sleepycat.je.rep.impl.TextProtocol.MessageExchange; +import com.sleepycat.je.rep.impl.TextProtocol.OK; +import com.sleepycat.je.rep.impl.TextProtocol.ResponseMessage; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; + +public class GroupServiceTest extends RepTestBase { + + @SuppressWarnings("null") + @Test + public void testService() throws Exception { + RepTestUtils.joinGroup(repEnvInfo); + RepNode master = null; + ServiceDispatcher masterDispatcher = null; + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment replicator = repi.getEnv(); + RepNode repNode = + RepInternal.getNonNullRepImpl(replicator).getRepNode(); + ServiceDispatcher dispatcher = repNode.getServiceDispatcher(); + if (repNode.isMaster()) { + master = repNode; + masterDispatcher = dispatcher; + } + } + assertTrue(masterDispatcher != null); + InetSocketAddress socketAddress = masterDispatcher.getSocketAddress(); + RepGroupProtocol protocol = + new RepGroupProtocol(RepTestUtils.TEST_REP_GROUP_NAME, + NameIdPair.NULL, + master.getRepImpl(), + master.getRepImpl().getChannelFactory()); + + /* Test Group Request. */ + MessageExchange me = + protocol.new MessageExchange(socketAddress, + GroupService.SERVICE_NAME, + protocol.new GroupRequest()); + me.run(); + ResponseMessage resp = me.getResponseMessage(); + assertEquals(GroupResponse.class, resp.getClass()); + assertEquals(master.getGroup(), ((GroupResponse)resp).getGroup()); + int monitorCount = + ((GroupResponse)resp).getGroup().getMonitorMembers().size(); + + /* Test add Monitor. */ + short monitorId = 1000; + RepNodeImpl monitor = + new RepNodeImpl(new NameIdPair("mon"+monitorId, monitorId), + NodeType.MONITOR, "localhost", 6000, null); + me = protocol.new MessageExchange(socketAddress, + GroupService.SERVICE_NAME, + protocol.new EnsureNode(monitor)); + me.run(); + resp = me.getResponseMessage(); + assertEquals(EnsureOK.class, resp.getClass()); + + + /* Retrieve the group again, it should have the new monitor. */ + me = protocol.new MessageExchange(socketAddress, + GroupService.SERVICE_NAME, + protocol.new GroupRequest()); + me.run(); + resp = me.getResponseMessage(); + assertEquals(GroupResponse.class, resp.getClass()); + RepGroupImpl repGroup = ((GroupResponse)resp).getGroup(); + Set monitors = repGroup.getMonitorMembers(); + assertEquals(monitorCount+1, monitors.size()); + + /* Exercise the remove member service to remove the monitor. 
*/ + me = protocol.new MessageExchange + (socketAddress,GroupService.SERVICE_NAME, + protocol.new RemoveMember(monitor.getName())); + me.run(); + resp = me.getResponseMessage(); + assertEquals(OK.class, resp.getClass()); + + /* + * Exercise the delete member service using the already removed monitor + */ + me = protocol.new MessageExchange( + socketAddress, GroupService.SERVICE_NAME, + protocol.new DeleteMember(monitor.getName())); + me.run(); + resp = me.getResponseMessage(); + assertEquals(Fail.class, resp.getClass()); + Fail fail = (Fail) resp; + assertSame(FailReason.MEMBER_NOT_FOUND, fail.getReason()); + + /* Retrieve the group again and check for the absence of the monitor */ + me = protocol.new MessageExchange(socketAddress, + GroupService.SERVICE_NAME, + protocol.new GroupRequest()); + me.run(); + resp = me.getResponseMessage(); + assertEquals(GroupResponse.class, resp.getClass()); + repGroup = ((GroupResponse)resp).getGroup(); + monitors = repGroup.getMonitorMembers(); + assertEquals(0, monitors.size()); + + /* + * Most GroupService requests can only be served by the master. See + * that requests sent to a replica are rejected. + */ + RepEnvInfo deadNode = repEnvInfo[4]; + assertTrue(deadNode.isReplica()); + deadNode.closeEnv(); + RepEnvInfo replica = repEnvInfo[1]; + assertTrue(replica.isReplica()); + socketAddress = replica.getRepConfig().getNodeSocketAddress(); + RepNode repNode = + RepInternal.getNonNullRepImpl(replica.getEnv()).getRepNode(); + protocol = + new RepGroupProtocol(RepTestUtils.TEST_REP_GROUP_NAME, + NameIdPair.NULL, + repNode.getRepImpl(), + repNode.getRepImpl().getChannelFactory()); + me = protocol.new MessageExchange + (socketAddress, GroupService.SERVICE_NAME, + protocol.new RemoveMember(deadNode.getRepConfig().getNodeName())); + me.run(); + resp = me.getResponseMessage(); + assertEquals(Fail.class, resp.getClass()); + fail = (Fail) resp; + assertSame(FailReason.IS_REPLICA, fail.getReason()); + + me = protocol.new MessageExchange( + socketAddress, GroupService.SERVICE_NAME, + protocol.new DeleteMember(deadNode.getRepConfig().getNodeName())); + me.run(); + resp = me.getResponseMessage(); + assertEquals(Fail.class, resp.getClass()); + fail = (Fail) resp; + assertSame(FailReason.IS_REPLICA, fail.getReason()); + + /* Restart dead node, just to placate superclass tearDown(). */ + deadNode.openEnv(); + } +} diff --git a/test/com/sleepycat/je/rep/impl/NetworkPartitionHealingTest.java b/test/com/sleepycat/je/rep/impl/NetworkPartitionHealingTest.java new file mode 100644 index 0000000..67e9b91 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/NetworkPartitionHealingTest.java @@ -0,0 +1,296 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.CommitToken;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.rep.utilint.WaitForMasterListener;
+import com.sleepycat.je.rep.utilint.WaitForReplicaListener;
+import com.sleepycat.je.utilint.PollCondition;
+
+public class NetworkPartitionHealingTest extends RepTestBase {
+
+    /* (non-Javadoc)
+     * @see com.sleepycat.je.rep.impl.RepTestBase#setUp()
+     */
+    @Override
+    @Before
+    public void setUp()
+        throws Exception {
+
+        groupSize = 3;
+        super.setUp();
+    }
+
+    /**
+     * This test captures the problem described in SR 20572 and the related
+     * SR 20258.
+     *
+     * Simulates a network partition where a 3 node group (A, B, C) is
+     * split into two: (A) and (B,C), resulting in two masters: the old
+     * master A and a newly elected B.
+     *
+     * The majority side (B,C) continues to make progress and performs
+     * durable writes.
+     *
+     * The master on the majority side, B, goes down. There is now no master
+     * on the (B,C) side since there is no quorum.
+     *
+     * The partition is healed. This should result in a master being elected
+     * on the "majority" (B,C) side of the partition, thus ensuring that
+     * transactions are not lost. The old master A learns about the new
+     * master through the periodic master broadcasts and reverts to a
+     * replica.
+     */
+    @Test
+    public void testPostNetworkPartitionMaster()
+        throws DatabaseException, InterruptedException {
+
+        /* Rebroadcast master election results every second. */
+        createPartitionedGroup("1 s");
+
+        /* Perform durable writes on the majority side. */
+        final RepEnvInfo rei2 = repEnvInfo[1];
+        ReplicatedEnvironment env2 = rei2.getEnv();
+        populateDB(env2, "test", 0, 20, RepTestUtils.DEFAULT_TC);
+
+        final RepEnvInfo rei1 = repEnvInfo[0];
+        final RepEnvInfo rei3 = repEnvInfo[2];
+        final ReplicatedEnvironment env1 = rei1.getEnv();
+        ReplicatedEnvironment env3 = rei3.getEnv();
+
+        healPartition();
+
+        env2 = rei2.getEnv();
+
+        /*
+         * Node 1 should become a replica, after hearing about the new master
+         * through the master broadcasts.
+         */
+        boolean isReplica = new PollCondition(1000, 30000) {
+
+            @Override
+            protected boolean condition() {
+                return env1.getState().isReplica();
+            }
+        }.await();
+        assertTrue(isReplica);
+
+        /*
+         * The master must be on the majority partition's side. Either node
+         * 2 or node 3 could have become a master. The previous env handles
+         * should still be valid, as the master->replica transition does not
+         * require a recovery.
+         */
+        assertTrue(env1.isValid());
+        assertTrue(env2.isValid());
+        assertTrue(env3.isValid());
+
+        assertTrue(env2.getState().isMaster() || env3.getState().isMaster());
+    }
+
+    /**
+     * Verifies that a unique master is re-established in the rep group after
+     * the resolution of a network partition in which the master was on the
+     * minority side of the split.
+     *
+     * Simulates a network partition with a master on the minority side and
+     * then heals it. The obsolete master environment becomes a replica as a
+     * result.
+     *
+     * 1) Start a 3 node RG. Node 1 is the master.
+     *
+     * 2) Disable the Acceptor/Learner/Feeder for node 1, simulating a
+     *    network partition.
+     *
+     * 3) Force node 2 to be master. We now have two masters, with node 1
+     *    unable to process durable writes and node 2 the true master.
+     *
+     * 4) Heal the network partition.
+     *
+     * 5) Verify that node1 is informed of the new master and becomes a
+     *    replica.
+     */
+    @Test
+    public void testPostNetworkPartition()
+        throws DatabaseException, InterruptedException {
+
+        final RepEnvInfo rei1 = repEnvInfo[0];
+
+        createPartitionedGroup("1 s");
+
+        ReplicatedEnvironment env1 = rei1.getEnv();
+        WaitForReplicaListener replicaWaiter = new WaitForReplicaListener();
+        env1.setStateChangeListener(replicaWaiter);
+
+        /*
+         * Sleep a multiple of the 1s period above, to ensure that the master
+         * broadcasts repeatedly.
+         */
+        Thread.sleep(10000);
+
+        healPartition();
+
+        assertTrue(replicaWaiter.awaitReplica());
+        assertTrue(env1.isValid());
+        assertEquals(ReplicatedEnvironment.State.REPLICA, env1.getState());
+
+        rei1.closeEnv();
+    }
+
+    /**
+     * This test demonstrates the fix for SR24615: a correct master (one
+     * with a more advanced DTVLSN, rather than a more advanced VLSN) is
+     * selected after a network partition.
+     *
+     * 1) Create a 3 node group.
+     *
+     * 2) Simulate a network partition with [node1(master)] and
+     *    [node2(master), node3(replica)]
+     *
+     * 3) 10 records are written to the majority partition and 100
+     *    (unacknowledged) records are written to the minority partition.
+     *
+     * 4) The nodes are shut down and brought back up, with the network
+     *    functioning normally.
+     *
+     * 5) The nodes come up and hold an election, choosing one of nodes 2 or
+     *    3 as the master, since it has the higher dtvlsn, even though node 1
+     *    has the higher vlsn.
+     *
+     * 6) When node1 comes up it rolls back the 100 unacknowledged records
+     *    and joins as a replica.
+     */
+    @Test
+    public void testDemonstrateCorrectMasterSelection()
+        throws DatabaseException, InterruptedException {
+
+        createPartitionedGroup("1000000 s");
+        final RepEnvInfo rei1 = repEnvInfo[0];
+        final RepEnvInfo rei2 = repEnvInfo[1];
+        final RepEnvInfo rei3 = repEnvInfo[2];
+
+        CommitToken ctMinority =
+            populateDB(rei1.getEnv(), TEST_DB_NAME, 0, 100,
+                       RepTestUtils.SYNC_SYNC_NONE_TC);
+        rei1.closeEnv();
+
+        /*
+         * Ensure that the DTVLSN has advanced on the majority side by using
+         * two transactions.
+         */
+        populateDB(rei2.getEnv(), TEST_DB_NAME, 0, 1,
+                   new TransactionConfig().
+                       setDurability(RepTestUtils.DEFAULT_DURABILITY));
+        CommitToken ctMajority =
+            populateDB(rei2.getEnv(), TEST_DB_NAME, 0, 10,
+                       new TransactionConfig().
+                           setDurability(RepTestUtils.DEFAULT_DURABILITY));
+
+        /* Ensure that the minority master has the larger VLSN. */
+        assertTrue(ctMinority.compareTo(ctMajority) > 0);
+        rei1.closeEnv();
+        rei2.closeEnv();
+        rei3.closeEnv();
+
+        restartNodes(repEnvInfo);
+
+        /* The master should be on the majority side. */
+        assertTrue(repEnvInfo[1].getEnv().getState().isMaster() ||
+                   repEnvInfo[2].getEnv().getState().isMaster());
+
+        /* The previous minority master should now be a replica. */
+        assertTrue(repEnvInfo[0].getEnv().getState().isReplica());
+    }
+
+    /**
+     * Simulates a network partitioned group with node 1 (the master) on one
+     * side and nodes 2 and 3 on the other side, with node 2 being the master.
+     *
+     * It does so by disabling the Learner and Acceptor agents, as well as
+     * the feeder service, on node 1 and forcing node 2 to be the master, so
+     * that node 1 is not informed that node 2 is the new master.
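+     *
+     * In outline, paraphrasing the method body below:
+     * <pre>{@code
+     * RepTestUtils.disableServices(rei1);   // cut node 1 off from the group
+     * rei2.getRepNode().forceMaster(true);  // elect node 2 as a second master
+     * masterWaiter.awaitMastership();       // both sides now claim mastership
+     * }</pre>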
+     */
+    private void createPartitionedGroup(String rebroadcastPeriod)
+        throws DatabaseException, InterruptedException {
+
+        final RepEnvInfo rei1 = repEnvInfo[0];
+        final RepEnvInfo rei2 = repEnvInfo[1];
+
+        for (int i = 0; i < groupSize; i++) {
+            repEnvInfo[i].getRepConfig().setConfigParam
+                (ReplicationConfig.ELECTIONS_REBROADCAST_PERIOD,
+                 rebroadcastPeriod);
+        }
+
+        createGroup();
+
+        assertTrue(rei1.getEnv().getState().isMaster());
+        final CommitToken ct = populateDB(rei1.getEnv(), "x", 10);
+
+        /* Await the DTVLSN on the replica. */
+        boolean awaitDTVLSN = new PollCondition(10, 10000) {
+
+            @Override
+            protected boolean condition() {
+                return rei2.getRepNode().getDTVLSN() >= ct.getVLSN();
+            }
+
+        }.await();
+
+        /*
+         * There's a small chance that the null dtvlsn txn may itself
+         * become durable via a heartbeat from the replica before the
+         * connection is cut below. In this case, the minority side will have
+         * a higher in-memory dtvlsn that it has not yet had a chance to
+         * communicate to the replicas.
+         */
+
+        assertTrue(awaitDTVLSN);
+
+        logger.info("Simulating partition");
+
+        RepTestUtils.disableServices(rei1);
+
+        WaitForMasterListener masterWaiter = new WaitForMasterListener();
+        rei2.getEnv().setStateChangeListener(masterWaiter);
+        rei2.getRepNode().forceMaster(true);
+
+        masterWaiter.awaitMastership();
+
+        /* Two masters in the group. */
+        assertTrue(rei1.getEnv().getState().isMaster());
+        assertTrue(rei2.getEnv().getState().isMaster());
+
+        logger.info("Simulated partition");
+    }
+
+    private void healPartition() {
+        logger.info("Healing partition");
+
+        final RepEnvInfo rei1 = repEnvInfo[0];
+        RepTestUtils.reenableServices(rei1);
+    }
+}
diff --git a/test/com/sleepycat/je/rep/impl/NodeStateProtocolTest.java b/test/com/sleepycat/je/rep/impl/NodeStateProtocolTest.java
new file mode 100644
index 0000000..43c6dbb
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/NodeStateProtocolTest.java
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.impl;
+
+import org.junit.Before;
+
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+import com.sleepycat.je.rep.impl.TextProtocol.Message;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder;
+
+/**
+ * Tests the protocols used to query the current state of a replica.
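+ *
+ * The round trip being modeled, paraphrasing createMessages() below:
+ * <pre>{@code
+ * protocol.new NodeStateRequest(NODE_NAME);             // the query
+ * protocol.new NodeStateResponse(NODE_NAME, NODE_NAME,
+ *                                System.currentTimeMillis(),
+ *                                State.MASTER);         // the reply
+ * }</pre>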
+ */
+public class NodeStateProtocolTest extends TextProtocolTestBase {
+
+    private NodeStateProtocol protocol;
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        protocol =
+            new NodeStateProtocol(GROUP_NAME,
+                                  new NameIdPair("n1", (short) 1),
+                                  null,
+                                  DataChannelFactoryBuilder.construct(
+                                      RepTestUtils.readRepNetConfig()));
+    }
+
+    @Override
+    protected Message[] createMessages() {
+        Message[] messages = new Message[] {
+            protocol.new NodeStateRequest(NODE_NAME),
+            protocol.new NodeStateResponse(NODE_NAME,
+                                           NODE_NAME,
+                                           System.currentTimeMillis(),
+                                           State.MASTER)
+        };
+
+        return messages;
+    }
+
+    @Override
+    protected TextProtocol getProtocol() {
+        return protocol;
+    }
+}
diff --git a/test/com/sleepycat/je/rep/impl/RepGroupDBTest.java b/test/com/sleepycat/je/rep/impl/RepGroupDBTest.java
new file mode 100644
index 0000000..a4a1260
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/RepGroupDBTest.java
@@ -0,0 +1,189 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.Set;
+
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.util.ReplicationGroupAdmin;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+public class RepGroupDBTest extends RepTestBase {
+
+    public RepGroupDBTest() {
+    }
+
+    @Test
+    public void testBasic()
+        throws DatabaseException, InterruptedException {
+
+        RepTestUtils.joinGroup(repEnvInfo);
+        RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+        verifyRepGroupDB();
+    }
+
+    /**
+     * This is a test to verify the fix for SR 20607, where a failure during
+     * the process of adding a node can result in a circular wait situation.
+     * This test case implements the scenario described in the SR.
+     *
+     * @see SR 20607
+     */
+    @Test
+    public void testInitFailure()
+        throws DatabaseException {
+
+        /* Create a two node group. */
+        RepEnvInfo r1 = repEnvInfo[0];
+        RepEnvInfo r2 = repEnvInfo[1];
+
+        r1.openEnv();
+        r2.openEnv();
+
+        /* Simulate a process kill of r2. */
+        RepInternal.getNonNullRepImpl(r2.getEnv()).abnormalClose();
+
+        r1.closeEnv();
+        r2.closeEnv();
+
+        /*
+         * Restart the group. The restart should not time out due to a
+         * circular wait, with r1 waiting to conclude an election and r2
+         * trying to locate a master.
+         */
+        RepTestUtils.restartGroup(r1, r2);
+    }
+
+
+    /**
+     * This test ensures that the replication stream is not blocked due to a
+     * conflict on the rep group db by a metadata operation requiring
+     * acks from the replication stream.
+     *
+     * 1) Set up a three node group: small log files and long replica
+     *    timeouts, long lock timeouts
+     * 2) Shut down two nodes.
+ * 3) Populate the DB with no acks + * 4) Start a thread to update the address for node 3 asynchronously + * 5) Verify that it has stalled. + * 6) Bring node 2 online. + * 7) The async address update thread should finish within the ack timeout + * period. + */ + @Test + public void testRepGroupContention() throws InterruptedException { + /* + * Small log files to cause local cbvlsn updates, which could block + * replay. + */ + setEnvConfigParam(EnvironmentParams.LOG_FILE_MAX, "10000"); + setEnvConfigParam(EnvironmentParams.LOCK_TIMEOUT, "60 s"); + setRepConfigParam(RepParams.REPLICA_ACK_TIMEOUT, "60 s"); + + createGroup(3); + String rn3Name = repEnvInfo[2].getRepNode().getNodeName(); + repEnvInfo[2].closeEnv(); + repEnvInfo[1].closeEnv(); + ReplicatedEnvironment menv = repEnvInfo[0].getEnv(); + populateDB(menv, 1000); + + final AsyncUpdateAddress asyncUpdateThread = + new AsyncUpdateAddress(rn3Name); + asyncUpdateThread.start(); + asyncUpdateThread.join(5000); + + /* Verify update address stall. */ + assertTrue(asyncUpdateThread.isAlive()); + + /* Allow it to join, catch up and ack */ + repEnvInfo[1].openEnv(); + + asyncUpdateThread.join(30000); + + /* + * Verify that it has concluded successfully. If the feeder was blocked + * the async operation would not be able to complete and the thread + * would still be alive. + */ + assertTrue(!asyncUpdateThread.isAlive()); + assertTrue(asyncUpdateThread.testException == null); + } + + protected class AsyncUpdateAddress extends Thread { + final String rn3Name; + public Throwable testException = null; + + public AsyncUpdateAddress(String rn3Name) { + this.rn3Name = rn3Name; + } + + @Override + public void run() { + try { + final Set helperSockets = + repEnvInfo[0].getRepImpl().getHelperSockets(); + + final ReplicationGroupAdmin groupAdmin = + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + helperSockets, + RepTestUtils.readRepNetConfig()); + groupAdmin.updateAddress(rn3Name, "localhost", 10000); + } catch (Throwable e) { + testException = e; + } + } + } + + /** + * Verifies that the contents of the database matches the contents of the + * individual repConfigs. + * + * @throws DatabaseException + */ + private void verifyRepGroupDB() + throws DatabaseException { + /* + * master and replica must all agree on the contents of the + * rep group db and the local info about the node. + */ + for (RepEnvInfo repi : repEnvInfo) { + + ReplicatedEnvironment rep = repi.getEnv(); + Collection nodes = + RepGroupDB.getGroup(RepInternal.getNonNullRepImpl(rep), + RepTestUtils.TEST_REP_GROUP_NAME) + .getElectableMembers(); + assertEquals(repEnvInfo.length, nodes.size()); + for (RepNodeImpl n : nodes) { + int nodeId = n.getNodeId(); + RepImpl repImpl = RepInternal.getNonNullRepImpl( + repEnvInfo[nodeId-1].getEnv()); + assertEquals(repImpl.getPort(), n.getPort()); + assertEquals(repImpl.getHostName(), n.getHostName()); + assertEquals(n.isQuorumAck(), true); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/RepGroupImplCompatibilityTest.java b/test/com/sleepycat/je/rep/impl/RepGroupImplCompatibilityTest.java new file mode 100644 index 0000000..31685d6 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/RepGroupImplCompatibilityTest.java @@ -0,0 +1,98 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import static com.sleepycat.je.rep.impl.RepParams.TEST_JE_VERSION; +import static org.junit.Assert.fail; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +import org.junit.Test; + +/** + * Test compatibility checks for RepGroupImpl versions. + */ +public class RepGroupImplCompatibilityTest extends RepTestBase { + + /** + * Test using a master supporting a stream protocol 4, too old to support + * RepGroupImpl version 3. + */ + @Test + public void testRepGroupImplCompatibilityOldMaster() + throws DatabaseException { + + /* Create master with stream protocol version 4 */ + setJEVersion(Protocol.VERSION_4_JE_VERSION, repEnvInfo[0]); + createGroup(1); + + /* Replica with RepGroupImpl minimum version is OK. */ + setJEVersion(RepGroupImpl.MIN_FORMAT_VERSION_JE_VERSION, repEnvInfo[1]); + repEnvInfo[1].openEnv(); + + /* Replica with RepGroupImpl version 3 is not. */ + setJEVersion(RepGroupImpl.FORMAT_VERSION_3_JE_VERSION, repEnvInfo[2]); + try { + repEnvInfo[2].openEnv(); + fail("Expected EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Replica version too new: " + e); + } + } + + /** + * Test using a master requiring stream protocol 5, too new to support + * RepGroupImpl version 2. + */ + @Test + public void testRepGroupImplCompatibilityNewMaster() + throws DatabaseException { + + /* Prevent setting of minJEVersion for this test. */ + for (RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setConfigParam( + RepParams.TEST_CBVLSN.getName(), "true"); + } + + /* Create master with RepGroupImpl version 3 */ + createGroup(1); + + /* Replica with stream protocol version 5 is OK */ + setJEVersion(Protocol.VERSION_5_JE_VERSION, repEnvInfo[1]); + repEnvInfo[1].openEnv(); + + /* Replica with stream protocol version 4 is not */ + setJEVersion(Protocol.VERSION_4_JE_VERSION, repEnvInfo[2]); + try { + repEnvInfo[2].openEnv(); + fail("Expected EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Replica version too old: " + e); + } + } + + /** Set the JE version for the specified nodes. */ + private void setJEVersion(final JEVersion jeVersion, + final RepEnvInfo... nodes) { + assert jeVersion != null; + for (final RepEnvInfo node : nodes) { + node.getRepConfig().setConfigParam( + TEST_JE_VERSION.getName(), jeVersion.toString()); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/RepGroupImplTest.java b/test/com/sleepycat/je/rep/impl/RepGroupImplTest.java new file mode 100644 index 0000000..383969f --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/RepGroupImplTest.java @@ -0,0 +1,57 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; + +import java.net.UnknownHostException; + +import org.junit.Test; + +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.util.test.TestBase; + +public class RepGroupImplTest extends TestBase { + + @Test + public void testSerializeDeserialize() + throws UnknownHostException { + + for (int formatVersion = RepGroupImpl.MIN_FORMAT_VERSION; + formatVersion <= RepGroupImpl.MAX_FORMAT_VERSION; + formatVersion++) { + final int electable = 5; + final int monitor = 1; + final int secondary = + (formatVersion < RepGroupImpl.FORMAT_VERSION_3) ? + 0 : + 3; + RepGroupImpl group = + RepTestUtils.createTestRepGroup(electable, monitor, secondary); + String s1 = group.serializeHex(formatVersion); + String tokens[] = s1.split(TextProtocol.SEPARATOR_REGEXP); + assertEquals( + 1 + /* The Rep group itself */ + + electable + monitor + secondary, /* the individual nodes. */ + tokens.length); + RepGroupImpl dgroup = RepGroupImpl.deserializeHex(tokens, 0); + assertEquals("Version", formatVersion, dgroup.getFormatVersion()); + if (formatVersion == RepGroupImpl.INITIAL_FORMAT_VERSION) { + assertEquals("Deserialized version " + formatVersion, + group, dgroup); + } + String s2 = dgroup.serializeHex(formatVersion); + assertEquals("Reserialized version " + formatVersion, s1, s2); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/RepGroupProtocolTest.java b/test/com/sleepycat/je/rep/impl/RepGroupProtocolTest.java new file mode 100644 index 0000000..14446b4 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/RepGroupProtocolTest.java @@ -0,0 +1,306 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.net.UnknownHostException; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.RepGroupProtocol.EnsureNode; +import com.sleepycat.je.rep.impl.RepGroupProtocol.GroupRequest; +import com.sleepycat.je.rep.impl.RepGroupProtocol.GroupResponse; +import com.sleepycat.je.rep.impl.RepGroupProtocol.TransferMaster; +import com.sleepycat.je.rep.impl.TextProtocol.InvalidMessageException; +import com.sleepycat.je.rep.impl.TextProtocol.Message; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; + +/** + * Tests the protocols used to maintain the rep group and support the Monitor. + */ +public class RepGroupProtocolTest extends TextProtocolTestBase { + + private RepGroupProtocol protocol; + private DataChannelFactory channelFactory; + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + + channelFactory = + DataChannelFactoryBuilder.construct( + RepTestUtils.readRepNetConfig(), GROUP_NAME); + + protocol = + new RepGroupProtocol(GROUP_NAME, + new NameIdPair("n1", (short) 1), + null, + channelFactory); + } + + @Override + protected Message[] createMessages() { + try { + final List m = new ArrayList(); + final EnsureNode ensureNode = + protocol.new EnsureNode( + new RepNodeImpl(new NameIdPair(NODE_NAME, 1), + NodeType.MONITOR, + "localhost", + 5000, + null)); + m.add(ensureNode); + m.add(protocol.new EnsureOK( + ensureNode, new NameIdPair(NODE_NAME, 1))); + m.add(protocol.new RemoveMember("m1")); + final GroupRequest groupRequest = + protocol.new GroupRequest(); + m.add(groupRequest); + m.add(protocol.new GroupResponse( + groupRequest, RepTestUtils.createTestRepGroup(5, 5))); + m.add(protocol.new Fail( + RepGroupProtocol.FailReason.DEFAULT, "failed")); + m.add(protocol.new UpdateAddress("test", "localhost", 5001)); + final TransferMaster transferMaster = + protocol.new TransferMaster("mercury,venus,mars", + 10000, false); + m.add(transferMaster); + m.add(protocol.new TransferOK(transferMaster, "venus")); + m.add(protocol.new DeleteMember("m1")); + return m.toArray(new Message[m.size()]); + } catch (UnknownHostException e) { + fail("Unexpected exception: " + e.getStackTrace()); + return null; + } + } + + @Override + protected TextProtocol getProtocol() { + return protocol; + } + + /** + * Test protocol changes to support RepGroupImpl version 3 for EnsureNode. 
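+     *
+     * The compatibility matrix checked below, in brief:
+     * <pre>{@code
+     * oldProtocol.parse(oldEnsureNode.wireFormat()); // OK, old node format
+     * protocol.parse(oldEnsureNode.wireFormat());    // OK, old node format
+     * oldProtocol.parse(newEnsureNode.wireFormat()); // InvalidMessageException
+     * }</pre>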
+ */ + @Test + public void testRepGroupImplV3EnsureNode() + throws InvalidMessageException { + + /* Node with JE version info */ + final RepNodeImpl newNode = new RepNodeImpl( + new NameIdPair("m1", 1), NodeType.MONITOR, "localhost", 5000, + JEVersion.CURRENT_VERSION); + + /* Old protocol using old RepGroupImpl version 2 format */ + final RepGroupProtocol oldProtocol = + new RepGroupProtocol(RepGroupProtocol.REP_GROUP_V2_VERSION, + GROUP_NAME, new NameIdPair("n2", 2), null, + channelFactory); + + /* Old node format with no JE version */ + final RepNodeImpl oldNode = new RepNodeImpl( + new NameIdPair("m1", 1), NodeType.MONITOR, "localhost", 5000, + null); + + /* Old message format, using new node format, to check conversion */ + final EnsureNode oldEnsureNode = oldProtocol.new EnsureNode(newNode); + + /* Receive old format with old protocol */ + final EnsureNode oldEnsureNodeViaOld = + (EnsureNode) oldProtocol.parse(oldEnsureNode.wireFormat()); + assertEquals("Old message format via old protocol should use old" + + " node format", + oldNode, oldEnsureNodeViaOld.getNode()); + + /* Receive old format with new protocol */ + final EnsureNode oldEnsureNodeViaNew = + (EnsureNode) protocol.parse(oldEnsureNode.wireFormat()); + assertEquals("Old message format via new protocol should use old" + + " node format", + oldNode, oldEnsureNodeViaNew.getNode()); + + /* Receive new format with old protocol */ + final EnsureNode newEnsureNode = protocol.new EnsureNode(newNode); + try { + oldProtocol.parse(newEnsureNode.wireFormat()); + fail("Expected InvalidMessageException when old protocol" + + " receives new format message"); + } catch (InvalidMessageException e) { + assertEquals("New message format via old protocol should produce" + + " a version mismatch", + TextProtocol.MessageError.VERSION_MISMATCH, + e.getErrorType()); + } + } + + /** + * Test protocol changes to support RepGroupImpl version 3 for + * GroupResponse. 
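+     *
+     * The same matrix as the EnsureNode case above, but for whole-group
+     * responses: old-format messages parse under both protocol versions,
+     * yielding the downgraded version 2 group, while a new-format message
+     * received by the old protocol fails with a VERSION_MISMATCH.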
+ */ + @Test + public void testRepGroupImplV3GroupResponse() + throws InvalidMessageException { + + /* New group format with JE version and new node types */ + final RepNodeImpl newNode = new RepNodeImpl( + new NameIdPair("m1", 1), NodeType.MONITOR, "localhost", 5000, + JEVersion.CURRENT_VERSION); + final RepNodeImpl secondaryNode = new RepNodeImpl( + new NameIdPair("s1", 2), NodeType.SECONDARY, "localhost", 5001, + JEVersion.CURRENT_VERSION); + final RepGroupImpl newGroup = new RepGroupImpl(GROUP_NAME, null); + final Map nodeMap = + new HashMap(); + nodeMap.put(1, newNode); + nodeMap.put(2, secondaryNode); + newGroup.setNodes(nodeMap); + + /* Old protocol using old RepGroupImpl version 2 format */ + final RepGroupProtocol oldProtocol = + new RepGroupProtocol(RepGroupProtocol.REP_GROUP_V2_VERSION, + GROUP_NAME, new NameIdPair("n2", 2), null, + channelFactory); + + /* Old group format with no JE version or new node types */ + final RepNodeImpl oldNode = new RepNodeImpl( + new NameIdPair("m1", 1), NodeType.MONITOR, "localhost", 5000, + null); + final RepGroupImpl oldGroup = + new RepGroupImpl(GROUP_NAME, newGroup.getUUID(), + RepGroupImpl.FORMAT_VERSION_2); + oldGroup.setNodes(Collections.singletonMap(1, oldNode)); + + /* Old message format, using new node format, to check conversion */ + final GroupResponse oldGroupResponse = + oldProtocol.new GroupResponse( + oldProtocol.new GroupRequest(), newGroup); + + /* Receive old format with old protocol */ + final GroupResponse oldGroupResponseViaOld = + (GroupResponse) oldProtocol.parse(oldGroupResponse.wireFormat()); + assertEquals("Old message format via old protocol should use old" + + " group format", + oldGroup, oldGroupResponseViaOld.getGroup()); + + /* Receive old format with new protocol */ + final GroupResponse oldGroupResponseViaNew = + (GroupResponse) protocol.parse(oldGroupResponse.wireFormat()); + assertEquals("Old message format via new protocol should use old" + + " group format", + oldGroup, oldGroupResponseViaNew.getGroup()); + + /* Receive new format with old protocol */ + final GroupResponse newGroupResponse = + protocol.new GroupResponse(protocol.new GroupRequest(), newGroup); + try { + oldProtocol.parse(newGroupResponse.wireFormat()); + fail("Expected InvalidMessageException when old protocol" + + " receives new format message"); + } catch (InvalidMessageException e) { + assertEquals("New message format via old protocol should produce" + + " a version mismatch", + TextProtocol.MessageError.VERSION_MISMATCH, + e.getErrorType()); + } + } + + /** + * Test the message format of InvalidMessageExceptions thrown by + * TextProtocol for malformed messages. In particular, test that the + * exception includes information about the protocol message that caused + * the failure. [#23352] + */ + @Test + public void testInvalidMessageExceptions() + throws Exception { + + /* Message with too few tokens */ + String message = "msg-with-too-few-tokens"; + try { + protocol.parse(message); + fail("Expected InvalidMessageException"); + } catch (InvalidMessageException e) { + assertEquals("Missing message op in message: " + message, + e.getMessage()); + } + + /* Message version is too high */ + protocol.updateNodeIds(Collections.singleton(1)); + final String badVersion = RepGroupProtocol.VERSION + "99"; + message = badVersion + "|" + GROUP_NAME + "|1|ENREQ|my-payload"; + try { + protocol.parse(message); + fail("Expected InvalidMessageException"); + } catch (InvalidMessageException e) { + assertEquals("Version argument mismatch." 
+ + " Expected: " + RepGroupProtocol.VERSION + + ", found: " + badVersion + + ", in message: " + message, + e.getMessage()); + } + + /* Wrong message group name */ + message = RepGroupProtocol.VERSION + + "|wrong-group-name|1|ENREQ|my-payload"; + try { + protocol.parse(message); + fail("Expected InvalidMessageException"); + } catch (InvalidMessageException e) { + assertEquals("Group name mismatch;" + + " this group name: " + GROUP_NAME + + ", message group name: wrong-group-name" + + ", in message: " + message, + e.getMessage()); + } + + /* Unknown member */ + message = RepGroupProtocol.VERSION + "|" + GROUP_NAME + + "|99|ENREQ|my-payload"; + try { + protocol.parse(message); + fail("Expected InvalidMessageException"); + } catch (InvalidMessageException e) { + assertEquals("Sender's member id: 99, message op: ENREQ" + + ", was not a member of the group: [1]" + + ", in message: " + message, + e.getMessage()); + } + + /* Missing payload */ + message = RepGroupProtocol.VERSION + "|" + GROUP_NAME + "|1|ENREQ"; + try { + protocol.parse(message); + fail("Expected InvalidMessageException"); + } catch (InvalidMessageException e) { + assertEquals("Bad format; missing token at position: 4" + + ", in message: " + message, + e.getMessage()); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/RepTestBase.java b/test/com/sleepycat/je/rep/impl/RepTestBase.java new file mode 100644 index 0000000..2a382f6 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/RepTestBase.java @@ -0,0 +1,622 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Collection; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.CountDownLatch; +import java.util.logging.Logger; + +import org.junit.After; +import org.junit.Before; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.ConfigParam; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.node.FeederManager; +import com.sleepycat.je.rep.monitor.Monitor; +import com.sleepycat.je.rep.monitor.MonitorConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public abstract class RepTestBase extends TestBase { + + protected final Logger logger = + LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + + /** + * Used to start up an existing group. Each environment must be opened in + * its own thread, since the open of the environment does not return until + * an election has been concluded and a Master is in place. + */ + protected static class EnvOpenThread extends Thread { + final RepEnvInfo threadRepInfo; + public Throwable testException = null; + + public EnvOpenThread(RepEnvInfo info) { + this.threadRepInfo = info; + } + + @Override + public void run() { + try { + threadRepInfo.openEnv(); + } catch (Throwable e) { + testException = e; + } + } + } + + /** + * Listener used to determine when a Master becomes available, by tripping + * the count down latch. + */ + protected static class MasterListener implements StateChangeListener{ + final CountDownLatch masterLatch; + + public MasterListener(CountDownLatch masterLatch) { + super(); + this.masterLatch = masterLatch; + } + + @Override + public void stateChange(StateChangeEvent stateChangeEvent) + throws RuntimeException { + + if (stateChangeEvent.getState().isMaster()) { + masterLatch.countDown(); + } + } + } + + protected final File envRoot = SharedTestUtils.getTestDir(); + protected int groupSize = 5; + protected RepEnvInfo[] repEnvInfo = null; + protected DatabaseConfig dbconfig; + protected final DatabaseEntry key = new DatabaseEntry(new byte[]{1}); + protected final DatabaseEntry data = new DatabaseEntry(new byte[]{100}); + protected static final String TEST_DB_NAME = "TestDB"; + + /* Max time to wait for consistency to be established. 
*/ + protected static final int CONSISTENCY_TIMEOUT_MS = 10000; + public static final int JOIN_WAIT_TIME = 10000; + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + dbconfig = new DatabaseConfig(); + dbconfig.setAllowCreate(true); + dbconfig.setTransactional(true); + dbconfig.setSortedDuplicates(false); + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, groupSize); + } + + /** + * @throws Exception in subclasses. + */ + @Override + @After + public void tearDown() + throws Exception { + + /* + * Don't checkpoint on shutdown, we are not going to use these + * environments again. + */ + RepTestUtils.shutdownRepEnvs(repEnvInfo, false); + + /* Verify that all environments were indeed closed. */ + Collection residualImpls = + DbEnvPool.getInstance().getEnvImpls(); + if (residualImpls.size() != 0) { + String implNames = ""; + for (EnvironmentImpl envImpl : residualImpls) { + implNames += envImpl.getEnvironmentHome().toString() + " "; + } + + /* + * Clear the bad env state so that the next test is not + * contaminated. + */ + DbEnvPool.getInstance().clear(); + fail("residual environments:" + implNames); + } + } + + /** + * Populates the master db using the specified transaction configuration. + */ + protected CommitToken populateDB(ReplicatedEnvironment rep, + String dbName, + int startKey, + int nRecords, + TransactionConfig txnConfig) + throws DatabaseException { + + Environment env = rep; + Database db = null; + boolean done = false; + Transaction txn = env.beginTransaction(null, txnConfig); + try { + db = env.openDatabase(txn, dbName, dbconfig); + txn.commit(); + txn = null; + txn = env.beginTransaction(null, txnConfig); + for (int i = 0; i < nRecords; i++) { + IntegerBinding.intToEntry(startKey + i, key); + LongBinding.longToEntry(i, data); + db.put(txn, key, data); + } + txn.commit(); + done = true; + return txn.getCommitToken(); + } finally { + if (txn != null && !done) { + txn.abort(); + } + if (db != null) { + db.close(); + } + } + } + + /** + * Populates the master db using the specified keys and values + * and transaction configuration. + */ + protected CommitToken populateDB(ReplicatedEnvironment rep, + String dbName, + List keys, + TransactionConfig txnConfig) + throws DatabaseException { + + Environment env = rep; + Database db = null; + boolean done = false; + Transaction txn = env.beginTransaction(null, txnConfig); + try { + db = env.openDatabase(txn, dbName, dbconfig); + txn.commit(); + txn = null; + txn = env.beginTransaction(null, txnConfig); + for (int i = 0; i < keys.size(); i++) { + IntegerBinding.intToEntry(keys.get(i), key); + LongBinding.longToEntry(i, data); + db.put(txn, key, data); + } + txn.commit(); + done = true; + return txn.getCommitToken(); + } finally { + if (txn != null && !done) { + txn.abort(); + } + if (db != null) { + db.close(); + } + } + } + + + /** + * Populates the master db without regard for the state of the replicas: It + * uses ACK NONE to populate the database. + */ + protected CommitToken populateDB(ReplicatedEnvironment rep, + String dbName, + int startKey, + int nRecords) + throws DatabaseException { + + return populateDB(rep, dbName, startKey, nRecords, + RepTestUtils.WNSYNC_NONE_TC); + } + + + /** + * Populates the master db with a list of keys. It + * uses ACK NONE to populate the database. 
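+     *
+     * For example (hypothetical values; "master" stands for any RepEnvInfo
+     * whose environment is the current master):
+     * <pre>{@code
+     * populateDB(master.getEnv(), TEST_DB_NAME, Arrays.asList(5, 7, 11));
+     * }</pre>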
+ */ + protected CommitToken populateDB(ReplicatedEnvironment rep, + String dbName, + List keys) + throws DatabaseException { + + return populateDB(rep, dbName, keys, RepTestUtils.WNSYNC_NONE_TC); + } + + protected CommitToken populateDB(ReplicatedEnvironment rep, + String dbName, + int nRecords) + throws DatabaseException { + + return populateDB(rep, dbName, 0, nRecords); + } + + protected CommitToken populateDB(ReplicatedEnvironment rep, int nRecords) + throws DatabaseException { + + return populateDB(rep, TEST_DB_NAME, 0, nRecords); + } + + /** Read the db using the specified transaction configuration. */ + protected void readDB(final ReplicatedEnvironment rep, + final String dbName, + final int startKey, + final int nRecords, + final TransactionConfig txnConfig) + throws DatabaseException { + + Environment env = rep; + Database db = null; + Transaction txn = env.beginTransaction(null, txnConfig); + try { + db = env.openDatabase(txn, dbName, dbconfig); + txn.commit(); + txn = null; + txn = env.beginTransaction(null, txnConfig); + for (int i = 0; i < nRecords; i++) { + IntegerBinding.intToEntry(startKey + i, key); + db.get(txn, key, data, null /* lockMode */); + final long value = LongBinding.entryToLong(data); + assertEquals("Database value for key " + (startKey+i), + i, value); + } + txn.commit(); + txn = null; + } finally { + if (txn != null) { + txn.abort(); + } + if (db != null) { + db.close(); + } + } + } + + /** Read the db using NO_CONSISTENCY. */ + protected void readDB(final ReplicatedEnvironment rep, + final String dbName, + final int startKey, + final int nRecords) + throws DatabaseException { + + readDB(rep, dbName, startKey, nRecords, + RepTestUtils.NO_CONSISTENCY_TC); + } + + protected void readDB(final ReplicatedEnvironment rep, + final String dbName, + final int nRecords) + throws DatabaseException { + + readDB(rep, dbName, 0, nRecords); + } + + protected void readDB(final ReplicatedEnvironment rep, final int nRecords) + throws DatabaseException { + + readDB(rep, TEST_DB_NAME, 0, nRecords); + } + + protected void createGroup() + throws UnknownMasterException, DatabaseException { + + createGroup(repEnvInfo.length); + } + + protected void createGroup(int firstn) + throws UnknownMasterException, DatabaseException { + + for (int i = 0; i < firstn; i++) { + ReplicatedEnvironment rep = repEnvInfo[i].openEnv(); + State state = rep.getState(); + assertEquals((i == 0) ? State.MASTER : State.REPLICA, state); + } + + logger.info("A replication group of " + groupSize + + " nodes has been created"); + } + + protected ReplicatedEnvironment leaveGroupAllButMaster() + throws DatabaseException { + + ReplicatedEnvironment master = null; + for (RepEnvInfo repi : repEnvInfo) { + if (repi.getEnv() == null) { + continue; + } + if (State.MASTER.equals(repi.getEnv().getState())) { + master = repi.getEnv(); + } else { + repi.closeEnv(); + } + } + + assert(master != null); + return master; + } + + /** + * Restarts the nodes in an existing group using the default join wait + * time. Returns the info associated with the Master. + * + * @return the RepEnvInfo associated with the master, or null if there is + * no master. This could be because the election was not concluded within + * JOIN_WAIT_TIME. + */ + protected RepEnvInfo restartNodes(RepEnvInfo... nodes) + throws InterruptedException { + + return restartNodes(JOIN_WAIT_TIME, nodes); + } + + /** + * Restarts the nodes in an existing group, waiting the specified amount of + * time for the election to complete. 
+     * Returns the info associated with the master.
+     *
+     * @return the RepEnvInfo associated with the master, or null if there is
+     * no master. This could be because the election was not concluded within
+     * the join wait time.
+     */
+    protected RepEnvInfo restartNodes(final long joinWaitTime,
+                                      final RepEnvInfo... nodes)
+        throws InterruptedException {
+
+        logger.info("Restarting " + nodes.length + " nodes");
+
+        /* Restart the group. */
+        EnvOpenThread[] threads = new EnvOpenThread[nodes.length];
+        for (int i = 0; i < nodes.length; i++) {
+            threads[i] = new EnvOpenThread(nodes[i]);
+            threads[i].start();
+        }
+
+        RepEnvInfo mi = null;
+
+        for (EnvOpenThread eot : threads) {
+            eot.join(joinWaitTime);
+            if (eot.isAlive()) {
+                final String msg =
+                    "Restart of node " +
+                    eot.threadRepInfo.getRepConfig().getNodeName() +
+                    " failed to complete within timeout " +
+                    joinWaitTime + " ms";
+
+                /*
+                 * Print the message in case the assertion is masked by
+                 * another failure.
+                 */
+                System.err.println(msg);
+                fail(msg);
+            }
+
+            if (eot.testException != null) {
+                eot.testException.printStackTrace();
+            }
+
+            assertNull("test exception: " +
+                       eot.testException, eot.testException);
+            final ReplicatedEnvironment renv = eot.threadRepInfo.getEnv();
+            if ((renv != null) &&
+                renv.isValid() &&
+                renv.getState().isMaster()) {
+                mi = eot.threadRepInfo;
+            }
+        }
+
+        return mi;
+    }
+
+    /**
+     * Restarts the nodes in an existing group without waiting for the
+     * election to complete or for the environments to finish opening.
+     */
+    protected void restartNodesNoWaitForReady(final RepEnvInfo... nodes) {
+
+        /* Restart the group. */
+        EnvOpenThread[] threads = new EnvOpenThread[nodes.length];
+        for (int i = 0; i < nodes.length; i++) {
+            threads[i] = new EnvOpenThread(nodes[i]);
+            threads[i].start();
+        }
+    }
+
+    /**
+     * Find and return the master from the set of nodes passed in.
+     */
+    public static RepEnvInfo findMaster(RepEnvInfo... nodes) {
+        for (RepEnvInfo ri : nodes) {
+            if ((ri.getEnv() == null) || !ri.getEnv().isValid()) {
+                continue;
+            }
+
+            if (ri.getEnv().getState().isMaster()) {
+                return ri;
+            }
+        }
+
+        return null;
+    }
+
+    /**
+     * Find the master, waiting up to the specified number of milliseconds,
+     * and failing if no master is found within that time.
+     */
+    public static RepEnvInfo findMasterWait(final long timeout,
+                                            final RepEnvInfo... nodes)
+        throws InterruptedException {
+
+        final long start = System.currentTimeMillis();
+        while (true) {
+            final RepEnvInfo masterInfo = findMaster(nodes);
+            if (masterInfo != null) {
+                return masterInfo;
+            }
+            if (System.currentTimeMillis() > start + timeout) {
+                break;
+            }
+            Thread.sleep(500);
+        }
+        throw new AssertionError(
+            "No master after " + (System.currentTimeMillis() - start) +
+            " milliseconds");
+    }
+
+    /**
+     * Find the master and wait for the specified number of acking replicas.
+     * Fails if no master is found, or if fewer than the specified number of
+     * acking replicas are active, within the specified number of
+     * milliseconds.
+     */
+    public static RepEnvInfo findMasterAndWaitForReplicas(
+        final long timeout,
+        final int nAckingReplicas,
+        final RepEnvInfo... nodes)
+        throws InterruptedException {
+
+        final RepEnvInfo masterRepInfo = findMasterWait(timeout, nodes);
+        final FeederManager feederManager =
+            masterRepInfo.getRepNode().feederManager();
+
+        final boolean replicasReady = new PollCondition(100, (int)timeout) {
+
+            @Override
+            protected boolean condition() {
+                return feederManager.activeAckReplicaCount() >=
+                    nAckingReplicas;
+            }
+        }.await();
+
+        if (replicasReady) {
+            return masterRepInfo;
+        }
+
+        throw new AssertionError(
+            "Found master but not the required number of replicas." +
+            " Number of acking replicas needed " + nAckingReplicas +
+            ", found " + feederManager.activeAckReplicaCount() +
+            " after " + timeout + " milliseconds");
+    }
+
+    /**
+     * Close the nodes that were passed in. Close the master last to prevent
+     * spurious elections, where intervening elections create masters that
+     * are immediately closed.
+     */
+    protected void closeNodes(RepEnvInfo... nodes) {
+        RepEnvInfo mi = null;
+        for (RepEnvInfo ri : nodes) {
+            ReplicatedEnvironment env = ri.getEnv();
+            if ((env == null) || !env.isValid()) {
+                continue;
+            }
+            if (env.getState().isMaster()) {
+                mi = ri;
+                continue;
+            }
+            ri.closeEnv();
+        }
+
+        if (mi != null) {
+            mi.closeEnv();
+        }
+    }
+
+    /**
+     * Create and return a {@link Monitor}. The caller should make sure to
+     * call {@link Monitor#shutdown} when it is done using the monitor.
+     *
+     * @param portDelta the increment past the default port for the monitor
+     * port
+     * @param monitorName the name of the monitor
+     * @throws Exception if a problem occurs
+     */
+    protected Monitor createMonitor(final int portDelta,
+                                    final String monitorName)
+        throws Exception {
+
+        final String nodeHosts =
+            repEnvInfo[0].getRepConfig().getNodeHostPort() +
+            "," + repEnvInfo[1].getRepConfig().getNodeHostPort();
+        final int monitorPort =
+            Integer.parseInt(RepParams.DEFAULT_PORT.getDefault()) + portDelta;
+        final MonitorConfig monitorConfig = new MonitorConfig();
+        monitorConfig.setGroupName(RepTestUtils.TEST_REP_GROUP_NAME);
+        monitorConfig.setNodeName(monitorName);
+        monitorConfig.setNodeHostPort(
+            RepTestUtils.TEST_HOST + ":" + monitorPort);
+        monitorConfig.setHelperHosts(nodeHosts);
+
+        Properties accessProps = RepTestUtils.readNetProps();
+        monitorConfig.setRepNetConfig(
+            ReplicationNetworkConfig.create(accessProps));
+
+        return new Monitor(monitorConfig);
+    }
+
+    protected void setRepConfigParam(ConfigParam param, String value) {
+
+        for (RepEnvInfo info : repEnvInfo) {
+            info.getRepConfig().setConfigParam(param.getName(), value);
+        }
+    }
+
+    protected void setEnvConfigParam(ConfigParam param, String value) {
+
+        for (RepEnvInfo info : repEnvInfo) {
+            info.getEnvConfig().setConfigParam(param.getName(), value);
+        }
+    }
+
+    protected void updateHelperHostConfig() {
+        String helperHosts = "";
+        for (RepEnvInfo rinfo : repEnvInfo) {
+            helperHosts += (rinfo.getRepConfig().getNodeHostPort() + ",");
+        }
+
+        setRepConfigParam(RepParams.HELPER_HOSTS, helperHosts);
+    }
+}
diff --git a/test/com/sleepycat/je/rep/impl/ReplayWithBinDeltaInsertionsTest.java b/test/com/sleepycat/je/rep/impl/ReplayWithBinDeltaInsertionsTest.java
new file mode 100644
index 0000000..da606da
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/ReplayWithBinDeltaInsertionsTest.java
@@ -0,0 +1,314 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.File; +import java.util.logging.Logger; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Bring up a number of nodes with a fixed master and n clients. Perform + * basic operations, test for correctness. + */ +public class ReplayWithBinDeltaInsertionsTest extends TestBase { + + private static final int ONE_MB = 1 << 20; + + private final boolean verbose = Boolean.getBoolean("VERBOSE"); + private static final String TEST_DB = "testdb"; + + /* Replication tests use multiple environments. */ + private final File envRoot; + private final int nNodes; + + public ReplayWithBinDeltaInsertionsTest() { + envRoot = SharedTestUtils.getTestDir(); + nNodes = 3; + customName = nNodes + "nodes"; + } + + /** + * Create n nodes, startup. + * - do some work, verify that all nodes have the same data. + * - switch masters + * - do more work, verify that all nodes have the same data. + * - switch masters + * etc + */ + @Test + public void testRoundRobinMasters() + throws Exception { + + RepEnvInfo[] repEnvInfo = null; + Logger logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + + try { + /* Create a replicator for each environment directory. */ + EnvironmentConfig envConfig = + RepTestUtils.createEnvConfig + (new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY)); + envConfig.setConfigParam + (EnvironmentConfig.LOG_FILE_MAX, + EnvironmentParams.LOG_FILE_MAX.getDefault()); + + envConfig.setConfigParam( + EnvironmentParams.MAX_MEMORY.getName(), + new Integer(ONE_MB).toString()); + + // TODO: Is this needed now that hard recovery works? + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + envConfig.setConfigParam("je.env.runCleaner", "false"); + + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, nNodes, envConfig); + + /* Increase the ack timeout, to deal with slow test machines. */ + RepTestUtils.setConfigParam(RepParams.REPLICA_ACK_TIMEOUT, "30 s", + repEnvInfo); + + /* Start all members of the group. 
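+         *
+         * Aside (illustration only; the local variable below is added for
+         * exposition, not used by the original test): the durability
+         * configured above is JE's standard quorum-ack write
+         * configuration, equivalent to:
+         */
+        final Durability ackMajority = new Durability(
+            Durability.SyncPolicy.WRITE_NO_SYNC,
+            Durability.SyncPolicy.WRITE_NO_SYNC,
+            Durability.ReplicaAckPolicy.SIMPLE_MAJORITY);
+        logger.fine("Quorum-ack durability in use: " + ackMajority);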
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assert(master != null);
+
+        /* Do work */
+        int startVal = 50000;
+        doWork(master, startVal);
+
+        VLSN commitVLSN =
+            RepTestUtils.syncGroupToLastCommit(repEnvInfo,
+                                               repEnvInfo.length);
+        RepTestUtils.checkNodeEquality(commitVLSN, verbose, repEnvInfo);
+
+        logger.fine("--> All nodes in sync");
+
+        /*
+         * Round robin through the group, letting each one have a turn
+         * as the master.
+         */
+        for (int i = 0; i < nNodes; i++) {
+            /*
+             * Shut down just under a quorum of the nodes. Let the
+             * remaining nodes vote, and then do some work. Then bring
+             * the rest of the group back in a staggered fashion. Check for
+             * consistency among the entire group.
+             */
+            logger.fine("--> Shutting down, oldMaster=" +
+                        master.getNodeName());
+            int activeNodes =
+                shutdownAllButQuorum(logger,
+                                     repEnvInfo,
+                                     RepInternal.getNodeId(master));
+
+            master = RepTestUtils.openRepEnvsJoin(repEnvInfo);
+
+            assertNotNull(master);
+            logger.fine("--> New master = " + master.getNodeName());
+
+            if (i == 0) {
+                startVal = 0;
+            } else if (i == 1) {
+                startVal = 70000;
+            } else if (i == 2) {
+                startVal = 30000;
+            } else {
+                assert(false);
+            }
+
+            /*
+             * This test is very timing dependent, so
+             * InsufficientReplicasException is allowed.
+             */
+            int retries = 5;
+            for (int retry = 0;; retry++) {
+                try {
+                    doWork(master, startVal);
+                    break;
+                } catch (InsufficientReplicasException e) {
+                    if (retry >= retries) {
+                        throw e;
+                    }
+                }
+            }
+
+            /* Re-open the closed nodes and have them re-join the group. */
+            logger.fine("--> Before closed nodes rejoin");
+            ReplicatedEnvironment newMaster =
+                RepTestUtils.joinGroup(repEnvInfo);
+
+            assertEquals("Round " + i +
+                         " expected master to stay unchanged. ",
+                         master.getNodeName(),
+                         newMaster.getNodeName());
+            VLSN vlsn =
+                RepTestUtils.syncGroupToLastCommit(repEnvInfo,
+                                                   activeNodes);
+            RepTestUtils.checkNodeEquality(vlsn, verbose, repEnvInfo);
+        }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            RepTestUtils.shutdownRepEnvs(repEnvInfo);
+        }
+    }
+
+    private int shutdownAllButQuorum(Logger logger,
+                                     RepEnvInfo[] replicators,
+                                     int currentMasterId)
+        throws DatabaseException, InterruptedException {
+
+        /*
+         * Shut down all but a quorum of the nodes. Make sure that the
+         * master is one of the shut down nodes.
+         */
+        int nShutdown = replicators.length -
+            RepTestUtils.getQuorumSize(replicators.length);
+
+        /* Start by shutting down the master. */
+        int shutdownIdx = currentMasterId - 1;
+        int numSyncNodes = 0;
+        for (RepEnvInfo ri : replicators) {
+            if (ri.getEnv() != null) {
+                numSyncNodes++;
+            }
+        }
+
+        RepTestUtils.syncGroupToLastCommit(replicators, numSyncNodes);
+        while (nShutdown > 0) {
+            logger.fine("Closing node " + (shutdownIdx+1));
+            replicators[shutdownIdx].closeEnv();
+            nShutdown--;
+            shutdownIdx++;
+            if (shutdownIdx == replicators.length) {
+                shutdownIdx = 0;
+            }
+
+            /*
+             * It is possible that, after shutting down the master, the
+             * newly selected master is also shut down immediately, which
+             * would cause InsufficientReplicasException. This check largely
+             * avoids such an unexpected case.
+             */
+            if (replicators[shutdownIdx].isMaster()) {
+                shutdownIdx++;
+                if (shutdownIdx == replicators.length) {
+                    shutdownIdx = 0;
+                }
+            }
+        }
+
+        /*
+         * nShutdown has been decremented to zero by the loop above, so this
+         * returns the full group size, which matches the number of active
+         * nodes once the closed nodes rejoin in the caller.
+         */
+        return replicators.length - nShutdown;
+    }
+
+    private void doWork(ReplicatedEnvironment master, int startVal)
+        throws DatabaseException {
+
+        /* Now do some work.
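+         *
+         * Aside: the overlapping insert/update/delete ranges below are what
+         * provoke BIN-delta logging during replay. For example, with
+         * startVal = 50000 the first insert writes keys 50000..54999 and
+         * the following modifyData call dirties only 50002..50299 of them,
+         * a small fraction of each BIN.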
*/ + Database testDb = openTestDb(master); + insertData(testDb, startVal, startVal + 5000); + modifyData(testDb, startVal + 2, startVal + 300); + insertData(testDb, startVal + 5001, startVal + 8000); + deleteData(testDb, startVal + 30, startVal + 4000); + insertData(testDb, startVal + 8001, startVal + 9000); + modifyData(testDb, startVal + 7500, startVal + 8500); + deleteData(testDb, startVal + 8500, startVal + 9000); + insertData(testDb, startVal + 8501, startVal + 9000); + + testDb.close(); + } + + /* + * Create a database on the master. + */ + private Database openTestDb(ReplicatedEnvironment master) + throws DatabaseException { + + Environment env = master; + DatabaseConfig config = new DatabaseConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + config.setSortedDuplicates(true); + Database testDb = env.openDatabase(null, TEST_DB, config); + return testDb; + } + + private void insertData(Database testDb, + int startVal, + int endVal) + throws DatabaseException { + + DatabaseEntry val = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[1024]); + for (int i = startVal; i < endVal; i++) { + IntegerBinding.intToEntry(i, val); + assertEquals(OperationStatus.SUCCESS, + testDb.put(null, val /*key*/, data /*data*/)); + } + } + + private void modifyData(Database testDb, + int startVal, + int endVal) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry newDataVal = new DatabaseEntry(); + for (int i = startVal; i < endVal; i++) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i+1, newDataVal); + assertEquals(OperationStatus.SUCCESS, + testDb.put(null, key, newDataVal)); + } + } + + private void deleteData(Database testDb, + int startVal, + int endVal) + throws DatabaseException { + + DatabaseEntry val = new DatabaseEntry(); + for (int i = startVal; i < endVal; i++) { + IntegerBinding.intToEntry(i, val); + assertEquals(OperationStatus.SUCCESS, + testDb.delete(null, val /*key*/)); + } + } + +} diff --git a/test/com/sleepycat/je/rep/impl/RoundRobinTest.java b/test/com/sleepycat/je/rep/impl/RoundRobinTest.java new file mode 100644 index 0000000..bf4123b --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/RoundRobinTest.java @@ -0,0 +1,307 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.File; +import java.util.Arrays; +import java.util.List; +import java.util.logging.Logger; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Bring up a number of nodes with a fixed master and n clients. Perform + * basic operations, test for correctness. + */ +@RunWith(Parameterized.class) +public class RoundRobinTest extends TestBase { + + @Parameters + public static List genParams() { + + return Arrays.asList(new Object[][] {{3}, {5}, {6}}); + } + + private final boolean verbose = Boolean.getBoolean("VERBOSE"); + private static final String TEST_DB = "testdb"; + + /* Replication tests use multiple environments. */ + private final File envRoot; + private final int nNodes; + + public RoundRobinTest(int size) { + envRoot = SharedTestUtils.getTestDir(); + nNodes = size; + customName = nNodes + "nodes"; + } + + /** + * Create n nodes, startup. + * - do some work, verify that all nodes have the same data. + * - switch masters + * - do more work, verify that all nodes have the same data. + * - switch masters + * etc + */ + @Test + public void testRoundRobinMasters() + throws Exception { + + RepEnvInfo[] repEnvInfo = null; + Logger logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + + try { + /* Create a replicator for each environment directory. */ + EnvironmentConfig envConfig = + RepTestUtils.createEnvConfig + (new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY)); + envConfig.setConfigParam + (EnvironmentConfig.LOG_FILE_MAX, + EnvironmentParams.LOG_FILE_MAX.getDefault()); + + // TODO: Is this needed now that hard recovery works? + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + envConfig.setConfigParam("je.env.runCleaner", "false"); + + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, nNodes, envConfig); + + /* Increase the ack timeout, to deal with slow test machines. */ + RepTestUtils.setConfigParam(RepParams.REPLICA_ACK_TIMEOUT, "30 s", + repEnvInfo); + + /* Start all members of the group. 
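+         *
+         * Aside (illustration): for the parameterized group sizes 3, 5 and
+         * 6, a simple majority is 2, 3 and 4 nodes respectively, so each
+         * round of shutdownAllButQuorum below closes 1, 2 and 2 nodes
+         * (assuming RepTestUtils.getQuorumSize returns a simple majority).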
+         */
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assert(master != null);
+
+        /* Do work */
+        int startVal = 1;
+        doWork(master, startVal);
+
+        VLSN commitVLSN =
+            RepTestUtils.syncGroupToLastCommit(repEnvInfo,
+                                               repEnvInfo.length);
+        RepTestUtils.checkNodeEquality(commitVLSN, verbose, repEnvInfo);
+
+        logger.fine("--> All nodes in sync");
+
+        /*
+         * Round robin through the group, letting each one have a turn
+         * as the master.
+         */
+        for (int i = 0; i < nNodes; i++) {
+            /*
+             * Shut down just under a quorum of the nodes. Let the
+             * remaining nodes vote, and then do some work. Then bring
+             * the rest of the group back in a staggered fashion. Check for
+             * consistency among the entire group.
+             */
+            logger.fine("--> Shutting down, oldMaster=" +
+                        master.getNodeName());
+            int activeNodes =
+                shutdownAllButQuorum(logger,
+                                     repEnvInfo,
+                                     RepInternal.getNodeId(master));
+
+            master = RepTestUtils.openRepEnvsJoin(repEnvInfo);
+
+            assertNotNull(master);
+            logger.fine("--> New master = " + master.getNodeName());
+
+            startVal += 5;
+
+            /*
+             * This test is very timing dependent, so
+             * InsufficientReplicasException is allowed.
+             */
+            int retries = 5;
+            for (int retry = 0;; retry++) {
+                try {
+                    doWork(master, startVal);
+                    break;
+                } catch (InsufficientReplicasException e) {
+                    if (retry >= retries) {
+                        throw e;
+                    }
+                }
+            }
+
+            /* Re-open the closed nodes and have them re-join the group. */
+            logger.fine("--> Before closed nodes rejoin");
+            ReplicatedEnvironment newMaster =
+                RepTestUtils.joinGroup(repEnvInfo);
+
+            assertEquals("Round " + i +
+                         " expected master to stay unchanged. ",
+                         master.getNodeName(),
+                         newMaster.getNodeName());
+            VLSN vlsn =
+                RepTestUtils.syncGroupToLastCommit(repEnvInfo,
+                                                   activeNodes);
+            RepTestUtils.checkNodeEquality(vlsn, verbose, repEnvInfo);
+        }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            RepTestUtils.shutdownRepEnvs(repEnvInfo);
+        }
+    }
+
+    private int shutdownAllButQuorum(Logger logger,
+                                     RepEnvInfo[] replicators,
+                                     int currentMasterId)
+        throws DatabaseException, InterruptedException {
+
+        /*
+         * Shut down all but a quorum of the nodes. Make sure that the
+         * master is one of the shut down nodes.
+         */
+        int nShutdown = replicators.length -
+            RepTestUtils.getQuorumSize(replicators.length);
+
+        /* Start by shutting down the master. */
+        int shutdownIdx = currentMasterId - 1;
+        int numSyncNodes = 0;
+        for (RepEnvInfo ri : replicators) {
+            if (ri.getEnv() != null) {
+                numSyncNodes++;
+            }
+        }
+
+        RepTestUtils.syncGroupToLastCommit(replicators, numSyncNodes);
+        while (nShutdown > 0) {
+            logger.fine("Closing node " + (shutdownIdx+1));
+            replicators[shutdownIdx].closeEnv();
+            nShutdown--;
+            shutdownIdx++;
+            if (shutdownIdx == replicators.length) {
+                shutdownIdx = 0;
+            }
+
+            /*
+             * It is possible that, after shutting down the master, the
+             * newly selected master is also shut down immediately, which
+             * would cause InsufficientReplicasException. This check largely
+             * avoids such an unexpected case.
+             */
+            if (replicators[shutdownIdx].isMaster()) {
+                shutdownIdx++;
+                if (shutdownIdx == replicators.length) {
+                    shutdownIdx = 0;
+                }
+            }
+        }
+
+        /*
+         * nShutdown has been decremented to zero by the loop above, so this
+         * returns the full group size, which matches the number of active
+         * nodes once the closed nodes rejoin in the caller.
+         */
+        return replicators.length - nShutdown;
+    }
+
+    private void doWork(ReplicatedEnvironment master, int startVal)
+        throws DatabaseException {
+
+        /* Now do some work.
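+         *
+         * Aside: unlike ReplayWithBinDeltaInsertionsTest.doWork, which
+         * drives thousands of records to force BIN-delta logging, each
+         * round here touches only a handful of keys, keeping the
+         * round-robin master switches fast.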
+         */
+        Database testDb = openTestDb(master);
+        insertData(testDb, startVal, startVal + 5);
+        modifyData(testDb, startVal + 2, startVal + 3);
+        deleteData(testDb, startVal + 3, startVal + 4);
+
+        testDb.close();
+    }
+
+    /*
+     * Create a database on the master.
+     */
+    private Database openTestDb(ReplicatedEnvironment master)
+        throws DatabaseException {
+
+        Environment env = master;
+        DatabaseConfig config = new DatabaseConfig();
+        config.setAllowCreate(true);
+        config.setTransactional(true);
+        config.setSortedDuplicates(true);
+        Database testDb = env.openDatabase(null, TEST_DB, config);
+        return testDb;
+    }
+
+    private void insertData(Database testDb,
+                            int startVal,
+                            int endVal)
+        throws DatabaseException {
+
+        DatabaseEntry val = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry(new byte[1024]);
+        for (int i = startVal; i < endVal; i++) {
+            IntegerBinding.intToEntry(i, val);
+            assertEquals(OperationStatus.SUCCESS,
+                         testDb.put(null, val /*key*/, data /*data*/));
+        }
+    }
+
+    private void modifyData(Database testDb,
+                            int startVal,
+                            int endVal)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry newDataVal = new DatabaseEntry();
+        for (int i = startVal; i < endVal; i++) {
+            IntegerBinding.intToEntry(i, key);
+            IntegerBinding.intToEntry(i+1, newDataVal);
+            assertEquals(OperationStatus.SUCCESS,
+                         testDb.put(null, key, newDataVal));
+        }
+    }
+
+    private void deleteData(Database testDb,
+                            int startVal,
+                            int endVal)
+        throws DatabaseException {
+
+        DatabaseEntry val = new DatabaseEntry();
+        for (int i = startVal; i < endVal; i++) {
+            IntegerBinding.intToEntry(i, val);
+            assertEquals(OperationStatus.SUCCESS,
+                         testDb.delete(null, val /*key*/));
+        }
+    }
+
+}
diff --git a/test/com/sleepycat/je/rep/impl/TextProtocolTestBase.java b/test/com/sleepycat/je/rep/impl/TextProtocolTestBase.java
new file mode 100644
index 0000000..c493c54
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/TextProtocolTestBase.java
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+import com.sleepycat.je.rep.impl.TextProtocol.InvalidMessageException;
+import com.sleepycat.je.rep.impl.TextProtocol.Message;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * The superclass for all tests of protocols that inherit from TextProtocol.
+ *
+ * All subclasses need to create the messages belonging to each sub-protocol
+ * and return an instance of the sub-protocol.
+ */
+public abstract class TextProtocolTestBase extends TestBase {
+
+    private TextProtocol protocol;
+    protected static final String GROUP_NAME = "TestGroup";
+    protected static final String NODE_NAME = "Node 1";
+
+    /**
+     * Verify that all Protocol messages are idempotent under the
+     * serialization/de-serialization sequence.
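+     *
+     * <p>The invariant, in code form (names as used by {@code check}
+     * below):
+     * <pre>{@code
+     * Message m2 = protocol.parse(m1.wireFormat());
+     * assertEquals(m1, m2);
+     * }</pre>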
+     * @throws InvalidMessageException
+     */
+    @Test
+    public void testAllMessages()
+        throws InvalidMessageException {
+
+        Message[] messages = createMessages();
+
+        protocol = getProtocol();
+
+        /* Ensure that we are testing all of them */
+        assertEquals(messages.length, protocol.messageCount());
+        /* Now test them. */
+        for (Message m : messages) {
+            check(m);
+            if (!getClass().equals(RepGroupProtocolTest.class) &&
+                !getClass().equals(NodeStateProtocolTest.class)) {
+                checkMismatch(m);
+            }
+        }
+    }
+
+    /* Create messages for test. */
+    protected abstract Message[] createMessages();
+
+    /* Return the concrete protocol. */
+    protected abstract TextProtocol getProtocol();
+
+    private void check(Message m1)
+        throws InvalidMessageException {
+
+        String wireFormat = m1.wireFormat();
+        Message m2 = protocol.parse(wireFormat);
+        assertEquals(m1, m2);
+    }
+
+    /* Replaces a specific token value with the one supplied. */
+    private String hackToken(String wireFormat,
+                             TextProtocol.TOKENS tokenType,
+                             String hackValue) {
+        String[] tokens = wireFormat.split(TextProtocol.SEPARATOR_REGEXP);
+        tokens[tokenType.ordinal()] = hackValue;
+        String line = "";
+        for (String token : tokens) {
+            line += (token + TextProtocol.SEPARATOR);
+        }
+
+        return line.substring(0, line.length()-1);
+    }
+
+    /* Tests consistency checks on message headers. */
+    private void checkMismatch(Message m1) {
+        String[] wireFormats = new String[] {
+            hackToken(m1.wireFormat(), TextProtocol.TOKENS.VERSION_TOKEN,
+                      "9999999"),
+            hackToken(m1.wireFormat(), TextProtocol.TOKENS.NAME_TOKEN,
+                      "BADGROUPNAME"),
+            hackToken(m1.wireFormat(), TextProtocol.TOKENS.ID_TOKEN,
+                      "0") };
+
+        for (String wireFormat : wireFormats) {
+            try {
+                protocol.parse(wireFormat);
+                fail("Expected InvalidMessageException");
+            } catch (InvalidMessageException e) {
+                /* Expected. */
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/impl/networkRestore/InterruptedNetworkRestoreTest.java b/test/com/sleepycat/je/rep/impl/networkRestore/InterruptedNetworkRestoreTest.java
new file mode 100644
index 0000000..4f2ad46
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/networkRestore/InterruptedNetworkRestoreTest.java
@@ -0,0 +1,262 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.logging.Logger; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.RestoreMarker; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.VLSN; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Test an interrupted network restore and the RestoreRequired mechanism. + */ +public class InterruptedNetworkRestoreTest extends RepTestBase { + + private Logger logger; + + @Override + @Before + public void setUp() + throws Exception { + + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "FailoverTest"); + groupSize = 2; + super.setUp(); + } + + @Override + @After + public void tearDown() + throws Exception { + + super.tearDown(); + } + + /** + * 1) Start with a two node group: rg1-rn1(M) and rg1-rn2(R) + */ + @Test + public void testBasic() + throws InterruptedException { + + /* Need a disk limit to advance VLSN range head. */ + for (int i = 0; i < 2; i++) { + repEnvInfo[i].getEnvConfig().setConfigParam( + EnvironmentConfig.MAX_DISK, String.valueOf(50 * 10000)); + } + createGroup(2); + final RepEnvInfo mInfo1 = repEnvInfo[0]; + final RepEnvInfo mInfo2 = repEnvInfo[1]; + + shiftVLSNRight(mInfo1.getEnv()); + + /* + * Shut down node2 and delete its files, so it will open with an + * InsufficientLogException. + */ + mInfo2.closeEnv(); + TestUtils.removeLogFiles("Setting up for network restore", + mInfo2.getEnvHome(), false); + logger.info("Removed files from " + mInfo2.getEnvHome()); + InsufficientLogException expectedILE = openEnvExpectILE(mInfo2); + + /* + * Start a network restore on node 2, but intentionally kill + * the network restore before the first transfer of a file + * equal to or greater than 4. + */ + NetworkRestore nr = new NetworkRestore(); + nr.setInterruptHook(new StopBackup(4, repEnvInfo[0].getRepImpl())); + NetworkRestoreConfig config = new NetworkRestoreConfig(); + try { + nr.execute(expectedILE, config); + fail("should throw StopBackupException"); + } catch (StopBackupException expected) { + } + + /* At this point, a marker file should exist. */ + assertTrue(markerFileExists(mInfo2.getEnvHome())); + + /* + * Try to start up node2 multiple times. It should not be able to + * recover, and should continue to throw the ILE until a network + * restore is completed. + */ + InsufficientLogException useException = null; + for (int i = 0; i < 3; i++) { + useException = openEnvExpectILE(mInfo2); + assertTrue(markerFileExists(mInfo2.getEnvHome())); + } + + /* + * Reestablish a fresh network restore, still with the test hook, using + * the new ILE generated from recovery. 
This will exercise the
+         * path to create a new repImpl from the persisted ILE properties in
+         * the RestoreRequired log entry.
+         */
+        nr = new NetworkRestore();
+        nr.setInterruptHook(new StopBackup(2, repEnvInfo[0].getRepImpl()));
+        try {
+            nr.execute(useException, config);
+            fail("should throw StopBackupException");
+        } catch (StopBackupException expected) {
+        }
+        assertTrue(markerFileExists(mInfo2.getEnvHome()));
+
+        /*
+         * Reestablish another fresh network restore, with no test hook,
+         * using a new ILE generated from recovery, and this time let the
+         * restore run to completion.
+         */
+        useException = openEnvExpectILE(mInfo2);
+        nr = new NetworkRestore();
+        nr.execute(useException, config);
+
+        /* At this point, a marker file should not exist. */
+        assertFalse(markerFileExists(mInfo2.getEnvHome()));
+
+        mInfo2.openEnv();
+        VLSN commitVLSN = RepTestUtils.syncGroup(repEnvInfo);
+        RepTestUtils.checkNodeEquality(commitVLSN, false, repEnvInfo);
+    }
+
+    private boolean markerFileExists(File envHome) {
+
+        String[] jdbFiles =
+            FileManager.listFiles(envHome,
+                                  new String[]{FileManager.JE_SUFFIX},
+                                  false);
+        return jdbFiles[jdbFiles.length-1].equals
+            (RestoreMarker.getMarkerFileName());
+    }
+
+    private InsufficientLogException openEnvExpectILE(RepEnvInfo rinfo) {
+        try {
+            rinfo.openEnv();
+            fail("Expected ILE");
+        } catch (InsufficientLogException ile) {
+            return ile;
+        }
+        throw new IllegalStateException
+            ("Should have seen an InsufficientLogException");
+    }
+
+    /*
+     * Provoke sufficient log cleaning to move the entire VLSN range right,
+     * far enough that the new range no longer overlaps the range on entry,
+     * thus guaranteeing an InsufficientLogException.
+     */
+    private void shiftVLSNRight(ReplicatedEnvironment menv) {
+        /* Shift the vlsn range window. */
+
+        RepImpl menvImpl = repEnvInfo[0].getRepImpl();
+        final VLSNIndex vlsnIndex = menvImpl.getRepNode().getVLSNIndex();
+        VLSN masterHigh = vlsnIndex.getRange().getLast();
+
+        CheckpointConfig checkpointConfig = new CheckpointConfig();
+        checkpointConfig.setForce(true);
+
+        do {
+
+            /*
+             * Populate just the master, leaving the replica behind.
+             * Re-populate with the same keys to create Cleaner fodder.
+             */
+            populateDB(menv, TEST_DB_NAME, 1000);
+
+            /*
+             * Sleep to permit the cbvlsn on the master to be updated.
It is done with the
+             * period FeederManager.MASTER_CHANGE_CHECK_TIMEOUT.
+             */
+            try {
+                Thread.sleep(1000);
+            } catch (InterruptedException e) {
+                fail("unexpected interrupt");
+            }
+            menv.cleanLog();
+            menv.checkpoint(checkpointConfig);
+        } while (masterHigh.compareTo(vlsnIndex.getRange().getFirst()) > 0);
+    }
+
+    private class StopBackupException extends RuntimeException {
+        StopBackupException(String msg) {
+            super(msg);
+        }
+    }
+
+    private class StopBackup implements TestHook<File> {
+
+        private final long interruptPoint;
+        private final RepImpl repImpl;
+
+        StopBackup(long interruptPoint, RepImpl repImpl) {
+            this.interruptPoint = interruptPoint;
+            this.repImpl = repImpl;
+        }
+
+        @Override
+        public void hookSetup() {
+        }
+
+        @Override
+        public void doIOHook() throws IOException {
+        }
+
+        @Override
+        public void doHook(File f) {
+            long fileNum =
+                repImpl.getFileManager().getNumFromName(f.getName());
+            if (fileNum >= interruptPoint) {
+                throw new StopBackupException(
+                    "Testhook: throwing exception because file " + fileNum +
+                    " is at or past the interrupt point " + interruptPoint);
+            }
+        }
+
+        @Override
+        public File getHookValue() {
+            return null;
+        }
+
+        @Override
+        public void doHook() {
+        }
+    }
+
+}
diff --git a/test/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupTest.java b/test/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupTest.java
new file mode 100644
index 0000000..c487d27
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/networkRestore/NetworkBackupTest.java
@@ -0,0 +1,693 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.List; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.RestoreMarker; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.util.DbBackup; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class NetworkBackupTest extends TestBase { + + /* The port being handled by the dispatcher. */ + private static final int TEST_PORT = 5000; + + /* The source environment */ + private File envHome; + private EnvironmentConfig envConfig; + private Environment env; + private Database db; + + /* + * The destination environment, and filemanager and logmanager for that + * destination. + */ + File backupDir; + private Environment backupEnv; + private FileManager backupFileManager; + private LogManager backupLogManager; + + private final InetSocketAddress serverAddress = + new InetSocketAddress("localhost", TEST_PORT); + + private DataChannelFactory channelFactory; + private ServiceDispatcher dispatcher; + private FeederManager fm; + + protected DatabaseConfig dbconfig; + protected final DatabaseEntry key = new DatabaseEntry(new byte[] { 1 }); + protected final DatabaseEntry data = new DatabaseEntry(new byte[] { 100 }); + protected static final String TEST_DB_NAME = "TestDB"; + + protected static final VerifyConfig vconfig = new VerifyConfig(); + + private static final int DB_ENTRIES = 100; + + static { + vconfig.setAggressive(false); + vconfig.setPropagateExceptions(true); + } + + /* True if the Feeder enables multiple sub directories. */ + private boolean envMultiDirs; + + /* + * True if the nodes need to copy log files enables multiple sub + * directories. + */ + private boolean backupMultiDirs; + private final int DATA_DIRS = 3; + + /* + * Experiences four cases: + * 1. Feeder doesn't enable sub directories, nor replicas. + * 2. 
Feeder doesn't enable sub directories, but replicas do. + * 3. Feeder enables sub directories, but replicas don't. + * 4. Feeder enables sub directories, so do replicas. + */ + @Parameters + public static List genParams() { + + return Arrays.asList(new Object[][] {{false, false}, {false, true}, + {true, false}, {true, true}}); + } + + public NetworkBackupTest(boolean envMultiDirs, boolean backupMultiDirs) { + this.envMultiDirs = envMultiDirs; + this.backupMultiDirs = backupMultiDirs; + customName = (envMultiDirs ? ":env-multi-sub-dirs" : "") + + (backupMultiDirs ? ":backup-multi-sub-dirs" : ""); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + envHome = SharedTestUtils.getTestDir(); + envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + + /* If multiple sub directories property is enabled. */ + if (envMultiDirs) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + createSubDir(envHome, false); + } + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + env = new Environment(envHome, envConfig); + + dbconfig = new DatabaseConfig(); + dbconfig.setAllowCreate(true); + dbconfig.setTransactional(true); + dbconfig.setSortedDuplicates(false); + db = env.openDatabase(null, TEST_DB_NAME, dbconfig); + + for (int i = 0; i < DB_ENTRIES; i++) { + IntegerBinding.intToEntry(i, key); + LongBinding.longToEntry(i, data); + db.put(null, key, data); + } + /* Create cleaner fodder. */ + for (int i = 0; i < (DB_ENTRIES / 2); i++) { + IntegerBinding.intToEntry(i, key); + LongBinding.longToEntry(i, data); + db.put(null, key, data); + } + env.cleanLog(); + env.verify(vconfig, System.err); + + /* Create the backup environment. */ + backupDir = new File(envHome.getCanonicalPath() + ".backup"); + /* Clear the log files in the backup directory. */ + cleanEnvHome(backupDir, true); + /* Create the Environment home for replicas. 
*/ + if (backupMultiDirs) { + envConfig.setConfigParam(EnvironmentConfig.LOG_N_DATA_DIRECTORIES, + DATA_DIRS + ""); + createSubDir(backupDir, true); + } else { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, "0"); + backupDir.mkdir(); + } + assertTrue(backupDir.exists()); + + backupEnv = new Environment(backupDir, envConfig); + backupFileManager = + DbInternal.getNonNullEnvImpl(backupEnv).getFileManager(); + backupLogManager = + DbInternal.getNonNullEnvImpl(backupEnv).getLogManager(); + + final ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.create(RepTestUtils.readNetProps()); + channelFactory = DataChannelFactoryBuilder.construct(repNetConfig); + + dispatcher = new ServiceDispatcher(serverAddress, channelFactory); + dispatcher.start(); + fm = new FeederManager(dispatcher, + DbInternal.getNonNullEnvImpl(env), + new NameIdPair("n1", (short) 1)); + fm.start(); + } + + private void createSubDir(File home, boolean isBackupDir) + throws Exception { + + if (isBackupDir) { + if (!home.exists()) { + home.mkdir(); + } + } + + if ((envMultiDirs && !isBackupDir) || + (backupMultiDirs && isBackupDir)) { + for (int i = 1; i <= DATA_DIRS; i++) { + File subDir = new File(home, TestUtils.getSubDirName(i)); + assertTrue(!subDir.exists()); + assertTrue(subDir.mkdir()); + } + } + } + + @After + public void tearDown() + throws Exception { + + try { + db.close(); + env.close(); + fm.shutdown(); + dispatcher.shutdown(); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + + private void cleanEnvHome(File home, boolean isBackupDir) + throws Exception { + + if (home == null) { + return; + } + + File[] files = home.listFiles(); + if (files == null || files.length == 0) { + return; + } + + /* Delete the sub directories if any. */ + for (File file : files) { + if (file.isDirectory() && file.getName().startsWith("data")) { + File[] subFiles = file.listFiles(); + for (File subFile : subFiles) { + assertTrue(subFile.delete()); + } + assertTrue(file.delete()); + } + + if (isBackupDir && file.isFile()) { + assertTrue(file.delete()); + } + } + + TestUtils.removeLogFiles("tearDown", home, false); + + if (isBackupDir) { + assertTrue(home.delete()); + } + } + + @Test + public void testBackupFiles() + throws Exception { + + /* The client side */ + NetworkBackup backup1 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + false, + backupFileManager, + backupLogManager, + channelFactory); + String files1[] = backup1.execute(); + NetworkBackupStats stats1 = backup1.getStats(); + assertEquals(0, stats1.getSkipCount()); + + verify(envHome, backupDir, files1); + + /* + * Check byte transfer stats. Not testing getTransferRate -- transfer + * is too quick to get any results for that stat. + */ + File[] jdbFiles1 = backupFileManager.listJDBFiles(); + long jdbFilesBytes1 = 0; + for (File f : jdbFiles1) { + jdbFilesBytes1 += f.length(); + } + assertEquals(jdbFilesBytes1, stats1.getExpectedBytes()); + assertEquals(jdbFilesBytes1, stats1.getTransferredBytes()); + + /* Corrupt the currently backed up log files. 
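+         *
+         * Note: FileOutputStream opened without the append flag truncates
+         * the file, so each "corrupted" file below shrinks to a single
+         * byte; NetworkBackup must then dispose of and re-fetch it, which
+         * is what the getDisposedCount assertion checks.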
*/ + for (File f : jdbFiles1) { + FileOutputStream os = new FileOutputStream(f); + os.write(1); + os.close(); + } + + int count = backupFileManager.listJDBFiles().length; + NetworkBackup backup2 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + false, + backupFileManager, + backupLogManager, + channelFactory); + String files2[] = backup2.execute(); + verify(envHome, backupDir, files2); + assertEquals(count, backup2.getStats().getDisposedCount()); + + verifyAsEnv(backupDir); + + /* + * Close the database to avoid problems later when we corrupt the files + * on the server + */ + db.close(); + + /* Corrupt files on the server, and make sure no files are copied */ + for (final File f : + DbInternal.getNonNullEnvImpl(env).getFileManager(). + listJDBFiles()) { + final FileOutputStream os = new FileOutputStream(f); + os.write(1); + os.close(); + } + final NetworkBackup backup3 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + true, + backupFileManager, + backupLogManager, + channelFactory); + try { + backup3.execute(); + fail("Expected IOException"); + } catch (IOException e) { + } + + assertEquals("No files should have been fetched", + 0, + backup3.getStats().getFetchCount()); + + /* The environment is corrupted -- invalidate it */ + new EnvironmentFailureException( + DbInternal.getNonNullEnvImpl(env), + EnvironmentFailureReason.TEST_INVALIDATE); + } + + /** + * Performs a backup while the database is growing actively + * + * @throws InterruptedException + * @throws IOException + * @throws DatabaseException + */ + @Test + public void testConcurrentBackup() + throws InterruptedException, IOException, DatabaseException { + + LogFileGeneratingThread lfThread = new LogFileGeneratingThread(); + BackupThread backupThread = new BackupThread(); + lfThread.start(); + + backupThread.start(); + backupThread.join(60*1000); + lfThread.quit = true; + lfThread.join(60*1000); + + DbBackup dbBackup = new DbBackup(env); + dbBackup.startBackup(); + int newCount = dbBackup.getLogFilesInBackupSet().length; + + assertNull(backupThread.error); + assertNull(lfThread.error); + + /* + * Verify that the count did increase while the backup was in progress. + */ + assertTrue(newCount > backupThread.files.length); + /* Verify that the backup was correct. 
*/ + verify(envHome, backupDir, backupThread.files); + + verifyAsEnv(backupDir); + dbBackup.endBackup(); + } + + class BackupThread extends Thread { + Exception error = null; + String files[] = null; + + BackupThread() { + setDaemon(true); + } + + @Override + public void run() { + try { + NetworkBackup backup1 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + true, + backupFileManager, + backupLogManager, + channelFactory); + files = backup1.execute(); + } catch (Exception e) { + error = e; + error.printStackTrace(); + } + } + } + + class LogFileGeneratingThread extends Thread { + Exception error = null; + volatile boolean quit = false; + + LogFileGeneratingThread() { + setDaemon(true); + } + + @Override + public void run() { + try { + for (int i = 0; i < 100000; i++) { + IntegerBinding.intToEntry(i, key); + LongBinding.longToEntry(i, data); + db.put(null, key, data); + if (quit) { + return; + } + } + } catch (Exception e) { + error = e; + error.printStackTrace(); + } + fail("Backup did not finish in time"); + } + } + + @Test + public void testBasicWithRetainLog() + throws Exception { + + doBasicTest(true); + } + + @Test + public void testBasicWithoutRetainLog() + throws Exception { + + doBasicTest(false); + } + + private void doBasicTest(boolean retainLog) + throws Exception { + + /* The client side */ + NetworkBackup backup1 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + retainLog, + backupFileManager, + backupLogManager, + channelFactory); + backup1.execute(); + assertEquals(0, backup1.getStats().getSkipCount()); + + /* + * repeat, should find mostly cached files. Invoking backup causes + * a checkpoint to be written to the log. + */ + NetworkBackup backup2 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + retainLog, + backupFileManager, + backupLogManager, + channelFactory); + String files2[] = backup2.execute(); + verify(envHome, backupDir, files2); + + assertTrue((backup1.getStats().getFetchCount() - + backup2.getStats().getSkipCount()) <= 1); + + verifyAsEnv(backupDir); + } + + @Test + public void testLeaseBasic() + throws Exception { + + int errorFileNum = 2; + NetworkBackup backup1 = + new TestNetworkBackup(serverAddress, + backupEnv, + (short) 1, + true, + errorFileNum); + try { + backup1.execute(); + fail("Exception expected"); + } catch (IOException e) { + /* Expected. */ + } + /* Wait for server to detect a broken connection. */ + Thread.sleep(500); + /* Verify that the lease was created. */ + assertEquals(1, fm.getLeaseCount()); + NetworkBackup backup2 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + true, + backupFileManager, + backupLogManager, + channelFactory); + /* Verify that the lease was renewed. */ + String[] files2 = backup2.execute(); + assertEquals(2, backup2.getStats().getSkipCount()); + assertEquals(1, fm.getLeaseRenewalCount()); + + /* Verify that the copy resumed correctly. */ + verify(envHome, backupDir, files2); + + verifyAsEnv(backupDir); + } + + @Test + public void testLeaseExpiration() + throws Exception { + + int errorFileNum = 2; + + /* + * Verify that leases are created and expire as expected. + */ + NetworkBackup backup1 = new TestNetworkBackup(serverAddress, + backupEnv, + (short) 1, + true, + errorFileNum); + /* Shorten the lease duration for test purposes. 
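+         *
+         * Background (inferred from the assertions below): the server
+         * retains a lease for a client whose connection broke mid-transfer
+         * so that the client can resume rather than restart; shortening
+         * the lease makes its expiry observable within the test.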
*/ + long leaseDuration = 1*1000; + try { + fm.setLeaseDuration(leaseDuration); + backup1.execute(); + fail("Exception expected"); + } catch (IOException e) { + /* Expected. */ + } + /* Wait for server to detect broken connection. */ + Thread.sleep(500); + /* Verify that the lease was created. */ + assertEquals(1, fm.getLeaseCount()); + Thread.sleep(leaseDuration); + /* Verify that the lease has expired after its duration. */ + assertEquals(0, fm.getLeaseCount()); + + /* Resume after lease expiration. */ + NetworkBackup backup2 = + new NetworkBackup(serverAddress, + backupEnv.getHome(), + new NameIdPair("n1", (short) 1), + true, + backupFileManager, + backupLogManager, + channelFactory); + /* Verify that the lease was renewed. */ + String[] files2 = backup2.execute(); + /* Verify that the copy resumed correctly. */ + verify(envHome, backupDir, files2); + + verifyAsEnv(backupDir); + } + + private void verify(File envDir, + File envBackupDir, + String backupEnvFiles[]) + throws IOException { + + for (String backupFile : backupEnvFiles) { + File envFile = null; + + /* + * The file names returned by NetworkBackup only apply on the + * replicas (if they have sub directories enabled while Feeder + * doesn't), so need to calculate the real path on the Feeder + * according the file name. + */ + if (envMultiDirs) { + if (backupMultiDirs) { + envFile = new File(envDir, backupFile); + } else { + envFile = new File(DbInternal.getNonNullEnvImpl(env). + getFileManager(). + getFullFileName(backupFile)); + } + } else { + if (backupMultiDirs) { + int start = backupFile.indexOf(File.separator); + envFile = new File + (envDir, + backupFile.substring(start, backupFile.length())); + } else { + envFile = new File(envDir, backupFile); + } + } + FileInputStream envStream = new FileInputStream(envFile); + FileInputStream envBackupStream = + new FileInputStream(new File(envBackupDir, backupFile)); + int ib1, ib2; + do { + ib1 = envStream.read(); + ib2 = envBackupStream.read(); + } while ((ib1 == ib2) && (ib1 != -1)); + assertEquals(ib1, ib2); + envStream.close(); + envBackupStream.close(); + } + } + + void verifyAsEnv(File dir) + throws EnvironmentLockedException, DatabaseException { + + /* Close the backupEnv abnormally. */ + DbInternal.getNonNullEnvImpl(backupEnv).abnormalClose(); + + Environment benv = new Environment(dir, envConfig); + /* Note that verify modifies log files. */ + benv.verify(vconfig, System.err); + benv.close(); + } + + /** + * Class to provoke a client failure when requesting a specific file. 
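+     *
+     * <p>Sketch of intended use, as in the lease tests above (the final
+     * argument is the index of the file request that fails):
+     * <pre>{@code
+     * NetworkBackup backup = new TestNetworkBackup(serverAddress,
+     *                                              backupEnv, (short) 1,
+     *                                              true, 2);
+     * backup.execute(); // throws IOException at the third file
+     * }</pre>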
+ */ + private class TestNetworkBackup extends NetworkBackup { + int errorFileNum = 0; + + public TestNetworkBackup(InetSocketAddress serverSocket, + Environment backupEnv, + short clientId, + boolean retainLogfiles, + int errorFileNum) + throws DatabaseException { + + super(serverSocket, + backupEnv.getHome(), + new NameIdPair("node"+clientId, clientId), + retainLogfiles, + DbInternal.getNonNullEnvImpl(backupEnv).getFileManager(), + DbInternal.getNonNullEnvImpl(backupEnv).getLogManager(), + channelFactory); + this.errorFileNum = errorFileNum; + } + + @Override + protected void getFile(File file) + throws IOException, ProtocolException, DigestException, + RestoreMarker.FileCreationException { + if (errorFileNum-- == 0) { + throw new IOException("test exception"); + } + super.getFile(file); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/networkRestore/NetworkRestoreNoMasterTest.java b/test/com/sleepycat/je/rep/impl/networkRestore/NetworkRestoreNoMasterTest.java new file mode 100644 index 0000000..7ea91b6 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/networkRestore/NetworkRestoreNoMasterTest.java @@ -0,0 +1,236 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + + +package com.sleepycat.je.rep.impl.networkRestore; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup; +import com.sleepycat.je.rep.stream.ReplicaFeederSyncup.TestHook; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.WaitForListener; + +/** + * Verifies that a "minority" group (without a master) can be used to bootstrap + * a fully functioning group by using network restore operations to establish a + * quorum and select a new master. + */ +public class NetworkRestoreNoMasterTest extends RepTestBase { + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + } + + @Override + @After + public void tearDown() + throws Exception { + + super.tearDown(); + } + + /** + * Tests a special case of failure recovery with a newly added node as + * below: + * + * 1) Start with a two node group: rg1-rn1(M) and rg1-rn2(R) + * 2) Add rg1-rn3, which joins as a replica. + * 3) rg1-rn3 starts syncing up but before it can read the updates that + * establish it in the group membership table the master rg1-rn1 goes down + * hard. 
This results in rg1-rn3 transitioning to the UNKNOWN state, + * where because it does not have a "node id", tries to "find" a master + * by querying the nodes it knows about. rg1-rn2 does not respond because + * it's no longer in contact with a master and is trying to initiate an + * election. + * 4) rg1-rn3 should network restore from rg1-rn2, so that it can help + * establish a quorum, hold an election, and make the shard available for + * writes again. + * + * This is sr22851, where the rep node incorrectly retains its cached + * knowledge of a previous master. + */ + @Test + public void testSyncupFailure() + throws InterruptedException { + + createGroup(2); + final RepEnvInfo mInfo = repEnvInfo[0]; + assertTrue(mInfo.getEnv().getState().isMaster()); + + /* + * Populate enough of the env so that it can't all be buffered in the + * network buffers, to ensure that rg1-rn3 does not have the group + * database with changes that insert rg1-rn3 into it when rg1-rn1 + * subsequently goes down. + */ + populateDB(mInfo.getEnv(), 10000); + + /* latches to coordinate killing of rg1-rn1 */ + final CountDownLatch syncupEnded = new CountDownLatch(1); + final CountDownLatch masterDown = new CountDownLatch(1); + + TestHook syncupEndHook = new TestHook() { + + @Override + public void doHook() + throws InterruptedException { + syncupEnded.countDown(); + masterDown.await(); + } + }; + + /* Ensure that rg1-rn3 has a current set of helper hosts */ + updateHelperHostConfig(); + + ReplicaFeederSyncup.setGlobalSyncupEndHook(syncupEndHook); + + final AtomicReference openFailed = + new AtomicReference(); + + /* + * Open the rg1-rn3 env and expect an ILE due to the interrupted syncup + * and recover by doing a network restore. + */ + Thread asyncOpen = new Thread() { + @Override + public void run() { + try { + /* + * Slow down the feeder, so that the rep group db changes + * for rg1-rn3 are delayed + */ + mInfo.getRepImpl().getRepNode().feederManager(). + setTestDelayMs(1000); + openEnvExpectILE(repEnvInfo[2]); + } catch (Throwable e) { + openFailed.set("open failed:" + e.getMessage()); + } + } + }; + + asyncOpen.start(); + syncupEnded.await(); + + /* Shutdown the master. */ + mInfo.closeEnv(); + + /* + * Release the replica feeder syncup resulting in the replica + * discovering the closed connection and transitioning to the + * UNKNOWN state and trying to find/elect a new master. + */ + masterDown.countDown(); + + /* Wait for the open, network restore, open sequence to finish. */ + asyncOpen.join(); + + /* check for failures in the open thread. */ + assertNull(openFailed.get()); + + /* + * With two nodes up, we should have a functioning group. + */ + final ReplicatedEnvironment r2Env = repEnvInfo[2].getEnv(); + assertNotNull(r2Env); + assertTrue(r2Env.getState().isActive()); + } + + + /** + * This testcase implements the following scenario: + * + * 1) Create a 3 node group + * 2) Stop 2 of the nodes, leaving the group without a master. + * 3) Delete the log files of the above two nodes. + * 4) Restart one of the nodes. It finds there is no master and does + * a network restore. It subsequently proceeds to participate in an + * election and choose a master. + * 5) Restart the second node; it queries for and finds the master and + * syncs up with it. 
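+     *
+     * Step 4 relies on the standard recovery idiom, a minimal sketch of
+     * this class's openEnvExpectILE helper (defined below):
+     * <pre>
+     *    try {
+     *        rinfo.openEnv();
+     *    } catch (InsufficientLogException ile) {
+     *        new NetworkRestore().execute(ile, new NetworkRestoreConfig());
+     *        rinfo.openEnv(); // Retry now that log files are restored.
+     *    }
+     * </pre>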
+ * + * This is sr 22815 + */ + @Test + public void testMissingEnv() + throws InterruptedException { + + createGroup(); + final WaitForListener rn3Unknown = new WaitForListener.Unknown(); + repEnvInfo[2].getEnv().setStateChangeListener(rn3Unknown); + assertTrue(repEnvInfo[0].getEnv().getState().isMaster()); + + /* Close rn1 and rn2, leaving just rn3. */ + repEnvInfo[1].closeEnv(); + repEnvInfo[0].closeEnv(); + + assertTrue(rn3Unknown.await()); + + /* Remove env directories for rn1 and rn2. */ + RepTestUtils.removeRepEnv(repEnvInfo[0].getEnvHome()); + RepTestUtils.removeRepEnv(repEnvInfo[1].getEnvHome()); + + final WaitForListener rn3Active = new WaitForListener.Active(); + repEnvInfo[2].getEnv().setStateChangeListener(rn3Active); + + /* Setup helper hosts, so nodes can find each other. */ + /* Ensure that rg1-rn3 has a current set of helper hosts */ + updateHelperHostConfig(); + + openEnvExpectILE(repEnvInfo[0]); + + /* + * With two nodes up, we should have a functioning group. + */ + assertTrue(rn3Active.await()); + + /* + * Now that there is a master, rn2 should just be able to locate the + * master and sync up + */ + assertTrue(repEnvInfo[1].openEnv().getState().isActive()); + } + + private void openEnvExpectILE(RepEnvInfo rinfo) { + try { + rinfo.openEnv(); + fail("Expected ILE"); + } catch (InsufficientLogException ile) { + NetworkRestore nr = new NetworkRestore(); + NetworkRestoreConfig config = new NetworkRestoreConfig(); + nr.execute(ile, config); + rinfo.openEnv(); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/networkRestore/NetworkRestoreTest.java b/test/com/sleepycat/je/rep/impl/networkRestore/NetworkRestoreTest.java new file mode 100644 index 0000000..1b0ab1f --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/networkRestore/NetworkRestoreTest.java @@ -0,0 +1,475 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static com.sleepycat.util.test.GreaterThan.greaterThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.CyclicBarrier; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.ExceptionEvent; +import com.sleepycat.je.ExceptionListener; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.RestoreMarker; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.networkRestore.NetworkBackup.RejectedServerException; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.utilint.DaemonThread; +import com.sleepycat.je.utilint.VLSN; + +import org.junit.Test; + +public class NetworkRestoreTest extends RepTestBase { + + /* + * Repeats of network restore operations, as the master advances the log + * with modifications during each iteration. + */ + private static int RESTORE_CYCLES = 5; + + @Test + public void testLogProviders() + throws Exception { + + configureForMaxCleaning(5); + final RepEnvInfo minfo = repEnvInfo[0]; + + /* Add a secondary node */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo sInfo = repEnvInfo[repEnvInfo.length - 1]; + sInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + + createGroup(); + populateDB(minfo.getEnv(), TEST_DB_NAME, 100); + + /* The node that will be use for network restores. */ + RepEnvInfo nrInfo = repEnvInfo[1]; + + /* restore from master. */ + doAndCheckRestore(nrInfo, minfo); + /* Check restore from specific Replica. */ + doAndCheckRestore(nrInfo, repEnvInfo[2]); + /* restore from self should fail. */ + try { + doAndCheckRestore(nrInfo, repEnvInfo[1]); + fail("exception expected"); + } catch (EnvironmentFailureException e) { + // Expected. Cannot restore from just yourself. 
+ } + + /* Restore secondary */ + doAndCheckRestore(sInfo, minfo); + + /* Restore from secondary */ + doAndCheckRestore(nrInfo, sInfo); + } + + @Test + public void testConfigError() { + configureForMaxCleaning(5); + final RepEnvInfo minfo = repEnvInfo[0]; + + createGroup(); + populateDB(minfo.getEnv(), TEST_DB_NAME, 100); + RepEnvInfo nrInfo = repEnvInfo[1]; + nrInfo.closeEnv(); + shiftVLSNRight(repEnvInfo[0].getEnv()); + try { + setExceptionListener(nrInfo); + nrInfo.openEnv(); + fail("exception expected"); + } catch (InsufficientLogException e) { + NetworkRestore networkRestore = new NetworkRestore(); + + final ReplicationConfig repConfig = repEnvInfo[0].getRepConfig(); + // bad node name + repConfig.setNodeName("badname"); + ReplicationNode restoreNode = new RepNodeImpl(repConfig); + final NetworkRestoreConfig config = new NetworkRestoreConfig(); + config.setLogProviders(Arrays.asList(restoreNode)); + try { + networkRestore.execute(e, config); + fail("exception expected"); + } catch (IllegalArgumentException iae) { + // Expected + } + } + } + + /* + * Creates conditions for a network restore at nrInfo and then restores the + * node from a specific member. + */ + private void doAndCheckRestore(RepEnvInfo nrInfo, + RepEnvInfo restoreFromInfo) { + nrInfo.closeEnv(); + shiftVLSNRight(repEnvInfo[0].getEnv()); + try { + nrInfo.openEnv(); + fail("exception expected"); + } catch (InsufficientLogException e) { + NetworkRestore networkRestore = new NetworkRestore(); + ReplicationNode restoreNode = + new RepNodeImpl(restoreFromInfo.getRepConfig()); + final NetworkRestoreConfig config = new NetworkRestoreConfig(); + config.setLogProviders(Arrays.asList(restoreNode)); + networkRestore.execute(e, config); + assertEquals(restoreNode, networkRestore.getLogProvider()); + final NetworkBackupStats stats = + networkRestore.getNetworkBackupStats(); + assertThat(stats.getExpectedBytes(), greaterThan(0)); + assertThat(stats.getTransferredBytes(), greaterThan(0)); + nrInfo.openEnv(); + } + } + + private class NetworkTestExceptionListener implements ExceptionListener { + public void exceptionThrown(ExceptionEvent event) { + if (event.getException() instanceof InsufficientLogException) { + return; + } + + System.err.println("NetworkRestoreTest caught: " + + event.getException() + + "\n in thread: " + + event.getThreadName()); + } + } + + private void setExceptionListener(final RepEnvInfo info) { + EnvironmentConfig infoConfig = info.getEnvConfig(); + infoConfig.setExceptionListener(new NetworkTestExceptionListener()); + info.setEnvConfig(infoConfig); + } + + /** + * This is really multiple tests in one. It tests network restore with a + * replica in each of the following three states: + * + * 1) A brand new node joining the group and needing a network restore. + * + * 2) An existing node with its own unique log needing a network restore. + * + * 3) Repeated network restores, reflecting a mature node. + */ + @Test + public void testBasic() + throws DatabaseException, Exception { + + /* + * The cleaner thread can see InsufficientLogExceptions so just stifle + * those exceptions from stderr. + */ + DaemonThread.stifleExceptionChatter = true; + + configureForMaxCleaning(2); + + final RepEnvInfo info1 = repEnvInfo[0]; + RepEnvInfo info2 = repEnvInfo[1]; + + ReplicatedEnvironment masterRep = info1.openEnv(); + + /* + * Have just the master join first. We do this to test the special case + * of a brand new node joining a group and needing VLSN 1. 
The same
+         * node then rejoins with its VLSN > 1 to test subsequent rejoins
+         * where the node has already participated in the replication.
+         */
+        populateDB(masterRep, TEST_DB_NAME, 100);
+
+        File cenvDir = info2.getEnvHome();
+        final int cid = 2;
+
+        for (int i = 0; i < RESTORE_CYCLES; i++) {
+
+            leaveGroupAllButMaster();
+            shiftVLSNRight(masterRep);
+            RepNodeImpl memberPrev =
+                info1.getRepNode().getGroup().getMember
+                (info2.getRepConfig().getNodeName());
+            /* Node2 is not yet known on the first iteration. */
+            final VLSN prevSync = (i == 0) ? null :
+                memberPrev.getBarrierState().getLastCBVLSN();
+            try {
+                /* Should force a network restore. */
+                setExceptionListener(info2);
+                info2.openEnv();
+                fail("exception expected");
+            } catch (InsufficientLogException e) {
+                logger.info("Got " + e);
+                RepNodeImpl member = info1.getRepNode().getGroup().getMember
+                    (info2.getRepConfig().getNodeName());
+
+                /*
+                 * The sync state should have been advanced so that it
+                 * contributes to the global CBVLSN rather than holding back
+                 * its advancement.
+                 */
+                final VLSN currSync = member.getBarrierState().getLastCBVLSN();
+                assertTrue((i == 0) || currSync.compareTo(prevSync) >= 0);
+
+                NetworkRestore networkRestore = new NetworkRestore();
+                networkRestore.execute(e, new NetworkRestoreConfig());
+                final NetworkBackupStats stats =
+                    networkRestore.getNetworkBackupStats();
+                assertThat(stats.getExpectedBytes(), greaterThan(0));
+                assertThat(stats.getTransferredBytes(), greaterThan(0));
+                /* Create a replacement replicator. */
+                info2 = RepTestUtils.setupEnvInfo
+                    (cenvDir,
+                     RepTestUtils.DEFAULT_DURABILITY,
+                     cid,
+                     info1);
+                setExceptionListener(info2);
+                info2.openEnv();
+            }
+            /* Verify that we can continue with the "restored" log files. */
+            populateDB(masterRep, TEST_DB_NAME, 100, 100);
+            VLSN commitVLSN =
+                RepTestUtils.syncGroupToLastCommit(repEnvInfo, 2);
+            RepTestUtils.checkNodeEquality(commitVLSN, false, repEnvInfo);
+            info2.closeEnv();
+        }
+    }
+
+    private void configureForMaxCleaning(int size) {
+        for (int i = 0; i < size; i++) {
+            /* Make it easy to create cleaner fodder. */
+            repEnvInfo[i].getEnvConfig().setConfigParam(
+                EnvironmentParams.LOG_FILE_MAX.getName(), "10000");
+            repEnvInfo[i].getEnvConfig().setConfigParam(
+                EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "90");
+            repEnvInfo[i].getEnvConfig().setConfigParam(
+                EnvironmentParams.CLEANER_MIN_FILE_UTILIZATION.getName(),
+                "90");
+            /* Need a disk limit to cause VLSN index truncation. */
+            repEnvInfo[i].getEnvConfig().setConfigParam(
+                EnvironmentConfig.MAX_DISK, String.valueOf(50 * 10000));
+        }
+    }
+
+    /*
+     * Provoke enough log cleaning to shift the entire VLSN range far enough
+     * to the right that the new range no longer overlaps the range that was
+     * current upon entry, thus guaranteeing a LogFileRefreshException.
+     */
+    private void shiftVLSNRight(ReplicatedEnvironment menv) {
+        /* Shift the vlsn range window. */
+
+        RepImpl menvImpl = repEnvInfo[0].getRepImpl();
+        final VLSNIndex vlsnIndex = menvImpl.getRepNode().getVLSNIndex();
+        VLSN masterHigh = vlsnIndex.getRange().getLast();
+
+        CheckpointConfig checkpointConfig = new CheckpointConfig();
+        checkpointConfig.setForce(true);
+
+        do {
+
+            /*
+             * Populate just the master, leaving the replica behind.
+             * Re-populating with the same keys creates cleaner fodder.
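+             *
+             * The loop exits once the low end of the VLSN index range has
+             * moved past the high end captured on entry, i.e. once the two
+             * ranges no longer overlap.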
+ */ + populateDB(menv, TEST_DB_NAME, 100); + + menv.cleanLog(); + menv.checkpoint(checkpointConfig); + + } while (masterHigh.compareTo(vlsnIndex.getRange().getFirst()) >= 0); + } + + /* + * Test the API: RepNode.shutdownNetworkBackup/restartNetworkBackup service + * used to disable the service around a replica syncup operation. + */ + @Test + public void testLockout() + throws IOException { + + setExceptionListener(repEnvInfo[0]); + repEnvInfo[0].openEnv(); + RepNode repNode = repEnvInfo[0].getRepNode(); + leaveGroupAllButMaster(); + + repNode.shutdownNetworkBackup(); + File backupDir = + new File(repEnvInfo[1].getEnvHome().getCanonicalPath() + + ".backup"); + backupDir.mkdir(); + assertTrue(backupDir.exists()); + + DataChannelFactory channelFactory = + DataChannelFactoryBuilder.construct( + RepTestUtils.readRepNetConfig()); + EnvironmentImpl envImpl = createEnvImpl(backupDir); + try { + NetworkBackup backup = + new NetworkBackup(repNode.getSocket(), + backupDir, + new NameIdPair("n1", (short)1), + true, + envImpl.getFileManager(), + envImpl.getLogManager(), + channelFactory); + backup.execute(); + fail("expected exception service should not have been available"); + } catch (ServiceConnectFailedException e) { + /* Expected. */ + } catch (Exception e) { + fail("unexpected exception" + e); + } + + repNode.restartNetworkBackup(); + try { + NetworkBackup backup = + new NetworkBackup(repNode.getSocket(), + backupDir, + new NameIdPair("n1", (short)1), + true, + envImpl.getFileManager(), + envImpl.getLogManager(), + channelFactory); + backup.execute(); + } catch (Exception e) { + fail("unexpected exception:" + e); + } + + envImpl.abnormalClose(); + } + + private EnvironmentImpl createEnvImpl(File envDir) { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + Environment backEnv = new Environment(envDir, envConfig); + + return DbInternal.getNonNullEnvImpl(backEnv); + } + + /** + * Verify that a NetworkBackup that's in progress is aborted by + * repNode.shutdownNetworkRestore() and therefore during a rollback + * operation. + */ + @Test + public void testNBAbortOnSyncup() + throws IOException, DatabaseException, ServiceConnectFailedException, + RejectedServerException, RestoreMarker.FileCreationException { + + setExceptionListener(repEnvInfo[0]); + repEnvInfo[0].openEnv(); + final RepNode repNode = repEnvInfo[0].getRepNode(); + leaveGroupAllButMaster(); + File backupDir = + new File(repEnvInfo[1].getEnvHome().getCanonicalPath() + + ".backup"); + backupDir.mkdir(); + DataChannelFactory channelFactory = + DataChannelFactoryBuilder.construct( + RepTestUtils.readRepNetConfig()); + EnvironmentImpl envImpl = createEnvImpl(backupDir); + NetworkBackup backup = + new NetworkBackup(repNode.getSocket(), + backupDir, + new NameIdPair("n1", (short)1), + true, + envImpl.getFileManager(), + envImpl.getLogManager(), + channelFactory); + CyclicBarrier testBarrier = + new CyclicBarrier(1, new Runnable() { + public void run() { + /* The syncup should kill the NB */ + repNode.shutdownNetworkBackup(); + } + } + ); + backup.setTestBarrier(testBarrier); + try { + backup.execute(); + fail("Expected exception"); + } catch(IOException e) { + /* Expected exception as in progress service was terminated. */ + } + + envImpl.abnormalClose(); + } + + /** + * Check that NumberFormatException is not thrown when parsing ILE + * properties and VLSN GT max-int. 
[#26311] + */ + @Test + public void testLongVLSNInILE() { + + /* Use a dummy/shell env to speed up the test. */ + final EnvironmentConfig envConfig = new EnvironmentConfig() + .setTransactional(true) + .setAllowCreate(true); + + final ReplicatedEnvironment env = RepInternal.createInternalEnvHandle( + envRoot, new ReplicationConfig(), envConfig); + + final RepImpl repImpl = RepInternal.getNonNullRepImpl(env); + + /* Create VLSN GT max-int. */ + final long vlsn = Long.MAX_VALUE - 100; + + /* Create ILE with properties containing the VLSN. */ + InsufficientLogException ile = + new InsufficientLogException(repImpl, new VLSN(vlsn)); + + /* We're done with the dummy env now. */ + env.close(); + + /* + * Create ILE that parses the properties. Prior to the bug fix, this + * threw NumberFormatException. + */ + ile = new InsufficientLogException(ile.getProperties(), null); + + assertEquals( + String.valueOf(vlsn), + ile.getProperties().getProperty("REFRESH_VLSN")); + } +} diff --git a/test/com/sleepycat/je/rep/impl/networkRestore/OneNodeRestoreTest.java b/test/com/sleepycat/je/rep/impl/networkRestore/OneNodeRestoreTest.java new file mode 100644 index 0000000..be17c5c --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/networkRestore/OneNodeRestoreTest.java @@ -0,0 +1,272 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.MASTER; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.REPLICA; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.UNKNOWN; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +public class OneNodeRestoreTest extends RepTestBase { + + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + } + + /** + * Tests the ability to restore an entire group by manually restoring the + * files of just one node, and then letting the other nodes do a Network + * Restore from the one node, even when that first node is in no better + * than the "UNKNOWN" state. Forces the two dependent nodes to come up + * somewhat in parallel, so that no election is completed until all have + * finished the NR. + */ + @Test + public void testBasic() throws Exception { + createGroup(); + final RepEnvInfo helper = repEnvInfo[0]; + closeNodes(repEnvInfo); + RepTestUtils.removeRepDirs(repEnvInfo[1], repEnvInfo[2]); + + /* + * The timeout could be made even quicker, which would make this test + * run a bit faster, after bug #21427 is fixed. 
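+         *
+         * With the 5 second ENV_UNKNOWN_STATE_TIMEOUT set just below, the
+         * helper's openEnv can return while the node is still in the
+         * UNKNOWN state, which is the condition this test depends on.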
+ */ + final ReplicationConfig conf = helper.getRepConfig(); + conf.setConfigParam(RepParams.ENV_UNKNOWN_STATE_TIMEOUT.getName(), + "5 s"); + helper.openEnv(); + + final CyclicBarrier barrier = new CyclicBarrier(2); + class NodeStarter implements Runnable { + private Throwable exception; + private final RepEnvInfo node; + + NodeStarter(RepEnvInfo node) { + this.node = node; + } + + public void run() { + try { + try { + node.openEnv(); + fail("expected failure with no env dir contents"); + } catch (InsufficientLogException ile) { + NetworkRestore nr = new NetworkRestore(); + NetworkRestoreConfig config = + new NetworkRestoreConfig(); + nr.execute(ile, config); + barrier.await(); + node.openEnv(); + } + } catch (Throwable t) { + t.printStackTrace(); + exception = t; + } + } + + Throwable getException() { + return exception; + } + } + + Thread[] threads = new Thread[2]; + NodeStarter[] starters = new NodeStarter[2]; + for (int i = 0; i < 2; i++) { + starters[i] = new NodeStarter(repEnvInfo[i+1]); + threads[i] = new Thread(starters[i]); + threads[i].start(); + } + + for (int i = 0; i < 2; i++) { + threads[i].join(20000); + assertFalse(threads[i].isAlive()); + assertNull(starters[i].getException()); + } + + boolean elected = false; + final long deadline = System.currentTimeMillis() + 20000; + outer: while (System.currentTimeMillis() < deadline) { + for (RepEnvInfo rei : repEnvInfo) { + ReplicatedEnvironment e = rei.getEnv(); + State s = e.getState(); + if (s.isMaster()) { + elected = true; + break outer; + } + } + Thread.sleep(100); + } + assertTrue(elected); + } + + /** + * Tests the 1-node restore in a scenario where a master is elected before + * all nodes have completed the Network Restore. + * + * @see #testBasic + */ + @Test + public void testWithElection() throws Exception { + createGroup(); + final RepEnvInfo helper = repEnvInfo[0]; + closeNodes(repEnvInfo); + RepTestUtils.removeRepDirs(repEnvInfo[1], repEnvInfo[2]); + final ReplicationConfig conf = helper.getRepConfig(); + conf.setConfigParam(RepParams.ENV_UNKNOWN_STATE_TIMEOUT.getName(), + "5 s"); + helper.openEnv(); + + /* Trip the latch when the helper exits "unknown" state. */ + final CountDownLatch latch = new CountDownLatch(1); + ReplicatedEnvironment helperEnv = helper.getEnv(); + helperEnv.setStateChangeListener(new StateChangeListener() { + public void stateChange(StateChangeEvent event) { + if (!UNKNOWN.equals(event.getState())) { + latch.countDown(); + } + } + }); + + /* + * Since we haven't set an unknown-state timeout on Node2, we know that + * successful completion indicates that it reached either MASTER or + * REPLICA state, which means an election must have been successfully + * completed. + */ + assertTrue("expected failure with no env dir contents", + openWithNR(repEnvInfo[1])); + + State state = repEnvInfo[1].getEnv().getState(); + assertTrue(MASTER.equals(state) || REPLICA.equals(state)); + State other = state == MASTER ? REPLICA : MASTER; + latch.await(1, TimeUnit.MINUTES); + assertSame(other, helperEnv.getState()); + + /* Now that there's a master, opening Node3 here won't need a NR. */ + repEnvInfo[2].openEnv(); + } + + @Test + public void testIndirectHelper() throws Exception { + createGroup(); + final RepEnvInfo survivor = repEnvInfo[0]; + closeNodes(repEnvInfo); + RepTestUtils.removeRepDirs(repEnvInfo[1], repEnvInfo[2]); + + /* + * Make the helper hosts setting more interesting: Node3 will now + * "point to" Node2 (instead of Node1 as originally). And Node2 will + * point to both Node1 and Node3. 
In a moment, we'll start just Node2 + * and Node3, both of which have lost their env dirs. Both will try to + * ask the other for Group info, but of course both should find no + * useful info. Finally, we'll restart Node1: Node2 will be able to + * restore, and Node1 and Node2 will elect a master, and then finally + * Node3 will be able to restore. + */ + String hp1 = repEnvInfo[0].getRepConfig().getNodeHostPort(); + String hp2 = repEnvInfo[1].getRepConfig().getNodeHostPort(); + String hp3 = repEnvInfo[2].getRepConfig().getNodeHostPort(); + repEnvInfo[1].getRepConfig().setHelperHosts(hp1 + "," + hp3); + repEnvInfo[2].getRepConfig().setHelperHosts(hp2); + + /* + * Start two nodes in background threads, and verify that no master is + * elected immediately. + */ + ExecutorService threads = Executors.newFixedThreadPool(2); + class Opener implements Runnable { + int index; + Opener(int i) { index = i; } + public void run() { + openWithNR(repEnvInfo[index]); + } + } + try { + Future f2 = threads.submit(new Opener(1)); + Future f3 = threads.submit(new Opener(2)); + try { + f2.get(10, TimeUnit.SECONDS); + fail("Node2 start wasn't expected to complete yet"); + } catch (TimeoutException te) { + // expected + } + try { + /* Already waited 10 seconds above. */ + f3.get(1, TimeUnit.MILLISECONDS); + fail("Node3 start wasn't expected to complete yet"); + } catch (TimeoutException te) { + // expected + } + + /* + * Restart the one node that still has env dir, and then everyone + * should finally be able to complete startup. + */ + survivor.openEnv(); + f2.get(); + f3.get(); + } finally { + threads.shutdownNow(); + } + } + + /** + * @return true if we needed NetworkRestore; false if plain open was OK. + */ + private boolean openWithNR(RepEnvInfo rei) { + try { + rei.openEnv(); + return false; + } catch (InsufficientLogException ile) { + NetworkRestore nr = new NetworkRestore(); + NetworkRestoreConfig config = new NetworkRestoreConfig(); + nr.execute(ile, config); + rei.openEnv(); + return true; + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/networkRestore/ProtocolTest.java b/test/com/sleepycat/je/rep/impl/networkRestore/ProtocolTest.java new file mode 100644 index 0000000..6aae86a --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/networkRestore/ProtocolTest.java @@ -0,0 +1,130 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.networkRestore; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; +import java.util.Arrays; +import java.util.zip.CheckedInputStream; +import java.util.zip.CheckedOutputStream; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.impl.networkRestore.Protocol.FileStart; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.util.TestChannel; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.utilint.Adler32; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.TestBase; + +public class ProtocolTest extends TestBase { + + Protocol protocol; + private Message[] messages; + + @Before + public void setUp() + throws Exception { + + protocol = new Protocol(new NameIdPair("n1", (short)1), + Protocol.VERSION, + null); + + messages = new Message[] { + protocol.new FeederInfoReq(), + protocol.new FeederInfoResp(1, new VLSN(100), new VLSN(200)), + protocol.new FileListReq(), + protocol.new FileListResp(new String[]{"f1","f2"}), + protocol.new FileReq("f1"), + protocol.new FileStart("f1",100, System.currentTimeMillis()), + protocol.new FileEnd("f1", 100, System.currentTimeMillis(), + new byte[100]), + protocol.new FileInfoReq("f1", true), + protocol.new FileInfoResp("f1", 100, System.currentTimeMillis(), + new byte[100]), + protocol.new Done(), + }; + + } + + @Test + public void testBasic() + throws IOException { + + assertEquals(protocol.messageCount() - + protocol.getPredefinedMessageCount(), + messages.length); + for (Message m : messages) { + ByteBuffer testWireFormat = m.wireFormat().duplicate(); + Message newMessage = + protocol.read(new TestChannel(testWireFormat)); + assertTrue(newMessage.getOp() + " " + + Arrays.toString(testWireFormat.array()) + "!=" + + Arrays.toString(newMessage.wireFormat().array()), + Arrays.equals(testWireFormat.array().clone(), + newMessage.wireFormat().array().clone())); + } + } + + @Test + public void testFileReqResp() + throws IOException, Exception { + + ByteArrayOutputStream baos = new ByteArrayOutputStream(10000); + WritableByteChannel oc = Channels.newChannel(baos); + oc.write(protocol.new FileStart("f1", 100, System.currentTimeMillis()). + wireFormat().duplicate()); + + Adler32 ochecksum = new Adler32(); + CheckedOutputStream cos = new CheckedOutputStream(baos, ochecksum); + + // Simulate a file payload. + for (int i=0; i < 100; i++) { + cos.write(i); + } + ByteBuffer csum = ByteBuffer.allocate(8); + LogUtils.writeLong(csum, ochecksum.getValue()); + baos.write(csum.array()); + + byte[] o = baos.toByteArray(); + + TestChannel ch = + new TestChannel((ByteBuffer)ByteBuffer.allocate(o.length). 
+ put(o).flip()); + + FileStart m = (FileStart) protocol.read(ch); + long length = m.getFileLength(); + Adler32 ichecksum = new Adler32(); + CheckedInputStream cis = + new CheckedInputStream(Channels.newInputStream(ch), ichecksum); + for (int i=0; i < length; i++) { + assertEquals(i, cis.read()); + } + + csum = ByteBuffer.allocate(8); + ch.read(csum); + csum.flip(); + assertEquals(ochecksum.getValue(), LogUtils.readLong(csum)); + assertEquals(ochecksum.getValue(), ichecksum.getValue()); + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/CBVLSNTest.java b/test/com/sleepycat/je/rep/impl/node/CBVLSNTest.java new file mode 100644 index 0000000..9c5ddf5 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/CBVLSNTest.java @@ -0,0 +1,602 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.cbvlsn.GlobalCBVLSN; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.VLSN; + +import org.junit.Before; +import org.junit.Test; + +/** + * Because the global CBVLSN is defunct as of JE 7.5, this test is only + * maintained to exercise support for groups containing older nodes that rely + * on the global CBVLSN. This test artificially enables CBVLSN maintenance. + * See {@link GlobalCBVLSN}. + */ +public class CBVLSNTest extends RepTestBase { + + /* enable debugging printlns with -Dverbose=true */ + private final boolean verbose = Boolean.getBoolean("verbose"); + + /** + * Don't rely on RepTestBase setup, because we want to specify the + * creation of the environment config. 
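+     * Only the DatabaseConfig is created here; each test builds its own
+     * EnvironmentConfig (including log file sizing) before opening the
+     * group.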
+ */ + @Before + public void setUp() + throws Exception { + + RepTestUtils.removeRepEnvironments(envRoot); + dbconfig = new DatabaseConfig(); + dbconfig.setAllowCreate(true); + dbconfig.setTransactional(true); + dbconfig.setSortedDuplicates(false); + } + + private void setupConfig(boolean allowCleaning) { + + for (RepEnvInfo i : repEnvInfo) { + + /* Enable maintenance of CBVLSN, regardless of JE version. */ + i.getRepConfig().setConfigParam( + RepParams.TEST_CBVLSN.getName(), "true"); + + /* + * Disable MIN_RETAINED_VLSNS so that the group/global CBVLSN will + * advance without regard to the number of records written. + */ + i.getRepConfig().setConfigParam( + RepParams.MIN_RETAINED_VLSNS.getName(), "0"); + + /* + * Turn off the cleaner because this test is doing scans of the log + * file for test purposes, and those scans are not coordinated with + * the cleaner. + */ + if (!allowCleaning) { + i.getEnvConfig().setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + } + } + } + + @Test + public void testBasic() + throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + File[] dirs = RepTestUtils.makeRepEnvDirs(envRoot, groupSize); + repEnvInfo = new RepEnvInfo[groupSize]; + for (int i = 0; i < groupSize; i++) { + + /* + * Mimic the default (variable per-env) log file sizing behavior + * that would occur when this test case is the first one to be + * executed. (Since the order of execution is unpredictable, it's + * unsound to rely on getting this behavior without explicitly + * coding it here.) It makes replicas have larger log files than + * the master. With five nodes, it tends to mean that the last + * replica is still using a single log file, and its local CBVLSN + * lags behind, and gates the global CBVLSN advancement. + */ + long size = (i + 2) * 10000; + EnvironmentConfig ec = envConfig.clone(); + DbInternal.disableParameterValidation(ec); + ec.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, + Long.toString(size)); + ReplicationConfig rc = + RepTestUtils.createRepConfig(i + 1); + repEnvInfo[i] = + RepTestUtils.setupEnvInfo(dirs[i], + ec, + rc, + repEnvInfo[0]); + } + + setupConfig(false /*allowCleaning*/); + + checkCBVLSNs(1000, 2); + } + + /* + * Because this test case sets a very small log file size, + * as a result, the GlobalCBVLSN would advance when the whole group + * starts up and an InsufficientLogException would be thrown when a node + * tries to join the group at the start up, which is unexpected. + * + * To avoid this, disable the LocalCBVLSN updates on the master while + * starting up the group and enable it after the whole group is up. + */ + @Test + public void testSmallFiles() + throws Exception { + + /* + * Use uniformly small log files, which means that local CBVLSNs will + * advance frequently, and the global CBVLSN will advance. 
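+         * (Local CBVLSNs are only broadcast at log file boundaries, so the
+         * 1000-byte LOG_FILE_MAX below makes those boundaries, and hence
+         * the broadcasts, frequent.)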
+ */ + EnvironmentConfig smallFileConfig = new EnvironmentConfig(); + DbInternal.disableParameterValidation(smallFileConfig); + smallFileConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000"); + smallFileConfig.setAllowCreate(true); + smallFileConfig.setTransactional(true); + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, + groupSize, + smallFileConfig); + + setupConfig(false /*allowCleaning*/); + + checkCBVLSNs(60, 5, true); + } + + private void checkCBVLSNs(int numRecords, + int filesToUse) + throws Exception { + + checkCBVLSNs(numRecords, filesToUse, false); + } + + private void checkCBVLSNs(int numRecords, + int filesToUse, + boolean disableLocalCBVLSNUpdate) + throws Exception { + + if (!disableLocalCBVLSNUpdate) { + RepTestUtils.joinGroup(repEnvInfo); + } else { + /* Open the master. */ + repEnvInfo[0].openEnv(); + /* Disable the LocalCBVLSN updates on the master. */ + RepInternal.getNonNullRepImpl(repEnvInfo[0].getEnv()).getRepNode(). + getCBVLSNTracker().setAllowUpdate(false); + + /* + * Open other replicas one by one, so that no + * InsufficientLogException will be thrown. + */ + for (int i = 1; i < repEnvInfo.length; i++) { + repEnvInfo[i].openEnv(); + } + + /* Enable the LocalCBVLSN updates again. */ + RepInternal.getNonNullRepImpl(repEnvInfo[0].getEnv()).getRepNode(). + getCBVLSNTracker().setAllowUpdate(true); + } + + /* Master is the first node. */ + ReplicatedEnvironment mRepEnv = repEnvInfo[0].getEnv(); + RepImpl mRepImpl = repEnvInfo[0].getRepImpl(); + assertTrue(mRepEnv.getState().isMaster()); + + Database db = mRepEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + + try { + + /* + * Do work on the master, and check that its local CBVLSN advances. + */ + VLSN initialCBVLSN = workAndCheckLocalCBVLSN( + mRepEnv, db, RepTestUtils.SYNC_SYNC_ALL_TC, + numRecords, filesToUse); + + /* Make sure that all the replicas send in their local CBVLSNs. */ + doGroupWideChecks( + mRepEnv, false /*oneStalledReplica*/, initialCBVLSN); + + /* + * Crash one node, and resume execution. Make sure that the dead + * node holds back the global CBVLSN. Run this set with quorum + * acks. + */ + if (verbose) { + System.out.println("crash one node"); + } + RepEnvInfo killInfo = repEnvInfo[repEnvInfo.length - 1]; + int killNodeId = killInfo.getRepNode().getNodeId(); + killInfo.getEnv().close(); + + /* + * The last CBVLSN received on the master from the dead replica + * will (eventually) become the new group CBVLSN. + */ + VLSN deadReplicaCBVLSN = + mRepImpl.getRepNode().getGroup().getMember(killNodeId). + getBarrierState().getLastCBVLSN(); + + assertTrue(deadReplicaCBVLSN.compareTo(initialCBVLSN) > 0); + + TransactionConfig tConfig = new TransactionConfig(); + tConfig.setDurability( + new Durability(SyncPolicy.SYNC, SyncPolicy.SYNC, + ReplicaAckPolicy.SIMPLE_MAJORITY)); + + workAndCheckLocalCBVLSN( + mRepEnv, db, tConfig, numRecords, filesToUse); + + if (verbose) { + System.out.println("group wide check"); + } + + doGroupWideChecks( + mRepEnv, true /*oneStalledReplica*/, deadReplicaCBVLSN); + + } catch(Exception e) { + e.printStackTrace(); + throw e; + } finally { + db.close(); + } + } + + /** + * @return a VLSN beyond which the group CBVLSN should advance. We return + * the first VLSN written on the master, since we know that we'll write at + * least one file after that (on all nodes). 
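+     * (Assuming numRecords is at least 1, the value returned is never
+     * NULL_VLSN, since firstWrittenVLSN is captured immediately after the
+     * first commit.)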
+     */
+    private VLSN workAndCheckLocalCBVLSN(ReplicatedEnvironment mRepEnv,
+                                         Database db,
+                                         TransactionConfig tConfig,
+                                         int numRecords,
+                                         int numFilesToUse)
+        throws InterruptedException {
+
+        RepImpl mRepImpl = RepInternal.getNonNullRepImpl(mRepEnv);
+        RepNode mNode = mRepImpl.getRepNode();
+        final FileManager mFileManager = mRepImpl.getFileManager();
+        String mName = mRepEnv.getNodeName();
+        long logFileNum = mFileManager.getLastFileNum().longValue();
+        long startFileNum = logFileNum;
+        VLSN barrierVLSN =
+            mNode.getGroup().getMember(mName).getBarrierState().getLastCBVLSN();
+        VLSN firstWrittenVLSN = VLSN.NULL_VLSN;
+
+        /*
+         * Tests that every log file switch results in a local CBVLSN update
+         * on the master.
+         */
+        int mId = mRepImpl.getNodeId();
+        for (int i = 0; i < numRecords; i++) {
+            Transaction txn = mRepEnv.beginTransaction(null, tConfig);
+            IntegerBinding.intToEntry(i, key);
+            LongBinding.longToEntry(i, data);
+            db.put(txn, key, data);
+            txn.commit();
+
+            if (firstWrittenVLSN.isNull()) {
+                firstWrittenVLSN =
+                    mNode.getCBVLSNTracker().getLastSyncableVLSN();
+            }
+
+            long newFileNum = mFileManager.getLastFileNum().longValue();
+            if (logFileNum < newFileNum) {
+                logFileNum = newFileNum;
+
+                /*
+                 * We should have moved up to a new local CBVLSN.
+                 * localCBVLSNVal is updated directly, so it's sure to have
+                 * advanced.
+                 */
+                VLSN newLocalCBVLSN =
+                    mNode.getCBVLSNTracker().getBroadcastCBVLSN();
+
+                if (verbose) {
+                    System.out.println(
+                        "master's global CBVLSN = " + mNode.getGlobalCBVLSN() +
+                        " local CBVLSN = " + newLocalCBVLSN);
+                }
+
+                if (!barrierVLSN.isNull()) {
+
+                    /*
+                     * If the barrier VLSN is null, we'd expect the first
+                     * non-null VLSN to update the local CBVLSN state, so
+                     * only do this check if we're not starting out at null.
+                     */
+                    String info = "newLocal=" + newLocalCBVLSN +
+                        " prevLocal=" + barrierVLSN;
+                    assertTrue(info,
+                               newLocalCBVLSN.compareTo(barrierVLSN) >= 0);
+                }
+
+                barrierVLSN = newLocalCBVLSN;
+
+                /*
+                 * Check that the new local CBVLSN value is in the cached
+                 * group info, and the database. The group info and the
+                 * database are only updated by the master when it is running
+                 * in the FeederManager loop, so we will retry once if needed.
+                 */
+                int retries = 2;
+                VLSN indbVLSN = null;
+                while (retries-- > 0) {
+                    mNode.refreshCachedGroup();
+                    indbVLSN = mNode.getGroup().getMember(mId).
+                        getBarrierState().getLastCBVLSN();
+
+                    if (!indbVLSN.equals(newLocalCBVLSN)) {
+                        /* Wait one feederManager channel polling cycle. */
+                        Thread.sleep(1000);
+                        continue;
+                    }
+                    /* Values match; no need to retry. */
+                    break;
+                }
+
+                assertEquals("local=" + newLocalCBVLSN +
+                             " inDbVLSN=" + indbVLSN,
+                             newLocalCBVLSN, indbVLSN);
+            }
+        }
+
+        /*
+         * Test that at least two log files worth of data have been
+         * generated, so the conditional above is exercised.
+         */
+        assertTrue(logFileNum > (startFileNum + 1));
+
+        /*
+         * Test that we exercised a certain number of log files on the
+         * master, since local CBVLSNs are broadcast at file boundaries.
+         */
+        assertTrue("logFileNum=" + logFileNum +
+                   " startFileNum=" + startFileNum +
+                   " numFilesToUse=" + numFilesToUse,
+                   (logFileNum - startFileNum) >= numFilesToUse);
+        if (verbose) {
+            System.out.println("logFileNum = " + logFileNum +
+                               " start=" + startFileNum);
+        }
+
+        return firstWrittenVLSN;
+    }
+
+    /**
+     * Check replicas for local CBVLSN value consistency with respect to the
+     * current master. Also check that the global CBVLSN has been advancing,
+     * if appropriate.
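+     *
+     * The checks poll: the retry budget scales with the group size and the
+     * heartbeat interval, with a one second sleep between attempts, and the
+     * test fails with a dump of each node's view of the group if the budget
+     * is exhausted.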
+ * + * @param oneReplicaStalled true if a replica is down and is holding up + * the group CBVLSN advancement and it is stuck at previousCBVLSN. Or + * false if all nodes are up and previousCBVLSN is the group CBVLSN before + * writes were performed. + * + * @throws IOException + */ + private void doGroupWideChecks(ReplicatedEnvironment mRepEnv, + boolean oneReplicaStalled, + VLSN previousCBVLSN) + throws DatabaseException, InterruptedException, IOException { + + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if (!rep.isValid()) { + continue; + } + } + + RepNode mNode = RepInternal.getNonNullRepImpl(mRepEnv).getRepNode(); + + /* Ensure that all replicas have reasonably current syncup values. */ + int replicaCount = 0; + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if (!rep.isValid()) { + continue; + } + replicaCount++; + + RepNode repNode = RepInternal.getNonNullRepImpl(rep).getRepNode(); + final int heartBeatMs = + Integer.parseInt(rep.getRepConfig(). + getConfigParam(RepParams. + HEARTBEAT_INTERVAL.getName())); + final int retryWaitMs = 1000; + + /* + * Each replicator will result in a roundtrip update, thus taking + * more time for the group as a whole to reach stasis. So increase + * the number of retries based on the number of replicators. + */ + int retries = (repEnvInfo.length * heartBeatMs * 2) / retryWaitMs; + + if (verbose) { + System.out.println("start retries = " + retries); + } + + /* + * Check to ensure they are reasonably current at the node itself. + */ + while (true) { + VLSN groupCBVLSN = repNode.getGlobalCBVLSN(); + VLSN groupMasterCBVLSN = mNode.getGlobalCBVLSN(); + + VLSN localVLSN = + repNode.getCBVLSNTracker().getLastSyncableVLSN(); + VLSN localMasterVLSN = + mNode.getCBVLSNTracker().getLastSyncableVLSN(); + + VLSN masterBroadcastVLSN = + mNode.getCBVLSNTracker().getBroadcastCBVLSN(); + + String info = + rep.getNodeName() + " retries=" + retries + " time = " + + String.format("%tc", System.currentTimeMillis()) + + " groupCBVLSN = " + groupCBVLSN + + " groupMasterCBVLSN = " + groupMasterCBVLSN + + " localVLSN = " + localVLSN + + " localMasterVLSN = " + localMasterVLSN + + " masterBroadcastVLSN = " + masterBroadcastVLSN + + " oneReplicaStalled = " + oneReplicaStalled + + " previousCBVLSN = " + previousCBVLSN; + + if (retries-- <= 0) { + fail(info + dumpGroups()); + } + + /* + * There should be group CBVLSN agreement across the entire + * group. + */ + if (verbose) { + System.out.println(info); + } + + if (!localVLSN.equals(localMasterVLSN)) { + + /* + * Replica is still processing the replication stream. See + * if it needs to catch up. + */ + Thread.sleep(retryWaitMs); + continue; // retry + } + + /* + * Now that the replica has broadcast a local CBVLSN near the + * end of its log, check that everyone agrees on the global + * CBVLSN value. + */ + if (oneReplicaStalled) { + + /* + * The dead replica should prevent everyone from advancing + * past its CBVLSN, and everyone should agree on that + * global VLSN (eventually). + */ + if (!previousCBVLSN.equals(groupCBVLSN) || + !previousCBVLSN.equals(groupMasterCBVLSN)) { + Thread.sleep(retryWaitMs); + continue; // retry + } + } else { + + /* + * When no replica is stalled, unfortunately we can't + * guarantee the local and group CBVLSNs will ever match. 
+ * This is because the CBVLSN is only broadcast when the + * file flips, and we broadcast the VLSN from the + * penultimate file, + * + * Because we always write multiple files since the last + * known point, we can confirm that the group CBVLSN has + * advanced. But we can't predict how far. + */ + if (groupMasterCBVLSN.compareTo(previousCBVLSN) <= 0) { + Thread.sleep(retryWaitMs); + continue; // retry + } + } + break; + } + } + + /* We should have checked all the live replicas. */ + assertEquals(replicaCount, + (oneReplicaStalled ? repEnvInfo.length - 1 : + repEnvInfo.length)); + } + + private String dumpGroups() { + StringBuilder sb = new StringBuilder(); + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if (!rep.isValid()) { + continue; + } + dumpGroup(rep, sb); + } + return sb.toString(); + } + + private void dumpGroup(ReplicatedEnvironment repEnv, StringBuilder sb) { + RepNode repNode = RepInternal.getNonNullRepImpl(repEnv).getRepNode(); + sb.append("\n").append(repEnv.getNodeName()).append(" group members:"); + for (RepNodeImpl n : repNode.getGroup().getAllElectableMembers()) { + sb.append("\n ").append(n.getName()); + sb.append(" ").append(n.getBarrierState()); + } + } + + @Test + public void testDbUpdateSuppression() + throws DatabaseException, InterruptedException, IOException { + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, groupSize); + setupConfig(false /*allowCleaning*/); + + ReplicatedEnvironment mEnv = RepTestUtils.joinGroup(repEnvInfo); + assertEquals(ReplicatedEnvironment.State.MASTER, mEnv.getState()); + RepImpl repInternal = RepInternal.getNonNullRepImpl(mEnv); + RepNode masterNode = repInternal.getRepNode(); + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + final FileManager masterFM = repInternal.getFileManager(); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + RepGroupImpl group1 = masterNode.getGroup(); + long fileNum1 = masterFM.getLastFileNum().longValue(); + + Database db = mEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + + /* Force two new log files. */ + for (int i=0; true; i++) { + IntegerBinding.intToEntry(i, key); + LongBinding.longToEntry(i, data); + db.put(null, key, data); + if (masterFM.getLastFileNum().longValue() > (fileNum1+1)) { + break; + } + } + RepGroupImpl group2 = masterNode.getGroup(); + for (RepNodeImpl n1 : group1.getAllElectableMembers()) { + RepNodeImpl n2 = group2.getMember(n1.getNodeId()); + assertEquals(n1.getBarrierState(), n2.getBarrierState()); + } + db.close(); + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(false); + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/DbCacheTest.java b/test/com/sleepycat/je/rep/impl/node/DbCacheTest.java new file mode 100644 index 0000000..7c43c96 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/DbCacheTest.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class DbCacheTest extends TestBase { + + protected final File envRoot = SharedTestUtils.getTestDir(); + + static final int dbCount = 10; + static final int cacheSize = 3; + + DatabaseImpl dbImpls[] = new DatabaseImpl[dbCount]; + + Environment env = null; + private EnvironmentImpl envImpl; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envRoot, envConfig); + envImpl = DbInternal.getNonNullEnvImpl(env); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + for (int i=0; i < dbCount; i++) { + Database db = env.openDatabase(null, "db"+i, dbConfig); + dbImpls[i] = DbInternal.getDbImpl(db); + db.close(); + } + } + + @After + public void tearDown() { + env.close(); + } + + @Test + public void testCacheMRU() { + + DbCache dbCache = new DbCache(envImpl.getDbTree(), cacheSize, + Integer.MAX_VALUE); + for (int i=0; i < dbCount; i++) { + DatabaseImpl dbImpl = dbCache.get(dbImpls[i].getId(), null); + assertEquals(Math.min(i+1, cacheSize), dbCache.getMap().size()); + assertEquals(dbImpls[i].getId(), dbImpl.getId()); + } + + /* Verify that the correct handles have been released. */ + for (int i=0; i < dbCount; i++) { + if (i < (dbCount - cacheSize)) { + assertTrue(!dbImpls[i].isInUse()); + } else { + assertTrue(dbImpls[i].isInUse()); + } + } + } + + @Test + public void testCacheTimeout() throws InterruptedException { + + DbCache dbCache = new DbCache(envImpl.getDbTree(), cacheSize, 1000); + Thread.sleep(1000); + dbCache.tick(); + Thread.sleep(1000); + dbCache.tick(); + + /* Verify that the correct handles have been released. */ + for (int i=0; i < dbCount; i++) { + assertTrue(!dbImpls[i].isInUse()); + } + assertEquals(0, dbCache.getMap().size()); + + dbCache = new DbCache(envImpl.getDbTree(), cacheSize, 0); + dbCache.get(dbImpls[0].getId(), null); + dbCache.get(dbImpls[1].getId(), null); + dbCache.tick(); + dbCache.get(dbImpls[0].getId(), null); + dbCache.tick(); + assertTrue(!dbImpls[1].isInUse()); + assertTrue(dbImpls[0].isInUse()); + } + +} diff --git a/test/com/sleepycat/je/rep/impl/node/FeederRecordBatchTest.java b/test/com/sleepycat/je/rep/impl/node/FeederRecordBatchTest.java new file mode 100644 index 0000000..462c83e --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/FeederRecordBatchTest.java @@ -0,0 +1,179 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicatedEnvironmentStats; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.utilint.VLSN; + +/** + * Tests to verify that feeder output records are being batched as expected. + */ +public class FeederRecordBatchTest extends RepTestBase { + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + + /** + * Relax the time allowed for batching to a full one second to avoid + * breaking up batches. + */ + ReplicationConfig mrc = repEnvInfo[0].getRepConfig(); + mrc.setConfigParam(RepParams.FEEDER_BATCH_NS.getName(), + "1000000000"); + /** + * Effectively suppress periodic heartbeats so they don't interfere + * with message counts. + */ + mrc.setConfigParam(RepParams.HEARTBEAT_INTERVAL.getName(), + "1000000"); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + } + + /** + * Basic test to check that messages are being batched as follows: + * + * 1) Create a two node group. + * 2) Shut down the replica. + * 3) Populate a database at the master. + * 4) Clear the stats. + * 5) Bring up the replica which needs to replay and catch up. + * 6) The Feeder has the opportunity to batch records, since the replica + * is lagging. + * 7) Verify that records are being batched by checking the statistics. + * 8) Verify that records are the same and have not been mangled. + */ + @Test + public void testBasic() throws InterruptedException { + createGroup(2); + + ReplicatedEnvironment menv = repEnvInfo[0].getEnv(); + assertEquals(menv.getState(), State.MASTER); + repEnvInfo[1].closeEnv(); + + final int nRecords = 1000; + populateDB(menv, nRecords); + + ReplicatedEnvironmentStats stats = + menv.getRepStats(StatsConfig.CLEAR); + assertEquals(0, stats.getNProtocolMessagesBatched()); + + /* Open replica and catch up. */ + repEnvInfo[1].openEnv(); + + /* Wait for catchup. */ + VLSN vlsn = RepTestUtils.syncGroup(repEnvInfo); + /* + * All the messages must have been sent in batches as part of the sync + * operation. + */ + stats = menv.getRepStats(null); + + assertTrue(stats.getNProtocolMessagesBatched() >= nRecords); + + /* Verify contents. */ + RepTestUtils.checkNodeEquality(vlsn, false, repEnvInfo); + } + + /** + * Test to ensure that batching works correctly around a large object that + * does not fit into the cache. 
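+     * A record larger than the batch buffer cannot share it with other
+     * records, so the write pattern used below:
+     * <pre>
+     *    sizes = { 1, 2, batchBuffSize + 1, 3, 4 }
+     * </pre>
+     * is expected to yield two batches (the first two puts; the last two
+     * puts plus the commit), with the oversized record sent unbatched.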
+ */ + @Test + public void testDataMix() + throws InterruptedException { + + createGroup(2); + + ReplicatedEnvironment menv = repEnvInfo[0].getEnv(); + TransactionConfig txnConfig = RepTestUtils.WNSYNC_NONE_TC; + Transaction txn = menv.beginTransaction(null, txnConfig); + Database db = menv.openDatabase(txn, "mixed", dbconfig); + txn.commit(); + txn = null; + RepTestUtils.syncGroup(repEnvInfo); + + repEnvInfo[1].closeEnv(); + assertEquals(menv.getState(), State.MASTER); + + final int batchBuffSize = + Integer.parseInt(RepParams.FEEDER_BATCH_BUFF_KB.getDefault()) * + 1024; + + txn = menv.beginTransaction(null, txnConfig); + + /* + * Generate a log pattern with an intervening large object that serves + * to break up a batch. + */ + for (int size : new int[] { 1, 2, /* batch 1 */ + batchBuffSize + 1, /* break the batch */ + 3, 4 /* batch 2 */ + }) { + IntegerBinding.intToEntry(size, key); + data.setData(new byte[size]); + db.put(txn, key, data); + } + txn.commit(); + db.close(); + + + ReplicatedEnvironmentStats stats = + menv.getRepStats(StatsConfig.CLEAR); + + repEnvInfo[1].openEnv(); + final VLSN vlsn = RepTestUtils.syncGroup(repEnvInfo); + + stats = menv.getRepStats(null); + + /* + * Seven total messages: 1 unconditional startup heartbeat + + * 5 puts + 1 commit + */ + assertEquals(7, stats.getNProtocolMessagesWritten()); + + /* 4 puts + 1 commit batched. */ + assertEquals(5, stats.getNProtocolMessagesBatched()); + + /* 2 batches as above. */ + assertEquals(2, stats.getNProtocolMessageBatches()); + + RepTestUtils.checkNodeEquality(vlsn, false, repEnvInfo); + } + +} diff --git a/test/com/sleepycat/je/rep/impl/node/GroupShutdownTest.java b/test/com/sleepycat/je/rep/impl/node/GroupShutdownTest.java new file mode 100644 index 0000000..23ede7c --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/GroupShutdownTest.java @@ -0,0 +1,423 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Arrays; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.NoConsistencyRequiredPolicy; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.utilint.TestHookAdapter; + +public class GroupShutdownTest extends RepTestBase { + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + + /* Include a SECONDARY node. */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + repEnvInfo[repEnvInfo.length-1].getRepConfig().setNodeType( + NodeType.SECONDARY); + } + + @Override + @After + public void tearDown() + throws Exception { + + super.tearDown(); + RepNode.queryGroupForMembershipBeforeSleepHook = null; + RepNode.beforeFindRestoreSupplierHook = null; + RepNode.queryGroupForMembershipBeforeQueryForMaster = null; + RepUtils.openSocketChannelHook = null; + } + + @Test + public void testShutdownExceptions() { + createGroup(); + ReplicatedEnvironment mrep = repEnvInfo[0].getEnv(); + + try { + repEnvInfo[1].getEnv().shutdownGroup(10000, TimeUnit.MILLISECONDS); + fail("expected exception"); + } catch (IllegalStateException e) { + /* OK, shutdownGroup on Replica. */ + } + + ReplicatedEnvironment mrep2 = + new ReplicatedEnvironment(repEnvInfo[0].getEnvHome(), + repEnvInfo[0].getRepConfig(), + repEnvInfo[0].getEnvConfig()); + + try { + mrep.shutdownGroup(10000, TimeUnit.MILLISECONDS); + fail("expected exception"); + } catch (IllegalStateException e) { + /* OK, multiple master handles. */ + mrep2.close(); + } + mrep.shutdownGroup(10000, TimeUnit.MILLISECONDS); + for (int i=1; i < repEnvInfo.length; i++) { + repEnvInfo[i].closeEnv(); + } + } + + @Test + public void testShutdownTimeout() + throws InterruptedException { + + new ShutdownSupport() { + @Override + void checkException(GroupShutdownException e){} + }.shutdownBasic(500, 1); + } + + @Test + public void testShutdownBasic() + throws InterruptedException { + + new ShutdownSupport() { + @Override + void checkException(GroupShutdownException e) { + /* + * It's possible, in rare circumstances, for the exception to + * not contain the shutdown VLSN, that is, for it to be null, + * because the VLSNIndex range was not yet initialized. Ignore + * it in that circumstance. + */ + assertTrue((e.getShutdownVLSN() == null) || + (ct.getVLSN() <= + e.getShutdownVLSN().getSequence())); + } + }.shutdownBasic(10000, 0); + } + + /** + * Test that shutting a node down while it is querying for group membership + * still causes the node to exit. Previously, the query failed to check + * for shutdowns, and so the query continued, causing the node to fail to + * shutdown. 
[#24600] + */ + @Test + public void testShutdownDuringQueryForGroupMembership() + throws Exception { + + /* + * Number of minutes to wait for things to settle, to support testing + * on slow machines. + */ + final long wait = 2; + + createGroup(); + RepTestUtils.shutdownRepEnvs(repEnvInfo); + + final CountDownLatch hookCalled = new CountDownLatch(1); + final CountDownLatch closeDone = new CountDownLatch(1); + class Hook extends TestHookAdapter { + volatile Throwable exception; + @Override + public void doHook(String obj) { + hookCalled.countDown(); + try { + assertTrue("Wait for close", + closeDone.await(wait, TimeUnit.MINUTES)); + /* + * Wait for the soft shutdown to timeout, which happens + * after 4 seconds for RepNode, since it is the failure + * to detect the subsequent interrupt that was the bug. + * The interrupt should come during this sleep, but the + * situation we are testing is where the interrupt occurs + * not during a sleep, so ignore the interrupt here. + */ + try { + Thread.sleep(10 * 1000); + } catch (InterruptedException e) { + } + } catch (Throwable t) { + exception = t; + } + } + }; + final Hook hook = new Hook(); + RepNode.queryGroupForMembershipBeforeSleepHook = hook; + + /* + * Test using the secondary node, since it is easier to reproduce this + * situation there. + */ + final RepEnvInfo nodeInfo = repEnvInfo[repEnvInfo.length - 1]; + + /* Reduce the environment setup timeout to make the test quicker */ + nodeInfo.getRepConfig().setConfigParam( + RepParams.ENV_SETUP_TIMEOUT.getName(), "10 s"); + + final OpenEnv openEnv = new OpenEnv(nodeInfo); + openEnv.start(); + assertTrue("Wait for hook to be called", + hookCalled.await(wait, TimeUnit.MINUTES)); + nodeInfo.closeEnv(); + closeDone.countDown(); + openEnv.join(wait * 60 * 1000); + assertFalse("OpenEnv thread exited", openEnv.isAlive()); + if (hook.exception != null) { + throw new RuntimeException( + "Unexpected exception: " + hook.exception); + } + } + + /** + * Check for another EnvironmentWedgedException reported in [#25314]. In + * this case, the shutdown request comes during the call by + * RepNode.queryGroupForMembership to Learner.queryForMaster, which was + * interrupted but did not exit for 10 seconds because it failed to check + * for shutdowns. Then the subsequent socket connection to the previous + * master in RepNode.checkGroupMasterIsAlive was still underway when the + * shutdown was reported wedged. + */ + @Test + public void testShutdownDuringQueryForGroupMembershipQueryLearners() + throws Exception { + + /* + * Number of minutes to wait for things to settle, to support testing + * on slow machines. + */ + final long wait = 2; + + createGroup(); + + /* + * Make sure that there are multiple helpers, which are needed to + * cause Learner.queryForMaster to continue looping after an + * interrupt. + */ + updateHelperHostConfig(); + + /* Shutdown all but the last secondary node */ + RepTestUtils.shutdownRepEnvs( + Arrays.copyOf(repEnvInfo, repEnvInfo.length - 1)); + + /* Track when the Learner.queryForMaster call starts */ + final CountDownLatch queryHookCalled = new CountDownLatch(1); + class QueryHook extends TestHookAdapter { + @Override + public void doHook(String x) { + queryHookCalled.countDown(); + } + }; + final QueryHook queryHook = new QueryHook(); + RepNode.queryGroupForMembershipBeforeQueryForMaster = queryHook; + + /* + * Simulate socket connection timeouts that come from network + * disconnections. 
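+ * The hook below simply sleeps for ConnectOptions.getOpenTimeout(),
+ * standing in for a host that never answers; the interrupt delivered
+ * during shutdown is expected to land in that sleep, which is what the
+ * InterruptedException assertion at the end of the test checks.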
+ */ + class OpenHook extends TestHookAdapter { + volatile Exception exception; + @Override + public void doHook(ConnectOptions options) { + try { + Thread.sleep(options.getOpenTimeout()); + } catch (Exception e) { + exception = e; + } + } + }; + final OpenHook openHook = new OpenHook(); + RepUtils.openSocketChannelHook = openHook; + + /* + * Close the secondary node after the call to Learner.queryForMaster + * starts + */ + final RepEnvInfo nodeInfo = repEnvInfo[repEnvInfo.length - 1]; + class CloseEnv extends Thread { + volatile Throwable exception; + CloseEnv() { setDaemon(true); } + @Override + public void run() { + try { + nodeInfo.closeEnv(); + } catch (Throwable t) { + exception = t; + } + } + } + CloseEnv closeEnv = new CloseEnv(); + assertTrue(queryHookCalled.await(wait, TimeUnit.MINUTES)); + closeEnv.start(); + + /* Wait for the close to complete and check for success */ + closeEnv.join(wait * 60 * 1000); + assertFalse("CloseEnv thread exited", closeEnv.isAlive()); + assertEquals("CloseEnv throws", null, closeEnv.exception); + assertEquals("OpenHook throws", + InterruptedException.class, + openHook.exception.getClass()); + } + + /** + * Test that the node shuts down in a timely manner while searching for + * network restore suppliers. Tests that that process checks for shutdown + * between suppliers. [#25314] + */ + @Test + public void testShutdownDuringFindRestoreSuppliers() + throws Exception { + + /* + * Number of minutes to wait for things to settle, to support testing + * on slow machines. + */ + final long wait = 2; + + createGroup(); + + /* Make sure that there are multiple helpers */ + updateHelperHostConfig(); + + /* Install a hook that will be called when finding restore suppliers */ + final CountDownLatch hookCalled = new CountDownLatch(1); + RepNode.beforeFindRestoreSupplierHook = new TestHookAdapter() { + @Override + public void doHook(String obj) { + hookCalled.countDown(); + try { + /* + * Wait long enough that multiple waits for failing + * attempts to contact network restore suppliers after a + * shutdown has been requested will cause the shutdown to + * fail. Ignore interrupts, since, although the interrupts + * could interrupt the connect attempts, the code would + * continue to contact the next possible supplier. + */ + Thread.sleep(10 * 1000); + } catch (Throwable t) { + } + } + }; + + /* Shutdown all nodes except for the secondary */ + final RepEnvInfo[] allButSecondary = + Arrays.copyOf(repEnvInfo, repEnvInfo.length - 1); + RepTestUtils.shutdownRepEnvs(allButSecondary); + + /* + * Wait for the find restore supplier hook to be called, then close the + * environment, which needs to succeed. + */ + assertTrue("Wait for hook to be called", + hookCalled.await(wait, TimeUnit.MINUTES)); + final RepEnvInfo nodeInfo = repEnvInfo[repEnvInfo.length - 1]; + nodeInfo.closeEnv(); + } + + /* -- Utility classes and methods -- */ + + abstract class ShutdownSupport { + CommitToken ct; + + abstract void checkException(GroupShutdownException e); + + public void shutdownBasic(long timeoutMs, + int testDelayMs) + throws InterruptedException { + + createGroup(); + ReplicatedEnvironment mrep = repEnvInfo[0].getEnv(); + leaveGroupAllButMaster(); + + ct = populateDB(mrep, TEST_DB_NAME, 1000); + repEnvInfo[0].getRepNode().feederManager(). 
+ setTestDelayMs(testDelayMs); + restartReplicasNoWait(); + + mrep.shutdownGroup(timeoutMs, TimeUnit.MILLISECONDS); + + for (int i=1; i < repEnvInfo.length; i++) { + RepEnvInfo repi = repEnvInfo[i]; + final int retries = 100; + for (int j=0; j < retries; j++) { + try { + /* Provoke exception */ + repi.getEnv().getState(); + if ((j+1) == retries) { + fail("expected exception from " + + repi.getRepNode().getNameIdPair()); + } + /* Give the replica time to react */ + Thread.sleep(1000); /* a second between retries */ + } catch (GroupShutdownException e) { + checkException(e); + break; + } + } + /* Close the handle. */ + repi.closeEnv(); + } + } + } + + /** + * Start up replicas for existing master, but don't wait for any + * consistency to be reached. + */ + private void restartReplicasNoWait() { + for (int i=1; i < repEnvInfo.length; i++) { + RepEnvInfo ri = repEnvInfo[i]; + ri.openEnv(new NoConsistencyRequiredPolicy()); + } + } + + private class OpenEnv extends Thread { + final RepEnvInfo repEnvInfo; + volatile Throwable exception; + OpenEnv(RepEnvInfo repEnvInfo) { + this.repEnvInfo = repEnvInfo; + setDaemon(true); + } + + @Override + public void run() { + try { + repEnvInfo.openEnv(); + } catch (Throwable t) { + exception = t; + } + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/MasterBounceTest.java b/test/com/sleepycat/je/rep/impl/node/MasterBounceTest.java new file mode 100644 index 0000000..230651c --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/MasterBounceTest.java @@ -0,0 +1,69 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +/** + * A master going up and down should result in an election being held even if + * all the replicas still agree that it's the master. This is because the + * other replicas may have more up to date logs than the master, which may + * have lost transactions when it went down. + * + * SR17911 has more detail. + */ +public class MasterBounceTest extends RepTestBase { + + /* (non-Javadoc) + * @see com.sleepycat.je.rep.impl.RepTestBase#setUp() + */ + @Before + public void setUp() + throws Exception { + + /* + * A rep group of two effectively prevents another election from being + * held across the bounce, since there is no election quorum. + */ + groupSize = 2; + super.setUp(); + } + + @Test + public void testBounce() { + createGroup(); + final RepEnvInfo masterInfo = repEnvInfo[0]; + ReplicatedEnvironment master = masterInfo.getEnv(); + assertTrue(master.getState().isMaster()); + + /* No elections since the group grew around the first node. */ + assertEquals(0, masterInfo.getRepNode().getElections(). + getElectionCount()); + + masterInfo.closeEnv(); + masterInfo.openEnv(); + + /* Verify that an election was held to select a new master. 
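+ * The counter was asserted to be 0 before the bounce, so a value of 1
+ * here means exactly one election ran across the restart.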
*/ + assertEquals(1, masterInfo.getRepNode().getElections(). + getElectionCount()); + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/MasterTransferTest.java b/test/com/sleepycat/je/rep/impl/node/MasterTransferTest.java new file mode 100644 index 0000000..5d901c5 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/MasterTransferTest.java @@ -0,0 +1,3129 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static com.sleepycat.je.Durability.ReplicaAckPolicy.NONE; +import static com.sleepycat.je.Durability.ReplicaAckPolicy.SIMPLE_MAJORITY; +import static com.sleepycat.je.OperationStatus.NOTFOUND; +import static com.sleepycat.je.OperationStatus.SUCCESS; +import static com.sleepycat.je.log.LogEntryType.LOG_TXN_ABORT; +import static com.sleepycat.je.log.LogEntryType.LOG_TXN_COMMIT; +import static com.sleepycat.je.rep.NoConsistencyRequiredPolicy.NO_CONSISTENCY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.io.StringReader; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.MasterTransferFailureException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicationConfig; +import 
com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.monitor.Monitor; +import com.sleepycat.je.rep.stream.InputWireRecord; +import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.util.DbGroupAdmin; +import com.sleepycat.je.rep.util.ReplicationGroupAdmin; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookAdapter; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Unit tests for the Master Transfer operation. + * + * @see ReplicatedEnvironment#transferMaster + */ +public class MasterTransferTest extends RepTestBase { + /** + * Time duration used to control slow-paced tests, in milliseconds. + *
        + * Several of these tests use a {@code TestHook} in replica Replay in order + * to deliberately slow down the processing, so as to control the relative + * timing of events in racing threads. How much time is enough? For the + * tests to be reliable when run unattended as part of a full test suite, + * the delays must be estimated fairly generously, in order to allow for a + * range of processor speeds and other concurrent system activity. On the + * other hand, if you're debugging just this one test interactively and + * repeatedly, a generous time estimate would make you impatient. + *
        + * Therefore many of the time delay durations used here are computed in + * terms of this "tick", which is defined to be 1 second by default, but + * can be overridden as desired: values on the order of 100-200 msec seem to + * work well for interactive use on a contemporary workstation that is not + * otherwise overloaded with other activity. + *
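+ * For example, a faster pace for an interactive run can be selected on
+ * the command line via the system property read below:
+ *
+ *    java -DfasterPace=150 ...
+ *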
        + * (Note that some of the time limits used here that are not computed in + * terms of this tick are merely fail-safe limits that should never come + * into play, but are present only to prevent an infinite hang. For + * example, when threads are expected to finish "right away" we might + * {@code join()} them with a 5000 msec limit, even though they should + * usually finish much more quickly. Reducing the limit in these cases + * cannot normally help make the test complete more quickly.) + */ + final static long TICK = + Long.getLong("fasterPace", 1000).longValue(); + + /** All potentially open databases, so we can close them during tearDown */ + private final List databases = new LinkedList<>(); + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + } + + @Override + @After + public void tearDown() + throws Exception { + + Replica.setInitialReplayHook(null); + for (final Database db : databases) { + try { + db.close(); + } catch (Exception e) { + /* Ignore during cleanup */ + } + } + databases.clear(); + super.tearDown(); + } + + /** + * Tests a basic master transfer operation in a group of 3, with the master + * idle at the time of the transfer. Both replicas are named as candidate + * targets for the transfer. + */ + @Test + public void testBasic() throws Exception { + createGroup(); + /* env[0] is guaranteed to be the master. */ + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + int nRecords = 3; // arbitrary + populateDB(master, nRecords); + + /* Setup hooks for state change and syncup. */ + final CountDownLatch becameMaster = new CountDownLatch(1); + for (int i = 1; i < groupSize; i++) { + repEnvInfo[i].getEnv().setStateChangeListener + (new MasterListener(becameMaster)); + } + CountDownLatch syncupWaiter = RepTestUtils.setupWaitForSyncup(master, 1); + + /* Transfer master to any of the replicas. */ + Set replicas = new HashSet<>(); + for (int i = 1; i < groupSize; i++) { + replicas.add(repEnvInfo[i].getEnv().getNodeName()); + } + String newMaster = + master.transferMaster(replicas, 10, TimeUnit.SECONDS); + + /* Expect that one of the replicas became the master. */ + boolean found = false; + for (int i = 1; i < groupSize; i++) { + if (newMaster.equals(repEnvInfo[i].getEnv().getNodeName())) { + found = true; + break; + } + } + assertTrue("node name produced as result not in candidate set", + found); + + /* Expect that the state change event was also received. */ + boolean tookOver = becameMaster.await(10, TimeUnit.SECONDS); + assertTrue("neither replica became master within 10 seconds of " + + "supposed completion of transfer", + tookOver); + + /* + * The old master should sync up with the new master, without a + * recovery. + */ + assertTrue(syncupWaiter.await(10, TimeUnit.SECONDS)); + + /* Master should now be a replica, and able to support reads. */ + assertEquals(master.getState(), + ReplicatedEnvironment.State.REPLICA); + } + + /** + * Tests a master transfer with a transaction in progress. 
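+ * A compressed sketch of what the test drives (mirroring the code
+ * below):
+ *
+ *    txn = masterEnv.beginTransaction(null, null);
+ *    db.put(txn, key, value);          // uncommitted write locks
+ *    masterEnv.transferMaster(replicas, 5, TimeUnit.SECONDS);
+ *    txn.commit();                     // throws IllegalStateException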
+ */ + @Test + public void testInFlightTxn() throws Exception { + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + Transaction txn = masterEnv.beginTransaction(null, null); + try { + for (int i = 0; i < 10; i++) { + IntegerBinding.intToEntry(i, key1); + LongBinding.longToEntry(i, value); + db.put(txn, key1, value); + } + + /* Leave the transaction uncommitted. */ + Set replicas = new HashSet<>(); + replicas.add(replicaName); + String result = + masterEnv.transferMaster(replicas, 5, TimeUnit.SECONDS); + assertEquals(replicaName, result); + awaitSettle(master, replica); + + try { + txn.commit(); + fail("expected exception from commit()"); + } catch (IllegalStateException expected) { + } + assertFalse(txn.isValid()); + txn.abort(); + } finally { + safeAbort(txn); + } + db.close(); + + masterEnv = repEnvInfo[1].getEnv(); + db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + IntegerBinding.intToEntry(1, key1); + OperationStatus ret = db.get(null, key1, value, null); + assertEquals(NOTFOUND, ret); + db.close(); + } + + /** Abort a transaction for cleanup, ignoring any exceptions. */ + private static void safeAbort(final Transaction txn) { + try { + txn.abort(); + } catch (Exception e) { + } + } + + /** + * Use test hooks to orchestrate a number of transactions that will be + * inflight when the master transfer happens, and have the application + * issue inserts or commits while the transfer is in play. + * Check afterwards that locks are released appropriately, and the inflight + * transactions have the proper state. + */ + @Test + public void testApplicationActivityDuringConversion() throws Exception { + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + + /* + * Populate the db with a committed records 100, 101, 102. We'll read + * lock them so the in-progress txns have both read and write locks. + */ + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + DatabaseEntry val = new DatabaseEntry(); + DatabaseEntry readEntry = new DatabaseEntry(); + + Transaction txn = masterEnv.beginTransaction(null, null); + try { + for (int i = 100; i <=102; i++) { + IntegerBinding.intToEntry(i, val); + db.putNoOverwrite(txn, val, val); + } + txn.commit(); + } finally { + safeAbort(txn); + } + + /* + * Make several in-flight, uncommitted txns which write a record + * and read record 100. The write locks will be on records 1-5. 
+ */ + List testTxns = new ArrayList<>(); + Transaction readLockTxn = null; + try { + int numInFlightTxns = 14; + for (int i = 0; i < numInFlightTxns; i++) { + txn = masterEnv.beginTransaction(null, null); + testTxns.add(txn); + IntegerBinding.intToEntry(i+1, val); + /* Get a write lock on a new value */ + db.put(txn, val, val); + + /* get a read lock on an old value */ + IntegerBinding.intToEntry(100, val); + assertEquals(OperationStatus.SUCCESS, + db.get(txn, val, readEntry, null)); + } + + /* Make an uncommitted txn that has read locks on 101 and 102 */ + readLockTxn = masterEnv.beginTransaction(null, null); + + IntegerBinding.intToEntry(101, val); + assertEquals(OperationStatus.SUCCESS, + db.get(readLockTxn, val, readEntry, null)); + IntegerBinding.intToEntry(102, val); + assertEquals(OperationStatus.SUCCESS, + db.get(readLockTxn, val, readEntry, null)); + + /* + * Generate 12 hooks. + * - also have one txn with no write locks, should still be valid. + */ + CountDownLatch done = new CountDownLatch(12); + Set hooks = new HashSet<>(); + hooks.addAll(setupTxnEndHooks(testTxns.get(0), + testTxns.get(1), + testTxns.get(2), + done)); + + hooks.addAll(setupInsertHooks(testTxns.get(3), + testTxns.get(4), + testTxns.get(5), + db, + done)); + + hooks.addAll(setupUpdateHooks(testTxns.get(6), + testTxns.get(7), + testTxns.get(8), + db, + done)); + + hooks.addAll( + setupRNConvertHooks( + RepInternal.getNonNullRepImpl(masterEnv).getRepNode(), + testTxns.get(9), + testTxns.get(10), + testTxns.get(11), + db, + done)); + + /* issue a transfer. */ + Set replicas = new HashSet<>(); + replicas.add(replicaName); + String result = masterEnv.transferMaster(replicas, 5, + TimeUnit.SECONDS); + assertEquals(replicaName, result); + awaitSettle(master, replica); + + /* Check hooks */ + done.await(); + for (CheckedHook h : hooks) { + assertTrue(h.toString() + " must run", h.ran); + assertTrue(h + " " + h.problem, h.problem == null); + } + + /* The inflight txns should be aborted and have no locks */ + for (Transaction t: testTxns) { + logger.info("checking state on " + t + "/" + t.getState()); + assertFalse(t.isValid()); + assertFalse (t.getState().equals(Transaction.State.COMMITTED)); + // if (t.getState().equals(Transaction.State.COMMITTED)) { + // logger.info("txn " + t.getId() + " is committed"); + // continue; + // } + + Txn internalT = DbInternal.getTxn(t); + /* The read locks should be null or zeroed */ + if (internalT.getReadLockIds() != null) { + assertEquals("txn " + internalT.getId(), 0, + internalT.getReadLockIds().size()); + } + assertEquals("txn " + t.getId(), 0, + DbInternal.getTxn(t).getWriteLockIds().size()); + try { + t.commit(); + fail("expected exception from commit, already closed()"); + } catch (IllegalStateException expected) { + logger.info("Expected ISE for txn " + t.getId()); + } + + t.abort(); + } + db.close(); + + /* The transaction that has read locks should still be good. */ + assertTrue("readLockTxn = " + readLockTxn.getId() + "/" + + readLockTxn.getState(), readLockTxn.isValid()); + assertEquals(2, DbInternal.getTxn(readLockTxn). + getReadLockIds().size()); + + /* + * Now that the transfer has happened, confirm that none of the + * in-flight data exists on any of the environments. 
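+ * Committed keys 100-102 must read back SUCCESS on every node, while
+ * keys 1-5, written only by the aborted in-flight txns, must come back
+ * NOTFOUND.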
+ */ + + /* Syncup the group */ + masterEnv = repEnvInfo[1].getEnv(); + Database checkDb = masterEnv.openDatabase(null, TEST_DB_NAME, + dbconfig); + databases.add(checkDb); + allAckWrite(repEnvInfo[1].getEnv(), checkDb, 1000); + checkDb.close(); + + /* Check that committed data is there, uncommitted is gone */ + for (RepEnvInfo info: repEnvInfo) { + ReplicatedEnvironment env = info.getEnv(); + logger.info("reading after transfer for node " + + env.getNodeName()); + DatabaseEntry readKey = new DatabaseEntry(); + DatabaseEntry readVal = new DatabaseEntry(); + Database readDb = env.openDatabase(null, TEST_DB_NAME, + dbconfig); + databases.add(readDb); + for (int i = 100; i <= 102; i++) { + IntegerBinding.intToEntry(i, readKey); + txn = env.beginTransaction(null, null); + try { + assertEquals(OperationStatus.SUCCESS, + readDb.get(txn, readKey, readVal, null)); + txn.commit(); + } finally { + safeAbort(txn); + } + } + for (int i = 1; i <= 5; i++) { + IntegerBinding.intToEntry(i, readKey); + txn = env.beginTransaction(null, null); + try { + assertEquals(OperationStatus.NOTFOUND, + readDb.get(txn, readKey, readVal, null)); + txn.commit(); + } finally { + safeAbort(txn); + } + } + readDb.close(); + } + + readLockTxn.commit(); + } finally { + for (Transaction t : testTxns) { + safeAbort(t); + } + safeAbort(readLockTxn); + } + } + + private Set setupTxnEndHooks(Transaction txn1, + Transaction txn2, + Transaction txn3, + CountDownLatch done) { + + Set hooks = new HashSet<>(); + /* Commit while frozen in MasterTxn.convertToReplayTxnAndClose */ + final TxnEndHook whileFrozen = + new TxnEndHook(0, txn1, UnknownMasterException.class, + Transaction.State.MUST_ABORT, done, + "hook: commit while frozen"); + + ((MasterTxn)DbInternal.getTxn(txn1)).setConvertHook(whileFrozen); + hooks.add(whileFrozen); + + /* + * Commit after the freeze in MasterTxn.convertToReplayTxnAndClose, + * before the txn is closed. + * Even though the txn has no write locks left, it still must abort, + * because its writes are not committed. + */ + final TxnEndHook afterUnfrozen = + new TxnEndHook(1, txn2, UnknownMasterException.class, + Transaction.State.ABORTED, + done, "hook: commit after unfreeze"); + ((MasterTxn)DbInternal.getTxn(txn2)).setConvertHook(afterUnfrozen); + hooks.add(afterUnfrozen); + + /* Commit after close in MasterTxn.convertToReplayTxnAndClose */ + final TxnEndHook afterClose = + new TxnEndHook(2, txn3, IllegalStateException.class, + Transaction.State.ABORTED, done, + "hook: commit after close"); + ((MasterTxn)DbInternal.getTxn(txn3)).setConvertHook(afterClose); + hooks.add(afterClose); + return hooks; + } + + private Set setupInsertHooks(Transaction txn4, + Transaction txn5, + Transaction txn6, + Database db, + CountDownLatch done) { + + Set hooks = new HashSet<>(); + /* Insert while frozen in MasterTxn.convertToReplayTxnAndClose */ + final AdditionalWriteHook whileFrozen = + new AdditionalWriteHook(0, db, txn4, + UnknownMasterException.class, + Transaction.State.MUST_ABORT, done, 20, + "hook: insert while frozen"); + + ((MasterTxn)DbInternal.getTxn(txn4)).setConvertHook(whileFrozen); + hooks.add(whileFrozen); + + /* Insert after the freeze in MasterTxn.convertToReplayTxnAndClose. 
*/ + final AdditionalWriteHook afterUnfreeze = + new AdditionalWriteHook(0, db, txn5, + UnknownMasterException.class, + Transaction.State.MUST_ABORT, done, 21, + "hook: insert after unfreeze"); + ((MasterTxn)DbInternal.getTxn(txn5)).setConvertHook(afterUnfreeze); + hooks.add(afterUnfreeze); + + /* Insert after close in MasterTxn.convertToReplayTxnAndClose */ + final AdditionalWriteHook afterClose = + new AdditionalWriteHook(0, db, txn6, + UnknownMasterException.class, + Transaction.State.MUST_ABORT, done, 22, + "hook: insert after close"); + ((MasterTxn)DbInternal.getTxn(txn6)).setConvertHook(afterClose); + hooks.add(afterClose); + + + return hooks; + } + + private Set setupUpdateHooks(Transaction txn7, + Transaction txn8, + Transaction txn9, + Database db, + CountDownLatch done) { + + Set hooks = new HashSet<>(); + /* Insert before the freeze in MasterTxn.convertToReplayTxnAndClose */ + AdditionalWriteHook whileFrozen = + new AdditionalWriteHook(0, db, txn7, + UnknownMasterException.class, + Transaction.State.MUST_ABORT, done, 7, + "hook: update while frozen"); + + ((MasterTxn)DbInternal.getTxn(txn7)).setConvertHook(whileFrozen); + hooks.add(whileFrozen); + + /* Insert after the freeze in MasterTxn.convertToReplayTxnAndClose. */ + final AdditionalWriteHook afterUnfreeze = + new AdditionalWriteHook(0, db, txn8, + UnknownMasterException.class, + Transaction.State.MUST_ABORT, done, 8, + "hook: update after Unfreeze"); + ((MasterTxn)DbInternal.getTxn(txn8)).setConvertHook(afterUnfreeze); + hooks.add(afterUnfreeze); + + /* Insert after close in MasterTxn.convertToReplayTxnAndClose */ + final AdditionalWriteHook afterClose = + new AdditionalWriteHook(0, db, txn9, + UnknownMasterException.class, + Transaction.State.MUST_ABORT, done, 9, + "hook: update after close"); + ((MasterTxn)DbInternal.getTxn(txn9)).setConvertHook(afterClose); + hooks.add(afterClose); + + return hooks; + } + + /* Setup hooks that execute just after the RepNode converts to master */ + private Set setupRNConvertHooks(RepNode repNode, + Transaction txn10, + Transaction txn11, + Transaction txn12, + Database db, + CountDownLatch done) { + + Set hooks = new HashSet<>(); + /* Try to do a write just after the node becomes unknown */ + AdditionalWriteHook update = + new AdditionalWriteHook(0, db, txn10, + UnknownMasterException.class, + Transaction.State.OPEN, done, 10, + "hook: update after unknown"); + repNode.setConvertHook(update); + hooks.add(update); + + /* Commit after the node becomes unknown. 
*/ + final TxnEndHook commitH = + new TxnEndHook(0, txn11, null, Transaction.State.ABORTED, + done, "hook: commit after unknown"); + repNode.setConvertHook(commitH); + hooks.add(commitH); + + /* Try to do an insert just after the node becomes unknown */ + AdditionalWriteHook insert = + new AdditionalWriteHook(0, db, txn12, + UnknownMasterException.class, + Transaction.State.OPEN, done, 112, + "hook: insert after unknown"); + repNode.setConvertHook(insert); + hooks.add(insert); + + return hooks; + } + + @Test + public void testLateJoiner() throws Exception { + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + + final RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + replica.closeEnv(); + Set replicas = new HashSet<>(); + replicas.add(replicaName); + try { + masterEnv.transferMaster(replicas, 5, TimeUnit.SECONDS); + fail("transfer to non-running node should have failed"); + } catch (MasterTransferFailureException e) { + // expected + } + final Throwable[] th = new Throwable[1]; + Thread t = new Thread(new Runnable() { + @Override + public void run() { + try { + Thread.sleep(5000); + restartNodes(replica); + } catch (Throwable xcp) { + th[0] = xcp; + } + } + }); + t.start(); + String result = + masterEnv.transferMaster(replicas, 10, TimeUnit.SECONDS); + assertEquals(replicaName, result); + + t.join(60000); + assertFalse(t.isAlive()); + if (th[0] != null) { + throw new Exception("node-starter thread threw exception", th[0]); + } + + awaitSettle(master, replica); + master.closeEnv(); + master.openEnv(); + } + + @Test + public void testStupidCodingMistakes() throws Exception { + createGroup(); + ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + try { + master.transferMaster(null, 1, TimeUnit.SECONDS); + fail("null 'replicas' argument should be rejected"); + } catch (IllegalArgumentException e) { + // expected + } + Set replicas = new HashSet<>(); + try { + master.transferMaster(replicas, 1, TimeUnit.SECONDS); + fail("empty 'replicas' set should be rejected"); + } catch (IllegalArgumentException e) { + // expected + } + replicas.add(repEnvInfo[1].getEnv().getNodeName()); + try { + master.transferMaster(replicas, 1, null); + fail("null TimeUnit should be rejected"); + } catch (IllegalArgumentException e) { + // expected + } + for (int t = 0; t >= -2; t -= 2) { + try { + master.transferMaster(replicas, t, TimeUnit.SECONDS); + fail("bogus timeout value " + t + " should be rejected"); + } catch (IllegalArgumentException e) { + // expected; + } + } + replicas.add("venus"); + try { + master.transferMaster(replicas, 1, TimeUnit.SECONDS); + fail("bogus replica name should be rejected"); + } catch (IllegalArgumentException e) { + // expected + } + + /* Transfer to monitor */ + final Monitor monitor = createMonitor(100, "monitor100"); + try { + monitor.register(); + replicas = Collections.singleton("monitor100"); + try { + master.transferMaster(replicas, 1, TimeUnit.SECONDS); + fail("Operation should fail if invoked on a monitor"); + } catch (IllegalArgumentException e) { + /* Expected */ + } + } finally { + monitor.shutdown(); + } + + /* Try to transfer to secondary */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + final RepEnvInfo secondaryInfo = repEnvInfo[repEnvInfo.length - 1]; + secondaryInfo.getRepConfig().setNodeType(NodeType.SECONDARY); + secondaryInfo.openEnv(); + replicas = Collections.singleton(secondaryInfo.getEnv().getNodeName()); + try { + 
master.transferMaster(replicas, 1, TimeUnit.SECONDS); + fail("Transfer to SECONDARY should throw" + + " IllegalArgumentException"); + } catch (IllegalArgumentException e) { + logger.info("Transfer to SECONDARY: " + e); + } + + replicas = new HashSet<>(); + replicas.add(repEnvInfo[2].getEnv().getNodeName()); + try { + repEnvInfo[1].getEnv().transferMaster(replicas, + 1, TimeUnit.SECONDS); + fail("operation should be rejected if invoked on replica"); + } catch (IllegalStateException e) { + // expected + } + + /* + * If the master itself is in the list of candidate targets, the + * operation is defined to complete immediately, successfully. This + * doesn't really quite qualify as a "stupid coding mistake". But we + * include it here on the grounds that it's similarly trivial. + */ + replicas = new HashSet<>(); + for (int i = 0; i < groupSize; i++) { + replicas.add(repEnvInfo[i].getEnv().getNodeName()); + } + String result = master.transferMaster(replicas, 1, + TimeUnit.MILLISECONDS); + assertEquals(master.getNodeName(), result); + + /* + * Existing master env handle should still remain active. Check that + * these operations do not throw exceptions. + */ + assertTrue(master.isValid()); + Thread.sleep(2000); + Transaction txn = master.beginTransaction(null, null); + txn.abort(); + } + + /** + * Tests recovery from thread interrupt. Users are not supposed to + * interrupt their threads when they're in a call to JE, so this should + * never happen. Still, we should do something reasonable. + */ + @Test + public void testInterruption() throws Exception { + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + RepEnvInfo replica = repEnvInfo[1]; + final String replicaName = replica.getEnv().getNodeName(); + final ReplicatedEnvironment masterEnv = master.getEnv(); + + replica.closeEnv(); + final int pause = 10; + final int timeout = (int)TICK * pause * 2; + Transferer transferer = + new Transferer(masterEnv, replicaName, + timeout, TimeUnit.MILLISECONDS); + Thread thread = new Thread(transferer); + thread.start(); + Thread.sleep(TICK * pause); + thread.interrupt(); + thread.join(5000); + assertFalse(thread.isAlive()); + assertNull(transferer.getUnexpectedException()); + assertFalse(masterEnv.isValid()); + + master.closeEnv(); + restartNodes(master, replica); + } + + /** + * Tests interruption of a thread trying a commit, blocked in phase 2. + * There was previously a bug where an interrupt would cause the thread to + * be allowed to proceed with the commit. + */ + @Test + public void testTxnThreadInterrupt() throws Exception { + ensureAssertions(); + bePatient(); + createGroup(); + final RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + final Database db = + masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + replica.closeEnv(); + + int backlog = 20; + makeBacklog(masterEnv, db, backlog); + repEnvInfo[2].closeEnv(); + + final Set > expectedException = + makeSet(UnknownMasterException.class, + ThreadInterruptedException.class); + final int nThreads = 5; + final TxnGenerator generators[] = new TxnGenerator[nThreads]; + final Thread threads[] = new Thread[nThreads]; + int extraStartKey = (nThreads + 1) * 1000; + final TxnGenerator generator = + new TxnGenerator(db, extraStartKey, + null, SIMPLE_MAJORITY, expectedException); + final Thread thread = new Thread(generator); + + /* + * Halfway through the backlog, start the 5 txn generator threads. 
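+ * (The BarrierPacer replay hook installed below is the other party at
+ * each of these two-party barriers, so this runnable fires only once
+ * replica replay is genuinely under way.)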
+ */ + CyclicBarrier b1 = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + for (int i = 0; i < nThreads; i++) { + generators[i] = + new TxnGenerator(db, (i + 1) * 1000, + null, SIMPLE_MAJORITY, + expectedException); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + try { + Thread.sleep(5 * TICK); + } catch (InterruptedException ie) { + // doesn't happen + } + } + }); + + /* + * Once we've acked a couple of new txns (so that we know we're into + * phase 2, and so commits should be blocked), start an extra txn + * generator. + */ + CyclicBarrier b2 = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + try { + /* Wait a bit to make sure node 2's acks get + * processed. */ + Thread.sleep(2 * TICK); + thread.start(); + Thread.sleep(4 * TICK); + } catch (InterruptedException ie) { + // shouldn't happen + } + } + }); + + CountDownLatch latch = new CountDownLatch(1); + BarrierPacer pacer = new BarrierPacer(b1, b2, latch); + Replica.setInitialReplayHook(pacer); + replica.openEnv(NO_CONSISTENCY); + + Transferer mt = new Transferer(masterEnv, 60, replicaName); + Thread transferThread = new Thread(mt); + transferThread.start(); + Thread.sleep(5 * TICK); + + b1.await(); + b2.await(); + + /* + * Interrupt the txn generating thread; then finish the transfer and + * make sure the thread was not allowed to proceed to a successful + * commit. + */ + logger.info("interrupt the extra thread"); + thread.interrupt(); + logger.info("allow the xfer to complete"); + latch.countDown(); + + transferThread.join(10000); + assertFalse(transferThread.isAlive()); + assertFalse(mt.didItFail()); + assertNull(mt.getUnexpectedException()); + assertEquals(replicaName, mt.getResult()); + RepTestUtils.awaitCondition(new Callable() { + @Override + public Boolean call() { + return !master.getEnv().isValid(); + }}, 10000); + try { + db.close(); + } catch (DatabaseException de) { + // ignored + } + + for (int i = 0; i < nThreads; i++) { + threads[i].join(5000); + assertFalse(threads[i].isAlive()); + assertNull(generators[i].getUnexpectedException()); + } + + /* + * The master was invalidated, the environment must be closed and + * reopened. + */ + master.closeEnv(); + master.openEnv(); + + thread.join(5000); + assertFalse(thread.isAlive()); + assertEquals(0, generator.getCommitCount()); + Throwable xcp = generator.getExpectedException(); + assertNotNull(xcp); + assertTrue(xcp instanceof ThreadInterruptedException); + + Database db2 = + replica.getEnv().openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db2); + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + IntegerBinding.intToEntry(extraStartKey, key1); + OperationStatus ret = db2.get(null, key1, value, null); + assertEquals(NOTFOUND, ret); + db2.close(); + } + + /** + * Closes the master env handle while a locally invoked Master Transfer + * operation is in progress, and transaction threads are blocked in + * commit. Applications are not supposed to close the env handle while + * other operations are in progress, so this shouldn't happen, so it's OK + * if the result is a bit ugly. But even in this case, it should + * eventually be possible to get resources cleaned up enough so as to be + * able to open a fresh env handle and start over again. + */ + @Test + public void testEnvCloseLocal() throws Exception { + RepEnvInfo master = repEnvInfo[0]; + /* Disable ugly System.out warning messages. 
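+ * Closing the handle while txns are still pending would otherwise trip
+ * the env leak checker at close, hence ENV_CHECK_LEAKS is set to false.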
*/ + master.getEnvConfig().setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false"); + + ensureAssertions(); + long ackTimeout = 20 * TICK; // 20 >= sum of sleep durations below + bePatient(ackTimeout); + createGroup(); + ReplicatedEnvironment masterEnv = master.getEnv(); + RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + final Database db = + masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + replica.closeEnv(); + + int backlog = 20; + makeBacklog(masterEnv, db, backlog); + repEnvInfo[2].closeEnv(); + + final Set > expectedException = + new HashSet< >(); + expectedException.add(ThreadInterruptedException.class); + expectedException.add(NullPointerException.class); + expectedException.add(InsufficientReplicasException.class); + expectedException.add(InsufficientAcksException.class); + expectedException.add(UnknownMasterException.class); + expectedException.add(EnvironmentFailureException.class); + final int nThreads = 5; + final TxnGenerator generators[] = new TxnGenerator[nThreads]; + final Thread threads[] = new Thread[nThreads]; + int extraStartKey = (nThreads + 1) * 1000; + final TxnGenerator generator = + new TxnGenerator(db, extraStartKey, + null, SIMPLE_MAJORITY, expectedException); + final Thread thread = new Thread(generator); + + /* + * Halfway through the backlog, start the 5 txn generator threads. + */ + CyclicBarrier b1 = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + for (int i = 0; i < nThreads; i++) { + generators[i] = + new TxnGenerator(db, (i + 1) * 1000, + null, SIMPLE_MAJORITY, + expectedException); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + try { + Thread.sleep(5 * TICK); + } catch (InterruptedException ie) { + // doesn't happen + } + } + }); + + /* + * Once we've acked a couple of new txns (so that we know we're into + * phase 2, and so commits should be blocked), start an extra txn + * generator. + */ + CyclicBarrier b2 = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + try { + /* Wait a bit to make sure node 2's acks get + * processed. */ + Thread.sleep(2 * TICK); + thread.start(); + Thread.sleep(4 * TICK); + } catch (InterruptedException ie) { + // shouldn't happen + } + } + }); + + CountDownLatch latch = new CountDownLatch(1); + BarrierPacer pacer = new BarrierPacer(b1, b2, latch); + Replica.setInitialReplayHook(pacer); + replica.openEnv(NO_CONSISTENCY); + + Transferer mt = new Transferer(masterEnv, 60, replicaName); + Thread transferThread = new Thread(mt); + transferThread.start(); + Thread.sleep(5 * TICK); + + b1.await(); + b2.await(); + + try { + master.closeEnv(); + } catch (DatabaseException de) { + // expected + } + thread.join(10000); + assertFalse(thread.isAlive()); + assertEquals(0, generator.getCommitCount()); + + transferThread.join(10000); + assertFalse(transferThread.isAlive()); + assertTrue(mt.didItFail()); + assertNull(mt.getUnexpectedException()); + + latch.countDown(); + for (Thread t : threads) { + t.join(60000); + assertFalse(t.isAlive()); + } + + /* As usual, clean up, to placate tearDown(). */ + restartNodes(master, repEnvInfo[2]); + } + + @Test + public void testOverlappingAttempts() throws Exception { + createGroup(); + + // shut down Node 2 + // start a thread to transfer to node 2, with 20 second timeout? 
+ // start another thread to sleep a bit, and then try a transfer + // it should fail, verify that + // once that happens, start up Node 2 + // verify that the original transfer completed successfully + // + RepEnvInfo master = repEnvInfo[0]; + RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + replica.closeEnv(); + Transferer xfr1 = new Transferer(0, master.getEnv(), + replicaName, 30, false); + Thread t1 = new Thread(xfr1); + t1.start(); + + /* + * This allows 10 seconds to make sure first thread has already started + * the operation. + */ + Transferer xfr2 = new Transferer(10, master.getEnv(), + replicaName, 180, false); + Thread t2 = new Thread(xfr2); + t2.start(); + + /* + * Wait for the second thread, which should fail as soon as it tries + * the operation. + */ + t2.join(200 * 1000); + assertFalse(t2.isAlive()); + assertTrue(xfr2.didItFail()); + assertNull(xfr2.getUnexpectedException()); + + /* + * Now start the target replica, which should cause the original + * transfer to complete right away. + */ + replica.openEnv(); + t1.join(5 * 1000); + assertFalse(t1.isAlive()); + assertFalse(xfr1.didItFail()); + assertNull(xfr1.getUnexpectedException()); + awaitSettle(master, replica); + master.closeEnv(); + master.openEnv(); + + // part 2: + // now the master is Node 2; shut down Node 3 + // start a thread to transfer to node 3 ("futile") + // start another thread to sleep a bit, and then try transfer with + // force, but to node 1! + // verify that first thread got an exception + // verify that second thread completed successfully, and that the + // master indeed is now at Node 1. + // (maybe we can do the second "forcing" attempt in our main thread + // here) + master = repEnvInfo[1]; + RepEnvInfo futile = repEnvInfo[2]; + replica = repEnvInfo[0]; + futile.closeEnv(); + + xfr1 = new Transferer(0, master.getEnv(), + futile.getRepConfig().getNodeName(), + 180, false); + t1 = new Thread(xfr1); + t1.start(); + xfr2 = new Transferer(10, master.getEnv(), + replica.getEnv().getNodeName(), 10, true); + t2 = new Thread(xfr2); + t2.start(); + t2.join(30 * 1000); + assertFalse(t2.isAlive()); + assertFalse(xfr2.didItFail()); + assertNull(xfr2.getUnexpectedException()); + + t1.join(5 * 1000); + assertFalse(t1.isAlive()); + assertTrue(xfr1.didItFail()); + assertNull(xfr1.getUnexpectedException()); + awaitSettle(master, replica); + + /* Clean everything up as usual, to satisfy tearDown(). */ + master.closeEnv(); + master.openEnv(); + futile.openEnv(); + } + + /** + * Tests a scenario in which completion of both phase 1 and phase 2 must be + * triggered by acks to fresh txns from the replica. In order to be sure + * this is happening, we use a {@code TestHook} to slow down commit + * processing at the replica to a pace of one per second. + */ + @Test + public void testConcurrentTxns() throws Exception { + ensureAssertions(); + bePatient(); + createGroup(); + int nRecords = 5; // arbitrary + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + populateDB(masterEnv, nRecords); + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + + /* This replica will be the target of the transfer. */ + RepEnvInfo replica = repEnvInfo[1]; + replica.closeEnv(); + + /* The replica will have to "catch up" with 20 transactions. 
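+ * Half of this backlog later becomes the AckPacer's permitted lag
+ * (maxLag = backlog / 2 below).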
*/ + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + long arbitraryData = 1732; + LongBinding.longToEntry(arbitraryData, value); + int backlog = 20; + for (int i = 0; i < backlog; i++) { + IntegerBinding.intToEntry(i, key1); + db.put(null, key1, value); + } + + /* + * Shut down other replica, so that master will be forced to await + * acks from our target replica. + */ + repEnvInfo[2].closeEnv(); + + /* + * Create and install a test hook that throttles the rate of commit/ack + * processing at the replica. + */ + int maxLag = backlog / 2; + Semaphore throttle = new Semaphore(maxLag); + throttle.drainPermits(); + int initialTxns = backlog - maxLag; + AckPacer pacer = new AckPacer(throttle, initialTxns, maxLag); + Replica.setInitialReplayHook(pacer); + + int nThreads = 5; + Thread threads[] = new Thread[nThreads]; + ThrottlingGenerator generators[] = new ThrottlingGenerator[nThreads]; + for (int i = 0; i < nThreads; i++) { + generators[i] = new ThrottlingGenerator(db, throttle, + 10000 * (i + 1)); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + logger.info("restart the replica, with a Replay hook installed"); + replica.openEnv(); + + /* Reset, so as not to affect future env restarts. */ + Replica.setInitialReplayHook(null); + + logger.info("start Master Transfer with 60-second timeout " + + "(expected duration ~ 30 sec)"); + Set target = new HashSet<>(); + String replicaName = replica.getEnv().getNodeName(); + target.add(replicaName); + + String result = masterEnv.transferMaster(target, 60, TimeUnit.SECONDS); + assertEquals(replicaName, result); + + logger.info("transfer succeeded, now waiting (max 5 sec) " + + "for old master to become replica"); + + /* 5 seconds should be plenty of time for everything to settle down. */ + /* The replica will have become the new master. */ + awaitSettle(master, replica); + + logger.info("Master and replica have settled"); + + /* + * All the threads that are mimicking application transactions should + * be finished. If we're unlucky, a thread could be waiting in a + * post-commit hook for insufficient acks, having miraculously dodged + * the commit block. Since the ack timeout is rather long, we might + * have to wait quite some time for this thread to finish. Fortunately + * that doesn't happen often. + */ + for (Thread t : threads) { + t.join(65000); + assertFalse(t.isAlive()); + } + + db.close(); + restartNodes(repEnvInfo[2]); + + /* + * Examine each transaction generator to see that it terminated as + * expected, and to find out how much it succeeded in writing. Verify + * that the expected records indeed survive at the new master. + */ + key1 = new DatabaseEntry(); + value = new DatabaseEntry(); + ReplicatedEnvironment newMaster = replica.getEnv(); + db = newMaster.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + for (int i = 0; i < nThreads; i++) { + Throwable unexpected = generators[i].getUnexpectedException(); + assertNull("" + unexpected, unexpected); + int keyNumber = generators[i].getStartKey(); + int count = generators[i].getCommitCount(); + while (count-- > 0) { + IntegerBinding.intToEntry(keyNumber++, key1); + OperationStatus ret = db.get(null, key1, value, null); + assertEquals(SUCCESS, ret); + } + /* Make sure no extra record snuck in there. 
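+ * I.e. key (startKey + commitCount) must come back NOTFOUND.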
*/ + IntegerBinding.intToEntry(keyNumber, key1); + OperationStatus ret = db.get(null, key1, value, null); + assertEquals(NOTFOUND, ret); + } + db.close(); + } + + /* + * Assertions must be enabled for many of these tests, because the test + * hook is executed as a side effect in an assert statement. + */ + private void ensureAssertions() throws Exception { + boolean assertionsEnabled = false; + assert (assertionsEnabled = true); + if (!assertionsEnabled) { + throw new Exception("this test requires assertions be enabled"); + } + } + + /** + * Sets a very long ack timeout, so that our pending commits remain in + * place while the Master Transfer operation waits, even when we've + * deliberately slowed things down so as to control the relative timing of + * events. + */ + private void bePatient() { + bePatient(60000); + } + + private void bePatient(long timeout) { + for (int i = 0; i < groupSize; i++) { + repEnvInfo[i].getRepConfig(). + setReplicaAckTimeout(timeout, TimeUnit.MILLISECONDS); + } + } + + /** + * Tests transfer operations when the last stuff in the log is txn aborts. + */ + @Test + public void testLogEndAbort() throws Exception { + ensureAssertions(); + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + + RepEnvInfo replica = repEnvInfo[1]; + replica.closeEnv(); + + /* Generate a backlog of txns while the replica is down. */ + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + long arbitraryData = 1732; + LongBinding.longToEntry(arbitraryData, value); + int n = 20; + for (int i = 0; i < n; i++) { + IntegerBinding.intToEntry(i, key1); + db.put(null, key1, value); + } + for (int i = n; i < 2 * n; i++) { + Transaction t = masterEnv.beginTransaction(null, null); + try { + IntegerBinding.intToEntry(i, key1); + db.put(t, key1, value); + t.abort(); + } finally { + safeAbort(t); + } + } + db.close(); + + restartNodes(replica); + + /* + * First try the simple case of a transfer when the replica has already + * caught up. + */ + Set target = new HashSet<>(); + String replicaName = replica.getEnv().getNodeName(); + target.add(replicaName); + String result = masterEnv.transferMaster(target, 10, TimeUnit.SECONDS); + + awaitSettle(master, replica); + master.closeEnv(); + master.openEnv(); + + /* + * Now try the case where the replica is catching up at the time we + * start the transfer operation. To do so, install a test hook that + * will force the replica to take a long time to process each commit or + * abort. + */ + replica = repEnvInfo[2]; + replica.closeEnv(); + /* Do some more txns, to create a backlog again. 
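+ * As with the first batch, the second half of these txns are deliberately
+ * aborted, so the replay stream the replica must work through again ends
+ * with LOG_TXN_ABORT entries; the hook installed below delays each abort
+ * record by TICK to keep the replica catching up while the transfer runs.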
*/ + master = repEnvInfo[1]; + masterEnv = master.getEnv(); + db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + key1 = new DatabaseEntry(); + value = new DatabaseEntry(); + arbitraryData = 314159; + LongBinding.longToEntry(arbitraryData, value); + n = 20; + for (int i = 0; i < n; i++) { + IntegerBinding.intToEntry(i, key1); + db.put(null, key1, value); + } + for (int i = n; i < 2 * n; i++) { + Transaction t = masterEnv.beginTransaction(null, null); + try { + IntegerBinding.intToEntry(i, key1); + db.put(t, key1, value); + t.abort(); + } finally { + safeAbort(t); + } + } + db.close(); + + logger.info("set replay hook"); + Replica.setInitialReplayHook + (new TestHookAdapter() { + @Override + public void doHook(Message m) { + if (m.getOp() != Protocol.SHUTDOWN_REQUEST && + m.getOp() != Protocol.HEARTBEAT) { + Protocol.Entry e = (Protocol.Entry) m; + final InputWireRecord record = e.getWireRecord(); + final byte entryType = record.getEntryType(); + if (LOG_TXN_ABORT.equalsType(entryType)) { + try { + Thread.sleep(TICK); + } catch (InterruptedException ie) { + // doesn't happen + } + } + } + } + }); + replica.openEnv(); + Replica.setInitialReplayHook(null); + + logger.info("start Master Transfer with 60-second timeout " + + "(expected duration ~ 20 sec)"); + target = new HashSet<>(); + replicaName = replica.getEnv().getNodeName(); + target.add(replicaName); + + result = masterEnv.transferMaster(target, 60, TimeUnit.SECONDS); + assertEquals(replicaName, result); + awaitSettle(master, replica); + + master.closeEnv(); + master.openEnv(); + } + + /** + * Wait for the former master to become a replica, and the new master to + * get into the "MASTER" state. + */ + public static void awaitSettle(final RepEnvInfo oldMaster, // former master + final RepEnvInfo oldReplica) // upcoming master + throws Exception { + + try { + RepTestUtils.awaitCondition(new Callable() { + @Override + public Boolean call() { + return + State.MASTER.equals(oldReplica.getEnv().getState()) && + State.REPLICA.equals(oldMaster.getEnv().getState()); + } + }, 10000); + } catch (Throwable e) { + throw new RuntimeException( + "Failed waiting for mastership change" + + " oldReplica.state=" + oldReplica.getEnv().getState() + + " oldMaster.state=" + oldMaster.getEnv().getState(), + e); + } + } + + /** + * Tests a second Master Transfer attempt, after a previous attempt has + * timed out. 
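+ * The contract exercised here: a transfer whose target never catches up
+ * fails with {@code MasterTransferFailureException}, the node remains
+ * master and keeps accepting writes, and a later attempt against a
+ * reachable target succeeds. In outline (a condensed sketch of what the
+ * Transferer helper threads below do, where {@code targets} stands for
+ * the single-element set the helper builds):
+ *
+ *   try {
+ *       masterEnv.transferMaster(targets, 10, TimeUnit.SECONDS);
+ *   } catch (MasterTransferFailureException e) {
+ *       // timed out; masterEnv still accepts writes (see makeTxns below)
+ *   }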
+ */ + @Test + public void testAnotherTryAfterFailure() throws Exception { + createGroup(); + + RepEnvInfo replica = repEnvInfo[2]; + String replicaName = replica.getEnv().getNodeName(); + replica.closeEnv(); + + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + makeTxns(masterEnv); + + Transferer xfr = new Transferer(masterEnv, 10, replicaName); + Thread t = new Thread(xfr); + t.start(); + makeTxns(masterEnv); + t.join(20 * 1000); + assertFalse(t.isAlive()); + assertTrue(xfr.didItFail()); + assertNull(xfr.getUnexpectedException()); + + replica.openEnv(); + makeTxns(masterEnv); + + replica = repEnvInfo[1]; + replicaName = replica.getEnv().getNodeName(); + replica.closeEnv(); + + xfr = new Transferer(10, masterEnv, replicaName, 20, false); + t = new Thread(xfr); + t.start(); + makeTxns(masterEnv); + replica.openEnv(); + t.join(30 * 1000); + assertFalse(t.isAlive()); + assertFalse(xfr.didItFail()); + assertNull(xfr.getUnexpectedException()); + + master.closeEnv(); + master.openEnv(); + } + + private void makeTxns(Database db, int n) throws Exception { + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + for (int i = 0; i < n; i++) { + IntegerBinding.intToEntry(i, key1); + LongBinding.longToEntry(i, value); + db.put(null, key1, value); + } + } + + private void makeTxns(ReplicatedEnvironment env) throws Exception { + int n = 5; // arbitrary + Database db = env.openDatabase(null, TEST_DB_NAME, dbconfig); + try { + makeTxns(db, n); + } finally { + db.close(); + } + } + + /** + * Tests a scenario in which a Master Transfer operation times out while in + * phase 2, freeing previously blocked transactions to complete + * successfully, and allowing future transactions to execute freely. + */ + @Test + public void testPhase2Timeout() throws Exception { + ensureAssertions(); + bePatient(); + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + replica.closeEnv(); + + // create a backlog, with node 2 down + final int backlog = 20; + makeTxns(db, backlog); + + logger.info("set replay hook"); + final CountDownLatch finishLatch = new CountDownLatch(1); + final CountDownLatch generatorLatch = new CountDownLatch(1); + + // start 5 txn generator threads. Need to set large ack timeout + // Need to add another latch, that the pacer hook trips when it first + // runs, to let the generator threads know it's OK to start running. + // (Or maybe it's better to do like the other test, and trip that one + // half way through the backlog). + + final int nThreads = 5; + TxnGenerator generators[] = new TxnGenerator[nThreads]; + Thread threads[] = new Thread[nThreads]; + for (int i = 0; i < nThreads; i++) { + generators[i] = new TxnGenerator(db, 1000 * (i + 1), + generatorLatch); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + + // The hook paces commits until it sees one needing an ack (which means + // we've made it past the backlog. It then acks only 2 txns (without + // delay), and then sleeps indefinitely (until we deliberately release + // it). 
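+ // Timing sketch, assuming TICK is the pacer's per-commit delay: the hook
+ // spends roughly backlog ticks replaying the backlog, acks two live txns
+ // quickly, and then parks on finishLatch, so the (backlog + backlog/2)
+ // tick timeout used below expires while the transfer is still waiting in
+ // phase 2, which is exactly the failure this test wants to observe.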
+ Replica.setInitialReplayHook + (new TwoAckPacer(backlog, generatorLatch) { + @Override + protected void tripOnSecondAck() throws InterruptedException { + finishLatch.await(); + } + }); + replica.openEnv(); + Replica.setInitialReplayHook(null); + + // start the master transfer operation, with timeout + Set replicas = new HashSet<>(); + replicas.add(replicaName); + try { + int timeoutSecs = backlog + backlog/2; + int timeout = (int)(timeoutSecs * TICK); + masterEnv.transferMaster(replicas, timeout, TimeUnit.MILLISECONDS); + fail("expected timeout"); + } catch (MasterTransferFailureException e) { + // expected + } + finishLatch.countDown(); + + // join generator threads and make sure they've been able to generate + // their txns successfully. + for (Thread t : threads) { + t.join(10000); + assertFalse(t.isAlive()); + } + for (TxnGenerator g : generators) { + assertNull(g.getUnexpectedException()); + } + db.close(); + + // make sure new txns can work too + // + makeTxns(masterEnv); + + // do a transfer to node 3 + replica = repEnvInfo[2]; + replicaName = replica.getEnv().getNodeName(); + replicas.clear(); + replicas.add(replicaName); + String result = + masterEnv.transferMaster(replicas, 10, TimeUnit.SECONDS); + assertEquals(replicaName, result); + awaitSettle(master, replica); + + master.closeEnv(); + master.openEnv(); + } + + /** + * Tests the scenario in which one of the candidate replicas establishes + * phase 2, but then a different replica becomes the winner. + */ + @Test + public void testFeederRace() throws Exception { + ensureAssertions(); + bePatient(); + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + final RepEnvInfo node2 = repEnvInfo[1]; + final RepEnvInfo node3 = repEnvInfo[2]; + final String node2Name = node2.getEnv().getNodeName(); + final String node3Name = node3.getEnv().getNodeName(); + node2.closeEnv(); + node3.closeEnv(); + + final int backlog = 20; + makeBacklog(masterEnv, db, backlog); + + /* Restart node2 with a replay-pacing test hook. 
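+ * The intended race: node2 catches up first and establishes phase 2 by
+ * acking two live txns, then stalls inside its hook; node3 starts later,
+ * replays without interference, and should win the transfer even though
+ * node2 reached phase 2 first.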
*/ + final CountDownLatch finishLatch = new CountDownLatch(1); + final CountDownLatch generatorLatch = new CountDownLatch(1); + final CountDownLatch node3Latch = new CountDownLatch(1); + + // start 5 txn generator threads + Set > allowedExceptions = + makeSet(UnknownMasterException.class, + InsufficientAcksException.class); + final int nThreads = 5; + TxnGenerator generators[] = new TxnGenerator[nThreads]; + Thread threads[] = new Thread[nThreads]; + for (int i = 0; i < nThreads; i++) { + generators[i] = new TxnGenerator(db, 1000 * (i + 1), generatorLatch, + SIMPLE_MAJORITY, + allowedExceptions); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + + Replica.setInitialReplayHook + (new TwoAckPacer(backlog, generatorLatch) { + @Override + protected void tripOnSecondAck() + throws InterruptedException { + + logger.info("long sleep after 2nd ack"); + Thread.sleep(5 * TICK); + logger.info("will start node3"); + node3Latch.countDown(); + finishLatch.await(); + } + }); + logger.info("restart node2 with replay hook in place"); + node2.openEnv(); + Replica.setInitialReplayHook(null); + + class NodeStarter implements Runnable { + private Throwable exception; + @Override + public void run() { + try { + node3Latch.await(); + restartNodes(node3); + } catch (Throwable t) { + t.printStackTrace(); + exception = t; + } + } + + Throwable getUnexpectedException() { + return exception; + } + } + + // start a thread to start node3 + NodeStarter nodeStarter = new NodeStarter(); + Thread nodeStarterThread = new Thread(nodeStarter); + nodeStarterThread.start(); + + // do a master transfer targeting node2 and node3 + Set replicas = new HashSet<>(); + replicas.add(node2Name); + replicas.add(node3Name); + + /* + * Account for the time the pacer (q.v.) will introduce, plus a little + * extra just in case. + */ + int timeoutSecs = backlog + (2 * 5); + int timeout = (int)(timeoutSecs * TICK); + int extra = 5000; + timeout += extra; + String result = masterEnv.transferMaster(replicas, + timeout, + TimeUnit.MILLISECONDS); + + /* + * Most of the work of this node-starter thread must have already been + * done, if the transfer operation has completed. But make sure every + * last bit has completed, so that node 3's repEnvInfo env handle + * reference is set before proceeding (since we pass it to + * awaitSettle()). And we may as well finally now allow node 2 to + * finish up too, to avoid leaving it in the weird, artificial blocked + * state. 
+ */ + nodeStarterThread.join(5000); + assertFalse(nodeStarterThread.isAlive()); + assertNull(nodeStarter.getUnexpectedException()); + finishLatch.countDown(); + + assertEquals(node3Name, result); + awaitSettle(master, node3); + + // join threads and check for unexpected errors + for (Thread t : threads) { + t.join(5000); + assertFalse(t.isAlive()); + } + + for (TxnGenerator g : generators) { + assertNull("exception=" + g.getUnexpectedException(), + g.getUnexpectedException()); + } + + db.close(); + master.closeEnv(); + master.openEnv(); + } + + private void makeBacklog(Environment env, Database db, int n) + throws Exception { + + TransactionConfig tc = new TransactionConfig(); + tc.setDurability(new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE)); + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + for (int i = 0; i < n; i++) { + Transaction txn = env.beginTransaction(null, tc); + try { + IntegerBinding.intToEntry(i, key1); + LongBinding.longToEntry(i, value); + db.put(txn, key1, value); + txn.commit(); + } finally { + safeAbort(txn); + } + } + } + + /** + * Tests the scenario where a Feeder dies after having asserted the end of + * phase 1. Case #1: another Feeder has also "caught up" by the time the + * first Feeder dies. + */ + @Test + public void testFeederDeath1() throws Exception { + ensureAssertions(); + bePatient(); + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + final Database db = + masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + + final RepEnvInfo node2 = repEnvInfo[1]; + String node2Name = node2.getEnv().getNodeName(); + RepEnvInfo node3 = repEnvInfo[2]; + String node3Name = node3.getEnv().getNodeName(); + node2.closeEnv(); + node3.closeEnv(); + + int backlog = 20; + makeBacklog(masterEnv, db, backlog); + + final int nThreads = 5; + final TxnGenerator generators[] = new TxnGenerator[nThreads]; + final Thread threads[] = new Thread[nThreads]; + int extraStartKey = (nThreads + 1) * 1000; + final TxnGenerator generator = + new TxnGenerator(db, extraStartKey, SIMPLE_MAJORITY); + final Thread thread = new Thread(generator); + + // get everything ready before we actually start stuff. ... + + /* + * We'll first run node2, stopping at two rendezvous points: b2a and + * b2b. In the first rendezvous (when node 2 has gotten halfway + * through the 20-txn backlog), we'll start the 5 txn generator + * threads, and wait a moment for them to get going: + */ + CyclicBarrier b2a = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + for (int i = 0; i < nThreads; i++) { + generators[i] = new TxnGenerator(db, (i + 1) * 1000); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + try { + Thread.sleep(5 * TICK); + } catch (InterruptedException ie) { + // doesn't happen + } + } + }); + + /* + * At the next rendezvous (when node 2 has acked 2 new txns), we'll + * start one extra txn generating thread, and turn on node 2's "don't + * process stream" test flag (in order to prevent it from completing + * its phase 2). (Later we'll kill the node.) + */ + CyclicBarrier b2b = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + try { + /* Wait a bit to make sure node 2's acks get + * processed. 
*/ + Thread.sleep(2 * TICK); + thread.start(); + Thread.sleep(4 * TICK); + node2.getRepNode().replica().setDontProcessStream(); + } catch (InterruptedException ie) { + // shouldn't happen + } + } + }); + + /* + * Finally, we'll start node 3 after all that, have it run until it has + * acked 2 txns. Once we've processed that, we'll kill node 2, and + * then allow node 3 to proceed to the finish. + */ + final CountDownLatch latch2 = new CountDownLatch(1); + CyclicBarrier b3 = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + try { + Thread.sleep(2 * TICK); + + /* + * Here, we know node 3 has asserted completion of + * phase 1, so we're ready to kill node 2. + */ + latch2.countDown(); + node2.closeEnv(); + Thread.sleep(5 * TICK); + } catch (InterruptedException ie) { + // shouldn't happen + } + } + }); + + /* + * Preparation is complete; now we can start the real fun. Start node + * 2, and start the Master Transfer thread. + */ + BarrierPacer node2Pacer = new BarrierPacer(b2a, b2b, latch2); + Replica.setInitialReplayHook(node2Pacer); + node2.openEnv(NO_CONSISTENCY); + + Transferer mt = new Transferer(masterEnv, 60, node2Name, node3Name); + Thread transferThread = new Thread(mt); + transferThread.start(); + Thread.sleep(5 * TICK); + + b2a.await(); // complete 1st rendezvous point + b2b.await(); // 2nd rendezvous point + + /* + * Start node 3. It doesn't need a barrier for the backlog halfway + * point; just one after the first 2 acks. + */ + BarrierPacer node3Pacer = new BarrierPacer(null, b3, null); + Replica.setInitialReplayHook(node3Pacer); + node3.openEnv(NO_CONSISTENCY); + Replica.setInitialReplayHook(null); + + b3.await(); // wait for node 3 to "catch up" + + /* Wait for it all to end, and check the results. */ + transferThread.join(10000); + assertFalse(transferThread.isAlive()); + assertFalse(mt.didItFail()); + assertNull(mt.getUnexpectedException()); + assertEquals(node3Name, mt.getResult()); + + awaitSettle(master, node3); + try { + db.close(); + } catch (DatabaseException de) { + // ignored + } + + for (int i = 0; i < nThreads; i++) { + threads[i].join(5000); + assertFalse(threads[i].isAlive()); + assertNull(generators[i].getUnexpectedException()); + } + master.closeEnv(); + master.openEnv(); + + thread.join(5000); + assertFalse(thread.isAlive()); + assertNull(generator.getUnexpectedException()); + + Database db2 = + node3.getEnv().openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db2); + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + IntegerBinding.intToEntry(extraStartKey, key1); + OperationStatus ret = db2.get(null, key1, value, null); + assertEquals(NOTFOUND, ret); + db2.close(); + } + + /** + * Tests the scenario where a Feeder dies after having asserted the end of + * phase 1. Case #2a: no other Feeder has also caught up at the time of + * the death, and so we must rescind phase 1 completion and txn blockage; + * in fact no other Feeder is even running at the time of the death. 
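+ * "Rescinding" means the master must release the transactions it began
+ * blocking at the end of phase 1 and let them commit normally once the
+ * only caught-up Feeder is gone; the extra generator thread started at
+ * the second rendezvous is what verifies that unblocking happens.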
+ */ + @Test + public void testFeederDeath2a() throws Exception { + ensureAssertions(); + bePatient(); + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + final Database db = + masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + + final RepEnvInfo node2 = repEnvInfo[1]; + String node2Name = node2.getEnv().getNodeName(); + RepEnvInfo node3 = repEnvInfo[2]; + String node3Name = node3.getEnv().getNodeName(); + node2.closeEnv(); + node3.closeEnv(); + + int backlog = 20; + makeBacklog(masterEnv, db, backlog); + + final int nThreads = 5; + final TxnGenerator generators[] = new TxnGenerator[nThreads]; + final Thread threads[] = new Thread[nThreads]; + int extraStartKey = (nThreads + 1) * 1000; + final TxnGenerator generator = + new TxnGenerator(db, extraStartKey, NONE); + final Thread thread = new Thread(generator); + + /* + * We'll first run node2, stopping at two rendezvous points: b2a and + * b2b. In the first rendezvous (when node 2 has gotten halfway + * through the 20-txn backlog), we'll start the 5 txn generator + * threads, and wait a moment for them to get going: + */ + final Set > allowedExceptions = + makeSet(UnknownMasterException.class, + InsufficientReplicasException.class); + CyclicBarrier b2a = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + for (int i = 0; i < nThreads; i++) { + generators[i] = + new TxnGenerator(db, (i + 1) * 1000, null, + SIMPLE_MAJORITY, + allowedExceptions); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + try { + Thread.sleep(5 * TICK); + } catch (InterruptedException ie) { + // doesn't happen + } + } + }); + + /* + * At the next rendezvous (when node 2 has acked 2 new txns), we'll + * start one extra txn generating thread, and turn on node 2's "don't + * process stream" test flag (in order to prevent it from completing + * its phase 2). (Later we'll kill the node.) + */ + CyclicBarrier b2b = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + try { + /* Wait a bit to make sure node 2's acks get + * processed. */ + Thread.sleep(2 * TICK); + thread.start(); + Thread.sleep(4 * TICK); + node2.getRepNode().replica().setDontProcessStream(); + } catch (InterruptedException ie) { + // shouldn't happen + } + } + }); + + /* + * Preparation is complete. Start node 2, and start the Master + * Transfer thread. + */ + BarrierPacer node2Pacer = new BarrierPacer(b2a, b2b, null); + Replica.setInitialReplayHook(node2Pacer); + node2.openEnv(NO_CONSISTENCY); + Replica.setInitialReplayHook(null); + + Transferer mt = new Transferer(masterEnv, 60, node2Name, node3Name); + Thread transferThread = new Thread(mt); + transferThread.start(); + Thread.sleep(5 * TICK); + + b2a.await(); // complete 1st rendezvous point + b2b.await(); // 2nd rendezvous point + + /* + * Kill node 2; this should rescind the txn block. Then start node 3, + * with no test hook. + */ + node2.closeEnv(); + Thread.sleep(5 * TICK); + thread.join(5000); + assertFalse(thread.isAlive()); + assertNull(generator.getUnexpectedException()); + + node3.openEnv(NO_CONSISTENCY); + + /* Wait for it all to end, and check the results. 
*/ + transferThread.join(10000); + assertFalse(transferThread.isAlive()); + assertFalse(mt.didItFail()); + assertNull(mt.getUnexpectedException()); + assertEquals(node3Name, mt.getResult()); + + awaitSettle(master, node3); + try { + db.close(); + } catch (DatabaseException de) { + // ignored + } + + for (int i = 0; i < nThreads; i++) { + threads[i].join(5000); + assertFalse(threads[i].isAlive()); + assertNull("Saw exception " + + generators[i].getUnexpectedException(), + generators[i].getUnexpectedException()); + } + master.closeEnv(); + master.openEnv(); + + + Database db2 = node3.getEnv().openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db2); + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + IntegerBinding.intToEntry(extraStartKey, key1); + OperationStatus ret = db2.get(null, key1, value, null); + assertEquals(SUCCESS, ret); + db2.close(); + } + + /** + * Tests the scenario where a Feeder dies after having asserted the end of + * phase 1. Case #2b: no other Feeder has also caught up at the time of + * the death, and so we must rescind phase 1 completion and txn blockage; + * though the competing Feeder is at least running, working on its backlog. + */ + @Test + public void testFeederDeath2b() throws Exception { + ensureAssertions(); + bePatient(); + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + ReplicatedEnvironment masterEnv = master.getEnv(); + final Database db = + masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db); + + final RepEnvInfo node2 = repEnvInfo[1]; + String node2Name = node2.getEnv().getNodeName(); + RepEnvInfo node3 = repEnvInfo[2]; + String node3Name = node3.getEnv().getNodeName(); + node2.closeEnv(); + node3.closeEnv(); + + int backlog = 20; + makeBacklog(masterEnv, db, backlog); + + final int nThreads = 5; + final TxnGenerator generators[] = new TxnGenerator[nThreads]; + final Thread threads[] = new Thread[nThreads]; + int extraStartKey = (nThreads + 1) * 1000; + final TxnGenerator generator = + new TxnGenerator(db, extraStartKey, NONE); + final Thread thread = new Thread(generator); + + /* + * We'll first start both Feeders, allowing them to get halfway through + * the 20-txn backlog. At this first barrier's rendezvous point, we'll + * then start 5 txn generator threads. + */ + CyclicBarrier b23a = + new CyclicBarrier + (3, + new Runnable() { + @Override + public void run() { + for (int i = 0; i < nThreads; i++) { + generators[i] = new TxnGenerator(db, (i + 1) * 1000); + threads[i] = new Thread(generators[i]); + threads[i].start(); + } + try { + Thread.sleep(5 * TICK); + } catch (InterruptedException ie) { + // doesn't happen + } + } + }); + + /* + * At the next rendezvous (when node 2 has acked 2 new txns), we'll + * start one extra txn generating thread, and turn on node 2's "don't + * process stream" test flag (in order to prevent it from completing + * its phase 2). (Right after that we'll kill the node.) + */ + CyclicBarrier b2b = + new CyclicBarrier + (2, + new Runnable() { + @Override + public void run() { + try { + /* Wait a bit to make sure node 2's acks get + * processed. */ + Thread.sleep(2 * TICK); + thread.start(); + Thread.sleep(4 * TICK); + node2.getRepNode().replica().setDontProcessStream(); + } catch (InterruptedException ie) { + // shouldn't happen + } + } + }); + + CountDownLatch latch3 = new CountDownLatch(1); + + /* Preparation is complete. 
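+ * Note that the static Replica.setInitialReplayHook() is picked up at
+ * startup by the next replica environment to open, which is why each
+ * pacer is installed immediately before its corresponding openEnv() call
+ * and the hook is cleared once both nodes are up.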
*/ + BarrierPacer node2Pacer = new BarrierPacer(b23a, b2b, null); + Replica.setInitialReplayHook(node2Pacer); + node2.openEnv(NO_CONSISTENCY); + + BarrierPacer node3Pacer = new BarrierPacer(b23a, null, latch3); + Replica.setInitialReplayHook(node3Pacer); + node3.openEnv(NO_CONSISTENCY); + Replica.setInitialReplayHook(null); + + Transferer mt = new Transferer(masterEnv, 60, node2Name, node3Name); + Thread transferThread = new Thread(mt); + transferThread.start(); + Thread.sleep(5 * TICK); + + b23a.await(); // complete 1st rendezvous point + b2b.await(); // 2nd rendezvous point + + /* + * Kill node 2; this should rescind the txn block. Then release node 3, + * to complete its processing + */ + node2.closeEnv(); + Thread.sleep(5 * TICK); + thread.join(5000); + assertFalse(thread.isAlive()); + assertNull(generator.getUnexpectedException()); + + latch3.countDown(); + + /* Wait for it all to end, and check the results. */ + transferThread.join(10000); + assertFalse(transferThread.isAlive()); + assertFalse(mt.didItFail()); + assertNull(mt.getUnexpectedException()); + assertEquals(node3Name, mt.getResult()); + + awaitSettle(master, node3); + try { + db.close(); + } catch (DatabaseException de) { + // ignored + } + + for (int i = 0; i < nThreads; i++) { + threads[i].join(5000); + assertFalse(threads[i].isAlive()); + assertNull(generators[i].getUnexpectedException()); + } + master.closeEnv(); + master.openEnv(); + + Database db2 = node3.getEnv().openDatabase(null, TEST_DB_NAME, dbconfig); + databases.add(db2); + DatabaseEntry key1 = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + IntegerBinding.intToEntry(extraStartKey, key1); + OperationStatus ret = db2.get(null, key1, value, null); + assertEquals(SUCCESS, ret); + db2.close(); + } + + static class Transferer implements Runnable { + private final ReplicatedEnvironment env; + private final Set replicas; + private final int delay; + private final int timeout; + private final TimeUnit units; + private final boolean force; + private boolean failed; + private Throwable unexpectedException; + private String result; + + Transferer(ReplicatedEnvironment env, + int timeout, + String ... targetReplicas) { + this(0, env, makeSet(targetReplicas), + timeout, TimeUnit.SECONDS, false); + } + + static private Set makeSet(String ... 
items) {
+            Set<String> set = new HashSet<>();
+            set.addAll(Arrays.asList(items));
+            return set;
+        }
+
+        Transferer(ReplicatedEnvironment env,
+                   String replica,
+                   int timeout,
+                   TimeUnit units) {
+            this(0, env, makeSet(replica), timeout, units, false);
+        }
+
+        Transferer(int delay,
+                   ReplicatedEnvironment env,
+                   String replica,
+                   int timeout,
+                   boolean flag) {
+            this(delay, env, makeSet(replica),
+                 timeout, TimeUnit.SECONDS, flag);
+        }
+
+        Transferer(int delay,
+                   ReplicatedEnvironment env,
+                   Set<String> replicas,
+                   int timeout,
+                   TimeUnit units,
+                   boolean force) {
+            this.delay = delay;
+            this.env = env;
+            this.replicas = replicas;
+            this.timeout = timeout;
+            this.units = units;
+            this.force = force;
+        }
+
+        @Override
+        public void run() {
+            try {
+                if (delay > 0) {
+                    Thread.sleep(TimeUnit.SECONDS.toMillis(delay));
+                }
+                result = env.transferMaster(replicas, timeout, units, force);
+            } catch (MasterTransferFailureException mtfe) {
+                failed = true;
+            } catch (ThreadInterruptedException tie) {
+                // expected in one case, doesn't happen in others
+            } catch (Throwable t) {
+                unexpectedException = t;
+                t.printStackTrace();
+            }
+        }
+
+        boolean didItFail() {
+            return failed;
+        }
+
+        String getResult() {
+            return result;
+        }
+
+        Throwable getUnexpectedException() {
+            return unexpectedException;
+        }
+    }
+
+    /**
+     * Tests env close during remote invocation.
+     */
+    @Test
+    public void testEnvCloseRemote() throws Exception {
+        createGroup();
+        RepEnvInfo master = repEnvInfo[0];
+        RepEnvInfo replica = repEnvInfo[1];
+
+        String groupName = RepTestUtils.TEST_REP_GROUP_NAME;
+        ReplicationConfig rc = master.getRepConfig();
+        String host = rc.getNodeHostname();
+        int port = rc.getNodePort();
+        Set<InetSocketAddress> helpers = new HashSet<>();
+        helpers.add(new InetSocketAddress(host, port));
+        final ReplicationGroupAdmin rga =
+            new ReplicationGroupAdmin(groupName, helpers,
+                                      RepTestUtils.readRepNetConfig());
+        final Set<String> replicas = new HashSet<>();
+        String replicaName = replica.getEnv().getNodeName();
+        replicas.add(replicaName);
+
+        class RemoteInvoker implements Runnable {
+            private Throwable unexpectedException;
+            private boolean failed;
+
+            @Override
+            public void run() {
+                try {
+                    rga.transferMaster(replicas, 60, TimeUnit.SECONDS, false);
+                } catch (EnvironmentFailureException efe) {
+                    Pattern pat =
+                        Pattern.compile(".*shutting down.*");
+                    if (pat.matcher(efe.getMessage()).matches()) {
+                        failed = true;
+                    } else {
+                        efe.printStackTrace();
+                        unexpectedException = efe;
+                    }
+                } catch (Throwable t) {
+                    t.printStackTrace();
+                    unexpectedException = t;
+                }
+            }
+
+            boolean didItFail() {
+                return failed;
+            }
+
+            Throwable getUnexpectedException() {
+                return unexpectedException;
+            }
+        }
+
+        replica.closeEnv();
+        RemoteInvoker invoker = new RemoteInvoker();
+        Thread thread = new Thread(invoker);
+        thread.start();
+        Thread.sleep(5 * TICK);
+        master.closeEnv();
+
+        thread.join(10000);
+        assertFalse(thread.isAlive());
+        assertTrue(invoker.didItFail());
+        assertNull(invoker.getUnexpectedException());
+
+        restartNodes(master, replica);
+    }
+
+    /**
+     * Tests remote invocation.
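+     * Unlike the in-process API, the request here travels through
+     * ReplicationGroupAdmin, which reaches the group via the helper
+     * sockets and forwards the operation to the master; the result is
+     * still the winning node's name. A minimal sketch (names and the
+     * host/port are illustrative):
+     *
+     *   Set<InetSocketAddress> helpers = new HashSet<>();
+     *   helpers.add(new InetSocketAddress("localhost", 5001));
+     *   ReplicationGroupAdmin admin =
+     *       new ReplicationGroupAdmin(groupName, helpers,
+     *                                 RepTestUtils.readRepNetConfig());
+     *   String winner =
+     *       admin.transferMaster(candidates, 10, TimeUnit.SECONDS, false);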
+ */ + @Test + public void testRemoteInvocation() throws Exception { + createGroup(); + + RepEnvInfo master = repEnvInfo[0]; + RepEnvInfo replica = repEnvInfo[1]; + + final String groupName = RepTestUtils.TEST_REP_GROUP_NAME; + ReplicationConfig rc = replica.getRepConfig(); + String host = rc.getNodeHostname(); + int port = rc.getNodePort(); + Set helpers = new HashSet<>(); + helpers.add(new InetSocketAddress(host, port)); + ReplicationGroupAdmin rga = + new ReplicationGroupAdmin(groupName, helpers, + RepTestUtils.readRepNetConfig()); + Set replicas = new HashSet<>(); + String replicaName = replica.getEnv().getNodeName(); + replicas.add(replicaName); + String result = + rga.transferMaster(replicas, 10, TimeUnit.SECONDS, false); + assertEquals(replicaName, result); + awaitSettle(master, replica); + master.closeEnv(); + master.openEnv(); + + /* Node 2 is the new master. */ + master = repEnvInfo[1]; + replica = repEnvInfo[2]; + replicaName = replica.getEnv().getNodeName(); + replica.closeEnv(); + replicas.clear(); + replicas.add(replicaName); + try { + rga.transferMaster(replicas, 5, TimeUnit.SECONDS, false); + fail("expected timeout failure from remote invocation"); + } catch (MasterTransferFailureException e) { + // expected + } + + /* Shut down master, so that only one replica remains running. */ + master.closeEnv(); + replica = repEnvInfo[0]; + rc = replica.getRepConfig(); + host = rc.getNodeHostname(); + port = rc.getNodePort(); + helpers.clear(); + helpers.add(new InetSocketAddress(host, port)); + rga = new ReplicationGroupAdmin(groupName, helpers, + RepTestUtils.readRepNetConfig()); + replicaName = replica.getEnv().getNodeName(); + replicas.clear(); + replicas.add(replicaName); + try { + rga.transferMaster(replicas, 5, TimeUnit.SECONDS, false); + fail("expected unknown-master exception"); + } catch (UnknownMasterException e) { + // expected + } + + restartNodes(repEnvInfo[1], repEnvInfo[2]); + } + + @Test + public void testCmdLineInvocation() throws Exception { + + PrintStream original = System.out; + try { + createGroup(); + RepEnvInfo master = repEnvInfo[0]; + RepEnvInfo replica = repEnvInfo[1]; + String replicaName = replica.getEnv().getNodeName(); + File propertyFile = new File(master.getEnvHome().getPath(), + "je.properties"); + + final String groupName = RepTestUtils.TEST_REP_GROUP_NAME; + ReplicationConfig rc = replica.getRepConfig(); + String helper = rc.getNodeHostPort(); + System.setOut(new PrintStream(new ByteArrayOutputStream())); + DbGroupAdmin.main("-groupName", groupName, + "-helperHosts", helper, + "-netProps", propertyFile.getPath(), + "-transferMaster", "-force", replicaName, "5 s"); + awaitSettle(master, replica); + master.closeEnv(); + master.openEnv(); + + /* Now Node 2 is the new master. */ + master = repEnvInfo[1]; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintStream ps = new PrintStream(baos, true); + System.setOut(ps); + + String candidate1 = repEnvInfo[0].getEnv().getNodeName(); + String candidate2 = repEnvInfo[2].getEnv().getNodeName(); + DbGroupAdmin.main("-groupName", groupName, + "-helperHosts", helper, + "-netProps", propertyFile.getPath(), + "-transferMaster", + candidate1 + "," + candidate2, + "10", "s"); + String output = baos.toString(); + /* Get the text sans line ending. 
+            BufferedReader br = new BufferedReader(new StringReader(output));
+            String line = br.readLine();
+            assertNotNull(line);
+
+            /* "The new master is: node_name" */
+            Pattern pat = Pattern.compile(".*: (.*)");
+            Matcher m = pat.matcher(line);
+            assertTrue(m.matches());
+            String result = m.group(1);
+            assertTrue(result,
+                       candidate1.equals(result) || candidate2.equals(result));
+            RepEnvInfo newmaster =
+                candidate1.equals(result) ?
+                repEnvInfo[0] : repEnvInfo[2];
+            awaitSettle(master, newmaster);
+            master.closeEnv();
+            master.openEnv();
+        } finally {
+            System.setOut(original);
+        }
+    }
+
+    /**
+     * Checks that a "forcing" operation leaves the "in progress" flag set
+     * properly, by attempting a third (non-forcing) operation.
+     */
+    @Test
+    public void testInProgressFlag() throws Exception {
+        createGroup();
+        RepEnvInfo master = repEnvInfo[0];
+        RepEnvInfo replica = repEnvInfo[1];
+        String replicaName = replica.getEnv().getNodeName();
+        ReplicatedEnvironment masterEnv = master.getEnv();
+        replica.closeEnv();
+
+        Transferer mt1 =
+            new Transferer(0, masterEnv, replicaName, 60, false);
+        Thread t1 = new Thread(mt1);
+        t1.start();
+
+        /* Give mt1 a chance to get established, then supersede it with mt2. */
+        Thread.sleep(5 * TICK);
+        Transferer mt2 =
+            new Transferer(0, masterEnv, replicaName, 60, true);
+        Thread t2 = new Thread(mt2);
+        t2.start();
+
+        /*
+         * Wait for mt1 to fail. We've already tested elsewhere that it does.
+         * But it's useful to wait for it here just so that we know when mt2
+         * has become established.
+         */
+        t1.join(10000);
+        assertFalse(t1.isAlive());
+        assertTrue(mt1.didItFail());
+
+        /* Here, a non-forcing MT op should be rejected immediately. */
+        Transferer mt3 =
+            new Transferer(0, masterEnv, replicaName, 3, false);
+        mt3.run();
+        assertTrue(mt3.didItFail());
+
+        restartNodes(replica);
+        t2.join(10000);
+        assertFalse(t2.isAlive());
+        assertFalse(mt2.didItFail());
+        assertNull(mt2.getUnexpectedException());
+        assertEquals(replicaName, mt2.getResult());
+
+        /* As always, clean things up to placate our superclass. */
+        awaitSettle(master, replica);
+        master.closeEnv();
+        master.openEnv();
+    }
+
+    abstract class Pacer extends TestHookAdapter<Message> {
+        @Override
+        public void doHook(Message m) {
+            if (m.getOp() == Protocol.COMMIT) {
+                Protocol.Entry e = (Protocol.Entry) m;
+                final InputWireRecord record = e.getWireRecord();
+                final byte entryType = record.getEntryType();
+                if (LOG_TXN_COMMIT.equalsType(entryType)) {
+                    Protocol.Commit commitEntry = (Protocol.Commit) e;
+                    final boolean needsAck = commitEntry.getNeedsAck();
+                    processCommit(needsAck);
+                }
+            }
+        }
+        protected abstract void processCommit(boolean ackable);
+    }
+
+    class AckPacer extends Pacer {
+        private final Semaphore throttle;
+        private final int initialTxns;
+        private final int maxLag;
+        private int count = 0;
+        private boolean released = false;
+
+        AckPacer(Semaphore t, int i, int m) {
+            throttle = t;
+            initialTxns = i;
+            maxLag = m;
+        }
+
+        // throttle all commits, by making them take 1 second
+        // once we've seen half the backlog go by, unleash the master
+        // txn-writing threads by releasing all semaphore perms.
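+        // (The generator threads block in throttle.acquire() before each
+        // put, so releasing maxLag permits lets them run at most maxLag
+        // txns ahead of the replica's replay position.)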
+ // as we see ack-seeking commits, at the end of the sleeping time, + // do another semaphore.release + + @Override + protected void processCommit(boolean ackable) { + try { + Thread.sleep(TICK); + } catch (InterruptedException ie) { + // doesn't happen + } + if (!released) { + if (++count >= initialTxns) { + throttle.release(maxLag); + released = true; + } + } else { + if (ackable) { + throttle.release(); + } + } + } + } + + /* + * When two barriers are supplied, there are two phases to the processing + * before a latch, if supplied. If the first but not the second barrier is + * supplied, along with a latch, then the waiting on the latch occurs on + * completion of the first (only) phase. + */ + class BarrierPacer extends Pacer { + private final CyclicBarrier barrier1, barrier2; + private final CountDownLatch latch; + private int commitCount; + private int ackCount; + private int phase = 1; + + BarrierPacer(CyclicBarrier b1, CyclicBarrier b2, CountDownLatch l) { + barrier1 = b1; + barrier2 = b2; + latch = l; + } + + @Override + protected void processCommit(boolean ackable) { + try { + switch (phase) { + case 1: + if (++commitCount > 10) { + if (barrier1 != null) { + barrier1.await(); + } + if (barrier2 == null && latch != null) { + latch.await(); + phase = 3; + } else { + phase = 2; + } + } + break; + case 2: + if (ackable && ++ackCount > 2) { + if (barrier2 != null) { + barrier2.await(); + } + phase = 3; + if (latch != null) { + latch.await(); + } + } + break; + case 3: + break; + } + } catch (InterruptedException ie) { + // shouldn't happen + } catch (BrokenBarrierException bb) { + // shouldn't happen + } + } + } + + /** + * An ack pacer that trips a latch when it's halfway through an initial + * backlog, and then performs some specified action after it has seen and + * acked the 2nd "live" txn. + */ + abstract class TwoAckPacer extends Pacer { + private int ackCount; + private int txnCount; + private final int backlog; + private boolean seenAckable; + private final CountDownLatch latch; + + TwoAckPacer(int backlog, CountDownLatch latch) { + this.backlog = backlog; + this.latch = latch; + } + + @Override + protected void processCommit(boolean ackable) { + + /* + * Once we get half-way through the backlog, release the generator + * threads to do their work. (We want to make sure our hook is + * installed, and the Feeder connected, before doing so.) + */ + if (++txnCount >= backlog / 2) { + latch.countDown(); + } + if (ackable) { + seenAckable = true; + } + try { + if (seenAckable) { + + /* + * Ack the first 2 "live" txns without delay; after that + * do whatever the subclass implements. + */ + logger.info("seeing live txn"); + if (++ackCount > 2) { + tripOnSecondAck(); + } + } else { + Thread.sleep(TICK); + } + } catch (InterruptedException ie) { + // doesn't happen + } + } + + abstract protected void tripOnSecondAck() throws InterruptedException; + } + + static class ThrottlingGenerator extends AbstractTxnGenerator { + private final Semaphore throttle; + + ThrottlingGenerator(Database db, Semaphore throttle, int start) { + + /* + * Insufficient Acks is rare, but it can occasionally happen during + * the throes of everything settling down at the end of a transfer. 
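+ * A commit released just as mastership changes hands may no longer be
+ * able to assemble its ack quorum, so tolerating that exception here
+ * keeps a benign shutdown race from failing the test.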
+ */ + super(db, start, SIMPLE_MAJORITY, + makeSet(UnknownMasterException.class, + InsufficientAcksException.class)); + this.throttle = throttle; + } + + @Override + protected void throttle() throws InterruptedException { + throttle.acquire(); + } + } + + static class TxnGenerator extends AbstractTxnGenerator { + private final CountDownLatch latch; + + TxnGenerator(Database db, int startKey) { + this(db, startKey, SIMPLE_MAJORITY); + } + + TxnGenerator(Database db, int startKey, ReplicaAckPolicy policy) { + this(db, startKey, null, policy, + makeSet(UnknownMasterException.class)); + } + + TxnGenerator(Database db, int startKey, CountDownLatch latch) { + this(db, startKey, latch, SIMPLE_MAJORITY, + makeSet(UnknownMasterException.class)); + } + + TxnGenerator(Database db, + int startKey, + CountDownLatch latch, + ReplicaAckPolicy policy, + Set > allowableExceptionTypes) { + super(db, startKey, policy, allowableExceptionTypes); + this.latch = latch == null ? + new CountDownLatch(0) : latch; + } + + @Override + protected void throttle() throws InterruptedException { + latch.await(); + } + } + + abstract static class AbstractTxnGenerator implements Runnable { + private final Database db; + private Throwable unexpectedException; + private Throwable expectedException; + private final int startKey; + private final ReplicaAckPolicy policy; + private int commitCount; + private final Set > allowableExceptionTypes; + + AbstractTxnGenerator(Database db, + int startKey, + ReplicaAckPolicy policy, + Set > + allowableExceptionTypes) { + + this.db = db; + this.startKey = startKey; + this.allowableExceptionTypes = allowableExceptionTypes; + this.policy = policy; + + /* + * Experiment to see if tests start passing when + * IllegalStateException is allowed. + */ + this.allowableExceptionTypes.add(IllegalStateException.class); + } + + @Override + public void run() { + try { + TransactionConfig tc = new TransactionConfig(); + tc.setDurability(new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + policy)); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry value = new DatabaseEntry(); + final int n = 10; + for (int i = 0; i < n; i++) { + throttle(); + Transaction txn = + db.getEnvironment().beginTransaction(null, tc); + try { + IntegerBinding.intToEntry(startKey + i, key); + LongBinding.longToEntry(i, value); + db.put(txn, key, value); + txn.commit(); + commitCount++; + } finally { + safeAbort(txn); + } + } + } catch (Throwable t) { + if (allowableExceptionTypes.contains(t.getClass())) { + expectedException = t; + } else { + t.printStackTrace(); + unexpectedException = t; + } + } + } + + abstract protected void throttle() throws InterruptedException; + + Throwable getUnexpectedException() { + return unexpectedException; + } + + Throwable getExpectedException() { + return expectedException; + } + + int getCommitCount() { + return commitCount; + } + + int getStartKey() { + return startKey; + } + } + + @SafeVarargs + static private Set > + makeSet(Class... 
classes) { + + Set> set = + new HashSet<>(); + + Collections.addAll(set, classes); + return set; + } + + abstract private static class CheckedHook implements TestHook { + final Transaction txn; + final Class expectedException; + final Transaction.State expectedState; + final int triggerCount; + public String problem; + public boolean ran; + public final CountDownLatch done; + protected final String description; + + CheckedHook(int triggerCount, + Transaction txn, + Class expectedException, + Transaction.State expectedState, + CountDownLatch done, + String description) { + this.txn = txn; + this.expectedException = expectedException; + this.expectedState = expectedState; + this.triggerCount = triggerCount; + this.done = done; + this.description = description; + } + + @Override + public void hookSetup() { + } + + @Override + public void doIOHook() throws IOException { + } + + @Override + public void doHook() { + } + + @Override + public void doHook(Integer val) { + if (!val.equals(triggerCount)) { + return; + } + + Thread t = new Thread() { + @Override + public void run() { + executeAsyncTask(); + } + }; + t.start(); + try { + t.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + ran = true; + } + + abstract void executeAsyncTask(); + + @Override + public Integer getHookValue() { + return 0; + } + + @Override + public String toString() { + return "Txn " + txn.getId() + "/trigger=" + triggerCount + + " " + description; + } + } + + /* Try to commit while frozen */ + private class TxnEndHook extends CheckedHook { + + TxnEndHook(int triggerCount, + Transaction txn, + Class expectedException, + Transaction.State expectedState, + CountDownLatch done, + String description) { + super(triggerCount, txn, expectedException, expectedState, done, + description); + } + + @Override + void executeAsyncTask() { + try { + System.err.println("txn" + txn.getId() + " " + description + + " for txnEndHook"); + txn.commit(); + if (expectedException != null) { + problem = "txn should not have committed" + txn; + } + } catch (Exception e) { + if (expectedException != null) { + if (!e.getClass().equals(expectedException)) { + problem = "commit got unexpected " + e; + } + } + } + if (!expectedState.equals(txn.getState())) { + problem = "Saw " + txn.getState() + " instead of " + + expectedState; + } + + done.countDown(); + }; + } + + /* Try to write while frozen */ + private class AdditionalWriteHook extends CheckedHook { + private final Database db; + private final int useKey; + + AdditionalWriteHook(int triggerCount, + Database db, + Transaction txn, + Class expectedException, + Transaction.State expectedState, + CountDownLatch done, + int useKey, + String description) { + super(triggerCount, txn, expectedException, expectedState, done, + description); + this.db = db; + this.useKey = useKey; + } + + @Override + void executeAsyncTask() { + System.err.println("txn" + txn.getId() + " " + description + + " for writeHook"); + try { + DatabaseEntry entry = new DatabaseEntry(); + DatabaseEntry found = new DatabaseEntry(); + IntegerBinding.intToEntry(useKey, entry); + OperationStatus status = db.get(txn, entry, found, null); + logger.info("get status = " + status + + " useKey = " + useKey); + db.put(txn, entry, entry); + ran = true; + if (expectedException != null) { + problem = "txn should not have committed" + txn; + } + } catch (Exception e) { + if (expectedException == null) { + problem = "commit got unexpected " + e; + } else { + if (!e.getClass().equals(expectedException)) { + problem = "commit got 
unexpected " + e;
+                    }
+                }
+            }
+            if (!expectedState.equals(txn.getState())) {
+                problem = "Saw " + txn.getState() + " instead of " +
+                          expectedState;
+            }
+            done.countDown();
+        }
+    }
+
+    /*
+     * Write a record that requires an ack policy of ALL, to ensure that
+     * the group is synced up.
+     */
+    private void allAckWrite(ReplicatedEnvironment masterEnv,
+                             Database db,
+                             int keyVal) {
+        logger.info("Syncing up group with an all-ack write using master Env " +
+                    masterEnv.getNodeName());
+        Durability all = new Durability(SyncPolicy.NO_SYNC,
+                                        SyncPolicy.NO_SYNC,
+                                        ReplicaAckPolicy.ALL);
+        TransactionConfig txnConfig = new TransactionConfig();
+        txnConfig.setDurability(all);
+        Transaction txn = masterEnv.beginTransaction(null, txnConfig);
+        try {
+            DatabaseEntry val = new DatabaseEntry();
+            IntegerBinding.intToEntry(keyVal, val);
+            db.put(txn, val, val);
+            txn.commit();
+        } finally {
+            safeAbort(txn);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/impl/node/MinRetainedVLSNsTest.java b/test/com/sleepycat/je/rep/impl/node/MinRetainedVLSNsTest.java
new file mode 100644
index 0000000..ff27389
--- /dev/null
+++ b/test/com/sleepycat/je/rep/impl/node/MinRetainedVLSNsTest.java
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.impl.node;
+
+import static org.junit.Assert.assertTrue;
+
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.rep.InsufficientLogException;
+import com.sleepycat.je.rep.NetworkRestore;
+import com.sleepycat.je.rep.NetworkRestoreConfig;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.rep.vlsn.VLSNRange;
+import com.sleepycat.je.utilint.VLSN;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class MinRetainedVLSNsTest extends RepTestBase {
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        groupSize = 4;
+        super.setUp();
+    }
+
+    @Override
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+    }
+
+    /**
+     * Tests the old behavior, with no retained VLSNs.
+     */
+    @Test
+    public void testNoneRetained() {
+        retainedInternalTest(0);
+    }
+
+    @Test
+    public void testRetained() {
+        retainedInternalTest(1000);
+    }
+
+    /**
+     * Tests that at least the configured minimum number of VLSNs is
+     * retained.
+     */
+    public void retainedInternalTest(int minRetainedVLSNs) {
+        setRepConfigParam(RepParams.MIN_RETAINED_VLSNS,
+                          Integer.toString(minRetainedVLSNs));
+
+        /*
+         * For rapid updates of the global cbvlsn as new log files are
+         * created.
+         */
+        setEnvConfigParam(EnvironmentParams.LOG_FILE_MAX, "4000");
+        createGroup(3);
+
+        final ReplicatedEnvironment master = repEnvInfo[0].getEnv();
+        populateDB(master, 1000);
+
+        /* Create garbage by overwriting. */
+        populateDB(master, 1000);
+
+        /* Sync group.
*/ + RepTestUtils.syncGroup(repEnvInfo); + + checkGlobalCBVLSN(); + + /* + * Open a new environment. It must be able to syncup or network + * restore. + */ + try { + repEnvInfo[repEnvInfo.length - 1].openEnv(); + } catch (InsufficientLogException ile) { + new NetworkRestore().execute(ile, new NetworkRestoreConfig()); + repEnvInfo[repEnvInfo.length - 1].openEnv(); + } + + checkGlobalCBVLSN(); + } + + private void checkGlobalCBVLSN() { + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() == null) { + continue; + } + final int minRetainedVLSNs = Integer.parseInt(info.getRepConfig(). + getConfigParam(RepParams.MIN_RETAINED_VLSNS.getName())); + final VLSN groupCBVLSN = info.getRepNode().getGlobalCBVLSN(); + final VLSNRange range = info.getRepImpl().getVLSNIndex().getRange(); + final long retainedVLSNs = range.getLast().getSequence() - + groupCBVLSN.getSequence(); + + assertTrue(retainedVLSNs >= minRetainedVLSNs); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/MinorityTransferTest.java b/test/com/sleepycat/je/rep/impl/node/MinorityTransferTest.java new file mode 100644 index 0000000..f2d52db --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/MinorityTransferTest.java @@ -0,0 +1,163 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.Transaction; +import org.junit.Test; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.UnknownMasterException; + +/** + * Master Transfer tests that run with only a group minority. + */ +public class MinorityTransferTest extends RepTestBase { + private RepEnvInfo master; + private ReplicatedEnvironment masterEnv; + private RepEnvInfo replica; + private String replicaName; + + @Override + public void setUp() + throws Exception { + + super.setUp(); + master = repEnvInfo[0]; + master.getRepConfig(). + setConfigParam(ReplicationConfig.INSUFFICIENT_REPLICAS_TIMEOUT, + "30 s"); + createGroup(); + masterEnv = master.getEnv(); + replica = repEnvInfo[1]; + replicaName = replica.getEnv().getNodeName(); + closeNodes(repEnvInfo[2], repEnvInfo[3], repEnvInfo[4]); + } + + @Override + public void tearDown() + throws Exception { + + restartNodes(repEnvInfo[0], + repEnvInfo[2], repEnvInfo[3], repEnvInfo[4]); + super.tearDown(); + } + + /** + * Ensures that a thread waiting in {@code beginTransaction()} for + * sufficient replicas gets a proper {@code + * UnknownMasterException} upon a Master Transfer. 
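+ * Only two of the five nodes are up when the test runs, so the {@code
+ * beginTransaction()} issued by the runner thread stalls waiting for a
+ * replica quorum (bounded by the 30-second INSUFFICIENT_REPLICAS_TIMEOUT
+ * set in setUp()); the transfer must wake that waiter promptly rather
+ * than leave it to time out on its own.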
+ */ + @Test + public void testBeginWaiterException() throws Exception { + ResultEvaluator expected = + new ResultEvaluator() { + @Override + public boolean isExpected(Throwable t) { + return ((t instanceof UnknownMasterException) || + (t instanceof ReplicaWriteException)); + } + }; + TxnRunner runner = new TxnRunner(expected); + Thread thread = new Thread(runner); + thread.start(); + Thread.sleep(5 * MasterTransferTest.TICK); + + Set replicas = new HashSet(); + replicas.add(replicaName); + masterEnv.transferMaster(replicas, 10, TimeUnit.SECONDS); + thread.join(10000); + assertFalse(thread.isAlive()); + assertTrue(runner.isOK()); + + MasterTransferTest.awaitSettle(master, replica); + master.closeEnv(); + } + + @Test + public void testEnvClose() throws Exception { + ResultEvaluator expected = + new ResultEvaluator() { + @Override + public boolean isExpected(Throwable t) { + return t instanceof EnvironmentFailureException && + t.getCause() instanceof IllegalStateException; + } + }; + TxnRunner runner = new TxnRunner(expected); + Thread thread = new Thread(runner); + thread.start(); + Thread.sleep(5 * MasterTransferTest.TICK); + + /* + * Depending on thread timing, close() may either succeed or throw this + * exception. + */ + try { + master.closeEnv(); + } catch (EnvironmentFailureException efe) { + if (!expected.isExpected(efe.getCause())) { + efe.printStackTrace(); + fail(); + } + } + thread.join(10000); + assertFalse(thread.isAlive()); + assertTrue(runner.isOK()); + } + + class TxnRunner implements Runnable { + private final ResultEvaluator evaluator; + private boolean ok; + + TxnRunner(ResultEvaluator evaluator) { + this.evaluator = evaluator; + } + + @Override + public void run() { + try { + Transaction txn = masterEnv.beginTransaction(null, null); + txn.abort(); + ok = true; + } catch (Throwable t) { + if (evaluator.isExpected(t)) { + ok = true; + } else { + t.printStackTrace(); + } + } + } + + boolean isOK() { + return ok; + } + } + + interface ResultEvaluator { + public boolean isExpected(Throwable exception); + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/PrimaryNodeTest.java b/test/com/sleepycat/je/rep/impl/node/PrimaryNodeTest.java new file mode 100644 index 0000000..27d8dec --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/PrimaryNodeTest.java @@ -0,0 +1,552 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +import org.junit.Before; +import org.junit.Test; + +public class PrimaryNodeTest extends RepTestBase { + + final TransactionConfig txnConfig = new TransactionConfig(); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + + RepEnvInfo primary = null; + RepEnvInfo second = null; + RepEnvInfo third = null; + RepEnvInfo allInfo[] = null; + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + allInfo = repEnvInfo; + groupSize = 2; + repEnvInfo = new RepEnvInfo[2]; + primary = repEnvInfo[0] = allInfo[0]; + second = repEnvInfo[1] = allInfo[1]; + third = allInfo[2]; + + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + + /* Config to speed up test elapsed time. */ + primary.getRepConfig(). + setConfigParam(ReplicationConfig.INSUFFICIENT_REPLICAS_TIMEOUT, + "5 s"); + primary.getRepConfig(). + setConfigParam(ReplicationConfig.ELECTIONS_PRIMARY_RETRIES, "1"); + + // TODO: is this needed now that hard recovery works? + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + for (RepEnvInfo ri : allInfo) { + ri.getEnvConfig().setConfigParam("je.env.runCleaner", "false"); + } + } + + @Test + public void testPrimaryParam() { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + + ReplicationConfig repEnvConfig = new ReplicationConfig(); + repEnvConfig.setGroupName("ExampleGroup"); + repEnvConfig.setNodeName("node1"); + repEnvConfig.setNodeHostPort("localhost:5000"); + repEnvConfig.setHelperHosts("localhost:5000"); + + final int defaultPriority = + Integer.parseInt(RepParams.NODE_PRIORITY.getDefault()); + + assertEquals(false, repEnvConfig.getDesignatedPrimary()); + assertEquals(defaultPriority, repEnvConfig.getNodePriority()); + + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + /* Default value must be false. 
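+ * A node can also be designated up front, before the handle is opened; a
+ * sketch using the same setter exercised further down in this test:
+ *
+ *     ReplicationConfig rc = new ReplicationConfig();
+ *     rc.setDesignatedPrimary(true);
+ *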
*/ + assertEquals(false, repEnv.getRepConfig().getDesignatedPrimary()); + ReplicationMutableConfig repMutableConfig = + repEnv.getRepMutableConfig(); + assertEquals(false, repMutableConfig.getDesignatedPrimary()); + repMutableConfig.setDesignatedPrimary(true); + repEnv.setRepMutableConfig(repMutableConfig); + assertEquals(true, + repEnv.getRepMutableConfig().getDesignatedPrimary()); + + repEnv.close(); + + repEnvConfig.setDesignatedPrimary(true); + + /* Ensure that the priority is also increased. */ + + repEnv = new ReplicatedEnvironment(envRoot, repEnvConfig, envConfig); + assertTrue(defaultPriority < + RepInternal.getNonNullRepImpl(repEnv).getRepNode(). + getElectionPriority()); + assertEquals(true, repEnv.getRepConfig().getDesignatedPrimary()); + repEnv.close(); + } + + @Test + public void testConflictingPrimary() { + primary.getRepConfig().setDesignatedPrimary(true); + second.getRepConfig().setDesignatedPrimary(true); + primary.openEnv(); + try { + second.openEnv(); + second.closeEnv(); + fail("expected exception"); + } catch (EnvironmentFailureException e) { + assertEquals(EnvironmentFailureReason.HANDSHAKE_ERROR, + e.getReason()); + } + primary.closeEnv(); + } + + /** + * Verifies that txn begins and commits activate a Primary, and that the + * addition of a new node passivates a Primary. + * + * @throws DatabaseException + * @throws InterruptedException + */ + @Test + public void testActivatePassivate() + throws DatabaseException, + InterruptedException { + + createGroup(); + second.closeEnv(); + + verifyDefaultTwoNodeTxnBehavior(); + + primary.getRepConfig().setDesignatedPrimary(true); + primary.closeEnv(); + + RepTestUtils.restartGroup(repEnvInfo); + second.closeEnv(); + /* Will activate the Primary since it causes an election. */ + primary.getRepNode().forceMaster(true); + for (int i = 0; i < 10; i++) { + if (primary.getEnv().getState().isMaster()) { + break; + } + Thread.sleep(1000); + } + assertTrue(primary.getEnv().getState().isMaster()); + + activateOnBegin(); + + activateOnCommit(); + + passivateWithNewNode(); + + primary.closeEnv(); + } + + /* + * Basic test that verifies that insertion succeeds with majority + * durability and fails with ALL durability. + */ + @Test + public void testBasic() + throws DatabaseException, + InterruptedException { + createGroup(); + ReplicationMutableConfig repMutableConfig = + primary.getEnv().getRepMutableConfig(); + assertEquals(false, repMutableConfig.getDesignatedPrimary()); + repMutableConfig.setDesignatedPrimary(true); + primary.getEnv().setRepMutableConfig(repMutableConfig); + second.closeEnv(); + populateDB(repEnvInfo[0].getEnv(), "db", 0, 10, null); + boolean gotException = false; + try { + populateDB(repEnvInfo[0].getEnv(), "db", 0, 100, + new TransactionConfig().setDurability( + RepTestUtils.SYNC_SYNC_ALL_DURABILITY)); + } catch (InsufficientReplicasException e) { + gotException = true; + } + assertEquals(true, gotException); + + } + + /* + * Test the typical dynamic use of designating and undesignating a node + * as a primary. + */ + @Test + public void testDynamicPrimary() { + createGroup(); + second.closeEnv(); + + /* Start by verifying the default two node txn behavior.
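+ * For reference, a sketch of the two durability levels contrasted in
+ * testBasic above, assuming the standard com.sleepycat.je.Durability API:
+ *
+ *     new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC,
+ *                    ReplicaAckPolicy.SIMPLE_MAJORITY); // passes while the
+ *                                                       // primary is active
+ *     new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC,
+ *                    ReplicaAckPolicy.ALL);             // fails, replica down
+ *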
*/ + verifyDefaultTwoNodeTxnBehavior(); + assertTrue(!primary.getRepNode().getArbiter().isActive()); + + ReplicationMutableConfig repMutableConfig = + primary.getEnv().getRepMutableConfig(); + assertEquals(false, repMutableConfig.getDesignatedPrimary()); + repMutableConfig.setDesignatedPrimary(true); + primary.getEnv().setRepMutableConfig(repMutableConfig); + + assertTrue(!primary.getRepNode().getArbiter().isActive()); + try { + Transaction txn = + primary.getEnv().beginTransaction(null, txnConfig); + /* Verify that it has transitioned to an active primary. */ + assertTrue(primary.getRepNode().getArbiter().isActive()); + txn.abort(); + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + + /* Revert it back, and verify unaltered behavior. */ + repMutableConfig = primary.getEnv().getRepMutableConfig(); + assertEquals(true, repMutableConfig.getDesignatedPrimary()); + repMutableConfig.setDesignatedPrimary(false); + primary.getEnv().setRepMutableConfig(repMutableConfig); + + /* Verify that it's back to the default two node behavior. */ + verifyDefaultTwoNodeTxnBehavior(); + + assertTrue(!primary.getRepNode().getArbiter().isActive()); + } + + private void verifyDefaultTwoNodeTxnBehavior() { + boolean success = false; + Transaction txn = null; + Database db = null; + try { + txn = primary.getEnv().beginTransaction(null, txnConfig); + db = primary.getEnv().openDatabase(txn, "dbx", dbConfig); + success = true; + } catch (InsufficientReplicasException e) { + /* Expected. */ + } catch (InsufficientAcksException e) { + + /* + * Expected alternative, depending upon network timing. That is, + * if the master had not realized that the replica connection had + * already been closed at the time of the begin transaction. + */ + } finally { + if (db != null) { + db.close(); + } + if (txn != null) { + if (success) { + /* Should throw an exception if begin() did not. */ + try { + txn.commit(); + fail("expected exception"); + } catch (InsufficientReplicasException e) { + /* Expected. */ + } catch (InsufficientAcksException e) { + /* Expected. */ + } finally { + txn.abort(); + } + } else { + txn.abort(); + } + } + } + } + + + /* + * Increasing the group size to three should passivate an active Primary. + */ + private void passivateWithNewNode() + throws InterruptedException { + + assertTrue(primary.getRepNode().getArbiter().isActive()); + + third.openEnv(); + waitUntilPassive(); + + assertTrue(!primary.getRepNode().getArbiter().isActive()); + third.closeEnv(); + } + + /* + * Passivate the Primary by restarting the second node. It leaves the + * second node's env open. + */ + private void passivatePrimary() + throws InterruptedException { + + second.openEnv(); + waitUntilPassive(); + } + + private void waitUntilPassive() + throws InterruptedException { + for (int i = 0; i < 60; i++) { + if (!primary.getRepNode().getArbiter().isActive()) { + return; + } + Thread.sleep(1000); + } + fail("failed to passivate primary"); + } + + /* A commit should activate the Primary. */ + private void activateOnCommit() + throws InterruptedException { + + passivatePrimary(); + + assertTrue(!primary.getRepNode().getArbiter().isActive()); + /* A commit transaction should activate the primary.
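+ * A caller-side view of the semantics checked below (the doWrites() helper
+ * is hypothetical): an InsufficientAcksException from commit() means the
+ * transaction is already durable locally, so it must not be aborted.
+ *
+ *     Transaction txn = env.beginTransaction(null, txnConfig);
+ *     doWrites(txn);
+ *     try {
+ *         txn.commit();
+ *     } catch (InsufficientAcksException e) {
+ *         // Committed locally; replica acks merely fell short.
+ *     }
+ *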
*/ + boolean success = false; + Transaction txn = null; + Database db = null; + try { + txn = primary.getEnv().beginTransaction(null, txnConfig); + second.closeEnv(); + + assertTrue(!primary.getRepNode().getArbiter().isActive()); + + /* Update something */ + db = primary.getEnv().openDatabase(txn, "db1", dbConfig); + success = true; + + /* + * Sleep five seconds so that the TCP connection is closed down + * and the primary has had time to react to it before it reaches + * the commit. + */ + Thread.sleep(5000); + } catch (Exception e) { + fail("Unexpected exception: " + e); + } finally { + if (db != null) { + db.close(); + } + if (txn != null) { + if (success) { + txn.commit(); + assertTrue(primary.getRepNode().getArbiter().isActive()); + } else { + txn.abort(); + } + } + } + } + + /* A begin transaction should activate the primary. */ + private void activateOnBegin() + throws InterruptedException { + + passivatePrimary(); + second.closeEnv(); + assertTrue(!primary.getRepNode().getArbiter().isActive()); + + Transaction txn = null; + for (int i = 0; i < 10; i++) { + try { + txn = primary.getEnv().beginTransaction(null, txnConfig); + /* Verify that it has transitioned to an active primary. */ + if (primary.getRepNode().getArbiter().isActive()) { + /* test passed */ + return; + } + } finally { + if (txn != null) { + txn.abort(); + } + txn = null; + } + + /* + * Retry the test; the master might think the feeder for the + * Replica is still alive, that is, the TCP connection has not yet + * timed out. + */ + Thread.sleep(2000); + } + fail("failed despite retries"); + } + + @Test + public void testElectionsActivate() + throws InterruptedException { + + createGroup(); + closeEnvironments(); + + /* Fail elections if the second node is not available. */ + String saveTimeout = primary.getRepConfig(). + getConfigParam(RepParams.ENV_SETUP_TIMEOUT.getName()); + /* Speed up the test. */ + primary.getRepConfig(). + setConfigParam(RepParams.ENV_SETUP_TIMEOUT.getName(), "5 s"); + try { + primary.openEnv(); + fail("Expected exception"); + } catch (UnknownMasterException e) { + // Expected: the join group times out since the second node is down. + } + /* Restore the timeout. */ + primary.getRepConfig(). + setConfigParam(RepParams.ENV_SETUP_TIMEOUT.getName(), saveTimeout); + + /* + * Pass elections if the second node is not available, but Primary is + * active. + */ + primary.getRepConfig().setDesignatedPrimary(true); + try { + primary.openEnv(); + /* Verify that it has transitioned to an active primary. */ + assertTrue(primary.getRepNode().getArbiter().isActive()); + } catch (UnknownMasterException e) { + fail("Unexpected exception: " + e); + } + passivatePrimary(); + /* Primary should be passivated. */ + assertTrue(!primary.getRepNode().getArbiter().isActive()); + primary.closeEnv(); + second.closeEnv(); + } + + /** + * Regression test for [#21536]. + */ + @Test + public void testActivateWhileAwaitingAck() throws Exception { + primary.getRepConfig().
+ setConfigParam(ReplicationConfig.REPLICA_ACK_TIMEOUT, + "5 s"); + primary.getRepConfig().setDesignatedPrimary(true); + createGroup(); + ReplicatedEnvironment masterEnv = primary.getEnv(); + Database db = masterEnv.openDatabase(null, TEST_DB_NAME, dbconfig); + DatabaseEntry key1 = new DatabaseEntry("1".getBytes()); + DatabaseEntry value = new DatabaseEntry("one".getBytes()); + db.put(null, key1, value); + + second.getRepNode().replica().setDontProcessStream(); + key1 = new DatabaseEntry("2".getBytes()); + value = new DatabaseEntry("two".getBytes()); + db.put(null, key1, value); + + /* + * Even though we forced the replica not to ack the txn, it's still + * connected, and we decided in this case that it makes sense to + * refrain from setting active primary mode. (We only set active + * primary mode when the connection is broken.) + */ + assertTrue(!primary.getRepNode().getArbiter().isActive()); + + db.close(); + } + + /** + * Test designating a primary in a two (electable) node group that also has + * a secondary node. + */ + @Test + public void testSecondaryNode() + throws Exception { + + /* + * Create a group, add a secondary node, and confirm that closing one + * electable node prevents creating write transactions + */ + createGroup(); + third.getRepConfig().setNodeType(NodeType.SECONDARY); + third.openEnv(); + try { + second.closeEnv(); + verifyDefaultTwoNodeTxnBehavior(); + assertTrue(!primary.getRepNode().getArbiter().isActive()); + + /* + * Specify a designated primary and check that write transactions + * are now supported + */ + final ReplicationMutableConfig repMutableConfig = + primary.getEnv().getRepMutableConfig(); + repMutableConfig.setDesignatedPrimary(true); + primary.getEnv().setRepMutableConfig(repMutableConfig); + Transaction txn = + primary.getEnv().beginTransaction(null, txnConfig); + assertTrue(primary.getRepNode().getArbiter().isActive()); + txn.abort(); + + /* Try designating the secondary as a primary */ + repMutableConfig.setDesignatedPrimary(false); + primary.getEnv().setRepMutableConfig(repMutableConfig); + repMutableConfig.setDesignatedPrimary(true); + third.getEnv().setRepMutableConfig(repMutableConfig); + try { + txn = primary.getEnv().beginTransaction(null, txnConfig); + fail("Shouldn't support write transactions"); + } catch (InsufficientReplicasException e) { + /* Expect this... */ + } catch (InsufficientAcksException e) { + /* ... or this */ + } + + } finally { /* Clean up */ + try { + third.closeEnv(); + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + private void closeEnvironments() { + for (RepEnvInfo ri : repEnvInfo) { + ri.closeEnv(); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/RepNodeTest.java b/test/com/sleepycat/je/rep/impl/node/RepNodeTest.java new file mode 100644 index 0000000..3950169 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/RepNodeTest.java @@ -0,0 +1,70 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.HashSet; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.rep.impl.node.RepNode.TransientIds; + +public class RepNodeTest { + + /** Test the RepNode.TransientIds class, used for secondary node IDs. */ + @Test + public void testSecondaryNodeIds() { + final int size = 100; + final TransientIds ids = new TransientIds(size); + final Set<Integer> set = new HashSet<Integer>(); + for (int i = 0; i < size; i++) { + final int id = ids.allocateId(); + assertTrue("Unexpected ID value: " + id, + id >= (Integer.MAX_VALUE - size)); + set.add(id); + } + assertEquals("Number of unique IDs", 100, set.size()); + try { + ids.allocateId(); + fail("Expected IllegalStateException when no more IDs"); + } catch (IllegalStateException e) { + } + try { + ids.deallocateId(-1); + fail("Expected IllegalArgumentException for negative ID"); + } catch (IllegalArgumentException e) { + } + try { + ids.deallocateId(Integer.MAX_VALUE - size - 1); + fail("Expected IllegalArgumentException for invalid ID"); + } catch (IllegalArgumentException e) { + } + for (final int id : new HashSet<Integer>(set)) { + ids.deallocateId(id); + final int id2 = ids.allocateId(); + assertEquals("Reallocate same ID", id, id2); + } + for (final int id : set) { + ids.deallocateId(id); + } + try { + ids.deallocateId(Integer.MAX_VALUE); + fail("Expected IllegalArgumentException for an ID that is not present"); + } catch (IllegalArgumentException e) { + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/ReplicaMasterStateTransitionsTest.java b/test/com/sleepycat/je/rep/impl/node/ReplicaMasterStateTransitionsTest.java new file mode 100644 index 0000000..70418cc --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/ReplicaMasterStateTransitionsTest.java @@ -0,0 +1,98 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.TimeUnit; + +import org.junit.Test; + +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.CommitPointConsistencyPolicy; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.util.FileHandler; +import com.sleepycat.je.utilint.PollCondition; + +public class ReplicaMasterStateTransitionsTest extends RepTestBase { + + /* + * This test was motivated by SR 18212. In this test node 1 starts out as a + * master, relinquishes mastership to node 2, and then tries to resume as a + * replica with node 2 as the master.
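+ *
+ * The final consistency check uses a commit token; condensed, with
+ * commitToken and replicaEnv standing in for the locals below:
+ *
+ *     TransactionConfig tc = new TransactionConfig();
+ *     tc.setConsistencyPolicy(new CommitPointConsistencyPolicy(
+ *         commitToken, 60, TimeUnit.SECONDS));
+ *     Transaction txn = replicaEnv.beginTransaction(null, tc);
+ *     // reads in txn now observe everything up to commitToken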
+ */ + @Test + public void testMasterReplicaTransition() + throws Throwable { + + FileHandler.STIFLE_DEFAULT_ERROR_MANAGER = true; + createGroup(); + ReplicatedEnvironment renv1 = repEnvInfo[0].getEnv(); + assertTrue(renv1.getState().isMaster()); + { + Transaction txn = + renv1.beginTransaction(null, RepTestUtils.SYNC_SYNC_ALL_TC); + renv1.openDatabase(txn, "db1", dbconfig).close(); + txn.commit(); + } + final ReplicatedEnvironment renv2 = repEnvInfo[1].getEnv(); + final RepNode rn2 = + RepInternal.getNonNullRepImpl(renv2).getRepNode(); + + assertFalse(renv2.getState().isMaster()); + rn2.forceMaster(true); + /* Verify handle has transitioned to master state. */ + assertTrue(new PollCondition(100, 60000) { + + @Override + protected boolean condition() { + return renv2.getState().isMaster(); + } + }.await()); + + /* Wait for replicas to join up prior to seeking ALL acks. */ + findMasterAndWaitForReplicas(60000, repEnvInfo.length - 1, repEnvInfo); + + renv1 = repEnvInfo[0].getEnv(); + + CommitToken db2CommitToken = null; + { + Transaction txn = + renv2.beginTransaction(null, RepTestUtils.SYNC_SYNC_ALL_TC); + renv2.openDatabase(txn, "db2", dbconfig).close(); + txn.commit(); + db2CommitToken = txn.getCommitToken(); + } + + /* + * Verify that the change was replayed at the replica via the + * replication stream. + */ + { + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setConsistencyPolicy + (new CommitPointConsistencyPolicy + (db2CommitToken, 60, TimeUnit.SECONDS)); + Transaction txn = renv1.beginTransaction(null, txnConfig); + assertTrue(renv1.getDatabaseNames().contains("db2")); + txn.commit(); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/ReplicaOutputThreadTest.java b/test/com/sleepycat/je/rep/impl/node/ReplicaOutputThreadTest.java new file mode 100644 index 0000000..3e20c94 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/ReplicaOutputThreadTest.java @@ -0,0 +1,240 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicInteger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.utilint.TestHook; + +/** + * + * Tests associated with the ReplicaOutputThread. 
+ */ +public class ReplicaOutputThreadTest extends RepTestBase { + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + } + + /** + * The test simulates network stalls via a hook and verifies that + * responses accumulate in the output queue and are correctly processed + * when the network stall clears out. + * + * @throws InterruptedException + */ + @Test + public void testWithGroupAckDisabled() throws InterruptedException { + internalTest(false); + } + + @Test + public void testWithGroupAckEnabled() throws InterruptedException { + internalTest(true); + } + + public void internalTest(Boolean groupacksEnabled) throws InterruptedException { + + RepTestUtils.setConfigParam(RepParams.ENABLE_GROUP_ACKS, + groupacksEnabled.toString(), repEnvInfo); + /* Increase the client timeout since the test throttles acks. */ + RepTestUtils.setConfigParam(RepParams.REPLICA_ACK_TIMEOUT, + "60 s", repEnvInfo); + createGroup(2); + + ReplicatedEnvironment menv = repEnvInfo[0].getEnv(); + assertEquals(State.MASTER, menv.getState()); + + populateDB(menv, 100); + + final RepNode replica = repEnvInfo[1].getRepNode(); + final ReplicaOutputThread rot = + replica.getReplica().getReplicaOutputThread(); + + int nThreads = 10; + OutputHook outputHook = new OutputHook(nThreads); + rot.setOutputHook(outputHook); + + /* Start parallel inserts into the database. */ + final InsertThread ithreads[] = new InsertThread[nThreads]; + for (int i=0; i < nThreads; i++) { + ithreads[i] = new InsertThread(menv); + ithreads[i].start(); + } + + /* Wait for parallel inserts to finish or fail. */ + for (int i=0; i < nThreads; i++) { + final InsertThread t = ithreads[i]; + t.join(60000); + assertTrue(! t.isAlive()); + assertNoException(t.e); + } + + assertNoException(outputHook.e); + + assertTrue(outputHook.done); + + if (groupacksEnabled) { + assertTrue(rot.getNumGroupedAcks() > 0); + } else { + assertEquals(0, rot.getNumGroupedAcks()); + } + } + + private void assertNoException(Exception e) { + assertTrue(((e != null) ? e.getMessage() : ""), e == null); + } + + /* Sequence used to generate keys in the thread below. */ + static final AtomicInteger keyGen = new AtomicInteger(0); + + /** + * Thread used to do parallel inserts. Each thread will block on the commit + * waiting for an ack, until the test hook lets them through. + */ + private class InsertThread extends Thread { + + final ReplicatedEnvironment env; + Exception e; + + InsertThread(ReplicatedEnvironment env) { + super(); + this.env = env; + } + + @Override + public void run() { + Database db = null; + + final TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setDurability(new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.ALL)); + Transaction txn = env.beginTransaction(null, txnConfig); + try { + db = env.openDatabase(txn, TEST_DB_NAME, dbconfig); + DatabaseEntry k = new DatabaseEntry(); + IntegerBinding.intToEntry(keyGen.incrementAndGet(), k); + db.put(txn, k, k); + /* Wait for acks here. */ + txn.commit(); + txn = null; + } catch (Exception ie) { + this.e = ie; + } finally { + if (txn != null) { + txn.abort(); + } + if (db != null) { + db.close(); + } + } + } + } + + /* + * Test hook inserted into ReplicaOutputThread to wait until the desired + * number of acks has queued up, after which it removes the stall and lets + * the responses go out onto the network.
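+ *
+ * Condensed view of the mechanism, using only names from this file:
+ *
+ *     OutputHook hook = new OutputHook(nThreads);
+ *     rot.setOutputHook(hook);   // doHook() stalls the output thread
+ *     // nThreads committers block awaiting acks; the hook releases
+ *     // once getOutputQueueSize() >= nThreads - 1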
+ private class OutputHook implements TestHook<Object> { + + final int queueSize; + volatile boolean done = false; + volatile Exception e; + + OutputHook(int queueSize) { + this.queueSize = queueSize; + } + + @Override + public void hookSetup() { + throw new UnsupportedOperationException(); + } + + @Override + public void doIOHook() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook() { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook(final Object rot) { + + try { + Callable<Boolean> predicate = new Callable<Boolean>() { + + @Override + public Boolean call() throws Exception { + /* + * -1 because the hook is called after an ack is + * removed from the queue. + */ + return (((ReplicaOutputThread) rot).getOutputQueueSize()) >= + (queueSize - 1); + } + + }; + + RepTestUtils.awaitCondition(predicate, 60000); + + } catch (Exception ie) { + e = ie; + } finally { + done = true; + } + } + + @Override + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + } + +} diff --git a/test/com/sleepycat/je/rep/impl/node/ReplicaTimeoutTest.java b/test/com/sleepycat/je/rep/impl/node/ReplicaTimeoutTest.java new file mode 100644 index 0000000..db81491 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/ReplicaTimeoutTest.java @@ -0,0 +1,143 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; + +public class ReplicaTimeoutTest extends RepTestBase { + + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + } + + @Test + public void testFeederHeartbeatTimeout() + throws InterruptedException { + + for (int i=1; i < groupSize; i++) { + /* + * Ensure that the replicas don't initiate a connection close due + * to inactivity. + */ + repEnvInfo[i].getRepConfig(). + setConfigParam(ReplicationConfig.REPLICA_TIMEOUT, "10000 s"); + } + + /* + * The feeder should time out the connection because of unresponsive + * replicas. Configure it for 5 seconds. + */ + repEnvInfo[0].getRepConfig(). + setConfigParam(RepParams.FEEDER_TIMEOUT.getName(), "5 s"); + + createGroup(); + ReplicatedEnvironment renv1 = repEnvInfo[0].getEnv(); + assertTrue(renv1.getState().isMaster()); + final FeederManager feederManager = + repEnvInfo[0].getRepNode().feederManager(); + + for (int i=1; i < groupSize; i++) { + /* + * Make the replicas unresponsive to heartbeat requests.
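+ * (Recap of the two knobs this test plays against each other, as set
+ * earlier with the same calls:
+ *
+ *     rc.setConfigParam(ReplicationConfig.REPLICA_TIMEOUT, "10000 s");
+ *     rc.setConfigParam(RepParams.FEEDER_TIMEOUT.getName(), "5 s");
+ *
+ * so only the feeder side gives up on the stalled connections.)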
*/ + repEnvInfo[i].getRepNode().getReplica().setTestDelayMs(60000); + } + + /* Wait a minute for the feeder connections to be shut down. */ + for (int i=0; i < 60; i++) { + if (feederManager.activeReplicaCount() == 0) { + break; + } + Thread.sleep(1000); + } + assertEquals(0, feederManager.activeReplicaCount()); + } + + /* + * Ensure that the replica times out and holds an election if it's inactive + * for the configured REPLICA_TIMEOUT (heartbeat) period. + */ + @Test + public void testReplicaHeartbeatTimeout() throws InterruptedException { + /* + * Slow down the heartbeat for the master, so it looks like a + * missing heartbeat on the replica side. + */ + repEnvInfo[0].getRepConfig(). + setConfigParam(RepParams.HEARTBEAT_INTERVAL.getName(), "60000"); + + for (int i = 1; i < groupSize; i++) { + + /* + * If the replica does not see a heartbeat in 5 seconds it will + * transition to the unknown state and hold an election. + */ + repEnvInfo[i].getRepConfig(). + setConfigParam(ReplicationConfig.REPLICA_TIMEOUT, "5 s"); + } + + createGroup(); + ReplicatedEnvironment renv1 = repEnvInfo[0].getEnv(); + assertTrue(renv1.getState().isMaster()); + + final CountDownLatch latch = new CountDownLatch(1); + repEnvInfo[1].getEnv(). + setStateChangeListener(new UnknownStateListener(latch)); + assertTrue(latch.await(60, TimeUnit.SECONDS)); + + /* + * Close the old master explicitly since it may be invalidated due to a + * MasterStateChangeException if the election resulted in a new master. + */ + repEnvInfo[0].closeEnv(); + } + + /* + * Listener used to trip the latch if the replica transitions through the + * unknown state. + */ + protected static class UnknownStateListener implements StateChangeListener { + final CountDownLatch unknownLatch; + + public UnknownStateListener(CountDownLatch unknownLatch) { + super(); + this.unknownLatch = unknownLatch; + } + + public void stateChange(StateChangeEvent stateChangeEvent) + throws RuntimeException { + + if (stateChangeEvent.getState().isUnknown()) { + unknownLatch.countDown(); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/UpdateJEVersionTest.java b/test/com/sleepycat/je/rep/impl/node/UpdateJEVersionTest.java new file mode 100644 index 0000000..29642d9 --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/UpdateJEVersionTest.java @@ -0,0 +1,398 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import static com.sleepycat.je.rep.impl.RepParams.TEST_JE_VERSION; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.impl.MinJEVersionUnsupportedException; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.stream.Protocol.FeederJEVersions; +import com.sleepycat.je.rep.utilint.BinaryProtocol.Message; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.TestAction; +import com.sleepycat.je.utilint.WaitTestHook; + +/** Test the RepNode.checkJEVersionSupport method. */ +public class UpdateJEVersionTest extends RepTestBase { + + private static final JEVersion RG_V3 = JEVersion.CURRENT_VERSION; + private static final JEVersion RG_V2 = + new JEVersion( + String.format( + "%d.%d.%d", + RepGroupImpl.FORMAT_VERSION_3_JE_VERSION.getMajor(), + RepGroupImpl.FORMAT_VERSION_3_JE_VERSION.getMinor(), + RepGroupImpl.FORMAT_VERSION_3_JE_VERSION.getPatch() - 1)); + private static final JEVersion RG_V3PLUS = + new JEVersion(String.format("%d.%d.%d", + RG_V3.getMajor(), + RG_V3.getMinor(), + RG_V3.getPatch() + 1)); + + /** + * Test that the RepGroupImpl object is stored in version 3 format by + * default. + */ + @Test + public void testRgv2Compatible() + throws Exception { + + createGroup(3); + final RepGroupImpl group = getMasterRepNode().getGroup(); + assertEquals(RepGroupImpl.FORMAT_VERSION_3, group.getFormatVersion()); + } + + @Test + public void testUpdateJEVersion() + throws Exception { + + /* + * Create the group with the JE version for RepGroupImpl version 2, + * which does not store replica JE version information persistently. + */ + setJEVersion(RG_V2, repEnvInfo); + createGroup(3); + + /* Test that RepGroupImpl version 2 is supported */ + RepNode masterRepNode = getMasterRepNode(); + masterRepNode.setMinJEVersion(RG_V2); + + /* + * Test that the JE version for RepGroupImpl version 3, which is the + * version that stores replica JE version information, is not + * supported. + */ + masterRepNode = getMasterRepNode(); + try { + masterRepNode.setMinJEVersion(RG_V3); + fail("Expected MinJEVersionUnsupportedException"); + } catch (MinJEVersionUnsupportedException e) { + logger.info("Not upgraded to " + RG_V3 + ": " + e); + } + + /* + * Restart just a quorum of the group using RepGroupImpl version 3, and + * test that version 3 is still not supported because the status of the + * offline node is not known. + */ + closeNodes(repEnvInfo); + setJEVersion(RG_V3, repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[1]); + masterRepNode = getMasterRepNode(); + try { + masterRepNode.setMinJEVersion(RG_V3); + fail("Expected MinJEVersionUnsupportedException"); + } catch (MinJEVersionUnsupportedException e) { + logger.info("Partially upgraded to " + RG_V3 + ": " + e); + } + + /* + * Restart the last node, and make sure the new version is now + * supported + */ + restartNodes(repEnvInfo[2]); + masterRepNode = getMasterRepNode(); + masterRepNode.setMinJEVersion(RG_V3); + + /* + * Make sure the RepGroupImpl version 3 changes are stored. 
Sync first + * to make sure the rep group format update sticks, and then restart + * and sync to get the persistent data for the nodes to be updated. + */ + RepTestUtils.syncGroupToLastCommit(repEnvInfo, 3); + for (int i = 0; i < 3; i++) { + assertEquals(RG_V3, repEnvInfo[i].getRepNode().getMinJEVersion()); + } + closeNodes(repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[1], repEnvInfo[2]); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, 3); + + /* + * Restart a quorum with a still newer version and make sure the + * changes were stored persistently for the offline node. + */ + closeNodes(repEnvInfo); + setJEVersion(RG_V3PLUS, repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[1]); + RepTestUtils.syncGroupToLastCommit( + new RepEnvInfo[] { repEnvInfo[0], repEnvInfo[1] }, 2); + masterRepNode = getMasterRepNode(); + try { + masterRepNode.setMinJEVersion(RG_V3PLUS); + fail("Expected MinJEVersionUnsupportedException"); + } catch (MinJEVersionUnsupportedException e) { + logger.info("Partially upgraded to " + RG_V3PLUS + ": " + e); + assertEquals("Node version not upgraded", RG_V3, e.nodeVersion); + } + + /* + * Restart a different quorum and add a secondary node running the + * older version. Since the changes were sync'ed, the fact that node 2 + * was upgraded should be known even though that node is currently + * offline, but the secondary node should still prevent the upgrade. + * Testing the version with only a subset of the nodes online should + * also confirm that attempting to update the latest JE version during + * the handshake does not require a quorum, which will not be available + * because the replica will not yet be online at that point. + */ + closeNodes(repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[2]); + setJEVersion(RG_V3, repEnvInfo[3]); + repEnvInfo[3].getRepConfig().setNodeType(NodeType.SECONDARY); + + /* + * Use a longer join wait time to allow the secondary to query the + * primaries a second time after the election is complete. See + * RepNode.MASTER_QUERY_INTERVAL. + */ + final long masterQueryInterval = 10000; + restartNodes(JOIN_WAIT_TIME + masterQueryInterval, repEnvInfo[3]); + masterRepNode = getMasterRepNode(); + final String secondaryNodeName = repEnvInfo[3].getEnv().getNodeName(); + try { + masterRepNode.setMinJEVersion(RG_V3PLUS); + fail("Expected MinJEVersionUnsupportedException"); + } catch (MinJEVersionUnsupportedException e) { + logger.info("Partially upgraded to " + RG_V3PLUS + ": " + e); + assertEquals("Wrong node", secondaryNodeName, e.nodeName); + } + + /* Close the secondary to enable new version */ + closeNodes(repEnvInfo[3]); + masterRepNode.setMinJEVersion(RG_V3PLUS); + + /* Restart node 2, just for completeness */ + restartNodes(repEnvInfo[1]); + masterRepNode = getMasterRepNode(); + masterRepNode.setMinJEVersion(RG_V3PLUS); + + /* + * Switch back to RepGroupImpl version 3, create a new node, and make + * sure that it cannot join. + */ + setJEVersion(RG_V3, repEnvInfo); + try { + repEnvInfo[4].openEnv(); + fail("Expected EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Node with old version can't join: " + e); + } + } + + /** + * Test that calling RepNode.setMinJEVersion on a replica will throw + * DatabaseException if it attempts to update the rep group DB. + */ + @Test + public void testUpdateJEVersionReplica() + throws Exception { + + /* + * Create the group with the JE version for RepGroupImpl version 3, + * which stores replica JE version information persistently. 
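+ *
+ * Expected failure shape, condensed (replicaRepNode stands in for any
+ * non-master RepNode below):
+ *
+ *     replicaRepNode.setMinJEVersion(RG_V3PLUS); // throws DatabaseException
+ *
+ * since recording a new minimum rewrites the rep group DB, which is
+ * writable only on the master.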
+ */ + createGroup(3); + RepNode masterRepNode = getMasterRepNode(); + masterRepNode.setMinJEVersion(RG_V3); + + /* Restart all nodes using a newer version */ + closeNodes(repEnvInfo); + setJEVersion(RG_V3PLUS, repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[1], repEnvInfo[2]); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, 3); + masterRepNode = getMasterRepNode(); + + /* Test that updating the minimum JE version on a replica fails */ + for (int i = 0; i < 3; i++) { + final RepNode repNode = repEnvInfo[i].getRepNode(); + if (repNode == masterRepNode) { + continue; + } + try { + repNode.setMinJEVersion(RG_V3PLUS); + fail("Expected DatabaseException"); + } catch (DatabaseException e) { + logger.info("No update on replica: " + e); + } + } + } + + @Test + public void testUpdateJEVersionDowngrade() + throws Exception { + + /* + * Create the group with the JE version for RepGroupImpl version 3, + * which stores replica JE version information persistently. + */ + createGroup(3); + RepNode masterRepNode = getMasterRepNode(); + masterRepNode.setMinJEVersion(RG_V3); + + /* + * Restart two nodes using a newer version and test that the group does + * not support that version. + */ + closeNodes(repEnvInfo); + setJEVersion(RG_V3PLUS, repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[1]); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, 2); + masterRepNode = getMasterRepNode(); + try { + masterRepNode.setMinJEVersion(RG_V3PLUS); + fail("Expected MinJEVersionUnsupportedException"); + } catch (MinJEVersionUnsupportedException e) { + logger.info("Partially upgraded to " + RG_V3PLUS + ": " + e); + assertEquals("Node version not upgraded", RG_V3, e.nodeVersion); + } + + /* Revert to the original version. */ + closeNodes(repEnvInfo[0], repEnvInfo[1]); + setJEVersion(RG_V3, repEnvInfo); + restartNodes(repEnvInfo[0], repEnvInfo[1]); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, 2); + masterRepNode = getMasterRepNode(); + try { + masterRepNode.setMinJEVersion(RG_V3PLUS); + fail("Expected MinJEVersionUnsupportedException"); + } catch (MinJEVersionUnsupportedException e) { + logger.info("Partially upgraded to " + RG_V3PLUS + ": " + e); + assertEquals("Node version not upgraded", RG_V3, e.nodeVersion); + } + + /* + * Close all three nodes, and restart a different two using the new + * version, confirming that the system remembers that other node was + * downgraded. + */ + closeNodes(repEnvInfo); + setJEVersion(RG_V3PLUS, repEnvInfo); + restartNodes(repEnvInfo[1], repEnvInfo[2]); + RepTestUtils.syncGroupToLastCommit( + new RepEnvInfo[] { repEnvInfo[1], repEnvInfo[2] }, 2); + masterRepNode = getMasterRepNode(); + try { + masterRepNode.setMinJEVersion(RG_V3PLUS); + fail("Expected MinJEVersionUnsupportedException"); + } catch (MinJEVersionUnsupportedException e) { + logger.info("Partially upgraded to " + RG_V3PLUS + ": " + e); + assertEquals("Node version not upgraded", RG_V3, e.nodeVersion); + } + + /* Restart first node and make sure new version is supported */ + restartNodes(repEnvInfo[0]); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, 3); + masterRepNode = getMasterRepNode(); + masterRepNode.setMinJEVersion(RG_V3PLUS); + } + + /** + * Test adding a secondary node running an older version just as the + * cluster's minimum JE version is being increased to make that version + * incompatible. This test checks that the logic in + * RepNode.setMinJEVersion performs the proper interlock for secondary + * nodes joining the cluster while the minimum JE version is being + * modified. 
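+ *
+ * <p>The intended interleaving, condensed (all names from this test):
+ * <pre>
+ * 1. The secondary's feeder handshake blocks before sending
+ *    FeederJEVersions (via the WaitTestHook).
+ * 2. The master raises the group minimum: setMinJEVersion(RG_V3PLUS).
+ * 3. The handshake resumes; the now-incompatible secondary fails with
+ *    EnvironmentFailureException.
+ * </pre>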
+ */ + @Test + public void testUpgradeOldSecondaryNodeJoinRace() + throws Exception { + + /* Create a version 3 cluster with three members */ + setJEVersion(RG_V3, repEnvInfo); + createGroup(3); + + /* Upgrade nodes to version 3+ */ + closeNodes(repEnvInfo); + setJEVersion(RG_V3PLUS, repEnvInfo[0], repEnvInfo[1], repEnvInfo[2]); + restartNodes(repEnvInfo[0], repEnvInfo[1], repEnvInfo[2]); + + final RepNode masterRepNode = getMasterRepNode(); + try { + + /* + * Hook to wait before sending the FeederJEVersions reply message + * during the secondary node handshake. + */ + final WaitTestHook<Message> writeMessageHook = + new WaitTestHook<Message>() { + @Override + public void doHook(final Message msg) { + if (msg instanceof FeederJEVersions) { + doHook(); + } + } + }; + Feeder.setInitialWriteMessageHook(writeMessageHook); + + /* Add a secondary node running the older version 3 */ + final TestAction addOldNode = new TestAction() { + @Override + protected void action() { + repEnvInfo[3].getRepConfig().setNodeType( + NodeType.SECONDARY); + repEnvInfo[3].openEnv(); + } + }; + addOldNode.start(); + + /* + * Wait until the node's feeder completes checking JE version + * compatibility and waits before continuing. + */ + writeMessageHook.awaitWaiting(10000); + + /* Increase the group's minimum JE version to 3+ */ + masterRepNode.setMinJEVersion(RG_V3PLUS); + + /* + * Continue activating the new node, which is now incompatible and + * should fail. + */ + writeMessageHook.stopWaiting(); + addOldNode.assertException( + 10000, EnvironmentFailureException.class); + + } finally { + Feeder.setInitialWriteMessageHook(null); + } + } + + private RepNode getMasterRepNode() { + return RepInternal.getNonNullRepImpl( + findMaster(repEnvInfo).getEnv()).getRepNode(); + } + + /** Set the JE version for the specified nodes. */ + private void setJEVersion(final JEVersion jeVersion, + final RepEnvInfo... nodes) { + assert jeVersion != null; + for (final RepEnvInfo node : nodes) { + node.getRepConfig().setConfigParam( + TEST_JE_VERSION.getName(), jeVersion.toString()); + } + } +} diff --git a/test/com/sleepycat/je/rep/impl/node/UpdateNodeAddressTest.java b/test/com/sleepycat/je/rep/impl/node/UpdateNodeAddressTest.java new file mode 100644 index 0000000..4693d4c --- /dev/null +++ b/test/com/sleepycat/je/rep/impl/node/UpdateNodeAddressTest.java @@ -0,0 +1,248 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.je.rep.impl.node; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.net.InetSocketAddress; +import java.util.HashSet; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.rep.MasterStateException; +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.ReplicaStateException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.util.ReplicationGroupAdmin; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Tests for the {@code updateAddress()} feature. + */ +public class UpdateNodeAddressTest extends TestBase { + /* Replication tests use multiple environments. */ + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + + public UpdateNodeAddressTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * Test that incorrect operations throw the correct exceptions. + */ + @Test + public void testUpdateExceptions() + throws Throwable { + + try { + /* Set up the replication group. */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + /* Create the ReplicationGroupAdmin. */ + HashSet<InetSocketAddress> helperSets = + new HashSet<InetSocketAddress>(); + helperSets.add(master.getRepConfig().getNodeSocketAddress()); + ReplicationGroupAdmin groupAdmin = + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + helperSets, + RepTestUtils.readRepNetConfig()); + + /* + * Try to update a nonexistent node; expect a + * MemberNotFoundException. + */ + try { + groupAdmin.updateAddress("node4", "localhost", 5004); + fail("Expected an exception here"); + } catch (MemberNotFoundException e) { + /* Expected exception. */ + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + + /* Try to update the master, expect a MasterStateException. */ + try { + groupAdmin.updateAddress + (master.getNodeName(), "localhost", 5004); + fail("Expected an exception here"); + } catch (MasterStateException e) { + /* Expected exception. */ + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + + /* + * Try to update a node that is still alive, expect + * ReplicaStateException. + */ + try { + groupAdmin.updateAddress + (repEnvInfo[2].getEnv().getNodeName(), + "localhost", 5004); + fail("Expected an exception here"); + } catch (ReplicaStateException e) { + /* Expected exception. */ + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /* + * Test that a node whose address has been updated can work after removing + * all its old log files. + */ + @Test + public void testUpdateAddressWithNoFormerLogs() + throws Throwable { + + doTest(true); + } + + /* + * Do the test. If deleteLogFiles is true, the former environment home of + * the node whose address is updated will be deleted.
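+ *
+ * The admin flow exercised below, condensed (name, host and newPort stand
+ * in for the locals):
+ *
+ *     node.closeEnv();                               // node must be down
+ *     groupAdmin.updateAddress(name, host, newPort); // rewrite group entry
+ *     // then restart with a ReplicationConfig carrying host:newPort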
*/ + private void doTest(boolean deleteLogFiles) + throws Throwable { + + try { + + /* + * Disable the LocalCBVLSN changes so that no + * InsufficientLogException will be thrown when restarting the node + * whose address has been updated. + */ + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + + /* Create the replication group. */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + /* Create the ReplicationGroupAdmin. */ + HashSet<InetSocketAddress> helperSets = + new HashSet<InetSocketAddress>(); + helperSets.add(master.getRepConfig().getNodeSocketAddress()); + ReplicationGroupAdmin groupAdmin = + new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME, + helperSets, + RepTestUtils.readRepNetConfig()); + + /* Shutdown node3. */ + InetSocketAddress nodeAddress = + repEnvInfo[2].getRepConfig().getNodeSocketAddress(); + String nodeName = repEnvInfo[2].getEnv().getNodeName(); + File envHome = repEnvInfo[2].getEnv().getHome(); + repEnvInfo[2].closeEnv(); + + /* Update the address for node3. */ + try { + groupAdmin.updateAddress(nodeName, + nodeAddress.getHostName(), + nodeAddress.getPort() + 1); + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + + /* Restarting node3 will get an EnvironmentFailureException. */ + try { + repEnvInfo[2].openEnv(); + fail("Expected an exception here."); + } catch (EnvironmentFailureException e) { + /* Expected exception. */ + assertEquals(EnvironmentFailureReason.HANDSHAKE_ERROR, + e.getReason()); + } catch (Exception e) { + fail("Unexpected exception: " + e); + } + + /* + * Delete all files in node3's environment home so that node3 can + * restart as a fresh new node. + */ + assertTrue(envHome.exists()); + + /* Delete the former log files if we'd like to. */ + if (deleteLogFiles) { + for (File file : envHome.listFiles()) { + /* Don't delete the je.properties. */ + if (file.getName().contains("properties")) { + continue; + } + + assertTrue(file.isFile()); + assertTrue(file.delete()); + } + } + + /* Reset the ReplicationConfig and restart again. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setNodeName(nodeName); + repConfig.setGroupName(RepTestUtils.TEST_REP_GROUP_NAME); + repConfig.setNodeHostPort(nodeAddress.getHostName() + ":" + + (nodeAddress.getPort() + 1)); + repConfig.setHelperHosts(master.getRepConfig().getNodeHostPort()); + + ReplicatedEnvironment replica = null; + try { + replica = + new ReplicatedEnvironment(envHome, repConfig, envConfig); + } catch (Exception e) { + fail("Unexpected exception: " + e); + } finally { + if (replica != null) { + replica.close(); + } + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /* + * Test that an address update works even when the node reuses its old log + * files. + */ + @Test + public void testUpdateAddressWithFormerLogs() + throws Throwable { + + doTest(false); + } +} diff --git a/test/com/sleepycat/je/rep/jmx/RepJEDiagnosticsTest.java b/test/com/sleepycat/je/rep/jmx/RepJEDiagnosticsTest.java new file mode 100644 index 0000000..543ed87 --- /dev/null +++ b/test/com/sleepycat/je/rep/jmx/RepJEDiagnosticsTest.java @@ -0,0 +1,66 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.jmx; + +import java.io.File; + +import javax.management.DynamicMBean; + +import org.junit.After; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Test RepJEDiagnostics. + */ +public class RepJEDiagnosticsTest extends com.sleepycat.je.jmx.JEDiagnosticsTest { + private File envRoot; + private RepEnvInfo[] repEnvInfo; + + public RepJEDiagnosticsTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + @Override + protected DynamicMBean createMBean(Environment env) { + return new RepJEDiagnostics(env); + } + + @Override + protected Environment openEnv() + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2, envConfig); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + return master; + } +} diff --git a/test/com/sleepycat/je/rep/jmx/RepJEMonitorTest.java b/test/com/sleepycat/je/rep/jmx/RepJEMonitorTest.java new file mode 100644 index 0000000..42eb3bb --- /dev/null +++ b/test/com/sleepycat/je/rep/jmx/RepJEMonitorTest.java @@ -0,0 +1,67 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.jmx; + +import java.io.File; + +import javax.management.DynamicMBean; + +import org.junit.After; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Test RepJEMonitor. 
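+ *
+ * <p>Illustration only, not part of this test: such an MBean would
+ * typically be registered with the platform MBean server (the ObjectName
+ * string below is a made-up example):
+ * <pre>
+ * DynamicMBean mbean = new RepJEMonitor(env);
+ * ManagementFactory.getPlatformMBeanServer().registerMBean(
+ *     mbean, new ObjectName("com.sleepycat.je:type=RepJEMonitor"));
+ * </pre>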
+ */ +public class RepJEMonitorTest extends com.sleepycat.je.jmx.JEMonitorTest { + private static final boolean DEBUG = false; + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + + public RepJEMonitorTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + @Override + protected DynamicMBean createMBean(Environment env) { + return new RepJEMonitor(env); + } + + @Override + protected Environment openEnv(boolean openTransactionally) + throws Exception { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(openTransactionally); + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2, envConfig); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + return master; + } +} diff --git a/test/com/sleepycat/je/rep/monitor/MonitorChangeListenerNoEventsTest.java b/test/com/sleepycat/je/rep/monitor/MonitorChangeListenerNoEventsTest.java new file mode 100644 index 0000000..0dfc7d0 --- /dev/null +++ b/test/com/sleepycat/je/rep/monitor/MonitorChangeListenerNoEventsTest.java @@ -0,0 +1,80 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.utilint.TestHookAdapter; + +/** + * Perform the tests from MonitorChangeListenerTest, but with event delivery + * disabled, to test in the presence of network failures. + */ +public class MonitorChangeListenerNoEventsTest + extends MonitorChangeListenerTest { + + @BeforeClass + public static void classSetup() { + MonitorService.processGroupChangeHook = + new TestHookAdapter<GroupChangeEvent>() { + @Override + public void doHook(GroupChangeEvent event) { + ReplicationNode node; + try { + node = event.getRepGroup().getMember(event.getNodeName()); + } catch (MemberNotFoundException e) { + node = null; + } + /* + * Deliver monitor events because they are only generated + * locally and are not simulated by pings. + */ + if ((node != null) && !node.getType().isMonitor()) { + throw new IllegalStateException("don't deliver"); + } + } + }; + MonitorService.processJoinGroupHook = + new TestHookAdapter<JoinGroupEvent>() { + @Override + public void doHook(JoinGroupEvent event) { + throw new IllegalStateException("don't deliver"); + } + }; + MonitorService.processLeaveGroupHook = + new TestHookAdapter<LeaveGroupEvent>() { + @Override + public void doHook(LeaveGroupEvent event) { + throw new IllegalStateException("don't deliver"); + } + }; + } + + @AfterClass + public static void classCleanup() { + MonitorService.processGroupChangeHook = null; + MonitorService.processJoinGroupHook = null; + MonitorService.processLeaveGroupHook = null; + } + + /** + * When event delivery is disabled, the ping produces the leave event, but + * it uses a different LeaveReason, so disable this check.
+ */ + void checkShutdownReplicaLeaveReason(final LeaveGroupEvent event) { + } +} diff --git a/test/com/sleepycat/je/rep/monitor/MonitorChangeListenerTest.java b/test/com/sleepycat/je/rep/monitor/MonitorChangeListenerTest.java new file mode 100644 index 0000000..d26a906 --- /dev/null +++ b/test/com/sleepycat/je/rep/monitor/MonitorChangeListenerTest.java @@ -0,0 +1,368 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.concurrent.CountDownLatch; + +import org.junit.Test; + +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +/** + * Test the MonitorChangeListener behaviors. + */ +public class MonitorChangeListenerTest extends MonitorTestBase { + private TestChangeListener testListener; + + /** + * Test basic behaviors of MonitorChangeListener. + */ + @Test + public void testBasicBehaviors() + throws Exception { + + checkGroupStart(); + + /* + * Close the master, expect to see a NewMasterEvent and a + * LeaveGroupEvent. + */ + testListener.masterBarrier = new CountDownLatch(1); + testListener.leaveGroupBarrier = new CountDownLatch(1); + repEnvInfo[0].closeEnv(); + /* Wait for a LeaveGroupEvent. */ + testListener.awaitEvent(testListener.leaveGroupBarrier); + /* Wait for elections to settle down. */ + testListener.awaitEvent(testListener.masterBarrier); + + /* Do the check. */ + assertEquals(1, testListener.getMasterEvents()); + assertEquals(1, testListener.getLeaveGroupEvents()); + assertTrue(!repEnvInfo[0].getRepConfig().getNodeName().equals + (testListener.masterNodeName)); + + /* + * Shutdown all the replicas, see if it generates the expected number + * of LeaveGroupEvents. + */ + testListener.clearLeaveGroupEvents(); + shutdownReplicasNormally(); + } + + /* Check the monitor events during the group start up. */ + private void checkGroupStart() + throws Exception { + + repEnvInfo[0].openEnv(); + RepNode master = repEnvInfo[0].getRepNode(); + assertTrue(master.isMaster()); + + testListener = new TestChangeListener(); + testListener.masterBarrier = new CountDownLatch(1); + testListener.groupBarrier = new CountDownLatch(1); + + /* + * Start the listener first, so the Listener is guaranteed to get + * the group change event when the monitor is registered. + */ + + /* generates sync master change event */ + monitor.startListener(testListener); + /* generates async group change event */ + monitor.register(); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Make sure it fires a NewMasterEvent, and do the check. 
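+         * awaitEvent() waits on the corresponding CountDownLatch for up to
+         * 30 seconds and fails the test if the event never arrives.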
*/ + testListener.awaitEvent(testListener.masterBarrier); + assertEquals(1, testListener.getMasterEvents()); + NewMasterEvent masterEvent = testListener.masterEvent; + assertEquals(masterEvent.getNodeName(), master.getNodeName()); + assertEquals(masterEvent.getMasterName(), master.getNodeName()); + + /* Adding a monitor fires an ADD GroupChangeEvent, do check. */ + testListener.awaitEvent(testListener.groupBarrier); + assertEquals(1, testListener.getGroupAddEvents()); + GroupChangeEvent groupEvent = testListener.groupEvent; + assertEquals(monitor.getNodeName(), groupEvent.getNodeName()); + + /* Get the JoinGroupEvents for current active node: master. */ + assertEquals(1, testListener.getJoinGroupEvents()); + JoinGroupEvent joinEvent = testListener.joinEvent; + assertEquals(master.getNodeName(), joinEvent.getNodeName()); + assertEquals(master.getNodeName(), joinEvent.getMasterName()); + + testListener.clearMasterEvents(); + testListener.clearJoinGroupEvents(); + testListener.clearGroupAddEvents(); + for (int i = 1; i < repEnvInfo.length; i++) { + testListener.groupBarrier = new CountDownLatch(1); + testListener.joinGroupBarrier = new CountDownLatch(1); + repEnvInfo[i].openEnv(); + String nodeName = repEnvInfo[i].getEnv().getNodeName(); + testListener.awaitEvent(nodeName, testListener.groupBarrier); + /* Wait for a JoinGroupEvent. */ + testListener.awaitEvent(nodeName, testListener.joinGroupBarrier); + /* No change in master. */ + assertEquals(0, testListener.getMasterEvents()); + assertEquals(i, testListener.getGroupAddEvents()); + assertEquals(i, testListener.getJoinGroupEvents()); + + /* Do the GroupChangeEvent check. */ + groupEvent = testListener.groupEvent; + assertEquals(nodeName, groupEvent.getNodeName()); + assertEquals(groupEvent.getRepGroup().getNodes().size(), i + 2); + + /* Do the JoinGroupEvent check. */ + joinEvent = testListener.joinEvent; + assertEquals(nodeName, joinEvent.getNodeName()); + assertEquals(master.getNodeName(), joinEvent.getMasterName()); + } + } + + /* + * Shutdown all the replicas normally, do not shutdown the master before + * shutting down all replicas so that there is no NewMasterEvent + * generated during this process. + */ + private void shutdownReplicasNormally() + throws Exception { + + RepEnvInfo master = null; + int shutdownReplicas = 0; + for (RepEnvInfo repInfo : repEnvInfo) { + ReplicatedEnvironment env = repInfo.getEnv(); + if ((env == null) || !env.isValid()) { + continue; + } + if (env.getState().isMaster()) { + master = repInfo; + continue; + } + shutdownReplicas++; + shutdownReplicaAndDoCheck(repInfo, shutdownReplicas); + } + + /* Shutdown the master. */ + if (master != null) { + shutdownReplicas++; + shutdownReplicaAndDoCheck(master, shutdownReplicas); + } + } + + /* Shutdown a replica and do the check. */ + private void shutdownReplicaAndDoCheck(RepEnvInfo replica, + int index) + throws Exception { + + testListener.leaveGroupBarrier = new CountDownLatch(1); + String nodeName = replica.getEnv().getNodeName(); + replica.closeEnv(); + testListener.awaitEvent(nodeName, testListener.leaveGroupBarrier); + + /* Do the check. 
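+         * The LeaveGroupEvent should name the node that was just closed, and
+         * for a clean close the LeaveReason should be NORMAL_SHUTDOWN (see
+         * checkShutdownReplicaLeaveReason below).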
*/ + LeaveGroupEvent event = testListener.leaveEvent; + assertEquals(index, testListener.getLeaveGroupEvents()); + assertEquals(nodeName, event.getNodeName()); + + checkShutdownReplicaLeaveReason(event); + } + + void checkShutdownReplicaLeaveReason(final LeaveGroupEvent event) { + assertEquals(LeaveReason.NORMAL_SHUTDOWN, event.getLeaveReason()); + } + + /** + * Test removeMember which would create GroupChangeEvent, but no + * LeaveGroupEvents. + */ + @Test + public void testRemoveMember() + throws Exception { + + checkGroupStart(); + + RepNode master = repEnvInfo[0].getRepNode(); + assertTrue(master.isMaster()); + + /* + * Remove replica from RepGroupDB, see if it fires a REMOVE + * GroupChangeEvent. + */ + testListener.clearGroupAddEvents(); + testListener.clearLeaveGroupEvents(); + for (int i = 1; i < repEnvInfo.length; i++) { + testListener.groupBarrier = new CountDownLatch(1); + String nodeName = repEnvInfo[i].getRepNode().getNodeName(); + master.removeMember(nodeName); + testListener.awaitEvent(nodeName, testListener.groupBarrier); + assertEquals(0, testListener.getGroupAddEvents()); + assertEquals(i, testListener.getGroupRemoveEvents()); + assertEquals(nodeName, testListener.groupEvent.getNodeName()); + } + assertEquals(0, testListener.getLeaveGroupEvents()); + + /* + * Shutdown all the replicas, see if it generates the expected number + * of LeaveGroupEvents. + */ + shutdownReplicasNormally(); + } + + @Test + public void testActiveNodesWhenMonitorStarts() + throws Exception { + + RepTestUtils.joinGroup(repEnvInfo); + testListener = new TestChangeListener(); + /* generates sync master change event */ + monitor.startListener(testListener); + /* generates async group change event */ + monitor.register(); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + assertEquals(1, testListener.getMasterEvents()); + assertEquals(5, testListener.getJoinGroupEvents()); + JoinGroupEvent event = testListener.joinEvent; + assertEquals + (repEnvInfo[0].getEnv().getNodeName(), event.getMasterName()); + + shutdownReplicasNormally(); + } + + /** + * Test monitor events when adding a secondary node. 
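+     *
+     * As the assertions below expect, adding a SECONDARY node produces only a
+     * JoinGroupEvent (and later a LeaveGroupEvent), with no ADD or REMOVE
+     * GroupChangeEvent, evidently because secondary nodes are not stored in
+     * the group membership database. A sketch of the configuration step that
+     * makes a node secondary:
+     * <pre>
+     * repConfig.setNodeType(NodeType.SECONDARY);
+     * </pre>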
+ */ + @Test + public void testAddSecondaryNode() + throws Exception { + + checkGroupStart(); + final RepNode master = repEnvInfo[0].getRepNode(); + assertTrue("Master", master.isMaster()); + + testListener.clearGroupAddEvents(); + testListener.clearGroupRemoveEvents(); + testListener.clearJoinGroupEvents(); + testListener.clearLeaveGroupEvents(); + testListener.joinGroupBarrier = new CountDownLatch(1); + + /* Create a new secondary */ + final int pos = repEnvInfo.length; + repEnvInfo = Arrays.copyOf(repEnvInfo, pos + 1); + repEnvInfo[pos] = RepTestUtils.setupEnvInfo( + RepTestUtils.makeRepEnvDir(envRoot, pos), + RepTestUtils.createEnvConfig(RepTestUtils.DEFAULT_DURABILITY), + RepTestUtils.createRepConfig(pos + 1).setNodeType( + NodeType.SECONDARY), + repEnvInfo[0]); + repEnvInfo[pos].openEnv(); + final String nodeName = repEnvInfo[pos].getEnv().getNodeName(); + + testListener.awaitEvent(testListener.joinGroupBarrier); + assertEquals("Add events", 0, testListener.getGroupAddEvents()); + assertEquals("Remove events", 0, testListener.getGroupRemoveEvents()); + assertEquals("Join events", 1, testListener.getJoinGroupEvents()); + assertEquals("Join event node name", + nodeName, testListener.joinEvent.getNodeName()); + assertEquals("Leave events", 0, testListener.getLeaveGroupEvents()); + + testListener.clearGroupAddEvents(); + testListener.clearGroupRemoveEvents(); + testListener.clearJoinGroupEvents(); + testListener.clearLeaveGroupEvents(); + testListener.leaveGroupBarrier = new CountDownLatch(1); + + /* Close secondary */ + repEnvInfo[pos].closeEnv(); + + testListener.awaitEvent(testListener.leaveGroupBarrier); + assertEquals("Add events", 0, testListener.getGroupAddEvents()); + assertEquals("Remove events", 0, testListener.getGroupRemoveEvents()); + assertEquals("Join events", 0, testListener.getJoinGroupEvents()); + assertEquals("Leave events", 1, testListener.getLeaveGroupEvents()); + assertEquals("Leave event node name", + nodeName, testListener.leaveEvent.getNodeName()); + testListener.clearLeaveGroupEvents(); + + /* Shutdown */ + shutdownReplicasNormally(); + } + + /** + * Test monitor events when replacing a primary replica with a secondary + * node that reuses its environment. 
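+     *
+     * The sequence exercised below: remove the electable replica from the
+     * group, close its environment, then reopen the same environment
+     * directory under a new node name with NodeType.SECONDARY, and verify
+     * that only a JoinGroupEvent is delivered for the new secondary node.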
+ */ + @Test + public void testReplaceAsSecondaryNode() + throws Exception { + + checkGroupStart(); + final RepNode master = repEnvInfo[0].getRepNode(); + assertTrue("Master", master.isMaster()); + + /* Remove replica */ + testListener.clearGroupAddEvents(); + testListener.clearGroupRemoveEvents(); + testListener.clearJoinGroupEvents(); + testListener.clearLeaveGroupEvents(); + testListener.groupBarrier = new CountDownLatch(1); + testListener.leaveGroupBarrier = new CountDownLatch(1); + master.removeMember(repEnvInfo[1].getRepNode().getNodeName()); + testListener.awaitEvent(testListener.groupBarrier); + assertEquals("Add events", 0, testListener.getGroupAddEvents()); + assertEquals("Remove events", 1, testListener.getGroupRemoveEvents()); + assertEquals("Remove event node name", + repEnvInfo[1].getEnv().getNodeName(), + testListener.groupEvent.getNodeName()); + assertEquals("Join events", 0, testListener.getJoinGroupEvents()); + assertEquals("Leave events", 0, testListener.getLeaveGroupEvents()); + + /* Close environment */ + String closedName = repEnvInfo[1].getEnv().getNodeName(); + repEnvInfo[1].closeEnv(); + testListener.awaitEvent(testListener.leaveGroupBarrier); + assertEquals(closedName, testListener.leaveEvent.getNodeName()); + + /* Open replica as a (new) secondary */ + testListener.clearGroupAddEvents(); + testListener.clearGroupRemoveEvents(); + testListener.clearJoinGroupEvents(); + testListener.clearLeaveGroupEvents(); + testListener.joinGroupBarrier = new CountDownLatch(1); + repEnvInfo[1].getRepConfig() + .setNodeName("Node2-secondary") + .setNodeType(NodeType.SECONDARY); + repEnvInfo[1].openEnv(); + testListener.awaitEvent(testListener.joinGroupBarrier); + assertEquals("Add events", 0, testListener.getGroupAddEvents()); + assertEquals("Remove events", 0, testListener.getGroupRemoveEvents()); + assertEquals("Join events", 1, testListener.getJoinGroupEvents()); + assertEquals("Join event node name", + repEnvInfo[1].getEnv().getNodeName(), + testListener.joinEvent.getNodeName()); + assertEquals("Leave events", 0, testListener.getLeaveGroupEvents()); + + /* Shutdown */ + shutdownReplicasNormally(); + } +} diff --git a/test/com/sleepycat/je/rep/monitor/MonitorTest.java b/test/com/sleepycat/je/rep/monitor/MonitorTest.java new file mode 100644 index 0000000..d38ff14 --- /dev/null +++ b/test/com/sleepycat/je/rep/monitor/MonitorTest.java @@ -0,0 +1,263 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.monitor;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import static com.sleepycat.je.rep.impl.RepParams.TEST_JE_VERSION;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Properties;
+
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.ReplicationGroup;
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.ReplicationNode;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.RepGroupProtocol;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+public class MonitorTest extends MonitorTestBase {
+
+    /**
+     * Test the direct API calls to get the master and group.
+     */
+    @Test
+    public void testMonitorDirect()
+        throws Exception {
+
+        /* Add a secondary node */
+        repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1);
+        repEnvInfo[repEnvInfo.length-1].getRepConfig().setNodeType(
+            NodeType.SECONDARY);
+
+        repEnvInfo[0].openEnv();
+        RepNode master =
+            RepInternal.getNonNullRepImpl(repEnvInfo[0].getEnv()).getRepNode();
+        assertTrue(master.isMaster());
+        String masterNodeName = master.getNodeName();
+        assertEquals(masterNodeName, monitor.getMasterNodeName());
+
+        for (int i = 1; i < repEnvInfo.length; i++) {
+            repEnvInfo[i].openEnv();
+            Thread.sleep(1000);
+            assertEquals(masterNodeName, monitor.getMasterNodeName());
+            ReplicationGroup group = monitor.getGroup();
+            assertEquals(master.getGroup(),
+                         RepInternal.getRepGroupImpl(group));
+        }
+        repEnvInfo[0].closeEnv();
+        /* Wait for elections to settle down. */
+        Thread.sleep(10000);
+        assertTrue(!masterNodeName.equals(monitor.getMasterNodeName()));
+    }
+
+    /**
+     * Make sure the code snippet in the class javadoc compiles.
+     * @throws IOException
+     * @throws DatabaseException
+     */
+    @SuppressWarnings("hiding")
+    @Test
+    public void testJavadoc()
+        throws DatabaseException, IOException {
+
+        // Initialize the monitor node config
+        try {
+            MonitorConfig monConfig = new MonitorConfig();
+            monConfig.setGroupName("PlanetaryRepGroup");
+            monConfig.setNodeName("mon1");
+            monConfig.setNodeHostPort("monhost1.acme.com:7000");
+            monConfig.setHelperHosts("mars.acme.com:5000,jupiter.acme.com:5000");
+            Monitor monitor = new Monitor(monConfig);
+
+            // If the monitor has not been registered as a member of the group,
+            // register it now. register() returns the current node that is the
+            // master.
+
+            @SuppressWarnings("unused")
+            ReplicationNode currentMaster = monitor.register();
+
+            // Start up the listener, so that it can be used to track changes
+            // in the master node, or group composition.
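+            // startListener() synchronously hands the listener the current
+            // master via a NewMasterEvent; subsequent group-composition
+            // changes arrive asynchronously.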
+ monitor.startListener(new MyChangeListener()); + } catch (IllegalArgumentException expected) { + } + } + + /* For javadoc and GSG */ + class MyChangeListener implements MonitorChangeListener { + + public void notify(NewMasterEvent newMasterEvent) { + + String newNodeName = newMasterEvent.getNodeName(); + + InetSocketAddress newMasterAddr = + newMasterEvent.getSocketAddress(); + String newMasterHostName = newMasterAddr.getHostName(); + int newMasterPort = newMasterAddr.getPort(); + + // Do something with this information here. + } + + public void notify(GroupChangeEvent groupChangeEvent) { + ReplicationGroup repGroup = groupChangeEvent.getRepGroup(); + + // Do something with the new ReplicationGroup composition here. + } + + public void notify(JoinGroupEvent joinGroupEvent) { + } + + public void notify(LeaveGroupEvent leaveGroupEvent) { + } + } + + /** Convert between ELECTABLE and MONITOR node types. */ + @Test + public void testConvertNodeType() + throws Exception { + + createGroup(); + + /* Convert ELECTABLE to MONITOR */ + repEnvInfo[1].closeEnv(); + final ReplicationConfig repConfig = repEnvInfo[1].getRepConfig(); + repConfig.setNodeType(NodeType.MONITOR); + try { + repEnvInfo[1].openEnv(); + fail("Convert ELECTABLE to MONITOR should throw" + + " EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Convert ELECTABLE to MONITOR: " + e); + } + + /* Convert ELECTABLE to Monitor. */ + final Properties accessProps = RepTestUtils.readNetProps(); + final Monitor monitor2 = new Monitor( + new MonitorConfig() + .setGroupName(repConfig.getGroupName()) + .setNodeName(repConfig.getNodeName()) + .setNodeHostPort(repConfig.getNodeHostPort()) + .setHelperHosts(repConfig.getHelperHosts()) + .setRepNetConfig(ReplicationNetworkConfig.create(accessProps))); + try { + monitor2.register(); + fail("Convert ELECTABLE to Monitor should throw" + + " EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Convert ELECTABLE to Monitor: " + e); + } + + /* Convert Monitor to ELECTABLE. */ + monitor.register(); + monitor.shutdown(); + final InetSocketAddress monitorAddress = + monitor.getMonitorSocketAddress(); + repConfig.setNodeName(monitor.getNodeName()) + .setNodeHostPort(monitorAddress.getHostName() + ":" + + monitorAddress.getPort()) + .setNodeType(NodeType.ELECTABLE); + try { + repEnvInfo[1].openEnv(); + fail("Convert Monitor to ELECTABLE should throw" + + " EnvironmentFailureException"); + } catch (EnvironmentFailureException e) { + logger.info("Convert Monitor to ELECTABLE: " + e); + } + } + + /** + * Attempt to connect an old monitor that doesn't understand secondary + * nodes to a group containing a secondary node to check that the version + * negotiation happens correctly. 
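+     *
+     * The old monitor is simulated by pinning the group protocol to
+     * RepGroupProtocol.REP_GROUP_V2_VERSION via setTestVersion(); a monitor
+     * speaking that version should see a group with no secondary nodes.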
+ */ + @Test + public void testOldMonitorSecondary() + throws Exception { + + /* Add a secondary node */ + repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1); + repEnvInfo[repEnvInfo.length-1].getRepConfig().setNodeType( + NodeType.SECONDARY); + + createGroup(); + monitor.shutdown(); + + /* Start a monitor using RepGroupImpl version 2 */ + final ReplicationConfig repConfig = repEnvInfo[0].getRepConfig(); + final int monitorPort = Integer.parseInt( + RepParams.DEFAULT_PORT.getDefault()) + 200; + RepGroupProtocol.setTestVersion(RepGroupProtocol.REP_GROUP_V2_VERSION); + final Properties accessProps = RepTestUtils.readNetProps(); + try { + monitor = new Monitor( + new MonitorConfig() + .setGroupName(repConfig.getGroupName()) + .setNodeName("oldMonitor") + .setNodeHostPort(RepTestUtils.TEST_HOST + ":" + monitorPort) + .setHelperHosts(repConfig.getHelperHosts()) + .setRepNetConfig(ReplicationNetworkConfig.create(accessProps))); + + monitor.register(); + final ReplicationGroup group = monitor.getGroup(); + + assertEquals("No secondary nodes expected", + 0, group.getSecondaryNodes().size()); + + } finally { + RepGroupProtocol.setTestVersion(null); + } + } + + /** + * Attempt to connect a new monitor to a cluster that doesn't understand + * secondary nodes, to make sure that the version negotiation happens + * correctly. + */ + @Test + public void testNewMonitor() + throws Exception { + + /* + * Use RepGroupImpl version 2, which doesn't understand secondary + * nodes. + */ + for (final RepEnvInfo node : repEnvInfo) { + node.getRepConfig().setConfigParam( + TEST_JE_VERSION.getName(), + RepGroupImpl.MIN_FORMAT_VERSION_JE_VERSION.toString()); + } + + createGroup(); + + monitor.register(); + final ReplicationGroup group = monitor.getGroup(); + + assertEquals("No secondary nodes expected", + 0, group.getSecondaryNodes().size()); + } +} diff --git a/test/com/sleepycat/je/rep/monitor/MonitorTestBase.java b/test/com/sleepycat/je/rep/monitor/MonitorTestBase.java new file mode 100644 index 0000000..7e20420 --- /dev/null +++ b/test/com/sleepycat/je/rep/monitor/MonitorTestBase.java @@ -0,0 +1,173 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.monitor; + +import static org.junit.Assert.assertEquals; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.junit.After; +import org.junit.Before; + +import com.sleepycat.je.rep.impl.RepTestBase; + +public class MonitorTestBase extends RepTestBase { + + /* The monitor being tested. 
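+     * Created in setUp() via createMonitor() and shut down in tearDown().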
+     */
+    protected Monitor monitor;
+
+    @Override
+    @Before
+    public void setUp()
+        throws Exception {

+        super.setUp();
+        monitor = createMonitor(100, "mon10000");
+    }
+
+    @Override
+    @After
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        monitor.shutdown();
+    }
+
+    protected class TestChangeListener implements MonitorChangeListener {
+        String masterNodeName;
+
+        volatile NewMasterEvent masterEvent;
+        volatile GroupChangeEvent groupEvent;
+        volatile JoinGroupEvent joinEvent;
+        volatile LeaveGroupEvent leaveEvent;
+
+        /* Statistics recording how many of each event have happened. */
+        private final AtomicInteger masterEvents = new AtomicInteger(0);
+        private final AtomicInteger groupAddEvents = new AtomicInteger(0);
+        private final AtomicInteger groupRemoveEvents = new AtomicInteger(0);
+        private final AtomicInteger joinGroupEvents = new AtomicInteger(0);
+        private final AtomicInteger leaveGroupEvents = new AtomicInteger(0);
+
+        /* Barriers used to wait for events to happen. */
+        volatile CountDownLatch masterBarrier;
+        volatile CountDownLatch groupBarrier;
+        volatile CountDownLatch joinGroupBarrier;
+        volatile CountDownLatch leaveGroupBarrier;
+
+        public TestChangeListener() {}
+
+        public void notify(NewMasterEvent newMasterEvent) {
+            logger.info("notify " + newMasterEvent);
+            masterEvents.incrementAndGet();
+            masterNodeName = newMasterEvent.getNodeName();
+            masterEvent = newMasterEvent;
+            countDownBarrier(masterBarrier);
+        }
+
+        public void notify(GroupChangeEvent groupChangeEvent) {
+            logger.info("notify " + groupChangeEvent);
+            switch (groupChangeEvent.getChangeType()) {
+                case ADD:
+                    groupAddEvents.incrementAndGet();
+                    break;
+                case REMOVE:
+                    groupRemoveEvents.incrementAndGet();
+                    break;
+                default:
+                    throw new IllegalStateException("Unexpected change type.");
+            }
+            groupEvent = groupChangeEvent;
+            countDownBarrier(groupBarrier);
+        }
+
+        public void notify(JoinGroupEvent joinGroupEvent) {
+            logger.info("notify " + joinGroupEvent);
+            joinGroupEvents.incrementAndGet();
+            joinEvent = joinGroupEvent;
+            countDownBarrier(joinGroupBarrier);
+        }
+
+        public void notify(LeaveGroupEvent leaveGroupEvent) {
+            logger.info("notify " + leaveGroupEvent);
+            leaveGroupEvents.incrementAndGet();
+            leaveEvent = leaveGroupEvent;
+            countDownBarrier(leaveGroupBarrier);
+        }
+
+        void awaitEvent(CountDownLatch latch)
+            throws InterruptedException {
+
+            awaitEvent(null, latch);
+        }
+
+        void awaitEvent(String message, CountDownLatch latch)
+            throws InterruptedException {
+
+            latch.await(30, TimeUnit.SECONDS);
+            assertEquals(((message != null) ?
+                          (message + ": ") : "") +
+                         "Events not received after timeout:",
+                         0, latch.getCount());
+        }
+
+        private void countDownBarrier(CountDownLatch barrier) {
+            if (barrier != null && barrier.getCount() > 0) {
+                barrier.countDown();
+            }
+        }
+
+        int getMasterEvents() {
+            return masterEvents.get();
+        }
+
+        void clearMasterEvents() {
+            masterEvents.set(0);
+        }
+
+        int getGroupAddEvents() {
+            return groupAddEvents.get();
+        }
+
+        void clearGroupAddEvents() {
+            groupAddEvents.set(0);
+        }
+
+        int getGroupRemoveEvents() {
+            return groupRemoveEvents.get();
+        }
+
+        void clearGroupRemoveEvents() {
+            groupRemoveEvents.set(0);
+        }
+
+        int getJoinGroupEvents() {
+            return joinGroupEvents.get();
+        }
+
+        void clearJoinGroupEvents() {
+            joinGroupEvents.set(0);
+        }
+
+        int getLeaveGroupEvents() {
+            return leaveGroupEvents.get();
+        }
+
+        void clearLeaveGroupEvents() {
+            leaveGroupEvents.set(0);
+        }
+
+    }
+}
diff --git a/test/com/sleepycat/je/rep/monitor/PingCommandTest.java b/test/com/sleepycat/je/rep/monitor/PingCommandTest.java
new file mode 100644
index 0000000..b412bed
--- /dev/null
+++ b/test/com/sleepycat/je/rep/monitor/PingCommandTest.java
@@ -0,0 +1,423 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.monitor;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashSet;
+import java.util.concurrent.CountDownLatch;
+
+import org.junit.Test;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.junit.JUnitProcessThread;
+import com.sleepycat.je.rep.AppStateMonitor;
+import com.sleepycat.je.rep.NodeState;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.impl.RepNodeImpl;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason;
+import com.sleepycat.je.rep.util.ReplicationGroupAdmin;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException;
+import com.sleepycat.utilint.StringUtils;
+
+/**
+ * Test the behavior of the underlying ping thread.
+ */
+public class PingCommandTest extends MonitorTestBase {
+    private static final String TEST_STRING = "Hello, Ping Command!";
+
+    /* Test that missed ADD GroupChangeEvents are fired. */
+    @Test
+    public void testMissedAddGroupChangeEvents()
+        throws Exception {
+
+        repEnvInfo[0].openEnv();
+
+        TestChangeListener listener = new TestChangeListener();
+        /* Disable notifying any MonitorChangeEvents. */
+        monitor.disableNotify(true);
+        monitor.startListener(listener);
+        monitor.register();
+
+        /* Make sure no events are fired yet: notification is disabled, and
+         * the master was already up when the listener was created.
+         */
+        assertEquals(0, listener.getGroupAddEvents());
+        assertEquals(0, listener.getJoinGroupEvents());
+
+        /*
+         * Await from here in case node 5 fires immediately after enabling
+         * event notification. Note that there will be only 4 ADD events
+         * because the listener will notice that node 1 is already present when
+         * it is created.
+         */
+        listener.groupBarrier = new CountDownLatch(4);
+        listener.joinGroupBarrier = new CountDownLatch(5);
+
+        repEnvInfo[1].openEnv();
+        repEnvInfo[2].openEnv();
+        repEnvInfo[3].openEnv();
+        repEnvInfo[4].openEnv();
+
+        /* Sleep a while to make sure no events are fired. */
+        Thread.sleep(10000);
+
+        assertEquals(0, listener.getGroupAddEvents());
+        assertEquals(0, listener.getJoinGroupEvents());
+
+        /* Enable notification. */
+        monitor.disableNotify(false);
+
+        listener.awaitEvent(listener.groupBarrier);
+        assertEquals(4, listener.getGroupAddEvents());
+
+        listener.awaitEvent(listener.joinGroupBarrier);
+        assertEquals(5, listener.getJoinGroupEvents());
+    }
+
+    /* Test that missed REMOVE GroupChangeEvents are fired. */
+    @Test
+    public void testMissedRemoveGroupChangeEvents()
+        throws Exception {
+
+        repEnvInfo[0].openEnv();
+        RepNode master = repEnvInfo[0].getRepNode();
+
+        TestChangeListener listener = new TestChangeListener();
+        listener.joinGroupBarrier = new CountDownLatch(1);
+        listener.groupBarrier = new CountDownLatch(1);
+        monitor.startListener(listener);
+        monitor.register();
+
+        /* Check that a JoinGroupEvent and an ADD GroupChangeEvent are
+         * fired. */
+        listener.awaitEvent(listener.joinGroupBarrier);
+        listener.awaitEvent(listener.groupBarrier);
+        assertEquals(1, listener.getJoinGroupEvents());
+        assertEquals(1, listener.getGroupAddEvents());
+
+        listener.joinGroupBarrier = new CountDownLatch(4);
+        listener.groupBarrier = new CountDownLatch(4);
+
+        /* Open the remaining four replicas. */
+        repEnvInfo[1].openEnv();
+        repEnvInfo[2].openEnv();
+        repEnvInfo[3].openEnv();
+        repEnvInfo[4].openEnv();
+
+        listener.awaitEvent(listener.joinGroupBarrier);
+        listener.awaitEvent(listener.groupBarrier);
+        /* 5 JoinGroupEvents for the 5 nodes. */
+        assertEquals(5, listener.getJoinGroupEvents());
+
+        /*
+         * 5 ADD GroupChangeEvents for the 5 nodes; note that the monitor
+         * does not belong to the electable nodes set, so we don't fire a
+         * missed ADD GroupChangeEvent for it.
+         */
+        assertEquals(5, listener.getGroupAddEvents());
+
+        /* Disable MonitorChangeEvent notification and remove nodes. */
+        monitor.disableNotify(true);
+
+        for (int i = 1; i < repEnvInfo.length; i++) {
+            master.removeMember(repEnvInfo[i].getRepNode().getNodeName());
+        }
+
+        assertEquals(0, listener.getGroupRemoveEvents());
+
+        /*
+         * Enable notification and make sure there are 4 missed REMOVE
+         * GroupChangeEvents.
+         */
+        listener.groupBarrier = new CountDownLatch(4);
+        monitor.disableNotify(false);
+        listener.awaitEvent(listener.groupBarrier);
+        assertEquals(4, listener.getGroupRemoveEvents());
+    }
+
+    /**
+     * Test that missed JoinGroupEvents are fired by the ping thread.
+     */
+    @Test
+    public void testMissedJoinGroupEvents()
+        throws Exception {
+
+        RepTestUtils.joinGroup(repEnvInfo);
+
+        TestChangeListener listener = new TestChangeListener();
+        /* Disable notifying JoinGroupEvents. */
+        monitor.disableNotify(true);
+        monitor.startListener(listener);
+        monitor.register();
+
+        /* Expect no JoinGroupEvents to have fired. */
+        assertEquals(0, listener.getJoinGroupEvents());
+
+        listener.joinGroupBarrier = new CountDownLatch(repEnvInfo.length);
+        /* Enable notifying JoinGroupEvents.
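+         * The ping thread should then deliver the missed JoinGroupEvents for
+         * every node that is already running.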
+         */
+        monitor.disableNotify(false);
+        listener.awaitEvent(listener.joinGroupBarrier);
+        /* Make sure all missed JoinGroupEvents are fired. */
+        assertEquals(groupSize, listener.getJoinGroupEvents());
+    }
+
+    /**
+     * Before the ping thread existed, an abnormal close would not fire a
+     * LeaveGroupEvent; now such events should be fired by the ping thread.
+     */
+    @Test
+    public void testAbnormalClose()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        final String masterName = master.getRepConfig().getNodeName();
+
+        TestChangeListener listener = new TestChangeListener();
+        monitor.startListener(listener);
+        monitor.register();
+        assertEquals(groupSize, listener.getJoinGroupEvents());
+
+        /* Abnormally shut down the nodes one by one. */
+        for (int i = repEnvInfo.length - 1; i >= 0; i--) {
+            listener.leaveGroupBarrier = new CountDownLatch(1);
+            final String nodeName = repEnvInfo[i].getRepConfig().getNodeName();
+            repEnvInfo[i].abnormalCloseEnv();
+            listener.awaitEvent(listener.leaveGroupBarrier);
+            assertEquals(LeaveReason.ABNORMAL_TERMINATION,
+                         listener.leaveEvent.getLeaveReason());
+            assertEquals(nodeName, listener.leaveEvent.getNodeName());
+            assertEquals(masterName, listener.leaveEvent.getMasterName());
+        }
+
+        /* Expect all LeaveGroupEvents to have fired. */
+        assertEquals(groupSize, listener.getLeaveGroupEvents());
+    }
+
+    /**
+     * Test the two new utility methods added to ReplicationGroupAdmin.
+     */
+    @Test
+    public void testReplicationGroupAdmin()
+        throws Exception {
+
+        testNewUtility(new TestAppStateMonitor());
+
+        RepTestUtils.shutdownRepEnvs(repEnvInfo);
+        RepTestUtils.removeRepEnvironments(envRoot);
+        monitor.shutdown();
+
+        repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, groupSize);
+        monitor = createMonitor(100, "mon10000");
+        testNewUtility(new NullAppStateMonitor());
+    }
+
+    /*
+     * Test the new utilities provided by ReplicationGroupAdmin with different
+     * AppStateMonitor implementations.
+     */
+    private void testNewUtility(AppStateMonitor stateMonitor)
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+        TestChangeListener listener = new TestChangeListener();
+        monitor.startListener(listener);
+        monitor.register();
+        assertEquals(groupSize, listener.getJoinGroupEvents());
+
+        for (int i = 0; i < groupSize; i++) {
+            repEnvInfo[i].getEnv().
+                registerAppStateMonitor(stateMonitor);
+        }
+
+        ReplicationConfig masterConfig = master.getRepConfig();
+        HashSet<InetSocketAddress> addresses =
+            new HashSet<InetSocketAddress>();
+        addresses.add(masterConfig.getNodeSocketAddress());
+        ReplicationGroupAdmin groupAdmin = new ReplicationGroupAdmin
+            (RepTestUtils.TEST_REP_GROUP_NAME, addresses,
+             RepTestUtils.readRepNetConfig());
+
+        for (int i = 0; i < groupSize; i++) {
+            ReplicationConfig repConfig = repEnvInfo[i].getRepConfig();
+            NodeState state =
+                groupAdmin.getNodeState(new RepNodeImpl(repConfig), 10000);
+            checkNodeState(state, i, stateMonitor);
+        }
+
+        for (int i = groupSize - 1; i >= 3; i--) {
+            ReplicationConfig config = repEnvInfo[i].getRepConfig();
+            repEnvInfo[i].closeEnv();
+
+            try {
+                groupAdmin.getNodeState(new RepNodeImpl(config), 10000);
+                fail("Expected exceptions here.");
+            } catch (IOException e) {
+                /* Expected exception. */
+            } catch (ServiceConnectFailedException e) {
+                /* Expected exception. */
+            }
+        }
+    }
+
+    /* Check whether the node state is correct.
+     */
+    private void checkNodeState(NodeState state,
+                                int index,
+                                AppStateMonitor stateMonitor)
+        throws Exception {
+
+        if (index != 0) {
+            assertTrue(state.getNodeState() == State.REPLICA);
+        } else {
+            assertTrue(state.getNodeState() == State.MASTER);
+        }
+
+        if (stateMonitor instanceof TestAppStateMonitor) {
+            assertTrue(StringUtils.fromUTF8(state.getAppState()).
+                       equals(TEST_STRING));
+        } else {
+            assertTrue(state.getAppState() == null);
+        }
+    }
+
+    /**
+     * Test whether the ping thread can notify a LeaveGroupEvent for a node
+     * that exits suddenly.
+     *
+     * The test performs the following steps:
+     * 1. Start a five-node group.
+     * 2. Start a process which adds a new node to the group.
+     * 3. Exit the process so that the new node is unreachable.
+     * 4. The ping thread should detect that a node has shut down abnormally.
+     */
+    @Test
+    public void testKillReplicas()
+        throws Throwable {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+        TestChangeListener listener = new TestChangeListener();
+        monitor.startListener(listener);
+        monitor.register();
+        assertEquals(groupSize, listener.getJoinGroupEvents());
+
+        /*
+         * Create the environment home for the new replica, and start it in a
+         * new process.
+         */
+        File envHome = RepTestUtils.makeRepEnvDir(envRoot, (groupSize + 1));
+        final String nodeName = "replica6";
+        String[] processCommands = new String[3];
+        processCommands[0] =
+            "com.sleepycat.je.rep.monitor.PingCommandTest$NodeProcess";
+        processCommands[1] = envHome.getAbsolutePath();
+        processCommands[2] = nodeName;
+
+        assertEquals(groupSize, listener.getJoinGroupEvents());
+
+        /* Start a new process. */
+        listener.joinGroupBarrier = new CountDownLatch(1);
+        listener.leaveGroupBarrier = new CountDownLatch(1);
+        JUnitProcessThread process =
+            new JUnitProcessThread(nodeName, processCommands);
+        process.start();
+        listener.awaitEvent(listener.joinGroupBarrier);
+        listener.awaitEvent(listener.leaveGroupBarrier);
+        process.finishTest();
+        assertEquals(2, process.getExitVal());
+
+        /*
+         * Expect to detect groupSize + 1 JoinGroupEvents, which means the
+         * replica in the process successfully joins the group.
+         */
+        assertEquals(groupSize + 1, listener.getJoinGroupEvents());
+
+        /* Expect to detect an abnormal LeaveGroupEvent. */
+        assertEquals(1, listener.getLeaveGroupEvents());
+        /* Make sure the LeaveGroupEvent was generated for replica6. */
+        assertEquals(LeaveReason.ABNORMAL_TERMINATION,
+                     listener.leaveEvent.getLeaveReason());
+        assertEquals(nodeName, listener.leaveEvent.getNodeName());
+        assertEquals(master.getRepConfig().getNodeName(),
+                     listener.leaveEvent.getMasterName());
+
+        /* Delete the environment home for the process. */
+        if (envHome.exists()) {
+            for (File file : envHome.listFiles()) {
+                file.delete();
+            }
+            envHome.delete();
+        }
+    }
+
+    /* A class implementing the AppStateMonitor used in the test. */
+    private class TestAppStateMonitor implements AppStateMonitor {
+        public byte[] getAppState() {
+            return StringUtils.toUTF8(TEST_STRING);
+        }
+    }
+
+    /* A class which returns a null application state. */
+    private class NullAppStateMonitor implements AppStateMonitor {
+        public byte[] getAppState() {
+            return null;
+        }
+    }
+
+    /* A process that starts a node and exits without closing the replica.
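+     * JUnitProcessThread launches this class's main() in a separate JVM, and
+     * System.exit(2) simulates the crash that the ping thread must detect.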
*/ + static class NodeProcess { + private final File envRoot; + private final String nodeName; + + public NodeProcess(File envRoot, String nodeName) { + this.envRoot = envRoot; + this.nodeName = nodeName; + } + + public void run() + throws Exception { + + ReplicationConfig repConfig = RepTestUtils.createRepConfig(6); + repConfig.setHelperHosts("localhost:5001"); + repConfig.setNodeName(nodeName); + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + ReplicatedEnvironment replica = + new ReplicatedEnvironment(envRoot, repConfig, envConfig); + assertTrue(replica.getState().isReplica()); + + /* Sleep a while. */ + Thread.sleep(10000); + + /* Crash the replica. */ + System.exit(2); + } + + public static void main(String args[]) + throws Exception { + + NodeProcess process = new NodeProcess(new File(args[0]), args[1]); + process.run(); + } + } +} diff --git a/test/com/sleepycat/je/rep/monitor/ProtocolTest.java b/test/com/sleepycat/je/rep/monitor/ProtocolTest.java new file mode 100644 index 0000000..5644ab3 --- /dev/null +++ b/test/com/sleepycat/je/rep/monitor/ProtocolTest.java @@ -0,0 +1,164 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.monitor; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.RepGroupImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.TextProtocol; +import com.sleepycat.je.rep.impl.TextProtocol.InvalidMessageException; +import com.sleepycat.je.rep.impl.TextProtocol.Message; +import com.sleepycat.je.rep.impl.TextProtocolTestBase; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.monitor.GroupChangeEvent.GroupChangeType; +import com.sleepycat.je.rep.monitor.LeaveGroupEvent.LeaveReason; +import com.sleepycat.je.rep.monitor.Protocol.GroupChange; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; + +public class ProtocolTest extends TextProtocolTestBase { + + private Protocol protocol; + private DataChannelFactory channelFactory; + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + channelFactory = + DataChannelFactoryBuilder.construct( + RepTestUtils.readRepNetConfig()); + protocol = + new Protocol(GROUP_NAME, new NameIdPair(NODE_NAME, 1), null, + channelFactory); + protocol.updateNodeIds(new HashSet + (Arrays.asList(new Integer(1)))); + } + + @Override + @After + public void tearDown() { + protocol = null; + } + + @Override + protected Message[] createMessages() { + Message[] messages = new Message [] { + protocol.new GroupChange( + new 
RepGroupImpl(GROUP_NAME, null), NODE_NAME, + GroupChangeType.ADD), + protocol.new JoinGroup(NODE_NAME, + null, + System.currentTimeMillis()), + protocol.new LeaveGroup(NODE_NAME, null, + LeaveReason.ABNORMAL_TERMINATION, + System.currentTimeMillis(), + System.currentTimeMillis()) + }; + + return messages; + } + + @Override + protected TextProtocol getProtocol() { + return protocol; + } + + /** + * Test parsing messages with version differences between the message and + * the protocol for the change to add the jeVersion field to the + * RepNodeImpl class. + */ + @Test + public void testJEVersionVersioning() + throws InvalidMessageException { + + /* New group format with JE version and new node types */ + final RepNodeImpl newNode = new RepNodeImpl( + new NameIdPair("m1", 1), NodeType.MONITOR, "localhost", 5000, + JEVersion.CURRENT_VERSION); + final RepNodeImpl secondaryNode = new RepNodeImpl( + new NameIdPair("s1", 2), NodeType.SECONDARY, "localhost", 5001, + JEVersion.CURRENT_VERSION); + final RepGroupImpl newGroup = new RepGroupImpl(GROUP_NAME, null); + final Map nodeMap = + new HashMap(); + nodeMap.put(1, newNode); + nodeMap.put(2, secondaryNode); + newGroup.setNodes(nodeMap); + + /* Old protocol using RepGroupImpl version 2 */ + final Protocol oldProtocol = + new Protocol(Protocol.REP_GROUP_V2_VERSION, GROUP_NAME, + new NameIdPair(NODE_NAME, 1), null, + channelFactory); + + /* Old group format with no JE version or new node types */ + final RepNodeImpl oldNode = new RepNodeImpl( + new NameIdPair("m1", 1), NodeType.MONITOR, "localhost", 5000, + null); + final RepGroupImpl oldGroup = + new RepGroupImpl(GROUP_NAME, newGroup.getUUID(), + RepGroupImpl.FORMAT_VERSION_2); + oldGroup.setNodes(Collections.singletonMap(1, oldNode)); + + /* Old message format, using new group format, to check conversion */ + final GroupChange oldGroupChange = oldProtocol.new GroupChange( + newGroup, NODE_NAME, GroupChangeType.ADD); + + /* Receive old format with old protocol */ + final GroupChange oldGroupChangeViaOld = + (GroupChange) oldProtocol.parse(oldGroupChange.wireFormat()); + assertEquals("Old message format via old protocol should use old" + + " group format", + oldGroup, oldGroupChangeViaOld.getGroup()); + + /* Receive old format with new protocol */ + final GroupChange oldGroupChangeViaNew = + (GroupChange) protocol.parse(oldGroupChange.wireFormat()); + assertEquals("Old message format via new protocol should use old" + + " group format", + oldGroup, oldGroupChangeViaNew.getGroup()); + + /* Receive new format with old protocol */ + final GroupChange newGroupChange = protocol.new GroupChange( + newGroup, NODE_NAME, GroupChangeType.ADD); + try { + oldProtocol.parse(newGroupChange.wireFormat()); + fail("Expected InvalidMessageException when old protocol" + + " receives new format message"); + } catch (InvalidMessageException e) { + assertEquals("New message format via old protocol should produce" + + " a version mismatch", + TextProtocol.MessageError.VERSION_MISMATCH, + e.getErrorType()); + } + } +} diff --git a/test/com/sleepycat/je/rep/node/replica/ReplayTest.java b/test/com/sleepycat/je/rep/node/replica/ReplayTest.java new file mode 100644 index 0000000..1b5c86e --- /dev/null +++ b/test/com/sleepycat/je/rep/node/replica/ReplayTest.java @@ -0,0 +1,179 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.node.replica; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.concurrent.TimeUnit; + +import org.junit.Test; + +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.CommitPointConsistencyPolicy; +import com.sleepycat.je.rep.DatabasePreemptedException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.VLSN; + +public class ReplayTest extends RepTestBase { + + static final String dbName = "ReplayTestDB"; + + /* + * Tests that a Replica correctly replays a transaction that was resumed + * after a syncup operation. + */ + @Test + public void testResumedTransaction() { + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + assertTrue(repEnvInfo[1].getEnv().getState().isReplica()); + ReplicatedEnvironment replica = repEnvInfo[1].getEnv(); + Transaction mt1 = master.beginTransaction(null, null); + String dbName1 = "DB1"; + Database db1m = master.openDatabase(mt1, dbName1, dbconfig); + + /* Leave the transaction open. */ + + /* Start a new transaction and get its commit token. */ + Transaction mt2 = master.beginTransaction(null, null); + String dbName2 = "DB2"; + Database db2 = master.openDatabase(mt1, dbName2, dbconfig); + db2.put(mt2, key, data); + db2.close(); + mt2.commit(); + CommitToken ct2 = mt2.getCommitToken(); + db1m.put(mt1, key, data); + + /* Sync replica to mt2, it contains the put of mt1 as well. */ + TransactionConfig rconfig = new TransactionConfig(); + rconfig.setConsistencyPolicy + (new CommitPointConsistencyPolicy(ct2, 60, TimeUnit.SECONDS)); + Transaction rt1 = replica.beginTransaction(null, rconfig); + rt1.commit(); + + /* Now shut down the replica, with mt1 still open. */ + repEnvInfo[1].closeEnv(); + + /* Reopen forcing a sync, rt1 must be resurrected */ + replica = repEnvInfo[1].openEnv(); + db1m.close(); + mt1.commit(); + CommitToken ct1 = mt1.getCommitToken(); + rconfig.setConsistencyPolicy + (new CommitPointConsistencyPolicy(ct1, 60, TimeUnit.SECONDS)); + + Transaction rt2 = null; + + rt2 = replica.beginTransaction(null, rconfig); + + DatabaseConfig dbrconfig = new DatabaseConfig(); + dbrconfig.setAllowCreate(false); + dbrconfig.setTransactional(true); + dbrconfig.setSortedDuplicates(false); + /* Check that rt1 came through and created the DB1 on the replica. 
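+         * (DB1 was created by transaction mt1 on the master; mt1 stayed open
+         * across the replica's syncup, and its replayed effects must be
+         * visible here once mt1 commits.)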
*/ + Database db1r = replica.openDatabase(rt2, dbName1, dbrconfig); + db1r.close(); + rt2.commit(); + } + + @Test + public void testBasicDatabaseOperations() + throws Exception { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + Environment menv = master; + + String truncDbName = "ReplayTestDBTrunc"; + String origDbName = "ReplayTestDBOrig"; + String newDbName = "ReplayTestDBNew"; + String removeDbName = "ReplayTestDBRemove"; + + // Create database + + menv.openDatabase(null, truncDbName, dbconfig).close(); + menv.openDatabase(null, origDbName, dbconfig).close(); + menv.openDatabase(null, removeDbName, dbconfig).close(); + + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* make sure they have all showed up. */ + dbconfig.setAllowCreate(false); + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + Environment renv = rep; + renv.openDatabase(null, truncDbName, dbconfig).close(); + renv.openDatabase(null, origDbName, dbconfig).close(); + renv.openDatabase(null, removeDbName, dbconfig).close(); + } + // Perform the operations on the master. + menv.truncateDatabase(null, truncDbName, false); + menv.renameDatabase(null, origDbName, newDbName); + menv.removeDatabase(null, removeDbName); + + VLSN commitVLSN = + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + + /* Verify the changes on the replicators. */ + for (RepEnvInfo repi : repEnvInfo) { + Environment renv = repi.getEnv(); + // the database should be found + renv.openDatabase(null, truncDbName, dbconfig).close(); + try { + renv.openDatabase(null, origDbName, dbconfig).close(); + fail("Expected DatabaseNotFoundException"); + } catch (DatabaseNotFoundException e) { + // expected + } + // renamed db should be found + renv.openDatabase(null, newDbName, dbconfig).close(); + try { + renv.openDatabase(null, removeDbName, dbconfig); + fail("Expected DatabaseNotFoundException"); + } catch (DatabaseNotFoundException e) { + // expected + } + } + RepTestUtils.checkNodeEquality(commitVLSN, false, repEnvInfo); + } + + @Test + public void testDatabaseOpContention() + throws Exception { + + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + Environment menv = master; + Environment renv = repEnvInfo[1].getEnv(); + + Database mdb = menv.openDatabase(null, dbName, dbconfig); + mdb.close(); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + Database rdb = renv.openDatabase(null, dbName, dbconfig); + menv.removeDatabase(null, dbName); + RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length); + try { + rdb.count(); + fail("Expected exception. Handle should have been invalidated"); + } catch (DatabasePreemptedException e) { + // expected + } + } +} diff --git a/test/com/sleepycat/je/rep/persist/test/AppBaseImpl.java b/test/com/sleepycat/je/rep/persist/test/AppBaseImpl.java new file mode 100644 index 0000000..99d012d --- /dev/null +++ b/test/com/sleepycat/je/rep/persist/test/AppBaseImpl.java @@ -0,0 +1,84 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.persist.test; + +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.evolve.Mutations; + +public abstract class AppBaseImpl implements AppInterface { + + protected int version; + protected ReplicatedEnvironment env; + protected EntityStore store; + private boolean doInitDuringOpen; + + public void setVersion(final int version) { + this.version = version; + } + + public void setInitDuringOpen(final boolean doInit) { + doInitDuringOpen = doInit; + } + + public void open(final ReplicatedEnvironment env) { + this.env = env; + StoreConfig config = + new StoreConfig().setAllowCreate(true).setTransactional(true); + Mutations mutations = new Mutations(); + EntityModel model = new AnnotationModel(); + if (doInitDuringOpen) { + setupConfig(mutations, model); + } + config.setMutations(mutations); + config.setModel(model); + store = new EntityStore(env, "foo", config); + if (doInitDuringOpen) { + init(); + } + } + + protected abstract void setupConfig(final Mutations mutations, + final EntityModel model); + + protected abstract void init(); + + public void close() { + store.close(); + } + + public void adopt(AppInterface other) { + version = other.getVersion(); + env = other.getEnv(); + store = other.getStore(); + if (doInitDuringOpen) { + init(); + } + } + + public ReplicatedEnvironment getEnv() { + return env; + } + + public EntityStore getStore() { + return store; + } + + public int getVersion() { + return version; + } +} diff --git a/test/com/sleepycat/je/rep/persist/test/AppImpl.java.0 b/test/com/sleepycat/je/rep/persist/test/AppImpl.java.0 new file mode 100644 index 0000000..5e1d10a --- /dev/null +++ b/test/com/sleepycat/je/rep/persist/test/AppImpl.java.0 @@ -0,0 +1,204 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package com.sleepycat.je.rep.persist.test; + +import junit.framework.TestCase; + +import com.sleepycat.je.rep.persist.test.AppBaseImpl; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKey; + +public class AppImpl extends AppBaseImpl { + + private PrimaryIndex priIndex; + private SecondaryIndex secIndex; + private SecondaryIndex subclassAIndex; + + protected void setupConfig(final Mutations mutations, + final EntityModel model) { + model.registerClass(DataA.class); + } + + protected void init() { + priIndex = store.getPrimaryIndex(Integer.class, Data.class); + secIndex = store.getSecondaryIndex(priIndex, Integer.class, "secKey"); + subclassAIndex = store.getSubclassIndex(priIndex, DataA.class, + Integer.class, "secKeyA"); + } + + public void writeData(final int key) { + priIndex.put(new Data(key)); + } + + public void writeDataA(final int key) { + priIndex.put(new DataA(key)); + } + + public void writeDataB(final int key) { + priIndex.put(new DataB(key)); + } + + public void writeDataC(final int key) { + TestCase.fail(); + } + + public void writeData2(final int key) { + TestCase.fail(); + } + + public void readData(final int key) { + final Data data = priIndex.get(key); + TestCase.assertNotNull(data); + + final Data data2 = secIndex.get(key); + TestCase.assertNotNull(data2); + TestCase.assertEquals(key, data2.priKey); + } + + public void readDataA(final int key) { + final DataA data = (DataA) priIndex.get(key); + TestCase.assertNotNull(data); + + final DataA data2 = (DataA) secIndex.get(key); + TestCase.assertNotNull(data2); + TestCase.assertEquals(key, data2.priKey); + + final DataA data3 = subclassAIndex.get(key); + TestCase.assertNotNull(data3); + TestCase.assertEquals(key, data3.priKey); + } + + public void readDataB(final int key) { + final DataB data = (DataB) priIndex.get(key); + TestCase.assertNotNull(data); + + final DataB data2 = (DataB) secIndex.get(key); + TestCase.assertNotNull(data2); + TestCase.assertEquals(key, data2.priKey); + } + + public void readDataC(final int key) { + TestCase.fail(); + } + + public void readData2(final int key) { + TestCase.fail(); + } + + @Entity(version=0) + static class Data { + + @PrimaryKey + int priKey; + + @SecondaryKey(relate=Relationship.ONE_TO_ONE) + int secKey; + + Data(final int priKey) { + this.priKey = priKey; + secKey = priKey; + } + + private Data() {} // for deserialization + } + + @Persistent(version=0) + static class DataA extends Data { + + @SecondaryKey(relate=Relationship.ONE_TO_ONE) + int secKeyA; + + DataA(final int priKey) { + super(priKey); + secKeyA = priKey; + } + + private DataA() {} // for deserialization + } + + @Persistent(version=0) + static class DataB extends Data { + + DataB(final int priKey) { + super(priKey); + } + + private DataB() {} // for deserialization + } + + /* For testRefreshBeforeWrite. 
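+     * These methods store and read AnimalEntity instances whose Animal field
+     * is null or a Dog; this version of the app does not define Cat, so the
+     * cat variants are expected never to be called and simply fail.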
*/ + + public void insertNullAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + TestCase.assertTrue(index.putNoOverwrite(new AnimalEntity(1, null))); + } + + public void readNullAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + AnimalEntity e = index.get(1); + TestCase.assertNotNull(e); + TestCase.assertNull(e.animal); + } + + public void insertDogAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + TestCase.assertTrue(index.putNoOverwrite(new AnimalEntity(2, + new Dog()))); + } + + public void readDogAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + AnimalEntity e = index.get(2); + TestCase.assertNotNull(e); + TestCase.assertTrue(e.animal instanceof Dog); + } + + public void insertCatAnimal() { + TestCase.fail(); + } + + public void readCatAnimal() { + TestCase.fail(); + } + + @Entity(version=0) + static class AnimalEntity { + + @PrimaryKey + int id; + + Animal animal; + + AnimalEntity(final int id, final Animal animal) { + this.id = id; + this.animal = animal; + } + + private AnimalEntity() {} // for deserialization + } + + @Persistent(version=0) + static abstract class Animal { + } + + @Persistent(version=0) + static class Dog extends Animal { + int woof = 999999; + } +} diff --git a/test/com/sleepycat/je/rep/persist/test/AppImpl.java.1 b/test/com/sleepycat/je/rep/persist/test/AppImpl.java.1 new file mode 100644 index 0000000..f0c8bd6 --- /dev/null +++ b/test/com/sleepycat/je/rep/persist/test/AppImpl.java.1 @@ -0,0 +1,308 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2002, 2014 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package com.sleepycat.je.rep.persist.test; + +import junit.framework.TestCase; + +import com.sleepycat.je.rep.persist.test.AppBaseImpl; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.evolve.Renamer; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKey; + +public class AppImpl extends AppBaseImpl { + + private PrimaryIndex priIndex; + private SecondaryIndex secIndex; + private SecondaryIndex subclassAIndex; + + protected void setupConfig(final Mutations mutations, + final EntityModel model) { + model.registerClass(DataA.class); + model.registerClass(DataB.class); + model.registerClass(NewDataC.class); + final String APP_IMPL = AppImpl.class.getName(); + mutations.addRenamer(new Renamer(APP_IMPL + "$Data", 0, + APP_IMPL + "$RenamedData")); + mutations.addRenamer(new Renamer(APP_IMPL + "$Data", 0, "secKey", + "renamedSecKey")); + } + + protected void init() { + priIndex = store.getPrimaryIndex(Integer.class, RenamedData.class); + secIndex = store.getSecondaryIndex(priIndex, Integer.class, + "renamedSecKey"); + subclassAIndex = store.getSubclassIndex(priIndex, DataA.class, + Integer.class, "secKeyA"); + } + + public void writeData(final int key) { + priIndex.put(new RenamedData(key)); + } + + public void writeDataA(final int key) { + priIndex.put(new DataA(key)); + } + + public void writeDataB(final int key) { + priIndex.put(new DataB(key)); + } + + public void writeDataC(final int key) { + priIndex.put(new NewDataC(key)); + } + + public void writeData2(final int key) { + /* May throw IndexNotAvailableException. */ + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, NewData2.class); + index.put(new NewData2(key)); + } + + public void readData(final int key) { + final RenamedData data = priIndex.get(key); + TestCase.assertNotNull(data); + + final RenamedData data2 = secIndex.get(key); + TestCase.assertNotNull(data2); + TestCase.assertEquals(key, data2.priKey); + + /* May throw IndexNotAvailableException. */ + SecondaryIndex newSecIndex = + store.getSecondaryIndex(priIndex, Integer.class, "newSecKey"); + final RenamedData data3 = newSecIndex.get(key); + if (data.newSecKey == null) { + TestCase.assertNull(data3); + } else { + TestCase.assertNotNull(data3); + TestCase.assertEquals(key, data3.priKey); + } + } + + public void readDataA(final int key) { + final DataA data = (DataA) priIndex.get(key); + TestCase.assertNotNull(data); + + final DataA data2 = (DataA) secIndex.get(key); + TestCase.assertNotNull(data2); + TestCase.assertEquals(key, data2.priKey); + + final DataA data3 = subclassAIndex.get(key); + TestCase.assertNotNull(data3); + TestCase.assertEquals(key, data3.priKey); + } + + public void readDataB(final int key) { + final DataB data = (DataB) priIndex.get(key); + TestCase.assertNotNull(data); + + final DataB data2 = (DataB) secIndex.get(key); + TestCase.assertNotNull(data2); + TestCase.assertEquals(key, data2.priKey); + + /* May throw IndexNotAvailableException. 
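+         *
+         * On a replica whose stored metadata predates the newSecKeyB key,
+         * the getSubclassIndex call below throws IndexNotAvailableException
+         * rather than returning a partially built index.  A sketch of the
+         * defensive pattern an application might use (the handling shown is
+         * hypothetical):
+         *
+         *   SecondaryIndex idx = null;
+         *   try {
+         *       idx = store.getSubclassIndex(priIndex, DataB.class,
+         *                                    Integer.class, "newSecKeyB");
+         *   } catch (IndexNotAvailableException e) {
+         *       // Metadata is stale here; retry after the upgraded
+         *       // master has written a record, or skip the query.
+         *   }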
*/ + final SecondaryIndex newSubclassBIndex = + store.getSubclassIndex(priIndex, DataB.class, Integer.class, + "newSecKeyB"); + final DataB data3 = newSubclassBIndex.get(key); + if (data.newSecKeyB == null) { + TestCase.assertNull(data3); + } else { + TestCase.assertNotNull(data3); + TestCase.assertEquals(key, data3.priKey); + } + } + + public void readDataC(final int key) { + + /* + * Call getSubclassIndex before reading via primary and existing (old) + * secondary index, since the latter is not possible until we've + * written a record. We want to ensure that getSubclassIndex throws + * IndexNotAvailableException. + */ + final SecondaryIndex subclassCIndex = + store.getSubclassIndex(priIndex, NewDataC.class, Integer.class, + "secKeyC"); + + final NewDataC data = (NewDataC) priIndex.get(key); + TestCase.assertNotNull(data); + + final NewDataC data2 = (NewDataC) secIndex.get(key); + TestCase.assertNotNull(data2); + TestCase.assertEquals(key, data2.priKey); + + final NewDataC data3 = subclassCIndex.get(key); + TestCase.assertNotNull(data3); + TestCase.assertEquals(key, data3.priKey); + } + + public void readData2(final int key) { + /* May throw IndexNotAvailableException. */ + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, NewData2.class); + NewData2 data = index.get(key); + TestCase.assertNotNull(data); + } + + @Entity(version=1) + static class RenamedData { + + @PrimaryKey + int priKey; + + @SecondaryKey(relate=Relationship.ONE_TO_ONE) + int renamedSecKey; + + @SecondaryKey(relate=Relationship.ONE_TO_ONE) + Integer newSecKey; + + private String newField; + + RenamedData(final int priKey) { + this.priKey = priKey; + renamedSecKey = priKey; + newSecKey = priKey; + newField = "new"; + } + + private RenamedData() {} // for deserialization + } + + @Persistent(version=0) + static class DataA extends RenamedData { + + @SecondaryKey(relate=Relationship.ONE_TO_ONE) + int secKeyA; + + DataA(final int priKey) { + super(priKey); + secKeyA = priKey; + } + + private DataA() {} // for deserialization + } + + @Persistent(version=1) + static class DataB extends RenamedData { + + @SecondaryKey(relate=Relationship.ONE_TO_ONE) + Integer newSecKeyB; + + DataB(final int priKey) { + super(priKey); + newSecKeyB = priKey; + } + + private DataB() {} // for deserialization + } + + @Persistent(version=0) + static class NewDataC extends RenamedData { + + @SecondaryKey(relate=Relationship.ONE_TO_ONE) + int secKeyC; + + NewDataC(final int priKey) { + super(priKey); + secKeyC = priKey; + } + + private NewDataC() {} // for deserialization + } + + @Entity(version=0) + static class NewData2 { + + @PrimaryKey + int priKey; + + NewData2(final int priKey) { + this.priKey = priKey; + } + + private NewData2() {} // for deserialization + } + + public void insertNullAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + TestCase.assertTrue(index.putNoOverwrite(new AnimalEntity(1, null))); + } + + public void readNullAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + AnimalEntity e = index.get(1); + TestCase.assertNotNull(e); + TestCase.assertNull(e.animal); + } + + public void insertDogAnimal() { + TestCase.fail(); + } + + public void readDogAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + AnimalEntity e = index.get(2); + TestCase.assertNotNull(e); + TestCase.assertTrue(e.animal instanceof Dog); + } + + public void insertCatAnimal() { + final PrimaryIndex index = 
+ store.getPrimaryIndex(Integer.class, AnimalEntity.class); + TestCase.assertTrue(index.putNoOverwrite(new AnimalEntity(3, + new Cat()))); + } + + public void readCatAnimal() { + final PrimaryIndex index = + store.getPrimaryIndex(Integer.class, AnimalEntity.class); + AnimalEntity e = index.get(3); + TestCase.assertNotNull(e); + TestCase.assertTrue(e.animal instanceof Cat); + } + + @Entity(version=0) + static class AnimalEntity { + + @PrimaryKey + int id; + + Animal animal; + + AnimalEntity(final int id, final Animal animal) { + this.id = id; + this.animal = animal; + } + + private AnimalEntity() {} // for deserialization + } + + @Persistent(version=0) + static abstract class Animal { + } + + @Persistent(version=0) + static class Dog extends Animal { + int woof = 999999; + } + + @Persistent(version=0) + static class Cat extends Animal { + String meow = "meow?"; + } +} diff --git a/test/com/sleepycat/je/rep/persist/test/AppInterface.java b/test/com/sleepycat/je/rep/persist/test/AppInterface.java new file mode 100644 index 0000000..f6fcde3 --- /dev/null +++ b/test/com/sleepycat/je/rep/persist/test/AppInterface.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.persist.test; + +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.persist.EntityStore; + +public interface AppInterface { + public void setVersion(final int label); + public void setInitDuringOpen(final boolean doInit); + public void open(final ReplicatedEnvironment env); + public void close(); + public void writeData(final int key); + public void writeDataA(final int key); + public void writeDataB(final int key); + public void writeDataC(final int key); + public void writeData2(final int key); + public void readData(final int key); + public void readDataA(final int key); + public void readDataB(final int key); + public void readDataC(final int key); + public void readData2(final int key); + public void adopt(AppInterface other); + public int getVersion(); + public ReplicatedEnvironment getEnv(); + public EntityStore getStore(); + /* For testRefreshBeforeWrite. */ + public void insertNullAnimal(); + public void readNullAnimal(); + public void insertDogAnimal(); + public void readDogAnimal(); + public void insertCatAnimal(); + public void readCatAnimal(); +} diff --git a/test/com/sleepycat/je/rep/persist/test/SimpleTest.java b/test/com/sleepycat/je/rep/persist/test/SimpleTest.java new file mode 100644 index 0000000..eab066c --- /dev/null +++ b/test/com/sleepycat/je/rep/persist/test/SimpleTest.java @@ -0,0 +1,214 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.persist.test; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Simple tests using the DPL and HA. + */ +public class SimpleTest extends TestBase { + + private File envRoot; + private RepEnvInfo[] repEnvInfo; + private ReplicatedEnvironment masterEnv; + private ReplicatedEnvironment replicaEnv; + + public SimpleTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + if (repEnvInfo != null) { + + /* + * close() was not called, test failed. Do cleanup to allow more + * tests to run, but leave log files for debugging this test case. + */ + try { + close(false /*normalShutdown*/); + } catch (Exception ignored) { + /* This secondary exception is just noise. */ + } + } + } + + /** + * Create a 2 node group. + * + * ReplicaAckPolicy.ALL is used to ensure that when a master operation is + * committed, the change is immediately available on the replica for + * testing -- no waiting in the test is needed. + */ + private void open() + throws IOException { + + repEnvInfo = RepTestUtils.setupEnvInfos + (envRoot, 2, + RepTestUtils.createEnvConfig + (new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.ALL)), + new ReplicationConfig()); + masterEnv = RepTestUtils.joinGroup(repEnvInfo); + replicaEnv = repEnvInfo[1].getEnv(); + assertNotNull(masterEnv); + assertNotNull(replicaEnv); + assertNotSame(masterEnv, replicaEnv); + } + + private void close() { + close(true /*normalShutdown*/); + } + + private void close(boolean normalShutdown) { + try { + if (normalShutdown) { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } else { + for (RepEnvInfo info : repEnvInfo) { + info.abnormalCloseEnv(); + } + } + } finally { + repEnvInfo = null; + masterEnv = null; + replicaEnv = null; + } + } + + /** + * Test that the following sequence works. + * + Open EntityStore on Master and Replica. + * + Open PrimaryIndex on Master. + * + Open PrimaryIndex on Replica. + * + * This requires that a refresh of DPL metadata be performed on the Replica + * when the PrimaryIndex is opened, since the Replica will have stale + * metadata that does not include information for the index. + * + * Note that this is not the normal/expected usage mode. 
Normally, all + * indexes are opened immediately after opening the EntityStore. + * + * [#18594] + */ + @Test + public void testDeferOpenIndex() + throws IOException { + + open(); + + final EntityStore masterStore = new EntityStore + (masterEnv, "foo", + new StoreConfig().setAllowCreate(true).setTransactional(true)); + final EntityStore replicaStore = new EntityStore + (replicaEnv, "foo", + new StoreConfig().setTransactional(true)); + + final PrimaryIndex masterIndex = + masterStore.getPrimaryIndex(Integer.class, SimpleEntity.class); + + /* Before the [#18594] fix, this threw RefreshException. */ + final PrimaryIndex replicaIndex = + replicaStore.getPrimaryIndex(Integer.class, SimpleEntity.class); + + replicaStore.close(); + masterStore.close(); + + close(); + } + + /** + * Test that the following sequence works. + * + Open EntityStore on Master and Replica. + * + Open PrimaryIndex A, with a sequence, on Master. + * + Open PrimaryIndex B, with a sequence, on Replica. + * + * This tickled a bug where the transaction config for the sequence was + * not being properly initialized. + * + * Note that this is not the normal/expected usage mode. Normally, all + * indexes are opened immediately after opening the EntityStore. + * + * [#18594] + */ + @Test + public void testOpenSecondNonExistentSequenceOnReplica() + throws IOException { + + open(); + + final EntityStore masterStore = new EntityStore + (masterEnv, "foo", + new StoreConfig().setAllowCreate(true).setTransactional(true)); + final EntityStore replicaStore = new EntityStore + (replicaEnv, "foo", + new StoreConfig().setTransactional(true)); + + masterStore.getPrimaryIndex(Integer.class, SimpleEntity.class); + + /* Note: An IndexNotAvailableException may be thrown in the future. */ + try { + replicaStore.getPrimaryIndex(Integer.class, SimpleEntity2.class); + fail(); + } catch (ReplicaWriteException expected) { + } + + replicaStore.close(); + masterStore.close(); + + close(); + } + + @Entity + static class SimpleEntity { + + @PrimaryKey(sequence="ID") + int id; + + String data = "data"; + } + + @Entity + static class SimpleEntity2 { + + @PrimaryKey(sequence="ID2") + int id; + + String data = "data"; + } +} diff --git a/test/com/sleepycat/je/rep/persist/test/UpgradeTest.java b/test/com/sleepycat/je/rep/persist/test/UpgradeTest.java new file mode 100644 index 0000000..0068fdb --- /dev/null +++ b/test/com/sleepycat/je/rep/persist/test/UpgradeTest.java @@ -0,0 +1,900 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.persist.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; + +import com.sleepycat.je.Durability; +import com.sleepycat.je.rep.DatabasePreemptedException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.WaitForMasterListener; +import com.sleepycat.je.util.SimpleClassLoader; +import com.sleepycat.persist.IndexNotAvailableException; +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; + +/** + * Tests DPL schema evolution that takes place as part of application upgrades + * in a replication group. + * + * Here's an outline of the test procedure for the scenario described in the + * SR [#16655] as B-1. + * + * 1. Start a 2 or more node group. + * 2. Change persistent classes in some way (see below). + * 3. Stop the replica, update the app classes, and restart it. + * 4. Make sure the replica behaves correctly (see below), including when the + * Master updates the metadata by storing a new entity class and the Replica + * must refresh. + * + * More specifically, the persistent class changes and expected behaviors to + * test are: + * + * - Bump version of entity class and add a field. Expect unchanged behavior, + * new field should be null. + * - Add new secondary key, call getSecondaryIndex. Expect new + * IndexNotAvailableException from getSecondaryIndex, unchanged behavior + * otherwise. + * - Add new entity subclass with secondary key, call getSubclassIndex. + * Expect new IndexNotAvailableException from getSubclassIndex, unchanged + * behavior otherwise. + * - Add new entity class, call getPrimaryIndex. Expect new + * IndexNotAvailableException from getPrimaryIndex, unchanged behavior + * otherwise. + * - Rename class. Expect unchanged behavior. + * - Rename secondary key. Expect unchanged behavior. + */ +public class UpgradeTest extends TestBase { + + private static final int N_APP_VERSIONS = 2; + private static final String APP_IMPL = + "com.sleepycat.je.rep.persist.test.AppImpl"; + + private File envRoot; + private RepEnvInfo[] repEnvInfo; + private ReplicatedEnvironment masterEnv; + private ReplicatedEnvironment replicaEnv1; + private ReplicatedEnvironment replicaEnv2; + private AppInterface masterApp; + private AppInterface replicaApp1; + private AppInterface replicaApp2; + private Class[] appClasses = new Class[N_APP_VERSIONS]; + private boolean doInitDuringOpen = true; + + public UpgradeTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + if (repEnvInfo != null) { + + /* + * close() was not called, test failed. 
Do cleanup to allow more + * tests to run, but leave log files for debugging this test case. + */ + try { + close(false /*normalShutdown*/); + } catch (Exception ignored) { + /* This secondary exception is just noise. */ + } + } + } + + /** + * Creates a 3 node group and initializes the app classes. + */ + private void open() + throws Exception { + + /* + * ReplicaAckPolicy.ALL is used to ensure that when a master operation + * is committed, the change is immediately available on the replica for + * testing -- no waiting in the test is needed. + */ + repEnvInfo = RepTestUtils.setupEnvInfos + (envRoot, 3, + RepTestUtils.createEnvConfig + (new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.ALL)), + new ReplicationConfig()); + masterEnv = RepTestUtils.joinGroup(repEnvInfo); + replicaEnv1 = repEnvInfo[1].getEnv(); + replicaEnv2 = repEnvInfo[2].getEnv(); + + /* Load app classes with custom class loader. */ + final File evolveParentDir = + new File(System.getProperty("testevolvedir")); + final ClassLoader parentClassLoader = + Thread.currentThread().getContextClassLoader(); + for (int i = 0; i < N_APP_VERSIONS; i += 1) { + final ClassLoader myLoader = new SimpleClassLoader + (parentClassLoader, + new File(evolveParentDir, "dplUpgrade." + i)); + appClasses[i] = + Class.forName(APP_IMPL, true /*initialize*/, myLoader); + } + + /* Open v0 app objects. */ + masterApp = newAppObject(0); + masterApp.open(masterEnv); + replicaApp1 = newAppObject(0); + replicaApp1.open(replicaEnv1); + replicaApp2 = newAppObject(0); + replicaApp2.open(replicaEnv2); + } + + private void close() { + close(true /*normalShutdown*/); + } + + private void close(boolean normalShutdown) { + try { + if (normalShutdown) { + replicaApp1.close(); + replicaApp2.close(); + masterApp.close(); + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } else { + for (RepEnvInfo info : repEnvInfo) { + info.abnormalCloseEnv(); + } + } + } finally { + repEnvInfo = null; + masterEnv = null; + replicaEnv1 = null; + replicaEnv2 = null; + masterApp = null; + replicaApp1 = null; + replicaApp2 = null; + } + } + + @Test + public void testClassLoader() + throws Exception { + + open(); + + /* The AppImpl class must not be defined in the normal class loader. */ + try { + Class.forName(APP_IMPL); + fail(); + } catch (ClassNotFoundException expected) { + } + + /* All AppImpl classes must be distinct / different. */ + for (int i = 0; i < N_APP_VERSIONS; i += 1) { + for (int j = i + 1; j < N_APP_VERSIONS; j += 1) { + assertNotSame(appClasses[i], appClasses[j]); + } + } + + close(); + } + + /** + * Tests that incremental metadata changes made on the master are visible + * (refreshed) when needed on the replicas. Incremental metadata changes + * occur when not all metadata is known to the DPL initially when the store + * is opened, and additional metadata is discovered as entities are + * written. This is scenario A-1 in the [#16655] SR. + * + * This is not actually a schema upgrade test, but is conveniently tested + * here using the upgrade test framework. + */ + @Test + public void testIncrementalMetadataChanges() + throws Exception { + + open(); + + /* Master writes and reads Data entity. */ + masterApp.writeData(0); + masterApp.readData(0); + + /* Replicas read Data entity. */ + replicaApp1.readData(0); + replicaApp2.readData(0); + + /* Master writes DataA (subclass), causing a metadata update. 
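+         *
+         * The first put() of a DataA instance is what triggers the update:
+         * the DPL discovers the subclass format lazily and persists the new
+         * class metadata along with the entity.  In application terms,
+         * roughly:
+         *
+         *   priIndex.put(new DataA(1));  // writes entity + new metadata
+         *   // A replica reading key 1 must refresh its in-memory
+         *   // metadata before it can interpret the DataA format.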
*/ + masterApp.writeDataA(1); + masterApp.readDataA(1); + + /* Replicas read DataA and must refresh metadata. */ + replicaApp1.readDataA(1); + replicaApp2.readDataA(1); + + /* Read Data again for good measure. */ + masterApp.readData(0); + replicaApp1.readData(0); + replicaApp2.readData(0); + + close(); + } + + /** + * Tests that when a replica having stale metadata is elected master, the + * first metadata update on the new master causes refresh of the stale + * metadata before the new metadata is written. This is scenario A-2 in + * the [#16655] SR. + * + * This is not actually a schema upgrade test, but is conveniently tested + * here using the upgrade test framework. + */ + @Test + public void testElectedMasterWithStaleMetadata() + throws Exception { + + open(); + + /* Master writes and reads Data entity. */ + masterApp.writeData(0); + masterApp.readData(0); + + /* Replicas read Data entity. */ + replicaApp1.readData(0); + replicaApp2.readData(0); + + /* Master writes DataA (subclass), causing a metadata update. */ + masterApp.writeDataA(1); + masterApp.readDataA(1); + + /* + * Master is bounced (but not upgraded), replica1 switches roles with + * master. + */ + bounceMaster(0); + + /* + * Master writes DataB, which requires a metadata change. Before this + * new metadata change, it must refresh metadata from disk to get the + * definition of DataA. + */ + masterApp.writeDataB(2); + + /* + * Reading DataA would cause a ClassCastException if refresh did not + * occur above, because the format ID for DataA would be incorrect. + */ + masterApp.readDataA(1); + + /* Read all again for good measure. */ + masterApp.readData(0); + masterApp.readDataA(1); + masterApp.readDataB(2); + replicaApp1.readData(0); + replicaApp1.readDataA(1); + replicaApp1.readDataB(2); + replicaApp2.readData(0); + replicaApp2.readDataA(1); + replicaApp2.readDataB(2); + + close(); + } + + /** + * Tests scenarios B-1 and B-2 in the [#16655] SR. + */ + @Test + public void testUpgrade() + throws Exception { + + open(); + + /* Master writes and reads v0 entities. */ + masterApp.writeData(0); + masterApp.writeDataA(1); + masterApp.writeDataB(2); + masterApp.readData(0); + masterApp.readDataA(1); + masterApp.readDataB(2); + + /* Replicas read v0 entities. */ + replicaApp1.readData(0); + replicaApp1.readDataA(1); + replicaApp1.readDataB(2); + replicaApp2.readData(0); + replicaApp2.readDataA(1); + replicaApp2.readDataB(2); + + /* Replica1 is upgraded to v1, upgrades metadata in memory. */ + bounceReplica1(1); + + /* Upgraded replica1 reads v0 entities, can't get new index. */ + try { + replicaApp1.readData(0); + fail(); + } catch (IndexNotAvailableException e) { + } + try { + replicaApp1.readDataB(2); + fail(); + } catch (IndexNotAvailableException e) { + } + + /* Upgraded replica1 can't get index for new entity NewData2. */ + try { + replicaApp1.readData2(14); + fail(); + } catch (IndexNotAvailableException e) { + } + + /* Replica1 can read v0 DataA, because it has no new indexes. */ + replicaApp1.readDataA(1); + + /* Replica2 (not yet upgraded) reads v0 entities without errors. */ + replicaApp2.readData(0); + replicaApp2.readDataA(1); + replicaApp2.readDataB(2); + + /* Replica2 is upgraded to v1, upgrades metadata in memory. */ + bounceReplica2(1); + + /* Upgraded replicas read v0 entities, can't get new index. 
*/ + try { + replicaApp1.readData(0); + fail(); + } catch (IndexNotAvailableException e) { + } + try { + replicaApp1.readDataB(2); + fail(); + } catch (IndexNotAvailableException e) { + } + try { + replicaApp2.readData(0); + fail(); + } catch (IndexNotAvailableException e) { + } + try { + replicaApp2.readDataB(2); + fail(); + } catch (IndexNotAvailableException e) { + } + + /* Upgraded replicas can't get index for new entity NewData2. */ + try { + replicaApp1.readData2(14); + fail(); + } catch (IndexNotAvailableException e) { + } + try { + replicaApp2.readData2(14); + fail(); + } catch (IndexNotAvailableException e) { + } + + /* Upgraded replicas can read v0 DataA, it has no new indexes. */ + replicaApp1.readDataA(1); + replicaApp2.readDataA(1); + + /* Read again on master for good measure. */ + masterApp.readData(0); + masterApp.readDataA(1); + masterApp.readDataB(2); + + /* Master is upgraded to v1, replica1 switches roles with master. */ + bounceMaster(1); + + /* Metadata is refreshed when new indexes are requested. */ + try { + masterApp.readData(0); + fail(); + } catch (DatabasePreemptedException expected) { + masterApp.close(); + masterApp.open(masterEnv); + masterApp.readData(0); + } + masterApp.readDataA(1); + masterApp.readDataB(2); + try { + replicaApp1.readData(0); + fail(); + } catch (DatabasePreemptedException expected) { + replicaApp1.close(); + replicaApp1.open(replicaEnv1); + replicaApp1.readData(0); + } + replicaApp1.readDataA(1); + replicaApp1.readDataB(2); + try { + replicaApp2.readData(0); + fail(); + } catch (DatabasePreemptedException expected) { + replicaApp2.close(); + replicaApp2.open(replicaEnv2); + replicaApp2.readData(0); + } + replicaApp2.readDataA(1); + replicaApp2.readDataB(2); + + /* Master writes v1 entities. */ + masterApp.writeData(10); + masterApp.writeDataA(11); + masterApp.writeDataB(12); + + /* Master reads v0 and v1 entities. */ + masterApp.readData(0); + masterApp.readData(10); + masterApp.readDataA(1); + masterApp.readDataA(11); + masterApp.readDataB(2); + masterApp.readDataB(12); + + /* Replicas read v0 and v1 entities. */ + replicaApp1.readData(0); + replicaApp1.readData(10); + replicaApp1.readDataA(1); + replicaApp1.readDataA(11); + replicaApp1.readDataB(2); + replicaApp1.readDataB(12); + replicaApp2.readData(0); + replicaApp2.readData(10); + replicaApp2.readDataA(1); + replicaApp2.readDataA(11); + replicaApp2.readDataB(2); + replicaApp2.readDataB(12); + + /* Master writes new NewDataC subclass, all can read. */ + masterApp.writeDataC(13); + masterApp.readDataC(13); + replicaApp1.readDataC(13); + replicaApp2.readDataC(13); + + /* Master writes new NewData2 entity class, all can read. */ + masterApp.writeData2(14); + masterApp.readData2(14); + replicaApp1.readData2(14); + replicaApp2.readData2(14); + + close(); + } + + /** + * Ensure that when the master is bounced, the first write will refresh + * metadata. The testUpgrade method, OTOH, ensures that metadata is + * refreshed when new indexes are requested. + */ + @Test + public void testRefreshAfterFirstWrite() + throws Exception { + + open(); + + /* Master writes v0 entity, all nodes read. */ + masterApp.writeData(0); + masterApp.readData(0); + replicaApp1.readData(0); + replicaApp2.readData(0); + + /* Replica1 is upgraded to v1, upgrades metadata in memory. */ + bounceReplica1(1); + + /* Upgraded replica1 reads v0 entities, can't get new index. 
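+         *
+         * The failure comes from the secondary lookup inside readData, not
+         * from the primary read itself; with v1 classes loaded, the app
+         * asks for the new key, which the stored metadata does not yet
+         * define:
+         *
+         *   store.getSecondaryIndex(priIndex, Integer.class, "newSecKey");
+         *   // throws IndexNotAvailableException until the upgraded
+         *   // master writes a record carrying the new metadata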
*/
+        try {
+            replicaApp1.readData(0);
+            fail();
+        } catch (IndexNotAvailableException e) {
+        }
+
+        /* Replica2 (not yet upgraded) reads v0 entity without errors. */
+        replicaApp2.readData(0);
+
+        /* Replica2 is upgraded to v1, upgrades metadata in memory. */
+        bounceReplica2(1);
+
+        /* Read again on master for good measure. */
+        masterApp.readData(0);
+
+        /* Master is upgraded to v1, replica1 switches roles with master. */
+        bounceMaster(1);
+
+        /* Metadata is refreshed on first write. */
+        try {
+            masterApp.writeData(10);
+            fail();
+        } catch (DatabasePreemptedException expected) {
+            masterApp.close();
+            masterApp.open(masterEnv);
+            masterApp.writeData(10);
+        }
+
+        /* Replicas also get DatabasePreemptedException on first read. */
+        try {
+            replicaApp1.readData(0);
+            fail();
+        } catch (DatabasePreemptedException expected) {
+            replicaApp1.close();
+            replicaApp1.open(replicaEnv1);
+            replicaApp1.readData(0);
+        }
+        try {
+            replicaApp2.readData(0);
+            fail();
+        } catch (DatabasePreemptedException expected) {
+            replicaApp2.close();
+            replicaApp2.open(replicaEnv2);
+            replicaApp2.readData(0);
+        }
+
+        /* All reads now work. */
+        masterApp.readData(0);
+        masterApp.readData(10);
+        replicaApp1.readData(0);
+        replicaApp1.readData(10);
+        replicaApp2.readData(0);
+        replicaApp2.readData(10);
+
+        close();
+    }
+
+    /**
+     * Tests that a reasonable exception occurs when an upgraded node is
+     * elected Master *before* all other nodes have been upgraded.  This is a
+     * user error, since the Master election should occur last, but it cannot
+     * always be avoided, for example, when an unexpected failover occurs
+     * during the upgrade process.
+     *
+     * There are two cases: (1) when the non-upgraded Replica node is already
+     * running when an upgraded node becomes Master, and (2) when the
+     * non-upgraded node is brought up as a Replica in a group with an
+     * upgraded Master.  However, implementation-wise case (1) becomes case
+     * (2), because in case (1) the Replica will attempt to refresh metadata,
+     * which is internally the same as bringing up the Replica from scratch.
+     * In both cases we instantiate a new PersistCatalog internally and run
+     * class evolution.  This should result in an IncompatibleClassException.
+     */
+    @Test
+    public void testPrematureUpgradedMaster()
+        throws Exception {
+
+        open();
+
+        /* Master writes v0 entity, all nodes read. */
+        masterApp.writeData(0);
+        masterApp.readData(0);
+        replicaApp1.readData(0);
+        replicaApp2.readData(0);
+
+        /* Replica2 is upgraded to v1, then Master is upgraded to v1. */
+        bounceReplica2(1);
+        bounceMaster(1);
+
+        /* Replica2 and Replica1 were swapped when the Master was bounced. */
+        assertEquals(1, masterApp.getVersion());
+        assertEquals(1, replicaApp1.getVersion());
+        assertEquals(0, replicaApp2.getVersion());
+
+        /* Write a v1 entity on the Master. */
+        try {
+            masterApp.writeData(10);
+            fail();
+        } catch (DatabasePreemptedException expected) {
+            masterApp.close();
+            masterApp.open(masterEnv);
+            masterApp.writeData(10);
+        }
+
+        /* The upgraded replica can read the v1 entity. */
+        try {
+            replicaApp1.readData(10);
+            fail();
+        } catch (DatabasePreemptedException expected) {
+            replicaApp1.close();
+            replicaApp1.open(replicaEnv1);
+            replicaApp1.readData(10);
+        }
+
+        /* The non-upgraded replica will get IncompatibleClassException.
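+         *
+         * Reopening the store with v0 classes runs class evolution against
+         * metadata already written by the v1 master, which cannot succeed;
+         * the only recovery is to upgrade the node's classes.  The handling
+         * below, in outline:
+         *
+         *   try {
+         *       replicaApp2.open(replicaEnv2);  // v0 classes, v1 metadata
+         *   } catch (IncompatibleClassException e) {
+         *       // deploy the upgraded classes, then reopen
+         *   }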
*/ + try { + replicaApp2.readData(10); + fail(); + } catch (DatabasePreemptedException expected) { + replicaApp2.close(); + try { + replicaApp2.open(replicaEnv2); + fail(); + } catch (IncompatibleClassException expected2) { + } + } + + /* When finally upgraded, the replica can read the v1 entity. */ + bounceReplica2(1); + replicaApp2.readData(10); + + /* Read all for good measure. */ + masterApp.readData(0); + masterApp.readData(10); + replicaApp1.readData(0); + replicaApp1.readData(10); + replicaApp2.readData(0); + replicaApp2.readData(10); + + close(); + } + + /** + * Tests a somewhat contrived upgrade scenario with persistent classes A + * and B that can be embedded in an entity without changing the entity + * class definition or version. For example, if a field of type Animal + * has subclasses Cat and Dog, these subclasses will not be known to the + * DPL until an instance of them is stored. When a new subclass is defined + * in the app, a metadata upgrade will not occur until an instance is + * stored. + * + * The contrived scenario is: + * + * + App initially contains persistent class A, but no instance of A is + * stored. Neither the Master nor Replica DPL is aware of class A. + * + * + App is modified to contain new persistent class B. + * + * + Replica is upgraded, but does not enter Replica Upgrade Mode because + * class A and B are not yet used or referenced. + * + * + Master writes record containing instance of A, but Replica does not + * read it. Replica metadata is stale. It so happens that A is assigned + * format ID 200 by the Master. + * + * + Master is bounced and former Replica is elected Master. + * + * + New Master attempts to write record with instances of B, and updates + * its metadata, and happens to assign B format ID 200. If the metadata + * previously written by the old Master is overwritten, then metadata + * corruption will occur. Class A and B will have been assigned the same + * format ID. + * + * + When any node reads record containing A, some sort of exception will + * probably occur as a result of the incorrect format ID, or perhaps + * incorrect data will be returned. + * + * This problem should be avoided because the new Master should detect that + * its metadata is stale, and refresh it prior to writing the new metadata. + * It detects this, even though it is not in Replica Upgrade Mode, by + * reading the metadata before updating it, and checking to see if it has + * changed since it was last read. See PersistCatalog.writeDataCheckStale. + * + * The scenario is contrived in the sense that it is very unlikely that an + * instance of A will happen to be written by the Master for the very first + * time during the upgrade process. But it does provide a test case for + * the detection of metadata changes in PersistCatalog.writeData. + */ + @Test + public void testRefreshBeforeWrite() + throws Exception { + + doInitDuringOpen = false; + open(); + + masterApp.insertNullAnimal(); + replicaApp1.readNullAnimal(); + replicaApp2.readNullAnimal(); + + /* Replicas upgraded to v1 but do not enter Replica Upgrade Mode. 
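+         *
+         * The interesting step comes after the master bounce below: before
+         * writing the Cat metadata, the new master re-reads the stored
+         * metadata and refreshes if another master has changed it.  Roughly
+         * (a simplification of PersistCatalog.writeDataCheckStale, with
+         * hypothetical helper names):
+         *
+         *   StoredMetadata onDisk = readStoredMetadata();
+         *   if (!onDisk.equals(lastReadMetadata)) {
+         *       refresh(onDisk);  // pick up the Dog format written earlier
+         *   }
+         *   writeStoredMetadata(addNewFormats(onDisk));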
*/ + bounceReplica1(1); + bounceReplica2(1); + assertFalse(replicaApp1.getStore().isReplicaUpgradeMode()); + assertFalse(replicaApp2.getStore().isReplicaUpgradeMode()); + + masterApp.insertDogAnimal(); + bounceMaster(1); + masterApp.insertCatAnimal(); + + masterApp.readDogAnimal(); + masterApp.readCatAnimal(); + replicaApp1.readDogAnimal(); + replicaApp1.readCatAnimal(); + replicaApp2.readDogAnimal(); + replicaApp2.readCatAnimal(); + + close(); + } + + /** + * Bounce replica1. No election will take place. If a higher appVersion + * is specified, the bounced node will also be upgraded. + */ + private void bounceReplica1(final int appVersion) + throws Exception { + + replicaApp1.close(); + replicaApp1 = null; + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() == replicaEnv1) { + info.closeEnv(); + replicaEnv1 = info.openEnv(); + replicaApp1 = newAppObject(appVersion); + replicaApp1.open(replicaEnv1); + break; + } + } + assertNotNull(replicaApp1); + assertSame(masterEnv.getState(), ReplicatedEnvironment.State.MASTER); + } + + /** + * Bounce replica2. No election will take place. If a higher appVersion + * is specified, the bounced node will also be upgraded. + */ + private void bounceReplica2(final int appVersion) + throws Exception { + + replicaApp2.close(); + replicaApp2 = null; + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() == replicaEnv2) { + info.closeEnv(); + replicaEnv2 = info.openEnv(); + replicaApp2 = newAppObject(appVersion); + replicaApp2.open(replicaEnv2); + break; + } + } + assertNotNull(replicaApp2); + assertSame(masterEnv.getState(), ReplicatedEnvironment.State.MASTER); + } + + /** + * Bounce the master, causing replica1 to switch roles with the master. If + * a higher appVersion is specified, the bounced node will also be + * upgraded. + */ + private void bounceMaster(final int appVersion) + throws Exception { + + /* Disable updates to RepGroupDB due to LocalCBVLSN updates. */ + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() == masterEnv) { + + /* + * Sync up the replication group so that node2 doesn't do + * hard recovery. + */ + RepTestUtils.syncGroupToLastCommit(repEnvInfo, + repEnvInfo.length); + /* Disable replay on replicas. */ + shutdownFeeder(info.getRepNode(), replicaEnv1); + shutdownFeeder(info.getRepNode(), replicaEnv2); + + /* Close the master. */ + masterApp.close(); + masterApp = null; + info.closeEnv(); + masterEnv = null; + + /* Force repEnvInfo[2] to the master. */ + WaitForMasterListener masterWaiter = + new WaitForMasterListener(); + replicaEnv2.setStateChangeListener(masterWaiter); + RepNode repNode = repEnvInfo[2].getRepNode(); + repNode.forceMaster(true); + /* Enable the LocalCBVLSN updates. */ + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(false); + masterWaiter.awaitMastership(); + assertTrue(repNode.isMaster()); + masterEnv = replicaEnv2; + + /* Replica2 was elected, swap names with replica1. */ + final ReplicatedEnvironment tmpEnv = replicaEnv1; + replicaEnv1 = replicaEnv2; + replicaEnv2 = tmpEnv; + final AppInterface tmpApp = replicaApp1; + replicaApp1 = replicaApp2; + replicaApp2 = tmpApp; + + /* Replica1 (or 2, see above) has been elected master. */ + masterApp = newAppObject(appVersion); + masterApp.adopt(replicaApp1); + /* Former master (just upgraded) becomes replica1. 
*/ + replicaEnv1 = info.openEnv(); + replicaApp1.open(replicaEnv1); + break; + } + } + assertNotNull(masterApp); + assertSame(masterEnv.getState(), ReplicatedEnvironment.State.MASTER); + } + + /* + * Remove a feeder from the master so that no entries can be replayed on + * this node. + */ + private void shutdownFeeder(RepNode masterNode, + ReplicatedEnvironment repEnv) + throws Exception { + + String nodeName = repEnv.getRepConfig().getNodeName(); + RepNodeImpl removeNode = masterNode.getGroup().getNode(nodeName); + masterNode.feederManager().shutdownFeeder(removeNode); + } + + /** + * Creates an instance of the specified class that implements + * AppInterface, and returns a Proxy to the instance. The returned Proxy + * invokes all methods of the target instance in the context of the + * ClassLoader of the specified class. + */ + private AppInterface newAppObject(final int appVersion) + throws Exception { + + AppInterface app = (AppInterface) Proxy.newProxyInstance + (AppInterface.class.getClassLoader(), + new Class[] { AppInterface.class }, + new MyInvocationHandler(appClasses[appVersion].newInstance())); + + app.setVersion(appVersion); + app.setInitDuringOpen(doInitDuringOpen); + + return app; + } + + /** + * Simple InvocationHandler to invoke methods for a given target object in + * the context of the ClassLoader of the target's class. + */ + private static class MyInvocationHandler implements InvocationHandler { + + private final Object target; + private final ClassLoader loader; + + MyInvocationHandler(final Object target) { + this.target = target; + this.loader = target.getClass().getClassLoader(); + } + + public Object invoke(final Object proxy, + final Method method, + final Object[] args) + throws Throwable { + + final Thread thread = Thread.currentThread(); + final ClassLoader saveLoader = thread.getContextClassLoader(); + try { + thread.setContextClassLoader(loader); + return method.invoke(target, args); + } catch (InvocationTargetException e) { + throw e.getCause(); + } finally { + thread.setContextClassLoader(saveLoader); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/secure.properties b/test/com/sleepycat/je/rep/secure.properties new file mode 100644 index 0000000..a599375 --- /dev/null +++ b/test/com/sleepycat/je/rep/secure.properties @@ -0,0 +1,9 @@ +# Set for SSL +je.rep.channelType=ssl +je.rep.ssl.keyStoreFile=SSLDIR/keys.store +je.rep.ssl.keyStorePassword=unittest +je.rep.ssl.trustStoreFile=SSLDIR/trust.store +#je.rep.ssl.cipherSuites=SSL_RSA_WITH_3DES_EDE_CBC_SHA,SSL_RSA_WITH_RC4_128_SHA +#je.rep.ssl.protocols=TLSv1,TLSv1.1,TLSv1.2 +je.rep.ssl.authenticator=dnmatch(CN=Unit Test) +je.rep.channelLogName=foobar diff --git a/test/com/sleepycat/je/rep/stream/FeederFilterTest.java b/test/com/sleepycat/je/rep/stream/FeederFilterTest.java new file mode 100644 index 0000000..e91c67d --- /dev/null +++ b/test/com/sleepycat/je/rep/stream/FeederFilterTest.java @@ -0,0 +1,277 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.stream;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.logging.Logger;
+
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.rep.ReplicaConsistencyException;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.utilint.LoggerUtils;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests the generic filtering at the Feeder.
+ */
+public class FeederFilterTest extends RepTestBase {
+
+    private final Logger logger =
+        LoggerUtils.getLoggerFixedPrefix(getClass(), "Test");
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+    }
+
+    @Override
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+    }
+
+    /**
+     * Verifies that the filter is actually passed to the feeder and runs
+     * there.  The filter used in this test simply filters out every record
+     * it checks, so the replica is unable to achieve consistency.
+     *
+     * ReplicaConsistencyException will be raised if the filter works as
+     * expected.
+     */
+    @Test
+    public void testBlockFilter() {
+        /* set timeout to 10 seconds to shorten test time */
+        repEnvInfo[1].getRepConfig()
+                     .setConfigParam(RepParams.ENV_CONSISTENCY_TIMEOUT
+                                              .getName(), "10 seconds");
+
+        /* set a blocking filter for a replica */
+        TestFilterBlockAll filter = new TestFilterBlockAll(1);
+        repEnvInfo[1].getRepConfig().setFeederFilter(filter);
+        try {
+            createGroup(3);
+        } catch (ReplicaConsistencyException e) {
+            /*
+             * The consistency exception is expected, because all entries
+             * are filtered out by the feeder and the replica is unable
+             * to achieve consistency.
+             */
+            logger.info("expected exception: " + e.getLocalizedMessage());
+            return;
+        }
+
+        fail("Test failed due to missing expected consistency exception");
+    }
+
+    /**
+     * Tests a filter that passes everything, so the rep group is expected
+     * to be created normally.
+     */
+    @Test
+    public void testNoOpFilter() {
+        TestFilterPassAll filter = new TestFilterPassAll(2);
+        /* set filter for a replica */
+        repEnvInfo[1].getRepConfig().setFeederFilter(filter);
+        createGroup(3);
+    }
+
+    /**
+     * Tests a filter that passes everything and also tracks statistics,
+     * which are used for verification.
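+     *
+     * A feeder filter is installed on the replica's ReplicationConfig
+     * before the group is created; it is then serialized, shipped to the
+     * master, and de-serialized into the feeder thread, where it sees
+     * every candidate record.  In outline:
+     *
+     *   repEnvInfo[1].getRepConfig().setFeederFilter(
+     *       new TestFilterPassAllWithStat("TestFilterWithStat"));
+     *   createGroup(3);  // the filter now runs at the master's Feeder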
+ */
+    @Test
+    public void testFilterWithStatistics() {
+        /* timeout is 10 seconds to shorten test time */
+        repEnvInfo[1].getRepConfig()
+                     .setConfigParam(RepParams.ENV_CONSISTENCY_TIMEOUT
+                                              .getName(), "10 seconds");
+
+        /* set a pass-all, statistics-tracking filter for a replica */
+        String filterID = "TestFilterWithStat";
+        TestFilterPassAllWithStat filter =
+            new TestFilterPassAllWithStat(filterID);
+        repEnvInfo[1].getRepConfig().setFeederFilter(filter);
+
+        createGroup(3);
+
+        /* verification by looking at the de-serialized filter */
+        List<TestFilterPassAllWithStat> deserializedFilters =
+            filter.getDeserializedFilter();
+        assertTrue("Expect at least one de-serialized filter",
+                   !deserializedFilters.isEmpty());
+        boolean foundFilter = false;
+        for (TestFilterPassAllWithStat f : deserializedFilters) {
+            if (f.getFilterId().equals(filterID)) {
+                foundFilter = true;
+                f.dumpFilterStatistics(logger);
+
+                assertTrue("Expect non-zero number of checked records",
+                           f.getNumCheckedRecords() > 0);
+                assertTrue("Expect non-zero internal records",
+                           f.getNumInternalRecords() > 0);
+                assertTrue("Expect non-zero commits",
+                           f.getNumNumCommits() > 0);
+            }
+        }
+        assertTrue("Expect to find the de-serialized filter", foundFilter);
+    }
+
+    /* a test filter which filters out every entry */
+    public static class TestFilterBlockAll
+        implements FeederFilter, Serializable {
+        private static final long serialVersionUID = 1L;
+        private final int filterID;
+
+        TestFilterBlockAll(int id) {
+            super();
+            filterID = id;
+        }
+
+        @Override
+        public OutputWireRecord execute(final OutputWireRecord outputRecord,
+                                        final RepImpl repImpl) {
+            /* block every entry! */
+            return null;
+        }
+
+        @Override
+        public String[] getTableIds() {
+            return null;
+        }
+    }
+
+    /* a test filter which passes every entry */
+    public static class TestFilterPassAll
+        implements FeederFilter, Serializable {
+        private static final long serialVersionUID = 1L;
+        private final int filterID;
+
+        TestFilterPassAll(int id) {
+            super();
+            filterID = id;
+        }
+
+        @Override
+        public OutputWireRecord execute(final OutputWireRecord outputRecord,
+                                        final RepImpl repImpl) {
+            /* no-op filter */
+            return outputRecord;
+        }
+
+        @Override
+        public String[] getTableIds() {
+            return null;
+        }
+    }
+
+    /* another pass-all test filter, with tracking of statistics */
+    public static class TestFilterPassAllWithStat
+        implements FeederFilter, Serializable {
+
+        private static final long serialVersionUID = 1L;
+        private static final List<TestFilterPassAllWithStat>
+            deserializedFilters = Collections.synchronizedList(
+                new ArrayList<TestFilterPassAllWithStat>());
+
+        private final String filterID;
+        private int numCheckedRecords;
+        private int numInternalRecords;
+        private int numNumCommits;
+
+        TestFilterPassAllWithStat(String id) {
+            super();
+            filterID = id;
+            numCheckedRecords = 0;
+            numInternalRecords = 0;
+            numNumCommits = 0;
+        }
+
+        public String getFilterId() {
+            return filterID;
+        }
+
+        public int getNumCheckedRecords() {
+            return numCheckedRecords;
+        }
+
+        public int getNumInternalRecords() {
+            return numInternalRecords;
+        }
+
+        public int getNumNumCommits() {
+            return numNumCommits;
+        }
+
+        public List<TestFilterPassAllWithStat> getDeserializedFilter() {
+            return deserializedFilters;
+        }
+
+        public void dumpFilterStatistics(Logger logger) {
+            logger.info("feeder filter id: " + filterID +
+                        " number of checked entries: " + numCheckedRecords +
+                        " number of internal nameln entries: " +
+                        numInternalRecords +
+                        " number of commits: " + numNumCommits +
+                        " number of de-serialized filters: " +
+                        deserializedFilters.size());
+        }
+
+        @Override
+        public OutputWireRecord execute(final
OutputWireRecord outputRecord, + final RepImpl repImpl) { + final byte type = outputRecord.getEntryType(); + numCheckedRecords++; + if (LogEntryType.LOG_NAMELN_TRANSACTIONAL.equalsType(type) || + LogEntryType.LOG_UPD_LN_TRANSACTIONAL.equalsType(type) || + LogEntryType.LOG_INS_LN_TRANSACTIONAL.equalsType(type)) { + numInternalRecords++; + } else if (LogEntryType.LOG_TXN_COMMIT.equalsType(type)) { + numNumCommits++; + } else { + /* do not count other types if seen */ + } + + /* pass every entry! */ + return outputRecord; + } + + @Override + public String[] getTableIds() { + return null; + } + + /* get and store the de-serialized filter at feeder thread */ + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException { + + in.defaultReadObject(); + deserializedFilters.add(this); + } + } +} diff --git a/test/com/sleepycat/je/rep/stream/FeederReaderTest.java b/test/com/sleepycat/je/rep/stream/FeederReaderTest.java new file mode 100644 index 0000000..4714794 --- /dev/null +++ b/test/com/sleepycat/je/rep/stream/FeederReaderTest.java @@ -0,0 +1,729 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.stream; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.TraceLogEntry; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.stream.VLSNTestUtils.CheckReader; +import com.sleepycat.je.rep.stream.VLSNTestUtils.CheckWireRecord; +import com.sleepycat.je.rep.stream.VLSNTestUtils.LogPopulator; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Exercise the FeederReader, which is used for scanning for vlsn tagged + * entries. 
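+ *
+ * The forward-scan protocol exercised here: initScan positions the reader
+ * at a starting VLSN, and each scanForwards call then returns the record
+ * tagged with the requested VLSN (or null if it does not appear within
+ * the wait time).  In outline, with arguments as used by checkForwardScan
+ * below:
+ *
+ *   FeederReader reader = new FeederReader(envImpl, vlsnIndex,
+ *                                          DbLsn.NULL_LSN,
+ *                                          readBufferSize,
+ *                                          true);  // bypassCache
+ *   reader.initScan(new VLSN(1));
+ *   OutputWireRecord rec = reader.scanForwards(new VLSN(1), 0);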
+ */
+public class FeederReaderTest extends TestBase {
+    private static final int STRIDE = 5;
+    private static final int MAX_MAPPINGS = 5;
+    private static final String SPACER = "space" + new String(new byte[100]) +
+                                         "space";
+
+    private final boolean verbose = Boolean.getBoolean("verbose");
+    private final File envHome;
+    private ReplicatedEnvironment rep;
+    private CheckReader checker;
+
+    public FeederReaderTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    /**
+     * Check that we can retrieve the correct results when doing forward
+     * scans.  Create a log, and start forward scans at each VLSN (i.e.
+     * VLSN 1, VLSN 2, etc.).
+     */
+    @Test
+    public void testForwardScans()
+        throws Throwable {
+
+        rep = null;
+        try {
+            ArrayList<CheckWireRecord> expected = setupLog(10 /* numFiles */);
+            VLSN lastVLSN = expected.get(expected.size() - 1).getVLSN();
+
+            /* Vary the read buffer size. */
+            for (int readBufferSize = 100;
+                 readBufferSize < 2000;
+                 readBufferSize += 100) {
+
+                /* And also vary the start point. */
+                for (int i = 1; i <= lastVLSN.getSequence(); i++) {
+                    checkForwardScan(checker.nScanned, expected, new VLSN(i),
+                                     readBufferSize,
+                                     false /* expectLogGrowth */);
+                }
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            if (rep != null) {
+                rep.close();
+                rep = null;
+            }
+        }
+    }
+
+    /**
+     * Check that we can wait for an upcoming VLSN when doing forward scans.
+     * Create a log, run a forward scan from VLSN 1, and then ask for one
+     * VLSN beyond the end of the log.
+     */
+    @Test
+    public void testWait()
+        throws Throwable {
+
+        rep = null;
+        try {
+            ArrayList<CheckWireRecord> expected = setupLog(3 /* numFiles */);
+            checkForwardScan(checker.nScanned, expected, new VLSN(1),
+                             99,
+                             true /* expectLogGrowth */);
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            if (rep != null) {
+                rep.close();
+                rep = null;
+            }
+        }
+    }
+
+    /**
+     * Check that we can retrieve the correct results when doing backward
+     * scans.  Create a log, and start backward scans at each VLSN (i.e.
+     * VLSN 50, VLSN 49, etc.).
+     */
+    @Test
+    public void testBackwardScans()
+        throws Throwable {
+
+        rep = null;
+        try {
+            ArrayList<CheckWireRecord> expected = setupLog(10 /* numFiles */);
+            VLSN lastVLSN = expected.get(expected.size() - 1).getVLSN();
+
+            for (long i = lastVLSN.getSequence(); i >= 1; i--) {
+                checkBackwardScan(checker.nScanned, expected, new VLSN(i),
+                                  90909 /* readBufferSize */);
+                checkBackwardScan(checker.nScanned, expected, new VLSN(i),
+                                  1000 /* readBufferSize */);
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            if (rep != null) {
+                rep.close();
+                rep = null;
+            }
+        }
+    }
+
+    /**
+     * Check that we can find the sync-able matchpoints when doing backward
+     * scans.
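+     *
+     * Matchpoint search uses the same backward readers as the plain
+     * backward scans; in outline, with arguments as used by
+     * checkBackwardScan below:
+     *
+     *   ReplicaSyncupReader reader = new ReplicaSyncupReader(
+     *       envImpl, vlsnIndex, lastLsn, readBufferSize,
+     *       startVLSN, DbLsn.NULL_LSN,
+     *       new MatchpointSearchResults(envImpl));
+     *   OutputWireRecord rec = reader.scanBackwards(startVLSN);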
+ */
+    @Test
+    public void testFindSyncableEntries()
+        throws Throwable {
+
+        rep = null;
+        try {
+            ArrayList expected = setupLog(10 /* numFiles */);
+            VLSN lastVLSN = expected.get(expected.size() - 1).getVLSN();
+
+            for (long i = lastVLSN.getSequence(); i >= 1; i--) {
+                checkSyncScan(checker.nScanned, expected, new VLSN(i),
+                              90909 /* readBufferSize */);
+                checkSyncScan(checker.nScanned, expected, new VLSN(i),
+                              1000 /* readBufferSize */);
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            if (rep != null) {
+                rep.close();
+                rep = null;
+            }
+        }
+    }
+
+    private class Inserter implements LogPopulator {
+        private final int desiredNumFiles;
+
+        Inserter(int numFiles) {
+            this.desiredNumFiles = numFiles;
+        }
+
+        @SuppressWarnings("hiding")
+        @Override
+        public void populateLog(ReplicatedEnvironment rep) {
+
+            EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(rep);
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setTransactional(true);
+            dbConfig.setAllowCreate(true);
+
+            FileManager fileManager = envImpl.getFileManager();
+
+            Database db = rep.openDatabase(null, "test", dbConfig);
+            try {
+                DatabaseEntry value = new DatabaseEntry();
+                for (int i = 0;
+                     fileManager.getLastFileNum() < desiredNumFiles;
+                     i++) {
+
+                    Transaction txn = rep.beginTransaction(null, null);
+                    IntegerBinding.intToEntry(i, value);
+                    OperationStatus status = db.put(txn, value, value);
+                    if (status != OperationStatus.SUCCESS) {
+                        throw new IllegalStateException("bad status of " +
+                                                        status);
+                    }
+                    Trace.trace(envImpl, SPACER);
+                    txn.commit();
+                }
+            } finally {
+                if (db != null) {
+                    db.close();
+                    db = null;
+                }
+            }
+
+        }
+    }
+
+    /** Start with an empty log. */
+    private class NoInsert implements LogPopulator {
+        public void populateLog(ReplicatedEnvironment rep) {
+        }
+    }
+
+    private ArrayList setupLog(int numFiles)
+        throws UnknownMasterException, DatabaseException,
+               InterruptedException {
+        return setupLog(new Inserter(numFiles));
+    }
+
+    private ArrayList setupLog(LogPopulator populator)
+        throws UnknownMasterException, DatabaseException,
+               InterruptedException {
+
+        rep = VLSNTestUtils.setupLog(envHome, STRIDE, MAX_MAPPINGS, populator);
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(rep);
+        checker = new CheckReader(envImpl);
+        return VLSNTestUtils.collectExpectedData(checker);
+    }
+
+    /**
+     * Start a FeederReader and scan forwards from startVLSN.
+     */
+    private void checkForwardScan(long nCheckerScans,
+                                  ArrayList expected,
+                                  VLSN startVLSN,
+                                  int readBufferSize,
+                                  boolean expectLogGrowth)
+        throws DatabaseException, IOException, InterruptedException {
+
+        EnvironmentImpl envImpl =
+            DbInternal.getNonNullEnvImpl(rep);
+        VLSNIndex vlsnIndex =
+            RepInternal.getNonNullRepImpl(rep).getVLSNIndex();
+        FeederReader feederReader = new FeederReader(envImpl,
+                                                     vlsnIndex,
+                                                     DbLsn.NULL_LSN,
+                                                     readBufferSize,
+                                                     true /*bypassCache*/);
+        if (verbose) {
+            System.out.println("test forward scan starting at " + startVLSN +
+                               " readBufferSize = " + readBufferSize +
+                               " expectLogGrowth = " + expectLogGrowth);
+        }
+        feederReader.initScan(startVLSN);
+
+        /* Read every replicated log entry with the feederReader. */
+        for (CheckWireRecord w : expected) {
+
+            VLSN vlsn = w.getVLSN();
+            if (vlsn.compareTo(startVLSN) < 0) {
+                /* not checking yet, just go on. */
+                continue;
+            }
+
+            /* Ask the feederReader for this. */
+            OutputWireRecord feederRecord = feederReader.scanForwards(vlsn, 0);
+
+            /*
+             * Compare the contents.
Can't use w.equals(), since equals + * can only be used before the WireRecord's entryBuffer has been + * reused. + */ + assertTrue("check=" + w + " feederRecord= " + feederRecord, + w.exactMatch(feederRecord)); + } + + /* Check that the feeder reader has done some repositioning. */ + VLSN lastVLSN = expected.get(expected.size() - 1).getVLSN(); + int minimumMappings = + (int)(lastVLSN.getSequence() - startVLSN.getSequence()) / STRIDE; + assertTrue(feederReader.getNReposition() >= minimumMappings); + assertTrue(feederReader.getNScanned() >= minimumMappings); + assertTrue(nCheckerScans > feederReader.getNScanned()); + if (verbose) { + System.out.println("repos=" + feederReader.getNReposition() + + " checkscan=" + nCheckerScans + + " feedscan=" + feederReader.getNScanned() + + " minMappings=" + minimumMappings); + } + + /* Ask for one more vlsn. It should time out. */ + if (expectLogGrowth) { + WireRecord notThere = feederReader.scanForwards(lastVLSN.getNext(), + 5); + assertEquals(null, notThere); + } + + } + + private void checkBackwardScan(long nCheckerScans, + ArrayList expected, + VLSN startVLSN, + int readBufferSize) + throws DatabaseException, IOException, ChecksumException { + + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(rep); + VLSNIndex vlsnIndex = + RepInternal.getNonNullRepImpl(rep).getVLSNIndex(); + long lastLsn = envImpl.getFileManager().getLastUsedLsn(); + + /* Try both kinds of backwards readers */ + FeederSyncupReader feederSyncupReader = + new FeederSyncupReader(envImpl, + vlsnIndex, + lastLsn, + readBufferSize, + startVLSN, + DbLsn.NULL_LSN); + ReplicaSyncupReader replicaSyncupReader = + new ReplicaSyncupReader(envImpl, + vlsnIndex, + lastLsn, + readBufferSize, + startVLSN, + DbLsn.NULL_LSN, + new MatchpointSearchResults(envImpl)); + + if (verbose) { + System.out.println("->lastLsn = " + + DbLsn.getNoFormatString(lastLsn) + + " startVLSN = " + startVLSN); + } + + /* Read every replicated log entry with the feederReader. */ + for (int i = expected.size() - 1; i >= 0; i--) { + CheckWireRecord w = expected.get(i); + VLSN vlsn = w.getVLSN(); + if (vlsn.compareTo(startVLSN) > 0) { + /* not checking yet, just go on. */ + continue; + } + + /* Ask the readers for this. */ + OutputWireRecord feederRecord = + feederSyncupReader.scanBackwards(vlsn); + OutputWireRecord replicaRecord = + replicaSyncupReader.scanBackwards(vlsn); + + /* + * Compare the contents. Can't use w.equals(), since equals + * can only be used before the WireRecord's entryBuffer has been + * reused, and in this case, the WireRecords are saved in a + * collection. + */ + assertTrue("feeder check=" + w + " feederRecord= " + feederRecord, + w.exactMatch(feederRecord)); + assertTrue("replica check=" + w + " replicaRecord= " + + replicaRecord, w.exactMatch(replicaRecord)); + + } + + /* + * Check that the feeder reader has done some repositioning. The way + * the mappings work, it's harder to assert as rigorously as the + * forward scans that the number of repositions and scans are a certain + * value. 
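+         * For example, with STRIDE = 5, a scan starting at VLSN 20 should
+         * have forced at least 20 / 5 = 4 repositions, which is what the
+         * assertion below checks once minimumMappings exceeds 2.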
+ */ + int minimumMappings = (int)(startVLSN.getSequence()/ STRIDE); + if (verbose) { + System.out.println + ("feeder repos=" + feederSyncupReader.getNReposition() + + "replica repos=" + replicaSyncupReader.getNReposition() + + " checkscan=" + nCheckerScans + + " feedscan=" + feederSyncupReader.getNScanned() + + " numVLSNs=" + minimumMappings); + } + + assertTrue(nCheckerScans > feederSyncupReader.getNScanned()); + assertEquals(0, replicaSyncupReader.getNReposition()); + if (minimumMappings > 2) { + assertTrue(feederSyncupReader.getNReposition() >= minimumMappings); + } + } + + private void checkSyncScan(@SuppressWarnings("unused") long nCheckerScans, + ArrayList expected, + VLSN startVLSN, + int readBufferSize) + throws DatabaseException, IOException { + + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(rep); + VLSNIndex vlsnIndex = + RepInternal.getNonNullRepImpl(rep).getVLSNIndex(); + long lastLsn = envImpl.getFileManager().getLastUsedLsn(); + ReplicaSyncupReader backwardsReader = + new ReplicaSyncupReader(envImpl, + vlsnIndex, + lastLsn, + readBufferSize, + startVLSN, + DbLsn.NULL_LSN, + new MatchpointSearchResults(envImpl)); + + /* Ask the feederReader for the start entry. */ + OutputWireRecord syncupRecord = + backwardsReader.scanBackwards(startVLSN); + + int checkIndex = (int) (startVLSN.getSequence() - 1); + CheckWireRecord check = expected.get(checkIndex); + assertTrue("check=" + check + " syncupRecord= " + syncupRecord, + check.exactMatch(syncupRecord)); + + /* + * Now search backwards for syncable entries. Iterate through the + * expected array, stopping at log entries of the right kind. + * then check the feederReader's ability to also stop at that kind + * of log entry. + */ + if (verbose) { + System.out.println("checking starting from " + startVLSN); + } + for (int i = checkIndex - 1; i >= 0; i--) { + check = expected.get(i); + if (LogEntryType.isSyncPoint(check.getEntryType())) { + syncupRecord = backwardsReader.findPrevSyncEntry(true); + if (verbose) { + System.out.print("i= " + i); + System.out.println(check); + } + + assertTrue("check=" + check + " syncupRecord= " + syncupRecord, + check.exactMatch(syncupRecord)); + } + } + } + + /** + * Test that a feeder reader can read log entries that have not yet + * been flushed to disk. + * @throws Throwable + */ + @Test + public void testNonFlushedFetch() + throws Throwable { + rep = null; + try { + /* Create a replicator, and create a log. */ + ArrayList expected = setupLog(2 /* numFiles */); + + for (int i = 0; i < 4; i++) { + logItem(expected, i, true /* replicate */, false /* sync */); + } + VLSN lastVLSN = expected.get(expected.size() - 1).getVLSN(); + + for (int i = 1; i <= lastVLSN.getSequence(); i++) { + checkForwardScan(checker.nScanned, expected, new VLSN(i), + 90909 /* readBufferSize */, + true /* expectLogGrowth */); + checkForwardScan(checker.nScanned, expected, new VLSN(i), + 1000 /* readBufferSize */, + true /* expectLogGrowth */); + } + } catch (Throwable e) { + e.printStackTrace(); + throw e; + } finally { + if (rep != null) { + rep.close(); + rep = null; + } + } + } + + private void logItem(ArrayList expected, + int traceNumber, + boolean replicate, + boolean sync) + throws DatabaseException { + + LogManager logManager = + DbInternal.getNonNullEnvImpl(rep). 
+            getLogManager();
+
+        Trace debugMsg = new Trace("Test " + traceNumber);
+        LogEntry entry = new TraceLogEntry(debugMsg);
+
+        ByteBuffer buffer = ByteBuffer.allocate(entry.getSize());
+        entry.writeEntry(buffer);
+        buffer.flip();
+        if (replicate) {
+            long lsn;
+            if (sync) {
+                lsn = logManager.logForceFlush(entry,
+                                               false,
+                                               ReplicationContext.MASTER);
+            } else {
+                lsn = logManager.log(entry, ReplicationContext.MASTER);
+            }
+
+            VLSNIndex vlsnIndex =
+                RepInternal.getNonNullRepImpl(rep).getVLSNIndex();
+            CheckWireRecord c =
+                new CheckWireRecord(RepInternal.getNonNullRepImpl(rep), lsn,
+                                    LogEntryType.LOG_TRACE.getTypeNum(),
+                                    LogEntryType.LOG_VERSION,
+                                    entry.getSize(),
+                                    vlsnIndex.getRange().getLast(),
+                                    buffer);
+            expected.add(c);
+        } else {
+            if (sync) {
+                logManager.logForceFlush(entry,
+                                         false,
+                                         ReplicationContext.NO_REPLICATE);
+            } else {
+                logManager.log(entry, ReplicationContext.NO_REPLICATE);
+            }
+        }
+    }
+
+    /**
+     * Test that a feeder reader can switch from reading the log buffer
+     * pool, to the file, back to the log buffer pool, and so on.
+     * @throws Throwable
+     */
+    @Test
+    public void testSwitchFetch()
+        throws Throwable {
+
+        rep = null;
+        for (int readBufferSize = 100;
+             readBufferSize < 2000;
+             readBufferSize += 100) {
+
+            /*
+             * The startIndex parameter for readAndGrow indicates where the
+             * feeder reader should start. In different loops, we run the
+             * reader starting at successively different locations -- i.e.,
+             * starting the scan at vlsn1, vlsn2, etc. That varies the
+             * location of the items in the readBuffer. We need to call
+             * readAndGrow once to establish how many items are created by
+             * the test.
+             */
+            int end;
+            try {
+                end = readAndGrow(0, readBufferSize);
+            } catch (Throwable e) {
+                e.printStackTrace();
+                throw e;
+            } finally {
+                if (rep != null) {
+                    rep.close();
+                    rep = null;
+                }
+                RepTestUtils.removeRepEnvironments(envHome);
+            }
+
+            /*
+             * Now that we know how many test items are created, rerun the
+             * test, starting the scan at different places.
+             */
+            for (int i = 1; i < end; i++) {
+                try {
+                    readAndGrow(i, readBufferSize);
+                } catch (Throwable e) {
+                    e.printStackTrace();
+                    throw e;
+                } finally {
+                    if (rep != null) {
+                        rep.close();
+                        rep = null;
+                    }
+                    RepTestUtils.removeRepEnvironments(envHome);
+                }
+            }
+        }
+    }
+
+    /**
+     * Mimic a feeder that is reading as a log is growing.
+     * @throws InterruptedException
+     * @throws IOException
+     * @throws DatabaseException
+     * @throws UnknownMasterException
+     */
+    private int readAndGrow(int startIndex, int readBufferSize)
+        throws DatabaseException, IOException, InterruptedException {
+
+        if (verbose) {
+            System.out.println("readAndGrow start at " + startIndex +
+                               " readBufferSize = " + readBufferSize);
+        }
+
+        /*
+         * Create a replicator. Even though we don't create any
+         * application data, there will be a few replicated log entries
+         * for the group db.
+         */
+        ArrayList expected = setupLog(new NoInsert());
+
+        VLSNIndex vlsnIndex =
+            RepInternal.getNonNullRepImpl(rep).getVLSNIndex();
+        EnvironmentImpl envImpl =
+            DbInternal.getNonNullEnvImpl(rep);
+        FeederReader feederReader = new FeederReader(envImpl,
+                                                     vlsnIndex,
+                                                     DbLsn.NULL_LSN,
+                                                     readBufferSize,
+                                                     true /*bypassCache*/);
+
+        for (int i = 0; i < 4; i++) {
+            logItem(expected, i, true /* replicate */, false /* sync */);
+        }
+
+        /* Read items that are still in the log buffer.
*/ + checkContinuedScan(feederReader, expected, 0, startIndex); + int nextStart = expected.size(); + + for (int i = 4; i < 8; i++) { + logItem(expected, i, true /* replicate */, false /* sync */); + } + + /* Read items that are still in the log buffer. */ + checkContinuedScan(feederReader, expected, nextStart, startIndex); + nextStart = expected.size(); + + for (int i = 8; i < 18; i++) { + logItem(expected, i, true /* replicate */, true /* sync */); + } + + /* + * Read items that should have been flushed out of the log + * buffers. + */ + checkContinuedScan(feederReader, expected, nextStart, startIndex); + nextStart = expected.size(); + + for (int i = 18; i < 20; i++) { + logItem(expected, i, true /* replicate */, false /* sync */); + } + + /* Read items that should still be in log buffers. */ + checkContinuedScan(feederReader, expected, nextStart, startIndex); + nextStart = expected.size(); + + /* + * Read items that should have been flushed out of the log + * buffers. + */ + for (int i = 20; i < 30; i++) { + logItem(expected, i, true /* replicate */, true /* sync */); + } + checkContinuedScan(feederReader, expected, nextStart, startIndex); + nextStart = expected.size(); + + /* + * Read items that should have been flushed out of the log + * buffers. + */ + for (int i = 30; i < 40; i++) { + logItem(expected, i, true /* replicate */, false /* sync */); + } + checkContinuedScan(feederReader, expected, nextStart, startIndex); + return expected.size(); + } + + /** + * In this case, the FeederReader has been doing some reading already. + * Ask it to read some more, and check the results. + */ + private void checkContinuedScan(FeederReader feederReader, + ArrayList expected, + int newRecordsStart, + int feederStart) + throws InterruptedException, DatabaseException, IOException { + + for (int i = newRecordsStart; i < expected.size(); i++) { + CheckWireRecord c = expected.get(i); + if (i == feederStart) { + feederReader.initScan(c.getVLSN()); + } + + if (i >= feederStart) { + OutputWireRecord feederRecord = + feederReader.scanForwards(c.getVLSN(), 0); + assertTrue("check=" + c + " feederRecord= " + feederRecord, + c.exactMatch(feederRecord)); + + } + } + } +} diff --git a/test/com/sleepycat/je/rep/stream/FeederWriteQueueTest.java b/test/com/sleepycat/je/rep/stream/FeederWriteQueueTest.java new file mode 100644 index 0000000..53c441b --- /dev/null +++ b/test/com/sleepycat/je/rep/stream/FeederWriteQueueTest.java @@ -0,0 +1,188 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.stream; + +import java.io.File; +import java.io.IOException; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import org.junit.Test; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * [#18882] Exercise a bug in the interaction between the FeederReader and + * the write queue. + * + * Unlike any other FileReader, the FeederReader must operate on log files + * that are concurrently growing in size. Log entries may be in the log buffers, + * in the write queue, or on disk. + * + * The FeederReader starts out by looking for additional log contents in the + * log buffers. If the log buffers don't have the desired data, the + * FeederReader assumes the data must be somewhere on disk. In addition, the + * FeederReader is not reading on lsn boundaries. It's just reading sections of + * the log files, based on offsets. Because of that, the FeederReader must + * deduce when a log file has ended, and it needs to switch to the next file. + * + * The bug was in this transition. The FeederReader was incorrectly deducing + * the end of a given log file by checking the size of the log file. However, + * it's possible that data can exist in the write queue only, and not be on + * disk. In this case, the FeederReader was incorrectly skipping to the next + * file, which resulted in an attempt to access a non existent file. + * + * For example, suppose a log entry goes through these stages: + * 1. In write log buffer + * 2. Log buffer flushed to LogManager, but is queued in write queue + * instead of going to disk. + * 3. Queued writes are written to disk, log entry is removed from queue, exists + * on disk only. + * + * The bug was that during stage 2, the FeederReader checked the size of the + * current log file, found that it had read everything in that file already, + * and incorrectly decided to jump to the next file. That next file does not + * exist. + */ +public class FeederWriteQueueTest extends TestBase { + private boolean verbose = Boolean.getBoolean("verbose"); + + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + private ReplicatedEnvironment master; + private EntityStore store; + private PrimaryIndex primaryIndex; + private final int numThreads = 5; + private final int numRecords = 500; + private final int nNodes = 2; + + public FeederWriteQueueTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * [#18882] Before this bug fix, this test would result in a + * java.io.FileNotFoundException out of FeederReader$SwitchWindow.fillNext. 
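+     *
+     * In rough pseudocode (an illustration of the class comment above,
+     * using hypothetical helper names, not the actual JE source), the
+     * faulty end-of-file deduction was equivalent to:
+     * <pre>
+     *   if (readOffset >= sizeOnDisk(currentFile)) {
+     *       // WRONG: bytes may still sit in the write queue rather than
+     *       // on disk, so the file has not necessarily ended.
+     *       advanceToFile(currentFile + 1);  // next file may not exist yet
+     *   }
+     * </pre>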
+ */ + @Test + public void testDataInWriteQueue() + throws Exception { + + openGroup(); + + ExecutorService appThreads = Executors.newFixedThreadPool(numThreads); + int opsPerThread = numRecords/numThreads; + for (int i = 0; i < numThreads; i++) { + appThreads.execute(new AppWork(i, opsPerThread)); + } + + appThreads.shutdown(); + appThreads.awaitTermination(6000, TimeUnit.SECONDS); + + VLSN vlsn = RepTestUtils.syncGroupToLastCommit(repEnvInfo, + repEnvInfo.length); + RepTestUtils.checkNodeEquality(vlsn, verbose, repEnvInfo); + closeGroup(); + } + + private void openGroup() + throws IOException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + + ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setConfigParam(RepParams.VLSN_LOG_CACHE_SIZE.getName(), + "2"); + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, nNodes, envConfig, + repConfig); + master = RepTestUtils.joinGroup(repEnvInfo); + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + store = new EntityStore(master, "test", config); + primaryIndex = store.getPrimaryIndex(Integer.class, AppData.class); + } + + private void closeGroup() { + store.close(); + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + /* + * Test data + */ + @Entity + static class AppData { + public AppData() { } + AppData(int key) { + this.key = key; + } + + @SuppressWarnings("unused") + @PrimaryKey + private int key; + @SuppressWarnings("unused") + private String data = "abcdefghijklmnopqrstuv"; + + } + + /** + * Each thread does some inserts and deletes. + */ + private class AppWork implements Runnable { + private final Random random; + private final int numOperations; + private final int whichThread; + + AppWork(int whichThread, int numOperations) { + this.whichThread = whichThread; + random = new Random(whichThread); + this.numOperations = numOperations; + } + + public void run() { + for (int i = 0; i < numOperations; i++) { + Transaction txn = master.beginTransaction(null, null); + int keyVal = random.nextInt(); + AppData data = new AppData(keyVal); + primaryIndex.put(txn, data); + txn.commitSync(); + + txn = master.beginTransaction(null, null); + primaryIndex.delete(txn, keyVal); + txn.commitSync(); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/stream/ProtocolTest.java b/test/com/sleepycat/je/rep/stream/ProtocolTest.java new file mode 100644 index 0000000..4463a81 --- /dev/null +++ b/test/com/sleepycat/je/rep/stream/ProtocolTest.java @@ -0,0 +1,424 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.rep.stream;
+
+import static com.sleepycat.je.rep.utilint.BinaryProtocolStatDefinition.N_ENTRIES_WRITTEN_OLD_VERSION;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.StatsConfig;
+import com.sleepycat.je.log.LogEntryHeader;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.Trace;
+import com.sleepycat.je.log.entry.TraceLogEntry;
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.RepNodeImpl;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.rep.stream.Protocol.FeederProtocolVersion;
+import com.sleepycat.je.rep.stream.Protocol.ReplicaProtocolVersion;
+import com.sleepycat.je.rep.stream.Protocol.SNTPRequest;
+import com.sleepycat.je.rep.stream.Protocol.SNTPResponse;
+import com.sleepycat.je.rep.util.TestChannel;
+import com.sleepycat.je.rep.utilint.BinaryProtocol.Message;
+import com.sleepycat.je.utilint.StatGroup;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.TestBase;
+
+import org.junit.Test;
+
+/**
+ * Test basic functionality of feeder protocol messages.
+ */
+public class ProtocolTest extends TestBase {
+
+    @Test
+    public void testBasic()
+        throws IOException {
+
+        /* Initialize the token and table ids used in some tests. */
+        final byte[] token = new byte[8];
+        for (int i = 0; i < token.length; i++) {
+            token[i] = (byte) i;
+        }
+        final String[] tableIds = new String[8];
+        for (int i = 0; i < tableIds.length; i++) {
+            tableIds[i] = String.valueOf(i);
+        }
+
+        /* Set up a collection of every type of message. */
+        List testMessages = new LinkedList();
+        Protocol protocol =
+            Protocol.get(new RepNode(), Protocol.MAX_VERSION,
+                         Protocol.MIN_VERSION, Protocol.MAX_VERSION,
+                         LogEntryType.LOG_VERSION);
+
+        OutputWireRecord wireRecord = makeFakeLogEntry("Tom Brady");
+        Message testMsg = protocol.new Entry(wireRecord);
+        testMessages.add(testMsg);
+
+        testMsg = protocol.new ReplicaProtocolVersion();
+        testMessages.add(testMsg);
+
+        testMsg = protocol.new FeederProtocolVersion(1);
+        testMessages.add(testMsg);
+
+        testMsg = protocol.new DuplicateNodeReject("1234");
+        testMessages.add(testMsg);
+
+        testMsg = protocol.new ReplicaJEVersions(JEVersion.CURRENT_VERSION,
+                                                 LogEntryType.LOG_VERSION);
+        testMessages.add(testMsg);
+
+        testMsg = protocol.new FeederJEVersions(JEVersion.CURRENT_VERSION,
+                                                LogEntryType.LOG_VERSION,
+                                                JEVersion.CURRENT_VERSION);
+        testMessages.add(testMsg);
+
+        testMsg = protocol.new JEVersionsReject("1234");
+        testMessages.add(testMsg);
+
+        /* Multiple StartStream msgs share the same op id in the protocol. */
+        int numStartStreamMsg = 0;
+        testMsg = protocol.new StartStream(new VLSN(18));
+        testMessages.add(testMsg);
+        numStartStreamMsg++;
+        testMsg = protocol.new StartStream(new VLSN(19), null);
+        testMessages.add(testMsg);
+        numStartStreamMsg++;
+        testMsg = protocol.new StartStream(new VLSN(20),
+                                           new TestFilter(token, tableIds));
+        testMessages.add(testMsg);
+        numStartStreamMsg++;
+
+        testMsg = protocol.new Heartbeat(System.currentTimeMillis(),
+                                         0xdeadbeefdeadbeefL);
+
testMessages.add(testMsg); + + testMsg = protocol.new HeartbeatResponse(new VLSN(100), + new VLSN(200)); + testMessages.add(testMsg); + + wireRecord = makeFakeLogEntry("Randy Moss"); + testMsg = protocol.new Commit(true, + Durability.SyncPolicy.SYNC, + wireRecord); + testMessages.add(testMsg); + + testMsg = protocol.new Ack(19); + testMessages.add(testMsg); + + testMsg = protocol.new GroupAck(new long[]{100, 101, 102}); + testMessages.add(testMsg); + + testMsg = protocol.new NodeGroupInfo + ("repGroup", + UUID.randomUUID(), + new NameIdPair("node1",(short)1), + "oracle.com", + 7000, + NodeType.ELECTABLE, + true, + null); + testMessages.add(testMsg); + + testMsg = protocol.new NodeGroupInfoOK(UUID.randomUUID(), + new NameIdPair("node1",(short)1)); + testMessages.add(testMsg); + + testMsg = + protocol.new NodeGroupInfoReject("Patriots lost the Superbowl."); + testMessages.add(testMsg); + + testMsg = protocol.new EntryRequest(new VLSN(80)); + testMessages.add(testMsg); + + testMsg = protocol.new EntryNotFound(); + testMessages.add(testMsg); + + testMessages.add(protocol.new RestoreRequest(new VLSN(50))); + + RepNodeImpl rn1 = new RepNodeImpl(new NameIdPair("n1",1), + NodeType.ELECTABLE, + "host1", + 1000, + null); + RepNodeImpl rn2 = new RepNodeImpl(new NameIdPair("n2",1), + NodeType.ELECTABLE, + "host2", + 2000, + null); + testMsg = protocol.new RestoreResponse + (new VLSN(60), new RepNodeImpl[] {rn1, rn2}); + testMessages.add(testMsg); + + wireRecord = makeFakeLogEntry("Bruschi"); + testMsg = protocol.new AlternateMatchpoint(wireRecord); + testMessages.add(testMsg); + + testMsg = protocol.new ShutdownRequest(System.currentTimeMillis()); + testMessages.add(testMsg); + + testMsg = protocol.new ShutdownResponse(); + testMessages.add(testMsg); + + testMsg = protocol.new ReAuthenticate(token); + testMessages.add(testMsg); + + /* + * For each type of message, make sure we can parse it, and that the + * resulting new message is identical. Make sure we test all message + * types but the SNTP messages, since they contain timestamp fields + * that are initialized at serialization and deserialization. + */ + assertEquals(protocol.messageCount() - + protocol.getPredefinedMessageCount() - + /* Excluded SNTP messages. 
*/
+                     2 +
+                     /* Include the duplicated StartStream messages. */
+                     (numStartStreamMsg - 1),
+                     testMessages.size());
+        for (Message m : testMessages) {
+            ByteBuffer testWireFormat = m.wireFormat().duplicate();
+            Message newMessage =
+                protocol.read(new TestChannel(testWireFormat));
+            assertTrue(newMessage.getOp() + " new=" + newMessage +
+                       " test=" + m,
+                       newMessage.match(m));
+        }
+        /* Custom tests for SNTP messages */
+        testSNTPMessages(protocol);
+    }
+
+    public static class TestFilter implements FeederFilter, Serializable {
+        private static final long serialVersionUID = 1L;
+
+        private final byte[] token;
+        private final String[] tableIds;
+
+        public TestFilter(byte[] token, String[] tableIds) {
+            this.token = token;
+            this.tableIds = tableIds;
+        }
+
+        @Override
+        public OutputWireRecord execute(final OutputWireRecord outputRecord,
+                                        final RepImpl repImpl) {
+            throw new UnsupportedOperationException("Method not implemented: " +
+                                                    "execute");
+        }
+
+        @Override
+        public String[] getTableIds() {
+            return tableIds;
+        }
+
+    }
+
+    private void testSNTPMessages(Protocol protocol)
+        throws IOException {
+
+        SNTPRequest m1s = protocol.new SNTPRequest(true);
+        assertEquals(-1, m1s.getReceiveTimestamp());
+        SNTPRequest m1r =
+            (SNTPRequest) protocol.read(new TestChannel
+                                        (m1s.wireFormat().duplicate()));
+        assertFalse(-1 == m1r.getReceiveTimestamp());
+        assertTrue(m1r.isLast());
+        SNTPResponse m2s = protocol.new SNTPResponse(m1s);
+        assertEquals(m1s.getOriginateTimestamp(), m2s.getOriginateTimestamp());
+        assertEquals(m1s.getReceiveTimestamp(), m2s.getReceiveTimestamp());
+        assertEquals(-1, m2s.getTransmitTimestamp());
+        assertEquals(-1, m2s.getDestinationTimestamp());
+        ByteBuffer wireFormat = m2s.wireFormat().duplicate();
+        assertFalse(-1 == m2s.getTransmitTimestamp());
+        assertEquals(-1, m2s.getDestinationTimestamp());
+        SNTPResponse m2r =
+            (SNTPResponse) protocol.read(new TestChannel(wireFormat));
+        assertEquals(m1s.getOriginateTimestamp(), m2r.getOriginateTimestamp());
+        assertEquals(m1s.getReceiveTimestamp(), m2r.getReceiveTimestamp());
+        assertEquals(m2s.getTransmitTimestamp(), m2r.getTransmitTimestamp());
+        assertFalse(-1 == m2r.getDestinationTimestamp());
+    }
+
+    private OutputWireRecord makeFakeLogEntry(String msg) {
+        return makeFakeLogEntry(msg, LogEntryType.LOG_VERSION);
+    }
+
+    private OutputWireRecord makeFakeLogEntry(String msg, int logVersion) {
+        final TraceLogEntry entry = new TraceLogEntry(new Trace(msg));
+        final ByteBuffer entryBuffer = ByteBuffer.allocate(entry.getSize());
+        entry.writeEntry(entryBuffer);
+        entryBuffer.flip();
+        final LogEntryHeader fakeHeader =
+            new LogEntryHeader(LogEntryType.LOG_TRACE.getTypeNum(),
+                               logVersion,
+                               entry.getSize(),
+                               new VLSN(33));
+        return new OutputWireRecord(null, fakeHeader, entryBuffer);
+    }
+
+    @Test
+    public void testVersion() {
+        Protocol protocol100 =
+            Protocol.get(new RepNode(), Protocol.MAX_VERSION,
+                         Protocol.MIN_VERSION, Protocol.MAX_VERSION,
+                         LogEntryType.LOG_VERSION);
+
+        ReplicaProtocolVersion repVersion =
+            protocol100.new ReplicaProtocolVersion();
+        assertEquals(repVersion.getVersion(), protocol100.getVersion());
+        FeederProtocolVersion feederVersion =
+            protocol100.new FeederProtocolVersion(protocol100.getVersion());
+        assertEquals(feederVersion.getVersion(), protocol100.getVersion());
+
+    }
+
+    /**
+     * Test that writing a message containing a log entry where the requested
+     * log format version is less than both the current log version and the log
+     * entry
version results in converting the entry to the requested version. + */ + @Test + public void testWritePreviousVersionOlderConvert() + throws IOException { + + /* Use this value when converting to the previous version */ + final Trace priorItem = new Trace("replacement"); + TraceLogEntry.setTestPriorItem(priorItem); + try { + final Protocol protocol = + Protocol.get(new RepNode(), Protocol.MAX_VERSION, + Protocol.MIN_VERSION, Protocol.MAX_VERSION, + /* Request the previous version */ + new TraceLogEntry().getLastFormatChange() - 1); + final OutputWireRecord writeRecord = makeFakeLogEntry("original"); + final Message writeMessage = protocol.new Entry(writeRecord); + final Protocol.Entry readMessage = + (Protocol.Entry) protocol.read( + new TestChannel(writeMessage.wireFormat().duplicate())); + final InputWireRecord readRecord = readMessage.getWireRecord(); + final TraceLogEntry readEntry = + (TraceLogEntry) readRecord.getLogEntry(); + final Trace readItem = readEntry.getMainItem(); + /* Confirm that the entry was converted */ + assertEquals("Trace", priorItem, readItem); + final StatGroup stats = protocol.getStats(new StatsConfig()); + assertEquals("N_ENTRIES_WRITTEN_OLD_VERSION", + 1, stats.getLong(N_ENTRIES_WRITTEN_OLD_VERSION)); + } finally { + TraceLogEntry.setTestPriorItem(null); + } + } + + /** + * Test that writing a message containing a log entry where the requested + * log format version is less than the current log version but is + * compatible with the requested version results in the entry being copied, + * not converted. + */ + @Test + public void testWritePreviousVersionCompatibleCopy() + throws IOException { + + /* Use this value when converting to the previous version */ + final Trace priorItem = new Trace("replacement"); + TraceLogEntry.setTestPriorItem(priorItem); + try { + final Protocol protocol = + Protocol.get(new RepNode(), Protocol.MAX_VERSION, + Protocol.MIN_VERSION, Protocol.MAX_VERSION, + /* Request the previous version */ + new TraceLogEntry().getLastFormatChange() - 1); + final OutputWireRecord writeRecord = + makeFakeLogEntry("original", + /* Create the entry in the previous version */ + new TraceLogEntry().getLastFormatChange() - 1); + final Message writeMessage = protocol.new Entry(writeRecord); + final Protocol.Entry readMessage = + (Protocol.Entry) protocol.read( + new TestChannel(writeMessage.wireFormat().duplicate())); + final InputWireRecord readRecord = readMessage.getWireRecord(); + final TraceLogEntry readEntry = + (TraceLogEntry) readRecord.getLogEntry(); + final Trace readItem = readEntry.getMainItem(); + /* Confirm that the entry was copied, not converted */ + assertEquals("Trace message", "original", readItem.getMessage()); + final StatGroup stats = protocol.getStats(new StatsConfig()); + assertEquals("N_ENTRIES_WRITTEN_OLD_VERSION", + 0, stats.getLong(N_ENTRIES_WRITTEN_OLD_VERSION)); + } finally { + TraceLogEntry.setTestPriorItem(null); + } + } + + /** + * Test reading a NodeGroupInfo object using the format that does not + * support the jeVersion field. 
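+     * The message is round-tripped through wireFormat() and protocol.read()
+     * and must still match, whether or not a jeVersion is supplied.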
+ */ + @Test + public void testNodeGroupInfoNoJEVersion() + throws IOException { + + final Protocol protocol = + Protocol.get(new RepNode(), Protocol.VERSION_4, + Protocol.MIN_VERSION, Protocol.MAX_VERSION, + LogEntryType.LOG_VERSION); + for (final JEVersion jeVersion : + new JEVersion[] { null, new JEVersion("1.2.3") }) { + final Message msg = protocol.new NodeGroupInfo( + "repGroup", UUID.randomUUID(), new NameIdPair("node7", 7), + "example.com", 7000, NodeType.ELECTABLE, true, jeVersion); + final ByteBuffer bytes = msg.wireFormat().duplicate(); + final Message newMsg = protocol.read(new TestChannel(bytes)); + assertTrue("Expected " + msg + ", found " + newMsg, + newMsg.match(msg)); + } + } + + /** + * Test sending a SECONDARY node to a master running a protocol version + * that does not support them. Note that protocol version 4 shouldn't get + * picked for systems that can create secondary nodes, but just checking + * that an internal exception is thrown in that case. + */ + @Test + public void testNodeGroupInfoSecondaryToOldMaster() { + final Protocol protocol = Protocol.get( + new RepNode(), Protocol.VERSION_4, Protocol.MIN_VERSION, + Protocol.MAX_VERSION, LogEntryType.LOG_VERSION); + final Message msg = protocol.new NodeGroupInfo( + "repGroup", UUID.randomUUID(), new NameIdPair("node7", 7), + "example.com", 7000, NodeType.SECONDARY, true, + JEVersion.CURRENT_VERSION); + try { + msg.wireFormat(); + fail("Expected IllegalStateException"); + } catch (IllegalStateException e) { + } + } +} diff --git a/test/com/sleepycat/je/rep/stream/ReplicaSyncupReaderTest.java b/test/com/sleepycat/je/rep/stream/ReplicaSyncupReaderTest.java new file mode 100644 index 0000000..13977a6 --- /dev/null +++ b/test/com/sleepycat/je/rep/stream/ReplicaSyncupReaderTest.java @@ -0,0 +1,229 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.stream; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.util.List; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.entry.CommitLogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.recovery.CheckpointEnd; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.stream.MatchpointSearchResults.PassedTxnInfo; +import com.sleepycat.je.rep.stream.VLSNTestUtils.CheckReader; +import com.sleepycat.je.rep.stream.VLSNTestUtils.CheckWireRecord; +import com.sleepycat.je.rep.stream.VLSNTestUtils.LogPopulator; +import com.sleepycat.je.txn.TxnCommit; +import com.sleepycat.je.utilint.Timestamp; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Ensure that ReplicaSyncupReader tracks checkpoints and commits properly, so + * that rollback conditions are obeyed. + */ + +public class ReplicaSyncupReaderTest extends TestBase { + + private final File envHome; + + public ReplicaSyncupReaderTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testRepAndNonRepCommits() + throws DatabaseException, InterruptedException { + runTest(new RepAndNonRepCommits()); + } + + @Test + public void testMultipleCkpts() + throws DatabaseException, InterruptedException { + runTest(new MultipleCkpts()); + } + + private void runTest(CommitsAndCkpts populator) + throws DatabaseException, InterruptedException { + + ReplicatedEnvironment rep = + VLSNTestUtils.setupLog(envHome, + 5, + 3, + populator); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(rep); + List expected = + VLSNTestUtils.collectExpectedData(new CheckReader(envImpl)); + long lastLsn = envImpl.getFileManager().getLastUsedLsn(); + + try { + + MatchpointSearchResults searchResults = + new MatchpointSearchResults(envImpl); + int lastIndex = expected.size() - 1; + + ReplicaSyncupReader replicaSyncupReader = + new ReplicaSyncupReader + (envImpl, + RepInternal.getNonNullRepImpl(rep).getVLSNIndex(), + lastLsn, + 10000, + expected.get(lastIndex).getVLSN(), // startVLSN + populator.lsnBeforePopulate, // finishLSN + searchResults); + + for (int i = lastIndex; i >=0; i-- ) { + replicaSyncupReader.scanBackwards(expected.get(i).getVLSN()); + } + + assertEquals(populator.nExpectedCommits, + searchResults.getNumPassedCommits()); + assertEquals(populator.passedCheckpointEnd, + searchResults.getPassedCheckpointEnd()); + + PassedTxnInfo earliest = searchResults.getEarliestPassedTxn(); + assertEquals(populator.earliestTxnId, earliest.id); + assertEquals(populator.earliestPassedTime, earliest.time); + assertEquals(populator.earliestTxnLsn, earliest.lsn); + } finally { + if (rep != null) { + rep.close(); + } + } + } + + private abstract class CommitsAndCkpts implements LogPopulator { + + long nExpectedCommits = 1; + boolean passedCheckpointEnd = true; + long earliestTxnId = 20; + Timestamp earliestPassedTime; + long earliestTxnLsn; + long lsnBeforePopulate; + + protected LogManager logManager; + + @Override + public void populateLog(ReplicatedEnvironment rep) { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(rep); + logManager 
= envImpl.getLogManager(); + + /* + * Remember the lsn before we begin adding new entries to the log. + * We want to limit the reading of the log by the + * ReplicaSyncupReader in order to skip the parts of the log written + * before this phase, so the test can be sure what to check for. + */ + lsnBeforePopulate = envImpl.getFileManager().getNextLsn(); + + writeLogEntries(rep); + } + + protected abstract void writeLogEntries(ReplicatedEnvironment rep); + } + + private class RepAndNonRepCommits extends CommitsAndCkpts { + + RepAndNonRepCommits() { + nExpectedCommits = 1; + passedCheckpointEnd = true; + earliestTxnId = 20; + } + + @Override + public void writeLogEntries(ReplicatedEnvironment rep) { + + SingleItemEntry endEntry = + SingleItemEntry.create(LogEntryType.LOG_CKPT_END, + new CheckpointEnd + ("test", 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + true /*cleanedFilesToDelete*/)); + logManager.log(endEntry, ReplicationContext.NO_REPLICATE); + + /* + * Only replicated commits should be noted by the sync reader. + */ + TxnCommit commit = new TxnCommit(10, 0, 1, 1); + + CommitLogEntry commitEntry = new CommitLogEntry(commit); + logManager.log(commitEntry, ReplicationContext.NO_REPLICATE); + + commit = new TxnCommit(20, 0, 1, VLSN.NULL_VLSN_SEQUENCE); + commitEntry = new CommitLogEntry(commit); + earliestPassedTime = commit.getTime(); + earliestTxnLsn = + logManager.log(commitEntry, ReplicationContext.MASTER); + logManager.flushNoSync(); + } + } + + private class MultipleCkpts extends CommitsAndCkpts { + + MultipleCkpts() { + nExpectedCommits = 2; + passedCheckpointEnd = false; + earliestTxnId = 10; + } + + @Override + public void writeLogEntries(ReplicatedEnvironment rep) { + + /* Ckpt A */ + SingleItemEntry endEntry = + SingleItemEntry.create(LogEntryType.LOG_CKPT_END, + new CheckpointEnd + ("test", 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + false /*cleanedFilesToDelete*/)); + logManager.log(endEntry, ReplicationContext.NO_REPLICATE); + + /* Commit A */ + TxnCommit commit = new TxnCommit(10, 0, 1, + VLSN.NULL_VLSN_SEQUENCE); + earliestPassedTime = commit.getTime(); + CommitLogEntry commitEntry = new CommitLogEntry(commit); + earliestTxnLsn = + logManager.log(commitEntry, ReplicationContext.MASTER); + + /* Commit B */ + commitEntry = + new CommitLogEntry(new TxnCommit(20, 0, 1, + VLSN.NULL_VLSN_SEQUENCE)); + logManager.log(commitEntry, ReplicationContext.MASTER); + + + /* Ckpt B */ + endEntry = + SingleItemEntry.create(LogEntryType.LOG_CKPT_END, + new CheckpointEnd + ("test", 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + false /*cleanedFilesToDelete*/)); + logManager.log(endEntry, ReplicationContext.NO_REPLICATE); + + logManager.flushNoSync(); + } + } +} diff --git a/test/com/sleepycat/je/rep/stream/VLSNTestUtils.java b/test/com/sleepycat/je/rep/stream/VLSNTestUtils.java new file mode 100644 index 0000000..47f8556 --- /dev/null +++ b/test/com/sleepycat/je/rep/stream/VLSNTestUtils.java @@ -0,0 +1,284 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.stream; + +import java.io.File; +import java.nio.ByteBuffer; +import java.util.ArrayList; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileReader; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +public class VLSNTestUtils { + private static final String GROUP_NAME = "repGroup"; + private static final String NODE_NAME = "n8"; + private static final String TEST_HOST = "localhost"; + private static final Integer MAX_DISTANCE = 1000; + + /* + * If -DlongTimeout is true, then this test will run with very long + * timeouts, to make interactive debugging easier. + */ + private static final boolean longTimeout = + Boolean.getBoolean("longTimeout"); + + /** + * Create a replicated environment and populate the log. + */ + public static ReplicatedEnvironment setupLog(File envHome, + int bucketStride, + int bucketMaxMappings, + LogPopulator populator) + throws UnknownMasterException, DatabaseException { + + /* + * Create a single replicator. We're only interested in the log + * on this node, so the durability for the replica is a no-op. + */ + Durability syncDurability = new Durability(SyncPolicy.SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + + ReplicatedEnvironment rep = + makeReplicator(envHome, + syncDurability, 1000 /* logfilelen */, + bucketStride, + bucketMaxMappings); + + ReplicatedEnvironment.State joinState = rep.getState(); + if (!joinState.equals(ReplicatedEnvironment.State.MASTER)) { + throw new IllegalStateException("bad state " + joinState); + } + + populator.populateLog(rep); + + return rep; + } + + public static ReplicatedEnvironment + makeReplicator(File envHome, + Durability durability, + long fileLen, + int bucketStride, + int bucketMaxMappings) + throws DatabaseException { + + /* + * Configure the environment with a specific durability and log file + * length. + */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(durability); + + /* + * Disable anything that might asynchronously write the log and + * interfere with this test's notion of what data should be present. 
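+         * (The cleaner and the checkpointer both write to the log
+         * asynchronously, so both are disabled below.)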
+ */
+        envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER,
+                                 "false");
+        envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER,
+                                 "false");
+        LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true);
+
+        DbInternal.disableParameterValidation(envConfig);
+        envConfig.setConfigParam("je.log.fileMax", Long.toString(fileLen));
+
+        int port = Integer.parseInt(RepParams.DEFAULT_PORT.getDefault());
+        String hostName = TEST_HOST + ":" + port;
+        ReplicationConfig repConfig =
+            new ReplicationConfig(GROUP_NAME, NODE_NAME, hostName);
+        repConfig.setConfigParam
+            (RepParams.ENV_SETUP_TIMEOUT.getName(), "60 s");
+        repConfig.setConfigParam
+            (ReplicationConfig.ENV_CONSISTENCY_TIMEOUT, "60 s");
+        repConfig.setConfigParam("je.rep.vlsn.stride",
+                                 Integer.toString(bucketStride));
+        repConfig.setConfigParam("je.rep.vlsn.mappings",
+                                 Integer.toString(bucketMaxMappings));
+        repConfig.setConfigParam("je.rep.vlsn.distance",
+                                 MAX_DISTANCE.toString());
+
+        repConfig.setHelperHosts(hostName);
+
+        /*
+         * If -DlongTimeout is true, then this test will run with very
+         * long timeouts, to make interactive debugging easier.
+         */
+        if (longTimeout) {
+            RepTestUtils.setLongTimeouts(repConfig);
+        }
+
+        ReplicatedEnvironment rep =
+            new ReplicatedEnvironment(envHome, repConfig, envConfig);
+        return rep;
+    }
+
+    /**
+     * Scan the log using a reader that checks every entry, and use it to
+     * verify that a FeederReader picks up the right replicated entries.
+     * Also check that the FeederReader actually skipped to the lsn
+     * positions provided by the mapping.
+     * @throws InterruptedException
+     */
+    public static ArrayList
+        collectExpectedData(CheckReader checker)
+        throws DatabaseException, InterruptedException {
+
+        ArrayList expected = new ArrayList();
+
+        /* Read every replicated log entry with the checker. */
+        while (checker.readNextEntry()) {
+            CheckWireRecord w = checker.getWireRecord();
+            expected.add(w);
+        }
+        return expected;
+    }
+
+    /* This scans all log entries and picks out the replicated ones. */
+    public static class CheckReader extends FileReader {
+
+        private CheckWireRecord wireRecord;
+        public long nScanned;
+
+        public CheckReader(EnvironmentImpl envImpl)
+            throws DatabaseException {
+
+            super(envImpl,
+                  1000,            // readBufferSize
+                  true,            // forward
+                  DbLsn.NULL_LSN,  // startLsn
+                  null,            // singleFileNumber
+                  DbLsn.NULL_LSN,  // endOfFileLsn
+                  DbLsn.NULL_LSN); // finishLsn
+        }
+
+        /** Return true if this entry is replicated. */
+        @Override
+        protected boolean isTargetEntry() {
+            nScanned++;
+            return entryIsReplicated();
+        }
+
+        @Override
+        protected boolean processEntry(ByteBuffer entryBuffer)
+            throws DatabaseException {
+
+            ByteBuffer buffer = entryBuffer.slice();
+            buffer.limit(currentEntryHeader.getItemSize());
+            wireRecord = new CheckWireRecord(envImpl, getLastLsn(),
+                                             currentEntryHeader.getType(),
+                                             currentEntryHeader.getVersion(),
+                                             currentEntryHeader.getItemSize(),
+                                             currentEntryHeader.getVLSN(),
+                                             buffer);
+
+            entryBuffer.position(entryBuffer.position() +
+                                 currentEntryHeader.getItemSize());
+            return true;
+        }
+
+        CheckWireRecord getWireRecord() {
+            return wireRecord;
+        }
+    }
+
+    /**
+     * A CheckWireRecord contains an OutputWireRecord read from the log. It
+     * also adds the lsn of that WireRecord and instantiates the log entry
+     * right away because we know we'll need it for test purposes.
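+     * Tests compare a CheckWireRecord against a freshly read record via
+     * check.exactMatch(feederRecord), which dumps both entries to strings
+     * and compares them (see exactMatch below).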
+ */
+    public static class CheckWireRecord extends OutputWireRecord {
+        public long lsn;
+        private final LogEntry logEntry;
+
+        CheckWireRecord(EnvironmentImpl envImpl,
+                        long lsn,
+                        byte entryType,
+                        int entryVersion,
+                        int itemSize,
+                        VLSN vlsn,
+                        ByteBuffer entryBuffer)
+            throws DatabaseException {
+
+            super(envImpl,
+                  new LogEntryHeader(entryType, entryVersion, itemSize, vlsn),
+                  entryBuffer);
+            this.lsn = lsn;
+            this.logEntry = instantiateEntry(envImpl, entryBuffer);
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder();
+            sb.append("lsn=").append(DbLsn.getNoFormatString(lsn));
+            sb.append(" ").append(header);
+            sb.append(" ").append(logEntry);
+            return sb.toString();
+        }
+
+        /**
+         * @return true if this CheckWireRecord has exactly the same physical
+         * contents as the OutputWireRecord. Must be called before the
+         * entryBuffer that backs this OutputWireRecord is reused.
+         * @throws DatabaseException
+         */
+        public boolean exactMatch(OutputWireRecord feederRecord)
+            throws DatabaseException {
+
+            if (!header.logicalEqualsIgnoreVersion(feederRecord.header)) {
+                return false;
+            }
+
+            LogEntry feederEntry = feederRecord.instantiateEntry
+                (envImpl, feederRecord.entryBuffer);
+            StringBuilder sb = new StringBuilder();
+            feederEntry.dumpEntry(sb, true);
+            String feederString = sb.toString();
+
+            sb = new StringBuilder();
+            logEntry.dumpEntry(sb, true);
+            String myEntryString = sb.toString();
+
+            return myEntryString.equals(feederString);
+        }
+    }
+
+    /**
+     * Tests can customize how they populate a log.
+     */
+    public interface LogPopulator {
+
+        /* Put the desired data into this environment. */
+        public void populateLog(ReplicatedEnvironment rep);
+    }
+}
diff --git a/test/com/sleepycat/je/rep/subscription/EntryRequestTypeTest.java b/test/com/sleepycat/je/rep/subscription/EntryRequestTypeTest.java new file mode 100644 index 0000000..4e86ed8 --- /dev/null +++ b/test/com/sleepycat/je/rep/subscription/EntryRequestTypeTest.java @@ -0,0 +1,364 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.subscription;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.rep.InsufficientLogException;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.stream.BaseProtocol.EntryRequestType;
+import com.sleepycat.je.rep.stream.FeederFilter;
+import com.sleepycat.je.rep.stream.OutputWireRecord;
+import com.sleepycat.je.rep.vlsn.VLSNIndex;
+import com.sleepycat.je.rep.vlsn.VLSNRange;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.PollCondition;
+import com.sleepycat.je.utilint.VLSN;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit tests for the different EntryRequest types.
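+ * The modes exercised are DEFAULT, AVAILABLE and NOW; the expected
+ * start points, as encoded in testLow, testInRange and testHigh below,
+ * are:
+ * <pre>
+ *   request       DEFAULT                      AVAILABLE   NOW
+ *   below range   requested (ILE if cleaned)   first       last
+ *   in range      requested                    requested   last
+ *   above range   last sync entry              last        last
+ * </pre>
+ *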
Each test verifies + * behavior of one EntryRequest type if the start VLSN to subscribe the + * replication stream has been cleaned by cleaner and no longer available in + * the VLSN index. + */ +public class EntryRequestTypeTest extends SubscriptionTestBase { + + @Override + @Before + public void setUp() throws Exception { + groupSize = 1 + numReplicas + (hasMonitor ? 1 : 0); + super.setUp(); + keys = new ArrayList<>(); + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), + "EntryRequestTypeTest"); + /* to be created in each test */ + subscription = null; + monitor = null; + } + + @Override + @After + public void tearDown() + throws Exception { + if (subscription != null) { + subscription.shutdown(); + } + subscription = null; + + if (monitor != null) { + monitor.shutdown(); + } + super.tearDown(); + } + + + /** + * Without any cleaning, all modes should succeed. + * + * @throws Exception if test fails + */ + @Test + public void testNoCleaning() throws Exception { + + final boolean cleanLog = false; + createEnv(cleanLog); + + /* all modes should succeed */ + for (EntryRequestType mode : EntryRequestType.values()) { + logger.info("Test stream mode: " + mode); + testLow(mode, new TestCallback()); + testInRange(mode, new TestCallback()); + testHigh(mode, new TestCallback()); + logger.info("Done test mode: " + mode + "\n"); + } + } + + /** + * With log cleaning + * + * @throws Exception if test fails + */ + @Test + public void testCleaning() throws Exception { + + final boolean cleanLog = true; + createEnv(cleanLog); + + /* ensure log cleaned */ + final VLSN lower = getVLSNRange().getFirst(); + assertTrue("Expect log cleaned", lower.compareTo(VLSN.FIRST_VLSN) > 0); + + /* now and available modes should always succeed */ + final EntryRequestType[] modes = + new EntryRequestType[]{EntryRequestType.NOW, + EntryRequestType.AVAILABLE}; + for (EntryRequestType mode : modes) { + logger.info("Test stream mode: " + mode); + testLow(mode, new TestCallback()); + testInRange(mode, new TestCallback()); + testHigh(mode, new TestCallback()); + logger.info("Done test mode: " + mode + "\n"); + } + + /* default mode */ + final EntryRequestType mode = EntryRequestType.DEFAULT; + /* in range and high should be good */ + testInRange(mode, new TestCallback()); + testHigh(mode, new TestCallback()); + + /* low start vlsn should raise ILE */ + try { + testLow(mode, new TestCallback()); + fail("In default mode, subscription should fail if requested vlsn" + + " is lower than the range"); + } catch (InsufficientLogException exp) { + logger.info("Expected exception: " + exp.getMessage()); + } finally { + subscription.shutdown(); + } + } + + private void testLow(EntryRequestType mode, TestCallback cbk) + throws Exception { + + logger.info("Test low start, mode: " + mode); + logger.info("VLSN index range: " + getVLSNRange()); + + final VLSN reqVLSN = VLSN.FIRST_VLSN; + final VLSN expVLSN; + switch (mode) { + case DEFAULT: + expVLSN = reqVLSN; + break; + case AVAILABLE: + expVLSN = getVLSNRange().getFirst(); + break; + case NOW: + expVLSN = getVLSNRange().getLast(); + break; + default: + expVLSN = VLSN.NULL_VLSN; + } + testSub(mode, cbk, reqVLSN, expVLSN); + } + + private void testInRange(EntryRequestType mode, TestCallback cbk) + throws Exception { + + logger.info("Test in-range start, mode: " + mode); + logger.info("VLSN index range: " + getVLSNRange()); + + final VLSN lower = getVLSNRange().getFirst(); + final VLSN upper = getVLSNRange().getLast(); + final long mid = (lower.getSequence() + upper.getSequence()) / 2; + 
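/* Request a VLSN in the middle of the available range. */
+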
final VLSN reqVLSN = new VLSN(mid); + final VLSN expVLSN; + switch (mode) { + case DEFAULT: + expVLSN = reqVLSN; + break; + case AVAILABLE: + expVLSN = reqVLSN; + break; + case NOW: + expVLSN = upper; + break; + default: + expVLSN = VLSN.NULL_VLSN; + } + testSub(mode, cbk, reqVLSN, expVLSN); + } + + private void testHigh(EntryRequestType mode, TestCallback cbk) + throws Exception { + + logger.info("Test high start, mode: " + mode); + logger.info("VLSN index range: " + getVLSNRange()); + + final VLSN reqVLSN = new VLSN(Long.MAX_VALUE); + final VLSN expVLSN; + switch (mode) { + case DEFAULT: + expVLSN = getVLSNRange().getLastSync(); + break; + case AVAILABLE: + expVLSN = getVLSNRange().getLast(); + break; + case NOW: + expVLSN = getVLSNRange().getLast(); + break; + default: + expVLSN = VLSN.NULL_VLSN; + } + testSub(mode, cbk, reqVLSN, expVLSN); + } + + private void testSub(EntryRequestType mode, TestCallback cbk, + VLSN requestVLSN, VLSN expectedVLSN) + throws Exception { + + createSubscription(mode, cbk); + + /* start subscription at the requested VLSN */ + logger.info("Req VLSN: " + requestVLSN + + ", exp VLSN " + expectedVLSN); + subscription.start(requestVLSN); + try { + waitFor(new PollCondition(TEST_POLL_INTERVAL_MS, + TEST_POLL_TIMEOUT_MS) { + @Override + protected boolean condition() { + final VLSN vlsn = cbk.getFirstVLSN(); + return vlsn.equals(expectedVLSN); + } + }); + } catch (TimeoutException e) { + failTimeout(); + } + logger.info("Subscription successfully started"); + subscription.shutdown(); + } + + private void createSubscription(EntryRequestType mode, TestCallback cbk) + throws Exception { + + /* create a subscription */ + final ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + final SubscriptionConfig config = createSubConfig(masterEnv, false); + config.setStreamMode(mode); + config.setCallback(cbk); + config.setFeederFilter(new TestFilter()); + subscription = new Subscription(config, logger); + } + + + /* gets the available VLSN range from the master's VLSN index */ + private VLSNRange getVLSNRange() { + final ReplicatedEnvironment master = repEnvInfo[0].getEnv(); + final VLSNIndex vlsnIndex = + RepInternal.getNonNullRepImpl(master).getVLSNIndex(); + return vlsnIndex.getRange(); + } + + private void createEnv(boolean cleaning) + throws Exception { + + /* Use small log files so we can set a small disk limit. */ + repEnvInfo[0].getEnvConfig().setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, String.valueOf(1L << 20)); + + /* create and verify a replication group */ + prepareTestEnv(); + + /* populate some data and verify */ + final ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + + if (cleaning) { + /* cause the lower end of the VLSN range to advance past zero */ + advanceVLSNRange(masterEnv); + } + + logger.info("Env created, log cleaning? 
" + cleaning); + } + + /* Wait for test done */ + private void waitFor(final PollCondition condition) + throws TimeoutException { + boolean success = condition.await(); + /* if timeout */ + if (!success) { + throw new TimeoutException("timeout in polling test "); + } + } + + class TestCallback implements SubscriptionCallback { + + private VLSN firstVLSN; + + TestCallback() { + firstVLSN = VLSN.NULL_VLSN; + } + + @Override + public void processPut(VLSN vlsn, byte[] key, byte[] value, + long txnId) { + processVLSN(vlsn); + } + + @Override + public void processDel(VLSN vlsn, byte[] key, long txnId) { + processVLSN(vlsn); + } + + @Override + public void processCommit(VLSN vlsn, long txnId) { + processVLSN(vlsn); + } + + @Override + public void processAbort(VLSN vlsn, long txnId) { + processVLSN(vlsn); + } + + @Override + public void processException(Exception exception) { + } + + VLSN getFirstVLSN() { + return firstVLSN; + } + + private void processVLSN(VLSN vlsn) { + /* record the first VLSN received from feeder */ + if (firstVLSN.isNull()) { + firstVLSN = vlsn; + } + } + + } + + /* no op filter to ensure VLSN=1 is not filtered */ + static class TestFilter implements FeederFilter, Serializable { + + private static final long serialVersionUID = 1L; + + TestFilter() { + super(); + } + + @Override + public OutputWireRecord execute(OutputWireRecord record, + RepImpl repImpl) { + return record; + } + + @Override + public String[] getTableIds() { + return new String[0]; + } + } +} diff --git a/test/com/sleepycat/je/rep/subscription/SubscriptionAuthTestHelper.java b/test/com/sleepycat/je/rep/subscription/SubscriptionAuthTestHelper.java new file mode 100644 index 0000000..17d566f --- /dev/null +++ b/test/com/sleepycat/je/rep/subscription/SubscriptionAuthTestHelper.java @@ -0,0 +1,169 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.subscription; + +import java.util.Arrays; + +import com.sleepycat.je.rep.utilint.ServiceHandshake; + +/** + * Helper for testing subscription authentication in the handshake test + * {@link com.sleepycat.je.rep.utilint.HandshakeTest} + */ +public class SubscriptionAuthTestHelper implements + ServiceHandshake.AuthenticationMethod { + + private final static byte[] goodToken = ("GoodToken").getBytes(); + private final static byte[] badToken = ("BadToken").getBytes(); + private final static String[] tableIds = new String[]{"1", "2", "3"}; + + private final StreamAuthenticator serverAuth; + private final SubscriptionAuthHandler clientAuthHandler; + + public SubscriptionAuthTestHelper(TokenType type) { + + serverAuth = new TestAuthenticator(goodToken, tableIds); + + if (type == TokenType.GOOD) { + clientAuthHandler = new TestSubscriptionAuth(goodToken, true); + } else if (type == TokenType.BAD) { + clientAuthHandler = new TestSubscriptionAuth(badToken, true); + } else if (type == TokenType.EMPTY) { + clientAuthHandler = new TestSubscriptionAuth(new byte[0], true); + } else { + clientAuthHandler = new TestSubscriptionAuth(null, true); + } + } + + @Override + public String getMechanismName() { + return "SubscriptionAuthTest"; + } + + @Override + public String getServerParams() { + return null; + } + + @Override + public ServiceHandshake.ServerInitOp getServerOp( + ServiceHandshake.ServerHandshake initState) { + return new ServerTestOp(initState, serverAuth); + } + + @Override + public ServiceHandshake.ClientInitOp getClientOp( + ServiceHandshake.ClientHandshake initState, String params) { + return new ClientTestOp(initState, clientAuthHandler); + } + + static class ServerTestOp extends ServerAuthMethod.ServerTokenOp { + + ServerTestOp(ServiceHandshake.ServerHandshake initState, + StreamAuthenticator serverAuth) { + super(initState, serverAuth); + } + } + + static class ClientTestOp extends ClientAuthMethod.ClientTokenOp { + + ClientTestOp(ServiceHandshake.ClientHandshake initState, + SubscriptionAuthHandler clientAuthHandler) { + super(initState, clientAuthHandler); + } + } + + /* + * This authenticator checks the contents of the expected token against + * the received token to ensure the data is being transmitted properly, + * and does the same for the table ids.
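+ * For example, a token handed to the client via TestSubscriptionAuth and + * delivered through setToken() must match the expected bytes exactly for + * authenticate() to return true.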
+ */ + static class TestAuthenticator implements StreamAuthenticator { + + private byte[] expectedToken; + private byte[] receivedToken; + + private String[] expectedTableIds; + private String[] receivedTableIds; + + private long ts; + + TestAuthenticator(byte[] expectedToken, + String[] expectedTableIds) { + + this.expectedToken = expectedToken; + this.expectedTableIds = expectedTableIds; + receivedToken = null; + receivedTableIds = null; + ts = 0; + } + + @Override + public void setToken(byte[] token) { + receivedToken = token; + } + + @Override + public void setTableIds(String[] tableIdStr) { + receivedTableIds = tableIdStr; + } + + @Override + public boolean authenticate() { + ts = System.currentTimeMillis(); + return Arrays.equals(expectedToken, receivedToken); + } + + @Override + public boolean checkAccess() { + ts = System.currentTimeMillis(); + return authenticate() && + Arrays.equals(expectedTableIds, receivedTableIds); + } + + @Override + public long getLastCheckTimeMs() { + return ts; + } + + } + + static class TestSubscriptionAuth implements SubscriptionAuthHandler { + + private final byte[] token; + private final boolean reauth; + + TestSubscriptionAuth(byte[] token, boolean reauth) { + this.token = token; + this.reauth = reauth; + } + + @Override + public boolean hasNewToken() { + return reauth; + } + + @Override + public byte[] getToken() { + return token; + } + } + + public enum TokenType { + GOOD, + BAD, + EMPTY, + NONE + } +} diff --git a/test/com/sleepycat/je/rep/subscription/SubscriptionConfigTest.java b/test/com/sleepycat/je/rep/subscription/SubscriptionConfigTest.java new file mode 100644 index 0000000..33dd87a --- /dev/null +++ b/test/com/sleepycat/je/rep/subscription/SubscriptionConfigTest.java @@ -0,0 +1,348 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.subscription; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.Serializable; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.stream.BaseProtocol.EntryRequestType; +import com.sleepycat.je.rep.stream.FeederFilter; +import com.sleepycat.je.rep.stream.OutputWireRecord; +import com.sleepycat.je.rep.utilint.HostPortPair; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.TestBase; + +import org.junit.Before; +import org.junit.Test; + +/** + * Test that SubscriptionConfig can be initialized, set, and read correctly. 
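+ * <p> + * A typical construction, as exercised by the tests below (the values are + * illustrative): + * <pre> + * SubscriptionConfig config = + * new SubscriptionConfig("test-subscriber", "./test/subscription/", + * "localhost:6000", "localhost:6000", + * "rg1", groupUUID); + * config.setStreamMode(EntryRequestType.DEFAULT); + * </pre>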
+ */ +public class SubscriptionConfigTest extends TestBase { + + private final String home = "./test/subscription/"; + private final String feederHostPortPair = "localhost:6000"; + private final String subNodeName = "test-subscriber"; + private final String subHostPortPair = "localhost:6000"; + private final String groupName = "rg1"; + private final UUID groupUUID = + UUID.fromString("cb675927-433a-4ed6-8382-0403e9861619"); + private SubscriptionConfig config; + + private final Logger logger = + LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + config = new SubscriptionConfig(subNodeName, home, subHostPortPair, + feederHostPortPair, groupName, + groupUUID); + } + + @Test + public void testInitializedParameters() throws Exception { + assertEquals(home, config.getSubscriberHome()); + assertEquals(subNodeName, config.getSubNodeName()); + assertEquals(subHostPortPair, config.getSubNodeHostPort()); + + assertEquals(HostPortPair.getHostname(feederHostPortPair), + config.getFeederHost()); + assertEquals(HostPortPair.getPort(feederHostPortPair), + config.getFeederPort()); + + assertEquals(groupName, config.getGroupName()); + assertEquals(groupUUID, config.getGroupUUID()); + assertEquals(NodeType.SECONDARY, config.getNodeType()); + + assertEquals(EntryRequestType.DEFAULT, config.getStreamMode()); + } + + @Test + public void testNodeType() throws Exception { + SubscriptionConfig config1 = + new SubscriptionConfig(subNodeName, home, subHostPortPair, + feederHostPortPair, groupName, groupUUID, + NodeType.EXTERNAL); + assertEquals(NodeType.EXTERNAL, config1.getNodeType()); + + /* types other than EXTERNAL and SECONDARY */ + NodeType[] types = new NodeType[]{NodeType.ARBITER, + NodeType.ELECTABLE, NodeType.MONITOR}; + + for (NodeType type : types) { + try { + new SubscriptionConfig(subNodeName, home, subHostPortPair, + feederHostPortPair, groupName, + groupUUID, type); + fail("Expect IllegalArgumentException for node type " + type); + } catch (IllegalArgumentException e) { + /* expected exception due to non-supported node type */ + logger.info( + "Expected IllegalArgumentException " + e.getMessage()); + } + } + } + + @Test + public void testSetParameters() { + long timeout = 10000; + TimeUnit unit = TimeUnit.MILLISECONDS; + config.setChannelTimeout(timeout, unit); + assertEquals(timeout, config.getChannelTimeout(unit)); + config.setPreHeartbeatTimeout(2 * timeout, unit); + assertEquals(2 * timeout, config.getPreHeartbeatTimeout(unit)); + config.setStreamOpenTimeout(3 * timeout, unit); + assertEquals(3 * timeout, config.getStreamOpenTimeout(unit)); + + int interval = 2000; + config.setHeartbeatInterval(interval); + assertEquals(interval, config.getHeartbeatIntervalMs()); + + int sz = 10240; + config.setInputMessageQueueSize(sz); + assertEquals(sz, config.getInputMessageQueueSize()); + config.setOutputMessageQueueSize(2 * sz); + assertEquals(2 * sz, config.getOutputMessageQueueSize()); + + config.setReceiveBufferSize(3 * sz); + assertEquals(3 * sz, config.getReceiveBufferSize()); + } + + @Test + public void testFeederFilter() throws Exception { + + /* filter cannot be null */ + try { + config = new SubscriptionConfig(subNodeName, + "./", + subHostPortPair, + feederHostPortPair, + groupName); + + + config.setFeederFilter(null); + fail("Expect IllegalArgumentException for a null feeder filter"); + } catch (IllegalArgumentException e) { + /* expected exception due to null feeder filter */ + logger.info("Expected IllegalArgumentException " + e.getMessage()); + } + + /* filter is set correctly */ + config = new SubscriptionConfig(subNodeName, + "./",
+ subHostPortPair, + feederHostPortPair, + groupName); + + final String token = "test filter"; + config.setFeederFilter(new TestFeederFilter(token)); + assert(config.getFeederFilter().toString().equals(token)); + } + + @Test + public void testCallback() throws Exception { + + /* callback cannot be null */ + try { + config = new SubscriptionConfig(subNodeName, + "./", + subHostPortPair, + feederHostPortPair, + groupName); + + + config.setCallback(null); + fail("Expect IllegalArgumentException for a null callback"); + } catch (IllegalArgumentException e) { + /* expected exception due to null callback */ + logger.info("Expected IllegalArgumentException " + e.getMessage()); + } + + /* callback is set correctly */ + config = new SubscriptionConfig(subNodeName, + "./", + subHostPortPair, + feederHostPortPair, + groupName); + + final String token = "TestCallback"; + config.setCallback(new TestCallback(token)); + assert(config.getCallBack().toString().equals(token)); + } + + + @Test + public void testMissingParameters() throws Exception { + + /* each required parameter below is null in turn; creation must fail */ + try { + config = new SubscriptionConfig(null, + "./", + subHostPortPair, + feederHostPortPair, + groupName); + + /* should not be able to create a config with missing parameters */ + fail("Expect IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected exception */ + logger.info("Expected IllegalArgumentException " + e.getMessage()); + } + + try { + config = new SubscriptionConfig(subNodeName, + null, + subHostPortPair, + feederHostPortPair, + groupName); + + /* should not be able to create a config with missing parameters */ + fail("Expect IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected exception */ + logger.info("Expected IllegalArgumentException " + e.getMessage()); + } + + try { + config = new SubscriptionConfig(subNodeName, + "./", + null, + feederHostPortPair, + groupName); + + /* should not be able to create a config with missing parameters */ + fail("Expect IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected exception */ + logger.info("Expected IllegalArgumentException " + e.getMessage()); + } + + try { + config = new SubscriptionConfig(subNodeName, + "./", + subHostPortPair, + null, + groupName); + + /* should not be able to create a config with missing parameters */ + fail("Expect IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected exception */ + logger.info("Expected IllegalArgumentException " + e.getMessage()); + } + + try { + config = new SubscriptionConfig(subNodeName, + "./", + subHostPortPair, + feederHostPortPair, + null); + + /* should not be able to create a config with missing parameters */ + fail("Expect IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected exception */ + logger.info("Expected IllegalArgumentException " + e.getMessage()); + } + } + + @Test + public void testStreamMode() throws Exception { + final SubscriptionConfig config = + new SubscriptionConfig(subNodeName, home, subHostPortPair, + feederHostPortPair, groupName, groupUUID, + NodeType.EXTERNAL); + + for (EntryRequestType type : EntryRequestType.values()) { + config.setStreamMode(type); + assertEquals(type, config.getStreamMode()); + } + } + + /* no-op test callback */ + private class TestCallback implements SubscriptionCallback { + + private final String id; + + TestCallback(String id) { + this.id = id; + } + + @Override + public void processPut(VLSN vlsn, byte[] key, byte[] value, + long txnId) { + + } + + @Override + public void
processDel(VLSN vlsn, byte[] key, long txnId) { + + } + + @Override + public void processCommit(VLSN vlsn, long txnid) { + + } + + @Override + public void processAbort(VLSN vlsn, long txnid) { + + } + + @Override + public void processException(final Exception exception) { + + } + + @Override + public String toString() { + return id; + } + } + + /* no-op test feeder filter */ + private class TestFeederFilter implements FeederFilter, Serializable { + private static final long serialVersionUID = 1L; + private final String id; + + TestFeederFilter(String id) { + this.id = id; + } + + @Override + public OutputWireRecord execute(final OutputWireRecord record, + final RepImpl repImpl) { + return record; + } + + @Override + public String[] getTableIds() { + return null; + } + + @Override + public String toString() { + return id; + } + } +} diff --git a/test/com/sleepycat/je/rep/subscription/SubscriptionTest.java b/test/com/sleepycat/je/rep/subscription/SubscriptionTest.java new file mode 100644 index 0000000..e71da53 --- /dev/null +++ b/test/com/sleepycat/je/rep/subscription/SubscriptionTest.java @@ -0,0 +1,582 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + + +package com.sleepycat.je.rep.subscription; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.TimeoutException; +import java.util.logging.Level; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationSSLConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.utilint.InternalException; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.je.utilint.TestHookAdapter; +import com.sleepycat.je.utilint.VLSN; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * test subscription API to receive a replication stream + * from a start VLSN + */ +public class SubscriptionTest extends SubscriptionTestBase { + + @Override + @Before + public void setUp() throws Exception { + groupSize = 1 + numReplicas + (hasMonitor ? 
1 : 0); + super.setUp(); + keys = new ArrayList<>(); + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), + "SubscriptionTest"); + logger.setLevel(Level.FINE); + + /* to be created in each test */ + subscription = null; + monitor = null; + } + + @Override + @After + public void tearDown() + throws Exception { + if (subscription != null) { + subscription.shutdown(); + } + subscription = null; + + if (monitor != null) { + monitor.shutdown(); + } + super.tearDown(); + } + + /** + * Testcase of subscribing from the very beginning of the VLSN range. The + * VLSN index is assumed to have all entries available given the small + * set of test data. This is the test case of basic subscription API + * usage. + * + * @throws Exception if test fails + */ + @Test + public void testSubscriptionBasic() throws Exception { + + /* a slightly bigger db */ + numKeys = 1024*100; + + testGroupUUIDhelper(false); + } + + /** + * Similar test case to testSubscriptionBasic except that a rep group + * uuid is tested in configuration. This is to test that subscription can + * only succeed if the specified group uuid matches that of the feeder. + * + * @throws Exception if test fails + */ + @Test + public void testSubscriptionGroupUUID() throws Exception { + + /* a slightly bigger db */ + numKeys = 1024*100; + + /* same test as above but use a matching group uuid */ + testGroupUUIDhelper(true); + + /* now test an invalid random group uuid, subscription should fail */ + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + SubscriptionConfig config = createSubConfig(masterEnv, true); + config.setGroupUUID(UUID.randomUUID()); + Subscription failSubscription = new Subscription(config, logger); + /* start streaming from the very beginning, should fail */ + try { + failSubscription.start(); + fail("Did not see exception due to mismatched group uuid"); + } catch (InternalException e) { + logger.info("Expected exception due to mismatched group uuid: " + + e.getMessage()); + } + + failSubscription.shutdown(); + } + + /** + * Test that the dummy env created by subscription is a secondary node + * + * @throws Exception if test fails + */ + @Test + public void testDummyEnvNodeType() throws Exception { + + /* a small db */ + numKeys = 100; + /* turn off data cleaner on master */ + turnOffMasterCleaner(); + + /* create and verify a replication group */ + prepareTestEnv(); + + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + + /* create a subscription */ + SubscriptionConfig config = createSubConfig(masterEnv, false); + SubscriptionTestCallback testCbk = + new SubscriptionTestCallback(numKeys); + config.setCallback(testCbk); + subscription = new Subscription(config, logger); + + final ReplicatedEnvironment dummyEnv = subscription.getDummyRepEnv(); + assert(!dummyEnv.getRepConfig().getNodeType().isElectable()); + assert(dummyEnv.getRepConfig().getNodeType().isSecondary()); + } + + /** + * Testcase of subscribing from a particular VLSN. The VLSN index is + * assumed to have all entries available given the small set of test + * data. + * + * @throws Exception if test fails + */ + @Test + public void testSubscriptionFromVLSN() throws Exception { + + /* empty security properties */ + testSubscriptionFromVLSNHelper(new Properties()); + } + + /** + * Tests Subscription configured with SSL.
It is similar to testSubscriptionFromVLSN + * above, except that the env is configured with security properties. + * + * @throws Exception if test fails + */ + @Test + public void testSubscriptionSSL() throws Exception { + + /* set ssl in the config. */ + final Properties sslProps = new Properties(); + RepTestUtils.setUnitTestSSLProperties(sslProps); + final ReplicationSSLConfig sslConf = new ReplicationSSLConfig(sslProps); + + /* dump security properties */ + for (Object key : sslProps.keySet()) { + final String trace = key + ": " + sslProps.get(key); + logger.info(trace); + } + + /* update each rep env with ssl config */ + for (RepTestUtils.RepEnvInfo envInfo : repEnvInfo) { + ReplicationConfig rc = envInfo.getRepConfig(); + rc.setRepNetConfig(sslConf); + } + + testSubscriptionFromVLSNHelper(sslProps); + } + + /** + * Testcase of subscribing from a NULL VLSN. + * + * @throws Exception if test fails + */ + @Test + public void testInvalidStartVLSN() throws Exception { + + numKeys = 200; + + /* turn off data cleaner on master */ + turnOffMasterCleaner(); + + /* the start VLSN to subscribe */ + final VLSN startVLSN = VLSN.NULL_VLSN; + /* number of transactions we subscribe */ + final int numKeysToStream = 10; + + /* create and verify a replication group */ + prepareTestEnv(); + + /* populate some data and verify */ + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + + /* create a subscription */ + SubscriptionConfig config = createSubConfig(masterEnv, false); + SubscriptionTestCallback testCbk = + new SubscriptionTestCallback(numKeysToStream); + config.setCallback(testCbk); + subscription = new Subscription(config, logger); + + logger.info("subscription created at home " + + config.getSubscriberHome() + + ", start streaming " + numKeys + " items from feeder"); + + try { + subscription.start(startVLSN); + fail("Expect IllegalArgumentException raised from subscription"); + } catch (IllegalArgumentException iae) { + /* expected exception */ + logger.info("Expected exception " + iae.getMessage()); + } finally { + subscription.shutdown(); + } + } + + /** + * Testcase where the start VLSN to subscribe the replication stream + * has been cleaned by the cleaner and is no longer available in the VLSN + * index at the time of subscription. We expect an entry-not-found + * message raised in the sync-up phase. + * + * @throws Exception if test fails + */ + @Test + public void testSubscriptionUnavailableVLSN() throws Exception { + + /* Use small log files so we can set a small disk limit.
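A 1 MiB file size (1L << 20) yields many small files, so the cleaner can delete enough of them to stay within the limit.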
*/ + repEnvInfo[0].getEnvConfig().setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, String.valueOf(1L << 20)); + + /* create and verify a replication group */ + prepareTestEnv(); + + /* populate some data and verify */ + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + + /* cause the lower end of the VLSN range to advance past zero */ + advanceVLSNRange(masterEnv); + + /* create a subscription */ + SubscriptionConfig config = createSubConfig(masterEnv, false); + SubscriptionTestCallback testCbk = + new SubscriptionTestCallback(numKeys); + config.setCallback(testCbk); + subscription = new Subscription(config, logger); + + /* start subscription at VLSN 0 and expect InsufficientLogException */ + try { + subscription.start(); + fail("Expect InsufficientLogException since start VLSN is not " + + "available at feeder"); + } catch (InsufficientLogException ile) { + logger.info("Expected InsufficientLogException: " + + ile.getMessage()); + } finally { + subscription.shutdown(); + } + } + + /** + * Testcase that subscription callback is able to process commits and + * aborts from the feeder. + */ + @Test + public void testTxnCommitsAndAborts() throws Exception { + + numKeys = 1024; + final int numAborts = 5; + + /* create and verify a replication group */ + prepareTestEnv(); + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + /* now create some aborts */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setDurability + (new Durability(Durability.SyncPolicy.NO_SYNC, + Durability.SyncPolicy.NO_SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY)); + try (Database db = masterEnv.openDatabase(null, dbName, dbConfig)) { + for (int i = 1; i <= numAborts; i++) { + IntegerBinding.intToEntry(i, key); + Transaction txn = masterEnv.beginTransaction(null, txnConfig); + db.put(txn, key, data); + txn.abort(); + } + } + + /* start streaming from the very beginning, should succeed */ + SubscriptionConfig config = createSubConfig(masterEnv, false); + SubscriptionTestCallback testCbk = + new SubscriptionTestCallback(numKeys); + config.setCallback(testCbk); + subscription = new Subscription(config, logger); + subscription.start(); + + try { + waitForTestDone(testCbk); + } catch (TimeoutException e) { + failTimeout(); + } + + /* expect multiple commits */ + assertTrue("expect commits from loading " + numKeys + " keys", + (testCbk.getNumCommits() > 0)); + assertEquals("expect aborts", numAborts, testCbk.getNumAborts()); + + + /* shut down the subscription after verifying commits and aborts */ + subscription.shutdown(); + } + + /** + * Testcase that subscription callback is able to process an exception.
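+ * The exception is injected via Subscription.setExceptionHandlingTestHook, + * which offers it to the subscription thread, and the callback's + * processException must then observe exactly that exception.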
+ */ + @Test + public void testExceptionHandling() throws Exception { + + numKeys = 100; + + /* create and verify a replication group */ + prepareTestEnv(); + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + + /* start streaming from the very beginning, should succeed */ + SubscriptionConfig config = createSubConfig(masterEnv, false); + SubscriptionTestCallback testCbk = + new SubscriptionTestCallback(numKeys, + true); /* allow exception */ + config.setCallback(testCbk); + subscription = new Subscription(config, logger); + subscription.start(); + + try { + waitForTestDone(testCbk); + } catch (TimeoutException e) { + failTimeout(); + } + + /* now inject an exception into the queue */ + testCbk.resetTestDone(); + final String token = "test internal exception"; + Exception exp = new InternalException(token); + subscription.setExceptionHandlingTestHook(new ExceptionTestHook(exp)); + + try { + waitForTestDone(testCbk); + } catch (TimeoutException e) { + failTimeout(); + } + + /* verify injected exception is processed in callback */ + if (testCbk.getNumExceptions() != 1) { + testCbk.getFirstException().printStackTrace(); + testCbk.getLastException().printStackTrace(); + } + assertEquals("Expect one exception", 1, testCbk.getNumExceptions()); + final Exception lastExp = testCbk.getLastException(); + assertTrue("Expect InternalException", + (lastExp != null) && (lastExp instanceof InternalException)); + assertTrue("Expect same token", token.equals(lastExp.getMessage())); + subscription.shutdown(); + } + + /* Wait for test done */ + private void waitForTestDone(final SubscriptionTestCallback callBack) + throws TimeoutException { + + boolean success = new PollCondition(TEST_POLL_INTERVAL_MS, + TEST_POLL_TIMEOUT_MS) { + @Override + protected boolean condition() { + return callBack.isTestDone(); + } + }.await(); + + /* if timeout */ + if (!success) { + throw new TimeoutException("timeout in polling test"); + } + } + + /* Verify received correct test data from feeder */ + private void verifyTestResults(SubscriptionTestCallback mycbk) { + List<byte[]> receivedKeys = mycbk.getAllKeys(); + int numKeys = keys.size(); + + assertTrue("expect some commits", (mycbk.getNumCommits() > 0)); + assertTrue("expect no aborts", (mycbk.getNumAborts() == 0)); + + logger.info("number of keys to verify: " + numKeys); + assertEquals("number of keys mismatch!", numKeys, + receivedKeys.size()); + + IntegerBinding binding = new IntegerBinding(); + for (int i = 0; i < keys.size(); i++) { + Integer expectedKey = keys.get(i); + byte[] receivedKeyByte = receivedKeys.get(i); + + TupleInput tuple = new TupleInput(receivedKeyByte); + Integer receivedKey = binding.entryToObject(tuple); + assertEquals("mismatched key!", expectedKey.longValue(), + receivedKey.longValue()); + } + logger.info("successfully verified all " + numKeys + " keys" + + ", # commits: " + mycbk.getNumCommits() + + ", # aborts: " + mycbk.getNumAborts()); + } + + /* + * Turn off data cleaner on master to prevent the start VLSN from being + * cleaned. It can be turned back on, or the cleaner invoked explicitly, + * by individual unit tests. + */ + private void turnOffMasterCleaner() { + + EnvironmentConfig econfig = repEnvInfo[0].getEnvConfig(); + /* + * Turn off the cleaner, since it's controlled explicitly + * by the test.
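+ * Otherwise a background cleaner run could remove the start VLSN in + * the middle of a test.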
+ */ + econfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + } + + /* test helper to test subscription with or without group uuid */ + private void testGroupUUIDhelper(boolean useGroupUUID) throws Exception { + + /* turn off data cleaner on master */ + turnOffMasterCleaner(); + + /* create and verify a replication group */ + prepareTestEnv(); + + /* populate test db and verify */ + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + + /* create a subscription with a valid group uuid */ + SubscriptionConfig config = createSubConfig(masterEnv, useGroupUUID); + SubscriptionTestCallback testCbk = + new SubscriptionTestCallback(numKeys); + + config.setCallback(testCbk); + subscription = new Subscription(config, logger); + + /* start streaming from the very beginning, should succeed */ + subscription.start(); + + try { + waitForTestDone(testCbk); + } catch (TimeoutException e) { + failTimeout(); + } + /* shutdown test verify we receive all expected keys */ + subscription.shutdown(); + verifyTestResults(testCbk); + } + + private class ExceptionTestHook + extends TestHookAdapter { + + private Exception e; + + ExceptionTestHook(Exception e) { + this.e = e; + } + + @Override + public void doHook(final SubscriptionThread subscriptionThread) { + try { + subscriptionThread.offer(e); + } catch (InterruptedException e1) { + e1.printStackTrace(); + } + } + + @Override + public SubscriptionThread getHookValue() { + throw new UnsupportedOperationException(); + } + } + + private void testSubscriptionFromVLSNHelper(Properties sslProps) + throws Exception { + + /* a small db is enough for this test */ + numKeys = 1024; + + /* turn off data cleaner on master */ + turnOffMasterCleaner(); + + /* the start VLSN to subscribe*/ + final VLSN startVLSN = new VLSN(100); + /* number of transactions we subscribe */ + final int numKeysToStream = 10; + + /* create and verify a replication group */ + prepareTestEnv(); + + /* populate some data and verify */ + ReplicatedEnvironment masterEnv = repEnvInfo[0].getEnv(); + populateDataAndVerify(masterEnv); + + /* create a subscription */ + SubscriptionConfig config = createSubConfig(masterEnv, false, sslProps); + SubscriptionTestCallback testCbk = + new SubscriptionTestCallback(numKeysToStream); + config.setCallback(testCbk); + subscription = new Subscription(config, logger); + + logger.info("subscription created at home " + + config.getSubscriberHome() + + ", start streaming " + numKeys + " items from feeder"); + + + subscription.start(startVLSN); + + /* let subscription run to receive expected # of keys */ + try { + waitForTestDone(testCbk); + } catch (TimeoutException te) { + failTimeout(); + } + + /* verify */ + assertEquals("Mismatch start VLSN!", startVLSN, + testCbk.getFirstVLSN()); + assertEquals("Mismatch start VLSN from statistics", + subscription.getStatistics().getStartVLSN(), + testCbk.getFirstVLSN()); + + subscription.shutdown(); + + } +} diff --git a/test/com/sleepycat/je/rep/subscription/SubscriptionTestBase.java b/test/com/sleepycat/je/rep/subscription/SubscriptionTestBase.java new file mode 100644 index 0000000..25e8e17 --- /dev/null +++ b/test/com/sleepycat/je/rep/subscription/SubscriptionTestBase.java @@ -0,0 +1,523 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + + +package com.sleepycat.je.rep.subscription; + +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.MASTER; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Logger; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationGroup; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.monitor.Monitor; +import com.sleepycat.je.rep.monitor.MonitorConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.rep.vlsn.VLSNRange; +import com.sleepycat.je.tree.Key; +import com.sleepycat.je.utilint.VLSN; + +import org.junit.After; +import org.junit.Before; + +/** + * Test base for subscription tests + */ +class SubscriptionTestBase extends RepTestBase { + + /* polling interval and timeout to check if test is done */ + final static long TEST_POLL_INTERVAL_MS = 1000; + final static long TEST_POLL_TIMEOUT_MS = 120000; + + /* test db */ + private static final int START_KEY = 1; + final String dbName = "SUBSCRIPTION_UNIT_TEST_DB"; + + /* test db with 10k keys */ + int numKeys = 1024*10; + protected List<Integer> keys; + + /* a rep group with 1 master, 2 replicas and 1 monitor */ + int numReplicas = 2; + boolean hasMonitor = true; + + Subscription subscription; + Monitor monitor; + Logger logger; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + } + + + /** + * Write garbage and clean the log until the lower end of the VLSN range + * on the master has been bumped up to some value greater than the very + * first VLSN (0). + */ + void advanceVLSNRange(ReplicatedEnvironment master) { + /* Need a disk limit so files can be deleted and the beginning of the VLSN range can advance.
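Until log files are actually deleted, the lower end of the range stays at the first VLSN.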
*/ + master.setMutableConfig( + master.getMutableConfig().setConfigParam( + EnvironmentConfig.MAX_DISK, + String.valueOf(20 * (1L << 20)))); + + VLSNIndex vlsnIndex = + RepInternal.getNonNullRepImpl(master).getVLSNIndex(); + + for (int i = 0; i < 100; i += 1) { + /* delete and update some keys, and clean the log */ + createObsoleteLogs(master, dbName, numKeys); + cleanLog(master); + + /* check lower end of VLSN index on master */ + VLSNRange range = vlsnIndex.getRange(); + logger.info(master.getNodeName() + ": " + range); + + if (range.getFirst().compareTo(VLSN.FIRST_VLSN) > 0) { + return; + } + } + + /* failed to move the VLSN lower end */ + fail("Lower end of VLSN index is not GT 0."); + } + + /** + * Create a test env and verify it is in good shape + * + * @throws InterruptedException if test fails + */ + void prepareTestEnv() throws InterruptedException { + + createGroup(getNumDataNodes()); + if (hasMonitor) { + /* monitor is the last node in group */ + ReplicationConfig rConfig = + repEnvInfo[groupSize - 1].getRepConfig(); + rConfig.setNodeType(NodeType.MONITOR); + MonitorConfig monConfig = new MonitorConfig(); + monConfig.setNodeName(rConfig.getNodeName()); + monConfig.setGroupName(rConfig.getGroupName()); + monConfig.setNodeHostPort(rConfig.getNodeHostPort()); + monConfig.setHelperHosts(rConfig.getHelperHosts()); + + ReplicationConfig r0Config = + repEnvInfo[0].getEnv().getRepConfig(); + monConfig.setRepNetConfig(r0Config.getRepNetConfig()); + monitor = new Monitor(monConfig); + monitor.register(); + } else { + monitor = null; + } + + for (int i=0; i < getNumDataNodes(); i++) { + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + final boolean isMaster = (env.getState() == MASTER); + final int targetGroupSize = groupSize; + + ReplicationGroup group = null; + for (int j=0; j < 100; j++) { + group = env.getGroup(); + if (group.getNodes().size() == targetGroupSize) { + break; + } + /* Wait for the replica to catch up. 
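Poll up to 100 times, sleeping one second between checks.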
*/ + Thread.sleep(1000); + } + assertEquals("Nodes", targetGroupSize, group.getNodes().size()); + assertEquals(RepTestUtils.TEST_REP_GROUP_NAME, group.getName()); + for (RepTestUtils.RepEnvInfo rinfo : repEnvInfo) { + final ReplicationConfig repConfig = rinfo.getRepConfig(); + ReplicationNode member = + group.getMember(repConfig.getNodeName()); + + /* log the group and its member nodes only on the master */ + if (isMaster) { + logger.info("group: " + group.getName() + + " node: " + member.getName() + + " type: " + member.getType() + + " socket addr: " + member.getSocketAddress()); + } + + assertNotNull("Member", member); + assertEquals(repConfig.getNodeName(), member.getName()); + assertEquals(repConfig.getNodeType(), member.getType()); + assertEquals(repConfig.getNodeSocketAddress(), + member.getSocketAddress()); + } + + /* verify monitor */ + final Set<ReplicationNode> monitorNodes = group.getMonitorNodes(); + for (final ReplicationNode n : monitorNodes) { + assertEquals(NodeType.MONITOR, n.getType()); + } + if (hasMonitor) { + assertEquals("Monitor nodes", 1, monitorNodes.size()); + logger.info("monitor verified"); + } + + /* verify data nodes */ + final Set<ReplicationNode> dataNodes = group.getDataNodes(); + for (final ReplicationNode n : dataNodes) { + assertEquals(NodeType.ELECTABLE, n.getType()); + } + logger.info("data nodes verified"); + } + } + + protected void cleanLog(ReplicatedEnvironment repEnv) { + CheckpointConfig force = new CheckpointConfig(); + force.setForce(true); + + EnvironmentStats stats = repEnv.getStats(new StatsConfig()); + int numCleaned; + int cleanedThisRun; + long beforeNFileDeletes = stats.getNCleanerDeletions(); + + /* clean logs */ + numCleaned = 0; + while ((cleanedThisRun = repEnv.cleanLog()) > 0) { + numCleaned += cleanedThisRun; + } + repEnv.checkpoint(force); + + while ((cleanedThisRun = repEnv.cleanLog()) > 0) { + numCleaned += cleanedThisRun; + } + repEnv.checkpoint(force); + + stats = repEnv.getStats(new StatsConfig()); + long afterNFileDeletes = stats.getNCleanerDeletions(); + long actualDeleted = afterNFileDeletes - beforeNFileDeletes; + repEnv.checkpoint(force); + + logger.info(repEnv.getNodeName() + + " deletedFiles=" + actualDeleted + + " numCleaned=" + numCleaned); + } + + void failTimeout() { + String error = "fail test due to timeout in " + + TEST_POLL_TIMEOUT_MS + " ms"; + logger.info(error); + fail(error); + } + + /* Populate data into test db and verify */ + void populateDataAndVerify(ReplicatedEnvironment masterEnv) { + keys.addAll(createTestData(START_KEY, numKeys)); + populateDB(masterEnv, dbName, keys); + readDB(masterEnv, dbName, START_KEY, numKeys); + logger.info(numKeys + " records (start key: " + + START_KEY + ") have been populated into db " + + dbName + " and verified"); + } + + /* + * Create a subscription configuration + * + * @param masterEnv env of master node + * @param useGroupUUID true to use a valid group uuid + * + * @return a subscription configuration + * @throws Exception + */ + SubscriptionConfig createSubConfig(ReplicatedEnvironment masterEnv, + boolean useGroupUUID) + throws Exception { + return createSubConfig(masterEnv, useGroupUUID, new Properties()); + } + + SubscriptionConfig createSubConfig(ReplicatedEnvironment masterEnv, + boolean useGroupUUID, Properties sp) + throws Exception { + /* constants and parameters used in test */ + final String home = "./subhome/"; + final String subNodeName = "test-subscriber-node"; + final String nodeHostPortPair = "localhost:6001"; + + String feederNode; + int feederPort; + String groupName; + File subHome = +
new File(envRoot.getAbsolutePath() + File.separator + home); + if (!subHome.exists()) { + if (subHome.mkdir()) { + logger.info("create test dir " + subHome.getAbsolutePath()); + } else { + fail("unable to create test dir, fail the test"); + } + } + + ReplicationGroup group = masterEnv.getGroup(); + ReplicationNode member = group.getMember(masterEnv.getNodeName()); + feederNode = member.getHostName(); + feederPort = member.getPort(); + groupName = group.getName(); + + UUID uuid; + if (useGroupUUID) { + uuid = group.getRepGroupImpl().getUUID(); + } else { + uuid = null; + } + + String msg = "Feeder is on node " + feederNode + ":" + feederPort + + " in replication group " + groupName + + " (group uuid: " + uuid + ")"; + logger.info(msg); + + final String feederHostPortPair = feederNode + ":" + feederPort; + + + return new SubscriptionConfig(subNodeName, subHome.getAbsolutePath(), + nodeHostPortPair, feederHostPortPair, + groupName, uuid, NodeType.SECONDARY, + null, sp); + } + + /* Create a list of integer keys for testing */ + private static List<Integer> createTestData(int start, int num) { + final List<Integer> ret = new ArrayList<>(); + for (int i = start; i < start + num; i++) { + ret.add(i); + } + return ret; + } + + private int getNumDataNodes() { + return 1 + numReplicas; + } + + /* + * Put some keys multiple times and then delete them, in order to create + * obsolete entries in the log files that the cleaner can reclaim + * + * @param master master node + * @param dbName test db name + * @param numKeys number of keys to put and delete + */ + private void createObsoleteLogs(ReplicatedEnvironment master, + String dbName, + int numKeys) { + final int repeatPutTimes = 10; + final int startKey = START_KEY; + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[100]); + + final TransactionConfig txnConf = new TransactionConfig(); + txnConf.setDurability + (new Durability(Durability.SyncPolicy.NO_SYNC, + Durability.SyncPolicy.NO_SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY)); + + + try (Database db = master.openDatabase(null, dbName, dbConfig)) { + for (int i = startKey; i < numKeys; i++) { + IntegerBinding.intToEntry(i, key); + final Transaction txn = master.beginTransaction(null, txnConf); + for (int repeat = 0; repeat < repeatPutTimes; repeat++) { + db.put(txn, key, data); + } + db.delete(txn, key); + txn.commit(); + } + + /* One more synchronous one to flush the log files.
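Committing with SyncPolicy.SYNC forces the commit to be fsynced, which also makes the earlier NO_SYNC transactions durable on disk.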
*/ + IntegerBinding.intToEntry(startKey, key); + txnConf.setDurability + (new Durability(Durability.SyncPolicy.SYNC, + Durability.SyncPolicy.SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY)); + final Transaction txn = master.beginTransaction(null, txnConf); + db.put(txn, key, data); + db.delete(txn, key); + txn.commit(); + } + } + + class SubscriptionTestCallback implements SubscriptionCallback { + + + private final int numKeysExpected; + private final boolean allowException; + + private int numCommits; + private int numAborts; + private int numExceptions; + private Exception firstException; + private Exception lastException; + private List<byte[]> recvKeys; + private VLSN firstVLSN; + private boolean testDone; + + SubscriptionTestCallback(int numKeysExpected, boolean allowException) { + this.numKeysExpected = numKeysExpected; + this.allowException = allowException; + numAborts = 0; + numCommits = 0; + recvKeys = new ArrayList<>(); + firstVLSN = VLSN.NULL_VLSN; + testDone = false; + } + + /* callback that does not allow exceptions */ + SubscriptionTestCallback(int numKeysExpected) { + this(numKeysExpected, false); + } + + @Override + public void processPut(VLSN vlsn, byte[] key, byte[] value, + long txnId) { + processPutAndDel(vlsn, key); + } + + @Override + public void processDel(VLSN vlsn, byte[] key, long txnId) { + processPutAndDel(vlsn, key); + } + + @Override + public void processCommit(VLSN vlsn, long txnId) { + numCommits++; + } + + @Override + public void processAbort(VLSN vlsn, long txnId) { + numAborts++; + } + + @Override + public void processException(Exception exception) { + assert (exception != null); + + if (allowException) { + numExceptions++; + if (firstException == null) { + firstException = exception; + } + lastException = exception; + } else { + /* fail test if we do not expect any exception */ + fail(exception.getMessage()); + } + testDone = true; + } + + void resetTestDone() { + testDone = false; + } + + boolean isTestDone() { + return testDone; + } + + List<byte[]> getAllKeys() { + return recvKeys; + } + + VLSN getFirstVLSN() { + return firstVLSN; + } + + int getNumCommits() { + return numCommits; + } + + int getNumAborts() { + return numAborts; + } + + int getNumExceptions() { + return numExceptions; + } + + Exception getLastException() { + return lastException; + } + + Exception getFirstException() { + return firstException; + } + + private void processPutAndDel(VLSN vlsn, byte[] key) { + /* record the first VLSN received from feeder */ + if (firstVLSN.isNull()) { + firstVLSN = vlsn; + } + + if (recvKeys.size() < numKeysExpected) { + recvKeys.add(key); + + logger.finest("vlsn: " + vlsn + + ", key " + Key.dumpString(key, 0) + + ", # of keys received: " + recvKeys.size() + + ", expected: " + numKeysExpected); + + if (recvKeys.size() == numKeysExpected) { + logger.info("received all " + numKeysExpected + " keys."); + testDone = true; + } + + } else { + /* + * we may receive more keys because in some tests the # of + * keys expected could be less than the size of the database; + * the extra keys are not interesting to us, so ignore them. + */ + logger.finest("keys beyond expected " + numKeysExpected + + " keys, vlsn: " + vlsn + + ", key: " + Key.dumpString(key, 0)); + } + } + + } +} diff --git a/test/com/sleepycat/je/rep/txn/CommitTokenTest.java b/test/com/sleepycat/je/rep/txn/CommitTokenTest.java new file mode 100644 index 0000000..be9ce18 --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/CommitTokenTest.java @@ -0,0 +1,136 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates.
All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.UUID; + +import org.junit.Test; + +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class CommitTokenTest extends TestBase { + + private final File envRoot; + + public CommitTokenTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @Test + public void testBasic() + throws IOException, ClassNotFoundException { + + UUID repenvUUID = UUID.randomUUID(); + + CommitToken t1 = new CommitToken(repenvUUID, 1); + CommitToken t2 = new CommitToken(repenvUUID, 2); + CommitToken t3 = new CommitToken(repenvUUID, 3); + + assertTrue((t1.compareTo(t2) < 0) && (t2.compareTo(t1) > 0)); + assertTrue((t2.compareTo(t3) < 0) && (t3.compareTo(t2) > 0)); + assertTrue((t1.compareTo(t3) < 0) && (t3.compareTo(t1) > 0)); + + assertEquals(t1, new CommitToken(repenvUUID, 1)); + assertEquals(0, t1.compareTo(new CommitToken(repenvUUID, 1))); + + try { + t1.compareTo(new CommitToken(UUID.randomUUID(), 1)); + fail("Expected exception"); + } catch (IllegalArgumentException ie) { + // expected + } + + /* test serialization/de-serialization. */ + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(t1); + ByteArrayInputStream bais = + new ByteArrayInputStream(baos.toByteArray()); + ObjectInputStream ois = new ObjectInputStream(bais); + CommitToken t11 = (CommitToken)ois.readObject(); + + assertEquals(t1, t11); + } + + /** + * Make sure that we only return a commit token when we've done real work. + */ + @Test + public void testCommitTokenFailures() + throws IOException { + + RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + /* It's illegal to get a commit token before the txn has closed. */ + Transaction txn = master.beginTransaction(null, null); + try { + txn.getCommitToken(); + fail("Should have gotten IllegalStateException"); + } catch (IllegalStateException expected) { + /* expected outcome. */ + } + + /* + * Now abort and try again. Since this transaction has done no + * writing, the commit token should be null. + */ + txn.abort(); + CommitToken token = txn.getCommitToken(); + assertTrue(token == null); + + /* + * A committed txn that has done no writing should also return a null + * commit token.
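+ * Only a txn that performs a replicated write, like the database + * create below, yields a non-null token.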
+ */ + txn = master.beginTransaction(null, null); + txn.commit(); + token = txn.getCommitToken(); + assertTrue(token == null); + + /* + * A committed txn that has done a write should return a non-null + * token. + */ + txn = master.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = master.openDatabase(txn, "foo", dbConfig); + db.close(); + txn.commit(); + token = txn.getCommitToken(); + assertTrue(token != null); + + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } +} diff --git a/test/com/sleepycat/je/rep/txn/ExceptionTest.java b/test/com/sleepycat/je/rep/txn/ExceptionTest.java new file mode 100644 index 0000000..6443fff --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/ExceptionTest.java @@ -0,0 +1,110 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.atomic.AtomicReference; + +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class ExceptionTest extends RepTestBase { + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + } + + /** + * Test for SR23970. + * + * A transaction begin should not invalidate the environment if it is + * stalled in a beginTransaction while trying to get a quorum of replicas. + * The transaction will fail later, without invalidating the environment, + * when it tries to acquire locks and discovers that the environment has + * been closed. + */ + @Test + public void test() throws InterruptedException { + + repEnvInfo[0].getRepConfig(). + setConfigParam(ReplicationConfig.INSUFFICIENT_REPLICAS_TIMEOUT, + "60 s"); + createGroup(2); + final ReplicatedEnvironment menv = repEnvInfo[0].getEnv(); + assertEquals(menv.getState(), State.MASTER); + + /* Shutdown the second RN so that the beginTransaction() below stalls */ + repEnvInfo[1].closeEnv(); + + final AtomicReference te = new AtomicReference(); + + final Thread t = new Thread () { + @Override + public void run() { + Transaction txn = null; + try { + txn = menv.beginTransaction(null, + RepTestUtils.SYNC_SYNC_ALL_TC); + } catch (Exception e) { + /* Test failed if there is an exception on this path. */ + te.set(e); + } finally { + if (txn != null) { + try { + txn.abort(); + } catch (Exception e) { + /* Ignore it */ + } + } + } + } + }; + + t.start(); + + /* Let it get to the beginTransaction. */ + Thread.sleep(2000); + + try { + menv.close(); + } catch (Exception e) { + /* + * The abort above may not execute in time leaving an unclosed txn. 
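+ * (The spawned thread may still be blocked in beginTransaction.)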
+ * Ignore the resulting exception. + */ + } + + t.join(10000); + + assertTrue(menv.getInvalidatingException() == null); + } +} diff --git a/test/com/sleepycat/je/rep/txn/LockPreemptionTest.java b/test/com/sleepycat/je/rep/txn/LockPreemptionTest.java new file mode 100644 index 0000000..198f5a5 --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/LockPreemptionTest.java @@ -0,0 +1,785 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.LockPreemptedException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class LockPreemptionTest extends TestBase { + + private static final byte[] KEY1 = new byte[] { 1 }; + private static final byte[] KEY2 = new byte[] { 2 }; + private static final byte[] DATA = new byte[1]; + + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + private ReplicatedEnvironment masterEnv; + private ReplicatedEnvironment replicaEnv; + private Database masterDb; + private Database replicaDb; + + public LockPreemptionTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + if (repEnvInfo != null) { + + /* + * close() was not called, test failed. Do cleanup to allow more + * tests to run, but leave log files for debugging this test case. + */ + try { + close(false /*normalShutdown*/); + } catch (Exception ignored) { + /* This secondary exception is just noise. */ + } + } + } + + /** + * Create a 2 node group and insert two records into a database. + * + * The minimum replay txn timeout is configured to cause the replayer to + * timeout immediately when there is a lock conflict, since that's the + * situation we're testing for. + * + * ReplicaAckPolicy.ALL is used to ensure that when a master operation is + * committed, the change is immediately available on the replica for + * testing -- no waiting in the test is needed. 
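+ *
+ * (Editorial note, not in the original comment: the
+ * REPLAY_TXN_LOCK_TIMEOUT of "1 ms" configured below is what lets a
+ * replayed master write steal a replica reader's lock almost
+ * immediately; that steal is the "Steal." step in every test case that
+ * follows.)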
+ */
+ private void open()
+ throws IOException {
+
+ repEnvInfo = RepTestUtils.setupEnvInfos
+ (envRoot, 2,
+ RepTestUtils.createEnvConfig
+ (new Durability(Durability.SyncPolicy.WRITE_NO_SYNC,
+ Durability.SyncPolicy.WRITE_NO_SYNC,
+ Durability.ReplicaAckPolicy.ALL)),
+ new ReplicationConfig().setConfigParam
+ (ReplicationConfig.REPLAY_TXN_LOCK_TIMEOUT, "1 ms"));
+ masterEnv = RepTestUtils.joinGroup(repEnvInfo);
+ replicaEnv = repEnvInfo[1].getEnv();
+ assertNotNull(masterEnv);
+ assertNotNull(replicaEnv);
+ assertNotSame(masterEnv, replicaEnv);
+ masterDb = masterEnv.openDatabase
+ (null, "foo",
+ new DatabaseConfig().setAllowCreate(true).setTransactional(true));
+ replicaDb = replicaEnv.openDatabase
+ (null, "foo", new DatabaseConfig().setTransactional(true));
+ /* Insert records to operate on. */
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(DATA)));
+ assertSame(OperationStatus.SUCCESS,
+ replicaDb.get(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null));
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY2),
+ new DatabaseEntry(DATA)));
+ assertSame(OperationStatus.SUCCESS,
+ replicaDb.get(null, new DatabaseEntry(KEY2),
+ new DatabaseEntry(), null));
+ }
+
+ private void close() {
+ close(true /*normalShutdown*/);
+ }
+
+ private void close(boolean normalShutdown) {
+ try {
+ if (normalShutdown) {
+ masterDb.close();
+ replicaDb.close();
+ RepTestUtils.shutdownRepEnvs(repEnvInfo);
+ } else {
+ for (RepEnvInfo info : repEnvInfo) {
+ info.abnormalCloseEnv();
+ }
+ }
+ } finally {
+ repEnvInfo = null;
+ masterEnv = null;
+ replicaEnv = null;
+ masterDb = null;
+ replicaDb = null;
+ }
+ }
+
+ /**
+ * Stealing a lock from a txn that reads again WILL cause LockPreempted.
+ */
+ @Test
+ public void testPreempted()
+ throws IOException {
+
+ open();
+
+ /* Read. */
+ final Transaction replicaTxn = replicaEnv.beginTransaction(null, null);
+ assertSame(OperationStatus.SUCCESS,
+ replicaDb.get(replicaTxn, new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null));
+
+ /* Steal. */
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(DATA)));
+
+ /* Read. */
+ try {
+ replicaDb.get(replicaTxn, new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null);
+ fail();
+ } catch (LockPreemptedException expected) {
+ assertFalse(replicaTxn.isValid());
+ replicaTxn.abort();
+ }
+
+ close();
+ }
+
+ /**
+ * Cursor variation of testPreempted.
+ */
+ @Test
+ public void testPreemptedWithCursor()
+ throws IOException {
+
+ open();
+
+ /* Read. */
+ final Transaction replicaTxn = replicaEnv.beginTransaction(null, null);
+ final Cursor replicaCursor = replicaDb.openCursor(replicaTxn, null);
+ assertSame(OperationStatus.SUCCESS,
+ replicaCursor.getSearchKey(new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null));
+
+ /* Steal. */
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(DATA)));
+
+ /* Read. */
+ try {
+ replicaCursor.getNext(new DatabaseEntry(),
+ new DatabaseEntry(), null);
+ fail();
+ } catch (LockPreemptedException expected) {
+ assertFalse(replicaTxn.isValid());
+ replicaCursor.close();
+ replicaTxn.abort();
+ }
+
+ close();
+ }
+
+ /**
+ * With a ReadCommitted cursor, getCurrent (unlike getNext/getPrev/etc)
+ * will cause LockPreempted after a lock is stolen, because cursor
+ * stability is violated.
+ */
+ @Test
+ public void testPreemptedWithReadCommittedCursor()
+ throws IOException {
+
+ open();
+
+ /* Read.
*/
+ final Transaction replicaTxn = replicaEnv.beginTransaction(null, null);
+ final Cursor replicaCursor =
+ replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED);
+ assertSame(OperationStatus.SUCCESS,
+ replicaCursor.getSearchKey(new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null));
+
+ /* Steal. */
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(DATA)));
+
+ /* Read. */
+ try {
+ replicaCursor.getCurrent(new DatabaseEntry(),
+ new DatabaseEntry(), null);
+ fail();
+ } catch (LockPreemptedException expected) {
+ assertFalse(replicaTxn.isValid());
+ replicaCursor.close();
+ replicaTxn.abort();
+ }
+
+ close();
+ }
+
+ /**
+ * Non-transactional version of testPreemptedWithReadCommittedCursor.
+ */
+ @Test
+ public void testPreemptedWithNonTransactionalCursor()
+ throws IOException {
+
+ open();
+
+ /* Read. */
+ final Cursor replicaCursor = replicaDb.openCursor(null, null);
+ assertSame(OperationStatus.SUCCESS,
+ replicaCursor.getSearchKey(new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null));
+
+ /* Steal. */
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(DATA)));
+
+ /* Read. */
+ try {
+ replicaCursor.getCurrent(new DatabaseEntry(),
+ new DatabaseEntry(), null);
+ fail();
+ } catch (LockPreemptedException expected) {
+ replicaCursor.close();
+ }
+
+ close();
+ }
+
+ /**
+ * When a lock is stolen from one ReadCommitted cursor and then a lock is
+ * taken by another (for the same txn), LockPreempted is thrown.
+ */
+ @Test
+ public void testPreemptedWithTwoReadCommittedCursors()
+ throws IOException {
+
+ open();
+
+ /* Read. */
+ final Transaction replicaTxn = replicaEnv.beginTransaction(null, null);
+ final Cursor replicaCursor1 =
+ replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED);
+ assertSame(OperationStatus.SUCCESS,
+ replicaCursor1.getSearchKey(new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null));
+
+ /* Steal. */
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(DATA)));
+
+ /* Read. */
+ final Cursor replicaCursor2 =
+ replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED);
+ try {
+ replicaCursor2.getSearchKey(new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null);
+ fail();
+ } catch (LockPreemptedException expected) {
+ assertFalse(replicaTxn.isValid());
+ replicaCursor1.close();
+ replicaCursor2.close();
+ replicaTxn.abort();
+ }
+
+ close();
+ }
+
+ /**
+ * Non-transactional version of testPreemptedWithTwoReadCommittedCursors.
+ */
+ @Test
+ public void testPreemptedWithTwoNonTransactionalCursors()
+ throws IOException {
+
+ open();
+
+ /* Read. */
+ final Cursor replicaCursor1 = replicaDb.openCursor(null, null);
+ assertSame(OperationStatus.SUCCESS,
+ replicaCursor1.getSearchKey(new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null));
+
+ /* Steal. */
+ assertSame(OperationStatus.SUCCESS,
+ masterDb.put(null, new DatabaseEntry(KEY1),
+ new DatabaseEntry(DATA)));
+
+ /* Read. */
+ final Cursor replicaCursor2 = replicaDb.openCursor(null, null);
+ try {
+ replicaCursor2.getSearchKey(new DatabaseEntry(KEY1),
+ new DatabaseEntry(), null);
+ fail();
+ } catch (LockPreemptedException expected) {
+ replicaCursor1.close();
+ replicaCursor2.close();
+ }
+
+ close();
+ }
+
+ /**
+ * Similar to testPreemptedWithTwoReadCommittedCursors but LockPreempted is
+ * also thrown when one locker is the ReadCommittedLocker and the other is
+ * the Txn itself.
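+ *
+ * Aside, illustrative only and not part of the original test: an
+ * application running on a replica normally treats LockPreemptedException
+ * like other OperationFailureExceptions -- abort and retry. A sketch;
+ * MAX_RETRIES and the env/db/key/data variables are placeholders:
+ *
+ *   for (int i = 0; i < MAX_RETRIES; i++) {
+ *       Transaction txn = env.beginTransaction(null, null);
+ *       try {
+ *           db.get(txn, key, data, null);
+ *           txn.commit();
+ *           break;
+ *       } catch (LockPreemptedException lpe) {
+ *           txn.abort();   // replay stole the lock; retry from scratch
+ *       }
+ *   }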
+ */ + @Test + public void testPreemptedWithReadCommittedCursorThenDbRead() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + final Cursor replicaCursor = + replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. */ + try { + replicaDb.get(replicaTxn, new DatabaseEntry(KEY1), + new DatabaseEntry(), null); + fail(); + } catch (LockPreemptedException expected) { + assertFalse(replicaTxn.isValid()); + replicaCursor.close(); + replicaTxn.abort(); + } + + close(); + } + + /** + * Reverse situation from testPreemptedWithReadCommittedCursorThenDbRead. + */ + @Test + public void testPreemptedWithDbReadThenReadCommittedCursor() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + assertSame(OperationStatus.SUCCESS, + replicaDb.get(replicaTxn, new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. */ + final Cursor replicaCursor = + replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED); + try { + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null); + fail(); + } catch (LockPreemptedException expected) { + assertFalse(replicaTxn.isValid()); + replicaCursor.close(); + replicaTxn.abort(); + } + + close(); + } + + /** + * Make sure LockPreempted is thrown when (after lock stealing), the cursor + * first attempts to move and fails (gets a NOTFOUND in this case), and + * then calls getCurrent. In other words, make sure the preempted state is + * not mistakenly reset by the NOTFOUND operation. + */ + @Test + public void testPreemptedAfterAttemptToMoveReadCommittedCursor() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + final Cursor replicaCursor = + replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. */ + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + try { + replicaCursor.getCurrent(new DatabaseEntry(), + new DatabaseEntry(), null); + fail(); + } catch (LockPreemptedException expected) { + assertFalse(replicaTxn.isValid()); + replicaCursor.close(); + replicaTxn.abort(); + } + + close(); + } + + /** + * Non-transactional variant of + * testPreemptedAfterAttemptToMoveReadCommittedCursor. + */ + @Test + public void testPreemptedAfterAttemptToMoveNonTransactionalCursor() + throws IOException { + + open(); + + /* Read. */ + final Cursor replicaCursor = replicaDb.openCursor(null, null); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. 
*/ + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + try { + replicaCursor.getCurrent(new DatabaseEntry(), + new DatabaseEntry(), null); + fail(); + } catch (LockPreemptedException expected) { + replicaCursor.close(); + } + + close(); + } + + /** + * If a ReadCommitted cursor is moved after lock stealing, LockPreempted is + * NOT thrown. + */ + @Test + public void testNotPreemptedMoveReadCommittedCursor() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + final Cursor replicaCursor = + replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. */ + assertSame(OperationStatus.SUCCESS, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + + replicaCursor.close(); + replicaTxn.commit(); + close(); + } + + /** + * Non-transactional variant of testNotPreemptedMoveReadCommittedCursor. + */ + @Test + public void testNotPreemptedMoveNonTransactionalCursor() + throws IOException { + + open(); + + /* Read. */ + final Cursor replicaCursor = replicaDb.openCursor(null, null); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. */ + assertSame(OperationStatus.SUCCESS, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + + replicaCursor.close(); + close(); + } + + /** + * Make sure LockPreempted is NOT thrown when (after lock stealing), the + * cursor first attempts to move and fails (gets a NOTFOUND in this case), + * and then moves the cursor. In other words, make sure the preempted + * state is not mistakenly reset by the NOTFOUND operation. + */ + @Test + public void testNotPreemptedAfterAttemptToMoveReadCommittedCursor() + + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + final Cursor replicaCursor = + replicaDb.openCursor(replicaTxn, CursorConfig.READ_COMMITTED); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. 
*/ + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + + replicaCursor.close(); + replicaTxn.commit(); + close(); + } + + /** + * Non-transactional variant of + * testNotPreemptedAfterAttemptToMoveReadCommittedCursor. + */ + @Test + public void testNotPreemptedAfterAttemptToMoveNonTransactionalCursor() + throws IOException { + + open(); + + /* Read. */ + final Cursor replicaCursor = replicaDb.openCursor(null, null); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Read. */ + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.NOTFOUND, + replicaCursor.getNext(new DatabaseEntry(), + new DatabaseEntry(), null)); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getPrev(new DatabaseEntry(), + new DatabaseEntry(), null)); + + replicaCursor.close(); + close(); + } + + /** + * Ensure that LockPreempted is not thrown by commit when no additional + * lock is taken after a lock is stolen. + */ + @Test + public void testNotPreemptedCommit() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + assertSame(OperationStatus.SUCCESS, + replicaDb.get(replicaTxn, new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Commit. */ + replicaTxn.commit(); + + close(); + } + + /** + * Cursor variant of testNotPreemptedCommit. + */ + @Test + public void testNotPreemptedCommitWithCursor() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + final Cursor replicaCursor = replicaDb.openCursor(replicaTxn, null); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Commit. */ + replicaCursor.close(); + replicaTxn.commit(); + + close(); + } + + /** + * Ensure that LockPreempted is not thrown by commit when no additional + * lock is taken after a lock is stolen. + */ + @Test + public void testNotPreemptedAbort() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + assertSame(OperationStatus.SUCCESS, + replicaDb.get(replicaTxn, new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Abort. */ + replicaTxn.abort(); + + close(); + } + + /** + * Cursor variant of testNotPreemptedAbort. 
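+ *
+ * (Summary of the cases above: once a lock is stolen, the next locking
+ * operation by the same txn -- a Database.get, a fresh cursor search, or
+ * getCurrent on a ReadCommitted or non-transactional cursor -- throws
+ * LockPreempted, while simply moving a ReadCommitted or non-transactional
+ * cursor to a different record does not, and neither do commit and
+ * abort.)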
+ */ + @Test + public void testNotPreemptedAbortWithCursor() + throws IOException { + + open(); + + /* Read. */ + final Transaction replicaTxn = replicaEnv.beginTransaction(null, null); + final Cursor replicaCursor = replicaDb.openCursor(replicaTxn, null); + assertSame(OperationStatus.SUCCESS, + replicaCursor.getSearchKey(new DatabaseEntry(KEY1), + new DatabaseEntry(), null)); + + /* Steal. */ + assertSame(OperationStatus.SUCCESS, + masterDb.put(null, new DatabaseEntry(KEY1), + new DatabaseEntry(DATA))); + + /* Abort. */ + replicaCursor.close(); + replicaTxn.abort(); + + close(); + } +} diff --git a/test/com/sleepycat/je/rep/txn/PostLogCommitTest.java b/test/com/sleepycat/je/rep/txn/PostLogCommitTest.java new file mode 100644 index 0000000..2b78e9c --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/PostLogCommitTest.java @@ -0,0 +1,220 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.txn.MasterTxn.MasterTxnFactory; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class PostLogCommitTest extends TestBase { + + private final File envRoot; + + public PostLogCommitTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * Checks that the state of a txn is COMMITTED when a post-log-commit + * exception (InsufficientAcksException) is thrown, and that abort can be + * called after commit. + * + * Prior to a bug fix [#21598] the txn state was MUST_ABORT, and the abort + * fired an assertion because the commitLsn was non-null. Even worse, if + * assertions were disabled the abort would be logged (as well as the + * commit). + */ + @Test + public void testPostLogCommitException() + throws IOException { + + /* Create 3 node group with 1 "dead" replica that we'll close below. 
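+ *
+ * Background, not part of the original comment: when commit() throws
+ * InsufficientAcksException the transaction has already committed
+ * locally; only the ack quorum failed. An illustrative caller sketch:
+ *
+ *   try {
+ *       txn.commit(syncSyncAll);
+ *   } catch (InsufficientAcksException e) {
+ *       // Committed locally; replicas may lag. Do NOT expect a
+ *       // subsequent abort() to undo the commit -- see this test.
+ *       assert txn.getState() == Transaction.State.COMMITTED;
+ *   }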
*/ + final RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + final ReplicatedEnvironment master = + RepTestUtils.joinGroup(repEnvInfo); + assertSame(master, repEnvInfo[0].getEnv()); + final ReplicatedEnvironment liveReplica = repEnvInfo[1].getEnv(); + final RepEnvInfo deadReplicaInfo = repEnvInfo[2]; + + final Durability syncSyncAll = new Durability + (Durability.SyncPolicy.SYNC, Durability.SyncPolicy.SYNC, + Durability.ReplicaAckPolicy.ALL); + + /* + * Open/create database on master. Use syncSyncAll so DB can be opened + * immediately on replica. + */ + final DatabaseConfig dbConfig = + new DatabaseConfig().setAllowCreate(true).setTransactional(true); + Transaction txn = master.beginTransaction(null, null); + Database masterDb = master.openDatabase(txn, "foo", dbConfig); + txn.commit(syncSyncAll); + txn = null; + + /* Open DB on replica. */ + dbConfig.setAllowCreate(false); + Database replicaDb = liveReplica.openDatabase(null, "foo", dbConfig); + + /* + * Write data and commit with ReplicaAckPolicy.ALL, expect + * InsufficientAcksException. + */ + final DatabaseEntry key = new DatabaseEntry(new byte[1]); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + try { + MasterTxn.setFactory(new TxnFactory(deadReplicaInfo)); + txn = master.beginTransaction(null, null); + final OperationStatus status = masterDb.put(txn, key, data); + assertSame(OperationStatus.SUCCESS, status); + verifyData(txn, masterDb); + try { + txn.commit(syncSyncAll); + fail(); + } catch (InsufficientAcksException expected) { + } + } finally { + MasterTxn.setFactory(null); + } + + /* Before the fix, the txn state was MUST_ABORT here. */ + assertSame(Transaction.State.COMMITTED, txn.getState()); + verifyData(null, masterDb); + + /* Before the fix, abort would fire an assertion here. */ + txn.abort(); + assertSame(Transaction.State.COMMITTED, txn.getState()); + verifyData(null, masterDb); + verifyData(null, replicaDb); + + masterDb.close(); + replicaDb.close(); + + /* Shutdown environments without a checkpoint. */ + for (final RepEnvInfo repi : repEnvInfo) { + if (repi.getEnv() != null) { + repi.abnormalCloseEnv(); + } + } + + /* Restart the environments and verify data for all. */ + RepTestUtils.restartGroup(repEnvInfo); + dbConfig.setAllowCreate(false); + for (final RepEnvInfo repi : repEnvInfo) { + final ReplicatedEnvironment rep = repi.getEnv(); + final Database db = rep.openDatabase(null, "foo", dbConfig); + verifyData(null, db); + db.close(); + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + private void verifyData(Transaction txn, Database checkDb) { + final DatabaseEntry key = new DatabaseEntry(new byte[1]); + final DatabaseEntry data = new DatabaseEntry(); + final OperationStatus status = checkDb.get(txn, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(1, checkDb.count()); + assertEquals(1, key.getData().length); + assertEquals(1, data.getData().length); + assertEquals(0, key.getData()[0]); + assertEquals(0, data.getData()[0]); + } + + /** + * Factory for creating a TestMasterTxn. The TestMasterTxn is only created + * when called in this thread, to guard against it being called by + * unrelated tasks. 
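+ * (It is installed via MasterTxn.setFactory() just before the write above
+ * and cleared with setFactory(null) in a finally block, so unrelated
+ * tests are unaffected.)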
+ */ + private class TxnFactory implements MasterTxnFactory { + + private final RepEnvInfo deadReplicaInfo; + private final Thread thread = Thread.currentThread(); + + TxnFactory(RepEnvInfo deadReplicaInfo) { + this.deadReplicaInfo = deadReplicaInfo; + } + + @Override + public MasterTxn create(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair) { + if (Thread.currentThread() != thread) { + return new MasterTxn(envImpl, config, nameIdPair); + } + return new TestMasterTxn(envImpl, config, nameIdPair, + deadReplicaInfo); + } + + @Override + public MasterTxn createNullTxn(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair) { + + return new MasterTxn(envImpl, config, nameIdPair) { + @Override + protected boolean updateLoggedForTxn() { + return true; + } + }; + } + } + + /** + * MasterTxn that calls deadReplicaInfo.closeEnv after the pre-log-commit + * check. If we were to close the replica env earlier, + * InsufficientReplicasException would be thrown by the pre-log-commit + * check. We want InsufficientAcksException to be thrown instead, during + * the post-log-commit check. + */ + private class TestMasterTxn extends MasterTxn { + + private final RepEnvInfo deadReplicaInfo; + + public TestMasterTxn(EnvironmentImpl envImpl, + TransactionConfig config, + NameIdPair nameIdPair, + RepEnvInfo deadReplicaInfo) { + super(envImpl, config, nameIdPair); + this.deadReplicaInfo = deadReplicaInfo; + } + + @Override + protected void preLogCommitHook() { + super.preLogCommitHook(); + deadReplicaInfo.closeEnv(); + } + } +} diff --git a/test/com/sleepycat/je/rep/txn/RepAutoCommitTest.java b/test/com/sleepycat/je/rep/txn/RepAutoCommitTest.java new file mode 100644 index 0000000..bc9f89a --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/RepAutoCommitTest.java @@ -0,0 +1,253 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.TimeConsistencyPolicy; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.RepUtils; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Check that autocommit txns in a replicated environment still obey + * consistency policies. + */ +public class RepAutoCommitTest extends TestBase { + + /* Replication tests use multiple environments. */ + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + + public RepAutoCommitTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() != null) { + info.closeEnv(); + } + } + } + + /** + * @throws IOException + */ + @Test + public void testAutoCommit() + throws IOException { + + /* Register custom consistency policy format while quiescent. */ + RepUtils.addConsistencyPolicyFormat + (RepTestUtils.AlwaysFail.NAME, + new RepTestUtils.AlwaysFailFormat()); + + Logger logger = LoggerUtils.getLoggerFixedPrefix(getClass(), + "Test"); + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + /* Require all nodes to ack. */ + for (RepEnvInfo rei : repEnvInfo) { + rei.getEnvConfig(). + setDurability(RepTestUtils.SYNC_SYNC_ALL_DURABILITY); + } + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + + /* Create a db */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database masterDb = master.openDatabase(null, "Foo", dbConfig); + Database replicaDb = null; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + ReplicatedEnvironment replica = null; + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() != master) { + replica = info.getEnv(); + break; + } + } + + try { + /* Insert a record, should be successful. */ + IntegerBinding.intToEntry(1, key); + IntegerBinding.intToEntry(1, data); + assertEquals(OperationStatus.SUCCESS, + masterDb.put(null, key, data)); + + /* Read on replica w/auto commit. */ + replicaDb = replica.openDatabase(null, "Foo", dbConfig); + assertEquals(OperationStatus.SUCCESS, + replicaDb.get(null, key, data, LockMode.DEFAULT)); + + /* Read on replica w/non-txnl cursor. */ + Cursor cursor = replicaDb.openCursor(null, null); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, LockMode.DEFAULT)); + cursor.close(); + + /* Read on master w/auto commit. 
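+ * (Here "auto commit" means passing a null Transaction to a transactional
+ * Database: JE wraps the single operation in an internal txn, which is
+ * what must still obey the consistency policy.)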
*/
+ assertEquals(OperationStatus.SUCCESS,
+ masterDb.get(null, key, data, LockMode.DEFAULT));
+
+ /* Read on master w/non-txnl cursor. */
+ cursor = masterDb.openCursor(null, null);
+ assertEquals(OperationStatus.SUCCESS,
+ cursor.getSearchKey(key, data, LockMode.DEFAULT));
+ cursor.close();
+
+ /*
+ * Write on replica w/autocommit should fail because writes are
+ * prohibited.
+ */
+ try {
+ OperationStatus status = replicaDb.put(null, key, data);
+ fail("Should have gotten write exception. Status=" +
+ status);
+ } catch (ReplicaWriteException expected) {
+ logger.info("expected " + expected);
+ }
+
+ /* Crash the replicas */
+ replicaDb.close();
+ replicaDb = null;
+ for (RepEnvInfo info : repEnvInfo) {
+ if (info.getEnv() != master) {
+ logger.info("closing " + info.getEnv().getNodeName());
+ info.abnormalCloseEnv();
+ }
+ }
+
+ /* Insert a record. It should block w/insufficient acks. */
+ IntegerBinding.intToEntry(2, key);
+ IntegerBinding.intToEntry(2, data);
+ try {
+ OperationStatus status = masterDb.put(null, key, data);
+ fail("Should have gotten insufficient replicas. Status=" +
+ status);
+ } catch (InsufficientReplicasException expected) {
+ logger.info("expected " + expected);
+ }
+
+ /*
+ * Read the successfully inserted record back on the master even
+ * though all replicas are dead. It should use an autocommit
+ * read-only txn, and should not block due to the dead replicas,
+ * because this is read only.
+ */
+ IntegerBinding.intToEntry(1, key);
+ assertEquals(OperationStatus.SUCCESS,
+ masterDb.get(null, key, data, LockMode.DEFAULT));
+ logger.info("attempt to read on master after replica crash: " +
+ IntegerBinding.entryToInt(data));
+
+ /*
+ * Open the replicas w/a consistency policy that won't be
+ * satisfied.
+ */
+ for (RepEnvInfo info : repEnvInfo) {
+ if (info.getEnv() == null) {
+ info.getRepConfig().setConsistencyPolicy
+ (new RepTestUtils.AlwaysFail());
+ replica = info.openEnv();
+ break;
+ }
+ }
+
+ /*
+ * Test openDatabase with fail consistency check because
+ * env.openDatabase now requires a write lock.
+ */
+ try {
+ replicaDb = replica.openDatabase(null, "Foo", dbConfig);
+ fail("Should have gotten consistency failure.");
+ } catch (ReplicaConsistencyException expected) {
+ logger.info("expected " + expected);
+ }
+
+ /*
+ * Open the database with a TimeConsistencyPolicy so that the
+ * operation succeeds.
+ */
+ TransactionConfig txnConfig = new TransactionConfig();
+ txnConfig.setConsistencyPolicy
+ (new TimeConsistencyPolicy(Integer.MAX_VALUE,
+ TimeUnit.MILLISECONDS, 0, null));
+ Transaction txn = replica.beginTransaction(null, txnConfig);
+ replicaDb = replica.openDatabase(txn, "Foo", dbConfig);
+ txn.commit();
+
+ /* Should fail consistency check: Read on replica w/auto commit. */
+ try {
+ OperationStatus status = replicaDb.get(null, key, data,
+ LockMode.DEFAULT);
+ fail("Should have gotten consistency failure. Status=" +
+ status);
+ } catch (ReplicaConsistencyException expected) {
+ logger.info("expected " + expected);
+ }
+
+ /*
+ * Should fail consistency check: Open cursor on replica
+ * w/non-txnl cursor.
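+ *
+ * Aside on the TimeConsistencyPolicy used above (not in the original
+ * comment): its arguments are (permissibleLag, lagUnit, timeout,
+ * timeoutUnit), so Integer.MAX_VALUE ms of permissible lag effectively
+ * disables the check. A stricter, purely illustrative configuration:
+ *
+ *   txnConfig.setConsistencyPolicy(
+ *       new TimeConsistencyPolicy(500, TimeUnit.MILLISECONDS,
+ *                                 5, TimeUnit.SECONDS));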
+ */ + try { + replicaDb.openCursor(null, null); + fail("Should have gotten consistency failure."); + } catch (ReplicaConsistencyException expected) { + logger.info("expected " + expected); + } + } finally { + masterDb.close(); + + if (replicaDb != null) { + replicaDb.close(); + } + } + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv() != null) { + info.closeEnv(); + } + } + } +} diff --git a/test/com/sleepycat/je/rep/txn/ReplayRecoveryTest.java b/test/com/sleepycat/je/rep/txn/ReplayRecoveryTest.java new file mode 100644 index 0000000..1e60f07 --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/ReplayRecoveryTest.java @@ -0,0 +1,746 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.logging.Logger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ProgressListener; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.SearchFileReader; +import com.sleepycat.je.log.WholeEntry; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.SyncupProgress; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.vlsn.VLSNIndex; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test that uncommitted, unaborted, replicated transactions are recovered and + * resurrected at recovery time. + * - check that this happens both with and without a checkpoint. + * - check that only replicated transactions are resurrected. + * - check that rollbacks are honored and are rolled back at recovery. 
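+ *
+ * For orientation (an illustrative sketch, not from the original source),
+ * a syncup timeline:
+ *
+ *   master log:  ... T1.put  T2.commit  [matchpoint]  T1.put  T3.put ...
+ *   replica log: ... T1.put  T2.commit  [matchpoint]  T4.put  (diverged)
+ *
+ * Replica entries after the matchpoint are rolled back during syncup,
+ * while replicated txns still open at the matchpoint (T1 here) must be
+ * resurrected as active replay txns by a later recovery.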
+ */ +public class ReplayRecoveryTest extends TestBase { + + private final static boolean verbose = Boolean.getBoolean("verbose"); + private ReplicatedEnvironment master; + private ReplicatedEnvironment replica; + + /* Replication tests use multiple environments. */ + private final File envRoot; + + public ReplayRecoveryTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + master = null; + replica = null; + super.setUp(); + } + + @Override + @After + public void tearDown() { + cleanup(); + } + + /** + * @throws InterruptedException + * @throws IOException + */ + @Test + public void testRBRecoveryOneTxn() + throws IOException, InterruptedException { + + doRollbackRecovery(new OneTransactionWorkload()); + } + + @Test + public void testRBRecoveryMultiTxn() + throws IOException, InterruptedException { + + doRollbackRecovery(new MultiTransactionWorkload()); + } + + @Test + public void testRBRecoveryPostMatchpointTxn() + throws IOException, InterruptedException { + + doRollbackRecovery(new PostMatchpointTransaction()); + } + + /** + * Run two nodes. + * Crash replica. + * Restart, recovery, and syncup replica, requiring a partial rollback. + * Crash replica again. Recover replica, requiring recovery w/partial + * rollback + * Compare master and replica txns. + */ + private void doRollbackRecovery(Workload workload) + throws IOException, InterruptedException { + + Logger logger = LoggerUtils.getLoggerFixedPrefix(getClass(), + "Test"); + + RepEnvInfo[] repEnvInfo = null; + + /* Create a 2 node group */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + SyncupListenerTester tester = new SyncupListenerTester(repEnvInfo); + master = RepTestUtils.joinGroup(repEnvInfo); + + /* Do some work */ + Set unfinished = workload.doWork(master); + + /* Make sure both nodes are now up to the same VLSN */ + VLSN lastVLSN = RepInternal.getNonNullRepImpl(master). + getVLSNIndex().getRange().getLast(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, 2, lastVLSN); + + /* + * Crash the replica and then sync up again. The replica will have a + * partial rollback in its log from the syncup. Truncate the log so + * that we remove the replay that has ensued from the syncup. That way, + * we can test that a later recovery executed a redo of the partial + * rollback. + */ + logger.fine("Crash replica"); + RepEnvInfo crashed = crashReplica(repEnvInfo); + logger.fine("Re-open replica"); + replica = crashed.openEnv(); + EnvironmentImpl replicaImpl = DbInternal.getNonNullEnvImpl(replica); + + long rollbackEndLsn = findRollbackEnd(replicaImpl); + String fileName = replicaImpl.getFileManager().getFullFileName + (DbLsn.getFileNumber(rollbackEndLsn), FileManager.JE_SUFFIX); + + /* + * Bounce the replica again. We want to force a recovery that has to + * process the RollbackEnd. The recovery is artificially stopped + * before the replica does a handshake and a syncup, so that we + * can check what active transactions have been created by recovery. 
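+ * (recoverWithoutSyncup below achieves this by opening the node with
+ * RepInternal.createDetachedEnv, so the environment comes up without
+ * joining the group.)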
+ */ + logger.fine("Crash replica again"); + crashed.abnormalCloseEnv(); + truncateLog(rollbackEndLsn, fileName); + logger.fine("Recover with no syncup"); + replica = recoverWithoutSyncup(crashed); + + checkPostRecoveryReplicaTxns(unfinished, true /* checkToMatchpoint */); + tester.checkForTwoSyncups(); + } + + @Test + public void testResurrectionOneTxn() + throws Throwable { + + doResurrection(new OneTransactionWorkload()); + } + + @Test + public void testResurrectionMultiTxn() + throws Throwable { + + doResurrection(new MultiTransactionWorkload()); + } + + @Test + public void testResurrectionPostMatchpointTxn() + throws Throwable { + + doResurrection(new PostMatchpointTransaction()); + } + + /** + * Do work in a two node system, crash the replica and examine the + * resurrected transactions. + */ + /** + * Run two nodes. + * Crash replica. + * Restart and recover replica. + * Compare master and replica txns. + */ + private void doResurrection(Workload workload) + throws Throwable { + + RepEnvInfo[] repEnvInfo = null; + + /* Create a 2 node group */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + try { + master = RepTestUtils.joinGroup(repEnvInfo); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + + /* Do some work, make sure both nodes see all the work. */ + Set unfinished = workload.doWork(master); + + /* Make sure both nodes are now up to the same VLSN */ + VLSN lastVLSN = RepInternal.getNonNullRepImpl(master). + getVLSNIndex().getRange().getLast(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, 2, lastVLSN); + + /* Crash the replica. */ + RepEnvInfo replicaInfo = crashReplica(repEnvInfo); + + /* + * Bring up the replica again and check before joining that the + * resurrected transactions are correct. + */ + replica = recoverWithoutSyncup(replicaInfo); + checkPostRecoveryReplicaTxns(unfinished, + false /* checkToMatchpoint */); + } + + private void checkPostRecoveryReplicaTxns(Set unfinished, + boolean rollbackInRecovery) { + + if (verbose) { + System.out.println("comparing recovered transactions"); + } + + /* + * Create a new set of the replay txns active on the replica. Make a + * set because we're going to remove items from the set as part of the + * verification. + */ + Map testReplays = + RepInternal.getNonNullRepImpl(replica).getReplay(). + getActiveTxns().getMap(); + + /* + * Expect the master and the recovered replica to have the same number + * of replay and unfinished transactions, excluding those that + * were part of a partial rollback. + */ + int expectedCount = 0; + for (Expected e : unfinished) { + if (rollbackInRecovery && e.absentIfRollback) { + continue; + } + expectedCount++; + } + assertEquals("Expected=" + expectedCount + + " actual=" + testReplays.size(), + expectedCount, testReplays.size()); + + for (Expected info : unfinished) { + if ((rollbackInRecovery) && (info.absentIfRollback)) { + continue; + } + + ReplayTxn replayTxn = testReplays.remove(info.transaction.getId()); + + /* Check that the id is the same. */ + assertEquals(info.transaction.getId(), replayTxn.getId()); + + /* + * Check that the number of write locks is as expected. If this + * test is checking partial rollbacks, compare to the matchpoint + * locks. If not, compare to the locks currently in the master. 
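+ * (The comparison uses VLSNs rather than LSNs -- see copyLocks -- since
+ * raw LSNs are file offsets and differ between nodes.)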
+ */ + Set expectedLocks = null; + Txn unfinishedTxn = DbInternal.getTxn(info.transaction); + if (rollbackInRecovery) { + expectedLocks = info.matchpointWriteLockIds; + } else { + expectedLocks = copyLocks(unfinishedTxn); + } + + Set replayTxnWriteLocks = copyLocks(replayTxn); + assertTrue(replayTxnWriteLocks.containsAll(expectedLocks)); + assertTrue("replay " + replayTxnWriteLocks + + " expected = " + expectedLocks, + expectedLocks.containsAll(replayTxnWriteLocks)); + + assertEquals(0, replayTxn.getReadLockIds().size()); + + if (verbose) { + System.out.println("Compare " + replayTxn + + " to " + expectedLocks); + System.out.println("replayWriteLocks= " + + replayTxnWriteLocks); + } + + unfinishedTxn.abort(); + } + assertEquals(testReplays.size() + " txns left in the test set", + 0, testReplays.size()); + } + + /** + * Crash the replica node in a two node system. + * @return the RepEnvInfo for the replica node + */ + private RepEnvInfo crashReplica(RepEnvInfo[] repEnvInfo) { + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if (rep.getState().isMaster()) { + continue; + } + repi.abnormalCloseEnv(); + return repi; + } + return null; + } + + /** + * Recover the replica, but don't let it run syncup. + */ + private ReplicatedEnvironment recoverWithoutSyncup(RepEnvInfo replicaInfo) { + + EnvironmentConfig replicaEnvConfig = replicaInfo.getEnvConfig(); + replicaEnvConfig.setConfigParam + (EnvironmentParams.ENV_CHECK_LEAKS.getName(),"false"); + ReplicatedEnvironment rep = + RepInternal.createDetachedEnv(replicaInfo.getEnvHome(), + replicaInfo.getRepConfig(), + replicaEnvConfig); + + /* + * After a recovery, the vlsnIndex should have been entirely flushed + * to disk. + */ + VLSNIndex vlsnIndex = + RepInternal.getNonNullRepImpl(rep).getVLSNIndex(); + boolean isFlushed = vlsnIndex.isFlushedToDisk(); + if (!isFlushed) { + vlsnIndex.dumpDb(true); + fail("VLSNIndex should have been flushed to disk by recovery"); + } + + return rep; + } + + private long findRollbackEnd(EnvironmentImpl envImpl) { + + /* Ensure that everything is out to disk. */ + FileManager fileManager = envImpl.getFileManager(); + + long startLsn = fileManager.getLastUsedLsn(); + long endLsn = fileManager.getNextLsn(); + envImpl.getLogManager().flushSync(); + + SearchFileReader searcher = + new SearchFileReader(envImpl, + 10000, + false, // forward + startLsn, + endLsn, + LogEntryType.LOG_ROLLBACK_END); + + long targetLsn = 0; + if (searcher.readNextEntry()) { + targetLsn = searcher.getLastLsn(); + } else { + fail("There should be some kind of rollback end in the log."); + } + + assertTrue(targetLsn != 0); + long truncateLsn = searcher.getLastEntrySize() + targetLsn; + return truncateLsn; + } + + /* + * Find the last RollbackEnd and truncate the file directly after that. + */ + private void truncateLog(long lsn, String fileName) + throws IOException { + + RandomAccessFile file = new RandomAccessFile(fileName, "rw"); + long offset = DbLsn.getFileOffset(lsn); + try { + file.getChannel().truncate(offset); + } finally { + file.close(); + } + } + + private void cleanup() { + try { + if (replica != null) { + //DbInternal.getNonNullEnvImpl(replica).abnormalClose(); + replica.close(); + } + } catch (DatabaseException ignore) { + /* ignore txn close leaks. */ + } finally { + replica = null; + } + + try { + if (master != null) { + DbInternal.getNonNullEnvImpl(master).abnormalClose(); + } + } catch (DatabaseException ignore) { + /* ignore txn close leaks. 
*/ + } finally { + master = null; + } + } + + static abstract class Workload { + EntityStore store; + Environment env; + + Set unfinished = new HashSet(); + PrimaryIndex testIndex; + + /** + * @return the set of unfinished transactions after doing work. + */ + abstract Set doWork(ReplicatedEnvironment master) + throws DatabaseException; + + void setupStore(ReplicatedEnvironment master) + throws DatabaseException { + + env = master; + StoreConfig config = new StoreConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + try { + store = new EntityStore(env, "foo", config); + testIndex = store.getPrimaryIndex(Integer.class, + TestData.class); + } catch (DatabaseException e) { + if (store != null) { + store.close(); + } + } + } + } + + /** + * One unfinished transaction after the checkpoint. + */ + static class OneTransactionWorkload extends Workload { + + @Override + Set doWork(ReplicatedEnvironment master) + throws DatabaseException { + + setupStore(master); + + try { + Transaction commitTxn = env.beginTransaction(null, null); + Transaction unfinishedTxn = env.beginTransaction(null, null); + + testIndex.put(commitTxn, new TestData(1)); + testIndex.put(unfinishedTxn, new TestData(2)); + + /* This is the matchpoint. */ + commitTxn.commit(); + Set matchpointLocks = copyLocks(unfinishedTxn); + + /* An insert after the matchpoint. */ + testIndex.put(unfinishedTxn, new TestData(3)); + + unfinished.add(new Expected(unfinishedTxn, matchpointLocks)); + } finally { + if (store != null) { + store.close(); + } + } + return unfinished; + } + } + + /** + * Multiple unfinished transactions intermingled with the checkpoint and + * aborts. + */ + static class MultiTransactionWorkload extends Workload { + + @Override + Set doWork(ReplicatedEnvironment master) + throws DatabaseException { + + setupStore(master); + + try { + + Transaction unfinishedA = env.beginTransaction(null, null); + Transaction unfinishedB = env.beginTransaction(null, null); + Transaction commitA = env.beginTransaction(null, null); + Transaction commitB = env.beginTransaction(null, null); + Transaction abortA = env.beginTransaction(null, null); + Transaction abortB = env.beginTransaction(null, null); + + testIndex.put(unfinishedA, new TestData(1)); + testIndex.put(commitA, new TestData(2)); + commitA.commit(); + + testIndex.put(unfinishedA, new TestData(3)); + testIndex.put(abortA, new TestData(4)); + abortA.abort(); + + /* checkpoint ! */ + CheckpointConfig config = new CheckpointConfig(); + config.setForce(true); + env.checkpoint(config); + + testIndex.put(unfinishedB, new TestData(5)); + + + testIndex.put(commitB, new TestData(6)); + commitB.commit(); + + testIndex.put(abortB, new TestData(7)); + + /* Matchpoint */ + abortB.abort(); + Set matchUnALocks = copyLocks(unfinishedA); + Set matchUnBLocks = copyLocks(unfinishedB); + + testIndex.put(unfinishedA, new TestData(8)); + + unfinished.add(new Expected(unfinishedA, matchUnALocks)); + unfinished.add(new Expected(unfinishedB, matchUnBLocks)); + + } finally { + if (store != null) { + store.close(); + } + } + return unfinished; + } + } + + /** + * A transaction that is started after the matchpoint. It should be rolled + * back at syncup, and not recovered. It will be replayed on the replica if + * the master sends it. 
+ */
+ static class PostMatchpointTransaction extends Workload {
+
+ @Override
+ Set<Expected> doWork(ReplicatedEnvironment master)
+ throws DatabaseException {
+
+ setupStore(master);
+
+ try {
+ Transaction commitTxn = env.beginTransaction(null, null);
+ Transaction preMatch = env.beginTransaction(null, null);
+ Transaction postMatch = env.beginTransaction(null, null);
+
+ testIndex.put(commitTxn, new TestData(1));
+ testIndex.put(preMatch, new TestData(2));
+
+ /* This is the matchpoint. */
+ commitTxn.commit();
+ Set<Long> matchpointLocks = copyLocks(preMatch);
+
+ testIndex.put(postMatch, new TestData(3));
+
+ /*
+ * We expect the preMatch transaction to be visible after
+ * a non-rollback and a rollback recovery. The postMatch
+ * txn should only be visible in the recovery w/out a rollback
+ * period.
+ */
+ unfinished.add(new Expected(preMatch, matchpointLocks));
+ unfinished.add(new Expected(postMatch, true));
+ } finally {
+ if (store != null) {
+ store.close();
+ }
+ }
+ return unfinished;
+ }
+ }
+
+ private static Set<Long> copyLocks(Transaction trans) {
+ return copyLocks(DbInternal.getTxn(trans));
+ }
+
+ /**
+ * Convert each lock ID from LSN to VLSN to allow comparison of locks on
+ * different HA nodes.
+ */
+ private static Set<Long> copyLocks(Txn txn) {
+ final LogManager logManager = txn.getEnvironment().getLogManager();
+ final Set<Long> lsns = txn.getWriteLockIds();
+ final Set<Long> vlsns = new HashSet<Long>(lsns.size());
+ for (long lsn : lsns) {
+ final WholeEntry entry;
+ try {
+ entry = logManager.getLogEntryAllowInvisible(lsn);
+ } catch (FileNotFoundException e) {
+ throw new RuntimeException
+ ("LSN " + DbLsn.getNoFormatString(lsn) +
+ " may have been cleaned", e);
+ }
+ VLSN vlsn = entry.getHeader().getVLSN();
+ assertNotNull(vlsn);
+ vlsns.add(vlsn.getSequence());
+ }
+ return vlsns;
+ }
+
+ private class SyncupListenerTester {
+ private final TestListener[] listeners;
+ private final RepEnvInfo[] repEnvInfo;
+
+ SyncupListenerTester(RepEnvInfo[] repEnvInfo) {
+ this.repEnvInfo = repEnvInfo;
+ listeners = new TestListener[repEnvInfo.length];
+ for (int i = 0; i < repEnvInfo.length; i++) {
+ listeners[i] = new TestListener();
+ repEnvInfo[i].getRepConfig().
+ setSyncupProgressListener(listeners[i]);
+ }
+ }
+
+ public void checkForTwoSyncups() {
+ for (int i = 0; i < repEnvInfo.length; i++) {
+ if (repEnvInfo[i].isMaster()) {
+ assertEquals(0, listeners[i].phasesSeen.size());
+ } else {
+ List<SyncupProgress> seen = listeners[i].phasesSeen;
+ assertEquals(7, seen.size());
+ assertEquals(SyncupProgress.FIND_MATCHPOINT, seen.get(0));
+ assertEquals(SyncupProgress.CHECK_FOR_ROLLBACK, seen.get(1));
+ assertEquals(SyncupProgress.END, seen.get(2));
+ assertEquals(SyncupProgress.FIND_MATCHPOINT, seen.get(3));
+ assertEquals(SyncupProgress.CHECK_FOR_ROLLBACK, seen.get(4));
+ assertEquals(SyncupProgress.DO_ROLLBACK, seen.get(5));
+ assertEquals(SyncupProgress.END, seen.get(6));
+ }
+ }
+ }
+
+ private class TestListener implements ProgressListener<SyncupProgress> {
+ List<SyncupProgress> phasesSeen;
+
+ TestListener() {
+ phasesSeen = new ArrayList<SyncupProgress>();
+ }
+
+ public boolean progress(SyncupProgress phase, long n, long total) {
+ phasesSeen.add(phase);
+ return true;
+ }
+ }
+ }
+
+ @Entity
+ static class TestData {
+ @PrimaryKey
+ private int id;
+
+ private int stuff;
+
+ @SuppressWarnings("unused")
+ private TestData() {
+ }
+
+ TestData(int id) {
+ this.id = id;
+ stuff = 10;
+ }
+
+ TestData(int id, int stuff) {
+ this.id = id;
+ this.stuff = stuff;
+ }
+
+ @Override
+ public String toString() {
+ return "id=" + id + " stuff=" + stuff;
+ }
+ }
+
+ /**
+ * Encapsulate which transactions and write locks are expected at the end
+ * of the test.
+ */
+ private static class Expected {
+
+ /*
+ * absentIfRollback is true if this txn would be in a rollback period
+ * if it were run in the doRollbackRecovery method. For example,
+ * transactions that start after a syncup matchpoint are rolled back,
+ * and are not recovered.
+ */
+ final boolean absentIfRollback;
+ final Transaction transaction;
+ final Set<Long> matchpointWriteLockIds;
+
+ Expected(Transaction transaction,
+ Set<Long> matchpointWriteLockIds) {
+ this.transaction = transaction;
+ this.matchpointWriteLockIds = matchpointWriteLockIds;
+ absentIfRollback = false;
+ }
+
+ Expected(Transaction transaction,
+ boolean absentIfRollback) {
+ this.transaction = transaction;
+ this.matchpointWriteLockIds = new HashSet<Long>();
+ this.absentIfRollback = absentIfRollback;
+ }
+ }
+}
diff --git a/test/com/sleepycat/je/rep/txn/RollbackTest.java b/test/com/sleepycat/je/rep/txn/RollbackTest.java
new file mode 100644
index 0000000..ead425a
--- /dev/null
+++ b/test/com/sleepycat/je/rep/txn/RollbackTest.java
@@ -0,0 +1,546 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.txn;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+import org.junit.Test;
+
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.rep.LogOverwriteException;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater;
+import com.sleepycat.je.rep.txn.RollbackWorkload.DatabaseOpsStraddlesMatchpoint;
+import com.sleepycat.je.rep.txn.RollbackWorkload.IncompleteTxnAfterMatchpoint;
+import com.sleepycat.je.rep.txn.RollbackWorkload.IncompleteTxnBeforeMatchpoint;
+import com.sleepycat.je.rep.txn.RollbackWorkload.IncompleteTxnStraddlesMatchpoint;
+import com.sleepycat.je.rep.txn.RollbackWorkload.RemoveDatabaseAfterRollback;
+import com.sleepycat.je.rep.txn.RollbackWorkload.RemoveSomeDatabasesAfterRollback;
+import com.sleepycat.je.rep.txn.RollbackWorkload.SteadyWork;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.rep.vlsn.VLSNIndex;
+import com.sleepycat.je.rep.vlsn.VLSNRange;
+import com.sleepycat.je.util.DbBackup;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Test that a replica can roll back an active replay txn for syncup.
+ * Test cases:
+ * - Replay txn has only logged log entries that follow the syncup matchpoint
+ *   and must be entirely rolled back.
+ *
+ * - Replay txn has only logged log entries that precede the syncup matchpoint
+ *   and does not have to be rolled back at all.
+ *
+ * - Replay txn has logged log entries that both precede and follow the
+ *   syncup matchpoint, and the txn must be partially rolled back.
+ *
+ * TRY: master fails
+ *      replica fails
+ *
+ * The txn should have
+ * - inserts
+ * - delete
+ * - reuse of a BIN slot
+ * - intermediate versions within the same txn (the same record is modified
+ *   multiple times within the txn).
+ */
+public class RollbackTest extends TestBase {
+
+    private final Logger logger;
+    private final boolean verbose = Boolean.getBoolean("verbose");
+
+    /* Replication tests use multiple environments. */
+    private final File envRoot;
+
+    public RollbackTest() {
+        envRoot = SharedTestUtils.getTestDir();
+        logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test");
+    }
+
+    @Override
+    public void setUp()
+        throws Exception {
+        super.setUp();
+        RepTestUtils.removeRepEnvironments(envRoot);
+    }
+
+    @Override
+    public void tearDown() {
+        /* Restore static var in case other tests will run in this process.
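+         * (JUnit may run many test classes in one JVM, so a leaked static
+         * here would silently suppress group DB updates for unrelated
+         * tests.)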
*/ + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(false); + } + + @Test + public void testDbOpsRollback() + throws Throwable { + + try { + masterDiesAndRejoins(new DatabaseOpsStraddlesMatchpoint()); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testTxnEndBeforeMatchpoint() + throws Throwable { + + masterDiesAndRejoins(new IncompleteTxnBeforeMatchpoint()); + } + + @Test + public void testTxnEndAfterMatchpoint() + throws Throwable { + + masterDiesAndRejoins(new IncompleteTxnAfterMatchpoint()); + } + + @Test + public void testTxnStraddleMatchpoint() + throws Throwable { + + masterDiesAndRejoins(new IncompleteTxnStraddlesMatchpoint()); + } + + @Test + public void testRemoveDatabaseAfterRollback() + throws Throwable { + + masterDiesAndRejoins(new RemoveDatabaseAfterRollback()); + } + + @Test + public void testRemoveSomeDatabasesAfterRollback() + throws Throwable { + + masterDiesAndRejoins(new RemoveSomeDatabasesAfterRollback()); + } + + // TODO: why all the rollbacks when the master never changes? + @Test + public void testReplicasFlip() + throws Throwable { + + replicasDieAndRejoin(new SteadyWork(), 10); + } + + /* + * Test the API: RepImpl.setBackupProhibited would disable the DbBackup in + * DbBackup.startBackup, may be caused by Replay.rollback(). + */ + @Test + public void testRollingBackDbBackupAPI() + throws Throwable { + + RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + RepImpl repImpl = RepInternal.getNonNullRepImpl(master); + + DbBackup backupHelper = new DbBackup(master); + repImpl.setBackupProhibited(true); + + try { + backupHelper.startBackup(); + fail("Should throw out a LogOverwriteException here."); + } catch (LogOverwriteException e) { + /* Expect a LogOverwriteException here. */ + } + + repImpl.setBackupProhibited(false); + try { + backupHelper.startBackup(); + backupHelper.endBackup(); + } catch (Exception e) { + fail("Shouldn't get an exception here."); + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /* + * Test the API: RepImpl.invalidateDbBackups would disable the DbBackup + * at endBackup, may be caused by Replay.rollback(). + */ + @Test + public void testRollBackInvalidateDbBackup() + throws Exception { + + RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + final RepImpl repImpl = RepInternal.getNonNullRepImpl(master); + + DbBackup backupHelper = new DbBackup(master); + backupHelper.startBackup(); + + backupHelper.setTestHook(new TestHook() { + public void doHook() { + repImpl.invalidateBackups(8L); + } + + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + + public void doIOHook() { + throw new UnsupportedOperationException(); + } + + public void hookSetup() { + throw new UnsupportedOperationException(); + } + + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + }); + + try { + backupHelper.endBackup(); + fail("Should throw out a LogOverwriteException here."); + } catch (LogOverwriteException e) { + /* Expect to get a LogOverwriteException here. */ + } finally { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /** + * Create 3 nodes and replicate operations. + * Kill off the master, and make the other two resume. This will require + * a syncup and a rollback of any operations after the matchpoint. 
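+ * A rough sketch of the sequence driven below (time flows downward; the
+ * txn names are illustrative):
+ *
+ *   node1 (master):  commit txnA; leave txnB uncommitted
+ *   node1 crashes;   node2 is elected master and does new work
+ *   node1 restarts:  syncup finds the matchpoint at txnA's commit,
+ *                    rolls back txnB, then replays node2's txns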
+ */ + private void masterDiesAndRejoins(RollbackWorkload workload) + throws Throwable { + + RepEnvInfo[] repEnvInfo = null; + + try { + /* Create a 3 node group */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + logger.severe("master=" + master); + + /* Run a workload against the master. */ + workload.beforeMasterCrash(master); + + /* + * Disable group DB updates from this point onward, to ensure they + * don't interfere with rollbacks and so we can easily check for an + * uncommitted txn at the end of the log (this is done by + * ensureDistinctLastAndSyncVLSN). + */ + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(true); + + /* + * Sync up the group and check that all nodes have the same + * contents. This first workload must end with in-progress, + * uncommitted transactions. + */ + VLSN lastVLSN = VLSN.NULL_VLSN; + if (workload.noLockConflict()) { + lastVLSN = checkIfWholeGroupInSync(master, repEnvInfo, + workload); + } + + /* + * Crash the master, find a new master. + */ + RepEnvInfo oldMaster = + repEnvInfo[RepInternal.getNodeId(master) - 1]; + master = crashMasterAndElectNewMaster(master, repEnvInfo); + RepEnvInfo newMaster = + repEnvInfo[RepInternal.getNodeId(master) - 1]; + logger.severe("newmaster=" + master); + RepEnvInfo alwaysReplica = null; + for (RepEnvInfo info : repEnvInfo) { + if ((info != oldMaster) && (info != newMaster)) { + alwaysReplica = info; + break; + } + } + + /* + * Check that the remaining two nodes only contain committed + * updates. + * TODO: check that the number of group members is 2. + */ + assertTrue(workload.containsSavedData(master)); + RepTestUtils.checkNodeEquality(lastVLSN, verbose, repEnvInfo); + + /* + * Do some work against the new master, while the old master is + * asleep. Note that the first workload may have contained + * in-flight transactions, so this may result in the rollback of + * some transactions in the first workload. + */ + workload.afterMasterCrashBeforeResumption(master); + + /* + * The intent of this test is that the work after crash will end on + * an incomplete txn. Check for that. + */ + lastVLSN = ensureDistinctLastAndSyncVLSN(master, repEnvInfo); + + /* Now bring up the old master. */ + logger.info("Bring up old master"); + oldMaster.openEnv(); + + logger.info("Old master joined"); + RepTestUtils.syncGroupToVLSN(repEnvInfo, repEnvInfo.length, + lastVLSN); + logger.info("Old master synced"); + + /* + * Check that all nodes only contain committed updates. + */ + workload.releaseDbLocks(); + assertTrue(workload.containsSavedData(master)); + RepTestUtils.checkNodeEquality(lastVLSN, verbose, repEnvInfo); + + /* + * Now crash the node that has never been a master. Do some work + * without it, then recover that node, then do a verification + * check. This exercises the recovery of a log that has syncups in + * it. + */ + alwaysReplica.abnormalCloseEnv(); + workload.afterReplicaCrash(master); + + lastVLSN = RepInternal.getNonNullRepImpl(master).getVLSNIndex(). + getRange().getLast(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, 2, lastVLSN); + alwaysReplica.openEnv(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, 3, lastVLSN); + assertTrue(workload.containsSavedData(master)); + RepTestUtils.checkNodeEquality(lastVLSN, verbose, repEnvInfo); + RepTestUtils.checkUtilizationProfile(repEnvInfo); + + workload.close(); + + /* + * We're done with the test. Bringing down these replicators + * forcibly, without closing transactions and whatnot. 
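+ * abnormalCloseEnv simulates a crash (no checkpoint, no txn cleanup), so
+ * the restartGroup call below must recover through any rollbacks left in
+ * the recovery interval of the log.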
+ */ + for (RepEnvInfo repi : repEnvInfo) { + repi.abnormalCloseEnv(); + } + + /* + * Open and verify the environments one last time, to ensure that + * rollbacks in the recovery interval don't cause problems. + */ + master = RepTestUtils.restartGroup(repEnvInfo); + lastVLSN = RepInternal.getNonNullRepImpl(master).getVLSNIndex(). + getRange().getLast(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, 3, lastVLSN); + RepTestUtils.checkNodeEquality(lastVLSN, verbose, repEnvInfo); + + /* Final close. */ + for (RepEnvInfo repi : repEnvInfo) { + repi.closeEnv(); + } + } catch (Throwable e) { + e.printStackTrace(); + throw e; + } + } + + /** + * Since the master never dies in this test, no rollbacks should occur, + * but no data should be lost either. + * + * TODO: Should the workload param be of a different class (not a + * RollbackWorkload), since its masterSteadyWork method is only called + * here? + */ + private void replicasDieAndRejoin(RollbackWorkload workload, + int numIterations) + throws Throwable { + + RepEnvInfo[] repEnvInfo = null; + + try { + /* Create a 3 node group. Assign identities. */ + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3); + ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo); + logger.severe("master=" + master); + + RepEnvInfo replicaA = null; + RepEnvInfo replicaB = null; + + for (RepEnvInfo info : repEnvInfo) { + if (info.getEnv().getState().isMaster()) { + continue; + } + + if (replicaA == null) { + replicaA = info; + } else { + replicaB = info; + } + } + + /* + * For the sake of easy test writing, make sure numIterations is an + * even number. + */ + assertTrue((numIterations % 2) == 0); + replicaA.abnormalCloseEnv(); + for (int i = 0; i < numIterations; i++) { + workload.masterSteadyWork(master); + waitForReplicaToSync(master, repEnvInfo); + if ((i % 2) == 0) { + flushLogAndCrash(replicaB); + replicaA.openEnv(); + } else { + flushLogAndCrash(replicaA); + replicaB.openEnv(); + } + waitForReplicaToSync(master, repEnvInfo); + } + replicaA.openEnv(); + + VLSN lastVLSN = + RepInternal.getNonNullRepImpl(master).getVLSNIndex(). + getRange().getLast(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, + repEnvInfo.length, + lastVLSN); + + assertTrue(workload.containsAllData(master)); + RepTestUtils.checkNodeEquality(lastVLSN, verbose, repEnvInfo); + + workload.close(); + for (RepEnvInfo repi : repEnvInfo) { + /* + * We're done with the test. Bringing down these replicators + * forcibly, without closing transactions and whatnot. + */ + repi.abnormalCloseEnv(); + } + } catch (Throwable e) { + e.printStackTrace(); + throw e; + } + } + + private void flushLogAndCrash(RepEnvInfo replica) { + DbInternal.getNonNullEnvImpl(replica.getEnv()). + getLogManager().flushSync(); + replica.abnormalCloseEnv(); + } + + /** + * Syncup the group and check for these requirements: + * - the master has all the data we expect + * - the replicas have all the data that is on the master. + + * - the last VLSN is not a sync VLSN. We want to ensure that the + * matchpoint is not the last VLSN, so the test will need to do rollback + * @throws InterruptedException + * @return lastVLSN on the master + */ + private VLSN checkIfWholeGroupInSync(ReplicatedEnvironment master, + RepEnvInfo[] repEnvInfo, + RollbackWorkload workload) + throws InterruptedException { + + /* + * Make sure we're testing partial rollbacks, and that the replication + * stream is poised at a place where the last sync VLSN != lastVLSN. 
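+ * (Matchpoint candidates are "sync" entries, loosely txn commits/aborts;
+ * if the stream happened to end on one, syncup could match at the very
+ * tip and this test would not exercise a rollback at all.)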
+ */ + VLSN lastVLSN = ensureDistinctLastAndSyncVLSN(master, repEnvInfo); + + RepTestUtils.syncGroupToVLSN(repEnvInfo, repEnvInfo.length, lastVLSN); + + /* + * All nodes in the group should have the same data, and it should + * consist of committed and uncommitted updates. + */ + assertTrue(workload.containsAllData(master)); + + RepTestUtils.checkNodeEquality(lastVLSN, verbose, repEnvInfo); + + lastVLSN = ensureDistinctLastAndSyncVLSN(master, repEnvInfo); + + return lastVLSN; + } + + /* Just check if the replica is in sync. */ + private void waitForReplicaToSync(ReplicatedEnvironment master, + RepEnvInfo[] repEnvInfo) + throws InterruptedException { + + VLSN lastVLSN = RepInternal.getNonNullRepImpl(master).getVLSNIndex(). + getRange().getLast(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, 2, lastVLSN); + } + + /** + * Crash the current master, and wait until the group elects a new one. + */ + private ReplicatedEnvironment + crashMasterAndElectNewMaster(ReplicatedEnvironment master, + RepEnvInfo[] repEnvInfo) { + + int masterIndex = RepInternal.getNodeId(master) - 1; + + logger.info("Crashing " + master.getNodeName()); + repEnvInfo[masterIndex].abnormalCloseEnv(); + + logger.info("Rejoining"); + ReplicatedEnvironment newMaster = + RepTestUtils.openRepEnvsJoin(repEnvInfo); + + logger.info("New master = " + newMaster.getNodeName()); + return newMaster; + } + + /** + * In this test, we often want to check that the last item in the + * replicated stream is not a matchpoint candidate (that VLSNRange.lastVLSN + * != VLSNRange.lastSync) There's nothing wrong intrinsically with that + * being so, it's just that this test is trying to ensure that we test + * partial rollbacks. + * @return lastVLSN + * @throws InterruptedException + */ + private VLSN ensureDistinctLastAndSyncVLSN(ReplicatedEnvironment master, + RepEnvInfo[] repEnvInfo) + throws InterruptedException { + + VLSNIndex vlsnIndex = + RepInternal.getNonNullRepImpl(master).getVLSNIndex(); + VLSNRange range = vlsnIndex.getRange(); + VLSN lastVLSN = range.getLast(); + VLSN syncVLSN = range.getLastSync(); + if (lastVLSN.equals(syncVLSN)) { + master.flushLog(false); // for debugging using the data log + fail("lastVLSN = " + lastVLSN + " syncVLSN = " + syncVLSN); + } + return lastVLSN; + } +} diff --git a/test/com/sleepycat/je/rep/txn/RollbackToMatchpointTest.java b/test/com/sleepycat/je/rep/txn/RollbackToMatchpointTest.java new file mode 100644 index 0000000..d25e118 --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/RollbackToMatchpointTest.java @@ -0,0 +1,1044 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.txn; + +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.logging.Logger; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; + +/* + * Rollback to a matchpoint, used for HA. + * + * The test follows a common pattern dictated by runWorkload. Different tests + * use workloads that vary whether duplicates and custom comparators are used. + * + * Rollback functionality is only available on a ReplayTxn. To make testing + * simpler, a wrapper Transaction class is used, for ease of setting up test + * scenarios. The TestWrapperTransaction lets us artificially uses a ReplayTxn + * for api operations, so we don't need to run two nodes to create the database + * needed. + */ +public class RollbackToMatchpointTest extends RepTestBase { + private long TEST_TXN_ID = -1000; + private final File envHome; + private Environment env; + private Database dbA; + private Database dbB; + private final boolean verbose = Boolean.getBoolean("verbose"); + private Logger logger; + + public RollbackToMatchpointTest() { + envHome = SharedTestUtils.getTestDir(); + groupSize = 1; + } + + @Override + @Before + public void setUp() + throws Exception { + + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + super.setUp(); + } + + /** + * Test transaction locking and releasing. + */ + @Test + public void testBasicRollback() + throws Exception { + + Workload w = new BasicWorkload(); + runWorkload(w); + } + + @Test + public void testCustomBtreeComparator() + throws Exception { + + Workload w = new CustomBtreeComparatorWorkload(); + runWorkload(w); + } + + @Test + public void testDups() + throws Exception { + + Workload w = new DupWorkload(); + runWorkload(w); + } + + // TODO: add testing for + // database operations when that's implemented. 
+    // public void testDatabaseOperations
+    // public void testDupCustomComparator
+
+    private void runWorkload(Workload workload)
+        throws Exception {
+        if (verbose) {
+            System.out.println("Workload = " + workload);
+        }
+
+        workload.openEnvironment();
+
+        rollbackStepByStep(workload, false /* retransmit */);
+        rollbackStepByStep(workload, true /* retransmit */);
+        rollbackEntireTransaction(workload);
+        doAbort(workload);
+
+        workload.closeEnvironment();
+    }
+
+    /**
+     * Create a transaction with the workload and roll it back to a given
+     * point. Systematically repeat in order to roll back to all possible
+     * points.
+     * This test does some inserts and deletes to create an initial data
+     * context. Then it enters a loop which executes a to-be-rolled back
+     * transaction encompassing inserts and updates. The loop rolls back the
+     * transaction mix systematically entry by entry in order to exercise a
+     * full mix of rollbacks.
+     *
+     * @param retransmit if true, the workload re-executes the rolled back
+     * entries, to simulate how a master would re-send the replication stream.
+     */
+    private void rollbackStepByStep(Workload workload, boolean retransmit)
+        throws DatabaseException {
+
+        int matchpointIndex = 0;
+        int numTxnOperations = 0;
+
+        do {
+            workload.openStore();
+            Set<TestData> committedData = workload.setupInitialData();
+            if (verbose && false) {
+                System.out.println("------- committed ----------");
+                System.out.println(committedData);
+            }
+
+            TestWrapperTransaction wrapperTransaction = makeTransaction();
+
+            /*
+             * The workHistory is used to find points to roll back to. The
+             * loop iterates over all points in the work history in order to
+             * fully test all combinations.
+             */
+            List<TestData> workHistory = workload.doWork(wrapperTransaction);
+            Set<TestData> beforeRollbackContents = dumpStore(workload);
+            numTxnOperations = workHistory.size();
+
+            /* Choose a point to roll back to. */
+            long matchpointLsn = workHistory.get(matchpointIndex).loggedLsn;
+
+            if (verbose) {
+                System.out.println("\ndump store before rollback - " +
+                                   beforeRollbackContents);
+
+                System.out.println("Rollback transaction " +
+                                   wrapperTransaction.getId() + " to step " +
+                                   matchpointIndex + " " +
+                                   DbLsn.getNoFormatString(matchpointLsn));
+                System.out.println("history=" + workHistory);
+            }
+
+            /* Rollback to the specified matchpoint. */
+            ReplayTxn replayTxn = wrapperTransaction.getReplayTxn();
+            replayTxn.rollback(matchpointLsn);
+
+            Collection<TestData> expected = expectedData(committedData,
+                                                         workHistory,
+                                                         matchpointIndex);
+            checkContents(expected, workload, "step by step");
+            assertEquals(matchpointLsn, replayTxn.getLastLsn());
+            assertFalse(replayTxn.isClosed());
+
+            ReplicationContext endContext =
+                new ReplicationContext(new VLSN(100));
+            if (retransmit) {
+                workload.simulateRetransmit(workHistory, wrapperTransaction,
+                                            matchpointIndex);
+
+                wrapperTransaction.commit(SyncPolicy.NO_SYNC, endContext, 1);
+                checkContents(beforeRollbackContents, workload, "retransmit");
+            } else {
+
+                /*
+                 * Call abort after the rollback, to make sure that a final
+                 * abort, such as might happen after a master failover, will
+                 * work.
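+                 * The abort must undo whatever survived the partial
+                 * rollback, so the store should be back to exactly
+                 * committedData (checked below).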
+ */ + wrapperTransaction.abort(endContext, 1 /* masterId */); + checkContents(committedData, workload, "abort"); + } + + workload.closeStore(); + workload.truncateDatabases(); + + matchpointIndex++; + } while (matchpointIndex < numTxnOperations); + } + + private void checkContents(Collection expected, + Workload workload, + String label) + throws DatabaseException { + + /* + * Do a dirty read to see what's there, and check against the + * expected results. + */ + Set currentContents = dumpStore(workload); + if (verbose){ + System.out.println(" ==> Check for " + label); + System.out.println("current = " + currentContents); + System.out.println("expected = " + expected); + } + + assertEquals("expected=" + expected + " current=" + + currentContents, + expected.size(), currentContents.size()); + assertTrue("expected=" + expected + " current=" + + currentContents, + expected.containsAll(currentContents)); + } + + /* + * For completeness, run the transaction and abort it without doing any + * rollback. + */ + private void doAbort(Workload workload) + throws DatabaseException { + + workload.openStore(); + Set committedData = workload.setupInitialData(); + TestWrapperTransaction wrapperTransaction = makeTransaction(); + workload.doWork(wrapperTransaction); + wrapperTransaction.abort(new ReplicationContext(new VLSN(100)), + 1 /* masterId */); + checkContents(committedData, workload, "doAbort"); + workload.closeStore(); + workload.truncateDatabases(); + } + + /** + * Rollback to a matchpoint earlier than anything in the transaction. + * Should be the equivalent of abort(). + */ + private void rollbackEntireTransaction(Workload workload) + throws DatabaseException { + + workload.openStore(); + Set committedData = workload.setupInitialData(); + + TestWrapperTransaction wrapperTransaction = makeTransaction(); + workload.doWork(wrapperTransaction); + ReplayTxn txn = (ReplayTxn) DbInternal.getTxn(wrapperTransaction); + txn.rollback(txn.getFirstActiveLsn() - 1); + + /* Check the transaction chain. */ + assertEquals(DbLsn.NULL_LSN, txn.getLastLsn()); + + /* Check the number of locks. */ + StatGroup stats = txn.collectStats(); + assertEquals(0, stats.getInt(LOCK_WRITE_LOCKS)); + + checkContents(committedData, workload, "entire"); + + /* + * When the transaction is entirely rolled back, it's closed + * and deregistered. + */ + assertTrue(txn.isClosed()); + + workload.closeStore(); + } + + /** + * Dump the store using dirty reads. + * @throws DatabaseException + */ + private Set dumpStore(Workload w) + throws DatabaseException { + + Set resultSet = new HashSet(); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + Cursor cursor = dbA.openCursor(null, CursorConfig.READ_UNCOMMITTED); + try { + while (cursor.getNext(key, data, LockMode.READ_UNCOMMITTED) == + OperationStatus.SUCCESS) { + TestData result = w.unmarshall(dbA, key, data); + resultSet.add(result); + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + + cursor = dbB.openCursor(null, CursorConfig.READ_UNCOMMITTED); + try { + while (cursor.getNext(key, data, LockMode.READ_UNCOMMITTED) == + OperationStatus.SUCCESS) { + TestData result = w.unmarshall(dbB, key, data); + resultSet.add(result); + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + + return resultSet; + } + + /** + * Calculate what the data set should be after the rollback. 
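+     * For example (hypothetical values): committed = {10/1} and workHistory =
+     * [put 20/1, delete 10/1, put 30/5] with matchpointIndex = 1. Operations
+     * 0..1 survive, so the expected contents are just {20/1}.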
+ */ + private Collection expectedData(Set committedData, + List workHistory, + int matchpointIndex) { + + /* + * Use a map of id->TestData to calculate the expected set so we can + * mimic updates. + */ + Map expected = new HashMap(); + for (TestData t : committedData) { + expected.put(t.getHashMapKey(), t); + } + + for (TestData t : workHistory.subList(0, matchpointIndex + 1)) { + if (t.isDeleted) { + TestData removed = expected.remove(t.getHashMapKey()); + assert removed != null; + } else { + expected.put(t.getHashMapKey(), t); + } + } + return expected.values(); + } + + TestWrapperTransaction makeTransaction() + throws DatabaseException { + + ReplayTxn replayTxn = + new ReplayTxn(DbInternal.getNonNullEnvImpl(env), + TransactionConfig.DEFAULT, + (TEST_TXN_ID--), + logger); + + return new TestWrapperTransaction(env, replayTxn); + } + + /** + * The test needs a Transaction to use for regular operations like put() + * and get(). But the rollback functionality is only available for a + * ReplayTxn. The TestWrapperTransaction wraps up a ReplayTxn within a + * Transaction solely for test purposes. This way, we can set up rollback + * situations in a single node, without replication, which is easier to + * do. But we can also call ReplayTxn.rollback(), which is usually + * only executed on a Replica. + */ + class TestWrapperTransaction extends Transaction { + + private final ReplayTxn replayTxn; + + TestWrapperTransaction(Environment env, ReplayTxn replayTxn) { + super(env, replayTxn); + this.replayTxn = replayTxn; + } + + public long commit(SyncPolicy syncPolicy, + ReplicationContext replayContext, + int masterNodeId) + throws DatabaseException { + + return replayTxn.commit(syncPolicy, replayContext, masterNodeId, + 1 /* DTVLSN */); + } + + public long abort(ReplicationContext replayContext, + int masterNodeId) + throws DatabaseException { + + return replayTxn.abort(replayContext, masterNodeId, + 1 /* DTVLSN */); + } + + ReplayTxn getReplayTxn() { + return replayTxn; + } + } + + /** + * Embodies the data set for each test case. Different data sets vary + * whether the database uses duplicates or not, custom comparators, etc. + */ + abstract class Workload { + + /** + * Open the environment and database. Different workloads try + * different database configurations. + */ + abstract void openStore() + throws DatabaseException; + + void closeStore() { + try { + if (dbA != null) { + dbA.close(); + dbA = null; + } + + if (dbB != null) { + dbB.close(); + dbB = null; + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * Truncate the databases in order to reset the context for repeated + * execution of the workload. + */ + void truncateDatabases() { + env.truncateDatabase(null, "testA", false /* returnCount */); + env.truncateDatabase(null, "testB", false /* returnCount */); + } + + void openEnvironment() + throws DatabaseException { + + env = RepTestUtils.joinGroup(repEnvInfo); + } + + void closeEnvironment() { + try { + if (env != null) { + env.close(); + env = null; + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + abstract Set setupInitialData() + throws DatabaseException; + + abstract List doWork(Transaction trans) + throws DatabaseException; + + /** + * Read the log to find the lsns of this transactions's entries. We + * need those to find the candidate matchpoints. 
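+         * Each transactional LN entry records the txn's previous LSN, so the
+         * loop below walks the chain newest to oldest, roughly:
+         *
+         *   txn.getLastLsn() -> lsnN -> ... -> lsn0 -> NULL_LSN
+         *
+         * which is why workHistory is filled in from its last index backward.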
+ */ + void findLsns(Txn txn, List workHistory) + throws DatabaseException { + + long entryLsn = txn.getLastLsn(); + int lastIndex = workHistory.size() - 1; + LogManager logManager = + DbInternal.getNonNullEnvImpl(env).getLogManager(); + + /* Troll through LN_TX entries and save their lsns. */ + while (entryLsn != DbLsn.NULL_LSN) { + LNLogEntry undoEntry = (LNLogEntry) + logManager.getLogEntryHandleFileNotFound(entryLsn); + + /* + * Skip any DupCountLNs, they don't correspond to + * the application operations tracked in workHistory. + */ + if (!undoEntry.getLogType().equals + (LogEntryType.LOG_DUPCOUNTLN_TRANSACTIONAL)) { + workHistory.get(lastIndex).loggedLsn = entryLsn; + lastIndex--; + } + + entryLsn = undoEntry.getUserTxn().getLastLsn(); + } + } + + void putData(Database db, Transaction t, TestData testData) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(testData.id, key); + IntegerBinding.intToEntry(testData.data, data); + OperationStatus status = db.put(t, key, data); + assertEquals(OperationStatus.SUCCESS, status); + } + + void deleteData(Database db, Transaction t, TestData testData) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(testData.id, key); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(testData.data, data); + Cursor c = db.openCursor(t, null); + OperationStatus status = c.getSearchBoth(key, data, LockMode.RMW); + assertEquals(OperationStatus.SUCCESS, status); + status = c.delete(); + assertEquals(OperationStatus.SUCCESS, status); + c.close(); + } + + /* + * Well, if we could use the DPL, we wouldn't have to have + * this method! Unmarshall the data into the expected return object. + */ + abstract TestData unmarshall(Database db, + DatabaseEntry key, + DatabaseEntry data); + + void simulateRetransmit(List workHistory, + Transaction transaction, + int matchpointIndex) + throws DatabaseException { + + int startPoint = matchpointIndex + 1; + for (int i = startPoint; i < workHistory.size(); i++) { + TestData t = workHistory.get(i); + if (t.isDeleted) { + deleteData(t.db, transaction, t); + } else { + putData(t.db, transaction, t); + } + } + } + } + + /** + * Exercise a non-dup, default comparator database. + */ + class BasicWorkload extends Workload { + @Override + void openStore() + throws DatabaseException { + + try { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbA = env.openDatabase(null, "testA", dbConfig); + dbB = env.openDatabase(null, "testB", dbConfig); + } catch (DatabaseException e) { + e.printStackTrace(); + throw e; + } + } + + @Override + Set setupInitialData() + throws DatabaseException { + + /* + * Make sure that there's a delete and an overwrite in the + * initial data. These exercise BIN slot re-use scenarios. + */ + putData(dbA, null, new TestData(dbA, 10, 1)); + putData(dbA, null, new TestData(dbA, 20, 1)); + putData(dbA, null, new TestData(dbA, 20, 2)); + putData(dbA, null, new TestData(dbA, 30, -1)); + + putData(dbB, null, new TestData(dbB, 20, 1)); + + /* + * DeleteData uses a cursor in order to delete precisely this + * record, and that needs a non-null transaction. 
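+             * (Cursor.getSearchBoth also pins the exact key/data pair, which
+             * matters once the dup workloads store several records under one
+             * key.)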
+ */ + Transaction t = env.beginTransaction(null, null); + deleteData(dbA, t, new TestData(dbA, 10, 1)); + t.commit(); + + return dumpStore(this); + } + + /** + * If we used the DPL, we wouldn't need to have this unmarshall + * method, because the store would automatically know what type of + * object to return. + */ + @Override + TestData unmarshall(Database db, + DatabaseEntry key, + DatabaseEntry data) { + + return new TestData(db, + IntegerBinding.entryToInt(key), + IntegerBinding.entryToInt(data)); + } + + /** + * Do a mix of inserts, updates, and deletes. For inserts instigated + * by this transaction, make sure these cases are covered: + * - first record for this key + * - key exists, was previously deleted by another txn. + * - key is overwritten. + * @throws DatabaseException + */ + @Override + List doWork(Transaction trans) + throws DatabaseException { + + List workHistory = new ArrayList(); + + /* Insert new record. */ + TestData t = new TestData(dbA, 30, 1, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Insert new record, reusing a slot from previous txn. */ + t = new TestData(dbA, 10, 2, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Update record created in this txn. */ + t = new TestData(dbA, 30, 2, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* delete a record created outside this txn. */ + t = new TestData(dbA, 20, 2, true); + deleteData(dbA, trans, t); + workHistory.add(t); + + /* delete a record created inside this txn. */ + t = new TestData(dbA, 30, 2, true); + deleteData(dbA, trans, t); + workHistory.add(t); + + /* Insert new record, reusing a slot from this txn. */ + t = new TestData(dbA, 30, 10, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Update record created in this txn. */ + t = new TestData(dbA, 30, 11, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Insert a new record into another database */ + t = new TestData(dbB, 30, 11, false); + putData(dbB, trans, t); + workHistory.add(t); + + findLsns(DbInternal.getTxn(trans), workHistory); + return workHistory; + } + } + + /** + * Exercise a non-dup, default comparator database. + */ + class DupWorkload extends BasicWorkload { + @Override + void openStore() + throws DatabaseException { + + try { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + dbA = env.openDatabase(null, "testA", dbConfig); + dbB = env.openDatabase(null, "testB", dbConfig); + } catch (DatabaseException e) { + e.printStackTrace(); + throw e; + } + } + + @Override + Set setupInitialData() + throws DatabaseException { + + putData(dbA, null, new DupData(dbA, 10, 1)); + putData(dbA, null, new DupData(dbA, 20, 1)); + putData(dbA, null, new DupData(dbA, 20, 2)); + putData(dbA, null, new DupData(dbA, 30, -1)); + + /* + * DeleteData uses a cursor in order to delete precisely this + * record, and that needs a non-null transaction. + */ + Transaction t = env.beginTransaction(null, null); + deleteData(dbA, t, new DupData(dbA, 10, 1)); + t.commit(); + return dumpStore(this); + } + + @Override + List doWork(Transaction trans) + throws DatabaseException { + + List workHistory = new ArrayList(); + + /* Insert new record. */ + TestData t = new DupData(dbA, 30, 1, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Insert new record, reusing a slot from previous txn. 
*/ + t = new DupData(dbA, 10, 2, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Update record created in this txn. */ + t = new DupData(dbA, 30, 2, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* delete a record created outside this txn. */ + t = new DupData(dbA, 20, 2, true); + deleteData(dbA, trans, t); + workHistory.add(t); + + /* delete a record created inside this txn. */ + t = new DupData(dbA, 30, 2, true); + deleteData(dbA, trans, t); + workHistory.add(t); + + /* Insert new record, reusing a slot from this txn. */ + t = new DupData(dbA, 30, 10, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Update record created in this txn. */ + t = new DupData(dbA, 30, 11, false); + putData(dbA, trans, t); + workHistory.add(t); + + findLsns(DbInternal.getTxn(trans), workHistory); + return workHistory; + } + + @Override + TestData unmarshall(Database db, + DatabaseEntry key, + DatabaseEntry data) { + + return new DupData(db, + IntegerBinding.entryToInt(key), + IntegerBinding.entryToInt(data)); + } + } + + /** + */ + class CustomBtreeComparatorWorkload extends Workload { + @Override + void openStore() + throws DatabaseException { + + try { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setBtreeComparator(new RoundTo2Comparator()); + dbA = env.openDatabase(null, "testA", dbConfig); + dbB = env.openDatabase(null, "testB", dbConfig); + } catch (DatabaseException e) { + e.printStackTrace(); + throw e; + } + } + + @Override + Set setupInitialData() + throws DatabaseException { + + /* 10 and 11 equate to the same record. */ + putData(dbA, null, new RoundTo2Data(dbA, 10, 1)); + putData(dbA, null, new RoundTo2Data(dbA, 11, 1)); + + /* 20 and 21 equate to the same record. */ + putData(dbA, null, new RoundTo2Data(dbA, 20, 1)); + putData(dbA, null, new RoundTo2Data(dbA, 21, 2)); + + putData(dbA, null, new RoundTo2Data(dbA, 30, -1)); + /* + * DeleteData uses a cursor in order to delete precisely this + * record, and that needs a non-null transaction. + */ + Transaction t = env.beginTransaction(null, null); + deleteData(dbA, t, new RoundTo2Data(dbA, 10, 1)); + t.commit(); + + /* Expect to see (20,2) and (30,-1) */ + return dumpStore(this); + } + + /** + * Do a mix of inserts, updates, and deletes. For inserts instigated + * by this transaction, make sure these cases are covered: + * - first record for this key + * - key exists, was previously deleted by another txn. + * - key is overwritten. + * @throws DatabaseException + */ + @Override + List doWork(Transaction trans) + throws DatabaseException { + + List workHistory = new ArrayList(); + + /* Update an existing record, (30,-1). */ + TestData t = new RoundTo2Data(dbA, 31, 99, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Insert new record, reusing a slot from previous txn. */ + t = new RoundTo2Data(dbA, 10, 77, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Update 10,77 */ + t = new RoundTo2Data(dbA, 11, 7, false); + putData(dbA, trans, t); + workHistory.add(t); + + /* Delete a record. 
+             */
+            t = new RoundTo2Data(dbA, 10, 7, true);
+            deleteData(dbA, trans, t);
+            workHistory.add(t);
+
+            /* New record, 10,200 */
+            t = new RoundTo2Data(dbA, 10, 200, false);
+            putData(dbA, trans, t);
+            workHistory.add(t);
+
+            findLsns(DbInternal.getTxn(trans), workHistory);
+            return workHistory;
+        }
+
+        /**
+         * If we used the DPL, we wouldn't need to have this unmarshall
+         * method, because the store would automatically know what type of
+         * object to return.
+         */
+        @Override
+        TestData unmarshall(Database db,
+                            DatabaseEntry key,
+                            DatabaseEntry data) {
+
+            return new RoundTo2Data(db,
+                                    IntegerBinding.entryToInt(key),
+                                    IntegerBinding.entryToInt(data));
+        }
+    }
+
+    /**
+     * Ideally we'd use the DPL and store TestData directly, but this
+     * test needs to exercise duplicates and custom comparators, and therefore
+     * uses the base API.
+     */
+    static class TestData {
+
+        int id;    // key
+        int data;  // data
+        Database db;
+
+        /* The loggedLsn is the location for this piece of test data. */
+        transient
+        long loggedLsn;
+
+        /* isDeleted is true if delete() was called, false if put() was called. */
+        transient
+        boolean isDeleted;
+
+        TestData() {
+            isDeleted = false;
+        }
+
+        TestData(Database db, int id, int data, boolean isDeleted) {
+            this.id = id;
+            this.data = data;
+            this.isDeleted = isDeleted;
+            this.db = db;
+        }
+
+        TestData(Database db, int id, int data) {
+            this.id = id;
+            this.data = data;
+            this.isDeleted = false;
+            this.db = db;
+        }
+
+        @Override
+        public String toString() {
+            String val = "\n" + id + "/" + data;
+            if (isDeleted) {
+                val += " (deleted)";
+            }
+
+            if (loggedLsn != 0) {
+                val += " lsn=" + DbLsn.getNoFormatString(loggedLsn);
+            }
+
+            val += " dbName=" + db.getDatabaseName();
+            return val;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other instanceof TestData) {
+                TestData t = (TestData) other;
+                return ((t.id == id) &&
+                        (t.data == data) &&
+                        (t.isDeleted == isDeleted) &&
+                        (t.db.getDatabaseName().equals(db.getDatabaseName())));
+            }
+
+            return false;
+        }
+
+        @Override
+        public int hashCode() {
+            return (id + data + db.getDatabaseName().hashCode());
+        }
+
+        int getHashMapKey() {
+            return id + db.getDatabaseName().hashCode();
+        }
+    }
+
+    static class DupData extends TestData {
+
+        DupData(Database db, int id, int data, boolean isDeleted) {
+            super(db, id, data, isDeleted);
+        }
+
+        DupData(Database db, int id, int data) {
+            super(db, id, data);
+        }
+
+        @Override
+        int getHashMapKey() {
+            return id + data;
+        }
+    }
+
+    @SuppressWarnings("serial")
+    static class RoundTo2Comparator
+        implements Comparator<byte[]>, Serializable {
+        public int compare(byte[] a, byte[] b) {
+            int valA = new TupleInput(a).readInt();
+            int valB = new TupleInput(b).readInt();
+
+            int roundA = RoundTo2Data.roundTo2(valA);
+            int roundB = RoundTo2Data.roundTo2(valB);
+
+            return roundA - roundB;
+        }
+    }
+
+    /**
+     * Test data which uses a partial btree comparator.
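+     * RoundTo2Comparator above compares keys rounded down to an even number,
+     * so e.g. roundTo2(10) == roundTo2(11) == 10: keys 10 and 11 share one
+     * Btree slot, and a put under key 11 overwrites the record put at 10.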
+ */ + static class RoundTo2Data extends TestData { + + public RoundTo2Data(Database db, int id, int data, boolean isDeleted) { + super(db, id, data, isDeleted); + } + + public RoundTo2Data(Database db, int id, int data) { + super(db, id, data); + } + + static int roundTo2(int i) { + return (i & 0xfffffffe); + } + + @Override + public boolean equals(Object other) { + if (other instanceof TestData) { + TestData t = (TestData) other; + + return ((roundTo2(t.id) == roundTo2(id)) && + (t.data == data) && + (t.isDeleted == isDeleted)); + } + + return false; + } + + @Override + public int hashCode() { + return (roundTo2(id) + data); + } + + @Override + public String toString() { + return "Rd:" + super.toString(); + } + + @Override + int getHashMapKey() { + return roundTo2(id); + } + } +} diff --git a/test/com/sleepycat/je/rep/txn/RollbackWorkload.java b/test/com/sleepycat/je/rep/txn/RollbackWorkload.java new file mode 100644 index 0000000..8fdd065 --- /dev/null +++ b/test/com/sleepycat/je/rep/txn/RollbackWorkload.java @@ -0,0 +1,787 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.txn; + +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; + +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.txn.Utils.RollbackData; +import com.sleepycat.je.rep.txn.Utils.SavedData; +import com.sleepycat.je.rep.txn.Utils.TestData; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; + +/** + * A RollbackWorkload is a pattern of data operations designed to test the + * permutations of ReplayTxn rollback. + * + * Each workload defines a set of operations that will happen before and after + * the syncup matchpoint. The workload knows what should be rolled back and + * and what should be preserved, and can check the results. Concrete workload + * classes add themselves to the static set of WorkloadCombinations, and the + * RollbackTest generates a test case for each workload element. + */ +abstract class RollbackWorkload { + + private static final String TEST_DB = "testdb"; + private static final String DB_NAME_PREFIX = "persist#" + TEST_DB + "#"; + private static final String TEST_DB2 = "testdb2"; + private static final String DB_NAME_PREFIX2 = "persist#" + TEST_DB2 + "#"; + + private static final boolean verbose = Boolean.getBoolean("verbose"); + + final Set saved; + final Set rolledBack; + + /* + * Most tests use only a single store. A second store may be used when + * multiple databases are needed, but be careful not to use the same key in + * both stores, since the code that dumps the stores and compares records + * uses only the record key. 
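+     * (dumpData below merges both stores into a single Set, so a key
+     * collision would make two distinct records compare as equal.)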
+ */ + private EntityStore store; + private EntityStore store2; + PrimaryIndex testIndex; + PrimaryIndex testIndex2; + + final Random rand = new Random(10); + + RollbackWorkload() { + saved = new HashSet<>(); + rolledBack = new HashSet<>(); + } + + boolean isMasterDiesWorkload() { + return true; + } + + void masterSteadyWork(ReplicatedEnvironment master) { + } + + void beforeMasterCrash(ReplicatedEnvironment master) + throws DatabaseException { + } + + void afterMasterCrashBeforeResumption(ReplicatedEnvironment master) + throws DatabaseException { + } + + void afterReplicaCrash(ReplicatedEnvironment master) + throws DatabaseException { + } + + boolean noLockConflict() { + return true; + } + + void releaseDbLocks() { + } + + boolean openStore(ReplicatedEnvironment replicator, boolean readOnly) { + store = openStoreByName(replicator, TEST_DB, readOnly); + if (store == null) { + testIndex = null; + return false; + } + testIndex = store.getPrimaryIndex(Long.class, TestData.class); + return true; + } + + boolean openStore2(ReplicatedEnvironment replicator, boolean readOnly) { + store2 = openStoreByName(replicator, TEST_DB2, readOnly); + if (store2 == null) { + testIndex2 = null; + return false; + } + testIndex2 = store2.getPrimaryIndex(Long.class, TestData.class); + return true; + } + + EntityStore openStoreByName(ReplicatedEnvironment replicator, + String storeName, + boolean readOnly) { + if (readOnly) { + String catalogDbName = + "persist#" + storeName + "#com.sleepycat.persist.formats"; + if (!replicator.getDatabaseNames().contains(catalogDbName)) { + return null; + } + } + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + config.setReadOnly(readOnly); + return new EntityStore(replicator, storeName, config); + } + + /** + * Dump all the data out of the test db on this replicator. Use + * READ_UNCOMMITTED so we can see the data for in-flight transactions. 
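+     * A default-isolation read would block on the write locks still held
+     * by those open transactions.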
+ */ + Set dumpData(ReplicatedEnvironment replicator) + throws DatabaseException { + + Set dumpedData = new HashSet<>(); + + if (openStore(replicator, true /* readOnly */)) { + dumpedData.addAll(dumpIndexData(testIndex, replicator)); + } + + if (openStore2(replicator, true /* readOnly */)) { + dumpedData.addAll(dumpIndexData(testIndex2, replicator)); + } + + close(); + + if (verbose) { + System.out.println("Replicator " + replicator.getNodeName()); + displayDump(dumpedData); + } + + return dumpedData; + } + + Set dumpIndexData(PrimaryIndex index, + ReplicatedEnvironment replicator) { + + Transaction txn = replicator.beginTransaction(null, null); + + EntityCursor cursor = + index.entities(txn, CursorConfig.READ_UNCOMMITTED); + + Set dumpedData = new HashSet<>(); + + for (TestData t : cursor) { + dumpedData.add(t); + } + + cursor.close(); + txn.commit(); + + return dumpedData; + } + + private void displayDump(Set data) { + for (TestData t : data) { + System.out.println(t); + } + } + + void close() throws DatabaseException { + if (store != null) { + store.close(); + store = null; + testIndex = null; + } + if (store2 != null) { + store2.close(); + store2 = null; + testIndex2 = null; + } + } + + void removeStore(ReplicatedEnvironment master, + String dbNamePrefix) { + for (String dbName : master.getDatabaseNames()) { + if (dbName.startsWith(dbNamePrefix)) { + master.removeDatabase(null, dbName); + } + } + } + + boolean containsAllData(ReplicatedEnvironment replicator) + throws DatabaseException { + + Set dataInStore = dumpData(replicator); + if (!checkSubsetAndRemove(dataInStore, saved, "saved")) { + return false; + } + if (!checkSubsetAndRemove(dataInStore, rolledBack, "rollback")) { + return false; + } + if (dataInStore.size() == 0) { + return true; + } + if (verbose) { + System.out.println("DataInStore has an unexpected " + + "remainder: " + dataInStore); + } + return false; + } + + boolean containsSavedData(ReplicatedEnvironment replicator) + throws DatabaseException { + + Set dataInStore = dumpData(replicator); + if (!checkSubsetAndRemove(dataInStore, saved, "saved")) { + return false; + } + if (dataInStore.size() == 0) { + return true; + } + if (verbose) { + System.out.println("DataInStore has an unexpected " + + "remainder: " + dataInStore); + } + return false; + } + + private boolean checkSubsetAndRemove(Set dataInStore, + Set subset, + String checkType) { + if (dataInStore.containsAll(subset)) { + /* + * Doesn't work, why? + * boolean removed = dataInStore.removeAll(subset); + */ + for (TestData t: subset) { + boolean removed = dataInStore.remove(t); + assert removed; + } + return true; + } + + if (verbose) { + System.out.println("DataInStore didn't contain " + + " subset " + checkType + + ". DataInStore=" + dataInStore + + " subset = " + subset); + } + return false; + } + + void insertRandom(PrimaryIndex index, + Transaction txn, + Set addToSet) { + assertTrue(addToSet == saved || addToSet == rolledBack); + int payload = rand.nextInt(); + TestData data = (addToSet == saved) ? + new SavedData(payload) : new RollbackData(payload); + /* Must call put() to assign primary key before adding to set. */ + index.put(txn, data); + addToSet.add(data); + } + + /** + * This workload rolls back an unfinished transaction which is entirely + * after the matchpoint. It tests a complete undo. 
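+     * The log this produces looks roughly like:
+     *
+     *   [matchpointTxn writes][matchpointTxn commit = matchpoint][rollbackTxn writes]
+     *
+     * Everything after the commit belongs to the uncommitted txn, so syncup
+     * must undo all of it.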
+ */ + static class IncompleteTxnAfterMatchpoint extends RollbackWorkload { + + IncompleteTxnAfterMatchpoint() { + super(); + } + + @Override + void beforeMasterCrash(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction matchpointTxn = master.beginTransaction(null, null); + insertRandom(testIndex, matchpointTxn, saved); + insertRandom(testIndex, matchpointTxn, saved); + + /* This commit will serve as the syncup matchpoint */ + matchpointTxn.commit(); + + /* This data is in an uncommitted txn, it will be rolled back. */ + Transaction rollbackTxn = master.beginTransaction(null, null); + insertRandom(testIndex, rollbackTxn, rolledBack); + insertRandom(testIndex, rollbackTxn, rolledBack); + insertRandom(testIndex, rollbackTxn, rolledBack); + insertRandom(testIndex, rollbackTxn, rolledBack); + + close(); + } + + /** + * The second workload should have a fewer number of updates from the + * incomplete, rolled back transaction from workloadBeforeNodeLeaves, + * so that we can check that the vlsn sequences have been rolled back + * too. This work is happening while the crashed node is still down. + */ + @Override + void afterMasterCrashBeforeResumption(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction whileAsleepTxn = master.beginTransaction(null, null); + insertRandom(testIndex, whileAsleepTxn, saved); + whileAsleepTxn.commit(); + + Transaction secondTxn = master.beginTransaction(null, null); + insertRandom(testIndex, secondTxn, saved); + close(); + } + + @Override + void afterReplicaCrash(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction whileReplicaDeadTxn = + master.beginTransaction(null, null); + insertRandom(testIndex, whileReplicaDeadTxn, saved); + whileReplicaDeadTxn.commit(); + + Transaction secondTxn = master.beginTransaction(null, null); + insertRandom(testIndex, secondTxn, saved); + close(); + } + } + + /** + * This workload creates an unfinished transaction in which all operations + * exist before the matchpoint. It should be preserved, and then undone + * by an abort issued by the new master. + */ + static class IncompleteTxnBeforeMatchpoint extends RollbackWorkload { + + IncompleteTxnBeforeMatchpoint() { + super(); + } + + @Override + void beforeMasterCrash(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction matchpointTxn = master.beginTransaction(null, null); + insertRandom(testIndex, matchpointTxn, saved); + insertRandom(testIndex, matchpointTxn, saved); + + /* This data is in an uncommitted txn, it will be rolled back. */ + Transaction rollbackTxnA = master.beginTransaction(null, null); + insertRandom(testIndex, rollbackTxnA, rolledBack); + insertRandom(testIndex, rollbackTxnA, rolledBack); + + /* This commit will serve as the syncup matchpoint */ + matchpointTxn.commit(); + + /* This data is in an uncommitted txn, it will be rolled back. */ + Transaction rollbackTxnB = master.beginTransaction(null, null); + insertRandom(testIndex, rollbackTxnB, rolledBack); + close(); + } + + /** + * The second workload will re-insert some of the data that + * was rolled back. 
+ */ + @Override + void afterMasterCrashBeforeResumption(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction whileAsleepTxn = master.beginTransaction(null, null); + insertRandom(testIndex, whileAsleepTxn, saved); + whileAsleepTxn.commit(); + + Transaction secondTxn = master.beginTransaction(null, null); + insertRandom(testIndex, secondTxn, saved); + close(); + } + + @Override + void afterReplicaCrash(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction whileReplicaDeadTxn = + master.beginTransaction(null, null); + insertRandom(testIndex, whileReplicaDeadTxn, saved); + whileReplicaDeadTxn.commit(); + + Transaction secondTxn = master.beginTransaction(null, null); + insertRandom(testIndex, secondTxn, saved); + close(); + } + } + + /** + * This workload creates an unfinished transaction in which operations + * exist before and after the matchpoint. Only the operations after the + * matchpoint should be rolled back. Ultimately, the rollback transaction + * will be aborted, because the master is down. + */ + static class IncompleteTxnStraddlesMatchpoint extends RollbackWorkload { + + IncompleteTxnStraddlesMatchpoint() { + super(); + } + + @Override + void beforeMasterCrash(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction matchpointTxn = master.beginTransaction(null, null); + insertRandom(testIndex, matchpointTxn, saved); + insertRandom(testIndex, matchpointTxn, saved); + + /* This data is in an uncommitted txn, it will be rolled back. */ + Transaction rollbackTxn = master.beginTransaction(null, null); + insertRandom(testIndex, rollbackTxn, rolledBack); + insertRandom(testIndex, rollbackTxn, rolledBack); + + /* This commit will serve as the syncup matchpoint */ + matchpointTxn.commit(); + + /* This data is in an uncommitted txn, it will be rolled back. */ + insertRandom(testIndex, rollbackTxn, rolledBack); + close(); + } + + /** + * The second workload will re-insert some of the data that + * was rolled back. + */ + @Override + void afterMasterCrashBeforeResumption(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction whileAsleepTxn = master.beginTransaction(null, null); + insertRandom(testIndex, whileAsleepTxn, saved); + whileAsleepTxn.commit(); + + Transaction secondTxn = master.beginTransaction(null, null); + insertRandom(testIndex, secondTxn, saved); + close(); + } + + @Override + void afterReplicaCrash(ReplicatedEnvironment master) + throws DatabaseException { + + openStore(master, false /* readOnly */); + + Transaction whileReplicaDeadTxn = + master.beginTransaction(null, null); + insertRandom(testIndex, whileReplicaDeadTxn, saved); + whileReplicaDeadTxn.commit(); + + Transaction secondTxn = master.beginTransaction(null, null); + insertRandom(testIndex, secondTxn, saved); + close(); + } + } + + /** + * Exercise the rollback of database operations. + */ + static class DatabaseOpsStraddlesMatchpoint extends RollbackWorkload { + + private DatabaseConfig dbConfig; + + private List expectedDbNames; + private List allDbNames; + private Transaction incompleteTxn; + + DatabaseOpsStraddlesMatchpoint() { + super(); + dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + } + + /* Executed by node that is the first master. 
+        @Override
+        void beforeMasterCrash(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            Transaction txn1 = master.beginTransaction(null, null);
+            Transaction txn2 = master.beginTransaction(null, null);
+            Transaction txn3 = master.beginTransaction(null, null);
+
+            Database dbA = master.openDatabase(txn1, "AAA", dbConfig);
+            dbA.close();
+            Database dbB = master.openDatabase(txn2, "BBB", dbConfig);
+            dbB.close();
+
+            /*
+             * This will be the syncpoint.
+             * txn 1 is committed.
+             * txn 2 will be partially rolled back.
+             * txn 3 will be fully rolled back.
+             */
+            txn1.commit();
+
+            /* Txn 2 will have to be partially rolled back. */
+            master.removeDatabase(txn2, "BBB");
+
+            /* Txn 3 will be fully rolled back. */
+            master.removeDatabase(txn3, "AAA");
+
+            expectedDbNames = new ArrayList<>();
+            expectedDbNames.add("AAA");
+
+            allDbNames = new ArrayList<>();
+            allDbNames.add("AAA");
+        }
+
+        /* Executed by the node that was a replica, and then became master. */
+        @Override
+        void afterMasterCrashBeforeResumption(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            Transaction whileAsleepTxn = master.beginTransaction(null, null);
+            Database dbC = master.openDatabase(whileAsleepTxn, "CCC",
+                                               dbConfig);
+            dbC.close();
+            whileAsleepTxn.commit();
+
+            incompleteTxn = master.beginTransaction(null, null);
+            Database dbD = master.openDatabase(incompleteTxn, "DDD", dbConfig);
+            dbD.close();
+
+            expectedDbNames = new ArrayList<>();
+            expectedDbNames.add("AAA");
+            expectedDbNames.add("CCC");
+            expectedDbNames.add("DDD");
+        }
+
+        @Override
+        void releaseDbLocks() {
+            incompleteTxn.commit();
+        }
+
+        /* Executed while the node that has never been a master is asleep. */
+        @Override
+        void afterReplicaCrash(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            Transaction whileReplicaDeadTxn =
+                master.beginTransaction(null, null);
+            master.renameDatabase(whileReplicaDeadTxn,
+                                  "CCC", "renamedCCC");
+            whileReplicaDeadTxn.commit();
+            expectedDbNames = new ArrayList<>();
+            expectedDbNames.add("AAA");
+            expectedDbNames.add("renamedCCC");
+            expectedDbNames.add("DDD");
+        }
+
+        boolean noLockConflict() {
+            return false;
+        }
+
+        @Override
+        boolean containsSavedData(ReplicatedEnvironment master) {
+            List<String> names = master.getDatabaseNames();
+            if (!(names.containsAll(expectedDbNames) &&
+                  expectedDbNames.containsAll(names))) {
+                System.out.println("master names = " + names +
+                                   " expected = " + expectedDbNames);
+                return false;
+            }
+            return true;
+        }
+
+        @Override
+        boolean containsAllData(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            List<String> names = master.getDatabaseNames();
+            return names.containsAll(allDbNames) &&
+                allDbNames.containsAll(names);
+        }
+    }
+
+    /**
+     * An incomplete transaction containing LN writes is rolled back, and then
+     * the database containing those LNs is removed. Recovery of this entire
+     * sequence requires rollback to process the LNs belonging to the removed
+     * database. When this test was written, rollback threw an NPE when such
+     * LNs were encountered (due to the removed database), and a bug fix was
+     * required [#22052].
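+     *
+     * A minimal sketch of the shape of that guard (hypothetical names; the
+     * actual change lives in the internal rollback/undo code, not in this
+     * test):
+     *
+     *   DatabaseImpl undoDb = dbTree.getDb(lnEntry.getDbId());
+     *   if (undoDb == null) {
+     *       // The database was removed; there is nothing to undo for this
+     *       // LN, so skip it instead of dereferencing null.
+     *   } else {
+     *       undo(undoDb, lnEntry);
+     *   }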
+     */
+    static class RemoveDatabaseAfterRollback
+        extends IncompleteTxnAfterMatchpoint {
+
+        @Override
+        void afterReplicaCrash(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            super.afterReplicaCrash(master);
+            removeStore(master, DB_NAME_PREFIX);
+            rolledBack.addAll(saved);
+            saved.clear();
+        }
+    }
+
+    /**
+     * Similar to RemoveDatabaseAfterRollback except that the txn contains LNs
+     * in multiple databases (two in this test) and only some databases (one
+     * in this test), not all, are removed after rollback.
+     *
+     * When undoing an LN in recovery, if the LN is in a removed database,
+     * the undo code will do nothing. This is the case handled by the earlier
+     * test, RemoveDatabaseAfterRollback.
+     *
+     * But when the LN is not in a removed database, the undo must proceed.
+     * The tricky thing is that a TxnChain must be created even though some of
+     * the entries in the actual txn chain (in the data log) will be for LNs
+     * in removed databases.
+     *
+     * This test does not subclass RemoveDatabaseAfterRollback or
+     * IncompleteTxnAfterMatchpoint because those tests only use one database.
+     *
+     * [#22071]
+     */
+    static class RemoveSomeDatabasesAfterRollback extends RollbackWorkload {
+
+        @Override
+        void beforeMasterCrash(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            openStore(master, false /* readOnly */);
+            openStore2(master, false /* readOnly */);
+
+            Transaction matchpointTxn = master.beginTransaction(null, null);
+            insertRandom(testIndex, matchpointTxn, saved);
+            insertRandom(testIndex2, matchpointTxn, saved);
+            insertRandom(testIndex, matchpointTxn, saved);
+            insertRandom(testIndex2, matchpointTxn, saved);
+
+            /* This commit will serve as the syncup matchpoint. */
+            matchpointTxn.commit();
+
+            /*
+             * This data is in an uncommitted txn; it will be rolled back.
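+             *
+             * Illustrative shape of this txn's chain in the data log, with
+             * entries from both databases interleaved (the second store is
+             * the one removed later in afterReplicaCrash):
+             *
+             *   LN(store1) -> LN(store2) -> LN(store1) -> LN(store2)
+             *
+             * Per [#22071], building the TxnChain for rollback must tolerate
+             * the store2 entries even though their database no longer exists.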
+             */
+            Transaction rollbackTxn = master.beginTransaction(null, null);
+            insertRandom(testIndex, rollbackTxn, rolledBack);
+            insertRandom(testIndex2, rollbackTxn, rolledBack);
+            insertRandom(testIndex, rollbackTxn, rolledBack);
+            insertRandom(testIndex2, rollbackTxn, rolledBack);
+
+            close();
+        }
+
+        /**
+         * @see IncompleteTxnAfterMatchpoint#afterMasterCrashBeforeResumption
+         */
+        @Override
+        void afterMasterCrashBeforeResumption(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            openStore(master, false /* readOnly */);
+            openStore2(master, false /* readOnly */);
+
+            Transaction whileAsleepTxn = master.beginTransaction(null, null);
+            insertRandom(testIndex, whileAsleepTxn, saved);
+            insertRandom(testIndex2, whileAsleepTxn, saved);
+            whileAsleepTxn.commit();
+
+            Transaction secondTxn = master.beginTransaction(null, null);
+            insertRandom(testIndex, secondTxn, saved);
+            insertRandom(testIndex2, secondTxn, saved);
+            close();
+        }
+
+        @Override
+        void afterReplicaCrash(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            openStore(master, false /* readOnly */);
+            openStore2(master, false /* readOnly */);
+
+            Transaction whileReplicaDeadTxn =
+                master.beginTransaction(null, null);
+            insertRandom(testIndex, whileReplicaDeadTxn, saved);
+            insertRandom(testIndex2, whileReplicaDeadTxn, saved);
+            whileReplicaDeadTxn.commit();
+
+            Transaction secondTxn = master.beginTransaction(null, null);
+            insertRandom(testIndex, secondTxn, saved);
+            insertRandom(testIndex2, secondTxn, saved);
+
+            Set<TestData> index2Data = dumpIndexData(testIndex2, master);
+            close();
+            removeStore(master, DB_NAME_PREFIX2);
+            rolledBack.addAll(index2Data);
+            saved.removeAll(index2Data);
+        }
+    }
+
+    /**
+     * This workload simulates a master that is just doing a steady stream of
+     * work.
+     */
+    static class SteadyWork extends RollbackWorkload {
+
+        private Transaction straddleTxn = null;
+
+        @Override
+        boolean isMasterDiesWorkload() {
+            return false;
+        }
+
+        @Override
+        void masterSteadyWork(ReplicatedEnvironment master)
+            throws DatabaseException {
+
+            if (straddleTxn != null) {
+                straddleTxn.commit();
+            }
+
+            openStore(master, false /* readOnly */);
+
+            Transaction matchpointTxn = master.beginTransaction(null, null);
+            insertRandom(testIndex, matchpointTxn, saved);
+            insertRandom(testIndex, matchpointTxn, saved);
+
+            /* This transaction straddles the matchpoint. */
+            straddleTxn = master.beginTransaction(null, null);
+            insert();
+            insert();
+
+            /* This commit will serve as the syncup matchpoint. */
+            matchpointTxn.commit();
+
+            for (int i = 0; i < 10; i++) {
+                insert();
+            }
+
+            close();
+        }
+
+        private void insert() {
+            TestData d = new SavedData(12);
+            /* Must call put() to assign primary key before adding to set. */
+            testIndex.put(straddleTxn, d);
+            saved.add(d);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/txn/Utils.java b/test/com/sleepycat/je/rep/txn/Utils.java
new file mode 100644
index 0000000..fe982cc
--- /dev/null
+++ b/test/com/sleepycat/je/rep/txn/Utils.java
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.rep.txn;
+
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+
+/**
+ * Test data for unit tests in this package.
+ *
+ * TestData has an id key, populated by a sequence, and a random-value payload
+ * field. TestData subclasses know whether they are meant to survive the
+ * syncup or not, and put themselves in the appropriate test confirmation set.
+ * These classes are static because the DPL does not support persistent inner
+ * classes.
+ */
+class Utils {
+
+    @Entity
+    abstract static class TestData {
+        @PrimaryKey(sequence="ID")
+        private long id;
+        private int payload;
+
+        TestData(int payload) {
+            this.payload = payload;
+        }
+
+        TestData() {} // for deserialization
+
+        @Override
+        public String toString() {
+            return "id=" + id + " payload=" + payload +
+                " rollback=" + getRollback();
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (other instanceof TestData) {
+                TestData t = (TestData) other;
+                return ((t.id == id) && (t.payload == payload));
+            }
+
+            return false;
+        }
+
+        @Override
+        public int hashCode() {
+            return (int) (id + payload);
+        }
+
+        abstract boolean getRollback();
+    }
+
+    /* SavedData was committed, and should persist past rollbacks. */
+    @Persistent
+    static class SavedData extends TestData {
+
+        SavedData(int payload) {
+            super(payload);
+        }
+
+        @SuppressWarnings("unused")
+        private SavedData() {super();} // for deserialization
+
+        boolean getRollback() {
+            return false;
+        }
+    }
+
+    /* RollbackData was uncommitted, and should disappear after rollbacks. */
+    @Persistent
+    static class RollbackData extends TestData {
+
+        RollbackData(int payload) {
+            super(payload);
+        }
+
+        @SuppressWarnings("unused")
+        private RollbackData() {super();} // for deserialization
+
+        boolean getRollback() {
+            return true;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/util/DbGroupAdminTest.java b/test/com/sleepycat/je/rep/util/DbGroupAdminTest.java
new file mode 100644
index 0000000..bb77f59
--- /dev/null
+++ b/test/com/sleepycat/je/rep/util/DbGroupAdminTest.java
@@ -0,0 +1,738 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.matchers.JUnitMatchers.containsString;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Set;
+import java.util.logging.Logger;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.rep.MasterStateException;
+import com.sleepycat.je.rep.MemberNotFoundException;
+import com.sleepycat.je.rep.NodeState;
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.ReplicaStateException;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.ReplicationNode;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.RepNodeImpl;
+import com.sleepycat.je.rep.impl.node.MasterTransferTest;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/*
+ * A unit test that tests the DbGroupAdmin utility and also the utilities
+ * provided by ReplicationGroupAdmin.
+ */
+public class DbGroupAdminTest extends TestBase {
+    private final File envRoot;
+    private RepEnvInfo[] repEnvInfo;
+    private final Logger logger;
+    private boolean useRepNetConfig;
+
+    public DbGroupAdminTest() {
+        envRoot = SharedTestUtils.getTestDir();
+        logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test");
+    }
+
+    @Override
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3);
+        ReplicationNetworkConfig repNetConfig = RepTestUtils.readRepNetConfig();
+        useRepNetConfig = !repNetConfig.getChannelType().isEmpty();
+    }
+
+    @Override
+    @After
+    public void tearDown() {
+        RepTestUtils.shutdownRepEnvs(repEnvInfo);
+    }
+
+    /*
+     * Test the removeMember and deleteMember behavior of DbGroupAdmin.
+     * Because DbGroupAdmin invokes ReplicationGroupAdmin, this also tests
+     * ReplicationGroupAdmin.
+     *
+     * TODO: When the simple majority is configurable, need to test that a
+     * group becomes electable again when some nodes are removed.
+     */
+    @Test
+    public void testRemoveMember()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+        /* Construct a DbGroupAdmin instance. */
+        DbGroupAdmin dbAdmin =
+            useRepNetConfig ?
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets(),
+                             RepTestUtils.readRepNetConfig()) :
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets());
+
+        /* Removing the master will result in MasterStateException. */
+        try {
+            dbAdmin.removeMember(master.getNodeName());
+            fail("Shouldn't execute here; expected an exception.");
+        } catch (MasterStateException e) {
+            /* Expected exception. */
+        }
+        try {
+            dbAdmin.deleteMember(master.getNodeName());
+            fail("Shouldn't execute here; expected an exception.");
+        } catch (MasterStateException e) {
+            /* Expected exception. */
+        }
+
+        /*
+         * Removing a non-existent node will result in
+         * MemberNotFoundException.
+         */
+        try {
+            dbAdmin.removeMember("Unknown Node");
+            fail("Removing a non-existent node should fail.");
+        } catch (MemberNotFoundException e) {
+            /* Expected exception. */
+        }
+        try {
+            dbAdmin.deleteMember("Unknown Node");
+            fail("Removing a non-existent node should fail.");
+        } catch (MemberNotFoundException e) {
+            /* Expected exception. */
+        }
+
+        /* Remove a monitor. */
+        RepNodeImpl monitorNode = openMonitor(master);
+        dbAdmin.deleteMember(monitorNode.getName());
+        monitorNode = openMonitor(master);
+        dbAdmin.removeMember(monitorNode.getName());
+
+        /* Attempt to remove a secondary. */
+        final RepEnvInfo secondaryInfo = openSecondaryNode();
+        try {
+            dbAdmin.removeMember(secondaryInfo.getEnv().getNodeName());
+            fail("Expected IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            logger.info("Remove secondary: " + e);
+        }
+        try {
+            dbAdmin.deleteMember(secondaryInfo.getEnv().getNodeName());
+            fail("Expected IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            logger.info("Remove secondary: " + e);
+        }
+
+        final DataChannelFactory channelFactory =
+            DataChannelFactoryBuilder.construct(
+                RepTestUtils.readRepNetConfig());
+
+        /* Check node state for the secondary. */
+        final ReplicationGroupAdmin groupAdmin =
+            new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                                      master.getRepConfig().getHelperSockets(),
+                                      channelFactory);
+
+        final NodeState secondaryNodeState =
+            groupAdmin.getNodeState(
+                groupAdmin.getGroup().getMember(
+                    secondaryInfo.getEnv().getNodeName()),
+                secondaryInfo.getRepConfig().getNodePort());
+        assertEquals(secondaryNodeState.getNodeState(), State.REPLICA);
+
+        /*
+         * Try removing the active Node 3 from the group using the deleteMember
+         * API.
+         */
+        try {
+            dbAdmin.deleteMember(repEnvInfo[2].getEnv().getNodeName());
+            fail("Expected EnvironmentFailureException");
+        } catch (EnvironmentFailureException e) {
+            /* Should fail with an active node. */
+        }
+
+        /* Shut down Node 3 and try again. */
+        final String node3Name = repEnvInfo[2].getEnv().getNodeName();
+        repEnvInfo[2].closeEnv();
+        dbAdmin.deleteMember(node3Name);
+        RepGroupImpl groupImpl = master.getGroup().getRepGroupImpl();
+        assertEquals(groupImpl.getAllElectableMembers().size(), 2);
+
+        /* Add Node 3 back, and then remove it using the removeMember API. */
+        repEnvInfo[2].openEnv();
+        try {
+            dbAdmin.removeMember(repEnvInfo[2].getEnv().getNodeName());
+        } catch (Exception e) {
+            fail("Unexpected exception: " + e);
+        }
+        groupImpl = master.getGroup().getRepGroupImpl();
+        assertEquals(groupImpl.getAllElectableMembers().size(), 2);
+
+        /* Close and delete Node 2 from the group using the main method. */
+        final String node2Name = repEnvInfo[1].getEnv().getNodeName();
+        repEnvInfo[1].closeEnv();
+        try {
+            String[] args;
+            if (useRepNetConfig) {
+                /*
+                 * We don't actually use the repNetConfig here, but we
+                 * do the equivalent through the -netProps argument.
+                 */
+                File propertyFile =
+                    new File(repEnvInfo[0].getEnvHome().getPath(),
+                             "je.properties");
+                args = new String[] {
+                    "-groupName", RepTestUtils.TEST_REP_GROUP_NAME,
+                    "-helperHosts", master.getRepConfig().getNodeHostPort(),
+                    "-netProps", propertyFile.getPath(),
+                    "-deleteMember", node2Name };
+            } else {
+                args = new String[] {
+                    "-groupName", RepTestUtils.TEST_REP_GROUP_NAME,
+                    "-helperHosts", master.getRepConfig().getNodeHostPort(),
+                    "-deleteMember", node2Name };
+            }
+            DbGroupAdmin.main(args);
+        } catch (Exception e) {
+            fail("Unexpected exception: " + e);
+        }
+        groupImpl = master.getGroup().getRepGroupImpl();
+        assertEquals(groupImpl.getAllElectableMembers().size(), 1);
+
+        /* Add Node 2 back and remove it using the main method. */
+        repEnvInfo[1].openEnv();
+        try {
+            String[] args;
+            if (useRepNetConfig) {
+                /*
+                 * We don't actually use the repNetConfig here, but we
+                 * do the equivalent through the -netProps argument.
+                 */
+                File propertyFile =
+                    new File(repEnvInfo[0].getEnvHome().getPath(),
+                             "je.properties");
+                args = new String[] {
+                    "-groupName", RepTestUtils.TEST_REP_GROUP_NAME,
+                    "-helperHosts", master.getRepConfig().getNodeHostPort(),
+                    "-netProps", propertyFile.getPath(),
+                    "-removeMember", repEnvInfo[1].getEnv().getNodeName() };
+            } else {
+                args = new String[] {
+                    "-groupName", RepTestUtils.TEST_REP_GROUP_NAME,
+                    "-helperHosts", master.getRepConfig().getNodeHostPort(),
+                    "-removeMember", repEnvInfo[1].getEnv().getNodeName() };
+            }
+            DbGroupAdmin.main(args);
+        } catch (Exception e) {
+            fail("Unexpected exception: " + e);
+        }
+        groupImpl = master.getGroup().getRepGroupImpl();
+        assertEquals(groupImpl.getAllElectableMembers().size(), 1);
+    }
+
+    /**
+     * Create a secondary node, add it to the end of the repEnvInfo array,
+     * and return it.
+     */
+    private RepEnvInfo openSecondaryNode()
+        throws IOException {
+
+        repEnvInfo = RepTestUtils.setupExtendEnvInfo(repEnvInfo, 1);
+        final RepEnvInfo info = repEnvInfo[repEnvInfo.length - 1];
+        info.getRepConfig().setNodeType(NodeType.SECONDARY);
+        info.openEnv();
+        return info;
+    }
+
+    /**
+     * Create a monitor node, using the node ID and port after the last one in
+     * repEnvInfo, and return it.
+     */
+    private RepNodeImpl openMonitor(final ReplicatedEnvironment master) {
+        final DataChannelFactory channelFactory =
+            DataChannelFactoryBuilder.construct(
+                RepTestUtils.readRepNetConfig());
+
+        final ReplicationGroupAdmin repGroupAdmin =
+            new ReplicationGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                                      master.getRepConfig().getHelperSockets(),
+                                      channelFactory);
+        final int id = repEnvInfo.length + 1;
+        final RepNodeImpl monitorNode =
+            new RepNodeImpl(new NameIdPair("Monitor" + id, id),
+                            NodeType.MONITOR,
+                            "localhost",
+                            5000 + id,
+                            null);
+        repGroupAdmin.ensureMonitor(monitorNode);
+        return monitorNode;
+    }
+
+    /*
+     * Test the mastership transfer behavior of DbGroupAdmin.
+     * @see com.sleepycat.je.rep.impl.node.MasterTransferTest
+     */
+    @Test
+    public void testMastershipTransfer()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+
+        /* Construct a DbGroupAdmin instance. */
+        DbGroupAdmin dbAdmin =
+            useRepNetConfig ?
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets(),
+                             RepTestUtils.readRepNetConfig()) :
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets());
+
+        /* Transfer master to a nonexistent replica. */
+        try {
+            dbAdmin.transferMaster("node 5", "5 s");
+            fail("Shouldn't execute here; expected an exception");
+        } catch (MemberNotFoundException e) {
+            /* Expected exception. */
+        } catch (Exception e) {
+            fail("Unexpected exception: " + e);
+        }
+        assertTrue(master.getState().isMaster());
+
+        /* Transfer master to a monitor. */
+        final RepNodeImpl monitorNode = openMonitor(master);
+        try {
+            dbAdmin.transferMaster(monitorNode.getName(), "1 s");
+            fail("Expected IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            /* Expected exception. */
+        } catch (Exception e) {
+            /* Unexpected exception. */
+            throw e;
+        }
+
+        /* Transfer master to a secondary node. */
+        final RepEnvInfo secondaryInfo = openSecondaryNode();
+        try {
+            dbAdmin.transferMaster(secondaryInfo.getEnv().getNodeName(),
+                                   "1 s");
+            fail("Expected IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            logger.info("Transfer master to secondary: " + e);
+        }
+
+        /* Transfer the mastership to node 1. */
+        PrintStream original = System.out;
+        try {
+            /* Avoid polluting the test output. */
+            System.setOut(new PrintStream(new ByteArrayOutputStream()));
+            dbAdmin.transferMaster(repEnvInfo[1].getEnv().getNodeName(),
+                                   "5 s");
+            MasterTransferTest.awaitSettle(repEnvInfo[0], repEnvInfo[1]);
+        } catch (Exception e) {
+            fail("Unexpected exception: " + LoggerUtils.getStackTrace(e));
+        } finally {
+            System.setOut(original);
+        }
+
+        /* Check the node state. */
+        assertTrue(repEnvInfo[0].isReplica());
+        assertTrue(repEnvInfo[1].isMaster());
+
+        /* Do some database operations to make sure everything is OK. */
+        final String dbName = "testDB";
+        final String dataString = "herococo";
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        Database db =
+            repEnvInfo[1].getEnv().openDatabase(null, dbName, dbConfig);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry();
+        for (int i = 1; i <= 50; i++) {
+            IntegerBinding.intToEntry(i, key);
+            StringBinding.stringToEntry(dataString, data);
+            assertTrue(OperationStatus.SUCCESS == db.put(null, key, data));
+        }
+        db.close();
+
+        RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+
+        /* Check the old master is OK. */
+        db = repEnvInfo[0].getEnv().openDatabase(null, dbName, dbConfig);
+        for (int i = 1; i <= 50; i++) {
+            IntegerBinding.intToEntry(i, key);
+            assertTrue(
+                OperationStatus.SUCCESS == db.get(null, key, data, null));
+            assertTrue(StringBinding.entryToString(data).equals(dataString));
+        }
+        db.close();
+    }
+
+    /* Test the update-address utility of DbGroupAdmin. */
+    @Test
+    public void testUpdateAddress()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+
+        /* Construct a DbGroupAdmin instance. */
+        DbGroupAdmin dbAdmin =
+            useRepNetConfig ?
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets(),
+                             RepTestUtils.readRepNetConfig()) :
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets());
+
+        try {
+            dbAdmin.updateAddress("node 5", "localhost", 5004);
+            fail("Expected MemberNotFoundException");
+        } catch (MemberNotFoundException e) {
+            logger.info("Update address member not found: " + e);
+        }
+        assertTrue(master.getState().isMaster());
+
+        try {
+            dbAdmin.updateAddress(master.getNodeName(), "localhost", 5004);
+            fail("Expected MasterStateException");
+        } catch (MasterStateException e) {
+            logger.info("Update address for master: " + e);
+        }
+        assertTrue(master.getState().isMaster());
+
+        final String nodeName = repEnvInfo[1].getEnv().getNodeName();
+        final File envHome = repEnvInfo[1].getEnvHome();
+        final EnvironmentConfig envConfig = repEnvInfo[1].getEnvConfig();
+        final ReplicationConfig repConfig = repEnvInfo[1].getRepConfig();
+        try {
+            dbAdmin.updateAddress(nodeName, "localhost", 5004);
+            fail("Expected ReplicaStateException");
+        } catch (ReplicaStateException e) {
+            logger.info("Update address for live node: " + e);
+        }
+        assertTrue(master.getState().isMaster());
+        assertTrue(repEnvInfo[1].isReplica());
+
+        /* Attempt to update the address of a secondary. */
+        final RepEnvInfo secondaryInfo = openSecondaryNode();
+        try {
+            dbAdmin.updateAddress(secondaryInfo.getEnv().getNodeName(),
+                                  "localhost", 5004);
+            fail("Expected ReplicaStateException");
+        } catch (ReplicaStateException e) {
+            logger.info("Update address for secondary node: " + e);
+        }
+
+        /*
+         * Update the address by shutting down the environment, and reopening
+         * it with a different address.
+         */
+        secondaryInfo.closeEnv();
+        secondaryInfo.getRepConfig().setNodeHostPort("localhost:5004");
+        secondaryInfo.openEnv();
+
+        /* Clean up. */
+        secondaryInfo.closeEnv();
+
+        /* Shut down the second replica. */
+        repEnvInfo[1].closeEnv();
+        try {
+            dbAdmin.updateAddress(nodeName, "localhost", 5004);
+        } catch (Exception e) {
+            fail("Unexpected exception: " + e);
+        }
+
+        /* Reopen repEnvInfo[1] with the updated configurations. */
+        repConfig.setNodeHostPort("localhost:5004");
+        ReplicatedEnvironment replica =
+            new ReplicatedEnvironment(envHome, repConfig, envConfig);
+        assertTrue(replica.getState().isReplica());
+        assertEquals(replica.getRepConfig().getNodeHostname(),
+                     "localhost");
+        assertEquals(replica.getRepConfig().getNodePort(), 5004);
+        replica.close();
+    }
+
+    /* Test the behavior of ReplicationGroupAdmin. */
+    @Test
+    public void testReplicationGroupAdmin()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+        final DataChannelFactory channelFactory =
+            DataChannelFactoryBuilder.construct(
+                RepTestUtils.readRepNetConfig());
+
+        ReplicationGroupAdmin groupAdmin = new ReplicationGroupAdmin
+            (RepTestUtils.TEST_REP_GROUP_NAME,
+             master.getRepConfig().getHelperSockets(),
+             channelFactory);
+
+        /*
+         * Test the DbPing utility.
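+         *
+         * For reference, DbPing's command-line form (with the flags
+         * exercised by DbPingTest later in this import) is roughly:
+         *
+         *   java com.sleepycat.je.rep.util.DbPing -groupName <group> \
+         *       -nodeName <node> -nodeHost <host:port> -socketTimeout 5000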
+         */
+        RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
+        VLSN commitVLSN = repEnvInfo[0].getRepNode().getCurrentTxnEndVLSN();
+        String groupName = groupAdmin.getGroupName();
+        String masterName = groupAdmin.getMasterNodeName();
+
+        Set<ReplicationNode> replicationNodes =
+            groupAdmin.getGroup().getElectableNodes();
+        for (ReplicationNode replicationNode : replicationNodes) {
+            NodeState nodeState =
+                groupAdmin.getNodeState(replicationNode, 10000);
+            assertEquals(nodeState.getGroupName(), groupName);
+            assertEquals(nodeState.getMasterName(), masterName);
+            assertEquals(nodeState.getJEVersion(), JEVersion.CURRENT_VERSION);
+            /* The expected values depend on the node state. */
+            if (replicationNode.getName().equals(masterName)) {
+                assertEquals(nodeState.getNodeState(), State.MASTER);
+                assertEquals(nodeState.getActiveFeeders(),
+                             repEnvInfo.length - 1);
+                assertEquals(nodeState.getKnownMasterTxnEndVLSN(), 0);
+            } else {
+                assertEquals(nodeState.getNodeState(), State.REPLICA);
+                assertEquals(nodeState.getActiveFeeders(), 0);
+                assertEquals(nodeState.getKnownMasterTxnEndVLSN(),
+                             commitVLSN.getSequence());
+            }
+            assertEquals(nodeState.getCurrentTxnEndVLSN(),
+                         commitVLSN.getSequence());
+            assertEquals(nodeState.getLogVersion(), LogEntryType.LOG_VERSION);
+        }
+
+        /* Check the master name. */
+        assertEquals(master.getNodeName(), groupAdmin.getMasterNodeName());
+
+        /* Check the group information. */
+        RepGroupImpl repGroupImpl = master.getGroup().getRepGroupImpl();
+        assertEquals(repGroupImpl, groupAdmin.getGroup().getRepGroupImpl());
+
+        /* Check the ensureMember utility; no monitors at the beginning. */
+        assertEquals(repGroupImpl.getMonitorMembers().size(), 0);
+
+        openMonitor(master);
+
+        /* Check the group information and monitor after insertion. */
+        repGroupImpl = master.getGroup().getRepGroupImpl();
+        assertEquals(repGroupImpl, groupAdmin.getGroup().getRepGroupImpl());
+        assertEquals(repGroupImpl.getMonitorMembers().size(), 1);
+    }
+
+    /**
+     * Test dumpGroup with just electable nodes.
+     */
+    @Test
+    public void testDumpGroupNoMonitorNoSecondary() {
+        dumpGroup();
+    }
+
+    private String dumpGroup() {
+        final ReplicatedEnvironment master =
+            RepTestUtils.joinGroup(repEnvInfo);
+        final DbGroupAdmin dbAdmin =
+            useRepNetConfig ?
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets(),
+                             RepTestUtils.readRepNetConfig()) :
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets());
+
+        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+        final PrintStream originalOut = System.out;
+        final PrintStream bytesOut = new PrintStream(bytes);
+        try {
+            System.setOut(bytesOut);
+            dbAdmin.dumpGroup();
+        } finally {
+            System.setOut(originalOut);
+        }
+        bytesOut.close();
+        final String dump = bytes.toString();
+        logger.info("Dump group output:\n" + dump);
+        for (final RepEnvInfo info : repEnvInfo) {
+            assertThat(dump, containsString(info.getEnv().getNodeName()));
+        }
+        return dump;
+    }
+
+    /**
+     * Test dumpGroup with monitor and secondary nodes.
+     */
+    @Test
+    public void testDumpGroupMonitorSecondary()
+        throws IOException, InterruptedException {
+
+        final ReplicatedEnvironment master =
+            RepTestUtils.joinGroup(repEnvInfo);
+
+        openSecondaryNode();
+        final RepNodeImpl monitor = openMonitor(master);
+
+        Thread.sleep(2000);
+
+        final String dump = dumpGroup();
+        assertThat(dump, containsString(monitor.getName()));
+    }
+
+    /*
+     * Test the non-default network properties constructor.
+     */
+    @Test
+    public void testNetConfigCtor()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+
+        /*
+         * Construct a DbGroupAdmin instance providing a
+         * ReplicationNetworkConfig object.
+         */
+        DbGroupAdmin dbAdmin =
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets(),
+                             RepTestUtils.readRepNetConfig());
+
+        tryDbGroupAdmin(dbAdmin);
+    }
+
+    /*
+     * Test the non-default net properties file constructor.
+     */
+    @Test
+    public void testNetPropertiesFileCtor()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+
+        File propertyFile = new File(repEnvInfo[0].getEnvHome().getPath(),
+                                     "je.properties");
+
+        /* Construct a DbGroupAdmin instance providing a property file. */
+        DbGroupAdmin dbAdmin =
+            new DbGroupAdmin(RepTestUtils.TEST_REP_GROUP_NAME,
+                             master.getRepConfig().getHelperSockets(),
+                             propertyFile);
+
+        tryDbGroupAdmin(dbAdmin);
+    }
+
+    /*
+     * Support method for the net properties constructor variants. This just
+     * does a basic master transfer test to make sure communication works.
+     */
+    private void tryDbGroupAdmin(DbGroupAdmin dbAdmin)
+        throws Exception {
+
+        assertTrue(repEnvInfo[0].isMaster());
+        assertTrue(repEnvInfo[1].isReplica());
+
+        /* Transfer the mastership to node 1. */
+        PrintStream original = System.out;
+        try {
+            /* Avoid polluting the test output. */
+            System.setOut(new PrintStream(new ByteArrayOutputStream()));
+            dbAdmin.transferMaster(repEnvInfo[1].getEnv().getNodeName(),
+                                   "5 s");
+            MasterTransferTest.awaitSettle(repEnvInfo[0], repEnvInfo[1]);
+        } catch (Exception e) {
+            fail("Unexpected exception: " + LoggerUtils.getStackTrace(e));
+        } finally {
+            System.setOut(original);
+        }
+
+        /* Check the node state. */
+        assertTrue(repEnvInfo[0].isReplica());
+        assertTrue(repEnvInfo[1].isMaster());
+    }
+
+    /*
+     * Test the -netProps command-line argument.
+     */
+    @Test
+    public void testNetPropsCommandLine()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+
+        File propertyFile = new File(repEnvInfo[0].getEnvHome().getPath(),
+                                     "je.properties");
+
+        String[] args = new String[] {
+            "-groupName", RepTestUtils.TEST_REP_GROUP_NAME,
+            "-helperHosts", master.getRepConfig().getNodeHostPort(),
+            "-netProps", propertyFile.getPath(),
+            "-transferMaster", "-force", repEnvInfo[1].getEnv().getNodeName(), "5 s" };
+
+        /* Transfer the mastership to node 1. */
+        PrintStream original = System.out;
+        try {
+            /* Avoid polluting the test output. */
+            System.setOut(new PrintStream(new ByteArrayOutputStream()));
+
+            DbGroupAdmin.main(args);
+
+            MasterTransferTest.awaitSettle(repEnvInfo[0], repEnvInfo[1]);
+        } catch (Exception e) {
+            fail("Unexpected exception: " + LoggerUtils.getStackTrace(e));
+        } finally {
+            System.setOut(original);
+        }
+
+        /* Check the node state. */
+        assertTrue(repEnvInfo[0].isReplica());
+        assertTrue(repEnvInfo[1].isMaster());
+    }
+}
diff --git a/test/com/sleepycat/je/rep/util/DbPingTest.java b/test/com/sleepycat/je/rep/util/DbPingTest.java
new file mode 100644
index 0000000..c81ef9d
--- /dev/null
+++ b/test/com/sleepycat/je/rep/util/DbPingTest.java
@@ -0,0 +1,162 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.PrintStream;
+import java.util.Set;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.rep.NodeState;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.ReplicationNode;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.utilint.RepTestUtils;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/*
+ * A unit test that tests the DbPing utility.
+ */
+public class DbPingTest extends TestBase {
+    private final File envRoot;
+    private RepEnvInfo[] repEnvInfo;
+
+    public DbPingTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    @Override
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1);
+    }
+
+    @Override
+    @After
+    public void tearDown() {
+        RepTestUtils.shutdownRepEnvs(repEnvInfo);
+    }
+
+    /*
+     * Test the function of DbPing when using network properties.
+     */
+    @Test
+    public void testDbPingNetProps()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+        File propertyFile = new File(repEnvInfo[0].getEnvHome().getPath(),
+                                     "je.properties");
+        ReplicationNetworkConfig repNetConfig =
+            RepTestUtils.readRepNetConfig();
+
+        DataChannelFactory channelFactory =
+            DataChannelFactoryBuilder.construct(repNetConfig);
+
+        ReplicationGroupAdmin groupAdmin = new ReplicationGroupAdmin
+            (RepTestUtils.TEST_REP_GROUP_NAME,
+             master.getRepConfig().getHelperSockets(),
+             channelFactory);
+
+        String groupName = groupAdmin.getGroupName();
+
+        Set<ReplicationNode> replicationNodes =
+            groupAdmin.getGroup().getElectableNodes();
+        assertTrue(replicationNodes.size() > 0);
+        for (ReplicationNode replicationNode : replicationNodes) {
+            /*
+             * Test DbPing with network properties set via a configuration
+             * object.
+             */
+            DbPing propsPing = new DbPing(
+                replicationNode, groupName, 10000, repNetConfig);
+            NodeState propsNodeState = propsPing.getNodeState();
+            assertEquals(propsNodeState.getGroupName(), groupName);
+
+            /* Test DbPing with network properties set via a property file. */
+            DbPing filePing = new DbPing(
+                replicationNode, groupName, 10000, propertyFile);
+            NodeState fileNodeState = filePing.getNodeState();
+            assertEquals(fileNodeState.getGroupName(), groupName);
+
+            /* Test DbPing with an explicit channel factory. */
+            DbPing factoryPing = new DbPing(
+                replicationNode, groupName, 10000, channelFactory);
+            NodeState factoryNodeState = factoryPing.getNodeState();
+            assertEquals(factoryNodeState.getGroupName(), groupName);
+        }
+    }
+
+    /*
+     * Test the -netProps command-line argument.
+     */
+    @Test
+    public void testDbPingNetPropsCommandLine()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+        File propertyFile = new File(repEnvInfo[0].getEnvHome().getPath(),
+                                     "je.properties");
+
+        ReplicationGroupAdmin groupAdmin = new ReplicationGroupAdmin
+            (RepTestUtils.TEST_REP_GROUP_NAME,
+             master.getRepConfig().getHelperSockets(),
+             RepTestUtils.readRepNetConfig());
+
+        Set<ReplicationNode> replicationNodes =
+            groupAdmin.getGroup().getElectableNodes();
+        assertTrue(replicationNodes.size() > 0);
+        for (ReplicationNode replicationNode : replicationNodes) {
+
+            String[] args = new String[] {
+                "-groupName", RepTestUtils.TEST_REP_GROUP_NAME,
+                "-nodeName", replicationNode.getName(),
+                "-nodeHost", master.getRepConfig().getNodeHostPort(),
+                "-netProps", propertyFile.getPath(),
+                "-socketTimeout", "5000" };
+
+            /* Ping the node. */
+            PrintStream original = System.out;
+            try {
+                /* Avoid polluting the test output. */
+                System.setOut(new PrintStream(new ByteArrayOutputStream()));
+
+                DbPing.main(args);
+
+            } catch (Exception e) {
+                fail("Unexpected exception: " + LoggerUtils.getStackTrace(e));
+            } finally {
+                System.setOut(original);
+            }
+        }
+    }
}
diff --git a/test/com/sleepycat/je/rep/util/EnableRenameTest.java b/test/com/sleepycat/je/rep/util/EnableRenameTest.java
new file mode 100644
index 0000000..a88bd1c
--- /dev/null
+++ b/test/com/sleepycat/je/rep/util/EnableRenameTest.java
@@ -0,0 +1,156 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.util;
+
+import java.util.Properties;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.dbi.DbConfigManager;
+import com.sleepycat.je.rep.InsufficientLogException;
+import com.sleepycat.je.rep.NetworkRestore;
+import com.sleepycat.je.rep.NetworkRestoreConfig;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo;
+
+/**
+ * Regression test for SR #21537, which was first reported on the OTN forum.
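+ *
+ * In outline, the regression scenario exercised below is:
+ *   1. Create a standalone environment and a database in it.
+ *   2. Convert the environment with DbEnableReplication.
+ *   3. Rename the database on the master.
+ *   4. Bring up a replica (via network restore if needed) and open the
+ *      database under its new name.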
+ */ +public class EnableRenameTest extends RepTestBase { + public static final String NEW_DB_NAME = TEST_DB_NAME + "2"; + + @Before + public void setUp() + throws Exception { + + groupSize = 2; + super.setUp(); + } + + @Test + public void testEnableWithRename() throws Exception { + + RepEnvInfo master = repEnvInfo[0]; + Properties temp = new Properties(); + DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(), + temp, true); + if ("true".equals + (temp.get("je.rep.preserveRecordVersion"))) { + // TODO: enable this and fix the JE bug + return; + } + + Environment env = new Environment(master.getEnvHome(), + master.getEnvConfig()); + Database db = env.openDatabase(null, TEST_DB_NAME, dbconfig); + db.close(); + env.close(); + + ReplicationConfig masterConf = + master.getRepConfig(); + DbEnableReplication enabler = + new DbEnableReplication(master.getEnvHome(), + masterConf.getGroupName(), + masterConf.getNodeName(), + masterConf.getNodeHostPort()); + enabler.convert(); + + restartNodes(master); + ReplicatedEnvironment masterEnv = master.getEnv(); + masterEnv.renameDatabase(null, TEST_DB_NAME, NEW_DB_NAME); + + ReplicatedEnvironment replicaEnv = null; + try { + replicaEnv = openRepEnv(); + } catch (InsufficientLogException ile) { + NetworkRestore restore = new NetworkRestore(); + restore.execute(ile, new NetworkRestoreConfig()); + replicaEnv = openRepEnv(); + } + DatabaseConfig dc2 = new DatabaseConfig(); + dc2.setReadOnly(true); + dc2.setTransactional(true); + + try { + db = replicaEnv.openDatabase(null, NEW_DB_NAME, dc2); + db.close(); + } finally { + replicaEnv.close(); + } + } + + @Test + public void testWorkaround() throws Exception { + // same as above, except start new replica before doing the rename + RepEnvInfo master = repEnvInfo[0]; + + Properties temp = new Properties(); + DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(), + temp, true); + if ("true".equals + (temp.get("je.rep.preserveRecordVersion"))) { + // TODO: enable this and fix the JE bug + return; + } + + Environment env = new Environment(master.getEnvHome(), + master.getEnvConfig()); + Database db = env.openDatabase(null, TEST_DB_NAME, dbconfig); + db.close(); + env.close(); + + ReplicationConfig masterConf = + master.getRepConfig(); + DbEnableReplication enabler = + new DbEnableReplication(master.getEnvHome(), + masterConf.getGroupName(), + masterConf.getNodeName(), + masterConf.getNodeHostPort()); + enabler.convert(); + + restartNodes(master); + ReplicatedEnvironment replicaEnv = null; + try { + replicaEnv = openRepEnv(); + } catch (InsufficientLogException ile) { + NetworkRestore restore = new NetworkRestore(); + restore.execute(ile, new NetworkRestoreConfig()); + replicaEnv = openRepEnv(); + } + + ReplicatedEnvironment masterEnv = master.getEnv(); + masterEnv.renameDatabase(null, TEST_DB_NAME, NEW_DB_NAME); + + DatabaseConfig dc2 = new DatabaseConfig(); + dc2.setReadOnly(true); + dc2.setTransactional(true); + db = replicaEnv.openDatabase(null, NEW_DB_NAME, dc2); + + db.close(); + replicaEnv.close(); + } + + private ReplicatedEnvironment openRepEnv() throws Exception { + RepEnvInfo replica = repEnvInfo[1]; + return new ReplicatedEnvironment(replica.getEnvHome(), + replica.getRepConfig(), + replica.getEnvConfig()); + } +} diff --git a/test/com/sleepycat/je/rep/util/EnvConvertTest.java b/test/com/sleepycat/je/rep/util/EnvConvertTest.java new file mode 100644 index 0000000..9cfc90d --- /dev/null +++ b/test/com/sleepycat/je/rep/util/EnvConvertTest.java @@ -0,0 +1,617 @@ +/*- + * Copyright (C) 2002, 2017, 
Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import static com.sleepycat.je.rep.impl.RepParams.DEFAULT_PORT; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.List; +import java.util.Properties; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationMutableConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.WaitForMasterListener; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class EnvConvertTest extends TestBase { + private final boolean verbose = Boolean.getBoolean("verbose"); + private final File envRoot; + private final String DB_NAME = "test"; + private final String EMPTY_DB = "emptyDB"; + private final int dbSize = 100; + private EnvironmentConfig envConfig; + private DatabaseConfig dbConfig; + private RepEnvInfo[] repEnvInfo; + + public EnvConvertTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + + envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setReadOnly(false); + + dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setReadOnly(false); + } + + @After + public void tearDown() { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + @Test + public void testOpenSequence() + throws Exception { + + Properties temp = new Properties(); + DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(), + temp, true); + if ("true".equals + (temp.get("je.rep.preserveRecordVersion"))) { + // TODO: enable this and fix the JE bug + return; + } + + /* Create, populate, and close a standalone environment. */ + openStandaloneEnvAndInsertData(); + + /* Open a r/w standalone Environment on it. 
+        Environment env = null;
+
+        try {
+            env = new Environment(repEnvInfo[0].getEnvHome(), envConfig);
+        } catch (Exception e) {
+            fail("Expect no exceptions here.");
+        } finally {
+            if (env != null) {
+                env.close();
+            }
+        }
+
+        /*
+         * Open a replicated Environment on it, without setting
+         * allowConvert.
+         */
+        try {
+            repEnvInfo[0].openEnv();
+            fail("Shouldn't get here.");
+        } catch (UnsupportedOperationException e) {
+            /* An expected exception. */
+        }
+
+        /* Open a replicated Environment on it, now permitting conversion. */
+        try {
+            ReplicationConfig repConfig = repEnvInfo[0].getRepConfig();
+            RepInternal.setAllowConvert(repConfig, true);
+            repEnvInfo[0].openEnv();
+        } catch (Exception e) {
+            fail("Expect no exceptions here.");
+        } finally {
+            repEnvInfo[0].closeEnv();
+        }
+
+        /*
+         * Open a replicated Environment on it again. Test that user-defined
+         * databases have been converted to replicated and were properly
+         * written to the log.
+         */
+        Database db = null;
+        try {
+            ReplicationConfig repConfig = repEnvInfo[0].getRepConfig();
+            RepInternal.setAllowConvert(repConfig, false);
+            repEnvInfo[0].openEnv();
+            dbConfig.setTransactional(true);
+            db = repEnvInfo[0].getEnv().openDatabase(null, DB_NAME, dbConfig);
+        } catch (Exception e) {
+            fail("Got " + e + "; expect no exceptions here.");
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+            repEnvInfo[0].closeEnv();
+        }
+
+        /* Open a r/o standalone Environment on it. */
+        try {
+            envConfig.setReadOnly(true);
+            env = new Environment(repEnvInfo[0].getEnvHome(), envConfig);
+        } catch (Exception e) {
+            fail("Expect no exceptions here.");
+        } finally {
+            if (env != null) {
+                env.close();
+            }
+        }
+
+        /* Open a r/w standalone Environment on it. */
+        try {
+            envConfig.setReadOnly(false);
+            env = new Environment(repEnvInfo[0].getEnvHome(), envConfig);
+            fail("Shouldn't get here.");
+        } catch (UnsupportedOperationException e) {
+            /* An expected exception. */
+        }
+    }
+
+    @Test
+    public void testCRUDOnDb()
+        throws Exception {
+
+        Properties temp = new Properties();
+        DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(),
+                                        temp, true);
+        if ("true".equals
+            (temp.get("je.rep.preserveRecordVersion"))) {
+            // TODO: enable this and fix the JE bug
+            return;
+        }
+
+        syncupGroup();
+
+        assertTrue(repEnvInfo[0].getEnv().getState().isMaster());
+        assertFalse(repEnvInfo[1].getEnv().getState().isMaster());
+
+        dbConfig.setTransactional(true);
+        Database db =
+            repEnvInfo[0].getEnv().openDatabase(null, DB_NAME, dbConfig);
+
+        /* Read data. */
+        doCRUDOperations(1, dbSize, db, OpType.READ);
+
+        /* Insert data. */
+        doCRUDOperations(101, 200, db, OpType.CREATE);
+
+        /* Delete data. */
+        doCRUDOperations(51, 200, db, OpType.DELETE);
+
+        /* Update data. */
+        doCRUDOperations(1, dbSize - 50, db, OpType.UPDATE);
+
+        db.close();
+
+        checkEquality(repEnvInfo);
+    }
+
+    @Test
+    public void testDbOps()
+        throws Exception {
+
+        Properties temp = new Properties();
+        DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(),
+                                        temp, true);
+        if ("true".equals
+            (temp.get("je.rep.preserveRecordVersion"))) {
+            // TODO: enable this and fix the JE bug
+            return;
+        }
+
+        syncupGroup();
+
+        assertTrue(repEnvInfo[0].getEnv().getState().isMaster());
+        assertFalse(repEnvInfo[1].getEnv().getState().isMaster());
+
+        /* Truncate the database. */
+        assertEquals(repEnvInfo[0].getEnv().truncateDatabase
+                     (null, DB_NAME, true), dbSize);
+
+        /* Rename the database. */
+        repEnvInfo[0].getEnv().renameDatabase(null, DB_NAME, "db1");
+        List<String> namesList = repEnvInfo[0].getEnv().getDatabaseNames();
+        assertTrue(!namesList.contains(DB_NAME) && namesList.contains("db1"));
+
+        /* Remove the database. */
+        repEnvInfo[0].getEnv().removeDatabase(null, "db1");
+        namesList = repEnvInfo[0].getEnv().getDatabaseNames();
+        assertFalse(namesList.contains("db1"));
+
+        /* Create a database. */
+        dbConfig.setTransactional(true);
+        Database db =
+            repEnvInfo[0].getEnv().openDatabase(null, DB_NAME, dbConfig);
+        namesList = repEnvInfo[0].getEnv().getDatabaseNames();
+        assertTrue(namesList.contains(DB_NAME));
+        db.close();
+
+        checkEquality(repEnvInfo);
+    }
+
+    /* Test read operations on the replica after a NetworkRestore. */
+    @Test
+    public void testReadOnReplica()
+        throws Exception {
+
+        Properties temp = new Properties();
+        DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(),
+                                        temp, true);
+        if ("true".equals
+            (temp.get("je.rep.preserveRecordVersion"))) {
+            // TODO: enable this and fix the JE bug
+            return;
+        }
+
+        syncupGroup();
+
+        try {
+            dbConfig.setTransactional(true);
+            Database db =
+                repEnvInfo[1].getEnv().openDatabase(null, DB_NAME, dbConfig);
+            doCRUDOperations(1, dbSize, db, OpType.READ);
+            db.close();
+        } catch (Exception e) {
+            fail("Shouldn't throw exceptions here.");
+        }
+
+        checkEquality(repEnvInfo);
+    }
+
+    /* Test that a replica can be the master if the former master is shut down. */
+    @Test
+    public void testTwoNodesFailover()
+        throws Exception {
+
+        Properties temp = new Properties();
+        DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(),
+                                        temp, true);
+        if ("true".equals
+            (temp.get("je.rep.preserveRecordVersion"))) {
+            // TODO: enable this and fix the JE bug
+            return;
+        }
+
+        syncupGroup();
+
+        repEnvInfo[0].closeEnv();
+
+        WaitForMasterListener waitForMaster = new WaitForMasterListener();
+        repEnvInfo[1].getEnv().setStateChangeListener(waitForMaster);
+        ReplicationMutableConfig config =
+            repEnvInfo[1].getEnv().getRepMutableConfig();
+        assertEquals(false, config.getDesignatedPrimary());
+        config.setDesignatedPrimary(true);
+        repEnvInfo[1].getEnv().setRepMutableConfig(config);
+
+        waitForMaster.awaitMastership();
+
+        assertTrue(repEnvInfo[1].getRepNode().getArbiter().isActive());
+        assertTrue(repEnvInfo[1].getEnv().getState().isMaster());
+
+        dbConfig.setTransactional(true);
+        Database db = null;
+        try {
+            db = repEnvInfo[1].getEnv().openDatabase(null, DB_NAME, dbConfig);
+            /* Do some update operations. */
+            doCRUDOperations(1, dbSize, db, OpType.UPDATE);
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+        }
+    }
+
+    /* Test three-node failover behavior. */
+    @Test
+    public void testThreeNodesFailover()
+        throws Exception {
+
+        Properties temp = new Properties();
+        DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(),
+                                        temp, true);
+        if ("true".equals
+            (temp.get("je.rep.preserveRecordVersion"))) {
+            // TODO: enable this and fix the JE bug
+            return;
+        }
+
+        repEnvInfo = null;
+        /* Make a three-node replication group. */
+        repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3);
+
+        syncupGroup();
+
+        /* Sync up node 3. */
+        doNetworkRestore(repEnvInfo[2]);
+
+        /* Close the former master. */
+        String masterName = repEnvInfo[0].getEnv().getNodeName();
+        repEnvInfo[0].closeEnv();
+
+        /* Select a new master; check to make sure the master has changed. */
+        ReplicatedEnvironment master =
+            RepTestUtils.openRepEnvsJoin(repEnvInfo);
+        assertTrue(master.getState().isMaster());
+        assertTrue(!master.getNodeName().equals(masterName));
+
+        checkEquality(RepTestUtils.getOpenRepEnvs(repEnvInfo));
+    }
+
+    /*
+     * Test the behavior of creating a ReplicatedEnvironment on an open
+     * standalone Environment; see SR 18649.
+     */
+    @Test
+    public void testOpenRepEnvOnUnClosedStandaloneEnv()
+        throws Exception {
+
+        if ("true".equals
+            (repEnvInfo[0].getEnvConfig().getConfigParam
+             ("je.rep.preserveRecordVersion"))) {
+            // TODO: enable this and fix the JE bug
+            return;
+        }
+
+        File envHome = new File(envRoot, "testOpenRepEnv");
+        envHome.mkdir();
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+
+        Environment env = new Environment(envHome, envConfig);
+
+        ReplicationConfig repConfig = new ReplicationConfig();
+        repConfig.setGroupName("group1");
+        repConfig.setNodeName("node1");
+        repConfig.setNodeHostPort("localhost:5001");
+        repConfig.setHelperHosts(repConfig.getNodeHostPort());
+
+        ReplicatedEnvironment repEnv = null;
+        try {
+            repEnv = new ReplicatedEnvironment(envHome, repConfig, envConfig);
+            fail("Expected UnsupportedOperationException here.");
+        } catch (UnsupportedOperationException e) {
+            /* Expected exception. */
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Unexpected exception: " + e);
+        } finally {
+            env.close();
+            if (repEnv != null) {
+                repEnv.close();
+            }
+        }
+
+        for (File file : envHome.listFiles()) {
+            if (file.isFile()) {
+                file.delete();
+            }
+        }
+        envHome.delete();
+    }
+
+    /* Sync up the whole group. */
+    private void syncupGroup()
+        throws Exception {
+
+        openStandaloneEnvAndInsertData();
+
+        DbEnableReplication converter = new DbEnableReplication
+            (repEnvInfo[0].getEnvHome(), RepTestUtils.TEST_REP_GROUP_NAME,
+             "Node1", RepTestUtils.TEST_HOST + ":" +
+             DEFAULT_PORT.getDefault());
+
+        converter.convert();
+
+        repEnvInfo[0].openEnv();
+
+        doNetworkRestore(repEnvInfo[1]);
+    }
+
+    /*
+     * Do a NetworkRestore to copy the latest log files from the master to
+     * the replica.
+     */
+    private void doNetworkRestore(RepEnvInfo repNode)
+        throws Exception {
+
+        try {
+            repNode.openEnv();
+        } catch (InsufficientLogException e) {
+            NetworkRestore restore = new NetworkRestore();
+            NetworkRestoreConfig config = new NetworkRestoreConfig();
+            config.setRetainLogFiles(false);
+            restore.execute(e, config);
+        } finally {
+            if (repNode.getEnv() != null) {
+                repNode.closeEnv();
+            }
+        }
+
+        try {
+            repNode.openEnv();
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Shouldn't throw exceptions here.");
+        }
+
+        /* Do read operations after the network restore. */
+        Database db = null;
+        try {
+            dbConfig.setTransactional(true);
+            db = repEnvInfo[0].getEnv().openDatabase(null, DB_NAME, dbConfig);
+            doCRUDOperations(1, dbSize, db, OpType.READ);
+        } catch (Exception e) {
+            fail("Shouldn't throw exceptions here.");
+        } finally {
+            if (db != null) {
+                db.close();
+            }
+        }
+    }
+
+    /* Check the equality of replicas in the same group. */
+    private void checkEquality(RepEnvInfo[] repInfoArray)
+        throws Exception {
+
+        VLSN vlsn = RepTestUtils.syncGroupToLastCommit(repInfoArray,
+                                                       repInfoArray.length);
+        RepTestUtils.checkNodeEquality(vlsn, verbose, repInfoArray);
+    }
+
+    /* Create a standalone environment, insert some records, and close it. */
*/
+    private void openStandaloneEnvAndInsertData()
+        throws Exception {
+
+        Environment env =
+            new Environment(repEnvInfo[0].getEnvHome(), envConfig);
+        Database db = env.openDatabase(null, DB_NAME, dbConfig);
+        doCRUDOperations(1, dbSize, db, OpType.CREATE);
+        db.close();
+
+        Database emptyDb = env.openDatabase(null, EMPTY_DB, dbConfig);
+        emptyDb.close();
+        env.removeDatabase(null, EMPTY_DB);
+        env.close();
+    }
+
+    /* Do CRUD operations on the specified database. */
+    private void doCRUDOperations(int beginId,
+                                  int endId,
+                                  Database db,
+                                  OpType type)
+        throws Exception {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = beginId; i <= endId; i++) {
+            IntegerBinding.intToEntry(i, key);
+            switch (type) {
+                case CREATE:
+                    StringBinding.stringToEntry
+                        ("herococo" + new Integer(i).toString(), data);
+                    assertEquals(OperationStatus.SUCCESS,
+                                 db.put(null, key, data));
+                    break;
+                case READ:
+                    assertEquals(OperationStatus.SUCCESS,
+                                 db.get(null, key, data, null));
+                    assertEquals("herococo" + new Integer(i).toString(),
+                                 StringBinding.entryToString(data));
+                    break;
+                case DELETE:
+                    assertEquals(OperationStatus.SUCCESS,
+                                 db.delete(null, key));
+                    break;
+                case UPDATE:
+                    StringBinding.stringToEntry("here&&coco", data);
+                    assertEquals(OperationStatus.SUCCESS,
+                                 db.put(null, key, data));
+                    break;
+            }
+        }
+    }
+
+    enum OpType {
+        CREATE, READ, UPDATE, DELETE;
+    }
+
+    /**
+     * Make sure that the code example for DbEnableReplication compiles. The
+     * code will not actually execute because the example hostnames, such as
+     * "mars", are not valid, so catch the expected exception.
+     */
+    @Test
+    public void testJavadocForDbEnableReplication() {
+
+        File envDirMars = repEnvInfo[0].getEnvHome();
+        File envDirVenus = repEnvInfo[1].getEnvHome();
+        @SuppressWarnings("unused")
+        ReplicatedEnvironment nodeVenus;
+        try {
+
+            /* ------------- example below ------------- */
+
+            // Create the first node using an existing environment
+            DbEnableReplication converter =
+                new DbEnableReplication(envDirMars,          // env home dir
+                                        "UniversalRepGroup", // group name
+                                        "nodeMars",          // node name
+                                        "mars:5001");        // node host,port
+            converter.convert();
+
+            /*
+             * This line is in the example, but it's pseudo code, so it won't
+             * compile:
+             *
+             *    ReplicatedEnvironment nodeMars =
+             *        new ReplicatedEnvironment(envDirMars, ...);
+             */
+
+            // Bring up additional nodes, which will be initialized from
+            // nodeMars.
+            ReplicationConfig repConfig = null;
+            try {
+                repConfig =
+                    new ReplicationConfig("UniversalRepGroup", // groupName
+                                          "nodeVenus",         // nodeName
+                                          "venus:5008");       // nodeHostPort
+                repConfig.setHelperHosts("mars:5001");
+
+                nodeVenus = new ReplicatedEnvironment(envDirVenus,
+                                                      repConfig,
+                                                      envConfig);
+            } catch (InsufficientLogException insufficientLogEx) {
+
+                // log files will be copied from another node in the group
+                NetworkRestore restore = new NetworkRestore();
+                restore.execute(insufficientLogEx, new NetworkRestoreConfig());
+
+                // try opening the node now
+                nodeVenus = new ReplicatedEnvironment(envDirVenus,
+                                                      repConfig,
+                                                      envConfig);
+            }
+
+            /* ----------- end of example --------- */
+
+        } catch (IllegalArgumentException expected) {
+            /*
+             * The hostnames are invalid. We just want to make sure the
+             * example compiles.
+ */ + } + } +} diff --git a/test/com/sleepycat/je/rep/util/RepEnvWrapper.java b/test/com/sleepycat/je/rep/util/RepEnvWrapper.java new file mode 100644 index 0000000..b57bcd9 --- /dev/null +++ b/test/com/sleepycat/je/rep/util/RepEnvWrapper.java @@ -0,0 +1,220 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import junit.framework.TestCase; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.util.EnvTestWrapper; +import com.sleepycat.je.utilint.VLSN; + +/** + * An environment wrapper for replicated tests. + */ +public class RepEnvWrapper extends EnvTestWrapper { + + // TODO: a standard way to parameterize all replication tests. + final int repNodes = 3; + + private boolean doNodeEqualityCheck = true; + + private final Map dirRepEnvInfoMap = + new HashMap(); + + @Override + public Environment create(File envRootDir, EnvironmentConfig envConfig) + throws DatabaseException { + + adjustEnvConfig(envConfig); + + RepEnvInfo[] repEnvInfo = dirRepEnvInfoMap.get(envRootDir); + Environment env = null; + if (repEnvInfo != null) { + /* An existing environment */ + try { + repEnvInfo = RepTestUtils.setupEnvInfos + (envRootDir, repNodes, envConfig); + } catch (Exception e1) { + TestCase.fail(e1.getMessage()); + } + env = restartGroup(repEnvInfo); + } else { + /* Eliminate detritus from earlier failed tests. */ + RepTestUtils.removeRepEnvironments(envRootDir); + try { + repEnvInfo = + RepTestUtils.setupEnvInfos(envRootDir, + repNodes, + envConfig); + RepTestUtils.joinGroup(repEnvInfo); + TestCase.assertTrue(repEnvInfo[0].getEnv(). + getState().isMaster()); + env = repEnvInfo[0].getEnv(); + } catch (IOException e) { + e.printStackTrace(); + throw new RuntimeException(e); + } + } + dirRepEnvInfoMap.put(envRootDir, repEnvInfo); + return env; + } + + /** + * Modifies the config to use a shared cache and replace use of obsolete + * sync apis with the Durability api to avoid mixed mode exceptions from + * Environment.checkTxnConfig. + */ + @SuppressWarnings("deprecation") + private void adjustEnvConfig(EnvironmentConfig envConfig) + throws IllegalArgumentException { + + /* + * Replicated tests use multiple environments, configure shared cache + * to reduce the memory consumption (unless this property was set by + * the test earlier). 
+ */ + if (!envConfig.isConfigParamSet(EnvironmentConfig.SHARED_CACHE)) { + envConfig.setSharedCache(true); + } + + boolean sync = false; + boolean writeNoSync = envConfig.getTxnWriteNoSync(); + boolean noSync = envConfig.getTxnNoSync(); + envConfig.setTxnWriteNoSync(false); + envConfig.setTxnNoSync(false); + envConfig.setDurability(getDurability(sync, writeNoSync, noSync)); + } + + /** + * Restarts a group associated with an existing environment on disk. + * Returns the environment associated with the master. + */ + private Environment restartGroup(RepEnvInfo[] repEnvInfo) { + return RepTestUtils.restartGroup(repEnvInfo); + + } + + private void closeInternal(Environment env, boolean doCheckpoint) { + + File envRootDir = env.getHome().getParentFile(); + + RepEnvInfo[] repEnvInfo = dirRepEnvInfoMap.get(envRootDir); + try { + ReplicatedEnvironment master = null; + for (RepEnvInfo ri : repEnvInfo) { + ReplicatedEnvironment r = ri.getEnv(); + if (r == null) { + continue; + } + if (r.getState() == ReplicatedEnvironment.State.MASTER) { + master = r; + } + } + /* + * Note that the assertion below will fire if all nodes have been + * closed (all r are null above). This means close() cannot be + * called twice, and I suspect that's unintended. + */ + TestCase.assertNotNull(master); + VLSN lastVLSN = RepInternal.getNonNullRepImpl(master).getRepNode(). + getVLSNIndex().getRange().getLast(); + RepTestUtils.syncGroupToVLSN(repEnvInfo, repNodes, lastVLSN); + + if (doNodeEqualityCheck) { + RepTestUtils.checkNodeEquality(lastVLSN, false, repEnvInfo); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + RepTestUtils.shutdownRepEnvs(repEnvInfo, doCheckpoint); + } + + /* Close environment with a checkpoint. */ + @Override + public void close(Environment env) + throws DatabaseException { + + closeInternal(env, true); + } + + /* Close environment without a checkpoint. */ + @Override + public void closeNoCheckpoint(Environment env) + throws DatabaseException { + + closeInternal(env, false); + } + + /* Simulate a crash. */ + @Override + public void abnormalClose(Environment env) { + File envRootDir = env.getHome().getParentFile(); + RepEnvInfo[] repEnvInfo = dirRepEnvInfoMap.get(envRootDir); + for (RepEnvInfo info : repEnvInfo) { + info.abnormalCloseEnv(); + } + } + + @Override + public void destroy() { + for (File f : dirRepEnvInfoMap.keySet()) { + RepTestUtils.removeRepEnvironments(f); + } + dirRepEnvInfoMap.clear(); + } + + public void syncGroup(File envRootDir) { + RepEnvInfo[] repEnvInfo = dirRepEnvInfoMap.get(envRootDir); + RepTestUtils.syncGroup(repEnvInfo); + } + + public RepEnvInfo[] getRepEnvInfo(File envRootDir) { + return dirRepEnvInfoMap.get(envRootDir); + } + + /** + * Convert old style sync into Durability as defined in TxnConfig. 
+ * + * @see com.sleepycat.je.TransactionConfig#getDurabilityFromSync + */ + private Durability getDurability(boolean sync, + boolean writeNoSync, + boolean noSync) { + if (sync) { + return Durability.COMMIT_SYNC; + } else if (writeNoSync) { + return Durability.COMMIT_WRITE_NO_SYNC; + } else if (noSync) { + return Durability.COMMIT_NO_SYNC; + } + return Durability.COMMIT_SYNC; + } + + @Override + public void resetNodeEqualityCheck() { + doNodeEqualityCheck = !doNodeEqualityCheck; + } +} diff --git a/test/com/sleepycat/je/rep/util/RepSequenceTest.java b/test/com/sleepycat/je/rep/util/RepSequenceTest.java new file mode 100644 index 0000000..097f573 --- /dev/null +++ b/test/com/sleepycat/je/rep/util/RepSequenceTest.java @@ -0,0 +1,293 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import static com.sleepycat.je.rep.impl.RepParams.DEFAULT_PORT; +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Properties; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.DbConfigManager; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class RepSequenceTest extends TestBase { + private final boolean verbose = Boolean.getBoolean("verbose"); + private final File envRoot; + private final String DB_NAME = "test"; + private final int dbSize = 100; + private EnvironmentConfig envConfig; + private RepEnvInfo[] repEnvInfo; + + public RepSequenceTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + + envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setReadOnly(false); + envConfig.setTransactional(true); + } + + @After + public void tearDown() { + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + + @Test + public void testDPLSequenceWithConversion() + throws Exception { + + Properties temp = new Properties(); + DbConfigManager.applyFileConfig(repEnvInfo[0].getEnvHome(), + temp, true); + if ("true".equals + 
(temp.get("je.rep.preserveRecordVersion"))) {
+            // TODO: enable this and fix the JE bug
+            return;
+        }
+
+        syncupGroup();
+
+        doDPLOperations(true);
+    }
+
+    private void doDPLOperations(boolean converted)
+        throws Exception {
+
+        assertTrue(repEnvInfo[0].getEnv().getState().isMaster());
+        assertFalse(repEnvInfo[1].getEnv().getState().isMaster());
+
+        EntityStore store = openStore(repEnvInfo[0].getEnv(), DB_NAME);
+
+        /* Do some CRUD operations on the master. */
+        int beginId = converted ? dbSize : 0;
+        insertData(1 + beginId, beginId + dbSize, store);
+        deleteData(51 + beginId, 100 + beginId, store);
+        readData(1, 50 + beginId, store);
+        store.close();
+
+        /* Open a new database on the master and insert records into it. */
+        store = openStore(repEnvInfo[0].getEnv(), "testDB");
+        insertData(1, dbSize, store);
+        store.close();
+
+        /* Do read operations on the replica. */
+        store = openStore(repEnvInfo[1].getEnv(), DB_NAME);
+        readData(1, 50 + beginId, store);
+        store.close();
+
+        try {
+            /* Open a nonexistent database on the replica. */
+            store = openStore(repEnvInfo[1].getEnv(), "myDB");
+        } catch (ReplicaWriteException e) {
+            /* Expect to see this exception. */
+        } finally {
+            if (store != null) {
+                store.close();
+            }
+        }
+
+        checkEquality(repEnvInfo);
+    }
+
+    @Test
+    public void testDPLSequenceWithoutConversion()
+        throws Exception {
+
+        RepTestUtils.joinGroup(repEnvInfo);
+
+        doDPLOperations(false);
+    }
+
+    /* Sync up the whole group. */
+    private void syncupGroup()
+        throws Exception {
+
+        openStandaloneEnvAndInsertData();
+
+        DbEnableReplication converter = new DbEnableReplication
+            (repEnvInfo[0].getEnvHome(), RepTestUtils.TEST_REP_GROUP_NAME,
+             "Node1", RepTestUtils.TEST_HOST + ":" +
+             DEFAULT_PORT.getDefault());
+
+        converter.convert();
+
+        repEnvInfo[0].openEnv();
+
+        doNetworkRestore(repEnvInfo[1]);
+    }
+
+    /*
+     * Do a NetworkRestore to copy the latest log files from master to
+     * replica.
+     */
+    private void doNetworkRestore(RepEnvInfo repNode)
+        throws Exception {
+
+        try {
+            repNode.openEnv();
+        } catch (InsufficientLogException e) {
+            NetworkRestore restore = new NetworkRestore();
+            NetworkRestoreConfig config = new NetworkRestoreConfig();
+            config.setRetainLogFiles(false);
+            restore.execute(e, config);
+        } finally {
+            if (repNode.getEnv() != null) {
+                repNode.closeEnv();
+            }
+        }
+
+        try {
+            repNode.openEnv();
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Shouldn't throw out exceptions here.");
+        }
+    }
+
+    /* Check the equality of replicas in the same group. */
+    private void checkEquality(RepEnvInfo[] repInfoArray)
+        throws Exception {
+
+        VLSN vlsn = RepTestUtils.syncGroupToLastCommit(repInfoArray,
+                                                       repInfoArray.length);
+        RepTestUtils.checkNodeEquality(vlsn, verbose, repInfoArray);
+    }
+
+    private EntityStore openStore(Environment env, String dbName)
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(true);
+        config.setTransactional(true);
+
+        return new EntityStore(env, dbName, config);
+    }
+
+    /* Create a standalone environment, insert some records and close it. */
+    private void openStandaloneEnvAndInsertData()
+        throws Exception {
+
+        Environment env =
+            new Environment(repEnvInfo[0].getEnvHome(), envConfig);
+        EntityStore store = openStore(env, DB_NAME);
+        insertData(1, dbSize, store);
+        store.close();
+
+        env.close();
+    }
+
+    /* Do insert operations on the specified database.
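+     *
+     * Note that the primary key is assigned from the entity's "KEY"
+     * sequence, so beginId and endId only control how many records are
+     * inserted and which names they carry.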
*/ + private void insertData(int beginId, int endId, EntityStore store) + throws Exception { + + PrimaryIndex primaryIndex = + store.getPrimaryIndex(Integer.class, RepTestData.class); + for (int i = beginId; i <= endId; i++) { + RepTestData data = new RepTestData(); + data.setName("herococo" + new Integer(i).toString()); + primaryIndex.put(data); + } + } + + /* Do delete operations on the specified database. */ + private void deleteData(int beginId, int endId, EntityStore store) + throws Exception { + + PrimaryIndex primaryIndex = + store.getPrimaryIndex(Integer.class, RepTestData.class); + for (int i = beginId; i <= endId; i++) { + primaryIndex.delete(null, i); + } + } + + /* Do read operations on the specified database. */ + private void readData(int beginId, int endId, EntityStore store) + throws Exception { + + PrimaryIndex primaryIndex = + store.getPrimaryIndex(Integer.class, RepTestData.class); + for (int i = beginId; i <= endId; i++) { + + /* + * Do reads to exercise the replica read route, even though the + * value is not used. + */ + @SuppressWarnings("unused") + RepTestData data = primaryIndex.get(i); + } + } + + @Entity + static class RepTestData { + @PrimaryKey(sequence="KEY") + private int key; + + @SecondaryKey(relate=MANY_TO_ONE) + private String name; + + public void setKey(int key) { + this.key = key; + } + + public void setName(String name) { + this.name = name; + } + + public int getKey() { + return key; + } + + public String getName() { + return name; + } + + @Override + public String toString() { + return "RepTestData: key = " + key + ", name = " + name; + } + } +} diff --git a/test/com/sleepycat/je/rep/util/ResetRepGroupTest.java b/test/com/sleepycat/je/rep/util/ResetRepGroupTest.java new file mode 100644 index 0000000..9c14cbe --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ResetRepGroupTest.java @@ -0,0 +1,328 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.util; + +import static com.sleepycat.je.rep.utilint.RepTestUtils.TEST_REP_GROUP_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileWriter; +import java.io.Writer; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; + +import org.junit.Before; +import org.junit.Test; + +/** + * Tests operation of the DbResetGroup utility + */ +public class ResetRepGroupTest extends RepTestBase { + + /* Prefix associated with new nodes in the reset rep group. */ + private static final String NEW_NODE_PREFIX = "nx"; + + /* The new "reset" rep group name. */ + private static final String NEW_GROUP_NAME = "gx"; + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 4; + super.setUp(); + } + + /** Test reset with old master as new master */ + @Test + public void testBasicMaster() + throws Exception { + + testBasic(0, false); + } + + /** Same, but recreating the group in place. */ + @Test + public void testBasicMasterInPlace() + throws Exception { + + testBasic(0, true); + } + + /** Test reset with old electable replica as new master */ + @Test + public void testBasicReplica() + throws Exception { + + testBasic(1, false); + } + + /** Same, but recreating the group in place. */ + @Test + public void testBasicReplicaInPlace() + throws Exception { + + testBasic(1, true); + } + + /** Test reset with old secondary as new master */ + @Test + public void testBasicSecondary() + throws Exception { + + testBasic(3, false); + } + + /** Same, but recreating the group in place. */ + @Test + public void testBasicSecondaryInPlace() + throws Exception { + + testBasic(3, true); + } + + /** + * Basic test of reseting the group using the node with the specified ID as + * the first node. If retainGroupUUID is true, then the existing group + * UUID is reused, allowing existing group members to rejoin without doing + * a network restore. + */ + private void testBasic(final int firstNodeId, + final boolean retainGroupUUID) + throws Exception { + + final int numElectableNodes = 3; + repEnvInfo[3].getRepConfig().setNodeType(NodeType.SECONDARY); + + for (RepEnvInfo info : repEnvInfo) { + EnvironmentConfig config = info.getEnvConfig(); + /* Smaller log files to provoke faster cleaning */ + config.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "100000"); + /* Need a disk limit to delete files. */ + config.setConfigParam( + EnvironmentConfig.MAX_DISK, String.valueOf(8 * 100000)); + } + + createGroup(); + + /* + * Populate, then sync group since we may use a replica as the basis + * for resetting the group. We need enough data files for log cleaning + * further below. 
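+         *
+         * (With LOG_FILE_MAX set to 100000 bytes above, the 10000-record
+         * populate should spread across many small log files, giving the
+         * cleaner something to work on later in the test.)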
+         */
+        final CommitToken token =
+            populateDB(repEnvInfo[0].getEnv(), "db1", 10000);
+        RepTestUtils.syncGroupToVLSN(
+            repEnvInfo, repEnvInfo.length, new VLSN(token.getVLSN()));
+
+        closeNodes(repEnvInfo);
+
+        ReplicationConfig config = repEnvInfo[firstNodeId].getRepConfig();
+
+        File jePropertiesFile = null;
+        if (retainGroupUUID) {
+
+            /*
+             * Arrange to retain the group UUID. DbResetGroup doesn't provide
+             * a way to specify arbitrary configuration parameters, so append
+             * this one to the existing je.properties file.
+             */
+            jePropertiesFile = new File(repEnvInfo[firstNodeId].getEnvHome(),
+                                        "je.properties");
+            final Writer out = new FileWriter(jePropertiesFile, true);
+            try {
+                out.write(RepParams.RESET_REP_GROUP_RETAIN_UUID.getName() +
+                          "=true\n");
+            } finally {
+                out.close();
+            }
+        }
+
+        resetRepEnvInfo(config, firstNodeId, retainGroupUUID);
+
+        DbResetRepGroup reset = new DbResetRepGroup(
+            repEnvInfo[firstNodeId].getEnvHome(),
+            config.getGroupName(),
+            config.getNodeName(),
+            config.getNodeHostPort());
+        reset.reset();
+        if (retainGroupUUID) {
+            jePropertiesFile.delete();
+            /* Recreate the original file. */
+            RepTestUtils.makeRepEnvDir(SharedTestUtils.getTestDir(),
+                                       firstNodeId);
+        }
+
+        /* Make sure the new master is electable. */
+        repEnvInfo[firstNodeId].getRepConfig().setNodeType(
+            NodeType.ELECTABLE);
+
+        /*
+         * Open the converted environment at its new group, node and
+         * hostport location.
+         */
+        ReplicatedEnvironment env = repEnvInfo[firstNodeId].openEnv();
+
+        assertEquals(retainGroupUUID ? TEST_REP_GROUP_NAME : NEW_GROUP_NAME,
+                     env.getGroup().getName());
+        assertTrue(env.getNodeName().startsWith(
+            retainGroupUUID ? "Node" : NEW_NODE_PREFIX));
+
+        /*
+         * With RF=1 and a single electable data node, the DTVLSN should now
+         * track the commit VLSN.
+         */
+        assertEquals(repEnvInfo[firstNodeId].getRepNode().
+                     getCurrentTxnEndVLSN().getSequence(),
+                     repEnvInfo[firstNodeId].getRepNode().getDTVLSN());
+
+        /* Check that a new internal node id was allocated. */
+        final int newNodeId =
+            repEnvInfo[firstNodeId].getRepNode().getNodeId();
+        assertTrue("New node ID (" + newNodeId + ") should be greater than " +
+                   numElectableNodes,
+                   newNodeId > numElectableNodes);
+
+        /*
+         * Create enough data to provoke cleaning and truncation of the VLSN
+         * index. Truncation does not occur, though, if retaining the group
+         * UUID.
+         */
+        populateDB(env, "db1", 1000);
+
+        /* Create cleaner fodder. */
+        env.removeDatabase(null, "db1");
+
+        /*
+         * Wait for the master VLSN update. Because there are no replicas,
+         * FeederManager.runFeeders must wait for the call to
+         * BlockingQueue.poll to time out before updating the master's
+         * CBVLSN.
+         */
+        long pollTimeoutMs =
+            repEnvInfo[firstNodeId].getRepImpl().getConfigManager().
+            getDuration(RepParams.FEEDER_MANAGER_POLL_TIMEOUT);
+        Thread.sleep(pollTimeoutMs * 2);
+
+        env.cleanLog();
+        env.checkpoint(new CheckpointConfig().setForce(true));
+        if (!retainGroupUUID) {
+            assertTrue("failed to provoke cleaning",
+                       env.getStats(null).getNCleanerDeletions() > 0);
+        }
+
+        /*
+         * Grow the group, expecting ILEs when opening the environment unless
+         * retaining group UUIDs.
+         */
+        for (int i = 0; i < repEnvInfo.length; i++) {
+            if (i == firstNodeId) {
+                continue;
+            }
+            try {
+                repEnvInfo[i].openEnv();
+                if (!retainGroupUUID) {
+                    fail("ILE exception expected");
+                }
+            } catch (InsufficientLogException ile) {
+                /*
+                 * If the UUID was retained, the ILE could be due to cleaning
+                 * when the replica tried to sync up.
+ */ + NetworkRestore restore = new NetworkRestore(); + restore.execute(ile, new NetworkRestoreConfig()); + ReplicatedEnvironment env2 = repEnvInfo[i].openEnv(); + assertEquals(retainGroupUUID ? + TEST_REP_GROUP_NAME : NEW_GROUP_NAME, + env2.getGroup().getName()); + + assertTrue(env2.getNodeName(). + startsWith(retainGroupUUID ? "Node" : NEW_NODE_PREFIX)); + int id = repEnvInfo[i].getRepNode().getNodeId(); + assertTrue("New node ID (" + id + ") for node " + i + + " should be greater than " + numElectableNodes, + id > numElectableNodes); + } + } + + closeNodes(repEnvInfo); + } + + /** + * Updates the repEnvInfo array with the configuration for the new + * singleton group. It also removes all environment directories for all + * nodes with the exception of the restarted one, unless retaining the + * group UUID. + */ + private void resetRepEnvInfo(ReplicationConfig config, + int firstNodeId, + boolean retainGroupUUID) + throws IllegalArgumentException { + /* Assign new group, name and ports for the nodes. */ + for (int j = 0; j < repEnvInfo.length; j++) { + + /* Start with the first node and wrap around */ + int i = (firstNodeId + j) % repEnvInfo.length; + RepEnvInfo info = repEnvInfo[i]; + ReplicationConfig rconfig = info.getRepConfig(); + rconfig.setHelperHosts(config.getNodeHostPort()); + + if (retainGroupUUID) { + + /* Just convert to SECONDARY */ + rconfig.setConfigParam( + RepParams.IGNORE_SECONDARY_NODE_ID.getName(), "true"); + rconfig.setNodeType(NodeType.SECONDARY); + continue; + } + + int newPort = rconfig.getNodePort() + repEnvInfo.length; + rconfig.setGroupName(NEW_GROUP_NAME); + rconfig.setNodeName(NEW_NODE_PREFIX + i); + rconfig.setNodeHostPort(rconfig.getNodeHostname() + ":" + newPort); + + EnvironmentConfig econfig = info.getEnvConfig(); + /* + * Turn off the cleaner, since it's controlled explicitly + * by the test. + */ + econfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + + /* Remove all other environment directories. */ + if (i != firstNodeId) { + TestUtils.removeLogFiles("RemoveRepEnvironments", + info.getEnvHome(), + false); // checkRemove + } + } + } +} diff --git a/test/com/sleepycat/je/rep/util/ServiceDispatcherTest.java b/test/com/sleepycat/je/rep/util/ServiceDispatcherTest.java new file mode 100644 index 0000000..a61d47a --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ServiceDispatcherTest.java @@ -0,0 +1,302 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.je.rep.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.Test; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.Response; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; + +public class ServiceDispatcherTest extends ServiceDispatcherTestBase { + + /* The number of simulated services. */ + private static int numServices = 10; + + /* The simulated service map. */ + private static final Map> serviceMap = + new HashMap<>(); + + private DataChannelFactory channelFactory = + DataChannelFactoryBuilder.construct(RepTestUtils.readRepNetConfig()); + + /* Initialize the simulated service map. */ + static { + for (int i=0; i < numServices; i++) { + serviceMap.put("service"+i, + new LinkedBlockingQueue()); + } + } + + class EService implements Runnable { + final DataChannel dataChannel; + final int serviceNumber; + + EService(DataChannel dataChannel, int serviceNumber) { + this.dataChannel = dataChannel; + this.serviceNumber = serviceNumber; + } + + @Override + public void run() { + try { + dataChannel.getSocketChannel().configureBlocking(true); + Channels.newOutputStream(dataChannel).write( + (byte)serviceNumber); + dataChannel.close(); + } catch (IOException e) { + throw new RuntimeException("Unexpected exception", e); + } + } + } + + class LService extends Thread { + + final int serviceNumber; + final BlockingQueue queue; + final AtomicReference ex = new AtomicReference<>(); + volatile boolean started; + + LService(int serviceNumber, BlockingQueue queue) { + this.queue = queue; + this.serviceNumber = serviceNumber; + } + + @Override + public void run() { + started = true; + try { + final DataChannel dataChannel = queue.take(); + final SocketChannel channel = dataChannel.getSocketChannel(); + channel.configureBlocking(true); + Channels.newOutputStream(dataChannel).write( + (byte)serviceNumber); + dataChannel.close(); + } catch (Throwable e) { + ex.compareAndSet(null, e); + } + } + + void finishTest() throws Throwable { + assertTrue(started); + join(); + final Throwable e = ex.get(); + if (e != null) { + throw e; + } + } + } + + @Test + public void testExecuteBasic() + throws IOException, ServiceConnectFailedException { + + for (int i=0; i < numServices; i++) { + final int serviceNumber = i; + dispatcher.register(new ServiceDispatcher.ExecutingService( + "service"+i, dispatcher) { + + @Override + public Runnable getRunnable(DataChannel dataChannel) { + return new EService(dataChannel, serviceNumber); + } + }); + } + + verifyServices(); + } + + @Test + public void testLazyQueueBasic() throws Throwable { + + List lServiceList = new ArrayList<>(); + + for (int 
i = 0; i < numServices; i++) {
+            LinkedBlockingQueue<DataChannel> queue =
+                new LinkedBlockingQueue<>();
+
+            LService lService = new LService(i, queue);
+            /* Track the service so finishTest() below actually runs. */
+            lServiceList.add(lService);
+
+            dispatcher.register(dispatcher.new LazyQueuingService(
+                "service" + i, queue, lService) {
+            });
+        }
+
+        verifyServices();
+
+        for (LService lService : lServiceList) {
+            lService.finishTest();
+        }
+    }
+
+    /*
+     * Verifies the services that were set up.
+     */
+    private void verifyServices()
+        throws IOException, ServiceConnectFailedException {
+
+        DataChannel[] dataChannels = new DataChannel[numServices];
+        for (int i = 0; i < numServices; i++) {
+            DataChannel dataChannel = channelFactory.connect(
+                dispatcherAddress, new ConnectOptions());
+            dataChannels[i] = dataChannel;
+
+            ServiceDispatcher.doServiceHandshake(dataChannel, "service" + i);
+        }
+
+        for (int i = 0; i < numServices; i++) {
+            DataChannel dataChannel = dataChannels[i];
+            int result = Channels.newInputStream(dataChannel).read();
+            assertEquals(i, result);
+            dataChannel.close();
+        }
+    }
+
+    @Test
+    public void testBusyExecuteBasic()
+        throws IOException, ServiceConnectFailedException {
+
+        dispatcher.register(new ServiceDispatcher.ExecutingService(
+            "service1", dispatcher) {
+            int bcount = 0;
+            @Override
+            public Runnable getRunnable(DataChannel dataChannel) {
+                bcount++;
+                return new EService(dataChannel, 1);
+            }
+            @Override
+            public boolean isBusy() {
+                return bcount > 0;
+            }
+        });
+
+        DataChannel dataChannel =
+            channelFactory.connect(dispatcherAddress, new ConnectOptions());
+
+        ServiceDispatcher.doServiceHandshake(dataChannel, "service1");
+
+        /* Service should now be busy. */
+        try {
+            ServiceDispatcher.doServiceHandshake(dataChannel, "service1");
+            fail("expected exception");
+        } catch (ServiceConnectFailedException e1) {
+            assertEquals(Response.BUSY, e1.getResponse());
+        }
+    }
+
+    @Test
+    public void testQueueBasic()
+        throws IOException, InterruptedException,
+               ServiceConnectFailedException {
+
+        for (Entry<String, BlockingQueue<DataChannel>> e :
+             serviceMap.entrySet()) {
+            dispatcher.register(e.getKey(), e.getValue());
+        }
+
+        for (Entry<String, BlockingQueue<DataChannel>> e :
+             serviceMap.entrySet()) {
+            DataChannel dataChannel = channelFactory.connect(
+                dispatcherAddress, new ConnectOptions());
+
+            ServiceDispatcher.doServiceHandshake(dataChannel, e.getKey());
+            dataChannel.close();
+        }
+
+        for (Entry<String, BlockingQueue<DataChannel>> e :
+             serviceMap.entrySet()) {
+            DataChannel channel =
+                dispatcher.takeChannel(e.getKey(), true, 100);
+            assertTrue(channel != null);
+            assertTrue(e.getValue().isEmpty());
+        }
+    }
+
+    @Test
+    public void testRegister() {
+        try {
+            dispatcher.register(null, new LinkedBlockingQueue<DataChannel>());
+            fail("Expected EnvironmentFailureException");
+        } catch (EnvironmentFailureException e) {
+            /* Expected. */
+        }
+
+        try {
+            dispatcher.register("s1", (BlockingQueue<DataChannel>)null);
+            fail("Expected EnvironmentFailureException");
+        } catch (EnvironmentFailureException e) {
+            /* Expected. */
+        }
+
+        dispatcher.register("s1", new LinkedBlockingQueue<DataChannel>());
+        try {
+            dispatcher.register("s1", new LinkedBlockingQueue<DataChannel>());
+            fail("Expected EnvironmentFailureException");
+        } catch (EnvironmentFailureException e) {
+            /* Expected. */
+        }
+        dispatcher.cancel("s1");
+    }
+
+    @Test
+    public void testCancel() {
+        dispatcher.register("s1", new LinkedBlockingQueue<DataChannel>());
+        dispatcher.cancel("s1");
+
+        try {
+            dispatcher.cancel("s1");
+            fail("Expected EnvironmentFailureException");
+        } catch (EnvironmentFailureException e) {
+            /* Expected. */
+        }
+
+        try {
+            dispatcher.cancel(null);
+            fail("Expected EnvironmentFailureException");
+        } catch (EnvironmentFailureException e) {
+            /* Expected. */
+        }
+    }
+
+    @Test
+    public void testExceptions()
+        throws IOException {
+
+        /* Close connection due to
unregistered service name. */ + DataChannel dataChannel = + channelFactory.connect(dispatcherAddress, new ConnectOptions()); + try { + ServiceDispatcher.doServiceHandshake(dataChannel, "s1"); + fail("Expected exception"); + } catch (ServiceConnectFailedException e) { + } + } +} diff --git a/test/com/sleepycat/je/rep/util/ServiceDispatcherTestBase.java b/test/com/sleepycat/je/rep/util/ServiceDispatcherTestBase.java new file mode 100644 index 0000000..0a0fe25 --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ServiceDispatcherTestBase.java @@ -0,0 +1,51 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.util; + +import java.net.InetSocketAddress; + +import org.junit.After; +import org.junit.Before; + +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.ServiceDispatcher; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.util.test.TestBase; + +public abstract class ServiceDispatcherTestBase extends TestBase { + + protected ServiceDispatcher dispatcher = null; + private static final int TEST_PORT = 5000; + protected InetSocketAddress dispatcherAddress; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + dispatcherAddress = new InetSocketAddress("localhost", TEST_PORT); + dispatcher = new ServiceDispatcher( + dispatcherAddress, + DataChannelFactoryBuilder.construct( + RepTestUtils.readRepNetConfig())); + dispatcher.start(); + } + + @After + public void tearDown() + throws Exception { + + dispatcher.shutdown(); + dispatcher = null; + } +} diff --git a/test/com/sleepycat/je/rep/util/TestChannel.java b/test/com/sleepycat/je/rep/util/TestChannel.java new file mode 100644 index 0000000..40c9381 --- /dev/null +++ b/test/com/sleepycat/je/rep/util/TestChannel.java @@ -0,0 +1,46 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.util; + +import java.nio.ByteBuffer; +import java.nio.channels.ReadableByteChannel; + +/** + * This mimics the two part SocketChannel read done in real life when + * assembling an incoming message. 
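+ *
+ * <p>A minimal usage sketch (the buffer sizes are hypothetical):
+ * <pre>
+ *    ByteBuffer wire = ByteBuffer.wrap(new byte[] {1, 2, 3, 4, 5, 6});
+ *    TestChannel channel = new TestChannel(wire);
+ *    ByteBuffer header = ByteBuffer.allocate(2);
+ *    channel.read(header);  // first part: a 2-byte "header"
+ *    ByteBuffer body = ByteBuffer.allocate(4);
+ *    channel.read(body);    // second part: the 4-byte "body"
+ * </pre>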
+ */ +public class TestChannel implements ReadableByteChannel { + + ByteBuffer content; + + public TestChannel(ByteBuffer content) { + this.content = content; + } + + public int read(ByteBuffer fill) { + int remaining = fill.remaining(); + for (int i = 0; i < remaining; i++) { + fill.put(content.get()); + } + + return fill.limit(); + } + + public void close() { + throw new UnsupportedOperationException(); + } + + public boolean isOpen() { + throw new UnsupportedOperationException(); + } +} diff --git a/test/com/sleepycat/je/rep/util/TestLogItem.java b/test/com/sleepycat/je/rep/util/TestLogItem.java new file mode 100644 index 0000000..595e7fc --- /dev/null +++ b/test/com/sleepycat/je/rep/util/TestLogItem.java @@ -0,0 +1,28 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util; + +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.utilint.VLSN; + +/* Used to create placeholder log items for unit tests */ + +public class TestLogItem extends LogItem { + + public TestLogItem(VLSN vlsn, long lsn, byte entryType) { + header = new LogEntryHeader(entryType, 1, 0, vlsn); + this.lsn = lsn; + } +} \ No newline at end of file diff --git a/test/com/sleepycat/je/rep/util/ldiff/BlockBagTest.java b/test/com/sleepycat/je/rep/util/ldiff/BlockBagTest.java new file mode 100644 index 0000000..d51d517 --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ldiff/BlockBagTest.java @@ -0,0 +1,600 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Iterator; +import java.util.List; + +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +public class BlockBagTest extends TestBase { + + /** + * A get() following a remove() shouldn't return any removed blocks. + */ + @Test + public void testGetAfterRemove() { + Block b; + BlockBag bb; + byte[] beginKey = { 0, 0, 0, 0 }; + byte[] beginData = + { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff }; + byte[] md5Hash = + { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb }; + int i; + int count = 10; + int numKeys = 10000; + long rollingChksum = 7654321L; + + b = null; + bb = new BlockBag(); + + /* + * Add count with the same checksum, one unique, then another count + * with the same checksum. 
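+         *
+         * The resulting bag layout is: blocks 0..count-1 with the shared
+         * checksum, block count with checksum 0, then blocks
+         * count+1..2*count with the shared checksum again.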
+         */
+        for (i = 0; i < count; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+        b = new Block(i++);
+        b.setBeginKey(beginKey);
+        b.setBeginData(beginData);
+        b.setMd5Hash(md5Hash);
+        b.setNumRecords(numKeys);
+        b.setRollingChksum(0L);
+        bb.add(b);
+        for (; i < 2 * count + 1; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+
+        List<Block> blocks = bb.get(rollingChksum);
+        assertTrue(blocks != null);
+        if (blocks == null)
+            return;
+        assertEquals(2 * count, blocks.size());
+
+        List<Block> toRemove = bb.get(0L);
+        assertTrue(toRemove != null);
+        if (toRemove == null)
+            return;
+        assertEquals(1, toRemove.size());
+        List<Block> removed = bb.remove(toRemove.get(0));
+        assertTrue(removed != null);
+        if (removed == null)
+            return;
+        assertEquals(count, removed.size());
+        assertEquals(count, bb.size());
+
+        blocks = bb.get(rollingChksum);
+        assertTrue(blocks != null);
+        if (blocks == null)
+            return;
+        assertEquals(count, blocks.size());
+    }
+
+    /**
+     * Insert blocks with identical checksums; make sure get() returns them
+     * all in insertion order.
+     */
+    @Test
+    public void testGetMultiple() {
+        BlockBag bb;
+        byte[] beginKey = { 0, 0, 0, 0 };
+        byte[] beginData =
+            { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+        byte[] md5Hash =
+            { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb };
+        int numKeys = 10000;
+        int count = 10;
+        long rollingChksum = 7654321L;
+
+        bb = new BlockBag();
+
+        for (int i = 0; i < count; i++) {
+            Block b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+
+        List<Block> blocks = bb.get(rollingChksum);
+        assertTrue(blocks != null);
+        if (blocks == null)
+            return;
+        assertEquals(count, blocks.size());
+        int id1, id2;
+        for (int i = 1; i < blocks.size(); i++) {
+            // Block id indicates insertion order for this test
+            id1 = blocks.get(i - 1).getBlockId();
+            id2 = blocks.get(i).getBlockId();
+            assertTrue(id1 < id2);
+        }
+    }
+
+    /**
+     * If a checksum does not exist in the bag, null should be returned.
+     */
+    @Test
+    public void testGetNonexistent() {
+        BlockBag bb;
+        byte[] beginKey = { 0, 0, 0, 0 };
+        byte[] beginData =
+            { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+        byte[] md5Hash =
+            { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb };
+        int numKeys = 10000;
+        int count = 10;
+        long rollingChksum = 7654321L;
+
+        bb = new BlockBag();
+
+        for (int i = 0; i < count; i++) {
+            Block b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+
+        List<Block> blocks = bb.get(0L);
+        assertTrue(blocks == null);
+    }
+
+    /**
+     * The for ( : ) construct should iterate over blocks in insertion order.
+     */
+    @Test
+    public void testIterable() {
+        Block b;
+        BlockBag bb;
+        byte[] beginKey = { 0, 0, 0, 0 };
+        byte[] beginData =
+            { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+        byte[] md5Hash =
+            { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb };
+        int i;
+        int count = 10;
+        int numKeys = 10000;
+        long rollingChksum = 0L;
+
+        b = null;
+        bb = new BlockBag();
+
+        /* Add count blocks, each with a distinct rolling checksum. */
+        for (i = 0; i < count; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum++);
+            bb.add(b);
+        }
+
+        /*
+         * Iterate through the records; there should be count of them and
+         * their ids should be increasing.
+         */
+        i = 0;
+        Block oldBlk = null;
+        for (Block blk : bb) {
+            if (oldBlk != null)
+                assertTrue(oldBlk.getBlockId() < blk.getBlockId());
+            oldBlk = blk;
+            i++;
+        }
+        assertEquals(count, i);
+    }
+
+    /**
+     * The for ( : ) construct shouldn't return deleted items.
+     */
+    @Test
+    public void testIterableAfterDelete() {
+        Block b;
+        BlockBag bb;
+        byte[] beginKey = { 0, 0, 0, 0 };
+        byte[] beginData =
+            { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+        byte[] md5Hash =
+            { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb };
+        int i;
+        int count = 10;
+        int numKeys = 10000;
+        long rollingChksum = 7654321L;
+
+        b = null;
+        bb = new BlockBag();
+
+        /*
+         * Add count with the same checksum, one unique, then another count
+         * with the same checksum.
+         */
+        for (i = 0; i < count; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+        b = new Block(i++);
+        b.setBeginKey(beginKey);
+        b.setBeginData(beginData);
+        b.setMd5Hash(md5Hash);
+        b.setNumRecords(numKeys);
+        b.setRollingChksum(0L);
+        bb.add(b);
+        for (; i < 2 * count + 1; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+
+        List<Block> toRemove = bb.get(0L);
+        assertTrue(toRemove != null);
+        if (toRemove == null)
+            return;
+        assertEquals(1, toRemove.size());
+        List<Block> removed = bb.remove(toRemove.get(0));
+        assertTrue(removed != null);
+        if (removed == null)
+            return;
+
+        /* Iterate through the records, there should be count. */
+        assertEquals(count, bb.getBlockIndex() - 1);
+    }
+
+    /**
+     * The typical iterator usage should return items in insertion order.
+     */
+    @Test
+    public void testIterator() {
+        Block b;
+        BlockBag bb;
+        byte[] beginKey = { 0, 0, 0, 0 };
+        byte[] beginData =
+            { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+        byte[] md5Hash =
+            { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb };
+        int i;
+        int count = 10;
+        int numKeys = 10000;
+        long rollingChksum = 0L;
+
+        b = null;
+        bb = new BlockBag();
+
+        /* Add count blocks, each with a distinct rolling checksum. */
+        for (i = 0; i < count; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum++);
+            bb.add(b);
+        }
+
+        /*
+         * Iterate through the records; there should be count of them and
+         * their ids should be increasing.
+         */
+        Iterator<Block> iter = bb.iterator();
+        i = 0;
+        Block oldBlk = null;
+        while (iter.hasNext()) {
+            Block blk = iter.next();
+            if (oldBlk != null)
+                assertTrue(oldBlk.getBlockId() < blk.getBlockId());
+            oldBlk = blk;
+            i++;
+        }
+        assertEquals(count, i);
+    }
+
+    /**
+     * The typical iterator usage should not return deleted items.
+     */
+    @Test
+    public void testIteratorAfterDelete() {
+        Block b;
+        BlockBag bb;
+        byte[] beginKey = { 0, 0, 0, 0 };
+        byte[] beginData =
+            { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+        byte[] md5Hash =
+            { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb };
+        int i;
+        int count = 10;
+        int numKeys = 10000;
+        long rollingChksum = 7654321L;
+
+        b = null;
+        bb = new BlockBag();
+
+        /*
+         * Add count with the same checksum, one unique, then another count
+         * with the same checksum.
+         */
+        for (i = 0; i < count; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+        b = new Block(i++);
+        b.setBeginKey(beginKey);
+        b.setBeginData(beginData);
+        b.setMd5Hash(md5Hash);
+        b.setNumRecords(numKeys);
+        b.setRollingChksum(0L);
+        bb.add(b);
+        for (; i < 2 * count + 1; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum);
+            bb.add(b);
+        }
+
+        List<Block> toRemove = bb.get(0L);
+        assertTrue(toRemove != null);
+        if (toRemove == null)
+            return;
+        assertEquals(1, toRemove.size());
+        List<Block> removed = bb.remove(toRemove.get(0));
+        assertTrue(removed != null);
+        if (removed == null)
+            return;
+
+        /* Iterate through the records, there should be count. */
+        Iterator<Block> iter = bb.iterator();
+        i = 0;
+        while (iter.hasNext()) {
+            b = iter.next();
+            i++;
+        }
+        assertEquals(count, i);
+    }
+
+    /**
+     * Populate a bag and then immediately remove everything. The bag should
+     * be empty.
+     */
+    @Test
+    public void testRemoveAll() {
+        Block b;
+        BlockBag bb;
+        byte[] beginKey = { 0, 0, 0, 0 };
+        byte[] beginData =
+            { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff };
+        byte[] md5Hash =
+            { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb };
+        int numKeys = 10000;
+        int count = 10;
+        long rollingChksum = 0L;
+
+        b = null;
+        bb = new BlockBag();
+
+        for (int i = 0; i < count; i++) {
+            b = new Block(i);
+            b.setBeginKey(beginKey);
+            b.setBeginData(beginData);
+            b.setMd5Hash(md5Hash);
+            b.setNumRecords(numKeys);
+            b.setRollingChksum(rollingChksum++);
+            bb.add(b);
+        }
+
+        /* Remove all blocks; every block should be returned as unmatched. */
+        List<Block> unmatched = bb.removeAll();
+        assertTrue(unmatched != null);
+        if (unmatched == null)
+            return;
+        assertEquals(count, unmatched.size());
+        int id1, id2;
+        for (int i = 1; i < unmatched.size(); i++) {
+            /* Block id indicates insertion order for this test. */
+            id1 = unmatched.get(i - 1).getBlockId();
+            id2 = unmatched.get(i).getBlockId();
+            assertTrue(id1 < id2);
+        }
+
+        assertEquals(0, bb.size());
+        List<Block> retrieve = bb.get(2L);
+        assertTrue(retrieve == null);
+    }
+
+    /**
+     * Test removeAll() after some items have been deleted.
+ */ + @Test + public void testRemoveSomeThenAll() { + Block b; + BlockBag bb; + byte[] beginKey = { 0, 0, 0, 0 }; + byte[] beginData = + { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff }; + byte[] md5Hash = + { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb }; + int i; + int count = 10; + int numKeys = 10000; + long rollingChksum = 7654321L; + + b = null; + bb = new BlockBag(); + + /* + * Add count with the same checksum, one unique, then another count + * with the same checksum. + */ + for (i = 0; i < count; i++) { + b = new Block(i); + b.setBeginKey(beginKey); + b.setBeginData(beginData); + b.setMd5Hash(md5Hash); + b.setNumRecords(numKeys); + b.setRollingChksum(rollingChksum); + bb.add(b); + } + b = new Block(i++); + b.setBeginKey(beginKey); + b.setBeginData(beginData); + b.setMd5Hash(md5Hash); + b.setNumRecords(numKeys); + b.setRollingChksum(0L); + bb.add(b); + for (; i < 2 * count + 1; i++) { + b = new Block(i); + b.setBeginKey(beginKey); + b.setBeginData(beginData); + b.setMd5Hash(md5Hash); + b.setNumRecords(numKeys); + b.setRollingChksum(rollingChksum); + bb.add(b); + } + + List blocks = bb.get(rollingChksum); + assertTrue(blocks != null); + if (blocks == null) + return; + assertEquals(2 * count, blocks.size()); + + List toRemove = bb.get(0L); + assertTrue(toRemove != null); + if (toRemove == null) + return; + assertEquals(1, toRemove.size()); + List removed = bb.remove(toRemove.get(0)); + assertTrue(removed != null); + if (removed == null) + return; + assertEquals(count, removed.size()); + + blocks = bb.get(rollingChksum); + assertTrue(blocks != null); + if (blocks == null) + return; + assertEquals(count, blocks.size()); + + // Remove the remaining blocks. + List unmatched = bb.removeAll(); + assertTrue(unmatched != null); + if (unmatched == null) + return; + assertEquals(count, unmatched.size()); + int id1, id2; + for (i = 1; i < unmatched.size(); i++) { + // Block id indicates insertion order for this test + id1 = unmatched.get(i - 1).getBlockId(); + id2 = unmatched.get(i).getBlockId(); + assertTrue(id1 < id2); + } + + assertEquals(0, bb.size()); + List retrieve = bb.get(rollingChksum); + assertTrue(retrieve == null); + } + + /** + * Removing a block removes all blocks inserted before it as well. + */ + @Test + public void testRemoveUnmatched() { + Block b; + BlockBag bb; + byte[] beginKey = { 0, 0, 0, 0 }; + byte[] beginData = + { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff }; + byte[] md5Hash = + { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb }; + int numKeys = 10000; + int count = 10; + long rollingChksum = 0L; + + b = null; + bb = new BlockBag(); + + for (int i = 0; i < count; i++) { + b = new Block(i); + b.setBeginKey(beginKey); + b.setBeginData(beginData); + b.setMd5Hash(md5Hash); + b.setNumRecords(numKeys); + b.setRollingChksum(rollingChksum++); + bb.add(b); + } + + assertTrue(b != null); + + /* + * Remove the last block. Unmatched should be the rest of the blocks. + */ + List unmatched = bb.remove(b); + assertTrue(unmatched != null); + if (unmatched == null) + return; + assertEquals(count - 1, unmatched.size()); + int id1, id2; + for (int i = 1; i < unmatched.size(); i++) { + /* Block id indicates insertion order for this test. 
*/ + id1 = unmatched.get(i - 1).getBlockId(); + id2 = unmatched.get(i).getBlockId(); + assertTrue(id1 < id2); + } + } +} diff --git a/test/com/sleepycat/je/rep/util/ldiff/LDiffServiceTest.java b/test/com/sleepycat/je/rep/util/ldiff/LDiffServiceTest.java new file mode 100644 index 0000000..7e68cff --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ldiff/LDiffServiceTest.java @@ -0,0 +1,388 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.net.InetSocketAddress; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.utilint.BinaryProtocol.ProtocolException; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class LDiffServiceTest extends TestBase { + private final File envRoot; + private RepEnvInfo[] repEnvInfo; + private static final String DB_NAME = "testDb"; + + public LDiffServiceTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + /* + * It may take a little while for environment files to be closed, so do + * this cleanup in a retry loop + */ + new PollCondition(1000, 10000) { + @Override + protected boolean condition() { + try { + SharedTestUtils.cleanUpTestDir( + SharedTestUtils.getTestDir()); + return true; + } catch (IllegalStateException e) { + return false; + } + } + }.await(); + + super.setUp(); + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 2); + + /* + * Disable the shared cache for this particular test because it causes + * an assertion to fire when shutting down the LDiffService. The + * service has a env handle open, and it closes it in the middle of + * normal close processing. We could potentially fix this, but since + * the LDiffService is not for public consumption, it's simpler to + * avoid the problem for now at least. + * + * Since this test opens two nodes at most per rep group, the shared + * cache had limited benefit anyway. 
+         */
+        for (int i = 0; i < repEnvInfo.length; i += 1) {
+            repEnvInfo[i].getEnvConfig().setSharedCache(false);
+        }
+    }
+
+    @Override
+    @After
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        if (repEnvInfo != null) {
+            for (int i = 0; i < repEnvInfo.length; i++) {
+                repEnvInfo[i].closeEnv();
+            }
+        }
+    }
+
+    /* Do a diff between two replicators. */
+    @Test
+    public void testSame()
+        throws Exception {
+
+        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+
+        insertData(master, DB_NAME, 6000, "herococo");
+
+        InetSocketAddress replicaAddress =
+            repEnvInfo[1].getRepConfig().getNodeSocketAddress();
+        checkLDiff(master, replicaAddress, false, true);
+    }
+
+    /* Insert records into the database on a replicator. */
+    private void insertData(ReplicatedEnvironment repEnv,
+                            String dbName,
+                            int dbSize,
+                            String dataStr)
+        throws Exception {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        Database db = repEnv.openDatabase(null, dbName, dbConfig);
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 1; i <= dbSize; i++) {
+            IntegerBinding.intToEntry(i, key);
+            StringBinding.stringToEntry(dataStr, data);
+            db.put(null, key, data);
+        }
+        db.close();
+    }
+
+    /* Check the LDiff result between two replicators. */
+    private void checkLDiff(ReplicatedEnvironment localEnv,
+                            InetSocketAddress remoteAddress,
+                            boolean doAnalysis,
+                            boolean expectedSame)
+        throws Exception {
+
+        LDiffConfig config = new LDiffConfig();
+        config.setWaitIfBusy(true, -1, 0);
+        config.setBlockSize(doAnalysis ? 10 : 1000);
+        /* When doing analysis, disable the verbose output. */
+        if (doAnalysis) {
+            config.setDiffAnalysis(true);
+            config.setVerbose(false);
+        }
+        config.setDiffAnalysis(doAnalysis);
+        LDiff ldf = new LDiff(config);
+
+        ReplicationNetworkConfig repNetConfig =
+            RepTestUtils.readRepNetConfig();
+        if (repNetConfig.getChannelType().isEmpty()) {
+            assertEquals(ldf.diff(localEnv, remoteAddress), expectedSame);
+        } else {
+            assertEquals(ldf.diff(localEnv,
+                                  remoteAddress,
+                                  DataChannelFactoryBuilder.construct(
+                                      repNetConfig)),
+                         expectedSame);
+        }
+    }
+
+    /* Test local Environment has additional data. */
+    @Test
+    public void testExtraLocalData()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 6000, "herococo");
+        insertData(repEnvInfo[1].getEnv(), DB_NAME, 3000, "herococo");
+
+        checkLDiff(repEnvInfo[0].getEnv(),
+                   repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                   false,
+                   false);
+    }
+
+    /*
+     * Make two replication groups.
+     *
+     * Because it is hard to make records differ between replicators within a
+     * single group, create two one-node groups and compare the masters of
+     * the two groups instead.
+     */
+    private void makeTwoGroups()
+        throws Exception {
+
+        ReplicationConfig repConfig = repEnvInfo[0].getRepConfig();
+        repConfig.setGroupName("TestGroup1");
+
+        repConfig = repEnvInfo[1].getRepConfig();
+        repConfig.setGroupName("TestGroup2");
+        repConfig.setHelperHosts(repConfig.getNodeHostPort());
+
+        repEnvInfo[0].openEnv();
+        assertTrue(repEnvInfo[0].isMaster());
+        repEnvInfo[1].openEnv();
+        assertTrue(repEnvInfo[1].isMaster());
+    }
+
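+    /*
+     * For reference, the core call pattern that checkLDiff above wraps,
+     * reduced to a minimal sketch (names are illustrative; the config values
+     * mirror the ones used in these tests):
+     *
+     *   LDiffConfig config = new LDiffConfig();
+     *   config.setWaitIfBusy(true, -1, 0); // retry while the service is busy
+     *   LDiff ldf = new LDiff(config);
+     *   boolean same = ldf.diff(localEnv, remoteAddress);
+     */
+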
+    /* Test remote Environment has additional data. */
+    @Test
+    public void testExtraRemoteData()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 3000, "herococo");
+        insertData(repEnvInfo[1].getEnv(), DB_NAME, 6000, "herococo");
+
+        checkLDiff(repEnvInfo[0].getEnv(),
+                   repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                   false,
+                   false);
+    }
+
+    /* Test two replicators have different data. */
+    @Test
+    public void testDifferentData()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 6000, "herococo");
+        insertData(repEnvInfo[1].getEnv(), DB_NAME, 6000, "hero&&coco");
+
+        checkLDiff(repEnvInfo[0].getEnv(),
+                   repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                   false,
+                   false);
+    }
+
+    /* Test local Environment has a database but remote Environment doesn't. */
+    @Test
+    public void testNonExistentDb()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 6000, "herococo");
+
+        try {
+            checkLDiff(repEnvInfo[0].getEnv(),
+                       repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                       false,
+                       false);
+        } catch (ProtocolException e) {
+            /* Expected, do nothing. */
+        }
+    }
+
+    /* Test remote Environment doesn't have any records in the database. */
+    @Test
+    public void testEmptyRemoteDb()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 6000, "herococo");
+        insertData(repEnvInfo[1].getEnv(), DB_NAME, 0, "herococo");
+
+        checkLDiff(repEnvInfo[0].getEnv(),
+                   repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                   false,
+                   false);
+    }
+
+    /* Test local and remote Environments have multiple databases. */
+    @Test
+    public void testSameEnvs()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 6000, "herococo");
+        insertData
+            (repEnvInfo[0].getEnv(), "another" + DB_NAME, 6000, "hero&&coco");
+        insertData(repEnvInfo[1].getEnv(), DB_NAME, 6000, "herococo");
+        insertData
+            (repEnvInfo[1].getEnv(), "another" + DB_NAME, 6000, "hero&&coco");
+
+        checkLDiff(repEnvInfo[0].getEnv(),
+                   repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                   false,
+                   true);
+    }
+
+    /*
+     * Test local and remote Environments have multiple databases with
+     * different data.
+     */
+    @Test
+    public void testEnvsWithDifferentData()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 6001, "herococo");
+        insertData
+            (repEnvInfo[0].getEnv(), "another" + DB_NAME, 6000, "hero&&coco");
+        insertData(repEnvInfo[1].getEnv(), DB_NAME, 6000, "herococo");
+        insertData
+            (repEnvInfo[1].getEnv(), "another" + DB_NAME, 6000, "hero&&coco");
+
+        checkLDiff(repEnvInfo[0].getEnv(),
+                   repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                   false,
+                   false);
+    }
+
+    /* Test local Environment has more databases than remote. */
+    @Test
+    public void testEnvsWithExtraLocalDatabase()
+        throws Exception {
+
+        makeTwoGroups();
+
+        insertData(repEnvInfo[0].getEnv(), DB_NAME, 6000, "herococo");
+        insertData
+            (repEnvInfo[0].getEnv(), "another" + DB_NAME, 6000, "hero&&coco");
+        insertData(repEnvInfo[1].getEnv(), DB_NAME, 6000, "herococo");
+
+        checkLDiff(repEnvInfo[0].getEnv(),
+                   repEnvInfo[1].getRepConfig().getNodeSocketAddress(),
+                   false,
+                   false);
+    }
+
+    /* Test remote Environment has more databases than local. 
*/ + @Test + public void testEnvsWithExtraRemoteDatabase() + throws Exception { + + makeTwoGroups(); + + insertData(repEnvInfo[0].getEnv(), DB_NAME, 6000, "herococo"); + insertData(repEnvInfo[1].getEnv(), DB_NAME, 6000, "herococo"); + insertData + (repEnvInfo[1].getEnv(), "another" + DB_NAME, 6000, "hero&&coco"); + + checkLDiff(repEnvInfo[0].getEnv(), + repEnvInfo[1].getRepConfig().getNodeSocketAddress(), + false, + false); + } + + /* Test local Environment has more data with analysis enabled. */ + @Test + public void testAdditional() + throws Exception { + + makeTwoGroups(); + + insertData(repEnvInfo[0].getEnv(), DB_NAME, 200, "herococo"); + insertData(repEnvInfo[1].getEnv(), DB_NAME, 100, "herococo"); + + checkLDiff(repEnvInfo[0].getEnv(), + repEnvInfo[1].getRepConfig().getNodeSocketAddress(), + true, + false); + } + + /* Test local and Environment have different data with analysis enabled. */ + @Test + public void testDifferentArea() + throws Exception { + + makeTwoGroups(); + + insertData(repEnvInfo[0].getEnv(), DB_NAME, 200, "herococo"); + insertData(repEnvInfo[1].getEnv(), DB_NAME, 300, "hero&&coco"); + + checkLDiff(repEnvInfo[0].getEnv(), + repEnvInfo[1].getRepConfig().getNodeSocketAddress(), + true, + false); + } +} diff --git a/test/com/sleepycat/je/rep/util/ldiff/LDiffTest.java b/test/com/sleepycat/je/rep/util/ldiff/LDiffTest.java new file mode 100644 index 0000000..5877e23 --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ldiff/LDiffTest.java @@ -0,0 +1,493 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.List; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class LDiffTest extends TestBase { + private final File envRoot; + private final File envDir1; + private final File envDir2; + private static final String dbName = "ldiff.db"; + + public LDiffTest() { + envRoot = SharedTestUtils.getTestDir(); + envDir1 = new File(envRoot, "env1"); + envDir2 = new File(envRoot, "env2"); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + LDiffTestUtils.deleteDir(envDir1); + envDir1.mkdir(); + + LDiffTestUtils.deleteDir(envDir2); + envDir2.mkdir(); + } + + @After + public void tearDown() { + LDiffTestUtils.deleteDir(envDir1); + LDiffTestUtils.deleteDir(envDir2); + } + + /** + * Two environments with identical dbs should be equal. 
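+     * A matching diff is expected both to return true and to report zero
+     * mismatched regions.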
+ */ + @Test + public void testEnvSameDb() + throws Exception { + + Environment env1 = LDiffTestUtils.createEnvironment(envDir1); + Environment env2 = LDiffTestUtils.createEnvironment(envDir2); + Database db1 = LDiffTestUtils.createDatabase(env1, dbName); + Database db2 = LDiffTestUtils.createDatabase(env2, dbName); + + LDiffTestUtils.insertRecords(db1, 60000); + LDiffTestUtils.insertRecords(db2, 60000); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + assertTrue(ldf.diff(env1, env2)); + assertEquals(ldf.getDiffRegions().size(), 0); + + db1.close(); + db2.close(); + env1.close(); + env2.close(); + } + + /** + * Two environments with identical dbs with different names should not be + * equal. + */ + @Test + public void testEnvDifferentName() + throws Exception { + + Environment env1 = LDiffTestUtils.createEnvironment(envDir1); + Environment env2 = LDiffTestUtils.createEnvironment(envDir2); + Database db1 = LDiffTestUtils.createDatabase(env1, dbName); + Database db2 = LDiffTestUtils.createDatabase(env2, "_" + dbName); + + LDiffTestUtils.insertRecords(db1, 60000); + LDiffTestUtils.insertRecords(db2, 60000); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + assertFalse(ldf.diff(env1, env2)); + assertTrue(ldf.getDiffRegions() == null); + + db1.close(); + db2.close(); + env1.close(); + env2.close(); + } + + /** + * Two environments with different dbs should not be equal. + */ + @Test + public void testEnvDifferentDb() + throws Exception { + + Environment env1 = LDiffTestUtils.createEnvironment(envDir1); + Environment env2 = LDiffTestUtils.createEnvironment(envDir2); + Database db1 = LDiffTestUtils.createDatabase(env1, dbName); + Database db2 = LDiffTestUtils.createDatabase(env2, dbName); + + byte[] keyarr2 = + { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff }; + LDiffTestUtils.insertRecords(db1, 60000); + LDiffTestUtils.insertWithDiffKeys(db2, 60000, keyarr2); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + assertFalse(ldf.diff(env1, env2)); + List regions = ldf.getDiffRegions(); + assertEquals(regions.size(), 1); + MismatchedRegion region = regions.get(0); + assertEquals(region.getLocalDiffSize(), -1); + assertEquals(region.getRemoteDiffSize(), -1); + + db1.close(); + db2.close(); + env1.close(); + env2.close(); + } + + /** + * Two environments with different numbers of dbs should not be equal. + */ + @Test + public void testEnvDifferentNumDbs() + throws Exception { + + Environment env1 = LDiffTestUtils.createEnvironment(envDir1); + Environment env2 = LDiffTestUtils.createEnvironment(envDir2); + Database db1 = LDiffTestUtils.createDatabase(env1, dbName); + Database db2 = LDiffTestUtils.createDatabase(env2, dbName); + Database db3 = LDiffTestUtils.createDatabase(env2, "_" + dbName); + + LDiffTestUtils.insertRecords(db1, 60000); + LDiffTestUtils.insertRecords(db2, 60000); + LDiffTestUtils.insertRecords(db3, 60000); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + assertFalse(ldf.diff(env1, env2)); + assertEquals(ldf.getDiffRegions().size(), 0); + + db1.close(); + db2.close(); + db3.close(); + env1.close(); + env2.close(); + } + + /** + * Two environments with multiple identical dbs should be equal. 
+ */ + @Test + public void testEnvMultipleDbs() + throws Exception { + + Environment env1 = LDiffTestUtils.createEnvironment(envDir1); + Environment env2 = LDiffTestUtils.createEnvironment(envDir2); + Database db1 = LDiffTestUtils.createDatabase(env1, dbName); + Database db2 = LDiffTestUtils.createDatabase(env2, dbName); + Database db3 = LDiffTestUtils.createDatabase(env1, "_" + dbName); + Database db4 = LDiffTestUtils.createDatabase(env2, "_" + dbName);; + + byte[] keyarr2 = + { (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff }; + LDiffTestUtils.insertRecords(db1, 60000); + LDiffTestUtils.insertRecords(db2, 60000); + LDiffTestUtils.insertWithDiffKeys(db3, 60000, keyarr2); + LDiffTestUtils.insertWithDiffKeys(db4, 60000, keyarr2); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + assertTrue(ldf.diff(env1, env2)); + assertEquals(ldf.getDiffRegions().size(), 0); + + db1.close(); + db2.close(); + db3.close(); + db4.close(); + env1.close(); + env2.close(); + } + + /** + * LDiff of two empty databases should succeed. + */ + @Test + public void testEmptyDbs() + throws Exception { + + Environment env = LDiffTestUtils.createEnvironment(envDir1); + Database db1 = LDiffTestUtils.createDatabase(env, "ldiff.a.db"); + Database db2 = LDiffTestUtils.createDatabase(env, "ldiff.b.db"); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + assertTrue(ldf.diff(db1, db2)); + assertEquals(ldf.getDiffRegions().size(), 0); + + db1.close(); + db2.close(); + env.close(); + } + + /** + * LDiff of a database against itself should succeed. + */ + @Test + public void testSameDb() + throws Exception { + + Environment env = LDiffTestUtils.createEnvironment(envDir1); + Database db = LDiffTestUtils.createDatabase(env, dbName); + + LDiffTestUtils.insertRecords(db, 60000); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + BlockBag bag = ldf.createBlockBag(db); + assertTrue(ldf.diff(db, bag)); + assertEquals(ldf.getDiffRegions().size(), 0); + + db.close(); + env.close(); + } + + /** + * Get a bag of blocks from a database, update the database and then diff + * it against the bag of blocks. + */ + @Test + public void testUpdatedDbs() + throws Exception { + + Environment env = LDiffTestUtils.createEnvironment(envDir1); + Database db = LDiffTestUtils.createDatabase(env, dbName); + + byte[] newKeys = LDiffTestUtils.insertRecords(db, 60000); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + BlockBag bag = ldf.createBlockBag(db); + + LDiffTestUtils.insertRecords(db, 40000, newKeys, true); + assertFalse(ldf.diff(db, bag)); + List regions = ldf.getDiffRegions(); + assertEquals(regions.size(), 1); + MismatchedRegion region = regions.get(0); + assertEquals(region.getLocalDiffSize(), -1); + assertEquals(region.getRemoteDiffSize(), -1); + + db.close(); + env.close(); + } + + /** + * Create two identical databases, remove and then reinsert all the records + * from one, creating two databases with the same data but different + * structure. 
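+     * Because blocks are computed from key/data contents rather than Btree
+     * structure, the repopulated databases should again compare as equal.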
+ */ + @Test + public void testDifferentDbs() + throws Exception { + + Environment env = LDiffTestUtils.createEnvironment(envDir1); + Database db1 = LDiffTestUtils.createDatabase(env, "ldiff.a.db"); + Database db2 = LDiffTestUtils.createDatabase(env, "ldiff.b.db"); + + LDiffTestUtils.insertRecords(db1, 60000); + LDiffTestUtils.insertRecords(db2, 60000); + + LDiffConfig cfg = new LDiffConfig(); + LDiff ldf = new LDiff(cfg); + BlockBag bag= ldf.createBlockBag(db2); + assertTrue(ldf.diff(db1, bag)); + assertEquals(ldf.getDiffRegions().size(), 0); + + /* Remove all of dbA's data. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Cursor curs = db1.openCursor(null, null); + while (curs.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + curs.delete(); + } + curs.close(); + + bag = ldf.createBlockBag(db2); + assertFalse(ldf.diff(db1, bag)); + assertEquals(ldf.getDiffRegions().size(), 1); + assertTrue(ldf.getDiffRegions().get(0).isRemoteAdditional()); + + /* Re-populate dbA with the original data. */ + LDiffTestUtils.insertRecords(db1, 60000); + + bag = ldf.createBlockBag(db2); + assertTrue(ldf.diff(db1, bag)); + assertEquals(ldf.getDiffRegions().size(), 0); + + db1.close(); + db2.close(); + env.close(); + } + + /* Test the database has additonal blocks. */ + @Test + public void testAdditional() + throws Exception { + + Environment env1 = LDiffTestUtils.createEnvironment(envDir1); + Environment env2 = LDiffTestUtils.createEnvironment(envDir2); + Database db1 = LDiffTestUtils.createDatabase(env1, dbName); + Database db2 = LDiffTestUtils.createDatabase(env2, dbName); + + LDiffTestUtils.insertAdditionalRecords(db1, db2); + + LDiffConfig config = new LDiffConfig(); + config.setBlockSize(10); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Test Block additional. */ + LDiff ldiff = new LDiff(config); + assertFalse(ldiff.diff(env1, env2)); + + List regions = ldiff.getDiffRegions(); + + assertEquals(regions.size(), 3); + + MismatchedRegion region = regions.get(0); + assertTrue(region.isRemoteAdditional()); + assertEquals(region.getRemoteDiffSize(), 100); + IntegerBinding.intToEntry(1, key); + checkByteArrayEquality(region.getRemoteBeginKey(), key); + StringBinding.stringToEntry("herococo", data); + checkByteArrayEquality(region.getRemoteBeginData(), data); + + region = regions.get(1); + assertTrue(region.isRemoteAdditional()); + assertEquals(region.getRemoteDiffSize(), 20); + IntegerBinding.intToEntry(151, key); + checkByteArrayEquality(region.getRemoteBeginKey(), key); + checkByteArrayEquality(region.getRemoteBeginData(), data); + + region = regions.get(2); + assertTrue(region.isRemoteAdditional()); + assertEquals(region.getRemoteDiffSize(), -1); + IntegerBinding.intToEntry(201, key); + checkByteArrayEquality(region.getRemoteBeginKey(), key); + checkByteArrayEquality(region.getRemoteBeginData(), data); + + /* Test Window additional. 
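+         * Diffing in the reverse direction should report the same three
+         * regions, this time as local-additional rather than remote.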
*/ + assertFalse(ldiff.diff(env2, env1)); + regions = ldiff.getDiffRegions(); + + assertEquals(regions.size(), 3); + region = regions.get(0); + assertTrue(region.isLocalAdditional()); + assertEquals(region.getLocalDiffSize(), 100); + IntegerBinding.intToEntry(1, key); + checkByteArrayEquality(region.getLocalBeginKey(), key); + checkByteArrayEquality(region.getLocalBeginData(), data); + + region = regions.get(1); + assertTrue(region.isLocalAdditional()); + assertEquals(region.getLocalDiffSize(), 20); + IntegerBinding.intToEntry(151, key); + checkByteArrayEquality(region.getLocalBeginKey(), key); + checkByteArrayEquality(region.getLocalBeginData(), data); + + region = regions.get(2); + assertTrue(region.isLocalAdditional()); + assertEquals(region.getLocalDiffSize(), -1); + IntegerBinding.intToEntry(201, key); + checkByteArrayEquality(region.getLocalBeginKey(), key); + checkByteArrayEquality(region.getLocalBeginData(), data); + + db1.close(); + db2.close(); + env1.close(); + env2.close(); + } + + /* Test two database have different blocks. */ + @Test + public void testDifferentArea() + throws Exception { + + Environment env1 = LDiffTestUtils.createEnvironment(envDir1); + Environment env2 = LDiffTestUtils.createEnvironment(envDir2); + Database db1 = LDiffTestUtils.createDatabase(env1, dbName); + Database db2 = LDiffTestUtils.createDatabase(env2, dbName); + + LDiffTestUtils.insertDifferentRecords(db1, db2); + + LDiffConfig config = new LDiffConfig(); + config.setBlockSize(10); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + LDiff ldiff = new LDiff(config); + assertFalse(ldiff.diff(env1, env2)); + List regions = ldiff.getDiffRegions(); + assertEquals(regions.size(), 4); + + MismatchedRegion region = regions.get(0); + assertEquals(region.getLocalDiffSize(), 10); + assertEquals(region.getRemoteDiffSize(), 10); + IntegerBinding.intToEntry(1, key); + checkByteArrayEquality(region.getLocalBeginKey(), key); + checkByteArrayEquality(region.getRemoteBeginKey(), key); + StringBinding.stringToEntry("herococo", data); + checkByteArrayEquality(region.getLocalBeginData(), data); + StringBinding.stringToEntry("hero-coco", data); + checkByteArrayEquality(region.getRemoteBeginData(), data); + + region = regions.get(1); + assertEquals(region.getLocalDiffSize(), 30); + assertEquals(region.getRemoteDiffSize(), 30); + IntegerBinding.intToEntry(51, key); + StringBinding.stringToEntry("herococo", data); + checkByteArrayEquality(region.getLocalBeginKey(), key); + checkByteArrayEquality(region.getRemoteBeginKey(), key); + checkByteArrayEquality(region.getLocalBeginData(), data); + checkByteArrayEquality(region.getRemoteBeginData(), data); + + region = regions.get(2); + assertEquals(region.getLocalDiffSize(), 10); + assertEquals(region.getRemoteDiffSize(), 10); + IntegerBinding.intToEntry(91, key); + checkByteArrayEquality(region.getLocalBeginKey(), key); + checkByteArrayEquality(region.getRemoteBeginKey(), key); + checkByteArrayEquality(region.getLocalBeginData(), data); + checkByteArrayEquality(region.getRemoteBeginData(), data); + + region = regions.get(3); + assertEquals(region.getLocalDiffSize(), -1); + assertEquals(region.getRemoteDiffSize(), -1); + IntegerBinding.intToEntry(271, key); + checkByteArrayEquality(region.getLocalBeginKey(), key); + checkByteArrayEquality(region.getRemoteBeginKey(), key); + checkByteArrayEquality(region.getLocalBeginData(), data); + StringBinding.stringToEntry("hero-coco", data); + checkByteArrayEquality(region.getRemoteBeginData(), data); + 
+ db1.close(); + db2.close(); + env1.close(); + env2.close(); + } + + private void checkByteArrayEquality(byte[] arr1, DatabaseEntry entry) { + assertEquals(arr1.length, entry.getData().length); + + for (int i = 0; i < arr1.length; i++) { + assertEquals(arr1[i], entry.getData()[i]); + } + } +} diff --git a/test/com/sleepycat/je/rep/util/ldiff/LDiffTestUtils.java b/test/com/sleepycat/je/rep/util/ldiff/LDiffTestUtils.java new file mode 100644 index 0000000..ba96168 --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ldiff/LDiffTestUtils.java @@ -0,0 +1,173 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.io.File; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; + +public class LDiffTestUtils { + private static final byte[] keyArr = {0, 0, 0, 0}; + public static final byte[] dataArr = + { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb }; + + /* Delete a directory. */ + public static void deleteDir(File dir) { + if (dir.exists()) { + for (File f : dir.listFiles()) { + f.delete(); + } + dir.delete(); + } + } + + /* Create an Environment. */ + public static Environment createEnvironment(File envHome) + throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + return new Environment(envHome, envConfig); + } + + /* Create a database. */ + public static Database createDatabase(Environment env, String dbName) + throws Exception { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + return env.openDatabase(null, dbName, dbConfig); + } + + public static byte[] insertRecords(Database db, + int iters) + throws Exception { + + return insertRecords(db, iters, keyArr, dataArr); + } + + public static byte[] insertRecords(Database db, + int iters, + byte[] data, + boolean key) + throws Exception { + + if (key) { + return insertRecords(db, iters, data, dataArr); + } else { + return insertRecords(db, iters, keyArr, data); + } + } + + /* Insert records. */ + public static byte[] insertRecords(Database db, + int iters, + byte[] usedKeys, + byte[] usedData) + throws Exception { + + byte[] keys = new byte[usedKeys.length]; + System.arraycopy(usedKeys, 0, keys, 0, usedKeys.length); + for (int i = 0; i < iters; i++) { + db.put(null, new DatabaseEntry(keys), new DatabaseEntry(usedData)); + if (++keys[3] == 0) + if (++keys[2] == 0) + if (++keys[1] == 0) + keys[0]++; + } + + return keys; + } + + /* Insert records, let a database has some additional blocks. 
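+     * Layout: db1 receives keys 101-150 and 171-200 while db2 receives keys
+     * 1-296, so db2 holds three additional areas relative to db1: keys
+     * 1-100, 151-170, and the trailing 201-296.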
*/ + public static void insertAdditionalRecords(Database db1, + Database db2) + throws Exception { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 101; i <= 200; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo", data); + if (i <= 150 || i >= 171) { + db1.put(null, key, data); + } + } + + for (int i = 1; i <= 296; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo", data); + db2.put(null, key, data); + } + } + + /* Insert different records into two databases. */ + public static void insertDifferentRecords(Database db1, + Database db2) + throws Exception { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= 296; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo", data); + db1.put(null, key, data); + } + + for (int i = 1; i <= 290; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("herococo", data); + if (i <= 5) { + StringBinding.stringToEntry("hero-coco", data); + } + if (i >= 55 && i <= 80) { + StringBinding.stringToEntry("hero-coco", data); + } + if (i == 100) { + StringBinding.stringToEntry("hero-coco", data); + } + if (i >= 271 && i <= 290) { + StringBinding.stringToEntry("hero-coco", data); + } + db2.put(null, key, data); + } + } + + + /* Insert the records with keys which are different from keyArr. */ + public static void insertWithDiffKeys(Database db, + int iters, + byte[] usedKeys) + throws Exception { + + byte[] keys = new byte[usedKeys.length]; + System.arraycopy(usedKeys, 0, keys, 0, usedKeys.length); + for (int i = 0; i < iters; i++) { + db.put(null, new DatabaseEntry(keys), new DatabaseEntry(dataArr)); + if (--keys[3] == 0) + if (--keys[2] == 0) + if (--keys[1] == 0) + keys[0]--; + } + } +} diff --git a/test/com/sleepycat/je/rep/util/ldiff/LDiffUtilTest.java b/test/com/sleepycat/je/rep/util/ldiff/LDiffUtilTest.java new file mode 100644 index 0000000..fc1078d --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ldiff/LDiffUtilTest.java @@ -0,0 +1,56 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +public class LDiffUtilTest extends TestBase { + byte[][] al = new byte[][] { + StringUtils.toUTF8("key1|Value1"), + StringUtils.toUTF8("key2|Value2"), + StringUtils.toUTF8("key3|Value3"), + StringUtils.toUTF8("key4|Value4"), + StringUtils.toUTF8("key5|Value5"), + StringUtils.toUTF8("key6|Value6"), + StringUtils.toUTF8("key7|Value7"), + StringUtils.toUTF8("key8|Value8"), + StringUtils.toUTF8("key9|Value9"), + StringUtils.toUTF8("key10|Value10") }; + + @Test + public void testPlaceHolder() { + /* + * A Junit test will fail if there are no tests cases at all, so + * here is a placeholder test. + */ + } + + /* Verifies the basics of the rolling checksum computation. 
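+     * The identity exercised: rolling the checksum forward by one element,
+     * via rollChecksum(sum, blockSize, X(removed), X(added)), should equal
+     * the checksum computed from scratch over the shifted block.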
+     */
+    /*
+     * public void testgetRollingChksum() {
+     *     List tlist = Arrays.asList(al);
+     *     int blockSize = 5;
+     *     long rsum = LDiffUtil.getRollingChksum(tlist.subList(0, blockSize));
+     *     for (int i = 1; (i + blockSize) <= tlist.size(); i++) {
+     *         int removeIndex = i - 1;
+     *         int addIndex = removeIndex + blockSize;
+     *         List list = tlist.subList(removeIndex + 1, addIndex + 1);
+     *         // The reference value.
+     *         long ref = LDiffUtil.getRollingChksum(list);
+     *         // The incrementally computed chksum.
+     *         rsum = LDiffUtil.rollChecksum(rsum, blockSize,
+     *                                       LDiffUtil.getXi(al[removeIndex]),
+     *                                       LDiffUtil.getXi(al[addIndex]));
+     *         assertEquals(ref, rsum);
+     *         // System.err.printf("ref:%x, rsum:%x\n", ref, rsum);
+     *     }
+     * }
+     */
+}
diff --git a/test/com/sleepycat/je/rep/util/ldiff/ProtocolTest.java b/test/com/sleepycat/je/rep/util/ldiff/ProtocolTest.java
new file mode 100644
index 0000000..4cbd0c6
--- /dev/null
+++ b/test/com/sleepycat/je/rep/util/ldiff/ProtocolTest.java
@@ -0,0 +1,100 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.util.ldiff;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.util.TestChannel;
+import com.sleepycat.je.rep.utilint.BinaryProtocol.Message;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.TestBase;
+
+public class ProtocolTest extends TestBase {
+
+    Protocol protocol;
+    private Message[] messages;
+    private Block testBlock;
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        protocol = new Protocol(new NameIdPair("n1", (short)1),
+                                null);
+        testBlock = new Block(5);
+        byte[] beginKey = {0, 1, 2, 3};
+        testBlock.setBeginKey(beginKey);
+        byte[] beginData = {(byte)0xde, (byte)0xad, (byte)0xbe, (byte)0xef};
+        testBlock.setBeginData(beginData);
+        byte[] md5Hash = {(byte)0xdb, (byte)0xcd, (byte)0xdb, (byte)0xcd};
+        testBlock.setMd5Hash(md5Hash);
+        testBlock.setNumRecords(1 << 13);
+        testBlock.setRollingChksum(123456789L);
+
+        MismatchedRegion region = new MismatchedRegion();
+        region.setLocalBeginKey(beginKey);
+        region.setLocalBeginData(beginData);
+        region.setLocalDiffSize(10);
+        region.setRemoteBeginKey(beginKey);
+        region.setRemoteBeginData(beginData);
+        region.setRemoteDiffSize(10);
+
+        Record record = new Record(beginKey, beginData, new VLSN(5));
+
+        messages = new Message[] {
+            protocol.new DbBlocks("test.db", 1 << 13),
+            protocol.new DbMismatch("test.db does not exist"),
+            protocol.new BlockListStart(),
+            protocol.new BlockListEnd(),
+            protocol.new BlockInfo(testBlock),
+            protocol.new EnvDiff(),
+            protocol.new EnvInfo(4),
+            protocol.new RemoteDiffRequest(region),
+            protocol.new RemoteRecord(record),
+            protocol.new DiffAreaStart(),
+            protocol.new DiffAreaEnd(),
+            protocol.new Done(),
+            protocol.new Error("An LDiff Error")
+        };
+    }
+
+    @Test
+    public void testBasic()
+        throws IOException {
+
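+        /*
+         * Serialize each message and read it back, verifying that the wire
+         * format round-trips unchanged. The count check guards against a new
+         * LDiff message type being added without extending this test.
+         */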
assertEquals(protocol.messageCount() - + protocol.getPredefinedMessageCount(), + messages.length); + for (Message m : messages) { + ByteBuffer testWireFormat = m.wireFormat().duplicate(); + Message newMessage = + protocol.read(new TestChannel(testWireFormat)); + assertTrue(newMessage.getOp() + " " + + Arrays.toString(testWireFormat.array()) + "!=" + + Arrays.toString(newMessage.wireFormat().array()), + Arrays.equals(testWireFormat.array().clone(), + newMessage.wireFormat().array().clone())); + } + } +} diff --git a/test/com/sleepycat/je/rep/util/ldiff/RemoteProcessingTest.java b/test/com/sleepycat/je/rep/util/ldiff/RemoteProcessingTest.java new file mode 100644 index 0000000..1245caa --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ldiff/RemoteProcessingTest.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class RemoteProcessingTest extends TestBase { + private File envHome; + private final int dbCount = 25000; + private final String DB_NAME = "testDb"; + + /* The remote database environment. */ + private static Environment env; + /* The list of blocks constituting RDB. */ + static final List rbList = new ArrayList(); + + public RemoteProcessingTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + fillData(); + } + + private void fillData() throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + + Database db = env.openDatabase(null, DB_NAME, dbConfig); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= dbCount; i++) { + IntegerBinding.intToEntry(i, key); + StringBinding.stringToEntry("bdb je", data); + db.put(null, key, data); + } + db.close(); + env.close(); + } + + public static void configure(String envDir) { + env = LDiffUtil.openEnv(envDir); + } + + @Test + public void testPlaceHolder() { + /* + * A Junit test will fail if there are no tests cases at all, so + * here is a placeholder test. 
+ */ + } +} diff --git a/test/com/sleepycat/je/rep/util/ldiff/WindowTest.java b/test/com/sleepycat/je/rep/util/ldiff/WindowTest.java new file mode 100644 index 0000000..90de99b --- /dev/null +++ b/test/com/sleepycat/je/rep/util/ldiff/WindowTest.java @@ -0,0 +1,151 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.util.ldiff; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.test.SharedTestUtils; + +public class WindowTest { + private static File envDir = SharedTestUtils.getTestDir(); + private static String dbName = "window.db"; + + @Before + public void setUp() + throws Exception { + + if (envDir.exists()) { + for (File f : envDir.listFiles()) + f.delete(); + envDir.delete(); + } + envDir.mkdir(); + } + + /** + * Test that rolling the checksum yields the same value as calculating the + * checksum directly. + */ + @Test + public void testRollingChecksum() { + Cursor c1, c2; + Database db; + DatabaseEntry data, key; + Environment env; + Window w1, w2; + byte[] dataarr = + { (byte) 0xdb, (byte) 0xdb, (byte) 0xdb, (byte) 0xdb }; + byte[] keyarr = { 0, 0, 0, 0 }; + final int blockSize = 5; + final int dbSize = 2 * blockSize; + + /* Open the database environment. */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + /* envConfig.setTransactional(false); */ + envConfig.setAllowCreate(true); + try { + env = new Environment(envDir, envConfig); + } catch (Exception e) { + assertTrue(false); + return; + } + + /* Open a database within the environment. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setSortedDuplicates(true); + try { + db = env.openDatabase(null, dbName, dbConfig); + } catch (Exception e) { + assertTrue(false); + return; + } + + for (int i = 0; i < dbSize; i++) { + key = new DatabaseEntry(keyarr); + data = new DatabaseEntry(dataarr); + db.put(null, key, data); + keyarr[3]++; + } + + c1 = db.openCursor(null, null); + c2 = db.openCursor(null, null); + try { + w1 = new Window(c1, blockSize); + w2 = new Window(c2, blockSize); + } catch (Exception e) { + c1.close(); + c2.close(); + db.close(); + env.close(); + assertTrue(false); + return; + } + assertEquals(w1.getChecksum(), w2.getChecksum()); + key = new DatabaseEntry(); + key.setPartial(0, 0, true); + data = new DatabaseEntry(); + data.setPartial(0, 0, true); + for (int i = blockSize; i < dbSize; i++) { + try { + /* Advance w1 by one key/data pair. 
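+                 * rollWindow() should drop the departing pair's contribution
+                 * from the checksum and fold in the incoming pair's, rather
+                 * than recomputing the whole window.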
+                 */
+                w1.rollWindow();
+
+                /*
+                 * Position c2 to the next key/data pair and get a new window.
+                 * (Constructing the window modifies the cursor, so we need to
+                 * reposition it.)
+                 */
+                assertTrue(c2.getFirst(key, data, LockMode.DEFAULT) ==
+                           OperationStatus.SUCCESS);
+                for (int j = 0; j < i - blockSize; j++)
+                    assertTrue(c2.getNext(key, data, LockMode.DEFAULT) ==
+                               OperationStatus.SUCCESS);
+                w2 = new Window(c2, blockSize);
+
+                /*
+                 * The windows are referring to the same range of key/data
+                 * pairs.
+                 */
+                assertEquals(w1.getChecksum(), w2.getChecksum());
+            } catch (Exception e) {
+                c1.close();
+                c2.close();
+                db.close();
+                env.close();
+                assertTrue(false);
+                return;
+            }
+        }
+        c1.close();
+        c2.close();
+        db.close();
+        env.close();
+    }
+}
diff --git a/test/com/sleepycat/je/rep/utilint/HandshakeTest.java b/test/com/sleepycat/je/rep/utilint/HandshakeTest.java
new file mode 100644
index 0000000..2175f4d
--- /dev/null
+++ b/test/com/sleepycat/je/rep/utilint/HandshakeTest.java
@@ -0,0 +1,317 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.net.DataChannel;
+import com.sleepycat.je.rep.net.DataChannelFactory;
+import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions;
+import com.sleepycat.je.rep.net.PasswordSource;
+import com.sleepycat.je.rep.subscription.SubscriptionAuthTestHelper;
+import com.sleepycat.je.rep.utilint.ServiceDispatcher.ServiceConnectFailedException;
+import com.sleepycat.je.rep.utilint.ServiceHandshake.AuthenticationMethod;
+import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder;
+import com.sleepycat.util.test.TestBase;
+
+import org.junit.After;
+import org.junit.Test;
+
+/**
+ * Check various service handshake cases.
+ */
+public class HandshakeTest extends TestBase {
+
+    private final static String SERVICE_NAME = "testing";
+
+    private InetSocketAddress dispAddr;
+    private ServiceDispatcher dispatcher;
+
+    @Override
+    @After
+    public void tearDown()
+        throws Exception {
+
+        if (dispatcher != null) {
+            dispatcher.shutdown();
+            dispatcher = null;
+        }
+    }
+
+    /**
+     * Sanity check that the handshake succeeds when no authentication is
+     * configured.
+     */
+    @Test
+    public void testBasicConfig()
+        throws Exception {
+
+        DataChannelFactory dcFactory =
+            DataChannelFactoryBuilder.construct(
+                ReplicationNetworkConfig.createDefault());
+
+        initDispatcher(dcFactory, null);
+
+        DataChannel channel =
+            dcFactory.connect(dispAddr, new ConnectOptions());
+
+        ServiceDispatcher.doServiceHandshake(channel, SERVICE_NAME);
+    }
+
+    /**
+     * Try authentication with our test password authentication
+     * implementation.
+ */ + @Test + public void testPwAuth() + throws Exception { + + AuthenticationMethod[] authInfo = + new AuthenticationMethod[] { new TestPasswordAuthentication( + new TestPasswordSource("hello")) }; + + DataChannelFactory dcFactory = + DataChannelFactoryBuilder.construct( + ReplicationNetworkConfig.createDefault()); + + initDispatcher(dcFactory, authInfo); + + DataChannel channel = dcFactory.connect(dispAddr, new ConnectOptions()); + + ServiceDispatcher.doServiceHandshake(channel, SERVICE_NAME, authInfo); + } + + /** + * Test no authentication provided when authentication is required. + */ + @Test + public void testNoAuthProvided() + throws Exception { + + ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.createDefault(); + + AuthenticationMethod[] authInfo = + new AuthenticationMethod[] { new TestPasswordAuthentication( + new TestPasswordSource("hello")) }; + + DataChannelFactory dcFactory = + DataChannelFactoryBuilder.construct(repNetConfig); + + initDispatcher(dcFactory, authInfo); + + DataChannel channel = dcFactory.connect(dispAddr, new ConnectOptions()); + + try { + ServiceDispatcher.doServiceHandshake(channel, SERVICE_NAME); + fail("expected exception"); + } catch (ServiceConnectFailedException e) { + /* expected exception */ + } + } + + /** + * Test no common authentication provided when authentication is required. + */ + @Test + public void testNoCommonAuth() + throws Exception { + + ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.createDefault(); + + AuthenticationMethod[] authInfo = + new AuthenticationMethod[] { new TestPasswordAuthentication( + new TestPasswordSource("hello")) }; + + AuthenticationMethod privateAuth = + new TestPasswordAuthentication(new TestPasswordSource("hello")) { + + @Override + public String getMechanismName() { + return "private"; + } + }; + + AuthenticationMethod[] privateAuthInfo = + new AuthenticationMethod[] { privateAuth }; + + DataChannelFactory dcFactory = + DataChannelFactoryBuilder.construct(repNetConfig); + + initDispatcher(dcFactory, authInfo); + + DataChannel channel = dcFactory.connect(dispAddr, new ConnectOptions()); + + try { + ServiceDispatcher.doServiceHandshake(channel, SERVICE_NAME, + privateAuthInfo); + fail("expected exception"); + } catch (ServiceConnectFailedException e) { + /* expected exception */ + } + } + + /** + * Test failed authentication + */ + @Test + public void testfailedAuth() + throws Exception { + + ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.createDefault(); + + AuthenticationMethod[] authInfo = + new AuthenticationMethod[] { new TestPasswordAuthentication( + new TestPasswordSource("hello")) }; + + AuthenticationMethod[] badAuthInfo = + new AuthenticationMethod[] { new TestPasswordAuthentication( + new TestPasswordSource("xhello")) }; + + DataChannelFactory dcFactory = + DataChannelFactoryBuilder.construct(repNetConfig); + + initDispatcher(dcFactory, authInfo); + + DataChannel channel = dcFactory.connect(dispAddr, new ConnectOptions()); + + try { + ServiceDispatcher.doServiceHandshake(channel, SERVICE_NAME, + badAuthInfo); + fail("expected exception"); + } catch (ServiceConnectFailedException e) { + /* expected exception */ + } + } + + /** + * Test a successful subscription authentication + */ + @Test + public void testSubscriptionAuthSucc() throws Exception { + /* use a good token */ + testSubscriptionAuth(SubscriptionAuthTestHelper.TokenType.GOOD); + } + + /** + * Test a failed subscription authentication + */ + @Test + public void 
testSubscriptionAuthFail() throws Exception { + /* use a bad token */ + testSubscriptionAuth(SubscriptionAuthTestHelper.TokenType.BAD); + } + + /** + * Test a subscription authentication with empty token + */ + @Test + public void testSubscriptionAuthEmptyToken() throws Exception { + testSubscriptionAuth(SubscriptionAuthTestHelper.TokenType.EMPTY); + } + + /** + * Test a subscription authentication without any token + */ + @Test + public void testSubscriptionAuthWithoutToken() throws Exception { + testSubscriptionAuth(SubscriptionAuthTestHelper.TokenType.NONE); + } + + private void testSubscriptionAuth(SubscriptionAuthTestHelper.TokenType type) + throws Exception { + + /* create auth method with specified type of token */ + final AuthenticationMethod[] authInfo = new AuthenticationMethod[]{ + new SubscriptionAuthTestHelper(type)}; + final DataChannelFactory dcFactory = + DataChannelFactoryBuilder.construct( + ReplicationNetworkConfig.createDefault()); + + initDispatcher(dcFactory, authInfo); + + final DataChannel channel = dcFactory.connect(dispAddr, + new ConnectOptions()); + + if (type == SubscriptionAuthTestHelper.TokenType.GOOD) { + /* expect successful authentication */ + ServiceDispatcher + .doServiceHandshake(channel, SERVICE_NAME, authInfo); + return; + } + + if (type == SubscriptionAuthTestHelper.TokenType.BAD) { + /* expect failed authentication */ + try { + ServiceDispatcher + .doServiceHandshake(channel, SERVICE_NAME, authInfo); + fail("Authentication should fail"); + } catch (ServiceConnectFailedException scfe) { + assertEquals("Unexpected response", + ServiceDispatcher.Response.INVALID, + scfe.getResponse()); + } + return; + } + + if (type == SubscriptionAuthTestHelper.TokenType.EMPTY || + type == SubscriptionAuthTestHelper.TokenType.NONE) { + + /* expect failed authentication */ + try { + ServiceDispatcher + .doServiceHandshake(channel, SERVICE_NAME, authInfo); + fail("Authentication should fail"); + } catch (IOException ioe) { + /* expected ioe due to empty or null token */ + } + } + } + + private void initDispatcher(DataChannelFactory channelFactory, + AuthenticationMethod[] authInfo) + throws Exception { + + FreePortLocator locator = new FreePortLocator("localhost", 5000, 6000); + int freePort = locator.next(); + + dispAddr = new InetSocketAddress("localhost", freePort); + dispatcher = new ServiceDispatcher(dispAddr, channelFactory); + dispatcher.addTestAuthentication(authInfo); + final BlockingQueue serviceQueue = new + LinkedBlockingQueue<>(); + dispatcher.register(SERVICE_NAME, serviceQueue); + dispatcher.start(); + } + + private class TestPasswordSource implements PasswordSource { + private final String password; + + private TestPasswordSource(String password) { + this.password = password; + } + + @Override + public char[] getPassword() { + return password.toCharArray(); + } + } +} diff --git a/test/com/sleepycat/je/rep/utilint/LocalAliasNameService.java b/test/com/sleepycat/je/rep/utilint/LocalAliasNameService.java new file mode 100644 index 0000000..f3acc7a --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/LocalAliasNameService.java @@ -0,0 +1,238 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import java.lang.reflect.Field;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.logging.Logger;
+import sun.net.spi.nameservice.NameService;
+import sun.net.spi.nameservice.NameServiceDescriptor;
+
+import com.sleepycat.je.utilint.LoggerUtils;
+
+/**
+ * Define a JDK name service provider that can be controlled by tests to
+ * simulate DNS failures. The idea is to define dummy DNS names that translate
+ * to the loopback address, and then undefine them as needed.
+ *
+ * <p>To use this class, you need to make a few modifications to the JVM that
+ * wants to use it:
+ *
+ * <ul>
+ * <li>Tell the JDK how to find this service provider by creating a resource,
+ * available in the class path, named
+ * META-INF/services/sun.net.spi.nameservice.NameService, which
+ * contains the fully qualified name of the {@link Descriptor} class. You can
+ * do that by creating a file with that pathname relative to a component of
+ * the classpath.
+ *
+ * <li>Add the new name service provider as a second provider after the
+ * standard provider by setting the following system properties:
+ * <pre>
+ * -Dsun.net.spi.nameservice.provider.1=dns,default
+ * -Dsun.net.spi.nameservice.provider.2=dns,localalias
+ * </pre>
+ * Note that these properties need to be set on the command line so they are
+ * available at JVM startup time.
+ *
+ * <li>Disable the DNS cache so that changes made to this provider will take
+ * effect. Do this by calling {@link #setDNSCachePolicy} with 0 for both cache
+ * policies, and reverting to the original values afterwards.
+ * </ul>
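+ *
+ * <p>A minimal usage sketch (hypothetical test snippet; the host name is
+ * illustrative and assumes the provider was registered as described above):
+ * <pre>{@code
+ * int[] saved = LocalAliasNameService.setDNSCachePolicy(0, 0);
+ * try {
+ *     LocalAliasNameService.addAlias("node-under-test.example");
+ *     // Resolves through this provider while the alias is defined.
+ *     InetAddress addr = InetAddress.getByName("node-under-test.example");
+ *     LocalAliasNameService.removeAlias("node-under-test.example");
+ *     // Further lookups of the alias now fail, simulating a DNS outage.
+ * } finally {
+ *     LocalAliasNameService.setDNSCachePolicy(saved[0], saved[1]);
+ * }
+ * }</pre>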
        + * + * Although the name service provider facility is undocumented, at last check + * it appears to be supported by the J9 and icedtea JVM implementations. + */ +public class LocalAliasNameService implements NameService { + + static final Logger logger = + LoggerUtils.getLoggerFixedPrefix(LocalAliasNameService.class, "Test"); + + /* Only referenced by getLocalHost */ + private static InetAddress localHost = null; + private static boolean computingLocalHost = false; + + /** + * The service descriptor that defines {@code LocalAliasNameService} as a + * "dns" provider named "localalias". + */ + public static class Descriptor implements NameServiceDescriptor { + + @Override + public NameService createNameService() { + return new LocalAliasNameService(); + } + @Override + public String getProviderName() { + return "localalias"; + } + @Override + public String getType() { + return "dns"; + } + } + + private static final Set aliases = + Collections.synchronizedSet(new HashSet()); + + private LocalAliasNameService() { + logger.info("Created LocalAliasNameService"); + } + + /** + * Add a new alias for the loopback address. + * + * @param alias the new alias + */ + public static void addAlias(String alias) { + logger.info("LocalAliasNameService.addAlias: " + alias); + aliases.add(alias); + } + + /** + * Remove an alias for the loopback address. + * + * @param alias the alias to remove + */ + public static void removeAlias(String alias) { + logger.info("LocalAliasNameService.removeAlias: " + alias); + aliases.remove(alias); + } + + /** + * Remove all aliases for the loopback address. + */ + public static void clearAllAliases() { + logger.info("LocalAliasNameService.clearAllAliases"); + aliases.clear(); + } + + /** + * Use reflection to set the internal DNS cache policy. The {@link + * InetAddress} class documents security properties for controlling this + * (networkaddress.cache.ttl and networkaddress.cache.negative.ttl), but + * those settings only take effect when they are specified before any + * address lookups are performed. The JUnit test infrastructure does + * address lookups before testing starts, so the security properties are + * not effective. Instead, use reflection to set the field values + * directly. + * + *

        The cache policy values specify the time in milliseconds that the + * cache should remain valid, with 0 meaning don't cache and -1 meaning + * cache stays valid forever. + * + * @param positive the cache policy for successful lookups + * @param negative the cache policy for unsuccessful lookups + * @return an array of the previous cache policies, with the positive cache + * value appearing first + */ + public static int[] setDNSCachePolicy(int positive, int negative) { + try { + Class cachePolicyClass + = Class.forName("sun.net.InetAddressCachePolicy"); + Field positiveField = + cachePolicyClass.getDeclaredField("cachePolicy"); + positiveField.setAccessible(true); + Field negativeField = + cachePolicyClass.getDeclaredField("negativeCachePolicy"); + negativeField.setAccessible(true); + int[] result = new int[2]; + synchronized (cachePolicyClass) { + result[0] = positiveField.getInt(null); + result[1] = negativeField.getInt(null); + positiveField.setInt(null, positive); + negativeField.setInt(null, negative); + } + return result; + } catch (Exception e) { + throw new RuntimeException( + "Unexpected exception when setting DNS cache: " + e, e); + } + } + + /** + * This implementation returns the loopback address for hosts that match a + * current alias. + */ + @Override + public InetAddress[] lookupAllHostAddr(String host) + throws UnknownHostException { + + if (!aliases.contains(host)) { + logger.info("LocalAliasNameService.lookupAllHostAddr:" + + " Unknown host: " + host); + throw new UnknownHostException("Unknown host: " + host); + } + final InetAddress lh = getLocalHost(); + logger.info("LocalAliasNameService.lookupAllHostAddr:" + host + + " => " + lh); + return new InetAddress[] { lh }; + } + + /** + * Compute the local host if needed, avoiding circularities. The main name + * service provider should provide the local host when called from + * InetAddress.getLocalHost, so it should be OK to throw + * UnknownHostException if this method is called recursively. + */ + private static synchronized InetAddress getLocalHost() + throws UnknownHostException { + + if (localHost == null) { + if (computingLocalHost) { + throw new UnknownHostException("Local host"); + } + computingLocalHost = true; + try { + localHost = InetAddress.getLocalHost(); + } finally { + computingLocalHost = false; + } + } + return localHost; + } + + /** + * This implementation returns one of the current aliases if the argument + * matches the loopback address and there is at least one alias. 
+     */
+    @Override
+    public String getHostByAddr(byte[] addr)
+        throws UnknownHostException {
+
+        final InetAddress inetAddr = InetAddress.getByAddress(addr);
+        if (!getLocalHost().equals(inetAddr)) {
+            logger.info("LocalAliasNameService.getHostByAddr:" +
+                        " No mapping for address");
+            throw new UnknownHostException("No mapping for address");
+        }
+        synchronized (aliases) {
+            final Iterator<String> iter = aliases.iterator();
+            if (iter.hasNext()) {
+                String hostname = iter.next();
+                logger.info("LocalAliasNameService.getHostByAddr: " +
+                            hostname);
+                return hostname;
+            }
+        }
+        logger.info("LocalAliasNameService.getHostByAddr:" +
+                    " No mapping for address");
+        throw new UnknownHostException("No mapping for address");
+    }
+}
diff --git a/test/com/sleepycat/je/rep/utilint/RepTestUtils.java b/test/com/sleepycat/je/rep/utilint/RepTestUtils.java
new file mode 100644
index 0000000..d734e70
--- /dev/null
+++ b/test/com/sleepycat/je/rep/utilint/RepTestUtils.java
@@ -0,0 +1,2155 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.utilint;
+
+import static com.sleepycat.je.rep.NoConsistencyRequiredPolicy.NO_CONSISTENCY;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.bind.tuple.TupleInput;
+import com.sleepycat.je.CommitToken;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Durability.ReplicaAckPolicy;
+import com.sleepycat.je.Durability.SyncPolicy;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.Get;
+import com.sleepycat.je.OperationResult;
+import com.sleepycat.je.ReplicaConsistencyPolicy;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.cleaner.VerifyUtils;
+import com.sleepycat.je.config.ConfigParam;
+import com.sleepycat.je.dbi.DbType;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.dbi.TTL;
+import com.sleepycat.je.log.FileManager;
+import com.sleepycat.je.rep.CommitPointConsistencyPolicy;
+import com.sleepycat.je.rep.GroupShutdownException;
+import com.sleepycat.je.rep.InsufficientLogException;
+import com.sleepycat.je.rep.InsufficientReplicasException;
+import com.sleepycat.je.rep.NetworkRestore;
+import com.sleepycat.je.rep.NetworkRestoreConfig;
+import com.sleepycat.je.rep.NodeType;
+import com.sleepycat.je.rep.QuorumPolicy;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicaConsistencyException;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.ReplicationNetworkConfig;
+import com.sleepycat.je.rep.RollbackException;
+import com.sleepycat.je.rep.UnknownMasterException;
+import com.sleepycat.je.rep.elections.Acceptor;
+import com.sleepycat.je.rep.elections.Learner;
+import com.sleepycat.je.rep.impl.PointConsistencyPolicy;
+import com.sleepycat.je.rep.impl.RepGroupDB;
+import com.sleepycat.je.rep.impl.RepGroupDB.GroupBinding;
+import com.sleepycat.je.rep.impl.RepGroupDB.NodeBinding;
+import com.sleepycat.je.rep.impl.RepGroupImpl;
+import com.sleepycat.je.rep.impl.RepImpl;
+import com.sleepycat.je.rep.impl.RepNodeImpl;
+import com.sleepycat.je.rep.impl.RepParams;
+import com.sleepycat.je.rep.impl.RepTestBase;
+import com.sleepycat.je.rep.impl.node.FeederManager;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.impl.node.RepNode;
+import com.sleepycat.je.rep.stream.FeederReader;
+import com.sleepycat.je.rep.stream.OutputWireRecord;
+import com.sleepycat.je.rep.stream.ReplicaFeederSyncup.TestHook;
+import com.sleepycat.je.rep.utilint.RepUtils.ConsistencyPolicyFormat;
+import com.sleepycat.je.rep.vlsn.VLSNIndex;
+import com.sleepycat.je.rep.vlsn.VLSNRange;
+import com.sleepycat.je.util.DbBackup;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.SharedTestUtils;
+
+import org.junit.Assert;
+
+/**
+ * Static utility methods and instances for replication unit tests.
+ *
+ * Examples of useful constructs here are methods that:

+ * <ul>
+ * <li>Create multiple environment directories suitable for unit testing
+ * a set of replicated nodes.</li>
+ * <li>Create a router config that is initialized with exception and event
+ * listeners that will dump asynchronous exceptions to stderr, and which
+ * can be conditionalized to ignore exceptions at certain points when the
+ * test expects a disconnected node or other error condition.</li>
+ * <li>Compare two environments to see if they have a common replication
+ * stream.</li>
+ * <li>etc.</li>
+ * </ul>
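+ *
+ * A typical flow combining these helpers, sketched for illustration (the
+ * method names are from this class; error handling is elided):
+ * <pre>{@code
+ *   File envRoot = SharedTestUtils.getTestDir();
+ *   RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3);
+ *   ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+ *   try {
+ *       // exercise the group via the master and the replicas
+ *   } finally {
+ *       RepTestUtils.shutdownRepEnvs(repEnvInfo);
+ *   }
+ * }</pre>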
        + */ +public class RepTestUtils { + + public static final String TEST_HOST = "localhost"; + private static final String REPDIR = "rep"; + public static final String TEST_REP_GROUP_NAME = "UnitTestGroup"; + private static final String[] BUP_SUFFIXES = { FileManager.BUP_SUFFIX }; + + /* + * If -DoverridePort= is set, then replication groups will be + * set up with this default port value. + */ + public static final String OVERRIDE_PORT = "overridePort"; + + /* + * If -DlongTimeout is true, then this test will run with very long + * timeouts, to make interactive debugging easier. + */ + private static final boolean longTimeout = + Boolean.getBoolean("longTimeout"); + + public static final int MINUTE_MS = 60 * 1000; + + /* Time to wait for each node to start up and join the group. */ + private static final long JOIN_WAIT_TIME = 20000; + + /* The basis for varying log file size */ + private static int envCount = 1; + + /* Convenient constants */ + + public final static Durability SYNC_SYNC_ALL_DURABILITY = + new Durability(Durability.SyncPolicy.SYNC, + Durability.SyncPolicy.SYNC, + Durability.ReplicaAckPolicy.ALL); + + public final static Durability SYNC_SYNC_NONE_DURABILITY = + new Durability(Durability.SyncPolicy.SYNC, + Durability.SyncPolicy.SYNC, + Durability.ReplicaAckPolicy.NONE); + + public final static Durability WNSYNC_NONE_DURABILITY = + new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.NONE); + + public static final Durability DEFAULT_DURABILITY = + new Durability(Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.SyncPolicy.WRITE_NO_SYNC, + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); + + public final static TransactionConfig SYNC_SYNC_ALL_TC = + new TransactionConfig().setDurability(SYNC_SYNC_ALL_DURABILITY); + + public final static TransactionConfig SYNC_SYNC_NONE_TC = + new TransactionConfig().setDurability(SYNC_SYNC_NONE_DURABILITY); + + public final static TransactionConfig WNSYNC_NONE_TC = + new TransactionConfig().setDurability(WNSYNC_NONE_DURABILITY); + + public final static TransactionConfig DEFAULT_TC = + new TransactionConfig().setDurability(DEFAULT_DURABILITY); + + public static final TransactionConfig NO_CONSISTENCY_TC = + new TransactionConfig() + .setConsistencyPolicy(NO_CONSISTENCY) + .setDurability(SYNC_SYNC_NONE_DURABILITY); + + public static File[] getRepEnvDirs(File envRoot, int nNodes) { + File envDirs[] = new File[nNodes]; + for (int i = 0; i < nNodes; i++) { + envDirs[i] = new File(envRoot, RepTestUtils.REPDIR + i); + } + return envDirs; + } + + /** + * Create nNode directories within the envRoot directory nodes, for housing + * a set of replicated environments. Each directory will be named + * /rep#, i.e /rep1, /rep2, etc. + */ + public static File[] makeRepEnvDirs(File envRoot, int nNodes) + throws IOException { + + File[] envHomes = new File[nNodes]; + for (int i = 0; i < nNodes; i++) { + envHomes[i] = makeRepEnvDir(envRoot, i); + } + return envHomes; + } + + /** + * Create a directory within the envRoot directory nodes for housing a + * single replicated environment. The directory will be named + * /rep + */ + public static File makeRepEnvDir(File envRoot, int i) + throws IOException { + + File jeProperties = new File(envRoot, "je.properties"); + File envHome = new File(envRoot, REPDIR + i); + envHome.mkdir(); + + /* Copy the test je.properties into the new directories. 
*/ + File repProperties = new File(envHome, "je.properties"); + FileInputStream from = null; + FileOutputStream to = null; + try { + try { + from = new FileInputStream(jeProperties); + } catch (FileNotFoundException e) { + jeProperties.createNewFile(); + + from = new FileInputStream(jeProperties); + } + to = new FileOutputStream(repProperties); + byte[] buffer = new byte[4096]; + int bytesRead; + + while ((bytesRead = from.read(buffer)) != -1) { + to.write(buffer, 0, bytesRead); + } + } finally { + if (from != null) { + try { + from.close(); + } catch (IOException ignore) { + } + } + if (to != null) { + try { + to.close(); + } catch (IOException ignore) { + } + } + } + + return envHome; + } + + /* Create the sub directories for replicated Environments. */ + public static void createRepSubDirs(RepEnvInfo[] repEnvInfo, + int subDirNumber) { + for (RepEnvInfo envInfo : repEnvInfo) { + if (envInfo != null) { + TestUtils.createEnvHomeWithSubDir + (envInfo.getEnvHome(), subDirNumber); + } + } + } + + /* Remove the sub directories inside the replicated Environment home. */ + public static void removeRepSubDirs(RepEnvInfo[] repEnvInfo) { + for (RepEnvInfo envInfo : repEnvInfo) { + if (envInfo != null) { + TestUtils.removeSubDirs(envInfo.getEnvHome()); + } + } + } + + /** Convenience method to {@link #removeRepEnv} multiple nodes. */ + public static void removeRepDirs(RepEnvInfo... repEnvInfo) { + for (RepEnvInfo envInfo : repEnvInfo) { + if (envInfo != null) { + removeRepEnv(envInfo.getEnvHome()); + } + } + } + + /** + * Remove all the log files in the /rep* directories directory. + */ + public static void removeRepEnvironments(File envRoot) { + File[] repEnvs = envRoot.listFiles(); + for (File repEnv : repEnvs) { + if (repEnv.isDirectory()) { + removeRepEnv(repEnv); + } + } + removeRepEnv(envRoot); + } + + /** Removes log/lck/bkp files from a single env home directory. */ + public static void removeRepEnv(File envHome) { + TestUtils.removeLogFiles("removeRepEnv", envHome, false); + new File(envHome, "je.lck").delete(); + removeBackupFiles(envHome); + } + + private static void removeBackupFiles(File repEnv) { + for (String fileName : + FileManager.listFiles(repEnv, BUP_SUFFIXES, false)) { + new File(repEnv, fileName).delete(); + } + } + + /** + * Create an array of environments, with basically the same environment + * configuration. + */ + + public static RepEnvInfo[] setupEnvInfos(File envRoot, int nNodes) + throws IOException { + + return setupEnvInfos(envRoot, nNodes, DEFAULT_DURABILITY); + } + + /** + * Fill in an array of environments, with basically the same environment + * configuration. Only fill in the array slots which are null. Used to + * initialize semi-populated set of environments. 
+ * @throws IOException + */ + public static RepEnvInfo[] setupEnvInfos(File envRoot, + int nNodes, + Durability envDurability) + throws IOException { + + File[] envHomes = makeRepEnvDirs(envRoot, nNodes); + RepEnvInfo[] repEnvInfo = new RepEnvInfo[envHomes.length]; + + for (int i = 0; i < repEnvInfo.length; i++) { + repEnvInfo[i] = setupEnvInfo(envHomes[i], + envDurability, + (short) (i + 1), // nodeId + repEnvInfo[0]); + } + return repEnvInfo; + } + + public static RepEnvInfo[] setupEnvInfos(File envRoot, + int nNodes, + EnvironmentConfig envConfig) + throws IOException { + + return setupEnvInfos(envRoot, nNodes, envConfig, null); + } + + public static RepEnvInfo[] setupEnvInfos(File envRoot, + int nNodes, + EnvironmentConfig envConfig, + ReplicationConfig repConfig) + throws IOException { + + File[] envdirs = makeRepEnvDirs(envRoot, nNodes); + RepEnvInfo[] repEnvInfo = new RepEnvInfo[envdirs.length]; + + for (int i = 0; i < repEnvInfo.length; i++) { + repEnvInfo[i] = setupEnvInfo(envdirs[i], + envConfig.clone(), + createRepConfig(repConfig, i + 1), + repEnvInfo[0]); + } + return repEnvInfo; + } + + /** + * Adds an additional replicated environment to the specified list and + * returns the extended list. Uses the ReplicationConfig from the first + * initial node as the basis for the new node. + */ + public static RepEnvInfo[] setupExtendEnvInfo( + final RepEnvInfo[] initialEnvInfo, + final int nNodes) + throws IOException { + + final int initialNodesCount = initialEnvInfo.length; + final RepEnvInfo[] result = + Arrays.copyOf(initialEnvInfo, initialNodesCount + nNodes); + final File envRoot = initialEnvInfo[0].getEnvHome().getParentFile(); + final ReplicationConfig baseRepConfig = + initialEnvInfo[0].getRepConfig(); + for (int i = 0; i < nNodes; i++) { + final int pos = initialNodesCount + i; + result[pos] = setupEnvInfo(makeRepEnvDir(envRoot, pos), + createEnvConfig(DEFAULT_DURABILITY), + createRepConfig(baseRepConfig, pos + 1), + initialEnvInfo[0]); + } + return result; + } + + /** + * Create info for a single replicated environment. + */ + public static RepEnvInfo setupEnvInfo(File envHome, + Durability envDurability, + int nodeId, + RepEnvInfo helper) { + + EnvironmentConfig envConfig = createEnvConfig(envDurability); + return setupEnvInfo(envHome, envConfig, nodeId, helper); + } + + /** + * Create info for a single replicated environment. + */ + public static RepEnvInfo setupEnvInfo(File envHome, + EnvironmentConfig envConfig, + int nodeId, + RepEnvInfo helper) { + return setupEnvInfo(envHome, + envConfig, + createRepConfig(nodeId), + helper); + } + + /** + * Create info for a single replicated environment. + */ + public static RepEnvInfo setupEnvInfo(File envHome, + EnvironmentConfig envConfig, + ReplicationConfig repConfig, + RepEnvInfo helper) { + + /* + * Give all the environments the same environment configuration. + * + * If the file size is not set by the calling test, stagger their log + * file length to give them slightly different logs and VLSNs. Disable + * parameter validation because we want to make the log file length + * smaller than the minimums, for testing. + */ + if (!envConfig.isConfigParamSet(EnvironmentConfig.LOG_FILE_MAX)) { + DbInternal.disableParameterValidation(envConfig); + /* Vary the file size */ + long fileLen = ((envCount++ % 100) + 1) * 10000; + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, + Long.toString(fileLen)); + } + + repConfig.setHelperHosts((helper == null) ? 
+ repConfig.getNodeHostPort() : + helper.getRepConfig().getNodeHostPort()); + + /* + * If -DlongTimeout is true, then this test will run with very long + * timeouts, to make interactive debugging easier. + */ + if (longTimeout) { + setLongTimeouts(repConfig); + } + + /* + * If -DlongAckTimeout is true, then the test will set the + * REPLICA_TIMEOUT to 50secs. + */ + if (Boolean.getBoolean("longAckTimeout")) { + repConfig.setReplicaAckTimeout(50, TimeUnit.SECONDS); + } + return new RepEnvInfo(envHome, repConfig, envConfig); + } + + public static EnvironmentConfig createEnvConfig(Durability envDurability) { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(envDurability); + + /* + * Replicated tests use multiple environments, configure shared cache + * to reduce the memory consumption. + */ + envConfig.setSharedCache(true); + + return envConfig; + } + + /** + * Create a test RepConfig for the node with the specified id. Note that + * the helper is not configured. + */ + public static ReplicationConfig createRepConfig(int nodeId) + throws NumberFormatException, IllegalArgumentException { + + return createRepConfig(null, nodeId); + } + + private static int getDefaultPort() { + return Integer.getInteger + (OVERRIDE_PORT, + Integer.parseInt(RepParams.DEFAULT_PORT.getDefault())); + } + + /** + * Create a test RepConfig for the node with the specified id, using the + * specified repConfig. The repConfig may have other parameters set + * already. Note that the helper is not configured. + */ + public static + ReplicationConfig createRepConfig(ReplicationConfig repConfig, + int nodeId) + throws NumberFormatException, IllegalArgumentException { + + ReplicationConfig filledInConfig = + repConfig == null ? new ReplicationConfig() : repConfig.clone(); + + final int firstPort = getDefaultPort(); + filledInConfig.setConfigParam + (RepParams.ENV_SETUP_TIMEOUT.getName(), "60 s"); + filledInConfig.setConfigParam + (ReplicationConfig.ENV_CONSISTENCY_TIMEOUT, "60 s"); + filledInConfig.setGroupName(TEST_REP_GROUP_NAME); + filledInConfig.setNodeName("Node" + nodeId); + String nodeHost = TEST_HOST + ":" + (firstPort + (nodeId - 1)); + filledInConfig.setNodeHostPort(nodeHost); + + /* Minimize socket bind exceptions in tests. */ + filledInConfig.setConfigParam(RepParams.SO_REUSEADDR.getName(), + "true"); + filledInConfig.setConfigParam(RepParams.SO_BIND_WAIT_MS.getName(), + "150000"); + return filledInConfig; + } + + public static ReplicationNetworkConfig readRepNetConfig() { + /* Call to force class loading and parameter registration */ + ReplicationNetworkConfig.registerParams(); + return ReplicationNetworkConfig.create(readNetProps()); + } + + public static Properties readNetProps() { + final File propFile = + new File(SharedTestUtils.getTestDir(), "je.properties"); + final Properties props = new Properties(); + RepUtils.populateNetProps(props, propFile); + return props; + } + + /** + * Set timeouts to long intervals for debugging interactively + */ + public static void setLongTimeouts(ReplicationConfig repConfig) { + + RepInternal.disableParameterValidation(repConfig); + + /* Wait an hour for this node to join the group.*/ + repConfig.setConfigParam(RepParams.ENV_SETUP_TIMEOUT.getName(), + "1 h"); + repConfig.setConfigParam(ReplicationConfig.ENV_CONSISTENCY_TIMEOUT, + "1 h"); + + /* Wait an hour for replica acks. 
*/ + repConfig.setConfigParam(ReplicationConfig.REPLICA_ACK_TIMEOUT, + "1 h"); + + /* Have a heartbeat every five minutes. */ + repConfig.setConfigParam(RepParams.HEARTBEAT_INTERVAL.getName(), + "5 min"); + } + + /** + * Shuts down the environments with a checkpoint at the end. + * + * @param repEnvInfo the environments to be shutdown + */ + public static void shutdownRepEnvs(RepEnvInfo... repEnvInfo) { + + shutdownRepEnvs(repEnvInfo, true); + } + + /** + * Shut down the environment, with an optional checkpoint. It sequences the + * shutdown so that all replicas are shutdown before the master. This + * sequencing avoids side-effects in the tests where shutting down the + * master first results in elections and one of the "to be shutdown" + * replicas becomes a master and so on. + *

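+     * For example, a test that wants to close a whole group without the
+     * final checkpoint can use the two-argument form (illustrative):
+     * <pre>{@code
+     *   RepTestUtils.shutdownRepEnvs(repEnvInfo, false);
+     * }</pre>
+     *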
        + * If an exception occurs for any reason while closing one env, other envs + * may be left open. TODO: Determine if this behavior is really desired. + * + * @param repEnvInfo the environments to be shutdown + * + * @param doCheckpoint whether do a checkpoint at the end of close + */ + public static void shutdownRepEnvs(RepEnvInfo[] repEnvInfo, + boolean doCheckpoint) { + + if (repEnvInfo == null) { + return; + } + + RepEnvInfo master = null; + for (RepEnvInfo ri : repEnvInfo) { + if ((ri.repEnv == null) || !ri.repEnv.isValid()) { + ri.repEnv = null; + continue; + } + if (ri.repEnv.getState().isMaster()) { + if (master != null) { + throw new IllegalStateException + ("Multiple masters: " + master.getEnv().getNodeName() + + " and " + ri.repEnv.getNodeName() + + " are both masters."); + } + master = ri; + } else { + try { + if (doCheckpoint) { + RepImpl repImpl = + RepInternal.getNonNullRepImpl(ri.repEnv); + ri.repEnv.close(); + if (!repImpl.isClosed()) { + throw new IllegalStateException + ("Environment: " + ri.getEnvHome() + + " not released"); + } + } else { + RepInternal.getNonNullRepImpl(ri.repEnv).close(false); + } + } finally { + ri.repEnv = null; + } + } + } + + if (master != null) { + try { + if (doCheckpoint) { + master.getEnv().close(); + } else { + RepInternal.getNonNullRepImpl(master.getEnv()). + close(false); + } + } finally { + master.repEnv = null; + } + } + } + + /** + * All the non-closed, non-null environments in the array join the group. + * @return the replicator who is the master. + */ + public static ReplicatedEnvironment + openRepEnvsJoin(RepEnvInfo[] repEnvInfo) { + + return joinGroup(getOpenRepEnvs(repEnvInfo)); + } + + /* Get open replicated environments from an array. */ + public static RepEnvInfo[] getOpenRepEnvs(RepEnvInfo[] repEnvInfo) { + Set repSet = new HashSet(); + for (RepEnvInfo ri : repEnvInfo) { + if ((ri != null) && + (ri.getEnv() != null) && + ri.getEnv().isValid()) { + repSet.add(ri); + } + } + + return repSet.toArray(new RepEnvInfo[repSet.size()]); + } + + /** + * Environment handles are created using the config information in + * repEnvInfo. Note that since this method causes handles to be created + * serially, it cannot be used to restart an existing group from scratch. + * It can only be used to start a new group, or have nodes join a group + * that is already active. + * + * @return the replicated environment associated with the master. + */ + public static + ReplicatedEnvironment joinGroup(RepEnvInfo ... repEnvInfo) { + + int retries = 10; + final int retryWaitMillis = 5000; + ReplicatedEnvironment master = null; + List joinNotFinished = + new LinkedList(Arrays.asList(repEnvInfo)); + + while (joinNotFinished.size() != 0) { + for (RepEnvInfo ri : joinNotFinished) { + try { + ReplicatedEnvironment.State joinState; + if (ri.getEnv() != null) { + + /* + * Handle exists, make sure it's not in UNKNOWN state. + */ + RepImpl rimpl = + RepInternal.getNonNullRepImpl(ri.getEnv()); + joinState = rimpl.getState(); + Assert.assertFalse( + "Node " + ri.getEnv().getNodeName() + + " was detached", + joinState.equals(State.DETACHED)); + } else { + joinState = ri.openEnv().getState(); + } + + if (joinState.equals(State.MASTER)) { + if (master != null) { + if (--retries > 0) { + Thread.sleep(retryWaitMillis); + + /* + * Start over. The master is slow making its + * transition, one of them has not realized + * that they are no longer the master. 
+ */ + joinNotFinished = new LinkedList + (Arrays.asList(repEnvInfo)); + master = null; + break; + } + throw new RuntimeException + ("Dual masters: " + ri.getEnv().getNodeName() + + " and " + + master.getNodeName() + " despite retries"); + } + master = ri.getEnv(); + } + joinNotFinished.remove(ri); + if ((joinNotFinished.size() == 0) && (master == null)) { + if (--retries == 0) { + throw new RuntimeException + ("No master established despite retries"); + } + Thread.sleep(retryWaitMillis); + /* Start over, an election is still in progress. */ + joinNotFinished = new LinkedList + (Arrays.asList(repEnvInfo)); + } + break; + } catch (UnknownMasterException retry) { + /* Just retry. */ + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + return master; + } + + /** + * Used to ensure that the entire group is in sync, that is, all replicas + * are consistent with the master's last commit. Note that it requires all + * the nodes in the replication group to be available. + * + * @param repEnvInfo the array holding the environments + * @param numSyncNodes the expected number of nodes to be synced; includes + * the master + * @throws InterruptedException + */ + public static VLSN syncGroupToLastCommit(RepEnvInfo[] repEnvInfo, + int numSyncNodes) + throws InterruptedException { + + CommitToken masterCommitToken = null; + + /* + * Create a transaction just to make sure all the replicas are awake + * and connected. + */ + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if (rep.getState().isMaster()) { + try { + Transaction txn = + rep. + beginTransaction(null, RepTestUtils.SYNC_SYNC_ALL_TC); + txn.commit(); + } catch (InsufficientReplicasException e) { + if (e.getAvailableReplicas().size() != (numSyncNodes-1)) { + throw new IllegalStateException + ("Expected replicas: " + (numSyncNodes - 1) + + "available replicas: " + + e.getAvailableReplicas()); + } + } + + /* + * Handshakes with all replicas are now completed, if they were + * not before. Now get a token to represent the last committed + * point in the replication stream, from the master's + * perspective. + */ + RepNode repNode = + RepInternal.getNonNullRepImpl(rep).getRepNode(); + masterCommitToken = new CommitToken + (repNode.getUUID(), + repNode.getCurrentTxnEndVLSN().getSequence()); + break; + } + } + + if (masterCommitToken == null) { + throw new IllegalStateException("No current master"); + } + + CommitPointConsistencyPolicy policy = + new CommitPointConsistencyPolicy(masterCommitToken, MINUTE_MS, + TimeUnit.MILLISECONDS); + + /* + * Check that the environments are caught up with the last master + * commit at the time of the call to this method. + */ + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if ((rep == null) || + !rep.isValid() || + rep.getState().isMaster() || + rep.getState().isDetached()) { + continue; + } + policy.ensureConsistency(RepInternal.getNonNullRepImpl(rep)); + } + return new VLSN(masterCommitToken.getVLSN()); + } + + /** + * Used to ensure that the group is in sync with respect to a given + * VLSN. If numSyncNodes == repEnvInfo.length, all the nodes in the + * replication group must be alive and available. If numSyncNodes is less + * than the size of the group, a quorum will need to be alive and + * available. 
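+     *
+     * For example, a test that wants every replica consistent with the
+     * master's last commit before comparing nodes might use the related
+     * helpers as follows (illustrative sketch; exceptions elided):
+     * <pre>{@code
+     *   VLSN limit = RepTestUtils.syncGroupToLastCommit(repEnvInfo,
+     *                                                   repEnvInfo.length);
+     *   RepTestUtils.checkNodeEquality(limit, false, repEnvInfo);
+     * }</pre>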
+ * + * @param repEnvInfo the array holding the environments + * @param numSyncNodes the expected number of nodes to be synced; includes + * the master + * @throws InterruptedException + */ + public static void syncGroupToVLSN(RepEnvInfo[] repEnvInfo, + int numSyncNodes, + VLSN targetVLSN) + throws InterruptedException { + + /* + * Create a transaction just to make sure all the replicas are awake + * and connected. + */ + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if (rep == null) { + continue; + } + + if (rep.getState().isMaster()) { + TransactionConfig txnConfig = null; + if (numSyncNodes == repEnvInfo.length) { + txnConfig = RepTestUtils.SYNC_SYNC_ALL_TC; + } else { + txnConfig = new TransactionConfig(); + txnConfig.setDurability + (new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.SIMPLE_MAJORITY)); + } + + try { + Transaction txn = rep.beginTransaction(null, txnConfig); + txn.commit(); + } catch (InsufficientReplicasException e) { + if (e.getAvailableReplicas().size() != + (numSyncNodes - 1)) { + throw new IllegalStateException + ("Expected replicas: " + (numSyncNodes - 1) + + ", available replicas: " + + e.getAvailableReplicas()); + } + } + } + } + + syncGroup(repEnvInfo, targetVLSN); + } + + /* Syncs the group to the specific VLSN. */ + private static void syncGroup(RepEnvInfo[] repEnvInfo, VLSN targetVLSN) + throws InterruptedException { + PointConsistencyPolicy policy = new PointConsistencyPolicy(targetVLSN); + + /* Check that the environments are caught up with this VLSN. */ + for (RepEnvInfo repi : repEnvInfo) { + ReplicatedEnvironment rep = repi.getEnv(); + if (rep == null || + !rep.isValid() || + rep.getState().isMaster()) { + continue; + } + policy.ensureConsistency(RepInternal.getNonNullRepImpl(rep)); + } + } + + /** + * Synchronizes the group to the current vlsn on the master. Used to ensure + * that application level changes, even mid-transaction changes have been + * replicated to all the nodes before the method returns. + * + * Note that since CBVLSN updates are asynchronous the vlsn may continue + * moving forward, but the application level changes will have been + * propagated. + */ + public static VLSN syncGroup(RepEnvInfo[] repEnvInfo) { + RepEnvInfo master = RepTestBase.findMaster(repEnvInfo); + if (master == null) { + throw new IllegalStateException("no master"); + } + VLSN vlsn = master.getRepImpl().getVLSNIndex().getRange().getLast(); + try { + syncGroup(repEnvInfo, vlsn); + } catch (Exception e) { + throw new IllegalStateException("unexpected exception"); + } + return vlsn; + } + + public static void checkUtilizationProfile(RepEnvInfo ... repEnvInfo) { + checkUtilizationProfile(null, repEnvInfo); + } + + /** + * Run utilization profile checking on all databases in the set of + * RepEnvInfo. The environment must be quiescent. The utility will lock + * out any cleaning by using DbBackup, during the check. + */ + public static void checkUtilizationProfile(PrintStream out, + RepEnvInfo ... repEnvInfo) { + for (RepEnvInfo info : repEnvInfo) { + if (out != null) { + out.println("checking " + info.getEnvHome()); + } + + Environment env = info.getEnv(); + + /* Use DbBackup to prevent log file deletion. 
*/ + DbBackup backup = new DbBackup(env); + backup.startBackup(); + + try { + List dbNames = env.getDatabaseNames(); + + for (String dbName : dbNames) { + if (out != null) { + out.println("\tchecking " + dbName); + } + DatabaseConfig dbConfig = new DatabaseConfig(); + DbInternal.setUseExistingConfig(dbConfig, true); + dbConfig.setTransactional(true); + Database db = env.openDatabase(null, dbName, dbConfig); + + try { + VerifyUtils.checkLsns(db); + } finally { + db.close(); + } + } + } finally { + backup.endBackup(); + } + } + } + + /** + * Confirm that all the nodes in this group match. Check number of + * databases, names of databases, per-database count, per-database + * records. Use the master node as the reference if it exists, else use the + * first replicator. + * + * @param limit The replication stream portion of the equality check is + * bounded at the upper end by this value. Limit is usually the commit sync + * or vlsn sync point explicitly called by a test before calling + * checkNodeEquality. Each node must contain VLSNs up to and including the + * limit, and may also include additional VSLNs due to heartbeats, etc. + * + * @throws InterruptedException + * + * @throws RuntimeException if there is an incompatibility + */ + public static void checkNodeEquality(VLSN limit, + boolean verbose, + RepEnvInfo ... repEnvInfo) + throws InterruptedException { + + int referenceIndex = -1; + assert repEnvInfo.length > 0; + for (int i = 0; i < repEnvInfo.length; i++) { + if ((repEnvInfo[i] == null) || + (repEnvInfo[i].getEnv() == null)) { + continue; + } + ReplicatedEnvironment repEnv = repEnvInfo[i].getEnv(); + if (repEnv.isValid() && repEnv.getState().isMaster()) { + referenceIndex = i; + break; + } + } + assert referenceIndex != -1; + + ReplicatedEnvironment reference = repEnvInfo[referenceIndex].getEnv(); + for (int i = 0; i < repEnvInfo.length; i++) { + if (i != referenceIndex) { + if ((repEnvInfo[i] == null) || + (repEnvInfo[i].getEnv() == null)) { + continue; + } + + ReplicatedEnvironment repEnv = repEnvInfo[i].getEnv(); + if (verbose) { + System.out.println("Comparing master node " + + reference.getNodeName() + + " to node " + + repEnv.getNodeName()); + } + + if (repEnv.isValid()) { + checkNodeEquality(reference, repEnv, limit, verbose); + } + } + } + } + + /* Enable or disable the log cleaning on a replica. */ + private static void enableCleanerFileDeletion(ReplicatedEnvironment repEnv, + boolean enable) + throws InterruptedException { + + if (repEnv.isValid()) { + RepImpl repImpl = RepInternal.getNonNullRepImpl(repEnv); + repImpl.getCleaner().enableFileDeletion(enable); + Thread.sleep(100); + } + } + + /** + * Confirm that the contents of these two nodes match. Check number of + * databases, names of databases, per-database count, per-database records. + * + * @throws InterruptedException + * @throws RuntimeException if there is an incompatiblity + */ + public static void checkNodeEquality(ReplicatedEnvironment replicatorA, + ReplicatedEnvironment replicatorB, + VLSN limit, + boolean verbose) + throws InterruptedException { + + enableCleanerFileDeletion(replicatorA, false); + enableCleanerFileDeletion(replicatorB, false); + + String nodeA = replicatorA.getNodeName(); + String nodeB = replicatorB.getNodeName(); + + Environment envA = replicatorA; + Environment envB = replicatorB; + + RepImpl repImplA = RepInternal.getNonNullRepImpl(replicatorA); + RepImpl repImplB = RepInternal.getNonNullRepImpl(replicatorB); + + try { + + /* Compare the replication related sequences. 
*/ + if (verbose) { + System.out.println("Comparing sequences"); + } + + /* replicated node id sequence. */ + + /* + long nodeIdA = + envImplA.getNodeSequence().getLastReplicatedNodeId(); + long nodeIdB = + envImplB.getNodeSequence().getLastReplicatedNodeId(); + + // TEMPORARILY DISABLED: sequences not synced up. This may + // actually apply right now to database and txn ids too, + // but it's less likely to manifest itself. + if (nodeIdA != nodeIdB) { + throw new RuntimeException + ("NodeId mismatch. " + nodeA + + " lastRepNodeId=" + nodeIdA + " " + nodeB + + " lastRepNodeId=" + nodeIdB); + } + */ + + /* replicated txn id sequence. */ + + /* + long txnIdA = repImplA.getTxnManager().getLastReplicatedTxnId(); + long txnIdB = repmplB.getTxnManager().getLastReplicatedTxnId(); + if (txnIdA != txnIdB) { + throw new RuntimeException + ("TxnId mismatch. A.lastRepTxnId=" + txnIdA + + " B.lastRepTxnId=" + txnIdB); + } + */ + + /* Replicated database id sequence. */ + long dbIdA = repImplA.getDbTree().getLastReplicatedDbId(); + long dbIdB = repImplB.getDbTree().getLastReplicatedDbId(); + if (dbIdA != dbIdB) { + throw new RuntimeException + ("DbId mismatch. A.lastRepDbId=" + dbIdA + + " B.lastRepDbId=" + dbIdB); + } + + /* Check name and number of application databases first. */ + List dbListA = envA.getDatabaseNames(); + List dbListB = envB.getDatabaseNames(); + + if (verbose) { + System.out.println("envEquals: check db list: " + nodeA + + "=" + dbListA + " " + nodeB + "=" + + dbListB); + } + + if (!dbListA.equals(dbListB)) { + throw new RuntimeException("Mismatch: dbNameList " + nodeA + + " =" + dbListA + " " + + nodeB + " =" + dbListB); + } + + /* Check record count and contents of each database. */ + DatabaseConfig checkConfig = new DatabaseConfig(); + checkConfig.setReadOnly(true); + checkConfig.setTransactional(true); + DbInternal.setUseExistingConfig(checkConfig, true); + for (String dbName : dbListA) { + + Database dbA = null; + Database dbB = null; + try { + dbA = envA.openDatabase(null, dbName, checkConfig); + dbB = envB.openDatabase(null, dbName, checkConfig); + + int count = checkDbContents(dbA, dbB); + + if (verbose) { + System.out.println("compared " + count + " records"); + } + } finally { + if (dbA != null) { + dbA.close(); + } + if (dbB != null) { + dbB.close(); + } + } + } + + /* + * Check the replication stream of each environment. The subset of + * VLSN entries common to both nodes should match. + */ + checkStreamIntersection(nodeA, + nodeB, + RepInternal.getNonNullRepImpl(replicatorA), + RepInternal.getNonNullRepImpl(replicatorB), + limit, + verbose); + } catch (DatabaseException e) { + throw new RuntimeException(e); + } catch (IOException e) { + throw new RuntimeException(e); + } + + enableCleanerFileDeletion(replicatorA, true); + enableCleanerFileDeletion(replicatorB, true); + } + + /** + * @throws RuntimeException if dbA and dbB don't have the same contents. 
+ */ + private static int checkDbContents(Database dbA, Database dbB) { + + Cursor cursorA = null; + Cursor cursorB = null; + Transaction txnA = null; + Transaction txnB = null; + int debugCount = 0; + boolean isGroupDB = + dbA.getDatabaseName().equals(DbType.REP_GROUP.getInternalName()); + + try { + txnA = dbA.getEnvironment().beginTransaction(null, null); + txnB = dbB.getEnvironment().beginTransaction(null, null); + cursorA = dbA.openCursor(txnA, CursorConfig.READ_UNCOMMITTED); + cursorB = dbB.openCursor(txnB, CursorConfig.READ_UNCOMMITTED); + DatabaseEntry keyA = new DatabaseEntry(); + DatabaseEntry keyB = new DatabaseEntry(); + DatabaseEntry dataA = new DatabaseEntry(); + DatabaseEntry dataB = new DatabaseEntry(); + NodeBinding nodeBinding = null; + + OperationResult resultA; + + while ((resultA = cursorA.get(keyA, dataA, Get.NEXT, null)) + != null) { + debugCount++; + + OperationResult resultB = cursorB.get( + keyB, dataB, Get.NEXT, null); + + if (resultB == null) { + throw new RuntimeException("Mismatch: debugCount=" + + debugCount + "bad resultB"); + } + if (!Arrays.equals(keyA.getData(), keyB.getData())) { + throw new RuntimeException("Mismatch: debugCount=" + + debugCount + " keyA=" + + keyA + " keyB=" + + keyB); + + } + if (!Arrays.equals(dataA.getData(), dataB.getData())) { + if (isGroupDB && + equalsNode(dataA.getData(), dataB.getData(), + nodeBinding)) { + continue; + } + throw new RuntimeException("Mismatch: debugCount=" + + debugCount + " dataA=" + + dataA + " dataB=" + + dataB); + } + + if (resultA.getExpirationTime() != + resultB.getExpirationTime()) { + + throw new RuntimeException( + "Mismatch: debugCount=" + debugCount + + " expireA=" + + TTL.formatExpirationTime(resultA.getExpirationTime()) + + " expireB=" + + TTL.formatExpirationTime(resultB.getExpirationTime())); + } + + if (isGroupDB && + (nodeBinding == null) && + RepGroupDB.GROUP_KEY.equals( + StringBinding.entryToString(keyA))) { + final RepGroupImpl group = + new GroupBinding().entryToObject(dataA); + nodeBinding = new NodeBinding(group.getFormatVersion()); + } + } + if (cursorB.get(keyB, dataB, Get.NEXT, null) != null) { + throw new RuntimeException("Mismatch: debugCount=" + + debugCount + " keyA is missing" + + " keyB=" + keyB + + " dataB=" + dataB); + } + return debugCount; + } catch (DatabaseException e) { + throw new RuntimeException(e); + } finally { + try { + if (cursorA != null) { + cursorA.close(); + } + if (cursorB != null) { + cursorB.close(); + } + if (txnA != null) { + txnA.commit(); + } + if (txnB != null) { + txnB.commit(); + } + } catch (DatabaseException e) { + throw new RuntimeException(e); + } + } + } + + /* + * Implements a special check for group nodes which skips the syncup field. + */ + private static boolean equalsNode(byte[] data1, byte[] data2, + NodeBinding nodeBinding) { + Assert.assertNotNull("Node binding", nodeBinding); + RepNodeImpl n1 = nodeBinding.entryToObject(new TupleInput(data1)); + RepNodeImpl n2 = nodeBinding.entryToObject(new TupleInput(data2)); + return n1.equivalent(n2); + } + + /** + * @throws InterruptedException + * @throws IOException + * @throws RuntimeException if envA and envB don't have the same set of + * VLSN mappings, VLSN-tagged log entries, and replication sequences. 
+ */ + @SuppressWarnings("unused") + private static void checkStreamIntersection(String nodeA, + String nodeB, + RepImpl repA, + RepImpl repB, + VLSN limit, + boolean verbose) + throws IOException, InterruptedException { + + if (verbose) { + System.out.println("Check intersection for " + nodeA + + " and " + nodeB); + } + + VLSNIndex repAMap = repA.getVLSNIndex(); + VLSNRange repARange = repAMap.getRange(); + VLSNIndex repBMap = repB.getVLSNIndex(); + VLSNRange repBRange = repBMap.getRange(); + + /* + * Compare the vlsn ranges held on each environment and find the subset + * common to both replicas. + */ + VLSN firstA = repARange.getFirst(); + VLSN lastA = repARange.getLast(); + VLSN firstB = repBRange.getFirst(); + VLSN lastB = repBRange.getLast(); + VLSN lastSyncA = repARange.getLastSync(); + + if (lastA.compareTo(limit) < 0) { + throw new RuntimeException + ("CheckRepStream error: repA (" + repA.getNameIdPair() + + ") lastVLSN = " + lastA + + " < limit = " + limit); + } + + if (lastB.compareTo(limit) < 0) { + throw new RuntimeException + ("CheckRepStream error: repB (" + repB.getNameIdPair() + + ") lastVLSN = " + lastB + + " < limit = " + limit + ")"); + } + + /* + * Calculate the largest VLSN range starting point and the smallest + * VLSN range ending point for these two Replicators. + */ + VLSN firstLarger = (firstA.compareTo(firstB) > 0) ? firstA : firstB; + VLSN lastSmaller = (lastA.compareTo(lastB) < 0) ? lastA : lastB; + + try { + /* The two replicas can read from the larger of the first VLSNs. */ + FeederReader readerA = new FeederReader(repA, + repAMap, + DbLsn.NULL_LSN, + 100000); + readerA.initScan(firstLarger); + + FeederReader readerB = new FeederReader(repB, + repBMap, + DbLsn.NULL_LSN, + 100000); + readerB.initScan(firstLarger); + + /* They should both find the smaller of the last VLSNs. */ + for (long vlsnVal = firstLarger.getSequence(); + vlsnVal <= lastSmaller.getSequence(); + vlsnVal++) { + + OutputWireRecord wireRecordA = + readerA.scanForwards(new VLSN(vlsnVal), 0); + OutputWireRecord wireRecordB = + readerB.scanForwards(new VLSN(vlsnVal), 0); + + if (!(wireRecordA.match(wireRecordB))) { + throw new RuntimeException(nodeA + " at vlsn " + vlsnVal + + " has " + wireRecordA + " " + + nodeB + " has " + wireRecordB); + } + + /* Check that db id, node id, txn id are negative. */ + if (!repA.isRepConverted()) { + wireRecordA.verifyNegativeSequences(nodeA); + } + if (!repB.isRepConverted()) { + wireRecordB.verifyNegativeSequences(nodeB); + } + } + + if (verbose) { + System.out.println("Checked from vlsn " + firstLarger + + " to " + lastSmaller); + } + } catch (Exception e) { + e.printStackTrace(); + + System.err.println(nodeA + " vlsnMap="); + repAMap.dumpDb(true); + System.err.println(nodeB + " vlsnMap="); + repBMap.dumpDb(true); + + throw new RuntimeException(e); + } + } + + /** + * Return the number of nodes that constitute a quorum for this size + * group. This should be replaced by ReplicaAckPolicy.requiredNodes; + */ + public static int getQuorumSize(int groupSize) { + assert groupSize > 0 : "groupSize = " + groupSize; + if (groupSize == 1) { + return 1; + } else if (groupSize == 2) { + return 1; + } else { + return (groupSize/2) + 1; + } + } + + /** + * Create a rep group of a specified size on the local host, using the + * default port configuration. 
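+     *
+     * An illustrative call that builds an in-memory group of three electable
+     * nodes and one monitor (no node processes are started):
+     * <pre>{@code
+     *   RepGroupImpl group = RepTestUtils.createTestRepGroup(3, 1);
+     * }</pre>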
+ * + * @param electableNodes number of electable nodes in test group + * @param monitorNodes number of monitor nodes in test group + * + * @return the simulated test RepGroup + * + * @throws UnknownHostException + */ + public static RepGroupImpl createTestRepGroup(int electableNodes, + int monitorNodes) + throws UnknownHostException { + + return createTestRepGroup(electableNodes, monitorNodes, 0); + } + + /** + * Create a rep group of a specified size on the local host, using the + * default port configuration. + * + * @param electableNodes number of electable nodes in test group + * @param monitorNodes number of monitor nodes in test group + * @param secondaryNodes number of secondary nodes in the test group + * + * @return the simulated test RepGroup + * + * @throws UnknownHostException + */ + public static RepGroupImpl createTestRepGroup(int electableNodes, + int monitorNodes, + int secondaryNodes) + throws UnknownHostException { + + Map allNodeInfo = + new HashMap(); + final InetAddress ia = InetAddress.getLocalHost(); + int port = getDefaultPort(); + RepGroupImpl repGroup = new RepGroupImpl("TestGroup", null); + + for (int i = 1; i <= electableNodes; i++) { + allNodeInfo.put(i, new RepNodeImpl(new NameIdPair("node" + i,i), + NodeType.ELECTABLE, + true, + false, + ia.getHostName(), + port, + repGroup.getChangeVersion(), + null)); + port++; + } + for (int i = (electableNodes + 1); + i <= (electableNodes + monitorNodes); + i++) { + allNodeInfo.put(i, new RepNodeImpl(new NameIdPair("mon" + i,i), + NodeType.MONITOR, + true, + false, + ia.getHostName(), + port, + repGroup.getChangeVersion(), + null)); + port++; + } + for (int i = electableNodes + monitorNodes + 1; + i <= electableNodes + monitorNodes + secondaryNodes; + i++) { + allNodeInfo.put(i, new RepNodeImpl(new NameIdPair("sec" + i, i), + NodeType.SECONDARY, + true, + false, + ia.getHostName(), + port, + repGroup.getChangeVersion(), + null)); + port++; + } + repGroup.setNodes(allNodeInfo); + return repGroup; + } + + public static class RepEnvInfo { + private final File envHome; + private final ReplicationConfig repConfig; + private EnvironmentConfig envConfig; + private QuorumPolicy initialElectionPolicy = + QuorumPolicy.SIMPLE_MAJORITY; + + private ReplicatedEnvironment repEnv = null; + + public RepEnvInfo(File envHome, + ReplicationConfig repConfig, + EnvironmentConfig envConfig) { + super(); + this.envHome = envHome; + this.repConfig = repConfig; + this.envConfig = envConfig; + } + + public ReplicatedEnvironment openEnv() { + if (repEnv != null) { + throw new IllegalStateException("rep env already exists"); + } + + repEnv = new ReplicatedEnvironment(envHome, + getRepConfig(), + envConfig, + null, + initialElectionPolicy); + return repEnv; + } + + public ReplicatedEnvironment openEnv(ReplicaConsistencyPolicy cp) { + + if (repEnv != null) { + throw new IllegalStateException("rep env already exists"); + } + repEnv = new ReplicatedEnvironment + (envHome, getRepConfig(), envConfig, cp, + initialElectionPolicy); + return repEnv; + } + + public ReplicatedEnvironment openEnv(RepEnvInfo helper) { + + repConfig.setHelperHosts((helper == null) ? 
+ repConfig.getNodeHostPort() : + helper.getRepConfig().getNodeHostPort()); + return openEnv(); + } + + public ReplicatedEnvironment getEnv() { + return repEnv; + } + + public RepImpl getRepImpl() { + return RepInternal.getNonNullRepImpl(repEnv); + } + + public RepNode getRepNode() { + return getRepImpl().getRepNode(); + } + + public ReplicationConfig getRepConfig() { + return repConfig; + } + + public File getEnvHome() { + return envHome; + } + + public void setEnvConfig(final EnvironmentConfig envConfig) { + this.envConfig = envConfig; + } + + public EnvironmentConfig getEnvConfig() { + return envConfig; + } + + public QuorumPolicy getInitialElectionPolicy() { + return initialElectionPolicy; + } + + public void setInitialElectionPolicy( + final QuorumPolicy initialElectionPolicy) { + this.initialElectionPolicy = initialElectionPolicy; + } + + public void closeEnv() { + try { + if (repEnv != null) { + repEnv.close(); + } + } finally { + repEnv = null; + } + } + + /** + * Convenience method that guards against a NPE when checking whether + * the state of a node is MASTER. + */ + public boolean isMaster() { + return (repEnv != null) && repEnv.getState().isMaster(); + } + + /** + * Convenience method that guards against a NPE when checking whether + * the state of a node is REPLICA. + */ + public boolean isReplica() { + return (repEnv != null) && repEnv.getState().isReplica(); + } + + /** + * Convenience method that guards against a NPE when checking whether + * the state of a node is UNKNOWN. + */ + public boolean isUnknown() { + return (repEnv != null) && repEnv.getState().isUnknown(); + } + + /** + * Simulate a crash of the environment, don't do a graceful close. + */ + public void abnormalCloseEnv() { + try { + if (repEnv.isValid()) { + + /* + * Although we want an abnormal close, we do want to flush. + * And if the env is valid, we expect it to work; so avoid + * ignoring exceptions from this call. + */ + RepInternal.getNonNullRepImpl(repEnv).getLogManager(). + flushNoSync(); + } + try { + RepInternal.getNonNullRepImpl(repEnv).abnormalClose(); + } catch (DatabaseException ignore) { + + /* + * The close will face problems like unclosed txns, ignore. + * We're trying to simulate a crash. + */ + } + } finally { + repEnv = null; + } + } + + @Override + public String toString() { + return (repEnv == null) ? + envHome.toString() : repEnv.getNodeName(); + } + } + + public static String stackTraceString(final Throwable exception) { + ByteArrayOutputStream bao = new ByteArrayOutputStream(); + PrintStream printStream = new PrintStream(bao); + exception.printStackTrace(printStream); + String stackTraceString = bao.toString(); + return stackTraceString; + } + + /** + * Restarts a group associated with an existing environment on disk. + * Returns the environment associated with the master. + */ + public static ReplicatedEnvironment + restartGroup(RepEnvInfo ... repEnvInfo) { + + return restartGroup(false /*replicasOnly*/, false, repEnvInfo); + } + + public static ReplicatedEnvironment + restartGroup(boolean allowILE, RepEnvInfo ... repEnvInfo) { + + return restartGroup(false, allowILE, repEnvInfo); + } + + /** + * Restarts a group of replicas associated with an existing environment on + * disk. + */ + public static void restartReplicas(RepEnvInfo ... repEnvInfo) { + + restartGroup(true /*replicasOnly*/, false, repEnvInfo); + } + + /** + * Restarts a group associated with an existing environment on disk. + * Returns the environment associated with the master. 
+ */ + private static ReplicatedEnvironment + restartGroup(boolean replicasOnly, + boolean allowILE, + RepEnvInfo ... repEnvInfo) { + + /* To avoid the jdk bug: NullPointerException in Selector.open(). The + * bug report can be found in + * http://bugs.sun.com/view_bug.do?bug_id=6427854 + */ + System.setProperty("sun.nio.ch.bugLevel", + System.getProperty("sun.nio.ch.bugLevel","")); + + /* Restart the group, a thread for each node. */ + JoinThread threads[] = new JoinThread[repEnvInfo.length]; + for (int i = 0; i < repEnvInfo.length; i++) { + threads[i] = new JoinThread(repEnvInfo[i], allowILE); + threads[i].start(); + } + + /* + * Wait for each thread to have joined the group. The group must be + * re-started in parallel to ensure that all nodes are up and elections + * can be held. + */ + RuntimeException firstFailure = null; + for (int i = 0; i < repEnvInfo.length; i++) { + JoinThread jt = threads[i]; + try { + jt.join(JOIN_WAIT_TIME); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + final Throwable exception = jt.testException; + RuntimeException failure = null; + if (exception != null) { + failure = new RuntimeException( + "Join thread exception for " + repEnvInfo[i] + + " still alive = " + jt.isAlive() + "\n" + + RepTestUtils.stackTraceString(exception)); + } else if (jt.isAlive()) { + failure = new IllegalStateException( + "Join thread for " + repEnvInfo[i] + + " still alive after " + JOIN_WAIT_TIME + "ms," + + " and testException is null."); + } + if (failure != null) { + if (firstFailure == null) { + firstFailure = failure; + } else { + System.err.println(failure); + } + } + } + if (firstFailure != null) { + throw firstFailure; + } + + /* All join threads are quiescent, now pick the master. */ + if (replicasOnly) { + return null; + } + + return getMaster(repEnvInfo, false /*openIfNeeded*/); + } + + /** + * Find the authoritative master (wait for election to quiesce). + */ + public static ReplicatedEnvironment getMaster(RepEnvInfo[] repEnvInfo, + boolean openIfNeeded) { + + final int maxRetries = 100; + int retries = maxRetries; + while (true) { + int masterId = -1; + boolean multipleMasters = false; + boolean nonAuthoritativeMaster = false; + for (int i = 0; i < repEnvInfo.length; i++) { + if (openIfNeeded && repEnvInfo[i].getEnv() == null) { + final boolean VERBOSE = false; + if (VERBOSE) { + System.out.println("Opening node " + (i + 1)); + } + try { + repEnvInfo[i].openEnv(); + } catch (RollbackException|UnknownMasterException e) { + /* + * This node was unable to join because it could not + * determine the master, or because a hard rollback is + * needed. If this prevents an authoritative master + * from being determined, we will retry below. + */ + continue; + } + } + if (repEnvInfo[i].getEnv().getState().isMaster()) { + if (!repEnvInfo[i].getRepImpl().getRepNode(). 
+ isAuthoritativeMaster()) { + nonAuthoritativeMaster = true; + } + if (masterId >= 0) { + multipleMasters = true; + } else { + masterId = i; + } + } + } + if (masterId >= 0 && + !multipleMasters && + !nonAuthoritativeMaster) { + return repEnvInfo[masterId].getEnv(); + } + if (--retries >= 0) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + continue; + } + if (nonAuthoritativeMaster) { + throw new IllegalStateException( + "Non-authoritative master after " + + maxRetries + " retries."); + } + if (multipleMasters) { + throw new IllegalStateException( + "More than one master in group after " + + maxRetries + " retries."); + } + if (masterId < 0) { + throw new IllegalStateException + ("Node id of the elected master is invalid."); + } + } + } + + /** + * Threads used to simulate a parallel join group when multiple replication + * nodes are first brought up for an existing environment. + */ + private static class JoinThread extends Thread { + + final RepEnvInfo repEnvInfo; + final boolean allowILE; + + /* + * Captures any exception encountered in the process of joining. The + * presence of a non-null testException field indicates to the caller + * that the join failed. + */ + volatile Throwable testException = null; + private static final int NUM_RETRIES = 100; + + /* The state of the node at the time of joining the group. */ + @SuppressWarnings("unused") + ReplicatedEnvironment.State state = + ReplicatedEnvironment.State.UNKNOWN; + + JoinThread(RepEnvInfo repEnvInfo, boolean allowILE) { + this.repEnvInfo = repEnvInfo; + this.allowILE = allowILE; + } + + @Override + public void run() { + + /* + * The open of this environment may fail due to timing mishaps if + * the environment has just been shutdown, as can happen in a + * number of tests that repeatedly open and close + * environments. Retry a few time to give the node a chance to + * settle down. + */ + int numRetries = 0; + while (numRetries < NUM_RETRIES) { + try { + state = repEnvInfo.openEnv().getState(); + testException = null; + break; + } catch (InsufficientLogException ile) { + if (allowILE) { + NetworkRestore restore = new NetworkRestore(); + NetworkRestoreConfig nrc = new NetworkRestoreConfig(); + nrc.setRetainLogFiles(false); + restore.execute(ile, nrc); + state = repEnvInfo.openEnv().getState(); + testException = null; + } else { + testException = ile; + } + break; + } catch (GroupShutdownException ge) { + /* Retry, this node is still shutting down. */ + numRetries++; + testException = ge; + try { + Thread.sleep(100); + } catch (InterruptedException ignore) { + } + } catch (Throwable e) { + testException = e; + break; + } + } + } + } + + /** + * Issue DbSync on a group. All nodes are presumed to be closed. + * + * @param timeoutMs is the DbSync timeout (max time for replica to catch up + * with master) as well as the join timeout for each thread calling DbSync. + */ + public static void syncupGroup(long timeoutMs, RepEnvInfo ... repEnvInfo) { + + /* + * The call to DbSync blocks until the sync is done, so it must + * be executed concurrently by a set of threads. + */ + SyncThread threads[] = new SyncThread[repEnvInfo.length]; + String helperHost = repEnvInfo[0].getRepConfig().getNodeHostPort(); + for (int i = 0; i < repEnvInfo.length; i++) { + threads[i] = new SyncThread(timeoutMs, repEnvInfo[i], helperHost); + threads[i].start(); + } + + /* + * Wait for each thread to open, sync, and close the node. 
+ */ + for (int i = 0; i < repEnvInfo.length; i++) { + SyncThread t = threads[i]; + try { + t.join(timeoutMs); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + if (t.isAlive()) { + throw new IllegalStateException("Expect SyncThread " + i + + " dead, but it's alive."); + } + final Throwable exception = t.testException; + if (exception != null) { + throw new RuntimeException + ("Join thread exception.\n" + + RepTestUtils.stackTraceString(exception)); + } + } + } + + /** + * Threads used to simulate a parallel join group when multiple replication + * nodes are first brought up for an existing environment. + */ + private static class SyncThread extends Thread { + + final RepEnvInfo repEnvInfo; + final String helperHost; + final long timeoutMs; + + /* Captures any exception encountered in the process of joining. */ + Throwable testException = null; + + SyncThread(long timeoutMs, RepEnvInfo repEnvInfo, String helperHost) { + this.timeoutMs = timeoutMs; + this.repEnvInfo = repEnvInfo; + this.helperHost = helperHost; + } + + @Override + public void run() { + try { + ReplicationConfig config = repEnvInfo.getRepConfig(); + DbSync syncAgent = + new DbSync(repEnvInfo.getEnvHome().toString(), + repEnvInfo.getEnvConfig(), + config, + helperHost, + timeoutMs); + syncAgent.sync(); + } catch (Throwable e) { + testException = e; + } + } + } + + /** + * Disables network listening services, as a way of + * simulating a network partition for testing. + */ + public static void disableServices(final RepEnvInfo repEnvInfo) { + final ServiceDispatcher sd1 = + repEnvInfo.getRepNode().getServiceDispatcher(); + sd1.setSimulateIOException(Learner.SERVICE_NAME, true); + sd1.setSimulateIOException(Acceptor.SERVICE_NAME, true); + sd1.setSimulateIOException(FeederManager.FEEDER_SERVICE, true); + } + + /** + * Re-enables network services, to reverse the effect of a simulated + * network partition. + * @see #disableServices + */ + public static void reenableServices(final RepEnvInfo repEnvInfo) { + final ServiceDispatcher sd1 = + repEnvInfo.getRepNode().getServiceDispatcher(); + sd1.setSimulateIOException(Learner.SERVICE_NAME, false); + sd1.setSimulateIOException(Acceptor.SERVICE_NAME, false); + sd1.setSimulateIOException(FeederManager.FEEDER_SERVICE, false); + } + + public static void awaitCondition(Callable predicate) + throws Exception { + + awaitCondition(predicate, 5000); + } + + public static void awaitCondition(Callable predicate, + long timeout) + throws Exception { + + boolean done = false; + long deadline = System.currentTimeMillis() + timeout; + while (System.currentTimeMillis() < deadline) { + if (predicate.call()) { + done = true; + break; + } + Thread.sleep(100); + } + Assert.assertTrue(done); + } + + /** + * Used for testing to force consistency checks to fail. + */ + public static class AlwaysFail implements ReplicaConsistencyPolicy { + + public static final String NAME = "AlwaysFailConsistency"; + + public AlwaysFail() { + } + + @Override + public void ensureConsistency(EnvironmentImpl repInstance) + throws InterruptedException { + + throw new ReplicaConsistencyException("Always fails for testing", + this); + } + + /** + * Always returns 0, no timeout is needed for this policy. + */ + @Override + public long getTimeout(TimeUnit unit) { + return 1; + } + + @Override + public String getName() { + return NAME; + } + } + + /** + * Set the basic SSL properties. These rely on the build.xml configuration + * that copies keystore and truststore files to the test environment. 
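+     *
+     * An illustrative sketch of how a test might consume the populated
+     * properties (assumes a ReplicationConfig named repConfig and the
+     * standard JE network-config APIs used elsewhere in this class):
+     * <pre>{@code
+     *   Properties props = new Properties();
+     *   RepTestUtils.setUnitTestSSLProperties(props);
+     *   repConfig.setRepNetConfig(ReplicationNetworkConfig.create(props));
+     * }</pre>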
+ */ + public static void setUnitTestSSLProperties(Properties props) { + File destDir = SharedTestUtils.getDestDir(); + String sslPath = new File(destDir.getPath(), "ssl").getPath(); + + props.put("je.rep.channelType", "ssl"); + props.put("je.rep.ssl.keyStoreFile", + convertPath(new File(sslPath, "keys.store").getPath())); + props.put("je.rep.ssl.keyStorePassword", "unittest"); + props.put("je.rep.ssl.trustStoreFile", + convertPath(new File(sslPath, "trust.store").getPath())); + props.put("je.rep.ssl.clientKeyAlias", "mykey"); + props.put("je.rep.ssl.serverKeyAlias", "mykey"); + } + + /** + * Converts path if run on windows. + * @param path + * @return + */ + private static String convertPath(String path) { + if (File.separator.equals("\\")) { + return path.replace("\\", "\\\\"); + } + return path; + } + + /** + * Used for testing to force consistency checks to fail. Register the + * format at the beginning of the test as follows: + * + * // Register custom consistency policy format while quiescent. + * RepUtils.addConsistencyPolicyFormat + * (RepTestUtils.AlwaysFail.NAME, + * new RepTestUtils.AlwaysFailFormat()); + */ + public static class AlwaysFailFormat + implements ConsistencyPolicyFormat { + + @Override + public String policyToString(final AlwaysFail policy) { + return AlwaysFail.NAME; + } + + @Override + public AlwaysFail stringToPolicy(final String string) { + return new AlwaysFail(); + } + } + + /** + * Wait until a replica/feeder syncup has been tried numSyncupAttempt times + * on this node. + */ + public static CountDownLatch setupWaitForSyncup + (final ReplicatedEnvironment node, int numSyncupAttempts) { + final CountDownLatch waiter = new CountDownLatch(numSyncupAttempts); + + TestHook syncupFinished = new TestHook() { + @Override + public void doHook() throws InterruptedException { + waiter.countDown(); + } + }; + + RepInternal.getNonNullRepImpl(node).getRepNode(). + replica().setReplicaFeederSyncupHook(syncupFinished); + return waiter; + } + + /** + * Modify the existing rep configuration with the new parameter value pair. + */ + public static void setConfigParam(ConfigParam param, + String value, + RepEnvInfo repEnvInfo[]) { + + for (RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setConfigParam(param.getName(), value); + } + } +} diff --git a/test/com/sleepycat/je/rep/utilint/SimpleTxnMapTest.java b/test/com/sleepycat/je/rep/utilint/SimpleTxnMapTest.java new file mode 100644 index 0000000..84905ad --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/SimpleTxnMapTest.java @@ -0,0 +1,172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryPoolMXBean; +import java.lang.management.MemoryType; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.junit.Test; + +import com.sleepycat.je.txn.Txn; + +/** + * Test operation of the simple map. 
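+ *
+ * The tests keep a plain HashMap as a reference map, apply every mutation
+ * to both maps, and assert after each step that sizes and lookups agree
+ * (editor's note describing the check() pattern used throughout):
+ *
+ *   rm.put(id, txn);   // reference java.util.Map
+ *   m.put(txn);        // SimpleTxnMap under test
+ *   check(rm, m);      // the two views must match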
+ */
+public class SimpleTxnMapTest {
+
+    @Test
+    public void testBasic() {
+        try {
+            @SuppressWarnings("unused")
+            SimpleTxnMap<TestTxn> ignore = new SimpleTxnMap<>(10);
+            fail("Expected IAE");
+        } catch (IllegalArgumentException iae) {
+            /* Expected. */
+        }
+        final int arrayMapSize = 128;
+        SimpleTxnMap<TestTxn> m = new SimpleTxnMap<>(arrayMapSize);
+        Map<Long, TestTxn> rm = new HashMap<>();
+
+        check(rm, m);
+
+        for (long i = 0; i < arrayMapSize; i++) {
+            TestTxn t = new TestTxn(i);
+            rm.put(i, t);
+            m.put(t);
+            check(rm, m);
+        }
+
+        assertEquals(0, m.getBackupMap().size());
+
+        /* Create holes in the array map. */
+        for (long i = 0; i < arrayMapSize; i += 2) {
+            TestTxn t1 = rm.remove(i);
+            TestTxn t2 = m.remove(i);
+            assertEquals(t1, t2);
+            check(rm, m);
+        }
+
+        assertEquals(0, m.getBackupMap().size());
+
+        /* Use emptied array slots and create backup map entries. */
+        for (long i = arrayMapSize; i < arrayMapSize * 2; i++) {
+            TestTxn t = new TestTxn(i);
+            rm.put(i, t);
+            m.put(t);
+            check(rm, m);
+        }
+        /* Half of the entries should be in the backup map. */
+        assertEquals(arrayMapSize / 2, m.getBackupMap().size());
+
+        /* Remove some more entries and check. */
+        for (long i = 1; i < arrayMapSize; i += 2) {
+            TestTxn t1 = rm.remove(i);
+            TestTxn t2 = m.remove(i);
+            assertEquals(t1, t2);
+            check(rm, m);
+        }
+
+        /* Check for truly long txn ids. */
+        for (long i = Integer.MAX_VALUE * 2L;
+             i < ((Integer.MAX_VALUE * 2L) + arrayMapSize);
+             i++) {
+            TestTxn t = new TestTxn(i);
+            rm.put(i, t);
+            m.put(t);
+            check(rm, m);
+        }
+
+        rm.clear();
+        m.clear();
+        check(rm, m);
+    }
+
+    private static final int testSize = 100000000;
+
+    /**
+     * A very rough way to ensure that the code path is shorter with the
+     * simple map. Runs show a 2-4X perf improvement over the HashMap-based
+     * map.
+     *
+     * There's a benefit in terms of heap allocation as well, which would
+     * translate into request latency improvements due to less frequent
+     * new-space GCs. The mbean output can be used to confirm the heap
+     * benefits.
+     *
+     * This test is normally turned off. It should be run by hand in
+     * isolation to get a handle on the perf benefits.
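+     *
+     * One way to run it by hand (editor's suggestion, not part of the
+     * original harness) is a throwaway main:
+     *
+     *   public static void main(String[] args) {
+     *       new SimpleTxnMapTest().OfftestSimpleMapPerf();
+     *   }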
+ */ + public void OfftestSimpleMapPerf() { + + final int arrayMapSize = 128; + TestTxn t = new TestTxn(5); + + List mbeans = ManagementFactory.getMemoryPoolMXBeans(); + dumpHeapUsage(mbeans); + final SimpleTxnMap m = new SimpleTxnMap(arrayMapSize); + long startMs = System.currentTimeMillis(); + for (int i=0; i < testSize; i++) { + m.put(t); + m.get(t.getId()); + m.remove(t.getId()); + } + long endMs = System.currentTimeMillis(); + dumpHeapUsage(mbeans); + + System.err.println("Elapsed time simple map:" + (endMs - startMs)); + + final Map rm = + Collections.synchronizedMap(new HashMap()); + startMs = System.currentTimeMillis(); + for (int i=0; i < testSize; i++) { + rm.put(t.getId(), t); + rm.get(t.getId()); + rm.remove(t.getId()); + } + endMs = System.currentTimeMillis(); + System.err.println("Elapsed time java map:" + (endMs - startMs)); + dumpHeapUsage(mbeans); + } + + private void dumpHeapUsage(List mbeans) { + System.err.println("Heap usage:"); + for (MemoryPoolMXBean mb : mbeans) { + if (mb.getType() == MemoryType.HEAP) { + System.err.println(mb.getName() + " peak:" + mb.getPeakUsage()); + mb.resetPeakUsage(); + } + } + } + + private class TestTxn extends Txn { + TestTxn(long id) { + this.id = id; + } + } + + private void check(Map rm, SimpleTxnMap m) { + assertEquals(rm.size(), m.size()); + assertEquals(rm.isEmpty(), m.isEmpty()); + + for (TestTxn rmt : rm.values()) { + assertEquals(rmt, m.get(rmt.getId())); + } + } +} diff --git a/test/com/sleepycat/je/rep/utilint/SizeAwaitMapTest.java b/test/com/sleepycat/je/rep/utilint/SizeAwaitMapTest.java new file mode 100644 index 0000000..6400672 --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/SizeAwaitMapTest.java @@ -0,0 +1,215 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.rep.utilint.SizeAwaitMap.Predicate; +import com.sleepycat.util.test.TestBase; + +public class SizeAwaitMapTest extends TestBase { + + SizeAwaitMap smap = null; + SizeWaitThread testThreads[]; + AtomicInteger doneThreads; + + CountDownLatch startLatch = null; + + /* Large number to help expose concurrency issues, if any. */ + static final int threadCount = 200; + + /** + * Set up the test, creating the SizeAwaitMap with the specified predicate. 
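+     *
+     * Thread i waits for the map to reach size i, so thread 0 completes
+     * immediately and each additional matching entry releases exactly one
+     * more waiter; a null predicate counts every entry (editor's note
+     * summarizing the behavior the tests below rely on).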
+ */ + private void setUp(final Predicate predicate) + throws Exception { + + smap = new SizeAwaitMap(null, predicate); + testThreads = new SizeWaitThread[threadCount]; + doneThreads = new AtomicInteger(0); + startLatch = new CountDownLatch(threadCount); + for (int i=0; i < threadCount; i++) { + testThreads[i] = + new SizeWaitThread(i, Long.MAX_VALUE, TimeUnit.MILLISECONDS); + testThreads[i].start(); + } + // Wait for threads to start up + startLatch.await(); + } + + private void checkLiveThreads(int checkStart) { + for (int j=checkStart; j < threadCount; j++) { + assertTrue(testThreads[j].isAlive()); + } + assertEquals(checkStart, doneThreads.intValue()); + } + + /** + * Tests basic put/remove operations + */ + @Test + public void testBasic() throws Exception { + setUp(null); + joinThread(0); + assertEquals(1, doneThreads.intValue()); + for (int i=1; i < threadCount; i++) { + assertTrue(testThreads[i].isAlive()); + smap.put(i, i); + joinThread(i); + assertTrue(testThreads[i].success); + // All subsequent threads continue to live + checkLiveThreads(i+1); + + // Remove should have no impact + smap.remove(i); + checkLiveThreads(i+1); + + // Re-adding should have no impact + smap.put(i, i); + checkLiveThreads(i+1); + } + } + + /* + * Tests clear operation. + */ + @Test + public void testClear() throws Exception { + setUp(null); + joinThread(0); + assertEquals(1, doneThreads.intValue()); + /* Wait for the threads */ + while (smap.latchCount()!= (threadCount-1)) { + Thread.sleep(10); + } + + smap.clear(new MyTestException()); + assertTrue(smap.size() == 0); + for (int i=1; i < threadCount; i++) { + joinThread(i); + assertTrue(testThreads[i].cleared); + assertFalse(testThreads[i].interrupted); + } + assertEquals(threadCount, doneThreads.intValue()); + } + + /** + * Tests put and remove operations with a predicate. + */ + @Test + public void testPredicate() throws Exception { + /* Only count even values */ + setUp(new Predicate() { + @Override + public boolean match(final Integer i) { + return ((i % 2) == 0); + } + }); + joinThread(0); + assertEquals(1, doneThreads.intValue()); + for (int i = 1; i < threadCount; i++) { + assertTrue(testThreads[i].isAlive()); + + // Odd value + int value = (2 * i) - 1; + smap.put(value, value); + + // No change + checkLiveThreads(i); + + // Remove should have no impact + smap.remove(value); + checkLiveThreads(i); + + // Re-adding should have no impact + smap.put(value, value); + checkLiveThreads(i); + + // Even value + value++; + smap.put(value, value); + joinThread(i); + assertTrue(testThreads[i].success); + // All subsequent threads continue to live + checkLiveThreads(i+1); + + // Remove should have no impact + smap.remove(value); + checkLiveThreads(i+1); + + // Re-adding should have no impact + smap.put(value, value); + checkLiveThreads(i+1); + } + } + + /** + * Threads which wait for specific map sizes. + */ + private class SizeWaitThread extends Thread { + + /* The size to wait for. 
*/ + final int size; + final long timeout; + final TimeUnit unit; + boolean interrupted = false; + boolean cleared = false; + boolean success = false; + + SizeWaitThread(int size, long timeout, TimeUnit unit) { + this.size = size; + this.timeout = timeout; + this.unit = unit; + } + + public void run() { + startLatch.countDown(); + try { + success = smap.sizeAwait(size, timeout, unit); + } catch (MyTestException mte) { + cleared = true; + } catch (InterruptedException e) { + interrupted = true; + } finally { + doneThreads.incrementAndGet(); + } + + } + } + + @SuppressWarnings("serial") + private class MyTestException extends DatabaseException { + MyTestException() { + super("testing"); + } + } + + /** + * Wait no more than 5 seconds to join the specified thread and check that + * it died. + */ + private void joinThread(final int threadNum) throws InterruptedException { + testThreads[threadNum].join(5000); + assertFalse("thread alive", testThreads[threadNum].isAlive()); + } +} diff --git a/test/com/sleepycat/je/rep/utilint/TestPasswordAuthentication.java b/test/com/sleepycat/je/rep/utilint/TestPasswordAuthentication.java new file mode 100644 index 0000000..502b95d --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/TestPasswordAuthentication.java @@ -0,0 +1,217 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.util.Arrays; +import java.util.logging.Level; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.PasswordSource; +import com.sleepycat.je.rep.utilint.ServiceDispatcher.Response; +import com.sleepycat.je.rep.utilint.ServiceHandshake.AuthenticationMethod; +import com.sleepycat.je.rep.utilint.ServiceHandshake.ClientHandshake; +import com.sleepycat.je.rep.utilint.ServiceHandshake.ClientInitOp; +import com.sleepycat.je.rep.utilint.ServiceHandshake.ServerHandshake; +import com.sleepycat.je.rep.utilint.ServiceHandshake.ServerInitOp; +import com.sleepycat.je.rep.utilint.ServiceHandshake.InitResult; +import com.sleepycat.je.rep.utilint.ServiceHandshake.IOAdapter; +import com.sleepycat.utilint.StringUtils; + +/** + * This class provides a sample implementation of an authentication method + * that uses clear text passwords. + */ +class TestPasswordAuthentication implements AuthenticationMethod { + + /* The indicator for the clear password authentication method. 
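+     *
+     * Editor's illustration (glue code only, not part of this file): a
+     * test constructs the method around a PasswordSource that supplies
+     * the expected secret, e.g.
+     *
+     *   AuthenticationMethod auth = new TestPasswordAuthentication(
+     *       new PasswordSource() {
+     *           public char[] getPassword() {
+     *               return "secret".toCharArray();
+     *           }
+     *       });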
*/ + static final String MECHANISM = "TestPassword"; + + protected PasswordSource passwordSource; + + TestPasswordAuthentication(PasswordSource passwordSource) { + this.passwordSource = passwordSource; + } + + @Override + public String getMechanismName() { + return MECHANISM; + } + + @Override + public ClientInitOp getClientOp(ClientHandshake initState, + String ignoredParams) { + return new ClientPasswordOp(initState, passwordSource); + } + + @Override + public ServerInitOp getServerOp(ServerHandshake initState) { + return new ServerPasswordOp(initState, passwordSource); + } + + @Override + public String getServerParams() { + return ""; + } + + /** + * Server-side implementation. Reads the password and compares + * to the expected password. + * Password format is: + * Length: 1 byte, range 0-127 + * Password: bytes ASCII encoded string + */ + static class ServerPasswordOp extends ServerInitOp { + + private final static int INITIAL_BUFFER_SIZE = 1; + private final PasswordSource passwordSource; + private ByteBuffer buffer; + private boolean sizeRead = false; + + ServerPasswordOp(ServerHandshake initState, + PasswordSource passwordSource) { + super(initState); + this.passwordSource = passwordSource; + this.buffer = ByteBuffer.allocate(INITIAL_BUFFER_SIZE); + } + + @Override + protected InitResult processOp(DataChannel channel) throws IOException { + InitResult readResult = fillBuffer(channel, buffer); + if (readResult != InitResult.DONE) { + return readResult; + } + + if (sizeRead == false) { + /* We've just read the size into the buffer */ + sizeRead = true; + buffer.flip(); + final int passwordLen = buffer.get(); + if (passwordLen <= 0) { + initState.logMsg(Level.WARNING, + true, // noteError + "Bad password length: " + passwordLen); + sendBuffer(channel, Response.FORMAT_ERROR.byteBuffer()); + closeChannel(channel); + return InitResult.FAIL; + } + buffer = ByteBuffer.allocate(passwordLen); + + readResult = fillBuffer(channel, buffer); + if (readResult != InitResult.DONE) { + return readResult; + } + } + + buffer.flip(); + /* Now get the password itself */ + final CharBuffer passwordBuf = StringUtils.fromASCII(buffer); + + /* Erase the password info since we no longer need it */ + zero(buffer); + final char[] passwordChars = passwordBuf.array(); + + try { + if (Arrays.equals(passwordSource.getPassword(), + passwordChars)) { + return InitResult.DONE; + } + } finally { + /* Again, clean up after ourselves */ + zero(passwordChars); + } + + initState.logMsg(Level.WARNING, + true, // noteError + "Bad password received - length: " + + buffer.capacity()); + /* + * Don't immediately communicate the failure to the client + * in case this is a password guessing attack. Make them + * wait for a response. + */ + return InitResult.REJECT; + } + } + + /** + * Client-side: authenticate via password. 
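+     *
+     * The client writes the same message format the server parses above
+     * and then reads a single response byte (editor's summary of the
+     * exchange implemented below):
+     *
+     *   client --> server : [length: 1 byte][password: length ASCII bytes]
+     *   server --> client : [response: 1 byte]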
+ */ + static class ClientPasswordOp extends ClientInitOp { + + private final PasswordSource passwordSource; + + ClientPasswordOp(ClientHandshake initState, + PasswordSource passwordSource) { + super(initState); + this.passwordSource = passwordSource; + } + + @Override + protected InitResult processOp(IOAdapter ioAdapter) throws IOException { + final char[] password = passwordSource.getPassword(); + final byte[] passwordMessage = servicePasswordMessage(password); + zero(password); + ioAdapter.write(passwordMessage); + zero(passwordMessage); + + final byte[] responseByte = new byte[1]; + final int result = ioAdapter.read(responseByte); + if (result < 0) { + throw new IOException( + "No service authenticate response byte: " + result); + } + final Response response = Response.get(responseByte[0]); + if (response == null) { + throw new IOException("Unexpected read response byte: " + + responseByte[0]); + } + setResponse(response); + return InitResult.DONE; + } + + /** + * Builds a password component of the authentication message + */ + private byte[] servicePasswordMessage(char[] password) { + final CharBuffer passwordCharBuf = CharBuffer.wrap(password); + final ByteBuffer passwordByteBuf = + StringUtils.toASCII(passwordCharBuf); + final int length = 1 + passwordByteBuf.limit(); + final ByteBuffer buffer = ByteBuffer.allocate(length); + buffer.put((byte)passwordByteBuf.limit()).put(passwordByteBuf); + zero(passwordByteBuf); + return buffer.array(); + } + } + + /** + * Zero out the buffer. + */ + private static void zero(ByteBuffer buf) { + buf.clear(); + for (int i = 0; i < buf.limit(); i++) { + buf.put((byte)0); + } + } + + private static void zero(byte[] buf) { + Arrays.fill(buf, (byte)0); + } + + private static void zero(char[] buf) { + Arrays.fill(buf, ' '); + } +} diff --git a/test/com/sleepycat/je/rep/utilint/ValidStateListener.java b/test/com/sleepycat/je/rep/utilint/ValidStateListener.java new file mode 100644 index 0000000..65fbc4c --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/ValidStateListener.java @@ -0,0 +1,35 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; + +public class ValidStateListener implements StateChangeListener { + CountDownLatch waitForValidState = new CountDownLatch(1); + + public void stateChange(StateChangeEvent stateChangeEvent) { + if (stateChangeEvent.getState().isActive()) { + waitForValidState.countDown(); + } + } + + public void awaitValidState() + throws InterruptedException { + + waitForValidState.await(); + } +} diff --git a/test/com/sleepycat/je/rep/utilint/WaitForDetachedListener.java b/test/com/sleepycat/je/rep/utilint/WaitForDetachedListener.java new file mode 100644 index 0000000..9c12453 --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/WaitForDetachedListener.java @@ -0,0 +1,39 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; + +public class WaitForDetachedListener implements StateChangeListener { + + private static final long DEFAULT_TIMEOUT_MS = 30000; + private final CountDownLatch waitForDetached = new CountDownLatch(1); + + public void stateChange(StateChangeEvent stateChangeEvent) { + if (stateChangeEvent.getState().isDetached()) { + waitForDetached.countDown(); + } + } + + public boolean awaitDetached() + throws InterruptedException { + + return waitForDetached.await( + DEFAULT_TIMEOUT_MS, TimeUnit.MILLISECONDS); + } +} diff --git a/test/com/sleepycat/je/rep/utilint/WaitForListener.java b/test/com/sleepycat/je/rep/utilint/WaitForListener.java new file mode 100644 index 0000000..1e4b765 --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/WaitForListener.java @@ -0,0 +1,87 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.MASTER; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.REPLICA; +import static com.sleepycat.je.rep.ReplicatedEnvironment.State.UNKNOWN; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; + +/** + * Utility class to wait for one of a set of state change events. + * + * This is the preferred class to use as an alternative to the WaitForXXX + * sequence of classes in this package. + */ +public class WaitForListener implements StateChangeListener { + + final CountDownLatch latch = new CountDownLatch(1); + final Set waitStates; + + private boolean success = true; + + public WaitForListener(State... states) { + waitStates = new HashSet(Arrays.asList(states)); + } + + public void stateChange(StateChangeEvent stateChangeEvent) { + + if (waitStates.contains(stateChangeEvent.getState())) { + latch.countDown(); + return; + } + + if (stateChangeEvent.getState().isDetached()) { + /* It will never transition out of this state. */ + success = false; + latch.countDown(); + return; + } + + /* Some other intermediate state, not of interest; ignore it. 
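+         *
+         * Typical use of this listener (editor's sketch; assumes the
+         * standard ReplicatedEnvironment.setStateChangeListener
+         * registration call): create it with the states of interest,
+         * register it, then block on await():
+         *
+         *   WaitForListener l = new WaitForListener(MASTER);
+         *   env.setStateChangeListener(l);
+         *   boolean reached = l.await();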
*/ + } + + public boolean await() + throws InterruptedException { + + latch.await(); + return success; + } + + /** + * Specialized listener for Active states + */ + public static class Active extends WaitForListener { + public Active() { + super(MASTER, REPLICA); + } + } + + /** + * Specialized listener for transition to UNKNOWN + */ + public static class Unknown extends WaitForListener { + public Unknown() { + super(UNKNOWN); + } + } +} diff --git a/test/com/sleepycat/je/rep/utilint/WaitForMasterListener.java b/test/com/sleepycat/je/rep/utilint/WaitForMasterListener.java new file mode 100644 index 0000000..3a8d920 --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/WaitForMasterListener.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; + +public class WaitForMasterListener implements StateChangeListener { + CountDownLatch waitForMaster = new CountDownLatch(1); + private boolean success = true; + + public void stateChange(StateChangeEvent stateChangeEvent) { + if (stateChangeEvent.getState().equals + (ReplicatedEnvironment.State.MASTER)) { + waitForMaster.countDown(); + } + + if (stateChangeEvent.getState().isDetached()) { + /* It will never return to the replica state. */ + success = false; + waitForMaster.countDown(); + } + } + + public boolean awaitMastership() + throws InterruptedException { + + waitForMaster.await(); + return success; + } +} diff --git a/test/com/sleepycat/je/rep/utilint/WaitForReplicaListener.java b/test/com/sleepycat/je/rep/utilint/WaitForReplicaListener.java new file mode 100644 index 0000000..497ba15 --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/WaitForReplicaListener.java @@ -0,0 +1,43 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint; + +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.StateChangeListener; + +public class WaitForReplicaListener implements StateChangeListener { + CountDownLatch waitForReplica = new CountDownLatch(1); + private boolean success = true; + + @Override + public void stateChange(StateChangeEvent stateChangeEvent) { + if (stateChangeEvent.getState().isReplica()) { + waitForReplica.countDown(); + } + if (stateChangeEvent.getState().isDetached()) { + /* It will never return to the replica state. 
*/ + success = false; + waitForReplica.countDown(); + } + } + + public boolean awaitReplica() + throws InterruptedException { + + waitForReplica.await(); + return success; + } +} diff --git a/test/com/sleepycat/je/rep/utilint/net/SSLChannelTest.java b/test/com/sleepycat/je/rep/utilint/net/SSLChannelTest.java new file mode 100644 index 0000000..f043de5 --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/net/SSLChannelTest.java @@ -0,0 +1,1175 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.utilint.net; + +import static + com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD; +import static + com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD_CLASS; +import static + com.sleepycat.je.rep.ReplicationSSLConfig.SSL_KEYSTORE_PASSWORD_PARAMS; +import static com.sleepycat.je.rep.ReplicationSSLConfig.SSL_PROTOCOLS; +import static com.sleepycat.je.rep.ReplicationSSLConfig.SSL_CIPHER_SUITES; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.Properties; +import java.util.Random; +import java.util.Set; + +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.ReplicationSSLConfig; +import com.sleepycat.je.rep.net.PasswordSource; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.net.InstanceContext; +import com.sleepycat.je.rep.net.InstanceParams; +import com.sleepycat.je.rep.utilint.FreePortLocator; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.net.SSLChannelFactory; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder.ChannelFormatter; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder.ChannelLoggerFactory; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * This test is intended to check the behavior of the SSLDataChannel class. + * It can serve as both a unit test and as a stress test. When run normally, + * it runs as a unit test, completing in about 1 minute. When run with the + * ant argument -Dlongtest=true, the length of the streams and number of streams + * are both increased, for a significantly longer run time. 
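+ *
+ * Output knobs (editor's summary of the system properties read in the
+ * constructor below):
+ *
+ *   -Dtest.showThruput=true   prints per-stream throughput figures
+ *   -Dtest.showErrors=true    prints client/server I/O errors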
+ */ +public class SSLChannelTest extends TestBase { + + /* The socket on which the dispatcher is listening */ + private static InetSocketAddress socketAddress = null; + + /* + * The selector that watches for accept events on the server socket and + * on subsequent read events. + */ + private static Selector selector; + + /* The server socket channel */ + private static ServerSocketChannel serverChannel; + + /* Packet direction */ + final static int TO_SERVER = 0; + final static int TO_CLIENT = 1; + + /* Number of iterations at each test case */ + private static int N_ITERS = 5; + + /* Multiplier for number of packets per stream */ + private static int STREAM_MULTIPLIER = 1; + + private final static String SHOW_THRUPUT = "test.showThruput"; + private final static String SHOW_ERRORS = "test.showErrors"; + private boolean showThruput = false; + private boolean showErrors = false; + private static long randomSeed = System.nanoTime(); + + /* The IBM JVM names cipher suites differently. */ + private static final String SUPPORTED_CIPHER_SUITE = + System.getProperty("java.vendor", "unknown").startsWith("IBM") ? + "SSL_RSA_WITH_AES_128_CBC_SHA256" : + "TLS_RSA_WITH_AES_128_CBC_SHA256"; + + /** + * A tuple of a DataChannel and an associated ByteBuffer + */ + static class ChannelBuffer { + + private final DataChannel dataChannel; + private ByteBuffer byteBuffer; + + public ChannelBuffer(DataChannel dataChannel, ByteBuffer byteBuffer) { + this.dataChannel = dataChannel; + this.byteBuffer = byteBuffer; + } + + DataChannel getChannel() { + return dataChannel; + } + + ByteBuffer getBuffer() { + return byteBuffer; + } + void setBuffer(ByteBuffer newByteBuffer) { + this.byteBuffer = newByteBuffer; + } + } + + public SSLChannelTest() { + showThruput = Boolean.getBoolean(SHOW_THRUPUT); + showErrors = Boolean.getBoolean(SHOW_ERRORS); + } + + @BeforeClass + public static void setUpOnce() + throws Exception { + + if (SharedTestUtils.runLongTests()) { + N_ITERS = 100; + STREAM_MULTIPLIER = 10; + } + + initServerSocket(); + System.out.println("Random seed = " + randomSeed); + } + + @AfterClass + public static void shutdownOnce() + throws Exception { + + closeServerSocket(); + } + + static void initServerSocket() { + try { + FreePortLocator locator = + new FreePortLocator("localhost", 5000, 6000); + int freePort = locator.next(); + + socketAddress = new InetSocketAddress("localhost", freePort); + serverChannel = ServerSocketChannel.open(); + serverChannel.configureBlocking(false); + selector = Selector.open(); + serverChannel.register(selector, SelectionKey.OP_ACCEPT); + + ServerSocket acceptSocket = serverChannel.socket(); + + /* No timeout */ + acceptSocket.setSoTimeout(0); + acceptSocket.bind(socketAddress); + } catch (IOException ioe) { + System.out.println("Error initializing server socket"); + ioe.printStackTrace(); + } + } + + static void closeServerSocket() { + try { + serverChannel.socket().close(); + selector.close(); + } catch (IOException ioe) { + System.out.println("Error closing server socket"); + ioe.printStackTrace(); + } + } + + class Packet { + private int direction; + private int packetSize; + private int startIdx; + + /* length of the byte buffer array to hold the data */ + private int bufArrLen; + private int bytesPerBuf; + private int bytesLastBuf; + + public Packet(int dir, int packetSize, int startIdx, int bufArrLen) { + assert bufArrLen <= packetSize; + this.direction = dir; + this.packetSize = packetSize; + this.startIdx = startIdx; + this.bufArrLen = bufArrLen; + this.bytesPerBuf = 
packetSize / bufArrLen; + this.bytesLastBuf = packetSize - (bufArrLen - 1) * bytesPerBuf; + } + + public boolean toServer() { + return (direction == TO_SERVER); + } + + public int getPacketSize() { + return this.packetSize; + } + + public int getStartIdx() { + return this.startIdx; + } + + public int getBufArrLen() { + return this.bufArrLen; + } + + /* Get the byte buffer array. */ + public ByteBuffer[] getBufArr() { + ByteBuffer[] bufarr = new ByteBuffer[bufArrLen]; + for (int i = 0; i < bufArrLen; ++i) { + int n = (i == bufArrLen - 1) ? bytesLastBuf : bytesPerBuf; + bufarr[i] = ByteBuffer.allocate(n); + } + return bufarr; + } + + /* Get the half way index of the array. */ + public int getArrHalfwayIdx() { + return bufArrLen / 2; + } + + /** + * Get the num of bytes in the array from the index 0 (inclusive) to + * the result of getArrHalfwayIdx(exclusive). + */ + public int nbytesArrHalf1() { + return (bufArrLen / 2) * bytesPerBuf; + } + + /** + * Get the num of bytes in the array from the index of the result of + * getArrHalfwayIdx(inclusive) to the end. + */ + public int nbytesArrHalf2() { + return (bufArrLen - 1 - bufArrLen / 2) * bytesPerBuf + bytesLastBuf; + } + + /** + * Fill the packet data. + * + * Buffers are flip to be ready to get after filled. + */ + public void fill(ByteBuffer[] bufarr) { + int val = startIdx; + for (int idx = 0; idx < bufArrLen; ++idx) { + ByteBuffer buf = bufarr[idx]; + buf.clear(); + for (int i = 0; i < buf.capacity(); ++i) { + buf.put((byte)(val++)); + } + buf.clear(); + } + } + + /* Check the packet data. */ + public void check(ByteBuffer[] bufarr) { + int val = startIdx; + for (int idx = 0; idx < bufArrLen; ++idx) { + ByteBuffer buf = bufarr[idx]; + buf.clear(); + for (int i = 0; i < buf.capacity(); ++i) { + if (((byte)(val++)) != buf.get()) { + System.out.println("Data mismatch"); + return; + } + } + } + } + } + + interface Checker { + void check(ClientTask client, ServerTask server); + } + + class BasicChecker implements Checker { + public void check(ClientTask client, ServerTask server) { + assertEquals(null, client.getError()); + assertFalse(client.getOtherError()); + assertFalse(server.getOtherError()); + assertTrue(server.channelSecure); + assertFalse(server.channelTrusted); + } + } + + class PeerAuthenticatedChecker implements Checker { + private boolean expectTrusted; + + PeerAuthenticatedChecker(boolean trusted) { + expectTrusted = trusted; + } + + public void check(ClientTask client, ServerTask server) { + assertEquals(null, client.getError()); + assertFalse(client.getOtherError()); + assertFalse(server.getOtherError()); + assertTrue(server.channelSecure); + assertEquals(server.channelTrusted, expectTrusted); + } + } + + class PeerNotVerifiedChecker implements Checker { + PeerNotVerifiedChecker() { + } + + public void check(ClientTask client, ServerTask server) { + assertTrue(client.getError() != null); + assertTrue(client.getError().getMessage().contains( + "Server identity could not be verified")); + } + } + + /** + * Tests initiation of connections with packet delivery using a + * sequence of small packets + */ + @Test + public void testSmallPackets() throws InterruptedException { + Random random = new Random(randomSeed); + + final int N_PACKETS = 50; + final int MIN_PACKET_SIZE = 1; + final int MAX_PACKET_SIZE = 10; + DataChannelFactory channelFactory = + createSSLFactory(createSSLFactoryProps()); + + runScenario(random, false, channelFactory, + N_ITERS, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new BasicChecker()); + } + + /** + * 
Force throughput testing to be sure it doesn't bit rot.
+     */
+    @Test
+    public void testBasicThroughput() throws InterruptedException {
+        boolean origShowThruput = showThruput;
+        try {
+            showThruput = true;
+            Random random = new Random(randomSeed);
+
+            final int ONE_ITER = 1;
+            final int N_PACKETS = 1000;
+            final int MIN_PACKET_SIZE = 100;
+            final int MAX_PACKET_SIZE = 200;
+            DataChannelFactory channelFactory =
+                createSSLFactory(createSSLFactoryProps());
+
+            runScenario(random, true, channelFactory,
+                        ONE_ITER, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE,
+                        new BasicChecker());
+        } finally {
+            showThruput = origShowThruput;
+        }
+    }
+
+    /**
+     * Tests throughput by sending packets in only one direction with
+     * "smallish" packets.
+     */
+    @Test
+    public void testSmallThroughput() throws InterruptedException {
+        if (showThruput) {
+            Random random = new Random(randomSeed);
+
+            final int N_PACKETS = 100000;
+            final int MIN_PACKET_SIZE = 100;
+            final int MAX_PACKET_SIZE = 200;
+            DataChannelFactory channelFactory =
+                createSSLFactory(createSSLFactoryProps());
+
+            runScenario(random, true, channelFactory,
+                        N_ITERS, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE,
+                        new BasicChecker());
+        }
+    }
+
+    /**
+     * Tests initiation of connections with packet delivery using a
+     * sequence of medium-sized packets.
+     */
+    @Test
+    public void testMediumPackets() throws InterruptedException {
+        Random random = new Random(randomSeed);
+
+        final int N_PACKETS = 50;
+        final int MIN_PACKET_SIZE = 1000;
+        final int MAX_PACKET_SIZE = 5000;
+        DataChannelFactory channelFactory =
+            createSSLFactory(createSSLFactoryProps());
+
+        runScenario(random, false, channelFactory,
+                    N_ITERS, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE,
+                    new BasicChecker());
+    }
+
+    /**
+     * Tests initiation of connections with packet delivery using a
+     * sequence of large packets.
+     */
+    @Test
+    public void testLargePackets() throws InterruptedException {
+        Random random = new Random(randomSeed);
+
+        final int N_PACKETS = 20;
+        final int MIN_PACKET_SIZE = 20000;
+        final int MAX_PACKET_SIZE = 50000;
+        DataChannelFactory channelFactory =
+            createSSLFactory(createSSLFactoryProps());
+
+        runScenario(random, false, channelFactory,
+                    N_ITERS, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE,
+                    new BasicChecker());
+    }
+
+    /**
+     * Tests initiation of connections with packet delivery using a
+     * sequence of huge packets.
+     */
+    @Test
+    public void testHugePackets() throws InterruptedException {
+        Random random = new Random(randomSeed);
+
+        final int N_PACKETS = 10;
+        final int MIN_PACKET_SIZE = 100000;
+        final int MAX_PACKET_SIZE = 500000;
+        DataChannelFactory channelFactory =
+            createSSLFactory(createSSLFactoryProps());
+
+        runScenario(random, false, channelFactory,
+                    N_ITERS, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE,
+                    new BasicChecker());
+    }
+
+    /**
+     * Tests throughput by sending packets in only one direction with "huge"
+     * packets.
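+     *
+     * Like testSmallThroughput, the body below is guarded by showThruput,
+     * so it is effectively a no-op unless -Dtest.showThruput=true is set
+     * (editor's note).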
+ */ + @Test + public void testHugeThroughput() throws InterruptedException { + if (showThruput) { + Random random = new Random(randomSeed); + + final int N_PACKETS = 100; + final int MIN_PACKET_SIZE = 100000; + final int MAX_PACKET_SIZE = 500000; + DataChannelFactory channelFactory = + createSSLFactory(createSSLFactoryProps()); + + runScenario(random, true, channelFactory, + N_ITERS, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new BasicChecker()); + } + } + + /** + * Tests peer authentication with pattern authentication + */ + @Test + public void testPeerPatternAuthentication() throws InterruptedException { + + Random random = new Random(randomSeed); + + final int N_PACKETS = 10; + final int MIN_PACKET_SIZE = 1; + final int MAX_PACKET_SIZE = 10; + + /* This version should be trusted */ + DataChannelFactory channelFactory = + createSSLFactory(addPatternAuthentication(createSSLFactoryProps(), + "CN=Unit Test", + "CN=Unit Test")); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new PeerAuthenticatedChecker(true)); + + /* This version should not be trusted */ + channelFactory = + createSSLFactory(addPatternAuthentication(createSSLFactoryProps(), + "CN=Not A Unit Test", + "CN=Unit Test")); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new PeerAuthenticatedChecker(false)); + + /* This case should fail verification */ + channelFactory = + createSSLFactory(addPatternAuthentication(createSSLFactoryProps(), + "CN=Unit Test", + "CN=Not A Unit Test")); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new PeerNotVerifiedChecker()); + + /* Test assymetric keys */ + channelFactory = + createSSLFactory( + setClientKeyAlias( + addPatternAuthentication(createSSLFactoryProps(), + "CN=Other Test 1", + "CN=Unit Test"), + "otherkey1")); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new PeerAuthenticatedChecker(true)); + } + + + /** + * Tests peer authentication with basic mirror authentication + */ + @Test + public void testPeerMirrorAuthentication() throws InterruptedException { + + Random random = new Random(randomSeed); + + final int N_PACKETS = 10; + final int MIN_PACKET_SIZE = 1; + final int MAX_PACKET_SIZE = 10; + + /* This version should be trusted */ + DataChannelFactory channelFactory = + createSSLFactory(addMirrorAuthentication(createSSLFactoryProps())); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new PeerAuthenticatedChecker(true)); + + /* No easy way to force a failure */ + } + + /** + * Tests server verification with standard verification. + * This test only tests the negative path currently. + */ + @Test + public void testStdHostVerification() throws InterruptedException { + + Random random = new Random(randomSeed); + + final int N_PACKETS = 10; + final int MIN_PACKET_SIZE = 1; + final int MAX_PACKET_SIZE = 10; + + /* This case should fail verification */ + DataChannelFactory channelFactory = + createSSLFactory(addStdVerification(createSSLFactoryProps())); + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new PeerNotVerifiedChecker()); + } + + /** + * Tests that we can set cipher suites and protocols. + * We can't really check whether it used what we want, but we can check + * including invalid entries doesn't kill things, provided that there + * is at least one valid entry. 
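+     *
+     * Editor's illustration of the mixed-validity settings exercised
+     * below (unknown names are tolerated as long as one entry is usable;
+     * an all-invalid list is expected to throw IllegalArgumentException):
+     *
+     *   props.setProperty(SSL_CIPHER_SUITES,
+     *                     "Foo," + SUPPORTED_CIPHER_SUITE + ",Bar");
+     *   props.setProperty(SSL_PROTOCOLS, "Foo,TLSv1.2,,Bar");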
+ */ + @Test + public void testProtosAndCiphers() + throws InterruptedException { + + Random random = new Random(randomSeed); + + final int N_PACKETS = 10; + final int MIN_PACKET_SIZE = 1; + final int MAX_PACKET_SIZE = 10; + + /* First, a successful attempt */ + + Properties props = createSSLFactoryProps(); + props.setProperty(SSL_CIPHER_SUITES, + "Foo," + SUPPORTED_CIPHER_SUITE + ",Bar"); + props.setProperty(SSL_PROTOCOLS, + "Foo,TLSv1.2,,Bar"); + ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.create(props); + InstanceParams params = makeParams(repNetConfig, null); + DataChannelFactory channelFactory = new SSLChannelFactory(params); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new BasicChecker()); + + /* Now, verify that no valid cipher entries causes an error */ + + props = createSSLFactoryProps(); + props.setProperty(SSL_CIPHER_SUITES, "Foo,Bar"); + props.setProperty(SSL_PROTOCOLS, "TLSv1,TLSv1.1,TLSv1.2"); + repNetConfig = ReplicationNetworkConfig.create(props); + params = makeParams(repNetConfig, null); + try { + channelFactory = new SSLChannelFactory(params); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + + /* Verify that no valid protocol entries causes an error */ + + props = createSSLFactoryProps(); + props.setProperty(SSL_CIPHER_SUITES, SUPPORTED_CIPHER_SUITE); + props.setProperty(SSL_PROTOCOLS, "Foo,Bar"); + repNetConfig = ReplicationNetworkConfig.create(props); + params = makeParams(repNetConfig, null); + try { + channelFactory = new SSLChannelFactory(params); + fail("expected exception"); + } catch (IllegalArgumentException iae) { + } + } + + /** + * Tests the keystore password source mechanism for keystore access + */ + @Test + public void testKeyStorePasswordSource() + throws InterruptedException { + + Random random = new Random(randomSeed); + + final int N_PACKETS = 10; + final int MIN_PACKET_SIZE = 1; + final int MAX_PACKET_SIZE = 10; + + Properties props = createSSLFactoryProps(); + + String pwPropName = SSL_KEYSTORE_PASSWORD; + final String ksPw = props.getProperty(pwPropName); + props.remove(pwPropName); + + PasswordSource pwSource = new PasswordSource () { + public char[] getPassword() { return ksPw.toCharArray(); } + }; + + ReplicationSSLConfig repNetConfig = new ReplicationSSLConfig(props); + repNetConfig.setSSLKeyStorePasswordSource(pwSource); + + InstanceParams params = makeParams(repNetConfig, null); + DataChannelFactory channelFactory = new SSLChannelFactory(params); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new BasicChecker()); + + /* No easy way to force a failure */ + } + + /** + * Tests the keystore password source loading mechanism for keystore access + */ + @Test + public void testLoadedKeyStorePasswordSource() + throws InterruptedException { + + Random random = new Random(randomSeed); + + final int N_PACKETS = 10; + final int MIN_PACKET_SIZE = 1; + final int MAX_PACKET_SIZE = 10; + + Properties props = createSSLFactoryProps(); + + String pwPropName = SSL_KEYSTORE_PASSWORD; + final String ksPw = props.getProperty(pwPropName); + props.remove(pwPropName); + + props.setProperty(SSL_KEYSTORE_PASSWORD_CLASS, + TestPasswordSource.class.getName()); + props.setProperty(SSL_KEYSTORE_PASSWORD_PARAMS, + ksPw); + + ReplicationNetworkConfig repNetConfig = + ReplicationNetworkConfig.create(props); + + InstanceParams params = makeParams(repNetConfig, null); + DataChannelFactory channelFactory = new 
SSLChannelFactory(params); + + runScenario(random, false, channelFactory, + 1, N_PACKETS, MIN_PACKET_SIZE, MAX_PACKET_SIZE, + new BasicChecker()); + + /* No easy way to force a failure */ + } + + public void runScenario(Random random, + boolean unidirectional, + DataChannelFactory channelFactory, + int nIters, + int numPackets, + int minPacketSize, + int maxPacketSize, + Checker checker) throws InterruptedException { + + numPackets *= STREAM_MULTIPLIER; + for (int iter = 0; iter < nIters; iter++) { + + Packet[] packets = new Packet[numPackets]; + + int packetIdx = 0; + for (int pkt = 0; pkt < numPackets; pkt++) { + int packetSize = minPacketSize + + random.nextInt(maxPacketSize-minPacketSize); + int dir = (pkt == 0 || unidirectional) ? + TO_SERVER : random.nextInt(2); + int bufArrLen = Math.min(packetSize, random.nextInt(8) + 1); + packets[pkt] = new Packet(dir, packetSize, + packetIdx, bufArrLen); + packetIdx += packetSize; + } + + long startTime = System.nanoTime(); + runSequence(packets, channelFactory, checker); + + /* + * To enable, add + * to the do-junit task + */ + if (showThruput) { + long endTime = System.nanoTime(); + long throughput = packetIdx * 1000000000L; + throughput /= (endTime-startTime); + System.out.println("stream " + iter + " has " + numPackets + + " packets with " + packetIdx + " bytes: " + + throughput + " bytes/sec"); + } + } + } + + void runSequence(Packet[] packets, + DataChannelFactory channelFactory, + Checker checker) { + ServerTask server = spawnServer(packets, channelFactory); + ClientTask client = spawnClient(packets, channelFactory); + try { + client.getThread().join(); + server.exitLoop(); + server.getThread().join(); + } catch (InterruptedException ie) { + System.out.println("Unexpected interruption"); + } + if (client.getError() != null && showErrors) { + System.out.println("client error"); + client.getError().printStackTrace(); + } + checker.check(client, server); + } + + private Properties createSSLFactoryProps() { + /* Set up properties to get the pre-built keystore and truststore */ + Properties props = new Properties(); + RepTestUtils.setUnitTestSSLProperties(props); + return props; + } + + private Properties addPatternAuthentication(Properties props, + String authPattern, + String hvPattern) { + props.put(ReplicationSSLConfig.SSL_AUTHENTICATOR_CLASS, + SSLDNAuthenticator.class.getName()); + props.put(ReplicationSSLConfig.SSL_AUTHENTICATOR_PARAMS, + authPattern); + props.put(ReplicationSSLConfig.SSL_HOST_VERIFIER_CLASS, + SSLDNHostVerifier.class.getName()); + props.put(ReplicationSSLConfig.SSL_HOST_VERIFIER_PARAMS, + hvPattern); + return props; + } + + private Properties setClientKeyAlias(Properties props, String alias) { + props.put(ReplicationSSLConfig.SSL_CLIENT_KEY_ALIAS, alias); + return props; + } + + private Properties addMirrorAuthentication(Properties props) { + props.put(ReplicationSSLConfig.SSL_AUTHENTICATOR_CLASS, + SSLMirrorAuthenticator.class.getName()); + props.put(ReplicationSSLConfig.SSL_HOST_VERIFIER_CLASS, + SSLMirrorHostVerifier.class.getName()); + return props; + } + + private Properties addStdVerification(Properties props) { + props.put(ReplicationSSLConfig.SSL_HOST_VERIFIER_CLASS, + SSLStdHostVerifier.class.getName()); + return props; + } + + private DataChannelFactory createSSLFactory(Properties props) { + final InstanceParams params = makeParams( + ReplicationNetworkConfig.create(props), null); + return new SSLChannelFactory(params); + } + + private InstanceParams makeParams(ReplicationNetworkConfig config, + String 
paramVal) { + return new InstanceParams( + new InstanceContext(config, + new ChannelLoggerFactory( + null, /* envImpl */ + new ChannelFormatter("SSLChannelTest"))), + null); + } + + public final static String INITIAL_MESSAGE_STR = "Hello"; + public final static byte[] INITIAL_MESSAGE = + StringUtils.toASCII(INITIAL_MESSAGE_STR); + public final static int INITIAL_BUFFER_SIZE = INITIAL_MESSAGE.length; + + ServerTask spawnServer( + Packet[] packets, DataChannelFactory channelFactory) { + + ServerTask serverTask = new ServerTask(packets, channelFactory); + Thread serverThread = new Thread(serverTask); + serverTask.setThread(serverThread); + serverThread.start(); + return serverTask; + } + + ClientTask spawnClient( + Packet[] packets, DataChannelFactory channelFactory) { + + ClientTask clientTask = new ClientTask(packets, channelFactory); + Thread clientThread = new Thread(clientTask); + clientTask.setThread(clientThread); + clientThread.start(); + return clientTask; + } + + class BasicTask { + + private Thread executingThread = null; + private Exception error = null; + private boolean otherError = false; + + void processPackets( + DataChannel dataChannel, boolean asServer, Packet[] packets) { + + int midPoint = packets.length/2; + int pkt = -1; + + try { + for (pkt = 0; pkt < packets.length; pkt++) { + Packet packet = packets[pkt]; + + ByteBuffer[] bufarr = packet.getBufArr(); + + /* + * For the server, switch from non-blocking to blocking + * half-way through the sequence of packets. + */ + if (asServer && pkt == midPoint) { + dataChannel.getSocketChannel().configureBlocking(true); + } + + if (packet.toServer() != asServer) { + /* Send a packet */ + packet.fill(bufarr); + + /* Write first half */ + int remaining = packet.nbytesArrHalf1(); + int halfOffset = packet.getArrHalfwayIdx(); + while (remaining > 0) { + long n = dataChannel.write( + bufarr, 0, halfOffset); + remaining -= n; + } + /* Write the rest */ + remaining = packet.nbytesArrHalf2(); + while (remaining > 0) { + long n = dataChannel.write( + bufarr, halfOffset, + packet.getBufArrLen() - halfOffset); + remaining -= n; + } + while (dataChannel.flush() != + DataChannel.FlushStatus.DONE) { + /* repeat */ + } + } else { + /* Receive a packet */ + /* read first half */ + int remaining = packet.nbytesArrHalf1(); + int halfOffset = packet.getArrHalfwayIdx(); + while (remaining > 0) { + long n = dataChannel.read( + bufarr, 0, halfOffset); + remaining -= n; + } + /* read the rest */ + remaining = packet.nbytesArrHalf2(); + while (remaining > 0) { + long n = dataChannel.read( + bufarr, halfOffset, + packet.getBufArrLen() - halfOffset); + remaining -= n; + } + packet.check(bufarr); + } + } + } catch (IOException ioe) { + System.out.println("Basic task as " + + (asServer ? 
"server" : "client") + + " got IOException: " + ioe + + " on packet " + pkt + " of " + + packets.length); + } + + try { + dataChannel.close(); + } catch (IOException ioe) { + /* ignore */ + } + } + + Thread getThread() { + return executingThread; + } + void setThread(Thread t) { + executingThread = t; + } + + Exception getError() { + return error; + } + + void setError(Exception e) { + error = e; + } + + void noteOtherError() { + otherError = true; + } + + boolean getOtherError() { + return otherError; + } + + } + + class ClientTask extends BasicTask implements Runnable { + + Packet[] packets; + DataChannelFactory channelFactory; + + ClientTask(Packet[] packets, DataChannelFactory channelFactory) { + this.packets = packets; + this.channelFactory = channelFactory; + } + + public void run() { + DataChannel dataChannel = null; + try { + dataChannel = channelFactory.connect(socketAddress, + new ConnectOptions()); + ByteBuffer buffer = + ByteBuffer.allocate(INITIAL_BUFFER_SIZE); + buffer.put(INITIAL_MESSAGE); + buffer.flip(); + dataChannel.write(buffer); + + processPackets(dataChannel, + false, /* asServer */ + packets); + + } catch (IOException ioe) { + + if (showErrors) { + System.out.println("Client task got IOException: " + ioe); + } + setError(ioe); + } + + try { + dataChannel.close(); + } catch (IOException ioe) { + if (getError() == null) { + setError(ioe); + } + } + } + } + + class ServerTask extends BasicTask implements Runnable { + + Packet[] packets; + DataChannelFactory channelFactory; + volatile boolean exit = false; + boolean channelSecure = false; + boolean channelTrusted = false; + + ServerTask(Packet[] packets, DataChannelFactory channelFactory) { + + this.packets = packets; + this.channelFactory = channelFactory; + } + + void exitLoop() { + exit = true; + } + + public void run() { + while (true) { + try { + int result = selector.select(10L); + + if (exit) { + return; + } + if (result == 0) { + continue; + } + } catch (IOException e) { + if (showErrors) { + System.out.println( + "Server socket exception " + e.getMessage()); + } + if (getError() == null) { + setError(e); + } + } + + Set skeys = selector.selectedKeys(); + for (SelectionKey key : skeys) { + switch (key.readyOps()) { + + case SelectionKey.OP_ACCEPT: + processAccept(); + break; + + case SelectionKey.OP_READ: + boolean proceed = processRead(key); + if (!proceed) { + break; + } + key.cancel(); + ChannelBuffer cb = (ChannelBuffer)key.attachment(); + + this.channelSecure = cb.getChannel().isSecure(); + this.channelTrusted = cb.getChannel().isTrusted(); + + processPackets(cb.getChannel(), + true, /*asServer*/ + packets); + try { + cb.getChannel().close(); + } catch (IOException ioe) { + if (getError() == null) { + setError(ioe); + } + } + break; + + default: + System.out.println( + "Unexpected ops bit set: " + key.readyOps()); + } + } + /* All keys have been processed clear them. 
*/ + skeys.clear(); + } + } + + void processAccept() { + SocketChannel socketChannel = null; + try { + socketChannel = serverChannel.accept(); + socketChannel.configureBlocking(false); + DataChannel dataChannel = + channelFactory.acceptChannel(socketChannel); + + ChannelBuffer channelBuffer = + new ChannelBuffer(dataChannel, + ByteBuffer.allocate(INITIAL_BUFFER_SIZE)); + + /* Register the selector with the base SocketChannel */ + socketChannel.register + (selector, + SelectionKey.OP_READ, + channelBuffer); + } catch (IOException e) { + if (showErrors) { + System.out.println( + "Server accept exception: " + e.getMessage()); + } + try { + socketChannel.close(); + } catch (IOException ioe) { + } + } + } + + private boolean processRead(SelectionKey readKey) { + DataChannel dataChannel = null; + try { + ChannelBuffer readChannelBuffer = + (ChannelBuffer) readKey.attachment(); + dataChannel = readChannelBuffer.getChannel(); + ByteBuffer readBuffer = readChannelBuffer.getBuffer(); + int readBytes = dataChannel.read(readBuffer); + if (readBytes < 0 ) { + /* Premature EOF */ + if (showErrors) { + System.out.println( + "Premature EOF on channel: " + + dataChannel + " read() returned: " + + readBytes); + } + dataChannel.close(); + return false; + } + if (readBuffer.remaining() == 0) { + readBuffer.flip(); + String message = StringUtils.fromASCII + (readBuffer.array(), 0, INITIAL_MESSAGE.length); + if (!message.equals(INITIAL_MESSAGE_STR)) { + dataChannel.close(); + return false; + } + + return true; + } + + /* Buffer not full as yet, keep reading */ + return false; + } catch (IOException e) { + if (showErrors) { + System.out.println( + "Exception during read: " + e.getMessage()); + } + try { + dataChannel.close(); + } catch (IOException ioe) { + } + + return false; + } + } + } + + public static class TestPasswordSource implements PasswordSource { + private final String configuredPassword; + + public TestPasswordSource(InstanceParams params) { + configuredPassword = params.getClassParams(); + } + + public char[] getPassword() { + if (configuredPassword == null) { + return null; + } + return configuredPassword.toCharArray(); + } + } +} diff --git a/test/com/sleepycat/je/rep/utilint/net/SSLMultiThreadTest.java b/test/com/sleepycat/je/rep/utilint/net/SSLMultiThreadTest.java new file mode 100644 index 0000000..f3d9816 --- /dev/null +++ b/test/com/sleepycat/je/rep/utilint/net/SSLMultiThreadTest.java @@ -0,0 +1,633 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.utilint.net; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.PrintStream; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.Random; +import java.util.Set; + +import com.sleepycat.je.rep.ReplicationNetworkConfig; +import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.DataChannelFactory; +import com.sleepycat.je.rep.net.DataChannelFactory.ConnectOptions; +import com.sleepycat.je.rep.net.InstanceContext; +import com.sleepycat.je.rep.net.InstanceParams; +import com.sleepycat.je.rep.utilint.FreePortLocator; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.net.SSLChannelFactory; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder.ChannelFormatter; +import com.sleepycat.je.rep.utilint.net.DataChannelFactoryBuilder.ChannelLoggerFactory; + +import org.junit.Test; + +/** + * This test is intended to check the behavior of the SSLDataChannel class when + * multiple threads access the channel concurrently. + * + * TODO: refactor some of this to add reusability. + */ +public class SSLMultiThreadTest { + + private final String hostname = "localhost"; + private final PrintStream log = System.out; + private int port = 0; + private InetSocketAddress socketAddress; + private DataChannelFactory channelFactory; + + /** + * This test exercises a case that can occur with JE HA feeders, where there + * are separate threads for read and write. When operating in blocking + * mode, a case was found where a blocked writer would prevent a + * reader from reading, leading to a cross-process deadlock. + */ + @Test + public void TestWritesBeforeReads() + throws IOException { + + channelFactory = createSSLFactory(createSSLFactoryProps()); + findFreePort(); + socketAddress = new InetSocketAddress(hostname, port); + + /* + * A place to put newly accepted incoming channels. + */ + final List channelList = + Collections.synchronizedList(new ArrayList()); + + ConnectionHandler handler = new ConnectionHandler() { + @Override + public void newChannel(DataChannel channel) { + channelList.add(channel); + }}; + + ListenerTask listener = spawnListener(handler); + + /* + * Allow time for the Listener thread to get started and to bind + * its listen socket. + */ + delay(500); + + final DataChannel clientChannel = + channelFactory.connect(socketAddress, + new ConnectOptions(). + setBlocking(true)); + + /* + * Wait for the handler to push the channel on our list. Provide a + * limit on the number of iterations to avoid a full test timeout + * in the event something goes wrong. + */ + for (int i = 0; i < 100 && channelList.isEmpty(); i++) { + delay(100); + } + + final DataChannel serverChannel = channelList.get(0); + + /* switch to blocking mode */ + serverChannel.getSocketChannel().configureBlocking(true); + + /* + * This is how much data will be written from each end. This needs + * to be large enough to fill the TCP window buffers plus SSL internal + * buffers. More is better still, but the key point is to ensure that + * writers are blocked on writing. 
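+ * + * As a rough guide (typical platform defaults, not guaranteed figures): + * TCP send and receive buffers are usually tens to a few hundred KB per + * side, and the SSL layer buffers at most about one 16KB TLS record per + * direction, so a few MB from each writer comfortably exceeds the + * combined buffering.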
+ */ + final int writeMB = 10; + final int writeAmount = writeMB * 1024 * 1024; + + WriteTask clientWriter = + spawnWriter("client", clientChannel, writeAmount); + WriteTask serverWriter = + spawnWriter("server", serverChannel, writeAmount); + + /* Pause to allow the writers to fill their output buffers */ + delay(2000); + + /* + * Check that we've supplied enough data to leave the writers + * stuck in their writes. + */ + final Stats clStats = clientWriter.getStats(); + assertTrue("client didn't write any", clStats.getBytes() > 0); + assertTrue("client wrote all", clStats.getBytes() < writeAmount); + + final Stats srStats = serverWriter.getStats(); + assertTrue("server didn't write any", srStats.getBytes() > 0); + assertTrue("server wrote all", srStats.getBytes() < writeAmount); + + /* Now fire up the readers */ + final ReadTask clientReader = spawnReader("client", clientChannel); + final ReadTask serverReader = spawnReader("server", serverChannel); + + boolean completed = false; + + final long startTime = System.currentTimeMillis(); + while (true) { + + if (clientReader.getStats().getBytes() >= writeAmount && + serverReader.getStats().getBytes() >= writeAmount) { + completed = true; + break; + } + + /* + * Allow up to 1 second per MB to let the transmission complete + */ + if ((System.currentTimeMillis() - startTime) > writeMB * 1000L) { + break; + } + + delay(100); + } + + assertTrue("didn't complete successfully", completed); + + /* + * Tell the readers and writers that their work is done so that they + * don't complain when they see an exception when the sockets get + * shut down. + */ + clientReader.setDone(); + clientWriter.setDone(); + serverReader.setDone(); + serverWriter.setDone(); + + /* Close the client end of the socket */ + clientChannel.close(); + + /* Tell the listener to quit listening */ + listener.exitLoop(); + + /* wait for the readers and writers to stop */ + joinThread(clientReader.getThread()); + joinThread(clientWriter.getThread()); + joinThread(serverReader.getThread()); + joinThread(serverWriter.getThread()); + + /* wait for the listener to stop */ + joinThread(listener.getThread()); + } + + private void joinThread(Thread t) { + while (true) { + try { + t.join(); + return; + } catch (InterruptedException ie) { + } + } + } + + private void delay(int ms) { + try { + Thread.sleep(ms); + } catch (InterruptedException ie) { + } + } + + private Properties createSSLFactoryProps() { + /* Set up properties to get the pre-built keystore and truststore */ + Properties props = new Properties(); + RepTestUtils.setUnitTestSSLProperties(props); + return props; + } + + private static DataChannelFactory createSSLFactory(Properties props) { + final InstanceParams params = makeParams( + ReplicationNetworkConfig.create(props), null); + return new SSLChannelFactory(params); + } + + private static InstanceParams makeParams(ReplicationNetworkConfig config, + String paramVal) { + return new InstanceParams( + new InstanceContext(config, + new ChannelLoggerFactory( + null, /* envImpl */ + new ChannelFormatter("SSLChannelTest"))), + null); + } + + ListenerTask spawnListener(ConnectionHandler handler) { + + ListenerTask listenerTask = new ListenerTask(handler); + Thread listenerThread = new Thread(listenerTask); + listenerThread.setName("Server Listener"); + listenerTask.setThread(listenerThread); + listenerThread.start(); + return listenerTask; + } + + ReadTask spawnReader(String owner, DataChannel channel) { + + ReadTask readTask = new ReadTask(owner, channel); + Thread readThread = new Thread(readTask); + 
readThread.setName(owner + "Reader"); + readTask.setThread(readThread); + readThread.start(); + return readTask; + } + + WriteTask spawnWriter(String owner, DataChannel channel, int writeAmt) { + + WriteTask writeTask = new WriteTask(owner, channel, writeAmt); + Thread writeThread = new Thread(writeTask); + writeThread.setName(owner + "Writer"); + writeTask.setThread(writeThread); + writeThread.start(); + return writeTask; + } + + class Stats { + private long bytes; + private long packets; + private int problems; + + Stats() { + bytes = 0; + packets = 0; + } + + Stats(long bytes, long packets) { + this.bytes = bytes; + this.packets = packets; + } + + synchronized Stats copy() { + return new Stats(bytes, packets); + } + + synchronized void update(int packetSize) { + packets++; + bytes += packetSize; + } + + synchronized void hadProblem() { + problems += 1; + } + + synchronized int getProblems() { + return problems; + } + + synchronized long getPackets() { + return packets; + } + + synchronized long getBytes() { + return bytes; + } + + Stats subtract(Stats other) { + long newBytes = bytes - other.bytes; + long newPackets = packets - other.packets; + return new Stats(newBytes, newPackets); + } + } + + class BasicTask { + + private volatile Thread executingThread = null; + + BasicTask() { + } + + Thread getThread() { + return executingThread; + } + void setThread(Thread t) { + executingThread = t; + } + } + + class IOTask extends BasicTask { + + protected final Random random = new Random(); + protected final DataChannel channel; + protected final Stats stats = new Stats(); + protected final String owner; + protected volatile boolean taskDone; + + IOTask(String owner, DataChannel channel) { + this.owner = owner; + this.channel = channel; + this.taskDone = false; + } + + void setDone() { + taskDone = true; + } + + Stats getStats() { + return stats.copy(); + } + } + + class ReadTask extends IOTask implements Runnable{ + + private int currPktNum = 0; + private int currByteNum = 0; + + ReadTask(String owner, DataChannel channel) { + super(owner, channel); + } + + public void run() { + + /* + * packet# + bytecount + */ + final int HDR_SIZE = 8; + ByteBuffer hdrBuf = ByteBuffer.allocate(HDR_SIZE); + + final int MAX_DATA_SIZE = 0x10000; + byte[] msgData = new byte[MAX_DATA_SIZE]; + boolean done = false; + + try { + while (!done) { + /* Receive a packet */ + while (hdrBuf.remaining() > 0) { + if (channel.read(hdrBuf) < 0) { + done = true; + break; + } + } + if (done) { + break; + } + hdrBuf.flip(); + int pktNum = hdrBuf.getInt(); + int pktCount = hdrBuf.getInt(); + hdrBuf.compact(); + + if (pktNum != currPktNum) { + fatalCorruption(owner + ": packetNumber mismatch"); + } + + if (pktCount > MAX_DATA_SIZE) { + fatalCorruption(owner + ": illegal packet size"); + } + + ByteBuffer msgBuf = ByteBuffer.allocate(pktCount); + while (msgBuf.remaining() > 0) { + channel.read(msgBuf); + } + + msgBuf.flip(); + msgBuf.get(msgData, 0, pktCount); + + for (int i = 0; i < pktCount; i++) { + if (msgData[i] != (byte)(currByteNum + i)) { + fatalCorruption(owner + ": data content mismatch"); + } + } + currByteNum += pktCount; + currPktNum++; + + stats.update(pktCount); + } + } catch (IOException ioe) { + if (!taskDone) { + log.println("Read task got IOException: " + ioe); + ioe.printStackTrace(log); + } + } + } + } + + class WriteTask extends IOTask implements Runnable { + + private int currPktNum = 0; + private int currByteNum = 0; + private final int writeAmount; + + WriteTask(String owner, DataChannel channel, int 
writeAmount) { + super(owner, channel); + this.writeAmount = writeAmount; + } + + public void run() { + + /* + * packet# + bytecount + */ + final int HDR_SIZE = 8; + final int MAX_DATA_SIZE = 0x10000; + byte[] msgData = new byte[MAX_DATA_SIZE + HDR_SIZE]; + final Random random = new Random(); + int bytesWritten = 0; + + try { + while (bytesWritten < writeAmount) { + int pktSize = (random.nextInt() & 0xffff) + 1; + + if (pktSize > (writeAmount - bytesWritten)) { + pktSize = (writeAmount - bytesWritten); + } + + /* pktBrk is the chunk size for splitting the message */ + int pktBrk = random.nextInt() & 0x1ffff; + + if (pktBrk < HDR_SIZE) { + pktBrk = HDR_SIZE; + } else if (pktBrk > pktSize + HDR_SIZE) { + pktBrk = pktSize + HDR_SIZE; + } + + for (int i = 0; i < pktSize; i++) { + msgData[i] = (byte) (currByteNum + i); + } + currByteNum += pktSize; + + ByteBuffer buffer = ByteBuffer.allocate(pktBrk); + buffer.putInt(currPktNum++); + buffer.putInt(pktSize); + + int sent = 0; + while (sent < pktSize) { + int toSend = pktSize - sent; + int rem = buffer.remaining(); + if (rem > 0) { + if (rem > toSend) { + /* can't happen in the first pass */ + buffer = ByteBuffer.allocate(toSend); + } else if (toSend > rem) { + toSend = rem; + } + buffer.put(msgData, sent, toSend); + sent += toSend; + } + buffer.flip(); + channel.write(buffer); + buffer.compact(); + + } + stats.update(pktSize); + bytesWritten += pktSize; + } + } catch (IOException ioe) { + if (!taskDone) { + log.println("Write task got IOException: " + ioe); + } + } + } + } + + interface ConnectionHandler { + /** + * Called by the listener task when a new incoming connection is + * accepted. The underlying socket channel is in non-blocking mode. + */ + void newChannel(DataChannel channel); + } + + class ListenerTask extends BasicTask implements Runnable { + + /* + * The selector that watches for accept events on the server socket and + * on subsequent read events. + */ + private Selector selector; + + /* The server socket channel */ + private ServerSocketChannel serverChannel; + + private volatile boolean exit; + + private ConnectionHandler handler; + + ListenerTask(ConnectionHandler handler) { + this.handler = handler; + } + + void exitLoop() { + exit = true; + } + + public void run() { + initServerSocket(); + processIncoming(); + closeServerSocket(); + } + + void initServerSocket() { + try { + serverChannel = ServerSocketChannel.open(); + serverChannel.configureBlocking(false); + selector = Selector.open(); + serverChannel.register(selector, SelectionKey.OP_ACCEPT); + + ServerSocket acceptSocket = serverChannel.socket(); + + /* No timeout */ + acceptSocket.setSoTimeout(0); + acceptSocket.bind(socketAddress); + + } catch (IOException ioe) { + log.println("Error initializing server socket"); + ioe.printStackTrace(); + } + } + + void closeServerSocket() { + try { + serverChannel.socket().close(); + selector.close(); + } catch (IOException ioe) { + log.println("Error closing server socket"); + ioe.printStackTrace(); + } + } + + void processIncoming() { + + while (!exit) { + try { + int result = selector.select(10L); + + if (result == 0) { + continue; + } + } catch (IOException e) { + log.println("Server socket exception " + e.getMessage()); + } + + Set skeys = selector.selectedKeys(); + for (SelectionKey key : skeys) { + switch (key.readyOps()) { + + case SelectionKey.OP_ACCEPT: + processAccept(); + break; + + default: + log.println( + "Unexpected ops bit set: " + key.readyOps()); + } + } + /* All keys have been processed; clear them. 
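+ Selector.select() does not remove keys from the selected-key set + itself; if they were left in place, the next select() would report + the same keys again.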
*/ + skeys.clear(); + } + } + + void processAccept() { + SocketChannel socketChannel = null; + try { + socketChannel = serverChannel.accept(); + socketChannel.configureBlocking(false); + DataChannel dataChannel = + channelFactory.acceptChannel(socketChannel); + handler.newChannel(dataChannel); + } catch (IOException e) { + log.println("Server accept exception: " + e.getMessage()); + try { + socketChannel.close(); + } catch (IOException ioe) { + } + } + } + } + + private void fatalCorruption(String msg) { + log.println("fatal corruption encountered: " + msg); + while (true) { + try { + Thread.sleep(1000); + } catch (InterruptedException ie) { + } + } + } + + private void findFreePort() { + final FreePortLocator locator = + new FreePortLocator(hostname, 5000, 6000); + port = locator.next(); + } +} diff --git a/test/com/sleepycat/je/rep/vlsn/MergeTest.java b/test/com/sleepycat/je/rep/vlsn/MergeTest.java new file mode 100644 index 0000000..ac3c32d --- /dev/null +++ b/test/com/sleepycat/je/rep/vlsn/MergeTest.java @@ -0,0 +1,432 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.vlsn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import org.junit.Test; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.log.Provisional; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.log.entry.LogEntry; +import com.sleepycat.je.log.entry.SingleItemEntry; +import com.sleepycat.je.recovery.RecoveryInfo; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.vlsn.VLSNIndex.ForwardVLSNScanner; +import com.sleepycat.je.txn.RollbackStart; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class MergeTest extends TestBase { + + private final String testMapDb = "TEST_MAP_DB"; + private final boolean verbose = Boolean.getBoolean("verbose"); + private final File envHome; + private final byte lnType = + LogEntryType.LOG_INS_LN_TRANSACTIONAL.getTypeNum(); + + private Environment env; + private EnvironmentImpl envImpl; + private int bucketStride = 4; + private int bucketMaxMappings = 3; + private int recoveryStride = 3; + private int recoveryMaxMappings = 4; + + + public MergeTest() { + envHome = SharedTestUtils.getTestDir(); + } + + private Environment makeEnvironment() { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(false); + return new Environment(envHome, envConfig); + } + + /** 
+ * Test the tricky business of recovering the VLSNIndex. See VLSNIndex(), + * and how the vlsnIndex is initialized from what's persistent on disk, and + * another tracker is filled with vlsn->lsn mappings gleaned from reading + * the log during recovery. The recovery tracker's contents are used to + * override what is on the on-disk tracker. + */ + @Test + public void testMerge() + throws Throwable { + env = makeEnvironment(); + envImpl = DbInternal.getNonNullEnvImpl(env); + + /* + * VLSN ranges for the test: + * start->initialSize are mapped before recovery + * secondStart ->recoverySize are mapped in the recovery tracker. + */ + long start = 1; + long initialSize = 40; + long secondStart = start + initialSize - 20; + long recoverySize = 30; + + RecoveryTrackerGenerator generator = + new NoRollbackGenerator(secondStart, recoverySize); + try { + doMerge(generator, initialSize); + } finally { + env.close(); + } + } + + @Test + public void testSingleRBMerge() + throws Throwable { + env = makeEnvironment(); + envImpl = DbInternal.getNonNullEnvImpl(env); + + long initialSize = 40; + + RecoveryTrackerGenerator generator = new RollbackGenerator + (new RollbackInfo(new VLSN(20), DbLsn.makeLsn(5, 20 * 10))); + + try { + doMerge(generator, initialSize); + } finally { + env.close(); + } + } + + @Test + public void testMultiRBMerge() + throws Throwable { + env = makeEnvironment(); + envImpl = DbInternal.getNonNullEnvImpl(env); + + long initialSize = 50; + + RecoveryTrackerGenerator generator = new RollbackGenerator + (new RollbackInfo(new VLSN(30), DbLsn.makeLsn(5, 30 * 10)), + new TestInfo(31, DbLsn.makeLsn(6, (31 * 30))), + new TestInfo(32, DbLsn.makeLsn(6, (32 * 30))), + new TestInfo(33, DbLsn.makeLsn(6, (33 * 30))), + new TestInfo(34, DbLsn.makeLsn(6, (34 * 30))), + new TestInfo(35, DbLsn.makeLsn(6, (35 * 30))), + new TestInfo(36, DbLsn.makeLsn(6, (36 * 30))), + new TestInfo(37, DbLsn.makeLsn(6, (37 * 30))), + new RollbackInfo(new VLSN(33), DbLsn.makeLsn(6, (33 * 30))), + new TestInfo(34, DbLsn.makeLsn(7, (34 * 40))), + new TestInfo(35, DbLsn.makeLsn(7, (35 * 40))), + new TestInfo(36, DbLsn.makeLsn(7, (36 * 40))), + new TestInfo(37, DbLsn.makeLsn(7, (37 * 40)))); + + try { + doMerge(generator, initialSize); + } finally { + env.close(); + } + } + + @Test + public void testRBInRecoveryLogMerge() + throws Throwable { + env = makeEnvironment(); + envImpl = DbInternal.getNonNullEnvImpl(env); + + long initialSize = 50; + + RecoveryTrackerGenerator generator = new RollbackGenerator + (new TestInfo(51, DbLsn.makeLsn(6, (51 * 30))), + new TestInfo(52, DbLsn.makeLsn(6, (52 * 30))), + new TestInfo(53, DbLsn.makeLsn(6, (53 * 30))), + new TestInfo(54, DbLsn.makeLsn(6, (54 * 30))), + new TestInfo(55, DbLsn.makeLsn(6, (55 * 30))), + new TestInfo(56, DbLsn.makeLsn(6, (56 * 30))), + new TestInfo(57, DbLsn.makeLsn(6, (57 * 30))), + new TestInfo(58, DbLsn.makeLsn(6, (58 * 30))), + new TestInfo(59, DbLsn.makeLsn(6, (59 * 30))), + new TestInfo(60, DbLsn.makeLsn(6, (60 * 30))), + new TestInfo(61, DbLsn.makeLsn(6, (61 * 30))), + new RollbackInfo(new VLSN(55), DbLsn.makeLsn(6, (55 * 30))), + new TestInfo(56, DbLsn.makeLsn(7, (56 * 40))), + new TestInfo(57, DbLsn.makeLsn(7, (57 * 40)))); + + try { + doMerge(generator, initialSize); + } finally { + env.close(); + } + } + + private void doMerge(RecoveryTrackerGenerator generator, + long initialSize) + throws Throwable { + + for (int flushPoint = 1; flushPoint <= initialSize; flushPoint++) { + if (verbose) { + System.out.println("flush=" + flushPoint + + " initSize = " + 
initialSize); + } + + VLSNIndex vlsnIndex = new VLSNIndex(envImpl, testMapDb, + new NameIdPair("node1", 1), + bucketStride, bucketMaxMappings, + 10000, new RecoveryInfo()); + try { + + List expected = new ArrayList(); + + populate(flushPoint, vlsnIndex, initialSize, expected); + vlsnIndex.merge(generator.makeRecoveryTracker(expected)); + + assertTrue(vlsnIndex.verify(verbose)); + checkMerge(vlsnIndex, expected); + + } catch (Throwable e) { + e.printStackTrace(); + throw e; + } finally { + vlsnIndex.close(); + env.removeDatabase(null, testMapDb); + } + } + } + + /** + * Fill up an initial VLSNIndex, flushing at different spots to create a + * different tracker/on-disk mix. + */ + private void populate(int flushPoint, + VLSNIndex vlsnIndex, + long initialSize, + List expected) { + + + for (long i = 1; i <= initialSize; i++) { + TestInfo info = new TestInfo(i, DbLsn.makeLsn(5, i * 10)); + + /* populate vlsn index */ + vlsnIndex.put(makeLogItem(info)); + + /* populate expected list */ + expected.add(info); + + if (i == flushPoint) { + vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC); + } + } + } + + private LogItem makeLogItem(TestInfo info) { + LogItem item = new LogItem(); + item.header = info.header; + item.lsn = info.lsn; + return item; + } + + private void checkMerge(VLSNIndex vlsnIndex, List expected) { + + /* The new tracker should have the right range. */ + VLSNRange range = vlsnIndex.getRange(); + assertEquals(new VLSN(1), range.getFirst()); + VLSN lastVLSN = expected.get(expected.size() - 1).vlsn; + assertEquals(lastVLSN, range.getLast()); + + // TODO: test that the sync and commit fields in the tracker are + // correct. + + ForwardVLSNScanner scanner = new ForwardVLSNScanner(vlsnIndex); + long firstLsn = scanner.getStartingLsn(expected.get(0).vlsn); + assertEquals(DbLsn.getNoFormatString(expected.get(0).lsn) + + " saw first VLSN " + DbLsn.getNoFormatString(firstLsn), + expected.get(0).lsn, firstLsn); + + boolean vlsnForLastInRange = false; + + int validMappings = 0; + for (TestInfo info : expected) { + long lsn = scanner.getPreciseLsn(info.vlsn); + if (lsn != DbLsn.NULL_LSN) { + if (verbose) { + System.out.println(info); + } + + assertEquals(DbLsn.getNoFormatString(info.lsn), info.lsn, lsn); + validMappings++; + + if (info.vlsn.equals(lastVLSN)) { + vlsnForLastInRange = true; + } + } + } + + /* Should see a lsn value for the last VLSN in the range. */ + assertTrue(vlsnForLastInRange); + + /* Some portion of the expected set should be mapped. */ + assertTrue(validMappings > (expected.size()/bucketStride) - 1); + } + + interface RecoveryTrackerGenerator { + public VLSNRecoveryTracker makeRecoveryTracker(List expected); + } + + private class NoRollbackGenerator implements RecoveryTrackerGenerator { + + private long secondStart; + private long recoverySize; + + NoRollbackGenerator(long secondStart, + long recoverySize) { + this.secondStart = secondStart; + this.recoverySize = recoverySize; + } + + public VLSNRecoveryTracker makeRecoveryTracker + (List expected) { + + VLSNRecoveryTracker recoveryTracker = + new VLSNRecoveryTracker(envImpl, + recoveryStride, + recoveryMaxMappings, + 100000); + + /* Truncate the expected mappings list. 
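+ Mappings at or above secondStart will be re-added below from the + recovery tracker, whose mappings override what was persisted on disk.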
*/ + Iterator iter = expected.iterator(); + while (iter.hasNext()) { + TestInfo ti = iter.next(); + if (ti.vlsn.getSequence() >= secondStart) { + iter.remove(); + } + } + + for (long i = secondStart; i < secondStart + recoverySize; i ++) { + TestInfo info = new TestInfo(i, DbLsn.makeLsn(6,i * 20)); + recoveryTracker.trackMapping(info.lsn, + info.header, + info.entry); + expected.add(info); + } + + return recoveryTracker; + } + } + + private class RollbackGenerator implements RecoveryTrackerGenerator { + private final Object[] recoveryLog; + + RollbackGenerator(Object ... recoveryLog) { + this.recoveryLog = recoveryLog; + } + + public VLSNRecoveryTracker + makeRecoveryTracker(List expected) { + + VLSNRecoveryTracker recoveryTracker = + new VLSNRecoveryTracker(envImpl, + recoveryStride, + recoveryMaxMappings, + 100000); + + for (Object info : recoveryLog) { + if (info instanceof TestInfo) { + TestInfo t = (TestInfo) info; + recoveryTracker.trackMapping(t.lsn, t.header, t.entry); + expected.add(t); + } else if (info instanceof RollbackInfo) { + RollbackInfo r = (RollbackInfo) info; + + /* Register the pseudo rollback with the tracker. */ + recoveryTracker.trackMapping(0 /* lsn */, + r.header, r.rollbackEntry); + + /* Truncate the expected mappings list. */ + Iterator iter = expected.iterator(); + while (iter.hasNext()) { + TestInfo ti = iter.next(); + if (ti.vlsn.compareTo(r.matchpointVLSN) > 0) { + iter.remove(); + } + } + } + } + + return recoveryTracker; + } + } + + private class TestInfo { + final long lsn; + final VLSN vlsn; + final LogEntryHeader header; + final LogEntry entry; + + TestInfo(long vlsnVal, long lsn, LogEntry entry) { + this.lsn = lsn; + this.vlsn = new VLSN(vlsnVal); + this.header = new LogEntryHeader(entry.getLogType().getTypeNum(), + 0, 0, vlsn); + this.entry = entry; + } + + TestInfo(long vlsnVal, long lsn) { + this.lsn = lsn; + this.vlsn = new VLSN(vlsnVal); + this.header = new LogEntryHeader(lnType, 0, 0, vlsn); + this.entry = null; + } + + @Override + public String toString() { + return "vlsn=" + vlsn + " lsn=" + DbLsn.getNoFormatString(lsn) + + " entryType=" + header.getType(); + } + } + + private class RollbackInfo { + final VLSN matchpointVLSN; + final LogEntryHeader header; + final LogEntry rollbackEntry; + + RollbackInfo(VLSN matchpointVLSN, long matchpointLsn) { + + this.matchpointVLSN = matchpointVLSN; + Set noActiveTxns = Collections.emptySet(); + rollbackEntry = + SingleItemEntry.create(LogEntryType.LOG_ROLLBACK_START, + new RollbackStart(matchpointVLSN, + matchpointLsn, + noActiveTxns)); + + header = new LogEntryHeader(rollbackEntry, Provisional.NO, + ReplicationContext.NO_REPLICATE); + } + } +} diff --git a/test/com/sleepycat/je/rep/vlsn/SyncupWithGapsTest.java b/test/com/sleepycat/je/rep/vlsn/SyncupWithGapsTest.java new file mode 100644 index 0000000..bf98068 --- /dev/null +++ b/test/com/sleepycat/je/rep/vlsn/SyncupWithGapsTest.java @@ -0,0 +1,451 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.vlsn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.logging.Logger; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.Durability.SyncPolicy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.NetworkRestore; +import com.sleepycat.je.rep.NetworkRestoreConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Log cleaning can create gaps after the rightmost vlsn, due to the greater + * effort spent in finding cleanable files that don't interfere with the + * replication stream. This test checks that we proactively clean files to the + * right of the last VLSN, if possible, and that we can sync up when logs + * have gaps due to this kind of proactive cleaning. [#21069] + */ +public class SyncupWithGapsTest extends TestBase { + + private static final String STUFF = + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"; + + private final File envRoot; + private final String dbName = "testDB"; + private RepEnvInfo[] repEnvInfo; + private Database db; + private final Logger logger; + private final StatsConfig clearConfig; + + public SyncupWithGapsTest() { + envRoot = SharedTestUtils.getTestDir(); + logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + clearConfig = new StatsConfig(); + clearConfig.setClear(true); + clearConfig.setFast(false); + } + + private EnvironmentConfig makeEnvConfig() { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + "10000"); + /* Control cleaning explicitly. */ + envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), + "false"); + return envConfig; + } + + private int findMasterIndex(Environment master) { + for (int i = 0; i < repEnvInfo.length; i++) { + if (repEnvInfo[i].getEnv().equals(master)) { + return i; + } + } + + fail("Master should exist"); + return 0; + } + + /** Write data into the database. */ + private void generateData(Environment master, + int numTxns, + Durability durability, + boolean doCommit) { + + /* Write some data. 
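+ Keys are the longs 0..numTxns-1 and each record carries a 1000-byte + payload, so even a moderate number of transactions spills across + several of the 10KB log files configured in makeEnvConfig().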
*/ + DatabaseEntry key = new DatabaseEntry(); + byte[] dataPadding = new byte[1000]; + DatabaseEntry data = new DatabaseEntry(dataPadding); + + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setDurability(durability); + + for (int i = 0; i < numTxns; i++) { + final Transaction txn = + master.beginTransaction(null,txnConfig); + // long keyPrefix = i << 10; + // LongBinding.longToEntry(keyPrefix + i, key); + LongBinding.longToEntry(i, key); + db.put(txn, key, data); + + if (doCommit) { + txn.commit(); + } else { + txn.abort(); + } + } + } + + private void readData(Environment env, + int lastRecordVal) { + Database readDb = null; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + try { + readDb = openDatabase(env); + + for (int i = 0; i < lastRecordVal; i++) { + LongBinding.longToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, + db.get(null, key, data, LockMode.DEFAULT)); + } + } finally { + if (readDb != null) { + readDb.close(); + } + } + } + + private Database openDatabase(Environment env) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + return env.openDatabase(null, dbName, dbConfig); + } + + private int cleanLog(Environment env) { + int numCleaned = 0; + int total = 0; + do { + numCleaned = env.cleanLog(); + total += numCleaned; + } while (numCleaned > 0); + + logger.info("cleaned " + total); + return total; + } + + private void closeReplicas(int masterIndex) { + for (int i = 0; i < repEnvInfo.length; i++) { + if (i != masterIndex) { + repEnvInfo[i].closeEnv(); + } + } + } + + private void openReplicas(int masterIndex) { + RepEnvInfo[] restartList = new RepEnvInfo[2]; + int a = 0; + for (int i = 0; i < repEnvInfo.length; i++) { + if (i != masterIndex) { + restartList[a++] = repEnvInfo[i]; + } + } + + RepTestUtils.restartGroup(restartList); + } + + /** + * Create a log that will have swathes of cleaned files that follow the + * replication stream, or are intermingled in the replication stream. + * @return master + */ + private Environment setupLogWithCleanedGaps(boolean multipleGaps) + throws Exception { + + db = null; + repEnvInfo = + RepTestUtils.setupEnvInfos(envRoot, 3, makeEnvConfig()); + Environment master = RepTestUtils.joinGroup(repEnvInfo); + int masterIdx = findMasterIndex(master); + db = openDatabase(master); + + /* Write some data so there is a replication stream. */ + generateData(master, 50, Durability.COMMIT_NO_SYNC, true); + + /* + * Make the master have a low-utilization log, and gate cleaning + * with a non-updating global cbvlsn. Shut down the replicas so the + * global cbvlsn remains low, and then fill the master with junk. + * The junk will either entirely be to the right of the last VLSN, + * or (since we can't predict RepGroupDB updates) at least within + * the range of the active VLSN range. 
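+ * The junk is produced by writing Trace messages; for the purposes of + * this test they are local, non-replicated entries that consume log + * space without adding new VLSNs.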
+ */ + closeReplicas(masterIdx); + fillLogWithTraceMsgs(master, 50); + + if (multipleGaps) { + Durability noAck = new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + /* Write more data */ + generateData(master, 50, noAck, true); + + /* Make a second cleanup area of junk */ + fillLogWithTraceMsgs(master, 50); + } + + CheckpointConfig cc = new CheckpointConfig(); + cc.setForce(true); + master.checkpoint(cc); + + EnvironmentStats stats = master.getStats(clearConfig); + stats = master.getStats(clearConfig); + + /* Clean the log */ + int totalCleaned = 0; + int cleanedThisPass = 0; + do { + cleanedThisPass = cleanLog(master); + totalCleaned += cleanedThisPass; + master.checkpoint(cc); + + stats = master.getStats(clearConfig); + logger.info("after cleaning, cleaner backlog = " + + stats.getCleanerBacklog() + " deletionBacklog=" + + stats.getFileDeletionBacklog()); + } while (cleanedThisPass > 0); + + assertTrue(totalCleaned > 0); + + return master; + } + @Test + public void testReplicaHasOneGapNetworkRestore() + throws Throwable { + doReplicaHasGapNetworkRestore(false); + } + + @Test + public void testReplicaHasMultipleGapsNetworkRestore() + throws Throwable { + doReplicaHasGapNetworkRestore(true); + } + + private void doReplicaHasGapNetworkRestore(boolean multiGaps) + throws Throwable { + + Durability noAck = new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + db = null; + try { + Environment master = setupLogWithCleanedGaps(multiGaps); + int masterIdx = findMasterIndex(master); + /* + * Write a record, so that we are sure that there will be a + * network restore, because we have to cross a checkpoint. + */ + generateData(master, 1, noAck, false); + CheckpointConfig cc = new CheckpointConfig(); + master.checkpoint(cc); + EnvironmentStats stats = master.getStats(clearConfig); + assertEquals(0, stats.getCleanerBacklog()); + if (multiGaps) { + logger.info("Multigap: deletion backlog = " + + stats.getFileDeletionBacklog()); + } else { + assertEquals(0, stats.getFileDeletionBacklog()); + } + + db.close(); + db = null; + repEnvInfo[masterIdx].closeEnv(); + + /* Start up the two replicas */ + openReplicas(masterIdx); + + /* Start the node that had been the master */ + try { + repEnvInfo[masterIdx].openEnv(); + fail("Should be a network restore"); + } catch(InsufficientLogException ile) { + repEnvInfo[masterIdx].closeEnv(); + NetworkRestore restore = new NetworkRestore(); + NetworkRestoreConfig config = new NetworkRestoreConfig(); + config.setRetainLogFiles(true); + restore.execute(ile, config); + repEnvInfo[masterIdx].openEnv(); + } + + /* Check its last VLSN and size. 
*/ + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (db != null) { + db.close(); + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + @Test + public void testReplicaHasGap() + throws Throwable { + + db = null; + try { + Environment master = setupLogWithCleanedGaps(false); + int masterIdx = findMasterIndex(master); + db.close(); + db = null; + repEnvInfo[masterIdx].closeEnv(); + + /* Start up the two replicas */ + openReplicas(masterIdx); + + /* Start the master */ + try { + repEnvInfo[masterIdx].openEnv(); + } catch(InsufficientLogException ile) { + repEnvInfo[masterIdx].closeEnv(); + NetworkRestore restore = new NetworkRestore(); + NetworkRestoreConfig config = new NetworkRestoreConfig(); + config.setRetainLogFiles(true); + restore.execute(ile, config); + repEnvInfo[masterIdx].openEnv(); + } + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (db != null) { + db.close(); + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + /** + * On the master, generate a log that has + * section A: a lot of records packed together + * section B: a lot of junk that gets cleaned away, creating a gap in + * the log + * section C: a new section of data + * + * Bring the replicas down after A is replicated, but before C is written. + * When the replicas come up, they will have to be fed by the feeder + * from point A. + */ + @Test + public void testFeederHasGap() + throws Throwable { + + Durability noAck = new Durability(SyncPolicy.NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + db = null; + try { + Environment master = setupLogWithCleanedGaps(false); + int masterIdx = findMasterIndex(master); + + /* + * Write a single record, and then junk, so that we are sure there + * is a new VLSN, and that the replicas will have to sync up to + * this point, across the gap of cleaned junk. 
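+ * The replicas' matchpoint search must then walk back across the + * cleaned-away files to the data written before the gap.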
+ */ + generateData(master, 1, noAck, false); + EnvironmentStats stats = master.getStats(clearConfig); + assertEquals(0, stats.getCleanerBacklog()); + assertEquals(0, stats.getFileDeletionBacklog()); + + /* Start up the two replicas */ + for (int i = 0; i < repEnvInfo.length; i++) { + if (i != masterIdx) { + + repEnvInfo[i].openEnv(); + /* make sure we have up to date data */ + readData(repEnvInfo[i].getEnv(), 50); + } + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (db != null) { + db.close(); + } + RepTestUtils.shutdownRepEnvs(repEnvInfo); + } + } + + private void fillLogWithTraceMsgs(Environment env, + int numToAdd) { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManager fileManager = envImpl.getFileManager(); + Long beforeTracing = getLastFileNum(env); + logger.info("BeforeTracing end file = 0x" + + Long.toHexString(beforeTracing)); + do { + for (int i = 0; i <= 100; i++) { + Trace.trace(envImpl, STUFF + i); + } + } while (fileManager.getLastFileNum() <= (beforeTracing + numToAdd)); + Long afterTracing = fileManager.getLastFileNum(); + logger.info("AfterTracing end file = 0x" + + Long.toHexString(afterTracing)); + /* Check that we've grown the log by a good bit - at least 40 files */ + assertTrue((afterTracing - beforeTracing) > 40 ); + } + + private long getLastFileNum(Environment env) { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + FileManager fileManager = envImpl.getFileManager(); + return fileManager.getLastFileNum(); + } +} + diff --git a/test/com/sleepycat/je/rep/vlsn/VLPair.java b/test/com/sleepycat/je/rep/vlsn/VLPair.java new file mode 100644 index 0000000..e111cb6 --- /dev/null +++ b/test/com/sleepycat/je/rep/vlsn/VLPair.java @@ -0,0 +1,40 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.vlsn; + +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; + +/** + * Just a struct for testing convenience. + */ +class VLPair { + final VLSN vlsn; + final long lsn; + + VLPair(VLSN vlsn, long lsn) { + this.vlsn = vlsn; + this.lsn = lsn; + } + + VLPair(int vlsnSequence, long fileNumber, long offset) { + this.vlsn = new VLSN(vlsnSequence); + this.lsn = DbLsn.makeLsn(fileNumber, offset); + } + + @Override + public String toString() { + return vlsn + "/" + DbLsn.getNoFormatString(lsn); + } +} diff --git a/test/com/sleepycat/je/rep/vlsn/VLSNAwaitConsistencyTest.java b/test/com/sleepycat/je/rep/vlsn/VLSNAwaitConsistencyTest.java new file mode 100644 index 0000000..6a59b50 --- /dev/null +++ b/test/com/sleepycat/je/rep/vlsn/VLSNAwaitConsistencyTest.java @@ -0,0 +1,146 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.rep.vlsn; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.LogManager; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.node.cbvlsn.LocalCBVLSNUpdater; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; + +/** + * Make sure that the VLSNIndex.awaitConsistency method correctly checks for + * invalidated environment and exits its while loop. Before the fix for + * [#20919], the method could hang if a thread failed after it had allocated a + * new VLSN, but before the vlsn mapping was entered into the VLSNIndex. + */ +public class VLSNAwaitConsistencyTest extends TestBase { + + private final File envRoot; + + public VLSNAwaitConsistencyTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + /** + * Make a thread allocate a vlsn, but then fail before it's tracked + * by the vlsn index. This happened in [#20919] when + * 1.rep environment close was called + * 2.the repNode was nulled out + * 3.a concurrent writing thread got a NPE within its call to LogManager.log + * because the repNode was null. This thread exited after it had bumped + * the vlsn, but before it had entered the vlsn in the vlsnIndex + * 4.rep environment close tried to do a checkpoint, but the checkpoint + * hung. + * This fix works by having (3) invalidate the environment, and by having + * (4) check for an invalidated environment. + * + */ + @Test + public void testLoggingFailure() + throws DatabaseException, IOException { + + /* Make a single replicated environment. */ + RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1); + RepTestUtils.joinGroup(repEnvInfo); + + /* + * Disable cleaning and CBVLSN updating, to control vlsn creation + * explicitly. 
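+ * Background writes of the replicated group database could otherwise + * allocate additional VLSNs and perturb the explicit accounting below.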
+ */ + Environment env = repEnvInfo[0].getEnv(); + EnvironmentMutableConfig config = env.getMutableConfig(); + config.setConfigParam("je.env.runCleaner", "false"); + env.setMutableConfig(config); + LocalCBVLSNUpdater.setSuppressGroupDBUpdates(false); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + DatabaseEntry value = new DatabaseEntry(new byte[4]); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + LogManager logManager = + DbInternal.getNonNullEnvImpl(env).getLogManager(); + + /* + * Inject an exception into the next call to log() that is made + * for a replicated log entry. + */ + logManager.setDelayVLSNRegisterHook(new ForceException()); + + VLSNIndex vlsnIndex = ((RepImpl)envImpl).getVLSNIndex(); + + try { + db.put(null, value, value); + fail("Should throw exception"); + } catch (Exception expected) { + assertTrue("latest=" + vlsnIndex.getLatestAllocatedVal() + + " last mapped=" + + vlsnIndex.getRange().getLast().getSequence(), + vlsnIndex.getLatestAllocatedVal() > + vlsnIndex.getRange().getLast().getSequence()); + } + + try { + VLSNIndex.AWAIT_CONSISTENCY_MS = 1000; + envImpl.awaitVLSNConsistency(); + fail("Should throw and break out"); + } catch (DatabaseException expected) { + } + + /* Before the fix, this test hung. */ + } + + private class ForceException implements TestHook { + + public void doHook() { + throw new NullPointerException("fake NPE"); + } + + public void hookSetup() { + } + + public void doIOHook() throws IOException { + + } + + public void doHook(Object obj) { + } + + public CountDownLatch getHookValue() { + return null; + } + } +} \ No newline at end of file diff --git a/test/com/sleepycat/je/rep/vlsn/VLSNBucketTest.java b/test/com/sleepycat/je/rep/vlsn/VLSNBucketTest.java new file mode 100644 index 0000000..76c9c6a --- /dev/null +++ b/test/com/sleepycat/je/rep/vlsn/VLSNBucketTest.java @@ -0,0 +1,378 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.vlsn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; + +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.TestBase; + +/** + * Low level test for basic VLSNBucket functionality + * TODO: add more tests for buckets with non-populated offsets. + */ +public class VLSNBucketTest extends TestBase { + + private final boolean verbose = Boolean.getBoolean("verbose"); + + @Test + public void testBasic() { + int stride = 3; + int maxMappings = 2; + int maxDistance = 50; + + /* + * Make a list of vlsn->lsns mappings for test data: + * vlsn=1,lsn=3/10, + * vlsn=2,lsn=3/20, + * ... etc .. 
+ */ + List vals = initData(); + VLSNBucket bucket = new VLSNBucket(3, // fileNumber, + stride, + maxMappings, + maxDistance, + vals.get(0).vlsn); + + /* Insert vlsn 1, 2 */ + assertTrue(bucket.empty()); + assertTrue(bucket.put(vals.get(0).vlsn, vals.get(0).lsn)); + assertFalse(bucket.empty()); + assertTrue(bucket.put(vals.get(1).vlsn, vals.get(1).lsn)); + + /* + * Do some error checking - Make sure we can't put in a lsn for another + * file. + */ + assertFalse(bucket.put(vals.get(2).vlsn, DbLsn.makeLsn(4, 20))); + + /* Make sure we can't put in a lsn that's too far away. */ + assertFalse(bucket.put(vals.get(2).vlsn, DbLsn.makeLsn(3, 100))); + + assertTrue(bucket.owns(vals.get(0).vlsn)); + assertTrue(bucket.owns(vals.get(1).vlsn)); + assertFalse(bucket.owns(vals.get(2).vlsn)); + + assertTrue(bucket.put(vals.get(2).vlsn, vals.get(2).lsn)); + + /* + * Check the mappings. There are three that were put in, and only + * one that is stored. (1/10, (stored) 2/20, 3/30), plus the last lsn + */ + assertEquals(vals.get(0).lsn, + bucket.getGTELsn(vals.get(0).vlsn)); + assertEquals(vals.get(2).lsn, + bucket.getGTELsn(vals.get(1).vlsn)); + assertEquals(vals.get(2).lsn, + bucket.getGTELsn(vals.get(2).vlsn)); + + assertEquals(1, bucket.getNumOffsets()); + + /* Fill the bucket up so there's more mappings. Add 4/40, 5/50, 6/60 */ + assertTrue(bucket.put(vals.get(3).vlsn, vals.get(3).lsn)); + assertTrue(bucket.put(vals.get(4).vlsn, vals.get(4).lsn)); + assertTrue(bucket.put(vals.get(5).vlsn, vals.get(5).lsn)); + + /* + * Check that we reached the max mappings limit, and that this put is + * refused. + */ + assertFalse(bucket.put(new VLSN(7), DbLsn.makeLsn(3,70))); + + checkAccess(bucket, stride, vals); + } + + /* + * This bucket holds vlsn 1-6, just check all the access methods. */ + private void checkAccess(VLSNBucket bucket, + int stride, + List vals) { + + /* + * All the mappings should be there, and we should be able to retrieve + * them. + */ + for (int i = 0; i < vals.size(); i += stride) { + VLPair pair = vals.get(i); + assertTrue(bucket.owns(pair.vlsn)); + assertEquals(pair.lsn, bucket.getLsn(pair.vlsn)); + } + + /* + * With the strides, it's more work to use a loop to check GTE and LTE + * than to just hard code the checks. If the expected array is grown, + * add to these checks! + */ + assertEquals(vals.get(0).lsn, + bucket.getLTELsn(vals.get(0).vlsn)); + assertEquals(vals.get(0).lsn, + bucket.getLTELsn(vals.get(1).vlsn)); + assertEquals(vals.get(0).lsn, + bucket.getLTELsn(vals.get(2).vlsn)); + assertEquals(vals.get(3).lsn, + bucket.getLTELsn(vals.get(3).vlsn)); + assertEquals(vals.get(3).lsn, + bucket.getLTELsn(vals.get(4).vlsn)); + assertEquals(vals.get(5).lsn, + bucket.getLTELsn(vals.get(5).vlsn)); + + assertEquals(vals.get(0).lsn, + bucket.getGTELsn(vals.get(0).vlsn)); + assertEquals(vals.get(3).lsn, + bucket.getGTELsn(vals.get(1).vlsn)); + assertEquals(vals.get(3).lsn, + bucket.getGTELsn(vals.get(2).vlsn)); + assertEquals(vals.get(3).lsn, + bucket.getGTELsn(vals.get(3).vlsn)); + assertEquals(vals.get(5).lsn, + bucket.getGTELsn(vals.get(4).vlsn)); + assertEquals(vals.get(5).lsn, + bucket.getGTELsn(vals.get(5).vlsn)); + } + + /** + * Make a list of vlsn->lsns mappings for test data: + * vlsn=1,lsn=3/10, + * vlsn=2,lsn=3/20, + * ... etc .. 
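+ * (initData() returns six mappings in all: vlsn i maps to lsn 3/(10*i) + * for i = 1..6.)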
+ */ + private List initData() { + + List vals = new ArrayList(); + for (int i = 1; i <= 6; i++) { + vals.add(new VLPair(i, 3, 10 * i)); + } + return vals; + } + + @Test + public void testOutOfOrderPuts() { + int stride = 3; + int maxMappings = 2; + int maxDistance = 50; + + List vals = initData(); + VLSNBucket bucket = new VLSNBucket(3, // fileNumber, + stride, + maxMappings, + maxDistance, + vals.get(0).vlsn); + + /* Insert vlsn 2, 1 */ + assertTrue(bucket.empty()); + assertTrue(bucket.put(vals.get(1).vlsn, vals.get(1).lsn)); + assertFalse(bucket.empty()); + + assertTrue(bucket.owns(vals.get(1).vlsn)); + assertTrue(bucket.owns(vals.get(0).vlsn)); + assertFalse(bucket.owns(vals.get(2).vlsn)); + + assertTrue(bucket.put(vals.get(0).vlsn, vals.get(0).lsn)); + + /* + * Do some error checking - Make sure we can't put in a lsn for another + * file. + */ + assertFalse(bucket.put(vals.get(2).vlsn, DbLsn.makeLsn(4, 20))); + + /* Make sure we can't put in a lsn that's too far away. */ + assertFalse(bucket.put(vals.get(2).vlsn, DbLsn.makeLsn(3, 100))); + + assertFalse(bucket.owns(vals.get(2).vlsn)); + + /* + * Check the mappings. There are three that were put in, and only + * one that is stored. (1/10, (stored) 2/20, 3/30) + */ + assertEquals(1, bucket.getNumOffsets()); + + /* + * Fill the bucket up so there's more mappings. Add 4/40, 5/50, 6/60 + * out of order. + */ + assertTrue(bucket.put(vals.get(4).vlsn, vals.get(4).lsn)); + assertTrue(bucket.put(vals.get(5).vlsn, vals.get(5).lsn)); + assertTrue(bucket.put(vals.get(2).vlsn, vals.get(2).lsn)); + assertTrue(bucket.put(vals.get(3).vlsn, vals.get(3).lsn)); + + /* + * Check that we reached the max mappings limit, and that this put is + * refused. + */ + assertFalse(bucket.put(new VLSN(7), DbLsn.makeLsn(3,70))); + + checkAccess(bucket, stride, vals); + } + + /* + * Create a bucket with some out of order puts, so that there are empty + * offsets, and make sure that the non-null gets succeed. + */ + @Test + public void testGetNonNullWithHoles() { + + VLSNBucket bucket = new VLSNBucket(0, // fileNumber, + 2, // stride, + 20, // maxMappings + 10000, // maxDist + new VLSN(1)); + assertTrue(bucket.put(new VLSN(1), 10)); + assertTrue(bucket.put(new VLSN(3), 30)); + /* + * Note that when we put in VLSN 6, the bucket's file offset array + * will be smaller than it would normally be. It will only be + * size=2. Do this to test the edge case of getNonNullLTELsn on + * a too-small array. + */ + assertTrue(bucket.put(new VLSN(6), 60)); + + assertEquals(10, bucket.getLTELsn(new VLSN(1))); + assertEquals(10, bucket.getLTELsn(new VLSN(2))); + assertEquals(30, bucket.getLTELsn(new VLSN(3))); + assertEquals(30, bucket.getLTELsn(new VLSN(4))); + assertEquals(30, bucket.getLTELsn(new VLSN(5))); + assertEquals(60, bucket.getLTELsn(new VLSN(6))); + + assertEquals(10, bucket.getGTELsn(new VLSN(1))); + assertEquals(30, bucket.getGTELsn(new VLSN(2))); + assertEquals(30, bucket.getGTELsn(new VLSN(3))); + assertEquals(60, bucket.getGTELsn(new VLSN(4))); + assertEquals(60, bucket.getGTELsn(new VLSN(5))); + assertEquals(60, bucket.getGTELsn(new VLSN(6))); + + assertEquals(10, bucket.getGTELsn(new VLSN(1))); + assertEquals(30, bucket.getGTELsn(new VLSN(2))); + assertEquals(30, bucket.getGTELsn(new VLSN(3))); + assertEquals(60, bucket.getGTELsn(new VLSN(4))); + assertEquals(60, bucket.getGTELsn(new VLSN(5))); + assertEquals(60, bucket.getGTELsn(new VLSN(6))); + } + + @Test + public void testRemoveFromTail() { + int stride = 3; + + /* Create a set of test mappings. 
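+ VLSNs 10 through 19 map one-for-one to lsns 0/100 through 0/190.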
 */
+        List<VLPair> expected = new ArrayList<VLPair>();
+        int start = 10;
+        int end = 20;
+        for (int i = start; i < end; i++) {
+            expected.add(new VLPair(i, 0, i * 10));
+        }
+
+        /*
+         * Load a bucket with the expected mappings. Call removeFromTail()
+         * at different points, and then check that all expected values
+         * remain.
+         */
+        for (int startDeleteVal = start - 1;
+             startDeleteVal < end + 1;
+             startDeleteVal++) {
+
+            VLSNBucket bucket = loadBucket(expected, stride);
+
+            VLSN startDeleteVLSN = new VLSN(startDeleteVal);
+            if (verbose) {
+                System.out.println("startDelete=" + startDeleteVal);
+            }
+            bucket.removeFromTail(startDeleteVLSN,
+                                  (startDeleteVal - 1) * 10); // prevLsn
+
+            if (verbose) {
+                System.out.println("bucket=" + bucket);
+            }
+
+            for (VLPair p : expected) {
+                long lsn = DbLsn.NULL_LSN;
+                if (bucket.owns(p.vlsn)) {
+                    lsn = bucket.getLsn(p.vlsn);
+                }
+
+                if (p.vlsn.compareTo(startDeleteVLSN) >= 0) {
+                    /* Anything >= startDeleteVLSN should be truncated. */
+                    assertEquals("startDelete = " + startDeleteVLSN +
+                                 " p=" + p + " bucket=" + bucket,
+                                 DbLsn.NULL_LSN, lsn);
+                } else {
+
+                    if (((p.vlsn.getSequence() - start) % stride) == 0) {
+                        /*
+                         * If it is on a stride boundary, there should be a
+                         * mapping.
+                         */
+                        assertEquals("bucket=" + bucket + " p= " + p,
+                                     p.lsn, lsn);
+                    } else if (p.vlsn.compareTo
+                               (startDeleteVLSN.getPrev()) == 0) {
+                        /* It's the last mapping. */
+                        assertEquals(p.lsn, lsn);
+                    } else {
+                        assertEquals(DbLsn.NULL_LSN, lsn);
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * [#20796]
+     * Truncate a bucket when the truncation point is between the last file
+     * offset and the last vlsn.
+     */
+    @Test
+    public void testTruncateAfterFileOffset() {
+
+        /* Create a set of test mappings, using a stride of 5. */
+        List<VLPair> testMappings = new ArrayList<VLPair>();
+        testMappings.add(new VLPair(10, 0, 10));
+        testMappings.add(new VLPair(15, 0, 20));
+        testMappings.add(new VLPair(20, 0, 30));
+        /* Skip the 25 stride offset -- assume that vlsn 28 came in first. */
+        testMappings.add(new VLPair(28, 0, 40));
+
+        VLSNBucket bucket = loadBucket(testMappings, 5);
+        bucket.removeFromTail(new VLSN(26), DbLsn.NULL_LSN);
+        assertEquals(bucket.getLast(), new VLSN(20));
+        assertEquals(bucket.getLastLsn(), DbLsn.makeLsn(0, 30));
+
+        bucket = loadBucket(testMappings, 5);
+        bucket.removeFromTail(new VLSN(26), DbLsn.makeLsn(0, 33));
+        assertEquals(bucket.getLast(), new VLSN(25));
+        assertEquals(bucket.getLastLsn(), DbLsn.makeLsn(0, 33));
+    }
+
+    private VLSNBucket loadBucket(List<VLPair> expected, int stride) {
+        int maxMappings = 5;
+        int maxDistance = 50;
+
+        VLSNBucket bucket = new VLSNBucket(0, // fileNumber
+                                           stride,
+                                           maxMappings,
+                                           maxDistance,
+                                           new VLSN(10));
+        for (VLPair pair : expected) {
+            assertTrue("pair = " + pair,
+                       bucket.put(pair.vlsn, pair.lsn));
+        }
+        return bucket;
+    }
+}
diff --git a/test/com/sleepycat/je/rep/vlsn/VLSNCacheTest.java b/test/com/sleepycat/je/rep/vlsn/VLSNCacheTest.java
new file mode 100644
index 0000000..d78dff7
--- /dev/null
+++ b/test/com/sleepycat/je/rep/vlsn/VLSNCacheTest.java
@@ -0,0 +1,781 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.rep.vlsn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.impl.RepParams; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.INLongRep; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Tests the vlsnCache class. + */ +public class VLSNCacheTest extends TestBase { + + private final long NULL_VLSN = VLSN.NULL_VLSN_SEQUENCE; + + private ReplicatedEnvironment master; + private final File envRoot; + + private boolean embeddedLNs = false; + + public VLSNCacheTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + if (master != null) { + try { + master.close(); + } catch (Exception e) { + System.out.println("During tearDown: " + e); + } + master = null; + } + } + + /** + * White box test that checks that all values allowed for VLSNs, and + * special/boundary values, can be stored into and retrieved from the + * cache. Also checks that mutation occurs when the value exceeds the + * threshold for each byte length. + */ + @Test + public void testCacheValues() { + + /* Entries per BIN, which is entries per cache. */ + final int N_ENTRIES = 5; + + /* + * Open env and db only to create a BIN, although the BIN is not + * attached to the Btree. + */ + final ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setConfigParam + (RepParams.PRESERVE_RECORD_VERSION.getName(), "true"); + + final Database db = openEnv(repConfig); + final BIN bin = new BIN(DbInternal.getDbImpl(db), new byte[1], + N_ENTRIES, 1); + final long oldMemSize = bin.getInMemorySize(); + closeEnv(db); + + /* Max value plus one that can be stored for each byte length. */ + final long[] limitPerByteLength = { + 1L, + 1L << 8, + 1L << 16, + 1L << 24, + 1L << 32, + 1L << 40, + 1L << 48, + 1L << 56, + Long.MAX_VALUE, + }; + + /* + * The BIN always starts with an EMPTY_REP. Set minLength to 1 so we + * can test the full range of version byte sizes. + */ + INLongRep c = new INLongRep.EmptyRep(1, false); + + /* Check each block of values, one for each byte length. 
*/
+        for (int block = 1; block < limitPerByteLength.length; block += 1) {
+
+            /* Never mutate when storing a null. */
+            for (int entry = 0; entry < N_ENTRIES; entry += 1) {
+                assertSame(c, c.set(entry, 0, bin));
+                assertEquals(0, c.get(entry));
+            }
+
+            /* Define low and high (max plus one) limits of the block. */
+            final long lowVal = limitPerByteLength[block - 1];
+            final long highVal = limitPerByteLength[block];
+
+            /*
+             * Check several values at each end of the block's value range.
+             * Expect the cache to mutate when the first value is stored.
+             */
+            boolean mutated = false;
+            for (long val :
+                 new long[] { lowVal, lowVal + 1, lowVal + 2,
+                              highVal - 2, highVal - 1 }) {
+
+                /* Set and get the value in each slot. */
+                for (int entry = 0; entry < N_ENTRIES; entry += 1) {
+                    final String msg = "val=" + val +
+                                       " entry=" + entry +
+                                       " block=" + block;
+                    assertTrue(msg, val != 0);
+                    final INLongRep newCache = c.set(entry, val, bin);
+                    if (mutated) {
+                        assertSame(msg, c, newCache);
+                    } else {
+                        assertNotSame(msg, c, newCache);
+                        c = newCache;
+                        mutated = true;
+                    }
+                    assertEquals(msg, val, c.get(entry));
+                }
+
+                /* Get the values again to check that slots were not
+                 * overwritten across entry boundaries. */
+                for (int entry = 0; entry < N_ENTRIES; entry += 1) {
+                    assertEquals(val, c.get(entry));
+                }
+            }
+        }
+
+        /*
+         * BIN mem size should have increased by the current cache size. We
+         * cannot check bin.computeMemorySize here because the BIN was not
+         * actually used to cache VLSNs, so it doesn't contain the mutated
+         * cache object.
+         */
+        assertEquals(oldMemSize + c.getMemorySize(), bin.getInMemorySize());
+    }
+
+    /**
+     * After eviction of LNs, the version is available via the vlsnCache
+     * without having to fetch the LNs.
+     */
+    @Test
+    public void testEvictLNs() {
+        final Database db = openEnv();
+
+        /* After insertion, VLSN is available without fetching. */
+        insert(db, 1);
+
+        final long v1 = getVLSN(
+            db, 1, false /*allowFetch*/, !embeddedLNs /*expectEmptyCache*/);
+        assertTrue(v1 != NULL_VLSN);
+
+        /* After LN eviction, VLSN is available via the vlsnCache. */
+        evict(db, 1, CacheMode.EVICT_LN);
+
+        final long v2 = getVLSN(
+            db, 1, false /*allowFetch*/, false /*expectEmptyCache*/);
+        assertTrue(v2 != NULL_VLSN);
+        assertEquals(v1, v2);
+
+        /* After update, new VLSN is available without fetching. */
+        update(db, 1);
+        final long v3 = getVLSN(
+            db, 1, false /*allowFetch*/, false /*expectEmptyCache*/);
+        assertTrue(v3 != NULL_VLSN);
+        assertTrue(v3 > v2);
+
+        /*
+         * When an update is performed and the vlsnCache has the old value,
+         * the vlsnCache is not updated. The vlsnCache is out-of-date but this
+         * is harmless because the VLSN in the LN takes precedence over the
+         * vlsnCache.
+         */
+        final BIN bin = (BIN) DbInternal.getDbImpl(db).
+                              getTree().
+                              getFirstNode(CacheMode.DEFAULT);
+        try {
+            if (embeddedLNs) {
+                assertEquals(v3, bin.getVLSNCache().get(0));
+            } else {
+                assertEquals(v2, bin.getVLSNCache().get(0));
+            }
+        } finally {
+            bin.releaseLatch();
+        }
+
+        /* After LN eviction, VLSN is available via the vlsnCache. */
+        evict(db, 1, CacheMode.EVICT_LN);
+        final long v4 = getVLSN(
+            db, 1, false /*allowFetch*/, false /*expectEmptyCache*/);
+        assertTrue(v4 != NULL_VLSN);
+        assertEquals(v3, v4);
+
+        /* After fetch, new VLSN is available without fetching. */
+        fetch(db, 1);
+        final long v5 = getVLSN(
+            db, 1, false /*allowFetch*/, false /*expectEmptyCache*/);
+        assertEquals(v5, v4);
+
+        /* After LN eviction, VLSN is available via the vlsnCache.
*/ + evict(db, 1, CacheMode.EVICT_LN); + final long v6 = getVLSN( + db, 1, false /*allowFetch*/, false /*expectEmptyCache*/); + assertTrue(v6 != NULL_VLSN); + assertEquals(v6, v5); + + closeEnv(db); + } + + /** + * After eviction of BINs, the version is only available by fetching the + * LNs. + */ + @Test + public void testEvictBINs() { + final Database db = openEnv(); + + /* After insertion, VLSN is available without fetching. */ + insert(db, 1); + + final long v1 = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + assertTrue(v1 != NULL_VLSN); + + /* After BIN eviction, VLSN is only available by fetching. */ + evict(db, 1, CacheMode.EVICT_BIN); + + final long v2 = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + if (embeddedLNs) { + assertEquals(v1, v2); + } else { + assertEquals(v2, NULL_VLSN); + } + + final long v3 = getVLSN( + db, 1, true /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + assertTrue(v3 != NULL_VLSN); + assertEquals(v3, v1); + + /* After update, new VLSN is available without fetching. */ + update(db, 1); + + final long v4 = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + assertTrue(v4 != NULL_VLSN); + assertTrue(v4 > v3); + + /* After BIN eviction, VLSN is only available by fetching. */ + evict(db, 1, CacheMode.EVICT_BIN); + + final long v5 = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + if (embeddedLNs) { + assertEquals(v4, v5); + } else { + assertEquals(v5, NULL_VLSN); + } + + final long v6 = getVLSN( + db, 1, true /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + assertTrue(v6 != NULL_VLSN); + assertEquals(v6, v4); + + closeEnv(db); + } + + /** + * With no eviction, LNs are resident so version is available without any + * fetching even though the vlsnCache is empty. + */ + @Test + public void testNoEviction() { + final Database db = openEnv(); + + /* After insertion, VLSN is available without fetching. */ + insert(db, 1); + final long v1 = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + assertTrue(v1 != NULL_VLSN); + + /* After update, new VLSN is available without fetching. */ + update(db, 1); + final long v2 = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + assertTrue(v2 != NULL_VLSN); + assertTrue(v2 > v1); + + /* After fetch, new VLSN is available without fetching. */ + fetch(db, 1); + final long v3 = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs/*expectEmptyCache*/); + assertEquals(v3, v2); + + closeEnv(db); + } + + /** + * Inserts the given record, should not fetch. + */ + private void insert(Database db, int keyNum) { + + final EnvironmentStats stats1 = master.getStats(null); + + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(keyNum, key); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + + final OperationStatus status = db.putNoOverwrite(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + + final EnvironmentStats stats2 = master.getStats(null); + + assertEquals(stats1.getNLNsFetchMiss(), + stats2.getNLNsFetchMiss()); + assertEquals(stats1.getNBINsFetchMiss(), + stats2.getNBINsFetchMiss()); + } + + /** + * Updates the given record, should not fetch. 
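+     * Like insert(), it asserts that the LN and BIN fetch-miss stats are
+     * unchanged across the operation.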
+ */ + private void update(Database db, int keyNum) { + + final EnvironmentStats stats1 = master.getStats(null); + + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(keyNum, key); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + + final OperationStatus status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + + final EnvironmentStats stats2 = master.getStats(null); + + assertEquals(stats1.getNLNsFetchMiss(), + stats2.getNLNsFetchMiss()); + assertEquals(stats1.getNBINsFetchMiss(), + stats2.getNBINsFetchMiss()); + } + + /** + * Fetches given record. + */ + private void fetch(Database db, int keyNum) { + + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(keyNum, key); + final DatabaseEntry data = new DatabaseEntry(); + + final OperationStatus status = db.get(null, key, data, null); + assertSame(OperationStatus.SUCCESS, status); + } + + /** + * Evict the given record according to the given CacheMode. + */ + private void evict(Database db, int keyNum, CacheMode cacheMode) { + + assertNotNull(db); + assertTrue(cacheMode == CacheMode.EVICT_LN || + cacheMode == CacheMode.EVICT_BIN); + + if (cacheMode == CacheMode.EVICT_BIN) { + /* EVICT_BIN will not evict a dirty BIN. */ + master.sync(); + } + + final EnvironmentStats stats1 = master.getStats(null); + + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(keyNum, key); + final DatabaseEntry data = new DatabaseEntry(); + data.setPartial(0, 0, true); + + final Cursor cursor = db.openCursor(null, null); + final BIN bin; + final int binIndex; + try { + cursor.setCacheMode(cacheMode); + final OperationStatus status = + cursor.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + bin = DbInternal.getCursorImpl(cursor).getBIN(); + binIndex = DbInternal.getCursorImpl(cursor).getIndex(); + } finally { + cursor.close(); + } + + if (cacheMode == CacheMode.EVICT_LN) { + assertNull(bin.getTarget(binIndex)); + assertFalse(bin.getVLSNCache() instanceof INLongRep.EmptyRep); + } + + final EnvironmentStats stats2 = master.getStats(null); + assertEquals(stats1.getNLNsFetchMiss(), + stats2.getNLNsFetchMiss()); + assertEquals(stats1.getNBINsFetchMiss(), + stats2.getNBINsFetchMiss()); + + if (cacheMode == CacheMode.EVICT_BIN) { + assertEquals(1, stats2.getNNodesEvicted() - + stats1.getNNodesEvicted()); + assertTrue(stats1.getNBytesEvictedCacheMode() < + stats2.getNBytesEvictedCacheMode()); + } else { + assertEquals(stats1.getNBytesEvictedCacheMode(), + stats2.getNBytesEvictedCacheMode()); + } + } + + /** + * Deletes the given record. + */ + private void delete(Database db, int keyNum) { + + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(keyNum, key); + + final OperationStatus status = db.delete(null, key); + assertSame(OperationStatus.SUCCESS, status); + + master.compress(); + } + + /** + * Move cursor to first record and return its VLSN. + * + * For a primary DB, we do not need to fetch the data to get the VLSN, but + * for a secondary DB we do (because a Btree lookup in the primary DB is + * needed to get its VLSN). In the future we could add a way to do a + * key-only secondary read that does the primary DB lookup and populates + * the VLSN, if needed. 
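+     *
+     * For example, getVLSN(db, 1, false, true) reads the VLSN for key 1,
+     * asserts that no LN was fetched along the way, and asserts that the
+     * BIN's vlsnCache is still the empty representation.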
+     */
+    private long getVLSN(Database db,
+                         int keyNum,
+                         boolean allowFetch,
+                         boolean expectEmptyCache) {
+
+        assertNotNull(db);
+
+        final EnvironmentStats stats1 = master.getStats(null);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        IntegerBinding.intToEntry(keyNum, key);
+        final DatabaseEntry data = new DatabaseEntry();
+        if (!(db instanceof SecondaryDatabase)) {
+            data.setPartial(0, 0, true);
+        }
+
+        final Cursor cursor = db.openCursor(null, null);
+        final long vlsn;
+        final BIN bin;
+        try {
+            final OperationStatus status =
+                cursor.getSearchKey(key, data, null);
+            assertSame(OperationStatus.SUCCESS, status);
+
+            bin = DbInternal.getCursorImpl(cursor).getBIN();
+
+            final EnvironmentStats stats2 = master.getStats(null);
+            assertEquals(stats1.getNLNsFetchMiss(),
+                         stats2.getNLNsFetchMiss());
+
+            vlsn = DbInternal.getCursorImpl(cursor).
+                   getCurrentVersion(allowFetch).
+                   getVLSN();
+
+            assertTrue(vlsn != 0);
+
+            if (allowFetch) {
+                assertTrue(vlsn != NULL_VLSN);
+            }
+
+            if (!allowFetch) {
+                final EnvironmentStats stats3 = master.getStats(null);
+                assertEquals(stats2.getNLNsFetchMiss(),
+                             stats3.getNLNsFetchMiss());
+            }
+        } finally {
+            cursor.close();
+        }
+
+        assertEquals(
+            expectEmptyCache,
+            bin.getVLSNCache() instanceof INLongRep.EmptyRep);
+
+        return vlsn;
+    }
+
+    /**
+     * In a ReplicatedEnvironment it is possible to have non-txnl databases,
+     * although currently these are only allowed for internal databases. The
+     * LNs for such databases will not have VLSNs. Do a basic test of eviction
+     * here to ensure that version caching for non-txnl databases doesn't
+     * cause problems.
+     *
+     * At one point the eviction in a non-txnl DB caused an assertion to fire
+     * in vlsnCache.DefaultRep.set because the VersionedLN was returning a
+     * zero VLSN sequence. That bug was fixed: VersionedLN.getVLSNSequence
+     * now returns -1 (null) for the VLSN sequence when the LN does not have
+     * a VLSN, and therefore VersionedLN.setVLSNSequence is never called.
+     */
+    @Test
+    public void testNonTxnlEviction() {
+
+        final Database db = openEnv();
+
+        /*
+         * Reach into internals and get the VLSNIndex database, which happens
+         * to be non-transactional, and its first BIN. We flush the VLSNIndex
+         * database to create a BIN.
+         */
+        final DatabaseImpl nonTxnlDbImpl =
+            RepInternal.getNonNullRepImpl(master).
+            getVLSNIndex().getDatabaseImpl();
+        RepInternal.getNonNullRepImpl(master).getVLSNIndex().
+            flushToDatabase(Durability.COMMIT_NO_SYNC);
+        final BIN bin =
+            (BIN) nonTxnlDbImpl.getTree().getFirstNode(CacheMode.DEFAULT);
+
+        /* Evict LNs and ensure that the EMPTY_REP is still used. */
+        try {
+            bin.evictLNs();
+        } finally {
+            bin.releaseLatch();
+        }
+        assertTrue(bin.getVLSNCache() instanceof INLongRep.EmptyRep);
+
+        closeEnv(db);
+    }
+
+    /**
+     * Tests that the vlsnCache is adjusted correctly after movement of a
+     * slot due to an insertion or deletion.
+     */
+    @Test
+    public void testInsertAndDelete() {
+
+        final Database db = openEnv();
+        final List<Long> vlsnList = new ArrayList<Long>();
+
+        /* Insert keys 1, 3, 5. */
+        for (int i = 1; i <= 5; i += 2) {
+            vlsnList.add(insertEvictAndGetVLSN(db, i));
+            checkVlsns(db, vlsnList);
+        }
+
+        /* Insert keys 0, 2, 4, 6. */
+        for (int i = 0; i <= 6; i += 2) {
+            vlsnList.add(i, insertEvictAndGetVLSN(db, i));
+            checkVlsns(db, vlsnList);
+        }
+
+        /* Delete keys 1, 3, 5. */
+        for (int i = 5; i >= 1; i -= 2) {
+            delete(db, i);
+            vlsnList.remove(i);
+            checkVlsns(db, vlsnList);
+        }
+
+        /* Delete keys 0, 2, 4, 6.
 */
+        for (int i = 6; i >= 0; i -= 2) {
+            delete(db, i);
+            vlsnList.remove(vlsnList.size() - 1);
+            checkVlsns(db, vlsnList);
+        }
+
+        closeEnv(db);
+    }
+
+    private long insertEvictAndGetVLSN(Database db, int keyNum) {
+        insert(db, keyNum);
+        evict(db, keyNum, CacheMode.EVICT_LN);
+        final long vlsn = getVLSN
+            (db, keyNum, false /*allowFetch*/, false /*expectEmptyCache*/);
+        assertTrue(vlsn != NULL_VLSN);
+        return vlsn;
+    }
+
+    /**
+     * Checks that the given VLSNs match the VLSNs in the Btree.
+     */
+    private void checkVlsns(Database db, List<Long> vlsnList) {
+        int listIndex = 0;
+        final Cursor cursor = db.openCursor(null, null);
+        try {
+            final DatabaseEntry key = new DatabaseEntry();
+            final DatabaseEntry data = new DatabaseEntry();
+            data.setPartial(0, 0, true);
+            OperationStatus status = cursor.getFirst(key, data, null);
+            while (status == OperationStatus.SUCCESS) {
+
+                /* Check VLSN via Cursor. */
+                Long vlsn =
+                    DbInternal.getCursorImpl(cursor).
+                    getCurrentVersion(false /*allowFetch*/).
+                    getVLSN();
+                assertEquals(vlsnList.get(listIndex), vlsn);
+
+                /* Check VLSN via vlsnCache. */
+                final BIN bin = DbInternal.getCursorImpl(cursor).getBIN();
+                final int binIndex =
+                    DbInternal.getCursorImpl(cursor).getIndex();
+                vlsn = bin.getVLSNCache().get(binIndex);
+                assertEquals(vlsnList.get(listIndex), vlsn);
+
+                /* Check that vlsnCache slots out of bounds are null (0). */
+                for (int i = bin.getNEntries();
+                     i < bin.getMaxEntries();
+                     i += 1) {
+                    assertEquals(0, bin.getVLSNCache().get(i));
+                }
+
+                listIndex += 1;
+                status = cursor.getNext(key, data, null);
+            }
+        } finally {
+            cursor.close();
+        }
+        assertEquals(vlsnList.size(), listIndex);
+    }
+
+    /**
+     * Tests that the vlsnCache is adjusted correctly after movement of slots
+     * due to a split.
+     */
+    @Test
+    public void testSplit() {
+
+        final Database db = openEnv();
+        final List<Long> vlsnList = new ArrayList<Long>();
+
+        for (int i = 0; i < 200; i += 1) {
+            vlsnList.add(insertEvictAndGetVLSN(db, i));
+        }
+        checkVlsns(db, vlsnList);
+
+        closeEnv(db);
+    }
+
+    /**
+     * Checks that the version returned by a secondary cursor is actually the
+     * version of the associated primary record.
+     */
+    @Test
+    public void testSecondaryVersion() {
+        final Database db = openEnv();
+
+        final SecondaryConfig secConfig = new SecondaryConfig();
+        secConfig.setTransactional(true);
+        secConfig.setAllowCreate(true);
+        secConfig.setSortedDuplicates(true);
+
+        /* Secondary key has the same value as the primary key.
*/ + secConfig.setKeyCreator(new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData(key.getData()); + return true; + } + }); + + final SecondaryDatabase secDb = + master.openSecondaryDatabase(null, "TEST_SEC", db, secConfig); + + insert(db, 1); + + final long priVlsn = getVLSN( + db, 1, false /*allowFetch*/, !embeddedLNs /*expectEmptyCache*/); + + final long secVlsn = getVLSN( + secDb, 1, false /*allowFetch*/, true /*expectEmptyCache*/); + + assertTrue(priVlsn != NULL_VLSN); + assertEquals(priVlsn, secVlsn); + + secDb.close(); + closeEnv(db); + } + + private Database openEnv() { + final ReplicationConfig repConfig = new ReplicationConfig(); + + repConfig.setConfigParam + (RepParams.PRESERVE_RECORD_VERSION.getName(), "true"); + + return openEnv(repConfig); + } + + private Database openEnv(ReplicationConfig repConfig) { + + final EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + envConfig.setAllowCreate(true); + + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + final RepEnvInfo masterInfo = RepTestUtils.setupEnvInfo + (envRoot, envConfig, repConfig, null); + + master = RepTestUtils.joinGroup(masterInfo); + + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(master); + + embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + return master.openDatabase(null, "TEST", dbConfig); + } + + private void closeEnv(Database db) { + TestUtils.validateNodeMemUsage(DbInternal.getNonNullEnvImpl(master), + true /*assertOnError*/); + db.close(); + master.close(); + master = null; + } +} diff --git a/test/com/sleepycat/je/rep/vlsn/VLSNCleanerTest.java b/test/com/sleepycat/je/rep/vlsn/VLSNCleanerTest.java new file mode 100644 index 0000000..4298882 --- /dev/null +++ b/test/com/sleepycat/je/rep/vlsn/VLSNCleanerTest.java @@ -0,0 +1,151 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.rep.vlsn; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; + +/** + * Exercise VLSNIndex and cleaning + */ +public class VLSNCleanerTest extends TestBase { + private final boolean verbose = Boolean.getBoolean("verbose"); + private ReplicatedEnvironment master; + private Database db; + private final File envRoot; + + public VLSNCleanerTest() { + envRoot = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + master.close(); + } + + @Test + public void testBasic() + throws DatabaseException { + + /* + * Set the environment config to use very small files, have high + * utilization, and permit manual cleaning. + */ + EnvironmentConfig envConfig = new EnvironmentConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, + "90"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "10000"); + /* Need a disk limit to delete files. */ + envConfig.setConfigParam( + EnvironmentConfig.MAX_DISK, String.valueOf(10 * 10000)); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + envConfig.setAllowCreate(true); + + RepEnvInfo masterInfo = RepTestUtils.setupEnvInfo( + envRoot, envConfig, (short) 1, null); + + master = RepTestUtils.joinGroup(masterInfo); + + setupDatabase(); + int maxDeletions = 10; + Environment env = master; + EnvironmentStats stats = null; + RepImpl repImpl = RepInternal.getNonNullRepImpl(master); + + for (int i = 0; i < 100; i += 1) { + putAndDelete(); + + boolean anyCleaned = false; + while (env.cleanLog() > 0) { + anyCleaned = true; + if (verbose) { + System.out.println("anyCleaned"); + } + } + + if (anyCleaned) { + CheckpointConfig force = new CheckpointConfig(); + force.setForce(true); + env.checkpoint(force); + } + + stats = env.getStats(null); + long nDeletions = stats.getNCleanerDeletions(); + + if (verbose) { + System.out.println("ckpt w/nCleanerDeletions=" + nDeletions); + } + + assertTrue(repImpl.getVLSNIndex().verify(verbose)); + + if (nDeletions >= maxDeletions) { + db.close(); + master.close(); + return; + } + } + + fail("" + maxDeletions + " files were not deleted. 
" + stats); + } + + private void setupDatabase() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = master.openDatabase(null, "TEST", dbConfig); + } + + private void putAndDelete() + throws DatabaseException { + + int WORK = 100; + DatabaseEntry value = new DatabaseEntry(); + + for (int i = 0; i < WORK; i++) { + IntegerBinding.intToEntry(i, value); + db.put(null, value, value); + db.delete(null, value); + } + } +} diff --git a/test/com/sleepycat/je/rep/vlsn/VLSNConsistencyTest.java b/test/com/sleepycat/je/rep/vlsn/VLSNConsistencyTest.java new file mode 100644 index 0000000..ff816a4 --- /dev/null +++ b/test/com/sleepycat/je/rep/vlsn/VLSNConsistencyTest.java @@ -0,0 +1,418 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.rep.vlsn; + +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.LogEntryHeader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.LogItem; +import com.sleepycat.je.recovery.RecoveryInfo; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.rep.vlsn.VLSNIndex.WaitTimeOutException; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Jan from Nokia runs into the issue that the on disk VLSNIndex is not + * consistent with what we have in the cache when doing recovery. 
This happens
+ * in the following scenario:
+ *
+ * 1. Thread A writes a log entry to the log, but sleeps before it registers
+ *    the VLSN A of that entry.
+ * 2. The daemon Checkpointer thread wakes up and flushes the VLSNIndex to
+ *    disk; however, VLSN A is not yet registered, so it is not written.
+ * 3. Thread A wakes up again and registers VLSN A; it also continues logging
+ *    some other entries.
+ * 4. JE crashes at this moment. While doing recovery, JE will read VLSNs
+ *    from the CkptStart, but VLSN A was logged before the CkptStart, so JE
+ *    will find a gap between the VLSN in the durable VLSNRange and what we
+ *    have in the cache.
+ *
+ * See SR [#19754] for more details.
+ *
+ * Also test SR [#20165], which was a problem with this bug fix.
+ */
+public class VLSNConsistencyTest extends TestBase {
+    private final File envRoot;
+    private final String dbName = "testDB";
+    private RepEnvInfo[] repEnvInfo;
+
+    public VLSNConsistencyTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    /**
+     * Test that the VLSNs in the durable VLSNIndex and the recovery cache are
+     * consistent, no matter what happens.
+     */
+    @Test
+    public void testVLSNConsistency()
+        throws Throwable {
+
+        try {
+            repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1,
+                                                    makeEnvConfig());
+            ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
+            RepNode repNode =
+                RepInternal.getNonNullRepImpl(master).getRepNode();
+
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            dbConfig.setAllowCreate(true);
+            dbConfig.setTransactional(true);
+            Database db = master.openDatabase(null, dbName, dbConfig);
+
+            /* Write some data. */
+            DatabaseEntry key = new DatabaseEntry();
+            DatabaseEntry data = new DatabaseEntry();
+            for (int i = 0; i <= 100; i++) {
+                IntegerBinding.intToEntry(i, key);
+                StringBinding.stringToEntry("herococo", data);
+                db.put(null, key, data);
+            }
+
+            /* Record the last commit VLSN on the master. */
+            VLSN commitVLSN = repNode.getCurrentTxnEndVLSN();
+
+            final Transaction txn = master.beginTransaction(null, null);
+            IntegerBinding.intToEntry(101, key);
+            StringBinding.stringToEntry("herococo", data);
+            db.put(txn, key, data);
+
+            /* Make sure the VLSN of this new record is registered. */
+            int counter = 1;
+            VLSN lastVLSN = repNode.getVLSNIndex().getRange().getLast();
+            while (lastVLSN.compareTo(commitVLSN) <= 0) {
+                Thread.sleep(1000);
+                /* Re-read the range; the registration happens concurrently. */
+                lastVLSN = repNode.getVLSNIndex().getRange().getLast();
+                counter++;
+                if (counter == 10) {
+                    throw new IllegalStateException
+                        ("Test is in invalid state, the leaf node is not " +
+                         "flushed.");
+                }
+            }
+
+            /* This hook delays the registration of the TxnCommit's VLSN. */
+            VLSNHook testHook = new VLSNHook();
+            /* This hook is used to flush the TxnCommit to the log. */
+            CountDownLatch flushHook = new CountDownLatch(1);
+            FlushHook flushLogHook =
+                new FlushHook(
+                    flushHook, RepInternal.getNonNullRepImpl(master));
+            RepInternal.getNonNullRepImpl(master).getLogManager().
+                setDelayVLSNRegisterHook(testHook);
+            RepInternal.getNonNullRepImpl(master).getLogManager().
+                setFlushLogHook(flushLogHook);
+
+            /*
+             * Commit the transaction in another thread, so that it won't
+             * block this test.
+             */
+            JUnitThread commitThread = new JUnitThread("Commit") {
+                @Override
+                public void testBody() {
+                    try {
+                        txn.commit();
+                    } catch (Throwable t) {
+                        t.printStackTrace();
+                    }
+                }
+            };
+
+            commitThread.start();
+
+            /* Wait until the TxnCommit is logged on the disk.
 */
+            flushHook.await();
+
+            StatsConfig stConfig = new StatsConfig();
+            stConfig.setFast(true);
+            stConfig.setClear(true);
+            long lastCkptEnd =
+                master.getStats(stConfig).getLastCheckpointEnd();
+
+            /* Do a checkpoint to flush the VLSNIndex. */
+            CheckpointConfig ckptConfig = new CheckpointConfig();
+            ckptConfig.setForce(true);
+            master.checkpoint(ckptConfig);
+
+            long newCkptEnd =
+                master.getStats(stConfig).getLastCheckpointEnd();
+            while (newCkptEnd <= lastCkptEnd) {
+                Thread.sleep(1000);
+                newCkptEnd = master.getStats(stConfig).getLastCheckpointEnd();
+                counter++;
+                if (counter == 20) {
+                    throw new IllegalStateException
+                        ("Checkpointer didn't finish in specified time");
+                }
+            }
+
+            /* Release the CountDownLatch so that the VLSN is registered. */
+            commitThread.finishTest();
+
+            /* Write some transactions, so that RecoveryTracker is not null. */
+            for (int i = 102; i <= 200; i++) {
+                IntegerBinding.intToEntry(i, key);
+                StringBinding.stringToEntry("herococo", data);
+                db.put(null, key, data);
+            }
+
+            /*
+             * Abnormally close the Environment, so the VLSNIndex flushed by
+             * the last checkpoint is the only version on the disk.
+             */
+            repEnvInfo[0].abnormalCloseEnv();
+
+            /* Reopen the Environment again to see if the test fails. */
+            repEnvInfo[0].openEnv();
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            RepTestUtils.shutdownRepEnvs(repEnvInfo);
+        }
+    }
+
+    /* Hook used to block the VLSN registration. */
+    private static class VLSNHook implements TestHook<Object> {
+
+        private boolean doDelay;
+
+        public VLSNHook() {
+            doDelay = true;
+        }
+
+        public void hookSetup() {
+        }
+
+        public void doIOHook()
+            throws IOException {
+        }
+
+        public void doHook(Object obj) {
+        }
+
+        public void doHook() {
+            try {
+                if (doDelay) {
+                    doDelay = false;
+                    Thread.sleep(VLSNIndex.AWAIT_CONSISTENCY_MS * 2);
+                }
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+
+        public Object getHookValue() {
+            return null;
+        }
+    }
+
+    /* Hook used to guarantee that the TxnCommit is flushed. */
+    private static class FlushHook implements TestHook<CountDownLatch> {
+        private final CountDownLatch flushLatch;
+        private final EnvironmentImpl envImpl;
+
+        public FlushHook(CountDownLatch flushLatch, EnvironmentImpl envImpl) {
+            this.flushLatch = flushLatch;
+            this.envImpl = envImpl;
+        }
+
+        public void hookSetup() {
+        }
+
+        public void doIOHook()
+            throws IOException {
+        }
+
+        public void doHook(CountDownLatch obj) {
+        }
+
+        public void doHook() {
+            try {
+                envImpl.getLogManager().flushSync();
+                flushLatch.countDown();
+            } catch (Throwable t) {
+                t.printStackTrace();
+            }
+        }
+
+        public CountDownLatch getHookValue() {
+            return flushLatch;
+        }
+    }
+
+    /**
+     * [#20165] All feeders must wait for the same vlsn, but the checkpointer
+     * is picking a vlsn to wait for that may be greater than any currently
+     * registered in the VLSNIndex. This can provoke an assertion, because the
+     * waitForVLSN system assumes all feeders are lodged at the same point.
+     */
+    @Test
+    public void testCheckpointerAhead() throws Exception {
+        Environment env = new Environment(envRoot, makeEnvConfig());
+        VLSNIndex vlsnIndex = null;
+
+        try {
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      "TEST_MAP_DB", new NameIdPair("n1", 1),
+                                      10, 100, 1000,
+                                      new RecoveryInfo());
+            vlsnIndex.initAsMaster();
+            CountDownLatch checker = new CountDownLatch(2);
+
+            /*
+             * Thread A begins a write. First it bumps the vlsn. This
+             * is done within the log write latch. It is still logging, and
+             * has not yet registered the vlsn.
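+             *
+             * Schematically, the interleaving exercised below is:
+             *   bump A -> feeder waits on A -> bump B ->
+             *   checkpointer awaits consistency -> put A -> put B.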
+             */
+            VLSN vlsnA = vlsnIndex.bump();
+
+            /*
+             * A feeder is either already waiting on vlsnA, or begins to wait
+             * for it now. It must wait because the vlsn is not yet written to
+             * the log and registered.
+             */
+            MockFeeder feederA = new MockFeeder(vlsnIndex, vlsnA, checker);
+            feederA.start();
+
+            /* Thread B begins a write. */
+            VLSN vlsnB = vlsnIndex.bump();
+
+            /* Make sure that the waitVLSN is at vlsn A already. */
+            while (true) {
+                VLSN waitFor = vlsnIndex.getPutWaitVLSN();
+                if ((waitFor != null) &&
+                    (waitFor.equals(vlsnA))) {
+                    break;
+                }
+                Thread.sleep(100);
+            }
+
+            /*
+             * Now the checkpoint awaits consistency. It uses the latest
+             * generated VLSN, which is VLSN B. In the original bug, this
+             * caused an assertion because vlsnIndex.waitForVLSN rightfully
+             * assumes that all Feeders await the same address, and objects
+             * to the checkpointer waiting on VLSN B.
+             */
+            CountDownLatch ckptStarted = new CountDownLatch(1);
+            MockCheckpointer ckpter = new MockCheckpointer(vlsnIndex,
+                                                           checker,
+                                                           ckptStarted);
+            ckpter.start();
+            ckptStarted.await();
+            vlsnIndex.put(makeLogItem(100, vlsnA));
+            vlsnIndex.put(makeLogItem(1000, vlsnB));
+            checker.await();
+        } finally {
+            if (vlsnIndex != null) {
+                vlsnIndex.close();
+            }
+            env.close();
+        }
+    }
+
+    private EnvironmentConfig makeEnvConfig() {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000000");
+        return envConfig;
+    }
+
+    private LogItem makeLogItem(long useLsn, VLSN useVLSN) {
+        LogItem item = new LogItem();
+        item.header = new LogEntryHeader(LogEntryType.LOG_TRACE.getTypeNum(),
+                                         1,
+                                         100,
+                                         useVLSN);
+        item.lsn = useLsn;
+        return item;
+    }
+
+    private class MockFeeder extends Thread {
+        private final VLSN target;
+        private final VLSNIndex vlsnIndex;
+        private final CountDownLatch checker;
+
+        MockFeeder(VLSNIndex index, VLSN target, CountDownLatch checker) {
+            this.target = target;
+            this.vlsnIndex = index;
+            this.checker = checker;
+        }
+
+        @Override
+        public void run() {
+            try {
+                vlsnIndex.waitForVLSN(target, 100000);
+                checker.countDown();
+            } catch (InterruptedException e) {
+                fail("unexpected: " + e);
+            } catch (WaitTimeOutException e) {
+                fail("unexpected: " + e);
+            }
+        }
+    }
+
+    private class MockCheckpointer extends Thread {
+        private final VLSNIndex vlsnIndex;
+        private final CountDownLatch checker;
+        private final CountDownLatch started;
+
+        MockCheckpointer(VLSNIndex index,
+                         CountDownLatch checker,
+                         CountDownLatch started) {
+            this.vlsnIndex = index;
+            this.checker = checker;
+            this.started = started;
+        }
+
+        @Override
+        public void run() {
+            started.countDown();
+            try {
+                vlsnIndex.awaitConsistency();
+            } catch (Exception e) {
+                fail(e.toString());
+            }
+            checker.countDown();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/vlsn/VLSNIndexTest.java b/test/com/sleepycat/je/rep/vlsn/VLSNIndexTest.java
new file mode 100644
index 0000000..389d518
--- /dev/null
+++ b/test/com/sleepycat/je/rep/vlsn/VLSNIndexTest.java
@@ -0,0 +1,1461 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.vlsn;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.logging.Logger;
+
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.recovery.RecoveryInfo;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.util.TestLogItem;
+import com.sleepycat.je.rep.vlsn.VLSNIndex.BackwardVLSNScanner;
+import com.sleepycat.je.rep.vlsn.VLSNIndex.ForwardVLSNScanner;
+import com.sleepycat.je.txn.Txn;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Exercise VLSNIndex
+ */
+public class VLSNIndexTest extends TestBase {
+
+    private final String testMapDb = "TEST_MAP_DB";
+    private final boolean verbose = Boolean.getBoolean("verbose");
+    private final File envRoot;
+
+    public VLSNIndexTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    private Environment makeEnvironment()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(false);
+        return new Environment(envRoot, envConfig);
+    }
+
+    @Test
+    public void testNonFlushedGets()
+        throws Throwable {
+
+        doGets(false); // don't flush
+    }
+
+    @Test
+    public void testFlushedGets()
+        throws Throwable {
+
+        doGets(true); // flush
+    }
+
+    // TODO: test decrementing the vlsn
+
+    /**
+     * Populate a vlsnIndex, and retrieve mappings.
+     * @param flush if true, write the vlsn index to disk, so that the
+     * subsequent get() calls fetch the mappings off disk.
+     */
+    private void doGets(boolean flush)
+        throws Throwable {
+
+        int stride = 3;
+        int maxMappings = 4;
+        int maxDist = 1000;
+
+        Environment env = makeEnvironment();
+        VLSNIndex vlsnIndex = null;
+
+        try {
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      testMapDb, new NameIdPair("n1", 1),
+                                      stride, maxMappings, maxDist,
+                                      new RecoveryInfo());
+
+            int numEntries = 25;
+
+            /*
+             * Put some mappings in. With the strides, we expect them to
+             * end up in
+             * Bucket 1 = vlsn 1, 4, 7, 10, 12
+             * Bucket 2 = vlsn 13, 16, 19, 22, 24
+             * Bucket 3 = vlsn 25
+             */
+            for (int i = 1; i <= numEntries; i++) {
+                putEntryToVLSNIndex(i, 33, 100, vlsnIndex);
+            }
+
+            /* We expect these mappings.
 */
+            TreeMap<VLSN, Long> expected = new TreeMap<VLSN, Long>();
+            long[] expectedVLSN = {
+                1, 4, 7, 10, 12, 13,
+                16, 19, 22, 24, 25
+            };
+            makeExpectedMapping(expected, expectedVLSN, 33, 100);
+
+            if (flush) {
+                vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC);
+            }
+
+            VLSNRange range = vlsnIndex.getRange();
+            assertEquals(expected.firstKey(), range.getFirst());
+            assertEquals(expected.lastKey(), range.getLast());
+
+            ForwardVLSNScanner fScanner = new ForwardVLSNScanner(vlsnIndex);
+            Long startLsn = fScanner.getStartingLsn(expected.firstKey());
+            assertEquals(expected.get(expected.firstKey()), startLsn);
+
+            for (int i = 1; i <= numEntries; i++) {
+                VLSN vlsn = new VLSN(i);
+                Long expectedLsn = expected.get(vlsn);
+                Long scannerLsn = fScanner.getPreciseLsn(vlsn);
+
+                if (expectedLsn == null) {
+                    assertEquals((Long) DbLsn.NULL_LSN, scannerLsn);
+
+                    /*
+                     * If there's no exact match, approximate search should
+                     * return the one just previous.
+                     */
+                    Long prevLsn = null;
+                    for (int find = i - 1; find >= 0; find--) {
+                        prevLsn = expected.get(new VLSN(find));
+                        if (prevLsn != null) {
+                            break;
+                        }
+                    }
+                    assertEquals(prevLsn,
+                                 (Long) fScanner.getApproximateLsn(vlsn));
+                } else {
+                    assertEquals(expectedLsn, scannerLsn);
+                    assertEquals(expectedLsn,
+                                 (Long) fScanner.getApproximateLsn(vlsn));
+                }
+            }
+
+            BackwardVLSNScanner bScanner = new BackwardVLSNScanner(vlsnIndex);
+            startLsn = bScanner.getStartingLsn(expected.lastKey());
+            assertEquals(expected.get(expected.lastKey()), startLsn);
+
+            for (int i = numEntries; i >= 1; i--) {
+                VLSN vlsn = new VLSN(i);
+                Long expectedLsn = expected.get(vlsn);
+                Long scannerLsn = bScanner.getPreciseLsn(vlsn);
+
+                if (expectedLsn == null) {
+                    assertEquals((Long) DbLsn.NULL_LSN, scannerLsn);
+                } else {
+                    assertEquals(expectedLsn, scannerLsn);
+                }
+            }
+
+            /*
+             * Check that we get the less than or equal mapping when we
+             * ask to start at a given VLSN.
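+             *
+             * For example, a forward scan starting at vlsn 2 must begin at
+             * the mapping for vlsn 1 (33/100), while a backward scan starting
+             * at vlsn 2 begins at the mapping for vlsn 4 (33/400).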
+ */ + ForwardVLSNScanner forwards = new ForwardVLSNScanner(vlsnIndex); + BackwardVLSNScanner backwards = new BackwardVLSNScanner(vlsnIndex); + checkStartLsn(forwards, backwards, 1, + DbLsn.makeLsn(33, 100), + DbLsn.makeLsn(33, 100)); + checkStartLsn(forwards, backwards, 2, + DbLsn.makeLsn(33, 100), + DbLsn.makeLsn(33, 400)); + checkStartLsn(forwards, backwards, 3, + DbLsn.makeLsn(33, 100), + DbLsn.makeLsn(33, 400)); + checkStartLsn(forwards, backwards, 4, + DbLsn.makeLsn(33, 400), + DbLsn.makeLsn(33, 400)); + checkStartLsn(forwards, backwards, 5, + DbLsn.makeLsn(33, 400), + DbLsn.makeLsn(33, 700)); + checkStartLsn(forwards, backwards, 6, + DbLsn.makeLsn(33, 400), + DbLsn.makeLsn(33, 700)); + checkStartLsn(forwards, backwards, 7, + DbLsn.makeLsn(33, 700), + DbLsn.makeLsn(33, 700)); + checkStartLsn(forwards, backwards, 8, + DbLsn.makeLsn(33, 700), + DbLsn.makeLsn(33, 1000)); + checkStartLsn(forwards, backwards, 9, + DbLsn.makeLsn(33, 700), + DbLsn.makeLsn(33, 1000)); + checkStartLsn(forwards, backwards, 10, + DbLsn.makeLsn(33, 1000), + DbLsn.makeLsn(33, 1000)); + checkStartLsn(forwards, backwards, 11, + DbLsn.makeLsn(33, 1000), + DbLsn.makeLsn(33, 1200)); + checkStartLsn(forwards, backwards, 12, + DbLsn.makeLsn(33, 1200), + DbLsn.makeLsn(33, 1200)); + checkStartLsn(forwards, backwards, 13, + DbLsn.makeLsn(33, 1300), + DbLsn.makeLsn(33, 1300)); + checkStartLsn(forwards, backwards, 14, + DbLsn.makeLsn(33, 1300), + DbLsn.makeLsn(33, 1600)); + checkStartLsn(forwards, backwards, 15, + DbLsn.makeLsn(33, 1300), + DbLsn.makeLsn(33, 1600)); + checkStartLsn(forwards, backwards, 16, + DbLsn.makeLsn(33, 1600), + DbLsn.makeLsn(33, 1600)); + checkStartLsn(forwards, backwards, 17, + DbLsn.makeLsn(33, 1600), + DbLsn.makeLsn(33, 1900)); + checkStartLsn(forwards, backwards, 18, + DbLsn.makeLsn(33, 1600), + DbLsn.makeLsn(33, 1900)); + checkStartLsn(forwards, backwards, 19, + DbLsn.makeLsn(33, 1900), + DbLsn.makeLsn(33, 1900)); + checkStartLsn(forwards, backwards, 20, + DbLsn.makeLsn(33, 1900), + DbLsn.makeLsn(33, 2200)); + checkStartLsn(forwards, backwards, 21, + DbLsn.makeLsn(33, 1900), + DbLsn.makeLsn(33, 2200)); + checkStartLsn(forwards, backwards, 22, + DbLsn.makeLsn(33, 2200), + DbLsn.makeLsn(33, 2200)); + checkStartLsn(forwards, backwards, 23, + DbLsn.makeLsn(33, 2200), + DbLsn.makeLsn(33, 2400)); + checkStartLsn(forwards, backwards, 24, + DbLsn.makeLsn(33, 2400), + DbLsn.makeLsn(33, 2400)); + checkStartLsn(forwards, backwards, 25, + DbLsn.makeLsn(33, 2500), + DbLsn.makeLsn(33, 2500)); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (vlsnIndex != null) { + vlsnIndex.close(); + } + + env.removeDatabase(null, testMapDb); + env.close(); + } + } + + private void checkStartLsn(ForwardVLSNScanner forwardScanner, + BackwardVLSNScanner backwardScanner, + int targetVLSNVal, + long expectedForwardStart, + long expectedBackwardStart) + throws DatabaseException { + + VLSN target = new VLSN(targetVLSNVal); + long startLsn = forwardScanner.getStartingLsn(target); + long endLsn = backwardScanner.getStartingLsn(target); + + assertEquals("target=" + + DbLsn.getNoFormatString(expectedForwardStart) + + " got = " + DbLsn.getNoFormatString(startLsn), + expectedForwardStart, startLsn); + + assertEquals("target=" + + DbLsn.getNoFormatString(expectedBackwardStart) + + " got = " + DbLsn.getNoFormatString(endLsn), + expectedBackwardStart, endLsn); + } + + /* + * VLSN puts are done out of the log write latch, and can therefore show + * up out of order. 
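+     * For example, in the loadOrder used below, vlsn 5 is put before vlsn 3,
+     * and vlsn 7 is put last even though vlsns 8 and 9 follow it in the log.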
+     */
+    @Test
+    public void testOutOfOrderPuts()
+        throws Throwable {
+
+        int stride = 3;
+        int maxMappings = 4;
+        int maxDist = 1000;
+
+        Environment env = makeEnvironment();
+        byte lnType = LogEntryType.LOG_INS_LN_TRANSACTIONAL.getTypeNum();
+        byte commitType = LogEntryType.LOG_TXN_COMMIT.getTypeNum();
+        byte syncType = LogEntryType.LOG_MATCHPOINT.getTypeNum();
+
+        Mapping[] mappings = new Mapping[] {new Mapping(1, 1, 0, lnType),
+                                            new Mapping(2, 2, 100, commitType),
+                                            new Mapping(3, 2, 200, lnType),
+                                            new Mapping(4, 3, 100, commitType),
+                                            new Mapping(5, 3, 200, lnType),
+                                            new Mapping(6, 4, 100, lnType),
+                                            new Mapping(7, 4, 200, syncType),
+                                            new Mapping(8, 4, 300, lnType),
+                                            new Mapping(9, 5, 100, lnType)};
+
+        Long[] loadOrder = new Long[] {1L, 2L, 5L, 3L, 6L, 4L, 8L, 9L, 7L};
+
+        try {
+            for (int flushIndex = -1;
+                 flushIndex < mappings.length;
+                 flushIndex++) {
+
+                MappingLoader loader = null;
+                try {
+                    loader = new MappingLoader(env,
+                                               stride,
+                                               maxMappings,
+                                               maxDist,
+                                               mappings,
+                                               loadOrder,
+                                               4, // minimum mappings
+                                               flushIndex);
+                    loader.verify(new VLSN(7),  // lastSync
+                                  new VLSN(4)); // lastTxnEnd
+
+                } catch (Throwable t) {
+                    t.printStackTrace();
+                    throw t;
+                } finally {
+                    if (loader != null) {
+                        loader.close();
+                    }
+
+                    env.removeDatabase(null, testMapDb);
+                }
+            }
+        } finally {
+            env.close();
+        }
+    }
+
+    private class MappingLoader {
+        private final int minimumMappings;
+        final TreeMap<Long, Mapping> expected =
+            new TreeMap<Long, Mapping>();
+        final VLSNIndex vlsnIndex;
+        private final Long firstInRange;
+
+        MappingLoader(Environment env,
+                      int stride,
+                      int maxMappings,
+                      int maxDist,
+                      Mapping[] mappings,
+                      Long[] loadOrder,
+                      int minimumMappings,
+                      int flushIndex)
+            throws DatabaseException {
+
+            this.minimumMappings = minimumMappings;
+
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      testMapDb, new NameIdPair("n1", 1),
+                                      stride, maxMappings, maxDist,
+                                      new RecoveryInfo());
+
+            /* Initialize the expected map. */
+            for (Mapping m : mappings) {
+                expected.put(m.vlsn.getSequence(), m);
+            }
+
+            /* Load the vlsnIndex. */
+            for (int i = 0; i < loadOrder.length; i++) {
+                long vlsnVal = loadOrder[i];
+                Mapping m = expected.get(vlsnVal);
+                if (verbose) {
+                    System.out.println("put " + m);
+                }
+                vlsnIndex.put(new TestLogItem(new VLSN(vlsnVal), m.lsn,
+                                              m.entryTypeNum));
+
+                if (i == flushIndex) {
+                    vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC);
+                }
+            }
+            firstInRange = mappings[0].vlsn.getSequence();
+
+            if (verbose) {
+                System.out.println("flush at " + flushIndex);
+            }
+        }
+
+        void verify(VLSN lastSyncVLSN, VLSN lastTxnEnd)
+            throws DatabaseException {
+
+            VLSNRange range = vlsnIndex.getRange();
+            assert (firstInRange == range.getFirst().getSequence()) :
+                "first=" + firstInRange + " range=" + range;
+            assert (expected.lastKey() == range.getLast().getSequence()) :
+                "last=" + expected.lastKey() + " range=" + range;
+            assert vlsnIndex.verify(verbose);
+
+            assertEquals(lastSyncVLSN, range.getLastSync());
+            assertEquals(lastTxnEnd, range.getLastTxnEnd());
+
+            /*
+             * Check that the mappings, both vlsn and lsn value, are what
+             * we expect. Scan forwards.
+             *
+             * This test assumes that the first vlsn may not be mapped. In
+             * reality, vlsn 1 is always created first, thereby guaranteeing
+             * that the vlsn index always has a starting range point. Log file
+             * cleaning and head truncation maintain that by creating the
+             * ghost bucket. But in this test case, there isn't a starting
+             * mapping, so we call scanner.getStartingLsn from the first in
+             * the range.
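+             * (In the inputs used here, vlsn 1 does happen to be mapped, so
+             * getStartingLsn returns its exact lsn.)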
+             */
+            int numMappings = 0;
+            ForwardVLSNScanner fScanner = new ForwardVLSNScanner(vlsnIndex);
+            assertEquals(new Long(expected.get(firstInRange).lsn),
+                         new Long(fScanner.getStartingLsn
+                                  (new VLSN(firstInRange))));
+
+            for (Map.Entry<Long, Mapping> e : expected.entrySet()) {
+                Long vlsnValue = e.getKey();
+                if (vlsnValue < firstInRange) {
+                    continue;
+                }
+                VLSN vlsn = new VLSN(vlsnValue);
+                long scannedLsn = fScanner.getPreciseLsn(vlsn);
+                if (scannedLsn != DbLsn.NULL_LSN) {
+                    numMappings++;
+                    assert (e.getValue().lsn == scannedLsn);
+                }
+            }
+
+            assert numMappings >= minimumMappings : "numMappings = " +
+                numMappings;
+
+            /* Scan backwards. */
+            numMappings = 0;
+            BackwardVLSNScanner bScanner = new BackwardVLSNScanner(vlsnIndex);
+            Long lastKey = expected.lastKey();
+            assertEquals(expected.get(lastKey).lsn,
+                         bScanner.getStartingLsn(new VLSN(lastKey)));
+
+            SortedMap<Long, Mapping> reverse = reverseExpected(expected);
+            for (Map.Entry<Long, Mapping> e : reverse.entrySet()) {
+                Long vlsnValue = e.getKey();
+                if (vlsnValue < firstInRange) {
+                    break;
+                }
+                VLSN vlsn = new VLSN(vlsnValue);
+                long scannedLsn = bScanner.getPreciseLsn(vlsn);
+                if (scannedLsn != DbLsn.NULL_LSN) {
+                    numMappings++;
+                    assert (e.getValue().lsn == scannedLsn);
+                }
+            }
+
+            assert numMappings >= minimumMappings : "numMappings = " +
+                numMappings;
+        }
+
+        void close()
+            throws DatabaseException {
+            if (vlsnIndex != null) {
+                vlsnIndex.close();
+            }
+        }
+    }
+
+    private SortedMap<Long, Mapping>
+        reverseExpected(SortedMap<Long, Mapping> expected) {
+
+        SortedMap<Long, Mapping> r =
+            new TreeMap<Long, Mapping>(new Reverse());
+        r.putAll(expected);
+        return r;
+    }
+
+    private static class Reverse implements Comparator<Long> {
+        @Override
+        public int compare(Long a, Long b) {
+            return b.compareTo(a);
+        }
+    }
+
+    /** Package together the inputs for a new vlsn->lsn mapping. */
+    private static class Mapping {
+        final VLSN vlsn;
+        final long lsn;
+        final byte entryTypeNum;
+
+        Mapping(long vlsnVal,
+                long fileNumber,
+                long offset,
+                byte entryTypeNum) {
+            this.vlsn = new VLSN(vlsnVal);
+            this.lsn = DbLsn.makeLsn(fileNumber, offset);
+            this.entryTypeNum = entryTypeNum;
+        }
+
+        @Override
+        public String toString() {
+            return "vlsn=" + vlsn + " lsn=" + DbLsn.getNoFormatString(lsn) +
+                " type=" + entryTypeNum;
+        }
+    }
+
+    /**
+     * Add information onto the test mapping inputs about what the expected
+     * outcomes would be if the vlsn index was truncated at this mapping. For
+     * example, suppose a mapping of vlsn 8 -> lsn 108 was given for a test,
+     * and it ended up in a bucket that looks like this:
+     *
+     * first vlsn 1, last vlsn 10,
+     * mappings: 1->101, 3->103, 6->106, 9->109, 10->110
+     *
+     * Then if the vlsn index was truncated at vlsn 8, and a "capping" lsn is
+     * supplied, the last expected on-disk vlsn would be 7. (A new mapping of
+     * 7->cap lsn would be manufactured). If no "capping" lsn is supplied,
+     * then the bucket is truncated to the last known mapping, which would be
+     * vlsn 6->106, and the "expectedLastOnDiskNoCap" would be 6.
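+     *
+     * testDatabasePruning below encodes both outcomes for each input via the
+     * expectedLastOnDisk and expectedLastOnDiskNoCap fields.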
+    /**
+     * Add information onto the test mapping inputs about what the expected
+     * outcomes would be if the vlsn index were truncated at this mapping.
+     * For example, suppose a mapping of vlsn 8 -> lsn 108 was given for a
+     * test, and it ended up in a bucket that looks like this:
+     *
+     *   first vlsn 1, last vlsn 10,
+     *   mappings: 1->101, 3->103, 6->106, 9->109, 10->110
+     *
+     * Then if the vlsn index was truncated at vlsn 8, and a "capping" lsn
+     * is supplied, the last expected on-disk vlsn would be 7. (A new
+     * mapping of 7->cap lsn would be manufactured). If no "capping" lsn is
+     * supplied, then the bucket is truncated to the last known mapping,
+     * which would be vlsn 6->106, and the "expectedLastOnDiskNoCap" would
+     * be 6.
+     */
+    private static class TruncateMapping extends Mapping {
+        final VLSN expectedLastOnDisk;
+        final VLSN expectedLastOnDiskNoCap;
+
+        TruncateMapping(long vlsnVal,
+                        long fileNumber,
+                        long offset,
+                        byte entryTypeNum,
+                        long expectedLastOnDisk,
+                        long expectedLastOnDiskNoCap) {
+
+            super(vlsnVal, fileNumber, offset, entryTypeNum);
+            this.expectedLastOnDisk = new VLSN(expectedLastOnDisk);
+            this.expectedLastOnDiskNoCap = new VLSN(expectedLastOnDiskNoCap);
+        }
+
+        @Override
+        public String toString() {
+            return super.toString() + " expectedLast=" + expectedLastOnDisk +
+                " expectedNoCap=" + expectedLastOnDiskNoCap;
+        }
+    }
+
+    /**
+     * VLSN puts are done outside of the log write latch, and can therefore
+     * show up out of order, which can result in gaps in the VLSN bucket
+     * sequence. Check that tail truncation in the gap works, and that the
+     * requirement that the start and endpoints of the VLSN range have
+     * mappings is obeyed.
+     */
+    @Test
+    public void testTruncateTailOutOfOrder() {
+        byte lnType = LogEntryType.LOG_INS_LN_TRANSACTIONAL.getTypeNum();
+
+        Mapping[] mapping = new Mapping[]
+            {new Mapping(1, 1, 10, lnType),
+             new Mapping(2, 1, 20, lnType),
+             new Mapping(3, 1, 30, lnType),
+             new Mapping(4, 1, 40, lnType),
+             new Mapping(5, 1, 50, lnType),
+             new Mapping(6, 1, 60, lnType),
+             new Mapping(7, 1, 70, lnType),
+             new Mapping(8, 1, 80, lnType),
+             new Mapping(9, 1, 90, lnType),
+             new Mapping(10, 1, 100, lnType),
+             new Mapping(11, 1, 110, lnType),
+             new Mapping(12, 1, 120, lnType),
+             new Mapping(13, 1, 130, lnType),
+             new Mapping(14, 1, 140, lnType),
+             new Mapping(15, 1, 150, lnType),
+             new Mapping(16, 1, 160, lnType),
+             new Mapping(20, 1, 1020, lnType),
+             new Mapping(17, 1, 170, lnType),
+             new Mapping(18, 1, 180, lnType),
+             new Mapping(19, 1, 190, lnType),
+             new Mapping(21, 1, 1021, lnType)};
+
+        Logger logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test");
+
+        /*
+         * Truncate non-existing mappings when the portion on disk includes
+         * mappings that precede the gap.
+         */
+        truncateOutOfOrder(logger, mapping, 15, 17);
+        truncateOutOfOrder(logger, mapping, 15, 18);
+        truncateOutOfOrder(logger, mapping, 15, 19);
+
+        /*
+         * Truncate non-existing mappings when the portion on disk ends
+         * exactly before the gap.
+         */
+        truncateOutOfOrder(logger, mapping, 16, 17);
+        truncateOutOfOrder(logger, mapping, 16, 18);
+        truncateOutOfOrder(logger, mapping, 16, 19);
+
+        /*
+         * Truncate non-existing mappings when the portion on disk covers
+         * the gap, and extra buckets must be put on disk.
+         */
+        truncateOutOfOrder(logger, mapping, 20, 17);
+        truncateOutOfOrder(logger, mapping, 20, 18);
+        truncateOutOfOrder(logger, mapping, 20, 19);
+    }
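+
+    /*
+     * Editorial aside: the tests here build LSNs with DbLsn.makeLsn(file,
+     * offset) and later split them back apart with getFileNumber and
+     * getFileOffset. A minimal sketch of that round trip (illustrative
+     * only, not part of the original test):
+     */
+    @SuppressWarnings("unused")
+    private static void lsnRoundTripSketch() {
+        long lsn = DbLsn.makeLsn(1, 170);       // file 1, offset 170
+        assert DbLsn.getFileNumber(lsn) == 1;   // recovers the file number
+        assert DbLsn.getFileOffset(lsn) == 170; // recovers the offset
+    }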
+    private void truncateOutOfOrder(Logger logger,
+                                    Mapping[] mapping,
+                                    int flushPoint,
+                                    int truncatePoint) {
+        int stride = 3;
+        int maxMappings = 3;
+        int maxDist = 20;
+
+        Environment env = makeEnvironment();
+        VLSNIndex vlsnIndex = null;
+
+        try {
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      testMapDb, new NameIdPair("n1", 1),
+                                      stride, maxMappings, maxDist,
+                                      new RecoveryInfo());
+
+            /*
+             * Put some mappings in. With the strides, we expect them to
+             * end up in
+             *   Bucket 1 = vlsn 1 -> 16
+             *   Bucket 2 = vlsn 20 -> 21
+             */
+            for (Mapping m : mapping) {
+                TestLogItem logItem = new TestLogItem
+                    (m.vlsn, m.lsn, m.entryTypeNum);
+                vlsnIndex.put(logItem);
+                if (m.vlsn.getSequence() == flushPoint) {
+                    vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC);
+                }
+            }
+            logger.info("--------------------\n");
+            vlsnIndex.verify(true);
+            logger.info("Test case: flush=" + flushPoint +
+                        " truncate=" + truncatePoint);
+
+            checkBoundaryVLSN(vlsnIndex, 16);
+            checkBoundaryVLSN(vlsnIndex, 17);
+            checkBoundaryVLSN(vlsnIndex, 18);
+            checkBoundaryVLSN(vlsnIndex, 19);
+            checkBoundaryVLSN(vlsnIndex, 20);
+
+            vlsnIndex.truncateFromTail(new VLSN(truncatePoint),
+                                       DbLsn.makeLsn(1, 170));
+            vlsnIndex.verify(true);
+            for (int i = 16; i < truncatePoint; i++) {
+                logger.info("after truncation, check buckets for " + i);
+                checkBoundaryVLSN(vlsnIndex, i);
+            }
+        } finally {
+
+            if (vlsnIndex != null) {
+                vlsnIndex.close();
+            }
+
+            env.removeDatabase(null, testMapDb);
+            env.close();
+        }
+    }
+
+    /**
+     * Exercise pruning the database tail in a vlsn index that has a gap in
+     * the buckets.
+     */
+    @Test
+    public void testDatabasePruning() {
+        byte lnType = LogEntryType.LOG_INS_LN_TRANSACTIONAL.getTypeNum();
+
+        /*
+         * These mappings should produce buckets that look like this:
+         *  1 : vlsn 1 - 9   (lsns for 1, 4, 7, 9)
+         * 10 : vlsn 10 - 16 (lsns for 10, 13, 16);
+         * 20 : vlsn 20 - 21 (lsns for 20, 21);
+         */
+        TruncateMapping[] mapping = new TruncateMapping[]
+            {new TruncateMapping(1, 1, 10, lnType, VLSN.NULL_VLSN_SEQUENCE,
+                                 VLSN.NULL_VLSN_SEQUENCE),
+             new TruncateMapping(2, 1, 20, lnType, 1, 1),
+             new TruncateMapping(3, 1, 30, lnType, 2, 1),
+             new TruncateMapping(4, 1, 40, lnType, 3, 1),
+             new TruncateMapping(5, 1, 50, lnType, 4, 4),
+             new TruncateMapping(6, 1, 60, lnType, 5, 4),
+             new TruncateMapping(7, 1, 70, lnType, 6, 4),
+             new TruncateMapping(8, 1, 80, lnType, 7, 7),
+             new TruncateMapping(9, 1, 90, lnType, 8, 7),
+             new TruncateMapping(10, 1, 100, lnType, 9, 9),
+             new TruncateMapping(11, 1, 110, lnType, 10, 10),
+             new TruncateMapping(12, 1, 120, lnType, 11, 10),
+             new TruncateMapping(13, 1, 130, lnType, 12, 10),
+             new TruncateMapping(14, 1, 140, lnType, 13, 13),
+             new TruncateMapping(15, 1, 150, lnType, 14, 13),
+             new TruncateMapping(16, 1, 160, lnType, 15, 13),
+             new TruncateMapping(20, 1, 1020, lnType, 16, 16),
+             new TruncateMapping(17, 1, 170, lnType, 16, 16),
+             new TruncateMapping(18, 1, 180, lnType, 16, 16),
+             new TruncateMapping(19, 1, 190, lnType, 16, 16),
+             new TruncateMapping(21, 1, 1021, lnType, 20, 20)};
+
+        Map<Long, TruncateMapping> mappingMap =
+            new HashMap<Long, TruncateMapping>();
+
+        for (TruncateMapping m : mapping) {
+            mappingMap.put(m.vlsn.getSequence(), m);
+        }
+
+        for (TruncateMapping m : mapping) {
+            pruneTail(true, m, mapping, mappingMap);
+            pruneTail(false, m, mapping, mappingMap);
+        }
+    }
+
+    private void pruneTail(boolean useCap,
+                           TruncateMapping m,
+                           TruncateMapping[] mapping,
+                           Map<Long, TruncateMapping> mappingMap) {
+
+        long pruneStart = m.vlsn.getSequence();
+        Logger logger = LoggerUtils.getLoggerFixedPrefix(getClass(), "Test");
+        logger.info("prune point = " + pruneStart);
+
+        int stride = 3;
+        int maxMappings = 3;
+        int maxDist = 40;
+
+        /* Load up a vlsn index, dump it to disk. */
+        Environment env = makeEnvironment();
+        VLSNIndex vlsnIndex = null;
+        vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                  testMapDb, new NameIdPair("n1", 1),
+                                  stride, maxMappings, maxDist,
+                                  new RecoveryInfo());
+
+        for (Mapping mp : mapping) {
+            TestLogItem logItem =
+                new TestLogItem(mp.vlsn, mp.lsn, mp.entryTypeNum);
+            vlsnIndex.put(logItem);
+        }
+        vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC);
+
+        /* Prune the database at the prune point. */
+        Txn txn = Txn.createLocalTxn(DbInternal.getNonNullEnvImpl(env),
+                                     new TransactionConfig());
+
+        long lastLsn = (pruneStart == 1) ? DbLsn.NULL_LSN :
+            mappingMap.get(pruneStart - 1).lsn;
+
+        VLSN lastOnDisk =
+            vlsnIndex.pruneDatabaseTail(m.vlsn,
+                                        (useCap ? lastLsn : DbLsn.NULL_LSN),
+                                        txn);
+
+        /* Check the value for lastOnDisk. */
+        assertEquals(useCap ?
+                     m.expectedLastOnDisk.getSequence() :
+                     m.expectedLastOnDiskNoCap.getSequence(),
+                     lastOnDisk.getSequence());
+
+        txn.commit();
+        vlsnIndex.close();
+        env.removeDatabase(null, testMapDb);
+        env.close();
+    }
+
+    /**
+     * Check that it's possible to get GTE and LTE buckets for vlsnVal.
+     */
+    @SuppressWarnings("null")
+    private void checkBoundaryVLSN(VLSNIndex vlsnIndex, int vlsnVal) {
+        VLSN target = new VLSN(vlsnVal);
+        VLSNBucket bucket = vlsnIndex.getGTEBucket(target, null);
+        assertTrue(bucket != null);
+        assertTrue("bucket=" + bucket + " target=" + target,
+                   bucket.getFirst().compareTo(target) >= 0);
+        bucket = vlsnIndex.getLTEBucket(target);
+        assertTrue(bucket != null);
+        if (!bucket.owns(target)) {
+            assertTrue("bucket=" + bucket + " target=" + target,
+                       bucket.getLast().compareTo(target) < 0);
+        }
+    }
+
+    /*
+     * [SR#17765] Create a VLSNBucket with holes in it, then check that
+     * VLSNScanners can work forwards and backwards.
+     *
+     * There are three test cases:
+     * 1. Small holes between two neighbor buckets (gap = 1);
+     * 2. Large holes between two neighbor buckets (gap > 1);
+     * 3. A ghostBucket is inserted at the beginning of the VLSN range.
+     *
+     * For the first two test cases, the test logic is:
+     * 1. Manually create holes between the buckets;
+     * 2. Use ForwardVLSNScanner and BackwardVLSNScanner to travel through
+     *    the buckets;
+     * 3. In forward scanning, if a missing VLSN is visited, the previous
+     *    LSN will be returned;
+     * 4. In backward scanning, if a missing VLSN is visited, NULL_LSN
+     *    will be returned;
+     * 5. Make sure no exception is thrown during the scanning. Any
+     *    exception means there's an issue in the VLSNBucket code.
+     *
+     * This function tests the first case mentioned above.
+     */
+    @Test
+    public void testNonContiguousBucketSmallHoles()
+        throws Throwable {
+
+        /* The JE database log file (.jdb file) number. */
+        int fileNum = 33;
+        /* The offset between two contiguous VLSNs in the log file. */
+        int offset = 100;
+        /* The stride between two neighbor VLSN sequences in one bucket. */
+        int stride = 3;
+        /* The max number of VLSN->LSN mappings saved in one bucket. */
+        int maxMappings = 4;
+        /* The max distance between two neighbor VLSNs in one bucket. */
+        int maxDist = 1000;
+        /* The number of VLSN entries specified in one bucket. */
+        int numEntries = 30;
+
+        /*
+         * We create small holes in the buckets; each hole misses one VLSN
+         * (in this case, the missing VLSNs are 12 and 24).
+         *
+         * With the strides and holes, we expect the buckets to end up like:
+         * Bucket 1 = { vlsn = 1, 4, 7, 10, 11 }
+         * Bucket 2 = { vlsn = 13, 16, 19, 22, 23 }
+         * Bucket 3 = { vlsn = 25, 28, 30 }
+         */
+        long[] holes = { 12, 24 };
+        /* We will use expectedVLSN to generate the expected mappings. */
+        long[] expectedVLSN = {
+            1, 4, 7, 10, 11, 13, 16,
+            19, 22, 23, 25, 28, 30
+        };
+
+        /* Now traverse through (back and forth) buckets with small holes. */
+        scanNonContiguousBucketWithHoles(fileNum, offset, stride, maxMappings,
+                                         maxDist, numEntries, holes,
+                                         expectedVLSN, false /* if flush */);
+    }
+
+    /*
+     * [SR#17765] Create a VLSNBucket with holes in it, then check that
+     * VLSNScanners can work forwards and backwards.
+     *
+     * There are three test cases:
+     * 1. Small holes between two neighbor buckets (gap = 1);
+     * 2. Large holes between two neighbor buckets (gap > 1);
+     * 3. A ghostBucket is inserted at the beginning of the VLSN range.
+     *
+     * For the first two test cases, the test logic is:
+     * 1. Manually create holes between the buckets;
+     * 2. Use ForwardVLSNScanner and BackwardVLSNScanner to travel through
+     *    the buckets;
+     * 3. In forward scanning, if a missing VLSN is visited, the previous
+     *    LSN will be returned;
+     * 4. In backward scanning, if a missing VLSN is visited, NULL_LSN
+     *    will be returned;
+     * 5. Make sure no exception is thrown during the scanning. Any
+     *    exception means there's an issue in the VLSNBucket code.
+     *
+     * This function tests the second case mentioned above.
+     */
+    @Test
+    public void testNonContiguousBucketLargeHoles()
+        throws Throwable {
+
+        /* The JE database log file (.jdb file) number. */
+        long fileNum = 33;
+        /* The offset between two contiguous VLSNs in the log file. */
+        long offset = 100;
+        /* The stride between two neighbor VLSN sequences in one bucket. */
+        int stride = 5;
+        /* The max number of VLSN->LSN mappings saved in one bucket. */
+        int maxMappings = 4;
+        /* The max distance between two neighbor VLSNs in one bucket. */
+        int maxDist = 1000;
+        /* The number of VLSN entries specified in one bucket. */
+        int numEntries = 50;
+
+        /*
+         * We create large holes in the buckets; each hole misses three VLSNs
+         * (in this case, the missing VLSNs are 18,19,20 and 38,39,40).
+         *
+         * With the strides and holes, we expect the buckets to end up like:
+         * Bucket 1 = { vlsn = 1, 6, 11, 16, 17 }
+         * Bucket 2 = { vlsn = 21, 26, 31, 36, 37 }
+         * Bucket 3 = { vlsn = 41, 46, 50 }
+         */
+        long[] holes = { 18, 19, 20, 38, 39, 40 };
+        /* We will use expectedVLSN to generate the expected mappings. */
+        long[] expectedVLSN = {
+            1, 6, 11, 16, 17, 21, 26,
+            31, 36, 37, 41, 46, 50
+        };
+
+        /* Now traverse through (back and forth) buckets with large holes. */
+        scanNonContiguousBucketWithHoles(fileNum, offset, stride, maxMappings,
+                                         maxDist, numEntries, holes,
+                                         expectedVLSN, false /* if flush */);
+    }
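+
+    /*
+     * Editorial helper (hypothetical, not called by the original tests):
+     * spells out the forward-scan gap semantics the two tests above
+     * exercise, assuming expectedPrevLsn is the LSN of the last mapped
+     * VLSN before the gap.
+     */
+    @SuppressWarnings("unused")
+    private void checkForwardGapSketch(VLSNIndex vlsnIndex,
+                                       long gapVlsn,
+                                       long expectedPrevLsn) {
+        ForwardVLSNScanner f = new ForwardVLSNScanner(vlsnIndex);
+        /* Position the scanner; a gap start falls back to the previous LSN. */
+        assertEquals(expectedPrevLsn, f.getStartingLsn(new VLSN(gapVlsn)));
+        /* A precise lookup in a gap finds nothing... */
+        assertEquals(DbLsn.NULL_LSN, f.getPreciseLsn(new VLSN(gapVlsn)));
+        /* ...but an approximate lookup falls back to the previous LSN. */
+        assertEquals(expectedPrevLsn, f.getApproximateLsn(new VLSN(gapVlsn)));
+    }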
+    /*
+     * The buckets with the given holes are created; then VLSNScanners are
+     * used to scan the buckets forwards and backwards.
+     */
+    private void scanNonContiguousBucketWithHoles(long fileNum,
+                                                  long offset,
+                                                  int stride,
+                                                  int maxMappings,
+                                                  int maxDist,
+                                                  int numEntries,
+                                                  long[] holesInAscOrder,
+                                                  long[] expectedVLSN,
+                                                  boolean flush)
+        throws Throwable {
+
+        /* Use a standalone env to simplify the test - no rep env required. */
+        Environment env = makeEnvironment();
+        /* The vlsnIndex is used to put and read VLSNs. */
+        VLSNIndex vlsnIndex = null;
+        try {
+
+            /*
+             * Create a vlsnIndex with the given stride, maxMappings and
+             * maxDist, under the given environment and mapping database.
+             */
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      testMapDb, new NameIdPair("n1", 1),
+                                      stride, maxMappings, maxDist,
+                                      new RecoveryInfo());
+
+            /*
+             * Put some mappings in the buckets through vlsnIndex. We create
+             * holes in the buckets, according to the given holes parameter.
+             */
+            for (int i = 1; i <= numEntries; i++) {
+
+                /*
+                 * Since holes[] is already in sorted order, we can use
+                 * Arrays.binarySearch to check if an item is in holes[].
+                 */
+                if (java.util.Arrays.binarySearch(holesInAscOrder, i) < 0) {
+                    /* If it is not a hole, insert it into the VLSNIndex. */
+                    putEntryToVLSNIndex(i, fileNum, offset, vlsnIndex);
+                }
+            }
+            if (flush) {
+                vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC);
+            }
+
+            TreeMap<VLSN, Long> expected = new TreeMap<VLSN, Long>();
+
+            /*
+             * We expect these mappings. These expected mappings assist in
+             * checking the correctness of the scanning process.
+             */
+            makeExpectedMapping(expected, expectedVLSN, fileNum, offset);
+            VLSNRange range = vlsnIndex.getRange();
+            assertEquals(expected.firstKey(), range.getFirst());
+            assertEquals(expected.lastKey(), range.getLast());
+
+            /*
+             * Scan the VLSN buckets forwards and backwards, starting from
+             * every VLSN.
+             */
+            for (int i = 1; i <= numEntries; i++) {
+                forwardScanning(vlsnIndex, numEntries, expected, i);
+                backwardScanning(vlsnIndex, expected, i);
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (vlsnIndex != null) {
+                vlsnIndex.close();
+            }
+            env.removeDatabase(null, testMapDb);
+            env.close();
+        }
+    }
+
+    /*
+     * [#17765] In the third case, a ghostBucket is created and inserted at
+     * the beginning of the VLSN range, i.e., the ghostBucket is the first
+     * in the bucket chain. A ghostBucket is a placeholder for a set of
+     * unknown VLSNs.
+     *
+     * We use the following test logic to ensure the quality of VLSNBucket:
+     * 1. Use ForwardVLSNScanner and BackwardVLSNScanner to travel through
+     *    the buckets.
+     * 2. In forward scanning, if a missing VLSN is visited, and this
+     *    missing VLSN is not in the ghostBucket, the previous LSN will be
+     *    returned. If the missing VLSN is in the ghostBucket, NULL_LSN
+     *    will be returned.
+     * 3. In backward scanning, if a missing VLSN (whether in the
+     *    ghostBucket or not) is visited, NULL_LSN will be returned.
+     * 4. Make sure no exception is thrown during the scanning. Any
+     *    exception means that there are some problems in the VLSNBucket
+     *    scanning process.
+     */
+    @Test
+    public void testNonContiguousGhostBucket()
+        throws Throwable {
+
+        /* Use a standalone env to simplify the test - no rep env required. */
+        Environment env = makeEnvironment();
+        VLSNIndex vlsnIndex = null;
+        DatabaseImpl mappingDbImpl = null;
+        Database mappingDb = null;
+        /* The JE database log file (.jdb file) number. */
+        long fileNum = 33;
+        /* The offset between two contiguous VLSNs in the log file. */
+        long offset = 100;
+        /* The stride between two neighbor VLSN sequences in one bucket. */
+        int stride = 3;
+        /* The max number of VLSN->LSN mappings in one bucket. */
+        int maxMappings = 4;
+        /* The max distance between two neighbor VLSNs in one bucket. */
+        int maxDist = 1000;
+        /* The number of VLSN entries in one bucket. */
+        int numEntries = 40;
+
+        try {
+
+            /*
+             * Create a vlsnIndex with the given stride, maxMappings and
+             * maxDist, under the given environment and mapping database.
+             */
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      testMapDb, new NameIdPair("n1", 1),
+                                      stride, maxMappings, maxDist,
+                                      new RecoveryInfo());
+
+            /* Get the mapping database. */
+            DatabaseConfig dbConfig = new DatabaseConfig();
+            mappingDb = env.openDatabase(null, testMapDb, dbConfig);
+            mappingDbImpl = DbInternal.getDbImpl(mappingDb);
+
+            /*
+             * Create a GhostBucket for bucket1.
+             * This ghostBucket represents the unknown VLSNs 1-12 at the
+             * beginning of the VLSN range.
+             */
+            VLSNBucket placeholder = new GhostBucket
+                (new VLSN(1),
+                 DbLsn.makeLsn(fileNum, offset),
+                 13 * offset);
+            TransactionConfig config = new TransactionConfig();
+            config.setDurability(Durability.COMMIT_NO_SYNC);
+            Txn txn = Txn.createLocalTxn(DbInternal.getNonNullEnvImpl(env),
+                                         config);
+            boolean success = false;
+            try {
+                /* Write the GhostBucket to the mapping database. */
+                placeholder.writeToDatabase(DbInternal.getNonNullEnvImpl(env),
+                                            mappingDbImpl, txn);
+                success = true;
+            } finally {
+                if (success) {
+                    txn.commit();
+                } else {
+                    txn.abort();
+                }
+            }
+
+            /*
+             * We create holes in the buckets; each hole misses one VLSN (in
+             * this test case, the missing VLSNs are 24 and 36).
+             *
+             * With the strides and holes, we expect the buckets to end up
+             * like:
+             * Bucket 1 = GhostBucket (has been put before)
+             * Bucket 2 = { vlsn = 13, 16, 19, 22, 23 }
+             * Bucket 3 = { vlsn = 25, 28, 31, 34, 35 }
+             * Bucket 4 = { vlsn = 37, 40 }
+             */
+            for (int i = 13; i <= numEntries; i++) {
+                if (i != 24 && i != 36) {
+                    putEntryToVLSNIndex(i, fileNum, offset, vlsnIndex);
+                }
+            }
+
+            /*
+             * We expect these mappings. These expected mappings assist in
+             * checking the correctness of the scanning process.
+             */
+            long[] expectedVLSN = {
+                13, 16, 19, 22, 23, 25,
+                28, 31, 34, 35, 37, 40
+            };
+            TreeMap<VLSN, Long> expected = new TreeMap<VLSN, Long>();
+            makeExpectedMapping(expected, expectedVLSN, fileNum, offset);
+
+            VLSNRange range = vlsnIndex.getRange();
+            assertEquals(expected.firstKey(), range.getFirst());
+            assertEquals(expected.lastKey(), range.getLast());
+
+            /* Scan the VLSN buckets forwards and backwards. */
+            for (int i = 1; i <= numEntries; i++) {
+                forwardScanning(vlsnIndex, numEntries, expected, i);
+                backwardScanning(vlsnIndex, expected, i);
+            }
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            if (vlsnIndex != null) {
+                vlsnIndex.close();
+            }
+            if (mappingDb != null) {
+                mappingDb.close();
+            }
+            env.removeDatabase(null, testMapDb);
+            env.close();
+        }
+    }
+
+    /* Generate a TestLogItem and insert it into the VLSNIndex. */
+    private void putEntryToVLSNIndex(int pos,
+                                     long fileNum,
+                                     long offset,
+                                     VLSNIndex vlsnIndex)
+        throws Throwable {
+
+        VLSN vlsn = new VLSN(pos);
+        long lsn = DbLsn.makeLsn(fileNum, pos * offset);
+        /* We create TestLogItems with the VLSN->LSN mappings. */
+        TestLogItem logItem = new TestLogItem
+            (vlsn, lsn, LogEntryType.LOG_INS_LN_TRANSACTIONAL.getTypeNum());
+        vlsnIndex.put(logItem);
+    }
+
+    /* Generate the expected VLSN->LSN mapping. */
+    private void makeExpectedMapping(TreeMap<VLSN, Long> expected,
+                                     long[] vlsnSet,
+                                     long fileNum,
+                                     long offSet) {
+        assert(expected != null) : "expected TreeMap is null";
+        for (int i = 0; i < vlsnSet.length; i++) {
+            expected.put(new VLSN(vlsnSet[i]),
+                         DbLsn.makeLsn(fileNum, vlsnSet[i] * offSet));
+        }
+    }
+
+    /* Scan the VLSN buckets forwards. */
+    private void forwardScanning(VLSNIndex vlsnIndex,
+                                 int numEntries,
+                                 TreeMap<VLSN, Long> expected,
+                                 int startVLSN) {
+        Long startLsn;
+        ForwardVLSNScanner fScanner = new ForwardVLSNScanner(vlsnIndex);
+        startLsn = fScanner.getStartingLsn(new VLSN(startVLSN));
+
+        /*
+         * expectedStartVLSN is not equal to startVLSN when the startVLSN is
+         * in a gap. For example, if there are buckets (1,3,5) and (7,9,10),
+         * and the startVLSN is 6, then the expectedStartVLSN should be 5.
+         * expectedStartVLSN is found in the expected mapping set.
+         */
+        long expectedStartVLSN;
+        boolean ifGhostBucket = true;
+        /* Search downwards in the expected mappings for expectedStartVLSN. */
+        for (expectedStartVLSN = startVLSN; expectedStartVLSN >= 1;
+             expectedStartVLSN--) {
+            Long expectedLsn = expected.get(new VLSN(expectedStartVLSN));
+            if (expectedLsn != null) {
+                /* We have found the expectedStartVLSN. */
+                ifGhostBucket = false;
+                break;
+            }
+        }
+
+        /*
+         * One of the motivations for this test:
+         * Suppose the vlsn index and buckets are (1,3,5) and (7,9,10) and
+         * there is a forward scan. The correct scan should be:
+         * getStartingLsn(3) would return the lsn for vlsn 3,
+         * getStartingLsn(4) would return the lsn for vlsn 3,
+         * getStartingLsn(5) would return the lsn for vlsn 5,
+         * getStartingLsn(6) would return the lsn for vlsn 5, rather than 7.
+         *
+         * The startVLSN is not in the ghostBucket.
+         */
+        if (!ifGhostBucket) {
+            assertEquals(expected.get(new VLSN(expectedStartVLSN)), startLsn);
+        } else {
+            /* The startVLSN is in the ghostBucket. */
+            VLSNBucket bucket =
+                vlsnIndex.getLTEBucketFromDatabase(new VLSN(startVLSN));
+            assertEquals(GhostBucket.class, bucket.getClass());
+        }
+
+        /* Start forward scanning from the found startVLSN. */
+        for (long i = startVLSN; i <= numEntries; i++) {
+            VLSN vlsn = new VLSN(i);
+            Long expectedLsn = expected.get(vlsn);
+            Long scannerLsn = fScanner.getPreciseLsn(vlsn);
+
+            if (expectedLsn == null) {
+                assertEquals((Long) DbLsn.NULL_LSN, scannerLsn);
+
+                /*
+                 * If there's no exact match, approximate search should
+                 * return the one just previous. If the VLSN is in the
+                 * ghostBucket, there is no previous VLSN in the expected
+                 * mapping set.
+                 */
+                Long prevLsn = null;
+                for (long find = i - 1; find >= 0; find--) {
+                    prevLsn = expected.get(new VLSN(find));
+                    if (prevLsn != null) {
+                        break;
+                    }
+                }
+
+                /* If the vlsn is not in a ghostbucket. */
+                if (prevLsn != null) {
+                    assertEquals(prevLsn,
+                                 (Long) fScanner.getApproximateLsn(vlsn));
+                } else {
+                    /* If the vlsn is in a ghostbucket. */
+                    VLSNBucket bucket =
+                        vlsnIndex.getLTEBucketFromDatabase(vlsn);
+                    assertEquals(GhostBucket.class, bucket.getClass());
+                }
+            } else {
+                assertEquals(expectedLsn, scannerLsn);
+                assertEquals
+                    (expectedLsn, (Long) fScanner.getApproximateLsn(vlsn));
+            }
+        }
+    }
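+
+    /*
+     * Editorial helper mirroring checkForwardGapSketch above for the
+     * backward direction (hypothetical, not called by the original tests):
+     * getStartingLsn falls forward to the next mapped VLSN, and a precise
+     * lookup in the gap still returns NULL_LSN.
+     */
+    @SuppressWarnings("unused")
+    private void checkBackwardGapSketch(VLSNIndex vlsnIndex,
+                                        long gapVlsn,
+                                        long expectedNextLsn) {
+        BackwardVLSNScanner b = new BackwardVLSNScanner(vlsnIndex);
+        assertEquals(expectedNextLsn, b.getStartingLsn(new VLSN(gapVlsn)));
+        assertEquals(DbLsn.NULL_LSN, b.getPreciseLsn(new VLSN(gapVlsn)));
+    }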
+    /* Scan the VLSN buckets backwards. */
+    private void backwardScanning(VLSNIndex vlsnIndex,
+                                  TreeMap<VLSN, Long> expected,
+                                  int startVLSN) {
+        Long startLsn;
+        BackwardVLSNScanner bScanner = new BackwardVLSNScanner(vlsnIndex);
+        startLsn = bScanner.getStartingLsn(new VLSN(startVLSN));
+
+        /*
+         * expectedStartVLSN is not equal to startVLSN when the startVLSN is
+         * in a gap. For example, if there are buckets (1,3,5) and (7,9,10),
+         * and the startVLSN is 6, then the expectedStartVLSN should be 7.
+         * expectedStartVLSN is found in the expected mapping set.
+         */
+        long expectedStartVLSN;
+        boolean ifGhostBucket = true;
+        /* Search upwards in the expected mappings for expectedStartVLSN. */
+        for (expectedStartVLSN = startVLSN;
+             expectedStartVLSN <= expected.lastKey().getSequence();
+             expectedStartVLSN++) {
+            Long expectedLsn = expected.get(new VLSN(expectedStartVLSN));
+            if (expectedLsn != null) {
+                /* We have found the expectedStartVLSN. */
+                ifGhostBucket = false;
+                break;
+            }
+        }
+
+        /*
+         * One of the motivations for this test:
+         * Suppose the vlsn index and buckets are (1,3,5) and (7,9,10) and
+         * there is a backward scan. The correct scan should be:
+         * getStartingLsn(9) would return the lsn for vlsn 9,
+         * getStartingLsn(8) would return the lsn for vlsn 9,
+         * getStartingLsn(7) would return the lsn for vlsn 7,
+         * getStartingLsn(6) would return the lsn for vlsn 7, rather than 5.
+         *
+         * The startVLSN is not in the ghostBucket.
+         */
+        if (!ifGhostBucket) {
+            assertEquals(expected.get(new VLSN(expectedStartVLSN)), startLsn);
+        } else {
+            /* The startVLSN is in the ghostBucket. */
+            VLSNBucket bucket =
+                vlsnIndex.getLTEBucketFromDatabase(new VLSN(startVLSN));
+            assertEquals(GhostBucket.class, bucket.getClass());
+        }
+
+        /* Start backward scanning from the decided startVLSN. */
+        for (long i = startVLSN; i >= 1; i--) {
+            VLSN vlsn = new VLSN(i);
+            Long expectedLsn = expected.get(vlsn);
+            Long scannerLsn = bScanner.getPreciseLsn(vlsn);
+            if (expectedLsn == null) {
+                assertEquals((Long) DbLsn.NULL_LSN, scannerLsn);
+
+                /* Determine whether the vlsn is in a ghostbucket. */
+                Long prevLsn = null;
+                for (long find = i - 1; find >= 1; find--) {
+                    prevLsn = expected.get(new VLSN(find));
+                    if (prevLsn != null) {
+                        break;
+                    }
+                }
+
+                /*
+                 * If the vlsn is in a ghostbucket, there is no previous
+                 * VLSN in the expected mapping set.
+                 */
+                if (prevLsn == null) {
+                    VLSNBucket bucket =
+                        vlsnIndex.getLTEBucketFromDatabase(vlsn);
+                    assertEquals(GhostBucket.class, bucket.getClass());
+                }
+            } else {
+                assertEquals(expectedLsn, scannerLsn);
+            }
+        }
+    }
+
+    /**
+     * Tests a timing window when a VLSNIndex flush occurs during a call to
+     * VLSNIndex.getGTEBucket, which resulted in a "Can't Find GTE Bucket for
+     * VLSN XXX" error. See the SR for details.
+     */
+    @Test
+    public void testSR20726GTESearch() throws Throwable {
+
+        int stride = 5;
+        int maxMappings = 2;
+        int maxDist = 1000;
+
+        Environment env = makeEnvironment();
+        VLSNIndex vlsnIndex = null;
+
+        try {
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      testMapDb, new NameIdPair("n1", 1),
+                                      stride, maxMappings, maxDist,
+                                      new RecoveryInfo());
+
+            /*
+             * Put some mappings in. With the strides, we expect them to
+             * end up in these buckets.
+             * Bucket 1 = vlsn 1, 6, 10
+             * Bucket 2 = vlsn 11, 16, 20
+             * Bucket 3 = vlsn 21, 25
+             */
+            for (int i = 1; i <= 25; i++) {
+                putEntryToVLSNIndex(i, 33, 100, vlsnIndex);
+            }
+
+            /* Make them persistent. */
+            vlsnIndex.flushToDatabase(Durability.COMMIT_SYNC);
+            VLSN target = new VLSN(22);
+            VLSNBucket foundBucket = vlsnIndex.getGTEBucket(target, null);
+            assertEquals(new VLSN(21), foundBucket.getFirst());
+            assertEquals(new VLSN(25), foundBucket.getLast());
+
+            /*
+             * Add more mappings to the tracker which start a different
+             * bucket. This bucket will be found in the tracker; it hasn't
+             * been flushed.
+             *
+             * Bucket 4 = vlsn 26, 30
+             */
+            for (int i = 26; i <= 30; i++) {
+                putEntryToVLSNIndex(i, 34, 100, vlsnIndex);
+            }
+            foundBucket = vlsnIndex.getGTEBucket(target, null);
+            assertEquals(new VLSN(21), foundBucket.getFirst());
+            assertEquals(new VLSN(25), foundBucket.getLast());
+
+            /*
+             * Now provoke a call to flushToDatabase while we call
+             * getGTEBucket. This mimics what happens when a feeder is
+             * running and a checkpointer flushes the index. Before SR 20726
+             * was fixed, this resulted in an EnvironmentFailureException out
+             * of the getGTEBucket call.
+             */
+            FlushVLSNIndex hook = new FlushVLSNIndex(vlsnIndex);
+            vlsnIndex.setGTEHook(hook);
+            foundBucket = vlsnIndex.getGTEBucket(target, null);
+            assertEquals(new VLSN(21), foundBucket.getFirst());
+            assertEquals(new VLSN(25), foundBucket.getLast());
+            assertTrue(hook.wasExecuted());
+
+        } finally {
+            if (vlsnIndex != null) {
+                vlsnIndex.close();
+            }
+
+            env.removeDatabase(null, testMapDb);
+            env.close();
+        }
+    }
+
+    /** Force a flush of the vlsn index. */
+    private static class FlushVLSNIndex implements TestHook {
+
+        private final VLSNIndex index;
+        private boolean executed;
+
+        FlushVLSNIndex(VLSNIndex index) {
+            this.index = index;
+        }
+
+        @Override
+        public void doHook() {
+            index.flushToDatabase(Durability.COMMIT_SYNC);
+            executed = true;
+        }
+
+        public boolean wasExecuted() {
+            return executed;
+        }
+
+        @Override
+        public void hookSetup() {
+        }
+
+        @Override
+        public void doIOHook() throws IOException {
+        }
+
+        @Override
+        public void doHook(Object obj) {
+        }
+
+        @Override
+        public Object getHookValue() {
+            return null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/rep/vlsn/VLSNIndexTruncateTest.java b/test/com/sleepycat/je/rep/vlsn/VLSNIndexTruncateTest.java
new file mode 100644
index 0000000..f3cd623
--- /dev/null
+++ b/test/com/sleepycat/je/rep/vlsn/VLSNIndexTruncateTest.java
@@ -0,0 +1,508 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.rep.vlsn;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.recovery.RecoveryInfo;
+import com.sleepycat.je.rep.impl.node.NameIdPair;
+import com.sleepycat.je.rep.util.TestLogItem;
+import com.sleepycat.je.rep.vlsn.VLSNIndex.ForwardVLSNScanner;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.VLSN;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+import org.junit.Test;
+
+/**
+ * Exercise VLSNIndex truncation
+ */
+public class VLSNIndexTruncateTest extends TestBase {
+
+    private final boolean verbose = Boolean.getBoolean("verbose");
+    private final File envRoot;
+
+    public VLSNIndexTruncateTest() {
+        envRoot = SharedTestUtils.getTestDir();
+    }
+
+    private Environment makeEnvironment()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(false);
+        return new Environment(envRoot, envConfig);
+    }
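+
+    /*
+     * Editorial note: VLPair, used throughout the tests below, is defined
+     * elsewhere in this source (not visible in this hunk). The shape the
+     * code relies on is assumed to be roughly (a sketch, not the actual
+     * declaration):
+     *
+     *   static class VLPair {
+     *       final VLSN vlsn;
+     *       final long lsn;
+     *       VLPair(VLSN vlsn, long lsn) { ... }
+     *   }
+     */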
+    /**
+     * Test head truncate extensively. Load up a VLSNIndex. Vary these
+     * factors:
+     * - truncate at every vlsn
+     * - flush at every vlsn, so that we exercise truncating when mappings
+     *   are in the tracker, when they're in the database, and when they're
+     *   in both places.
+     */
+    @Test
+    public void testHeadTruncateManyFiles()
+        throws Throwable {
+
+        int firstVal = 1;
+        int lastVal = 40;
+
+        Environment env = makeEnvironment();
+        try {
+            /*
+             * Load up a set of expected values. Each VLSN is in its own
+             * file.
+             */
+            List<VLPair> expected = new ArrayList<VLPair>();
+            for (int i = firstVal; i <= lastVal; i++) {
+                VLPair m = new VLPair(new VLSN(i), DbLsn.makeLsn(i, i));
+                expected.add(m);
+            }
+
+            /* Truncate and verify. */
+            for (int flushPoint = 0; flushPoint < lastVal; flushPoint++) {
+                TruncateTester tester = new HeadTruncater(env,
+                                                          flushPoint,
+                                                          verbose,
+                                                          expected);
+                tester.runTest();
+            }
+        } finally {
+            env.close();
+        }
+    }
+
+    @Test
+    public void testHeadTruncateSeveralFiles()
+        throws Throwable {
+
+        int firstVal = 1;
+        int lastVal = 40;
+
+        Environment env = makeEnvironment();
+
+        try {
+            /* Load up a set of expected values. 8 VLSNs are in each file. */
+            List<VLPair> expected = new ArrayList<VLPair>();
+            for (int i = firstVal; i <= lastVal; i++) {
+                VLPair m = new VLPair(new VLSN(i), DbLsn.makeLsn(i/8, i));
+                expected.add(m);
+            }
+
+            /* Truncate and verify. */
+            for (int flushPoint = 0; flushPoint < lastVal; flushPoint++) {
+                TruncateTester tester =
+                    new HeadTruncater(env,
+                                      flushPoint,
+                                      verbose,
+                                      expected) {
+                        @Override
+                        boolean skipMapping(VLPair m) {
+                            /*
+                             * Skip everything except the last VLSN of each
+                             * file.
+                             */
+                            return (m.vlsn.getSequence() % 8) != 7;
+                        }
+                    };
+                tester.runTest();
+            }
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Make the first vlsn in every file go out of order, and therefore be
+     * skipped. At truncation, we have to add a ghost bucket.
+     *
+     * Specifically, load vlsns 1->40 in this order, with 8 vlsns per file,
+     * so that the first vlsn of a new file is loaded out of order, and is
+     * skipped:
+     *
+     *   vlsnIndex.put(vlsn=2, lsn=1/2)
+     *   vlsnIndex.put(vlsn=1, lsn=1/0)
+     *   vlsnIndex.put(vlsn=3, lsn=1/3)
+     *   ...
+     *   vlsnIndex.put(vlsn=9, lsn=2/9)
+     *   vlsnIndex.put(vlsn=8, lsn=2/0)
+     *   vlsnIndex.put(vlsn=10, lsn=2/10)
+     *   ...
+     *
+     * That results in a vlsn index full of buckets that have a "skipped"
+     * mapping at the head of each one.
+     *
+     * Then truncate the vlsnindex on file boundaries. Specifically, truncate
+     * at lsn 7, 15, 23, etc.
+     *
+     * Then try to scan the vlsn index. It should have adjusted for the fact
+     * that the first vlsn is not there.
+     * TODO: disabled while placeholder bucket work is in progress.
+     */
+    @Test
+    public void testHeadTruncateoutOfOrderMappings()
+        throws Throwable {
+
+        int firstVal = 1;
+        int lastVal = 40;
+
+        Environment env = makeEnvironment();
+        /*
+         * This test needs about 6 files, because log cleaning related
+         * truncation of the vlsn index looks at the files in the directory
+         * to create ghost buckets. Make fake files, to mimic what would
+         * happen if we were really logging vlsns.
+         */
+        for (int i = 1; i < 7; i++) {
+            File f = new File(envRoot, "0000000" + i + ".jdb");
+            assertTrue(f.createNewFile());
+        }
+
+        try {
+            /* Load up a set of expected values. 8 VLSNs are in each file. */
+            List<VLPair> expected = new ArrayList<VLPair>();
+            for (int i = firstVal; i <= lastVal; i++) {
+
+                /*
+                 * The first vlsn that should be in a file (1, 9, 17, etc.)
+                 * will have an lsn of file/0 offset, because the ghost
+                 * bucket will provide that as the lsn.
+                 */
+                long lsn = ((i % 8) == 1) ?
+                    DbLsn.makeLsn(i/8, 0) :
+                    DbLsn.makeLsn((i-1)/8, i);
+
+                VLPair m = new VLPair(new VLSN(i), lsn);
+                expected.add(m);
+            }
+
+            /* Truncate and verify. */
+            for (int flushPoint = 0; flushPoint < lastVal; flushPoint++) {
+                TruncateTester tester =
+                    new HeadTruncater(env,
+                                      flushPoint,
+                                      verbose,
+                                      expected) {
+                        @Override
+                        boolean skipMapping(VLPair m) {
+                            /*
+                             * Skip mappings for everything except the last
+                             * VLSN of each file.
+                             */
+                            return (m.vlsn.getSequence() % 8) != 0;
+                        }
+
+                        @Override
+                        void loadMappings() {
+                            /*
+                             * Load them in an out of order fashion, creating
+                             * buckets that are missing the first mapping.
+                             */
+                            for (int i = 0; i < expected.size(); i += 8) {
+                                loadOne(i+1);
+                                loadOne(i);
+                                loadOne(i+2);
+                                loadOne(i+4);
+                                loadOne(i+5);
+                                loadOne(i+3);
+                                loadOne(i+6);
+                                loadOne(i+7);
+                            }
+                        }
+
+                        private void loadOne(int index) {
+                            VLPair m = expected.get(index);
+                            vlsnIndex.put(new TestLogItem(m.vlsn, m.lsn,
+                                                          lnType));
+                            if (m.vlsn.getSequence() == flushPoint) {
+                                vlsnIndex.flushToDatabase
+                                    (Durability.COMMIT_NO_SYNC);
+                            }
+                        }
+                    };
+                tester.runTest();
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Test tail truncate extensively. Load up a VLSNIndex. Vary these
+     * factors:
+     * - truncate at every vlsn
+     * - flush at every vlsn, so that we exercise truncating when mappings
+     *   are in the tracker, when they're in the database, and when they're
+     *   in both places.
+     */
+    @Test
+    public void testTailTruncate()
+        throws Throwable {
+
+        int firstVal = 1;
+        int lastVal = 40;
+
+        Environment env = makeEnvironment();
+
+        try {
+
+            /*
+             * Load up a set of expected values. Each VLSN is in the same
+             * file, in order to load up the mappings.
+             */
+            List<VLPair> expected = new ArrayList<VLPair>();
+            for (int i = firstVal; i <= lastVal; i++) {
+                VLPair m = new VLPair(new VLSN(i), i);
+                expected.add(m);
+            }
+
+            for (int flushPoint = 0; flushPoint < lastVal; flushPoint++) {
+                TruncateTester tester = new TailTruncater(env,
+                                                          flushPoint,
+                                                          verbose,
+                                                          expected);
+                tester.runTest();
+            }
+        } finally {
+            env.close();
+        }
+    }
+
+    /**
+     * Test VLSNIndex.truncateFromTail
+     */
+    private static class TailTruncater extends TruncateTester {
+
+        TailTruncater(Environment env,
+                      int flushPoint,
+                      boolean verbose,
+                      List<VLPair> expected) {
+            super(env, flushPoint, verbose, expected);
+        }
+
+        @Override
+        void doTruncate(VLPair deletePoint)
+            throws DatabaseException {
+
+            vlsnIndex.truncateFromTail(deletePoint.vlsn, deletePoint.lsn-1);
+            if (verbose) {
+                System.out.println(debugHeader);
+            }
+
+            assertTrue(debugHeader, vlsnIndex.verify(verbose));
+
+            if (deletePoint.vlsn.equals(expected.get(0).vlsn)) {
+                postTruncateFirst = VLSN.NULL_VLSN;
+                postTruncateLast = VLSN.NULL_VLSN;
+            } else {
+                postTruncateFirst = expected.get(0).vlsn;
+                postTruncateLast = deletePoint.vlsn.getPrev();
+            }
+        }
+    }
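+
+    /*
+     * Editorial sketch (hypothetical, unused): the range arithmetic the two
+     * truncaters assert. Tail truncation at deletePoint leaves
+     * [first, deletePoint - 1]; head truncation leaves
+     * [deletePoint + 1, last]; either collapses to NULL_VLSN when it
+     * consumes the whole range.
+     */
+    @SuppressWarnings("unused")
+    private static VLSN expectedLastAfterTailTruncate(VLSN first,
+                                                      VLSN deletePoint) {
+        /* Deleting from the first VLSN empties the index entirely. */
+        return deletePoint.equals(first) ? VLSN.NULL_VLSN
+                                         : deletePoint.getPrev();
+    }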
+    /**
+     * Test VLSNIndex.truncateFromHead
+     */
+    private static class HeadTruncater extends TruncateTester {
+
+        HeadTruncater(Environment env,
+                      int flushPoint,
+                      boolean verbose,
+                      List<VLPair> expected) {
+            super(env, flushPoint, verbose, expected);
+        }
+
+        @Override
+        void doTruncate(VLPair deletePoint)
+            throws DatabaseException {
+
+            if (verbose) {
+                System.out.println("----" + debugHeader);
+            }
+            vlsnIndex.truncateFromHead(deletePoint.vlsn,
+                                       DbLsn.getFileNumber(deletePoint.lsn));
+
+            assertTrue(debugHeader, vlsnIndex.verify(verbose));
+
+            VLSN lastVLSN = expected.get(expected.size()-1).vlsn;
+
+            if (deletePoint.vlsn.equals(lastVLSN)) {
+                /* We deleted everything out of the index. */
+                postTruncateFirst = VLSN.NULL_VLSN;
+                postTruncateLast = VLSN.NULL_VLSN;
+            } else {
+                postTruncateFirst = deletePoint.vlsn.getNext();
+                postTruncateLast = lastVLSN;
+            }
+        }
+    }
+
+    /**
+     * TruncateTesters truncate a VLSNIndex from either the head or the tail
+     * and then check that the range and mappings are as expected.
+     */
+    private abstract static class TruncateTester {
+
+        private static final String testMapDb = "TEST_MAP_DB";
+
+        private final int stride = 5;
+        private final int maxMappings = 4;
+        private final int maxDist = 1000;
+        private final Environment env;
+        protected final boolean verbose;
+        protected final int flushPoint;
+
+        protected VLSNIndex vlsnIndex;
+        protected VLSN postTruncateFirst;
+        protected VLSN postTruncateLast;
+        protected String debugHeader;
+        protected final byte lnType =
+            LogEntryType.LOG_INS_LN_TRANSACTIONAL.getTypeNum();
+
+        protected List<VLPair> expected;
+
+        TruncateTester(Environment env,
+                       int flushPoint,
+                       boolean verbose,
+                       List<VLPair> expected) {
+            this.env = env;
+            this.flushPoint = flushPoint;
+            this.verbose = verbose;
+            this.expected = expected;
+        }
+
+        /**
+         * Create a VLSNIndex loaded with the values in the expected List.
+         */
+        private void initIndex()
+            throws DatabaseException {
+
+            vlsnIndex = new VLSNIndex(DbInternal.getNonNullEnvImpl(env),
+                                      testMapDb, new NameIdPair("n1", 1),
+                                      stride, maxMappings, maxDist,
+                                      new RecoveryInfo());
+            loadMappings();
+        }
+
+        void loadMappings() {
+            for (VLPair m : expected) {
+                vlsnIndex.put(new TestLogItem(m.vlsn, m.lsn, lnType));
+                if (m.vlsn.getSequence() == flushPoint) {
+                    vlsnIndex.flushToDatabase(Durability.COMMIT_NO_SYNC);
+                }
+            }
+        }
+
+        /*
+         * @return true if we should not test truncation at this VLSN. In
+         * real life, head truncation always happens on the last VLSN in the
+         * file, because head truncation is done by the cleaner, which
+         * searches out the last vlsn. Tail truncation can specify any vlsn
+         * as the truncation point.
+         */
+        boolean skipMapping(VLPair m) {
+            return false;
+        }
+
+        void runTest()
+            throws Throwable {
+
+            for (VLPair mapping : expected) {
+                try {
+                    if (skipMapping(mapping)) {
+                        continue;
+                    }
+
+                    initIndex();
+
+                    /* Truncate the VLSNIndex. */
+                    debugHeader = "deletePoint=" + mapping.vlsn +
+                        " flushPoint=" + flushPoint;
+                    doTruncate(mapping);
+
+                    /* Check the range. */
+                    VLSNRange truncatedRange = vlsnIndex.getRange();
+                    assertEquals(postTruncateFirst, truncatedRange.getFirst());
+                    assertEquals(postTruncateLast, truncatedRange.getLast());
+
+                    if (postTruncateFirst.equals(VLSN.NULL_VLSN)) {
+                        continue;
+                    }
+
+                    /*
+                     * Scan the index and check all mappings. We've already
+                     * verified the index, so we can use a dump of the
+                     * mappings from the index to verify the scanner results.
+ */ + Map dumpedMappings = vlsnIndex.dumpDb(verbose); + ForwardVLSNScanner scanner = + new ForwardVLSNScanner(vlsnIndex); + long startLsn = scanner.getStartingLsn(postTruncateFirst); + + long expectedIndex = postTruncateFirst.getSequence() - 1L; + assertEquals(new Long(expected.get((int)expectedIndex).lsn), + new Long(startLsn)); + + for (VLPair m : expected) { + if ((m.vlsn.compareTo(postTruncateFirst) >= 0) && + (m.vlsn.compareTo(postTruncateLast) <= 0)) { + + + Long onDiskLsn = dumpedMappings.get(m.vlsn); + if (onDiskLsn == null) { + continue; + } + + long scannedLsn = scanner.getPreciseLsn(m.vlsn); + if (onDiskLsn.longValue() != DbLsn.NULL_LSN) { + assertEquals(onDiskLsn.longValue(), scannedLsn); + } + } + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (vlsnIndex != null) { + vlsnIndex.close(); + vlsnIndex = null; + + /* + * Remove the on-disk mapping database which represents + * the persistent storage of the vlsn index, so each + * test run starts with a clean slate. + */ + env.removeDatabase(null, testMapDb); + } + } + } + } + + abstract void doTruncate(VLPair deletePoint) + throws DatabaseException; + } +} diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.BtreeStats.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.BtreeStats.out new file mode 100644 index 0000000..a491d74 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.BtreeStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.CommitToken.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.CommitToken.out new file mode 100644 index 0000000..d3bc96c Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.CommitToken.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DatabaseExistsException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DatabaseExistsException.out new file mode 100644 index 0000000..c062dc2 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DatabaseExistsException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DatabaseNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DatabaseNotFoundException.out new file mode 100644 index 0000000..01cddda Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DatabaseNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DeadlockException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DeadlockException.out new file mode 100644 index 0000000..2b4d4a7 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DeadlockException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DeleteConstraintException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DeleteConstraintException.out new file mode 100644 index 0000000..84fcec6 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DeleteConstraintException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DuplicateDataException.out 
b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DuplicateDataException.out new file mode 100644 index 0000000..60c5b47 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.DuplicateDataException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentFailureException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentFailureException.out new file mode 100644 index 0000000..b170f57 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentFailureException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentLockedException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentLockedException.out new file mode 100644 index 0000000..a3f8abd Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentLockedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentNotFoundException.out new file mode 100644 index 0000000..7ccad37 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentStats.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentStats.out new file mode 100644 index 0000000..74d2ec0 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.EnvironmentStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.ForeignConstraintException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.ForeignConstraintException.out new file mode 100644 index 0000000..42ac59f Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.ForeignConstraintException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockNotAvailableException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockNotAvailableException.out new file mode 100644 index 0000000..96009c7 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockNotAvailableException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockNotGrantedException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockNotGrantedException.out new file mode 100644 index 0000000..41524ed Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockNotGrantedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockStats.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockStats.out new file mode 100644 index 0000000..8bb7895 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockTimeoutException.out 
b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockTimeoutException.out new file mode 100644 index 0000000..ca6fe10 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LockTimeoutException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LogWriteException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LogWriteException.out new file mode 100644 index 0000000..44e2631 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.LogWriteException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.PreloadStats.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.PreloadStats.out new file mode 100644 index 0000000..caa2aa4 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.PreloadStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.PreloadStatus.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.PreloadStatus.out new file mode 100644 index 0000000..72826ec Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.PreloadStatus.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SecondaryIntegrityException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SecondaryIntegrityException.out new file mode 100644 index 0000000..e62c7ae Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SecondaryIntegrityException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceExistsException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceExistsException.out new file mode 100644 index 0000000..0fa3698 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceExistsException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceIntegrityException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceIntegrityException.out new file mode 100644 index 0000000..5d18ae4 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceIntegrityException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceNotFoundException.out new file mode 100644 index 0000000..c0f6258 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceOverflowException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceOverflowException.out new file mode 100644 index 0000000..9a1f3f4 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceOverflowException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceStats.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceStats.out new file mode 100644 index 
0000000..d4e5f32 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.SequenceStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.ThreadInterruptedException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.ThreadInterruptedException.out new file mode 100644 index 0000000..dc5deb8 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.ThreadInterruptedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionStats$Active.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionStats$Active.out new file mode 100644 index 0000000..3b0d1ee Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionStats$Active.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionStats.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionStats.out new file mode 100644 index 0000000..10813fa Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionTimeoutException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionTimeoutException.out new file mode 100644 index 0000000..140bd95 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.TransactionTimeoutException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.UniqueConstraintException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.UniqueConstraintException.out new file mode 100644 index 0000000..a39fb15 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.UniqueConstraintException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.VersionMismatchException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.VersionMismatchException.out new file mode 100644 index 0000000..589c248 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.VersionMismatchException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.XAFailureException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.XAFailureException.out new file mode 100644 index 0000000..00772e1 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.XAFailureException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.DatabasePreemptedException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.DatabasePreemptedException.out new file mode 100644 index 0000000..7a4aa56 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.DatabasePreemptedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.GroupShutdownException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.GroupShutdownException.out new file mode 100644 index 0000000..b5b80af Binary files /dev/null 
and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.GroupShutdownException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientAcksException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientAcksException.out new file mode 100644 index 0000000..2b0b1a5 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientAcksException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientLogException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientLogException.out new file mode 100644 index 0000000..e75d825 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientLogException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientReplicasException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientReplicasException.out new file mode 100644 index 0000000..59ad88e Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.InsufficientReplicasException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.LockPreemptedException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.LockPreemptedException.out new file mode 100644 index 0000000..6fd324f Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.LockPreemptedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.LogOverwriteException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.LogOverwriteException.out new file mode 100644 index 0000000..1737f76 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.LogOverwriteException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.MasterStateException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.MasterStateException.out new file mode 100644 index 0000000..a7109cc Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.MasterStateException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.MemberNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.MemberNotFoundException.out new file mode 100644 index 0000000..2484911 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.MemberNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicaConsistencyException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicaConsistencyException.out new file mode 100644 index 0000000..4e17056 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicaConsistencyException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicaWriteException.out 
b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicaWriteException.out new file mode 100644 index 0000000..2f4d5f1 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicaWriteException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicatedEnvironmentStats.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicatedEnvironmentStats.out new file mode 100644 index 0000000..ea14703 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.ReplicatedEnvironmentStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.RollbackException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.RollbackException.out new file mode 100644 index 0000000..20460a7 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.RollbackException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.RollbackProhibitedException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.RollbackProhibitedException.out new file mode 100644 index 0000000..d0bf0a4 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.RollbackProhibitedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.UnknownMasterException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.UnknownMasterException.out new file mode 100644 index 0000000..ca35bc3 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.rep.UnknownMasterException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.tree.CursorsExistException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.tree.CursorsExistException.out new file mode 100644 index 0000000..75c52df Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.tree.CursorsExistException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.tree.NodeNotEmptyException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.tree.NodeNotEmptyException.out new file mode 100644 index 0000000..f5cc3ec Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.tree.NodeNotEmptyException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.util.LogVerificationException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.util.LogVerificationException.out new file mode 100644 index 0000000..6af5fdf Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.util.LogVerificationException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.utilint.InternalException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.utilint.InternalException.out new file mode 100644 index 0000000..e842dd3 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.je.utilint.InternalException.out differ diff --git 
a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.IndexNotAvailableException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.IndexNotAvailableException.out new file mode 100644 index 0000000..7a822af Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.IndexNotAvailableException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.StoreExistsException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.StoreExistsException.out new file mode 100644 index 0000000..6b1f802 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.StoreExistsException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.StoreNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.StoreNotFoundException.out new file mode 100644 index 0000000..441d40b Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.StoreNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.evolve.DeletedClassException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.evolve.DeletedClassException.out new file mode 100644 index 0000000..91de029 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.evolve.DeletedClassException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.evolve.IncompatibleClassException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.evolve.IncompatibleClassException.out new file mode 100644 index 0000000..f016c95 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.persist.evolve.IncompatibleClassException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.IOExceptionWrapper.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.IOExceptionWrapper.out new file mode 100644 index 0000000..b3ba43d Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.IOExceptionWrapper.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.RuntimeExceptionWrapper.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.RuntimeExceptionWrapper.out new file mode 100644 index 0000000..947964f Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.RuntimeExceptionWrapper.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.keyrange.KeyRangeException.out b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.keyrange.KeyRangeException.out new file mode 100644 index 0000000..8aeb616 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.0.106/com.sleepycat.util.keyrange.KeyRangeException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.BtreeStats.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.BtreeStats.out new file mode 100644 index 0000000..a491d74 Binary files /dev/null and 
b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.BtreeStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.CommitToken.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.CommitToken.out new file mode 100644 index 0000000..d3bc96c Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.CommitToken.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DatabaseExistsException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DatabaseExistsException.out new file mode 100644 index 0000000..5b93f8d Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DatabaseExistsException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DatabaseNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DatabaseNotFoundException.out new file mode 100644 index 0000000..b3dd70f Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DatabaseNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DeadlockException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DeadlockException.out new file mode 100644 index 0000000..8ada6a8 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DeadlockException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DeleteConstraintException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DeleteConstraintException.out new file mode 100644 index 0000000..929eb02 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DeleteConstraintException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DuplicateDataException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DuplicateDataException.out new file mode 100644 index 0000000..5898645 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.DuplicateDataException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentFailureException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentFailureException.out new file mode 100644 index 0000000..88aee91 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentFailureException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentLockedException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentLockedException.out new file mode 100644 index 0000000..84ce8f7 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentLockedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentNotFoundException.out new file mode 100644 index 0000000..5c052d5 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentNotFoundException.out differ diff --git 
a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentStats.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentStats.out new file mode 100644 index 0000000..74d2ec0 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.EnvironmentStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.ForeignConstraintException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.ForeignConstraintException.out new file mode 100644 index 0000000..7ffdc89 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.ForeignConstraintException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockNotAvailableException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockNotAvailableException.out new file mode 100644 index 0000000..11abf43 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockNotAvailableException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockNotGrantedException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockNotGrantedException.out new file mode 100644 index 0000000..7920604 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockNotGrantedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockStats.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockStats.out new file mode 100644 index 0000000..8bb7895 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockTimeoutException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockTimeoutException.out new file mode 100644 index 0000000..fb8fb47 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LockTimeoutException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LogWriteException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LogWriteException.out new file mode 100644 index 0000000..fdcb7ac Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.LogWriteException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.PreloadStats.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.PreloadStats.out new file mode 100644 index 0000000..caa2aa4 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.PreloadStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.PreloadStatus.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.PreloadStatus.out new file mode 100644 index 0000000..72826ec Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.PreloadStatus.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SecondaryIntegrityException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SecondaryIntegrityException.out new file mode 100644 index 
0000000..f0a232c Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SecondaryIntegrityException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceExistsException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceExistsException.out new file mode 100644 index 0000000..36e1df1 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceExistsException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceIntegrityException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceIntegrityException.out new file mode 100644 index 0000000..763b74c Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceIntegrityException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceNotFoundException.out new file mode 100644 index 0000000..eec7990 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceOverflowException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceOverflowException.out new file mode 100644 index 0000000..ccf1224 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceOverflowException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceStats.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceStats.out new file mode 100644 index 0000000..d4e5f32 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.SequenceStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.ThreadInterruptedException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.ThreadInterruptedException.out new file mode 100644 index 0000000..92b3849 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.ThreadInterruptedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionStats$Active.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionStats$Active.out new file mode 100644 index 0000000..3b0d1ee Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionStats$Active.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionStats.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionStats.out new file mode 100644 index 0000000..10813fa Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionTimeoutException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionTimeoutException.out new file mode 100644 index 0000000..b60a40b Binary files /dev/null and 
b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.TransactionTimeoutException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.UniqueConstraintException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.UniqueConstraintException.out new file mode 100644 index 0000000..388d038 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.UniqueConstraintException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.VersionMismatchException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.VersionMismatchException.out new file mode 100644 index 0000000..f4b020f Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.VersionMismatchException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.XAFailureException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.XAFailureException.out new file mode 100644 index 0000000..6f83481 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.XAFailureException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.log.ChecksumException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.log.ChecksumException.out new file mode 100644 index 0000000..79aecb5 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.log.ChecksumException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.DatabasePreemptedException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.DatabasePreemptedException.out new file mode 100644 index 0000000..b8ff4ad Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.DatabasePreemptedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.GroupShutdownException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.GroupShutdownException.out new file mode 100644 index 0000000..2dd0c12 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.GroupShutdownException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientAcksException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientAcksException.out new file mode 100644 index 0000000..5efa7ae Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientAcksException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientLogException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientLogException.out new file mode 100644 index 0000000..223c2ab Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientLogException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientReplicasException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientReplicasException.out new file mode 100644 index 0000000..2a20153 Binary files /dev/null and 
b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.InsufficientReplicasException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.LockPreemptedException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.LockPreemptedException.out new file mode 100644 index 0000000..23f983d Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.LockPreemptedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.LogOverwriteException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.LogOverwriteException.out new file mode 100644 index 0000000..dbc7596 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.LogOverwriteException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MasterReplicaTransitionException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MasterReplicaTransitionException.out new file mode 100644 index 0000000..30222fe Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MasterReplicaTransitionException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MasterStateException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MasterStateException.out new file mode 100644 index 0000000..495430a Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MasterStateException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MemberNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MemberNotFoundException.out new file mode 100644 index 0000000..2e9b458 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.MemberNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicaConsistencyException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicaConsistencyException.out new file mode 100644 index 0000000..a2a47ab Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicaConsistencyException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicaWriteException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicaWriteException.out new file mode 100644 index 0000000..98d0009 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicaWriteException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicatedEnvironmentStats.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicatedEnvironmentStats.out new file mode 100644 index 0000000..ea14703 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.ReplicatedEnvironmentStats.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.RollbackException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.RollbackException.out new file mode 100644 index 0000000..abd7926 Binary 
files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.RollbackException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.RollbackProhibitedException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.RollbackProhibitedException.out new file mode 100644 index 0000000..b013d92 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.RollbackProhibitedException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.UnknownMasterException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.UnknownMasterException.out new file mode 100644 index 0000000..6a3c198 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.UnknownMasterException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.util.ldiff.Block.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.util.ldiff.Block.out new file mode 100644 index 0000000..c3eb7d1 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.util.ldiff.Block.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.util.ldiff.LDiffRecordRequestException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.util.ldiff.LDiffRecordRequestException.out new file mode 100644 index 0000000..5a4cc8b Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.rep.util.ldiff.LDiffRecordRequestException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.tree.CursorsExistException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.tree.CursorsExistException.out new file mode 100644 index 0000000..15945c4 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.tree.CursorsExistException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.tree.NodeNotEmptyException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.tree.NodeNotEmptyException.out new file mode 100644 index 0000000..a67ff67 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.tree.NodeNotEmptyException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.util.LogVerificationException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.util.LogVerificationException.out new file mode 100644 index 0000000..46ed118 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.util.LogVerificationException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.InternalException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.InternalException.out new file mode 100644 index 0000000..32b595c Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.InternalException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.Timestamp.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.Timestamp.out new file mode 100644 index 0000000..c9fb4d6 
Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.Timestamp.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.VLSN.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.VLSN.out new file mode 100644 index 0000000..fdc6125 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.je.utilint.VLSN.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.IndexNotAvailableException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.IndexNotAvailableException.out new file mode 100644 index 0000000..accdbe2 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.IndexNotAvailableException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.StoreExistsException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.StoreExistsException.out new file mode 100644 index 0000000..dc9b5aa Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.StoreExistsException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.StoreNotFoundException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.StoreNotFoundException.out new file mode 100644 index 0000000..0cff084 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.StoreNotFoundException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.evolve.DeletedClassException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.evolve.DeletedClassException.out new file mode 100644 index 0000000..5b0f7c6 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.evolve.DeletedClassException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.evolve.IncompatibleClassException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.evolve.IncompatibleClassException.out new file mode 100644 index 0000000..3d40b18 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.persist.evolve.IncompatibleClassException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.IOExceptionWrapper.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.IOExceptionWrapper.out new file mode 100644 index 0000000..250f210 Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.IOExceptionWrapper.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.RuntimeExceptionWrapper.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.RuntimeExceptionWrapper.out new file mode 100644 index 0000000..9e7bfdf Binary files /dev/null and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.RuntimeExceptionWrapper.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.keyrange.KeyRangeException.out b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.keyrange.KeyRangeException.out new file mode 100644 index 0000000..753efb9 Binary files /dev/null 
and b/test/com/sleepycat/je/serializecompatibility/4.1.6/com.sleepycat.util.keyrange.KeyRangeException.out differ diff --git a/test/com/sleepycat/je/serializecompatibility/SerializeReadObjectsTest.java b/test/com/sleepycat/je/serializecompatibility/SerializeReadObjectsTest.java new file mode 100644 index 0000000..531fe93 --- /dev/null +++ b/test/com/sleepycat/je/serializecompatibility/SerializeReadObjectsTest.java @@ -0,0 +1,124 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.serializecompatibility; + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InvalidClassException; +import java.io.ObjectInputStream; +import java.util.Map; + +import org.junit.Test; + +import com.sleepycat.je.JEVersion; +import com.sleepycat.util.test.TestBase;
+ +/* + * Test whether serialized instances of classes from prior versions can be + * read by the latest one. + * + * This test is used in conjunction with SerializeWriteObjects, a main program + * that generates the serialized outputs of those serializable classes for a + * JE version. When a new version is to be released, run SerializeWriteObjects + * to generate serialized outputs, and then add a test_x_y_z() method to this + * class. + */ +public class SerializeReadObjectsTest extends TestBase {
+ + /* Indicates whether the two versions are serialization-compatible. */ + private boolean serializedSuccess = true; + + /* The directory where the serialized files are saved. */ + private File outputDir; + + /* The parent directory of outputDir. */ + private static final String parentDir = + "test/com/sleepycat/je/serializecompatibility";
+ + /** + * Test whether the latest version is compatible with 4.0 (4.0.106). + * @throws ClassNotFoundException if a serialized class cannot be resolved + */ + @Test + public void test_4_0_0() + throws ClassNotFoundException, IOException { + + doTest(new JEVersion("4.0.106")); + }
+ + /** + * Test whether the latest version is compatible with 4.1 (4.1.6). + * @throws ClassNotFoundException if a serialized class cannot be resolved + */ + @Test + public void test_4_1_0() + throws ClassNotFoundException, IOException { + + doTest(new JEVersion("4.1.6")); + }
+ + /* + * Read the serialized files and deserialize their contents. If the two + * versions are compatible, no InvalidClassException is thrown; if not, + * the exception is thrown and serializedSuccess becomes false. + */ + public void doTest(JEVersion version) + throws ClassNotFoundException, IOException { + + outputDir = new File(parentDir, version.getNumericVersionString()); + if (!outputDir.exists()) { + System.out.println("No such directory, try it again"); + System.exit(1); + }
+ + try { + ObjectInputStream in; + for (Map.Entry<String, Object> entry : + SerializeUtils.getSerializedSet().entrySet()) { + + /* + * Do the check only when the latest version is at least as + * large as the assigned version. + */ + if (JEVersion.CURRENT_VERSION.compareTo(version) >= 0) { + in = new ObjectInputStream + (new FileInputStream + (outputDir.getPath() + + System.getProperty("file.separator") + + entry.getKey() + ".out")); + /* Check that we can read the object successfully. */ + in.readObject(); + in.close(); + } + } + } catch (InvalidClassException e) { + /* Reading serialized output failed. */ + serializedSuccess = false; + } catch (FileNotFoundException fnfe) { + /* The class doesn't exist in the former version; do nothing. */ + }
+ + if (serializedSuccess) { + System.out.println("Serialization is compatible"); + } else { + System.out.println("Serialization is not compatible"); + } + + assertTrue(serializedSuccess); + } +}
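The doTest() loop above relies entirely on standard Java serialization semantics: reading an object stream succeeds while the class's serialVersionUID and field layout remain compatible, and fails with java.io.InvalidClassException otherwise. A minimal, self-contained sketch of that round trip, with a hypothetical Payload class standing in for the JE classes:

import java.io.*;

public class RoundTripSketch {
    static class Payload implements Serializable {
        /* The frozen wire-format id; an incompatible change to the class
           makes readObject() throw InvalidClassException. */
        private static final long serialVersionUID = 1L;
        int value = 42;
    }

    public static void main(String[] args) throws Exception {
        File out = new File("Payload.out");
        /* Write phase: what SerializeWriteObjects does for each class. */
        ObjectOutputStream oos =
            new ObjectOutputStream(new FileOutputStream(out));
        oos.writeObject(new Payload());
        oos.close();
        /* Read phase: what doTest() does for each .out file. */
        ObjectInputStream ois =
            new ObjectInputStream(new FileInputStream(out));
        Payload p = (Payload) ois.readObject();
        ois.close();
        System.out.println("read back value=" + p.value);
    }
}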
+ */ + if (JEVersion.CURRENT_VERSION.compareTo(version) >= 0) { + in = new ObjectInputStream + (new FileInputStream + (outputDir.getPath() + + System.getProperty("file.separator") + + entry.getKey() + ".out")); + /* Check that we can read the object successfully. */ + in.readObject(); + in.close(); + } + } + } catch (InvalidClassException e) { + /* Reading serialized output failed.*/ + serializedSuccess = false; + } catch (FileNotFoundException fnfe) { + /* A class doesn't exist in the former version, do nothing. */ + } + + if (serializedSuccess) { + System.out.println("Serialization is compatible"); + } else { + System.out.println("Serialization is not compatible"); + } + + assertTrue(serializedSuccess); + } +} diff --git a/test/com/sleepycat/je/serializecompatibility/SerializeUtils.java b/test/com/sleepycat/je/serializecompatibility/SerializeUtils.java new file mode 100644 index 0000000..62506c2 --- /dev/null +++ b/test/com/sleepycat/je/serializecompatibility/SerializeUtils.java @@ -0,0 +1,362 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.serializecompatibility; + +import java.io.File; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; + +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CommitToken; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseExistsException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DeadlockException; +import com.sleepycat.je.DeleteConstraintException; +import com.sleepycat.je.DuplicateDataException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Durability.ReplicaAckPolicy; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentLockedException; +import com.sleepycat.je.EnvironmentNotFoundException; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.ForeignConstraintException; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.LockNotAvailableException; +import com.sleepycat.je.LockStats; +import com.sleepycat.je.LockTimeoutException; +import com.sleepycat.je.LogWriteException; +import com.sleepycat.je.PreloadStats; +import com.sleepycat.je.PreloadStatus; +import com.sleepycat.je.SecondaryIntegrityException; +import com.sleepycat.je.SequenceExistsException; +import com.sleepycat.je.SequenceIntegrityException; +import com.sleepycat.je.SequenceNotFoundException; +import com.sleepycat.je.SequenceOverflowException; +import com.sleepycat.je.SequenceStats; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.TransactionStats; +import com.sleepycat.je.TransactionTimeoutException; +import com.sleepycat.je.UniqueConstraintException; +import com.sleepycat.je.VersionMismatchException; +import com.sleepycat.je.XAFailureException; +import 
+import com.sleepycat.je.log.ChecksumException; +import com.sleepycat.je.rep.DatabasePreemptedException; +import com.sleepycat.je.rep.GroupShutdownException; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientLogException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.LockPreemptedException; +import com.sleepycat.je.rep.LogOverwriteException; +import com.sleepycat.je.rep.MasterReplicaTransitionException; +import com.sleepycat.je.rep.MasterStateException; +import com.sleepycat.je.rep.MemberNotFoundException; +import com.sleepycat.je.rep.NodeType; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicaConsistencyException; +import com.sleepycat.je.rep.ReplicaWriteException; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicatedEnvironment.State; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.ReplicationNode; +import com.sleepycat.je.rep.RollbackException; +import com.sleepycat.je.rep.RollbackProhibitedException; +import com.sleepycat.je.rep.StateChangeEvent; +import com.sleepycat.je.rep.UnknownMasterException; +import com.sleepycat.je.rep.impl.RepImpl; +import com.sleepycat.je.rep.impl.RepNodeImpl; +import com.sleepycat.je.rep.impl.node.NameIdPair; +import com.sleepycat.je.rep.impl.node.RepNode; +import com.sleepycat.je.rep.impl.node.cbvlsn.CleanerBarrierState; +import com.sleepycat.je.rep.stream.MatchpointSearchResults; +import com.sleepycat.je.rep.txn.MasterTxn; +import com.sleepycat.je.rep.util.ldiff.Block; +import com.sleepycat.je.rep.util.ldiff.LDiffRecordRequestException; +import com.sleepycat.je.tree.CursorsExistException; +import com.sleepycat.je.tree.NodeNotEmptyException; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.LockerFactory; +import com.sleepycat.je.util.LogVerificationException; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.InternalException; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.je.utilint.Timestamp; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.persist.IndexNotAvailableException; +import com.sleepycat.persist.StoreExistsException; +import com.sleepycat.persist.StoreNotFoundException; +import com.sleepycat.persist.evolve.DeletedClassException; +import com.sleepycat.persist.evolve.IncompatibleClassException; +import com.sleepycat.util.IOExceptionWrapper; +import com.sleepycat.util.RuntimeExceptionWrapper; +import com.sleepycat.util.keyrange.KeyRangeException; +import com.sleepycat.util.test.SharedTestUtils;
+ +/* + * A utility class that lists all JE classes that support serialization. Any + * class that may be serialized by the application and declares a + * serialVersionUID should be included here. + */ +@SuppressWarnings("deprecation") +public class SerializeUtils {
+ + /* Create serialized objects. */ + public static Map<String, Object> getSerializedSet() {
+ + /* Create objects for constructing those exceptions and stats. */ + final StatGroup fakeStats = new StatGroup("SerializeUtils", + "For testing"); + final String message = "test"; + final DatabaseEntry entry = new DatabaseEntry(); + final Logger logger = Logger.getLogger(message);
+ + /* Get the Environment home and delete log files created by the last test. */ + File envHome = SharedTestUtils.getTestDir(); + SharedTestUtils.cleanUpTestDir(envHome);
+ + /* Create a ReplicatedEnvironment.
*/ + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true);
+ + ReplicationConfig repConfig = new ReplicationConfig(); + repConfig.setGroupName(message); + repConfig.setNodeName(message); + repConfig.setNodeHostPort("localhost:5001"); + repConfig.setHelperHosts("localhost:5001");
+ + ReplicatedEnvironment repEnv = + new ReplicatedEnvironment(envHome, repConfig, envConfig);
+ + assert repEnv.getState().isMaster();
+ + /* Get the EnvironmentImpl and locker to construct the exceptions. */ + final RepImpl envImpl = RepInternal.getNonNullRepImpl(repEnv); + final Locker locker = + LockerFactory.getInternalReadOperationLocker(envImpl); + final RepNode repNode = envImpl.getRepNode(); + final MasterTxn masterTxn = + new MasterTxn(envImpl, new TransactionConfig(), + envImpl.getNameIdPair()); + masterTxn.commit(Durability.COMMIT_NO_SYNC);
+ + /* Collection used to save the serialized objects. */ + Map<String, Object> infoSet = new HashMap<String, Object>();
+ + /* com.sleepycat.je package. */ + infoSet.put("com.sleepycat.je.BtreeStats", new BtreeStats()); + infoSet.put("com.sleepycat.je.CommitToken", + new CommitToken(new UUID(1, 2), 0)); + infoSet.put("com.sleepycat.je.DatabaseExistsException", + new DatabaseExistsException(message)); + infoSet.put("com.sleepycat.je.DatabaseNotFoundException", + new DatabaseNotFoundException(message)); + infoSet.put("com.sleepycat.je.DeadlockException", + new DeadlockException(locker, message)); + infoSet.put("com.sleepycat.je.DeleteConstraintException", + new DeleteConstraintException + (locker, message, message, entry, entry, 0)); + infoSet.put("com.sleepycat.je.DuplicateDataException", + new DuplicateDataException(message)); + infoSet.put("com.sleepycat.je.EnvironmentFailureException", + new EnvironmentFailureException + (envImpl, EnvironmentFailureReason.ENV_LOCKED)); + infoSet.put("com.sleepycat.je.EnvironmentStats", + new EnvironmentStats()); + infoSet.put("com.sleepycat.je.EnvironmentLockedException", + new EnvironmentLockedException(envImpl, message)); + infoSet.put("com.sleepycat.je.EnvironmentNotFoundException", + new EnvironmentNotFoundException(envImpl, message)); + infoSet.put("com.sleepycat.je.ForeignConstraintException", + new ForeignConstraintException + (locker, message, message, entry, entry, 0)); + infoSet.put("com.sleepycat.je.LockNotAvailableException", + new LockNotAvailableException(locker, message)); + infoSet.put("com.sleepycat.je.LockStats", + new LockStats(fakeStats, fakeStats, fakeStats)); + infoSet.put("com.sleepycat.je.LockTimeoutException", + new LockTimeoutException(locker, message)); + infoSet.put("com.sleepycat.je.LogWriteException", + new LogWriteException(envImpl, message)); + infoSet.put("com.sleepycat.je.PreloadStats", new PreloadStats()); + infoSet.put("com.sleepycat.je.PreloadStatus", + new PreloadStatus(message)); + infoSet.put("com.sleepycat.je.SecondaryIntegrityException", + new SecondaryIntegrityException + (null, locker, message, message, entry, entry, 0)); + infoSet.put("com.sleepycat.je.SequenceExistsException", + new SequenceExistsException(message)); + infoSet.put("com.sleepycat.je.SequenceIntegrityException", + new SequenceIntegrityException(message)); + infoSet.put("com.sleepycat.je.SequenceNotFoundException", + new SequenceNotFoundException(message)); + infoSet.put("com.sleepycat.je.SequenceOverflowException", + new SequenceOverflowException(message)); + infoSet.put("com.sleepycat.je.SequenceStats", + new SequenceStats(fakeStats)); +
infoSet.put("com.sleepycat.je.ThreadInterruptedException", + new ThreadInterruptedException(envImpl, new Exception())); + infoSet.put("com.sleepycat.je.TransactionStats", + new TransactionStats(fakeStats)); + infoSet.put("com.sleepycat.je.TransactionStats$Active", + new TransactionStats.Active(message, 0, 0)); + infoSet.put("com.sleepycat.je.TransactionTimeoutException", + new TransactionTimeoutException(locker, message)); + infoSet.put("com.sleepycat.je.UniqueConstraintException", + new UniqueConstraintException + (locker, message, message, entry, entry, 0)); + infoSet.put("com.sleepycat.je.VersionMismatchException", + new VersionMismatchException(envImpl, message)); + infoSet.put("com.sleepycat.je.XAFailureException", + new XAFailureException(locker)); + + /* + * com.sleepycat.je.jca.ra package. + * And because these classes need j2ee.jar to compile, but we currently + * don't have it in CVS, so ignore them now. + */ + /****** + infoSet.put("com.sleepycat.je.jca.ra.JEConnectionFactoryImpl", + new JEConnectionFactoryImpl(null, null)); + infoSet.put("com.sleepycat.je.jca.ra.JEException", + new JEException("test")); + infoSet.put("com.sleepycat.je.jca.ra.JEManagedConnectionFactory", + new JEManagedConnectionFactory()); + ******/ + + /* com.sleepycat.je.log package. */ + infoSet.put("com.sleepycat.je.log.ChecksumException", + new ChecksumException("test")); + + /* com.sleepycat.je.rep package. */ + infoSet.put("com.sleepycat.je.rep.DatabasePreemptedException", + new DatabasePreemptedException(message, message, null)); + infoSet.put("com.sleepycat.je.rep.GroupShutdownException", + new GroupShutdownException(logger, repNode, 0)); + infoSet.put("com.sleepycat.je.rep.InsufficientAcksException", + new InsufficientAcksException(masterTxn, -1, 0, message)); + final NameIdPair nid = new NameIdPair("foo"); + final CleanerBarrierState barrierState = + new CleanerBarrierState(VLSN.FIRST_VLSN, 0l); + ReplicationNode rn = + new RepNodeImpl(nid, NodeType.ELECTABLE, false, false, + "h", 5000, + barrierState, + 0, + JEVersion.CURRENT_VERSION); + infoSet.put("com.sleepycat.je.rep.InsufficientLogException", + new InsufficientLogException(envImpl.getRepNode(), + Collections.singleton(rn))); + infoSet.put + ("com.sleepycat.je.rep.InsufficientReplicasException", + new InsufficientReplicasException + (locker, ReplicaAckPolicy.NONE, -1, new HashSet())); + infoSet.put("com.sleepycat.je.rep.LockPreemptedException", + new LockPreemptedException(locker, new Exception())); + infoSet.put("com.sleepycat.je.rep.LogOverwriteException", + new LogOverwriteException(message)); + infoSet.put("com.sleepycat.je.rep.MasterStateException", + new MasterStateException(message)); + infoSet.put("com.sleepycat.je.rep.MemberNotFoundException", + new MemberNotFoundException("")); + infoSet.put("com.sleepycat.je.rep.MasterReplicaTransitionException", + new MasterReplicaTransitionException + (envImpl, new Exception("test"))); + infoSet.put("com.sleepycat.je.rep.ReplicaConsistencyException", + new ReplicaConsistencyException(message, null)); + final StateChangeEvent stateChangeEvent = + new StateChangeEvent(State.MASTER, nid); + infoSet.put("com.sleepycat.je.rep.ReplicaWriteException", + new ReplicaWriteException(locker, stateChangeEvent)); + infoSet.put + ("com.sleepycat.je.rep.ReplicatedEnvironmentStats", + RepInternal.makeReplicatedEnvironmentStats + (envImpl, new StatsConfig())); + infoSet.put("com.sleepycat.je.rep.RollbackException", + new RollbackException + (envImpl, new VLSN(1), + new MatchpointSearchResults(envImpl))); + 
infoSet.put("com.sleepycat.je.rep.RollbackProhibitedException", + new RollbackProhibitedException + (envImpl, 0, false, new VLSN(1), + new MatchpointSearchResults(envImpl))); + infoSet.put("com.sleepycat.je.rep.UnknownMasterException", + new UnknownMasterException(message)); + + /* com.sleepycat.je.rep.util.ldiff package. */ + infoSet.put + ("com.sleepycat.je.rep.util.ldiff.LDiffRecordRequestException", + new LDiffRecordRequestException("test")); + infoSet.put("com.sleepycat.je.rep.util.ldiff.Block", + new Block(1)); + + /* com.sleepycat.je.tree package. */ + infoSet.put("com.sleepycat.je.tree.CursorsExistException", + new CursorsExistException()); + infoSet.put("com.sleepycat.je.tree.NodeNotEmptyException", + new NodeNotEmptyException()); + + /* com.sleepycat.je.util package. */ + infoSet.put("com.sleepycat.je.util.LogVerificationException", + new LogVerificationException(message)); + + /* com.sleepycat.je.utilint package. */ + infoSet.put("com.sleepycat.je.utilint.InternalException", + new InternalException()); + infoSet.put("com.sleepycat.je.utilint.VLSN", + new VLSN(1)); + infoSet.put("com.sleepycat.je.utilint.Timestamp", + new Timestamp(1)); + + /* com.sleepycat.persist package. */ + infoSet.put("com.sleepycat.persist.IndexNotAvailableException", + new IndexNotAvailableException(message)); + infoSet.put("com.sleepycat.persist.StoreExistsException", + new StoreExistsException(message)); + infoSet.put("com.sleepycat.persist.StoreNotFoundException", + new StoreNotFoundException(message)); + + /* com.sleepycat.persist.evolve package. */ + infoSet.put("com.sleepycat.persist.evolve.DeletedClassException", + new DeletedClassException(message)); + infoSet.put("com.sleepycat.persist.evolve.IncompatibleClassException", + new IncompatibleClassException(message)); + + /* com.sleepycat.util package. */ + infoSet.put("com.sleepycat.util.IOExceptionWrapper", + new IOExceptionWrapper(new Throwable())); + infoSet.put("com.sleepycat.util.RuntimeExceptionWrapper", + new RuntimeExceptionWrapper(new Throwable())); + + /* com.sleepycat.util.keyrange package. */ + infoSet.put("com.sleepycat.util.keyrange.KeyRangeException", + new KeyRangeException(message)); + + /* Release the locker, close Environment and delete log files. */ + locker.operationEnd(true); + //masterTxn.abort(); + repEnv.close(); + TestUtils.removeLogFiles("TearDown", envHome, false); + + return infoSet; + } +} diff --git a/test/com/sleepycat/je/serializecompatibility/SerializeWriteObjects.java b/test/com/sleepycat/je/serializecompatibility/SerializeWriteObjects.java new file mode 100644 index 0000000..94915b2 --- /dev/null +++ b/test/com/sleepycat/je/serializecompatibility/SerializeWriteObjects.java @@ -0,0 +1,93 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
diff --git a/test/com/sleepycat/je/serializecompatibility/SerializeWriteObjects.java b/test/com/sleepycat/je/serializecompatibility/SerializeWriteObjects.java new file mode 100644 index 0000000..94915b2 --- /dev/null +++ b/test/com/sleepycat/je/serializecompatibility/SerializeWriteObjects.java @@ -0,0 +1,93 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.serializecompatibility;
+ +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.util.Map; + +import com.sleepycat.je.JEVersion;
+ +public class SerializeWriteObjects { + private File outputDir;
+ + public SerializeWriteObjects(String dirName) { + outputDir = new File(dirName); + }
+ + /* Delete the existing directory of serialized outputs. */ + private void deleteExistDir(File fileDir) { + if (!fileDir.exists()) + return; + if (fileDir.isFile()) { + fileDir.delete(); + return; + } + + File[] files = fileDir.listFiles(); + for (int i = 0; i < files.length; i++) + deleteExistDir(files[i]); + + fileDir.delete(); + }
+ + /* + * If the directory doesn't exist, create a new one; + * otherwise delete it and make a fresh one. + */ + private void createHome() { + if (outputDir.exists()) { + deleteExistDir(outputDir); + } + + outputDir.mkdirs(); + }
+ + /* + * Generate a directory of .out files representing the serialized versions + * of all serializable classes for this JE version. The directory will be + * named with the JE version number, and each file will be named + * <class name>.out. These files will be used by SerializeReadObjectsTest. + */ + public void writeObjects() + throws IOException { + + createHome(); + ObjectOutputStream out; + for (Map.Entry<String, Object> entry : + SerializeUtils.getSerializedSet().entrySet()) { + out = new ObjectOutputStream + (new FileOutputStream + (outputDir.getPath() + System.getProperty("file.separator") + + entry.getValue().getClass().getName() + ".out")); + out.writeObject(entry.getValue()); + out.close(); + } + }
+ + /* + * When the test runs, it creates a subprocess that runs this main + * program, which calls writeObjects() to generate the serialized outputs. + */ + public static void main(String[] args) + throws IOException { + + String dirName = args[0] + System.getProperty("file.separator") + + JEVersion.CURRENT_VERSION.toString(); + SerializeWriteObjects writeTest = new SerializeWriteObjects(dirName); + writeTest.writeObjects(); + } +}
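To record the baseline for a release, the writer is run once with the parent test directory as its argument; the version-named subdirectory it produces is what doTest() later reads. A sketch of the invocation (the driver class is hypothetical; the path matches the parentDir used by SerializeReadObjectsTest, and JE must be on the classpath):

import com.sleepycat.je.serializecompatibility.SerializeWriteObjects;

public class WriteBaseline {
    public static void main(String[] args) throws Exception {
        /* Creates test/com/sleepycat/je/serializecompatibility/<version>,
           where <version> is JEVersion.CURRENT_VERSION, e.g. 4.1.6. */
        SerializeWriteObjects.main(new String[] {
            "test/com/sleepycat/je/serializecompatibility" });
    }
}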
diff --git a/test/com/sleepycat/je/statcap/StatFile.java b/test/com/sleepycat/je/statcap/StatFile.java new file mode 100644 index 0000000..24a2de4 --- /dev/null +++ b/test/com/sleepycat/je/statcap/StatFile.java @@ -0,0 +1,210 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.statcap;
+ +import static org.junit.Assert.assertTrue; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileFilter; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap;
+ +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.utilint.StatCaptureRepDefinitions; +import com.sleepycat.je.utilint.Stat; +import com.sleepycat.je.utilint.StatDefinition; +import com.sleepycat.je.utilint.StatDefinition.StatType; +import com.sleepycat.je.utilint.StatGroup;
+ +public class StatFile { + + static final StatCaptureDefinitions csd = new StatCaptureDefinitions(); + static final StatCaptureRepDefinitions rcsd = + new StatCaptureRepDefinitions();
+ + public static SortedMap<String, Long> sumItUp(File dir, String fileprefix) + throws IOException { + + Map<String, Boolean> isIncMap = setIncrementalMap(); + FindFile ff = new FindFile(fileprefix); + File[] statfiles = dir.listFiles(ff); + SortedMap<String, File> sortedFiles = new TreeMap<String, File>(); + for (File statfile : statfiles) { + sortedFiles.put(statfile.getName(), statfile); + }
+ + SortedMap<String, Long> retmap = new TreeMap<String, Long>(); + for (Entry<String, File> fe : sortedFiles.entrySet()) { + Map<String, Long> datamap = sumItUp(fe.getValue()); + for (Entry<String, Long> e : datamap.entrySet()) { + Long oldval = retmap.get(e.getKey()); + if (oldval != null && getValue(isIncMap.get(e.getKey()))) { + retmap.put(e.getKey(), + new Long(oldval.longValue() + + e.getValue().longValue())); + } + else { + retmap.put(e.getKey(), e.getValue()); + } + } + } + return retmap; + }
+ + public static SortedMap<String, Long> sumItUp(BufferedReader input) + throws IOException { + + try { + SortedMap<String, Long> retmap = new TreeMap<String, Long>(); + String[] header = input.readLine().split(","); + boolean[] isIncremental = new boolean[header.length]; + for (int i = 0; i < header.length; i++) { + retmap.put(header[i], Long.valueOf(0)); + StatDefinition sd = csd.getDefinition(header[i]); + if (sd == null) { + sd = rcsd.getDefinition(header[i]); + } + + if (sd == null) { + /* could be custom or java stats */ + isIncremental[i] = false; + } else { + isIncremental[i] = + (sd.getType() == StatType.INCREMENTAL); + } + }
+ + String row = null; + while ((row = input.readLine()) != null) { + String[] val = row.split(","); + assertTrue("header and row mismatch columns header " + + header.length + " body columns " + val.length, + val.length == header.length); + for (int i = 0; i < val.length; i++) { + try { + long rv = Long.parseLong(val[i]); + if (isIncremental[i]) { + assertTrue(retmap.get(header[i]) >= 0); + rv += retmap.get(header[i]); + } + retmap.put(header[i], rv); + } catch (NumberFormatException e) { + /* Ignore: this row may not be numeric. */ + } + } + } + return retmap; + } finally { + if (input != null) { + input.close(); + } + } + }
+ + public static SortedMap<String, Long> sumItUp(File sf) throws IOException { + + try { + if (!sf.exists()) { + return new TreeMap<String, Long>(); + } + return sumItUp(new BufferedReader(new FileReader(sf))); + } catch (FileNotFoundException e) { + throw EnvironmentFailureException.unexpectedState( + "Unexpected Exception accessing file " + + sf.getAbsolutePath() + e.getMessage()); + } + }
+ + public static SortedMap<String, Long> getMap(Collection<StatGroup> csg) { + + TreeMap<String, Long> statsMap = new TreeMap<String, Long>(); + for (StatGroup sg : csg) { + for (Entry<StatDefinition, Stat<?>> e : + sg.getStats().entrySet()) { + String mapName = + (sg.getName() + ":" + + e.getKey().getName()).intern(); + Object val = e.getValue().get(); + /* Keep only numeric stat values; string-valued stats are skipped.
+
+    public static SortedMap<String, Long> getMap(Collection<StatGroup> csg) {
+
+        TreeMap<String, Long> statsMap = new TreeMap<String, Long>();
+        for (StatGroup sg : csg) {
+            for (Entry<StatDefinition, Stat<?>> e :
+                 sg.getStats().entrySet()) {
+                String mapName =
+                    (sg.getName() + ":" +
+                     e.getKey().getName()).intern();
+                Object val = e.getValue().get();
+                /* Keep only numeric stat values in the map. */
+                if (val instanceof Number) {
+                    statsMap.put(mapName,
+                                 ((Number) val).longValue());
+                }
+            }
+        }
+        return statsMap;
+    }
+
+    public static SortedMap<String, Stat<?>>
+        getNameValueMap(Collection<StatGroup> csg) {
+
+        TreeMap<String, Stat<?>> statsMap = new TreeMap<String, Stat<?>>();
+        for (StatGroup sg : csg) {
+            for (Entry<StatDefinition, Stat<?>> e :
+                 sg.getStats().entrySet()) {
+                String mapName =
+                    (sg.getName() + ":" + e.getKey().getName()).intern();
+                statsMap.put(mapName, e.getValue());
+            }
+        }
+        return statsMap;
+    }
+
+    private static Map<String, Boolean> setIncrementalMap() {
+        SortedSet<String> projections = rcsd.getStatisticProjections();
+        Map<String, Boolean> retmap = new HashMap<String, Boolean>();
+        for (String name : projections) {
+            StatDefinition sd = csd.getDefinition(name);
+            if (sd == null) {
+                sd = rcsd.getDefinition(name);
+            }
+            boolean isIncremental = false;
+            if (sd != null &&
+                sd.getType() == StatType.INCREMENTAL) {
+                isIncremental = true;
+            }
+            retmap.put(name, isIncremental);
+        }
+        return retmap;
+    }
+
+    private static boolean getValue(Boolean b) {
+        return (b == null) ? false : b;
+    }
+
+    static class FindFile implements FileFilter {
+
+        String fileprefix;
+
+        FindFile(String fileprefix) {
+            this.fileprefix = fileprefix;
+        }
+
+        @Override
+        public boolean accept(File f) {
+            return f.getName().startsWith(fileprefix);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/AtomicPutTest.java b/test/com/sleepycat/je/test/AtomicPutTest.java
new file mode 100644
index 0000000..310d240
--- /dev/null
+++ b/test/com/sleepycat/je/test/AtomicPutTest.java
@@ -0,0 +1,325 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.List;
+
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.junit.JUnitMethodThread;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.TxnTestCase;
+
+/**
+ * Tests put() (overwrite) and putNoOverwrite() to check that they work
+ * atomically under concurrent access.  These tests were added after put()
+ * and putNoOverwrite() were changed to work atomically.  The history of the
+ * bugs is below.
+ *
+ * Old Algorithm
+ * -------------
+ * put(X, Y):
+ *     if duplicates:
+ *         return insertDup(X, Y)
+ *     else:
+ *         search(X)
+ *         if SUCCESS:
+ *             putCurrent(Y)
+ *             return SUCCESS
+ *         else:
+ *             return insert(X,Y)
+ *
+ * putNoOverwrite(X, Y):
+ *     search(X)
+ *     if SUCCESS:
+ *         return KEYEXIST
+ *     else:
+ *         if duplicates:
+ *             insertDup(X, Y)
+ *         else:
+ *             insert(X, Y)
+ *
+ * Bug #1: In put with duplicates: Returned KEYEXIST when trying to overwrite
+ * a duplicate duplicate.
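+ *
+ * (Editor's note: a "duplicate duplicate" is an insertion whose key and
+ * data both match an existing record in a duplicates database; see the
+ * description of insertDup under "Low level operations" below.)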
+ * + * Bug #2: In put without duplicates: Returned KEYEXIST if another thread + * inserted in between a search that returned NOTFOUND and the insert(). + * + * Bug #3: In putNoOverwrite with duplicates: Added a duplicate if another + * thread inserted in between a search that returned NOTFOUND and the + * insert(). + * + * New Algorithm + * ------------- + * put(X, Y): + * if duplicates: + * insertDup(X, Y) + * else: + * insert(X, Y) + * if KEYEXIST: + * putCurrent(Y) + * return SUCCESS + * + * putNoOverwrite(X, Y): + * return insert(X, Y) + * + * Potential Bug #4: In put, if the lock is not acquired: Another thread may + * overwrite in between the insert and the putCurrent. But then putCurrent + * wouldn't be able to get a write lock, right? I can't think of how a + * problem could occur. + + * Potential Bug #5: In putNoOverwrite, if we need to lock an existing record + * in order to return KEYEXIST, we may cause more deadlocks than is necessary. + * + * Low level operations + * -------------------- + * insert(X, Y): insert if key is not present, else return KEYEXIST + * insertDup(X, Y): insert if key and data are not present, else return + * KEYEXIST + * + * Both insert methods obtain a lock on the existing record when returning + * KEYEXIST, to support overwrite. + */ +@RunWith(Parameterized.class) +public class AtomicPutTest extends TxnTestCase { + + private static final int MAX_KEY = 400; //50000; + + private int nextKey; + private Database db; + + @Parameters + public static List genParams() { + return getTxnParams(new String[] {TxnTestCase.TXN_USER}, false); + } + + public AtomicPutTest(String type){ + initEnvConfig(); + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + /** + * Closes databases, then calls the super.tearDown to close the env. + */ + @After + public void tearDown() + throws Exception { + + if (db != null) { + try { + db.close(); + } catch (Exception e) {} + db = null; + } + super.tearDown(); + } + + /** + * Tests that put (overwrite), with no duplicates allowed, never causes a + * KEYEXIST status return. + */ + @Test + public void testOverwriteNoDuplicates() + throws Throwable { + + String method = "runOverwriteNoDuplicates"; + JUnitMethodThread tester1 = new JUnitMethodThread(method + "-t1", + method, this); + JUnitMethodThread tester2 = new JUnitMethodThread(method + "-t2", + method, this); + db = openDb("foo", false); + tester1.start(); + tester2.start(); + finishTests(new JUnitThread[] { tester1, tester2 }); + db.close(); + db = null; + } + + /** + * The old put() implementation first did a search, then inserted if + * NOTFOUND was returned by the search. This test tries to create the + * situation where one thread does a search on a key that returns NOTFOUND + * and another thread immediately afterwards inserts the same key, before + * the first thread has a chance to start the insert. Before the fix to + * make put() atomic, the first thread would have returned KEYEXIST from + * put(), and that should never happen. + */ + public void runOverwriteNoDuplicates() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + while (nextKey < MAX_KEY) { + + /* + * Attempt to insert the same key as was just inserted by the other + * thread. We need to keep incrementing the key, since the error + * only occurs for a non-existing key value. 
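+             * (Editor's note: val = nextKey++ / 2 is what makes the two
+             * threads collide -- because nextKey is shared, consecutive
+             * increments map to the same val, so each key value is
+             * attempted once by each thread.)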
+ */ + int val = nextKey++ / 2; + Transaction txn = txnBegin(); + key.setData(TestUtils.getTestArray(val)); + data.setData(TestUtils.getTestArray(val)); + boolean commit = true; + try { + OperationStatus status = db.put(txn, key, data); + assertEquals("Key=" + val, OperationStatus.SUCCESS, status); + } catch (LockConflictException e) { + commit = false; + } + if (commit) { + txnCommit(txn); + } else { + txnAbort(txn); + } + } + } + + /** + * Tests that putNoOverwrite, with duplicates allowed, never inserts a + * duplicate. + */ + @Test + public void testNoOverwriteWithDuplicates() + throws Throwable { + + String method = "runNoOverwriteWithDuplicates"; + JUnitMethodThread tester1 = new JUnitMethodThread(method + "-t1", + method, this); + JUnitMethodThread tester2 = new JUnitMethodThread(method + "-t2", + method, this); + db = openDb("foo", true); + tester1.start(); + tester2.start(); + finishTests(new JUnitThread[] { tester1, tester2 }); + db.close(); + db = null; + } + + /** + * The old putNoOverwrite() inserted a duplicate after a search returned + * NOTFOUND, when duplicates were configured. This test tries to create + * the situation where the second thread inserting with a given key inserts + * a duplicate, which should never happen since we're using + * putNoOverwrite(). + */ + public void runNoOverwriteWithDuplicates() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + while (nextKey < MAX_KEY) { + + /* + * Attempt to insert a duplicate for the same key as was just + * inserted by the other thread. Each thread uses a different data + * value (modulo 2) so to avoid a duplicate-duplicate, which would + * not be inserted. + */ + int val = nextKey++; + int keyVal = val / 2; + int dataVal = val % 2; + key.setData(TestUtils.getTestArray(keyVal)); + data.setData(TestUtils.getTestArray(dataVal)); + while (true) { + Transaction txn = txnBegin(); + boolean commit = true; + try { + db.putNoOverwrite(txn, key, data); + } catch (LockConflictException e) { + commit = false; + } + if (commit) { + txnCommit(txn); + break; + } else { + txnAbort(txn); + } + } + + Transaction txn = txnBegin(); + Cursor cursor = db.openCursor(txn, null); + try { + OperationStatus status = cursor.getSearchKey(key, data, + LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, cursor.count()); + status = cursor.getNextDup(key, data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + } finally { + cursor.close(); + txnCommit(txn); + } + } + } + + /** + * Opens a database. + */ + private Database openDb(String name, boolean dups) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + + Transaction txn = txnBegin(); + try { + return env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + } + + /** + * When one thread throws an assertion, the other threads need to be + * stopped, otherwise we will see side effects that mask the real problem. 
+ */ + private void finishTests(JUnitThread[] threads) + throws Throwable { + + Throwable ex = null; + for (int i = 0; i < threads.length; i += 1) { + try { + threads[i].finishTest(); + } catch (Throwable e) { + if (ex == null) { + ex = e; + } + } + } + if (ex != null) { + throw ex; + } + } +} diff --git a/test/com/sleepycat/je/test/DeferredWriteTest.java b/test/com/sleepycat/je/test/DeferredWriteTest.java new file mode 100644 index 0000000..e68f076 --- /dev/null +++ b/test/com/sleepycat/je/test/DeferredWriteTest.java @@ -0,0 +1,1718 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.PreloadStats; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.cleaner.VerifyUtils; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.Trace; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class DeferredWriteTest extends TestBase { + + private static boolean DEBUG = false; + private static String DBNAME = "foo"; + private static String DBNAME2 = "foo2"; + + private static DatabaseEntry MAIN_KEY_FOR_DUPS = + new DatabaseEntry(new byte[10]); + + private static final CheckpointConfig CHECKPOINT_FORCE_CONFIG = + new CheckpointConfig(); + + static { + CHECKPOINT_FORCE_CONFIG.setForce(true); + } + + private static final StatsConfig STATS_CLEAR_CONFIG = new StatsConfig(); + + static { + STATS_CLEAR_CONFIG.setClear(true); + } + + private final File envHome; + 
private Environment env; + + private boolean truncateOrRemoveDone; + private boolean dups; + private boolean embeddedLNs; + private JUnitThread junitThread; + + @Parameters + public static List genParams() { + + return Arrays.asList(new Object[][]{{true}, {false}}); + } + + public DeferredWriteTest(boolean dup) { + dups = dup; + embeddedLNs = false; + customName = dups ? "Duplicate" : "NoDuplicate"; + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + if (junitThread != null) { + junitThread.shutdown(); + junitThread = null; + } + + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.err.println("TearDown: " + e); + } + } + env = null; + } + + private EnvironmentConfig getEnvConfig(boolean transactional) { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(transactional); + envConfig.setAllowCreate(true); + + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); + + envConfig.setConfigParam( + EnvironmentParams.NODE_MAX_DUPTREE.getName(), "4"); + + /* Force correct LN obsolete size calculation during recovery. */ + envConfig.setConfigParam( + EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE.getName(), "true"); + + return envConfig; + } + + private void closeEnv(boolean normalClose) + throws DatabaseException { + + closeEnv(normalClose, + true /*expectAccurateObsoleteLNCount*/, + true /*expectAccurateDbUtilization*/); + } + + /** + * @param expectAccurateObsoleteLNCount should be false only when an LN + * cannot be counted obsolete during recovery as explained in + * RecoveryManager.redoUtilizationInfo. + * + * @param expectAccurateDbUtilization should be false only when DB info is + * not accurate because INs are evicted and then recovered without a + * checkpoint. The provisional INs are counted obsolete by recovery in the + * per-DB info because the MapLN is not flushed, but not in the per-file + * info because the FileSummaryLNs are flushed by eviction. + */ + private void closeEnv(boolean normalClose, + boolean expectAccurateObsoleteLNCount, + boolean expectAccurateDbUtilization) + throws DatabaseException { + + if (env != null) { + + /* Stop daemons first to stop utilization from changing. */ + DbInternal.getNonNullEnvImpl(env).shutdownDaemons(); + + /* + * We pass expectAccurateDbUtilization as false when + * truncateOrRemoveDone, because the database utilization info for + * that database is now gone. 
+ */ + VerifyUtils.verifyUtilization + (DbInternal.getNonNullEnvImpl(env), + expectAccurateObsoleteLNCount, + true, // expectAccurateObsoleteLNSize, + expectAccurateDbUtilization && + !truncateOrRemoveDone); // expectAccurateDbUtilization + + if (normalClose) { + env.close(); + } else { + DbInternal.getNonNullEnvImpl(env).abnormalClose(); + } + env = null; + } + } + + private Database createDb(boolean deferredWrite) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setDeferredWrite(deferredWrite); + dbConfig.setSortedDuplicates(dups); + + Database db = env.openDatabase(null, DBNAME, dbConfig); + + assertEquals + (deferredWrite, + DbInternal.getDbImpl(db).isDurableDeferredWrite()); + assertEquals + (deferredWrite, + DbInternal.getDbImpl(db).isDeferredWriteMode()); + assertEquals + (false, + DbInternal.getDbImpl(db).isTemporary()); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + DatabaseEntry entry = new DatabaseEntry(); + IntegerBinding.intToEntry(Integer.MAX_VALUE, entry); + + embeddedLNs = (!dups && envImpl.getMaxEmbeddedLN() >= entry.getSize()); + return db; + } + + private Database createTempDb() + throws DatabaseException { + + return createTempDb(DBNAME); + } + + private Database createTempDb(String dbName) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTemporary(true); + dbConfig.setSortedDuplicates(dups); + + Database db = env.openDatabase(null, dbName, dbConfig); + + assertEquals + (false, + DbInternal.getDbImpl(db).isDurableDeferredWrite()); + assertEquals + (true, + DbInternal.getDbImpl(db).isDeferredWriteMode()); + assertEquals + (true, + DbInternal.getDbImpl(db).isTemporary()); + + return db; + } + + /** + * Check that all INs are removed from the INList for a DB that is removed + * before it is sync'ed (or checkpointed). Before the bug fix, INs were + * not removed if the DB root IN was never logged (was still null). This + * caused a DatabaseException when evicting, because the evictor expects no + * INs for deleted DBs on the INList. + */ + @Test + public void testRemoveNonPersistentDbSR15317() + throws Throwable { + + EnvironmentConfig envConfig = getEnvConfig(true); + /* Disable compressor for test predictability. */ + envConfig.setConfigParam("je.env.runINCompressor", "false"); + env = new Environment(envHome, envConfig); + Database db = createDb(true); + /* Insert some data to cause eviction later. */ + insert(db, + null, // txn + 1, // start + 30000, // end + new HashSet(), // expected + false); // useRandom + db.close(); + env.removeDatabase(null, DBNAME); + truncateOrRemoveDone = true; + + envConfig = env.getConfig(); + /* Switch to a small cache to force eviction. */ + envConfig.setCacheSize(96 * 1024); + env.setMutableConfig(envConfig); + for (int i = 0; i < 10; i += 1) { + env.evictMemory(); + } + closeEnv(true /*normalClose*/); + } + + @Test + public void testEmptyDatabaseSR14744() + throws Throwable { + + EnvironmentConfig envConfig = getEnvConfig(true); + env = new Environment(envHome, envConfig); + Database db = createDb(true); + db.sync(); + db.close(); + env.sync(); + closeEnv(true /*normalClose*/); + } + + /** + * Check that deferred write db re-opens at expected state. 
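+     *
+     * Editor's sketch of the durability rule being exercised (an inference
+     * from the tests below, not original text):
+     *
+     *   db.put(null, key, data);  // buffered in the cache only
+     *   db.sync();                // flushed: survives a crash
+     *   db.put(null, key, data);  // buffered again
+     *   db.close();               // close() syncs, so this survives too
+     *   // crash with no close/sync: changes since the last sync are lost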
+ */ + @Test + public void testCloseOpen() + throws Throwable { + + HashSet expectedSet = + doCloseOpen(true, /* useDeferredWrites */ + true, /* doSync */ + 1, /* starting value */ + new HashSet()); /* initial ExpectedSet */ + expectedSet = + doCloseOpen(false, /* useDeferredWrites */ + true, /* doSync */ + 100, /* starting value */ + expectedSet); + expectedSet = + doCloseOpen(true, /* useDeferredWrites */ + true, /* doSync */ + 200, /* starting value */ + expectedSet); + } + + /** + * Check that after crashing without a close/sync/checkpoint, a deferred + * write DB does not contain the unflushed data. + */ + @Test + public void testCloseOpenNoSync() + throws Throwable { + + HashSet expectedSet = + doCloseOpen(true, /* useDeferredWrites */ + false, /* doSync */ + 1, /* starting value */ + new HashSet()); /* initial ExpectedSet */ + expectedSet = + doCloseOpen(true, /* useDeferredWrites */ + false, /* doSync */ + 100, /* starting value */ + expectedSet); + } + + /** + * Check that deferred write and durable databases re-open at expected + * state. + */ + private HashSet doCloseOpen(boolean useDeferredWrite, + boolean doSync, + int startingValue, + HashSet initialSet) + throws Throwable { + + EnvironmentConfig envConfig = getEnvConfig(true); + env = new Environment(envHome, envConfig); + Database db = createDb(useDeferredWrite); + + /* We'll do inserts in two batches. */ + HashSet expectedBatch1 = new HashSet(); + expectedBatch1.addAll(initialSet); + HashSet expectedBatch2 = new HashSet(); + HashSet finalExpectedSet = null; + + int batch1Size = 40; + int batch2Size = 50; + + /* + * Insert non-random values in two batches. Don't use random inserts in + * order to be sure we have a set of non-conflicting values for the + * test. + */ + insert(db, null, startingValue, startingValue + batch1Size, + expectedBatch1, false); + checkExactContentMatch(db, expectedBatch1); + if (useDeferredWrite) { + db.sync(); + } + + /* Insert a second batch */ + insert(db, null, + startingValue + batch1Size, + startingValue + batch1Size + batch2Size, + expectedBatch2, false); + expectedBatch2.addAll(expectedBatch1); + checkExactContentMatch(db, expectedBatch2); + + /* Close/reopen, database should hold the expectedBatch2 set. */ + if (doSync) { + db.close(); + db = createDb(useDeferredWrite); + checkExactContentMatch(db, expectedBatch2); + } + + /* + * Recover the environment. batch2 changes should show up even if the + * db was deferred write, because a sync is done when the database is + * closed. batch2 changes should NOT show up only when doSync is + * false and deferred write is used. + * + * If a flush of INs occured followed by an abnormal close and + * recovery, obsolete LNs will not always be counted correctly. + */ + closeEnv(false /*normalClose*/, + false /*expectAccurateObsoleteLNCount*/, + true /*expectAccurateDbUtilization*/); + env = new Environment(envHome, envConfig); + + db = createDb(useDeferredWrite); + + finalExpectedSet = (useDeferredWrite && !doSync) ? + expectedBatch1 : expectedBatch2; + + checkExactContentMatch(db, finalExpectedSet); + db.close(); + env.sync(); + + /* + */ + closeEnv(true /*normalClose*/, + false /*expectAccurateObsoleteLNCount*/, + true /*expectAccurateDbUtilization*/); + + return finalExpectedSet; + } + + /** + * Test that a checkpoint syncs a durable deferred-write DB. 
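+     *
+     * Editor's sketch of the pattern in application terms (assumption, not
+     * original text):
+     *
+     *   CheckpointConfig force = new CheckpointConfig();
+     *   force.setForce(true);
+     *   db.put(null, key, data);  // deferred-write: cached only
+     *   env.checkpoint(force);    // checkpoint flushes durable DW DBs
+     *   // the record is durable without an explicit db.sync()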
+ */ + @Test + public void testCheckpoint() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + env = new Environment(envHome, envConfig); + + Database db = createDb(true); + HashSet expected = insertAndCheck(db); + + env.checkpoint(CHECKPOINT_FORCE_CONFIG); + closeEnv(false /*normalClose*/); + env = new Environment(envHome, envConfig); + + db = createDb(true); + checkExactContentMatch(db, expected); + db.close(); + + closeEnv(true /*normalClose*/); + } + + /** + * Test that a checkpoint does not sync a temp DB. + */ + @Test + public void testCheckpointTemp() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + env = new Environment(envHome, envConfig); + + Database db = createTempDb(); + env.sync(); + EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG); + + insertAndCheck(db); + + env.sync(); + stats = env.getStats(STATS_CLEAR_CONFIG); + + /* With a non-temp DB, more than 30 BINs are flushed. */ + assertTrue(String.valueOf(stats.getNFullBINFlush()), + stats.getNFullBINFlush() <= 2); + assertTrue(String.valueOf(stats.getNFullINFlush()), + stats.getNFullINFlush() <= 4); + assertTrue(String.valueOf(stats.getNDeltaINFlush()), + stats.getNDeltaINFlush() <= 2); + + db.close(); + closeEnv(true /*normalClose*/); + } + + /** + * Check that temp db works in deferred write mode. + */ + @Test + public void testTempIsDeferredWriteMode() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + env = new Environment(envHome, envConfig); + Database db = createTempDb(); + + long origEndOfLog = DbInternal.getNonNullEnvImpl(env) + .getFileManager() + .getNextLsn(); + + insertAndCheck(db); + + long endOfLog = DbInternal.getNonNullEnvImpl(env) + .getFileManager() + .getNextLsn(); + + /* Check that no writing occurred after inserts. */ + assertEquals("origEndOfLog=" + DbLsn.getNoFormatString(origEndOfLog) + + " endOfLog=" + DbLsn.getNoFormatString(endOfLog), + origEndOfLog, endOfLog); + + db.close(); + closeEnv(true /*normalClose*/); + } + + /** + * Check that temp db is removed on close and by recovery. + */ + @Test + public void testTempRemoval() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + env = new Environment(envHome, envConfig); + + /* Create DB and close() to remove it. */ + Database db = createTempDb(DBNAME); + insertAndCheck(db); + assertTrue(env.getDatabaseNames().contains(DBNAME)); + db.close(); + assertTrue(!env.getDatabaseNames().contains(DBNAME)); + + /* + * Create multiple DBs and run recovery to remove them. Recovery keeps + * a set of temp DBs, and we want to make sure it removes all of them. + */ + db = createTempDb(DBNAME); + Database db2 = createTempDb(DBNAME2); + insertAndCheck(db); + insertAndCheck(db2); + assertTrue(env.getDatabaseNames().contains(DBNAME)); + assertTrue(env.getDatabaseNames().contains(DBNAME2)); + closeEnv(false /*normalClose*/); + env = new Environment(envHome, envConfig); + assertTrue(!env.getDatabaseNames().contains(DBNAME)); + assertTrue(!env.getDatabaseNames().contains(DBNAME2)); + + /* + * Test that recovery deletes a temp DB after several checkpoints. + * This test requires that the MapLN for every open temp DB is logged + * during each checkpoint interval. 
+ */ + db = createTempDb(DBNAME); + insertAndCheck(db); + assertTrue(env.getDatabaseNames().contains(DBNAME)); + env.sync(); + env.sync(); + env.sync(); + closeEnv(false /*normalClose*/); + env = new Environment(envHome, envConfig); + assertTrue(!env.getDatabaseNames().contains(DBNAME)); + + closeEnv(true /*normalClose*/); + } + + @Test + public void testTempEvictionAndObsoleteCounting() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "128"); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX_DUPTREE.getName(), "128"); + /* Use a small cache to cause eviction. */ + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + envConfig.setConfigParam("je.env.runCleaner", "false"); + envConfig.setConfigParam("je.env.runCheckpointer", "false"); + envConfig.setConfigParam("je.env.runINCompressor", "false"); + envConfig.setConfigParam("je.env.runEvictor", "false"); + + env = new Environment(envHome, envConfig); + + /* Create DB and insert until 1000 INs are evicted. */ + Database db = createTempDb(DBNAME); + int start; + for (start = 1;; start += 1000) { + insert(db, + null, // txn + start, // start + start + 1000, // end + new HashSet(), // expected + true); // useRandom + + EnvironmentStats stats = env.getStats(null); + if (stats.getNNodesExplicitlyEvicted() > 1000) { + break; + } + } + + /* + * Update all records twice, to cause eviction and log multiple + * versions of the INs. + */ + int lastStart = start; + for (start = 1; start <= lastStart; start += 1000) { + update(db, + null, // txn + start, // start + start + 1000); // end + } + for (start = 1; start < lastStart; start += 1000) { + update(db, + null, // txn + start, // start + start + 1000); // end + } + + assertTrue(DbInternal.getNonNullEnvImpl(env). + getUtilizationProfile(). + getFileSummaryMap(true). + get(0L). + obsoleteINCount > 1000); + + db.close(); + closeEnv(true /*normalClose*/, + true /*expectAccurateObsoleteLNCount*/, + false /*expectAccurateDbUtilization*/); + } + + private HashSet insertAndCheck(Database db) + throws DatabaseException { + + HashSet expected = new HashSet(); + insert(db, null, 1, 100, expected, false); + checkExactContentMatch(db, expected); + return expected; + } + + @Test + public void testRecoverNoSync() + throws Throwable { + + EnvironmentConfig envConfig = getEnvConfig(true); + doRecover(envConfig, + 30, /* numRecords */ + false, /* syncBeforeRecovery. */ + false); /* expectEviction */ + } + + @Test + public void testRecoverSync() + throws Throwable { + + EnvironmentConfig envConfig = getEnvConfig(true); + doRecover(envConfig, + 30, /* numRecords */ + true, /* syncBeforeRecovery. */ + false); /* expectEviction */ + } + + @Test + public void testRecoverNoSyncEvict() + throws Throwable { + + EnvironmentConfig envConfig = getEnvConfig(true); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + doRecover(envConfig, + 3000, /* numRecords */ + false, /* syncBeforeRecovery. */ + true); /* expectEviction */ + } + + @Test + public void testRecoverSyncEvict() + throws Throwable { + + EnvironmentConfig envConfig = getEnvConfig(true); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + doRecover(envConfig, + 3000, /* numRecords */ + true, /* syncBeforeRecovery. 
*/ + true); /* expectEviction */ + } + + private void doRecover(EnvironmentConfig envConfig, + int numRecords, + boolean syncBeforeRecovery, + boolean expectEviction) + throws DatabaseException { + + env = new Environment(envHome, envConfig); + Database db = createDb(true); + HashSet expected = new HashSet(); + + /* Insert */ + EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG); + insert(db, null, 1, numRecords, expected, true); + checkForEvictionActivity(expectEviction, /* evict activity */ + expectEviction); /* cache miss */ + checkExactContentMatch(db, expected); + checkForEvictionActivity(expectEviction, /* evict activity */ + expectEviction); /* cache miss */ + + /* + * optional sync; do not checkpoint because checkpoints include a + * sync of non-temporary DBs. + */ + DatabaseConfig saveConfig = db.getConfig(); + if (syncBeforeRecovery) { + db.sync(); + } + + /* Close without sync or checkpoint to force recovery. */ + closeEnv(false /*normalClose*/); + + /* recover and re-open. */ + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, DBNAME, saveConfig); + + /* Check the contents. */ + HashSet useExpected = null; + if (syncBeforeRecovery) { + useExpected = expected; + } else { + useExpected = new HashSet(); + } + + checkExactContentMatch(db, useExpected); + db.close(); + + /* + * When eviction precedes the abnormal close and recovery, obsolete LNs + * and INs will not always be counted correctly. + */ + closeEnv(true /*normalClose*/, + false /*expectAccurateObsoleteLNCount*/, + false /*expectAccurateDbUtilization*/); + } + + /** + * Performs a basic check of deferred-write w/duplicates for verifying the + * fix to duplicate logging on 3.2.x. [#15365] + */ + @Test + public void testDups() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setDeferredWrite(true); + dbConfig.setSortedDuplicates(true); + Database db = env.openDatabase(null, DBNAME, dbConfig); + + /* Insert {9,0} and {9,1}. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(9, key); + IntegerBinding.intToEntry(0, data); + assertSame(OperationStatus.SUCCESS, + db.putNoDupData(null, key, data)); + IntegerBinding.intToEntry(1, data); + assertSame(OperationStatus.SUCCESS, + db.putNoDupData(null, key, data)); + + /* Check that both exist. */ + Cursor c = db.openCursor(null, null); + try { + assertSame(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + assertEquals(9, IntegerBinding.entryToInt(key)); + assertEquals(0, IntegerBinding.entryToInt(data)); + + assertSame(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + assertEquals(9, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + + assertSame(OperationStatus.NOTFOUND, + c.getNext(key, data, LockMode.DEFAULT)); + } finally { + c.close(); + } + + /* Close without a checkpoint to redo the LNs during recovery. */ + db.sync(); + db.close(); + DbInternal.getNonNullEnvImpl(env).close(false); + env = null; + + /* Recover and check again. 
*/ + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, DBNAME, dbConfig); + c = db.openCursor(null, null); + try { + assertSame(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + + /* + * Before fixing the problem with deferred-write duplicate logging, + * the key read below was 0 instead of 9. The bug was that the + * data (0) was being logged as the main tree key. + */ + assertEquals(9, IntegerBinding.entryToInt(key)); + assertEquals(0, IntegerBinding.entryToInt(data)); + + assertSame(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + assertEquals(9, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + + assertSame(OperationStatus.NOTFOUND, + c.getNext(key, data, LockMode.DEFAULT)); + } finally { + c.close(); + } + + db.close(); + env.close(); + env = null; + } + + /** + * Tests a fix for a bug where reusing a slot caused a non-deleted record + * to be compressed. [#15684] + */ + @Test + public void testCompressAfterSlotReuse() + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + /* Disable daemons to prevent async compression. */ + envConfig.setConfigParam("je.env.runCleaner", "false"); + envConfig.setConfigParam("je.env.runCheckpointer", "false"); + envConfig.setConfigParam("je.env.runINCompressor", "false"); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setDeferredWrite(true); + Database db = env.openDatabase(null, DBNAME, dbConfig); + + /* Reuse slot: Insert key 0, delete 0, insert 0 */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(0, key); + IntegerBinding.intToEntry(0, data); + assertSame(OperationStatus.SUCCESS, + db.putNoOverwrite(null, key, data)); + assertSame(OperationStatus.SUCCESS, + db.delete(null, key)); + assertSame(OperationStatus.SUCCESS, + db.putNoOverwrite(null, key, data)); + + /* + * Because of the delete() above, a compressor entry is queued for key + * 0, although it was re-inserted. And there is no LSN for the slot + * because it has never been logged. When we compress now, we run into + * the BIN.compress bug where it assumes an entry is deleted if its LSN + * is null. + */ + env.compress(); + + /* + * Before the bug fix, the following assert would fail because the + * entry was compressed and NOTFOUND. + */ + assertSame(OperationStatus.SUCCESS, + db.get(null, key, data, null)); + + db.close(); + env.close(); + env = null; + } + + @Test + public void testPreloadNoSync() + throws DatabaseException { + + doPreload(false); /* syncBeforeRecovery */ + } + + @Test + public void testPreloadSync() + throws DatabaseException { + + doPreload(true); /* syncBeforeRecovery */ + } + + private void doPreload(boolean syncBeforeRecovery) + throws DatabaseException { + + EnvironmentConfig envConfig = getEnvConfig(false); + envConfig.setCacheSize(MemoryBudget.MIN_MAX_MEMORY_SIZE); + env = new Environment(envHome, envConfig); + Database db = createDb(true); + HashSet expected = new HashSet(); + + int numRecords = 3000; + + /* Insert */ + EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG); + insert(db, null, 1, numRecords, expected, true); + checkForEvictionActivity(true, /* evict activity */ + true); /* cache miss */ + + /* + * Change the cache size to the default value so a preload will + * have enough cache to pull items in. 
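+         * (Editor's note: setCacheSize(0) below means "unset", so JE falls
+         * back to its default sizing, the je.maxMemoryPercent fraction of
+         * the JVM heap; this reading of the config semantics is an
+         * assumption, not original text.)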
+ */ + envConfig.setCacheSize(0); + env.setMutableConfig(envConfig); + if (DEBUG) { + System.out.println("after mutable " + + env.getConfig().getCacheSize()); + } + + PreloadConfig pConfig = new PreloadConfig(); + pConfig.setLoadLNs(true); + PreloadStats pStats = db.preload(pConfig); + + if (DEBUG) { + System.out.println("first preload " + pStats); + } + assertTrue(String.valueOf(pStats.getNBINsLoaded()), + pStats.getNBINsLoaded() > 50); + assertTrue(String.valueOf(pStats.getNINsLoaded()), + pStats.getNINsLoaded() > 50); + + if (embeddedLNs) { + assertTrue(String.valueOf(pStats.getNEmbeddedLNs()), + pStats.getNEmbeddedLNs() > 50); + } else { + assertTrue(String.valueOf(pStats.getNLNsLoaded()), + pStats.getNLNsLoaded() > 50); + } + + checkExactContentMatch(db, expected); + + DatabaseConfig saveConfig = db.getConfig(); + if (syncBeforeRecovery) { + db.sync(); + } + + /* Close db and env without sync or checkpoint */ + closeEnv(false /*normalClose*/); + + /* recover and re-open. */ + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, DBNAME, saveConfig); + pStats = db.preload(pConfig); + if (DEBUG) { + System.out.println("second preload " + pStats); + } + + /* Check the contents. */ + HashSet useExpected = null; + if (syncBeforeRecovery) { + useExpected = expected; + assertTrue(String.valueOf(pStats.getNBINsLoaded()), + pStats.getNBINsLoaded() > 50); + assertTrue(String.valueOf(pStats.getNINsLoaded()), + pStats.getNINsLoaded() > 50); + + if (embeddedLNs) { + assertTrue(String.valueOf(pStats.getNEmbeddedLNs()), + pStats.getNEmbeddedLNs() > 50); + } else { + assertTrue(String.valueOf(pStats.getNLNsLoaded()), + pStats.getNLNsLoaded() > 50); + } + } else { + useExpected = new HashSet(); + assertEquals(0, pStats.getNBINsLoaded()); + assertEquals(0, pStats.getNINsLoaded()); + assertEquals(0, pStats.getNLNsLoaded()); + } + + checkExactContentMatch(db, useExpected); + + db.close(); + } + + private void checkForEvictionActivity(boolean expectEviction, + boolean expectCacheMiss) + throws DatabaseException { + + EnvironmentStats stats = env.getStats(STATS_CLEAR_CONFIG); + if (DEBUG) { + System.out.println("EvictPasses=" + stats.getNEvictPasses()); + System.out.println("Selected=" + stats.getNNodesSelected()); + System.out.println("Stripped=" + stats.getNBINsStripped()); + System.out.println("Evicted=" + + stats.getNNodesExplicitlyEvicted()); + System.out.println("CacheMiss=" + + stats.getNCacheMiss()); + } + + if (expectEviction) { + + assertTrue(String.valueOf(stats.getNNodesSelected()), + stats.getNNodesSelected() > 50); + + /* Duplicate DB reads never read the LN. 
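+             * Embedded LNs are likewise stored in the BIN itself, so in
+             * both modes there is no separate LN to strip and nBINsStripped
+             * is not expected to move.  (Editor's note, inferred from the
+             * condition below.)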
*/ + if (!dups && !embeddedLNs) { + assertTrue(String.valueOf(stats.getNBINsStripped()), + stats.getNBINsStripped() > 50); + } + assertTrue(String.valueOf(stats.getNNodesExplicitlyEvicted()), + stats.getNNodesExplicitlyEvicted() > 50); + } + + if (expectCacheMiss) { + assertTrue(String.valueOf(stats.getNCacheMiss()), + stats.getNCacheMiss() > 50); + } + } + + @Test + public void testBadConfigurations() + throws Throwable { + + env = new Environment(envHome, getEnvConfig(true)); + + DatabaseConfig dbConfigDeferred = new DatabaseConfig(); + dbConfigDeferred.setAllowCreate(true); + dbConfigDeferred.setDeferredWrite(true); + dbConfigDeferred.setSortedDuplicates(dups); + + DatabaseConfig dbConfigNoDeferred = new DatabaseConfig(); + dbConfigNoDeferred.setAllowCreate(true); + dbConfigNoDeferred.setSortedDuplicates(dups); + + /* A txnal deferred database is not possible */ + try { + dbConfigDeferred.setTransactional(true); + @SuppressWarnings("unused") + Database db = env.openDatabase(null, "foo", dbConfigDeferred); + fail("No support yet for txnal, deferred-write databases"); + } catch (IllegalArgumentException expected) { + } + + dbConfigDeferred.setTransactional(false); + + /* + * Open a db first with deferred write, then secondly without deferred + * write, should fail. + */ + Database db1 = env.openDatabase(null, "foo", dbConfigDeferred); + try { + @SuppressWarnings("unused") + Database db2 = env.openDatabase(null, "foo", dbConfigNoDeferred); + fail("Database already opened with deferred write"); + } catch (IllegalArgumentException expected) { + } + db1.close(); + + /* + * Open a db first without deferred write, then secondly with deferred + * write, should fail. + */ + db1 = env.openDatabase(null, "foo", dbConfigNoDeferred); + try { + @SuppressWarnings("unused") + Database db2 = env.openDatabase(null, "foo", dbConfigDeferred); + fail("Database already opened with out deferred write"); + } catch (IllegalArgumentException expected) { + } + db1.close(); + + /* Sync is only allowed for deferred-write databases. */ + Database db = env.openDatabase(null, "foo", dbConfigNoDeferred); + try { + db.sync(); + fail("Sync not permitted"); + } catch (UnsupportedOperationException expected) { + if (DEBUG) { + System.out.println("expected=" + expected); + } + db.close(); + } + } + + @Test + public void testCleaning5000() + throws Throwable { + + doCleaning("90", "4200"); /* log file size. */ + } + + private void doCleaning(String minUtilization, String logFileSize) + throws DatabaseException { + + /* + * Run with a small cache so there's plenty of logging. But use a + * slightly bigger cache than the minimum so that eviction during + * cleaning has enough working room on 64-bit systems [#15176]. + */ + long cacheSize = MemoryBudget.MIN_MAX_MEMORY_SIZE + + (MemoryBudget.MIN_MAX_MEMORY_SIZE / 2); + EnvironmentConfig envConfig = getEnvConfig(true); + DbInternal.disableParameterValidation(envConfig); + envConfig.setCacheSize(cacheSize); + envConfig.setConfigParam("je.cleaner.minUtilization", + minUtilization); + envConfig.setConfigParam("je.log.fileMax", logFileSize); + envConfig.setConfigParam("je.cleaner.expunge", "false"); + /* Disable cleaner thread so batch cleaning is predictable. [#15176] */ + envConfig.setConfigParam("je.env.runCleaner", "false"); + /* With tiny files we can't log expiration profile records. */ + DbInternal.setCreateEP(envConfig, false); + env = new Environment(envHome, envConfig); + Database db = createDb(true); + + /* We'll do inserts in two batches. 
*/ + HashSet expectedBatch1 = new HashSet(); + HashSet expectedBatch2 = new HashSet(); + + int batch1Size = 100; + int batch2Size = 110; + + /* + * Insert non-random values in two batches. Don't use random + * inserts in order to be sure we have a set of non-conflicting + * values for the test. + */ + int startingValue = 1; + insert(db, + null, + startingValue, + startingValue + batch1Size, + expectedBatch1, + false); /* random */ + checkExactContentMatch(db, expectedBatch1); + db.sync(); + + /* Insert a second batch with no sync */ + insertAndUpdate(db, + null, + startingValue + batch1Size, + startingValue + batch2Size, + expectedBatch2, + false); /* random */ + expectedBatch2.addAll(expectedBatch1); + checkExactContentMatch(db, expectedBatch2); + env.checkpoint(CHECKPOINT_FORCE_CONFIG); + Trace.trace(DbInternal.getNonNullEnvImpl(env), "before clean"); + batchClean(); + + Trace.trace(DbInternal.getNonNullEnvImpl(env), "after clean"); + + checkExactContentMatch(db, expectedBatch2); + + /* + * Recover the environment a few times. Whether the batch2 changes + * show up depend on whether the db was deferred write, and whether + * a sync was done. + */ + for (int i = 0; i < 4; i++) { + /* Do an abnormal close, we do not want to sync the database. */ + db = null; + closeEnv(false /*normalClose*/); + env = new Environment(envHome, envConfig); + + db = createDb(true); + checkContents(db, + expectedBatch2, + false); /* exact match. */ + + batchClean(); + checkContents(db, + expectedBatch2, + false); /* exact match. */ + } + + db.close(); + closeEnv(true /*normalClose*/); + } + + /** + * Insert a set of records, record the values in the expected set. + * @param useRandom If True, use random values. + */ + private void insert(Database db, + Transaction txn, + int start, + int end, + Set expected, + boolean useRandom) + throws DatabaseException{ + + OperationStatus status; + DatabaseEntry entry = new DatabaseEntry(); + Random rand = new Random(); + for (int i = start; i < end; i++) { + int value = useRandom ? rand.nextInt() : i; + + IntegerBinding.intToEntry(value, entry); + if (dups) { + status = db.putNoDupData(txn, MAIN_KEY_FOR_DUPS, entry); + } else { + status = db.putNoOverwrite(txn, entry, entry); + } + if (!useRandom) { + assertEquals(OperationStatus.SUCCESS, status); + } + expected.add(new Integer(value)); + } + } + + /** + * Insert and modify a set of records, record the values in the + * expected set. + * @param useRandom If True, use random values. + */ + private void insertAndUpdate(Database db, + Transaction txn, + int start, + int end, + Set expected, + boolean useRandom) + throws DatabaseException{ + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Random rand = new Random(); + for (int i = start; i < end; i++) { + int value = useRandom ? rand.nextInt() : i; + + IntegerBinding.intToEntry(value, key); + if (dups) { + OperationStatus status = + db.putNoDupData(txn, MAIN_KEY_FOR_DUPS, key); + if (status == OperationStatus.SUCCESS) { + /* Update it */ + db.put(txn, MAIN_KEY_FOR_DUPS, key); + expected.add(new Integer(value)); + } + } else { + IntegerBinding.intToEntry(value - 1, data); + OperationStatus status = db.putNoOverwrite(txn, key, data); + if (status == OperationStatus.SUCCESS) { + /* Update it */ + IntegerBinding.intToEntry(value, data); + db.put(txn, key, data); + expected.add(new Integer(value)); + } + } + } + } + + /** + * Update a set of records. 
+     */
+    private void update(Database db,
+                        Transaction txn,
+                        int start,
+                        int end)
+        throws DatabaseException {
+
+        OperationStatus status;
+        DatabaseEntry entry = new DatabaseEntry();
+        for (int i = start; i < end; i++) {
+            IntegerBinding.intToEntry(i, entry);
+            if (dups) {
+                status = db.put(txn, MAIN_KEY_FOR_DUPS, entry);
+            } else {
+                status = db.put(txn, entry, entry);
+            }
+            assertEquals(OperationStatus.SUCCESS, status);
+        }
+    }
+
+    /**
+     * Delete a set of records, updating the values in the expected set.
+     * @param useRandom if true, use random values.
+     */
+    private void delete(Database db,
+                        Transaction txn,
+                        int start,
+                        int end,
+                        Set<Integer> expected,
+                        boolean useRandom)
+        throws DatabaseException {
+
+        DatabaseEntry entry = new DatabaseEntry();
+        Random rand = new Random();
+        for (int i = start; i < end; i++) {
+            int value = useRandom ? (start + rand.nextInt(end - start)) : i;
+
+            IntegerBinding.intToEntry(value, entry);
+            if (dups) {
+                final Cursor c = db.openCursor(txn, null);
+                try {
+                    if (c.getSearchBoth(MAIN_KEY_FOR_DUPS, entry, null) ==
+                        OperationStatus.SUCCESS) {
+                        c.delete();
+                    }
+                } finally {
+                    c.close();
+                }
+            } else {
+                db.delete(txn, entry);
+            }
+            expected.remove(new Integer(value));
+        }
+    }
+
+    /**
+     * The database should hold exactly the values in the expected set.
+     */
+    private void checkExactContentMatch(Database db, HashSet<Integer> expected)
+        throws DatabaseException {
+
+        checkContents(db, expected, true);
+    }
+
+    /**
+     * The database should hold only values that are in the expected set.
+     * Note that this assumes that the key and data are the same value.
+     * @param exactMatch if true, the database ought to hold all the values
+     * in the expected set.
+     */
+    private void checkContents(Database db,
+                               HashSet<Integer> expected,
+                               boolean exactMatch)
+        throws DatabaseException {
+
+        Cursor c = db.openCursor(null, null);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+
+        Set<Integer> useExpected = (Set<Integer>) expected.clone();
+
+        if (DEBUG) {
+            System.err.println("Start checking");
+        }
+
+        while (c.getNext(key, data, LockMode.DEFAULT) ==
+               OperationStatus.SUCCESS) {
+            int value = IntegerBinding.entryToInt(dups ? data : key);
+
+            if (DEBUG) {
+                System.err.println("checkDatabase: found " + value);
+            }
+
+            assertTrue(value + " not in useExpected set. Expected size=" +
+                       useExpected.size(),
+                       useExpected.remove(new Integer(value)));
+            assertEquals(value, IntegerBinding.entryToInt(data));
+        }
+
+        if (exactMatch) {
+            assertEquals(useExpected.toString(), 0, useExpected.size());
+        } else {
+            if (DEBUG) {
+                System.out.println(useExpected.size() +
+                                   " left over in the expected set");
+            }
+        }
+        c.close();
+    }
+
+    private void batchClean()
+        throws DatabaseException {
+
+        int cleaned = 0;
+        int cleanedThisRound = 0;
+        do {
+            cleanedThisRound = env.cleanLog();
+            cleaned += cleanedThisRound;
+        } while (cleanedThisRound > 0);
+
+        if (DEBUG) {
+            System.out.println("numCleaned = " + cleaned);
+        }
+
+        assertTrue("cleaned must be > 0, was only " + cleaned +
+                   " but may vary from machine to machine", cleaned > 0);
+
+        if (cleaned > 0) {
+            CheckpointConfig force = new CheckpointConfig();
+            force.setForce(true);
+            env.checkpoint(force);
+        }
+    }
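+
+    /*
+     * Editor's note, not part of the original file: batchClean() above is
+     * the usual way to drive the cleaner by hand through the public API --
+     * clean until Environment.cleanLog() reports that no more files were
+     * cleaned, then force a checkpoint so the cleaned files can be
+     * reclaimed:
+     *
+     *   int cleanedTotal = 0;
+     *   int n;
+     *   while ((n = env.cleanLog()) > 0) {
+     *       cleanedTotal += n;
+     *   }
+     *   CheckpointConfig force = new CheckpointConfig();
+     *   force.setForce(true);
+     *   env.checkpoint(force);
+     */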
+
+    /**
+     * Tests that record deletion is durable after Database.sync, when a
+     * crash occurs after the sync and the previous version of the LN is in
+     * the recovery interval.  Before logging deferred-write LNs
+     * provisionally, the previous version of the LN was reinserted into the
+     * BIN by recovery.
+     *
+     * [#16864]
+     */
+    @Test
+    public void testDelete()
+        throws DatabaseException {
+
+        final EnvironmentConfig envConfig = getEnvConfig(false);
+        env = new Environment(envHome, envConfig);
+        Database db = createDb(true);
+
+        final int NUM_RECORDS = 100;
+        final HashSet<Integer> expected = new HashSet<Integer>();
+        insert(db, null, 1, NUM_RECORDS, expected, false);
+        db.sync();
+        delete(db, null, 1, NUM_RECORDS, expected, false);
+        db.sync();
+        assertTrue(expected.isEmpty());
+        checkExactContentMatch(db, expected);
+
+        /* Close without a checkpoint to redo the LNs during recovery. */
+        DbInternal.getNonNullEnvImpl(env).abnormalClose();
+        env = null;
+
+        /* Recover and check again. */
+        env = new Environment(envHome, envConfig);
+        db = createDb(true);
+        checkExactContentMatch(db, expected);
+        db.close();
+        env.close();
+        env = null;
+    }
+
+    /**
+     * Tests a fix for a LogFileNotFound exception in the following sequence
+     * for a deferred-write database.
+     *
+     *  100 LN-A
+     *  200 BIN-B, parent of LN-A
+     *  ... LN-A is deleted, marked dirty and not logged
+     *  ... BIN-B is compressed, LN-A is counted obsolete in the utilization
+     *      tracker
+     *  300 BIN-B flushed by eviction (this step is optional)
+     *  400 FileSummaryLN with LN-A obsolete offset is flushed as the result
+     *      of utilization tracker eviction
+     *  ... Crash and recover, LN-A is mistakenly inserted into BIN-B by redo
+     *
+     * When the log file containing 100 LN-A is cleaned, it will not be
+     * migrated because it was counted obsolete.  Yet it is referenced by its
+     * BIN parent.  This caused a LogFileNotFound exception later when
+     * attempting to access the LN.
+     *
+     * [#16864]
+     */
+    @Test
+    public void testCleanAfterDelete() {
+        if (dups) {
+            /* There is no variant of this test for dups. */
+            return;
+        }
+        final int CACHE_SIZE = 4 << 20;
+        final EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam
+            (EnvironmentConfig.MAX_MEMORY, String.valueOf(CACHE_SIZE));
+        envConfig.setConfigParam
+            (EnvironmentConfig.CLEANER_DETAIL_MAX_MEMORY_PERCENTAGE, "1");
+        envConfig.setConfigParam
+            (EnvironmentConfig.CLEANER_EXPUNGE, "false");
+        /* Disable daemons to prevent async compression. */
+        envConfig.setConfigParam
+            (EnvironmentConfig.ENV_RUN_CLEANER, "false");
+        envConfig.setConfigParam
+            (EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false");
+        envConfig.setConfigParam
+            (EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false");
+        env = new Environment(envHome, envConfig);
+
+        final DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setDeferredWrite(true);
+        Database db = env.openDatabase(null, DBNAME, dbConfig);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry(new byte[1000]);
+        final int N_RECORDS = 10000;
+
+        IntegerBinding.intToEntry(0, key);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoOverwrite(null, key, data));
+        IntegerBinding.intToEntry(1, key);
+        assertSame(OperationStatus.SUCCESS,
+                   db.putNoOverwrite(null, key, data));
+        db.sync();
+        IntegerBinding.intToEntry(0, key);
+        assertSame(OperationStatus.SUCCESS,
+                   db.delete(null, key));
+        env.compress();
+        db.sync();
+
+        /* Cause enough eviction to flush the FileSummaryLNs. */
+        for (int j = 1; j <= 3; j += 1) {
+            for (int i = 1; i < N_RECORDS; i += 1) {
+                IntegerBinding.intToEntry(i, key);
+                assertSame(OperationStatus.SUCCESS,
+                           db.put(null, key, data));
+            }
+            db.sync();
+        }
+
+        /* Crash and recover.
*/ + DbInternal.getNonNullEnvImpl(env).abnormalClose(); + db = null; + env = null; + envConfig.setAllowCreate(false); + env = new Environment(envHome, envConfig); + dbConfig.setAllowCreate(false); + db = env.openDatabase(null, DBNAME, dbConfig); + + /* Create enough waste to cause log file zero to be cleaned. */ + for (int i = 1; i < N_RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + assertSame(OperationStatus.SUCCESS, + db.put(null, key, data)); + } + db.sync(); + for (int i = 1; i < N_RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + assertSame(OperationStatus.SUCCESS, + db.delete(null, key)); + } + db.sync(); + env.cleanLog(); + env.checkpoint(CHECKPOINT_FORCE_CONFIG); + assertTrue(!(new File(envHome, TestUtils.LOG_FILE_NAME)).exists()); + + /* Before the fix, a LogFileNotFound exception was thrown here. */ + IntegerBinding.intToEntry(0, key); + assertSame(OperationStatus.NOTFOUND, + db.get(null, key, data, null)); + + db.close(); + env.close(); + env = null; + } + + /** + * Tests a fix for a bug that incorrectly counts INs obsolete when they are + * pruned (via the compressor). Since with a DW DB, the parents of the + * pruned INs are not logged, the pruned INs should not be immediately + * counted obsolete. [#21348] + */ + @Test + public void testPruneBINs() { + if (dups) { + /* There is no special variant of this test for dups. */ + return; + } + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentConfig.CLEANER_EXPUNGE, "false"); + envConfig.setConfigParam + (EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam + (EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam + (EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam + (EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + env = new Environment(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setDeferredWrite(true); + Database db = env.openDatabase(null, DBNAME, dbConfig); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[10]); + final int N_RECORDS = 2000; + + /* Fill a few BINs and sync to make them durable. */ + for (int i = 1; i < N_RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + assertSame(OperationStatus.SUCCESS, + db.put(null, key, data)); + } + db.sync(); + + /* Delete enough records to empty a few BINs. */ + for (int i = 1; i < N_RECORDS / 2; i += 1) { + IntegerBinding.intToEntry(i, key); + assertSame(OperationStatus.SUCCESS, + db.delete(null, key)); + } + + /* + * Sync env to make deletions durable and do a checkpoint. The + * compression below must be after the last CkptEnd, in order to + * provoke the bug. If the compression were done before the + * checkpoint, the checkpoint would flush the compressed parents. + */ + env.sync(); + + /* + * Due to the bug, compress will record pruned IN offsets obsolete. + * However, in a DW DB we have not yet flushed the parent INs. + */ + env.compress(); + env.flushLog(true); + + /* Crash and recover. */ + DbInternal.getNonNullEnvImpl(env).abnormalClose(); + db = null; + env = null; + envConfig.setAllowCreate(false); + env = new Environment(envHome, envConfig); + dbConfig.setAllowCreate(false); + db = env.openDatabase(null, DBNAME, dbConfig); + + /* Check for LSN counted obsolete incorrectly. 
*/ + VerifyUtils.checkLsns(db); + + db.close(); + env.close(); + env = null; + } + + /** + * Ensure that a cursor keeps a lock in the face of the LSN changing, which + * occurs when logging during a eviction, checkpoint or DB sync. + */ + @Test + public void testLockDuringLogging() { + + final EnvironmentConfig envConfig = + getEnvConfig(false /*transactional*/); + envConfig.setLockTimeout(1, TimeUnit.MILLISECONDS); + env = new Environment(envHome, envConfig); + + final Database db = createDb(true /*deferredWrite*/); + final DatabaseEntry key = new DatabaseEntry(new byte[1]); + final DatabaseEntry data1 = new DatabaseEntry(new byte[1]); + final DatabaseEntry data2 = new DatabaseEntry(new byte[2]); + + /* Insert dup records with a cursor, holding a write lock. */ + final Cursor cursor1 = db.openCursor(null, null); + cursor1.put(key, data1); + final Cursor cursor2 = db.openCursor(null, null); + cursor2.put(key, data2); + + /* Sync DB to cause LSN to change. New LSN should be locked. */ + db.sync(); + + /* + * In a separate thread (cursors in the same thread share locks), + * attempt to read the record. This should cause a lock conflict. + */ + junitThread = new JUnitThread("testLockDuringLogging") { + @Override + public void testBody() { + try { + db.get(null, key, data1, null); + fail(); + } catch (LockConflictException expected) { + } + } + }; + + junitThread.start(); + try { + junitThread.finishTest(); + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } + + cursor1.close(); + cursor2.close(); + db.close(); + closeEnv(true /*normalClose*/); + } + + /* Test that the transient Lsn under deferred write mode obeys the rule. */ + @Test + public void testTransientLsn() + throws Throwable { + + /* Duplicate support is not completed yet. */ + dups = false; + + /* Open a deferred write database. */ + final EnvironmentConfig envConfig = getEnvConfig(false); + env = new Environment(envHome, envConfig); + final Database db = createDb(true); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + final Cursor cursor = db.openCursor(null, null); + IntegerBinding.intToEntry(1, key); + StringBinding.stringToEntry("herococo", data); + + /* If no slot reuse, its file number should be 0xFFFFFFFFL. */ + cursor.put(key, data); + CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + long currentLsn = cursorImpl.getBIN().getLsn(cursorImpl.getIndex()); + assertTrue(DbLsn.isTransient(currentLsn)); + + /* Reuse the slot shouldn't locate another new transient Lsn. */ + StringBinding.stringToEntry("coco", data); + cursor.put(key, data); + cursorImpl = DbInternal.getCursorImpl(cursor); + long newLsn = cursorImpl.getBIN().getLsn(cursorImpl.getIndex()); + assertTrue(newLsn == currentLsn); + + /* After sync, the lsn should be a real Lsn. */ + db.sync(); + currentLsn = cursorImpl.getBIN().getLsn(cursorImpl.getIndex()); + assertFalse(DbLsn.isTransient(currentLsn)); + + cursor.close(); + db.close(); + closeEnv(true); + } +} diff --git a/test/com/sleepycat/je/test/ForeignKeyTest.java b/test/com/sleepycat/je/test/ForeignKeyTest.java new file mode 100644 index 0000000..65ff7d9 --- /dev/null +++ b/test/com/sleepycat/je/test/ForeignKeyTest.java @@ -0,0 +1,449 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DeleteConstraintException; +import com.sleepycat.je.ForeignConstraintException; +import com.sleepycat.je.ForeignKeyDeleteAction; +import com.sleepycat.je.ForeignKeyNullifier; +import com.sleepycat.je.ForeignMultiKeyNullifier; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.TestUtils; + +@RunWith(Parameterized.class) +public class ForeignKeyTest extends MultiKeyTxnTestCase { + + @Parameters + public static List genParams() { + return paramsHelper(false); + } + + protected static List paramsHelper(boolean rep) { + final String[] txnTypes = getTxnTypes(null, rep); + final List newParams = new ArrayList(); + for (final String type : txnTypes) { + newParams.add(new Object[] {type, true}); + newParams.add(new Object[] {type, false}); + } + + return newParams; + } + public ForeignKeyTest(String type, boolean multiKey){ + initEnvConfig(); + txnType = type; + useMultiKey = multiKey; + isTransactional = (txnType != TXN_NULL); + customName = ((useMultiKey) ? "multiKey" : "") + "-" + txnType; + } + + @Test + public void testDupsNotAllowed() + throws DatabaseException { + + Database priDb1 = openPrimary("pri1"); + Database priDb2 = openPrimary("pri2", true /*duplicates*/); + + try { + openSecondary(priDb1, "sec2", priDb2, ForeignKeyDeleteAction.ABORT); + fail(); + } catch (IllegalArgumentException expected) { + String msg = expected.getMessage(); + assertTrue + (msg, msg.indexOf("Duplicates must not be allowed") >= 0); + } + + priDb1.close(); + priDb2.close(); + } + + @Test + public void testIllegalNullifier() + throws DatabaseException { + + Database priDb1 = openPrimary("pri1"); + Transaction txn = txnBegin(); + MyKeyCreator myCreator = new MyKeyCreator(); + SecondaryConfig config; + + /* A nullifier is required with NULLIFY. */ + config = new SecondaryConfig(); + config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY); + config.setKeyCreator(myCreator); + try { + env.openSecondaryDatabase(txn, "sec1", priDb1, config); + fail(); + } catch (IllegalArgumentException expected) { } + + /* Both nullifiers are not allowed. 
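+         * Exactly one nullifier may be set when the delete action is
+         * NULLIFY. A valid single-key configuration would look like:
+         *
+         *   config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY);
+         *   config.setKeyCreator(myCreator);
+         *   config.setForeignKeyNullifier(myCreator);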
*/ + config = new SecondaryConfig(); + config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY); + config.setKeyCreator(myCreator); + config.setForeignKeyNullifier(myCreator); + config.setForeignMultiKeyNullifier(myCreator); + try { + env.openSecondaryDatabase(txn, "sec1", priDb1, config); + fail(); + } catch (IllegalArgumentException expected) { } + + /* ForeignKeyNullifier is not allowed with MultiKeyCreator. */ + config = new SecondaryConfig(); + config.setForeignKeyDeleteAction(ForeignKeyDeleteAction.NULLIFY); + config.setMultiKeyCreator(new SimpleMultiKeyCreator(myCreator)); + config.setForeignKeyNullifier(myCreator); + try { + env.openSecondaryDatabase(txn, "sec1", priDb1, config); + fail(); + } catch (IllegalArgumentException expected) { } + + txnCommit(txn); + priDb1.close(); + } + + @Test + public void testAbort() + throws DatabaseException { + + doTest(ForeignKeyDeleteAction.ABORT); + } + + @Test + public void testCascade() + throws DatabaseException { + + doTest(ForeignKeyDeleteAction.CASCADE); + } + + @Test + public void testNullify() + throws DatabaseException { + + doTest(ForeignKeyDeleteAction.NULLIFY); + } + + private void doTest(ForeignKeyDeleteAction onDelete) + throws DatabaseException { + + Database priDb1 = openPrimary("pri1"); + Database priDb2 = openPrimary("pri2"); + + SecondaryDatabase secDb1 = openSecondary(priDb1, "sec1", null, null); + SecondaryDatabase secDb2 = openSecondary(priDb2, "sec2", priDb1, + onDelete); + + OperationStatus status; + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry pkey = new DatabaseEntry(); + Transaction txn = txnBegin(); + + /* + * pri1 has a record with primary key 1 and index key 3. + * pri2 has a record with primary key 2 and foreign key 1, + * which is the primary key of pri1. + * pri2 has another record with primary key 3 and foreign key 1, + * to enable testing cascade and nullify for secondary duplicates. + */ + + /* Add three records. */ + + status = priDb1.put(txn, entry(1), entry(3)); + assertEquals(OperationStatus.SUCCESS, status); + + status = priDb2.put(txn, entry(2), entry(1)); + assertEquals(OperationStatus.SUCCESS, status); + + status = priDb2.put(txn, entry(3), entry(1)); + assertEquals(OperationStatus.SUCCESS, status); + + /* Verify record data. 
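+         * Expected state at this point:
+         *
+         *   pri1: {1 -> 3}              sec1: {3 -> pkey 1}
+         *   pri2: {2 -> 1}, {3 -> 1}    sec2: {1 -> pkey 2}, {1 -> pkey 3}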
*/ + + status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, val(data)); + + status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, val(data)); + + status = priDb2.get(txn, entry(2), data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, val(data)); + + status = priDb2.get(txn, entry(3), data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, val(data)); + + SecondaryCursor cursor = secDb2.openSecondaryCursor(txn, null); + status = cursor.getFirst(key, pkey, data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, val(key)); + assertEquals(2, val(pkey)); + assertEquals(1, val(data)); + status = cursor.getNext(key, pkey, data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, val(key)); + assertEquals(3, val(pkey)); + assertEquals(1, val(data)); + status = cursor.getNext(key, pkey, data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + + txnCommit(txn); + txn = txnBegin(); + + /* Test delete action. */ + + if (onDelete == ForeignKeyDeleteAction.ABORT) { + + /* Test that we abort trying to delete a referenced key. */ + + try { + status = priDb1.delete(txn, entry(1)); + fail(); + } catch (DeleteConstraintException expected) { + txnAbort(txn); + txn = txnBegin(); + } + + /* Test that we can put a record into pri2 with a null foreign key + * value. */ + + status = priDb2.put(txn, entry(2), entry(0)); + assertEquals(OperationStatus.SUCCESS, status); + + status = priDb2.put(txn, entry(3), entry(0)); + assertEquals(OperationStatus.SUCCESS, status); + + /* The sec2 records should not be present since the key was set + * to null above. */ + + status = secDb2.get(txn, entry(1), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Test that now we can delete the record in pri1, since it is no + * longer referenced. */ + + status = priDb1.delete(txn, entry(1)); + assertEquals(OperationStatus.SUCCESS, status); + + status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + } else if (onDelete == ForeignKeyDeleteAction.NULLIFY) { + + /* Delete the referenced key. */ + + status = priDb1.delete(txn, entry(1)); + assertEquals(OperationStatus.SUCCESS, status); + + status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + /* The pri2 records should still exist, but should have a zero/null + * secondary key since it was nullified. */ + + status = priDb2.get(txn, entry(2), data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(0, val(data)); + + status = priDb2.get(txn, entry(3), data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(0, val(data)); + + status = secDb2.get(txn, entry(1), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + } else if (onDelete == ForeignKeyDeleteAction.CASCADE) { + + /* Delete the referenced key. 
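+             * With CASCADE, deleting pri1 record 1 must also delete the two
+             * pri2 records whose foreign key references it, plus their sec2
+             * index entries; conceptually:
+             *
+             *   delete(pri1, 1)  =>  delete(pri2, 2) and delete(pri2, 3)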
*/ + + status = priDb1.delete(txn, entry(1)); + assertEquals(OperationStatus.SUCCESS, status); + + status = priDb1.get(txn, entry(1), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + status = secDb1.get(txn, entry(3), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + /* The pri2 records should have deleted also. */ + + status = priDb2.get(txn, entry(2), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + status = priDb2.get(txn, entry(3), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + status = secDb2.get(txn, entry(1), data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + } else { + throw new IllegalStateException(); + } + + /* + * Test that a foreign key value may not be used that is not present + * in the foreign db. Key 2 is not in pri1 in this case. + */ + try { + status = priDb2.put(txn, entry(3), entry(2)); + fail(); + } catch (ForeignConstraintException expected) { } + + txnAbort(txn); + secDb1.close(); + secDb2.close(); + priDb1.close(); + priDb2.close(); + } + + private Database openPrimary(String name) + throws DatabaseException { + + return openPrimary(name, false); + } + + private Database openPrimary(String name, boolean duplicates) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(duplicates); + + Transaction txn = txnBegin(); + try { + return env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + } + + private SecondaryDatabase openSecondary(Database priDb, String dbName, + Database foreignDb, + ForeignKeyDeleteAction onDelete) + throws DatabaseException { + + SecondaryConfig dbConfig = new SecondaryConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + + MyKeyCreator keyCreator = new MyKeyCreator(); + if (useMultiKey) { + dbConfig.setMultiKeyCreator(new SimpleMultiKeyCreator(keyCreator)); + } else { + dbConfig.setKeyCreator(keyCreator); + } + + if (foreignDb != null) { + + if (useMultiKey) { + dbConfig.setForeignMultiKeyNullifier(keyCreator); + } else { + dbConfig.setForeignKeyNullifier(keyCreator); + } + dbConfig.setForeignKeyDatabase(foreignDb); + dbConfig.setForeignKeyDeleteAction(onDelete); + } + + Transaction txn = txnBegin(); + try { + return env.openSecondaryDatabase(txn, dbName, priDb, dbConfig); + } finally { + txnCommit(txn); + } + } + + static private DatabaseEntry entry(int val) { + + return new DatabaseEntry(TestUtils.getTestArray(val)); + } + + static private int val(DatabaseEntry entry) { + + return TestUtils.getTestVal(entry.getData()); + } + + private class MyKeyCreator implements SecondaryKeyCreator, + ForeignMultiKeyNullifier, + ForeignKeyNullifier { + + /* SecondaryKeyCreator */ + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + int val = val(data); + if (val != 0) { + result.setData(TestUtils.getTestArray(val)); + return true; + } else { + return false; + } + } + + /* ForeignMultiKeyNullifier */ + public boolean nullifyForeignKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry secKey) { + DatabaseEntry entry = new DatabaseEntry(); + assertTrue(createSecondaryKey(secondary, null, data, entry)); + assertEquals(entry, secKey); + + return 
nullifyForeignKey(secondary, data); + } + + /* ForeignKeyNullifier */ + public boolean nullifyForeignKey(SecondaryDatabase secondary, + DatabaseEntry data) { + int val = val(data); + if (val != 0) { + data.setData(TestUtils.getTestArray(0)); + return true; + } else { + return false; + } + } + } +} diff --git a/test/com/sleepycat/je/test/InternalCursorTest.java b/test/com/sleepycat/je/test/InternalCursorTest.java new file mode 100644 index 0000000..864beae --- /dev/null +++ b/test/com/sleepycat/je/test/InternalCursorTest.java @@ -0,0 +1,152 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.util.test.TxnTestCase; + +/** + * Tests the use of the Cursor class for internal operations where + * DbInternal.makeCursor is called instead of Database.openCursor. The + * makeCursor method calls Cursor.setNonCloning(true), so this tests the + * NonCloning feature. The NonCloning feature is not available for public API + * Cursors. + */ +@RunWith(Parameterized.class) +public class InternalCursorTest extends TxnTestCase { + + + @Parameters + public static List genParams() { + return getTxnParams(null, false); + } + + public InternalCursorTest(String type){ + initEnvConfig(); + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + /** + * Ensures that a Cursor is removed from the current BIN when Cursor + * methods such as put() and search() are called. These methods pass false + * for the samePosition parameter of beginMoveCursor. Previously the + * CursorImpl was not reset when cloning was disabled, which caused Cursors + * to accumulate in BINs. This test goes along new assertions in + * CursorImpl.setBIN/setDupBIN which check for residual cursors. [#16280] + */ + @Test + public void testAddCursorFix() { + final Database db = openDb("foo", false /*duplicates*/); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(123, data); + + final Transaction txn = txnBeginCursor(); + final Locker locker = (txn != null) ? + DbInternal.getLocker(txn) : + BasicLocker.createBasicLocker(DbInternal.getNonNullEnvImpl(env)); + /* Create a non-sticky Cursor. */ + final Cursor cursor = DbInternal.makeCursor + (DbInternal.getDbImpl(db), locker, null); + + /* Add records to create 2 BINs. 
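+         * 200 records exceed the default BIN capacity (128 entries in JE
+         * unless configured otherwise), so the inserts split the tree into at
+         * least two BINs; the test then moves the single non-sticky cursor
+         * back and forth between them.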
*/ + OperationStatus status; + for (int i = 1; i <= 200; i += 1) { + IntegerBinding.intToEntry(i, key); + status = cursor.put(key, data); + assertSame(OperationStatus.SUCCESS, status); + } + + /* Move to first BIN. */ + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + + /* Put in second BIN. */ + IntegerBinding.intToEntry(200, key); + status = cursor.put(key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Search in first BIN. */ + IntegerBinding.intToEntry(1, key); + status = cursor.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + + /* Put in second BIN. */ + IntegerBinding.intToEntry(200, key); + status = cursor.put(key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Traverse all records. */ + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + for (int i = 1; i <= 200; i += 1) { + assertEquals(i, IntegerBinding.entryToInt(key)); + status = cursor.getNext(key, data, null); + assertSame((i == 200) ? + OperationStatus.NOTFOUND : + OperationStatus.SUCCESS, + status); + } + + /* Put in first BIN. */ + IntegerBinding.intToEntry(1, key); + status = cursor.put(key, data); + assertSame(OperationStatus.SUCCESS, status); + + cursor.close(); + if (txn != null) { + txnCommit(txn); + } else { + locker.operationEnd(true); + } + + db.close(); + } + + private Database openDb(String name, boolean duplicates) { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(duplicates); + + Transaction txn = txnBegin(); + try { + return env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + } +} diff --git a/test/com/sleepycat/je/test/JoinTest.java b/test/com/sleepycat/je/test/JoinTest.java new file mode 100644 index 0000000..8ab75e5 --- /dev/null +++ b/test/com/sleepycat/je/test/JoinTest.java @@ -0,0 +1,489 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.JoinConfig; +import com.sleepycat.je.JoinCursor; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.TestUtils; + +@RunWith(Parameterized.class) +public class JoinTest extends MultiKeyTxnTestCase { + + /* + * DATA sets are pairs of arrays for each record. The first array is + * the record data and has three values in the 0/1/2 positions for the + * secondary key values with key IDs 0/1/2. second array contains a single + * value which is the primary key. + * + * JOIN sets are also pairs of arrays. The first array in each pair has 3 + * values for setting the input cursors. Entries 0/1/2 in that array are + * for secondary keys 0/1/2. The second array is the set of primary keys + * that are expected to match in the join operation. + * + * A zero value for an index key means "don't index", so zero values are + * never used for join index keys since we wouldn't be able to successfully + * position the input cursor. + * + * These values are all stored as bytes, not ints, in the actual records, + * so all values must be within the range of a signed byte. + */ + private static final int[][][] ALL = { + /* Data set #1 - single match possible per record. */ + { + {1, 1, 1}, {11}, + {2, 2, 2}, {12}, + {3, 3, 3}, {13}, + }, { + {1, 1, 1}, {11}, + {2, 2, 2}, {12}, + {3, 3, 3}, {13}, + {1, 2, 3}, {}, + {1, 1, 2}, {}, + {3, 2, 2}, {}, + }, + /* Data set #2 - no match possible when all indices are not present + * (when some are zero). */ + { + {1, 1, 0}, {11}, + {2, 0, 2}, {12}, + {0, 3, 3}, {13}, + {3, 2, 1}, {14}, + }, { + {1, 1, 1}, {}, + {2, 2, 2}, {}, + {3, 3, 3}, {}, + }, + /* Data set #3 - one match in the presence of non-matching records + * (with missing/zero index keys). */ + { + {1, 0, 0}, {11}, + {1, 1, 0}, {12}, + {1, 1, 1}, {13}, + {0, 0, 0}, {14}, + }, { + {1, 1, 1}, {13}, + }, + /* Data set #4 - one match in the presence of non-matching records + * (with non-matching but non-zero values). */ + { + {1, 2, 3}, {11}, + {1, 1, 3}, {12}, + {1, 1, 1}, {13}, + {3, 2, 1}, {14}, + }, { + {1, 1, 1}, {13}, + }, + /* Data set #5 - two matches in the presence of non-matching records. + */ + { + {1, 2, 3}, {11}, + {1, 1, 3}, {12}, + {1, 1, 1}, {13}, + {1, 2, 3}, {14}, + }, { + {1, 2, 3}, {11, 14}, + }, + /* Data set #6 - three matches in the presence of non-matching records. 
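+     * Records 13, 15 and 18 all carry index keys {1,1,1}, so all three match
+     * a join on {1,1,1}; a record matches only if every one of its three
+     * secondary keys equals the corresponding lookup key.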
+ * Also used to verify that cursors are sorted by count: 2, 1, 0 */ + { + {1, 2, 3}, {11}, + {1, 1, 3}, {12}, + {1, 1, 1}, {13}, + {1, 2, 3}, {14}, + {1, 1, 1}, {15}, + {1, 0, 0}, {16}, + {1, 1, 0}, {17}, + {1, 1, 1}, {18}, + {0, 0, 0}, {19}, + {3, 2, 1}, {20}, + }, { + {1, 1, 1}, {13, 15, 18}, + }, + /* Data set #7 - three matches by themselves. */ + { + {1, 2, 3}, {11}, + {1, 2, 3}, {12}, + {1, 2, 3}, {13}, + }, { + {1, 2, 3}, {11, 12, 13}, + }, + }; + + /* Used for testing the cursors are sorted by count. */ + private static final int CURSOR_ORDER_SET = 6; + private static final int[] CURSOR_ORDER = {2, 1, 0}; + + private static EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + static { + envConfig.setAllowCreate(true); + } + + private static JoinConfig joinConfigNoSort = new JoinConfig(); + static { + joinConfigNoSort.setNoSort(true); + } + + @Parameters + public static List genParams() { + return paramsHelper(false); + } + + protected static List paramsHelper(boolean rep) { + final String[] txnTypes = getTxnTypes(null, rep); + final List newParams = new ArrayList(); + for (final String type : txnTypes) { + newParams.add(new Object[] {type, true}); + newParams.add(new Object[] {type, false}); + } + return newParams; + } + + public JoinTest(String type, boolean multiKey){ + super.envConfig = envConfig; + txnType = type; + useMultiKey = multiKey; + isTransactional = (txnType != TXN_NULL); + customName = ((useMultiKey) ? "multiKey" : "") + "-" + txnType; + } + + @Test + public void testJoin() + throws DatabaseException { + + for (CursorConfig config : + new CursorConfig[] { null, CursorConfig.READ_UNCOMMITTED }) { + for (boolean withData : new boolean[] { false, true }) { + for (int i = 0; i < ALL.length; i += 2) { + doJoin(ALL[i], ALL[i + 1], (i / 2) + 1, withData, config); + } + } + } + } + + private void doJoin(int[][] dataSet, + int[][] joinSet, + int setNum, + boolean withData, + CursorConfig cursorConfig) + throws DatabaseException { + + String name = "Set#" + setNum; + Database priDb = openPrimary("pri"); + SecondaryDatabase secDb0 = openSecondary(priDb, "sec0", true, 0); + SecondaryDatabase secDb1 = openSecondary(priDb, "sec1", true, 1); + SecondaryDatabase secDb2 = openSecondary(priDb, "sec2", true, 2); + + OperationStatus status; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn; + txn = txnBegin(); + + for (int i = 0; i < dataSet.length; i += 2) { + int[] vals = dataSet[i]; + setData(data, vals[0], vals[1], vals[2]); + setKey(key, dataSet[i + 1][0]); + status = priDb.put(txn, key, data); + assertEquals(name, OperationStatus.SUCCESS, status); + } + + txnCommit(txn); + txn = txnBeginCursor(); + + SecondaryCursor c0 = secDb0.openSecondaryCursor(txn, cursorConfig); + SecondaryCursor c1 = secDb1.openSecondaryCursor(txn, cursorConfig); + SecondaryCursor c2 = secDb2.openSecondaryCursor(txn, cursorConfig); + SecondaryCursor[] cursors = {c0, c1, c2}; + + for (int i = 0; i < joinSet.length; i += 2) { + int[] indexKeys = joinSet[i]; + int[] priKeys = joinSet[i + 1]; + String prefix = name + " row=" + i; + for (int k = 0; k < 3; k += 1) { + String msg = prefix + " k=" + k + " ikey=" + indexKeys[k]; + setKey(key, indexKeys[k]); + status = cursors[k].getSearchKey(key, data, + LockMode.DEFAULT); + assertEquals(msg, OperationStatus.SUCCESS, status); + } + for (int j = 0; j < 2; j += 1) { + JoinConfig config = (j == 0) ? 
null : joinConfigNoSort; + JoinCursor jc = priDb.join(cursors, config); + assertSame(priDb, jc.getDatabase()); + for (int k = 0; k < priKeys.length; k += 1) { + String msg = prefix + " k=" + k + " pkey=" + priKeys[k]; + if (withData) { + status = jc.getNext(key, data, LockMode.DEFAULT); + } else { + status = jc.getNext(key, LockMode.DEFAULT); + } + assertEquals(msg, OperationStatus.SUCCESS, status); + assertEquals(msg, priKeys[k], key.getData()[0]); + if (withData) { + boolean dataFound = false; + for (int m = 0; m < dataSet.length; m += 2) { + int[] vals = dataSet[m]; + int priKey = dataSet[m + 1][0]; + if (priKey == priKeys[k]) { + for (int n = 0; n < 3; n += 1) { + assertEquals(msg, vals[n], + data.getData()[n]); + dataFound = true; + } + } + } + assertTrue(msg, dataFound); + } + } + String msg = prefix + " no more expected"; + if (withData) { + status = jc.getNext(key, data, LockMode.DEFAULT); + } else { + status = jc.getNext(key, LockMode.DEFAULT); + } + assertEquals(msg, OperationStatus.NOTFOUND, status); + + Cursor[] sorted = DbInternal.getSortedCursors(jc); + assertEquals(CURSOR_ORDER.length, sorted.length); + if (config == joinConfigNoSort) { + Database db0 = sorted[0].getDatabase(); + Database db1 = sorted[1].getDatabase(); + Database db2 = sorted[2].getDatabase(); + assertSame(db0, secDb0); + assertSame(db1, secDb1); + assertSame(db2, secDb2); + } else if (setNum == CURSOR_ORDER_SET) { + Database db0 = sorted[CURSOR_ORDER[0]].getDatabase(); + Database db1 = sorted[CURSOR_ORDER[1]].getDatabase(); + Database db2 = sorted[CURSOR_ORDER[2]].getDatabase(); + assertSame(db0, secDb0); + assertSame(db1, secDb1); + assertSame(db2, secDb2); + } + jc.close(); + } + } + + c0.close(); + c1.close(); + c2.close(); + txnCommit(txn); + + secDb0.close(); + secDb1.close(); + secDb2.close(); + priDb.close(); + + /* Remove dbs since we reuse them multiple times in a single case. */ + txn = txnBegin(); + env.removeDatabase(txn, "pri"); + env.removeDatabase(txn, "sec0"); + env.removeDatabase(txn, "sec1"); + env.removeDatabase(txn, "sec2"); + txnCommit(txn); + } + + /** + * Checks that a join operation does not block writers from inserting + * duplicates with the same main key as the search key. Writers were being + * blocked before we changed join() to use READ_UNCOMMITTED when getting + * the duplicate count for each cursor. 
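+     * Reading the counts without locking means join() holds no read locks on
+     * the duplicate entries themselves, so a concurrent writer can insert new
+     * duplicates under the same secondary keys while the join is in progress.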
[#11833] + */ + @Test + public void testWriteDuringJoin() + throws DatabaseException { + + Database priDb = openPrimary("pri"); + SecondaryDatabase secDb0 = openSecondary(priDb, "sec0", true, 0); + SecondaryDatabase secDb1 = openSecondary(priDb, "sec1", true, 1); + SecondaryDatabase secDb2 = openSecondary(priDb, "sec2", true, 2); + + OperationStatus status; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn; + txn = txnBegin(); + + setKey(key, 13); + setData(data, 1, 1, 1); + status = priDb.put(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + setKey(key, 14); + setData(data, 1, 1, 1); + status = priDb.put(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + + txnCommit(txn); + txn = txnBeginCursor(); + + SecondaryCursor c0 = secDb0.openSecondaryCursor(txn, null); + SecondaryCursor c1 = secDb1.openSecondaryCursor(txn, null); + SecondaryCursor c2 = secDb2.openSecondaryCursor(txn, null); + SecondaryCursor[] cursors = {c0, c1, c2}; + + for (int i = 0; i < 3; i += 1) { + setKey(key, 1); + status = cursors[i].getSearchKey(key, data, + LockMode.READ_UNCOMMITTED); + assertEquals(OperationStatus.SUCCESS, status); + } + + /* join() will get the cursor counts. */ + JoinCursor jc = priDb.join(cursors, null); + + /* + * After calling join(), try inserting dups for the same main key. + * Before the fix to use READ_UNCOMMITTED, this would cause a deadlock. + */ + Transaction writerTxn = txnBegin(); + setKey(key, 12); + setData(data, 1, 1, 1); + status = priDb.put(writerTxn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + + /* The join should retrieve two records, 13 and 14. */ + status = jc.getNext(key, data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(13, key.getData()[0]); + status = jc.getNext(key, data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(14, key.getData()[0]); + status = jc.getNext(key, data, LockMode.DEFAULT); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Try writing again after calling getNext(). 
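+         * getNext() locked only the records it returned (13 and 14), so
+         * inserting a different primary key (11) under the same secondary
+         * keys should still succeed without blocking.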
*/ + setKey(key, 11); + setData(data, 1, 1, 1); + status = priDb.put(writerTxn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + txnCommit(writerTxn); + + jc.close(); + + c0.close(); + c1.close(); + c2.close(); + txnCommit(txn); + + secDb0.close(); + secDb1.close(); + secDb2.close(); + priDb.close(); + } + + private Database openPrimary(String name) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + + Transaction txn = txnBegin(); + try { + return env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + } + + private SecondaryDatabase openSecondary(Database priDb, String dbName, + boolean dups, int keyId) + throws DatabaseException { + + SecondaryConfig dbConfig = new SecondaryConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + if (useMultiKey) { + dbConfig.setMultiKeyCreator + (new SimpleMultiKeyCreator(new MyKeyCreator(keyId))); + } else { + dbConfig.setKeyCreator(new MyKeyCreator(keyId)); + } + + Transaction txn = txnBegin(); + try { + return env.openSecondaryDatabase(txn, dbName, priDb, dbConfig); + } finally { + txnCommit(txn); + } + } + + private static void setKey(DatabaseEntry key, int priKey) { + + byte[] a = new byte[1]; + a[0] = (byte) priKey; + key.setData(a); + } + + private static void setData(DatabaseEntry data, + int key1, int key2, int key3) { + + byte[] a = new byte[4]; + a[0] = (byte) key1; + a[1] = (byte) key2; + a[2] = (byte) key3; + data.setData(a); + } + + private static class MyKeyCreator implements SecondaryKeyCreator { + + private final int keyId; + + MyKeyCreator(int keyId) { + + this.keyId = keyId; + } + + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + byte val = data.getData()[keyId]; + if (val != 0) { + result.setData(new byte[] { val }); + return true; + } else { + return false; + } + } + } +} diff --git a/test/com/sleepycat/je/test/KeyScanTest.java b/test/com/sleepycat/je/test/KeyScanTest.java new file mode 100644 index 0000000..28ade12 --- /dev/null +++ b/test/com/sleepycat/je/test/KeyScanTest.java @@ -0,0 +1,201 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class KeyScanTest extends TestBase { + + private File envHome; + private Environment env; + + public KeyScanTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + try { + closeEnv(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + + envHome = null; + env = null; + } + + private void openEnv() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + } + + private void closeEnv() + throws DatabaseException { + + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testKeyScan() { + doKeyScan(false /*dups*/); + } + + @Test + public void testKeyScanDup() { + doKeyScan(true /*dups*/); + } + + private void doKeyScan(final boolean dups) { + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + final int RECORD_COUNT = 3 * 500; + OperationStatus status; + + /* Open env, write data, close. */ + openEnv(); + Database db = env.openDatabase(null, "foo", dbConfig); + for (int i = 0; i < RECORD_COUNT; i += 1) { + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(1, data); + status = db.putNoOverwrite(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + if (dups && ((i % 2) == 1)) { + IntegerBinding.intToEntry(2, data); + status = db.putNoDupData(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + } + } + db.close(); + closeEnv(); + + /* Open env, preload without loading LNs. */ + openEnv(); + dbConfig.setAllowCreate(false); + db = env.openDatabase(null, "foo", dbConfig); + db.preload(new PreloadConfig()); + + /* Clear stats. */ + final StatsConfig statsConfig = new StatsConfig(); + statsConfig.setClear(true); + EnvironmentStats stats = env.getStats(statsConfig); + + /* Key scan with dirty read. 
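+          * The setPartial(0, 0, true) call below requests a zero-length slice
+          * of the data, so reads can be satisfied from the BINs alone without
+          * fetching LNs; combined with READ_UNCOMMITTED this is the key-only
+          * scan pattern:
+          *
+          *   data.setPartial(0, 0, true);                    // no data bytes
+          *   cursor.getNext(key, data, LockMode.READ_UNCOMMITTED);
+          *
+          * The stats check at the end of each pass asserts that no cache
+          * misses occurred, i.e. no LN was ever fetched.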
*/ + for (int variant = 0; variant < 2; variant += 1) { + LockMode lockMode = null; + CursorConfig cursorConfig = null; + switch (variant) { + case 0: + lockMode = LockMode.READ_UNCOMMITTED; + break; + case 1: + cursorConfig = CursorConfig.READ_UNCOMMITTED; + break; + default: + fail(); + } + data.setPartial(0, 0, true); + Cursor c = db.openCursor(null, cursorConfig); + int count = 0; + int expectKey = 0; + if (dups) { + while (c.getNextNoDup(key, data, lockMode) == + OperationStatus.SUCCESS) { + assertEquals(count, IntegerBinding.entryToInt(key)); + count += 1; + } + } else { + while (c.getNext(key, data, lockMode) == + OperationStatus.SUCCESS) { + assertEquals(count, IntegerBinding.entryToInt(key)); + count += 1; + } + } + assertEquals(RECORD_COUNT, count); + + /* Try other misc operations. */ + status = c.getFirst(key, data, lockMode); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(0, IntegerBinding.entryToInt(key)); + + status = c.getLast(key, data, lockMode); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(RECORD_COUNT - 1, IntegerBinding.entryToInt(key)); + + IntegerBinding.intToEntry(RECORD_COUNT / 2, key); + status = c.getSearchKey(key, data, lockMode); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(RECORD_COUNT / 2, IntegerBinding.entryToInt(key)); + + IntegerBinding.intToEntry(RECORD_COUNT / 2, key); + status = c.getSearchKeyRange(key, data, lockMode); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(RECORD_COUNT / 2, IntegerBinding.entryToInt(key)); + + c.close(); + + /* Expect no cache misses. */ + stats = env.getStats(statsConfig); + assertEquals(0, stats.getNCacheMiss()); + assertEquals(0, stats.getNNotResident()); + } + + db.close(); + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/test/LogFileDeletionCrashEnvTest.java b/test/com/sleepycat/je/test/LogFileDeletionCrashEnvTest.java new file mode 100644 index 0000000..ece4170 --- /dev/null +++ b/test/com/sleepycat/je/test/LogFileDeletionCrashEnvTest.java @@ -0,0 +1,379 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/* + * Test the unexpected log file deletion detect code. 
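+ * A background task periodically verifies that the .jdb files the
+ * environment knows about still exist (governed by the
+ * EnvironmentParams.LOG_DETECT_FILE_DELETE parameter exercised below);
+ * when a file has vanished, the environment is invalidated.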
+ * With this functionality, EnvironmentFailureException can be thrown as soon as possible. + * Without this functionality, read access can proceed even when log files + * are deleted unexpectedly. + */ + public class LogFileDeletionCrashEnvTest extends TestBase { + private Environment env; + private Database db; + private File envHome; + private Cursor c; + + private final int recNum = 1000 * 50; //(1000 * 500) * 50 files + private final int dataLen = 500; + private static final int dirs = 3; + + private static final EnvironmentConfig envConfigWithDetectSingle + = initConfig(); + private static final EnvironmentConfig envConfigWithoutDetectSingle + = initConfig(); + private static final EnvironmentConfig envConfigWithDetectMulti + = initConfig(); + private static final EnvironmentConfig envConfigWithoutDetectMulti + = initConfig(); + + static { + envConfigWithoutDetectSingle.setConfigParam( + EnvironmentParams.LOG_DETECT_FILE_DELETE.getName(), "false"); + + envConfigWithDetectMulti.setConfigParam( + EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), dirs + ""); + + envConfigWithoutDetectMulti.setConfigParam( + EnvironmentParams.LOG_DETECT_FILE_DELETE.getName(), "false"); + envConfigWithoutDetectMulti.setConfigParam( + EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), dirs + ""); + } + + @Before + public void setUp() + throws Exception { + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() + throws Exception { + + if (c != null) { + try { + c.close(); + } catch (EnvironmentFailureException efe) { + // do nothing + } + c = null; + } + + if (db != null) { + try { + db.close(); + } catch (EnvironmentFailureException efe) { + // do nothing + } + db = null; + } + + if (env != null) { + try { + env.close(); + } catch (EnvironmentFailureException efe) { + // do nothing + } + env = null; + } + + super.tearDown(); + } + + private static EnvironmentConfig initConfig() { + EnvironmentConfig config = TestUtils.initEnvConfig(); + config.setAllowCreate(true); + config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_VERIFIER, "false"); + config.setCacheSize(1000000); + config.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000000"); + return config; + } + + /* + * The following 4 test cases test log file deletion. + */ + @Test + public void testLogDeleteWithDetectForSingleEnv() { + testLogFileDeletionInternal(envConfigWithDetectSingle); + } + + @Test + public void testLogDeleteWithoutDetectForSingleEnv() { + testLogFileDeletionInternal(envConfigWithoutDetectSingle); + } + + @Test + public void testLogDeleteWithDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testLogFileDeletionInternal(envConfigWithDetectMulti); + } + + @Test + public void testLogDeleteWithoutDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testLogFileDeletionInternal(envConfigWithoutDetectMulti); + } + + /* + * The following 6 test cases directly test directory deletion. 
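+     * Here "envHome" removes the whole environment directory, "datadir"
+     * removes a single dataNNN subdirectory (meaningful only when
+     * LOG_N_DATA_DIRECTORIES is set), and "rename" moves a directory aside
+     * instead of deleting it; see deleteDir() for the shell commands used.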
+ */ + @Test + public void testRootDirDeleteWithDetectForSingleEnv() { + testDirDeletionInternal(envConfigWithDetectSingle, "envHome", null); + } + + @Test + public void testRootDirDeleteWithoutDetectForSingleEnv() { + testDirDeletionInternal(envConfigWithoutDetectSingle, "envHome", null); + } + + @Test + public void testRootDirDeleteWithDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithDetectMulti, "envHome", null); + } + + @Test + public void testRootDirDeleteWithoutDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithoutDetectMulti, "envHome", null); + } + + @Test + public void testDataDirDeleteWithDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithDetectMulti, "datadir", null); + } + + @Test + public void testDataDirDeleteWithoutDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithoutDetectMulti, "datadir", null); + } + + /* + * The following 6 test cases test directory rename. + */ + @Test + public void testRootRenameWithDetectForSingleEnv() { + testDirDeletionInternal(envConfigWithDetectSingle, "rename", true); + } + + @Test + public void testRootRenameWithoutDetectForSingleEnv() { + testDirDeletionInternal(envConfigWithoutDetectSingle, "rename", true); + } + + @Test + public void testDataDirRenameWithDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithDetectMulti, "rename", false); + } + + @Test + public void testDataDirRenameWithoutDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithoutDetectMulti, "rename", false); + } + + @Test + public void testRootRenameWithDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithDetectMulti, "rename", true); + } + + @Test + public void testRootRenameWithoutDetectForMultiEnv() { + TestUtils.createEnvHomeWithSubDir(envHome, dirs); + testDirDeletionInternal(envConfigWithoutDetectMulti, "rename", true); + } + + private void testLogFileDeletionInternal(EnvironmentConfig config) { + openEnvAndDb(config); + initialDb(); + /* The first pass traverse to add file handles to fileCache. */ + tranverseDb(false); + deleteFiles(); + tranverseDb(true); + } + + private void testDirDeletionInternal( + EnvironmentConfig config, + String action, + Boolean root) { + + openEnvAndDb(config); + initialDb(); + /* The first pass traverse to add file handles to fileCache. */ + tranverseDb(false); + deleteDir(action, root); + tranverseDb(true); + } + + public void openEnvAndDb(EnvironmentConfig config) { + env = new Environment(envHome, config); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + final String dbName = "tempDB"; + db = env.openDatabase(null, dbName, dbConfig); + + c = db.openCursor(null, null); + } + + public void initialDb() { + try { + for (int i = 0 ; i < recNum; i++) { + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(i, key); + final DatabaseEntry data = new DatabaseEntry(new byte[dataLen]); + db.put(null, key, data); + } + } catch (DatabaseException dbe) { + throw new RuntimeException("Initiate Database fails.", dbe); + } + + final int totalFiles = + DbInternal.getEnvironmentImpl(env).getFileManager(). 
+ getAllFileNumbers().length; + assert totalFiles < 100 : "Total file number is " + totalFiles; + } + + public void deleteFiles() { + final EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env); + final FileManager fm = envImpl.getFileManager(); + int nDirs = envImpl.getConfigManager().getInt( + EnvironmentParams.LOG_N_DATA_DIRECTORIES); + if (nDirs == 0) { + nDirs = 1; + } + final File[] files = fm.listJDBFiles(); + for (int index = 0; index < files.length - nDirs; index++) { + files[index].delete(); + } + + final int totalFiles = + DbInternal.getEnvironmentImpl(env).getFileManager(). + getAllFileNumbers().length; + assert totalFiles == nDirs : "Total file number is " + totalFiles; + } + + public void deleteDir(String action, Boolean root) { + String shellCmd = ""; + try { + String envHomePath = envHome.getCanonicalPath(); + String dataDirPath = envHomePath + "/data001"; + String envHomePathNew = envHomePath + ".new"; + String dataDirPathNew = dataDirPath + ".new"; + + if (action.equals("envHome")) { + shellCmd = "rm -rf " + envHomePath; + } else if (action.equals("datadir")) { + shellCmd = "rm -rf " + dataDirPath; + } else if (action.equals("rename")) { + if (root.booleanValue()) { + shellCmd = + "mv " + envHomePath + " " + envHomePathNew + + " && " + "sleep 5" + + " && " + "rm -rf " + envHomePathNew; + } else { + shellCmd = "mv " + dataDirPath + " " + dataDirPathNew; + } + } + + final ProcessBuilder pb = + new ProcessBuilder("/bin/bash", "-c", shellCmd); + pb.redirectErrorStream(true); + final Process p = pb.start(); + if (p != null) { + final int retvalue = p.waitFor(); + if (retvalue != 0) { + throw new IOException( + "The created process exited abnormally"); + } + } else { + throw new IOException("The created process is null"); + } + } catch (Exception e) { + throw new RuntimeException( + "An error occurred while executing " + shellCmd, e); + } + } + + public void tranverseDb(boolean check) { + boolean detect = DbInternal.getEnvironmentImpl(env).getConfigManager(). + getBoolean(EnvironmentParams.LOG_DETECT_FILE_DELETE); + try { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + assert c.getFirst(key, data, null) == OperationStatus.SUCCESS : + "The db should contain at least one record"; + /* Sleep at least 1s to let the TimerTask execute. */ + try {Thread.sleep(1000);} catch (Exception e) {} + while (c.getNext(key, data, null) == OperationStatus.SUCCESS) { + // Do nothing + } + + if (check) { + if (detect) { + fail("With log file deletion detection enabled, we should " + + "catch EnvironmentFailureException."); + } + } + } catch (EnvironmentFailureException efe) { + if (check) { + if (!detect) { + efe.printStackTrace(); + fail("Without log file deletion detection, we should " + + "not catch EnvironmentFailureException."); + } + } + // Leave tearDown() to close cursor, db and env. + } + } +} \ No newline at end of file diff --git a/test/com/sleepycat/je/test/MultiEnvOpenCloseTest.java b/test/com/sleepycat/je/test/MultiEnvOpenCloseTest.java new file mode 100644 index 0000000..2e20b05 --- /dev/null +++ b/test/com/sleepycat/je/test/MultiEnvOpenCloseTest.java @@ -0,0 +1,99 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import java.io.File; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test out-of-memory fix to DaemonThread [#10504]. + */ +public class MultiEnvOpenCloseTest extends TestBase { + + private File envHome; + + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @Test + public void testMultiOpenClose() + throws Exception { + + /* + * Before fixing the bug in DaemonThread [#10504] this test would run + * out of memory after 7 iterations. The bug was, if we open an + * environment read-only we won't start certain daemon threads, they + * will not be GC'ed because they are part of a thread group, and they + * will retain a reference to the Environment. The fix was to not + * create the threads until we need to start them. + */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + + final int DATA_SIZE = 1024 * 10; + final int N_RECORDS = 1000; + final int N_ITERS = 30; + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[DATA_SIZE]); + + Environment env = new Environment(envHome, envConfig); + Database db = env.openDatabase(null, "MultiEnvOpenCloseTest", + dbConfig); + for (int i = 0; i < N_RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + db.put(null, key, data); + } + + db.close(); + env.close(); + + envConfig.setAllowCreate(false); + envConfig.setReadOnly(true); + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(true); + + for (int i = 1; i <= N_ITERS; i += 1) { + //System.out.println("MultiEnvOpenCloseTest iteration # " + i); + env = new Environment(envHome, envConfig); + db = env.openDatabase(null, "MultiEnvOpenCloseTest", dbConfig); + for (int j = 0; j < N_RECORDS; j += 1) { + IntegerBinding.intToEntry(j, key); + db.get(null, key, data, null); + } + db.close(); + env.close(); + } + } +} diff --git a/test/com/sleepycat/je/test/MultiKeyTxnTestCase.java b/test/com/sleepycat/je/test/MultiKeyTxnTestCase.java new file mode 100644 index 0000000..39a9683 --- /dev/null +++ b/test/com/sleepycat/je/test/MultiKeyTxnTestCase.java @@ -0,0 +1,58 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import java.util.Set; + +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.SecondaryMultiKeyCreator; +import com.sleepycat.util.test.TxnTestCase; + +/** + * Permutes a TxnTestCase over a boolean property for using multiple secondary + * keys. + */ +public abstract class MultiKeyTxnTestCase extends TxnTestCase { + + boolean useMultiKey = false; + + /** + * Wraps a single key creator to exercise the multi-key code for tests that + * only create a single secondary key. + */ + static class SimpleMultiKeyCreator + implements SecondaryMultiKeyCreator { + + private SecondaryKeyCreator keyCreator; + + SimpleMultiKeyCreator(SecondaryKeyCreator keyCreator) { + this.keyCreator = keyCreator; + } + + public void createSecondaryKeys(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + Set results) + throws DatabaseException { + + DatabaseEntry result = new DatabaseEntry(); + if (keyCreator.createSecondaryKey(secondary, key, data, result)) { + results.add(result); + } + } + } +} diff --git a/test/com/sleepycat/je/test/OpStatsTest.java b/test/com/sleepycat/je/test/OpStatsTest.java new file mode 100644 index 0000000..e87dff2 --- /dev/null +++ b/test/com/sleepycat/je/test/OpStatsTest.java @@ -0,0 +1,628 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.Get; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.Put; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +import org.junit.Before; +import org.junit.Test; + +/** + * Tests throughput/Op statistics. 
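+ * Each operation type (insert, update, search, position, delete, and the
+ * corresponding failure cases) is exercised against primary and secondary
+ * databases, and the matching EnvironmentStats counters (getPriInsertOps,
+ * getSecUpdateOps, etc.) are then checked on the master and, for replicated
+ * writes, on the replicas.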
+ */ +public class OpStatsTest extends RepTestBase { + + @Override + @Before + public void setUp() throws Exception { + groupSize = 3; + super.setUp(); + for (RepEnvInfo info : repEnvInfo) { + info.getEnvConfig().setDurability(new Durability( + Durability.SyncPolicy.NO_SYNC, + Durability.SyncPolicy.NO_SYNC, + Durability.ReplicaAckPolicy.ALL)); + } + } + + @Test + public void testPriOps() { + + createGroup(); + + final ReplicatedEnvironment env = repEnvInfo[0].getEnv(); + final Database db = env.openDatabase(null, "foo", dbconfig); + + expectStats(0, null); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + data.setData(new byte[100]); + + /* Primary insert. */ + + IntegerBinding.intToEntry(1, key); + OperationResult result = + db.put(null, key, data, Put.NO_OVERWRITE, null); + assertNotNull(result); + + Transaction txn = env.beginTransaction(null, null); + try (final Cursor c = db.openCursor(txn, null)) { + IntegerBinding.intToEntry(2, key); + result = c.put(key, data, Put.OVERWRITE, null); + assertNotNull(result); + } + txn.commit(); + + expectStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriInsertOps()); + } + }); + + /* Primary insert failure. */ + + IntegerBinding.intToEntry(1, key); + result = db.put(null, key, data, Put.NO_OVERWRITE, null); + assertNull(result); + + txn = env.beginTransaction(null, null); + try (final Cursor c = db.openCursor(txn, null)) { + IntegerBinding.intToEntry(2, key); + result = c.put(key, data, Put.NO_OVERWRITE, null); + assertNull(result); + } + txn.commit(); + + expectMasterStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriInsertFailOps()); + } + }); + + /* Primary update. */ + + IntegerBinding.intToEntry(1, key); + result = db.put(null, key, data, Put.OVERWRITE, null); + assertNotNull(result); + + txn = env.beginTransaction(null, null); + try (final Cursor c = db.openCursor(txn, null)) { + IntegerBinding.intToEntry(2, key); + result = c.put(key, data, Put.OVERWRITE, null); + assertNotNull(result); + } + txn.commit(); + + expectStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriUpdateOps()); + } + }); + + /* Primary search. */ + + IntegerBinding.intToEntry(1, key); + result = db.get(null, key, data, Get.SEARCH, null); + assertNotNull(result); + + try (final Cursor c = db.openCursor(null, null)) { + IntegerBinding.intToEntry(2, key); + result = c.get(key, data, Get.SEARCH, null); + assertNotNull(result); + } + + expectMasterStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriSearchOps()); + } + }); + + /* Primary search failure. */ + + IntegerBinding.intToEntry(10, key); + result = db.get(null, key, data, Get.SEARCH, null); + assertNull(result); + + try (final Cursor c = db.openCursor(null, null)) { + IntegerBinding.intToEntry(20, key); + result = c.get(key, data, Get.SEARCH, null); + assertNull(result); + } + + expectMasterStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriSearchFailOps()); + } + }); + + /* Primary position. 
*/ + + try (final Cursor c = db.openCursor(null, null)) { + result = c.get(key, data, Get.FIRST, null); + assertNotNull(result); + result = c.get(key, data, Get.NEXT, null); + assertNotNull(result); + result = c.get(key, data, Get.NEXT, null); + assertNull(result); + } + + expectMasterStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriPositionOps()); + } + }); + + /* Primary deletion. */ + + IntegerBinding.intToEntry(1, key); + result = db.delete(null, key, null); + assertNotNull(result); + + txn = env.beginTransaction(null, null); + try (final Cursor c = db.openCursor(txn, null)) { + IntegerBinding.intToEntry(2, key); + result = c.get(key, data, Get.SEARCH, null); + assertNotNull(result); + result = c.delete(null); + assertNotNull(result); + } + txn.commit(); + + expectStats(2, 1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + if (isMaster) { + assertEquals(1, stats.getPriSearchOps()); + } + assertEquals(2, stats.getPriDeleteOps()); + } + }); + + /* Primary deletion failure. */ + + IntegerBinding.intToEntry(1, key); + result = db.delete(null, key, null); + assertNull(result); + + expectMasterStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(1, stats.getPriDeleteFailOps()); + } + }); + + /* Close */ + + db.close(); + closeNodes(repEnvInfo); + } + + @Test + public void testSecOps() { + + createGroup(); + + final Database[] priDbs = new Database[repEnvInfo.length]; + + final SecondaryDatabase[] secDbs = + new SecondaryDatabase[repEnvInfo.length]; + + for (int i = 0; i < repEnvInfo.length; i += 1) { + + final ReplicatedEnvironment env = repEnvInfo[i].getEnv(); + + final Database db = env.openDatabase(null, "pri", dbconfig); + + final SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setAllowCreate(i == 0); + secConfig.setTransactional(true); + secConfig.setSortedDuplicates(true); + secConfig.setKeyCreator(new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey( + SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData(key.getData()); + return true; + } + }); + + final SecondaryDatabase secDb = + env.openSecondaryDatabase(null, "secDb", db, secConfig); + + priDbs[i] = db; + secDbs[i] = secDb; + } + + final ReplicatedEnvironment env = repEnvInfo[0].getEnv(); + final Database db = priDbs[0]; + final SecondaryDatabase secDb = secDbs[0]; + + expectStats(0, null); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + data.setData(new byte[100]); + + /* Secondary insert. */ + + IntegerBinding.intToEntry(1, key); + OperationResult result = + db.put(null, key, data, Put.NO_OVERWRITE, null); + assertNotNull(result); + + Transaction txn = env.beginTransaction(null, null); + try (final Cursor c = db.openCursor(txn, null)) { + IntegerBinding.intToEntry(2, key); + result = c.put(key, data, Put.OVERWRITE, null); + assertNotNull(result); + } + txn.commit(); + + expectStats(2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriInsertOps()); + assertEquals(2, stats.getSecInsertOps()); + } + }); + + /* Secondary update. 
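+           Per the assertions below, evidently only the TTL-updating put
+           needs to rewrite the secondary record; the plain overwrite
+           leaves the unchanged secondary key alone, so secUpdateOps is 1
+           while priUpdateOps is 2.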
*/ + + IntegerBinding.intToEntry(1, key); + result = db.put( + null, key, data, Put.OVERWRITE, + new WriteOptions().setTTL(1).setUpdateTTL(true)); + assertNotNull(result); + + txn = env.beginTransaction(null, null); + try (final Cursor c = db.openCursor(txn, null)) { + IntegerBinding.intToEntry(2, key); + result = c.put( + key, data, Put.OVERWRITE, null); + assertNotNull(result); + } + txn.commit(); + + expectStats(2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriUpdateOps()); + assertEquals(1, stats.getSecUpdateOps()); + } + }); + + /* Secondary search. */ + + IntegerBinding.intToEntry(1, key); + result = secDb.get(null, key, data, Get.SEARCH, null); + assertNotNull(result); + + try (final Cursor c = secDb.openCursor(null, null)) { + IntegerBinding.intToEntry(2, key); + result = c.get(key, data, Get.SEARCH, null); + assertNotNull(result); + } + + expectMasterStats(2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getSecSearchOps()); + assertEquals(2, stats.getPriSearchOps()); + } + }); + + /* Secondary search failure. */ + + IntegerBinding.intToEntry(10, key); + result = secDb.get(null, key, data, Get.SEARCH, null); + assertNull(result); + + try (final Cursor c = secDb.openCursor(null, null)) { + IntegerBinding.intToEntry(20, key); + result = c.get(key, data, Get.SEARCH, null); + assertNull(result); + } + + expectMasterStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getSecSearchFailOps()); + } + }); + + /* Secondary position. */ + + try (final Cursor c = secDb.openCursor(null, null)) { + result = c.get(key, data, Get.FIRST, null); + assertNotNull(result); + result = c.get(key, data, Get.NEXT, null); + assertNotNull(result); + result = c.get(key, data, Get.NEXT, null); + assertNull(result); + } + + expectMasterStats(2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getSecPositionOps()); + assertEquals(2, stats.getPriSearchOps()); + } + }); + + /* Secondary deletion via primary DB. */ + + IntegerBinding.intToEntry(1, key); + result = db.delete(null, key, null); + assertNotNull(result); + + expectStats(2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(1, stats.getPriDeleteOps()); + assertEquals(1, stats.getSecDeleteOps()); + } + }); + + /* Secondary deletion via secondary DB. */ + + IntegerBinding.intToEntry(2, key); + result = secDb.delete(null, key, null); + assertNotNull(result); + + expectStats(2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(1, stats.getPriDeleteOps()); + assertEquals(1, stats.getSecDeleteOps()); + } + }); + + /* Insert records again so we can delete them again. */ + + IntegerBinding.intToEntry(1, key); + result = db.put(null, key, data, Put.NO_OVERWRITE, null); + assertNotNull(result); + + IntegerBinding.intToEntry(2, key); + result = db.put(null, key, data, Put.NO_OVERWRITE, null); + assertNotNull(result); + + for (final RepEnvInfo info : repEnvInfo) { + info.getEnv().getStats(StatsConfig.CLEAR); + } + + /* Secondary deletion via primary cursor. 
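+           Stats were cleared just above, so only this deletion round is
+           counted. The master also counts the cursor's search, hence the
+           expected 3 non-zero stats there vs. 2 on the replicas.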
*/ + + txn = env.beginTransaction(null, null); + try (final Cursor c = db.openCursor(txn, null)) { + IntegerBinding.intToEntry(1, key); + result = c.get(key, data, Get.SEARCH, null); + assertNotNull(result); + result = c.delete(null); + assertNotNull(result); + } + txn.commit(); + + expectStats(3, 2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + if (isMaster) { + assertEquals(1, stats.getPriSearchOps()); + } + assertEquals(1, stats.getPriDeleteOps()); + assertEquals(1, stats.getSecDeleteOps()); + } + }); + + /* Secondary deletion via secondary cursor. */ + + txn = env.beginTransaction(null, null); + try (final Cursor c = secDb.openCursor(txn, null)) { + IntegerBinding.intToEntry(2, key); + result = c.get(key, data, Get.SEARCH, null); + assertNotNull(result); + result = c.delete(null); + assertNotNull(result); + } + txn.commit(); + + expectStats(4, 2, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + if (isMaster) { + assertEquals(1, stats.getSecSearchOps()); + assertEquals(1, stats.getPriSearchOps()); + } + assertEquals(1, stats.getPriDeleteOps()); + assertEquals(1, stats.getSecDeleteOps()); + } + }); + + /* Secondary deletion failure. */ + + IntegerBinding.intToEntry(1, key); + result = db.delete(null, key, null); + assertNull(result); + + IntegerBinding.intToEntry(1, key); + result = secDb.delete(null, key, null); + assertNull(result); + + expectMasterStats(1, new ExpectStats() { + @Override + public void check(EnvironmentStats stats, boolean isMaster) { + assertEquals(2, stats.getPriDeleteFailOps()); + } + }); + + /* Close */ + + for (int i = 0; i < repEnvInfo.length; i += 1) { + secDbs[i].close(); + priDbs[i].close(); + } + closeNodes(repEnvInfo); + } + + private interface ExpectStats { + void check(EnvironmentStats stats, boolean isMaster); + } + + private void expectStats(int nNonZero, ExpectStats expectStats) { + expectStats(nNonZero, nNonZero, expectStats); + } + + private void expectStats( + int nNonZeroMaster, + int nNonZeroReplica, + ExpectStats expectStats) { + + for (int i = 0; i < repEnvInfo.length; i += 1) { + + EnvironmentStats stats = + repEnvInfo[i].getEnv().getStats(StatsConfig.CLEAR); + + try { + if (expectStats != null) { + expectStats.check(stats, i == 0); + } + + expectNonZeroStats( + stats, + (i == 0) ? 
nNonZeroMaster : nNonZeroReplica); + + } catch (final Throwable e) { + System.out.println("master: " + (i == 0) + "\n" + stats); + throw e; + } + } + } + + private void expectMasterStats(int nNonZero, ExpectStats expectStats) { + + for (int i = 0; i < repEnvInfo.length; i += 1) { + + EnvironmentStats stats = + repEnvInfo[i].getEnv().getStats(StatsConfig.CLEAR); + + try { + if (i == 0) { + if (expectStats != null) { + expectStats.check(stats, true); + } + expectNonZeroStats(stats, nNonZero); + } else { + expectNonZeroStats(stats, 0); + } + } catch (final Throwable e) { + System.out.println("master: " + (i == 0) + "\n" + stats); + throw e; + } + } + } + + private void expectNonZeroStats(EnvironmentStats stats, int nNonZero) { + for (int i = 0;; i += 1) { + long val; + switch (i) { + case 0: + val = stats.getPriSearchOps(); + break; + case 1: + val = stats.getPriSearchFailOps(); + break; + case 2: + val = stats.getPriPositionOps(); + break; + case 3: + val = stats.getPriInsertOps(); + break; + case 4: + val = stats.getPriInsertFailOps(); + break; + case 5: + val = stats.getPriUpdateOps(); + break; + case 6: + val = stats.getPriDeleteOps(); + break; + case 7: + val = stats.getPriDeleteFailOps(); + break; + case 8: + val = stats.getSecSearchOps(); + break; + case 9: + val = stats.getSecSearchFailOps(); + break; + case 10: + val = stats.getSecPositionOps(); + break; + case 11: + val = stats.getSecInsertOps(); + break; + case 12: + val = stats.getSecUpdateOps(); + break; + case 13: + val = stats.getSecDeleteOps(); + break; + default: + assertEquals(0, nNonZero); + return; + } + + if (val == 0) { + continue; + } + + nNonZero -= 1; + + assertTrue(nNonZero >= 0); + } + } +} diff --git a/test/com/sleepycat/je/test/PhantomRestartTest.java b/test/com/sleepycat/je/test/PhantomRestartTest.java new file mode 100644 index 0000000..4c5c6b2 --- /dev/null +++ b/test/com/sleepycat/je/test/PhantomRestartTest.java @@ -0,0 +1,528 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentStats;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.DualTestCase;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * Tests read operation restarts that are the by-product of phantom prevention
+ * (range locking) added in SR [#10477].
+ */
+@RunWith(Parameterized.class)
+public class PhantomRestartTest extends DualTestCase {
+
+    /*
+     * Spec Parameters: Oper name, InsertKey1, InsertKey2, Oper instance
+     *
+     * A- InsertKey1 is inserted in transaction T0 and committed.
+     * B- T1 starts and performs Oper passing InsertKey1; it finishes the
+     *    operation, but doesn't commit.
+     * C- T2 starts and attempts to insert InsertKey2, but is blocked by T1.
+     * D- T3 starts and performs Oper passing InsertKey2, but is restarted
+     *    because it is blocked by T2.
+     * E- T1 is committed, allowing T2 and T3 to finish also.
+     * F- T4 performs Oper a final time passing InsertKey2.
+     *
+     * For each Spec below the Lock owners and waiters are described in
+     * between steps D and E above. This state describes the condition where
+     * the read operation (Oper) is performing restarts because it is blocked
+     * by a RANGE_INSERT.
+     *
+     * To understand how read operation restarts work, consider the "First"
+     * Spec below. When T1 releases K2, T2 should finish, and T3 should read
+     * K1. If restart were not implemented in the lock manager, T3 would read
+     * K2 instead of K1; K1 would then be a phantom with respect to T3. If
+     * search restarts were not implemented, a RangeRestartException would
+     * surface at the user level. These errors were observed when running this
+     * test before search restarts were fully implemented.
+     */
+    private static Spec[] SPECS = {
+
+        /*
+         * T1 calls getFirst -- owns RANGE_READ on K2.
+         * T2 inserts K1 -- waits for RANGE_INSERT on K2.
+         * T3 calls getFirst -- requests RANGE_READ on K2: restarts.
+         */
+        new Spec("First", 2, 1, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                status = cursor.getFirst(key, data, null);
+                checkStatus(OperationStatus.SUCCESS);
+                checkKey(insertedKey);
+            }
+        }),
+
+        /*
+         * T1 calls getLast -- owns RANGE_READ on EOF.
+         * T2 inserts K2 -- waits for RANGE_INSERT on EOF.
+         * T3 calls getLast -- requests RANGE_READ on EOF: restarts.
+         */
+        new Spec("Last", 1, 2, new Oper() {
+            void doOper(int insertedKey) throws DatabaseException {
+                status = cursor.getLast(key, data, null);
+                checkStatus(OperationStatus.SUCCESS);
+                checkKey(insertedKey);
+            }
+        }),
+
+        /*
+         * T1 calls getSearchKey on K1 -- owns RANGE_READ on K2.
+ * T2 inserts K1 -- waits for RANGE_INSERT on K2. + * T3 calls getSearchKey on K1 -- requests RANGE_READ on K2: restarts. + */ + new Spec("Search", 2, 1, new Oper() { + void doOper(int insertedKey) throws DatabaseException { + setKey(1); + status = dups ? cursor.getSearchBoth(key, data, null) + : cursor.getSearchKey(key, data, null); + checkStatus((insertedKey == 1) ? OperationStatus.SUCCESS + : OperationStatus.NOTFOUND); + } + }), + + /* + * T1 calls getSearchKeyRange on K0 -- owns RANGE_READ on K2. + * T2 inserts K1 -- waits for RANGE_INSERT on K2. + * T3 calls getSearchKeyRange on K0 -- requests RANGE_READ on K2: + * restarts. + */ + new Spec("SearchRange", 2, 1, new Oper() { + void doOper(int insertedKey) throws DatabaseException { + setKey(0); + status = dups ? cursor.getSearchBothRange(key, data, null) + : cursor.getSearchKeyRange(key, data, null); + checkStatus(OperationStatus.SUCCESS); + checkKey(insertedKey); + } + }), + + /* + * T1 calls getNext from K1 -- owns RANGE_READ on EOF. + * T2 inserts K2 -- waits for RANGE_INSERT on EOF. + * T3 calls getNext from K1 -- requests RANGE_READ on EOF: restarts. + */ + new Spec("Next", 1, 2, new Oper() { + void doOper(int insertedKey) throws DatabaseException { + status = cursor.getFirst(key, data, null); + checkStatus(OperationStatus.SUCCESS); + checkKey(1); + status = cursor.getNext(key, data, null); + checkStatus((insertedKey == 2) ? OperationStatus.SUCCESS + : OperationStatus.NOTFOUND); + } + }), + + /* + * T1 calls getPrev from K2 -- owns RANGE_READ on K2. + * T2 inserts K1 -- waits for RANGE_INSERT on K2. + * T3 calls getPrev from K2 -- requests RANGE_READ on K2: restarts. + */ + new Spec("Prev", 2, 1, new Oper() { + void doOper(int insertedKey) throws DatabaseException { + status = cursor.getLast(key, data, null); + checkStatus(OperationStatus.SUCCESS); + checkKey(2); + status = cursor.getPrev(key, data, null); + checkStatus((insertedKey == 1) ? OperationStatus.SUCCESS + : OperationStatus.NOTFOUND); + } + }), + + /* + * NextDup, NextNoDup, PrevDup and PrevNoDup are not tested here. + * Restarts for these operations are implemented together with Next and + * Prev operations, so testing was skipped. 
+         */
+    };
+
+    private static abstract class Oper {
+
+        PhantomRestartTest test;
+        boolean dups;
+        Cursor cursor;
+        DatabaseEntry key;
+        DatabaseEntry data;
+        OperationStatus status;
+
+        void init(PhantomRestartTest test, Cursor cursor) {
+            this.test = test;
+            this.cursor = cursor;
+            this.dups = test.dups;
+            this.key = new DatabaseEntry();
+            this.data = new DatabaseEntry();
+            this.status = null;
+        }
+
+        void checkStatus(OperationStatus expected) {
+            assertEquals(expected, status);
+        }
+
+        void setKey(int val) {
+            if (dups) {
+                IntegerBinding.intToEntry(100, key);
+                IntegerBinding.intToEntry(val, data);
+            } else {
+                IntegerBinding.intToEntry(val, key);
+            }
+        }
+
+        void checkKey(int expected) {
+            if (dups) {
+                assertEquals(100, IntegerBinding.entryToInt(key));
+                assertEquals
+                    (expected, IntegerBinding.entryToInt(data));
+            } else {
+                assertEquals
+                    (expected, IntegerBinding.entryToInt(key));
+            }
+        }
+
+        abstract void doOper(int insertedKey)
+            throws DatabaseException;
+    }
+
+    protected static class Spec {
+
+        String name;
+        int insertKey1;
+        int insertKey2;
+        Oper oper;
+
+        Spec(String name, int insertKey1, int insertKey2, Oper oper) {
+            this.name = name;
+            this.insertKey1 = insertKey1;
+            this.insertKey2 = insertKey2;
+            this.oper = oper;
+        }
+    }
+
+    @Parameters
+    public static List genParams() {
+        List list = new ArrayList();
+        for (Spec spec : SPECS) {
+            list.add(new Object[]{spec, true});
+            list.add(new Object[]{spec, false});
+        }
+
+        return list;
+    }
+
+    private static final int MAX_INSERT_MILLIS = 5000;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private JUnitThread writerThread;
+    private JUnitThread readerThread;
+    private final boolean dups;
+    private final Spec spec;
+
+    public PhantomRestartTest(Spec spec, Boolean dups) {
+        this.spec = spec;
+        this.dups = dups;
+        envHome = SharedTestUtils.getTestDir();
+        customName = spec.name + (dups ? "-Dups" : "");
+    }
+
+    @After
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        envHome = null;
+        env = null;
+        db = null;
+
+        if (writerThread != null) {
+            writerThread.shutdown();
+            writerThread = null;
+        }
+
+        if (readerThread != null) {
+            readerThread.shutdown();
+            readerThread = null;
+        }
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv()
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setTxnSerializableIsolation(true);
+
+        /* Disable the daemons so they don't interfere with stats. */
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false");
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false");
+
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(dups);
+        db = env.openDatabase(null, "PhantomRestartTest", dbConfig);
+    }
+
+    /**
+     * Closes the environment and database.
+     */
+    private void closeEnv()
+        throws DatabaseException {
+
+        if (db != null) {
+            db.close();
+            db = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    @Test
+    public void runTest()
+        throws DatabaseException, InterruptedException {
+
+        openEnv();
+
+        /* T0 inserts first key.
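+           (Step A of the sequence described in the Spec comment above.)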
*/ + if (dups) { + + /* + * Create a dup tree and delete it to avoid deadlocking. Note that + * we have the compressor disabled to make this work. Deadlocking + * occurs without a dup tree because insertion locks the sibling + * key when creating a dup tree from a single LN. This extra + * locking throws off our test. + */ + insert(100, 0); + insert(100, 1); + DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(100, key); + db.delete(null, key); + + /* Insert the dup key we're testing with. */ + insert(100, spec.insertKey1); + } else { + insert(spec.insertKey1, 0); + } + + /* T1 performs Oper. */ + Transaction readerTxn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(readerTxn, null); + spec.oper.init(this, cursor); + spec.oper.doOper(spec.insertKey1); + + /* T2 starts to insert second key, waits on T1. */ + if (dups) { + startInsert(100, spec.insertKey2); + } else { + startInsert(spec.insertKey2, 0); + } + + /* T3 performs Oper. */ + startReadOper(spec.insertKey2); + + /* Close T1 to allow T2 and T3 to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + waitForReadOper(); + + /* T4 performs Oper again in this thread as a double-check. */ + readerTxn = env.beginTransaction(null, null); + cursor = db.openCursor(readerTxn, null); + spec.oper.init(this, cursor); + spec.oper.doOper(spec.insertKey2); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + /** + * Inserts the given key and data in a new transaction and commits it. + */ + private void insert(int keyVal, int dataVal) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(keyVal, key); + IntegerBinding.intToEntry(dataVal, data); + OperationStatus status; + Transaction writerTxn = env.beginTransaction(null, null); + try { + if (dups) { + status = db.putNoDupData(writerTxn, key, data); + } else { + status = db.putNoOverwrite(writerTxn, key, data); + } + } catch (LockConflictException e) { + writerTxn.abort(); + throw e; + } + assertEquals(OperationStatus.SUCCESS, status); + writerTxn.commit(Durability.COMMIT_NO_SYNC); + } + + /** + * Starts writer thread and waits for it to start the insert. + */ + private void startInsert(final int keyVal, final int dataVal) + throws DatabaseException, InterruptedException { + + EnvironmentStats origStats = env.getStats(null); + + writerThread = new JUnitThread("Writer") { + public void testBody() + throws DatabaseException { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(keyVal, key); + IntegerBinding.intToEntry(dataVal, data); + Transaction writerTxn = env.beginTransaction(null, null); + OperationStatus status; + if (dups) { + status = db.putNoDupData(writerTxn, key, data); + } else { + status = db.putNoOverwrite(writerTxn, key, data); + } + assertEquals(OperationStatus.SUCCESS, status); + writerTxn.commit(Durability.COMMIT_NO_SYNC); + } + }; + + writerThread.start(); + waitForBlock(origStats); + } + + /** + * Waits for the writer thread to finish. + */ + private void waitForInsert() { + + try { + writerThread.finishTest(); + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } finally { + writerThread = null; + } + } + + /** + * Starts reader thread and waits for it to start the read operation. 
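+     * Blocking is detected by waitForBlock, which polls
+     * EnvironmentStats.getNWaiters against the baseline captured before
+     * the thread starts.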
+ */ + private void startReadOper(final int operKeyParam) + throws DatabaseException, InterruptedException { + + EnvironmentStats origStats = env.getStats(null); + + readerThread = new JUnitThread("Reader") { + public void testBody() + throws DatabaseException { + Transaction readerTxn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(readerTxn, null); + spec.oper.init(PhantomRestartTest.this, cursor); + spec.oper.doOper(operKeyParam); + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + } + }; + + readerThread.start(); + waitForBlock(origStats); + } + + /** + * Waits for a new locker to block waiting for a lock. + */ + private void waitForBlock(EnvironmentStats origStats) + throws DatabaseException, InterruptedException { + + long startTime = System.currentTimeMillis(); + while (true) { + + /* Give some time to the thread. */ + Thread.yield(); + Thread.sleep(10); + if (System.currentTimeMillis() - startTime > MAX_INSERT_MILLIS) { + fail("Timeout"); + } + + /* Wait for the operation to block. */ + EnvironmentStats stats = env.getStats(null); + if (stats.getNWaiters() > origStats.getNWaiters()) { + break; + } + } + } + + /** + * Waits for the reader thread to finish. + */ + private void waitForReadOper() { + + try { + readerThread.finishTest(); + } catch (Throwable e) { + e.printStackTrace(); + fail(e.toString()); + } finally { + readerThread = null; + } + } +} diff --git a/test/com/sleepycat/je/test/PhantomTest.java b/test/com/sleepycat/je/test/PhantomTest.java new file mode 100644 index 0000000..d08761a --- /dev/null +++ b/test/com/sleepycat/je/test/PhantomTest.java @@ -0,0 +1,3192 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Tests phantom prevention (range locking) added in SR [#10477]. + * + *
+ * <p>We test that with a serializable txn, range locking will prevent phantoms
+ * from appearing. We also test that phantoms *do* appear for the
+ * non-serializable isolation levels: read-uncommitted, read-committed and
+ * repeatable-read.</p>
+ *
+ * <p>Test method names have the suffix _Success or _NotFound depending on
+ * whether they're testing a read operation with a SUCCESS or NOTFOUND outcome.
+ * If they're testing duplicates, the _Dup suffix is also added. Finally, a
+ * suffix is added for the isolation level at run time.</p>
+ *
+ * <p>All tests are for the case where the reader txn locks a range and then
+ * the writer txn tries to insert into the locked range. The reverse (where
+ * the writer inserts first) works without range locking because the reader
+ * will block on the inserted key, so we don't test that here.</p>
+ *
+ * <p>We test all read operations with and without duplicates (with duplicates
+ * the test name has _Dup appended) except for the following cases, which are
+ * meaningless without duplicates because get{Next,Prev}Dup always return
+ * NOTFOUND when duplicates are not configured:
+ * testGetNextDup_Success, testGetNextDup_NotFound,
+ * testGetPrevDup_Success, testGetPrevDup_NotFound.</p>
+ */
+@RunWith(Parameterized.class)
+public class PhantomTest extends DualTestCase {
+
+    private static final TransactionConfig READ_UNCOMMITTED_CONFIG
+        = new TransactionConfig();
+    private static final TransactionConfig READ_COMMITTED_CONFIG
+        = new TransactionConfig();
+    private static final TransactionConfig REPEATABLE_READ_CONFIG
+        = new TransactionConfig();
+    private static final TransactionConfig SERIALIZABLE_CONFIG
+        = new TransactionConfig();
+    static {
+        READ_UNCOMMITTED_CONFIG.setReadUncommitted(true);
+        READ_COMMITTED_CONFIG.setReadCommitted(true);
+        SERIALIZABLE_CONFIG.setSerializableIsolation(true);
+    }
+    private static final TransactionConfig[] TXN_CONFIGS = {
+        READ_UNCOMMITTED_CONFIG,
+        READ_COMMITTED_CONFIG,
+        REPEATABLE_READ_CONFIG,
+        SERIALIZABLE_CONFIG,
+    };
+
+    private static final String DB_NAME = "PhantomTest";
+
+    private static final int MAX_INSERT_MILLIS = 5000;
+
+    private boolean disableBtreeVerifier = false;
+
+    private File envHome;
+    private Environment env;
+    private Database db;
+    private final TransactionConfig txnConfig;
+    private JUnitThread writerThread;
+    private final boolean txnSerializable;
+    private boolean dups;
+    private boolean insertFinished;
+
+    @Parameters
+    public static List genParams() {
+        List list = new ArrayList();
+
+        for (TransactionConfig txnConfig : TXN_CONFIGS)
+            list.add(new Object[]{txnConfig});
+
+        return list;
+    }
+
+    public PhantomTest(TransactionConfig txnConfig) {
+        envHome = SharedTestUtils.getTestDir();
+        this.txnConfig = txnConfig;
+        txnSerializable = (txnConfig == SERIALIZABLE_CONFIG);
+        String txnType;
+        if (txnConfig == SERIALIZABLE_CONFIG) {
+            txnType = "-Serializable";
+        } else if (txnConfig == REPEATABLE_READ_CONFIG) {
+            txnType = "-RepeatableRead";
+        } else if (txnConfig == READ_COMMITTED_CONFIG) {
+            txnType = "-ReadCommitted";
+        } else if (txnConfig == READ_UNCOMMITTED_CONFIG) {
+            txnType = "-ReadUncommitted";
+        } else {
+            throw new IllegalStateException();
+        }
+        customName = txnType;
+    }
+
+    @After
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        envHome = null;
+        env = null;
+        db = null;
+
+        if (writerThread != null) {
+            writerThread.shutdown();
+            writerThread = null;
+        }
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv(boolean dups)
+        throws DatabaseException {
+
+        openEnv(dups, null);
+    }
+
+    /**
+     * Opens the environment and database.
+     */
+    private void openEnv(boolean dups, EnvironmentConfig envConfig)
+        throws DatabaseException {
+
+        this.dups = dups;
+        if (envConfig == null) {
+            envConfig = TestUtils.initEnvConfig();
+            /* Control over isolation level is required by this test. */
+            TestUtils.clearIsolationLevel(envConfig);
+        }
+
+        /* Disable the daemons so they don't interfere with stats.
*/ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + if (disableBtreeVerifier) { + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + } + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(dups); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Closes the environment and database. + */ + private void closeEnv() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + close(env); + env = null; + } + } + + @Test + public void testGetSearchKey_Success() + throws DatabaseException { + + openEnv(false); + + /* Insert key 2. */ + insert(2); + + /* getSearchKey returns key 2. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + + /* Insertions are never blocked. */ + try { + insert(1); + insert(3); + } catch (LockConflictException e) { + fail(); + } + + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + closeEnv(); + } + + @Test + public void testGetSearchKey_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + + /* Insert dups. */ + insert(1, 2); + insert(1, 3); + + /* getSearchKey returns key {1,2}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 2)); + + /* Insertions after {1, 2} are never blocked. */ + try { + insert(1, 4); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,1} in a writer thread. */ + startInsert(1, 1); + + /* + * If serializable, getSearchKey should return {1,2} again, otherwise + * getSearchKey should see {1,1}. + */ + if (txnSerializable) { + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 2)); + } else { + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 1)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchKey returns {1,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 1)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchKey_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + + /* Insert key 1. */ + insert(1); + + /* getSearchKey for key 2 returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 2)); + + /* Insertions before 2 are never blocked. */ + try { + insert(0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getSearchKey should return NOTFOUND again; + * otherwise getSearchKey should see key 2. 
+ */ + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 2)); + } else { + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchKey returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchKey_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + + /* Insert dups. */ + insert(2, 1); + insert(2, 2); + + /* getSearchKey for {1,1} returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 1, 1)); + + /* Insertions after {2,2} are never blocked. */ + try { + insert(2, 3); + insert(3, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,1} in a writer thread. */ + startInsert(1, 1); + + /* + * If serializable, getSearchKey should return NOTFOUND again; + * otherwise getSearchKey should see {1,1}. + */ + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, searchKey(cursor, 1, 1)); + } else { + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 1)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchKey returns {1,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 1)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchBoth_Success() + throws DatabaseException { + + doTestGetSearchBoth_Success(false /*useRangeSearch*/); + } + + /** + * In a non-duplicates DB, getSearchBoth and getSearchBothRange are + * equivalent. + */ + private void doTestGetSearchBoth_Success(boolean useRangeSearch) + throws DatabaseException { + + openEnv(false); + + /* Insert key 2. */ + insert(2); + + /* getSearchBoth[Range] returns {2,0}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, + searchBoth(cursor, 2, 0, useRangeSearch)); + + /* Insertions are never blocked. */ + try { + insert(1); + insert(3); + } catch (LockConflictException e) { + fail(); + } + + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + closeEnv(); + } + + @Test + public void testGetSearchBoth_Success_Dup() + throws DatabaseException { + + openEnv(true); + + /* Insert dups. */ + insert(1, 1); + insert(1, 3); + + /* getSearchBoth returns key {1,3}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3)); + + /* Insertions are never blocked. 
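+           (A successful exact-match read locks only the record it found,
+           so inserts on either side of {1,3} need no range lock.)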
*/ + try { + insert(0, 0); + insert(1, 0); + insert(1, 2); + insert(1, 4); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + closeEnv(); + } + + @Test + public void testGetSearchBoth_NotFound() + throws DatabaseException, InterruptedException { + + doTestGetSearchBoth_NotFound(false /*useRangeSearch*/); + } + + /** + * In a non-duplicates DB, getSearchBoth and getSearchBothRange are + * equivalent. + */ + private void doTestGetSearchBoth_NotFound(boolean useRangeSearch) + throws DatabaseException, InterruptedException { + + openEnv(false); + + /* Insert key 1. */ + insert(1); + + /* getSearchBoth for key 2 returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.NOTFOUND, + searchBoth(cursor, 2, useRangeSearch)); + + /* Insertions before 2 are never blocked. */ + try { + insert(0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getSearchBoth should return NOTFOUND again; + * otherwise getSearchBoth should see key 2. + */ + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, + searchBoth(cursor, 2, useRangeSearch)); + } else { + assertEquals(OperationStatus.SUCCESS, + searchBoth(cursor, 2, useRangeSearch)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchBoth returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, + searchBoth(cursor, 2, useRangeSearch)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchBoth_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + + /* Insert dups. */ + insert(1, 1); + insert(1, 3); + + /* getSearchBoth for {1,2} returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.NOTFOUND, searchBoth(cursor, 1, 2)); + + /* Insertions before {1,2} or after {1,3} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(1, 4); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,2} in a writer thread. */ + startInsert(1, 2); + + /* + * If serializable, getSearchBoth should return NOTFOUND again; + * otherwise getSearchBoth should see {1,2}. + */ + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, searchBoth(cursor, 1, 2)); + } else { + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchBoth returns {1,2}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchKeyRange_Success() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1 and 3. */ + insert(1); + insert(3); + + /* getSearchKeyRange for key 2 returns key 3. 
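+           (getSearchKeyRange positions at the smallest key greater than or
+           equal to the search key; 2 is absent, so it lands on 3.)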
*/ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + + /* Insertions before 2 and after 3 are never blocked. */ + try { + insert(0); + insert(4); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getSearchKeyRange should return key 3 again; + * otherwise getSearchKeyRange should see key 2. + */ + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(3, IntegerBinding.entryToInt(key)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchKeyRange returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchKeyRange_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 2); + insert(3, 2); + insert(3, 3); + + /* getSearchKeyRange for key 2 returns {3,2}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + assertEquals(OperationStatus.SUCCESS, status); + + /* Insertions before 2 and after {3,3} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(3, 4); + insert(4, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {3,1} in a writer thread. */ + startInsert(3, 1); + + /* + * If serializable, getSearchKeyRange should return {3,2} again; + * otherwise getSearchKeyRange should see {3,1}. + */ + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } else { + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchKeyRange returns {3,1}. 
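+           (After the writer commits, {3,1} sorts before {3,2} among the
+           key-3 duplicates, so the range search now returns it.)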
*/ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchKeyRange_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1. */ + insert(1); + + /* getSearchKeyRange for key 2 returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before 2 are never blocked. */ + try { + insert(0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 3 in a writer thread. */ + startInsert(3); + + /* + * If serializable, getSearchKeyRange should return NOTFOUND again; + * otherwise getSearchKeyRange should see key 3. + */ + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchKeyRange returns key 3. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetSearchKeyRange_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 2); + + /* getSearchKeyRange for key 2 returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before 2 are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {3,1} in a writer thread. */ + startInsert(3, 1); + + /* + * If serializable, getSearchKeyRange should return NOTFOUND again; + * otherwise getSearchKeyRange should see {3,1}. + */ + IntegerBinding.intToEntry(2, key); + status = cursor.getSearchKeyRange(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. 
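+           (Under serializable isolation, committing releases the range
+           lock that is blocking the {3,1} insert.)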
*/
+        cursor.close();
+        readerTxn.commit(Durability.COMMIT_NO_SYNC);
+        waitForInsert();
+
+        /* getSearchKeyRange returns {3,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(2, key);
+        status = cursor.getSearchKeyRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    /*
+     * A testGetSearchBothRange_Success test case is not possible because it is
+     * not possible to insert a duplicate when only one LN for the key already
+     * exists, without locking the existing LN. Therefore, the insert thread
+     * will deadlock with the reader thread, which has the existing LN locked.
+     * This is a testing anomaly, not a bug.
+     */
+
+    @Test
+    public void testGetSearchBothRange_Success_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups. */
+        insert(1, 1);
+        insert(1, 2);
+        insert(3, 2);
+        insert(3, 3);
+
+        /* getSearchBothRange for {3, 0} returns {3,2}. */
+        Transaction readerTxn = env.beginTransaction(null, txnConfig);
+        Cursor cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(2, IntegerBinding.entryToInt(data));
+
+        /* Insertions before {1,1} and after {3,2} are never blocked. */
+        try {
+            insert(1, 0);
+            insert(0, 0);
+            insert(3, 4);
+        } catch (LockConflictException e) {
+            fail();
+        }
+
+        /* Insert {3,1} in a writer thread. */
+        startInsert(3, 1);
+
+        /*
+         * If serializable, getSearchBothRange should return {3,2} again;
+         * otherwise getSearchBothRange should see {3,1}.
+         */
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        if (txnSerializable) {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(2, IntegerBinding.entryToInt(data));
+        } else {
+            assertEquals(3, IntegerBinding.entryToInt(key));
+            assertEquals(1, IntegerBinding.entryToInt(data));
+        }
+
+        /* Close reader to allow writer to finish. */
+        cursor.close();
+        readerTxn.commit(Durability.COMMIT_NO_SYNC);
+        waitForInsert();
+
+        /* getSearchBothRange returns {3,1}. */
+        readerTxn = env.beginTransaction(null, txnConfig);
+        cursor = db.openCursor(readerTxn, null);
+        IntegerBinding.intToEntry(3, key);
+        IntegerBinding.intToEntry(0, data);
+        status = cursor.getSearchBothRange(key, data, null);
+        assertEquals(OperationStatus.SUCCESS, status);
+        assertEquals(3, IntegerBinding.entryToInt(key));
+        assertEquals(1, IntegerBinding.entryToInt(data));
+        cursor.close();
+        readerTxn.commit();
+
+        closeEnv();
+    }
+
+    @Test
+    public void testGetSearchBothRange_NotFound()
+        throws DatabaseException, InterruptedException {
+
+        doTestGetSearchBoth_NotFound(true /*useRangeSearch*/);
+    }
+
+    @Test
+    public void testGetSearchBothRange_NotFound_Dup()
+        throws DatabaseException, InterruptedException {
+
+        openEnv(true);
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        OperationStatus status;
+
+        /* Insert dups.
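+           (Two duplicates, {3,0} and {3,1}; the range search below asks
+           for data >= 2 under key 3 and should find nothing.)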
*/ + insert(3, 0); + insert(3, 1); + + /* getSearchBothRange for {3, 2} returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(3, key); + IntegerBinding.intToEntry(2, data); + status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before {3,0} are never blocked. */ + try { + insert(3, -1); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {3,3} in a writer thread. */ + startInsert(3, 3); + + /* + * If serializable, getSearchBothRange should return NOTFOUND again; + * otherwise getSearchBothRange should see {3,3}. + */ + IntegerBinding.intToEntry(3, key); + IntegerBinding.intToEntry(2, data); + status = cursor.getSearchBothRange(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getSearchBothRange returns {3,3}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(3, key); + IntegerBinding.intToEntry(2, data); + status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetFirst_Success() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 2. */ + insert(2); + + /* getFirst returns key 2. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + + /* Insertions after 2 are never blocked. */ + try { + insert(3); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 1 in a writer thread. */ + startInsert(1); + + /* + * If serializable, getFirst should return key 2 again; otherwise + * getFirst should see key 1. + */ + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(2, IntegerBinding.entryToInt(key)); + } else { + assertEquals(1, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getFirst returns key 1. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetFirst_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. 
*/ + insert(1, 2); + insert(1, 3); + + /* getFirst returns {1,2}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + + /* Insertions after {1,3} are never blocked. */ + try { + insert(1, 4); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,1} in a writer thread. */ + startInsert(1, 1); + + /* + * If serializable, getFirst should return {1,2} again; otherwise + * getFirst should see {1,1}. + */ + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } else { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getFirst returns {1,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetFirst_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* getFirst returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insert key 1 in a writer thread. */ + startInsert(1); + + /* + * If serializable, getFirst should return NOTFOUND again; otherwise + * getFirst should see key 1. + */ + status = cursor.getFirst(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getFirst returns key 1. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetFirst_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* getFirst returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insert {1,1} in a writer thread. 
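+           (The database is empty here, so under serializable isolation the
+           NOTFOUND getFirst above holds a range lock on EOF and the insert
+           must wait; the other isolation levels let it complete right away.)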
*/ + startInsert(1, 1); + + /* + * If serializable, getFirst should return NOTFOUND again; otherwise + * getFirst should see {1,1}. + */ + status = cursor.getFirst(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getFirst returns {1,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetLast_Success() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1. */ + insert(1); + + /* getLast returns key 1. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + + /* Insertions before current position are never blocked. */ + try { + insert(0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getLast should return key 1 again; otherwise + * getLast should see key 2. + */ + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getLast returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetLast_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 0); + insert(1, 2); + + /* getLast returns {1,2}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + + /* Insertions before current position are never blocked. */ + try { + insert(1, 1); + insert(0, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,3} in a writer thread. */ + startInsert(1, 3); + + /* + * If serializable, getLast should return {1,2} again; otherwise + * getLast should see {1,3}. 
+ */ + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } else { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getLast returns {1,3}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetLast_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* getLast returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insert key 1 in a writer thread. */ + startInsert(1); + + /* + * If serializable, getLast should return NOTFOUND again; otherwise + * getLast should see key 1. + */ + status = cursor.getLast(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getLast returns key 1. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetLast_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* getLast returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insert {1,1} in a writer thread. */ + startInsert(1, 1); + + /* + * If serializable, getLast should return NOTFOUND again; otherwise + * getLast should see {1,1}. + */ + status = cursor.getLast(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getLast returns {1,1}. 
*/ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + status = cursor.getLast(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNext_Success() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1 and 3. */ + insert(1); + insert(3); + + /* getNext returns key 3. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + + /* Insertions before 1 and after 3 are never blocked. */ + try { + insert(0); + insert(4); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getNext should return key 3 again; otherwise + * getNext should see key 2. + */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(3, IntegerBinding.entryToInt(key)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNext returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNext_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 3); + + /* getNext returns {1,3}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + + /* Insertions before {1,1} and after {1,3} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(1, 4); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,2} in a writer thread. */ + startInsert(1, 2); + + /* + * If serializable, getNext should return {1,3} again; otherwise + * getNext should see {1,2}. 
+ */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + } else { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNext returns {1,2}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNext_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1. */ + insert(1); + + /* getNext returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before 1 are never blocked. */ + try { + insert(0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getNext should return NOTFOUND again; otherwise + * getNext should see key 2. + */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNext(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNext returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNext_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 2); + + /* getNext returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before {1,1} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,3} in a writer thread. 
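+ * Note that {1,3} sorts just after the reader's position at {1,2}, inside + * the range its getNext examined.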
*/ + startInsert(1, 3); + + /* + * If serializable, getNext should return NOTFOUND again; otherwise + * getNext should see {1,3}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + status = cursor.getNext(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNext returns {1,3}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + status = cursor.getNext(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNextDup_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 3); + + /* getNextDup returns {1,3}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + + /* Insertions before {1,1} and after {1,3} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(1, 4); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,2} in a writer thread. */ + startInsert(1, 2); + + /* + * If serializable, getNextDup should return {1,3} again; otherwise + * getNextDup should see {1,2}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + } else { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNextDup returns {1,2}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNextDup_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 2); + insert(2, 1); + insert(2, 2); + + /* getNextDup returns NOTFOUND. 
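+ (the cursor is on {1,2}, the last duplicate for key 1; getNextDup does not cross into key 2)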
*/ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + status = cursor.getNextDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before {1,1} and after {2,2} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(2, 3); + insert(3, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,3} in a writer thread. */ + startInsert(1, 3); + + /* + * If serializable, getNextDup should return NOTFOUND again; otherwise + * getNextDup should see {1,3}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + status = cursor.getNextDup(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNextDup returns {1,3}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 2)); + status = cursor.getNextDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(3, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNextNoDup_Success() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1 and 3. */ + insert(1); + insert(3); + + /* getNextNoDup returns key 3. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + + /* Insertions before 1 and after 3 are never blocked. */ + try { + insert(0); + insert(4); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getNextNoDup should return key 3 again; otherwise + * getNextNoDup should see key 2. + */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(3, IntegerBinding.entryToInt(key)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNextNoDup returns key 2. 
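+ (with the writer committed, a new reader transaction finds key 2 between keys 1 and 3)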
*/ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNextNoDup_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 2); + insert(3, 1); + insert(3, 2); + + /* getNextNoDup returns {3,1}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + + /* Insertions before {1,1} and after {3,2} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(3, 3); + insert(4, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {2,1} in a writer thread. */ + startInsert(2, 1); + + /* + * If serializable, getNextNoDup should return {3,1} again; otherwise + * getNextNoDup should see {2,1}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(3, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNextNoDup returns {2,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNextNoDup_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1. */ + insert(1); + + /* getNextNoDup returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before 1 are never blocked. */ + try { + insert(0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getNextNoDup should return NOTFOUND again; + * otherwise getNextNoDup should see key 2. 
+ */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNextNoDup(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNextNoDup returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetNextNoDup_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 2); + + /* getNextNoDup returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions before {1,1} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {2,1} in a writer thread. */ + startInsert(2, 1); + + /* + * If serializable, getNextNoDup should return NOTFOUND again; + * otherwise getNextNoDup should see {2,1}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextNoDup(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getNextNoDup returns {2,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + status = cursor.getNextNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrev_Success() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1 and 3. */ + insert(1); + insert(3); + + /* getPrev returns key 1. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + + /* Insertions before 1 and after 3 are never blocked. 
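+ * Keys 0 and 4 lie outside the range the reader traversed, so the inserts + * below succeed even when serializable.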
*/ + try { + insert(0); + insert(4); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getPrev should return key 1 again; otherwise + * getPrev should see key 2. + */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrev returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrev_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(1, 3); + + /* getPrev returns {1,1}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + + /* Insertions before {1,1} and after {1,3} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(1, 4); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,2} in a writer thread. */ + startInsert(1, 2); + + /* + * If serializable, getPrev should return {1,1} again; otherwise + * getPrev should see {1,2}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } else { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrev returns {1,2}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrev_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 2. */ + insert(2); + + /* getPrev returns NOTFOUND. 
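+ (key 2 is the lowest key, so no record precedes the cursor)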
*/ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions after 2 are never blocked. */ + try { + insert(3); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 1 in a writer thread. */ + startInsert(1); + + /* + * If serializable, getPrev should return NOTFOUND again; otherwise + * getPrev should see key 1. + */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + status = cursor.getPrev(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrev returns key 1. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrev_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(2, 2); + insert(2, 3); + + /* getPrev returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions after {2,3} are never blocked. */ + try { + insert(2, 4); + insert(3, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {2,1} in a writer thread. */ + startInsert(2, 1); + + /* + * If serializable, getPrev should return NOTFOUND again; otherwise + * getPrev should see {2,1}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrev(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrev returns {2,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrev(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrevDup_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. 
*/ + insert(1, 1); + insert(1, 3); + + /* getPrevDup returns {1,1}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3)); + status = cursor.getPrevDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + + /* Insertions before {1,1} and after {1,3} are never blocked. */ + try { + insert(1, 0); + insert(0, 0); + insert(1, 4); + insert(2, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,2} in a writer thread. */ + startInsert(1, 2); + + /* + * If serializable, getPrevDup should return {1,1} again; otherwise + * getPrevDup should see {1,2}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3)); + status = cursor.getPrevDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } else { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrevDup returns {1,2}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 3)); + status = cursor.getPrevDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrevDup_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(2, 2); + insert(2, 3); + + /* getPrevDup returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrevDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions after {2,3} are never blocked. */ + try { + insert(2, 4); + insert(3, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {2,1} in a writer thread. */ + startInsert(2, 1); + + /* + * If serializable, getPrevDup should return NOTFOUND again; otherwise + * getPrevDup should see {2,1}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrevDup(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrevDup returns {2,1}. 
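+ (the committed {2,1} now precedes {2,2} in the duplicate set for key 2)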
*/ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrevDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrevNoDup_Success() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 1 and 3. */ + insert(1); + insert(3); + + /* getPrevNoDup returns key 1. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + + /* Insertions before 1 and after 3 are never blocked. */ + try { + insert(0); + insert(4); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 2 in a writer thread. */ + startInsert(2); + + /* + * If serializable, getPrevNoDup should return key 1 again; otherwise + * getPrevNoDup should see key 2. + */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrevNoDup returns key 2. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 3)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrevNoDup_Success_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 0); + insert(1, 2); + insert(3, 1); + insert(3, 2); + + /* getPrevNoDup returns {1,2}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 3, 2)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + + /* Insertions before {1,2} and after {3,2} are never blocked. */ + try { + insert(1, 1); + insert(0, 0); + insert(3, 3); + insert(4, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {2,1} in a writer thread. */ + startInsert(2, 1); + + /* + * If serializable, getPrevNoDup should return {1,2} again; otherwise + * getPrevNoDup should see {2,1}. 
+ */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 3, 2)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + if (txnSerializable) { + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + } else { + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrevNoDup returns {2,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 3, 2)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrevNoDup_NotFound() + throws DatabaseException, InterruptedException { + + openEnv(false); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 2. */ + insert(2); + + /* getPrevNoDup returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions after 2 are never blocked. */ + try { + insert(3); + } catch (LockConflictException e) { + fail(); + } + + /* Insert key 1 in a writer thread. */ + startInsert(1); + + /* + * If serializable, getPrevNoDup should return NOTFOUND again; + * otherwise getPrevNoDup should see key 1. + */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + status = cursor.getPrevNoDup(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrevNoDup returns key 1. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 2)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testGetPrevNoDup_NotFound_Dup() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(2, 1); + insert(2, 2); + + /* getPrevNoDup returns NOTFOUND. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + + /* Insertions after {2,2} are never blocked. */ + try { + insert(2, 3); + insert(3, 0); + } catch (LockConflictException e) { + fail(); + } + + /* Insert {1,1} in a writer thread. 
*/ + startInsert(1, 1); + + /* + * If serializable, getPrevNoDup should return NOTFOUND again; + * otherwise getPrevNoDup should see {1,1}. + */ + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrevNoDup(key, data, null); + if (txnSerializable) { + assertEquals(OperationStatus.NOTFOUND, status); + } else { + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + } + + /* Close reader to allow writer to finish. */ + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + waitForInsert(); + + /* getPrevNoDup returns {1,1}. */ + readerTxn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(readerTxn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 2)); + status = cursor.getPrevNoDup(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(1, IntegerBinding.entryToInt(key)); + assertEquals(1, IntegerBinding.entryToInt(data)); + cursor.close(); + readerTxn.commit(); + + closeEnv(); + } + + @Test + public void testIllegalTransactionConfig() + throws DatabaseException { + + openEnv(false); + TransactionConfig config = new TransactionConfig(); + config.setSerializableIsolation(true); + config.setReadUncommitted(true); + try { + Transaction txn = env.beginTransaction(null, config); + txn.abort(); + fail(); + } catch (IllegalArgumentException expected) { + } + closeEnv(); + } + + /* + * In other tests we test TransactionConfig.setReadUncommitted and + * TransactionConfig.setSerializableIsolation to make sure they result in + * expected non-serializable or serializable behavior. Below we check + * EnvironmentConfig.setSerializableIsolation, + * CursorConfig.setSerializableIsolation, CursorConfig.setReadUncommitted + * and LockMode.READ_UNCOMMITTED, although for a single test case only. + */ + + @Test + public void testEnvironmentConfig() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + /* Control over isolation level is required by this test. */ + TestUtils.clearIsolationLevel(config); + checkSerializable(false, config, null, null); + + config.setTxnSerializableIsolation(true); + checkSerializable(true, config, null, null); + } + + @Test + public void testCursorConfig() + throws DatabaseException { + + CursorConfig config = new CursorConfig(); + checkSerializable(false, null, config, null); + + config.setReadUncommitted(true); + checkSerializable(false, null, config, null); + } + + @Test + public void testReadUncommittedLockMode() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + /* Control over isolation level is required by this test. */ + TestUtils.clearIsolationLevel(envConfig); + envConfig.setTxnSerializableIsolation(true); + + checkSerializable(false, envConfig, null, LockMode.READ_UNCOMMITTED); + } + + private void checkSerializable(boolean expectSerializable, + EnvironmentConfig envConfig, + CursorConfig cursorConfig, + LockMode lockMode) + throws DatabaseException { + + openEnv(false, envConfig); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert key 2. */ + insert(2); + + /* getFirst returns key 2. 
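+ (key 2 is the only record; with the reader positioned on it, the insert of key 1 below should conflict exactly when the configuration is serializable)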
*/ + Transaction readerTxn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(readerTxn, cursorConfig); + status = cursor.getFirst(key, data, lockMode); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + + /* Should deadlock iff serializable. */ + try { + insert(1); + assertTrue(!expectSerializable); + } catch (LockConflictException e) { + assertTrue(expectSerializable); + } + + cursor.close(); + readerTxn.commit(); + + /* This method is called multiple times so remove the database. */ + db.close(); + db = null; + env.removeDatabase(null, DB_NAME); + + closeEnv(); + } + + /** + * Tests that with a single degree 3 txn we don't obtain the extra lock + * during insert. + */ + @Test + public void testSingleDegree3TxnOptimization() + throws DatabaseException { + + disableBtreeVerifier = true; + + try { + openEnv(false); + + /* Insert key 2. */ + insert(2); + + StatsConfig clearStats = new StatsConfig(); + clearStats.setClear(true); + + /* Clear before inserting. */ + EnvironmentStats stats = env.getStats(clearStats); + + /* Insert key 1, which would lock key 2 while inserting. */ + insert(1); + + /* Expect a single lock was requested. */ + stats = env.getStats(clearStats); + assertEquals(1, stats.getNRequests()); + + closeEnv(); + } finally { + disableBtreeVerifier = false; + } + } + + /** + * Tests a particular getSearchBothRange bug that has come up in several + * contexts. This test is probably redundant with GetSearchBothTest but + * I've left it here for good measure. + */ + @Test + public void testSingleDatumBug() + throws DatabaseException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + insert(1, 1); + insert(2, 2); + + /* getSearchBothRange for {2, 1} returns {2, 2}. */ + Transaction readerTxn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(readerTxn, null); + IntegerBinding.intToEntry(2, key); + IntegerBinding.intToEntry(1, data); + status = cursor.getSearchBothRange(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(2, IntegerBinding.entryToInt(key)); + assertEquals(2, IntegerBinding.entryToInt(data)); + + /* If serializable, inserting in the locked range should deadlock. */ + try { + insert(1, 2); + if (txnSerializable) { + fail(); + } + } catch (LockConflictException e) { + if (!txnSerializable) { + fail(); + } + } + + cursor.close(); + readerTxn.commit(Durability.COMMIT_NO_SYNC); + closeEnv(); + } + + /** + * Tests that searchKey returns SUCCESS when it must skip over a deleted + * duplicate. This did not work at one point and was causing warnings + * (Cursor Not Initialized) in duplicate.conf testing. + */ + @Test + public void testSearchKeySkipDeletedDup() + throws DatabaseException { + + openEnv(true); + + /* Insert {1,1} and {1,2}. */ + insert(1, 1); + insert(1, 2); + + /* Delete {1,1}. */ + Transaction txn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(txn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 1, 1)); + OperationStatus status = cursor.delete(); + assertEquals(OperationStatus.SUCCESS, status); + + /* Search for key 1 -- should not return NOTFOUND. */ + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 2)); + + cursor.close(); + txn.commit(Durability.COMMIT_NO_SYNC); + closeEnv(); + } + + /** + * Tests that getNextDup returns NOTFOUND when it skips over a deleted + * duplicate for the following main key. 
[#19026] + */ + @Test + public void testNextAfterDupDeleteBug() + throws DatabaseException, InterruptedException { + + openEnv(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + /* Insert dups. */ + insert(1, 1); + insert(2, 1); + insert(2, 2); + + /* Delete {2,1}. */ + Transaction txn = env.beginTransaction(null, txnConfig); + Cursor cursor = db.openCursor(txn, null); + assertEquals(OperationStatus.SUCCESS, searchBoth(cursor, 2, 1)); + assertEquals(OperationStatus.SUCCESS, cursor.delete()); + cursor.close(); + txn.commit(Durability.COMMIT_NO_SYNC); + + /* + * When positioned on {1,1}, getNextDup should always return NOTFOUND. + * A bug (fixed in [#19026]) caused the cursor to move to {2,2} and + * return SUCCESS. This only occurred with serializable isolation, and + * when the deleted {2,1} record had not been compressed. The + * underlying cause is that CursorImpl.getNextWithKeyChangeStatus was + * not indicating a key change when skipping over the first deleted + * duplicate ({2,1} in this case) in the duplicate set. + */ + txn = env.beginTransaction(null, txnConfig); + cursor = db.openCursor(txn, null); + assertEquals(OperationStatus.SUCCESS, searchKey(cursor, 1, 1)); + status = cursor.getNextDup(key, data, null); + assertEquals(OperationStatus.NOTFOUND, status); + cursor.close(); + txn.commit(Durability.COMMIT_NO_SYNC); + + closeEnv(); + } + + /** + * Performs getSearchKey on the given key, expects data to be zero. + */ + private OperationStatus searchKey(Cursor cursor, int keyVal) + throws DatabaseException { + + return searchKey(cursor, keyVal, 0); + } + + /** + * Performs getSearchKey on the given key, expects given data value. + */ + private OperationStatus searchKey(Cursor cursor, int keyVal, int dataVal) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(keyVal, key); + OperationStatus status = cursor.getSearchKey(key, data, null); + if (status == OperationStatus.SUCCESS) { + assertEquals(keyVal, IntegerBinding.entryToInt(key)); + assertEquals(dataVal, IntegerBinding.entryToInt(data)); + } + return status; + } + + /** + * Performs getSearchBoth on the given key and zero data. + */ + private OperationStatus searchBoth(Cursor cursor, int keyVal) + throws DatabaseException { + + return searchBoth(cursor, keyVal, 0, false); + } + + /** + * Performs getSearchBoth on the given key and zero data. + * + * getSearchBoth and getSearchBothRange are equivalent for a non-dup DB, so + * we allow testing either. + */ + private OperationStatus searchBoth(Cursor cursor, + int keyVal, + boolean useRangeSearch) + throws DatabaseException { + + return searchBoth(cursor, keyVal, 0, useRangeSearch); + } + + /** + * Performs getSearchBoth on the given key and data. + */ + private OperationStatus searchBoth(Cursor cursor, int keyVal, int dataVal) + throws DatabaseException { + + return searchBoth(cursor, keyVal, dataVal, false); + } + + /** + * Performs getSearchBoth on the given key and data. + * + * getSearchBoth and getSearchBothRange are equivalent for a non-dup DB, so + * we allow testing either. 
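+ * For example, for the non-dup databases in this test, + * searchBoth(cursor, 1, 0, true) and searchBoth(cursor, 1, 0, false) + * should return the same status.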
+ */ + private OperationStatus searchBoth(Cursor cursor, + int keyVal, + int dataVal, + boolean useRangeSearch) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(keyVal, key); + IntegerBinding.intToEntry(dataVal, data); + OperationStatus status; + if (useRangeSearch) { + status = cursor.getSearchBothRange(key, data, null); + } else { + status = cursor.getSearchBoth(key, data, null); + } + if (status == OperationStatus.SUCCESS) { + assertEquals(keyVal, IntegerBinding.entryToInt(key)); + assertEquals(dataVal, IntegerBinding.entryToInt(data)); + } + return status; + } + + /** + * Inserts the given key in a new transaction and commits it. + */ + private void insert(int keyVal) + throws DatabaseException { + + insert(keyVal, 0); + } + + /** + * Inserts the given key and data in a new transaction and commits it. + */ + private void insert(int keyVal, int dataVal) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(keyVal, key); + IntegerBinding.intToEntry(dataVal, data); + OperationStatus status; + Transaction writerTxn = env.beginTransaction(null, txnConfig); + try { + if (dups) { + status = db.putNoDupData(writerTxn, key, data); + } else { + status = db.putNoOverwrite(writerTxn, key, data); + } + } catch (LockConflictException e) { + writerTxn.abort(); + throw e; + } + assertEquals(OperationStatus.SUCCESS, status); + writerTxn.commit(Durability.COMMIT_NO_SYNC); + } + + /** + * Starts writer thread and waits for it to start the insert. + */ + private void startInsert(final int keyVal) + throws DatabaseException, InterruptedException { + + startInsert(keyVal, 0); + } + + /** + * Starts writer thread and waits for it to start the insert. + */ + private void startInsert(final int keyVal, final int dataVal) + throws DatabaseException, InterruptedException { + + EnvironmentStats origStats = env.getStats(null); + insertFinished = false; + + writerThread = new JUnitThread("Writer") { + public void testBody() + throws DatabaseException { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + IntegerBinding.intToEntry(keyVal, key); + IntegerBinding.intToEntry(dataVal, data); + Transaction writerTxn = env.beginTransaction(null, txnConfig); + if (dups) { + status = db.putNoDupData(writerTxn, key, data); + } else { + status = db.putNoOverwrite(writerTxn, key, data); + } + assertEquals(OperationStatus.SUCCESS, status); + writerTxn.commit(Durability.COMMIT_NO_SYNC); + insertFinished = true; + } + }; + + writerThread.start(); + + long startTime = System.currentTimeMillis(); + while (true) { + + /* Give some time to the writer thread. */ + Thread.yield(); + Thread.sleep(10); + if (System.currentTimeMillis() - startTime > MAX_INSERT_MILLIS) { + fail("Timeout doing insert"); + } + + if (txnSerializable) { + + /* Wait for the insert to block. */ + EnvironmentStats stats = env.getStats(null); + if (stats.getNWaiters() > origStats.getNWaiters()) { + break; + } + } else { + + /* Wait for the operation to complete. */ + if (insertFinished) { + insertFinished = false; + break; + } + } + } + } + + /** + * Waits for the writer thread to finish. 
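+ * Any failure raised in the writer's testBody surfaces from finishTest + * and is reported here via fail.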
+     */
+    private void waitForInsert() {
+
+        try {
+            writerThread.finishTest();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            fail(e.toString());
+        } finally {
+            writerThread = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/test/SR11297Test.java b/test/com/sleepycat/je/test/SR11297Test.java
new file mode 100644
index 0000000..1729484
--- /dev/null
+++ b/test/com/sleepycat/je/test/SR11297Test.java
@@ -0,0 +1,189 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+
+import org.junit.After;
+import org.junit.Test;
+
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Fix for SR11297. When the first BIN in a database was empty,
+ * CursorImpl.positionFirstOrLast(true, null) was returning false, causing
+ * Cursor.getFirst to return NOTFOUND. This test reproduces that problem by
+ * creating a database with the first BIN empty and the second BIN non-empty.
+ *
+ *

+ * A specific sequence where partial compression takes place is necessary to
+ * reproduce the problem. A duplicate is added as the first entry in the first
+ * BIN, then that BIN is filled and one entry is added to the next BIN. Then
+ * all records in the first BIN are deleted. compress() is called once, which
+ * deletes the duplicate tree and all entries in the first BIN, but the first
+ * BIN will not be deleted until the next compression. At that point in time,
+ * getFirst failed to find the record in the second BIN.
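+ *
+ * The shape of the check, condensed here for illustration from test11297
+ * below (the second compress() call is an addition in this sketch, not part
+ * of the test):
+ *
+ *   checkFirstRecord();  // getFirst finds {3,0} in the second BIN
+ *   env.compress();      // pass 1: deleted entries go, empty BIN remains
+ *   checkFirstRecord();  // before the fix, getFirst returned NOTFOUND here
+ *   env.compress();      // pass 2: the now-empty first BIN itself is pruned
+ *   checkFirstRecord();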

        + */ +public class SR11297Test extends TestBase { + + /* Minimum child entries per BIN. */ + private static int N_ENTRIES = 4; + + private static CheckpointConfig forceCheckpoint = new CheckpointConfig(); + static { + forceCheckpoint.setForce(true); + } + + private File envHome; + private Environment env; + + public SR11297Test() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + try { + if (env != null) { + env.close(); + } + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + + envHome = null; + env = null; + } + + private void openEnv() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setAllowCreate(true); + /* Make as small a log as possible to save space in CVS. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + /* Use a 100 MB log file size to ensure only one file is written. */ + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(100 * (1 << 20))); + /* Force BIN-delta. */ + envConfig.setConfigParam + (EnvironmentParams.BIN_DELTA_PERCENT.getName(), + Integer.toString(75)); + /* Force INDelete. */ + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), + Integer.toString(N_ENTRIES)); + env = new Environment(envHome, envConfig); + } + + private void closeEnv() + throws DatabaseException { + + env.close(); + env = null; + } + + @Test + public void test11297() + throws DatabaseException { + + openEnv(); + + /* Write db0 and db1. */ + for (int i = 0; i < 2; i += 1) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + Database db = env.openDatabase(null, "db" + i, dbConfig); + + /* Write: {0, 0}, {0, 1}, {1, 0}, {2, 0}, {3, 0} */ + for (int j = 0; j < N_ENTRIES; j += 1) { + db.put(null, entry(j), entry(0)); + } + db.put(null, entry(0), entry(1)); + + /* Delete everything but the last record. */ + for (int j = 0; j < N_ENTRIES - 1; j += 1) { + db.delete(null, entry(j)); + } + + db.close(); + } + + checkFirstRecord(); + env.compress(); + checkFirstRecord(); + + closeEnv(); + } + + /** + * First and only record in db1 should be {3,0}. 
+ */ + private void checkFirstRecord() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(true); + dbConfig.setSortedDuplicates(true); + Database db = env.openDatabase(null, "db1", dbConfig); + Cursor cursor = db.openCursor(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = cursor.getFirst(key, data, null); + assertEquals(OperationStatus.SUCCESS, status); + assertEquals(3, value(key)); + assertEquals(0, value(data)); + cursor.close(); + db.close(); + } + + static DatabaseEntry entry(int val) { + + byte[] data = new byte[] { (byte) val }; + return new DatabaseEntry(data); + } + + static int value(DatabaseEntry entry) { + + byte[] data = entry.getData(); + if (data.length != 1) { + throw new IllegalStateException("len=" + data.length); + } + return data[0]; + } +} diff --git a/test/com/sleepycat/je/test/SecondaryAssociationTest.java b/test/com/sleepycat/je/test/SecondaryAssociationTest.java new file mode 100644 index 0000000..f8661e1 --- /dev/null +++ b/test/com/sleepycat/je/test/SecondaryAssociationTest.java @@ -0,0 +1,941 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Get; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryAssociation; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Tests SecondaryAssociation with complex associations. 
+ *
+ * SecondaryTest tests SecondaryAssociation in the simple case where each
+ * secondary is associated with a single primary. It performs a more exhaustive
+ * API test.
+ *
+ * This test is focused on complex associations and concurrent operations. It
+ * includes:
+ * - Multiple primary DBs per index
+ * - Multiple "tables" per primary DB
+ * - Incremental primary key deletion
+ *
+ * This test is intended to be run either as part of the unit test suite, or as
+ * a longer running stress test when -Dlongtest=true is specified. In the
+ * default mode, it runs in less than one minute but still exercises concurrent
+ * operations to some degree. When -Dlongtest=true is specified, it takes
+ * around 15 minutes.
+ *
+ * For simplicity and speed of execution, this is not a DualTestCase because
+ * SecondaryAssociation-with-HA testing is done by SecondaryTest. TxnTestCase
+ * is also not used to vary txn type; all operations are transactional.
+ *
+ * In this test, a many-to-many mapping between primaries and secondaries is
+ * implemented as follows:
+ * - Each primary key is 4 bytes long.
+ * - A logical "table" is labeled by a primary key prefix Tn in the first two
+ *   bytes of the key: T0, T1, T2, etc.
+ * - The next 2 bytes of the primary key are a randomly generated
+ *   discriminator, meaning that there are 64K maximum records per table.
+ * - Primary records for all tables are spread among m primary DBs, and a
+ *   primary key is hashed to determine the primary DB ID.
+ * - Each table labeled Tn has n secondaries, e.g., T0 has no secondaries, and
+ *   T5 has 5 secondaries.
+ * - The secondaries have integer IDs from 0 to n-1, which are locally unique
+ *   for each table.
+ * - Each secondary key is one byte long. It is extracted from the primary
+ *   data at index N, where N is the secondary ID.
+ *
+ * It is the application's responsibility to guarantee that a primary or
+ * secondary DB is not accessed after it is closed. This test uses a "clean
+ * cycle" mechanism to ensure that all in-progress operations on a DB are
+ * completed after it is removed from the association, and before it is closed.
+ * A clean cycle is defined as a complete operation based on current
+ * information derived from the association.
+ *
+ * Limitations
+ * ===========
+ * Secondary addition/removal is not tested concurrently with primary
+ * addition/removal, although these combinations should work in principle.
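+ *
+ * To make the mapping concrete, here is an illustrative sketch (r1, r2 and
+ * priData are stand-ins, not test fields) of how one record in table T3 is
+ * routed, mirroring getPrimaryId and MyKeyCreator below:
+ *
+ *   byte[] priKey = { 'T', 3, r1, r2 };         // prefix T3 + 2 random bytes
+ *   int sum = 0;
+ *   for (byte b : priKey) {
+ *       sum += b;
+ *   }
+ *   int priDbId = Math.abs(sum % N_PRIMARIES);  // stored in DB "P" + priDbId
+ *
+ *   byte[] secKey = { priData[1] };             // key for T3's secondary ID 1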
+ */ +public class SecondaryAssociationTest extends TestBase { + private static final int N_TABLES; + private static final int N_PRIMARIES; + private static final int N_KEY_DISCRIMINATOR_BYTES = 2; + private static final int SLEEP_MS_BETWEEN_PHASES; + private static final boolean VERBOSE; + + static { + if (SharedTestUtils.runLongTests()) { + N_TABLES = 20; + N_PRIMARIES = 50; + SLEEP_MS_BETWEEN_PHASES = 60 * 1000; + VERBOSE = true; + } else { + N_TABLES = 3; + N_PRIMARIES = 20; + SLEEP_MS_BETWEEN_PHASES = 1000; + VERBOSE = false; + } + } + + private final Random rnd; + private final AtomicBoolean shutdownFlag; + private final AtomicReference failureException; + private final AtomicInteger nWrites; + private final AtomicInteger nInserts; + private final AtomicInteger nUpdates; + private final AtomicInteger nDeletes; + private final MyAssociation assoc; + private final File envHome = SharedTestUtils.getTestDir(); + private Environment env; + private ExecutorService executor; + private volatile int removedPriId = -1; + private volatile int addedPriId = -1; + private volatile Database addedPriDb; + + public SecondaryAssociationTest() { + rnd = new Random(123); + shutdownFlag = new AtomicBoolean(false); + failureException = new AtomicReference(null); + nWrites = new AtomicInteger(0); + nInserts = new AtomicInteger(0); + nUpdates = new AtomicInteger(0); + nDeletes = new AtomicInteger(0); + assoc = new MyAssociation(); + executor = Executors.newCachedThreadPool(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + + final EnvironmentConfig config = new EnvironmentConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + config.setDurability(Durability.COMMIT_NO_SYNC); + + /* Avoid lock timeouts on slow test machines. */ + config.setLockTimeout(5, TimeUnit.SECONDS); + + env = new Environment(envHome, config); + } + + @After + public void tearDown() + throws Exception { + + /* Ensure resources are released for the sake of tests that follow. */ + try { + if (executor != null) { + executor.shutdownNow(); + } + } finally { + executor = null; + try { + if (env != null) { + env.close(); + } + } finally { + env = null; + /* Always call superclass method. */ + super.tearDown(); + } + } + } + + @Test + public void concurrentTests() + throws InterruptedException, ExecutionException, TimeoutException { + + /* Sleep calls are to let writes/verify run between stages. 
*/ + createAllTables(); + final TaskMonitor writeMonitor = startPrimaryWrites(); + final TaskMonitor verifyMonitor = startVerify(); + waitForFullPrimaries(); + addSecondaries(); + Thread.sleep(SLEEP_MS_BETWEEN_PHASES); + removeOnePrimary(writeMonitor, verifyMonitor); + Thread.sleep(SLEEP_MS_BETWEEN_PHASES); + addOnePrimary(writeMonitor, verifyMonitor); + Thread.sleep(SLEEP_MS_BETWEEN_PHASES); + removeSecondaries(writeMonitor, verifyMonitor); + Thread.sleep(SLEEP_MS_BETWEEN_PHASES); + writeMonitor.stop(); + verifyMonitor.stop(); + shutdown(); + closeAllTables(); + checkFailure(); + } + + private void createAllTables() { + for (int tableId = 0; tableId < N_TABLES; tableId += 1) { + assoc.addTable(tableId); + } + for (int priId = 0; priId < N_PRIMARIES; priId += 1) { + final Database db = openPrimaryDatabase(priId); + assoc.addPrimary(priId, db); + } + } + + private Database openPrimaryDatabase(final int priId) { + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setSecondaryAssociation(assoc); + return env.openDatabase(null, "P" + priId, dbConfig); + } + + private void closeAllTables() { + for (final Database db : assoc.getAllPrimaries()) { + db.close(); + } + for (final SecondaryDatabase secDb : assoc.getAllSecondaries()) { + secDb.close(); + } + } + + private void addSecondaries() { + if (VERBOSE) { + System.out.println("Start adding secondaries"); + } + for (int secId = 0; secId < N_TABLES; secId += 1) { + /* Add one secondary (at most) to each table. */ + final Collection dbsAdded = + new ArrayList(); + for (int tableId = 0; tableId < N_TABLES; tableId += 1) { + if (secId >= tableId) { + continue; + } + final SecondaryConfig dbConfig = new SecondaryConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setSecondaryAssociation(assoc); + dbConfig.setKeyCreator(new MyKeyCreator(secId)); + dbConfig.setSortedDuplicates(true); + final SecondaryDatabase db = env.openSecondaryDatabase( + null, "T" + tableId + "S" + secId, null, dbConfig); + /* Enable incremental mode BEFORE adding to association. */ + db.startIncrementalPopulation(); + assoc.addSecondary(tableId, secId, db); + dbsAdded.add(db); + checkFailure(); + } + /* Populate the secondaries we just created. */ + for (final Database db : assoc.getAllPrimaries()) { + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final Cursor cursor = db.openCursor( + null, CursorConfig.READ_COMMITTED); + OperationResult result; + while ((result = cursor.get( + keyEntry, dataEntry, Get.NEXT, null)) != null) { + db.populateSecondaries( + null, keyEntry, dataEntry, result.getExpirationTime(), + null); + } + cursor.close(); + } + /* Disable incremental mode now that population is complete. */ + for (final SecondaryDatabase db : dbsAdded) { + db.endIncrementalPopulation(); + } + if (VERBOSE) { + System.out.format("Added %d secondaries after %,d writes\n", + dbsAdded.size(), nWrites.get()); + } + } + if (VERBOSE) { + System.out.println("Done adding secondaries"); + } + } + + private void removeSecondaries(final TaskMonitor writeMonitor, + final TaskMonitor verifyMonitor) + throws InterruptedException { + + if (VERBOSE) { + System.out.println("Start removing secondaries"); + } + for (int tableId = 0; tableId < N_TABLES; tableId += 1) { + for (int secId = 0; secId < tableId; secId += 1) { + /* 1. 
Remove from association. */
+                final SecondaryDatabase db =
+                    assoc.removeSecondary(tableId, secId);
+                /* 2. Wait for in-progress operations to complete. */
+                writeMonitor.waitForCleanCycle();
+                verifyMonitor.waitForCleanCycle();
+                /* 3. Close/remove database. */
+                final String dbName = db.getDatabaseName();
+                db.close();
+                env.removeDatabase(null, dbName);
+                checkFailure();
+            }
+            assertEquals(0, assoc.getSecondaries(tableId).size());
+        }
+        if (VERBOSE) {
+            System.out.println("Done removing secondaries");
+        }
+    }
+
+    private void removeOnePrimary(final TaskMonitor writeMonitor,
+                                  final TaskMonitor verifyMonitor)
+        throws InterruptedException {
+
+        if (VERBOSE) {
+            System.out.println("Start removing primary");
+        }
+
+        /*
+         * 1. Remove from association.
+         *
+         * Remove last primary, as it has the most secondaries. removedPriId is
+         * set as an indicator that this DB should no longer be used for
+         * verify/writes.
+         */
+        removedPriId = N_PRIMARIES - 1;
+        final Database db = assoc.removePrimary(removedPriId);
+        final long recCount = db.count();
+
+        if (VERBOSE) {
+            System.out.println("Wait for removed primary operations to stop");
+        }
+
+        /* 2. Wait for in-progress operations to complete. */
+        writeMonitor.waitForCleanCycle();
+        verifyMonitor.waitForCleanCycle();
+
+        if (VERBOSE) {
+            System.out.format("Close and remove primary DB with %,d records\n",
+                              recCount);
+        }
+
+        /* 3. Close/remove database. */
+        final String dbName = db.getDatabaseName();
+        db.close();
+        env.removeDatabase(null, dbName);
+        if (VERBOSE) {
+            System.out.println("Delete obsolete primary keys");
+        }
+        for (final SecondaryDatabase secDb : assoc.getAllSecondaries()) {
+            final DatabaseEntry keyEntry = new DatabaseEntry();
+            final DatabaseEntry dataEntry = new DatabaseEntry();
+            while (secDb.deleteObsoletePrimaryKeys(keyEntry, dataEntry, 100)) {
+                checkFailure();
+            }
+        }
+        if (VERBOSE) {
+            System.out.println("Done removing primary");
+        }
+    }
+
+    private void addOnePrimary(final TaskMonitor writeMonitor,
+                               final TaskMonitor verifyMonitor)
+        throws InterruptedException {
+
+        if (VERBOSE) {
+            System.out.println("Start adding primary");
+        }
+
+        assertTrue(removedPriId >= 0);
+        assertTrue(addedPriId < 0);
+        assertNull(addedPriDb);
+
+        addedPriId = removedPriId;
+        addedPriDb = openPrimaryDatabase(addedPriId);
+
+        final int initialWrites = nWrites.get();
+        while (nWrites.get() - initialWrites < 100000) {
+            Thread.sleep(10);
+            checkFailure();
+        }
+
+        final long recCount = addedPriDb.count();
+
+        assoc.addPrimary(addedPriId, addedPriDb);
+
+        removedPriId = -1;
+        addedPriId = -1;
+        addedPriDb = null;
+
+        if (VERBOSE) {
+            System.out.format("Done adding primary, wrote %,d\n", recCount);
+        }
+    }
+
+    /**
+     * Starts two threads to do writes.
+     *
+     * Waits for at least 500 writes before returning, to ensure the next step
+     * is done concurrently with writing.
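+     *
+     * Each writer also bumps its cleanCycle counter after every completed
+     * unit of work computed from the current association; that counter is
+     * what TaskMonitor.waitForCleanCycle (below) polls before a DB pulled out
+     * of the association may be closed. Reduced to a sketch (names are
+     * illustrative):
+     *
+     *   // writer thread, after each operation based on the association:
+     *   cleanCycle.incrementAndGet();
+     *
+     *   // removing thread, after removePrimary/removeSecondary:
+     *   final int snapshot = cleanCycle.get();
+     *   while (cleanCycle.get() <= snapshot) {
+     *       Thread.sleep(10);
+     *   }
+     *   // once the counter advances, no in-flight op can still use the DB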
+ */ + private TaskMonitor startPrimaryWrites() + throws InterruptedException { + + final AtomicBoolean stopTaskFlag = new AtomicBoolean(false); + + class WriteTask extends Task { + private final AtomicInteger cleanCycle; + private final String label; + + WriteTask(final AtomicInteger cleanCycle, final String label) { + this.cleanCycle = cleanCycle; + this.label = label; + } + + public void execute() { + runPrimaryWrites(stopTaskFlag, cleanCycle, label); + } + } + + final AtomicInteger cleanCycle1 = new AtomicInteger(0); + final AtomicInteger cleanCycle2 = new AtomicInteger(0); + final Runnable task1 = new WriteTask(cleanCycle1, "t1"); + final Runnable task2 = new WriteTask(cleanCycle2, "t2"); + final Future future1 = executor.submit(task1); + final Future future2 = executor.submit(task2); + + final int initialWrites = nWrites.get(); + while (nWrites.get() - initialWrites < 500) { + Thread.sleep(10); + checkFailure(); + } + + final TaskMonitor taskMonitor = new TaskMonitor(stopTaskFlag); + taskMonitor.add(future1, cleanCycle1); + taskMonitor.add(future2, cleanCycle2); + return taskMonitor; + } + + /** + * Writes randomly generated primary records until shutdown/stop. + * + * Since the keyspace is small (64K maximum keys per table), this will + * eventually do updates as well as inserts. For 1/5 records, they are + * immediately deleted after being written. + */ + private void runPrimaryWrites(final AtomicBoolean stopTaskFlag, + final AtomicInteger cleanCycle, + final String label) { + /* Key and data are fixed length. */ + final byte[] keyBytes = + new byte[2 + N_KEY_DISCRIMINATOR_BYTES]; + final byte[] dataBytes = new byte[N_TABLES]; + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + /* First byte of key is fixed. */ + keyBytes[0] = 'T'; + /* Write until shutdown or stopped. */ + while (true) { + for (int tableId = 0; tableId < N_TABLES; tableId += 1) { + if (shutdownFlag.get() || stopTaskFlag.get()) { + return; + } + /* Second byte of key is table ID. */ + keyBytes[1] = (byte) tableId; + /* Rest of key is random. */ + for (int j = 2; j < keyBytes.length; j += 1) { + keyBytes[j] = (byte) rnd.nextInt(256); + } + /* Insert or update with random data. */ + keyEntry.setData(keyBytes); + dataEntry.setData(dataBytes); + Database priDb = assoc.getPrimary(keyEntry); + if (priDb == null) { + final int priId = getPrimaryId(keyEntry); + if (priId == addedPriId) { + priDb = addedPriDb; + } else { + assertEquals(removedPriId, priId); + cleanCycle.incrementAndGet(); + continue; + } + } + rnd.nextBytes(dataBytes); + if (priDb.putNoOverwrite(null, keyEntry, dataEntry) == + OperationStatus.SUCCESS) { + nInserts.incrementAndGet(); + } else { + priDb.put(null, keyEntry, dataEntry); + nUpdates.incrementAndGet(); + } + /* Delete 1/5 records written. */ + if (rnd.nextInt(5) == 1) { + priDb.delete(null, keyEntry); + nDeletes.incrementAndGet(); + } + nWrites.incrementAndGet(); + if (VERBOSE && (nWrites.get() % 100000 == 0)) { + printWriteTotals(label); + } + cleanCycle.incrementAndGet(); + } + } + } + + /** + * Waits for updates to be at least 1/5 of all writes, meaning that the + * keyspace for the primaries has been populated. + */ + private void waitForFullPrimaries() + throws InterruptedException { + + while (4.0 * nUpdates.get() < nInserts.get()) { + Thread.sleep(10); + checkFailure(); + } + if (VERBOSE) { + printWriteTotals(""); + } + } + + /** + * Starts one thread to do verification. 
+ */ + private TaskMonitor startVerify() { + + final AtomicBoolean stopTaskFlag = new AtomicBoolean(false); + final AtomicInteger nPriVerified = new AtomicInteger(0); + final AtomicInteger nSecVerified = new AtomicInteger(0); + final AtomicInteger cleanCycles = new AtomicInteger(0); + final Runnable task = new Task() { + public void execute() { + while (!shutdownFlag.get() && !stopTaskFlag.get()) { + runVerify(stopTaskFlag, cleanCycles, + nPriVerified, nSecVerified); + } + } + }; + + final Future future = executor.submit(task); + final TaskMonitor taskMonitor = new TaskMonitor(stopTaskFlag); + taskMonitor.add(future, cleanCycles); + return taskMonitor; + } + + /** + * Checks primary-secondary linkages/integrity, namely that a primary + * record contains secondary keys matching the records present in the + * secondary databases. + */ + private void runVerify(final AtomicBoolean stopTaskFlag, + final AtomicInteger cleanCycles, + final AtomicInteger nPriVerified, + final AtomicInteger nSecVerified) { + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final DatabaseEntry secKeyEntry = new DatabaseEntry(); + final DatabaseEntry noReturnData = new DatabaseEntry(); + noReturnData.setPartial(0, 0, true); + + for (int priId = 0; priId < N_PRIMARIES; priId += 1) { + final Database db = assoc.getPrimary(priId); + if (db == null) { + assertEquals(removedPriId, priId); + continue; + } + final Cursor c = db.openCursor(null, CursorConfig.READ_COMMITTED); + try { + while (c.getNext(keyEntry, dataEntry, null) == + OperationStatus.SUCCESS) { + if (assoc.getPrimary(priId) == null) { + break; + } + final int tableId = keyEntry.getData()[1]; + final byte[] dataBytes = dataEntry.getData(); + for (int secId = 0; secId < tableId; secId += 1) { + if (shutdownFlag.get() || stopTaskFlag.get()) { + return; + } + final SecondaryDatabase secDb = + assoc.getSecondary(tableId, secId); + if (secDb == null || + secDb.isIncrementalPopulationEnabled()) { + continue; + } + secKeyEntry.setData(new byte[] {dataBytes[secId]}); + final OperationStatus status = secDb.getSearchBoth( + null, secKeyEntry, keyEntry, noReturnData, + LockMode.READ_UNCOMMITTED); + if (OperationStatus.SUCCESS != status) { + if (assoc.getPrimary(priId) == null) { + break; + } + fail("Sec key missing " + status + ' ' + + secDb.getDatabaseName() + ' ' + priId + ' ' + + secKeyEntry + ' ' + keyEntry); + } + } + nPriVerified.incrementAndGet(); + if (VERBOSE && nPriVerified.get() % 500000 == 0) { + System.out.format("nPriVerified %,d\n", + nPriVerified.get()); + } + } + } finally { + c.close(); + } + cleanCycles.incrementAndGet(); + } + + /* + * TODO: Perform with normal locking rather than dirty-read, once the + * deadlock-free secondary feature is implemented. 
+ */ + for (int tableId = 0; tableId < N_TABLES; tableId += 1) { + for (int secId = 0; secId < tableId; secId += 1) { + final SecondaryDatabase secDb = + assoc.getSecondary(tableId, secId); + if (secDb == null || + secDb.isIncrementalPopulationEnabled()) { + continue; + } + final SecondaryCursor c = + secDb.openCursor(null, CursorConfig.READ_UNCOMMITTED); + try { + while (c.getNext(secKeyEntry, keyEntry, dataEntry, null) == + OperationStatus.SUCCESS) { + if (shutdownFlag.get() || stopTaskFlag.get()) { + return; + } + assertEquals(tableId, keyEntry.getData()[1]); + assertEquals(dataEntry.getData()[secId], + secKeyEntry.getData()[0]); + nSecVerified.incrementAndGet(); + if (VERBOSE && nSecVerified.get() % 500000 == 0) { + System.out.format("nSecVerified %,d\n", + nSecVerified.get()); + } + } + } finally { + c.close(); + } + cleanCycles.incrementAndGet(); + } + } + } + + private class TaskMonitor { + private final AtomicBoolean stopFlag; + private final List> futures; + private final List cleanCycles; + + TaskMonitor(final AtomicBoolean stopFlag) { + this.stopFlag = stopFlag; + futures = new ArrayList>(); + cleanCycles = new ArrayList(); + } + + void add(final Future future, final AtomicInteger cleanCycle) { + futures.add(future); + cleanCycles.add(cleanCycle); + } + + void waitForCleanCycle() + throws InterruptedException { + + final int[] prevCleanCycles = new int[cleanCycles.size()]; + for (int i = 0; i < prevCleanCycles.length; i += 1) { + prevCleanCycles[i] = cleanCycles.get(i).get(); + } + while (true) { + boolean allDone = true; + for (int i = 0; i < prevCleanCycles.length; i += 1) { + if (prevCleanCycles[i] >= cleanCycles.get(i).get()) { + allDone = false; + break; + } + } + if (allDone) { + break; + } + Thread.sleep(10); + checkFailure(); + } + } + + void stop() + throws InterruptedException, ExecutionException, TimeoutException { + + stopFlag.set(true); + for (final Future future : futures) { + future.get(10, TimeUnit.SECONDS); + } + } + } + + /** + * Saves first exception encountered, which also serves as a failure + * indicator -- non-null means failure. + */ + private void noteFailure(Throwable t) { + + t.printStackTrace(System.out); + failureException.compareAndSet(null, t); + } + + /** + * If an exception caused a failure, throw it so it appears as the cause of + * the JUnit test failure. This method is meant to be called from the + * main thread, i.e., the one running the JUnit test. + */ + private void checkFailure() { + final Throwable t = failureException.get(); + if (t == null) { + return; + } + throw new IllegalStateException( + "See cause exception. Other exceptions in output may also be " + + "related.", t); + } + + /** + * A Runnable that calls an execute() method, that is implemented by the + * caller, and handles exceptions. 
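+     *
+     * Combined with noteFailure and checkFailure this forms a
+     * first-failure-wins channel from worker threads back to the JUnit
+     * thread; a sketch of the wrapper's shape (matching the class below):
+     *
+     *   public void run() {
+     *       try {
+     *           execute();  // subclass-provided body
+     *       } catch (Throwable t) {
+     *           failureException.compareAndSet(null, t);  // keep first only
+     *       }
+     *   }
+     *
+     * The main thread then calls checkFailure() at each synchronization point
+     * and rethrows the saved Throwable as the cause of the JUnit failure.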
+ */ + private abstract class Task implements Runnable { + + public void run() { + try { + execute(); + } catch (Throwable t) { + noteFailure(t); + } + } + + abstract void execute() throws Throwable; + } + + private void shutdown() + throws InterruptedException { + + shutdownFlag.set(true); + executor.shutdown(); + if (!executor.awaitTermination(20, TimeUnit.SECONDS)) { + executor.shutdownNow(); + throw new IllegalStateException( + "Could not terminate executor normally"); + } + if (VERBOSE) { + printWriteTotals("final"); + } + checkFailure(); + } + + private void printWriteTotals(final String label) { + System.out.format( + "%s nWrites %,d nInserts %,d, nUpdates %,d nDeletes %,d\n", label, + nWrites.get(), nInserts.get(), nUpdates.get(), nDeletes.get()); + } + + /** + * Performs a simplistic (not very evenly distributed) hash of the primary + * key to get a primary DB ID between zero and (N_PRIMARIES - 1). For + * this to work best, the primary key should contain some randomly + * generated values. + */ + private static int getPrimaryId(final DatabaseEntry primaryKey) { + int sum = 0; + final byte[] data = primaryKey.getData(); + for (int i = 0; i < data.length; i += 1) { + sum += data[i]; + } + return Math.abs(sum % N_PRIMARIES); + } + + /** + * Creates a secondary key from the Nth byte of the primary data, where + * N is the secondary ID passed to the constructor. + * + * TODO replace with new SecondaryKeyExtractor when available. + */ + private static class MyKeyCreator implements SecondaryKeyCreator { + private final int secId; + + MyKeyCreator(int secId) { + this.secId = secId; + } + + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData(new byte[] {data.getData()[secId]}); + return true; + } + } + + /** + * This class implements a SecondaryAssociation in a semi-realistic manner, + * simulating an app that maintains associations per logical table. + * + * However, in a real app, it is expected that the association metadata + * would be maintained separately and accessed in a read-only manner via + * this class. In other words, this class might not contain methods for + * adding and removing members in the association. + * + * Non-blocking data structures are used to hold association info, to avoid + * blocking on the methods in SecondaryAssociation, which are frequently + * called by many threads. + */ + private static class MyAssociation implements SecondaryAssociation { + + /* Maps a primary DB ID to the primary DB. */ + private final Map primaries = + new ConcurrentHashMap(); + + /* Maps a table ID to its associated secondaries. */ + private final Map> tables = + new ConcurrentHashMap>(); + + /* Cheap-to-read indicator that any secondary DBs are present. 
*/ + private final AtomicInteger nSecondaries = new AtomicInteger(0); + + public boolean isEmpty() { + return (nSecondaries.get() == 0); + } + + public Database getPrimary(final DatabaseEntry primaryKey) { + final int priId = getPrimaryId(primaryKey); + return getPrimary(priId); + } + + public Collection getSecondaries( + final DatabaseEntry primaryKey) { + + final int tableId = primaryKey.getData()[1]; + return getSecondaries(tableId); + } + + Collection getSecondaries(final int tableId) { + final Map secondaries = + tables.get(tableId); + assertNotNull(secondaries); + return secondaries.values(); + } + + Database getPrimary(final int priId) { + assertTrue(String.valueOf(priId), priId >= 0); + assertTrue(String.valueOf(priId), priId < N_PRIMARIES); + return primaries.get(priId); + } + + void addPrimary(final int priId, final Database priDb) { + final Object oldVal = primaries.put(priId, priDb); + assertNull(oldVal); + } + + Database removePrimary(final int priId) { + final Database db = primaries.remove(priId); + assertNotNull(db); + return db; + } + + void addTable(final int tableId) { + final Map secondaries = + new ConcurrentHashMap(); + final Object oldVal = tables.put(tableId, secondaries); + assertNull(oldVal); + } + + SecondaryDatabase getSecondary(final int tableId, final int secId) { + final Map secondaries = + tables.get(tableId); + assertNotNull(secondaries); + final SecondaryDatabase secDb = secondaries.get(secId); + return secDb; + } + + void addSecondary(final int tableId, + final int secId, + final SecondaryDatabase secDb) { + final Map secondaries = + tables.get(tableId); + assertNotNull(secondaries); + final Object oldVal = secondaries.put(secId, secDb); + assertNull(oldVal); + nSecondaries.incrementAndGet(); + } + + SecondaryDatabase removeSecondary(final int tableId, final int secId) { + final Map secondaries = + tables.get(tableId); + assertNotNull(secondaries); + final SecondaryDatabase secDb = secondaries.remove(secId); + assertNotNull(secDb); + nSecondaries.decrementAndGet(); + return secDb; + } + + Collection getAllPrimaries() { + return primaries.values(); + } + + Collection getAllSecondaries() { + final Collection dbs = + new ArrayList(); + for (final Map secondaries : + tables.values()) { + dbs.addAll(secondaries.values()); + } + return dbs; + } + } +} diff --git a/test/com/sleepycat/je/test/SecondaryDirtyReadTest.java b/test/com/sleepycat/je/test/SecondaryDirtyReadTest.java new file mode 100644 index 0000000..c53200d --- /dev/null +++ b/test/com/sleepycat/je/test/SecondaryDirtyReadTest.java @@ -0,0 +1,558 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.util.test.SharedTestUtils; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.JoinCursor; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.junit.JUnitMethodThread; +import com.sleepycat.je.util.TestUtils; + +/** + * Tests for multithreading problems when using read-uncommitted with + * secondaries. If a primary record is updated while performing a + * read-uncommitted (in between reading the secondary and the primary), we need + * to be sure that we don't return inconsistent results to the user. For + * example, we should not return a primary data value that no longer contains + * the secondary key. We also need to ensure that deleting a primary record in + * the middle of a secondary read does not appear as a corrupt secondary. In + * both of these cases it should appear that the record does not exist, from + * the viewpoint of an application using a cursor. + * + *

+ * These tests create two threads, one reading and the other deleting or
+ * updating. The intention is for the reading thread and delete/update thread
+ * to race in operating on the same key (nextKey). If the reading thread reads
+ * the secondary, then the other thread deletes the primary, then the reading
+ * thread tries to read the primary, we've accomplished our goal. Prior to
+ * when we handled that case in SecondaryCursor, that situation would cause a
+ * "secondary corrupt" exception.
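+ *
+ * From the application's viewpoint the contract under test is (sketch; the
+ * entry names are illustrative):
+ *
+ *   OperationStatus status =
+ *       secDb.get(null, secKey, priKey, priData,
+ *                 LockMode.READ_UNCOMMITTED);
+ *   // If the primary is deleted, or updated so it no longer maps to secKey,
+ *   // between the secondary and primary reads, status must be NOTFOUND --
+ *   // never a "secondary corrupt" exception or inconsistent data.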

        + */ +@RunWith(Parameterized.class) +public class SecondaryDirtyReadTest extends MultiKeyTxnTestCase { + + private static final int MAX_KEY = + SharedTestUtils.runLongTests() ? 500000 : 1000; + private static final int N_DUPS = 3; + + private volatile int nextKey; + private Database priDb; + private SecondaryDatabase secDb; + private final LockMode lockMode; + private final boolean dups; + + @Parameters + public static List genParams() { + + return paramsHelper(false); + } + + protected static List paramsHelper(boolean rep) { + final String[] txnTypes = getTxnTypes(null, rep); + final List newParams = new ArrayList(); + for (final String type : txnTypes) { + newParams.add(new Object[] {type, true, true, false}); + newParams.add(new Object[] {type, false, true, false}); + newParams.add(new Object[] {type, false, false, false}); + newParams.add(new Object[] {type, true, false, false}); + newParams.add(new Object[] {type, false, true, true}); + newParams.add(new Object[] {type, false, false, true}); + } + return newParams; + } + + public SecondaryDirtyReadTest(String type, + boolean multiKey, + boolean duplicates, + boolean dirtyReadAll){ + initEnvConfig(); + txnType =type; + useMultiKey = multiKey; + isTransactional = (txnType != TXN_NULL); + dups = duplicates; + lockMode = dirtyReadAll ? + LockMode.READ_UNCOMMITTED_ALL : + LockMode.READ_UNCOMMITTED; + customName = ((useMultiKey) ? "multiKey" : "") + + "-" + txnType + "-" + dups; + } + + /** + * Closes databases, then calls the super.tearDown to close the env. + */ + @After + public void tearDown() + throws Exception { + + if (secDb != null) { + try { + secDb.close(); + } catch (Exception e) {} + secDb = null; + } + if (priDb != null) { + try { + priDb.close(); + } catch (Exception e) {} + priDb = null; + } + super.tearDown(); + } + + /** + * Tests that deleting primary records does not cause secondary + * read-uncommitted to throw a "secondary corrupt" exception. + */ + @Test + public void testDeleteWhileReadingByKey() + throws Throwable { + + doTest("runReadUncommittedByKey", "runPrimaryDelete"); + } + + /** + * Same as testDeleteWhileReadingByKey but does a scan. Read-uncommitted + * for scan and keyed reads are implemented differently, since scanning + * moves to the next record when a deletion is detected while a keyed read + * returns NOTFOUND. + */ + @Test + public void testDeleteWhileScanning() + throws Throwable { + + doTest("runReadUncommittedScan", "runPrimaryDelete"); + } + + /** + * Same as testDeleteWhileScanning but additionally does a join. + */ + @Test + public void testDeleteWithJoin() + throws Throwable { + + doTest("runReadUncommittedJoin", "runPrimaryDelete"); + } + + /** + * Tests that updating primary records, to cause deletion of the secondary + * key record, does not cause secondary read-uncommitted to return + * inconsistent data (a primary datum without a secondary key value). + */ + @Test + public void testUpdateWhileReadingByKey() + throws Throwable { + + doTest("runReadUncommittedByKey", "runPrimaryUpdate"); + } + + /** + * Same as testUpdateWhileReadingByKey but does a scan. + */ + @Test + public void testUpdateWhileScanning() + throws Throwable { + + doTest("runReadUncommittedScan", "runPrimaryUpdate"); + } + + /** + * Same as testUpdateWhileScanning but additionally does a join. 
+ */ + @Test + public void testUpdateWithJoin() + throws Throwable { + + doTest("runReadUncommittedJoin", "runPrimaryUpdate"); + } + + @Test + public void testAlternatingInsertDelete() + throws Throwable { + + doTest("runReadFirstRecordByKey", "runAlternatingInsertDelete", + false /*doAddRecords*/); + } + + private void doTest(String method1, String method2) + throws Throwable { + + doTest(method1, method2, true /*doAddRecords*/); + } + + /** + * Runs two threads for the given method names, after populating the + * database. + */ + private void doTest(String method1, String method2, boolean doAddRecords) + throws Throwable { + + JUnitMethodThread tester1 = new JUnitMethodThread(method1 + "-t1", + method1, this); + JUnitMethodThread tester2 = new JUnitMethodThread(method2 + "-t2", + method2, this); + priDb = openPrimary("testDB"); + secDb = openSecondary(priDb, "testSecDB"); + if (doAddRecords) { + addRecords(); + } + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + secDb.close(); + secDb = null; + priDb.close(); + priDb = null; + } + + /** + * Deletes the key that is being read by the other thread. + */ + public void runPrimaryDelete() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + while (nextKey < MAX_KEY - 1) { + Transaction txn = txnBeginCursor(); + key.setData(TestUtils.getTestArray(nextKey)); + /* Alternate use of Cursor.delete and Database.delete. */ + OperationStatus status; + if ((nextKey & 1) == 0) { + final Cursor cursor = priDb.openCursor(txn, null); + status = cursor.getSearchKey(key, new DatabaseEntry(), + LockMode.RMW); + if (status == OperationStatus.SUCCESS) { + status = cursor.delete(); + } + cursor.close(); + } else { + status = priDb.delete(txn, key); + } + if (status != OperationStatus.SUCCESS) { + assertEquals(OperationStatus.NOTFOUND, status); + } + txnCommit(txn); + } + } + + /** + * Updates the record for the key that is being read by the other thread, + * changing the datum to -1 so it will cause the secondary key record to + * be deleted. + */ + public void runPrimaryUpdate() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + while (nextKey < MAX_KEY - 1) { + Transaction txn = txnBegin(); + key.setData(TestUtils.getTestArray(nextKey)); + data.setData(TestUtils.getTestArray(-1)); + OperationStatus status = priDb.put(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + txnCommit(txn); + } + } + + /** + * Does a read-uncommitted by key, retrying until it is deleted by the + * delete/update thread, then moves to the next key. We shouldn't get an + * exception, just a NOTFOUND when it is deleted. + */ + public void runReadUncommittedByKey() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry pKey = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + while (nextKey < MAX_KEY - 1) { + key.setData(TestUtils.getTestArray(nextKey)); + OperationStatus status = secDb.get(null, key, pKey, data, + lockMode); + if (status != OperationStatus.SUCCESS) { + assertEquals(OperationStatus.NOTFOUND, status); + nextKey += 1; + } else { + assertEquals(nextKey, TestUtils.getTestVal(key.getData())); + assertEquals(nextKey, getSecKey(pKey)); + assertEquals(nextKey, getSecKey(data)); + /* For dups, advance next key to the primary key we found. 
*/
+                nextKey = TestUtils.getTestVal(pKey.getData());
+            }
+        }
+    }
+
+    /**
+     * Does a read-uncommitted scan through the whole key range, but moves
+     * forward only after the key is deleted by the delete/update thread. We
+     * shouldn't get an exception or a NOTFOUND, but we may skip values when a
+     * key is deleted.
+     */
+    public void runReadUncommittedScan()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry pKey = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        SecondaryCursor cursor = secDb.openSecondaryCursor(null, null);
+        while (nextKey < MAX_KEY - 1) {
+            OperationStatus status = cursor.getNext(key, pKey, data,
+                                                    lockMode);
+            assertEquals("nextKey=" + nextKey,
+                         OperationStatus.SUCCESS, status);
+            int keyFound = TestUtils.getTestVal(pKey.getData());
+            assertEquals(keyFound, TestUtils.getTestVal(data.getData()));
+            assertEquals(getSecKey(keyFound),
+                         TestUtils.getTestVal(key.getData()));
+            /* Let the delete/update thread catch up. */
+            nextKey = keyFound;
+            if (nextKey < MAX_KEY - 1) {
+                while (status != OperationStatus.KEYEMPTY) {
+                    assertEquals(OperationStatus.SUCCESS, status);
+                    status = cursor.getCurrent(key, pKey, data,
+                                               lockMode);
+                }
+                nextKey = keyFound + 1;
+            }
+        }
+        cursor.close();
+    }
+
+    /**
+     * Like runReadUncommittedScan, but also performs a join on each secondary
+     * key.
+     */
+    public void runReadUncommittedJoin()
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry pKey = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        DatabaseEntry joinPKey = new DatabaseEntry();
+        DatabaseEntry joinData = new DatabaseEntry();
+        SecondaryCursor cursor = secDb.openSecondaryCursor(null, null);
+        while (nextKey < MAX_KEY - 1) {
+            OperationStatus status = cursor.getNext(key, pKey, data, lockMode);
+            assertEquals("nextKey=" + nextKey,
+                         OperationStatus.SUCCESS, status);
+            int keyFound = TestUtils.getTestVal(pKey.getData());
+            assertEquals(keyFound, TestUtils.getTestVal(data.getData()));
+            assertEquals(getSecKey(keyFound),
+                         TestUtils.getTestVal(key.getData()));
+
+            /* Do a join on this value. Use two cursors for the same DB. */
+            SecondaryCursor cursor2 = cursor.dup(true /*samePosition*/);
+            JoinCursor joinCursor =
+                priDb.join(new Cursor[] { cursor, cursor2 }, null);
+            int nDups = 0;
+            OperationStatus joinStatus =
+                joinCursor.getNext(joinPKey, joinData, lockMode);
+            while (joinStatus == OperationStatus.SUCCESS) {
+                assertEquals(getSecKey(keyFound), getSecKey(joinPKey));
+                assertEquals(getSecKey(keyFound), getSecKey(joinData));
+                nDups += 1;
+                joinStatus = joinCursor.getNext(joinPKey, joinData, lockMode);
+            }
+            assertTrue("" + nDups, nDups <= N_DUPS);
+            assertEquals("nextKey=" + nextKey,
+                         OperationStatus.NOTFOUND, joinStatus);
+            cursor2.close();
+            joinCursor.close();
+
+            /* Let the delete/update thread catch up. */
+            nextKey = keyFound;
+            if (nextKey < MAX_KEY - 1) {
+                while (status != OperationStatus.KEYEMPTY) {
+                    assertEquals(OperationStatus.SUCCESS, status);
+                    status = cursor.getCurrent(key, pKey, data,
+                                               lockMode);
+                }
+                nextKey = keyFound + 1;
+            }
+        }
+        cursor.close();
+    }
+
+    /**
+     * Alternate insertion and deletion of key 0.
+ */ + public void runAlternatingInsertDelete() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(0)); + data.setData(TestUtils.getTestArray(0)); + while (nextKey == 0) { + Transaction txn = txnBegin(); + OperationStatus status = priDb.putNoOverwrite(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + status = priDb.delete(txn, key); + assertEquals(OperationStatus.SUCCESS, status); + txnCommit(txn); + } + } + + /** + * Read key 0 while runAlternatingInsertDelete is executing. The idea is + * to reproduce a bug that caused SecondaryIntegrityException [#22603]. + */ + public void runReadFirstRecordByKey() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry pKey = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(TestUtils.getTestArray(0)); + int nDeletions = 0; + while (nDeletions < MAX_KEY * 10) { + OperationStatus status = secDb.get(null, key, pKey, data, + lockMode); + if (status != OperationStatus.SUCCESS) { + assertEquals(OperationStatus.NOTFOUND, status); + nDeletions += 1; + } else { + assertEquals(0, TestUtils.getTestVal(key.getData())); + assertEquals(0, getSecKey(pKey)); + assertEquals(0, getSecKey(data)); + } + } + nextKey = 1; + } + + /** + * Adds records for the entire key range. + */ + private void addRecords() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + Transaction txn = txnBegin(); + for (int i = 0; i < MAX_KEY; i += 1) { + byte[] val = TestUtils.getTestArray(i); + key.setData(val); + data.setData(val); + OperationStatus status = priDb.putNoOverwrite(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + } + txnCommit(txn); + } + + /** + * Opens the primary database. + */ + private Database openPrimary(String name) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + Transaction txn = txnBegin(); + Database priDb; + try { + priDb = env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + assertNotNull(priDb); + return priDb; + } + + /** + * Opens the secondary database. + */ + private SecondaryDatabase openSecondary(Database priDb, String dbName) + throws DatabaseException { + + SecondaryConfig dbConfig = new SecondaryConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + if (useMultiKey) { + dbConfig.setMultiKeyCreator + (new SimpleMultiKeyCreator(new MyKeyCreator())); + } else { + dbConfig.setKeyCreator(new MyKeyCreator()); + } + Transaction txn = txnBegin(); + SecondaryDatabase secDb; + try { + secDb = env.openSecondaryDatabase(txn, dbName, priDb, dbConfig); + } finally { + txnCommit(txn); + } + return secDb; + } + + /** + * Creates secondary keys for a primary datum with a non-negative value. 
+ */ + private class MyKeyCreator implements SecondaryKeyCreator { + + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + int val = getSecKey(data); + if (val >= 0) { + result.setData(TestUtils.getTestArray(val)); + return true; + } else { + return false; + } + } + } + + private int getSecKey(DatabaseEntry data) { + int val = TestUtils.getTestVal(data.getData()); + return getSecKey(val); + } + + /** + * When dups are configured, the secondary key is truncated to a multiple + * of N_DUPS. + */ + private int getSecKey(int val) { + if (val < 0) { + return val; + } + if (dups) { + return val - (val % N_DUPS); + } + return val; + } +} diff --git a/test/com/sleepycat/je/test/SecondaryMultiComplexTest.java b/test/com/sleepycat/je/test/SecondaryMultiComplexTest.java new file mode 100644 index 0000000..481738e --- /dev/null +++ b/test/com/sleepycat/je/test/SecondaryMultiComplexTest.java @@ -0,0 +1,529 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.fail; +import static org.junit.Assert.assertSame; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Test involving secondary indexes with respect to deadlock free access. 
+ * + * @author dwchung + * + */ +@RunWith(Parameterized.class) +public class SecondaryMultiComplexTest extends TestBase { + + private enum ReadType {GETSEARCHKEY, GETSEARCHKEY2, GETSEARCHBOTH, + GETSEARCHBOTH2, GETSEARCHKEYRANGE, + GETSEARCHKEYRANGE2}; + private final ReadType[] readTypes = ReadType.values(); + private static final String DB_NAME = "foo"; + private static final String SDB_NAME = "fooSec"; + private volatile int currentEvent; + private volatile int threadid = 0; + private final boolean useDuplicate; + Environment env = null; + Database db = null; + SecondaryDatabase sdb = null; + + public SecondaryMultiComplexTest(boolean useDuplicate) { + this.useDuplicate = useDuplicate; + } + + @Parameters + public static List genParams() { + return paramsHelper(false); + } + + /* + * The parameters for the test is a boolean that + * determines if the secondary db supports duplicates. + */ + private static List paramsHelper(boolean rep) { + final List newParams = new ArrayList(); + newParams.add(new Object[] {false}); + newParams.add(new Object[] {true}); + return newParams; + } + + /* + * + */ + @Before + public void setup() { + File envHome = SharedTestUtils.getTestDir(); + + /* Init the Environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setTxnTimeout(5, TimeUnit.SECONDS); + envConfig.setLockTimeout(5, TimeUnit.SECONDS); + envConfig.setTxnSerializableIsolation(false); + + env = new Environment(envHome, envConfig); + + /* Open a database and insert some data. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + sdb = openSecondary(env, db, SDB_NAME, new SecondaryConfig()); + } + + @After + public void tearDown() throws Exception { + if (sdb != null) { + sdb.close(); + sdb = null; + } + + if (db != null) { + db.close(); + db = null; + } + + if (env != null) { + env.close(); + env = null; + } + } + + /* + * This test checks the code in the area of secondary read + * deadlock avoidance. The initial non-locking scan on the + * secondary, followed by the locking primary scan is exercised. + * The following is the test: + * Primary Secondary Data Description + * A A 1 Data populated + * readers started reading secondary key A + * Writer transaction begin + * - - - delete Pk + * A B 0 insert record + * B A 1 insert record + * Writer commit transaction + * + * The readers may block on the primary lock after having + * retrieved the primary key from the secondary index + * without locking. When the primary lock is granted, + * the primary/secondary association has changed from + * what it was on the initial scan on the secondary to + * get the primary key. Initially B -> A, now A -> B. + * So this row should be skipped (non-serializable isolation). 
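+ *
+ * The writer side of the table above, as a single transaction (condensed
+ * here for illustration from MoveIt.doWork below; KEY and NEWKEY play the
+ * roles of A and B):
+ *
+ *   Transaction txn = env.beginTransaction(null, null);
+ *   db.delete(txn, createEntry(KEY));                  // delete Pk A
+ *   db.put(txn, createEntry(KEY),
+ *          new DatabaseEntry(createData(0, NEWKEY)));  // A now maps to B
+ *   db.put(txn, createEntry(NEWKEY),
+ *          new DatabaseEntry(createData(1, KEY)));     // B now maps to A
+ *   txn.commit();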
+ */ + @Test + public void testMultiReadMoveSecondary() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + final int NEWKEY = DATACOUNT + 1; + final int RTHREADS = 20; + final int WTHREADS = 1; + + Thread[] threads = new Thread[RTHREADS + WTHREADS]; + ReadIt[] readers = new ReadIt[RTHREADS]; + MoveIt[] writers = new MoveIt[WTHREADS]; + int curReadType = 0; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(createData(1, i))); + } + + for (int i = 0; i < readers.length; i++) { + Database tdb; + tdb = sdb; + ReadType trt = tdb instanceof SecondaryDatabase ? + readTypes[curReadType++ % readTypes.length] : + ReadType.GETSEARCHKEY; + readers[i] = + new ReadIt(KEY, env, tdb, 0, 0, 1, + OperationStatus.SUCCESS, trt); + threads[i] = new Thread(readers[i]); + } + + for (int i = 0; i < writers.length; i++) { + writers[i] = + new MoveIt(KEY, NEWKEY, env, db, + readers.length / 2, 0, 1); + threads[i + readers.length] = new Thread(writers[i]); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + for (int i = 0; i < writers.length; i++) { + threads[i + readers.length].join(); + } + for (int i = 0; i < readers.length; i++) { + readers[i].setDone(true); + } + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + for (int i = 0; i < readers.length; i++) { + if (readers[i].getFailure() != null) { + fail(readers[i].getFailure().getMessage()); + } + } + for (int i = 0; i < writers.length; i++) { + if (writers[i].getFailure() != null) { + fail(writers[i].getFailure().getMessage()); + } + } + + for (int i = 0; i < writers.length; i++) { + OperationStatus result = writers[i].getResult(); + assertSame(result, OperationStatus.SUCCESS); + } + } + + class MoveIt implements Runnable { + int oldkey; + int newkey; + Database db; + Environment env; + OperationStatus retstat = null; + int waitEventPre; + int waitEventPost; + int id; + long waittime; + Exception failureException; + + MoveIt(int oldkey, + int newkey, + Environment env, + Database db, + int waiteventpre, + int waiteventpost, + long waittime) { + this.oldkey = oldkey; + this.newkey = newkey; + this.db = db; + this.env = env; + this.waitEventPre = waiteventpre; + this.waitEventPost = waiteventpost; + id = threadid++; + this.waittime = waittime; + } + + public void run() { + try { + while (!doWork()); + } catch (Exception e) { + failureException = e; + } + } + + private boolean doWork() throws Exception { + boolean done = false; + while (currentEvent < waitEventPre) { + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + } + } + currentEvent++; + while (!done) { + Transaction xact = env.beginTransaction(null, null); + try { + retstat = db.delete(xact, createEntry(oldkey)); + retstat = + db.put(xact, + createEntry(oldkey), + new DatabaseEntry(createData(0, newkey))); + retstat = + db.put(xact, + createEntry(newkey), + new DatabaseEntry(createData(1, oldkey))); + + } catch (LockConflictException e) { + Transaction tx = xact; + xact = null; + tx.abort(); + throw new Exception("deadlock occured but not expected."); + } + currentEvent++; + while (currentEvent < waitEventPost) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + } + } + // sleep after the event is flagged + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + } + + if (xact != null) { + xact.commit(); + done = true; + } + } + currentEvent++; + return done; + } + 
+ public OperationStatus getResult() { + return retstat; + } + + Exception getFailure() { + return failureException; + } + } + + class ReadIt implements Runnable { + int key; + Database db; + Environment env; + int waitEventPre; + int waitEventPost; + long waitTime; + OperationStatus result; + int id; + OperationStatus initStatus; + ReadType readType; + boolean done = false; + Exception failureException; + + ReadIt(int key, + Environment env, + Database db, + int waitEventPre, + int waitEventPost, + long waitTime, + OperationStatus initStatus, + ReadType readType) { + this.key = key; + this.db = db; + this.env = env; + this.waitEventPre = waitEventPre; + this.waitEventPost = waitEventPost; + this.waitTime = waitTime; + id = threadid++; + this.initStatus = initStatus; + this.readType = readType; + } + + public void run() { + + while (currentEvent < waitEventPre) { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + } + } + currentEvent++; + + while (!done) { + try { + doWork(); + } catch (Exception e) { + failureException = e; + } + } + + currentEvent++; + while (currentEvent < waitEventPost) { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + } + } + } + + private void doWork() throws Exception { + DatabaseEntry dek = createEntry(key); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry seckey = new DatabaseEntry(dek.getData()); + DatabaseEntry prikey = new DatabaseEntry(dek.getData()); + Transaction xact = null; + Cursor c = null; + try { + xact = env.beginTransaction(null, null); + c = db.openCursor(xact, null); + if (readType == ReadType.GETSEARCHKEY) { + result = c.getSearchKey(dek, data, null); + } else if (readType == ReadType.GETSEARCHKEY2) { + result = + ((SecondaryCursor)c).getSearchKey(dek, dek, + data, null); + } else if (readType == ReadType.GETSEARCHBOTH) { + result = + ((SecondaryCursor)c).getSearchBoth(dek, dek, + data, null); + } else if (readType == ReadType.GETSEARCHBOTH2) { + result = + ((SecondaryCursor)c).getSearchBoth(dek, dek, + data, null); + } else if (readType == ReadType.GETSEARCHKEYRANGE) { + result = + ((SecondaryCursor)c).getSearchKeyRange(seckey, + data, null); + if (result == OperationStatus.SUCCESS) { + if (!seckey.equals(dek)) { + result = OperationStatus.NOTFOUND; + } + } + } else if (readType == ReadType.GETSEARCHKEYRANGE2) { + result = + ((SecondaryCursor)c).getSearchKeyRange(seckey, + prikey, + data, + null); + if (result == OperationStatus.SUCCESS) { + if (!seckey.equals(dek)) { + result = OperationStatus.NOTFOUND; + } + } + } + if (result == OperationStatus.SUCCESS) { + int readSKey = byteToInt(data.getData(), 0); + int readDVal = byteToInt(data.getData(), 4); + if (readDVal != 1) { + throw new Exception( + "read invalid data value expected 1 read " + + readDVal); + } + } + } catch (LockConflictException e) { + if (c != null) { + c.close(); + c = null; + } + Transaction tx = xact; + xact = null; + tx.abort(); + xact = null; + result = null; + if (!deadlockCanHappen()) { + throw new Exception("deadlock occured but not expected."); + } + } + finally { + if (c != null) { + c.close(); + } + if (xact != null) { + xact.commit(); + } + } + } + + public OperationStatus getResult() { + return result; + } + + private boolean deadlockCanHappen() { + return false; + } + public synchronized void setDone(boolean done) { + this.done = done; + } + public Exception getFailure() { + return failureException; + } + } + + DatabaseEntry createEntry(int val) { + return new 
DatabaseEntry(Integer.valueOf(val).toString().getBytes()); + } + + private SecondaryDatabase + openSecondary(Environment env, + Database priDb, + String dbName, + SecondaryConfig dbConfig) { + dbConfig.setAllowPopulate(true); + dbConfig.setSortedDuplicates(useDuplicate); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setKeyCreator(new MyKeyCreator()); + return env.openSecondaryDatabase(null, dbName, + priDb, dbConfig); + } + + int byteToInt(byte[] b, int offset) { + int i = + (b[0 + offset] << 24) & 0xff000000 | + (b[1 + offset] << 16) & 0xff0000 | + (b[2 + offset] << 8) & 0xff00 | + (b[3 + offset] << 0) & 0xff; + return i; + } + + void intToByte(int input, byte[] dest, int destOffset) { + dest[destOffset + 3] = (byte) (input & 0xff); + input >>= 8; + dest[destOffset + 2] = (byte) (input & 0xff); + input >>= 8; + dest[destOffset + 1] = (byte) (input & 0xff); + input >>= 8; + dest[destOffset] = (byte) input; + } + + byte[] createData(int data, int seckey) { + byte[] retval = new byte[8]; + intToByte(data, retval, 4); + intToByte(seckey, retval, 0); + return retval; + } + + class MyKeyCreator implements SecondaryKeyCreator { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) { + byte[] dbuf = data.getData(); + int skey = byteToInt(dbuf, 0); + + result.setData( Integer.valueOf(skey).toString().getBytes()); + return true; + } + } +} diff --git a/test/com/sleepycat/je/test/SecondaryMultiTest.java b/test/com/sleepycat/je/test/SecondaryMultiTest.java new file mode 100644 index 0000000..121309c --- /dev/null +++ b/test/com/sleepycat/je/test/SecondaryMultiTest.java @@ -0,0 +1,1221 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.junit.Assert.assertSame; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.ThreadInterruptedException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * Tests involving secondary indexes with respect to deadlock free access. + * Access is performed using Database, SecondaryDatabase, Cursor and + * SecondaryCursor. When serialization is NOT being used, concurrent access + * of a record should not deadlock. + * + * @author dwchung + * + */ +@RunWith(Parameterized.class) +public class SecondaryMultiTest extends TestBase { + + private enum ReadType {GETSEARCHKEY, GETSEARCHKEY2, GETSEARCHBOTH, + GETSEARCHBOTH2, GETSEARCHKEYRANGE, + GETSEARCHKEYRANGE2}; + private final ReadType[] readTypes = ReadType.values(); + private static final String DB_NAME = "foo"; + private static final String SDB_NAME = "fooSec"; + private volatile int currentEvent; + private volatile boolean testDone = false; + private volatile int threadid = 0; + private final boolean db1UsePrimary; + private final boolean db2UsePrimary; + private final boolean useSerialization; + Environment env = null; + Database db = null; + SecondaryDatabase sdb = null; + + public SecondaryMultiTest(boolean db1UsePrimary, + boolean db2UsePrimary, + boolean useSerialization) { + this.db1UsePrimary = db1UsePrimary; + this.db2UsePrimary = db2UsePrimary; + this.useSerialization = useSerialization; + } + + @Parameters + public static List genParams() { + return paramsHelper(false); + } + + /* + * The parameters for the test are three booleans + * db1UsePrimary, db2UsePrimary, useSeralization. + * Test may access the data using the various + * combinations of primary/secondary access + * methods. + */ + private static List paramsHelper(boolean rep) { + final List newParams = new ArrayList(); + newParams.add(new Object[] {false, false, false}); + newParams.add(new Object[] {false, false, true}); + newParams.add(new Object[] {false, true, false}); + newParams.add(new Object[] {true, false, false}); + + /* + * The next two tests cause deadlocks. The tests + * are written to handle them, but the time the + * test runs is not predictable. The tests are + * currently commented out since this set of tests + * target secondary index deadlock avoidance. + * Both of the tests use serialization, which is + * not part of the "deadlock avoidance project. 
+ newParams.add(new Object[] {false, true, true}); + newParams.add(new Object[] {true, false, true}); + */ + newParams.add(new Object[] {true, true, false}); + newParams.add(new Object[] {true, true, true}); + + return newParams; + } + + /* + * + */ + @Before + public void setup() { + File envHome = SharedTestUtils.getTestDir(); + + /* Init the Environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setTxnTimeout(5, TimeUnit.SECONDS); + envConfig.setLockTimeout(5, TimeUnit.SECONDS); + envConfig.setTxnSerializableIsolation(useSerialization); + + env = new Environment(envHome, envConfig); + + /* Open a database and insert some data. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + sdb = openSecondary(env, db, SDB_NAME, new SecondaryConfig()); + } + + @After + public void tearDown() throws Exception { + if (sdb != null) { + sdb.close(); + sdb = null; + } + + if (db != null) { + db.close(); + db = null; + } + + if (env != null) { + env.close(); + env = null; + } + } + + /** + * Have two threads attempt to delete the same record. + * One thread commits the other should get not found. + */ + @Test + public void testMultiDelete() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + + DeleteIt t1 = + new DeleteIt(KEY, env, db1UsePrimary ? db : sdb, + 0, 3, 1000); + DeleteIt t2 = + new DeleteIt(KEY, env, db2UsePrimary ? db : sdb, + 2, 4, 1000); + + new Thread(t1).start(); + new Thread(t2).start(); + while (currentEvent < 6 && !testDone) { + try { + Thread.sleep(1000); + } catch (ThreadInterruptedException e) { + + } + } + + assertSame(t1.getResult(), OperationStatus.SUCCESS); + assertSame(t2.getResult(), OperationStatus.NOTFOUND); + } + + /** + * Have multiple threads trying to delete the same record. + * + * @throws Exception + */ + @Test + public void testMultiDeleteUnordered() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + final int DELETE_ERS = 20; + + Thread[] threads = new Thread[DELETE_ERS]; + DeleteIt[] deleters = new DeleteIt[DELETE_ERS]; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + + for (int i = 0; i < threads.length; i++) { + Database tdb; + if ((i % 2) == 0) { + tdb = db1UsePrimary ? db : sdb; + } else { + tdb = db2UsePrimary ? db : sdb; + } + deleters[i] = + new DeleteIt(KEY, env, tdb, 0, deleters.length / 2, 1); + threads[i] = new Thread(deleters[i]); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + int successcount = 0; + + for (int i = 0; i < threads.length; i++) { + OperationStatus result = deleters[i].getResult(); + if (result == OperationStatus.SUCCESS) { + successcount++; + } else { + assertSame(result, OperationStatus.NOTFOUND); + } + } + + /* + * Exactly one deleter should have succeeded. + */ + assertEquals(successcount, 1); + } + + /* + * Have multiple readers and writer threads accessing the + * same record. 
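+     * Readers are created expecting an initial status of NOTFOUND and
+     * keep re-reading until the key appears; the writers insert the
+     * deliberately missing key, so every reader should eventually
+     * observe SUCCESS.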
+ */ + @Test + public void testMultiReadInsert() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + final int RTHREADS = 20; + final int WTHREADS = 2; + + Thread[] threads = new Thread[RTHREADS + WTHREADS]; + ReadIt[] readers = new ReadIt[RTHREADS]; + InsertIt[] writers = new InsertIt[WTHREADS]; + int curReadType = 0; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + if (i == KEY) { + continue; + } + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + + for (int i = 0; i < readers.length; i++) { + Database tdb; + if ((i % 2) == 0) { + tdb = db1UsePrimary ? db : sdb; + } else { + tdb = db2UsePrimary ? db : sdb; + } + ReadType trt = ReadType.GETSEARCHKEY; + if (tdb instanceof SecondaryDatabase) { + trt = readTypes[curReadType % readTypes.length]; + curReadType = curReadType + 1; + } + + readers[i] = + new ReadIt(KEY, env, tdb, 0, 0, 1, + OperationStatus.NOTFOUND, trt); + threads[i] = new Thread(readers[i]); + } + + for (int i = 0; i < writers.length; i++) { + writers[i] = new InsertIt(KEY, env, db, 0, 0, 1); + threads[i + readers.length] = new Thread(writers[i]); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + + for (int i = 0; i < readers.length; i++) { + OperationStatus result = readers[i].getResult(); + assertSame(result, OperationStatus.SUCCESS); + } + + for (int i = 0; i < writers.length; i++) { + OperationStatus result = writers[i].getResult(); + assertSame(result, OperationStatus.SUCCESS); + } + } + + /* + * Have multiple readers accessing a record that is deleted. + */ + @Test + public void testMultiReadDelete() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + final int RTHREADS = 20; + final int WTHREADS = 1; + + Thread[] threads = new Thread[RTHREADS + WTHREADS]; + ReadIt[] readers = new ReadIt[RTHREADS]; + DeleteIt[] writers = new DeleteIt[WTHREADS]; + int curReadType = 0; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + + for (int i = 0; i < readers.length; i++) { + Database tdb; + if ((i % 2) == 0) { + tdb = db1UsePrimary ? db : sdb; + } else { + tdb = db2UsePrimary ? db : sdb; + } + ReadType trt = tdb instanceof SecondaryDatabase ? + readTypes[curReadType++ % readTypes.length] : + ReadType.GETSEARCHKEY; + readers[i] = + new ReadIt(KEY, env, tdb, 0, 0, 1, + OperationStatus.SUCCESS, trt); + threads[i] = new Thread(readers[i]); + } + + for (int i = 0; i < writers.length; i++) { + writers[i] = + new DeleteIt(KEY, env, db1UsePrimary ? db : sdb, + 0, readers.length / 2, 1); + threads[i + readers.length] = new Thread(writers[i]); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + + for (int i = 0; i < readers.length; i++) { + OperationStatus result = readers[i].getResult(); + assertSame(result, OperationStatus.NOTFOUND); + } + + for (int i = 0; i < writers.length; i++) { + OperationStatus result = writers[i].getResult(); + assertSame(result, OperationStatus.SUCCESS); + } + } + + /* + * Have multiple threads deleting, inserting, and reading + * the same record. 
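+     * Readers tally both SUCCESS and NOTFOUND results (the record is
+     * intended to come and go while they run) and keep reading until
+     * setDone() is called after TESTRUNTIME elapses.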
+ */ + @Test + public void testMultiReadDeleteInsert() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + final int RTHREADS = 20; + final int WTHREADS = 1; + final int DTHREADS = 1; + final long TESTRUNTIME = 2000; + + Thread[] threads = new Thread[RTHREADS + WTHREADS + DTHREADS]; + ReadTillITellYou[] readers = new ReadTillITellYou[RTHREADS]; + DeleteIt[] deleters = new DeleteIt[WTHREADS]; + InsertIt[] writers = new InsertIt[WTHREADS]; + + int curReadType = 0; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + + for (int i = 0; i < readers.length; i++) { + Database tdb; + if ((i % 2) == 0) { + tdb = db1UsePrimary ? db : sdb; + } else { + tdb = db2UsePrimary ? db : sdb; + } + ReadType trt = ReadType.GETSEARCHKEY; + if (tdb instanceof SecondaryDatabase) { + trt = readTypes[curReadType % readTypes.length]; + curReadType = curReadType + 1; + } + readers[i] = + new ReadTillITellYou(KEY, env, tdb, 0, 0, 1, trt, null); + threads[i] = new Thread(readers[i]); + } + + for (int i = 0; i < writers.length; i++) { + writers[i] = + new InsertIt(KEY, env, db, + 0, readers.length / 2, 1); + threads[i + readers.length] = new Thread(writers[i]); + } + + for (int i = 0; i < deleters.length; i++) { + deleters[i] = + new DeleteIt(KEY, env, db1UsePrimary ? db : sdb, + 0, readers.length / 2, 1); + threads[i + readers.length + writers.length] = + new Thread(writers[i]); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + + try { + Thread.sleep(TESTRUNTIME); + } catch (ThreadInterruptedException e) { + + } + + for (int i = 0; i < readers.length; i++) { + readers[i].setDone(true); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + + for (int i = 0; i < writers.length; i++) { + OperationStatus result = writers[i].getResult(); + assertSame(result, OperationStatus.SUCCESS); + } + } + + /* + * Have multiple threads update (delete/insert in a + * transaction) and read the same record. + */ + @Test + public void testMultiReadUpdate() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + final int RTHREADS = 20; + final int WTHREADS = 5; + final long TESTRUNTIME = 2000; + + Thread[] threads = new Thread[RTHREADS + WTHREADS]; + ReadTillITellYou[] readers = new ReadTillITellYou[RTHREADS]; + UpdateIt[] writers = new UpdateIt[WTHREADS]; + + int curReadType = 0; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + + for (int i = 0; i < readers.length; i++) { + Database tdb; + if ((i % 2) == 0) { + tdb = db1UsePrimary ? db : sdb; + } else { + tdb = db2UsePrimary ? 
db : sdb; + } + ReadType trt = ReadType.GETSEARCHKEY; + if (tdb instanceof SecondaryDatabase) { + trt = readTypes[curReadType % readTypes.length]; + curReadType = curReadType + 1; + } + readers[i] = + new ReadTillITellYou(KEY, env, tdb, 0, 0, 1, trt, null); + threads[i] = new Thread(readers[i]); + } + + for (int i = 0; i < writers.length; i++) { + writers[i] = + new UpdateIt(KEY, env, db, sdb, + 0, readers.length / 2, 1); + threads[i + readers.length] = new Thread(writers[i]); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + + try { + Thread.sleep(TESTRUNTIME); + } catch (ThreadInterruptedException e) { + + } + + for (int i = 0; i < readers.length; i++) { + readers[i].setDone(true); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + + for (int i = 0; i < writers.length; i++) { + OperationStatus result = writers[i].getResult(); + assertSame(result, OperationStatus.SUCCESS); + } + + for (int i = 0; i < readers.length; i++) { + assertEquals(readers[i].getNotFoundCount(), 0); + } + + } + + /* + * Multiple readers reading the same record with different + * lock types. + */ + @Test + public void testMultiReaders() throws Exception { + + final int DATACOUNT = 99; + final int KEY = 55; + final int RTHREADS = 20; + final long TESTRUNTIME = 2000; + + Thread[] threads = new Thread[RTHREADS]; + ReadTillITellYou[] readers = new ReadTillITellYou[RTHREADS]; + + int curReadType = 0; + LockMode lockMode; + + /* populate */ + for (int i = 0; i < DATACOUNT; i++) { + byte[] val = Integer.valueOf(i).toString().getBytes(); + db.put(null, + new DatabaseEntry(val), + new DatabaseEntry(val)); + } + + for (int i = 0; i < readers.length; i++) { + Database tdb; + if ((i % 2) == 0) { + tdb = db1UsePrimary ? db : sdb; + lockMode = LockMode.RMW; + } else { + tdb = db2UsePrimary ? 
db : sdb; + lockMode = null; + } + ReadType trt = ReadType.GETSEARCHKEY; + if (tdb instanceof SecondaryDatabase) { + trt = readTypes[curReadType % readTypes.length]; + curReadType = curReadType + 1; + } + readers[i] = + new ReadTillITellYou(KEY, env, tdb, 0, 0, 1, trt, lockMode); + threads[i] = new Thread(readers[i]); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].start(); + } + + try { + Thread.sleep(TESTRUNTIME); + } catch (ThreadInterruptedException e) { + + } + + for (int i = 0; i < readers.length; i++) { + readers[i].setDone(true); + } + + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + + for (int i = 0; i < readers.length; i++) { + assertEquals(readers[i].getNotFoundCount(), 0); + } + + } + + public boolean deadlocksCanHappen() { + if (useSerialization && + db1UsePrimary ^ db2UsePrimary) { + return true; + } + return false; + } + + class DeleteIt implements Runnable { + int key; + Database db; + Environment env; + OperationStatus retstat = null; + int waitEventPre; + int waitEventPost; + int id; + long waittime; + + DeleteIt(int key, + Environment env, + Database db, + int waiteventpre, + int waiteventpost, + long waittime) { + this.key = key; + this.db = db; + this.env = env; + this.waitEventPre = waiteventpre; + this.waitEventPost = waiteventpost; + id = threadid++; + this.waittime = waittime; + } + + private boolean deadlockCanHappen() { + if (useSerialization && + db1UsePrimary ^ db2UsePrimary) { + return true; + } + return false; + } + + public void run() { + while (!doWork()); + } + + private boolean doWork() { + boolean done = false; + while (currentEvent < waitEventPre) { + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + testDone = true; + } + } + currentEvent++; + while (!done) { + Transaction xact = env.beginTransaction(null, null); + try { + retstat = db.delete(xact, createEntry(key)); + } catch (LockConflictException e) { + if (!deadlockCanHappen() ) { + fail("deadlock occured but not expected."); + } + Transaction tx = xact; + xact = null; + tx.abort(); + } + currentEvent++; + while (currentEvent < waitEventPost) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + testDone = true; + } + } + // sleep after the event is flagged + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + testDone = true; + } + + if (xact != null) { + xact.commit(); + done = true; + } + } + currentEvent++; + return done; + } + + public OperationStatus getResult() { + return retstat; + } + } + + class ReadIt implements Runnable { + int key; + Database db; + Environment env; + int waitEventPre; + int waitEventPost; + long waitTime; + OperationStatus result; + int id; + OperationStatus initStatus; + ReadType readType; + + ReadIt(int key, + Environment env, + Database db, + int waitEventPre, + int waitEventPost, + long waitTime, + OperationStatus initStatus, + ReadType readType) { + this.key = key; + this.db = db; + this.env = env; + this.waitEventPre = waitEventPre; + this.waitEventPost = waitEventPost; + this.waitTime = waitTime; + id = threadid++; + this.initStatus = initStatus; + this.readType = readType; + } + + public void run() { + + while (currentEvent < waitEventPre) { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + } + } + currentEvent++; + + while (result == null || result == initStatus) { + doWork(); + } + + currentEvent++; + while (currentEvent < waitEventPost) { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + testDone = true; + } + } + } + + 
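+        /*
+         * doWork() issues one read using the configured ReadType.  The
+         * GETSEARCHKEYRANGE variants may position on a larger key when
+         * the requested one is absent, so a SUCCESS status is counted
+         * only when the returned secondary key equals the requested
+         * key, and is otherwise normalized to NOTFOUND.  On a
+         * LockConflictException the cursor is closed and the
+         * transaction aborted before deciding whether the deadlock was
+         * expected.
+         */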
private void doWork() { + DatabaseEntry dek = createEntry(key); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry seckey = new DatabaseEntry(dek.getData()); + DatabaseEntry prikey = new DatabaseEntry(dek.getData()); + Transaction xact = null; + Cursor c = null; + try { + xact = env.beginTransaction(null, null); + c = db.openCursor(xact, null); + if (readType == ReadType.GETSEARCHKEY) { + result = c.getSearchKey(dek, data, null); + } else if (readType == ReadType.GETSEARCHKEY2) { + result = + ((SecondaryCursor)c).getSearchKey(dek, dek, + data, null); + } else if (readType == ReadType.GETSEARCHBOTH) { + result = + ((SecondaryCursor)c).getSearchBoth(dek, dek, + data, null); + } else if (readType == ReadType.GETSEARCHBOTH2) { + result = + ((SecondaryCursor)c).getSearchBoth(dek, dek, + data, null); + } else if (readType == ReadType.GETSEARCHKEYRANGE) { + result = + ((SecondaryCursor)c).getSearchKeyRange(seckey, + data, null); + if (result == OperationStatus.SUCCESS) { + if (!seckey.equals(dek)) { + result = OperationStatus.NOTFOUND; + } + } + } else if (readType == ReadType.GETSEARCHKEYRANGE2) { + result = + ((SecondaryCursor)c).getSearchKeyRange(seckey, + prikey, + data, + null); + if (result == OperationStatus.SUCCESS) { + if (!seckey.equals(dek)) { + result = OperationStatus.NOTFOUND; + } + } + } + } catch (LockConflictException e) { + if (c != null) { + c.close(); + c = null; + } + Transaction tx = xact; + xact = null; + tx.abort(); + xact = null; + result = null; + if (!deadlockCanHappen()) { + fail("deadlock occured but not expected."); + } + } + finally { + if (c != null) { + c.close(); + } + if (xact != null) { + xact.commit(); + } + } + } + + public OperationStatus getResult() { + return result; + } + + private boolean deadlockCanHappen() { + if (useSerialization && + db1UsePrimary ^ db2UsePrimary) { + return true; + } + return false; + } + } + + class ReadTillITellYou implements Runnable { + int key; + Database db; + Environment env; + int waitEventPre; + int waitEventPost; + long waitTime; + OperationStatus result; + int id; + boolean done = false; + ReadType readType; + long found; + long notFound; + LockMode lockMode; + + ReadTillITellYou(int key, + Environment env, + Database db, + int waitEventPre, + int waitEventPost, + long waitTime, + ReadType readType, + LockMode lkmode) { + this.key = key; + this.db = db; + this.env = env; + this.waitEventPre = waitEventPre; + this.waitEventPost = waitEventPost; + this.waitTime = waitTime; + id = threadid++; + this.readType = readType; + lockMode = lkmode; + } + + public void run() { + + while (currentEvent < waitEventPre) { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + } + } + currentEvent++; + + while (!done) { + doWork(); + } + currentEvent++; + while (currentEvent < waitEventPost) { + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + testDone = true; + } + } + } + + private void doWork() { + DatabaseEntry dek = createEntry(key); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry seckey = new DatabaseEntry(dek.getData()); + DatabaseEntry prikey = new DatabaseEntry(dek.getData()); + Transaction xact = null; + Cursor c = null; + try { + xact = env.beginTransaction(null, null); + c = db.openCursor(xact, null); + if (readType == ReadType.GETSEARCHKEY) { + result = c.getSearchKey(dek, data, lockMode); + } else if (readType == ReadType.GETSEARCHKEY2) { + result = + ((SecondaryCursor)c).getSearchKey(dek, dek, + data, lockMode); + } else if (readType == ReadType.GETSEARCHBOTH) { + 
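+                    // getSearchBoth positions on the exact secondary
+                    // key / primary key pair; this test's key creator
+                    // makes the two keys equal, so dek is passed for
+                    // both arguments.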
result = + ((SecondaryCursor)c).getSearchBoth(dek, dek, + data, lockMode); + } else if (readType == ReadType.GETSEARCHBOTH2) { + result = + ((SecondaryCursor)c).getSearchBoth(dek, dek, + data, lockMode); + } else if (readType == ReadType.GETSEARCHKEYRANGE) { + result = + ((SecondaryCursor)c).getSearchKeyRange(seckey, + data, lockMode); + if (result == OperationStatus.SUCCESS) { + if (!seckey.equals(dek)) { + result = OperationStatus.NOTFOUND; + } + } + } else if (readType == ReadType.GETSEARCHKEYRANGE2) { + result = + ((SecondaryCursor)c).getSearchKeyRange(seckey, + prikey, + data, + lockMode); + if (result == OperationStatus.SUCCESS) { + if (!seckey.equals(dek)) { + result = OperationStatus.NOTFOUND; + } + } + } + if (result == OperationStatus.SUCCESS) { + found++; + } else if (result == OperationStatus.NOTFOUND) { + notFound++; + } else { + fail("Read operation returned "+result); + } + } catch (LockConflictException e) { + if (!deadlockCanHappen()) { + fail("deadlock occured but not expected."); + } + Transaction tx = xact; + xact = null; + tx.abort(); + result = null; + } + finally { + if (c != null) { + c.close(); + } + if (xact != null) { + xact.commit(); + } + } + } + + public OperationStatus getResult() { + return result; + } + + public synchronized void setDone(boolean done) { + this.done = done; + } + + public long getFoundCount() { + return found; + } + + public long getNotFoundCount() { + return notFound; + } + + private boolean deadlockCanHappen() { + if (useSerialization && + db1UsePrimary ^ db2UsePrimary) { + return true; + } + return false; + } + } + + class InsertIt implements Runnable { + int key; + Database db; + Environment env; + OperationStatus retstat = null; + int waitEventPre; + int waitEventPost; + int id; + long waittime; + + InsertIt(int key, + Environment env, + Database db, + int waiteventpre, + int waiteventpost, + long waittime) { + this.key = key; + this.db = db; + this.env = env; + this.waitEventPre = waiteventpre; + this.waitEventPost = waiteventpost; + id = threadid++; + this.waittime = waittime; + } + + public void run() { + Transaction xact = env.beginTransaction(null, null); + while (currentEvent < waitEventPre) { + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + testDone = true; + } + } + currentEvent++; + try { + retstat = db.put(xact, createEntry(key), createEntry(key)); + } catch (LockConflictException e) { + if (!deadlockCanHappen()) { + fail("deadlock not expected."); + } + Transaction tx = xact; + xact = null; + tx.abort(); + } + currentEvent++; + while (currentEvent < waitEventPost) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + testDone = true; + } + } + // sleep after the event is flagged + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + testDone = true; + } + + if (xact != null) { + xact.commit(); + } + currentEvent++; + } + + private boolean deadlockCanHappen() { + if (useSerialization && + db1UsePrimary ^ db2UsePrimary) { + return true; + } + return false; + } + + public OperationStatus getResult() { + return retstat; + } + } + + class UpdateIt implements Runnable { + int key; + Database db; + SecondaryDatabase sdb; + Environment env; + OperationStatus retstat = null; + int waitEventPre; + int waitEventPost; + int id; + long waittime; + + UpdateIt(int key, + Environment env, + Database db, + SecondaryDatabase sdb, + int waiteventpre, + int waiteventpost, + long waittime) { + this.key = key; + this.db = db; + this.env = env; + this.waitEventPre = waiteventpre; + 
this.waitEventPost = waiteventpost; + id = threadid++; + this.waittime = waittime; + this.sdb = sdb; + } + + private boolean deadlockCanHappen() { + if (useSerialization && + db1UsePrimary ^ db2UsePrimary) { + return true; + } + return false; + } + + public void run() { + while (!doWork()); + } + + private boolean doWork() { + boolean done = false; + while (currentEvent < waitEventPre) { + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + testDone = true; + } + } + currentEvent++; + while (!done) { + Transaction xact = env.beginTransaction(null, null); + try { + retstat = sdb.delete(xact, createEntry(key)); + assertEquals(retstat, OperationStatus.SUCCESS); + retstat = db.put(xact, createEntry(key), createEntry(key)); + assertEquals(retstat, OperationStatus.SUCCESS); + } catch (LockConflictException e) { + if (!deadlockCanHappen() ) { + fail("deadlock occured but not expected."); + } + Transaction tx = xact; + xact = null; + tx.abort(); + } + currentEvent++; + while (currentEvent < waitEventPost) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + testDone = true; + } + } + // sleep after the event is flagged + try { + Thread.sleep(waittime); + } catch (InterruptedException e) { + testDone = true; + } + + if (xact != null) { + xact.commit(); + done = true; + } + } + currentEvent++; + return done; + } + + public OperationStatus getResult() { + return retstat; + } + } + + DatabaseEntry createEntry(int val) { + return new DatabaseEntry(Integer.valueOf(val).toString().getBytes()); + } + + private SecondaryDatabase + openSecondary(Environment env, + Database priDb, + String dbName, + SecondaryConfig dbConfig) { + dbConfig.setAllowPopulate(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setKeyCreator(new MyKeyCreator()); + return env.openSecondaryDatabase(null, dbName, + priDb, dbConfig); + } + + class MyKeyCreator implements SecondaryKeyCreator { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) { + result.setData(key.getData()); + + return true; + } + } +} diff --git a/test/com/sleepycat/je/test/SecondarySplitTestMain.java b/test/com/sleepycat/je/test/SecondarySplitTestMain.java new file mode 100644 index 0000000..332c620 --- /dev/null +++ b/test/com/sleepycat/je/test/SecondarySplitTestMain.java @@ -0,0 +1,224 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import java.io.File; +import java.util.Random; + +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Tests that splits during secondary inserts don't cause a LatchException + * (latch already held). This was caused by a latch that wasn't released + * during a duplicate insert, when a split occurred during the insert. See + * [#12841] in Tree.java. + * + * The record keys are random long values and the record data is the long + * time (millis) of the record creation. The secondary database is indexed on + * the full data value (the timestamp). When a record is updated, its timstamp + * is changed to the current time, cause secondary deletes and inserts. This + * scenario is what happened to bring out the bug in SR [#12841]. + */ +public class SecondarySplitTestMain { + + private static final int WRITER_THREADS = 2; + private static final int INSERTS_PER_ITER = 2; + private static final int UPDATES_PER_ITER = 1; + private static final int ITERS_PER_THREAD = 20000; + private static final int ITERS_PER_TRACE = 1000; + + private final File envHome; + private Environment env; + private Database priDb; + private SecondaryDatabase secDb; + private final Random rnd = new Random(123); + + public static void main(String[] args) { + try { + SecondarySplitTestMain test = new SecondarySplitTestMain(); + test.doTest(); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(1); + } + } + + public SecondarySplitTestMain() { + envHome = SharedTestUtils.getTestDir(); + } + + private void doTest() + throws Exception { + + TestUtils.removeLogFiles("Setup", envHome, false); + open(); + Thread[] writers = new Thread[WRITER_THREADS]; + for (int i = 0; i < writers.length; i += 1) { + writers[i] = new Writer(i); + } + for (int i = 0; i < writers.length; i += 1) { + writers[i].start(); + } + for (int i = 0; i < writers.length; i += 1) { + writers[i].join(); + } + close(); + TestUtils.removeLogFiles("TearDown", envHome, false); + System.out.println("SUCCESS"); + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + env = new Environment(envHome, envConfig); + + DatabaseConfig priConfig = new DatabaseConfig(); + priConfig.setAllowCreate(true); + + priDb = env.openDatabase(null, "pri", priConfig); + + SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setAllowCreate(true); + secConfig.setSortedDuplicates(true); + secConfig.setKeyCreator(new KeyCreator()); + + secDb = env.openSecondaryDatabase(null, "sec", priDb, secConfig); + } + + private void close() + throws DatabaseException { + + secDb.close(); + secDb = null; + + priDb.close(); + priDb = null; + + env.close(); + env = null; + } + + static class KeyCreator implements SecondaryKeyCreator { + + public boolean createSecondaryKey(SecondaryDatabase db, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry 
result) { + result.setData(data.getData(), data.getOffset(), data.getSize()); + return true; + } + } + + private class Writer extends Thread { + + Writer(int id) { + super("[Writer " + id + ']'); + } + + @Override + public void run() { + + int inserts = 0; + int updates = 0; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + + for (int iter = 1; iter <= ITERS_PER_THREAD; iter += 1) { + + Cursor cursor = null; + + try { + + /* Inserts */ + for (int i = 0; i < INSERTS_PER_ITER; i += 1) { + LongBinding.longToEntry(rnd.nextLong(), key); + long time = System.currentTimeMillis(); + LongBinding.longToEntry(time, data); + status = priDb.putNoOverwrite(null, key, data); + if (status == OperationStatus.SUCCESS) { + inserts += 1; + } else { + System.out.println + (getName() + " *** INSERT " + status); + } + } + + /* Updates */ + for (int i = 0; i < UPDATES_PER_ITER; i += 1) { + + cursor = priDb.openCursor(null, null); + + LongBinding.longToEntry(rnd.nextLong(), key); + status = cursor.getSearchKeyRange(key, data, + LockMode.RMW); + if (status == OperationStatus.NOTFOUND) { + status = cursor.getFirst(key, data, LockMode.RMW); + } + + if (status == OperationStatus.SUCCESS) { + long time = System.currentTimeMillis(); + LongBinding.longToEntry(time, data); + cursor.putCurrent(data); + updates += 1; + } else { + System.out.println + (getName() + " *** UPDATE " + status); + } + + cursor.close(); + cursor = null; + } + + } catch (Throwable e) { + + e.printStackTrace(System.out); + + if (cursor != null) { + try { + cursor.close(); + } catch (Exception e2) { + e2.printStackTrace(System.out); + } + } + } + + if (iter % ITERS_PER_TRACE == 0) { + System.out.println + (getName() + + " inserts=" + inserts + + " updates=" + updates); + } + } + } + } +} diff --git a/test/com/sleepycat/je/test/SecondaryTest.java b/test/com/sleepycat/je/test/SecondaryTest.java new file mode 100644 index 0000000..0a0d52a --- /dev/null +++ b/test/com/sleepycat/je/test/SecondaryTest.java @@ -0,0 +1,2212 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.Get; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryAssociation; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.UniqueConstraintException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.util.TestUtils; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Tests basic SecondaryDatabase functionality. Tests a SecondaryAssociation, + * but only in a limited mode where there is only one primary DB. + */ +@RunWith(Parameterized.class) +public class SecondaryTest extends MultiKeyTxnTestCase { + + private static final int NUM_RECS = 5; + private static final int KEY_OFFSET = 100; + + private JUnitThread junitThread; + private final boolean resetOnFailure; + private final boolean useCustomAssociation; + private CustomAssociation customAssociation; + + protected static EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + static { + envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + /* + * For testGet, there will be intentional deadlocks, so we will + * set the lockTimeout to be 1 to speed up this test. + * + * But for most other test cases. First, BtreeVerifier will verify + * the index corruption, so it will first READ_LOCK the secondary + * record and then try to lock the primary record with nonblocking. + * Second, in many places of the test cases, it will call put or + * delete the primary record, e.g. priDb.delete(txn, entry(val)). + * This will first WRITE_LOCK the primary record and then WRITE_LOCK + * the secondary record. So when the test case wants to WRITE_LOCK + * the secondary record, the secondary record may have already been + * locked by BtreeVerifier. So the test case will first wait. After + * the BtreeVerifier can not lock the primary record and finish + * the subsequent work, the BtreeVerifier will release the lock on + * the secondary record, the test case then can get this lock. 
If + * we set the lockTimeout to be 1, then the test case will wake up + * quickly, think that it is Timeout and then throw LockTimeoutEx. + * So for these test cases, we just use the default lockTimeout + * value to simulate the real product environment. + */ + //envConfig.setLockTimeout(1); // to speed up + envConfig.setAllowCreate(true); + + /* Disable daemons so that stats are reliable. */ + envConfig.setConfigParam(EnvironmentConfig.STATS_COLLECT, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + } + + @Parameters + public static List genParams() { + return paramsHelper(false); + } + + protected static List paramsHelper(boolean rep) { + final String[] txnTypes = getTxnTypes(null, rep); + final List newParams = new ArrayList(); +// if (true) { +// newParams.add(new Object[] {txnTypes[0], false, true, false}); +// return newParams; +// } + for (final String type : txnTypes) { + for (final boolean b1 : new boolean[] {false, true, false}) { + newParams.add(new Object[] {type, b1, false, true}); + for (final boolean b2 : new boolean[] {false, true}) { + newParams.add(new Object[] {type, b1, b2, false}); + } + } + } + + return newParams; + } + + public SecondaryTest(String type, + boolean multiKey, + boolean customAssociation, + boolean resetOnFailure) { + super.envConfig = envConfig; + txnType = type; + useMultiKey = multiKey; + useCustomAssociation = customAssociation; + this.resetOnFailure = resetOnFailure; + isTransactional = (txnType != TXN_NULL); + customName = ((useCustomAssociation) ? "customAssoc-" : "") + + ((useMultiKey) ? "multiKey-" : "") + + ((resetOnFailure) ? 
"resetOnFailure-" : "") + + txnType; + } + + @After + public void tearDown() + throws Exception { + + super.tearDown(); + if (junitThread != null) { + junitThread.shutdown(); + junitThread = null; + } + } + + @Test + public void testPutAndDelete() { + SecondaryDatabase secDb = initDb(); + Database priDb = getPrimaryDatabase(secDb); + + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + OperationStatus status; + Transaction txn = txnBegin(); + + /* Database.put() */ + status = priDb.put(txn, entry(1), entry(2)); + assertSame(OperationStatus.SUCCESS, status); + status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(2), data); + + /* Database.putNoOverwrite() */ + status = priDb.putNoOverwrite(txn, entry(1), entry(1)); + assertSame(OperationStatus.KEYEXIST, status); + status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(2), data); + + /* Database.put() overwrite */ + status = priDb.put(txn, entry(1), entry(3)); + assertSame(OperationStatus.SUCCESS, status); + status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + status = secDb.get(txn, entry(103), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(3), data); + + /* Database.delete() */ + status = priDb.delete(txn, entry(1)); + assertSame(OperationStatus.SUCCESS, status); + status = priDb.delete(txn, entry(1)); + assertSame(OperationStatus.NOTFOUND, status); + status = secDb.get(txn, entry(103), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + + /* SecondaryDatabase.delete() */ + status = priDb.put(txn, entry(1), entry(1)); + assertSame(OperationStatus.SUCCESS, status); + status = priDb.put(txn, entry(2), entry(1)); + assertSame(OperationStatus.SUCCESS, status); + status = secDb.get(txn, entry(101), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(1), data); + status = secDb.delete(txn, entry(101)); + assertSame(OperationStatus.SUCCESS, status); + status = secDb.delete(txn, entry(101)); + assertSame(OperationStatus.NOTFOUND, status); + status = secDb.get(txn, entry(101), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + status = priDb.get(txn, entry(1), data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + status = priDb.get(txn, entry(2), data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + + /* + * Database.putNoDupData() cannot be called since the primary cannot be + * configured for duplicates. + */ + + /* Primary and secondary are empty now. */ + + /* Get a txn for a cursor. 
*/ + txnCommit(txn); + txn = txnBeginCursor(); + + Cursor priCursor = null; + SecondaryCursor secCursor = null; + try { + priCursor = openCursor(priDb, txn); + secCursor = openCursor(secDb, txn); + + /* Cursor.putNoOverwrite() */ + status = priCursor.putNoOverwrite(entry(1), entry(2)); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(priCursor, 1); + status = secCursor.getSearchKey(entry(102), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(secCursor, 0); + assertDataEquals(entry(1), key); + assertDataEquals(entry(2), data); + + /* Cursor.putCurrent() */ + status = priCursor.putCurrent(entry(3)); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(priCursor, 2); + status = secCursor.getSearchKey(entry(102), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + assertNSecWrites(secCursor, 0); + status = secCursor.getSearchKey(entry(103), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(secCursor, 0); + assertDataEquals(entry(1), key); + assertDataEquals(entry(3), data); + + /* Cursor.delete() */ + status = priCursor.delete(); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(priCursor, 1); + status = priCursor.delete(); + assertSame(OperationStatus.KEYEMPTY, status); + status = secCursor.getSearchKey(entry(103), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + status = priCursor.getSearchKey(entry(1), data, + LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + + /* Cursor.put() */ + status = priCursor.put(entry(1), entry(4)); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(priCursor, 1); + status = secCursor.getSearchKey(entry(104), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(secCursor, 0); + assertDataEquals(entry(1), key); + assertDataEquals(entry(4), data); + + /* SecondaryCursor.delete() */ + status = secCursor.delete(); + assertSame(OperationStatus.SUCCESS, status); + assertNSecWrites(secCursor, 0); // Known deficiency. + status = secCursor.delete(); + assertSame(OperationStatus.KEYEMPTY, status); + status = secCursor.getCurrent(new DatabaseEntry(), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.KEYEMPTY, status); + status = secCursor.getSearchKey(entry(104), key, data, + LockMode.DEFAULT); + assertNSecWrites(secCursor, 0); + status = priCursor.getSearchKey(entry(1), data, + LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + + /* + * Cursor.putNoDupData() cannot be called since the primary cannot + * be configured for duplicates. + */ + + /* Primary and secondary are empty now. 
*/ + } finally { + if (secCursor != null) { + secCursor.close(); + } + if (priCursor != null) { + priCursor.close(); + } + } + + txnCommit(txn); + secDb.close(); + priDb.close(); + } + + @Test + public void testPartialDataPut() { + SecondaryDatabase secDb = initDb(); + Database priDb = getPrimaryDatabase(secDb); + + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + OperationStatus status; + Transaction txn = txnBegin(); + + /* Database.put() */ + status = priDb.putNoOverwrite(txn, entry(1), partialEntry(0, 1)); + assertSame(OperationStatus.SUCCESS, status); + status = secDb.get(txn, entry(101), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(1), data); + status = priDb.put(txn, entry(1), partialEntry(1, 2)); + assertSame(OperationStatus.SUCCESS, status); + status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(2), data); + status = priDb.put(txn, entry(1), partialEntry(2, 3)); + assertSame(OperationStatus.SUCCESS, status); + status = secDb.get(txn, entry(102), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + status = secDb.get(txn, entry(103), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(3), data); + + /* Get a txn for a cursor. */ + txnCommit(txn); + txn = txnBeginCursor(); + + Cursor priCursor = null; + SecondaryCursor secCursor = null; + try { + priCursor = openCursor(priDb, txn); + secCursor = openCursor(secDb, txn); + + /* Cursor.put() */ + status = priCursor.put(entry(1), partialEntry(3, 2)); + assertSame(OperationStatus.SUCCESS, status); + status = secCursor.getSearchKey(entry(102), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(2), data); + + /* Cursor.putCurrent() */ + status = priCursor.putCurrent(partialEntry(2, 3)); + assertSame(OperationStatus.SUCCESS, status); + status = secCursor.getSearchKey(entry(102), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + status = secCursor.getSearchKey(entry(103), key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(1), key); + assertDataEquals(entry(3), data); + } finally { + if (secCursor != null) { + secCursor.close(); + } + if (priCursor != null) { + priCursor.close(); + } + } + + txnCommit(txn); + secDb.close(); + priDb.close(); + } + + @Test + public void testGet() { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.setLockTimeout(1); + + SecondaryDatabase secDb = initDb(); + Database priDb = getPrimaryDatabase(secDb); + + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry secKey = new DatabaseEntry(); + OperationStatus status; + Transaction txn = txnBegin(); + + /* + * For parameters that do not require initialization with a non-null + * data array, we set them to null to make sure this works. [#12121] + */ + + /* Add one record for each key with one data/duplicate. 
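+           The key creator used by initDb() maps a data value d to the
+           secondary key d + KEY_OFFSET, so primary record (i, i) is
+           indexed under i + KEY_OFFSET.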
*/ + for (int i = 0; i < NUM_RECS; i += 1) { + status = priDb.put(txn, entry(i), entry(i)); + assertSame(OperationStatus.SUCCESS, status); + } + + /* SecondaryDatabase.get() */ + for (int i = 0; i < NUM_RECS; i += 1) { + + data.setData(null); + status = secDb.get(txn, entry(i + KEY_OFFSET), key, + data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i), key); + assertDataEquals(entry(i), data); + } + data.setData(null); + status = secDb.get(txn, entry(NUM_RECS + KEY_OFFSET), key, + data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + + /* SecondaryDatabase.getSearchBoth() */ + for (int i = 0; i < NUM_RECS; i += 1) { + data.setData(null); + status = secDb.getSearchBoth(txn, entry(i + KEY_OFFSET), entry(i), + data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i), data); + } + data.setData(null); + status = secDb.getSearchBoth(txn, entry(NUM_RECS + KEY_OFFSET), + entry(NUM_RECS), data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + + /* Get a cursor txn. */ + txnCommit(txn); + txn = txnBeginCursor(); + + SecondaryCursor cursor = openCursor(secDb, txn); + try { + /* SecondaryCursor.getFirst()/getNext() */ + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getFirst(secKey, key, data, LockMode.DEFAULT); + for (int i = 0; i < NUM_RECS; i += 1) { + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i), key); + assertDataEquals(entry(i), data); + assertPriLocked(priDb, key); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getNext(secKey, key, data, LockMode.DEFAULT); + } + assertSame(OperationStatus.NOTFOUND, status); + + /* SecondaryCursor.getCurrent() (last) */ + if (!resetOnFailure) { + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getCurrent( + secKey, key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(NUM_RECS - 1 + KEY_OFFSET), secKey); + assertDataEquals(entry(NUM_RECS - 1), key); + assertDataEquals(entry(NUM_RECS - 1), data); + assertPriLocked(priDb, key); + } + + /* SecondaryCursor.getLast()/getPrev() */ + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getLast(secKey, key, data, LockMode.DEFAULT); + for (int i = NUM_RECS - 1; i >= 0; i -= 1) { + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i), key); + assertDataEquals(entry(i), data); + assertPriLocked(priDb, key); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getPrev(secKey, key, data, LockMode.DEFAULT); + } + assertSame(OperationStatus.NOTFOUND, status); + + /* SecondaryCursor.getCurrent() (first) */ + if (!resetOnFailure) { + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getCurrent(secKey, key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(0 + KEY_OFFSET), secKey); + assertDataEquals(entry(0), key); + assertDataEquals(entry(0), data); + assertPriLocked(priDb, key); + } + + /* SecondaryCursor.getSearchKey() */ + key.setData(null); + data.setData(null); + status = cursor.getSearchKey(entry(KEY_OFFSET - 1), key, + data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + for (int i = 0; i < NUM_RECS; i += 1) { + key.setData(null); + 
data.setData(null);
+ status = cursor.getSearchKey(entry(i + KEY_OFFSET), key,
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(i), key);
+ assertDataEquals(entry(i), data);
+ assertPriLocked(priDb, key);
+ }
+ key.setData(null);
+ data.setData(null);
+ status = cursor.getSearchKey(entry(NUM_RECS + KEY_OFFSET), key,
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.NOTFOUND, status);
+
+ /* SecondaryCursor.getSearchBoth() */
+ data.setData(null);
+ status = cursor.getSearchBoth(entry(KEY_OFFSET - 1), entry(0),
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.NOTFOUND, status);
+ for (int i = 0; i < NUM_RECS; i += 1) {
+ data.setData(null);
+ status = cursor.getSearchBoth(entry(i + KEY_OFFSET), entry(i),
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(i), data);
+ assertPriLocked(priDb, entry(i));
+ }
+ data.setData(null);
+ status = cursor.getSearchBoth(entry(NUM_RECS + KEY_OFFSET),
+ entry(NUM_RECS), data,
+ LockMode.DEFAULT);
+ assertSame(OperationStatus.NOTFOUND, status);
+
+ /* SecondaryCursor.getSearchKeyRange() */
+ key.setData(null);
+ data.setData(null);
+ status = cursor.getSearchKeyRange(entry(KEY_OFFSET - 1), key,
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(0), key);
+ assertDataEquals(entry(0), data);
+ assertPriLocked(priDb, key);
+ for (int i = 0; i < NUM_RECS; i += 1) {
+ key.setData(null);
+ data.setData(null);
+ status = cursor.getSearchKeyRange(entry(i + KEY_OFFSET), key,
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(i), key);
+ assertDataEquals(entry(i), data);
+ assertPriLocked(priDb, key);
+ }
+ key.setData(null);
+ data.setData(null);
+ status = cursor.getSearchKeyRange(entry(NUM_RECS + KEY_OFFSET),
+ key, data, LockMode.DEFAULT);
+ assertSame(OperationStatus.NOTFOUND, status);
+
+ /* SecondaryCursor.getSearchBothRange() */
+ data.setData(null);
+ status = cursor.getSearchBothRange(entry(1 + KEY_OFFSET), entry(1),
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(1), data);
+ assertPriLocked(priDb, entry(1));
+ for (int i = 0; i < NUM_RECS; i += 1) {
+ data.setData(null);
+ status = cursor.getSearchBothRange(entry(i + KEY_OFFSET),
+ entry(i), data,
+ LockMode.DEFAULT);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(i), data);
+ assertPriLocked(priDb, entry(i));
+ }
+ data.setData(null);
+ status = cursor.getSearchBothRange(entry(NUM_RECS + KEY_OFFSET),
+ entry(NUM_RECS), data,
+ LockMode.DEFAULT);
+ assertSame(OperationStatus.NOTFOUND, status);
+
+ /* Add one duplicate for each key.
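+ * Each record {i + KEY_OFFSET, i} produces the same secondary key as the
+ * existing record {i, i}, since the key creator derives the secondary key
+ * from the data value.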
*/ + Cursor priCursor = openCursor(priDb, txn); + try { + for (int i = 0; i < NUM_RECS; i += 1) { + status = priCursor.put(entry(i + KEY_OFFSET), entry(i)); + assertSame(OperationStatus.SUCCESS, status); + } + } finally { + priCursor.close(); + } + + /* SecondaryCursor.getNextDup() */ + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getFirst(secKey, key, data, LockMode.DEFAULT); + for (int i = 0; i < NUM_RECS; i += 1) { + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i), key); + assertDataEquals(entry(i), data); + assertPriLocked(priDb, key, data); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getNextDup(secKey, key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i + KEY_OFFSET), key); + assertDataEquals(entry(i), data); + assertPriLocked(priDb, key, data); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getNextDup(secKey, key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getNext(secKey, key, data, LockMode.DEFAULT); + } + assertSame(OperationStatus.NOTFOUND, status); + + /* SecondaryCursor.getNextNoDup() */ + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getFirst(secKey, key, data, LockMode.DEFAULT); + for (int i = 0; i < NUM_RECS; i += 1) { + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i), key); + assertDataEquals(entry(i), data); + assertPriLocked(priDb, key, data); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getNextNoDup(secKey, key, data, + LockMode.DEFAULT); + } + assertSame(OperationStatus.NOTFOUND, status); + + /* SecondaryCursor.getPrevDup() */ + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getLast(secKey, key, data, LockMode.DEFAULT); + for (int i = NUM_RECS - 1; i >= 0; i -= 1) { + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i + KEY_OFFSET), key); + assertDataEquals(entry(i), data); + assertPriLocked(priDb, key, data); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getPrevDup(secKey, key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i), key); + assertDataEquals(entry(i), data); + assertPriLocked(priDb, key, data); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getPrevDup(secKey, key, data, + LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getPrev(secKey, key, data, LockMode.DEFAULT); + } + assertSame(OperationStatus.NOTFOUND, status); + + /* SecondaryCursor.getPrevNoDup() */ + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getLast(secKey, key, data, LockMode.DEFAULT); + for (int i = NUM_RECS - 1; i >= 0; i -= 1) { + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(i + KEY_OFFSET), secKey); + assertDataEquals(entry(i + KEY_OFFSET), key); + assertDataEquals(entry(i), data); + 
assertPriLocked(priDb, key, data); + secKey.setData(null); + key.setData(null); + data.setData(null); + status = cursor.getPrevNoDup(secKey, key, data, + LockMode.DEFAULT); + } + assertSame(OperationStatus.NOTFOUND, status); + } finally { + cursor.close(); + } + + txnCommit(txn); + secDb.close(); + priDb.close(); + } + + @Test + public void testOpenAndClose() { + Database priDb = openPrimary(false, "testDB", false); + + /* Open two secondaries as regular databases and as secondaries. */ + Database secDbDetached = openDatabase(true, "testSecDB", false); + SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB", + false, false); + Database secDb2Detached = openDatabase(true, "testSecDB2", false); + SecondaryDatabase secDb2 = openSecondary(priDb, true, "testSecDB2", + false, false); + assertEquals(getSecondaries(priDb), new HashSet( + Arrays.asList(new SecondaryDatabase[] {secDb, secDb2}))); + + Transaction txn = txnBegin(); + + /* Check that primary writes to both secondaries. */ + checkSecondaryUpdate(txn, priDb, 1, secDbDetached, true, + secDb2Detached, true); + + /* New txn before closing database. */ + txnCommit(txn); + txn = txnBegin(); + + /* Close 2nd secondary. */ + closeSecondary(secDb2); + assertEquals(getSecondaries(priDb), new HashSet( + Arrays.asList(new SecondaryDatabase[] {secDb }))); + + /* Check that primary writes to 1st secondary only. */ + checkSecondaryUpdate(txn, priDb, 2, secDbDetached, true, + secDb2Detached, false); + + /* New txn before closing database. */ + txnCommit(txn); + txn = txnBegin(); + + /* Close 1st secondary. */ + closeSecondary(secDb); + assertEquals(0, getSecondaries(priDb).size()); + + /* Check that primary writes to no secondaries. */ + checkSecondaryUpdate(txn, priDb, 3, secDbDetached, false, + secDb2Detached, false); + + /* Open the two secondaries again. */ + secDb = openSecondary(priDb, true, "testSecDB", false, false); + secDb2 = openSecondary(priDb, true, "testSecDB2", false, false); + assertEquals(getSecondaries(priDb), new HashSet( + Arrays.asList(new SecondaryDatabase[] {secDb, secDb2}))); + + /* Check that primary writes to both secondaries. */ + checkSecondaryUpdate(txn, priDb, 4, secDbDetached, true, + secDb2Detached, true); + + txnCommit(txn); + + /* Close the primary first to generate exception. */ + try { + priDb.close(); + if (!useCustomAssociation) { + fail(); + } + } catch (IllegalStateException e) { + if (useCustomAssociation) { + throw e; + } + assertTrue( + e.getMessage().contains("2 associated SecondaryDatabases")); + } + /* Orphaned secondaries can be closed without errors. */ + secDb2.close(); + secDb.close(); + + secDb2Detached.close(); + secDbDetached.close(); + } + + /** + * Check that primary put() writes to each secondary that is open. + */ + private void checkSecondaryUpdate(Transaction txn, + Database priDb, + int val, + Database secDb, + boolean expectSecDbVal, + Database secDb2, + boolean expectSecDb2Val) { + OperationStatus status; + DatabaseEntry data = new DatabaseEntry(); + int secVal = KEY_OFFSET + val; + + status = priDb.put(txn, entry(val), entry(val)); + assertSame(OperationStatus.SUCCESS, status); + + status = secDb.get(txn, entry(secVal), data, LockMode.DEFAULT); + assertSame(expectSecDbVal ? OperationStatus.SUCCESS + : OperationStatus.NOTFOUND, status); + + status = secDb2.get(txn, entry(secVal), data, LockMode.DEFAULT); + assertSame(expectSecDb2Val ? 
OperationStatus.SUCCESS
+ : OperationStatus.NOTFOUND, status);
+
+ status = priDb.delete(txn, entry(val));
+ assertSame(OperationStatus.SUCCESS, status);
+ }
+
+ @Test
+ public void testReadOnly() {
+ SecondaryDatabase secDb = initDb();
+ Database priDb = getPrimaryDatabase(secDb);
+ OperationStatus status;
+ Transaction txn = txnBegin();
+
+ for (int i = 0; i < NUM_RECS; i += 1) {
+ status = priDb.put(txn, entry(i), entry(i));
+ assertSame(OperationStatus.SUCCESS, status);
+ }
+
+ /*
+ * Secondaries can be opened without a key creator if the primary is
+ * read only. openSecondary will specify a null key creator if the
+ * readOnly param is true.
+ */
+ Database readOnlyPriDb = openPrimary(false, "testDB", true);
+ SecondaryDatabase readOnlySecDb = openSecondary(readOnlyPriDb,
+ true, "testSecDB",
+ false, true);
+ assertNull(readOnlySecDb.getSecondaryConfig().getKeyCreator());
+ verifyRecords(txn, readOnlySecDb, NUM_RECS, true);
+
+ txnCommit(txn);
+ readOnlySecDb.close();
+ readOnlyPriDb.close();
+ secDb.close();
+ priDb.close();
+ }
+
+ /**
+ * Tests population of newly created secondary database, which occurs
+ * automatically in openSecondary when AllowPopulate is configured.
+ */
+ @Test
+ public void testAutomaticPopulate() {
+
+ /* Open primary without any secondaries and write data. */
+ Database priDb = openPrimary(false, "testDB", false);
+ Transaction txn = txnBegin();
+ for (int i = 0; i < NUM_RECS; i += 1) {
+ assertSame(OperationStatus.SUCCESS,
+ priDb.put(txn, entry(i), entry(i)));
+ }
+ txnCommit(txn);
+
+ /*
+ * Open secondary with allowPopulate option and it will automatically
+ * be populated.
+ */
+
+ SecondaryDatabase secDb;
+ try {
+ secDb = openSecondary(
+ priDb, true, "testSecDB", true /*allowPopulate*/, false);
+ if (useCustomAssociation) {
+ fail();
+ }
+ } catch (IllegalArgumentException e) {
+ if (useCustomAssociation) {
+
+ /*
+ * Automatic population not allowed when a user-supplied
+ * SecondaryAssociation is configured.
+ */
+ final String msg = e.toString();
+ assertTrue(msg, msg.contains("AllowPopulate must be false"));
+ priDb.close();
+ return;
+ }
+ throw e;
+ }
+ txn = txnBegin();
+ verifyRecords(txn, secDb, NUM_RECS, true);
+ txnCommit(txn);
+
+ /*
+ * Clear secondary and perform populate again, to test the case where
+ * an existing database is opened, and therefore a write txn will only
+ * be created in order to populate it.
+ */
+ Database secDbDetached = openDatabase(true, "testSecDB", false);
+ secDb.close();
+ txn = txnBegin();
+ for (int i = 0; i < NUM_RECS; i += 1) {
+ assertSame(OperationStatus.SUCCESS,
+ secDbDetached.delete(txn, entry(i + KEY_OFFSET)));
+ }
+ verifyRecords(txn, secDbDetached, 0, true);
+ txnCommit(txn);
+ secDb = openSecondary(priDb, true, "testSecDB", true, false);
+ txn = txnBegin();
+ verifyRecords(txn, secDb, NUM_RECS, true);
+ verifyRecords(txn, secDbDetached, NUM_RECS, true);
+
+ txnCommit(txn);
+ secDbDetached.close();
+ secDb.close();
+ priDb.close();
+ }
+
+ /**
+ * Tests population of newly created secondary database via explicit calls
+ * to incremental indexing methods. This is a very basic test and not
+ * intended to take the place of a stress test that performs concurrent
+ * writes.
+ */
+ @Test
+ public void testIncrementalPopulate() {
+ /* Open primary without any secondaries and write data.
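+ * The index is then built explicitly via populateSecondaries below.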
*/ + final Database priDb = openPrimary(false, "testDB", false); + final Transaction txn = txnBegin(); + for (int i = 0; i < NUM_RECS; i += 1) { + assertSame(OperationStatus.SUCCESS, + priDb.put(txn, entry(i), entry(i))); + } + txnCommit(txn); + + /* + * Open secondary without allowPopulate option. It will initially be + * empty. + */ + final SecondaryDatabase secDb = openSecondary( + priDb, true, "testSecDB", false /*allowPopulate*/, false); + assertEquals(0, secDb.count()); + + /* Enable incremental population. Secondary reads are not allowed. */ + secDb.startIncrementalPopulation(); + try { + secDb.get(null, new DatabaseEntry(new byte[0]), + new DatabaseEntry(), null); + fail(); + } catch (IllegalStateException expected) { + } + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final Cursor cursor = priDb.openCursor( + null, CursorConfig.READ_COMMITTED); + OperationResult result; + while ((result = cursor.get( + keyEntry, dataEntry, Get.NEXT, null)) != null) { + priDb.populateSecondaries( + null, keyEntry, dataEntry, result.getExpirationTime(), null); + } + cursor.close(); + + /* After disabling incremental population we can read via secondary. */ + secDb.endIncrementalPopulation(); + verifyRecords(null, secDb, NUM_RECS, true); + + secDb.close(); + priDb.close(); + } + + @Test + public void testTruncate() { + SecondaryDatabase secDb = initDb(); + Database priDb = getPrimaryDatabase(secDb); + Transaction txn = txnBegin(); + + for (int i = 0; i < NUM_RECS; i += 1) { + priDb.put(txn, entry(i), entry(i)); + } + verifyRecords(txn, priDb, NUM_RECS, false); + verifyRecords(txn, secDb, NUM_RECS, true); + txnCommit(txn); + secDb.close(); + priDb.close(); + + txn = txnBegin(); + assertEquals(NUM_RECS, env.truncateDatabase(txn, "testDB", true)); + assertEquals(NUM_RECS, env.truncateDatabase(txn, "testSecDB", true)); + txnCommit(txn); + + secDb = initDb(); + priDb = getPrimaryDatabase(secDb); + + txn = txnBegin(); + verifyRecords(txn, priDb, 0, false); + verifyRecords(txn, secDb, 0, true); + txnCommit(txn); + + secDb.close(); + priDb.close(); + } + + private void verifyRecords(Transaction txn, + Database db, + int numRecs, + boolean isSecondary) { + /* We're only reading, so txn may be null. 
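+ * A null txn simply opens a non-transactional cursor.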
*/
+ Cursor cursor = openCursor(db, txn);
+ try {
+ DatabaseEntry data = new DatabaseEntry();
+ DatabaseEntry key = new DatabaseEntry();
+ OperationStatus status;
+ int count = 0;
+ status = cursor.getFirst(key, data, LockMode.DEFAULT);
+ while (status == OperationStatus.SUCCESS) {
+ assertDataEquals(entry(count), data);
+ if (isSecondary) {
+ assertDataEquals(entry(count + KEY_OFFSET), key);
+ } else {
+ assertDataEquals(entry(count), key);
+ }
+ count += 1;
+ status = cursor.getNext(key, data, LockMode.DEFAULT);
+ }
+ assertEquals(numRecs, count);
+ } finally {
+ cursor.close();
+ }
+ }
+
+ @Test
+ public void testUniqueSecondaryKey() {
+ Database priDb = openPrimary(false, "testDB", false);
+ SecondaryDatabase secDb =
+ openSecondary(priDb, false, "testSecDB", false, false);
+ DatabaseEntry key;
+ DatabaseEntry data;
+ DatabaseEntry pkey = new DatabaseEntry();
+ Transaction txn;
+
+ /* Put {0, 0} */
+ txn = txnBegin();
+ key = entry(0);
+ data = entry(0);
+ priDb.put(txn, key, data);
+ assertEquals(OperationStatus.SUCCESS,
+ secDb.get(txn, entry(0 + KEY_OFFSET),
+ pkey, data, null));
+ assertEquals(0, TestUtils.getTestVal(pkey.getData()));
+ assertEquals(0, TestUtils.getTestVal(data.getData()));
+ txnCommit(txn);
+
+ /* Put {1, 1} */
+ txn = txnBegin();
+ key = entry(1);
+ data = entry(1);
+ priDb.put(txn, key, data);
+ assertEquals(OperationStatus.SUCCESS,
+ secDb.get(txn, entry(1 + KEY_OFFSET),
+ pkey, data, null));
+ txnCommit(txn);
+ assertEquals(1, TestUtils.getTestVal(pkey.getData()));
+ assertEquals(1, TestUtils.getTestVal(data.getData()));
+
+ /* Put {2, 0} */
+ txn = txnBegin();
+ key = entry(2);
+ data = entry(0);
+ try {
+ priDb.put(txn, key, data);
+ /* Expect exception because secondary key must be unique. */
+ fail();
+ } catch (UniqueConstraintException e) {
+ txnAbort(txn);
+ /* Ensure that primary record was not inserted. */
+ assertEquals(OperationStatus.NOTFOUND,
+ priDb.get(null, key, data, null));
+ /* Ensure that secondary record has not changed. */
+ assertEquals(OperationStatus.SUCCESS,
+ secDb.get(null, entry(0 + KEY_OFFSET),
+ pkey, data, null));
+ assertEquals(0, TestUtils.getTestVal(pkey.getData()));
+ assertEquals(0, TestUtils.getTestVal(data.getData()));
+ }
+
+ /* Overwrite {1, 1} */
+ txn = txnBegin();
+ key = entry(1);
+ data = entry(1);
+ priDb.put(txn, key, data);
+ assertEquals(OperationStatus.SUCCESS,
+ secDb.get(txn, entry(1 + KEY_OFFSET),
+ pkey, data, null));
+ assertEquals(1, TestUtils.getTestVal(pkey.getData()));
+ assertEquals(1, TestUtils.getTestVal(data.getData()));
+ txnCommit(txn);
+
+ /* Modify secondary key to {1, 3} */
+ txn = txnBegin();
+ key = entry(1);
+ data = entry(3);
+ priDb.put(txn, key, data);
+ assertEquals(OperationStatus.SUCCESS,
+ secDb.get(txn, entry(3 + KEY_OFFSET),
+ pkey, data, null));
+ assertEquals(1, TestUtils.getTestVal(pkey.getData()));
+ assertEquals(3, TestUtils.getTestVal(data.getData()));
+ txnCommit(txn);
+
+ secDb.close();
+ priDb.close();
+ }
+
+ @Test
+ public void testOperationsNotAllowed() {
+ SecondaryDatabase secDb = initDb();
+ Database priDb = getPrimaryDatabase(secDb);
+ Transaction txn = txnBegin();
+
+ /* Open secondary without a key creator. */
+ try {
+ env.openSecondaryDatabase(txn, "xxx", priDb, null);
+ fail();
+ } catch (IllegalArgumentException expected) { }
+ try {
+ env.openSecondaryDatabase(txn, "xxx", priDb,
+ new SecondaryConfig());
+ fail();
+ } catch (IllegalArgumentException expected) { }
+
+ /* Open secondary with both single and multi key creators.
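+ * Configuring both kinds of key creator at once is ambiguous and must
+ * be rejected.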
*/ + SecondaryConfig config = new SecondaryConfig(); + config.setKeyCreator(new MyKeyCreator()); + config.setMultiKeyCreator + (new SimpleMultiKeyCreator(new MyKeyCreator())); + try { + env.openSecondaryDatabase(txn, "xxx", priDb, config); + fail(); + } catch (IllegalArgumentException expected) { } + + /* Open secondary with non-null primaryDb and SecondaryAssociation. */ + config = new SecondaryConfig(); + config.setKeyCreator(new MyKeyCreator()); + config.setSecondaryAssociation(new CustomAssociation()); + try { + env.openSecondaryDatabase(txn, "xxx", priDb, config); + fail(); + } catch (IllegalArgumentException expected) { + final String msg = expected.toString(); + assertTrue(msg, msg.contains( + "Exactly one must be non-null: " + + "PrimaryDatabase or SecondaryAssociation")); + } + + /* Database operations. */ + + DatabaseEntry key = entry(1); + DatabaseEntry data = entry(2); + + try { + secDb.getSearchBoth(txn, key, data, LockMode.DEFAULT); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + secDb.put(txn, key, data); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + secDb.putNoOverwrite(txn, key, data); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + secDb.putNoDupData(txn, key, data); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + secDb.join(new Cursor[0], null); + fail(); + } catch (UnsupportedOperationException expected) { } + + /* Cursor operations. */ + + txnCommit(txn); + txn = txnBeginCursor(); + + SecondaryCursor cursor = null; + try { + cursor = openCursor(secDb, txn); + + try { + cursor.getSearchBoth(key, data, LockMode.DEFAULT); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + cursor.getSearchBothRange(key, data, LockMode.DEFAULT); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + cursor.putCurrent(data); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + cursor.put(key, data); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + cursor.putNoOverwrite(key, data); + fail(); + } catch (UnsupportedOperationException expected) { } + + try { + cursor.putNoDupData(key, data); + fail(); + } catch (UnsupportedOperationException expected) { } + } finally { + if (cursor != null) { + cursor.close(); + } + } + + txnCommit(txn); + secDb.close(); + priDb.close(); + + /* Primary with duplicates. */ + try { + priDb = openPrimary(true, "testDBWithDups", false); + assertTrue(!useCustomAssociation); + try { + openSecondary(priDb, true, "testSecDB", false, false); + fail(); + } catch (IllegalArgumentException expected) { + } + } catch (IllegalArgumentException e) { + if (!useCustomAssociation) { + throw e; + } + } + priDb.close(); + + /* Single secondary with two primaries.*/ + if (!useCustomAssociation) { + Database pri1 = openPrimary(false, "pri1", false); + Database pri2 = openPrimary(false, "pri2", false); + Database sec1 = openSecondary(pri1, false, "sec", false, false); + try { + openSecondary(pri2, false, "sec", false, false); + fail(); + } catch (IllegalArgumentException expected) {} + sec1.close(); + pri1.close(); + pri2.close(); + } + } + + /** + * Test that null can be passed for the LockMode to all get methods. 
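+ * A null LockMode is treated as LockMode.DEFAULT.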
+ */ + @Test + public void testNullLockMode() { + SecondaryDatabase secDb = initDb(); + Database priDb = getPrimaryDatabase(secDb); + Transaction txn = txnBegin(); + + DatabaseEntry key = entry(0); + DatabaseEntry data = entry(0); + DatabaseEntry secKey = entry(KEY_OFFSET); + DatabaseEntry found = new DatabaseEntry(); + DatabaseEntry found2 = new DatabaseEntry(); + DatabaseEntry found3 = new DatabaseEntry(); + + assertEquals(OperationStatus.SUCCESS, + priDb.put(txn, key, data)); + assertEquals(OperationStatus.SUCCESS, + priDb.put(txn, entry(1), data)); + assertEquals(OperationStatus.SUCCESS, + priDb.put(txn, entry(2), entry(2))); + + /* Database operations. */ + + assertEquals(OperationStatus.SUCCESS, + priDb.get(txn, key, found, null)); + assertEquals(OperationStatus.SUCCESS, + priDb.getSearchBoth(txn, key, data, null)); + assertEquals(OperationStatus.SUCCESS, + secDb.get(txn, secKey, found, null)); + assertEquals(OperationStatus.SUCCESS, + secDb.get(txn, secKey, found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secDb.getSearchBoth(txn, secKey, key, found, null)); + + /* Cursor operations. */ + + txnCommit(txn); + txn = txnBeginCursor(); + Cursor cursor = openCursor(priDb, txn); + SecondaryCursor secCursor = openCursor(secDb, txn); + + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, found, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBoth(key, data, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKeyRange(key, found, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBothRange(key, data, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getFirst(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getPrev(found, found2, null)); + assertEquals(OperationStatus.NOTFOUND, + cursor.getNextDup(found, found2, null)); + assertEquals(OperationStatus.NOTFOUND, + cursor.getPrevDup(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getNextNoDup(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getPrevNoDup(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + cursor.getLast(found, found2, null)); + + assertEquals(OperationStatus.SUCCESS, + secCursor.getSearchKey(secKey, found, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getSearchKeyRange(secKey, found, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getFirst(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getNext(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getPrev(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getNextDup(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getPrevDup(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getNextNoDup(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getPrevNoDup(found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getLast(found, found2, null)); + + assertEquals(OperationStatus.SUCCESS, + secCursor.getSearchKey(secKey, found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getSearchBoth(secKey, data, found, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getSearchKeyRange(secKey, found, found2, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getSearchBothRange(secKey, data, found, null)); + 
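/* Repeat the navigation calls using the (secKey, pKey, data) variants. */
+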
assertEquals(OperationStatus.SUCCESS, + secCursor.getFirst(found, found2, found3, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getNext(found, found2, found3, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getPrev(found, found2, found3, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getNextDup(found, found2, found3, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getPrevDup(found, found2, found3, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getNextNoDup(found, found2, found3, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getPrevNoDup(found, found2, found3, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getLast(found, found2, found3, null)); + + secCursor.close(); + cursor.close(); + txnCommit(txn); + secDb.close(); + priDb.close(); + closeEnv(); + env = null; + } + + /** + * Test that an exception is thrown when a cursor is used in the wrong + * state. No put or get is allowed in the closed state, and certain gets + * and puts are not allowed in the uninitialized state. + */ + @Test + public void testCursorState() { + SecondaryDatabase secDb = initDb(); + Database priDb = getPrimaryDatabase(secDb); + Transaction txn = txnBegin(); + + DatabaseEntry key = entry(0); + DatabaseEntry data = entry(0); + DatabaseEntry secKey = entry(KEY_OFFSET); + DatabaseEntry found = new DatabaseEntry(); + DatabaseEntry found2 = new DatabaseEntry(); + + assertEquals(OperationStatus.SUCCESS, + priDb.put(txn, key, data)); + + txnCommit(txn); + txn = txnBeginCursor(); + Cursor cursor = openCursor(priDb, txn); + SecondaryCursor secCursor = openCursor(secDb, txn); + + /* Check the uninitialized state for certain operations. */ + + try { + cursor.count(); + fail(); + } catch (IllegalStateException expected) {} + try { + cursor.delete(); + fail(); + } catch (IllegalStateException expected) {} + try { + cursor.putCurrent(data); + fail(); + } catch (IllegalStateException expected) {} + try { + cursor.getCurrent(key, data, null); + fail(); + } catch (IllegalStateException expected) {} + try { + cursor.getNextDup(found, found2, null); + fail(); + } catch (IllegalStateException expected) {} + try { + cursor.getPrevDup(found, found2, null); + fail(); + } catch (IllegalStateException expected) {} + + try { + secCursor.count(); + fail(); + } catch (IllegalStateException expected) {} + try { + secCursor.delete(); + fail(); + } catch (IllegalStateException expected) {} + try { + secCursor.getCurrent(key, data, null); + fail(); + } catch (IllegalStateException expected) {} + try { + secCursor.getNextDup(found, found2, null); + fail(); + } catch (IllegalStateException expected) {} + try { + secCursor.getPrevDup(found, found2, null); + fail(); + } catch (IllegalStateException expected) {} + + /* Cursor.dup works whether initialized or not. */ + { + Cursor c2 = secCursor.dup(false); + c2.close(); + c2 = secCursor.dup(true); + c2.close(); + c2 = secCursor.dup(false); + c2.close(); + c2 = secCursor.dup(true); + c2.close(); + } + + /* Initialize, then close, then check all operations. */ + + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, found, null)); + assertEquals(OperationStatus.SUCCESS, + secCursor.getSearchKey(secKey, found, null)); + + /* Cursor.dup works whether initialized or not. */ + { + Cursor c2 = cursor.dup(false); + c2.close(); + c2 = cursor.dup(true); + c2.close(); + c2 = secCursor.dup(false); + c2.close(); + c2 = secCursor.dup(true); + c2.close(); + } + + /* Close, then check all operations. 
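+ * Every get and put on a closed cursor must throw IllegalStateException,
+ * although close itself may be called again.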
*/
+
+ secCursor.close();
+ cursor.close();
+
+ try {
+ cursor.close();
+ } catch (RuntimeException e) {
+ fail("Caught exception while re-closing a Cursor.");
+ }
+
+ try {
+ cursor.count();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.delete();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.put(key, data);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.putNoOverwrite(key, data);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.putNoDupData(key, data);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.putCurrent(data);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getCurrent(key, data, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getSearchKey(key, found, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getSearchBoth(key, data, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getSearchKeyRange(key, found, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getSearchBothRange(key, data, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getFirst(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getNext(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getPrev(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getNextDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getPrevDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getNextNoDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getPrevNoDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ cursor.getLast(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+
+ try {
+ secCursor.close();
+ } catch (RuntimeException e) {
+ fail("Caught exception while re-closing a SecondaryCursor.");
+ }
+
+ try {
+ secCursor.count();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.delete();
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getCurrent(key, data, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getSearchKey(secKey, found, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getSearchKeyRange(secKey, found, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getFirst(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getNext(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getPrev(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getNextDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getPrevDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getNextNoDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getPrevNoDup(found, found2, null);
+ fail();
+ } catch (IllegalStateException expected) {}
+ try {
+ secCursor.getLast(found, found2, null);
+
fail();
+ } catch (IllegalStateException expected) {}
+
+ txnCommit(txn);
+ secDb.close();
+ priDb.close();
+ closeEnv();
+ env = null;
+ }
+
+ /**
+ * [#14966]
+ */
+ @Test
+ public void testDirtyReadPartialGet() {
+ SecondaryDatabase secDb = initDb();
+ Database priDb = getPrimaryDatabase(secDb);
+
+ DatabaseEntry data = new DatabaseEntry();
+ DatabaseEntry key = new DatabaseEntry();
+ DatabaseEntry secKey = new DatabaseEntry();
+ OperationStatus status;
+
+ /* Put a record */
+ Transaction txn = txnBegin();
+ status = priDb.put(txn, entry(0), entry(0));
+ assertSame(OperationStatus.SUCCESS, status);
+
+ /* Regular get */
+ status = secDb.get(txn, entry(0 + KEY_OFFSET), key,
+ data, LockMode.DEFAULT);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(0), key);
+ assertDataEquals(entry(0), data);
+
+ /* Dirty read returning no data */
+ data.setPartial(0, 0, true);
+ status = secDb.get(txn, entry(0 + KEY_OFFSET), key,
+ data, LockMode.READ_UNCOMMITTED);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(0), key);
+ assertEquals(0, data.getData().length);
+ assertEquals(0, data.getSize());
+
+ /* Dirty read returning partial data */
+ data.setPartial(0, 1, true);
+ status = secDb.get(txn, entry(0 + KEY_OFFSET), key,
+ data, LockMode.READ_UNCOMMITTED);
+ assertSame(OperationStatus.SUCCESS, status);
+ assertDataEquals(entry(0), key);
+ assertEquals(1, data.getData().length);
+ assertEquals(1, data.getSize());
+ txnCommit(txn);
+
+ secDb.close();
+ priDb.close();
+ }
+
+ /**
+ * Tests ImmutableSecondaryKey optimization.
+ *
+ * expectFetchOnDelete is true because the data (not just the key) is used
+ * by the key creator/extractor.
+ */
+ @Test
+ public void testImmutableSecondaryKey() {
+ final SecondaryConfig secConfig = new SecondaryConfig();
+ secConfig.setImmutableSecondaryKey(true);
+ checkUpdateWithNoFetchOfOldData(secConfig,
+ true /*expectFetchOnDelete*/);
+ }
+
+ /**
+ * Tests ExtractFromPrimaryKeyOnly optimization.
+ *
+ * expectFetchOnDelete is false because only the key is used by the key
+ * creator/extractor.
+ */
+ @Test
+ public void testExtractFromPrimaryKeyOnly() {
+ final SecondaryConfig secConfig = new SecondaryConfig();
+ secConfig.setExtractFromPrimaryKeyOnly(true);
+ if (useMultiKey) {
+ secConfig.setMultiKeyCreator(
+ new SimpleMultiKeyCreator(new KeyOnlyKeyCreator()));
+ } else {
+ secConfig.setKeyCreator(new KeyOnlyKeyCreator());
+ }
+ checkUpdateWithNoFetchOfOldData(secConfig,
+ false /*expectFetchOnDelete*/);
+ }
+
+ /**
+ * Tests ImmutableSecondaryKey and ExtractFromPrimaryKeyOnly optimizations
+ * together.
+ *
+ * expectFetchOnDelete is false because only the key is used by the key
+ * creator/extractor.
+ */
+ @Test
+ public void testImmutableSecondaryKeyAndExtractFromPrimaryKeyOnly() {
+ final SecondaryConfig secConfig = new SecondaryConfig();
+ secConfig.setImmutableSecondaryKey(true);
+ secConfig.setExtractFromPrimaryKeyOnly(true);
+ if (useMultiKey) {
+ secConfig.setMultiKeyCreator(
+ new SimpleMultiKeyCreator(new KeyOnlyKeyCreator()));
+ } else {
+ secConfig.setKeyCreator(new KeyOnlyKeyCreator());
+ }
+ checkUpdateWithNoFetchOfOldData(secConfig,
+ false /*expectFetchOnDelete*/);
+ }
+
+ /**
+ * When ImmutableSecondaryKey or ExtractFromPrimaryKeyOnly is configured,
+ * the data should not be fetched when updating the record. It should also
+ * not be fetched during a delete when ExtractFromPrimaryKeyOnly is
+ * configured (i.e., when expectFetchOnDelete is false).
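+ * Fetches are detected via the NLNsFetch statistic; when LNs are embedded
+ * in the BIN, no fetch occurs in any case.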
+ */ + private void checkUpdateWithNoFetchOfOldData( + final SecondaryConfig secConfig, + final boolean expectFetchOnDelete) { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.getDataVerifier().shutdown(); + + boolean embeddedLNs = (envImpl.getMaxEmbeddedLN() >= 4); + + final Database priDb = openPrimary(false, "testDB", false); + + secConfig.setSortedDuplicates(true); + final SecondaryDatabase secDb = openSecondary( + priDb, "testSecDB", secConfig); + + final StatsConfig clearStats = new StatsConfig().setClear(true); + final DatabaseEntry data = new DatabaseEntry(); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry secKey = new DatabaseEntry(); + OperationStatus status; + + /* Insert a record */ + env.getStats(clearStats); + Transaction txn = txnBegin(); + status = priDb.put(txn, entry(0), entry(0)); + assertSame(OperationStatus.SUCCESS, status); + final EnvironmentStats stats1 = env.getStats(null); + assertEquals(0, stats1.getNLNsFetch()); + + /* Get via secondary. */ + env.getStats(clearStats); + status = secDb.get( + txn, entry(0 + KEY_OFFSET), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(0), key); + assertDataEquals(entry(0), data); + final EnvironmentStats stats2 = env.getStats(null); + assertEquals((embeddedLNs ? 0 : 1), stats2.getNLNsFetch()); + + /* Update to same value -- should not fetch old data. */ + env.getStats(clearStats); + status = priDb.put(txn, entry(0), entry(0)); + assertSame(OperationStatus.SUCCESS, status); + final EnvironmentStats stats3 = env.getStats(null); + assertEquals(0, stats3.getNLNsFetch()); + + /* Get via secondary. */ + env.getStats(clearStats); + status = secDb.get( + txn, entry(0 + KEY_OFFSET), key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + assertDataEquals(entry(0), key); + assertDataEquals(entry(0), data); + final EnvironmentStats stats4 = env.getStats(null); + assertEquals((embeddedLNs ? 0 : 1), stats4.getNLNsFetch()); + + /* Delete -- only fetches old data if expectFetchOnDelete is true. */ + env.getStats(clearStats); + status = priDb.delete(txn, entry(0)); + assertSame(OperationStatus.SUCCESS, status); + final EnvironmentStats stats5 = env.getStats(null); + assertEquals( + (expectFetchOnDelete && !embeddedLNs ? 1 : 0), + stats5.getNLNsFetch()); + + /* Get via secondary. 
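+ * The secondary entry must be gone after the primary delete.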
*/ + status = secDb.get(txn, entry(0 + KEY_OFFSET), key, + data, LockMode.DEFAULT); + assertSame(OperationStatus.NOTFOUND, status); + + txnCommit(txn); + secDb.close(); + priDb.close(); + } + + /** + * Open environment, primary and secondary db + */ + private SecondaryDatabase initDb() { + Database priDb = openPrimary(false, "testDB", false); + SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB", + false, false); + return secDb; + } + + private Database openPrimary(boolean allowDuplicates, String name, + boolean readOnly) { + return openDbInternal(allowDuplicates, name, readOnly, true); + } + + private Database openDatabase(boolean allowDuplicates, String name, + boolean readOnly) { + return openDbInternal(allowDuplicates, name, readOnly, false); + } + + private Database openDbInternal(boolean allowDuplicates, String name, + boolean readOnly, boolean isPrimary) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(allowDuplicates); + dbConfig.setReadOnly(readOnly); + if (isPrimary && useCustomAssociation) { + customAssociation = new CustomAssociation(); + dbConfig.setSecondaryAssociation(customAssociation); + } + Transaction txn = txnBegin(); + Database priDb; + try { + priDb = env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + assertNotNull(priDb); + if (isPrimary && useCustomAssociation) { + customAssociation.initPrimary(priDb); + } + return priDb; + } + + private SecondaryDatabase openSecondary(Database priDb, + boolean allowDuplicates, + String dbName, + boolean allowPopulate, + boolean readOnly) { + final SecondaryConfig dbConfig = new SecondaryConfig(); + dbConfig.setSortedDuplicates(allowDuplicates); + dbConfig.setReadOnly(readOnly); + dbConfig.setAllowPopulate(allowPopulate); + return openSecondary(priDb, dbName, dbConfig); + } + + + + private SecondaryDatabase openSecondary(Database priDb, + String dbName, + SecondaryConfig dbConfig) { + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + if (!dbConfig.getReadOnly() && + dbConfig.getMultiKeyCreator() == null && + dbConfig.getKeyCreator() == null) { + if (useMultiKey) { + dbConfig.setMultiKeyCreator( + new SimpleMultiKeyCreator(new MyKeyCreator())); + } else { + dbConfig.setKeyCreator(new MyKeyCreator()); + } + } + final Database priDbParam; + if (useCustomAssociation) { + priDbParam = null; + assertSame(priDb, customAssociation.getPrimary(null)); + dbConfig.setSecondaryAssociation(customAssociation); + } else { + priDbParam = priDb; + assertNull(customAssociation); + } + final Collection secListBefore = + getSecondaries(priDb); + final Transaction txn = txnBegin(); + final SecondaryDatabase secDb; + try { + secDb = env.openSecondaryDatabase(txn, dbName, priDbParam, + dbConfig); + } finally { + txnCommit(txn); + } + assertNotNull(secDb); + if (useCustomAssociation) { + customAssociation.addSecondary(secDb); + } + + /* Check configuration. 
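+ * The handle's SecondaryConfig must reflect the settings passed to
+ * openSecondaryDatabase.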
*/ + if (useCustomAssociation) { + assertNull(secDb.getPrimaryDatabase()); + } else { + assertSame(priDb, secDb.getPrimaryDatabase()); + } + final SecondaryConfig config2 = secDb.getSecondaryConfig(); + assertEquals(dbConfig.getAllowPopulate(), config2.getAllowPopulate()); + assertEquals(dbConfig.getKeyCreator(), config2.getKeyCreator()); + assertEquals(dbConfig.getMultiKeyCreator(), + config2.getMultiKeyCreator()); + assertSame(customAssociation, config2.getSecondaryAssociation()); + + /* Make sure the new secondary is added to the primary's list. */ + final Collection secListAfter = + getSecondaries(priDb); + assertTrue(secListAfter.remove(secDb)); + assertEquals(secListBefore, secListAfter); + + return secDb; + } + + private void closeSecondary(SecondaryDatabase secDb) { + if (useCustomAssociation) { + customAssociation.removeSecondary(secDb); + } + secDb.close(); + } + + private Database getPrimaryDatabase(SecondaryDatabase secDb) { + if (useCustomAssociation) { + return customAssociation.getPrimary(null); + } + return secDb.getPrimaryDatabase(); + } + + /** + * Returns a copy of the secondaries as a set, so it can be compared to an + * expected set. + */ + private Set getSecondaries(Database priDb) { + if (useCustomAssociation) { + return new HashSet( + customAssociation.getSecondaries(null)); + } + return new HashSet(priDb.getSecondaryDatabases()); + } + + private static class CustomAssociation implements SecondaryAssociation { + + private Database priDb = null; + private final Set secondaries = + new HashSet(); + + void initPrimary(final Database priDb) { + this.priDb = priDb; + } + + public boolean isEmpty() { + return secondaries.isEmpty(); + } + + public Database getPrimary(@SuppressWarnings("unused") + DatabaseEntry primaryKey) { + assertNotNull(priDb); + return priDb; + } + + public Collection + getSecondaries(@SuppressWarnings("unused") + DatabaseEntry primaryKey) { + return secondaries; + } + + public void addSecondary(final SecondaryDatabase secDb) { + secondaries.add(secDb); + } + + public void removeSecondary(final SecondaryDatabase secDb) { + secondaries.remove(secDb); + } + } + + private Cursor openCursor(Database priDb, Transaction txn) { + final CursorConfig config = + resetOnFailure ? + (new CursorConfig().setNonSticky(true)) : + null; + return priDb.openCursor(txn, config); + } + + private SecondaryCursor openCursor(SecondaryDatabase secDb, + Transaction txn) { + final CursorConfig config = + resetOnFailure ? + (new CursorConfig().setNonSticky(true)) : + null; + return secDb.openCursor(txn, config); + } + + private DatabaseEntry entry(int val) { + + return new DatabaseEntry(TestUtils.getTestArray(val)); + } + + /** + * Creates a partial entry for changing oldVal to newVal. 
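+ * Only the byte range that differs between the two values is included,
+ * using DatabaseEntry.setPartial.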
+ */
+ private DatabaseEntry partialEntry(final int oldVal, final int newVal) {
+
+ final DatabaseEntry oldEntry =
+ new DatabaseEntry(TestUtils.getTestArray(oldVal));
+ final DatabaseEntry newEntry =
+ new DatabaseEntry(TestUtils.getTestArray(newVal));
+
+ final int size = oldEntry.getSize();
+ assertEquals(size, newEntry.getSize());
+
+ /* Scan forward for the first differing byte. */
+ int begOff;
+ for (begOff = 0; begOff < size; begOff += 1) {
+ if (oldEntry.getData()[begOff] != newEntry.getData()[begOff]) {
+ break;
+ }
+ }
+
+ /* Scan backward from the end for the last differing byte. */
+ int endOff;
+ for (endOff = size - 1; endOff >= begOff; endOff -= 1) {
+ if (oldEntry.getData()[endOff] != newEntry.getData()[endOff]) {
+ break;
+ }
+ }
+
+ final int partialSize = endOff - begOff + 1;
+ final byte[] newData = new byte[partialSize];
+ System.arraycopy(newEntry.getData(), begOff, newData, 0, partialSize);
+ newEntry.setData(newData);
+ newEntry.setPartial(begOff, partialSize, true);
+ return newEntry;
+ }
+
+ private void assertDataEquals(DatabaseEntry e1, DatabaseEntry e2) {
+ assertTrue(e1.equals(e2));
+ }
+
+ private void assertPriLocked(Database priDb, DatabaseEntry key) {
+ assertPriLocked(priDb, key, null);
+ }
+
+ /**
+ * Checks that the given key (or both key and data if data is non-null) is
+ * locked in the primary database. The primary record should be locked
+ * whenever a secondary cursor is positioned to point to that primary
+ * record. [#15573]
+ */
+ private void assertPriLocked(final Database priDb,
+ final DatabaseEntry key,
+ final DatabaseEntry data) {
+
+ /*
+ * Whether the record is locked transactionally or not in the current
+ * thread, we should not be able to write lock the record
+ * non-transactionally in another thread.
+ */
+ final StringBuilder error = new StringBuilder();
+ junitThread = new JUnitThread("primary-locker") {
+ @Override
+ public void testBody() {
+ Cursor cursor = openCursor(priDb, null);
+ try {
+ if (data != null) {
+ cursor.getSearchBoth(key, data, LockMode.RMW);
+ } else {
+ DatabaseEntry myData = new DatabaseEntry();
+ cursor.getSearchKey(key, myData, LockMode.RMW);
+ }
+ error.append("Expected LockConflictException");
+ } catch (Exception expected) {
+ assertTrue(
+ expected.toString(),
+ expected instanceof LockConflictException);
+ } finally {
+ cursor.close();
+ }
+ }
+ };
+
+ junitThread.start();
+ Throwable t = null;
+ try {
+ junitThread.finishTest();
+ } catch (Throwable e) {
+ t = e;
+ } finally {
+ junitThread = null;
+ }
+
+ if (t != null) {
+ t.printStackTrace();
+ fail(t.toString());
+ }
+ if (error.length() > 0) {
+ fail(error.toString());
+ }
+ }
+
+ private void assertNSecWrites(final Cursor cursor,
+ final int expectNWrites) {
+ assertEquals(
+ customName,
+ expectNWrites,
+ DbInternal.getCursorImpl(cursor).getNSecondaryWrites());
+ }
+
+ private static class MyKeyCreator implements SecondaryKeyCreator {
+
+ public boolean createSecondaryKey(SecondaryDatabase secondary,
+ DatabaseEntry key,
+ DatabaseEntry data,
+ DatabaseEntry result) {
+ result.setData(
+ TestUtils.getTestArray(
+ TestUtils.getTestVal(data.getData()) + KEY_OFFSET));
+ return true;
+ }
+ }
+
+ /**
+ * Derives secondary key from primary key alone, to test the
+ * ExtractFromPrimaryKeyOnly optimization. The data param may be null.
+ */ + private static class KeyOnlyKeyCreator implements SecondaryKeyCreator { + + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData( + TestUtils.getTestArray( + TestUtils.getTestVal(key.getData()) + KEY_OFFSET)); + return true; + } + } +} diff --git a/test/com/sleepycat/je/test/SequenceTest.java b/test/com/sleepycat/je/test/SequenceTest.java new file mode 100644 index 0000000..5ae545e --- /dev/null +++ b/test/com/sleepycat/je/test/SequenceTest.java @@ -0,0 +1,520 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Sequence; +import com.sleepycat.je.SequenceConfig; +import com.sleepycat.je.SequenceStats; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.util.test.TxnTestCase; + +@RunWith(Parameterized.class) +public class SequenceTest extends TxnTestCase { + + @Parameters + public static List genParams() { + return getTxnParams(null, false); + } + + public SequenceTest(String type){ + initEnvConfig(); + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + @Test + public void testIllegal() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(new byte[1]); + SequenceConfig config = new SequenceConfig(); + config.setAllowCreate(true); + + /* Duplicates not allowed. */ + + Database db = openDb("dups", true); + Transaction txn = txnBegin(); + try { + db.openSequence(txn, key, config); + fail(); + } catch (UnsupportedOperationException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("duplicates") >= 0); + } + txnCommit(txn); + db.close(); + + db = openDb("foo"); + txn = txnBegin(); + + /* Range min must be less than max. */ + + config.setRange(0, 0); + try { + db.openSequence(txn, key, config); + fail(); + } catch (IllegalArgumentException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("less than the maximum") >= 0); + } + + /* Initial value must be within range. 
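+ * Initial values on either side of the configured range must be rejected.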
*/ + + config.setRange(-10, 10); + config.setInitialValue(-11); + try { + db.openSequence(txn, key, config); + fail(); + } catch (IllegalArgumentException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("out of range") >= 0); + } + config.setInitialValue(11); + try { + db.openSequence(txn, key, config); + fail(); + } catch (IllegalArgumentException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("out of range") >= 0); + } + + /* Cache size must be within range. */ + + config.setRange(-10, 10); + config.setCacheSize(21); + config.setInitialValue(0); + try { + db.openSequence(txn, key, config); + fail(); + } catch (IllegalArgumentException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("cache size is larger") >= 0); + } + + /* Create with legal range values. */ + + config.setRange(1, 2); + config.setInitialValue(1); + config.setCacheSize(0); + Sequence seq = db.openSequence(txn, key, config); + + /* Key must not exist if ExclusiveCreate=true. */ + + config.setExclusiveCreate(true); + try { + db.openSequence(txn, key, config); + fail(); + } catch (DatabaseException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("already exists") >= 0); + } + config.setExclusiveCreate(false); + seq.close(); + + /* Key must exist if AllowCreate=false. */ + + db.removeSequence(txn, key); + config.setAllowCreate(false); + try { + db.openSequence(txn, key, config); + fail(); + } catch (DatabaseException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("does not exist") >= 0); + } + + /* Check wrapping not allowed. */ + + db.removeSequence(txn, key); + config.setAllowCreate(true); + config.setRange(-5, 5); + config.setInitialValue(-5); + seq = db.openSequence(txn, key, config); + for (long i = config.getRangeMin(); i <= config.getRangeMax(); i++) { + assertEquals(i, seq.get(txn, 1)); + } + try { + seq.get(txn, 1); + fail(); + } catch (DatabaseException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("overflow") >= 0); + } + + /* Check wrapping not allowed, decrement. */ + + db.removeSequence(txn, key); + config.setAllowCreate(true); + config.setAllowCreate(true); + config.setRange(-5, 5); + config.setInitialValue(5); + config.setDecrement(true); + seq = db.openSequence(txn, key, config); + for (long i = config.getRangeMax(); i >= config.getRangeMin(); i--) { + assertEquals(i, seq.get(txn, 1)); + } + try { + seq.get(txn, 1); + fail(); + } catch (DatabaseException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("overflow") >= 0); + } + + /* Check delta less than one. */ + try { + seq.get(txn, 0); + fail(); + } catch (IllegalArgumentException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("greater than zero") >= 0); + } + + /* Check delta greater than range. 
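+ * The range is [-5, 5], so a delta of 11 exceeds the entire range.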
*/ + try { + seq.get(txn, 11); + fail(); + } catch (IllegalArgumentException expected) { + String msg = expected.getMessage(); + assertTrue(msg, msg.indexOf("larger than the range") >= 0); + } + + seq.close(); + txnCommit(txn); + db.close(); + } + + @Test + public void testBasic() + throws DatabaseException { + + Database db = openDb("foo"); + DatabaseEntry key = new DatabaseEntry(new byte[0]); + DatabaseEntry data = new DatabaseEntry(); + + SequenceConfig config = new SequenceConfig(); + config.setAllowCreate(true); + + Transaction txn = txnBegin(); + Sequence seq = db.openSequence(txn, key, config); + txnCommit(txn); + + txn = txnBegin(); + + /* Check default values before calling get(). */ + + SequenceStats stats = seq.getStats(null); + assertEquals(0, stats.getCurrent()); + assertEquals(0, stats.getCacheSize()); + assertEquals(0, stats.getNGets()); + assertEquals(Long.MIN_VALUE, stats.getMin()); + assertEquals(Long.MAX_VALUE, stats.getMax()); + + /* Get the first value. */ + + long val = seq.get(txn, 1); + assertEquals(0, val); + stats = seq.getStats(null); + assertEquals(1, stats.getCurrent()); + assertEquals(1, stats.getValue()); + assertEquals(0, stats.getLastValue()); + assertEquals(1, stats.getNGets()); + + /* Use deltas greater than one. */ + + assertEquals(1, seq.get(txn, 2)); + assertEquals(3, seq.get(txn, 3)); + assertEquals(6, seq.get(txn, 1)); + assertEquals(7, seq.get(txn, 1)); + + /* Remove a sequence and expect the key to be deleted. */ + + seq.close(); + db.removeSequence(txn, key); + assertEquals(OperationStatus.NOTFOUND, db.get(txn, key, data, null)); + txnCommit(txn); + assertEquals(OperationStatus.NOTFOUND, db.get(null, key, data, null)); + + db.close(); + } + + @Test + public void testMultipleHandles() + throws DatabaseException { + + Database db = openDb("foo"); + DatabaseEntry key = new DatabaseEntry(new byte[0]); + + /* Create a sequence. */ + + SequenceConfig config = new SequenceConfig(); + config.setAllowCreate(true); + config.setDecrement(true); + config.setRange(1, 3); + config.setInitialValue(3); + + Transaction txn = txnBegin(); + Sequence seq = db.openSequence(txn, key, config); + assertEquals(3, seq.get(txn, 1)); + txnCommit(txn); + + /* Open another handle on the same sequence -- config should match. */ + + txn = txnBegin(); + Sequence seq2 = db.openSequence(txn, key, config); + assertEquals(2, seq2.get(txn, 1)); + txnCommit(txn); + + SequenceStats stats = seq2.getStats(null); + assertEquals(1, stats.getCurrent()); + assertEquals(1, stats.getMin()); + assertEquals(3, stats.getMax()); + + /* Values are assigned from a single sequence for both handles. */ + + assertEquals(1, seq.get(null, 1)); + + seq.close(); + seq2.close(); + db.close(); + } + + @Test + public void testRanges() + throws DatabaseException { + + Database db = openDb("foo"); + + /* Positive and negative ranges. */ + + doRange(db, 1, 10, 1, 0); + doRange(db, -10, -1, 1, 0); + doRange(db, -10, 10, 1, 0); + + /* Extreme min/max values. */ + + doRange(db, Integer.MIN_VALUE, Integer.MIN_VALUE + 10, 1, 0); + doRange(db, Integer.MAX_VALUE - 10, Integer.MAX_VALUE, 1, 0); + + doRange(db, Long.MIN_VALUE, Long.MIN_VALUE + 10, 1, 0); + doRange(db, Long.MAX_VALUE - 10, Long.MAX_VALUE, 1, 0); + + /* Deltas greater than one. */ + + doRange(db, -10, 10, 2, 0); + doRange(db, -10, 10, 3, 0); + doRange(db, -10, 10, 5, 0); + doRange(db, -10, 10, 10, 0); + doRange(db, -10, 10, 20, 0); + + /* + * Cache sizes. 
We cheat a little by making the cache size an even
+         * multiple of the delta whenever the cache size is greater than the
+         * delta; otherwise, it is too difficult to predict caching.
+         */
+
+        doRange(db, -10, 10, 1, 1);
+        doRange(db, -10, 10, 1, 2);
+        doRange(db, -10, 10, 1, 3);
+        doRange(db, -10, 10, 1, 7);
+        doRange(db, -10, 10, 1, 20);
+        doRange(db, -10, 10, 3, 1);
+        doRange(db, -10, 10, 3, 2);
+        doRange(db, -10, 10, 3, 3);
+        doRange(db, -10, 10, 3, 9);
+        doRange(db, -10, 10, 3, 18);
+
+        db.close();
+    }
+
+    private void doRange(Database db, long min, long max, int delta, int cache)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        boolean incr;
+        boolean wrap;
+
+        for (int option = 0; option < 4; option += 1) {
+            switch (option) {
+            case 0:
+                incr = true;
+                wrap = false;
+                break;
+            case 1:
+                incr = true;
+                wrap = true;
+                break;
+            case 2:
+                incr = false;
+                wrap = false;
+                break;
+            case 3:
+                incr = false;
+                wrap = true;
+                break;
+            default:
+                throw new IllegalStateException();
+            }
+
+            SequenceConfig config = new SequenceConfig();
+            config.setAllowCreate(true);
+            config.setInitialValue(incr ? min : max);
+            config.setWrap(wrap);
+            config.setDecrement(!incr);
+            config.setRange(min, max);
+            config.setCacheSize(cache);
+
+            String msg =
+                "incr=" + incr +
+                " wrap=" + wrap +
+                " min=" + min +
+                " max=" + max +
+                " delta=" + delta +
+                " cache=" + cache;
+
+            Transaction txn = txnBegin();
+            db.removeSequence(txn, key);
+            Sequence seq = db.openSequence(txn, key, config);
+            txnCommit(txn);
+
+            txn = txnBegin();
+
+            if (incr) {
+                for (long i = min;; i += delta) {
+
+                    boolean expectCached = false;
+                    if (cache != 0) {
+                        expectCached = delta < cache && i != max &&
+                            (((i - min) % cache) != 0);
+                    }
+
+                    doOne(msg, seq, txn, delta, i, expectCached);
+
+                    /* Test for end without causing long overflow. */
+                    if (i > max - delta) {
+                        if (delta == 1) {
+                            assertEquals(msg, i, max);
+                        }
+                        break;
+                    }
+                }
+                if (wrap) {
+                    assertEquals(msg, min, seq.get(txn, delta));
+                    assertEquals(msg, min + delta, seq.get(txn, delta));
+                }
+            } else {
+                for (long i = max;; i -= delta) {
+
+                    boolean expectCached = false;
+                    if (cache != 0) {
+                        expectCached = delta < cache && i != min &&
+                            (((max - i) % cache) != 0);
+                    }
+
+                    doOne(msg, seq, txn, delta, i, expectCached);
+
+                    /* Test for end without causing long overflow. */
+                    if (i < min + delta) {
+                        if (delta == 1) {
+                            assertEquals(msg, i, min);
+                        }
+                        break;
+                    }
+                }
+                if (wrap) {
+                    assertEquals(msg, max, seq.get(txn, delta));
+                    assertEquals(msg, max - delta, seq.get(txn, delta));
+                }
+            }
+
+            if (!wrap) {
+                try {
+                    seq.get(txn, delta);
+                    fail(msg);
+                } catch (DatabaseException expected) {
+                    String emsg = expected.getMessage();
+                    assertTrue(emsg, emsg.indexOf("overflow") >= 0);
+                }
+            }
+
+            txnCommit(txn);
+            seq.close();
+        }
+    }
+
+    private void doOne(String msg,
+                       Sequence seq,
+                       Transaction txn,
+                       int delta,
+                       long expectValue,
+                       boolean expectCached)
+        throws DatabaseException {
+
+        msg += " value=" + expectValue;
+
+        try {
+            assertEquals(msg, expectValue, seq.get(txn, delta));
+        } catch (DatabaseException e) {
+            fail(msg + ' ' + e);
+        }
+
+        StatsConfig clearConfig = new StatsConfig();
+        clearConfig.setFast(true);
+        clearConfig.setClear(true);
+        SequenceStats stats = seq.getStats(clearConfig);
+
+        assertEquals(msg, 1, stats.getNGets());
+        assertEquals(msg, expectCached ?
1 : 0, stats.getNCachedGets()); + } + + private Database openDb(String name) + throws DatabaseException { + + return openDb(name, false); + } + + private Database openDb(String name, boolean duplicates) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(duplicates); + + Transaction txn = txnBegin(); + try { + return env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + } +} diff --git a/test/com/sleepycat/je/test/SkipTest.java b/test/com/sleepycat/je/test/SkipTest.java new file mode 100644 index 0000000..1d64fca --- /dev/null +++ b/test/com/sleepycat/je/test/SkipTest.java @@ -0,0 +1,365 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class SkipTest extends TestBase { + + private static final boolean DEBUG = false; + + private static final int[] nDupsArray = + {2, 3, 20, 50, 500, 5000, 500, 50, 20, 3, 2}; + private static final int grandTotal; + static { + int total = 0; + for (int i = 0; i < nDupsArray.length; i += 1) { + total += nDupsArray[i]; + } + grandTotal = total; + } + + private final File envHome; + private Environment env; + private Database db; + + public SkipTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + try { + if (env != null) { + env.close(); + } + } catch (Throwable e) { + System.out.println("during tearDown: " + e); + } + } + + private void openEnv() { + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + env = new Environment(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void closeEnv() { + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + * Basic test that writes data and checks it. 
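+     * (Hedged sketch of the API under test, using only calls exercised in
+     * this file: position a cursor, then skip over intervening records
+     * without materializing each one --
+     *   Cursor c = db.openCursor(null, null);
+     *   c.getFirst(key, data, null);
+     *   long n = c.skipNext(10, key, data, null);  // returns 0..10
+     * -- the return value is the number of records actually skipped, and
+     * zero means no record exists in that direction.)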
+ */ + @Test + public void testSkip() { + openEnv(); + insertData(); + checkData(); + closeEnv(); + } + + /** + * Adds uncompressed deleted slots in between existing data records, + * checks it. + */ + @Test + public void testUncompressedDeletions() { + openEnv(); + insertData(); + insertAndDelete(); + checkData(); + closeEnv(); + } + + /** + * Passes a larger number for maxCount than the number of records before + * (after) the cursor position. Includes testing a zero return. + */ + @Test + public void testSkipPastEnd() { + + openEnv(); + insertData(); + insertAndDelete(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + final int lastKey = nDupsArray.length - 1; + final int lastData = nDupsArray[lastKey] - 1; + final int firstKey = 0; + final int firstData = 0; + + long total = 0; + final Cursor c = db.openCursor(null, null); + while (c.getNext(key, data, null) == OperationStatus.SUCCESS) { + total += 1; + + /* Skip forward to last record. */ + Cursor c2 = c.dup(true /*samePosition*/); + key.setData(null); + data.setData(null); + long count = c2.skipNext(grandTotal, key, data, null); + assertEquals(grandTotal - total, count); + if (count == 0) { + assertNull(key.getData()); + assertNull(data.getData()); + } else { + assertEquals(lastKey, IntegerBinding.entryToInt(key)); + assertEquals(lastData, IntegerBinding.entryToInt(data)); + } + c2.close(); + + /* Skip backward to first record. */ + c2 = c.dup(true /*samePosition*/); + key.setData(null); + data.setData(null); + count = c2.skipPrev(grandTotal, key, data, null); + assertEquals(total - 1, count); + if (count == 0) { + assertNull(key.getData()); + assertNull(data.getData()); + } else { + assertEquals(firstKey, IntegerBinding.entryToInt(key)); + assertEquals(firstData, IntegerBinding.entryToInt(data)); + } + c2.close(); + } + c.close(); + closeEnv(); + } + + /** + * Tests exceptions. + */ + @Test + public void testExceptions() { + openEnv(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + key.setData(new byte[1]); + data.setData(new byte[1]); + OperationStatus status = db.putNoDupData(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + data.setData(new byte[2]); + status = db.putNoDupData(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Cursor must be initialized to call skip. */ + Cursor c = db.openCursor(null, null); + try { + c.skipNext(1, key, data, null); + fail(); + } catch (IllegalStateException expected) { + } + try { + c.skipPrev(1, key, data, null); + fail(); + } catch (IllegalStateException expected) { + } + + /* Passing maxCount LTE zero not allowed. */ + status = c.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + try { + c.skipNext(0, key, data, null); + fail(); + } catch (IllegalArgumentException expected) { + } + try { + c.skipPrev(0, key, data, null); + fail(); + } catch (IllegalArgumentException expected) { + } + try { + c.skipNext(-1, key, data, null); + fail(); + } catch (IllegalArgumentException expected) { + } + try { + c.skipPrev(-1, key, data, null); + fail(); + } catch (IllegalArgumentException expected) { + } + + c.close(); + closeEnv(); + } + + /** + * Inserts duplicates according to nDupsArray, where the key is the index + * into nDupsArray (0 to nDupsArray.length-1), and the data is 0 to N, + * where N is the number of dups for that key (nDupsArray[index]). 
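+     * (So for nDupsArray[0] == 2, the records are {key=0, data=0} and
+     * {key=0, data=1}; summing nDupsArray gives grandTotal, the full record
+     * count that testSkipPastEnd skips across.)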
+ */ + private void insertData() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < nDupsArray.length; i += 1) { + final int nDups = nDupsArray[i]; + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < nDups; j += 1) { + IntegerBinding.intToEntry(j, data); + final OperationStatus status = db.putNoDupData(null, key, + data); + assertSame(OperationStatus.SUCCESS, status); + } + } + } + + /** + * Inserts records between each two records inserted earlier, and then + * deletes the new records. This leaves uncompressed deleted slots, which + * should be ignored by the skip methods. + */ + private void insertAndDelete() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + final Cursor c = db.openCursor(null, null); + while (c.getNext(key, data, null) == OperationStatus.SUCCESS) { + final byte[] a = new byte[data.getSize() + 1]; + System.arraycopy(data.getData(), 0, a, 0, data.getSize()); + data.setData(a); + OperationStatus status = c.putNoDupData(key, data); + assertSame(OperationStatus.SUCCESS, status); + status = c.delete(); + assertSame(OperationStatus.SUCCESS, status); + } + c.close(); + } + + /** + * Starts at the beginning (end) of the database and skips to the beginning + * and end of each duplicate set. + */ + private void checkData() { + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + /* Skip in forward direction. */ + final Cursor c = db.openCursor(null, null); + long total = nDupsArray[0]; + for (int i = 1; i < nDupsArray.length; i += 1) { + + /* Move to first record. */ + final OperationStatus status = c.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + + long skipInit = total; + total += nDupsArray[i]; + + for (int j = i; j < nDupsArray.length; j += 1) { + final long dupsMinusOne = nDupsArray[j] - 1; + + /* Skip to first dup. */ + key.setData(null); + data.setData(null); + long count = c.skipNext(skipInit, key, data, null); + assertEquals(skipInit, count); + assertEquals(j, IntegerBinding.entryToInt(key)); + assertEquals(0, IntegerBinding.entryToInt(data)); + + /* Skip to last dup. */ + key.setData(null); + data.setData(null); + count = c.skipNext(dupsMinusOne, key, data, null); + assertEquals(dupsMinusOne, count); + assertEquals(j, IntegerBinding.entryToInt(key)); + assertEquals(dupsMinusOne, IntegerBinding.entryToInt(data)); + + skipInit = 1; + } + } + + /* Skip in reverse direction. */ + total = nDupsArray[nDupsArray.length - 1]; + for (int i = nDupsArray.length - 2; i >= 0; i -= 1) { + + /* Move to last record. */ + final OperationStatus status = c.getLast(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + + long skipInit = total; + total += nDupsArray[i]; + + for (int j = i; j >= 0; j -= 1) { + final long dupsMinusOne = nDupsArray[j] - 1; + + /* Skip to last dup. */ + key.setData(null); + data.setData(null); + long count = c.skipPrev(skipInit, key, data, null); + assertEquals(skipInit, count); + assertEquals(j, IntegerBinding.entryToInt(key)); + assertEquals(dupsMinusOne, IntegerBinding.entryToInt(data)); + + /* Skip to first dup. 
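+                 * Skipping dupsMinusOne records back from the last
+                 * duplicate lands exactly on the first duplicate of the
+                 * same key, mirroring the forward pass above.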
*/ + key.setData(null); + data.setData(null); + count = c.skipPrev(dupsMinusOne, key, data, null); + assertEquals(dupsMinusOne, count); + assertEquals(j, IntegerBinding.entryToInt(key)); + assertEquals(0, IntegerBinding.entryToInt(data)); + + skipInit = 1; + } + } + c.close(); + } +} diff --git a/test/com/sleepycat/je/test/SpeedyTTLTime.java b/test/com/sleepycat/je/test/SpeedyTTLTime.java new file mode 100644 index 0000000..6044ced --- /dev/null +++ b/test/com/sleepycat/je/test/SpeedyTTLTime.java @@ -0,0 +1,79 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.test; + +import java.io.IOException; + +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.utilint.TestHook; + +/** + * Sets a TTL.timeTestHook that provides a time that elapses at a different + * rate than normal. Every fakeMillisPerHour after calling this method, JE + * TTL processing will behave as if one hour has elapsed. + * + * In unit tests using this class, add the following to tearDown: + * TTL.setTimeTestHook(null); + */ +public class SpeedyTTLTime { + + private final long fakeMillisPerHour; + private long baseTime; + + public SpeedyTTLTime(final long fakeMillisPerHour) { + this.fakeMillisPerHour = fakeMillisPerHour; + } + + public long realTimeToFakeTime(final long realTime) { + + assert realTime > baseTime; + + final long elapsed = realTime - baseTime; + + return baseTime + + (TTL.MILLIS_PER_HOUR * (elapsed / fakeMillisPerHour)); + + } + + public void start() { + baseTime = System.currentTimeMillis(); + + TTL.setTimeTestHook(new TestHook() { + + @Override + public Long getHookValue() { + return realTimeToFakeTime(System.currentTimeMillis()); + } + + @Override + public void hookSetup() { + throw new UnsupportedOperationException(); + } + + @Override + public void doIOHook() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook() { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook(Long obj) { + throw new UnsupportedOperationException(); + } + }); + } +} diff --git a/test/com/sleepycat/je/test/TTLTest.java b/test/com/sleepycat/je/test/TTLTest.java new file mode 100644 index 0000000..51199e6 --- /dev/null +++ b/test/com/sleepycat/je/test/TTLTest.java @@ -0,0 +1,1933 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.text.SimpleDateFormat; +import java.util.Calendar; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TimeZone; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DiskOrderedCursor; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Get; +import com.sleepycat.je.JEVersion; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.PreloadConfig; +import com.sleepycat.je.PreloadStats; +import com.sleepycat.je.Put; +import com.sleepycat.je.ReadOptions; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.SecondaryMultiKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.utilint.PollCondition; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.TxnTestCase; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Tests TTL functionality. 
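+ * (A hedged orientation for the reader, using only APIs exercised below:
+ * a TTL is attached at write time and the resulting expiration surfaces on
+ * the operation result --
+ *   WriteOptions wo = new WriteOptions().setTTL(2, TimeUnit.HOURS);
+ *   OperationResult r = db.put(null, key, data, Put.OVERWRITE, wo);
+ *   long expiresMs = r.getExpirationTime();  // rounded up to the hour
+ * -- once that time passes, reads filter the record out and the cleaner
+ * and compressor reclaim its space.)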
+ */ +@RunWith(Parameterized.class) +public class TTLTest extends TxnTestCase { + + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + private static final SimpleDateFormat TIME_FORMAT = + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.zzz"); + + private static final Calendar BASE_CAL = Calendar.getInstance(UTC); + + public static volatile long fixedSystemTime = 0; + + static { + TIME_FORMAT.setTimeZone(UTC); + + BASE_CAL.set(2016, Calendar.FEBRUARY, + 3 /*day*/, 4 /*hour*/, 5 /*min*/, 6 /*sec*/); + } + + private static final long BASE_MS = BASE_CAL.getTimeInMillis(); + + private Database db; + + @Parameters + public static List genParams() { + + return getTxnParams(null, false /*rep*/); + +// return getTxnParams( +// new String[] {TxnTestCase.TXN_USER}, +// false /*rep*/); + } + + public TTLTest(String type) { + + initEnvConfig(); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + txnType = type; + isTransactional = !txnType.equals(TXN_NULL); + customName = txnType; + } + + @After + @Override + public void tearDown() throws Exception { + db = null; + super.tearDown(); + TTL.setTimeTestHook(null); + fixedSystemTime = 0; + } + + /** + * Tests TTL static functions. + */ + @Test + public void testTimeCalculations() { + + /* + * Ensure that when dividing the system time by MILLIS_PER_DAY and + * MILLIS_PER_HOUR you really get calendars days and hours, and there + * isn't some funny business having to do with leap seconds or another + * date/time anomaly. + */ + Calendar cal = cloneCalendar(BASE_CAL); + int calDays = 0; + + while (cal.getTimeInMillis() > 0) { + cal.add(Calendar.DAY_OF_MONTH, -1); + calDays += 1; + } + + assertEquals( + calDays, + (BASE_MS + TTL.MILLIS_PER_DAY - 1) / TTL.MILLIS_PER_DAY); + + cal = cloneCalendar(BASE_CAL); + int calHours = 0; + + while (cal.getTimeInMillis() > 0) { + cal.add(Calendar.HOUR_OF_DAY, -1); + calHours += 1; + } + + assertEquals( + calHours, + (BASE_MS + TTL.MILLIS_PER_HOUR - 1) / TTL.MILLIS_PER_HOUR); + + /* Test with legal non-zero values. */ + + for (final boolean hours : new boolean[]{false, true}) { + + for (final int ttl : + new int[]{1, 2, 23, 24, 25, 364, 365, 366, 500, 10000}) { + + final TimeUnit ttlUnit = + hours ? TimeUnit.HOURS : TimeUnit.DAYS; + + setFixedTimeHook(BASE_MS); + + cal = cloneCalendar(BASE_CAL); + + cal.add( + hours ? Calendar.HOUR_OF_DAY : Calendar.DAY_OF_MONTH, + ttl); + + final long calMsBeforeRoundingUp = cal.getTimeInMillis(); + + cal.add( + hours ? 
Calendar.HOUR_OF_DAY : Calendar.DAY_OF_MONTH, + 1 /* round up */); + + if (hours) { + truncateToHours(cal); + } else { + truncateToDays(cal); + } + + final long calMs = cal.getTimeInMillis(); + + final int expiration = TTL.ttlToExpiration(ttl, ttlUnit); + + final long expireSystemMs = + TTL.expirationToSystemTime(expiration, hours); + + final String label = + "ttl = " + ttl + " hours = " + hours + + " baseMs = " + BASE_MS + + " expect = " + TIME_FORMAT.format(calMs) + + " got = " + TIME_FORMAT.format(expireSystemMs); + + assertEquals(label, calMs, expireSystemMs); + + assertEquals(label, hours, TTL.isSystemTimeInHours(calMs)); + + assertEquals( + label, + expiration, TTL.systemTimeToExpiration(calMs, hours)); + + assertEquals( + ttl, + new WriteOptions().setExpirationTime( + calMsBeforeRoundingUp, ttlUnit).getTTL()); + + assertSame( + ttlUnit, + new WriteOptions().setExpirationTime( + expireSystemMs, null).getTTLUnit()); + + assertFalse(label, TTL.isExpired(expiration, hours)); + fixedSystemTime = expireSystemMs; + assertFalse(label, TTL.isExpired(expiration, hours)); + fixedSystemTime = expireSystemMs + 1; + assertTrue(label, TTL.isExpired(expiration, hours)); + } + } + + /* Test legal zero values. */ + + assertEquals(0, TTL.ttlToExpiration(0, null)); + assertFalse(TTL.isExpired(0, false)); + assertFalse(TTL.isExpired(0, true)); + + /* Test with illegal values. */ + + try { + TTL.ttlToExpiration(-1, TimeUnit.DAYS); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Illegal ttl value")); + } + + final Set badUnits = + new HashSet<>(EnumSet.allOf(TimeUnit.class)); + + badUnits.remove(TimeUnit.DAYS); + badUnits.remove(TimeUnit.HOURS); + badUnits.add(null); + + for (final TimeUnit unit : badUnits) { + try { + TTL.ttlToExpiration(1, unit); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("ttlUnits not allowed")); + } + } + } + + private static Calendar cloneCalendar(Calendar cal) { + Calendar clone = Calendar.getInstance(cal.getTimeZone()); + clone.setTimeInMillis(cal.getTimeInMillis()); + return clone; + } + + private static Calendar truncateToDays(Calendar cal) { + cal.set(Calendar.HOUR_OF_DAY, 0); + truncateToHours(cal); + return cal; + } + + private static Calendar truncateToHours(Calendar cal) { + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + return cal; + } + + /** + * Tests storage and retrieval of expiration time. + */ + @Test + public void testExpirationTimeStorage() + throws FileNotFoundException { + + db = openDb("foo"); + + setFixedTimeHook(BASE_MS); + + /* + * Insert with and without a TTL. Units will be HOURS in all LNs, BINs. + * The BIN's baseExpiration will change when we write record 3, because + * it has a lower ttl than record 1. + */ + write(1, 30, TimeUnit.DAYS); + write(2, 0, TimeUnit.DAYS); + write(3, 20, TimeUnit.DAYS); + write(4, 0, TimeUnit.DAYS); + + read(1, 30, TimeUnit.DAYS); + read(2, 0, TimeUnit.DAYS); + read(3, 20, TimeUnit.DAYS); + read(4, 0, TimeUnit.DAYS); + + /* + * Updating with and without changing the TTL. When false is passed + * for updateTtl we add 10 to the ttl and check that it doesn't change. 
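+         * (Hedged restatement: WriteOptions.setUpdateTTL(true) is what
+         * makes an overwrite change the stored expiration; without it, the
+         * write() helper below deliberately passes setTTL(ttl + 10, unit)
+         * and then asserts that the original expiration survives.)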
+ */ + write(3, 20, TimeUnit.DAYS, TimeUnit.DAYS, false /*updateTtl*/); + write(3, 0, TimeUnit.DAYS); + write(3, 25, TimeUnit.DAYS); + + read(1, 30, TimeUnit.DAYS); + read(2, 0, TimeUnit.DAYS); + read(3, 25, TimeUnit.DAYS); + read(4, 0, TimeUnit.DAYS); + + /* + * Insert record 4 with HOURS and the BIN units in all other entries + * will change to hours. + */ + write(4, 17, TimeUnit.HOURS); + + read(1, 30, TimeUnit.DAYS, TimeUnit.HOURS); + read(2, 0, TimeUnit.DAYS, TimeUnit.HOURS); + read(3, 25, TimeUnit.DAYS, TimeUnit.HOURS); + read(4, 17, TimeUnit.HOURS, TimeUnit.HOURS); + + /* + * Special case: Change BIN expirationBase at same time as changing + * units from DAYS to HOURS. Use a new empty database. + */ + db.close(); + db = openDb("bar"); + + write(1, 2, TimeUnit.DAYS); + write(2, 30, TimeUnit.HOURS); + + read(1, 2, TimeUnit.DAYS, TimeUnit.HOURS); + read(2, 30, TimeUnit.HOURS, TimeUnit.HOURS); + + /* Close and recover. */ + db.close(); + closeEnv(); + openEnv(); + + db = openDb("foo"); + + read(1, 30, TimeUnit.DAYS, TimeUnit.HOURS); + read(2, 0, TimeUnit.DAYS, TimeUnit.HOURS); + read(3, 25, TimeUnit.DAYS, TimeUnit.HOURS); + read(4, 17, TimeUnit.HOURS, TimeUnit.HOURS); + + db.close(); + db = openDb("bar"); + + read(1, 2, TimeUnit.DAYS, TimeUnit.HOURS); + read(2, 30, TimeUnit.HOURS, TimeUnit.HOURS); + + db.close(); + } + + /** + * Tests undo/redo of expiration time. Abort and recovery are tested, but + * not rollback. + */ + @Test + public void testUndoAndRedo() + throws FileNotFoundException { + + if (!isTransactional) { + return; + } + + db = openDb("foo"); + + setFixedTimeHook(BASE_MS); + + /* + * Insert a single record with 2 DAYS, then update it and change its + * ttl to 30 HOURS, but abort the update. The previous ttl is restored + * by the abort, including the BIN units because it is the only record + * in the BIN. + */ + write(1, 2, TimeUnit.DAYS); + + Transaction txn = txnBeginCursor(); + write(txn, 1, 30, TimeUnit.HOURS, TimeUnit.HOURS, true /*updateTtl*/); + txnAbort(txn); + + read(1, 2, TimeUnit.DAYS); + + /* + * Insert another record using DAYS, then repeat the update with abort. + * In this case the BIN units stays set to HOURS. We don't currently + * convert existing records back from HOURS to DAYS. + */ + write(2, 3, TimeUnit.DAYS); + + txn = txnBeginCursor(); + write(txn, 1, 20, TimeUnit.HOURS, TimeUnit.HOURS, true /*updateTtl*/); + txnAbort(txn); + + read(1, 2, TimeUnit.DAYS, TimeUnit.HOURS); + read(2, 3, TimeUnit.DAYS, TimeUnit.HOURS); + + /* + * Crash and recover. The BIN unit is restored to DAYS because only the + * committed LNs are replayed. + */ + env.flushLog(false); + abnormalClose(env); + openEnv(); + db = openDb("foo"); + + read(1, 2, TimeUnit.DAYS); + read(2, 3, TimeUnit.DAYS); + + /* + * Insert one committed record, and do an update that is a aborted. + * Then "crash" and recover, causing one redo and one undo. + */ + write(3, 10, TimeUnit.DAYS); + + txn = txnBeginCursor(); + write(txn, 2, 6, TimeUnit.HOURS, TimeUnit.HOURS, true /*updateTtl*/); + txnAbort(txn); + + read(1, 2, TimeUnit.DAYS, TimeUnit.HOURS); + read(2, 3, TimeUnit.DAYS, TimeUnit.HOURS); + read(3, 10, TimeUnit.DAYS, TimeUnit.HOURS); + + env.flushLog(false); + abnormalClose(env); + openEnv(); + db = openDb("foo"); + + /* Recovery happens to restore the units to DAYS further above. */ + read(1, 2, TimeUnit.DAYS); + read(2, 3, TimeUnit.DAYS); + read(3, 10, TimeUnit.DAYS); + + db.close(); + } + + /** + * Tests that the lazyCompress method removes expired slots correctly. 
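+     * (Hedged aside on the method under test: "lazy" compression removes
+     * expired slots opportunistically while the BIN is already latched,
+     * e.g. envImpl.lazyCompress(bin, true) also purges dirty slots, while
+     * the single-argument form leaves dirty slots for a future BIN-delta.)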
Does + * not test that lazyCompress is called at appropriate times. + */ + @Test + public void testCompression() + throws FileNotFoundException { + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + db = openDb("foo"); + + setFixedTimeHook(BASE_MS); + + /* Insert 100 records with a ttl equal to their key. */ + for (int i = 1; i <= 100; i += 1) { + write(i, i, TimeUnit.DAYS); + } + + final BIN bin = getFirstBIN(); + assertEquals(100, bin.getNEntries()); + + /* + * Incrementing the clock by one day at a time will purge one slot at a + * time. We're compressing dirty slots here, just for testing. + */ + bin.latch(); + try { + for (int i = 0; i <= 100; i += 1) { + fixedSystemTime += TTL.MILLIS_PER_DAY; + envImpl.lazyCompress(bin, true /*compressDirtySlots*/); + assertEquals(100 - i, bin.getNEntries()); + } + } finally { + bin.releaseLatch(); + } + + /* + * Insert 100 records with a 1 day ttl, then sync to flush the full + * BIN and clear the slot dirty flags, then insert 10 more records + * that will be dirty and would cause logging as a BIN-delta. + */ + fixedSystemTime = BASE_MS; + + for (int i = 1; i <= 100; i += 1) { + write(i, 1, TimeUnit.DAYS); + } + + env.sync(); + + for (int i = 101; i <= 110; i += 1) { + write(i, 1, TimeUnit.DAYS); + } + + /* + * lazyCompress with no second param will not purge dirty slots, + * since a BIN-delta should be logged. Calling again to compress all + * slots will purge the rest. + */ + bin.latch(); + try { + fixedSystemTime += 2 * TTL.MILLIS_PER_DAY; + envImpl.lazyCompress(bin); + assertEquals(10, bin.getNEntries()); + envImpl.lazyCompress(bin, true /*compressDirtySlots*/); + assertEquals(0, bin.getNEntries()); + } finally { + bin.releaseLatch(); + } + + db.close(); + } + + /** + * Tests that the expiration time is preserved by BIN-deltas. + */ + @Test + public void testBINDelta() + throws FileNotFoundException { + + db = openDb("foo"); + setFixedTimeHook(BASE_MS); + + /* + * Insert 100 records with a a ttl equal to their key, then sync to + * flush the full BIN and clear the slot dirty flags, then insert 10 + * more records that will be dirty and would cause logging as a + * BIN-delta. + */ + for (int i = 1; i <= 100; i += 1) { + write(i, i, TimeUnit.DAYS); + } + + final BIN bin = getFirstBIN(); + assertEquals(100, bin.getNEntries()); + + env.sync(); + + for (int i = 101; i <= 110; i += 1) { + write(i, i, TimeUnit.DAYS); + } + + assertEquals(110, bin.getNEntries()); + + for (int i = 1; i <= 110; i += 1) { + read(i, i, TimeUnit.DAYS); + } + + /* + * Check that BIN-delta has 10 slots with correct expiration. + */ + bin.latch(); + bin.mutateToBINDelta(); + bin.releaseLatch(); + + assertEquals(10, bin.getNEntries()); + + for (int i = 101; i <= 110; i += 1) { + read(i, i, TimeUnit.DAYS); + } + + /* + * Check that reconstituted BIN has all slots with correct expiration. + */ + bin.latch(); + bin.mutateToFullBIN(false /*leaveSlotFree*/); + bin.releaseLatch(); + + for (int i = 1; i <= 110; i += 1) { + read(i, i, TimeUnit.DAYS); + } + + db.close(); + } + + /** + * Tests that expired records are filtered out of queries. + */ + @Test + public void testFiltering() + throws FileNotFoundException { + + db = openDb("foo"); + + /* + * Insert 11 records with a ttl equal to their key. + * Record 0 has no TTL. 
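+         * (A TTL of zero means the record never expires, so record 0
+         * should survive every clock advance below while records 1..10
+         * drop out one per simulated day.)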
+ */ + setFixedTimeHook(BASE_MS); + + final Map expected = new HashMap<>(); + for (int i = 0; i <= 10; i += 1) { + final long expTime = write(i, i, TimeUnit.DAYS); + expected.put(i, expTime); + } + + final BIN bin = getFirstBIN(); + assertEquals(11, bin.getNEntries()); + + /* + * Incrementing the clock by one day at a time will cause one slot at a + * time to expire, starting from record 1. Compression will not occur, + * but expired records will be filtered out of queries. + */ + for (int i = 0; i <= 10; i += 1) { + + fixedSystemTime += TTL.MILLIS_PER_DAY; + + if (i != 0) { + expected.remove(i); + } + + for (int j = 0; j <= 10; j += 1) { + if (expected.containsKey(j)) { + readExpectFound(j); + } else { + readExpectNotFound(j); + } + } + + diskOrderedScan(expected); + + final PreloadStats stats = + db.preload(new PreloadConfig().setLoadLNs(true)); + + assertEquals( + 11 - i, stats.getNLNsLoaded() + stats.getNEmbeddedLNs()); + + /* Make sure compression did not occur. */ + assertEquals(11, bin.getNEntries()); + } + + db.close(); + } + + private long write(final int key, final int ttl, final TimeUnit ttlUnits) + throws FileNotFoundException { + + return write(key, ttl, ttlUnits, ttlUnits, true /*updateTtl*/); + } + + private long write(final int key, + final int ttl, + final TimeUnit ttlUnits, + final TimeUnit binUnits, + final boolean updateTtl) + throws FileNotFoundException { + + final Transaction txn = txnBeginCursor(); + write(txn, key, ttl, ttlUnits, binUnits, updateTtl); + txnCommit(txn); + + return read(key, ttl, ttlUnits, binUnits); + } + + private void write(final Transaction txn, + final int key, + final int ttl, + final TimeUnit ttlUnits, + final TimeUnit binUnits, + final boolean updateTtl) + throws FileNotFoundException { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + IntegerBinding.intToEntry(key, keyEntry); + + final WriteOptions options = new WriteOptions(); + + if (updateTtl) { + if (ttlUnits == TimeUnit.DAYS) { + options.setTTL(ttl); + } else { + options.setTTL(ttl, ttlUnits); + } + options.setUpdateTTL(true); + } else { + options.setTTL(ttl + 10, ttlUnits); + } + + IntegerBinding.intToEntry(ttl, dataEntry); + + final OperationResult result = db.put( + txn, keyEntry, dataEntry, Put.OVERWRITE, options); + + assertNotNull(result); + + checkedStoredTtl(txn, keyEntry, ttl, ttlUnits, binUnits); + } + + private long read(final int key, + final int ttl, + final TimeUnit ttlUnits) + throws FileNotFoundException { + + return read(key, ttl, ttlUnits, ttlUnits); + } + + private long read(final int key, + final int ttl, + final TimeUnit ttlUnits, + final TimeUnit binUnits) + throws FileNotFoundException { + + final Transaction txn = txnBeginCursor(); + final Cursor cursor = db.openCursor(txn, null); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + IntegerBinding.intToEntry(key, keyEntry); + + final OperationResult result = + cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + + assertNotNull(result); + assertEquals(ttl, IntegerBinding.entryToInt(dataEntry)); + + checkedStoredTtl(txn, keyEntry, ttl, ttlUnits, binUnits); + + cursor.close(); + txnCommit(txn); + + return result.getExpirationTime(); + } + + private void checkedStoredTtl(final Transaction txn, + final DatabaseEntry keyEntry, + final int ttl, + final TimeUnit ttlUnits, + final TimeUnit binUnits) + throws FileNotFoundException { + + final boolean ttlHours = (ttlUnits == TimeUnit.HOURS); + 
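+        /*
+         * (Hedged note: the LN stores expiration in the units the writer
+         * requested, while the containing BIN may track a different unit
+         * shared by all of its slots; both representations are verified
+         * below.)
+         */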
final boolean binHours = (binUnits == TimeUnit.HOURS); + + /* + * ttlToExpiration uses the current time, which may have changed since + * the record was inserted. This happens to be OK, because the test + * doesn't call this method after changing the time. + */ + final int ttlExpiration = TTL.ttlToExpiration(ttl, ttlUnits); + + final long expireTime = + TTL.expirationToSystemTime(ttlExpiration, ttlHours); + + OperationResult result = db.get(txn, keyEntry, null, Get.SEARCH, null); + assertNotNull(result); + + assertEquals(expireTime, result.getExpirationTime()); + + final int expiration = + TTL.systemTimeToExpiration(expireTime, binHours); + + final Cursor cursor = db.openCursor(txn, null); + result = cursor.get(keyEntry, null, Get.SEARCH, null); + assertNotNull(result); + final CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + final BIN bin = cursorImpl.getBIN(); + final int index = cursorImpl.getIndex(); + cursor.close(); + + assertEquals(binHours, bin.isExpirationInHours()); + assertEquals(expiration, bin.getExpiration(index)); + + final LNLogEntry logEntry = (LNLogEntry) + DbInternal.getNonNullEnvImpl(env). + getLogManager(). + getLogEntry(bin.getLsn(index)); + + assertEquals(ttlExpiration, logEntry.getExpiration()); + assertEquals(ttlHours, logEntry.isExpirationInHours()); + } + + private Database openDb(final String name) { + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + + final Transaction txn = txnBegin(); + try { + return env.openDatabase(txn, name, dbConfig); + } finally { + txnCommit(txn); + } + } + + private BIN getFirstBIN() { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + final Cursor cursor = db.openCursor(null, null); + + assertSame( + OperationStatus.SUCCESS, + cursor.getFirst(keyEntry, dataEntry, null)); + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + + cursor.close(); + + return bin; + } + + private void readExpectFound(final int key) { + readExpectStatus(key, OperationStatus.SUCCESS); + } + + private void readExpectNotFound(final int key) { + readExpectStatus(key, OperationStatus.NOTFOUND); + } + + private void readExpectStatus(final int key, + final OperationStatus expectStatus) { + + final Transaction txn = txnBegin(); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + IntegerBinding.intToEntry(key, keyEntry); + + final OperationStatus status = + db.get(null, keyEntry, dataEntry, null); + + assertSame("key = " + key, expectStatus, status); + + txnCommit(txn); + } + + private void diskOrderedScan(final Map expected) { + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final Map found = new HashMap<>(); + + try (final DiskOrderedCursor cursor = db.openCursor(null)) { + OperationResult result; + while ((result = cursor.get( + keyEntry, dataEntry, Get.NEXT, null)) != null) { + found.put( + IntegerBinding.entryToInt(keyEntry), + result.getExpirationTime()); + } + } + + assertEquals(expected, found); + } + + /** + * Tests that the expiration time is stored in the secondary DB to match the + * primary DB. + */ + @Test + public void testSecondaryExpirationTimeStorage() + throws FileNotFoundException { + + /* + * First byte of primary data is secondary key. + * Byte value 100 means null or no key. 
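+         * (So primary data beginning with byte 100 produces no secondary
+         * record for this index: e.g. data {2, 3, 4} maps to secondary key
+         * {2}, while {100, 3, 4} maps to none, as checkExpirationTime
+         * asserts below.)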
+ */ + final SecondaryKeyCreator keyCreator = new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + final byte val = data.getData()[0]; + if (val == 100) { + return false; + } + result.setData(new byte[]{val}); + return true; + } + }; + + /* + * Each byte of primary data is secondary key. + * Byte value 100 means null or no key. + */ + final SecondaryMultiKeyCreator multiKeyCreator = + new SecondaryMultiKeyCreator() { + @Override + public void createSecondaryKeys(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + Set results) { + for (byte val : data.getData()) { + if (val == 100) { + continue; + } + results.add(new DatabaseEntry(new byte[]{val})); + } + } + }; + + db = openDb("primary"); + final SecondaryDatabase secDb1; + final SecondaryDatabase secDb2; + + final SecondaryConfig config = new SecondaryConfig(); + config.setTransactional(isTransactional); + config.setAllowCreate(true); + config.setSortedDuplicates(true); + + final Transaction txn = txnBegin(); + config.setKeyCreator(keyCreator); + config.setMultiKeyCreator(null); + secDb1 = env.openSecondaryDatabase(txn, "sec1", db, config); + + config.setMultiKeyCreator(multiKeyCreator); + config.setKeyCreator(null); + secDb2 = env.openSecondaryDatabase(txn, "sec2", db, config); + txnCommit(txn); + + setFixedTimeHook(BASE_MS); + + final int priKey = 123; + final byte[] data = new byte[3]; + + data[0] = 0; + data[1] = 1; + data[2] = 2; + + checkSecondary(db, secDb1, secDb2, priKey, data, 1); + checkSecondary(db, secDb1, secDb2, priKey, data, 2); + + data[0] = 1; + data[1] = 2; + data[2] = 3; + + checkSecondary(db, secDb1, secDb2, priKey, data, 3); + + data[0] = 2; + data[1] = 2; + data[2] = 4; + + checkSecondary(db, secDb1, secDb2, priKey, data, 4); + + data[0] = 2; + data[1] = 3; + data[2] = 100; + + checkSecondary(db, secDb1, secDb2, priKey, data, 5); + + data[0] = 100; + data[1] = 3; + data[2] = 4; + + checkSecondary(db, secDb1, secDb2, priKey, data, 6); + + secDb1.close(); + secDb2.close(); + db.close(); + } + + private void checkSecondary(final Database priDb, + final SecondaryDatabase secDb1, + final SecondaryDatabase secDb2, + final int priKey, + final byte[] data, + final int ttl) { + + final long baseExpirationTime = + truncateToHours(cloneCalendar(BASE_CAL)).getTimeInMillis() + + TTL.MILLIS_PER_HOUR; + + final long expireTime = + baseExpirationTime + (ttl * TTL.MILLIS_PER_HOUR); + + final WriteOptions options = new WriteOptions(); + options.setTTL(ttl, TimeUnit.HOURS); + options.setUpdateTTL(true); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + dataEntry.setData(data); + IntegerBinding.intToEntry(priKey, keyEntry); + + final Transaction txn = txnBegin(); + + final OperationResult result = priDb.put( + txn, keyEntry, dataEntry, Put.OVERWRITE, options); + + assertNotNull(result); + + checkExpirationTime(priDb, txn, keyEntry, expireTime); + + keyEntry.setData(new byte[]{data[0]}); + + checkExpirationTime(secDb1, txn, keyEntry, expireTime); + + for (final byte val : data) { + keyEntry.setData(new byte[]{val}); + checkExpirationTime(secDb2, txn, keyEntry, expireTime); + } + + txnCommit(txn); + } + + private void checkExpirationTime(final Database db, + final Transaction txn, + final DatabaseEntry keyEntry, + final long expireTime) { + + final OperationResult result = + db.get(txn, keyEntry, null, Get.SEARCH, null); + + if 
(keyEntry.getData()[0] == 100) { + assertNull(result); + return; + } + + assertNotNull(result); + + assertEquals( + "Expect = " + TTL.formatExpirationTime(expireTime) + + " Got = " + TTL.formatExpirationTime(result.getExpirationTime()), + expireTime, result.getExpirationTime()); + } + + /** + * If a record expires after being locked, it should be treated as not + * expired. + */ + @Test + public void testRepeatableRead() throws Throwable { + + db = openDb("foo"); + + setFixedTimeHook(BASE_MS); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + + IntegerBinding.intToEntry(123, keyEntry); + dataEntry.setData(new byte[1]); + + Transaction txn = txnBeginCursor(); + final Cursor cursor = db.openCursor(txn, CursorConfig.READ_COMMITTED); + + /* + * Write a record that will expire in one hour. + */ + final WriteOptions options = + new WriteOptions().setTTL(1, TimeUnit.HOURS); + + OperationResult result = cursor.put( + keyEntry, dataEntry, Put.NO_OVERWRITE, options); + assertNotNull(result); + + result = cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + assertNotNull(result); + + /* + * When time advances, record will be expired but still returned when + * reading a second time. + */ + fixedSystemTime = result.getExpirationTime() + 1; + + result = cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + assertNotNull(result); + + cursor.close(); + txnCommit(txn); + + /* + * When reading with a different transaction, the expired record is not + * returned. + */ + txn = txnBegin(); + + result = db.get(txn, keyEntry, dataEntry, Get.SEARCH, null); + assertNull(result); + + txnCommit(txn); + + /* + * A special case is when we have to wait for a lock, and while waiting + * the TTL is changed via an update or an aborted update. + */ + final CountDownLatch latch1 = new CountDownLatch(1); + + final AtomicReference thread1Ex = + new AtomicReference<>(null); + + final Thread thread1 = new Thread() { + @Override + public void run() { + try { + final Transaction txn1 = txnBeginCursor(); + final Cursor cursor1 = db.openCursor(txn1, null); + + /* First insert record with a one hour TTL. */ + OperationResult result = cursor1.put( + keyEntry, dataEntry, Put.NO_OVERWRITE, + new WriteOptions().setTTL(1).setUpdateTTL(true)); + + assertNotNull(result); + + fixedSystemTime = result.getExpirationTime() + 1; + + latch1.countDown(); + + /* + * Give main thread time to block on read lock. + */ + Thread.sleep(100); + + /* + * While main thread is waiting for the lock, change the + * TTL to zero. + */ + result = cursor1.put( + null, dataEntry, Put.CURRENT, + new WriteOptions().setTTL(0).setUpdateTTL(true)); + + assertNotNull(result); + + cursor1.close(); + txnCommit(txn1); + + } catch (Throwable e) { + thread1Ex.set(e); + } + } + }; + + try { + /* + * Record is initially expired (this was confirmed above). Thread1 + * will insert another record with no TTL in the same slot. + */ + thread1.start(); + + /* Wait for thread1 to get write lock. */ + latch1.await(30, TimeUnit.SECONDS); + + /* + * While thread1 is sleeping, try to read, which will block. + * CursorImpl.lockLN will initially see the record as expired, + * which is the special case we're exercising. + */ + txn = txnBegin(); + result = db.get(txn, keyEntry, dataEntry, Get.SEARCH, null); + txnCommit(txn); + + /* + * Thread1 has updated the record and given it a TTL of 1. + * When thread1 commits, the record will not be expired. 
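+             * (Hedged summary of the special case: the reader first saw
+             * the slot as expired, blocked waiting for thread1's write
+             * lock, and must re-evaluate expiration once the lock is
+             * granted, because the TTL was changed while it waited.)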
+ */ + assertNotNull(result); + + } finally { + new PollCondition(1, 30000) { + @Override + public boolean condition() { + return !thread1.isAlive(); + } + }.await(); + } + + if (thread1Ex.get() != null) { + throw thread1Ex.get(); + } + + db.close(); + } + + /** + * An extra lock on the secondary is needed to support repeatable-read. The + * lock should only be taken when the record will expire within {@link + * EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. + */ + @Test + public void testSecondaryRepeatableRead() { + + /* First byte of primary data is secondary key. */ + final SecondaryKeyCreator keyCreator = new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData(new byte[]{data.getData()[0]}); + return true; + } + }; + + db = openDb("primary"); + + final SecondaryConfig config = new SecondaryConfig(); + config.setTransactional(isTransactional); + config.setAllowCreate(true); + config.setSortedDuplicates(true); + + config.setKeyCreator(keyCreator); + config.setMultiKeyCreator(null); + + Transaction txn = txnBegin(); + + final SecondaryDatabase secDb = env.openSecondaryDatabase( + txn, "sec", db, config); + + txnCommit(txn); + + setFixedTimeHook(BASE_MS); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final DatabaseEntry secKeyEntry = new DatabaseEntry(); + + IntegerBinding.intToEntry(123, keyEntry); + dataEntry.setData(new byte[]{4}); + secKeyEntry.setData(new byte[]{4}); + + /* + * Write a record that will expire in one hour. + */ + WriteOptions options = new WriteOptions().setTTL(1, TimeUnit.HOURS); + + txn = txnBeginCursor(); + + OperationResult result = db.put( + txn, keyEntry, dataEntry, Put.NO_OVERWRITE, options); + assertNotNull(result); + + txnCommit(txn); + + txn = txnBeginCursor(); + + Cursor cursor = db.openCursor(txn, CursorConfig.READ_COMMITTED); + + SecondaryCursor secCursor = secDb.openCursor( + txn, CursorConfig.READ_COMMITTED); + + int nOrigLocks = env.getStats(null).getNTotalLocks(); + + result = cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + assertNotNull(result); + + assertEquals(1 + nOrigLocks, env.getStats(null).getNTotalLocks()); + + result = secCursor.get( + secKeyEntry, null, dataEntry, Get.SEARCH, null); + + assertNotNull(result); + + /* A lock on the secondary is held to support repeatable-read. */ + assertEquals(2 + nOrigLocks, env.getStats(null).getNTotalLocks()); + + /* + * When time advances, record will be expired but still returned when + * reading a second time. + */ + fixedSystemTime = result.getExpirationTime() + 1; + + result = secCursor.get( + secKeyEntry, keyEntry, dataEntry, Get.SEARCH, null); + + assertNotNull(result); + + secCursor.close(); + cursor.close(); + txnCommit(txn); + + /* + * When reading with a different transaction, the expired record is not + * returned. + */ + txn = txnBegin(); + + result = secDb.get( + txn, secKeyEntry, keyEntry, dataEntry, Get.SEARCH, null); + + assertNull(result); + + txnCommit(txn); + + /* + * When the record does not expire within 24 hours, an extra lock is + * not needed. 
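+         * (The 24-hour horizon presumably reflects the default of
+         * EnvironmentParams.ENV_TTL_MAX_TXN_TIME cited in this test's
+         * javadoc: a record that cannot expire within any plausible
+         * transaction lifetime needs no extra repeatable-read lock.)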
+ */ + options = new WriteOptions().setTTL(3, TimeUnit.DAYS); + + txn = txnBeginCursor(); + + result = db.put(txn, keyEntry, dataEntry, Put.NO_OVERWRITE, options); + assertNotNull(result); + + txnCommit(txn); + + txn = txnBeginCursor(); + + cursor = db.openCursor(txn, CursorConfig.READ_COMMITTED); + + secCursor = secDb.openCursor( + txn, CursorConfig.READ_COMMITTED); + + nOrigLocks = env.getStats(null).getNTotalLocks(); + + result = cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + assertNotNull(result); + + assertEquals(1 + nOrigLocks, env.getStats(null).getNTotalLocks()); + + result = secCursor.get( + secKeyEntry, keyEntry, dataEntry, Get.SEARCH, null); + + assertNotNull(result); + + /* No extra lock on the secondary is needed. */ + assertEquals(1 + nOrigLocks, env.getStats(null).getNTotalLocks()); + + secCursor.close(); + cursor.close(); + txnCommit(txn); + + secDb.close(); + db.close(); + } + + /** + * In the set of records consisting of a primary record and its associated + * secondary records, if only some records are locked, the other records + * may expire. We test locking only the primary and only the secondary. + */ + @Test + public void testSecondaryLimitations() { + + /* First byte of primary data is secondary key. */ + final SecondaryKeyCreator keyCreator = new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData(new byte[]{data.getData()[0]}); + return true; + } + }; + + db = openDb("primary"); + + final SecondaryConfig config = new SecondaryConfig(); + config.setTransactional(isTransactional); + config.setAllowCreate(true); + config.setSortedDuplicates(true); + + config.setKeyCreator(keyCreator); + config.setMultiKeyCreator(null); + + Transaction txn = txnBegin(); + + final SecondaryDatabase secDb = env.openSecondaryDatabase( + txn, "sec", db, config); + + txnCommit(txn); + + setFixedTimeHook(BASE_MS); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final DatabaseEntry secKeyEntry = new DatabaseEntry(); + + IntegerBinding.intToEntry(123, keyEntry); + dataEntry.setData(new byte[]{4}); + secKeyEntry.setData(new byte[]{4}); + + /* + * Write a record that will expire in one hour. + */ + WriteOptions options = new WriteOptions().setTTL(1, TimeUnit.HOURS); + + txn = txnBeginCursor(); + + OperationResult result = db.put( + txn, keyEntry, dataEntry, Put.NO_OVERWRITE, options); + assertNotNull(result); + + txnCommit(txn); + + txn = txnBeginCursor(); + + /* + * Lock the primary only. + */ + Cursor cursor = db.openCursor(txn, CursorConfig.READ_COMMITTED); + + SecondaryCursor secCursor = secDb.openCursor( + txn, CursorConfig.READ_COMMITTED); + + result = cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + assertNotNull(result); + + /* + * When time advances, the primary record will not expire, but the + * secondary record will expire. + */ + fixedSystemTime = result.getExpirationTime() + 1; + + result = cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + assertNotNull(result); + + result = secCursor.get( + secKeyEntry, keyEntry, dataEntry, Get.SEARCH, null); + + assertNull(result); + + secCursor.close(); + cursor.close(); + txnCommit(txn); + + /* + * Write a record that will expire in one hour. 
+ */ + options = new WriteOptions().setTTL(1, TimeUnit.HOURS); + + txn = txnBeginCursor(); + + result = db.put( + txn, keyEntry, dataEntry, Put.NO_OVERWRITE, options); + assertNotNull(result); + + txnCommit(txn); + + txn = txnBeginCursor(); + + /* + * Lock the secondary only. Null is passed for the data param, which + * means that the primary is not read or locked. + */ + cursor = db.openCursor(txn, CursorConfig.READ_COMMITTED); + secCursor = secDb.openCursor(txn, CursorConfig.READ_COMMITTED); + + result = secCursor.get( + secKeyEntry, keyEntry, null, Get.SEARCH, null); + + assertNotNull(result); + + /* + * When time advances, the secondary record will not expire, but the + * primary record will expire. + */ + fixedSystemTime = result.getExpirationTime() + 1; + + result = secCursor.get( + secKeyEntry, keyEntry, null, Get.SEARCH, null); + + assertNotNull(result); + + result = cursor.get(keyEntry, dataEntry, Get.SEARCH, null); + assertNull(result); + + result = secCursor.get( + secKeyEntry, keyEntry, dataEntry, Get.SEARCH, null); + + assertNull(result); + + secCursor.close(); + cursor.close(); + txnCommit(txn); + + secDb.close(); + db.close(); + } + + /** + * Tests that disk space for purged LNs is reclaimed by the cleaner, and + * that special cases of repeatable-read (when the LN is purged) are + * handled properly. + */ + @Test + public void testPurgedLNs() throws Throwable { + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + + /* Each record with 1MB of data will fill a .jdb file. */ + final DatabaseEntry dataEntry = + new DatabaseEntry(new byte[1024 * 1024 * 10]); + + db = openDb("foo"); + + setFixedTimeHook(BASE_MS); + + /* + * Write five records that will expire one each hour, each in a + * separate .jdb file. + */ + Transaction txn = txnBegin(); + OperationResult result; + + for (int i = 0; i < 5; i += 1) { + + final WriteOptions options = + new WriteOptions().setTTL(i + 1, TimeUnit.HOURS); + + keyEntry.setData(new byte[]{(byte) i}); + + result = db.put( + txn, keyEntry, dataEntry, Put.NO_OVERWRITE, options); + + assertNotNull(result); + } + + txnCommit(txn); + + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* + * Add a couple more .jdb files so that the preceding .jdb files are + * eligible for cleaning. + */ + envImpl.forceLogFileFlip(); + envImpl.forceLogFileFlip(); + + /* + * Position a cursor on the BIN to prevent expired slots from being + * purged. No lock is held so. + */ + final Cursor holderCursor = db.openCursor(null, null); + + result = holderCursor.get( + null, null, Get.FIRST, + new ReadOptions().setLockMode(LockMode.READ_UNCOMMITTED)); + + assertNotNull(result); + + /* + * Advance time in one hour increments, and expect one record to expire + * at each increments. + * + * Use EVICT_LN for reading to detect when an LN has been purged. + * + * No purging will have taken place (cleaning and compression are + * disabled), so this is just confirming that filtering is happening as + * expected. 
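+         * (CacheMode.EVICT_LN evicts the LN from cache after each
+         * operation, so a later successful read of the data proves the LN
+         * could still be fetched from the log, i.e. the cleaner had not
+         * purged it yet.)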
+ */ + final ReadOptions options = + new ReadOptions().setCacheMode(CacheMode.EVICT_LN); + + fixedSystemTime += TTL.MILLIS_PER_HOUR; + + for (int j = 0; j < 5; j += 1) { + + fixedSystemTime += TTL.MILLIS_PER_HOUR; + + for (int i = 0; i < 5; i += 1) { + txn = txnBegin(); + + keyEntry.setData(new byte[]{(byte) i}); + result = db.get(txn, keyEntry, dataEntry, Get.SEARCH, options); + + final String msg = "i=" + i + " j=" + j; + + if (i > j) { + assertNotNull(msg, result); + } else { + assertNull(msg, result); + } + + txnCommit(txn); + } + } + + /* + * Reset clock back to the write time. Expect all records exists. This + * ensures that LNs have not been purged yet. + */ + fixedSystemTime -= 6 * TTL.MILLIS_PER_HOUR; + + txn = txnBegin(); + + for (int i = 0; i < 5; i += 1) { + keyEntry.setData(new byte[]{(byte) i}); + result = db.get(txn, keyEntry, dataEntry, Get.SEARCH, options); + assertNotNull(result); + } + + txnCommit(txn); + + /* + * Advance time so that all records expire and clean all eligible + * files. The first checkpoint is needed to advance the FirstActiveLSN, + * and the second to delete the cleaned files. + */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + fixedSystemTime += 6 * TTL.MILLIS_PER_HOUR; + + envImpl.getCleaner().doClean( + true /*cleanMultipleFiles*/, true /*forceCleaning*/); + + assertEquals(5, env.getStats(null).getNLNsExpired()); + + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* + * Confirm that all records are expired. + */ + txn = txnBegin(); + + for (int i = 0; i < 5; i += 1) { + keyEntry.setData(new byte[]{(byte) i}); + result = db.get(txn, keyEntry, dataEntry, Get.SEARCH, options); + assertNull(result); + } + + txnCommit(txn); + + /* + * Reset clock back to the write time, then advance time in one hour + * increments. At this point all LNs have been purged, but slots still + * exist and are not expired until we advance the time. + */ + fixedSystemTime -= 6 * TTL.MILLIS_PER_HOUR; + + txn = txnBeginCursor(); + final Cursor cursor = db.openCursor(txn, null); + + fixedSystemTime += TTL.MILLIS_PER_HOUR; + + for (int i = 0; i < 5; i += 1) { + + /* + * When reading with a null 'data' param, the slot is not expired + * so the result is non-null, even though the LN was purged. + */ + keyEntry.setData(new byte[]{(byte) i}); + + result = cursor.get(keyEntry, null, Get.SEARCH, options); + assertNotNull(result); + + fixedSystemTime += TTL.MILLIS_PER_HOUR; + + /* + * Read again a null 'data' param. The slot is now expired, but the + * result is non-null because we hold a lock. + */ + result = cursor.get(keyEntry, null, Get.SEARCH, options); + assertNotNull(result); + + /* + * Read again with a non-null 'data' param. This time the result is + * null because the LN cannot be fetched. + */ + result = cursor.get(keyEntry, dataEntry, Get.CURRENT, options); + assertNull(result); + + /* + * Try to update with a partial 'data' param. Null is returned + * because the old LN cannot be fetched. + */ + final DatabaseEntry newData = new DatabaseEntry(); + newData.setData(new byte[1]); + newData.setPartial(0, 1, true); + result = cursor.put(null, newData, Put.CURRENT, null); + assertNull(result); + + /* + * An update of an expired record should work, however, when + * reading the old LN is not required. 
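+             * (A partial put needs the old LN image to build the new
+             * record, and that image has been purged; a full-data put with
+             * setUpdateTTL(true) needs no old image, so it succeeds and,
+             * by not setting a TTL, leaves the slot with a TTL of zero.)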
+             */
+            newData.setPartial(false);
+            result = cursor.put(
+                null, newData, Put.CURRENT,
+                new WriteOptions().setUpdateTTL(true));
+            assertNotNull(result);
+        }
+
+        cursor.close();
+        txnCommit(txn);
+
+        /*
+         * Since we set the TTL to zero in all records, expect that they all
+         * exist.
+         */
+        txn = txnBegin();
+
+        for (int i = 0; i < 5; i += 1) {
+            keyEntry.setData(new byte[]{(byte) i});
+            result = db.get(txn, keyEntry, dataEntry, Get.SEARCH, options);
+            assertNotNull(result);
+            assertEquals(1, dataEntry.getSize());
+        }
+
+        txnCommit(txn);
+
+        holderCursor.close();
+        db.close();
+    }
+
+    /**
+     * Tests that disk space for purged slots is reclaimed by the cleaner.
+     * All records in the test are small and should be embedded, so that LN
+     * purging is factored out.
+     */
+    @Test
+    public void testPurgedSlots() {
+
+        final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        final FileManager fileManager = envImpl.getFileManager();
+
+        final String embedMaxSize = env.getConfig().getConfigParam(
+            EnvironmentConfig.TREE_MAX_EMBEDDED_LN);
+
+        if (Integer.parseInt(embedMaxSize) < 5) {
+            System.out.println(
+                "testPurgedSlots not run, embedded LN max size is too small");
+            return;
+        }
+
+        db = openDb("foo");
+
+        setFixedTimeHook(BASE_MS);
+
+        /*
+         * Write 5 files worth of records that expire in one hour, and
+         * another 5 files worth that expire in two hours. Each file is
+         * about half LNs that are immediately obsolete and half BINs that
+         * are not. When we clean, we'll reclaim all the LNs plus the expired
+         * BIN slots.
+         */
+        OperationResult result;
+
+        final WriteOptions options = new WriteOptions();
+
+        final long startFile = fileManager.getCurrentFileNum();
+
+        /* Use large keys to fill files more quickly. */
+        final DatabaseEntry keyEntry = new DatabaseEntry(new byte[500]);
+        final DatabaseEntry dataEntry = new DatabaseEntry(new byte[5]);
+        final DatabaseEntry tempEntry = new DatabaseEntry();
+
+        for (int i = 0;; i += 1) {
+
+            final long filesAdded =
+                fileManager.getCurrentFileNum() - startFile;
+
+            if (filesAdded >= 10) {
+                break;
+            }
+
+            options.setTTL(filesAdded >= 5 ? 2 : 1, TimeUnit.HOURS);
+
+            IntegerBinding.intToEntry(i, tempEntry);
+
+            System.arraycopy(
+                tempEntry.getData(), 0, keyEntry.getData(), 0,
+                tempEntry.getSize());
+
+            final Transaction txn = txnBegin();
+
+            result = db.put(
+                txn, keyEntry, dataEntry, Put.NO_OVERWRITE, options);
+
+            assertNotNull(result);
+            txnCommit(txn);
+        }
+
+        env.checkpoint(new CheckpointConfig().setForce(true));
+
+        /*
+         * Clean 3 times, advancing the clock an hour each time. To begin
+         * with we have over 100 MB of data. After each hour passes:
+         *
+         * 1. Nothing expires but we clean to below 50MB because LNs are all
+         * embedded and immediately obsolete.
+         *
+         * 2. Half the data expires and we clean to below 25MB.
+         *
+         * 3. The rest of the data expires and we clean to below 5MB.
+         */
+        final int[] expectMB = new int[] {50, 25, 5};
+        for (int i = 0; i < 3; i += 1) {
+
+            fixedSystemTime += TTL.MILLIS_PER_HOUR;
+
+            /*
+             * Add a couple more .jdb files so that the preceding .jdb files
+             * are eligible for cleaning. A checkpoint after cleaning is
+             * needed before the cleaned files can be deleted.
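+             *
+             * (forceLogFileFlip starts a new .jdb file, so the files written
+             * above are no longer at the active end of the log and can be
+             * selected by the cleaner.)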
+             */
+            envImpl.forceLogFileFlip();
+            envImpl.forceLogFileFlip();
+
+            envImpl.getCleaner().doClean(
+                true /*cleanMultipleFiles*/, true /*forceCleaning*/);
+
+            env.checkpoint(new CheckpointConfig().setForce(true));
+
+            final long maxSize = expectMB[i] * 1024 * 1024L;
+
+            final long actualSize = env.getStats(null).getTotalLogSize();
+
+            final String msg = String.format(
+                "actualSize=%,d maxSize=%,d", actualSize, maxSize);
+
+            assertTrue(msg, actualSize < maxSize);
+        }
+
+        /*
+         * After compressing, all data should be gone, leaving a single BIN.
+         */
+        env.compress();
+        final BtreeStats dbStats = (BtreeStats) db.getStats(null);
+        assertEquals(1, dbStats.getBottomInternalNodeCount());
+
+        db.close();
+    }
+
+    /**
+     * Sets a TTL.timeTestHook that provides a fixed time of initTime, meaning
+     * that JE TTL processing will behave as if the system time is initTime.
+     * Thereafter, changing fixedSystemTime will change the test time.
+     *
+     * In unit tests calling this method, add the following to tearDown:
+     *   TTL.setTimeTestHook(null);
+     *   fixedSystemTime = 0;
+     */
+    public static void setFixedTimeHook(final long initTime) {
+
+        fixedSystemTime = initTime;
+
+        TTL.setTimeTestHook(new TestHook<Long>() {
+
+            @Override
+            public Long getHookValue() {
+                return fixedSystemTime;
+            }
+
+            @Override
+            public void hookSetup() {
+                throw new UnsupportedOperationException();
+            }
+            @Override
+            public void doIOHook() throws IOException {
+                throw new UnsupportedOperationException();
+            }
+            @Override
+            public void doHook() {
+                throw new UnsupportedOperationException();
+            }
+            @Override
+            public void doHook(Long obj) {
+                throw new UnsupportedOperationException();
+            }
+        });
+    }
+
+    /**
+     * Checks that using TTL is not allowed in a replicated env with any node
+     * having a version less than {@link TTL#MIN_JE_VERSION}.
+     */
+    @Test
+    public void testTTLNotAvailable() {
+
+        final boolean isRep = isReplicatedTest(getClass());
+        db = openDb("foo");
+
+        try {
+            TTL.TEST_MIN_JE_VERSION = new JEVersion("10000.0.0");
+
+            /* Allow put if no TTL is specified. */
+            try {
+                db.put(
+                    null,
+                    new DatabaseEntry(new byte[1]),
+                    new DatabaseEntry(new byte[1]),
+                    Put.OVERWRITE, null);
+            } catch (IllegalStateException e) {
+                fail();
+            }
+
+            /* Disallow put if a non-zero TTL is specified. */
+            db.put(
+                null,
+                new DatabaseEntry(new byte[1]),
+                new DatabaseEntry(new byte[1]),
+                Put.OVERWRITE,
+                new WriteOptions().setTTL(1));
+
+            assertFalse(isRep);
+
+        } catch (IllegalStateException e) {
+
+            assertTrue(isRep);
+            assertTrue(e.getMessage().contains("TTL"));
+
+        } finally {
+            TTL.TEST_MIN_JE_VERSION = null;
+        }
+
+        db.close();
+    }
+}
diff --git a/test/com/sleepycat/je/test/ToManyTest.java b/test/com/sleepycat/je/test/ToManyTest.java
new file mode 100644
index 0000000..4aa59dc
--- /dev/null
+++ b/test/com/sleepycat/je/test/ToManyTest.java
@@ -0,0 +1,392 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Get;
+import com.sleepycat.je.OperationResult;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Put;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryCursor;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryMultiKeyCreator;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.util.test.TxnTestCase;
+
+import org.junit.After;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * Tests multi-key secondary operations. Exhaustive API testing of multi-key
+ * secondaries is part of SecondaryTest and ForeignKeyTest, which test the use
+ * of a single key with SecondaryMultiKeyCreator. This class adds tests for
+ * multiple keys per record.
+ */
+@RunWith(Parameterized.class)
+public class ToManyTest extends TxnTestCase {
+
+    private static final Function<Byte, Set<Byte>> NEW_SET =
+        (k -> new HashSet<>());
+
+    /*
+     * The primary database has a single byte key and byte[] array data. Each
+     * byte of the data array is a secondary key in the to-many index.
+     *
+     * The primary map mirrors the primary database and contains Byte keys and
+     * a set of Byte objects for each map entry value. The secondary map
+     * mirrors the secondary database, and for every secondary key (Byte)
+     * contains a set of primary keys (set of Byte).
+     */
+    private Map<Byte, Set<Byte>> priMap0 = new HashMap<>();
+    private Map<Byte, Set<Byte>> secMap0 = new HashMap<>();
+    private Database priDb;
+    private SecondaryDatabase secDb;
+
+    @Parameters
+    public static List<Object[]> genParams() {
+
+        /*
+         * This test does not work with TXN_NULL because without transactions
+         * we cannot abort the update in the one-to-many test when the
+         * secondary key already exists in another primary record.
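+         * (The abort is what rolls the primary record back after the
+         * secondary-key constraint violation, and with TXN_NULL there is no
+         * transaction to abort.)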
+         */
+        return getTxnParams(
+            new String[] {TxnTestCase.TXN_USER, TxnTestCase.TXN_AUTO}, false);
+    }
+
+    public ToManyTest(String type) {
+        initEnvConfig();
+        txnType = type;
+        isTransactional = (txnType != TXN_NULL);
+        customName = txnType;
+    }
+
+    @After
+    public void tearDown()
+        throws Exception {
+
+        super.tearDown();
+        priMap0 = null;
+        secMap0 = null;
+        priDb = null;
+        secDb = null;
+    }
+
+    @Test
+    public void testManyToMany()
+        throws DatabaseException {
+
+        priDb = openPrimary("pri");
+        secDb = openSecondary(priDb, "sec", true /*dups*/);
+
+        writeAndVerify((byte) 0, new byte[] {});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 0, new byte[] {});
+        writeAndVerify((byte) 0, new byte[] {0});
+        writeAndVerify((byte) 0, new byte[] {0, 1});
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 0, new byte[] {1, 2});
+        writeAndVerify((byte) 0, new byte[] {2});
+        writeAndVerify((byte) 0, new byte[] {});
+        writeAndVerify((byte) 0, null);
+
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 1, new byte[] {1, 2, 3});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 1, null);
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2});
+        writeAndVerify((byte) 1, new byte[] {1, 2, 3});
+        writeAndVerify((byte) 0, new byte[] {0});
+        writeAndVerify((byte) 1, new byte[] {3});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 1, null);
+
+        secDb.close();
+        priDb.close();
+    }
+
+    @Test
+    public void testOneToMany()
+        throws DatabaseException {
+
+        priDb = openPrimary("pri");
+        secDb = openSecondary(priDb, "sec", false /*dups*/);
+
+        writeAndVerify((byte) 0, new byte[] {1, 5});
+        writeAndVerify((byte) 1, new byte[] {2, 4});
+        writeAndVerify((byte) 0, new byte[] {0, 1, 5, 6});
+        writeAndVerify((byte) 1, new byte[] {2, 3, 4});
+        write((byte) 0, new byte[] {3}, true /*expectException*/);
+        writeAndVerify((byte) 1, new byte[] {});
+        writeAndVerify((byte) 0, new byte[] {0, 1, 2, 3, 4, 5, 6});
+        writeAndVerify((byte) 0, null);
+        writeAndVerify((byte) 1, new byte[] {0, 1, 2, 3, 4, 5, 6});
+        writeAndVerify((byte) 1, null);
+
+        secDb.close();
+        priDb.close();
+    }
+
+    /**
+     * Puts or deletes a single primary record, updates the maps, and verifies
+     * that the maps match the databases.
+     */
+    private void writeAndVerify(byte priKey, byte[] priData)
+        throws DatabaseException {
+
+        int nWrites = write(priKey, priData, false /*expectException*/);
+        updateMaps(priKey, bytesToSet(priData), nWrites);
+        verify();
+    }
+
+    /**
+     * Puts or deletes a single primary record.
+     *
+     * @return the number of secondary writes.
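+     * (Secondary writes are counted by CursorImpl; updateMaps() asserts that
+     * this count equals the number of map changes it makes.)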
+     */
+    private int write(byte priKey, byte[] priData, boolean expectException)
+        throws DatabaseException {
+
+        DatabaseEntry keyEntry = new DatabaseEntry(new byte[] { priKey });
+        DatabaseEntry dataEntry = new DatabaseEntry(priData);
+
+        Transaction txn = txnBeginCursor();
+        try {
+            int nWrites;
+            try (Cursor c = priDb.openCursor(txn, null)) {
+                OperationResult r;
+                if (priData != null) {
+                    r = c.put(keyEntry, dataEntry, Put.OVERWRITE, null);
+                } else {
+                    r = c.get(keyEntry, null, Get.SEARCH, null);
+                    assertNotNull(r);
+                    r = c.delete(null);
+                }
+                assertNotNull(r);
+                nWrites = DbInternal.getCursorImpl(c).getNSecondaryWrites();
+            }
+            txnCommit(txn);
+            assertTrue(!expectException);
+            return nWrites;
+        } catch (Exception e) {
+            txnAbort(txn);
+            assertTrue(e.toString(), expectException);
+            return 0;
+        }
+    }
+
+    /**
+     * Updates map 0 to reflect a record added to the primary database.
+     *
+     * @param nWrites number of secondary records inserted/deleted, which
+     * should equal the number of map changes.
+     */
+    private void updateMaps(Byte priKey, Set<Byte> newPriData, int nWrites) {
+
+        int nChanges = 0;
+
+        /* Remove old secondary keys. */
+        Set<Byte> oldPriData = priMap0.get(priKey);
+        if (oldPriData != null) {
+            for (Byte secKey : oldPriData) {
+                if (newPriData != null && newPriData.contains(secKey)) {
+                    continue;
+                }
+                Set<Byte> priKeySet = secMap0.get(secKey);
+                assertNotNull(priKeySet);
+                assertTrue(priKeySet.remove(priKey));
+                nChanges += 1;
+                if (priKeySet.isEmpty()) {
+                    secMap0.remove(secKey);
+                }
+            }
+        }
+
+        if (newPriData != null) {
+            /* Put primary entry. */
+            priMap0.put(priKey, newPriData);
+            /* Add new secondary keys. */
+            for (Byte secKey : newPriData) {
+                if (oldPriData != null && oldPriData.contains(secKey)) {
+                    continue;
+                }
+                Set<Byte> priKeySet =
+                    secMap0.computeIfAbsent(secKey, NEW_SET);
+                assertTrue(priKeySet.add(priKey));
+                nChanges += 1;
+            }
+        } else {
+            /* Remove primary entry. */
+            priMap0.remove(priKey);
+        }
+
+        assertEquals(nChanges, nWrites);
+    }
+
+    /**
+     * Verifies that the maps match the databases.
+     */
+    private void verify()
+        throws DatabaseException {
+
+        Transaction txn = txnBeginCursor();
+
+        DatabaseEntry priKeyEntry = new DatabaseEntry();
+        DatabaseEntry secKeyEntry = new DatabaseEntry();
+        DatabaseEntry dataEntry = new DatabaseEntry();
+
+        Map<Byte, Set<Byte>> priMap1 = new HashMap<>();
+        Map<Byte, Set<Byte>> priMap2 = new HashMap<>();
+        Map<Byte, Set<Byte>> secMap1 = new HashMap<>();
+        Map<Byte, Set<Byte>> secMap2 = new HashMap<>();
+
+        /* Build map 1 from the primary database. */
+        Cursor priCursor = priDb.openCursor(txn, null);
+
+        while (priCursor.getNext(priKeyEntry, dataEntry, null) ==
+               OperationStatus.SUCCESS) {
+
+            Byte priKey = priKeyEntry.getData()[0];
+            Set<Byte> priData = bytesToSet(dataEntry.getData());
+
+            /* Update primary map. */
+            priMap1.put(priKey, priData);
+
+            /* Update secondary map. */
+            for (Byte secKey : priData) {
+
+                Set<Byte> priKeySet =
+                    secMap1.computeIfAbsent(secKey, NEW_SET);
+
+                assertTrue(priKeySet.add(priKey));
+            }
+
+            /*
+             * Add empty primary records to priMap2 while we're here, since
+             * they cannot be built from the secondary database.
+             */
+            if (priData.isEmpty()) {
+                priMap2.put(priKey, priData);
+            }
+        }
+        priCursor.close();
+
+        /* Build map 2 from the secondary database. */
+        SecondaryCursor secCursor = secDb.openSecondaryCursor(txn, null);
+
+        while (secCursor.getNext(secKeyEntry, priKeyEntry, dataEntry, null) ==
+               OperationStatus.SUCCESS) {
+
+            Byte priKey = priKeyEntry.getData()[0];
+            Byte secKey = secKeyEntry.getData()[0];
+
+            /* Update primary map. */
+            Set<Byte> priData = priMap2.computeIfAbsent(priKey, NEW_SET);
+            priData.add(secKey);
+
+            /* Update secondary map. */
+            Set<Byte> secData = secMap2.computeIfAbsent(secKey, NEW_SET);
+            secData.add(priKey);
+        }
+
+        secCursor.close();
+
+        /* Compare. */
+        assertEquals(priMap0, priMap1);
+        assertEquals(priMap1, priMap2);
+        assertEquals(secMap0, secMap1);
+        assertEquals(secMap1, secMap2);
+
+        txnCommit(txn);
+    }
+
+    private Set<Byte> bytesToSet(byte[] bytes) {
+        if (bytes == null) {
+            return null;
+        }
+        Set<Byte> set = new HashSet<>();
+        for (byte b : bytes) {
+            set.add(b);
+        }
+        return set;
+    }
+
+    private Database openPrimary(String name)
+        throws DatabaseException {
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openDatabase(txn, name, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    private SecondaryDatabase openSecondary(Database priDb,
+                                            String dbName,
+                                            boolean dups)
+        throws DatabaseException {
+
+        SecondaryConfig dbConfig = new SecondaryConfig();
+        dbConfig.setTransactional(isTransactional);
+        dbConfig.setAllowCreate(true);
+        dbConfig.setSortedDuplicates(dups);
+        dbConfig.setMultiKeyCreator(new MyKeyCreator());
+
+        Transaction txn = txnBegin();
+        try {
+            return env.openSecondaryDatabase(txn, dbName, priDb, dbConfig);
+        } finally {
+            txnCommit(txn);
+        }
+    }
+
+    private static class MyKeyCreator implements SecondaryMultiKeyCreator {
+
+        public void createSecondaryKeys(SecondaryDatabase secondary,
+                                        DatabaseEntry key,
+                                        DatabaseEntry data,
+                                        Set<DatabaseEntry> results) {
+            for (int i = 0; i < data.getSize(); i += 1) {
+                results.add(new DatabaseEntry(data.getData(), i, 1));
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/tree/BinDeltaTest.java b/test/com/sleepycat/je/tree/BinDeltaTest.java
new file mode 100644
index 0000000..1a8f6f2
--- /dev/null
+++ b/test/com/sleepycat/je/tree/BinDeltaTest.java
@@ -0,0 +1,322 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.tree;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.log.LogManager;
+import com.sleepycat.je.log.entry.BINDeltaLogEntry;
+import com.sleepycat.je.log.entry.LogEntry;
+import com.sleepycat.je.tree.Key.DumpType;
+import com.sleepycat.je.txn.BasicLocker;
+import com.sleepycat.je.txn.Locker;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Exercise the delta based BIN logging.
+ */
+public class BinDeltaTest extends TestBase {
+    private static final String DB_NAME = "test";
+    private static final boolean DEBUG = false;
+    private Environment env;
+    private final File envHome;
+    private Database db;
+    private LogManager logManager;
+
+    public BinDeltaTest() {
+        envHome = SharedTestUtils.getTestDir();
+
+        /* Print keys as numbers */
+        Key.DUMP_TYPE = DumpType.BINARY;
+    }
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        /*
+         * Properties for creating an environment. Run the evictor for this
+         * test, and use small BINs so that deltas are exercised.
+         */
+        super.setUp();
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "true");
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(), "50");
+        envConfig.setConfigParam
+            (EnvironmentParams.BIN_DELTA_PERCENT.getName(), "50");
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+        logManager = DbInternal.getNonNullEnvImpl(env).getLogManager();
+    }
+
+    @After
+    public void tearDown() {
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException E) {
+            }
+        }
+    }
+
+    /**
+     * Creates a db, fills it with records from start to end, and returns the
+     * first BIN.
+     */
+    private BIN initDb(int start, int end)
+        throws DatabaseException {
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+
+        addRecords(start, end);
+
+        /* Now reach into the tree and get the first BIN */
+        Locker txn = BasicLocker.
+            createBasicLocker(DbInternal.getNonNullEnvImpl(env));
+        CursorImpl internalCursor =
+            new CursorImpl(DbInternal.getDbImpl(db), txn);
+        assertTrue(internalCursor.positionFirstOrLast(true));
+        BIN firstBIN = internalCursor.getBIN();
+        firstBIN.releaseLatch();
+        internalCursor.close();
+        txn.operationEnd();
+        return firstBIN;
+    }
+
+    /**
+     * Modify the data, just to dirty the BIN.
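+     * (Updating records in place dirties the BIN so that the next log call
+     * produces a delta containing the changed slots.)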
+     */
+    private void modifyRecords(int start, int end, int increment)
+        throws DatabaseException {
+
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+        DatabaseEntry searchKey = new DatabaseEntry();
+        DatabaseEntry foundData = new DatabaseEntry();
+        DatabaseEntry newData = new DatabaseEntry();
+
+        for (int i = start; i <= end; i++) {
+            searchKey.setData(TestUtils.getTestArray(i));
+            assertEquals(OperationStatus.SUCCESS,
+                         cursor.getSearchKey(searchKey, foundData,
+                                             LockMode.DEFAULT));
+            newData.setData(TestUtils.getTestArray(i + increment));
+            cursor.putCurrent(newData);
+        }
+        cursor.close();
+        txn.commit();
+    }
+
+    /*
+     * Add the specified records.
+     */
+    private void addRecords(int start, int end)
+        throws DatabaseException {
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = start; i < end; i++) {
+            byte[] keyData = TestUtils.getTestArray(i);
+            byte[] dataData = TestUtils.byteArrayCopy(keyData);
+            key.setData(keyData);
+            data.setData(dataData);
+            db.put(null, key, data);
+        }
+    }
+
+    /**
+     * Simple test: delta a BIN several times, then reconstruct.
+     */
+    @Test
+    public void testSimple()
+        throws Throwable {
+
+        try {
+            /* Create a db, insert records with values 10-30, get the first
+               BIN. */
+            BIN bin = initDb(10, 30);
+
+            /* Log a full version. */
+            bin.latch();
+            long fullLsn = bin.log(true, false, false, null);
+            bin.releaseLatch();
+            assertTrue(fullLsn != DbLsn.NULL_LSN);
+            assertEquals(fullLsn, bin.getLastFullLsn());
+            assertEquals(fullLsn, bin.getLastLoggedLsn());
+            assertEquals(DbLsn.NULL_LSN, bin.getLastDeltaLsn());
+            assertFalse(bin.getDirty());
+
+            if (DEBUG) {
+                System.out.println("Start");
+                System.out.println(bin.dumpString(0, true));
+            }
+
+            /* Modify some of the data, add data so the BIN is changed. */
+            modifyRecords(11, 13, 10);
+            addRecords(1, 3);
+            logAndCheck(bin);
+
+            /* Modify more of the data, so the BIN is changed. */
+            modifyRecords(14, 15, 10);
+            logAndCheck(bin);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            db.close();
+        }
+    }
+
+    /**
+     * Test that a delta is correctly generated when there are entries
+     * that have been aborted and rolled back.
+     *
+     * The case we're trying to test (which was previously handled
+     * incorrectly):
+     * - a record is deleted
+     * - a full version of BIN x is written to the log, reflecting that
+     *   deletion.
+     * - the deleting txn is aborted, so the record is restored. Now the
+     *   BIN has an entry where the child LSN is less than the last full
+     *   BIN version LSN.
+     * - generate a delta, make sure that the restoration of the record is
+     *   present.
+     */
+    @Test
+    public void testUndo()
+        throws Throwable {
+
+        try {
+            /* Create a db, insert records with values 10-30, get the first
+               BIN. */
+            BIN bin = initDb(10, 30);
+
+            /* Delete the first record, then abort the delete. */
+            Transaction txn = env.beginTransaction(null, null);
+            Cursor cursor = db.openCursor(txn, null);
+            DatabaseEntry firstKey = new DatabaseEntry();
+            DatabaseEntry foundData = new DatabaseEntry();
+            OperationStatus status = cursor.getFirst(firstKey, foundData,
+                                                     LockMode.DEFAULT);
+            assertEquals(OperationStatus.SUCCESS, status);
+            status = cursor.delete();
+            assertEquals(OperationStatus.SUCCESS, status);
+            cursor.close();
+
+            /* Log a full version. This will reflect the delete. */
+            bin.latch();
+            long fullLsn = bin.log(true, false, false, null);
+            bin.releaseLatch();
+            assertTrue(fullLsn != DbLsn.NULL_LSN);
+
+            /*
+             * Roll back the deletion.
+             * Now the full version of the BIN is out of date.
+             */
+            txn.abort();
+
+            /*
+             * Make sure a delta reflects the abort, even though the abort
+             * returns an older LSN back into the BIN.
+             */
+            logAndCheck(bin);
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw t;
+        } finally {
+            db.close();
+        }
+    }
+
+    /* Check if full is logged when percent > max */
+    /* Check that max deltas works. */
+    /* check knownDelete. */
+
+    /**
+     * Log the targetBIN, then read it back from the log and make sure
+     * the recreated BIN matches the in-memory BIN.
+     */
+    private void logAndCheck(BIN targetBIN)
+        throws IOException, DatabaseException {
+
+        /*
+         * Log it as a delta.
+         */
+        final long fullLsn = targetBIN.getLastFullLsn();
+        final long deltaLsn;
+        targetBIN.latch();
+        try {
+            deltaLsn = targetBIN.log(true, false, false, null);
+        } finally {
+            targetBIN.releaseLatch();
+        }
+        assertEquals(deltaLsn, targetBIN.getLastDeltaLsn());
+        assertEquals(deltaLsn, targetBIN.getLastLoggedLsn());
+        assertEquals(fullLsn, targetBIN.getLastFullLsn());
+        assertFalse(targetBIN.getDirty());
+
+        /* Read the delta back. */
+        LogEntry partial =
+            logManager.getLogEntry(targetBIN.getLastDeltaLsn());
+
+        /* Make sure that this was a delta entry. */
+        assertTrue(partial instanceof BINDeltaLogEntry);
+        BIN delta = ((BINDeltaLogEntry) partial).getMainItem();
+        assertTrue(delta.isBINDelta(false));
+        delta.setDatabase(targetBIN.getDatabase());
+
+        /* Compare to the current version. */
+        BIN createdBIN =
+            delta.reconstituteBIN(targetBIN.getDatabase());
+        if (DEBUG) {
+            System.out.println("created");
+            System.out.println(createdBIN.dumpString(0, true));
+        }
+
+        assertEquals(targetBIN.getClass().getName(),
+                     createdBIN.getClass().getName());
+        assertEquals(targetBIN.getNEntries(), createdBIN.getNEntries());
+
+        for (int i = 0; i < createdBIN.getNEntries(); i++) {
+            assertEquals("LSN " + i, targetBIN.getLsn(i),
+                         createdBIN.getLsn(i));
+        }
+        assertFalse(createdBIN.getDirty());
+    }
+}
diff --git a/test/com/sleepycat/je/tree/CountEstimatorTest.java b/test/com/sleepycat/je/tree/CountEstimatorTest.java
new file mode 100644
index 0000000..966eabb
--- /dev/null
+++ b/test/com/sleepycat/je/tree/CountEstimatorTest.java
@@ -0,0 +1,243 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.Relationship; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class CountEstimatorTest extends TestBase { + + private static final boolean DEBUG = false; + + private final File envHome; + private Environment env; + private Database db; + + public CountEstimatorTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + try { + if (env != null) { + env.close(); + } + } catch (Throwable e) { + System.out.println("during tearDown: " + e); + } + } + + private void openEnv() { + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void closeEnv() { + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /** + * When insertions are sequential, the estimate is always correct because + * all INs (except on the right edge of the btree) have the same number of + * entries. + */ + @Test + public void testDupsInsertSequential() { + + openEnv(); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + final int[] nDupsArray = + {1, 2, 3, 20, 50, 500, 5000, 500, 50, 20, 3, 2, 1}; + + long total = 0; + for (int i = 0; i < nDupsArray.length; i += 1) { + final int nDups = nDupsArray[i]; + IntegerBinding.intToEntry(i, key); + for (int j = 0; j < nDups; j += 1) { + IntegerBinding.intToEntry(j, data); + final OperationStatus status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + } + total += nDups; + assertEquals(total, db.count()); + } + + final Cursor cursor = db.openCursor(null, null); + for (int i = 0; i < nDupsArray.length; i += 1) { + final int nDups = nDupsArray[i]; + IntegerBinding.intToEntry(i, key); + final OperationStatus status = + cursor.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals(0, IntegerBinding.entryToInt(data)); + assertEquals(nDups, cursor.count()); + assertEquals(nDups, cursor.countEstimate()); + } + cursor.close(); + + closeEnv(); + } + + /** + * When insertions are non-sequential, the estimate may be off by a factor + * of two. The estimate is accurate, however, for counts less than the + * nodeMax setting. 
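+     * (After random insertions, splits leave INs anywhere between half full
+     * and full, so an estimate extrapolated from IN entry counts can be off
+     * by up to a factor of two.)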
+     */
+    @Test
+    public void testDupsInsertNonSequential() {
+
+        openEnv();
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry();
+
+        final int[] nDupsArray =
+            {1, 2, 3, 20, 50, 500, 5000, 500, 50, 20, 3, 2, 1};
+
+        long total = 0;
+        for (int j = 0;; j += 1) {
+            boolean wroteOne = false;
+            IntegerBinding.intToEntry(j, data);
+            for (int i = 0; i < nDupsArray.length; i += 1) {
+                final int nDups = nDupsArray[i];
+                if (j < nDups) {
+                    IntegerBinding.intToEntry(i, key);
+                    final OperationStatus status = db.put(null, key, data);
+                    assertSame(OperationStatus.SUCCESS, status);
+                    wroteOne = true;
+                    total += 1;
+                }
+            }
+            if (!wroteOne) {
+                break;
+            }
+            assertEquals(total, db.count());
+        }
+
+        final Cursor cursor = db.openCursor(null, null);
+        for (int i = 0; i < nDupsArray.length; i += 1) {
+            final int nDups = nDupsArray[i];
+            IntegerBinding.intToEntry(i, key);
+            final OperationStatus status =
+                cursor.getSearchKey(key, data, null);
+            assertSame(OperationStatus.SUCCESS, status);
+            assertEquals(0, IntegerBinding.entryToInt(data));
+            assertEquals(nDups, cursor.count());
+            if (nDups <= 100) {
+                assertEquals(nDups, cursor.countEstimate());
+            } else {
+                final long est = cursor.countEstimate();
+                assertTrue(est > nDups / 2 && est < nDups * 2);
+            }
+            /*
+            System.out.println("nDups=" + nDups + " countEstimate=" +
+                               cursor.countEstimate());
+            */
+        }
+        cursor.close();
+
+        closeEnv();
+    }
+
+    @Entity
+    static class MyEntity {
+        @PrimaryKey
+        int id;
+        @SecondaryKey(relate=Relationship.MANY_TO_ONE)
+        int dept;
+    }
+
+    /**
+     * EntityIndex.countEstimate simply forwards to Cursor.countEstimate, but
+     * we should still call it once.
+     */
+    @Test
+    public void testDPL() {
+        openEnv();
+
+        final EntityStore store = new EntityStore
+            (env, "foo", new StoreConfig().setAllowCreate(true));
+
+        final PrimaryIndex<Integer, MyEntity> priIndex =
+            store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+        final SecondaryIndex<Integer, Integer, MyEntity> secIndex =
+            store.getSecondaryIndex(priIndex, Integer.class, "dept");
+
+        MyEntity entity = new MyEntity();
+        entity.id = 1;
+        entity.dept = 9;
+        priIndex.put(entity);
+
+        entity = new MyEntity();
+        entity.id = 2;
+        entity.dept = 9;
+        priIndex.put(entity);
+
+        EntityCursor<MyEntity> c = secIndex.entities();
+        assertNotNull(c.next());
+
+        assertEquals(2, c.count());
+        assertEquals(2, c.countEstimate());
+
+        c.close();
+        store.close();
+        closeEnv();
+    }
+}
diff --git a/test/com/sleepycat/je/tree/CreateOldVersionLogs.java b/test/com/sleepycat/je/tree/CreateOldVersionLogs.java
new file mode 100644
index 0000000..5f93b0b
--- /dev/null
+++ b/test/com/sleepycat/je/tree/CreateOldVersionLogs.java
@@ -0,0 +1,486 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.tree;
+
+import java.io.File;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.rep.ReplicatedEnvironment;
+import com.sleepycat.je.rep.ReplicationConfig;
+import com.sleepycat.je.rep.RepInternal;
+import com.sleepycat.je.rep.ReplicatedEnvironment.State;
+//import com.sleepycat.je.rep.util.DbRepPreUpgrade_4_1;
+//import com.sleepycat.je.util.DbPreUpgrade_4_1;
+
+/**
+ * This program is used to generate the old-version data used to test dup
+ * conversion in JE 5.0. Before running this program, you have to uncomment
+ * the commented-out code, since some of the classes it uses exist only in
+ * JE 4.1. Be sure to run this program on JE 4.1.
+ *
+ * Please ask Eric and Tao if you have more questions.
+ */
+public class CreateOldVersionLogs {
+    private static File envHome = new File("data");
+    private File[] envHomes;
+    File logFile = new File(envHome, "00000000.jdb");
+    Environment env;
+    Database db;
+
+    private static DatabaseEntry theKey = new DatabaseEntry();
+    private static DatabaseEntry theData = new DatabaseEntry();
+    private static int N_ENTRIES = 4;
+    private static int repNodes = 3;
+    ReplicatedEnvironment[] repEnvs = new ReplicatedEnvironment[repNodes];
+
+    private String singletonLN_jdb = "je-4.1.7_logWithSingletonLN.jdb";
+    private String DIN_jdb = "je-4.1.7_logWithDIN.jdb";
+    private String DeletedLNCommit_jdb = "je-4.1.7_logWithDeletedLNCommit.jdb";
+    private String DeletedLNNoCommit_jdb =
+        "je-4.1.7_logWithDeletedLNNoCommit.jdb";
+    private String MixIN_jdb = "je-4.1.7_logWithMixIN.jdb";
+
+    private String singletonLNRep_jdb = "je-4.1.7_singletonLN";
+    private String DINRep_jdb = "je-4.1.7_din";
+    private String DeletedLNCommitRep_jdb =
+        "je-4.1.7_deletedLNCommit";
+    private String DeletedLNNoCommitRep_jdb =
+        "je-4.1.7_deletedLNNoCommit";
+    private String MixINRep_jdb = "je-4.1.7_mixIN";
+
+    /*
+     * Logs where the preupgrade utility has not been run, and an exception
+     * should be thrown by recovery.
+     */
+    private String NoPreUpgrade_Dups_jdb = "je-4.1.7_noPreUpgrade_dups";
+    private String NoPreUpgrade_Deltas_jdb = "je-4.1.7_noPreUpgrade_deltas";
+
+    public static void main(String args[]) throws Exception {
+        CreateOldVersionLogs covl = new CreateOldVersionLogs();
+
+        /* Uncomment one or more methods before running. */
+
+        /* Create standalone jdb. */
+        //covl.createLogWithSingletonLN();
+        //covl.createLogWithDIN();
+        //covl.createLogWithDeletedLN(true);
+        //covl.createLogWithDeletedLN(false);
+        //covl.createLogWithMixIN();
+
+        /* Create rep jdb. */
+        //covl.createRepLogWithSingletonLN();
+        //covl.createRepLogWithDIN();
+        //covl.createRepLogWithDeletedLN(true);
+        //covl.createRepLogWithDeletedLN(false);
+        //covl.createRepLogWithMixIN();
+
+        /* Create no-preupgrade jdb.
*/ + covl.logFile = new File(envHome, "00000000.jdb"); + covl.createNoPreUpgradeDups(); + //covl.createNoPreUpgradeDeltas(); + + System.out.println("Finish all creation!"); + } + + void openStandaloneEnv() { + env = new Environment(envHome, makeEnvConfig()); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "testDB", dbConfig); + } + + EnvironmentConfig makeEnvConfig() { + return makeEnvConfig(true /*smallNodeMax*/); + } + + EnvironmentConfig makeEnvConfig(boolean smallNodeMax) { + EnvironmentConfig envConfig = new EnvironmentConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + + /* Disable all daemon threads. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + + /* Use a 100 MB log file size to ensure only one file is written. */ + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(100 * (1 << 20))); + if (smallNodeMax) { + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), + Integer.toString(N_ENTRIES)); + } + return envConfig; + } + + void openRepEnv() throws InterruptedException { + envHomes = new File[repNodes]; + for (int i = 0; i < repNodes; i++) { + activateOneNode(i); + } + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "testDB", dbConfig); + } + + void activateOneNode(int i) { + envHomes[i] = new File(envHome, "rep" + i); + envHomes[i].mkdir(); + ReplicationConfig repConfig; + repConfig = new ReplicationConfig(); + repConfig.setGroupName("UnitTestGroup"); + repConfig.setNodeName("Node " + (i + 1)); + repConfig.setNodeHostPort("localhost:" + (5001 + i)); + repConfig.setHelperHosts("localhost:5001"); + repEnvs[i] = new ReplicatedEnvironment(envHomes[i], repConfig, + makeEnvConfig()); + ReplicatedEnvironment.State joinState = repEnvs[i].getState(); + if (joinState.equals(State.MASTER)) { + env = repEnvs[i]; + } + } + + void close(boolean rep) { + if (db != null) { + db.close(); + db = null; + } + if (rep) { + for (int i = repNodes - 1; i >= 0; i--) { + if (repEnvs[i] != null) { + repEnvs[i].close(); + repEnvs[i] = null; + } + } + } + if (env != null) { + env.close(); + env = null; + } + } + + void abnormalClose(boolean rep) { + if (rep) { + for (int i = repNodes - 1; i >= 0; i--) { + if (repEnvs[i] != null) { + RepInternal. + getNonNullRepImpl((ReplicatedEnvironment) repEnvs[i]). 
+ abnormalClose(); + repEnvs[i] = null; + } + } + } else { + DbInternal.getNonNullEnvImpl(env).abnormalClose(); + env = null; + } + } + + void createLogWithSingletonLN() throws Exception { + openStandaloneEnv(); + writeSingletonLNData(0, 100); + close(false); + upgradeJDB(envHome); + File renamedLogFile = new File(envHome, singletonLN_jdb); + renameJDB(renamedLogFile); + System.out.println("Finish creation: " + singletonLN_jdb); + } + + void createRepLogWithSingletonLN() throws Exception { + openRepEnv(); + writeSingletonLNData(0, 100); + upgradeAllRepJDB(false); + for (int i = 0; i < repNodes; i++) { + File renamedLogFile = + new File(envHomes[i], singletonLNRep_jdb + "_" + i + ".jdb"); + logFile = new File(envHomes[i], "00000000.jdb"); + renameJDB(renamedLogFile); + } + System.out.println("Finish creation: " + singletonLNRep_jdb); + } + + void writeSingletonLNData(int start, int end) { + for (int i = start; i < end; i++) { + theKey = makeEntry(i); + theData = makeEntry(i); + db.put(null, theKey, theData); + } + } + + void createLogWithDIN() throws Exception { + openStandaloneEnv(); + writeDINData(0, 100, 10); + close(false); + upgradeJDB(envHome); + File renamedLogFile = new File(envHome, DIN_jdb); + renameJDB(renamedLogFile); + System.out.println("Finish creation: " + DIN_jdb); + } + + void createRepLogWithDIN() throws Exception { + openRepEnv(); + writeDINData(0, 100, 10); + upgradeAllRepJDB(false); + for (int i = 0; i < repNodes; i++) { + File renamedLogFile = + new File(envHomes[i], DINRep_jdb + "_" + i + ".jdb"); + logFile = new File(envHomes[i], "00000000.jdb"); + renameJDB(renamedLogFile); + } + System.out.println("Finish creation: " + DINRep_jdb); + } + + void writeDINData(int start, int end, int keys) { + for(int i = start; i < end; i++) { + /* Insert ten records for a given key. */ + theKey = makeEntry(i % keys); + theData = makeEntry(i); + db.put(null, theKey, theData); + } + } + + void createLogWithDeletedLN(boolean ifCommit) + throws Exception { + + openStandaloneEnv(); + writeDINData(0, 100, 10); + deleteDIN(10, 10, 5, ifCommit); + abnormalClose(false); + upgradeJDB(envHome); + File renamedLogFile = new File(envHome, ifCommit ? + DeletedLNCommit_jdb : + DeletedLNNoCommit_jdb); + renameJDB(renamedLogFile); + System.out.println("Finish creation: " + (ifCommit ? + DeletedLNCommit_jdb : + DeletedLNNoCommit_jdb)); + } + + void createRepLogWithDeletedLN(boolean ifCommit) + throws Exception { + + openRepEnv(); + writeDINData(0, 100, 10); + + deleteDIN(10, 10, 5, ifCommit); + upgradeAllRepJDB(true); + for (int i = 0; i < repNodes; i++) { + File renamedLogFile = new File(envHomes[i], + (ifCommit ? + DeletedLNCommitRep_jdb : + DeletedLNNoCommitRep_jdb) + "_" + + i + ".jdb"); + logFile = new File(envHomes[i], "00000000.jdb"); + renameJDB(renamedLogFile); + } + System.out.println("Finish creation: " + + (ifCommit ? + DeletedLNCommitRep_jdb : + DeletedLNNoCommitRep_jdb)); + } + + void deleteDIN(int keys, int dups, int deleteDups, boolean ifCommit) { + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setSync(true); + Transaction txn = env.beginTransaction(null, txnConfig); + Cursor cur = db.openCursor(txn, null); + try { + for(int i = 0; i < keys; i++) { + for (int j = 0; j < dups; j++) { + cur.getNext(theKey, theData, null); + /* Delete half of dup records. */ + if (j < deleteDups) { + cur.delete(); + } + } + } + } finally { + /* Do a checkpoint before closing the cursor. 
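+             * (The checkpoint flushes the modified DINs/DBINs, including the
+             * deleted slots, to the log so that the generated .jdb file
+             * captures them even when the txn is never committed.)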
*/ + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + + if (ifCommit) { + cur.close(); + txn.commit(); + } + } + } + + void createLogWithMixIN() throws Exception { + openStandaloneEnv(); + writeDINData(0, 70, 10); + writeSingletonLNData(70, 100); + deleteDIN(10, 7, 3, true); + deleteDIN(10, 7, 3, false); + abnormalClose(false); + upgradeJDB(envHome); + File renamedLogFile = new File(envHome, MixIN_jdb); + renameJDB(renamedLogFile); + System.out.println("Finish creation: " + MixIN_jdb); + } + + void createRepLogWithMixIN() throws Exception { + openRepEnv(); + writeDINData(0, 70, 10); + writeSingletonLNData(70, 100); + deleteDIN(10, 7, 3, true); + deleteDIN(10, 7, 3, false); + upgradeAllRepJDB(true); + for (int i = 0; i < repNodes; i++) { + File renamedLogFile = + new File(envHomes[i], MixINRep_jdb + "_" + i + ".jdb"); + logFile = new File(envHomes[i], "00000000.jdb"); + renameJDB(renamedLogFile); + } + System.out.println("Finish creation: " + MixINRep_jdb); + } + + /** + * Create a log with DBINs/DINs in the last checkpoint. Do not run the + * preupgrade utility. An exception should be thrown when opening this + * environment with JE 5. + */ + void createNoPreUpgradeDups() throws Exception { + openStandaloneEnv(); + writeDINData(0, 100, 10); + + /* Write DBINs/DINs in a checkpoint. */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* Do not do a clean close. We want DBINs/DINs in last checkpoint. */ + abnormalClose(false); + + File renamedLogFile = new File(envHome, NoPreUpgrade_Dups_jdb); + renameJDB(renamedLogFile); + System.out.println("Finish creation: " + NoPreUpgrade_Dups_jdb); + } + + /** + * Create a log with regular deltas (no dups) in the last checkpoint. Do + * not run the preupgrade utility. An exception should be thrown when + * opening this environment with JE 5. + */ + void createNoPreUpgradeDeltas() throws Exception { + env = new Environment(envHome, makeEnvConfig(false /*smallNodeMax*/)); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "testDB", dbConfig); + + final int NUM_KEYS = 500; + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[10]); + + /* Insert records. */ + for (int j = 0; j < NUM_KEYS; j += 1) { + IntegerBinding.intToEntry(j, key); + final OperationStatus status = db.putNoOverwrite(null, key, data); + if (status != OperationStatus.SUCCESS) { + throw new RuntimeException(); + } + } + + /* Update records to create deltas. */ + data.setData(new byte[11]); + for (int j = 0; j < NUM_KEYS; j += 10) { + IntegerBinding.intToEntry(j, key); + final OperationStatus status = db.put(null, key, data); + if (status != OperationStatus.SUCCESS) { + throw new RuntimeException(); + } + } + + /* Write deltas in a checkpoint. */ + env.checkpoint(new CheckpointConfig().setForce(true)); + + /* Do not do a clean close. We want deltas in the last checkpoint. 
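+         * (abnormalClose() skips the clean shutdown checkpoint, so the
+         * deltas written above remain part of the last completed
+         * checkpoint.)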
*/ + abnormalClose(false); + + File renamedLogFile = new File(envHome, NoPreUpgrade_Deltas_jdb); + renameJDB(renamedLogFile); + System.out.println("Finish creation: " + NoPreUpgrade_Deltas_jdb); + } + + DatabaseEntry makeEntry(int val) { + byte[] data = new byte[] { (byte) val }; + return new DatabaseEntry(data); + } + + void renameJDB(File newFile) throws Exception { + if (!logFile.renameTo(newFile)) { + throw new Exception + ("Could not rename: " + logFile + " to " + newFile); + } + } + + void upgradeRepJDB(File envHome, + String groupName, + String nodeName, + String nodeHostPort, + String helperHosts) { + /*DbRepPreUpgrade_4_1 upgrade = + new DbRepPreUpgrade_4_1(envHome, groupName, nodeName, nodeHostPort, + helperHosts); + upgrade.preUpgrade();*/ + } + + /* Upgrade all the rep jdbs. */ + void upgradeAllRepJDB(boolean abnormalClose) { + for (int i = repNodes - 1; i >= 0; i--) { + if (repEnvs[i] != null) { + if (abnormalClose) { + RepInternal. + getNonNullRepImpl((ReplicatedEnvironment) repEnvs[i]). + abnormalClose(); + } else { + if (i == 0) { + db.close(); + } + repEnvs[i].close(); + } + upgradeRepJDB(envHomes[i], "UnitTestGroup", "Node " + (i + 1), + "localhost:" + (5001 + i), "localhost:5001"); + } + } + } + + void upgradeJDB(File envHome) { + /*DbPreUpgrade_4_1 upgrade = new DbPreUpgrade_4_1(envHome); + upgrade.preUpgrade();*/ + } +} diff --git a/test/com/sleepycat/je/tree/DupConvertTest.java b/test/com/sleepycat/je/tree/DupConvertTest.java new file mode 100644 index 0000000..3f46b89 --- /dev/null +++ b/test/com/sleepycat/je/tree/DupConvertTest.java @@ -0,0 +1,340 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.tree;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+public class DupConvertTest extends TestBase {
+    private File envHome;
+    private Environment env;
+    Database db;
+    private static DatabaseEntry theKey = new DatabaseEntry();
+    private static DatabaseEntry theData = new DatabaseEntry();
+    DatabaseEntry findKey = new DatabaseEntry();
+    DatabaseEntry findData = new DatabaseEntry();
+    private static int N_ENTRIES = 4;
+    private final String singletonLN_jdb = "je-4.1.7_logWithSingletonLN.jdb";
+    private final String DIN_jdb = "je-4.1.7_logWithDIN.jdb";
+    private final String DeletedLNCommit_jdb =
+        "je-4.1.7_logWithDeletedLNCommit.jdb";
+    private final String DeletedLNNoCommit_jdb =
+        "je-4.1.7_logWithDeletedLNNoCommit.jdb";
+    private final String MixIN_jdb = "je-4.1.7_logWithMixIN.jdb";
+
+    /*
+     * Logs where the preupgrade utility has not been run, and an exception
+     * should be thrown by recovery.
+     */
+    private final String NoPreUpgrade_Dups_jdb = "je-4.1.7_noPreUpgrade_dups";
+    private final String NoPreUpgrade_Deltas_jdb =
+        "je-4.1.7_noPreUpgrade_deltas";
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        envHome = SharedTestUtils.getTestDir();
+        super.setUp();
+    }
+
+    @After
+    public void tearDown() {
+        try {
+            if (env != null) {
+                env.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("tearDown: " + e);
+        }
+        envHome = null;
+        env = null;
+    }
+
+    private void openEnv(String logName, boolean readOnly, boolean loadLog)
+        throws DatabaseException, IOException {
+        /* Copy log file resource to log file zero. */
+        if (loadLog) {
+            TestUtils.loadLog(getClass(), logName, envHome);
+        }
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(false);
+        envConfig.setReadOnly(readOnly);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.NODE_MAX.getName(),
+             Integer.toString(N_ENTRIES));
+        env = new Environment(envHome, envConfig);
+
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(false);
+        dbConfig.setReadOnly(readOnly);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, "testDB", dbConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        db.close();
+        db = null;
+        env.close();
+        env = null;
+    }
+
+    @Test
+    public void testLogWithSingletonLN()
+        throws DatabaseException, IOException {
+
+        openEnv(singletonLN_jdb, false, true);
+
+        /* Verify the records are correctly read.
*/ + Cursor cur = db.openCursor(null, null); + try { + for(int i = 0; i < 100; i++) { + theKey = makeEntry(i); + theData = makeEntry(i); + cur.getNext(findKey, findData, null); + assertEquals(theKey, findKey); + assertEquals(theData, findData); + } + } finally { + cur.close(); + } + + insertNoDupRecordTest(); + insertDupRecordTest(); + deleteRecordTest(); + + closeEnv(); + } + + @Test + public void testLogWithDIN() + throws DatabaseException, IOException { + + openEnv(DIN_jdb, false, true); + + /* Verify the records are correctly read. */ + Cursor cur = db.openCursor(null, null); + try { + for(int i = 0; i < 10; i++) { + for (int j = 0; j < 10; j++) { + theKey = makeEntry(i); + theData = makeEntry(j * 10 + i); + cur.getNext(findKey, findData, null); + assertEquals(theKey, findKey); + assertEquals(theData, findData); + } + } + } finally { + cur.close(); + } + + insertNoDupRecordTest(); + insertDupRecordTest(); + deleteRecordTest(); + + closeEnv(); + } + + @Test + public void testLogWithDeleteLNCommit() + throws DatabaseException, IOException { + + openEnv(DeletedLNCommit_jdb, false, true); + + /* Verify the records are correctly read. */ + Cursor cur = db.openCursor(null, null); + try { + for(int i = 0; i < 10; i++) { + /* Have deleted half of the dup records. */ + for (int j = 5; j < 10; j++) { + theKey = makeEntry(i); + theData = makeEntry(j * 10 + i); + cur.getNext(findKey, findData, null); + assertEquals(theKey, findKey); + assertEquals(theData, findData); + } + } + } finally { + cur.close(); + } + + insertNoDupRecordTest(); + insertDupRecordTest(); + deleteRecordTest(); + + closeEnv(); + } + + @Test + public void testLogWithDeleteLNNoCommit() + throws DatabaseException, IOException { + + openEnv(DeletedLNNoCommit_jdb, false, true); + + /* Verify the records are correctly read. */ + Cursor cur = db.openCursor(null, null); + try { + for(int i = 0; i < 10; i++) { + /* The delete actions have not been committed. */ + for (int j = 0; j < 10; j++) { + theKey = makeEntry(i); + theData = makeEntry(j * 10 + i); + cur.getNext(findKey, findData, null); + assertEquals(theKey, findKey); + assertEquals(theData, findData); + } + } + } finally { + cur.close(); + } + + insertNoDupRecordTest(); + insertDupRecordTest(); + deleteRecordTest(); + + closeEnv(); + } + + @Test + public void testLogWithMixIN() + throws DatabaseException, IOException { + + openEnv(MixIN_jdb, false, true); + + /* Verify the records are correctly read. */ + Cursor cur = db.openCursor(null, null); + try { + for(int i = 0; i < 10; i++) { + /* Have deleted some of the dup records. */ + for (int j = 3; j < 7; j++) { + theKey = makeEntry(i); + theData = makeEntry(j * 10 + i); + cur.getNext(findKey, findData, null); + assertEquals(theKey, findKey); + assertEquals(theData, findData); + } + } + for(int i = 70; i < 100; i++) { + theKey = makeEntry(i); + theData = makeEntry(i); + cur.getNext(findKey, findData, null); + assertEquals(theKey, findKey); + assertEquals(theData, findData); + } + } finally { + cur.close(); + } + + insertNoDupRecordTest(); + insertDupRecordTest(); + deleteRecordTest(); + + closeEnv(); + } + + /** + * Open a log with DBINs/DINs in the last checkpoint. The preupgrade + * utility was not run, so an exception should be thrown when opening this + * environment with JE 5. 
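+     * (Recovery is expected to fail with an EnvironmentFailureException
+     * whose message mentions the preupgrade requirement; the catch clauses
+     * below assert this.)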
+     */
+    @Test
+    public void testNoPreUpgradeDups()
+        throws IOException {
+
+        try {
+            openEnv(NoPreUpgrade_Dups_jdb, false, true);
+            fail();
+        } catch (EnvironmentFailureException e) {
+            assertTrue(e.getMessage().contains("PreUpgrade"));
+        }
+    }
+
+    /**
+     * Open a log with deltas in the last checkpoint. The preupgrade utility
+     * was not run, so an exception should be thrown when opening this
+     * environment with JE 5.
+     */
+    @Test
+    public void testNoPreUpgradeDeltas()
+        throws IOException {
+
+        try {
+            openEnv(NoPreUpgrade_Deltas_jdb, false, true);
+            fail();
+        } catch (EnvironmentFailureException e) {
+            assertTrue(e.getMessage().contains("PreUpgrade"));
+        }
+    }
+
+    private void insertNoDupRecordTest() {
+        OperationStatus status;
+        theKey = makeEntry(100);
+        theData = makeEntry(100);
+        status = db.putNoOverwrite(null, theKey, theData);
+        assertEquals(OperationStatus.SUCCESS, status);
+    }
+
+    private void insertDupRecordTest() {
+        OperationStatus status;
+        theKey = makeEntry(5);
+        theData = makeEntry(101);
+        status = db.putNoOverwrite(null, theKey, theData);
+        assertEquals(OperationStatus.KEYEXIST, status);
+        status = db.put(null, theKey, theData);
+        assertEquals(OperationStatus.SUCCESS, status);
+    }
+
+    private void deleteRecordTest() {
+        OperationStatus status;
+        status = db.delete(null, makeEntry(6));
+        assertEquals(OperationStatus.SUCCESS, status);
+        status = db.get(null, makeEntry(6), findData, null);
+        assertEquals(OperationStatus.NOTFOUND, status);
+    }
+
+    private DatabaseEntry makeEntry(int val) {
+        byte[] data = new byte[] { (byte) val };
+        return new DatabaseEntry(data);
+    }
+}
diff --git a/test/com/sleepycat/je/tree/FetchWithNoLatchTest.java b/test/com/sleepycat/je/tree/FetchWithNoLatchTest.java
new file mode 100644
index 0000000..cd3e5ce
--- /dev/null
+++ b/test/com/sleepycat/je/tree/FetchWithNoLatchTest.java
@@ -0,0 +1,545 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.TestHook; + +public class FetchWithNoLatchTest extends TestBase { + static private final boolean DEBUG = false; + + private final File envHome; + private Environment env; + private Database db1; + private Database db2; + private Database db3; + + public FetchWithNoLatchTest() { + envHome = SharedTestUtils.getTestDir(); + + Key.DUMP_TYPE = DumpType.BINARY; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + initEnv(); + } + + @After + public void tearDown() { + + try { + db1.close(); + db2.close(); + db3.close(); + env.close(); + } catch (DatabaseException E) { + } + } + + private void initEnv() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_OFFHEAP_EVICTOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + env = new Environment(envHome, envConfig); + + String databaseName = "testDb1"; + Transaction txn = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db1 = env.openDatabase(txn, databaseName, dbConfig); + txn.commit(); + + databaseName = "testDb2"; + txn = env.beginTransaction(null, null); + dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db2 = env.openDatabase(txn, databaseName, dbConfig); + txn.commit(); + + databaseName = "testDb3"; + dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db3 = env.openDatabase(null, databaseName, dbConfig); + } + + @Test + public void testSplit1() + throws Exception { + + Key.DUMP_TYPE = DumpType.BINARY; + final Database db = db1; + + try { + /* Create the initial tree */ + Tree tree = createTree(db); + + /* Evict the BIN B containing the key 20 (BIN 14) */ + final Pair pair = evictBIN(db, 20); + final IN parent = pair.first(); + final IN bin = 
pair.second();
+
+            /*
+             * Refetch B (via getParentINForChildIN() below) while splitting
+             * its sibling and parent (BINs 11 and 13) at the "same" time. The
+             * split is done via the test hook below, which is executed right
+             * after IN.fetchIN() releases the latch on B's parent in order to
+             * fetch B. The split causes B's parent to change.
+             */
+            FetchINTestHook fetchINHook = new FetchINTestHook(
+                db, parent, 66, true, false);
+
+            parent.setFetchINHook(fetchINHook);
+            tree.setFetchINHook(fetchINHook);
+
+            bin.latch();
+            SearchResult result = tree.getParentINForChildIN(
+                bin, false, /*useTargetLevel*/ true, /*doFetch*/
+                CacheMode.UNCHANGED);
+
+            result.parent.releaseLatch();
+
+            /* Make sure everything went fine. */
+            assertTrue(fetchINHook.foundNullChild);
+            assertTrue(result.exactParentFound);
+            assertTrue(parent != result.parent);
+            assertTrue(result.parent.getNodeId() > parent.getNodeId());
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+
+    @Test
+    public void testSplit2()
+        throws Exception {
+
+        Key.DUMP_TYPE = DumpType.BINARY;
+        final Database db = db2;
+
+        try {
+            /* Create the initial tree */
+            Tree tree = createTree(db);
+
+            /* Evict the BIN B containing the key 30 (BIN 14) */
+            final Pair pair = evictBIN(db, 30);
+            final IN parent = pair.first();
+            final IN bin = pair.second();
+
+            /*
+             * Refetch B (via getParentINForChildIN() below) while splitting B
+             * itself and its parent (BIN 13) at the "same" time. The split is
+             * done via the FetchINTestHook, which is executed right after
+             * IN.fetchIN() releases the latch on B's parent in order to fetch
+             * B. The split does not change B's parent. However, the version
+             * of B that fetchIN() is fetching becomes obsolete as a result of
+             * the split, and should not be attached to the tree. This is
+             * indeed avoided because fetchIN() will find B's new version
+             * in the tree, and will return that one instead of the one just
+             * fetched from disk.
+             */
+            FetchINTestHook fetchINHook = new FetchINTestHook(
+                db, parent, 33, true, false);
+
+            parent.setFetchINHook(fetchINHook);
+            tree.setFetchINHook(fetchINHook);
+
+            bin.latch();
+            SearchResult result = tree.getParentINForChildIN(
+                bin, false, /*useTargetLevel*/ true, /*doFetch*/
+                CacheMode.UNCHANGED);
+
+            /* Make sure everything went fine. */
+            assertTrue(fetchINHook.foundNullChild);
+            assertTrue(result.exactParentFound);
+            assertTrue(parent != result.parent);
+
+            result.parent.releaseLatch();
+
+            rangeSearch(db);
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+
+    @Test
+    public void testSplit3()
+        throws Exception {
+
+        Key.DUMP_TYPE = DumpType.BINARY;
+        final Database db = db3;
+
+        try {
+            /* Create the initial tree */
+            Tree tree = createTree(db);
+
+            /* Evict the BIN B containing the key 50 (BIN 11) */
+            final Pair pair = evictBIN(db, 50);
+            final IN parent = pair.first();
+            final IN bin = pair.second();
+
+            /*
+             * Refetch B (via getParentINForChildIN() below) while splitting B
+             * itself and its parent (BIN 13) at the "same" time. The split is
+             * done via the FetchINTestHook, which is executed right after
+             * IN.fetchIN() releases the latch on B's parent in order to fetch
+             * B. The split does not change B's parent. However, the version
+             * of B that fetchIN() is fetching becomes obsolete as a result of
+             * the split, and should not be attached to the tree. In this test,
+             * after B is split, we evict B before resuming with fetchIN().
+             * fetchIN() will not attach the fetched (obsolete) version in the
+             * tree because the parent slot has an LSN that is different than
+             * the fetched LSN.
+             */
+            FetchINTestHook fetchINHook = new FetchINTestHook(
+                db, parent, 66, true, true);
+
+            parent.setFetchINHook(fetchINHook);
+            tree.setFetchINHook(fetchINHook);
+
+            bin.latch();
+            SearchResult result = tree.getParentINForChildIN(
+                bin, false, /*useTargetLevel*/ true, /*doFetch*/
+                CacheMode.UNCHANGED);
+
+            result.parent.releaseLatch();
+
+            /* Make sure everything went fine. */
+            assertTrue(fetchINHook.foundNullChild);
+            assertTrue(result.exactParentFound);
+            assertTrue(parent == result.parent);
+
+            rangeSearch(db);
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+    @Test
+    public void testSplit4()
+        throws Exception {
+
+        Key.DUMP_TYPE = DumpType.BINARY;
+        final Database db = db2;
+
+        try {
+            /* Create the initial tree */
+            Tree tree = createTree(db);
+
+            /* Evict the BIN B containing the key 120 (BIN 9) */
+            final Pair pair = evictBIN(db, 120);
+            final IN parent = pair.first();
+
+            /*
+             * Execute search(110) while splitting B (but not its parent) at
+             * the "same" time. The split is done via the FetchINTestHook,
+             * which is executed right after IN.fetchIN() (called from
+             * search(110)) releases the latch on B's parent in order to
+             * fetch B. The split does not change B's parent. However,
+             * the key we are looking for is now in B's new sibling and
+             * fetchIN() should return that sibling without causing a
+             * restart of the search.
+             */
+            FetchINTestHook fetchINHook = new FetchINTestHook(
+                db, parent, 126, false, false);
+
+            parent.setFetchINHook(fetchINHook);
+            //parent.setIdentifierKey(new byte[]{(byte)40});
+
+            Cursor cursor = db.openCursor(null, CursorConfig.DEFAULT);
+
+            assertEquals(
+                OperationStatus.SUCCESS,
+                cursor.getSearchKey(
+                    new DatabaseEntry(new byte[]{(byte)110}),
+                    new DatabaseEntry(),
+                    LockMode.DEFAULT));
+
+            cursor.close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+    /*
+     * A test hook assigned to a given IN P and triggered when P.fetchIN()
+     * is called to fetch a missing child of P, after P is unlatched. The
+     * doHook() method causes P to split by inserting a given key in one of
+     * P's children.
+     */
+    class FetchINTestHook implements TestHook {
+
+        Database db;
+        IN parent;
+        int newKey;
+        boolean parentSplits;
+        boolean evict;
+        boolean foundNullChild = false;
+
+        FetchINTestHook(
+            Database db,
+            IN parent,
+            int newKey,
+            boolean parentSplits,
+            boolean evict) {
+
+            this.db = db;
+            this.parent = parent;
+            this.newKey = newKey;
+            this.parentSplits = parentSplits;
+            this.evict = evict;
+        }
+
+        @Override
+        public void doHook() {
+            /* Only process the first call to the hook. */
+            parent.setFetchINHook(null);
+
+            int numEntries = parent.getNEntries();
+
+            /* split the parent of the missing bin.
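+             * Inserting newKey into the full child forces a split; when
+             * parentSplits is true the split propagates to the parent, which
+             * is then left with fewer entries than the count captured above.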
*/ + assertEquals( + OperationStatus.SUCCESS, + db.put(null, + new DatabaseEntry(new byte[]{(byte)newKey}), + new DatabaseEntry(new byte[] {1}))); + + if (parentSplits) { + assert(numEntries > parent.getNEntries()); + } + + if (evict) { + evictBIN(db, newKey); + } + } + + @Override public void doHook(Object obj) { + assertFalse(foundNullChild); + foundNullChild = true; + } + + @Override public void hookSetup() { } + @Override public void doIOHook() { } + + @Override public Object getHookValue() { return foundNullChild; } + }; + + /* + * Create a tree that looks like this: + * + * --------------------- + * | nid: 12 - key: 70 | + * |...................| + * | 70 | 110 | + * --------------------- + * / \ + * / \ + * ---------------------- .. Subtree shown + * | nid: 13 - key: 70 | below. + * |....................| + * | 40 | 50 | 80 | + * ---------------------- + * / | \ 80 <= k < 110 + * ------------------ | ----------------- + * | | | + * --------------------- --------------------- ---------------------- + * | nid: 14 - key: 40 | | nid: 11 - key: 70 | | nid: 10 - key: 100 | + * |...................| |...................| |....................| + * | 10 | 20 | 30 | 40 | | 50 | 60 | 65 | 70 | | 80 | 90 | 95 | 100 | + * --------------------- --------------------- ---------------------- + * + * + * ---------------------- + * | nid: 8 - key: 160 | + * |....................| + * | 110 | 140 | + * ---------------------- + * / \ + * / \ + * / \ + * ------------------------- ------------------------ + * | nid: 9 - key: 130 | | nid: 7 - key: 160 | + * |.......................| |......................| + * | 110 | 120 | 125 | 130 | | 140 | 150 | 160 | + * ------------------------- ------------------------ + */ + Tree createTree(Database db) { + + for (int i = 160; i > 0; i-= 10) { + assertEquals( + OperationStatus.SUCCESS, + db.put(null, + new DatabaseEntry(new byte[] { (byte) i }), + new DatabaseEntry(new byte[] {1}))); + } + + assertEquals( + OperationStatus.SUCCESS, + db.put(null, + new DatabaseEntry(new byte[]{(byte)65}), + new DatabaseEntry(new byte[] {1}))); + + assertEquals( + OperationStatus.SUCCESS, + db.put(null, + new DatabaseEntry(new byte[]{(byte)95}), + new DatabaseEntry(new byte[] {1}))); + + assertEquals( + OperationStatus.SUCCESS, + db.put(null, + new DatabaseEntry(new byte[]{(byte)125}), + new DatabaseEntry(new byte[] {1}))); + + Tree tree = DbInternal.getDbImpl(db).getTree(); + + if (DEBUG) { + System.out.println(""); + tree.dump(); + } + + return tree; + } + + /* + * Evict the BIN containing the given key and return the BIN and its parent. + */ + Pair evictBIN(Database db, int keyVal) { + + /* EVICT_BIN will not evict a dirty BIN. 
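+         * Sync first so the BIN is written and clean by the time
+         * cursor.close() applies the EVICT_BIN cache mode below.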
*/ + env.sync(); + + Tree tree = DbInternal.getDbImpl(db).getTree(); + + Cursor cursor = db.openCursor(null, CursorConfig.DEFAULT); + cursor.setCacheMode(CacheMode.EVICT_BIN); + + CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + + DatabaseEntry key = new DatabaseEntry(new byte[] { (byte) keyVal }); + DatabaseEntry data = new DatabaseEntry(); + assertEquals( + OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, LockMode.DEFAULT)); + + IN bin = cursorImpl.getBIN(); + bin.latch(); + + SearchResult result = tree.getParentINForChildIN( + bin, false, /*useTargetLevel*/ true, /*doFetch*/ + CacheMode.UNCHANGED); + + assertTrue(result.exactParentFound); + + final IN parent = result.parent; + parent.releaseLatch(); + + /* evict the BIN */ + cursor.close(); + + return new Pair<>(parent, bin); + } + + /* + * Do a range search for keys in [10, 100] + */ + void rangeSearch(Database db) { + + Cursor cursor = db.openCursor(null, CursorConfig.DEFAULT); + DatabaseEntry key = new DatabaseEntry(new byte[] { (byte) 10 }); + DatabaseEntry data = new DatabaseEntry(); + assertEquals( + OperationStatus.SUCCESS, + cursor.getSearchKeyRange(key, data, LockMode.DEFAULT)); + + OperationStatus status = OperationStatus.SUCCESS; + int keyVal = 0; + int numKeys = 0; + do { + status = cursor.getNext(key, data, LockMode.DEFAULT); + + keyVal = (int)(key.getData()[0]); + if (keyVal > 100) { + break; + } + + ++numKeys; + System.out.println(keyVal); + } while (status == OperationStatus.SUCCESS); + + cursor.close(); + + assertEquals(numKeys, 12); + } +} diff --git a/test/com/sleepycat/je/tree/GetParentNodeTest.java b/test/com/sleepycat/je/tree/GetParentNodeTest.java new file mode 100644 index 0000000..dbdd996 --- /dev/null +++ b/test/com/sleepycat/je/tree/GetParentNodeTest.java @@ -0,0 +1,374 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +public class GetParentNodeTest extends TestBase { + static private final boolean DEBUG = false; + + private final File envHome; + private Environment env; + private Database db; + private IN rootIN; + private IN firstLevel2IN; + private BIN firstBIN; + + public GetParentNodeTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + initEnv(); + } + + @After + public void tearDown() { + + try { + db.close(); + env.close(); + } catch (DatabaseException E) { + } + } + + private void initEnv() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + String databaseName = "testDb"; + Transaction txn = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(txn, databaseName, dbConfig); + txn.commit(); + } + + /** + * Test getParentINForChildIN and GetParentBINForChildLN painstakingly on a + * hand constructed tree. + */ + @Test + public void testBasic() + throws Exception { + + try { + /* + * Make a tree w/3 levels in the main tree and a single dup + * tree. The dupTree has two levels. 
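+     * (No duplicate records are inserted below, so only the main tree
+     * appears in the diagram.)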
The tree looks like this: + * + * root(key=a) + * | + * +---------------------------+ + * IN(key=a) IN(key=e) + * | | + * +------------------+ +--------+--------+ + * BIN(key=a) BIN(c) BIN(e) BIN(g) BIN(i) + * | | | | | | | | | | | + * LNa,b LNc,d LNe,f LNg,h LNi,j,k + */ + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("a"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("b"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("c"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("d"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("e"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("f"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("g"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("h"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("i"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("j"), + new StringDbt("data1"))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new StringDbt("k"), + new StringDbt("data1"))); + + /* + * Test exact matches. + */ + checkTreeUsingExistingNodes(true); + checkTreeUsingExistingNodes(false); + + /* Test potential matches. */ + checkTreeUsingPotentialNodes(); + + /* Should be no latches held. */ + assertEquals(0, LatchSupport.nBtreeLatchesHeld()); + } catch (Throwable t) { + t.printStackTrace(); + throw new Exception(t); + } + } + + private void checkTreeUsingExistingNodes(boolean requireExactMatch) + throws DatabaseException { + + /* Start at the root. */ + DatabaseImpl database = DbInternal.getDbImpl(db); + Tree tree = database.getTree(); + + if (DEBUG) { + tree.dump(); + } + + rootIN = tree.withRootLatchedShared + (new GetRoot(DbInternal.getDbImpl(db))); + assertEquals(rootIN.getNEntries(), 2); + + /* Second and third level. */ + firstBIN = null; + for (int i = 0; i < rootIN.getNEntries(); i++) { + /* Each level 2 IN. */ + IN in = (IN) rootIN.getTarget(i); + + if (i == 0) { + firstLevel2IN = in; + } + checkMatch(tree, in, rootIN, i, requireExactMatch); + + /* For each BIN, find its parent, and then find its LNs. */ + for (int j = 0; j < in.getNEntries(); j++) { + BIN bin = (BIN) in.getTarget(j); + + if (firstBIN == null) { + firstBIN = bin; + } + checkMatch(tree, bin, in, j, requireExactMatch); + + for (int k = 0; k < bin.getNEntries(); k++) { + checkMatch(tree, bin, bin.getKey(k), k, bin.getLsn(k)); + } + } + } + } + + /* + * Do a parent search, expect to find the parent, check that we do. 
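+     * The target is latched only long enough to capture its node id,
+     * identifier key and level; getParentINForChildIN() then searches down
+     * from the root and returns the parent latched, so the latch is
+     * released once the assertions are done.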
+     */
+    private void checkMatch(Tree tree,
+                            IN target,
+                            IN parent,
+                            int index,
+                            boolean requireExactMatch)
+        throws DatabaseException {
+
+        target.latch();
+
+        long targetId = target.getNodeId();
+        byte[] targetKey = target.getIdentifierKey();
+        int targetLevel = -1;
+        int exclusiveLevel = target.getLevel() + 1;
+
+        target.releaseLatch();
+
+        SearchResult result = tree.getParentINForChildIN(
+            targetId, targetKey, targetLevel,
+            exclusiveLevel, requireExactMatch, true,
+            CacheMode.DEFAULT, null);
+
+        assertTrue(result.exactParentFound);
+        assertEquals("Target=" + target + " parent=" + parent,
+                     index, result.index);
+        assertEquals(parent, result.parent);
+        parent.releaseLatch();
+    }
+
+    /*
+     * Search for the BIN for this LN.
+     */
+    private void checkMatch(Tree tree,
+                            BIN parent,
+                            byte[] mainKey,
+                            int index,
+                            long expectedLsn)
+        throws DatabaseException {
+        TreeLocation location = new TreeLocation();
+
+        assertTrue(tree.getParentBINForChildLN
+                   (location, mainKey, false, false, CacheMode.DEFAULT));
+        location.bin.releaseLatch();
+        assertEquals(parent, location.bin);
+        assertEquals(index, location.index);
+        assertEquals(expectedLsn, location.childLsn);
+
+        assertTrue(tree.getParentBINForChildLN
+                   (location, mainKey, true, false, CacheMode.DEFAULT));
+        location.bin.releaseLatch();
+        assertEquals(parent, location.bin);
+        assertEquals(index, location.index);
+        assertEquals(expectedLsn, location.childLsn);
+
+        assertTrue(tree.getParentBINForChildLN
+                   (location, mainKey, true, false, CacheMode.DEFAULT));
+        location.bin.releaseLatch();
+        assertEquals(parent, location.bin);
+        assertEquals(index, location.index);
+        assertEquals(expectedLsn, location.childLsn);
+    }
+
+    private class GetRoot implements WithRootLatched {
+
+        private final DatabaseImpl db;
+
+        GetRoot(DatabaseImpl db) {
+            this.db = db;
+        }
+
+        public IN doWork(ChildReference root)
+            throws DatabaseException {
+
+            return (IN) root.fetchTarget(db, null);
+        }
+    }
+
+    /**
+     * Make up non-existent nodes and see where they'd fit in. This exercises
+     * recovery type processing and cleaning.
+     */
+    private void checkTreeUsingPotentialNodes()
+        throws DatabaseException {
+
+        DatabaseImpl database = DbInternal.getDbImpl(db);
+        Tree tree = database.getTree();
+
+        /*
+         * Make an IN with the key "ab". Its potential parent should be the
+         * first level 2 IN.
+         */
+        IN inAB = new IN(database, StringUtils.toUTF8("ab"), 4, 2);
+        checkPotential(tree, inAB, firstLevel2IN);
+
+        /*
+         * Make a BIN with the key "ab". Its potential parent should be the
+         * first level 2 IN.
+         */
+        BIN binAB =
+            new BIN(database, StringUtils.toUTF8("ab"), 4, 1);
+        checkPotential(tree, binAB, firstLevel2IN);
+
+        /*
+         * Make an LN with the key "ab". Its potential parent should be the
+         * first BIN.
+         */
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        byte[] mainKey = StringUtils.toUTF8("ab");
+        checkPotential(tree, firstBIN, mainKey, mainKey);
+    }
+
+    private void checkPotential(Tree tree, IN potential, IN expectedParent)
+        throws DatabaseException {
+
+        /* Try an exact match, expect a failure, then try an inexact match.
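+         * The fabricated node was never inserted into the tree, so the exact
+         * search must report no parent; the inexact search should land on
+         * the IN that would become its parent on insertion.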
*/ + potential.latch(); + SearchResult result = tree.getParentINForChildIN( + potential, false, /*useTargetLevel*/ + true, /*dofetch*/ CacheMode.DEFAULT); + + assertFalse(result.exactParentFound); + assertTrue(result.parent == null); + + potential.latch(); + + long targetId = potential.getNodeId(); + byte[] targetKey = potential.getIdentifierKey(); + int targetLevel = -1; + int exclusiveLevel = potential.getLevel() + 1; + + potential.releaseLatch(); + + result = tree.getParentINForChildIN( + targetId, targetKey, targetLevel, + exclusiveLevel, false, /*requireExactMatch*/ true, /*dofetch*/ + CacheMode.DEFAULT, null); + + assertFalse(result.exactParentFound); + assertEquals("expected = " + expectedParent.getNodeId() + + " got" + result.parent.getNodeId(), + expectedParent, result.parent); + result.parent.releaseLatch(); + } + + private void checkPotential(Tree tree, + BIN expectedParent, + byte[] mainKey, + byte[] expectedKey) + throws DatabaseException { + + /* Try an exact match, expect a failure, then try an inexact match. */ + TreeLocation location = new TreeLocation(); + boolean found = tree.getParentBINForChildLN( + location, mainKey, false, false, CacheMode.DEFAULT); + + assertTrue(!found || location.bin.isEntryKnownDeleted(location.index)); + + location.bin.releaseLatch(); + assertEquals(location.bin, expectedParent); + assertEquals(expectedKey, location.lnKey); + } +} diff --git a/test/com/sleepycat/je/tree/INEntryTestBase.java b/test/com/sleepycat/je/tree/INEntryTestBase.java new file mode 100644 index 0000000..bd902c6 --- /dev/null +++ b/test/com/sleepycat/je/tree/INEntryTestBase.java @@ -0,0 +1,170 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.nio.ByteBuffer; + +import org.junit.After; +import org.junit.Before; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class INEntryTestBase extends TestBase { + + File envHome = SharedTestUtils.getTestDir(); + + EnvironmentConfig envConfig; + + int nodeMaxEntries; + + short compactMaxKeyLength = 0; + + CacheMode cacheMode = CacheMode.DEFAULT; + + Environment env = null; + + protected static String DB_NAME = "TestDb"; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + envConfig.setConfigParam(EnvironmentConfig.TREE_COMPACT_MAX_KEY_LENGTH, + String.valueOf(compactMaxKeyLength)); + nodeMaxEntries = Integer.parseInt + (envConfig.getConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES)); + env = new Environment(envHome, envConfig); + } + + @After + public void tearDown() { + env.close(); + } + + /* Assumes the test creates just one IN node. */ + protected void verifyINMemorySize(DatabaseImpl dbImpl) { + BIN in = (BIN)(dbImpl.getTree().getFirstNode(cacheMode)); + in.releaseLatch(); + + final IN lastNode = dbImpl.getTree().getLastNode(cacheMode); + assertEquals(in, lastNode); + assertTrue(in.verifyMemorySize()); + + in.releaseLatch(); + TestUtils.validateNodeMemUsage(dbImpl.getEnv(), true); + } + + protected Database createDb(String dbName, + int keySize, + int count, + boolean keyPrefixingEnabled) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(false); + dbConfig.setKeyPrefixing(keyPrefixingEnabled); + + Database db = env.openDatabase(null, dbName, dbConfig); + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + + DatabaseEntry key = new DatabaseEntry(); + + for (int i=0; i < count; i++) { + key.setData(createByteVal(i, keySize)); + db.put(null, key, key); + verifyINMemorySize(dbImpl); + } + return db; + } + + protected Database createDb(String dbName, + int keySize, + int count) { + return createDb(dbName, keySize, count, false); + } + + protected Database createDupDb(String dbName, + int keySize, + int count) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + + Database db = env.openDatabase(null, dbName, dbConfig); + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < count; i++) { + key.setData(new byte[0]); + data.setData(createByteVal(i, keySize)); + db.put(null, key, data); + verifyINMemorySize(dbImpl); + } + return db; + } + + protected byte[] createByteVal(int val, int arrayLength) { + ByteBuffer byteBuffer = ByteBuffer.allocate(arrayLength); + if 
(arrayLength >= 4) { + byteBuffer.putInt(val); + } else if (arrayLength >= 2) { + byteBuffer.putShort((short) val); + } else { + byteBuffer.put((byte) val); + } + return byteBuffer.array(); + } + + /* Dummy test IN. */ + class TestIN extends IN { + private int maxEntries; + + TestIN(int capacity) { + maxEntries = capacity; + } + + @Override + protected int getCompactMaxKeyLength() { + return compactMaxKeyLength; + } + + @Override + public int getMaxEntries() { + return maxEntries; + } + } +} diff --git a/test/com/sleepycat/je/tree/INKeyRepTest.java b/test/com/sleepycat/je/tree/INKeyRepTest.java new file mode 100644 index 0000000..f161bff --- /dev/null +++ b/test/com/sleepycat/je/tree/INKeyRepTest.java @@ -0,0 +1,612 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Random; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.PartialComparator; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.tree.INKeyRep.Default; +import com.sleepycat.je.tree.INKeyRep.MaxKeySize; +import com.sleepycat.je.tree.INKeyRep.Type; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.je.utilint.VLSN; + +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class INKeyRepTest extends INEntryTestBase { + + /** + * Permute tests over various compactMaxKeyLength sizes. + */ + @Parameters + public static List genParams() { + + return Arrays.asList( + new Object[][]{{(short)5}, {(short)16}, {(short)202}}); + } + + public INKeyRepTest(short keyLength) { + compactMaxKeyLength = keyLength; + customName = ":maxLen=" + compactMaxKeyLength; + } + + /** + * Test use of the representations at the IN level. Checks memory + * bookkeeping after each operation. + */ + @Test + public void testINBasic() { + + int keySize = compactMaxKeyLength / 2; + + Database db = createDb(DB_NAME, keySize, nodeMaxEntries); + + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + EnvironmentImpl env = dbImpl.getEnv(); + + boolean embeddedLNs = (env.getMaxEmbeddedLN() >= keySize); + + if (embeddedLNs) { + verifyAcrossINEvict(db, Type.DEFAULT, Type.DEFAULT); + } else { + verifyAcrossINEvict(db, Type.DEFAULT, Type.MAX_KEY_SIZE); + } + + db.close(); + + /* Ensure that default value constants are kept in sync. 
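+         * MaxKeySize.DEFAULT_MAX_KEY_LENGTH and the
+         * TREE_COMPACT_MAX_KEY_LENGTH param default are declared in
+         * different classes; this guards against changing one without the
+         * other.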
*/ + assertEquals + (String.valueOf(INKeyRep.MaxKeySize.DEFAULT_MAX_KEY_LENGTH), + EnvironmentParams.TREE_COMPACT_MAX_KEY_LENGTH.getDefault()); + } + + @Test + public void testDINEvict() { + + int keySize = compactMaxKeyLength / 2; + + Database db = createDupDb(DB_NAME, keySize, nodeMaxEntries); + + verifyAcrossINEvict(db, Type.MAX_KEY_SIZE, Type.MAX_KEY_SIZE); + + db.close(); + } + + private BIN verifyAcrossINEvict(Database db, + Type pre, + Type post) { + + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + + BIN firstBin = (BIN)(dbImpl.getTree().getFirstNode(cacheMode)); + assertEquals(pre, firstBin.getKeyVals().getType()); + + firstBin.evictLNs(); + firstBin.releaseLatch(); + assertEquals(post, firstBin.getKeyVals().getType()); + + verifyINMemorySize(dbImpl); + return firstBin; + } + + @Test + public void testINMutate() { + + commonINMutate(false); + } + + @Test + public void testINMutatePrefix() { + commonINMutate(true); + } + + public void commonINMutate(boolean prefixKeys) { + + final int keySize = compactMaxKeyLength / 2; + + final Database db = createDb(DB_NAME, keySize, nodeMaxEntries-1, + prefixKeys); + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + EnvironmentImpl env = dbImpl.getEnv(); + + boolean embeddedLNs = (env.getMaxEmbeddedLN() >= keySize); + + BIN bin = (BIN)(dbImpl.getTree().getFirstNode(cacheMode)); + bin.evictLNs(); + + if (embeddedLNs) { + assertEquals(Type.DEFAULT, bin.getKeyVals().getType()); + } else { + assertEquals(Type.MAX_KEY_SIZE, bin.getKeyVals().getType()); + } + + bin.releaseLatch(); + + DatabaseEntry key = new DatabaseEntry(); + key.setData(createByteVal(nodeMaxEntries, keySize+1)); + db.put(null, key, key); + + verifyINMemorySize(dbImpl); + assertEquals(Type.DEFAULT, bin.getKeyVals().getType()); + + db.close(); + } + + @Test + public void testBasic() { + final int size = 32; + final IN parent = new TestIN(size); + commonTest(parent, new Default(size)); + commonTest(parent, new MaxKeySize(size, + (short) Math.max(1, (compactMaxKeyLength - 9)))); + } + + public void commonTest(IN parent, INKeyRep targets) { + targets = targets.set(1, new byte[]{1}, parent); + assertEquals(1, targets.get(1)[0]); + + targets.copy(0, 5, 1, parent); + assertEquals(1, targets.get(1)[0]); + + targets.copy(0, 5, 2, parent); + assertEquals(1, targets.get(6)[0]); + + targets.set(1, null, parent); + + assertEquals(null, targets.get(1)); + + targets.copy(5, 0, 2, null); + assertEquals(1, targets.get(1)[0]); + } + + @Test + public void testDefaultKeyVals() { + final int size = 128; + final IN parent = new TestIN(size); + Default defrep = new Default(size); + byte[][] refEntries = initRep(parent, defrep); + checkEquals(refEntries, defrep); + } + + @Test + public void testMaxKeyVals() { + final int size = 128; + final IN parent = new TestIN(size); + MaxKeySize defrep = new MaxKeySize(size, compactMaxKeyLength); + byte[][] refEntries = initRep(parent, defrep); + checkEquals(refEntries, defrep); + } + + @Test + public void testMaxKeyMutation() { + final int size = 32; + final IN parent = new TestIN(size); + INKeyRep defrep = new MaxKeySize(size, compactMaxKeyLength); + initRep(parent, defrep); + + /* No mutation on null. */ + defrep = defrep.set(0, null, parent); + assertEquals(Type.MAX_KEY_SIZE, defrep.getType()); + + /* No mutation on change */ + defrep = defrep.set(0, new byte[0], parent); + assertEquals(Type.MAX_KEY_SIZE, defrep.getType()); + + /* Mutate on large key. 
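+         * A key longer than compactMaxKeyLength no longer fits the fixed
+         * per-slot budget of the MAX_KEY_SIZE rep, so set() must return a
+         * DEFAULT rep.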
*/ + defrep = defrep.set(0, new byte[compactMaxKeyLength+1], parent); + assertEquals(Type.DEFAULT, defrep.getType()); + } + + @Test + public void testRampUp() { + + /* The sizes here only work for CompressedOops */ + Assume.assumeTrue(!JVMSystemUtils.ZING_JVM); + + final int size = 128; + final IN parent = new TestIN(size); + byte refEntries[][] = new byte[size][]; + INKeyRep defrep = new Default(size); + + for (int i=0; i < defrep.length(); i++) { + + int keyLength = Math.max(4, i % compactMaxKeyLength); + + ByteBuffer byteBuffer = ByteBuffer.allocate(keyLength); + byteBuffer.putInt(i); + + defrep.set(i, byteBuffer.array(), parent); + + refEntries[i] = byteBuffer.array(); + + checkEquals(refEntries, defrep); + + defrep = defrep.compact(parent); + + checkEquals(refEntries, defrep); + } + + if (compactMaxKeyLength < 20) { + /* Should have transitioned as a result of the compaction. */ + assertEquals(Type.MAX_KEY_SIZE, defrep.getType()); + } else { + /* + * With compactMaxKeyLength == 202, the java mem overhead of + * storing each key as a separate byte array is smaller than + * using 202 bytes for all 128 keys. + */ + assertEquals(Type.DEFAULT, defrep.getType()); + } + } + + @Test + public void testShiftEntries() { + int size = 128; + final IN parent = new TestIN(size); + commonShiftEntries(parent, new Default(size)); + commonShiftEntries(parent, new MaxKeySize(size, (short)8)); + } + + public void commonShiftEntries(IN parent, INKeyRep entries) { + int size = entries.length(); + byte refEntries[][] = new byte[size][]; + + Random rand = new Random(); + + for (int i = 0; i < 10000; i++) { + int slot = rand.nextInt(size); + byte[] n = (i % 10) == 0 ? null : createByteVal(slot, 8); + refEntries[slot] = n; + entries = entries.set(slot, n, parent); + checkEquals(refEntries, entries); + + /* Simulate an insertion */ + entries = entries.copy(slot, slot + 1, size - (slot + 1), parent); + System.arraycopy(refEntries, slot, refEntries, slot + 1, + size - (slot + 1)); + checkEquals(refEntries, entries); + + /* Simulate a deletion. 
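+             * Shift the entries above the slot down by one and null out the
+             * vacated last slot; refEntries mirrors the same moves so
+             * checkEquals() can verify the rep after each step.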
*/ + entries = entries.copy(slot + 1, slot, size - (slot + 1), parent); + entries = entries.set(size-1, null, parent); + System.arraycopy(refEntries, slot + 1, refEntries, + slot, size - (slot + 1)); + refEntries[size-1] = null; + checkEquals(refEntries, entries); + } + } + + @Test + public void testKeySizeChange_IN_updateEntry() { + + commonKeySizeChange( + new ChangeKey() { + public void changeKey(final BIN bin, + final int index, + byte[] newKey) { + bin.insertRecord( + index, (LN)bin.getTarget(index), bin.getLsn(index), + bin.getLastLoggedSize(index), newKey, null, + 0 /*expiration*/, false /*expirationInHours*/); + } + }); + } + + @Test + public void testKeySizeChange_IN_updateNode1() { + + commonKeySizeChange( + new ChangeKey() { + public void changeKey(final BIN bin, + final int index, + byte[] newKey) { + long lnMemSize = 0; + if (!bin.isEmbeddedLN(index)) { + LN ln = bin.fetchLN(index, CacheMode.UNCHANGED); + lnMemSize = ln.getMemorySizeIncludedByParent(); + assertEquals(Type.MAX_KEY_SIZE, bin.getKeyVals().getType()); + } else { + assertEquals(Type.DEFAULT, bin.getKeyVals().getType()); + } + + bin.updateRecord( + index, lnMemSize, bin.getLsn(index), + VLSN.NULL_VLSN_SEQUENCE, + bin.getLastLoggedSize(index), newKey, null, + 0 /*expiration*/, false /*expirationInHours*/); + } + }); + } + + @Test + public void testKeySizeChange_IN_updateNode2() { + + commonKeySizeChange( + new ChangeKey() { + public void changeKey(final BIN bin, + final int index, + byte[] newKey) { + + if (!bin.isEmbeddedLN(index)) { + LN ln = bin.fetchLN(index, CacheMode.UNCHANGED); + bin.detachNode(index, false, -1); + assertEquals(Type.MAX_KEY_SIZE, bin.getKeyVals().getType()); + bin.attachNode(index, ln, newKey); + } else { + bin.convertKey(index, newKey); + } + } + }); + } + + @Test + public void testKeySizeChange_IN_updateNode3() { + + commonKeySizeChange( + new ChangeKey() { + public void changeKey(final BIN bin, + final int index, + byte[] newKey) { + LN ln = null; + if (!bin.isEmbeddedLN(index)) { + ln = bin.fetchLN(index, CacheMode.UNCHANGED); + assertEquals(Type.MAX_KEY_SIZE, bin.getKeyVals().getType()); + } + + bin.insertRecord( + index, ln, bin.getLsn(index), + bin.getLastLoggedSize(index), newKey, null, + 0 /*expiration*/, false /*expirationInHours*/); + } + }); + } + + interface ChangeKey { + void changeKey(final BIN bin, final int index, byte[] newKey); + } + + /** + * Force key size changes using internal IN methods, to ensure that memory + * budgeting is correct. [#19295] + * + * Although the key size for an existing IN slot normally doesn't change, + * it can change when a "partial key comparator" is used, which compares + * a subset of the complete key. In addition, conversion to the new dup + * format requires changing keys arbitrarily. + */ + private void commonKeySizeChange(final ChangeKey changeKey) { + + final int keySize = compactMaxKeyLength / 2; + + for (int testCase = 0; testCase < 1; testCase += 1) { + + final Database db = createDb(DB_NAME, keySize, nodeMaxEntries); + final DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + EnvironmentImpl env = dbImpl.getEnv(); + + boolean embeddedLNs = (env.getMaxEmbeddedLN() >= keySize); + + /* + * A non-null custom comparator is necessary to test the IN.setNode + * family of methods, which update the key only if a partial + * comparator may be configured. + */ + dbImpl.setBtreeComparator(new StandardComparator(), true); + + /* Mutate BIN to MAX_KEY_SIZE rep using eviction. 
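+             * Evicting the LNs lets the BIN compact its key rep; with
+             * embedded LNs the keys remain in the DEFAULT rep, so the two
+             * branches below pin down the expected starting type.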
*/ + if (embeddedLNs) { + verifyAcrossINEvict(db, Type.DEFAULT, Type.DEFAULT); + } else { + verifyAcrossINEvict(db, Type.DEFAULT, Type.MAX_KEY_SIZE); + } + + /* Manufacture new key with one extra byte. */ + final BIN bin = (BIN)(dbImpl.getTree().getFirstNode(cacheMode)); + final int index = nodeMaxEntries / 2; + final byte[] oldKey = bin.getKey(index); + final byte[] newKey = new byte[oldKey.length + 1]; + System.arraycopy(oldKey, 0, newKey, 0, oldKey.length); + + /* + * Test changing size of BIN slot key using various IN methods. + * The rep should mutate to DEFAULT because the key size increased. + */ + changeKey.changeKey(bin, index, newKey); + assertEquals(Type.DEFAULT, bin.getKeyVals().getType()); + bin.releaseLatch(); + + /* Prior to the fix for [#19295], the memory check failed. */ + verifyINMemorySize(dbImpl); + + db.close(); + } + } + + @Test + public void testCompareKeys() { + final int size = 32; + final IN parent = new TestIN(size); + + for (final boolean embeddedData : new boolean[] {false, true}) { + + for (int cmp = 0; cmp < 2; cmp += 1) { + + final Comparator comparator = + (cmp == 0) ? null : new StandardComparator(); + + for (final INKeyRep targets : + new INKeyRep[] {new Default(size), + new MaxKeySize(size, (short) 8)}) { + + compareKeysWithoutPrefixes( + parent, targets, embeddedData, comparator); + + compareKeysWithPrefixes( + parent, targets, embeddedData, comparator); + + } + } + } + } + + public void compareKeysWithoutPrefixes(IN parent, + INKeyRep targets, + boolean embeddedData, + Comparator comparator) { + + final byte[] data = embeddedData ? new byte[] {1, 2} : null; + + final byte[] k12 = new byte[]{1, 2}; + final byte[] k123 = new byte[]{1, 2, 3}; + final byte[] k34 = new byte[]{3, 4}; + final byte[] k345 = new byte[]{3, 4, 5}; + + final int i123 = 0; + final int i345 = 1; + + targets = targets.set(i123, k123, data, parent); + targets = targets.set(i345, k345, data, parent); + + int cmp; + + cmp = targets.compareKeys(k12, null, i123, embeddedData, comparator); + assertTrue(cmp < 0); + cmp = targets.compareKeys(k12, null, i345, embeddedData, comparator); + assertTrue(cmp < 0); + + cmp = targets.compareKeys(k123, null, i123, embeddedData, comparator); + assertTrue(cmp == 0); + cmp = targets.compareKeys(k123, null, i345, embeddedData, comparator); + assertTrue(cmp < 0); + + cmp = targets.compareKeys(k34, null, i123, embeddedData, comparator); + assertTrue(cmp > 0); + cmp = targets.compareKeys(k34, null, i345, embeddedData, comparator); + assertTrue(cmp < 0); + + cmp = targets.compareKeys(k345, null, i123, embeddedData, comparator); + assertTrue(cmp > 0); + cmp = targets.compareKeys(k345, null, i345, embeddedData, comparator); + assertTrue(cmp == 0); + } + + public void compareKeysWithPrefixes(IN parent, + INKeyRep targets, + boolean embeddedData, + Comparator comparator) { + + final byte[] data = embeddedData ? 
new byte[] {1, 2} : null;
+
+        final byte[] k0 = new byte[]{0};
+        final byte[] k00 = new byte[]{0, 0};
+        final byte[] k0012 = new byte[]{0, 0, 1, 2};
+        final byte[] k00123 = new byte[]{0, 0, 1, 2, 3};
+        final byte[] k0034 = new byte[]{0, 0, 3, 4};
+        final byte[] k00345 = new byte[]{0, 0, 3, 4, 5};
+
+        final byte[] k123 = new byte[]{1, 2, 3};
+        final byte[] k345 = new byte[]{3, 4, 5};
+
+        final int i123 = 0;
+        final int i345 = 1;
+
+        targets = targets.set(i123, k123, data, parent);
+        targets = targets.set(i345, k345, data, parent);
+
+        int cmp;
+
+        cmp = targets.compareKeys(k0012, k00, i123, embeddedData, comparator);
+        assertTrue(cmp < 0);
+        cmp = targets.compareKeys(k0012, k00, i345, embeddedData, comparator);
+        assertTrue(cmp < 0);
+
+        cmp = targets.compareKeys(k00123, k00, i123, embeddedData, comparator);
+        assertTrue(cmp == 0);
+        cmp = targets.compareKeys(k00123, k00, i345, embeddedData, comparator);
+        assertTrue(cmp < 0);
+
+        cmp = targets.compareKeys(k0034, k00, i123, embeddedData, comparator);
+        assertTrue(cmp > 0);
+        cmp = targets.compareKeys(k0034, k00, i345, embeddedData, comparator);
+        assertTrue(cmp < 0);
+
+        cmp = targets.compareKeys(k00345, k00, i123, embeddedData, comparator);
+        assertTrue(cmp > 0);
+        cmp = targets.compareKeys(k00345, k00, i345, embeddedData, comparator);
+        assertTrue(cmp == 0);
+
+        /*
+         * Special case where search key is smaller than full key in the slot,
+         * and the search key is contained in the prefix. This failed from JE
+         * 6.2.41 to 6.4.11 [#24583].
+         */
+        cmp = targets.compareKeys(k0, k00, i123, embeddedData, comparator);
+        assertTrue(cmp < 0);
+        cmp = targets.compareKeys(k0, k00, i345, embeddedData, comparator);
+        assertTrue(cmp < 0);
+
+        cmp = targets.compareKeys(k00, k00, i123, embeddedData, comparator);
+        assertTrue(cmp < 0);
+        cmp = targets.compareKeys(k00, k00, i345, embeddedData, comparator);
+        assertTrue(cmp < 0);
+    }
+
+    private class StandardComparator
+        implements Comparator, PartialComparator, Serializable {
+
+        public int compare(final byte[] k1, final byte[] k2) {
+            return Key.compareKeys(k1, k2, null);
+        }
+    }
+
+    private void checkEquals(byte[][] refEntries, INKeyRep entries) {
+        for (int i=0; i < refEntries.length; i++) {
+            assertTrue(Arrays.equals(refEntries[i], entries.get(i)));
+        }
+    }
+
+    private byte[][] initRep(IN parent, INKeyRep rep) {
+        int size = rep.length();
+        byte[][] refEntries = new byte[size][];
+        for (int i = 0; i < rep.length(); i++) {
+            int keyLength = Math.max(4, i % compactMaxKeyLength);
+            ByteBuffer byteBuffer = ByteBuffer.allocate(keyLength);
+            byteBuffer.putInt(i);
+            INKeyRep nrep = rep.set(i, byteBuffer.array(), parent);
+            assertTrue(rep == nrep);
+            refEntries[i] = byteBuffer.array();
+            checkEquals(refEntries, rep);
+        }
+        return refEntries;
+    }
+}
diff --git a/test/com/sleepycat/je/tree/INTargetRepTest.java b/test/com/sleepycat/je/tree/INTargetRepTest.java
new file mode 100644
index 0000000..10ac1f1
--- /dev/null
+++ b/test/com/sleepycat/je/tree/INTargetRepTest.java
@@ -0,0 +1,302 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.tree; + +import static com.sleepycat.je.tree.INTargetRep.NONE; +import static org.junit.Assert.assertEquals; + +import java.nio.ByteBuffer; +import java.util.Random; + +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.log.Loggable; +import com.sleepycat.je.tree.INTargetRep.Default; +import com.sleepycat.je.tree.INTargetRep.Sparse; +import com.sleepycat.je.tree.INTargetRep.Type; + +public class INTargetRepTest extends INEntryTestBase { + + final int size = 32; + final IN parent = new TestIN(size); + + /** + * Test use of the representations at the IN level. Checks memory + * bookkeeping after each operation. + */ + @Test + public void testINs() { + + int keySize = 8; // same size used for data as well + + Database db = createDb(DB_NAME, keySize, nodeMaxEntries); + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + EnvironmentImpl env = dbImpl.getEnv(); + + boolean embeddedLNs = (env.getMaxEmbeddedLN() >= keySize); + + BIN firstBin; + + if (embeddedLNs) { + firstBin = verifyAcrossINEvict(db, Type.NONE, Type.NONE); + } else { + firstBin = verifyAcrossINEvict(db, Type.DEFAULT, Type.NONE); + } + + /* Mutate to sparse. */ + DatabaseEntry key = new DatabaseEntry(); + key.setData(createByteVal(0, 8)); + DatabaseEntry data = new DatabaseEntry(); + db.get(null, key, data, LockMode.DEFAULT); + + if (embeddedLNs) { + assertEquals(Type.NONE, firstBin.getTargets().getType()); + } else { + assertEquals(Type.SPARSE, firstBin.getTargets().getType()); + } + + for (int i = 0; i < nodeMaxEntries; i++) { + key.setData(createByteVal(i, keySize)); + OperationStatus status = db.get(null, key, data, LockMode.DEFAULT); + assertEquals(OperationStatus.SUCCESS, status); + verifyINMemorySize(dbImpl); + } + + if (embeddedLNs) { + assertEquals(Type.NONE, firstBin.getTargets().getType()); + } else { + assertEquals(Type.DEFAULT, firstBin.getTargets().getType()); + } + + db.close(); + } + + private BIN verifyAcrossINEvict(Database db, + Type pre, + Type post) { + + DatabaseImpl dbImpl = DbInternal.getDbImpl(db); + + BIN firstBin = (BIN)(dbImpl.getTree().getFirstNode(cacheMode)); + + assertEquals(pre, firstBin.getTargets().getType()); + + firstBin.evictLNs(); + firstBin.releaseLatch(); + assertEquals(post, firstBin.getTargets().getType()); + + verifyINMemorySize(dbImpl); + return firstBin; + } + + @Test + public void testBasic() { + commonTest(new Default(size)); + commonTest(new Sparse(size)); + } + + public void commonTest(INArrayRep targets) { + targets = targets.set(1,new TestNode(1), parent); + assertEquals(1, ((TestNode) targets.get(1)).id); + + targets.copy(0, 5, 1, parent); + assertEquals(1, ((TestNode) targets.get(1)).id); + + targets.copy(0, 5, 2, parent); + assertEquals(1, ((TestNode) targets.get(6)).id); + + targets.set(1, null, parent); + + assertEquals(null, targets.get(1)); + + targets.copy(5, 0, 2, parent); + assertEquals(1, ((TestNode) targets.get(1)).id); + } + + @Test + public void testCompact() { + Default te = new Default(size); + INArrayRep rep = te.compact(parent); + assertEquals(Type.NONE, rep.getType()); + + te = new Default(size); + for (int i=0; i < Sparse.MAX_ENTRIES; 
i++) { + te.set(i, new TestNode(i), parent); + } + assertEquals(Type.DEFAULT, te.getType()); + rep = te.compact(parent); + assertEquals(Type.SPARSE, rep.getType()); + + te = new Default(size); + for (int i=0; i <= Sparse.MAX_ENTRIES; i++) { + te.set(i, new TestNode(i), parent); + } + + /* Above the threshold. */ + assertEquals(Type.DEFAULT, te.getType()); + rep = te.compact(parent); + assertEquals(Type.DEFAULT, rep.getType()); + } + + @Test + public void testRampUpDown() { + INArrayRep entries = NONE; + Node refEntries[] = new TestNode[size]; + + /* Ramp up */ + for (int i=0; i < size; i++) { + TestNode n = new TestNode(i); + entries = entries.set(i, n, parent); + if ((i+1) <= Sparse.MAX_ENTRIES) { + assertEquals(Type.SPARSE, entries.getType()); + } else { + assertEquals(Type.DEFAULT, entries.getType()); + } + refEntries[i] = n; + checkEquals(refEntries, entries); + } + + /* Ramp down with compact. */ + for (int i=0; i < size; i++) { + entries = entries.set(i, null, parent); + entries = entries.compact(parent); + if ((size - (i+1)) <= Sparse.MAX_ENTRIES) { + if ((size - (i+1)) == 0) { + assertEquals(Type.NONE, entries.getType()); + } else { + assertEquals(Type.SPARSE, entries.getType()); + } + } else { + assertEquals(Type.DEFAULT, entries.getType()); + } + refEntries[i] = null; + checkEquals(refEntries, entries); + } + } + + @Test + public void testRandomEntries() { + INArrayRep entries = NONE; + Node refEntries[] = new TestNode[size]; + Random rand = new Random(); + for (int repeat = 1; repeat < 100; repeat++) { + for (int i=0; i < 10*size; i++) { + int slot = rand.nextInt(size); + Node n = (i % 5) == 0 ? null : new TestNode(slot); + refEntries[slot] = n; + entries = entries.set(slot, n, parent); + checkEquals(refEntries, entries); + entries = entries.compact(parent); + checkEquals(refEntries, entries); + } + } + } + + @Test + public void testShiftEntries() { + INArrayRep entries = NONE; + Node refEntries[] = new TestNode[size]; + + Random rand = new Random(); + + for (int i = 0; i < 10000; i++) { + int slot = rand.nextInt(size); + Node n = (i % 10) == 0 ? null : new TestNode(slot); + refEntries[slot] = n; + entries = entries.set(slot, n, parent); + checkEquals(refEntries, entries); + + /* Simulate an insertion */ + entries = entries.copy(slot, slot + 1, size - (slot + 1), parent); + System.arraycopy(refEntries, slot, refEntries, slot + 1, + size - (slot + 1)); + checkEquals(refEntries, entries); + + /* Simulate a deletion. */ + entries = entries.copy(slot + 1, slot, size - (slot + 1), parent); + entries = entries.set(size-1, null, parent); + System.arraycopy(refEntries, slot + 1, refEntries, + slot, size - (slot + 1)); + refEntries[size - 1] = null; + checkEquals(refEntries, entries); + } + } + + private void checkEquals(Node[] refEntries, + INArrayRep entries) { + for (int i=0; i < refEntries.length; i++) { + assertEquals(refEntries[i], entries.get(i)); + } + } + + /* Dummy test node. 
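+     * Only object identity matters to the INTargetRep tests, so every
+     * Node/Loggable method below is a stub.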
*/ + @SuppressWarnings("unused") + class TestNode extends Node { + final int id; + + public TestNode(int id) { + this.id = id; + } + + @Override + public LogEntryType getGenericLogType() { + return null; + } + + @Override + public void incFetchStats(EnvironmentImpl envImpl, boolean isMiss) { + } + + @Override + boolean isValidForDelete() throws DatabaseException { + return false; + } + + @Override + void rebuildINList(INList inList) throws DatabaseException { + } + + @Override + public int getLogSize() { + return 0; + } + + @Override + public void writeToLog(ByteBuffer logBuffer) { + } + + @Override + public void readFromLog(ByteBuffer itemBuffer, int entryVersion) { + } + + @Override + public void dumpLog(StringBuilder sb, boolean verbose) { + } + + @Override + public boolean logicalEquals(Loggable other) { + return false; + } + } +} diff --git a/test/com/sleepycat/je/tree/INTest.java b/test/com/sleepycat/je/tree/INTest.java new file mode 100644 index 0000000..beec3be --- /dev/null +++ b/test/com/sleepycat/je/tree/INTest.java @@ -0,0 +1,431 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Random; + +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentFailureReason; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class INTest extends TestBase { + static private final int N_BYTES_IN_KEY = 3; + private int initialINCapacity; + private DatabaseImpl db = null; + static private long FAKE_LSN = DbLsn.makeLsn(0, 0); + private Environment noLogEnv; + private final File envHome; + + public INTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + noLogEnv = new Environment(envHome, envConfig); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(noLogEnv); + initialINCapacity = envImpl.getConfigManager(). 
+ getInt(EnvironmentParams.NODE_MAX); + db = new DatabaseImpl(null, + "foo", new DatabaseId(11), envImpl, + new DatabaseConfig()); + } + + @After + public void tearDown() { + + db.releaseTreeAdminMemory(); + noLogEnv.close(); + } + + @Test + public void testFindEntry() + throws DatabaseException { + + IN in = new IN(db, new byte[0], initialINCapacity, 7); + in.latch(); + + byte[] zeroBytes = new byte[N_BYTES_IN_KEY]; + for (int i = 0; i < N_BYTES_IN_KEY; i++) { + zeroBytes[i] = 0x00; + } + + byte[] maxBytes = new byte[N_BYTES_IN_KEY]; + for (int i = 0; i < N_BYTES_IN_KEY; i++) { + + /* + * Use FF since that sets the sign bit negative on a byte. This + * checks the Key.compareTo routine for proper unsigned + * comparisons. + */ + maxBytes[i] = (byte) 0xFF; + } + + assertTrue(in.findEntry(zeroBytes, false, false) == -1); + assertTrue(in.findEntry(maxBytes, false, false) == -1); + assertTrue(in.findEntry(zeroBytes, false, true) == -1); + assertTrue(in.findEntry(maxBytes, false, true) == -1); + assertTrue(in.findEntry(zeroBytes, true, false) == -1); + assertTrue(in.findEntry(maxBytes, true, false) == -1); + assertTrue(in.findEntry(zeroBytes, true, true) == -1); + assertTrue(in.findEntry(maxBytes, true, true) == -1); + for (int i = 0; i < initialINCapacity; i++) { + + /* + * Insert a key and check that we get the same index in return from + * the binary search. Check the next highest and next lowest keys + * also. + */ + byte[] keyBytes = new byte[N_BYTES_IN_KEY]; + byte[] nextKeyBytes = new byte[N_BYTES_IN_KEY]; + byte[] prevKeyBytes = new byte[N_BYTES_IN_KEY]; + nextKeyBytes[0] = prevKeyBytes[0] = keyBytes[0] = 0x01; + nextKeyBytes[1] = prevKeyBytes[1] = keyBytes[1] = (byte) i; + nextKeyBytes[2] = prevKeyBytes[2] = keyBytes[2] = 0x10; + nextKeyBytes[2]++; + prevKeyBytes[2]--; + + int insertionFlags = in.insertEntry1( + null, keyBytes, null, FAKE_LSN, false); + + assertTrue((insertionFlags & IN.INSERT_SUCCESS) != 0); + assertEquals(i, insertionFlags & ~IN.INSERT_SUCCESS); + assertTrue(in.findEntry(zeroBytes, false, false) == 0); + assertTrue(in.findEntry(maxBytes, false, false) == i); + assertTrue(in.findEntry(zeroBytes, false, true) == -1); + assertTrue(in.findEntry(maxBytes, false, true) == -1); + assertTrue(in.findEntry(zeroBytes, true, false) == -1); + assertTrue(in.findEntry(maxBytes, true, false) == i); + assertTrue(in.findEntry(zeroBytes, true, true) == -1); + assertTrue(in.findEntry(maxBytes, true, true) == -1); + for (int j = 1; j < in.getNEntries(); j++) { // 0th key is virtual + assertTrue(in.findEntry(in.getKey(j), false, false) + == j); + assertTrue(in.findEntry(in.getKey(j), false, true) + == j); + assertTrue(in.findEntry(in.getKey(j), true, false) == + (j | IN.EXACT_MATCH)); + assertTrue(in.findEntry(in.getKey(j), true, true) == + (j | IN.EXACT_MATCH)); + assertTrue(in.findEntry(nextKeyBytes, false, false) == i); + assertTrue(in.findEntry(prevKeyBytes, false, false) == i - 1); + assertTrue(in.findEntry(nextKeyBytes, false, true) == -1); + assertTrue(in.findEntry(prevKeyBytes, false, true) == -1); + } + } + in.releaseLatch(); + } + + @Test + public void testInsertEntry() + throws DatabaseException { + + for (int i = 0; i < 10; i++) { // cwl: consider upping this + doInsertEntry(false); + doInsertEntry(true); + } + } + + private void doInsertEntry(boolean withMinMax) + throws DatabaseException { + + IN in = new IN(db, new byte[0], initialINCapacity, 7); + in.latch(); + + byte[] zeroBytes = new byte[N_BYTES_IN_KEY]; + for (int i = 0; i < N_BYTES_IN_KEY; i++) { + zeroBytes[i] = 
0x00; + } + + byte[] maxBytes = new byte[N_BYTES_IN_KEY]; + for (int i = 0; i < N_BYTES_IN_KEY; i++) { + maxBytes[i] = (byte) 0xFF; + } + + if (withMinMax) { + try { + in.insertEntry(null, zeroBytes, FAKE_LSN); + in.insertEntry(null, maxBytes, FAKE_LSN); + } catch (Exception e) { + fail("caught " + e); + } + + assertTrue(in.findEntry(zeroBytes, false, false) == 0); + assertTrue(in.findEntry(maxBytes, false, false) == 1); + /* Shadowed by the virtual 0'th key. */ + assertTrue(in.findEntry(zeroBytes, false, true) == 0); + assertTrue(in.findEntry(maxBytes, false, true) == 1); + + assertTrue(in.findEntry(zeroBytes, true, false) == IN.EXACT_MATCH); + assertTrue(in.findEntry(maxBytes, true, false) == + (1 | IN.EXACT_MATCH)); + /* Shadowed by the virtual 0'th key. */ + assertTrue(in.findEntry(zeroBytes, true, true) == IN.EXACT_MATCH); + assertTrue(in.findEntry(maxBytes, true, true) == + (1 | IN.EXACT_MATCH)); + } + + Random rnd = new Random(); + + try { + for (int i = 0; + i < initialINCapacity - (withMinMax ? 2 : 0); + i++) { + + /* + * Insert a key and check that we get the same index in return + * from the binary search. Check the next highest and next + * lowest keys also. + */ + byte[] keyBytes = new byte[N_BYTES_IN_KEY]; + + /* + * There's a small chance that we may generate the same + * sequence of bytes that are already present. + */ + while (true) { + rnd.nextBytes(keyBytes); + int index = in.findEntry(keyBytes, true, false); + if ((index & IN.EXACT_MATCH) != 0 && + index >= 0) { + continue; + } + break; + } + + in.insertEntry(null, keyBytes, FAKE_LSN); + + if (withMinMax) { + assertTrue(in.findEntry(zeroBytes, false, false) == 0); + assertTrue(in.findEntry(maxBytes, false, false) == + in.getNEntries() - 1); + /* Shadowed by the virtual 0'th key. */ + assertTrue(in.findEntry(zeroBytes, false, true) == 0); + assertTrue(in.findEntry(maxBytes, false, true) == + in.getNEntries() - 1); + + assertTrue(in.findEntry(zeroBytes, true, false) == + IN.EXACT_MATCH); + assertTrue(in.findEntry(maxBytes, true, false) == + ((in.getNEntries() - 1) | IN.EXACT_MATCH)); + /* Shadowed by the virtual 0'th key. */ + assertTrue(in.findEntry(zeroBytes, true, true) == + IN.EXACT_MATCH); + assertTrue(in.findEntry(maxBytes, true, true) == + ((in.getNEntries() - 1) | IN.EXACT_MATCH)); + } else { + assertTrue(in.findEntry(zeroBytes, false, false) == 0); + assertTrue(in.findEntry(maxBytes, false, false) == + in.getNEntries() - 1); + assertTrue(in.findEntry(zeroBytes, false, true) == -1); + assertTrue(in.findEntry(maxBytes, false, true) == -1); + + assertTrue(in.findEntry(zeroBytes, true, false) == -1); + assertTrue(in.findEntry(maxBytes, true, false) == + in.getNEntries() - 1); + } + + for (int j = 1; j < in.getNEntries(); j++) { + assertTrue(in.findEntry(in.getKey(j), false, false) == j); + assertTrue(in.findEntry(in.getKey(j), false, true) == j); + + assertTrue(in.findEntry(in.getKey(j), false, true) == j); + assertTrue(in.findEntry(in.getKey(j), true, false) == + (j | IN.EXACT_MATCH)); + } + } + } catch (Exception e) { + fail("caught " + e); + } + + /* Should be full so insertEntry should return false. */ + byte[] keyBytes = new byte[N_BYTES_IN_KEY]; + rnd.nextBytes(keyBytes); + + try { + in.insertEntry(null, keyBytes, FAKE_LSN); + fail("should have caught UNEXPECTED_STATE, but didn't"); + } catch (EnvironmentFailureException e) { + assertSame(EnvironmentFailureReason. 
+ UNEXPECTED_STATE_FATAL, e.getReason()); + } + in.releaseLatch(); + } + + @Test + public void testDeleteEntry() + throws DatabaseException { + + for (int i = 0; i < 10; i++) { // cwl: consider upping this + doDeleteEntry(true); + doDeleteEntry(false); + } + } + + private void doDeleteEntry(boolean withMinMax) + throws DatabaseException { + + IN in = new IN(db, new byte[0], initialINCapacity, 7); + in.latch(); + + byte[] zeroBytes = new byte[N_BYTES_IN_KEY]; + for (int i = 0; i < N_BYTES_IN_KEY; i++) { + zeroBytes[i] = 0x00; + } + + byte[] maxBytes = new byte[N_BYTES_IN_KEY]; + for (int i = 0; i < N_BYTES_IN_KEY; i++) { + maxBytes[i] = (byte) 0xFF; + } + + if (withMinMax) { + try { + in.insertEntry(null, zeroBytes, FAKE_LSN); + in.insertEntry(null, maxBytes, FAKE_LSN); + } catch (Exception e) { + fail("caught " + e); + } + + assertTrue(in.findEntry(zeroBytes, false, false) == 0); + assertTrue(in.findEntry(maxBytes, false, false) == 1); + /* Shadowed by the virtual 0'th key. */ + assertTrue(in.findEntry(zeroBytes, false, true) == 0); + assertTrue(in.findEntry(maxBytes, false, true) == 1); + + assertTrue(in.findEntry(zeroBytes, true, false) == IN.EXACT_MATCH); + assertTrue(in.findEntry(maxBytes, true, false) == + (1 | IN.EXACT_MATCH)); + /* Shadowed by the virtual 0'th key. */ + assertTrue(in.findEntry(zeroBytes, true, true) == IN.EXACT_MATCH); + assertTrue(in.findEntry(maxBytes, true, true) == + (1 | IN.EXACT_MATCH)); + } + + Random rnd = new Random(); + + try { + /* Fill up the IN with random entries. */ + for (int i = 0; + i < initialINCapacity - (withMinMax ? 2 : 0); + i++) { + + /* + * Insert a key and check that we get the same index in return + * from the binary search. Check the next highest and next + * lowest keys also. + */ + byte[] keyBytes = new byte[N_BYTES_IN_KEY]; + + /* + * There's a small chance that we may generate the same + * sequence of bytes that are already present. + */ + while (true) { + rnd.nextBytes(keyBytes); + int index = in.findEntry(keyBytes, true, false); + if ((index & IN.EXACT_MATCH) != 0 && + index >= 0) { + continue; + } + break; + } + + in.insertEntry(null, keyBytes, FAKE_LSN); + } + + if (withMinMax) { + assertTrue(in.findEntry(zeroBytes, false, false) == 0); + assertTrue(in.findEntry(maxBytes, false, false) == + in.getNEntries() - 1); + /* + * zeroBytes is in the 0th entry, but that's the virtual key so + * it's not an exact match. + */ + assertTrue(in.findEntry(zeroBytes, false, true) == 0); + assertTrue(in.findEntry(maxBytes, false, true) == + in.getNEntries() - 1); + + assertTrue(in.findEntry(zeroBytes, false, true) == 0); + assertTrue(in.findEntry(maxBytes, false, true) == + in.getNEntries() - 1); + assertTrue(in.findEntry(zeroBytes, true, false) == + IN.EXACT_MATCH); + assertTrue(in.findEntry(maxBytes, true, false) == + ((in.getNEntries() - 1) | IN.EXACT_MATCH)); + } + + while (in.getNEntries() > 1) { + int i = rnd.nextInt(in.getNEntries() - 1) + 1; + assertTrue(deleteEntry(in, in.getKey(i))); + } + + /* + * We should only be able to delete the zero Key if it was inserted + * in the first place. 
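+ *
+ * For reference, a sketch of the findEntry return convention that the
+ * assertions above exercise (inferred from those assertions, not from
+ * the IN javadoc): with the exact-search flag set, a miss returns -1;
+ * with the indicate-exact flag set, a hit sets the IN.EXACT_MATCH bit:
+ *
+ *   int result = in.findEntry(key, true, false);
+ *   boolean exact = (result & IN.EXACT_MATCH) != 0;  // exact-match bit
+ *   int slot = result & ~IN.EXACT_MATCH;             // plain slot index
+ *
+ * Otherwise findEntry returns the greatest slot whose key is <= the
+ * search key, with slot 0 acting as a virtual key.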
+ */ + assertEquals(withMinMax, deleteEntry(in, zeroBytes)); + } catch (Exception e) { + e.printStackTrace(); + fail("caught " + e); + } + in.releaseLatch(); + } + + private boolean deleteEntry(IN in, byte[] key) + throws DatabaseException { + + assert(!in.isBINDelta()); + + if (in.getNEntries() == 0) { + return false; + } + + int index = in.findEntry(key, false, true); + if (index < 0) { + return false; + } + + /* We cannot validate because the FAKE_LSN is present. */ + in.deleteEntry(index, true /*makeDirty*/, false /*validate*/); + return true; + } +} diff --git a/test/com/sleepycat/je/tree/KeyPrefixTest.java b/test/com/sleepycat/je/tree/KeyPrefixTest.java new file mode 100644 index 0000000..3826ae9 --- /dev/null +++ b/test/com/sleepycat/je/tree/KeyPrefixTest.java @@ -0,0 +1,601 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.Serializable; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Random; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.utilint.StringUtils; + +public class KeyPrefixTest extends DualTestCase { + + private final File envHome; + private Environment env; + private Database db; + + public KeyPrefixTest() { + System.setProperty("longAckTimeout", "true"); + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + super.tearDown(); + db = null; + env = null; + } + + private void initEnv(int nodeMax) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + if (nodeMax > 0) { + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + Integer.toString(nodeMax)); + } + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setTxnNoSync(true); + env = create(envHome, envConfig); + + String databaseName = "testDb"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setKeyPrefixing(true); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, 
databaseName, dbConfig); + } + + private void closeEnv() { + try { + db.close(); + close(env); + db = null; + env = null; + } catch (DatabaseException e) { + e.printStackTrace(); + throw e; + } + } + + private static final String[] keys = { + "aaa", "aab", "aac", "aae", // BIN1 + "aaf", "aag", "aah", "aaj", // BIN2 + "aak", "aala", "aalb", "aam", // BIN3 + "aan", "aao", "aap", "aas", // BIN4 + "aat", "aau", "aav", "aaz", // BIN5 + "baa", "bab", "bac", "bam", // BIN6 + "ban", "bax", "bay", "baz", // BIN7 + "caa", "cab", "cay", "caz", // BIN8 + "daa", "eaa", "faa", "fzz", // BIN10 + "Aaza", "Aazb", "aal", "aama" + }; + + @Test + public void testPrefixBasic() + throws Exception { + + initEnv(5); + Key.DUMP_TYPE = DumpType.TEXT; + try { + + /* Build up a tree. */ + for (String key : keys) { + assertEquals(OperationStatus.SUCCESS, + db.put(null, + new DatabaseEntry + (StringUtils.toUTF8(key)), + new DatabaseEntry(new byte[] { 1 }))); + } + + String[] sortedKeys = new String[keys.length]; + System.arraycopy(keys, 0, sortedKeys, 0, keys.length); + Arrays.sort(sortedKeys); + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = null; + int i = 0; + try { + cursor = db.openCursor(txn, CursorConfig.READ_UNCOMMITTED); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + boolean somePrefixSeen = false; + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + assertEquals(StringUtils.fromUTF8(key.getData()), + sortedKeys[i++]); + byte[] prefix = + DbInternal.getCursorImpl(cursor).getBIN(). + getKeyPrefix(); + if (prefix != null) { + somePrefixSeen = true; + } + } + assertTrue(somePrefixSeen); + } finally { + if (cursor != null) { + cursor.close(); + } + txn.commit(); + } + + if (false) { + System.out.println(""); + DbInternal.getDbImpl(db).getTree().dump(); + } + + closeEnv(); + } catch (Throwable t) { + t.printStackTrace(); + throw new Exception(t); + } + } + + @Test + public void testPrefixManyRandom() + throws Exception { + + doTestPrefixMany(true); + } + + @Test + public void testPrefixManySequential() + throws Exception { + + doTestPrefixMany(false); + } + + private void doTestPrefixMany(boolean random) + throws Exception { + + initEnv(0); + final int N_EXTRA_ENTRIES = 1000; + Key.DUMP_TYPE = DumpType.BINARY; + try { + + /* 2008-02-28 11:06:50.009 */ + long start = 1204214810009L; + + /* 2 years after start. Prefixes will be 3 and 4 bytes long. */ + long end = start + (2L * 365L * 24L * 60L * 60L * 1000L); + + /* This will yield 94,608 entries. */ + long inc = 1000000L; + int nEntries = insertTimestamps(start, end, inc, random); + + /* + * This will force some splits on the left side of the tree which + * will force recalculating the suffix on the leg after the initial + * prefix/suffix calculation. + */ + insertExtraTimestamps(0, N_EXTRA_ENTRIES); + + /* Do the same on the right side of the tree. 
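+ *
+ * (LongBinding writes the most significant bytes of a long first, so
+ * nearby timestamps share their leading bytes. A hypothetical check,
+ * not part of this test, that shows the prefix this yields:
+ *
+ *   DatabaseEntry k1 = new DatabaseEntry(), k2 = new DatabaseEntry();
+ *   LongBinding.longToEntry(start, k1);
+ *   LongBinding.longToEntry(start + inc, k2);
+ *   byte[] p = Key.createKeyPrefix(k1.getData(), k2.getData());
+ *   // p holds every leading byte the two encoded keys have in common.)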
*/ + insertExtraTimestamps(end, N_EXTRA_ENTRIES); + assertEquals((nEntries + 2 * N_EXTRA_ENTRIES), db.count()); + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = null; + try { + cursor = db.openCursor(txn, CursorConfig.READ_UNCOMMITTED); + + verifyEntries(0, N_EXTRA_ENTRIES, cursor, 1); + verifyEntries(start, nEntries, cursor, inc); + verifyEntries(end, N_EXTRA_ENTRIES, cursor, 1); + + deleteEntries(txn, start, nEntries); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + cursor.close(); + cursor = db.openCursor(txn, CursorConfig.READ_UNCOMMITTED); + verifyEntries(0, N_EXTRA_ENTRIES, cursor, 1); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + assertEquals(end, LongBinding.entryToLong(key)); + } finally { + if (cursor != null) { + cursor.close(); + } + txn.commit(); + } + + if (false) { + System.out.println(""); + DbInternal.getDbImpl(db).getTree().dump(); + } + + closeEnv(); + } catch (Throwable t) { + t.printStackTrace(); + throw new Exception(t); + } + } + + private int insertTimestamps(long start, + long end, + long inc, + boolean random) + throws DatabaseException { + + int nEntries = (int) ((end - start) / inc); + List keyList = new ArrayList(nEntries); + long[] keys = null; + if (random) { + for (long i = start; i < end; i += inc) { + keyList.add(i); + } + keys = new long[keyList.size()]; + Random rnd = new Random(10); // fixed seed + int nextKeyIdx = 0; + while (keyList.size() > 0) { + int idx = rnd.nextInt(keyList.size()); + keys[nextKeyIdx++] = keyList.get(idx); + keyList.remove(idx); + } + } + + /* Build up a tree. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + data.setData(new byte[1]); + int j = 0; + for (long i = start; i < end; i += inc) { + if (random) { + LongBinding.longToEntry(keys[j], key); + } else { + LongBinding.longToEntry(i, key); + } + j++; + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data)); + } + return j; + } + + private void insertExtraTimestamps(long start, int nExtraEntries) + throws DatabaseException { + + /* Add (more than one node's worth) to the left side of the tree.*/ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[] { 0 }); + long next = start; + for (int i = 0; i < nExtraEntries; i++) { + LongBinding.longToEntry(next, key); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data)); + next++; + } + } + + private void deleteEntries(Transaction txn, long start, int nEntries) + throws DatabaseException { + + /* + * READ_UNCOMMITTED is used here as a trick to reduce the amount of + * memory taken by locks. Because we don't lock the record before + * deleting it, we don't lock the LSN, which reduces the number of + * locks by half. 
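+ *
+ * Per record, the loop below therefore boils down to:
+ *   cursor.getSearchKey(key, data, LockMode.READ_UNCOMMITTED); // no read lock
+ *   cursor.delete();                                           // write lock only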
+ */ + Cursor cursor = db.openCursor(txn, null); + try { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + LongBinding.longToEntry(start, key); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(key, data, + LockMode.READ_UNCOMMITTED)); + for (int i = 0; i < nEntries; i++) { + assertEquals(OperationStatus.SUCCESS, cursor.delete()); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, + LockMode.READ_UNCOMMITTED)); + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + } + + private void verifyEntries(long start, + int nEntries, + Cursor cursor, + long inc) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + long check = start; + for (int i = 0; i < nEntries; i++) { + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + long keyInfo = LongBinding.entryToLong(key); + assertTrue(keyInfo == check); + check += inc; + } + } + + /** + * Tests use of an RLE based comparator function + */ + @Test + public void testRLEComparator() { + initEnv(0); + db.close(); + db = env.openDatabase + (null, "testKeyComparator", + configDatabaseWithComparator(new RLEKeyComparator())); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + final int keyCount = 1000; + + /* seed the database. */ + Random rand = new Random(0); + + for (int i=0; i < keyCount; i++) { + String str = Long.toString(rand.nextLong()); + final byte[] keyBytes = strToRLEbytes(str); + + key.setData(keyBytes); + data.setData(key.getData()); + OperationStatus status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + + status = db.get(null, key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + } + + /* Ensure bin prefixes are computed. */ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = null; + try { + cursor = db.openCursor(txn, CursorConfig.READ_UNCOMMITTED); + BIN pbin = null; + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + if (bin != pbin) { + bin.latch(); + bin.recalcKeyPrefix(); + bin.releaseLatch(); + pbin = bin; + } + } + } finally { + if (cursor != null) { + cursor.close(); + } + txn.commit(); + } + + /* Verify that expected keys are all present in the database. 
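+ *
+ * (For reference, the RLE encoding used by strToRLEbytes below stores
+ * each run as a 4-byte int length followed by a 2-byte char, e.g.:
+ *   strToRLEbytes("aabbbbc") -> [2,'a'] [4,'b'] [1,'c']  (18 bytes)
+ * and rleBytesToStr inverts it, which is all the comparator needs.)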
*/ + rand = new Random(0); + for (int i=0; i < keyCount; i++) { + String str = Long.toString(rand.nextLong()); + + final byte[] keyBytes = strToRLEbytes(str); + + key.setData(keyBytes); + OperationStatus status = db.get(null, key, data, LockMode.DEFAULT); + assertSame(OperationStatus.SUCCESS, status); + } + + closeEnv(); + } + + static public String rleBytesToStr(byte encodedBytes[]) { + ByteBuffer bb = ByteBuffer.wrap(encodedBytes); + StringBuffer sb = new StringBuffer(); + + while (bb.remaining() > 0){ + int length = bb.getInt(); + final char c = bb.getChar(); + while (length-- > 0) { + sb.append(c); + } + } + + return sb.toString(); + } + + static public byte[] strToRLEbytes(String str) { + ByteBuffer bb = ByteBuffer.allocate(1000); + for (int i = 0; i < str.length(); i++) { + int length = 1; + while ((i+1) < str.length() && str.charAt(i) == str.charAt(i+1)) { + length++; + i++; + } + bb.putInt(length); + bb.putChar(str.charAt(i)); + } + byte encodedBytes[] = new byte[bb.position()]; + bb.rewind(); + bb.get(encodedBytes); + return encodedBytes; + } + + static class RLEKeyComparator + implements Comparator, Serializable { + private static final long serialVersionUID = 1L; + + @Override + public int compare(byte[] k1, byte[] k2) { + String s1 = rleBytesToStr(k1); + String s2 = rleBytesToStr(k2); + return s1.compareTo(s2); + } + } + + private DatabaseConfig configDatabaseWithComparator + (Comparator testKeyComparator) { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setTransactional(true); + dbConfig.setKeyPrefixing(true); + dbConfig.setBtreeComparator(testKeyComparator); + return dbConfig; + } + + /** + * Tests key prefixing with a key comparator that causes keys to be sorted + * such that the first and last keys in a range do not represent the first + * last values if instead they were sorted byte-by-byte. For example, + * string keys might be stored with a preceding 1 byte integer length: + *
        +     *  2, "aa"
        +     *  2, "bb"
        +     *  4, "cccc"
        +     *  2, "dd"
        +     * 
        + * If prefixing were performed by only considering the first and last key + * in the range, the first byte would be in common and used as the prefix. + * But this prefix would be incorrect for the middle key. + *

        + * The test uses these four keys, rather than just three, because the old + * (incorrect) code checked the first two keys and the last key to + * determine the prefix. In this test case, the first byte has the same + * value (2) for these three keys. + *
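+ *
+ * Concretely, getLeadingLengthKeyBytes (below) encodes "cccc" as the
+ * single length byte 4 followed by the ASCII bytes, i.e.:
+ *   { 4, 'c', 'c', 'c', 'c' }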

        + * [#21405] + */ + @Test + public void testLeadingLengthKeys() { + + initEnv(0); + db.close(); + db = null; + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setExclusiveCreate(true); + dbConfig.setTransactional(true); + dbConfig.setKeyPrefixing(true); + dbConfig.setBtreeComparator(new LeadingLengthKeyComparator()); + db = env.openDatabase(null, "testKeyComparator", dbConfig); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* Insert leading-length keys. */ + for (String s : new String[] { "aa", "bb", "cccc", "dd" }) { + key.setData(getLeadingLengthKeyBytes(s)); + data.setData(key.getData()); + OperationStatus status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + } + + /* Force prefixing. */ + Cursor cursor = db.openCursor(null, null); + OperationStatus status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + + BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + bin.latch(); + bin.recalcKeyPrefix(); + bin.releaseLatch(); + + /* Check keys. */ + status = cursor.getFirst(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals("aa", getLeadingLengthKeyString(key)); + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals("bb", getLeadingLengthKeyString(key)); + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals("cccc", getLeadingLengthKeyString(key)); + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + assertEquals("dd", getLeadingLengthKeyString(key)); + status = cursor.getNext(key, data, null); + assertSame(OperationStatus.NOTFOUND, status); + + cursor.close(); + closeEnv(); + } + + static byte[] getLeadingLengthKeyBytes(String s) { + assertTrue(s.length() < Byte.MAX_VALUE); + byte[] b = new byte[1 + s.length()]; + b[0] = (byte) s.length(); + try { + System.arraycopy(s.getBytes("US-ASCII"), 0, b, 1, s.length()); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + return b; + } + + static String getLeadingLengthKeyString(DatabaseEntry entry) { + return getLeadingLengthKeyString(entry.getData()); + } + + static String getLeadingLengthKeyString(byte[] b) { + int len = b[0]; + try { + return new String(b, 1, len, "US-ASCII"); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + + static class LeadingLengthKeyComparator + implements Comparator, Serializable { + + public int compare(byte[] k1, byte[] k2) { + String s1 = getLeadingLengthKeyString(k1); + String s2 = getLeadingLengthKeyString(k2); + return s1.compareTo(s2); + } + } +} diff --git a/test/com/sleepycat/je/tree/KeyTest.java b/test/com/sleepycat/je/tree/KeyTest.java new file mode 100644 index 0000000..604d81e --- /dev/null +++ b/test/com/sleepycat/je/tree/KeyTest.java @@ -0,0 +1,191 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +public class KeyTest extends TestBase { + private File envHome; + private Environment env; + + @Override + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + @Override + public void tearDown() { + if (env != null) { + try { + env.close(); + } catch (DatabaseException E) { + } + } + } + + @Test + public void testKeyPrefixer() { + assertEquals("aaa", makePrefix("aaaa", "aaab")); + assertEquals("a", makePrefix("abaa", "aaab")); + assertNull(makePrefix("baaa", "aaab")); + assertEquals("aaa", makePrefix("aaa", "aaa")); + assertEquals("aaa", makePrefix("aaa", "aaab")); + } + + private String makePrefix(String k1, String k2) { + byte[] ret = Key.createKeyPrefix(StringUtils.toUTF8(k1), + StringUtils.toUTF8(k2)); + if (ret == null) { + return null; + } + return StringUtils.fromUTF8(ret); + } + + @Test + public void testKeyPrefixSubsetting() { + keyPrefixSubsetTest("aaa", "aaa", true); + keyPrefixSubsetTest("aa", "aaa", true); + keyPrefixSubsetTest("aaa", "aa", false); + keyPrefixSubsetTest("", "aa", false); + keyPrefixSubsetTest(null, "aa", false); + keyPrefixSubsetTest("baa", "aa", false); + } + + private void keyPrefixSubsetTest(String keyPrefix, + String newKey, + boolean expect) { + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + byte[] keyPrefixBytes = + (keyPrefix == null ? null : StringUtils.toUTF8(keyPrefix)); + byte[] newKeyBytes = StringUtils.toUTF8(newKey); + DatabaseConfig dbConf = new DatabaseConfig(); + dbConf.setKeyPrefixing(true); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + DatabaseImpl databaseImpl = + new DatabaseImpl(null, + "dummy", new DatabaseId(10), envImpl, dbConf); + IN in = new IN(databaseImpl, null, 10, 10); + in.setKeyPrefix(keyPrefixBytes); + boolean result = compareToKeyPrefix(in, newKeyBytes); + assertTrue(result == expect); + } catch (Exception E) { + E.printStackTrace(); + fail("caught " + E); + } finally { + env.close(); + env = null; + } + } + + /* + * Returns whether the current prefix (if any) is also a prefix of a + * given newKey. + * + * This has default protection for the unit tests. 
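+ *
+ * Usage sketch, mirroring keyPrefixSubsetTest above:
+ *   in.setKeyPrefix(StringUtils.toUTF8("aa"));
+ *   compareToKeyPrefix(in, StringUtils.toUTF8("aaa")); // true
+ *   compareToKeyPrefix(in, StringUtils.toUTF8("ab"));  // false, mismatch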
+ */ + private boolean compareToKeyPrefix(IN in, byte[] newKey) { + + byte[] keyPrefix = in.getKeyPrefix(); + + if (keyPrefix == null || keyPrefix.length == 0) { + return false; + } + + int newKeyLen = newKey.length; + + for (int i = 0; i < keyPrefix.length; i++) { + if (i < newKeyLen && keyPrefix[i] == newKey[i]) { + continue; + } else { + return false; + } + } + + return true; + } + + @Test + public void testKeyComparisonPerformance() { + byte[] key1 = StringUtils.toUTF8("abcdefghijabcdefghij"); + byte[] key2 = StringUtils.toUTF8("abcdefghijabcdefghij"); + + for (int i = 0; i < 1000000; i++) { + assertTrue(Key.compareKeys(key1, key2, null) == 0); + } + } + + @Test + public void testKeyComparison() { + byte[] key1 = StringUtils.toUTF8("aaa"); + byte[] key2 = StringUtils.toUTF8("aab"); + assertTrue(Key.compareKeys(key1, key2, null) < 0); + assertTrue(Key.compareKeys(key2, key1, null) > 0); + assertTrue(Key.compareKeys(key1, key1, null) == 0); + + key1 = StringUtils.toUTF8("aa"); + key2 = StringUtils.toUTF8("aab"); + assertTrue(Key.compareKeys(key1, key2, null) < 0); + assertTrue(Key.compareKeys(key2, key1, null) > 0); + + key1 = StringUtils.toUTF8(""); + key2 = StringUtils.toUTF8("aab"); + assertTrue(Key.compareKeys(key1, key2, null) < 0); + assertTrue(Key.compareKeys(key2, key1, null) > 0); + assertTrue(Key.compareKeys(key1, key1, null) == 0); + + key1 = StringUtils.toUTF8(""); + key2 = StringUtils.toUTF8(""); + assertTrue(Key.compareKeys(key1, key2, null) == 0); + + byte[] ba1 = { -1, -1, -1 }; + byte[] ba2 = { 0x7f, 0x7f, 0x7f }; + assertTrue(Key.compareKeys(ba1, ba2, null) > 0); + + try { + Key.compareKeys(key1, null, null); + fail("NullPointerException not caught"); + } catch (NullPointerException NPE) { + } + } +} diff --git a/test/com/sleepycat/je/tree/LSNArrayTest.java b/test/com/sleepycat/je/tree/LSNArrayTest.java new file mode 100644 index 0000000..f11bc27 --- /dev/null +++ b/test/com/sleepycat/je/tree/LSNArrayTest.java @@ -0,0 +1,90 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertTrue; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.TestBase; + +public class LSNArrayTest extends TestBase { + private static final int N_ELTS = 128; + + private IN theIN; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + theIN = new IN(); + } + + @Test + public void testPutGetElement() { + doTest(N_ELTS); + } + + @Test + public void testOverflow() { + doTest(N_ELTS << 2); + } + + @Test + public void testFileOffsetGreaterThan3Bytes() { + theIN.initEntryLsn(10); + theIN.setLsnInternal(0, 0xfffffe); + assertTrue(theIN.getLsn(0) == 0xfffffe); + assertTrue(theIN.getEntryLsnByteArray() != null); + assertTrue(theIN.getEntryLsnLongArray() == null); + theIN.setLsnInternal(1, 0xffffff); + assertTrue(theIN.getLsn(1) == 0xffffff); + assertTrue(theIN.getEntryLsnLongArray() != null); + assertTrue(theIN.getEntryLsnByteArray() == null); + + theIN.initEntryLsn(10); + theIN.setLsnInternal(0, 0xfffffe); + assertTrue(theIN.getLsn(0) == 0xfffffe); + assertTrue(theIN.getEntryLsnByteArray() != null); + assertTrue(theIN.getEntryLsnLongArray() == null); + theIN.setLsnInternal(1, 0xffffff + 1); + assertTrue(theIN.getLsn(1) == 0xffffff + 1); + assertTrue(theIN.getEntryLsnLongArray() != null); + assertTrue(theIN.getEntryLsnByteArray() == null); + } + + private void doTest(int nElts) { + theIN.initEntryLsn(nElts); + for (int i = nElts - 1; i >= 0; i--) { + long thisLsn = DbLsn.makeLsn(i, i); + theIN.setLsnInternal(i, thisLsn); + if (theIN.getLsn(i) != thisLsn) { + System.out.println(i + " found: " + + DbLsn.toString(theIN.getLsn(i)) + + " expected: " + + DbLsn.toString(thisLsn)); + } + assertTrue(theIN.getLsn(i) == thisLsn); + } + + for (int i = 0; i < nElts; i++) { + long thisLsn = DbLsn.makeLsn(i, i); + theIN.setLsn(i, thisLsn); + assertTrue(theIN.getLsn(i) == thisLsn); + } + } +} diff --git a/test/com/sleepycat/je/tree/MemorySizeTest.java b/test/com/sleepycat/je/tree/MemorySizeTest.java new file mode 100644 index 0000000..8134fe6 --- /dev/null +++ b/test/com/sleepycat/je/tree/MemorySizeTest.java @@ -0,0 +1,519 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.INList; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.utilint.StringUtils; + +/** + * Check maintenance of the memory size count within nodes. + */ +public class MemorySizeTest extends DualTestCase { + private Environment env; + private final File envHome; + private Database db; + + public MemorySizeTest() { + envHome = SharedTestUtils.getTestDir(); + /* Print keys as numbers */ + Key.DUMP_TYPE = DumpType.BINARY; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + + IN.ACCUMULATED_LIMIT = 0; + Txn.ACCUMULATED_LIMIT = 0; + + /* + * Properties for creating an environment. + * Disable the evictor for this test, use larger BINS + */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(), + "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), + "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), + "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_CLEANER.getName(), + "false"); + + /* Don't checkpoint utilization info for this test. */ + DbInternal.setCheckpointUP(envConfig, false); + + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); + envConfig.setAllowCreate(true); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + envConfig.setTransactional(true); + env = create(envHome, envConfig); + } + + @After + public void tearDown() + throws Exception { + + if (env != null) { + close(env); + } + super.tearDown(); + } + + /* Test that the KeyPrefix changes should result in memory changes. */ + @Test + public void testKeyPrefixChange() + throws Throwable { + + final String dbName = "testDB"; + final String value = "herococo"; + + /* Create the database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setKeyPrefixing(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, dbName, dbConfig); + + /* Insert some records with the same KeyPrefix. */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 1; i <= 9; i++) { + StringBinding.stringToEntry(value + i, key); + StringBinding.stringToEntry(value, data); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + /* Traverse the BIN. 
*/ + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = null; + boolean success = false; + try { + cursor = db.openCursor(txn, null); + int counter = 0; + boolean hasKeyPrefix = false; + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + counter++; + + final String keyString = StringUtils.fromUTF8(key.getData()); + final int keyLength = keyString.length(); + final BIN bin = DbInternal.getCursorImpl(cursor).getBIN(); + + /* Check that record is ordered by bytes. */ + assertEquals(keyString.substring(0, keyLength - 2), value); + assertEquals(keyString.substring(keyLength - 2, keyLength - 1), + new Integer(counter).toString()); + + /* + * If the BIN has KeyPrefix, reset the KeyPrefix and check the + * MemoryBudget. Note we don't do the recalculation here, so + * checking that the memory does change is sufficient. + */ + if (bin.getKeyPrefix() != null) { + assertEquals(StringUtils.fromUTF8(bin.getKeyPrefix()), + value); + hasKeyPrefix = true; + long formerMemorySize = bin.getInMemorySize(); + /* Change the KeyPrefix of this BIN. */ + bin.latch(); + bin.setKeyPrefix(StringUtils.toUTF8("hero")); + bin.releaseLatch(); + assertEquals(StringUtils.fromUTF8(bin.getKeyPrefix()), + "hero"); + /* Check the MemorySize has changed. */ + assertTrue(bin.getInMemorySize() != formerMemorySize); + break; + } + } + assertTrue(hasKeyPrefix); + success = true; + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (cursor != null) { + cursor.close(); + } + + if (success) { + txn.commit(); + } else { + txn.abort(); + } + } + + db.close(); + } + + /* + * Do a series of these actions and make sure that the stored memory + * sizes match the calculated memory size. + * - create db + * - insert records, no split + * - cause IN split + * - modify + * - delete, compress + * - checkpoint + * - evict + * - insert duplicates + * - cause duplicate IN split + * - do an abort + * + * After duplicate storage was redone in JE 5, this test no longer + * exercises memory maintenance as thoroughly, since dup LNs are always + * evicted immediately. A non-dups test may be needed in the future. + */ + @Test + public void testMemSizeMaintenanceDups() + throws Throwable { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + try { + initDb(true); + + /* Insert one record. Adds two INs and an LN to our cost.*/ + insert((byte) 1, 10, (byte) 1, 100, true); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Fill out the node. */ + insert((byte) 2, 10, (byte) 2, 100, true); + insert((byte) 3, 10, (byte) 3, 100, true); + insert((byte) 4, 10, (byte) 4, 100, true); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Cause a split */ + insert((byte) 5, 10, (byte) 5, 100, true); + insert((byte) 6, 10, (byte) 6, 100, true); + insert((byte) 7, 10, (byte) 7, 100, true); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Modify data */ + modify((byte) 1, 10, (byte) 1, 1010, true); + modify((byte) 7, 10, (byte) 7, 1010, true); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Delete data */ + delete((byte) 2, 10, true); + delete((byte) 6, 10, true); + checkCount(5); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Compress. 
*/ + compress(); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Checkpoint */ + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* + * Check count, with side effect of fetching all records and + * preventing further fetches that impact mem usage. + */ + checkCount(5); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* insert duplicates */ + insert((byte) 3, 10, (byte) 30, 200, true); + insert((byte) 3, 10, (byte) 31, 200, true); + insert((byte) 3, 10, (byte) 32, 200, true); + insert((byte) 3, 10, (byte) 33, 200, true); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* create duplicate split. */ + insert((byte) 3, 10, (byte) 34, 200, true); + insert((byte) 3, 10, (byte) 35, 200, true); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* There should be 11 records. */ + checkCount(11); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* modify (use same data) and abort */ + modify((byte) 5, 10, (byte) 5, 100, false); + /* Abort will evict LN. Count to fetch LN back into tree. */ + checkCount(11); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* modify (use different data) and abort */ + modify((byte) 5, 10, (byte) 30, 1000, false); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* delete and abort */ + delete((byte) 1, 10, false); + delete((byte) 7, 10, false); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Delete dup */ + delete((byte) 3, 10, (byte)34, 200, false); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* insert and abort */ + insert((byte) 2, 10, (byte) 5, 100, false); + insert((byte) 6, 10, (byte) 6, 100, false); + insert((byte) 8, 10, (byte) 7, 100, false); + TestUtils.validateNodeMemUsage(envImpl, true); + + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } finally { + if (db != null) { + db.close(); + } + } + } + + /* + * Do a series of these actions and make sure that the stored memory + * sizes match the calculated memory size. + * - create db + * - insert records, cause split + * - delete + * - insert and re-use slots. + */ + @Test + public void testSlotReuseMaintenance() + throws Exception { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + try { + + initDb(true /*dups*/); + + /* Insert enough records to create one node. 
*/ + insert((byte) 1, 10, (byte) 1, 100, true); + insert((byte) 2, 10, (byte) 2, 100, true); + insert((byte) 3, 10, (byte) 3, 100, true); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Delete */ + delete((byte) 3, 10, true); + checkCount(2); + TestUtils.validateNodeMemUsage(envImpl, true); + + /* Insert again, reuse those slots */ + insert((byte) 3, 10, (byte) 2, 400, true); + TestUtils.validateNodeMemUsage(envImpl, true); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } finally { + if (db != null) { + db.close(); + } + } + } + + private void initDb(boolean dups) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(dups); + dbConfig.setTransactional(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void insert(byte keyVal, int keySize, + byte dataVal, int dataSize, + boolean commit) + throws DatabaseException { + + Transaction txn = null; + if (!commit) { + txn = env.beginTransaction(null, null); + } + assertEquals(OperationStatus.SUCCESS, + db.put(null, getEntry(keyVal, keySize), + getEntry(dataVal, dataSize))); + if (!commit) { + txn.abort(); + } + } + + private void modify(byte keyVal, int keySize, + byte dataVal, int dataSize, + boolean commit) + throws DatabaseException { + + Transaction txn = null; + + txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + DatabaseEntry data = new DatabaseEntry(); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchKey(getEntry(keyVal, keySize), + data, LockMode.DEFAULT)); + /* To avoid changing memory sizes, do not delete unless necessary. */ + if (!data.equals(getEntry(dataVal, dataSize))) { + assertEquals(OperationStatus.SUCCESS, + cursor.delete()); + } + assertEquals(OperationStatus.SUCCESS, + cursor.put(getEntry(keyVal, keySize), + getEntry(dataVal, dataSize))); + cursor.close(); + + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + } + + private void delete(byte keyVal, int keySize, boolean commit) + throws DatabaseException { + + Transaction txn = null; + if (!commit) { + txn = env.beginTransaction(null, null); + } + assertEquals(OperationStatus.SUCCESS, + db.delete(txn, getEntry(keyVal, keySize))); + if (!commit) { + txn.abort(); + } + } + + private void delete(byte keyVal, int keySize, + byte dataVal, int dataSize, boolean commit) + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + assertEquals(OperationStatus.SUCCESS, + cursor.getSearchBoth(getEntry(keyVal, keySize), + getEntry(dataVal, dataSize), + LockMode.DEFAULT)); + assertEquals(OperationStatus.SUCCESS, cursor.delete()); + cursor.close(); + + if (commit) { + txn.commit(); + } else { + txn.abort(); + } + } + + /* + * Fake compressing daemon by call BIN.compress explicitly on all + * BINS on the IN list. + */ + private void compress() + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + INList inList = envImpl.getInMemoryINs(); + for (IN in : inList) { + in.latch(); + envImpl.lazyCompress(in); + in.releaseLatch(); + } + } + + /* + * Fake eviction daemon by call BIN.evictLNs explicitly on all + * BINS on the IN list. + * + * Not currently used but may be needed later for a non-dups test. 
+ */ + private void evict() + throws DatabaseException { + + INList inList = DbInternal.getNonNullEnvImpl(env).getInMemoryINs(); + for (IN in : inList) { + if (in instanceof BIN && + !in.getDatabase().getDbType().isInternal()) { + BIN bin = (BIN) in; + bin.latch(); + try { + /* Expect to evict LNs. */ + if ((bin.evictLNs() & ~IN.NON_EVICTABLE_IN) > 0) { + return; + } + fail("No LNs evicted."); + } finally { + bin.releaseLatch(); + } + } + } + } + + private DatabaseEntry getEntry(byte val, int size) { + byte[] bArray = new byte[size]; + bArray[0] = val; + return new DatabaseEntry(bArray); + } + + private void checkCount(int expectedCount) + throws DatabaseException { + + Transaction txn = env.beginTransaction(null, null); + Cursor cursor = db.openCursor(txn, null); + int count = 0; + while (cursor.getNext(new DatabaseEntry(), new DatabaseEntry(), + LockMode.DEFAULT) == OperationStatus.SUCCESS) { + count++; + } + cursor.close(); + txn.commit(); + assertEquals(expectedCount, count); + } + + private void dumpINList() { + INList inList = DbInternal.getNonNullEnvImpl(env).getInMemoryINs(); + for (IN in : inList) { + System.out.println("in nodeId=" + in.getNodeId()); + } + } +} diff --git a/test/com/sleepycat/je/tree/ReleaseLatchesTest.java b/test/com/sleepycat/je/tree/ReleaseLatchesTest.java new file mode 100644 index 0000000..070a9d9 --- /dev/null +++ b/test/com/sleepycat/je/tree/ReleaseLatchesTest.java @@ -0,0 +1,538 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * @excludeDualMode + * Check that latches are release properly even if we run into read errors. + */ +@RunWith(Parameterized.class) +public class ReleaseLatchesTest extends TestBase { + private static final boolean DEBUG = false; + + private Environment env; + private final File envHome; + private Database db; + private TestDescriptor testActivity; + + /* + * The OPERATIONS declared here define the test cases for this test. 
Each + * TestDescriptor describes a particular JE activity. The + * testCheckLatchLeaks method generates read i/o exceptions during the test + * descriptor's action, and will check that we come up clean. + */ + public static TestDescriptor[] OPERATIONS = { + + /* + * TestDescriptor params: + * - operation name: for debugging + * - number of times to generate an exception. For example if N, + * the test action will be executed in a loop N times, with an + * read/io on read 1, read 2, read 3 ... read n-1 + * - number of records in the database. + */ + new TestDescriptor("database put", 6, 30, false) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.populate(false); + } + + @Override + void reinit(ReleaseLatchesTest test) + throws DatabaseException{ + + test.closeDb(); + test.getEnv().truncateDatabase(null, "foo", false); + } + }, + new TestDescriptor("cursor scan", 31, 20, false) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.scan(); + } + }, + new TestDescriptor("cursor scan duplicates", 23, 3, true) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.scan(); + } + }, +//* + new TestDescriptor("database get", 31, 20, false) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.get(); + } + }, +//*/ + new TestDescriptor("database delete", 40, 30, false) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.delete(); + } + + @Override + void reinit(ReleaseLatchesTest test) + throws DatabaseException{ + + test.populate(false); + } + }, + new TestDescriptor("checkpoint", 40, 10, false) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.modify(exceptionCount); + CheckpointConfig config = new CheckpointConfig(); + config.setForce(true); + if (DEBUG) { + System.out.println("Got to checkpoint"); + } + test.getEnv().checkpoint(config); + } + }, + new TestDescriptor("clean", 100, 5, false) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.modify(exceptionCount); + CheckpointConfig config = new CheckpointConfig(); + config.setForce(true); + if (DEBUG) { + System.out.println("Got to cleaning"); + } + test.getEnv().cleanLog(); + } + }, + new TestDescriptor("compress", 20, 10, false) { + @Override + void doAction(ReleaseLatchesTest test, int exceptionCount) + throws DatabaseException { + + test.delete(); + if (DEBUG) { + System.out.println("Got to compress"); + } + test.getEnv().compress(); + } + + @Override + void reinit(ReleaseLatchesTest test) + throws DatabaseException{ + + test.populate(false); + } + } + }; + + @Parameters + public static List genParams() { + List list = new ArrayList(); + for (TestDescriptor action : OPERATIONS) + list.add(new Object[]{action}); + + return list; + } + + public ReleaseLatchesTest(TestDescriptor action) { + + envHome = SharedTestUtils.getTestDir(); + testActivity = action; + customName = action.getName(); + } + + private void init(boolean duplicates) + throws DatabaseException { + + openEnvAndDb(); + + populate(duplicates); + env.checkpoint(null); + db.close(); + db = null; + env.close(); + env = null; + } + + private void openEnvAndDb() + throws DatabaseException { + + /* + * Make an environment with small nodes and no daemons. 
+ */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); + envConfig.setConfigParam("je.env.runEvictor", "false"); + envConfig.setConfigParam("je.env.runCheckpointer", "false"); + envConfig.setConfigParam("je.env.runCleaner", "false"); + envConfig.setConfigParam("je.env.runINCompressor", "false"); + envConfig.setConfigParam + (EnvironmentParams.CLEANER_MIN_UTILIZATION.getName(), "90"); + envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), + Integer.toString(20000)); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + /* Calling close under -ea will check for leaked latches. */ + private void doCloseAndCheckLeaks() + throws Throwable { + + try { + if (db != null) { + db.close(); + db = null; + } + + if (env != null) { + env.close(); + env = null; + } + } catch (Throwable t) { + System.out.println("operation = " + testActivity.name); + t.printStackTrace(); + throw t; + } + } + + private void closeDb() + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + } + + private Environment getEnv() { + return env; + } + + /* + * This is the heart of the unit test. Given a TestDescriptor, run the + * operation's activity in a loop, generating read i/o exceptions at + * different points. Check for latch leaks after the i/o exception + * happens. + */ + @Test + public void testCheckLatchLeaks() + throws Throwable { + + int maxExceptionCount = testActivity.getNumExceptions(); + if (DEBUG) { + System.out.println("Starting test: " + testActivity.getName()); + } + + try { + init(testActivity.getDuplicates()); + + /* + * Run the action repeatedly, generating exceptions at different + * points. + */ + for (int i = 1; i <= maxExceptionCount; i++) { + + /* + * Open the env and database anew each time, so that we need to + * fault in objects and will trigger read i/o exceptions. + */ + openEnvAndDb(); + EnvironmentImpl envImpl = + DbInternal.getNonNullEnvImpl(env); + boolean exceptionOccurred = false; + + try { + ReadIOExceptionHook readHook = new ReadIOExceptionHook(i); + envImpl.getLogManager().setReadHook(readHook); + testActivity.doAction(this, i); + } catch (Throwable e) { + if (!env.isValid()) { + + /* + * It's possible for a read error to induce a + * RunRecoveryException if the read error happens when + * we are opening a new write file channel. (We read + * and validate the file header). In that case, check + * for latches, and re-open the database. 
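+ *
+ * The invariant enforced by checkLatchCount (below) is simply that the
+ * failed operation left no Btree latches held, roughly:
+ *   assert LatchSupport.nBtreeLatchesHeld() == 0;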
+ */ + checkLatchCount((DatabaseException) e, i); + env.close(); + openEnvAndDb(); + envImpl = DbInternal.getNonNullEnvImpl(env); + exceptionOccurred = true; + } else if (e instanceof DatabaseException) { + checkLatchCount((DatabaseException) e, i); + exceptionOccurred = true; + } else { + throw e; + } + } + + if (DEBUG && !exceptionOccurred) { + System.out.println("Don't need ex count " + i + + " for test activity " + + testActivity.getName()); + } + + envImpl.getLogManager().setReadHook(null); + testActivity.reinit(this); + doCloseAndCheckLeaks(); + } + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + private void checkLatchCount(DatabaseException e, + int exceptionCount) + throws DatabaseException { + + /* Only rethrow the exception if we didn't clean up latches. */ + if (LatchSupport.nBtreeLatchesHeld() > 0) { + LatchSupport.dumpBtreeLatchesHeld(); + System.out.println("Operation = " + testActivity.getName() + + " exception count=" + exceptionCount + + " Held latches = " + + LatchSupport.nBtreeLatchesHeld()); + /* Show stacktrace where the latch was lost. */ + e.printStackTrace(); + throw e; + } + } + + /* Insert records into a database. */ + private void populate(boolean duplicates) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry data1 = new DatabaseEntry(); + DatabaseEntry data2 = new DatabaseEntry(); + DatabaseEntry data3 = new DatabaseEntry(); + DatabaseEntry data4 = new DatabaseEntry(); + IntegerBinding.intToEntry(0, data); + IntegerBinding.intToEntry(1, data1); + IntegerBinding.intToEntry(2, data2); + IntegerBinding.intToEntry(3, data3); + IntegerBinding.intToEntry(4, data4); + + for (int i = 0; i < testActivity.getNumRecords(); i++) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + if (duplicates) { + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data1)); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data2)); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data3)); + assertEquals(OperationStatus.SUCCESS, + db.put(null, key, data4)); + } + } + } + + /* Modify the database. */ + private void modify(int dataVal) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(dataVal, data); + + for (int i = 0; i < testActivity.getNumRecords(); i++) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + } + + /* Cursor scan the data. */ + private void scan() + throws DatabaseException { + + Cursor cursor = null; + try { + cursor = db.openCursor(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + } + } finally { + if (cursor != null) { + cursor.close(); + } + } + } + + /* Database.get() for all records. */ + private void get() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < testActivity.getNumRecords(); i++) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, + db.get(null, key, data, LockMode.DEFAULT)); + } + } + + /* Delete all records. 
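+     * The assertion message includes the key value, so a failed delete
+     * identifies exactly which record was missing.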
*/ + private void delete() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + for (int i = 0; i < testActivity.getNumRecords(); i++) { + IntegerBinding.intToEntry(i, key); + assertEquals("key = " + IntegerBinding.entryToInt(key), + OperationStatus.SUCCESS, db.delete(null, key)); + } + } + + /* + * This TestHook implementation generates io exceptions during reads. + */ + static class ReadIOExceptionHook implements TestHook { + private int counter = 0; + private final int throwCount; + + ReadIOExceptionHook(int throwCount) { + this.throwCount = throwCount; + } + public void doIOHook() + throws IOException { + + if (throwCount == counter) { + counter++; + throw new IOException("Generated exception: " + + this.getClass().getName()); + } else { + counter++; + } + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + } + + static abstract class TestDescriptor { + private final String name; + private final int numExceptions; + private final int numRecords; + private final boolean duplicates; + + TestDescriptor(String name, + int numExceptions, + int numRecords, + boolean duplicates) { + this.name = name; + this.numExceptions = numExceptions; + this.numRecords = numRecords; + this.duplicates = duplicates; + } + + int getNumRecords() { + return numRecords; + } + + int getNumExceptions() { + return numExceptions; + } + + String getName() { + return name; + } + + boolean getDuplicates() { + return duplicates; + } + + /* Do a series of operations. */ + abstract void doAction(ReleaseLatchesTest test, + int exceptionCount) + throws DatabaseException; + + /** + * Reinitialize the database if doAction modified it. + * @throws DatabaseException from subclasses. + */ + void reinit(ReleaseLatchesTest test) + throws DatabaseException { + + } + } +} diff --git a/test/com/sleepycat/je/tree/SR13034Test.java b/test/com/sleepycat/je/tree/SR13034Test.java new file mode 100644 index 0000000..84cfdb5 --- /dev/null +++ b/test/com/sleepycat/je/tree/SR13034Test.java @@ -0,0 +1,164 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.StringBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Because LNs no longer have node IDs, the bug this test was checking is no + * longer applicable. However, the test is generic and should run. + * + * Original description + * -------------------- + * Reproduce a bug where fetchEntry rather than fetchEntryIgnoreKnownDeleted + * was being called when searching the duplicate tree by LN node ID during + * recovery. + * + * The trick is to create a DBIN with a KnownDeleted flag set on an entry. And + * to cause recovery to search that DBIN by node ID during redo of a deleted + * LN. This deleted LN log entry must not have any data -- it must have been + * deleted before creation of the dup tree as in SR 8984. + * + * In addition, the deleted LN must appear after the entries with KnownDeleted + * set in the BIN, otherwise the search by node ID will find the LN before + * it encounters a KnownDeleted entry. + + * The sequence in the test is as follows. I'm not positive this was the same + * sequence as seen by the user, since the user did not send their logs, but + * I believe the bug fix is general enough to cover similar cases. + * + * 1) Insert {A, C} (LN with key A, data C) in T1. + * 2) Delete {A, C} in T1. The LN log entry will not have any data. + * 3) Commit T1 so these log entries will be replayed during recovery redo. + * 4) Insert {A, A} and {A, B} in T2. + * 5) Abort T2 so that the KnownDeleted flag will be set on these DBIN entries + * during recovery. + * 6) Close without a checkpoint and recover. When replaying the deleted LN + * {A, C}, we don't have a dup key because it was deleted before the dup tree + * was created. So we search the dup tree by LN node ID. Calling fetchEntry + * on {A, A} (or {A, B}) throws an exception because KnownDeleted is set. We + * neglected to check KnownDeleted. + */ +public class SR13034Test extends DualTestCase { + + private final File envHome; + private Environment env; + private Database db; + + public SR13034Test() { + envHome = SharedTestUtils.getTestDir(); + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + /* Do not run the daemons to avoid timing considerations. 
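+         * Recovery must replay an exact sequence of log entries here,
+         * and a background checkpoint or compression run would perturb
+         * that sequence.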
*/ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setSortedDuplicates(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void close() + throws DatabaseException { + + db.close(); + db = null; + + close(env); + env = null; + } + + @Test + public void testSR13034() + throws DatabaseException { + + open(); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status; + Transaction txn; + + /* + * Insert {A, C}, then delete it. No dup tree has been created, so + * this logs a deleted LN with no data. + */ + txn = env.beginTransaction(null, null); + StringBinding.stringToEntry("A", key); + StringBinding.stringToEntry("C", data); + status = db.putNoOverwrite(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + status = db.delete(txn, key); + assertEquals(OperationStatus.SUCCESS, status); + txn.commit(); + + /* + * Insert {A, A}, {A, B}, which creates a dup tree. Then abort to set + * KnownDeleted on these entries. + */ + txn = env.beginTransaction(null, null); + StringBinding.stringToEntry("A", key); + StringBinding.stringToEntry("A", data); + status = db.putNoDupData(txn, key, data); + StringBinding.stringToEntry("A", key); + StringBinding.stringToEntry("B", data); + status = db.putNoDupData(txn, key, data); + assertEquals(OperationStatus.SUCCESS, status); + txn.abort(); + + /* + * Close without a checkpoint and recover. Before the bug fix, the + * recovery would throw DatabaseException "attempt to fetch a deleted + * entry". + */ + db.close(); + closeNoCheckpoint(env); + open(); + + close(); + } +} diff --git a/test/com/sleepycat/je/tree/SR13126Test.java b/test/com/sleepycat/je/tree/SR13126Test.java new file mode 100644 index 0000000..e17aee8 --- /dev/null +++ b/test/com/sleepycat/je/tree/SR13126Test.java @@ -0,0 +1,229 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertNull; + +import java.io.File; + +import com.sleepycat.collections.CurrentTransaction; +import com.sleepycat.collections.TransactionRunner; +import com.sleepycat.collections.TransactionWorker; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.RunRecoveryException; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.JVMSystemUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Test; + +/** + */ +public class SR13126Test extends TestBase { + + private final File envHome; + private Environment env; + private Database db; + private long maxMem; + + public SR13126Test() { + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() { + try { + if (env != null) { + env.close(); + } + } catch (Exception e) { + System.out.println("During tearDown: " + e); + } + + env = null; + db = null; + } + + private boolean open() + throws DatabaseException { + + maxMem = JVMSystemUtils.getRuntimeMaxMemory(); + if (maxMem == -1) { + System.out.println + ("*** Warning: not able to run this test because the JVM " + + "heap size is not available"); + return false; + } + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + /* Do not run the daemons to avoid timing considerations. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, "foo", dbConfig); + + return true; + } + + private void close() + throws DatabaseException { + + db.close(); + db = null; + + env.close(); + env = null; + } + + @Test + public void testSR13126() + throws DatabaseException { + + if (!open()) { + return; + } + + Transaction txn = env.beginTransaction(null, null); + + try { + insertUntilOutOfMemory(txn); + /* OOME outside of put() -- fall through. */ + txn.abort(); + db.close(); + } catch (OutOfMemoryError expected) { + } catch (RunRecoveryException expected) { + } + + verifyDataAndClose(); + } + + @Test + public void testTransactionRunner() + throws Exception { + + if (!open()) { + return; + } + + final CurrentTransaction currentTxn = + CurrentTransaction.getInstance(env); + + TransactionRunner runner = new TransactionRunner(env); + /* Don't print exception stack traces during test runs. */ + DbCompat.TRANSACTION_RUNNER_PRINT_STACK_TRACES = false; + try { + runner.run(new TransactionWorker() { + public void doWork() + throws Exception { + + insertUntilOutOfMemory(currentTxn.getTransaction()); + } + }); + /* OOME outside of put() -- env is not invalid. 
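+             * (insertUntilOutOfMemory returns normally only when the
+             * allocation of its test data array, rather than the put()
+             * call, ran out of memory; that run is reported as an
+             * invalid test.)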
*/ + db.close(); + return; + } catch (OutOfMemoryError expected) { + } catch (RunRecoveryException expected) { + } + + /* + * If TransactionRunner does not abort the transaction, this thread + * will be left with a transaction attached. + */ + assertNull(currentTxn.getTransaction()); + + verifyDataAndClose(); + } + + private void insertUntilOutOfMemory(Transaction txn) + throws DatabaseException, OutOfMemoryError { + + DatabaseEntry key = new DatabaseEntry(new byte[1]); + DatabaseEntry data = new DatabaseEntry(); + + int startMem = (int) (maxMem / 3); + int bumpMem = (int) ((maxMem - maxMem / 3) / 5); + + /* Insert larger and larger LNs until an OutOfMemoryError occurs. */ + for (int memSize = startMem;; memSize += bumpMem) { + + /* + * If the memory error occurs when we do "new byte[]" below, this + * is not a test of the bug in question. + */ + try { + data.setData(new byte[memSize]); + } catch (OutOfMemoryError e) { + System.out.println("OOME outside of put(), invalid test."); + return; + } + + try { + db.put(null, key, data); + } catch (OutOfMemoryError e) { + //System.err.println("Error during write " + memSize); + throw e; + } + } + } + + private void verifyDataAndClose() + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + /* + * If a NULL_LSN is present in a BIN entry because of an incomplete + * insert, an assertion will fire during the checkpoint when writing + * the BIN. + */ + env.close(); + env = null; + + /* + * If the NULL_LSN was written above because assertions are disabled, + * check that we don't get an exception when fetching it. + */ + open(); + Cursor c = db.openCursor(null, null); + while (c.getNext(key, data, null) == OperationStatus.SUCCESS) {} + c.close(); + close(); + } +} diff --git a/test/com/sleepycat/je/tree/SplitRace_SR11144Test.java b/test/com/sleepycat/je/tree/SplitRace_SR11144Test.java new file mode 100644 index 0000000..b9bb975 --- /dev/null +++ b/test/com/sleepycat/je/tree/SplitRace_SR11144Test.java @@ -0,0 +1,306 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; + +/********************************************************************* + Exercise a race condition in split processing. The case requires a + at least 3 level btree where the root has maxEntries-1 children. + i.e suppose node max = 4. 
+ Our test case will start with data like this:
+
+                               RootIN
+                      +--------+----------+
+                     /         |           \
+                  INa         INb          INc
+                 / | \                    / | \
+              BIN BIN BINx             BIN BIN BINy
+                     /||\                      /||\
+
+ Note that it takes some finagling to make the data look this way. An insert
+ of sequentially ascending values won't look like this, because opportunistic
+ splitting prevents all but the rightmost BIN from being completely full.
+
+ At this point, suppose that thread 1 wants to insert into BINx and thread 2
+ wants to insert into BINy. Our split code looks like this:
+
+ Body of Tree.searchSplitsAllowed()
+
+    rootLatch.acquire()
+    fetch rootIN
+    rootIN.latch
+    opportunistically split root (dropping and re-acquiring rootIN latches)
+    splitting the root requires updating the dbmapping tree
+    rootLatch.release()
+
+    // leave this block of code owning the rootIN latch.
+    call searchSubTreeSplitsAllowed()
+
+ Body of Tree.searchSubTreeSplitsAllowed()
+
+    while (true) {
+        try {
+            // throws if it finds a node that needs splitting
+            return searchSubTreeUntilSplit()
+        } catch (SplitRequiredException e) {
+            // acquire latches down the depth of the tree
+            forceSplit();
+        }
+    }
+
+ If the code is executed in this order:
+
+    thread 1 executes searchSplitsAllowed(), root doesn't need splitting
+    thread 1 executes searchSubTreeUntilSplit(), throws out because of BINx
+    thread 1 holds no latches before executing forceSplit()
+    thread 2 executes searchSplitsAllowed(), root doesn't need splitting
+    thread 2 executes searchSubTreeUntilSplit(), throws out because of BINy
+    thread 2 holds no latches before executing forceSplit()
+    thread 1 executes forceSplit, splits BINx, which ripples upward,
+        adding a new level 2 IN. The root is full
+    thread 2 executes forceSplit, splits BINy, which ripples upward,
+        adding a new level 2 IN. The root can't hold the new child!
+
+ The root split is handled separately, outside forceSplit, because it is a
+ special case: the rootLatch must be held while it is done.
+
+ This case does not exist for duplicates because:
+ a. in 1 case, the owning BIN (the equivalent of the root) stays latched
+ b. in a 2nd case, the caller is recovery, which is single threaded.
+
+ The solution was to check for root fullness in forceSplit(), before
+ latching down the whole depth of the tree. In that case, we throw out
+ and re-execute the rootLatch latching.
+
+********************************************************************/
+
+public class SplitRace_SR11144Test extends DualTestCase {
+    private static final boolean DEBUG = false;
+    private final File envHome;
+    private Environment env = null;
+    private Database db = null;
+
+    public SplitRace_SR11144Test() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @Test
+    public void testSplitRootRace()
+        throws Throwable {
+
+        /* Create tree topology described in header comments. */
+        initData();
+
+        /*
+         * Create two threads, and hold them in a barrier at the
+         * designated point in Tree.java. They'll insert keys which
+         * will split BINx and BINy.
+         */
+
+        InsertThread a = new InsertThread(92, db);
+        InsertThread b = new InsertThread(202, db);
+        setWaiterHook();
+        b.start();
+        a.start();
+
+        a.join();
+        b.join();
+
+        close();
+    }
+
+    /**
+     * Create this:
+     *                               RootIN
+     *                      +--------+----------+
+     *                     /         |           \
+     *                  INa         INb          INc
+     *                 / | \                    / | \
+     *              BIN BIN BINx             BIN BIN BINy
+     *                     /||\                      /||\
+     *
+     */
+    private void initData() {
+        try {
+            initEnvInternal(true);
+
+            /*
+             * Opportunistic splitting will cause the following inserts to
+             * add three child entries per parent.
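+             * With je.nodeMaxEntries=4 that leaves three entries per
+             * node, so the 23 inserts below (keys 0, 10, ... 220) build
+             * the topology pictured in the class comment, and the two
+             * extra puts (91 and 201) top off BINx and BINy to four
+             * entries each, so that the next insert into either BIN
+             * must split it.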
+ */ + int value = 0; + for (int i = 0; i < 23; i++) { + put(db, value); + value += 10; + } + + /* Add a fourth child to BINx and BINy */ + put(db, 91); + put(db, 201); + + if (DEBUG) { + dump(); + } + } catch (DatabaseException DBE) { + throw new RuntimeException(DBE); + } + } + + private static void put(Database db, int value) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + /* put the value in the key. */ + IntegerBinding.intToEntry(11, data); + IntegerBinding.intToEntry(value, key); + + OperationStatus status = db.putNoOverwrite(null, key, data); + if (status != OperationStatus.SUCCESS) { + throw new RuntimeException("status=" + status); + } + } + + private void close() { + try { + db.close(); + close(env); + } catch (DatabaseException DBE) { + throw new RuntimeException(DBE); + } + } + + private void dump() { + try { + Cursor cursor = db.openCursor(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + while (cursor.getNext(key, data, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + System.out.println(""); + } + DbInternal.getDbImpl(db).getTree().dump(); + cursor.close(); + } catch (DatabaseException DBE) { + throw new RuntimeException(DBE); + } + } + + private void initEnvInternal(boolean create) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(create); + envConfig.setConfigParam("je.nodeMaxEntries", "4"); + envConfig.setConfigParam("je.nodeDupTreeMaxEntries", "4"); + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(create); + dbConfig.setTransactional(true); + dbConfig.setExclusiveCreate(create); + db = env.openDatabase(null, "foo", dbConfig); + } + + private void setWaiterHook() { + TestHook hook = new WaiterHook(); + DbInternal.getDbImpl(db).getTree().setWaitHook(hook); + } + + /* + * This hook merely acts as a barrier. 2 threads enter and cannot + * proceed until both have arrived at that point. + */ + static class WaiterHook implements TestHook { + private int numArrived; + private final Object block; + + WaiterHook() { + numArrived = 0; + block = new Object(); + } + public void doHook() { + synchronized (block) { + if (numArrived == 0) { + numArrived = 1; + try { + block.wait(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } else if (numArrived == 1) { + numArrived = 2; + block.notify(); + } + } + } + public Object getHookValue() { + throw new UnsupportedOperationException(); + } + public void doIOHook() { + throw new UnsupportedOperationException(); + } + public void hookSetup() { + throw new UnsupportedOperationException(); + } + public void doHook(Object obj) { + throw new UnsupportedOperationException(); + } + } + + /* This thread merely inserts the specified value. 
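+     * The test uses values 92 and 202, which fall into the already-full
+     * BINx and BINy, so each insert forces the split under test.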
*/ + static class InsertThread extends Thread { + private final int value; + private final Database db; + + InsertThread(int value, Database db) { + this.value = value; + this.db = db; + } + + @Override + public void run() { + try { + put(db, value); + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + } +} diff --git a/test/com/sleepycat/je/tree/SplitTest.java b/test/com/sleepycat/je/tree/SplitTest.java new file mode 100644 index 0000000..a9710b9 --- /dev/null +++ b/test/com/sleepycat/je/tree/SplitTest.java @@ -0,0 +1,310 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.File; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Put; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.tree.Key.DumpType; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +public class SplitTest extends DualTestCase { + private static final boolean DEBUG = false; + + private final File envHome; + private Environment env; + private Database db; + + public SplitTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + } + + private void open(int nodeMaxEntries) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam( + EnvironmentConfig.NODE_MAX_ENTRIES, + String.valueOf(nodeMaxEntries)); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + env = create(envHome, envConfig); + + String databaseName = "testDb"; + Transaction txn = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setNodeMaxEntries(nodeMaxEntries); + db = env.openDatabase(txn, databaseName, dbConfig); + txn.commit(); + } + + private void close() { + db.close(); + close(env); + } + + /** + * Test splits on a case where the 0th entry gets promoted. + */ + @Test + public void test0Split() + throws Exception { + + open(4); + + Key.DUMP_TYPE = DumpType.BINARY; + try { + /* Build up a tree. 
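+             * Keys are inserted in descending order (160 down to 10) to
+             * set up the case under test, where the 0th entry of a node
+             * is promoted during a split.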
*/ + for (int i = 160; i > 0; i-= 10) { + assertEquals(OperationStatus.SUCCESS, + db.put(null, new DatabaseEntry + (new byte[] { (byte) i }), + new DatabaseEntry(new byte[] {1}))); + } + + if (DEBUG) { + System.out.println(""); + DbInternal.getDbImpl(db).getTree().dump(); + } + + assertEquals(OperationStatus.SUCCESS, + db.put(null, new DatabaseEntry(new byte[]{(byte)151}), + new DatabaseEntry(new byte[] {1}))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new DatabaseEntry(new byte[]{(byte)152}), + new DatabaseEntry(new byte[] {1}))); + assertEquals(OperationStatus.SUCCESS, + db.put(null, new DatabaseEntry(new byte[]{(byte)153}), + new DatabaseEntry(new byte[] {1}))); + + if (DEBUG) { + DbInternal.getDbImpl(db).getTree().dump(); + System.out.println(""); + } + + /* + * These inserts make a tree where the right most mid-level IN + * has an idkey greater than its parent entry. + * + * +---------------+ + * | id = 90 | + * | 50 | 90 | 130 | + * +---------------+ + * | | | + * | + * +-----------------+ + * | id = 160 | + * | 130 | 150 | 152 | + * +-----------------+ + * | | | + * | | +-----------+ + * | | | + * +-----------+ +-----------+ +-----------------+ + * | BIN | | BIN | | BIN | + * | id = 130 | | id = 150 | | id=160 | + * | 130 | 140 | | 150 | 151 | | 152 | 153 | 160 | + * +-----------+ +-----------+ +-----------------+ + * + * Now delete records 130 and 140 to empty out the subtree with BIN + * with id=130. + */ + assertEquals(OperationStatus.SUCCESS, + db.delete(null, + new DatabaseEntry(new byte[]{(byte) 130}))); + assertEquals(OperationStatus.SUCCESS, + db.delete(null, + new DatabaseEntry(new byte[]{(byte) 140}))); + env.compress(); + + /* + * These deletes make the mid level IN's 0th entry > its parent + * reference. + * + * +---------------+ + * | id = 90 | + * | 50 | 90 | 130 | + * +---------------+ + * | | | + * | + * +-----------+ + * | id = 160 | + * | 150 | 152 | + * +-----------+ + * | | + * | | + * | | + * +-----------+ +-----------------+ + * | BIN | | BIN | + * | id = 150 | | id=160 | + * | 150 | 151 | | 152 | 153 | 160 | + * +-----------+ +-----------------+ + * + * Now insert 140 into BIN (id = 150) so that its first entry is + * less than the mid level IN. + */ + assertEquals(OperationStatus.SUCCESS, + db.put(null, new DatabaseEntry(new byte[]{(byte)140}), + new DatabaseEntry(new byte[] {1}))); + + /* + * Now note that the mid level tree's 0th entry is greater than its + * reference in the root. + * + * +---------------+ + * | id = 90 | + * | 50 | 90 | 130 | + * +---------------+ + * | | | + * | + * +-----------+ + * | id = 160 | + * | 150 | 152 | + * +-----------+ + * | | + * | | + * | | + * +----------------+ +-----------------+ + * | BIN | | BIN | + * | id = 150 | | id=160 | + * | 140 |150 | 151 | | 152 | 153 | 160 | + * +----------------+ +-----------------+ + * + * Now split the mid level node, putting the new child on the left. + */ + for (int i = 154; i < 159; i++) { + assertEquals(OperationStatus.SUCCESS, + db.put(null, + new DatabaseEntry(new byte[]{(byte)i}), + new DatabaseEntry(new byte[] {1}))); + } + + /* + * This used to result in the following broken tree, which would + * cause us to not be able to retrieve record 140. With the new + * split code, entry "150" in the root should stay 130. 
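+             * (That is, with the fix the root keeps 130 as its third
+             * entry, so record 140 remains reachable below it.)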
+             *
+             *          +---------------------+
+             *          | id = 90             |
+             *          | 50 | 90 | 150 | 154 |   NOTE: we've lost record 140
+             *          +---------------------+
+             *            |    |    |        \
+             *                 |                 \
+             *          +-----------+       +----------+
+             *          | id = 150  |       | id=160   |
+             *          | 150 | 152 |       | 154 | 156|
+             *          +-----------+       +----------+
+             *             |     |
+             *             |     |
+             *             |     |
+             *    +------------+   +-------+
+             *    | BIN        |   | BIN   |
+             *    | id = 150   |   | id=152|
+             *    | 140|150|151|   |152|153|
+             *    +------------+   +-------+
+             */
+            DatabaseEntry data = new DatabaseEntry();
+            assertEquals(OperationStatus.SUCCESS,
+                         db.get(null, new DatabaseEntry(new byte[]
+                                                        { (byte) 140 }),
+                                data, LockMode.DEFAULT));
+            close();
+
+        } catch (Throwable t) {
+            t.printStackTrace();
+            throw new Exception(t);
+        }
+    }
+
+    /**
+     * Tests a fix to a bug [#24917] where splits could fail with
+     * ArrayIndexOutOfBoundsException when reducing the fanout. It happens
+     * when a node is full before reducing the fanout, the full node is on the
+     * leftmost side of the Btree, and then a record is inserted in the
+     * leftmost position of the node (a key value lower than any other key).
+     *
+     * This was never noticed until BINs occasionally started growing beyond
+     * their max size due to TTL-related changes to compression. However, it
+     * could have also failed when the user reduced the fanout for an existing
+     * database, and that's the scenario that is tested here.
+     */
+    @Test
+    public void testSplitOverSizedNode() {
+
+        /* DB initially has a 256 fanout. */
+        open(256);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry(new byte[100]);
+
+        /* Fill BIN with 256 records. */
+        for (int i = 1000; i < 1256; i += 1) {
+            IntegerBinding.intToEntry(i, key);
+            assertNotNull(db.put(null, key, data, Put.NO_OVERWRITE, null));
+        }
+
+        /*
+         * Change DB fanout to 128. The extra open/close is needed because the
+         * first time the fanout is changed it isn't applied in cache, due to
+         * a bug.
+         */
+        close();
+        open(128);
+        close();
+        open(128);
+
+        /*
+         * Insert records at the beginning of the BIN. The first insertion
+         * will cause a split where the existing node holds only one record
+         * and the new sibling holds the other 255 entries. The bug was that
+         * the new sibling was sized according to the new fanout (128), and
+         * would not hold the 255 entries.
+         */
+        for (int i = 999; i >= 0; i -= 1) {
+            IntegerBinding.intToEntry(i, key);
+            assertNotNull(db.put(null, key, data, Put.NO_OVERWRITE, null));
+        }
+
+        close();
+    }
+}
diff --git a/test/com/sleepycat/je/tree/TreeTest.java b/test/com/sleepycat/je/tree/TreeTest.java
new file mode 100644
index 0000000..4960353
--- /dev/null
+++ b/test/com/sleepycat/je/tree/TreeTest.java
@@ -0,0 +1,422 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.Test; + +import com.sleepycat.je.BtreeStats; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseStats; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.NullCursor; +import com.sleepycat.je.txn.BasicLocker; +import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.utilint.StringUtils; + +public class TreeTest extends TreeTestBase { + + public TreeTest() { + super(); + } + + /** + * Rudimentary insert/retrieve test. + */ + @Test + public void testSimpleTreeCreation() + throws DatabaseException { + initEnv(false); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + Locker txn = BasicLocker. + createBasicLocker(DbInternal.getNonNullEnvImpl(env)); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + insertAndRetrieve(cursor, StringUtils.toUTF8("aaaaa"), + new LN(new byte[0])); + insertAndRetrieve(cursor, StringUtils.toUTF8("aaaab"), + new LN(new byte[0])); + insertAndRetrieve(cursor, StringUtils.toUTF8("aaaa"), + new LN(new byte[0])); + insertAndRetrieve(cursor, StringUtils.toUTF8("aaa"), + new LN(new byte[0])); + txn.operationEnd(); + } + + /** + * Slightly less rudimentary test inserting a handfull of keys and LN's. + */ + @Test + public void testMultipleInsertRetrieve0() + throws DatabaseException { + + /* + * Set the seed to reproduce a specific problem found while debugging: + * IN.split was splitting with the identifier key being on the right + * side. + */ + initEnv(false); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Locker txn = BasicLocker.createBasicLocker(envImpl); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + for (int i = 0; i < 21; i++) { + byte[] key = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(key); + insertAndRetrieve(cursor, key, new LN(new byte[0])); + } + txn.operationEnd(); + } + + /** + * Insert a bunch of keys and test that they retrieve back ok. While we + * insert, maintain the highest and lowest keys inserted. Verify that + * getFirstNode and getLastNode return those two entries. Lather, rinse, + * repeat. + */ + @Test + public void testMultipleInsertRetrieve1() + throws DatabaseException { + + initEnv(false); + doMultipleInsertRetrieve1(); + } + + /** + * Helper routine for above. 
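+     * Inserts N_KEYS random keys, re-reads each one, and then checks
+     * that getFirstNode and getLastNode return the BINs holding the
+     * smallest and largest keys inserted.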
+ */ + private void doMultipleInsertRetrieve1() + throws DatabaseException { + + byte[][] keys = new byte[N_KEYS][]; + LN[] lns = new LN[N_KEYS]; + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + Locker txn = BasicLocker.createBasicLocker(envImpl); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + + for (int i = 0; i < N_KEYS; i++) { + byte[] key = new byte[N_KEY_BYTES]; + keys[i] = key; + lns[i] = new LN(new byte[0]); + TestUtils.generateRandomAlphaBytes(key); + insertAndRetrieve(cursor, key, lns[i]); + } + + for (int i = 0; i < N_KEYS; i++) { + LN foundLN = retrieveLN(keys[i]); + assertTrue(foundLN == lns[i] || foundLN.logicalEquals(lns[i])); + } + + TestUtils.checkLatchCount(); + IN leftMostNode = tree.getFirstNode(CacheMode.DEFAULT); + + assertTrue(leftMostNode instanceof BIN); + BIN lmn = (BIN) leftMostNode; + lmn.releaseLatch(); + TestUtils.checkLatchCount(); + assertTrue(Key.compareKeys(lmn.getKey(0), minKey, null) == 0); + + TestUtils.checkLatchCount(); + IN rightMostNode = tree.getLastNode(CacheMode.DEFAULT); + + assertTrue(rightMostNode instanceof BIN); + BIN rmn = (BIN) rightMostNode; + rmn.releaseLatch(); + TestUtils.checkLatchCount(); + assertTrue(Key.compareKeys + (rmn.getKey(rmn.getNEntries() - 1), maxKey, null) == 0); + assertTrue(tree.getTreeStats() > 1); + + txn.operationEnd(); + } + + /** + * Create a tree. After creation, walk the bins forwards using getNextBin + * counting the keys and validating that the keys are being returned in + * ascending order. Ensure that the correct number of keys were returned. + */ + @Test + public void testCountAndValidateKeys() + throws DatabaseException { + + initEnv(false); + doCountAndValidateKeys(); + } + + /** + * Helper routine for above test. + */ + private void doCountAndValidateKeys() + throws DatabaseException { + byte[][] keys = new byte[N_KEYS][]; + LN[] lns = new LN[N_KEYS]; + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Locker txn = BasicLocker.createBasicLocker(envImpl); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + + for (int i = 0; i < N_KEYS; i++) { + byte[] key = new byte[N_KEY_BYTES]; + keys[i] = key; + lns[i] = new LN(new byte[0]); + TestUtils.generateRandomAlphaBytes(key); + insertAndRetrieve(cursor, key, lns[i]); + } + assertTrue(countAndValidateKeys(tree) == N_KEYS); + txn.operationEnd(); + } + + /** + * Create a tree. After creation, walk the bins backwards using getPrevBin + * counting the keys and validating that the keys are being returned in + * descending order. Ensure that the correct number of keys were returned. + */ + @Test + public void testCountAndValidateKeysBackwards() + throws DatabaseException { + + initEnv(false); + doCountAndValidateKeysBackwards(); + } + + /** + * Helper routine for above test. 
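+     * Loads N_KEYS random keys and then walks the BINs backwards,
+     * verifying that every key comes back in descending order.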
+ */ + public void doCountAndValidateKeysBackwards() + throws DatabaseException { + + byte[][] keys = new byte[N_KEYS][]; + LN[] lns = new LN[N_KEYS]; + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Locker txn = BasicLocker.createBasicLocker(envImpl); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + for (int i = 0; i < N_KEYS; i++) { + byte[] key = new byte[N_KEY_BYTES]; + keys[i] = key; + lns[i] = new LN(new byte[0]); + TestUtils.generateRandomAlphaBytes(key); + insertAndRetrieve(cursor, key, lns[i]); + } + assertTrue(countAndValidateKeysBackwards(tree) == N_KEYS); + txn.operationEnd(); + } + + @Test + public void testAscendingInsertBalance() + throws DatabaseException { + + initEnv(false); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Locker txn = BasicLocker.createBasicLocker(envImpl); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + + /* Fill up a db with data */ + for (int i = 0; i < N_KEYS; i++) { + byte[] keyBytes = new byte[4]; + TestUtils.putUnsignedInt(keyBytes, TestUtils.alphaKey(i)); + insertAndRetrieve(cursor, keyBytes, + new LN(new byte[0])); + } + + TestUtils.checkLatchCount(); + + /* Count the number of levels on the left. */ + IN leftMostNode = tree.getFirstNode(CacheMode.DEFAULT); + assertTrue(leftMostNode instanceof BIN); + int leftSideLevels = 0; + do { + SearchResult result = tree.getParentINForChildIN( + leftMostNode, false, /*useTargetLevel*/ + true, /*doFetch*/ CacheMode.DEFAULT); + + leftMostNode = result.parent; + leftSideLevels++; + + if (leftMostNode != null && leftMostNode.isRoot()) { + leftMostNode.releaseLatch(); + break; + } + + } while (leftMostNode != null); + + TestUtils.checkLatchCount(); + + /* Count the number of levels on the right. */ + IN rightMostNode = tree.getLastNode(CacheMode.DEFAULT); + assertTrue(rightMostNode instanceof BIN); + int rightSideLevels = 0; + do { + SearchResult result = tree.getParentINForChildIN( + rightMostNode, false, /*useTargetLevel*/ + true, /*doFetch*/ CacheMode.DEFAULT); + + rightMostNode = result.parent; + rightSideLevels++; + + if (rightMostNode != null && rightMostNode.isRoot()) { + rightMostNode.releaseLatch(); + break; + } + + } while (rightMostNode != null); + + TestUtils.checkLatchCount(); + + if (leftSideLevels > 10 || + rightSideLevels > 10) { + fail("Levels too high (" + + leftSideLevels + + "/" + + rightSideLevels + + ") on descending insert"); + } + txn.operationEnd(); + } + + @Test + public void testDescendingInsertBalance() + throws DatabaseException { + initEnv(false); + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Locker txn = BasicLocker.createBasicLocker(envImpl); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + + for (int i = N_KEYS; i >= 0; --i) { + byte[] keyBytes = new byte[4]; + TestUtils.putUnsignedInt(keyBytes, TestUtils.alphaKey(i)); + insertAndRetrieve(cursor, keyBytes, + new LN(new byte[0])); + } + + TestUtils.checkLatchCount(); + IN leftMostNode = tree.getFirstNode(CacheMode.DEFAULT); + + assertTrue(leftMostNode instanceof BIN); + int leftSideLevels = 0; + do { + SearchResult result = tree.getParentINForChildIN( + leftMostNode, false, /*useTargetLevel*/ + true, CacheMode.DEFAULT); + + leftMostNode = result.parent; + leftSideLevels++; + + if (leftMostNode != null && leftMostNode.isRoot()) { + leftMostNode.releaseLatch(); + break; + } + } while (leftMostNode != null); + + TestUtils.checkLatchCount(); + + IN rightMostNode = tree.getLastNode(CacheMode.DEFAULT); + + assertTrue(rightMostNode 
instanceof BIN); + int rightSideLevels = 0; + do { + SearchResult result = tree.getParentINForChildIN( + rightMostNode, false, /*useTargetLevel*/ + true, CacheMode.DEFAULT); + + rightMostNode = result.parent; + rightSideLevels++; + + if (rightMostNode != null && rightMostNode.isRoot()) { + rightMostNode.releaseLatch(); + break; + } + } while (rightMostNode != null); + + TestUtils.checkLatchCount(); + + if (leftSideLevels > 10 || + rightSideLevels > 10) { + fail("Levels too high (" + + leftSideLevels + + "/" + + rightSideLevels + + ") on descending insert"); + } + txn.operationEnd(); + } + + /** + * Insert a bunch of keys. Call verify and validate the results. + */ + @Test + public void testVerify() + throws DatabaseException { + + initEnv(false); + byte[][] keys = new byte[N_KEYS][]; + LN[] lns = new LN[N_KEYS]; + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + Locker txn = BasicLocker.createBasicLocker(envImpl); + NullCursor cursor = new NullCursor(tree.getDatabase(), txn); + + for (int i = 0; i < N_KEYS; i++) { + byte[] key = new byte[N_KEY_BYTES]; + keys[i] = key; + lns[i] = new LN((byte[]) new byte[1]); + TestUtils.generateRandomAlphaBytes(key); + insertAndRetrieve(cursor, key, lns[i]); + } + + /* + * Note that verify will attempt to continue past errors, so + * assertTrue on the status return. + */ + assertTrue(env.verify(new VerifyConfig(), System.err)); + DatabaseStats stats = db.verify(new VerifyConfig()); + BtreeStats btStats = (BtreeStats) stats; + + assertTrue(btStats.getInternalNodeCount() < + btStats.getBottomInternalNodeCount()); + assertTrue(btStats.getBottomInternalNodeCount() < + btStats.getLeafNodeCount() + + btStats.getDeletedLeafNodeCount()); + assertTrue(btStats.getLeafNodeCount() + + btStats.getDeletedLeafNodeCount() == + N_KEYS); + txn.operationEnd(); + + /* Now intentionally create LogFileNotFoundExceptions */ + /* + db.close(); + env.close(); + + This is disabled until the method for flipping files is + introduced. It's too hard to create a LogFileNotFoundException + by brute force deleting a file; often recovery doesn't work. + Instead, use a flipped file later on. + + String[] jeFiles = + FileManager.listFiles(envHome, + new String[] {FileManager.JE_SUFFIX}); + int targetIdx = jeFiles.length / 2; + assertTrue(targetIdx > 0); + File targetFile = new File(envHome, jeFiles[targetIdx]); + assertTrue(targetFile.delete()); + + initEnv(false); + assertFalse(env.verify(new VerifyConfig(), System.err)); + */ + } +} diff --git a/test/com/sleepycat/je/tree/TreeTestBase.java b/test/com/sleepycat/je/tree/TreeTestBase.java new file mode 100644 index 0000000..3850d76 --- /dev/null +++ b/test/com/sleepycat/je/tree/TreeTestBase.java @@ -0,0 +1,232 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.NullCursor; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class TreeTestBase extends TestBase { + static protected final boolean DEBUG = true; + + static protected int N_KEY_BYTES = 10; + static protected int N_ITERS = 1; + static protected int N_KEYS = 10000; + static protected int MAX_ENTRIES_PER_NODE = 6; + + protected Tree tree = null; + protected byte[] minKey = null; + protected byte[] maxKey = null; + protected Database db = null; + protected Environment env = null; + protected File envHome = null; + + public TreeTestBase() { + envHome = SharedTestUtils.getTestDir(); + } + + void initEnv(boolean duplicatesAllowed) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + Integer.toString(MAX_ENTRIES_PER_NODE)); + envConfig.setAllowCreate(true); + envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(duplicatesAllowed); + db = env.openDatabase(null, "foo", dbConfig); + + tree = DbInternal.getDbImpl(db).getTree(); + minKey = null; + maxKey = null; + } + + @After + public void tearDown() + throws Exception { + + db.close(); + if (env != null) { + env.close(); + } + env = null; + db = null; + tree = null; + minKey = null; + maxKey = null; + } + + protected IN makeDupIN(IN old) { + + IN ret = new IN( + DbInternal.getDbImpl(db), + old.getIdentifierKey(), MAX_ENTRIES_PER_NODE, 2); + + ret.setNodeId(old.getNodeId()); + ret.setIsRoot(old.isRoot()); + + for (int i = 0; i < old.getNEntries(); i++) { + ret.appendEntryFromOtherNode(old, i); + } + + return ret; + } + + /** + * Helper routine to insert a key and immediately read it back. + */ + protected void insertAndRetrieve(NullCursor cursor, byte[] key, LN ln) + throws DatabaseException { + + if (minKey == null) { + minKey = key; + } else if (Key.compareKeys(key, minKey, null) < 0) { + minKey = key; + } + + if (maxKey == null) { + maxKey = key; + } else if (Key.compareKeys(maxKey, key, null) < 0) { + maxKey = key; + } + + cursor.reset(); + + TestUtils.checkLatchCount(); + + assertTrue(cursor.insertRecord( + key, ln, false, ReplicationContext.NO_REPLICATE)); + + TestUtils.checkLatchCount(); + + LN foundLN = retrieveLN(key); + + assertTrue(foundLN == ln || foundLN.logicalEquals(ln)); + } + + /** + * Helper routine to read the LN referred to by key. 
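+     * Searches down to the owning BIN, returns the resident LN (or
+     * materializes one for an embedded record), and always releases
+     * the BIN latch before returning.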
+ */ + protected LN retrieveLN(byte[] key) + throws DatabaseException { + + TestUtils.checkLatchCount(); + IN n = tree.search(key, Tree.SearchType.NORMAL, null, + CacheMode.DEFAULT, null /*keyComparator*/); + if (!(n instanceof BIN)) { + fail("search didn't return a BIN for key: " + key); + } + BIN bin = (BIN) n; + try { + int index = bin.findEntry(key, false, true); + if (index == -1) { + fail("Didn't read back key: " + key); + } else { + Node node = bin.getTarget(index); + if (node instanceof LN) { + return (LN) node; + } else if (bin.isEmbeddedLN(index)) { + return new LN(bin.getData(index)); + } else { + fail("Didn't read back LN for: " + key); + } + } + return null; + } finally { + bin.releaseLatch(); + TestUtils.checkLatchCount(); + } + } + + /** + * Using getNextBin, count all the keys in the database. Ensure that + * they're returned in ascending order. + */ + protected int countAndValidateKeys(Tree tree) + throws DatabaseException { + + TestUtils.checkLatchCount(); + BIN nextBin = (BIN) tree.getFirstNode(CacheMode.DEFAULT); + byte[] prevKey = { 0x00 }; + + int cnt = 0; + + while (nextBin != null) { + for (int i = 0; i < nextBin.getNEntries(); i++) { + byte[] curKey = nextBin.getKey(i); + if (Key.compareKeys(curKey, prevKey, null) <= 0) { + throw new RuntimeException + ("keys are out of order"); + } + cnt++; + prevKey = curKey; + } + nextBin = tree.getNextBin(nextBin, CacheMode.DEFAULT); + } + TestUtils.checkLatchCount(); + return cnt; + } + + /** + * Using getPrevBin, count all the keys in the database. Ensure that + * they're returned in descending order. + */ + protected int countAndValidateKeysBackwards(Tree tree) + throws DatabaseException { + + TestUtils.checkLatchCount(); + BIN nextBin = (BIN) tree.getLastNode(CacheMode.DEFAULT); + byte[] prevKey = null; + + int cnt = 0; + + while (nextBin != null) { + for (int i = nextBin.getNEntries() - 1; i >= 0; i--) { + byte[] curKey = nextBin.getKey(i); + if (prevKey != null && + Key.compareKeys(prevKey, curKey, null) <= 0) { + throw new RuntimeException + ("keys are out of order"); + } + cnt++; + prevKey = curKey; + } + nextBin = tree.getPrevBin(nextBin, CacheMode.DEFAULT); + } + return cnt; + } +} diff --git a/test/com/sleepycat/je/tree/ValidateSubtreeDeleteTest.java b/test/com/sleepycat/je/tree/ValidateSubtreeDeleteTest.java new file mode 100644 index 0000000..192da54 --- /dev/null +++ b/test/com/sleepycat/je/tree/ValidateSubtreeDeleteTest.java @@ -0,0 +1,172 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.tree; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class ValidateSubtreeDeleteTest extends TestBase { + + private final File envHome; + private Environment env; + private Database testDb; + + public ValidateSubtreeDeleteTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), + "false"); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + testDb = env.openDatabase(null, "Test", dbConfig); + } + + @After + public void tearDown() + throws Exception { + + testDb.close(); + if (env != null) { + try { + env.close(); + } catch (DatabaseException E) { + } + } + } + + @Test + public void testBasic() + throws Exception { + try { + /* Make a 3 level tree full of data */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + byte[] testData = new byte[1]; + testData[0] = 1; + data.setData(testData); + + Transaction txn = env.beginTransaction(null, null); + for (int i = 0; i < 15; i ++) { + key.setData(TestUtils.getTestArray(i)); + testDb.put(txn, key, data); + } + + /* Should not be able to delete any of it */ + assertFalse(DbInternal.getDbImpl(testDb).getTree().validateDelete(0)); + assertFalse(DbInternal.getDbImpl(testDb).getTree().validateDelete(1)); + + /* + * Should be able to delete both, the txn is aborted and the data + * isn't there. + */ + txn.abort(); + assertTrue(DbInternal.getDbImpl(testDb).getTree().validateDelete(0)); + assertTrue(DbInternal.getDbImpl(testDb).getTree().validateDelete(1)); + + /* + * Try explicit deletes. + */ + txn = env.beginTransaction(null, null); + for (int i = 0; i < 15; i ++) { + key.setData(TestUtils.getTestArray(i)); + testDb.put(txn, key, data); + } + for (int i = 0; i < 15; i ++) { + key.setData(TestUtils.getTestArray(i)); + testDb.delete(txn, key); + } + assertFalse(DbInternal.getDbImpl(testDb).getTree().validateDelete(0)); + assertFalse(DbInternal.getDbImpl(testDb).getTree().validateDelete(1)); + + // XXX, now commit the delete and compress and test that the + // subtree is deletable. Not finished yet! Also must test deletes. 
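+            // Until then, just abort; both the inserts and the deletes
+            // in this txn are rolled back.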
+ txn.abort(); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + + @Test + public void testDuplicates() + throws Exception { + try { + /* Make a 3 level tree full of data */ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + byte[] testData = new byte[1]; + testData[0] = 1; + key.setData(testData); + + Transaction txn = env.beginTransaction(null, null); + for (int i = 0; i < 4; i ++) { + data.setData(TestUtils.getTestArray(i)); + testDb.put(txn, key, data); + } + + /* Should not be able to delete any of it */ + Tree tree = DbInternal.getDbImpl(testDb).getTree(); + assertFalse(tree.validateDelete(0)); + + /* + * Should be able to delete, the txn is aborted and the data + * isn't there. + */ + txn.abort(); + assertTrue(tree.validateDelete(0)); + + /* + * Try explicit deletes. + */ + } catch (Exception e) { + e.printStackTrace(); + throw e; + } + } + +} diff --git a/test/com/sleepycat/je/tree/je-4.1.7_logWithDIN.jdb b/test/com/sleepycat/je/tree/je-4.1.7_logWithDIN.jdb new file mode 100644 index 0000000..d62900a Binary files /dev/null and b/test/com/sleepycat/je/tree/je-4.1.7_logWithDIN.jdb differ diff --git a/test/com/sleepycat/je/tree/je-4.1.7_logWithDeletedLNCommit.jdb b/test/com/sleepycat/je/tree/je-4.1.7_logWithDeletedLNCommit.jdb new file mode 100644 index 0000000..5b30cfc Binary files /dev/null and b/test/com/sleepycat/je/tree/je-4.1.7_logWithDeletedLNCommit.jdb differ diff --git a/test/com/sleepycat/je/tree/je-4.1.7_logWithDeletedLNNoCommit.jdb b/test/com/sleepycat/je/tree/je-4.1.7_logWithDeletedLNNoCommit.jdb new file mode 100644 index 0000000..122dd77 Binary files /dev/null and b/test/com/sleepycat/je/tree/je-4.1.7_logWithDeletedLNNoCommit.jdb differ diff --git a/test/com/sleepycat/je/tree/je-4.1.7_logWithMixIN.jdb b/test/com/sleepycat/je/tree/je-4.1.7_logWithMixIN.jdb new file mode 100644 index 0000000..404143a Binary files /dev/null and b/test/com/sleepycat/je/tree/je-4.1.7_logWithMixIN.jdb differ diff --git a/test/com/sleepycat/je/tree/je-4.1.7_logWithSingletonLN.jdb b/test/com/sleepycat/je/tree/je-4.1.7_logWithSingletonLN.jdb new file mode 100644 index 0000000..8514684 Binary files /dev/null and b/test/com/sleepycat/je/tree/je-4.1.7_logWithSingletonLN.jdb differ diff --git a/test/com/sleepycat/je/tree/je-4.1.7_noPreUpgrade_deltas b/test/com/sleepycat/je/tree/je-4.1.7_noPreUpgrade_deltas new file mode 100644 index 0000000..ca9d755 Binary files /dev/null and b/test/com/sleepycat/je/tree/je-4.1.7_noPreUpgrade_deltas differ diff --git a/test/com/sleepycat/je/tree/je-4.1.7_noPreUpgrade_dups b/test/com/sleepycat/je/tree/je-4.1.7_noPreUpgrade_dups new file mode 100644 index 0000000..ec20073 Binary files /dev/null and b/test/com/sleepycat/je/tree/je-4.1.7_noPreUpgrade_dups differ diff --git a/test/com/sleepycat/je/trigger/ConfigTest.java b/test/com/sleepycat/je/trigger/ConfigTest.java new file mode 100644 index 0000000..150c710 --- /dev/null +++ b/test/com/sleepycat/je/trigger/ConfigTest.java @@ -0,0 +1,75 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+package com.sleepycat.je.trigger;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+import java.util.Arrays;
+
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.rep.dual.trigger.InvokeTest;
+
+public class ConfigTest extends TestBase {
+
+    @Test
+    public void testConflictingTypes() {
+        DatabaseConfig dc = new DatabaseConfig();
+        try {
+            dc.setTriggers(Arrays.asList((Trigger) new DBT("t1"),
+                                         (Trigger) new InvokeTest.RDBT("t2")));
+            fail("IAE expected");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+    }
+
+    @Test
+    public void testConflictingNames() {
+        DatabaseConfig dc = new DatabaseConfig();
+        try {
+            dc.setTriggers(Arrays.asList((Trigger) new DBT("t1"),
+                                         (Trigger) new DBT("t1")));
+            fail("IAE expected");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+    }
+
+    @Test
+    public void testSecondaryConfig() {
+        SecondaryConfig sc = new SecondaryConfig();
+
+        try {
+            sc.setTriggers(Arrays.asList((Trigger) new DBT("t1"),
+                                         (Trigger) new DBT("t2")));
+            fail("IAE expected");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+
+        try {
+            sc.setOverrideTriggers(true);
+            fail("IAE expected");
+        } catch (IllegalArgumentException iae) {
+            // Expected
+        }
+
+        assertEquals(0, sc.getTriggers().size());
+        assertFalse(sc.getOverrideTriggers());
+    }
+}
diff --git a/test/com/sleepycat/je/trigger/InvokeTest.java b/test/com/sleepycat/je/trigger/InvokeTest.java
new file mode 100644
index 0000000..af72405
--- /dev/null
+++ b/test/com/sleepycat/je/trigger/InvokeTest.java
@@ -0,0 +1,660 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.je.trigger;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.TriggerManager.MapOver;
+
+/**
+ * This set of unit tests exercises all standalone trigger invocations.
+ */
+
+public class InvokeTest extends TestBase {
+
+    Environment env;
+    Database db1 = null;
+    int triggerCount = -1;
+    /* The number of nodes. It's > 1 if this is a replicated environment.
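+     * The replicated (dual) variants of these tests presumably override
+     * it with the size of the replication group.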
+     */
+    protected int nNodes = 1;
+
+    protected List getTriggers() {
+        return new LinkedList(Arrays.asList((Trigger) new DBT("t1"),
+                                            (Trigger) new DBT("t2")));
+    }
+
+    protected List getTransientTriggers() {
+        return new LinkedList(Arrays.asList((Trigger) new TDBT("tt1"),
+                                            (Trigger) new TDBT("tt2")));
+    }
+
+    protected List getTriggersPlusOne() {
+        List triggers = getTriggers();
+        triggers.add(new InvokeTest.DBT("t3"));
+        return triggers;
+    }
+
+    protected TransactionConfig getTransactionConfig() {
+        return null;
+    }
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        List triggers = getTriggers();
+        triggerCount = triggers.size();
+        dbConfig.setTriggers(triggers);
+
+        dbConfig.setOverrideTriggers(true);
+
+        env = create(envRoot, envConfig);
+        Transaction transaction =
+            env.beginTransaction(null, getTransactionConfig());
+        db1 = env.openDatabase(transaction, "db1", dbConfig);
+        transaction.commit();
+        dbConfig.setOverrideTriggers(false);
+        resetTriggers();
+    }
+
+    @After
+    public void tearDown()
+        throws Exception {
+
+        db1.close();
+        close(env);
+        super.tearDown();
+    }
+
+    @Test
+    public void testAddRemoveTriggerExistingDbTrans() {
+        Transaction transaction =
+            env.beginTransaction(null, getTransactionConfig());
+        addRemoveTriggerExistingDb(transaction);
+        transaction.commit();
+    }
+
+    @Test
+    public void testAddRemoveTriggerExistingDbAuto() {
+        addRemoveTriggerExistingDb(null);
+    }
+
+    void addRemoveTriggerExistingDb(Transaction transaction) {
+        db1.close();
+        resetTriggers();
+
+        /* read/write open. */
+        db1 = env.openDatabase(transaction, "db1", dbConfig);
+        verifyAddTrigger(0);
+        verifyRemoveTrigger(0);
+        checkNullOpenTriggerCount(1);
+        db1.close();
+        resetTriggers();
+
+        dbConfig.setOverrideTriggers(true);
+        dbConfig.setTriggers(getTriggersPlusOne());
+        db1 = env.openDatabase(transaction, "db1", dbConfig);
+        DatabaseImpl db1Impl = DbInternal.getDbImpl(db1);
+        DBT t3 = (DBT) db1Impl.getTriggers().get(2);
+        assertEquals("t3", t3.getName());
+        assertEquals(1, t3.ts.nAddTrigger);
+        assertEquals(0, t3.ts.nRemoveTrigger);
+        db1.close();
+        resetTriggers();
+
+        dbConfig.setTriggers(getTriggers());
+        db1 = env.openDatabase(transaction, "db1", dbConfig);
+        db1Impl = DbInternal.getDbImpl(db1);
+        assertEquals("t3", t3.getName());
+        assertEquals(0, t3.ts.nAddTrigger);
+        assertEquals(1, t3.ts.nRemoveTrigger);
+    }
+
+    /**
+     * Simply verifies that transient triggers are indeed transient, i.e., not
+     * stored in the DatabaseImpl. Also checks that a transient trigger can be
+     * added when setOverrideTriggers(true) is not called.
+     */
+    @Test
+    public void testBasicTransientTrigger() {
+
+        /* Set two transient triggers in new DB. */
+        List tList = getTransientTriggers();
+        DatabaseConfig tdbConfig = getDBConfig();
+        tdbConfig.setOverrideTriggers(false);
+        tdbConfig.setTriggers(tList);
+        Database dbc = env.openDatabase(null, "dbc", tdbConfig);
+        assertEquals(tList, dbc.getConfig().getTriggers());
+        for (Trigger trigger : dbc.getConfig().getTriggers()) {
+            assertEquals("dbc", trigger.getDatabaseName());
+        }
+        verifyAddTrigger(1);
+        checkTransientTriggerCount(1);
+        resetTriggers();
+
+        /* Test simple transactional put().
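+         * The put should fire each transient trigger once, and the
+         * commit hook below should fire once per trigger as well.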
*/ + Transaction transaction = env.beginTransaction(null, null); + DatabaseEntry key = new DatabaseEntry(new byte[] {1}); + DatabaseEntry data = new DatabaseEntry(new byte[] {2}); + dbc.put(transaction, key, data); + verifyPut(1, key, data, null); + transaction.commit(); + verifyCommit(1); + checkTransientTriggerCount(1); + resetTriggers(); + + /* Close DB and reopen -- there should be no triggers. */ + dbc.close(); + verifyRemoveTrigger(1); + checkTransientTriggerCount(1); + resetTriggers(); + tdbConfig = getDBConfig(); + tdbConfig.setOverrideTriggers(false); + tdbConfig.setAllowCreate(false); + dbc = env.openDatabase(null, "dbc", tdbConfig); + assertNull(dbc.getConfig().getTriggers()); + verifyAddTrigger(0); + dbc.close(); + verifyRemoveTrigger(0); + checkTransientTriggerCount(0); + + /* Remove DB and recreate -- there should be no triggers. */ + env.removeDatabase(null, "dbc"); + tdbConfig = getDBConfig(); + tdbConfig.setOverrideTriggers(false); + dbc = env.openDatabase(null, "dbc", tdbConfig); + assertNull(dbc.getConfig().getTriggers()); + verifyAddTrigger(0); + dbc.close(); + verifyRemoveTrigger(0); + checkTransientTriggerCount(0); + + /* Add triggers to existing DB without overriding config. */ + tdbConfig = getDBConfig(); + tdbConfig.setOverrideTriggers(false); + tdbConfig.setTriggers(tList); + tdbConfig.setAllowCreate(false); + dbc = env.openDatabase(null, "dbc", tdbConfig); + assertEquals(tList, dbc.getConfig().getTriggers()); + for (Trigger trigger : dbc.getConfig().getTriggers()) { + assertEquals("dbc", trigger.getDatabaseName()); + } + verifyAddTrigger(1); + dbc.close(); + verifyRemoveTrigger(1); + checkTransientTriggerCount(1); + resetTriggers(); + + /* Clean up. */ + env.removeDatabase(null, "dbc"); + } + + private void create(Transaction transaction) { + List tgs = getTriggers(); + DatabaseConfig tdbConfig = getDBConfig(); + dbConfig.setOverrideTriggers(true); + tdbConfig.setTriggers(tgs); + verifyOpen(0, 0); + Database dbc = env.openDatabase(transaction, "dbc", tdbConfig ); + + for (Trigger trigger : dbc.getConfig().getTriggers()) { + assertEquals("dbc", trigger.getDatabaseName()); + } + verifyOpen(1, 1); + verifyAddTrigger(1); + + if (transaction == null) { + verifyCommit(1); + } + dbc.close(); + verifyClose(1); + /* Read triggers from the existing database. */ + checkTriggerCount(1); + resetTriggers(); + dbc = env.openDatabase(transaction, "dbc", dbConfig); + verifyOpen(0,1); + verifyAddTrigger(0); + /* Not a new database no create and consequently commit triggers. */ + verifyCommit(0); + checkNullOpenTriggerCount(1); + assertEquals(2, db1.getConfig().getTriggers().size()); + dbc.close(); + env.removeDatabase(transaction, "dbc"); + } + + @Test + public void testCreateAuto() { + create(null); + } + + @Test + public void testCreateTrans() { + Transaction transaction = + env.beginTransaction(null, getTransactionConfig()); + create(transaction); + transaction.commit(); + } + + @Test + public void testOpenAuto() { + open(null); + } + + @Test + public void testOpenTrans() { + Transaction transaction = + env.beginTransaction(null, getTransactionConfig()); + open(transaction); + transaction.commit(); + } + + private void open(Transaction transaction) { + db1.close(); + resetTriggers(); + /* read/write open. 
*/ + db1 = env.openDatabase(transaction, "db1", dbConfig); + verifyOpen(0,1); + checkNullOpenTriggerCount(1); + db1.close(); + resetTriggers(); + + DatabaseConfig config = getDBConfig(); + config.setReadOnly(true); + db1 = env.openDatabase(transaction, "db1", config); + verifyOpen(0,0); + checkTriggerCount(0); + resetTriggers(); + + config.setReadOnly(false); + Database db11 = env.openDatabase(transaction, "db1", config); + verifyOpen(0,1); + checkNullOpenTriggerCount(1); + db11.close(); + resetTriggers(); + } + + @Test + public void testClose() { + closeDb(); + } + + private void closeDb() { + resetTriggers(); + db1.close(); + verifyClose(1); + checkNullOpenTriggerCount(1); + } + + private void rename(Transaction transaction) { + db1.close(); + env.renameDatabase(transaction, "db1", "dbr1"); + verifyRename("dbr1", 1); + checkTriggerCount(1); + } + + @Test + public void testRenameAuto() { + rename(null); + } + + @Test + public void testRenameTrans() { + Transaction transaction = env.beginTransaction(null, null); + rename(transaction); + checkTriggerCount(1); + transaction.commit(); + } + + @Test + public void testRenameAbort() { + Transaction transaction = env.beginTransaction(null, null); + rename(transaction); + checkTriggerCount(1); + transaction.abort(); + db1 = env.openDatabase(null, "db1", dbConfig); + verifyDB1Triggers(); + verifyAbort(1); + } + + private void truncate(Transaction transaction) { + db1.close(); + env.truncateDatabase(transaction, "db1", false); + verifyTruncate(1); + checkTriggerCount(1); + } + + @Test + public void testTruncateAuto() { + truncate(null); + } + + @Test + public void testTruncateTrans() { + Transaction transaction = env.beginTransaction(null, null); + truncate(transaction); + transaction.commit(); + + /* + * Truncate does a rename under the covers so make sure the triggers + * are present on the new empty database. 
+ */ + db1 = env.openDatabase(null, "db1", dbConfig); + verifyDB1Triggers(); + } + + private void verifyDB1Triggers() { + assertEquals("db1", db1.getDatabaseName()); + assertEquals(triggerCount, db1.getConfig().getTriggers().size()); + for (Trigger t : db1.getConfig().getTriggers()) { + assertEquals("db1", t.getDatabaseName()); + } + } + + private void remove(Transaction transaction) { + db1.close(); + env.removeDatabase(transaction, "db1"); + verifyRemove(1); + verifyRemoveTrigger(1); + checkTriggerCount(1); + } + + @Test + public void testRemoveAuto() { + remove(null); + } + + @Test + public void testRemoveTrans() { + Transaction transaction = env.beginTransaction(null, null); + remove(transaction); + transaction.commit(); + } + + @Test + public void testKVOpsAuto() { + KVOps(null); + } + + @Test + public void testKVOpsTrans() { + Transaction transaction = env.beginTransaction(null, null); + KVOps(transaction); + transaction.commit(); + } + + @Test + public void testKVOpsAbort() { + Transaction transaction = env.beginTransaction(null, null); + KVOps(transaction); + transaction.abort(); + verifyAbort(1); + } + + private void KVOps(Transaction transaction) { + + DatabaseEntry key = new DatabaseEntry(); + key.setData(new byte[]{1}); + DatabaseEntry data1 = new DatabaseEntry(); + data1.setData(new byte[]{2}); + verifyPut(0, null, null, null); + + db1.put(transaction, key, data1); + verifyPut(1, key, data1, null); + checkTriggerCount(1); + resetTriggers(); + DatabaseEntry data2 = new DatabaseEntry(); + data2.setData(new byte[]{3}); + db1.put(transaction, key, data2); + verifyPut(1, key, data2, data1); + checkTriggerCount(1); + resetTriggers(); + + OperationStatus status = db1.delete(transaction, key); + assertEquals(OperationStatus.SUCCESS, status); + verifyDelete(1, key, data2); + checkTriggerCount(1); + resetTriggers(); + + status = db1.delete(transaction, key); + assertEquals(OperationStatus.NOTFOUND, status); + verifyDelete(0, null, null); + checkTriggerCount(0); + + db1.close(); + } + + /** + * Ensure recovery replay of MapLNs executes properly. This tests a fix + * for an NPE that occurred during DatabaseImpl.readFromLog, which was + * calling DatabaseImpl.getName prior to instantiation of DbTree. 
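+     * The test below forces a recovery by closing and re-creating the
+     * environment, then re-opens the database and checks that only the
+     * expected open triggers fire during replay.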
+ */
+    @Test
+    public void testBasicRecovery() {
+        db1.close();
+        close(env);
+        resetTriggers();
+        env = create(envRoot, envConfig);
+        db1 = env.openDatabase(null, "db1", dbConfig);
+        checkNullOpenTriggerCount(1);
+        verifyAddTrigger(0);
+        verifyRemoveTrigger(0);
+    }
+
+    private void resetTriggers() {
+        for (Trigger t : TestBase.invokedTriggers) {
+            ((TDBT)t).clear();
+        }
+        TestBase.invokedTriggers.clear();
+    }
+
+    protected void verifyDelete(final int nDelete,
+                                final DatabaseEntry key,
+                                final DatabaseEntry oldData) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                final TDBT dbt = (TDBT)e;
+                assertEquals(nDelete, dbt.ts.nDelete);
+                assertEquals(key, dbt.ts.key);
+                assertEquals(oldData, dbt.ts.oldData);
+                dbt.ts.nDelete = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyPut(final int nPut,
+                             final DatabaseEntry key,
+                             final DatabaseEntry newData,
+                             final DatabaseEntry oldData) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                final TDBT dbt = (TDBT)e;
+                assertEquals(nPut, dbt.ts.nPut);
+                dbt.ts.nPut = 0;
+                assertEquals(key, dbt.ts.key);
+                assertEquals(newData, dbt.ts.newData);
+                assertEquals(oldData, dbt.ts.oldData);
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyOpen(final int nCreate, final int nOpen) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nOpen, ((TDBT)e).ts.nOpen);
+                assertEquals(nCreate, ((TDBT)e).ts.nCreate);
+                ((TDBT)e).ts.nCreate = 0;
+                ((TDBT)e).ts.nOpen = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyClose(final int nClose) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nClose, ((TDBT)e).ts.nClose);
+                ((TDBT)e).ts.nClose = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    /* The triggers should have been executed on all nodes. */
+    protected void checkTriggerCount(final int count) {
+        assertEquals((count * nNodes * triggerCount),
+                     TestBase.invokedTriggers.size());
+    }
+
+    /* Transient triggers are only executed on the node where they're
+       configured. */
+    protected void checkTransientTriggerCount(final int count) {
+        assertEquals((count * triggerCount),
+                     TestBase.invokedTriggers.size());
+    }
+
+    /*
+     * Null open triggers, ones where the db is opened for writes,
+     * but no writes are actually done, will not fire on replica nodes.
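+     * Hence, like transient triggers, they are counted once per trigger
+     * on the local node only, with no nNodes multiplier.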
+ */
+    protected void checkNullOpenTriggerCount(final int count) {
+        assertEquals((count * triggerCount),
+                     TestBase.invokedTriggers.size());
+    }
+
+    protected void verifyRemove(final int nRemove) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nRemove, ((DBT)e).ts.nRemove);
+                ((DBT)e).ts.nRemove = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyTruncate(final int nTruncate) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nTruncate, ((DBT)e).ts.nTruncate);
+                ((DBT)e).ts.nTruncate = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyRename(final String newName,
+                                final int nRename) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                final DBT dbt = (DBT)e;
+                assertEquals(nRename, dbt.ts.nRename);
+                assertEquals(newName, dbt.ts.newName);
+                dbt.ts.nRename = 0;
+                assertEquals(newName, dbt.getDatabaseName());
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyCommit(final int nCommit) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nCommit, ((TDBT)e).ts.nCommit);
+                ((TDBT)e).ts.nCommit = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyAbort(final int nAbort) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nAbort, ((TDBT)e).ts.nAbort);
+                ((TDBT)e).ts.nAbort = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyRemoveTrigger(final int nRemoveTrigger) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nRemoveTrigger, ((TDBT)e).ts.nRemoveTrigger);
+                ((TDBT)e).ts.nRemoveTrigger = 0;
+                return e;
+            }
+        }.run();
+    }
+
+    protected void verifyAddTrigger(final int nAddTrigger) {
+
+        new MapOver(TestBase.invokedTriggers) {
+
+            @Override
+            protected Trigger fun(Trigger e) {
+                assertEquals(nAddTrigger, ((TDBT)e).ts.nAddTrigger);
+                ((TDBT)e).ts.nAddTrigger = 0;
+                return e;
+            }
+        }.run();
+    }
+}
diff --git a/test/com/sleepycat/je/trigger/TestBase.java b/test/com/sleepycat/je/trigger/TestBase.java
new file mode 100644
index 0000000..d62e263
--- /dev/null
+++ b/test/com/sleepycat/je/trigger/TestBase.java
@@ -0,0 +1,223 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ +package com.sleepycat.je.trigger; + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.Serializable; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import org.junit.Before; + +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.util.test.SharedTestUtils; + +public class TestBase extends DualTestCase { + + static class TestState { + + Transaction transaction = null; + DatabaseEntry key = null; + DatabaseEntry oldData = null; + DatabaseEntry newData = null; + + String newName = null; + + int nAddTrigger = 0; + int nRemoveTrigger = 0; + + int nCreate = 0; + int nClose = 0; + int nOpen = 0; + int nRemove = 0; + int nTruncate = 0; + int nRename = 0; + + int nPut = 0; + int nDelete = 0; + + int nCommit = 0; + int nAbort = 0; + } + + /* + * Synchronized since multiple replicas may insert entries at the same time. + */ + public static Set invokedTriggers = + Collections.synchronizedSet(new HashSet()); + + /** + * Transient DBT class. Does not implement PersistentTrigger, for minimal + * testing of transient triggers, but must implement Serializable since it + * is the superclass of a serializable class (DBT). + */ + public static class TDBT + implements Trigger, TransactionTrigger, Serializable { + + transient TestState ts = new TestState(); + + private static final long serialVersionUID = 1L; + final String name; + transient String databaseName = null; + + public TDBT(String name) { + super(); + this.name = name; + } + + public Trigger setDatabaseName(String databaseName) { + this.databaseName = databaseName; + if (ts == null) { + ts = new TestState(); + } + return this; + } + + public String getDatabaseName() { + return databaseName; + } + + public void delete(Transaction txn, + DatabaseEntry key, + DatabaseEntry oldData) { + assertTrue(key != null); + invokedTriggers.add(this); + ts.transaction = txn; + ts.key = key; + ts.oldData = oldData; + ts.nDelete++; + } + + public void put(Transaction txn, + DatabaseEntry key, + DatabaseEntry oldData, + DatabaseEntry newData) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.key = key; + ts.oldData = oldData; + ts.newData = newData; + ts.nPut++; + } + + public String getName() { + return name; + } + + public void abort(Transaction txn) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.nAbort++; + } + + public void commit(Transaction txn) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.nCommit++; + } + + public void clear() { + ts = new TestState(); + } + + public void addTrigger(Transaction txn) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.nAddTrigger++; + } + + public void removeTrigger(Transaction txn) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.nRemoveTrigger++; + } + } + + /** + * Regular/persistent trigger class. 
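+     *
+     * A minimal usage sketch (hypothetical database/trigger names; the
+     * setTriggers API is the one exercised throughout these tests):
+     *
+     * <pre>
+     * DatabaseConfig config = new DatabaseConfig();
+     * config.setAllowCreate(true);
+     * config.setTransactional(true);
+     * config.setTriggers(Arrays.asList((Trigger) new DBT("audit")));
+     * Database db = env.openDatabase(null, "audited", config);
+     * </pre>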
+ */ + public static class DBT extends TDBT implements PersistentTrigger { + + public DBT(String name) { + super(name); + } + + public void open(Transaction txn, Environment env, boolean isNew) { + + assertTrue(env != null); + invokedTriggers.add(this); + ts.transaction = txn; + if (isNew) { + ts.nCreate++; + } + ts.nOpen++; + } + + public void close() { + invokedTriggers.add(this); + ts.transaction = null; + ts.nClose++; + } + + public void remove(Transaction txn) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.nRemove++; + } + + public void rename(Transaction txn, String newName) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.newName = newName; + ts.nRename++; + } + + public void truncate(Transaction txn) { + invokedTriggers.add(this); + ts.transaction = txn; + ts.nTruncate++; + } + } + + protected final File envRoot = SharedTestUtils.getTestDir(); + protected EnvironmentConfig envConfig = null; + protected DatabaseConfig dbConfig = null; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + envConfig = RepTestUtils. + createEnvConfig(RepTestUtils.SYNC_SYNC_ALL_DURABILITY); + + dbConfig = getDBConfig(); + } + + DatabaseConfig getDBConfig() { + DatabaseConfig config = new DatabaseConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + config.setSortedDuplicates(false); + return config; + } +} diff --git a/test/com/sleepycat/je/txn/CursorTxnTest.java b/test/com/sleepycat/je/txn/CursorTxnTest.java new file mode 100644 index 0000000..afe16d6 --- /dev/null +++ b/test/com/sleepycat/je/txn/CursorTxnTest.java @@ -0,0 +1,239 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.txn; + +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; +import static org.junit.Assert.assertEquals; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DbTestProxy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class CursorTxnTest extends TestBase { + private final File envHome; + private Environment env; + private Database myDb; + private int initialEnvReadLocks; + private int initialEnvWriteLocks; + private boolean noLocking; + + public CursorTxnTest() { + envHome = SharedTestUtils.getTestDir(); + DbEnvPool.getInstance().clear(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.setLoadPropertyFile(envConfig, false); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_INCOMPRESSOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + EnvironmentConfig envConfigAsSet = env.getConfig(); + noLocking = !(envConfigAsSet.getLocking()); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + myDb = env.openDatabase(null, "test", dbConfig); + } + + @After + public void tearDown() { + try { + myDb.close(); + } catch (DatabaseException ignored) {} + try { + env.close(); + } catch (DatabaseException ignored) {} + } + + /** + * Create a cursor with a null transaction. + */ + @Test + public void testNullTxnLockRelease() + throws DatabaseException { + + getInitialEnvStats(); + Cursor cursor = myDb.openCursor(null, null); + + /* First put() holds a write lock on the non-duplicate entry. */ + insertData(cursor, 10, 1); + checkReadWriteLockCounts(cursor, 0, 1); + + // Check that count does not add more locks + int count = cursor.count(); + assertEquals(1, count); + checkReadWriteLockCounts(cursor, 0, 1); + + /* + * Second put() holds a single write lock, now that we no longer create + * a DIN/DBIN tree. + */ + insertData(cursor, 10, 2); + checkReadWriteLockCounts(cursor, 0, 1); + + /* Check that count does not add more locks. */ + count = cursor.count(); + assertEquals(2, count); + checkReadWriteLockCounts(cursor, 0, 1); + + /* + * Third put() holds one write lock. 
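+         * As with the second put(), no DIN/DBIN subtree is created for
+         * the duplicates, so only the single LN write lock is held.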
+ */ + insertData(cursor, 10, 3); + checkReadWriteLockCounts(cursor, 0, 1); + + DatabaseEntry foundKey = new DatabaseEntry(); + DatabaseEntry foundData = new DatabaseEntry(); + + /* Check that read locks are held on forward traversal. */ + OperationStatus status = + cursor.getFirst(foundKey, foundData, LockMode.DEFAULT); + checkReadWriteLockCounts(cursor, 1, 0); + int numSeen = 0; + while (status == OperationStatus.SUCCESS) { + numSeen++; + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + checkReadWriteLockCounts(cursor, 1, 0); + if (status != OperationStatus.SUCCESS) { + break; + } + + status = cursor.getCurrent(foundKey, foundData, + LockMode.DEFAULT); + checkReadWriteLockCounts(cursor, 1, 0); + } + assertEquals(30, numSeen); + + /* Check that read locks are held on backwards traversal and count. */ + status = cursor.getLast(foundKey, foundData, LockMode.DEFAULT); + checkReadWriteLockCounts(cursor, 1, 0); + + while (status == OperationStatus.SUCCESS) { + count = cursor.count(); + assertEquals("For key " + + TestUtils.dumpByteArray(foundKey.getData()), + 3, count); + status = cursor.getPrev(foundKey, foundData, LockMode.DEFAULT); + checkReadWriteLockCounts(cursor, 1, 0); + } + + /* Check that delete holds a write lock. */ + status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT); + while (status == OperationStatus.SUCCESS) { + assertEquals("For key " + + TestUtils.dumpByteArray(foundKey.getData()), + OperationStatus.SUCCESS, cursor.delete()); + /* Two write locks (old/new LSNs) on deleted LN. */ + checkReadWriteLockCounts(cursor, 0, 2); + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + if (status == OperationStatus.SUCCESS) { + checkReadWriteLockCounts(cursor, 1, 0); + } else { + checkReadWriteLockCounts(cursor, 0, 2); + } + } + + /* Check that count does not add more locks. 
*/ + count = cursor.count(); + assertEquals(0, count); + checkReadWriteLockCounts(cursor, 0, 2); + + cursor.close(); + } + + private void checkReadWriteLockCounts(Cursor cursor, + int expectReadLocks, + int expectWriteLocks) + throws DatabaseException { + + if (noLocking) { + expectReadLocks = expectWriteLocks = 0; + } + + CursorImpl cursorImpl = DbTestProxy.dbcGetCursorImpl(cursor); + StatGroup cursorStats = cursorImpl.getLockStats(); + assertEquals(expectReadLocks, cursorStats.getInt(LOCK_READ_LOCKS)); + assertEquals(expectWriteLocks, cursorStats.getInt(LOCK_WRITE_LOCKS)); + + EnvironmentStats lockStats = env.getStats(null); + assertEquals(initialEnvReadLocks + expectReadLocks, + lockStats.getNReadLocks()); + assertEquals(initialEnvWriteLocks + expectWriteLocks, + lockStats.getNWriteLocks()); + } + + private void getInitialEnvStats() + throws DatabaseException { + + EnvironmentStats lockStats = env.getStats(null); + initialEnvReadLocks = lockStats.getNReadLocks(); + initialEnvWriteLocks = lockStats.getNWriteLocks(); + } + + private void insertData(Cursor cursor, int numRecords, int dataVal) + throws DatabaseException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < numRecords; i++) { + byte[] keyData = TestUtils.getTestArray(i); + byte[] dataData = new byte[1]; + dataData[0] = (byte) dataVal; + key.setData(keyData); + data.setData(dataData); + OperationStatus status = cursor.putNoDupData(key, data); + assertEquals(OperationStatus.SUCCESS, status); + } + } +} diff --git a/test/com/sleepycat/je/txn/DeadlockTest.java b/test/com/sleepycat/je/txn/DeadlockTest.java new file mode 100644 index 0000000..35598c7 --- /dev/null +++ b/test/com/sleepycat/je/txn/DeadlockTest.java @@ -0,0 +1,2062 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+package com.sleepycat.je.txn;
+
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.DeadlockException;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.LockTimeoutException;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.TransactionTimeoutException;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.DualTestCase;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.util.test.SharedTestUtils;
+
+public class DeadlockTest extends DualTestCase {
+
+    private final int lockerNum = 6;
+    private Locker[] txns = new Locker[lockerNum];
+    private JUnitThread[] testers = new JUnitThread[lockerNum];
+    private AtomicInteger sequence;
+    private boolean verbose;
+
+    private Environment env;
+    private final File envHome;
+
+    public DeadlockTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+        envConfig.setConfigParam(EnvironmentParams.N_LOCK_TABLES.getName(),
+                                 "11");
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        env = create(envHome, envConfig);
+
+        /*
+         * We need a definite point in time after which the remaining lock
+         * actions can safely continue to execute.
+         *
+         * For example, for testDeadlockIntersectionWithOneCommonLocker,
+         * if we use owned(lock) to check whether a lock is owned and
+         * whether the current locker can continue to execute,
+         * it may have the following interleaving:
+         *
+         * Locker1           locker2               locker3
+         *
+         *                                         C(3L)
+         * A(1L)             A
+         *                   B(2L)
+         *
+         * check Owner(2L)   check Owner(1L)=2
+         * ***************************************
+         * B                 A
+         *
+         *                   Deadlock detection aborts
+         *                   this locker2, releasing B
+         *                   and C
+         *
+         *                                         check Owners(3L)
+         *                                         // This will loop forever
+         *                                         // because 3L is not owned now
+         *                                         C
+         *
+         * So for this example, we should use sequence rather than owned(lock).
+         *
+         * Locker1           locker2               locker3
+         *                                         C(3L)
+         * A(1L)             A
+         *                   B(2L)
+         *
+         * sequence++        sequence++            sequence++
+         * check(seq>3)      check(seq>3)          check(seq>3)
+         * ***************************************
+         * B                 A                     C
+         *
+         * Besides, for other test cases where only one deadlock is formed,
+         * we can still use owned(lock). Because no lock will be released
+         * before all owned(lock) checks are satisfied, owned(lock) will
+         * not loop infinitely.
+         *
+         * Other possible methods which avoid looping:
+         * 1. CountDownLatch
+         * 2.
com.sleepycat.je.utilint.PollCondition + */ + sequence = new AtomicInteger(0); + verbose = true; + + for (int i = 0; i < lockerNum; i++) { + testers[i] = null; + } + } + + @After + public void tearDown() + throws Exception { + + LockManager.simulatePartialDeadlockHook = null; + + closeEnv(); + + for (int i = 0; i < lockerNum; i++) { + if (testers[i] != null) { + testers[i].shutdown(); + testers[i] = null; + } + } + + super.tearDown(); + } + + private void initLockers() + throws DatabaseException { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + for (int i = 0; i < lockerNum; i++) { + txns[i] = BasicLocker.createBasicLocker(envImpl); + } + } + + private void initTxns(TransactionConfig config, EnvironmentImpl envImpl) + throws DatabaseException { + + for (int i = 0; i < lockerNum; i++) { + txns[i] = Txn.createLocalTxn(envImpl, config); + } + } + + private void closeEnv() + throws DatabaseException { + + if (txns[1] instanceof Txn) { + for (int i = 0; i < lockerNum; i++) { + ((Txn) txns[i]).abort(false); + } + } + + for (int i = 0; i < lockerNum; i++) { + txns[i].operationEnd(); + } + + close(env); + } + + /** + * Test the deadlock between two lockers. Locker1 first acquires L1 and + * then L2. Locker2 first acquires L2 and then L1. + * + * One locker will be chosen as the victim and the victim will throw + * DeadlockException. + */ + @Test + public void testDeadlockBetweenTwoLockers() + throws Throwable { + if (verbose) { + echo("testDeadlockBetweenTwoLockers"); + } + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + + initLockers(); + + testers[1] = new JUnitThread("BetweenTwoLockerstestDeadlockLocker1") { + public void testBody() throws Throwable { + try { + /* Lock L1, should always be granted */ + lockManager.lock( + 1L, txns[1], LockType.WRITE, 1, + false, false, null); + + assertTrue( + lockManager.isOwner(1L, txns[1], LockType.WRITE)); + + /* Wait for locker2 to own L2. */ + while (lockManager.nOwners(2L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L2, wait forever. If locker2 is chosen + * as the victim, then L2 will be granted after + * locker2 aborts and releases L2. + */ + lockManager.lock( + 2L, txns[1], LockType.WRITE, 0, + false, false, null); + + lockManager.release(1L, txns[1]); + lockManager.release(2L, txns[1]); + txns[1].removeLock(1L); + txns[1].removeLock(2L); + } catch (DeadlockException e) { + checkFail(e); + lockManager.release(1L, txns[1]); + txns[1].removeLock(1L); + } + } + }; + + testers[2] = new JUnitThread("BetweenTwoLockerstestDeadlockLocker2") { + public void testBody() throws Throwable { + try { + /* Lock L2, should always be granted */ + lockManager.lock( + 2L, txns[2], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(2L, txns[2], LockType.WRITE)); + + /* Wait for locker1 to own L1. */ + while (lockManager.nOwners(1L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L1, can wait forever. If locker1 is + * chosen as the victim, then L1 will be granted after + * locker1 aborts and releases L1. 
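+                     * Either outcome ends the wait: the victim's
+                     * DeadlockException handler releases the lock it still
+                     * owns, and the survivor completes normally.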
+ */ + lockManager.lock( + 1L, txns[2], LockType.WRITE, 0, + false, false, null); + + lockManager.release(1L, txns[2]); + lockManager.release(2L, txns[2]); + txns[2].removeLock(1L); + txns[2].removeLock(2L); + } catch (DeadlockException e) { + checkFail(e); + lockManager.release(2L, txns[2]); + txns[2].removeLock(2L); + } + } + }; + + testers[1].start(); + testers[2].start(); + testers[1].finishTest(); + testers[2].finishTest(); + } + + /** + * Test a deadlock between two Txns. Txn1 first acquires L1 and then L2. + * Txn2 first acquires L2 and then L1. + * + * One Txn will be chosen as the victim and the victim will throw + * DeadlockException. + */ + @Test + public void testDeadlockBetweenTwoTxns() + throws Throwable { + if (verbose) { + echo("testDeadlockBetweenTwoTxns"); + } + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + + final TransactionConfig config = + new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC); + + initTxns(config, envImpl); + + testers[1] = new JUnitThread("BetweenTwoTxnstestDeadlockTxn1") { + public void testBody() throws Throwable { + try { + /* Lock L1, should always be granted */ + lockManager.lock( + 1L, txns[1], LockType.WRITE, 1, + false, false, null); + + assertTrue( + lockManager.isOwner(1L, txns[1], LockType.WRITE)); + + /* Wait for txns[2] to own L2. */ + while (lockManager.nOwners(2L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L2, wait forever. If txns[2] is chosen + * as the victim, then L2 will be granted after + * txns[2] aborts and releases L2. + */ + lockManager.lock( + 2L, txns[1], LockType.WRITE, 0, + false, false, null); + + lockManager.release(1L, txns[1]); + lockManager.release(2L, txns[1]); + txns[1].removeLock(1L); + txns[1].removeLock(2L); + } catch (DeadlockException e) { + checkFail(e); + lockManager.release(1L, txns[1]); + txns[1].removeLock(1L); + } + } + }; + + testers[2] = new JUnitThread("BetweenTwoTxnstestDeadlockTxn2") { + public void testBody() throws Throwable { + try { + /* Lock L2, should always be granted */ + lockManager.lock( + 2L, txns[2], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(2L, txns[2], LockType.WRITE)); + + /* Wait for txns[1] to own L1. */ + while (lockManager.nOwners(1L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L1, can wait forever. If txns[1] is + * chosen as the victim, then L1 will be granted after + * txns[1] aborts and releases L1. + */ + lockManager.lock( + 1L, txns[2], LockType.WRITE, 0, + false, false, null); + + lockManager.release(1L, txns[2]); + lockManager.release(2L, txns[2]); + txns[2].removeLock(1L); + txns[2].removeLock(2L); + } catch (DeadlockException e) { + checkFail(e); + lockManager.release(2L, txns[2]); + txns[2].removeLock(2L); + } + } + }; + + testers[1].start(); + testers[2].start(); + testers[1].finishTest(); + testers[2].finishTest(); + } + + + /** + * Test a deadlock among three lockers. + * Locker1 owns L1 and waits for L2. 
+ * Locker2 owns L2 and waits for L3 + * Locker3 owns L3 and waits for L1 + */ + @Test + public void testDeadlockAmongThreeLockers() + throws Throwable { + if (verbose) { + echo("testDeadlockAmongThreeLockers"); + } + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + + initLockers(); + + testers[1] = new JUnitThread("AmongThreeLockerstestDeadlockLocker1") { + public void testBody() throws Throwable { + try { + /* Lock L1, should always be granted */ + lockManager.lock( + 1L, txns[1], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(1L, txns[1], LockType.WRITE)); + + /* Wait for Locker 2 to lock L2. */ + while (lockManager.nOwners(2L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L2, wait forever. Lock is granted after + * Locker2 aborts and releases L2. + */ + lockManager.lock( + 2L, txns[1], LockType.WRITE, 0, + false, false, null); + + lockManager.release(1L, txns[1]); + lockManager.release(2L, txns[1]); + txns[1].removeLock(1L); + txns[1].removeLock(2L); + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(1L, txns[1]); + txns[1].removeLock(1L); + } + } + }; + + testers[2] = new JUnitThread("AmongThreeLockerstestDeadlockLocker2") { + public void testBody() throws Throwable { + try { + /* Lock L2, should always be granted */ + lockManager.lock( + 2L, txns[2], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(2L, txns[2], LockType.WRITE)); + + /* Wait for Locker 3 to lock L3. */ + while (lockManager.nOwners(3L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L3, wait forever. Lock is granted after + * Locker3 aborts and releases L3. + */ + lockManager.lock( + 3L, txns[2], LockType.WRITE, 0, + false, false, null); + + lockManager.release(2L, txns[2]); + lockManager.release(3L, txns[2]); + txns[2].removeLock(2L); + txns[2].removeLock(3L); + + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(2L, txns[2]); + txns[2].removeLock(2L); + } + } + }; + + testers[3] = new JUnitThread("AmongThreeLockerstestDeadlockLocker3") { + public void testBody() throws Throwable { + try { + /* Lock L3, should always be granted */ + lockManager.lock( + 3L, txns[3], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(3L, txns[3], LockType.WRITE)); + + /* Wait for Locker1 to lock L1. */ + while (lockManager.nOwners(1L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L1, wait forever. Lock is granted + * after Locker1 aborts and releases L1. + */ + lockManager.lock( + 1L, txns[3], LockType.WRITE, 0, + false, false, null); + + lockManager.release(3L, txns[3]); + lockManager.release(1L, txns[3]); + txns[3].removeLock(3L); + txns[3].removeLock(1L); + + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(3L, txns[3]); + txns[3].removeLock(3L); + } + } + }; + + testers[1].start(); + testers[2].start(); + testers[3].start(); + testers[1].finishTest(); + testers[2].finishTest(); + testers[3].finishTest(); + } + + + /** + * Tests a deadlock among three Txns. + * Locker1 owns L1 and waits for L2. 
+ * Locker2 owns L2 and waits for L3 + * Locker3 owns L3 and waits for L1 + */ + @Test + public void testDeadlockAmongThreeTxns() + throws Throwable { + if (verbose) { + echo("testDeadlockAmongThreeTxns"); + } + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + + final TransactionConfig config = + new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC); + + initTxns(config, envImpl); + + testers[1] = new JUnitThread("AmongThreeTxnstestDeadlockTxn1") { + public void testBody() throws Throwable { + try { + /* Lock L1, should always be granted */ + lockManager.lock( + 1L, txns[1], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(1L, txns[1], LockType.WRITE)); + + /* Wait for Locker2 to lock L2. */ + while (lockManager.nOwners(2L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L2, wait forever. Lock is granted after + * Locker2 aborts and releases L2. + */ + lockManager.lock( + 2L, txns[1], LockType.WRITE, 0, + false, false, null); + + lockManager.release(1L, txns[1]); + lockManager.release(2L, txns[1]); + txns[1].removeLock(1L); + txns[1].removeLock(2L); + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(1L, txns[1]); + txns[1].removeLock(1L); + } + } + }; + + testers[2] = new JUnitThread("AmongThreeTxnstestDeadlockTxn2") { + public void testBody() throws Throwable { + try { + /* Lock L2, should always be granted */ + lockManager.lock( + 2L, txns[2], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(2L, txns[2], LockType.WRITE)); + + /* Wait for Locker3 to lock L3. */ + while (lockManager.nOwners(3L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L3, wait forever. Lock is granted after + * Locker3 aborts and releases L3. + */ + lockManager.lock( + 3L, txns[2], LockType.WRITE, 0, + false, false, null); + + lockManager.release(2L, txns[2]); + lockManager.release(3L, txns[2]); + txns[2].removeLock(2L); + txns[2].removeLock(3L); + + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(2L, txns[2]); + txns[2].removeLock(2L); + } + } + }; + + testers[3] = new JUnitThread("AmongThreeTxnstestDeadlockTxn3") { + public void testBody() throws Throwable { + try { + /* Lock L3, should always be granted */ + lockManager.lock( + 3L, txns[3], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(3L, txns[3], LockType.WRITE)); + + /* Wait for Locker1 to lock L1. */ + while (lockManager.nOwners(1L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L1, wait forever. Lock is granted + * after Locker1 aborts and releases L1. + */ + lockManager.lock( + 1L, txns[3], LockType.WRITE, 0, + false, false, null); + + lockManager.release(3L, txns[3]); + lockManager.release(1L, txns[3]); + txns[3].removeLock(3L); + txns[3].removeLock(1L); + + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(3L, txns[3]); + txns[3].removeLock(3L); + } + } + }; + + testers[1].start(); + testers[2].start(); + testers[3].start(); + testers[1].finishTest(); + testers[2].finishTest(); + testers[3].finishTest(); + } + + /** + * Tests deadlock among Four Txns. Because 3 is always a magic number. + * Txn1 owns L1 and waits for L2. 
+ * Txn2 owns L2 and waits for L3 + * Txn3 owns L3 and waits for L4 + * Txn4 owns L4 and waits for L1 + */ + @Test + public void testDeadlockAmongFourTxns() + throws Throwable { + if (verbose) { + echo("testDeadlockAmongFourTxns"); + } + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + + final TransactionConfig config = + new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC); + + initTxns(config, envImpl); + + testers[1] = new JUnitThread("AmongFourTxnstestDeadlockTxn1") { + public void testBody() throws Throwable { + try { + /* Lock L1, should always be granted */ + lockManager.lock( + 1L, txns[1], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(1L, txns[1], LockType.WRITE)); + + /* Wait for Locker2 to lock L2. */ + while (lockManager.nOwners(2L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L2, wait forever. Lock is granted after + * Locker2 aborts and releases L2. + */ + lockManager.lock( + 2L, txns[1], LockType.WRITE, 0, + false, false, null); + + lockManager.release(1L, txns[1]); + lockManager.release(2L, txns[1]); + txns[1].removeLock(1L); + txns[1].removeLock(2L); + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(1L, txns[1]); + txns[1].removeLock(1L); + } + } + }; + + testers[2] = new JUnitThread("AmongFourTxnstestDeadlockTxn2") { + public void testBody() throws Throwable { + try { + /* Lock L2 should always be granted */ + lockManager.lock( + 2L, txns[2], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(2L, txns[2], LockType.WRITE)); + + /* Wait for Locker3 to lock L3. */ + while (lockManager.nOwners(3L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L3, wait forever. Lock is granted after + * Locker3 aborts and releases L3. + */ + lockManager.lock( + 3L, txns[2], LockType.WRITE, 0, + false, false, null); + + lockManager.release(2L, txns[2]); + lockManager.release(3L, txns[2]); + txns[2].removeLock(2L); + txns[2].removeLock(3L); + + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(2L, txns[2]); + txns[2].removeLock(2L); + } + } + }; + + testers[3] = new JUnitThread("AmongFourTxnstestDeadlockTxn3") { + public void testBody() throws Throwable { + try { + /* Lock L3, should always be granted */ + lockManager.lock( + 3L, txns[3], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(3L, txns[3], LockType.WRITE)); + + /* Wait for Locker1 to lock L4. */ + while (lockManager.nOwners(4L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L1, wait forever. Lock is granted + * after Locker1 aborts and releases L1. + */ + lockManager.lock( + 4L, txns[3], LockType.WRITE, 0, + false, false, null); + + lockManager.release(3L, txns[3]); + lockManager.release(4L, txns[3]); + txns[3].removeLock(3L); + txns[3].removeLock(4L); + + } catch (LockConflictException e) { + checkFail(e); + lockManager.release(3L, txns[3]); + txns[3].removeLock(3L); + } + } + }; + + testers[4] = new JUnitThread("AmongFourTxnstestDeadlockTxn4") { + public void testBody() throws Throwable { + try { + /* Lock L4, should always be granted */ + lockManager.lock( + 4L, txns[4], LockType.WRITE, 0, + false, false, null); + + assertTrue( + lockManager.isOwner(4L, txns[4], LockType.WRITE)); + + /* Wait for Locker1 to lock L1. */ + while (lockManager.nOwners(1L) < 1) { + Thread.yield(); + } + + /* + * Try to lock L1, wait forever. 
+                     * Lock is granted after Locker1 aborts and releases L1.
+                     */
+                    lockManager.lock(
+                        1L, txns[4], LockType.WRITE, 0,
+                        false, false, null);
+
+                    lockManager.release(4L, txns[4]);
+                    lockManager.release(1L, txns[4]);
+                    txns[4].removeLock(4L);
+                    txns[4].removeLock(1L);
+
+                } catch (LockConflictException e) {
+                    checkFail(e);
+                    lockManager.release(4L, txns[4]);
+                    txns[4].removeLock(4L);
+                }
+            }
+        };
+
+        testers[1].start();
+        testers[2].start();
+        testers[3].start();
+        testers[4].start();
+        testers[1].finishTest();
+        testers[2].finishTest();
+        testers[3].finishTest();
+        testers[4].finishTest();
+    }
+
+    /**
+     * Tests that the correct Exception type is thrown.
+     *
+     * When a true deadlock exists, DeadlockException is thrown.
+     * When a timeout occurs, LockTimeoutException is thrown.
+     */
+    @Test
+    public void testThrowCorrectException()
+        throws Throwable {
+        if (verbose) {
+            echo("testThrowCorrectException");
+        }
+
+        final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        final LockManager lockManager =
+            envImpl.getTxnManager().getLockManager();
+
+        final TransactionConfig config =
+            new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC);
+
+        initTxns(config, envImpl);
+
+        testers[1] = new JUnitThread("CorrectExceptiontestDeadlockTxn1") {
+            public void testBody() throws Throwable {
+                try {
+                    /* Lock L1, should always be granted */
+                    lockManager.lock(
+                        1L, txns[1], LockType.WRITE, 1,
+                        false, false, null);
+
+                    assertTrue(
+                        lockManager.isOwner(1L, txns[1], LockType.WRITE));
+
+                    /* Wait for txns[2] to own L2. */
+                    while (lockManager.nOwners(2L) < 1) {
+                        Thread.yield();
+                    }
+
+                    /*
+                     * Try to lock L2, wait forever. If txns[2] is chosen
+                     * as the victim, then L2 will be granted after
+                     * txns[2] aborts and releases L2.
+                     */
+                    lockManager.lock(
+                        2L, txns[1], LockType.WRITE, 0,
+                        false, false, null);
+
+                    lockManager.release(1L, txns[1]);
+                    lockManager.release(2L, txns[1]);
+                    txns[1].removeLock(1L);
+                    txns[1].removeLock(2L);
+                } catch (LockConflictException e) {
+                    checkFail(e);
+                    assertDeadlock(e);
+                    lockManager.release(1L, txns[1]);
+                    txns[1].removeLock(1L);
+                }
+            }
+        };
+
+        testers[2] = new JUnitThread("CorrectExceptiontestDeadlockTxn2") {
+            public void testBody() throws Throwable {
+                try {
+                    /* Lock L2, should always be granted */
+                    lockManager.lock(
+                        2L, txns[2], LockType.WRITE, 0,
+                        false, false, null);
+
+                    assertTrue(
+                        lockManager.isOwner(2L, txns[2], LockType.WRITE));
+
+                    /* Wait for txns[1] to own L1. */
+                    while (lockManager.nOwners(1L) < 1) {
+                        Thread.yield();
+                    }
+
+                    /*
+                     * Try to lock L1, can wait forever. If txns[1] is
+                     * chosen as the victim, then L1 will be granted after
+                     * txns[1] aborts and releases L1.
+                     */
+                    lockManager.lock(
+                        1L, txns[2], LockType.WRITE, 0,
+                        false, false, null);
+
+                    lockManager.release(1L, txns[2]);
+                    lockManager.release(2L, txns[2]);
+                    txns[2].removeLock(1L);
+                    txns[2].removeLock(2L);
+                } catch (LockConflictException e) {
+                    checkFail(e);
+                    assertDeadlock(e);
+                    lockManager.release(2L, txns[2]);
+                    txns[2].removeLock(2L);
+                }
+            }
+        };
+
+        testers[1].start();
+        testers[2].start();
+        testers[1].finishTest();
+        testers[2].finishTest();
+
+        testers[3] = new JUnitThread("CorrectExceptiontestDeadlockLocker3") {
+            public void testBody() throws Throwable {
+                try {
+                    /* Lock L1, should always be granted */
+                    lockManager.lock(
+                        1L, txns[3], LockType.WRITE, 0,
+                        false, false, null);
+
+                    assertTrue(
+                        lockManager.isOwner(1L, txns[3], LockType.WRITE));
+
+                    sequence.incrementAndGet();
+
+                    /* Wait for txns[4] to time out waiting for L1.
+ */
+ while (sequence.get() < 2) {
+ Thread.yield();
+ }
+
+ lockManager.release(1L, txns[3]);
+ txns[3].removeLock(1L);
+ } catch (DatabaseException DBE) {
+ DBE.printStackTrace();
+ fail("caught DatabaseException " + DBE);
+ }
+ }
+ };
+
+ testers[4] = new JUnitThread("CorrectExceptiontestDeadlockLocker4") {
+ public void testBody() throws Throwable {
+ try {
+ /* Wait for txns[3] to lock L1. */
+ while (sequence.get() < 1) {
+ Thread.yield();
+ }
+
+ /* Lock L1, cannot be granted. */
+ lockManager.lock(
+ 1L, txns[4], LockType.WRITE, 500,
+ false, false, null);
+
+ fail("Should throw LockTimeoutException");
+ } catch (LockConflictException e) {
+ checkFail(e);
+ assertLockTimeout(e);
+ sequence.incrementAndGet();
+ }
+ }
+ };
+
+ testers[3].start();
+ testers[4].start();
+ testers[3].finishTest();
+ testers[4].finishTest();
+ }
+
+ /**
+ * DeadlockException can be thrown:
+ * 1. Before the lock timeout wait
+ * 2. After the lock timeout wait, i.e. when one locker/lock times out
+ *
+ * This test case focuses on option 1.
+ * This test is timing-sensitive and may occasionally fail in some
+ * specific situations, e.g. due to CPU scheduling.
+ */
+ @Test
+ public void testDeadlockExceptionThrowBeforeLongTimeWait()
+ throws Throwable {
+ if (verbose) {
+ echo("testDeadlockExceptionThrowBeforeLongTimeWait");
+ }
+
+ final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ final LockManager lockManager =
+ envImpl.getTxnManager().getLockManager();
+
+ final TransactionConfig config =
+ new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC);
+
+ initTxns(config, envImpl);
+
+ final long lockTimeout = 500;
+
+ testers[1] = new JUnitThread("BeforeLongTimeWaittestDeadlockTxn1") {
+ public void testBody() throws Throwable {
+ long startTime = 0;
+ try {
+ /* Lock L1, should always be granted */
+ lockManager.lock(
+ 1L, txns[1], LockType.WRITE, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[1], LockType.WRITE));
+
+ /* Wait for txns[2] to lock L2. */
+ while (lockManager.nOwners(2L) < 1) {
+ Thread.yield();
+ }
+
+ /*
+ * Try to lock L2, using the 500 ms lockTimeout. If
+ * txns[2] is chosen as the victim, then L2 will be
+ * granted after txns[2] aborts and releases L2.
+ */
+ startTime = System.currentTimeMillis();
+ lockManager.lock(
+ 2L, txns[1], LockType.WRITE, lockTimeout,
+ false, false, null);
+
+ lockManager.release(1L, txns[1]);
+ lockManager.release(2L, txns[1]);
+ txns[1].removeLock(1L);
+ txns[1].removeLock(2L);
+ } catch (LockConflictException e) {
+ long currentTime = System.currentTimeMillis();
+ checkFail(e);
+ assertTrue(currentTime - startTime < lockTimeout);
+ lockManager.release(1L, txns[1]);
+ txns[1].removeLock(1L);
+ }
+ }
+ };
+
+ testers[2] = new JUnitThread("BeforeLongTimeWaittestDeadlockTxn2") {
+ public void testBody() throws Throwable {
+ long startTime = 0;
+ try {
+ /* Lock L2, should always be granted */
+ lockManager.lock(
+ 2L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(2L, txns[2], LockType.WRITE));
+
+ /* Wait for txns[1] to lock L1. */
+ while (lockManager.nOwners(1L) < 1) {
+ Thread.yield();
+ }
+
+ /*
+ * Try to lock L1, using the 500 ms lockTimeout. If
+ * txns[1] is chosen as the victim, then L1 will be
+ * granted after txns[1] aborts and releases L1.
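+ * (Editorial note, not in the original comment: in this variant
+ * the deadlock is detected before the timed wait begins, so the
+ * catch block below can assert
+ * currentTime - startTime < lockTimeout,
+ * i.e. the exception arrives well inside the 500 ms budget.)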
+ */
+ startTime = System.currentTimeMillis();
+ lockManager.lock(
+ 1L, txns[2], LockType.WRITE, lockTimeout,
+ false, false, null);
+ lockManager.release(1L, txns[2]);
+ lockManager.release(2L, txns[2]);
+ txns[2].removeLock(1L);
+ txns[2].removeLock(2L);
+ } catch (LockConflictException e) {
+ long currentTime = System.currentTimeMillis();
+ checkFail(e);
+ assertTrue(currentTime - startTime < lockTimeout);
+ lockManager.release(2L, txns[2]);
+ txns[2].removeLock(2L);
+ }
+ }
+ };
+
+ testers[1].start();
+ testers[2].start();
+ testers[1].finishTest();
+ testers[2].finishTest();
+ }
+
+ /**
+ * DeadlockException can be thrown:
+ * 1. Before the lock timeout wait
+ * 2. After the lock timeout wait, i.e. when one locker/lock times out
+ *
+ * This test case focuses on option 2.
+ * This test is timing-sensitive and may occasionally fail in some
+ * specific situations, e.g. due to CPU scheduling.
+ */
+ @Test
+ public void testDeadlockExceptionAfterLongTimeWait()
+ throws Throwable {
+ if (verbose) {
+ echo("testDeadlockExceptionAfterLongTimeWait");
+ }
+
+ final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ final LockManager lockManager =
+ envImpl.getTxnManager().getLockManager();
+
+ final TransactionConfig config =
+ new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC);
+
+ initTxns(config, envImpl);
+
+ final long lockTimeout = 500;
+
+ testers[1] = new JUnitThread("AfterLongTimeWaittestDeadlockTxn1") {
+ public void testBody() throws Throwable {
+ long startTime = 0;
+ try {
+ /* Lock L1, should always be granted */
+ lockManager.lock(
+ 1L, txns[1], LockType.WRITE, 1,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[1], LockType.WRITE));
+
+ /* Wait for txns[2] to lock L2. */
+ while (lockManager.nOwners(2L) < 1) {
+ Thread.yield();
+ }
+
+ /*
+ * Try to lock L2, using the 500 ms lockTimeout. If
+ * txns[2] is chosen as the victim, then L2 will be
+ * granted after txns[2] aborts and releases L2.
+ */
+ startTime = System.currentTimeMillis();
+ lockManager.lock(
+ 2L, txns[1], LockType.WRITE, lockTimeout,
+ false, false, null);
+
+ lockManager.release(1L, txns[1]);
+ lockManager.release(2L, txns[1]);
+ txns[1].removeLock(1L);
+ txns[1].removeLock(2L);
+ } catch (LockConflictException e) {
+ long currentTime = System.currentTimeMillis();
+ assertTrue(
+ currentTime - startTime >= (lockTimeout - 10));
+ lockManager.release(1L, txns[1]);
+ txns[1].removeLock(1L);
+ }
+ }
+ };
+
+ testers[2] = new JUnitThread("AfterLongTimeWaittestDeadlockTxn2") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L2, should always be granted */
+ lockManager.lock(
+ 2L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(2L, txns[2], LockType.WRITE));
+
+ /* Wait for txns[1] to lock L1 and to wait on L2. */
+ while (lockManager.nOwners(1L) < 1 ||
+ lockManager.nWaiters(2L) < 1) {
+ Thread.yield();
+ }
+
+ /*
+ * Give txns[1] time to enter the wait region.
+ */
+ try {
+ Thread.sleep(250);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ /*
+ * The following LockManager.lock() will wait
+ * 500 ms between setting waitingFor and detecting
+ * deadlock.
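+ * (Editorial note, not in the original comment: MyHook, defined
+ * at the bottom of this class, implements TestHook and its
+ * doHook() simply calls Thread.sleep(500), simulating a slow
+ * detector; txns[1]'s 500 ms lock timeout therefore expires
+ * while txns[2] is still parked inside lock().)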
+ */
+ MyHook hook = new MyHook();
+ LockManager.simulatePartialDeadlockHook = hook;
+ lockManager.lock(
+ 1L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+ lockManager.release(1L, txns[2]);
+ lockManager.release(2L, txns[2]);
+ txns[2].removeLock(1L);
+ txns[2].removeLock(2L);
+ } catch (LockConflictException e) {
+ fail(
+ "Locker1 should time out and should throw " +
+ "DeadlockException directly");
+ }
+ }
+ };
+
+ testers[1].start();
+ testers[2].start();
+ testers[1].finishTest();
+ testers[2].finishTest();
+ }
+
+ /**
+ * Tests a deadlock produced by two lockers waiting on the same lock.
+ *
+ * The test scenario is as follows:
+ * locker1                 locker2
+ *
+ * read lock on 1L
+ *
+ *                         read lock on 1L
+ *
+ * write lock on 1L
+ *
+ *                         write lock on 1L
+ */
+ @Test
+ public void testDeadlockProducedByTwoLockersOnOneLock()
+ throws Throwable {
+ if (verbose) {
+ echo("testDeadlockProducedByTwoLockersOnOneLock");
+ }
+
+ final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ final LockManager lockManager =
+ envImpl.getTxnManager().getLockManager();
+
+ final TransactionConfig config =
+ new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC);
+
+ initTxns(config, envImpl);
+
+ testers[1] = new JUnitThread("TwoLockersOnOneLocktestDeadlockTxn1") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L1 with READ type, should always be granted */
+ lockManager.lock(
+ 1L, txns[1], LockType.READ, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[1], LockType.READ));
+
+ /* Wait for txns[2] to also read-lock L1. */
+ while (lockManager.nOwners(1L) < 2) {
+ Thread.yield();
+ }
+
+ /*
+ * Try to upgrade to a write lock on L1, wait forever.
+ * If txns[2] is chosen as the victim, then the write
+ * lock will be granted after txns[2] aborts and
+ * releases its read lock.
+ */
+ lockManager.lock(
+ 1L, txns[1], LockType.WRITE, 0,
+ false, false, null);
+
+ lockManager.release(1L, txns[1]);
+ txns[1].removeLock(1L);
+ } catch (LockConflictException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockProducedByTwoLockersOnOneLock:Txn1" +
+ e.getMessage());
+ }
+
+ lockManager.release(1L, txns[1]);
+ txns[1].removeLock(1L);
+ }
+ }
+ };
+
+ testers[2] = new JUnitThread("TwoLockersOnOneLocktestDeadlockTxn2") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L1 with READ type, should always be granted */
+ lockManager.lock(
+ 1L, txns[2], LockType.READ, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[2], LockType.READ));
+
+ /* Wait for txns[1] to also read-lock L1. */
+ while (lockManager.nOwners(1L) < 2) {
+ Thread.yield();
+ }
+
+ /*
+ * Try to upgrade to a write lock on L1, can wait
+ * forever. If txns[1] is chosen as the victim, then
+ * the write lock will be granted after txns[1] aborts
+ * and releases its read lock.
+ */
+ lockManager.lock(
+ 1L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ lockManager.release(1L, txns[2]);
+ txns[2].removeLock(1L);
+ } catch (LockConflictException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockProducedByTwoLockersOnOneLock:Txn2" +
+ e.getMessage());
+ }
+
+ lockManager.release(1L, txns[2]);
+ txns[2].removeLock(1L);
+ }
+ }
+ };
+
+ testers[1].start();
+ testers[2].start();
+ testers[1].finishTest();
+ testers[2].finishTest();
+ }
+
+ /**
+ * Test partial deadlock
+ *
+ * This test case tests the following scenario. A true deadlock is
+ * formed between locker2 and locker3. But the deadlock is first detected
+ * by locker1 when it acquires A.
+ * Locker3        locker1        locker2
+ *                   A
+ *                                  B
+ *                   B
+ *                                  A
+ *    A
+ *
+ */
+ @Test
+ public void testPartialDeadlock()
+ throws Throwable {
+ if (verbose) {
+ echo("testPartialDeadlock");
+ }
+
+ final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ final LockManager lockManager =
+ envImpl.getTxnManager().getLockManager();
+
+ MyHook hook = new MyHook();
+ LockManager.simulatePartialDeadlockHook = hook;
+
+ final TransactionConfig config =
+ new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC);
+
+ initTxns(config, envImpl);
+
+ testers[1] = new JUnitThread("PartialDeadlocktestDeadlockTxn1") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L1, should always be granted */
+ lockManager.lock(
+ 1L, txns[1], LockType.WRITE, 1,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[1], LockType.WRITE));
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 2) {
+ Thread.yield();
+ }
+ sequence.incrementAndGet();
+
+ /*
+ * Try to lock L2, wait forever. If txns[2] is chosen
+ * as the victim, then L2 will be granted after
+ * txns[2] aborts and releases L2.
+ */
+ lockManager.lock(
+ 2L, txns[1], LockType.WRITE, 0,
+ false, false, null);
+
+ lockManager.release(1L, txns[1]);
+ lockManager.release(2L, txns[1]);
+ txns[1].removeLock(1L);
+ txns[1].removeLock(2L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testPartialDeadlock: Txn1: " + e.getMessage());
+ }
+
+ lockManager.release(1L, txns[1]);
+ txns[1].removeLock(1L);
+ }
+ }
+ };
+
+ testers[2] = new JUnitThread("PartialDeadlocktestDeadlockTxn2") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L2, should always be granted */
+ lockManager.lock(
+ 2L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(2L, txns[2], LockType.WRITE));
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 2) {
+ Thread.yield();
+ }
+ sequence.incrementAndGet();
+
+ /*
+ * Try to lock L1, can wait forever. If txns[1] is
+ * chosen as the victim, then L1 will be granted after
+ * txns[1] aborts and releases L1.
+ */
+ lockManager.lock(
+ 1L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ lockManager.release(1L, txns[2]);
+ lockManager.release(2L, txns[2]);
+ txns[2].removeLock(1L);
+ txns[2].removeLock(2L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testPartialDeadlock: Txn2: " + e.getMessage());
+ }
+
+ lockManager.release(2L, txns[2]);
+ txns[2].removeLock(2L);
+ }
+ }
+ };
+
+ testers[3] = new JUnitThread("PartialDeadlocktestDeadlockTxn3") {
+ public void testBody() throws Throwable {
+ try {
+ /*
+ * Wait for txns[1] to wait on lock L2 and txns[2] to
+ * wait on lock L1.
+ */
+ while (sequence.get() < 4) {
+ Thread.yield();
+ }
+
+ /*
+ * In order to guarantee that txns[1] and txns[2] have
+ * already set their waitingFor, we sleep here.
+ *
+ * Because lockManager.simulatePartialDeadlockHook is
+ * non-null, in the above two testers each call to
+ * LockManager.lock() sleeps for 500 millis between
+ * setting waitingFor and detecting the deadlock.
+ *
+ * So after the following 200 millis sleep, txns[3]
+ * will have enough time to be the first of txns[1],
+ * txns[2] and txns[3] to run the deadlock detection.
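+ * (Editorial note, not in the original comment: txns[3] is not
+ * part of the txns[1]/txns[2] cycle; it merely waits behind it.
+ * The point of the lock() call below is that DeadlockChecker
+ * copes with such a partial deadlock without looping forever
+ * and without aborting the innocent bystander.)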
+ */
+ try {
+ Thread.sleep(200);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ LockManager.simulatePartialDeadlockHook = null;
+
+ /*
+ * Here we want to check that
+ * DeadlockChecker.hasCycleInternal() will not be
+ * invoked infinitely.
+ */
+ lockManager.lock(
+ 1L, txns[3], LockType.WRITE, 0,
+ false, false, null);
+
+ lockManager.release(1L, txns[3]);
+ txns[3].removeLock(1L);
+ } catch (DeadlockException e) {
+ fail(
+ "testPartialDeadlock: Txn3 should not " +
+ "throw DeadlockException.");
+ }
+ }
+ };
+
+ testers[1].start();
+ testers[2].start();
+ testers[3].start();
+ testers[1].finishTest();
+ testers[2].finishTest();
+ testers[3].finishTest();
+ }
+
+ /**
+ * Two deadlock cycles may intersect:
+ * 1. Only one common locker
+ * 2. More than one common locker
+ *
+ * This test case tests option 1, i.e. the following scenario: locker1
+ * and locker2 have a deadlock; locker2 and locker3 have a deadlock.
+ * locker2 is the common locker.
+ *
+ * Locker1        locker2        locker3
+ *                  C(3L)
+ *   A(1L)                          A
+ *                  B(2L)
+ *
+ * ***************************************
+ *     B              A              C
+ */
+ @Test
+ public void testDeadlockIntersectionWithOneCommonLocker()
+ throws Throwable {
+ if (verbose) {
+ echo("testDeadlockIntersectionWithOneCommonLocker");
+ }
+
+ final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ final LockManager lockManager =
+ envImpl.getTxnManager().getLockManager();
+
+ final TransactionConfig config =
+ new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC);
+
+ initTxns(config, envImpl);
+
+ testers[1] = new JUnitThread("OneCommonLockertestDeadlockTxn1") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L1, should always be granted */
+ lockManager.lock(
+ 1L, txns[1], LockType.READ, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[1], LockType.READ));
+
+ if (verbose) {
+ echo("OneCommonLocker: Txn1 owns 1L");
+ }
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 3) {
+ Thread.yield();
+ }
+
+ if (verbose) {
+ echo("OneCommon: Txn1 finish yield");
+ }
+
+ /*
+ * Try to lock L2, wait forever. If txns[2] is chosen
+ * as the victim, then L2 will be granted after
+ * txns[2] aborts and releases L2.
+ */
+ lockManager.lock(
+ 2L, txns[1], LockType.WRITE, 0,
+ false, false, null);
+
+ if (verbose) {
+ echo("OneCommonLocker: Txn1 get 2L");
+ }
+
+ lockManager.release(1L, txns[1]);
+ lockManager.release(2L, txns[1]);
+ txns[1].removeLock(1L);
+ txns[1].removeLock(2L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockIntersectionWithOneCommonLocker:" +
+ " Txn1: " + e.getMessage());
+ }
+
+ lockManager.release(1L, txns[1]);
+ txns[1].removeLock(1L);
+ }
+ }
+ };
+
+ testers[2] = new JUnitThread("OneCommonLockertestDeadlockTxn2") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L3 and L2, should always be granted */
+ lockManager.lock(
+ 3L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ lockManager.lock(
+ 2L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(2L, txns[2], LockType.WRITE));
+
+ assertTrue(
+ lockManager.isOwner(3L, txns[2], LockType.WRITE));
+
+ if (verbose) {
+ echo("OneCommonLocker: Txn2 owns 2L and 3L");
+ }
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 3) {
+ Thread.yield();
+ }
+
+ if (verbose) {
+ echo("OneCommon: Txn2 finish yield");
+ }
+
+ /*
+ * Try to lock L1, can wait forever.
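+ * (Editorial note, not in the original comment: this request for
+ * L1 is the edge shared by both cycles -- txns[1] waits for L2
+ * held by txns[2] while txns[2] waits for L1 held by txns[1],
+ * and txns[3] waits for L3 held by txns[2] while txns[2] waits
+ * for L1 held by txns[3] -- making txns[2] the one common locker.)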
+ * If txns[1] is chosen as the victim, then L1 will be
+ * granted after txns[1] aborts and releases L1.
+ */
+ lockManager.lock(
+ 1L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ if (verbose) {
+ echo("OneCommonLocker: Txn2 get 1L");
+ }
+
+ lockManager.release(1L, txns[2]);
+ lockManager.release(2L, txns[2]);
+ lockManager.release(3L, txns[2]);
+ txns[2].removeLock(1L);
+ txns[2].removeLock(2L);
+ txns[2].removeLock(3L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockIntersectionWithOneCommonLocker:" +
+ " Txn2: " + e.getMessage());
+ }
+
+ lockManager.release(2L, txns[2]);
+ lockManager.release(3L, txns[2]);
+ txns[2].removeLock(2L);
+ txns[2].removeLock(3L);
+ }
+ }
+ };
+
+ testers[3] = new JUnitThread("OneCommonLockertestDeadlockTxn3") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L1, should always be granted */
+ lockManager.lock(
+ 1L, txns[3], LockType.READ, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[3], LockType.READ));
+
+ if (verbose) {
+ echo("OneCommonLocker: Txn3 owns 1L");
+ }
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 3) {
+ Thread.yield();
+ }
+
+ if (verbose) {
+ echo("OneCommon: Txn3 finish yield");
+ }
+
+ lockManager.lock(
+ 3L, txns[3], LockType.WRITE, 0,
+ false, false, null);
+
+ if (verbose) {
+ echo("OneCommonLocker: Txn3 get 3L");
+ }
+
+ lockManager.release(1L, txns[3]);
+ lockManager.release(3L, txns[3]);
+ txns[3].removeLock(1L);
+ txns[3].removeLock(3L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockIntersectionWithOneCommonLocker:" +
+ " Txn3: " + e.getMessage());
+ }
+ lockManager.release(1L, txns[3]);
+ txns[3].removeLock(1L);
+ }
+ }
+ };
+
+ testers[1].start();
+ testers[2].start();
+ testers[3].start();
+ testers[1].finishTest();
+ testers[2].finishTest();
+ testers[3].finishTest();
+ }
+
+ /**
+ * Two deadlock cycles may intersect:
+ * 1. Only one common locker
+ * 2. More than one common locker
+ *
+ * This test case tests option 2, i.e. the following scenario: locker1,
+ * locker2 and locker3 have a deadlock; locker2, locker3 and locker4
+ * have a deadlock. locker2 and locker3 are the common lockers.
+ *
+ * Locker1        locker2        locker3        locker4
+ *                  D(4L)
+ *   A(1L)                         C(3L)           A
+ *                  B(2L)
+ *
+ * ***************************************************
+ *     B              C              A              D
+ */
+ @Test
+ public void testDeadlockIntersectionWithTwoCommonLocker()
+ throws Throwable {
+ if (verbose) {
+ echo("testDeadlockIntersectionWithTwoCommonLocker");
+ }
+
+ final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ final LockManager lockManager =
+ envImpl.getTxnManager().getLockManager();
+
+ final TransactionConfig config =
+ new TransactionConfig().setDurability(Durability.COMMIT_NO_SYNC);
+
+ initTxns(config, envImpl);
+
+ testers[1] = new JUnitThread("TwoCommonLockertestDeadlockTxn1") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L1, should always be granted */
+ lockManager.lock(
+ 1L, txns[1], LockType.READ, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[1], LockType.READ));
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn1 owns 1L");
+ }
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 4) {
+ Thread.yield();
+ }
+
+ if (verbose) {
+ echo("TwoCommon: Txn1 finish yield");
+ }
+
+ /*
+ * Try to lock L2, wait forever. If txns[2] is chosen
+ * as the victim, then L2 will be granted after
+ * txns[2] aborts and releases L2.
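+ * (Editorial note, not in the original comment: the two cycles
+ * here share two lockers -- txns[1] -> txns[2] -> txns[3] ->
+ * txns[1] is one deadlock and txns[4] -> txns[2] -> txns[3] ->
+ * txns[4] is the other -- so txns[2] and txns[3] sit on both.)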
+ */
+ lockManager.lock(
+ 2L, txns[1], LockType.WRITE, 0,
+ false, false, null);
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn1 get 2L");
+ }
+
+ lockManager.release(1L, txns[1]);
+ lockManager.release(2L, txns[1]);
+ txns[1].removeLock(1L);
+ txns[1].removeLock(2L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockIntersectionWithTwoCommonLocker:" +
+ " Txn1: " + e.getMessage());
+ }
+ lockManager.release(1L, txns[1]);
+ txns[1].removeLock(1L);
+ }
+ }
+ };
+
+ testers[2] = new JUnitThread("TwoCommonLockertestDeadlockTxn2") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L4 and L2, should always be granted */
+ lockManager.lock(
+ 4L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ lockManager.lock(
+ 2L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(2L, txns[2], LockType.WRITE));
+
+ assertTrue(
+ lockManager.isOwner(4L, txns[2], LockType.WRITE));
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn2 owns 2L and 4L");
+ }
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 4) {
+ Thread.yield();
+ }
+
+ if (verbose) {
+ echo("TwoCommon: Txn2 finish yield");
+ }
+
+ /*
+ * Try to lock L3, can wait forever. If txns[3] is
+ * chosen as the victim, then L3 will be granted after
+ * txns[3] aborts and releases L3.
+ */
+ lockManager.lock(
+ 3L, txns[2], LockType.WRITE, 0,
+ false, false, null);
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn2 get 3L");
+ }
+
+ lockManager.release(4L, txns[2]);
+ lockManager.release(2L, txns[2]);
+ lockManager.release(3L, txns[2]);
+ txns[2].removeLock(4L);
+ txns[2].removeLock(2L);
+ txns[2].removeLock(3L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockIntersectionWithTwoCommonLocker:" +
+ " Txn2: " + e.getMessage());
+ }
+
+ lockManager.release(2L, txns[2]);
+ lockManager.release(4L, txns[2]);
+ txns[2].removeLock(2L);
+ txns[2].removeLock(4L);
+ }
+ }
+ };
+
+ testers[3] = new JUnitThread("TwoCommonLockertestDeadlockTxn3") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L3, should always be granted */
+ lockManager.lock(
+ 3L, txns[3], LockType.WRITE, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(3L, txns[3], LockType.WRITE));
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn3 owns 3L");
+ }
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 4) {
+ Thread.yield();
+ }
+
+ if (verbose) {
+ echo("TwoCommon: Txn3 finish yield");
+ }
+
+ lockManager.lock(
+ 1L, txns[3], LockType.WRITE, 0,
+ false, false, null);
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn3 get 1L");
+ }
+
+ lockManager.release(1L, txns[3]);
+ lockManager.release(3L, txns[3]);
+ txns[3].removeLock(1L);
+ txns[3].removeLock(3L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockIntersectionWithTwoCommonLocker:" +
+ " Txn3: " + e.getMessage());
+ }
+ lockManager.release(3L, txns[3]);
+ txns[3].removeLock(3L);
+ }
+ }
+ };
+
+ testers[4] = new JUnitThread("TwoCommonLockertestDeadlockTxn4") {
+ public void testBody() throws Throwable {
+ try {
+ /* Lock L1, should always be granted */
+ lockManager.lock(
+ 1L, txns[4], LockType.READ, 0,
+ false, false, null);
+
+ assertTrue(
+ lockManager.isOwner(1L, txns[4], LockType.READ));
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn4 owns 1L");
+ }
+
+ sequence.incrementAndGet();
+ while (sequence.get() < 4) {
+ Thread.yield();
+ }
+
+ if (verbose) {
+ echo("TwoCommon: Txn4 finish yield");
+ }
+
+ lockManager.lock(
+ 4L, txns[4], LockType.WRITE, 0,
+ false, false, null);
+
+ if (verbose) {
+ echo("TwoCommonLocker: Txn4 get 4L");
+ }
+
+ lockManager.release(1L, txns[4]);
+ lockManager.release(4L, txns[4]);
+ txns[4].removeLock(1L);
+ txns[4].removeLock(4L);
+ } catch (DeadlockException e) {
+ checkFail(e);
+ if (verbose) {
+ echo("testDeadlockIntersectionWithTwoCommonLocker:" +
+ " Txn4: " + e.getMessage());
+ }
+ lockManager.release(1L, txns[4]);
+ txns[4].removeLock(1L);
+ }
+ }
+ };
+
+ testers[1].start();
+ testers[2].start();
+ testers[3].start();
+ testers[4].start();
+ testers[1].finishTest();
+ testers[2].finishTest();
+ testers[3].finishTest();
+ testers[4].finishTest();
+ }
+
+ private void assertLockTimeout(LockConflictException e) {
+ assertTrue(TestUtils.skipVersion(e).startsWith("Lock "));
+ assertSame(LockTimeoutException.class, e.getClass());
+ }
+
+ private void assertTxnTimeout(LockConflictException e) {
+ assertTrue(TestUtils.skipVersion(e).startsWith("Transaction "));
+ assertSame(TransactionTimeoutException.class, e.getClass());
+ }
+
+ private void assertDeadlock(LockConflictException e) {
+ assertSame(DeadlockException.class, e.getClass());
+ }
+
+ /*
+ * We may add another Exception API to clarify the deadlock thrown
+ * in the normal case and in the abnormal case. The normal case is
+ * that the deadlock is thrown by the chosen victim. The abnormal
+ * case is that the deadlock cannot be broken by the chosen victim
+ * and is thrown when the timeout expires.
+ *
+ * For now this unit test only checks the content of the
+ * DeadlockException to determine whether the deadlock was thrown
+ * in an unexpected place.
+ */
+ private void checkFail(LockConflictException e) {
+ if (e.getMessage().contains(
+ "Unable to break deadlock using random victim")) {
+ e.printStackTrace();
+ fail("Deadlock should be broken by a random victim");
+ }
+ }
+
+ private int echo(String str) {
+ System.out.println(str);
+ return 0;
+ }
+
+ class MyHook implements TestHook {
+
+ @Override
+ public void doHook() {
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void doHook(Void obj) {
+ }
+ @Override
+ public void hookSetup() {
+ }
+ @Override
+ public void doIOHook() throws IOException {
+ }
+ @Override
+ public Void getHookValue() {
+ return null;
+ }
+ }
+}
diff --git a/test/com/sleepycat/je/txn/LockManagerTest.java b/test/com/sleepycat/je/txn/LockManagerTest.java
new file mode 100644
index 0000000..50c482b
--- /dev/null
+++ b/test/com/sleepycat/je/txn/LockManagerTest.java
@@ -0,0 +1,1313 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.txn;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.TransactionConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.DatabaseImpl;
+import com.sleepycat.je.dbi.DbTree;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitThread;
+import com.sleepycat.je.util.DualTestCase;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+
+public class LockManagerTest extends DualTestCase {
+
+ private Locker txn1;
+ private Locker txn2;
+ private Locker txn3;
+ private Locker txn4;
+ private Long nid;
+ private AtomicInteger sequence;
+
+ private Environment env;
+ private final File envHome;
+
+ public LockManagerTest() {
+ envHome = SharedTestUtils.getTestDir();
+ }
+
+ @Before
+ public void setUp()
+ throws Exception {
+
+ super.setUp();
+ EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+ envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6");
+ envConfig.setConfigParam(EnvironmentParams.N_LOCK_TABLES.getName(),
+ "11");
+ envConfig.setAllowCreate(true);
+ envConfig.setTransactional(true);
+ env = create(envHome, envConfig);
+
+ nid = new Long(1);
+ sequence = new AtomicInteger(0);
+ }
+
+ private void initLockers()
+ throws DatabaseException {
+
+ EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+
+ txn1 = BasicLocker.createBasicLocker(envImpl);
+ txn2 = BasicLocker.createBasicLocker(envImpl);
+ txn3 = BasicLocker.createBasicLocker(envImpl);
+ txn4 = BasicLocker.createBasicLocker(envImpl);
+ }
+
+ private void initTxns(TransactionConfig config, EnvironmentImpl envImpl)
+ throws DatabaseException {
+
+ txn1 = Txn.createLocalTxn(envImpl, config);
+ txn2 = Txn.createLocalTxn(envImpl, config);
+ txn3 = Txn.createLocalTxn(envImpl, config);
+ txn4 = Txn.createLocalTxn(envImpl, config);
+ }
+
+ private void closeEnv()
+ throws DatabaseException {
+
+ txn1.operationEnd();
+ txn2.operationEnd();
+ txn3.operationEnd();
+ txn4.operationEnd();
+
+ close(env);
+ }
+
+ /*
+ * SR15926 showed a bug where node IDs that are > 0x80000000 produce
+ * negative lock table indexes because of the modulo arithmetic in
+ * LockManager.getLockTableIndex().
+ *
+ * Since node IDs are no longer used for locking, this test is somewhat
+ * outdated. However, it is still a good idea to check that we can lock
+ * an LSN value with the sign bit set.
+ */
+ @Test
+ public void testSR15926LargeNodeIds()
+ throws Exception {
+
+ initLockers();
+
+ final LockManager lockManager = DbInternal.getNonNullEnvImpl(env).
+ getTxnManager().getLockManager();
+
+ try {
+ lockManager.lock(0x80000000L, txn1, LockType.WRITE,
+ 0, false, false, null);
+ } catch (Exception e) {
+ fail("shouldn't get exception " + e);
+ } finally {
+ closeEnv();
+ }
+ }
+
+ @Test
+ public void testNegatives()
+ throws Exception {
+
+ initLockers();
+
+ final LockManager lockManager = DbInternal.getNonNullEnvImpl(env).
+ getTxnManager().getLockManager();
+
+ try {
+ assertFalse(lockManager.isOwner(nid, txn1, LockType.READ));
+ assertFalse(lockManager.isOwner(nid, txn1, LockType.WRITE));
+ assertFalse(lockManager.isLocked(nid));
+ assertFalse(lockManager.isWaiter(nid, txn1));
+ lockManager.lock(1, txn1, LockType.READ, 0, false, false, null);
+
+ /* already holds this lock */
+ assertEquals(LockGrantType.EXISTING,
+ lockManager.lock(1, txn1, LockType.READ, 0,
+ false, false, null));
+ assertFalse(lockManager.isOwner(nid, txn2, LockType.READ));
+ assertFalse(lockManager.isOwner(nid, txn2, LockType.WRITE));
+ assertTrue(lockManager.isLocked(nid));
+ assertTrue(lockManager.nOwners(new Long(2)) == -1);
+ assertTrue(lockManager.nWaiters(new Long(2)) == -1);
+
+ /* lock 2 doesn't exist, shouldn't affect the existing lock */
+ lockManager.release(2L, txn1);
+ txn1.removeLock(2L);
+ assertTrue(lockManager.isLocked(nid));
+
+ /* txn2 is not the owner, shouldn't release lock 1. */
+ lockManager.release(1L, txn2);
+ txn2.removeLock(1L);
+ assertTrue(lockManager.isLocked(nid));
+ assertTrue(lockManager.isOwner(nid, txn1, LockType.READ));
+ assertTrue(lockManager.nOwners(nid) == 1);
+
+ /* Now really release. */
+ lockManager.release(1L, txn1);
+ txn1.removeLock(1L);
+ assertFalse(lockManager.isLocked(nid));
+ assertFalse(lockManager.isOwner(nid, txn1, LockType.READ));
+ assertFalse(lockManager.nOwners(nid) == 1);
+
+ lockManager.lock(1, txn1, LockType.WRITE, 0, false, false, null);
+ /* holds write and subsequent request for READ is ok */
+ lockManager.lock(1, txn1, LockType.READ, 0, false, false, null);
+ /* already holds this lock */
+ assertTrue(lockManager.lock(1, txn1, LockType.WRITE,
+ 0, false, false, null) ==
+ LockGrantType.EXISTING);
+ assertFalse(lockManager.isWaiter(nid, txn1));
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ closeEnv();
+ }
+ }
+
+ /**
+ * Acquire three read locks and make sure that they share nicely.
+ */
+ @Test
+ public void testMultipleReaders()
+ throws Throwable {
+
+ initLockers();
+
+ final LockManager lockManager = DbInternal.getNonNullEnvImpl(env).
+ getTxnManager().getLockManager(); + + JUnitThread tester1 = + new JUnitThread("testMultipleReaders1") { + public void testBody() { + try { + lockManager.lock(1, txn1, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.READ)); + sequence.incrementAndGet(); + while (sequence.get() < 3) { + Thread.yield(); + } + lockManager.release(1L, txn1); + txn1.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testMultipleReaders2") { + public void testBody() { + try { + lockManager.lock(1, txn2, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + sequence.incrementAndGet(); + while (sequence.get() < 3) { + Thread.yield(); + } + lockManager.release(1L, txn2); + txn2.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester3 = + new JUnitThread("testMultipleReaders3") { + public void testBody() { + try { + lockManager.lock(1, txn3, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn3, LockType.READ)); + sequence.incrementAndGet(); + while (sequence.get() < 3) { + Thread.yield(); + } + lockManager.release(1L, txn3); + txn3.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester3.start(); + tester1.finishTest(); + tester2.finishTest(); + tester3.finishTest(); + closeEnv(); + } + + /** + * Grab two read locks, hold them, and make sure that a write lock + * waits for them to be released. + */ + @Test + public void testMultipleReadersSingleWrite1() + throws Throwable { + + initLockers(); + + final LockManager lockManager = DbInternal.getNonNullEnvImpl(env). 
+ getTxnManager().getLockManager(); + + JUnitThread tester1 = + new JUnitThread("testMultipleReaders1") { + public void testBody() { + try { + lockManager.lock(1, txn1, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.READ)); + while (lockManager.nWaiters(nid) < 1) { + Thread.yield(); + } + assertTrue(lockManager.isWaiter(nid, txn3)); + assertFalse(lockManager.isWaiter(nid, txn1)); + lockManager.release(1L, txn1); + txn1.removeLock(1L); + assertFalse + (lockManager.isOwner(nid, txn1, LockType.READ)); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testMultipleReaders2") { + public void testBody() { + try { + lockManager.lock(1, txn2, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + while (lockManager.nWaiters(nid) < 1) { + Thread.yield(); + } + assertTrue(lockManager.isWaiter(nid, txn3)); + lockManager.release(1L, txn2); + txn2.removeLock(1L); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.READ)); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester3 = + new JUnitThread("testMultipleReaders3") { + public void testBody() { + try { + while (lockManager.nOwners(nid) < 2) { + Thread.yield(); + } + lockManager.lock(1, txn3, LockType.WRITE, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn3, LockType.WRITE)); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester3.start(); + tester1.finishTest(); + tester2.finishTest(); + tester3.finishTest(); + closeEnv(); + } + + /** + * Acquire two read locks, put a write locker behind the two + * read lockers, and then queue a read locker behind the writer. + * Ensure that the third reader is not granted until the writer + * releases the lock. + */ + @Test + public void testMultipleReadersSingleWrite2() + throws Throwable { + + initLockers(); + + final LockManager lockManager = DbInternal.getNonNullEnvImpl(env). 
+ getTxnManager().getLockManager(); + + JUnitThread tester1 = + new JUnitThread("testMultipleReaders1") { + public void testBody() { + try { + lockManager.lock(1, txn1, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.READ)); + while (lockManager.nWaiters(nid) < 2) { + Thread.yield(); + } + lockManager.release(1L, txn1); + txn1.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testMultipleReaders2") { + public void testBody() { + try { + lockManager.lock(1, txn2, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + while (lockManager.nWaiters(nid) < 2) { + Thread.yield(); + } + lockManager.release(1L, txn2); + txn2.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester3 = + new JUnitThread("testMultipleReaders3") { + public void testBody() { + try { + while (lockManager.nOwners(nid) < 2) { + Thread.yield(); + } + lockManager.lock(1, txn3, LockType.WRITE, 0, + false, false, null); + while (lockManager.nWaiters(nid) < 1) { + Thread.yield(); + } + assertTrue + (lockManager.isOwner(nid, txn3, LockType.WRITE)); + lockManager.release(1L, txn3); + txn3.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester4 = + new JUnitThread("testMultipleReaders4") { + public void testBody() { + try { + while (lockManager.nWaiters(nid) < 1) { + Thread.yield(); + } + lockManager.lock(1, txn4, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn4, LockType.READ)); + lockManager.release(1L, txn4); + txn4.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester3.start(); + tester4.start(); + tester1.finishTest(); + tester2.finishTest(); + tester3.finishTest(); + tester4.finishTest(); + closeEnv(); + } + + /** + * Acquire two read locks for two transactions, then request a write + * lock for a third transaction. Then request a write lock for one + * of the first transactions that already has a read lock (i.e. + * request an upgrade lock). Make sure it butts in front of the + * existing wait lock. + */ + @Test + public void testUpgradeLock() + throws Throwable { + + initLockers(); + + final LockManager lockManager = DbInternal.getNonNullEnvImpl(env). 
+ getTxnManager().getLockManager(); + + JUnitThread tester1 = + new JUnitThread("testUpgradeLock1") { + public void testBody() { + try { + lockManager.lock(1, txn1, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.READ)); + while (lockManager.nWaiters(nid) < 2) { + Thread.yield(); + } + lockManager.release(1L, txn1); + txn1.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testUpgradeLock2") { + public void testBody() { + try { + lockManager.lock(1, txn2, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + while (lockManager.nWaiters(nid) < 1) { + Thread.yield(); + } + lockManager.lock(1, txn2, LockType.WRITE, 0, + false, false, null); + assertTrue(lockManager.nWaiters(nid) == 1); + lockManager.release(1L, txn2); + txn2.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester3 = + new JUnitThread("testUpgradeLock3") { + public void testBody() { + try { + while (lockManager.nOwners(nid) < 2) { + Thread.yield(); + } + lockManager.lock(1, txn3, LockType.WRITE, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn3, LockType.WRITE)); + lockManager.release(1L, txn3); + txn3.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester3.start(); + tester1.finishTest(); + tester2.finishTest(); + tester3.finishTest(); + closeEnv(); + } + + /** + * Acquire a read lock, then request a write lock for a second + * transaction in non-blocking mode. Make sure it fails. + */ + @Test + public void testNonBlockingLock1() + throws Throwable { + + initLockers(); + + final LockManager lockManager = DbInternal.getNonNullEnvImpl(env). 
+ getTxnManager().getLockManager(); + + JUnitThread tester1 = + new JUnitThread("testNonBlocking1") { + public void testBody() { + try { + lockManager.lock(1, txn1, LockType.READ, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.READ)); + while (sequence.get() < 1) { + Thread.yield(); + } + lockManager.release(1L, txn1); + txn1.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testNonBlocking2") { + public void testBody() { + try { + /* wait for tester1 */ + while (lockManager.nOwners(nid) < 1) { + Thread.yield(); + } + LockGrantType grant = lockManager.lock + (1, txn2, LockType.WRITE, 0, true, false, null); + assertSame(LockGrantType.DENIED, grant); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.WRITE)); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.READ)); + assertTrue(lockManager.nWaiters(nid) == 0); + assertTrue(lockManager.nOwners(nid) == 1); + sequence.incrementAndGet(); + /* wait for tester1 to release the lock */ + while (lockManager.nOwners(nid) > 0) { + Thread.yield(); + } + assertTrue + (lockManager.lock(1, txn2, LockType.WRITE, 0, + false, false, null) == + LockGrantType.NEW); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.WRITE)); + assertTrue(lockManager.nWaiters(nid) == 0); + assertTrue(lockManager.nOwners(nid) == 1); + lockManager.release(1L, txn2); + txn2.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + closeEnv(); + } + + /** + * Acquire a write lock, then request a read lock for a second + * transaction in non-blocking mode. Make sure it fails. + */ + @Test + public void testNonBlockingLock2() + throws Throwable { + + initLockers(); + + final LockManager lockManager = DbInternal.getNonNullEnvImpl(env). 
+ getTxnManager().getLockManager(); + + JUnitThread tester1 = + new JUnitThread("testNonBlocking1") { + public void testBody() { + try { + lockManager.lock(1, txn1, LockType.WRITE, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.WRITE)); + sequence.incrementAndGet(); + while (sequence.get() < 2) { + Thread.yield(); + } + lockManager.release(1L, txn1); + txn1.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testNonBlocking2") { + public void testBody() { + try { + /* wait for tester1 */ + while (sequence.get() < 1) { + Thread.yield(); + } + LockGrantType grant = lockManager.lock + (1, txn2, LockType.READ, 0, true, false, null); + assertSame(LockGrantType.DENIED, grant); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.READ)); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.WRITE)); + assertTrue(lockManager.nWaiters(nid) == 0); + assertTrue(lockManager.nOwners(nid) == 1); + sequence.incrementAndGet(); + /* wait for tester1 to release the lock */ + while (lockManager.nOwners(nid) > 0) { + Thread.yield(); + } + assertTrue + (lockManager.lock(1, txn2, LockType.READ, 0, + false, false, null) == + LockGrantType.NEW); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.WRITE)); + assertTrue(lockManager.nWaiters(nid) == 0); + assertTrue(lockManager.nOwners(nid) == 1); + lockManager.release(1L, txn2); + txn2.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + closeEnv(); + } + + /** + * Acquire a write lock, then request a read lock for a second + * transaction in blocking mode. Make sure it waits. + */ + @Test + public void testWaitingLock() + throws Throwable { + + initLockers(); + + final LockManager lockManager = DbInternal.getNonNullEnvImpl(env). 
+ getTxnManager().getLockManager(); + + JUnitThread tester1 = + new JUnitThread("testBlocking1") { + public void testBody() { + try { + lockManager.lock(1, txn1, LockType.WRITE, 0, + false, false, null); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.WRITE)); + sequence.incrementAndGet(); + while (sequence.get() < 2) { + Thread.yield(); + } + lockManager.release(1L, txn1); + txn1.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testBlocking2") { + public void testBody() { + try { + /* wait for tester1 */ + while (sequence.get() < 1) { + Thread.yield(); + } + try { + lockManager.lock(1, txn2, LockType.READ, 500, + false, false, null); + fail("didn't time out"); + } catch (LockConflictException e) { + assertTrue + (TestUtils.skipVersion(e).startsWith("Lock ")); + } + assertFalse + (lockManager.isOwner(nid, txn2, LockType.READ)); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.WRITE)); + assertTrue(lockManager.nWaiters(nid) == 0); + assertTrue(lockManager.nOwners(nid) == 1); + sequence.incrementAndGet(); + /* wait for tester1 to release the lock */ + while (lockManager.nOwners(nid) > 0) { + Thread.yield(); + } + assertTrue + (lockManager.lock(1, txn2, LockType.READ, 0, + false, false, null) == + LockGrantType.NEW); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + assertFalse + (lockManager.isOwner(nid, txn2, LockType.WRITE)); + assertTrue(lockManager.nWaiters(nid) == 0); + assertTrue(lockManager.nOwners(nid) == 1); + lockManager.release(1L, txn2); + txn2.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + tester1.start(); + tester2.start(); + tester1.finishTest(); + tester2.finishTest(); + closeEnv(); + } + + /** + * Test that LockConflictException has the correct owners and waiters when + * it is thrown due to a timeout. + * + * Create five threads, the first two of which take a read lock and the + * second two of which try for a write lock backed up behind the two + * read locks. Then have a fifth thread try for a read lock which backs + * up behind all of them. The first two threads (read lockers) are owners + * and the second two threads are waiters. When the fifth thread catches + * the LockConflictException make sure that it contains the txn ids for the + * two readers in the owners array and the txn ids for the two writers + * in the waiters array. 
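+ *
+ * (Editorial sketch, not part of the original comment; it restates
+ * the public accessors the catch block below relies on:
+ *
+ * } catch (LockConflictException e) {
+ * long[] owners = e.getOwnerTxnIds(); // the two readers
+ * long[] waiters = e.getWaiterTxnIds(); // the two writers
+ * }
+ *
+ * The order within each array is unspecified, which is why the
+ * assertions accept either permutation.)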
+ */
+ @Test
+ public void testLockConflictInfo()
+ throws Throwable {
+
+ EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ final LockManager lockManager =
+ envImpl.getTxnManager().getLockManager();
+ TransactionConfig config = new TransactionConfig();
+
+ initTxns(config, envImpl);
+
+ final Txn txn5 = Txn.createLocalTxn(envImpl, config);
+
+ sequence.set(0);
+ JUnitThread tester1 =
+ new JUnitThread("testMultipleReaders1") {
+ public void testBody() {
+ try {
+ lockManager.lock(1, txn1, LockType.READ, 0,
+ false, false, null);
+ assertTrue
+ (lockManager.isOwner(nid, txn1, LockType.READ));
+ while (sequence.get() < 1) {
+ Thread.yield();
+ }
+ lockManager.release(1L, txn1);
+ txn1.removeLock(1L);
+ } catch (DatabaseException DBE) {
+ DBE.printStackTrace();
+ fail("caught DatabaseException " + DBE);
+ }
+ }
+ };
+
+ JUnitThread tester2 =
+ new JUnitThread("testMultipleReaders2") {
+ public void testBody() {
+ try {
+ lockManager.lock(1, txn2, LockType.READ, 0,
+ false, false, null);
+ assertTrue
+ (lockManager.isOwner(nid, txn2, LockType.READ));
+ while (sequence.get() < 1) {
+ Thread.yield();
+ }
+ lockManager.release(1L, txn2);
+ txn2.removeLock(1L);
+ } catch (DatabaseException DBE) {
+ DBE.printStackTrace();
+ fail("caught DatabaseException " + DBE);
+ }
+ }
+ };
+
+ JUnitThread tester3 =
+ new JUnitThread("testMultipleReaders3") {
+ public void testBody() {
+ try {
+ while (lockManager.nOwners(nid) < 2) {
+ Thread.yield();
+ }
+ lockManager.lock(1, txn3, LockType.WRITE, 0,
+ false, false, null);
+ while (sequence.get() < 1) {
+ Thread.yield();
+ }
+ assertTrue
+ (lockManager.isOwner(nid, txn3, LockType.WRITE));
+ lockManager.release(1L, txn3);
+ txn3.removeLock(1L);
+ } catch (DatabaseException DBE) {
+ DBE.printStackTrace();
+ fail("caught DatabaseException " + DBE);
+ }
+ }
+ };
+
+ JUnitThread tester4 =
+ new JUnitThread("testMultipleReaders4") {
+ public void testBody() {
+ try {
+ while (lockManager.nOwners(nid) < 2) {
+ Thread.yield();
+ }
+ lockManager.lock(1, txn4, LockType.WRITE, 0,
+ false, false, null);
+ while (sequence.get() < 1) {
+ Thread.yield();
+ }
+ assertTrue
+ (lockManager.isOwner(nid, txn4, LockType.WRITE));
+ lockManager.release(1L, txn4);
+ txn4.removeLock(1L);
+ } catch (DatabaseException DBE) {
+ DBE.printStackTrace();
+ fail("caught DatabaseException " + DBE);
+ }
+ }
+ };
+
+ JUnitThread tester5 =
+ new JUnitThread("testMultipleReaders5") {
+ public void testBody() {
+ try {
+ while (lockManager.nWaiters(nid) < 2) {
+ Thread.yield();
+ }
+ lockManager.lock(1, txn5, LockType.READ, 900,
+ false, false, null);
+ fail("expected LockConflictException");
+ } catch (LockConflictException e) {
+
+ long[] owners = e.getOwnerTxnIds();
+ long[] waiters = e.getWaiterTxnIds();
+
+ assertTrue((owners[0] == txn1.getId() &&
+ owners[1] == txn2.getId()) ||
+ (owners[1] == txn1.getId() &&
+ owners[0] == txn2.getId()));
+
+ assertTrue((waiters[0] == txn3.getId() &&
+ waiters[1] == txn4.getId()) ||
+ (waiters[1] == txn3.getId() &&
+ waiters[0] == txn4.getId()));
+
+ } catch (DatabaseException DBE) {
+ DBE.printStackTrace(System.out);
+ fail("expected LockConflictException");
+ }
+ sequence.set(1);
+ }
+ };
+
+ tester1.start();
+ tester2.start();
+ tester3.start();
+ tester4.start();
+ tester5.start();
+ tester1.finishTest();
+ tester2.finishTest();
+ tester3.finishTest();
+ tester4.finishTest();
+ tester5.finishTest();
+ ((Txn) txn1).abort(false);
+ ((Txn) txn2).abort(false);
+ ((Txn) txn3).abort(false);
+ ((Txn) txn4).abort(false);
+ txn5.abort(false);
+
closeEnv(); + } + + /** + * Test lock stealing. + * + * Create five threads, with the first two taking a read lock and the + * second two trying for a write lock backed up behind the two read locks. + * Then have a fifth importunate thread try for a write lock which will + * flush the first two owners. The first two threads (read lockers) are + * owners and the second two threads are waiters. When the importunate + * thread steals the lock, make sure that the other two owners become + * onlyAbortable. + */ + @Test + public void testImportunateTxn1() + throws Throwable { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* LockPreemptedException is thrown only if replicated. */ + if (!envImpl.isReplicated()) { + close(env); + return; + } + + /* + * Use an arbitrary DatabaseImpl so that the Txn.lock method can be + * called below with a non-null database param. Although this is a + * LockManager test, lock preemption requires calling Txn.lock. + */ + final DatabaseImpl dbImpl = + envImpl.getDbTree().getDb(DbTree.NAME_DB_ID); + + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + TransactionConfig config = new TransactionConfig(); + + initTxns(config, envImpl); + + final Txn txn5 = Txn.createLocalTxn(envImpl, config); + txn5.setImportunate(true); + + sequence.set(0); + JUnitThread tester1 = + new JUnitThread("testImportunateTxn1.1") { + public void testBody() { + try { + txn1.setLockTimeout(0); + txn1.lock(1, LockType.READ, false, dbImpl); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.READ)); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue(txn1.isPreempted()); + sequence.incrementAndGet(); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testImportunateTxn1.2") { + public void testBody() { + try { + txn2.setLockTimeout(0); + txn2.lock(1, LockType.READ, false, dbImpl); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue(txn1.isPreempted()); + sequence.incrementAndGet(); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester3 = + new JUnitThread("testImportunateTxn1.3") { + public void testBody() { + try { + while (lockManager.nOwners(nid) < 2) { + Thread.yield(); + } + txn3.setLockTimeout(0); + txn3.lock(1, LockType.WRITE, false, dbImpl); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue + (lockManager.isOwner(nid, txn3, LockType.WRITE)); + lockManager.release(1L, txn3); + txn3.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester4 = + new JUnitThread("testImportunateTxn1.4") { + public void testBody() { + try { + while (lockManager.nOwners(nid) < 2) { + Thread.yield(); + } + txn4.setLockTimeout(0); + txn4.lock(1, LockType.WRITE, false, dbImpl); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue + (lockManager.isOwner(nid, txn4, LockType.WRITE)); + lockManager.release(1L, txn4); + txn4.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester5 = + new JUnitThread("testImportunateTxn1.5") { + public void testBody() { + try { + while (lockManager.nWaiters(nid) < 1) { + Thread.yield(); + } + txn5.setImportunate(true); + 
txn5.setLockTimeout(900); + txn5.lock(1, LockType.WRITE, false, dbImpl); + sequence.set(1); + while (sequence.get() < 3) { + Thread.yield(); + } + lockManager.release(1L, txn5); + txn5.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(System.out); + fail("unexpected DatabaseException"); + } + } + }; + + tester1.start(); + tester2.start(); + tester3.start(); + tester4.start(); + tester5.start(); + tester1.finishTest(); + tester2.finishTest(); + tester3.finishTest(); + tester4.finishTest(); + tester5.finishTest(); + ((Txn) txn1).abort(false); + ((Txn) txn2).abort(false); + ((Txn) txn3).abort(false); + ((Txn) txn4).abort(false); + txn5.abort(false); + closeEnv(); + } + + /** + * Test lock stealing. + * + * Create five threads, with the first two taking a read lock and the + * second two trying for a write lock backed up behind the two read locks. + * Then have a fifth importunate thread take a read lock and try for a + * write lock (upgrade) which will flush the first two read lock owners. + * The first two threads (read lockers) are owners and the second two + * threads are waiters. When the importunate thread steals the lock, make + * sure that the other two owners become onlyAbortable. + */ + @Test + public void testImportunateTxn2() + throws Throwable { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* LockPreemptedException is thrown only if replicated. */ + if (!envImpl.isReplicated()) { + close(env); + return; + } + + /* + * Use an arbitrary DatabaseImpl so that the Txn.lock method can be + * called below with a non-null database param. Although this is a + * LockManager test, lock preemption requires calling Txn.lock. + */ + final DatabaseImpl dbImpl = + envImpl.getDbTree().getDb(DbTree.NAME_DB_ID); + + final LockManager lockManager = + envImpl.getTxnManager().getLockManager(); + TransactionConfig config = new TransactionConfig(); + + initTxns(config, envImpl); + + final Txn txn5 = Txn.createLocalTxn(envImpl, config); + txn5.setImportunate(true); + + sequence.set(0); + JUnitThread tester1 = + new JUnitThread("testImportunateTxn1.1") { + public void testBody() { + try { + txn1.setLockTimeout(0); + txn1.lock(1, LockType.READ, false, dbImpl); + assertTrue + (lockManager.isOwner(nid, txn1, LockType.READ)); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue(txn1.isPreempted()); + sequence.incrementAndGet(); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester2 = + new JUnitThread("testImportunateTxn1.2") { + public void testBody() { + try { + txn2.setLockTimeout(0); + txn2.lock(1, LockType.READ, false, dbImpl); + assertTrue + (lockManager.isOwner(nid, txn2, LockType.READ)); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue(txn2.isPreempted()); + sequence.incrementAndGet(); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester3 = + new JUnitThread("testImportunateTxn1.3") { + public void testBody() { + try { + while (lockManager.nOwners(nid) < 3) { + Thread.yield(); + } + txn3.setLockTimeout(0); + txn3.lock(1, LockType.WRITE, false, dbImpl); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue + (lockManager.isOwner(nid, txn3, LockType.WRITE)); + lockManager.release(1L, txn3); + txn3.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread 
tester4 = + new JUnitThread("testImportunateTxn1.4") { + public void testBody() { + try { + while (lockManager.nOwners(nid) < 3) { + Thread.yield(); + } + txn4.setLockTimeout(0); + txn4.lock(1, LockType.WRITE, false, dbImpl); + while (sequence.get() < 1) { + Thread.yield(); + } + assertTrue + (lockManager.isOwner(nid, txn4, LockType.WRITE)); + lockManager.release(1L, txn4); + txn4.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(); + fail("caught DatabaseException " + DBE); + } + } + }; + + JUnitThread tester5 = + new JUnitThread("testImportunateTxn1.5") { + public void testBody() { + try { + txn5.setLockTimeout(0); + txn5.lock(1, LockType.READ, false, dbImpl); + while (lockManager.nWaiters(nid) < 1) { + Thread.yield(); + } + txn5.setImportunate(true); + txn5.setLockTimeout(900); + txn5.lock(1, LockType.WRITE, false, dbImpl); + sequence.set(1); + while (sequence.get() < 3) { + Thread.yield(); + } + lockManager.release(1L, txn5); + txn5.removeLock(1L); + } catch (DatabaseException DBE) { + DBE.printStackTrace(System.out); + fail("unexpected DatabaseException"); + } + } + }; + + tester1.start(); + tester2.start(); + tester3.start(); + tester4.start(); + tester5.start(); + tester1.finishTest(); + tester2.finishTest(); + tester3.finishTest(); + tester4.finishTest(); + tester5.finishTest(); + ((Txn) txn1).abort(false); + ((Txn) txn2).abort(false); + ((Txn) txn3).abort(false); + ((Txn) txn4).abort(false); + txn5.abort(false); + closeEnv(); + } +} diff --git a/test/com/sleepycat/je/txn/LockTest.java b/test/com/sleepycat/je/txn/LockTest.java new file mode 100644 index 0000000..95b74a2 --- /dev/null +++ b/test/com/sleepycat/je/txn/LockTest.java @@ -0,0 +1,982 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +public class LockTest extends DualTestCase { + private Environment env; + private final File envHome; + + public LockTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = create(envHome, envConfig); + } + + @Test + public void testLockConflicts() + throws Exception { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + Locker txn1 = BasicLocker.createBasicLocker(envImpl); + Locker txn2 = BasicLocker.createBasicLocker(envImpl); + Locker txn3 = BasicLocker.createBasicLocker(envImpl); + + MemoryBudget mb = envImpl.getMemoryBudget(); + try { + + /* + * Start fresh. Ask for a read lock from txn1 twice, + * should only be one owner. Then add multiple + * would-be-writers as waiters. + */ + Lock lock = new LockImpl(); + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn1, false, false, mb, 0). + lockGrant); + assertEquals(LockGrantType.EXISTING, + lock.lock(LockType.READ, txn1, false, false, mb, 0). + lockGrant); + + /* txn1 has a READ lock. */ + assertEquals(1, lock.nOwners()); + assertEquals(0, lock.nWaiters()); + + /* txn2 asks for a read lock, gets it. */ + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn2, false, false, mb, 0). + lockGrant); + + /* txn1 asks for WRITE, must wait */ + assertEquals(LockGrantType.WAIT_PROMOTION, + lock.lock(LockType.WRITE, txn1, false, false, mb, 0). + lockGrant); + + /* txn2 write request must wait */ + assertEquals(LockGrantType.WAIT_PROMOTION, + lock.lock(LockType.WRITE, txn2, false, false, mb, 0). + lockGrant); + + /* Two read locks, two write waiters */ + assertEquals(2, lock.nOwners()); + assertEquals(2, lock.nWaiters()); + + /* + * Release txn1 read lock, which causes txn2's read lock to be + * promoted to a write lock. + */ + lock.release(txn1, mb, 0 /* lockTableIndex */); + assertEquals(1, lock.nOwners()); + assertEquals(1, lock.nWaiters()); + + /* Release txn2 write lock, now txn1 will get its write lock. */ + lock.release(txn2, mb, 0 /* lockTableIndex */); + assertEquals(1, lock.nOwners()); + assertEquals(0, lock.nWaiters()); + + /* Release txn1's write lock. */ + lock.release(txn1, mb, 0 /* lockTableIndex */); + assertEquals(0, lock.nOwners()); + assertEquals(0, lock.nWaiters()); + + /* Start fresh. Get a write lock, then get a read lock. */ + lock = new LockImpl(); + + assertEquals(LockGrantType.NEW, + lock.lock(LockType.WRITE, txn1, false, false, mb, 0). 
+                lockGrant);
+            assertEquals(LockGrantType.EXISTING,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /* Start fresh. Get a read lock, upgrade to a write lock. */
+            lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.PROMOTION,
+                lock.lock(LockType.WRITE, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /*
+             * Start fresh. Get a read lock, then ask for a non-blocking
+             * write lock. The latter should be denied.
+             */
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.DENIED,
+                lock.lock(LockType.WRITE, txn2, true, false, mb, 0).
+                lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /* Two write requests, should be one owner. */
+            lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.WRITE, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.EXISTING,
+                lock.lock(LockType.WRITE, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(1, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+
+            /*
+             * Ensure that a read request behind a write request that waits
+             * also waits.
+             */
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.WRITE, txn2, false, false, mb, 0).
+                lockGrant);
+
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn3, false, false, mb, 0).
+                lockGrant);
+
+            assertEquals(1, lock.nOwners());
+            assertEquals(2, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+            lock.release(txn2, mb, 0 /* lockTableIndex */);
+            lock.release(txn3, mb, 0 /* lockTableIndex */);
+
+            /* Check non-blocking requests. */
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+
+            /* Since this is a non-blocking request, it fails and doesn't go
+               on the wait queue. */
+            assertEquals(LockGrantType.DENIED,
+                lock.lock(LockType.WRITE, txn2, true, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn3, true, false, mb, 0).
+                lockGrant);
+            assertEquals(2, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+            lock.release(txn1, mb, 0 /* lockTableIndex */);
+            lock.release(txn3, mb, 0 /* lockTableIndex */);
+
+            lock = new LockImpl();
+
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn2, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn3, false, false, mb, 0).
+ lockGrant); + assertEquals(3, lock.nOwners()); + assertEquals(0, lock.nWaiters()); + lock.release(txn1, mb, 0 /* lockTableIndex */); + lock.release(txn2, mb, 0 /* lockTableIndex */); + lock.release(txn3, mb, 0 /* lockTableIndex */); + } finally { + txn1.operationEnd(); + txn2.operationEnd(); + txn3.operationEnd(); + close(env); + } + } + + @Test + public void testOwners() + throws Exception { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + Locker txn1 = BasicLocker.createBasicLocker(envImpl); + Locker txn2 = BasicLocker.createBasicLocker(envImpl); + Locker txn3 = BasicLocker.createBasicLocker(envImpl); + Locker txn4 = BasicLocker.createBasicLocker(envImpl); + MemoryBudget mb = envImpl.getMemoryBudget(); + + try { + /* + * Build up 3 owners and waiters for a lock, to test the + * lazy initialization and optimization for single owner/waiter. + */ + Lock lock = new LockImpl(); + /* should be no writer. */ + assertTrue(lock.getWriteOwnerLocker() == null); + + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn1, false, false, mb, 0). + lockGrant); + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn2, false, false, mb, 0). + lockGrant); + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn3, false, false, mb, 0). + lockGrant); + + /* should be no writer. */ + assertTrue(lock.getWriteOwnerLocker() == null); + + /* expect 3 owners, 0 waiters. */ + Set expectedOwners = new HashSet(); + expectedOwners.add(new LockInfo(txn1, LockType.READ)); + expectedOwners.add(new LockInfo(txn2, LockType.READ)); + expectedOwners.add(new LockInfo(txn3, LockType.READ)); + checkOwners(expectedOwners, lock, 0); + + /* release the first locker. */ + lock.release(txn1, mb, 0); + expectedOwners = new HashSet(); + expectedOwners.add(new LockInfo(txn2, LockType.READ)); + expectedOwners.add(new LockInfo(txn3, LockType.READ)); + checkOwners(expectedOwners, lock, 0); + + /* Add more. */ + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn4, false, false, mb, 0). + lockGrant); + expectedOwners = new HashSet(); + expectedOwners.add(new LockInfo(txn2, LockType.READ)); + expectedOwners.add(new LockInfo(txn3, LockType.READ)); + expectedOwners.add(new LockInfo(txn4, LockType.READ)); + checkOwners(expectedOwners, lock, 0); + + /* release */ + lock.release(txn2, mb, 0); + expectedOwners = new HashSet(); + expectedOwners.add(new LockInfo(txn3, LockType.READ)); + expectedOwners.add(new LockInfo(txn4, LockType.READ)); + checkOwners(expectedOwners, lock, 0); + + /* release */ + lock.release(txn3, mb, 0); + expectedOwners = new HashSet(); + expectedOwners.add(new LockInfo(txn4, LockType.READ)); + /* only 1 lock, in the owner set, but not a write owner. */ + assertTrue(lock.getWriteOwnerLocker() == null); + + /* release */ + lock.release(txn4, mb, 0); + expectedOwners = new HashSet(); + checkOwners(expectedOwners, lock, 0); + + /* Add owners again. */ + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn1, false, false, mb, 0). + lockGrant); + assertEquals(LockGrantType.NEW, + lock.lock(LockType.READ, txn2, false, false, mb, 0). 
+                lockGrant);
+            expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn1, LockType.READ));
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+
+            /* Release for the sake of the memory leak checking */
+            lock.release(txn1, mb, 0);
+            lock.release(txn2, mb, 0);
+
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+            txn4.operationEnd();
+            close(env);
+        }
+    }
+
+    @Test
+    public void testWaiters()
+        throws Exception {
+
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        Locker txn1 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn2 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn3 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn4 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn5 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            /*
+             * Build up 2 owners and 3 waiters for a lock, to test the
+             * lazy initialization and optimization for single owner/waiter.
+             */
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.READ, txn2, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.WRITE, txn3, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.WRITE, txn4, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_PROMOTION,
+                lock.lock(LockType.WRITE, txn1, false, false, mb, 0).
+                lockGrant);
+
+            /* should be no writer. */
+            assertTrue(lock.getWriteOwnerLocker() == null);
+
+            /* expect 2 owners, 3 waiters. */
+            Set expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn1, LockType.READ));
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            checkOwners(expectedOwners, lock, 3);
+
+            List waiters = new ArrayList();
+            waiters.add(new LockInfo(txn1, LockType.WRITE));
+            waiters.add(new LockInfo(txn3, LockType.WRITE));
+            waiters.add(new LockInfo(txn4, LockType.WRITE));
+            checkWaiters(waiters, lock);
+
+            /* release a waiter, shouldn't change anything. */
+            lock.release(txn4, mb, 0);
+            checkWaiters(waiters, lock);
+
+            /*
+             * Release the other read lock, expect txn1 to be promoted to a
+             * write lock.
+             */
+            lock.release(txn2, mb, 0);
+            expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn1, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 2);
+
+            waiters.remove(0);
+            checkWaiters(waiters, lock);
+
+            /* release */
+            lock.release(txn1, mb, 0);
+            expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn3, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 1);
+
+            waiters.remove(0);
+            checkWaiters(waiters, lock);
+
+            /*
+             * Add multiple read lock waiters so that we can promote multiple
+             * waiters.
+             */
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn2, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn5, false, false, mb, 0).
+                lockGrant);
+
+            checkOwners(expectedOwners, lock, 4);
+            waiters.add(new LockInfo(txn1, LockType.READ));
+            waiters.add(new LockInfo(txn2, LockType.READ));
+            waiters.add(new LockInfo(txn5, LockType.READ));
+            checkWaiters(waiters, lock);
+
+            /* flush one of the waiters. */
+            lock.flushWaiter(txn5, mb, 0);
+            waiters.remove(3);
+            checkWaiters(waiters, lock);
+
+            /* re-add. */
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn5, false, false, mb, 0).
+                lockGrant);
+            waiters.add(new LockInfo(txn5, LockType.READ));
+
+            /* release txn3 */
+            lock.release(txn3, mb, 0);
+            expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn4, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 3);
+            waiters.remove(0);
+            checkWaiters(waiters, lock);
+
+            /* release txn4, expect all read locks to promote. */
+            lock.release(txn4, mb, 0);
+            expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn1, LockType.READ));
+            expectedOwners.add(new LockInfo(txn2, LockType.READ));
+            expectedOwners.add(new LockInfo(txn5, LockType.READ));
+            checkOwners(expectedOwners, lock, 0);
+            waiters.clear();
+            checkWaiters(waiters, lock);
+
+            /* Release for the sake of the memory leak checking */
+            lock.release(txn1, mb, 0);
+            lock.release(txn2, mb, 0);
+            lock.release(txn5, mb, 0);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+            txn4.operationEnd();
+            txn5.operationEnd();
+            close(env);
+        }
+    }
+
+    @Test
+    public void testPromotion()
+        throws Exception {
+
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        Locker txn1 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn2 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn3 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn4 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn5 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            /*
+             * Build up 1 owner and 3 read waiters for a lock. Then
+             * check that all the waiters promote properly.
+             */
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.WRITE, txn1, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn2, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn3, false, false, mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.READ, txn4, false, false, mb, 0).
+                lockGrant);
+
+            /* Check that 1 owner, 3 waiters exist. */
+            Set expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn1, LockType.WRITE));
+            checkOwners(expectedOwners, lock, 3);
+
+            List waiters = new ArrayList();
+            waiters.add(new LockInfo(txn2, LockType.READ));
+            waiters.add(new LockInfo(txn3, LockType.READ));
+            waiters.add(new LockInfo(txn4, LockType.READ));
+            checkWaiters(waiters, lock);
+
+            /* Release the writer, expect all 3 waiters to promote.
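+             * All three waiting requests are READ requests, which are
+             * compatible with one another, so they should be granted as a
+             * group when the WRITE owner releases.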
*/ + lock.release(txn1, mb, 0); + expectedOwners = new HashSet(); + expectedOwners.add(new LockInfo(txn2, LockType.READ)); + expectedOwners.add(new LockInfo(txn3, LockType.READ)); + expectedOwners.add(new LockInfo(txn4, LockType.READ)); + checkOwners(expectedOwners, lock, 0); + waiters.clear(); + checkWaiters(waiters, lock); + + /* Release for the sake of the memory leak checking */ + lock.release(txn2, mb, 0); + lock.release(txn3, mb, 0); + lock.release(txn4, mb, 0); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } finally { + txn1.operationEnd(); + txn2.operationEnd(); + txn3.operationEnd(); + txn4.operationEnd(); + txn5.operationEnd(); + close(env); + } + } + + /** + * Tests conflicts between range locks and all other lock types. + */ + @Test + public void testRangeConflicts() + throws Exception { + + /* No owner */ + checkConflict(null, + LockType.RANGE_READ, + LockGrantType.NEW); + checkConflict(null, + LockType.RANGE_WRITE, + LockGrantType.NEW); + checkConflict(null, + LockType.RANGE_INSERT, + LockGrantType.NEW); + + /* Owner has READ */ + checkConflict(LockType.READ, + LockType.RANGE_READ, + LockGrantType.NEW); + checkConflict(LockType.READ, + LockType.RANGE_WRITE, + LockGrantType.WAIT_NEW); + checkConflict(LockType.READ, + LockType.RANGE_INSERT, + LockGrantType.NEW); + + /* Owner has WRITE */ + checkConflict(LockType.WRITE, + LockType.RANGE_READ, + LockGrantType.WAIT_NEW); + checkConflict(LockType.WRITE, + LockType.RANGE_WRITE, + LockGrantType.WAIT_NEW); + checkConflict(LockType.WRITE, + LockType.RANGE_INSERT, + LockGrantType.NEW); + + /* Owner has RANGE_READ */ + checkConflict(LockType.RANGE_READ, + LockType.READ, + LockGrantType.NEW); + checkConflict(LockType.RANGE_READ, + LockType.WRITE, + LockGrantType.WAIT_NEW); + checkConflict(LockType.RANGE_READ, + LockType.RANGE_READ, + LockGrantType.NEW); + checkConflict(LockType.RANGE_READ, + LockType.RANGE_WRITE, + LockGrantType.WAIT_NEW); + checkConflict(LockType.RANGE_READ, + LockType.RANGE_INSERT, + LockGrantType.WAIT_NEW); + + /* Owner has RANGE_WRITE */ + checkConflict(LockType.RANGE_WRITE, + LockType.READ, + LockGrantType.WAIT_NEW); + checkConflict(LockType.RANGE_WRITE, + LockType.WRITE, + LockGrantType.WAIT_NEW); + checkConflict(LockType.RANGE_WRITE, + LockType.RANGE_READ, + LockGrantType.WAIT_NEW); + checkConflict(LockType.RANGE_WRITE, + LockType.RANGE_WRITE, + LockGrantType.WAIT_NEW); + checkConflict(LockType.RANGE_WRITE, + LockType.RANGE_INSERT, + LockGrantType.WAIT_NEW); + + /* Owner has RANGE_INSERT */ + checkConflict(LockType.RANGE_INSERT, + LockType.READ, + LockGrantType.NEW); + checkConflict(LockType.RANGE_INSERT, + LockType.WRITE, + LockGrantType.NEW); + checkConflict(LockType.RANGE_INSERT, + LockType.RANGE_READ, + LockGrantType.WAIT_RESTART); + checkConflict(LockType.RANGE_INSERT, + LockType.RANGE_WRITE, + LockGrantType.WAIT_RESTART); + checkConflict(LockType.RANGE_INSERT, + LockType.RANGE_INSERT, + LockGrantType.NEW); + close(env); + } + + /** + * Tests that when the first request is held and the second request is + * requested, the second grant type is returned. 
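+     * When a WAIT_* grant type is expected, the second locker should appear
+     * on the waiter list; a WAIT_RESTART waiter is recorded with
+     * LockType.RESTART, since that operation must be retried rather than
+     * simply resumed.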
+ */ + private void checkConflict(LockType firstRequest, + LockType secondRequest, + LockGrantType secondGrantType) + throws Exception { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + Locker txn1 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig()); + Locker txn2 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig()); + MemoryBudget mb = envImpl.getMemoryBudget(); + + try { + Lock lock = new LockImpl(); + + if (firstRequest != null) { + assertEquals(LockGrantType.NEW, + lock.lock(firstRequest, txn1, false, false, mb, 0). + lockGrant); + } + LockGrantType typeGranted = + lock.lock(secondRequest, txn2, false, false, mb, 0). + lockGrant; + assertEquals(secondGrantType, typeGranted); + + boolean wait = (typeGranted == LockGrantType.WAIT_NEW || + typeGranted == LockGrantType.WAIT_PROMOTION || + typeGranted == LockGrantType.WAIT_RESTART); + boolean given = (typeGranted == LockGrantType.NEW); + boolean restart = (typeGranted == LockGrantType.WAIT_RESTART); + + Set expectedOwners = new HashSet(); + List expectedWaiters = new ArrayList(); + + if (firstRequest != null) { + expectedOwners.add(new LockInfo(txn1, firstRequest)); + } + if (given) { + expectedOwners.add(new LockInfo(txn2, secondRequest)); + } else if (wait) { + if (restart) { + expectedWaiters.add(new LockInfo(txn2, LockType.RESTART)); + } else { + expectedWaiters.add(new LockInfo(txn2, secondRequest)); + } + } + + checkOwners(expectedOwners, lock, expectedWaiters.size()); + checkWaiters(expectedWaiters, lock); + + lock.release(txn1, mb, 0); + if (wait) { + if (restart) { + checkOwners(new HashSet(), lock, 0); + } else { + checkOwners(new HashSet(expectedWaiters), lock, 0); + } + } + lock.release(txn2, mb, 0); + assertEquals(0, lock.nOwners()); + assertEquals(0, lock.nWaiters()); + } catch (Exception e) { + e.printStackTrace(); + throw e; + } finally { + txn1.operationEnd(); + txn2.operationEnd(); + } + } + + /** + * Tests upgrades between range locks and all other lock types. 
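+     * A null expected grant type in the checkUpgrade calls below marks an
+     * illegal combination, for which Lock.lock is expected to throw an
+     * AssertionError.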
+ */ + @Test + public void testRangeUpgrades() + throws Exception { + + /* Owner has READ */ + checkUpgrade(LockType.READ, + LockType.RANGE_READ, + LockGrantType.EXISTING, + LockType.RANGE_READ); + checkUpgrade(LockType.READ, + LockType.RANGE_WRITE, + LockGrantType.PROMOTION, + LockType.RANGE_WRITE); + checkUpgrade(LockType.READ, + LockType.RANGE_INSERT, + null, + LockType.READ); + + /* Owner has WRITE */ + checkUpgrade(LockType.WRITE, + LockType.RANGE_READ, + LockGrantType.EXISTING, + LockType.RANGE_WRITE); + checkUpgrade(LockType.WRITE, + LockType.RANGE_WRITE, + LockGrantType.EXISTING, + LockType.RANGE_WRITE); + checkUpgrade(LockType.WRITE, + LockType.RANGE_INSERT, + null, + LockType.WRITE); + + /* Owner has RANGE_READ */ + checkUpgrade(LockType.RANGE_READ, + LockType.READ, + LockGrantType.EXISTING, + LockType.RANGE_READ); + checkUpgrade(LockType.RANGE_READ, + LockType.WRITE, + LockGrantType.PROMOTION, + LockType.RANGE_WRITE); + checkUpgrade(LockType.RANGE_READ, + LockType.RANGE_READ, + LockGrantType.EXISTING, + LockType.RANGE_READ); + checkUpgrade(LockType.RANGE_READ, + LockType.RANGE_WRITE, + LockGrantType.PROMOTION, + LockType.RANGE_WRITE); + checkUpgrade(LockType.RANGE_READ, + LockType.RANGE_INSERT, + null, + LockType.RANGE_READ); + + /* Owner has RANGE_WRITE */ + checkUpgrade(LockType.RANGE_WRITE, + LockType.READ, + LockGrantType.EXISTING, + LockType.RANGE_WRITE); + checkUpgrade(LockType.RANGE_WRITE, + LockType.WRITE, + LockGrantType.EXISTING, + LockType.RANGE_WRITE); + checkUpgrade(LockType.RANGE_WRITE, + LockType.RANGE_READ, + LockGrantType.EXISTING, + LockType.RANGE_WRITE); + checkUpgrade(LockType.RANGE_WRITE, + LockType.RANGE_WRITE, + LockGrantType.EXISTING, + LockType.RANGE_WRITE); + checkUpgrade(LockType.RANGE_WRITE, + LockType.RANGE_INSERT, + null, + LockType.RANGE_WRITE); + + /* Owner has RANGE_INSERT */ + checkUpgrade(LockType.RANGE_INSERT, + LockType.READ, + null, + LockType.RANGE_INSERT); + checkUpgrade(LockType.RANGE_INSERT, + LockType.WRITE, + null, + LockType.RANGE_INSERT); + checkUpgrade(LockType.RANGE_INSERT, + LockType.RANGE_READ, + null, + LockType.RANGE_INSERT); + checkUpgrade(LockType.RANGE_INSERT, + LockType.RANGE_WRITE, + null, + LockType.RANGE_INSERT); + checkUpgrade(LockType.RANGE_INSERT, + LockType.RANGE_INSERT, + LockGrantType.EXISTING, + LockType.RANGE_INSERT); + close(env); + } + + /** + * Tests that when the first request is held and the second request is + * requested, the second grant type is returned and the final type is then + * held. A null secondGrantType arg means that an assertion is expected. + */ + private void checkUpgrade(LockType firstRequest, + LockType secondRequest, + LockGrantType secondGrantType, + LockType finalType) + throws Exception { + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + Locker txn1 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig()); + MemoryBudget mb = envImpl.getMemoryBudget(); + + try { + Lock lock = new LockImpl(); + + assertEquals(LockGrantType.NEW, + lock.lock(firstRequest, txn1, false, false, mb, 0). + lockGrant); + LockGrantType typeGranted = null; + try { + typeGranted = + lock.lock(secondRequest, txn1, false, false, mb, 0). 
+                    lockGrant;
+                if (secondGrantType == null) {
+                    fail("expected AssertionError");
+                }
+            } catch (AssertionError e) {
+                if (secondGrantType != null) {
+                    fail(e.toString());
+                }
+            }
+            assertEquals(secondGrantType, typeGranted);
+
+            Set expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn1, finalType));
+            checkOwners(expectedOwners, lock, 0);
+            lock.release(txn1, mb, 0);
+            assertEquals(0, lock.nOwners());
+            assertEquals(0, lock.nWaiters());
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+        }
+    }
+
+    /**
+     * Tests that when a range read/write is requested, and a range insert is
+     * waiting but not held, a WAIT_RESTART occurs. This requires that the
+     * waiter list is examined by Lock.lock().
+     */
+    @Test
+    public void testRangeInsertWaiterConflict()
+        throws Exception {
+
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        Locker txn1 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn2 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        Locker txn3 = Txn.createLocalAutoTxn(envImpl, new TransactionConfig());
+        MemoryBudget mb = envImpl.getMemoryBudget();
+
+        try {
+            Lock lock = new LockImpl();
+            assertEquals(LockGrantType.NEW,
+                lock.lock(LockType.RANGE_READ, txn1, false, false,
+                          mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_NEW,
+                lock.lock(LockType.RANGE_INSERT, txn2, false, false,
+                          mb, 0).
+                lockGrant);
+            assertEquals(LockGrantType.WAIT_RESTART,
+                lock.lock(LockType.RANGE_READ, txn3, false, false,
+                          mb, 0).
+                lockGrant);
+
+            /* Check that 1 owner and 2 waiters exist. */
+
+            Set expectedOwners = new HashSet();
+            expectedOwners.add(new LockInfo(txn1, LockType.RANGE_READ));
+            checkOwners(expectedOwners, lock, 2);
+
+            List waiters = new ArrayList();
+            waiters.add(new LockInfo(txn2, LockType.RANGE_INSERT));
+            waiters.add(new LockInfo(txn3, LockType.RESTART));
+            checkWaiters(waiters, lock);
+
+            /* Release for the sake of the memory leak checking */
+            lock.release(txn1, mb, 0);
+            lock.release(txn2, mb, 0);
+            lock.release(txn3, mb, 0);
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        } finally {
+            txn1.operationEnd();
+            txn2.operationEnd();
+            txn3.operationEnd();
+            close(env);
+        }
+    }
+
+    private void checkOwners(Set<LockInfo> expectedOwners,
+                             Lock lock,
+                             int numExpectedWaiters) {
+
+        /* check number of owners. */
+        Set owners = lock.getOwnersClone();
+        assertEquals(expectedOwners.size(), owners.size());
+
+        /* check number of waiters. */
+        assertEquals(numExpectedWaiters, lock.nWaiters());
+
+        /* Make sure that isOwner returns the right thing. */
+        Iterator<LockInfo> iter = expectedOwners.iterator();
+        while (iter.hasNext()) {
+            LockInfo info = iter.next();
+
+            /* Make sure it's an owner, of the right type of lock. */
+            assertEquals(info.getLockType().isWriteLock(),
+                         lock.isOwnedWriteLock(info.getLocker()));
+            assertTrue(lock.isOwner(info.getLocker(), info.getLockType()));
+        }
+    }
+
+    private void checkWaiters(List<LockInfo> expectedWaiters,
+                              Lock lock) {
+        List waiters = lock.getWaitersListClone();
+        assertEquals(expectedWaiters.size(), waiters.size());
+
+        /* check order of the list.
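+         * The waiter list is FIFO, so each expected waiter must appear at
+         * the same position in the cloned list.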
*/ + for (int i = 0; i < expectedWaiters.size(); i++) { + LockInfo info = expectedWaiters.get(i); + LockInfo waiterInfo = (LockInfo) waiters.get(i); + assertEquals("i=" + i, info.getLocker(), waiterInfo.getLocker()); + assertEquals("i=" + i, + info.getLockType(), waiterInfo.getLockType()); + assertFalse(lock.isOwner(info.getLocker(), info.getLockType())); + assertTrue(lock.isWaiter(info.getLocker())); + } + } +} diff --git a/test/com/sleepycat/je/txn/ReadCommitLockersTest.java b/test/com/sleepycat/je/txn/ReadCommitLockersTest.java new file mode 100644 index 0000000..f4e227b --- /dev/null +++ b/test/com/sleepycat/je/txn/ReadCommitLockersTest.java @@ -0,0 +1,306 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.UnsupportedEncodingException; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DbTestProxy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.StatGroup; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/* + * This is a test for SR #23783. Before fixing the bug described there, the + * following scenario would cause a deadlock, when no real deadlock exists. + * + * 1. Cursor C1 in thread T1 reads a record R using Txn X1. C1 creates a + * ReadCommittedLocker L1, with X1 as its buddy. L1 locks R. + * + * 2. Cursor C2 in thread T2 tries to write-lock R, using another Txn X2. + * X2 waits for L1 (==> T2 waits for T1). + * + * 3. Cursor C3 in thread T1 tries to read R using X1. C3 creates a + * ReadCommittedLocker L3, with X1 as its buddy. L3 tries to lock R. 
L1 and
+ * L3 are not recognized as buddies, so L3 waits for X2 (==> T1 waits for T2).
+ */
+public class ReadCommitLockersTest extends TestBase {
+
+    File envHome;
+    Environment env;
+    Database db;
+
+    DatabaseEntry key;
+
+    Object synchronizer1 = new Object();
+    Object synchronizer2 = new Object();
+
+    boolean wait1 = true;
+    boolean wait2 = true;
+
+    public static DatabaseEntry createDatabaseEntry(String key) {
+
+        DatabaseEntry keyDBEntry = null;
+        try {
+            keyDBEntry = new DatabaseEntry(key.getBytes("UTF-8"));
+        } catch (UnsupportedEncodingException e) {
+            System.out.println("Unexpected UnsupportedEncodingException");
+        }
+
+        return keyDBEntry;
+    }
+
+    public ReadCommitLockersTest() {
+
+        envHome = SharedTestUtils.getTestDir();
+        DbEnvPool.getInstance().clear();
+
+        key = createDatabaseEntry("key");
+    }
+
+    @Before
+    public void setUp() throws Exception {
+
+        super.setUp();
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+
+        env = new Environment(envHome, envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+
+        db = env.openDatabase(null, "test", dbConfig);
+    }
+
+    @After
+    public void tearDown() {
+        try {
+            db.close();
+        } catch (DatabaseException ignored) {}
+        try {
+            env.close();
+        } catch (DatabaseException ignored) {}
+    }
+
+    @Test
+    public void runTest()
+        throws DatabaseException,
+               UnsupportedEncodingException,
+               InterruptedException {
+
+        /*
+         * Insert a record R in the DB.
+         */
+        Transaction txn = env.beginTransaction(null, null);
+        Cursor cursor = db.openCursor(txn, null);
+
+        DatabaseEntry data = createDatabaseEntry("data");
+
+        assertEquals(OperationStatus.SUCCESS, cursor.put(key, data));
+
+        cursor.close();
+        txn.commit();
+
+        /*
+         * Start thread T1 and then wait until it reads record R via a
+         * read-committed cursor C1.
+         */
+        Thread T1 = new TestThread1();
+        T1.start();
+
+        synchronized (synchronizer1) {
+            while (wait1) {
+                synchronizer1.wait();
+            }
+        }
+
+        /*
+         * Start thread T2. In the meantime, T1 is waiting on synchronizer2.
+         */
+        Thread T2 = new TestThread2();
+        T2.start();
+
+        /*
+         * Sleep for a while to allow T2 to block on its lock request.
+         */
+        while (T2.getState() == Thread.State.RUNNABLE ||
+               T2.getState() == Thread.State.NEW) {
+            Thread.sleep(10);
+        }
+
+        /*
+         * Let thread T1 continue with another search on R via another
+         * read-committed cursor C3.
+         */
+        synchronized (synchronizer2) {
+            wait2 = false;
+            synchronizer2.notify();
+        }
+
+        /*
+         * Wait for both T1 and T2 to terminate.
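+         * With the buddy-locker fix in place neither join should hang, and
+         * the record should end up holding the value written by T2.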
+ */ + T1.join(); + T2.join(); + + data = new DatabaseEntry(); + + cursor = db.openCursor(null, null); + + try { + OperationStatus status = cursor.getSearchKey(key, data, null); + assertTrue(status == OperationStatus.SUCCESS); + + String dataStr = new String(data.getData()); + System.out.println(dataStr); + assertTrue(dataStr.equals("newdata")); + } finally { + cursor.close(); + } + } + + private class TestThread1 extends Thread + { + Environment env; + Database db; + + DatabaseEntry key; + + Object synchronizer1; + Object synchronizer2; + + public TestThread1() { + super("Thread-1"); + this.env = ReadCommitLockersTest.this.env; + this.db = ReadCommitLockersTest.this.db; + this.synchronizer1 = ReadCommitLockersTest.this.synchronizer1; + this.synchronizer2 = ReadCommitLockersTest.this.synchronizer2; + this.key = ReadCommitLockersTest.this.key; + } + + @Override + public void run() { + + OperationStatus status; + CursorConfig config = new CursorConfig(); + config.setReadCommitted(true); + + DatabaseEntry data = new DatabaseEntry(); + data.setPartial(true); + + Transaction X1 = env.beginTransaction(null, null); + + /* Do a read-committed search for R via cursor C1 and txn X1. */ + Cursor C1 = db.openCursor(X1, config); + status = C1.getSearchKey(key, data, LockMode.DEFAULT); + assertTrue(status == OperationStatus.SUCCESS); + + /* Wake up the main thread so that it will start thread T2. */ + synchronized (synchronizer1) { + ReadCommitLockersTest.this.wait1 = false; + synchronizer1.notify(); + } + + /* Wait until thread T2 blocks trying to write-lock R. */ + synchronized (synchronizer2) { + while (ReadCommitLockersTest.this.wait2) { + try { + synchronizer2.wait(); + } catch (InterruptedException e) { + System.out.println("Unexpected InterruptedException"); + } + } + } + + /* Do a read-committed search for R via cursor C3 and txn X1. */ + Cursor C3 = db.openCursor(X1, config); + status = C3.getSearchKey(key, data, LockMode.DEFAULT); + assertTrue(status == OperationStatus.SUCCESS); + + C1.close(); + C3.close(); + X1.commit(); + } + } + + + private class TestThread2 extends Thread + { + Environment env; + Database db; + + DatabaseEntry key; + + public TestThread2() { + super("Thread-2"); + this.env = ReadCommitLockersTest.this.env; + this.db = ReadCommitLockersTest.this.db; + this.key = ReadCommitLockersTest.this.key; + } + + @Override + public void run() { + + boolean success = false; + OperationStatus status; + DatabaseEntry data = createDatabaseEntry("newdata"); + + Transaction X2 = env.beginTransaction(null, null); + + /* Update R via cursor C2 and txn X2. */ + Cursor C2 = db.openCursor(X2, null); + + try { + status = C2.put(key, data); + assertTrue(status == OperationStatus.SUCCESS); + success = true; + } finally { + C2.close(); + if (success) { + X2.commit(); + } else { + X2.abort(); + } + } + } + } +} diff --git a/test/com/sleepycat/je/txn/TwoPCTest.java b/test/com/sleepycat/je/txn/TwoPCTest.java new file mode 100644 index 0000000..ff1a926 --- /dev/null +++ b/test/com/sleepycat/je/txn/TwoPCTest.java @@ -0,0 +1,227 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; + +import javax.transaction.xa.XAResource; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionStats; +import com.sleepycat.je.XAEnvironment; +import com.sleepycat.je.log.LogUtils.XidImpl; +import com.sleepycat.je.util.StringDbt; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/* + * Simple 2PC transaction testing. + */ +public class TwoPCTest extends TestBase { + private final File envHome; + private XAEnvironment env; + private Database db; + + public TwoPCTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + env = new XAEnvironment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + } + + @After + public void tearDown() + throws Exception { + + db.close(); + env.close(); + } + + /** + * Basic Two Phase Commit calls. + */ + @Test + public void testBasic2PC() { + try { + TransactionStats stats = + env.getTransactionStats(TestUtils.FAST_STATS); + /* + * 4 commits for setting up XA env, opening cleaner dbs. 
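+             * All of the expected counts below are relative to these
+             * internal environment-setup transactions.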
+ */ + int numBegins = 4; + int numCommits = 4; + int numXAPrepares = 0; + int numXACommits = 0; + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(numXAPrepares, stats.getNXAPrepares()); + assertEquals(numXACommits, stats.getNXACommits()); + + Transaction txn = env.beginTransaction(null, null); + stats = env.getTransactionStats(TestUtils.FAST_STATS); + numBegins++; + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(numXAPrepares, stats.getNXAPrepares()); + assertEquals(numXACommits, stats.getNXACommits()); + assertEquals(1, stats.getNActive()); + + XidImpl xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest1"), null); + env.setXATransaction(xid, txn); + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(numXAPrepares, stats.getNXAPrepares()); + assertEquals(numXACommits, stats.getNXACommits()); + assertEquals(1, stats.getNActive()); + + StringDbt key = new StringDbt("key"); + StringDbt data = new StringDbt("data"); + db.put(txn, key, data); + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(numXAPrepares, stats.getNXAPrepares()); + assertEquals(numXACommits, stats.getNXACommits()); + assertEquals(1, stats.getNActive()); + + env.prepare(xid); + numXAPrepares++; + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(numXAPrepares, stats.getNXAPrepares()); + assertEquals(numXACommits, stats.getNXACommits()); + assertEquals(1, stats.getNActive()); + + env.commit(xid, false); + numCommits++; + numXACommits++; + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(numXAPrepares, stats.getNXAPrepares()); + assertEquals(numXACommits, stats.getNXACommits()); + assertEquals(0, stats.getNActive()); + } catch (Exception E) { + System.out.println("caught " + E); + } + } + + /** + * Basic readonly-prepare. + */ + @Test + public void testROPrepare() { + try { + Transaction txn = env.beginTransaction(null, null); + XidImpl xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest1"), null); + env.setXATransaction(xid, txn); + + assertEquals(XAResource.XA_RDONLY, env.prepare(xid)); + } catch (Exception E) { + System.out.println("caught " + E); + } + } + + /** + * Test calling prepare twice (should throw exception). + */ + @Test + public void testTwicePreparedTransaction() + throws Throwable { + + Transaction txn = env.beginTransaction(null, null); + XidImpl xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + env.setXATransaction(xid, txn); + StringDbt key = new StringDbt("key"); + StringDbt data = new StringDbt("data"); + db.put(txn, key, data); + + try { + env.prepare(xid); + env.prepare(xid); + fail("should not be able to prepare twice"); + } catch (Exception E) { + env.commit(xid, false); + } + } + + /** + * Test calling rollback(xid) on an unregistered xa txn. 
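+     * The xid is never associated with the txn via setXATransaction, so the
+     * rollback is expected to fail.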
+ */ + @Test + public void testRollbackNonExistent() + throws Throwable { + + Transaction txn = env.beginTransaction(null, null); + StringDbt key = new StringDbt("key"); + StringDbt data = new StringDbt("data"); + db.put(txn, key, data); + XidImpl xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + + try { + env.rollback(xid); + fail("should not be able to call rollback on an unknown xid"); + } catch (Exception E) { + } + txn.abort(); + } + + /** + * Test calling commit(xid) on an unregistered xa txn. + */ + @Test + public void testCommitNonExistent() + throws Throwable { + + Transaction txn = env.beginTransaction(null, null); + StringDbt key = new StringDbt("key"); + StringDbt data = new StringDbt("data"); + db.put(txn, key, data); + XidImpl xid = new XidImpl(1, StringUtils.toUTF8("TwoPCTest2"), null); + + try { + env.commit(xid, false); + fail("should not be able to call commit on an unknown xid"); + } catch (Exception E) { + } + txn.abort(); + } +} diff --git a/test/com/sleepycat/je/txn/TxnEndTest.java b/test/com/sleepycat/je/txn/TxnEndTest.java new file mode 100644 index 0000000..aff1b78 --- /dev/null +++ b/test/com/sleepycat/je/txn/TxnEndTest.java @@ -0,0 +1,808 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_ABORTS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_ACTIVE; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_ACTIVE_TXNS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_BEGINS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_COMMITS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_XAABORTS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_XACOMMITS; +import static com.sleepycat.je.dbi.TxnStatDefinition.TXN_XAPREPARES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.util.Arrays; +import java.util.Date; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionStats; +import com.sleepycat.je.VerifyConfig; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.junit.JUnitThread; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.ActiveTxnArrayStat; +import com.sleepycat.je.utilint.IntStat; +import com.sleepycat.je.utilint.LongStat; +import com.sleepycat.je.utilint.StatGroup; +import 
com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/* + * @excludeDualMode + * This test checks the value of nAborts, but the replication environment may + * legitimately have a nAborts value of 1 or 0, due to the + * transaction handling in RepImpl.openGroupDB. Exclude the test. + * + * Test transaction aborts and commits. + */ +public class TxnEndTest extends TestBase { + private static final int NUM_DBS = 1; + private Environment env; + private final File envHome; + private Database[] dbs; + private Cursor[] cursors; + private JUnitThread junitThread; + + public TxnEndTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + /* + * Run environment without in compressor on so we can check the + * compressor queue in a deterministic way. + */ + super.setUp(); + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentConfig.NODE_MAX_ENTRIES, "6"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_CLEANER, "false"); + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + } + + @After + public void tearDown() { + if (junitThread != null) { + junitThread.shutdown(); + junitThread = null; + } + + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.out.println("tearDown: " + e); + } + } + env = null; + TestUtils.removeFiles("TearDown", envHome, FileManager.JE_SUFFIX); + } + + private void createDbs() + throws DatabaseException { + + dbs = new Database[NUM_DBS]; + cursors = new Cursor[NUM_DBS]; + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + for (int i = 0; i < NUM_DBS; i++) { + dbs[i] = env.openDatabase(null, "testDB" + i, dbConfig); + } + } + + private void closeAll() + throws DatabaseException { + + for (int i = 0; i < NUM_DBS; i++) { + dbs[i].close(); + } + dbs = null; + env.close(); + env = null; + } + + /** + * Create cursors with this owning transaction + */ + private void createCursors(Transaction txn) + throws DatabaseException { + + for (int i = 0; i < cursors.length; i++) { + cursors[i] = dbs[i].openCursor(txn, null); + } + } + + /** + * Close the current set of cursors + */ + private void closeCursors() + throws DatabaseException { + + for (int i = 0; i < cursors.length; i++) { + cursors[i].close(); + } + } + + /** + * Insert keys from i=start; i 0); + + /* Modify data, abort, check that data is unchanged. */ + txn = env.beginTransaction(null, null); + createCursors(txn); + cursorModifyData(0, numKeys * 2, 1); + closeCursors(); + txn.abort(); + verifyData(numKeys*2, 0); + + /* Delete data, abort, check that data is still there. */ + txn = env.beginTransaction(null, null); + createCursors(txn); + cursorDeleteData(numKeys+1, numKeys*2); + closeCursors(); + txn.abort(); + verifyData(numKeys*2, 0); + /* Check the in compressor queue, nothing should be loaded. */ + envStat = env.getStats(TestUtils.FAST_STATS); + assertEquals(queueSize, envStat.getInCompQueueSize()); + + /* Delete data, commit, check that data is gone. 
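+             * A committed delete also makes the emptied BINs eligible for
+             * compression, which the queue-size check below verifies.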
*/ + txn = env.beginTransaction(null, null); + createCursors(txn); + cursorDeleteData(numKeys, numKeys*2); + closeCursors(); + txn.commit(); + verifyData(numKeys, 0); + + /* Check the inCompressor queue, there should be more entries. */ + envStat = env.getStats(TestUtils.FAST_STATS); + assertTrue(envStat.getInCompQueueSize() > queueSize); + + closeAll(); + + } catch (Throwable t) { + /* Print stacktrace before attempt to run tearDown. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Test that txn commit fails with open cursors. + */ + @Test + public void testTxnClose() + throws DatabaseException { + + createDbs(); + Transaction txn = env.beginTransaction(null, null); + createCursors(txn); + + try { + txn.commit(); + fail("Commit should fail, cursors are open."); + } catch (IllegalStateException e) { + txn.abort(); + } + closeCursors(); + + txn = env.beginTransaction(null, null); + createCursors(txn); + closeCursors(); + txn.commit(); + + try { + txn.abort(); + } catch (RuntimeException e) { + fail("Txn abort after commit shouldn't fail."); + } + + txn = env.beginTransaction(null, null); + createCursors(txn); + closeCursors(); + txn.abort(); + + try { + txn.abort(); + } catch (RuntimeException e) { + fail("Double abort shouldn't fail."); + } + + closeAll(); + } + + /** + * Test use through db. + */ + @Test + public void testBasicDb() + throws Throwable { + + try { + TransactionStats stats = + env.getTransactionStats(TestUtils.FAST_STATS); + int initialAborts = 0; + assertEquals(initialAborts, stats.getNAborts()); + /* 3 commits for adding cleaner dbs. */ + int initialCommits = 3; + assertEquals(initialCommits, stats.getNCommits()); + + long locale = new Date().getTime(); + TransactionStats.Active[] at = new TransactionStats.Active[4]; + + for(int i = 0; i < 4; i++) { + at[i] = new TransactionStats.Active("TransactionStatForTest", + i, i - 1); + } + + StatGroup group = new StatGroup("test", "test"); + ActiveTxnArrayStat arrayStat = + new ActiveTxnArrayStat(group, TXN_ACTIVE_TXNS, at); + new LongStat(group, TXN_ABORTS, 12); + new LongStat(group, TXN_XAABORTS, 15); + new IntStat(group, TXN_ACTIVE, 20); + new LongStat(group, TXN_BEGINS, 25); + new LongStat(group, TXN_COMMITS, 1); + new LongStat(group, TXN_XACOMMITS, 30); + new LongStat(group, TXN_XAPREPARES, 20); + stats = new TransactionStats(group); + + TransactionStats.Active[] at1 = stats.getActiveTxns(); + + for(int i = 0; i < 4; i++) { + assertEquals("TransactionStatForTest", at1[i].getName()); + assertEquals(i, at1[i].getId()); + assertEquals(i - 1, at1[i].getParentId()); + at1[i].toString(); + } + assertEquals(12, stats.getNAborts()); + assertEquals(15, stats.getNXAAborts()); + assertEquals(20, stats.getNActive()); + assertEquals(25, stats.getNBegins()); + assertEquals(1, stats.getNCommits()); + assertEquals(30, stats.getNXACommits()); + assertEquals(20, stats.getNXAPrepares()); + stats.toString(); + + arrayStat.set(null); + stats.toString(); + + int numKeys = 7; + createDbs(); + + /* Insert data with autocommit. */ + dbInsertData(0, numKeys, null); + verifyData(numKeys, 0); + + /* Insert data with a txn. 
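+             * The explicit commit below should add exactly one commit to
+             * the statistics, beyond the autocommit txns used for the
+             * database creates and per-record inserts above.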
*/ + Transaction txn = env.beginTransaction(null, null); + dbInsertData(numKeys, numKeys*2, txn); + txn.commit(); + verifyData(numKeys*2, 0); + + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(initialAborts, stats.getNAborts()); + assertEquals((initialCommits + 1 + // 1 explicit commit above + (1 * NUM_DBS) + // 1 per create/open + (numKeys*NUM_DBS)), // 1 per record, using autotxn + stats.getNCommits()); + + /* Delete data with a txn, abort. */ + txn = env.beginTransaction(null, null); + dbDeleteData(numKeys, numKeys * 2, txn); + verifyData(numKeys, 0); // verify w/dirty read + txn.abort(); + + closeAll(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test TransactionStats. + */ + @Test + public void testTxnStats() + throws Throwable { + + try { + TransactionStats stats = + env.getTransactionStats(TestUtils.FAST_STATS); + int initialAborts = 0; + assertEquals(initialAborts, stats.getNAborts()); + /* 3 commits for adding cleaner dbs. */ + int numBegins = 3; + int numCommits = 3; + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + + int numKeys = 7; + createDbs(); + numBegins += NUM_DBS; // 1 begins per database + numCommits += NUM_DBS; // 1 commits per database + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + + /* Insert data with autocommit. */ + dbInsertData(0, numKeys, null); + numBegins += (numKeys * NUM_DBS); + numCommits += (numKeys * NUM_DBS); + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + verifyData(numKeys, 0); + + /* Insert data with a txn. */ + Transaction txn = env.beginTransaction(null, null); + numBegins++; + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(1, stats.getNActive()); + dbInsertData(numKeys, numKeys*2, txn); + txn.commit(); + numCommits++; + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(0, stats.getNActive()); + verifyData(numKeys*2, 0); + + /* Delete data with a txn, abort. 
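+             * The abort should bump the abort counter by one and leave no
+             * active transactions.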
*/ + txn = env.beginTransaction(null, null); + numBegins++; + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(1, stats.getNActive()); + + dbDeleteData(numKeys, numKeys * 2, txn); + verifyData(numKeys, 0); // verify w/dirty read + txn.abort(); + stats = env.getTransactionStats(TestUtils.FAST_STATS); + assertEquals(numBegins, stats.getNBegins()); + assertEquals(numCommits, stats.getNCommits()); + assertEquals(initialAborts + 1, stats.getNAborts()); + assertEquals(0, stats.getNActive()); + + closeAll(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + /** + * Test db creation and deletion + */ + + @Test + public void testDbCreation() + throws DatabaseException { + + Transaction txnA = env.beginTransaction(null, null); + Transaction txnB = env.beginTransaction(null, null); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + Database dbA = + env.openDatabase(txnA, "foo", dbConfig); + + /* Try to see this database with another txn -- we should not see it. */ + + dbConfig.setAllowCreate(false); + + try { + txnB.setLockTimeout(1000); + + env.openDatabase(txnB, "foo", dbConfig); + fail("Shouldn't be able to open foo"); + } catch (DatabaseException e) { + } + + /* txnB must be aborted since openDatabase timed out. */ + txnB.abort(); + + /* Open this database with the same txn and another handle. */ + Database dbC = + env.openDatabase(txnA, "foo", dbConfig); + + /* Now commit txnA and txnB should be able to open this. */ + txnA.commit(); + txnB = env.beginTransaction(null, null); + Database dbB = + env.openDatabase(txnB, "foo", dbConfig); + txnB.commit(); + + /* XXX, test db deletion. */ + + dbA.close(); + dbB.close(); + dbC.close(); + } + + /* Test that the transaction is unusable after a close. */ + @Test + public void testClose() + throws DatabaseException { + + Transaction txnA = env.beginTransaction(null, null); + txnA.commit(); + + try { + env.openDatabase(txnA, "foo", null); + fail("Should not be able to use a closed transaction"); + } catch (IllegalArgumentException expected) { + } + } + + /** + * Simulates a race condition between two threads that previously caused a + * latch deadlock. [#19321] + * + * One thread is aborting a txn. The other thread is using the same txn to + * perform a cursor operation. While the BIN is held, it attempts to get a + * non-blocking lock. + */ + @Test + public void testAbortLatchDeadlock() { + + /* Create DB. */ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + final Database db = env.openDatabase(null, "foo", dbConfig); + + /* Insert one record. */ + final DatabaseEntry key = new DatabaseEntry(new byte[1]); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + assertSame(OperationStatus.SUCCESS, + db.putNoOverwrite(null, key, data)); + + /* Begin txn, to be shared by both threads. */ + final Transaction txn = env.beginTransaction(null, null); + + /* Simulate cursor operation that latches BIN. */ + final Cursor cursor = db.openCursor(txn, null); + assertSame(OperationStatus.SUCCESS, cursor.put(key, data)); + final CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor); + cursorImpl.latchBIN(); + + /* Run abort in a separate thread. 
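+         * The abort will block trying to latch the BIN that the cursor
+         * currently holds, recreating the race from [#19321].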
*/
+        junitThread = new JUnitThread("testAbortLatchDeadlock") {
+            @Override
+            public void testBody() {
+
+                /*
+                 * The cursor is not closed before the abort is allowed to
+                 * continue, sometimes causing an "open cursors" exception,
+                 * depending on timing. This is acceptable for this test,
+                 * since we are checking that a hang does not occur.
+                 */
+                try {
+                    txn.abort();
+                } catch (IllegalStateException e) {
+                    assertTrue(e.getMessage().contains
+                        ("detected open cursors"));
+                }
+            }
+        };
+        junitThread.start();
+
+        /*
+         * Wait for the abort to attempt to latch the BIN.
+         *
+         * Because the BtreeVerifier code now also needs to latch the BIN,
+         * the previous condition 'cursorImpl.getBIN().getLatchNWaiters() == 1'
+         * cannot guarantee that txn.abort has already attempted to latch
+         * the BIN. 'cursorImpl.getBIN().getLatchNWaiters() == 2' does
+         * guarantee that the BtreeVerifier and txn.abort are both waiting
+         * for the latch.
+         *
+         * However, this new condition may have to wait for the BtreeVerifier
+         * to run its verify pass; since the verifier is scheduled to run
+         * every minute in unit tests, satisfying the condition may take up
+         * to one minute.
+         */
+        while (cursorImpl.getBIN().getLatchNWaiters() < 2) {
+            try {
+                Thread.sleep(1);
+            } catch (InterruptedException e) {
+                fail();
+            }
+        }
+
+        /*
+         * Simulate cursor operation that gets non-blocking lock. Before the
+         * fix [#19321], a latch deadlock would occur here.
+         */
+        try {
+            cursorImpl.getLocker().nonBlockingLock
+                (123L, LockType.WRITE, false, DbInternal.getDbImpl(db));
+            fail();
+        } catch (IllegalStateException expected) {
+        } finally {
+            /* Release latch, allow abort to continue. */
+            cursorImpl.releaseBIN();
+            cursor.close();
+        }
+
+        /* Finish test. */
+        Throwable t = null;
+        try {
+            junitThread.finishTest();
+        } catch (Throwable e) {
+            t = e;
+        } finally {
+            junitThread = null;
+        }
+        if (t != null) {
+            t.printStackTrace();
+            fail(t.toString());
+        }
+
+        db.close();
+    }
+
+    /*
+     * Test the case where truncateDatabase and removeDatabase operations are
+     * done on the same database in the same txn. [#19636]
+     */
+    @Test
+    public void testTruncateDeleteDB() {
+        /* Create test DB. */
+        final DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        String dbName = "test-db";
+
+        /* Test a single truncation and removal. */
+        Database db = env.openDatabase(null, dbName, dbConfig);
+        db.close();
+        Transaction txn = env.beginTransaction(null, null);
+        /* Truncate and remove a database in the same txn. */
+        env.truncateDatabase(txn, dbName, false);
+        env.removeDatabase(txn, dbName);
+        txn.abort();
+        /* No database is removed after aborting the txn. */
+        assertEquals(1, env.getDatabaseNames().size());
+        txn = env.beginTransaction(null, null);
+        env.truncateDatabase(txn, dbName, false);
+        env.removeDatabase(txn, dbName);
+        txn.commit();
+        /* The database has been removed after committing the txn. */
+        assertEquals(0, env.getDatabaseNames().size());
+
+        /* Test multiple truncations before a single removal. */
+        db = env.openDatabase(null, dbName, dbConfig);
+        db.close();
+        txn = env.beginTransaction(null, null);
+        /* Truncate a database three times then remove it in the same txn. */
+        env.truncateDatabase(txn, dbName, false);
+        env.truncateDatabase(txn, dbName, false);
+        env.truncateDatabase(txn, dbName, false);
+        env.removeDatabase(txn, dbName);
+        txn.abort();
+        /* No database is removed after aborting the txn.
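+         *
+         * [Editor's note, not part of the original import: the pattern under
+         * test is that these DDL-like operations participate in transactions.
+         * A minimal sketch of the committed path, assuming an open
+         * transactional Environment 'env' and an existing database "test-db":
+         *
+         *   Transaction t = env.beginTransaction(null, null);
+         *   long count = env.truncateDatabase(t, "test-db", true); // count records
+         *   env.removeDatabase(t, "test-db");
+         *   t.commit(); // both operations become durable atomically
+         *
+         * Aborting 't' instead leaves the database and its records intact.]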
*/ + assertEquals(1, env.getDatabaseNames().size()); + txn = env.beginTransaction(null, null); + env.truncateDatabase(txn, dbName, false); + env.truncateDatabase(txn, dbName, false); + env.truncateDatabase(txn, dbName, false); + env.removeDatabase(txn, dbName); + txn.commit(); + /* the database has been removed after committing the txn. */ + assertEquals(0, env.getDatabaseNames().size()); + } +} diff --git a/test/com/sleepycat/je/txn/TxnFSyncTest.java b/test/com/sleepycat/je/txn/TxnFSyncTest.java new file mode 100644 index 0000000..2792e23 --- /dev/null +++ b/test/com/sleepycat/je/txn/TxnFSyncTest.java @@ -0,0 +1,149 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static org.junit.Assert.assertEquals; + +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DbEnvPool; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.TxnTestCase; +import com.sleepycat.utilint.StringUtils; + +/* + * Make sure that transactions sync to disk. Mimic a crash by failing to + * close the environment and explicitly flush the log manager. If we haven't + * properly written and synced data to disk, we'll have unflushed data and + * we won't find the expected data in the log. + * + * Note that this test is run with the TxnTestCase framework and will + * be exercised with app-created and autocommit txns. + */ +@RunWith(Parameterized.class) +public class TxnFSyncTest extends TxnTestCase { + + private static final int NUM_RECS = 5; + + private static EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + static { + envConfig.setAllowCreate(true); + setupEnvConfig(envConfig); + } + + private static void setupEnvConfig(EnvironmentConfig envConfig) { + envConfig.setTransactional(true); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + } + + @Parameters + public static List genParams() { + return getTxnParams( + new String[] {TxnTestCase.TXN_USER, TxnTestCase.TXN_AUTO}, false); + } + + public TxnFSyncTest(String type){ + super.envConfig = envConfig; + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + + @Test + public void testFSyncButNoClose() + throws Exception { + + try { + /* Create a database. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(isTransactional); + dbConfig.setAllowCreate(true); + Transaction txn = txnBegin(); + Database db = env.openDatabase(txn, "foo", dbConfig); + + /* Insert data. 
*/ + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < NUM_RECS; i++) { + Integer val = new Integer(i); + key.setData(StringUtils.toUTF8(val.toString())); + data.setData(StringUtils.toUTF8(val.toString())); + + assertEquals(OperationStatus.SUCCESS, + db.putNoOverwrite(txn, key, data)); + } + txnCommit(txn); + + /* + * Now throw away this environment WITHOUT flushing the log + * manager. We do need to release the environment file lock + * and all file handles so we can recover in this test and + * run repeated test cases within this one test program. + */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + envImpl.getFileManager().clear(); // release file handles + envImpl.getFileManager().close(); // release file lock + envImpl.closeHandlers(); // release logging files + env = null; + DbEnvPool.getInstance().clear(); + + /* + * Open the environment and database again. The database should + * exist. + */ + EnvironmentConfig envConfig2 = TestUtils.initEnvConfig(); + setupEnvConfig(envConfig2); + env = create(envHome, envConfig2); + dbConfig.setAllowCreate(false); + db = env.openDatabase(null, "foo", dbConfig); + + /* Read all the data. */ + for (int i = 0; i < NUM_RECS; i++) { + Integer val = new Integer(i); + key.setData(StringUtils.toUTF8(val.toString())); + + assertEquals(OperationStatus.SUCCESS, + db.get(null, key, data, LockMode.DEFAULT)); + /* add test of data. */ + } + db.close(); + env.close(); + env = null; + } catch (Exception e) { + e.printStackTrace(); + throw e; + } finally { + if (env != null) { + env.close(); + } + } + } +} diff --git a/test/com/sleepycat/je/txn/TxnMemoryTest.java b/test/com/sleepycat/je/txn/TxnMemoryTest.java new file mode 100644 index 0000000..7de78af --- /dev/null +++ b/test/com/sleepycat/je/txn/TxnMemoryTest.java @@ -0,0 +1,305 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +@RunWith(Parameterized.class) +public class TxnMemoryTest extends DualTestCase { + private static final boolean DEBUG = false; + private static final String DB_NAME = "foo"; + + private static final String LOCK_AUTOTXN = "lock-autotxn"; + private static final String LOCK_USERTXN = "lock-usertxn"; + private static final String LOCK_NOTXN = "lock-notxn"; + private static final String COMMIT = "commit"; + private static final String ABORT = "abort"; + private static final String[] END_MODE = {COMMIT, ABORT}; + + private final File envHome; + private Environment env; + private EnvironmentImpl envImpl; + private MemoryBudget mb; + private Database db; + private final DatabaseEntry keyEntry = new DatabaseEntry(); + private final DatabaseEntry dataEntry = new DatabaseEntry(); + private final String lockMode; + private final String endMode; + + private long beforeAction; + private long afterTxnsCreated; + private long afterAction; + private Transaction[] txns; + + private final int numTxns = 2; + private final int numRecordsPerTxn = 30; + + @Parameters + public static List genParams() { + return paramsHelper(false); + } + + public static List paramsHelper(boolean rep) { + String[] testModes = null; + + if (rep){ + testModes = new String[] {LOCK_USERTXN}; + } else { + testModes = new String[] {LOCK_AUTOTXN, LOCK_USERTXN, LOCK_NOTXN}; + } + List list = new ArrayList(); + + for (String testMode : testModes) { + for (String eMode : END_MODE) { + list.add(new Object[] {testMode, eMode}); + } + } + + return list; + } + + public TxnMemoryTest(String testMode, String eMode) { + envHome = SharedTestUtils.getTestDir(); + this.lockMode = testMode; + this.endMode = eMode; + customName = lockMode + '-' + endMode; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + + IN.ACCUMULATED_LIMIT = 0; + Txn.ACCUMULATED_LIMIT = 0; + + } + + /** + * Opens the environment and database. + */ + private void openEnv() + throws DatabaseException { + + EnvironmentConfig config = TestUtils.initEnvConfig(); + + /* + * ReadCommitted isolation is not allowed by this test because we + * expect no locks/memory to be freed when using a transaction. + */ + DbInternal.setTxnReadCommitted(config, false); + + /* Cleaner detail tracking adds to the memory budget; disable it. 
*/ + config.setConfigParam + (EnvironmentParams.CLEANER_TRACK_DETAIL.getName(), "false"); + + config.setTransactional(true); + config.setAllowCreate(true); + env = create(envHome, config); + envImpl = DbInternal.getNonNullEnvImpl(env); + mb = envImpl.getMemoryBudget(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(!lockMode.equals(LOCK_NOTXN)); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + } + + /** + * Closes the environment and database. + */ + private void closeEnv(boolean doCheckpoint) + throws DatabaseException { + + if (db != null) { + db.close(); + db = null; + } + if (env != null) { + close(env); + env = null; + } + } + + /** + * Insert and then update some records. Measure memory usage at different + * points in this sequence, asserting that the memory usage count is + * properly decremented. + */ + @Test + public void testWriteLocks() + throws DatabaseException { + + loadData(); + + /* + * Now update the database transactionally. This should not change + * the node related memory, but should add txn related cache + * consumption. If this is a user transaction, we should + * hold locks and consume more memory. + */ + for (int t = 0; t < numTxns; t++) { + for (int i = 0; i < numRecordsPerTxn; i++) { + int value = i + (t*numRecordsPerTxn); + IntegerBinding.intToEntry(value, keyEntry); + IntegerBinding.intToEntry(value+1, dataEntry); + assertEquals(db.put(txns[t], keyEntry, dataEntry), + OperationStatus.SUCCESS); + } + } + afterAction = mb.getLockMemoryUsage(); + + closeTxns(true); + } + + /** + * Insert and then scan some records. Measure memory usage at different + * points in this sequence, asserting that the memory usage count is + * properly decremented. + */ + @Test + public void testReadLocks() + throws DatabaseException { + + loadData(); + + /* + * Now scan the database. Make sure all locking overhead is + * released. + */ + for (int t = 0; t < numTxns; t++) { + Cursor c = db.openCursor(txns[t], null); + while (c.getNext(keyEntry, dataEntry, null) == + OperationStatus.SUCCESS) { + } + c.close(); + } + afterAction = mb.getLockMemoryUsage(); + + closeTxns(false); + } + + private void loadData() + throws DatabaseException { + + openEnv(); + + /* Build up a database to establish a given cache size. */ + for (int t = 0; t < numTxns; t++) { + for (int i = 0; i < numRecordsPerTxn; i++) { + + int value = i + (t*numRecordsPerTxn); + IntegerBinding.intToEntry(value, keyEntry); + IntegerBinding.intToEntry(value, dataEntry); + assertEquals(db.put(null, keyEntry, dataEntry), + OperationStatus.SUCCESS); + } + } + + beforeAction = mb.getLockMemoryUsage(); + + /* Make some transactions. */ + txns = new Transaction[numTxns]; + if (lockMode.equals(LOCK_USERTXN)) { + for (int t = 0; t < numTxns; t++) { + txns[t] = env.beginTransaction(null, null); + } + + afterTxnsCreated = mb.getLockMemoryUsage(); + assertTrue( "afterTxns=" + afterTxnsCreated + + "beforeUpdate=" + beforeAction, + (afterTxnsCreated > beforeAction)); + } + } + + private void closeTxns(boolean writesDone) + throws DatabaseException { + + assertTrue(afterAction > afterTxnsCreated); + + /* + * If this is not a user transactional lock, we should be done + * with all locking overhead. If it is a user transaction, we + * only release memory after locks are released at commit or + * abort. + */ + if (lockMode.equals(LOCK_USERTXN)) { + + /* + * Note: expectedLockUsage is annoyingly fragile. 
If we change + * the lock implementation, this may not be the right number + * to check. + */ + long expectedLockUsage = + (numRecordsPerTxn * numTxns * + MemoryBudget.THINLOCKIMPL_OVERHEAD); + + assertTrue((afterAction - afterTxnsCreated) >= expectedLockUsage); + + for (int t = 0; t < numTxns; t++) { + Transaction txn = txns[t]; + if (endMode.equals(COMMIT)) { + txn.commit(); + } else { + txn.abort(); + } + } + + long afterTxnEnd = mb.getLockMemoryUsage(); + + assertTrue("lockMode=" + lockMode + + " endMode=" + endMode + + " afterTxnEnd=" + afterTxnEnd + + " beforeAction=" + beforeAction, + (afterTxnEnd <= beforeAction)); + } + if (DEBUG) { + System.out.println("afterUpdate = " + afterAction + + " before=" + beforeAction); + } + + closeEnv(true); + } +} diff --git a/test/com/sleepycat/je/txn/TxnTest.java b/test/com/sleepycat/je/txn/TxnTest.java new file mode 100644 index 0000000..2156503 --- /dev/null +++ b/test/com/sleepycat/je/txn/TxnTest.java @@ -0,0 +1,1240 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_FSYNCS; +import static com.sleepycat.je.log.LogStatDefinition.GROUP_NAME; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS; +import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.LockNotAvailableException; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.DatabaseImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.log.ReplicationContext; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.WithRootLatched; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.je.utilint.StatGroup; +import 
com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import org.junit.Before; +import org.junit.Test; + +/* + * Simple transaction testing + */ +public class TxnTest extends DualTestCase { + private final File envHome; + private Environment env; + private Database db; + + public TxnTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Override + @Before + public void setUp() + throws Exception { + + super.setUp(); + + /* Ignore the node equality check in replicated test. */ + if (isReplicatedTest(getClass())) { + resetNodeEqualityCheck(); + } + + IN.ACCUMULATED_LIMIT = 0; + Txn.ACCUMULATED_LIMIT = 0; + } + + private void createEnv() { + try { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), + "6"); + envConfig.setTransactional(true); + envConfig.setAllowCreate(true); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = create(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, "foo", dbConfig); + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + + private void closeEnv() { + try { + if (db != null) { + db.close(); + } + db = null; + if (env != null) { + close(env); + } + env = null; + } catch (DatabaseException e) { + e.printStackTrace(); + } + } + + /** + * Test transaction locking and releasing. + */ + @Test + public void testBasicLocking() + throws Throwable { + + createEnv(); + + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + long lsn = envImpl.getNodeSequence().getNextTransientLsn(); + + /* + * Make a null txn that will lock. Take a lock and then end the + * operation. + */ + MemoryBudget mb = envImpl.getMemoryBudget(); + + long beforeLock = mb.getCacheMemoryUsage(); + Locker nullTxn = BasicLocker.createBasicLocker(envImpl); + + LockGrantType lockGrant = nullTxn.lock + (lsn, LockType.READ, false, + DbInternal.getDbImpl(db)). + getLockGrant(); + assertEquals(LockGrantType.NEW, lockGrant); + long afterLock = mb.getCacheMemoryUsage(); + checkHeldLocks(nullTxn, 1, 0); + + nullTxn.releaseNonTxnLocks(); + long afterRelease = mb.getCacheMemoryUsage(); + checkHeldLocks(nullTxn, 0, 0); + checkCacheUsage(beforeLock, afterLock, afterRelease, + LockManager.TOTAL_THINLOCKIMPL_OVERHEAD); + + /* Take a lock, release it. */ + beforeLock = mb.getCacheMemoryUsage(); + lockGrant = nullTxn.lock + (lsn, LockType.READ, false, + DbInternal.getDbImpl(db)). + getLockGrant(); + afterLock = mb.getCacheMemoryUsage(); + assertEquals(LockGrantType.NEW, lockGrant); + checkHeldLocks(nullTxn, 1, 0); + + nullTxn.releaseLock(lsn); + checkHeldLocks(nullTxn, 0, 0); + afterRelease = mb.getCacheMemoryUsage(); + checkCacheUsage(beforeLock, afterLock, afterRelease, + LockManager.TOTAL_THINLOCKIMPL_OVERHEAD); + + /* + * Make a user transaction, check lock and release. + */ + beforeLock = mb.getCacheMemoryUsage(); + + /* Use a Master replication context in a replicated test. */ + ReplicationContext context = null; + if (isReplicatedTest(getClass())) { + context = ReplicationContext.MASTER; + } else { + context = ReplicationContext.NO_REPLICATE; + } + Txn userTxn = Txn.createLocalTxn(envImpl, new TransactionConfig()); + + lockGrant = userTxn.lock + (lsn, LockType.READ, false, + DbInternal.getDbImpl(db)). 
+                getLockGrant();
+            afterLock = mb.getCacheMemoryUsage();
+
+            assertEquals(LockGrantType.NEW, lockGrant);
+            checkHeldLocks(userTxn, 1, 0);
+
+            /* Try demoting, nothing should happen. */
+            try {
+                userTxn.demoteLock(lsn);
+                fail("exception not thrown on phony demoteLock");
+            } catch (AssertionError e){
+            }
+            checkHeldLocks(userTxn, 1, 0);
+            long afterDemotion = mb.getCacheMemoryUsage();
+            assertEquals(afterLock, afterDemotion);
+
+            /* Make it a write lock, then demote. */
+            lockGrant = userTxn.lock
+                (lsn, LockType.WRITE, false,
+                 DbInternal.getDbImpl(db)).
+                getLockGrant();
+            assertEquals(LockGrantType.PROMOTION, lockGrant);
+            long afterWriteLock = mb.getCacheMemoryUsage();
+            assertTrue(afterWriteLock > afterLock);
+            assertTrue(afterLock > beforeLock);
+
+            checkHeldLocks(userTxn, 0, 1);
+            userTxn.demoteLock(lsn);
+            checkHeldLocks(userTxn, 1, 0);
+
+            /* Shouldn't release at operation end. */
+            userTxn.operationEnd();
+            checkHeldLocks(userTxn, 1, 0);
+
+            userTxn.releaseLock(lsn);
+            checkHeldLocks(userTxn, 0, 0);
+            userTxn.commit(Durability.COMMIT_SYNC);
+            afterRelease = mb.getCacheMemoryUsage();
+            assertTrue(afterLock > beforeLock);
+
+            closeEnv();
+        } catch (Throwable t) {
+            /* Print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    /**
+     * Test lock mutation.
+     */
+    @Test
+    public void testLockMutation()
+        throws Throwable {
+
+        createEnv();
+
+        try {
+
+            EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+            long lsn = envImpl.getNodeSequence().getNextTransientLsn();
+
+            MemoryBudget mb = envImpl.getMemoryBudget();
+
+            long beforeLock = mb.getCacheMemoryUsage();
+            Txn userTxn1 = Txn.createUserTxn(envImpl, new TransactionConfig());
+            Txn userTxn2 = Txn.createUserTxn(envImpl, new TransactionConfig());
+
+            EnvironmentStats envStats = env.getStats(null);
+            assertEquals(1, envStats.getNTotalLocks());
+            LockGrantType lockGrant1 = userTxn1.lock
+                (lsn, LockType.READ, false,
+                 DbInternal.getDbImpl(db)).
+                getLockGrant();
+            assertEquals(LockGrantType.NEW, lockGrant1);
+            checkHeldLocks(userTxn1, 1, 0);
+            envStats = env.getStats(null);
+            assertEquals(2, envStats.getNTotalLocks());
+
+            try {
+                userTxn2.lock(lsn, LockType.WRITE, false,
+                              DbInternal.getDbImpl(db)).getLockGrant();
+            } catch (LockConflictException DE) {
+                // ok
+            }
+            envStats = env.getStats(null);
+            assertEquals(2, envStats.getNTotalLocks());
+            checkHeldLocks(userTxn2, 0, 0);
+
+            userTxn1.commit();
+            userTxn2.abort(false);
+
+            /*
+             * The replicated tests use a shared cache to reduce memory
+             * usage, which would make this check fail. Ignore it in
+             * replicated tests.
+             */
+            long afterRelease = mb.getCacheMemoryUsage();
+            if (!isReplicatedTest(getClass())) {
+                assertEquals(beforeLock, afterRelease);
+            }
+
+            closeEnv();
+        } catch (Throwable t) {
+            /* Print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        }
+    }
+
+    private void checkHeldLocks(Locker txn,
+                                int numReadLocks,
+                                int numWriteLocks)
+        throws DatabaseException {
+
+        StatGroup stat = txn.collectStats();
+        assertEquals(numReadLocks, stat.getInt(LOCK_READ_LOCKS));
+        assertEquals(numWriteLocks, stat.getInt(LOCK_WRITE_LOCKS));
+    }
+
+    /**
+     * Test transaction commit, from the locking point of view.
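+     *
+     * [Editor's note, illustrative only: the grant types asserted below
+     * follow the usual lock-upgrade lifecycle, sketched here with the
+     * internal Txn API this test uses:
+     *
+     *   txn.lock(lsn, LockType.READ, ...)   -> LockGrantType.NEW
+     *   txn.lock(lsn, LockType.WRITE, ...)  -> LockGrantType.PROMOTION
+     *   txn.lock(lsn, LockType.READ, ...)   -> LockGrantType.EXISTING
+     *   txn.commit(...)                     -> all locks released
+     *
+     * This mirrors the sequence exercised in the method body.]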
+ */ + @Test + public void testCommit() + throws Throwable { + + createEnv(); + + try { + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + long lsn1 = envImpl.getNodeSequence().getNextTransientLsn(); + long lsn2 = envImpl.getNodeSequence().getNextTransientLsn(); + + /* Use a Master replication context in a replicated test. */ + ReplicationContext context = null; + if (isReplicatedTest(getClass())) { + context = ReplicationContext.MASTER; + } else { + context = ReplicationContext.NO_REPLICATE; + } + Txn userTxn = Txn.createUserTxn(envImpl, new TransactionConfig()); + + /* Get read lock 1. */ + LockGrantType lockGrant = userTxn.lock + (lsn1, LockType.READ, false, + DbInternal.getDbImpl(db)). + getLockGrant(); + assertEquals(LockGrantType.NEW, lockGrant); + checkHeldLocks(userTxn, 1, 0); + + /* Get read lock 2. */ + lockGrant = userTxn.lock + (lsn2, LockType.READ, false, + DbInternal.getDbImpl(db)). + getLockGrant(); + assertEquals(LockGrantType.NEW, lockGrant); + checkHeldLocks(userTxn, 2, 0); + + /* Upgrade read lock 2 to a write. */ + lockGrant = userTxn.lock + (lsn2, LockType.WRITE, false, + DbInternal.getDbImpl(db)). + getLockGrant(); + assertEquals(LockGrantType.PROMOTION, lockGrant); + checkHeldLocks(userTxn, 1, 1); + + /* Read lock 1 again, shouldn't increase count. */ + lockGrant = userTxn.lock + (lsn1, LockType.READ, false, + DbInternal.getDbImpl(db)). + getLockGrant(); + assertEquals(LockGrantType.EXISTING, lockGrant); + checkHeldLocks(userTxn, 1, 1); + + /* + * The commit won't actually write a log record if this + * transaction has never done an update, so fake it out and simulate + * a write. + */ + userTxn.addLogInfo(DbLsn.makeLsn(1, 1000)); + long commitLsn = userTxn.commit(Durability.COMMIT_SYNC); + checkHeldLocks(userTxn, 0, 0); + + TxnCommit commitRecord = + (TxnCommit) envImpl.getLogManager().getEntry(commitLsn); + + assertEquals(userTxn.getId(), commitRecord.getId()); + assertEquals(userTxn.getLastLsn(), commitRecord.getLastLsn()); + + closeEnv(); + } catch (Throwable t) { + /* Print stack trace before going to teardown. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Make sure an abort never tries to split the tree. + */ + @Test + public void testAbortNoSplit() + throws Throwable { + + createEnv(); + + try { + Transaction txn = env.beginTransaction(null, null); + + DatabaseEntry keyDbt = new DatabaseEntry(); + DatabaseEntry dataDbt = new DatabaseEntry(); + dataDbt.setData(new byte[1]); + + /* Insert enough data so that the tree is ripe for a split. */ + int numForSplit = 25; + for (int i = 0; i < numForSplit; i++) { + keyDbt.setData(TestUtils.getTestArray(i)); + db.put(txn, keyDbt, dataDbt); + } + + /* Check that we're ready for a split. */ + DatabaseImpl database = DbInternal.getDbImpl(db); + CheckReadyToSplit splitChecker = new CheckReadyToSplit(database); + database.getTree().withRootLatchedShared(splitChecker); + assertTrue(splitChecker.getReadyToSplit()); + + /* + * Make another txn that will get a read lock on the map + * LSN. Then abort the first txn. It shouldn't try to do a + * split, if it does, we'll run into the + * no-latches-while-locking check. 
+ */ + Transaction txnSpoiler = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + Database dbSpoiler = env.openDatabase(txnSpoiler, "foo", dbConfig); + + txn.abort(); + + /* + * The database should be empty + */ + Cursor cursor = dbSpoiler.openCursor(txnSpoiler, null); + + assertTrue(cursor.getFirst(keyDbt, dataDbt, LockMode.DEFAULT) != + OperationStatus.SUCCESS); + cursor.close(); + txnSpoiler.abort(); + + closeEnv(); + } catch (Throwable t) { + /* print stack trace before going to teardown. */ + t.printStackTrace(); + throw t; + } + } + + @Test + public void testTransactionName() + throws Throwable { + + createEnv(); + + try { + Transaction txn = env.beginTransaction(null, null); + txn.setName("blort"); + assertEquals("blort", txn.getName()); + txn.abort(); + + /* + * [#14349] Make sure the txn is printable after closing. We + * once had a NullPointerException. + */ + txn.toString(); + + closeEnv(); + } catch (Throwable t) { + /* print stack trace before going to teardown. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Test all combinations of sync, nosync, and writeNoSync for txn + * commits. + */ + + /* SyncCombo expresses all the combinations of txn sync properties. */ + private static class SyncCombo { + private final boolean envNoSync; + private final boolean envWriteNoSync; + private final boolean txnNoSync; + private final boolean txnWriteNoSync; + private final boolean txnSync; + boolean expectSync; + boolean expectWrite; + boolean expectException; + + SyncCombo(int envWriteNoSync, + int envNoSync, + int txnSync, + int txnWriteNoSync, + int txnNoSync, + boolean expectSync, + boolean expectWrite, + boolean expectException) { + this.envNoSync = (envNoSync == 0) ? false : true; + this.envWriteNoSync = (envWriteNoSync == 0) ? false : true; + this.txnNoSync = (txnNoSync == 0) ? false : true; + this.txnWriteNoSync = (txnWriteNoSync == 0) ? false : true; + this.txnSync = (txnSync == 0) ? 
false : true; + this.expectSync = expectSync; + this.expectWrite = expectWrite; + this.expectException = expectException; + } + + TransactionConfig getTxnConfig() { + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setSync(txnSync); + txnConfig.setWriteNoSync(txnWriteNoSync); + txnConfig.setNoSync(txnNoSync); + return txnConfig; + } + + void setEnvironmentMutableConfig(Environment env) + throws DatabaseException { + + EnvironmentMutableConfig config = env.getMutableConfig(); + config.setTxnNoSync(envNoSync); + config.setTxnWriteNoSync(envWriteNoSync); + env.setMutableConfig(config); + } + } + + @Test + public void testSyncCombo() + throws Throwable { + + createEnv(); + + RandomAccessFile logFile = + new RandomAccessFile(new File(env.getHome(), "00000000.jdb"), "r"); + try { + SyncCombo[] testCombinations = { + /* Env Env Txn Txn Txn Expect Expect Expect + * WrNoSy NoSy Sync WrNoSy NoSyc Sync Write IAE*/ + new SyncCombo( 0, 0, 0, 0, 0, true, true, false), + new SyncCombo( 0, 0, 0, 0, 1, false, false, false), + new SyncCombo( 0, 0, 0, 1, 0, false, true, false), + new SyncCombo( 0, 0, 0, 1, 1, false, true, true), + new SyncCombo( 0, 0, 1, 0, 0, true, true, false), + new SyncCombo( 0, 0, 1, 0, 1, true, true, true), + new SyncCombo( 0, 0, 1, 1, 0, true, true, true), + new SyncCombo( 0, 0, 1, 1, 1, true, true, true), + new SyncCombo( 0, 1, 0, 0, 0, false, false, false), + new SyncCombo( 0, 1, 0, 0, 1, false, false, false), + new SyncCombo( 0, 1, 0, 1, 0, false, true, false), + new SyncCombo( 0, 1, 0, 1, 1, false, true, true), + new SyncCombo( 0, 1, 1, 0, 0, true, true, false), + new SyncCombo( 0, 1, 1, 0, 1, true, true, true), + new SyncCombo( 0, 1, 1, 1, 0, true, true, true), + new SyncCombo( 0, 1, 1, 1, 1, true, true, true), + new SyncCombo( 1, 0, 0, 0, 0, false, true, false), + new SyncCombo( 1, 0, 0, 0, 1, false, false, false), + new SyncCombo( 1, 0, 0, 1, 0, false, true, false), + new SyncCombo( 1, 0, 0, 1, 1, false, true, true), + new SyncCombo( 1, 0, 1, 0, 0, true, true, false), + new SyncCombo( 1, 0, 1, 0, 1, true, true, true), + new SyncCombo( 1, 0, 1, 1, 0, true, true, true), + new SyncCombo( 1, 0, 1, 1, 1, true, true, true), + new SyncCombo( 1, 1, 0, 0, 0, false, true, true), + new SyncCombo( 1, 1, 0, 0, 1, false, false, true), + new SyncCombo( 1, 1, 0, 1, 0, false, true, true), + new SyncCombo( 1, 1, 0, 1, 1, false, true, true), + new SyncCombo( 1, 1, 1, 0, 0, true, true, true), + new SyncCombo( 1, 1, 1, 0, 1, true, true, true), + new SyncCombo( 1, 1, 1, 1, 0, true, true, true), + new SyncCombo( 1, 1, 1, 1, 1, true, true, true) + }; + + /* envNoSync=false with default env config */ + assertTrue(!env.getMutableConfig().getTxnNoSync()); + + /* envWriteNoSync=false with default env config */ + assertTrue(!env.getMutableConfig().getTxnWriteNoSync()); + + /* + * For each combination of settings, call commit and + * check that we have the expected sync and log + * write. Make sure that commitSync(), commitNoSync always + * override all preferences. 
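+             *
+             * [Editor's note, illustrative only: the explicit commit variants
+             * on Transaction bypass whatever defaults were configured on the
+             * environment or TransactionConfig. Sketch, assuming a write was
+             * made under 'txn':
+             *
+             *   txn.commitSync();        // force write + fsync
+             *   txn.commitWriteNoSync(); // force write, skip fsync
+             *   txn.commitNoSync();      // buffer only; no write, no fsync
+             *
+             * Only one of these can be called per transaction; the helper
+             * syncExplicit() below exercises all three on separate txns.]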
+             */
+            for (int i = 0; i < testCombinations.length; i++) {
+                SyncCombo combo = testCombinations[i];
+                boolean IAECaught = false;
+                try {
+                    TransactionConfig txnConfig = combo.getTxnConfig();
+                    combo.setEnvironmentMutableConfig(env);
+                    syncExplicit(logFile, txnConfig,
+                                 combo.expectSync, combo.expectWrite);
+                } catch (IllegalArgumentException IAE) {
+                    IAECaught = true;
+                }
+                assertTrue(IAECaught == combo.expectException);
+            }
+
+            SyncCombo[] autoCommitCombinations = {
+            /*        Env    Env    Txn    Txn    Txn    Expect Expect Expect
+             *        WrNoSy NoSy   Sync   WrNoSy NoSy   Sync   Write  IAE*/
+            new SyncCombo( 0,     0,     0,     0,     0,  true,  true, false),
+            new SyncCombo( 0,     1,     0,     0,     0, false, false, false),
+            new SyncCombo( 1,     0,     0,     0,     0, false,  true, false),
+            new SyncCombo( 1,     1,     0,     0,     0, false,  true, true)
+            };
+
+            for (int i = 0; i < autoCommitCombinations.length; i++) {
+                SyncCombo combo = autoCommitCombinations[i];
+                boolean IAECaught = false;
+                try {
+                    combo.setEnvironmentMutableConfig(env);
+                } catch (IllegalArgumentException IAE) {
+                    IAECaught = true;
+                }
+                assertTrue(IAECaught == combo.expectException);
+                syncAutoCommit(logFile, combo.expectSync, combo.expectWrite);
+            }
+        } catch (Throwable t) {
+            /* Print stack trace before going to teardown. */
+            t.printStackTrace();
+            throw t;
+        } finally {
+            logFile.close();
+
+            closeEnv();
+        }
+    }
+
+    enum DurabilityAPI {SYNC_API, DUR_API, DEFAULT_API}
+
+    /*
+     * Returns true if there is mixed mode usage across the two APIs.
+     */
+    private boolean mixedModeUsage(DurabilityAPI outerAPI,
+                                   DurabilityAPI innerAPI) {
+        if ((innerAPI == DurabilityAPI.DEFAULT_API) ||
+            (outerAPI == DurabilityAPI.DEFAULT_API)) {
+            return false;
+        }
+
+        if (innerAPI == outerAPI) {
+            return false;
+        }
+        /* Mix of sync and durability APIs */
+        return true;
+    }
+
+    /*
+     * Does a three-level check at the env, config and transaction levels
+     * for mixed mode usage.
+     */
+    boolean mixedModeUsage(DurabilityAPI envAPI,
+                           DurabilityAPI tconfigAPI,
+                           DurabilityAPI transAPI) {
+        DurabilityAPI outerAPI;
+        if (tconfigAPI == DurabilityAPI.DEFAULT_API) {
+            outerAPI = envAPI;
+        } else {
+            outerAPI = tconfigAPI;
+        }
+        return mixedModeUsage(outerAPI, transAPI);
+    }
+
+    /*
+     * Test local mixed mode operations on MutableConfig and TransactionConfig
+     */
+    @Test
+    public void testOneLevelDurabilityComboErrors() {
+        createEnv();
+
+        EnvironmentMutableConfig config = new EnvironmentMutableConfig();
+        config.setTxnNoSync(true);
+        try {
+            config.setDurability(Durability.COMMIT_NO_SYNC);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+        config = new EnvironmentMutableConfig();
+        config.setDurability(Durability.COMMIT_NO_SYNC);
+        try {
+            config.setTxnNoSync(true);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+
+        TransactionConfig txnConfig = new TransactionConfig();
+        txnConfig.setNoSync(true);
+        try {
+            txnConfig.setDurability(Durability.COMMIT_NO_SYNC);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+
+        txnConfig = new TransactionConfig();
+        txnConfig.setDurability(Durability.COMMIT_NO_SYNC);
+        try {
+            txnConfig.setNoSync(true);
+            fail("expected exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(true); // pass expected exception
+        }
+
+        closeEnv();
+    }
+
+    /*
+     * Test for exceptions resulting from mixed mode usage.
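+     *
+     * [Editor's note, illustrative only: "mixed mode" means combining the
+     * legacy sync flags with the newer Durability API. For example, as
+     * demonstrated by testOneLevelDurabilityComboErrors above:
+     *
+     *   EnvironmentMutableConfig cfg = new EnvironmentMutableConfig();
+     *   cfg.setTxnNoSync(true);                        // legacy sync API
+     *   cfg.setDurability(Durability.COMMIT_NO_SYNC);  // durability API
+     *   // -> IllegalArgumentException: the two APIs must not be mixed
+     *
+     * The loops below enumerate every combination across the env, the
+     * TransactionConfig, and the commit call itself.]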
+ */ + @Test + public void testMultiLevelLocalDurabilityComboErrors() + throws Throwable { + + createEnv(); + + for (DurabilityAPI envAPI: DurabilityAPI.values()) { + EnvironmentMutableConfig config = new EnvironmentMutableConfig(); + switch (envAPI) { + case SYNC_API: + config.setTxnNoSync(true); + break; + case DUR_API: + config.setDurability(Durability.COMMIT_NO_SYNC); + break; + case DEFAULT_API: + break; + } + env.setMutableConfig(config); + for (DurabilityAPI tconfigAPI: DurabilityAPI.values()) { + TransactionConfig txnConfig = new TransactionConfig(); + switch (tconfigAPI) { + case SYNC_API: + txnConfig.setNoSync(true); + break; + case DUR_API: + txnConfig.setDurability(Durability.COMMIT_NO_SYNC); + break; + case DEFAULT_API: + txnConfig = null; + break; + } + try { + Transaction txn = env.beginTransaction(null, txnConfig); + txn.abort(); + assertFalse(mixedModeUsage(envAPI,tconfigAPI)); + for (DurabilityAPI transAPI : DurabilityAPI.values()) { + Transaction t = env.beginTransaction(null, txnConfig); + try { + switch (transAPI) { + case SYNC_API: + t.commitNoSync(); + break; + case DUR_API: + t.commit(Durability.COMMIT_NO_SYNC); + break; + case DEFAULT_API: + t.commit(); + break; + } + assertFalse(mixedModeUsage(envAPI, + tconfigAPI, + transAPI)); + } catch (IllegalArgumentException e) { + t.abort(); + assertTrue(mixedModeUsage(envAPI, + tconfigAPI, + transAPI)); + } + } + } catch (IllegalArgumentException e) { + assertTrue(mixedModeUsage(envAPI,tconfigAPI)); + } + } + } + closeEnv(); + } + + @Test + public void testLocalDurabilityCombo() + throws Throwable { + + createEnv(); + + RandomAccessFile logFile = + new RandomAccessFile(new File(env.getHome(), "00000000.jdb"), "r"); + /* Note that the default must be first. An "unspecified" durability is + * represented by a null value in the env props. It's not possible to + * restore durability back to its "unspecified" state, since + * Environment.setMutableConfig effectively does a merge operation. + */ + Durability[] localDurabilities = new Durability[] { + null, /* Run default settings first. */ + Durability.COMMIT_SYNC, + Durability.COMMIT_WRITE_NO_SYNC, + Durability.COMMIT_NO_SYNC + }; + + DatabaseEntry key = new DatabaseEntry(new byte[1]); + DatabaseEntry data = new DatabaseEntry(new byte[1]); + + try { + for (Durability envDurability : localDurabilities) { + EnvironmentMutableConfig config = env.getMutableConfig(); + config.setDurability(envDurability); + env.setMutableConfig(config); + for (Durability transConfigDurability : localDurabilities) { + TransactionConfig txnConfig = null; + if (transConfigDurability != null) { + txnConfig = new TransactionConfig(); + txnConfig.setDurability(transConfigDurability); + } + for (Durability transDurability : localDurabilities) { + long beforeSyncs = getNSyncs(); + Transaction txn = env.beginTransaction(null, txnConfig); + db.put(txn, key, data); + long beforeLength = logFile.length(); + if (transDurability == null) { + txn.commit(); + } else { + txn.commit(transDurability); + } + Durability effectiveDurability = + (transDurability != null) ? + transDurability : + ((transConfigDurability != null) ? + transConfigDurability : + ((envDurability != null) ? 
+                              envDurability :
+                              Durability.COMMIT_SYNC));
+
+                        long afterSyncs = getNSyncs();
+                        long afterLength = logFile.length();
+                        boolean syncOccurred = afterSyncs > beforeSyncs;
+                        boolean writeOccurred = afterLength > beforeLength;
+                        switch (effectiveDurability.getLocalSync()) {
+                        case SYNC:
+                            assertTrue(syncOccurred);
+                            assertTrue(writeOccurred);
+                            break;
+                        case NO_SYNC:
+                            assertFalse(syncOccurred);
+                            assertFalse(writeOccurred);
+                            break;
+                        case WRITE_NO_SYNC:
+                            assertFalse(syncOccurred);
+                            assertTrue(writeOccurred);
+                            break;
+                        }
+                    }
+                }
+            }
+        } finally {
+            logFile.close();
+
+            closeEnv();
+        }
+    }
+
+    /**
+     * Does an explicit commit and checks whether an fsync and a log write
+     * occurred.
+     */
+    private void syncExplicit(RandomAccessFile lastLogFile,
+                              TransactionConfig config,
+                              boolean expectSync,
+                              boolean expectWrite)
+        throws DatabaseException, IOException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[1]);
+
+        long beforeSyncs = getNSyncs();
+        Transaction txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        long beforeLength = lastLogFile.length();
+        txn.commit();
+        long afterSyncs = getNSyncs();
+        long afterLength = lastLogFile.length();
+        boolean syncOccurred = afterSyncs > beforeSyncs;
+        boolean writeOccurred = afterLength > beforeLength;
+        assertEquals(expectSync, syncOccurred);
+        assertEquals(expectWrite, writeOccurred);
+
+        /*
+         * Make sure explicit sync/noSync/writeNoSync always work.
+         */
+
+        /* Expect a sync and write. */
+        beforeSyncs = getNSyncs();
+        beforeLength = lastLogFile.length();
+        txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        txn.commitSync();
+        afterSyncs = getNSyncs();
+        afterLength = lastLogFile.length();
+        assert(afterSyncs > beforeSyncs);
+        assert(afterLength > beforeLength);
+
+        /* Expect neither a sync nor write. */
+        beforeSyncs = getNSyncs();
+        beforeLength = lastLogFile.length();
+        txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        txn.commitNoSync();
+        afterSyncs = getNSyncs();
+        afterLength = lastLogFile.length();
+        assert(afterSyncs == beforeSyncs);
+        assert(afterLength == beforeLength);
+
+        /* Expect no sync but do expect a write. */
+        beforeSyncs = getNSyncs();
+        beforeLength = lastLogFile.length();
+        txn = env.beginTransaction(null, config);
+        db.put(txn, key, data);
+        txn.commitWriteNoSync();
+        afterSyncs = getNSyncs();
+        afterLength = lastLogFile.length();
+        assert(afterSyncs == beforeSyncs);
+        assert(afterLength > beforeLength);
+    }
+
+    /**
+     * Does an auto-commit put and checks whether an fsync and a log write
+     * occurred.
+     */
+    private void syncAutoCommit(RandomAccessFile lastLogFile,
+                                boolean expectSync,
+                                boolean expectWrite)
+        throws DatabaseException, IOException {
+
+        DatabaseEntry key = new DatabaseEntry(new byte[1]);
+        DatabaseEntry data = new DatabaseEntry(new byte[1]);
+        long beforeSyncs = getNSyncs();
+        long beforeLength = lastLogFile.length();
+        db.put(null, key, data);
+        long afterLength = lastLogFile.length();
+        long afterSyncs = getNSyncs();
+        boolean syncOccurred = afterSyncs > beforeSyncs;
+        assertEquals(expectSync, syncOccurred);
+        assertEquals(expectWrite, (afterLength > beforeLength));
+    }
+
+    /**
+     * Returns the number-of-fsyncs statistic.
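+     *
+     * [Editor's note, illustrative only: this is the generic pattern for
+     * reading a single JE stat by group and definition, which the body
+     * below follows:
+     *
+     *   EnvironmentStats es = env.getStats(null);
+     *   Map<String, StatGroup> groups = es.getStatGroupsMap();
+     *   long fsyncs = groups.get(GROUP_NAME).getLong(FSYNCMGR_FSYNCS);
+     *
+     * GROUP_NAME and FSYNCMGR_FSYNCS are statically imported from
+     * LogStatDefinition at the top of this file.]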
+ */ + private long getNSyncs() { + EnvironmentStats es = env.getStats(null); + Mapgrpmap = es.getStatGroupsMap(); + StatGroup sg = grpmap.get(GROUP_NAME); + return sg.getLong(FSYNCMGR_FSYNCS); + } + + @Test + public void testNoWaitConfig() + throws Throwable { + + createEnv(); + + try { + TransactionConfig defaultConfig = new TransactionConfig(); + TransactionConfig noWaitConfig = new TransactionConfig(); + noWaitConfig.setNoWait(true); + Transaction txn; + + /* noWait=false */ + + expectNoWaitTxn(null, false); + + txn = env.beginTransaction(null, null); + expectNoWaitTxn(txn, false); + txn.abort(); + + txn = env.beginTransaction(null, defaultConfig); + expectNoWaitTxn(txn, false); + txn.abort(); + + /* noWait=true */ + + txn = env.beginTransaction(null, noWaitConfig); + expectNoWaitTxn(txn, true); + txn.abort(); + + closeEnv(); + } catch (Throwable t) { + /* print stack trace before going to teardown. */ + t.printStackTrace(); + throw t; + } + } + + /** + * Asserts that the given txn is a no-wait txn, or if the txn parameter + * is null asserts that an auto-commit txn is a no-wait txn. + */ + private void expectNoWaitTxn(Transaction txn, boolean expectNoWaitTxn) + throws DatabaseException { + + final DatabaseEntry key = new DatabaseEntry(new byte[1]); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + + /* Use a wait txn to get a write lock. */ + final Transaction txn2 = env.beginTransaction(null, null); + db.put(txn2, key, data); + + try { + db.put(txn, key, data); + fail("Lock should not have been granted"); + } catch (LockNotAvailableException e) { + assertTrue(expectNoWaitTxn); + } catch (LockConflictException e) { + assertFalse(expectNoWaitTxn); + } + + /* Also check get() with read-committed. [#23653] */ + try { + db.get(txn, key, data, LockMode.READ_COMMITTED); + fail("Lock should not have been granted"); + } catch (LockNotAvailableException e) { + assertTrue(expectNoWaitTxn); + } catch (LockConflictException e) { + assertFalse(expectNoWaitTxn); + } + + txn2.abort(); + } + + /* + * Assert that cache utilization is correctly incremented by locks and + * txns, and decremented after release. + */ + private void checkCacheUsage(long beforeLock, + long afterLock, + long afterRelease, + long expectedSize) { + assertEquals(beforeLock, afterRelease); + assertEquals(afterLock, (beforeLock + expectedSize)); + } + + class CheckReadyToSplit implements WithRootLatched { + private boolean readyToSplit; + private final DatabaseImpl database; + + CheckReadyToSplit(DatabaseImpl database) { + readyToSplit = false; + this.database = database; + } + + public boolean getReadyToSplit() { + return readyToSplit; + } + + public IN doWork(ChildReference root) + throws DatabaseException { + + IN rootIN = (IN) root.fetchTarget(database, null); + readyToSplit = rootIN.needsSplitting(); + return null; + } + } + + /** + * Ensures that when an operation failure sets a txn to abort-only, the + * same exeption is rethrown if the user insists on continuing to use the + * txn. 
+ */ + @Test + public void testRepeatingOperationFailures() { + createEnv(); + + final Transaction txn1 = env.beginTransaction(null, null); + txn1.setLockTimeout(0); + final Transaction txn2 = env.beginTransaction(null, null); + + assertTrue(txn2.isValid()); + assertTrue(txn1.isValid()); + assertSame(Transaction.State.OPEN, txn2.getState()); + assertSame(Transaction.State.OPEN, txn1.getState()); + + final DatabaseEntry key1 = new DatabaseEntry(new byte[] {1}); + final DatabaseEntry key2 = new DatabaseEntry(new byte[] {2}); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + + db.put(txn1, key1, data); + OperationFailureException expected = null; + try { + db.put(txn2, key1, data); + fail(); + } catch (OperationFailureException e) { + expected = e; + } + assertTrue(!txn2.isValid()); + assertTrue(txn1.isValid()); + assertSame(Transaction.State.MUST_ABORT, txn2.getState()); + assertSame(Transaction.State.OPEN, txn1.getState()); + + try { + db.put(txn2, key2, data); + fail(); + } catch (OperationFailureException e) { + assertSame(expected, e.getCause()); + } + assertTrue(!txn2.isValid()); + assertTrue(txn1.isValid()); + assertSame(Transaction.State.MUST_ABORT, txn2.getState()); + assertSame(Transaction.State.OPEN, txn1.getState()); + + txn2.abort(); + txn1.commit(); + + assertTrue(!txn2.isValid()); + assertTrue(!txn1.isValid()); + assertSame(Transaction.State.ABORTED, txn2.getState()); + assertSame(Transaction.State.COMMITTED, txn1.getState()); + + closeEnv(); + } + + /** + * Tests that Transaction.State.POSSIBLY_COMMITTED is set when an exception + * occurs while logging the commit entry. Checks that the env is + * invalidated. + */ + @Test + public void testPossiblyCommittedState() { + + /* + * With SYNC or WRITE_NO_SYNC, the data is durable in spite of the + * exception, since the exception is thrown after writing but before + * the fsync. + */ + writePossiblyCommittedData(1, Durability.COMMIT_SYNC); + checkPossiblyCommittedData(1, true /*expectDurable*/); + + writePossiblyCommittedData(2, Durability.COMMIT_WRITE_NO_SYNC); + checkPossiblyCommittedData(2, true /*expectDurable*/); + + /* But with NO_SYNC, the data is not durable. */ + writePossiblyCommittedData(3, Durability.COMMIT_NO_SYNC); + checkPossiblyCommittedData(3, false /*expectDurable*/); + } + + /** + * Causes the Transaction.State.POSSIBLY_COMMITTED to be set by throwing an + * exception after writing the commit entry but before the fsync. Checks + * that the env is invalidated and the txn state is correct. + */ + private void writePossiblyCommittedData(int keyVal, + Durability durability) { + createEnv(); + final DatabaseEntry key = new DatabaseEntry(new byte[] {(byte) keyVal}); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + + final Transaction txn = env.beginTransaction(null, null); + db.put(txn, key, data); + + /* Throw an exception during logging. */ + DbInternal.getNonNullEnvImpl(env).getLogManager(). + setFlushLogHook(new FlushHook()); + + assertSame(Transaction.State.OPEN, txn.getState()); + try { + txn.commit(durability); + fail(); + } catch (RuntimeException e) { + assertTrue(e instanceof EnvironmentFailureException); + assertTrue(e.getCause() != null); + assertTrue(e.getCause().getMessage().contains("FlushHook")); + } + assertSame(Transaction.State.POSSIBLY_COMMITTED, txn.getState()); + assertFalse(txn.isValid()); + assertFalse(env.isValid()); + + if (isReplicatedTest(getClass())) { + /* Replicas will throw errors when they join. 
*/ + abnormalClose(env); + db = null; + env = null; + } else { + /* Do not close DB with an invalid env. */ + db = null; + closeEnv(); + } + } + + private void checkPossiblyCommittedData(int keyVal, + boolean expectDurable) { + createEnv(); + final DatabaseEntry key = new DatabaseEntry(new byte[] {(byte) keyVal}); + final DatabaseEntry data = new DatabaseEntry(new byte[1]); + + final OperationStatus status = db.get(null, key, data, null); + + if (expectDurable) { + assertSame(OperationStatus.SUCCESS, status); + } else { + assertSame(OperationStatus.NOTFOUND, status); + } + + closeEnv(); + } + + /** + * Throws a runtime exception after writing the log entry but before the + * fsync. The commit will be durable, since the OS doesn't crash. + * + * The fact that the type param is CountDownLatch is incidental, and a + * CountDownLatch is not used in this implementation of the hook. + */ + private static class FlushHook implements TestHook { + + public void hookSetup() { + } + + public void doIOHook() + throws IOException { + } + + public void doHook(CountDownLatch obj) { + } + + public void doHook() { + throw new RuntimeException("Generated by FlushHook"); + } + + public CountDownLatch getHookValue() { + return null; + } + } +} diff --git a/test/com/sleepycat/je/txn/TxnTimeoutTest.java b/test/com/sleepycat/je/txn/TxnTimeoutTest.java new file mode 100644 index 0000000..9024959 --- /dev/null +++ b/test/com/sleepycat/je/txn/TxnTimeoutTest.java @@ -0,0 +1,671 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.txn; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockTimeoutException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.TransactionTimeoutException; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.LockPreemptedException; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/* + * Test transaction and lock timeouts. 
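+ *
+ * [Editor's note, illustrative only: JE distinguishes the two timeouts.
+ * A txn timeout is measured from the start of the transaction, while a
+ * lock timeout is measured from the start of a single lock wait; both
+ * setters below take microseconds, matching their use throughout this
+ * test:
+ *
+ *   Transaction txn = env.beginTransaction(null, null);
+ *   txn.setTxnTimeout(300000);    // 300 ms from txn start
+ *   txn.setLockTimeout(9000000);  // 9 s per lock wait
+ *
+ * When a blocked operation times out, JE throws the LockConflictException
+ * subclass that matches whichever limit expired.]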
+ */
+public class TxnTimeoutTest extends DualTestCase {
+
+    private Environment env;
+    private File envHome;
+
+    public TxnTimeoutTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    private void createEnv(boolean setTimeout,
+                           long txnTimeoutVal,
+                           long lockTimeoutVal)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+        if (setTimeout) {
+            envConfig.setTxnTimeout(txnTimeoutVal);
+            envConfig.setLockTimeout(lockTimeoutVal);
+        }
+
+        env = create(envHome, envConfig);
+    }
+
+    private void closeEnv()
+        throws DatabaseException {
+
+        close(env);
+        env = null;
+    }
+
+    /**
+     * Test timeout set at txn level.
+     */
+    @Test
+    public void testTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        createEnv(false, 0, 0);
+
+        Transaction txnA = env.beginTransaction(null, null);
+
+        /* Grab a lock. */
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setTransactional(true);
+        dbConfig.setAllowCreate(true);
+        env.openDatabase(txnA, "foo", dbConfig);
+
+        /* Now make a second txn so we can induce some blocking. */
+        Transaction txnB = env.beginTransaction(null, null);
+        txnB.setTxnTimeout(300000); // microseconds
+        txnB.setLockTimeout(9000000);
+        Thread.sleep(400);
+
+        try {
+            env.openDatabase(txnB, "foo", dbConfig);
+            fail("Should time out");
+        } catch (LockConflictException e) {
+            /* Skip the version string. */
+            assertTxnTimeout(e);
+            assertEquals(300, e.getTimeoutMillis());
+            /* Good, expect this exception */
+            txnB.abort();
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Should not get another kind of exception");
+        }
+
+        /* Now try a lock timeout. */
+        txnB = env.beginTransaction(null, null);
+        txnB.setLockTimeout(100000);
+
+        try {
+            env.openDatabase(txnB, "foo", dbConfig);
+            fail("Should time out");
+        } catch (LockConflictException e) {
+            assertLockTimeout(e);
+            assertEquals(100, e.getTimeoutMillis());
+            /* Good, expect this exception */
+            txnB.abort();
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Should not get another kind of exception");
+        }
+
+        txnA.abort();
+        EnvironmentStats stats = env.getStats(TestUtils.FAST_STATS);
+        assertEquals(2, stats.getNWaits());
+
+        closeEnv();
+    }
+
+    /**
+     * Use Txn.setTimeout(), expect a txn timeout.
+     */
+    @Test
+    public void testPerTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(false, true, true, 300000, 9000000, false);
+    }
+
+    /**
+     * Use EnvironmentConfig.setTxnTimeout(), expect a txn timeout.
+     */
+    @Test
+    public void testEnvTxnTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(true, true, true, 300000, 9000000, false);
+    }
+
+    /**
+     * Use EnvironmentConfig.setTxnTimeout(), use
+     * EnvironmentConfig.setLockTimeout(0), expect a txn timeout.
+     */
+    @Test
+    public void testEnvNoLockTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(true, true, true, 300000, 0, false);
+    }
+
+    /**
+     * Use Txn.setLockTimeout(), expect a lock timeout.
+     */
+    @Test
+    public void testPerLockTimeout()
+        throws DatabaseException, InterruptedException {
+
+        doEnvTimeout(false, false, true, 0, 100000, true);
+    }
+
+    /**
+     * Use EnvironmentConfig.setTxnTimeout(0), use
+     * EnvironmentConfig.setLockTimeout(xxx), expect a lock timeout.
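+     *
+     * [Editor's note, illustrative only: the environment-wide equivalents
+     * of the per-txn setters, also in microseconds, look like this:
+     *
+     *   EnvironmentConfig ec = TestUtils.initEnvConfig();
+     *   ec.setTransactional(true);
+     *   ec.setAllowCreate(true);
+     *   ec.setTxnTimeout(0);        // 0 disables the txn timeout
+     *   ec.setLockTimeout(100000);  // 100 ms per lock wait
+     *
+     * createEnv() above applies exactly this pattern.]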
+ */ + @Test + public void testEnvLockTimeout() + throws DatabaseException, InterruptedException { + + doEnvTimeout(true, false, true, 0, 100000, true); + } + + /** + * @param setEnvConfigTimeout + * if true, use EnvironmentConfig.set{Lock,Txn}Timeout + * @param setPerTxnTimeout if true, use Txn.setTxnTimeout() + * @param setPerLockTimeout if true, use Txn.setLockTimeout() + * @param txnTimeout value for txn timeout + * @param lockTimeout value for lock timeout + * @param expectLockException if true, expect a LockTimoutException, if + * false, expect a TxnTimeoutException + */ + private void doEnvTimeout(boolean setEnvConfigTimeout, + boolean setPerTxnTimeout, + boolean setPerLockTimeout, + long txnTimeout, + long lockTimeout, + boolean expectLockException) + throws DatabaseException, InterruptedException { + + createEnv(setEnvConfigTimeout, txnTimeout, lockTimeout); + + Transaction txnA = env.beginTransaction(null, null); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database dbA = env.openDatabase(txnA, "foo", dbConfig); + + /* + * Now make a second txn so we can induce some blocking. Make the + * txn timeout environment wide. + */ + Transaction txnB = env.beginTransaction(null, null); + long expectTxnTimeoutMillis; + long expectLockTimeoutMillis; + if (setEnvConfigTimeout) { + expectTxnTimeoutMillis = txnTimeout / 1000; + expectLockTimeoutMillis = lockTimeout / 1000; + } else { + if (setPerTxnTimeout) { + txnB.setTxnTimeout(300000); + expectTxnTimeoutMillis = 300; + } else { + expectTxnTimeoutMillis = 500; + } + if (setPerLockTimeout) { + txnB.setLockTimeout(9000000); + expectLockTimeoutMillis = 9000; + } else { + expectLockTimeoutMillis = 500; + } + } + + Thread.sleep(400); + + try { + env.openDatabase(txnB, "foo", dbConfig); + fail("Should time out"); + } catch (LockConflictException e) { + if (expectLockException) { + assertLockTimeout(e); + assertEquals(expectLockTimeoutMillis, + e.getTimeoutMillis()); + } else { + assertTxnTimeout(e); + assertEquals(expectTxnTimeoutMillis, e.getTimeoutMillis()); + } + + /* Good, expect this exception */ + txnB.abort(); + } catch (Exception e) { + e.printStackTrace(); + fail("Should not get another kind of exception"); + } + + dbA.close(); + txnA.abort(); + + closeEnv(); + } + + /** + * Use Locker.setTxnTimeout(), expect a lock timeout. + */ + @Test + public void testPerLockerTimeout() + throws DatabaseException, InterruptedException { + + createEnv(true, 500000000, 0); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* + * Create our Locker object and set the transaction timeout to 0. + * 0 should mean no timeout per berkeley API docs). + */ + Locker locker = BasicLocker.createBasicLocker(envImpl); + locker.setTxnTimeout(0); + /* Wait for a short period. */ + Thread.sleep(100); + /* Set the timeout to zero and should never be timed out. */ + assertFalse(locker.isTimedOut()); + + /* Set timeout to 10 milliseconds. */ + locker.setTxnTimeout(10); + /* Wait for 100 milliseconds. */ + Thread.sleep(100); + /* Should be timed out. */ + assertTrue(locker.isTimedOut()); + + try { + + /* + * Set timeout to a negative value, and expect a + * IllegalArgumentException. + */ + locker.setTxnTimeout(-1000); + fail("should get an exception"); + } catch (IllegalArgumentException ie) { + assertTrue(ie. + getMessage(). 
+ contains("the timeout value cannot be negative")); + } catch (Exception e) { + e.printStackTrace(); + fail("Should not get another kind of exception"); + } + + try { + + /* + * Set timeout to a value greater than 2^32, and expect a + * IllegalArgumentException. + */ + long timeout = (long) Math.pow(2, 33); + locker.setTxnTimeout(timeout); + fail("should get an exception"); + } catch (IllegalArgumentException ie) { + assertTrue(ie.getMessage().contains + ("the timeout value cannot be greater than 2^32")); + } catch (Exception e) { + e.printStackTrace(); + fail("Should not get another kind of exception"); + } + + closeEnv(); + } + + @Test + public void testReadCommittedTxnTimeout() + throws DatabaseException, InterruptedException { + + doReadCommittedTimeout(true); + } + + @Test + public void testReadCommittedLockTimeout() + throws DatabaseException, InterruptedException { + + doReadCommittedTimeout(false); + } + + /** + * Tests that Transaction.setTxnTimeout and setLockTimeout work with the + * BuddyLocker used for ReadCommitted reads. [#16017] + */ + private void doReadCommittedTimeout(boolean useTxnTimeout) + throws DatabaseException, InterruptedException { + + createEnv(false, 0, 0); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setReadCommitted(true); + + Transaction txnA = null; + Transaction txnB = null; + + try { + /* Insert a record with txnA and keep it write-locked. */ + txnA = env.beginTransaction(null, txnConfig); + key.setData(new byte[1]); + data.setData(new byte[1]); + OperationStatus status = db.put(txnA, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* + * An insert with txnB will block because entire range is locked by + * txnA. + */ + txnB = env.beginTransaction(null, txnConfig); + if (useTxnTimeout) { + txnB.setTxnTimeout(100 * 1000); + txnB.setLockTimeout(9000 * 1000); + /* Ensure txn timeout triggers before waiting. */ + Thread.sleep(150); + } else { + txnB.setTxnTimeout(9000 * 1000); + txnB.setLockTimeout(100 * 1000); + } + key.setData(new byte[1]); + try { + db.get(txnB, key, data, null); + fail(); + } catch (LockConflictException e) { + if (useTxnTimeout) { + assertTxnTimeout(e); + } else { + assertLockTimeout(e); + } + assertEquals(100, e.getTimeoutMillis()); + } + } finally { + if (txnB != null) { + txnB.abort(); + } + if (txnA != null) { + txnA.abort(); + } + } + + db.close(); + closeEnv(); + } + + @Test + public void testSerializableTxnTimeout() + throws DatabaseException, InterruptedException { + + doSerializableTimeout(true); + } + + @Test + public void testSerializableLockTimeout() + throws DatabaseException, InterruptedException { + + doSerializableTimeout(false); + } + + /** + * Tests that Transaction.setTxnTimeout and setLockTimeout work with the + * BuddyLocker used for Serializable inserts. 
[#16017] + */ + private void doSerializableTimeout(boolean useTxnTimeout) + throws DatabaseException, InterruptedException { + + createEnv(false, 0, 0); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + TransactionConfig txnConfig = new TransactionConfig(); + txnConfig.setSerializableIsolation(true); + + Transaction txnA = null; + Transaction txnB = null; + + try { + /* Lock virtual EOF node with txnA by scanning an empty DB. */ + txnA = env.beginTransaction(null, txnConfig); + Cursor c = db.openCursor(txnA, null); + OperationStatus status = c.getFirst(key, data, null); + assertSame(OperationStatus.NOTFOUND, status); + c.close(); + + /* + * Insert with txnB will block because entire range is locked by + * txnA. + */ + txnB = env.beginTransaction(null, txnConfig); + if (useTxnTimeout) { + txnB.setTxnTimeout(100 * 1000); + txnB.setLockTimeout(9000 * 1000); + /* Ensure txn timeout triggers before waiting. */ + Thread.sleep(150); + } else { + txnB.setTxnTimeout(9000 * 1000); + txnB.setLockTimeout(100 * 1000); + } + key.setData(new byte[1]); + data.setData(new byte[1]); + try { + db.put(txnB, key, data); + fail(); + } catch (LockConflictException e) { + if (useTxnTimeout) { + assertTxnTimeout(e); + } else { + assertLockTimeout(e); + } + assertEquals(100, e.getTimeoutMillis()); + } + } finally { + if (txnB != null) { + txnB.abort(); + } + if (txnA != null) { + txnA.abort(); + } + } + + db.close(); + closeEnv(); + } + + @Test + public void testImportunateOperations() + throws DatabaseException { + + createEnv(false, 0, 0); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* LockPreemptedException is thrown only if replicated. */ + if (!envImpl.isReplicated()) { + closeEnv(); + return; + } + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + TransactionConfig txnConfig = new TransactionConfig(); + + Transaction txnA = null; + Transaction txnB = null; + + try { + /* Insert a record with txnA and keep it write-locked. */ + txnA = env.beginTransaction(null, txnConfig); + key.setData(new byte[1]); + data.setData(new byte[1]); + OperationStatus status = db.put(txnA, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* An insert with txnB will succeed because it is importunate. */ + txnB = env.beginTransaction(null, txnConfig); + DbInternal.getTxn(txnB).setImportunate(true); + assertTrue(txnA.isValid()); + key.setData(new byte[1]); + try { + assertEquals(OperationStatus.SUCCESS, + db.get(txnB, key, data, null)); + } catch (DatabaseException e) { + fail("caught unexpected exception " + e); + } + txnB.commit(); + txnB = null; + + /* Another read with txnA will fail. 
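+             *
+             * An importunate locker (e.g. replication replay) may steal
+             * locks; the victim's next access throws
+             * LockPreemptedException and the victim txn becomes invalid,
+             * so the usual recovery is abort-and-retry, sketched as:
+             *
+             *   try {
+             *       db.get(txnA, key, data, null);
+             *   } catch (LockPreemptedException e) {
+             *       txnA.abort();  // txnA.isValid() is now false
+             *       // begin a new txn and retry if desired
+             *   }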
*/ + try { + db.get(txnA, key, data, null); + fail(); + } catch (LockPreemptedException e) { + // expected + } + assertTrue(!txnA.isValid()); + } finally { + if (txnB != null) { + txnB.abort(); + } + if (txnA != null) { + txnA.abort(); + } + } + + db.close(); + closeEnv(); + } + + @Test + public void testImportunateReadCommitted() + throws DatabaseException { + + createEnv(false, 0, 0); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + /* LockPreemptedException is thrown only if replicated. */ + if (!envImpl.isReplicated()) { + closeEnv(); + return; + } + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + + TransactionConfig txnConfig = new TransactionConfig(); + + Transaction txnA = null; + Transaction txnB = null; + Cursor cursor = null; + + try { + /* Insert record with auto-commit. */ + key.setData(new byte[1]); + data.setData(new byte[1]); + OperationStatus status = db.put(null, key, data); + assertSame(OperationStatus.SUCCESS, status); + + /* Read-committed with txnA and keep it read-locked. */ + txnA = env.beginTransaction(null, txnConfig); + key.setData(new byte[1]); + cursor = db.openCursor(txnA, + new CursorConfig().setReadCommitted(true)); + status = cursor.getSearchKey(key, data, null); + assertSame(OperationStatus.SUCCESS, status); + + /* An insert with txnB will succeed because it is importunate. */ + txnB = env.beginTransaction(null, txnConfig); + DbInternal.getTxn(txnB).setImportunate(true); + assertTrue(txnA.isValid()); + key.setData(new byte[1]); + data.setData(new byte[1]); + try { + assertEquals(OperationStatus.SUCCESS, + db.put(txnB, key, data)); + } catch (DatabaseException e) { + fail("caught unexpected exception " + e); + } + txnB.commit(); + txnB = null; + + /* Another read with txnA will fail. */ + try { + db.get(txnA, key, data, null); + fail(); + } catch (LockPreemptedException e) { + // expected + } + assertTrue(!txnA.isValid()); + } finally { + if (cursor != null) { + cursor.close(); + } + if (txnB != null) { + txnB.abort(); + } + if (txnA != null) { + txnA.abort(); + } + } + + db.close(); + closeEnv(); + } + + private void assertLockTimeout(LockConflictException e) { + assertTrue(TestUtils.skipVersion(e).startsWith("Lock ")); + assertSame(LockTimeoutException.class, e.getClass()); + } + + private void assertTxnTimeout(LockConflictException e) { + assertTrue(TestUtils.skipVersion(e).startsWith("Transaction ")); + assertSame(TransactionTimeoutException.class, e.getClass()); + } +} diff --git a/test/com/sleepycat/je/util/Adler32Test.java b/test/com/sleepycat/je/util/Adler32Test.java new file mode 100644 index 0000000..45a0136 --- /dev/null +++ b/test/com/sleepycat/je/util/Adler32Test.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; + +import java.util.Random; +import java.util.zip.Checksum; + +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +public class Adler32Test extends TestBase { + + static private int N_ITERS = 1000; + + @Test + public void testRandomAdler32ByteArray() { + Checksum javaChecksum = new java.util.zip.Adler32(); + Checksum jeChecksum = new com.sleepycat.je.utilint.Adler32(); + Checksum chunkingChecksum = + new com.sleepycat.je.utilint.Adler32.ChunkingAdler32(128); + Random rnd = new Random(); + for (int i = 0; i < N_ITERS; i++) { + int nBytes = rnd.nextInt(65535); + byte[] b = new byte[nBytes]; + rnd.nextBytes(b); + javaChecksum.reset(); + jeChecksum.reset(); + chunkingChecksum.reset(); + javaChecksum.update(b, 0, nBytes); + jeChecksum.update(b, 0, nBytes); + chunkingChecksum.update(b, 0, nBytes); + assertEquals(javaChecksum.getValue(), jeChecksum.getValue()); + assertEquals(javaChecksum.getValue(), chunkingChecksum.getValue()); + } + } + + public void xtestRandomAdler32ByteArrayPerformance() { + Checksum javaChecksum = new java.util.zip.Adler32(); + Checksum jeChecksum = new com.sleepycat.je.utilint.Adler32(); + Random rnd = new Random(); + byte[][] baa = new byte[N_ITERS][]; + int[] lengths = new int[N_ITERS]; + long totalBytes = 0; + for (int i = 0; i < N_ITERS; i++) { + int nBytes = rnd.nextInt(65535); + byte[] b = new byte[nBytes]; + baa[i] = b; + lengths[i] = nBytes; + totalBytes += nBytes; + rnd.nextBytes(b); + } + long jeChecksumTime = + measureChecksum(baa, lengths, jeChecksum, false); + long javaChecksumTime = + measureChecksum(baa, lengths, javaChecksum, false); + long jeChecksumTimeByteAtATime = + measureChecksum(baa, lengths, jeChecksum, true); + long javaChecksumTimeByteAtATime = + measureChecksum(baa, lengths, javaChecksum, true); + System.out.println(N_ITERS + " Iterations, " + + totalBytes + " bytes:\n " + + javaChecksumTime + " millis. for java\n" + + jeChecksumTime + " millis. for je\n" + + javaChecksumTimeByteAtATime + + " millis. for java byte at a time\n" + + jeChecksumTimeByteAtATime + + " millis. for je byte at a time"); + } + + private long measureChecksum(byte[][] baa, + int[] lengths, + Checksum cksum, + boolean byteAtATime) { + long startTime = System.currentTimeMillis(); + for (int i = 0; i < N_ITERS; i++) { + byte[] b = baa[i]; + int len = lengths[i]; + cksum.reset(); + if (byteAtATime) { + for (int j = 0; j < len; j++) { + cksum.update(b[j]); + } + } else { + cksum.update(b, 0, len); + } + } + long endTime = System.currentTimeMillis(); + return (endTime - startTime); + } + + @Test + public void testRandomAdler32SingleBytes() { + Checksum javaChecksum = new java.util.zip.Adler32(); + Checksum jeChecksum = new com.sleepycat.je.utilint.Adler32(); + Random rnd = new Random(); + for (int i = 0; i < N_ITERS; i++) { + int nBytes = rnd.nextInt(65535); + javaChecksum.reset(); + jeChecksum.reset(); + for (int j = 0; j < nBytes; j++) { + byte b = (byte) (rnd.nextInt(256) & 0xff); + javaChecksum.update(b); + jeChecksum.update(b); + } + assertEquals(javaChecksum.getValue(), jeChecksum.getValue()); + } + } +} diff --git a/test/com/sleepycat/je/util/BadFileFilter.java b/test/com/sleepycat/je/util/BadFileFilter.java new file mode 100644 index 0000000..5c20c87 --- /dev/null +++ b/test/com/sleepycat/je/util/BadFileFilter.java @@ -0,0 +1,52 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.StringTokenizer;
+
+public class BadFileFilter implements FilenameFilter {
+
+    /**
+     * Accept files of this format:
+     *   <fileNumber>.bad.<repeatNumber>
+     * where <fileNumber> is an eight-digit number and <repeatNumber> is a
+     * number.
+     */
+    public boolean accept(File dir, String name) {
+        boolean ok = false;
+        StringTokenizer tokenizer = new StringTokenizer(name, ".");
+        /* There should be three parts. */
+        if (tokenizer.countTokens() == 3) {
+            String fileNumber = tokenizer.nextToken();
+            String fileSuffix = tokenizer.nextToken();
+            String repeat = tokenizer.nextToken();
+
+            /* Check the length and the suffix. */
+            if ((fileNumber.length() == 8) &&
+                (fileSuffix.equals("bad"))) {
+
+                /* The first and third parts should be numbers. */
+                try {
+                    Integer.parseInt(fileNumber);
+                    Integer.parseInt(repeat);
+                    ok = true;
+                } catch (NumberFormatException e) {
+                    ok = false;
+                }
+            }
+        }
+
+        return ok;
+    }
+}
diff --git a/test/com/sleepycat/je/util/BtreeCorruptionTest.java b/test/com/sleepycat/je/util/BtreeCorruptionTest.java
new file mode 100644
index 0000000..6d6dd58
--- /dev/null
+++ b/test/com/sleepycat/je/util/BtreeCorruptionTest.java
@@ -0,0 +1,1275 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.je.CheckpointConfig;
+import com.sleepycat.je.Cursor;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.Get;
+import com.sleepycat.je.OperationFailureException;
+import com.sleepycat.je.OperationResult;
+import com.sleepycat.je.SecondaryConfig;
+import com.sleepycat.je.SecondaryDatabase;
+import com.sleepycat.je.SecondaryIntegrityException;
+import com.sleepycat.je.SecondaryKeyCreator;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.CursorImpl;
+import com.sleepycat.je.log.FileManager.FileMode;
+import com.sleepycat.je.tree.BIN;
+import com.sleepycat.je.tree.Key;
+import com.sleepycat.je.txn.LockManager;
+import com.sleepycat.je.util.verify.BtreeVerifier;
+import com.sleepycat.je.utilint.CronScheduleParser;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.TestHook;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+/*
+ * Test data corruption caused by internal bugs, i.e. Btree corruption.
+ */
+public class BtreeCorruptionTest extends TestBase {
+
+    private static final String DB_NAME = "tempDB";
+    private static final String SEC_DB1_NAME = "secDb1";
+    private static final String SEC_DB2_NAME = "secDb2";
+    private static final String DB_PRI_NAME = "priDb";
+    private static final String DB_FOREIGN_NAME = "foreignDb";
+
+    private Environment env;
+    private File envHome;
+    private final int recNum = 1000; //1000 * 50; //(1000 * 500) * 50 files
+    private final int dataLen = 500;
+    private final int totalWaitTries = 70;
+    private int totalFiles = 0;
+
+    /* Used to test basic btree verification code. */
+    private Database db;
+    private Cursor cursor;
+
+    /*
+     * Two secondary databases are used to check whether BtreeVerifier can
+     * continue checking after finding one corrupted secondary database,
+     * i.e. we expect two WARNING messages, one for each corrupted
+     * secondary database.
+ */ + private SecondaryDatabase secDb1; + private SecondaryDatabase secDb2; + private SecondaryConfig secConfig1; + private SecondaryConfig secConfig2; + private Database priDb; + private Database foreignDb; + + private static final EnvironmentConfig envConfigWithVerifier + = initConfig(); + private static final EnvironmentConfig envConfigWithoutVerifier + = initConfig(); + + static { + envConfigWithoutVerifier.setConfigParam( + EnvironmentParams.ENV_RUN_VERIFIER.getName(), "false"); + } + + @Before + public void setUp() + throws Exception { + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() + throws Exception { + CronScheduleParser.setCurCalHook = null; + + if (cursor != null) { + try { + cursor.close(); + } catch (EnvironmentFailureException efe) { + + } + cursor = null; + } + + if (db != null) { + try { + db.close(); + } catch (EnvironmentFailureException efe) { + + } + db = null; + } + + if (secDb1 != null) { + try { + secDb1.close(); + } catch (EnvironmentFailureException efe) { + + } + secDb1 = null; + } + + if (secDb2 != null) { + try { + secDb2.close(); + } catch (EnvironmentFailureException efe) { + + } + secDb2 = null; + } + + if (priDb != null) { + try { + priDb.close(); + } catch (EnvironmentFailureException efe) { + + } + priDb = null; + } + + if (foreignDb != null) { + try { + foreignDb.close(); + } catch (EnvironmentFailureException efe) { + + } + foreignDb = null; + } + + if (env != null) { + env.close(); + env = null; + } + + BtreeVerifier.databaseOperBeforeBatchCheckHook = null; + BtreeVerifier.databaseOperDuringBatchCheckHook = null; + + super.tearDown(); + } + + private static EnvironmentConfig initConfig() { + EnvironmentConfig config = TestUtils.initEnvConfig(); + config.setAllowCreate(true); + config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + config.setCacheSize(1000000); + config.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000000"); + config.setConfigParam(EnvironmentConfig.VERIFY_SCHEDULE, "* * * * *"); + return config; + } + + public void openEnvAndDb(EnvironmentConfig config) { + env = new Environment(envHome, config); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + + cursor = db.openCursor(null, null); + } + + public void initialDb() { + try { + for (int i = 0 ; i < recNum; i++) { + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(i, key); + final DatabaseEntry data = new DatabaseEntry(new byte[dataLen]); + db.put(null, key, data); + } + } catch (DatabaseException dbe) { + throw new RuntimeException("Initiate Database fails.", dbe); + } + + DbInternal.getNonNullEnvImpl(env).getLogManager().flushSync(); + totalFiles = + DbInternal.getEnvironmentImpl(env).getFileManager(). + getAllFileNumbers().length; + System.out.println("Create files: " + totalFiles); + assert totalFiles < 100 : "Total file number is " + totalFiles; + } + + /* + * TODO: This method only simulate the lsn/keyOrder/indentifierKey issues + * on BIN. But it may not cover the following aspects: + * 1. Issues on upperIN, see getParentIN + * 2. Issues on BIN-delta, see DatabaseTest.mutateBINs. 
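+     *
+     * Example invocations, as used by the tests below (the type strings
+     * are the literals this method switches on):
+     *
+     *   createBtreeCorrupt("lsn", true);       // dangling LSN, persistent
+     *   createBtreeCorrupt("keyorder", false); // swapped sibling keys
+     *   createBtreeCorrupt("idenkey", false);  // bogus identifier key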
+ */
+    private void createBtreeCorrupt(String type, boolean persistent) {
+        final String persis = persistent ? "persistent" : "transient";
+        System.out.println("Create " + persis + " Corrupt type: " + type);
+
+        final DatabaseEntry key = new DatabaseEntry();
+        final DatabaseEntry data = new DatabaseEntry();
+
+        final int usedKey = recNum / 100;
+        final int usedIndex = 10;
+        IntegerBinding.intToEntry(usedKey, key);
+        assert cursor.get(key, data, Get.SEARCH, null) != null :
+            "The db should contain this record: key is " + usedKey;
+
+        final CursorImpl cursorImpl = DbInternal.getCursorImpl(cursor);
+        cursorImpl.latchBIN();
+        final BIN bin = cursorImpl.getBIN();
+        try {
+            if (type.equals("lsn")) {
+                final long origLsn = bin.getLsn(usedIndex);
+                bin.setLsn(
+                    usedIndex, DbLsn.makeLsn(totalFiles + 100, 0x100));
+                System.out.println(
+                    "Chosen key: " + Key.dumpString(bin.getKey(usedIndex), 2));
+                System.out.println(
+                    "Problematic LSN: " +
+                    DbLsn.getNoFormatString(bin.getLsn(usedIndex)) +
+                    " Original LSN: " +
+                    DbLsn.getNoFormatString(origLsn));
+            } else if (type.equals("keyorder")) {
+                final byte[] key1 = bin.getKey(usedIndex);
+                final byte[] key2 = bin.getKey(usedIndex + 1);
+
+                bin.setKey(usedIndex, key2, null, false);
+                bin.setKey(usedIndex + 1, key1, null, false);
+
+                System.out.println("Chosen keys are: " +
+                    Key.dumpString(key1, 2) + "(index:" + usedIndex + ")" +
+                    " " + Key.dumpString(key2, 2) + "(index:" +
+                    (usedIndex + 1) + ")");
+            } else if (type.equals("idenkey")) {
+                final byte[] origIdenKey = bin.getIdentifierKey();
+
+                /* The key of the last entry of the BIN. */
+                final byte[] maxKey = bin.getKey(bin.getNEntries() - 1);
+                final int len = maxKey.length;
+                final byte[] newIdenKey = new byte[(len + 1)];
+                System.arraycopy(maxKey, 0, newIdenKey, 0, len);
+                newIdenKey[len] = 100;
+                bin.setIdentifierKey(newIdenKey, true);
+
+                System.out.println("Original Identifier Key is: " +
+                    Key.dumpString(origIdenKey, 2) +
+                    " Current Identifier Key is: " +
+                    Key.dumpString(newIdenKey, 2));
+            }
+        } finally {
+            cursorImpl.releaseBIN();
+        }
+
+        if (persistent) {
+            CheckpointConfig cc = new CheckpointConfig();
+            cc.setForce(true);
+            env.checkpoint(cc);
+            DbInternal.getNonNullEnvImpl(env).getLogManager().flushSync();
+        }
+    }
+
+    /*
+     * The first traversal pass aims to cache all the log files. The second
+     * pass checks whether a read operation can succeed when one log file is
+     * corrupted, depending on whether ENV_RUN_VERIFIER is set.
+     */
+    private void traverseDb(boolean check, boolean persistent) {
+        boolean verify = DbInternal.getEnvironmentImpl(env).getConfigManager().
+            getBoolean(EnvironmentParams.ENV_RUN_VERIFIER);
+        try {
+            final DatabaseEntry key = new DatabaseEntry();
+            final DatabaseEntry data = new DatabaseEntry();
+
+            int recordCount = 0;
+            int firstKey;
+            do {
+                if (!check) {
+                    firstKey = 0;
+                } else {
+                    firstKey = recNum / 2;
+                }
+                IntegerBinding.intToEntry(firstKey, key);
+
+                assert cursor.get(key, data, Get.SEARCH, null) != null :
+                    "The db should contain this record: key is " + firstKey;
+
+                if (!check) {
+                    while (cursor.get(key, data, Get.NEXT, null) != null) {
+                        // Do nothing.
+                    }
+                }
+                /*
+                 * The smallest interval of VERIFY_SCHEDULE is 1 minute, so
+                 * here we sleep 1s up to totalWaitTries times to guarantee
+                 * that the data corruption verifier task runs at least
+                 * once.
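+                 *
+                 * For reference, the schedule driving this is set in
+                 * initConfig above and is cron-style, firing every minute:
+                 *
+                 *   config.setConfigParam(
+                 *       EnvironmentConfig.VERIFY_SCHEDULE, "* * * * *");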
+ */ + try {Thread.sleep(1000);} catch (Exception e) {} + System.out.println("check " + recordCount + " times."); + } while (check && ++recordCount < totalWaitTries); + + if (check) { + if (verify) { + fail("With verifying data corruption, we should catch" + + "EnvironmentFailureException."); + } + } + } catch (EnvironmentFailureException efe) { + //efe.printStackTrace(); + if (persistent) { + assertTrue(efe.isCorrupted()); + } else { + assertTrue(!efe.isCorrupted()); + } + + if (check) { + if (!verify) { + fail("Without verifying data corruption, we should" + + "not catch EnvironmentFailureException"); + } + } + // Leave tearDown() to close cursor, db and env. + } + } + + /* + * Lsn Dangling test + */ + @Test + public void testLSNDanglingWithVerifierTransient() { + System.out.println("testLSNDanglingWithVerifierTransient"); + testCorruptionInternal(envConfigWithVerifier, "lsn", false); + } + + @Test + public void testLSNDanglingWithVerifierPersitent() { + System.out.println("testLSNDanglingWithVerifierPersitent"); + testCorruptionInternal(envConfigWithVerifier, "lsn", true); + } + + @Test + public void testLSNDanglingWithoutVerifierTransient() { + System.out.println("testLSNDanglingWithoutVerifierTransient"); + testCorruptionInternal(envConfigWithoutVerifier, "lsn", false); + } + + @Test + public void testLSNDanglingWithoutVerifierPersitent() { + System.out.println("testLSNDanglingWithoutVerifierPersitent"); + testCorruptionInternal(envConfigWithoutVerifier, "lsn", true); + } + + /* + * Key order test + */ + @Test + public void testKeyOrderWithVerifierTransient() { + System.out.println("testKeyOrderWithVerifierTransient"); + testCorruptionInternal(envConfigWithVerifier, "keyorder", false); + } + + @Test + @Ignore + /* + * TODO: Why ignore? + * I indeed create the key order violation, do checkpoint and at last + * call flushSync. + * + * But above action causes a BIN-delta to be logged. So when the + * BtreeVerifier wants to check whether the issue is persistent, + * BtreeVerifier will read the BIN-delta and the full BIN, and then + * re-constitute the BIN. During the process, for each slot of the + * BIN-delta, it will be checked whether its key is in the full BIN. + * If the key is in the full BIN, the corresponding slot of the full + * BIN will be updated. This kind of processing method will cause that + * the key order of the new reconstituted full BIN is right. So + * BtreeVerifier will think that the corruption is not persistent. + * + * Because key order issue seems to be rare and it seems that the work + * will be a little complicate to handle this issue, I am not sure + * how to handle this. 
+ */ + public void testKeyOrderWithVerifierPersitent() { + System.out.println("testKeyOrderWithVerifierPersitent"); + testCorruptionInternal(envConfigWithVerifier, "keyorder", true); + } + + @Test + public void testKeyOrderWithoutVerifierTransient() { + System.out.println("testKeyOrderWithoutVerifierTransient"); + testCorruptionInternal(envConfigWithoutVerifier, "keyorder", false); + } + + @Test + public void testKeyOrderWithoutVerifierPersitent() { + System.out.println("testKeyOrderWithoutVerifierPersitent"); + testCorruptionInternal(envConfigWithoutVerifier, "keyorder", true); + } + + /* + * IndentifierKey test + */ + @Test + public void testIndentifyKeyWithVerifierTransient() { + System.out.println("testIndentifyKeyWithVerifierTransient"); + testCorruptionInternal(envConfigWithVerifier, "idenkey", false); + } + + @Test + public void testIndentifyKeyWithVerifierPersitent() { + System.out.println("testIndentifyKeyWithVerifierPersitent"); + testCorruptionInternal(envConfigWithVerifier, "idenkey", true); + } + + @Test + public void testIndentifyKeyWithoutVerifierTransient() { + System.out.println("testIndentifyKeyWithoutVerifierTransient"); + testCorruptionInternal(envConfigWithoutVerifier, "idenkey", false); + } + + @Test + public void testIndentifyKeyWithoutVerifierPersitent() { + System.out.println("testIndentifyKeyWithoutVerifierPersitent"); + testCorruptionInternal(envConfigWithoutVerifier, "idenkey", true); + } + + private void testCorruptionInternal( + EnvironmentConfig config, + String type, + boolean persistent) { + + openEnvAndDb(config); + System.out.println("Finish open env"); + initialDb(); + System.out.println("Finish init db"); + traverseDb(false, persistent); + System.out.println("Finish first pass traverse"); + createBtreeCorrupt(type, persistent); + System.out.println("Finish create btree corruption"); + traverseDb(true, persistent); + System.out.println("Finish second pass traverse"); + } + + /* + * The following tests aims to test index corruption verification code. + */ + + /* + * The following part is used to test part 1 of index corruption + * verification, i.e. BtreeVerifier can detect the corruption. + */ + + public void openEnvAndDbForIndexCorrupt( + EnvironmentConfig config, + String mode) { + + env = new Environment(envHome, config); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + priDb = env.openDatabase(null, DB_PRI_NAME, dbConfig); + + if (mode.equals("foreignNotExist")) { + foreignDb = env.openDatabase(null, DB_FOREIGN_NAME, dbConfig); + } + + secConfig1 = new SecondaryConfig(); + secConfig1.setAllowCreate(true); + secConfig1.setAllowPopulate(true); + secConfig1.setSortedDuplicates(true); + if (mode.equals("foreignNotExist")) { + secConfig1.setForeignKeyDatabase(foreignDb); + } + secConfig1.setKeyCreator(new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData + (data.getData(), data.getOffset(), data.getSize()); + return true; + } + }); + secDb1 = + env.openSecondaryDatabase(null, SEC_DB1_NAME, priDb, secConfig1); + + /* + * For testing the scenario where the foreing record does not exist, + * we only use one secondary database. For other two modes, we use + * two secondary databases. 
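+         *
+         * The three mode literals used throughout these index tests:
+         *   "primaryNotExist"   - delete a primary record behind the
+         *                         secondaries' backs
+         *   "foreignNotExist"   - delete a referenced foreign record
+         *   "SecondaryNotExist" - delete secondary records directly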
+ */ + if (!mode.equals("foreignNotExist")) { + secConfig2 = new SecondaryConfig(); + secConfig2.setAllowCreate(true); + secConfig2.setAllowPopulate(true); + secConfig2.setSortedDuplicates(true); + secConfig2.setKeyCreator(new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + int origData = IntegerBinding.entryToInt(data); + int newSecKey = origData * 2; + DatabaseEntry newKey = new DatabaseEntry(); + IntegerBinding.intToEntry(newSecKey, newKey); + result.setData + (newKey.getData(), newKey.getOffset(), newKey.getSize()); + return true; + } + }); + secDb2 = + env.openSecondaryDatabase(null, SEC_DB2_NAME, priDb, secConfig2); + } + } + + public void initialDbForIndexCorrupt(String mode) { + try { + for (int i = 0 ; i < recNum; i++) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i + 1, data); + /* + * Need to insert record to foreign db before inserting + * record to primary db, because the latter will update + * the secondary db, which will check whether the secondary + * key exist in the foreign db. + */ + if (mode.equals("foreignNotExist")) { + foreignDb.put(null, data, new DatabaseEntry(new byte[10])); + } + priDb.put(null, key, data); + } + } catch (DatabaseException dbe) { + throw new RuntimeException("Initiate Database fails.", dbe); + } + + DbInternal.getNonNullEnvImpl(env).getLogManager().flushSync(); + totalFiles = + DbInternal.getEnvironmentImpl(env).getFileManager(). + getAllFileNumbers().length; + System.out.println("Create files: " + totalFiles); + assert totalFiles < 100 : "Total file number is " + totalFiles; + } + + private void traverseOneDb(Database dbHandle, boolean sleep) { + if (dbHandle == null) { + return; + } + + Cursor tmpCursor = dbHandle.openCursor(null, null); + try { + if (!sleep) { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + OperationResult opResult = + tmpCursor.get(key, data, Get.FIRST, null); + assert opResult != null; + while (opResult != null) { + opResult = tmpCursor.get(key, data, Get.NEXT, null); + } + } else { + int saftToAccessKeyValue = recNum / 10; + DatabaseEntry saftToAccessKey = new DatabaseEntry(); + IntegerBinding.intToEntry(saftToAccessKeyValue, saftToAccessKey); + DatabaseEntry data = new DatabaseEntry(); + int count = 0; + while (count++ < totalWaitTries) { + OperationResult opResult = + tmpCursor.get(saftToAccessKey, data, Get.SEARCH, null); + assert opResult != null; + try {Thread.sleep(1000);} catch (Exception e) {} + System.out.println( + "Wait for " + count + " times. IsCorrupt: " + + DbInternal.isCorrupted(dbHandle)); + } + } + } finally { + if (tmpCursor != null) { + tmpCursor.close(); + } + } + } + + private void traverseDbNormally() { + traverseOneDb(priDb, false); + traverseOneDb(secDb1, false); + traverseOneDb(secDb2, false); + traverseOneDb(foreignDb, false); + } + + private void createIndexCorrupt(String mode) { + if (mode.equals("primaryNotExist")) { + /* Step1: close secondary db first. */ + secDb1.close(); + secDb2.close(); + + /* + * Step2: Delete one primary record. This will create index + * corruption because now the secondary databases can not + * be updated. 
+ */ + final int usedPriKeyValue = recNum / 100; + DatabaseEntry usedPriKey = new DatabaseEntry(); + IntegerBinding.intToEntry(usedPriKeyValue, usedPriKey); + priDb.delete(null, usedPriKey); + + /* + * Step3: Reopen the secondary database. It seems to not matter + * whether I set allowPopulate to be true or false. + * 1. Open as secondary is necessary because our verifier + * only check index when the dbImpl has secondary db handle. + * 2. We can not use SecondaryCursor to traverse the secondary + * database, because SecondaryCursor can also detect SIE when + * accessing the problematic record. So we just use + * SecondaryCursor to access one safe record to wait for the + * verifier to detect the corruption. + */ + secConfig1.setAllowPopulate(false); + secConfig2.setAllowPopulate(false); + secDb1 = + env.openSecondaryDatabase(null, SEC_DB1_NAME, priDb, secConfig1); + secDb2 = + env.openSecondaryDatabase(null, SEC_DB2_NAME, priDb, secConfig2); + } else if (mode.equals("foreignNotExist")) { + /* Step1: close secondary db first. */ + secDb1.close(); + + /* + * Step2: Delete one foreign record. This will create index + * corruption because now the secondary databases can not + * be updated. + */ + final int usedForeingKeyValue = recNum / 100 + 1; + DatabaseEntry usedForeignKey = new DatabaseEntry(); + IntegerBinding.intToEntry(usedForeingKeyValue, usedForeignKey); + foreignDb.delete(null, usedForeignKey); + + /* + * Step3: Reopen the secondary database. It seems to not matter + * whether I set allowPopulate to be true or false. + */ + secConfig1.setAllowPopulate(false); + secDb1 = + env.openSecondaryDatabase(null, SEC_DB1_NAME, priDb, secConfig1); + } else if (mode.equals("SecondaryNotExist")) { + /* + * Step1: close primary db first. But if we openSecondaryDb with + * priDb, we should first close secDb before closing priDb. + */ + secDb1.close(); + secDb2.close(); + priDb.close(); + + /* + * Step2: Delete secondary record. Open the secondary database + * as normal database. Now the primary databases can not + * be updated. The value is set according to the + * SecondaryKeyCreator. + */ + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + + Database tmpSecDb1 = env.openDatabase(null, SEC_DB1_NAME, dbConfig); + final int usedSecKeyValue1 = recNum / 100 + 1; + DatabaseEntry usedSecKey1 = new DatabaseEntry(); + IntegerBinding.intToEntry(usedSecKeyValue1, usedSecKey1); + tmpSecDb1.delete(null, usedSecKey1); + + Database tmpSecDb2 = env.openDatabase(null, SEC_DB2_NAME, dbConfig); + final int usedSecKeyValue2 = (recNum / 100 + 2) * 2; + DatabaseEntry usedSecKey2 = new DatabaseEntry(); + IntegerBinding.intToEntry(usedSecKeyValue2, usedSecKey2); + tmpSecDb2.delete(null, usedSecKey2); + + tmpSecDb1.close(); + tmpSecDb2.close(); + + /* + * Step3: Reopen the secondary database. It seems to not matter + * whether I set allowPopulate to be true or false. 
+ */ + final DatabaseConfig dbConfig1 = new DatabaseConfig(); + dbConfig1.setAllowCreate(true); + priDb = env.openDatabase(null, DB_PRI_NAME, dbConfig1); + + secConfig1.setAllowPopulate(false); + secConfig2.setAllowPopulate(false); + secDb1 = + env.openSecondaryDatabase(null, SEC_DB1_NAME, priDb, secConfig1); + secDb2 = + env.openSecondaryDatabase(null, SEC_DB2_NAME, priDb, secConfig2); + } else { + fail("Should at least specify one mode in 'primaryNotExist', " + + "'foreignNotExist' and 'SecondaryNotExist'"); + } + } + + private void traverseDbWaitToCheck(String mode) { + /* + * For any situation, at any time, traversing primary database and + * foreign database should be OK. + */ + try { + traverseOneDb(priDb, false); + traverseOneDb(foreignDb, false); + } catch (DatabaseException dbe) { + fail("For any situation, at any time, traversing primary" + + "database and foreign database should be OK."); + } + + boolean verify = + DbInternal.getEnvironmentImpl(env).getConfigManager(). + getBoolean(EnvironmentParams.ENV_RUN_VERIFIER); + + try { + traverseOneDb(secDb1, true); + if (verify) { + fail("With verifying index corruption, the status of the " + + "secondary database should be CORRUPT. So we should" + + "catch OperationFailureException when calling" + + "Database.checkOpen."); + } + } catch (OperationFailureException ofe) { + assert (ofe instanceof SecondaryIntegrityException); + assert DbInternal.isCorrupted(secDb1); + if (mode.equals("primaryNotExist")) { + assert ((SecondaryIntegrityException) ofe.getCause()). + getMessage().contains( + "Secondary refers to a missing key in the primary database"); + } else if (mode.equals("foreignNotExist")) { + assert ((SecondaryIntegrityException) ofe.getCause()). + getMessage().contains( + "Secondary key does not exist in foreign database"); + } else if (mode.equals("SecondaryNotExist")) { + assert ((SecondaryIntegrityException) ofe.getCause()). + getMessage().contains( + "the primary record contains a key that is not present " + + "in this secondary database"); + } + + if (!verify) { + fail("Without verifying index corruption, we should" + + "not catch OperationFailureException"); + } + } + + if (!mode.equals("foreignNotExist")) { + try { + traverseOneDb(secDb2, true); + if (verify) { + fail("With verifying index corruption, the status of the" + + "secondary database should be CORRUPT. So we should" + + "catch OperationFailureException when calling" + + "Database.checkOpen."); + } + } catch (OperationFailureException ofe) { + assert (ofe instanceof SecondaryIntegrityException); + assert DbInternal.isCorrupted(secDb2); + if (mode.equals("primaryNotExist")) { + assert ((SecondaryIntegrityException) ofe.getCause()). + getMessage().contains( + "Secondary refers to a missing key in the primary database"); + } else if (mode.equals("SecondaryNotExist")) { + assert ((SecondaryIntegrityException) ofe.getCause()). + getMessage().contains( + "the primary record contains a key that is not present " + + "in this secondary database"); + } + + if (!verify) { + fail("Without verifying index corruption, we should" + + "not catch OperationFailureException"); + } + } + } + + /* Check the log contains the WARNING message if we enable verifier. 
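+         * The verifier logs one line per corrupted secondary database
+         * containing "Secondary corruption is detected during btree
+         * verification"; the loop below counts those lines in je.info.0.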
*/ + RandomAccessFile raf = null; + try { + final File file = new File(envHome.getCanonicalFile(), "je.info.0"); + assert file.exists(); + raf = new RandomAccessFile( + file.getCanonicalFile(), + FileMode.READWRITE_MODE.getModeValue()); + String newLine; + int count = 0; + while ((newLine = raf.readLine()) != null) { + if (newLine.contains( + "Secondary corruption is detected during btree " + + "verification")) { + count++; + } + } + + /* + * TODO: need to confirm. For each corrupted secondary database, + * there exist and only exist one WARNING message. + */ + if (verify) { + if (mode.equals("foreignNotExist")) { + assert count == 1 : count; + } else { + assert count == 2 : count; + } + } else { + assert count == 0 : count; + } + } catch (IOException ioe) { + + } finally { + try { raf.close();} catch (IOException e) {} + } + } + + private void testIndexCorruptionInternal( + EnvironmentConfig config, String mode) { + + openEnvAndDbForIndexCorrupt(config, mode); + System.out.println("Finish open env"); + initialDbForIndexCorrupt(mode); + System.out.println("Finish init db"); + traverseDbNormally(); + System.out.println("Finish first pass traverse"); + createIndexCorrupt(mode); + System.out.println("Finish create index corruption"); + traverseDbWaitToCheck(mode); + System.out.println("Finish check phase"); + } + + @Test + public void testWithVerifierPrimaryRecordDoesNotExist () { + testIndexCorruptionInternal(envConfigWithVerifier, "primaryNotExist"); + } + + @Test + public void testWithoutVerifierPrimaryRecordDoesNotExist () { + testIndexCorruptionInternal(envConfigWithoutVerifier, "primaryNotExist"); + } + + @Test + public void testWithVerifierForeignRecordDoesNotExist () { + testIndexCorruptionInternal(envConfigWithVerifier, "foreignNotExist"); + } + + @Test + public void testWithoutVerifierForeignRecordDoesNotExist () { + testIndexCorruptionInternal(envConfigWithoutVerifier, "foreignNotExist"); + } + + @Test + public void testWithVerifierSecondaryRecordDoesNotExist () { + EnvironmentConfig usedConfig = envConfigWithVerifier.clone(); + usedConfig.setConfigParam( + EnvironmentParams.VERIFY_DATA_RECORDS.getName(), "true"); + testIndexCorruptionInternal(usedConfig, "SecondaryNotExist"); + } + + @Test + public void testWithoutVerifierSecondaryRecordDoesNotExist () { + testIndexCorruptionInternal(envConfigWithoutVerifier, "SecondaryNotExist"); + } + + /* + * The following part is used to test part 2 of index corruption + * verification, i.e. BtreeVerifier can still run normally after we inject + * some abnormal situation, e.g. close or remove databases before or during + * the batch check procedure. 
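+     *
+     * The injection points are the static test hooks on BtreeVerifier,
+     * wired up in testPart2Internal below:
+     *
+     *   BtreeVerifier.databaseOperBeforeBatchCheckHook = testHook;
+     *   // or:
+     *   BtreeVerifier.databaseOperDuringBatchCheckHook = testHook;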
+ */ + + private void openEnvAndDbForIndexCorruptPart2( + boolean foreign, + boolean secondary) { + + EnvironmentConfig usedConfig = envConfigWithVerifier.clone(); + usedConfig.setConfigParam( + EnvironmentParams.VERIFY_DATA_RECORDS.getName(), "true"); + env = new Environment(envHome, usedConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + priDb = env.openDatabase(null, DB_PRI_NAME, dbConfig); + + if (foreign) { + foreignDb = env.openDatabase(null, DB_FOREIGN_NAME, dbConfig); + } + + if (secondary) { + secConfig1 = new SecondaryConfig(); + secConfig1.setAllowCreate(true); + secConfig1.setAllowPopulate(true); + secConfig1.setSortedDuplicates(true); + if (foreign) { + secConfig1.setForeignKeyDatabase(foreignDb); + } + secConfig1.setKeyCreator(new SecondaryKeyCreator() { + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, + DatabaseEntry data, + DatabaseEntry result) { + result.setData + (data.getData(), data.getOffset(), data.getSize()); + return true; + } + }); + secDb1 = + env.openSecondaryDatabase(null, SEC_DB1_NAME, priDb, secConfig1); + } + } + + private void initialDbForIndexCorruptPart2(boolean foreign) { + try { + for (int i = 0 ; i < recNum; i++) { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i + 1, data); + /* + * Need to insert record to foreign db before inserting + * record to primary db, because the latter will update + * the secondary db, which will check whether the secondary + * key exist in the foreign db. + */ + if (foreign) { + foreignDb.put(null, data, new DatabaseEntry(new byte[10])); + } + priDb.put(null, key, data); + } + } catch (DatabaseException dbe) { + throw new RuntimeException("Initiate Database fails.", dbe); + } + + DbInternal.getNonNullEnvImpl(env).getLogManager().flushSync(); + totalFiles = + DbInternal.getEnvironmentImpl(env).getFileManager(). + getAllFileNumbers().length; + System.out.println("Create files: " + totalFiles); + assert totalFiles < 100 : "Total file number is " + totalFiles; + } + + private void testPart2Internal ( + boolean foreign, + boolean secondary, + String testName, + boolean isPri, + boolean isSec, + String action, + String position) { + + openEnvAndDbForIndexCorruptPart2(foreign, secondary); + System.out.println("Finish open env and db."); + + initialDbForIndexCorruptPart2(foreign); + System.out.println("Finish init db."); + + /* Skip the first time run of BtreeVerifier .*/ + for (int i = 0; i < 3; i++) { + try {Thread.sleep(10000);} catch (Exception e) {} + System.out.println( + i + "th: Sleep 10 seconds to skip first time run " + + "of BtreeVerifier"); + } + + MyHook testHook = new MyHook(isPri, isSec, testName, action); + if (position.equals("before")) { + BtreeVerifier.databaseOperBeforeBatchCheckHook = testHook; + } else if (position.equals("during")) { + BtreeVerifier.databaseOperDuringBatchCheckHook = testHook; + } else { + fail("Wrong option"); + } + + /* Wait for the second time run of BtreeVerifier. */ + int i = 0; + while (testHook.getCount() <= 0) { + try {Thread.sleep(10000);} catch (Exception e) {} + System.out.println( + i + "th: Sleep 10 seconds to wait second time run " + + "of BtreeVerifier"); + i++; + } + + /* + * Wait 5 more seconds to let the second time run of BtreeVerifier + * finish. 
+ */ + try {Thread.sleep(5000);} catch (Exception e) {} + System.out.println( + "Wait 5 more seconds to let the second time run " + + "of BtreeVerifier finish."); + + /* Check whether the test pass. */ + assert(env.isValid()); + if (testHook.getThrowable() != null) { + testHook.getThrowable().printStackTrace(); + } + assert testHook.getThrowable() == null; + + /* + * TODO: Maybe need to add check whether the secondary database is + * corrupt and whether the log contains WARNING messages. + * + * FurtherMore, if we simulate index corruption, then whether + * the secondary database is corrupt and whether the log + * contains WARNING messages if we close/remove db before/during + * batch check. + */ + } + + class MyHook implements TestHook { + + private boolean isPri; + private boolean isSec; + private String testName; + private String action; + + /* + * To indicate how many times doHook run really. + */ + private int count = 0; + private Throwable anyT; + private boolean needSleep; + + public MyHook( + boolean isPri, + boolean isSec, + String testName, + String action) { + this.isPri = isPri; + this.isSec = isSec; + this.testName = testName; + this.action = action; + } + + @Override + public void doHook(final Database obj) { + new Thread(testName) { + @Override + public void run() { + try { + if (isPri) { + if (obj == priDb && action.equals("close")) { + priDb.close(); + needSleep = true; + count++; + } + + if (obj == priDb && action.equals("remove")) { + if (secDb1 != null) { + secDb1.close(); + } + priDb.close(); + env.removeDatabase(null, DB_PRI_NAME); + if (secDb1 != null) { + env.removeDatabase(null, SEC_DB1_NAME); + } + needSleep = true; + count++; + } + + if (obj == priDb && action.equals("truncate")) { + priDb.close(); + env.truncateDatabase(null, DB_PRI_NAME, false); + needSleep = true; + count++; + } + } else if (isSec) { + if (obj == secDb1 && action.equals("close")) { + secDb1.close(); + needSleep = true; + count++; + } + + if (obj == secDb1 && action.equals("remove")) { + secDb1.close(); + env.removeDatabase(null, SEC_DB1_NAME); + needSleep = true; + count++; + } + + if (obj == secDb1 && action.equals("truncate")) { + secDb1.close(); + env.truncateDatabase(null, SEC_DB1_NAME, false); + needSleep = true; + count++; + } + } else { + fail("Wrong option"); + } + } catch (Throwable t) { + anyT = t; + } finally { + + } + } + + }.start(); + + if (needSleep) { + /* Wait 5 more seconds to let above thread to run. */ + try {Thread.sleep(5000);} catch (Exception e) {} + System.out.println( + "Wait 5 seconds to let testHook do " + + "close/remove/truncate."); + needSleep = false; + } + } + + public int getCount() { + return count; + } + + public Throwable getThrowable() { + return anyT; + } + + @Override + public void doHook() { + } + @Override + public void hookSetup() { + } + @Override + public void doIOHook() throws IOException { + } + @Override + public Database getHookValue() { + return null; + } + } + + /* + * TODO: We also need to add test cases about foreign database. Now the + * following test cases only contain Primary/Secondary databases. 
+ */ + /* + @Test + public void testClosePriDbBeforeBatch () { + testPart2Internal( + false, false, "testClosePriDbBeforeBatch", true, false, + "close", "before"); + } + + @Test + public void testCloseSecDbBeforeBatch () { + testPart2Internal( + false, true, "testCloseSecDbBeforeBatch", false, true, + "close", "before"); + } + + @Test + public void testRemovePriDbBeforeBatch () { + testPart2Internal( + false, false, "testRemovePriDbBeforeBatch", true, false, + "remove", "before"); + } + + @Test + public void testRemoveSecDbBeforeBatch () { + testPart2Internal( + false, true, "testRemoveSecDbBeforeBatch", false, true, + "remove", "before"); + } + + @Test + public void testTruncatePriDbBeforeBatch () { + testPart2Internal( + false, false, "testRemovePriDbBeforeBatch", true, false, + "truncate", "before"); + } + + @Test + public void testTruncateSecDbBeforeBatch () { + testPart2Internal( + false, true, "testRemoveSecDbBeforeBatch", false, true, + "truncate", "before"); + } + + @Test + public void testRemovePriAndSecDbBeforeBatch () { + testPart2Internal( + false, true, "testRemovePriAndSecDbBeforeBatch", true, false, + "remove", "before"); + } + + @Test + public void testClosePriDbDuringBatch () { + testPart2Internal( + false, false, "testClosePriDbBeforeBatch", true, false, + "close", "during"); + } + + @Test + public void testCloseSecDbDuringBatch () { + testPart2Internal( + false, true, "testCloseSecDbBeforeBatch", false, true, + "close", "during"); + } + + @Test + public void testRemovePriDbDuringBatch () { + testPart2Internal( + false, false, "testRemovePriDbBeforeBatch", true, false, + "remove", "during"); + } + + @Test + public void testRemoveSecDbDuringBatch () { + testPart2Internal( + false, true, "testRemoveSecDbBeforeBatch", false, true, + "remove", "during"); + } + + @Test + public void testTruncatePriDbDuringBatch () { + testPart2Internal( + false, false, "testRemovePriDbBeforeBatch", true, false, + "truncate", "during"); + } + + @Test + public void testTruncateSecDbDuringBatch () { + testPart2Internal( + false, true, "testRemoveSecDbBeforeBatch", false, true, + "truncate", "during"); + } + + @Test + public void testRemovePriAndSecDbDuringBatch () { + testPart2Internal( + false, true, "testRemovePriAndSecDbBeforeBatch", true, false, + "remove", "during"); + } + */ +} + diff --git a/test/com/sleepycat/je/util/CustomDbPrintLogTest.java b/test/com/sleepycat/je/util/CustomDbPrintLogTest.java new file mode 100644 index 0000000..83f4dca --- /dev/null +++ b/test/com/sleepycat/je/util/CustomDbPrintLogTest.java @@ -0,0 +1,213 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.util.ArrayList; + +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.dbi.DatabaseId; +import com.sleepycat.je.junit.JUnitProcessThread; +import com.sleepycat.je.log.LNFileReader; +import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * @excludeDualMode + * This test does not run in Replication Dual Mode. There are several + * logistical issues. + * + * -It assumes that all log files are in the directory, whereas + * dual mode environments are in /rep* + */ +public class CustomDbPrintLogTest extends TestBase { + + public static int COUNTER = 0; + public static DatabaseId CHECK_ID; + + private Environment env; + private final File envHome; + + public CustomDbPrintLogTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + deletePrintInfo(); + } + + @After + public void tearDown() { + + /* + * Close down environments in case the unit test failed so that the log + * files can be removed. + */ + try { + if (env != null) { + env.close(); + env = null; + } + } catch (Exception e) { + e.printStackTrace(); + return; + } + + deletePrintInfo(); + } + + /* Delete the dumpLog file created by TestDumper. */ + private void deletePrintInfo() { + for (File file : envHome.listFiles()) { + if (file.isFile() && + file.getName().contains(TestDumper.SAVE_INFO_FILE)) { + boolean deleted = file.delete(); + assertTrue(deleted); + } + } + } + + private void createEnv() { + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, "foo", dbConfig); + CHECK_ID = DbInternal.getDbImpl(db).getId(); + DatabaseEntry key = new DatabaseEntry(new byte[1000]); + DatabaseEntry data = new DatabaseEntry(new byte[1000]); + for (int i = 0; i < 10; i += 1) { + db.put(null, key, data); + } + db.close(); + + CheckpointConfig ckptConfig = new CheckpointConfig(); + ckptConfig.setForce(true); + env.checkpoint(ckptConfig); + } + + /* + * Use the custom log printer to list types of log entries. + * + * Note that we run the custom log printer in a separate process, whereas + * it would have seemed more intuitive to merely call DbPrintLog + * programmatically. It's done this way instead because the classpath + * doesn't work out right within junit and it doesn't recognize the + * custom dumper class. + */ + @Test + public void testCustom() + throws Throwable { + + createEnv(); + LNFileReader reader = + new LNFileReader(DbInternal.getNonNullEnvImpl(env), + 10000, 0, true, DbLsn.NULL_LSN, DbLsn.NULL_LSN, + null, DbLsn.NULL_LSN); + /* Specify the entry types looking for. 
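+         * (These LN entry types correspond to record deletes, inserts and
+         * updates respectively.)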
*/ + reader.addTargetType(LogEntryType.LOG_DEL_LN); + reader.addTargetType(LogEntryType.LOG_INS_LN); + reader.addTargetType(LogEntryType.LOG_UPD_LN); + + /* Check the LN count. */ + int count = 0; + ArrayList lnMessages = new ArrayList(); + while(reader.readNextEntry()) { + count++; + lnMessages.add(reader.getLNLogEntry().getLogType() + " lsn=" + + DbLsn.getNoFormatString(reader.getLastLsn())); + } + assertTrue("count: " + count, count == 10); + + TestDumper foo = new TestDumper(DbInternal.getNonNullEnvImpl(env), + 1000, + 0L, 0L, 0L, null, null, false, false, + true); + + /* Invoke process to call the DbPrintLog. */ + String[] commands = new String[5]; + commands[0] = "com.sleepycat.je.util.DbPrintLog"; + commands[1] = "-h"; + commands[2] = envHome.getAbsolutePath(); + commands[3] = "-c"; + commands[4] = foo.getClass().getName(); + + JUnitProcessThread thread = + new JUnitProcessThread("TestDumper", 0, null, commands, true); + thread.start(); + + try { + thread.finishTest(); + } catch (Throwable t) { + fail("Unexpected exception: " + t); + } + + /* Read from the info file and checks the size is the same. */ + ArrayList messages = readMessages(); + assertTrue(messages.size() > count); + + /* + * Check messages read by the LNFileReader must exist in the list read + * by TestDumper. + */ + for (String message : lnMessages) { + assertTrue("message: " + message + " is not in information " + + "read by TestDumper", messages.contains(message)); + } + + /* Close Environment. */ + env.close(); + env = null; + } + + /* Read messages written by TestDumper. */ + private ArrayList readMessages() + throws Exception { + + File file = new File(envHome, TestDumper.SAVE_INFO_FILE); + assertTrue(file.exists()); + + ArrayList messages = new ArrayList(); + BufferedReader reader = new BufferedReader(new FileReader(file)); + String line = null; + while ((line = reader.readLine()) != null) { + messages.add(line); + } + reader.close(); + + return messages; + } +} diff --git a/test/com/sleepycat/je/util/DbBackupTest.java b/test/com/sleepycat/je/util/DbBackupTest.java new file mode 100644 index 0000000..1b15ea9 --- /dev/null +++ b/test/com/sleepycat/je/util/DbBackupTest.java @@ -0,0 +1,794 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
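A note on the separate-process technique used by testCustom above: when a pluggable class such as the custom dumper must be resolved from the application classpath rather than from the JUnit harness, spawning a child JVM avoids the loader problem entirely. A minimal sketch of the same invocation using ProcessBuilder; the launcher class itself is illustrative and not part of JE:

    import java.io.File;

    public final class PrintLogLauncher {

        /* Run DbPrintLog in a child JVM, as JUnitProcessThread does above. */
        public static int run(File envHome, String dumperClass)
            throws Exception {

            final String java = System.getProperty("java.home") +
                File.separator + "bin" + File.separator + "java";
            final ProcessBuilder pb = new ProcessBuilder(
                java,
                "-cp", System.getProperty("java.class.path"),
                "com.sleepycat.je.util.DbPrintLog",
                "-h", envHome.getAbsolutePath(),
                "-c", dumperClass);
            pb.inheritIO();               /* forward child stdout/stderr */
            return pb.start().waitFor();  /* 0 means a clean exit */
        }
    }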
+ */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class DbBackupTest extends TestBase { + + private static StatsConfig CLEAR_CONFIG = new StatsConfig(); + static { + CLEAR_CONFIG.setClear(true); + } + + private static CheckpointConfig FORCE_CONFIG = new CheckpointConfig(); + static { + FORCE_CONFIG.setForce(true); + } + + private static final String SAVE1 = "save1"; + private static final String SAVE2 = "save2"; + private static final String SAVE3 = "save3"; + private static final int NUM_RECS = 200; + private static final int N_DATA_DIRS = 3; + + private final File envHome; + private Environment env; + private FileManager fileManager; + private final boolean useMultiEnvDirs; + + @Parameters + public static List genParams() { + return Arrays.asList(new Object[][]{{false}, {true}}); + } + + public DbBackupTest(boolean multiEnv) { + envHome = SharedTestUtils.getTestDir(); + useMultiEnvDirs = multiEnv; + customName = useMultiEnvDirs ? ":multi-env-dirs" : ""; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + deleteSaveDir(SAVE1); + deleteSaveDir(SAVE2); + deleteSaveDir(SAVE3); + } + + @After + public void tearDown() { + if (env != null) { + try { + env.close(); + } finally { + env = null; + } + } + maybeDeleteDataDirs(envHome, "TearDown", false); + + TestUtils.removeLogFiles("TearDown", envHome, false); + deleteSaveDir(SAVE1); + deleteSaveDir(SAVE2); + deleteSaveDir(SAVE3); + } + + /** + * Test basic backup, make sure log cleaning isn't running. + */ + @Test + public void testBackupVsCleaning() + throws Throwable { + + env = createEnv(false, envHome); /* read-write env */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + + /* + * Grow files, creating obsolete entries to create cleaner + * opportunity. + */ + growFiles("db1", env, 8); + + /* Start backup. 
*/ + DbBackup backupHelper = new DbBackup(env); + backupHelper.startBackup(); + String[] backupFiles = backupHelper.getLogFilesInBackupSet(); + + long lastFileNum = backupHelper.getLastFileInBackupSet(); + long checkLastFileNum = lastFileNum; + + /* Copy the backup set. */ + saveFiles(backupHelper, -1, lastFileNum, SAVE1); + + /* + * Try to clean and checkpoint. Check that the logs grew as + * a result. + */ + batchClean(0, backupFiles); + long newLastFileNum = (fileManager.getLastFileNum()).longValue(); + assertTrue(checkLastFileNum < newLastFileNum); + checkLastFileNum = newLastFileNum; + + /* Copy the backup set after attempting cleaning */ + saveFiles(backupHelper, -1, lastFileNum, SAVE2); + + /* Insert more data. */ + growFiles("db2", env, 8); + + /* + * Try to clean and checkpoint. Check that the logs grew as + * a result. + */ + batchClean(0, backupFiles); + newLastFileNum = fileManager.getLastFileNum(); + assertTrue(checkLastFileNum < newLastFileNum); + checkLastFileNum = newLastFileNum; + + /* Copy the backup set after inserting more data */ + saveFiles(backupHelper, -1, lastFileNum, SAVE3); + + /* Check the membership of the saved set. */ + long lastFile = backupHelper.getLastFileInBackupSet(); + String[] backupSet = backupHelper.getLogFilesInBackupSet(); + assertEquals((lastFile + 1), backupSet.length); + + /* End backup. */ + backupHelper.endBackup(); + + /* + * Run cleaning, and verify that quite a few files are deleted. + */ + long numCleaned = batchClean(100, backupFiles); + assertTrue(numCleaned > 5); + env.close(); + env = null; + + /* Verify backups. */ + maybeDeleteDataDirs(envHome, "Verify", true); + + TestUtils.removeFiles("Verify", envHome, FileManager.JE_SUFFIX); + verifyDb(SAVE1, true); + + maybeDeleteDataDirs(envHome, "Verify", true); + + TestUtils.removeFiles("Verify", envHome, FileManager.JE_SUFFIX); + verifyDb(SAVE2, true); + + maybeDeleteDataDirs(envHome, "Verify", true); + + TestUtils.removeFiles("Verify", envHome, FileManager.JE_SUFFIX); + verifyDb(SAVE3, true); + } + + /** + * Test basic backup, make sure environment can't be closed mid-stream. + */ + @Test + public void testBackupVsClose() + throws Throwable { + + env = createEnv(false, envHome); /* read-write env */ + + growFiles("db1", env, 8); + + /* Start backup. */ + DbBackup backupHelper = new DbBackup(env); + backupHelper.startBackup(); + + try { + env.close(); + fail("expected whining about backup being in progress."); + } catch (EnvironmentFailureException expected) { + // expected + } + env = null; + backupHelper.endBackup(); + + /* Verify backups. */ + maybeDeleteDataDirs(envHome, "Verify", true); + + TestUtils.removeLogFiles("Verify", envHome, false); + maybeDeleteDataDirs(envHome, "Verify", true); + } + + /** + * Test multiple backup passes + */ + @Test + public void testIncrementalBackup() + throws Throwable { + + env = createEnv(false, envHome); /* read-write env */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + fileManager = envImpl.getFileManager(); + + growFiles("db1", env, 8); + + /* Backup1. */ + DbBackup backupHelper1 = new DbBackup(env); + backupHelper1.startBackup(); + long b1LastFile = backupHelper1.getLastFileInBackupSet(); + saveFiles(backupHelper1, -1, b1LastFile, SAVE1); + String lastName = + fileManager.getFullFileName(b1LastFile, FileManager.JE_SUFFIX); + long b1LastFileLen = new File(lastName).length(); + backupHelper1.endBackup(); + + /* + * Add more data. Check that the file did flip, and is not modified + * by the additional data. 
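The two tests above pin down the DbBackup contract: between startBackup and endBackup the backup set is frozen, the cleaner may not delete its files, and the environment refuses to close. A condensed sketch of the copy loop, assuming a copy(File, File) helper along the lines of the test's copyFiles:

    DbBackup backup = new DbBackup(env);
    backup.startBackup();                     /* freeze the backup set */
    try {
        for (String name : backup.getLogFilesInBackupSet()) {
            copy(new File(envHome, name), new File(saveDir, name));
        }
    } finally {
        backup.endBackup();                   /* re-enable file deletion */
    }

Restoring is then just copying the saved files back into an empty directory before opening the environment, which is exactly what verifyDb does further below.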
+ */ + growFiles("db2", env, 8); + checkFileLen(b1LastFile, b1LastFileLen); + + /* Backup2. */ + DbBackup backupHelper2 = new DbBackup(env, b1LastFile); + backupHelper2.startBackup(); + long b2LastFile = backupHelper2.getLastFileInBackupSet(); + saveFiles(backupHelper2, b1LastFile, b2LastFile, SAVE2); + lastName = + fileManager.getFullFileName(b2LastFile, FileManager.JE_SUFFIX); + long b2LastFileLen = new File(lastName).length(); + backupHelper2.endBackup(); + + /* Test deprecated getLogFilesInBackupSet(long) method. */ + DbBackup backupHelper3 = new DbBackup(env); + backupHelper3.startBackup(); + String[] fileList3 = + backupHelper3.getLogFilesInBackupSet(b1LastFile); + assertEquals(b1LastFile + 1, + fileManager.getNumFromName(fileList3[0]).longValue()); + backupHelper3.endBackup(); + + env.close(); + env = null; + + /* Verify backup 1. */ + maybeDeleteDataDirs(envHome, "Verify", true); + TestUtils.removeFiles("Verify", envHome, FileManager.JE_SUFFIX); + verifyDb(SAVE1, false); + + /* Last file should be immutable after a restore. */ + checkFileLen(b1LastFile, b1LastFileLen); + + /* Verify backup 2. */ + maybeDeleteDataDirs(envHome, "Verify", false); + TestUtils.removeFiles("Verify", envHome, FileManager.JE_SUFFIX); + verifyBothDbs(SAVE1, SAVE2); + + /* Last file should be immutable after a restore. */ + checkFileLen(b2LastFile, b2LastFileLen); + } + + private void maybeDeleteDataDirs(File envDir, + String comment, + boolean subdirsOnly) { + if (useMultiEnvDirs) { + for (int i = 1; i <= N_DATA_DIRS; i += 1) { + File dataDir = new File(envDir, "data00" + i); + TestUtils.removeFiles + (comment, dataDir, FileManager.JE_SUFFIX); + if (!subdirsOnly) { + dataDir.delete(); + } + } + } + } + + @Test + public void testBadUsage() + throws Exception { + + env = createEnv(false, envHome); /* read-write env */ + + DbBackup backup = new DbBackup(env); + + /* end can only be called after start. */ + try { + backup.endBackup(); + fail("should fail"); + } catch (IllegalStateException expected) { + } + + /* start can't be called twice. */ + backup.startBackup(); + try { + backup.startBackup(); + fail("should fail"); + } catch (IllegalStateException expected) { + } + + /* + * You can only get the backup set when you're in between start + * and end. + */ + backup.endBackup(); + + try { + backup.getLastFileInBackupSet(); + fail("should fail"); + } catch (IllegalStateException expected) { + } + + try { + backup.getLogFilesInBackupSet(); + fail("should fail"); + } catch (IllegalStateException expected) { + } + + try { + backup.getLogFilesInBackupSet(0); + fail("should fail"); + } catch (IllegalStateException expected) { + } + + try { + backup.getLogFilesInSnapshot(); + fail("should fail"); + } catch (IllegalStateException expected) { + } + + env.close(); + env = null; + } + + @Test + public void testReadOnly() + throws Exception { + + if (useMultiEnvDirs) { + return; // not worth the trouble + } + + /* Make a read-only handle on a read-write environment directory.*/ + env = createEnv(true, envHome); + + try { + @SuppressWarnings("unused") + DbBackup backup = new DbBackup(env); + fail("Should fail because env is read/only."); + } catch (IllegalArgumentException expected) { + } + + env.close(); + env = null; + + /* + * Make a read-only handle on a read-only environment directory. Use a + * new environment directory because we're going to set it read0nly and + * there doesn't seem to be a way of undoing that. 
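testIncrementalBackup above shows that an incremental pass needs only one piece of state from the previous pass: its last file number, handed to the two-argument DbBackup constructor so that the new backup set begins at the following file. In outline, with copyAll standing in for the save loop:

    /* Full backup: remember where it ended. */
    DbBackup full = new DbBackup(env);
    full.startBackup();
    long lastFullFile = full.getLastFileInBackupSet();
    copyAll(full.getLogFilesInBackupSet());
    full.endBackup();

    /* Later: copy only files created since the full backup. */
    DbBackup incr = new DbBackup(env, lastFullFile);
    incr.startBackup();
    copyAll(incr.getLogFilesInBackupSet());   /* begins at lastFullFile + 1 */
    incr.endBackup();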
+ */ + File tempEnvDir = new File(envHome, SAVE1); + assertTrue(tempEnvDir.mkdirs()); + env = createEnv(false, tempEnvDir); + growFiles("db1", env, 8); + env.close(); + env = null; + + if (!tempEnvDir.setWritable(false)) { + System.out.println( + "Skipping testReadOnly because platform doesn't support " + + "setting file permissions"); + return; + } + + try { + env = createEnv(true, tempEnvDir); + + DbBackup backupHelper = new DbBackup(env); + backupHelper.startBackup(); + + FileManager fileManager = + DbInternal.getNonNullEnvImpl(env).getFileManager(); + long lastFile = fileManager.getLastFileNum().longValue(); + assertEquals(lastFile, backupHelper.getLastFileInBackupSet()); + + backupHelper.endBackup(); + env.close(); + env = null; + } finally { + assertTrue(tempEnvDir.setWritable(true)); + } + } + + private Environment createEnv(boolean readOnly, File envDir) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setAllowCreate(true); + envConfig.setReadOnly(readOnly); + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, + "10000"); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, + "false"); + + if (useMultiEnvDirs) { + envConfig.setConfigParam + (EnvironmentParams.LOG_N_DATA_DIRECTORIES.getName(), + N_DATA_DIRS + ""); + for (int i = 1; i <= N_DATA_DIRS; i += 1) { + new File(envDir, "data00" + i).mkdir(); + } + } + Environment env = new Environment(envDir, envConfig); + + return env; + } + + private long growFiles(String dbName, + Environment env, + int minNumFiles) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + Database db = env.openDatabase(null, dbName, dbConfig); + FileManager fileManager = + DbInternal.getNonNullEnvImpl(env).getFileManager(); + long startLastFileNum = + DbLsn.getFileNumber(fileManager.getLastUsedLsn()); + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[1024]); + /* Update twice, in order to create plenty of cleaning opportunity. */ + for (int i = 0; i < NUM_RECS; i++) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + for (int i = 0; i < NUM_RECS; i++) { + IntegerBinding.intToEntry(i, key); + assertEquals(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + db.close(); + + long endLastFileNum = + DbLsn.getFileNumber(fileManager.getLastUsedLsn()); + assertTrue((endLastFileNum - + startLastFileNum) >= minNumFiles); + return endLastFileNum; + } + + private long batchClean(int maxDeletions, String[] backupFiles) + throws DatabaseException { + + EnvironmentStats stats = env.getStats(CLEAR_CONFIG); + while (env.cleanLog() > 0) { + } + env.checkpoint(FORCE_CONFIG); + + int nDeletions = 0; + for (String name : backupFiles) { + if (!fileManager.isFileValid(fileManager.getNumFromName(name))) { + nDeletions += 1; + } + } + + return nDeletions; + } + + private void saveFiles(DbBackup backupHelper, + long lastFileFromPrevBackup, + long lastFileNum, + String saveDirName) + throws DatabaseException { + + /* Check that the backup set contains only the files it should have. */ + String[] fileList = backupHelper.getLogFilesInBackupSet(); + assertEquals(lastFileFromPrevBackup + 1, + fileManager.getNumFromName(fileList[0]). + longValue()); + assertEquals(lastFileNum, + fileManager.getNumFromName(fileList[fileList.length - 1]). 
+ longValue()); + + final String[] snapshotFiles = backupHelper.getLogFilesInSnapshot(); + if (lastFileFromPrevBackup < 0) { + /* In a full backup, the snapshot is the same as the backup set. */ + assertTrue(Arrays.equals(fileList, snapshotFiles)); + } else { + /* In an incremental backup, the snapshot should be larger. */ + final HashSet backupSet = new HashSet(); + final HashSet snapshotSet = new HashSet(); + Collections.addAll(backupSet, fileList); + Collections.addAll(snapshotSet, snapshotFiles); + assertTrue(snapshotSet.containsAll(backupSet)); + assertTrue(snapshotFiles.length > fileList.length); + } + + /* Make a new save directory. */ + File saveDir = new File(envHome, saveDirName); + assertTrue(saveDir.mkdir()); + + if (useMultiEnvDirs) { + for (int i = 1; i <= N_DATA_DIRS; i += 1) { + new File(saveDir, "data00" + i).mkdir(); + } + } + + copyFiles(envHome, saveDir, fileList); + } + + private void copyFiles(File sourceDir, File destDir, String[] fileList) + throws DatabaseException { + + try { + for (int i = 0; i < fileList.length; i++) { + File source = new File(sourceDir, fileList[i]); + FileChannel sourceChannel = + new FileInputStream(source).getChannel(); + File save = new File(destDir, fileList[i]); + FileChannel saveChannel = + new FileOutputStream(save).getChannel(); + + saveChannel.transferFrom(sourceChannel, 0, + sourceChannel.size()); + + /* Close the channels. */ + sourceChannel.close(); + saveChannel.close(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Delete all the contents and the directory itself. + */ + private void deleteSaveDir(String saveDirName) { + if (useMultiEnvDirs) { + for (int i = 1; i <= N_DATA_DIRS; i += 1) { + String saveSubdirName = + saveDirName + File.separator + "data00" + i; + deleteSaveDir1(saveSubdirName); + } + } + deleteSaveDir1(saveDirName); + } + + private void deleteSaveDir1(String saveDirName) { + File saveDir = new File(envHome, saveDirName); + if (saveDir.exists()) { + String[] savedFiles = saveDir.list(); + if (savedFiles != null) { + for (int i = 0; i < savedFiles.length; i++) { + File f = new File(saveDir, savedFiles[i]); + assertTrue(f.delete()); + } + + assertTrue(saveDir.delete()); + } + } + } + + /** + * Copy the saved files in, check values. + */ + private void verifyDb(String saveDirName, boolean rename) + throws DatabaseException { + + if (useMultiEnvDirs) { + for (int i = 1; i <= N_DATA_DIRS; i += 1) { + String saveSubdirName = File.separator + "data00" + i; + verifyDbPart1(saveDirName, saveSubdirName, rename); + } + verifyDbPart2(); + } else { + verifyDbPart1(saveDirName, "", rename); + verifyDbPart2(); + } + } + + private void verifyDbPart1(String saveDirName, + String subdirName, + boolean rename) + throws DatabaseException { + + File saveDir = new File(envHome, saveDirName + subdirName); + String[] savedFiles = saveDir.list(); + if (rename) { + for (int i = 0; i < savedFiles.length; i++) { + File saved = new File(saveDir, savedFiles[i]); + File dest = new File(envHome + subdirName, savedFiles[i]); + assertTrue(saved.renameTo(dest)); + } + } else { + /* copy. */ + copyFiles(saveDir, new File(envHome + subdirName), savedFiles); + } + } + + private void verifyDbPart2() + throws DatabaseException { + + env = createEnv(false, envHome); + checkDb("db1"); + + /* Db 2 should not exist. 
*/ + DatabaseConfig dbConfig = new DatabaseConfig(); + try { + @SuppressWarnings("unused") + Database db = env.openDatabase(null, "db2", dbConfig); + fail("db2 should not exist"); + } catch (DatabaseException expected) { + } + + env.close(); + env = null; + } + + /** + * Copy the saved files in, check values. + */ + private void verifyBothDbs(String saveDirName1, String saveDirName2) + throws DatabaseException { + + File saveDir = new File(envHome, saveDirName1); + String[] savedFiles = saveDir.list(); + for (int i = 0; i < savedFiles.length; i++) { + File saved = new File(saveDir, savedFiles[i]); + File dest = new File(envHome, savedFiles[i]); + assertTrue(saved.renameTo(dest)); + } + + if (useMultiEnvDirs) { + for (int j = 1; j <= N_DATA_DIRS; j += 1) { + String saveSubDirName2 = File.separator + "data00" + j; + + saveDir = new File(envHome, saveDirName2 + saveSubDirName2); + savedFiles = saveDir.list(); + for (int i = 0; i < savedFiles.length; i++) { + File saved = new File(saveDir, savedFiles[i]); + File dest = + new File(envHome + saveSubDirName2, savedFiles[i]); + assertTrue(saved.renameTo(dest)); + } + } + } else { + saveDir = new File(envHome, saveDirName2); + savedFiles = saveDir.list(); + for (int i = 0; i < savedFiles.length; i++) { + File saved = new File(saveDir, savedFiles[i]); + File dest = new File(envHome, savedFiles[i]); + assertTrue(saved.renameTo(dest)); + } + } + + env = createEnv(false, envHome); + checkDb("db1"); + checkDb("db2"); + env.close(); + env = null; + } + + private void checkDb(String dbName) + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + Database db = env.openDatabase(null, dbName, dbConfig); + Cursor c = null; + try { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + c = db.openCursor(null, null); + + for (int i = 0; i < NUM_RECS; i++) { + assertEquals(OperationStatus.SUCCESS, + c.getNext(key, data, LockMode.DEFAULT)); + assertEquals(i, IntegerBinding.entryToInt(key)); + } + assertEquals(OperationStatus.NOTFOUND, + c.getNext(key, data, LockMode.DEFAULT)); + } finally { + if (c != null) + c.close(); + db.close(); + } + } + + private void checkFileLen(long fileNum, long length) { + String fileName = fileManager.getFullFileName(fileNum, + FileManager.JE_SUFFIX); + File f = new File(fileName); + assertEquals(length, f.length()); + } + + /** + * Tests EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE [#22834]. + */ + @Test + public void testForceNewFile() { + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + envConfig.setAllowCreate(false); + + /* File is not flipped by default. */ + assertEquals(0, getLastFile()); + env.close(); + env = new Environment(envHome, envConfig); + assertEquals(0, getLastFile()); + env.close(); + env = null; + final long fileSize = getFileSize(0); + + /* File flips when ENV_RECOVERY_FORCE_NEW_FILE is true. */ + envConfig.setConfigParam(EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE, + "true"); + env = new Environment(envHome, envConfig); + assertEquals(1, getLastFile()); + env.close(); + env = null; + assertEquals(fileSize, getFileSize(0)); + + /* File does not flip when ENV_RECOVERY_FORCE_NEW_FILE is false. 
*/ + envConfig.setConfigParam(EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE, + "false"); + env = new Environment(envHome, envConfig); + assertEquals(1, getLastFile()); + env.close(); + env = null; + assertEquals(fileSize, getFileSize(0)); + } + + private long getLastFile() { + return DbInternal.getNonNullEnvImpl(env). + getFileManager(). + getCurrentFileNum(); + } + + private long getFileSize(long fileNum) { + final File file = new File(envHome, FileManager.getFileName(fileNum)); + return file.length(); + } +} diff --git a/test/com/sleepycat/je/util/DbCacheSizeTest.java b/test/com/sleepycat/je/util/DbCacheSizeTest.java new file mode 100644 index 0000000..7ee3617 --- /dev/null +++ b/test/com/sleepycat/je/util/DbCacheSizeTest.java @@ -0,0 +1,270 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.util.test.TestBase; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Checks the DbCacheSize returns consistent results by comparing the + * calculated and measured values. If this test fails, it probably means the + * technique used by DbCacheSize for estimating or measuring has become + * outdated or incorrect. Or, it could indicate a bug in memory budget + * calculations or IN memory management. Try running DbCacheSize manually to + * debug, using the cmd string for the test that failed. + */ +@RunWith(Parameterized.class) +public class DbCacheSizeTest extends TestBase { + + private static final boolean VERBOSE = false; + + /* + * It is acceptable for the measured values to be somewhat different than + * the calculated values, due to differences in actual BIN density, for + * example. 
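For reference, the parameter exercised by testForceNewFile above is set like any other config param. Forcing a flip at recovery makes the previous last file immutable from then on, which is its value in backup scenarios; that interpretation is inferred from the test's assertions:

    EnvironmentConfig config = new EnvironmentConfig();
    config.setAllowCreate(true);
    /* Recovery starts a fresh log file, so the old last file can
     * never be appended to again. */
    config.setConfigParam(
        EnvironmentConfig.ENV_RECOVERY_FORCE_NEW_FILE, "true");
    Environment env = new Environment(envHome, config);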
+ */ + private static final double ERROR_ALLOWED = 0.08; + + static final String[] COMMANDS = { + // 0 + "-records 100000 -key 10 -data 100", + // 1 + "-records 100000 -key 10 -data 100 -orderedinsertion", + // 2 + "-records 100000 -key 10 -data 100 -duplicates", + // 3 + "-records 100000 -key 10 -data 100 -duplicates " + + "-orderedinsertion", + // 4 + "-records 100000 -key 10 -data 100 -nodemax 250", + // 5 + "-records 100000 -key 10 -data 100 -nodemax 250 " + + "-orderedinsertion", + // 6 + "-records 100000 -key 20 -data 100 -keyprefix 10", + // 7 + "-records 100000 -key 20 -data 100 -keyprefix 2 " + + "-je.tree.compactMaxKeyLength 19", + // 8 + "-records 100000 -key 10 -data 100 -replicated", + // 9 + "-records 100000 -key 10 -data 100 " + + "-replicated -je.rep.preserveRecordVersion true", + // 10 + "-records 100000 -key 10 -data 100 -duplicates " + + "-replicated -je.rep.preserveRecordVersion true", + // 11 + "-records 100000 -key 10 -data 100 -orderedinsertion " + + "-replicated -je.rep.preserveRecordVersion true", + // 12 + "-records 100000 -key 10 -data 100 -ttl", + // 13 + "-records 10000 -key 10 -data 20 " + + "-offheap -maincache 9000000", + // 14 + "-records 100000 -key 10 -data 100 " + + "-offheap -maincache 9000000", + // 15 + "-records 150000 -key 10 -data 100 " + + "-offheap -maincache 9000000", + // 16 + "-records 150000 -key 10 -data 100 -duplicates " + + "-offheap -maincache 9000000", + // 17 + "-records 10000 -key 10 -data 100 -duplicates " + + "-offheap -maincache 9000000", + // 18 + "-records 10000 -key 10 -data 20 " + + "-offheap", + // 19 + "-records 100000 -key 10 -data 100 " + + "-offheap", + // 20 + "-records 150000 -key 10 -data 100 " + + "-offheap", + // 21 + "-records 150000 -key 10 -data 100 -duplicates " + + "-offheap", + // 22 + "-records 10000 -key 10 -data 100 -duplicates " + + "-offheap", + }; + + /* + * We always use a large file size so that the LSN compact representation + * is not used. This representation is usually not effective for larger + * data sets, and is disabled by DbCacheSize except under certain + * conditions. In this test we use smallish data sets, so we use a large + * file size to ensure that the compact representation is not used. + */ + private int ONE_GB = 1024 * 1024 * 1024; + private final String ADD_COMMANDS = + "-measure -btreeinfo -je.log.fileMax " + ONE_GB; + + private String cmd; + private int testNum; + + @Parameters + public static List genParams() { + List list = new ArrayList(); +// if (true) { +// list.add(new Object[]{COMMANDS[22], 0}); +// return list; +// } + int i = 0; + for (String cmd : COMMANDS) { + list.add(new Object[]{cmd, i}); + i++; + } + + return list; + } + + public DbCacheSizeTest(String cmd, int testNum){ + this.cmd = cmd; + this.testNum = testNum; + customName = "-" + testNum; + + } + + @Test + public void testSize() { + + /* Get estimated cache sizes and measured sizes. */ + final String[] args = (cmd + " " + ADD_COMMANDS).split(" "); + DbCacheSize util = new DbCacheSize(); + try { + util.parseArgs(args); + util.calculateCacheSizes(); + if (VERBOSE) { + util.printCacheSizes(System.out); + } + util.measure(VERBOSE ? System.out : null); + } finally { + util.cleanup(); + } + + final boolean offHeap = cmd.contains("-offheap"); + + /* + * Check that calculated and measured sizes are within some error + * tolerance. 
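Each COMMANDS entry above is a literal DbCacheSize command line; testSize appends -measure and friends so the estimate can be checked against a populated scratch environment. Driving the utility programmatically follows the same steps the test uses (argument values here are illustrative):

    String[] args = ("-records 100000 -key 10 -data 100 "
        + "-measure -btreeinfo -je.log.fileMax " + (1024 * 1024 * 1024))
        .split(" ");

    DbCacheSize util = new DbCacheSize();
    try {
        util.parseArgs(args);
        util.calculateCacheSizes();
        util.printCacheSizes(System.out);    /* the estimates */
        util.measure(System.out);            /* the measured values */
    } finally {
        util.cleanup();
    }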
+ */ + check( + "mainNoLNsOrVLSNs", + util.getMainNoLNsOrVLSNs(), + util.getMeasuredMainNoLNsOrVLSNs(), + util.getMainNoLNsOrVLSNs() + util.getOffHeapNoLNsOrVLSNs()); + + if (offHeap) { + assertEquals(0, util.getMainNoLNsWithVLSNs()); + } else { + check( + "mainNoLNsWithVLSNs", + util.getMainNoLNsWithVLSNs(), + util.getMeasuredMainNoLNsWithVLSNs(), + util.getMainNoLNsWithVLSNs()); + } + + check( + "mainWithLNsAndVLSNs", + util.getMainWithLNsAndVLSNs(), + util.getMeasuredMainWithLNsAndVLSNs(), + util.getMainWithLNsAndVLSNs() + util.getOffHeapWithLNsAndVLSNs()); + + check( + "offHeapNoLNsOrVLSNs", + util.getOffHeapNoLNsOrVLSNs(), + util.getMeasuredOffHeapNoLNsOrVLSNs(), + util.getMainNoLNsOrVLSNs() + util.getOffHeapNoLNsOrVLSNs()); + + check( + "offHeapWithLNsAndVLSNs", + util.getOffHeapWithLNsAndVLSNs(), + util.getMeasuredOffHeapWithLNsAndVLSNs(), + util.getMainWithLNsAndVLSNs() + util.getOffHeapWithLNsAndVLSNs()); + + /* + * Do the same for the preloaded values, which is really a self-check + * to ensure that preload gives the same results. + */ + check( + "noLNsOrVLSNsPreload", + util.getMainNoLNsOrVLSNs(), + util.getPreloadMainNoLNsOrVLSNs(), + util.getMainNoLNsOrVLSNs() + util.getOffHeapNoLNsOrVLSNs()); + + if (offHeap) { + assertEquals(0, util.getPreloadMainNoLNsWithVLSNs()); + } else { + check( + "noLNsWithVLSNsPreload", + util.getMainNoLNsWithVLSNs(), + util.getPreloadMainNoLNsWithVLSNs(), + util.getMainNoLNsWithVLSNs()); + } + + check( + "withLNsAndVLSNsPreload", + util.getMainWithLNsAndVLSNs(), + util.getPreloadMainWithLNsAndVLSNs(), + util.getMainWithLNsAndVLSNs() + util.getOffHeapWithLNsAndVLSNs()); + } + + /** + * @param name the name of the property being checked. + * + * @param expected the expected value as computed by DbCacheSize. + * + * @param actual the actual value as computed by measuring with an + * actual data set. + * + * @param total the expected total of off-heap and main cache sizes. Used + * to determine whether the difference between the expected and actual + * values, when divided by the total, is under the allowed threshold. When + * the main cache holds everything, small differences may result in a small + * off-heap cache size, as compared to an expected zero size. If the + * difference were divided by the expected value, the error would be + * infinity. This illustrates why the error is calculated by dividing by + * the total. + */ + private void check(String name, + double expected, + double actual, + long total) { + + final double error = (Math.abs(expected - actual) / total); + + if (VERBOSE) { + System.out.format("%d %s Error %.2f %n", testNum, name, error); + } + + if (error > ERROR_ALLOWED) { + fail(String.format( + "%d %s Error allowed = %.2f but got = %.2f " + + "Value expected= %,.0f but got = %,.0f out of total %d %n", + testNum, name, ERROR_ALLOWED, error, expected, actual, total)); + } + } +} diff --git a/test/com/sleepycat/je/util/DbDeleteReservedFilesTest.java b/test/com/sleepycat/je/util/DbDeleteReservedFilesTest.java new file mode 100644 index 0000000..c8072ba --- /dev/null +++ b/test/com/sleepycat/je/util/DbDeleteReservedFilesTest.java @@ -0,0 +1,302 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
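To make the check method's normalization concrete: if the expected main-cache size is 0 but 200 KB is measured, dividing by the expected value gives an infinite error, while dividing by a 10 MB main-plus-off-heap total gives 0.02, comfortably under ERROR_ALLOWED. The rule reduces to:

    /* Relative error against the combined total, mirroring check(). */
    static boolean withinTolerance(
        double expected, double actual, long total, double allowed) {

        return Math.abs(expected - actual) / total <= allowed;
    }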
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeSet; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.Get; +import com.sleepycat.je.Put; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.impl.RepTestBase; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.Pair; + +import org.junit.Before; +import org.junit.Test; + +/** + * Create an env with reserved files, then deletes them with the + * DbDeleteReservedFiles utility. Checks that sizes and files deleted are as + * expected. + * + * Does this to each node in a rep group and checks that we can open + * the env after running the utility do writes and reads. When opening an + * env after deleting reserved files, the VLSNIndex is truncated + * automatically, and this is exercised by the test. + */ +public class DbDeleteReservedFilesTest extends RepTestBase { + + private static final long ONE_MB = 1L << 20; + private static final long FILE_SIZE = 5 * ONE_MB; + private static final int RECORDS = 100; + private static final int RESERVED_FILES = 5; + private static final long RESERVED_SIZE = (RESERVED_FILES + 1) * FILE_SIZE; + + public DbDeleteReservedFilesTest() { + super(); + } + + @Override + @Before + public void setUp() + throws Exception { + + groupSize = 3; + super.setUp(); + + for (int i = 0; i < groupSize; i += 1) { + final EnvironmentConfig envConfig = repEnvInfo[i].getEnvConfig(); + + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, String.valueOf(FILE_SIZE)); + } + } + + @Test + public void testDeleteReservedFiles() + throws Exception { + + RepTestUtils.joinGroup(repEnvInfo); + writeReservedFiles(); + + for (final RepEnvInfo info : repEnvInfo) { + + readData(info); + + final long reservedBytes = + info.getEnv().getStats(null).getReservedLogSize(); + + final long reservedFiles = + (reservedBytes + (FILE_SIZE / 2)) / FILE_SIZE; + + assertTrue(reservedFiles >= RESERVED_FILES); + + RepTestUtils.shutdownRepEnvs(info); + + /* Delete 1 file, first file remaining is 0x1. */ + deleteReservedFiles(info, 1, 1); + + /* Delete 2 files, first file remaining is 0x3. */ + deleteReservedFiles(info, 3, 2); + + /* + * Delete remaining reserved files, first file remaining is the + * first non-reserved file. + */ + deleteReservedFiles(info, reservedFiles, reservedFiles - 3); + + /* + * Try reading after joining group. This will truncate the + * VLSNIndex. 
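The deleteReservedFiles helper below runs DbDeleteReservedFiles twice on purpose: once with -l, which only lists what would be deleted, and once without, which deletes. That dry-run-first pattern is the sensible way to use the utility by hand as well; a sketch mirroring the test's argument handling (the generic types on the result are inferred from how the test consumes it):

    /* Preview: -l lists the reserved files that would be removed. */
    String[] preview = {
        "-h", envHome.getAbsolutePath(),
        "-s", "10",                      /* size to free, in MB */
        "-l"
    };
    Pair<Long, SortedMap<File, Long>> listed =
        new DbDeleteReservedFiles(preview).execute();

    /* Same command without -l performs the deletion. */
    String[] delete = { "-h", envHome.getAbsolutePath(), "-s", "10" };
    new DbDeleteReservedFiles(delete).execute();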
+ */ + RepTestUtils.joinGroup(repEnvInfo); + readData(info); + } + + /* + * Make sure we can still write/read in this group. + */ + writeReservedFiles(); + + for (final RepEnvInfo info : repEnvInfo) { + readData(info); + } + } + + private void deleteReservedFiles(final RepEnvInfo info, + final long firstFileRemaining, + final long numFilesToDelete) + throws Exception { + + SortedSet prevFiles = listDataFiles(info); + + final long deleteBytes = + (numFilesToDelete * FILE_SIZE) - (FILE_SIZE / 2); + + final long deleteMb = deleteBytes / ONE_MB; + + final ArrayList args = new ArrayList<>(); + args.add("-h"); + args.add(info.getEnvHome().getAbsolutePath()); + args.add("-s"); + args.add(String.valueOf(deleteMb)); + + /* + * First just list the files to be deleted. + */ + args.add("-l"); + + DbDeleteReservedFiles util = + new DbDeleteReservedFiles(args.toArray(new String[0])); + + Pair> result = util.execute(); + + prevFiles = verifyResult( + info, prevFiles, numFilesToDelete, deleteMb, + result.first(), (SortedSet) result.second().keySet(), + firstFileRemaining, false); + + /* + * Now delete the files -- remove the -l param. + */ + args.remove(args.size() - 1); + + util = new DbDeleteReservedFiles(args.toArray(new String[0])); + + result = util.execute(); + + verifyResult( + info, prevFiles, numFilesToDelete, deleteMb, + result.first(), (SortedSet) result.second().keySet(), + firstFileRemaining, true); + } + + private SortedSet verifyResult( + final RepEnvInfo info, + final SortedSet prevFiles, + final long numFilesToDelete, + final long deleteMb, + final long reportedDb, + final SortedSet filesToDelete, + final long firstFileRemaining, + final boolean expectDeleted) { + + final SortedSet currFiles = listDataFiles(info); + + if (expectDeleted) { + + final File firstFile = + new File( + info.getEnvHome(), + FileManager.getFileName(firstFileRemaining)). 
+ getAbsoluteFile(); + + assertEquals(firstFile, currFiles.first()); + } + + assertTrue( + "prevFiles=" + prevFiles + " filesToDelete=" + filesToDelete, + prevFiles.containsAll(filesToDelete)); + + assertEquals(numFilesToDelete, filesToDelete.size()); + + if (!expectDeleted) { + long size = 0; + for (final File file : filesToDelete) { + size += file.length(); + } + size /= ONE_MB; + assertEquals(size, reportedDb); + assertTrue(size >= deleteMb); + } + + if (expectDeleted) { + prevFiles.removeAll(filesToDelete); + assertEquals(prevFiles, currFiles); + } else { + assertEquals(prevFiles, currFiles); + } + + + return currFiles; + } + + private SortedSet listDataFiles(final RepEnvInfo info) { + + final File dir = info.getEnvHome(); + + final String[] names = FileManager.listFiles( + info.getEnvHome(), + new String[] { FileManager.JE_SUFFIX }, + false); + + final SortedSet set = new TreeSet<>(); + + for (final String name : names) { + final File file = new File(dir, name).getAbsoluteFile(); + set.add(file); + } + + return set; + } + + private void writeReservedFiles() { + + final ReplicatedEnvironment master = + RepTestUtils.getMaster(repEnvInfo, false); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(new byte[1024]); + + final Database db = master.openDatabase( + null, TEST_DB_NAME, + new DatabaseConfig().setTransactional(true).setAllowCreate(true)); + + while (true) { + for (int i = 0; i < RECORDS; i += 1) { + IntegerBinding.intToEntry(i, key); + db.put(null, key, data, Put.OVERWRITE, null); + } + final EnvironmentStats stats = master.getStats(null); + if (stats.getReservedLogSize() >= RESERVED_SIZE) { + break; + } + } + + db.close(); + RepTestUtils.syncGroup(repEnvInfo); + } + + private void readData(final RepEnvInfo info) { + + final ReplicatedEnvironment env = info.getEnv(); + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + final Database db = env.openDatabase( + null, TEST_DB_NAME, + new DatabaseConfig().setTransactional(true)); + + int records = 0; + try (final Cursor cursor = db.openCursor(null, null)) { + while (cursor.get(key, data, Get.NEXT, null) != null) { + assertEquals(records, IntegerBinding.entryToInt(key)); + assertEquals(1024, data.getSize()); + records += 1; + } + } + + assertEquals(RECORDS, records); + db.close(); + } +} diff --git a/test/com/sleepycat/je/util/DbDumpTest.java b/test/com/sleepycat/je/util/DbDumpTest.java new file mode 100644 index 0000000..872c950 --- /dev/null +++ b/test/com/sleepycat/je/util/DbDumpTest.java @@ -0,0 +1,277 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
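One detail of writeReservedFiles above deserves emphasis: the test does not assume any fixed amount of data will produce reserved space. It writes in batches and re-checks getReservedLogSize after each batch, stopping only once the environment actually reports enough reserved bytes. The essential loop is:

    /* Write until the environment reports enough reserved bytes. */
    while (env.getStats(null).getReservedLogSize() < RESERVED_SIZE) {
        writeBatch(db);   /* hypothetical helper: one batch of overwrites */
    }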
+ */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.util.Hashtable; + +import org.junit.Test; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.tree.Key; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +public class DbDumpTest extends TestBase { + + private final File envHome; + + private static final int N_KEYS = 100; + private static final int N_KEY_BYTES = 1000; + private static final String dbName = "testDB"; + + private Environment env; + + public DbDumpTest() { + envHome = SharedTestUtils.getTestDir(); + } + + /** + * A simple test to check if JE's dump format matches Core. + */ + @Test + public void testMatchCore() + throws Throwable { + + try { + /* Set up a new environment. */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + /* + * Make a stream holding a small dump in a format known to be + * the same as Core DB. + */ + ByteArrayOutputStream dumpInfo = new ByteArrayOutputStream(); + PrintStream dumpStream = new PrintStream(dumpInfo); + dumpStream.println("VERSION=3"); + dumpStream.println("format=print"); + dumpStream.println("type=btree"); + dumpStream.println("dupsort=0"); + dumpStream.println("HEADER=END"); + dumpStream.println(" abc"); + dumpStream.println(" firstLetters"); + dumpStream.println(" xyz"); + dumpStream.println(" lastLetters"); + dumpStream.println("DATA=END"); + + /* load it */ + DbLoad loader = new DbLoad(); + loader.setEnv(env); + loader.setInputReader(new BufferedReader(new InputStreamReader + (new ByteArrayInputStream(dumpInfo.toByteArray())))); + loader.setNoOverwrite(false); + loader.setDbName("foobar"); + loader.load(); + + /* Make sure we retrieve the expected data. */ + Database checkDb = env.openDatabase(null, "foobar", null); + Cursor cursor = checkDb.openCursor(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + assertEquals("abc", StringUtils.fromUTF8(key.getData())); + assertEquals("firstLetters", StringUtils.fromUTF8(data.getData())); + assertEquals(OperationStatus.SUCCESS, + cursor.getNext(key, data, LockMode.DEFAULT)); + assertEquals("xyz", StringUtils.fromUTF8(key.getData())); + assertEquals("lastLetters", StringUtils.fromUTF8(data.getData())); + assertEquals(OperationStatus.NOTFOUND, + cursor.getNext(key, data, LockMode.DEFAULT)); + cursor.close(); + checkDb.close(); + + /* Check that a dump of the database matches the input file. 
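testMatchCore below doubles as a specification of the text dump format JE shares with Core DB: a header of VERSION=3, format=print, type=btree, dupsort=0 and HEADER=END, then alternating key and data lines each indented by a single space, closed by DATA=END. The dump-and-reload round trip it performs condenses to:

    /* Dump database "foobar" in printable format. */
    ByteArrayOutputStream dump = new ByteArrayOutputStream();
    new DbDump(env, "foobar", new PrintStream(dump), true).dump();

    /* Load the dump back under another name. */
    DbLoad loader = new DbLoad();
    loader.setEnv(env);
    loader.setDbName("foobarCopy");
    loader.setInputReader(new BufferedReader(new InputStreamReader(
        new ByteArrayInputStream(dump.toByteArray()))));
    loader.setNoOverwrite(false);
    loader.load();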
*/ + ByteArrayOutputStream dump2 = new ByteArrayOutputStream(); + DbDump dumper2 = new DbDump(env, "foobar", + new PrintStream(dump2), true); + dumper2.dump(); + assertEquals(dump2.toString(), dumpInfo.toString()); + + env.close(); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDumpLoadBinary() + throws Throwable { + + try { + doDumpLoadTest(false, 1); + } catch (Throwable t) { + t.printStackTrace(); + throw t; + } + } + + @Test + public void testDumpLoadPrintable() + throws IOException, DatabaseException { + + doDumpLoadTest(true, 1); + } + + @Test + public void testDumpLoadTwo() + throws IOException, DatabaseException { + + doDumpLoadTest(false, 2); + } + + @Test + public void testDumpLoadThree() + throws IOException, DatabaseException { + + doDumpLoadTest(true, 3); + } + + private void doDumpLoadTest(boolean printable, int nDumps) + throws IOException, DatabaseException { + + Hashtable[] dataMaps = new Hashtable[nDumps]; + for (int i = 0; i < nDumps; i += 1) { + dataMaps[i] = new Hashtable(); + } + initDbs(nDumps, dataMaps); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(baos); + for (int i = 0; i < nDumps; i += 1) { + DbDump dumper = + new DbDump(env, dbName + i, out, printable); + dumper.dump(); + } + byte[] baosba = baos.toByteArray(); + BufferedReader rdr = new BufferedReader + (new InputStreamReader(new ByteArrayInputStream(baosba))); + for (int i = 0; i < nDumps; i += 1) { + DbLoad loader = new DbLoad(); + loader.setEnv(env); + loader.setInputReader(rdr); + loader.setNoOverwrite(false); + loader.setDbName(dbName + i); + loader.load(); + verifyDb(dataMaps[i], i); + } + + ByteArrayOutputStream baos2 = new ByteArrayOutputStream(); + PrintStream out2 = new PrintStream(baos2); + for (int i = 0; i < nDumps; i += 1) { + DbDump dumper2 = + new DbDump(env, dbName + i, out2, printable); + dumper2.dump(); + } + assertEquals(0, Key.compareKeys(baosba, baos2.toByteArray(), null)); + + env.close(); + } + + /** + * Set up the environment and db. + */ + private void initDbs(int nDumps, Hashtable[] dataMaps) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + /* Make a db and open it. */ + for (int i = 0; i < nDumps; i += 1) { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(true); + Database myDb = env.openDatabase(null, dbName + i, dbConfig); + Cursor cursor = myDb.openCursor(null, null); + doLargePut(dataMaps[i], cursor, N_KEYS); + cursor.close(); + myDb.close(); + } + } + + private void verifyDb(Hashtable dataMap, int dumpIndex) + throws DatabaseException { + + DatabaseConfig config = new DatabaseConfig(); + config.setReadOnly(true); + DbInternal.setUseExistingConfig(config, true); + Database myDb = env.openDatabase(null, dbName + dumpIndex, config); + Cursor cursor = myDb.openCursor(null, null); + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + OperationStatus status = + cursor.getFirst(foundKey, foundData, LockMode.DEFAULT); + while (status == OperationStatus.SUCCESS) { + String foundKeyString = foundKey.getString(); + String foundDataString = foundData.getString(); + if (dataMap.get(foundKeyString) != null) { + assertTrue((dataMap.get(foundKeyString)). 
+ equals(foundDataString)); + dataMap.remove(foundKeyString); + } else { + fail("didn't find key in either map (" + + foundKeyString + + ")"); + } + status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + } + assertTrue(dataMap.size() == 0); + cursor.close(); + myDb.close(); + } + + private void doLargePut(Hashtable dataMap, Cursor cursor, int nKeys) + throws DatabaseException { + + for (int i = 0; i < nKeys; i++) { + byte[] key = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(key); + String keyString = StringUtils.fromUTF8(key); + String dataString = Integer.toString(i); + OperationStatus status = + cursor.put(new StringDbt(key), + new StringDbt(dataString)); + assertEquals(OperationStatus.SUCCESS, status); + if (dataMap != null) { + dataMap.put(keyString, dataString); + } + } + } +} diff --git a/test/com/sleepycat/je/util/DbLsnTest.java b/test/com/sleepycat/je/util/DbLsnTest.java new file mode 100644 index 0000000..aa4313e --- /dev/null +++ b/test/com/sleepycat/je/util/DbLsnTest.java @@ -0,0 +1,274 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; + +import org.junit.Test; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class DbLsnTest extends TestBase { + long[] values = { 0xFF, 0xFFFF, 0xFFFFFF, 0x7FFFFFFF, 0xFFFFFFFFL }; + + @Test + public void testDbLsn() { + for (int i = 0; i < values.length; i++) { + long value = values[i]; + long lsn = DbLsn.makeLsn(value, value); + assertTrue((DbLsn.getFileNumber(lsn) == value) && + (DbLsn.getFileOffset(lsn) == value)); + } + } + + @Test + public void testComparableEquality() { + /* Test equality */ + + /* Don't bother with last values[] entry -- it makes NULL_LSN. */ + int lastValue = values.length - 1; + for (int i = 0; i < lastValue; i++) { + long value = values[i]; + long lsn1 = DbLsn.makeLsn(value, value); + long lsn2 = DbLsn.makeLsn(value, value); + assertTrue(DbLsn.compareTo(lsn1, lsn2) == 0); + } + + /* Check NULL_LSN. 
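A reading aid for the tests below: an LSN is a single long, with the file number packed in the high 32 bits and the byte offset in the low 32. That packing is why the all-ones entry in values[] is skipped in several places; it assembles into DbLsn.NULL_LSN.

    long lsn = DbLsn.makeLsn(3, 40);        /* file 0x3, offset 40 */
    assert DbLsn.getFileNumber(lsn) == 3;
    assert DbLsn.getFileOffset(lsn) == 40;

    /* 0xFFFFFFFF in both halves collides with the null sentinel. */
    assert DbLsn.makeLsn(0xFFFFFFFFL, 0xFFFFFFFFL) == DbLsn.NULL_LSN;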
*/ + assertTrue(DbLsn.makeLsn(values[lastValue], + values[lastValue]) == + DbLsn.makeLsn(values[lastValue], + values[lastValue])); + } + + @Test + public void testComparableException() { + /* Check that compareTo throws EnvironmentFailureException */ + + try { + long lsn1 = DbLsn.makeLsn(0, 0); + DbLsn.compareTo(lsn1, DbLsn.NULL_LSN); + fail("compareTo(null) didn't throw EnvironmentFailureException"); + } catch (EnvironmentFailureException expected) { + } + + try { + long lsn1 = DbLsn.makeLsn(0, 0); + DbLsn.compareTo(DbLsn.NULL_LSN, lsn1); + fail("compareTo(null) didn't throw EnvironmentFailureException"); + } catch (EnvironmentFailureException expected) { + } + } + + @Test + public void testComparableInequalityFileNumber() { + /* Check for inequality in the file number */ + + /* Don't bother with last values[] entry -- it makes NULL_LSN. */ + int lastValue = values.length - 1; + for (int i = 0; i < lastValue; i++) { + long value = values[i]; + long lsn1 = DbLsn.makeLsn(value, value); + long lsn2 = DbLsn.makeLsn(0, value); + assertTrue(DbLsn.compareTo(lsn1, lsn2) == 1); + assertTrue(DbLsn.compareTo(lsn2, lsn1) == -1); + } + + /* Check against NULL_LSN. */ + long lsn1 = DbLsn.makeLsn(values[lastValue], values[lastValue]); + long lsn2 = DbLsn.makeLsn(0, values[lastValue]); + try { + assertTrue(DbLsn.compareTo(lsn1, lsn2) == 1); + } catch (EnvironmentFailureException expected) { + } + + try { + assertTrue(DbLsn.compareTo(lsn2, lsn1) == 1); + } catch (EnvironmentFailureException expected) { + } + } + + @Test + public void testComparableInequalityFileOffset() { + /* Check for inequality in the file offset */ + + for (int i = 0; i < values.length - 1; i++) { + long value = values[i]; + long lsn1 = DbLsn.makeLsn(value, value); + long lsn2 = DbLsn.makeLsn(value, 0); + /* Can't compareTo(NULL_LSN). */ + if (lsn1 != DbLsn.NULL_LSN && + lsn2 != DbLsn.NULL_LSN) { + assertTrue(DbLsn.compareTo(lsn1, lsn2) == 1); + assertTrue(DbLsn.compareTo(lsn2, lsn1) == -1); + } + } + } + + @Test + public void testNoCleaningDistance() { + long a = DbLsn.makeLsn(1, 10); + long b = DbLsn.makeLsn(3, 40); + assertEquals(230, DbLsn.getNoCleaningDistance(b, a, 100)); + assertEquals(230, DbLsn.getNoCleaningDistance(a, b, 100)); + + long c = DbLsn.makeLsn(1, 50); + assertEquals(40, DbLsn.getNoCleaningDistance(a, c, 100)); + assertEquals(40, DbLsn.getNoCleaningDistance(c, a, 100)); + } + + @Test + public void testWithCleaningDistance() + throws Exception { + + /* Try with non-consecutive files (due to cleaning). 
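The constants in testNoCleaningDistance above decode as follows, taking the stated file size of 100: from (1,10) to (3,40) every intermediate file is assumed full, giving (3 - 1) * 100 + (40 - 10) = 230. The with-cleaning variant exercised next instead skips files that no longer exist, so once file 2 is gone the same pair measures (100 - 10) + 40 = 130.

    /* Fixed file size 100; file 2 has been cleaned away on disk. */
    long a = DbLsn.makeLsn(1, 10);
    long b = DbLsn.makeLsn(3, 40);
    /* no-cleaning:   (3 - 1) * 100 + (40 - 10)           = 230 */
    /* with-cleaning: (100 - 10) to end file 1, plus 40   = 130 */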
*/ + + final File envHome = SharedTestUtils.getTestDir(); + TestUtils.removeLogFiles("testWithCleaningDistance", envHome, false); + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + Environment env = new Environment(envHome, envConfig); + + try { + final FileManager fileManager = + DbInternal.getNonNullEnvImpl(env).getFileManager(); + + createFile(fileManager, 1L, 0); + createFile(fileManager, 3L, 0); + + final long a = DbLsn.makeLsn(1, 10); + final long b = DbLsn.makeLsn(3, 40); + + assertEquals( + 130, + DbLsn.getWithCleaningDistance(b, a, 100, fileManager)); + + assertEquals( + 130, + DbLsn.getWithCleaningDistance(a, b, 100, fileManager)); + + final long c = DbLsn.makeLsn(1, 50); + + assertEquals( + 40, + DbLsn.getWithCleaningDistance(a, c, 100, fileManager)); + + assertEquals( + 40, + DbLsn.getWithCleaningDistance(c, a, 100, fileManager)); + + env.close(); + env = null; + } finally { + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + /* Ignore this. Another exception is in flight. */ + } + } + } + } + + @Test + public void testTrueDistance() + throws Exception { + + /* Try with non-consecutive files (due to cleaning). */ + + final File envHome = SharedTestUtils.getTestDir(); + TestUtils.removeLogFiles("testTrueDistance", envHome, false); + + final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + + Environment env = new Environment(envHome, envConfig); + + try { + final FileManager fileManager = + DbInternal.getNonNullEnvImpl(env).getFileManager(); + + createFile(fileManager, 1L, 100); + createFile(fileManager, 3L, 300); + createFile(fileManager, 5L, 500); + + final long a = DbLsn.makeLsn(1, 10); + final long b = DbLsn.makeLsn(1, 50); + final long c = DbLsn.makeLsn(3, 70); + final long d = DbLsn.makeLsn(5, 90); + + expectTrueDistance(0, a, a, fileManager); + expectTrueDistance(0, b, b, fileManager); + expectTrueDistance(0, c, c, fileManager); + expectTrueDistance(0, d, d, fileManager); + + expectTrueDistance(40, a, b, fileManager); + expectTrueDistance(40, b, a, fileManager); + + expectTrueDistance(160, a, c, fileManager); + expectTrueDistance(480, a, d, fileManager); + expectTrueDistance(120, b, c, fileManager); + expectTrueDistance(440, b, d, fileManager); + expectTrueDistance(320, c, d, fileManager); + + env.close(); + env = null; + } finally { + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + /* Ignore this. Another exception is in flight. */ + } + } + } + } + + private void expectTrueDistance(final long expectValue, + final long lsn1, + final long lsn2, + final FileManager fileManager) { + assertEquals( + expectValue, + DbLsn.getTrueDistance(lsn1, lsn2, fileManager)); + } + + private void createFile(final FileManager fileManager, + final long fileNum, + final int length) + throws IOException { + + final String path = fileManager.getFullFileName(fileNum); + final File file = new File(path); + file.createNewFile(); + try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) { + raf.setLength(length); + } + } +} diff --git a/test/com/sleepycat/je/util/DbScavengerTest.java b/test/com/sleepycat/je/util/DbScavengerTest.java new file mode 100644 index 0000000..301fe18 --- /dev/null +++ b/test/com/sleepycat/je/util/DbScavengerTest.java @@ -0,0 +1,570 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
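testTrueDistance in this file is easiest to follow with the file lengths written out: files 1, 3 and 5 have lengths 100, 300 and 500, and the true distance walks the real bytes between two LSNs, skipping the cleaned gaps entirely. Two of its expectations, worked by hand:

    /* a = (1,10), c = (3,70), d = (5,90); lengths 100, 300, 500.     */
    /* a -> c: (100 - 10) remaining in file 1, + 70 into file 3 = 160 */
    /* a -> d: (100 - 10) + all 300 of file 3 + 90 into file 5  = 480 */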
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.RandomAccessFile; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.StringTokenizer; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DatabaseNotFoundException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.utilint.DbLsn; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.utilint.StringUtils; + +public class DbScavengerTest extends TestBase { + + private static final int TRANSACTIONAL = 1 << 0; + private static final int WRITE_MULTIPLE = 1 << 1; + private static final int PRINTABLE = 1 << 2; + private static final int ABORT_BEFORE = 1 << 3; + private static final int ABORT_AFTER = 1 << 4; + private static final int CORRUPT_LOG = 1 << 5; + private static final int DELETE_DATA = 1 << 6; + private static final int AGGRESSIVE = 1 << 7; + + private static final int N_DBS = 3; + private static final int N_KEYS = 100; + private static final int N_DATA_BYTES = 100; + private static final int LOG_SIZE = 10000; + + private final String envHomeName; + private final File envHome; + + private Environment env; + + private Database[] dbs = new Database[N_DBS]; + + private final boolean duplicatesAllowed = true; + + public DbScavengerTest() { + envHome = SharedTestUtils.getTestDir(); + envHomeName = envHome.getAbsolutePath(); + } + + @After + public void tearDown() { + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.out.println("TearDown: " + e); + } + env = null; + } + } + + @Test + public void testScavenger1() { + doScavengerTest(PRINTABLE | TRANSACTIONAL | + ABORT_BEFORE | ABORT_AFTER); + } + + @Test + public void testScavenger2() { + doScavengerTest(PRINTABLE | TRANSACTIONAL | ABORT_BEFORE); + } + + @Test + public void testScavenger3() { + doScavengerTest(PRINTABLE | TRANSACTIONAL | ABORT_AFTER); + } + + @Test + public void testScavenger4() { + doScavengerTest(PRINTABLE | TRANSACTIONAL); + } + + @Test + public void testScavenger5() { + doScavengerTest(PRINTABLE | WRITE_MULTIPLE | TRANSACTIONAL); + } + + @Test + public void testScavenger6() { + doScavengerTest(PRINTABLE); + } + + @Test + public void testScavenger7() { + doScavengerTest(TRANSACTIONAL | 
ABORT_BEFORE | ABORT_AFTER); + } + + @Test + public void testScavenger8() { + doScavengerTest(TRANSACTIONAL | ABORT_BEFORE); + } + + @Test + public void testScavenger9() { + doScavengerTest(TRANSACTIONAL); + } + + @Test + public void testScavenger10() { + doScavengerTest(TRANSACTIONAL | ABORT_AFTER); + } + + @Test + public void testScavenger11() { + doScavengerTest(0); + } + + @Test + public void testScavenger12() { + doScavengerTest(CORRUPT_LOG); + } + + @Test + public void testScavenger13() { + doScavengerTest(DELETE_DATA); + } + + @Test + public void testScavenger14() { + doScavengerTest(AGGRESSIVE); + } + + @Test + public void testScavengerAbortedDbLevelOperations() { + createEnv(true, true, false); + boolean doAbort = true; + byte[] dataBytes = new byte[N_DATA_BYTES]; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(dataBytes); + IntegerBinding.intToEntry(1, key); + TestUtils.generateRandomAlphaBytes(dataBytes); + for (int i = 0; i < 2; i++) { + Transaction txn = env.beginTransaction(null, null); + for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) { + String databaseName = null; + if (doAbort) { + databaseName = "abortedDb" + dbCnt; + } else { + databaseName = "simpleDb" + dbCnt; + } + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setSortedDuplicates(duplicatesAllowed); + dbConfig.setTransactional(true); + if (dbs[dbCnt] != null) { + throw new RuntimeException("database already open"); + } + Database db = + env.openDatabase(txn, databaseName, dbConfig); + dbs[dbCnt] = db; + db.put(txn, key, data); + } + if (doAbort) { + txn.abort(); + dbs = new Database[N_DBS]; + } else { + txn.commit(); + } + doAbort = !doAbort; + } + + closeEnv(); + createEnv(false, false, false); + openDbs(false, false, duplicatesAllowed, null); + dumpDbs(false, false); + + /* Close the environment, delete it completely from the disk. */ + closeEnv(); + TestUtils.removeLogFiles("doScavengerTest", envHome, false); + + /* Recreate and reload the environment from the scavenger files. */ + createEnv(true, true, false); + loadDbs(); + + /* Verify that the data is the same as when it was created. */ + for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) { + String databaseName = "abortedDb" + dbCnt; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(false); + try { + env.openDatabase(null, databaseName, dbConfig); + fail("expected DatabaseNotFoundException"); + } catch (DatabaseNotFoundException DNFE) { + /* Expected. */ + } + } + closeEnv(); + } + + private void doScavengerTest(int config) + throws DatabaseException { + + boolean printable = (config & PRINTABLE) != 0; + boolean transactional = (config & TRANSACTIONAL) != 0; + boolean writeMultiple = (config & WRITE_MULTIPLE) != 0; + boolean abortBefore = (config & ABORT_BEFORE) != 0; + boolean abortAfter = (config & ABORT_AFTER) != 0; + boolean corruptLog = (config & CORRUPT_LOG) != 0; + boolean deleteData = (config & DELETE_DATA) != 0; + boolean aggressive = (config & AGGRESSIVE) != 0; + + assert transactional || + (!abortBefore && !abortAfter); + + Map[] dataMaps = new Map[N_DBS]; + Set lsnsToCorrupt = new HashSet(); + /* Create the environment and some data. 
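+         * Flow, for reference: write records (optionally corrupting log
+         * files along the way), dump everything with DbScavenger, delete
+         * the environment from disk, reload the dump files with DbLoad,
+         * and verify the reloaded records against dataMaps.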
*/ + createEnvAndDbs(dataMaps, + writeMultiple, + transactional, + abortBefore, + abortAfter, + corruptLog, + lsnsToCorrupt, + deleteData); + closeEnv(); + createEnv(false, false, corruptLog); + if (corruptLog) { + corruptFiles(lsnsToCorrupt); + } + openDbs(false, false, duplicatesAllowed, null); + dumpDbs(printable, aggressive); + + /* Close the environment, delete it completely from the disk. */ + closeEnv(); + TestUtils.removeLogFiles("doScavengerTest", envHome, false); + + /* Recreate the environment and load it from the scavenger files. */ + createEnv(true, transactional, corruptLog); + loadDbs(); + + /* Verify that the data is the same as when it was created. */ + openDbs(false, false, duplicatesAllowed, null); + verifyDbs(dataMaps); + closeEnv(); + } + + private void closeEnv() + throws DatabaseException { + + for (int i = 0; i < N_DBS; i++) { + if (dbs[i] != null) { + dbs[i].close(); + dbs[i] = null; + } + } + + env.close(); + env = null; + } + + private void createEnv( + boolean create, + boolean transactional, + boolean corruptLog) + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + DbInternal.disableParameterValidation(envConfig); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_CHECKPOINTER.getName(), "false"); + envConfig.setConfigParam( + EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setConfigParam( + EnvironmentParams.LOG_FILE_MAX.getName(), "" + LOG_SIZE); + envConfig.setTransactional(transactional); + envConfig.setAllowCreate(create); + if (corruptLog) { + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_VERIFIER, "false"); + } + env = new Environment(envHome, envConfig); + } + + private void createEnvAndDbs(Map[] dataMaps, + boolean writeMultiple, + boolean transactional, + boolean abortBefore, + boolean abortAfter, + boolean corruptLog, + Set lsnsToCorrupt, + boolean deleteData) + throws DatabaseException { + + createEnv(true, transactional, corruptLog); + Transaction txn = null; + if (transactional) { + txn = env.beginTransaction(null, null); + } + + openDbs(true, transactional, duplicatesAllowed, txn); + + if (transactional) { + txn.commit(); + } + + long lastCorruptedFile = -1; + for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) { + Map dataMap = new HashMap(); + dataMaps[dbCnt] = dataMap; + Database db = dbs[dbCnt]; + + for (int i = 0; i < N_KEYS; i++) { + byte[] dataBytes = new byte[N_DATA_BYTES]; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(dataBytes); + IntegerBinding.intToEntry(i, key); + TestUtils.generateRandomAlphaBytes(dataBytes); + + boolean corruptedThisEntry = false; + + if (transactional) { + txn = env.beginTransaction(null, null); + } + + if (transactional && + abortBefore) { + assertEquals(OperationStatus.SUCCESS, + db.put(txn, key, data)); + txn.abort(); + txn = env.beginTransaction(null, null); + } + + assertEquals(OperationStatus.SUCCESS, + db.put(txn, key, data)); + if (corruptLog) { + long currentLsn = getLastLsn(); + long fileNumber = DbLsn.getFileNumber(currentLsn); + long fileOffset = DbLsn.getFileOffset(currentLsn); + if (fileOffset > (LOG_SIZE >> 1) && + /* We're writing in the second half of the file. */ + fileNumber > lastCorruptedFile) { + /* Corrupt this file. 
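+                     * At most one LSN per file is corrupted, and only in
+                     * the second half of the file, so the scavenger still
+                     * finds intact entries in the first half.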
*/ + lsnsToCorrupt.add(new Long(currentLsn)); + lastCorruptedFile = fileNumber; + corruptedThisEntry = true; + } + } + + if (writeMultiple) { + assertEquals(OperationStatus.SUCCESS, + db.delete(txn, key)); + assertEquals(OperationStatus.SUCCESS, + db.put(txn, key, data)); + } + + if (deleteData) { + assertEquals(OperationStatus.SUCCESS, + db.delete(txn, key)); + /* overload this for deleted data. */ + corruptedThisEntry = true; + } + + if (!corruptedThisEntry) { + dataMap.put(new Integer(i), + StringUtils.fromUTF8(dataBytes)); + } + + if (transactional) { + txn.commit(); + } + + if (transactional && + abortAfter) { + txn = env.beginTransaction(null, null); + assertEquals(OperationStatus.SUCCESS, + db.put(txn, key, data)); + txn.abort(); + } + } + } + } + + private void openDbs(boolean create, + boolean transactional, + boolean duplicatesAllowed, + Transaction txn) + throws DatabaseException { + + for (int dbCnt = 0; dbCnt < N_DBS; dbCnt++) { + String databaseName = "simpleDb" + dbCnt; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(create); + dbConfig.setSortedDuplicates(duplicatesAllowed); + dbConfig.setTransactional(transactional); + if (dbs[dbCnt] != null) { + throw new RuntimeException("database already open"); + } + dbs[dbCnt] = env.openDatabase(txn, databaseName, dbConfig); + } + } + + private void dumpDbs(boolean printable, boolean aggressive) + throws DatabaseException { + + try { + DbScavenger scavenger = + new DbScavenger(env, envHomeName, printable, aggressive, + false /* verbose */); + scavenger.dump(); + } catch (IOException IOE) { + throw new RuntimeException(IOE); + } + } + + private void loadDbs() + throws DatabaseException { + + try { + String dbNameBase = "simpleDb"; + for (int i = 0; i < N_DBS; i++) { + DbLoad loader = new DbLoad(); + File file = new File(envHomeName, dbNameBase + i + ".dump"); + FileInputStream is = new FileInputStream(file); + BufferedReader reader = + new BufferedReader(new InputStreamReader(is)); + loader.setEnv(env); + loader.setInputReader(reader); + loader.setNoOverwrite(false); + loader.setDbName(dbNameBase + i); + loader.load(); + is.close(); + } + } catch (IOException IOE) { + throw new RuntimeException(IOE); + } + } + + private void verifyDbs(Map[] dataMaps) + throws DatabaseException { + + for (int i = 0; i < N_DBS; i++) { + Map dataMap = dataMaps[i]; + Cursor cursor = dbs[i].openCursor(null, null); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + while (cursor.getNext(key, data, null) == + OperationStatus.SUCCESS) { + Integer keyInt = + new Integer(IntegerBinding.entryToInt(key)); + String databaseString = StringUtils.fromUTF8(data.getData()); + String originalString = (String) dataMap.get(keyInt); + if (originalString == null) { + fail("couldn't find " + keyInt); + } else if (databaseString.equals(originalString)) { + dataMap.remove(keyInt); + } else { + fail(" Mismatch: key=" + keyInt + + " Expected: " + originalString + + " Found: " + databaseString); + } + } + + if (dataMap.size() > 0) { + fail("entries still remain for db " + i + ": " + + dataMap.keySet()); + } + + cursor.close(); + } + } + + private static DumpFileFilter dumpFileFilter = new DumpFileFilter(); + + static class DumpFileFilter implements FilenameFilter { + + /** + * Accept files of this format: + * *.dump + */ + public boolean accept(File dir, String name) { + StringTokenizer tokenizer = new StringTokenizer(name, "."); + /* There should be two parts. 
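+             * e.g. "simpleDb0.dump" tokenizes into "simpleDb0" and "dump".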
*/
+            if (tokenizer.countTokens() == 2) {
+                tokenizer.nextToken();
+                String fileSuffix = tokenizer.nextToken();
+
+                /* Check the suffix. */
+                if (fileSuffix.equals("dump")) {
+                    return true;
+                }
+            }
+
+            return false;
+        }
+    }
+
+    private long getLastLsn() {
+        return DbInternal.getNonNullEnvImpl(env).
+            getFileManager().getLastUsedLsn();
+    }
+
+    private void corruptFiles(Set<Long> lsnsToCorrupt)
+        throws DatabaseException {
+
+        Iterator<Long> iter = lsnsToCorrupt.iterator();
+        while (iter.hasNext()) {
+            long lsn = iter.next().longValue();
+            corruptFile(DbLsn.getFileNumber(lsn),
+                        DbLsn.getFileOffset(lsn));
+        }
+    }
+
+    private void corruptFile(long fileNumber, long fileOffset)
+        throws DatabaseException {
+
+        String fileName = DbInternal.getNonNullEnvImpl(env).
+            getFileManager().getFullFileName(fileNumber,
+                                             FileManager.JE_SUFFIX);
+        /*
+        System.out.println("corrupting 1 byte at " +
+                           DbLsn.makeLsn(fileNumber, fileOffset));
+        */
+        try {
+            RandomAccessFile raf = new RandomAccessFile(fileName, "rw");
+            raf.seek(fileOffset);
+            int current = raf.read();
+            raf.seek(fileOffset);
+            raf.write(current + 1);
+            raf.close();
+        } catch (IOException IOE) {
+            throw new RuntimeException(IOE);
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/util/DebugRecordTest.java b/test/com/sleepycat/je/util/DebugRecordTest.java
new file mode 100644
index 0000000..47e6bc1
--- /dev/null
+++ b/test/com/sleepycat/je/util/DebugRecordTest.java
@@ -0,0 +1,230 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.ParsePosition;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.StringTokenizer;
+import java.util.logging.Level;
+
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.config.EnvironmentParams;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.log.LogEntryType;
+import com.sleepycat.je.log.SearchFileReader;
+import com.sleepycat.je.log.Trace;
+import com.sleepycat.je.utilint.DbLsn;
+import com.sleepycat.je.utilint.LoggerUtils;
+import com.sleepycat.je.utilint.TracerFormatter;
+import com.sleepycat.util.test.SharedTestUtils;
+
+/**
+ * This test originally ran in dual mode. After changes were made that started
+ * making replication recovery run a different path, the expected entries
+ * in the log began to differ substantially between a replicated and a
+ * non-replicated environment, and the dual mode version was removed.
+ * If more test cases are eventually added to this test, we may want to
+ * return to dual mode.
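+ *
+ * testDebugLogging logs a normal trace message and an exception trace,
+ * verifies both as LOG_TRACE entries in the database log, and then scans
+ * the je.info text file.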
+ */ +public class DebugRecordTest extends DualTestCase { + private File envHome; + private Environment env; + + public DebugRecordTest() { + envHome = SharedTestUtils.getTestDir(); + env = null; + } + + @Test + public void testDebugLogging() + throws DatabaseException, IOException { + + try { + + /* + * Turn on the txt file and db log logging, turn off the console. + */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setConfigParam + (EnvironmentParams.NODE_MAX.getName(), "6"); + envConfig.setAllowCreate(true); + /* Disable noisy cleaner DB creation. */ + DbInternal.setCreateUP(envConfig, false); + DbInternal.setCreateEP(envConfig, false); + /* Don't run the cleaner without a UtilizationProfile. */ + envConfig.setConfigParam + (EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); + envConfig.setTransactional(true); + + env = create(envHome, envConfig); + + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + + List expectedDbLogRecords = new ArrayList(); + List expectedFileRecords = new ArrayList(); + + /* Log a message. */ + Trace.trace(envImpl, "hi there"); + expectedDbLogRecords.add(new Trace("hi there")); + + /* + * Log an exception. The je.info file defaults to SEVERE, and will + * only hold exceptions. + */ + RuntimeException e = new RuntimeException("fake exception"); + LoggerUtils.traceAndLogException(envImpl, "DebugRecordTest", + "testException", "foo", e); + Trace exceptionTrace = new Trace("foo\n" + + LoggerUtils.getStackTrace(e)); + expectedDbLogRecords.add(exceptionTrace); + + /* Log a split and flush the log to disk. */ + envImpl.getLogManager().flushSync(); + envImpl.getFileManager().clear(); + + /* Verify. */ + checkDatabaseLog(expectedDbLogRecords); + checkTextFile(expectedFileRecords); + + } finally { + if (env != null) { + close(env); + } + } + } + + /** + * Check what's in the database log. + */ + private void checkDatabaseLog(List expectedList) + throws DatabaseException { + + SearchFileReader searcher = + new SearchFileReader(DbInternal.getNonNullEnvImpl(env), + 1000, true, DbLsn.NULL_LSN, + DbLsn.NULL_LSN, LogEntryType.LOG_TRACE); + + int numSeen = 0; + while (searcher.readNextEntry()) { + Trace dRec = (Trace) searcher.getLastObject(); + assertEquals("Should see this as " + numSeen + " record: ", + expectedList.get(numSeen).getMessage(), + dRec.getMessage()); + numSeen++; + } + + assertEquals("Should see this many debug records", + expectedList.size(), numSeen); + } + + /** + * Check what's in the text file. + */ + private void checkTextFile(List expectedList) + throws IOException { + + FileReader fr = null; + BufferedReader br = null; + try { + String textFileName = + DbInternal.getNonNullEnvImpl(env).getEnvironmentHome() + + File.separator + "je.info.0"; + fr = new FileReader(textFileName); + br = new BufferedReader(fr); + + String line = br.readLine(); + int numSeen = 0; + + /* + * Read the file, checking only lines that start with valid Levels. + */ + while (line != null) { + try { + /* The line should start with a valid date. */ + ParsePosition pp = new ParsePosition(0); + DateFormat ff = TracerFormatter.makeDateFormat(); + ff.parse(line, pp); + + /* There should be a java.util.logging.level next. 
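+                     * i.e. each record line is expected to look like
+                     * "date level message", for example (exact layout is
+                     * an assumption based on TracerFormatter.makeDateFormat):
+                     * 2017-01-01 00:00:00.000 UTC SEVERE foo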
*/ + int dateEnd = pp.getIndex(); + int levelEnd = line.indexOf(" ", dateEnd + 1); + String possibleLevel = line.substring(dateEnd + 1, + levelEnd); + Level.parse(possibleLevel); + + String expected = + expectedList.get(numSeen).getMessage(); + StringBuilder seen = new StringBuilder(); + seen.append(line.substring(levelEnd + 1)); + /* + * Assemble the log message by reading the right number + * of lines + */ + StringTokenizer st = + new StringTokenizer(expected, + Character.toString('\n'), false); + + for (int i = 1; i < st.countTokens(); i++) { + seen.append('\n'); + String l = br.readLine(); + seen.append(l); + if (i == (st.countTokens() -1)) { + seen.append('\n'); + } + } + /* XXX, diff of multiline stuff isn't right yet. */ + + /* + * The formatters for rep test and non-rep test + * different, so ignore this check here. + */ + if (!isReplicatedTest(getClass())) { + if (st.countTokens() == 1) { + assertEquals("Line " + numSeen + + " should be the same", + expected, seen.toString()); + } + } + numSeen++; + } catch (Exception e) { + /* Skip this line, not a message. */ + } + line = br.readLine(); + } + assertEquals("Should see this many debug records", + expectedList.size(), numSeen); + } finally { + if (br != null) { + br.close(); + } + + if (fr != null) { + fr.close(); + } + } + } +} diff --git a/test/com/sleepycat/je/util/DualTestCase.java b/test/com/sleepycat/je/util/DualTestCase.java new file mode 100644 index 0000000..bdd8b85 --- /dev/null +++ b/test/com/sleepycat/je/util/DualTestCase.java @@ -0,0 +1,151 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.util.test.TestBase; + +public abstract class DualTestCase extends TestBase { + + /* All environment management APIs are forwarded to this wrapper. */ + private EnvTestWrapper envWrapper; + + /* Helps determine whether setUp()and tearDown() were invoked as a pair */ + private boolean setUpInvoked = false; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + setUpInvoked = true; + if (DualTestCase.isReplicatedTest(getClass())) { + try { + /* Load the class dynamically to avoid a dependency */ + Class cl = + Class.forName("com.sleepycat.je.rep.util.RepEnvWrapper"); + envWrapper = (EnvTestWrapper) cl.newInstance(); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } catch (InstantiationException e) { + throw new RuntimeException(e); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } else { + envWrapper = new EnvTestWrapper.LocalEnvWrapper(); + } + } + + @After + public void tearDown() + throws Exception { + + if (!setUpInvoked) { + throw new IllegalStateException + ("DualTestCase.tearDown was invoked without a corresponding " + + "DualTestCase.setUp() call"); + } + } + + /** + * Creates the environment to be used by the test case. 
If the environment + * already exists on disk, it reuses it. If not, it creates a new + * environment and returns it. + */ + protected Environment create(File envHome, + EnvironmentConfig envConfig) + throws DatabaseException { + + return envWrapper.create(envHome, envConfig); + } + + /** + * Closes the environment. + * + * @param environment the environment to be closed. + * + * @throws DatabaseException + */ + protected void close(Environment environment) + throws DatabaseException { + + envWrapper.close(environment); + } + + protected void resetNodeEqualityCheck() { + envWrapper.resetNodeEqualityCheck(); + } + + /** + * Closes the environment without a checkpoint. + * + * @param environment the environment to be closed. + * + * @throws DatabaseException + */ + protected void closeNoCheckpoint(Environment environment) + throws DatabaseException { + + envWrapper.closeNoCheckpoint(environment); + } + + /** + * Simulate a crash. + */ + protected void abnormalClose(Environment environment) + throws DatabaseException { + + envWrapper.abnormalClose(environment); + } + + /** + * Destroys the contents of the test directory used to hold the test + * environments. + * + * @throws Exception + */ + protected void destroy() + throws Exception { + + envWrapper.destroy(); + } + + /** + * Determines whether this test is to be run with a replicated environment. + * If the test is in the "rep" package it assumes that the test is to be + * run in a replicated environment. + * + * It's used to bypass the specifics of tests that may not be suitable for + * replication, e.g. non-transactional mode testing. + * + * @param testCaseClass the test case class + * @return true if the test uses a replicated environment, false otherwise. + */ + public static boolean isReplicatedTest(Class testCaseClass) { + return testCaseClass.getName().contains(".rep."); + } + + /* Returns the environment test wrapper. */ + protected EnvTestWrapper getWrapper() { + return envWrapper; + } +} diff --git a/test/com/sleepycat/je/util/EnvTestWrapper.java b/test/com/sleepycat/je/util/EnvTestWrapper.java new file mode 100644 index 0000000..7655b47 --- /dev/null +++ b/test/com/sleepycat/je/util/EnvTestWrapper.java @@ -0,0 +1,143 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; + +/** + * This wrapper encapsulates environment operations used while running unit + * tests. The encapsulation permits creation of standalone or replicated + * environments as needed behind the wrapper, so that the test does not have + * to deal with the mechanics of environment management in each case and can + * focus on just the test logic itself. + * + * It provides the following API needed to: + * + * 1) create new environments + * + * 2) Close open environments. 
+ *
+ * 3) Clear out the test directory used to create environments for a test
+ *
+ */
+public abstract class EnvTestWrapper {
+
+    /**
+     * Creates the environment to be used by the test case. If the environment
+     * already exists on disk, it's reused. If not, it creates a new
+     * environment and returns it.
+     */
+    public abstract Environment create(File envHome,
+                                       EnvironmentConfig envConfig)
+        throws DatabaseException;
+
+    /**
+     * Closes the environment.
+     *
+     * @param environment the environment to be closed.
+     *
+     * @throws DatabaseException
+     */
+    public abstract void close(Environment environment)
+        throws DatabaseException;
+
+    public abstract void closeNoCheckpoint(Environment environment)
+        throws DatabaseException;
+
+    public abstract void abnormalClose(Environment env);
+
+    public abstract void resetNodeEqualityCheck();
+
+    /**
+     * Destroys the contents of the test directory used to hold the test
+     * environments.
+     *
+     * @throws Exception
+     */
+    public abstract void destroy()
+        throws Exception;
+
+    /**
+     * A wrapper for local tests.
+     */
+    public static class LocalEnvWrapper extends EnvTestWrapper {
+        private File envDir;
+        private final Map<File, Environment> dirEnvMap =
+            new HashMap<File, Environment>();
+
+        @Override
+        public Environment create(File envHome,
+                                  EnvironmentConfig envConfig)
+            throws DatabaseException {
+
+            this.envDir = envHome;
+            Environment env = new Environment(envHome, envConfig);
+            dirEnvMap.put(envHome, env);
+            return env;
+        }
+
+        @Override
+        public void close(Environment env)
+            throws DatabaseException {
+
+            env.close();
+        }
+
+        @Override
+        public void resetNodeEqualityCheck() {
+            throw new UnsupportedOperationException
+                ("This operation is not supported by the base environment.");
+        }
+
+        /* Provide the utility for closing without a checkpoint. */
+        @Override
+        public void closeNoCheckpoint(Environment env)
+            throws DatabaseException {
+
+            DbInternal.getNonNullEnvImpl(env).close(false);
+        }
+
+        @Override
+        public void abnormalClose(Environment env) {
+            DbInternal.getNonNullEnvImpl(env).abnormalClose();
+        }
+
+        @Override
+        public void destroy() {
+            if (dirEnvMap == null) {
+                return;
+            }
+            for (Environment env : dirEnvMap.values()) {
+                try {
+                    /* Close in case we hit an exception and didn't close */
+                    env.close();
+                } catch (RuntimeException e) {
+                    /* OK if already closed */
+                }
+            }
+            dirEnvMap.clear();
+            if (envDir != null) {
+                TestUtils.removeLogFiles("TearDown", envDir, false);
+            }
+            envDir = null;
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/util/HexFormatterTest.java b/test/com/sleepycat/je/util/HexFormatterTest.java
new file mode 100644
index 0000000..1c9390a
--- /dev/null
+++ b/test/com/sleepycat/je/util/HexFormatterTest.java
@@ -0,0 +1,37 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Test;
+
+import com.sleepycat.je.utilint.HexFormatter;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * Tests HexFormatter, a trivial formatting class that sticks leading 0's
+ * on the front of a hex number.
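+ * For example, formatLong(1) yields "0x0000000000000001": a "0x" prefix
+ * followed by 16 zero-padded, lowercase hex digits.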
+ */
+public class HexFormatterTest extends TestBase {
+
+    @Test
+    public void testFormatLong() {
+        assertTrue(HexFormatter.formatLong(0).equals("0x0000000000000000"));
+        assertTrue(HexFormatter.formatLong(1).equals("0x0000000000000001"));
+        assertTrue(HexFormatter.formatLong(0x1234567890ABCDEFL).equals("0x1234567890abcdef"));
+        assertTrue(HexFormatter.formatLong(0x1234567890L).equals("0x0000001234567890"));
+        assertTrue(HexFormatter.formatLong(0xffffffffffffffffL).equals("0xffffffffffffffff"));
+    }
+}
diff --git a/test/com/sleepycat/je/util/InfoFileFilter.java b/test/com/sleepycat/je/util/InfoFileFilter.java
new file mode 100644
index 0000000..85e077d
--- /dev/null
+++ b/test/com/sleepycat/je/util/InfoFileFilter.java
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.util;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.StringTokenizer;
+
+public class InfoFileFilter implements FilenameFilter {
+
+    /**
+     * Accept files of this format:
+     * je.info.#
+     */
+    public boolean accept(File dir, String name) {
+        boolean ok = false;
+        StringTokenizer tokenizer = new StringTokenizer(name, ".");
+        // there should be three parts
+        if (tokenizer.countTokens() == 3) {
+            String filePrefix = tokenizer.nextToken();
+            String fileSuffix = tokenizer.nextToken();
+            String repeat = tokenizer.nextToken();
+
+            // check the prefix and the suffix
+            if (filePrefix.equals("je") && fileSuffix.equals("info")) {
+                // The last part should be a number
+                try {
+                    Integer.parseInt(repeat);
+                    ok = true;
+                } catch (NumberFormatException e) {
+                    ok = false;
+                }
+            }
+        }
+
+        return ok;
+    }
+}
diff --git a/test/com/sleepycat/je/util/LogFileCorruptionTest.java b/test/com/sleepycat/je/util/LogFileCorruptionTest.java
new file mode 100644
index 0000000..6ab3cbf
--- /dev/null
+++ b/test/com/sleepycat/je/util/LogFileCorruptionTest.java
@@ -0,0 +1,457 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.Calendar; +import java.util.TimerTask; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.Get; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.util.verify.DataVerifier; +import com.sleepycat.je.utilint.CronScheduleParser; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/* + * Test the data corruption caused by media/disk failure. In addition, the + * code to parse the task schedule is also tested. Btree corruption + * verification is tested in com.sleepycat.je.util.BtreeCorruptionTest. + */ +public class LogFileCorruptionTest extends TestBase { + + private static final String DB_NAME = "tempDB"; + + private Environment env; + private Database db; + private File envHome; + private Cursor c; + + private final int recNum = 1000 * 50; //(1000 * 500) * 50 files + private final int dataLen = 500; + private final int totalWaitTries = 100; + + private static long millsOneDay = 24 * 60 * 60 * 1000; + private static long millsOneHour = 60 * 60 * 1000; + private static long millsOneMinute = 60 * 1000; + + private static final EnvironmentConfig envConfigWithVerifier + = initConfig(); + private static final EnvironmentConfig envConfigWithoutVerifier + = initConfig(); + + private static Calendar generatedCurCal = Calendar.getInstance(); + + static { + envConfigWithoutVerifier.setConfigParam( + EnvironmentParams.ENV_RUN_VERIFIER.getName(), "false"); + + /* + * Set the current Calendar to be 00:01 Friday. + */ + generatedCurCal.set(Calendar.DAY_OF_WEEK, 6); + generatedCurCal.set(Calendar.HOUR_OF_DAY, 0); + generatedCurCal.set(Calendar.MINUTE, 1); + generatedCurCal.set(Calendar.SECOND, 0); + generatedCurCal.set(Calendar.MILLISECOND, 0); + } + + @Before + public void setUp() + throws Exception { + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() + throws Exception { + CronScheduleParser.setCurCalHook = null; + + if (c != null) { + try { + c.close(); + } catch (EnvironmentFailureException efe) { + + } + c = null; + } + + if (db != null) { + try { + db.close(); + } catch (EnvironmentFailureException efe) { + + } + db = null; + } + + if (env != null) { + env.close(); + env = null; + } + + super.tearDown(); + } + + /** + * Test config via EnvironmentConfig. + */ + @Test + public void testConfig() { + + checkConfig("* * * * *", 0, millsOneMinute); + + /* + * Because of the Daylight Saving Time or the Winter time, the + * calculated delay by using 7 * millsOneDay may not be right, i.e. 
+ * loss or get one more hour. + */ + Calendar scheculedCal = (Calendar) generatedCurCal.clone(); + + scheculedCal.set(Calendar.DAY_OF_WEEK, 7); + scheculedCal.set(Calendar.HOUR_OF_DAY, 0); + scheculedCal.set(Calendar.MINUTE, 0); + checkConfig( + null, + scheculedCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(), + millsOneDay); + + scheculedCal.set(Calendar.DAY_OF_WEEK, 6); + scheculedCal.set(Calendar.HOUR_OF_DAY, 0); + scheculedCal.set(Calendar.MINUTE, 5); + checkConfig( + "5 * * * *", + scheculedCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(), + millsOneHour); + + scheculedCal.set(Calendar.DAY_OF_WEEK, 7); + scheculedCal.set(Calendar.HOUR_OF_DAY, 22); + scheculedCal.set(Calendar.MINUTE, 10); + checkConfig( + "10 22 * * 6", + scheculedCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(), + 7 * millsOneDay); + + scheculedCal.set(Calendar.DAY_OF_WEEK, 4); + scheculedCal.set(Calendar.HOUR_OF_DAY, 22); + scheculedCal.set(Calendar.MINUTE, 10); + scheculedCal.add(Calendar.DATE, 7); + checkConfig( + "10 22 * * 3", + scheculedCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(), + 7 * millsOneDay); + } + + private void checkConfig(String cronSchedule, long delay, long interval) { + + EnvironmentConfig envConfig = initConfig(); + /* + * For current test, in order to let DataVerifier to run + * during the JE Standalone test, so I set the default value of + * VERIFY_SCHEDULE to be "0 * * * *". + * + * In future, I may set this value in each JE Standalone test and + * recover the default value to "0 0 * * *" even if when testing. + * + * For now, when we test configuration, we set it to be normal default + * value, i.e. "0 0 * * *". + * + */ + envConfig.setConfigParam( + EnvironmentConfig.VERIFY_SCHEDULE, "0 0 * * *"); + + if (cronSchedule != null) { + envConfig.setConfigParam( + EnvironmentConfig.VERIFY_SCHEDULE, cronSchedule); + } + + MyHook hook = new MyHook(); + CronScheduleParser.setCurCalHook = hook; + + env = new Environment(envHome, envConfig); + + DataVerifier verifier = + DbInternal.getEnvironmentImpl(env).getDataVerifier(); + + assertNotNull(verifier); + assertEquals(delay, verifier.getVerifyDelay()); + assertEquals(interval, verifier.getVerifyInterval()); + + env.close(); + env = null; + } + + @Test + public void testConfigChange() { + EnvironmentConfig envConfig = initConfig(); + envConfig.setConfigParam( + EnvironmentConfig.VERIFY_SCHEDULE, "0 0 * * *"); + + MyHook hook = new MyHook(); + CronScheduleParser.setCurCalHook = hook; + + env = new Environment(envHome, envConfig); + + DataVerifier verifier = + DbInternal.getEnvironmentImpl(env).getDataVerifier(); + + Calendar scheculedCal = (Calendar) generatedCurCal.clone(); + + /* The default VERIFY_SCHEDULE "0 0 * * *" */ + assertNotNull(verifier); + assertNotNull(verifier.getVerifyTask()); + assertNotNull(verifier.getCronSchedule()); + scheculedCal.set(Calendar.DAY_OF_WEEK, 7); + scheculedCal.set(Calendar.HOUR_OF_DAY, 0); + scheculedCal.set(Calendar.MINUTE, 0); + assertEquals( + scheculedCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(), + verifier.getVerifyDelay()); + assertEquals(millsOneDay, verifier.getVerifyInterval()); + TimerTask oldVerifyTask = verifier.getVerifyTask(); + String oldCronSchedule = verifier.getCronSchedule(); + + /* Change VERIFY_SCHEDULE. 
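+         * Schedule fields are cron style: "minute hour day month weekday".
+         * Switching from the default "0 0 * * *" (midnight daily) to
+         * "5 * * * *" (minute 5, hourly) should shrink the verify
+         * interval from one day to one hour.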
*/ + envConfig.setConfigParam( + EnvironmentConfig.VERIFY_SCHEDULE, "5 * * * *"); + env.setMutableConfig(envConfig); + assertNotNull(verifier); + assertNotNull(verifier.getVerifyTask()); + assertNotSame(oldVerifyTask, verifier.getVerifyTask()); + assertNotNull(verifier.getCronSchedule()); + assertNotSame(oldCronSchedule, verifier.getCronSchedule()); + scheculedCal.set(Calendar.DAY_OF_WEEK, 6); + scheculedCal.set(Calendar.HOUR_OF_DAY, 0); + scheculedCal.set(Calendar.MINUTE, 5); + assertEquals( + scheculedCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(), + verifier.getVerifyDelay()); + assertEquals(millsOneHour, verifier.getVerifyInterval()); + + /* Disable ENV_RUN_VERIFIER */ + envConfig.setConfigParam( + EnvironmentConfig.ENV_RUN_VERIFIER, "false"); + env.setMutableConfig(envConfig); + assertNotNull(verifier); + assertNull(verifier.getCronSchedule()); + assertEquals(0, verifier.getVerifyDelay()); + assertEquals(0, verifier.getVerifyInterval()); + + env.close(); + env = null; + } + + class MyHook implements TestHook { + + @Override + public void doHook() { + + CronScheduleParser.curCal = generatedCurCal; + } + + @Override + public void doHook(Void obj) { + } + @Override + public void hookSetup() { + } + @Override + public void doIOHook() throws IOException { + } + @Override + public Void getHookValue() { + return null; + } + } + + private static EnvironmentConfig initConfig() { + EnvironmentConfig config = TestUtils.initEnvConfig(); + config.setAllowCreate(true); + config.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_EVICTOR, "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, + "false"); + config.setConfigParam(EnvironmentConfig.ENV_RUN_IN_COMPRESSOR, + "false"); + config.setCacheSize(1000000); + config.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "1000000"); + config.setConfigParam(EnvironmentConfig.VERIFY_SCHEDULE, "* * * * *"); + return config; + } + + @Test + public void testDataCorruptWithVerifier() { + System.out.println("testDataCorruptWithVerifier"); + testDataCorruptionVerifierInternal(envConfigWithVerifier); + } + + @Test + public void testDataCorruptWithoutVerifier() { + System.out.println("testDataCorruptWithoutVerifier"); + testDataCorruptionVerifierInternal(envConfigWithoutVerifier); + } + + private void testDataCorruptionVerifierInternal(EnvironmentConfig config) { + openEnvAndDb(config); + initialDb(); + /* The first pass traverse to add file handles to fileCache. */ + traverseDb(false); + createDataCorrupt(); + traverseDb(true); + } + + public void openEnvAndDb(EnvironmentConfig config) { + env = new Environment(envHome, config); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + db = env.openDatabase(null, DB_NAME, dbConfig); + + c = db.openCursor(null, null); + } + + public void initialDb() { + try { + for (int i = 0 ; i < recNum; i++) { + final DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(i, key); + final DatabaseEntry data = new DatabaseEntry(new byte[dataLen]); + db.put(null, key, data); + } + } catch (DatabaseException dbe) { + throw new RuntimeException("Initiate Database fails.", dbe); + } + + final int totalFiles = + DbInternal.getEnvironmentImpl(env).getFileManager(). 
+ getAllFileNumbers().length; + assert totalFiles < 100 : "Total file number is " + totalFiles; + } + + public void createDataCorrupt() { + final EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env); + final FileManager fm = envImpl.getFileManager(); + final File[] files = fm.listJDBFiles(); + File choosenFile = null; + try { + for (File file : files) { + //System.out.println(file.getCanonicalPath()); + if (file.getCanonicalPath().contains("00000002.jdb")) { + choosenFile = file; + break; + } + } + + RandomAccessFile rafile = new RandomAccessFile(choosenFile, "rw"); + long fileLength = rafile.length(); + rafile.seek(fileLength / 2); + byte b = rafile.readByte(); + if (b == 255) { + b = (byte)(b - 1); + } else { + b = (byte)(b + 1); + } + rafile.seek(fileLength / 2); + rafile.writeByte(b); + rafile.close(); + } catch (Exception e) { + throw new RuntimeException("Create data corruption fails.", e); + } + } + + /* + * The first pass traverse aims to cache all the log files. The second + * pass traverse aims to check whether the Read operation can succeed + * when one log file is corrupted, depending on whether ENV_RUN_VERIFIER + * is set. + */ + private void traverseDb(boolean check) { + boolean verify = DbInternal.getEnvironmentImpl(env).getConfigManager(). + getBoolean(EnvironmentParams.ENV_RUN_VERIFIER); + try { + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + int recordCount = 0; + int firstKey; + do { + if (!check) { + firstKey = 0; + } else { + firstKey = 20000; + } + IntegerBinding.intToEntry(firstKey, key); + + assert c.get(key, data, Get.SEARCH, null) != null : + "The db should contain this record: key is " + firstKey; + + if (!check) { + while (c.get(key, data, Get.NEXT, null) != null) { + // Do nothing. + } + } + /* + * The smallest interval of the VERIFY_SCHEDULE is 1 minutes, + * so here we try to sleep 1s for totalWaitTries times to + * guarantee that the data corruption verifier task run at + * least once. + */ + try {Thread.sleep(1000);} catch (Exception e) {} + } while (check && ++recordCount < totalWaitTries); + + if (check) { + if (verify) { + fail("With verifying data corruption, we should catch" + + "EnvironmentFailureException."); + } + } + } catch (EnvironmentFailureException efe) { + assertTrue(efe.isCorrupted()); + if (check) { + if (!verify) { + fail("Without verifying data corruption, we should" + + "not catch EnvironmentFailureException"); + } + } + // Leave tearDown() to close cursor, db and env. + } + } +} diff --git a/test/com/sleepycat/je/util/MiniPerf.java b/test/com/sleepycat/je/util/MiniPerf.java new file mode 100644 index 0000000..9b60e55 --- /dev/null +++ b/test/com/sleepycat/je/util/MiniPerf.java @@ -0,0 +1,177 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import java.io.File; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.utilint.StringUtils; + +public class MiniPerf { + + private File envHome; + private Environment exampleEnv; + private Database exampleDb; + private Cursor cursor; + + static int nKeys; + + static public void main(String argv[]) + throws DatabaseException, NumberFormatException { + + boolean create = false; + if (argv.length > 0) { + nKeys = Integer.parseInt(argv[0]); + create = true; + } else { + create = false; + } + new MiniPerf().doit(create); + } + + void doit(boolean create) + throws DatabaseException { + + envHome = SharedTestUtils.getTestDir(); + setUp(create); + testIterationPerformance(create); + tearDown(); + } + + public void setUp(boolean create) + throws DatabaseException { + + if (create) { + TestUtils.removeLogFiles("Setup", envHome, false); + } + + // Set up an environment + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(create); + exampleEnv = new Environment(envHome, envConfig); + + // Set up a database + String databaseName = "simpleDb"; + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + exampleDb = exampleEnv.openDatabase(null, databaseName, dbConfig); + + // Set up cursors + cursor = exampleDb.openCursor(null, null); + } + + public void tearDown() + throws DatabaseException { + + exampleEnv.sync(); + + if (exampleDb != null) { + exampleDb.close(); + exampleDb = null; + } + if (exampleEnv != null) { + try { + exampleEnv.close(); + } catch (DatabaseException DE) { + /* + * Ignore this exception. It's caused by us calling + * tearDown() within the test. Each tearDown() call + * forces the database closed. So when the call from + * junit comes along, it's already closed. 
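+                 * (Environment.close() on an already-closed handle throws
+                 * here rather than being a no-op, hence the ignore.)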
+ */ + } + exampleEnv = null; + } + + cursor = null; + } + + public void testIterationPerformance(boolean create) + throws DatabaseException { + + final int N_KEY_BYTES = 10; + final int N_DATA_BYTES = 20; + + if (create) { + System.out.print("Creating..."); + for (int i = 0; i < nKeys; i++) { + if (i % 100000 == 0) { + System.out.println(i); + } + byte[] key = new byte[N_KEY_BYTES]; + TestUtils.generateRandomAlphaBytes(key); + String keyString = StringUtils.fromUTF8(key); + + byte[] data = new byte[N_DATA_BYTES]; + TestUtils.generateRandomAlphaBytes(data); + String dataString = StringUtils.fromUTF8(data); + cursor.put(new StringDbt(keyString), + new StringDbt(dataString)); + } + System.out.print("done."); + } else { + String middleKey = null; + int middleEntry = -1; + int count = 0; + for (int i = 0; i < 3; i++) { + System.out.print("Iterating..."); + StringDbt foundKey = new StringDbt(); + StringDbt foundData = new StringDbt(); + + long startTime = System.currentTimeMillis(); + OperationStatus status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT); + + count = 0; + while (status == OperationStatus.SUCCESS) { + status = + cursor.getNext(foundKey, foundData, LockMode.DEFAULT); + count++; + if (count == middleEntry) { + middleKey = foundKey.getString(); + } + } + long endTime = System.currentTimeMillis(); + System.out.println("done."); + System.out.println(count + " records found."); + middleEntry = count >> 1; + System.out.println((endTime - startTime) + " millis"); + } + + System.out.println("Middle key: " + middleKey); + + StringDbt searchKey = new StringDbt(middleKey); + StringDbt searchData = new StringDbt(); + for (int j = 0; j < 3; j++) { + long startTime = System.currentTimeMillis(); + for (int i = 0; i < count; i++) { + if (cursor.getSearchKey(searchKey, + searchData, + LockMode.DEFAULT) != OperationStatus.SUCCESS) { + System.out.println("non-0 return"); + } + } + long endTime = System.currentTimeMillis(); + System.out.println((endTime - startTime) + " millis"); + } + } + } +} diff --git a/test/com/sleepycat/je/util/PropUtilTest.java b/test/com/sleepycat/je/util/PropUtilTest.java new file mode 100644 index 0000000..67a753f --- /dev/null +++ b/test/com/sleepycat/je/util/PropUtilTest.java @@ -0,0 +1,219 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.concurrent.TimeUnit; + +import org.junit.Test; + +import com.sleepycat.je.utilint.PropUtil; +import com.sleepycat.util.test.TestBase; + +public class PropUtilTest extends TestBase { + + private static long NS_TO_MICRO = 1000; + private static long NS_TO_MILLI = 1000000; + private static long NS_TO_SECOND = 1000000000; + private static long NS_TO_MINUTE = 60 * NS_TO_SECOND; + private static long NS_TO_HOUR = 60 * NS_TO_MINUTE; + private static long MS_TO_SECOND = 1000; + private static long MS_TO_MINUTE = 60 * MS_TO_SECOND; + private static long MS_TO_HOUR = 60 * MS_TO_MINUTE; + + + @Test + public void testDurationToMillis() { + + /* Disallow negative values. */ + try { + PropUtil.durationToMillis(-1, TimeUnit.SECONDS); + } catch (IllegalArgumentException expected) { + } + + /* Disallow millis > Integer.MAX_VALUE. */ + try { + PropUtil.durationToMillis(((long) Integer.MAX_VALUE) + 1, + TimeUnit.MILLISECONDS); + } catch (IllegalArgumentException expected) { + } + + /* Disallow null unit with non-zero time. */ + try { + PropUtil.durationToMillis(1, null); + } catch (IllegalArgumentException expected) { + } + + /* Allow null unit with zero time. */ + assertEquals(0, PropUtil.durationToMillis(0, null)); + + /* Positive input should result in at least 1 ms. */ + assertEquals(1, PropUtil.durationToMillis(1, TimeUnit.MICROSECONDS)); + assertEquals(1, PropUtil.durationToMillis(1, TimeUnit.NANOSECONDS)); + + /* Misc conversions. */ + assertEquals(0, PropUtil.durationToMillis(0, TimeUnit.SECONDS)); + assertEquals(1, PropUtil.durationToMillis(1, TimeUnit.MILLISECONDS)); + assertEquals(1, PropUtil.durationToMillis(999, TimeUnit.MICROSECONDS)); + assertEquals(1, PropUtil.durationToMillis(1000, TimeUnit.MICROSECONDS)); + assertEquals(1, PropUtil.durationToMillis(1001, TimeUnit.MICROSECONDS)); + assertEquals(1, PropUtil.durationToMillis(1999, TimeUnit.MICROSECONDS)); + assertEquals(2, PropUtil.durationToMillis(2000000, + TimeUnit.NANOSECONDS)); + } + + @Test + public void testMillisToDuration() { + + /* Disallow null unit. */ + try { + PropUtil.millisToDuration(0, null); + } catch (IllegalArgumentException expected) { + } + + /* Misc conversions. 
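+         * millisToDuration converts the other way, from millis into the
+         * given unit: 1000 ms expressed in SECONDS is 1.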
*/ + assertEquals(0, PropUtil.millisToDuration(0, TimeUnit.SECONDS)); + assertEquals(1, PropUtil.millisToDuration(1000, TimeUnit.SECONDS)); + } + + @Test + public void testParseDuration() { + + /* parse value, value in ns, value in ms */ + String[][][] testvals = + {{{"1 ns", "1", "1"}}, + {{"1" , Long.toString(NS_TO_MICRO), "1"}}, + {{"1 us" , "1000", "1"}}, + {{"1 ms", "1000000", "1"}}, + {{"1 nanoseconds", "1", "1"}}, + {{"1 microseconds", "1000", "1"}}, + /* TimeUnitNames */ + {{"3000000 nanoseconds", "3000000", "3"}}, + {{"3000 microseconds", Long.toString(NS_TO_MICRO * 3000), "3"}}, + {{"3 milliseconds", Long.toString(NS_TO_MILLI * 3), "3"}}, + {{"3 seconds", Long.toString(NS_TO_SECOND * 3), "3000"}}, + /* IEEE abbreviations */ + {{"3000000 NS", "3000000", "3"}}, + {{"3000 US", Long.toString(NS_TO_MICRO * 3000), "3"}}, + {{"3 MS", Long.toString(NS_TO_MILLI * 3), "3"}}, + {{"3 S", Long.toString(NS_TO_SECOND * 3), Long.toString(MS_TO_SECOND * 3)}}, + {{"3 MIN", Long.toString(NS_TO_MINUTE * 3), Long.toString(MS_TO_MINUTE * 3)}}, + {{"3 H", Long.toString(NS_TO_HOUR * 3), Long.toString(MS_TO_HOUR * 3)}}, + {{"1 s", Long.toString(NS_TO_SECOND), + Long.toString(MS_TO_SECOND)}}, + {{"1 min", Long.toString(NS_TO_MINUTE), + Long.toString(MS_TO_MINUTE)}}, + {{"1 h", Long.toString(NS_TO_HOUR), Long.toString(MS_TO_HOUR)}}, + /* maximum 32 bit for ms*/ + {{"2147483647 ms", Long.toString(2147483647L * NS_TO_MILLI), + "2147483647"}}, + /* maximum 32 bit ns*/ + {{"596 h", Long.toString(596 * NS_TO_HOUR), + Long.toString(596 * MS_TO_HOUR)}}}; + + String[][][] exceeds32BitMillis = + {{{"2147483648 ms", Long.toString(2147483648L * NS_TO_MILLI), + "2147483648"}}, + {{"597 h", Long.toString(597 * NS_TO_HOUR), + Long.toString(597 * MS_TO_HOUR)}} + }; + + + /* Disallow empty string. */ + try { + PropUtil.parseDuration(""); + } catch (IllegalArgumentException expected) { + } + + /* Disallow whitespace. */ + try { + PropUtil.parseDuration(" \t"); + } catch (IllegalArgumentException expected) { + } + + /* Disallow bad number. */ + try { + PropUtil.parseDuration("X"); + } catch (IllegalArgumentException expected) { + } + + /* Disallow bad number with unit. */ + try { + PropUtil.parseDuration("X ms"); + } catch (IllegalArgumentException expected) { + } + + /* Disallow bad unit. */ + try { + PropUtil.parseDuration("3 X"); + } catch (IllegalArgumentException expected) { + } + + /* Disallow extra stuff after unit. */ + try { + PropUtil.parseDuration("3 ms X"); + } catch (IllegalArgumentException expected) { + } + + /* Disallow negative number. */ + try { + PropUtil.parseDuration("-1"); + } catch (IllegalArgumentException expected) { + } + + /* Disallow negative number with unit. */ + try { + PropUtil.parseDuration("-1 ms"); + } catch (IllegalArgumentException expected) { + } + + for (String[][] val : testvals) { + long valueNano = PropUtil.parseDurationNS(val[0][0]); + assertTrue("expected " + val[0][1] + " got "+ val[0][0], + valueNano == Long.valueOf(val[0][1])); + + int valueMillis = PropUtil.parseDuration(val[0][0]); + assertTrue("expected " + val[0][2] + " got "+ val[0][0], + valueMillis == Long.valueOf(val[0][2])); + } + + for (String[][] val : exceeds32BitMillis) { + try { + long valueNano = PropUtil.parseDurationNS(val[0][0]); + assertTrue("expected " + val[0][1] + " got "+ val[0][0], + valueNano == Long.valueOf(val[0][1])); + + int valueMillis = PropUtil.parseDuration(val[0][0]); + fail("Exception not generated for value exceeding maximum."); + } catch (Exception e) { + // ignore expected exception. 
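+                // parseDurationNS returns a long, so values above
+                // Integer.MAX_VALUE milliseconds still parse as nanos;
+                // parseDuration returns an int, so it must throw here.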
+ } + } + } + + @Test + public void testFormatDuration() { + assertEquals("30 NANOSECONDS", + PropUtil.formatDuration(30, TimeUnit.NANOSECONDS)); + assertEquals("30 MICROSECONDS", + PropUtil.formatDuration(30, TimeUnit.MICROSECONDS)); + assertEquals("30 MILLISECONDS", + PropUtil.formatDuration(30, TimeUnit.MILLISECONDS)); + assertEquals("30 SECONDS", + PropUtil.formatDuration(30, TimeUnit.SECONDS)); + } +} diff --git a/test/com/sleepycat/je/util/RecordSearch.java b/test/com/sleepycat/je/util/RecordSearch.java new file mode 100644 index 0000000..56500db --- /dev/null +++ b/test/com/sleepycat/je/util/RecordSearch.java @@ -0,0 +1,161 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.File; +import java.util.logging.Level; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.config.EnvironmentParams; +import com.sleepycat.je.utilint.CmdUtil; +import com.sleepycat.utilint.StringUtils; + +/** + * KeySearch is a debugging aid that searches the database for a given + * record. + */ +public class RecordSearch { + + public static void main(String[] argv) { + try { + int whichArg = 0; + DatabaseEntry searchKey = null; + String dbName = null; + String keyVal = null; + String levelVal = "SEVERE"; + boolean dumpAll = false; + boolean searchKeyRange = false; + + /* + * Usage: -h (optional + * -db + * -ks + * -ksr + * -a + * -l + */ + String envHome = "."; // default to current directory + while (whichArg < argv.length) { + String nextArg = argv[whichArg]; + + if (nextArg.equals("-h")) { + whichArg++; + envHome = CmdUtil.getArg(argv, whichArg); + } else if (nextArg.equals("-db")) { + whichArg++; + dbName = CmdUtil.getArg(argv, whichArg); + } else if (nextArg.equals("-ks")) { + whichArg++; + keyVal = CmdUtil.getArg(argv, whichArg); + searchKey = new DatabaseEntry(StringUtils.toUTF8(keyVal)); + } else if (nextArg.equals("-ksr")) { + whichArg++; + keyVal = CmdUtil.getArg(argv, whichArg); + searchKey = new DatabaseEntry(StringUtils.toUTF8(keyVal)); + searchKeyRange = true; + } else if (nextArg.equals("-l")) { + whichArg++; + levelVal = CmdUtil.getArg(argv, whichArg); + Level.parse(levelVal); // sanity check level + } else if (nextArg.equals("-a")) { + whichArg++; + String dumpVal = CmdUtil.getArg(argv, whichArg); + dumpAll = Boolean.valueOf(dumpVal).booleanValue(); + } else { + throw new IllegalArgumentException + (nextArg + " is not a supported option."); + } + whichArg++; + } + + if (dbName == null) { + usage(); + System.exit(1); + } + + /* Make a read only environment */ + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + + // Don't debug log to the database log. 
+ envConfig.setConfigParam + (EnvironmentParams.JE_LOGGING_DBLOG.getName(), "false"); + + envConfig.setReadOnly(true); + + Environment envHandle = new Environment(new File(envHome), + envConfig); + + /* Open the db. */ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setReadOnly(true); + DbInternal.setUseExistingConfig(dbConfig, true); + Database db = envHandle.openDatabase(null, dbName, dbConfig); + + DatabaseEntry foundData = new DatabaseEntry(); + if (dumpAll) { + Cursor cursor = db.openCursor(null, null); + DatabaseEntry foundKey = new DatabaseEntry(); + int i = 0; + while (cursor.getNext(foundKey, foundData, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + System.out.println(i + ":key=" + + StringUtils.fromUTF8(foundKey.getData())); + i++; + } + cursor.close(); + } else if (searchKeyRange) { + /* Range Search for the key. */ + Cursor cursor = db.openCursor(null, null); + OperationStatus status = cursor.getSearchKeyRange + (searchKey, foundData, LockMode.DEFAULT); + cursor.close(); + System.out.println("Range Search for key " + keyVal + + " status = " + status + " => " + + StringUtils.fromUTF8(searchKey.getData())); + } else { + /* Search for the key. */ + OperationStatus status = db.get(null, searchKey, foundData, + LockMode.DEFAULT); + System.out.println("Search for key " + keyVal + + " status = " + status); + } + db.close(); + envHandle.close(); + + } catch (Exception e) { + e.printStackTrace(); + System.out.println(e.getMessage()); + usage(); + System.exit(1); + } + } + + private static void usage() { + System.out.println("Usage: RecordSearch"); + System.out.println(" -h "); + System.out.println(" -a "); + System.out.println(" -db "); + System.out.println(" -l logging level"); + System.out.println(" -ks . + * + * To try the custom dumper: + * java -cp "build/test/classes;build/classes" com.sleepycat.je.util.DbPrintLog + * -h -c com.sleepycat.je.util.TestDumper + * TestDumper will list a count of log entries, the log entry type, and the lsn. + */ +public class TestDumper extends DumpFileReader { + public static final String SAVE_INFO_FILE = "dumpLog"; + + private int counter = 0; + + public TestDumper(EnvironmentImpl env, + Integer readBufferSize, + Long startLsn, + Long finishLsn, + Long endOfFileLsn, + String entryTypes, + String txnIds, + Boolean verbose, + Boolean repEntriesOnly, + Boolean forwards) + throws DatabaseException { + + super(env, readBufferSize, startLsn, finishLsn, endOfFileLsn, + entryTypes, null /*dbIds*/, txnIds, verbose, repEntriesOnly, + forwards); + } + + @Override + protected boolean processEntry(ByteBuffer entryBuffer) + throws DatabaseException { + + /* Figure out what kind of log entry this is */ + byte curType = currentEntryHeader.getType(); + LogEntryType lastEntryType = LogEntryType.findType(curType); + + /* Read the entry and dump it into a string buffer. 
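+         *
+         * The header read by DumpFileReader identifies the entry: the
+         * type byte from currentEntryHeader.getType() is mapped to a
+         * LogEntryType, whose shared (reusable) LogEntry instance then
+         * deserializes the body from the ByteBuffer. Each entry appends
+         * one line of the form "<entry type> lsn=<lsn>" to the dumpLog
+         * file in the environment home.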
*/ + LogEntry entry = lastEntryType.getSharedLogEntry(); + entry.readEntry(envImpl, currentEntryHeader, entryBuffer); + + writePrintInfo((lastEntryType + " lsn=" + + DbLsn.getNoFormatString(getLastLsn()))); + + return true; + } + + private void writePrintInfo(String message) { + PrintWriter out = null; + try { + File savedFile = + new File(envImpl.getEnvironmentHome(), SAVE_INFO_FILE); + out = new PrintWriter + (new BufferedWriter(new FileWriter(savedFile, true))); + out.println(message); + } catch (Exception e) { + throw new IllegalStateException("Exception happens while " + + "writing information into the " + + "info file " + e.getMessage()); + } finally { + if (out != null) { + try { + out.close(); + } catch (Exception e) { + throw new IllegalStateException("Run into exception " + + "while closing the BufferedWriter: " + + e.getMessage()); + } + } + } + } +} diff --git a/test/com/sleepycat/je/util/TestUtils.java b/test/com/sleepycat/je/util/TestUtils.java new file mode 100644 index 0000000..d41454b --- /dev/null +++ b/test/com/sleepycat/je/util/TestUtils.java @@ -0,0 +1,982 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import java.io.BufferedReader; +import java.io.Closeable; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileReader; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.OutputStream; +import java.text.NumberFormat; +import java.util.ArrayList; +import java.util.Random; + +import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.EnvironmentMutableConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.dbi.MemoryBudget; +import com.sleepycat.je.evictor.OffHeapCache; +import com.sleepycat.je.utilint.VLSN; +import junit.framework.TestCase; + +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.DbTestProxy; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ExceptionEvent; +import com.sleepycat.je.ExceptionListener; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.dbi.CursorImpl; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.log.FileManager; +import com.sleepycat.je.tree.BIN; +import com.sleepycat.je.tree.ChildReference; +import com.sleepycat.je.tree.IN; +import com.sleepycat.je.tree.LN; +import com.sleepycat.je.tree.SearchResult; +import com.sleepycat.je.tree.Tree; +import com.sleepycat.je.tree.WithRootLatched; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.utilint.StringUtils; + +public class TestUtils { + public static String DEST_DIR = SharedTestUtils.DEST_DIR; + public static 
String NO_SYNC = SharedTestUtils.NO_SYNC; + + public static final String LOG_FILE_NAME = "00000000.jdb"; + + public static final StatsConfig FAST_STATS; + + static { + FAST_STATS = new StatsConfig(); + FAST_STATS.setFast(true); + } + + private static final boolean DEBUG = true; + private static Random rnd = new Random(); + + public void debugMsg(String message) { + + if (DEBUG) { + System.out.println + (Thread.currentThread().toString() + " " + message); + } + } + + static public void setRandomSeed(int seed) { + + rnd = new Random(seed); + } + + static public void generateRandomAlphaBytes(byte[] bytes) { + + byte[] aAndZ = StringUtils.toUTF8("AZ"); + int range = aAndZ[1] - aAndZ[0] + 1; + + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (byte) (rnd.nextInt(range) + aAndZ[0]); + } + } + + static public void checkLatchCount() { + TestCase.assertTrue(LatchSupport.nBtreeLatchesHeld() == 0); + } + + static public void printLatchCount(String msg) { + System.out.println(msg + " : " + LatchSupport.nBtreeLatchesHeld()); + } + + static public void printLatches(String msg) { + System.out.println(msg + " : "); + LatchSupport.dumpBtreeLatchesHeld(); + } + + /** + * Generate a synthetic base 26 four byte alpha key from an int. + * The bytes of the key are between 'A' and 'Z', inclusive. 0 maps + * to 'AAAA', 1 to 'AAAB', etc. + */ + static public int alphaKey(int i) { + + int ret = 0; + for (int j = 0; j < 4; j++) { + byte b = (byte) (i % 26); + ret <<= 8; + ret |= (b + 65); + i /= 26; + } + + return ret; + } + + /** + * Marshall an unsigned int (long) into a four byte buffer. + */ + static public void putUnsignedInt(byte[] buf, long value) { + + int i = 0; + buf[i++] = (byte) (value >>> 0); + buf[i++] = (byte) (value >>> 8); + buf[i++] = (byte) (value >>> 16); + buf[i] = (byte) (value >>> 24); + } + + /** + * All flavors of removeLogFiles should check if the remove has been + * disabled. (Used for debugging, so that the tester can dump the + * log file. + */ + private static boolean removeDisabled() { + + String doRemove = System.getProperty("removeLogFiles"); + return ((doRemove != null) && doRemove.equalsIgnoreCase("false")); + } + + /** + * Remove je log files from the home directory. Will be disabled + * if the unit test is run with -DremoveLogFiles=false + * @param msg prefix to append to error messages + * @param envFile environment directory + */ + public static void removeLogFiles(String msg, + File envFile, + boolean checkRemove) { + removeFiles(msg, envFile, FileManager.JE_SUFFIX, checkRemove); + removeSubDirs(envFile); + } + + /** + * Remove files with this suffix from the je home directory + * @param msg prefix to append to error messages + * @param envFile environment directory + * @param suffix files with this suffix will be removed + */ + public static void removeFiles(String msg, + File envFile, + String suffix) { + removeFiles(msg, envFile, suffix, false); + } + + /** + * Remove files with this suffix from the je home directory + * @param msg prefix to append to error messages + * @param envFile environment directory + * @param suffix files with this suffix will be removed + * @param checkRemove if true, check the -DremoveLogFiles system + * property before removing. 
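+     *
+     * For example, removeLogFiles() above delegates here with
+     * FileManager.JE_SUFFIX (the ".jdb" log extension), deleting every
+     * log file in the environment directory unless the test was started
+     * with -DremoveLogFiles=false.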
+ */ + public static void removeFiles(String msg, + File envFile, + String suffix, + boolean checkRemove) { + if (checkRemove && removeDisabled()) { + return; + } + + String[] suffixes = new String[] { suffix }; + String[] names = FileManager.listFiles(envFile, suffixes, false); + + /* Clean up any target files in this directory. */ + for (int i = 0; i < names.length; i++) { + File oldFile = new File(envFile, names[i]); + boolean done = oldFile.delete(); + assert done : + msg + " directory = " + envFile + + " couldn't delete " + names[i] + " out of " + + names[names.length - 1]; + oldFile = null; + } + } + + /** + * Remove files with the pattern indicated by the filename filter from the + * environment home directory. + * Note that BadFileFilter looks for this pattern: NNNNNNNN.bad.# + * InfoFileFilter looks for this pattern: je.info.# + * @param envFile environment directory + */ + public static void removeFiles(File envFile, FilenameFilter filter) { + if (removeDisabled()) { + return; + } + + File[] targetFiles = envFile.listFiles(filter); + + // Clean up any target files in this directory + for (int i = 0; i < targetFiles.length; i++) { + boolean done = targetFiles[i].delete(); + if (!done) { + System.out.println + ("Warning, couldn't delete " + + targetFiles[i] + + " out of " + + targetFiles[targetFiles.length - 1]); + } + } + } + + /** + * Useful utility for generating byte arrays with a known order. + * Vary the length just to introduce more variability. + * @return a byte array of length val % 100 with the value of "val" + */ + public static byte[] getTestArray(int val, boolean varLen) { + + int length; + + if (varLen) { + length = val % 10; + length = length < 4 ? 4 : length; + } else { + length = 4; + } + + byte[] test = new byte[length]; + test[3] = (byte) ((val >>> 0) & 0xff); + test[2] = (byte) ((val >>> 8) & 0xff); + test[1] = (byte) ((val >>> 16) & 0xff); + test[0] = (byte) ((val >>> 24) & 0xff); + return test; + } + + public static byte[] getTestArray(int val) { + return getTestArray(val, true); + } + + /** + * Return the value of a test data array generated with getTestArray + * as an int + */ + public static int getTestVal(byte[] testArray) { + + int val = 0; + val |= (testArray[3] & 0xff); + val |= ((testArray[2] & 0xff) << 8); + val |= ((testArray[1] & 0xff) << 16); + val |= ((testArray[0] & 0xff) << 24); + return val; + } + + /** + * @return length and data of a byte array, printed as decimal numbers + */ + public static String dumpByteArray(byte[] b) { + + StringBuilder sb = new StringBuilder(); + sb.append(""); + return sb.toString(); + } + + /** + * @return a copy of the passed in byte array + */ + public static byte[] byteArrayCopy(byte[] ba) { + + int len = ba.length; + byte[] ret = new byte[len]; + System.arraycopy(ba, 0, ret, 0, len); + return ret; + } + + /* + * Check that the stored memory count for all INs on the inlist + * matches their computed count. The environment mem usage check + * may be run with assertions or not. + * + * In a multithreaded environment (or one with daemons running), + * you can't be sure that the cached size will equal the calculated size. + * + * Nodes, txns, and locks are all counted within the memory budget. 
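+     *
+     * The check below walks the in-memory IN list, latches each node,
+     * asserts in.verifyMemorySize(), and sums getBudgetedMemorySize();
+     * the tally is compared against the MemoryBudget's cached tree and
+     * tree-admin totals. With assertOnError the totals must match
+     * exactly; otherwise a mismatch is merely printed, and only when it
+     * exceeds 5% of the cached value.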
+ */ + public static long validateNodeMemUsage(EnvironmentImpl envImpl, + boolean assertOnError) + throws DatabaseException { + + TreeMemTally tally = tallyTreeMemUsage(envImpl); + long nodeTallyUsage = tally.treeNodeUsage; + long nodeCacheUsage = envImpl.getMemoryBudget().getTreeMemoryUsage(); + NumberFormat formatter = NumberFormat.getNumberInstance(); + if (assertOnError) { + assert (nodeTallyUsage == nodeCacheUsage) : + "treeNodeTallyUsage=" + formatter.format(nodeTallyUsage) + + " treeNodeCacheUsage=" + formatter.format(nodeCacheUsage); + } else { + if (DEBUG) { + if (nodeCacheUsage != nodeTallyUsage) { + double diff = Math.abs(nodeCacheUsage - nodeTallyUsage); + if ((diff / nodeCacheUsage) > .05) { + System.out.println("treeNodeTallyUsage=" + + formatter.format(nodeTallyUsage) + + " treeNodeCacheUsage=" + + formatter.format(nodeCacheUsage)); + } + } + } + } + + long adminTallyUsage = tally.treeAdminUsage; + long adminCacheUsage = + envImpl.getMemoryBudget().getTreeAdminMemoryUsage(); + if (assertOnError) { + assert (adminTallyUsage == adminCacheUsage) : + "treeAdminTallyUsage=" + formatter.format(adminTallyUsage) + + " treeAdminCacheUsage=" + formatter.format(adminCacheUsage); + } else { + if (DEBUG) { + if (adminCacheUsage != adminTallyUsage) { + double diff = Math.abs(adminCacheUsage - adminTallyUsage); + if ((diff / adminCacheUsage) > .05) { + System.out.println("treeAdminTallyUsage=" + + formatter.format(adminTallyUsage) + + " treeAdminCacheUsage=" + + formatter.format(adminCacheUsage)); + } + } + } + } + + return nodeCacheUsage; + } + + public static long tallyNodeMemUsage(EnvironmentImpl envImpl) + throws DatabaseException { + + return tallyTreeMemUsage(envImpl).treeNodeUsage; + } + + static class TreeMemTally { + final long treeNodeUsage; + final long treeAdminUsage; + + TreeMemTally(long treeNodeUsage, long treeAdminUsage) { + this.treeNodeUsage = treeNodeUsage; + this.treeAdminUsage = treeAdminUsage; + } + } + + private static TreeMemTally tallyTreeMemUsage(EnvironmentImpl envImpl) + throws DatabaseException { + + long treeNodeUsage = 0; + long treeAdminUsage = envImpl.getDbTree().getTreeAdminMemory(); + for (IN in : envImpl.getInMemoryINs()) { + in.latch(); + try { + assert in.verifyMemorySize(): + "in nodeId=" + in.getNodeId() + + ' ' + in.getClass().getName(); + + treeNodeUsage += in.getBudgetedMemorySize(); + + for (int i = 0; i < in.getNEntries(); i += 1) { + Object child = in.getTarget(i); + if (child instanceof LN) { + treeAdminUsage += ((LN) child).getTreeAdminMemory(); + } + } + } finally { + in.releaseLatch(); + } + } + return new TreeMemTally(treeNodeUsage, treeAdminUsage); + } + + /** + * Called by each unit test to enforce isolation level settings specified + * in the isolationLevel system property. Other system properties or + * default settings may be applied in the future. 
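+     *
+     * For example, running the suite with -DisolationLevel=serializable
+     * turns on serializable isolation for the returned config, and
+     * -DisolationLevel=readCommitted turns on read-committed; any other
+     * value is rejected with IllegalArgumentException. Likewise,
+     * -DoffHeapCacheSize=<bytes> is copied into
+     * EnvironmentConfig.MAX_OFF_HEAP_MEMORY.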
+ */ + public static EnvironmentConfig initEnvConfig() { + + EnvironmentConfig config = new EnvironmentConfig(); + + String val = System.getProperty("isolationLevel"); + if (val != null && val.length() > 0) { + if ("serializable".equals(val)) { + config.setTxnSerializableIsolation(true); + } else if ("readCommitted".equals(val)) { + DbInternal.setTxnReadCommitted(config, true); + } else { + throw new IllegalArgumentException + ("Unknown isolationLevel system property value: " + val); + } + } + + val = System.getProperty("offHeapCacheSize"); + if (val != null && val.length() > 0) { + config.setConfigParam(EnvironmentConfig.MAX_OFF_HEAP_MEMORY, val); + } + + return config; + } + + /** + * If a unit test needs to override the isolation level, it should call + * this method after calling initEnvConfig. + */ + public static void clearIsolationLevel(EnvironmentConfig config) { + DbInternal.setTxnReadCommitted(config, false); + config.setTxnSerializableIsolation(false); + } + + /** + * Loads the given resource relative to the given class, and copies it to + * log file zero in the given directory. + */ + public static void loadLog(Class cls, String resourceName, File envHome) + throws IOException { + + loadLog(cls, resourceName, envHome, LOG_FILE_NAME); + } + + /** + * Loads the given resource relative to the given class, and copies it to + * the given log file in the given directory. + */ + public static void loadLog(Class cls, + String resourceName, + File envHome, + String logFileName) + throws IOException { + + File logFile = new File(envHome, logFileName); + InputStream is = cls.getResourceAsStream(resourceName); + OutputStream os = new FileOutputStream(logFile); + byte[] buf = new byte[is.available()]; + int len = is.read(buf); + if (buf.length != len) { + throw new IllegalStateException(); + } + os.write(buf, 0, len); + is.close(); + os.close(); + } + + /** + * Logs the BIN at the cursor provisionally and the parent IN + * non-provisionally. Used to simulate a partial checkpoint or eviction. + */ + public static void logBINAndIN(Environment env, Cursor cursor) + throws DatabaseException { + + logBINAndIN(env, cursor, false /*allowDeltas*/); + } + + public static void logBINAndIN(Environment env, + Cursor cursor, + boolean allowDeltas) + throws DatabaseException { + + BIN bin = getBIN(cursor); + Tree tree = bin.getDatabase().getTree(); + + /* Log the BIN and update its parent entry. */ + bin.latch(); + + SearchResult result = tree.getParentINForChildIN( + bin, false, /*useTargetLevel*/ + true, CacheMode.DEFAULT); + + assert result.parent != null; + assert result.exactParentFound; + IN binParent = result.parent; + + long binLsn = logIN(env, bin, allowDeltas, true, binParent); + + binParent.updateEntry( + result.index, binLsn, + VLSN.NULL_VLSN_SEQUENCE, 0 /*lastLoggedSize*/); + + result.parent.releaseLatch(); + + /* Log the BIN parent and update its parent entry. 
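+         *
+         * The BIN was just logged provisionally (logIN above was called
+         * with provisional=true); its parent is now logged
+         * non-provisionally. If the parent is the root there is no
+         * grandparent slot to update, so the new LSN is installed via
+         * withRootLatchedExclusive instead, which is why the root case
+         * releases the latch and clears result.parent first.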
*/ + binParent.latch(); + + if (binParent.isRoot()) { + binParent.releaseLatch(); + result.parent = null; + } else { + result = tree.getParentINForChildIN( + binParent, false, /*useTargetLevel*/ + true, CacheMode.DEFAULT); + } + + IN inParent = null; + if (result.parent != null) { + result.parent.releaseLatch(); + assert result.exactParentFound; + inParent = result.parent; + inParent.latch(); + } + + final long inLsn = logIN(env, binParent, allowDeltas, false, null); + + if (inParent != null) { + inParent.recoverIN( + result.index, binParent, inLsn, 0 /*lastLoggedSize*/); + + inParent.releaseLatch(); + } else { + tree.withRootLatchedExclusive(new WithRootLatched() { + public IN doWork(ChildReference root) { + root.setLsn(inLsn); + return null; + } + }); + } + } + + /** + * Logs the given IN. + */ + public static long logIN(Environment env, + IN in, + boolean allowDeltas, + boolean provisional, + IN parent) + throws DatabaseException { + + in.latch(); + long lsn = in.log( + allowDeltas, provisional, false /*backgroundIO*/, parent); + in.releaseLatch(); + return lsn; + } + + /** + * Returns the parent IN of the given IN. + */ + public static IN getIN(IN in) + throws DatabaseException { + + Tree tree = in.getDatabase().getTree(); + in.latch(); + + SearchResult result = tree.getParentINForChildIN( + in, false, /*useTargetLevel*/ + true, CacheMode.DEFAULT); + + assert result.parent != null; + result.parent.releaseLatch(); + assert result.exactParentFound; + return result.parent; + } + + /** + * Returns the target BIN for the given cursor. + */ + public static BIN getBIN(Cursor cursor) { + CursorImpl impl = DbTestProxy.dbcGetCursorImpl(cursor); + BIN bin = impl.getBIN(); + assert bin != null; + return bin; + } + + /** + * Assert if the tree is not this deep. Use to ensure that data setups + * are as expected. + */ + public static boolean checkTreeDepth(Database db, int desiredDepth) + throws DatabaseException { + + Tree tree = DbInternal.getDbImpl(db).getTree(); + IN rootIN = tree.getRootIN(CacheMode.UNCHANGED); + int level = 0; + if (rootIN != null) { + level = rootIN.getLevel() & IN.LEVEL_MASK; + rootIN.releaseLatch(); + } + + return (desiredDepth == level); + } + + /** + * @return true if long running tests are enabled. + */ + static public boolean runLongTests() { + return SharedTestUtils.runLongTests(); + } + + /** + * Skip over the JE version number at the start of the exception + * message for tests which are looking for a specific message. 
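+     *
+     * For example (the header shown is illustrative), a message such as
+     * "(JE 7.0.6) Environment is closed" would be returned as
+     * "Environment is closed", letting tests compare against the stable
+     * part of the message only.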
+ */ + public static String skipVersion(Exception e) { + final String header = DatabaseException.getVersionHeader(); + final String msg = e.getMessage(); + if (msg == null || !msg.startsWith(header)) { + return msg; + } + return msg.substring(header.length()); + } + + public static void createEnvHomeWithSubDir(File envHome, + int subDirNumber) { + if (!envHome.exists()) { + throw new IllegalStateException + ("Environment home directory doesn't exist."); + } + + for (int i = 1; i <= subDirNumber; i++) { + String fileName = getSubDirName(i); + File subDir = new File(envHome, fileName); + subDir.mkdir(); + } + } + + public static String getSubDirName(int i) { + if (i < 10) { + return "data00" + i; + } else if (i < 100) { + return "data0" + i; + } else if (i <= 256) { + return "data" + i; + } else { + throw new IllegalArgumentException + ("The number of sub directories is invalid."); + } + } + + public static void removeSubDirs(File envHome) { + if (envHome == null || !envHome.exists()) { + return; + } + + File[] files = envHome.listFiles(); + for (File file : files) { + if (file.isDirectory() && file.getName().startsWith("data")) { + File[] subFiles = file.listFiles(); + for (File subFile : subFiles) { + subFile.delete(); + } + file.delete(); + } + } + } + + /* Read the je.properties and write a new configuration. */ + public static ArrayList readWriteJEProperties(File envHome, + String configure) + throws IOException { + + /* Read the je.properties. */ + File propertyFile = new File(envHome, "je.properties"); + BufferedReader reader = + new BufferedReader(new FileReader(propertyFile)); + ArrayList formerLines = new ArrayList(); + String line = null; + while ((line = reader.readLine()) != null) { + formerLines.add(line); + } + reader.close(); + + /* Write the replicated parameters in the je.properties file. */ + FileWriter writer = new FileWriter(propertyFile, true); + writer.append(configure + "\n"); + writer.flush(); + writer.close(); + + return formerLines; + } + + /* + * Rewrite the je.properties with configurations, it will delete the old + * file and rewrite a new one. + */ + public static void reWriteJEProperties(File envHome, + ArrayList formerLines) + throws IOException { + + File propertyFile = new File(envHome, "je.properties"); + /* Write the je.properties file with the former content. */ + if (propertyFile.exists() && propertyFile.isFile()) { + TestCase.assertTrue(propertyFile.delete()); + } + TestCase.assertTrue(!propertyFile.exists()); + + propertyFile = new File(envHome, "je.properties"); + TestCase.assertTrue(propertyFile.createNewFile()); + + FileWriter writer = new FileWriter(propertyFile, true); + for (String configure : formerLines) { + writer.append(configure + "\n"); + } + writer.flush(); + writer.close(); + } + + /* Serialize an object and read it again. 
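+     *
+     * The round trip below (ObjectOutputStream to configure.out, then
+     * ObjectInputStream back) is how configuration-object tests verify
+     * that a class is Serializable and that its state survives
+     * serialization; the temporary file is deleted before the
+     * deserialized copy is returned.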
*/ + public static Object serializeAndReadObject(File envHome, Object object) + throws Exception { + + File output = new File(envHome, "configure.out"); + ObjectOutputStream out = + new ObjectOutputStream(new FileOutputStream(output)); + out.writeObject(object); + out.close(); + + if (!output.exists()) { + throw new IllegalStateException + ("Can't create the output for serialized object."); + } + + ObjectInputStream in = + new ObjectInputStream(new FileInputStream(output)); + Object newObject = in.readObject(); + in.close(); + + if (!output.delete()) { + throw new IllegalStateException + ("Can't delete the output for serialized object after " + + "testing is done."); + } + + return newObject; + } + + /** + * Dump any exception messages to stderr. + */ + public static class StdErrExceptionListener + implements ExceptionListener { + + public void exceptionThrown(ExceptionEvent event) { + System.err.println(Thread.currentThread() + + " received " + + event); + } + } + + /** + * Calls Closeable.close for each parameter in the order given, if it is + * non-null. + * + * If one or more close methods throws an Exception, all close methods will + * still be called and the first Exception will be rethrown. If an Error + * is thrown by a close method, it will be thrown by this method and no + * further close methods will be called. An IOException may be thrown by a + * close method because is declared by Closeable.close; however, the use of + * RuntimeExceptions is recommended. + */ + public static void closeAll(Closeable... objects) + throws Exception { + + closeAll(null, objects); + } + + /** + * Same as closeAll(Closeable...) but allows passing an initial exception, + * when one may have been thrown earlier during a shutdown procedure. If + * null is passed for the firstEx parameter, calling this method is + * equivalent to calling closeAll(Closeable...). + */ + public static void closeAll(Exception firstEx, Closeable... objects) + throws Exception { + + for (Closeable c : objects) { + if (c == null) { + continue; + } + try { + c.close(); + } catch (Exception e) { + if (firstEx == null) { + firstEx = e; + } + } + } + + if (firstEx != null) { + throw firstEx; + } + } + + /** + * Sets the cache size to the amount needed to hold the specified dataSize + * (with the current cache overhead). + * + * When a test precisely sizes the main cache for a particular data set, + * especially when the data set is small, it is difficult to predict how + * much to add for the cache overheads. For example, the off-heap cache + * adds tree-admin overhead for the LRU lists. This method adjusts the + * cache size to fit a specified data size, given the overheads currently + * present. When an off-heap cache is used, the initial LRU list (enough to + * hold 100k entries) will be pre-allocated to ensure this overhead is + * counted. 
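+     *
+     * In other words, the new size is computed as
+     *
+     *   overhead = localCacheUsage - treeMemoryUsage
+     *   newSize  = overhead + dataSize
+     *
+     * so everything currently charged to the budget that is not Btree
+     * data is treated as fixed overhead, and the remainder is sized to
+     * the caller's data set exactly.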
+ */ + public static long adjustCacheSize(final Environment env, + final long dataSize) { + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final MemoryBudget mem = envImpl.getMemoryBudget(); + + if (envImpl.getOffHeapCache().isEnabled()) { + envImpl.getOffHeapCache().preallocateLRUEntries(); + } + + final long overhead = + mem.getLocalCacheUsage() - mem.getTreeMemoryUsage(); + + final long newSize = overhead + dataSize; + + final EnvironmentMutableConfig config = env.getMutableConfig(); + config.setCacheSize(newSize); + env.setMutableConfig(config); + + return newSize; + } + + public static long adjustSharedCacheSize(final Environment[] envs, + final long dataSize) { + Environment oneEnv = null; + long curSize = 0; + long overhead = 0; + + for (final Environment env : envs) { + + if (env == null) { + continue; + } + + final EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + final MemoryBudget mem = envImpl.getMemoryBudget(); + + if (envImpl.getOffHeapCache().isEnabled()) { + envImpl.getOffHeapCache().preallocateLRUEntries(); + } + + if (oneEnv == null) { + oneEnv = env; + curSize = mem.getMaxMemory(); + } + + overhead += + mem.getLocalCacheUsage() - mem.getTreeMemoryUsage(); + } + + EnvironmentFailureException.assertState(oneEnv != null); + + final long newSize = Math.max(curSize, overhead + dataSize); + + final EnvironmentMutableConfig config = oneEnv.getMutableConfig(); + config.setCacheSize(newSize); + oneEnv.setMutableConfig(config); + + return newSize; + } + + /** + * When a test precisely sizes the main cache, and an off-heap cache is + * used, the main cache needs to be a little larger to hold the off-heap + * LRU lists. Note that all unit tests are run with an off-heap cache. + */ + public static void adjustCacheSizeForOffHeapCache(Environment env) { + + final EnvironmentMutableConfig config = env.getMutableConfig(); + + if (config.getOffHeapCacheSize() == 0) { + return; + } + + config.setCacheSize( + config.getCacheSize() + OffHeapCache.MIN_MAIN_CACHE_OVERHEAD); + + env.setMutableConfig(config); + + DbInternal.getNonNullEnvImpl(env). + getOffHeapCache().preallocateLRUEntries(); + } + + /** + * Returns the number of LNs loaded into the main cache, either from the + * file system or from the off-heap cache. + */ + public static long getNLNsLoaded(EnvironmentStats stats) { + return stats.getNLNsFetchMiss() + stats.getOffHeapLNsLoaded(); + } + + /** + * Returns the number of BINs loaded into the main cache, either from the + * file system or from the off-heap cache. + */ + public static long getNBINsLoaded(EnvironmentStats stats) { + return stats.getNBINsFetchMiss() + stats.getOffHeapBINsLoaded(); + } + + /** + * Uses 'ps' to return the PID of the running program with the given + * className in its command line. + * + * The matching of className is simplistic -- if it appears anywhere in the + * program command line, that's considered a match. Using 'jps' rather than + * 'ps' would improve matching, but 'jps' is not available in the IBM JDK. + * For testing purposes, the matching used here should be sufficient. + * + * @return the pid, or -1 if no match is found. + * + * @throws RuntimeException if more than one match is found (two programs + * are running with the same className). 
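+     *
+     * A matching line of "ps -e o pid=,cmd=" output looks roughly like
+     * (illustrative): "12345 java -cp je.jar com.foo.MyProgram". The
+     * leading token is parsed as the PID, and a second line matching
+     * the same className is treated as an error.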
+ */ + public static int getPid(final String className) throws IOException { + + final ProcessBuilder pBuilder = new ProcessBuilder( + "ps", "-e", "o", "pid=,cmd="); + + final Process process = pBuilder.start(); + + final BufferedReader reader = new BufferedReader( + new InputStreamReader(process.getInputStream())); + + int pid = -1; + + for (String line = reader.readLine(); line != null; + line = reader.readLine()) { + + if (line.contains(className)) { + + if (pid >= 0) { + throw new RuntimeException( + "Running more than once: " + className); + } + + /* The PID is the first token on the line. */ + String pidString = line.trim(); + pidString = pidString.substring(0, pidString.indexOf(' ')); + pid = Integer.parseInt(pidString); + } + } + + return pid; + } +} diff --git a/test/com/sleepycat/je/util/VerifyLogTest.java b/test/com/sleepycat/je/util/VerifyLogTest.java new file mode 100644 index 0000000..61d180e --- /dev/null +++ b/test/com/sleepycat/je/util/VerifyLogTest.java @@ -0,0 +1,385 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util; + +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; + +import org.junit.After; +import org.junit.Test; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.util.test.SharedTestUtils; + +public class VerifyLogTest extends DualTestCase { + + private static final String SAVE_DIR = "save"; + private static final int BUF_SIZE = 2048; + private static final int NUM_RECS = 5000; + + private final File envHome; + private final File tempDir; + private Environment env; + + public VerifyLogTest() { + envHome = SharedTestUtils.getTestDir(); + tempDir = new File(envHome, SAVE_DIR); + } + + @Override + @After + public void tearDown() + throws Exception { + + super.tearDown(); + try { + closeEnv(); + } catch (Throwable e) { + System.out.println("During tearDown: " + e); + } + + super.tearDown(); + } + + @Test + public void testVerify() + throws Throwable { + + openEnv(); + writeData(); + + /* Use DbBackup to get a list of the log files. */ + final DbBackup backup = new DbBackup(env); + backup.startBackup(); + final String[] fileNames = backup.getLogFilesInBackupSet(); + backup.endBackup(); + + /* Verify files, copy while verifying, diff and verify the copy. 
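+         *
+         * The sequence below exercises three verification paths over the
+         * same backup set: (1) verify the files in place, (2) copy them
+         * through a LogVerificationInputStream, then diff and verify the
+         * copies, and (3) repeat the copy through a
+         * LogVerificationReadableByteChannel. After that, each form of
+         * deliberate corruption (a bit-flip, an appended byte, a
+         * truncated final byte) must raise LogVerificationException.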
*/ + verifyFiles(fileNames, env.getHome()); + clearTempDir(); + copyFiles(env, fileNames, tempDir, BUF_SIZE); + diffFiles(fileNames, tempDir, false /*allowShorterLastFile*/); + verifyFiles(fileNames, tempDir); + + /* + * Use NIO channels to copy while verifying, diff and verify the copy + */ + clearTempDir(); + copyFilesNIO(env, fileNames, tempDir, BUF_SIZE); + diffFiles(fileNames, tempDir, false /*allowShorterLastFile*/); + verifyFiles(fileNames, tempDir); + + /* + * Modify a byte at a time and expect a verification exception. To + * prevent this from running for a very long time, use the first file + * only and limit the maximum file verifications to 5000. + */ + final String fileName = fileNames[0]; + final File file = new File(tempDir, fileName); + final long fileLen = file.length(); + final long maxIter = Math.min(5000, fileLen); + final RandomAccessFile raf = new RandomAccessFile(file, "rw"); + for (long offset = 0; offset < maxIter; offset += 1) { + raf.seek(offset); + int val = raf.read(); + raf.seek(offset); + /* Replace byte with bitwise complement. */ + raf.write(~val); + try { + verifyFiles(new String[] {fileName}, tempDir); + fail(String.format("Expected verify of %s to fail, " + + "offset: 0x%X, val: 0x%X", fileName, offset, val)); + } catch (LogVerificationException expected) { + } + /* Repair the damage we did above. */ + raf.seek(offset); + raf.write(val); + } + + /* Expect an exception when we append a byte at the end. */ + raf.seek(fileLen); + raf.write(0); + try { + verifyFiles(new String[] {fileName}, tempDir); + fail("Expected verify to fail after append: " + fileName); + } catch (LogVerificationException expected) { + } + /* Expect an exception when we remove the last byte. */ + raf.seek(fileLen - 1); + final int lastByte = raf.read(); + raf.setLength(fileLen - 1); + try { + verifyFiles(new String[] {fileName}, tempDir); + fail("Expected verify to fail after truncate: " + fileName); + } catch (LogVerificationException expected) { + } + /* Repair damage. */ + raf.seek(fileLen - 1); + raf.write(lastByte); + + /* Ensure that the repairs above were successful. */ + verifyFiles(fileNames, tempDir); + + closeEnv(); + raf.close(); + } + + private void openEnv() + throws DatabaseException { + + EnvironmentConfig envConfig = TestUtils.initEnvConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setTxnNoSync(true); + /* For simplicity, disable log file deletion. */ + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + env = create(envHome, envConfig); + } + + private void closeEnv() + throws DatabaseException { + + if (env != null) { + try { + close(env); + } finally { + env = null; + } + } + } + + private void clearTempDir() { + deleteTempDir(); + assertTrue(tempDir.mkdir()); + } + + private void deleteTempDir() { + if (tempDir.exists()) { + final String[] fileNames = tempDir.list(); + if (fileNames != null) { + for (final String fileName : fileNames) { + final File f = new File(tempDir, fileName); + assertTrue("Can't delete " + f, f.delete()); + } + } + assertTrue(tempDir.delete()); + } + } + + /** + * Add records of sizes varying from small to large, increasing the size + * one byte at a time for each record. This creates log entries with + * varied sizes and buffer boundaries. 
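+     *
+     * Concretely, record i carries an i-byte payload
+     * (data.setData(new byte[i])), so the NUM_RECS = 5000 records span
+     * payloads from 0 to 4999 bytes and the resulting entries straddle
+     * log buffer boundaries in many different ways.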
+ */ + private void writeData() + throws DatabaseException { + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + final Database db = env.openDatabase(null, "foo", dbConfig); + + final DatabaseEntry key = new DatabaseEntry(); + final DatabaseEntry data = new DatabaseEntry(); + + for (int i = 0; i < NUM_RECS; i += 1) { + IntegerBinding.intToEntry(i, key); + data.setData(new byte[i]); + assertSame(OperationStatus.SUCCESS, db.put(null, key, data)); + } + + db.close(); + } + + /** + * For every given file name in dir1, compare it to the same file name in + * the environment home directory. + * + * @param allowShorterLastFile is true if the last file in the array in + * dir1 may be shorter than the corresponding file in the environment home + * directory, because writing is still active in the environment. + */ + private void diffFiles(final String[] fileNames, + final File dir1, + final boolean allowShorterLastFile) + throws IOException, DatabaseException { + + final File dir2 = env.getHome(); + + for (final String fileName : fileNames) { + final File file1 = new File(dir1, fileName); + final FileInputStream is1 = new FileInputStream(file1); + try { + final File file2 = new File(dir2, fileName); + final FileInputStream is2 = new FileInputStream(file2); + try { + final byte[] buf1 = new byte[4096]; + final byte[] buf2 = new byte[4096]; + + long offset = 0; + while (true) { + final int len1 = is1.read(buf1); + final int len2 = is2.read(buf2); + if (len1 < 0 && len2 < 0) { + break; + } + if (len1 != len2) { + fail(String.format("Length mismatch file: %s " + + "offset: 0x%X len1: 0x%X len2: 0x%X", + fileName, offset, len1, len2)); + } + for (int i = 0; i < len1; i += 1) { + if (buf1[i] != buf2[i]) { + fail(String.format("Data mismatch file: %s " + + "offset: 0x%X byte1: 0x%X byte2: 0x%X", + fileName, offset + i, buf1[i], buf2[i])); + } + } + offset += len1; + } + } finally { + is2.close(); + } + } finally { + is1.close(); + } + } + } + + /** + * Copy specified log files to a given directory. This method is also + * present in the class javadoc of LogVerificationInputStream. This method + * should be kept in sync with the documented method in order to test it. + */ + void copyFiles(final Environment env, + final String[] fileNames, + final File destDir, + final int bufSize) + throws IOException, DatabaseException { + + final File srcDir = env.getHome(); + + for (final String fileName : fileNames) { + + final File destFile = new File(destDir, fileName); + final FileOutputStream fos = new FileOutputStream(destFile); + + final File srcFile = new File(srcDir, fileName); + final FileInputStream fis = new FileInputStream(srcFile); + final LogVerificationInputStream vis = + new LogVerificationInputStream(env, fis, fileName); + + final byte[] buf = new byte[bufSize]; + + try { + while (true) { + final int len = vis.read(buf); + if (len < 0) { + break; + } + fos.write(buf, 0, len); + } + } finally { + fos.close(); + vis.close(); + } + } + } + + /** + * Copy specified log files to a given directory using NIO channels. This + * method is also present in the class javadoc of + * LogVerificationReadableByteChannel, and should be kept in sync with the + * documented method in order to test it. 
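+     *
+     * The loop below follows the standard NIO read/flip/write/clear
+     * cycle on a direct ByteBuffer: the verification channel fills the
+     * buffer, flip() readies it for draining, the destination
+     * FileChannel writes it out, and clear() resets it for the next
+     * read.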
+ */ + void copyFilesNIO(final Environment env, + final String[] fileNames, + final File destDir, + final int bufSize) + throws IOException, DatabaseException { + + final File srcDir = env.getHome(); + + for (final String fileName : fileNames) { + + final File destFile = new File(destDir, fileName); + final FileOutputStream fos = new FileOutputStream(destFile); + final FileChannel foc = fos.getChannel(); + + final File srcFile = new File(srcDir, fileName); + final FileInputStream fis = new FileInputStream(srcFile); + final FileChannel fic = fis.getChannel(); + final LogVerificationReadableByteChannel vic = + new LogVerificationReadableByteChannel(env, fic, fileName); + + final ByteBuffer buf = ByteBuffer.allocateDirect(bufSize); + + try { + while (true) { + final int len = vic.read(buf); + if (len < 0) { + break; + } + buf.flip(); + foc.write(buf); + buf.clear(); + } + } finally { + fos.close(); + vic.close(); + } + } + } + + /** + * Verifies the given files without copying them. + */ + private void verifyFiles(final String[] fileNames, final File dir) + throws IOException { + + for (final String fileName : fileNames) { + final File file = new File(dir, fileName); + final FileInputStream fis = new FileInputStream(file); + final LogVerificationInputStream vis = + new LogVerificationInputStream(env, fis, fileName); + final byte[] buf = new byte[BUF_SIZE]; + try { + while (true) { + final int len = vis.read(buf); + if (len < 0) { + break; + } + } + } finally { + vis.close(); + } + } + } +} diff --git a/test/com/sleepycat/je/util/dbfilterstats/DbFilterStatsTest.java b/test/com/sleepycat/je/util/dbfilterstats/DbFilterStatsTest.java new file mode 100644 index 0000000..ae3ed93 --- /dev/null +++ b/test/com/sleepycat/je/util/dbfilterstats/DbFilterStatsTest.java @@ -0,0 +1,372 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.util.dbfilterstats; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileWriter; +import java.io.PrintStream; +import java.io.StringReader; +import java.util.Map.Entry; +import java.util.SortedMap; + +import org.junit.Test; + +import com.sleepycat.je.statcap.StatFile; +import com.sleepycat.je.util.DbFilterStats; +import com.sleepycat.je.util.Splitter; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +/** + * DbFilterStats tests. + * + */ +public class DbFilterStatsTest extends TestBase { + + private static final char DELIMITER = ','; + + /* The directory where stats files are located. 
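+     *
+     * These fixtures drive DbFilterStats, which projects columns out of
+     * JE's je.stat.csv statistics captures: allcols.csv names all 169
+     * columns, somecols.csv names a subset by column-group prefix, and
+     * je.config.csv is an environment-configuration capture used by the
+     * command-line projection test.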
*/ + private static final String testDir = + "test/com/sleepycat/je/util/dbfilterstats"; + /* projection file with all columns */ + private static final String allCols = + testDir + File.separator + "allcols.csv"; + /* projection file with some column prefixes */ + private static final String someCols = + testDir + File.separator + "somecols.csv"; + private static final String statfile = + testDir + File.separator + "je.stat.csv"; + private static final String statfile0 = + testDir + File.separator + "je.stat.0.csv"; + private static final String envstatfile = + testDir + File.separator + "je.config.csv"; + + + private final Splitter splitter = new Splitter(DELIMITER); + + /** + * Test that uses projection file to project all stats. + */ + @Test + public void testProjectAll() throws Exception { + String row; + int rowcount = 0; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + String[] args = new String[] { + "-f", + allCols, + statfile0 + }; + + PrintStream original = System.out; + try { + System.setOut(new PrintStream(baos)); + if (!(new DbFilterStats()).execute(args)) { + fail("command did not return expected value"); + } + baos.flush(); + } finally { + System.setOut(original); + } + String filOut = baos.toString(); + BufferedReader br = new BufferedReader(new StringReader(filOut)); + String header = br.readLine(); + rowcount++; + assertTrue("header is null", header != null); + String[] hcols = parseRow(header); + assertEquals("number of columns not expected", 169, hcols.length); + + while((row = br.readLine()) != null) { + String cols[] = parseRow(row); + assertEquals( + "number of columns not expected", 169, cols.length); + rowcount++; + } + assertEquals("number of rows not expected", 7, rowcount); + SortedMap fileMap = StatFile.sumItUp(new File(statfile0)); + SortedMap filterMap = + StatFile.sumItUp(new BufferedReader(new StringReader(filOut))); + for (Entry e : fileMap.entrySet()) { + Long filterVal = filterMap.get(e.getKey()); + assertEquals("values not equal ", filterVal, e.getValue()); + } + } + + /** + * Test that uses projection from command line. 
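+     *
+     * The equivalent command-line invocation would plausibly be:
+     *
+     *   java com.sleepycat.je.util.DbFilterStats \
+     *       -p "time, Environment:environmentCreationTime, Op, Node Compression" \
+     *       test/com/sleepycat/je/util/dbfilterstats/je.stat.0.csv
+     *
+     * Entries like "Op" and "Node Compression" act as column-group
+     * prefixes that expand to every column in the group, which is how a
+     * four-entry projection yields 43 output columns.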
+ */ + @Test + public void testProjectCmdLine() throws Exception { + String row; + int rowcount = 0; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + String[] args = new String[] { + "-p", + "time, Environment:environmentCreationTime , Op, Node Compression", + statfile0 + }; + + PrintStream original = System.out; + try { + System.setOut(new PrintStream(baos)); + if (!(new DbFilterStats()).execute(args)) { + fail("command did not return expected value"); + } + baos.flush(); + } finally { + System.setOut(original); + } + String filOut = baos.toString(); + BufferedReader br = new BufferedReader(new StringReader(filOut)); + String header = br.readLine(); + rowcount++; + assertTrue("header is null", header != null); + String[] hcols = parseRow(header); + assertEquals("number of columns not expected", 43, hcols.length); + assertEquals("first column not as expected", "time", hcols[0]); + assertEquals("second column not as expected", + "Environment:environmentCreationTime", + hcols[1]); + while((row = br.readLine()) != null) { + String cols[] = parseRow(row); + assertEquals( + "number of columns not expected", 43, cols.length); + rowcount++; + } + assertEquals("number of rows not expected", 7, rowcount); + SortedMap fileMap = StatFile.sumItUp(new File(statfile0)); + SortedMap filterMap = + StatFile.sumItUp(new BufferedReader(new StringReader(filOut))); + for (Entry e : filterMap.entrySet()) { + Long fileVal = fileMap.get(e.getKey()); + assertEquals("values not equal ", fileVal, e.getValue()); + } + } + + /** + * Test that uses projection from command line and projection file + */ + @Test + public void testFileAndCmdLine() throws Exception { + String row; + int rowcount = 0; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + String[] args = new String[] { + "-p", + "time, Environment:environmentCreationTime", + "-f", + someCols, + statfile0 + }; + + PrintStream original = System.out; + try { + System.setOut(new PrintStream(baos)); + if (!(new DbFilterStats()).execute(args)) { + fail("command did not return expected value"); + } + baos.flush(); + } finally { + System.setOut(original); + } + String filOut = baos.toString(); + BufferedReader br = new BufferedReader(new StringReader(filOut)); + String header = br.readLine(); + rowcount++; + assertTrue("header is null", header != null); + String[] hcols = parseRow(header); + assertEquals("number of columns not expected", 43, hcols.length); + assertEquals("first column not as expected", "time", hcols[0]); + assertEquals("second column not as expected", + "Environment:environmentCreationTime", + hcols[1]); + while((row = br.readLine()) != null) { + String cols[] = parseRow(row); + assertEquals( + "number of columns not expected", 43, cols.length); + rowcount++; + } + assertEquals("number of rows not expected", 7, rowcount); + SortedMap fileMap = StatFile.sumItUp(new File(statfile0)); + SortedMap filterMap = + StatFile.sumItUp(new BufferedReader(new StringReader(filOut))); + for (Entry e : filterMap.entrySet()) { + Long fileVal = fileMap.get(e.getKey()); + assertEquals("values not equal ", fileVal, e.getValue()); + } + } + + /** + * Test that uses projection file to project all stats. 
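+     *
+     * With two input files the filtered output grows from 7 rows to 9,
+     * and the summed values are cross-checked against
+     * StatFile.sumItUp(testDir, "je.stat."), which totals every
+     * je.stat.* file in the fixture directory.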
+ */ + @Test + public void testProjectAllMultiInputFiles() throws Exception { + String row; + int rowcount = 0; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + String[] args = new String[] { + "-f", + allCols, + statfile0, + statfile + }; + + PrintStream original = System.out; + try { + System.setOut(new PrintStream(baos)); + if (!(new DbFilterStats()).execute(args)) { + fail("command did not return expected value"); + } + baos.flush(); + } finally { + System.setOut(original); + } + String filOut = baos.toString(); + BufferedReader br = new BufferedReader(new StringReader(filOut)); + String header = br.readLine(); + rowcount++; + assertTrue("header is null", header != null); + String[] hcols = parseRow(header); + assertEquals("number of columns not expected", 169, hcols.length); + + while((row = br.readLine()) != null) { + String cols[] = parseRow(row); + assertEquals( + "number of columns not expected", 169, cols.length); + rowcount++; + } + assertEquals("number of rows not expected", 9, rowcount); + SortedMap fileMap = + StatFile.sumItUp(new File(testDir), "je.stat."); + SortedMap filterMap = + StatFile.sumItUp(new BufferedReader(new StringReader(filOut))); + for (Entry e : fileMap.entrySet()) { + Long filterVal = filterMap.get(e.getKey()); + assertEquals("values not equal for " + e.getKey() + " ", + filterVal, e.getValue()); + } + } + + /** + * Test that uses projection from command line on environment + * statistics file. + */ + @Test + public void testEnvConfigProjectCmdLine() throws Exception { + String row; + int rowcount = 0; + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + String[] args = new String[] { + "-p", + "time, java", + envstatfile + }; + + PrintStream original = System.out; + try { + System.setOut(new PrintStream(baos)); + if (!(new DbFilterStats()).execute(args)) { + fail("command did not return expected value"); + } + baos.flush(); + } finally { + System.setOut(original); + } + String filOut = baos.toString(); + BufferedReader br = new BufferedReader(new StringReader(filOut)); + String header = br.readLine(); + rowcount++; + assertTrue("header is null", header != null); + String[] hcols = parseRow(header); + assertEquals("number of columns not expected", 6, hcols.length); + assertEquals("first column not as expected", "time", hcols[0]); + assertTrue("second column not as expected", + hcols[1].startsWith("java:")); + while((row = br.readLine()) != null) { + String cols[] = parseRow(row); + assertEquals( + "number of columns not expected", 6, cols.length); + rowcount++; + } + assertEquals("number of rows not expected", 2, rowcount); + } + + + /** + * Test that creates file with escapes and quoted strings and + * projects all the data. 
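+     *
+     * The fixture row exercises the two tricky Splitter cases: a quoted
+     * field ("double quotestring") and an escaped delimiter
+     * (escape\,delimiter). Projecting every column must therefore echo
+     * each input row back unchanged, which is what the row-by-row
+     * assertEquals below verifies.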
+ */ + @Test + public void testQuotesEscapes() throws Exception { + final String TESTFILE = "test.csv"; + final String data[] = + {"col1,col2,col3,col4,col5", + " \"double quotestring\", escape\\,delimiter, ,,"}; + File envHome = SharedTestUtils.getTestDir(); + File tmpfile = + new File(envHome, TESTFILE); + if (tmpfile.exists()) { + tmpfile.delete(); + } + + BufferedWriter out = + new BufferedWriter(new FileWriter(tmpfile, true)); + for (int i = 0; i < data.length; i++) { + out.write(data[i]); + out.newLine(); + } + out.flush(); + out.close(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + String[] args = new String[] { + "-p", + data[0], + tmpfile.getAbsolutePath() + }; + + PrintStream original = System.out; + try { + System.setOut(new PrintStream(baos)); + if (!(new DbFilterStats()).execute(args)) { + fail("command did not return expected value"); + } + baos.flush(); + } finally { + System.setOut(original); + } + String filOut = baos.toString(); + BufferedReader br = new BufferedReader(new StringReader(filOut)); + int rowcount = 0; + String row; + while((row = br.readLine()) != null) { + assertEquals("row not expected", data[rowcount], row); + rowcount++; + } + } + + private String[] parseRow(String row) { + String [] vals = splitter.tokenize(row); + for (int i = 0; i < vals.length; i++) { + vals[i] = vals[i].trim(); + } + return vals; + } +} diff --git a/test/com/sleepycat/je/util/dbfilterstats/allcols.csv b/test/com/sleepycat/je/util/dbfilterstats/allcols.csv new file mode 100644 index 0000000..7fd8cc7 --- /dev/null +++ b/test/com/sleepycat/je/util/dbfilterstats/allcols.csv @@ -0,0 +1 @@ +time,Cache:adminBytes,Cache:avgBatchCACHEMODE,Cache:avgBatchCRITICAL,Cache:avgBatchDAEMON,Cache:avgBatchEVICTORTHREAD,Cache:avgBatchMANUAL,Cache:cacheTotalBytes,Cache:dataAdminBytes,Cache:dataBytes,Cache:lockBytes,Cache:nBINsEvictedCACHEMODE,Cache:nBINsEvictedCRITICAL,Cache:nBINsEvictedDAEMON,Cache:nBINsEvictedEVICTORTHREAD,Cache:nBINsEvictedMANUAL,Cache:nBINsFetch,Cache:nBINsFetchMiss,Cache:nBINsStripped,Cache:nBatchesCACHEMODE,Cache:nBatchesCRITICAL,Cache:nBatchesDAEMON,Cache:nBatchesEVICTORTHREAD,Cache:nBatchesMANUAL,Cache:nCachedBINs,Cache:nCachedUpperINs,Cache:nEvictPasses,Cache:nINCompactKey,Cache:nINNoTarget,Cache:nINSparseTarget,Cache:nLNsFetch,Cache:nLNsFetchMiss,Cache:nNodesEvicted,Cache:nNodesScanned,Cache:nNodesSelected,Cache:nRootNodesEvicted,Cache:nSharedCacheEnvironments,Cache:nThreadUnavailable,Cache:nUpperINsEvictedCACHEMODE,Cache:nUpperINsEvictedCRITICAL,Cache:nUpperINsEvictedDAEMON,Cache:nUpperINsEvictedEVICTORTHREAD,Cache:nUpperINsEvictedMANUAL,Cache:nUpperINsFetch,Cache:nUpperINsFetchMiss,Cache:requiredEvictBytes,Cache:sharedCacheTotalBytes,Checkpoints:lastCheckpointEnd,Checkpoints:lastCheckpointId,Checkpoints:lastCheckpointStart,Checkpoints:nCheckpoints,Checkpoints:nDeltaINFlush,Checkpoints:nFullBINFlush,Checkpoints:nFullINFlush,Cleaning:cleanerBackLog,Cleaning:correctedAvgLNSize,Cleaning:fileDeletionBacklog,Cleaning:nBINDeltasCleaned,Cleaning:nBINDeltasDead,Cleaning:nBINDeltasMigrated,Cleaning:nBINDeltasObsolete,Cleaning:nCleanerDeletions,Cleaning:nCleanerEntriesRead,Cleaning:nCleanerProbeRuns,Cleaning:nCleanerRuns,Cleaning:nClusterLNsProcessed,Cleaning:nINsCleaned,Cleaning:nINsDead,Cleaning:nINsMigrated,Cleaning:nINsObsolete,Cleaning:nLNQueueHits,Cleaning:nLNsCleaned,Cleaning:nLNsDead,Cleaning:nLNsLocked,Cleaning:nLNsMarked,Cleaning:nLNsMigrated,Cleaning:nLNsObsolete,Cleaning:nMarkLNsProcessed,Cleaning:nPendingLNsLocked,Cleaning:nPendingLNsProcessed,Cleaning:nRep
eatIteratorReads,Cleaning:nToBeCleanedLNsProcessed,Cleaning:pendingLNQueueSize,Cleaning:totalLogSize,Environment:btreeRelatchesRequired,Environment:environmentCreationTime,I/O:bufferBytes,I/O:endOfLog,I/O:nBytesReadFromWriteQueue,I/O:nBytesWrittenFromWriteQueue,I/O:nCacheMiss,I/O:nFSyncRequests,I/O:nFSyncTime,I/O:nFSyncTimeouts,I/O:nFSyncs,I/O:nFileOpens,I/O:nLogBuffers,I/O:nLogFSyncs,I/O:nNotResident,I/O:nOpenFiles,I/O:nRandomReadBytes,I/O:nRandomReads,I/O:nRandomWriteBytes,I/O:nRandomWrites,I/O:nReadsFromWriteQueue,I/O:nRepeatFaultReads,I/O:nSequentialReadBytes,I/O:nSequentialReads,I/O:nSequentialWriteBytes,I/O:nSequentialWrites,I/O:nTempBufferWrites,I/O:nWriteQueueOverflow,I/O:nWriteQueueOverflowFailures,I/O:nWritesFromWriteQueue,Jvm:PS MarkSweep.count,Jvm:PS MarkSweep.time,Jvm:PS Scavenge.count,Jvm:PS Scavenge.time,Jvm:heap,Jvm:loadAverage,Locks:nLatchAcquireNoWaitUnsuccessful,Locks:nLatchAcquiresNoWaitSuccessful,Locks:nLatchAcquiresNoWaiters,Locks:nLatchAcquiresSelfOwned,Locks:nLatchAcquiresWithContention,Locks:nLatchReleases,Locks:nRequests,Locks:nWaits,Node Compression:cursorsBins,Node Compression:dbClosedBins,Node Compression:inCompQueueSize,Node Compression:nonEmptyBins,Node Compression:processedBins,Node Compression:splitBins,Op:cursorDelete,Op:cursorGetCurrent,Op:cursorGetFirst,Op:cursorGetLast,Op:cursorGetNext,Op:cursorGetNextDup,Op:cursorGetNextNoDup,Op:cursorGetPrev,Op:cursorGetPrevDup,Op:cursorGetPrevNoDup,Op:cursorPut,Op:cursorPutCurrent,Op:cursorPutNoDupData,Op:cursorPutNoOverwrite,Op:dbDelete,Op:dbGet,Op:dbGetSearchBoth,Op:dbPut,Op:dbPutNoDupData,Op:dbPutNoOverWrite,Op:dbRemoveSequence,Op:dosCursorGetNext,Op:secondaryCursorDelete,Op:secondaryCursorGetCurrent,Op:secondaryCursorGetFirst,Op:secondaryCursorGetLast,Op:secondaryCursorGetNext,Op:secondaryCursorGetNextDup,Op:secondaryCursorGetNextNoDup,Op:secondaryCursorGetPrev,Op:secondaryCursorGetPrevDup,Op:secondaryCursorGetPrevNoDup,Op:secondaryDbDelete,Op:secondaryDbGet,Op:secondaryDbGetSearchBoth diff --git a/test/com/sleepycat/je/util/dbfilterstats/je.config.csv b/test/com/sleepycat/je/util/dbfilterstats/je.config.csv new file mode 100644 index 0000000..9f3cbb0 --- /dev/null +++ b/test/com/sleepycat/je/util/dbfilterstats/je.config.csv @@ -0,0 +1,2 @@ 
+time,envcfg:com.sleepycat.je.util.ConsoleHandler.level,envcfg:com.sleepycat.je.util.FileHandler.level,envcfg:je.adler32.chunkSize,envcfg:je.checkpointer.bytesInterval,envcfg:je.checkpointer.deadlockRetry,envcfg:je.checkpointer.highPriority,envcfg:je.checkpointer.wakeupInterval,envcfg:je.cleaner.adjustUtilization,envcfg:je.cleaner.backgroundProactiveMigration,envcfg:je.cleaner.bytesInterval,envcfg:je.cleaner.calc.initialAdjustments,envcfg:je.cleaner.calc.maxProbeSkipFiles,envcfg:je.cleaner.calc.minProbeSkipFiles,envcfg:je.cleaner.calc.minUncountedLNs,envcfg:je.cleaner.calc.recentLNSizes,envcfg:je.cleaner.cluster,envcfg:je.cleaner.clusterAll,envcfg:je.cleaner.deadlockRetry,envcfg:je.cleaner.detailMaxMemoryPercentage,envcfg:je.cleaner.expunge,envcfg:je.cleaner.fetchObsoleteSize,envcfg:je.cleaner.forceCleanFiles,envcfg:je.cleaner.foregroundProactiveMigration,envcfg:je.cleaner.lazyMigration,envcfg:je.cleaner.lockTimeout,envcfg:je.cleaner.lookAheadCacheSize,envcfg:je.cleaner.maxBatchFiles,envcfg:je.cleaner.minAge,envcfg:je.cleaner.minFileUtilization,envcfg:je.cleaner.minFilesToDelete,envcfg:je.cleaner.minUtilization,envcfg:je.cleaner.readSize,envcfg:je.cleaner.restartRetries,envcfg:je.cleaner.retries,envcfg:je.cleaner.rmwFix,envcfg:je.cleaner.threads,envcfg:je.cleaner.trackDetail,envcfg:je.cleaner.upgradeToLogVersion,envcfg:je.compressor.deadlockRetry,envcfg:je.compressor.lockTimeout,envcfg:je.compressor.wakeupInterval,envcfg:je.deferredWrite.temp,envcfg:je.env.backgroundReadLimit,envcfg:je.env.backgroundSleepInterval,envcfg:je.env.backgroundWriteLimit,envcfg:je.env.checkLeaks,envcfg:je.env.comparatorsRequired,envcfg:je.env.dbCacheClearCount,envcfg:je.env.dbEviction,envcfg:je.env.diskOrderedScanLockTimeout,envcfg:je.env.dupConvertPreloadAll,envcfg:je.env.fairLatches,envcfg:je.env.forcedYield,envcfg:je.env.isLocking,envcfg:je.env.isReadOnly,envcfg:je.env.isTransactional,envcfg:je.env.latchTimeout,envcfg:je.env.logTrace,envcfg:je.env.recovery,envcfg:je.env.recoveryForceCheckpoint,envcfg:je.env.recoveryForceNewFile,envcfg:je.env.runCheckpointer,envcfg:je.env.runCleaner,envcfg:je.env.runEvictor,envcfg:je.env.runINCompressor,envcfg:je.env.sharedLatches,envcfg:je.env.startupThreshold,envcfg:je.env.terminateTimeout,envcfg:je.evictor.allowBinDeltas,envcfg:je.evictor.coreThreads,envcfg:je.evictor.criticalPercentage,envcfg:je.evictor.deadlockRetry,envcfg:je.evictor.evictBytes,envcfg:je.evictor.evictionBatchPercentage,envcfg:je.evictor.forcedYield,envcfg:je.evictor.keepAlive,envcfg:je.evictor.lruOnly,envcfg:je.evictor.maxThreads,envcfg:je.evictor.nodeScanPercentage,envcfg:je.evictor.nodesPerScan,envcfg:je.evictor.useMemoryFloor,envcfg:je.evictor.wakeupInterval,envcfg:je.haltOnCommitAfterChecksumException,envcfg:je.lock.nLockTables,envcfg:je.lock.oldLockExceptions,envcfg:je.lock.timeout,envcfg:je.log.bufferSize,envcfg:je.log.checksumRead,envcfg:je.log.chunkedNIO,envcfg:je.log.directNIO,envcfg:je.log.faultReadSize,envcfg:je.log.fileCacheSize,envcfg:je.log.fileMax,envcfg:je.log.fsyncTimeout,envcfg:je.log.groupCommitInterval,envcfg:je.log.groupCommitThreshold,envcfg:je.log.iteratorMaxSize,envcfg:je.log.iteratorReadSize,envcfg:je.log.memOnly,envcfg:je.log.nDataDirectories,envcfg:je.log.numBuffers,envcfg:je.log.totalBufferBytes,envcfg:je.log.useNIO,envcfg:je.log.useODSYNC,envcfg:je.log.useWriteQueue,envcfg:je.log.verifyChecksums,envcfg:je.log.writeQueueSize,envcfg:je.maxMemory,envcfg:je.maxMemoryPercent,envcfg:je.nodeDupTreeMaxEntries,envcfg:je.nodeMaxEntries,envcfg:je.sharedCache,envcfg:je.stats.collect,envcfg:
je.stats.collect.interval,envcfg:je.stats.file.directory,envcfg:je.stats.file.row.count,envcfg:je.stats.max.files,envcfg:je.tree.binDelta,envcfg:je.tree.compactMaxKeyLength,envcfg:je.tree.maxDelta,envcfg:je.tree.minMemory,envcfg:je.txn.deadlockStackTrace,envcfg:je.txn.dumpLocks,envcfg:je.txn.durability,envcfg:je.txn.serializableIsolation,envcfg:je.txn.timeout,java:args,java:maxMemory,java:minMemory,java:vendor,java:version,je:version,mc:arch,mc:processors,os:name,os:version +2013-12-10 18:32:29.106 UTC,"OFF","INFO","0","20000000","3","false","0","true","false","0","5","20","5","1000","10","false","false","3","2","true","false","","false","false","500 ms","8192","0","2","5","5","50","0","5","10","true","1","true","0","3","500 ms","5 s","false","0","1 ms","0","true","false","100","true","10 seconds","true","false","false","true","false","true","5 min","true","true","false","false","true","true","true","true","true","5 min","10 s","true","1","0","3","524288","10","false","10 min","true","10","10","10","95","5 s","false","29","false","500 ms","1048576","true","0","false","2048","100","100000000","500 ms","1000","8","16777216","8192","false","0","3","0","false","false","true","false","1048576","0","60","128","128","false","true","1 min","","1440","10","25","16","10","512000","false","false","sync,sync,all","false","0"," -XX:+UseConcMarkSweepGC -XX:+UseAdaptiveGCBoundary -XX:+DisableExplicitGC","129957888","0","Sun Microsystems Inc.","1.6.0_35","6.0.3","amd64","4","Windows 7","6.1" diff --git a/test/com/sleepycat/je/util/dbfilterstats/je.stat.0.csv b/test/com/sleepycat/je/util/dbfilterstats/je.stat.0.csv new file mode 100644 index 0000000..bedd718 --- /dev/null +++ b/test/com/sleepycat/je/util/dbfilterstats/je.stat.0.csv @@ -0,0 +1,7 @@ +time,Cache:adminBytes,Cache:avgBatchCACHEMODE,Cache:avgBatchCRITICAL,Cache:avgBatchDAEMON,Cache:avgBatchEVICTORTHREAD,Cache:avgBatchMANUAL,Cache:cacheTotalBytes,Cache:dataAdminBytes,Cache:dataBytes,Cache:lockBytes,Cache:nBINsEvictedCACHEMODE,Cache:nBINsEvictedCRITICAL,Cache:nBINsEvictedDAEMON,Cache:nBINsEvictedEVICTORTHREAD,Cache:nBINsEvictedMANUAL,Cache:nBINsFetch,Cache:nBINsFetchMiss,Cache:nBINsStripped,Cache:nBatchesCACHEMODE,Cache:nBatchesCRITICAL,Cache:nBatchesDAEMON,Cache:nBatchesEVICTORTHREAD,Cache:nBatchesMANUAL,Cache:nCachedBINs,Cache:nCachedUpperINs,Cache:nEvictPasses,Cache:nINCompactKey,Cache:nINNoTarget,Cache:nINSparseTarget,Cache:nLNsFetch,Cache:nLNsFetchMiss,Cache:nNodesEvicted,Cache:nNodesScanned,Cache:nNodesSelected,Cache:nRootNodesEvicted,Cache:nSharedCacheEnvironments,Cache:nThreadUnavailable,Cache:nUpperINsEvictedCACHEMODE,Cache:nUpperINsEvictedCRITICAL,Cache:nUpperINsEvictedDAEMON,Cache:nUpperINsEvictedEVICTORTHREAD,Cache:nUpperINsEvictedMANUAL,Cache:nUpperINsFetch,Cache:nUpperINsFetchMiss,Cache:requiredEvictBytes,Cache:sharedCacheTotalBytes,Checkpoints:lastCheckpointEnd,Checkpoints:lastCheckpointId,Checkpoints:lastCheckpointStart,Checkpoints:nCheckpoints,Checkpoints:nDeltaINFlush,Checkpoints:nFullBINFlush,Checkpoints:nFullINFlush,Cleaning:cleanerBackLog,Cleaning:correctedAvgLNSize,Cleaning:fileDeletionBacklog,Cleaning:nBINDeltasCleaned,Cleaning:nBINDeltasDead,Cleaning:nBINDeltasMigrated,Cleaning:nBINDeltasObsolete,Cleaning:nCleanerDeletions,Cleaning:nCleanerEntriesRead,Cleaning:nCleanerProbeRuns,Cleaning:nCleanerRuns,Cleaning:nClusterLNsProcessed,Cleaning:nINsCleaned,Cleaning:nINsDead,Cleaning:nINsMigrated,Cleaning:nINsObsolete,Cleaning:nLNQueueHits,Cleaning:nLNsCleaned,Cleaning:nLNsDead,Cleaning:nLNsLocked,Cleaning:nLNsMarked,Cleaning:nLNsMi
grated,Cleaning:nLNsObsolete,Cleaning:nMarkLNsProcessed,Cleaning:nPendingLNsLocked,Cleaning:nPendingLNsProcessed,Cleaning:nRepeatIteratorReads,Cleaning:nToBeCleanedLNsProcessed,Cleaning:pendingLNQueueSize,Cleaning:totalLogSize,Environment:btreeRelatchesRequired,Environment:environmentCreationTime,I/O:bufferBytes,I/O:endOfLog,I/O:nBytesReadFromWriteQueue,I/O:nBytesWrittenFromWriteQueue,I/O:nCacheMiss,I/O:nFSyncRequests,I/O:nFSyncTime,I/O:nFSyncTimeouts,I/O:nFSyncs,I/O:nFileOpens,I/O:nLogBuffers,I/O:nLogFSyncs,I/O:nNotResident,I/O:nOpenFiles,I/O:nRandomReadBytes,I/O:nRandomReads,I/O:nRandomWriteBytes,I/O:nRandomWrites,I/O:nReadsFromWriteQueue,I/O:nRepeatFaultReads,I/O:nSequentialReadBytes,I/O:nSequentialReads,I/O:nSequentialWriteBytes,I/O:nSequentialWrites,I/O:nTempBufferWrites,I/O:nWriteQueueOverflow,I/O:nWriteQueueOverflowFailures,I/O:nWritesFromWriteQueue,Jvm:PS MarkSweep.count,Jvm:PS MarkSweep.time,Jvm:PS Scavenge.count,Jvm:PS Scavenge.time,Jvm:heap,Jvm:loadAverage,Locks:nLatchAcquireNoWaitUnsuccessful,Locks:nLatchAcquiresNoWaitSuccessful,Locks:nLatchAcquiresNoWaiters,Locks:nLatchAcquiresSelfOwned,Locks:nLatchAcquiresWithContention,Locks:nLatchReleases,Locks:nRequests,Locks:nWaits,Node Compression:cursorsBins,Node Compression:dbClosedBins,Node Compression:inCompQueueSize,Node Compression:nonEmptyBins,Node Compression:processedBins,Node Compression:splitBins,Op:cursorDelete,Op:cursorGetCurrent,Op:cursorGetFirst,Op:cursorGetLast,Op:cursorGetNext,Op:cursorGetNextDup,Op:cursorGetNextNoDup,Op:cursorGetPrev,Op:cursorGetPrevDup,Op:cursorGetPrevNoDup,Op:cursorPut,Op:cursorPutCurrent,Op:cursorPutNoDupData,Op:cursorPutNoOverwrite,Op:dbDelete,Op:dbGet,Op:dbGetSearchBoth,Op:dbPut,Op:dbPutNoDupData,Op:dbPutNoOverWrite,Op:dbRemoveSequence,Op:dosCursorGetNext,Op:secondaryCursorDelete,Op:secondaryCursorGetCurrent,Op:secondaryCursorGetFirst,Op:secondaryCursorGetLast,Op:secondaryCursorGetNext,Op:secondaryCursorGetNextDup,Op:secondaryCursorGetNextNoDup,Op:secondaryCursorGetPrev,Op:secondaryCursorGetPrevDup,Op:secondaryCursorGetPrevNoDup,Op:secondaryDbDelete,Op:secondaryDbGet,Op:secondaryDbGetSearchBoth +2013-03-14T23:34:00Z,161,0,0,0,0,0,3161086,1029,15197,0,0,0,0,0,0,1,0,0,0,0,0,0,0,3,3,0,0,1,5,0,0,0,0,0,0, ,0,0,0,0,0,0,0,0,0,0,943,1,369,1,0,2,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1363304040170,3145728,0,0,0,0,2,32,0,2,2,3,3,0,0,0,0,38,1,0,0,2048,1,954,2,0,0,0,0,0,0,0,0,16229576,-1,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +2013-03-14T23:34:14Z,20976,0,0,0,0,0,139161284,18492,135994580,0,0,0,0,0,0,1875365,0,0,0,0,0,0,0,2081,28,0,2088,1,8,1579226,7,0,0,0,0, ,0,0,0,0,0,0,1338324,0,0,0,85908388945,10,85899400034,10,6321,4286,4466,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,215186903,0,1363304040170,3145728,90199509183,0,0,36,10,1498,0,10,57,3,31,36,5,34816,17,17355,28,0,0,98304,48,215169392,114181,0,0,0,0,1,73,19,162,772435472,-1,0,0,0,0,0,0,3450173,2739,1,0,647,0,0,3,0,0,0,0,0,0,0,0,0,0,199727,0,0,0,18649,1579016,0,74226,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +2013-03-14T23:35:30Z,3542,0,0,0,0,0,3189666,18740,40396,0,0,0,0,0,0,66,3,0,0,0,0,0,17,3,3,0,3,1,4,69,29,0,0,0,0, ,0,0,0,0,0,0,0,0,0,0,90200646769,12,90199509272,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1363304130285,3145728,0,0,0,38,0,0,0,0,9,3,0,38,0,71680,23,0,0,0,1,13432072,1661,0,0,0,0,0,0,0,0,0,0,22159264,-1,0,0,0,0,0,0,50,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 
+2013-03-14T23:35:46Z,56495,0,0,0,0,0,139794423,22584,136592200,0,0,0,0,0,0,1894453,2078,0,0,0,0,0,17,2197,28,0,2217,1,9,1596171,219,0,0,0,0, ,0,0,0,0,0,0,1880619,20,0,0,180391290608,10,176099010742,10,14182,7629,7882,0,0,0,0,0,0,6321,19,302354,0,21,0,0,0,0,10559,17,3537,2923,10,0,614,281902,0,48,58,0,0,0,242587404,2076,1363304130285,3145728,184686195744,0,0,4153,10,1935,0,10,255,3,32,4155,18,75185400,12055,22733214,8600,0,521,144931449,18321,193523038,105560,0,0,0,0,1,73,18,170,498573672,-1,0,0,0,0,0,0,3490663,2808,2,0,130,0,2,0,0,0,0,0,0,0,0,0,0,0,199784,0,0,0,18556,1580076,0,74021,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +2013-03-14T23:36:23Z,3703,0,0,0,0,0,3194875,21220,45444,0,0,0,0,0,0,162,4,0,0,0,0,0,58,4,3,0,4,1,5,136,43,0,0,0,0, ,0,0,0,0,0,0,0,0,0,0,184687824525,24,184687821625,1,1,2,4,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,1363304182757,3145728,0,0,0,51,1,16,0,1,22,3,2,51,11,108544,47,3162,1,0,5,14443560,1784,0,0,0,0,0,0,0,0,0,0,31249832,-1,0,0,0,0,0,0,143,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +2013-03-14T23:36:37Z,34191,0,0,0,0,0,139883467,22212,136703548,0,0,0,0,0,0,1894717,2193,0,0,0,0,0,58,2215,28,0,2234,1,9,1597242,226,0,0,0,0, ,0,0,0,0,0,0,1880061,20,0,0,274882969623,10,270587257864,10,14261,7836,8084,0,0,0,0,0,0,15972,20,321661,0,22,0,0,0,0,8717,28,3702,3283,14,0,419,293228,0,8,22,0,0,0,240255120,2064,1363304182757,3145728,279173143015,0,0,4107,10,1653,0,10,262,3,32,4107,15,83168024,12496,24408178,8706,0,759,147432338,18416,191618485,104781,0,0,0,0,2,160,18,185,417050688,-1,0,0,0,0,0,0,3490863,2872,0,0,839,0,2,0,0,0,0,0,0,0,0,0,0,0,199597,0,0,0,18331,1580323,0,74407,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/test/com/sleepycat/je/util/dbfilterstats/je.stat.csv b/test/com/sleepycat/je/util/dbfilterstats/je.stat.csv new file mode 100644 index 0000000..5fae5dd --- /dev/null +++ b/test/com/sleepycat/je/util/dbfilterstats/je.stat.csv @@ -0,0 +1,3 @@ 
+time,Cache:adminBytes,Cache:avgBatchCACHEMODE,Cache:avgBatchCRITICAL,Cache:avgBatchDAEMON,Cache:avgBatchEVICTORTHREAD,Cache:avgBatchMANUAL,Cache:cacheTotalBytes,Cache:dataAdminBytes,Cache:dataBytes,Cache:lockBytes,Cache:nBINsEvictedCACHEMODE,Cache:nBINsEvictedCRITICAL,Cache:nBINsEvictedDAEMON,Cache:nBINsEvictedEVICTORTHREAD,Cache:nBINsEvictedMANUAL,Cache:nBINsFetch,Cache:nBINsFetchMiss,Cache:nBINsStripped,Cache:nBatchesCACHEMODE,Cache:nBatchesCRITICAL,Cache:nBatchesDAEMON,Cache:nBatchesEVICTORTHREAD,Cache:nBatchesMANUAL,Cache:nCachedBINs,Cache:nCachedUpperINs,Cache:nEvictPasses,Cache:nINCompactKey,Cache:nINNoTarget,Cache:nINSparseTarget,Cache:nLNsFetch,Cache:nLNsFetchMiss,Cache:nNodesEvicted,Cache:nNodesScanned,Cache:nNodesSelected,Cache:nRootNodesEvicted,Cache:nSharedCacheEnvironments,Cache:nThreadUnavailable,Cache:nUpperINsEvictedCACHEMODE,Cache:nUpperINsEvictedCRITICAL,Cache:nUpperINsEvictedDAEMON,Cache:nUpperINsEvictedEVICTORTHREAD,Cache:nUpperINsEvictedMANUAL,Cache:nUpperINsFetch,Cache:nUpperINsFetchMiss,Cache:requiredEvictBytes,Cache:sharedCacheTotalBytes,Checkpoints:lastCheckpointEnd,Checkpoints:lastCheckpointId,Checkpoints:lastCheckpointStart,Checkpoints:nCheckpoints,Checkpoints:nDeltaINFlush,Checkpoints:nFullBINFlush,Checkpoints:nFullINFlush,Cleaning:cleanerBackLog,Cleaning:correctedAvgLNSize,Cleaning:fileDeletionBacklog,Cleaning:nBINDeltasCleaned,Cleaning:nBINDeltasDead,Cleaning:nBINDeltasMigrated,Cleaning:nBINDeltasObsolete,Cleaning:nCleanerDeletions,Cleaning:nCleanerEntriesRead,Cleaning:nCleanerProbeRuns,Cleaning:nCleanerRuns,Cleaning:nClusterLNsProcessed,Cleaning:nINsCleaned,Cleaning:nINsDead,Cleaning:nINsMigrated,Cleaning:nINsObsolete,Cleaning:nLNQueueHits,Cleaning:nLNsCleaned,Cleaning:nLNsDead,Cleaning:nLNsLocked,Cleaning:nLNsMarked,Cleaning:nLNsMigrated,Cleaning:nLNsObsolete,Cleaning:nMarkLNsProcessed,Cleaning:nPendingLNsLocked,Cleaning:nPendingLNsProcessed,Cleaning:nRepeatIteratorReads,Cleaning:nToBeCleanedLNsProcessed,Cleaning:pendingLNQueueSize,Cleaning:totalLogSize,Environment:btreeRelatchesRequired,Environment:environmentCreationTime,I/O:bufferBytes,I/O:endOfLog,I/O:nBytesReadFromWriteQueue,I/O:nBytesWrittenFromWriteQueue,I/O:nCacheMiss,I/O:nFSyncRequests,I/O:nFSyncTime,I/O:nFSyncTimeouts,I/O:nFSyncs,I/O:nFileOpens,I/O:nLogBuffers,I/O:nLogFSyncs,I/O:nNotResident,I/O:nOpenFiles,I/O:nRandomReadBytes,I/O:nRandomReads,I/O:nRandomWriteBytes,I/O:nRandomWrites,I/O:nReadsFromWriteQueue,I/O:nRepeatFaultReads,I/O:nSequentialReadBytes,I/O:nSequentialReads,I/O:nSequentialWriteBytes,I/O:nSequentialWrites,I/O:nTempBufferWrites,I/O:nWriteQueueOverflow,I/O:nWriteQueueOverflowFailures,I/O:nWritesFromWriteQueue,Jvm:PS MarkSweep.count,Jvm:PS MarkSweep.time,Jvm:PS Scavenge.count,Jvm:PS Scavenge.time,Jvm:heap,Jvm:loadAverage,Locks:nLatchAcquireNoWaitUnsuccessful,Locks:nLatchAcquiresNoWaitSuccessful,Locks:nLatchAcquiresNoWaiters,Locks:nLatchAcquiresSelfOwned,Locks:nLatchAcquiresWithContention,Locks:nLatchReleases,Locks:nRequests,Locks:nWaits,Node Compression:cursorsBins,Node Compression:dbClosedBins,Node Compression:inCompQueueSize,Node Compression:nonEmptyBins,Node Compression:processedBins,Node 
Compression:splitBins,Op:cursorDelete,Op:cursorGetCurrent,Op:cursorGetFirst,Op:cursorGetLast,Op:cursorGetNext,Op:cursorGetNextDup,Op:cursorGetNextNoDup,Op:cursorGetPrev,Op:cursorGetPrevDup,Op:cursorGetPrevNoDup,Op:cursorPut,Op:cursorPutCurrent,Op:cursorPutNoDupData,Op:cursorPutNoOverwrite,Op:dbDelete,Op:dbGet,Op:dbGetSearchBoth,Op:dbPut,Op:dbPutNoDupData,Op:dbPutNoOverWrite,Op:dbRemoveSequence,Op:dosCursorGetNext,Op:secondaryCursorDelete,Op:secondaryCursorGetCurrent,Op:secondaryCursorGetFirst,Op:secondaryCursorGetLast,Op:secondaryCursorGetNext,Op:secondaryCursorGetNextDup,Op:secondaryCursorGetNextNoDup,Op:secondaryCursorGetPrev,Op:secondaryCursorGetPrevDup,Op:secondaryCursorGetPrevNoDup,Op:secondaryDbDelete,Op:secondaryDbGet,Op:secondaryDbGetSearchBoth +2013-03-15T16:26:14Z,4207,0,0,0,0,0,3197003,20724,47068,0,0,0,0,0,0,166,4,0,0,0,0,0,60,5,3,0,5,2,5,113,30,0,0,0,0, ,0,0,0,0,0,0,0,0,0,0,279174479451,36,279174474724,1,1,2,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,1363364772639,3145728,0,0,0,39,1,0,0,1,10,3,2,39,0,63488,25,4989,1,0,3,9911470,1231,0,0,0,0,0,0,0,0,0,0,22851704,-1,0,0,0,0,0,0,147,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +2013-03-15T16:26:40Z,77154,0,0,0,0,0,139922450,21592,136699568,0,0,0,0,0,0,1891353,2211,0,0,0,0,0,60,2236,28,0,2256,2,10,1595038,234,0,0,0,0, ,0,0,0,0,0,0,1878232,20,0,0,369370291203,10,365074242162,10,14185,7891,8137,1,0,0,0,0,0,13664,19,293347,0,21,0,0,0,0,7761,18,324,281,0,0,45,270437,0,0,0,0,2,0,247737308,2210,1363364772639,3145728,369374930953,0,0,4298,10,4276,0,10,811,3,31,4298,15,98118944,13438,29573996,10199,0,760,113777449,14320,186551270,104132,0,0,0,0,2,158,27,190,621831136,-1,0,0,0,0,0,0,3494657,2874,0,0,265,0,0,0,0,0,0,0,0,0,0,0,0,0,200279,0,0,0,18488,1579470,0,75003,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/test/com/sleepycat/je/util/dbfilterstats/somecols.csv b/test/com/sleepycat/je/util/dbfilterstats/somecols.csv new file mode 100644 index 0000000..a696a4a --- /dev/null +++ b/test/com/sleepycat/je/util/dbfilterstats/somecols.csv @@ -0,0 +1 @@ +Op, Node Compression diff --git a/test/com/sleepycat/je/utilint/AtomicLongComponentTest.java b/test/com/sleepycat/je/utilint/AtomicLongComponentTest.java new file mode 100644 index 0000000..49505b8 --- /dev/null +++ b/test/com/sleepycat/je/utilint/AtomicLongComponentTest.java @@ -0,0 +1,83 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +/** Test the AtomicLongComponent class */ +public class AtomicLongComponentTest extends TestBase { + + @Test + public void testConstructor() { + AtomicLongComponent comp = new AtomicLongComponent(); + assertEquals(Long.valueOf(0), comp.get()); + } + + @Test + public void testSet() { + AtomicLongComponent comp = new AtomicLongComponent(); + comp.set(72); + assertEquals(Long.valueOf(72), comp.get()); + } + + @Test + public void testClear() { + AtomicLongComponent comp = new AtomicLongComponent(); + comp.set(37); + comp.clear(); + assertEquals(Long.valueOf(0), comp.get()); + } + + @Test + public void testCopy() { + AtomicLongComponent comp = new AtomicLongComponent(); + comp.set(70); + AtomicLongComponent copy = comp.copy(); + comp.clear(); + assertEquals(Long.valueOf(70), copy.get()); + copy.set(75); + assertEquals(Long.valueOf(0), comp.get()); + } + + @Test + public void testGetFormattedValue() { + AtomicLongComponent comp = new AtomicLongComponent(); + comp.set(123456789); + assertEquals("123,456,789", comp.getFormattedValue(true)); + assertEquals("123456789", comp.getFormattedValue(false)); + } + + @Test + public void testIsNotSet() { + AtomicLongComponent comp = new AtomicLongComponent(); + assertTrue(comp.isNotSet()); + comp.set(3); + assertFalse(comp.isNotSet()); + comp.clear(); + assertTrue(comp.isNotSet()); + } + + @Test + public void testToString() { + AtomicLongComponent comp = new AtomicLongComponent(); + comp.set(987654321); + assertEquals("987654321", comp.toString()); + } +} diff --git a/test/com/sleepycat/je/utilint/AtomicLongMapStatTest.java b/test/com/sleepycat/je/utilint/AtomicLongMapStatTest.java new file mode 100644 index 0000000..7637dcb --- /dev/null +++ b/test/com/sleepycat/je/utilint/AtomicLongMapStatTest.java @@ -0,0 +1,157 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.utilint;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.utilint.StatDefinition.StatType;
+import com.sleepycat.util.test.TestBase;
+
+/** Test the AtomicLongMapStat class */
+public class AtomicLongMapStatTest extends TestBase {
+
+    private static final StatGroup statGroup =
+        new StatGroup("TestGroup", "Test group");
+    private static int statDefCount;
+
+    private AtomicLongMapStat map;
+    private AtomicLongMapStat cumulativeMap;
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        super.setUp();
+        map = new AtomicLongMapStat(statGroup, getStatDef());
+        cumulativeMap = new AtomicLongMapStat(
+            statGroup,
+            new StatDefinition(getStatDefName(), "", StatType.CUMULATIVE));
+    }
+
+    private StatDefinition getStatDef() {
+        return new StatDefinition(getStatDefName(), "");
+    }
+
+    private String getStatDefName() {
+        return "stat" + Integer.toString(++statDefCount);
+    }
+
+    @Test
+    public void testCreateStat() {
+        AtomicLongComponent compA = map.createStat("a");
+        compA.set(1);
+        AtomicLongComponent compB = map.createStat("b");
+        compB.set(2);
+        assertEquals("a=1;b=2", map.get());
+    }
+
+    @Test
+    public void testCopy() {
+        AtomicLongComponent compA = map.createStat("a");
+        compA.set(1);
+        AtomicLongMapStat copy = map.copy();
+        AtomicLongComponent compB = map.createStat("b");
+        compB.set(2);
+        AtomicLongComponent compC = copy.createStat("c");
+        compC.set(3);
+        assertEquals("a=1;b=2", map.get());
+        assertEquals("a=1;c=3", copy.get());
+    }
+
+    @Test
+    public void testComputeInterval() {
+        AtomicLongMapStat copy = map.copy();
+
+        AtomicLongMapStat interval = map.computeInterval(copy);
+        assertTrue(interval.isNotSet());
+
+        AtomicLongComponent compA = map.createStat("a");
+        AtomicLongComponent copyCompA = copy.createStat("a");
+
+        interval = map.computeInterval(copy);
+        assertTrue(interval.isNotSet());
+
+        compA.set(3);
+        copyCompA.set(1);
+        interval = map.computeInterval(copy);
+        assertEquals("a=2", interval.get());
+        interval = copy.computeInterval(map);
+        assertEquals("a=-2", interval.get());
+        assertEquals("a=3", map.get());
+        assertEquals("a=1", copy.get());
+
+        AtomicLongComponent compB = map.createStat("b");
+        compB.set(7);
+
+        interval = map.computeInterval(copy);
+        assertEquals("a=2;b=7", interval.get());
+        interval = copy.computeInterval(map);
+
+        /* Component b is not present in copy, so it is not included here */
+        assertEquals("a=-2", interval.get());
+        assertEquals("a=3;b=7", map.get());
+        assertEquals("a=1", copy.get());
+
+        AtomicLongComponent cumulativeCompA = cumulativeMap.createStat("a");
+        cumulativeCompA.set(9);
+        interval = cumulativeMap.computeInterval(map);
+        assertEquals("a=9", interval.get());
+    }
+
+    @Test
+    public void testNegate() {
+        map.negate();
+        assertEquals("", map.get());
+        AtomicLongComponent compA = map.createStat("a");
+        compA.set(-33);
+        map.negate();
+        assertEquals("a=33", map.get());
+
+        /* CUMULATIVE stats should not be modified by negate. */
+        compA = cumulativeMap.createStat("a");
+        compA.set(-33);
+        cumulativeMap.negate();
+        assertEquals("a=-33", cumulativeMap.get());
+    }
+
+    @Test
+    public void getMap() {
+        SortedMap<String, Long> valueMap = new TreeMap<>();
+        assertEquals(valueMap, map.getMap());
+        AtomicLongComponent compA = map.createStat("a");
+        assertEquals(valueMap, map.getMap());
+        valueMap.put("a", 1L);
+        compA.set(1);
+        assertEquals(valueMap, map.getMap());
+    }
+
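+    /*
+     * An informal summary of the interval semantics exercised by
+     * testComputeInterval above, inferred from that test's assertions
+     * rather than from API documentation: computeInterval subtracts the
+     * argument's value per component, reports components missing from the
+     * argument at their full value, omits components missing from this
+     * map, and for CUMULATIVE stats simply reports the current value. For
+     * example, with map = {a=3, b=7} and copy = {a=1}:
+     *
+     *     map.computeInterval(copy).get()  -> "a=2;b=7"
+     *     copy.computeInterval(map).get()  -> "a=-2"
+     */
+
+    @Test
+    public void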
testIsNotSet() { + assertTrue(map.isNotSet()); + map.createStat("a"); + AtomicLongComponent compB = map.createStat("b"); + assertTrue(map.isNotSet()); + compB.set(2); + assertFalse(map.isNotSet()); + compB.clear(); + assertTrue(map.isNotSet()); + } +} diff --git a/test/com/sleepycat/je/utilint/BitMapTest.java b/test/com/sleepycat/je/utilint/BitMapTest.java new file mode 100644 index 0000000..e5a13d7 --- /dev/null +++ b/test/com/sleepycat/je/utilint/BitMapTest.java @@ -0,0 +1,77 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +public class BitMapTest extends TestBase { + + @Test + public void testSegments() { + + BitMap bmap = new BitMap(); + int startBit = 15; + int endBit = 62; + assertEquals(0, bmap.cardinality()); + assertEquals(0, bmap.getNumSegments()); + + assertFalse(bmap.get(1001L)); + assertEquals(0, bmap.getNumSegments()); + + /* set a bit in different segments. */ + for (int i = startBit; i <= endBit; i++) { + long index = 1L << i; + index += 17; + bmap.set(index); + } + + assertEquals((endBit - startBit +1), bmap.cardinality()); + assertEquals((endBit - startBit + 1), bmap.getNumSegments()); + + /* should be set. */ + for (int i = startBit; i <= endBit; i++) { + long index = 1L << i; + index += 17; + assertTrue(bmap.get(index)); + } + + /* should be clear. */ + for (int i = startBit; i <= endBit; i++) { + long index = 7 + (1L << i); + assertFalse(bmap.get(index)); + } + + /* checking for non-set bits should not create more segments. */ + assertEquals((endBit - startBit +1), bmap.cardinality()); + assertEquals((endBit - startBit + 1), bmap.getNumSegments()); + } + + @Test + public void testNegative() { + BitMap bMap = new BitMap(); + + try { + bMap.set(-300); + fail("should have thrown exception"); + } catch (IndexOutOfBoundsException expected) { + } + } +} diff --git a/test/com/sleepycat/je/utilint/CronScheduleParserTest.java b/test/com/sleepycat/je/utilint/CronScheduleParserTest.java new file mode 100644 index 0000000..289b025 --- /dev/null +++ b/test/com/sleepycat/je/utilint/CronScheduleParserTest.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.utilint;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.Calendar;
+
+import org.junit.After;
+import org.junit.Test;
+
+import com.sleepycat.util.test.TestBase;
+
+public class CronScheduleParserTest extends TestBase {
+
+    private static long millsOneDay = 24 * 60 * 60 * 1000;
+    private static long millsOneHour = 60 * 60 * 1000;
+    private static long millsOneMinute = 60 * 1000;
+
+    private static Calendar generatedCurCal = Calendar.getInstance();
+
+    static {
+        /*
+         * Set the current Calendar to be 05:01 on a Friday.
+         */
+        generatedCurCal.set(Calendar.DAY_OF_WEEK, 6);
+        generatedCurCal.set(Calendar.HOUR_OF_DAY, 5);
+        generatedCurCal.set(Calendar.MINUTE, 1);
+        generatedCurCal.set(Calendar.SECOND, 0);
+        generatedCurCal.set(Calendar.MILLISECOND, 0);
+    }
+
+    @After
+    public void tearDown()
+        throws Exception {
+        CronScheduleParser.setCurCalHook = null;
+        super.tearDown();
+    }
+
+    @Test
+    public void testCheckSame() {
+        assertTrue(CronScheduleParser.checkSame(null, null));
+        assertFalse(CronScheduleParser.checkSame(null, "0 0 * * *"));
+        assertFalse(CronScheduleParser.checkSame("0 0 * * *", null));
+        assertTrue(CronScheduleParser.checkSame("5 7 * * *", "5 7 * * *"));
+        assertTrue(CronScheduleParser.checkSame("5 7 * * 5", "5 7 * * 5"));
+        assertFalse(CronScheduleParser.checkSame("0 0 * * *", "5 7 * * *"));
+        assertFalse(CronScheduleParser.checkSame("5 7 * * 5", "5 7 * * 6"));
+    }
+
+    @Test
+    public void testValidate() {
+        internalValidate(null, CronScheduleParser.nullCons);
+        validateCorrect("* * * * *");
+
+        /*
+         * Constraint 1: The schedule string must have the form "* * * * *",
+         * i.e. five fields separated by four single spaces.
+         */
+        internalValidate(" * * *", CronScheduleParser.cons1);
+        internalValidate("* * * * ", CronScheduleParser.cons1);
+        internalValidate("* * * * * *", CronScheduleParser.cons1);
+        internalValidate("* *_* * *", CronScheduleParser.cons1);
+        validateCorrect("5 6 * * 4");
+
+        /*
+         * Constraint 2: Each field can only be an int value or '*'.
+         */
+        internalValidate("* * - * )", CronScheduleParser.cons2);
+        internalValidate("* * * 3.2 *", CronScheduleParser.cons2);
+        internalValidate("* ** * * *", CronScheduleParser.cons2);
+        validateCorrect("0 0 * * 6");
+
+        /*
+         * Constraint 3: Cannot specify dayOfMonth and dayOfWeek
+         * simultaneously.
+         */
+        internalValidate("* * 4 * 5", CronScheduleParser.cons3);
+        validateCorrect("59 23 * * 5");
+
+        /*
+         * Constraint 4: Cannot specify dayOfMonth or month.
+         */
+        internalValidate("* * 4 * *", CronScheduleParser.cons4);
+        internalValidate("* * * 4 *", CronScheduleParser.cons4);
+        internalValidate("* * 4 4 *", CronScheduleParser.cons4);
+        validateCorrect("59 23 * * *");
+
+        /*
+         * Constraint 5: If a field is an int value, the value must be in
+         * the valid range.
+         */
+        internalValidate("-1 * * * *", CronScheduleParser.cons5);
+        internalValidate("60 * * * *", CronScheduleParser.cons5);
+        validateCorrect("0 * * * *");
+        validateCorrect("59 * * * *");
+        internalValidate("1 -1 * * *", CronScheduleParser.cons5);
+        internalValidate("1 24 * * *", CronScheduleParser.cons5);
+        validateCorrect("1 0 * * *");
+        validateCorrect("1 23 * * *");
+        internalValidate("1 1 * * -1", CronScheduleParser.cons5);
+        internalValidate("1 1 * * 7", CronScheduleParser.cons5);
+        validateCorrect("1 1 * * 0");
+        validateCorrect("1 1 * * 6");
+
+        /*
+         * Constraint 6: If dayOfWeek is a concrete value, then neither
+         * minute nor hour can be '*'.
+         */
+        internalValidate("* * * * 6", CronScheduleParser.cons6);
+        internalValidate("1 * * * 6", CronScheduleParser.cons6);
+        internalValidate("* 1 * * 6", CronScheduleParser.cons6);
+        validateCorrect("1 1 * * 6");
+
+        /*
+         * Constraint 7: If hour is a concrete value, minute cannot be '*'.
+         */
+        internalValidate("* 23 * * *", CronScheduleParser.cons7);
+        validateCorrect("1 23 * * *");
+    }
+
+    private void internalValidate(String cronSchedule, String mess) {
+        try {
+            new CronScheduleParser(cronSchedule);
+            fail("Should throw IllegalArgumentException");
+        } catch (IllegalArgumentException iae) {
+            assertTrue(iae.getMessage().contains(mess));
+        }
+    }
+
+    private void validateCorrect(String cronSchedule) {
+        try {
+            new CronScheduleParser(cronSchedule);
+        } catch (Exception e) {
+            fail("Should not throw Exception");
+        }
+    }
+
+    @Test
+    public void testParser() {
+        MyHook hook = new MyHook();
+        CronScheduleParser.setCurCalHook = hook;
+
+        check("* * * * *", 0, millsOneMinute);
+
+        /*
+         * Because of transitions into or out of Daylight Saving Time, a
+         * delay computed as 7 * millsOneDay may be off by an hour in either
+         * direction, so expected delays are computed from a scheduled
+         * Calendar instead.
+         */
+        Calendar scheduledCal = (Calendar) generatedCurCal.clone();
+
+        scheduledCal.set(Calendar.DAY_OF_WEEK, 6);
+        scheduledCal.set(Calendar.HOUR_OF_DAY, 5);
+        scheduledCal.set(Calendar.MINUTE, 5);
+        check(
+            "5 * * * *",
+            scheduledCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(),
+            millsOneHour);
+
+        scheduledCal.set(Calendar.DAY_OF_WEEK, 6);
+        scheduledCal.set(Calendar.HOUR_OF_DAY, 6);
+        scheduledCal.set(Calendar.MINUTE, 0);
+        check(
+            "0 * * * *",
+            scheduledCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(),
+            millsOneHour);
+
+        scheduledCal.set(Calendar.DAY_OF_WEEK, 6);
+        scheduledCal.set(Calendar.HOUR_OF_DAY, 7);
+        scheduledCal.set(Calendar.MINUTE, 59);
+        check(
+            "59 7 * * *",
+            scheduledCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(),
+            millsOneDay);
+
+        scheduledCal.set(Calendar.DAY_OF_WEEK, 7);
+        scheduledCal.set(Calendar.HOUR_OF_DAY, 1);
+        scheduledCal.set(Calendar.MINUTE, 30);
+        check(
+            "30 1 * * *",
+            scheduledCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(),
+            millsOneDay);
+
+        scheduledCal.set(Calendar.DAY_OF_WEEK, 7);
+        scheduledCal.set(Calendar.HOUR_OF_DAY, 4);
+        scheduledCal.set(Calendar.MINUTE, 10);
+        check(
+            "10 4 * * 6",
+            scheduledCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(),
+            7 * millsOneDay);
+
+        scheduledCal.set(Calendar.DAY_OF_WEEK, 4);
+        scheduledCal.set(Calendar.HOUR_OF_DAY, 4);
+        scheduledCal.set(Calendar.MINUTE, 10);
+        scheduledCal.add(Calendar.DATE, 7);
+        check(
+            "10 4 * * 3",
+            scheduledCal.getTimeInMillis() - generatedCurCal.getTimeInMillis(),
+            7 * millsOneDay);
+    }
+
+    private void check(String cronSchedule, long delay, long interval) {
+        CronScheduleParser csp = new CronScheduleParser(cronSchedule);
+        assertEquals(delay, csp.getDelayTime());
+        assertEquals(interval, csp.getInterval());
+    }
+
+    class MyHook implements TestHook<Void> {
+
+        @Override
+        public void doHook() {
+            CronScheduleParser.curCal = generatedCurCal;
+        }
+
+        @Override
+        public void doHook(Void obj) {
+        }
+
+        @Override
+        public void hookSetup() {
+        }
+
+        @Override
+        public void doIOHook() throws IOException {
+        }
+
+        @Override
+        public Void getHookValue() {
+            return null;
+        }
+    }
+}
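For orientation, a minimal usage sketch of the parser exercised by the test
above. It assumes only the constructor, getDelayTime, and getInterval seen in
this test; the java.util.Timer wiring is illustrative, not part of the JE API.

    import java.util.Timer;
    import java.util.TimerTask;

    import com.sleepycat.je.utilint.CronScheduleParser;

    public class CronSketch {
        public static void scheduleDaily(TimerTask task) {
            /* "30 1 * * *" means 01:30 every day: minute=30, hour=1. */
            CronScheduleParser csp = new CronScheduleParser("30 1 * * *");
            long delayMs = csp.getDelayTime();   /* millis until next 01:30 */
            long intervalMs = csp.getInterval(); /* one day, in millis */
            new Timer().scheduleAtFixedRate(task, delayMs, intervalMs);
        }
    }

diff --git a/test/com/sleepycat/je/utilint/DoubleExpMovingAvgTest.java b/test/com/sleepycat/je/utilint/DoubleExpMovingAvgTest.java
new file mode 100644
index 0000000..87547a8
--- /dev/null
+++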
b/test/com/sleepycat/je/utilint/DoubleExpMovingAvgTest.java @@ -0,0 +1,117 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +/** Test the DoubleExpMovingAvg class. */ +public class DoubleExpMovingAvgTest extends TestBase { + + private DoubleExpMovingAvg avg; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + avg = new DoubleExpMovingAvg("stat", 3000); + } + + @Test + public void testConstructorPeriodMillis() { + avg.add(1, 1000); + avg.add(2, 2000); + avg.add(4, 3000); + avg.add(8, 4000); + assertEquals(3.7, avg.get(), 0.1); + + /* Shorter period skews result towards later entries */ + avg = new DoubleExpMovingAvg("stat", 2000); + avg.add(1, 1000); + avg.add(2, 2000); + avg.add(4, 3000); + avg.add(8, 4000); + assertEquals(4.6, avg.get(), 0.1); + } + + @Test + public void testCopyConstructor() { + avg.add(2, 1000); + assertEquals(2, avg.get(), 0); + DoubleExpMovingAvg copy = new DoubleExpMovingAvg(avg); + assertEquals(avg.get(), copy.get(), 0); + copy.add(4, 2000); + assertEquals(2, avg.get(), 0); + assertEquals(2.5, copy.get(), 0.1); + } + + @Test + public void testGetAndAdd() { + assertEquals(0, avg.get(), 0); + + avg.add(1, 1000); + assertEquals(1, avg.get(), 0); + avg.add(4.2, 2000); + assertEquals(2, avg.get(), 0.1); + avg.add(5.5, 3000); + assertEquals(3, avg.get(), 0.1); + avg.add(3, 4000); + assertEquals(3, avg.get(), 0.1); + avg.add(-0.3, 5000); + assertEquals(2, avg.get(), 0.1); + avg.add(-1.3, 6000); + assertEquals(1, avg.get(), 0.1); + avg.add(-2.4, 7000); + assertEquals(0, avg.get(), 0.1); + avg.add(0, 8000); + assertEquals(0, avg.get(), 0.1); + + /* Ignore items at same and earlier times */ + avg.add(123, 8000); + avg.add(456, 2000); + assertEquals(0, avg.get(), 0.1); + } + + @Test + public void testGetFormattedValue() { + assertEquals("unknown", avg.getFormattedValue(true)); + avg.add(10000, 1000); + assertEquals("10,000", avg.getFormattedValue(true)); + assertEquals("10000.00", avg.getFormattedValue(false)); + + /* + * Probably don't want to add NaN values, since they will keep the + * average as NaN from then on, but at least make sure that toString + * doesn't do something weird in this case. + */ + avg.add(Double.NaN, 2000); + assertEquals("NaN", avg.getFormattedValue()); + } + + @Test + public void testIsNotSet() { + assertTrue(avg.isNotSet()); + avg.add(1, 1000); + assertFalse(avg.isNotSet()); + avg.add(2, 2000); + assertFalse(avg.isNotSet()); + } +} diff --git a/test/com/sleepycat/je/utilint/DummyFileStoreInfo.java b/test/com/sleepycat/je/utilint/DummyFileStoreInfo.java new file mode 100644 index 0000000..610fd6e --- /dev/null +++ b/test/com/sleepycat/je/utilint/DummyFileStoreInfo.java @@ -0,0 +1,70 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import java.io.IOException; + +/** + * Define a FileStoreInfo implementation that can be controlled by the test, + * both to isolate the test from current file system free space conditions, and + * to permit testing with specific free space conditions. + */ +public class DummyFileStoreInfo extends FileStoreInfo + implements FileStoreInfo.Factory { + + public static DummyFileStoreInfo INSTANCE = new DummyFileStoreInfo(); + + protected DummyFileStoreInfo() { } + + /* Implement Factory */ + + @Override + public void factoryCheckSupported() { } + + @Override + public FileStoreInfo factoryGetInfo(final String file) + throws IOException { + + factoryCheckSupported(); + return this; + } + + /* Implement FileStoreInfo */ + + @Override + public long getTotalSpace() + throws IOException { + + return Long.MAX_VALUE; + } + + @Override + public long getUsableSpace() + throws IOException { + + return Long.MAX_VALUE; + } + + /* Object methods */ + + @Override + public boolean equals(final Object o) { + return getClass().isInstance(o); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } +} diff --git a/test/com/sleepycat/je/utilint/ExceptionListenerTest.java b/test/com/sleepycat/je/utilint/ExceptionListenerTest.java new file mode 100644 index 0000000..14287e5 --- /dev/null +++ b/test/com/sleepycat/je/utilint/ExceptionListenerTest.java @@ -0,0 +1,129 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import java.io.File; + +import org.junit.Test; + +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ExceptionEvent; +import com.sleepycat.je.ExceptionListener; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class ExceptionListenerTest extends TestBase { + + private final File envHome; + + private volatile boolean exceptionThrownCalled = false; + + private DaemonThread dt = null; + + public ExceptionListenerTest() { + envHome = SharedTestUtils.getTestDir(); + } + + @Test + public void testExceptionListener() + throws Exception { + + /* Open with a listener. 
+ */
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setExceptionListener(new MyExceptionListener());
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHome, envConfig);
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getExceptionListener());
+
+        dt = new MyDaemonThread(0, Environment.CLEANER_NAME, envImpl);
+        DaemonThread.stifleExceptionChatter = true;
+        dt.runOrPause(true);
+        long startTime = System.currentTimeMillis();
+        while (!dt.isShutdownRequested() &&
+               System.currentTimeMillis() - startTime < 10 * 10000) {
+            Thread.yield();
+        }
+        assertTrue("ExceptionListener apparently not called",
+                   exceptionThrownCalled);
+
+        env.close();
+
+        /* Open without a listener. */
+        envConfig.setExceptionListener(null);
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.getNonNullEnvImpl(env);
+
+        assertNull(envImpl.getExceptionListener());
+
+        /* Set an exception listener. */
+        envConfig = env.getConfig();
+        exceptionThrownCalled = false;
+        envConfig.setExceptionListener(new MyExceptionListener());
+        env.setMutableConfig(envConfig);
+
+        assertSame(envConfig.getExceptionListener(),
+                   envImpl.getExceptionListener());
+
+        dt = new MyDaemonThread(0, Environment.CLEANER_NAME, envImpl);
+        DaemonThread.stifleExceptionChatter = true;
+        dt.runOrPause(true);
+        startTime = System.currentTimeMillis();
+        while (!dt.isShutdownRequested() &&
+               System.currentTimeMillis() - startTime < 10 * 10000) {
+            Thread.yield();
+        }
+        assertTrue("ExceptionListener apparently not called",
+                   exceptionThrownCalled);
+    }
+
+    private class MyDaemonThread extends DaemonThread {
+        MyDaemonThread(long waitTime, String name, EnvironmentImpl envImpl) {
+            super(waitTime, name, envImpl);
+        }
+
+        @Override
+        protected void onWakeup() {
+            throw new RuntimeException("test exception listener");
+        }
+    }
+
+    private class MyExceptionListener implements ExceptionListener {
+        public void exceptionThrown(ExceptionEvent event) {
+            assertEquals("daemonName should be CLEANER_NAME",
+                         Environment.CLEANER_NAME,
+                         event.getThreadName());
+
+            /*
+             * Be sure to set the exceptionThrownCalled flag before calling
+             * shutdown, so the main test thread will see the right value of
+             * the flag when it comes out of the loop in testExceptionListener
+             * that waits for the daemon thread to finish.
+             */
+            exceptionThrownCalled = true;
+            dt.requestShutdown();
+        }
+    }
+}
diff --git a/test/com/sleepycat/je/utilint/FileStoreInfoTest.java b/test/com/sleepycat/je/utilint/FileStoreInfoTest.java
new file mode 100644
index 0000000..b0c7cc1
--- /dev/null
+++ b/test/com/sleepycat/je/utilint/FileStoreInfoTest.java
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeThat; + +import static org.hamcrest.core.IsNull.nullValue; + +import java.io.File; +import java.io.IOException; +import java.util.logging.Logger; + +import org.junit.Test; + +import com.sleepycat.je.utilint.LoggerUtils; + +/** Test {@link FileStoreInfo}. */ +public class FileStoreInfoTest { + + private final Logger logger = + LoggerUtils.getLoggerFixedPrefix(getClass(), "Test"); + + /** Test when running on Java 6. */ + @Test + public void testJava6() + throws Exception { + + try { + Class.forName(FileStoreInfo.FILE_STORE_CLASS); + assumeThat("Skip when running Java 7 or later", nullValue()); + } catch (ClassNotFoundException e) { + } + + try { + FileStoreInfo.checkSupported(); + fail("Expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + logger.info("Got expected unsupported exception for Java 6: " + e); + } + + try { + FileStoreInfo.getInfo(System.getProperty("user.dir")); + fail("Expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + logger.info("Got expected exception for Java 6: " + e); + } + } + + /** Test when running on Java 7 or later. */ + @Test + public void testJava7() + throws Exception { + + try { + Class.forName(FileStoreInfo.FILE_STORE_CLASS); + } catch (ClassNotFoundException e) { + assumeThat("Skip when running Java 6", nullValue()); + } + + FileStoreInfo.checkSupported(); + + final File file1 = File.createTempFile("file1", null); + file1.deleteOnExit(); + final FileStoreInfo info1 = FileStoreInfo.getInfo(file1.getPath()); + + assertFalse(info1.equals(null)); + assertFalse(info1.equals(Boolean.TRUE)); + assertEquals(info1, info1); + assertEquals(info1, FileStoreInfo.getInfo(file1.getPath())); + + assertTrue("Total space greater than zero", + info1.getTotalSpace() > 0); + assertTrue("Usable space greater than zero", + info1.getUsableSpace() > 0); + + final File file2 = File.createTempFile("file2", null); + file2.deleteOnExit(); + final FileStoreInfo info2 = FileStoreInfo.getInfo(file2.getPath()); + + assertEquals("Equal file store info for files in same directory", + info1, info2); + + file2.delete(); + try { + FileStoreInfo.getInfo(file2.getPath()).getTotalSpace(); + fail("Expected IOException"); + } catch (IOException e) { + logger.info("Got expected exception for deleted file: " + e); + } + } +} diff --git a/test/com/sleepycat/je/utilint/LoggerUtilsTest.java b/test/com/sleepycat/je/utilint/LoggerUtilsTest.java new file mode 100644 index 0000000..64e4abd --- /dev/null +++ b/test/com/sleepycat/je/utilint/LoggerUtilsTest.java @@ -0,0 +1,640 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.je.utilint;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.LogManager;
+import java.util.logging.LogRecord;
+import java.util.logging.Logger;
+
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DbInternal;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentMutableConfig;
+import com.sleepycat.je.dbi.EnvironmentImpl;
+import com.sleepycat.je.junit.JUnitProcessThread;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+
+/**
+ * A unit test for testing JE logging programmatically.
+ */
+public class LoggerUtilsTest extends TestBase {
+
+    /**
+     * If a logging config file is specified, this test cannot expect the
+     * logging properties to have default settings.
+     */
+    private static final boolean DEFAULT_LOGGING_PROPERTIES =
+        System.getProperty("java.util.logging.config.file", "").equals("");
+
+    private final File envHome;
+    private static final String loggerPrefix = "com.sleepycat.je.";
+    /* Logging configuration properties file name. */
+    private static final String fileName = "logging.properties";
+    /* Logging settings in the properties file. */
+    private static final String consoleLevel =
+        "com.sleepycat.je.util.ConsoleHandler.level=INFO";
+    private static final String fileLevel =
+        "com.sleepycat.je.util.FileHandler.level=WARNING";
+
+    public LoggerUtilsTest() {
+        envHome = SharedTestUtils.getTestDir();
+    }
+
+    /*
+     * Remove the named files from the environment directory.
+     */
+    private void removeFiles(File envDir, String name) {
+        File[] files = envDir.listFiles();
+        for (File file : files) {
+            if (file.getName().contains(name)) {
+                assertTrue("couldn't delete " + name + " for " + envDir,
+                           file.delete());
+            }
+        }
+    }
+
+    /*
+     * Test whether a JE logger's level can be set programmatically.
+     */
+    @Test
+    public void testLoggerLevelsRWEnv()
+        throws Exception {
+
+        changeLoggerLevels(false /* readOnly */);
+    }
+
+    /*
+     * Test whether a JE logger's level can be set programmatically, and that
+     * logging works in a read only environment.
+     */
+    @Test
+    public void testLoggerLevelsROEnv()
+        throws Exception {
+
+        changeLoggerLevels(true /* readOnly */);
+    }
+
+    private void changeLoggerLevels(boolean readOnlyEnv)
+        throws Exception {
+
+        /*
+         * Set the parent's level to OFF, so no logging messages should be
+         * written to the je.info files.
+         */
+        Logger parent = Logger.getLogger("com.sleepycat.je");
+        parent.setLevel(Level.OFF);
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL, "ALL");
+        envConfig.setReadOnly(readOnlyEnv);
+        Environment env = new Environment(envHome, envConfig);
+
+        /* Initialize a list of test messages. */
+        ArrayList<String> messages = new ArrayList<String>();
+        messages.add("Hello, Linda!");
+        messages.add("Hello, Sam!");
+        messages.add("Hello, Charlie!");
+        messages.add("Hello, Mark!");
+        messages.add("Hello, Tao!");
+        messages.add("Hello, Eric!");
+
+        /* Check the logger level before reset. */
+        checkLoggerLevel();
+
+        /* Log the test messages. */
+        logMsg(DbInternal.getNonNullEnvImpl(env), messages);
+
+        /*
+         * The loggers were turned off with a level setting of OFF, so there
+         * should be nothing in the je.info files.
+         */
+        ArrayList<String> readMsgs = readFromInfoFile(readOnlyEnv);
+        assertTrue(readMsgs.size() == 0);
+
+        /*
+         * Reset the parent level to ALL, so that all logging messages should
+         * be logged.
+         */
+        parent.setLevel(Level.ALL);
+
+        /* Log the test messages. */
+        logMsg(DbInternal.getNonNullEnvImpl(env), messages);
+
+        /* Check that each test message is in the je.info.0 file. */
+        readMsgs = readFromInfoFile(readOnlyEnv);
+
+        if (readOnlyEnv) {
+            /* A read only Environment won't log anything to JE FileHandler. */
+            assertTrue(readMsgs.size() == 0);
+        } else {
+
+            /*
+             * Since the JE logger's level has been set to ALL, additional JE
+             * logging messages from normal operations may also be written to
+             * the je.info files, so the number of messages found there should
+             * be equal to or larger than the number we logged directly.
+             */
+            assertTrue(readMsgs.size() >= messages.size());
+            for (int i = 0; i < messages.size(); i++) {
+                boolean contained = false;
+                for (int j = 0; j < readMsgs.size(); j++) {
+                    if (readMsgs.get(j).contains(messages.get(i))) {
+                        contained = true;
+                        break;
+                    }
+                }
+                assertTrue(contained);
+            }
+        }
+
+        /* Check that the JE loggers' levels have not been changed. */
+        checkLoggerLevel();
+
+        env.close();
+    }
+
+    /* Check the level for all JE loggers. */
+    private void checkLoggerLevel() {
+        Enumeration<String> loggerNames =
+            LogManager.getLogManager().getLoggerNames();
+        while (loggerNames.hasMoreElements()) {
+            String loggerName = loggerNames.nextElement();
+            if (loggerName.startsWith(loggerPrefix)) {
+                Logger logger = Logger.getLogger(loggerName);
+                assertNull(logger.getLevel());
+            }
+        }
+    }
+
+    /* Log some messages. */
+    private void logMsg(EnvironmentImpl envImpl, ArrayList<String> messages) {
+        Logger envLogger = envImpl.getLogger();
+        for (String message : messages) {
+            LoggerUtils.info(envLogger, envImpl, message);
+        }
+    }
+
+    /* Read the contents of the je.info files. */
+    private ArrayList<String> readFromInfoFile(boolean readOnlyEnv)
+        throws Exception {
+
+        /* Get the file for je.info.0. */
+        File[] files = envHome.listFiles();
+        File infoFile = null;
+        for (File file : files) {
+            if (("je.info.0").equals(file.getName())) {
+                infoFile = file;
+                break;
+            }
+        }
+
+        /* A read only environment writes no je.info files. */
+        ArrayList<String> messages = new ArrayList<String>();
+        if (readOnlyEnv) {
+            return messages;
+        }
+
+        /* Make sure the file exists. */
+        assertTrue(infoFile != null);
+
+        /* Read the messages from the file. */
+        BufferedReader in = new BufferedReader(new FileReader(infoFile));
+        String message;
+        while ((message = in.readLine()) != null) {
+            messages.add(message);
+        }
+        in.close();
+
+        return messages;
+    }
+
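+    /*
+     * A minimal sketch (level values illustrative, not prescriptive) of the
+     * two knobs the tests in this file exercise: JE params set the handler
+     * levels directly, while standard java.util.logging levels on the
+     * "com.sleepycat.je" logger gate which records reach those handlers.
+     *
+     *     EnvironmentConfig config = new EnvironmentConfig();
+     *     config.setAllowCreate(true);
+     *     config.setConfigParam(
+     *         EnvironmentConfig.CONSOLE_LOGGING_LEVEL, "WARNING");
+     *     config.setConfigParam(
+     *         EnvironmentConfig.FILE_LOGGING_LEVEL, "ALL");
+     *     Logger.getLogger("com.sleepycat.je").setLevel(Level.INFO);
+     *     Environment env = new Environment(envHome, config);
+     */
+
+    /*
+     * Test FileHandler and ConsoleHandler level setting.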
+     */
+    @Test
+    public void testHandlerLevels()
+        throws Exception {
+
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        Environment env = new Environment(envHome, envConfig);
+
+        /* Check the initial handler level settings. */
+        EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+        Level consoleHandlerLevel = envImpl.getConsoleHandler().getLevel();
+        Level fileHandlerLevel = envImpl.getFileHandler().getLevel();
+        if (DEFAULT_LOGGING_PROPERTIES) {
+            assertEquals(Level.OFF, consoleHandlerLevel);
+            assertEquals(Level.INFO, fileHandlerLevel);
+        }
+
+        env.close();
+
+        /* Reopen the Environment with param settings. */
+        envConfig.setConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL,
+                                 "WARNING");
+        envConfig.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL,
+                                 "SEVERE");
+        env = new Environment(envHome, envConfig);
+        envImpl = DbInternal.getNonNullEnvImpl(env);
+        Level newConsoleHandlerLevel = envImpl.getConsoleHandler().getLevel();
+        Level newFileHandlerLevel = envImpl.getFileHandler().getLevel();
+        /* Check that the new levels match the param settings. */
+        assertEquals(Level.WARNING, newConsoleHandlerLevel);
+        assertEquals(Level.SEVERE, newFileHandlerLevel);
+
+        /* Make sure the levels differ before and after the param setting. */
+        if (DEFAULT_LOGGING_PROPERTIES) {
+            assertFalse(consoleHandlerLevel.equals(newConsoleHandlerLevel));
+            assertFalse(fileHandlerLevel.equals(newFileHandlerLevel));
+        }
+
+        env.close();
+    }
+
+    /*
+     * Test whether the configurations inside the properties file are set
+     * correctly in the JE Environment.
+     */
+    @Test
+    public void testPropertiesSetting()
+        throws Exception {
+
+        invokeProcess(false);
+    }
+
+    /**
+     * Start the process and check the exit value.
+     *
+     * @param bothSetting indicates whether the logging configuration file
+     * and the JE params are both set on the same Environment.
+     */
+    private void invokeProcess(boolean bothSetting)
+        throws Exception {
+
+        /* Create a property file and write configurations into the file. */
+        String propertiesFile = createPropertiesFile();
+
+        /*
+         * If bothSetting is true, JE params are set as well, which requires
+         * two additional command line arguments.
+         */
+        String[] envCommand = bothSetting ? new String[8] : new String[6];
+        envCommand[0] = "-Djava.util.logging.config.file=" + propertiesFile;
+        envCommand[1] = "com.sleepycat.je.utilint.LoggerUtilsTest$" +
+            "PropertiesSettingProcess";
+        envCommand[2] = envHome.getAbsolutePath();
+        envCommand[3] = "INFO";
+        envCommand[4] = "WARNING";
+        envCommand[5] = bothSetting ? "true" : "false";
+        /* JE param settings. */
+        if (bothSetting) {
+            envCommand[6] = "WARNING";
+            envCommand[7] = "SEVERE";
+        }
+
+        /* Start a process. */
+        JUnitProcessThread thread =
+            new JUnitProcessThread("PropertiesSettingProcess", envCommand);
+        thread.start();
+
+        try {
+            thread.finishTest();
+        } catch (Throwable t) {
+            System.err.println(t.toString());
+        }
+
+        /* We expect that the process exited normally. */
+        assertEquals(0, thread.getExitVal());
+
+        /* Remove the created properties file. */
+        removeFiles(envHome, fileName);
+    }
+
+    /* Create a properties file for the test to use. */
*/
+ private String createPropertiesFile()
+ throws Exception {
+
+ String name = envHome.getAbsolutePath() +
+ System.getProperty("file.separator") + fileName;
+ File file = new File(name);
+ PrintWriter out =
+ new PrintWriter(new BufferedWriter(new FileWriter(file)));
+ out.println(consoleLevel);
+ out.println(fileLevel);
+ out.close();
+
+ return name;
+ }
+
+ /*
+ * Test that the JE ConsoleHandler and FileHandler get the correct level
+ * when their levels are set both by the java.util.logging properties file
+ * and by JE params.
+ *
+ * We want JE params to override the levels set by the standard
+ * properties file.
+ */
+ @Test
+ public void testPropertiesAndParamSetting()
+ throws Exception {
+
+ invokeProcess(true);
+ }
+
+ /*
+ * Test that handler levels are mutable.
+ */
+ @Test
+ public void testMutableConfig()
+ throws Exception {
+
+ EnvironmentConfig envConfig = new EnvironmentConfig();
+ envConfig.setAllowCreate(true);
+ envConfig.setConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL,
+ "WARNING");
+ envConfig.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL,
+ "SEVERE");
+ Environment env = new Environment(envHome, envConfig);
+
+ /* Check the initial handlers' level settings. */
+ EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ Level consoleHandlerLevel = envImpl.getConsoleHandler().getLevel();
+ Level fileHandlerLevel = envImpl.getFileHandler().getLevel();
+ assertEquals(Level.WARNING, consoleHandlerLevel);
+ assertEquals(Level.SEVERE, fileHandlerLevel);
+
+ /* Change the handler param settings for an open Environment. */
+ EnvironmentMutableConfig mutableConfig = env.getMutableConfig();
+ mutableConfig.setConfigParam(EnvironmentConfig.CONSOLE_LOGGING_LEVEL,
+ "SEVERE");
+ mutableConfig.setConfigParam(EnvironmentConfig.FILE_LOGGING_LEVEL,
+ "WARNING");
+ env.setMutableConfig(mutableConfig);
+
+ /* Check that the handlers' levels have changed. */
+ Level newConsoleHandlerLevel = envImpl.getConsoleHandler().getLevel();
+ Level newFileHandlerLevel = envImpl.getFileHandler().getLevel();
+ assertEquals(Level.SEVERE, newConsoleHandlerLevel);
+ assertEquals(Level.WARNING, newFileHandlerLevel);
+ assertTrue(newConsoleHandlerLevel != consoleHandlerLevel);
+ assertTrue(newFileHandlerLevel != fileHandlerLevel);
+
+ /* Set an unchanged config and check that the levels persist. */
+ mutableConfig = env.getMutableConfig();
+ env.setMutableConfig(mutableConfig);
+ consoleHandlerLevel = envImpl.getConsoleHandler().getLevel();
+ fileHandlerLevel = envImpl.getFileHandler().getLevel();
+ assertEquals(Level.SEVERE, consoleHandlerLevel);
+ assertEquals(Level.WARNING, fileHandlerLevel);
+ assertTrue(newConsoleHandlerLevel == consoleHandlerLevel);
+ assertTrue(newFileHandlerLevel == fileHandlerLevel);
+
+ env.close();
+ }
+
+ /*
+ * A process for starting a JE Environment with a properties file or
+ * configured JE params.
+ */
+ static class PropertiesSettingProcess {
+ private final File envHome;
+ /* Handler levels set through the properties configuration file. */
+ private final Level propertyConsole;
+ private final Level propertyFile;
+ /* Handler levels set through JE params. */
+ private final Level paramConsole;
+ private final Level paramFile;
+ /* Indicates whether the properties file and JE params are both set.
*/
+ private final boolean bothSetting;
+ private Environment env;
+
+ public PropertiesSettingProcess(File envHome,
+ Level propertyConsole,
+ Level propertyFile,
+ boolean bothSetting,
+ Level paramConsole,
+ Level paramFile) {
+ this.envHome = envHome;
+ this.propertyConsole = propertyConsole;
+ this.propertyFile = propertyFile;
+ this.bothSetting = bothSetting;
+ this.paramConsole = paramConsole;
+ this.paramFile = paramFile;
+ }
+
+ /* Open a JE Environment. */
+ public void openEnv() {
+ try {
+ EnvironmentConfig envConfig = new EnvironmentConfig();
+ envConfig.setAllowCreate(true);
+
+ /* If bothSetting is true, set the JE params. */
+ if (bothSetting) {
+ envConfig.setConfigParam
+ (EnvironmentConfig.CONSOLE_LOGGING_LEVEL,
+ paramConsole.toString());
+ envConfig.setConfigParam
+ (EnvironmentConfig.FILE_LOGGING_LEVEL,
+ paramFile.toString());
+ }
+
+ env = new Environment(envHome, envConfig);
+ } catch (DatabaseException e) {
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+
+ /* Check the configured levels. */
+ public void check() {
+ if (bothSetting) {
+ doCheck(paramConsole, paramFile);
+ } else {
+ doCheck(propertyConsole, propertyFile);
+ }
+ }
+
+ private void doCheck(Level cLevel, Level fLevel) {
+ try {
+ EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env);
+ assertTrue
+ (envImpl.getConsoleHandler().getLevel() == cLevel);
+ assertTrue(envImpl.getFileHandler().getLevel() == fLevel);
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(2);
+ } finally {
+ env.close();
+ }
+ }
+
+ public static void main(String[] args) {
+ PropertiesSettingProcess process = null;
+ try {
+ Level paramConsole = null;
+ Level paramFile = null;
+ if (args.length == 6) {
+ paramConsole = Level.parse(args[4]);
+ paramFile = Level.parse(args[5]);
+ }
+
+ process = new PropertiesSettingProcess(new File(args[0]),
+ Level.parse(args[1]),
+ Level.parse(args[2]),
+ Boolean.parseBoolean(args[3]),
+ paramConsole,
+ paramFile);
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.exit(3);
+ }
+
+ if (process == null) {
+ throw new RuntimeException("Process should have been created");
+ }
+ process.openEnv();
+ process.check();
+ }
+ }
+
+ /**
+ * Set up two environments with configured handlers, and make sure
+ * that they only log records from the appropriate environment.
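+ *
+ * Each environment installs its own TestHandler through
+ * EnvironmentConfig.setLoggingHandler, so records logged against one
+ * environment must not reach the other environment's handler.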
+ */
+ @Test
+ public void testConfiguredHandler() {
+ TestInfo infoA = setupMultipleEnvs("A");
+ TestInfo infoB = setupMultipleEnvs("B");
+ int numTestMsgs = 10;
+ try {
+ Logger loggerA = LoggerUtils.getLogger
+ (com.sleepycat.je.utilint.LoggerUtils.class);
+ Logger loggerB = LoggerUtils.getLogger
+ (com.sleepycat.je.utilint.LoggerUtils.class);
+
+ for (int i = 0; i < numTestMsgs; i++) {
+ LoggerUtils.logMsg(loggerA,
+ infoA.envImpl,
+ Level.SEVERE, infoA.prefix + i);
+ LoggerUtils.logMsg(loggerB,
+ infoB.envImpl,
+ Level.SEVERE, infoB.prefix + i);
+ }
+
+ infoA.handler.verify(numTestMsgs);
+ infoB.handler.verify(numTestMsgs);
+ } finally {
+ cleanup(infoA.env, infoA.dir);
+ cleanup(infoB.env, infoB.dir);
+ }
+ }
+
+ private void cleanup(Environment env, File envDir) {
+ env.close();
+ TestUtils.removeLogFiles("2 envs", envDir, false);
+ removeFiles(envDir, "je.info");
+ }
+
+ private TestInfo setupMultipleEnvs(String name) {
+ String testPrefix = "TEXT" + name;
+ File dir = new File(envHome, name);
+ dir.mkdirs();
+
+ EnvironmentConfig config = new EnvironmentConfig();
+ config.setAllowCreate(true);
+ TestHandler handler = new TestHandler(testPrefix);
+ handler.setLevel(Level.SEVERE);
+ config.setLoggingHandler(handler);
+ Environment env = new Environment(dir, config);
+
+ return new TestInfo(env, handler, testPrefix, dir);
+ }
+
+ private class TestInfo {
+ EnvironmentImpl envImpl;
+ Environment env;
+ String prefix;
+ TestHandler handler;
+ File dir;
+
+ TestInfo(Environment env, TestHandler handler,
+ String testPrefix, File dir) {
+ this.env = env;
+ this.envImpl = DbInternal.getNonNullEnvImpl(env);
+ this.handler = handler;
+ this.prefix = testPrefix;
+ this.dir = dir;
+ }
+ }
+
+ private class TestHandler extends Handler {
+
+ private final String prefix;
+ private final List<String> logged;
+
+ TestHandler(String prefix) {
+ this.prefix = prefix;
+ logged = new ArrayList<String>();
+ }
+
+ void verify(int numExpected) {
+ assertEquals(numExpected, logged.size());
+ for (int i = 0; i < numExpected; i++) {
+ assertEquals(prefix + i, logged.get(i));
+ }
+ }
+
+ @Override
+ public void publish(LogRecord record) {
+ logged.add(record.getMessage());
+ }
+
+ @Override
+ public void flush() {
+ }
+
+ @Override
+ public void close() throws SecurityException {
+ }
+ }
+}
diff --git a/test/com/sleepycat/je/utilint/LongAvgRateMapStatTest.java b/test/com/sleepycat/je/utilint/LongAvgRateMapStatTest.java
new file mode 100644
index 0000000..368c4e6
--- /dev/null
+++ b/test/com/sleepycat/je/utilint/LongAvgRateMapStatTest.java
@@ -0,0 +1,272 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import java.util.TreeMap; +import java.util.SortedMap; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +/** Test the LongAvgRateMapStat class */ +public class LongAvgRateMapStatTest extends TestBase { + + private static final StatGroup statGroup = + new StatGroup("TestGroup", "Test group"); + private static int statDefCount; + + private LongAvgRateMapStat map; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + map = new LongAvgRateMapStat( + statGroup, getStatDef(), 3000, MILLISECONDS); + } + + private StatDefinition getStatDef() { + return new StatDefinition(getStatDefName(), ""); + } + + private String getStatDefName() { + return "stat" + Integer.toString(++statDefCount); + } + + @Test + public void testCreateStat() { + LongAvgRate compA = map.createStat("a"); + compA.add(0, 1000); + compA.add(1000, 2000); + LongAvgRate compB = map.createStat("b"); + compB.add(0, 1000); + compB.add(2000, 2000); + assertEquals("a=1;b=2", map.get()); + } + + @Test + public void testRemoveStat() { + map.removeStat("a"); + LongAvgRate compA = map.createStat("a"); + compA.add(0, 1000); + compA.add(1000, 2000); + LongAvgRate compB = map.createStat("b"); + compB.add(0, 1000); + compB.add(2000, 2000); + assertEquals("a=1;b=2", map.get()); + map.removeStat("c"); + assertEquals("a=1;b=2", map.get()); + map.removeStat("a"); + assertEquals("b=2", map.get()); + } + + @Test + public void testCopy() { + LongAvgRate compA = map.createStat("a"); + compA.add(0, 1000); + compA.add(1000, 2000); + LongAvgRateMapStat copy = map.copy(); + LongAvgRate compB = map.createStat("b"); + compB.add(0, 1000); + compB.add(2000, 2000); + LongAvgRate compC = copy.createStat("c"); + compC.add(0, 1000); + compC.add(3000, 2000); + assertEquals("a=1;b=2", map.get()); + assertEquals("a=1;c=3", copy.get()); + } + + @Test + public void testComputeInterval() { + LongAvgRateMapStat other = map.copy(); + + LongAvgRateMapStat interval = map.computeInterval(other); + assertTrue(interval.isNotSet()); + + /* + * Consider all combinations of entries with a value, an unset entry, + * no entry, and a removed entry: + * + * a: present with non-zero value + * b: present with zero/cleared value + * c: absent + * d: removed + * + * If both maps have an entry for the same key, then they should be + * merged. If the newer of the two maps has no entry, then the result + * should not include one. The removed case is one where the removal + * can make the map newer even though it has no entries present to + * prove it. If the maps have the same modification time -- an + * unlikely case in practice -- then the this argument controls which + * entries appear. 
+ */ + LongAvgRate comp1a2a = map.createStat("1a2a"); + LongAvgRate comp1a2b = map.createStat("1a2b"); + LongAvgRate comp1a2c = map.createStat("1a2c"); + LongAvgRate comp1a2d = map.createStat("1a2d"); + map.createStat("1b2a"); + LongAvgRate comp1d2a = map.createStat("1d2a"); + LongAvgRate otherComp1a2a = other.createStat("1a2a"); + other.createStat("1a2b"); + LongAvgRate otherComp1a2d = other.createStat("1a2d"); + LongAvgRate otherComp1b2a = other.createStat("1b2a"); + LongAvgRate otherComp1c2a = other.createStat("1c2a"); + LongAvgRate otherComp1d2a = other.createStat("1d2a"); + + /* Test with both maps empty */ + interval = map.computeInterval(other); + assertTrue(interval.isNotSet()); + + /* Test with other map empty */ + comp1a2a.add(0, 1000); + comp1a2a.add(3000, 2000); + comp1a2b.add(0, 1000); + comp1a2b.add(6000, 2000); + comp1a2c.add(0, 1000); + comp1a2c.add(9000, 2000); + comp1a2d.add(0, 1000); + comp1a2d.add(12000, 2000); + comp1d2a.add(0, 1000); + comp1d2a.add(15000, 2000); + map.removeStat("1d2a", 2001); + + interval = map.computeInterval(other); + assertEquals("1a2a=3;1a2b=6;1a2c=9;1a2d=12", + interval.get()); + interval = other.computeInterval(map); + assertEquals("1a2a=3;1a2b=6;1a2c=9;1a2d=12", + interval.get()); + + /* Test with other map newer */ + otherComp1a2a.add(10000, 4000); + otherComp1a2a.add(40000, 5000); + otherComp1a2d.add(10000, 4000); + otherComp1a2d.add(70000, 5000); + other.removeStat("1a2d",5001); + otherComp1b2a.add(10000, 4000); + otherComp1b2a.add(100000, 5000); + otherComp1c2a.add(10000, 4000); + otherComp1c2a.add(130000, 5000); + otherComp1d2a.add(10000, 4000); + otherComp1d2a.add(160000, 5000); + + interval = map.computeInterval(other); + assertEquals("1a2a=20;1a2b=6;1b2a=90;1c2a=120;1d2a=150", + interval.get()); + interval = other.computeInterval(map); + assertEquals("1a2a=20;1a2b=6;1b2a=90;1c2a=120;1d2a=150", + interval.get()); + + /* Test with other map older */ + comp1a2a.clear(); + comp1a2b.clear(); + comp1a2c.clear(); + comp1a2d.clear(); + comp1d2a = map.createStat("1d2a"); + + otherComp1a2a.clear(); + otherComp1a2d = other.createStat("1a2d"); + otherComp1b2a.clear(); + otherComp1c2a.clear(); + otherComp1d2a.clear(); + + comp1a2a.add(0, 4000); + comp1a2a.add(3000, 5000); + comp1a2b.add(0, 4000); + comp1a2b.add(6000, 5000); + comp1a2c.add(0, 4000); + comp1a2c.add(9000, 5000); + comp1a2d.add(0, 4000); + comp1a2d.add(12000, 5000); + comp1d2a.add(0, 4000); + comp1d2a.add(15000, 5000); + map.removeStat("1d2a", 5001); + + otherComp1a2a.add(10000, 1000); + otherComp1a2a.add(40000, 2000); + otherComp1a2d.add(10000, 1000); + otherComp1a2d.add(70000, 2000); + other.removeStat("1a2d", 2001); + otherComp1b2a.add(10000, 1000); + otherComp1b2a.add(100000, 2000); + otherComp1c2a.add(10000, 1000); + otherComp1c2a.add(130000, 2000); + otherComp1d2a.add(10000, 1000); + otherComp1d2a.add(160000, 2000); + + interval = map.computeInterval(other); + assertEquals("1a2a=13;1a2b=6;1a2c=9;1a2d=12;1b2a=90", + interval.get()); + interval = other.computeInterval(map); + assertEquals("1a2a=13;1a2b=6;1a2c=9;1a2d=12;1b2a=90", + interval.get()); + + /* + * Remove an entry from map using the same timestamp as on the other + * map, and confirm that the entry only appears if it is present in the + * this argument. 
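+ *
+ * Below, 1a2a is removed from the other map at time 5001, which matches
+ * this map's modification time, so 1a2a appears in
+ * map.computeInterval(other) but not in other.computeInterval(map).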
+ */
+ other.removeStat("1a2a", 5001);
+ interval = map.computeInterval(other);
+ assertEquals("1a2a=3;1a2b=6;1a2c=9;1a2d=12;1b2a=90",
+ interval.get());
+ interval = other.computeInterval(map);
+ assertEquals("1a2b=6;1b2a=90;1c2a=120;1d2a=150",
+ interval.get());
+
+ /*
+ * Now remove everything from the older map, and make sure that makes
+ * the result empty.
+ */
+ map.removeStat("1a2a", 5002);
+ map.removeStat("1a2b", 5002);
+ map.removeStat("1a2c", 5002);
+ map.removeStat("1a2d", 5002);
+ map.removeStat("1b2a", 5002);
+ interval = map.computeInterval(other);
+ assertEquals("", interval.get());
+ interval = other.computeInterval(map);
+ assertEquals("", interval.get());
+ }
+
+ @Test
+ public void testNegate() {
+ map.negate();
+ assertEquals("", map.get());
+ LongAvgRate compA = map.createStat("a");
+ compA.add(0, 1000);
+ compA.add(3000, 2000);
+ map.negate();
+ assertEquals("a=3", map.get());
+ }
+
+ @Test
+ public void getMap() {
+ SortedMap<String, Long> valueMap = new TreeMap<>();
+ assertEquals(valueMap, map.getMap());
+ LongAvgRate compA = map.createStat("a");
+ assertEquals(valueMap, map.getMap());
+ valueMap.put("a", 3L);
+ compA.add(0, 1000);
+ compA.add(3000, 2000);
+ assertEquals(valueMap, map.getMap());
+ }
+}
diff --git a/test/com/sleepycat/je/utilint/LongAvgRateStatTest.java b/test/com/sleepycat/je/utilint/LongAvgRateStatTest.java
new file mode 100644
index 0000000..92c7bd2
--- /dev/null
+++ b/test/com/sleepycat/je/utilint/LongAvgRateStatTest.java
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.util.test.TestBase;
+
+/** Test the LongAvgRateStat class.
*/ +public class LongAvgRateStatTest extends TestBase { + + private static final StatGroup statGroup = + new StatGroup("TestGroup", "Test group"); + private static int statDefCount; + + private LongAvgRateStat stat; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + stat = new LongAvgRateStat( + statGroup, getStatDef(), 3000, MILLISECONDS); + } + + private static StatDefinition getStatDef() { + return new StatDefinition(getStatDefName(), ""); + } + + private static String getStatDefName() { + return "stat" + Integer.toString(++statDefCount); + } + + @Test + public void testCopy() { + stat.add(0, 1000); + stat.add(3000, 2000); + LongAvgRateStat copy = stat.copy(); + stat.add(9000, 3000); + copy.add(15000, 3000); + assertEquals(Long.valueOf(4), stat.get()); + assertEquals(Long.valueOf(6), copy.get()); + } + + @Test + public void testComputeInterval() { + LongAvgRateStat other = stat.copy(); + + LongAvgRateStat interval = stat.computeInterval(other); + assertTrue(interval.isNotSet()); + + stat.add(0, 1000); + stat.add(3000, 2000); + interval = stat.computeInterval(other); + assertEquals(Long.valueOf(3), interval.get()); + interval = other.computeInterval(stat); + assertEquals(Long.valueOf(3), interval.get()); + + other.add(10000, 4000); + other.add(40000, 5000); + interval = stat.computeInterval(other); + assertEquals(Long.valueOf(20), interval.get()); + interval = other.computeInterval(stat); + assertEquals(Long.valueOf(20), interval.get()); + + stat.clear(); + other.clear(); + stat.add(10000, 1000); + stat.add(40000, 2000); + other.add(0, 4000); + other.add(3000, 5000); + interval = stat.computeInterval(other); + assertEquals(Long.valueOf(13), interval.get()); + interval = other.computeInterval(stat); + assertEquals(Long.valueOf(13), interval.get()); + } +} diff --git a/test/com/sleepycat/je/utilint/LongAvgRateTest.java b/test/com/sleepycat/je/utilint/LongAvgRateTest.java new file mode 100644 index 0000000..f3b0baf --- /dev/null +++ b/test/com/sleepycat/je/utilint/LongAvgRateTest.java @@ -0,0 +1,241 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.utilint; + +import static java.util.concurrent.TimeUnit.MICROSECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +/** Test the LongAvgRate class */ +public class LongAvgRateTest extends TestBase { + + private LongAvgRate avg; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + avg = new LongAvgRate("stat", 3000, MILLISECONDS); + } + + @Test + public void testConstructorPeriodMillis() { + avg.add(1000, 1000); + avg.add(2000, 2000); + avg.add(4000, 3000); + avg.add(8000, 4000); + assertEquals(Long.valueOf(2), avg.get()); + + /* Shorter period skews result towards later entries */ + avg = new LongAvgRate("stat", 2000, MILLISECONDS); + avg.add(1000, 1000); + avg.add(2000, 2000); + avg.add(4000, 3000); + avg.add(8000, 4000); + assertEquals(Long.valueOf(2), avg.get()); + } + + @Test + public void testConstructorReportTimeUnit() { + avg = new LongAvgRate("stat", 3000, NANOSECONDS); + avg.add(2000000000L, 1000); + avg.add(4000000000L, 2000); + assertEquals(Long.valueOf(2), avg.get()); + + avg = new LongAvgRate("stat", 3000, MICROSECONDS); + avg.add(2000000000L, 1000); + avg.add(4000000000L, 2000); + assertEquals(Long.valueOf(2000), avg.get()); + + avg = new LongAvgRate("stat", 3000, MILLISECONDS); + avg.add(2000000000L, 1000); + avg.add(4000000000L, 2000); + assertEquals(Long.valueOf(2000000), avg.get()); + + avg = new LongAvgRate("stat", 3000, SECONDS); + avg.add(2000000000L, 1000); + avg.add(4000000000L, 2000); + assertEquals(Long.valueOf(2000000000L), avg.get()); + } + + /** Test behavior relative to MIN_PERIOD */ + @Test + public void testMinPeriod() { + avg.add(2000, 1000); + + /* Entry with time delta less than 200 ms is ignored */ + avg.add(3000, 1100); + assertEquals(Long.valueOf(0), avg.get()); + + /* Computes back to the initial entry */ + avg.add(4000, 2000); + assertEquals(Long.valueOf(2), avg.get()); + } + + @Test + public void testAdd() { + // Empty + assertEquals(Long.valueOf(0), avg.get()); + + // One value, no delta + avg.add(2000, 1000); + assertEquals(Long.valueOf(0), avg.get()); + + // Second value prior to MIN_PERIOD is ignored + avg.add(7700, 1100); + assertEquals(Long.valueOf(0), avg.get()); + + avg.add(4000, 2000); + avg.add(7700, 2100); + assertEquals(Long.valueOf(2), avg.get()); + + avg.add(8000, 3000); + avg.add(17700, 3100); + assertEquals(Long.valueOf(3), avg.get()); + } + + @Test(expected=NullPointerException.class) + public void testAddAverageNullArg() { + avg.add(null); + } + + @Test + public void testAddAverage() { + LongAvgRate other = new LongAvgRate("stat", 3000, SECONDS); + avg.add(other); + assertTrue("Add empty on empty has no effect", avg.isNotSet()); + + avg.add(3000, 1000); + avg.add(6000, 2000); + avg.add(other); + assertEquals("Add empty has no effect", Long.valueOf(3), avg.get()); + + other.add(6000, 1000); + other.add(12000, 2000); + avg.add(other); + assertEquals("Add older has no effect", Long.valueOf(3), avg.get()); + + other.clear(); + other.add(6000, 3000); + other.add(12000, 4000); + avg.add(other); + assertEquals("Add newer has effect", Long.valueOf(4), avg.get()); + + avg.clear(); 
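+ /* After clear(), adding the newer average adopts its rate of 6/ms. */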
+ avg.add(other); + assertEquals("Add to empty", 6, avg.get(), 0); + } + + @Test + public void testCopyLatest() { + LongAvgRate other = new LongAvgRate("stat", 3000, MILLISECONDS); + + LongAvgRate latest = avg.copyLatest(other); + assertTrue(latest.isNotSet()); + + avg.add(0, 1000); + avg.add(3000, 2000); + latest = avg.copyLatest(other); + assertEquals(Long.valueOf(3), latest.get()); + latest = other.copyLatest(avg); + assertEquals(Long.valueOf(3), latest.get()); + + /* The later rate is 30, so the result is closer to that */ + other.add(10000, 4000); + other.add(40000, 5000); + latest = avg.copyLatest(other); + assertEquals(Long.valueOf(20), latest.get()); + latest = other.copyLatest(avg); + assertEquals(Long.valueOf(20), latest.get()); + + /* The later rate is 3, so the result is smaller */ + avg.clear(); + other.clear(); + avg.add(10000, 1000); + avg.add(40000, 2000); + other.add(0, 4000); + other.add(3000, 5000); + latest = avg.copyLatest(other); + assertEquals(Long.valueOf(13), latest.get()); + latest = other.copyLatest(avg); + assertEquals(Long.valueOf(13), latest.get()); + } + + @Test + public void testClear() { + avg.add(3, 1000); + avg.add(6, 2000); + avg.clear(); + assertEquals(Long.valueOf(0), avg.get()); + assertTrue(avg.isNotSet()); + } + + @Test + public void testCopy() { + avg.add(3000, 1000); + avg.add(6000, 2000); + LongAvgRate copy = avg.copy(); + assertEquals(avg.getName(), copy.getName()); + avg.add(12000, 3000); + copy.add(24000, 3000); + assertEquals(Long.valueOf(4), avg.get()); + assertEquals(Long.valueOf(7), copy.get()); + } + + @Test + public void testGetFormattedValue() { + avg = new LongAvgRate("stat", 3000, MICROSECONDS); + assertEquals("unknown", avg.getFormattedValue(true)); + avg.add(0, 1000); + avg.add(987698769, 2000); + assertEquals("988", avg.getFormattedValue(true)); + assertEquals("988", avg.getFormattedValue(false)); + + avg = new LongAvgRate("stat", 3000, MILLISECONDS); + assertEquals("unknown", avg.getFormattedValue(true)); + avg.add(0, 1000); + avg.add(987698769, 2000); + assertEquals("987,699", avg.getFormattedValue(true)); + assertEquals("987699", avg.getFormattedValue(false)); + + avg = new LongAvgRate("stat", 3000, SECONDS); + assertEquals("unknown", avg.getFormattedValue(true)); + avg.add(0, 1000); + avg.add(987698769, 2000); + assertEquals("987,698,769", avg.getFormattedValue(true)); + assertEquals("987698769", avg.getFormattedValue(false)); + } + + @Test + public void testIsNotSet() { + assertTrue(avg.isNotSet()); + avg.add(0, 1000); + assertTrue(avg.isNotSet()); + avg.add(2000, 2000); + assertFalse(avg.isNotSet()); + avg.add(4000, 3000); + assertFalse(avg.isNotSet()); + } +} diff --git a/test/com/sleepycat/je/utilint/LongDiffStatTest.java b/test/com/sleepycat/je/utilint/LongDiffStatTest.java new file mode 100644 index 0000000..c4e7c46 --- /dev/null +++ b/test/com/sleepycat/je/utilint/LongDiffStatTest.java @@ -0,0 +1,101 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +/** Test the LongDiffStat class. */ +public class LongDiffStatTest extends TestBase { + + private static final StatGroup statGroup = + new StatGroup("TestGroup", "Test group"); + private static int statDefCount; + + private AtomicLongStat base; + private LongDiffStat stat; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + base = new AtomicLongStat(statGroup, getStatDef()); + base.set(1000L); + stat = new LongDiffStat(base, 3000); + } + + private static StatDefinition getStatDef() { + return new StatDefinition(getStatDefName(), ""); + } + + private static String getStatDefName() { + return "stat" + Integer.toString(++statDefCount); + } + + @Test + public void testGet() { + assertEquals(0, stat.get(1000)); + stat.set(300, 1000); + base.set(2000L); + assertEquals(700, stat.get(2000)); + assertEquals(1700, stat.get(5000)); + stat.set(3000, 6000); + assertEquals(0, stat.get(7000)); + } + + @Test + public void testClear() { + stat.set(10, 1000); + assertEquals(990, stat.get(1000)); + assertFalse(stat.isNotSet()); + stat.clear(); + assertEquals(0, stat.get(1000)); + assertTrue(stat.isNotSet()); + } + + @Test + public void testCopy() { + stat.set(300, 1000); + LongDiffStat copy = stat.copy(); + stat.set(350, 2000); + base.set(2000L); + assertEquals(700, copy.get(1000)); + copy.set(400, 3000); + assertEquals(650, stat.get(3000)); + } + + @Test + public void testGetFormattedValue() { + base.set(123456790L); + stat.set(1, System.currentTimeMillis()); + assertEquals("123,456,789", stat.getFormattedValue(true)); + assertEquals("123456789", stat.getFormattedValue(false)); + } + + @Test + public void testIsNotSet() { + assertTrue(stat.isNotSet()); + stat.set(200, 1000); + assertFalse(stat.isNotSet()); + stat.clear(); + assertTrue(stat.isNotSet()); + } +} diff --git a/test/com/sleepycat/je/utilint/StoppableThreadTest.java b/test/com/sleepycat/je/utilint/StoppableThreadTest.java new file mode 100644 index 0000000..dc0f966 --- /dev/null +++ b/test/com/sleepycat/je/utilint/StoppableThreadTest.java @@ -0,0 +1,102 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.je.utilint; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Logger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.EnvironmentWedgedException; +import com.sleepycat.je.rep.impl.RepTestBase; + +/** + * Tests to verify that StoppableThread shutdown works as expected. 
+ */
+public class StoppableThreadTest extends RepTestBase {
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ }
+
+ @Test
+ public void testBasic() {
+ createGroup(3);
+
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final StoppableThread testThread =
+ new StoppableThread(repEnvInfo[0].getRepImpl(), "test") {
+
+ @Override
+ protected Logger getLogger() {
+ return null;
+ }
+
+ @Override
+ protected int initiateSoftShutdown() {
+ return -1;
+ }
+
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ /* Loop uninterruptibly to simulate a runaway thread. */
+ }
+ }
+ };
+
+ testThread.start();
+
+ /*
+ * The shutdown should fail to stop the thread, and should invalidate
+ * the environment.
+ */
+ testThread.shutdownThread(Logger.getLogger("test"));
+
+ /* The looping thread is still alive. */
+ assertTrue(testThread.isAlive());
+
+ /* The environment has been invalidated. */
+ assertTrue(!repEnvInfo[0].getEnv().isValid());
+
+ stop.set(true);
+ boolean isDead = new PollCondition(100, 10000) {
+
+ @Override
+ protected boolean condition() {
+ return !testThread.isAlive();
+ }
+ }.await();
+
+ assertTrue(isDead);
+
+ /* Close the invalidated environment. */
+ try {
+ repEnvInfo[0].getRepImpl().close();
+ fail("Expected EnvironmentWedgedException");
+ } catch (EnvironmentWedgedException e) {
+ /* Expected. */
+ }
+ }
+}
diff --git a/test/com/sleepycat/je/utilint/TestAction.java b/test/com/sleepycat/je/utilint/TestAction.java
new file mode 100644
index 0000000..bc6fb61
--- /dev/null
+++ b/test/com/sleepycat/je/utilint/TestAction.java
@@ -0,0 +1,108 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.logging.Logger;
+
+import com.sleepycat.je.utilint.LoggerUtils;
+
+/**
+ * Perform a test action in another thread, providing methods to wait for
+ * completion, and to check for success and failure.
+ */
+public abstract class TestAction extends Thread {
+
+ /** Logger for this class. */
+ protected final Logger logger =
+ LoggerUtils.getLoggerFixedPrefix(getClass(), "Test");
+
+ /** The exception thrown by the action, or null. */
+ public volatile Throwable exception;
+
+ /** The action. */
+ protected abstract void action()
+ throws Exception;
+
+ /**
+ * Assert that the action completed, either by succeeding or throwing an
+ * exception, within the specified number of milliseconds.
+ *
+ * @param timeout the number of milliseconds to wait
+ * @throws InterruptedException if waiting is interrupted
+ */
+ public void assertCompleted(final long timeout)
+ throws InterruptedException {
+
+ join(timeout);
+ assertTrue("Thread should have completed", !isAlive());
+ }
+
+ /**
+ * Assert that the action completed successfully within the specified
+ * number of milliseconds.
+ *
+ * @param timeout the number of milliseconds to wait
+ * @throws InterruptedException if waiting is interrupted
+ */
+ public void assertSucceeded(final long timeout)
+ throws InterruptedException {
+
+ assertCompleted(timeout);
+ if (exception != null) {
+ final AssertionError err =
+ new AssertionError("Unexpected exception: " + exception);
+ err.initCause(exception);
+ throw err;
+ }
+ }
+
+ /**
+ * Assert that the action failed with an exception of the specified class,
+ * or a subclass of it, within the specified number of milliseconds.
+ *
+ * @param timeout the number of milliseconds to wait
+ * @param exceptionClass the exception class
+ * @throws InterruptedException if waiting is interrupted
+ */
+ public void assertException(
+ final long timeout,
+ final Class<? extends Throwable> exceptionClass)
+ throws InterruptedException {
+
+ assertCompleted(timeout);
+ assertNotNull("Expected exception", exception);
+ if (!exceptionClass.isInstance(exception)) {
+ final AssertionError err =
+ new AssertionError("Unexpected exception: " + exception);
+ err.initCause(exception);
+ throw err;
+ }
+ logger.info("Got expected exception: " + exception);
+ }
+
+ /**
+ * Call {@link #action} and catch any thrown exceptions.
+ */
+ @Override
+ public void run() {
+ try {
+ action();
+ } catch (Throwable t) {
+ exception = t;
+ }
+ }
+}
diff --git a/test/com/sleepycat/je/utilint/WaitTestHook.java b/test/com/sleepycat/je/utilint/WaitTestHook.java
new file mode 100644
index 0000000..0016d79
--- /dev/null
+++ b/test/com/sleepycat/je/utilint/WaitTestHook.java
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.je.utilint;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.logging.Logger;
+
+import com.sleepycat.je.utilint.LoggerUtils;
+
+/** Define a test hook for coordinating waiting. */
+public class WaitTestHook<T> extends TestHookAdapter<T> {
+
+ /** Logger for this class. */
+ protected final Logger logger =
+ LoggerUtils.getLoggerFixedPrefix(getClass(), "Test");
+
+ /** Whether the hook is waiting. */
+ private boolean waiting = false;
+
+ /** Whether the hook should stop waiting. */
+ private boolean stopWaiting = false;
+
+ /**
+ * Creates a test hook that will cause {@link #awaitWaiting} to stop
+ * waiting when it starts waiting, and will itself stop waiting when {@link
+ * #stopWaiting()} is called.
+ */
+ public WaitTestHook() { }
+
+ /**
+ * Assert that the test hook is called and begins waiting within the
+ * specified number of milliseconds.
+ */
+ public synchronized void awaitWaiting(final long timeout)
+ throws InterruptedException {
+
+ final long start = System.currentTimeMillis();
+ while (!waiting && (start + timeout > System.currentTimeMillis())) {
+ wait(10000);
+ }
+ logger.info(this + ": Awaited waiting for " +
+ (System.currentTimeMillis() - start) + " milliseconds");
+ assertTrue(this + ": Should be waiting", waiting);
+ }
+
+ /**
+ * Tell the test hook to stop waiting, asserting that it has started
+ * waiting.
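+ *
+ * Typical usage: install the hook, call {@link #awaitWaiting} to
+ * rendezvous with the hooked thread, perform any checks, and then call
+ * this method to release it.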
+ */
+ public synchronized void stopWaiting() {
+ assertTrue(this + ": Should be waiting", waiting);
+ stopWaiting = true;
+ notifyAll();
+ logger.info(this + ": Stopped waiting");
+ }
+
+ /** Wait until {@link #stopWaiting()} is called. */
+ @Override
+ public synchronized void doHook() {
+ waiting = true;
+ notifyAll();
+ logger.info(this + ": Now waiting");
+ while (!stopWaiting) {
+ try {
+ wait(10000);
+ } catch (InterruptedException e) {
+ break;
+ }
+ }
+ }
+
+ /**
+ * Wait until {@link #stopWaiting()} is called, regardless of the argument.
+ */
+ @Override
+ public void doHook(T obj) {
+ doHook();
+ }
+}
diff --git a/test/com/sleepycat/persist/test/AddNewSecKeyToAbstractClassTest.java b/test/com/sleepycat/persist/test/AddNewSecKeyToAbstractClassTest.java
new file mode 100644
index 0000000..58fc9c7
--- /dev/null
+++ b/test/com/sleepycat/persist/test/AddNewSecKeyToAbstractClassTest.java
@@ -0,0 +1,215 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+import com.sleepycat.util.test.TestEnv;
+
+/*
+ * A unit test for adding a new secondary key to an abstract entity
+ * class. There are two test cases: one adds a null secondary key, the
+ * other adds a non-null secondary key.
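+ *
+ * Both cases start from a je-4.0.103 log file resource, so the new key
+ * is added against data written by an older version of the class.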
+ * [#19385]
+ */
+
+public class AddNewSecKeyToAbstractClassTest extends TestBase {
+
+ private static final String STORE_NAME = "test";
+
+ private File envHome;
+ private Environment env;
+ private EntityStore store;
+ private static boolean notNull = true;
+
+ @Before
+ public void setUp()
+ throws Exception {
+
+ envHome = SharedTestUtils.getTestDir();
+ super.setUp();
+ }
+
+ @After
+ public void tearDown() {
+ if (store != null) {
+ try {
+ store.close();
+ } catch (DatabaseException e) {
+ System.out.println("During tearDown: " + e);
+ }
+ }
+ if (env != null) {
+ try {
+ env.close();
+ } catch (DatabaseException e) {
+ System.out.println("During tearDown: " + e);
+ }
+ }
+ envHome = null;
+ env = null;
+ store = null;
+ }
+
+ private void open()
+ throws DatabaseException {
+
+ EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+ envConfig.setAllowCreate(true);
+ env = new Environment(envHome, envConfig);
+
+ StoreConfig storeConfig = new StoreConfig();
+ storeConfig.setAllowCreate(true);
+ store = new EntityStore(env, STORE_NAME, storeConfig);
+ }
+
+ private void close()
+ throws DatabaseException {
+
+ if (store != null) {
+ store.close();
+ store = null;
+ }
+ if (env != null) {
+ env.close();
+ env = null;
+ }
+ }
+
+ @Test
+ public void testAddNullSecKeyToAbstractClass()
+ throws IOException {
+
+ /* Copy the log file resource to log file zero. */
+ TestUtils.loadLog(getClass(), "je-4.0.103_AbstractClassData.jdb",
+ envHome);
+ notNull = false;
+ open();
+
+ /*
+ * Adding a new but null secondary key to an abstract class is
+ * allowed.
+ */
+ PrimaryIndex<Long, AbstractEntity1> primary =
+ store.getPrimaryIndex(Long.class, AbstractEntity1.class);
+ AbstractEntity1 entity = primary.put(null, new EntityData1(1));
+ assertNotNull(entity);
+ entity = primary.put(null, new EntityData1(2));
+ assertTrue(entity == null);
+ close();
+ }
+
+ public void xxtestAddNotNullSecKeyToAbstractClass()
+ throws IOException {
+
+ /* Copy the log file resource to log file zero. */
+ TestUtils.loadLog(getClass(), "je-4.0.103_AbstractClassData.jdb",
+ envHome);
+ notNull = true;
+
+ try {
+ open();
+ PrimaryIndex<Long, AbstractEntity2> primary =
+ store.getPrimaryIndex(Long.class, AbstractEntity2.class);
+ fail();
+ } catch (Exception e) {
+
+ /*
+ * Expected exception. Adding a new but non-null secondary key to
+ * an abstract class is not allowed.
+ */
+ close();
+ }
+ }
+
+ @Entity(version = 1)
+ static abstract class AbstractEntity1 {
+ AbstractEntity1(Long i) {
+ this.id = i;
+ }
+
+ private AbstractEntity1(){}
+
+ @PrimaryKey
+ private Long id;
+
+ // Adding a null SecondaryKey.
+ @SecondaryKey( relate = MANY_TO_ONE )
+ private Long sk1;
+ }
+
+ @Persistent(version = 1)
+ static class EntityData1 extends AbstractEntity1{
+ private int f1;
+
+ private EntityData1(){}
+
+ EntityData1(int i) {
+ super(Long.valueOf(i));
+ this.f1 = i;
+ }
+ }
+
+ @Entity(version = 1)
+ static abstract class AbstractEntity2 {
+ AbstractEntity2(Long i) {
+ this.id = i;
+ }
+
+ private AbstractEntity2(){}
+
+ @PrimaryKey
+ private Long id;
+
+ // Adding a non-null SecondaryKey.
+ @SecondaryKey( relate = MANY_TO_ONE )
+ private final Long sk1 = notNull ?
1L : null; + } + + @Persistent(version = 1) + static class EntityData2 extends AbstractEntity2{ + private int f1; + + private EntityData2(){} + + EntityData2(int i) { + super(Long.valueOf(i)); + this.f1 = i; + } + } +} diff --git a/test/com/sleepycat/persist/test/BindingTest.java b/test/com/sleepycat/persist/test/BindingTest.java new file mode 100644 index 0000000..895c083 --- /dev/null +++ b/test/com/sleepycat/persist/test/BindingTest.java @@ -0,0 +1,2599 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY; +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileNotFoundException; +import java.lang.reflect.Array; +import java.lang.reflect.Field; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import junit.framework.TestCase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.bind.EntryBinding; +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.ForeignMultiKeyNullifier; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.SecondaryMultiKeyCreator; +import com.sleepycat.persist.impl.PersistCatalog; +import com.sleepycat.persist.impl.PersistComparator; +import com.sleepycat.persist.impl.PersistEntityBinding; +import com.sleepycat.persist.impl.PersistKeyBinding; +import com.sleepycat.persist.impl.PersistKeyCreator; +import com.sleepycat.persist.impl.RefreshException; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityMetadata; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PersistentProxy; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.PrimaryKeyMetadata; +import com.sleepycat.persist.model.SecondaryKey; +import 
com.sleepycat.persist.model.SecondaryKeyMetadata; +import com.sleepycat.persist.raw.RawField; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawType; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ +public class BindingTest extends TestBase { + + private static final String STORE_PREFIX = "persist#foo#"; + + private File envHome; + private Environment env; + private EntityModel model; + private PersistCatalog catalog; + private DatabaseEntry keyEntry; + private DatabaseEntry dataEntry; + + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + + keyEntry = new DatabaseEntry(); + dataEntry = new DatabaseEntry(); + } + + @After + public void tearDown() { + if (env != null) { + try { + env.close(); + } catch (Exception e) { + System.out.println("During tearDown: " + e); + } + } + envHome = null; + env = null; + catalog = null; + keyEntry = null; + dataEntry = null; + } + + /** + * @throws FileNotFoundException from DB core. + */ + private void open() + throws FileNotFoundException, DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + openCatalog(); + } + + private void openCatalog() + throws DatabaseException { + + model = new AnnotationModel(); + model.registerClass(LocalizedTextProxy.class); + model.registerClass(LocaleProxy.class); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + DbCompat.setTypeBtree(dbConfig); + catalog = new PersistCatalog + (env, STORE_PREFIX, STORE_PREFIX + "catalog", dbConfig, model, + null, false /*rawAccess*/, null /*Store*/); + } + + private void close() + throws DatabaseException { + + /* Close/open/close catalog to test checks for class evolution. 
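+ * The reopen runs with PersistCatalog.expectNoClassChanges set, so any
+ * unexpected class evolution during the reopen would fail the test.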
*/ + catalog.close(); + PersistCatalog.expectNoClassChanges = true; + try { + openCatalog(); + } finally { + PersistCatalog.expectNoClassChanges = false; + } + catalog.close(); + catalog = null; + + env.close(); + env = null; + } + + @Test + public void testBasic() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(Basic.class, + new Basic(1, "one", 2.2, "three")); + checkEntity(Basic.class, + new Basic(0, null, 0, null)); + checkEntity(Basic.class, + new Basic(-1, "xxx", -2, "xxx")); + + checkMetadata(Basic.class.getName(), new String[][] { + {"id", "long"}, + {"one", "java.lang.String"}, + {"two", "double"}, + {"three", "java.lang.String"}, + }, + 0 /*priKeyIndex*/, null); + + close(); + } + + @Entity + static class Basic implements MyEntity { + + @PrimaryKey + private long id; + private String one; + private double two; + private String three; + + private Basic() { } + + private Basic(long id, String one, double two, String three) { + this.id = id; + this.one = one; + this.two = two; + this.three = three; + } + + public String getBasicOne() { + return one; + } + + public Object getPriKeyObject() { + return id; + } + + public void validate(Object other) { + Basic o = (Basic) other; + TestCase.assertEquals(id, o.id); + TestCase.assertTrue(nullOrEqual(one, o.one)); + TestCase.assertEquals(two, o.two); + TestCase.assertTrue(nullOrEqual(three, o.three)); + } + + @Override + public String toString() { + return "" + id + ' ' + one + ' ' + two; + } + } + + @Test + public void testSimpleTypes() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(SimpleTypes.class, new SimpleTypes()); + + checkMetadata(SimpleTypes.class.getName(), new String[][] { + {"f0", "boolean"}, + {"f1", "char"}, + {"f2", "byte"}, + {"f3", "short"}, + {"f4", "int"}, + {"f5", "long"}, + {"f6", "float"}, + {"f7", "double"}, + {"f8", "java.lang.String"}, + {"f9", "java.math.BigInteger"}, + {"f10", "java.math.BigDecimal"}, + {"f11", "java.util.Date"}, + {"f12", "java.lang.Boolean"}, + {"f13", "java.lang.Character"}, + {"f14", "java.lang.Byte"}, + {"f15", "java.lang.Short"}, + {"f16", "java.lang.Integer"}, + {"f17", "java.lang.Long"}, + {"f18", "java.lang.Float"}, + {"f19", "java.lang.Double"}, + }, + 0 /*priKeyIndex*/, null); + + close(); + } + + @Entity + static class SimpleTypes implements MyEntity { + + @PrimaryKey + private final boolean f0 = true; + private final char f1 = 'a'; + private final byte f2 = 123; + private final short f3 = 123; + private final int f4 = 123; + private final long f5 = 123; + private final float f6 = 123.4f; + private final double f7 = 123.4; + private final String f8 = "xxx"; + private final BigInteger f9 = BigInteger.valueOf(123); + private BigDecimal f10 = new BigDecimal("123.1234000"); + private final Date f11 = new Date(); + private final Boolean f12 = true; + private final Character f13 = 'a'; + private final Byte f14 = 123; + private final Short f15 = 123; + private final Integer f16 = 123; + private final Long f17 = 123L; + private final Float f18 = 123.4f; + private final Double f19 = 123.4; + + SimpleTypes() { } + + public Object getPriKeyObject() { + return f0; + } + + public void validate(Object other) { + SimpleTypes o = (SimpleTypes) other; + TestCase.assertEquals(f0, o.f0); + TestCase.assertEquals(f1, o.f1); + TestCase.assertEquals(f2, o.f2); + TestCase.assertEquals(f3, o.f3); + TestCase.assertEquals(f4, o.f4); + TestCase.assertEquals(f5, o.f5); + TestCase.assertEquals(f6, o.f6); + TestCase.assertEquals(f7, o.f7); + 
TestCase.assertEquals(f8, o.f8); + TestCase.assertEquals(f9, o.f9); + /* The sorted BigDecimal cannot preserve the precision. */ + TestCase.assertTrue(!f10.equals(o.f10)); + TestCase.assertEquals(f10.compareTo(o.f10), 0); + TestCase.assertEquals(f10.stripTrailingZeros(), o.f10); + TestCase.assertEquals(f11, o.f11); + TestCase.assertEquals(f12, o.f12); + TestCase.assertEquals(f13, o.f13); + TestCase.assertEquals(f14, o.f14); + TestCase.assertEquals(f15, o.f15); + TestCase.assertEquals(f16, o.f16); + TestCase.assertEquals(f17, o.f17); + TestCase.assertEquals(f18, o.f18); + TestCase.assertEquals(f19, o.f19); + } + } + + @Test + public void testArrayTypes() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(ArrayTypes.class, new ArrayTypes()); + + checkMetadata(ArrayTypes.class.getName(), new String[][] { + {"id", "int"}, + {"f0", boolean[].class.getName()}, + {"f1", char[].class.getName()}, + {"f2", byte[].class.getName()}, + {"f3", short[].class.getName()}, + {"f4", int[].class.getName()}, + {"f5", long[].class.getName()}, + {"f6", float[].class.getName()}, + {"f7", double[].class.getName()}, + {"f8", String[].class.getName()}, + {"f9", Address[].class.getName()}, + {"f10", boolean[][][].class.getName()}, + {"f11", String[][][].class.getName()}, + }, + 0 /*priKeyIndex*/, null); + + close(); + } + + @Entity + static class ArrayTypes implements MyEntity { + + @PrimaryKey + private final int id = 1; + private final boolean[] f0 = {false, true}; + private final char[] f1 = {'a', 'b'}; + private final byte[] f2 = {1, 2}; + private final short[] f3 = {1, 2}; + private final int[] f4 = {1, 2}; + private final long[] f5 = {1, 2}; + private final float[] f6 = {1.1f, 2.2f}; + private final double[] f7 = {1.1, 2,2}; + private final String[] f8 = {"xxx", null, "yyy"}; + private final Address[] f9 = {new Address("city", "state", 123), + null, + new Address("x", "y", 444)}; + private final boolean[][][] f10 = + { + { + {false, true}, + {false, true}, + }, + null, + { + {false, true}, + {false, true}, + }, + }; + private final String[][][] f11 = + { + { + {"xxx", null, "yyy"}, + null, + {"xxx", null, "yyy"}, + }, + null, + { + {"xxx", null, "yyy"}, + null, + {"xxx", null, "yyy"}, + }, + }; + + ArrayTypes() { } + + public Object getPriKeyObject() { + return id; + } + + public void validate(Object other) { + ArrayTypes o = (ArrayTypes) other; + TestCase.assertEquals(id, o.id); + TestCase.assertTrue(Arrays.equals(f0, o.f0)); + TestCase.assertTrue(Arrays.equals(f1, o.f1)); + TestCase.assertTrue(Arrays.equals(f2, o.f2)); + TestCase.assertTrue(Arrays.equals(f3, o.f3)); + TestCase.assertTrue(Arrays.equals(f4, o.f4)); + TestCase.assertTrue(Arrays.equals(f5, o.f5)); + TestCase.assertTrue(Arrays.equals(f6, o.f6)); + TestCase.assertTrue(Arrays.equals(f7, o.f7)); + TestCase.assertTrue(Arrays.equals(f8, o.f8)); + TestCase.assertTrue(Arrays.deepEquals(f9, o.f9)); + TestCase.assertTrue(Arrays.deepEquals(f10, o.f10)); + TestCase.assertTrue(Arrays.deepEquals(f11, o.f11)); + } + } + + @Test + public void testEnumTypes() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(EnumTypes.class, new EnumTypes()); + + checkMetadata(EnumTypes.class.getName(), new String[][] { + {"f0", "int"}, + {"f1", Thread.State.class.getName()}, + {"f2", MyEnum.class.getName()}, + {"f3", Object.class.getName()}, + {"f4", MyEnumCSM.class.getName()}, + {"f5", Object.class.getName()}, + }, + 0 /*priKeyIndex*/, null); + + close(); + } + + enum MyEnum { ONE, TWO }; + + enum MyEnumCSM { + + A { + 
void Foo() { + System.out.println("This is A!"); + } + }, + + B(true) { + void Foo() { + System.out.println("This is B!"); + } + }; + + private boolean b; + + MyEnumCSM() { + this(false); + } + + MyEnumCSM(boolean b) { + this.b = b; + } + + abstract void Foo(); + }; + + @Entity + static class EnumTypes implements MyEntity { + + @PrimaryKey + private final int f0 = 1; + private final Thread.State f1 = Thread.State.RUNNABLE; + private final MyEnum f2 = MyEnum.ONE; + private final Object f3 = MyEnum.TWO; + private final MyEnumCSM f4 = MyEnumCSM.A; + private final Object f5 = MyEnumCSM.B; + + EnumTypes() { } + + public Object getPriKeyObject() { + return f0; + } + + public void validate(Object other) { + EnumTypes o = (EnumTypes) other; + TestCase.assertEquals(f0, o.f0); + TestCase.assertSame(f1, o.f1); + TestCase.assertSame(f2, o.f2); + TestCase.assertSame(f3, o.f3); + TestCase.assertSame(f4, o.f4); + TestCase.assertSame(f5, o.f5); + } + } + + @Test + public void testEnumObjectTypes() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(EnumObjectTypes.class, new EnumObjectTypes()); + + checkMetadata(EnumObjectTypes.class.getName(), new String[][] { + {"f0", "int"}, + {"f1", Object.class.getName()}, + {"f2", Object.class.getName()}, + }, + 0 /*priKeyIndex*/, null); + + close(); + } + + @Entity + static class EnumObjectTypes implements MyEntity { + + @PrimaryKey + private final int f0 = 1; + private final Object f1 = MyEnum.ONE; + private final Object f2 = MyEnumCSM.A; + + EnumObjectTypes() { } + + public Object getPriKeyObject() { + return f0; + } + + public void validate(Object other) { + EnumObjectTypes o = (EnumObjectTypes) other; + TestCase.assertEquals(f0, o.f0); + TestCase.assertSame(f1, o.f1); + TestCase.assertSame(f2, o.f2); + } + } + + @Test + public void testProxyTypes() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(ProxyTypes.class, new ProxyTypes()); + + checkMetadata(ProxyTypes.class.getName(), new String[][] { + {"f0", "int"}, + {"f1", Locale.class.getName()}, + {"f2", Set.class.getName()}, + {"f3", Set.class.getName()}, + {"f4", Object.class.getName()}, + {"f5", HashMap.class.getName()}, + {"f6", TreeMap.class.getName()}, + {"f7", List.class.getName()}, + {"f8", LinkedList.class.getName()}, + {"f9", LocalizedText.class.getName()}, + {"f10", LinkedHashMap.class.getName()}, + }, + 0 /*priKeyIndex*/, null); + + close(); + } + + @Entity + static class ProxyTypes implements MyEntity { + + @PrimaryKey + private final int f0 = 1; + private final Locale f1 = Locale.getDefault(); + private final Set f2 = new HashSet(); + private final Set f3 = new TreeSet(); + private final Object f4 = new HashSet

<Address>(); + private final HashMap<String, Integer> f5 = + new HashMap<String, Integer>(); + private final TreeMap<String, Address> f6 = + new TreeMap<String, Address>(); + private final List<Integer> f7 = new ArrayList<Integer>(); + private final LinkedList<Integer> f8 = new LinkedList<Integer>(); + private final LocalizedText f9 = new LocalizedText(f1, "xyz"); + private final LinkedHashMap<String, Integer> f10 = + new LinkedHashMap<String, Integer>(); + + ProxyTypes() { + f2.add(123); + f2.add(456); + f3.add(456); + f3.add(123); + HashSet<Address>
        s = (HashSet) f4; + s.add(new Address("city", "state", 11111)); + s.add(new Address("city2", "state2", 22222)); + s.add(new Address("city3", "state3", 33333)); + f5.put("one", 111); + f5.put("two", 222); + f5.put("three", 333); + f6.put("one", new Address("city", "state", 11111)); + f6.put("two", new Address("city2", "state2", 22222)); + f6.put("three", new Address("city3", "state3", 33333)); + f7.add(123); + f7.add(456); + f8.add(123); + f8.add(456); + f10.put("one", 111); + f10.put("two", 222); + f10.put("three", 333); + } + + public Object getPriKeyObject() { + return f0; + } + + public void validate(Object other) { + ProxyTypes o = (ProxyTypes) other; + TestCase.assertEquals(f0, o.f0); + TestCase.assertEquals(f1, o.f1); + TestCase.assertEquals(f2, o.f2); + TestCase.assertEquals(f3, o.f3); + TestCase.assertEquals(f4, o.f4); + TestCase.assertEquals(f5, o.f5); + TestCase.assertEquals(f6, o.f6); + TestCase.assertEquals(f7, o.f7); + TestCase.assertEquals(f8, o.f8); + TestCase.assertEquals(f9, o.f9); + TestCase.assertEquals(f10, o.f10); + } + } + + @Persistent(proxyFor=Locale.class) + static class LocaleProxy implements PersistentProxy { + + String language; + String country; + String variant; + + private LocaleProxy() {} + + public void initializeProxy(Locale object) { + language = object.getLanguage(); + country = object.getCountry(); + variant = object.getVariant(); + } + + public Locale convertProxy() { + return new Locale(language, country, variant); + } + } + + static class LocalizedText { + + Locale locale; + String text; + + LocalizedText(Locale locale, String text) { + this.locale = locale; + this.text = text; + } + + @Override + public boolean equals(Object other) { + LocalizedText o = (LocalizedText) other; + return text.equals(o.text) && + locale.equals(o.locale); + } + } + + @Persistent(proxyFor=LocalizedText.class) + static class LocalizedTextProxy implements PersistentProxy { + + Locale locale; + String text; + + private LocalizedTextProxy() {} + + public void initializeProxy(LocalizedText object) { + locale = object.locale; + text = object.text; + } + + public LocalizedText convertProxy() { + return new LocalizedText(locale, text); + } + } + + @Test + public void testEmbedded() + throws FileNotFoundException, DatabaseException { + + open(); + + Address a1 = new Address("city", "state", 123); + Address a2 = new Address("Wikieup", "AZ", 85360); + + checkEntity(Embedded.class, + new Embedded("x", a1, a2)); + checkEntity(Embedded.class, + new Embedded("y", a1, null)); + checkEntity(Embedded.class, + new Embedded("", a2, a2)); + + checkMetadata(Embedded.class.getName(), new String[][] { + {"id", "java.lang.String"}, + {"idShadow", "java.lang.String"}, + {"one", Address.class.getName()}, + {"two", Address.class.getName()}, + }, + 0 /*priKeyIndex*/, null); + + checkMetadata(Address.class.getName(), new String[][] { + {"street", "java.lang.String"}, + {"city", "java.lang.String"}, + {"zip", "int"}, + }, + -1 /*priKeyIndex*/, null); + + close(); + } + + @Entity + static class Embedded implements MyEntity { + + @PrimaryKey + private String id; + private String idShadow; + private Address one; + private Address two; + + private Embedded() { } + + private Embedded(String id, Address one, Address two) { + this.id = id; + idShadow = id; + this.one = one; + this.two = two; + } + + public Object getPriKeyObject() { + return id; + } + + public void validate(Object other) { + Embedded o = (Embedded) other; + TestCase.assertEquals(id, o.id); + if (one != null) { + one.validate(o.one); + 
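The identity check a few lines below (if one == two, then assertSame) is the key assertion in this validate method: the bindings write a repeated embedded object once and restore it as a single instance, so reference identity within one entity survives the round trip. A sketch of the property, assuming the Embedded and Address classes defined nearby:

    // Hypothetical: both fields reference one Address instance.
    Address shared = new Address("city", "state", 123);
    Embedded before = new Embedded("key", shared, shared);
    // After a store/load round trip (e.g. via checkEntity above), the
    // loaded copy satisfies loaded.one == loaded.two, not merely equals().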
} else { + assertNull(o.one); + } + if (two != null) { + two.validate(o.two); + } else { + assertNull(o.two); + } + if (one == two) { + TestCase.assertSame(o.one, o.two); + } + } + + @Override + public String toString() { + return "" + id + ' ' + one + ' ' + two; + } + } + + @Persistent + static class Address { + + private String street; + private String city; + private int zip; + + private Address() {} + + Address(String street, String city, int zip) { + this.street = street; + this.city = city; + this.zip = zip; + } + + void validate(Address o) { + TestCase.assertTrue(nullOrEqual(street, o.street)); + TestCase.assertTrue(nullOrEqual(city, o.city)); + TestCase.assertEquals(zip, o.zip); + } + + @Override + public String toString() { + return "" + street + ' ' + city + ' ' + zip; + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + Address o = (Address) other; + return nullOrEqual(street, o.street) && + nullOrEqual(city, o.city) && + nullOrEqual(zip, o.zip); + } + + @Override + public int hashCode() { + return zip; + } + } + + @Test + public void testSubclass() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(Basic.class, + new Subclass(-1, "xxx", -2, "xxx", "xxx", true)); + + checkMetadata(Basic.class.getName(), new String[][] { + {"id", "long"}, + {"one", "java.lang.String"}, + {"two", "double"}, + {"three", "java.lang.String"}, + }, + 0 /*priKeyIndex*/, null); + checkMetadata(Subclass.class.getName(), new String[][] { + {"one", "java.lang.String"}, + {"two", "boolean"}, + }, + -1 /*priKeyIndex*/, Basic.class.getName()); + + close(); + } + + @Persistent + static class Subclass extends Basic { + + private String one; + private boolean two; + + private Subclass() { + } + + private Subclass(long id, String one, double two, String three, + String subOne, boolean subTwo) { + super(id, one, two, three); + this.one = subOne; + this.two = subTwo; + } + + @Override + public void validate(Object other) { + super.validate(other); + Subclass o = (Subclass) other; + TestCase.assertTrue(nullOrEqual(one, o.one)); + TestCase.assertEquals(two, o.two); + } + } + + @Test + public void testSuperclass() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(UseSuperclass.class, + new UseSuperclass(33, "xxx")); + + checkMetadata(Superclass.class.getName(), new String[][] { + {"id", "int"}, + {"one", "java.lang.String"}, + }, + 0 /*priKeyIndex*/, null); + checkMetadata(UseSuperclass.class.getName(), new String[][] { + }, + -1 /*priKeyIndex*/, Superclass.class.getName()); + + close(); + } + + @Persistent + static class Superclass implements MyEntity { + + @PrimaryKey + private int id; + private String one; + + private Superclass() { } + + private Superclass(int id, String one) { + this.id = id; + this.one = one; + } + + public Object getPriKeyObject() { + return id; + } + + public void validate(Object other) { + Superclass o = (Superclass) other; + TestCase.assertEquals(id, o.id); + TestCase.assertTrue(nullOrEqual(one, o.one)); + } + } + + @Entity + static class UseSuperclass extends Superclass { + + private UseSuperclass() { } + + private UseSuperclass(int id, String one) { + super(id, one); + } + } + + @Test + public void testAbstract() + throws FileNotFoundException, DatabaseException { + + open(); + + checkEntity(EntityUseAbstract.class, + new EntityUseAbstract(33, "xxx")); + + checkMetadata(Abstract.class.getName(), new String[][] { + {"one", "java.lang.String"}, + }, + -1 /*priKeyIndex*/, null); + 
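The priKeyIndex and superClsName arguments used throughout these checkMetadata calls encode the class hierarchy: each class reports only the fields it declares, -1 means the primary key is declared elsewhere in the chain, and the final argument names the persistent superclass. A short sketch against the live model, reusing the test's model field:

    // Subclass declares no primary key of its own; its superclass Basic does.
    ClassMetadata subMeta = model.getClassMetadata(Subclass.class.getName());
    TestCase.assertNull(subMeta.getPrimaryKey());
    ClassMetadata baseMeta = model.getClassMetadata(Basic.class.getName());
    TestCase.assertNotNull(baseMeta.getPrimaryKey());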
checkMetadata(EmbeddedUseAbstract.class.getName(), new String[][] { + {"two", "java.lang.String"}, + }, + -1 /*priKeyIndex*/, Abstract.class.getName()); + checkMetadata(EntityUseAbstract.class.getName(), new String[][] { + {"id", "int"}, + {"f1", EmbeddedUseAbstract.class.getName()}, + {"f2", Abstract.class.getName()}, + {"f3", Object.class.getName()}, + {"f4", Interface.class.getName()}, + {"a1", EmbeddedUseAbstract[].class.getName()}, + {"a2", Abstract[].class.getName()}, + {"a3", Abstract[].class.getName()}, + {"a4", Object[].class.getName()}, + {"a5", Interface[].class.getName()}, + {"a6", Interface[].class.getName()}, + {"a7", Interface[].class.getName()}, + }, + 0 /*priKeyIndex*/, Abstract.class.getName()); + + close(); + } + + @Persistent + static abstract class Abstract implements Interface { + + String one; + + private Abstract() { } + + private Abstract(String one) { + this.one = one; + } + + public void validate(Object other) { + Abstract o = (Abstract) other; + TestCase.assertTrue(nullOrEqual(one, o.one)); + } + + @Override + public boolean equals(Object other) { + Abstract o = (Abstract) other; + return nullOrEqual(one, o.one); + } + } + + interface Interface { + void validate(Object other); + } + + @Persistent + static class EmbeddedUseAbstract extends Abstract { + + private String two; + + private EmbeddedUseAbstract() { } + + private EmbeddedUseAbstract(String one, String two) { + super(one); + this.two = two; + } + + @Override + public void validate(Object other) { + super.validate(other); + EmbeddedUseAbstract o = (EmbeddedUseAbstract) other; + TestCase.assertTrue(nullOrEqual(two, o.two)); + } + + @Override + public boolean equals(Object other) { + if (!super.equals(other)) { + return false; + } + EmbeddedUseAbstract o = (EmbeddedUseAbstract) other; + return nullOrEqual(two, o.two); + } + } + + @Entity + static class EntityUseAbstract extends Abstract implements MyEntity { + + @PrimaryKey + private int id; + + private EmbeddedUseAbstract f1; + private Abstract f2; + private Object f3; + private Interface f4; + private EmbeddedUseAbstract[] a1; + private Abstract[] a2; + private Abstract[] a3; + private Object[] a4; + private Interface[] a5; + private Interface[] a6; + private Interface[] a7; + + private EntityUseAbstract() { } + + private EntityUseAbstract(int id, String one) { + super(one); + this.id = id; + f1 = new EmbeddedUseAbstract(one, one); + f2 = new EmbeddedUseAbstract(one + "x", one + "y"); + f3 = new EmbeddedUseAbstract(null, null); + f4 = new EmbeddedUseAbstract(null, null); + a1 = new EmbeddedUseAbstract[3]; + a2 = new EmbeddedUseAbstract[3]; + a3 = new Abstract[3]; + a4 = new Object[3]; + a5 = new EmbeddedUseAbstract[3]; + a6 = new Abstract[3]; + a7 = new Interface[3]; + for (int i = 0; i < 3; i += 1) { + a1[i] = new EmbeddedUseAbstract("1" + i, null); + a2[i] = new EmbeddedUseAbstract("2" + i, null); + a3[i] = new EmbeddedUseAbstract("3" + i, null); + a4[i] = new EmbeddedUseAbstract("4" + i, null); + a5[i] = new EmbeddedUseAbstract("5" + i, null); + a6[i] = new EmbeddedUseAbstract("6" + i, null); + a7[i] = new EmbeddedUseAbstract("7" + i, null); + } + } + + public Object getPriKeyObject() { + return id; + } + + @Override + public void validate(Object other) { + super.validate(other); + EntityUseAbstract o = (EntityUseAbstract) other; + TestCase.assertEquals(id, o.id); + f1.validate(o.f1); + f2.validate(o.f2); + ((Abstract) f3).validate(o.f3); + f4.validate(o.f4); + assertTrue(arrayToString(a1) + ' ' + arrayToString(o.a1), + Arrays.equals(a1, o.a1)); + 
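The assertSame checks that follow rest on a Java rule worth spelling out: an array's runtime class is fixed by the new expression, not by the declared field type or by what the elements happen to be, which is why the binding must record the array class itself. A compact restatement:

    // Covariance: an EmbeddedUseAbstract[] may be held in an Abstract[]
    // variable, but its runtime class does not change.
    Abstract[] x = new EmbeddedUseAbstract[3];
    TestCase.assertSame(EmbeddedUseAbstract[].class, x.getClass());
    Abstract[] y = new Abstract[3];  // elements may still be subclass instances
    TestCase.assertSame(Abstract[].class, y.getClass());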
assertTrue(Arrays.equals(a2, o.a2)); + assertTrue(Arrays.equals(a3, o.a3)); + assertTrue(Arrays.equals(a4, o.a4)); + assertTrue(Arrays.equals(a5, o.a5)); + assertTrue(Arrays.equals(a6, o.a6)); + assertTrue(Arrays.equals(a7, o.a7)); + assertSame(EmbeddedUseAbstract.class, f2.getClass()); + assertSame(EmbeddedUseAbstract.class, f3.getClass()); + assertSame(EmbeddedUseAbstract[].class, a1.getClass()); + assertSame(EmbeddedUseAbstract[].class, a2.getClass()); + assertSame(Abstract[].class, a3.getClass()); + assertSame(Object[].class, a4.getClass()); + assertSame(EmbeddedUseAbstract[].class, a5.getClass()); + assertSame(Abstract[].class, a6.getClass()); + assertSame(Interface[].class, a7.getClass()); + } + } + + @Test + public void testCompositeKey() + throws FileNotFoundException, DatabaseException { + + open(); + + CompositeKey key = + new CompositeKey(123, 456L, "xyz", BigInteger.valueOf(789), + MyEnum.ONE, MyEnumCSM.A, + BigDecimal.valueOf(123.123)); + checkEntity(UseCompositeKey.class, + new UseCompositeKey(key, "one")); + + checkMetadata(UseCompositeKey.class.getName(), new String[][] { + {"key", CompositeKey.class.getName()}, + {"one", "java.lang.String"}, + }, + 0 /*priKeyIndex*/, null); + + checkMetadata(CompositeKey.class.getName(), new String[][] { + {"f1", "int"}, + {"f2", "java.lang.Long"}, + {"f3", "java.lang.String"}, + {"f4", "java.math.BigInteger"}, + {"f5", MyEnum.class.getName()}, + {"f6", MyEnumCSM.class.getName()}, + {"f7", BigDecimal.class.getName()}, + }, + -1 /*priKeyIndex*/, null); + + close(); + } + + @Persistent + static class CompositeKey { + @KeyField(3) + private int f1; + @KeyField(2) + private Long f2; + @KeyField(1) + private String f3; + @KeyField(4) + private BigInteger f4; + @KeyField(5) + private MyEnum f5; + @KeyField(6) + private MyEnumCSM f6; + @KeyField(7) + private BigDecimal f7; + + private CompositeKey() {} + + CompositeKey(int f1, + Long f2, + String f3, + BigInteger f4, + MyEnum f5, + MyEnumCSM f6, + BigDecimal f7) { + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + this.f4 = f4; + this.f5 = f5; + this.f6 = f6; + this.f7 = f7; + } + + void validate(CompositeKey o) { + TestCase.assertEquals(f1, o.f1); + TestCase.assertTrue(nullOrEqual(f2, o.f2)); + TestCase.assertTrue(nullOrEqual(f3, o.f3)); + TestCase.assertTrue(nullOrEqual(f4, o.f4)); + TestCase.assertEquals(f5, o.f5); + TestCase.assertTrue(nullOrEqual(f5, o.f5)); + TestCase.assertEquals(f5, o.f5); + TestCase.assertTrue(nullOrEqual(f6, o.f6)); + TestCase.assertTrue(nullOrEqual(f7, o.f7)); + } + + @Override + public boolean equals(Object other) { + CompositeKey o = (CompositeKey) other; + return f1 == o.f1 && + nullOrEqual(f2, o.f2) && + nullOrEqual(f3, o.f3) && + nullOrEqual(f4, o.f4) && + nullOrEqual(f5, o.f5) && + nullOrEqual(f6, o.f6) && + nullOrEqual(f7, o.f7); + } + + @Override + public int hashCode() { + return f1; + } + + @Override + public String toString() { + return "" + f1 + ' ' + f2 + ' ' + f3 + ' ' + f4 + ' ' + f5 + ' ' + + f6 + ' ' + f7; + } + } + + @Entity + static class UseCompositeKey implements MyEntity { + + @PrimaryKey + private CompositeKey key; + private String one; + + private UseCompositeKey() { } + + private UseCompositeKey(CompositeKey key, String one) { + this.key = key; + this.one = one; + } + + public Object getPriKeyObject() { + return key; + } + + public void validate(Object other) { + UseCompositeKey o = (UseCompositeKey) other; + TestCase.assertNotNull(key); + TestCase.assertNotNull(o.key); + key.validate(o.key); + TestCase.assertTrue(nullOrEqual(one, o.one)); + } + 
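A note on the @KeyField numbering above: the stored key sorts by field number, not declaration order, so CompositeKey compares f3 (KeyField 1) first, then f2, then f1, and so on. A sketch of the consequence, with made-up values:

    // "abc" vs. "abd" on KeyField 1 decides the order, even though the
    // f1 values (5 vs. 0) would sort the other way on their own.
    CompositeKey lower = new CompositeKey(5, 1L, "abc", BigInteger.ONE,
                                          MyEnum.ONE, MyEnumCSM.A,
                                          BigDecimal.ONE);
    CompositeKey higher = new CompositeKey(0, 0L, "abd", BigInteger.ZERO,
                                           MyEnum.ONE, MyEnumCSM.A,
                                           BigDecimal.ONE);
    // In stored byte order, lower precedes higher.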
} + + @Test + public void testComparableKey() + throws FileNotFoundException, DatabaseException { + + open(); + + ComparableKey key = new ComparableKey(123, 456); + checkEntity(UseComparableKey.class, + new UseComparableKey(key, "one")); + + checkMetadata(UseComparableKey.class.getName(), new String[][] { + {"key", ComparableKey.class.getName()}, + {"one", "java.lang.String"}, + }, + 0 /*priKeyIndex*/, null); + + checkMetadata(ComparableKey.class.getName(), new String[][] { + {"f1", "int"}, + {"f2", "int"}, + }, + -1 /*priKeyIndex*/, null); + + ClassMetadata classMeta = + model.getClassMetadata(UseComparableKey.class.getName()); + assertNotNull(classMeta); + + PersistKeyBinding binding = new PersistKeyBinding + (catalog, ComparableKey.class.getName(), false); + + PersistComparator comparator = new PersistComparator(binding); + + compareKeys(comparator, binding, new ComparableKey(1, 1), + new ComparableKey(1, 1), 0); + compareKeys(comparator, binding, new ComparableKey(1, 2), + new ComparableKey(1, 1), -1); + compareKeys(comparator, binding, new ComparableKey(2, 1), + new ComparableKey(1, 1), -1); + compareKeys(comparator, binding, new ComparableKey(2, 1), + new ComparableKey(3, 1), 1); + + close(); + } + + private void compareKeys(Comparator comparator, + EntryBinding binding, + Object key1, + Object key2, + int expectResult) { + DatabaseEntry entry1 = new DatabaseEntry(); + DatabaseEntry entry2 = new DatabaseEntry(); + binding.objectToEntry(key1, entry1); + binding.objectToEntry(key2, entry2); + int result = comparator.compare(entry1.getData(), entry2.getData()); + assertEquals(expectResult, result); + } + + @Persistent + static class ComparableKey implements Comparable { + @KeyField(2) + private int f1; + @KeyField(1) + private int f2; + + private ComparableKey() {} + + ComparableKey(int f1, int f2) { + this.f1 = f1; + this.f2 = f2; + } + + void validate(ComparableKey o) { + TestCase.assertEquals(f1, o.f1); + TestCase.assertEquals(f2, o.f2); + } + + @Override + public boolean equals(Object other) { + ComparableKey o = (ComparableKey) other; + return f1 == o.f1 && f2 == o.f2; + } + + @Override + public int hashCode() { + return f1 + f2; + } + + @Override + public String toString() { + return "" + f1 + ' ' + f2; + } + + /** Compare f1 then f2, in reverse integer order. 
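Note: the subtraction idiom below (o.f1 - f1) can overflow and invert the result when the operands are more than Integer.MAX_VALUE apart; Integer.compare(o.f1, f1) yields the same reverse ordering without that risk.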
*/ + public int compareTo(ComparableKey o) { + if (f1 != o.f1) { + return o.f1 - f1; + } else { + return o.f2 - f2; + } + } + } + + @Entity + static class UseComparableKey implements MyEntity { + + @PrimaryKey + private ComparableKey key; + private String one; + + private UseComparableKey() { } + + private UseComparableKey(ComparableKey key, String one) { + this.key = key; + this.one = one; + } + + public Object getPriKeyObject() { + return key; + } + + public void validate(Object other) { + UseComparableKey o = (UseComparableKey) other; + TestCase.assertNotNull(key); + TestCase.assertNotNull(o.key); + key.validate(o.key); + TestCase.assertTrue(nullOrEqual(one, o.one)); + } + } + + @Test + public void testSecKeys() + throws FileNotFoundException, DatabaseException { + + open(); + + SecKeys obj = new SecKeys(); + checkEntity(SecKeys.class, obj); + + checkMetadata(SecKeys.class.getName(), new String[][] { + {"id", "long"}, + {"f0", "boolean"}, + {"g0", "boolean"}, + {"f1", "char"}, + {"g1", "char"}, + {"f2", "byte"}, + {"g2", "byte"}, + {"f3", "short"}, + {"g3", "short"}, + {"f4", "int"}, + {"g4", "int"}, + {"f5", "long"}, + {"g5", "long"}, + {"f6", "float"}, + {"g6", "float"}, + {"f7", "double"}, + {"g7", "double"}, + {"f8", "java.lang.String"}, + {"g8", "java.lang.String"}, + {"f9", "java.math.BigInteger"}, + {"g9", "java.math.BigInteger"}, + {"f10", "java.math.BigDecimal"}, + {"g10", "java.math.BigDecimal"}, + {"f11", "java.util.Date"}, + {"g11", "java.util.Date"}, + {"f12", "java.lang.Boolean"}, + {"g12", "java.lang.Boolean"}, + {"f13", "java.lang.Character"}, + {"g13", "java.lang.Character"}, + {"f14", "java.lang.Byte"}, + {"g14", "java.lang.Byte"}, + {"f15", "java.lang.Short"}, + {"g15", "java.lang.Short"}, + {"f16", "java.lang.Integer"}, + {"g16", "java.lang.Integer"}, + {"f17", "java.lang.Long"}, + {"g17", "java.lang.Long"}, + {"f18", "java.lang.Float"}, + {"g18", "java.lang.Float"}, + {"f19", "java.lang.Double"}, + {"g19", "java.lang.Double"}, + {"f20", CompositeKey.class.getName()}, + {"g20", CompositeKey.class.getName()}, + {"f21", int[].class.getName()}, + {"g21", int[].class.getName()}, + {"f22", Integer[].class.getName()}, + {"g22", Integer[].class.getName()}, + {"f23", Set.class.getName()}, + {"g23", Set.class.getName()}, + {"f24", CompositeKey[].class.getName()}, + {"g24", CompositeKey[].class.getName()}, + {"f25", Set.class.getName()}, + {"g25", Set.class.getName()}, + {"f26", MyEnum.class.getName()}, + {"g26", MyEnum.class.getName()}, + {"f27", MyEnum[].class.getName()}, + {"g27", MyEnum[].class.getName()}, + {"f28", Set.class.getName()}, + {"g28", Set.class.getName()}, + {"f31", "java.util.Date"}, + {"f32", "java.lang.Boolean"}, + {"f33", "java.lang.Character"}, + {"f34", "java.lang.Byte"}, + {"f35", "java.lang.Short"}, + {"f36", "java.lang.Integer"}, + {"f37", "java.lang.Long"}, + {"f38", "java.lang.Float"}, + {"f39", "java.lang.Double"}, + {"f40", CompositeKey.class.getName()}, + {"f41", MyEnumCSM.class.getName()}, + {"g41", MyEnumCSM.class.getName()}, + {"f42", MyEnumCSM[].class.getName()}, + {"g42", MyEnumCSM[].class.getName()}, + {"f43", Set.class.getName()}, + {"g43", Set.class.getName()}, + }, + 0 /*priKeyIndex*/, null); + + checkSecKey(obj, "f0", obj.f0, Boolean.class); + checkSecKey(obj, "f1", obj.f1, Character.class); + checkSecKey(obj, "f2", obj.f2, Byte.class); + checkSecKey(obj, "f3", obj.f3, Short.class); + checkSecKey(obj, "f4", obj.f4, Integer.class); + checkSecKey(obj, "f5", obj.f5, Long.class); + checkSecKey(obj, "f6", obj.f6, Float.class); + 
checkSecKey(obj, "f7", obj.f7, Double.class); + checkSecKey(obj, "f8", obj.f8, String.class); + checkSecKey(obj, "f9", obj.f9, BigInteger.class); + checkSecKey(obj, "f10", obj.f10, BigDecimal.class); + checkSecKey(obj, "f11", obj.f11, Date.class); + checkSecKey(obj, "f12", obj.f12, Boolean.class); + checkSecKey(obj, "f13", obj.f13, Character.class); + checkSecKey(obj, "f14", obj.f14, Byte.class); + checkSecKey(obj, "f15", obj.f15, Short.class); + checkSecKey(obj, "f16", obj.f16, Integer.class); + checkSecKey(obj, "f17", obj.f17, Long.class); + checkSecKey(obj, "f18", obj.f18, Float.class); + checkSecKey(obj, "f19", obj.f19, Double.class); + checkSecKey(obj, "f20", obj.f20, CompositeKey.class); + checkSecKey(obj, "f26", obj.f26, MyEnum.class); + checkSecKey(obj, "f41", obj.f41, MyEnumCSM.class); + + checkSecMultiKey(obj, "f21", toSet(obj.f21), Integer.class); + checkSecMultiKey(obj, "f22", toSet(obj.f22), Integer.class); + checkSecMultiKey(obj, "f23", toSet(obj.f23), Integer.class); + checkSecMultiKey(obj, "f24", toSet(obj.f24), CompositeKey.class); + checkSecMultiKey(obj, "f25", toSet(obj.f25), CompositeKey.class); + checkSecMultiKey(obj, "f27", toSet(obj.f27), MyEnum.class); + checkSecMultiKey(obj, "f28", toSet(obj.f28), MyEnum.class); + checkSecMultiKey(obj, "f42", toSet(obj.f42), MyEnumCSM.class); + checkSecMultiKey(obj, "f43", toSet(obj.f43), MyEnumCSM.class); + + nullifySecKey(obj, "f8", obj.f8, String.class); + nullifySecKey(obj, "f9", obj.f9, BigInteger.class); + nullifySecKey(obj, "f10", obj.f10, BigDecimal.class); + nullifySecKey(obj, "f11", obj.f11, Date.class); + nullifySecKey(obj, "f12", obj.f12, Boolean.class); + nullifySecKey(obj, "f13", obj.f13, Character.class); + nullifySecKey(obj, "f14", obj.f14, Byte.class); + nullifySecKey(obj, "f15", obj.f15, Short.class); + nullifySecKey(obj, "f16", obj.f16, Integer.class); + nullifySecKey(obj, "f17", obj.f17, Long.class); + nullifySecKey(obj, "f18", obj.f18, Float.class); + nullifySecKey(obj, "f19", obj.f19, Double.class); + nullifySecKey(obj, "f20", obj.f20, CompositeKey.class); + nullifySecKey(obj, "f26", obj.f26, MyEnum.class); + nullifySecKey(obj, "f41", obj.f41, MyEnumCSM.class); + + nullifySecMultiKey(obj, "f21", obj.f21, Integer.class); + nullifySecMultiKey(obj, "f22", obj.f22, Integer.class); + nullifySecMultiKey(obj, "f23", obj.f23, Integer.class); + nullifySecMultiKey(obj, "f24", obj.f24, CompositeKey.class); + nullifySecMultiKey(obj, "f25", obj.f25, CompositeKey.class); + nullifySecMultiKey(obj, "f27", obj.f27, MyEnum.class); + nullifySecMultiKey(obj, "f28", obj.f28, MyEnum.class); + nullifySecMultiKey(obj, "f42", obj.f42, MyEnumCSM.class); + nullifySecMultiKey(obj, "f43", obj.f43, MyEnumCSM.class); + + nullifySecKey(obj, "f31", obj.f31, Date.class); + nullifySecKey(obj, "f32", obj.f32, Boolean.class); + nullifySecKey(obj, "f33", obj.f33, Character.class); + nullifySecKey(obj, "f34", obj.f34, Byte.class); + nullifySecKey(obj, "f35", obj.f35, Short.class); + nullifySecKey(obj, "f36", obj.f36, Integer.class); + nullifySecKey(obj, "f37", obj.f37, Long.class); + nullifySecKey(obj, "f38", obj.f38, Float.class); + nullifySecKey(obj, "f39", obj.f39, Double.class); + nullifySecKey(obj, "f40", obj.f40, CompositeKey.class); + + close(); + } + + static Set toSet(int[] a) { + Set set = new HashSet(); + for (int i : a) { + set.add(i); + } + return set; + } + + static Set toSet(Object[] a) { + return new HashSet(Arrays.asList(a)); + } + + static Set toSet(Set s) { + return s; + } + + @Entity + static class SecKeys implements MyEntity { 
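Each @SecondaryKey field below becomes a named index at the store level, while the paired, unannotated g-fields verify that the annotation, not the field's shape, is what creates a key. A hedged sketch of how such fields surface through an EntityStore (the store variable is hypothetical; this test drives the bindings directly):

    PrimaryIndex<Long, SecKeys> primary =
        store.getPrimaryIndex(Long.class, SecKeys.class);
    // MANY_TO_ONE scalar key: one index entry per entity.
    SecondaryIndex<Integer, Long, SecKeys> byF4 =
        store.getSecondaryIndex(primary, Integer.class, "f4");
    // ONE_TO_MANY int[] key: one index entry per array element.
    SecondaryIndex<Integer, Long, SecKeys> byF21 =
        store.getSecondaryIndex(primary, Integer.class, "f21");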
+ + @PrimaryKey + long id; + + @SecondaryKey(relate=MANY_TO_ONE) + private final boolean f0 = false; + private final boolean g0 = false; + + @SecondaryKey(relate=MANY_TO_ONE) + private final char f1 = '1'; + private final char g1 = '1'; + + @SecondaryKey(relate=MANY_TO_ONE) + private final byte f2 = 2; + private final byte g2 = 2; + + @SecondaryKey(relate=MANY_TO_ONE) + private final short f3 = 3; + private final short g3 = 3; + + @SecondaryKey(relate=MANY_TO_ONE) + private final int f4 = 4; + private final int g4 = 4; + + @SecondaryKey(relate=MANY_TO_ONE) + private final long f5 = 5; + private final long g5 = 5; + + @SecondaryKey(relate=MANY_TO_ONE) + private final float f6 = 6.6f; + private final float g6 = 6.6f; + + @SecondaryKey(relate=MANY_TO_ONE) + private final double f7 = 7.7; + private final double g7 = 7.7; + + @SecondaryKey(relate=MANY_TO_ONE) + private final String f8 = "8"; + private final String g8 = "8"; + + @SecondaryKey(relate=MANY_TO_ONE) + private BigInteger f9; + private BigInteger g9; + + @SecondaryKey(relate=MANY_TO_ONE) + private BigDecimal f10; + private BigDecimal g10; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Date f11 = new Date(11); + private final Date g11 = new Date(11); + + @SecondaryKey(relate=MANY_TO_ONE) + private final Boolean f12 = true; + private final Boolean g12 = true; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Character f13 = '3'; + private final Character g13 = '3'; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Byte f14 = 14; + private final Byte g14 = 14; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Short f15 = 15; + private final Short g15 = 15; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Integer f16 = 16; + private final Integer g16 = 16; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Long f17= 17L; + private final Long g17= 17L; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Float f18 = 18.18f; + private final Float g18 = 18.18f; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Double f19 = 19.19; + private final Double g19 = 19.19; + + @SecondaryKey(relate=MANY_TO_ONE) + private final CompositeKey f20 = + new CompositeKey(20, 20L, "20", BigInteger.valueOf(20), + MyEnum.ONE, MyEnumCSM.A, + BigDecimal.valueOf(123.123)); + private final CompositeKey g20 = + new CompositeKey(20, 20L, "20", BigInteger.valueOf(20), + MyEnum.TWO, MyEnumCSM.B, + BigDecimal.valueOf(123.123)); + + private static int[] arrayOfInt = { 100, 101, 102 }; + + private static Integer[] arrayOfInteger = { 100, 101, 102 }; + + private static CompositeKey[] arrayOfCompositeKey = { + new CompositeKey(100, 100L, "100", BigInteger.valueOf(100), + MyEnum.ONE, MyEnumCSM.A, + BigDecimal.valueOf(123.123)), + new CompositeKey(101, 101L, "101", BigInteger.valueOf(101), + MyEnum.TWO, MyEnumCSM.B, + BigDecimal.valueOf(123.123)), + new CompositeKey(102, 102L, "102", BigInteger.valueOf(102), + MyEnum.TWO, MyEnumCSM.B, + BigDecimal.valueOf(123.123)), + }; + + private static MyEnum[] arrayOfEnum = + new MyEnum[] { MyEnum.ONE, MyEnum.TWO }; + + private static MyEnumCSM[] arrayOfEnumCSM = + new MyEnumCSM[] { MyEnumCSM.A, MyEnumCSM.B }; + + @SecondaryKey(relate=ONE_TO_MANY) + private final int[] f21 = arrayOfInt; + private final int[] g21 = f21; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Integer[] f22 = arrayOfInteger; + private final Integer[] g22 = f22; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Set f23 = toSet(arrayOfInteger); + private final Set g23 = f23; + + @SecondaryKey(relate=ONE_TO_MANY) + private 
final CompositeKey[] f24 = arrayOfCompositeKey; + private final CompositeKey[] g24 = f24; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Set f25 = toSet(arrayOfCompositeKey); + private final Set g25 = f25; + + @SecondaryKey(relate=MANY_TO_ONE) + private final MyEnum f26 = MyEnum.TWO; + private final MyEnum g26 = f26; + + @SecondaryKey(relate=ONE_TO_MANY) + private final MyEnum[] f27 = arrayOfEnum; + private final MyEnum[] g27 = f27; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Set f28 = toSet(arrayOfEnum); + private final Set g28 = f28; + + @SecondaryKey(relate=MANY_TO_ONE) + private final MyEnumCSM f41 = MyEnumCSM.B; + private final MyEnumCSM g41 = f41; + + @SecondaryKey(relate=ONE_TO_MANY) + private final MyEnumCSM[] f42 = arrayOfEnumCSM; + private final MyEnumCSM[] g42 = f42; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Set f43 = toSet(arrayOfEnumCSM); + private final Set g43 = f43; + + /* Repeated key values to test shared references. */ + + @SecondaryKey(relate=MANY_TO_ONE) + private final Date f31 = f11; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Boolean f32 = f12; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Character f33 = f13; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Byte f34 = f14; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Short f35 = f15; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Integer f36 = f16; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Long f37= f17; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Float f38 = f18; + + @SecondaryKey(relate=MANY_TO_ONE) + private final Double f39 = f19; + + @SecondaryKey(relate=MANY_TO_ONE) + private final CompositeKey f40 = f20; + + public Object getPriKeyObject() { + return id; + } + + public void validate(Object other) { + SecKeys o = (SecKeys) other; + TestCase.assertEquals(id, o.id); + + TestCase.assertEquals(f0, o.f0); + TestCase.assertEquals(f1, o.f1); + TestCase.assertEquals(f2, o.f2); + TestCase.assertEquals(f3, o.f3); + TestCase.assertEquals(f4, o.f4); + TestCase.assertEquals(f5, o.f5); + TestCase.assertEquals(f6, o.f6); + TestCase.assertEquals(f7, o.f7); + TestCase.assertEquals(f8, o.f8); + TestCase.assertEquals(f9, o.f9); + TestCase.assertEquals(f10, o.f10); + TestCase.assertEquals(f11, o.f11); + TestCase.assertEquals(f12, o.f12); + TestCase.assertEquals(f13, o.f13); + TestCase.assertEquals(f14, o.f14); + TestCase.assertEquals(f15, o.f15); + TestCase.assertEquals(f16, o.f16); + TestCase.assertEquals(f17, o.f17); + TestCase.assertEquals(f18, o.f18); + TestCase.assertEquals(f19, o.f19); + TestCase.assertEquals(f20, o.f20); + TestCase.assertTrue(Arrays.equals(f21, o.f21)); + TestCase.assertTrue(Arrays.equals(f22, o.f22)); + TestCase.assertEquals(f23, o.f23); + TestCase.assertTrue(Arrays.equals(f24, o.f24)); + TestCase.assertEquals(f25, o.f25); + TestCase.assertEquals(f26, o.f26); + TestCase.assertTrue(Arrays.equals(f27, o.f27)); + TestCase.assertEquals(f28, o.f28); + TestCase.assertEquals(f41, o.f41); + TestCase.assertTrue(Arrays.equals(f42, o.f42)); + TestCase.assertEquals(f43, o.f43); + + TestCase.assertEquals(g0, o.g0); + TestCase.assertEquals(g1, o.g1); + TestCase.assertEquals(g2, o.g2); + TestCase.assertEquals(g3, o.g3); + TestCase.assertEquals(g4, o.g4); + TestCase.assertEquals(g5, o.g5); + TestCase.assertEquals(g6, o.g6); + TestCase.assertEquals(g7, o.g7); + TestCase.assertEquals(g8, o.g8); + TestCase.assertEquals(g9, o.g9); + TestCase.assertEquals(g10, o.g10); + TestCase.assertEquals(g11, o.g11); + TestCase.assertEquals(g12, 
o.g12); + TestCase.assertEquals(g13, o.g13); + TestCase.assertEquals(g14, o.g14); + TestCase.assertEquals(g15, o.g15); + TestCase.assertEquals(g16, o.g16); + TestCase.assertEquals(g17, o.g17); + TestCase.assertEquals(g18, o.g18); + TestCase.assertEquals(g19, o.g19); + TestCase.assertEquals(g20, o.g20); + TestCase.assertTrue(Arrays.equals(g21, o.g21)); + TestCase.assertTrue(Arrays.equals(g22, o.g22)); + TestCase.assertEquals(g23, o.g23); + TestCase.assertTrue(Arrays.equals(g24, o.g24)); + TestCase.assertEquals(g25, o.g25); + TestCase.assertEquals(g26, o.g26); + TestCase.assertTrue(Arrays.equals(g27, o.g27)); + TestCase.assertEquals(g28, o.g28); + TestCase.assertEquals(g41, o.g41); + TestCase.assertTrue(Arrays.equals(g42, o.g42)); + TestCase.assertEquals(g43, o.g43); + + TestCase.assertEquals(f31, o.f31); + TestCase.assertEquals(f32, o.f32); + TestCase.assertEquals(f33, o.f33); + TestCase.assertEquals(f34, o.f34); + TestCase.assertEquals(f35, o.f35); + TestCase.assertEquals(f36, o.f36); + TestCase.assertEquals(f37, o.f37); + TestCase.assertEquals(f38, o.f38); + TestCase.assertEquals(f39, o.f39); + TestCase.assertEquals(f40, o.f40); + + checkSameIfNonNull(o.f31, o.f11); + checkSameIfNonNull(o.f32, o.f12); + checkSameIfNonNull(o.f33, o.f13); + checkSameIfNonNull(o.f34, o.f14); + checkSameIfNonNull(o.f35, o.f15); + checkSameIfNonNull(o.f36, o.f16); + checkSameIfNonNull(o.f37, o.f17); + checkSameIfNonNull(o.f38, o.f18); + checkSameIfNonNull(o.f39, o.f19); + checkSameIfNonNull(o.f40, o.f20); + } + } + + @Test + public void testSecKeyRefToPriKey() + throws FileNotFoundException, DatabaseException { + + open(); + + SecKeyRefToPriKey obj = new SecKeyRefToPriKey(); + checkEntity(SecKeyRefToPriKey.class, obj); + + checkMetadata(SecKeyRefToPriKey.class.getName(), new String[][] { + {"priKey", "java.lang.String"}, + {"secKey1", "java.lang.String"}, + {"secKey2", String[].class.getName()}, + {"secKey3", Set.class.getName()}, + }, + 0 /*priKeyIndex*/, null); + + checkSecKey(obj, "secKey1", obj.secKey1, String.class); + checkSecMultiKey(obj, "secKey2", toSet(obj.secKey2), String.class); + checkSecMultiKey(obj, "secKey3", toSet(obj.secKey3), String.class); + + close(); + } + + @Entity + static class SecKeyRefToPriKey implements MyEntity { + + @PrimaryKey + private final String priKey; + + @SecondaryKey(relate=ONE_TO_ONE) + private final String secKey1; + + @SecondaryKey(relate=ONE_TO_MANY) + private final String[] secKey2; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Set secKey3 = new HashSet(); + + private SecKeyRefToPriKey() { + priKey = "sharedValue"; + secKey1 = priKey; + secKey2 = new String[] { priKey }; + secKey3.add(priKey); + } + + public Object getPriKeyObject() { + return priKey; + } + + public void validate(Object other) { + SecKeyRefToPriKey o = (SecKeyRefToPriKey) other; + TestCase.assertEquals(priKey, o.priKey); + TestCase.assertNotNull(o.secKey1); + TestCase.assertEquals(1, o.secKey2.length); + TestCase.assertEquals(1, o.secKey3.size()); + } + } + + @Test + public void testSecKeyInSuperclass() + throws FileNotFoundException, DatabaseException { + + open(); + + SecKeyInSuperclassEntity obj = new SecKeyInSuperclassEntity(); + checkEntity(SecKeyInSuperclassEntity.class, obj); + + checkMetadata(SecKeyInSuperclass.class.getName(), + new String[][] { + {"priKey", "java.lang.String"}, + {"secKey1", String.class.getName()}, + }, + 0/*priKeyIndex*/, null); + + checkMetadata(SecKeyInSuperclassEntity.class.getName(), + new String[][] { + {"secKey2", "java.lang.String"}, + }, + -1 
/*priKeyIndex*/, SecKeyInSuperclass.class.getName()); + + checkSecKey + (obj, SecKeyInSuperclassEntity.class, "secKey1", obj.secKey1, + String.class); + checkSecKey + (obj, SecKeyInSuperclassEntity.class, "secKey2", obj.secKey2, + String.class); + + close(); + } + + @Persistent + static class SecKeyInSuperclass implements MyEntity { + + @PrimaryKey + String priKey = "1"; + + @SecondaryKey(relate=ONE_TO_ONE) + String secKey1 = "1"; + + public Object getPriKeyObject() { + return priKey; + } + + public void validate(Object other) { + SecKeyInSuperclass o = (SecKeyInSuperclass) other; + TestCase.assertEquals(secKey1, o.secKey1); + } + } + + @Entity + static class SecKeyInSuperclassEntity extends SecKeyInSuperclass { + + @SecondaryKey(relate=ONE_TO_ONE) + String secKey2 = "2"; + + @Override + public void validate(Object other) { + super.validate(other); + SecKeyInSuperclassEntity o = (SecKeyInSuperclassEntity) other; + TestCase.assertEquals(priKey, o.priKey); + TestCase.assertEquals(secKey2, o.secKey2); + } + } + + @Test + public void testSecKeyInSubclass() + throws FileNotFoundException, DatabaseException { + + open(); + + SecKeyInSubclass obj = new SecKeyInSubclass(); + checkEntity(SecKeyInSubclassEntity.class, obj); + + checkMetadata(SecKeyInSubclassEntity.class.getName(), new String[][] { + {"priKey", "java.lang.String"}, + {"secKey1", "java.lang.String"}, + }, + 0 /*priKeyIndex*/, null); + + checkMetadata(SecKeyInSubclass.class.getName(), new String[][] { + {"secKey2", String.class.getName()}, + }, + -1 /*priKeyIndex*/, + SecKeyInSubclassEntity.class.getName()); + + checkSecKey + (obj, SecKeyInSubclassEntity.class, "secKey1", obj.secKey1, + String.class); + checkSecKey + (obj, SecKeyInSubclassEntity.class, "secKey2", obj.secKey2, + String.class); + + close(); + } + + @Entity + static class SecKeyInSubclassEntity implements MyEntity { + + @PrimaryKey + String priKey = "1"; + + @SecondaryKey(relate=ONE_TO_ONE) + String secKey1; + + public Object getPriKeyObject() { + return priKey; + } + + public void validate(Object other) { + SecKeyInSubclassEntity o = (SecKeyInSubclassEntity) other; + TestCase.assertEquals(priKey, o.priKey); + TestCase.assertEquals(secKey1, o.secKey1); + } + } + + @Persistent + static class SecKeyInSubclass extends SecKeyInSubclassEntity { + + @SecondaryKey(relate=ONE_TO_ONE) + String secKey2 = "2"; + + @Override + public void validate(Object other) { + super.validate(other); + SecKeyInSubclass o = (SecKeyInSubclass) other; + TestCase.assertEquals(secKey2, o.secKey2); + } + } + + private static void checkSameIfNonNull(Object o1, Object o2) { + if (o1 != null && o2 != null) { + assertSame(o1, o2); + } + } + + private void checkEntity(Class entityCls, MyEntity entity) { + Object priKey = entity.getPriKeyObject(); + Class keyCls = priKey.getClass(); + DatabaseEntry keyEntry2 = new DatabaseEntry(); + DatabaseEntry dataEntry2 = new DatabaseEntry(); + + /* Write object, read it back and validate (compare) it. */ + PersistEntityBinding entityBinding = + new PersistEntityBinding(catalog, entityCls.getName(), false); + entityBinding.objectToData(entity, dataEntry); + entityBinding.objectToKey(entity, keyEntry); + Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry); + entity.validate(entity2); + + /* Read back the primary key and validate it. 
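The round trip must be byte-identical, not merely equals()-equal: the assertEquals on the DatabaseEntry pair compares the raw key bytes, since any re-encoding drift would break index lookups even between objects that compare equal.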
*/ + PersistKeyBinding keyBinding = + new PersistKeyBinding(catalog, keyCls.getName(), false); + Object priKey2 = keyBinding.entryToObject(keyEntry); + assertEquals(priKey, priKey2); + keyBinding.objectToEntry(priKey2, keyEntry2); + assertEquals(keyEntry, keyEntry2); + + /* Check raw entity binding. */ + PersistEntityBinding rawEntityBinding = + new PersistEntityBinding(catalog, entityCls.getName(), true); + RawObject rawEntity = + (RawObject) rawEntityBinding.entryToObject(keyEntry, dataEntry); + rawEntityBinding.objectToKey(rawEntity, keyEntry2); + rawEntityBinding.objectToData(rawEntity, dataEntry2); + entity2 = entityBinding.entryToObject(keyEntry2, dataEntry2); + entity.validate(entity2); + RawObject rawEntity2 = + (RawObject) rawEntityBinding.entryToObject(keyEntry2, dataEntry2); + assertEquals(rawEntity, rawEntity2); + assertEquals(dataEntry, dataEntry2); + assertEquals(keyEntry, keyEntry2); + + /* Check that raw entity can be converted to a regular entity. */ + try { + entity2 = catalog.convertRawObject(rawEntity, null); + } catch (RefreshException e) { + fail(e.toString()); + } + entity.validate(entity2); + + /* Check raw key binding. */ + PersistKeyBinding rawKeyBinding = + new PersistKeyBinding(catalog, keyCls.getName(), true); + Object rawKey = rawKeyBinding.entryToObject(keyEntry); + rawKeyBinding.objectToEntry(rawKey, keyEntry2); + priKey2 = keyBinding.entryToObject(keyEntry2); + assertEquals(priKey, priKey2); + assertEquals(keyEntry, keyEntry2); + } + + private void checkSecKey(MyEntity entity, + String keyName, + Object keyValue, + Class keyCls) + throws DatabaseException { + + checkSecKey(entity, entity.getClass(), keyName, keyValue, keyCls); + } + + private void checkSecKey(MyEntity entity, + Class entityCls, + String keyName, + Object keyValue, + Class keyCls) + throws DatabaseException { + + /* Get entity metadata. */ + EntityMetadata entityMeta = + model.getEntityMetadata(entityCls.getName()); + assertNotNull(entityMeta); + + /* Get secondary key metadata. */ + SecondaryKeyMetadata secKeyMeta = + entityMeta.getSecondaryKeys().get(keyName); + assertNotNull(secKeyMeta); + + /* Create key creator/nullifier. */ + SecondaryKeyCreator keyCreator = new PersistKeyCreator + (catalog, entityMeta, keyCls.getName(), secKeyMeta, + false /*rawAcess*/); + + /* Convert entity to bytes. */ + PersistEntityBinding entityBinding = + new PersistEntityBinding(catalog, entityCls.getName(), false); + entityBinding.objectToData(entity, dataEntry); + entityBinding.objectToKey(entity, keyEntry); + + /* Extract secondary key bytes from entity bytes. */ + DatabaseEntry secKeyEntry = new DatabaseEntry(); + boolean isKeyPresent = keyCreator.createSecondaryKey + (null, keyEntry, dataEntry, secKeyEntry); + assertEquals(keyValue != null, isKeyPresent); + + /* Convert secondary key bytes back to an object. */ + PersistKeyBinding keyBinding = + new PersistKeyBinding(catalog, keyCls.getName(), false); + if (isKeyPresent) { + Object keyValue2 = keyBinding.entryToObject(secKeyEntry); + assertEquals(keyValue, keyValue2); + DatabaseEntry secKeyEntry2 = new DatabaseEntry(); + keyBinding.objectToEntry(keyValue2, secKeyEntry2); + assertEquals(secKeyEntry, secKeyEntry2); + } + } + + private void checkSecMultiKey(MyEntity entity, + String keyName, + Set keyValues, + Class keyCls) + throws DatabaseException { + + /* Get entity metadata. 
*/ + Class entityCls = entity.getClass(); + EntityMetadata entityMeta = + model.getEntityMetadata(entityCls.getName()); + assertNotNull(entityMeta); + + /* Get secondary key metadata. */ + SecondaryKeyMetadata secKeyMeta = + entityMeta.getSecondaryKeys().get(keyName); + assertNotNull(secKeyMeta); + + /* Create key creator/nullifier. */ + SecondaryMultiKeyCreator keyCreator = new PersistKeyCreator + (catalog, entityMeta, keyCls.getName(), secKeyMeta, + false /*rawAcess*/); + + /* Convert entity to bytes. */ + PersistEntityBinding entityBinding = + new PersistEntityBinding(catalog, entityCls.getName(), false); + entityBinding.objectToData(entity, dataEntry); + entityBinding.objectToKey(entity, keyEntry); + + /* Extract secondary key bytes from entity bytes. */ + Set results = new HashSet(); + keyCreator.createSecondaryKeys + (null, keyEntry, dataEntry, results); + assertEquals(keyValues.size(), results.size()); + + /* Convert secondary key bytes back to objects. */ + PersistKeyBinding keyBinding = + new PersistKeyBinding(catalog, keyCls.getName(), false); + Set keyValues2 = new HashSet(); + for (DatabaseEntry secKeyEntry : results) { + Object keyValue2 = keyBinding.entryToObject(secKeyEntry); + keyValues2.add(keyValue2); + } + assertEquals(keyValues, keyValues2); + } + + private void nullifySecKey(MyEntity entity, + String keyName, + Object keyValue, + Class keyCls) + throws DatabaseException { + + /* Get entity metadata. */ + Class entityCls = entity.getClass(); + EntityMetadata entityMeta = + model.getEntityMetadata(entityCls.getName()); + assertNotNull(entityMeta); + + /* Get secondary key metadata. */ + SecondaryKeyMetadata secKeyMeta = + entityMeta.getSecondaryKeys().get(keyName); + assertNotNull(secKeyMeta); + + /* Create key creator/nullifier. */ + ForeignMultiKeyNullifier keyNullifier = new PersistKeyCreator + (catalog, entityMeta, keyCls.getName(), secKeyMeta, + false /*rawAcess*/); + + /* Convert entity to bytes. */ + PersistEntityBinding entityBinding = + new PersistEntityBinding(catalog, entityCls.getName(), false); + entityBinding.objectToData(entity, dataEntry); + entityBinding.objectToKey(entity, keyEntry); + + /* Convert secondary key to bytes. */ + PersistKeyBinding keyBinding = + new PersistKeyBinding(catalog, keyCls.getName(), false); + DatabaseEntry secKeyEntry = new DatabaseEntry(); + if (keyValue != null) { + keyBinding.objectToEntry(keyValue, secKeyEntry); + } + + /* Nullify secondary key bytes within entity bytes. */ + boolean isKeyPresent = keyNullifier.nullifyForeignKey + (null, keyEntry, dataEntry, secKeyEntry); + assertEquals(keyValue != null, isKeyPresent); + + /* Convert modified entity bytes back to an entity. */ + Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry); + setFieldToNull(entity, keyName); + entity.validate(entity2); + + /* Do a full check after nullifying it. */ + checkSecKey(entity, keyName, null, keyCls); + } + + private void nullifySecMultiKey(MyEntity entity, + String keyName, + Object keyValue, + Class keyCls) + throws DatabaseException { + + /* Get entity metadata. */ + Class entityCls = entity.getClass(); + EntityMetadata entityMeta = + model.getEntityMetadata(entityCls.getName()); + assertNotNull(entityMeta); + + /* Get secondary key metadata. */ + SecondaryKeyMetadata secKeyMeta = + entityMeta.getSecondaryKeys().get(keyName); + assertNotNull(secKeyMeta); + + /* Create key creator/nullifier. 
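The same PersistKeyCreator class is assigned to SecondaryKeyCreator, SecondaryMultiKeyCreator, and ForeignMultiKeyNullifier variables across these helpers; it implements all three interfaces, so one object both creates and nullifies keys.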
*/ + ForeignMultiKeyNullifier keyNullifier = new PersistKeyCreator + (catalog, entityMeta, keyCls.getName(), secKeyMeta, + false /*rawAcess*/); + + /* Convert entity to bytes. */ + PersistEntityBinding entityBinding = + new PersistEntityBinding(catalog, entityCls.getName(), false); + entityBinding.objectToData(entity, dataEntry); + entityBinding.objectToKey(entity, keyEntry); + + /* Get secondary key binding. */ + PersistKeyBinding keyBinding = + new PersistKeyBinding(catalog, keyCls.getName(), false); + DatabaseEntry secKeyEntry = new DatabaseEntry(); + + /* Nullify one key value at a time until all of them are gone. */ + while (true) { + Object fieldObj = getField(entity, keyName); + fieldObj = nullifyFirstElement(fieldObj, keyBinding, secKeyEntry); + if (fieldObj == null) { + break; + } + setField(entity, keyName, fieldObj); + + /* Nullify secondary key bytes within entity bytes. */ + boolean isKeyPresent = keyNullifier.nullifyForeignKey + (null, keyEntry, dataEntry, secKeyEntry); + assertEquals(keyValue != null, isKeyPresent); + + /* Convert modified entity bytes back to an entity. */ + Object entity2 = entityBinding.entryToObject(keyEntry, dataEntry); + entity.validate(entity2); + + /* Do a full check after nullifying it. */ + Set keyValues; + if (fieldObj instanceof Set) { + keyValues = (Set) fieldObj; + } else if (fieldObj instanceof Object[]) { + keyValues = toSet((Object[]) fieldObj); + } else if (fieldObj instanceof int[]) { + keyValues = toSet((int[]) fieldObj); + } else { + throw new IllegalStateException(fieldObj.getClass().getName()); + } + checkSecMultiKey(entity, keyName, keyValues, keyCls); + } + } + + /** + * Nullifies the first element of an array or collection object by removing + * it from the array or collection. Returns the resulting array or + * collection. Also outputs the removed element to the keyEntry using the + * keyBinding. + */ + private Object nullifyFirstElement(Object obj, + EntryBinding keyBinding, + DatabaseEntry keyEntry) { + if (obj instanceof Collection) { + Iterator i = ((Collection) obj).iterator(); + if (i.hasNext()) { + Object elem = i.next(); + i.remove(); + keyBinding.objectToEntry(elem, keyEntry); + return obj; + } else { + return null; + } + } else if (obj instanceof Object[]) { + Object[] a1 = (Object[]) obj; + if (a1.length > 0) { + Object[] a2 = (Object[]) Array.newInstance + (obj.getClass().getComponentType(), a1.length - 1); + System.arraycopy(a1, 1, a2, 0, a2.length); + keyBinding.objectToEntry(a1[0], keyEntry); + return a2; + } else { + return null; + } + } else if (obj instanceof int[]) { + int[] a1 = (int[]) obj; + if (a1.length > 0) { + int[] a2 = new int[a1.length - 1]; + System.arraycopy(a1, 1, a2, 0, a2.length); + keyBinding.objectToEntry(a1[0], keyEntry); + return a2; + } else { + return null; + } + } else { + throw new IllegalStateException(obj.getClass().getName()); + } + } + + private void checkMetadata(String clsName, + String[][] nameTypePairs, + int priKeyIndex, + String superClsName) + throws DatabaseException { + + /* Check metadata/types against the live model. */ + checkMetadata + (catalog, model, clsName, nameTypePairs, priKeyIndex, + superClsName); + + /* + * Open a catalog that uses the stored model. 
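Passing false for useCurrentModel below makes the catalog resolve the model persisted in the store's catalog database, so each class is checked twice: once against the live model and once against what was actually written.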
+ */ + PersistCatalog storedCatalog = null; + storedCatalog = new PersistCatalog + (env, STORE_PREFIX, STORE_PREFIX + "catalog", new DatabaseConfig(), + null, null, false /*useCurrentModel*/, null /*Store*/); + EntityModel storedModel = storedCatalog.getResolvedModel(); + + /* Check metadata/types against the stored catalog/model. */ + checkMetadata + (storedCatalog, storedModel, clsName, nameTypePairs, priKeyIndex, + superClsName); + + storedCatalog.close(); + } + + private void checkMetadata(PersistCatalog checkCatalog, + EntityModel checkModel, + String clsName, + String[][] nameTypePairs, + int priKeyIndex, + String superClsName) { + ClassMetadata classMeta = checkModel.getClassMetadata(clsName); + assertNotNull(clsName, classMeta); + + PrimaryKeyMetadata priKeyMeta = classMeta.getPrimaryKey(); + if (priKeyIndex >= 0) { + assertNotNull(priKeyMeta); + String fieldName = nameTypePairs[priKeyIndex][0]; + String fieldType = nameTypePairs[priKeyIndex][1]; + assertEquals(priKeyMeta.getName(), fieldName); + assertEquals(priKeyMeta.getClassName(), fieldType); + assertEquals(priKeyMeta.getDeclaringClassName(), clsName); + assertNull(priKeyMeta.getSequenceName()); + } else { + assertNull(priKeyMeta); + } + + RawType type = checkCatalog.getFormat(clsName); + assertNotNull(type); + assertEquals(clsName, type.getClassName()); + assertEquals(0, type.getVersion()); + assertTrue(!type.isSimple()); + assertTrue(!type.isPrimitive()); + assertTrue(!type.isEnum()); + assertNull(type.getEnumConstants()); + assertTrue(!type.isArray()); + assertEquals(0, type.getDimensions()); + assertNull(type.getComponentType()); + RawType superType = type.getSuperType(); + if (superClsName != null) { + assertNotNull(superType); + assertEquals(superClsName, superType.getClassName()); + } else { + assertNull(superType); + } + + Map fields = type.getFields(); + assertNotNull(fields); + + int nFields = nameTypePairs.length; + assertEquals(nFields, fields.size()); + + for (String[] pair : nameTypePairs) { + String fieldName = pair[0]; + String fieldType = pair[1]; + Class fieldCls; + try { + fieldCls = checkCatalog.resolveClass(fieldType); + } catch (ClassNotFoundException e) { + fail(e.toString()); + return; /* For compiler */ + } + RawField field = fields.get(fieldName); + assertNotNull(field); + assertEquals(fieldName, field.getName()); + type = field.getType(); + assertNotNull(type); + int dim = getArrayDimensions(fieldType); + while (dim > 0) { + assertEquals(dim, type.getDimensions()); + assertEquals(dim, getArrayDimensions(fieldType)); + assertEquals(true, type.isArray()); + assertEquals(fieldType, type.getClassName()); + assertEquals(0, type.getVersion()); + assertTrue(!type.isSimple()); + assertTrue(!type.isPrimitive()); + assertTrue(!type.isEnum()); + assertNull(type.getEnumConstants()); + fieldType = getArrayComponent(fieldType, dim); + type = type.getComponentType(); + assertNotNull(fieldType, type); + dim -= 1; + } + assertEquals(fieldType, type.getClassName()); + List enums = getEnumConstants(fieldType); + assertEquals(isSimpleType(fieldType), type.isSimple()); + assertEquals(isPrimitiveType(fieldType), type.isPrimitive()); + assertNull(type.getComponentType()); + assertTrue(!type.isArray()); + assertEquals(0, type.getDimensions()); + if (enums != null) { + assertTrue(type.isEnum()); + assertEquals(enums, type.getEnumConstants()); + assertNull(type.getSuperType()); + } else { + assertTrue(!type.isEnum()); + assertNull(type.getEnumConstants()); + } + } + } + + private List getEnumConstants(String clsName) { + if 
(isPrimitiveType(clsName)) { + return null; + } + Class cls; + try { + cls = Class.forName(clsName); + } catch (ClassNotFoundException e) { + fail(e.toString()); + return null; /* Never happens. */ + } + if (!cls.isEnum()) { + return null; + } + List enums = new ArrayList(); + Object[] vals = cls.getEnumConstants(); + for (Object val : vals) { + enums.add(val.toString()); + } + return enums; + } + + private String getArrayComponent(String clsName, int dim) { + clsName = clsName.substring(1); + if (dim > 1) { + return clsName; + } + if (clsName.charAt(0) == 'L' && + clsName.charAt(clsName.length() - 1) == ';') { + return clsName.substring(1, clsName.length() - 1); + } + if (clsName.length() != 1) { + fail(); + } + switch (clsName.charAt(0)) { + case 'Z': return "boolean"; + case 'B': return "byte"; + case 'C': return "char"; + case 'D': return "double"; + case 'F': return "float"; + case 'I': return "int"; + case 'J': return "long"; + case 'S': return "short"; + default: fail(); + } + return null; /* Should never happen. */ + } + + private static int getArrayDimensions(String clsName) { + int i = 0; + while (clsName.charAt(i) == '[') { + i += 1; + } + return i; + } + + private static boolean isSimpleType(String clsName) { + return isPrimitiveType(clsName) || + clsName.equals("java.lang.Boolean") || + clsName.equals("java.lang.Character") || + clsName.equals("java.lang.Byte") || + clsName.equals("java.lang.Short") || + clsName.equals("java.lang.Integer") || + clsName.equals("java.lang.Long") || + clsName.equals("java.lang.Float") || + clsName.equals("java.lang.Double") || + clsName.equals("java.lang.String") || + clsName.equals("java.math.BigInteger") || + clsName.equals("java.math.BigDecimal") || + clsName.equals("java.util.Date"); + } + + private static boolean isPrimitiveType(String clsName) { + return clsName.equals("boolean") || + clsName.equals("char") || + clsName.equals("byte") || + clsName.equals("short") || + clsName.equals("int") || + clsName.equals("long") || + clsName.equals("float") || + clsName.equals("double"); + } + + interface MyEntity { + Object getPriKeyObject(); + void validate(Object other); + } + + private static boolean nullOrEqual(Object o1, Object o2) { + return (o1 != null) ? 
o1.equals(o2) : (o2 == null); + } + + private static String arrayToString(Object[] array) { + StringBuilder buf = new StringBuilder(); + buf.append('['); + for (Object o : array) { + if (o instanceof Object[]) { + buf.append(arrayToString((Object[]) o)); + } else { + buf.append(o); + } + buf.append(','); + } + buf.append(']'); + return buf.toString(); + } + + private void setFieldToNull(Object obj, String fieldName) { + try { + Field field = obj.getClass().getDeclaredField(fieldName); + field.setAccessible(true); + field.set(obj, null); + } catch (NoSuchFieldException e) { + fail(e.toString()); + } catch (IllegalAccessException e) { + fail(e.toString()); + } + } + + private void setField(Object obj, String fieldName, Object fieldValue) { + try { + Field field = obj.getClass().getDeclaredField(fieldName); + field.setAccessible(true); + field.set(obj, fieldValue); + } catch (NoSuchFieldException e) { + throw new IllegalStateException(e.toString()); + } catch (IllegalAccessException e) { + throw new IllegalStateException(e.toString()); + } + } + + private Object getField(Object obj, String fieldName) { + try { + Field field = obj.getClass().getDeclaredField(fieldName); + field.setAccessible(true); + return field.get(obj); + } catch (NoSuchFieldException e) { + throw new IllegalStateException(e.toString()); + } catch (IllegalAccessException e) { + throw new IllegalStateException(e.toString()); + } + } +} diff --git a/test/com/sleepycat/persist/test/ConvertAndAddTest.java b/test/com/sleepycat/persist/test/ConvertAndAddTest.java new file mode 100644 index 0000000..8ec808f --- /dev/null +++ b/test/com/sleepycat/persist/test/ConvertAndAddTest.java @@ -0,0 +1,182 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.File; +import java.io.IOException; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.evolve.Conversion; +import com.sleepycat.persist.evolve.Converter; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * Test a bug fix where an IndexOutOfBoundsException occurs when adding a field + * and converting another field, where the latter field is alphabetically + * higher than the former. This is also tested by + * EvolveClasses.FieldAddAndConvert, but that class does not test evolving an + * entity that was created by catalog version 0. 
[#15797] + * + * A modified version of this program was run manually with JE 3.2.30 to + * produce a log, which is the result of the testSetup() test. The sole log + * file was renamed from 00000000.jdb to ConvertAndAddTest.jdb and added to CVS + * in this directory. When that log file is opened here, the bug is + * reproduced. The modifications to this program for 3.2.30 are: + * + * + X in testSetup + * + X out testConvertAndAddField + * + don't remove log files in tearDown + * + @Entity version is 0 + * + removed field MyEntity.a + * + * This test should be excluded from the BDB build because it uses a stored JE + * log file and it tests a fix for a bug that was never present in BDB. + * + * @author Mark Hayes + */ +public class ConvertAndAddTest extends TestBase { + + private static final String STORE_NAME = "test"; + + private File envHome; + private Environment env; + + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() { + if (env != null) { + try { + env.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + envHome = null; + env = null; + } + + private EntityStore open(boolean addConverter) + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + Mutations mutations = new Mutations(); + mutations.addConverter(new Converter + (MyEntity.class.getName(), 0, "b", new MyConversion())); + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setMutations(mutations); + return new EntityStore(env, "foo", storeConfig); + } + + private void close(EntityStore store) + throws DatabaseException { + + store.close(); + env.close(); + env = null; + } + + @Test + public void testConvertAndAddField() + throws DatabaseException, IOException { + + /* Copy log file resource to log file zero. 
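+         * (TestUtils.loadLog copies the ConvertAndAddTest.jdb resource into
+         * the environment directory, so the version-0 entity it contains is
+         * read through the Converter registered in open(), i.e.
+         *
+         *   mutations.addConverter(new Converter(
+         *       MyEntity.class.getName(), 0, "b", new MyConversion()));
+         *
+         * which passes field "b" of MyEntity version 0 through MyConversion
+         * when the stored entity is evolved.)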
*/ + TestUtils.loadLog(getClass(), "ConvertAndAddTest.jdb", envHome); + + EntityStore store = open(true /*addConverter*/); + + PrimaryIndex index = + store.getPrimaryIndex(Long.class, MyEntity.class); + + MyEntity entity = index.get(1L); + assertNotNull(entity); + assertEquals(123, entity.b); + + close(store); + } + + public void xtestSetup() + throws DatabaseException { + + EntityStore store = open(false /*addConverter*/); + + PrimaryIndex index = + store.getPrimaryIndex(Long.class, MyEntity.class); + + MyEntity entity = new MyEntity(); + entity.key = 1; + entity.b = 123; + index.put(entity); + + close(store); + } + + @Entity(version=1) + static class MyEntity { + + @PrimaryKey + long key; + + int a; // added in version 1 + int b; + + private MyEntity() {} + } + + @SuppressWarnings("serial") + public static class MyConversion implements Conversion { + + public void initialize(EntityModel model) { + } + + public Object convert(Object fromValue) { + return fromValue; + } + + @Override + public boolean equals(Object o) { + return o instanceof MyConversion; + } + } +} diff --git a/test/com/sleepycat/persist/test/ConvertAndAddTest.jdb b/test/com/sleepycat/persist/test/ConvertAndAddTest.jdb new file mode 100644 index 0000000..1dd55ac Binary files /dev/null and b/test/com/sleepycat/persist/test/ConvertAndAddTest.jdb differ diff --git a/test/com/sleepycat/persist/test/CreateAbstractClassData.java b/test/com/sleepycat/persist/test/CreateAbstractClassData.java new file mode 100644 index 0000000..a499026 --- /dev/null +++ b/test/com/sleepycat/persist/test/CreateAbstractClassData.java @@ -0,0 +1,118 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import java.io.File; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; + +/* + * Create a database which stores abstract entity classes. This database will be + * used in the unit test c.s.persist.test.AddNewSecKeyToAbstractClassTest. 
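+ *
+ * A minimal sketch of how the test presumably reopens this store (mirroring
+ * open() below; note the index is keyed on the abstract entity type):
+ *
+ *   PrimaryIndex pri =
+ *       store.getPrimaryIndex(Long.class, AbstractEntity1.class);
+ *   AbstractEntity1 e = (AbstractEntity1) pri.get(Long.valueOf(1));
+ *   // e is actually an EntityData1 instance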
+ */ +public class CreateAbstractClassData { + private Environment env; + private EntityStore store; + private PrimaryIndex primary1; + private PrimaryIndex primary2; + + public static void main(String args[]) { + CreateAbstractClassData epc = new CreateAbstractClassData(); + epc.open(); + epc.writeData(); + epc.close(); + } + + private void writeData() { + primary1.put(null, new EntityData1(1)); + primary2.put(null, new EntityData2(1)); + } + + private void close() { + store.close(); + store = null; + + env.close(); + env = null; + } + + private void open() { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + File envHome = new File("./"); + env = new Environment(envHome, envConfig); + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + store = new EntityStore(env, "test", config); + primary1 = store.getPrimaryIndex(Long.class, AbstractEntity1.class); + primary2 = store.getPrimaryIndex(Long.class, AbstractEntity2.class); + } + + @Entity + static abstract class AbstractEntity1 { + AbstractEntity1(Long i) { + this.id = i; + } + + private AbstractEntity1(){} + + @PrimaryKey + private Long id; + } + + @Persistent + static class EntityData1 extends AbstractEntity1{ + private int f1; + + private EntityData1(){} + + EntityData1(int i) { + super(Long.valueOf(i)); + this.f1 = i; + } + } + + @Entity + static abstract class AbstractEntity2 { + AbstractEntity2(Long i) { + this.id = i; + } + + private AbstractEntity2(){} + + @PrimaryKey + private Long id; + } + + @Persistent + static class EntityData2 extends AbstractEntity2{ + private int f1; + + private EntityData2(){} + + EntityData2(int i) { + super(Long.valueOf(i)); + this.f1 = i; + } + } +} diff --git a/test/com/sleepycat/persist/test/CreateOldVersionBigDecimalDb.java b/test/com/sleepycat/persist/test/CreateOldVersionBigDecimalDb.java new file mode 100644 index 0000000..4505911 --- /dev/null +++ b/test/com/sleepycat/persist/test/CreateOldVersionBigDecimalDb.java @@ -0,0 +1,122 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import java.io.File; +import java.math.BigDecimal; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PersistentProxy; +import com.sleepycat.persist.model.PrimaryKey; + +/* + * Create an old version (before je-4.1) database, which stores BigDecimal data + * using a proxy class. This database will be used in the unit test + * com.sleepycat.persist.test.ProxyToSimpleTypeTest. 
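+ *
+ * The proxy mechanics (see BigDecimalProxy below): the stored representation
+ * is simply the String form of the value, i.e.
+ *
+ *   rep = o.toString();          // initializeProxy(BigDecimal o)
+ *   return new BigDecimal(rep);  // convertProxy()
+ *
+ * ProxyToSimpleTypeTest presumably verifies that data stored in this proxy
+ * format remains readable once BigDecimal is a built-in simple type
+ * (je-4.1 and later).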
+ */ +public class CreateOldVersionBigDecimalDb { + + private Environment env; + private EntityStore store; + private PrimaryIndex primary; + + public static void main(String args[]) { + CreateOldVersionBigDecimalDb sbd = new CreateOldVersionBigDecimalDb(); + sbd.open(); + sbd.writeData(); + sbd.close(); + sbd.open(); + sbd.getData(); + sbd.close(); + } + + private void writeData() { + primary.put(null, + new BigDecimalData (1, new BigDecimal("123.1234000"))); + } + + private void getData() { + BigDecimalData data = primary.get(1); + System.out.println(data.getF1()); + } + + private void close() { + store.close(); + store = null; + + env.close(); + env = null; + } + + private void open() { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + File envHome = new File("./"); + env = new Environment(envHome, envConfig); + EntityModel model = new AnnotationModel(); + model.registerClass(BigDecimalProxy.class); + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + config.setModel(model); + store = new EntityStore(env, "test", config); + primary = store.getPrimaryIndex(Integer.class, BigDecimalData.class); + } + + @Entity + static class BigDecimalData { + @PrimaryKey + private int id; + private BigDecimal f1; + + BigDecimalData() { } + + BigDecimalData(int id, BigDecimal f1) { + this.id = id; + this.f1 = f1; + } + + int getId() { + return id; + } + + BigDecimal getF1() { + return f1; + } + } + + @Persistent(proxyFor=BigDecimal.class) + static class BigDecimalProxy + implements PersistentProxy { + + private String rep; + private BigDecimalProxy() {} + public BigDecimal convertProxy() { + return new BigDecimal(rep); + } + + public void initializeProxy(BigDecimal o) { + rep = o.toString(); + } + } +} \ No newline at end of file diff --git a/test/com/sleepycat/persist/test/CreateSecDupsWithoutComparator.java b/test/com/sleepycat/persist/test/CreateSecDupsWithoutComparator.java new file mode 100644 index 0000000..d567df7 --- /dev/null +++ b/test/com/sleepycat/persist/test/CreateSecDupsWithoutComparator.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; + +import java.io.File; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; + +/* + * Create a database without a comparator for secondary duplicates with je-4.0. 
+ * This database will be used in the unit test com.sleepycat.persist.test. + * SecondaryDupOrderTest. + */ +public class CreateSecDupsWithoutComparator { + Environment env; + private EntityStore store; + private PrimaryIndex priIndex; + private SecondaryIndex secIndex; + private SecondaryIndex secIndex2; + + public static void main(String args[]) { + CreateSecDupsWithoutComparator csd = + new CreateSecDupsWithoutComparator(); + csd.open(); + csd.writeData(); + csd.readData(); + csd.close(); + } + + private void open() { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + File envHome = new File("./"); + env = new Environment(envHome, envConfig); + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + store = new EntityStore(env, "test", config); + priIndex = store.getPrimaryIndex(StoredComparatorEntity.Key.class, + StoredComparatorEntity.class); + secIndex = store.getSecondaryIndex + (priIndex, StoredComparatorEntity.MyEnum.class, "secKey"); + secIndex2 = store.getSecondaryIndex + (priIndex, Integer.class, "secKey2"); + } + + private void close() { + store.close(); + store = null; + env.close(); + env = null; + } + + private void writeData() { + final StoredComparatorEntity.Key[] priKeys = + new StoredComparatorEntity.Key[] { + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 1, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 1, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 2, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 2, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.B, 1, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.B, 1, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.C, 0, + StoredComparatorEntity.MyEnum.C), + }; + + final StoredComparatorEntity.MyEnum[] secKeys = + new StoredComparatorEntity.MyEnum[] { + StoredComparatorEntity.MyEnum.C, + StoredComparatorEntity.MyEnum.B, + StoredComparatorEntity.MyEnum.A, + null, + StoredComparatorEntity.MyEnum.A, + StoredComparatorEntity.MyEnum.B, + StoredComparatorEntity.MyEnum.C, + }; + + final Integer[] secKeys2 = new Integer[] { 2, 1, 0, null, 0, 1, 2, }; + final int nEntities = priKeys.length; + Transaction txn = env.beginTransaction(null, null); + for (int i = 0; i < nEntities; i += 1) { + priIndex.put(txn, new StoredComparatorEntity + (priKeys[i], secKeys[i], secKeys2[i])); + } + txn.commit(); + } + + private void readData() { + Transaction txn = env.beginTransaction(null, null); + EntityCursor entities = + priIndex.entities(txn, null); + System.out.println("Primary database order:"); + for (StoredComparatorEntity e : entities) { + System.out.println(e); + } + entities.close(); + txn.commit(); + txn = env.beginTransaction(null, null); + entities = secIndex.entities(txn, null); + System.out.println("Secondary database 1 order:"); + for (StoredComparatorEntity e : entities) { + System.out.println(e); + } + entities.close(); + txn.commit(); + txn = env.beginTransaction(null, null); + entities = secIndex2.entities(txn, null); + System.out.println("Secondary database 2 order:"); + for (StoredComparatorEntity e : 
entities) { + System.out.println(e); + } + entities.close(); + txn.commit(); + + } + + @Entity + static class StoredComparatorEntity { + + enum MyEnum { A, B, C }; + + @Persistent + static class Key implements Comparable { + + @KeyField(1) + MyEnum f1; + + @KeyField(2) + Integer f2; + + @KeyField(3) + MyEnum f3; + + private Key() {} + + Key(MyEnum f1, Integer f2, MyEnum f3) { + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + } + + public int compareTo(Key o) { + /* Reverse the natural order. */ + int i = f1.compareTo(o.f1); + if (i != 0) return -i; + i = f2.compareTo(o.f2); + if (i != 0) return -i; + i = f3.compareTo(o.f3); + if (i != 0) return -i; + return 0; + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof Key)) { + return false; + } + Key o = (Key) other; + return f1 == o.f1 && + f2.equals(o.f2) && + f3 == o.f3; + } + + @Override + public int hashCode() { + return f1.ordinal() + f2 + f3.ordinal(); + } + + @Override + public String toString() { + return "[Key " + f1 + ' ' + f2 + ' ' + f3 + ']'; + } + } + + @PrimaryKey + Key key; + + @SecondaryKey(relate=MANY_TO_ONE) + private MyEnum secKey; + + @SecondaryKey(relate=MANY_TO_ONE) + private Integer secKey2; + + private StoredComparatorEntity() {} + + StoredComparatorEntity(Key key, MyEnum secKey, Integer secKey2) { + this.key = key; + this.secKey = secKey; + this.secKey2 = secKey2; + } + + @Override + public String toString() { + return "[pri = " + key + " sec = " + secKey + " sec2 = " + + secKey2 + ']'; + } + } +} \ No newline at end of file diff --git a/test/com/sleepycat/persist/test/CreateSecDupsWithoutComparatorEvolve.java b/test/com/sleepycat/persist/test/CreateSecDupsWithoutComparatorEvolve.java new file mode 100644 index 0000000..3e6bf91 --- /dev/null +++ b/test/com/sleepycat/persist/test/CreateSecDupsWithoutComparatorEvolve.java @@ -0,0 +1,277 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; + +import java.io.File; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; + +/* + * Create a database without a comparator for secondary duplicates with je-4.0. + * This database will be used in the unit test com.sleepycat.persist.test. + * SecondaryDupOrderEvolveTest. 
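+ *
+ * Note that the Key class below reverses the natural order in compareTo:
+ *
+ *   int i = f1.compareTo(o.f1);
+ *   if (i != 0) return -i;   // descending rather than ascending
+ *
+ * presumably so that SecondaryDupOrderEvolveTest can tell comparator-driven
+ * order apart from the default order after the store is evolved.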
+ */ +public class CreateSecDupsWithoutComparatorEvolve { + enum MyEnum { A, B, C }; + Environment env; + private EntityStore store; + private PrimaryIndex priIndex; + private SecondaryIndex secIndex; + private SecondaryIndex secIndex2; + + public static void main(String args[]) { + CreateSecDupsWithoutComparatorEvolve csd = + new CreateSecDupsWithoutComparatorEvolve(); + csd.open(); + csd.writeData(); + csd.readData(); + csd.close(); + } + + private void open() { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + File envHome = new File("./"); + env = new Environment(envHome, envConfig); + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + EntityModel model = new AnnotationModel(); + config.setModel(model); + store = new EntityStore(env, "test", config); + priIndex = store.getPrimaryIndex(Key.class, + StoredComparatorEntity.class); + secIndex = store.getSecondaryIndex + (priIndex, MyEnum.class, "secKey"); + secIndex2 = store.getSecondaryIndex + (priIndex, Integer.class, "secKey2"); + } + + private void close() { + store.close(); + store = null; + env.close(); + env = null; + } + + private void writeData() { + final Key[] priKeys = + new Key[] { + new Key + (MyEnum.A, 1, + MyEnum.A), + new Key + (MyEnum.A, 1, + MyEnum.B), + new Key + (MyEnum.A, 2, + MyEnum.A), + new Key + (MyEnum.A, 2, + MyEnum.B), + new Key + (MyEnum.B, 1, + MyEnum.A), + new Key + (MyEnum.B, 1, + MyEnum.B), + new Key + (MyEnum.C, 0, + MyEnum.C), + }; + + final MyEnum[] secKeys = + new MyEnum[] { + MyEnum.C, + MyEnum.B, + MyEnum.A, + null, + MyEnum.A, + MyEnum.B, + MyEnum.C, + }; + + final Integer[] secKeys2 = new Integer[] { 2, 1, 0, null, 0, 1, 2, }; + final int nEntities = priKeys.length; + Transaction txn = env.beginTransaction(null, null); + for (int i = 0; i < nEntities; i += 1) { + priIndex.put(txn, new StoredComparatorEntity + (priKeys[i], secKeys[i], secKeys2[i])); + } + txn.commit(); + } + + private void readData() { + Transaction txn = env.beginTransaction(null, null); + StoredComparatorEntity data = + priIndex.get(new Key + (MyEnum.A, 1, + MyEnum.A)); + System.out.println(data); + EntityCursor entities = + priIndex.entities(txn, null); + System.out.println("Primary database order:"); + for (StoredComparatorEntity e : entities) { + System.out.println(e); + } + entities.close(); + txn.commit(); + txn = env.beginTransaction(null, null); + entities = secIndex.entities(txn, null); + System.out.println("Secondary database 1 order:"); + for (StoredComparatorEntity e : entities) { + System.out.println(e); + } + entities.close(); + txn.commit(); + txn = env.beginTransaction(null, null); + entities = secIndex2.entities(txn, null); + System.out.println("Secondary database 2 order:"); + for (StoredComparatorEntity e : entities) { + System.out.println(e); + } + entities.close(); + txn.commit(); + + } + + @Entity + static class StoredComparatorEntity + extends StoredComparatorEntity_Base { + + @PrimaryKey + Key key; + + @SecondaryKey(relate=MANY_TO_ONE) + private MyEnum secKey; + + @SecondaryKey(relate=MANY_TO_ONE) + private Integer secKey2; + + private StoredComparatorEntity() {} + + StoredComparatorEntity(Key key, MyEnum secKey, Integer secKey2) { + this.key = key; + this.secKey = secKey; + this.secKey2 = secKey2; + } + + @Override + public String toString() { + return "[pri = " + key + " sec = " + secKey + " sec2 = " + + secKey2 + ']'; + } + + 
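+
+        /*
+         * The three hooks below are deliberate no-ops in this data-creation
+         * program; presumably the evolved copy of this class in
+         * SecondaryDupOrderEvolveTest supplies the real mutations and order
+         * checks when the store is reopened.
+         */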
@Override + void checkAllSecondaryDBOrder(EntityStore store, boolean ifReverse1, + boolean ifReverse2) { + } + + @Override + void checkPrimaryDBOrder(EntityStore store) { + } + + @Override + Mutations getMutations() { + return null; + } + } + + @Persistent + static abstract class StoredComparatorEntity_Base { + + abstract Mutations getMutations(); + abstract void checkPrimaryDBOrder(EntityStore store); + abstract void checkAllSecondaryDBOrder(EntityStore store, + boolean ifReverse1, + boolean ifReverse2); + } + + @Persistent + static class Key implements Comparable { + + @KeyField(1) + MyEnum f1; + + @KeyField(2) + Integer f2; + + @KeyField(3) + MyEnum f3; + + private Key() {} + + Key(MyEnum f1, Integer f2, MyEnum f3) { + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + } + + public int compareTo(Key o) { + /* Reverse the natural order. */ + int i = f1.compareTo(o.f1); + if (i != 0) return -i; + i = f2.compareTo(o.f2); + if (i != 0) return -i; + i = f3.compareTo(o.f3); + if (i != 0) return -i; + return 0; + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof Key)) { + return false; + } + Key o = (Key) other; + return f1 == o.f1 && + f2.equals(o.f2) && + f3 == o.f3; + } + + @Override + public int hashCode() { + return f1.ordinal() + f2 + f3.ordinal(); + } + + @Override + public String toString() { + return "[Key " + f1 + ' ' + f2 + ' ' + f3 + ']'; + } + } +} diff --git a/test/com/sleepycat/persist/test/CreateStringDataDB.java b/test/com/sleepycat/persist/test/CreateStringDataDB.java new file mode 100644 index 0000000..cda20e4 --- /dev/null +++ b/test/com/sleepycat/persist/test/CreateStringDataDB.java @@ -0,0 +1,162 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; + +/* + * Create a database which stores StringData classes. This database will be + * used in the unit test c.s.persist.test.StringFormatCompatibilityTest. 
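+ *
+ * Note the @KeyField ordering in CompositeKey below: f2 is @KeyField(1) and
+ * f1 is @KeyField(2), so the stored key sorts on (f2, f1) -- composite key
+ * order is set by @KeyField, not by field declaration order. For example,
+ *
+ *   new CompositeKey("CompKey1_1", "CompKey1_2")
+ *
+ * sorts first on "CompKey1_2" (its f2).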
+ */ +public class CreateStringDataDB { + Environment env; + private EntityStore store; + private PrimaryIndex primary; + + public static void main(String args[]) { + CreateStringDataDB csd = new CreateStringDataDB(); + csd.open(); + csd.writeData(); + csd.close(); + } + + private void writeData() { + CompositeKey compK = new CompositeKey("CompKey1_1", "CompKey1_2"); + CompositeKey compK2 = new CompositeKey("CompKey2_1", "CompKey2_2"); + String[] f3 = {"f3_1", "f3_2"}; + List f4 = new ArrayList(); + f4.add("f4_1"); + f4.add("f4_2"); + primary.put + (null, new StringData ("pk1", "sk1", compK, "f1", "f2", f3, f4)); + f4.clear(); + f4.add("f4_1_2"); + f4.add("f4_2_2"); + primary.put + (null, new StringData ("pk2", "sk2", compK2, "f1", "f2", f3, f4)); + } + + private void close() { + store.close(); + store = null; + + env.close(); + env = null; + } + + private void open() { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + File envHome = new File("./"); + env = new Environment(envHome, envConfig); + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + store = new EntityStore(env, "test", config); + primary = store.getPrimaryIndex(String.class, StringData.class); + } + + @Entity + static class StringData { + @PrimaryKey + private String pk; + @SecondaryKey (relate = MANY_TO_ONE) + private String sk1; + @SecondaryKey (relate = MANY_TO_ONE) + private CompositeKey sk2; + private String f1; + private String f2; + private String[] f3; + private List f4; + + StringData() { } + + StringData(String pk, String sk1, CompositeKey sk2, String f1, + String f2, String[] f3, List f4) { + this.pk = pk; + this.sk1 = sk1; + this.sk2 = sk2; + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + this.f4 = f4; + } + + String getPK() { + return pk; + } + + String getSK1() { + return sk1; + } + + CompositeKey getSK2() { + return sk2; + } + + String getF1() { + return f1; + } + + String getF2() { + return f2; + } + + String[] getF3() { + return f3; + } + + List getF4() { + return f4; + } + } + + @Persistent + static class CompositeKey { + @KeyField(2) + private String f1; + @KeyField(1) + private String f2; + + private CompositeKey() {} + + CompositeKey(String f1, String f2) { + this.f1 = f1; + this.f2 = f2; + } + + String getF1() { + return f1; + } + + String getF2() { + return f2; + } + } +} diff --git a/test/com/sleepycat/persist/test/DevolutionTest.java b/test/com/sleepycat/persist/test/DevolutionTest.java new file mode 100644 index 0000000..f4686fb --- /dev/null +++ b/test/com/sleepycat/persist/test/DevolutionTest.java @@ -0,0 +1,185 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.File; +import java.io.IOException; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.evolve.Renamer; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * Test a bug fix for an evolution error when a class is evolved and then + * changed back to its original version. Say there are two versions of a + * class A1 and A2 in the catalog, plus a new version A3 of the class. The + * problem occurs when A2 is different than A3 and must be evolved, but A1 + * happens to be identical to A3 and no evolution is needed. In that case, A3 + * was never added to the format list in the catalog (never assigned a format + * ID), but was still used as the "latest version" of A2. This caused all + * kinds of trouble since the class catalog was effectively corrupt. [#16467] + * + * We reproduce this scenario using type Other[], which is represented using + * ArrayObjectFormat internally. By renaming Other to Other2, and then back to + * Other, we create the scenario described above for the array format itself. + * Array formats are only evolved if their component class name has changed + * (see ArrayObjectFormat.evolve). + * + * A modified version of this program was run manually with JE 3.3.71 to + * produce a log, which is the result of the testSetup() test. The sole log + * file was renamed from 00000000.jdb to DevolutionTest.jdb and added to CVS + * in this directory. When that log file is opened here, the bug is + * reproduced. + * + * This test should be excluded from the BDB build because it uses a stored JE + * log file and it tests a fix for a bug that was never present in BDB. + * + * @author Mark Hayes + */ +public class DevolutionTest extends TestBase { + + private static final String STORE_NAME = "test"; + + private File envHome; + private Environment env; + + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() { + if (env != null) { + try { + env.close(); + } catch (Throwable e) { + System.out.println("During tearDown: " + e); + } + } + envHome = null; + env = null; + } + + private EntityStore open() + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + /* + * When version 0 of Other is used, no renamer is configured. When + * version 1 is used, a renamer from Other version 0 to Other2 is used. + * For version 2, the current version, a renamer from Other2 version 1 + * to Other is used. 
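+         * The resulting naming history is:
+         *
+         *   Other (version 0) -> Other2 (version 1) -> Other (version 2)
+         *
+         * Only the version-1-to-2 Renamer is configured below; per the class
+         * comment, version 0's Other is identical to version 2's, so no
+         * mutation is needed for that step.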
+ */ + String clsName = getClass().getName() + "$Other"; + Renamer renamer = new Renamer(clsName + '2', 1, clsName); + Mutations mutations = new Mutations(); + mutations.addRenamer(renamer); + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setMutations(mutations); + return new EntityStore(env, "foo", storeConfig); + } + + private void close(EntityStore store) + throws DatabaseException { + + store.close(); + env.close(); + env = null; + } + + @Test + public void testDevolution() + throws DatabaseException, IOException { + + /* Copy log file resource to log file zero. */ + TestUtils.loadLog(getClass(), "DevolutionTest.jdb", envHome); + + EntityStore store = open(); + + PrimaryIndex index = + store.getPrimaryIndex(Long.class, MyEntity.class); + + MyEntity entity = index.get(1L); + assertNotNull(entity); + assertEquals(123, entity.b); + + close(store); + } + + public void xtestSetup() + throws DatabaseException { + + EntityStore store = open(); + + PrimaryIndex index = + store.getPrimaryIndex(Long.class, MyEntity.class); + + MyEntity entity = new MyEntity(); + entity.key = 1L; + entity.b = 123; + index.put(entity); + + close(store); + } + + /** + * This class name is changed from Other to Other2 in version 1 and back to + * Other in the version 2. testSetup is executed for versions 0 and 1, + * which evolves the format. testDevolution is run with version 2. + */ + @Persistent(version=2) + static class Other { + } + + @Entity(version=0) + static class MyEntity { + + @PrimaryKey + long key; + + Other[] a; + + int b; + + private MyEntity() {} + } +} diff --git a/test/com/sleepycat/persist/test/DevolutionTest.jdb b/test/com/sleepycat/persist/test/DevolutionTest.jdb new file mode 100644 index 0000000..8f7456f Binary files /dev/null and b/test/com/sleepycat/persist/test/DevolutionTest.jdb differ diff --git a/test/com/sleepycat/persist/test/Enhanced0.ASMified b/test/com/sleepycat/persist/test/Enhanced0.ASMified new file mode 100644 index 0000000..ba47e3b --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced0.ASMified @@ -0,0 +1,82 @@ +package asm.com.sleepycat.persist.test; +import java.util.*; +import org.objectweb.asm.*; +import org.objectweb.asm.attrs.*; +public class Enhanced0Dump implements Opcodes { + +public static byte[] dump () throws Exception { + +ClassWriter cw = new ClassWriter(false); +FieldVisitor fv; +MethodVisitor mv; +AnnotationVisitor av0; + +cw.visit(V1_5, ACC_SUPER, "com/sleepycat/persist/test/Enhanced0", null, "java/lang/Object", null); + +cw.visitSource("Enhanced0.java", null); + +{ +av0 = cw.visitAnnotation("Lcom/sleepycat/persist/model/Entity;", true); +av0.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f1", "Ljava/lang/String;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/PrimaryKey;", true); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f2", "I", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/SecondaryKey;", true); +av0.visitEnum("relate", "Lcom/sleepycat/persist/model/Relationship;", "MANY_TO_ONE"); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f3", "Ljava/lang/String;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/SecondaryKey;", true); +av0.visitEnum("relate", "Lcom/sleepycat/persist/model/Relationship;", "MANY_TO_ONE"); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f4", "Ljava/lang/String;", null, null); +{ +av0 = 
fv.visitAnnotation("Lcom/sleepycat/persist/model/SecondaryKey;", true); +av0.visitEnum("relate", "Lcom/sleepycat/persist/model/Relationship;", "MANY_TO_ONE"); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f5", "I", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f6", "Ljava/lang/String;", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f7", "Ljava/lang/String;", null, null); +fv.visitEnd(); +} +{ +mv = cw.visitMethod(0, "", "()V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "", "()V"); +mv.visitInsn(RETURN); +mv.visitMaxs(1, 1); +mv.visitEnd(); +} +cw.visitEnd(); + +return cw.toByteArray(); +} +} diff --git a/test/com/sleepycat/persist/test/Enhanced0.java b/test/com/sleepycat/persist/test/Enhanced0.java new file mode 100644 index 0000000..8583c97 --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced0.java @@ -0,0 +1,41 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; + +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; + +/** + * For running ASMifier -- before any enhancements. 
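+ *
+ * The intended invocation is along these lines (the tool class name varies
+ * by ASM release -- org.objectweb.asm.util.ASMifier in ASM 4 and later, an
+ * ASMifierClassVisitor in the older releases used here -- so this is only a
+ * sketch):
+ *
+ *   java -cp asm-all.jar:classes org.objectweb.asm.util.ASMifierClassVisitor \
+ *       com.sleepycat.persist.test.Enhanced0 > Enhanced0.ASMified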
+ */ +@Entity +class Enhanced0 { + + @PrimaryKey + private String f1; + + @SecondaryKey(relate=MANY_TO_ONE) + private int f2; + @SecondaryKey(relate=MANY_TO_ONE) + private String f3; + @SecondaryKey(relate=MANY_TO_ONE) + private String f4; + + private int f5; + private String f6; + private String f7; +} diff --git a/test/com/sleepycat/persist/test/Enhanced1.ASMified b/test/com/sleepycat/persist/test/Enhanced1.ASMified new file mode 100644 index 0000000..39036e9 --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced1.ASMified @@ -0,0 +1,607 @@ +package asm.com.sleepycat.persist.test; +import java.util.*; +import org.objectweb.asm.*; +import org.objectweb.asm.attrs.*; +public class Enhanced1Dump implements Opcodes { + +public static byte[] dump () throws Exception { + +ClassWriter cw = new ClassWriter(false); +FieldVisitor fv; +MethodVisitor mv; +AnnotationVisitor av0; + +cw.visit(V1_5, ACC_SUPER, "com/sleepycat/persist/test/Enhanced1", null, "java/lang/Object", new String[] { "com/sleepycat/persist/impl/Enhanced" }); + +cw.visitSource("Enhanced1.java", null); + +{ +av0 = cw.visitAnnotation("Lcom/sleepycat/persist/model/Entity;", true); +av0.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f1", "Ljava/lang/String;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/PrimaryKey;", true); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f2", "I", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/SecondaryKey;", true); +av0.visitEnum("relate", "Lcom/sleepycat/persist/model/Relationship;", "MANY_TO_ONE"); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f3", "Ljava/lang/String;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/SecondaryKey;", true); +av0.visitEnum("relate", "Lcom/sleepycat/persist/model/Relationship;", "MANY_TO_ONE"); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f4", "Ljava/lang/String;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/SecondaryKey;", true); +av0.visitEnum("relate", "Lcom/sleepycat/persist/model/Relationship;", "MANY_TO_ONE"); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f5", "I", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f6", "Ljava/lang/String;", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f7", "Ljava/lang/String;", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f8", "I", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f9", "I", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f10", "I", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f11", "I", null, null); +fv.visitEnd(); +} +{ +fv = cw.visitField(ACC_PRIVATE, "f12", "I", null, null); +fv.visitEnd(); +} +{ +mv = cw.visitMethod(0, "", "()V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "", "()V"); +mv.visitInsn(RETURN); +mv.visitMaxs(1, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNewInstance", "()Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitTypeInsn(NEW, "com/sleepycat/persist/test/Enhanced1"); +mv.visitInsn(DUP); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "", "()V"); +mv.visitInsn(ARETURN); +mv.visitMaxs(2, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNewArray", "(I)Ljava/lang/Object;", null, null); 
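+// The generated body below corresponds to Enhanced1.bdbNewArray: ILOAD 1,
+// ANEWARRAY Enhanced1, ARETURN -- i.e. "return new Enhanced1[len];".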
+mv.visitCode(); +mv.visitVarInsn(ILOAD, 1); +mv.visitTypeInsn(ANEWARRAY, "com/sleepycat/persist/test/Enhanced1"); +mv.visitInsn(ARETURN); +mv.visitMaxs(1, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbIsPriKeyFieldNullOrZero", "()Z", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f1", "Ljava/lang/String;"); +Label l0 = new Label(); +mv.visitJumpInsn(IFNONNULL, l0); +mv.visitInsn(ICONST_1); +Label l1 = new Label(); +mv.visitJumpInsn(GOTO, l1); +mv.visitLabel(l0); +mv.visitInsn(ICONST_0); +mv.visitLabel(l1); +mv.visitInsn(IRETURN); +mv.visitMaxs(1, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWritePriKeyField", "(Lcom/sleepycat/persist/impl/EntityOutput;Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f1", "Ljava/lang/String;"); +mv.visitVarInsn(ALOAD, 2); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeKeyObject", "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadPriKeyField", "(Lcom/sleepycat/persist/impl/EntityInput;Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 2); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readKeyObject", "(Lcom/sleepycat/persist/impl/Format;)Ljava/lang/Object;"); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f1", "Ljava/lang/String;"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f1", "Ljava/lang/String;"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "registerPriKeyObject", "(Ljava/lang/Object;)V"); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f2", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f3", "Ljava/lang/String;"); +mv.visitInsn(ACONST_NULL); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeObject", "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f4", "Ljava/lang/String;"); +mv.visitInsn(ACONST_NULL); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeObject", "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, 
"com/sleepycat/persist/test/Enhanced1", "f1", "Ljava/lang/String;"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "registerPriKeyObject", "(Ljava/lang/Object;)V"); +mv.visitVarInsn(ILOAD, 4); +Label l0 = new Label(); +mv.visitJumpInsn(IFGT, l0); +mv.visitVarInsn(ILOAD, 2); +Label l1 = new Label(); +Label l2 = new Label(); +Label l3 = new Label(); +mv.visitTableSwitchInsn(0, 2, l0, new Label[] { l1, l2, l3 }); +mv.visitLabel(l1); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f2", "I"); +mv.visitVarInsn(ILOAD, 3); +mv.visitJumpInsn(IFNE, l2); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l2); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readObject", "()Ljava/lang/Object;"); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f3", "Ljava/lang/String;"); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_1); +mv.visitJumpInsn(IF_ICMPNE, l3); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l3); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readObject", "()Ljava/lang/Object;"); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f4", "Ljava/lang/String;"); +mv.visitLabel(l0); +mv.visitInsn(RETURN); +mv.visitMaxs(2, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f5", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f6", "Ljava/lang/String;"); +mv.visitInsn(ACONST_NULL); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeObject", "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f7", "Ljava/lang/String;"); +mv.visitInsn(ACONST_NULL); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeObject", "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f8", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f9", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f10", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, 
"com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f11", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f12", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 4); +Label l0 = new Label(); +mv.visitJumpInsn(IFGT, l0); +mv.visitVarInsn(ILOAD, 2); +Label l1 = new Label(); +Label l2 = new Label(); +Label l3 = new Label(); +Label l4 = new Label(); +Label l5 = new Label(); +Label l6 = new Label(); +Label l7 = new Label(); +Label l8 = new Label(); +mv.visitTableSwitchInsn(0, 7, l0, new Label[] { l1, l2, l3, l4, l5, l6, l7, l8 }); +mv.visitLabel(l1); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f5", "I"); +mv.visitVarInsn(ILOAD, 3); +mv.visitJumpInsn(IFNE, l2); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l2); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readObject", "()Ljava/lang/Object;"); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f6", "Ljava/lang/String;"); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_1); +mv.visitJumpInsn(IF_ICMPNE, l3); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l3); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readObject", "()Ljava/lang/Object;"); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f7", "Ljava/lang/String;"); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_2); +mv.visitJumpInsn(IF_ICMPNE, l4); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l4); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f8", "I"); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_3); +mv.visitJumpInsn(IF_ICMPNE, l5); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l5); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f9", "I"); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_4); +mv.visitJumpInsn(IF_ICMPNE, l6); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l6); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", 
"f10", "I"); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_5); +mv.visitJumpInsn(IF_ICMPNE, l7); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l7); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f11", "I"); +mv.visitVarInsn(ILOAD, 3); +mv.visitIntInsn(BIPUSH, 6); +mv.visitJumpInsn(IF_ICMPNE, l8); +mv.visitJumpInsn(GOTO, l0); +mv.visitLabel(l8); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f12", "I"); +mv.visitLabel(l0); +mv.visitInsn(RETURN); +mv.visitMaxs(2, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteCompositeKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;[Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadCompositeKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;[Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNullifyKeyField", "(Ljava/lang/Object;IIZLjava/lang/Object;)Z", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 3); +Label l0 = new Label(); +mv.visitJumpInsn(IFLE, l0); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitLabel(l0); +mv.visitVarInsn(ILOAD, 4); +Label l1 = new Label(); +mv.visitJumpInsn(IFEQ, l1); +mv.visitVarInsn(ILOAD, 2); +Label l2 = new Label(); +Label l3 = new Label(); +Label l4 = new Label(); +mv.visitLookupSwitchInsn(l4, new int[] { 1, 2 }, new Label[] { l2, l3 }); +mv.visitLabel(l2); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f3", "Ljava/lang/String;"); +Label l5 = new Label(); +mv.visitJumpInsn(IFNULL, l5); +mv.visitVarInsn(ALOAD, 0); +mv.visitInsn(ACONST_NULL); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f3", "Ljava/lang/String;"); +mv.visitInsn(ICONST_1); +mv.visitInsn(IRETURN); +mv.visitLabel(l5); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitLabel(l3); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f4", "Ljava/lang/String;"); +Label l6 = new Label(); +mv.visitJumpInsn(IFNULL, l6); +mv.visitVarInsn(ALOAD, 0); +mv.visitInsn(ACONST_NULL); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f4", "Ljava/lang/String;"); +mv.visitInsn(ICONST_1); +mv.visitInsn(IRETURN); +mv.visitLabel(l6); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitLabel(l4); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitLabel(l1); +mv.visitVarInsn(ILOAD, 2); +Label l7 = new Label(); +Label l8 = new Label(); +Label l9 = new Label(); +mv.visitLookupSwitchInsn(l9, new int[] { 1, 2 }, new Label[] { l7, l8 }); +mv.visitLabel(l7); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f6", "Ljava/lang/String;"); +Label l10 = new Label(); +mv.visitJumpInsn(IFNULL, l10); +mv.visitVarInsn(ALOAD, 0); +mv.visitInsn(ACONST_NULL); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f6", "Ljava/lang/String;"); +mv.visitInsn(ICONST_1); +mv.visitInsn(IRETURN); +mv.visitLabel(l10); +mv.visitInsn(ICONST_0); 
+mv.visitInsn(IRETURN); +mv.visitLabel(l8); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f7", "Ljava/lang/String;"); +Label l11 = new Label(); +mv.visitJumpInsn(IFNULL, l11); +mv.visitVarInsn(ALOAD, 0); +mv.visitInsn(ACONST_NULL); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f7", "Ljava/lang/String;"); +mv.visitInsn(ICONST_1); +mv.visitInsn(IRETURN); +mv.visitLabel(l11); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitLabel(l9); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitMaxs(2, 6); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbGetField", "(Ljava/lang/Object;IIZ)Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 3); +Label l0 = new Label(); +mv.visitJumpInsn(IFLE, l0); +Label l1 = new Label(); +mv.visitJumpInsn(GOTO, l1); +mv.visitLabel(l0); +mv.visitVarInsn(ILOAD, 4); +Label l2 = new Label(); +mv.visitJumpInsn(IFEQ, l2); +mv.visitVarInsn(ILOAD, 2); +Label l3 = new Label(); +Label l4 = new Label(); +Label l5 = new Label(); +Label l6 = new Label(); +mv.visitTableSwitchInsn(0, 2, l6, new Label[] { l3, l4, l5 }); +mv.visitLabel(l3); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f2", "I"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Integer", "valueOf", "(I)Ljava/lang/Integer;"); +mv.visitInsn(ARETURN); +mv.visitLabel(l4); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f3", "Ljava/lang/String;"); +mv.visitInsn(ARETURN); +mv.visitLabel(l5); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f4", "Ljava/lang/String;"); +mv.visitInsn(ARETURN); +mv.visitLabel(l6); +mv.visitJumpInsn(GOTO, l1); +mv.visitLabel(l2); +mv.visitVarInsn(ILOAD, 2); +Label l7 = new Label(); +Label l8 = new Label(); +Label l9 = new Label(); +mv.visitTableSwitchInsn(0, 2, l1, new Label[] { l7, l8, l9 }); +mv.visitLabel(l7); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f5", "I"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Integer", "valueOf", "(I)Ljava/lang/Integer;"); +mv.visitInsn(ARETURN); +mv.visitLabel(l8); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f6", "Ljava/lang/String;"); +mv.visitInsn(ARETURN); +mv.visitLabel(l9); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced1", "f7", "Ljava/lang/String;"); +mv.visitInsn(ARETURN); +mv.visitLabel(l1); +mv.visitInsn(ACONST_NULL); +mv.visitInsn(ARETURN); +mv.visitMaxs(1, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbSetField", "(Ljava/lang/Object;IIZLjava/lang/Object;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 3); +Label l0 = new Label(); +mv.visitJumpInsn(IFLE, l0); +Label l1 = new Label(); +mv.visitJumpInsn(GOTO, l1); +mv.visitLabel(l0); +mv.visitVarInsn(ILOAD, 4); +Label l2 = new Label(); +mv.visitJumpInsn(IFEQ, l2); +mv.visitVarInsn(ILOAD, 2); +Label l3 = new Label(); +Label l4 = new Label(); +Label l5 = new Label(); +Label l6 = new Label(); +mv.visitTableSwitchInsn(0, 2, l6, new Label[] { l3, l4, l5 }); +mv.visitLabel(l3); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 5); +mv.visitTypeInsn(CHECKCAST, "java/lang/Integer"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Integer", "intValue", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f2", "I"); +mv.visitInsn(RETURN); 
+mv.visitLabel(l4); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 5); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f3", "Ljava/lang/String;"); +mv.visitInsn(RETURN); +mv.visitLabel(l5); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 5); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f4", "Ljava/lang/String;"); +mv.visitInsn(RETURN); +mv.visitLabel(l6); +mv.visitJumpInsn(GOTO, l1); +mv.visitLabel(l2); +mv.visitVarInsn(ILOAD, 2); +Label l7 = new Label(); +Label l8 = new Label(); +Label l9 = new Label(); +mv.visitTableSwitchInsn(0, 2, l1, new Label[] { l7, l8, l9 }); +mv.visitLabel(l7); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 5); +mv.visitTypeInsn(CHECKCAST, "java/lang/Integer"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Integer", "intValue", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f5", "I"); +mv.visitInsn(RETURN); +mv.visitLabel(l8); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 5); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f6", "Ljava/lang/String;"); +mv.visitInsn(RETURN); +mv.visitLabel(l9); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 5); +mv.visitTypeInsn(CHECKCAST, "java/lang/String"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced1", "f7", "Ljava/lang/String;"); +mv.visitInsn(RETURN); +mv.visitLabel(l1); +mv.visitInsn(RETURN); +mv.visitMaxs(2, 6); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_STATIC, "<clinit>", "()V", null, null); +mv.visitCode(); +mv.visitInsn(ACONST_NULL); +mv.visitTypeInsn(NEW, "com/sleepycat/persist/test/Enhanced1"); +mv.visitInsn(DUP); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "<init>", "()V"); +mv.visitMethodInsn(INVOKESTATIC, "com/sleepycat/persist/impl/EnhancedAccessor", "registerClass", "(Ljava/lang/String;Lcom/sleepycat/persist/impl/Enhanced;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 0); +mv.visitEnd(); +} +cw.visitEnd(); + +return cw.toByteArray(); +} +} diff --git a/test/com/sleepycat/persist/test/Enhanced1.java b/test/com/sleepycat/persist/test/Enhanced1.java new file mode 100644 index 0000000..632b191 --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced1.java @@ -0,0 +1,282 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; + +import com.sleepycat.persist.impl.Enhanced; +import com.sleepycat.persist.impl.EnhancedAccessor; +import com.sleepycat.persist.impl.EntityInput; +import com.sleepycat.persist.impl.EntityOutput; +import com.sleepycat.persist.impl.Format; +import com.sleepycat.persist.impl.RefreshException; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; + +/** + * For running ASMifier -- adds minimal enhancements.
+ */ +@Entity +class Enhanced1 implements Enhanced { + + @PrimaryKey + private String f1; + + @SecondaryKey(relate=MANY_TO_ONE) + private int f2; + @SecondaryKey(relate=MANY_TO_ONE) + private String f3; + @SecondaryKey(relate=MANY_TO_ONE) + private String f4; + + private int f5; + private String f6; + private String f7; + private int f8; + private int f9; + private int f10; + private int f11; + private int f12; + + static { + EnhancedAccessor.registerClass(null, new Enhanced1()); + } + + public Object bdbNewInstance() { + return new Enhanced1(); + } + + public Object bdbNewArray(int len) { + return new Enhanced1[len]; + } + + public boolean bdbIsPriKeyFieldNullOrZero() { + return f1 == null; + } + + public void bdbWritePriKeyField(EntityOutput output, Format format) + throws RefreshException { + + output.writeKeyObject(f1, format); + } + + public void bdbReadPriKeyField(EntityInput input, Format format) + throws RefreshException { + + f1 = (String) input.readKeyObject(format); + } + + public void bdbWriteSecKeyFields(EntityOutput output) + throws RefreshException { + + /* If primary key is an object: */ + output.registerPriKeyObject(f1); + /* Always: */ + output.writeInt(f2); + output.writeObject(f3, null); + output.writeObject(f4, null); + } + + public void bdbReadSecKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + /* If primary key is an object: */ + input.registerPriKeyObject(f1); + + if (superLevel <= 0) { + switch (startField) { + case 0: + f2 = input.readInt(); + if (endField == 0) break; + case 1: + f3 = (String) input.readObject(); + if (endField == 1) break; + case 2: + f4 = (String) input.readObject(); + } + } + } + + public void bdbWriteNonKeyFields(EntityOutput output) + throws RefreshException { + + output.writeInt(f5); + output.writeObject(f6, null); + output.writeObject(f7, null); + output.writeInt(f8); + output.writeInt(f9); + output.writeInt(f10); + output.writeInt(f11); + output.writeInt(f12); + } + + public void bdbReadNonKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + if (superLevel <= 0) { + switch (startField) { + case 0: + f5 = input.readInt(); + if (endField == 0) break; + case 1: + f6 = (String) input.readObject(); + if (endField == 1) break; + case 2: + f7 = (String) input.readObject(); + if (endField == 2) break; + case 3: + f8 = input.readInt(); + if (endField == 3) break; + case 4: + f9 = input.readInt(); + if (endField == 4) break; + case 5: + f10 = input.readInt(); + if (endField == 5) break; + case 6: + f11 = input.readInt(); + if (endField == 6) break; + case 7: + f12 = input.readInt(); + } + } + } + + public void bdbWriteCompositeKeyFields(EntityOutput output, + Format[] formats) { + } + + public void bdbReadCompositeKeyFields(EntityInput input, + Format[] formats) { + } + + public boolean bdbNullifyKeyField(Object o, + int field, + int superLevel, + boolean isSecField, + Object keyElement) { + if (superLevel > 0) { + return false; + } else if (isSecField) { + switch (field) { + case 1: + if (f3 != null) { + f3 = null; + return true; + } else { + return false; + } + case 2: + if (f4 != null) { + f4 = null; + return true; + } else { + return false; + } + default: + return false; + } + } else { + switch (field) { + case 1: + if (f6 != null) { + f6 = null; + return true; + } else { + return false; + } + case 2: + if (f7 != null) { + f7 = null; + return true; + } else { + return false; + } + default: + return false; + } + } + } + + 
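+ /*
+ * The Enhanced1.ASMified dump above is the style of source that ASM's
+ * ASMifier utility prints for the compiled form of this class. A minimal
+ * sketch of turning such dumped bytes back into a live class -- assuming
+ * only the standard ClassLoader API; the DumpLoader name and the
+ * Enhanced1Dump reference (the generated class in the
+ * asm.com.sleepycat.persist.test package of the dump file) are
+ * illustrative, not part of this test suite:
+ *
+ * final class DumpLoader extends ClassLoader {
+ * Class<?> define(String name, byte[] bytes) {
+ * // defineClass converts raw bytecode into a Class object.
+ * return defineClass(name, bytes, 0, bytes.length);
+ * }
+ * }
+ *
+ * // Usage: materialize the hand-maintained dump for comparison against
+ * // this reference class.
+ * Class<?> cls = new DumpLoader().define(
+ * "com.sleepycat.persist.test.Enhanced1", Enhanced1Dump.dump());
+ */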
public Object bdbGetField(Object o, + int field, + int superLevel, + boolean isSecField) { + if (superLevel > 0) { + } else if (isSecField) { + switch (field) { + case 0: + return Integer.valueOf(f2); + case 1: + return f3; + case 2: + return f4; + } + } else { + switch (field) { + case 0: + return Integer.valueOf(f5); + case 1: + return f6; + case 2: + return f7; + } + } + return null; + } + + public void bdbSetField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value) { + if (superLevel > 0) { + } else if (isSecField) { + switch (field) { + case 0: + f2 = ((Integer) value).intValue(); + return; + case 1: + f3 = (String) value; + return; + case 2: + f4 = (String) value; + return; + } + } else { + switch (field) { + case 0: + f5 = ((Integer) value).intValue(); + return; + case 1: + f6 = (String) value; + return; + case 2: + f7 = (String) value; + return; + } + } + } + + public void bdbSetPriField(Object o, Object value) { + f1 = (String) value; + } +} diff --git a/test/com/sleepycat/persist/test/Enhanced2.ASMified b/test/com/sleepycat/persist/test/Enhanced2.ASMified new file mode 100644 index 0000000..0364a32 --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced2.ASMified @@ -0,0 +1,218 @@ +package asm.com.sleepycat.persist.test; +import java.util.*; +import org.objectweb.asm.*; +import org.objectweb.asm.attrs.*; +public class Enhanced2Dump implements Opcodes { + +public static byte[] dump () throws Exception { + +ClassWriter cw = new ClassWriter(false); +FieldVisitor fv; +MethodVisitor mv; +AnnotationVisitor av0; + +cw.visit(V1_5, ACC_SUPER, "com/sleepycat/persist/test/Enhanced2", null, "com/sleepycat/persist/test/Enhanced1", null); + +cw.visitSource("Enhanced2.java", null); + +{ +av0 = cw.visitAnnotation("Lcom/sleepycat/persist/model/Persistent;", true); +av0.visitEnd(); +} +{ +mv = cw.visitMethod(0, "<init>", "()V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "<init>", "()V"); +mv.visitInsn(RETURN); +mv.visitMaxs(1, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNewInstance", "()Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitTypeInsn(NEW, "com/sleepycat/persist/test/Enhanced2"); +mv.visitInsn(DUP); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced2", "<init>", "()V"); +mv.visitInsn(ARETURN); +mv.visitMaxs(2, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNewArray", "(I)Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 1); +mv.visitTypeInsn(ANEWARRAY, "com/sleepycat/persist/test/Enhanced2"); +mv.visitInsn(ARETURN); +mv.visitMaxs(1, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbIsPriKeyFieldNullOrZero", "()Z", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbIsPriKeyFieldNullOrZero", "()Z"); +mv.visitInsn(IRETURN); +mv.visitMaxs(1, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWritePriKeyField", "(Lcom/sleepycat/persist/impl/EntityOutput;Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 2); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbWritePriKeyField", "(Lcom/sleepycat/persist/impl/EntityOutput;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC,
"bdbReadPriKeyField", "(Lcom/sleepycat/persist/impl/EntityInput;Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 2); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbReadPriKeyField", "(Lcom/sleepycat/persist/impl/EntityInput;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbWriteSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(2, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 4); +Label l0 = new Label(); +mv.visitJumpInsn(IFEQ, l0); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ILOAD, 2); +mv.visitVarInsn(ILOAD, 3); +mv.visitVarInsn(ILOAD, 4); +mv.visitInsn(ICONST_1); +mv.visitInsn(ISUB); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbReadSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V"); +mv.visitLabel(l0); +mv.visitInsn(RETURN); +mv.visitMaxs(6, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbWriteNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(2, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 4); +Label l0 = new Label(); +mv.visitJumpInsn(IFEQ, l0); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ILOAD, 2); +mv.visitVarInsn(ILOAD, 3); +mv.visitVarInsn(ILOAD, 4); +mv.visitInsn(ICONST_1); +mv.visitInsn(ISUB); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbReadNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V"); +mv.visitLabel(l0); +mv.visitInsn(RETURN); +mv.visitMaxs(6, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNullifyKeyField", "(Ljava/lang/Object;IIZLjava/lang/Object;)Z", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 3); +Label l0 = new Label(); +mv.visitJumpInsn(IFLE, l0); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ILOAD, 2); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_1); +mv.visitInsn(ISUB); +mv.visitVarInsn(ILOAD, 4); +mv.visitVarInsn(ALOAD, 5); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbNullifyKeyField", "(Ljava/lang/Object;IIZLjava/lang/Object;)Z"); +mv.visitInsn(IRETURN); +mv.visitLabel(l0); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitMaxs(6, 6); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbGetField", "(Ljava/lang/Object;IIZ)Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 3); +Label l0 = new Label(); +mv.visitJumpInsn(IFLE, l0); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ILOAD, 2); 
+mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_1); +mv.visitInsn(ISUB); +mv.visitVarInsn(ILOAD, 4); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbGetField", "(Ljava/lang/Object;IIZ)Ljava/lang/Object;"); +mv.visitInsn(ARETURN); +mv.visitLabel(l0); +mv.visitInsn(ACONST_NULL); +mv.visitInsn(ARETURN); +mv.visitMaxs(5, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbSetField", "(Ljava/lang/Object;IIZLjava/lang/Object;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 3); +Label l0 = new Label(); +mv.visitJumpInsn(IFLE, l0); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ILOAD, 2); +mv.visitVarInsn(ILOAD, 3); +mv.visitInsn(ICONST_1); +mv.visitInsn(ISUB); +mv.visitVarInsn(ILOAD, 4); +mv.visitVarInsn(ALOAD, 5); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced1", "bdbSetField", "(Ljava/lang/Object;IIZLjava/lang/Object;)V"); +mv.visitLabel(l0); +mv.visitInsn(RETURN); +mv.visitMaxs(6, 6); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_STATIC, "<clinit>", "()V", null, null); +mv.visitCode(); +mv.visitInsn(ACONST_NULL); +mv.visitTypeInsn(NEW, "com/sleepycat/persist/test/Enhanced2"); +mv.visitInsn(DUP); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced2", "<init>", "()V"); +mv.visitMethodInsn(INVOKESTATIC, "com/sleepycat/persist/impl/EnhancedAccessor", "registerClass", "(Ljava/lang/String;Lcom/sleepycat/persist/impl/Enhanced;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 0); +mv.visitEnd(); +} +cw.visitEnd(); + +return cw.toByteArray(); +} +} diff --git a/test/com/sleepycat/persist/test/Enhanced2.java b/test/com/sleepycat/persist/test/Enhanced2.java new file mode 100644 index 0000000..93aaa2a --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced2.java @@ -0,0 +1,132 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import com.sleepycat.persist.impl.EnhancedAccessor; +import com.sleepycat.persist.impl.EntityInput; +import com.sleepycat.persist.impl.EntityOutput; +import com.sleepycat.persist.impl.Format; +import com.sleepycat.persist.impl.RefreshException; +import com.sleepycat.persist.model.Persistent; + +/** + * For running ASMifier -- entity subclass.
+ */ +@Persistent +class Enhanced2 extends Enhanced1 { + + static { + EnhancedAccessor.registerClass(null, new Enhanced2()); + } + + public Object bdbNewInstance() { + return new Enhanced2(); + } + + public Object bdbNewArray(int len) { + return new Enhanced2[len]; + } + + public boolean bdbIsPriKeyFieldNullOrZero() { + return super.bdbIsPriKeyFieldNullOrZero(); + } + + public void bdbWritePriKeyField(EntityOutput output, Format format) + throws RefreshException { + + super.bdbWritePriKeyField(output, format); + } + + public void bdbReadPriKeyField(EntityInput input, Format format) + throws RefreshException { + + super.bdbReadPriKeyField(input, format); + } + + public void bdbWriteSecKeyFields(EntityOutput output) + throws RefreshException { + + super.bdbWriteSecKeyFields(output); + } + + public void bdbReadSecKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + if (superLevel != 0) { + super.bdbReadSecKeyFields + (input, startField, endField, superLevel - 1); + } + } + + public void bdbWriteNonKeyFields(EntityOutput output) + throws RefreshException { + + super.bdbWriteNonKeyFields(output); + } + + public void bdbReadNonKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) + throws RefreshException { + + if (superLevel != 0) { + super.bdbReadNonKeyFields + (input, startField, endField, superLevel - 1); + } + } + + public boolean bdbNullifyKeyField(Object o, + int field, + int superLevel, + boolean isSecField, + Object keyElement) { + if (superLevel > 0) { + return super.bdbNullifyKeyField + (o, field, superLevel - 1, isSecField, keyElement); + } else { + return false; + } + } + + public Object bdbGetField(Object o, + int field, + int superLevel, + boolean isSecField) { + if (superLevel > 0) { + return super.bdbGetField + (o, field, superLevel - 1, isSecField); + } else { + return null; + } + } + + public void bdbSetField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value) { + if (superLevel > 0) { + super.bdbSetField + (o, field, superLevel - 1, isSecField, value); + } + } + + public void bdbSetPriField(Object o, Object value) { + super.bdbSetPriField(o, value); + } +} diff --git a/test/com/sleepycat/persist/test/Enhanced3.ASMified b/test/com/sleepycat/persist/test/Enhanced3.ASMified new file mode 100644 index 0000000..8b6b193 --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced3.ASMified @@ -0,0 +1,541 @@ +package asm.com.sleepycat.persist.test; +import java.util.*; +import org.objectweb.asm.*; +import org.objectweb.asm.attrs.*; +public class Enhanced3Dump implements Opcodes { + +public static byte[] dump () throws Exception { + +ClassWriter cw = new ClassWriter(false); +FieldVisitor fv; +MethodVisitor mv; +AnnotationVisitor av0; + +cw.visit(V1_5, ACC_SUPER, "com/sleepycat/persist/test/Enhanced3", null, "java/lang/Object", new String[] { "com/sleepycat/persist/impl/Enhanced" }); + +cw.visitSource("Enhanced3.java", null); + +{ +av0 = cw.visitAnnotation("Lcom/sleepycat/persist/model/Persistent;", true); +av0.visitEnd(); +} +cw.visitInnerClass("com/sleepycat/persist/test/Enhanced3$MyEnum", "com/sleepycat/persist/test/Enhanced3", "MyEnum", ACC_FINAL + ACC_STATIC + ACC_ENUM); + +{ +fv = cw.visitField(0, "z", "Z", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(1)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "c", "C", null, null); +{ +av0 = 
fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(2)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "b", "B", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(3)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "s", "S", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(4)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "i", "I", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(5)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "l", "J", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(6)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "f", "F", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(7)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "d", "D", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(8)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "zw", "Ljava/lang/Boolean;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(9)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "cw", "Ljava/lang/Character;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(10)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "bw", "Ljava/lang/Byte;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(11)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "sw", "Ljava/lang/Short;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(12)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "iw", "Ljava/lang/Integer;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(13)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "lw", "Ljava/lang/Long;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(14)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "fw", "Ljava/lang/Float;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(15)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "dw", "Ljava/lang/Double;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(16)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "date", "Ljava/util/Date;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(17)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "str", "Ljava/lang/String;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(18)); 
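+// Each composite-key field in this dump is declared the same way:
+// visitField for the field, then a KeyField annotation whose "value"
+// ordinal (18 for "str" just above) fixes the field's position within the
+// composite key, closed by visitEnd() on the annotation and the field.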
+av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "e", "Lcom/sleepycat/persist/test/Enhanced3$MyEnum;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(19)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +fv = cw.visitField(0, "bigint", "Ljava/math/BigInteger;", null, null); +{ +av0 = fv.visitAnnotation("Lcom/sleepycat/persist/model/KeyField;", true); +av0.visit("value", new Integer(20)); +av0.visitEnd(); +} +fv.visitEnd(); +} +{ +mv = cw.visitMethod(0, "<init>", "()V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V"); +mv.visitInsn(RETURN); +mv.visitMaxs(1, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNewInstance", "()Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitTypeInsn(NEW, "com/sleepycat/persist/test/Enhanced3"); +mv.visitInsn(DUP); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced3", "<init>", "()V"); +mv.visitInsn(ARETURN); +mv.visitMaxs(2, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNewArray", "(I)Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitVarInsn(ILOAD, 1); +mv.visitTypeInsn(ANEWARRAY, "com/sleepycat/persist/test/Enhanced3"); +mv.visitInsn(ARETURN); +mv.visitMaxs(1, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbIsPriKeyFieldNullOrZero", "()Z", null, null); +mv.visitCode(); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitMaxs(1, 1); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWritePriKeyField", "(Lcom/sleepycat/persist/impl/EntityOutput;Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadPriKeyField", "(Lcom/sleepycat/persist/impl/EntityInput;Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadSecKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 2); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadNonKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;III)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbWriteCompositeKeyFields", "(Lcom/sleepycat/persist/impl/EntityOutput;[Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "z", "Z"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeBoolean", "(Z)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "c", "C"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeChar", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "b", "B"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeByte", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "s", "S"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeShort", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "i", "I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "l", "J"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeLong", "(J)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "f", "F"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeSortedFloat", "(F)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "d", "D"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeSortedDouble", "(D)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "zw", "Ljava/lang/Boolean;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Boolean", "booleanValue", "()Z"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeBoolean", "(Z)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "cw", "Ljava/lang/Character;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Character", "charValue", "()C"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeChar", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "bw", "Ljava/lang/Byte;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Byte", "byteValue", "()B"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeByte", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "sw", "Ljava/lang/Short;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Short", "shortValue", "()S"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeShort", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "iw", "Ljava/lang/Integer;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Integer", "intValue", "()I"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeInt", "(I)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "lw", "Ljava/lang/Long;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Long", "longValue", "()J"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeLong", "(J)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "fw", "Ljava/lang/Float;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Float", "floatValue", "()F"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeSortedFloat", "(F)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "dw", "Ljava/lang/Double;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Double", "doubleValue", "()D"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeSortedDouble", "(D)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "date", "Ljava/util/Date;"); +mv.visitMethodInsn(INVOKEVIRTUAL, "java/util/Date", "getTime", "()J"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeLong", "(J)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "str", "Ljava/lang/String;"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeString", "(Ljava/lang/String;)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "e", "Lcom/sleepycat/persist/test/Enhanced3$MyEnum;"); +mv.visitVarInsn(ALOAD, 2); +mv.visitIntInsn(BIPUSH, 18); +mv.visitInsn(AALOAD); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeKeyObject", "(Ljava/lang/Object;Lcom/sleepycat/persist/impl/Format;)V"); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 0); +mv.visitFieldInsn(GETFIELD, "com/sleepycat/persist/test/Enhanced3", "bigint", "Ljava/math/BigInteger;"); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityOutput", "writeBigInteger", "(Ljava/math/BigInteger;)Lcom/sleepycat/bind/tuple/TupleOutput;"); +mv.visitInsn(POP); +mv.visitInsn(RETURN); +mv.visitMaxs(4, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbReadCompositeKeyFields", "(Lcom/sleepycat/persist/impl/EntityInput;[Lcom/sleepycat/persist/impl/Format;)V", null, null); +mv.visitCode(); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readBoolean", "()Z"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "z", "Z"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readChar", "()C"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "c", "C"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readByte", "()B"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "b", "B"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readShort", "()S"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "s", "S"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "i", "I"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readLong", "()J"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "l", "J"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readSortedFloat", "()F"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "f", "F"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readSortedDouble", "()D"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "d", "D"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readBoolean", "()Z"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Boolean", "valueOf", "(Z)Ljava/lang/Boolean;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "zw", "Ljava/lang/Boolean;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readChar", "()C"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Character", "valueOf", "(C)Ljava/lang/Character;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "cw", "Ljava/lang/Character;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readByte", "()B"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Byte", "valueOf", "(B)Ljava/lang/Byte;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "bw", "Ljava/lang/Byte;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readShort", "()S"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Short", "valueOf", "(S)Ljava/lang/Short;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "sw", "Ljava/lang/Short;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readInt", "()I"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Integer", "valueOf", "(I)Ljava/lang/Integer;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "iw", "Ljava/lang/Integer;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readLong", "()J"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Long", "valueOf", "(J)Ljava/lang/Long;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "lw", "Ljava/lang/Long;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readSortedFloat", "()F"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Float", "valueOf", "(F)Ljava/lang/Float;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "fw", "Ljava/lang/Float;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readSortedDouble", "()D"); +mv.visitMethodInsn(INVOKESTATIC, "java/lang/Double", "valueOf", "(D)Ljava/lang/Double;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "dw", "Ljava/lang/Double;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitTypeInsn(NEW, "java/util/Date"); +mv.visitInsn(DUP); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readLong", "()J"); +mv.visitMethodInsn(INVOKESPECIAL, "java/util/Date", "<init>", "(J)V"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "date", "Ljava/util/Date;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readString", "()Ljava/lang/String;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "str", "Ljava/lang/String;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitVarInsn(ALOAD, 2); +mv.visitIntInsn(BIPUSH, 18); +mv.visitInsn(AALOAD); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readKeyObject", "(Lcom/sleepycat/persist/impl/Format;)Ljava/lang/Object;"); +mv.visitTypeInsn(CHECKCAST, "com/sleepycat/persist/test/Enhanced3$MyEnum"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "e", "Lcom/sleepycat/persist/test/Enhanced3$MyEnum;"); +mv.visitVarInsn(ALOAD, 0); +mv.visitVarInsn(ALOAD, 1); +mv.visitMethodInsn(INVOKEINTERFACE, "com/sleepycat/persist/impl/EntityInput", "readBigInteger", "()Ljava/math/BigInteger;"); +mv.visitFieldInsn(PUTFIELD, "com/sleepycat/persist/test/Enhanced3", "bigint", "Ljava/math/BigInteger;"); +mv.visitInsn(RETURN); +mv.visitMaxs(5, 3); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbNullifyKeyField", "(Ljava/lang/Object;IIZLjava/lang/Object;)Z", null, null); +mv.visitCode(); +mv.visitInsn(ICONST_0); +mv.visitInsn(IRETURN); +mv.visitMaxs(1, 6); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbGetField", "(Ljava/lang/Object;IIZ)Ljava/lang/Object;", null, null); +mv.visitCode(); +mv.visitInsn(ACONST_NULL); +mv.visitInsn(ARETURN); +mv.visitMaxs(1, 5); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_PUBLIC, "bdbSetField", "(Ljava/lang/Object;IIZLjava/lang/Object;)V", null, null); +mv.visitCode(); +mv.visitInsn(RETURN); +mv.visitMaxs(0, 6); +mv.visitEnd(); +} +{ +mv = cw.visitMethod(ACC_STATIC, "<clinit>", "()V", null, null); +mv.visitCode(); +mv.visitInsn(ACONST_NULL); +mv.visitTypeInsn(NEW, "com/sleepycat/persist/test/Enhanced3"); +mv.visitInsn(DUP); +mv.visitMethodInsn(INVOKESPECIAL, "com/sleepycat/persist/test/Enhanced3", "<init>", "()V"); +mv.visitMethodInsn(INVOKESTATIC, "com/sleepycat/persist/impl/EnhancedAccessor", "registerClass", "(Ljava/lang/String;Lcom/sleepycat/persist/impl/Enhanced;)V"); +mv.visitInsn(RETURN); +mv.visitMaxs(3, 0); +mv.visitEnd(); +} +cw.visitEnd(); + +return cw.toByteArray(); +} +} diff --git a/test/com/sleepycat/persist/test/Enhanced3.java b/test/com/sleepycat/persist/test/Enhanced3.java new file mode 100644 index
0000000..47ab6b9 --- /dev/null +++ b/test/com/sleepycat/persist/test/Enhanced3.java @@ -0,0 +1,188 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Date; + +import com.sleepycat.persist.impl.Enhanced; +import com.sleepycat.persist.impl.EnhancedAccessor; +import com.sleepycat.persist.impl.EntityInput; +import com.sleepycat.persist.impl.EntityOutput; +import com.sleepycat.persist.impl.Format; +import com.sleepycat.persist.impl.RefreshException; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; + +/** + * For running ASMifier -- a composite key class using all simple data types, + * does not follow from previous EnhancedN.java files + */ +@Persistent +class Enhanced3 implements Enhanced { + + enum MyEnum { ONE, TWO }; + + @KeyField(1) boolean z; + @KeyField(2) char c; + @KeyField(3) byte b; + @KeyField(4) short s; + @KeyField(5) int i; + @KeyField(6) long l; + @KeyField(7) float f; + @KeyField(8) double d; + + @KeyField(9) Boolean zw; + @KeyField(10) Character cw; + @KeyField(11) Byte bw; + @KeyField(12) Short sw; + @KeyField(13) Integer iw; + @KeyField(14) Long lw; + @KeyField(15) Float fw; + @KeyField(16) Double dw; + + @KeyField(17) Date date; + @KeyField(18) String str; + @KeyField(19) MyEnum e; + @KeyField(20) BigInteger bigint; + @KeyField(21) BigDecimal bigdec; + + static { + EnhancedAccessor.registerClass(null, new Enhanced3()); + } + + public Object bdbNewInstance() { + return new Enhanced3(); + } + + public Object bdbNewArray(int len) { + return new Enhanced3[len]; + } + + public boolean bdbIsPriKeyFieldNullOrZero() { + return false; + } + + public void bdbWritePriKeyField(EntityOutput output, Format format) { + } + + public void bdbReadPriKeyField(EntityInput input, Format format) { + } + + public void bdbWriteSecKeyFields(EntityOutput output) { + } + + public void bdbReadSecKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) { + } + + public void bdbWriteNonKeyFields(EntityOutput output) { + } + + public void bdbReadNonKeyFields(EntityInput input, + int startField, + int endField, + int superLevel) { + } + + public void bdbWriteCompositeKeyFields(EntityOutput output, + Format[] formats) + throws RefreshException { + + output.writeBoolean(z); + output.writeChar(c); + output.writeByte(b); + output.writeShort(s); + output.writeInt(i); + output.writeLong(l); + output.writeSortedFloat(f); + output.writeSortedDouble(d); + + output.writeBoolean(zw.booleanValue()); + output.writeChar(cw.charValue()); + output.writeByte(bw.byteValue()); + output.writeShort(sw.shortValue()); + output.writeInt(iw.intValue()); + output.writeLong(lw.longValue()); + output.writeSortedFloat(fw.floatValue()); + output.writeSortedDouble(dw.doubleValue()); + + output.writeLong(date.getTime()); + output.writeString(str); + output.writeKeyObject(e, formats[18]); + output.writeBigInteger(bigint); + output.writeSortedBigDecimal(bigdec); + } + + public void 
bdbReadCompositeKeyFields(EntityInput input, + Format[] formats) + throws RefreshException { + + z = input.readBoolean(); + c = input.readChar(); + b = input.readByte(); + s = input.readShort(); + i = input.readInt(); + l = input.readLong(); + f = input.readSortedFloat(); + d = input.readSortedDouble(); + + zw = Boolean.valueOf(input.readBoolean()); + cw = Character.valueOf(input.readChar()); + bw = Byte.valueOf(input.readByte()); + sw = Short.valueOf(input.readShort()); + iw = Integer.valueOf(input.readInt()); + lw = Long.valueOf(input.readLong()); + fw = Float.valueOf(input.readSortedFloat()); + dw = Double.valueOf(input.readSortedDouble()); + + date = new Date(input.readLong()); + str = input.readString(); + e = (MyEnum) input.readKeyObject(formats[18]); + bigint = input.readBigInteger(); + bigdec = input.readSortedBigDecimal(); + } + + public boolean bdbNullifyKeyField(Object o, + int field, + int superLevel, + boolean isSecField, + Object keyElement) { + // Didn't bother with this one. + return false; + } + + public Object bdbGetField(Object o, + int field, + int superLevel, + boolean isSecField) { + // Didn't bother with this one. + return null; + } + + public void bdbSetField(Object o, + int field, + int superLevel, + boolean isSecField, + Object value) { + // Didn't bother with this one. + } + + public void bdbSetPriField(Object o, Object value) { + // Didn't bother with this one. + } +} diff --git a/test/com/sleepycat/persist/test/EvolveCase.java b/test/com/sleepycat/persist/test/EvolveCase.java new file mode 100644 index 0000000..fc3a4f3 --- /dev/null +++ b/test/com/sleepycat/persist/test/EvolveCase.java @@ -0,0 +1,233 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.persist.test; + +import java.util.Iterator; +import java.util.List; + +import junit.framework.TestCase; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.model.ClassMetadata; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.raw.RawStore; +import com.sleepycat.persist.raw.RawType; + +@Persistent +abstract class EvolveCase { + + static final String STORE_NAME = "foo"; + + transient boolean updated; + transient boolean newMetadataWritten; + + Mutations getMutations() { + return null; + } + + void configure(EntityModel model, StoreConfig config) { + } + + String getStoreOpenException() { + return null; + } + + int getNRecordsExpected() { + return 1; + } + + void checkUnevolvedModel(EntityModel model, Environment env) { + } + + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + } + + /** + * @throws DatabaseException from subclasses. + */ + void writeObjects(EntityStore store) + throws DatabaseException { + } + + /** + * @throws DatabaseException from subclasses. 
+ */ + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + } + + /** + * @throws DatabaseException from subclasses. + */ + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + } + + /** + * @throws DatabaseException from subclasses. + */ + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + } + + /** + * Checks for equality and prints the entire values rather than + * abbreviated values like TestCase.assertEquals does. + */ + static void checkEquals(Object expected, Object got) { + if ((expected != null) ? (!expected.equals(got)) : (got != null)) { + TestCase.fail("Expected:\n" + expected + "\nBut got:\n" + got); + } + } + + /** + * Asserts that an entity database exists or does not exist. + */ + void assertDbExists(boolean expectExists, + Environment env, + String entityClassName) { + assertDbExists(expectExists, env, entityClassName, null); + } + + /** + * Checks that an entity class exists or does not exist. + */ + void checkEntity(boolean exists, + EntityModel model, + Environment env, + String className, + int version, + String secKeyName) { + if (exists) { + TestCase.assertNotNull(model.getEntityMetadata(className)); + ClassMetadata meta = model.getClassMetadata(className); + TestCase.assertNotNull(meta); + TestCase.assertEquals(version, meta.getVersion()); + TestCase.assertTrue(meta.isEntityClass()); + + RawType raw = model.getRawType(className); + TestCase.assertNotNull(raw); + TestCase.assertEquals(version, raw.getVersion()); + + RawType rawVersion = model.getRawTypeVersion(className, version); + TestCase.assertNotNull(rawVersion); + TestCase.assertTrue(!rawVersion.isDeleted()); + } else { + TestCase.assertNull(model.getEntityMetadata(className)); + TestCase.assertNull(model.getClassMetadata(className)); + TestCase.assertNull(model.getRawType(className)); + + RawType rawVersion = model.getRawTypeVersion(className, version); + TestCase.assertTrue(rawVersion == null || rawVersion.isDeleted()); + } + + assertDbExists(exists, env, className); + if (secKeyName != null) { + assertDbExists(exists, env, className, secKeyName); + } + } + + /** + * Checks that a non-entity class exists or does not exist. + */ + void checkNonEntity(boolean exists, + EntityModel model, + Environment env, + String className, + int version) { + if (exists) { + ClassMetadata meta = model.getClassMetadata(className); + TestCase.assertNotNull(meta); + TestCase.assertEquals(version, meta.getVersion()); + TestCase.assertTrue(!meta.isEntityClass()); + + RawType raw = model.getRawType(className); + TestCase.assertNotNull(raw); + TestCase.assertEquals(version, raw.getVersion()); + + RawType rawVersion = model.getRawTypeVersion(className, version); + TestCase.assertNotNull(rawVersion); + TestCase.assertTrue(!rawVersion.isDeleted()); + } else { + TestCase.assertNull(model.getClassMetadata(className)); + TestCase.assertNull(model.getRawType(className)); + + RawType rawVersion = model.getRawTypeVersion(className, version); + TestCase.assertTrue(rawVersion == null || rawVersion.isDeleted()); + } + + TestCase.assertNull(model.getEntityMetadata(className)); + assertDbExists(false, env, className); + } + + /** + * Asserts that a database exists or does not exist, as indicated by + * expectExists. If keyName is + * null, checks an entity database. If keyName is non-null, checks a + * secondary database.
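+ *
+ * <p>A hypothetical usage sketch (the class and key names here are
+ * illustrative, not taken from these tests):
+ * <pre>
+ * assertDbExists(true, env, "com.example.Person", null); // entity DB
+ * assertDbExists(true, env, "com.example.Person", "ssn"); // secondary DB
+ * </pre>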
+ */ + void assertDbExists(boolean expectExists, + Environment env, + String entityClassName, + String keyName) { + + /* + * If the evolved metadata has not been written (e.g., we're in + * read-only mode), then class evolution will not yet have created, + * removed or renamed databases, and we cannot check their existence. + */ + if (newMetadataWritten) { + PersistTestUtils.assertDbExists + (expectExists, env, STORE_NAME, entityClassName, keyName); + } + } + + static void checkVersions(EntityModel model, String name, int version) { + checkVersions(model, new String[] {name}, new int[] {version}); + } + + static void checkVersions(EntityModel model, + String name1, + int version1, + String name2, + int version2) { + checkVersions + (model, new String[] {name1, name2}, + new int[] {version1, version2}); + } + + private static void checkVersions(EntityModel model, + String[] names, + int[] versions) { + List all = model.getAllRawTypeVersions(names[0]); + TestCase.assertNotNull(all); + + assert names.length == versions.length; + TestCase.assertEquals(all.toString(), names.length, all.size()); + + Iterator iter = all.iterator(); + for (int i = 0; i < names.length; i += 1) { + RawType type = iter.next(); + TestCase.assertEquals(versions[i], type.getVersion()); + TestCase.assertEquals(names[i], type.getClassName()); + } + } +} diff --git a/test/com/sleepycat/persist/test/EvolveClasses.java b/test/com/sleepycat/persist/test/EvolveClasses.java new file mode 100644 index 0000000..3300428 --- /dev/null +++ b/test/com/sleepycat/persist/test/EvolveClasses.java @@ -0,0 +1,8066 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.StringTokenizer; + +import junit.framework.TestCase; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.evolve.Conversion; +import com.sleepycat.persist.evolve.Converter; +import com.sleepycat.persist.evolve.Deleter; +import com.sleepycat.persist.evolve.EntityConverter; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.evolve.Renamer; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PersistentProxy; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawStore; +import com.sleepycat.persist.raw.RawType; + +/** + * Nested classes are modified versions of classes of the same name in + * EvolveClasses.java.original. See EvolveTestBase.java for the steps that are + * taken to add a new class (test case). + * + * @author Mark Hayes + */ +class EvolveClasses { + + private static final String PREFIX = EvolveClasses.class.getName() + '$'; + private static final String CASECLS = EvolveCase.class.getName(); + + private static RawObject readRaw(RawStore store, + Object key, + Object... classVersionPairs) + throws DatabaseException { + + return readRaw(store, null, key, classVersionPairs); + } + + /** + * Reads a raw object and checks its superclass names and versions. + */ + private static RawObject readRaw(RawStore store, + String entityClsName, + Object key, + Object... classVersionPairs) + throws DatabaseException { + + TestCase.assertNotNull(store); + TestCase.assertNotNull(key); + + if (entityClsName == null) { + entityClsName = (String) classVersionPairs[0]; + } + PrimaryIndex index = + store.getPrimaryIndex(entityClsName); + TestCase.assertNotNull(index); + + RawObject obj = index.get(key); + TestCase.assertNotNull(obj); + + checkRawType(obj.getType(), classVersionPairs); + + RawObject superObj = obj.getSuper(); + for (int i = 2; i < classVersionPairs.length; i += 2) { + Object[] a = new Object[classVersionPairs.length - i]; + System.arraycopy(classVersionPairs, i, a, 0, a.length); + TestCase.assertNotNull(superObj); + checkRawType(superObj.getType(), a); + superObj = superObj.getSuper(); + } + + return obj; + } + + /** + * Reads a raw object and checks its superclass names and versions. + */ + private static void checkRawType(RawType type, + Object... 
classVersionPairs) { + TestCase.assertNotNull(type); + TestCase.assertNotNull(classVersionPairs); + TestCase.assertTrue(classVersionPairs.length % 2 == 0); + + for (int i = 0; i < classVersionPairs.length; i += 2) { + String clsName = (String) classVersionPairs[i]; + int clsVersion = (Integer) classVersionPairs[i + 1]; + TestCase.assertEquals(clsName, type.getClassName()); + TestCase.assertEquals(clsVersion, type.getVersion()); + type = type.getSuperType(); + } + TestCase.assertNull(type); + } + + /** + * Checks that a raw object contains the specified field values. Does not + * check superclass fields. + */ + private static void checkRawFields(RawObject obj, + Object... nameValuePairs) { + TestCase.assertNotNull(obj); + TestCase.assertNotNull(obj.getValues()); + TestCase.assertNotNull(nameValuePairs); + TestCase.assertTrue(nameValuePairs.length % 2 == 0); + + Map values = obj.getValues(); + TestCase.assertEquals(nameValuePairs.length / 2, values.size()); + + for (int i = 0; i < nameValuePairs.length; i += 2) { + String name = (String) nameValuePairs[i]; + Object value = nameValuePairs[i + 1]; + TestCase.assertEquals(name, value, values.get(name)); + } + } + + private static Map makeValues(Object... nameValuePairs) { + TestCase.assertTrue(nameValuePairs.length % 2 == 0); + Map values = new HashMap(); + for (int i = 0; i < nameValuePairs.length; i += 2) { + values.put((String) nameValuePairs[i], nameValuePairs[i + 1]); + } + return values; + } + + /** + * Disallow removing an entity class when no Deleter mutation is specified. + */ + static class DeletedEntity1_ClassRemoved_NoMutation extends EvolveCase { + + private static final String NAME = + PREFIX + "DeletedEntity1_ClassRemoved"; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity1_ClassRemoved version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$DeletedEntity1_ClassRemoved"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "skey"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + /** + * Allow removing an entity class when a Deleter mutation is specified. 
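+     *
+     * <p>A minimal sketch of how an application supplies such a Deleter when
+     * opening its store (hypothetical store and class names):</p>
+     * <pre>
+     * Mutations m = new Mutations();
+     * m.addDeleter(new Deleter("com.example.OldEntity", 0));
+     * StoreConfig config = new StoreConfig();
+     * config.setAllowCreate(true);
+     * config.setMutations(m);
+     * EntityStore store = new EntityStore(env, "myStore", config);
+     * </pre>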
+ */ + static class DeletedEntity2_ClassRemoved_WithDeleter extends EvolveCase { + + private static final String NAME = + PREFIX + "DeletedEntity2_ClassRemoved"; + + @Override + int getNRecordsExpected() { + return 0; + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME, 0)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(false, model, env, NAME, 0, "skey"); + if (oldTypesExist) { + checkVersions(model, NAME, 0); + } + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + return; + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + /** + * Disallow removing the Entity annotation when no Deleter mutation is + * specified. + */ + static class DeletedEntity3_AnnotRemoved_NoMutation extends EvolveCase { + + private static final String NAME = + DeletedEntity3_AnnotRemoved_NoMutation.class.getName(); + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity3_AnnotRemoved_NoMutation version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: com.sleepycat.persist.test.EvolveClasses$DeletedEntity3_AnnotRemoved_NoMutation"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "skey"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + /** + * Allow removing the Entity annotation when a Deleter mutation is + * specified. + */ + static class DeletedEntity4_AnnotRemoved_WithDeleter extends EvolveCase { + + private static final String NAME = + DeletedEntity4_AnnotRemoved_WithDeleter.class.getName(); + + @Override + int getNRecordsExpected() { + return 0; + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME, 0)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(false, model, env, NAME, 0, "skey"); + if (oldTypesExist) { + checkVersions(model, NAME, 0); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) { + try { + store.getPrimaryIndex + (Integer.class, + DeletedEntity4_AnnotRemoved_WithDeleter.class); + TestCase.fail(); + } catch (Exception e) { + checkEquals + ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity4_AnnotRemoved_WithDeleter", + e.toString()); + } + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + return; + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + /** + * Disallow changing the Entity annotation to Persistent when no Deleter + * mutation is specified. 
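+     *
+     * <p>Without a Deleter, opening the store fails, as the exception string
+     * below records; roughly, a caller would see (sketch, hypothetical store
+     * name):</p>
+     * <pre>
+     * try {
+     *     store = new EntityStore(env, "myStore", config);
+     * } catch (IncompatibleClassException e) {
+     *     // add the missing Deleter to the config's Mutations and retry
+     * }
+     * </pre>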
+ */ + @Persistent(version=1) + static class DeletedEntity5_EntityToPersist_NoMutation extends EvolveCase { + + private static final String NAME = + DeletedEntity5_EntityToPersist_NoMutation.class.getName(); + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity5_EntityToPersist_NoMutation version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity5_EntityToPersist_NoMutation version: 1 Error: @Entity switched to/from @Persistent"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "skey"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + /** + * Allow changing the Entity annotation to Persistent when a Deleter + * mutation is specified. + */ + @Persistent(version=1) + static class DeletedEntity6_EntityToPersist_WithDeleter extends EvolveCase { + + private static final String NAME = + DeletedEntity6_EntityToPersist_WithDeleter.class.getName(); + private static final String NAME2 = + Embed_DeletedEntity6_EntityToPersist_WithDeleter.class.getName(); + + @Override + int getNRecordsExpected() { + return 0; + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME, 0)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkNonEntity(true, model, env, NAME, 1); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + /* Cannot get the primary index for the former entity class. */ + try { + store.getPrimaryIndex + (Integer.class, + DeletedEntity6_EntityToPersist_WithDeleter.class); + TestCase.fail(); + } catch (Exception e) { + checkEquals + ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedEntity6_EntityToPersist_WithDeleter", + e.toString()); + } + + if (newMetadataWritten) { + /* Can embed the new persistent class in another entity. */ + PrimaryIndex + index = store.getPrimaryIndex + (Long.class, + Embed_DeletedEntity6_EntityToPersist_WithDeleter.class); + + if (doUpdate) { + Embed_DeletedEntity6_EntityToPersist_WithDeleter embed = + new Embed_DeletedEntity6_EntityToPersist_WithDeleter(); + index.put(embed); + embed = index.get(embed.key); + /* This new type should exist only after update. 
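+                       (Metadata for the newly embedded type is evidently
+                       written when an instance is first stored, which is
+                       why these checks run only in the doUpdate pass.)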
*/ + Environment env = store.getEnvironment(); + EntityModel model = store.getModel(); + checkEntity(true, model, env, NAME2, 0, null); + checkVersions(model, NAME2, 0); + } + } + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + return; + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + @Entity + static class Embed_DeletedEntity6_EntityToPersist_WithDeleter { + + @PrimaryKey + long key = 99; + + DeletedEntity6_EntityToPersist_WithDeleter embedded = + new DeletedEntity6_EntityToPersist_WithDeleter(); + } + + /** + * Disallow removing a Persistent class when no Deleter mutation is + * specified, even when the Entity class that embedded the Persistent class + * is deleted properly (by removing the Entity annotation in this case). + */ + static class DeletedPersist1_ClassRemoved_NoMutation extends EvolveCase { + + private static final String NAME = + PREFIX + "DeletedPersist1_ClassRemoved"; + + private static final String NAME2 = + DeletedPersist1_ClassRemoved_NoMutation.class.getName(); + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME2, 0)); + return m; + } + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist1_ClassRemoved version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$DeletedPersist1_ClassRemoved"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkNonEntity(true, model, env, NAME, 0); + checkEntity(true, model, env, NAME2, 0, null); + checkVersions(model, NAME, 0); + checkVersions(model, NAME2, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + + RawType embedType = store.getModel().getRawType(NAME); + checkRawType(embedType, NAME, 0); + + RawObject embed = + new RawObject(embedType, makeValues("f", 123), null); + + RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed); + } + } + + /** + * Allow removing a Persistent class when a Deleter mutation is + * specified, and the Entity class that embedded the Persistent class + * is also deleted properly (by removing the Entity annotation in this + * case). 
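+     *
+     * <p>Sketch of the required mutations when both the embedded class and
+     * its enclosing entity are removed (hypothetical names; the version in
+     * each Deleter is the last stored version of that class):</p>
+     * <pre>
+     * Mutations m = new Mutations();
+     * m.addDeleter(new Deleter("com.example.EmbeddedValue", 0));
+     * m.addDeleter(new Deleter("com.example.OwningEntity", 0));
+     * </pre>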
+ */ + static class DeletedPersist2_ClassRemoved_WithDeleter extends EvolveCase { + + private static final String NAME = + PREFIX + "DeletedPersist2_ClassRemoved"; + private static final String NAME2 = + DeletedPersist2_ClassRemoved_WithDeleter.class.getName(); + + @Override + int getNRecordsExpected() { + return 0; + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME, 0)); + m.addDeleter(new Deleter(NAME2, 0)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkNonEntity(false, model, env, NAME, 0); + checkEntity(false, model, env, NAME2, 0, null); + if (oldTypesExist) { + checkVersions(model, NAME, 0); + checkVersions(model, NAME2, 0); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) { + try { + store.getPrimaryIndex + (Integer.class, + DeletedPersist2_ClassRemoved_WithDeleter.class); + TestCase.fail(); + } catch (Exception e) { + checkEquals + ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist2_ClassRemoved_WithDeleter", + e.toString()); + } + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + return; + } + + RawType embedType = store.getModel().getRawType(NAME); + checkRawType(embedType, NAME, 0); + + RawObject embed = + new RawObject(embedType, makeValues("f", 123), null); + + RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed); + } + } + + static class DeletedPersist3_AnnotRemoved { + + int f = 123; + } + + /** + * Disallow removing the Persistent annotation when no Deleter mutation is + * specified, even when the Entity class that embedded the Persistent class + * is deleted properly (by removing the Entity annotation in this case). 
+ */ + static class DeletedPersist3_AnnotRemoved_NoMutation extends EvolveCase { + + private static final String NAME = + DeletedPersist3_AnnotRemoved.class.getName(); + private static final String NAME2 = + DeletedPersist3_AnnotRemoved_NoMutation.class.getName(); + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME2, 0)); + return m; + } + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist3_AnnotRemoved version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: com.sleepycat.persist.test.EvolveClasses$DeletedPersist3_AnnotRemoved"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkNonEntity(true, model, env, NAME, 0); + checkEntity(true, model, env, NAME2, 0, null); + checkVersions(model, NAME, 0); + checkVersions(model, NAME2, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + + RawType embedType = store.getModel().getRawType(NAME); + checkRawType(embedType, NAME, 0); + + RawObject embed = + new RawObject(embedType, makeValues("f", 123), null); + + RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed); + } + } + + static class DeletedPersist4_AnnotRemoved { + + int f = 123; + } + + /** + * Allow removing the Persistent annotation when a Deleter mutation is + * specified, and the Entity class that embedded the Persistent class + * is also deleted properly (by removing the Entity annotation in this + * case). 
+     */
+    static class DeletedPersist4_AnnotRemoved_WithDeleter extends EvolveCase {
+
+        private static final String NAME =
+            DeletedPersist4_AnnotRemoved.class.getName();
+        private static final String NAME2 =
+            DeletedPersist4_AnnotRemoved_WithDeleter.class.getName();
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkNonEntity(false, model, env, NAME, 0);
+            checkEntity(false, model, env, NAME2, 0, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 0);
+                checkVersions(model, NAME2, 0);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate) {
+            try {
+                store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist4_AnnotRemoved_WithDeleter.class);
+                TestCase.fail();
+            } catch (Exception e) {
+                checkEquals
+                    ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist4_AnnotRemoved_WithDeleter",
+                     e.toString());
+            }
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                return;
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    @Entity(version=1)
+    static class DeletedPersist5_PersistToEntity {
+
+        @PrimaryKey
+        int key = 99;
+
+        int f = 123;
+    }
+
+    /**
+     * Disallow changing the Persistent annotation to Entity when no Deleter
+     * mutation is specified, even when the Entity class that embedded the
+     * Persistent class is deleted properly (by removing the Entity annotation
+     * in this case).
+     */
+    static class DeletedPersist5_PersistToEntity_NoMutation
+        extends EvolveCase {
+
+        private static final String NAME =
+            DeletedPersist5_PersistToEntity.class.getName();
+        private static final String NAME2 =
+            DeletedPersist5_PersistToEntity_NoMutation.class.getName();
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        public String getStoreOpenException() {
+            return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist5_PersistToEntity version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist5_PersistToEntity version: 1 Error: @Entity switched to/from @Persistent";
+        }
+
+        @Override
+        void checkUnevolvedModel(EntityModel model, Environment env) {
+            checkNonEntity(true, model, env, NAME, 0);
+            checkEntity(true, model, env, NAME2, 0, null);
+            checkVersions(model, NAME, 0);
+            checkVersions(model, NAME2, 0);
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            if (expectEvolved) {
+                TestCase.fail();
+            }
+
+            RawType embedType = store.getModel().getRawType(NAME);
+            checkRawType(embedType, NAME, 0);
+
+            RawObject embed =
+                new RawObject(embedType, makeValues("f", 123), null);
+
+            RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0);
+            checkRawFields(obj, "key", 99, "embed", embed);
+        }
+    }
+
+    @Entity(version=1)
+    static class DeletedPersist6_PersistToEntity {
+
+        @PrimaryKey
+        int key = 99;
+
+        int f = 123;
+    }
+
+    /**
+     * Allow changing the Persistent annotation to Entity when a Deleter
+     * mutation is specified, and the Entity class that embedded the Persistent
+     * class is also deleted properly (by removing the Entity annotation in
+     * this case).
+     */
+    static class DeletedPersist6_PersistToEntity_WithDeleter
+        extends EvolveCase {
+
+        private static final String NAME =
+            DeletedPersist6_PersistToEntity.class.getName();
+        private static final String NAME2 =
+            DeletedPersist6_PersistToEntity_WithDeleter.class.getName();
+
+        @Override
+        int getNRecordsExpected() {
+            return 0;
+        }
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addDeleter(new Deleter(NAME, 0));
+            m.addDeleter(new Deleter(NAME2, 0));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(false, model, env, NAME2, 0, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+                checkVersions(model, NAME2, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            /* Cannot get the primary index for the former entity class. */
+            try {
+                store.getPrimaryIndex
+                    (Integer.class,
+                     DeletedPersist6_PersistToEntity_WithDeleter.class);
+                TestCase.fail();
+            } catch (Exception e) {
+                checkEquals
+                    ("java.lang.IllegalArgumentException: Class could not be loaded or is not an entity class: com.sleepycat.persist.test.EvolveClasses$DeletedPersist6_PersistToEntity_WithDeleter",
+                     e.toString());
+            }
+
+            if (newMetadataWritten) {
+                /* Can use the primary index of the new entity class.
*/ + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedPersist6_PersistToEntity.class); + + if (doUpdate) { + DeletedPersist6_PersistToEntity obj = + new DeletedPersist6_PersistToEntity(); + index.put(obj); + obj = index.get(obj.key); + /* This new type should exist only after update. */ + Environment env = store.getEnvironment(); + EntityModel model = store.getModel(); + checkEntity(true, model, env, NAME, 1, null); + } + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + DeletedPersist6_PersistToEntity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((DeletedPersist6_PersistToEntity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + return; + } + + RawType embedType = store.getModel().getRawType(NAME); + checkRawType(embedType, NAME, 0); + + RawObject embed = + new RawObject(embedType, makeValues("f", 123), null); + + RawObject obj = readRaw(store, 99, NAME2, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed); + } + } + + /** + * Disallow renaming an entity class without a Renamer mutation. + */ + @Entity(version=1) + static class RenamedEntity1_NewEntityName_NoMutation + extends EvolveCase { + + private static final String NAME = + PREFIX + "RenamedEntity1_NewEntityName"; + private static final String NAME2 = + RenamedEntity1_NewEntityName_NoMutation.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$RenamedEntity1_NewEntityName version: 0 Error: java.lang.ClassNotFoundException: com.sleepycat.persist.test.EvolveClasses$RenamedEntity1_NewEntityName"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "skey"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + /** + * Allow renaming an entity class with a Renamer mutation. 
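+     *
+     * <p>Sketch of a class Renamer, mapping the old stored class name and
+     * version to the new class (hypothetical old name):</p>
+     * <pre>
+     * Mutations m = new Mutations();
+     * m.addRenamer(new Renamer("com.example.OldName", 0,
+     *     RenamedEntity2_NewEntityName_WithRenamer.class.getName()));
+     * </pre>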
+ */ + @Entity(version=1) + static class RenamedEntity2_NewEntityName_WithRenamer + extends EvolveCase { + + private static final String NAME = + PREFIX + "RenamedEntity2_NewEntityName"; + private static final String NAME2 = + RenamedEntity2_NewEntityName_WithRenamer.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addRenamer(new Renamer(NAME, 0, NAME2)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(false, model, env, NAME, 0, null); + checkEntity(true, model, env, NAME2, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + RenamedEntity2_NewEntityName_WithRenamer.class); + RenamedEntity2_NewEntityName_WithRenamer obj = index.get(key); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.skey); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + obj = sindex.get(88); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.skey); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + RenamedEntity2_NewEntityName_WithRenamer.class); + RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99); + index.put((RenamedEntity2_NewEntityName_WithRenamer) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 99, NAME2, 1, CASECLS, 0); + } else { + obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + } + checkRawFields(obj, "key", 99, "skey", 88); + } + } + + @Persistent + static class DeleteSuperclass1_BaseClass + extends EvolveCase { + + int f = 123; + } + + /** + * Disallow deleting a superclass from the hierarchy when the superclass + * has persistent fields and no Deleter or Converter is specified. 
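+     *
+     * <p>Per the error message below, evolution can be satisfied either by a
+     * class Deleter for the removed superclass or by a field Deleter for each
+     * of its persistent fields; a sketch of both forms (the three-argument
+     * Deleter takes the declaring class, its version, and the field name):</p>
+     * <pre>
+     * Mutations m = new Mutations();
+     * // class form:
+     * m.addDeleter(new Deleter(
+     *     DeleteSuperclass1_BaseClass.class.getName(), 0));
+     * // or field form, one per persistent field:
+     * m.addDeleter(new Deleter(
+     *     DeleteSuperclass1_BaseClass.class.getName(), 0, "f"));
+     * </pre>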
+ */ + @Entity + static class DeleteSuperclass1_NoMutation + extends EvolveCase { + + private static final String NAME = + DeleteSuperclass1_BaseClass.class.getName(); + private static final String NAME2 = + DeleteSuperclass1_NoMutation.class.getName(); + + @PrimaryKey + int key = 99; + + int ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_NoMutation version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_NoMutation version: 0 Error: When a superclass is removed from the class hierarchy, the superclass or all of its persistent fields must be deleted with a Deleter: com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass1_BaseClass"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkNonEntity(true, model, env, NAME, 0); + checkEntity(true, model, env, NAME2, 0, null); + checkVersions(model, NAME, 0); + checkVersions(model, NAME2, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + checkRawFields(obj.getSuper(), "f", 123); + checkRawFields(obj.getSuper().getSuper()); + } + } + + @Persistent + static class DeleteSuperclass2_BaseClass + extends EvolveCase { + + int f; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey; + } + + /** + * Allow deleting a superclass from the hierarchy when the superclass has + * persistent fields and a class Converter is specified. Also check that + * the secondary key field in the deleted base class is handled properly. 
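+     *
+     * <p>Note that the EntityConverter used below is given the names of the
+     * secondary keys that disappear with the deleted superclass, so the store
+     * can drop their secondary databases; in outline:</p>
+     * <pre>
+     * new EntityConverter(entityClassName, 0, conversion,
+     *                     Collections.singleton("skey"));
+     * </pre>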
+ */ + @Entity(version=1) + static class DeleteSuperclass2_WithConverter extends EvolveCase { + + private static final String NAME = + DeleteSuperclass2_BaseClass.class.getName(); + private static final String NAME2 = + DeleteSuperclass2_WithConverter.class.getName(); + + @PrimaryKey + int key; + + int ff; + + @SecondaryKey(relate=ONE_TO_ONE) + Integer skey2; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey3; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addConverter(new EntityConverter + (NAME2, 0, new MyConversion(), + Collections.singleton("skey"))); + return m; + } + + @SuppressWarnings("serial") + static class MyConversion implements Conversion { + + transient RawType newType; + + public void initialize(EntityModel model) { + newType = model.getRawType(NAME2); + TestCase.assertNotNull(newType); + } + + public Object convert(Object fromValue) { + TestCase.assertNotNull(newType); + RawObject obj = (RawObject) fromValue; + RawObject newSuper = obj.getSuper().getSuper(); + return new RawObject(newType, obj.getValues(), newSuper); + } + + @Override + public boolean equals(Object other) { + return other instanceof MyConversion; + } + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME2, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + checkNonEntity(true, model, env, NAME, 0); + checkVersions(model, NAME, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass2_WithConverter.class); + DeleteSuperclass2_WithConverter obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertSame + (EvolveCase.class, obj.getClass().getSuperclass()); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.ff); + TestCase.assertEquals(Integer.valueOf(77), obj.skey2); + TestCase.assertEquals(66, obj.skey3); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + DeleteSuperclass2_WithConverter.class); + RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99); + index.put((DeleteSuperclass2_WithConverter) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 99, NAME2, 1, CASECLS, 0); + } else { + obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0); + } + checkRawFields + (obj, "key", 99, "ff", 88, "skey2", 77, "skey3", 66); + if (expectEvolved) { + checkRawFields(obj.getSuper()); + } else { + checkRawFields(obj.getSuper(), "f", 123, "skey", 456); + checkRawFields(obj.getSuper().getSuper()); + } + Environment env = store.getEnvironment(); + assertDbExists(!expectEvolved, env, NAME2, "skey"); + assertDbExists(true, env, NAME2, "skey3"); + } + } + + static class DeleteSuperclass3_BaseClass + extends EvolveCase { + + int f; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey; + } + + /** + * Allow deleting a superclass from the hierarchy when the superclass + * has persistent fields and a class Deleter is specified. Also check that + * the secondary key field in the deleted base class is handled properly. 
+ */ + @Entity(version=1) + static class DeleteSuperclass3_WithDeleter extends EvolveCase { + + private static final String NAME = + DeleteSuperclass3_BaseClass.class.getName(); + private static final String NAME2 = + DeleteSuperclass3_WithDeleter.class.getName(); + + @PrimaryKey + int key; + + int ff; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME, 0)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME2, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + checkNonEntity(false, model, env, NAME, 0); + checkVersions(model, NAME, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass3_WithDeleter.class); + DeleteSuperclass3_WithDeleter obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertSame + (EvolveCase.class, obj.getClass().getSuperclass()); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.ff); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + DeleteSuperclass3_WithDeleter.class); + RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99); + index.put((DeleteSuperclass3_WithDeleter) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 99, NAME2, 1, CASECLS, 0); + } else { + obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0); + } + checkRawFields(obj, "key", 99, "ff", 88); + if (expectEvolved) { + checkRawFields(obj.getSuper()); + } else { + checkRawFields(obj.getSuper(), "f", 123, "skey", 456); + checkRawFields(obj.getSuper().getSuper()); + } + Environment env = store.getEnvironment(); + assertDbExists(!expectEvolved, env, NAME2, "skey"); + } + } + + @Persistent + static class DeleteSuperclass4_BaseClass + extends EvolveCase { + } + + /** + * Allow deleting a superclass from the hierarchy when the superclass + * has NO persistent fields. No mutations are needed. 
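+     *
+     * <p>In outline, the hierarchy change being exercised (no stored data is
+     * affected because the removed class contributed no fields):</p>
+     * <pre>
+     * // before: class DeleteSuperclass4_NoFields
+     * //             extends DeleteSuperclass4_BaseClass { ... }
+     * // after:  class DeleteSuperclass4_NoFields extends EvolveCase { ... }
+     * </pre>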
+ */ + @Entity(version=1) + static class DeleteSuperclass4_NoFields extends EvolveCase { + + private static final String NAME = + DeleteSuperclass4_BaseClass.class.getName(); + private static final String NAME2 = + DeleteSuperclass4_NoFields.class.getName(); + + @PrimaryKey + int key = 99; + + int ff; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME2, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + checkNonEntity(true, model, env, NAME, 0); + checkVersions(model, NAME, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass4_NoFields.class); + DeleteSuperclass4_NoFields obj = index.get(key); + TestCase.assertNotNull(obj); + TestCase.assertSame + (EvolveCase.class, obj.getClass().getSuperclass()); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.ff); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + DeleteSuperclass4_NoFields.class); + RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99); + index.put((DeleteSuperclass4_NoFields) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 99, NAME2, 1, CASECLS, 0); + } else { + obj = readRaw(store, 99, NAME2, 0, NAME, 0, CASECLS, 0); + } + checkRawFields(obj, "key", 99, "ff", 88); + checkRawFields(obj.getSuper()); + if (expectEvolved) { + TestCase.assertNull(obj.getSuper().getSuper()); + } else { + checkRawFields(obj.getSuper().getSuper()); + } + } + } + + @Persistent(version=1) + static class DeleteSuperclass5_Embedded { + + int f; + + @Override + public String toString() { + return "" + f; + } + } + + /** + * Ensure that a superclass at the top of the hierarchy can be deleted. A + * class Deleter is used. 
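+     *
+     * <p>Because the removed base class no longer exists at compile time, the
+     * Deleter below is keyed by its literal stored class name rather than a
+     * class reference:</p>
+     * <pre>
+     * m.addDeleter(new Deleter(
+     *     "com.sleepycat.persist.test.EvolveClasses$DeleteSuperclass5_Embedded_Base",
+     *     0));
+     * </pre>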
+ */ + @Entity + static class DeleteSuperclass5_Top + extends EvolveCase { + + private static final String NAME = + DeleteSuperclass5_Top.class.getName(); + private static final String NAME2 = + DeleteSuperclass5_Embedded.class.getName(); + private static final String NAME3 = + PREFIX + "DeleteSuperclass5_Embedded_Base"; + + @PrimaryKey + int key = 99; + + int ff; + + DeleteSuperclass5_Embedded embed = + new DeleteSuperclass5_Embedded(); + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME3, 0)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkNonEntity(true, model, env, NAME2, 1); + checkNonEntity(false, model, env, NAME3, 0); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + checkVersions(model, NAME3, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass5_Top.class); + DeleteSuperclass5_Top obj = index.get(key); + TestCase.assertNotNull(obj); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.ff); + TestCase.assertEquals(123, obj.embed.f); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + DeleteSuperclass5_Top.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((DeleteSuperclass5_Top) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject embedSuper = null; + if (!expectEvolved) { + RawType embedSuperType = store.getModel().getRawType(NAME3); + embedSuper = new RawObject + (embedSuperType, makeValues("g", 456), null); + } + RawObject embed = + new RawObject(embedType, makeValues("f", 123), embedSuper); + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88, "embed", embed); + } + } + + @Persistent + static class InsertSuperclass1_BaseClass + extends EvolveCase { + + int f = 123; + } + + /** + * Allow inserting a superclass between two existing classes in the + * hierarchy. No mutations are needed. 
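+     *
+     * <p>In outline (the entity's version is bumped because its stored format
+     * changes):</p>
+     * <pre>
+     * // before: @Entity
+     * //         class InsertSuperclass1_Between extends EvolveCase { ... }
+     * // after:  @Entity(version=1)
+     * //         class InsertSuperclass1_Between
+     * //             extends InsertSuperclass1_BaseClass { ... }
+     * </pre>
+     * <p>Until a record is rewritten, the inserted superclass's fields are
+     * simply absent from the stored record, as the raw checks below show.</p>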
+ */ + @Entity(version=1) + static class InsertSuperclass1_Between + extends InsertSuperclass1_BaseClass { + + private static final String NAME = + InsertSuperclass1_BaseClass.class.getName(); + private static final String NAME2 = + InsertSuperclass1_Between.class.getName(); + + @PrimaryKey + int key = 99; + + int ff; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkNonEntity(true, model, env, NAME, 0); + checkEntity(true, model, env, NAME2, 1, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + InsertSuperclass1_Between.class); + InsertSuperclass1_Between obj = index.get(key); + TestCase.assertNotNull(obj); + TestCase.assertSame + (InsertSuperclass1_BaseClass.class, + obj.getClass().getSuperclass()); + TestCase.assertSame + (EvolveCase.class, + obj.getClass().getSuperclass().getSuperclass()); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.ff); + TestCase.assertEquals(123, obj.f); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + InsertSuperclass1_Between.class); + RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99); + index.put((InsertSuperclass1_Between) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 99, NAME2, 1, NAME, 0, CASECLS, 0); + } else { + obj = readRaw(store, 99, NAME2, 0, CASECLS, 0); + } + checkRawFields(obj, "key", 99, "ff", 88); + if (expectEvolved) { + if (expectUpdated) { + checkRawFields(obj.getSuper(), "f", 123); + } else { + checkRawFields(obj.getSuper()); + } + checkRawFields(obj.getSuper().getSuper()); + TestCase.assertNull(obj.getSuper().getSuper().getSuper()); + } else { + checkRawFields(obj.getSuper()); + TestCase.assertNull(obj.getSuper().getSuper()); + } + } + } + + @Persistent + static class InsertSuperclass2_Embedded_Base { + + int g = 456; + } + + @Persistent(version=1) + static class InsertSuperclass2_Embedded + extends InsertSuperclass2_Embedded_Base { + + int f; + } + + /** + * Allow inserting a superclass at the top of the hierarchy. No mutations + * are needed. 
+ */ + @Entity + static class InsertSuperclass2_Top + extends EvolveCase { + + private static final String NAME = + InsertSuperclass2_Top.class.getName(); + private static final String NAME2 = + InsertSuperclass2_Embedded.class.getName(); + private static final String NAME3 = + InsertSuperclass2_Embedded_Base.class.getName(); + + @PrimaryKey + int key = 99; + + int ff; + + InsertSuperclass2_Embedded embed = + new InsertSuperclass2_Embedded(); + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkNonEntity(true, model, env, NAME2, 1); + checkNonEntity(true, model, env, NAME3, 0); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + checkVersions(model, NAME3, 0); + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + InsertSuperclass2_Top.class); + InsertSuperclass2_Top obj = index.get(key); + TestCase.assertNotNull(obj); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.ff); + TestCase.assertEquals(123, obj.embed.f); + TestCase.assertEquals(456, obj.embed.g); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + InsertSuperclass2_Top.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((InsertSuperclass2_Top) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject embedSuper = null; + if (expectEvolved) { + RawType embedSuperType = store.getModel().getRawType(NAME3); + Map values = + expectUpdated ? 
makeValues("g", 456) : makeValues(); + embedSuper = new RawObject(embedSuperType, values, null); + } + RawObject embed = + new RawObject(embedType, makeValues("f", 123), embedSuper); + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88, "embed", embed); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_PrimitiveToObject + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_PrimitiveToObject.class.getName(); + + @PrimaryKey + int key = 99; + + String ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_PrimitiveToObject version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_PrimitiveToObject version: 1 Error: Old field type: int is not compatible with the new type: java.lang.String for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_ObjectToPrimitive + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_ObjectToPrimitive.class.getName(); + + @PrimaryKey + int key = 99; + + int ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToPrimitive version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToPrimitive version: 1 Error: Old field type: java.lang.String is not compatible with the new type: int for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", "88"); + } + } + + @Persistent + static class MyType { + + @Override + public boolean equals(Object o) { + return o instanceof MyType; + } + } + + @Persistent + static class MySubtype extends MyType { + + @Override + public boolean equals(Object o) { + return o instanceof MySubtype; + } + } + + @Entity(version=1) + static class DisallowNonKeyField_ObjectToSubtype + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_ObjectToSubtype.class.getName(); + + @PrimaryKey + int key = 99; + + MySubtype ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToSubtype version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToSubtype version: 1 Error: Old field type: com.sleepycat.persist.test.EvolveClasses$MyType is not 
compatible with the new type: com.sleepycat.persist.test.EvolveClasses$MySubtype for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawType embedType = store.getModel().getRawType + (MyType.class.getName()); + RawObject embed = new RawObject(embedType, makeValues(), null); + + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", embed); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_ObjectToUnrelatedSimple + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_ObjectToUnrelatedSimple.class.getName(); + + @PrimaryKey + int key = 99; + + String ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedSimple version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedSimple version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: java.lang.String for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_ObjectToUnrelatedOther + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_ObjectToUnrelatedOther.class.getName(); + + @PrimaryKey + int key = 99; + + MyType ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedOther version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_ObjectToUnrelatedOther version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: com.sleepycat.persist.test.EvolveClasses$MyType for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_byte2boolean + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_byte2boolean.class.getName(); + + @PrimaryKey + int key = 99; + + boolean ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: 
com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_byte2boolean version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_byte2boolean version: 1 Error: Old field type: byte is not compatible with the new type: boolean for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (byte) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_short2byte + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_short2byte.class.getName(); + + @PrimaryKey + int key = 99; + + byte ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_short2byte version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_short2byte version: 1 Error: Old field type: short is not compatible with the new type: byte for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (short) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_int2short + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_int2short.class.getName(); + + @PrimaryKey + int key = 99; + + short ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_int2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_int2short version: 1 Error: Old field type: int is not compatible with the new type: short for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_long2int + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_long2int.class.getName(); + + @PrimaryKey + int key = 99; + + int ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_long2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_long2int version: 1 Error: Old field type: long is not compatible 
with the new type: int for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (long) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_float2long + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_float2long.class.getName(); + + @PrimaryKey + int key = 99; + + long ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2long version: 1 Error: Old field type: float is not compatible with the new type: long for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (float) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_double2float + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_double2float.class.getName(); + + @PrimaryKey + int key = 99; + + float ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_double2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_double2float version: 1 Error: Old field type: double is not compatible with the new type: float for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (double) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_Byte2byte + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_Byte2byte.class.getName(); + + @PrimaryKey + int key = 99; + + byte ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Byte2byte version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Byte2byte version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: byte for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, 
NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (byte) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_Character2char + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_Character2char.class.getName(); + + @PrimaryKey + int key = 99; + + char ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Character2char version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Character2char version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: char for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (char) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_Short2short + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_Short2short.class.getName(); + + @PrimaryKey + int key = 99; + + short ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Short2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Short2short version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: short for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (short) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_Integer2int + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_Integer2int.class.getName(); + + @PrimaryKey + int key = 99; + + int ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Integer2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Integer2int version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: int for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if 
(expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_Long2long + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_Long2long.class.getName(); + + @PrimaryKey + int key = 99; + + long ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Long2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Long2long version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: long for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (long) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_Float2float + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_Float2float.class.getName(); + + @PrimaryKey + int key = 99; + + float ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Float2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Float2float version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: float for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (float) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_Double2double + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_Double2double.class.getName(); + + @PrimaryKey + int key = 99; + + double ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Double2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_Double2double version: 1 Error: Old field type: java.lang.Double is not compatible with the new type: double for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (double) 88); + } + } + + 
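+    /*
+     * Illustrative sketch only, not one of the generated test cases: the
+     * Disallow* classes above verify that a primitive narrowing such as
+     * double -> float fails with IncompatibleClassException unless a
+     * mutation is supplied.  A Conversion like the one below, registered
+     * as a Converter mutation, is the supported way to evolve across such
+     * a change.  The entity name "com.example.MyEntity" and field "ff"
+     * used in the registration comment are hypothetical placeholders.
+     */
+    @SuppressWarnings("serial")
+    static class Example_double2float_Conversion implements Conversion {
+
+        public void initialize(EntityModel model) {
+        }
+
+        /* Narrow the stored double value to the new float field type. */
+        public Object convert(Object fromValue) {
+            return ((Double) fromValue).floatValue();
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof Example_double2float_Conversion;
+        }
+    }
+
+    /*
+     * The conversion would be registered before opening the store,
+     * roughly as follows:
+     *
+     *   Mutations m = new Mutations();
+     *   m.addConverter(new Converter
+     *       ("com.example.MyEntity", 0, "ff",
+     *        new Example_double2float_Conversion()));
+     *   storeConfig.setMutations(m);
+     */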
@Entity(version=1) + static class DisallowNonKeyField_float2BigInt + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_float2BigInt.class.getName(); + + @PrimaryKey + int key = 99; + + BigInteger ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_float2BigInt version: 1 Error: Old field type: float is not compatible with the new type: java.math.BigInteger for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (float) 88); + } + } + + @Entity(version=1) + static class DisallowNonKeyField_BigInt2long + extends EvolveCase { + + private static final String NAME = + DisallowNonKeyField_BigInt2long.class.getName(); + + @PrimaryKey + int key = 99; + + long ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_BigInt2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowNonKeyField_BigInt2long version: 1 Error: Old field type: java.math.BigInteger is not compatible with the new type: long for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", BigInteger.valueOf(88)); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_byte2short + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_byte2short.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + short ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_byte2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_byte2short version: 1 Error: Old field type: byte is not compatible with the new type: short for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (byte) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_char2int + extends EvolveCase { + + private static final 
String NAME = + DisallowSecKeyField_char2int.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_char2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_char2int version: 1 Error: Old field type: char is not compatible with the new type: int for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (char) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_short2int + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_short2int.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_short2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_short2int version: 1 Error: Old field type: short is not compatible with the new type: int for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (short) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_int2long + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_int2long.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + long ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2long version: 1 Error: Old field type: int is not compatible with the new type: long for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_long2float + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_long2float.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + 
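+        /* Stored as long in version 0; a long -> float narrowing on a
+           secondary key field likewise requires an explicit mutation. */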
float ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_long2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_long2float version: 1 Error: Old field type: long is not compatible with the new type: float for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (long) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_float2double + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_float2double.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + double ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_float2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_float2double version: 1 Error: Old field type: float is not compatible with the new type: double for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (float) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_Byte2short2 + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_Byte2short2.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + short ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Byte2short2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Byte2short2 version: 1 Error: Old field type: java.lang.Byte is not compatible with the new type: short for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (byte) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_Character2int + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_Character2int.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int ff; + + @Override + public String getStoreOpenException() { + return 
"com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Character2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Character2int version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: int for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (char) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_Short2int2 + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_Short2int2.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Short2int2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Short2int2 version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: int for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (short) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_Integer2long + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_Integer2long.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + long ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Integer2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Integer2long version: 1 Error: Old field type: java.lang.Integer is not compatible with the new type: long for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_Long2float2 + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_Long2float2.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + float ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: 
Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Long2float2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Long2float2 version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: float for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (long) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_Float2double2 + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_Float2double2.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + double ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Float2double2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_Float2double2 version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: double for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", (float) 88); + } + } + + @Entity(version=1) + static class DisallowSecKeyField_int2BigInt + extends EvolveCase { + + private static final String NAME = + DisallowSecKeyField_int2BigInt.class.getName(); + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + BigInteger ff; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowSecKeyField_int2BigInt version: 1 Error: Old field type: int is not compatible with the new type: java.math.BigInteger for field: ff"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, "ff"); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "ff", 88); + } + } + + // --- + + @Entity(version=1) + static class DisallowPriKeyField_byte2short + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_byte2short.class.getName(); + + @PrimaryKey + short key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving 
class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_byte2short version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_byte2short version: 1 Error: Old field type: byte is not compatible with the new type: short for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (byte) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_char2int + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_char2int.class.getName(); + + @PrimaryKey + int key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_char2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_char2int version: 1 Error: Old field type: char is not compatible with the new type: int for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (char) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (char) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_short2int + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_short2int.class.getName(); + + @PrimaryKey + int key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_short2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_short2int version: 1 Error: Old field type: short is not compatible with the new type: int for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (short) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (short) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_int2long + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_int2long.class.getName(); + + @PrimaryKey + long key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_int2long version: 0 to class: 
com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_int2long version: 1 Error: Old field type: int is not compatible with the new type: long for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_long2float + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_long2float.class.getName(); + + @PrimaryKey + float key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_long2float version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_long2float version: 1 Error: Old field type: long is not compatible with the new type: float for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (long) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (long) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_float2double + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_float2double.class.getName(); + + @PrimaryKey + double key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_float2double version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_float2double version: 1 Error: Old field type: float is not compatible with the new type: double for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (float) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (float) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_Byte2short2 + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_Byte2short2.class.getName(); + + @PrimaryKey + short key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Byte2short2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Byte2short2 version: 1 
Error: Old field type: java.lang.Byte is not compatible with the new type: short for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (byte) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_Character2int + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_Character2int.class.getName(); + + @PrimaryKey + int key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Character2int version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Character2int version: 1 Error: Old field type: java.lang.Character is not compatible with the new type: int for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (char) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (char) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_Short2int2 + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_Short2int2.class.getName(); + + @PrimaryKey + int key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Short2int2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Short2int2 version: 1 Error: Old field type: java.lang.Short is not compatible with the new type: int for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (short) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (short) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_Integer2long + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_Integer2long.class.getName(); + + @PrimaryKey + long key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Integer2long version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Integer2long version: 1 Error: Old field type: 
java.lang.Integer is not compatible with the new type: long for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_Long2float2 + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_Long2float2.class.getName(); + + @PrimaryKey + float key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2float2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2float2 version: 1 Error: Old field type: java.lang.Long is not compatible with the new type: float for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (long) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (long) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_Float2double2 + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_Float2double2.class.getName(); + + @PrimaryKey + double key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Float2double2 version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Float2double2 version: 1 Error: Old field type: java.lang.Float is not compatible with the new type: double for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, (float) 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", (float) 99); + } + } + + @Entity(version=1) + static class DisallowPriKeyField_Long2BigInt + extends EvolveCase { + + private static final String NAME = + DisallowPriKeyField_Long2BigInt.class.getName(); + + @PrimaryKey + BigInteger key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2BigInt version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowPriKeyField_Long2BigInt version: 1 Error: Old field type: java.lang.Long is not compatible 
with the new type: java.math.BigInteger for field: key"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawObject obj = readRaw(store, 99L, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99L); + } + } + + @Persistent(version=1) + static class DisallowCompositeKeyField_byte2short_Key { + + @KeyField(1) + int f1 = 1; + + @KeyField(2) + short f2 = 2; + + @KeyField(3) + String f3 = "3"; + } + + @Entity + static class DisallowCompositeKeyField_byte2short + extends EvolveCase { + + private static final String NAME = + DisallowCompositeKeyField_byte2short.class.getName(); + private static final String NAME2 = + DisallowCompositeKeyField_byte2short_Key.class.getName(); + + @PrimaryKey + DisallowCompositeKeyField_byte2short_Key key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Type may not be changed for a primary key field or composite key class field when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowCompositeKeyField_byte2short_Key version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowCompositeKeyField_byte2short_Key version: 1 Error: Old field type: byte is not compatible with the new type: short for field: f2"; + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + checkEntity(true, model, env, NAME, 0, null); + checkNonEntity(true, model, env, NAME2, 0); + checkVersions(model, NAME, 0); + checkVersions(model, NAME2, 0); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + if (expectEvolved) { + TestCase.fail(); + } + RawType rawKeyType = store.getModel().getRawType(NAME2); + RawObject rawKey = new RawObject + (rawKeyType, + makeValues("f1", 1, "f2", (byte) 2, "f3", "3"), + null); + + RawObject obj = readRaw(store, rawKey, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", rawKey); + } + } + + @Entity(version=1) + static class AllowPriKeyField_byte2Byte + extends EvolveCase { + + private static final String NAME = + AllowPriKeyField_byte2Byte.class.getName(); + + @PrimaryKey + Byte key = 99; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Byte.class, + AllowPriKeyField_byte2Byte.class); + AllowPriKeyField_byte2Byte obj = index.get(key); + TestCase.assertNotNull(obj); + TestCase.assertEquals(Byte.valueOf((byte) 99), obj.key); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Byte.class, + AllowPriKeyField_byte2Byte.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get((byte) 99); + index.put((AllowPriKeyField_byte2Byte) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, 
+ boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, (byte) 99, NAME, 1, CASECLS, 0); + } else { + obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0); + } + checkRawFields(obj, "key", (byte) 99); + } + } + + @Entity(version=1) + static class AllowPriKeyField_Byte2byte2 + extends EvolveCase { + + private static final String NAME = + AllowPriKeyField_Byte2byte2.class.getName(); + + @PrimaryKey + byte key = 99; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Byte.class, + AllowPriKeyField_Byte2byte2.class); + AllowPriKeyField_Byte2byte2 obj = index.get(key); + TestCase.assertNotNull(obj); + TestCase.assertEquals((byte) 99, obj.key); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Byte.class, + AllowPriKeyField_Byte2byte2.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get((byte) 99); + index.put((AllowPriKeyField_Byte2byte2) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, (byte) 99, NAME, 1, CASECLS, 0); + } else { + obj = readRaw(store, (byte) 99, NAME, 0, CASECLS, 0); + } + checkRawFields(obj, "key", (byte) 99); + } + } + + @Persistent(version=1) + static class AllowFieldTypeChanges_Key { + + AllowFieldTypeChanges_Key() { + this(false); + } + + AllowFieldTypeChanges_Key(boolean init) { + if (init) { + f1 = true; + f2 = (byte) 2; + f3 = (short) 3; + f4 = 4; + f5 = 5L; + f6 = 6F; + f7 = 7D; + f8 = (char) 8; + f9 = true; + f10 = (byte) 10; + f11 = (short) 11; + f12 = 12; + f13 = 13L; + f14 = 14F; + f15 = 15D; + f16 = (char) 16; + } + } + + @KeyField(1) + boolean f1; + + @KeyField(2) + byte f2; + + @KeyField(3) + short f3; + + @KeyField(4) + int f4; + + @KeyField(5) + long f5; + + @KeyField(6) + float f6; + + @KeyField(7) + double f7; + + @KeyField(8) + char f8; + + @KeyField(9) + Boolean f9; + + @KeyField(10) + Byte f10; + + @KeyField(11) + Short f11; + + @KeyField(12) + Integer f12; + + @KeyField(13) + Long f13; + + @KeyField(14) + Float f14; + + @KeyField(15) + Double f15; + + @KeyField(16) + Character f16; + } + + @Persistent(version=1) + static class AllowFieldTypeChanges_Base + extends EvolveCase { + + @SecondaryKey(relate=ONE_TO_ONE) + AllowFieldTypeChanges_Key kComposite1; + + Integer f_long2Integer; + Long f_String2Long; + } + + /** + * Allow field type changes: automatic widening, supported widening, + * and Converter mutations. Also tests primary and secondary key field + * renaming. 
+ */ + @Entity(version=1) + static class AllowFieldTypeChanges + extends AllowFieldTypeChanges_Base { + + private static final String NAME = + AllowFieldTypeChanges.class.getName(); + private static final String NAME2 = + AllowFieldTypeChanges_Base.class.getName(); + private static final String NAME3 = + AllowFieldTypeChanges_Key.class.getName(); + + @PrimaryKey + Integer pkeyInt1; + + @SecondaryKey(relate=ONE_TO_ONE) + Boolean kBoolean1; + + @SecondaryKey(relate=ONE_TO_ONE) + Byte kByte1; + + @SecondaryKey(relate=ONE_TO_ONE) + Short kShort1; + + @SecondaryKey(relate=ONE_TO_ONE) + Integer kInt1; + + @SecondaryKey(relate=ONE_TO_ONE) + Long kLong1; + + @SecondaryKey(relate=ONE_TO_ONE) + Float kFloat1; + + @SecondaryKey(relate=ONE_TO_ONE) + Double kDouble1; + + @SecondaryKey(relate=ONE_TO_ONE) + Character kCharacter1; + + short f01; + int f02; + long f03; + float f04; + double f06; + int f07; + long f08; + float f09; + double f10; + int f11; + long f12; + float f13; + double f14; + long f15; + float f16; + double f17; + float f18; + double f19; + double f20; + + Short f21; + Integer f22; + Long f23; + Float f24; + Double f26; + Integer f27; + Long f28; + Float f29; + Double f30; + Integer f31; + Long f32; + Float f33; + Double f34; + Long f35; + Float f36; + Double f37; + Float f38; + Double f39; + Double f40; + + Short f41; + Integer f42; + Long f43; + Float f44; + Double f46; + Integer f47; + Long f48; + Float f49; + Double f50; + Integer f51; + Long f52; + Float f53; + Double f54; + Long f55; + Float f56; + Double f57; + Float f58; + Double f59; + Double f60; + + BigInteger f70; + BigInteger f71; + BigInteger f72; + BigInteger f73; + BigInteger f74; + BigInteger f75; + BigInteger f76; + BigInteger f77; + BigInteger f78; + BigInteger f79; + + int f_long2int; + long f_String2long; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addRenamer(new Renamer(NAME, 0, "pkeyint", "pkeyInt1")); + m.addRenamer(new Renamer(NAME, 0, "kboolean", "kBoolean1")); + m.addRenamer(new Renamer(NAME, 0, "kbyte", "kByte1")); + m.addRenamer(new Renamer(NAME, 0, "kshort", "kShort1")); + m.addRenamer(new Renamer(NAME, 0, "kint", "kInt1")); + m.addRenamer(new Renamer(NAME, 0, "klong", "kLong1")); + m.addRenamer(new Renamer(NAME, 0, "kfloat", "kFloat1")); + m.addRenamer(new Renamer(NAME, 0, "kdouble", "kDouble1")); + m.addRenamer(new Renamer(NAME, 0, "kchar", "kCharacter1")); + m.addRenamer(new Renamer(NAME2, 0, "kcomposite", "kComposite1")); + + Conversion conv1 = new MyConversion1(); + Conversion conv2 = new MyConversion2(); + + m.addConverter(new Converter(NAME, 0, "f_long2int", conv1)); + m.addConverter(new Converter(NAME, 0, "f_String2long", conv2)); + m.addConverter(new Converter(NAME2, 0, "f_long2Integer", conv1)); + m.addConverter(new Converter(NAME2, 0, "f_String2Long", conv2)); + return m; + } + + @SuppressWarnings("serial") + static class MyConversion1 implements Conversion { + + public void initialize(EntityModel model) {} + + public Object convert(Object o) { + return ((Long) o).intValue(); + } + + @Override + public boolean equals(Object other) { return true; } + } + + @SuppressWarnings("serial") + static class MyConversion2 implements Conversion { + + public void initialize(EntityModel model) {} + + public Object convert(Object o) { + return Long.valueOf((String) o); + } + + @Override + public boolean equals(Object other) { return true; } + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, 
env, NAME, 1, null); + checkNonEntity(true, model, env, NAME2, 1); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 1, NAME2, 0); + checkVersions(model, NAME3, 1, NAME3, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 1); + checkVersions(model, NAME3, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, AllowFieldTypeChanges.class); + AllowFieldTypeChanges obj = index.get(99); + checkValues(obj); + checkSecondaries(store, index); + + if (doUpdate) { + index.put(obj); + checkSecondaries(store, index); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, AllowFieldTypeChanges.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((AllowFieldTypeChanges) + newStore.getModel().convertRawObject(raw)); + } + + private void checkSecondaries(EntityStore store, + PrimaryIndex + index) + throws DatabaseException { + + if (!newMetadataWritten) { + return; + } + checkValues(store.getSecondaryIndex + (index, Boolean.class, "kBoolean1").get(true)); + checkValues(store.getSecondaryIndex + (index, Byte.class, "kByte1").get((byte) 77)); + checkValues(store.getSecondaryIndex + (index, Short.class, "kShort1").get((short) 66)); + checkValues(store.getSecondaryIndex + (index, Integer.class, "kInt1").get(55)); + checkValues(store.getSecondaryIndex + (index, Long.class, "kLong1").get((long) 44)); + checkValues(store.getSecondaryIndex + (index, Float.class, "kFloat1").get((float) 33)); + checkValues(store.getSecondaryIndex + (index, Double.class, "kDouble1").get((double) 22)); + checkValues(store.getSecondaryIndex + (index, Character.class, "kCharacter1").get((char) 11)); + checkValues(store.getSecondaryIndex + (index, AllowFieldTypeChanges_Key.class, "kComposite1").get + (new AllowFieldTypeChanges_Key(true))); + } + + private void checkValues(AllowFieldTypeChanges obj) { + TestCase.assertNotNull(obj); + TestCase.assertEquals(obj.pkeyInt1, Integer.valueOf(99)); + TestCase.assertEquals(obj.kBoolean1, Boolean.valueOf(true)); + TestCase.assertEquals(obj.kByte1, Byte.valueOf((byte) 77)); + TestCase.assertEquals(obj.kShort1, Short.valueOf((short) 66)); + TestCase.assertEquals(obj.kInt1, Integer.valueOf(55)); + TestCase.assertEquals(obj.kLong1, Long.valueOf(44)); + TestCase.assertEquals(obj.kFloat1, Float.valueOf(33)); + TestCase.assertEquals(obj.kDouble1, Double.valueOf(22)); + TestCase.assertEquals(obj.kCharacter1, + Character.valueOf((char) 11)); + + AllowFieldTypeChanges_Key embed = obj.kComposite1; + TestCase.assertNotNull(embed); + TestCase.assertEquals(embed.f1, true); + TestCase.assertEquals(embed.f2, (byte) 2); + TestCase.assertEquals(embed.f3, (short) 3); + TestCase.assertEquals(embed.f4, 4); + TestCase.assertEquals(embed.f5, 5L); + TestCase.assertEquals(embed.f6, 6F); + TestCase.assertEquals(embed.f7, 7D); + TestCase.assertEquals(embed.f8, (char) 8); + TestCase.assertEquals(embed.f9, Boolean.valueOf(true)); + TestCase.assertEquals(embed.f10, Byte.valueOf((byte) 10)); + TestCase.assertEquals(embed.f11, Short.valueOf((short) 11)); + TestCase.assertEquals(embed.f12, Integer.valueOf(12)); + TestCase.assertEquals(embed.f13, Long.valueOf(13L)); + TestCase.assertEquals(embed.f14, Float.valueOf(14F)); + TestCase.assertEquals(embed.f15, Double.valueOf(15D)); + 
TestCase.assertEquals(embed.f16, Character.valueOf((char) 16)); + + TestCase.assertEquals(obj.f01, (short) 1); + TestCase.assertEquals(obj.f02, 2); + TestCase.assertEquals(obj.f03, 3); + TestCase.assertEquals(obj.f04, (float) 4); + TestCase.assertEquals(obj.f06, (double) 6); + TestCase.assertEquals(obj.f07, 7); + TestCase.assertEquals(obj.f08, 8); + TestCase.assertEquals(obj.f09, (float) 9); + TestCase.assertEquals(obj.f10, (double) 10); + TestCase.assertEquals(obj.f11, 11); + TestCase.assertEquals(obj.f12, 12); + TestCase.assertEquals(obj.f13, (float) 13); + TestCase.assertEquals(obj.f14, (double) 14); + TestCase.assertEquals(obj.f15, 15L); + TestCase.assertEquals(obj.f16, 16F); + TestCase.assertEquals(obj.f17, 17D); + TestCase.assertEquals(obj.f18, (float) 18); + TestCase.assertEquals(obj.f19, (double) 19); + TestCase.assertEquals(obj.f20, (double) 20); + + TestCase.assertEquals(obj.f21, Short.valueOf((byte) 21)); + TestCase.assertEquals(obj.f22, Integer.valueOf((byte) 22)); + TestCase.assertEquals(obj.f23, Long.valueOf((byte) 23)); + TestCase.assertEquals(obj.f24, Float.valueOf((byte) 24)); + TestCase.assertEquals(obj.f26, Double.valueOf((byte) 26)); + TestCase.assertEquals(obj.f27, Integer.valueOf((short) 27)); + TestCase.assertEquals(obj.f28, Long.valueOf((short) 28)); + TestCase.assertEquals(obj.f29, Float.valueOf((short) 29)); + TestCase.assertEquals(obj.f30, Double.valueOf((short) 30)); + TestCase.assertEquals(obj.f31, Integer.valueOf((char) 31)); + TestCase.assertEquals(obj.f32, Long.valueOf((char) 32)); + TestCase.assertEquals(obj.f33, Float.valueOf((char) 33)); + TestCase.assertEquals(obj.f34, Double.valueOf((char) 34)); + TestCase.assertEquals(obj.f35, Long.valueOf(35)); + TestCase.assertEquals(obj.f36, Float.valueOf(36)); + TestCase.assertEquals(obj.f37, Double.valueOf(37)); + TestCase.assertEquals(obj.f38, Float.valueOf(38)); + TestCase.assertEquals(obj.f39, Double.valueOf(39)); + TestCase.assertEquals(obj.f40, Double.valueOf(40)); + + TestCase.assertEquals(obj.f41, Short.valueOf((byte) 41)); + TestCase.assertEquals(obj.f42, Integer.valueOf((byte) 42)); + TestCase.assertEquals(obj.f43, Long.valueOf((byte) 43)); + TestCase.assertEquals(obj.f44, Float.valueOf((byte) 44)); + TestCase.assertEquals(obj.f46, Double.valueOf((byte) 46)); + TestCase.assertEquals(obj.f47, Integer.valueOf((short) 47)); + TestCase.assertEquals(obj.f48, Long.valueOf((short) 48)); + TestCase.assertEquals(obj.f49, Float.valueOf((short) 49)); + TestCase.assertEquals(obj.f50, Double.valueOf((short) 50)); + TestCase.assertEquals(obj.f51, Integer.valueOf((char) 51)); + TestCase.assertEquals(obj.f52, Long.valueOf((char) 52)); + TestCase.assertEquals(obj.f53, Float.valueOf((char) 53)); + TestCase.assertEquals(obj.f54, Double.valueOf((char) 54)); + TestCase.assertEquals(obj.f55, Long.valueOf(55)); + TestCase.assertEquals(obj.f56, Float.valueOf(56)); + TestCase.assertEquals(obj.f57, Double.valueOf(57)); + TestCase.assertEquals(obj.f58, Float.valueOf(58)); + TestCase.assertEquals(obj.f59, Double.valueOf(59)); + TestCase.assertEquals(obj.f60, Double.valueOf(60)); + + TestCase.assertEquals(obj.f70, BigInteger.valueOf(70)); + TestCase.assertEquals(obj.f71, BigInteger.valueOf(71)); + TestCase.assertEquals(obj.f72, BigInteger.valueOf(72)); + TestCase.assertEquals(obj.f73, BigInteger.valueOf(73)); + TestCase.assertEquals(obj.f74, BigInteger.valueOf(74)); + TestCase.assertEquals(obj.f75, BigInteger.valueOf(75)); + TestCase.assertEquals(obj.f76, BigInteger.valueOf(76)); + TestCase.assertEquals(obj.f77, 
BigInteger.valueOf(77)); + TestCase.assertEquals(obj.f78, BigInteger.valueOf(78)); + TestCase.assertEquals(obj.f79, BigInteger.valueOf(79)); + + TestCase.assertEquals(obj.f_long2Integer, Integer.valueOf(111)); + TestCase.assertEquals(obj.f_String2Long, Long.valueOf(222)); + TestCase.assertEquals(obj.f_long2int, 333); + TestCase.assertEquals(obj.f_String2long, 444L); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME3); + RawObject embed = new RawObject + (embedType, + makeValues + ("f1", true, + "f2", (byte) 2, + "f3", (short) 3, + "f4", 4, + "f5", 5L, + "f6", 6F, + "f7", 7D, + "f8", (char) 8, + "f9", true, + "f10", (byte) 10, + "f11", (short) 11, + "f12", 12, + "f13", 13L, + "f14", 14F, + "f15", 15D, + "f16", (char) 16), + null); + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 99, NAME, 1, NAME2, 1, CASECLS, 0); + checkRawFields(obj, "pkeyInt1", 99, + "kBoolean1", true, + "kByte1", (byte) 77, + "kShort1", (short) 66, + "kInt1", 55, + "kLong1", (long) 44, + "kFloat1", (float) 33, + "kDouble1", (double) 22, + "kCharacter1", (char) 11, + + "f01", (short) 1, + "f02", 2, + "f03", (long) 3, + "f04", (float) 4, + "f06", (double) 6, + "f07", 7, + "f08", (long) 8, + "f09", (float) 9, + "f10", (double) 10, + "f11", 11, + "f12", (long) 12, + "f13", (float) 13, + "f14", (double) 14, + "f15", 15L, + "f16", 16F, + "f17", 17D, + "f18", (float) 18, + "f19", (double) 19, + "f20", (double) 20, + + "f21", (short) 21, + "f22", 22, + "f23", (long) 23, + "f24", (float) 24, + "f26", (double) 26, + "f27", 27, + "f28", (long) 28, + "f29", (float) 29, + "f30", (double) 30, + "f31", 31, + "f32", (long) 32, + "f33", (float) 33, + "f34", (double) 34, + "f35", 35L, + "f36", 36F, + "f37", 37D, + "f38", (float) 38, + "f39", (double) 39, + "f40", (double) 40, + + "f41", (short) 41, + "f42", 42, + "f43", (long) 43, + "f44", (float) 44, + "f46", (double) 46, + "f47", 47, + "f48", (long) 48, + "f49", (float) 49, + "f50", (double) 50, + "f51", 51, + "f52", (long) 52, + "f53", (float) 53, + "f54", (double) 54, + "f55", 55L, + "f56", 56F, + "f57", 57D, + "f58", (float) 58, + "f59", (double) 59, + "f60", (double) 60, + + "f70", BigInteger.valueOf(70), + "f71", BigInteger.valueOf(71), + "f72", BigInteger.valueOf(72), + "f73", BigInteger.valueOf(73), + "f74", BigInteger.valueOf(74), + "f75", BigInteger.valueOf(75), + "f76", BigInteger.valueOf(76), + "f77", BigInteger.valueOf(77), + "f78", BigInteger.valueOf(78), + "f79", BigInteger.valueOf(79), + + "f_long2int", 333, + "f_String2long", 444L); + checkRawFields(obj.getSuper(), + "kComposite1", embed, + "f_long2Integer", 111, + "f_String2Long", 222L); + } else { + obj = readRaw(store, 99, NAME, 0, NAME2, 0, CASECLS, 0); + checkRawFields(obj, "pkeyint", 99, + "kboolean", true, + "kbyte", (byte) 77, + "kshort", (short) 66, + "kint", 55, + "klong", (long) 44, + "kfloat", (float) 33, + "kdouble", (double) 22, + "kchar", (char) 11, + + "f01", (byte) 1, + "f02", (byte) 2, + "f03", (byte) 3, + "f04", (byte) 4, + "f06", (byte) 6, + "f07", (short) 7, + "f08", (short) 8, + "f09", (short) 9, + "f10", (short) 10, + "f11", (char) 11, + "f12", (char) 12, + "f13", (char) 13, + "f14", (char) 14, + "f15", 15, + "f16", 16, + "f17", 17, + "f18", (long) 18, + "f19", (long) 19, + "f20", (float) 20, + + "f21", (byte) 21, + "f22", (byte) 22, + "f23", (byte) 23, + "f24", (byte) 24, + "f26", (byte) 26, + "f27", (short) 27, + "f28", (short) 28, + "f29", 
(short) 29, + "f30", (short) 30, + "f31", (char) 31, + "f32", (char) 32, + "f33", (char) 33, + "f34", (char) 34, + "f35", 35, + "f36", 36, + "f37", 37, + "f38", (long) 38, + "f39", (long) 39, + "f40", (float) 40, + + "f41", (byte) 41, + "f42", (byte) 42, + "f43", (byte) 43, + "f44", (byte) 44, + "f46", (byte) 46, + "f47", (short) 47, + "f48", (short) 48, + "f49", (short) 49, + "f50", (short) 50, + "f51", (char) 51, + "f52", (char) 52, + "f53", (char) 53, + "f54", (char) 54, + "f55", 55, + "f56", 56, + "f57", 57, + "f58", (long) 58, + "f59", (long) 59, + "f60", (float) 60, + + "f70", (byte) 70, + "f71", (short) 71, + "f72", (char) 72, + "f73", 73, + "f74", (long) 74, + "f75", (byte) 75, + "f76", (short) 76, + "f77", (char) 77, + "f78", 78, + "f79", (long) 79, + + "f_long2int", 333L, + "f_String2long", "444"); + + checkRawFields(obj.getSuper(), + "kcomposite", embed, + "f_long2Integer", 111L, + "f_String2Long", "222"); + } + Environment env = store.getEnvironment(); + + assertDbExists(expectEvolved, env, NAME, "kBoolean1"); + assertDbExists(expectEvolved, env, NAME, "kByte1"); + assertDbExists(expectEvolved, env, NAME, "kShort1"); + assertDbExists(expectEvolved, env, NAME, "kInt1"); + assertDbExists(expectEvolved, env, NAME, "kLong1"); + assertDbExists(expectEvolved, env, NAME, "kFloat1"); + assertDbExists(expectEvolved, env, NAME, "kDouble1"); + assertDbExists(expectEvolved, env, NAME, "kCharacter1"); + assertDbExists(expectEvolved, env, NAME, "kComposite1"); + + assertDbExists(!expectEvolved, env, NAME, "kboolean"); + assertDbExists(!expectEvolved, env, NAME, "kbyte"); + assertDbExists(!expectEvolved, env, NAME, "kshort"); + assertDbExists(!expectEvolved, env, NAME, "kint"); + assertDbExists(!expectEvolved, env, NAME, "klong"); + assertDbExists(!expectEvolved, env, NAME, "kfloat"); + assertDbExists(!expectEvolved, env, NAME, "kdouble"); + assertDbExists(!expectEvolved, env, NAME, "kchar"); + assertDbExists(!expectEvolved, env, NAME, "kcomposite"); + } + } + + @SuppressWarnings("serial") + static class ConvertFieldContent_Conversion implements Conversion { + + public void initialize(EntityModel model) { + } + + public Object convert(Object fromValue) { + String s1 = (String) fromValue; + return (new StringBuilder(s1)).reverse().toString(); + } + + @Override + public boolean equals(Object o) { + return o instanceof ConvertFieldContent_Conversion; + } + } + + @Entity(version=1) + static class ConvertFieldContent_Entity + extends EvolveCase { + + private static final String NAME = + ConvertFieldContent_Entity.class.getName(); + + @PrimaryKey + int key = 99; + + String f1; + String f2; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = new Converter + (ConvertFieldContent_Entity.class.getName(), 0, + "f1", new ConvertFieldContent_Conversion()); + m.addConverter(converter); + converter = new Converter + (ConvertFieldContent_Entity.class.getName(), 0, + "f2", new ConvertFieldContent_Conversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ConvertFieldContent_Entity.class); + ConvertFieldContent_Entity obj = 
index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals("43210", obj.f1); + TestCase.assertEquals("98765", obj.f2); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ConvertFieldContent_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ConvertFieldContent_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = + readRaw(store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0); + if (expectEvolved) { + checkRawFields(obj, "key", 99, + "f1", "43210", + "f2", "98765"); + } else { + checkRawFields(obj, "key", 99, + "f1", "01234", + "f2", "56789"); + } + } + } + + @Persistent(version=1) + static class ConvertExample1_Address { + String street; + String city; + String state; + int zipCode; + } + + @SuppressWarnings("serial") + static class ConvertExample1_Conversion implements Conversion { + + public void initialize(EntityModel model) { + } + + public Object convert(Object fromValue) { + return Integer.valueOf((String) fromValue); + } + + @Override + public boolean equals(Object o) { + return o instanceof ConvertExample1_Conversion; + } + } + + @Entity + static class ConvertExample1_Entity + extends EvolveCase { + + private static final String NAME = + ConvertExample1_Entity.class.getName(); + private static final String NAME2 = + ConvertExample1_Address.class.getName(); + + @PrimaryKey + int key = 99; + + ConvertExample1_Address embed; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = new Converter + (ConvertExample1_Address.class.getName(), 0, + "zipCode", new ConvertExample1_Conversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ConvertExample1_Entity.class); + ConvertExample1_Entity obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals("street", obj.embed.street); + TestCase.assertEquals("city", obj.embed.city); + TestCase.assertEquals("state", obj.embed.state); + TestCase.assertEquals(12345, obj.embed.zipCode); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ConvertExample1_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ConvertExample1_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject embed; + if (expectEvolved) { + embed = new RawObject + (embedType, + 
makeValues("street", "street", + "city", "city", + "state", "state", + "zipCode", 12345), + null); + } else { + embed = new RawObject + (embedType, + makeValues("street", "street", + "city", "city", + "state", "state", + "zipCode", "12345"), + null); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed); + } + } + + @Persistent + static class ConvertExample2_Address { + String street; + String city; + String state; + int zipCode; + } + + @Entity(version=1) + static class ConvertExample2_Person + extends EvolveCase { + + private static final String NAME = + ConvertExample2_Person.class.getName(); + private static final String NAME2 = + ConvertExample2_Address .class.getName(); + + @PrimaryKey + int key; + + ConvertExample2_Address address; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = new Converter + (ConvertExample2_Person.class.getName(), 0, + "address", new ConvertExample2_Conversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + checkVersions(model, NAME2, 0); + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ConvertExample2_Person.class); + ConvertExample2_Person obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertNotNull(obj.address); + TestCase.assertEquals("street", obj.address.street); + TestCase.assertEquals("city", obj.address.city); + TestCase.assertEquals("state", obj.address.state); + TestCase.assertEquals(12345, obj.address.zipCode); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ConvertExample2_Person.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ConvertExample2_Person) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + Object embed; + if (expectEvolved) { + RawType embedType = store.getModel().getRawType(NAME2); + embed = new RawObject + (embedType, + makeValues("street", "street", + "city", "city", + "state", "state", + "zipCode", 12345), + null); + } else { + embed = "street#city#state#12345"; + } + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 
1 : 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "address", embed); + } + } + + @SuppressWarnings("serial") + static class ConvertExample2_Conversion implements Conversion { + private transient RawType addressType; + + public void initialize(EntityModel model) { + addressType = model.getRawType + (ConvertExample2_Address.class.getName()); + } + + public Object convert(Object fromValue) { + + String oldAddress = (String) fromValue; + Map addressValues = new HashMap(); + addressValues.put("street", parseAddress(1, oldAddress)); + addressValues.put("city", parseAddress(2, oldAddress)); + addressValues.put("state", parseAddress(3, oldAddress)); + addressValues.put("zipCode", + Integer.valueOf(parseAddress(4, oldAddress))); + + return new RawObject(addressType, addressValues, null); + } + + @Override + public boolean equals(Object o) { + return o instanceof ConvertExample2_Conversion; + } + + private String parseAddress(int fieldNum, String oldAddress) { + StringTokenizer tokens = new StringTokenizer(oldAddress, "#"); + String field = null; + for (int i = 0; i < fieldNum; i += 1) { + field = tokens.nextToken(); + } + return field; + } + } + + @Persistent + static class ConvertExample3_Address { + String street; + String city; + String state; + int zipCode; + } + + @SuppressWarnings("serial") + static class ConvertExample3_Conversion implements Conversion { + private transient RawType newPersonType; + private transient RawType addressType; + + public void initialize(EntityModel model) { + newPersonType = model.getRawType + (ConvertExample3_Person.class.getName()); + addressType = model.getRawType + (ConvertExample3_Address.class.getName()); + } + + public Object convert(Object fromValue) { + + RawObject person = (RawObject) fromValue; + Map personValues = person.getValues(); + Map addressValues = new HashMap(); + RawObject address = new RawObject + (addressType, addressValues, null); + + addressValues.put("street", personValues.remove("street")); + addressValues.put("city", personValues.remove("city")); + addressValues.put("state", personValues.remove("state")); + addressValues.put("zipCode", personValues.remove("zipCode")); + personValues.put("address", address); + + return new RawObject + (newPersonType, personValues, person.getSuper()); + } + + @Override + public boolean equals(Object o) { + return o instanceof ConvertExample3_Conversion; + } + } + + @Entity(version=1) + static class ConvertExample3_Person + extends EvolveCase { + + private static final String NAME = + ConvertExample3_Person.class.getName(); + private static final String NAME2 = + ConvertExample3_Address .class.getName(); + + @PrimaryKey + int key; + + ConvertExample3_Address address; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = new Converter + (ConvertExample3_Person.class.getName(), 0, + new ConvertExample3_Conversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + checkVersions(model, NAME2, 0); + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ConvertExample3_Person.class); + ConvertExample3_Person obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, 
obj.key); + TestCase.assertNotNull(obj.address); + TestCase.assertEquals("street", obj.address.street); + TestCase.assertEquals("city", obj.address.city); + TestCase.assertEquals("state", obj.address.state); + TestCase.assertEquals(12345, obj.address.zipCode); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ConvertExample3_Person.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ConvertExample3_Person) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0); + if (expectEvolved) { + RawType embedType = store.getModel().getRawType(NAME2); + Object embed = new RawObject + (embedType, + makeValues("street", "street", + "city", "city", + "state", "state", + "zipCode", 12345), + null); + checkRawFields(obj, "key", 99, "address", embed); + } else { + checkRawFields(obj, "key", 99, + "street", "street", + "city", "city", + "state", "state", + "zipCode", 12345); + } + } + } + + @SuppressWarnings("serial") + static class ConvertExample3Reverse_Conversion implements Conversion { + private transient RawType newPersonType; + + public void initialize(EntityModel model) { + newPersonType = model.getRawType + (ConvertExample3Reverse_Person.class.getName()); + } + + public Object convert(Object fromValue) { + + RawObject person = (RawObject) fromValue; + Map personValues = person.getValues(); + RawObject address = (RawObject) personValues.remove("address"); + Map addressValues = address.getValues(); + + personValues.put("street", addressValues.remove("street")); + personValues.put("city", addressValues.remove("city")); + personValues.put("state", addressValues.remove("state")); + personValues.put("zipCode", addressValues.remove("zipCode")); + + return new RawObject + (newPersonType, personValues, person.getSuper()); + } + + @Override + public boolean equals(Object o) { + return o instanceof ConvertExample3Reverse_Conversion; + } + } + + @Entity(version=1) + static class ConvertExample3Reverse_Person + extends EvolveCase { + + private static final String NAME = + ConvertExample3Reverse_Person.class.getName(); + private static final String NAME2 = + PREFIX + "ConvertExample3Reverse_Address"; + + @PrimaryKey + int key; + + String street; + String city; + String state; + int zipCode; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = new Converter + (ConvertExample3Reverse_Person.class.getName(), 0, + new ConvertExample3Reverse_Conversion()); + m.addConverter(converter); + m.addDeleter(new Deleter(NAME2, 0)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ConvertExample3Reverse_Person.class); + ConvertExample3Reverse_Person obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + 
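/* + * The reverse conversion flattens the Address sub-object back into + * these Person fields, and the Deleter mutation in getMutations drops + * the now-unused Address type; the assertions below check the + * flattened values. + */ + 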
TestCase.assertEquals("street", obj.street); + TestCase.assertEquals("city", obj.city); + TestCase.assertEquals("state", obj.state); + TestCase.assertEquals(12345, obj.zipCode); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ConvertExample3Reverse_Person.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ConvertExample3Reverse_Person) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0); + if (expectEvolved) { + checkRawFields(obj, "key", 99, + "street", "street", + "city", "city", + "state", "state", + "zipCode", 12345); + } else { + RawType embedType = store.getModel().getRawType(NAME2); + Object embed = new RawObject + (embedType, + makeValues("street", "street", + "city", "city", + "state", "state", + "zipCode", 12345), + null); + checkRawFields(obj, "key", 99, "address", embed); + } + } + } + + @Persistent(version=1) + static class ConvertExample4_A extends ConvertExample4_B { + } + + @Persistent(version=1) + static class ConvertExample4_B { + String name; + } + + @SuppressWarnings("serial") + static class Example4_Conversion implements Conversion { + private transient RawType newAType; + private transient RawType newBType; + + public void initialize(EntityModel model) { + newAType = model.getRawType(ConvertExample4_A.class.getName()); + newBType = model.getRawType(ConvertExample4_B.class.getName()); + } + + public Object convert(Object fromValue) { + RawObject oldA = (RawObject) fromValue; + RawObject oldB = oldA.getSuper(); + Map aValues = oldA.getValues(); + Map bValues = oldB.getValues(); + bValues.put("name", aValues.remove("name")); + RawObject newB = new RawObject(newBType, bValues, oldB.getSuper()); + RawObject newA = new RawObject(newAType, aValues, newB); + return newA; + } + + @Override + public boolean equals(Object o) { + return o instanceof Example4_Conversion; + } + } + + @Entity(version=1) + static class ConvertExample4_Entity + extends EvolveCase { + + private static final String NAME = + ConvertExample4_Entity.class.getName(); + private static final String NAME2 = + ConvertExample4_A .class.getName(); + private static final String NAME3 = + ConvertExample4_B .class.getName(); + + @PrimaryKey + int key; + + ConvertExample4_A embed; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = new Converter + (ConvertExample4_A.class.getName(), 0, + new Example4_Conversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 1, NAME2, 0); + checkVersions(model, NAME3, 1, NAME3, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 1); + checkVersions(model, NAME3, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ConvertExample4_Entity.class); + ConvertExample4_Entity obj = index.get(99); + TestCase.assertNotNull(obj); + 
TestCase.assertEquals(99, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals("name", obj.embed.name); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ConvertExample4_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ConvertExample4_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedTypeA = store.getModel().getRawType(NAME2); + RawType embedTypeB = store.getModel().getRawType(NAME3); + Object embed; + if (expectEvolved) { + embed = new RawObject(embedTypeA, makeValues(), + new RawObject + (embedTypeB, makeValues("name", "name"), null)); + } else { + embed = new RawObject(embedTypeA, makeValues("name", "name"), + new RawObject + (embedTypeB, makeValues(), null)); + } + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed); + } + } + + @Persistent(version=1) + static class ConvertExample5_Pet { + String name; + } + + @Persistent + static class ConvertExample5_Cat extends ConvertExample5_Pet { + int finickyLevel; + } + + @Persistent + static class ConvertExample5_Dog extends ConvertExample5_Pet { + double barkVolume; + } + + @SuppressWarnings("serial") + static class ConvertExample5_Conversion implements Conversion { + private transient RawType newPetType; + private transient RawType dogType; + private transient RawType catType; + + public void initialize(EntityModel model) { + newPetType = model.getRawType(ConvertExample5_Pet.class.getName()); + dogType = model.getRawType(ConvertExample5_Dog.class.getName()); + catType = model.getRawType(ConvertExample5_Cat.class.getName()); + } + + public Object convert(Object fromValue) { + RawObject pet = (RawObject) fromValue; + Map petValues = pet.getValues(); + Map subTypeValues = new HashMap(); + Boolean isCat = (Boolean) petValues.remove("isCatNotDog"); + Integer finickyLevel = (Integer) petValues.remove("finickyLevel"); + Double barkVolume = (Double) petValues.remove("barkVolume"); + RawType newSubType; + if (isCat) { + newSubType = catType; + subTypeValues.put("finickyLevel", finickyLevel); + } else { + newSubType = dogType; + subTypeValues.put("barkVolume", barkVolume); + } + RawObject newPet = new RawObject + (newPetType, petValues, pet.getSuper()); + return new RawObject(newSubType, subTypeValues, newPet); + } + + @Override + public boolean equals(Object o) { + return o instanceof ConvertExample5_Conversion; + } + } + + @Entity(version=1) + static class ConvertExample5_Entity + extends EvolveCase { + + private static final String NAME = + ConvertExample5_Entity.class.getName(); + private static final String NAME2 = + ConvertExample5_Pet.class.getName(); + private static final String NAME3 = + ConvertExample5_Cat.class.getName(); + private static final String NAME4 = + ConvertExample5_Dog.class.getName(); + + @PrimaryKey + int key; + + ConvertExample5_Cat cat; + ConvertExample5_Dog dog; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = new Converter + (ConvertExample5_Pet.class.getName(), 0, + new ConvertExample5_Conversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment 
env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 1); + } + checkVersions(model, NAME3, 0); + checkVersions(model, NAME4, 0); + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ConvertExample5_Entity.class); + ConvertExample5_Entity obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertNotNull(obj.cat); + TestCase.assertEquals("Jeffry", obj.cat.name); + TestCase.assertEquals(999, obj.cat.finickyLevel); + TestCase.assertNotNull(obj.dog); + TestCase.assertEquals("Nelson", obj.dog.name); + TestCase.assertEquals(0.01, obj.dog.barkVolume); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ConvertExample5_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ConvertExample5_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType petType = store.getModel().getRawType(NAME2); + RawObject cat; + RawObject dog; + if (expectEvolved) { + RawType catType = store.getModel().getRawType(NAME3); + RawType dogType = store.getModel().getRawType(NAME4); + cat = new RawObject(catType, makeValues("finickyLevel", 999), + new RawObject(petType, makeValues("name", "Jeffry"), + null)); + dog = new RawObject(dogType, makeValues("barkVolume", 0.01), + new RawObject(petType, makeValues("name", "Nelson"), + null)); + } else { + cat = new RawObject(petType, makeValues("name", "Jeffry", + "isCatNotDog", true, + "finickyLevel", 999, + "barkVolume", 0.0), + null); + dog = new RawObject(petType, makeValues("name", "Nelson", + "isCatNotDog", false, + "finickyLevel", 0, + "barkVolume", 0.01), + null); + } + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 
1 : 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "cat", cat, "dog", dog); + } + } + + @Persistent(version=1) + static class AllowFieldAddDelete_Embed { + private final String f0 = "0"; + private String f2; + private final int f3 = 3; + private String f4; + private final int f5 = 5; + private final String f8 = "8"; + private final int f9 = 9; + } + + @Persistent(version=1) + static class AllowFieldAddDelete_Base + extends EvolveCase { + + private final String f0 = "0"; + private String f2; + private final int f3 = 3; + private String f4; + private final int f5 = 5; + private final String f8 = "8"; + private final int f9 = 9; + } + + @Entity(version=1) + static class AllowFieldAddDelete + extends AllowFieldAddDelete_Base { + + private static final String NAME = + AllowFieldAddDelete.class.getName(); + private static final String NAME2 = + AllowFieldAddDelete_Base.class.getName(); + private static final String NAME3 = + AllowFieldAddDelete_Embed.class.getName(); + + @PrimaryKey + int key; + + AllowFieldAddDelete_Embed embed; + + private final String f0 = "0"; + private String f2; + private final int f3 = 3; + private String f4; + private final int f5 = 5; + private final String f8 = "8"; + private final int f9 = 9; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + for (String name : new String[] {NAME, NAME2, NAME3}) { + m.addDeleter(new Deleter(name, 0, "f1")); + m.addDeleter(new Deleter(name, 0, "f6")); + m.addDeleter(new Deleter(name, 0, "f7")); + } + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 1, NAME2, 0); + checkVersions(model, NAME3, 1, NAME3, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 1); + checkVersions(model, NAME3, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + AllowFieldAddDelete.class); + AllowFieldAddDelete obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + { + AllowFieldAddDelete o = obj; + + TestCase.assertNotNull(o); + TestCase.assertEquals("0", o.f0); + TestCase.assertEquals("2", o.f2); + TestCase.assertEquals(3, o.f3); + TestCase.assertEquals("4", o.f4); + TestCase.assertEquals(5, o.f5); + TestCase.assertEquals("8", o.f8); + TestCase.assertEquals(9, o.f9); + } + { + AllowFieldAddDelete_Base o = obj; + + TestCase.assertNotNull(o); + TestCase.assertEquals("0", o.f0); + TestCase.assertEquals("2", o.f2); + TestCase.assertEquals(3, o.f3); + TestCase.assertEquals("4", o.f4); + TestCase.assertEquals(5, o.f5); + TestCase.assertEquals("8", o.f8); + TestCase.assertEquals(9, o.f9); + } + { + AllowFieldAddDelete_Embed o = obj.embed; + + TestCase.assertNotNull(o); + TestCase.assertEquals("0", o.f0); + TestCase.assertEquals("2", o.f2); + TestCase.assertEquals(3, o.f3); + TestCase.assertEquals("4", o.f4); + TestCase.assertEquals(5, o.f5); + TestCase.assertEquals("8", o.f8); + TestCase.assertEquals(9, o.f9); + } + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + AllowFieldAddDelete.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + 
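/* + * Hedged note: this is the single-record form of the DPL store-copy + * recipe; convertRawObject instantiates the current class version, + * applying the registered mutations as needed. A full copy would + * iterate the raw index, roughly (sketch only): + * + * EntityCursor<RawObject> cursor = + * rawStore.getPrimaryIndex(NAME).entities(); + * try { + * for (RawObject o : cursor) { + * index.put((AllowFieldAddDelete) + * newStore.getModel().convertRawObject(o)); + * } + * } finally { + * cursor.close(); + * } + */ + 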
index.put((AllowFieldAddDelete) + newStore.getModel().convertRawObject(raw)); + } + + static final Object[] fixedFields0 = { + "f1", 1, + "f2", "2", + "f4", "4", + "f6", 6, + "f7", "7", + }; + + static final Object[] fixedFields1 = { + "f2", "2", + "f4", "4", + }; + + static final Object[] fixedFields2 = { + "f0", "0", + "f2", "2", + "f3", 3, + "f4", "4", + "f5", 5, + "f8", "8", + "f9", 9, + }; + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType baseType = store.getModel().getRawType(NAME2); + RawType embedType = store.getModel().getRawType(NAME3); + + Object[] ff; + if (expectEvolved) { + if (expectUpdated) { + ff = fixedFields2; + } else { + ff = fixedFields1; + } + } else { + ff = fixedFields0; + } + RawObject embed = new RawObject(embedType, makeValues(ff), null); + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 1 : 0, + NAME2, expectEvolved ? 1 : 0, + CASECLS, 0); + checkRaw(obj, ff, "key", 99, "embed", embed); + checkRaw(obj.getSuper(), ff); + } + + private void checkRaw(RawObject obj, + Object[] fixedFields, + Object... otherFields) { + Object[] allFields = + new Object[otherFields.length + fixedFields.length]; + System.arraycopy(otherFields, 0, allFields, 0, otherFields.length); + System.arraycopy(fixedFields, 0, allFields, + otherFields.length, fixedFields.length); + checkRawFields(obj, allFields); + } + } + + static class ProxiedClass { + int data; + + ProxiedClass(int data) { + this.data = data; + } + } + + @Persistent(version=1, proxyFor=ProxiedClass.class) + static class ProxiedClass_Proxy implements PersistentProxy { + long data; + + public void initializeProxy(ProxiedClass o) { + data = o.data; + } + + public ProxiedClass convertProxy() { + return new ProxiedClass((int) data); + } + } + + @Entity + static class ProxiedClass_Entity + extends EvolveCase { + + private static final String NAME = + ProxiedClass_Entity.class.getName(); + private static final String NAME2 = + ProxiedClass_Proxy.class.getName(); + + @PrimaryKey + int key; + + ProxiedClass embed; + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy.class); + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + ProxiedClass_Entity.class); + ProxiedClass_Entity obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(88, obj.embed.data); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ProxiedClass_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ProxiedClass_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject embed; + if (expectEvolved) { 
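+ /* ProxiedClass_Proxy version 1 widened its data field from int to + * long, so evolved records store the proxied value as a Long (88L + * below, versus plain 88 in the unevolved branch). */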
+ embed = new RawObject + (embedType, makeValues("data", 88L), null); + } else { + embed = new RawObject + (embedType, makeValues("data", 88), null); + } + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed); + } + } + + @Persistent(proxyFor=StringBuilder.class) + static class DisallowChangeProxyFor_Proxy2 + implements PersistentProxy<StringBuilder> { + + String data; + + public void initializeProxy(StringBuilder o) { + data = o.toString(); + } + + public StringBuilder convertProxy() { + return new StringBuilder(data); + } + } + + @Persistent(proxyFor=StringBuilder.class) + static class DisallowChangeProxyFor_Proxy + implements PersistentProxy<StringBuilder> { + + String data; + + public void initializeProxy(StringBuilder o) { + data = o.toString(); + } + + public StringBuilder convertProxy() { + return new StringBuilder(data); + } + } + + @Entity + static class DisallowChangeProxyFor + extends EvolveCase { + + @PrimaryKey + int key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Error when evolving class: java.lang.StringBuilder version: 0 to class: java.lang.StringBuilder version: 0 Error: The proxy class for this type has been changed from: com.sleepycat.persist.test.EvolveClasses$DisallowChangeProxyFor_Proxy to: com.sleepycat.persist.test.EvolveClasses$DisallowChangeProxyFor_Proxy2"; + } + + @Override + void configure(EntityModel model, StoreConfig config) { + //model.registerClass(DisallowChangeProxyFor_Proxy.class); + model.registerClass(DisallowChangeProxyFor_Proxy2.class); + } + } + + @Persistent + static class DisallowDeleteProxyFor_Proxy { + String data; + } + + @Entity + static class DisallowDeleteProxyFor + extends EvolveCase { + + @PrimaryKey + int key; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Mutation is missing to evolve class: java.lang.StringBuilder version: 0 Error: java.lang.IllegalArgumentException: Class could not be loaded or is not persistent: java.lang.StringBuilder"; + } + } + + @Persistent(version=1) + static class ArrayNameChange_Component_Renamed { + + long data; + } + + @Entity + static class ArrayNameChange_Entity + extends EvolveCase { + + private static final String NAME = + ArrayNameChange_Entity.class.getName(); + private static final String NAME2 = + ArrayNameChange_Component_Renamed.class.getName(); + private static final String NAME3 = + PREFIX + "ArrayNameChange_Component"; + + @PrimaryKey + int key; + + ArrayNameChange_Component_Renamed[] embed; + ArrayNameChange_Component_Renamed embed2; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addRenamer(new Renamer(NAME3, 0, NAME2)); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME3, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex<Integer,ArrayNameChange_Entity> + index = store.getPrimaryIndex + (Integer.class, + ArrayNameChange_Entity.class); + ArrayNameChange_Entity obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(1, obj.embed.length); + TestCase.assertEquals(88L, obj.embed[0].data); + 
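/* + * The Renamer mutation maps the old component class name (NAME3) to + * ArrayNameChange_Component_Renamed; the array type is renamed along + * with its component, and the next assertion checks that the array + * element and the embed2 reference still share one instance. + */ + 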
TestCase.assertSame(obj.embed2, obj.embed[0]); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + ArrayNameChange_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((ArrayNameChange_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + String compTypeName = expectEvolved ? NAME2 : NAME3; + String arrayTypeName = "[L" + compTypeName + ';'; + RawType compType = store.getModel().getRawType(compTypeName); + RawType arrayType = store.getModel().getRawType(arrayTypeName); + RawObject embed2; + if (expectEvolved) { + embed2 = new RawObject + (compType, makeValues("data", 88L), null); + } else { + embed2 = new RawObject + (compType, makeValues("data", 88), null); + } + RawObject embed = new RawObject + (arrayType, new Object[] { embed2 }); + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 99, "embed", embed, "embed2", embed2); + } + } + + enum AddEnumConstant_Enum { + A, B, C; + } + + @Entity(version=1) + static class AddEnumConstant_Entity + extends EvolveCase { + + private static final String NAME = + AddEnumConstant_Entity.class.getName(); + private static final String NAME2 = + AddEnumConstant_Enum.class.getName(); + + @PrimaryKey + int key; + + AddEnumConstant_Enum e1; + AddEnumConstant_Enum e2; + AddEnumConstant_Enum e3 = AddEnumConstant_Enum.C; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 0, NAME2, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 0); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + AddEnumConstant_Entity.class); + AddEnumConstant_Entity obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertSame(AddEnumConstant_Enum.A, obj.e1); + TestCase.assertSame(AddEnumConstant_Enum.B, obj.e2); + TestCase.assertSame(AddEnumConstant_Enum.C, obj.e3); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + AddEnumConstant_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((AddEnumConstant_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 
1 : 0, CASECLS, 0); + RawType enumType = store.getModel().getRawType(NAME2); + if (expectUpdated) { + checkRawFields(obj, "key", 99, + "e1", new RawObject(enumType, "A"), + "e2", new RawObject(enumType, "B"), + "e3", new RawObject(enumType, "C")); + } else { + checkRawFields(obj, "key", 99, + "e1", new RawObject(enumType, "A"), + "e2", new RawObject(enumType, "B")); + } + } + } + + enum InsertEnumConstant_Enum { + X, A, Y, B, Z; + } + + @Persistent + static class InsertEnumConstant_KeyClass + implements Comparable<InsertEnumConstant_KeyClass> { + + @KeyField(1) + InsertEnumConstant_Enum key; + + private InsertEnumConstant_KeyClass() {} + + InsertEnumConstant_KeyClass(InsertEnumConstant_Enum key) { + this.key = key; + } + + public int compareTo(InsertEnumConstant_KeyClass o) { + /* Use the natural order, in spite of insertions. */ + return key.compareTo(o.key); + } + } + + @Entity(version=1) + static class InsertEnumConstant_Entity + extends EvolveCase { + + private static final String NAME = + InsertEnumConstant_Entity.class.getName(); + private static final String NAME2 = + InsertEnumConstant_Enum.class.getName(); + private static final String NAME3 = + InsertEnumConstant_KeyClass.class.getName(); + + @PrimaryKey + int key; + + @SecondaryKey(relate=MANY_TO_ONE) + InsertEnumConstant_KeyClass secKey; + + InsertEnumConstant_Enum e1; + InsertEnumConstant_Enum e2; + InsertEnumConstant_Enum e3 = InsertEnumConstant_Enum.X; + InsertEnumConstant_Enum e4 = InsertEnumConstant_Enum.Y; + InsertEnumConstant_Enum e5 = InsertEnumConstant_Enum.Z; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 0, NAME2, 0); + checkVersions(model, NAME3, 0, NAME3, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 0); + checkVersions(model, NAME3, 0); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex<Integer,InsertEnumConstant_Entity> + index = store.getPrimaryIndex + (Integer.class, + InsertEnumConstant_Entity.class); + InsertEnumConstant_Entity obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + if (updated) { + TestCase.assertSame(InsertEnumConstant_Enum.X, obj.secKey.key); + } else { + TestCase.assertSame(InsertEnumConstant_Enum.A, obj.secKey.key); + } + TestCase.assertSame(InsertEnumConstant_Enum.A, obj.e1); + TestCase.assertSame(InsertEnumConstant_Enum.B, obj.e2); + TestCase.assertSame(InsertEnumConstant_Enum.X, obj.e3); + TestCase.assertSame(InsertEnumConstant_Enum.Y, obj.e4); + TestCase.assertSame(InsertEnumConstant_Enum.Z, obj.e5); + + if (doUpdate) { + obj.secKey = + new InsertEnumConstant_KeyClass(InsertEnumConstant_Enum.X); + index.put(obj); + updated = true; + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex<Integer,InsertEnumConstant_Entity> + index = newStore.getPrimaryIndex + (Integer.class, + InsertEnumConstant_Entity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((InsertEnumConstant_Entity) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 
1 : 0, CASECLS, 0); + RawType enumType = store.getModel().getRawType(NAME2); + + Map secKeyFields = new HashMap(); + RawType secKeyType = store.getModel().getRawType(NAME3); + RawObject secKeyObject = + new RawObject(secKeyType, secKeyFields, null /*superObject*/); + + if (expectUpdated) { + secKeyFields.put("key", new RawObject(enumType, "X")); + checkRawFields(obj, "key", 99, + "secKey", secKeyObject, + "e1", new RawObject(enumType, "A"), + "e2", new RawObject(enumType, "B"), + "e3", new RawObject(enumType, "X"), + "e4", new RawObject(enumType, "Y"), + "e5", new RawObject(enumType, "Z")); + } else { + secKeyFields.put("key", new RawObject(enumType, "A")); + checkRawFields(obj, "key", 99, + "secKey", secKeyObject, + "e1", new RawObject(enumType, "A"), + "e2", new RawObject(enumType, "B")); + } + } + } + + enum DeleteEnumConstant_Enum { + A, C; + } + + /** + * Don't allow deleting (or renaming, which appears as a deletion) enum + * values without mutations. + */ + @Entity + static class DeleteEnumConstant_NoMutation + extends EvolveCase { + + @PrimaryKey + int key; + + DeleteEnumConstant_Enum e1; + DeleteEnumConstant_Enum e2; + DeleteEnumConstant_Enum e3; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Incompatible enum type changed detected when evolving class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DeleteEnumConstant_Enum version: 0 Error: Enum values may not be removed: [B]"; + } + } + + /** + * With a Deleter, deleted enum values are null. Note that version is not + * bumped. + */ + /* Disabled until support for enum deletion is added. + @Entity + static class DeleteEnumConstant_WithDeleter + extends EvolveCase { + + private static final String NAME = + DeleteEnumConstant_WithDeleter.class.getName(); + private static final String NAME2 = + DeleteEnumConstant_Enum.class.getName(); + + @PrimaryKey + int key; + + DeleteEnumConstant_Enum e1; + DeleteEnumConstant_Enum e2; + DeleteEnumConstant_Enum e3; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 0, NAME2, 0); + } else { + checkVersions(model, NAME2, 0); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteEnumConstant_WithDeleter.class); + DeleteEnumConstant_WithDeleter obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertSame(DeleteEnumConstant_Enum.A, obj.e1); + TestCase.assertSame(null, obj.e2); + TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e3); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + DeleteEnumConstant_WithDeleter.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((DeleteEnumConstant_WithDeleter) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw(store, 99, NAME, 0, CASECLS, 0); + RawType enumType = store.getModel().getRawType(NAME2); + if 
(expectUpdated) { + checkRawFields(obj, "key", 99, + "e1", new RawObject(enumType, "A"), + "e2", null, + "e3", new RawObject(enumType, "C")); + } else { + checkRawFields(obj, "key", 99, + "e1", new RawObject(enumType, "A"), + "e2", new RawObject(enumType, "B"), + "e3", new RawObject(enumType, "C")); + } + } + } + */ + + /** + * A field converter can assign deleted enum values. Version must be + * bumped when a converter is added. + */ + /* Disabled until support for enum deletion is added. + @Entity(version=1) + static class DeleteEnumConstant_WithConverter + extends EvolveCase { + + private static final String NAME = + DeleteEnumConstant_WithConverter.class.getName(); + private static final String NAME2 = + DeleteEnumConstant_Enum.class.getName(); + + @PrimaryKey + int key; + + DeleteEnumConstant_Enum e1; + DeleteEnumConstant_Enum e2; + DeleteEnumConstant_Enum e3; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Conversion c = new MyConversion(); + m.addConverter(new Converter(NAME, 0, "e1", c)); + m.addConverter(new Converter(NAME, 0, "e2", c)); + m.addConverter(new Converter(NAME, 0, "e3", c)); + return m; + } + + @SuppressWarnings("serial") + static class MyConversion implements Conversion { + + transient RawType newType; + + public void initialize(EntityModel model) { + newType = model.getRawType(NAME2); + TestCase.assertNotNull(newType); + } + + public Object convert(Object fromValue) { + TestCase.assertNotNull(newType); + RawObject obj = (RawObject) fromValue; + String val = obj.getEnum(); + TestCase.assertNotNull(val); + if ("B".equals(val)) { + val = "C"; + } + return new RawObject(newType, val); + } + + @Override + public boolean equals(Object other) { + return other instanceof MyConversion; + } + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 0, NAME2, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 0); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteEnumConstant_WithConverter.class); + DeleteEnumConstant_WithConverter obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertSame(DeleteEnumConstant_Enum.A, obj.e1); + TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e2); + TestCase.assertSame(DeleteEnumConstant_Enum.C, obj.e3); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + DeleteEnumConstant_WithConverter.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((DeleteEnumConstant_WithConverter) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw(store, 99, NAME, expectEvolved ? 
1 : 0, + CASECLS, 0); + RawType enumType = store.getModel().getRawType(NAME2); + if (expectEvolved) { + checkRawFields(obj, "key", 99, + "e1", new RawObject(enumType, "A"), + "e2", new RawObject(enumType, "C"), + "e3", new RawObject(enumType, "C")); + } else { + checkRawFields(obj, "key", 99, + "e1", new RawObject(enumType, "A"), + "e2", new RawObject(enumType, "B"), + "e3", new RawObject(enumType, "C")); + } + } + } + */ + + @Entity + static class DisallowChangeKeyRelate + extends EvolveCase { + + private static final String NAME = + DisallowChangeKeyRelate.class.getName(); + + @PrimaryKey + int key; + + @SecondaryKey(relate=MANY_TO_ONE) + int skey; + + @Override + public String getStoreOpenException() { + return "com.sleepycat.persist.evolve.IncompatibleClassException: Change detected in the relate attribute (Relationship) of a secondary key when evolving class: com.sleepycat.persist.test.EvolveClasses$DisallowChangeKeyRelate version: 0 to class: com.sleepycat.persist.test.EvolveClasses$DisallowChangeKeyRelate version: 0 Error: Old key: skey relate: ONE_TO_ONE new key: skey relate: MANY_TO_ONE"; + } + } + + @Entity(version=1) + static class AllowChangeKeyMetadata + extends EvolveCase { + + private static final String NAME = + AllowChangeKeyMetadata.class.getName(); + + @PrimaryKey + int key; + + /* + * Combined fields from version 0 and 1: + * addAnnotation = 88; + * dropField = 77; + * dropAnnotation = 66; + * addField = 55; + * renamedField = 44; // was toBeRenamedField + * aa = 33; + * ff = 22; + */ + + int aa; + + @SecondaryKey(relate=ONE_TO_ONE) + int addAnnotation; + + int dropAnnotation; + + @SecondaryKey(relate=ONE_TO_ONE) + Integer addField; + + @SecondaryKey(relate=ONE_TO_ONE) + int renamedField; + + int ff; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME, 0, "dropField")); + m.addRenamer(new Renamer(NAME, 0, "toBeRenamedField", + "renamedField")); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + AllowChangeKeyMetadata.class); + AllowChangeKeyMetadata obj = index.get(99); + checkValues(obj); + + if (newMetadataWritten) { + checkValues(store.getSecondaryIndex + (index, Integer.class, "addAnnotation").get(88)); + checkValues(store.getSecondaryIndex + (index, Integer.class, "renamedField").get(44)); + if (updated) { + checkValues(store.getSecondaryIndex + (index, Integer.class, "addField").get(55)); + } else { + TestCase.assertNull(store.getSecondaryIndex + (index, Integer.class, "addField").get(55)); + } + + if (doUpdate) { + obj.addField = 55; + index.put(obj); + updated = true; + checkValues(store.getSecondaryIndex + (index, Integer.class, "addAnnotation").get(88)); + checkValues(store.getSecondaryIndex + (index, Integer.class, "addField").get(55)); + } + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + AllowChangeKeyMetadata.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((AllowChangeKeyMetadata) + newStore.getModel().convertRawObject(raw)); + } + + private 
void checkValues(AllowChangeKeyMetadata obj) { + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.addAnnotation); + TestCase.assertEquals(66, obj.dropAnnotation); + TestCase.assertEquals(44, obj.renamedField); + TestCase.assertEquals(33, obj.aa); + TestCase.assertEquals(22, obj.ff); + if (updated) { + TestCase.assertEquals(Integer.valueOf(55), obj.addField); + } else { + TestCase.assertNull(obj.addField); + } + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0); + if (expectUpdated) { + checkRawFields(obj, "key", 99, + "addAnnotation", 88, + "dropAnnotation", 66, + "addField", 55, + "renamedField", 44, + "aa", 33, + "ff", 22); + } else if (expectEvolved) { + checkRawFields(obj, "key", 99, + "addAnnotation", 88, + "dropAnnotation", 66, + "renamedField", 44, + "aa", 33, + "ff", 22); + } else { + checkRawFields(obj, "key", 99, + "addAnnotation", 88, + "dropField", 77, + "dropAnnotation", 66, + "toBeRenamedField", 44, + "aa", 33, + "ff", 22); + } + Environment env = store.getEnvironment(); + assertDbExists(expectEvolved, env, NAME, "addAnnotation"); + assertDbExists(expectEvolved, env, NAME, "addField"); + assertDbExists(expectEvolved, env, NAME, "renamedField"); + assertDbExists(!expectEvolved, env, NAME, "toBeRenamedField"); + assertDbExists(!expectEvolved, env, NAME, "dropField"); + assertDbExists(!expectEvolved, env, NAME, "dropAnnotation"); + } + } + + /** + * Same test as AllowChangeKeyMetadata but with the secondary keys in an + * entity subclass. [#16253] + */ + @Persistent(version=1) + static class AllowChangeKeyMetadataInSubclass + extends AllowChangeKeyMetadataEntity { + + private static final String NAME = + AllowChangeKeyMetadataInSubclass.class.getName(); + private static final String NAME2 = + AllowChangeKeyMetadataEntity.class.getName(); + + /* + * Combined fields from version 0 and 1: + * addAnnotation = 88; + * dropField = 77; + * dropAnnotation = 66; + * addField = 55; + * renamedField = 44; // was toBeRenamedField + * aa = 33; + * ff = 22; + */ + + int aa; + + @SecondaryKey(relate=ONE_TO_ONE) + int addAnnotation; + + int dropAnnotation; + + @SecondaryKey(relate=ONE_TO_ONE) + Integer addField; + + @SecondaryKey(relate=ONE_TO_ONE) + int renamedField; + + int ff; + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(AllowChangeKeyMetadataInSubclass.class); + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addDeleter(new Deleter(NAME, 0, "dropField")); + m.addRenamer(new Renamer(NAME, 0, "toBeRenamedField", + "renamedField")); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkNonEntity(true, model, env, NAME, 1); + checkEntity(true, model, env, NAME2, 0, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + checkVersions(model, NAME2, 0); + } else { + checkVersions(model, NAME, 1); + checkVersions(model, NAME2, 0); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + AllowChangeKeyMetadataEntity.class); + AllowChangeKeyMetadataEntity obj = index.get(99); + checkValues(obj); + + if (newMetadataWritten) { + checkValues(store.getSecondaryIndex + (index, Integer.class, 
"addAnnotation").get(88)); + checkValues(store.getSecondaryIndex + (index, Integer.class, "renamedField").get(44)); + if (updated) { + checkValues(store.getSecondaryIndex + (index, Integer.class, "addField").get(55)); + } else { + TestCase.assertNull(store.getSecondaryIndex + (index, Integer.class, "addField").get(55)); + } + + if (doUpdate) { + ((AllowChangeKeyMetadataInSubclass) obj).addField = 55; + index.put(obj); + updated = true; + checkValues(store.getSecondaryIndex + (index, Integer.class, "addAnnotation").get(88)); + checkValues(store.getSecondaryIndex + (index, Integer.class, "addField").get(55)); + } + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + AllowChangeKeyMetadataEntity.class); + RawObject raw = rawStore.getPrimaryIndex(NAME2).get(99); + index.put((AllowChangeKeyMetadataInSubclass) + newStore.getModel().convertRawObject(raw)); + } + + private void checkValues(AllowChangeKeyMetadataEntity objParam) { + AllowChangeKeyMetadataInSubclass obj = + (AllowChangeKeyMetadataInSubclass) objParam; + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals(88, obj.addAnnotation); + TestCase.assertEquals(66, obj.dropAnnotation); + TestCase.assertEquals(44, obj.renamedField); + TestCase.assertEquals(33, obj.aa); + TestCase.assertEquals(22, obj.ff); + if (updated) { + TestCase.assertEquals(Integer.valueOf(55), obj.addField); + } else { + TestCase.assertNull(obj.addField); + } + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, NAME2, 99, NAME, expectEvolved ? 1 : 0, + NAME2, 0, CASECLS, 0); + checkRawFields(obj.getSuper(), "key", 99); + if (expectUpdated) { + checkRawFields(obj, + "addAnnotation", 88, + "dropAnnotation", 66, + "addField", 55, + "renamedField", 44, + "aa", 33, + "ff", 22); + } else if (expectEvolved) { + checkRawFields(obj, + "addAnnotation", 88, + "dropAnnotation", 66, + "renamedField", 44, + "aa", 33, + "ff", 22); + } else { + checkRawFields(obj, + "addAnnotation", 88, + "dropField", 77, + "dropAnnotation", 66, + "toBeRenamedField", 44, + "aa", 33, + "ff", 22); + } + Environment env = store.getEnvironment(); + assertDbExists(expectEvolved, env, NAME2, "addAnnotation"); + assertDbExists(expectEvolved, env, NAME2, "addField"); + assertDbExists(expectEvolved, env, NAME2, "renamedField"); + assertDbExists(!expectEvolved, env, NAME2, "toBeRenamedField"); + assertDbExists(!expectEvolved, env, NAME2, "dropField"); + assertDbExists(!expectEvolved, env, NAME2, "dropAnnotation"); + } + } + + @Entity + static class AllowChangeKeyMetadataEntity + extends EvolveCase { + + @PrimaryKey + int key; + } + + /** + * Special case of adding secondaries that caused + * IndexOutOfBoundsException. 
[#15524] + */ + @Entity(version=1) + static class AllowAddSecondary + extends EvolveCase { + + private static final String NAME = + AllowAddSecondary.class.getName(); + + @PrimaryKey + long key; + + @SecondaryKey(relate=ONE_TO_ONE) + int a; + + @SecondaryKey(relate=ONE_TO_ONE) + int b; + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Long.class, + AllowAddSecondary.class); + AllowAddSecondary obj = index.get(99L); + checkValues(obj); + + if (newMetadataWritten) { + checkValues(store.getSecondaryIndex + (index, Integer.class, "a").get(1)); + if (updated) { + checkValues(store.getSecondaryIndex + (index, Integer.class, "b").get(3)); + TestCase.assertNull(store.getSecondaryIndex + (index, Integer.class, "b").get(2)); + } else { + checkValues(store.getSecondaryIndex + (index, Integer.class, "b").get(2)); + TestCase.assertNull(store.getSecondaryIndex + (index, Integer.class, "b").get(3)); + } + + if (doUpdate) { + obj.b = 3; + index.put(obj); + updated = true; + checkValues(store.getSecondaryIndex + (index, Integer.class, "a").get(1)); + checkValues(store.getSecondaryIndex + (index, Integer.class, "b").get(3)); + } + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Long.class, + AllowAddSecondary.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99L); + index.put((AllowAddSecondary) + newStore.getModel().convertRawObject(raw)); + } + + private void checkValues(AllowAddSecondary obj) { + TestCase.assertNotNull(obj); + TestCase.assertEquals(99L, obj.key); + TestCase.assertEquals(1, obj.a); + if (updated) { + TestCase.assertEquals(3, obj.b); + } else { + TestCase.assertEquals(2, obj.b); + } + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, 99L, NAME, expectEvolved ? 
1 : 0, CASECLS, 0); + if (expectUpdated) { + checkRawFields(obj, "key", 99L, + "a", 1, + "b", 3); + } else { + checkRawFields(obj, "key", 99L, + "a", 1, + "b", 2); + } + Environment env = store.getEnvironment(); + assertDbExists(expectEvolved, env, NAME, "a"); + assertDbExists(expectEvolved, env, NAME, "b"); + } + } + + @Entity(version=1) + static class FieldAddAndConvert + extends EvolveCase { + + private static final String NAME = + FieldAddAndConvert.class.getName(); + + @PrimaryKey + int key; + + private final String f0 = "0"; // new field + private final String f1 = "1"; // converted field + private final String f2 = "2"; // new field + private final String f3 = "3"; // converted field + private final String f4 = "4"; // new field + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addConverter(new Converter(NAME, 0, "f1", new IntToString())); + m.addConverter(new Converter(NAME, 0, "f3", new IntToString())); + return m; + } + + @SuppressWarnings("serial") + private static class IntToString implements Conversion { + + public void initialize(EntityModel model) { + } + + public Object convert(Object fromValue) { + return fromValue.toString(); + } + + @Override + public boolean equals(Object other) { + return other instanceof IntToString; + } + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + FieldAddAndConvert.class); + FieldAddAndConvert obj = index.get(99); + TestCase.assertNotNull(obj); + TestCase.assertEquals(99, obj.key); + TestCase.assertEquals("0", obj.f0); + TestCase.assertEquals("1", obj.f1); + TestCase.assertEquals("2", obj.f2); + TestCase.assertEquals("3", obj.f3); + TestCase.assertEquals("4", obj.f4); + + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + FieldAddAndConvert.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(99); + index.put((FieldAddAndConvert) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj = readRaw + (store, 99, NAME, expectEvolved ? 1 : 0, CASECLS, 0); + if (expectUpdated) { + checkRawFields(obj, + "key", 99, + "f0", "0", + "f1", "1", + "f2", "2", + "f3", "3", + "f4", "4"); + } else if (expectEvolved) { + checkRawFields(obj, + "key", 99, + "f1", "1", + "f3", "3"); + } else { + checkRawFields(obj, + "key", 99, + "f1", 1, + "f3", 3); + } + } + } + + /* + * [18961]Rename secKey2, so the order of secondary keys' names is changed + * from: secKey->seckey2->secKey3 to new_seckey2->secKey->secKey3. + */ + @Entity(version=1) + static class RenameSecFieldDestroyOrder_1 extends EvolveCase{ + + private static final String NAME = + RenameSecFieldDestroyOrder_1.class.getName(); + + @PrimaryKey + int key; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey; + + /* Rename secKey2 to new_secKey2. 
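+         * The Renamer in getMutations below maps the stored name to the
+         * new one; readRawObjects verifies that a secondary database now
+         * exists for new_secKey2 and that the old secKey2 database is gone.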
*/ + @SecondaryKey(relate=MANY_TO_ONE) + int new_secKey2; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey3; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addRenamer(new Renamer(NAME, 0, "secKey2", "new_secKey2")); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, RenameSecFieldDestroyOrder_1.class); + RenameSecFieldDestroyOrder_1 obj = index.get(1); + checkValues(obj); + checkSecondaries(store, index); + + if (doUpdate) { + index.put(obj); + checkSecondaries(store, index); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, RenameSecFieldDestroyOrder_1.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(1); + index.put((RenameSecFieldDestroyOrder_1) + newStore.getModel().convertRawObject(raw)); + } + + private void checkSecondaries(EntityStore store, + PrimaryIndex + index) + throws DatabaseException { + + if (!newMetadataWritten) { + return; + } + checkValues(store.getSecondaryIndex + (index, String.class, "secKey").get("aa")); + checkValues(store.getSecondaryIndex + (index, Integer.class, "new_secKey2").get(2)); + checkValues(store.getSecondaryIndex + (index, String.class, "secKey3").get("bb")); + } + + private void checkValues(RenameSecFieldDestroyOrder_1 obj) { + TestCase.assertNotNull(obj); + TestCase.assertEquals(obj.key, 1); + TestCase.assertEquals(obj.secKey, "aa"); + TestCase.assertEquals(obj.new_secKey2, 2); + TestCase.assertEquals(obj.secKey3, "bb"); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 1, NAME, 1, CASECLS, 0); + checkRawFields(obj, "key", 1, + "secKey", "aa", + "new_secKey2", 2, + "secKey3", "bb"); + } else { + obj = readRaw(store, 1, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 1, + "secKey", "aa", + "secKey2", 2, + "secKey3", "bb"); + } + Environment env = store.getEnvironment(); + + assertDbExists(expectEvolved, env, NAME, "new_secKey2"); + assertDbExists(!expectEvolved, env, NAME, "secKey2"); + } + } + + /* + * [18961]Rename secKey2 and secKey3, so the order of secondary keys' names + * is changed from : secKey->seckey2->secKey3 to new_seckey2->new_secKey3-> + * secKey1. + */ + @Entity(version=1) + static class RenameSecFieldDestroyOrder_2 extends EvolveCase{ + + private static final String NAME = + RenameSecFieldDestroyOrder_2.class.getName(); + + @PrimaryKey + int key; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey; + + /* Rename secKey2 to new_secKey2. */ + @SecondaryKey(relate=MANY_TO_ONE) + int new_secKey2; + + /* Rename secKey3 to new_secKey3. 
*/ + @SecondaryKey(relate=MANY_TO_ONE) + String new_secKey3; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addRenamer(new Renamer(NAME, 0, "secKey2", "new_secKey2")); + m.addRenamer(new Renamer(NAME, 0, "secKey3", "new_secKey3")); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, RenameSecFieldDestroyOrder_2.class); + RenameSecFieldDestroyOrder_2 obj = index.get(1); + checkValues(obj); + checkSecondaries(store, index); + + if (doUpdate) { + index.put(obj); + checkSecondaries(store, index); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, RenameSecFieldDestroyOrder_2.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(1); + index.put((RenameSecFieldDestroyOrder_2) + newStore.getModel().convertRawObject(raw)); + } + + private void checkSecondaries(EntityStore store, + PrimaryIndex + index) + throws DatabaseException { + + if (!newMetadataWritten) { + return; + } + checkValues(store.getSecondaryIndex + (index, String.class, "secKey").get("aa")); + checkValues(store.getSecondaryIndex + (index, Integer.class, "new_secKey2").get(2)); + checkValues(store.getSecondaryIndex + (index, String.class, "new_secKey3").get("bb")); + } + + private void checkValues(RenameSecFieldDestroyOrder_2 obj) { + TestCase.assertNotNull(obj); + TestCase.assertEquals(obj.key, 1); + TestCase.assertEquals(obj.secKey, "aa"); + TestCase.assertEquals(obj.new_secKey2, 2); + TestCase.assertEquals(obj.new_secKey3, "bb"); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 1, NAME, 1, CASECLS, 0); + checkRawFields(obj, "key", 1, + "secKey", "aa", + "new_secKey2", 2, + "new_secKey3", "bb"); + } else { + obj = readRaw(store, 1, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 1, + "secKey", "aa", + "secKey2", 2, + "secKey3", "bb"); + } + Environment env = store.getEnvironment(); + + assertDbExists(expectEvolved, env, NAME, "new_secKey2"); + assertDbExists(!expectEvolved, env, NAME, "secKey2"); + assertDbExists(expectEvolved, env, NAME, "new_secKey3"); + assertDbExists(!expectEvolved, env, NAME, "secKey3"); + } + } + + /* + * [18961]Rename secKey2 and secKey3, so the order of secondary keys' names + * is changed from : secKey->seckey2->secKey3 to new_seckey3->pnew_secKey2 + * ->secKey1. + */ + @Entity(version=1) + static class RenameSecFieldDestroyOrder_3 extends EvolveCase{ + + private static final String NAME = + RenameSecFieldDestroyOrder_3.class.getName(); + + @PrimaryKey + int key; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey; + + /* Rename secKey2 to pnew_secKey2. */ + @SecondaryKey(relate=MANY_TO_ONE) + int pnew_secKey2; + + /* Rename secKey3 to new_secKey3. 
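+         * With the "pnew_" prefix on the other renamed key, the evolved
+         * name order is new_secKey3 -> pnew_secKey2 -> secKey, exercising
+         * yet another removal order for the old secondary databases.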
*/ + @SecondaryKey(relate=MANY_TO_ONE) + String new_secKey3; + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + m.addRenamer(new Renamer(NAME, 0, "secKey2", "pnew_secKey2")); + m.addRenamer(new Renamer(NAME, 0, "secKey3", "new_secKey3")); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 1, null); + if (oldTypesExist) { + checkVersions(model, NAME, 1, NAME, 0); + } else { + checkVersions(model, NAME, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, RenameSecFieldDestroyOrder_3.class); + RenameSecFieldDestroyOrder_3 obj = index.get(1); + checkValues(obj); + checkSecondaries(store, index); + + if (doUpdate) { + index.put(obj); + checkSecondaries(store, index); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, + RenameSecFieldDestroyOrder_3.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(1); + index.put((RenameSecFieldDestroyOrder_3) + newStore.getModel().convertRawObject(raw)); + } + + private void checkSecondaries(EntityStore store, + PrimaryIndex + index) + throws DatabaseException { + + if (!newMetadataWritten) { + return; + } + checkValues(store.getSecondaryIndex + (index, String.class, "secKey").get("aa")); + checkValues(store.getSecondaryIndex + (index, Integer.class, "pnew_secKey2").get(2)); + checkValues(store.getSecondaryIndex + (index, String.class, "new_secKey3").get("bb")); + } + + private void checkValues(RenameSecFieldDestroyOrder_3 obj) { + TestCase.assertNotNull(obj); + TestCase.assertEquals(obj.key, 1); + TestCase.assertEquals(obj.secKey, "aa"); + TestCase.assertEquals(obj.pnew_secKey2, 2); + TestCase.assertEquals(obj.new_secKey3, "bb"); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawObject obj; + if (expectEvolved) { + obj = readRaw(store, 1, NAME, 1, CASECLS, 0); + checkRawFields(obj, "key", 1, + "secKey", "aa", + "pnew_secKey2", 2, + "new_secKey3", "bb"); + } else { + obj = readRaw(store, 1, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 1, + "secKey", "aa", + "secKey2", 2, + "secKey3", "bb"); + } + Environment env = store.getEnvironment(); + + assertDbExists(expectEvolved, env, NAME, "pnew_secKey2"); + assertDbExists(!expectEvolved, env, NAME, "secKey2"); + assertDbExists(expectEvolved, env, NAME, "new_secKey3"); + assertDbExists(!expectEvolved, env, NAME, "secKey3"); + } + } + + /* + * [#18961]Delete secKey2's SecondaryKey annotation. so the order of non + * keys' names is changed from : anonKey->znonkey to anonKey->secKey2-> + * xnonKey. + */ + @Entity(version=1) + static class DeleteSecAnnotationDestroyOrder extends EvolveCase{ + + private static final String NAME = + DeleteSecAnnotationDestroyOrder.class.getName(); + + @PrimaryKey + int key; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey; + + /* Delete secKey2's SecondaryKey annotation. 
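+         * The field itself remains as ordinary non-key data; only the
+         * secondary database for secKey2 is removed, as the assertDbExists
+         * check in readRawObjects verifies.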
+ */
+        //@SecondaryKey(relate=MANY_TO_ONE)
+        int secKey2;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        String secKey3;
+
+        int znonKey;
+        String xnonKey = "cc";
+
+        @Override
+        Mutations getMutations() {
+            Mutations m = new Mutations();
+            m.addRenamer(new Renamer(NAME, 0, "anonKey", "znonKey"));
+            return m;
+        }
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer, DeleteSecAnnotationDestroyOrder>
+                index = store.getPrimaryIndex
+                    (Integer.class, DeleteSecAnnotationDestroyOrder.class);
+            DeleteSecAnnotationDestroyOrder obj = index.get(1);
+            checkValues(obj);
+            checkSecondaries(store, index);
+
+            if (doUpdate) {
+                index.put(obj);
+                checkSecondaries(store, index);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer, DeleteSecAnnotationDestroyOrder>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     DeleteSecAnnotationDestroyOrder.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(1);
+            index.put((DeleteSecAnnotationDestroyOrder)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        private void checkSecondaries(EntityStore store,
+                                      PrimaryIndex<Integer,
+                                          DeleteSecAnnotationDestroyOrder>
+                                          index)
+            throws DatabaseException {
+
+            if (!newMetadataWritten) {
+                return;
+            }
+            checkValues(store.getSecondaryIndex
+                (index, String.class, "secKey").get("aa"));
+            checkValues(store.getSecondaryIndex
+                (index, String.class, "secKey3").get("bb"));
+        }
+
+        private void checkValues(DeleteSecAnnotationDestroyOrder obj) {
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(obj.key, 1);
+            TestCase.assertEquals(obj.secKey, "aa");
+            TestCase.assertEquals(obj.secKey2, 2);
+            TestCase.assertEquals(obj.secKey3, "bb");
+            TestCase.assertEquals(obj.znonKey, 3);
+            TestCase.assertEquals(obj.xnonKey, "cc");
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 1, NAME, 1, CASECLS, 0);
+                checkRawFields(obj, "key", 1,
+                               "secKey", "aa",
+                               "secKey2", 2,
+                               "secKey3", "bb",
+                               "znonKey", 3,
+                               "xnonKey", "cc");
+            } else {
+                obj = readRaw(store, 1, NAME, 0, CASECLS, 0);
+                checkRawFields(obj, "key", 1,
+                               "secKey", "aa",
+                               "secKey2", 2,
+                               "secKey3", "bb",
+                               "anonKey", 3,
+                               "xnonKey", "cc");
+            }
+            Environment env = store.getEnvironment();
+
+            assertDbExists(!expectEvolved, env, NAME, "secKey2");
+        }
+    }
+
+    /*
+     * [#19377] Change one field of the proxy class from Map<String, Integer>
+     * to Map<MyEnum, Integer>.
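+     *
+     * For illustration only (a sketch, not part of the test: the framework
+     * applies getMutations() itself, and the env variable and store name
+     * here are placeholders), such a Converter would normally be installed
+     * when opening the store:
+     *
+     *   Mutations m = new Mutations();
+     *   m.addConverter(new Converter(
+     *       ProxiedClass_Proxy2.class.getName(), 0, "data",
+     *       new ProxyClassFieldChanged.MyConversion()));
+     *   StoreConfig config = new StoreConfig();
+     *   config.setMutations(m);
+     *   EntityStore store = new EntityStore(env, "test", config);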
+ */ + @Entity + static class ProxyClassFieldChanged extends EvolveCase { + private static final String NAME = + ProxyClassFieldChanged.class.getName(); + private static final String NAME2 = + ProxiedClass_Proxy2.class.getName(); + + @PrimaryKey + int key; + + private ProxiedClass embed; + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy2.class); + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = + new Converter(ProxiedClass_Proxy2.class.getName(), 0, "data", + new MyConversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassFieldChanged.class); + ProxyClassFieldChanged obj = index.get(1); + TestCase.assertNotNull(obj); + TestCase.assertEquals(1, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(2, obj.embed.data); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, ProxyClassFieldChanged.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(1); + index.put((ProxyClassFieldChanged) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject data = makeRawObject(store, expectEvolved); + RawObject embed = new RawObject + (embedType, makeValues("data", data), null); + RawObject obj = readRaw(store, 1, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 1, "embed", embed); + } + + static RawObject makeRawObject(RawStore store, + boolean expectEvolved) { + RawType dataType = store.getModel().getRawType + ("com.sleepycat.persist.impl.MapProxy$HashMapProxy"); + RawType dataSuperType = store.getModel().getRawType + ("com.sleepycat.persist.impl.MapProxy"); + RawType listRawType = + store.getModel().getRawType(Object[].class.getName()); + RawType myEnumType = + store.getModel().getRawType(MyEnum.class.getName()); + RawObject keyRawObject = null; + if (expectEvolved) { + ArrayList dataKeyValue = + new ArrayList(); + RawObject myEnumRawObject = new RawObject(myEnumType, "DATA"); + dataKeyValue.add(myEnumRawObject); + keyRawObject = new RawObject + (listRawType, dataKeyValue.toArray()); + } else { + ArrayList dataKeyValue = new ArrayList(); + dataKeyValue.add("data"); + keyRawObject = new RawObject + (listRawType, dataKeyValue.toArray()); + } + + ArrayList dataValueValue = new ArrayList(); + dataValueValue.add(2); + + RawObject valueRawObject = new RawObject + (listRawType, dataValueValue.toArray()); + + Map + dataSuperValue = new HashMap(); + dataSuperValue.put("keys", keyRawObject); + dataSuperValue.put("values", valueRawObject); + RawObject dataSuperRawObject = + new RawObject(dataSuperType, dataSuperValue, null); + Map dataValue = + new HashMap(); + RawObject dataRawObject = new RawObject(dataType, + dataValue, dataSuperRawObject); 
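+             /*
+              * The proxy's raw form keeps parallel "keys" and "values"
+              * arrays on the MapProxy superclass; the HashMapProxy instance
+              * itself carries no fields of its own, so dataValue stays empty.
+              */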
+ return dataRawObject; + } + + static class MyConversion implements Conversion { + private static final long serialVersionUID = 1L; + private transient RawType newDataType; + private transient RawType newDataSuperType; + private transient RawType myEnumType; + + public void initialize(EntityModel model) { + newDataType = model.getRawType + ("com.sleepycat.persist.impl.MapProxy$HashMapProxy"); + newDataSuperType = + model.getRawType("com.sleepycat.persist.impl.MapProxy"); + myEnumType = model.getRawType(MyEnum.class.getName()); + } + + public Object convert(Object fromValue) { + + // Get field value maps for old and new objects. + RawObject oldDataRawObject = (RawObject) fromValue; + RawObject oldKeyRawObject = (RawObject)oldDataRawObject. + getSuper().getValues().get("keys"); + Object[] oldDataKeyValue = + (Object[])oldKeyRawObject.getElements(); + RawObject oldValueRawObject = (RawObject)oldDataRawObject. + getSuper().getValues().get("values"); + + ArrayList newDataKeyValue = + new ArrayList(); + RawObject myEnumRawObject = new RawObject(myEnumType, "DATA"); + newDataKeyValue.add(myEnumRawObject); + + RawObject newKeyRawObject = new RawObject + (oldKeyRawObject.getType(), newDataKeyValue.toArray()); + Map + newDataSuperValue = new HashMap(); + newDataSuperValue.put("keys", newKeyRawObject); + newDataSuperValue.put("values", oldValueRawObject); + RawObject newDataSuperRawObject = + new RawObject(newDataSuperType, newDataSuperValue, null); + Map newDataValue = + new HashMap(); + RawObject newDataRawObject = + new RawObject(newDataType, newDataValue, + newDataSuperRawObject); + return newDataRawObject; + } + + @Override + public boolean equals(Object o) { + return o instanceof MyConversion; + } + } + } + + @Persistent(proxyFor=ProxiedClass.class, version=1) + static class ProxiedClass_Proxy2 implements PersistentProxy { + Map data; + + public void initializeProxy(ProxiedClass o) { + data = new HashMap(); + data.put(MyEnum.DATA, o.data); + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data.get(MyEnum.DATA)); + } + } + + enum MyEnum { DATA }; + + /* + * [#19377]Change one field of the proxy class from Map to + * Map, and the Object component then will be assigned a + * MyEnum object, which is not known for DPL when open a store. + */ + @Entity + static class ProxyClassObjectFieldChanged extends EvolveCase { + private static final String NAME = + ProxyClassObjectFieldChanged.class.getName(); + private static final String NAME2 = + ProxiedClass_Proxy3.class.getName(); + + @PrimaryKey + int key; + + private ProxiedClass embed; + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy3.class); + + /* + * Because the DPL does not know MyEnum class, we have to register + * it in advance of using it. 
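+             * MyEnum appears only as a runtime value of the Object-typed
+             * map key, never in a declared field type, so class discovery
+             * cannot find it on its own.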
+ */ + model.registerClass(MyEnum.class); + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = + new Converter(ProxiedClass_Proxy3.class.getName(), 0, "data", + new ProxyClassFieldChanged.MyConversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassObjectFieldChanged.class); + ProxyClassObjectFieldChanged obj = index.get(1); + TestCase.assertNotNull(obj); + TestCase.assertEquals(1, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(2, obj.embed.data); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, ProxyClassObjectFieldChanged.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(1); + index.put((ProxyClassObjectFieldChanged) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject data = + ProxyClassFieldChanged.makeRawObject(store, expectEvolved); + RawObject embed = + new RawObject(embedType, makeValues("data", data), null); + RawObject obj = readRaw(store, 1, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 1, "embed", embed); + + } + } + + @Persistent(proxyFor=ProxiedClass.class, version=1) + static class ProxiedClass_Proxy3 implements PersistentProxy { + /* Changed from Map to Map. */ + Map data; + + public void initializeProxy(ProxiedClass o) { + data = new HashMap(); + data.put(MyEnum.DATA, o.data); + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data.get(MyEnum.DATA)); + } + } + + /* + * [#19377]Change one field of the proxy class from Integer[] to + * IntegerClass[]. 
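+     * Because IntegerClass is a @Persistent type, each old Integer element
+     * must be rewrapped as an IntegerClass RawObject; the MyConversion
+     * below does exactly that.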
+ */ + @Entity + static class ProxyClassArrayFieldChanged extends EvolveCase { + private static final String NAME = + ProxyClassArrayFieldChanged.class.getName(); + private static final String NAME2 = + ProxiedClass_Proxy4.class.getName(); + + @PrimaryKey + int key; + + private ProxiedClass embed; + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy4.class); + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = + new Converter(ProxiedClass_Proxy4.class.getName(), 0, "data", + new MyConversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassArrayFieldChanged.class); + ProxyClassArrayFieldChanged obj = index.get(1); + TestCase.assertNotNull(obj); + TestCase.assertEquals(1, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(2, obj.embed.data); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, ProxyClassArrayFieldChanged.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(1); + index.put((ProxyClassArrayFieldChanged) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject data = makeRawObject(store, expectEvolved); + RawObject embed = new RawObject + (embedType, makeValues("data", data), null); + RawObject obj = readRaw(store, 1, NAME, 0, CASECLS, 0); + checkRawFields(obj, "key", 1, "embed", embed); + + } + + static RawObject makeRawObject(RawStore store, + boolean expectEvolved) { + RawType integerClassType = + store.getModel().getRawType(IntegerClass.class.getName()); + RawObject dataRawObject = null; + if (expectEvolved) { + RawType dataType = store.getModel().getRawType + (IntegerClass[].class.getName()); + Map integerClassValues = + new HashMap(); + integerClassValues.put("data", 2); + RawObject integerClassObject = + new RawObject(integerClassType, integerClassValues, null); + RawObject[] elements = new RawObject[1]; + elements[0] = integerClassObject; + dataRawObject = new RawObject(dataType, elements); + } else { + RawType dataType = + store.getModel().getRawType(Integer[].class.getName()); + Integer[] elements = new Integer[1]; + elements[0] = 2; + dataRawObject = new RawObject(dataType, elements); + } + return dataRawObject; + } + + static class MyConversion implements Conversion { + private static final long serialVersionUID = 1L; + private transient RawType newDataType; + private transient RawType integerClassType; + + public void initialize(EntityModel model) { + newDataType = model.getRawType(IntegerClass[].class.getName()); + integerClassType = + model.getRawType(IntegerClass.class.getName()); + } + + public Object convert(Object fromValue) { + + // Get field value maps for old and new objects. 
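+                // (Here the values are arrays rather than maps: the old
+                // value wraps an Integer[], and each element is rewrapped
+                // below as an IntegerClass raw instance for the new
+                // IntegerClass[] field.)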
+ RawObject oldDataRawObject = (RawObject) fromValue; + Object[] oldElements = oldDataRawObject.getElements(); + Map integerClassValues = + new HashMap(); + integerClassValues.put("data", oldElements[0]); + RawObject integerClassObject = + new RawObject(integerClassType, integerClassValues, null); + RawObject[] newElements = new RawObject[1]; + newElements[0] = integerClassObject; + RawObject newDataRawObject = + new RawObject(newDataType, newElements); + return newDataRawObject; + } + + @Override + public boolean equals(Object o) { + return o instanceof MyConversion; + } + } + } + + @Persistent(proxyFor=ProxiedClass.class, version=1) + static class ProxiedClass_Proxy4 implements PersistentProxy { + IntegerClass[] data; + + public void initializeProxy(ProxiedClass o) { + data = new IntegerClass[1]; + data[0] = new IntegerClass(o.data); + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data[0].data); + } + } + + @Persistent + static class IntegerClass { + int data; + + IntegerClass(){} + + IntegerClass(int data) { + this.data = data; + } + } + + /* + * [#19377]Change one field of the proxy class from Integer[] to Object[], + * and the Object component then will be assigned a IntegerClass object, + * which is not known for DPL when open a store. + */ + @Entity + static class ProxyClassObjectArrayFieldChanged extends EvolveCase { + private static final String NAME = + ProxyClassObjectArrayFieldChanged.class.getName(); + private static final String NAME2 = + ProxiedClass_Proxy5.class.getName(); + + @PrimaryKey + int key; + + private ProxiedClass embed; + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy5.class); + model.registerClass(IntegerClass[].class); + } + + @Override + Mutations getMutations() { + Mutations m = new Mutations(); + Converter converter = + new Converter(ProxiedClass_Proxy5.class.getName(), 0, "data", + new ProxyClassArrayFieldChanged.MyConversion()); + m.addConverter(converter); + return m; + } + + @Override + void checkEvolvedModel(EntityModel model, + Environment env, + boolean oldTypesExist) { + checkEntity(true, model, env, NAME, 0, null); + checkVersions(model, NAME, 0); + if (oldTypesExist) { + checkVersions(model, NAME2, 1, NAME2, 0); + } else { + checkVersions(model, NAME2, 1); + } + } + + @Override + void readObjects(EntityStore store, boolean doUpdate) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassObjectArrayFieldChanged.class); + ProxyClassObjectArrayFieldChanged obj = index.get(1); + TestCase.assertNotNull(obj); + TestCase.assertEquals(1, obj.key); + TestCase.assertNotNull(obj.embed); + TestCase.assertEquals(2, obj.embed.data); + if (doUpdate) { + index.put(obj); + } + } + + @Override + void copyRawObjects(RawStore rawStore, EntityStore newStore) + throws DatabaseException { + + PrimaryIndex + index = newStore.getPrimaryIndex + (Integer.class, ProxyClassObjectArrayFieldChanged.class); + RawObject raw = rawStore.getPrimaryIndex(NAME).get(1); + index.put((ProxyClassObjectArrayFieldChanged) + newStore.getModel().convertRawObject(raw)); + } + + @Override + void readRawObjects(RawStore store, + boolean expectEvolved, + boolean expectUpdated) + throws DatabaseException { + + RawType embedType = store.getModel().getRawType(NAME2); + RawObject data = + makeRawObject(store, expectEvolved, expectUpdated); + RawObject embed = new RawObject + (embedType, makeValues("data", data), null); + RawObject obj = readRaw(store, 1, NAME, 0, 
CASECLS, 0); + checkRawFields(obj, "key", 1, "embed", embed); + + } + + static RawObject makeRawObject(RawStore store, + boolean expectEvolved, + boolean expectUpdated) { + RawType integerClassType = + store.getModel().getRawType(IntegerClass.class.getName()); + RawObject dataRawObject = null; + if (expectEvolved) { + RawType dataType = null; + if (expectUpdated) { + dataType = store.getModel().getRawType + (Object[].class.getName()); + } else { + dataType = store.getModel().getRawType + (IntegerClass[].class.getName()); + } + Map integerClassValues = + new HashMap(); + integerClassValues.put("data", 2); + RawObject integerClassObject = + new RawObject(integerClassType, integerClassValues, null); + RawObject[] elements = new RawObject[1]; + elements[0] = integerClassObject; + dataRawObject = new RawObject(dataType, elements); + } else { + RawType dataType = + store.getModel().getRawType(Integer[].class.getName()); + Integer[] elements = new Integer[1]; + elements[0] = 2; + dataRawObject = new RawObject(dataType, elements); + } + return dataRawObject; + } + } + + @Persistent(proxyFor=ProxiedClass.class, version=1) + static class ProxiedClass_Proxy5 implements PersistentProxy { + Object[] data; + + public void initializeProxy(ProxiedClass o) { + data = new Object[1]; + data[0] = new IntegerClass(o.data); + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(((IntegerClass)data[0]).data); + } + } + + @Persistent(version=1) + static class MultipleSelfRefsEmbed { + MultipleSelfRefs ref; + MultipleSelfRefsEmbed embed; + /* ref2 is a new field. */ + MultipleSelfRefs ref2; + } + + /** + * Test multiple refs in an attempt to reproduce a bug where an assertion + * fired in Evolver.evolveInternal. This did not reproduce the bug, + * apparently because a very specific sequence of nested references is + * needed. But the test is included in case it is useful for other + * reasons. [#21869] + */ + @Entity(version=1) + static class MultipleSelfRefs + extends EvolveCase { + + private static final String NAME = PREFIX + "MultipleSelfRefs"; + + @PrimaryKey + int key; + + MultipleSelfRefs ref; + MultipleSelfRefsEmbed embed; + /* ref2 is a new field. 
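+         * Adding a field requires no mutation; records written under
+         * version 0 read back with ref2 == null until they are rewritten.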
+ */
+        MultipleSelfRefs ref2;
+
+        @Override
+        void checkEvolvedModel(EntityModel model,
+                               Environment env,
+                               boolean oldTypesExist) {
+            checkEntity(true, model, env, NAME, 1, null);
+            if (oldTypesExist) {
+                checkVersions(model, NAME, 1, NAME, 0);
+            } else {
+                checkVersions(model, NAME, 1);
+            }
+        }
+
+        @Override
+        void readObjects(EntityStore store, boolean doUpdate)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer, MultipleSelfRefs>
+                index = store.getPrimaryIndex
+                    (Integer.class,
+                     MultipleSelfRefs.class);
+            MultipleSelfRefs obj = index.get(99);
+            TestCase.assertNotNull(obj);
+            TestCase.assertEquals(99, obj.key);
+
+            if (doUpdate) {
+                index.put(obj);
+            }
+        }
+
+        @Override
+        void copyRawObjects(RawStore rawStore, EntityStore newStore)
+            throws DatabaseException {
+
+            PrimaryIndex<Integer, MultipleSelfRefs>
+                index = newStore.getPrimaryIndex
+                    (Integer.class,
+                     MultipleSelfRefs.class);
+            RawObject raw = rawStore.getPrimaryIndex(NAME).get(99);
+            index.put((MultipleSelfRefs)
+                      newStore.getModel().convertRawObject(raw));
+        }
+
+        @Override
+        void readRawObjects(RawStore store,
+                            boolean expectEvolved,
+                            boolean expectUpdated)
+            throws DatabaseException {
+
+            RawObject obj;
+            if (expectEvolved) {
+                obj = readRaw(store, 99, NAME, 1, CASECLS, 0);
+            } else {
+                obj = readRaw(store, 99, NAME, 0, CASECLS, 0);
+            }
+            if (expectEvolved && expectUpdated) {
+                checkRawFields(obj, "key", 99, "ref", ref, "embed", embed,
+                               "ref2", ref2);
+            } else {
+                checkRawFields(obj, "key", 99, "ref", ref, "embed", embed);
+            }
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveClasses.java.original b/test/com/sleepycat/persist/test/EvolveClasses.java.original
new file mode 100644
index 0000000..6eee108
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveClasses.java.original
@@ -0,0 +1,3176 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+package com.sleepycat.persist.test;
+
+import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE;
+
+import java.math.BigInteger;
+import java.util.HashMap;
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.SecondaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.KeyField;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.model.SecondaryKey;
+import com.sleepycat.persist.raw.RawStore;
+
+import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE;
+
+/**
+ * Nested classes are original versions of classes of the same name in
+ * EvolveClasses.java. See EvolveTestBase.java for the steps that are taken to
+ * add a new class (test case).
+ * + * @author Mark Hayes + */ +class EvolveClasses { + + @Entity + static class DeletedEntity1_ClassRemoved extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex index = + store.getPrimaryIndex + (Integer.class, DeletedEntity1_ClassRemoved.class); + index.put(this); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + DeletedEntity1_ClassRemoved.class.getName()); + assertDbExists + (true, env, + DeletedEntity1_ClassRemoved.class.getName(), "skey"); + } + } + + @Entity + static class DeletedEntity2_ClassRemoved extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex index = + store.getPrimaryIndex + (Integer.class, DeletedEntity2_ClassRemoved.class); + index.put(this); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + DeletedEntity2_ClassRemoved.class.getName()); + assertDbExists + (true, env, + DeletedEntity2_ClassRemoved.class.getName(), "skey"); + } + } + + @Entity + static class DeletedEntity3_AnnotRemoved_NoMutation extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedEntity3_AnnotRemoved_NoMutation.class); + index.put(this); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + DeletedEntity3_AnnotRemoved_NoMutation.class.getName()); + assertDbExists + (true, env, + DeletedEntity3_AnnotRemoved_NoMutation.class.getName(), + "skey"); + } + } + + @Entity + static class DeletedEntity4_AnnotRemoved_WithDeleter extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedEntity4_AnnotRemoved_WithDeleter.class); + index.put(this); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + DeletedEntity4_AnnotRemoved_WithDeleter.class.getName()); + assertDbExists + (true, env, + DeletedEntity4_AnnotRemoved_WithDeleter.class.getName(), + "skey"); + } + } + + @Entity + static class DeletedEntity5_EntityToPersist_NoMutation extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedEntity5_EntityToPersist_NoMutation.class); + index.put(this); + + 
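+            /* Sanity check: read back through the secondary index to
+               confirm the record and its secondary key were written. */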
SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + DeletedEntity5_EntityToPersist_NoMutation.class.getName()); + assertDbExists + (true, env, + DeletedEntity5_EntityToPersist_NoMutation.class.getName(), + "skey"); + } + } + + @Entity + static class DeletedEntity6_EntityToPersist_WithDeleter extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedEntity6_EntityToPersist_WithDeleter.class); + index.put(this); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + DeletedEntity6_EntityToPersist_WithDeleter.class.getName()); + assertDbExists + (true, env, + DeletedEntity6_EntityToPersist_WithDeleter.class.getName(), + "skey"); + } + } + + @Persistent + static class DeletedPersist1_ClassRemoved { + + int f = 123; + } + + @Entity + static class DeletedPersist1_ClassRemoved_NoMutation extends EvolveCase { + + @PrimaryKey + int key = 99; + + DeletedPersist1_ClassRemoved embed = + new DeletedPersist1_ClassRemoved(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedPersist1_ClassRemoved_NoMutation.class); + index.put(this); + } + } + + @Persistent + static class DeletedPersist2_ClassRemoved { + + int f = 123; + } + + @Entity + static class DeletedPersist2_ClassRemoved_WithDeleter extends EvolveCase { + + @PrimaryKey + int key = 99; + + DeletedPersist2_ClassRemoved embed = + new DeletedPersist2_ClassRemoved(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedPersist2_ClassRemoved_WithDeleter.class); + index.put(this); + } + } + + @Persistent + static class DeletedPersist3_AnnotRemoved { + + int f = 123; + } + + @Entity + static class DeletedPersist3_AnnotRemoved_NoMutation extends EvolveCase { + + @PrimaryKey + int key = 99; + + DeletedPersist3_AnnotRemoved embed = + new DeletedPersist3_AnnotRemoved(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedPersist3_AnnotRemoved_NoMutation.class); + index.put(this); + } + } + + @Persistent + static class DeletedPersist4_AnnotRemoved { + + int f = 123; + } + + @Entity + static class DeletedPersist4_AnnotRemoved_WithDeleter extends EvolveCase { + + @PrimaryKey + int key = 99; + + DeletedPersist4_AnnotRemoved embed = + new DeletedPersist4_AnnotRemoved(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedPersist4_AnnotRemoved_WithDeleter.class); + index.put(this); + } + } + + @Persistent + static class DeletedPersist5_PersistToEntity { + + int f = 123; + } + + @Entity + static class DeletedPersist5_PersistToEntity_NoMutation + extends EvolveCase { + + @PrimaryKey + int key = 99; + + DeletedPersist5_PersistToEntity embed = + new 
DeletedPersist5_PersistToEntity(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedPersist5_PersistToEntity_NoMutation.class); + index.put(this); + } + } + + @Persistent + static class DeletedPersist6_PersistToEntity { + + int f = 123; + } + + @Entity + static class DeletedPersist6_PersistToEntity_WithDeleter + extends EvolveCase { + + @PrimaryKey + int key = 99; + + DeletedPersist6_PersistToEntity embed = + new DeletedPersist6_PersistToEntity(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeletedPersist6_PersistToEntity_WithDeleter.class); + index.put(this); + } + } + + @Entity + static class RenamedEntity1_NewEntityName + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, RenamedEntity1_NewEntityName.class); + index.put(this); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + RenamedEntity1_NewEntityName.class.getName()); + assertDbExists + (true, env, + RenamedEntity1_NewEntityName.class.getName(), "skey"); + } + } + + @Entity + static class RenamedEntity2_NewEntityName + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, RenamedEntity2_NewEntityName.class); + index.put(this); + + SecondaryIndex + sindex = store.getSecondaryIndex(index, Integer.class, "skey"); + TestCase.assertNotNull(sindex.get(88)); + } + + @Override + void checkUnevolvedModel(EntityModel model, Environment env) { + assertDbExists + (true, env, + RenamedEntity2_NewEntityName.class.getName()); + assertDbExists + (true, env, + RenamedEntity2_NewEntityName.class.getName(), "skey"); + } + } + + @Persistent + static class DeleteSuperclass1_BaseClass + extends EvolveCase { + + int f = 123; + } + + @Entity + static class DeleteSuperclass1_NoMutation + extends DeleteSuperclass1_BaseClass { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass1_NoMutation.class); + index.put(this); + } + } + + @Persistent + static class DeleteSuperclass2_BaseClass + extends EvolveCase { + + int f = 123; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 456; + } + + @Entity + static class DeleteSuperclass2_WithConverter + extends DeleteSuperclass2_BaseClass { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey2 = 77; + + @SecondaryKey(relate=ONE_TO_ONE) + Integer skey3 = 66; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass2_WithConverter.class); + index.put(this); + } + } + + @Persistent + static class DeleteSuperclass3_BaseClass + extends EvolveCase { + + int f = 123; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey = 
456; + } + + @Entity + static class DeleteSuperclass3_WithDeleter + extends DeleteSuperclass3_BaseClass { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass3_WithDeleter.class); + index.put(this); + } + } + + @Persistent + static class DeleteSuperclass4_BaseClass + extends EvolveCase { + } + + @Entity + static class DeleteSuperclass4_NoFields + extends DeleteSuperclass4_BaseClass { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass4_NoFields.class); + index.put(this); + } + } + + @Persistent + static class DeleteSuperclass5_Embedded_Base { + + int g = 456; + } + + @Persistent + static class DeleteSuperclass5_Embedded + extends DeleteSuperclass5_Embedded_Base { + + int f = 123; + } + + @Entity + static class DeleteSuperclass5_Top + extends EvolveCase { + + @PrimaryKey + int key = 99; + + int ff = 88; + + DeleteSuperclass5_Embedded embed = + new DeleteSuperclass5_Embedded(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSuperclass5_Top.class); + index.put(this); + } + } + + @Entity + static class InsertSuperclass1_Between + extends EvolveCase { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + InsertSuperclass1_Between.class); + index.put(this); + } + } + + @Persistent + static class InsertSuperclass2_Embedded { + + int f = 123; + } + + @Entity + static class InsertSuperclass2_Top + extends EvolveCase { + + @PrimaryKey + int key = 99; + + int ff = 88; + + InsertSuperclass2_Embedded embed = + new InsertSuperclass2_Embedded(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + InsertSuperclass2_Top.class); + index.put(this); + } + } + + /* + @Persistent + static class RenameFields1_Base + extends EvolveCase { + + int f = 123; + } + + @Entity + static class RenameFields1 + extends RenameFields1_Base { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + RenameFields1.class); + index.put(this); + } + } + */ + + @Entity + static class DisallowNonKeyField_PrimitiveToObject + extends EvolveCase { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_PrimitiveToObject.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_ObjectToPrimitive + extends EvolveCase { + + @PrimaryKey + int key = 99; + + String ff = "88"; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_ObjectToPrimitive.class); + index.put(this); + } + } + + @Persistent + static class MyType { + + @Override + public boolean equals(Object o) { + return o instanceof MyType; + } + } + + @Persistent + static 
class MySubtype extends MyType { + + @Override + public boolean equals(Object o) { + return o instanceof MySubtype; + } + } + + @Entity + static class DisallowNonKeyField_ObjectToSubtype + extends EvolveCase { + + @PrimaryKey + int key = 99; + + MyType ff = new MyType(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_ObjectToSubtype.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_ObjectToUnrelatedSimple + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Integer ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_ObjectToUnrelatedSimple.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_ObjectToUnrelatedOther + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Integer ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_ObjectToUnrelatedOther.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_byte2boolean + extends EvolveCase { + + @PrimaryKey + int key = 99; + + byte ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_byte2boolean.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_short2byte + extends EvolveCase { + + @PrimaryKey + int key = 99; + + short ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_short2byte.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_int2short + extends EvolveCase { + + @PrimaryKey + int key = 99; + + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_int2short.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_long2int + extends EvolveCase { + + @PrimaryKey + int key = 99; + + long ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_long2int.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_float2long + extends EvolveCase { + + @PrimaryKey + int key = 99; + + float ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_float2long.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_double2float + extends EvolveCase { + + @PrimaryKey + int key = 99; + + double ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_double2float.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_Byte2byte + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Byte ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + 
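+            /* Write one record under the original Byte field type; the
+               change to the primitive byte in the evolved class is expected
+               to be disallowed when the store is reopened. */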
PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_Byte2byte.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_Character2char + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Character ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_Character2char.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_Short2short + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Short ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_Short2short.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_Integer2int + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Integer ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_Integer2int.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_Long2long + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Long ff = 88L; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_Long2long.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_Float2float + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Float ff = 88F; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_Float2float.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_Double2double + extends EvolveCase { + + @PrimaryKey + int key = 99; + + Double ff = 88D; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_Double2double.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_float2BigInt + extends EvolveCase { + + @PrimaryKey + int key = 99; + + float ff = 88F; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_float2BigInt.class); + index.put(this); + } + } + + @Entity + static class DisallowNonKeyField_BigInt2long + extends EvolveCase { + + @PrimaryKey + int key = 99; + + BigInteger ff = BigInteger.valueOf(88); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowNonKeyField_BigInt2long.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_byte2short + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + byte ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_byte2short.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_char2int + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + char ff = 88; + + @Override + void 
writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_char2int.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_short2int + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + short ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_short2int.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_int2long + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_int2long.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_long2float + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + long ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_long2float.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_float2double + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + float ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_float2double.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_Byte2short2 + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + Byte ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_Byte2short2.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_Character2int + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + Character ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_Character2int.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_Short2int2 + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + Short ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_Short2int2.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_Integer2long + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + Integer ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_Integer2long.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_Long2float2 + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + Long ff = 88L; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = 
store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_Long2float2.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_Float2double2 + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + Float ff = 88F; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_Float2double2.class); + index.put(this); + } + } + + @Entity + static class DisallowSecKeyField_int2BigInt + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + int ff = 88; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowSecKeyField_int2BigInt.class); + index.put(this); + } + } + + // -- + + @Entity + static class DisallowPriKeyField_byte2short + extends EvolveCase { + + @PrimaryKey + byte key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Byte.class, + DisallowPriKeyField_byte2short.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_char2int + extends EvolveCase { + + @PrimaryKey + char key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Character.class, + DisallowPriKeyField_char2int.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_short2int + extends EvolveCase { + + @PrimaryKey + short key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Short.class, + DisallowPriKeyField_short2int.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_int2long + extends EvolveCase { + + @PrimaryKey + int key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowPriKeyField_int2long.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_long2float + extends EvolveCase { + + @PrimaryKey + long key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Long.class, + DisallowPriKeyField_long2float.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_float2double + extends EvolveCase { + + @PrimaryKey + float key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Float.class, + DisallowPriKeyField_float2double.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_Byte2short2 + extends EvolveCase { + + @PrimaryKey + Byte key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Byte.class, + DisallowPriKeyField_Byte2short2.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_Character2int + extends EvolveCase { + + @PrimaryKey + Character key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Character.class, + DisallowPriKeyField_Character2int.class); + index.put(this); + } + } + + @Entity + 
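+    /*
+     * Editor's note: key fields are stricter than plain fields.  Even the
+     * numeric widenings that evolve implicitly for non-key fields (e.g.
+     * byte -> short) are rejected for @PrimaryKey and @SecondaryKey fields,
+     * because the stored key bytes define sort order.  The only key change
+     * exercised as legal below is between a primitive and its wrapper of the
+     * same type (AllowPriKeyField_byte2Byte / Byte2byte2).
+     */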
static class DisallowPriKeyField_Short2int2 + extends EvolveCase { + + @PrimaryKey + Short key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Short.class, + DisallowPriKeyField_Short2int2.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_Integer2long + extends EvolveCase { + + @PrimaryKey + Integer key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DisallowPriKeyField_Integer2long.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_Long2float2 + extends EvolveCase { + + @PrimaryKey + Long key = 99L; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Long.class, + DisallowPriKeyField_Long2float2.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_Float2double2 + extends EvolveCase { + + @PrimaryKey + Float key = 99F; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Float.class, + DisallowPriKeyField_Float2double2.class); + index.put(this); + } + } + + @Entity + static class DisallowPriKeyField_Long2BigInt + extends EvolveCase { + + @PrimaryKey + Long key = 99L; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Long.class, + DisallowPriKeyField_Long2BigInt.class); + index.put(this); + } + } + + @Persistent + static class DisallowCompositeKeyField_byte2short_Key { + + @KeyField(1) + int f1 = 1; + + @KeyField(2) + byte f2 = 2; + + @KeyField(3) + String f3 = "3"; + } + + @Entity + static class DisallowCompositeKeyField_byte2short + extends EvolveCase { + + @PrimaryKey + DisallowCompositeKeyField_byte2short_Key key = + new DisallowCompositeKeyField_byte2short_Key(); + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (DisallowCompositeKeyField_byte2short_Key.class, + DisallowCompositeKeyField_byte2short.class); + index.put(this); + } + } + + @Entity + static class AllowPriKeyField_byte2Byte + extends EvolveCase { + + @PrimaryKey + byte key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Byte.class, AllowPriKeyField_byte2Byte.class); + index.put(this); + } + } + + @Entity + static class AllowPriKeyField_Byte2byte2 + extends EvolveCase { + + @PrimaryKey + Byte key = 99; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Byte.class, AllowPriKeyField_Byte2byte2.class); + index.put(this); + } + } + + @Persistent + static class AllowFieldTypeChanges_Key { + + AllowFieldTypeChanges_Key() { + this(false); + } + + AllowFieldTypeChanges_Key(boolean init) { + if (init) { + f1 = true; + f2 = (byte) 2; + f3 = (short) 3; + f4 = 4; + f5 = 5L; + f6 = 6F; + f7 = 7D; + f8 = (char) 8; + f9 = true; + f10 = (byte) 10; + f11 = (short) 11; + f12 = 12; + f13 = 13L; + f14 = 14F; + f15 = 15D; + f16 = (char) 16; + } + } + + @KeyField(1) + boolean f1; + + @KeyField(2) + byte f2; + + @KeyField(3) + short f3; + + @KeyField(4) + int f4; + + @KeyField(5) + long f5; + + @KeyField(6) + float f6; + + @KeyField(7) + double f7; + + @KeyField(8) 
+ char f8; + + @KeyField(9) + Boolean f9; + + @KeyField(10) + Byte f10; + + @KeyField(11) + Short f11; + + @KeyField(12) + Integer f12; + + @KeyField(13) + Long f13; + + @KeyField(14) + Float f14; + + @KeyField(15) + Double f15; + + @KeyField(16) + Character f16; + } + + @Persistent + static class AllowFieldTypeChanges_Base + extends EvolveCase { + + @SecondaryKey(relate=ONE_TO_ONE) + AllowFieldTypeChanges_Key kcomposite = + new AllowFieldTypeChanges_Key(true); + + long f_long2Integer = 111; + String f_String2Long = "222"; + } + + @Entity + static class AllowFieldTypeChanges + extends AllowFieldTypeChanges_Base { + + @PrimaryKey + int pkeyint = 99; + + @SecondaryKey(relate=ONE_TO_ONE) + boolean kboolean = true; + + @SecondaryKey(relate=ONE_TO_ONE) + byte kbyte = 77; + + @SecondaryKey(relate=ONE_TO_ONE) + short kshort = 66; + + @SecondaryKey(relate=ONE_TO_ONE) + int kint = 55; + + @SecondaryKey(relate=ONE_TO_ONE) + long klong = 44; + + @SecondaryKey(relate=ONE_TO_ONE) + float kfloat = 33; + + @SecondaryKey(relate=ONE_TO_ONE) + double kdouble = 22; + + @SecondaryKey(relate=ONE_TO_ONE) + char kchar = 11; + + byte f01; + byte f02; + byte f03; + byte f04; + byte f06; + short f07; + short f08; + short f09; + short f10; + char f11; + char f12; + char f13; + char f14; + int f15; + int f16; + int f17; + long f18; + long f19; + float f20; + + byte f21; + byte f22; + byte f23; + byte f24; + byte f26; + short f27; + short f28; + short f29; + short f30; + char f31; + char f32; + char f33; + char f34; + int f35; + int f36; + int f37; + long f38; + long f39; + float f40; + + Byte f41; + Byte f42; + Byte f43; + Byte f44; + Byte f46; + Short f47; + Short f48; + Short f49; + Short f50; + Character f51; + Character f52; + Character f53; + Character f54; + Integer f55; + Integer f56; + Integer f57; + Long f58; + Long f59; + Float f60; + + byte f70; + short f71; + char f72; + int f73; + long f74; + Byte f75; + Short f76; + Character f77; + Integer f78; + Long f79; + + long f_long2int = 333; + String f_String2long = "444"; + + private void init() { + f01 = (byte) 1; + f02 = (byte) 2; + f03 = (byte) 3; + f04 = (byte) 4; + f06 = (byte) 6; + f07 = (short) 7; + f08 = (short) 8; + f09 = (short) 9; + f10 = (short) 10; + f11 = (char) 11; + f12 = (char) 12; + f13 = (char) 13; + f14 = (char) 14; + f15 = 15; + f16 = 16; + f17 = 17; + f18 = (long) 18; + f19 = (long) 19; + f20 = (float) 20; + + f21 = (byte) 21; + f22 = (byte) 22; + f23 = (byte) 23; + f24 = (byte) 24; + f26 = (byte) 26; + f27 = (short) 27; + f28 = (short) 28; + f29 = (short) 29; + f30 = (short) 30; + f31 = (char) 31; + f32 = (char) 32; + f33 = (char) 33; + f34 = (char) 34; + f35 = 35; + f36 = 36; + f37 = 37; + f38 = (long) 38; + f39 = (long) 39; + f40 = (float) 40; + + f41 = (byte) 41; + f42 = (byte) 42; + f43 = (byte) 43; + f44 = (byte) 44; + f46 = (byte) 46; + f47 = (short) 47; + f48 = (short) 48; + f49 = (short) 49; + f50 = (short) 50; + f51 = (char) 51; + f52 = (char) 52; + f53 = (char) 53; + f54 = (char) 54; + f55 = 55; + f56 = 56; + f57 = 57; + f58 = (long) 58; + f59 = (long) 59; + f60 = (float) 60; + + f70 = (byte) 70; + f71 = (short) 71; + f72 = (char) 72; + f73 = 73; + f74 = (long) 74; + f75 = (byte) 75; + f76 = (short) 76; + f77 = (char) 77; + f78 = 78; + f79 = (long) 79; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, AllowFieldTypeChanges.class); + init(); + index.put(this); + } + } + + @Entity + static class ConvertFieldContent_Entity + 
extends EvolveCase { + + @PrimaryKey + int key; + + String f1; + String f2; + + private void init() { + key = 99; + f1 = "01234"; + f2 = "56789"; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ConvertFieldContent_Entity.class); + init(); + index.put(this); + } + } + + @Persistent + static class ConvertExample1_Address { + String street; + String city; + String state; + String zipCode; + } + + @Entity + static class ConvertExample1_Entity + extends EvolveCase { + + @PrimaryKey + int key; + + ConvertExample1_Address embed; + + private void init() { + key = 99; + embed = new ConvertExample1_Address(); + embed.street = "street"; + embed.city = "city"; + embed.state = "state"; + embed.zipCode = "12345"; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ConvertExample1_Entity.class); + init(); + index.put(this); + } + } + + @Entity + static class ConvertExample2_Person + extends EvolveCase { + + @PrimaryKey + int key; + + String address; + + private void init() { + key = 99; + address = "street#city#state#12345"; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ConvertExample2_Person.class); + init(); + index.put(this); + } + } + + @Entity + static class ConvertExample3_Person + extends EvolveCase { + + @PrimaryKey + int key; + + String street; + String city; + String state; + int zipCode; + + private void init() { + key = 99; + street = "street"; + city = "city"; + state = "state"; + zipCode = 12345; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ConvertExample3_Person.class); + init(); + index.put(this); + } + } + + @Persistent + static class ConvertExample3Reverse_Address { + String street; + String city; + String state; + int zipCode; + } + + @Entity + static class ConvertExample3Reverse_Person + extends EvolveCase { + + @PrimaryKey + int key; + + ConvertExample3Reverse_Address address; + + private void init() { + key = 99; + address = new ConvertExample3Reverse_Address(); + address.street = "street"; + address.city = "city"; + address.state = "state"; + address.zipCode = 12345; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ConvertExample3Reverse_Person.class); + init(); + index.put(this); + } + } + + @Persistent + static class ConvertExample4_A extends ConvertExample4_B { + String name; + } + + @Persistent + static class ConvertExample4_B { + } + + @Entity + static class ConvertExample4_Entity + extends EvolveCase { + + @PrimaryKey + int key; + + ConvertExample4_A embed; + + private void init() { + key = 99; + embed = new ConvertExample4_A(); + embed.name = "name"; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ConvertExample4_Entity.class); + init(); + index.put(this); + } + } + + @Persistent + static class ConvertExample5_Pet { + String name; + boolean isCatNotDog; + int finickyLevel; + double barkVolume; + } + + @Entity + static class ConvertExample5_Entity + extends EvolveCase { + + @PrimaryKey + int key; + + ConvertExample5_Pet cat; + ConvertExample5_Pet dog; + + 
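+        /*
+         * Editor's note: the ConvertExample* cases are paired with Conversion
+         * mutations declared on the evolved side of the test.  A hedged
+         * sketch of the kind of field Conversion assumed for ConvertExample2
+         * (the "street#city#state#12345" address String is split into an
+         * Address object); the Address class and field layout here are
+         * illustrative only:
+         *
+         *     static class AddressConversion implements Conversion {
+         *         private static final long serialVersionUID = 1L;
+         *         private transient RawType addressType;
+         *
+         *         public void initialize(EntityModel model) {
+         *             addressType = model.getRawType(Address.class.getName());
+         *         }
+         *
+         *         public Object convert(Object fromValue) {
+         *             String[] f = ((String) fromValue).split("#");
+         *             Map<String, Object> v = new HashMap<String, Object>();
+         *             v.put("street", f[0]);
+         *             v.put("city", f[1]);
+         *             v.put("state", f[2]);
+         *             v.put("zipCode", f[3]);
+         *             return new RawObject(addressType, v, null);
+         *         }
+         *
+         *         @Override
+         *         public boolean equals(Object o) {
+         *             return o instanceof AddressConversion;
+         *         }
+         *     }
+         */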
private void init() { + key = 99; + cat = new ConvertExample5_Pet(); + cat.name = "Jeffry"; + cat.isCatNotDog = true; + cat.finickyLevel = 999; + dog = new ConvertExample5_Pet(); + dog.name = "Nelson"; + dog.isCatNotDog = false; + dog.barkVolume = 0.01; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ConvertExample5_Entity.class); + init(); + index.put(this); + } + } + + @Persistent + static class AllowFieldAddDelete_Embed { + private int f1 = 1; + private String f2 = "2"; + private String f4 = "4"; + private int f6 = 6; + private String f7 = "7"; + } + + @Persistent + static class AllowFieldAddDelete_Base + extends EvolveCase { + + private int f1 = 1; + private String f2 = "2"; + private String f4 = "4"; + private int f6 = 6; + private String f7 = "7"; + } + + @Entity + static class AllowFieldAddDelete + extends AllowFieldAddDelete_Base { + + @PrimaryKey + int key; + + AllowFieldAddDelete_Embed embed; + + private int f1 = 1; + private String f2 = "2"; + private String f4 = "4"; + private int f6 = 6; + private String f7 = "7"; + + private void init() { + key = 99; + embed = new AllowFieldAddDelete_Embed(); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, AllowFieldAddDelete.class); + init(); + index.put(this); + } + } + + static class ProxiedClass { + int data; + + ProxiedClass(int data) { + this.data = data; + } + } + + @Persistent(proxyFor=ProxiedClass.class) + static class ProxiedClass_Proxy implements PersistentProxy { + int data; + + public void initializeProxy(ProxiedClass o) { + data = o.data; + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data); + } + } + + @Entity + static class ProxiedClass_Entity + extends EvolveCase { + + @PrimaryKey + int key; + + ProxiedClass embed; + + private void init() { + key = 99; + embed = new ProxiedClass(88); + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxiedClass_Entity.class); + init(); + index.put(this); + } + } + + @Persistent(proxyFor=StringBuilder.class) + static class DisallowChangeProxyFor_Proxy + implements PersistentProxy { + + String data; + + public void initializeProxy(StringBuilder o) { + data = o.toString(); + } + + public StringBuilder convertProxy() { + return new StringBuilder(data); + } + } + + @Entity + static class DisallowChangeProxyFor + extends EvolveCase { + + @PrimaryKey + int key; + + private void init() { + key = 99; + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(DisallowChangeProxyFor_Proxy.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, DisallowChangeProxyFor.class); + init(); + index.put(this); + } + } + + @Persistent(proxyFor=StringBuilder.class) + static class DisallowDeleteProxyFor_Proxy + implements PersistentProxy { + + String data; + + public void initializeProxy(StringBuilder o) { + data = o.toString(); + } + + public StringBuilder convertProxy() { + return new StringBuilder(data); + } + } + + @Entity + static class DisallowDeleteProxyFor + extends EvolveCase { + + @PrimaryKey + int key; + + 
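+        /*
+         * Editor's note: a PersistentProxy makes a class that cannot be
+         * annotated (here StringBuilder) storable; the proxy's own format
+         * becomes part of the stored data.  Pointing @Persistent(proxyFor=...)
+         * at a different class (DisallowChangeProxyFor above) or dropping the
+         * proxy from the model entirely (this case) is therefore expected to
+         * be rejected when the evolved store is opened.
+         */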
private void init() { + key = 99; + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(DisallowDeleteProxyFor_Proxy.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, DisallowDeleteProxyFor.class); + init(); + index.put(this); + } + } + + @Persistent + static class ArrayNameChange_Component { + + int data; + } + + @Entity + static class ArrayNameChange_Entity + extends EvolveCase { + + @PrimaryKey + int key; + + ArrayNameChange_Component[] embed; + ArrayNameChange_Component embed2; + + private void init() { + key = 99; + embed2 = new ArrayNameChange_Component(); + embed2.data = 88; + embed = new ArrayNameChange_Component[] { embed2 }; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ArrayNameChange_Entity.class); + init(); + index.put(this); + } + } + + enum AddEnumConstant_Enum { + A, B; + } + + @Entity + static class AddEnumConstant_Entity + extends EvolveCase { + + @PrimaryKey + int key; + + AddEnumConstant_Enum e1; + AddEnumConstant_Enum e2; + + private void init() { + key = 99; + e1 = AddEnumConstant_Enum.A; + e2 = AddEnumConstant_Enum.B; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, AddEnumConstant_Entity.class); + init(); + index.put(this); + } + } + + enum InsertEnumConstant_Enum { + A, B; + } + + @Persistent + static class InsertEnumConstant_KeyClass + implements Comparable { + + @KeyField(1) + InsertEnumConstant_Enum key; + + private InsertEnumConstant_KeyClass() {} + + InsertEnumConstant_KeyClass(InsertEnumConstant_Enum key) { + this.key = key; + } + + public int compareTo(InsertEnumConstant_KeyClass o) { + /* Use the natural order, in spite of insertions. */ + return key.compareTo(o.key); + } + } + + @Entity + static class InsertEnumConstant_Entity + extends EvolveCase { + + @PrimaryKey + int key; + + @SecondaryKey(relate=MANY_TO_ONE) + InsertEnumConstant_KeyClass secKey; + + InsertEnumConstant_Enum e1; + InsertEnumConstant_Enum e2; + + private void init() { + key = 99; + secKey = + new InsertEnumConstant_KeyClass(InsertEnumConstant_Enum.A); + e1 = InsertEnumConstant_Enum.A; + e2 = InsertEnumConstant_Enum.B; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, InsertEnumConstant_Entity.class); + init(); + index.put(this); + } + } + + enum DeleteEnumConstant_Enum { + A, B, C; + } + + @Entity + static class DeleteEnumConstant_NoMutation + extends EvolveCase { + + @PrimaryKey + int key; + + DeleteEnumConstant_Enum e1; + DeleteEnumConstant_Enum e2; + DeleteEnumConstant_Enum e3; + + private void init() { + key = 99; + e1 = DeleteEnumConstant_Enum.A; + e2 = DeleteEnumConstant_Enum.B; + e3 = DeleteEnumConstant_Enum.C; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, DeleteEnumConstant_NoMutation.class); + init(); + index.put(this); + } + } + + /* Disabled until support for enum deletion is added. 
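+ *
+ * Editor's note: enum constants are stored by name rather than ordinal, so
+ * adding or inserting constants (the cases above) evolves without mutations,
+ * while deleting a constant strands stored names that can no longer be
+ * resolved; hence this converter-based case stays disabled.  A hedged sketch
+ * of the mutation such support would presumably take (the Conversion class
+ * named here is hypothetical):
+ *
+ *     Mutations m = new Mutations();
+ *     m.addConverter(new Converter(
+ *         DeleteEnumConstant_Enum.class.getName(), 0,
+ *         new RemapDeletedEnumConstantConversion()));
+ *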
+ @Entity + static class DeleteEnumConstant_WithConverter + extends EvolveCase { + + @PrimaryKey + int key; + + DeleteEnumConstant_Enum e1; + DeleteEnumConstant_Enum e2; + DeleteEnumConstant_Enum e3; + + private void init() { + key = 99; + e1 = DeleteEnumConstant_Enum.A; + e2 = DeleteEnumConstant_Enum.B; + e3 = DeleteEnumConstant_Enum.C; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, DeleteEnumConstant_WithConverter.class); + init(); + index.put(this); + } + } + */ + + @Entity + static class DisallowChangeKeyRelate + extends EvolveCase { + + @PrimaryKey + int key; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey; + + private void init() { + key = 99; + skey = 88; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, DisallowChangeKeyRelate.class); + init(); + index.put(this); + } + } + + @Entity + static class AllowChangeKeyMetadata + extends EvolveCase { + + @PrimaryKey + int key; + + int aa; + + int addAnnotation; + + @SecondaryKey(relate=ONE_TO_ONE) + int dropField; + + @SecondaryKey(relate=ONE_TO_ONE) + int dropAnnotation; + + @SecondaryKey(relate=ONE_TO_ONE) + int toBeRenamedField; + + int ff; + + private void init() { + key = 99; + addAnnotation = 88; + dropField = 77; + dropAnnotation = 66; + toBeRenamedField = 44; + aa = 33; + ff = 22; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, AllowChangeKeyMetadata.class); + init(); + index.put(this); + } + } + + /** [#16253] */ + @Persistent + static class AllowChangeKeyMetadataInSubclass + extends AllowChangeKeyMetadataEntity { + + int aa; + + int addAnnotation; + + @SecondaryKey(relate=ONE_TO_ONE) + int dropField; + + @SecondaryKey(relate=ONE_TO_ONE) + int dropAnnotation; + + @SecondaryKey(relate=ONE_TO_ONE) + int toBeRenamedField; + + int ff; + + private void init() { + key = 99; + addAnnotation = 88; + dropField = 77; + dropAnnotation = 66; + toBeRenamedField = 44; + aa = 33; + ff = 22; + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(AllowChangeKeyMetadataInSubclass.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, AllowChangeKeyMetadataEntity.class); + init(); + index.put(this); + } + } + + @Entity + static class AllowChangeKeyMetadataEntity + extends EvolveCase { + + @PrimaryKey + int key; + } + + /** [#15524] */ + @Entity + static class AllowAddSecondary + extends EvolveCase { + + @PrimaryKey + long key; + + int a; + int b; + + private void init() { + key = 99; + a = 1; + b = 2; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Long.class, AllowAddSecondary.class); + init(); + index.put(this); + } + } + + /** [#15797] */ + @Entity + static class FieldAddAndConvert + extends EvolveCase { + + @PrimaryKey + int key; + + private int f1 = 1; + private int f3 = 3; + + private void init() { + key = 99; + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, FieldAddAndConvert.class); + init(); + index.put(this); + } + } + + @Entity + static class RenameSecFieldDestroyOrder_1 extends EvolveCase { + 
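+        /*
+         * Editor's note: renaming a @SecondaryKey field requires a field
+         * Renamer mutation, since the secondary database is named after the
+         * field; the three *_DestroyOrder variants appear to differ only in
+         * the order in which the old secondary databases are destroyed on
+         * the evolved side.  Minimal sketch (the evolved field name is
+         * assumed):
+         *
+         *     Mutations m = new Mutations();
+         *     m.addRenamer(new Renamer(
+         *         RenameSecFieldDestroyOrder_1.class.getName(), 0,
+         *         "secKey2", "secKey2Renamed"));
+         */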
+ @PrimaryKey + int key = 1; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey = "aa"; + + @SecondaryKey(relate=MANY_TO_ONE) + int secKey2 = 2; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey3 = "bb"; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + RenameSecFieldDestroyOrder_1.class); + index.put(this); + } + } + + @Entity + static class RenameSecFieldDestroyOrder_2 extends EvolveCase { + + @PrimaryKey + int key = 1; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey = "aa"; + + @SecondaryKey(relate=MANY_TO_ONE) + int secKey2 = 2; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey3 = "bb"; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + RenameSecFieldDestroyOrder_2.class); + index.put(this); + } + } + + @Entity + static class RenameSecFieldDestroyOrder_3 extends EvolveCase { + + @PrimaryKey + int key = 1; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey = "aa"; + + @SecondaryKey(relate=MANY_TO_ONE) + int secKey2 = 2; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey3 = "bb"; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + RenameSecFieldDestroyOrder_3.class); + index.put(this); + } + } + + @Entity + static class DeleteSecAnnotationDestroyOrder extends EvolveCase { + + @PrimaryKey + int key = 1; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey = "aa"; + + @SecondaryKey(relate=MANY_TO_ONE) + int secKey2 = 2; + + @SecondaryKey(relate=MANY_TO_ONE) + String secKey3 = "bb"; + + int anonKey = 3; + String xnonKey = "cc"; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, + DeleteSecAnnotationDestroyOrder.class); + index.put(this); + } + } + + /** [#19377] */ + @Entity + static class ProxyClassFieldChanged extends EvolveCase { + + @PrimaryKey + int key; + + private ProxiedClass embed; + + private void init() { + key = 1; + embed = new ProxiedClass(2); + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy2.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassFieldChanged.class); + init(); + index.put(this); + } + } + + @Persistent(proxyFor=ProxiedClass.class) + static class ProxiedClass_Proxy2 implements PersistentProxy { + Map data; + + public void initializeProxy(ProxiedClass o) { + data = new HashMap(); + data.put("data", o.data); + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data.get("data")); + } + } + + /** [#19377] */ + @Entity + static class ProxyClassObjectFieldChanged extends EvolveCase { + + @PrimaryKey + int key; + + private ProxiedClass embed; + + private void init() { + key = 1; + embed = new ProxiedClass(2); + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy3.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassObjectFieldChanged.class); + init(); + index.put(this); + } + } + + @Persistent(proxyFor=ProxiedClass.class) + static class ProxiedClass_Proxy3 implements 
PersistentProxy { + Map data; + + public void initializeProxy(ProxiedClass o) { + data = new HashMap(); + data.put("data", o.data); + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data.get("data")); + } + } + + /** [#19377] */ + @Entity + static class ProxyClassArrayFieldChanged extends EvolveCase { + + @PrimaryKey + int key; + + private ProxiedClass embed; + + private void init() { + key = 1; + embed = new ProxiedClass(2); + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy4.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassArrayFieldChanged.class); + init(); + index.put(this); + } + } + + @Persistent(proxyFor=ProxiedClass.class) + static class ProxiedClass_Proxy4 implements PersistentProxy { + Integer[] data; + + public void initializeProxy(ProxiedClass o) { + data = new Integer[1]; + data[0] = o.data; + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data[0]); + } + } + + /** [#19377] */ + @Entity + static class ProxyClassObjectArrayFieldChanged extends EvolveCase { + + @PrimaryKey + int key; + + private ProxiedClass embed; + + private void init() { + key = 1; + embed = new ProxiedClass(2); + } + + @Override + void configure(EntityModel model, StoreConfig config) { + model.registerClass(ProxiedClass_Proxy5.class); + } + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, ProxyClassObjectArrayFieldChanged.class); + init(); + index.put(this); + } + } + + @Persistent(proxyFor=ProxiedClass.class) + static class ProxiedClass_Proxy5 implements PersistentProxy { + Integer[] data; + + public void initializeProxy(ProxiedClass o) { + data = new Integer[1]; + data[0] = o.data; + } + + public ProxiedClass convertProxy() { + return new ProxiedClass(data[0]); + } + } + + /* [#21869] */ + @Persistent + static class MultipleSelfRefsEmbed { + MultipleSelfRefs ref; + MultipleSelfRefsEmbed embed; + } + + @Entity + static class MultipleSelfRefs + extends EvolveCase { + + @PrimaryKey + int key; + + MultipleSelfRefs ref; + MultipleSelfRefsEmbed embed; + + @Override + void writeObjects(EntityStore store) + throws DatabaseException { + + PrimaryIndex + index = store.getPrimaryIndex + (Integer.class, MultipleSelfRefs.class); + key = 99; + index.put(this); + } + } +} diff --git a/test/com/sleepycat/persist/test/EvolveProxyClassTest.java b/test/com/sleepycat/persist/test/EvolveProxyClassTest.java new file mode 100644 index 0000000..8ebb4ee --- /dev/null +++ b/test/com/sleepycat/persist/test/EvolveProxyClassTest.java @@ -0,0 +1,502 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */
+
+package com.sleepycat.persist.test;
+
+import static org.junit.Assert.assertNotNull;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.util.TestUtils;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.PrimaryIndex;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.Conversion;
+import com.sleepycat.persist.evolve.Converter;
+import com.sleepycat.persist.evolve.Deleter;
+import com.sleepycat.persist.evolve.Mutations;
+import com.sleepycat.persist.evolve.Renamer;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.Entity;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.model.Persistent;
+import com.sleepycat.persist.model.PersistentProxy;
+import com.sleepycat.persist.model.PrimaryKey;
+import com.sleepycat.persist.raw.RawObject;
+import com.sleepycat.persist.raw.RawType;
+import com.sleepycat.util.test.SharedTestUtils;
+import com.sleepycat.util.test.TestBase;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * If we make changes to a PersistentProxy class, a class converter is needed
+ * to provide backwards compatibility [#19312].
+ *
+ * We generate a database which stores proxied data using je-4.0.103.  Then we
+ * make changes to the proxy class and provide mutations for those changes.
+ * This unit test serves to test all kinds of mutations for the proxy class.
+ *
+ * The old version proxy class:
+ *
+ * @Persistent(proxyFor=Locale.class)
+ * static class LocaleProxy implements PersistentProxy<Locale> {
+ *
+ *     String language;
+ *     String country;
+ *     String variant;
+ *
+ *     private LocaleProxy() {}
+ *
+ *     public void initializeProxy(Locale object) {
+ *         language = object.getLanguage();
+ *         country = object.getCountry();
+ *         variant = object.getVariant();
+ *     }
+ *
+ *     public Locale convertProxy() {
+ *         return new Locale(language, country, variant);
+ *     }
+ * }
+ */
+public class EvolveProxyClassTest extends TestBase {
+
+    private static final String STORE_NAME = "test";
+
+    private File envHome;
+    private Environment env;
+    private EntityStore store;
+    private PrimaryIndex<Integer, LocaleData> primary;
+    private enum mutationTypes { FIELD_CONVERSION, DELETE_FIELD,
+            CLASS_CONVERSION, HIERARCHY_CONVERSION, };
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        envHome = SharedTestUtils.getTestDir();
+        super.setUp();
+    }
+
+    @After
+    public void tearDown() {
+        if (store != null) {
+            try {
+                store.close();
+            } catch (DatabaseException e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        envHome = null;
+        store = null;
+        env = null;
+    }
+
+    private void open(mutationTypes muType)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+        EntityModel model = new AnnotationModel();
+        Mutations mutations = new Mutations();
+        Renamer classRenamer;
+        switch (muType) {
+            case DELETE_FIELD :
+                model.registerClass(LocaleProxy_DeleteField.class);
+                classRenamer = new Renamer
("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, + LocaleProxy_DeleteField.class.getName()); + Deleter deleteField = new Deleter + ("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, "variant"); + mutations.addRenamer(classRenamer); + mutations.addDeleter(deleteField); + break; + case CLASS_CONVERSION : + model.registerClass(LocaleProxy_ClassConversion.class); + Converter classConverter = new Converter + ("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, new MyConversion_ClassConversion()); + classRenamer = new Renamer + ("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, + LocaleProxy_ClassConversion.class.getName()); + mutations.addRenamer(classRenamer); + mutations.addConverter(classConverter); + break; + case FIELD_CONVERSION : + model.registerClass(LocaleProxy_FieldConversion.class); + classRenamer = new Renamer + ("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, + LocaleProxy_FieldConversion.class.getName()); + Converter fieldConverter = new Converter + ("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, "language", + new MyConversion_FieldConversion()); + mutations.addRenamer(classRenamer); + mutations.addConverter(fieldConverter); + break; + case HIERARCHY_CONVERSION : + model.registerClass(LocaleProxy_HierarchyConversion.class); + Converter hierarchyConverter = new Converter + ("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, new MyConversion_HierarchyConversion()); + classRenamer = new Renamer + ("com.sleepycat.persist.test.EvolveProxyClassTest$" + + "LocaleProxy", 0, + LocaleProxy_HierarchyConversion.class.getName()); + mutations.addRenamer(classRenamer); + mutations.addConverter(hierarchyConverter); + break; + default: break; + } + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(envConfig.getAllowCreate()); + storeConfig.setTransactional(envConfig.getTransactional()); + storeConfig.setModel(model); + storeConfig.setMutations(mutations); + store = new EntityStore(env, STORE_NAME, storeConfig); + primary = store.getPrimaryIndex(Integer.class, LocaleData.class); + } + + private void close() + throws DatabaseException { + + if (store != null) { + store.close(); + store = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testDeleteFieldForProxyClass() + throws IOException { + + evolveProxyClassTest(mutationTypes.DELETE_FIELD); + } + + @Test + public void testClassConversionForProxyClass() + throws IOException { + + evolveProxyClassTest(mutationTypes.CLASS_CONVERSION); + } + + @Test + public void testFieldConversionForProxyClass() + throws IOException { + + evolveProxyClassTest(mutationTypes.FIELD_CONVERSION); + } + + @Test + public void testHierarchyConversionForProxyClass() + throws IOException { + + evolveProxyClassTest(mutationTypes.HIERARCHY_CONVERSION); + } + + private void evolveProxyClassTest(mutationTypes muType) + throws IOException { + /* Copy log file resource to log file zero. 
+         */
+        TestUtils.loadLog(getClass(), "je-4.0.103_EvolveProxyClass.jdb",
+                          envHome);
+
+        open(muType);
+        LocaleData entity = primary.get(1);
+        assertNotNull(entity);
+        String variant;
+        if (muType == mutationTypes.DELETE_FIELD) {
+            variant = "";
+        } else {
+            variant = "A";
+        }
+        entity.validate
+            (new LocaleData (1, new Locale("English", "America", variant)));
+        close();
+    }
+
+    @Entity
+    static class LocaleData {
+        @PrimaryKey
+        private int id;
+        private Locale f1;
+
+        LocaleData() { }
+
+        LocaleData(int id, Locale f1) {
+            this.id = id;
+            this.f1 = f1;
+        }
+
+        int getId() {
+            return id;
+        }
+
+        Locale getF1() {
+            return f1;
+        }
+
+        public void validate(Object other) {
+            LocaleData o = (LocaleData) other;
+            TestCase.assertEquals(f1.getCountry(), o.f1.getCountry());
+            TestCase.assertEquals(f1.getLanguage(), o.f1.getLanguage());
+            TestCase.assertEquals(f1.getVariant(), o.f1.getVariant());
+        }
+
+    }
+
+    /*
+     * New version proxy class:
+     * Rename the class, and convert the class.
+     */
+    @Persistent(proxyFor=Locale.class, version=1)
+    static class LocaleProxy_ClassConversion
+        implements PersistentProxy<Locale> {
+
+        MyLocale locale;
+
+        private LocaleProxy_ClassConversion() {}
+
+        public void initializeProxy(Locale object) {
+            locale = new MyLocale();
+            locale.language = object.getLanguage();
+            locale.country = object.getCountry();
+            locale.variant = object.getVariant();
+        }
+
+        public Locale convertProxy() {
+            return new Locale(locale.language, locale.country, locale.variant);
+        }
+    }
+
+    @Persistent
+    static class MyLocale {
+        String language;
+        String country;
+        String variant;
+        private MyLocale() {}
+    }
+
+    static class MyConversion_ClassConversion implements Conversion {
+        private static final long serialVersionUID = 1L;
+        private transient RawType newLocaleProxyType;
+        private transient RawType myLocaleType;
+
+        public void initialize(EntityModel model) {
+            newLocaleProxyType =
+                model.getRawType(LocaleProxy_ClassConversion.class.getName());
+            myLocaleType = model.getRawType(MyLocale.class.getName());
+        }
+
+        public Object convert(Object fromValue) {
+
+            RawObject localeProxy = (RawObject) fromValue;
+            Map<String, Object> localeProxyValues = localeProxy.getValues();
+            Map<String, Object> myLocaleValues = new HashMap<String, Object>();
+
+            myLocaleValues.put("language",
+                               localeProxyValues.remove("language"));
+            myLocaleValues.put("country", localeProxyValues.remove("country"));
+            myLocaleValues.put("variant", localeProxyValues.remove("variant"));
+            RawObject myLocale =
+                new RawObject(myLocaleType, myLocaleValues, null);
+            localeProxyValues.put("locale", myLocale);
+
+            return new RawObject(newLocaleProxyType, localeProxyValues,
+                                 localeProxy.getSuper());
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof MyConversion_ClassConversion;
+        }
+    }
+
+    /*
+     * New version proxy class:
+     * Rename the class, and convert one of the fields.
+     */
+    @Persistent(proxyFor=Locale.class, version=1)
+    static class LocaleProxy_FieldConversion
+        implements PersistentProxy<Locale> {
+
+        MyLanguage language;
+        String country;
+        String variant;
+
+        private LocaleProxy_FieldConversion() {}
+
+        public void initializeProxy(Locale object) {
+            language = new MyLanguage();
+            language.language = object.getLanguage();
+            country = object.getCountry();
+            variant = object.getVariant();
+        }
+
+        public Locale convertProxy() {
+            return new Locale(language.language, country, variant);
+        }
+    }
+
+    @Persistent
+    static class MyLanguage {
+        String language;
+        private MyLanguage() {}
+    }
+
+    static class MyConversion_FieldConversion implements Conversion {
+        private static final long serialVersionUID = 1L;
+        private transient RawType myLanguageType;
+
+        public void initialize(EntityModel model) {
+            myLanguageType = model.getRawType(MyLanguage.class.getName());
+        }
+
+        public Object convert(Object fromValue) {
+
+            String oldLanguage = (String) fromValue;
+            Map<String, Object> myLanguageValues =
+                new HashMap<String, Object>();
+
+            myLanguageValues.put("language", oldLanguage);
+            return new RawObject(myLanguageType, myLanguageValues, null);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            return o instanceof MyConversion_FieldConversion;
+        }
+    }
+
+    /*
+     * New version proxy class:
+     * Rename the class, and delete one of the fields.
+     */
+    @Persistent(proxyFor=Locale.class, version=1)
+    static class LocaleProxy_DeleteField
+        implements PersistentProxy<Locale> {
+
+        String language;
+        String country;
+        // Deleted field:
+        // String variant;
+
+        private LocaleProxy_DeleteField() {}
+
+        public void initializeProxy(Locale object) {
+            language = object.getLanguage();
+            country = object.getCountry();
+            // variant = object.getVariant();
+        }
+
+        public Locale convertProxy() {
+            return new Locale(language, country);
+        }
+    }
+
+    /*
+     * New version proxy class:
+     * Rename the class, and change the class hierarchy.
+ */ + @Persistent(proxyFor=Locale.class, version=1) + static class LocaleProxy_HierarchyConversion + extends LocaleProxy_Base implements PersistentProxy { + + + private LocaleProxy_HierarchyConversion() {} + + public void initializeProxy(Locale object) { + language = object.getLanguage(); + country = object.getCountry(); + variant = object.getVariant(); + } + + public Locale convertProxy() { + return new Locale(language, country, variant); + } + } + + @Persistent + abstract static class LocaleProxy_Base { + String language; + String country; + String variant; + } + + static class MyConversion_HierarchyConversion implements Conversion { + private static final long serialVersionUID = 1L; + private transient RawType newLocaleProxyType; + private transient RawType localeProxyBaseType; + + public void initialize(EntityModel model) { + newLocaleProxyType = model.getRawType + (LocaleProxy_HierarchyConversion.class.getName()); + localeProxyBaseType = + model.getRawType(LocaleProxy_Base.class.getName()); + } + + public Object convert(Object fromValue) { + + RawObject oldLocaleProxy = (RawObject) fromValue; + Map localeProxyValues = oldLocaleProxy.getValues(); + Map localeProxyBaseValues = + new HashMap(); + + localeProxyBaseValues.put("language", + localeProxyValues.remove("language")); + localeProxyBaseValues.put("country", + localeProxyValues.remove("country")); + localeProxyBaseValues.put("variant", + localeProxyValues.remove("variant")); + RawObject localeProxyBase = new RawObject + (localeProxyBaseType, localeProxyBaseValues, null); + RawObject newLocaleProxy = new RawObject + (newLocaleProxyType, localeProxyValues, localeProxyBase); + + return newLocaleProxy; + } + + @Override + public boolean equals(Object o) { + return o instanceof MyConversion_HierarchyConversion; + } + } +} diff --git a/test/com/sleepycat/persist/test/EvolveTest.java b/test/com/sleepycat/persist/test/EvolveTest.java new file mode 100644 index 0000000..79ba2db --- /dev/null +++ b/test/com/sleepycat/persist/test/EvolveTest.java @@ -0,0 +1,278 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.persist.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.persist.evolve.EvolveConfig; +import com.sleepycat.persist.evolve.EvolveEvent; +import com.sleepycat.persist.evolve.EvolveListener; +import com.sleepycat.persist.evolve.EvolveStats; +import com.sleepycat.persist.impl.PersistCatalog; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Runs part two of the EvolveTest. This part is run with the new/updated + * version of EvolveClasses in the classpath. It uses the environment and + * store created by EvolveTestInit. It verifies that it can read/write/evolve + * objects serialized using the old class format, and that it can create new + * objects with the new class format. 
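+ *
+ * <p>Editor's note: two evolution paths are exercised.  Lazy evolution
+ * happens as a side effect of opening and reading the store; eager evolution
+ * is driven explicitly, roughly as follows (a hedged sketch of the API used
+ * below):
+ *
+ * <pre>
+ *    EvolveConfig config = new EvolveConfig();
+ *    EvolveStats stats = store.evolve(config);
+ *    int nConverted = stats.getNConverted();
+ * </pre>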
+ * + * @author Mark Hayes + */ +public class EvolveTest extends EvolveTestBase { + + public EvolveTest(String originalClsName, String evolvedClsName) + throws Exception { + super(originalClsName, evolvedClsName); + } + + /* Toggle to use listener every other test case. */ + private static boolean useEvolveListener; + + private int evolveNRead; + private int evolveNConverted; + + boolean useEvolvedClass() { + return true; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + + /* Copy the log files created by EvolveTestInit. */ + envHome = getTestInitHome(true /*evolved*/); + envHome.mkdirs(); + SharedTestUtils.emptyDir(envHome); + SharedTestUtils.copyFiles(getTestInitHome(false /*evolved*/), envHome); + } + + @After + public void tearDown() { + try { super.tearDown(); } catch (Throwable e) { } + } + + @Test + public void testLazyEvolve() + throws Exception { + + openEnv(); + + /* + * Open in raw mode to check unevolved raw object and formats. This + * is possible whether or not we can open the store further below to + * evolve formats without errors. + */ + openRawStore(); + caseObj.checkUnevolvedModel(rawStore.getModel(), env); + caseObj.readRawObjects + (rawStore, false /*expectEvolved*/, false /*expectUpdated*/); + closeRawStore(); + + /* + * Check evolution in read-only mode. Since Replica upgrade mode is + * effectively read-only mode, this also helps to test evolution during + * replication group upgrades. [#18690] + */ + if (openStoreReadOnly()) { + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + caseObj.readObjects(store, false /*doUpdate*/); + closeStore(); + } + + if (openStoreReadWrite()) { + + /* + * When opening read-write, formats are evolved lazily. Check by + * reading evolved objects. + */ + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + caseObj.readObjects(store, false /*doUpdate*/); + closeStore(); + + /* + * Read raw objects again to check that the evolved objects are + * returned even though the stored objects were not evolved. + */ + openRawStore(); + caseObj.checkEvolvedModel + (rawStore.getModel(), env, true /*oldTypesExist*/); + caseObj.readRawObjects + (rawStore, true /*expectEvolved*/, false /*expectUpdated*/); + closeRawStore(); + + /* + * Open read-only to ensure that the catalog does not need to + * change (evolve formats) unnecessarily. + */ + PersistCatalog.expectNoClassChanges = true; + try { + assertTrue(openStoreReadOnly()); + } finally { + PersistCatalog.expectNoClassChanges = false; + } + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + caseObj.readObjects(store, false /*doUpdate*/); + closeStore(); + + /* + * Open read-write to update objects and store them in evolved + * format. + */ + openStoreReadWrite(); + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + caseObj.readObjects(store, true /*doUpdate*/); + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + closeStore(); + + /* + * Check raw objects again after the evolved objects were stored. + */ + openRawStore(); + caseObj.checkEvolvedModel + (rawStore.getModel(), env, true /*oldTypesExist*/); + caseObj.readRawObjects + (rawStore, true /*expectEvolved*/, true /*expectUpdated*/); + closeRawStore(); + } + + closeAll(); + } + + @Test + public void testEagerEvolve() + throws Exception { + + /* If the store cannot be opened, this test is not appropriate. 
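+     * (The Disallow* cases, for example, fail at open time with
+     * IncompatibleClassException, so there is nothing for eager evolution
+     * to do.)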
*/ + if (caseObj.getStoreOpenException() != null) { + return; + } + + EvolveConfig config = new EvolveConfig(); + + /* + * Use listener every other time to ensure that the stats are returned + * correctly when no listener is configured. [#17024] + */ + useEvolveListener = !useEvolveListener; + if (useEvolveListener) { + config.setEvolveListener(new EvolveListener() { + public boolean evolveProgress(EvolveEvent event) { + EvolveStats stats = event.getStats(); + evolveNRead = stats.getNRead(); + evolveNConverted = stats.getNConverted(); + return true; + } + }); + } + + openEnv(); + + openStoreReadWrite(); + + /* + * Evolve and expect that the expected number of entities are + * converted. + */ + int nExpected = caseObj.getNRecordsExpected(); + evolveNRead = 0; + evolveNConverted = 0; + PersistCatalog.unevolvedFormatsEncountered = false; + EvolveStats stats = store.evolve(config); + if (nExpected > 0) { + assertTrue(PersistCatalog.unevolvedFormatsEncountered); + } + assertTrue(stats.getNRead() == nExpected); + assertTrue(stats.getNConverted() == nExpected); + assertTrue(stats.getNConverted() >= stats.getNRead()); + if (useEvolveListener) { + assertEquals(evolveNRead, stats.getNRead()); + assertEquals(evolveNConverted, stats.getNConverted()); + } + + /* Evolve again and expect that no entities are converted. */ + evolveNRead = 0; + evolveNConverted = 0; + PersistCatalog.unevolvedFormatsEncountered = false; + stats = store.evolve(config); + assertTrue(!PersistCatalog.unevolvedFormatsEncountered); + assertEquals(0, stats.getNRead()); + assertEquals(0, stats.getNConverted()); + if (useEvolveListener) { + assertTrue(evolveNRead == 0); + assertTrue(evolveNConverted == 0); + } + + /* Ensure that we can read all entities without evolution. */ + PersistCatalog.unevolvedFormatsEncountered = false; + caseObj.readObjects(store, false /*doUpdate*/); + assertTrue(!PersistCatalog.unevolvedFormatsEncountered); + + /* + * When automatic unused type deletion is implemented in the future the + * oldTypesExist parameters below should be changed to false. + */ + + /* Open again and try an update. */ + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + caseObj.readObjects(store, true /*doUpdate*/); + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + closeStore(); + + /* Open read-only and double check that everything is OK. */ + assertTrue(openStoreReadOnly()); + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + caseObj.readObjects(store, false /*doUpdate*/); + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + closeStore(); + + /* Check raw objects. */ + openRawStore(); + caseObj.checkEvolvedModel + (rawStore.getModel(), env, true /*oldTypesExist*/); + caseObj.readRawObjects + (rawStore, true /*expectEvolved*/, true /*expectUpdated*/); + + /* + * Test copy raw object to new store via convertRawObject. In this + * test we can pass false for oldTypesExist because newStore starts + * with the new/evolved class model. 
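+         *
+         * Editor's note: a hedged sketch of the kind of loop copyRawObjects
+         * presumably performs (the entity class name is illustrative):
+         *
+         *     EntityModel model = newStore.getModel();
+         *     EntityCursor<RawObject> c =
+         *         rawStore.getPrimaryIndex("some.EntityClass").entities();
+         *     try {
+         *         for (RawObject raw : c) {
+         *             Object entity = model.convertRawObject(raw);
+         *             // put entity into newStore's primary index
+         *         }
+         *     } finally {
+         *         c.close();
+         *     }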
+         */
+        openNewStore();
+        caseObj.copyRawObjects(rawStore, newStore);
+        caseObj.readObjects(newStore, true /*doUpdate*/);
+        caseObj.checkEvolvedModel
+            (newStore.getModel(), env, false /*oldTypesExist*/);
+        closeNewStore();
+        closeRawStore();
+
+        closeAll();
+    }
+}
diff --git a/test/com/sleepycat/persist/test/EvolveTestBase.java b/test/com/sleepycat/persist/test/EvolveTestBase.java
new file mode 100644
index 0000000..62503f4
--- /dev/null
+++ b/test/com/sleepycat/persist/test/EvolveTestBase.java
@@ -0,0 +1,473 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.persist.test;
+
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.After;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.persist.EntityStore;
+import com.sleepycat.persist.StoreConfig;
+import com.sleepycat.persist.evolve.IncompatibleClassException;
+import com.sleepycat.persist.model.AnnotationModel;
+import com.sleepycat.persist.model.EntityModel;
+import com.sleepycat.persist.raw.RawStore;
+import com.sleepycat.util.test.TestBase;
+import com.sleepycat.util.test.TestEnv;
+
+/**
+ * Base class for EvolveTest and EvolveTestInit.
+ *
+ * @author Mark Hayes
+ */
+@RunWith(Parameterized.class)
+public abstract class EvolveTestBase extends TestBase {
+
+    /*
+     * When adding an evolve test class, three places need to be changed:
+     * 1) Add the unmodified class to EvolveClass.java.original.
+     * 2) Add the modified class to EvolveClass.java.
+     * 3) Add the class name to the ALL list below as a pair of strings. The
+     * first string in each pair is the name of the original class, and the
+     * second string is the name of the evolved class or null if the evolved
+     * name is the same as the original. The index in the list identifies a
+     * test case, and the class at that position identifies the old and new
+     * class to use for the test.
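+     *
+     * For example (hypothetical names, for illustration only): the pair
+     *
+     *   "Widget_Renamed", "Widget_Renamed_NewName",
+     *
+     * runs one case with Widget_Renamed as the original class and
+     * Widget_Renamed_NewName as the evolved class, while
+     *
+     *   "Widget_FieldAdded", null,
+     *
+     * uses the same class name before and after evolution.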
+ */ + private static final String[] ALL = { +//* + "DeletedEntity1_ClassRemoved", + "DeletedEntity1_ClassRemoved_NoMutation", + "DeletedEntity2_ClassRemoved", + "DeletedEntity2_ClassRemoved_WithDeleter", + "DeletedEntity3_AnnotRemoved_NoMutation", + null, + "DeletedEntity4_AnnotRemoved_WithDeleter", + null, + "DeletedEntity5_EntityToPersist_NoMutation", + null, + "DeletedEntity6_EntityToPersist_WithDeleter", + null, + "DeletedPersist1_ClassRemoved_NoMutation", + null, + "DeletedPersist2_ClassRemoved_WithDeleter", + null, + "DeletedPersist3_AnnotRemoved_NoMutation", + null, + "DeletedPersist4_AnnotRemoved_WithDeleter", + null, + "DeletedPersist5_PersistToEntity_NoMutation", + null, + "DeletedPersist6_PersistToEntity_WithDeleter", + null, + "RenamedEntity1_NewEntityName", + "RenamedEntity1_NewEntityName_NoMutation", + "RenamedEntity2_NewEntityName", + "RenamedEntity2_NewEntityName_WithRenamer", + "DeleteSuperclass1_NoMutation", + null, + "DeleteSuperclass2_WithConverter", + null, + "DeleteSuperclass3_WithDeleter", + null, + "DeleteSuperclass4_NoFields", + null, + "DeleteSuperclass5_Top", + null, + "InsertSuperclass1_Between", + null, + "InsertSuperclass2_Top", + null, + "DisallowNonKeyField_PrimitiveToObject", + null, + "DisallowNonKeyField_ObjectToPrimitive", + null, + "DisallowNonKeyField_ObjectToSubtype", + null, + "DisallowNonKeyField_ObjectToUnrelatedSimple", + null, + "DisallowNonKeyField_ObjectToUnrelatedOther", + null, + "DisallowNonKeyField_byte2boolean", + null, + "DisallowNonKeyField_short2byte", + null, + "DisallowNonKeyField_int2short", + null, + "DisallowNonKeyField_long2int", + null, + "DisallowNonKeyField_float2long", + null, + "DisallowNonKeyField_double2float", + null, + "DisallowNonKeyField_Byte2byte", + null, + "DisallowNonKeyField_Character2char", + null, + "DisallowNonKeyField_Short2short", + null, + "DisallowNonKeyField_Integer2int", + null, + "DisallowNonKeyField_Long2long", + null, + "DisallowNonKeyField_Float2float", + null, + "DisallowNonKeyField_Double2double", + null, + "DisallowNonKeyField_float2BigInt", + null, + "DisallowNonKeyField_BigInt2long", + null, + "DisallowSecKeyField_byte2short", + null, + "DisallowSecKeyField_char2int", + null, + "DisallowSecKeyField_short2int", + null, + "DisallowSecKeyField_int2long", + null, + "DisallowSecKeyField_long2float", + null, + "DisallowSecKeyField_float2double", + null, + "DisallowSecKeyField_Byte2short2", + null, + "DisallowSecKeyField_Character2int", + null, + "DisallowSecKeyField_Short2int2", + null, + "DisallowSecKeyField_Integer2long", + null, + "DisallowSecKeyField_Long2float2", + null, + "DisallowSecKeyField_Float2double2", + null, + "DisallowSecKeyField_int2BigInt", + null, + "DisallowPriKeyField_byte2short", + null, + "DisallowPriKeyField_char2int", + null, + "DisallowPriKeyField_short2int", + null, + "DisallowPriKeyField_int2long", + null, + "DisallowPriKeyField_long2float", + null, + "DisallowPriKeyField_float2double", + null, + "DisallowPriKeyField_Byte2short2", + null, + "DisallowPriKeyField_Character2int", + null, + "DisallowPriKeyField_Short2int2", + null, + "DisallowPriKeyField_Integer2long", + null, + "DisallowPriKeyField_Long2float2", + null, + "DisallowPriKeyField_Float2double2", + null, + "DisallowPriKeyField_Long2BigInt", + null, + "DisallowCompositeKeyField_byte2short", + null, + "AllowPriKeyField_Byte2byte2", + null, + "AllowPriKeyField_byte2Byte", + null, + "AllowFieldTypeChanges", + null, + "ConvertFieldContent_Entity", + null, + "ConvertExample1_Entity", + null, + "ConvertExample2_Person", + 
null, + "ConvertExample3_Person", + null, + "ConvertExample3Reverse_Person", + null, + "ConvertExample4_Entity", + null, + "ConvertExample5_Entity", + null, + "AllowFieldAddDelete", + null, + "ProxiedClass_Entity", + null, + "DisallowChangeProxyFor", + null, + "DisallowDeleteProxyFor", + null, + "ArrayNameChange_Entity", + null, + "AddEnumConstant_Entity", + null, + "InsertEnumConstant_Entity", + null, + "DeleteEnumConstant_NoMutation", + null, + "DisallowChangeKeyRelate", + null, + "AllowChangeKeyMetadata", + null, + "AllowChangeKeyMetadataInSubclass", + null, + "AllowAddSecondary", + null, + "FieldAddAndConvert", + null, + "RenameSecFieldDestroyOrder_1", + null, + "RenameSecFieldDestroyOrder_2", + null, + "RenameSecFieldDestroyOrder_3", + null, + "DeleteSecAnnotationDestroyOrder", + null, + "ProxyClassFieldChanged", + null, + "ProxyClassObjectFieldChanged", + null, + "ProxyClassArrayFieldChanged", + null, + "ProxyClassObjectArrayFieldChanged", + null, + "MultipleSelfRefs", + null, +//*/ + }; + + File envHome; + Environment env; + EntityStore store; + RawStore rawStore; + EntityStore newStore; + String caseClsName; + Class caseCls; + EvolveCase caseObj; + String caseLabel; + + @Parameters + public static List genParams() { + return paramsHelper(); + } + + protected static List paramsHelper() { + List list = new ArrayList(); + for (int i = 0; i < ALL.length; i += 2) { + if (ALL[i+1] == null) { + list.add(new Object[]{ALL[i], ALL[i]}); + } else { + list.add(new Object[]{ALL[i], ALL[i+1]}); + } + } + + return list; + } + + public EvolveTestBase(String originalClsName, String evolvedClsName) + throws Exception{ + String caseClsName = useEvolvedClass() ? evolvedClsName + : originalClsName; + caseClsName = "com.sleepycat.persist.test.EvolveClasses$" + caseClsName; + + this.caseClsName = caseClsName; + this.caseCls = Class.forName(caseClsName); + this.caseObj = (EvolveCase) caseCls.newInstance(); + this.caseLabel = evolvedClsName; + customName = "-" + caseLabel; + } + + abstract boolean useEvolvedClass(); + + File getTestInitHome(boolean evolved) { + return new File + (System.getProperty("testevolvedir"), + (evolved ? "evolved" : "original") + '/' + caseLabel); + } + + @After + public void tearDown() { + + if (env != null) { + try { + closeAll(); + } catch (Throwable e) { + System.out.println("During tearDown: " + e); + } + } + envHome = null; + env = null; + store = null; + caseCls = null; + caseObj = null; + caseLabel = null; + + /* Do not delete log files so they can be used by 2nd phase of test. */ + } + + /** + * @throws FileNotFoundException from DB core. + */ + void openEnv() + throws FileNotFoundException, DatabaseException { + + EnvironmentConfig config = TestEnv.TXN.getConfig(); + config.setAllowCreate(true); + env = new Environment(envHome, config); + } + + /** + * Returns true if the store was opened successfully. Returns false if the + * store could not be opened because an exception was expected -- this is + * not a test failure but no further tests for an EntityStore may be run. 
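+     *
+     * Callers therefore follow this pattern (a sketch of the usage seen in
+     * EvolveTest):
+     *
+     *   if (openStoreReadOnly()) {
+     *       // exercise and verify the store ...
+     *       closeStore();
+     *   }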
+ */ + private boolean openStore(StoreConfig config) + throws Exception { + + config.setTransactional(true); + config.setMutations(caseObj.getMutations()); + + EntityModel model = new AnnotationModel(); + config.setModel(model); + caseObj.configure(model, config); + + String expectException = caseObj.getStoreOpenException(); + try { + store = new EntityStore(env, EvolveCase.STORE_NAME, config); + if (expectException != null) { + fail("Expected: " + expectException); + } + } catch (Exception e) { + if (expectException != null) { + String actualMsg = e.getMessage(); + /* */ + actualMsg = com.sleepycat.je.util.TestUtils.skipVersion(e); + /* */ + if (e instanceof IncompatibleClassException) { + actualMsg = actualMsg.substring + (0, actualMsg.lastIndexOf("\n---\n(Note that")); + } + actualMsg = e.getClass().getName() + ": " + actualMsg; + if (!expectException.equals(actualMsg)) { + e.printStackTrace(); + } + EvolveCase.checkEquals(expectException, actualMsg); + return false; + } else { + throw e; + } + } + return true; + } + + boolean openStoreReadOnly() + throws Exception { + + StoreConfig config = new StoreConfig(); + config.setReadOnly(true); + return openStore(config); + } + + boolean openStoreReadWrite() + throws Exception { + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(true); + final boolean retVal = openStore(config); + if (retVal) { + caseObj.newMetadataWritten = true; + } + return retVal; + } + + void openRawStore() + throws DatabaseException { + + StoreConfig config = new StoreConfig(); + config.setTransactional(true); + rawStore = new RawStore(env, EvolveCase.STORE_NAME, config); + } + + void closeStore() + throws DatabaseException { + + if (store != null) { + store.close(); + store = null; + } + } + + void openNewStore() + throws Exception { + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(true); + config.setTransactional(true); + + EntityModel model = new AnnotationModel(); + config.setModel(model); + caseObj.configure(model, config); + + newStore = new EntityStore(env, "new", config); + } + + void closeNewStore() + throws DatabaseException { + + if (newStore != null) { + newStore.close(); + newStore = null; + } + } + + void closeRawStore() + throws DatabaseException { + + if (rawStore != null) { + rawStore.close(); + rawStore = null; + } + } + + void closeEnv() + throws DatabaseException { + + if (env != null) { + env.close(); + env = null; + } + } + + void closeAll() + throws DatabaseException { + + closeStore(); + closeRawStore(); + closeNewStore(); + closeEnv(); + } +} diff --git a/test/com/sleepycat/persist/test/EvolveTestInit.java b/test/com/sleepycat/persist/test/EvolveTestInit.java new file mode 100644 index 0000000..dda67ac --- /dev/null +++ b/test/com/sleepycat/persist/test/EvolveTestInit.java @@ -0,0 +1,62 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ +package com.sleepycat.persist.test; + +import static org.junit.Assert.fail; + +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Runs part one of the EvolveTest. This part is run with the old/original + * version of EvolveClasses in the classpath. It creates a fresh environment + * and store containing instances of the original class. When EvolveTest is + * run, it will read/write/evolve these objects from the store created here. + * + * @author Mark Hayes + */ +public class EvolveTestInit extends EvolveTestBase { + + public EvolveTestInit(String originalClsName, String evolvedClsName) + throws Exception { + super(originalClsName, evolvedClsName); + } + + @Override + boolean useEvolvedClass() { + return false; + } + + @Before + public void setUp() { + + envHome = getTestInitHome(false /*evolved*/); + envHome.mkdirs(); + SharedTestUtils.emptyDir(envHome); + } + + @Test + public void testInit() + throws Exception { + + openEnv(); + if (!openStoreReadWrite()) { + fail(); + } + caseObj.writeObjects(store); + caseObj.checkUnevolvedModel(store.getModel(), env); + closeAll(); + } +} diff --git a/test/com/sleepycat/persist/test/ForeignKeyTest.java b/test/com/sleepycat/persist/test/ForeignKeyTest.java new file mode 100644 index 0000000..e72944b --- /dev/null +++ b/test/com/sleepycat/persist/test/ForeignKeyTest.java @@ -0,0 +1,360 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.DeleteAction.ABORT; +import static com.sleepycat.persist.model.DeleteAction.CASCADE; +import static com.sleepycat.persist.model.DeleteAction.NULLIFY; +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DeleteConstraintException; +import com.sleepycat.je.ForeignConstraintException; +/* */ +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.DeleteAction; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.TxnTestCase; + +/** + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class ForeignKeyTest extends TxnTestCase { + + protected static final DeleteAction[] ACTIONS = { + ABORT, + NULLIFY, + CASCADE, + }; + + protected static final String[] ACTION_LABELS = { + "ABORT", + "NULLIFY", + "CASCADE", + }; + + @Parameters + public static List genParams() { + return paramsHelper(false); + } + + protected static List paramsHelper(boolean rep) { + final String[] txnTypes = getTxnTypes(null, rep); + final List newParams = new ArrayList(); + int i = 0; + for (final DeleteAction action : ACTIONS) { + for (final String type : txnTypes) { + newParams.add(new Object[] + {type, action, ACTION_LABELS[i], "UseSubclass"}); + newParams.add(new Object[] + {type, action, ACTION_LABELS[i], "UseBaseclass"}); + } + i++; + } + return newParams; + } + + public ForeignKeyTest(String type, + DeleteAction action, + String label, + String useClassLabel){ + initEnvConfig(); + txnType = type; + isTransactional = (txnType != TXN_NULL); + onDelete = action; + onDeleteLabel = label; + useSubclassLabel = useClassLabel; + customName = txnType + '-' + onDeleteLabel + "-" + useSubclassLabel; + } + + private EntityStore store; + private PrimaryIndex pri1; + private PrimaryIndex pri2; + private SecondaryIndex sec1; + private SecondaryIndex sec2; + private final DeleteAction onDelete; + private final String onDeleteLabel; + private boolean useSubclass; + private final String useSubclassLabel; + + private void open() + throws DatabaseException { + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + + store = new EntityStore(env, "test", config); + + pri1 = store.getPrimaryIndex(String.class, Entity1.class); + sec1 = store.getSecondaryIndex(pri1, String.class, "sk"); + pri2 = store.getPrimaryIndex(String.class, Entity2.class); + sec2 = store.getSecondaryIndex + (pri2, String.class, "sk_" + onDeleteLabel); + } + + private void close() + throws DatabaseException { + + store.close(); + } + + @Test + public void 
testForeignKeys() + throws Exception { + + open(); + Transaction txn = txnBegin(); + + Entity1 o1 = new Entity1("pk1", "sk1"); + assertNull(pri1.put(txn, o1)); + + assertEquals(o1, pri1.get(txn, "pk1", null)); + assertEquals(o1, sec1.get(txn, "sk1", null)); + + Entity2 o2 = (useSubclass ? + new Entity3("pk2", "pk1", onDelete) : + new Entity2("pk2", "pk1", onDelete)); + assertNull(pri2.put(txn, o2)); + + assertEquals(o2, pri2.get(txn, "pk2", null)); + assertEquals(o2, sec2.get(txn, "pk1", null)); + + txnCommit(txn); + txn = txnBegin(); + + /* + * pri1 contains o1 with primary key "pk1" and index key "sk1". + * + * pri2 contains o2 with primary key "pk2" and foreign key "pk1", + * which is the primary key of pri1. + */ + if (onDelete == ABORT) { + + /* Test that we abort trying to delete a referenced key. */ + + try { + pri1.delete(txn, "pk1"); + fail(); + /* */ + } catch (DeleteConstraintException expected) { + txnAbort(txn); + txn = txnBegin(); + /* */ + } catch (DatabaseException expected) { + assertTrue(!DbCompat.NEW_JE_EXCEPTIONS); + txnAbort(txn); + txn = txnBegin(); + } + + /* + * Test that we can put a record into store2 with a null foreign + * key value. + */ + o2 = (useSubclass ? + new Entity3("pk2", null, onDelete) : + new Entity2("pk2", null, onDelete)); + assertNotNull(pri2.put(txn, o2)); + assertEquals(o2, pri2.get(txn, "pk2", null)); + + /* + * The index2 record should have been deleted since the key was set + * to null above. + */ + assertNull(sec2.get(txn, "pk1", null)); + + /* + * Test that now we can delete the record in store1, since it is no + * longer referenced. + */ + assertNotNull(pri1.delete(txn, "pk1")); + assertNull(pri1.get(txn, "pk1", null)); + assertNull(sec1.get(txn, "sk1", null)); + + } else if (onDelete == NULLIFY) { + + /* Delete the referenced key. */ + assertNotNull(pri1.delete(txn, "pk1")); + assertNull(pri1.get(txn, "pk1", null)); + assertNull(sec1.get(txn, "sk1", null)); + + /* + * The store2 record should still exist, but should have an empty + * secondary key since it was nullified. + */ + o2 = pri2.get(txn, "pk2", null); + assertNotNull(o2); + assertEquals("pk2", o2.pk); + assertEquals(null, o2.getSk(onDelete)); + + } else if (onDelete == CASCADE) { + + /* Delete the referenced key. */ + assertNotNull(pri1.delete(txn, "pk1")); + assertNull(pri1.get(txn, "pk1", null)); + assertNull(sec1.get(txn, "sk1", null)); + + /* The store2 record should have deleted also. */ + assertNull(pri2.get(txn, "pk2", null)); + assertNull(sec2.get(txn, "pk1", null)); + + } else { + throw new IllegalStateException(); + } + + /* + * Test that a foreign key value may not be used that is not present in + * the foreign store. "pk2" is not in store1 in this case. + */ + Entity2 o3 = (useSubclass ? 
+ new Entity3("pk3", "pk2", onDelete) : + new Entity2("pk3", "pk2", onDelete)); + try { + pri2.put(txn, o3); + fail(); + /* */ + } catch (ForeignConstraintException expected) { + /* */ + } catch (DatabaseException expected) { + assertTrue(!DbCompat.NEW_JE_EXCEPTIONS); + } + + txnAbort(txn); + close(); + } + + @Entity + static class Entity1 { + + @PrimaryKey + String pk; + + @SecondaryKey(relate=ONE_TO_ONE) + String sk; + + private Entity1() {} + + Entity1(String pk, String sk) { + this.pk = pk; + this.sk = sk; + } + + @Override + public boolean equals(Object other) { + Entity1 o = (Entity1) other; + return nullOrEqual(pk, o.pk) && + nullOrEqual(sk, o.sk); + } + } + + @Entity + static class Entity2 { + + @PrimaryKey + String pk; + + @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class, + onRelatedEntityDelete=ABORT) + String sk_ABORT; + + @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class, + onRelatedEntityDelete=CASCADE) + String sk_CASCADE; + + @SecondaryKey(relate=ONE_TO_ONE, relatedEntity=Entity1.class, + onRelatedEntityDelete=NULLIFY) + String sk_NULLIFY; + + private Entity2() {} + + Entity2(String pk, String sk, DeleteAction action) { + this.pk = pk; + switch (action) { + case ABORT: + sk_ABORT = sk; + break; + case CASCADE: + sk_CASCADE = sk; + break; + case NULLIFY: + sk_NULLIFY = sk; + break; + default: + throw new IllegalArgumentException(); + } + } + + String getSk(DeleteAction action) { + switch (action) { + case ABORT: + return sk_ABORT; + case CASCADE: + return sk_CASCADE; + case NULLIFY: + return sk_NULLIFY; + default: + throw new IllegalArgumentException(); + } + } + + @Override + public boolean equals(Object other) { + Entity2 o = (Entity2) other; + return nullOrEqual(pk, o.pk) && + nullOrEqual(sk_ABORT, o.sk_ABORT) && + nullOrEqual(sk_CASCADE, o.sk_CASCADE) && + nullOrEqual(sk_NULLIFY, o.sk_NULLIFY); + } + } + + @Persistent + static class Entity3 extends Entity2 { + Entity3() {} + + Entity3(String pk, String sk, DeleteAction action) { + super(pk, sk, action); + } + } + + static boolean nullOrEqual(Object o1, Object o2) { + if (o1 == null) { + return o2 == null; + } else { + return o1.equals(o2); + } + } +} diff --git a/test/com/sleepycat/persist/test/GetLastRestartTest.java b/test/com/sleepycat/persist/test/GetLastRestartTest.java new file mode 100644 index 0000000..f7ad2b5 --- /dev/null +++ b/test/com/sleepycat/persist/test/GetLastRestartTest.java @@ -0,0 +1,533 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import com.sleepycat.compat.DbCompat; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DeadlockException; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityIndex; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.TxnTestCase; + +/** + * Tests that getLast restarts work correctly. See RangeCursor.getLast. + * + * This tests getLast via the DPL API simply because it's convenient. It could + * have been tested via the collections API, or directly using RangeCursor. + * + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class GetLastRestartTest extends TxnTestCase { + + private static final int N_ITERS = 5000; + + @Entity + static class MyEntity { + + @PrimaryKey + private int priKey; + + @SecondaryKey(relate=MANY_TO_ONE) + private Integer secKey; + + private MyEntity() {} + + MyEntity(final int priKey, final Integer secKey) { + this.priKey = priKey; + this.secKey = secKey; + } + } + + private EntityStore store; + private PrimaryIndex priIndex; + private SecondaryIndex secIndex; + private volatile Thread insertThread; + private volatile Exception insertException; + + @Parameters + public static List genParams() { + + /* TXN_NULL in DB doesn't support multi-threading. */ + + String[] txnTypes = new String[] { + TxnTestCase.TXN_USER, + TxnTestCase.TXN_AUTO, + TxnTestCase.TXN_CDB }; + + /* */ + txnTypes = new String[] { + TxnTestCase.TXN_NULL, + TxnTestCase.TXN_USER, + TxnTestCase.TXN_AUTO, + TxnTestCase.TXN_CDB }; + /* */ + + return getTxnParams(txnTypes, false); + } + + public GetLastRestartTest(String type) + throws DatabaseException { + + initEnvConfig(); + + DbCompat.enableDeadlockDetection(envConfig, type.equals(TXN_CDB)); + + /* + * Use large lock timeout because getLast is retrying in a loop while + * it holds a lock, since it doesn't release locks when it restarts the + * operation. This is a disadvantage of implementing getLast on top of + * the Cursor API, rather than in the cursor code. However, the looping + * is more of a problem in this test than it should be in the real + * life, because here we're tightly looping in another thread, + * inserting/deleting a single record. 
+ */ + /* */ + envConfig.setLockTimeout(10, TimeUnit.SECONDS); + /* */ + + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + private void open() + throws DatabaseException { + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + store = new EntityStore(env, "test", config); + priIndex = store.getPrimaryIndex(Integer.class, MyEntity.class); + secIndex = store.getSecondaryIndex(priIndex, Integer.class, "secKey"); + } + + private void close() + throws DatabaseException { + + try { + store.close(); + } finally { + store = null; + priIndex = null; + secIndex = null; + } + } + + @Override + @After + public void tearDown() + throws Exception { + + final boolean stoppedInserts = stopInserts(); + + try { + if (store != null) { + close(); + } + } catch (Throwable e) { + System.out.println("During tearDown: " + e); + } + + super.tearDown(); + + if (!stoppedInserts) { + fail("Could not kill insert thread"); + } + + if (insertException != null) { + throw insertException; + } + } + + /** + * Keys: 1, 2. Range: (-, 3) Expect: getLast == 2. + * + * RangeCursor.getLast calls getSearchKeyRange(3) which returns NOTFOUND. + * It calls getLast, but insertThread has inserted key 3, so getLast lands + * on key 3 which is outside the range. It must restart. + */ + @Test + public void testMainKeyRangeNoDups_GetLast() + throws DatabaseException, InterruptedException { + + open(); + + insert(1); + insert(2); + startInserts(3); + checkRange( + 3 /*endKey*/, false /*endInclusive*/, + 2 /*expectLastKey*/); + + assertTrue(stopInserts()); + close(); + } + + /** + * Keys: 1, 2, 4. Range: (-, 3) Expect: getLast == 2. + * + * RangeCursor.getLast calls getSearchKeyRange(3) which lands on key 4. It + * calls getPrev, but insertThread has inserted key 3, so getPrev lands on + * key 3 which is outside the range. It must restart. + */ + @Test + public void testMainKeyRangeNoDups_GetPrev() + throws DatabaseException, InterruptedException { + + open(); + + insert(1); + insert(2); + insert(4); + startInserts(3); + checkRange( + 3 /*endKey*/, false /*endInclusive*/, + 2 /*expectLastKey*/); + + assertTrue(stopInserts()); + close(); + } + + /** + * Records: 1/1, 2/2, 3/2. SecRange: [2, 2] Expect: getLast == 3/2. + * + * RangeCursor.getLast calls getSearchKeyRange(2) which returns 2/2. It + * calls getNextNoDup which returns NOTFOUND. It calls getLast, but + * insertThread has inserted key 4/3, so getLast lands on key 4/3 which is + * outside the range. It must restart. + */ + @Test + public void testMainKeyRangeWithDups_GetLast() + throws DatabaseException, InterruptedException { + + open(); + + insert(1, 1); + insert(2, 2); + insert(3, 2); + startInserts(4, 3); + checkSecRange( + 2 /*secKey*/, + 3 /*expectLastPKey*/, 2 /*expectLastSecKey*/); + + assertTrue(stopInserts()); + close(); + } + + /** + * Records: 1/1, 2/2, 3/2, 4/4. SecRange: [2, 2] Expect: getLast == 3/2. + * + * RangeCursor.getLast calls getSearchKeyRange(2) which returns 2/2. It + * calls getNextNoDup which returns 4/4. It calls getPrev, but insertThread + * has inserted key 5/3, so getPrev lands on key 5/3 which is outside the + * range. It must restart. 
+ */ + @Test + public void testMainKeyRangeWithDups_GetPrev() + throws DatabaseException, InterruptedException { + + open(); + + insert(1, 1); + insert(2, 2); + insert(3, 2); + insert(4, 4); + startInserts(5, 3); + checkSecRange( + 2 /*secKey*/, + 3 /*expectLastPKey*/, 2 /*expectLastSecKey*/); + + assertTrue(stopInserts()); + close(); + } + + /** + * Records: 1/1, 2/2, 3/2. SecRange: [2, 2] PKeyRange: [2, -) + * Expect: getLast == 3/2. + * + * RangeCursor.getLast calls getSearchKey(2) which returns 2/2. It calls + * getNextNoDup which returns NOTFOUND. It calls getLast, but insertThread + * has inserted key 4/3, so getLast lands on key 4/3 which is outside the + * range. It must restart. + */ + @Test + public void testDupRangeNoEndKey_GetLast() + throws DatabaseException, InterruptedException { + + open(); + + insert(1, 1); + insert(2, 2); + insert(3, 2); + startInserts(4, 3); + checkPKeyRange( + 2 /*secKey*/, + 2 /*beginPKey*/, true /*beginInclusive*/, + null /*endPKey*/, false /*endInclusive*/, + 3 /*expectLastPKey*/, 2 /*expectLastSecKey*/); + + assertTrue(stopInserts()); + close(); + } + + /** + * Records: 1/1, 2/2, 3/2, 4/4. SecRange: [2, 2] PKeyRange: [2, -) + * Expect: getLast == 3/2. + * + * RangeCursor.getLast calls getSearchKey(2) which returns 2/2. It calls + * getNextNoDup which returns 4/4. It calls getPrev, but insertThread has + * inserted key 5/3, so getPrev lands on key 5/3 which is outside the + * range. It must restart. + */ + @Test + public void testDupRangeNoEndKey_GetPrev() + throws DatabaseException, InterruptedException { + + open(); + + insert(1, 1); + insert(2, 2); + insert(3, 2); + insert(4, 4); + startInserts(5, 3); + checkPKeyRange( + 2 /*secKey*/, + 2 /*beginPKey*/, true /*beginInclusive*/, + null /*endPKey*/, false /*endInclusive*/, + 3 /*expectLastPKey*/, 2 /*expectLastSecKey*/); + + assertTrue(stopInserts()); + close(); + } + + /** + * Records: 1/1, 2/2, 3/2, 5/2. SecRange: [2, 2] PKeyRange: (-, 4) + * Expect: getLast == 3/2. + * + * RangeCursor.getLast calls getSearchBothRange(2, 4) which returns 5/2. It + * calls getPrevDup, but insertThread has inserted key 4/2, so getPrevDup + * lands on key 4/2 which is outside the range. It must restart. 
+ */ + @Test + public void testDupRangeWithEndKey() + throws DatabaseException, InterruptedException { + + open(); + + insert(1, 1); + insert(2, 2); + insert(3, 2); + insert(5, 2); + startInserts(4, 2); + checkPKeyRange( + 2 /*secKey*/, + null /*beginPKey*/, false /*beginInclusive*/, + 4 /*endPKey*/, false /*endInclusive*/, + 3 /*expectLastPKey*/, 2 /*expectLastSecKey*/); + + assertTrue(stopInserts()); + close(); + } + + private void checkRange(int endKey, + boolean endInclusive, + int expectLastKey) + throws DatabaseException { + + for (int i = 0; i < N_ITERS; i += 1) { + final Transaction txn = txnBeginCursor(); + + final EntityCursor c = priIndex.entities( + txn, null, false, endKey, endInclusive, null); + + try { + final MyEntity e = c.last(); + assertNotNull(e); + assertEquals(expectLastKey, e.priKey); + } finally { + c.close(); + txnCommit(txn); + } + } + } + + private void checkSecRange(int secKey, + int expectLastPKey, + Integer expectLastSecKey) + throws DatabaseException { + + final EntityIndex subIndex = + secIndex.subIndex(secKey); + + for (int i = 0; i < N_ITERS; i += 1) { + final Transaction txn = txnBeginCursor(); + + final EntityCursor c = subIndex.entities(txn, null); + + try { + final MyEntity e = c.last(); + assertNotNull(e); + assertEquals(expectLastPKey, e.priKey); + assertEquals(expectLastSecKey, e.secKey); + } finally { + c.close(); + txnCommit(txn); + } + } + } + + private void checkPKeyRange(int secKey, + Integer beginPKey, + boolean beginInclusive, + Integer endPKey, + boolean endInclusive, + int expectLastPKey, + Integer expectLastSecKey) + throws DatabaseException { + + final EntityIndex subIndex = + secIndex.subIndex(secKey); + + for (int i = 0; i < N_ITERS; i += 1) { + final Transaction txn = txnBeginCursor(); + + final EntityCursor c = subIndex.entities( + txn, beginPKey, beginInclusive, endPKey, endInclusive, null); + + try { + final MyEntity e = c.last(); + assertNotNull(e); + assertEquals(expectLastPKey, e.priKey); + assertEquals(expectLastSecKey, e.secKey); + } finally { + c.close(); + txnCommit(txn); + } + } + } + + private void startInserts(int priKey) + throws DatabaseException { + + startInserts(priKey, null); + } + + private void startInserts(final int priKey, final Integer secKey) + throws DatabaseException { + + final EntityIndex subIndex = + (secKey != null) ? + secIndex.subIndex(secKey) : + null; + + insertThread = new Thread() { + @Override + public void run() { + try { + while (insertThread != null) { + try { + if (secKey != null) { + insert(priKey, secKey); + subIndex.delete(priKey); + } else { + insert(priKey); + priIndex.delete(priKey); + } + } catch (DeadlockException e) { + /* */ + throw e; + /* */ + // ignore on DB, but not JE. + } + } + } catch (Exception e) { + insertException = e; + } + } + }; + + insertThread.start(); + } + + private boolean stopInserts() + throws InterruptedException { + + if (insertThread == null) { + return true; + } + + final Thread t = insertThread; + insertThread = null; + + /* + * First try stopping without interrupts, since they will invalidate + * the Environment. + */ + long start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < 10 * 1000) { + if (!t.isAlive()) { + return true; + } + Thread.sleep(10); + } + + /* Try thread interrupts as a last resort. 
*/ + start = System.currentTimeMillis(); + while (System.currentTimeMillis() - start < 10 * 1000) { + t.interrupt(); + Thread.sleep(10); + if (!t.isAlive()) { + return true; + } + } + + return false; + } + + private void insert(int priKey) + throws DatabaseException { + + insert(priKey, null); + } + + private void insert(int priKey, Integer secKey) + throws DatabaseException { + + priIndex.put(new MyEntity(priKey, secKey)); + } +} diff --git a/test/com/sleepycat/persist/test/IndexTest.java b/test/com/sleepycat/persist/test/IndexTest.java new file mode 100644 index 0000000..7848dbf --- /dev/null +++ b/test/com/sleepycat/persist/test/IndexTest.java @@ -0,0 +1,893 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY; +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY; +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.collections.MapEntryParameter; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityIndex; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.persist.raw.RawObject; +import com.sleepycat.persist.raw.RawStore; +import com.sleepycat.persist.raw.RawType; +import com.sleepycat.util.test.TxnTestCase; + +/** + * Tests EntityIndex and EntityCursor in all their permutations. 
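+ * Coverage below spans the primary index and all four secondary
+ * relationship types (ONE_TO_ONE, MANY_TO_ONE, ONE_TO_MANY, MANY_TO_MANY),
+ * each accessed both as a typed index and through the RawStore, plus key
+ * and entity cursors over open and closed ranges and the sortedMap()
+ * Map/Collection views.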
+ *
+ * @author Mark Hayes
+ */
+@RunWith(Parameterized.class)
+public class IndexTest extends TxnTestCase {
+
+    private static final int N_RECORDS = 5;
+    private static final int THREE_TO_ONE = 3;
+
+    @Parameters
+    public static List<Object[]> genParams() {
+        return getTxnParams(null, false);
+    }
+
+    public IndexTest(String type){
+        initEnvConfig();
+        txnType = type;
+        isTransactional = (txnType != TXN_NULL);
+        customName = txnType;
+    }
+
+    private EntityStore store;
+    private PrimaryIndex<Integer, MyEntity> primary;
+    private SecondaryIndex<Integer, Integer, MyEntity> oneToOne;
+    private SecondaryIndex<Integer, Integer, MyEntity> manyToOne;
+    private SecondaryIndex<Integer, Integer, MyEntity> oneToMany;
+    private SecondaryIndex<Integer, Integer, MyEntity> manyToMany;
+    private RawStore rawStore;
+    private RawType entityType;
+    private PrimaryIndex<Object, RawObject> primaryRaw;
+    private SecondaryIndex<Object, Object, RawObject> oneToOneRaw;
+    private SecondaryIndex<Object, Object, RawObject> manyToOneRaw;
+    private SecondaryIndex<Object, Object, RawObject> oneToManyRaw;
+    private SecondaryIndex<Object, Object, RawObject> manyToManyRaw;
+
+    /**
+     * Opens the store.
+     */
+    private void open()
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(envConfig.getAllowCreate());
+        config.setTransactional(envConfig.getTransactional());
+
+        store = new EntityStore(env, "test", config);
+
+        primary = store.getPrimaryIndex(Integer.class, MyEntity.class);
+        oneToOne =
+            store.getSecondaryIndex(primary, Integer.class, "oneToOne");
+        manyToOne =
+            store.getSecondaryIndex(primary, Integer.class, "manyToOne");
+        oneToMany =
+            store.getSecondaryIndex(primary, Integer.class, "oneToMany");
+        manyToMany =
+            store.getSecondaryIndex(primary, Integer.class, "manyToMany");
+
+        assertNotNull(primary);
+        assertNotNull(oneToOne);
+        assertNotNull(manyToOne);
+        assertNotNull(oneToMany);
+        assertNotNull(manyToMany);
+
+        rawStore = new RawStore(env, "test", config);
+        String clsName = MyEntity.class.getName();
+        entityType = rawStore.getModel().getRawType(clsName);
+        assertNotNull(entityType);
+
+        primaryRaw = rawStore.getPrimaryIndex(clsName);
+        oneToOneRaw = rawStore.getSecondaryIndex(clsName, "oneToOne");
+        manyToOneRaw = rawStore.getSecondaryIndex(clsName, "manyToOne");
+        oneToManyRaw = rawStore.getSecondaryIndex(clsName, "oneToMany");
+        manyToManyRaw = rawStore.getSecondaryIndex(clsName, "manyToMany");
+
+        assertNotNull(primaryRaw);
+        assertNotNull(oneToOneRaw);
+        assertNotNull(manyToOneRaw);
+        assertNotNull(oneToManyRaw);
+        assertNotNull(manyToManyRaw);
+    }
+
+    /**
+     * Closes the store.
+     */
+    private void close()
+        throws DatabaseException {
+
+        store.close();
+        store = null;
+        rawStore.close();
+        rawStore = null;
+    }
+
+    /**
+     * The store must be closed before closing the environment.
+     */
+    @After
+    public void tearDown()
+        throws Exception {
+
+        try {
+            if (rawStore != null) {
+                rawStore.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("During tearDown: " + e);
+        }
+        try {
+            if (store != null) {
+                store.close();
+            }
+        } catch (Throwable e) {
+            System.out.println("During tearDown: " + e);
+        }
+        store = null;
+        rawStore = null;
+        super.tearDown();
+    }
+
+    /**
+     * Primary keys: {0, 1, 2, 3, 4}
+     */
+    @Test
+    public void testPrimary()
+        throws DatabaseException {
+
+        SortedMap<Integer, SortedSet<Integer>> expected =
+            new TreeMap<Integer, SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            SortedSet<Integer> values = new TreeSet<Integer>();
+            values.add(priKey);
+            expected.put(priKey, values);
+        }
+
+        open();
+        addEntities(primary);
+        checkIndex(primary, expected, keyGetter, entityGetter);
+        checkIndex(primaryRaw, expected, rawKeyGetter, rawEntityGetter);
+
+        /* Close and reopen, then recheck indices. */
+        close();
+        open();
+        checkIndex(primary, expected, keyGetter, entityGetter);
+        checkIndex(primaryRaw, expected, rawKeyGetter, rawEntityGetter);
+
+        /* Check primary delete, last key first for variety. */
+        for (int priKey = N_RECORDS - 1; priKey >= 0; priKey -= 1) {
+            boolean useRaw = ((priKey & 1) != 0);
+            Transaction txn = txnBegin();
+            if (useRaw) {
+                primaryRaw.delete(txn, priKey);
+            } else {
+                primary.delete(txn, priKey);
+            }
+            txnCommit(txn);
+            expected.remove(priKey);
+            checkIndex(primary, expected, keyGetter, entityGetter);
+        }
+        checkAllEmpty();
+
+        /* Check PrimaryIndex put operations. */
+        MyEntity e;
+        Transaction txn = txnBegin();
+        /* put() */
+        e = primary.put(txn, new MyEntity(1));
+        assertNull(e);
+        e = primary.get(txn, 1, null);
+        assertEquals(1, e.key);
+        /* putNoReturn() */
+        primary.putNoReturn(txn, new MyEntity(2));
+        e = primary.get(txn, 2, null);
+        assertEquals(2, e.key);
+        /* putNoOverwrite */
+        assertTrue(!primary.putNoOverwrite(txn, new MyEntity(1)));
+        assertTrue(!primary.putNoOverwrite(txn, new MyEntity(2)));
+        assertTrue(primary.putNoOverwrite(txn, new MyEntity(3)));
+        e = primary.get(txn, 3, null);
+        assertEquals(3, e.key);
+        txnCommit(txn);
+        close();
+    }
+
+    /**
+     * { 0:0, 1:-1, 2:-2, 3:-3, 4:-4 }
+     */
+    @Test
+    public void testOneToOne()
+        throws DatabaseException {
+
+        SortedMap<Integer, SortedSet<Integer>> expected =
+            new TreeMap<Integer, SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            SortedSet<Integer> values = new TreeSet<Integer>();
+            values.add(priKey);
+            Integer secKey = (-priKey);
+            expected.put(secKey, values);
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(oneToOne, oneToOneRaw, expected);
+        checkDelete(oneToOne, oneToOneRaw, expected);
+        close();
+    }
+
+    /**
+     * { 0:0, 1:1, 2:2, 3:0, 4:1 }
+     */
+    @Test
+    public void testManyToOne()
+        throws DatabaseException {
+
+        SortedMap<Integer, SortedSet<Integer>> expected =
+            new TreeMap<Integer, SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            Integer secKey = priKey % THREE_TO_ONE;
+            SortedSet<Integer> values = expected.get(secKey);
+            if (values == null) {
+                values = new TreeSet<Integer>();
+                expected.put(secKey, values);
+            }
+            values.add(priKey);
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(manyToOne, manyToOneRaw, expected);
+        checkDelete(manyToOne, manyToOneRaw, expected);
+        close();
+    }
+
+    /**
+     * { 0:{}, 1:{10}, 2:{20,21}, 3:{30,31,32}, 4:{40,41,42,43}
+     */
+    @Test
+    public void testOneToMany()
+        throws DatabaseException {
+
+        SortedMap<Integer, SortedSet<Integer>> expected =
+            new TreeMap<Integer, SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            for (int i = 0; i < priKey; i += 1) {
+                Integer secKey = (N_RECORDS * priKey) + i;
+                SortedSet<Integer> values = expected.get(secKey);
+                if (values == null) {
+                    values = new TreeSet<Integer>();
+                    expected.put(secKey, values);
+                }
+                values.add(priKey);
+            }
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(oneToMany, oneToManyRaw, expected);
+        checkDelete(oneToMany, oneToManyRaw, expected);
+        close();
+    }
+
+    /**
+     * { 0:{}, 1:{0}, 2:{0,1}, 3:{0,1,2}, 4:{0,1,2,3}
+     */
+    @Test
+    public void testManyToMany()
+        throws DatabaseException {
+
+        SortedMap<Integer, SortedSet<Integer>> expected =
+            new TreeMap<Integer, SortedSet<Integer>>();
+
+        for (int priKey = 0; priKey < N_RECORDS; priKey += 1) {
+            for (int i = 0; i < priKey; i += 1) {
+                Integer secKey = i;
+                SortedSet<Integer> values = expected.get(secKey);
+                if (values == null) {
+                    values = new TreeSet<Integer>();
+                    expected.put(secKey, values);
+                }
+                values.add(priKey);
+            }
+        }
+
+        open();
+        addEntities(primary);
+        checkSecondary(manyToMany, manyToManyRaw, expected);
+        checkDelete(manyToMany, manyToManyRaw, expected);
+        close();
+    }
+
+    private void
addEntities(PrimaryIndex primary) + throws DatabaseException { + + Transaction txn = txnBegin(); + for (int priKey = 0; priKey < N_RECORDS; priKey += 1) { + MyEntity prev = primary.put(txn, new MyEntity(priKey)); + assertNull(prev); + } + txnCommit(txn); + } + + private void checkDelete + (SecondaryIndex index, + SecondaryIndex indexRaw, + SortedMap> expected) + throws DatabaseException { + + SortedMap> expectedSubIndex = + new TreeMap>(); + + while (expected.size() > 0) { + Integer delSecKey = expected.firstKey(); + SortedSet deletedPriKeys = expected.remove(delSecKey); + for (SortedSet priKeys : expected.values()) { + priKeys.removeAll(deletedPriKeys); + } + Transaction txn = txnBegin(); + boolean deleted = index.delete(txn, delSecKey); + assertEquals(deleted, !deletedPriKeys.isEmpty()); + deleted = index.delete(txn, delSecKey); + assertTrue(!deleted); + assertNull(index.get(txn, delSecKey, null)); + txnCommit(txn); + checkSecondary(index, indexRaw, expected); + } + + /* + * Delete remaining records so that the primary index is empty. Use + * the RawStore for variety. + */ + Transaction txn = txnBegin(); + for (int priKey = 0; priKey < N_RECORDS; priKey += 1) { + primaryRaw.delete(txn, priKey); + } + txnCommit(txn); + checkAllEmpty(); + } + + private void checkSecondary + (SecondaryIndex index, + SecondaryIndex indexRaw, + SortedMap> expected) + throws DatabaseException { + + checkIndex(index, expected, keyGetter, entityGetter); + checkIndex(index.keysIndex(), expected, keyGetter, keyGetter); + + checkIndex(indexRaw, expected, rawKeyGetter, rawEntityGetter); + checkIndex(indexRaw.keysIndex(), expected, rawKeyGetter, rawKeyGetter); + + SortedMap> expectedSubIndex = + new TreeMap>(); + + for (Integer secKey : expected.keySet()) { + expectedSubIndex.clear(); + for (Integer priKey : expected.get(secKey)) { + SortedSet values = new TreeSet(); + values.add(priKey); + expectedSubIndex.put(priKey, values); + } + checkIndex(index.subIndex(secKey), + expectedSubIndex, + keyGetter, + entityGetter); + checkIndex(indexRaw.subIndex(secKey), + expectedSubIndex, + rawKeyGetter, + rawEntityGetter); + } + } + + private void checkIndex(EntityIndex index, + SortedMap> + expected, + Getter kGetter, + Getter vGetter) + throws DatabaseException { + + SortedMap map = index.sortedMap(); + + Transaction txn = txnBegin(); + for (int i : expected.keySet()) { + K k = kGetter.fromInt(i); + SortedSet dups = expected.get(i); + if (dups.isEmpty()) { + + /* EntityIndex */ + V v = index.get(txn, k, null); + assertNull(v); + assertTrue(!index.contains(txn, k, null)); + + /* Map/Collection */ + v = map.get(i); + assertNull(v); + assertTrue(!map.containsKey(i)); + } else { + int j = dups.first(); + + /* EntityIndex */ + V v = index.get(txn, k, null); + assertNotNull(v); + assertEquals(j, vGetter.getKey(v)); + assertTrue(index.contains(txn, k, null)); + + /* Map/Collection */ + v = map.get(i); + assertNotNull(v); + assertEquals(j, vGetter.getKey(v)); + assertTrue(map.containsKey(i)); + assertTrue("" + i + ' ' + j + ' ' + v + ' ' + map, + map.containsValue(v)); + assertTrue(map.keySet().contains(i)); + assertTrue(map.values().contains(v)); + assertTrue + (map.entrySet().contains(new MapEntryParameter(i, v))); + } + } + txnCommit(txn); + + int keysSize = expandKeySize(expected); + int valuesSize = expandValueSize(expected); + + /* EntityIndex.count */ + assertEquals("keysSize=" + keysSize, valuesSize, index.count()); + + /* Map/Collection size */ + assertEquals(valuesSize, map.size()); + assertEquals(valuesSize, 
map.values().size()); + assertEquals(valuesSize, map.entrySet().size()); + assertEquals(keysSize, map.keySet().size()); + + /* Map/Collection isEmpty */ + assertEquals(valuesSize == 0, map.isEmpty()); + assertEquals(valuesSize == 0, map.values().isEmpty()); + assertEquals(valuesSize == 0, map.entrySet().isEmpty()); + assertEquals(keysSize == 0, map.keySet().isEmpty()); + + txn = txnBeginCursor(); + + /* Unconstrained cursors. */ + checkCursor + (index.keys(txn, null), + map.keySet(), true, + expandKeys(expected), kGetter); + checkCursor + (index.entities(txn, null), + map.values(), false, + expandValues(expected), vGetter); + + /* Range cursors. */ + if (expected.isEmpty()) { + checkOpenRanges(txn, 0, index, expected, kGetter, vGetter); + checkClosedRanges(txn, 0, 1, index, expected, kGetter, vGetter); + } else { + int firstKey = expected.firstKey(); + int lastKey = expected.lastKey(); + for (int i = firstKey - 1; i <= lastKey + 1; i += 1) { + checkOpenRanges(txn, i, index, expected, kGetter, vGetter); + int j = i + 1; + if (j < lastKey + 1) { + checkClosedRanges + (txn, i, j, index, expected, kGetter, vGetter); + } + } + } + + txnCommit(txn); + } + + private void checkOpenRanges(Transaction txn, int i, + EntityIndex index, + SortedMap> + expected, + Getter kGetter, + Getter vGetter) + throws DatabaseException { + + SortedMap map = index.sortedMap(); + SortedMap> rangeExpected; + K k = kGetter.fromInt(i); + K kPlusOne = kGetter.fromInt(i + 1); + + /* Head range exclusive. */ + rangeExpected = expected.headMap(i); + checkCursor + (index.keys(txn, null, false, k, false, null), + map.headMap(k).keySet(), true, + expandKeys(rangeExpected), kGetter); + checkCursor + (index.entities(txn, null, false, k, false, null), + map.headMap(k).values(), false, + expandValues(rangeExpected), vGetter); + + /* Head range inclusive. */ + rangeExpected = expected.headMap(i + 1); + checkCursor + (index.keys(txn, null, false, k, true, null), + map.headMap(kPlusOne).keySet(), true, + expandKeys(rangeExpected), kGetter); + checkCursor + (index.entities(txn, null, false, k, true, null), + map.headMap(kPlusOne).values(), false, + expandValues(rangeExpected), vGetter); + + /* Tail range exclusive. */ + rangeExpected = expected.tailMap(i + 1); + checkCursor + (index.keys(txn, k, false, null, false, null), + map.tailMap(kPlusOne).keySet(), true, + expandKeys(rangeExpected), kGetter); + checkCursor + (index.entities(txn, k, false, null, false, null), + map.tailMap(kPlusOne).values(), false, + expandValues(rangeExpected), vGetter); + + /* Tail range inclusive. */ + rangeExpected = expected.tailMap(i); + checkCursor + (index.keys(txn, k, true, null, false, null), + map.tailMap(k).keySet(), true, + expandKeys(rangeExpected), kGetter); + checkCursor + (index.entities(txn, k, true, null, false, null), + map.tailMap(k).values(), false, + expandValues(rangeExpected), vGetter); + } + + private void checkClosedRanges(Transaction txn, int i, int j, + EntityIndex index, + SortedMap> + expected, + Getter kGetter, + Getter vGetter) + throws DatabaseException { + + SortedMap map = index.sortedMap(); + SortedMap> rangeExpected; + K k = kGetter.fromInt(i); + K kPlusOne = kGetter.fromInt(i + 1); + K l = kGetter.fromInt(j); + K lPlusOne = kGetter.fromInt(j + 1); + + /* Sub range exclusive. 
*/ + rangeExpected = expected.subMap(i + 1, j); + checkCursor + (index.keys(txn, k, false, l, false, null), + map.subMap(kPlusOne, l).keySet(), true, + expandKeys(rangeExpected), kGetter); + checkCursor + (index.entities(txn, k, false, l, false, null), + map.subMap(kPlusOne, l).values(), false, + expandValues(rangeExpected), vGetter); + + /* Sub range inclusive. */ + rangeExpected = expected.subMap(i, j + 1); + checkCursor + (index.keys(txn, k, true, l, true, null), + map.subMap(k, lPlusOne).keySet(), true, + expandKeys(rangeExpected), kGetter); + checkCursor + (index.entities(txn, k, true, l, true, null), + map.subMap(k, lPlusOne).values(), false, + expandValues(rangeExpected), vGetter); + } + + private List> + expandKeys(SortedMap> map) { + + List> list = new ArrayList>(); + for (Integer key : map.keySet()) { + SortedSet values = map.get(key); + List dups = new ArrayList(); + for (int i = 0; i < values.size(); i += 1) { + dups.add(key); + } + list.add(dups); + } + return list; + } + + private List> + expandValues(SortedMap> map) { + + List> list = new ArrayList>(); + for (SortedSet values : map.values()) { + list.add(new ArrayList(values)); + } + return list; + } + + private int expandKeySize(SortedMap> map) { + + int size = 0; + for (SortedSet values : map.values()) { + if (values.size() > 0) { + size += 1; + } + } + return size; + } + + private int expandValueSize(SortedMap> map) { + + int size = 0; + for (SortedSet values : map.values()) { + size += values.size(); + } + return size; + } + + private void checkCursor(EntityCursor cursor, + Collection collection, + boolean collectionIsKeySet, + List> expected, + Getter getter) + throws DatabaseException { + + boolean first; + boolean firstDup; + Iterator iterator = collection.iterator(); + + for (List dups : expected) { + for (int i : dups) { + T o = cursor.next(); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + /* Value iterator over duplicates. */ + if (!collectionIsKeySet) { + assertTrue(iterator.hasNext()); + o = iterator.next(); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + } + } + } + + first = true; + for (List dups : expected) { + firstDup = true; + for (int i : dups) { + T o = first ? cursor.first() + : (firstDup ? cursor.next() : cursor.nextDup()); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + first = false; + firstDup = false; + } + } + + first = true; + for (List dups : expected) { + if (!dups.isEmpty()) { + int i = dups.get(0); + T o = first ? cursor.first() : cursor.nextNoDup(); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + /* Key iterator over non-duplicates. */ + if (collectionIsKeySet) { + assertTrue(iterator.hasNext()); + o = iterator.next(); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + } + first = false; + } + } + + List> reversed = new ArrayList>(); + for (List dups : expected) { + ArrayList reversedDups = new ArrayList(dups); + Collections.reverse(reversedDups); + reversed.add(reversedDups); + } + Collections.reverse(reversed); + + first = true; + for (List dups : reversed) { + for (int i : dups) { + T o = first ? cursor.last() : cursor.prev(); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + first = false; + } + } + + first = true; + for (List dups : reversed) { + firstDup = true; + for (int i : dups) { + T o = first ? cursor.last() + : (firstDup ? 
cursor.prev() : cursor.prevDup()); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + first = false; + firstDup = false; + } + } + + first = true; + for (List dups : reversed) { + if (!dups.isEmpty()) { + int i = dups.get(0); + T o = first ? cursor.last() : cursor.prevNoDup(); + assertNotNull(o); + assertEquals(i, getter.getKey(o)); + first = false; + } + } + + cursor.close(); + } + + private void checkAllEmpty() + throws DatabaseException { + + checkEmpty(primary); + checkEmpty(oneToOne); + checkEmpty(oneToMany); + checkEmpty(manyToOne); + checkEmpty(manyToMany); + } + + private void checkEmpty(EntityIndex index) + throws DatabaseException { + + Transaction txn = txnBeginCursor(); + EntityCursor keys = index.keys(txn, null); + assertNull(keys.next()); + assertTrue(!keys.iterator().hasNext()); + keys.close(); + EntityCursor entities = index.entities(txn, null); + assertNull(entities.next()); + assertTrue(!entities.iterator().hasNext()); + entities.close(); + txnCommit(txn); + } + + private interface Getter { + int getKey(T o); + T fromInt(int i); + } + + private static Getter entityGetter = + new Getter() { + public int getKey(MyEntity o) { + return o.key; + } + public MyEntity fromInt(int i) { + throw new UnsupportedOperationException(); + } + }; + + private static Getter keyGetter = + new Getter() { + public int getKey(Integer o) { + return o; + } + public Integer fromInt(int i) { + return Integer.valueOf(i); + } + }; + + private static Getter rawEntityGetter = + new Getter() { + public int getKey(RawObject o) { + Object val = o.getValues().get("key"); + return ((Integer) val).intValue(); + } + public RawObject fromInt(int i) { + throw new UnsupportedOperationException(); + } + }; + + private static Getter rawKeyGetter = + new Getter() { + public int getKey(Object o) { + return ((Integer) o).intValue(); + } + public Object fromInt(int i) { + return Integer.valueOf(i); + } + }; + + @Entity + private static class MyEntity { + + @PrimaryKey + private int key; + + @SecondaryKey(relate=ONE_TO_ONE) + private int oneToOne; + + @SecondaryKey(relate=MANY_TO_ONE) + private int manyToOne; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Set oneToMany = new TreeSet(); + + @SecondaryKey(relate=MANY_TO_MANY) + private final Set manyToMany = new TreeSet(); + + private MyEntity() {} + + private MyEntity(int key) { + + /* example keys: {0, 1, 2, 3, 4} */ + this.key = key; + + /* { 0:0, 1:-1, 2:-2, 3:-3, 4:-4 } */ + oneToOne = -key; + + /* { 0:0, 1:1, 2:2, 3:0, 4:1 } */ + manyToOne = key % THREE_TO_ONE; + + /* { 0:{}, 1:{10}, 2:{20,21}, 3:{30,31,32}, 4:{40,41,42,43} */ + for (int i = 0; i < key; i += 1) { + oneToMany.add((N_RECORDS * key) + i); + } + + /* { 0:{}, 1:{0}, 2:{0,1}, 3:{0,1,2}, 4:{0,1,2,3} */ + for (int i = 0; i < key; i += 1) { + manyToMany.add(i); + } + } + + @Override + public String toString() { + return "MyEntity " + key; + } + } +} diff --git a/test/com/sleepycat/persist/test/JoinTest.java b/test/com/sleepycat/persist/test/JoinTest.java new file mode 100644 index 0000000..6d79f6a --- /dev/null +++ b/test/com/sleepycat/persist/test/JoinTest.java @@ -0,0 +1,194 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityJoin; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.ForwardCursor; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.TxnTestCase; + +/** + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class JoinTest extends TxnTestCase { + + private static final int N_RECORDS = 5; + + @Parameters + public static List genParams() { + return getTxnParams(null, false); + } + + public JoinTest(String type){ + initEnvConfig(); + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + private EntityStore store; + private PrimaryIndex primary; + private SecondaryIndex sec1; + private SecondaryIndex sec2; + private SecondaryIndex sec3; + + /** + * Opens the store. + */ + private void open() + throws DatabaseException { + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + + store = new EntityStore(env, "test", config); + + primary = store.getPrimaryIndex(Integer.class, MyEntity.class); + sec1 = store.getSecondaryIndex(primary, Integer.class, "k1"); + sec2 = store.getSecondaryIndex(primary, Integer.class, "k2"); + sec3 = store.getSecondaryIndex(primary, Integer.class, "k3"); + } + + /** + * Closes the store. + */ + private void close() + throws DatabaseException { + + store.close(); + } + + @Test + public void testJoin() + throws DatabaseException { + + open(); + + /* + * Primary keys: { 0, 1, 2, 3, 4 } + * Secondary k1: { 0:0, 0:1, 0:2, 0:3, 0:4 } + * Secondary k2: { 0:0, 1:1, 0:2, 1:3, 0:4 } + * Secondary k3: { 0:0, 1:1, 2:2, 0:3, 1:4 } + */ + Transaction txn = txnBegin(); + for (int i = 0; i < N_RECORDS; i += 1) { + MyEntity e = new MyEntity(i, 0, i % 2, i % 3); + boolean ok = primary.putNoOverwrite(txn, e); + assertTrue(ok); + } + txnCommit(txn); + + /* + * k1, k2, k3, -> { primary keys } + * -1 means don't include the key in the join. 
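+          *
+          * For example, doJoin(0, 0, -1, ...) intersects the entities with
+          * k1 == 0 (all five ids) and k2 == 0 (ids 0, 2 and 4), so the
+          * expected result is the primary keys { 0, 2, 4 }.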
+ */ + doJoin( 0, 0, 0, new int[] { 0 }); + doJoin( 0, 0, 1, new int[] { 4 }); + doJoin( 0, 0, -1, new int[] { 0, 2, 4 }); + doJoin(-1, 1, 1, new int[] { 1 }); + doJoin(-1, 2, 2, new int[] { }); + doJoin(-1, -1, 2, new int[] { 2 }); + + close(); + } + + private void doJoin(int k1, int k2, int k3, int[] expectKeys) + throws DatabaseException { + + List expect = new ArrayList(); + for (int i : expectKeys) { + expect.add(i); + } + EntityJoin join = new EntityJoin(primary); + if (k1 >= 0) { + join.addCondition(sec1, k1); + } + if (k2 >= 0) { + join.addCondition(sec2, k2); + } + if (k3 >= 0) { + join.addCondition(sec3, k3); + } + List found; + Transaction txn = txnBegin(); + + /* Keys */ + found = new ArrayList(); + ForwardCursor keys = join.keys(txn, null); + for (int i : keys) { + found.add(i); + } + keys.close(); + assertEquals(expect, found); + + /* Entities */ + found = new ArrayList(); + ForwardCursor entities = join.entities(txn, null); + for (MyEntity e : entities) { + found.add(e.id); + } + entities.close(); + assertEquals(expect, found); + + txnCommit(txn); + } + + @Entity + private static class MyEntity { + @PrimaryKey + int id; + @SecondaryKey(relate=MANY_TO_ONE) + int k1; + @SecondaryKey(relate=MANY_TO_ONE) + int k2; + @SecondaryKey(relate=MANY_TO_ONE) + int k3; + + private MyEntity() {} + + MyEntity(int id, int k1, int k2, int k3) { + this.id = id; + this.k1 = k1; + this.k2 = k2; + this.k3 = k3; + } + + @Override + public String toString() { + return "MyEntity " + id + ' ' + k1 + ' ' + k2 + ' ' + k3; + } + } +} diff --git a/test/com/sleepycat/persist/test/NegativeTest.java b/test/com/sleepycat/persist/test/NegativeTest.java new file mode 100644 index 0000000..cf02f60 --- /dev/null +++ b/test/com/sleepycat/persist/test/NegativeTest.java @@ -0,0 +1,738 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.DeleteAction.NULLIFY; +import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY; +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SequenceConfig; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PersistentProxy; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.TxnTestCase; + +/** + * Negative tests. + * + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class NegativeTest extends TxnTestCase { + + @Parameters + public static List genParams() { + return getTxnParams(null, false); + } + + public NegativeTest(String type){ + initEnvConfig(); + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + private EntityStore store; + + private void open() + throws DatabaseException { + + open(null); + } + + private void open(Class clsToRegister) + throws DatabaseException { + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + config.setTransactional(envConfig.getTransactional()); + + if (clsToRegister != null) { + AnnotationModel model = new AnnotationModel(); + model.registerClass(clsToRegister); + config.setModel(model); + } + + store = new EntityStore(env, "test", config); + } + + private void close() + throws DatabaseException { + + store.close(); + store = null; + } + + @After + public void tearDown() + throws Exception { + + if (store != null) { + try { + store.close(); + } catch (Throwable e) { + System.out.println("tearDown: " + e); + } + store = null; + } + super.tearDown(); + } + + @Test + public void testBadKeyClass1() + throws DatabaseException { + + open(); + try { + store.getPrimaryIndex(BadKeyClass1.class, UseBadKeyClass1.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf("@KeyField") >= 0); + } + close(); + } + + /** Missing @KeyField in composite key class. 
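+  *
+  * For contrast, a valid composite key class annotates every field, for
+  * example (illustrative sketch, not one of the classes under test):
+  *
+  *   @Persistent
+  *   static class GoodKeyClass {
+  *       @KeyField(1)
+  *       private int f1;
+  *   }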
*/ + @Persistent + static class BadKeyClass1 { + + private int f1; + } + + @Entity + static class UseBadKeyClass1 { + + @PrimaryKey + private final BadKeyClass1 f1 = new BadKeyClass1(); + + @SecondaryKey(relate=ONE_TO_ONE) + private final BadKeyClass1 f2 = new BadKeyClass1(); + } + + @Test + public void testBadSequenceKeys() + throws DatabaseException { + + open(); + try { + store.getPrimaryIndex(Boolean.class, BadSequenceKeyEntity1.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("Type not allowed for sequence") >= 0); + } + try { + store.getPrimaryIndex(BadSequenceKeyEntity2.Key.class, + BadSequenceKeyEntity2.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("Type not allowed for sequence") >= 0); + } + try { + store.getPrimaryIndex(BadSequenceKeyEntity3.Key.class, + BadSequenceKeyEntity3.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("A composite key class used with a sequence may contain " + + "only a single key field")>= 0); + } + close(); + } + + /** Boolean not allowed for sequence key. */ + @Entity + static class BadSequenceKeyEntity1 { + + @PrimaryKey(sequence="X") + private boolean key; + } + + /** Composite key with non-integer field not allowed for sequence key. */ + @Entity + static class BadSequenceKeyEntity2 { + + @PrimaryKey(sequence="X") + private Key key; + + @Persistent + static class Key { + @KeyField(1) + boolean key; + } + } + + /** Composite key with multiple key fields not allowed for sequence key. */ + @Entity + static class BadSequenceKeyEntity3 { + + @PrimaryKey(sequence="X") + private Key key; + + @Persistent + static class Key { + @KeyField(1) + int key; + @KeyField(2) + int key2; + } + } + + /** + * A proxied object may not current contain a field that references the + * parent proxy. [#15815] + */ + @Test + public void testProxyNestedRef() + throws DatabaseException { + + open(); + PrimaryIndex index = store.getPrimaryIndex + (Integer.class, ProxyNestedRef.class); + ProxyNestedRef entity = new ProxyNestedRef(); + entity.list.add(entity.list); + try { + index.put(entity); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("Cannot embed a reference to a proxied object") >= 0); + } + close(); + } + + @Entity + static class ProxyNestedRef { + + @PrimaryKey + private int key; + + ArrayList list = new ArrayList(); + } + + /** + * Disallow primary keys on entity subclasses. 
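+      * A subclass of an @Entity class (marked @Persistent) may add
+      * secondary keys, but the primary key must be declared on the entity
+      * class itself, as the assertion message below confirms.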
[#15757] + */ + @Test + public void testEntitySubclassWithPrimaryKey() + throws DatabaseException { + + open(); + PrimaryIndex index = store.getPrimaryIndex + (Integer.class, EntitySuperClass.class); + EntitySuperClass e1 = new EntitySuperClass(1, "one"); + index.put(e1); + assertEquals(e1, index.get(1)); + EntitySubClass e2 = new EntitySubClass(2, "two", "foo", 9); + try { + index.put(e2); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains + ("PrimaryKey may not appear on an Entity subclass")); + } + assertEquals(e1, index.get(1)); + close(); + } + + @Entity + static class EntitySuperClass { + + @PrimaryKey + private int x; + + private String y; + + EntitySuperClass(int x, String y) { + assert y != null; + this.x = x; + this.y = y; + } + + private EntitySuperClass() {} + + @Override + public String toString() { + return "x=" + x + " y=" + y; + } + + @Override + public boolean equals(Object other) { + if (other instanceof EntitySuperClass) { + EntitySuperClass o = (EntitySuperClass) other; + return x == o.x && y.equals(o.y); + } else { + return false; + } + } + } + + @Persistent + static class EntitySubClass extends EntitySuperClass { + + @PrimaryKey + private String foo; + + private int z; + + EntitySubClass(int x, String y, String foo, int z) { + super(x, y); + assert foo != null; + this.foo = foo; + this.z = z; + } + + private EntitySubClass() {} + + @Override + public String toString() { + return super.toString() + " z=" + z; + } + + @Override + public boolean equals(Object other) { + if (other instanceof EntitySubClass) { + EntitySubClass o = (EntitySubClass) other; + return super.equals(o) && z == o.z; + } else { + return false; + } + } + } + + /** + * Disallow storing null entities. [#19085] + */ + @Test + public void testNullEntity() + throws DatabaseException { + + open(); + PrimaryIndex index = store.getPrimaryIndex + (Integer.class, EntitySuperClass.class); + try { + index.put(null); + fail(); + } catch (IllegalArgumentException expected) { + } + try { + index.sortedMap().put(1, null); + fail(); + } catch (IllegalArgumentException expected) { + } + close(); + } + + /** + * Disallow embedded entity classes and subclasses. [#16077] + */ + @Test + public void testEmbeddedEntity() + throws DatabaseException { + + open(); + PrimaryIndex index = store.getPrimaryIndex + (Integer.class, EmbeddingEntity.class); + EmbeddingEntity e1 = new EmbeddingEntity(1, null); + index.put(e1); + assertEquals(e1, index.get(1)); + + EmbeddingEntity e2 = + new EmbeddingEntity(2, new EntitySuperClass(2, "two")); + try { + index.put(e2); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains + ("References to entities are not allowed")); + } + + EmbeddingEntity e3 = new EmbeddingEntity + (3, new EmbeddedEntitySubClass(3, "three", "foo", 9)); + try { + index.put(e3); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.toString(), e.getMessage().contains + ("References to entities are not allowed")); + } + + assertEquals(e1, index.get(1)); + close(); + } + + @Entity + static class EmbeddingEntity { + + @PrimaryKey + private int x; + + private EntitySuperClass y; + + /* References to self are allowed. 
[#17525] */ + private EmbeddingEntity self; + + EmbeddingEntity(int x, EntitySuperClass y) { + this.x = x; + this.y = y; + this.self = this; + } + + private EmbeddingEntity() {} + + @Override + public String toString() { + return "x=" + x + " y=" + y; + } + + @Override + public boolean equals(Object other) { + if (other instanceof EmbeddingEntity) { + EmbeddingEntity o = (EmbeddingEntity) other; + return x == o.x && + ((y == null) ? (o.y == null) : y.equals(o.y)); + } else { + return false; + } + } + } + + @Persistent + static class EmbeddedEntitySubClass extends EntitySuperClass { + + private String foo; + + private int z; + + EmbeddedEntitySubClass(int x, String y, String foo, int z) { + super(x, y); + assert foo != null; + this.foo = foo; + this.z = z; + } + + private EmbeddedEntitySubClass() {} + + @Override + public String toString() { + return super.toString() + " z=" + z; + } + + @Override + public boolean equals(Object other) { + if (other instanceof EmbeddedEntitySubClass) { + EmbeddedEntitySubClass o = (EmbeddedEntitySubClass) other; + return super.equals(o) && z == o.z; + } else { + return false; + } + } + } + + /** + * Disallow SecondaryKey collection with no type parameter. [#15950] + */ + @Test + public void testTypelessKeyCollection() + throws DatabaseException { + + open(); + try { + store.getPrimaryIndex + (Integer.class, TypelessKeyCollectionEntity.class); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.toString(), e.getMessage().contains + ("Collection typed secondary key field must have a " + + "single generic type argument and a wildcard or type " + + "bound is not allowed")); + } + close(); + } + + @Entity + static class TypelessKeyCollectionEntity { + + @PrimaryKey + private int x; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Collection keys = new ArrayList(); + + TypelessKeyCollectionEntity(int x) { + this.x = x; + } + + private TypelessKeyCollectionEntity() {} + } + + /** + * Disallow a persistent proxy that extends an entity. [#15950] + */ + @Test + public void testProxyEntity() + throws DatabaseException { + + try { + open(ProxyExtendsEntity.class); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.toString(), e.getMessage().contains + ("A proxy may not be an entity")); + } + } + + @Persistent(proxyFor=Locale.class) + static class ProxyExtendsEntity + extends EntitySuperClass + implements PersistentProxy { + + String language; + String country; + String variant; + + public void initializeProxy(Locale object) { + language = object.getLanguage(); + country = object.getCountry(); + variant = object.getVariant(); + } + + public Locale convertProxy() { + return new Locale(language, country, variant); + } + } + + /** + * Wrapper type not allowed for nullified foreign key. + */ + @Test + public void testBadNullifyKey() + throws DatabaseException { + + open(); + try { + store.getPrimaryIndex(Integer.class, BadNullifyKeyEntity1.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("NULLIFY may not be used with primitive fields") >= 0); + } + close(); + } + + @Entity + static class BadNullifyKeyEntity1 { + + @PrimaryKey + private int key; + + @SecondaryKey(relate=ONE_TO_ONE, + relatedEntity=BadNullifyKeyEntity2.class, + onRelatedEntityDelete=NULLIFY) + private int secKey; // Should be Integer, not int. + } + + @Entity + static class BadNullifyKeyEntity2 { + + @PrimaryKey + private int key; + } + + /** + * @Persistent not allowed on an enum. 
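+      * Enums are natively persistent in the DPL and need no annotation at
+      * all; @Persistent applies only to classes, hence the "not allowed for
+      * enum, interface, or primitive" message checked below.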
+ */ + @Test + public void testPersistentEnum() + throws DatabaseException { + + open(); + try { + store.getPrimaryIndex(Integer.class, PersistentEnumEntity.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("not allowed for enum, interface, or primitive") >= 0); + } + close(); + } + + @Entity + static class PersistentEnumEntity { + + @PrimaryKey + private int key; + + @Persistent + enum MyEnum {X, Y, Z}; + + MyEnum f1; + } + + /** + * Disallow a reference to an interface marked @Persistent. + */ + @Test + public void testPersistentInterface() + throws DatabaseException { + + open(); + try { + store.getPrimaryIndex(Integer.class, + PersistentInterfaceEntity1.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("not allowed for enum, interface, or primitive") >= 0); + } + close(); + } + + @Entity + static class PersistentInterfaceEntity1 { + + @PrimaryKey + private int key; + + @SecondaryKey(relate=ONE_TO_ONE, + relatedEntity=PersistentInterfaceEntity2.class) + private int secKey; // Should be Integer, not int. + } + + @Persistent + interface PersistentInterfaceEntity2 { + } + + /** + * Disallow reference to @Persistent inner class. + */ + @Test + public void testPersistentInnerClass() + throws DatabaseException { + + open(); + try { + store.getPrimaryIndex(Integer.class, + PersistentInnerClassEntity1.class); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.getMessage().indexOf + ("Inner classes not allowed") >= 0); + } + close(); + } + + @Entity + static class PersistentInnerClassEntity1 { + + @PrimaryKey + private int key; + + private PersistentInnerClass f; + } + + /* An inner (non-static) class is illegal. */ + @Persistent + class PersistentInnerClass { + + private int x; + } + + /** + * Disallow @Entity inner class. 
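+      */
+     @Test
+     public void testEntityInnerClass()
+         throws DatabaseException {
+
+         /*
+          * Reconstructed from the description above, following the pattern
+          * of the sibling tests; the entity class name is illustrative.
+          */
+         open();
+         try {
+             store.getPrimaryIndex(Integer.class,
+                                   EntityInnerClassEntity.class);
+             fail();
+         } catch (IllegalArgumentException expected) {
+             assertTrue(expected.getMessage().indexOf
+                        ("Inner classes not allowed") >= 0);
+         }
+         close();
+     }
+
+     /* An inner (non-static) entity class is illegal. */
+     @Entity
+     class EntityInnerClassEntity {
+
+         @PrimaryKey
+         private int key;
+     }
+
+     /**
+      * Disallow setting database, secondary and sequence configuration
+      * properties after the corresponding handle is open.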
+ */ + @Test + public void testSetConfigAfterOpen() + throws DatabaseException { + + open(); + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, + SetConfigAfterOpenEntity.class); + SecondaryIndex secIndex = + store.getSecondaryIndex(priIndex, Integer.class, "skey"); + + DatabaseConfig priConfig = + store.getPrimaryConfig(SetConfigAfterOpenEntity.class); + assertNotNull(priConfig); + try { + store.setPrimaryConfig(SetConfigAfterOpenEntity.class, priConfig); + fail(); + } catch (IllegalStateException expected) { + assertTrue(expected.getMessage().indexOf + ("Cannot set config after DB is open") >= 0); + } + + SecondaryConfig secConfig = + store.getSecondaryConfig(SetConfigAfterOpenEntity.class, "skey"); + assertNotNull(secConfig); + try { + store.setSecondaryConfig(SetConfigAfterOpenEntity.class, "skey", + secConfig); + fail(); + } catch (IllegalStateException expected) { + assertTrue(expected.getMessage().indexOf + ("Cannot set config after DB is open") >= 0); + } + + SequenceConfig seqConfig = store.getSequenceConfig("foo"); + assertNotNull(seqConfig); + try { + store.setSequenceConfig("foo", seqConfig); + fail(); + } catch (IllegalStateException expected) { + assertTrue(expected.getMessage().indexOf + ("Cannot set config after Sequence is open") >= 0); + } + + close(); + } + + @Entity + static class SetConfigAfterOpenEntity { + + @PrimaryKey(sequence="foo") + private int key; + + @SecondaryKey(relate=ONE_TO_ONE) + int skey; + } +} diff --git a/test/com/sleepycat/persist/test/OperationTest.java b/test/com/sleepycat/persist/test/OperationTest.java new file mode 100644 index 0000000..e13732a --- /dev/null +++ b/test/com/sleepycat/persist/test/OperationTest.java @@ -0,0 +1,1860 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.DeleteAction.CASCADE; +import static com.sleepycat.persist.model.DeleteAction.NULLIFY; +import static com.sleepycat.persist.model.Relationship.MANY_TO_MANY; +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static com.sleepycat.persist.model.Relationship.ONE_TO_MANY; +import static com.sleepycat.persist.model.Relationship.ONE_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.compat.DbCompat; +/* */ +import com.sleepycat.je.CacheMode; +/* */ +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.EnvironmentStats; +/* */ +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityIndex; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.impl.Store; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.NotPersistent; +import com.sleepycat.persist.model.NotTransient; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.persist.raw.RawStore; +import com.sleepycat.util.test.TxnTestCase; + +/** + * Tests misc store and index operations that are not tested by IndexTest. 
+ * + * @author Mark Hayes + */ +@RunWith(Parameterized.class) +public class OperationTest extends TxnTestCase { + + private static final String STORE_NAME = "test"; + + @Parameters + public static List genParams() { + return getTxnParams(null, false); + } + + public OperationTest(String type){ + initEnvConfig(); + txnType = type; + isTransactional = (txnType != TXN_NULL); + customName = txnType; + } + + private EntityStore store; + + private void openReadOnly() + throws DatabaseException { + + StoreConfig config = new StoreConfig(); + config.setReadOnly(true); + open(config); + } + + private void open() + throws DatabaseException { + + open((Class) null); + } + + private void open(Class clsToRegister) + throws DatabaseException { + + StoreConfig config = new StoreConfig(); + config.setAllowCreate(envConfig.getAllowCreate()); + if (clsToRegister != null) { + com.sleepycat.persist.model.EntityModel model = + new com.sleepycat.persist.model.AnnotationModel(); + model.registerClass(clsToRegister); + config.setModel(model); + } + open(config); + } + + private void open(StoreConfig config) + throws DatabaseException { + + config.setTransactional(envConfig.getTransactional()); + store = new EntityStore(env, STORE_NAME, config); + } + + private void close() + throws DatabaseException { + + store.close(); + store = null; + } + + /** + * The store must be closed before closing the environment. + */ + @After + public void tearDown() + throws Exception { + + try { + if (store != null) { + store.close(); + } + } catch (Throwable e) { + System.out.println("During tearDown: " + e); + } + store = null; + super.tearDown(); + } + + @Test + public void testReadOnly() + throws DatabaseException { + + open(); + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class); + Transaction txn = txnBegin(); + SharedSequenceEntity1 e = new SharedSequenceEntity1(); + priIndex.put(txn, e); + assertEquals(1, e.key); + txnCommit(txn); + close(); + + /* + * Check that we can open the store read-only and read the records + * written above. 
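+          * (openReadOnly() sets StoreConfig.setReadOnly(true) before
+          * opening, so this also verifies that records whose keys were
+          * assigned from the sequence are readable without write access.)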
+ */ + openReadOnly(); + priIndex = + store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class); + e = priIndex.get(1); + assertNotNull(e); + close(); + } + + /* */ + @Test + public void testGetStoreNames() + throws DatabaseException { + + open(); + close(); + Set names = EntityStore.getStoreNames(env); + assertEquals(1, names.size()); + assertEquals("test", names.iterator().next()); + } + /* */ + + /* */ + @Test + public void testCacheMode() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, MyEntity.class); + + Transaction txn = txnBeginCursor(); + + MyEntity e = new MyEntity(); + e.priKey = 1; + e.secKey = 1; + priIndex.put(txn, e); + + EntityCursor entities = priIndex.entities(txn, null); + + assertSame(CacheMode.DEFAULT, entities.getCacheMode()); + e = entities.first(); + assertNotNull(e); + assertSame(CacheMode.DEFAULT, entities.getCacheMode()); + entities.setCacheMode(CacheMode.KEEP_HOT); + assertSame(CacheMode.KEEP_HOT, entities.getCacheMode()); + e = entities.first(); + assertNotNull(e); + assertSame(CacheMode.KEEP_HOT, entities.getCacheMode()); + entities.setCacheMode(CacheMode.UNCHANGED); + entities.update(e); + entities.setCacheMode(CacheMode.UNCHANGED); + + entities.close(); + txnCommit(txn); + close(); + } + /* */ + + @Test + public void testUninitializedCursor() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, MyEntity.class); + + Transaction txn = txnBeginCursor(); + + MyEntity e = new MyEntity(); + e.priKey = 1; + e.secKey = 1; + priIndex.put(txn, e); + + EntityCursor entities = + priIndex.entities(txn, getWriteCursorConfig()); + try { + entities.nextDup(); + fail(); + } catch (IllegalStateException expected) {} + try { + entities.prevDup(); + fail(); + } catch (IllegalStateException expected) {} + try { + entities.current(); + fail(); + } catch (IllegalStateException expected) {} + try { + entities.delete(); + fail(); + } catch (IllegalStateException expected) {} + try { + entities.update(e); + fail(); + } catch (IllegalStateException expected) {} + try { + entities.count(); + fail(); + } catch (IllegalStateException expected) {} + + entities.close(); + txnCommit(txn); + close(); + } + + @Test + public void testCursorCount() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, MyEntity.class); + + SecondaryIndex secIndex = + store.getSecondaryIndex(priIndex, Integer.class, "secKey"); + + Transaction txn = txnBeginCursor(); + + MyEntity e = new MyEntity(); + e.priKey = 1; + e.secKey = 1; + priIndex.put(txn, e); + + EntityCursor cursor = secIndex.entities(txn, null); + cursor.next(); + assertEquals(1, cursor.count()); + cursor.close(); + + e.priKey = 2; + priIndex.put(txn, e); + cursor = secIndex.entities(txn, null); + cursor.next(); + assertEquals(2, cursor.count()); + cursor.close(); + + txnCommit(txn); + close(); + } + + @Test + public void testCursorUpdate() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, MyEntity.class); + + SecondaryIndex secIndex = + store.getSecondaryIndex(priIndex, Integer.class, "secKey"); + + Transaction txn = txnBeginCursor(); + + Integer k; + MyEntity e = new MyEntity(); + e.priKey = 1; + e.secKey = 2; + priIndex.put(txn, e); + + /* update() with primary entity cursor. 
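+            Only a cursor on the primary index, opened with a write-enabled
+            cursor configuration, supports update(); the key-cursor and
+            secondary-cursor cases below are expected to throw
+            UnsupportedOperationException.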
*/ + EntityCursor entities = + priIndex.entities(txn, getWriteCursorConfig()); + e = entities.next(); + assertNotNull(e); + assertEquals(1, e.priKey); + assertEquals(Integer.valueOf(2), e.secKey); + e.secKey = null; + assertTrue(entities.update(e)); + e = entities.current(); + assertNotNull(e); + assertEquals(1, e.priKey); + assertEquals(null, e.secKey); + e.secKey = 3; + assertTrue(entities.update(e)); + e = entities.current(); + assertNotNull(e); + assertEquals(1, e.priKey); + assertEquals(Integer.valueOf(3), e.secKey); + entities.close(); + + /* update() with primary keys cursor. */ + EntityCursor keys = priIndex.keys(txn, + getWriteCursorConfig()); + k = keys.next(); + assertNotNull(k); + assertEquals(Integer.valueOf(1), k); + try { + keys.update(2); + fail(); + } catch (UnsupportedOperationException expected) { + } + keys.close(); + + /* update() with secondary entity cursor. */ + entities = secIndex.entities(txn, null); + e = entities.next(); + assertNotNull(e); + assertEquals(1, e.priKey); + assertEquals(Integer.valueOf(3), e.secKey); + try { + entities.update(e); + fail(); + } catch (UnsupportedOperationException expected) { + } catch (IllegalArgumentException expectedForDbCore) { + } + entities.close(); + + /* update() with secondary keys cursor. */ + keys = secIndex.keys(txn, null); + k = keys.next(); + assertNotNull(k); + assertEquals(Integer.valueOf(3), k); + try { + keys.update(k); + fail(); + } catch (UnsupportedOperationException expected) { + } + keys.close(); + + txnCommit(txn); + close(); + } + + @Test + public void testCursorDelete() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, MyEntity.class); + + SecondaryIndex secIndex = + store.getSecondaryIndex(priIndex, Integer.class, "secKey"); + + Transaction txn = txnBeginCursor(); + + /* delete() with primary and secondary entities cursor. */ + + for (EntityIndex index : new EntityIndex[] { priIndex, secIndex }) { + + MyEntity e = new MyEntity(); + e.priKey = 1; + e.secKey = 1; + priIndex.put(txn, e); + e.priKey = 2; + priIndex.put(txn, e); + + EntityCursor cursor = + index.entities(txn, getWriteCursorConfig()); + + e = cursor.next(); + assertNotNull(e); + assertEquals(1, e.priKey); + e = cursor.current(); + assertNotNull(e); + assertEquals(1, e.priKey); + assertTrue(cursor.delete()); + assertTrue(!cursor.delete()); + assertNull(cursor.current()); + + e = cursor.next(); + assertNotNull(e); + assertEquals(2, e.priKey); + e = cursor.current(); + assertNotNull(e); + assertEquals(2, e.priKey); + assertTrue(cursor.delete()); + assertTrue(!cursor.delete()); + assertNull(cursor.current()); + + e = cursor.next(); + assertNull(e); + + if (index == priIndex) { + e = new MyEntity(); + e.priKey = 2; + e.secKey = 1; + assertTrue(!cursor.update(e)); + } + + cursor.close(); + } + + /* delete() with primary and secondary keys cursor. */ + + for (EntityIndex index : new EntityIndex[] { priIndex, secIndex }) { + + MyEntity e = new MyEntity(); + e.priKey = 1; + e.secKey = 1; + priIndex.put(txn, e); + e.priKey = 2; + priIndex.put(txn, e); + + EntityCursor cursor = index.keys(txn, + getWriteCursorConfig()); + + Integer k = cursor.next(); + assertNotNull(k); + assertEquals(1, k.intValue()); + k = cursor.current(); + assertNotNull(k); + assertEquals(1, k.intValue()); + assertTrue(cursor.delete()); + assertTrue(!cursor.delete()); + assertNull(cursor.current()); + + int expectKey = (index == priIndex) ? 
2 : 1; + k = cursor.next(); + assertNotNull(k); + assertEquals(expectKey, k.intValue()); + k = cursor.current(); + assertNotNull(k); + assertEquals(expectKey, k.intValue()); + assertTrue(cursor.delete()); + assertTrue(!cursor.delete()); + assertNull(cursor.current()); + + k = cursor.next(); + assertNull(k); + + cursor.close(); + } + + txnCommit(txn); + close(); + } + + @Test + public void testDeleteFromSubIndex() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex = + store.getPrimaryIndex(Integer.class, MyEntity.class); + + SecondaryIndex secIndex = + store.getSecondaryIndex(priIndex, Integer.class, "secKey"); + + Transaction txn = txnBegin(); + MyEntity e = new MyEntity(); + e.secKey = 1; + e.priKey = 1; + priIndex.put(txn, e); + e.priKey = 2; + priIndex.put(txn, e); + e.priKey = 3; + priIndex.put(txn, e); + e.priKey = 4; + priIndex.put(txn, e); + txnCommit(txn); + + EntityIndex subIndex = secIndex.subIndex(1); + txn = txnBeginCursor(); + e = subIndex.get(txn, 1, null); + assertEquals(1, e.priKey); + assertEquals(Integer.valueOf(1), e.secKey); + e = subIndex.get(txn, 2, null); + assertEquals(2, e.priKey); + assertEquals(Integer.valueOf(1), e.secKey); + e = subIndex.get(txn, 3, null); + assertEquals(3, e.priKey); + assertEquals(Integer.valueOf(1), e.secKey); + e = subIndex.get(txn, 5, null); + assertNull(e); + + boolean deleted = subIndex.delete(txn, 1); + assertTrue(deleted); + assertNull(subIndex.get(txn, 1, null)); + assertNotNull(subIndex.get(txn, 2, null)); + + EntityCursor cursor = + subIndex.entities(txn, getWriteCursorConfig()); + boolean saw4 = false; + for (MyEntity e2 = cursor.first(); e2 != null; e2 = cursor.next()) { + if (e2.priKey == 3) { + cursor.delete(); + } + if (e2.priKey == 4) { + saw4 = true; + } + } + cursor.close(); + assertTrue(saw4); + assertNull(subIndex.get(txn, 1, null)); + assertNull(subIndex.get(txn, 3, null)); + assertNotNull(subIndex.get(txn, 2, null)); + assertNotNull(subIndex.get(txn, 4, null)); + + txnCommit(txn); + close(); + } + + @Entity + static class MyEntity { + + @PrimaryKey + private int priKey; + + @SecondaryKey(relate=MANY_TO_ONE) + private Integer secKey; + + private MyEntity() {} + } + + @Test + public void testSharedSequence() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex1 = + store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class); + + PrimaryIndex priIndex2 = + store.getPrimaryIndex(Integer.class, SharedSequenceEntity2.class); + + Transaction txn = txnBegin(); + SharedSequenceEntity1 e1 = new SharedSequenceEntity1(); + SharedSequenceEntity2 e2 = new SharedSequenceEntity2(); + priIndex1.put(txn, e1); + assertEquals(1, e1.key); + priIndex2.putNoOverwrite(txn, e2); + assertEquals(Integer.valueOf(2), e2.key); + e1.key = 0; + priIndex1.putNoOverwrite(txn, e1); + assertEquals(3, e1.key); + e2.key = null; + priIndex2.put(txn, e2); + assertEquals(Integer.valueOf(4), e2.key); + txnCommit(txn); + + close(); + } + + @Entity + static class SharedSequenceEntity1 { + + @PrimaryKey(sequence="shared") + private int key; + } + + @Entity + static class SharedSequenceEntity2 { + + @PrimaryKey(sequence="shared") + private Integer key; + } + + @Test + public void testSeparateSequence() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex1 = + store.getPrimaryIndex + (Integer.class, SeparateSequenceEntity1.class); + + PrimaryIndex priIndex2 = + store.getPrimaryIndex + (Integer.class, SeparateSequenceEntity2.class); + + Transaction txn = txnBegin(); + SeparateSequenceEntity1 e1 = new 
SeparateSequenceEntity1(); + SeparateSequenceEntity2 e2 = new SeparateSequenceEntity2(); + priIndex1.put(txn, e1); + assertEquals(1, e1.key); + priIndex2.putNoOverwrite(txn, e2); + assertEquals(Integer.valueOf(1), e2.key); + e1.key = 0; + priIndex1.putNoOverwrite(txn, e1); + assertEquals(2, e1.key); + e2.key = null; + priIndex2.put(txn, e2); + assertEquals(Integer.valueOf(2), e2.key); + txnCommit(txn); + + close(); + } + + @Entity + static class SeparateSequenceEntity1 { + + @PrimaryKey(sequence="seq1") + private int key; + } + + @Entity + static class SeparateSequenceEntity2 { + + @PrimaryKey(sequence="seq2") + private Integer key; + } + + @Test + public void testCompositeSequence() + throws DatabaseException { + + open(); + + PrimaryIndex + priIndex1 = + store.getPrimaryIndex + (CompositeSequenceEntity1.Key.class, + CompositeSequenceEntity1.class); + + PrimaryIndex + priIndex2 = + store.getPrimaryIndex + (CompositeSequenceEntity2.Key.class, + CompositeSequenceEntity2.class); + + Transaction txn = txnBegin(); + CompositeSequenceEntity1 e1 = new CompositeSequenceEntity1(); + CompositeSequenceEntity2 e2 = new CompositeSequenceEntity2(); + priIndex1.put(txn, e1); + assertEquals(1, e1.key.key); + priIndex2.putNoOverwrite(txn, e2); + assertEquals(Integer.valueOf(1), e2.key.key); + e1.key = null; + priIndex1.putNoOverwrite(txn, e1); + assertEquals(2, e1.key.key); + e2.key = null; + priIndex2.put(txn, e2); + assertEquals(Integer.valueOf(2), e2.key.key); + txnCommit(txn); + + txn = txnBeginCursor(); + EntityCursor c1 = + priIndex1.entities(txn, null); + e1 = c1.next(); + assertEquals(2, e1.key.key); + e1 = c1.next(); + assertEquals(1, e1.key.key); + e1 = c1.next(); + assertNull(e1); + c1.close(); + txnCommit(txn); + + txn = txnBeginCursor(); + EntityCursor c2 = + priIndex2.entities(txn, null); + e2 = c2.next(); + assertEquals(Integer.valueOf(2), e2.key.key); + e2 = c2.next(); + assertEquals(Integer.valueOf(1), e2.key.key); + e2 = c2.next(); + assertNull(e2); + c2.close(); + txnCommit(txn); + + close(); + } + + @Entity + static class CompositeSequenceEntity1 { + + @Persistent + static class Key implements Comparable { + + @KeyField(1) + private int key; + + public int compareTo(Key o) { + /* Reverse the natural order. */ + return o.key - key; + } + } + + @PrimaryKey(sequence="seq1") + private Key key; + } + + /** + * Same as CompositeSequenceEntity1 but using Integer rather than int for + * the key type. + */ + @Entity + static class CompositeSequenceEntity2 { + + @Persistent + static class Key implements Comparable { + + @KeyField(1) + private Integer key; + + public int compareTo(Key o) { + /* Reverse the natural order. */ + return o.key - key; + } + } + + @PrimaryKey(sequence="seq2") + private Key key; + } + + /** + * When opening read-only, secondaries are not opened when the primary is + * opened, causing a different code path to be used for opening + * secondaries. For a RawStore in particular, this caused an unreported + * NullPointerException in JE 3.0.12. No SR was created because the use + * case is very obscure and was discovered by code inspection. 
+  */
+     @Test
+     public void testOpenRawStoreReadOnly()
+         throws DatabaseException {
+
+         open();
+         store.getPrimaryIndex(Integer.class, MyEntity.class);
+         close();
+
+         StoreConfig config = new StoreConfig();
+         config.setReadOnly(true);
+         config.setTransactional(envConfig.getTransactional());
+         RawStore rawStore = new RawStore(env, "test", config);
+
+         String clsName = MyEntity.class.getName();
+         rawStore.getSecondaryIndex(clsName, "secKey");
+
+         rawStore.close();
+     }
+
+     /**
+      * When opening an X_TO_MANY secondary that has a persistent key class,
+      * the key class was not recognized as being persistent if it was never
+      * before referenced when getSecondaryIndex was called.  This was a bug
+      * in JE 3.0.12, reported on OTN.  [#15103]
+      */
+     @Test
+     public void testToManyKeyClass()
+         throws DatabaseException {
+
+         open();
+
+         PrimaryIndex<Integer, ToManyKeyEntity> priIndex =
+             store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+         SecondaryIndex<ToManyKey, Integer, ToManyKeyEntity> secIndex =
+             store.getSecondaryIndex(priIndex, ToManyKey.class, "key2");
+
+         priIndex.put(new ToManyKeyEntity());
+         secIndex.get(new ToManyKey());
+
+         close();
+     }
+
+     /**
+      * Test a fix for a bug where opening a TO_MANY secondary index would
+      * fail with "IllegalArgumentException: Wrong secondary key class: ..."
+      * when the store was opened read-only.  [#15156]
+      */
+     @Test
+     public void testToManyReadOnly()
+         throws DatabaseException {
+
+         open();
+         PrimaryIndex<Integer, ToManyKeyEntity> priIndex =
+             store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+         priIndex.put(new ToManyKeyEntity());
+         close();
+
+         openReadOnly();
+         priIndex = store.getPrimaryIndex(Integer.class, ToManyKeyEntity.class);
+         SecondaryIndex<ToManyKey, Integer, ToManyKeyEntity> secIndex =
+             store.getSecondaryIndex(priIndex, ToManyKey.class, "key2");
+         secIndex.get(new ToManyKey());
+         close();
+     }
+
+     @Persistent
+     static class ToManyKey {
+
+         @KeyField(1)
+         int value = 99;
+     }
+
+     @Entity
+     static class ToManyKeyEntity {
+
+         @PrimaryKey
+         int key = 88;
+
+         @SecondaryKey(relate=ONE_TO_MANY)
+         Set<ToManyKey> key2;
+
+         ToManyKeyEntity() {
+             key2 = new HashSet<ToManyKey>();
+             key2.add(new ToManyKey());
+         }
+     }
+
+     /* */
+     @Test
+     public void testDeferredWrite()
+         throws DatabaseException {
+
+         if (envConfig.getTransactional()) {
+             /* Deferred write cannot be used with transactions.
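+                A deferred-write store buffers changes and guarantees their
+                durability only after sync() is called or the database is
+                closed, which is why it cannot be combined with
+                transactions.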
+              */
+             return;
+         }
+         StoreConfig storeConfig = new StoreConfig();
+         storeConfig.setDeferredWrite(true);
+         storeConfig.setAllowCreate(true);
+         open(storeConfig);
+         assertTrue(store.getConfig().getDeferredWrite());
+
+         PrimaryIndex<Integer, MyEntity> priIndex =
+             store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+         SecondaryIndex<Integer, Integer, MyEntity> secIndex =
+             store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+         DatabaseConfig dbConfig = priIndex.getDatabase().getConfig();
+         assertTrue(dbConfig.getDeferredWrite());
+         dbConfig = secIndex.getDatabase().getConfig();
+         assertTrue(dbConfig.getDeferredWrite());
+
+         MyEntity e = new MyEntity();
+         e.priKey = 1;
+         e.secKey = 1;
+         priIndex.put(e);
+
+         EntityCursor<MyEntity> cursor = secIndex.entities();
+         cursor.next();
+         assertEquals(1, cursor.count());
+         cursor.close();
+
+         e.priKey = 2;
+         priIndex.put(e);
+         cursor = secIndex.entities();
+         cursor.next();
+         assertEquals(2, cursor.count());
+         cursor.close();
+
+         class MySyncHook implements Store.SyncHook {
+
+             List<Database> synced = new ArrayList<Database>();
+
+             public void onSync(Database db) {
+                 synced.add(db);
+             }
+         }
+
+         MySyncHook hook = new MySyncHook();
+         Store.setSyncHook(hook);
+         store.sync();
+         assertEquals(2, hook.synced.size());
+         assertTrue(hook.synced.contains(priIndex.getDatabase()));
+         assertTrue(hook.synced.contains(secIndex.getDatabase()));
+
+         close();
+     }
+     /* */
+
+     /* */
+     @Test
+     public void testTemporary()
+         throws DatabaseException {
+
+         if (envConfig.getTransactional()) {
+             /* Temporary cannot be used with transactions. */
+             return;
+         }
+         StoreConfig storeConfig = new StoreConfig();
+         storeConfig.setTemporary(true);
+         storeConfig.setAllowCreate(true);
+         open(storeConfig);
+         assertTrue(store.getConfig().getTemporary());
+
+         PrimaryIndex<Integer, MyEntity> priIndex =
+             store.getPrimaryIndex(Integer.class, MyEntity.class);
+
+         SecondaryIndex<Integer, Integer, MyEntity> secIndex =
+             store.getSecondaryIndex(priIndex, Integer.class, "secKey");
+
+         PrimaryIndex<Integer, SharedSequenceEntity1> priIndex1 =
+             store.getPrimaryIndex(Integer.class, SharedSequenceEntity1.class);
+
+         /* All temporary databases exist before closing. */
+         PersistTestUtils.assertDbExists
+             (true, env, STORE_NAME, MyEntity.class.getName(), null);
+         PersistTestUtils.assertDbExists
+             (true, env, STORE_NAME, MyEntity.class.getName(), "secKey");
+         PersistTestUtils.assertDbExists
+             (true, env, STORE_NAME, SharedSequenceEntity1.class.getName(),
+              null);
+         PersistTestUtils.assertDbExists
+             (true, env, STORE_NAME, "com.sleepycat.persist.formats", null);
+         PersistTestUtils.assertDbExists
+             (true, env, STORE_NAME, "com.sleepycat.persist.sequences", null);
+
+         close();
+
+         /* All temporary databases are deleted after closing. */
+         PersistTestUtils.assertDbExists
+             (false, env, STORE_NAME, MyEntity.class.getName(), null);
+         PersistTestUtils.assertDbExists
+             (false, env, STORE_NAME, MyEntity.class.getName(), "secKey");
+         PersistTestUtils.assertDbExists
+             (false, env, STORE_NAME, SharedSequenceEntity1.class.getName(),
+              null);
+         PersistTestUtils.assertDbExists
+             (false, env, STORE_NAME, "com.sleepycat.persist.formats", null);
+         PersistTestUtils.assertDbExists
+             (false, env, STORE_NAME, "com.sleepycat.persist.sequences", null);
+     }
+     /* */
+
+     /**
+      * When Y is opened and X has a key with relatedEntity=Y.class, X should
+      * be opened automatically.  If X is not opened, foreign key constraints
+      * will not be enforced.  [#15358]
+      */
+     @Test
+     public void testAutoOpenRelatedEntity()
+         throws DatabaseException {
+
+         PrimaryIndex<Integer, RelatedY> priY;
+         PrimaryIndex<Integer, RelatedX> priX;
+
+         /* Opening X should create (and open) Y and enforce constraints.
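+            Because RelatedX.key2 declares relatedEntity=RelatedY.class,
+            getting the primary index for X must also open Y's database;
+            otherwise the foreign key check on put() could not run.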
*/ + open(); + priX = store.getPrimaryIndex(Integer.class, RelatedX.class); + PersistTestUtils.assertDbExists + (true, env, STORE_NAME, RelatedY.class.getName(), null); + if (isTransactional) { + /* Constraint enforcement requires transactions. */ + try { + priX.put(new RelatedX()); + fail(); + } catch (DatabaseException e) { + assertTrue + ("" + e.getMessage(), (e.getMessage().indexOf + ("foreign key not allowed: it is not present") >= 0) || + (e.getMessage().indexOf("DB_FOREIGN_CONFLICT") >= 0)); + } + } + priY = store.getPrimaryIndex(Integer.class, RelatedY.class); + priY.put(new RelatedY()); + priX.put(new RelatedX()); + close(); + + /* Delete should cascade even when X is not opened explicitly. */ + open(); + priY = store.getPrimaryIndex(Integer.class, RelatedY.class); + assertEquals(1, priY.count()); + priY.delete(88); + assertEquals(0, priY.count()); + priX = store.getPrimaryIndex(Integer.class, RelatedX.class); + assertEquals(0, priX.count()); /* Failed prior to [#15358] fix. */ + close(); + } + + @Entity + static class RelatedX { + + @PrimaryKey + int key = 99; + + @SecondaryKey(relate=ONE_TO_ONE, + relatedEntity=RelatedY.class, + onRelatedEntityDelete=CASCADE) + int key2 = 88; + + RelatedX() { + } + } + + @Entity + static class RelatedY { + + @PrimaryKey + int key = 88; + + RelatedY() { + } + } + + @Test + public void testSecondaryBulkLoad1() + throws DatabaseException { + + doSecondaryBulkLoad(true); + } + + @Test + public void testSecondaryBulkLoad2() + throws DatabaseException { + + doSecondaryBulkLoad(false); + } + + private void doSecondaryBulkLoad(boolean closeAndOpenNormally) + throws DatabaseException { + + PrimaryIndex priX; + PrimaryIndex priY; + SecondaryIndex secX; + + /* Open priX with SecondaryBulkLoad=true. */ + StoreConfig config = new StoreConfig(); + config.setAllowCreate(true); + config.setSecondaryBulkLoad(true); + open(config); + + /* Getting priX should not create the secondary index. */ + priX = store.getPrimaryIndex(Integer.class, RelatedX.class); + PersistTestUtils.assertDbExists + (false, env, STORE_NAME, RelatedX.class.getName(), "key2"); + + /* We can put records that violate the secondary key constraint. */ + priX.put(new RelatedX()); + + if (closeAndOpenNormally) { + /* Open normally and attempt to populate the secondary. */ + close(); + open(); + if (isTransactional && DbCompat.POPULATE_ENFORCES_CONSTRAINTS) { + /* Constraint enforcement requires transactions. */ + try { + /* Before adding the foreign key, constraint is violated. */ + priX = store.getPrimaryIndex(Integer.class, + RelatedX.class); + fail(); + } catch (DatabaseException e) { + assertTrue + (e.toString(), + e.toString().contains("foreign key not allowed")); + } + } + /* Open priX with SecondaryBulkLoad=true. */ + close(); + open(config); + /* Add the foreign key to avoid the constraint error. */ + priY = store.getPrimaryIndex(Integer.class, RelatedY.class); + priY.put(new RelatedY()); + /* Open normally and the secondary will be populated. */ + close(); + open(); + priX = store.getPrimaryIndex(Integer.class, RelatedX.class); + PersistTestUtils.assertDbExists + (true, env, STORE_NAME, RelatedX.class.getName(), "key2"); + secX = store.getSecondaryIndex(priX, Integer.class, "key2"); + } else { + /* Get secondary index explicitly and it will be populated. */ + if (isTransactional && DbCompat.POPULATE_ENFORCES_CONSTRAINTS) { + /* Constraint enforcement requires transactions. */ + try { + /* Before adding the foreign key, constraint is violated. 
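+                    Populating the secondary replays the existing primary
+                    records, so the record stored while the index was
+                    disabled now triggers the constraint check.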
*/ + secX = store.getSecondaryIndex(priX, Integer.class, + "key2"); + fail(); + } catch (DatabaseException e) { + assertTrue + (e.toString(), + e.toString().contains("foreign key not allowed")); + } + } + /* Add the foreign key. */ + priY = store.getPrimaryIndex(Integer.class, RelatedY.class); + priY.put(new RelatedY()); + secX = store.getSecondaryIndex(priX, Integer.class, "key2"); + PersistTestUtils.assertDbExists + (true, env, STORE_NAME, RelatedX.class.getName(), "key2"); + } + + RelatedX x = secX.get(88); + assertNotNull(x); + close(); + } + + @Test + public void testPersistentFields() + throws DatabaseException { + + open(); + PrimaryIndex pri = + store.getPrimaryIndex(Integer.class, PersistentFields.class); + PersistentFields o1 = new PersistentFields(-1, 1, 2, 3, 4, 5, 6); + assertNull(pri.put(o1)); + PersistentFields o2 = pri.get(-1); + assertNotNull(o2); + assertEquals(0, o2.transient1); + assertEquals(0, o2.transient2); + assertEquals(0, o2.transient3); + assertEquals(4, o2.persistent1); + assertEquals(5, o2.persistent2); + assertEquals(6, o2.persistent3); + close(); + } + + @Entity + static class PersistentFields { + + @PrimaryKey int key; + + transient int transient1; + @NotPersistent int transient2; + @NotPersistent transient int transient3; + + int persistent1; + @NotTransient int persistent2; + @NotTransient transient int persistent3; + + PersistentFields(int k, + int t1, + int t2, + int t3, + int p1, + int p2, + int p3) { + key = k; + transient1 = t1; + transient2 = t2; + transient3 = t3; + persistent1 = p1; + persistent2 = p2; + persistent3 = p3; + } + + private PersistentFields() {} + } + + /** + * When a primary or secondary has a persistent key class, the key class + * was not recognized as being persistent when getPrimaryConfig, + * getSecondaryConfig, or getSubclassIndex was called, if that key class + * was not previously referenced. All three cases are tested by calling + * getSecondaryConfig. This was a bug in JE 3.3.69, reported on OTN. + * [#16407] + */ + @Test + public void testKeyClassInitialization() + throws DatabaseException { + + open(); + store.getSecondaryConfig(ToManyKeyEntity.class, "key2"); + close(); + } + + @Test + public void testKeyName() + throws DatabaseException { + + open(); + + PrimaryIndex pri1 = + store.getPrimaryIndex(Long.class, BookEntity.class); + PrimaryIndex pri2 = + store.getPrimaryIndex(Long.class, AuthorEntity.class); + + BookEntity book = new BookEntity(); + pri1.put(book); + AuthorEntity author = new AuthorEntity(); + author.bookIds.add(book.bookId); + pri2.put(author); + + close(); + + open(); + pri1 = store.getPrimaryIndex(Long.class, BookEntity.class); + pri2 = store.getPrimaryIndex(Long.class, AuthorEntity.class); + book = pri1.get(1L); + assertNotNull(book); + author = pri2.get(1L); + assertNotNull(author); + close(); + } + + @Entity + static class AuthorEntity { + + @PrimaryKey(sequence="authorSeq") + long authorId; + + @SecondaryKey(relate=MANY_TO_MANY, relatedEntity=BookEntity.class, + name="bookId", onRelatedEntityDelete=NULLIFY) + Set bookIds = new HashSet(); + } + + @Entity + static class BookEntity { + + @PrimaryKey(sequence="bookSeq") + long bookId; + } + + /** + * Checks that we get an appropriate exception when storing an entity + * subclass instance, which contains a secondary key, without registering + * the subclass up front. 
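+      *
+      * For illustration, registering the subclass up front looks like this
+      * (a sketch using the classes defined below):
+      *
+      *   AnnotationModel model = new AnnotationModel();
+      *   model.registerClass(ExtendedStatement.class);
+      *   StoreConfig config = new StoreConfig();
+      *   config.setModel(model);
+      *   // open the EntityStore with this config before any put()
+      *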
[#16399] + */ + @Test + public void testPutEntitySubclassWithoutRegisterClass() + throws DatabaseException { + + open(); + + final PrimaryIndex pri = + store.getPrimaryIndex(Long.class, Statement.class); + + final Transaction txn = txnBegin(); + pri.put(txn, new Statement(1)); + try { + pri.put(txn, new ExtendedStatement(2, null)); + fail(); + } catch (IllegalArgumentException expected) { + assertTrue(expected.toString(), expected.getMessage().contains + ("Entity subclasses defining a secondary key must be " + + "registered by calling EntityModel.registerClass or " + + "EntityStore.getSubclassIndex before storing an instance " + + "of the subclass: " + ExtendedStatement.class.getName())); + } + txnAbort(txn); + + close(); + } + + /** + * Checks that registerClass avoids an exception when storing an entity + * subclass instance, which defines a secondary key. [#16399] + */ + @Test + public void testPutEntitySubclassWithRegisterClass() + throws DatabaseException { + + open(ExtendedStatement.class); + + final PrimaryIndex pri = + store.getPrimaryIndex(Long.class, Statement.class); + + final Transaction txn = txnBegin(); + pri.put(txn, new Statement(1)); + pri.put(txn, new ExtendedStatement(2, "abc")); + txnCommit(txn); + + final SecondaryIndex sec = + store.getSubclassIndex(pri, ExtendedStatement.class, + String.class, "name"); + + ExtendedStatement o = sec.get("abc"); + assertNotNull(o); + assertEquals(2, o.id); + + close(); + } + + /** + * Same as testPutEntitySubclassWithRegisterClass but store the first + * instance of the subclass after closing and reopening the store, + * *without* calling registerClass. This ensures that a single call to + * registerClass is sufficient and subsequent use of the store does not + * require it. [#16399] + */ + @Test + public void testPutEntitySubclassWithRegisterClass2() + throws DatabaseException { + + open(ExtendedStatement.class); + + PrimaryIndex pri = + store.getPrimaryIndex(Long.class, Statement.class); + + Transaction txn = txnBegin(); + pri.put(txn, new Statement(1)); + txnCommit(txn); + + close(); + open(); + + pri = store.getPrimaryIndex(Long.class, Statement.class); + + txn = txnBegin(); + pri.put(txn, new ExtendedStatement(2, "abc")); + txnCommit(txn); + + final SecondaryIndex sec = + store.getSubclassIndex(pri, ExtendedStatement.class, + String.class, "name"); + + ExtendedStatement o = sec.get("abc"); + assertNotNull(o); + assertEquals(2, o.id); + + close(); + } + + /** + * Checks that getSubclassIndex can be used instead of registerClass to + * avoid an exception when storing an entity subclass instance, which + * defines a secondary key. [#16399] + */ + @Test + public void testPutEntitySubclassWithGetSubclassIndex() + throws DatabaseException { + + open(); + + final PrimaryIndex pri = + store.getPrimaryIndex(Long.class, Statement.class); + + final SecondaryIndex sec = + store.getSubclassIndex(pri, ExtendedStatement.class, + String.class, "name"); + + final Transaction txn = txnBegin(); + pri.put(txn, new Statement(1)); + pri.put(txn, new ExtendedStatement(2, "abc")); + txnCommit(txn); + + ExtendedStatement o = sec.get("abc"); + assertNotNull(o); + assertEquals(2, o.id); + + close(); + } + + /** + * Same as testPutEntitySubclassWithGetSubclassIndex2 but store the first + * instance of the subclass after closing and reopening the store, + * *without* calling getSubclassIndex. This ensures that a single call to + * getSubclassIndex is sufficient and subsequent use of the store does not + * require it. 
[#16399] + */ + @Test + public void testPutEntitySubclassWithGetSubclassIndex2() + throws DatabaseException { + + open(); + + PrimaryIndex pri = + store.getPrimaryIndex(Long.class, Statement.class); + + SecondaryIndex sec = + store.getSubclassIndex(pri, ExtendedStatement.class, + String.class, "name"); + + Transaction txn = txnBegin(); + pri.put(txn, new Statement(1)); + txnCommit(txn); + + close(); + open(); + + pri = store.getPrimaryIndex(Long.class, Statement.class); + + txn = txnBegin(); + pri.put(txn, new ExtendedStatement(2, "abc")); + txnCommit(txn); + + sec = store.getSubclassIndex(pri, ExtendedStatement.class, + String.class, "name"); + + ExtendedStatement o = sec.get("abc"); + assertNotNull(o); + assertEquals(2, o.id); + + close(); + } + + /** + * Checks that secondary population occurs only once when an index is + * created, not every time it is opened, even when it is empty. This is a + * JE-only test because we don't have a portable way to get stats that + * indicate whether primary reads were performed. [#16399] + */ + /* */ + @Test + public void testRepeatingSecondaryPopulate() { + + /* + * Write 100 records and expect over 100 lock requests. During this + * step, the secondary is not opened (we do not call registerClass or + * getSubclassIndex). + */ + open(); + PrimaryIndex pri = + store.getPrimaryIndex(Long.class, Statement.class); + for (int i = 0; i < 100; i += 1) { + pri.put(null, new Statement(i)); + } + close(); + + final StatsConfig clearStats = new StatsConfig().setClear(true); + EnvironmentStats stats = env.getStats(clearStats); + assertTrue(stats.getNRequests() > 100); + + /* + * Open 3 times, calling registerClass to cause the secondary to be + * opened. Only the first iteration should cause secondary population. + */ + for (int i = 0; i < 3; i += 1) { + open(ExtendedStatement.class); + pri = store.getPrimaryIndex(Long.class, Statement.class); + stats = env.getStats(clearStats); + if (i == 0) { + assertTrue(stats.getNRequests() > 100); + } else { + assertTrue(stats.getNRequests() < 100); + } + close(); + } + } + /* */ + + @Entity + static class Statement { + + @PrimaryKey + long id; + + Statement(long id) { + this.id = id; + } + + private Statement() {} + } + + @Persistent + static class ExtendedStatement extends Statement { + + @SecondaryKey(relate=MANY_TO_ONE) + String name; + + ExtendedStatement(long id, String name) { + super(id); + this.name = name; + } + + private ExtendedStatement() {} + } + + @Test + public void testCustomCompare() + throws DatabaseException { + + open(); + + PrimaryIndex + priIndex = store.getPrimaryIndex + (ReverseIntKey.class, CustomCompareEntity.class); + + SecondaryIndex + secIndex1 = store.getSecondaryIndex(priIndex, ReverseIntKey.class, + "secKey1"); + + SecondaryIndex + secIndex2 = store.getSecondaryIndex(priIndex, ReverseIntKey.class, + "secKey2"); + + Transaction txn = txnBegin(); + for (int i = 1; i <= 5; i += 1) { + assertTrue(priIndex.putNoOverwrite(txn, + new CustomCompareEntity(i))); + } + txnCommit(txn); + + txn = txnBeginCursor(); + EntityCursor c = priIndex.entities(txn, null); + for (int i = 5; i >= 1; i -= 1) { + CustomCompareEntity e = c.next(); + assertNotNull(e); + assertEquals(new ReverseIntKey(i), e.key); + } + c.close(); + txnCommit(txn); + + txn = txnBeginCursor(); + c = secIndex1.entities(txn, null); + for (int i = -1; i >= -5; i -= 1) { + CustomCompareEntity e = c.next(); + assertNotNull(e); + assertEquals(new ReverseIntKey(-i), e.key); + assertEquals(new ReverseIntKey(i), e.secKey1); + } + c.close(); + 
txnCommit(txn); + + txn = txnBeginCursor(); + c = secIndex2.entities(txn, null); + for (int i = -1; i >= -5; i -= 1) { + CustomCompareEntity e = c.next(); + assertNotNull(e); + assertEquals(new ReverseIntKey(-i), e.key); + assertTrue(e.secKey2.contains(new ReverseIntKey(i))); + } + c.close(); + txnCommit(txn); + + close(); + } + + @Entity + static class CustomCompareEntity { + + @PrimaryKey + private ReverseIntKey key; + + @SecondaryKey(relate=MANY_TO_ONE) + private ReverseIntKey secKey1; + + @SecondaryKey(relate=ONE_TO_MANY) + private final Set secKey2 = new HashSet(); + + private CustomCompareEntity() {} + + CustomCompareEntity(int i) { + key = new ReverseIntKey(i); + secKey1 = new ReverseIntKey(-i); + secKey2.add(new ReverseIntKey(-i)); + } + } + + @Persistent + static class ReverseIntKey implements Comparable { + + @KeyField(1) + private int key; + + public int compareTo(ReverseIntKey o) { + /* Reverse the natural order. */ + return o.key - key; + } + + private ReverseIntKey() {} + + ReverseIntKey(int key) { + this.key = key; + } + + @Override + public boolean equals(Object o) { + return key == ((ReverseIntKey) o).key; + } + + @Override + public int hashCode() { + return key; + } + + @Override + public String toString() { + return "Key = " + key; + } + } + + /** + * Ensures that custom comparators are persisted and work correctly during + * recovery. JE recovery uses comparators, so they are serialized and + * stored in the DatabaseImpl. They are deserialized during recovery prior + * to opening the EntityStore and its format catalog. But the formats are + * needed by the comparator, so they are specially created when needed. + * + * In particular we need to ensure that enum key fields work correctly, + * since their formats are not static (like simple type formats are). + * [#17140] + * + * Note that we don't need to actually cause a recovery in order to test + * the deserialization and subsequent use of comparators. The JE + * DatabaseConfig.setBtreeComparator method serializes and deserializes the + * comparator. The comparator is initialized on its first use, just as if + * recovery were run. 
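+     *
+     * A rough sketch of that round trip (MyComparator is hypothetical and
+     * must be a serializable Comparator):
+     *
+     *   DatabaseConfig config = new DatabaseConfig();
+     *   config.setBtreeComparator(new MyComparator());
+     *   // getBtreeComparator() returns an instance rebuilt from the
+     *   // serialized form, initialized on first use just as after recovery.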
+ */ + @Test + public void testStoredComparators() + throws DatabaseException { + + open(); + + PrimaryIndex priIndex = + store.getPrimaryIndex(StoredComparatorEntity.Key.class, + StoredComparatorEntity.class); + + SecondaryIndex secIndex = + store.getSecondaryIndex + (priIndex, StoredComparatorEntity.MyEnum.class, "secKey"); + + final StoredComparatorEntity.Key[] priKeys = + new StoredComparatorEntity.Key[] { + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 1, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 1, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 2, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 2, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.B, 1, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.B, 1, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.C, 0, + StoredComparatorEntity.MyEnum.C), + }; + + final StoredComparatorEntity.MyEnum[] secKeys = + new StoredComparatorEntity.MyEnum[] { + StoredComparatorEntity.MyEnum.C, + StoredComparatorEntity.MyEnum.B, + StoredComparatorEntity.MyEnum.A, + null, + StoredComparatorEntity.MyEnum.A, + StoredComparatorEntity.MyEnum.B, + StoredComparatorEntity.MyEnum.C, + }; + + assertEquals(priKeys.length, secKeys.length); + final int nEntities = priKeys.length; + + Transaction txn = txnBegin(); + for (int i = 0; i < nEntities; i += 1) { + priIndex.put(txn, + new StoredComparatorEntity(priKeys[i], secKeys[i])); + } + txnCommit(txn); + + txn = txnBeginCursor(); + EntityCursor entities = + priIndex.entities(txn, null); + for (int i = nEntities - 1; i >= 0; i -= 1) { + StoredComparatorEntity e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + assertNull(entities.next()); + entities.close(); + txnCommit(txn); + + txn = txnBeginCursor(); + entities = secIndex.entities(txn, null); + for (StoredComparatorEntity.MyEnum myEnum : + EnumSet.allOf(StoredComparatorEntity.MyEnum.class)) { + for (int i = nEntities - 1; i >= 0; i -= 1) { + if (secKeys[i] == myEnum) { + StoredComparatorEntity e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + } + } + assertNull(entities.next()); + entities.close(); + txnCommit(txn); + + close(); + } + + @Entity + static class StoredComparatorEntity { + + enum MyEnum { A, B, C }; + + @Persistent + static class Key implements Comparable { + + @KeyField(1) + MyEnum f1; + + @KeyField(2) + Integer f2; + + @KeyField(3) + MyEnum f3; + + private Key() {} + + Key(MyEnum f1, Integer f2, MyEnum f3) { + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + } + + public int compareTo(Key o) { + /* Reverse the natural order. 
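+                 * Each field comparison below is negated, giving descending
+                 * order on f1, then f2, then f3.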
*/
+                int i = f1.compareTo(o.f1);
+                if (i != 0) return -i;
+                i = f2.compareTo(o.f2);
+                if (i != 0) return -i;
+                i = f3.compareTo(o.f3);
+                if (i != 0) return -i;
+                return 0;
+            }
+
+            @Override
+            public boolean equals(Object other) {
+                if (!(other instanceof Key)) {
+                    return false;
+                }
+                Key o = (Key) other;
+                return f1 == o.f1 &&
+                       f2.equals(o.f2) &&
+                       f3 == o.f3;
+            }
+
+            @Override
+            public int hashCode() {
+                return f1.ordinal() + f2 + f3.ordinal();
+            }
+
+            @Override
+            public String toString() {
+                return "[Key " + f1 + ' ' + f2 + ' ' + f3 + ']';
+            }
+        }
+
+        @PrimaryKey
+        Key key;
+
+        @SecondaryKey(relate=MANY_TO_ONE)
+        private MyEnum secKey;
+
+        private StoredComparatorEntity() {}
+
+        StoredComparatorEntity(Key key, MyEnum secKey) {
+            this.key = key;
+            this.secKey = secKey;
+        }
+
+        @Override
+        public String toString() {
+            return "[pri = " + key + " sec = " + secKey + ']';
+        }
+    }
+
+    @Test
+    public void testEmbeddedMapTypes()
+        throws DatabaseException {
+        open();
+        PrimaryIndex<Integer, EmbeddedMapTypes> pri =
+            store.getPrimaryIndex(Integer.class, EmbeddedMapTypes.class);
+        pri.put(null, new EmbeddedMapTypes());
+        close();
+
+        open();
+        pri = store.getPrimaryIndex(Integer.class, EmbeddedMapTypes.class);
+        EmbeddedMapTypes entity = pri.get(1);
+        assertNotNull(entity);
+        EmbeddedMapTypes entity2 = new EmbeddedMapTypes();
+        assertEquals(entity.getF1(), entity2.getF1());
+        close();
+    }
+
+    enum MyEnum { ONE, TWO };
+
+    @Entity
+    static class EmbeddedMapTypes {
+
+        @PrimaryKey
+        private final int f0 = 1;
+        private final Map<MyEnum, Map<MyEnum, MyEnum>> f1;
+
+        EmbeddedMapTypes() {
+            f1 = new HashMap<MyEnum, Map<MyEnum, MyEnum>>();
+            HashMap<MyEnum, MyEnum> f2 = new HashMap<MyEnum, MyEnum>();
+            f2.put(MyEnum.ONE, MyEnum.ONE);
+            f1.put(MyEnum.ONE, f2);
+            f2 = new HashMap<MyEnum, MyEnum>();
+            f2.put(MyEnum.TWO, MyEnum.TWO);
+            f1.put(MyEnum.TWO, f2);
+        }
+
+        public int getPriKey() {
+            return f0;
+        }
+
+        public Map<MyEnum, Map<MyEnum, MyEnum>> getF1() {
+            return f1;
+        }
+    }
+}
diff --git a/test/com/sleepycat/persist/test/PersistTestUtils.java b/test/com/sleepycat/persist/test/PersistTestUtils.java
new file mode 100644
index 0000000..f60aba5
--- /dev/null
+++ b/test/com/sleepycat/persist/test/PersistTestUtils.java
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+package com.sleepycat.persist.test;
+
+import junit.framework.TestCase;
+
+import com.sleepycat.compat.DbCompat;
+import com.sleepycat.je.Environment;
+
+class PersistTestUtils {
+
+    /**
+     * Asserts that a database exists or does not exist, according to
+     * expectExists. If keyName is null, checks an entity database. If
+     * keyName is non-null, checks a secondary database.
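+     *
+     * For example, when databases share a single file, the entity database
+     * is named "persist#<storeName>#<entityClassName>" and a secondary key
+     * database appends "#<keyName>"; with separate database files the parts
+     * are joined with '-' instead, as the code below shows.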
+ */ + static void assertDbExists(boolean expectExists, + Environment env, + String storeName, + String entityClassName, + String keyName) { + String fileName; + String dbName; + if (DbCompat.SEPARATE_DATABASE_FILES) { + fileName = storeName + '-' + entityClassName; + if (keyName != null) { + fileName += "-" + keyName; + } + dbName = null; + } else { + fileName = null; + dbName = "persist#" + storeName + '#' + entityClassName; + if (keyName != null) { + dbName += "#" + keyName; + } + } + boolean exists = DbCompat.databaseExists(env, fileName, dbName); + if (expectExists != exists) { + TestCase.fail + ((expectExists ? "Does not exist: " : "Does exist: ") + + dbName); + } + } +} diff --git a/test/com/sleepycat/persist/test/ProxyToSimpleTypeTest.java b/test/com/sleepycat/persist/test/ProxyToSimpleTypeTest.java new file mode 100644 index 0000000..0b57ea4 --- /dev/null +++ b/test/com/sleepycat/persist/test/ProxyToSimpleTypeTest.java @@ -0,0 +1,294 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.math.BigDecimal; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PersistentProxy; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * BigDecimal is not a built-in SimpleType before je-4.1 version. The + * application using previous je versions will use BigDecimalProxy to store + * BigDecimal data. Therefore, we need to test if the BigDecimal data stored by + * BigDecimal proxy can be correctly read and updated by built-in BigDecimal + * format in je-4.1 version. + * + * We generate a database using je-4.0.103, which will contain a record. 
The
+ * record has BigDecimal data, which is stored by BigDecimalProxy:
+ *
+ *  @Entity
+ *  static class BigDecimalData {
+ *      @PrimaryKey
+ *      private int id;
+ *      private BigDecimal f1;
+ *
+ *      BigDecimalData() { }
+ *
+ *      BigDecimalData(int id, BigDecimal f1) {
+ *          this.id = id;
+ *          this.f1 = f1;
+ *      }
+ *
+ *      int getId() {
+ *          return id;
+ *      }
+ *
+ *      BigDecimal getF1() {
+ *          return f1;
+ *      }
+ *  }
+ *
+ *  @Persistent(proxyFor=BigDecimal.class)
+ *  static class BigDecimalProxy
+ *      implements PersistentProxy<BigDecimal> {
+ *
+ *      private String rep;
+ *      private BigDecimalProxy() {}
+ *      public BigDecimal convertProxy() {
+ *          return new BigDecimal(rep);
+ *      }
+ *
+ *      public void initializeProxy(BigDecimal o) {
+ *          rep = o.toString();
+ *      }
+ *  }
+ *
+ * The record stored is {1, new BigDecimal("123.1234000")}.
+ *
+ * This test should be excluded from the BDB build because it uses a stored JE
+ * log file.
+ */
+public class ProxyToSimpleTypeTest extends TestBase {
+
+    private static final String STORE_NAME = "test";
+
+    private File envHome;
+    private Environment env;
+    private EntityStore store;
+
+    @Before
+    public void setUp()
+        throws Exception {
+
+        envHome = SharedTestUtils.getTestDir();
+        super.setUp();
+    }
+
+    @After
+    public void tearDown() {
+        if (store != null) {
+            try {
+                store.close();
+            } catch (DatabaseException e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        if (env != null) {
+            try {
+                env.close();
+            } catch (DatabaseException e) {
+                System.out.println("During tearDown: " + e);
+            }
+        }
+        try {
+            TestUtils.removeLogFiles("TearDown", envHome, false);
+        } catch (Error e) {
+            System.out.println("During tearDown: " + e);
+        }
+        envHome = null;
+        store = null;
+        env = null;
+    }
+
+    private void open(boolean registerProxy)
+        throws DatabaseException {
+
+        EnvironmentConfig envConfig = TestEnv.BDB.getConfig();
+        envConfig.setAllowCreate(true);
+        env = new Environment(envHome, envConfig);
+
+        StoreConfig storeConfig = new StoreConfig();
+        storeConfig.setAllowCreate(true);
+        if (registerProxy) {
+            EntityModel model = new AnnotationModel();
+            model.registerClass(BigDecimalProxy.class);
+            storeConfig.setModel(model);
+        }
+        store = new EntityStore(env, STORE_NAME, storeConfig);
+    }
+
+    private void close()
+        throws DatabaseException {
+
+        if (store != null) {
+            store.close();
+            store = null;
+        }
+        if (env != null) {
+            env.close();
+            env = null;
+        }
+    }
+
+    @Test
+    public void testReadOldVersionBigDecimalByProxy()
+        throws IOException {
+
+        /* Copy log file resource to log file zero. */
+        TestUtils.loadLog(getClass(), "je-4.0.103_BigDecimal.jdb", envHome);
+
+        /* We do not register BigDecimalProxy. */
+        open(false /* registerProxy */);
+        PrimaryIndex<Integer, BigDecimalData> primary =
+            store.getPrimaryIndex(Integer.class, BigDecimalData.class);
+        BigDecimalData entity = primary.get(1);
+        assertNotNull(entity);
+
+        /* The precision is preserved in the old-version BigDecimal. */
+        assertEquals(new BigDecimal("123.1234000"), entity.getF1());
+        close();
+    }
+
+    /*
+     * SimpleFormat (FBigDec) will be used to update the data. The new data
+     * will then also be read by SimpleFormat (FBigDec).
+     */
+    @Test
+    public void testWriteReadSortedBigDecimal()
+        throws IOException {
+
+        /* Copy log file resource to log file zero. */
+        TestUtils.loadLog(getClass(), "je-4.0.103_BigDecimal.jdb", envHome);
+
+        open(false /* registerProxy */);
+
+        PrimaryIndex<Integer, BigDecimalData> primary =
+            store.getPrimaryIndex(Integer.class, BigDecimalData.class);
+
+        /*
+         * DPL will use the FBigDec format to write the BigDecimal as a
+         * sorted BigDecimal.
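+         * Sorted BigDecimal does not preserve trailing zeros, so the value
+         * written as "1234.1234000" is expected to read back as 1234.1234.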
+ */ + primary.put(null, + new BigDecimalData (1, new BigDecimal("1234.1234000"))); + + /* + * DPL will use FBigDec format to read the BigDecimal in sorted + * BigDecimal. + */ + BigDecimalData entity = primary.get(1); + assertNotNull(entity); + + /* Sorted BigDecimal cannot preserve precision. */ + assertEquals(new BigDecimal("1234.1234"), entity.getF1()); + close(); + + /* Re-open and read the data again. */ + open(false /*registerProxy*/); + primary = store.getPrimaryIndex(Integer.class, BigDecimalData.class); + + /* + * In the future, DPL will use FBigDec format to read the BigDecimal in + * sorted BigDecimal. + */ + entity = primary.get(1); + assertNotNull(entity); + + /* Sorted BigDecimal cannot preserve precision. */ + assertEquals(new BigDecimal("1234.1234"), entity.getF1()); + close(); + } + + /* + * If register proxy for SimpleType, IllegalArgumentException will be + * thrown. + */ + @Test + public void testRegisterProxyForSimpleType() + throws IOException { + + /* Copy log file resource to log file zero. */ + TestUtils.loadLog(getClass(), "je-4.0.103_BigDecimal.jdb", envHome); + try { + open(true /* registerProxy */); + fail(); + } catch (IllegalArgumentException e) { + /* We expect the exception. */ + } + close(); + } + + @Entity + static class BigDecimalData { + @PrimaryKey + private int id; + private BigDecimal f1; + + BigDecimalData() { } + + BigDecimalData(int id, BigDecimal f1) { + this.id = id; + this.f1 = f1; + } + + int getId() { + return id; + } + + BigDecimal getF1() { + return f1; + } + } + + @Persistent(proxyFor=BigDecimal.class) + static class BigDecimalProxy + implements PersistentProxy { + + private String rep; + private BigDecimalProxy() {} + + public BigDecimal convertProxy() { + return new BigDecimal(rep); + } + + public void initializeProxy(BigDecimal o) { + rep = o.toString(); + } + } +} diff --git a/test/com/sleepycat/persist/test/SecDupsWithoutComparatorEvolve_je_4_0.jdb b/test/com/sleepycat/persist/test/SecDupsWithoutComparatorEvolve_je_4_0.jdb new file mode 100644 index 0000000..862e72d Binary files /dev/null and b/test/com/sleepycat/persist/test/SecDupsWithoutComparatorEvolve_je_4_0.jdb differ diff --git a/test/com/sleepycat/persist/test/SecDupsWithoutComparator_je_4_0.jdb b/test/com/sleepycat/persist/test/SecDupsWithoutComparator_je_4_0.jdb new file mode 100644 index 0000000..239e380 Binary files /dev/null and b/test/com/sleepycat/persist/test/SecDupsWithoutComparator_je_4_0.jdb differ diff --git a/test/com/sleepycat/persist/test/SecondaryDupOrderEvolveTest.java b/test/com/sleepycat/persist/test/SecondaryDupOrderEvolveTest.java new file mode 100644 index 0000000..e5bb07e --- /dev/null +++ b/test/com/sleepycat/persist/test/SecondaryDupOrderEvolveTest.java @@ -0,0 +1,894 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.evolve.Deleter; +import com.sleepycat.persist.evolve.Mutations; +import com.sleepycat.persist.evolve.Renamer; +import com.sleepycat.persist.impl.ComplexFormat; +import com.sleepycat.persist.impl.Store; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * In the versions before je4.1, we failed to set the dup comparator. This + * bug applies only when the primary key has a comparator. The bug was fixed + * by setting the dup comparator to the primary key comparator, for all new + * secondary databases. [#17252] + */ +@RunWith(Parameterized.class) +public class SecondaryDupOrderEvolveTest extends TestBase { + private static final String STORE_NAME = "test"; + private File envHome; + private Environment env; + private EntityStore store; + private enum MyEnum { A, B, C }; + + private static String packageName = + "com.sleepycat.persist.test.SecondaryDupOrderEvolveTest$"; + private final String originalClsName; + private final String evolvedClsName; + private final String caseLabel; + private Class caseCls; + private StoredComparatorEntity_Base caseObj; + private static final String secDBName = + "persist#test#com.sleepycat.persist.test." + + "SecondaryDupOrderEvolveTest$StoredComparatorEntity#secKey"; + private static final String secDBName2 = + "persist#test#com.sleepycat.persist.test." 
+ + "SecondaryDupOrderEvolveTest$StoredComparatorEntity#new_secKey2"; + + private final static Key[] priKeys = + new Key[] { new Key(MyEnum.A, 1, MyEnum.A), + new Key(MyEnum.A, 1, MyEnum.B), + new Key(MyEnum.A, 2, MyEnum.A), + new Key(MyEnum.A, 2, MyEnum.B), + new Key(MyEnum.B, 1, MyEnum.A), + new Key(MyEnum.B, 1, MyEnum.B), + new Key(MyEnum.C, 0, MyEnum.C), + }; + + private final static MyEnum[] + secKeys = new MyEnum[] { MyEnum.C, MyEnum.B, MyEnum.A, + null, + MyEnum.A, MyEnum.B, MyEnum.C, + }; + + private final static Integer[] secKeys2 = + new Integer[] { 2, 1, 0, null, 0, 1, 2, }; + + private static final String[] EvolveCase = { + "StoredComparatorEntity_RenameSecField", + "StoredComparatorEntity_DeleteSecAnnotation", + "StoredComparatorEntity_DeleteSecField", + }; + + @Parameters + public static List genParams() { + List list = new ArrayList(); + for (String evolvedClsName : EvolveCase) + list.add(new Object[]{"StoredComparatorEntity", evolvedClsName}); + + return list; + } + + public SecondaryDupOrderEvolveTest(String originalClsName, + String evolvedClsName) { + this.originalClsName = packageName + originalClsName; + this.evolvedClsName =packageName + evolvedClsName; + this.caseLabel = evolvedClsName; + customName = "-" + caseLabel; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + envHome = SharedTestUtils.getTestDir(); + TestUtils.removeLogFiles("Setup", envHome, false); + /* Copy log file resource to log file zero. */ + TestUtils.loadLog + (getClass(), "SecDupsWithoutComparatorEvolve_je_4_0.jdb", envHome); + } + + @After + public void tearDown() { + + if (store != null) { + try { + store.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + try { + TestUtils.removeLogFiles("TearDown", envHome, false); + } catch (Error e) { + System.out.println("During tearDown: " + e); + } + envHome = null; + env = null; + } + + private void open(StoredComparatorEntity_Base caseObj) + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + storeConfig.setMutations(caseObj.getMutations()); + store = new EntityStore(env, STORE_NAME, storeConfig); + } + + /* Open the old database and delete the sec database. */ + private void openAndDeleteSecDatabase(String secDBName) + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + Transaction txn = env.beginTransaction(null, null); + env.removeDatabase(txn, secDBName); + txn.commit(); + close(); + } + + private void close() + throws DatabaseException { + + if (store != null) { + store.close(); + store = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /* + * During class evolution of ComplexFormat, we'll need to make sure that + * the new field, incorrectlyOrderedSecKeys, is copied from the old + * format to the new format, and key names are renamed or delete if + * appropriate. + */ + @Test + public void testEvolveOldDatabaseOrder() + throws Exception { + + /* Delete the seckey secondary database. 
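+         * Deleting it forces the store to re-create it on the next open,
+         * this time with the duplicate comparator set from the primary key
+         * comparator.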
*/ + openAndDeleteSecDatabase(secDBName); + caseCls = Class.forName(originalClsName); + caseObj = (StoredComparatorEntity_Base) caseCls.newInstance(); + + /* + * Re-open the database will create a new sec database for seckey with + * correct dup Order. catalog.flush will be called to update the + * ComplexFormat. + */ + Store.expectFlush = true; + try { + open(caseObj); + + /* + * incorrectlyOrderedSecKeys is null for the database created by + * old version je. + */ + ComplexFormat format = + (ComplexFormat) store.getModel().getRawType(originalClsName); + assertEquals(format.getIncorrectlyOrderedSecKeys(), null); + } finally { + Store.expectFlush = false; + } + /* Check primary database order. */ + caseObj.checkPrimaryDBOrder(store); + ComplexFormat format = + (ComplexFormat) store.getModel().getRawType(originalClsName); + + /* + * incorrectlyOrderedSecKeys is initialized and added the name of + * new_secKey2, which means new_secKey2 secondary database need to + * be assigned duplicate comparator. + */ + assertEquals(format.getIncorrectlyOrderedSecKeys().size(), 1); + assertTrue + (format.getIncorrectlyOrderedSecKeys().contains("new_secKey2")); + + /* + * Check all of the secondary databases order. + * + * Sec DB1: + * This secondary database is first deleted, and then re-created by new + * je. The dup comparator will be set to primary key comparator, so the + * Secondary duplicates order should be reverse order. + * + * Sec DB2 : + * Because the dup comparator will not set to primary key comparator in + * the old secondary databases, the secondary duplicates order should + * be nature order. + */ + caseObj.checkAllSecondaryDBOrder(store, true, false); + close(); + + /* Delete the seckey2 secondary database. */ + openAndDeleteSecDatabase(secDBName2); + caseCls = Class.forName(evolvedClsName); + caseObj = (StoredComparatorEntity_Base) caseCls.newInstance(); + + /* + * Re-open the database will create a new sec database for seckey2 with + * correct dup Order. catalog.flush will be called to update the + * ComplexFormat. + */ + Store.expectFlush = true; + try { + open(caseObj); + format = (ComplexFormat) + store.getModel().getRawType(evolvedClsName); + if (evolvedClsName.equals(packageName + EvolveCase[0])) { + + /* + * new_secKey2 will be changed to new_new_secKey2 in the new + * format's incorrectlyOrderedSecKeys. + */ + assertEquals(format.getIncorrectlyOrderedSecKeys().size(), 1); + assertTrue(format.getIncorrectlyOrderedSecKeys(). + contains("new_new_secKey2")); + } else { + + /* + * new_secKey2 will be deleted in the new format's + * incorrectlyOrderedSecKeys. + */ + assertEquals(format.getIncorrectlyOrderedSecKeys().size(), 0); + } + } finally { + Store.expectFlush = false; + } + + /* + * Check all of the secondary databases order. + * + * Sec DB1: + * This secondary database is created by new je, so the Secondary + * duplicates order should be reverse order. + * + * Sec DB2 : + * This secondary database is first deleted, and then re-created by new + * je. The dup comparator will be set to primary key comparator, so the + * Secondary duplicates order should be reverse order. + */ + caseObj.checkAllSecondaryDBOrder(store, true, true); + + /* + * incorrectlyOrderedSecKeys is empty after re-open the new_secKey2 + * secondary database. 
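+         * No remaining secondary database needs its duplicate order
+         * corrected at this point.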
+ */ + format = (ComplexFormat) store.getModel().getRawType(evolvedClsName); + assertEquals(format.getIncorrectlyOrderedSecKeys().size(), 0); + close(); + } + + @Entity(version=1) + static class StoredComparatorEntity + extends StoredComparatorEntity_Base{ + + @PrimaryKey + Key key; + + @SecondaryKey(relate=MANY_TO_ONE) + private MyEnum secKey; + + /* Rename secKey2 to new_secKey2. */ + @SecondaryKey(relate=MANY_TO_ONE) + private Integer new_secKey2; + + StoredComparatorEntity() {} + + StoredComparatorEntity(Key key, MyEnum secKey, Integer secKey2) { + this.key = key; + this.secKey = secKey; + this.new_secKey2 = secKey2; + } + + @Override + public String toString() { + return "[pri = " + key + " sec = " + secKey + " new_sec2 = " + + new_secKey2 + ']'; + } + + @Override + void checkPrimaryDBOrder(EntityStore store) { + PrimaryIndex priIndex; + priIndex = store.getPrimaryIndex(Key.class, + StoredComparatorEntity.class); + EntityCursor entities = + priIndex.entities(null, null); + final int nEntities = priKeys.length; + for (int i = nEntities - 1; i >= 0; i -= 1) { + StoredComparatorEntity e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + assertNull(entities.next()); + entities.close(); + } + + @Override + void checkAllSecondaryDBOrder(EntityStore store, + boolean ifReverse1, + boolean ifReverse2) { + PrimaryIndex priIndex; + priIndex = store.getPrimaryIndex + (Key.class, StoredComparatorEntity.class); + SecondaryIndex secIndex; + SecondaryIndex secIndex2; + secIndex = store.getSecondaryIndex(priIndex, MyEnum.class, + "secKey"); + secIndex2 = store.getSecondaryIndex(priIndex, Integer.class, + "new_secKey2"); + final int nEntities = priKeys.length; + int[] order1 = new int[nEntities]; + int[] order2 = new int[nEntities]; + if (ifReverse1) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order1[k] = i; + } + } else { + /* The nature order. */ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order1[k] = i; + } + } + if (ifReverse2) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order2[k] = i; + } + } else { + /* The nature order. */ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order2[k] = i; + } + } + + EntityCursor entities = + secIndex.entities(null, null); + for (MyEnum myEnum : EnumSet.allOf(MyEnum.class)) { + for (int i : order1) { + if (secKeys[i] == myEnum) { + StoredComparatorEntity e = + entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + } + } + assertNull(entities.next()); + entities.close(); + + entities = secIndex2.entities(null, null); + for (int secKey = 0; secKey < 3; secKey++) { + for (int i : order2) { + if (secKeys2[i] != null && secKeys2[i] == secKey) { + StoredComparatorEntity e = + entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys2[i], e.new_secKey2); + } + } + } + assertNull(entities.next()); + entities.close(); + } + + @Override + Mutations getMutations() { + Mutations mutations = new Mutations(); + String clsName = + StoredComparatorEntity.class.getName(); + mutations.addRenamer(new Renamer(clsName, 0, "secKey2", + "new_secKey2")); + return mutations; + } + } + + /* Rename Entity name to StoredComparatorEntity_RenameSecField. 
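+     * The Renamer mutations returned by getMutations() map the old class
+     * name and the old secondary key field name onto this evolved class.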
*/ + @Entity(version=2) + static class StoredComparatorEntity_RenameSecField + extends StoredComparatorEntity_Base { + + @PrimaryKey + Key key; + + @SecondaryKey(relate=MANY_TO_ONE) + private MyEnum secKey; + + /* Rename secKey2 from new_secKey2 to new_new_secKey2. */ + @SecondaryKey(relate=MANY_TO_ONE) + private Integer new_new_secKey2; + + StoredComparatorEntity_RenameSecField() {} + + StoredComparatorEntity_RenameSecField(Key key, + MyEnum secKey, + Integer secKey2) { + this.key = key; + this.secKey = secKey; + this.new_new_secKey2 = secKey2; + } + + @Override + void checkPrimaryDBOrder(EntityStore store) { + PrimaryIndex priIndex; + priIndex = store.getPrimaryIndex + (Key.class, StoredComparatorEntity_RenameSecField.class); + EntityCursor entities = + priIndex.entities(null, null); + final int nEntities = priKeys.length; + for (int i = nEntities - 1; i >= 0; i -= 1) { + StoredComparatorEntity_RenameSecField e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + assertNull(entities.next()); + entities.close(); + } + + @Override + void checkAllSecondaryDBOrder(EntityStore store, + boolean ifReverse1, + boolean ifReverse2) { + PrimaryIndex priIndex; + priIndex = store.getPrimaryIndex + (Key.class, StoredComparatorEntity_RenameSecField.class); + SecondaryIndex secIndex; + SecondaryIndex secIndex2; + secIndex = store.getSecondaryIndex(priIndex, MyEnum.class, + "secKey"); + secIndex2 = store.getSecondaryIndex(priIndex, Integer.class, + "new_new_secKey2"); + final int nEntities = priKeys.length; + int[] order1 = new int[nEntities]; + int[] order2 = new int[nEntities]; + if (ifReverse1) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order1[k] = i; + } + } else { + /* The nature order. */ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order1[k] = i; + } + } + if (ifReverse2) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order2[k] = i; + } + } else { + /* The nature order. 
*/ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order2[k] = i; + } + } + + EntityCursor entities = + secIndex.entities(null, null); + for (MyEnum myEnum : EnumSet.allOf(MyEnum.class)) { + for (int i : order1) { + if (secKeys[i] == myEnum) { + StoredComparatorEntity_RenameSecField e = + entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + } + } + } + assertNull(entities.next()); + entities.close(); + + entities = secIndex2.entities(null, null); + for (int secKey = 0; secKey < 3; secKey++) { + for (int i : order2) { + if (secKeys2[i] != null && secKeys2[i] == secKey) { + StoredComparatorEntity_RenameSecField e = + entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys2[i], e.new_new_secKey2); + } + } + } + assertNull(entities.next()); + entities.close(); + } + + @Override + Mutations getMutations() { + Mutations mutations = new Mutations(); + String clsName1 = + StoredComparatorEntity.class.getName(); + String clsName2 = + StoredComparatorEntity_RenameSecField.class.getName(); + mutations.addRenamer(new Renamer(clsName1, 0, "secKey2", + "new_secKey2")); + mutations.addRenamer(new Renamer(clsName1, 0, "secKey2", + "new_new_secKey2")); + mutations.addRenamer(new Renamer(clsName1, 0, clsName2)); + mutations.addRenamer(new Renamer(clsName1, 1, clsName2)); + mutations.addRenamer(new Renamer(clsName1, 1, "new_secKey2", + "new_new_secKey2")); + return mutations; + } + + @Override + public String toString() { + return "[pri = " + key + " sec = " + secKey + " new_new_sec2 = " + + new_new_secKey2 + ']'; + } + } + + /* Rename Entity name to StoredComparatorEntity_DeleteSecAnnotation. */ + @Entity(version=2) + static class StoredComparatorEntity_DeleteSecAnnotation + extends StoredComparatorEntity_Base{ + + @PrimaryKey + Key key; + + @SecondaryKey(relate=MANY_TO_ONE) + private MyEnum secKey; + + /* Delete @SecondaryKdy annotation of new_secKey2. */ + private Integer new_secKey2; + + StoredComparatorEntity_DeleteSecAnnotation() {} + + StoredComparatorEntity_DeleteSecAnnotation(Key key, + MyEnum secKey, + Integer secKey2) { + this.key = key; + this.secKey = secKey; + this.new_secKey2 = secKey2; + } + + @Override + void checkPrimaryDBOrder(EntityStore store) { + PrimaryIndex priIndex; + priIndex = store.getPrimaryIndex + (Key.class, StoredComparatorEntity_DeleteSecAnnotation.class); + EntityCursor entities = + priIndex.entities(null, null); + final int nEntities = priKeys.length; + for (int i = nEntities - 1; i >= 0; i -= 1) { + StoredComparatorEntity_DeleteSecAnnotation e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + assertNull(entities.next()); + entities.close(); + } + + @Override + void checkAllSecondaryDBOrder(EntityStore store, + boolean ifReverse1, + boolean ifReverse2) { + PrimaryIndex + priIndex; + priIndex = store.getPrimaryIndex + (Key.class, StoredComparatorEntity_DeleteSecAnnotation.class); + SecondaryIndex secIndex; + secIndex = store.getSecondaryIndex(priIndex, MyEnum.class, + "secKey"); + final int nEntities = priKeys.length; + int[] order1 = new int[nEntities]; + int[] order2 = new int[nEntities]; + if (ifReverse1) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order1[k] = i; + } + } else { + /* The nature order. */ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order1[k] = i; + } + } + if (ifReverse2) { + /* The reverse order. 
*/ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order2[k] = i; + } + } else { + /* The nature order. */ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order2[k] = i; + } + } + + EntityCursor entities = + secIndex.entities(null, null); + for (MyEnum myEnum : EnumSet.allOf(MyEnum.class)) { + for (int i : order1) { + if (secKeys[i] == myEnum) { + StoredComparatorEntity_DeleteSecAnnotation e = + entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + } + } + } + assertNull(entities.next()); + entities.close(); + } + + @Override + Mutations getMutations() { + Mutations mutations = new Mutations(); + String clsName1 = + StoredComparatorEntity.class.getName(); + String clsName2 = + StoredComparatorEntity_DeleteSecAnnotation.class.getName(); + mutations.addRenamer(new Renamer(clsName1, 0, "secKey2", + "new_secKey2")); + mutations.addRenamer(new Renamer(clsName1, 0, clsName2)); + mutations.addRenamer(new Renamer(clsName1, 1, clsName2)); + return mutations; + } + + @Override + public String toString() { + return "[pri = " + key + " sec = " + secKey + " sec2 = " + + new_secKey2 + ']'; + } + } + + /* Rename Entity name to StoredComparatorEntity_DeleteSecField. */ + @Entity(version=2) + static class StoredComparatorEntity_DeleteSecField + extends StoredComparatorEntity_Base { + + @PrimaryKey + Key key; + + @SecondaryKey(relate=MANY_TO_ONE) + private MyEnum secKey; + + /* Delete secKey2. */ + //private Integer new_secKey2; + + StoredComparatorEntity_DeleteSecField() {} + + StoredComparatorEntity_DeleteSecField(Key key, MyEnum secKey) { + this.key = key; + this.secKey = secKey; + } + + @Override + void checkPrimaryDBOrder(EntityStore store) { + PrimaryIndex priIndex; + priIndex = store.getPrimaryIndex + (Key.class, StoredComparatorEntity_DeleteSecField.class); + EntityCursor entities = + priIndex.entities(null, null); + final int nEntities = priKeys.length; + for (int i = nEntities - 1; i >= 0; i -= 1) { + StoredComparatorEntity_DeleteSecField e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + assertNull(entities.next()); + entities.close(); + } + + @Override + void checkAllSecondaryDBOrder(EntityStore store, + boolean ifReverse1, + boolean ifReverse2) { + PrimaryIndex + priIndex; + priIndex = store.getPrimaryIndex + (Key.class, StoredComparatorEntity_DeleteSecField.class); + SecondaryIndex secIndex; + secIndex = store.getSecondaryIndex(priIndex, MyEnum.class, + "secKey"); + final int nEntities = priKeys.length; + int[] order1 = new int[nEntities]; + int[] order2 = new int[nEntities]; + if (ifReverse1) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order1[k] = i; + } + } else { + /* The nature order. */ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order1[k] = i; + } + } + if (ifReverse2) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order2[k] = i; + } + } else { + /* The nature order. 
*/ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order2[k] = i; + } + } + + EntityCursor entities = + secIndex.entities(null, null); + for (MyEnum myEnum : EnumSet.allOf(MyEnum.class)) { + for (int i : order1) { + if (secKeys[i] == myEnum) { + StoredComparatorEntity_DeleteSecField e = + entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + } + } + } + assertNull(entities.next()); + entities.close(); + } + + @Override + Mutations getMutations() { + Mutations mutations = new Mutations(); + String clsName1 = + StoredComparatorEntity.class.getName(); + String clsName2 = + StoredComparatorEntity_DeleteSecField.class.getName(); + mutations.addRenamer(new Renamer(clsName1, 0, clsName2)); + mutations.addDeleter(new Deleter(clsName1, 0, "secKey2")); + mutations.addRenamer(new Renamer(clsName1, 1, clsName2)); + mutations.addDeleter(new Deleter(clsName1, 1, "new_secKey2")); + return mutations; + } + + @Override + public String toString() { + return "[pri = " + key + " sec = " + secKey + ']'; + } + } + + @Persistent + static abstract class StoredComparatorEntity_Base { + + abstract Mutations getMutations(); + abstract void checkPrimaryDBOrder(EntityStore store); + abstract void checkAllSecondaryDBOrder(EntityStore store, + boolean ifReverse1, + boolean ifReverse2); + } + + @Persistent + static class Key implements Comparable { + + @KeyField(1) + MyEnum f1; + + @KeyField(2) + Integer f2; + + @KeyField(3) + MyEnum f3; + + private Key() {} + + Key(MyEnum f1, Integer f2, MyEnum f3) { + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + } + + public int compareTo(Key o) { + /* Reverse the natural order. */ + int i = f1.compareTo(o.f1); + if (i != 0) return -i; + i = f2.compareTo(o.f2); + if (i != 0) return -i; + i = f3.compareTo(o.f3); + if (i != 0) return -i; + return 0; + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof Key)) { + return false; + } + Key o = (Key) other; + return f1 == o.f1 && + f2.equals(o.f2) && + f3 == o.f3; + } + + @Override + public int hashCode() { + return f1.ordinal() + f2 + f3.ordinal(); + } + + @Override + public String toString() { + return "[Key " + f1 + ' ' + f2 + ' ' + f3 + ']'; + } + } +} + diff --git a/test/com/sleepycat/persist/test/SecondaryDupOrderTest.java b/test/com/sleepycat/persist/test/SecondaryDupOrderTest.java new file mode 100644 index 0000000..9ca0db6 --- /dev/null +++ b/test/com/sleepycat/persist/test/SecondaryDupOrderTest.java @@ -0,0 +1,544 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.EnumSet; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +/* */ +import com.sleepycat.je.DbInternal; +/* */ +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +/* */ +import com.sleepycat.je.dbi.EnvironmentImpl; +/* */ +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.impl.ComplexFormat; +import com.sleepycat.persist.impl.Store; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * In the versions before je4.1, we failed to set the dup comparator. This + * bug applies only when the primary key has a comparator. The bug was fixed + * by setting the dup comparator to the primary key comparator, for all new + * secondary databases. [#17252] + */ +public class SecondaryDupOrderTest extends TestBase { + private static final String STORE_NAME = "test"; + + private File envHome; + private Environment env; + private EntityStore store; + private PrimaryIndex priIndex; + private SecondaryIndex secIndex; + private SecondaryIndex secIndex2; + + private final static String secDBName = + "persist#test#com.sleepycat.persist.test." + + "SecondaryDupOrderTest$StoredComparatorEntity#secKey"; + private final static String secDBName2 = + "persist#test#com.sleepycat.persist.test." 
+ + "SecondaryDupOrderTest$StoredComparatorEntity#secKey2"; + + private final static StoredComparatorEntity.Key[] priKeys = + new StoredComparatorEntity.Key[] { + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 1, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 1, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 2, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.A, 2, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.B, 1, + StoredComparatorEntity.MyEnum.A), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.B, 1, + StoredComparatorEntity.MyEnum.B), + new StoredComparatorEntity.Key + (StoredComparatorEntity.MyEnum.C, 0, + StoredComparatorEntity.MyEnum.C), + }; + + private final static StoredComparatorEntity.MyEnum[] secKeys = + new StoredComparatorEntity.MyEnum[] { + StoredComparatorEntity.MyEnum.C, + StoredComparatorEntity.MyEnum.B, + StoredComparatorEntity.MyEnum.A, + null, + StoredComparatorEntity.MyEnum.A, + StoredComparatorEntity.MyEnum.B, + StoredComparatorEntity.MyEnum.C, + }; + + final Integer[] secKeys2 = new Integer[] { 2, 1, 0, null, 0, 1, 2, }; + + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() { + if (store != null) { + try { + store.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + envHome = null; + env = null; + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentConfig.VERIFY_BTREE, "false"); + env = new Environment(envHome, envConfig); + + /* */ + EnvironmentImpl envImpl = DbInternal.getNonNullEnvImpl(env); + TestUtils.validateNodeMemUsage(envImpl, true /*assertOnError*/); + /* */ + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + store = new EntityStore(env, STORE_NAME, storeConfig); + + priIndex = store.getPrimaryIndex(StoredComparatorEntity.Key.class, + StoredComparatorEntity.class); + secIndex = store.getSecondaryIndex(priIndex, + StoredComparatorEntity.MyEnum.class, + "secKey"); + secIndex2 = + store.getSecondaryIndex(priIndex, Integer.class, "secKey2"); + } + + /* Open the old database and delete the sec database. */ + private void openAndDeleteSecDatabase(String secDBName) + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + env = new Environment(envHome, envConfig); + Transaction txn = env.beginTransaction(null, null); + env.removeDatabase(txn, secDBName); + txn.commit(); + close(); + } + + private void close() + throws DatabaseException { + + if (store != null) { + store.close(); + store = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + /* + * [17252] Wrong order for DPL secondary index duplicates. 
+     *
+     * When the user defines a comparator for the primary key order, the
+     * secondary duplicates are output in the user-defined order, rather than
+     * in the natural order.
+     */
+    @Test
+    public void testStoredComparators()
+        throws DatabaseException {
+
+        open();
+        final int nEntities = priKeys.length;
+        Transaction txn = env.beginTransaction(null, null);
+        for (int i = 0; i < nEntities; i += 1) {
+            priIndex.put(txn, new StoredComparatorEntity
+                         (priKeys[i], secKeys[i], secKeys2[i]));
+        }
+        txn.commit();
+        close();
+
+        open();
+
+        /* Check the primary database order. */
+        checkPrimaryDBOrder();
+
+        /*
+         * Check the order of all the secondary databases.
+         *
+         * Sec DB1 and Sec DB2:
+         * The secondary duplicates should also be in reverse order, the same
+         * order as the primary database.
+         */
+        checkAllSecondaryDBOrder(true, true);
+        close();
+    }
+
+
+    /*
+     * In versions before JE 4.1, we failed to set the dup comparator. This
+     * bug applies only when the primary key has a comparator. The bug was
+     * fixed by setting the dup comparator to the primary key comparator, for
+     * all new secondary databases. [#17252]
+     *
+     * When reading an old database stored by an earlier JE version, the dup
+     * comparator is not set to the primary key comparator. When the user
+     * wants to correct the ordering for an incorrectly ordered secondary
+     * database, the database must be deleted, but the class version need not
+     * be incremented.
+     */
+    @Test
+    public void testReadOldDatabaseWithoutComparator()
+        throws IOException {
+
+        /* Copy log file resource to log file zero. */
+        TestUtils.loadLog
+            (getClass(), "SecDupsWithoutComparator_je_4_0.jdb", envHome);
+
+        open();
+
+        /* Check primary database order. */
+        checkPrimaryDBOrder();
+
+        /*
+         * Check the order of all the secondary databases.
+         *
+         * Sec DB1 and Sec DB2:
+         * Because the dup comparator is not set to the primary key comparator
+         * in the old secondary databases, the secondary duplicates should be
+         * in natural order.
+         */
+        checkAllSecondaryDBOrder(false, false);
+        close();
+    }
+
+    /*
+     * When the user wants to correct the ordering for an incorrectly ordered
+     * secondary database created by an older JE, the secondary database must
+     * be deleted (the class version need not be incremented) and then
+     * re-created with JE 4.1.
+     */
+    @Test
+    public void testCorrectOldDatabaseOrder()
+        throws IOException {
+
+        /* Copy log file resource to log file zero. */
+        TestUtils.loadLog
+            (getClass(), "SecDupsWithoutComparator_je_4_0.jdb", envHome);
+        String clsName = StoredComparatorEntity.class.getName();
+
+        /* Delete the secKey secondary database. */
+        openAndDeleteSecDatabase(secDBName);
+
+        /*
+         * Re-opening the store creates a new secondary database for secKey
+         * with the correct dup order. catalog.flush will be called to update
+         * the ComplexFormat.
+         */
+        Store.expectFlush = true;
+        try {
+            open();
+
+            /*
+             * incorrectlyOrderedSecKeys is null for a database created by
+             * the old JE version.
+             */
+            ComplexFormat format =
+                (ComplexFormat) store.getModel().getRawType(clsName);
+            /*
+             * incorrectlyOrderedSecKeys is initialized and the name of
+             * secKey2 is added, which means the secKey2 secondary database
+             * still needs to be assigned the duplicate comparator.
+             */
+            assertEquals(format.getIncorrectlyOrderedSecKeys().size(), 1);
+            assertTrue
+                (format.getIncorrectlyOrderedSecKeys().contains("secKey2"));
+        } finally {
+            Store.expectFlush = false;
+        }
+
+        /* Check primary database order.
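+         * The expected order is reverse, because the stored Key comparator
+         * reverses the natural ordering of its fields.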
*/ + checkPrimaryDBOrder(); + + /* + * Check all of the secondary databases order. + * + * Sec DB1: + * This secondary database is first deleted, and then re-created by new + * je. The dup comparator will be set to primary key comparator, so the + * Secondary duplicates order should be reverse order. + * + * Sec DB2 : + * Because the dup comparator will not set to primary key comparator in + * the old secondary databases, the secondary duplicates order should + * be nature order. + */ + checkAllSecondaryDBOrder(true, false); + close(); + + /* Delete the seckey2 secondary database. */ + openAndDeleteSecDatabase(secDBName2); + + /* + * Re-open the database will create a new sec database for seckey2 with + * correct dup Order. catalog.flush will be called to update the + * ComplexFormat. + */ + Store.expectFlush = true; + try { + open(); + + /* + * incorrectlyOrderedSecKeys is empty after re-open the secKey2 + * secondary database. + */ + ComplexFormat format = + (ComplexFormat) store.getModel().getRawType(clsName); + assertEquals(format.getIncorrectlyOrderedSecKeys().size(), 0); + } finally { + Store.expectFlush = false; + } + + /* + * Check all of the secondary databases order. + * + * Sec DB1: + * This secondary database is created by new je, so the Secondary + * duplicates order should be reverse order. + * + * Sec DB2 : + * This secondary database is first deleted, and then re-created by new + * je. The dup comparator will be set to primary key comparator, so the + * Secondary duplicates order should be reverse order. + */ + checkAllSecondaryDBOrder(true, true); + close(); + } + + private void checkPrimaryDBOrder() { + Transaction txn = env.beginTransaction(null, null); + EntityCursor entities = + priIndex.entities(txn, null); + final int nEntities = priKeys.length; + for (int i = nEntities - 1; i >= 0; i -= 1) { + StoredComparatorEntity e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + assertNull(entities.next()); + entities.close(); + txn.commit(); + } + + private void checkAllSecondaryDBOrder(boolean ifReverse1, + boolean ifReverse2) { + final int nEntities = priKeys.length; + int[] order1 = new int[nEntities]; + int[] order2 = new int[nEntities]; + if (ifReverse1) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order1[k] = i; + } + } else { + /* The nature order. */ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order1[k] = i; + } + } + if (ifReverse2) { + /* The reverse order. */ + for (int k = 0, i = nEntities - 1; i >= 0; i -= 1, k += 1) { + order2[k] = i; + } + } else { + /* The nature order. 
*/ + for (int k = 0, i = 0; i < nEntities; i += 1, k += 1) { + order2[k] = i; + } + } + + Transaction txn = env.beginTransaction(null, null); + EntityCursor entities = + secIndex.entities(txn, null); + for (StoredComparatorEntity.MyEnum myEnum : + EnumSet.allOf(StoredComparatorEntity.MyEnum.class)) { + for (int i : order1) { + if (secKeys[i] == myEnum) { + StoredComparatorEntity e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys[i], e.secKey); + } + } + } + assertNull(entities.next()); + entities.close(); + txn.commit(); + + txn = env.beginTransaction(null, null); + entities = secIndex2.entities(txn, null); + for (int secKey = 0; secKey < 3; secKey++) { + for (int i : order2) { + if (secKeys2[i] != null && secKeys2[i] == secKey) { + StoredComparatorEntity e = entities.next(); + assertNotNull(e); + assertEquals(priKeys[i], e.key); + assertEquals(secKeys2[i], e.secKey2); + } + } + } + assertNull(entities.next()); + entities.close(); + txn.commit(); + } + + @Entity + static class StoredComparatorEntity { + + enum MyEnum { A, B, C }; + + @Persistent + static class Key implements Comparable { + + @KeyField(1) + MyEnum f1; + + @KeyField(2) + Integer f2; + + @KeyField(3) + MyEnum f3; + + private Key() {} + + Key(MyEnum f1, Integer f2, MyEnum f3) { + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + } + + public int compareTo(Key o) { + /* Reverse the natural order. */ + int i = f1.compareTo(o.f1); + if (i != 0) return -i; + i = f2.compareTo(o.f2); + if (i != 0) return -i; + i = f3.compareTo(o.f3); + if (i != 0) return -i; + return 0; + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof Key)) { + return false; + } + Key o = (Key) other; + return f1 == o.f1 && + f2.equals(o.f2) && + f3 == o.f3; + } + + @Override + public int hashCode() { + return f1.ordinal() + f2 + f3.ordinal(); + } + + @Override + public String toString() { + return "[Key " + f1 + ' ' + f2 + ' ' + f3 + ']'; + } + } + + @PrimaryKey + Key key; + + @SecondaryKey(relate=MANY_TO_ONE) + private MyEnum secKey; + + @SecondaryKey(relate=MANY_TO_ONE) + private Integer secKey2; + + private StoredComparatorEntity() {} + + StoredComparatorEntity(Key key, MyEnum secKey, Integer secKey2) { + this.key = key; + this.secKey = secKey; + this.secKey2 = secKey2; + } + + @Override + public String toString() { + return "[pri = " + key + " sec = " + secKey + " sec2 = " + + secKey2 + ']'; + } + } +} diff --git a/test/com/sleepycat/persist/test/SequenceTest.java b/test/com/sleepycat/persist/test/SequenceTest.java new file mode 100644 index 0000000..74696ef --- /dev/null +++ b/test/com/sleepycat/persist/test/SequenceTest.java @@ -0,0 +1,478 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.persist.test; + +import static org.junit.Assert.assertEquals; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestEnv; + +/** + * @author Mark Hayes + */ +public class SequenceTest extends DualTestCase { + + private File envHome; + private Environment env; + + @Before + public void setUp() + throws Exception { + + super.setUp(); + envHome = SharedTestUtils.getTestDir(); + } + + @After + public void tearDown() + throws Exception { + + super.tearDown(); + envHome = null; + env = null; + } + + @Test + public void testSequenceKeys() + throws Exception { + + Class[] classes = { + SequenceEntity_Long.class, + SequenceEntity_Integer.class, + SequenceEntity_Short.class, + SequenceEntity_Byte.class, + SequenceEntity_tlong.class, + SequenceEntity_tint.class, + SequenceEntity_tshort.class, + SequenceEntity_tbyte.class, + SequenceEntity_Long_composite.class, + SequenceEntity_Integer_composite.class, + SequenceEntity_Short_composite.class, + SequenceEntity_Byte_composite.class, + SequenceEntity_tlong_composite.class, + SequenceEntity_tint_composite.class, + SequenceEntity_tshort_composite.class, + SequenceEntity_tbyte_composite.class, + }; + + EnvironmentConfig envConfig = TestEnv.TXN.getConfig(); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + EntityStore store = new EntityStore(env, "foo", storeConfig); + + long seq = 0; + + for (int i = 0; i < classes.length; i += 1) { + Class entityCls = classes[i]; + SequenceEntity entity = (SequenceEntity) entityCls.newInstance(); + Class keyCls = entity.getKeyClass(); + + PrimaryIndex index = + store.getPrimaryIndex(keyCls, entityCls); + index.putNoReturn(entity); + seq += 1; + assertEquals(seq, entity.getKey()); + + index.putNoReturn(entity); + assertEquals(seq, entity.getKey()); + + entity.nullifyKey(); + index.putNoReturn(entity); + seq += 1; + assertEquals(seq, entity.getKey()); + } + + store.close(); + close(env); + env = null; + } + + interface SequenceEntity { + Class getKeyClass(); + long getKey(); + void nullifyKey(); + } + + @Entity + static class SequenceEntity_Long implements SequenceEntity { + + @PrimaryKey(sequence="X") + Long priKey; + + public Class getKeyClass() { + return Long.class; + } + + public long getKey() { + return priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_Integer implements SequenceEntity { + + @PrimaryKey(sequence="X") + Integer priKey; + + public Class getKeyClass() { + return Integer.class; + } + + public long getKey() { + return priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_Short implements SequenceEntity { + + @PrimaryKey(sequence="X") + Short priKey; + + public Class getKeyClass() { + return Short.class; + } + + public long getKey() { + return priKey; + } + + 
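/* Clearing the key lets the next put assign a new value from the sequence (inferred from testSequenceKeys above). */ +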
public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_Byte implements SequenceEntity { + + @PrimaryKey(sequence="X") + Byte priKey; + + public Class getKeyClass() { + return Byte.class; + } + + public long getKey() { + return priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_tlong implements SequenceEntity { + + @PrimaryKey(sequence="X") + long priKey; + + public Class getKeyClass() { + return Long.class; + } + + public long getKey() { + return priKey; + } + + public void nullifyKey() { + priKey = 0; + } + } + + @Entity + static class SequenceEntity_tint implements SequenceEntity { + + @PrimaryKey(sequence="X") + int priKey; + + public Class getKeyClass() { + return Integer.class; + } + + public long getKey() { + return priKey; + } + + public void nullifyKey() { + priKey = 0; + } + } + + @Entity + static class SequenceEntity_tshort implements SequenceEntity { + + @PrimaryKey(sequence="X") + short priKey; + + public Class getKeyClass() { + return Short.class; + } + + public long getKey() { + return priKey; + } + + public void nullifyKey() { + priKey = 0; + } + } + + @Entity + static class SequenceEntity_tbyte implements SequenceEntity { + + @PrimaryKey(sequence="X") + byte priKey; + + public Class getKeyClass() { + return Byte.class; + } + + public long getKey() { + return priKey; + } + + public void nullifyKey() { + priKey = 0; + } + } + + @Entity + static class SequenceEntity_Long_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + Long priKey; + } + + public Class getKeyClass() { + return Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_Integer_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + Integer priKey; + } + + public Class getKeyClass() { + return Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_Short_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + Short priKey; + } + + public Class getKeyClass() { + return Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_Byte_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + Byte priKey; + } + + public Class getKeyClass() { + return Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_tlong_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + long priKey; + } + + public Class getKeyClass() { + return Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_tint_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + int priKey; + } + + public Class getKeyClass() { + return 
Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_tshort_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + short priKey; + } + + public Class getKeyClass() { + return Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } + + @Entity + static class SequenceEntity_tbyte_composite implements SequenceEntity { + + @PrimaryKey(sequence="X") + Key priKey; + + @Persistent + static class Key { + @KeyField(1) + byte priKey; + } + + public Class getKeyClass() { + return Key.class; + } + + public long getKey() { + return priKey.priKey; + } + + public void nullifyKey() { + priKey = null; + } + } +} diff --git a/test/com/sleepycat/persist/test/StringFormatCompatibilityTest.java b/test/com/sleepycat/persist/test/StringFormatCompatibilityTest.java new file mode 100644 index 0000000..9485b34 --- /dev/null +++ b/test/com/sleepycat/persist/test/StringFormatCompatibilityTest.java @@ -0,0 +1,310 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertNotNull; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import junit.framework.TestCase; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.util.TestUtils; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.impl.PersistCatalog; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.KeyField; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; +import com.sleepycat.util.test.TestEnv; + +/** + * String was treated as an object in JE versions prior to 5.0, but is treated + * as a primitive type in JE 5.0. Therefore we need to test that String data + * stored using an older JE version can be correctly read and updated in JE + * 5.0. [#19247] + * + * The old database is created using je-4.0.103. + * + * This test should be excluded from the BDB build because it uses a stored JE + * log file.
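+ * + * Each test case first copies the stored je-4.0.103 log file into the test + * environment (TestUtils.loadLog) and then opens the store against it.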
+ */ +public class StringFormatCompatibilityTest extends TestBase { + + private static final String STORE_NAME = "test"; + private static final String STORE_PREFIX = "persist#foo#"; + + private File envHome; + private Environment env; + private EntityStore store; + private PersistCatalog catalog; + + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() { + if (catalog != null) { + try { + catalog.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + if (store != null) { + try { + store.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + if (env != null) { + try { + env.close(); + } catch (DatabaseException e) { + System.out.println("During tearDown: " + e); + } + } + envHome = null; + env = null; + store = null; + catalog = null; + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.BDB.getConfig(); + envConfig.setAllowCreate(true); + env = new Environment(envHome, envConfig); + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setAllowCreate(true); + store = new EntityStore(env, STORE_NAME, storeConfig); + openCatalog(); + } + + private void openCatalog() + throws DatabaseException { + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + DbCompat.setTypeBtree(dbConfig); + catalog = new PersistCatalog + (env, STORE_PREFIX, STORE_PREFIX + "catalog", dbConfig, null, + null, false /*rawAccess*/, null /*Store*/); + } + + private void close() + throws DatabaseException { + + if (catalog != null) { + catalog.close(); + catalog = null; + } + if (store != null) { + store.close(); + store = null; + } + if (env != null) { + env.close(); + env = null; + } + } + + @Test + public void testReadOldStringData() + throws IOException { + + /* Copy log file resource to log file zero. */ + TestUtils.loadLog(getClass(), "je-4.0.103_StringData.jdb", envHome); + + open(); + PrimaryIndex primary = + store.getPrimaryIndex(String.class, StringData.class); + + /* Read the older String data. */ + StringData entity = primary.get("pk1"); + assertNotNull(entity); + CompositeKey compK = new CompositeKey("CompKey1_1", "CompKey1_2"); + String[] f3 = {"f3_1", "f3_2"}; + List f4 = new ArrayList(); + f4.add("f4_1"); + f4.add("f4_2"); + entity.validate + (new StringData ("pk1", "sk1", compK, "f1", "f2", f3, f4)); + close(); + } + + @Test + public void testWriteReadOldStringData() + throws IOException { + + /* Copy log file resource to log file zero. */ + TestUtils.loadLog(getClass(), "je-4.0.103_StringData.jdb", envHome); + + open(); + + PrimaryIndex primary = + store.getPrimaryIndex(String.class, StringData.class); + CompositeKey compK = + new CompositeKey("new_CompKey2_1", "new_CompKey2_2"); + String[] f3 = {"new_f3_1", "new_f3_2"}; + List f4 = new ArrayList(); + f4.add("new_f4_1"); + f4.add("new_f4_2"); + + /* Put the String data in a new format. */ + primary.put(null, new StringData("pk2", "new_sk2", compK, "new_f1", + "new_f2", f3, f4)); + + /* Read the String data using the new format. */ + StringData entity = primary.get("pk2"); + assertNotNull(entity); + entity.validate(new StringData("pk2", "new_sk2", compK, "new_f1", + "new_f2", f3, f4)); + + /* Read the old String data
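(stored by je-4.0.103 in the pre-5.0 String format).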
*/ + entity = primary.get("pk1"); + assertNotNull(entity); + compK = new CompositeKey("CompKey1_1", "CompKey1_2"); + f3 = new String[]{"f3_1", "f3_2"}; + f4 = new ArrayList(); + f4.add("f4_1"); + f4.add("f4_2"); + entity.validate + (new StringData ("pk1", "sk1", compK, "f1", "f2", f3, f4)); + close(); + } + + @Entity (version = 1) + static class StringData { + @PrimaryKey + private String pk; + @SecondaryKey (relate = MANY_TO_ONE) + private String sk1; + @SecondaryKey (relate = MANY_TO_ONE) + private CompositeKey sk2; + private String f1; + + /* + * This field is changed to an Object, which will be converted + * automatically by class widening evolution. + */ + private Object f2; + private String[] f3; + private List f4; + + StringData() { } + + StringData(String pk, + String sk1, + CompositeKey sk2, + String f1, + String f2, + String[] f3, + List f4) { + this.pk = pk; + this.sk1 = sk1; + this.sk2 = sk2; + this.f1 = f1; + this.f2 = f2; + this.f3 = f3; + this.f4 = f4; + } + + String getPriKeyObject() { + return pk; + } + + String getSK1() { + return sk1; + } + + CompositeKey getSK2() { + return sk2; + } + + String getF1() { + return f1; + } + + String getF2() { + return (String)f2; + } + + String[] getF3() { + return f3; + } + + List getF4() { + return f4; + } + + public void validate(Object other) { + StringData o = (StringData) other; + TestCase.assertEquals(pk, o.pk); + TestCase.assertEquals(sk1, o.sk1); + sk2.validate(o.sk2); + TestCase.assertEquals(f1, o.f1); + TestCase.assertEquals(f2, o.f2); + for (int i = 0; i < f3.length; i++) { + TestCase.assertEquals(f3[i], o.f3[i]); + } + TestCase.assertEquals(f4, o.f4); + } + } + + @Persistent + static class CompositeKey { + @KeyField(2) + private String f1; + @KeyField(1) + private String f2; + + private CompositeKey() {} + + CompositeKey(String f1, String f2) { + this.f1 = f1; + this.f2 = f2; + } + + void validate(CompositeKey o) { + TestCase.assertEquals(f1, o.f1); + TestCase.assertEquals(f2, o.f2); + } + } +} diff --git a/test/com/sleepycat/persist/test/SubclassIndexTest.java b/test/com/sleepycat/persist/test/SubclassIndexTest.java new file mode 100644 index 0000000..688af1d --- /dev/null +++ b/test/com/sleepycat/persist/test/SubclassIndexTest.java @@ -0,0 +1,264 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.persist.test; + +import static com.sleepycat.persist.model.Relationship.MANY_TO_ONE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.util.DualTestCase; +import com.sleepycat.persist.EntityCursor; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; +import com.sleepycat.persist.SecondaryIndex; +import com.sleepycat.persist.StoreConfig; +import com.sleepycat.persist.model.AnnotationModel; +import com.sleepycat.persist.model.Entity; +import com.sleepycat.persist.model.EntityModel; +import com.sleepycat.persist.model.Persistent; +import com.sleepycat.persist.model.PrimaryKey; +import com.sleepycat.persist.model.SecondaryKey; +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestEnv; + +public class SubclassIndexTest extends DualTestCase { + + private File envHome; + private Environment env; + private EntityStore store; + + @Before + public void setUp() + throws Exception { + + envHome = SharedTestUtils.getTestDir(); + super.setUp(); + } + + @After + public void tearDown() + throws Exception { + + super.tearDown(); + envHome = null; + env = null; + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = TestEnv.TXN.getConfig(); + envConfig.setAllowCreate(true); + env = create(envHome, envConfig); + + EntityModel model = new AnnotationModel(); + model.registerClass(Manager.class); + model.registerClass(SalariedManager.class); + + StoreConfig storeConfig = new StoreConfig(); + storeConfig.setModel(model); + storeConfig.setAllowCreate(true); + storeConfig.setTransactional(true); + store = new EntityStore(env, "foo", storeConfig); + } + + private void close() + throws DatabaseException { + + store.close(); + store = null; + close(env); + env = null; + } + + @Test + public void testSubclassIndex() + throws DatabaseException { + + open(); + + PrimaryIndex employeesById = + store.getPrimaryIndex(String.class, Employee.class); + + employeesById.put(new Employee("1")); + employeesById.put(new Manager("2", "a")); + employeesById.put(new Manager("3", "a")); + employeesById.put(new Manager("4", "b")); + + Employee e; + Manager m; + + e = employeesById.get("1"); + assertNotNull(e); + assertTrue(!(e instanceof Manager)); + + /* Ensure DB exists BEFORE calling getSubclassIndex. [#15247] */ + PersistTestUtils.assertDbExists + (true, env, "foo", Employee.class.getName(), "dept"); + + /* Normal use: Subclass index for a key in the subclass. 
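The dept key is declared on Manager, not on the Employee base class.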
*/ + SecondaryIndex managersByDept = + store.getSubclassIndex + (employeesById, Manager.class, String.class, "dept"); + + m = managersByDept.get("a"); + assertNotNull(m); + assertEquals("2", m.id); + + m = managersByDept.get("b"); + assertNotNull(m); + assertEquals("4", m.id); + + Transaction txn = env.beginTransaction(null, null); + EntityCursor managers = managersByDept.entities(txn, null); + try { + m = managers.next(); + assertNotNull(m); + assertEquals("2", m.id); + m = managers.next(); + assertNotNull(m); + assertEquals("3", m.id); + m = managers.next(); + assertNotNull(m); + assertEquals("4", m.id); + m = managers.next(); + assertNull(m); + } finally { + managers.close(); + txn.commit(); + } + + /* Getting a subclass index for the entity class is also allowed. */ + store.getSubclassIndex + (employeesById, Employee.class, String.class, "other"); + + /* Getting a subclass index for a base class key is not allowed. */ + try { + store.getSubclassIndex + (employeesById, Manager.class, String.class, "other"); + fail(); + } catch (IllegalArgumentException expected) { + } + + close(); + } + + /** + * Previously this tested that a secondary key database was added only + * AFTER storing the first instance of the subclass that defines the key. + * Now that we require registering the subclass up front, the database is + * created up front also. So this test is somewhat less useful, but still + * nice to have around. [#16399] + */ + @Test + public void testAddSecKey() + throws DatabaseException { + + open(); + PrimaryIndex employeesById = + store.getPrimaryIndex(String.class, Employee.class); + employeesById.put(new Employee("1")); + assertTrue(hasEntityKey("dept")); + close(); + + open(); + employeesById = store.getPrimaryIndex(String.class, Employee.class); + assertTrue(hasEntityKey("dept")); + employeesById.put(new Manager("2", "a")); + assertTrue(hasEntityKey("dept")); + close(); + + open(); + assertTrue(hasEntityKey("dept")); + close(); + + open(); + employeesById = store.getPrimaryIndex(String.class, Employee.class); + assertTrue(hasEntityKey("salary")); + employeesById.put(new SalariedManager("3", "a", "111")); + assertTrue(hasEntityKey("salary")); + close(); + + open(); + assertTrue(hasEntityKey("dept")); + assertTrue(hasEntityKey("salary")); + close(); + } + + private boolean hasEntityKey(String keyName) { + return store.getModel(). + getRawType(Employee.class.getName()). + getEntityMetadata(). + getSecondaryKeys(). + keySet(). + contains(keyName); + } + + @Entity + private static class Employee { + + @PrimaryKey + String id; + + @SecondaryKey(relate=MANY_TO_ONE) + String other; + + Employee(String id) { + this.id = id; + } + + private Employee() {} + } + + @Persistent + private static class Manager extends Employee { + + @SecondaryKey(relate=MANY_TO_ONE) + String dept; + + Manager(String id, String dept) { + super(id); + this.dept = dept; + } + + private Manager() {} + } + + @Persistent + private static class SalariedManager extends Manager { + + @SecondaryKey(relate=MANY_TO_ONE) + String salary; + + SalariedManager(String id, String dept, String salary) { + super(id, dept); + this.salary = salary; + } + + private SalariedManager() {} + } +} diff --git a/test/com/sleepycat/persist/test/TestVersionCompatibility.java b/test/com/sleepycat/persist/test/TestVersionCompatibility.java new file mode 100644 index 0000000..d801979 --- /dev/null +++ b/test/com/sleepycat/persist/test/TestVersionCompatibility.java @@ -0,0 +1,78 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. 
All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ +package com.sleepycat.persist.test; + +import org.junit.Before; +import org.junit.Test; + +/** + * Test that the catalog and data records created with a different version of + * the DPL are compatible with this version. This test is actually called by + * TestVersionCompatibilitySuite, since it contains two parts: first the + * TestVersionCompatibility tests check previously evolved data without + * changing it, then EvolveTest tries evolving it. + * + * @author Mark Hayes + */ +public class TestVersionCompatibility extends EvolveTestBase { + + + public TestVersionCompatibility(String originalClsName, + String evolvedClsName) throws Exception { + super(originalClsName, evolvedClsName); + } + + @Override + boolean useEvolvedClass() { + return true; + } + + @Before + public void setUp() { + envHome = getTestInitHome(true /*evolved*/); + } + + @Test + public void testPreviouslyEvolved() + throws Exception { + + /* If the store cannot be opened, this test is not appropriate. */ + if (caseObj.getStoreOpenException() != null) { + return; + } + + /* The update occurred previously. */ + caseObj.updated = true; + + openEnv(); + + /* Open read-only and double check that everything is OK. */ + openStoreReadOnly(); + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + caseObj.readObjects(store, false /*doUpdate*/); + caseObj.checkEvolvedModel + (store.getModel(), env, true /*oldTypesExist*/); + closeStore(); + + /* Check raw objects. */ + openRawStore(); + caseObj.checkEvolvedModel + (rawStore.getModel(), env, true /*oldTypesExist*/); + caseObj.readRawObjects + (rawStore, true /*expectEvolved*/, true /*expectUpdated*/); + closeRawStore(); + + closeAll(); + } +} diff --git a/test/com/sleepycat/persist/test/TestVersionCompatibilitySuite.java b/test/com/sleepycat/persist/test/TestVersionCompatibilitySuite.java new file mode 100644 index 0000000..bc27df5 --- /dev/null +++ b/test/com/sleepycat/persist/test/TestVersionCompatibilitySuite.java @@ -0,0 +1,63 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.persist.test; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.junit.runners.Suite.SuiteClasses; + +/** + * Test that the catalog and data records created with a different version of + * the DPL are compatible with this version. This test is run as follows: + * + * 1) Run EvolveTest with version X of JE.
For example: + * + * cd /jeX + * ant -Dtestcase=com.sleepycat.persist.test.EvolveTest test + * or + * ant -Dsuite=persist/test test + * or + * ant test + * + * Step (1) leaves the log files from all tests in the testevolve directory. + * + * 2) Run TestVersionCompatibility with version Y of JE, passing the JE + * testevolve directory from step (1). For example: + * + * cd /jeY + * ant -Dtestcase=com.sleepycat.persist.test.TestVersionCompatibilitySuite \ + * -Dunittest.testevolvedir=/jeX/build/test/testevolve \ + * test + * + * Currently there are 2 sets of X and Y that can be tested, one set for the + * CVS branch and one for the CVS trunk: + * + * CVS Version X Version Y + * branch je-3_2_56 je-3_2_57 or greater + * trunk je-3_3_41 je-3_3_42 or greater + * + * This test is not run along with the regular JE test suite run, because the + * class name does not end with Test. It must be run separately as described + * above. + */ + +@RunWith(Suite.class) +@SuiteClasses({TestVersionCompatibility.class, EvolveTest.class}) +public class TestVersionCompatibilitySuite { + /* + * Run TestVersionCompatibility tests first to check previously evolved + * data without changing it. Then run the EvolveTest to try evolving + * it. + */ +} diff --git a/test/com/sleepycat/persist/test/je-4.0.103_AbstractClassData.jdb b/test/com/sleepycat/persist/test/je-4.0.103_AbstractClassData.jdb new file mode 100644 index 0000000..87d632d Binary files /dev/null and b/test/com/sleepycat/persist/test/je-4.0.103_AbstractClassData.jdb differ diff --git a/test/com/sleepycat/persist/test/je-4.0.103_BigDecimal.jdb b/test/com/sleepycat/persist/test/je-4.0.103_BigDecimal.jdb new file mode 100644 index 0000000..3d7e0d0 Binary files /dev/null and b/test/com/sleepycat/persist/test/je-4.0.103_BigDecimal.jdb differ diff --git a/test/com/sleepycat/persist/test/je-4.0.103_EvolveProxyClass.jdb b/test/com/sleepycat/persist/test/je-4.0.103_EvolveProxyClass.jdb new file mode 100644 index 0000000..ec727fa Binary files /dev/null and b/test/com/sleepycat/persist/test/je-4.0.103_EvolveProxyClass.jdb differ diff --git a/test/com/sleepycat/persist/test/je-4.0.103_StringData.jdb b/test/com/sleepycat/persist/test/je-4.0.103_StringData.jdb new file mode 100644 index 0000000..30744d2 Binary files /dev/null and b/test/com/sleepycat/persist/test/je-4.0.103_StringData.jdb differ diff --git a/test/com/sleepycat/util/test/ExceptionWrapperTest.java b/test/com/sleepycat/util/test/ExceptionWrapperTest.java new file mode 100644 index 0000000..bc3fa21 --- /dev/null +++ b/test/com/sleepycat/util/test/ExceptionWrapperTest.java @@ -0,0 +1,118 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +package com.sleepycat.util.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; + +import org.junit.Test; + +import com.sleepycat.util.ExceptionUnwrapper; +import com.sleepycat.util.IOExceptionWrapper; +import com.sleepycat.util.RuntimeExceptionWrapper; + +/** + * @author Mark Hayes + */ +public class ExceptionWrapperTest extends TestBase { + + @Test + public void testIOWrapper() { + try { + throw new IOExceptionWrapper(new RuntimeException("msg")); + } catch (IOException e) { + Exception ee = ExceptionUnwrapper.unwrap(e); + assertTrue(ee instanceof RuntimeException); + assertEquals("msg", ee.getMessage()); + + Throwable t = ExceptionUnwrapper.unwrapAny(e); + assertTrue(t instanceof RuntimeException); + assertEquals("msg", t.getMessage()); + } + } + + @Test + public void testRuntimeWrapper() { + try { + throw new RuntimeExceptionWrapper(new IOException("msg")); + } catch (RuntimeException e) { + Exception ee = ExceptionUnwrapper.unwrap(e); + assertTrue(ee instanceof IOException); + assertEquals("msg", ee.getMessage()); + + Throwable t = ExceptionUnwrapper.unwrapAny(e); + assertTrue(t instanceof IOException); + assertEquals("msg", t.getMessage()); + } + } + + @Test + public void testErrorWrapper() { + try { + throw new RuntimeExceptionWrapper(new Error("msg")); + } catch (RuntimeException e) { + try { + ExceptionUnwrapper.unwrap(e); + fail(); + } catch (Error ee) { + assertTrue(ee instanceof Error); + assertEquals("msg", ee.getMessage()); + } + + Throwable t = ExceptionUnwrapper.unwrapAny(e); + assertTrue(t instanceof Error); + assertEquals("msg", t.getMessage()); + } + } + + /** + * Generates a stack trace for a nested exception and checks the output + * for the nested exception. + */ + @Test + public void testStackTrace() { + + /* Nested stack traces are not available in Java 1.3. */ + String version = System.getProperty("java.version"); + if (version.startsWith("1.3.")) { + return; + } + + Exception ex = new Exception("some exception"); + String causedBy = "Caused by: java.lang.Exception: some exception"; + + try { + throw new RuntimeExceptionWrapper(ex); + } catch (RuntimeException e) { + StringWriter sw = new StringWriter(); + e.printStackTrace(new PrintWriter(sw)); + String s = sw.toString(); + assertTrue(s.indexOf(causedBy) != -1); + } + + try { + throw new IOExceptionWrapper(ex); + } catch (IOException e) { + StringWriter sw = new StringWriter(); + e.printStackTrace(new PrintWriter(sw)); + String s = sw.toString(); + assertTrue(s.indexOf(causedBy) != -1); + } + } +} diff --git a/test/com/sleepycat/util/test/FastOutputStreamTest.java b/test/com/sleepycat/util/test/FastOutputStreamTest.java new file mode 100644 index 0000000..4307739 --- /dev/null +++ b/test/com/sleepycat/util/test/FastOutputStreamTest.java @@ -0,0 +1,45 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.util.test; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +import com.sleepycat.util.FastOutputStream; + +/** + * @author Mark Hayes + */ +public class FastOutputStreamTest extends TestBase { + + @Test + public void testBufferSizing() { + FastOutputStream fos = new FastOutputStream(); + assertEquals + (FastOutputStream.DEFAULT_INIT_SIZE, fos.getBufferBytes().length); + + /* Write X+1 bytes, expect array size 2X+1 */ + fos.write(new byte[FastOutputStream.DEFAULT_INIT_SIZE + 1]); + assertEquals + ((FastOutputStream.DEFAULT_INIT_SIZE * 2) + 1, + fos.getBufferBytes().length); + + /* Write X+1 bytes, expect array size 4X+3 = (2(2X+1) + 1) */ + fos.write(new byte[FastOutputStream.DEFAULT_INIT_SIZE + 1]); + assertEquals + ((FastOutputStream.DEFAULT_INIT_SIZE * 4) + 3, + fos.getBufferBytes().length); + } +} diff --git a/test/com/sleepycat/util/test/GreaterThan.java b/test/com/sleepycat/util/test/GreaterThan.java new file mode 100644 index 0000000..42aaa6f --- /dev/null +++ b/test/com/sleepycat/util/test/GreaterThan.java @@ -0,0 +1,64 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.test; + +import org.hamcrest.BaseMatcher; +import org.hamcrest.Description; +import org.hamcrest.Matcher; + +/** A JUnit matcher that matches a number greater than the specified value. */ +public class GreaterThan extends BaseMatcher { + + private final Number value; + + /** + * Returns a matcher that checks for a number greater than the specified + * value. + * + * @param value the value to check against + * @return the matcher + */ + public static Matcher greaterThan(Number value) { + return new GreaterThan(value); + } + + /** + * Creates a matcher that checks for a number greater than the specified + * value. + * + * @param value the value to check against + */ + public GreaterThan(Number value) { + this.value = value; + } + + @Override + public boolean matches(Object item) { + if (!(item instanceof Number)) { + return false; + } + if ((item instanceof Double) || (item instanceof Float)) { + final double d = ((Number) item).doubleValue(); + return d > value.doubleValue(); + } else { + final long l = ((Number) item).longValue(); + return l > value.longValue(); + } + } + + @Override + public void describeTo(Description desc) { + desc.appendText(" number greater than ").appendValue(value); + } +} diff --git a/test/com/sleepycat/util/test/PackedIntegerTest.java b/test/com/sleepycat/util/test/PackedIntegerTest.java new file mode 100644 index 0000000..27c0c04 --- /dev/null +++ b/test/com/sleepycat/util/test/PackedIntegerTest.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.test; + +import static org.junit.Assert.fail; + +import org.junit.Test; + +import com.sleepycat.util.PackedInteger; + +public class PackedIntegerTest extends TestBase { + static final long V119 = 119L; + static final long MAX_1 = 0xFFL; + static final long MAX_2 = 0xFFFFL; + static final long MAX_3 = 0xFFFFFFL; + static final long MAX_4 = 0xFFFFFFFFL; + static final long MAX_5 = 0xFFFFFFFFFFL; + static final long MAX_6 = 0xFFFFFFFFFFFFL; + static final long MAX_7 = 0xFFFFFFFFFFFFFFL; + + @Test + public void runTest() { + + /* Packed int tests. */ + + testIntRange(-V119, V119, 1); + + testIntRange(-MAX_1 - V119, -1 - V119, 2); + testIntRange(1 + V119, MAX_1 + V119, 2); + + testIntRange(-MAX_2 - V119, -MAX_2 + 99, 3); + testIntRange(-MAX_1 - V119 - 99, -MAX_1 - V119 - 1, 3); + testIntRange(MAX_1 + V119 + 1, MAX_1 + V119 + 99, 3); + testIntRange(MAX_2 - 99, MAX_2 + V119, 3); + + testIntRange(-MAX_3 - V119, -MAX_3 + 99, 4); + testIntRange(-MAX_2 - V119 - 99, -MAX_2 - V119 - 1, 4); + testIntRange(MAX_2 + V119 + 1, MAX_2 + V119 + 99, 4); + testIntRange(MAX_3 - 99, MAX_3 + V119, 4); + + testIntRange(Integer.MIN_VALUE, Integer.MIN_VALUE + 99, 5); + testIntRange(Integer.MAX_VALUE - 99, Integer.MAX_VALUE, 5); + + /* Packed long tests. */ + + testLongRange(-V119, V119, 1); + + testLongRange(-MAX_1 - V119, -1 - V119, 2); + testLongRange(1 + V119, MAX_1 + V119, 2); + + testLongRange(-MAX_2 - V119, -MAX_2 + 99, 3); + testLongRange(-MAX_1 - V119 - 99, -MAX_1 - V119 - 1, 3); + testLongRange(MAX_1 + V119 + 1, MAX_1 + V119 + 99, 3); + testLongRange(MAX_2 - 99, MAX_2 + V119, 3); + + testLongRange(-MAX_3 - V119, -MAX_3 + 99, 4); + testLongRange(-MAX_2 - V119 - 99, -MAX_2 - V119 - 1, 4); + testLongRange(MAX_2 + V119 + 1, MAX_2 + V119 + 99, 4); + testLongRange(MAX_3 - 99, MAX_3 + V119, 4); + + testLongRange(-MAX_4 - V119, -MAX_4 + 99, 5); + testLongRange(-MAX_3 - V119 - 99, -MAX_3 - V119 - 1, 5); + testLongRange(MAX_3 + V119 + 1, MAX_3 + V119 + 99, 5); + testLongRange(MAX_4 - 99, MAX_4 + V119, 5); + + testLongRange(-MAX_5 - V119, -MAX_5 + 99, 6); + testLongRange(-MAX_4 - V119 - 99, -MAX_4 - V119 - 1, 6); + testLongRange(MAX_4 + V119 + 1, MAX_4 + V119 + 99, 6); + testLongRange(MAX_5 - 99, MAX_5 + V119, 6); + + testLongRange(-MAX_6 - V119, -MAX_6 + 99, 7); + testLongRange(-MAX_5 - V119 - 99, -MAX_5 - V119 - 1, 7); + testLongRange(MAX_5 + V119 + 1, MAX_5 + V119 + 99, 7); + testLongRange(MAX_6 - 99, MAX_6 + V119, 7); + + testLongRange(-MAX_7 - V119, -MAX_7 + 99, 8); + testLongRange(-MAX_6 - V119 - 99, -MAX_6 - V119 - 1, 8); + testLongRange(MAX_6 + V119 + 1, MAX_6 + V119 + 99, 8); + testLongRange(MAX_7 - 99, MAX_7 + V119, 8); + + testLongRange(Long.MIN_VALUE, Long.MIN_VALUE + 99, 9); + testLongRange(Long.MAX_VALUE - 99, Long.MAX_VALUE - 1, 9); + + /* */ + + /* Reverse-packed int tests. 
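Values are written forward, but reads walk the buffer backward from the last byte (see testReverseIntRange below).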
*/ + + testReverseIntRange(-V119, V119, 1); + + testReverseIntRange(-MAX_1 - V119, -1 - V119, 2); + testReverseIntRange(1 + V119, MAX_1 + V119, 2); + + testReverseIntRange(-MAX_2 - V119, -MAX_2 + 99, 3); + testReverseIntRange(-MAX_1 - V119 - 99, -MAX_1 - V119 - 1, 3); + testReverseIntRange(MAX_1 + V119 + 1, MAX_1 + V119 + 99, 3); + testReverseIntRange(MAX_2 - 99, MAX_2 + V119, 3); + + testReverseIntRange(-MAX_3 - V119, -MAX_3 + 99, 4); + testReverseIntRange(-MAX_2 - V119 - 99, -MAX_2 - V119 - 1, 4); + testReverseIntRange(MAX_2 + V119 + 1, MAX_2 + V119 + 99, 4); + testReverseIntRange(MAX_3 - 99, MAX_3 + V119, 4); + + testReverseIntRange(Integer.MIN_VALUE, Integer.MIN_VALUE + 99, 5); + testReverseIntRange(Integer.MAX_VALUE - 99, Integer.MAX_VALUE, 5); + + /* */ + } + + private void testIntRange(long firstValue, + long lastValue, + int bytesExpected) { + + byte[] buf = new byte[1000]; + int off = 0; + + for (long longI = firstValue; longI <= lastValue; longI += 1) { + int i = (int) longI; + int before = off; + off = PackedInteger.writeInt(buf, off, i); + int bytes = off - before; + if (bytes != bytesExpected) { + fail("output of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + bytes = PackedInteger.getWriteIntLength(i); + if (bytes != bytesExpected) { + fail("count of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + } + + off = 0; + + for (long longI = firstValue; longI <= lastValue; longI += 1) { + int i = (int) longI; + int bytes = PackedInteger.getReadIntLength(buf, off); + if (bytes != bytesExpected) { + fail("count of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + int value = PackedInteger.readInt(buf, off); + if (value != i) { + fail("input of value=" + i + " but got=" + value); + } + off += bytes; + } + } + + private void testLongRange(long firstValue, + long lastValue, + int bytesExpected) { + + byte[] buf = new byte[2000]; + int off = 0; + + for (long longI = firstValue; longI <= lastValue; longI += 1) { + long i = longI; + int before = off; + off = PackedInteger.writeLong(buf, off, i); + int bytes = off - before; + if (bytes != bytesExpected) { + fail("output of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + bytes = PackedInteger.getWriteLongLength(i); + if (bytes != bytesExpected) { + fail("count of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + } + + off = 0; + + for (long longI = firstValue; longI <= lastValue; longI += 1) { + long i = longI; + int bytes = PackedInteger.getReadLongLength(buf, off); + if (bytes != bytesExpected) { + fail("count of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + long value = PackedInteger.readLong(buf, off); + if (value != i) { + fail("input of value=" + i + " but got=" + value); + } + off += bytes; + } + } + + /* */ + + private void testReverseIntRange(long firstValue, + long lastValue, + int bytesExpected) { + + byte[] buf = new byte[1000]; + int off = 0; + + for (long longI = firstValue; longI <= lastValue; longI += 1) { + int i = (int) longI; + int before = off; + off = PackedInteger.writeReverseInt(buf, off, i); + int bytes = off - before; + if (bytes != bytesExpected) { + fail("output of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + bytes = PackedInteger.getWriteIntLength(i); + if (bytes != bytesExpected) { + fail("count of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + } + + off -= 1; + + for (long 
longI = lastValue; longI >= firstValue; longI -= 1) { + int i = (int) longI; + int bytes = PackedInteger.getReadIntLength(buf, off); + if (bytes != bytesExpected) { + fail("count of value=" + i + " bytes=" + bytes + + " bytesExpected=" + bytesExpected); + } + int value = PackedInteger.readReverseInt(buf, off); + if (value != i) { + fail("input of value=" + i + " but got=" + value); + } + off -= bytes; + } + } + + /* */ +} diff --git a/test/com/sleepycat/util/test/SharedTestUtils.java b/test/com/sleepycat/util/test/SharedTestUtils.java new file mode 100644 index 0000000..de892e1 --- /dev/null +++ b/test/com/sleepycat/util/test/SharedTestUtils.java @@ -0,0 +1,322 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.test; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.channels.FileChannel; + +import junit.framework.TestCase; + +import com.sleepycat.je.DatabaseConfig; + +/** + * Test utility methods shared by JE and DB core tests. Collections and + * persist package tests are used in both JE and DB core. + */ +public class SharedTestUtils { + + /* Common system properties for running tests */ + public static String DEST_DIR = "testdestdir"; + public static String TEST_ENV_DIR = "testenvdirroot"; + public static String FAILURE_DIR = "failurecopydir"; + public static String DEFAULT_DEST_DIR = "build/test/classes"; + public static String DEFAULT_TEST_DIR_ROOT = "build/test/envdata"; + public static String DEFAULT_FAIL_DIR = "build/test/failures"; + public static String NO_SYNC = "txnnosync"; + public static String LONG_TEST = "longtest"; + public static String COPY_LIMIT = "copylimit"; + + public static final DatabaseConfig DBCONFIG_CREATE = new DatabaseConfig(); + static { + DBCONFIG_CREATE.setAllowCreate(true); + } + + /** + * Returns the destination directory, which holds compiled class files and + * data generated by tests; it is distinct from the test environment + * directory. + */ + public static File getDestDir() { + String dir = System.getProperty(DEST_DIR, DEFAULT_DEST_DIR); + File file = new File(dir); + if (!file.isDirectory()) + file.mkdir(); + + return file; + } + + /** + * If the system property "testenvdirroot" is not defined, use + * build/test/envdata as the test environment root directory. + */ + public static File getTestDir() { + String dir = System.getProperty(TEST_ENV_DIR, DEFAULT_TEST_DIR_ROOT); + File file = new File(dir); + if (!file.isDirectory()) + file.mkdir(); + + return file; + } + + /** + * Allows a user-defined directory to be used for storing failure copies. + */ + public static File getFailureCopyDir() { + String dir = System.getProperty(FAILURE_DIR, DEFAULT_FAIL_DIR); + File file = new File(dir); + if (!file.isDirectory()) + file.mkdir(); + + return file; + } + + /** + * If a test fails, its environment is copied to another location. The + * default limit on the number of copies is 10.
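+ * The limit is read from the COPY_LIMIT ("copylimit") system property.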
+ */ + public static int getCopyLimit() { + String limit = System.getProperty(COPY_LIMIT, "10"); + + return Integer.parseInt(limit); + } + + /** + * @return true if long running tests are enabled via setting the system + * property longtest=true. + */ + public static boolean runLongTests() { + String longTestProp = System.getProperty(LONG_TEST); + if ((longTestProp != null) && + longTestProp.equalsIgnoreCase("true")) { + return true; + } else { + return false; + } + } + + public static void printTestName(String name) { + // don't want verbose printing for now + // System.out.println(name); + } + + public static File getExistingDir(String name) { + File dir = new File(getTestDir(), name); + if (!dir.exists() || !dir.isDirectory()) { + throw new IllegalStateException( + "Not an existing directory: " + dir); + } + return dir; + } + + public static File getNewDir() { + return getNewDir("test-dir"); + } + + public static void emptyDir(File dir) { + if (dir.isDirectory()) { + String[] files = dir.list(); + if (files != null) { + for (int i = 0; i < files.length; i += 1) { + new File(dir, files[i]).delete(); + } + } + } else { + dir.delete(); + dir.mkdirs(); + } + } + + /** + * @return A sub-directory of the current test destination directory. + */ + public static File getNewDir(String name) { + File dir = new File(getTestDir(), name); + emptyDir(dir); + return dir; + } + + public static File getNewFile() { + return getNewFile("test-file"); + } + + public static File getNewFile(String name) { + return getNewFile(getTestDir(), name); + } + + public static File getNewFile(File dir, String name) { + File file = new File(dir, name); + file.delete(); + return file; + } + + public static boolean copyResource(Class cls, String fileName, File toDir) + throws IOException { + + InputStream in = cls.getResourceAsStream("testdata/" + fileName); + if (in == null) { + return false; + } + in = new BufferedInputStream(in); + File file = new File(toDir, fileName); + OutputStream out = new FileOutputStream(file); + out = new BufferedOutputStream(out); + int c; + while ((c = in.read()) >= 0) out.write(c); + in.close(); + out.close(); + return true; + } + + public static String qualifiedTestName(TestCase test) { + + String s = test.getClass().getName(); + int i = s.lastIndexOf('.'); + if (i >= 0) { + s = s.substring(i + 1); + } + return s + '.' + test.getName(); + } + + /** + * Copies all files in fromDir to toDir. Does not copy subdirectories. + */ + public static void copyFiles(File fromDir, File toDir) + throws IOException { + + String[] names = fromDir.list(); + if (names != null) { + for (int i = 0; i < names.length; i += 1) { + File fromFile = new File(fromDir, names[i]); + if (fromFile.isDirectory()) { + continue; + } + File toFile = new File(toDir, names[i]); + int len = (int) fromFile.length(); + byte[] data = new byte[len]; + FileInputStream fis = null; + FileOutputStream fos = null; + try { + fis = new FileInputStream(fromFile); + fos = new FileOutputStream(toFile); + fis.read(data); + fos.write(data); + } finally { + if (fis != null) { + fis.close(); + } + if (fos != null) { + fos.close(); + } + } + } + } + } + + /** + * Copies everything in the test destination directory to another location + * for later evaluation when a test fails.
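+ * Sub-directories are copied recursively.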
+ */ + public static void copyDir(File fromDir, File toDir) + throws Exception { + + if (fromDir == null || toDir == null) + throw new NullPointerException("File location error"); + + if (!fromDir.isDirectory()) + throw new IllegalStateException + (fromDir + " should be a directory"); + + if (!toDir.exists() && !toDir.mkdirs()) + throw new IllegalStateException("Unable to create copy dest dir: " + + toDir); + + String[] list = fromDir.list(); + if (list != null) { + + for (String fileName : list) { + File file = new File(fromDir, fileName); + if (file.isDirectory()) + copyDir(file, new File(toDir, fileName)); + else + copyFile(file, new File(toDir, fileName)); + } + } + } + + /** + * Copies a file to the specified location. + */ + private static void copyFile(File from, File to) + throws Exception { + + if (to.isDirectory()) + to = new File(to, from.getName()); + + FileInputStream fis = null; + FileOutputStream fos = null; + FileChannel fcin = null; + FileChannel fcout = null; + + try { + fis = new FileInputStream(from); + fos = new FileOutputStream(to); + fcin = fis.getChannel(); + fcout = fos.getChannel(); + fcin.transferTo(0, fcin.size(), fcout); + } finally { + if (fis != null) { + fis.close(); + } + if (fos != null) { + fos.close(); + } + } + } + + /** + * Cleans up everything in the JE test destination directory, including + * all files and sub-directories generated by the last test, except + * je.properties. + */ + public static void cleanUpTestDir(File dir) { + if (!dir.isDirectory() || !dir.exists()) + throw new IllegalStateException( + "Not an existing directory: " + dir); + File[] files = dir.listFiles(); + if (files == null) + return; + + for (File file : files) { + if ("je.properties".equals(file.getName())) + continue; + + if (file.isDirectory()) { + cleanUpTestDir(file); + + if (file.list().length == 0 && !file.delete()) + throw new IllegalStateException( + "Unable to delete " + file); + } else { + if (!file.delete()) + throw new IllegalStateException( + "Unable to delete " + file); + } + } + } +} diff --git a/test/com/sleepycat/util/test/TestBase.java b/test/com/sleepycat/util/test/TestBase.java new file mode 100644 index 0000000..0ec05d9 --- /dev/null +++ b/test/com/sleepycat/util/test/TestBase.java @@ -0,0 +1,112 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.test; + +import java.io.File; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.TestRule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; + +/** + * The base class for all JE unit tests. + */ +public abstract class TestBase { + + private static final boolean copySucceeded = + Boolean.getBoolean("test.copySucceeded"); + + /* + * We need to provide a customized name suffix for tests that are + * Parameterized. + * + * This is because we need a unique directory name for each failed test; + * a Parameterized class reuses test cases, so the class name plus the + * test method is not unique.
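A custom suffix makes the failure-copy directory name unique for each parameterized run.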
User should set the customName + * in the constructor of a Parameterized test. + */ + protected String customName; + + /** + * The rule we use to control every test case. The core of this rule is + * to copy the testing environment, files, and sub-directories to another + * place for future investigation if any test fails. But we do have a + * limit on how many times we copy, because of disk space, so once the + * failure counter exceeds the limit, the environment is no longer copied. + */ + @Rule + public TestRule watchman = new TestWatcher() { + + /* Copy environments when the test fails. */ + @Override + protected void failed(Throwable t, Description desc) { + doCopy(desc); + } + + @Override + protected void succeeded(Description desc){ + if (copySucceeded) { + doCopy(desc); + } + } + + private void doCopy(Description desc) { + String dirName = makeFileName(desc); + try { + copyEnvironments(dirName); + } catch (Exception e) { + throw new RuntimeException + ("can't copy env dir to " + dirName + " after failure", e); + } + } + }; + + @Before + public void setUp() + throws Exception { + + SharedTestUtils.cleanUpTestDir(SharedTestUtils.getTestDir()); + } + + @After + public void tearDown() throws Exception { + // Provision for future use + } + + /** + * Copies the testing directory to another place. + */ + private void copyEnvironments(String path) throws Exception { + + File failureDir = SharedTestUtils.getFailureCopyDir(); + if (failureDir.list().length < SharedTestUtils.getCopyLimit()) { + SharedTestUtils.copyDir(SharedTestUtils.getTestDir(), + new File(failureDir, path)); + } + } + + /** + * Gets the failure copy directory name. + */ + private String makeFileName(Description desc) { + String name = desc.getClassName() + "-" + desc.getMethodName(); + if (customName != null) { + name = name + "-" + customName; + } + return name; + } +} diff --git a/test/com/sleepycat/util/test/TestEnv.java b/test/com/sleepycat/util/test/TestEnv.java new file mode 100644 index 0000000..91f04a5 --- /dev/null +++ b/test/com/sleepycat/util/test/TestEnv.java @@ -0,0 +1,147 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information.
+ */ + +package com.sleepycat.util.test; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; + +/** + * @author Mark Hayes + */ +public class TestEnv { + + public static final TestEnv BDB; + public static final TestEnv CDB; + public static final TestEnv TXN; + static { + EnvironmentConfig config; + + config = newEnvConfig(); + BDB = new TestEnv("bdb", config); + + if (DbCompat.CDB) { + config = newEnvConfig(); + DbCompat.setInitializeCDB(config, true); + CDB = new TestEnv("cdb", config); + } else { + CDB = null; + } + + config = newEnvConfig(); + config.setTransactional(true); + DbCompat.setInitializeLocking(config, true); + TXN = new TestEnv("txn", config); + } + + private static EnvironmentConfig newEnvConfig() { + + EnvironmentConfig config = new EnvironmentConfig(); + config.setTxnNoSync(Boolean.getBoolean(SharedTestUtils.NO_SYNC)); + if (DbCompat.MEMORY_SUBSYSTEM) { + DbCompat.setInitializeCache(config, true); + } + return config; + } + + public static final TestEnv[] ALL; + static { + if (DbCompat.CDB) { + ALL = new TestEnv[] { BDB, CDB, TXN }; + } else { + ALL = new TestEnv[] { BDB, TXN }; + } + } + + private final String name; + private final EnvironmentConfig config; + + protected TestEnv(String name, EnvironmentConfig config) { + + this.name = name; + this.config = config; + } + + public String getName() { + + return name; + } + + public EnvironmentConfig getConfig() { + return config; + } + + void copyConfig(EnvironmentConfig copyToConfig) { + DbCompat.setInitializeCache + (copyToConfig, DbCompat.getInitializeCache(config)); + DbCompat.setInitializeLocking + (copyToConfig, DbCompat.getInitializeLocking(config)); + DbCompat.setInitializeCDB + (copyToConfig, DbCompat.getInitializeCDB(config)); + copyToConfig.setTransactional(config.getTransactional()); + } + + public boolean isTxnMode() { + + return config.getTransactional(); + } + + public boolean isCdbMode() { + + return DbCompat.getInitializeCDB(config); + } + + public Environment open(String testName) + throws IOException, DatabaseException { + + return open(testName, true); + } + + public Environment open(String testName, boolean create) + throws IOException, DatabaseException { + + config.setAllowCreate(create); + /* OLDEST deadlock detection on DB matches the use of timeouts on JE.*/ + DbCompat.setLockDetectModeOldest(config); + File dir = getDirectory(testName, create); + return newEnvironment(dir, config); + } + + /** + * Is overridden in XACollectionTest. + * @throws FileNotFoundException from DB core. + */ + protected Environment newEnvironment(File dir, EnvironmentConfig config) + throws DatabaseException, FileNotFoundException { + + return new Environment(dir, config); + } + + public File getDirectory(String testName) { + return getDirectory(testName, true); + } + + public File getDirectory(String testName, boolean create) { + if (create) { + return SharedTestUtils.getNewDir(testName); + } else { + return SharedTestUtils.getExistingDir(testName); + } + } +} diff --git a/test/com/sleepycat/util/test/TxnTestCase.java b/test/com/sleepycat/util/test/TxnTestCase.java new file mode 100644 index 0000000..19b74ca --- /dev/null +++ b/test/com/sleepycat/util/test/TxnTestCase.java @@ -0,0 +1,243 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.test; + +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.junit.After; +import org.junit.Before; + +import com.sleepycat.compat.DbCompat; +import com.sleepycat.je.CursorConfig; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.util.DualTestCase; + +/** + * Permutes test cases over three transaction types: null (non-transactional), + * auto-commit, and user (explicit). + * + *

+ * <p>Overrides runTest, setUp and tearDown to open/close the environment and
+ * to set up protected members for use by test cases.</p>
+ *
+ * <p>If a subclass needs to override setUp or tearDown, the overridden method
+ * should call super.setUp or super.tearDown.</p>
+ *
+ * <p>When writing a test case based on this class, write it as if a user txn
+ * were always used: call txnBegin, txnCommit and txnAbort for all write
+ * operations. Use the isTransactional protected field for setup of a database
+ * config.</p>

        + */ +public abstract class TxnTestCase extends DualTestCase { + + public static final String TXN_NULL = "txn-null"; + public static final String TXN_AUTO = "txn-auto"; + public static final String TXN_USER = "txn-user"; + public static final String TXN_CDB = "txn-cdb"; + + protected File envHome; + protected Environment env; + protected EnvironmentConfig envConfig; + protected String txnType; + protected boolean isTransactional; + + public static List getTxnParams(String[] txnTypes, boolean rep) { + final List list = new ArrayList<>(); + for (final String type : getTxnTypes(txnTypes, rep)) { + list.add(new Object[] {type}); + } + return list; + } + + public static String[] getTxnTypes(String[] txnTypes, boolean rep) { + if (txnTypes == null) { + if (rep) { + txnTypes = new String[] { // Skip non-transactional tests + TxnTestCase.TXN_USER, + TxnTestCase.TXN_AUTO }; + } else if (!DbCompat.CDB) { + txnTypes = new String[] { TxnTestCase.TXN_NULL, + TxnTestCase.TXN_USER, + TxnTestCase.TXN_AUTO }; + } else { + txnTypes = new String[] { TxnTestCase.TXN_NULL, + TxnTestCase.TXN_USER, + TxnTestCase.TXN_AUTO, + TxnTestCase.TXN_CDB }; + } + } else { + if (!DbCompat.CDB) { + /* Remove TxnTestCase.TXN_CDB, if there is any. */ + final ArrayList tmp = + new ArrayList<>(Arrays.asList(txnTypes)); + tmp.remove(TxnTestCase.TXN_CDB); + txnTypes = new String[tmp.size()]; + tmp.toArray(txnTypes); + } + } + return txnTypes; + } + + @Before + public void setUp() + throws Exception { + + super.setUp(); + envHome = SharedTestUtils.getNewDir(); + openEnv(); + } + + @After + public void tearDown() + throws Exception { + + super.tearDown(); + closeEnv(); + env = null; + } + + protected void initEnvConfig() { + if (envConfig == null) { + envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + + /* Always use write-no-sync (by default) to speed up tests. */ + if (!envConfig.getTxnNoSync() && !envConfig.getTxnWriteNoSync()) { + envConfig.setTxnWriteNoSync(true); + } + } + } + + /** + * Closes the environment and sets the env field to null. + * Used for closing and reopening the environment. + */ + public void closeEnv() + throws DatabaseException { + + if (env != null) { + close(env); + env = null; + } + } + + /** + * Opens the environment based on the txnType for this test case. + * Used for closing and reopening the environment. 
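/*
 * [Editor's sketch; not part of the original import.] The expected subclass
 * wiring: feed getTxnParams into JUnit's Parameterized runner and set the
 * protected fields in the constructor. ExampleTxnTest and testWrite are
 * invented names; initEnvConfig, txnBegin and txnCommit are the members
 * defined in this class.
 */
import java.util.List;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.Transaction;

@RunWith(Parameterized.class)
public class ExampleTxnTest extends TxnTestCase {

    @Parameterized.Parameters
    public static List<Object[]> genParams() {
        return getTxnParams(null, false /* rep */);
    }

    public ExampleTxnTest(String type) {
        initEnvConfig();
        txnType = type;
        isTransactional = (txnType != TXN_NULL);
    }

    @Test
    public void testWrite()
        throws DatabaseException {

        final Transaction txn = txnBegin(); /* null unless TXN_USER */
        /* ... write operations against env ... */
        txnCommit(txn);                     /* no-op when txn is null */
    }
}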
+ */ + public void openEnv() + throws DatabaseException { + + if (txnType == TXN_NULL) { + TestEnv.BDB.copyConfig(envConfig); + env = create(envHome, envConfig); + } else if (txnType == TXN_AUTO) { + TestEnv.TXN.copyConfig(envConfig); + env = create(envHome, envConfig); + } else if (txnType == TXN_USER) { + TestEnv.TXN.copyConfig(envConfig); + env = create(envHome, envConfig); + } else if (txnType == TXN_CDB) { + TestEnv.CDB.copyConfig(envConfig); + env = create(envHome, envConfig); + } else { + assert false; + } + } + + /** + * Begin a txn if in TXN_USER mode; otherwise return null; + */ + protected Transaction txnBegin() + throws DatabaseException { + + return txnBegin(null, null); + } + + /** + * Begin a txn if in TXN_USER mode; otherwise return null; + */ + protected Transaction txnBegin(Transaction parentTxn, + TransactionConfig config) + throws DatabaseException { + + if (txnType == TXN_USER) { + return env.beginTransaction(parentTxn, config); + } + return null; + } + + /** + * Begin a txn if in TXN_USER or TXN_AUTO mode; otherwise return null; + */ + protected Transaction txnBeginCursor() + throws DatabaseException { + + return txnBeginCursor(null, null); + } + + /** + * Begin a txn if in TXN_USER or TXN_AUTO mode; otherwise return null; + */ + protected Transaction txnBeginCursor(Transaction parentTxn, + TransactionConfig config) + throws DatabaseException { + + if (txnType == TXN_USER || txnType == TXN_AUTO) { + return env.beginTransaction(parentTxn, config); + } else { + return null; + } + } + + /** + * Create a write cursor config; + */ + public CursorConfig getWriteCursorConfig() { + if (txnType != TXN_CDB) { + return null; + } + final CursorConfig config = new CursorConfig(); + DbCompat.setWriteCursor(config, true); + return config; + } + + /** + * Commit a txn if non-null. + */ + protected void txnCommit(Transaction txn) + throws DatabaseException { + + if (txn != null) { + txn.commit(); + } + } + + /** + * Commit a txn if non-null. + */ + protected void txnAbort(Transaction txn) + throws DatabaseException { + + if (txn != null) { + txn.abort(); + } + } +} diff --git a/test/com/sleepycat/util/test/UtfTest.java b/test/com/sleepycat/util/test/UtfTest.java new file mode 100644 index 0000000..47bc5d0 --- /dev/null +++ b/test/com/sleepycat/util/test/UtfTest.java @@ -0,0 +1,142 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.util.test; + +import static org.junit.Assert.fail; + +import java.io.DataOutputStream; +import java.util.Arrays; + +import org.junit.Test; + +import com.sleepycat.util.FastOutputStream; +import com.sleepycat.util.UtfOps; + +/** + * @author Mark Hayes + */ +public class UtfTest extends TestBase { + + /** + * Compares the UtfOps implementation to the java.util.DataOutputStream + * (and by implication DataInputStream) implementation, character for + * character in the full Unicode set. 
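/*
 * [Editor's sketch; not part of the original import.] The comparison
 * technique described above, reduced to one character: writeUTF prepends a
 * two-byte length, so the encoded bytes start at offset 2 of the Java
 * buffer. UtfLengthDemo and the variable names are invented; UtfOps and
 * FastOutputStream are used exactly as in the test that follows.
 */
import java.io.DataOutputStream;
import java.io.IOException;

import com.sleepycat.util.FastOutputStream;
import com.sleepycat.util.UtfOps;

public class UtfLengthDemo {

    public static void main(String[] args)
        throws IOException {

        char[] one = { '\u00e9' };    /* two bytes in modified UTF-8 */
        byte[] javaBuf = new byte[10];
        FastOutputStream bufStream = new FastOutputStream(javaBuf);
        DataOutputStream out = new DataOutputStream(bufStream);
        out.writeUTF(new String(one));
        int javaLen = bufStream.size() - 2;  /* strip the length prefix */
        int utfOpsLen = UtfOps.getByteLength(one);
        System.out.println(javaLen + " == " + utfOpsLen);
    }
}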
+ */ + @Test + public void testMultibyte() + throws Exception { + + char c = 0; + byte[] buf = new byte[10]; + byte[] javaBuf = new byte[10]; + char[] cArray = new char[1]; + FastOutputStream javaBufStream = new FastOutputStream(javaBuf); + DataOutputStream javaOutStream = new DataOutputStream(javaBufStream); + + try { + for (int cInt = Character.MIN_VALUE; cInt <= Character.MAX_VALUE; + cInt += 1) { + c = (char) cInt; + cArray[0] = c; + int byteLen = UtfOps.getByteLength(cArray); + + javaBufStream.reset(); + javaOutStream.writeUTF(new String(cArray)); + int javaByteLen = javaBufStream.size() - 2; + + if (byteLen != javaByteLen) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps size " + byteLen + + " != JavaIO size " + javaByteLen); + } + + Arrays.fill(buf, (byte) 0); + UtfOps.charsToBytes(cArray, 0, buf, 0, 1); + + if (byteLen == 1 && buf[0] == (byte) 0xff) { + fail("Character 0x" + Integer.toHexString(c) + + " was encoded as FF, which is reserved for null"); + } + + for (int i = 0; i < byteLen; i += 1) { + if (buf[i] != javaBuf[i + 2]) { + fail("Character 0x" + Integer.toHexString(c) + + " byte offset " + i + + " UtfOps byte " + Integer.toHexString(buf[i]) + + " != JavaIO byte " + + Integer.toHexString(javaBuf[i + 2])); + } + } + + int charLen = UtfOps.getCharLength(buf, 0, byteLen); + if (charLen != 1) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps char len " + charLen + + " but should be one"); + } + + cArray[0] = (char) 0; + int len = UtfOps.bytesToChars(buf, 0, cArray, 0, byteLen, + true); + if (len != byteLen) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/byteLen) len " + len + + " but should be " + byteLen); + } + + if (cArray[0] != c) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/byteLen) char " + + Integer.toHexString(cArray[0])); + } + + cArray[0] = (char) 0; + len = UtfOps.bytesToChars(buf, 0, cArray, 0, 1, false); + if (len != byteLen) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/charLen) len " + len + + " but should be " + byteLen); + } + + if (cArray[0] != c) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps bytesToChars(w/charLen) char " + + Integer.toHexString(cArray[0])); + } + + String s = new String(cArray, 0, 1); + byte[] sBytes = UtfOps.stringToBytes(s); + if (sBytes.length != byteLen) { + fail("Character 0x" + Integer.toHexString(c) + + " UtfOps stringToBytes() len " + sBytes.length + + " but should be " + byteLen); + } + + for (int i = 0; i < byteLen; i += 1) { + if (sBytes[i] != javaBuf[i + 2]) { + fail("Character 0x" + Integer.toHexString(c) + + " byte offset " + i + + " UtfOps byte " + Integer.toHexString(sBytes[i]) + + " != JavaIO byte " + + Integer.toHexString(javaBuf[i + 2])); + } + } + } + } catch (Exception e) { + System.out.println("Character 0x" + Integer.toHexString(c) + + " exception occurred"); + throw e; + } + } +} diff --git a/test/com/sleepycat/utilint/LatencyStatTest.java b/test/com/sleepycat/utilint/LatencyStatTest.java new file mode 100644 index 0000000..7a62df2 --- /dev/null +++ b/test/com/sleepycat/utilint/LatencyStatTest.java @@ -0,0 +1,417 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +package com.sleepycat.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +import org.junit.Test; + +public class LatencyStatTest { + + private static final int STRESS_SECONDS = 20; + private static final double DELTA = 1e-15; + + private volatile LatencyStat stressStat = new LatencyStat(100); + private AtomicLong stressExpectOps = new AtomicLong(); + private AtomicLong stressActualOps = new AtomicLong(); + private AtomicLong stressExpectReq = new AtomicLong(); + private AtomicLong stressActualReq = new AtomicLong(); + private final Random globalRnd = new Random(123); + + @Test + public void testMillisLatency() { + LatencyStat interval = new LatencyStat(100); + LatencyStat accumulate = new LatencyStat(100); + + long totalTime = 0; + for (int i = 0; i <= 11; i++) { + totalTime += (i * 10 * 1000000); + interval.set(i * 10 * 1000000); + accumulate.set(i * 10 * 1000000); + } + + Latency results = interval.calculateAndClear(); + checkResults(results, 12, 12, 0, 110, 55.0f, 80, 80, 2); + results = accumulate.calculate(); + checkResults(results, 12, 12, 0, 110, 55.0f, 80, 80, 2); + + for (int i = 0; i < 20; i++) { + totalTime += 92000000; + interval.set(92000000); + accumulate.set(92000000); + } + + checkResults(interval.calculateAndClear(), + 20, 20, 92, 92, 92.0f, 92, 92, 0); + checkResults(accumulate.calculate(), + 32, 32, 0, 110, 78.125f, 92, 92, 2); + + interval.clear(); + accumulate.clear(); + + for (int i = 0; i < 100; i++) { + interval.set(i * 1000000); + accumulate.set(i * 1000000); + } + checkResults(interval.calculateAndClear(), + 100, 100, 0, 99, 49.5f, 94, 98, 0); + checkResults(accumulate.calculate(), + 100, 100, 0, 99, 49.5f, 94, 98, 0); + + } + + @Test + public void testNanoLatency() { + LatencyStat interval = new LatencyStat(100); + LatencyStat accumulate = new LatencyStat(100); + + long totalTime = 0; + for (int i = 0; i <= 11; i++) { + totalTime += (i * 10000); + interval.set(i * 10000); + accumulate.set(i * 10000); + } + + checkResults(interval.calculateAndClear(), + 12, 12, 0, 0, .055f, 0, 0, 0); + checkResults(accumulate.calculate(), + 12, 12, 0, 0, .055f, 0, 0, 0); + + long time2 = 0; + for (int i = 1; i <= 10; i++) { + time2 += (i * 1000000) + 500000; + totalTime += (i * 1000000) + 500000; + interval.set((i * 1000000) + 500000); + accumulate.set((i * 1000000) + 500000); + } + checkResults(interval.calculateAndClear(), + 10, 10, 2, 11, 6.0f, 10, 10, 0); + checkResults(accumulate.calculate(), + 22, 22, 0, 11, 2.7572727f, 9, 10, 0); + } + + /** Test Latency rollup. 
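/*
 * [Editor's sketch; not part of the original import.] The LatencyStat
 * pattern these tests exercise: latencies are recorded in nanoseconds,
 * calculate() takes a non-destructive snapshot, and calculateAndClear()
 * ends the current interval. LatencyStatDemo is an invented name; the
 * calls mirror the ones in the tests above.
 */
import com.sleepycat.utilint.Latency;
import com.sleepycat.utilint.LatencyStat;

public class LatencyStatDemo {

    public static void main(String[] args) {
        LatencyStat stat = new LatencyStat(100 /* maxTrackedLatencyMillis */);
        stat.set(5 * 1000000L);       /* one op taking 5 ms, in nanos */
        stat.set(3, 9 * 1000000L);    /* one request covering three ops */
        Latency peek = stat.calculate();              /* non-destructive */
        Latency interval = stat.calculateAndClear();  /* resets interval */
        System.out.println(peek.getAvg() + " / " + interval.getTotalOps());
    }
}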
*/ + @Test + public void testRollup() { + LatencyStat stat1 = new LatencyStat(100); + LatencyStat stat2 = new LatencyStat(100); + + for (int i = 0; i <= 11; i++) { + stat1.set(i * 10 * 1000000); + stat2.set(5, i * 20 * 1000000); + } + + Latency result1 = stat1.calculate(); + checkResults(result1, 12, 12, 0, 110, 55, 80, 80, 2); + Latency result2 = stat2.calculate(); + checkResults(result2, 12, 60, 0, 220, 110, 60, 60, 7); + + /* 95th and 99th become 0 because they are not preserved by rollup. */ + result1.rollup(result2); + checkResults(result1, 24, 72, 0, 220, 82.5f, 0, 0, 9); + } + + /** + * When there is only one op, the 95% and 99% numbers should be the latency + * for that op, not -1. [#21763] + * + * For other small numbers of ops, only the highest value is not included + * in the 95% and 99% values. + */ + @Test + public void testSmallNumberOfOps() { + final LatencyStat stat = new LatencyStat(100); + + stat.set(6900000); + checkResults(stat.calculateAndClear(), + 1, 1, 7, 7, 6.9f, 7, 7, 0); + + stat.set(7 * 1000000); + checkResults(stat.calculate(), + 1, 1, 7, 7, 7, 7, 7, 0); + + stat.set(8 * 1000000); + checkResults(stat.calculate(), + 2, 2, 7, 8, 7.5f, 7, 7, 0); + + stat.set(9 * 1000000); + checkResults(stat.calculate(), + 3, 3, 7, 9, 8, 8, 8, 0); + } + + /** + * Tests LatencyStat.set when passing numRecordedOps GT 1. + */ + @Test + public void testMultiOps() { + final LatencyStat stat = new LatencyStat(100); + + /* Basic check of a single request. */ + stat.set(10, 3 * 1000000); + checkResults(stat.calculateAndClear(), + 1, 10, 3, 3, 3f, 3, 3, 0); + + /* Two requests, no overflow */ + stat.set(5, 1 * 1000000); + stat.set(10, 3 * 1000000); + checkResults(stat.calculateAndClear(), + 2, 15, 1, 3, 2f, 1, 1, 0); + + /* Three requests, one overflow */ + stat.set(5, 3 * 1000000); + stat.set(10, 16 * 1000000); + stat.set(10, 101 * 1000000); + checkResults(stat.calculateAndClear(), + 3, 25, 3, 101, 40f, 3, 3, 1); + + /* Three requests, all overflows. */ + stat.set(5, 101 * 1000000); + stat.set(5, 102 * 1000000); + stat.set(5, 103 * 1000000); + checkResults(stat.calculateAndClear(), + 3, 15, 101, 103, 102, -1, -1, 3); + + /* + * Check that when the very highest recorded latency is high, and the + * rest (95% and 99%) are low, we don't report the high value. Prior + * to a bug fix, the high value was reported. In particular, before the + * bug fix both checks below reported 77 for the 95% and 99% values, + * but 7 is the correct value. [#21763] + */ + for (int i = 0; i < 100; i += 1) { + stat.set(10, 7 * 1000000); + } + stat.set(20, 1 * 77 * 1000000); + checkResults(stat.calculateAndClear(), + 101, 1020, 7, 77, 7.6930695f, 7, 7, 0); + } + + private void checkResults(Latency results, + int expectedReq, + int expectedOps, + int expectedMin, + int expectedMax, + float expectedAvg, + int expected95, + int expected99, + int reqOverflow) { + assertEquals(expectedReq, results.getTotalRequests()); + assertEquals(expectedOps, results.getTotalOps()); + assertEquals(expectedMin, results.getMin()); + assertEquals(expectedMax, results.getMax()); + assertEquals(expectedAvg, results.getAvg(), DELTA); + assertEquals(expected95, results.get95thPercent()); + assertEquals(expected99, results.get99thPercent()); + assertEquals(reqOverflow, results.getRequestsOverflow()); + } + + /** + * Checks that when set(), calculate() and calculateAndClear() are run + * concurrently, we see reasonable values returned by calculate(). 
+ */ + @Test + public void testConcurrentSetCalculateClear() + throws Throwable { + + /* Zero counters. */ + stressStat.clear(); + stressExpectOps.set(0); + stressActualOps.set(0); + stressExpectReq.set(0); + stressActualReq.set(0); + + final long endTime = System.currentTimeMillis() + + (STRESS_SECONDS * 1000); + + /* Do the test. */ + exec(endTime, + new DoSet(), new DoSet(), new DoSet(), new DoSet(), + new DoSet(true), new DoSet(true), new DoSet(true), + new DoCalc(), new DoCalc(), new DoCalc(true)); + + /* Count the very last interval. */ + final Latency latency = stressStat.calculateAndClear(); + stressActualOps.addAndGet(latency.getTotalOps()); + stressActualReq.addAndGet(latency.getTotalRequests()); + + final String msg = String.format + ("expectOps=%,d actualOps=%,d expectReq=%,d actualReq=%,d", + stressExpectOps.get(), stressActualOps.get(), + stressExpectReq.get(), stressActualReq.get()); + + /* Expect LT 0.1% missed ops/requests due to concurrent changes. */ + final double missedOps = stressExpectOps.get() - stressActualOps.get(); + final double missedReq = stressExpectReq.get() - stressActualReq.get(); + assertTrue(msg, missedOps >= 0); + assertTrue(msg, missedReq >= 0); + assertTrue(msg, (missedOps / stressExpectOps.get()) < 0.01); + assertTrue(msg, (missedReq / stressExpectReq.get()) < 0.01); + + //System.out.println(missedOps / stressExpectOps.get()); + //System.out.println(missedReq / stressExpectReq.get()); + } + + class DoSet implements Runnable { + private final boolean multi; + private final Random rnd = new Random(globalRnd.nextInt()); + + DoSet() { + this(false); + } + + DoSet(final boolean multi) { + this.multi = multi; + } + + public void run() { + final int nanos = (rnd.nextInt(99) + 1) * 1000000; + final int nOps = multi ? (rnd.nextInt(10) + 1) : 1; + stressStat.set(nOps, nanos); + stressExpectOps.addAndGet(nOps); + stressExpectReq.addAndGet(1); + } + } + + class DoCalc implements Runnable { + private final boolean clear; + + DoCalc() { + this(false); + } + + DoCalc(final boolean clear) { + this.clear = clear; + } + + public void run() { + final Latency latency = clear ? + stressStat.calculateAndClear() : + stressStat.calculate(); + if (latency.getTotalOps() == 0) { + return; + } + if (clear) { + stressActualOps.addAndGet(latency.getTotalOps()); + stressActualReq.addAndGet(latency.getTotalRequests()); + } + assertTrue(latency.toString(), + latency.get95thPercent() >= 0); + assertTrue(latency.toString(), + latency.get95thPercent() >= 0); + assertTrue(latency.toString(), + latency.getMin() >= 0); + assertTrue(latency.toString(), + latency.getMin() != Integer.MAX_VALUE); + assertTrue(latency.toString(), + latency.getMin() <= Math.round(latency.getAvg())); + assertTrue(latency.toString(), + latency.getMin() <= latency.get95thPercent()); + assertTrue(latency.toString(), + latency.getMin() <= latency.get99thPercent()); + assertTrue(latency.toString(), + latency.getMax() >= latency.getMin()); + assertTrue(latency.toString(), + latency.getMax() >= Math.round(latency.getAvg())); + assertTrue(latency.toString(), + latency.getMax() >= latency.get95thPercent()); + assertTrue(latency.toString(), + latency.getMax() >= latency.get99thPercent()); + assertTrue(latency.toString(), + latency.getAvg() > 0); + assertTrue(latency.toString(), + latency.getRequestsOverflow() == 0); + } + } + + private static void exec(final long endTime, final Runnable... 
tasks) + throws Throwable { + + final int nThreads = tasks.length; + final Thread[] threads = new Thread[nThreads]; + final CountDownLatch startSignal = new CountDownLatch(nThreads); + + final AtomicReference firstEx = + new AtomicReference(null); + + for (int i = 0; i < nThreads; i += 1) { + final Runnable task = tasks[i]; + threads[i] = new Thread() { + @Override + public void run() { + try { + startSignal.countDown(); + startSignal.await(); + while (System.currentTimeMillis() < endTime) { + task.run(); + } + } catch (Throwable e) { + firstEx.compareAndSet(null, e); + } + } + }; + } + + for (Thread t : threads) { + t.start(); + } + + for (Thread t : threads) { + t.join(); + } + + if (firstEx.get() != null) { + throw firstEx.get(); + } + } + + /** + * Checks that when a Latency object previously serialized with JE 5.0.69 + * is deserialized here, the totalRequests field (added in JE 5.0.70) is + * initialized to the totalOps. The latency-5-0-69 file in this package + * was created using the WriteLatencyObject program that exists (only) in + * JE 5.0.69, also in this package. [#21763] + */ + @Test + public void testNewTotalRequestsField() + throws Exception { + + final InputStream is = + getClass().getResourceAsStream("latency-5-0-69"); + assertNotNull(is); + + final ObjectInputStream ois = new ObjectInputStream(is); + final Latency l = (Latency) ois.readObject(); + + assertEquals(100, l.getMaxTrackedLatencyMillis()); + assertEquals(1, l.getMin()); + assertEquals(10, l.getMax()); + assertEquals(1.1f, l.getAvg(), DELTA); + assertEquals(500, l.getTotalOps()); + assertEquals(2, l.get95thPercent()); + assertEquals(3, l.get99thPercent()); + assertEquals(4, l.getRequestsOverflow()); + + assertEquals(500, l.getTotalRequests()); + } +} + diff --git a/test/com/sleepycat/utilint/StatLoggerTest.java b/test/com/sleepycat/utilint/StatLoggerTest.java new file mode 100644 index 0000000..b1c6311 --- /dev/null +++ b/test/com/sleepycat/utilint/StatLoggerTest.java @@ -0,0 +1,236 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
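/*
 * [Editor's sketch; not part of the original import.] The StatLogger
 * contract exercised by the test below, as this editor reads it: rows are
 * appended to <name>.<ext>, and files are rotated so that at most the given
 * numbers of files and of rows per file are kept. StatLoggerDemo and the
 * /tmp path are invented; the constructor-argument meanings are inferred
 * from the test, not from documentation.
 */
import java.io.File;
import java.io.IOException;

import com.sleepycat.utilint.StatLogger;

public class StatLoggerDemo {

    public static void main(String[] args)
        throws IOException {

        File dir = new File("/tmp/statlog-demo");    /* hypothetical dir */
        dir.mkdirs();
        StatLogger logger = new StatLogger(
            dir, "stats", "csv", 3 /* max files */, 100 /* max rows */);
        logger.setHeader("colA,colB");
        logger.log("1,2");       /* always appends a row */
        logger.logDelta("1,2");  /* appends only if different from last row */
    }
}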
+ */ + +package com.sleepycat.utilint; + +import static org.junit.Assert.assertEquals; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileFilter; +import java.io.FileReader; +import java.io.IOException; + +import org.junit.Test; + +import com.sleepycat.util.test.SharedTestUtils; +import com.sleepycat.util.test.TestBase; + +public class StatLoggerTest extends TestBase { + + String filename = "testStatFile"; + + @Test + public void testBasic() throws IOException{ + StringBuffer rowbuf = new StringBuffer(); + String fileext = "csv"; + int columnCount = 4; + String columnDelimiter = ","; + int rowsInFile = 4; + int numFiles = 3; + FileFilter ff = new FindFile(filename); + String header; + + File envHome = SharedTestUtils.getTestDir(); + File[] files = envHome.listFiles(ff); + for (File f : files) { + f.delete(); + } + + StatLogger sl = new StatLogger( + envHome, filename, fileext, numFiles, rowsInFile); + + rowbuf.setLength(0); + for (int i = 0; i < columnCount; i++) { + if (i > 0) { + rowbuf.append(columnDelimiter); + } + rowbuf.append("Column Header" + i); + } + header = rowbuf.toString(); + sl.setHeader(header); + rowbuf.setLength(0); + for (int i = 0; i < columnCount; i++) { + if (i > 0) { + rowbuf.append(columnDelimiter); + } + rowbuf.append(i); + } + + for (int i = 0; i < rowsInFile - 1; i++) { + sl.log(rowbuf.toString()); + } + + files = envHome.listFiles(ff); + assertEquals(files.length, 1); + + for (int j = 0; j < numFiles - 1; j++) { + for (int i = 0; i < rowsInFile - 1; i++) { + sl.log(rowbuf.toString()); + } + } + files = envHome.listFiles(ff); + assertEquals(files.length, numFiles); + + /* add more rows but file number should be max */ + for (int j = 0; j < numFiles; j++) { + for (int i = 0; i < rowsInFile; i++) { + sl.log(rowbuf.toString()); + } + } + + sl.log(rowbuf.toString()); + files = envHome.listFiles(ff); + assertEquals(files.length, numFiles); + + /* Recreate logger like a reboot with existing + * stat file. Make sure file count is correct. + */ + files = envHome.listFiles(ff); + for (File f : files) { + f.delete(); + } + + sl = new StatLogger( + envHome, filename, fileext, numFiles, rowsInFile); + sl.setHeader(header); + + for (int i = 0; i < rowsInFile - 1; i++) { + sl.log(rowbuf.toString()); + } + files = envHome.listFiles(ff); + assertEquals(files.length, 1); + + sl = new StatLogger( + envHome, filename, fileext, numFiles, rowsInFile); + sl.setHeader(header); + files = envHome.listFiles(ff); + assertEquals(files.length, 1); + + sl.log(rowbuf.toString()); + files = envHome.listFiles(ff); + assertEquals(files.length, 2); + + /* Test changing the row count */ + sl.setRowCount(10); + for (int i = 0; i < 5; i++) { + sl.log(rowbuf.toString()); + } + files = envHome.listFiles(ff); + assertEquals(files.length, 2); + } + + @Test + public void testDelta() throws IOException { + StringBuffer rowbuf = new StringBuffer(); + String fileext = "csv"; + int columnCount = 4; + String columnDelimiter = ","; + int rowsInFile = 100; + int numFiles = 3; + FileFilter ff = new FindFile(filename); + String header; + File envHome = SharedTestUtils.getTestDir(); + File testfile = + new File(envHome.getAbsolutePath() + File.separator + filename + + "." 
+ fileext); + + File[] files = envHome.listFiles(ff); + for (File f : files) { + f.delete(); + } + + StatLogger sl = new StatLogger( + envHome, filename, fileext, numFiles, rowsInFile); + + rowbuf.setLength(0); + for (int i = 0; i < columnCount; i++) { + if (i > 0) { + rowbuf.append(columnDelimiter); + } + rowbuf.append("Column Header" + i); + } + header = rowbuf.toString(); + rowbuf.setLength(0); + for (int i = 0; i < columnCount; i++) { + if (i > 0) { + rowbuf.append(columnDelimiter); + } + rowbuf.append(i); + } + + for (int i = 0; i < 10; i++) { + sl.setHeader(header); + sl.logDelta(rowbuf.toString()); + } + + files = envHome.listFiles(ff); + assertEquals(files.length, 1); + /* should only have a header and a data row. */ + assertEquals( + "Number of rows not expected", getRowCount(testfile), 2); + + rowbuf.append("1"); + sl.setHeader(header); + sl.logDelta(rowbuf.toString()); + assertEquals( + "Number of rows not expected", getRowCount(testfile), 3); + + /* simulate a reboot. */ + sl = new StatLogger( + envHome, filename, fileext, numFiles, rowsInFile); + sl.setHeader(header); + sl.logDelta(rowbuf.toString()); + assertEquals( + "Number of rows not expected", getRowCount(testfile), 3); + + sl.setHeader("a" + header); + assertEquals( + "Number of rows not expected", getRowCount(testfile), 1); + files = envHome.listFiles(ff); + assertEquals(files.length, 2); + for (int i = 0; i < 2; i++) { + sl.logDelta(rowbuf.toString()); + } + assertEquals( + "Number of rows not expected", getRowCount(testfile), 2); + } + + private int getRowCount(File file) throws IOException { + BufferedReader fr = null; + int currentRowCount = 0; + try { + fr = new BufferedReader(new FileReader(file)); + while (fr.readLine() != null) { + currentRowCount++; + } + } finally { + if (fr != null) { + fr.close(); + } + } + return currentRowCount; + } + + class FindFile implements FileFilter { + String fileprefix; + FindFile(String fileprefix) { + this.fileprefix = fileprefix; + } + + @Override + public boolean accept(File f) { + return f.getName().startsWith(fileprefix); + } + } +} diff --git a/test/com/sleepycat/utilint/StatsTrackerTest.java b/test/com/sleepycat/utilint/StatsTrackerTest.java new file mode 100644 index 0000000..6c50d0f --- /dev/null +++ b/test/com/sleepycat/utilint/StatsTrackerTest.java @@ -0,0 +1,81 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
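/*
 * [Editor's sketch; not part of the original import.] The markStart/
 * markFinish bracketing used by the test below. StatsTrackerDemo and
 * OpType are invented; the constructor arguments copy the test's values,
 * and their meanings (concurrent-thread threshold, millisecond threshold,
 * maximum thread dumps, maximum tracked latency) are inferred from the
 * test's comments. The raw StatsTracker type mirrors the test's own usage.
 */
import java.util.logging.Logger;

import com.sleepycat.utilint.StatsTracker;

public class StatsTrackerDemo {

    enum OpType { GET, PUT }

    public static void main(String[] args) {
        Logger logger = Logger.getLogger("demo");
        StatsTracker tracker = new StatsTracker(
            OpType.values(), logger, 2, 1, 3, 100);
        long start = tracker.markStart();
        /* ... the operation being timed ... */
        tracker.markFinish(OpType.GET, start);
    }
}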
+ */ + +package com.sleepycat.utilint; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.logging.Logger; + +import org.junit.Test; + +import com.sleepycat.util.test.TestBase; + +public class StatsTrackerTest extends TestBase { + + enum TestType {GET, PUT, DELETE} ; + + /* + * Test thread stack trace dumping + */ + @Test + public void testActivityCounter() + throws InterruptedException { + + Integer maxNumThreadDumps = 3; + + Logger logger = Logger.getLogger("test"); + StatsTracker tracker = + new StatsTracker(TestType.values(), + logger, + 2, + 1, + maxNumThreadDumps, + 100); + + /* + * If there is only one concurrent thread, there should be no thread + * dumps. + */ + for (int i = 0; i < 20; i++) { + long startA = tracker.markStart(); + Thread.sleep(10); + tracker.markFinish(TestType.GET, startA); + } + + /* Did we see some thread dumps? */ + assertEquals(0, tracker.getNumCompletedDumps() ); + + /* + * Simulate three concurrent threads. There should be automatic thread + * dumping, because the tracker is configured to dump when there are + * more than two concurrent threads with operations of > 1 ms. + */ + for (int i = 0; i < 20; i++) { + long startA = tracker.markStart(); + long startB = tracker.markStart(); + long startC = tracker.markStart(); + Thread.sleep(10); + tracker.markFinish(TestType.GET, startA); + tracker.markFinish(TestType.GET, startB); + tracker.markFinish(TestType.GET, startC); + } + + long expectedMaxDumps = maxNumThreadDumps; + + /* Did we see some thread dumps? */ + assertTrue(tracker.getNumCompletedDumps() > 1); + assertTrue(tracker.getNumCompletedDumps() <= expectedMaxDumps); + } +} diff --git a/test/com/sleepycat/utilint/latency-5-0-69 b/test/com/sleepycat/utilint/latency-5-0-69 new file mode 100644 index 0000000..6b08d01 Binary files /dev/null and b/test/com/sleepycat/utilint/latency-5-0-69 differ diff --git a/test/je.properties b/test/je.properties new file mode 100644 index 0000000..d458c02 --- /dev/null +++ b/test/je.properties @@ -0,0 +1,86 @@ +# Property file for unit test usage. Usually, all +# unit tests should run w/out a je.properties file, so +# the test can have total control over its environment. +# It may be useful to use a property file when debugging. +# This file should always be checked in with all properties +# commented out. + +# Settings for permutations of unit testing: +#je.tree.maxEmbeddedLN=0 +#je.lock.oldLockExceptions=false +#je.sharedCache=true +#je.env.sharedLatches=false +#je.evictor.lruOnly=false +#je.evictor.forcedYield=false +#je.env.forcedYield=true +#je.log.useNIO=true +#je.log.directNIO=true +#je.log.chunkedNIO=4096 +#je.cleaner.threads=3 +#je.log.checksumRead=false +#je.checkpointer.highPriority=true + +# Set this property only for rep tests (-Dreponly=1) +#je.rep.preserveRecordVersion=true + +# Setting je.txn.serializable=true here will cause all unit tests +# to run with the Serializable isolation level, regardless of what +# isolation level is set in code via EnvironmentConfig. +# But not all tests work in serializable isolation, for tests testing +# other three isolation degrees. In this case, these tests would fail. +# By using -DisolationLevel=serializable, test code can override this setting, +# by calling EnvironmentConfig.setSerializable(false). +# In other words, it won't influence tests which set different isolation level. 
+# So we should use ant test -DisolationLevel=serializable instead +#je.txn.serializableIsolation=true + +#je.txn.deadlockStackTrace=true + +#java.util.logging.ConsoleHandler.on=true +#java.util.logging.FileHandler.on=true +#java.util.logging.level=INFO + +#je.env.runINCompressor=true +#je.compressor.deadlockRetry=3 +#je.compressor.lockTimeout=5000 + +#je.env.runEvictor=true +#je.maxMemory defaults to 93% of jdb.maxMemory unless specified +#je.maxMemory=256000 +#je.evictor.nodeScanPercentage=25 +#je.evictor.evictionBatchPercentage=25 + +#je.env.runCheckpointer=true +#je.checkpointer.deadlockRetry=3 + +#je.verify.tree.dump=true +#je.verify.inlist=true +#je.verify.throw=false + +#je.env.runCleaner=true +#je.cleaner.deadlockRetry=3 +#je.cleaner.lockTimeout=5000 +#je.cleaner.expunge=false +#je.cleaner.cluster=true + +#je.env.backgroundReadLimit=50 +#je.env.backgroundReadSleep=50000 +#je.env.backgroundWriteLimit=1 +#je.env.backgroundWriteSleep=500000 + +# Set for group commit +#je.log.groupCommitInterval="1 ms" +#je.log.groupCommitThreshold=1 + +# Set for SSL +#je.rep.channelType=ssl +#je.rep.ssl.keyStoreFile=/build/test/classes/ssl/keys.store +#je.rep.ssl.keyStorePassword=unittest +#je.rep.ssl.trustStoreFile=/build/test/classes/ssl/trust.store +#je.rep.ssl.cipherSuites= +#je.rep.ssl.protocols= +#je.rep.ssl.authenticatorClass=com.sleepycat.je.rep.utilint.SSLDNMatchAuthenticator +#je.rep.ssl.authenticatorParams=CN=Unit Test + +# Test running with the Global CBVLSN disabled +#je.rep.repStreamTimeout=0 s diff --git a/test/jenkins/JE_test_deploy.xlsx b/test/jenkins/JE_test_deploy.xlsx new file mode 100644 index 0000000..8ad1553 Binary files /dev/null and b/test/jenkins/JE_test_deploy.xlsx differ diff --git a/test/jenkins/README b/test/jenkins/README new file mode 100644 index 0000000..e354f00 --- /dev/null +++ b/test/jenkins/README @@ -0,0 +1,17 @@ +This directory backups all the test scripts of JE tests that run on Jenkins. + +There are 4 serious of tests for JE: + Coverage Test + Dbsim Test + Unit Test + Standalone Test + +The description and usage about these 4 serious of tests are in the README file at their own directory. + +In addition, the file JE_test_deploy.xlsx gives the visualization of some information about all JE tests, including the test machine, the issued time and the duration time of each test. + +All 15 JE Standalone tests run on slc04arq and they use the same test script je_standalone.sh. +All 11 JE Unit tests run on slc04arq and slc04atb, where slc04arq runs 6 weekly running tests and slc04atb runs 5 daily running tests. They use the same test script je_unit.sh. Note that one daily running test will be always run with the default repository. +All 5 JE DBsim tests run on slc04aro and they use the same test script je_dbsim.sh. + +JE Standalone tests and JE DBsim tests need to extract and check error from log file. They use the same error extract script error_extract_je.sh. diff --git a/test/jenkins/coverage/README b/test/jenkins/coverage/README new file mode 100644 index 0000000..d17f569 --- /dev/null +++ b/test/jenkins/coverage/README @@ -0,0 +1,10 @@ +This series test only contain one project in Jenkins. 
+ +Jenkins Machine: slc04ark +Test Machine: slc04arm +Current Duration: 21hours +Issued Time: 04:33 UTC Every day +Usage: + scp /scratch/jenkins/bin/je_cover.sh tests@slc00brq:~/ + ssh -l tests slc00brq "bash je_cover.sh -t je_cover -h /scratch/tests/bin -j 8 -R ${je_repo} -b ${je_branch} -r ${je_version}" +Emails: adam.qian@oracle.com, mark.hayes@oracle.com, dwayne.chung@oracle.com, dave.rubin@oracle.com, sam.haradhvala@oracle.com, linda.q.lee@oracle.com, tim.blackman@oracle.com, nosql_eng_cn_grp@oracle.com, markos.zaharioudakis@oracle.com, junyi.xie@oracle.com diff --git a/test/jenkins/coverage/je_cover.sh b/test/jenkins/coverage/je_cover.sh new file mode 100644 index 0000000..7c328dc --- /dev/null +++ b/test/jenkins/coverage/je_cover.sh @@ -0,0 +1,179 @@ +#!/bin/bash + +# The arguments that are passed by Jenkins system +TEST_ARG="" +JDK_VERSION="8" +TASK_NAME="" +LOG_LEVEL="" +BRANCH="default" +JEREPO="" +JEREVISION=0 +JEREVISIONARG="" +HGPATH="" + +# Jenkins VM and Test VM +JENKINSVMIP="slc04ark" +JENKINSVMUSERNAME="jenkins" +JENKINSVM="${JENKINSVMUSERNAME}@${JENKINSVMIP}" +TESTVM=`hostname -s` +TESTVMUSERNAME="tests" +TESTVMUSERPASSWORD="123456" + +# The user name used to get the je repository +JEREPUSER="adqian" + +# Some basic direcotory/path/filename +BASEDIR="/scratch/tests" +JENKINSBASEDIR="/scratch/jenkins/jobs" +JENKINSBINDIR="/scratch/jenkins/bin" +CLOVERDIR="clover_for_je" +CHANGESETFILE="jenkins_changeset.txt" +ENVINFOFILE="location_of_environment_and_log.txt" + +while getopts "O:j:t:R:b:r:l:h:" OPTION +do + case $OPTION in + O) + TEST_ARG=$OPTARG + ;; + j) + JDK_VERSION=$OPTARG + ;; + t) + TASK_NAME=$OPTARG + ;; + R) + JEREPO=$OPTARG + ;; + b) + BRANCH=$OPTARG + ;; + r) + JEREVISION=$OPTARG + ;; + l) + LOG_LEVEL=$OPTARG + ;; + h) + HGPATH=$OPTARG + ;; + esac +done + +if [ "${JEREPO}" == "" ]; then + echo "JE repository must be specified" + exit 1 +fi + +if [ "${JEREVISION}" != "0" ]; then + JEREVISIONARG=" -u ${JEREVISION}" +fi + +if [ "${HGPATH}" != "" ]; then + HGPATH="${HGPATH}/" +fi + +echo "Task name: $TASK_NAME" +echo "Test args: $TEST_ARG" +echo "JE repo: ssh://${JEREPUSER}@${JEREPO}" +echo "JE branch: $BRANCH" +echo "JE revision(0 means the top): $JEREVISION" + +# hg clone je +rm -rf ${BASEDIR}/${TASK_NAME} && mkdir -p ${BASEDIR}/${TASK_NAME} +echo "hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO}" +cd ${BASEDIR}/${TASK_NAME} && ${HGPATH}hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO} ./je +cd je && ${HGPATH}hg log -l 1 -v > ./${CHANGESETFILE} && cd .. 
+BUILD_VER=`cd ${BASEDIR}/${TASK_NAME}/je && ${HGPATH}hg parent` + +if [ X$JDK_VERSION == X"8" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_8 +elif [ X$JDK_VERSION == X"7" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_7 +elif [ X$JDK_VERSION == X"6" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_6 +elif [ X$JDK_VERSION == X"AIX" ] ; then + export JAVA_HOME=${BASEDIR}/app/ibm-java-ppc64-80 +else + export JAVA_HOME=${BASEDIR}/app/Java_5 +fi + +export ANT_HOME=${BASEDIR}/app/ant +export PATH=$ANT_HOME/bin:$JAVA_HOME/bin:$PATH +export CLASSPATH=${BASEDIR}/app/ant/lib/junit-4.10.jar:$CLASSPATH + +ROOT_DIR=${BASEDIR}/${TASK_NAME} + +ANT_VERN=`ant -version` + +echo " " +echo "=========================================================" +echo " " +java -version +ant -version +echo "JAVA_HOME=$JAVA_HOME " +echo "ANT_HOME=$ANT_HOME " +echo "Code branch: $BRANCH $BUILD_VER " +echo " " +echo "=========================================================" +echo " " + +if [ X$LOG_LEVEL == X"INFO" ] ; then + echo "com.sleepycat.je.util.ConsoleHandler.level=INFO" > ${ROOT_DIR}/je/logging.properties +fi + +# In previous method,we will use a diff file to change the interanl.xml +# and build.xml to be suitable for je cover test. But this method has a +# disadvantage, i.e. every time we change this two files, we need to +# generate the new diff file, other the diff file can not apply for the +# new je version. The diff file generation is very complicate. +# scp ${JENKINSVM}:~/bin/je_cover.diff ${ROOT_DIR}/je/ +# cd ${ROOT_DIR}/je && ${HGPATH}hg import --no-commit ./je_cover.diff +# In current method, we know which place need to be modified. +# So we directly substitute these places. +scp ${JENKINSVM}:${JENKINSBINDIR}/${CLOVERDIR}/clover*.* ${BASEDIR}/app/ant/lib +cd ${ROOT_DIR}/je +sed -i 's/inheritall=\"false\"/inheritall=\"true\"/g' ./ant/internal.xml +sed -i 's/name=\"clover.tmpdir\" value=\"${builddir}\/tmp\"/name=\"clover.tmpdir\" value=\"${builddir}\/clover_tmp\"/g' ./build.xml +sed -i 's/name=\"clover.libdir\" value=\"\/clover\/lib\"/name=\"clover.libdir\" value=\"\/scratch\/tests\/app\/ant\/lib\"/g' ./build.xml +sed -i 's/inheritall=\"false\"/inheritall=\"true\"/g' ./build.xml +sed -i 's/format=\"frames\"/format=\"noframes\"/g' ./build.xml +sed -i 's/resource=\"clovertasks\"/resource=\"cloverlib.xml\" classpathref=\"clover.classpath\"/g' ./build.xml + +ant -lib junit-4.10.jar clean clover.alltestsdone -Dclover.ignorefailure=true + + +# Back up the result of this time test run +BUILDID=`ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "cat ${JENKINSBASEDIR}/${TASK_NAME}/nextBuildNumber"` +BUILDID=`expr $BUILDID - 1` + +LOGLOCATION=${BASEDIR}/log_archive/${TASK_NAME}/$BUILDID +mkdir -p $LOGLOCATION +cd $LOGLOCATION +cp -r ${ROOT_DIR}/je $LOGLOCATION + +# Generate the test environment information +echo "Host: ${TESTVM}.us.oracle.com" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Directory: `pwd`" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Username: ${TESTVMUSERNAME}" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Password: ${TESTVMUSERPASSWORD}" >> ${ROOT_DIR}/je/${ENVINFOFILE} + +ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "rm -rf ${JENKINSBASEDIR}/${TASK_NAME}/workspace/*" +cd ${ROOT_DIR}/je && scp ./${CHANGESETFILE} ./${ENVINFOFILE} cloverage.xml ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/ +cd ${ROOT_DIR}/je && scp -r clover_html build/test/data/ ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/ + + + + +#cd ${ROOT_DIR}/je && ant -lib junit-4.10.jar clean $TEST_ARG1 +#cd ${ROOT_DIR}/je && ant -lib 
junit-4.10.jar init-clover $TEST_ARG1 +#cd ${ROOT_DIR}/je && ant -lib junit-4.10.jar clean-clover $TEST_ARG1 +#cd ${ROOT_DIR}/je && ant -lib junit-4.10.jar clover.setup $TEST_ARG1 +#cd ${ROOT_DIR}/je && ant -lib junit-4.10.jar clover.runtest +#cd ${ROOT_DIR}/je && ant -lib junit-4.10.jar clean clover.singletestdone -Dtestcase=com.sleepycat.persist.test.EvolveProxyClassTest +#cd ${ROOT_DIR}/je && ant -lib junit-4.10.jar clover.alltestsrun $TEST_ARG1 +#cd ${ROOT_DIR}/je && ant -lib junit-4.10.jar clover.alltestsdone $TEST_ARG1 + +# log files +#cd ${ROOT_DIR}/je && tar czf ${TASK_NAME}.tar.gz ./build ./build.xml ./jenkins_changeset.txt ./cloverage.xml ./clover_html +#cd ${ROOT_DIR}/je && scp ${TASK_NAME}.tar.gz jenkins@slc04ark:~/jobs/${TASK_NAME}/workspace/ diff --git a/test/jenkins/dbsim/README b/test/jenkins/dbsim/README new file mode 100644 index 0000000..5f86c85 --- /dev/null +++ b/test/jenkins/dbsim/README @@ -0,0 +1,42 @@ +This series test contains five projects in Jenkins + je_dbsim_abortstress + je_dbsim_duplicate + je_dbsim_dwstress + je_dbsim_embedded_abort + je_dbsim_recovery + +Jenkins Machine: slc04ark +Emails: adam.qian@oracle.com, mark.hayes@oracle.com, dwayne.chung@oracle.com, dave.rubin@oracle.com, sam.haradhvala@oracle.com, linda.q.lee@oracle.com, tim.blackman@oracle.com, nosql_eng_cn_grp@oracle.com, markos.zaharioudakis@oracle.com, junyi.xie@oracle.com + +je_dbsim_abortstress: + Test Machine: slc04aro + Current Duration: 18hours + Issued Time: 00:00 UTC Every Sunday + + +je_dbsim_duplicate: + Test Machine: slc04aro + Current Duration: 34hours + Issued Time: 01:00 UTC Every Monday + + +je_dbsim_dwstress: + Test Machine: slc04aro + Current Duration: 20minutes + Issued Time: 02:00 UTC Every Wednesday + + +je_dbsim_embedded_abort: + Test Machine: slc04aro + Current Duration: 27minutes + Issued Time: 04:00 UTC Every Wednesday + + +je_dbsim_recovery: + Test Machine: slc04aro + Current Duration: 34hours + Issued Time: 12:00 UTC Every Friday + +Example Usage: + scp /scratch/jenkins/bin/je_dbsim.sh tests@slc04aro:/scratch/tests/ + ssh -l tests slc04aro "bash /scratch/tests/je_dbsim.sh -t je_dbsim_duplicate -j 8 -R ${je_repo} -b ${je_branch} -r ${je_version}" diff --git a/test/jenkins/dbsim/abortstress.conf b/test/jenkins/dbsim/abortstress.conf new file mode 100644 index 0000000..a3c35a2 --- /dev/null +++ b/test/jenkins/dbsim/abortstress.conf @@ -0,0 +1,137 @@ +########################################### +## Environment and miscellaneous +########################################### + +appl_type TDS + +## Number of iterations +cache_warm_iter 0 +total_iter 1000000 +set_parameter je.lock.oldLockExceptions true + +set_parameter je.cleaner.expunge false + +########################################### +## Database files and access methods +########################################### +file 0 { + file_name testdb + file_method btree + + data_source random + + data_length_dists { + ## Uniform distribution from 9 to 400 + ## % type param1 param2 +# dist 100 U 9 400 + dist 100 U 10 10 + } + + key_group 0 { +# key_size 3 +# key_chars_per_slot 26 26 26 + key_size 5 + key_count 300 + } +} + +file 1 { + file_name referencedb + file_copy 0 + file_contents 0 + file_method btree + verify_func compare_contents 0 +} + +txn_type 0 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + add 0 +} + +txn_type 1 { + ## Use the same key and data for both operations. 
+ txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + read_existing 0 + del 0 +} + +txn_type 2 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + add 0 + del 0 +} + +txn_type 3 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + read_existing 0 + del 0 + add 0 +} + +txn_type 4 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + add 0 + add 1 +} + +txn_type 5 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + read_existing 0 + del 0 + del 1 +} + +########################################### +## Threads +########################################### +SET threads_per_group RANGE {1 2} +thread_type 0 { + thread_count $threads_per_group + abort_freq 100 +# timeout 5000000 5000000 + + ## Each transaction type is chosen with nearly equal frequency. + ## weight txn_type + txn_type 25 0 + txn_type 25 1 + txn_type 25 2 + txn_type 25 3 + + ## Perform up to 30 transaction types per actual transaction. + txn_size_dists { + dist 100 U 1 30 + } +} + +thread_type 1 { + thread_count $threads_per_group + abort_freq 0 +# timeout 5000000 5000000 + + ## Each transaction type is chosen with nearly equal frequency. + ## weight txn_type + txn_type 50 4 + txn_type 50 5 + + ## Perform up to 30 transaction types per actual transaction. + txn_size_dists { + dist 100 U 1 10 + } +} diff --git a/test/jenkins/dbsim/je_dbsim.sh b/test/jenkins/dbsim/je_dbsim.sh new file mode 100644 index 0000000..7649956 --- /dev/null +++ b/test/jenkins/dbsim/je_dbsim.sh @@ -0,0 +1,244 @@ +#!/bin/bash + +# The arguments that are passed by Jenkins system +TEST_ARG="" +JDK_VERSION="8" +TASK_NAME="" +LOG_LEVEL="" +BRANCH="default" +JEREPO="" +JEREVISION=0 +JEREVISIONARG="" +HGPATH="" +PRE="" + +# Jenkins VM and Test VM +JENKINSVMIP="slc04ark" +JENKINSVMUSERNAME="jenkins" +JENKINSVM="${JENKINSVMUSERNAME}@${JENKINSVMIP}" +TESTVM=`hostname -s` +TESTVMUSERNAME="tests" +TESTVMUSERPASSWORD="123456" + +# The user name used to get the je repository +JEREPUSER="adqian" + +# Some basic direcotory/path/filename +BASEDIR="/scratch/tests" +JENKINSBASEDIR="/scratch/jenkins/jobs" +JENKINSBINDIR="/scratch/jenkins/bin" +CHANGESETFILE="jenkins_changeset.txt" +ENVINFOFILE="location_of_environment_and_log.txt" + +# The script to do error extract +ERROREXTRACTSCRIPT="error_extract_je.sh" +GENXMLSCRIPT="gen_xml.sh" + +# Some standalone tests may need different error pattern +# For example, for je_standalone_envsharedcache, its normal +# result output contains "Fail" +ERRORPATTERN="exception error fail" +IGNORECASE="true" +EXCEPTION_EXPRS="setting je.lock.oldLockExceptions to true" + +# DBsim test related +run_class=com.sleepycat.util.sim.Dbsim +CASE="" +VERIFYITER="" + +while getopts "O:j:t:R:b:r:l:h:p:" OPTION +do + case $OPTION in + O) + TEST_ARG=$OPTARG + ;; + j) + JDK_VERSION=$OPTARG + ;; + t) + TASK_NAME=$OPTARG + ;; + R) + JEREPO=$OPTARG + ;; + b) + BRANCH=$OPTARG + ;; + r) + JEREVISION=$OPTARG + ;; + l) + LOG_LEVEL=$OPTARG + ;; + h) + HGPATH=$OPTARG + ;; + p) + PRE=$OPTARG + esac +done + +if [ "${JEREPO}" == "" ]; then + echo "JE repository must be specified" + exit 1 +fi + +if [ "${JEREVISION}" != "0" ]; then + JEREVISIONARG=" -u ${JEREVISION}" 
+fi + +if [ "${HGPATH}" != "" ]; then + HGPATH="${HGPATH}/" +fi + +echo "Task name: $TASK_NAME" +echo "Test args: $TEST_ARG" +echo "JE repo: ssh://${JEREPUSER}@${JEREPO}" +echo "JE branch: $BRANCH" +echo "JE revision(0 means the top): $JEREVISION" + +# hg clone the repository +rm -rf ${BASEDIR}/${TASK_NAME} && mkdir -p ${BASEDIR}/${TASK_NAME} +echo "hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO}" +cd ${BASEDIR}/${TASK_NAME} && ${HGPATH}hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO} ./je +sleep 3 +${HGPATH}hg clone -b default ssh://${JEREPUSER}@sleepycat-scm.us.oracle.com://a/hgroot/dbsim +cd je && ${HGPATH}hg log -l 1 -v > ./${CHANGESETFILE} && cd .. +BUILD_VER=`cd ${BASEDIR}/${TASK_NAME}/je && ${HGPATH}hg parent` + +# Check the jdk version +if [ X$JDK_VERSION == X"8" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_8 +elif [ X$JDK_VERSION == X"7" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_7 +elif [ X$JDK_VERSION == X"6" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_6 +elif [ X$JDK_VERSION == X"AIX" ] ; then + export JAVA_HOME=${BASEDIR}/app/ibm-java-ppc64-80 +else + export JAVA_HOME=${BASEDIR}/app/Java_5 +fi + +export ANT_HOME=${BASEDIR}/app/ant +export PATH=$ANT_HOME/bin:$JAVA_HOME/bin:$PATH + +ROOT_DIR=${BASEDIR}/${TASK_NAME} +DBSIM_DIR=${ROOT_DIR}/dbsim +DBSIM_CLSDIR=${DBSIM_DIR}/build/classes +DBSIM_TEST_DIR=${DBSIM_DIR}/build/test +ANT_VERN=`ant -version` + +echo " " +echo "=========================================================" +echo " " +java -version +ant -version +echo "JAVA_HOME=$JAVA_HOME " +echo "ANT_HOME=$ANT_HOME " +echo "Code branch: $BRANCH $BUILD_VER " +echo " " +echo "=========================================================" +echo " " + +case ${TASK_NAME} in + je_dbsim_abortstress) + CASE="abortstress" + scp ${JENKINSVM}:${JENKINSBINDIR}/je.${CASE}.conf ${ROOT_DIR}/dbsim/configs/${CASE}.conf + ;; + je_dbsim_duplicate) + CASE="duplicate" + VERIFYITER=100 + ;; + je_dbsim_dwstress) + CASE="dwStress" + ;; + je_dbsim_embedded_abort) + CASE="embedded_abort" + EXCEPTION_EXPRS="Lock expired. Locker\|setting je.lock.oldLockExceptions to true" + ;; + je_dbsim_recovery) + CASE="recovery" + scp ${JENKINSVM}:${JENKINSBINDIR}/je.${CASE}.conf ${ROOT_DIR}/dbsim/configs/${CASE}.conf + ;; + *) + echo "The task name is wrong. Please check." 
+ exit 1 +esac + +if [ X$LOG_LEVEL == X"INFO" ]; then + echo "com.sleepycat.je.util.ConsoleHandler.level=INFO" > ${ROOT_DIR}/je/logging.properties +fi + +# compile and generate the je.jar +cd ${ROOT_DIR}/je && ant jar +cd ${ROOT_DIR}/je/build/lib && cp je.jar ${DBSIM_DIR}/lib/ + +date_start=`date +"%s"` + +export CLASSPATH=$CLASSPATH:${DBSIM_CLSDIR}:${DBSIM_DIR}/lib/antlr.jar:${DBSIM_DIR}/lib/je.jar +cd ${DBSIM_DIR} +# compile dbsim source codes +ant clean compile + +# create the environment directory for running dbsim tests +mkdir ${DBSIM_TEST_DIR} + +# Copy the error_extract_je.sh and gen_xml.sh +scp ${JENKINSVM}:${JENKINSBINDIR}/${ERROREXTRACTSCRIPT} ${BASEDIR}/ +scp ${JENKINSVM}:${JENKINSBINDIR}/${GENXMLSCRIPT} ${BASEDIR}/ + +# run Dbsim test +case ${CASE} in + "recovery"|"duplicate") + date -u +'%Y-%m-%d %H:%M:%S %Z' + java -cp $CLASSPATH -DsetErrorListener=true -ea ${run_class} -h ${DBSIM_TEST_DIR} -c ${DBSIM_DIR}/configs/${CASE}.conf -V ${VERIFYITER} -B >& ${DBSIM_DIR}/Case_${CASE}.tmp + date -u +'%Y-%m-%d %H:%M:%S %Z' + ;; + "abortstress"|"dwStress"|"embedded_abort") + date -u +'%Y-%m-%d %H:%M:%S %Z' + java -cp $CLASSPATH -DsetErrorListener=true -ea ${run_class} -h ${DBSIM_TEST_DIR} -c ${DBSIM_DIR}/configs/${CASE}.conf -I + date -u +'%Y-%m-%d %H:%M:%S %Z' + java -cp $CLASSPATH -DsetErrorListener=true -ea ${run_class} -h ${DBSIM_TEST_DIR} -c ${DBSIM_DIR}/configs/${CASE}.conf >& ${DBSIM_DIR}/Case_${CASE}.tmp + date -u +'%Y-%m-%d %H:%M:%S %Z' + ;; +esac +date_end=`date +"%s"` +intervel=$[$date_end - $date_start] +cd ${BASEDIR} && bash ${BASEDIR}/${ERROREXTRACTSCRIPT} ${DBSIM_DIR}/Case_${CASE}.tmp output.log "JE.DBSim" "${CASE}" $intervel ${ROOT_DIR}/test.xml "0" "${ERRORPATTERN}" "${IGNORECASE}" "${BASEDIR}" "${EXCEPTION_EXPRS}" +cp ${DBSIM_DIR}/Case_${CASE}.tmp ${DBSIM_DIR}/Case_${CASE}.tmp.bk +tail ${DBSIM_DIR}/Case_${CASE}.tmp -n 100 > ${DBSIM_DIR}/DBSim_${CASE}.log + +# Back up the result of this time test run +BUILDID=`ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "cat ${JENKINSBASEDIR}/${TASK_NAME}/nextBuildNumber"` +BUILDID=`expr $BUILDID - 1` + +LOGLOCATION=${BASEDIR}/log_archive/${TASK_NAME}/$BUILDID +mkdir -p $LOGLOCATION +cd $LOGLOCATION +cp -r ${ROOT_DIR}/je $LOGLOCATION + +# Generate the test environment information +echo "Host: ${TESTVM}.us.oracle.com" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Directory: `pwd`" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Username: ${TESTVMUSERNAME}" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Password: ${TESTVMUSERPASSWORD}" >> ${ROOT_DIR}/je/${ENVINFOFILE} + +ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "rm -rf ${JENKINSBASEDIR}/${TASK_NAME}/workspace/*" +cd ${ROOT_DIR}/je && scp ./${CHANGESETFILE} ./${ENVINFOFILE} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/ +scp ${ROOT_DIR}/test.xml ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/ +scp ${DBSIM_DIR}/DBSim_${CASE}.log ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/ + + +#COMP_VER=B_je-3_3_x +#COMP_DIR=${ROOT_DIR}/compare +#if [ X"$CASE" = X"fileformat" ] ; then +# # checkout the +# mkdir ${COMP_DIR} +# cd ${COMP_DIR} && hg clone -b default ssh://adqian@sleepycat-scm.us.oracle.com://a/hgroot/je +# cd ${COMP_DIR}/je && ant jar +# +# cd ${DBSIM_DIR} +# mv lib/je.jar lib/main.jar +# cp ${COMP_DIR}/je/build/lib/je.jar ./lib/compare.jar +# ant filefmt -Dtestjar.v1=lib/compare.jar -Dtestjar.v2=lib/main.jar >& ${DBSIM_DIR}/Case_${CASE}-JDK_${JAVA_VERN}.tmp +#fi diff --git a/test/jenkins/dbsim/recovery.conf b/test/jenkins/dbsim/recovery.conf new file mode 100644 index 0000000..a73b075 
--- /dev/null +++ b/test/jenkins/dbsim/recovery.conf @@ -0,0 +1,146 @@ +########################################### +## Environment and miscellaneous +########################################### + +appl_type TDS + +## Number of iterations +cache_warm_iter 0 +total_iter 1000000 +set_parameter je.lock.oldLockExceptions true + +killtest_iter 2000 +#killtest_interval SELECT {30 45 60 75 90 105 120) +#killtest_interval SELECT {30 45 60 75 90 105 120 135 150 165 180 195 210 225 240 255 270 285 300} +killtest_interval 120 +close_after_verify 75 + +set_parameter je.maxMemory 1000000 +set_parameter je.log.fileMax 1000000 +set_parameter je.checkpointer.bytesInterval 500000 +set_parameter je.cleaner.expunge false + +########################################### +## Database files and access methods +########################################### +file 0 { + file_name testdb + file_method btree + + data_source random + + data_length_dists { + ## Uniform distribution from 9 to 400 + ## % type param1 param2 +# dist 100 U 9 400 + dist 100 U 10 10 + } + + key_group 0 { +# key_size 3 +# key_chars_per_slot 26 26 26 + key_size 5 + key_count 300 + } +} + +file 1 { + file_name referencedb + file_copy 0 + file_contents 0 + file_method btree + verify_func compare_contents 0 +} + +txn_type 0 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + add 0 +} + +txn_type 1 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + read_existing 0 + del 0 +} + +txn_type 2 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + add 0 + del 0 +} + +txn_type 3 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + read_existing 0 + del 0 + add 0 +} + +txn_type 4 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + add 0 + add 1 +} + +txn_type 5 { + ## Use the same key and data for both operations. + txn_type_special same_key + txn_type_special same_data + txn_type_special no_transaction + read_existing 0 + del 0 + del 1 +} + +########################################### +## Threads +########################################### +SET threads_per_group RANGE {1 2} +thread_type 0 { + thread_count $threads_per_group + abort_freq 100 +# timeout 5000000 5000000 + + ## Each transaction type is chosen with nearly equal frequency. + ## weight txn_type + txn_type 25 0 + txn_type 25 1 + txn_type 25 2 + txn_type 25 3 + + ## Perform up to 30 transaction types per actual transaction. + txn_size_dists { + dist 100 U 1 30 + } +} + +thread_type 1 { + thread_count $threads_per_group + abort_freq 0 +# timeout 5000000 5000000 + + ## Each transaction type is chosen with nearly equal frequency. + ## weight txn_type + txn_type 50 4 + txn_type 50 5 + + ## Perform up to 30 transaction types per actual transaction. + txn_size_dists { + dist 100 U 1 10 + } +} diff --git a/test/jenkins/standalone/README b/test/jenkins/standalone/README new file mode 100644 index 0000000..a19e4b1 --- /dev/null +++ b/test/jenkins/standalone/README @@ -0,0 +1,47 @@ +This series test contains fifteen projects in Jenkins. 
+    je_standalone_cleanwsc
+    je_standalone_closedbevi
+    je_standalone_envsharedcache
+    je_standalone_failoverhybrid
+    je_standalone_failovermaster
+    je_standalone_failoverrep
+    je_standalone_ioerror
+    je_standalone_memstress
+    je_standalone_openenv
+    je_standalone_remdb
+    je_standalone_repclean
+    je_standalone_repdbops
+    je_standalone_repread
+    je_standalone_tempdb
+    je_standalone_txinmthd
+
+
+Jenkins Machine: slc04ark
+Emails: adam.qian@oracle.com, mark.hayes@oracle.com, dwayne.chung@oracle.com, dave.rubin@oracle.com, sam.haradhvala@oracle.com, linda.q.lee@oracle.com, tim.blackman@oracle.com, nosql_eng_cn_grp@oracle.com, markos.zaharioudakis@oracle.com, junyi.xie@oracle.com
+
+These tests all run the same way: je_standalone.sh drives each one, and the only difference is the project name, which is passed to je_standalone.sh as an argument. They all use error_extract_je.sh and gen_xml.sh to check the test results and generate the final report.
+
+A usage example looks like:
+    scp /scratch/jenkins/bin/je_standalone.sh tests@slc04arq:~/
+    ssh -l tests slc04arq "bash je_standalone.sh -t je_standalone_failoverhybrid -R ${je_repo} -b ${je_branch} -r ${je_version}"
+where slc04arq is the test machine on which the standalone test runs and je_standalone_failoverhybrid is one of the 15 project names listed above.
+
+The table below gives the test machine, issue time, and duration of each standalone test.
+
+                                 Test Machine    Issued Time              Duration
+    je_standalone_cleanwsc       slc04arq        22:00 UTC Saturday       1 hour
+    je_standalone_closedbevi     slc00arq        00:00 UTC Sunday         10 hours
+    je_standalone_envsharedcache slc00arq        16:00 UTC Sunday         1 hour
+    je_standalone_failoverhybrid slc00arq        06:00 UTC Monday         5 hours
+    je_standalone_failovermaster slc00arq        14:00 UTC Monday         5.5 hours
+    je_standalone_failoverrep    slc00arq        22:00 UTC Monday         4 hours
+    je_standalone_ioerror        slc00brg        03:00 UTC Tuesday        4 hours
+    je_standalone_memstress      slc00arq        08:00 UTC Tuesday        1 hour
+    je_standalone_openenv        slc00arq        10:00 UTC Tuesday        1 hour
+    je_standalone_remdb          slc00arq        12:00 UTC Tuesday        2.5 hours
+    je_standalone_repclean       slc00arq        15:00 UTC Tuesday        3 hours
+    je_standalone_repdbops       slc00arq        00:00 UTC Thursday       1 hour
+    je_standalone_repread        slc00arq        02:00 UTC Thursday       12.5 hours
+    je_standalone_tempdb         slc00arq        15:00 UTC Thursday       4.5 hours
+    je_standalone_txinmthd       slc00arq        00:00 UTC Friday         1 hour
+
diff --git a/test/jenkins/standalone/error_extract_je.sh b/test/jenkins/standalone/error_extract_je.sh
new file mode 100644
index 0000000..0e562b1
--- /dev/null
+++ b/test/jenkins/standalone/error_extract_je.sh
@@ -0,0 +1,79 @@
+# Arguments (derived from the call sites in je_standalone.sh):
+# $1 log file to scan, $2 output log, $3 xml classname, $4 test name,
+# $5 elapsed seconds, $6 xml result file, $7 exit status of the test run,
+# $8 error patterns, $9 ignore-case flag, ${10} base dir, ${11} exception
+# expressions to exclude from matching.
+rm -f $2
+
+ls $1 >& list.txt
+
+error_pattern="$8"
+ignore_case="$9"
+base_dir="${10}"
+exception_exprs="${11}"
+
+#exception_exprs="setting je.lock.oldLockExceptions to true"
+
+# error_exprs holds the possible error patterns; it may look like
+# "warn exception error fail unexpected"
+error_exprs="${error_pattern}"
+while read LINE
+do
+    start_pos=0
+
+    # Find the first position where a potential error pattern appears
+    for fail_message in $error_exprs
+    do
+        #sed -n "$fail_message" $LINE >& fails.txt
+        if [ "${ignore_case}" == "true" ]; then
+            grep -n -i "$fail_message" $LINE | grep -v "$exception_exprs" | cut -d ":" -f 1 > fails.txt
+        else
+            grep -n "$fail_message" $LINE | grep -v "$exception_exprs" | cut -d ":" -f 1 > fails.txt
+        fi
+        err_pos=`sed -n '1p' fails.txt`
+        if [ "$err_pos" = "" ]; then
+            err_pos=0
+        fi
+
+        if [ "$err_pos" != 0 ]; then
+            if [ "$start_pos" = 0 ]; then
+                start_pos=$err_pos
+            elif [ "$start_pos" -gt 0 ]; then
+                if [ "$start_pos" -ge "$err_pos" ]; then
+                    start_pos=$err_pos
+                fi
+            fi
+        fi
+        rm -f fails.txt
+    done
+
+    # For all standalone tests, a zero exit status means success and a
+    # non-zero exit status means failure. Only on failure do we report
+    # the error and the potential error information. The reported
+    # location may not be accurate, because some expected exceptions
+    # can match the error patterns, so always check the log.
+    if [ "$7" != 0 ]; then
+        # Take the following 500 lines as the error log
+        if [ "$start_pos" != 0 ]; then
+            exist_failure="true"
+            end_pos=`expr $start_pos + 500`
+            data=`sed -n "$start_pos"','"$end_pos"'p' $LINE`
+            message=`sed -n "$start_pos"','"$start_pos"'p' $LINE`
+            # XML-escape the message. NOTE: the escape literals were
+            # stripped when this file was imported; the standard XML
+            # entities are reconstructed here.
+            message=${message//&/&amp;}
+            message=${message//\'/&apos;}
+            message=${message//\"/&quot;}
+            message=${message//</&lt;}
+            message=${message//>/&gt;}
+
+            bash ${base_dir}/gen_xml.sh "$3" "$4" "$5" 1 1 "$message" "$data" "$6"
+        else
+            bash ${base_dir}/gen_xml.sh "$3" "$4" "$5" 1 1 "No concrete error message, see the log" "No concrete error message, see the log" "$6"
+        fi
+    else
+        bash ${base_dir}/gen_xml.sh "$3" "$4" "$5" 1 0 "" "" "$6"
+    fi
+done < list.txt
+
+# Save the error log to the server
+if [ "$exist_failure" = "true" ]; then
+    ls *.log >& list.txt
+fi
+
+
diff --git a/test/jenkins/standalone/gen_xml.sh b/test/jenkins/standalone/gen_xml.sh
new file mode 100644
index 0000000..b808a2f
--- /dev/null
+++ b/test/jenkins/standalone/gen_xml.sh
@@ -0,0 +1,39 @@
+# $1 classname
+# $2 name
+# $3 time
+# $4 number of tests
+# $5 number of failed tests
+# $6 error message
+# $7 error data
+# $8 out file name
+
+#echo "1=$1"
+#echo "2=$2"
+#echo "3=$3"
+#echo "4=$4"
+#echo "5=$5"
+#echo "6=$6"
+#echo "7=$7"
+#echo "8=$8"
+rm -f $8
+
+# NOTE: the XML element literals below were stripped when this file was
+# imported; they are reconstructed here as standard JUnit-style report
+# elements matching the parameters documented above.
+echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" >> $8
+echo "<testsuite name=\"$1\" tests=\"$4\" failures=\"$5\">" >> $8
+
+echo "  <testcase classname=\"$1\" name=\"$2\" time=\"$3\">" >> $8
+
+if [ $5 -eq 1 ]; then
+    echo "    <failure message=\"$6\">" >> $8
+    echo "$7" >> $8
+    echo "    </failure>" >> $8
+fi
+echo "  </testcase>" >> $8
+
+echo "</testsuite>" >> $8
diff --git a/test/jenkins/standalone/je_standalone.sh b/test/jenkins/standalone/je_standalone.sh
new file mode 100644
index 0000000..1d936e8
--- /dev/null
+++ b/test/jenkins/standalone/je_standalone.sh
@@ -0,0 +1,460 @@
+#!/bin/bash
+
+# The arguments that are passed by the Jenkins system
+TEST_ARG=""
+JDK_VERSION="8"
+TASK_NAME=""
+LOG_LEVEL=""
+BRANCH="default"
+JEREPO=""
+JEREVISION=0
+JEREVISIONARG=""
+
+
+# Arguments for "ant -Dtestcase=*** standalone"
+TESTNAME=""
+TESTARG1=""
+XMLRESULT1=""
+LOGRESULT1=""
+
+# Arguments for "ant -Dtestcase=*** -Dargs=*** standalone"
+TESTNAMEWITHARG2=""
+TESTARG2=""
+XMLRESULT2=""
+LOGRESULT2=""
+
+# Some standalone tests want to test two different sets of arguments
+# Arguments for "ant -Dtestcase=*** -Dargs=*** standalone"
+TESTNAMEWITHARG3=""
+TESTARG3=""
+XMLRESULT3=""
+LOGRESULT3=""
+
+# Jenkins VM and Test VM
+JENKINSVMIP="slc04ark"
+JENKINSVMUSERNAME="jenkins"
+JENKINSVM="${JENKINSVMUSERNAME}@${JENKINSVMIP}"
+TESTVM=`hostname -s`
+TESTVMUSERNAME="tests"
+TESTVMUSERPASSWORD="123456"
+
+# The user name used to get the je repository
+JEREPUSER="adqian"
+
+# Some basic directory/path/file names
+BASEDIR="/scratch/tests"
+JENKINSBASEDIR="/scratch/jenkins/jobs"
+JENKINSBINDIR="/scratch/jenkins/bin"
+JESTANDALONEPATH="je/build/test/standalone"
+#JERESULTARCH="je_standalone_test_result_archive"
+CHANGESETFILE="jenkins_changeset.txt"
+ENVINFOFILE="location_of_environment_and_log.txt"
+
+# The script that does the error extraction
+ERROREXTRACTSCRIPT="error_extract_je.sh"
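+# error_extract_je.sh greps the test log for the error patterns configured
+# below; gen_xml.sh turns what it finds into the JUnit-style XML report
+# that Jenkins consumes.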
+GENXMLSCRIPT="gen_xml.sh" + +# Some standalone tests may need different error pattern +# For example, for je_standalone_envsharedcache, its normal +# result output contains "Fail" +ERRORPATTERN="warn exception error fail" +IGNORECASE="true" +error_message="Error: This standalone test fail because it exits with a non-zero exit code" +EXCEPTION_EXPRS="setting je.lock.oldLockExceptions to true" + +while getopts "O:j:t:R:b:r:l:T:h:" OPTION +do + case $OPTION in + O) + TEST_ARG=$OPTARG + ;; + j) + JDK_VERSION=$OPTARG + ;; + t) + TASK_NAME=$OPTARG + ;; + R) + JEREPO=$OPTARG + ;; + b) + BRANCH=$OPTARG + ;; + r) + JEREVISION=$OPTARG + ;; + l) + LOG_LEVEL=$OPTARG + ;; + T) + TEST_TIMO=$OPTARG + ;; + h) + HGPATH=$OPTARG + ;; + esac +done + +if [ "${JEREPO}" == "" ]; then + echo "JE repository must be specified" + exit 1 +fi + +if [ "${JEREVISION}" != "0" ]; then + JEREVISIONARG=" -u ${JEREVISION}" +fi + +if [ "${HGPATH}" != "" ]; then + HGPATH="${HGPATH}/" +fi + +echo "Task name: $TASK_NAME" +echo "Test args: $TEST_ARG" +echo "JE repo: ssh://${JEREPUSER}@${JEREPO}" +echo "JE branch: $BRANCH" +echo "JE revision(0 means the top): $JEREVISION" + + +## create the dir to save the standalone test result +#cd ${BASEDIR} +#mkdir -p ${JERESULTARCH} +#cd ${JERESULTARCH} +#if [ -d ${TASK_NAME} ]; then +# rm -rf ${TASK_NAME} +#fi +#mkdir -p ${TASK_NAME} +#cd ${TASK_NAME} +#TEMPDIRNAME=$(date +%Y%m%d%H%M%S) +#mkdir -p ${TEMPDIRNAME} +#SAVEPATH=${BASEDIR}/${JERESULTARCH}/${TASK_NAME}/${TEMPDIRNAME} + + +# hg clone je +rm -rf ${BASEDIR}/${TASK_NAME} && mkdir -p ${BASEDIR}/${TASK_NAME} +echo "hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO}" +cd ${BASEDIR}/${TASK_NAME} && ${HGPATH}hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO} ./je +cd je && ${HGPATH}hg log -l 1 -v > ./${CHANGESETFILE} && cd .. 
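+# ${CHANGESETFILE} records the tip changeset of this clone; it is copied
+# to the Jenkins workspace at the end of this script.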
+ +# Choose the jdk version +if [ X$JDK_VERSION == X"8" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_8 +elif [ X$JDK_VERSION == X"7" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_7 +elif [ X$JDK_VERSION == X"5" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_5 +elif [ X$JDK_VERSION == X"AIX" ] ; then + export JAVA_HOME=${BASEDIR}/app/ibm-java-ppc64-80 +else + export JAVA_HOME=${BASEDIR}/app/Java_6 +fi + +export ANT_HOME=${BASEDIR}/app/ant +export PATH=$ANT_HOME/bin:$JAVA_HOME/bin:$PATH + +ROOT_DIR=${BASEDIR}/${TASK_NAME} +TEST_DIR=${ROOT_DIR}/${JESTANDALONEPATH} + +ANT_VERN=`ant -version` +BUILD_VER=`cd $ROOT_DIR/je && ${HGPATH}hg parent` + +echo " " +echo "=========================================================" +echo " " +java -version +ant -version +echo "JAVA_HOME=$JAVA_HOME " +echo "ANT_HOME=$ANT_HOME " +echo "Code branch: $BRANCH $BUILD_VER " +echo " " +echo "=========================================================" +echo " " + +if [ X$LOG_LEVEL == X"INFO" ] ; then + echo "com.sleepycat.je.util.ConsoleHandler.level=INFO" > ${ROOT_DIR}/je/logging.properties +fi + +# 1.je_standalone_cleanwsc +if [ "${TASK_NAME}" == "je_standalone_cleanwsc" ]; then + TESTNAME="CleanWithSmallCache" + XMLRESULT1="test.xml" + LOGRESULT1="cleanwsc_log.txt" + + if [ X$TEST_TIMO != X"" ] ; then + bash je_cwsc_timo $TEST_TIMO + fi + +# 2.je_standalone_closedbevi +elif [ "${TASK_NAME}" == "je_standalone_closedbevi" ]; then + TESTNAME="ClosedDbEviction" + XMLRESULT1="test_1.xml" + LOGRESULT1="closeddbevi_log.txt" + + TESTNAMEWITHARG2="ClosedDbEvictionRecovery" + TESTARG2="-recovery 10000000" + XMLRESULT2="test_2.xml" + LOGRESULT2="closeddbevi_warg_log.txt" + + ERRORPATTERN="FAIL exception Exception error Error" + IGNORECASE="false" + +# 3.je_standalone_envsharedcache +elif [ "${TASK_NAME}" == "je_standalone_envsharedcache" ]; then + TESTNAME="EnvSharedCache" + XMLRESULT1="test_1.xml" + LOGRESULT1="envsharedcache_log.txt" + + TESTNAMEWITHARG2="EnvSharedCacheOpenTest" + TESTARG2="-opentest" + XMLRESULT2="test_2.xml" + LOGRESULT2="envsharedcache_open_log.txt" + + TESTNAMEWITHARG3="EnvSharedCacheEvenTest" + TESTARG3="-eventest" + XMLRESULT3="test_3.xml" + LOGRESULT3="envsharedcache_event_log.txt" + + ERRORPATTERN="Warn warn exception Exception error Error" + IGNORECASE="false" + +# 4. je_standalone_failoverhybrid +elif [ "${TASK_NAME}" == "je_standalone_failoverhybrid" ]; then + TESTNAME="FailoverHybrid" + XMLRESULT1="test_1.xml" + LOGRESULT1="failoverhybrid_log.txt" + + TESTNAMEWITHARG2="EFailoverHybridRepGroup" + TESTARG2="-repGroupSize 8" + XMLRESULT2="test_2.xml" + LOGRESULT2="failoverhybrid_repgroup_log.txt" + +# 5.je_standalone_failovermaster +elif [ "${TASK_NAME}" == "je_standalone_failovermaster" ]; then + TESTNAME="FailoverMaster" + XMLRESULT1="test_1.xml" + LOGRESULT1="failovermaster_log.txt" + + TESTNAMEWITHARG2="FailoverMasterRepGroup" + TESTARG2="-repGroupSize 8" + XMLRESULT2="test_2.xml" + LOGRESULT2="failovermaster_repgroup_log.txt" + +# 6. 
je_standalone_failoverrep +elif [ "${TASK_NAME}" == "je_standalone_failoverrep" ]; then + TESTNAME="FailoverReplica" + XMLRESULT1="test_1.xml" + LOGRESULT1="failoverrep_log.txt" + + TESTNAMEWITHARG2="FailoverReplicaRepGroup" + TESTARG2="-repGroupSize 8" + XMLRESULT2="test_2.xml" + LOGRESULT2="failoverrep_repgroup_log.txt" + +# 7.je_standalone_ioerror +elif [ "${TASK_NAME}" == "je_standalone_ioerror" ]; then + TESTNAME="IOErrorStress" + XMLRESULT1="test_1.xml" + LOGRESULT1="ioerror_log.txt" + + TESTNAMEWITHARG2="IOErrorStressWithArgs" + TESTARG2="-cacheMB 1" + XMLRESULT2="test_2.xml" + LOGRESULT2="ioerror_cache_log.txt" + + ERRORPATTERN="Warn warn Fail fail \*\*\*Unexpected" + IGNORECASE="false" + error_message="Fail: This standalone test fail because it exits with a non-zero exit code" + +# 8.je_standalone_memstress +elif [ "${TASK_NAME}" == "je_standalone_memstress" ]; then + TESTNAME="MemoryStress" + XMLRESULT1="test_1.xml" + LOGRESULT1="memstress_log.txt" + + TESTNAMEWITHARG2="MemoryStressDup" + TESTARG2="-dups" + XMLRESULT2="test_2.xml" + LOGRESULT2="memstress_dups_log.txt" + +# 9.je_standalone_openenv +elif [ "${TASK_NAME}" == "je_standalone_openenv" ]; then + TESTNAME="OpenEnvStress" + XMLRESULT1="test.xml" + LOGRESULT1="openenv_log.txt" + +# 10.je_standalone_remdb +elif [ "${TASK_NAME}" == "je_standalone_remdb" ]; then + TESTNAME="RemoveDbStress" + XMLRESULT1="test.xml" + LOGRESULT1="remdb_log.txt" + +# 11.je_standalone_repclean +elif [ "${TASK_NAME}" == "je_standalone_repclean" ]; then + TESTNAME="ReplicationCleaning" + XMLRESULT1="test_1.xml" + LOGRESULT1="repclean_log.txt" + + TESTNAMEWITHARG2="ReplicationCleaningRepNodeNum" + TESTARG2="-repNodeNum 8" + XMLRESULT2="test_2.xml" + LOGRESULT2="repclean_repnodenum_log.txt" + +# 12.je_standalone_repdbops +elif [ "${TASK_NAME}" == "je_standalone_repdbops" ]; then + TESTNAME="ReplicaDbOps" + XMLRESULT1="test_1.xml" + LOGRESULT1="repdbops_log.txt" + + TESTNAMEWITHARG2="ReplicaDbOpsNThread" + TESTARG2="-nThreads 4" + XMLRESULT2="test_2.xml" + LOGRESULT2="repdbops_nthread_log.txt" + +# 13.je_standalone_repread +elif [ "${TASK_NAME}" == "je_standalone_repread" ]; then + TESTNAME="ReplicaReading" + XMLRESULT1="test_1.xml" + LOGRESULT1="repread_log.txt" + + TESTNAMEWITHARG2="ReplicaReadingThread" + TESTARG2="-nPriThreads 4 -nSecThreads 4 -txnOps 30" + XMLRESULT2="test_2.xml" + LOGRESULT2="repread_thread_log.txt" + +# 14. Now it is not a Standalone test. 
So we just ignore it here.
+# 15.je_standalone_tempdb
+elif [ "${TASK_NAME}" == "je_standalone_tempdb" ]; then
+    TESTNAME="TemporaryDbStress"
+    XMLRESULT1="test.xml"
+    LOGRESULT1="tempdb_log.txt"
+
+# 16.je_standalone_txinmthd
+elif [ "${TASK_NAME}" == "je_standalone_txinmthd" ]; then
+    TESTNAME="TxnInMultiThreadsStress"
+    XMLRESULT1="test.xml"
+    LOGRESULT1="txinmthd_log.txt"
+
+# 17(Added on 2016-02-06).je_standalone_ttl
+# ant -Dtestcase=TTLStress standalone
+elif [ "${TASK_NAME}" == "je_standalone_ttl" ]; then
+    TESTNAME="TTLStress"
+    XMLRESULT1="test.xml"
+    LOGRESULT1="ttl_log.txt"
+
+# 18(Added on 2017-05-17).je_standalone_disklimit
+elif [ "${TASK_NAME}" == "je_standalone_disklimit" ]; then
+    TESTNAME="DiskLimitStress"
+    TESTARG1="-nodes 1 -minutes 15"
+    XMLRESULT1="test_1.xml"
+    LOGRESULT1="disklimit_onenode.txt"
+
+    TESTNAMEWITHARG2="DiskLimitStressHA"
+    TESTARG2="-nodes 3 -minutes 15"
+    XMLRESULT2="test_2.xml"
+    LOGRESULT2="disklimit_HA.txt"
+
+    TESTNAMEWITHARG3="DiskLimitStressViolation"
+    TESTARG3="-nodes 3 -violations true -minutes 25"
+    XMLRESULT3="test_3.xml"
+    LOGRESULT3="disklimit_violation.txt"
+
+# The task name cannot be empty
+elif [ X"${TASK_NAME}" == X"" ]; then
+    echo "You must specify the task name"
+    exit 1
+
+# An unrecognized task name
+else
+    echo "The task name is wrong. Please check it."
+    exit 1
+fi
+
+# Copy error_extract_je.sh and gen_xml.sh from the Jenkins VM
+scp ${JENKINSVM}:${JENKINSBINDIR}/${ERROREXTRACTSCRIPT} ${BASEDIR}/
+scp ${JENKINSVM}:${JENKINSBINDIR}/${GENXMLSCRIPT} ${BASEDIR}/
+
+# Back up the results of this test run. Determine the storage directory.
+BUILDID=`ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "cat ${JENKINSBASEDIR}/${TASK_NAME}/nextBuildNumber"`
+BUILDID=`expr $BUILDID - 1`
+
+# Since many standalone tests run on one test VM, the disk is easily
+# exhausted. We keep the data files (.jdb) of each standalone test
+# only until the next build.
+if [ -d ${BASEDIR}/log_archive/${TASK_NAME} ]; then
+    rm -rf ${BASEDIR}/log_archive/${TASK_NAME}
+fi
+SAVEPATH=${BASEDIR}/log_archive/${TASK_NAME}/$BUILDID
+mkdir -p $SAVEPATH
+
+# Run the standalone test
+# $1: Test case name
+# $2: The name shown in the xml files
+# $3: The xml result file
+# $4: The log file
+# $5: The error pattern
+# $6: Whether error pattern matching ignores case
+# $7: The exception expressions excluded from error matching
+# $8: The arguments for the standalone test
+#     (placed last because it may be empty)
+do_standalone_test() {
+    date_start=`date +"%s"`
+    cd ${ROOT_DIR}/je && ant -Dtestcase="$1" -Dargs="$8" standalone
+    retvalue=$?
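+    # Capture ant's exit status immediately; a non-zero value marks the
+    # run as failed and is passed to error_extract_je.sh below.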
+    if [ "${retvalue}" != 0 ]; then
+        echo ${error_message} >> ${ROOT_DIR}/${JESTANDALONEPATH}/log
+    fi
+    date_end=`date +"%s"`
+    interval=$((date_end - date_start))
+    cd ${BASEDIR} && bash ${BASEDIR}/${ERROREXTRACTSCRIPT} ${ROOT_DIR}/${JESTANDALONEPATH}/log output.log "JE.Standalone" "$2" $interval ${ROOT_DIR}/je/"$3" "${retvalue}" "$5" "$6" "${BASEDIR}" "$7"
+    cd ${TEST_DIR} && cp log "$4"
+    cp -r ${TEST_DIR} ${SAVEPATH}/standalone_$2
+}
+
+# ant -Dtestcase=*** standalone
+do_standalone_test "${TESTNAME}" "${TESTNAME}" "${XMLRESULT1}" "${LOGRESULT1}" "${ERRORPATTERN}" "${IGNORECASE}" "${EXCEPTION_EXPRS}" "${TESTARG1}"
+
+# ant -Dtestcase=*** -Dargs=*** standalone
+if [ X"${TESTNAMEWITHARG2}" != X"" ]; then
+    do_standalone_test "${TESTNAME}" "${TESTNAMEWITHARG2}" "${XMLRESULT2}" "${LOGRESULT2}" "${ERRORPATTERN}" "${IGNORECASE}" "${EXCEPTION_EXPRS}" "${TESTARG2}"
+fi
+
+# ant -Dtestcase=*** -Dargs=*** standalone
+if [ X"${TESTNAMEWITHARG3}" != X"" ]; then
+    do_standalone_test "${TESTNAME}" "${TESTNAMEWITHARG3}" "${XMLRESULT3}" "${LOGRESULT3}" "${ERRORPATTERN}" "${IGNORECASE}" "${EXCEPTION_EXPRS}" "${TESTARG3}"
+fi
+
+
+# Generate the test environment information, including the log/data store directory
+echo "Host: ${TESTVM}.us.oracle.com" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+echo "Username: ${TESTVMUSERNAME}" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+echo "Password: ${TESTVMUSERPASSWORD}" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+echo "Directory for \"ant -Dtestcase=$TESTNAME standalone\" is:" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+echo "    ${SAVEPATH}/standalone_$TESTNAME" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+if [ X"${TESTNAMEWITHARG2}" != X"" ]; then
+    echo "Directory for \"ant -Dtestcase=$TESTNAME -Dargs='${TESTARG2}' standalone\" is:" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+    echo "    ${SAVEPATH}/standalone_$TESTNAMEWITHARG2" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+fi
+if [ X"${TESTNAMEWITHARG3}" != X"" ]; then
+    echo "Directory for \"ant -Dtestcase=$TESTNAME -Dargs='${TESTARG3}' standalone\" is:" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+    echo "    ${SAVEPATH}/standalone_$TESTNAMEWITHARG3" >> ${ROOT_DIR}/je/${ENVINFOFILE}
+fi
+
+
+ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "rm -rf ${JENKINSBASEDIR}/${TASK_NAME}/workspace/*"
+# Copy the needed files to the Jenkins VM
+cd ${ROOT_DIR}/je && scp ./${CHANGESETFILE} ./${ENVINFOFILE} ./${XMLRESULT1} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/
+cd ${TEST_DIR} && scp ./${LOGRESULT1} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/
+
+if [ X"${TESTNAMEWITHARG2}" != X"" ]; then
+    cd ${ROOT_DIR}/je && scp ./${XMLRESULT2} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/
+    cd ${TEST_DIR} && scp ./${LOGRESULT2} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/
+fi
+
+if [ X"${TESTNAMEWITHARG3}" != X"" ]; then
+    cd ${ROOT_DIR}/je && scp ./${XMLRESULT3} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/
+    cd ${TEST_DIR} && scp ./${LOGRESULT3} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/
+fi
+
+# Since we save the results of this run to ${SAVEPATH}/standalone_*,
+# we delete the files here to save some disk space.
+if [ -d ${TEST_DIR} ]; then
+    rm -rf ${TEST_DIR}
+fi
diff --git a/test/jenkins/unittest/README b/test/jenkins/unittest/README
new file mode 100644
index 0000000..cc36407
--- /dev/null
+++ b/test/jenkins/unittest/README
@@ -0,0 +1,101 @@
+This test series contains 11 Jenkins projects: 6 weekly tests run on slc04arq and 5 daily tests run on slc04atb.
+    Daily running tests:
+        je_unit_jdk7
+        je_unit_jdk8
+        je_unit_jdk7_branch_5098
+        je_unit_jdk6_branch_5098
+        je_unit_jdk8_always_default_repository
+
+    Weekly running tests:
+        je_unit_aix
+        je_unit_jdk7_iso
+        je_unit_jdk7_je_rep_pre
+        je_unit_jdk7_no_embedded_ln
+        je_unit_jdk8_je_rep_pre
+        je_unit_secondassociation
+
+
+Jenkins Machine: slc04ark
+Emails: adam.qian@oracle.com, mark.hayes@oracle.com, dwayne.chung@oracle.com, dave.rubin@oracle.com, sam.haradhvala@oracle.com, linda.q.lee@oracle.com, tim.blackman@oracle.com, nosql_eng_cn_grp@oracle.com, markos.zaharioudakis@oracle.com, junyi.xie@oracle.com
+
+Daily running tests:
+    je_unit_jdk7:
+        Current Duration: 3 hours
+
+    je_unit_jdk8:
+        Current Duration: 2.5 hours
+
+    je_unit_jdk7_branch_5098:
+        Current Duration: 2 hours
+
+    je_unit_jdk6_branch_5098:
+        Current Duration: 2 hours
+
+    je_unit_jdk8_always_default_repository:
+        Current Duration: 3 hours
+
+Weekly running tests:
+    je_unit_aix:
+        Test Machine: stuzx68
+        Current Duration: 3 hours 42 minutes
+        Issued Time: 00:00 UTC Every Sunday
+
+    je_unit_jdk7_iso:
+        Test Machine: slc04arq
+        Current Duration: 2.5 hours
+        Issued Time: 14:00 UTC Every Friday
+
+    je_unit_jdk7_je_rep_pre:
+        Test Machine: slc04arq
+        Current Duration: 5 hours
+        Issued Time: 18:00 UTC Every Friday
+
+    je_unit_jdk7_no_embedded_ln:
+        Test Machine: slc04arq
+        Current Duration: 2.5 hours
+        Issued Time: 06:00 UTC Every Saturday
+
+    je_unit_jdk8_je_rep_pre:
+        Test Machine: slc04arq
+        Current Duration: 4.5 hours
+        Issued Time: 00:00 UTC Every Saturday
+
+    je_unit_secondassociation:
+        Test Machine: slc04arq
+        Current Duration: 1 hour
+        Issued Time: 11:00 UTC Every Thursday
+
+Usage:
+je_unit_jdk7_iso
+    ssh -l tests slc04arq "bash je_unit.sh -t je_unit_jdk7_iso -j 7 -l INFO -R ${je_repo} -b ${je_branch} -r ${je_version} -O '-Dalltests=true -DisolationLevel=serializable -Dlogging.config.file=logging.properties'"
+
+je_unit_jdk7_je_rep_pre
+    ssh -l tests slc04arq "bash je_unit.sh -t je_unit_jdk7_je_rep_pre -j 7 -l INFO -R ${je_repo} -b ${je_branch} -r ${je_version} -O '-Dalltests=true -Dlogging.config.file=logging.properties -Dreponly=1' -p TRUE"
+
+je_unit_jdk7_no_embedded_ln
+    ssh -l tests slc04arq "bash je_unit.sh -t je_unit_jdk7_no_embedded_ln -j 7 -l INFO -R ${je_repo} -b ${je_branch} -r ${je_version} -O '-Dalltests=true -Dlogging.config.file=logging.properties' -p TRUE"
+
+je_unit_jdk8_je_rep_pre
+    ssh -l tests slc04arq "bash je_unit.sh -t je_unit_jdk8_je_rep_pre -j 8 -l INFO -R ${je_repo} -b ${je_branch} -r ${je_version} -O '-Dalltests=true -Dlogging.config.file=logging.properties -Dreponly=1' -p TRUE"
+
+je_unit_aix
+    ssh -l nosql stuzx68 "bash je_unit.sh -t je_unit_aix -R ${je_repo} -b ${je_branch} -r ${je_version}"
+    TEST_ARG="-Dproxy.host=www-proxy -Dproxy.port=80 -Djvm=${JAVA_HOME}/bin/java"
+
+je_unit_jdk7
+    ssh -l tests slc04atb "bash je_unit.sh -t je_unit_jdk7 -j 7 -R ${je_repo} -b ${je_branch} -r ${je_version} -O '-Dalltests=true'"
+
+je_unit_jdk8
+    ssh -l tests slc04atb "bash je_unit.sh -t je_unit_jdk8 -j 8 -R ${je_repo} -b ${je_branch} -r ${je_version} -O '-Dalltests=true'"
+
+je_unit_jdk7_branch_5098
+    ssh -l tests slc04atb "bash je_unit.sh -t je_unit_jdk7_branch_5098 -j 7 -R ${je_repo} -b je-5.0.98_branch -r ${je_version} -O '-Dalltests=true'"
+
+je_unit_jdk6_branch_5098
+    ssh -l tests slc04atb "bash je_unit.sh -t je_unit_jdk6_branch_5098 -j 6 -R ${je_repo} -b je-5.0.98_branch -r ${je_version} -O '-Dalltests=true'"
+
+je_unit_secondassociation
+    ssh -l tests slc04arq "bash je_unit.sh -t je_unit_secondassociation -R ${je_repo} -b ${je_branch} -r ${je_version} -O '-Dlongtest=true'"
+
+je_unit_jdk8_always_default_repository
+    ssh -l tests slc04atb "bash je_unit.sh -t je_unit_jdk8 -j 8 -R 'sleepycat-scm.us.oracle.com//a/hgroot/je' -b ${je_branch} -r ${je_version} -O '-Dalltests=true'"
diff --git a/test/jenkins/unittest/je_unit.sh b/test/jenkins/unittest/je_unit.sh
new file mode 100644
index 0000000..033d258
--- /dev/null
+++ b/test/jenkins/unittest/je_unit.sh
@@ -0,0 +1,175 @@
+#!/bin/bash
+
+# The arguments that are passed by the Jenkins system
+TEST_ARG=""
+JDK_VERSION="8"
+TASK_NAME=""
+LOG_LEVEL=""
+BRANCH="default"
+JEREPO=""
+JEREVISION=0
+JEREVISIONARG=""
+HGPATH=""
+PRE=""
+
+# Jenkins VM and Test VM
+JENKINSVMIP="slc04ark"
+JENKINSVMUSERNAME="jenkins"
+JENKINSVM="${JENKINSVMUSERNAME}@${JENKINSVMIP}"
+TESTVM=`hostname -s`
+TESTVMUSERNAME="tests"
+TESTVMUSERPASSWORD="123456"
+
+# The user name used to get the je repository
+JEREPUSER="adqian"
+
+# Some basic directory/path/file names
+BASEDIR="/scratch/tests"
+REMOTEBASEDIR="/scratch/tests"
+JENKINSBASEDIR="/scratch/jenkins/jobs"
+CHANGESETFILE="jenkins_changeset.txt"
+ENVINFOFILE="location_of_environment_and_log.txt"
+
+while getopts "O:j:t:R:b:r:l:h:p:" OPTION
+do
+    case $OPTION in
+    O)
+        TEST_ARG=$OPTARG
+        ;;
+    j)
+        JDK_VERSION=$OPTARG
+        ;;
+    t)
+        TASK_NAME=$OPTARG
+        ;;
+    R)
+        JEREPO=$OPTARG
+        ;;
+    b)
+        BRANCH=$OPTARG
+        ;;
+    r)
+        JEREVISION=$OPTARG
+        ;;
+    l)
+        LOG_LEVEL=$OPTARG
+        ;;
+    h)
+        HGPATH=$OPTARG
+        ;;
+    p)
+        PRE=$OPTARG
+        ;;
+    esac
+done
+
+if [ "${JEREPO}" == "" ]; then
+    echo "JE repository must be specified"
+    exit 1
+fi
+
+if [ "${JEREVISION}" != "0" ]; then
+    JEREVISIONARG=" -u ${JEREVISION}"
+fi
+
+if [ "${HGPATH}" != "" ]; then
+    HGPATH="${HGPATH}/"
+fi
+
+# 1. Only je_unit_aix needs to be handled specially
+if [ "${TASK_NAME}" == "je_unit_aix" ]; then
+    JDK_VERSION="AIX"
+    BASEDIR="/scratch/nosql"
+    TESTVMUSERNAME="nosql"
+    TESTVMUSERPASSWORD="q"
+fi
+
+echo "Task name: $TASK_NAME"
+echo "Test args: $TEST_ARG"
+echo "JE repo: ssh://${JEREPUSER}@${JEREPO}"
+echo "JE branch: $BRANCH"
+echo "JE revision (0 means the tip): $JEREVISION"
+
+# hg clone je
+if [ "${TASK_NAME}" == "je_unit_aix" ]; then
+    ssh tests@slc04arq "rm -rf ${REMOTEBASEDIR}/${TASK_NAME} && mkdir -p ${REMOTEBASEDIR}/${TASK_NAME}"
+    echo "hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO}"
+    ssh tests@slc04arq "cd ${REMOTEBASEDIR}/${TASK_NAME} && ${HGPATH}hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO} ./je"
+    ssh tests@slc04arq "cd ${REMOTEBASEDIR}/${TASK_NAME}/je && ${HGPATH}hg log -l 1 -v > ./jenkins_changeset.txt"
+    BUILD_VER=`ssh tests@slc04arq "cd ${REMOTEBASEDIR}/${TASK_NAME}/je && ${HGPATH}hg parent"`
+    rm -rf ${BASEDIR}/${TASK_NAME}
+    scp -r tests@slc04arq:${REMOTEBASEDIR}/${TASK_NAME} ${BASEDIR}
+else
+    rm -rf ${BASEDIR}/${TASK_NAME} && mkdir -p ${BASEDIR}/${TASK_NAME}
+    echo "hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO}"
+    cd ${BASEDIR}/${TASK_NAME} && ${HGPATH}hg clone -b ${BRANCH} ${JEREVISIONARG} ssh://${JEREPUSER}@${JEREPO} ./je
+    cd je && ${HGPATH}hg log -l 1 -v > ./${CHANGESETFILE} && cd ..
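+    # Record the parent changeset; it is echoed in the "Code branch" banner below.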
+ BUILD_VER=`cd ${BASEDIR}/${TASK_NAME}/je && ${HGPATH}hg parent` +fi + +if [ X$JDK_VERSION == X"8" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_8 +elif [ X$JDK_VERSION == X"7" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_7 +elif [ X$JDK_VERSION == X"6" ] ; then + export JAVA_HOME=${BASEDIR}/app/Java_6 +elif [ X$JDK_VERSION == X"AIX" ] ; then + export JAVA_HOME=${BASEDIR}/app/ibm-java-ppc64-80 +else + export JAVA_HOME=${BASEDIR}/app/Java_5 +fi + +export ANT_HOME=${BASEDIR}/app/ant +export PATH=$ANT_HOME/bin:$JAVA_HOME/bin:$PATH + +ROOT_DIR=${BASEDIR}/${TASK_NAME} +ANT_VERN=`ant -version` + +echo " " +echo "=========================================================" +echo " " +java -version +ant -version +echo "JAVA_HOME=$JAVA_HOME " +echo "ANT_HOME=$ANT_HOME " +echo "Code branch: $BRANCH $BUILD_VER " +echo " " +echo "=========================================================" +echo " " + +if [ X$LOG_LEVEL == X"INFO" ]; then + echo "com.sleepycat.je.util.ConsoleHandler.level=INFO" > ${ROOT_DIR}/je/logging.properties +fi + +if [ X$PRE == X"TRUE" ]; then + echo " je.rep.preserveRecordVersion=true" >> ${ROOT_DIR}/je/test/je.properties +fi + +if [ X$TASK_NAME == X"je_unit_jdk7_no_embedded_ln" ]; then + echo "je.tree.maxEmbeddedLN=0" >> ${ROOT_DIR}/je/test/je.properties +fi + +if [ "${TASK_NAME}" == "je_unit_aix" ]; then + TEST_ARG="-Dproxy.host=www-proxy -Dproxy.port=80 -Djvm=${JAVA_HOME}/bin/java" +fi + +cd ${ROOT_DIR}/je && ant -lib ${BASEDIR}/app/ant/lib/junit-4.10.jar test $TEST_ARG + +# Back up the result of this time test run +#echo "ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} 'cat ${JENKINSBASEDIR}/${TASK_NAME}/nextBuildNumber'" +BUILDID=`ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "cat ${JENKINSBASEDIR}/${TASK_NAME}/nextBuildNumber"` +BUILDID=`expr $BUILDID - 1` + +LOGLOCATION=${BASEDIR}/log_archive/${TASK_NAME}/$BUILDID +mkdir -p $LOGLOCATION +cd $LOGLOCATION +cp -r ${ROOT_DIR}/je $LOGLOCATION + +# Generate the test environment information +echo "Host: ${TESTVM}.us.oracle.com" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Directory: `pwd`" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Username: ${TESTVMUSERNAME}" >> ${ROOT_DIR}/je/${ENVINFOFILE} +echo "Password: ${TESTVMUSERPASSWORD}" >> ${ROOT_DIR}/je/${ENVINFOFILE} + +ssh -l ${JENKINSVMUSERNAME} ${JENKINSVMIP} "rm -rf ${JENKINSBASEDIR}/${TASK_NAME}/workspace/*" +cd ${ROOT_DIR}/je && scp ./${CHANGESETFILE} ./${ENVINFOFILE} ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/ +cd ${ROOT_DIR}/je && scp -r build/test/data/ ${JENKINSVM}:${JENKINSBASEDIR}/${TASK_NAME}/workspace/ diff --git a/test/ssl/keys.store b/test/ssl/keys.store new file mode 100644 index 0000000..f68b6ec Binary files /dev/null and b/test/ssl/keys.store differ diff --git a/test/ssl/make-ks.sh b/test/ssl/make-ks.sh new file mode 100644 index 0000000..794a2b1 --- /dev/null +++ b/test/ssl/make-ks.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +rm keys.store + +pw=unittest + +# make self-signed key pair +keytool -genkeypair -keystore keys.store -storepass ${pw} -keypass ${pw} \ + -alias mykey -dname 'cn="Unit Test"' -keyAlg RSA -validity 36500 + +# export cert +rm mykey.cert +keytool -export -alias mykey -file mykey.cert -keystore keys.store \ + -storepass ${pw} + +# make a second self-signed key pair +keytool -genkeypair -keystore keys.store -storepass ${pw} -keypass ${pw} \ + -alias otherkey1 -dname 'cn="Other Test 1"' -keyAlg RSA -validity 36500 + +# export cert +rm otherkey2.cert +keytool -export -alias otherkey1 -file otherkey1.cert -keystore keys.store \ + -storepass ${pw} + +# make a 
third self-signed key pair +# this one is not added to the truststore +keytool -genkeypair -keystore keys.store -storepass ${pw} -keypass ${pw} \ + -alias otherkey2 -dname 'cn="Other Test 2"' -keyAlg RSA -validity 36500 + +# export cert +rm otherkey2.cert +keytool -export -alias otherkey2 -file otherkey2.cert -keystore keys.store \ + -storepass ${pw} + +rm trust.store + +# import mykey.cert and otherkey1.cert into truststore +keytool -import -alias mykey -file mykey.cert -keystore trust.store \ + -storepass ${pw} -noprompt +keytool -import -alias otherkey1 -file otherkey1.cert -keystore trust.store \ + -storepass ${pw} -noprompt + diff --git a/test/ssl/trust.store b/test/ssl/trust.store new file mode 100644 index 0000000..51bc056 Binary files /dev/null and b/test/ssl/trust.store differ diff --git a/test/standalone/BigDW.java b/test/standalone/BigDW.java new file mode 100644 index 0000000..60d00dd --- /dev/null +++ b/test/standalone/BigDW.java @@ -0,0 +1,444 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +import java.io.File; +import java.math.BigInteger; +import java.security.SecureRandom; +import java.util.Arrays; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; + +/** + * A large database with random key distribution has lots of IN waste, + * especially if records are small; this creates a worst-case scenario for the + * cleaner and also possibly for the evictor. Simulate such an application and + * measure how well the cleaner and evictor keep up. + * + * Some commonly used command lines for running this program are: + * + * # Init new DB, causes duplicates to be created and deleted [#15588] + * java BigDW -h HOME -init -dupdel + * + * Each transaction does the following in "grow" mode. In "no grow" mode, it + * does one less insert, keeping the total number of keys constant. + * + * 2 inserts, 1 delete, 1 update, 10 reads + * + * The delete and update operations include a read to find the record. 
+ * + */ +public class BigDW implements Runnable { + + private String homeDir = "tmp"; + private Environment env; + private Database refDB; + private Database testDB; + private boolean done; + private int nDeadlocks; + private boolean init; + private boolean verbose; + private boolean dupDel; + private int nTransactions; + private int nMaxTransactions = 20000; + private int nThreads = 4; + + private int subDir = 0; + private int keySize = 10; + private int dataSize = 10; + private int nReadsPerWrite = 1; + private int maxRetries = 100; + private float totalSecs; + private float throughput; + private SecureRandom random = new SecureRandom(); + private long startTime; + private long time; + private long mainCacheSize = 20000000; + + public static void main(String args[]) { + try { + new BigDW().run(args); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(1); + } + } + + /* Output command-line input arguments to log. */ + private void printArgs(String[] args) { + System.out.print("\nCommand line arguments:"); + for (String arg : args) { + System.out.print(' '); + System.out.print(arg); + } + System.out.println(); + } + + private void usage(String error) { + + if (error != null) { + System.err.println(error); + } + System.err.println + ("java " + getClass().getName() + '\n' + + " [-h ] [-v] [-init] [-dupdel]\n" + + " [-txns ]\n"); + System.exit(1); + } + + private void run(String args[]) throws Exception { + + try { + if (args.length == 0) { + throw new IllegalArgumentException(); + } + /* Parse command-line input arguments. */ + for (int i = 0; i < args.length; i += 1) { + String arg = args[i]; + boolean moreArgs = i < args.length - 1; + if (arg.equals("-v")) { + verbose = true; + } else if (arg.equals("-dupdel")) { + dupDel = true; + } else if (arg.equals("-h") && moreArgs) { + homeDir = args[++i]; + } else if (arg.equals("-init")) { + init = true; + } else if (arg.equals("-txns") && moreArgs) { + nMaxTransactions = Integer.parseInt(args[++i]); + } else if (arg.equals("-threads") && moreArgs) { + nThreads = Integer.parseInt(args[++i]); + } else if (arg.equals("-subDir") && moreArgs) { + subDir = Integer.parseInt(args[++i]); + } else { + usage("Unknown arg: " + arg); + } + } + printArgs(args); + } catch (IllegalArgumentException e) { + usage("IllegalArguments! "); + e.printStackTrace(); + System.exit(1); + } + + openEnv(); + startTime = System.currentTimeMillis(); + + Thread[] threads = new Thread[nThreads]; + for (int i = 0; i < nThreads; i += 1) { + threads[i] = new Thread(this); + threads[i].start(); + Thread.sleep(1000); /* Stagger threads. 
*/ + } + for (int i = 0; i < nThreads; i += 1) { + if (threads[i] != null) { + threads[i].join(); + } + } + + time = System.currentTimeMillis(); + closeEnv(); + + totalSecs = (float) (time - startTime) / 1000; + throughput = (float) nTransactions / totalSecs; + if (verbose) { + System.out.println("\nTotal seconds: " + totalSecs + + " txn/sec: " + throughput); + } + } + + private void openEnv() throws Exception { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(init); + envConfig.setCacheSize(mainCacheSize); + if (subDir > 0) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, subDir + ""); + Utils.createSubDirs(new File(homeDir), subDir); + } + env = new Environment(new File(homeDir), envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(init); + dbConfig.setExclusiveCreate(init); + dbConfig.setSortedDuplicates(dupDel); + refDB = env.openDatabase(null, "BigDWRef", dbConfig); + + dbConfig.setDeferredWrite(true); + testDB = env.openDatabase(null, "BigDWTest", dbConfig); + + compare(); + } + + private void closeEnv() + throws Exception { + + refDB.close(); + testDB.sync(); + testDB.close(); + env.close(); + } + + public void run() { + + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + byte[] lastInsertKey = null; + + while (!done) { + + /* JE-only begin */ + try { + + /* Perform the transaction. */ + for (int retry = 0;; retry += 1) { + Cursor refCursor = refDB.openCursor(null, null); + Cursor testCursor = testDB.openCursor(null, null); + + try { + if (init) { + key.setData(lastInsertKey); + insert(refCursor, testCursor, key, data); + lastInsertKey = copyData(key); + } + + /* Insert */ + key.setData(lastInsertKey); + insert(refCursor, testCursor, key, data); + lastInsertKey = copyData(key); + + /* Dup-key insert. */ + byte[] dupDataBA = copyData(data); + for (int i = 0; i < 5; i++) { + dupDataBA[0]++; + DatabaseEntry dupData = + new DatabaseEntry(dupDataBA); + OperationStatus status1 = + refCursor.put(key, dupData); + @SuppressWarnings("unused") + boolean insertDone1 = checkInsertStatus(status1); + if (status1 != OperationStatus.SUCCESS) { + throw new RuntimeException("insert1 " + + status1); + } + OperationStatus status2 = + testCursor.put(key, dupData); + if (status2 != OperationStatus.SUCCESS) { + throw new RuntimeException("insert2 " + + status2); + } + @SuppressWarnings("unused") + boolean insertDone2 = checkInsertStatus(status2); + } + + /* Delete */ + getRandom(refCursor, "BigDWRef", + testCursor, "BigDWTest", + key, data, LockMode.RMW); + DatabaseEntry dummy1 = new DatabaseEntry(); + DatabaseEntry dummy2 = new DatabaseEntry(); + while (refCursor.delete() == + OperationStatus.SUCCESS && + refCursor.getNextDup + (dummy1, dummy2, null) == + OperationStatus.SUCCESS) { + } + while (testCursor.delete() == + OperationStatus.SUCCESS && + refCursor.getNextDup + (dummy1, dummy2, null) == + OperationStatus.SUCCESS) { + } + + /* Read */ + for (int i = 0; i < nReadsPerWrite; i += 1) { + getRandom(refCursor, "BigDWRef", + testCursor, "BigDWTest", + key, data, LockMode.RMW); + } + refCursor.close(); + testCursor.close(); + nTransactions += 1; + if (nMaxTransactions != 0 && + nTransactions >= nMaxTransactions) { + done = true; + } + break; + } catch (LockConflictException e) { + refCursor.close(); + testCursor.close(); + if (retry >= maxRetries) { + throw e; + } + /* Break deadlock cycle with a small sleep. 
*/ + Thread.sleep(5); + nDeadlocks += 1; + } + } /* for */ + + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + } + + private void checkStatus(OperationStatus status) + throws Exception { + if (status != OperationStatus.SUCCESS) { + throw new Exception("problemStatus = " + status); + } + } + + private void compare() + throws Exception { + + DatabaseEntry refKey = new DatabaseEntry(); + DatabaseEntry refData = new DatabaseEntry(); + DatabaseEntry testKey = new DatabaseEntry(); + DatabaseEntry testData = new DatabaseEntry(); + + Cursor refCursor = refDB.openCursor(null, null); + Cursor testCursor = testDB.openCursor(null, null); + + System.out.println("Compare starts"); + try { + while (refCursor.getNext(refKey, refData, LockMode.DEFAULT) == + OperationStatus.SUCCESS) { + checkStatus(testCursor.getNext(testKey, testData, + LockMode.DEFAULT)); + + if (!Arrays.equals(refKey.getData(), + testKey.getData())) { + throw new Exception("Keys don't match"); + } + + if (!Arrays.equals(refData.getData(), + testData.getData())) { + throw new Exception("Data don't match"); + } + } + + if (testCursor.getNext(testKey, testData, LockMode.DEFAULT) != + OperationStatus.NOTFOUND) { + throw new Exception("testCursor has extra data"); + } + } finally { + refCursor.close(); + testCursor.close(); + } + System.out.println("Compare ends"); + } + + private void insert(Cursor c1, Cursor c2, + DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + + makeData(data); + boolean insertDone1 = false; + while (!insertDone1) { + makeInsertKey(key); + OperationStatus status1 = c1.putNoOverwrite(key, data); + insertDone1 = checkInsertStatus(status1); + OperationStatus status2 = c2.putNoOverwrite(key, data); + boolean insertDone2 = checkInsertStatus(status2); + assert insertDone1 == insertDone2 : + "status1=" + status1 + + " status2=" + status2; + } + } + + private boolean checkInsertStatus(OperationStatus status) { + if (status == OperationStatus.KEYEXIST) { + System.out.println("****** Duplicate random key."); + return false; // try again. 
+ } else { + if (status != OperationStatus.SUCCESS) { + System.out.println + ("Unexpected return value from insert(): " + status); + } + return true; // end one way or another + } + } + + private void getRandom(Cursor c1, String db1, + Cursor c2, String db2, + DatabaseEntry key, DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + makeRandomKey(key); + getRandomWork(c1, db1, key, data, lockMode); + getRandomWork(c2, db2, key, data, lockMode); + } + + private void getRandomWork(Cursor c, + String dbName, + DatabaseEntry key, + DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + OperationStatus status = c.getSearchKeyRange(key, data, lockMode); + if (status == OperationStatus.NOTFOUND) { + status = c.getLast(key, data, lockMode); + if (status != OperationStatus.SUCCESS) { + System.out.println + ("Unexpected return value from " + dbName + + ".getRandomWork(): " + status); + } + } + } + + private void makeInsertKey(DatabaseEntry key) { + if (key.getData() != null) { + BigInteger num = new BigInteger(copyData(key)); + num = num.add(BigInteger.ONE); + key.setData(num.toByteArray()); + } else { + makeRandomKey(key); + } + } + + private void makeRandomKey(DatabaseEntry key) { + byte[] bytes = new byte[keySize]; + random.nextBytes(bytes); + key.setData(bytes); + } + + private void makeData(DatabaseEntry data) { + + byte[] bytes = new byte[dataSize]; + for (int i = 0; i < bytes.length; i += 1) { + bytes[i] = (byte) i; + } + data.setData(bytes); + } + + private byte[] copyData(DatabaseEntry data) { + + byte[] buf = new byte[data.getSize()]; + System.arraycopy(data.getData(), data.getOffset(), buf, 0, buf.length); + return buf; + } +} diff --git a/test/standalone/BigRandom.java b/test/standalone/BigRandom.java new file mode 100644 index 0000000..9016877 --- /dev/null +++ b/test/standalone/BigRandom.java @@ -0,0 +1,624 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +import java.io.File; +import java.math.BigInteger; +import java.security.SecureRandom; + +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.dbi.MemoryBudget; + +/** + * A large database with random key distribution has lots of IN waste, + * especially if records are small; this creates a worst-case scenario for the + * cleaner and also possibly for the evictor. Simulate such an application and + * measure how well the cleaner and evictor keep up. 
+ * + * Some commonly used command lines for running this program are: + * + * # Init new DB, then do updates forever. + * java BigRandom -h HOME -init + * + * # Do updates on an existing DB forever. + * java BigRandom -h HOME + * + * # Init new DB, then stop and print total rate (MOST COMMON OPTION) + * java BigRandom -h HOME -initonly + * + * # -locality N adds locality of reference for N transactions. + * java BigRandom -h HOME -initonly -locality 5 + * + * # -nosync speeds things up quite a bit + * java BigRandom -h HOME -initonly -locality 5 -nosync + * + * Each transaction does the following in "grow" mode. In "no grow" mode, it + * does one less insert, keeping the total number of keys constant. + * + * 2 inserts, 1 delete, 1 update, 10 reads + * + * The delete and update operations include a read to find the record. + * + * Every operation uses a random key, unless the -locality option is used. If + * "-locality 100" is specified, each thread will perform 100 transactions by + * incrementing the insertion key rather than generating a random number. Then + * a random number is generated as the next starting key. This is done per + * thread, so each thread will be working in a different key area. + */ +public class BigRandom implements Runnable { + + private String homeDir = "tmp"; + private Environment env; + private Database db; + private boolean done; + private int nDeadlocks; + private boolean init; + private boolean initOnly; + private boolean fastInit; + private boolean verbose; + private boolean sequentialKeys; + private boolean noSync; + private int nMaxKeys = 10000000; + private long nKeys; + private long sequence; + private int nTransactions; + private int nMaxTransactions; + private int nThreads = 4; + private int oneThreadKeys; + private long traceInterval = 10000; // 10 seconds + private boolean preload; + private int maxLocalKeyTxns; + private int keySize = 10; + private int dataSize = 20; + private int nReadsPerWrite = 10; + private int maxRetries = 100; + private SecureRandom random = new SecureRandom(); + private long startTime; + private long priorTime = startTime; + private int priorTxns; + private int[] tpTxns = new int[120]; // 120 * 10 sec = ~20 minutes worth + private long[] tpMillis = new long[tpTxns.length]; + private int tpIndex = tpTxns.length - 1; + private int tpMaxIndex; + private long tpTotalTxns; + private long tpTotalMillis; + private int thisTxns; + private int thisSecs; + private int thisTp; + private int avgTp; + private long time; + private int totalSecs; + private int subDir = 0; + + public static void main(String args[]) { + try { + new BigRandom().run(args); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(1); + } + } + + private void run(String args[]) + throws Exception { + + for (int i = 0; i < args.length; i += 1) { + String arg = args[i]; + boolean moreArgs = i < args.length - 1; + if (arg.equals("-v")) { + verbose = true; + } else if (arg.equals("-seq")) { + sequentialKeys = true; + } else if (arg.equals("-nosync")) { + noSync = true; + } else if (arg.equals("-h") && moreArgs) { + homeDir = args[++i]; + } else if (arg.equals("-preload")) { + preload = true; + } else if (arg.equals("-init")) { + init = true; + } else if (arg.equals("-initonly")) { + init = true; + initOnly = true; + } else if (arg.equals("-fastinit")) { + init = true; + fastInit = true; + initOnly = true; + } else if (arg.equals("-keys") && moreArgs) { + nMaxKeys = Integer.parseInt(args[++i]); + } else if (arg.equals("-txns") && 
moreArgs) { + nMaxTransactions = Integer.parseInt(args[++i]); + } else if (arg.equals("-threads") && moreArgs) { + nThreads = Integer.parseInt(args[++i]); + } else if (arg.equals("-onethreadkeys") && moreArgs) { + oneThreadKeys = Integer.parseInt(args[++i]); + } else if (arg.equals("-locality") && moreArgs) { + maxLocalKeyTxns = Integer.parseInt(args[++i]); + } else if (arg.equals("-subDir") && moreArgs) { + subDir = Integer.parseInt(args[++i]); + } else { + usage("Unknown arg: " + arg); + } + } + openEnv(); + printArgs(args); + printLegend(); + if (sequentialKeys) { + sequence = getLastSequence(); + } + if (preload) { + doPreload(); + } + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setFast(true); + statsConfig.setClear(true); + startTime = System.currentTimeMillis(); + priorTime = startTime; + + Thread[] threads = new Thread[nThreads]; + if (oneThreadKeys > 0) { + threads[0] = new Thread(this); + threads[0].start(); + } else { + for (int i = 0; i < nThreads; i += 1) { + threads[i] = new Thread(this); + threads[i].start(); + Thread.sleep(1000); /* Stagger threads. */ + } + } + + while (!done) { + Thread.sleep(traceInterval); + calcThroughput(); + /* JE-only begin */ + EnvironmentStats stats = env.getStats(statsConfig); + MemoryBudget mb = + DbInternal.getNonNullEnvImpl(env).getMemoryBudget(); + int inListSize = + DbInternal.getNonNullEnvImpl(env).getInMemoryINs(). + getSize(); + System.out.println("\nsec: " + totalSecs + ',' + thisSecs + + " txn: " + thisTxns + ',' + + thisTp + ',' + avgTp + + " keys: " + nKeys + + " dlck: " + nDeadlocks + + " buf: " + + stats.getNNotResident() + ',' + + stats.getNCacheMiss() + + "\ncleaner: " + + stats.getNCleanerEntriesRead() + ',' + + stats.getNCleanerRuns() + ',' + + stats.getNCleanerDeletions() + ',' + + stats.getCleanerBacklog() + + " evict: " + + stats.getNBINsStripped() + ',' + + stats.getNNodesExplicitlyEvicted() + ',' + + mb.getCacheMemoryUsage() + ',' + + inListSize + + " ckpt: " + + stats.getNCheckpoints() + ',' + + stats.getNFullINFlush() + ',' + + stats.getNFullBINFlush() + ',' + + stats.getNDeltaINFlush()); + /* JE-only end */ + nDeadlocks = 0; + + if (oneThreadKeys > 0 && oneThreadKeys >= nKeys) { + for (int i = 1; i < nThreads; i += 1) { + threads[i] = new Thread(this); + threads[i].start(); + Thread.sleep(1000); /* Stagger threads. 
*/ + } + oneThreadKeys = 0; + } + } + + for (int i = 0; i < nThreads; i += 1) { + if (threads[i] != null) { + threads[i].join(); + } + } + + time = System.currentTimeMillis(); + totalSecs = (int) ((time - startTime) / 1000); + System.out.println("\nTotal seconds: " + totalSecs + + " txn/sec: " + (nTransactions / totalSecs)); + closeEnv(); + } + + private void calcThroughput() { + + time = System.currentTimeMillis(); + totalSecs = (int) ((time - startTime) / 1000); + int txns = nTransactions; + thisTxns = txns - priorTxns; + int thisMillis = (int) (time - priorTime); + thisSecs = thisMillis / 1000; + thisTp = thisTxns / thisSecs; + + tpIndex += 1; + if (tpIndex == tpTxns.length) { + tpIndex = 0; + } + tpTotalTxns += thisTxns; + tpTotalTxns -= tpTxns[tpIndex]; + tpTotalMillis += thisMillis; + tpTotalMillis -= tpMillis[tpIndex]; + tpTxns[tpIndex] = thisTxns; + tpMillis[tpIndex] = thisMillis; + if (tpMaxIndex < tpTxns.length) { + tpMaxIndex = tpIndex + 1; + } + avgTp = (int) ((tpTotalTxns / (tpTotalMillis / 1000))); + + priorTxns = txns; + priorTime = time; + } + + private void printArgs(String[] args) + throws DatabaseException { + + System.out.print("Command line arguments:"); + for (String arg : args) { + System.out.print(' '); + System.out.print(arg); + } + System.out.println(); + System.out.println(); + System.out.println("Environment configuration:"); + System.out.println(env.getConfig()); + System.out.println(); + } + + private void printLegend() { + + /* JE-only begin */ + System.out.println( + "Legend:\n" + + "sec: ,\n" + + "txn: ,,\n" + + "keys: \n" + + "dlck: \n" + + "buf: ,\n" + + "clean: ,,,\n" + + "evict: ,,,\n" + + "ckpt: ,,,"); + /* JE-only end */ + } + + private void usage(String error) { + + if (error != null) { + System.err.println(error); + } + System.err.println + ("java " + getClass().getName() + '\n' + + " [-h ] [-v] [-init | -initonly | -fastinit]\n" + + " [-keys ] [-txns ] [-seq]\n" + + " [-threads ] [-onethreadkeys ]\n" + + " [-locality ] [-nosync] [-preload]"); + System.exit(2); + } + + private void openEnv() throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setAllowCreate(init); + envConfig.setCacheMode(CacheMode.EVICT_LN); + envConfig.setConfigParam(EnvironmentConfig.MAX_OFF_HEAP_MEMORY, + "" + (50 * 1024 * 1024)); + + if (noSync) { + envConfig.setTxnNoSync(true); + } + + if (subDir > 0) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, subDir + ""); + Utils.createSubDirs(new File(homeDir), subDir); + } + + long startTime = System.currentTimeMillis(); + env = new Environment(new File(homeDir), envConfig); + long endTime = System.currentTimeMillis(); + System.out.println("Recovery time: " + ((endTime - startTime) / 1000)); + System.out.println(); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(init); + dbConfig.setExclusiveCreate(init); + dbConfig.setTransactional(true); + /* JE-only begin */ + db = env.openDatabase(null, "BigRandom", dbConfig); + /* JE-only end */ + } + + private void closeEnv() + throws DatabaseException { + + db.close(); + env.close(); + } + + public void run() { + + /* + * The key is reused over multiple loop iterations for computing a + * local insertion key, so it must be instantiated at the top of the + * loop. In makeInsertKey a local insertion key is creating by adding + * one to the last key accessed. 
+ */ + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + int localKeyTxns = 0; + byte[] lastInsertKey = null; + OperationStatus status; + + while (!done) { + + try { + /* + * When using local keys, only the first insert will be with a + * random key, and only if we've exceeded the maximum number of + * local key transactions. When not using local keys, all keys + * are randomly generated. + */ + boolean useLocalKeys = maxLocalKeyTxns > 0; + boolean insertRandomKey = true; + if (useLocalKeys) { + if (localKeyTxns < maxLocalKeyTxns) { + insertRandomKey = false; + localKeyTxns += 1; + } else { + localKeyTxns = 0; + } + } + + /* Perform the transaction. */ + for (int retry = 0;; retry += 1) { + Transaction txn = env.beginTransaction(null, null); + Cursor c = db.openCursor(txn, null); + try { + boolean addedKey = false; + if (init && nKeys < nMaxKeys) { + key.setData(lastInsertKey); + insert(c, key, data, insertRandomKey); + lastInsertKey = copyData(key); + insertRandomKey = !useLocalKeys; + addedKey = true; + } + if (!fastInit) { + /* Insert. */ + key.setData(lastInsertKey); + insert(c, key, data, insertRandomKey); + lastInsertKey = copyData(key); + if (useLocalKeys) { + /* Update the following key. */ + status = c.getNext(key, data, LockMode.RMW); + if (status == OperationStatus.SUCCESS) { + c.putCurrent(data); + /* Delete the following key. */ + status = c.getNext + (key, data, LockMode.RMW); + if (status == OperationStatus.SUCCESS) { + c.delete(); + } + } + /* Read. Use RMW to avoid deadlocks. */ + for (int i = 0; i < nReadsPerWrite; i += 1) { + c.getNext(key, data, LockMode.RMW); + } + } else { + /* Update */ + getRandom(c, key, data, LockMode.RMW); + c.putCurrent(data); + /* Delete */ + getRandom(c, key, data, LockMode.RMW); + c.delete(); + /* Read */ + for (int i = 0; i < nReadsPerWrite; i += 1) { + getRandom(c, key, data, null); + } + } + } + c.close(); + txn.commit(); + nTransactions += 1; + if (addedKey) { + nKeys += 1; + } + if (initOnly && nKeys >= nMaxKeys) { + done = true; + } + if (nMaxTransactions != 0 && + nTransactions >= nMaxTransactions) { + done = true; + } + break; + } catch (LockConflictException e) { + c.close(); + txn.abort(); + if (retry >= maxRetries) { + throw e; + } + /* Break deadlock cycle with a small sleep. 
*/ + Thread.sleep(5); + nDeadlocks += 1; + } + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + } + + private void insert(Cursor c, DatabaseEntry key, DatabaseEntry data, + boolean insertRandomKey) + throws DatabaseException { + + makeData(data); + while (true) { + makeInsertKey(c, key, insertRandomKey); + OperationStatus status = c.putNoOverwrite(key, data); + if (status == OperationStatus.KEYEXIST) { + if (sequentialKeys) { + System.out.println("****** Duplicate sequential key."); + } else if (insertRandomKey) { + System.out.println("****** Duplicate random key."); + } else { + System.out.println("****** Duplicate local key."); + } + } else { + if (status != OperationStatus.SUCCESS) { + System.out.println + ("Unexpected return value from insert(): " + status); + } + break; + } + } + } + + private void getRandom(Cursor c, DatabaseEntry key, DatabaseEntry data, + LockMode lockMode) + throws DatabaseException { + + makeRandomKey(key); + OperationStatus status = c.getSearchKeyRange(key, data, lockMode); + if (status == OperationStatus.NOTFOUND) { + status = c.getLast(key, data, lockMode); + if (status != OperationStatus.SUCCESS) { + System.out.println + ("Unexpected return value from getRandom(): " + status); + } + } + } + + private long getLastSequence() + throws DatabaseException { + + if (!sequentialKeys) throw new IllegalStateException(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + Cursor c = db.openCursor(null, null); + try { + OperationStatus status = c.getLast(key, data, null); + if (status == OperationStatus.SUCCESS) { + TupleInput in = new TupleInput(key.getData(), + key.getOffset(), + key.getSize()); + return in.readLong(); + } else { + return 0; + } + } finally { + c.close(); + } + } + + private void doPreload() + throws DatabaseException { + + System.out.println("Preloading"); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry key = new DatabaseEntry(); + Cursor c = db.openCursor(null, null); + try { + long startTime = System.currentTimeMillis(); + int count = 0; + while (c.getNext(key, data, LockMode.READ_UNCOMMITTED) == + OperationStatus.SUCCESS) { + count += 1; + } + long endTime = System.currentTimeMillis(); + int seconds = (int) ((endTime - startTime) / 1000); + System.out.println + ("Preloaded records=" + count + " seconds=" + seconds); + } finally { + c.close(); + } + } + + private void makeInsertKey(Cursor c, DatabaseEntry key, + boolean insertRandomKey) { + if (sequentialKeys) { + long val; + synchronized (this) { + val = ++sequence; + } + makeLongKey(key, val); + } else if (!insertRandomKey && key.getData() != null) { + BigInteger num = new BigInteger(copyData(key)); + num = num.add(BigInteger.ONE); + key.setData(num.toByteArray()); + } else { + makeRandomKey(key); + } + } + + private void makeRandomKey(DatabaseEntry key) { + + if (sequentialKeys) { + makeLongKey(key, (long) (random.nextFloat() * sequence)); + } else { + byte[] bytes = new byte[keySize]; + random.nextBytes(bytes); + key.setData(bytes); + } + } + + private void makeLongKey(DatabaseEntry key, long val) { + + TupleOutput out = new TupleOutput(); + out.writeLong(val); + byte[] pad = new byte[keySize - 8]; + out.writeFast(pad); + if (out.getBufferOffset() != 0 || out.getBufferLength() != keySize) { + throw new IllegalStateException(); + } + key.setData(out.getBufferBytes(), 0, keySize); + } + + private void makeData(DatabaseEntry data) { + + byte[] bytes = new byte[dataSize]; + for (int i = 0; i < bytes.length; i += 1) { + 
bytes[i] = (byte) i; + } + data.setData(bytes); + } + + private byte[] copyData(DatabaseEntry data) { + + byte[] buf = new byte[data.getSize()]; + System.arraycopy(data.getData(), data.getOffset(), buf, 0, buf.length); + return buf; + } +} diff --git a/test/standalone/CleanWithSmallCache.java b/test/standalone/CleanWithSmallCache.java new file mode 100644 index 0000000..4aa2e5d --- /dev/null +++ b/test/standalone/CleanWithSmallCache.java @@ -0,0 +1,528 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +import java.io.File; +import java.text.NumberFormat; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.DbInternal; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.StatsConfig; + +/** + * Used to test a small cache and log cleaning. For example, to create a large + * set of log files (over 10 GB) that are almost 100% obsolete: + * + * java -Xmx6m -cp .:before.jar CleanWithSmallCache \ + * -records 40000 -key 48 -data 10 -h tmp -random -cache 250k \ + * -seconds 2000 -write 10000 + * + * And then to clean that set of logs: + * + * java -Xmx15m -cp .:before.jar CleanWithSmallCache \ + * -records 40000 -key 48 -data 10 -h tmp -random -cache 250k \ + * -seconds 22000 -read 10 -clean + */ +public class CleanWithSmallCache { + + private static final NumberFormat INT_FORMAT = + NumberFormat.getIntegerInstance(); + private static final NumberFormat NUMBER_FORMAT = + NumberFormat.getNumberInstance(); + + private File envHome = null; + private int cacheSize = 0; + private boolean offHeap = false; + private int records = -1; + private int keySize = -1; + private int dataSize = -1; + private int fanout = 128; + private boolean doReads = false; + private boolean doWrites = false; + private int totalSeconds = 0; + private long beginTime = 0; + private long endTime = 0; + private boolean randomKeys = false; + private boolean doClean = false; + private boolean fillCache = false; + private Random random = new Random(123); + private AtomicInteger nReads = new AtomicInteger(0); + private AtomicInteger nWrites = new AtomicInteger(0); + private boolean programDone = false; + private Environment env = null; + private Database db = null; + + public static void main(String[] args) { + try { + System.out.print("Command line: "); + for (String s : args) { + System.out.print(s); + System.out.print(' '); + } + System.out.println(); + CleanWithSmallCache test = new CleanWithSmallCache(args); + long start = System.currentTimeMillis(); + System.out.println("Opening environment"); + test.open(); + System.out.println("Starting test"); + test.execute(); + test.close(); + long end = 
System.currentTimeMillis(); + System.out.println("Time: " + ((end - start) / 1000) + " sec"); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + private CleanWithSmallCache(String[] args) { + + for (int i = 0; i < args.length; i += 1) { + String name = args[i]; + String val = null; + if (i < args.length - 1 && !args[i + 1].startsWith("-")) { + i += 1; + val = args[i]; + } + if (name.equals("-h")) { + if (val == null) { + usage("No value after -h"); + } + envHome = new File(val); + } else if (name.equals("-cache")) { + if (val == null) { + usage("No value after -cache"); + } + boolean mb = false; + boolean kb = false; + if (val.endsWith("m")) { + mb = true; + val = val.substring(0, val.length() - 1); + } else if (val.endsWith("k")) { + kb = true; + val = val.substring(0, val.length() - 1); + } + try { + cacheSize = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (cacheSize <= 0) { + usage(val + " is not a positive integer"); + } + if (mb) { + cacheSize *= 1024 * 1024; + } else if (kb) { + cacheSize *= 1024; + } + } else if (name.equals("-offheap")) { + if (val == null) { + usage("No value after -offheap"); + } + offHeap = Boolean.parseBoolean(val); + } else if (name.equals("-records")) { + if (val == null) { + usage("No value after -records"); + } + try { + records = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (records <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-key")) { + if (val == null) { + usage("No value after -key"); + } + try { + keySize = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (keySize <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-data")) { + if (val == null) { + usage("No value after -data"); + } + try { + dataSize = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (dataSize < 0) { + usage(val + " is not a non-negative integer"); + } + } else if (name.equals("-read")) { + if (val == null) { + usage("No value after -read"); + } + doReads = Boolean.parseBoolean(val); + } else if (name.equals("-write")) { + if (val == null) { + usage("No value after -write"); + } + doWrites = Boolean.parseBoolean(val); + } else if (name.equals("-seconds")) { + if (val == null) { + usage("No value after -seconds"); + } + try { + totalSeconds = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (totalSeconds < 0) { + usage(val + " is not a non-negative integer"); + } + } else if (name.equals("-fanout")) { + if (val == null) { + usage("No value after -fanout"); + } + try { + fanout = Integer.parseInt(val); + } catch (NumberFormatException e) { + usage(val + " is not a number"); + } + if (fanout <= 0) { + usage(val + " is not a positive integer"); + } + } else if (name.equals("-random")) { + randomKeys = true; + } else if (name.equals("-clean")) { + doClean = true; + } else if (name.equals("-fillcache")) { + fillCache = true; + } else { + usage("Unknown arg: " + name); + } + } + + if (envHome == null) { + usage("-h not specified"); + } + + if (cacheSize <= 0) { + usage("-cache not specified"); + } + + if (records <= 0) { + usage("-records not specified"); + } + + if (keySize <= 0) { + usage("-key not specified"); + } + + if (dataSize <= 0) { + usage("-data not specified"); + } + + int maxRecNum; 
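+        /*
+         * Largest record number that fits in keySize bytes, mirroring
+         * makeKey below: 1-byte keys hold a raw byte, 2-3 byte keys are
+         * written as an unsigned short, and larger keys as an unsigned int.
+         */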
+ switch (keySize) { + case 1: + maxRecNum = Byte.MAX_VALUE; + break; + case 2: + case 3: + maxRecNum = Short.MAX_VALUE; + break; + default: + maxRecNum = Integer.MAX_VALUE; + } + if (records > maxRecNum) { + usage("-key size too small for number of records"); + } + } + + private void usage(String msg) { + + if (msg != null) { + System.out.println(msg); + } + + System.out.println + ("usage:" + + "\njava " + CleanWithSmallCache.class.getName() + + "\n -h " + + "\n # Environment home directory" + + "\n -records " + + "\n # Total records (key/data pairs); required" + + "\n -key " + + "\n # Key bytes per record; required" + + "\n -data " + + "\n # Data bytes per record; required" + + "\n [-fanout ]" + + "\n # Number of entries per Btree node; default: 128" + + "\n [-read ]" + + "\n # Number of read operations per second; default: 0" + + "\n [-write ]" + + "\n # Number of write operations per second; default: 0" + + "\n [-random]" + + "\n # Write randomly generated keys;" + + "\n # default: write sequential keys" + + "\n [-seconds ]" + + "\n # Number of seconds to run; default: 0 or forever" + + "\n [-clean]" + + "\n # Perform log cleaning; default: false" + + "\n [-offheap]" + + "\n # Use an off-heap cache; default: false" + + "\n [-fillcache]" + + "\n # Artificially fill the cache; default: false"); + + System.exit(2); + } + + private void open() + throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setConfigParam("je.env.runCleaner", "false"); + envConfig.setCacheSize(cacheSize); + if (offHeap) { + envConfig.setOffHeapCacheSize(cacheSize); + envConfig.setConfigParam( + EnvironmentConfig.OFFHEAP_EVICT_BYTES, "1024"); + } + env = new Environment(envHome, envConfig); + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setNodeMaxEntries(fanout); + db = env.openDatabase(null, "foo", dbConfig); + + if (fillCache) { + DbInternal.getNonNullEnvImpl(env).getMemoryBudget(). 
+ updateAdminMemoryUsage(cacheSize * 2); + } + } + + private void close() + throws DatabaseException { + + db.close(); + env.close(); + } + + private int makeKey(int recNum, DatabaseEntry entry) { + if (randomKeys) { + recNum = random.nextInt(records - 1) + 1; + } else { + recNum += 1; + if (recNum > records) { + recNum = 1; + } + } + if (keySize == 1) { + entry.setData(new byte[] { (byte) recNum }); + } else { + TupleOutput out = new TupleOutput(new byte[keySize]); + int written; + if (keySize == 2 || keySize == 3) { + out.writeUnsignedShort((short) recNum); + written = 2; + } else { + out.writeUnsignedInt(recNum); + written = 4; + } + while (written < keySize) { + out.writeFast(0); + written += 1; + } + TupleBinding.outputToEntry(out, entry); + } + return recNum; + } + + private void execute() + throws InterruptedException { + + Thread monitor = new Monitor(); + Thread cleaner = null; + if (doClean) { + cleaner = new Cleaner(); + } + Thread writer = null; + if (doWrites) { + writer = new OperationRunner(nWrites, new Operation() { + public void doOperation(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + db.put(null, key, data); + } + }); + } + Thread reader = null; + if (doReads) { + reader = new OperationRunner(nReads, new Operation() { + public void doOperation(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException { + Cursor cursor = db.openCursor(null, null); + cursor.getSearchKeyRange(key, data, null); + cursor.close(); + } + }); + } + beginTime = System.currentTimeMillis(); + if (totalSeconds > 0) { + endTime = beginTime + (totalSeconds * 1000); + } + monitor.start(); + if (cleaner != null) { + cleaner.start(); + } + if (writer != null) { + writer.start(); + } + if (reader != null) { + reader.start(); + } + monitor.join(); + if (cleaner != null) { + cleaner.join(); + } + if (writer != null) { + writer.join(); + } + if (reader != null) { + reader.join(); + } + } + + private class Monitor extends Thread { + public void run() { + try { + long lastTime = System.currentTimeMillis(); + while ((totalSeconds == 0 || lastTime < endTime) && + !programDone) { + Thread.sleep(5000); + long time = System.currentTimeMillis(); + printStats(time); + lastTime = time; + } + programDone = true; + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + } + + private class Cleaner extends Thread { + public void run() { + CheckpointConfig forceConfig = new CheckpointConfig(); + forceConfig.setForce(true); + try { + boolean cleanedSome; + do { + cleanedSome = false; + while (true) { + int nFiles = env.cleanLog(); + if (nFiles == 0) { + break; + } + cleanedSome = true; + } + env.checkpoint(forceConfig); + } while (cleanedSome && !programDone); + programDone = true; + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + } + + private interface Operation { + void doOperation(DatabaseEntry key, DatabaseEntry data) + throws DatabaseException; + } + + private class OperationRunner extends Thread { + + private Operation op; + private AtomicInteger nOps; + + OperationRunner(AtomicInteger nOps, Operation op) { + this.nOps = nOps; + this.op = op; + } + + public void run() { + + int recNum = 0; + int ops = 0; + long beforeTime = System.currentTimeMillis(); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(new byte[dataSize]); + + try { + while (!programDone) { + recNum = makeKey(recNum, key); + op.doOperation(key, data); + ops += 1; + nOps.incrementAndGet(); + } + } catch (Throwable e) { + e.printStackTrace(); 
+ System.exit(1); + } + } + } + + private void printStats(long currentTime) + throws DatabaseException { + + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setClear(true); + EnvironmentStats stats = env.getStats(statsConfig); + + float secs = (currentTime - beginTime) / 1000.0f; + float writesPerSec = nWrites.get() / secs; + float readsPerSec = nReads.get() / secs; + + System.out.println("\nWrites/Sec=" + + NUMBER_FORMAT.format(writesPerSec) + + " Reads/Sec=" + + NUMBER_FORMAT.format(readsPerSec) + + " CacheSize=" + + INT_FORMAT.format(stats.getCacheTotalBytes()) + + " DataSize=" + + INT_FORMAT.format(stats.getDataBytes()) + + " AdminSize=" + + INT_FORMAT.format(stats.getAdminBytes()) + + " LockSize=" + + INT_FORMAT.format(stats.getLockBytes()) + + " NEvictPasses=" + + INT_FORMAT.format(stats.getNEvictPasses()) + + " NCacheMiss=" + + INT_FORMAT.format(stats.getNCacheMiss()) + + " TotalLogSize=" + + INT_FORMAT.format(stats.getTotalLogSize())); + } +} diff --git a/test/standalone/ClosedDbEviction.java b/test/standalone/ClosedDbEviction.java new file mode 100644 index 0000000..dac16e6 --- /dev/null +++ b/test/standalone/ClosedDbEviction.java @@ -0,0 +1,789 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +import java.io.File; +import java.util.Random; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.utilint.JVMSystemUtils; + +/** + * Applications with a large number of databases, randomly open and close + * databases at any time when needed. The mapping tree nodes (roots) in closed + * databases won't be evicted from cache immediately. As the applications run + * over time, this could cause a lot of waste in cache or even bad performance + * and OutOfMemoryError if cache overflows. + * + * We want to simulate such a scenario to test the efficiency of eviction of + * closed databases for SR 13415, to make sure that the eviction would not + * cause corruption or concurrency bugs: + * + Ensure that concurrency bugs don't occur when multiple threads are trying + * to close, evict and open a single database. + * + Another potential problem is that the database doesn't open correctly + * after being closed and evicted; + * + Cache budgeting is not done correctly during eviction or re-loading of + * the database after eviction. 
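+ *
+ * Example invocations, using the default counts as hypothetical values
+ * (see the usage text in start() for the full argument list):
+ *
+ *   java ClosedDbEviction -h tmp -init 100000
+ *   java ClosedDbEviction -h tmp -contention 100000000 -recovery 1000000
+ *   java ClosedDbEviction -h tmp -evict 100000000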
+ * + */ +public class ClosedDbEviction { + private static int nDataAccessDbs = 1; + private static int nRegularDbs = 100000; + private static int nDbRecords = 100; + private static int nInitThreads = 8; + private static int nContentionThreads = 4; + private static int nDbsPerSet = 5; + private static int nKeepOpenedDbs = 100; + private static int subDir = 3; + private static boolean offHeap = false; + private static int nOps[] = new int[nContentionThreads]; + private static long nTxnPerRecovery = 1000000l; + private static long nTotalTxns = 100000000l; + private static boolean verbose = false; + private static boolean init = false; + private static boolean contention = false; + private static boolean evict = false; + private static boolean recovery = false; + private static boolean runDataAccessThread = true; + private static String homeDir = "./tmp"; + private static Environment env = null; + private static Database dataAccessDb = null; + private static Database metadataDb = null; + private static Database[] openDbList = new Database[nKeepOpenedDbs]; + private static Random random = new Random(); + private static Runtime rt = Runtime.getRuntime(); + + public static void main(String[] args) { + try { + ClosedDbEviction eviction = new ClosedDbEviction(); + eviction.start(args); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + /* Output command-line input arguments to log. */ + private void printArgs(String[] args) { + System.out.print("\nCommand line arguments:"); + for (String arg : args) { + System.out.print(' '); + System.out.print(arg); + } + System.out.println(); + } + + void start(String[] args) { + try { + if (args.length == 0) { + throw new IllegalArgumentException(); + } + + /* Parse command-line input arguments. */ + for (int i = 0; i < args.length; i++) { + String arg = args[i]; + String arg2 = (i < args.length - 1) ? 
args[i + 1] : null; + if (arg.equals("-v")) { + verbose = true; + } else if (arg.equals("-h")) { + if (arg2 == null) { + throw new IllegalArgumentException(arg); + } + homeDir = args[++i]; + } else if (arg.equals("-init")) { + if (arg2 == null) { + throw new IllegalArgumentException(arg); + } + try { + nRegularDbs = Integer.parseInt(args[++i]); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(arg2); + } + init = true; + } else if (arg.equals("-contention")) { + if (arg2 == null) { + throw new IllegalArgumentException(arg); + } + try { + nTotalTxns = Long.parseLong(args[++i]); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(arg2); + } + contention = true; + } else if (arg.equals("-evict")) { + if (arg2 == null) { + throw new IllegalArgumentException(arg); + } + try { + nTotalTxns = Long.parseLong(args[++i]); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(arg2); + } + evict = true; + } else if (arg.equals("-recovery")) { + if (arg2 == null) { + throw new IllegalArgumentException(arg); + } + try { + nTxnPerRecovery = Long.parseLong(args[++i]); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(arg2); + } + recovery = true; + } else if (arg.equals("-subDir")) { + if (arg2 == null) { + throw new IllegalArgumentException(arg); + } + try { + subDir = Integer.parseInt(args[++i]); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(arg2); + } + } else if (arg.equals("-offheap")) { + if (arg2 == null) { + throw new IllegalArgumentException(arg); + } + offHeap = Boolean.parseBoolean(args[++i]); + } else { + throw new IllegalArgumentException(arg); + } + } + /* Correctness self-check: nTotalTxns >= nTxnPerRecovery. */ + if (nTotalTxns < nTxnPerRecovery) { + System.err.println + ("ERROR: argument should be larger than " + + nTxnPerRecovery + "!"); + System.exit(1); + } + printArgs(args); + } catch (IllegalArgumentException e) { + System.out.println + ("Usage: ClosedDbEviction [-v] -h -init \n" + + "Usage: ClosedDbEviction [-v] -h " + + "[-contention | -evict ] " + + "[-recovery ]"); + e.printStackTrace(); + System.exit(1); + } + + try { + if (init) { + doInit(); + } else if (contention) { + doContention(); + } else if (evict) { + doEvict(); + } else { + System.err.println("No such argument."); + System.exit(1); + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + /** + * Initialize nRegularDBs, one dataAccessDb and one metadataDB. + */ + private void doInit() { + + class InitThread extends Thread { + public int id; + private Environment env = null; + private Database db = null; + + /** + * Constructor used for initializing databases. + */ + InitThread(int id, Environment env) { + this.id = id; + this.env = env; + } + + public void run() { + try { + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; + i <= ((nRegularDbs + nDataAccessDbs) / nInitThreads); + i++) { + + int dbId = id + (i * nInitThreads); + int totalRecords = nDbRecords; + boolean isDataAccessDb = false; + String dbName = "db" + dbId; + dbConfig.setDeferredWrite(dbId <= (nRegularDbs / 10)); + if (dbId >= nRegularDbs) { + if (dbId < (nRegularDbs + nDataAccessDbs)) { + isDataAccessDb = true; + dbName = "dataAccessDb"; + totalRecords = 10 * nDbRecords; + } else { + break; + } + } + /* Open the database. 
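+                     * Roughly the first tenth of the dbIds were marked
+                     * deferred-write above; the rest are ordinary databases.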
*/ + db = env.openDatabase(null, dbName, dbConfig); + /* Insert totalRecords into database. */ + for (int j = 0; j < totalRecords; j++) { + key.setData(Integer.toString(j).getBytes("UTF-8")); + makeData(data, j, isDataAccessDb); + OperationStatus status = db.put(null, key, data); + if (status != OperationStatus.SUCCESS) { + System.err.println + ("ERROR: failed to insert the #" + j + + " key/data pair into " + + db.getDatabaseName()); + System.exit(1); + } + } + db.close(); + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + /** + * Generate the data. nDataAccessDbs should have a bigger size of + * data entry; regularDbs only make data entry equal to + * (index + "th-dataEntry"). + */ + private void makeData(DatabaseEntry data, + int index, + boolean isDataAccessDb) throws Exception { + + assert (data != null) : "makeData: Null data pointer"; + + if (isDataAccessDb) { + byte[] bytes = new byte[1024]; + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (byte) i; + } + data.setData(bytes); + } else { + data.setData((Integer.toString(index) + "th-dataEntry"). + getBytes("UTF-8")); + } + } + } + + /* + * Initialize "nRegularDbs" regular Dbs, one dataAccessDb and one + * metaDataDb according to these rules: + * - The "nRegularDBs" databases, with the dbIds range from + * 0 to (nRegularDBs - 1). Each of them would have "nDbRecords". + * - 10% of all "nRegularDBs" are deferredWrite databases. + * - 90% of all "nRegularDBs" are regular databases. + * - The dataAccessDb has "10 * nDbRecords" key/data pairs. + * - The metaDataDb is to save "nRegularDbs" info for contention test. + */ + try { + openEnv(128 * 1024 * 1024); + saveMetadata(); + InitThread[] threads = new InitThread[nInitThreads]; + long startTime = System.currentTimeMillis(); + for (int i = 0; i < threads.length; i++) { + InitThread t = new InitThread(i, env); + t.start(); + threads[i] = t; + } + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + long endTime = System.currentTimeMillis(); + if (verbose) { + float elapsedSeconds = (endTime - startTime) / 1000f; + float throughput = (nRegularDbs * nDbRecords) / elapsedSeconds; + System.out.println + ("\nInitialization Statistics Report" + + "\n Run starts at: " + (new java.util.Date(startTime)) + + ", finishes at: " + (new java.util.Date(endTime)) + + "\n Initialized " + nRegularDbs + " databases, " + + "each contains " + nDbRecords + " records." + + "\n Elapsed seconds: " + elapsedSeconds + + ", throughput: " + throughput + " ops/sec."); + } + closeEnv(); + } catch (DatabaseException de) { + de.printStackTrace(); + System.exit(1); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + /** + * Simulate some contentions to make sure that the eviction would not + * cause corruption or concurrency bugs. + */ + private void doContention() { + + class ContentionThread extends Thread { + public int id; + private float dataCheckPossibility = .01f; + private long txns; + private boolean done = false; + private Database currentDb = null; + private Database lastOpenedDb = null; + + /** + * Constructor used for initializing databases. + */ + ContentionThread(int id, long txns) { + this.id = id; + this.txns = txns; + } + + public void run() { + try { + + /* Start dataAccessThread here. */ + startDataAccessor(); + + /* + * All contention threads try to open "nDbsPerSet" DBs + * from the same set concurrently. 
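+                     * Each thread keeps at most one handle open between
+                     * iterations (lastOpenedDb), so closed databases are
+                     * constantly becoming eviction candidates while other
+                     * threads race to reopen them.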
+ */ + while (!done) { + int dbId = random.nextInt(nDbsPerSet); + currentDb = env.openDatabase(null, "db" + dbId, null); + if (lastOpenedDb != null) { + lastOpenedDb.close(); + } + lastOpenedDb = currentDb; + if (random.nextFloat() <= dataCheckPossibility) { + verifyData(); + } + nOps[id]++; + if (nOps[id] > txns) { + if (lastOpenedDb != null) { + lastOpenedDb.close(); + } + done = true; + } + } + + /* Stop dataAccessThread here. */ + stopDataAccessor(); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + private void startDataAccessor() { + runDataAccessThread = true; + } + + private void stopDataAccessor() { + runDataAccessThread = false; + } + + /** + * Do the corruption check: just check that the data + * that is present looks correct. + */ + private void verifyData() throws Exception { + long dbCount = currentDb.count(); + if (dbCount != nDbRecords) { + System.err.println + ("WARNING: total records in " + + currentDb.getDatabaseName() + ": " + dbCount + + " doesn't meet the expected value: " + nDbRecords); + System.exit(1); + } else { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < nDbRecords; i++) { + key.setData(Integer.toString(i).getBytes("UTF-8")); + OperationStatus status = + currentDb.get(null, key, data, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + System.err.println + ("ERROR: failed to retrieve the #" + + i + " key/data pair from " + + currentDb.getDatabaseName()); + System.exit(1); + } else if (!(new String(data.getData(), "UTF-8")). + equals((Integer.toString(i) + + "th-dataEntry"))) { + System.err.println + ("ERROR: current key/data pair: " + i + + "/" + (new String(data.getData(), "UTF-8")) + + " doesn't match the expected: " + + i + "/" + i +"th-dataEntry in " + + currentDb.getDatabaseName()); + System.exit(1); + } + } + } + } + } + + class DataAccessThread extends Thread { + + public void run() { + try { + while (runDataAccessThread) { + /* Access records to fill up cache. */ + DatabaseEntry key = new DatabaseEntry(); + key.setData(Integer. + toString(random.nextInt(10 * nDbRecords)). + getBytes("UTF-8")); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = + dataAccessDb.get(null, key, data, + LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + System.err.println + ("ERROR: failed to retrieve the #" + + new String(key.getData(), "UTF-8") + + " key/data pair from dataAccessDb."); + System.exit(1); + } + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + } + + /* + * Simulate some contentions according to following rules: + * - Several threads try to open/close a set of databases repeatedly. + * - The other thread will continually access records from dataAccessDb + * to fill up cache. + */ + try { + long startTime = System.currentTimeMillis(); + long txns = nTotalTxns; + if (recovery) { + txns = nTxnPerRecovery; + } + for (int loop = 0; loop < nTotalTxns / txns; loop++) { + /* Clear nOps[] before each run starts. 
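+                 * With -recovery, the workload is split into
+                 * nTotalTxns / nTxnPerRecovery runs and the environment is
+                 * reopened for each one, so recovery itself is exercised
+                 * under contention.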
*/ + for (int i = 0; i < nContentionThreads; i++) { + nOps[i] = 0; + } + openEnv(1024 * 1024); + readMetadata(); + DataAccessThread dat = new DataAccessThread(); + ContentionThread[] threads = + new ContentionThread[nContentionThreads]; + for (int i = 0; i < threads.length; i++) { + ContentionThread t = + new ContentionThread(i, txns); + t.start(); + threads[i] = t; + } + dat.start(); + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + dat.join(); + if (!checkStats(txns)) { + System.err.println + ("doContention: stats check failed."); + System.exit(1); + } + closeEnv(); + } + long endTime = System.currentTimeMillis(); + float elapsedSecs = (endTime - startTime) / 1000f; + float throughput = nTotalTxns / elapsedSecs; + if (verbose) { + System.out.println + ("\nContention Test Statistics Report" + + "\n Starts at: " + (new java.util.Date(startTime)) + + ", Finishes at: " + (new java.util.Date(endTime)) + + "\n Total operations: " + nTotalTxns + + ", Elapsed seconds: " + elapsedSecs + + ", Throughput: " + throughput + " ops/sec."); + } + } catch (DatabaseException de) { + de.printStackTrace(); + System.exit(1); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + private void doEvict() { + final int offset = random.nextInt(nRegularDbs - nKeepOpenedDbs); + + class EvictThread extends Thread { + public int id; + private float dataAccessPossibility = .01f; + private long txns = 0; + private Database currentDb = null; + private Database lastOpenedDb = null; + + /** + * Constructor. + */ + public EvictThread(int id, long txns) { + this.id = id; + this.txns = txns; + } + + public void run() { + try { + int dbId; + boolean done = false; + while (!done) { + dbId = random.nextInt(nRegularDbs); + if ((0 <= (dbId - offset)) && + ((dbId - offset) < nKeepOpenedDbs)) { + + /* + * Randomly select nKeepOpenedDbs databases opened + * in a time. The dbId ranges from to + * . + */ + if (openDbList[dbId - offset] == null) { + openDbList[dbId - offset] = + env.openDatabase(null, "db" + dbId, null); + } + } else { + /* Each thread select randomly from all DBs. */ + currentDb = + env.openDatabase(null, "db" + dbId, null); + if (random.nextFloat() < dataAccessPossibility) { + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + key.setData(Integer.toString + (random.nextInt(nDbRecords)). + getBytes("UTF-8")); + currentDb.get(null, key, data, + LockMode.DEFAULT); + } + if (lastOpenedDb != null) { + lastOpenedDb.close(); + } + lastOpenedDb = currentDb; + } + nOps[id]++; + if (nOps[id] > txns) { + if (lastOpenedDb != null) { + lastOpenedDb.close(); + } + /* Close nKeepOpenedDbs before exit. */ + for (int i = 0; i < nKeepOpenedDbs; i++) { + currentDb = openDbList[i]; + if (currentDb != null) { + currentDb.close(); + openDbList[i] = null; + } + } + done = true; + } + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + } + + /* + * Simulate some contentions according to following rules: + * - Several threads try to open/close a set of databases repeatedly. + * - The other thread will continually access records from dataAccessDb + * to fill up cache. + */ + try { + long startTime = System.currentTimeMillis(); + long txns = nTotalTxns; + if (recovery) { + txns = nTxnPerRecovery; + } + for (int loop = 0; loop < nTotalTxns / txns; loop++) { + /* Clear nOps[] before each run starts. */ + for (int i = 0; i < nContentionThreads; i++) { + nOps[i] = 0; + } + /* When using Zing JDK, the cache size should be increased. 
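+                 * (presumably because JE's memory budget charges larger
+                 * per-object overheads on Zing, so the same Btree needs a
+                 * bigger cache to make comparable eviction progress)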
*/ + if (JVMSystemUtils.ZING_JVM) { + openEnv(8 * 1024 * 1024); + } else { + openEnv(512 * 1024); + } + readMetadata(); + EvictThread[] threads = new EvictThread[nContentionThreads]; + for (int i = 0; i < threads.length; i++) { + EvictThread t = new EvictThread(i, txns); + t.start(); + threads[i] = t; + } + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + if (!checkStats(txns)) { + System.err.println("doEvict: stats check failed."); + System.exit(1); + } + closeEnv(); + } + long endTime = System.currentTimeMillis(); + if (verbose) { + float elapsedSeconds = (endTime - startTime) / 1000f; + float throughput = nTotalTxns / elapsedSeconds; + System.out.println + ("\nEviction Test Statistics Report" + + "\n Run starts at: " + (new java.util.Date(startTime)) + + ", finishes at: " + (new java.util.Date(endTime)) + + "\n Total operations: " + nTotalTxns + + ", Elapsed seconds: " + elapsedSeconds + + ", Throughput: " + throughput + " ops/sec."); + } + } catch (DatabaseException de) { + de.printStackTrace(); + System.exit(1); + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + /** + * Open an Environment. + */ + private void openEnv(long cacheSize) throws DatabaseException { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setCacheSize(cacheSize); + if (offHeap) { + /* Do not reduce main cache size, test will run too slowly. */ + envConfig.setOffHeapCacheSize(cacheSize); + } + if (subDir > 0) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, subDir + ""); + Utils.createSubDirs(new File(homeDir), subDir, true); + } + env = new Environment(new File(homeDir), envConfig); + if (contention) { + dataAccessDb = env.openDatabase(null, "dataAccessDb", null); + } + } + + /** + * Check to see if stats looks correct. + */ + private boolean checkStats(long txns) throws DatabaseException { + + /* Get EnvironmentStats numbers. */ + StatsConfig statsConfig = new StatsConfig(); + statsConfig.setFast(true); + statsConfig.setClear(true); + EnvironmentStats stats = env.getStats(statsConfig); + long evictedINs = stats.getNNodesExplicitlyEvicted(); + long evictedRoots = stats.getNRootNodesEvicted(); + long dataBytes = stats.getDataBytes(); + /* Check the eviction of INs and ROOTs actually happens. */ + boolean nodesCheck = (evictedINs > 0); + boolean rootsCheck = (evictedRoots > 0); + if (verbose) { + System.out.printf + ("\n\tEviction Statistics(calc txns: %d)%n" + + " Data Pass/Fail%n" + + " ---------- ---------%n" + + "EvictedINs: %10d %9S%n" + + "EvictedRoots:%10d %9S%n" + + "DataBytes: %10d%n" + + "jvm.maxMem: %10d%n" + + "jvm.freeMem: %10d%n" + + "jvm.totlMem: %10d%n", + txns, evictedINs, (nodesCheck ? "PASS" : "FAIL"), + evictedRoots, (rootsCheck ? "PASS" : "FAIL"), + dataBytes, rt.maxMemory(), rt.freeMemory(), rt.totalMemory()); + System.out.println + ("The test criteria: EvictedINs > 0, EvictedRoots > 0."); + } + + return nodesCheck && rootsCheck; + } + + /** + * Close the Databases and Environment. + */ + private void closeEnv() throws DatabaseException { + + if (dataAccessDb != null) { + dataAccessDb.close(); + } + if (env != null) { + env.close(); + } + } + + /** + * Store meta-data information into metadataDb. + */ + private void saveMetadata() throws Exception { + + /* Store meta-data information into one additional database. 
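+         * Persisting nRegularDbs lets the later -contention and -evict runs,
+         * which open the environment fresh, recover the database count via
+         * readMetadata() instead of taking it on the command line.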
*/ + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + metadataDb = env.openDatabase(null, "metadataDb", dbConfig); + OperationStatus status = + metadataDb.put(null, + new DatabaseEntry("nRegularDbs".getBytes("UTF-8")), + new DatabaseEntry(Integer. + toString(nRegularDbs). + getBytes("UTF-8"))); + if (status != OperationStatus.SUCCESS) { + System.err.println + ("Not able to save info into the metadata database."); + System.exit(1); + } + metadataDb.close(); + } + + /** + * Retrieve meta-data information from metadataDb. + */ + private void readMetadata() throws Exception { + + /* Retrieve meta-data information from metadataDB. */ + metadataDb = env.openDatabase(null, "metadataDb", null); + DatabaseEntry key = new DatabaseEntry("nRegularDbs".getBytes("UTF-8")); + DatabaseEntry data = new DatabaseEntry(); + OperationStatus status = + metadataDb.get(null, key, data, LockMode.DEFAULT); + if (status != OperationStatus.SUCCESS) { + System.err.println + ("Couldn't retrieve info from the metadata database."); + System.exit(1); + } + nRegularDbs = Integer.parseInt(new String (data.getData(), "UTF-8")); + metadataDb.close(); + } +} diff --git a/test/standalone/DeadlockStress.java b/test/standalone/DeadlockStress.java new file mode 100644 index 0000000..3ba6d33 --- /dev/null +++ b/test/standalone/DeadlockStress.java @@ -0,0 +1,1455 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Random; +import java.util.concurrent.CountDownLatch; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.LockConflictException; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; + +/** + * Application to simulate different deadlock scenarios. + * + * The simple scenario: + * Two threads access two records in opposite order with their own txns. + */ +public class DeadlockStress { + + private String homeDir = "./tmp"; + private Environment env = null; + private Database db; + private int dbSize = 100; + private int totalTxns = 1000; + private int factor = 100; + private int maxRetry = 100; + /* The number of operations in each Txn in mix access mode. */ + private int opNum = 5; + /* + * The number of threads used in mix access mode. It should be divided + * by dbSize. + */ + private int threadNum = 20; + + /* The run time for mix access mode. 
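+     * Defaults to 5 * 60 * 1000 ms, i.e. five minutes; the trailing
+     * "10minutes" comment below is stale.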
*/ + private long runtime = 5 * 60 * 1000; // 10minutes + + boolean verbose = true; + + private CountDownLatch startSignal; + + private boolean deadlockDone = false; + + + void openEnv() { + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + /* + envConfig.setConfigParam + (EnvironmentParams.LOCK_TIMEOUT.getName(), "1000 ms"); + */ + try { + File envHome = new File(homeDir); + env = new Environment(envHome, envConfig); + } catch (Error e) { + e.printStackTrace(); + System.exit(1); + } + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + db = env.openDatabase(null, "testDB", dbConfig); + } + + void closeEnv() { + try { + + if (db != null) { + db.close(); + } + + if (env != null) { + env.close(); + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } + + public static void main(String args[]){ + try { + DeadlockStress test = new DeadlockStress(); + test.parseArgs(args); + test.run(); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(1); + } + } + + /* Output command-line input arguments to log. */ + private void printArgs(String[] args) { + System.out.print("\nCommand line arguments:"); + for (String arg : args) { + System.out.print(' '); + System.out.print(arg); + } + System.out.println(); + } + + protected void parseArgs(String args[]) + throws Exception { + + for (int i = 0; i < args.length; i++) { + boolean moreArgs = i < args.length - 1; + if (args[i].equals("-h") && moreArgs) { + homeDir = args[++i]; + } else if (args[i].equals("-dbSize") && moreArgs) { + dbSize = Integer.parseInt(args[++i]); + } else if (args[i].equals("-totalTxns") && moreArgs) { + totalTxns = Integer.parseInt(args[++i]); + } else if (args[i].equals("-retry") && moreArgs) { + maxRetry = Integer.parseInt(args[++i]); + } else if (args[i].equals("-opnum") && moreArgs) { + opNum = Integer.parseInt(args[++i]); + } else if (args[i].equals("-threads") && moreArgs) { + threadNum = Integer.parseInt(args[++i]); + } else if (args[i].equals("-time") && moreArgs) { + runtime = Integer.parseInt(args[++i]); + } else if (args[i].equals("-verbose") && moreArgs ) { + verbose = Boolean.parseBoolean(args[++i]); + } else { + usage("Error: Unknown arg: " + args[i]); + } + } + + printArgs(args); + } + + + private void usage(String error) { + + if (error != null) { + System.err.println(error); + } + System.err.println + ("java " + getClass().getName() + '\n' + + " [-h ] [-dbsize] [-totalTxns]\n"); + System.exit(1); + } + + public void run() + throws Exception { + openEnv(); + insertRecords(); + compareExceptionMessFoDebug(); + doTwoThreadsDeadlock(); + doTwoThreadsNoInteraction(); + doTwoThreadsPartInteraction(); + doThreeThreadsDeadlock(); + doThreeThreadsNoInteraction(); + doThreeThreadsPartInteraction(); + doDeadlockOnOneRecord(); + noDeadlockOnOneRecord(); + doDeadlockOneCommonLocker(); + doDeadlockTwoCommonLockers(); + doMixedOperationWithDeadlock(); + doMixedOperationSortedToNoDeadlock(); + doMixedOperationNoInteraction(); + doMixedOperationWithDeadlockSecondary(); + doMixedOperationSortedToNoDeadlockSecondary(); + doMixedOperationNoInteractionSecondary(); + closeEnv(); + } + + private void insertRecords() + throws Exception, InterruptedException { + + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + for (int i = 0; i < dbSize; i++) { + 
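+            /* Key and data are both the integer record number i. */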
IntegerBinding.intToEntry(i, key); + IntegerBinding.intToEntry(i, data); + db.put(null, key, data); + } + } + + public void compareExceptionMessFoDebug() + throws InterruptedException { + System.out.println("Compare Exception content"); + startSignal = new CountDownLatch(1); + + AccessThreadBreakWhenDeadlock thread1 = + new AccessThreadBreakWhenDeadlock(1,1,2,-1,false); + AccessThreadBreakWhenDeadlock thread2 = + new AccessThreadBreakWhenDeadlock(2,2,1,-1,false); + + thread1.start(); + thread2.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + } + + public void doTwoThreadsDeadlock() + throws InterruptedException { + System.out.println("Deadlock between two threads"); + startSignal = new CountDownLatch(1); + + AccessThread thread1 = new AccessThread(1,1,2,-1,false); + AccessThread thread2 = new AccessThread(2,2,1,-1,false); + thread1.start(); + thread2.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + } + + public void doTwoThreadsNoInteraction() + throws InterruptedException { + System.out.println("Two threads do not have any interaction"); + startSignal = new CountDownLatch(1); + totalTxns = factor * totalTxns; + + AccessThread thread1 = new AccessThread(1,1,2,-1,false); + AccessThread thread2 = new AccessThread(2,3,4,-1,false); + thread1.start(); + thread2.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + totalTxns = totalTxns / factor; + } + + public void doTwoThreadsPartInteraction() + throws InterruptedException { + System.out.println("Two threads have part interaction"); + startSignal = new CountDownLatch(1); + totalTxns = factor * totalTxns; + + AccessThread thread1 = new AccessThread(1,1,2,-1,false); + AccessThread thread2 = new AccessThread(2,3,1,-1,false); + thread1.start(); + thread2.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + totalTxns = totalTxns / factor; + } + + public void doThreeThreadsDeadlock() + throws InterruptedException { + + System.out.println("Deadlock between three threads"); + startSignal = new CountDownLatch(1); + + AccessThread thread1 = new AccessThread(1,1,2,-1,false); + AccessThread thread2 = new AccessThread(2,2,3,-1,false); + AccessThread thread3 = new AccessThread(3,3,1,-1,false); + thread1.start(); + thread2.start(); + thread3.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + thread3.join(); + } + + public void doThreeThreadsNoInteraction() + throws InterruptedException { + + System.out.println("Three threads do not have any interaction"); + startSignal = new CountDownLatch(1); + totalTxns = factor * totalTxns; + + AccessThread thread1 = new AccessThread(1,1,2,-1,false); + AccessThread thread2 = new AccessThread(2,3,4,-1,false); + AccessThread thread3 = new AccessThread(3,5,6,-1,false); + thread1.start(); + thread2.start(); + thread3.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + thread3.join(); + totalTxns = totalTxns / factor; + } + + public void doThreeThreadsPartInteraction() + throws InterruptedException { + + System.out.println("Three threads have part interaction"); + startSignal = new CountDownLatch(1); + totalTxns = factor * totalTxns; + + AccessThread thread1 = new AccessThread(1,1,2,-1,false); + AccessThread thread2 = new AccessThread(2,1,3,-1,false); + AccessThread thread3 = new AccessThread(3,4,1,-1,false); + thread1.start(); + thread2.start(); + thread3.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + thread3.join(); + totalTxns = 
totalTxns / factor; + } + + public void doDeadlockOnOneRecord() + throws InterruptedException { + System.out.println("Deadlock formed on one record"); + startSignal = new CountDownLatch(1); + + AccessThread thread1 = new AccessThread(1,1,1,-1,true); + AccessThread thread2 = new AccessThread(2,1,1,-1,true); + thread1.start(); + thread2.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + } + + public void noDeadlockOnOneRecord() + throws InterruptedException { + System.out.println("No Deadlock formed on one record"); + startSignal = new CountDownLatch(1); + totalTxns = factor * totalTxns; + + AccessThread thread1 = new AccessThread(1,1,1,-1,false); + AccessThread thread2 = new AccessThread(2,1,1,-1,false); + thread1.start(); + thread2.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + totalTxns = totalTxns / factor; + } + + public void doDeadlockOneCommonLocker() + throws InterruptedException { + + System.out.println("Deadlock with one common locker"); + startSignal = new CountDownLatch(1); + + AccessThread thread1 = new AccessThread(1,1,2,-1,true); + AccessThread thread2 = new AccessThread(2,3,2,1,false); + AccessThread thread3 = new AccessThread(3,1,3,-1,true); + thread1.start(); + thread2.start(); + thread3.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + thread3.join(); + } + + public void doDeadlockTwoCommonLockers() + throws InterruptedException { + + System.out.println("Deadlock with two common lockers"); + startSignal = new CountDownLatch(1); + + AccessThread thread1 = new AccessThread(1,1,2,-1,true); + AccessThread thread2 = new AccessThread(2,4,2,3,false); + AccessThread thread3 = new AccessThread(3,3,1,-1,false); + AccessThread thread4 = new AccessThread(4,1,4,-1,true); + thread1.start(); + thread2.start(); + thread3.start(); + thread4.start(); + + startSignal.countDown(); + + thread1.join(); + thread2.join(); + thread3.join(); + thread4.join(); + } + + public void doMixedOperationWithDeadlock() { + System.out.println("Mix access mode with possible Deadlock"); + int[] distribution = new int[] {25, 25, 25, 25}; + MixedAccessThread[] mixedThreads = new MixedAccessThread[threadNum]; + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i] = + new MixedAccessThread(i, distribution, false, false); + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].start(); + } + + try { + Thread.sleep(runtime); + } catch (InterruptedException e) { + + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].setDone(true); + } + + for (int i = 0; i < mixedThreads.length; i++) { + try { + mixedThreads[i].join(); + } catch (InterruptedException e) { + + } + } + } + + public void doMixedOperationSortedToNoDeadlock() { + System.out.println("Mix access mode sorted to no deadlock"); + int[] distribution = new int[] {25, 25, 25, 25}; + MixedAccessThread[] mixedThreads = new MixedAccessThread[threadNum]; + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i] = + new MixedAccessThread(i, distribution, true, false); + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].start(); + } + + try { + Thread.sleep(runtime); + } catch (InterruptedException e) { + + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].setDone(true); + } + + for (int i = 0; i < mixedThreads.length; i++) { + try { + mixedThreads[i].join(); + } catch (InterruptedException e) { + + } + } + } + + public void doMixedOperationNoInteraction() { + 
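+        /* With noInteraction set, each thread draws keys only from its own
+         * disjoint slice of the key space (see generateKeyInt), so no lock
+         * conflicts, and hence no deadlocks, are expected. */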
System.out.println("Mix access mode no interaction to no deadlock"); + int[] distribution = new int[] {25, 25, 25, 25}; + MixedAccessThread[] mixedThreads = new MixedAccessThread[threadNum]; + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i] = + new MixedAccessThread(i, distribution, false, true); + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].start(); + } + + try { + Thread.sleep(runtime); + } catch (InterruptedException e) { + + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].setDone(true); + } + + for (int i = 0; i < mixedThreads.length; i++) { + try { + mixedThreads[i].join(); + } catch (InterruptedException e) { + + } + } + } + + public void doMixedOperationWithDeadlockSecondary() { + System.out.println("Mix access mode with possible Deadlock Secondary"); + int[] distribution = new int[] {25, 25, 25, 25}; + SecondaryAccessThread[] mixedThreads = + new SecondaryAccessThread[threadNum]; + + SecondaryDatabase sdb = + openSecondary(env, db, "secDb", new SecondaryConfig()); + + boolean secondary; + Database usedDb; + + for (int i = 0; i < mixedThreads.length; i++) { + if ( i % 2 == 0) { + secondary = false; + usedDb = db; + } else { + secondary = true; + usedDb = sdb; + } + mixedThreads[i] = new SecondaryAccessThread( + i, distribution, false, false, secondary, usedDb); + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].start(); + } + + try { + Thread.sleep(runtime); + } catch (InterruptedException e) { + + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].setDone(true); + } + + for (int i = 0; i < mixedThreads.length; i++) { + try { + mixedThreads[i].join(); + } catch (InterruptedException e) { + + } + } + + if (sdb != null) { + sdb.close(); + } + } + + public void doMixedOperationSortedToNoDeadlockSecondary() { + System.out.println("Mix access mode sorted to no deadlock Secondary"); + int[] distribution = new int[] {25, 25, 25, 25}; + SecondaryAccessThread[] mixedThreads = + new SecondaryAccessThread[threadNum]; + + SecondaryDatabase sdb = + openSecondary(env, db, "secDb", new SecondaryConfig()); + + boolean secondary; + Database usedDb; + + for (int i = 0; i < mixedThreads.length; i++) { + if ( i % 2 == 0) { + secondary = false; + usedDb = db; + } else { + secondary = true; + usedDb = sdb; + } + mixedThreads[i] = new SecondaryAccessThread( + i, distribution, true, false, secondary, usedDb); + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].start(); + } + + try { + Thread.sleep(runtime); + } catch (InterruptedException e) { + + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].setDone(true); + } + + for (int i = 0; i < mixedThreads.length; i++) { + try { + mixedThreads[i].join(); + } catch (InterruptedException e) { + + } + } + + if (sdb != null) { + sdb.close(); + } + } + + public void doMixedOperationNoInteractionSecondary() { + System.out.println("Mix access mode no interaction Secondary"); + int[] distribution = new int[] {25, 25, 25, 25}; + SecondaryAccessThread[] mixedThreads = + new SecondaryAccessThread[threadNum]; + + SecondaryDatabase sdb = + openSecondary(env, db, "secDb", new SecondaryConfig()); + + boolean secondary; + Database usedDb; + + for (int i = 0; i < mixedThreads.length; i++) { + if ( i % 2 == 0) { + secondary = false; + usedDb = db; + } else { + secondary = true; + usedDb = sdb; + } + mixedThreads[i] = new SecondaryAccessThread( + i, distribution, false, true, secondary, usedDb); + } + + for (int i = 0; i < 
mixedThreads.length; i++) { + mixedThreads[i].start(); + } + + try { + Thread.sleep(runtime); + } catch (InterruptedException e) { + + } + + for (int i = 0; i < mixedThreads.length; i++) { + mixedThreads[i].setDone(true); + } + + for (int i = 0; i < mixedThreads.length; i++) { + try { + mixedThreads[i].join(); + } catch (InterruptedException e) { + + } + } + + if (sdb != null) { + sdb.close(); + } + } + + private SecondaryDatabase openSecondary( + Environment env, + Database priDb, + String dbName, + SecondaryConfig dbConfig) { + + dbConfig.setAllowPopulate(true); + dbConfig.setSortedDuplicates(true); + dbConfig.setTransactional(true); + dbConfig.setAllowCreate(true); + dbConfig.setKeyCreator(new MyKeyCreator()); + return env.openSecondaryDatabase(null, dbName, + priDb, dbConfig); + } + + class SecondaryAccessThread extends Thread { + boolean done = false; + + private int id; + private CRUDGenerator cg; + private int opsNumEachThread = dbSize / threadNum; + private boolean secondary; + private Database usedDb; + + /* + * The records involved in each Txn of different threads may contain + * the same record(s), but in order to avoid deadlock, we sort these + * records by their int key and at the same time, guarantee that in + * each txn, the int keys of records are different. + */ + private boolean sort = false; + + /* + * In order to avoid deadlock, the records involved in each Txn of + * different threads do not have intersection. + */ + private boolean noInteraction = false; + + SecondaryAccessThread(int id, + int[] distribution, + boolean sort, + boolean noInteraction, + boolean secondary, + Database usedDb) { + this.id = id; + this.sort = sort; + this.noInteraction = noInteraction; + this.secondary = secondary; + this.usedDb = usedDb; + cg = new CRUDGenerator(id, distribution); + } + + public void run() { + long startTime = System.currentTimeMillis(); + long count = 0; + while (!done) { + doOneTxnWithRetry(); + count++; + } + + long endTime = System.currentTimeMillis(); + float elapsedSec = (float) ((endTime - startTime) / 1e3); + float throughput = ((float) count) / elapsedSec; + System.out.println + ("Thread " + id + " finishes " + count + + " iterations in: " + elapsedSec + + " sec, average throughput: " + throughput + " op/sec."); + } + + @SuppressWarnings("unchecked") + public void doOneTxnWithRetry() { + ArrayList ops = new ArrayList<>(); + ArrayList keyInts = new ArrayList<>(); + + for (int i = 0; i < opNum; i++) { + int keyInt = generateKeyInt(keyInts); + DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(keyInt, key); + + CRUDTYPE op = cg.nextRandomCRUD(); + switch (op) { + case CREATE: + ops.add(new CursorCreate(key, secondary)); + break; + case READ: + ops.add(new CursorRead(key, secondary)); + break; + case UPDATE: + ops.add(new CursorUpdate(key, secondary)); + break; + case DELETE: + ops.add(new CursorDelete(key, secondary)); + break; + default: + throw new IllegalStateException("Unknown op: " + op); + } + } + + if (sort) { + final CursorOperation[] coArray = + ops.toArray(new CursorOperation[0]); + final CursorComparator cc = new CursorComparator(); + Arrays.sort(coArray, cc); + ops.clear(); + ops.addAll(Arrays.asList(coArray)); + + } + + int tries = 0; + while (tries < maxRetry) { + Transaction txn = env.beginTransaction(null, null); + Cursor c = usedDb.openCursor(txn, null); + try { + for (CursorOperation cursorOp: ops) { + cursorOp.execute(txn, c); + } + + if (c != null) { + c.close(); + } + + txn.commit(); + break; + } catch (LockConflictException e) 
{ + if (c != null) { + c.close(); + } + txn.abort(); + tries++; + } catch (OperationFailureException ofe) { + ofe.printStackTrace(); + } + + } + //if (tries == maxRetry) { + //if (tries > 0) { + // System.out.println("Thread: " + id + " Retry times: " + tries); + //} + } + + private int generateKeyInt(ArrayList keyInts) { + if (sort) { + while (true) { + boolean repeated = false; + int tmp = cg.nextRandomKeyInt(dbSize); + for (Integer I : keyInts) { + if (I.intValue() == tmp) { + repeated = true; + break; + } + } + + if (!repeated) { + keyInts.add(new Integer(tmp)); + return tmp; + } + } + } else if (noInteraction) { + int tmp = cg.nextRandomKeyInt(opsNumEachThread); + return tmp + opsNumEachThread * id; + } else { + return cg.nextRandomKeyInt(dbSize); + } + } + + public synchronized void setDone(boolean done) { + this.done = done; + } + } + + class MyKeyCreator implements SecondaryKeyCreator { + @Override + public boolean createSecondaryKey(SecondaryDatabase secondary, + DatabaseEntry key, DatabaseEntry data, DatabaseEntry result) { + result.setData(key.getData()); + + return true; + } + } + + abstract class CursorOperation { + protected final DatabaseEntry key; + protected final boolean secondary; + + public CursorOperation(DatabaseEntry key, + boolean secondary) { + this.key = key; + this.secondary = secondary; + } + + abstract OperationStatus execute(Transaction txn, Cursor c) + throws DatabaseException; + + public int getKeyInt() { + return IntegerBinding.entryToInt(key); + } + } + + /* + * Create. + * + * For secondary database, we can not create record. So we actually do + * read actions with different search mode: getSearchBothRange. + */ + class CursorCreate extends CursorOperation { + CursorCreate(DatabaseEntry key, boolean secondary) { + super(key, secondary); + } + + @Override + OperationStatus execute(Transaction txn, Cursor c) + throws DatabaseException { + if (txn.isValid()) { + if (secondary) { + return ((SecondaryCursor)c).getSearchBothRange( + key, key, new DatabaseEntry(), null); + } else { + return c.put(key, new DatabaseEntry(new byte[10])); + } + } else { + return null; + } + } + } + + /* Read */ + class CursorRead extends CursorOperation { + CursorRead(DatabaseEntry key, boolean secondary) { + super(key, secondary); + } + + @Override + OperationStatus execute(Transaction txn, Cursor c) + throws DatabaseException { + if (txn.isValid()) { + if (secondary) { + return ((SecondaryCursor)c).getSearchKey( + key, new DatabaseEntry(), new DatabaseEntry(), null); + } else { + return c.getSearchKey(key, new DatabaseEntry(), null); + } + + } else { + return null; + } + } + } + + /* + * Update. + * + * For secondary database, we can not create record. So we actually do + * read actions with different search mode: getSearchKeyRange. 
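+     * Note that the primary-database branch below also performs a plain
+     * read (getSearchKey) rather than an actual write; the operation's lock
+     * footprint is what the deadlock scenarios exercise.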
+ */ + class CursorUpdate extends CursorOperation { + CursorUpdate(DatabaseEntry key, boolean secondary) { + super(key, secondary); + } + + @Override + OperationStatus execute(Transaction txn, Cursor c) + throws DatabaseException { + if (txn.isValid()) { + if (secondary) { + return ((SecondaryCursor)c).getSearchKeyRange( + key, new DatabaseEntry(), new DatabaseEntry(), null); + } else { + return c.getSearchKey(key, new DatabaseEntry(), null); + } + } else { + return null; + } + } + } + + /* Delete */ + class CursorDelete extends CursorOperation { + CursorDelete(DatabaseEntry key, boolean secondary) { + super(key, secondary); + } + + @Override + OperationStatus execute(Transaction txn, Cursor c) + throws DatabaseException { + if (txn.isValid()) { + if (secondary) { + if (((SecondaryCursor)c).getSearchKey( + key, new DatabaseEntry(), new DatabaseEntry(), null) == + OperationStatus.SUCCESS) { + return ((SecondaryCursor)c).delete(); + } + } else { + if (c.getSearchKey(key, new DatabaseEntry(), null) == + OperationStatus.SUCCESS) { + return c.delete(); + } + } + } else { + return null; + } + return null; + } + } + + class CursorComparator implements Comparator { + @Override + public int compare (Object obj1, Object obj2) { + final CursorOperation cop1 = (CursorOperation) obj1; + final CursorOperation cop2 = (CursorOperation) obj2; + return (cop1.getKeyInt() - cop2.getKeyInt()); + } + } + + class MixedAccessThread extends Thread { + boolean done = false; + + private int id; + private CRUDGenerator cg; + private int opsNumEachThread = dbSize / threadNum; + + /* + * The records involved in each Txn of different threads may contain + * the same record(s), but in order to avoid deadlock, we sort these + * records by their int key and at the same time, guarantee that in + * each txn, the int keys of records are different. + */ + private boolean sort = false; + + /* + * In order to avoid deadlock, the records involved in each Txn of + * different threads do not have intersection. 
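+     * Each thread therefore draws keys only from its own partition (see
+     * generateKeyInt):
+     *
+     *   int key = cg.nextRandomKeyInt(opsNumEachThread)
+     *             + opsNumEachThread * id;
+     *
+     * i.e. the half-open range
+     * [id * opsNumEachThread, (id + 1) * opsNumEachThread).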
+ */ + private boolean noInteraction = false; + + MixedAccessThread(int id, + int[] distribution, + boolean sort, + boolean noInteraction) { + this.id = id; + this.sort = sort; + this.noInteraction = noInteraction; + cg = new CRUDGenerator(id, distribution); + } + + public void run() { + long startTime = System.currentTimeMillis(); + long count = 0; + while (!done) { + doOneTxnWithRetry(); + count++; + } + + long endTime = System.currentTimeMillis(); + float elapsedSec = (float) ((endTime - startTime) / 1e3); + float throughput = ((float) count) / elapsedSec; + System.out.println + ("Thread " + id + " finishes " + count + + " iterations in: " + elapsedSec + + " sec, average throughput: " + throughput + " op/sec."); + } + + @SuppressWarnings("unchecked") + public void doOneTxnWithRetry() { + ArrayList ops = new ArrayList<>(); + ArrayList keyInts = new ArrayList<>(); + + for (int i = 0; i < opNum; i++) { + int keyInt = generateKeyInt(keyInts); + DatabaseEntry key = new DatabaseEntry(); + IntegerBinding.intToEntry(keyInt, key); + + CRUDTYPE op = cg.nextRandomCRUD(); + switch (op) { + case CREATE: + ops.add(new Create(db, key)); + break; + case READ: + ops.add(new Read(db, key)); + break; + case UPDATE: + ops.add(new Update(db, key)); + break; + case DELETE: + ops.add(new Delete(db, key)); + break; + default: + throw new IllegalStateException("Unknown op: " + op); + } + } + + if (sort) { + final CRUDOperation[] coArray = + ops.toArray(new CRUDOperation[0]); + final OpsComparator oc = new OpsComparator(); + Arrays.sort(coArray, oc); + ops.clear(); + ops.addAll(Arrays.asList(coArray)); + + } + + int tries = 0; + while (tries < maxRetry) { + Transaction txn = env.beginTransaction(null, null); + try { + for (CRUDOperation crudOp: ops) { + crudOp.execute(txn); + } + txn.commit(); + break; + } catch (LockConflictException e) { + txn.abort(); + tries++; + } + + } + //if (tries == maxRetry) { + //if (tries > 0) { + // System.out.println("Thread: " + id + " Retry times: " + tries); + //} + } + + private int generateKeyInt(ArrayList keyInts) { + if (sort) { + while (true) { + boolean repeated = false; + int tmp = cg.nextRandomKeyInt(dbSize); + for (Integer I : keyInts) { + if (I.intValue() == tmp) { + repeated = true; + break; + } + } + + if (!repeated) { + keyInts.add(new Integer(tmp)); + return tmp; + } + } + } else if (noInteraction) { + int tmp = cg.nextRandomKeyInt(opsNumEachThread); + return tmp + opsNumEachThread * id; + } else { + return cg.nextRandomKeyInt(dbSize); + } + } + + public synchronized void setDone(boolean done) { + this.done = done; + } + } + + class OpsComparator implements Comparator { + @Override + public int compare (Object obj1, Object obj2) { + final CRUDOperation op1 = (CRUDOperation) obj1; + final CRUDOperation op2 = (CRUDOperation) obj2; + return (op1.getKeyInt() - op2.getKeyInt()); + } + } + + /* The type of possible operations. */ + enum CRUDTYPE { + CREATE, + READ, + UPDATE, + DELETE; + } + + class CRUDGenerator { + + int[] distribution; + int id; + Random opRandom; + + CRUDGenerator(int id, int distribution[]) { + this.id = id; + this.distribution = distribution; + opRandom = new Random(System.currentTimeMillis() * id); + + int total = 0; + for (int i = 0; i < distribution.length; i++) { + total += distribution[i]; + } + + if (total != 100) { + throw new IllegalArgumentException( + "Distribution should add to 100 not " + total); + } + } + + /* + * Returns the next random CRUDTYPE based on the current + * distribution setup. 
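+     *
+     * For example (hypothetical distribution), {0, 70, 20, 10} yields
+     * READ with probability 0.70, UPDATE with 0.20, DELETE with 0.10,
+     * and never CREATE:
+     *
+     *   CRUDGenerator cg = new CRUDGenerator(1, new int[] {0, 70, 20, 10});
+     *   CRUDTYPE op = cg.nextRandomCRUD(); // READ ~70% of the time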
+         */
+        public CRUDTYPE nextRandomCRUD() {
+            int rpercent = opRandom.nextInt(100);
+            int total = 0;
+            for (int i = 0; i < distribution.length; i++) {
+                total += distribution[i];
+                if (rpercent < total) {
+                    switch (i) {
+                    case 0:
+                        return CRUDTYPE.CREATE;
+                    case 1:
+                        return CRUDTYPE.READ;
+                    case 2:
+                        return CRUDTYPE.UPDATE;
+                    case 3:
+                        return CRUDTYPE.DELETE;
+                    }
+                }
+            }
+            throw new IllegalArgumentException(
+                "Distribution did not select an operation for: " + rpercent);
+        }
+
+        public int nextRandomKeyInt(int range) {
+            return opRandom.nextInt(range);
+        }
+    }
+
+    abstract class CRUDOperation {
+        protected final Database db;
+        protected final DatabaseEntry key;
+
+        public CRUDOperation(Database db, DatabaseEntry key) {
+            this.db = db;
+            this.key = key;
+        }
+
+        abstract OperationStatus execute(Transaction txn)
+            throws DatabaseException;
+
+        public int getKeyInt() {
+            return IntegerBinding.entryToInt(key);
+        }
+    }
+
+    /* Create */
+    class Create extends CRUDOperation {
+        Create(Database db, DatabaseEntry key) {
+            super(db, key);
+        }
+
+        @Override
+        OperationStatus execute(Transaction txn) throws DatabaseException {
+            DatabaseEntry dataEntry = new DatabaseEntry(new byte[10]);
+            if (txn.isValid()) {
+                return db.put(txn, key, dataEntry);
+            } else {
+                return null;
+            }
+        }
+    }
+
+    /* Read */
+    class Read extends CRUDOperation {
+        Read(Database db, DatabaseEntry key) {
+            super(db, key);
+        }
+
+        @Override
+        OperationStatus execute(Transaction txn) throws DatabaseException {
+            DatabaseEntry dataEntry = new DatabaseEntry();
+            if (txn.isValid()) {
+                return db.get(txn, key, dataEntry, null);
+            } else {
+                return null;
+            }
+        }
+    }
+
+    /* Update */
+    class Update extends CRUDOperation {
+        Update(Database db, DatabaseEntry key) {
+            super(db, key);
+        }
+
+        @Override
+        OperationStatus execute(Transaction txn) throws DatabaseException {
+            DatabaseEntry dataEntry = new DatabaseEntry(new byte[10]);
+            if (txn.isValid()) {
+                return db.put(txn, key, dataEntry);
+            } else {
+                return null;
+            }
+        }
+    }
+
+    /* Delete */
+    class Delete extends CRUDOperation {
+        Delete(Database db, DatabaseEntry key) {
+            super(db, key);
+        }
+
+        @Override
+        OperationStatus execute(Transaction txn) throws DatabaseException {
+            if (txn.isValid()) {
+                return db.delete(txn, key);
+            } else {
+                return null;
+            }
+        }
+    }
+
+    class AccessThread extends Thread {
+        /** The identifier of the current thread. */
+        private int id;
+
+        private int key1;
+        private int key2;
+        private int key3;
+
+        /*
+         * Determines whether to do a read access or a write access on the
+         * first record.
+         *
+         * 1. When a deadlock is formed on one record, i.e. two threads
+         *    first read and then write it, the first operation must be a
+         *    read access.
+         *
+         * 2. When two deadlock cycles involve the same locker, two threads
+         *    need to own a lock on the same record, so we also issue a read
+         *    request to let both threads own a read lock on that record.
+         */
+        boolean firstRead;
+
+        public AccessThread(
+            int id,
+            int key1,
+            int key2,
+            int key3,
+            boolean firstRead) {
+
+            this.id = id;
+            this.key1 = key1;
+            this.key2 = key2;
+            this.key3 = key3;
+            this.firstRead = firstRead;
+        }
+
+        /**
+         * This thread is responsible for executing transactions.
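+         *
+         * Each transaction accesses key1 (a read when firstRead is set,
+         * otherwise a write), then writes key2 and, when positive, key3.
+         * Two threads given their keys in opposite order, e.g. (1, 2) and
+         * (2, 1) (hypothetical values), lock records in conflicting order
+         * and can form a deadlock cycle.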
+         */
+        public void run() {
+            try {
+                startSignal.await();
+                long startTime = System.currentTimeMillis();
+                for (int op = 0; op < totalTxns; op++) {
+                    int tries = 0;
+                    while (tries < maxRetry) {
+                        Transaction txn = env.beginTransaction(null, null);
+                        try {
+                            DatabaseEntry key = new DatabaseEntry();
+                            DatabaseEntry data = new DatabaseEntry();
+                            IntegerBinding.intToEntry(key1, key);
+                            if (firstRead) {
+                                db.get(txn, key, data, LockMode.DEFAULT);
+                            } else {
+                                IntegerBinding.intToEntry(key1, data);
+                                db.put(txn, key, data);
+                            }
+
+                            IntegerBinding.intToEntry(key2, key);
+                            IntegerBinding.intToEntry(key2, data);
+                            db.put(txn, key, data);
+
+                            if (key3 > 0) {
+                                IntegerBinding.intToEntry(key3, key);
+                                IntegerBinding.intToEntry(key3, data);
+                                db.put(txn, key, data);
+                            }
+
+                            txn.commit();
+                            break;
+                        } catch (LockConflictException e) {
+                            txn.abort();
+                            tries++;
+                        }
+                    }
+                    /*
+                    if (verbose && tries > 0) {
+                        System.out.println(
+                            "Thread: " + id + " Retry times: " + tries);
+                    }
+                    */
+                }
+                long endTime = System.currentTimeMillis();
+                float elapsedSec = (float) ((endTime - startTime) / 1e3);
+                float throughput = ((float) totalTxns) / elapsedSec;
+                System.out.println
+                    ("Thread " + id + " finishes " + totalTxns +
+                     " iterations in: " + elapsedSec +
+                     " sec, average throughput: " + throughput + " op/sec.");
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    class AccessThreadBreakWhenDeadlock extends Thread {
+
+        /** The identifier of the current thread. */
+        private int id;
+
+        private int key1;
+        private int key2;
+        private int key3;
+
+        /*
+         * Determines whether to do a read access or a write access on the
+         * first record.
+         *
+         * 1. When a deadlock is formed on one record, i.e. two threads
+         *    first read and then write it, the first operation must be a
+         *    read access.
+         *
+         * 2. When two deadlock cycles involve the same locker, two threads
+         *    need to own a lock on the same record, so we also issue a read
+         *    request to let both threads own a read lock on that record.
+         */
+        boolean firstRead;
+
+        public AccessThreadBreakWhenDeadlock(
+            int id,
+            int key1,
+            int key2,
+            int key3,
+            boolean firstRead) {
+
+            this.id = id;
+            this.key1 = key1;
+            this.key2 = key2;
+            this.key3 = key3;
+            this.firstRead = firstRead;
+        }
+
+        /**
+         * This thread is responsible for executing transactions until the
+         * first deadlock is seen.
+         */
+        public void run() {
+            try {
+                startSignal.await();
+                for (int op = 0; op < totalTxns && !deadlockDone; op++) {
+                    Transaction txn = env.beginTransaction(null, null);
+                    try {
+                        DatabaseEntry key = new DatabaseEntry();
+                        DatabaseEntry data = new DatabaseEntry();
+                        IntegerBinding.intToEntry(key1, key);
+                        if (firstRead) {
+                            db.get(txn, key, data, LockMode.DEFAULT);
+                        } else {
+                            IntegerBinding.intToEntry(key1, data);
+                            db.put(txn, key, data);
+                        }
+
+                        IntegerBinding.intToEntry(key2, key);
+                        IntegerBinding.intToEntry(key2, data);
+                        db.put(txn, key, data);
+
+                        if (key3 > 0) {
+                            IntegerBinding.intToEntry(key3, key);
+                            IntegerBinding.intToEntry(key3, data);
+                            db.put(txn, key, data);
+                        }
+
+                        txn.commit();
+                    } catch (LockConflictException e) {
+                        System.out.println(e.getMessage());
+                        deadlockDone = true;
+                        txn.abort();
+                    }
+                }
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+}
diff --git a/test/standalone/DiskLimitStress.java b/test/standalone/DiskLimitStress.java
new file mode 100644
index 0000000..1491864
--- /dev/null
+++ b/test/standalone/DiskLimitStress.java
@@ -0,0 +1,718 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +import static junit.framework.Assert.assertTrue; +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Level; + +import com.sleepycat.bind.tuple.IntegerBinding; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.CheckpointConfig; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DiskLimitException; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.OperationFailureException; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.Put; +import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.rep.InsufficientAcksException; +import com.sleepycat.je.rep.InsufficientReplicasException; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.util.test.SharedTestUtils; + +/** + * Tests a full write load with a disk limit to ensure that files are deleted + * quickly enough to avoid hitting the disk limit. With HA, reserved files + * (files cleaned and ready-to-delete) are retained until the disk limit is + * approached. A steady-state update workload is used to generate waste as + * quickly as possible. + *

+ * Suggested steady-state runs:
+ * <pre>
+ *   # Test steady-state max throughput with one node.
+ *   DiskLimitStress -nodes 1 -minutes 15
+ *   # Test steady-state HA throughput.
+ *   DiskLimitStress -nodes 3 -minutes 15
+ * </pre>
+ *
+ * In addition this test periodically lowers the disk limit on one or more
+ * nodes to cause it to be violated, then restores the original limit and
+ * expects the write load to continue as before. There are three cases that
+ * are tested:
+ * <pre>
+ * 1. Master node violates disk limit, both replicas do not. No writes can
+ *    occur.
+ *
+ * 2. One replica node violates disk limit, but not the master and the other
+ *    replica. Writes can continue. The replica which violates the limit
+ *    will lag, but the test doesn't stay in this mode long enough that a
+ *    network restore is needed.
+ *
+ * 3. Both replicas violate disk limit, but not the master.
+ *    InsufficientAcksException and/or InsufficientReplicasException will be
+ *    thrown.
+ * </pre>
+ *
+ * Suggested run to test the three disk limit violation scenarios:
+ * <pre>
+ *   # Test violations of disk limit
+ *   DiskLimitStress -nodes 3 -violations true -minutes 25
+ * </pre>
        + */ +public class DiskLimitStress { + + /* + * Sizes are designed so the data set fits within MAX_DISK, but also so + * reserved files will be deleted. These sizes are used for the + * steady-state test mode (-violations false). + */ + private static final long ONE_MB = 1L << 20; + private static final long FILE_SIZE = 100 * ONE_MB; + private static final long CLEANER_BYTES_INTERVAL = 100 * ONE_MB; + private static final int DATA_SIZE = 1024; + private static final int ACTIVE_FILES = 20; + private static final int TOTAL_FILES = 30; + private static final int RECORDS = + (int) ((ACTIVE_FILES * FILE_SIZE) / DATA_SIZE); + private static final long MAX_DISK = TOTAL_FILES * FILE_SIZE; + private static final long FREE_DISK = 0; + private static final long HA_TIMEOUT_MS = 2000; + + /* + * With disk violations, use a very small data set. With a larger data + * set, there is no guarantee that all records will be updated and + * cleaned, making the use of disk space difficult to predict. + */ + private static final int VIOLATIONS_MODE_RECORDS = 1000; + + private boolean withViolations; + private long runStopTime; + private String homeDir; + private int nodes; + private int updateThreads; + private int cleanerThreads; + private int minutes; + private int records; + private volatile boolean stopFlag = false; + private final AtomicReference unexpectedEx = + new AtomicReference<>(null); + private final UpdateThread[] threads; + private RepEnvInfo[] repEnvInfo; + private Database masterDb; + private long lastStatTime; + private long lastOperations; + private long lastExceptions; + private boolean allowDiskLimitEx; + private boolean allowDurabilityEx; + private boolean expectDiskLimitEx; + private boolean expectDurabilityEx; + + public static void main(final String[] args) { + try { + printArgs(args); + final DiskLimitStress test = new DiskLimitStress(args); + test.runTest(); + System.out.println("SUCCESS"); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(-1); + } + } + + private static void printArgs(String[] args) { + System.out.print("Command line args:"); + for (String arg : args) { + System.out.print(' '); + System.out.print(arg); + } + System.out.println(); + } + + private DiskLimitStress(String[] args) + throws IOException { + + homeDir = "tmp"; + nodes = 1; + updateThreads = 4; + cleanerThreads = 2; + minutes = 10; + withViolations = false; + + for (int i = 0; i < args.length; i += 1) { + final String arg = args[i]; + final boolean moreArgs = i < args.length - 1; + if (arg.equals("-h") && moreArgs) { + homeDir = args[++i]; + } else if (arg.equals("-violations") && moreArgs) { + withViolations = Boolean.parseBoolean(args[++i]); + } else if (arg.equals("-nodes") && moreArgs) { + nodes = Integer.parseInt(args[++i]); + } else if (arg.equals("-threads") && moreArgs) { + updateThreads = Integer.parseInt(args[++i]); + } else if (arg.equals("-cleaners") && moreArgs) { + cleanerThreads = Integer.parseInt(args[++i]); + } else if (arg.equals("-minutes") && moreArgs) { + minutes= Integer.parseInt(args[++i]); + } else { + throw new IllegalArgumentException("Unknown arg: " + arg); + } + } + + threads = new UpdateThread[updateThreads]; + + records = withViolations ? 
VIOLATIONS_MODE_RECORDS : RECORDS; + + System.out.println( + "Resolved args:" + + " homeDir=" + homeDir + + " nodes=" + nodes + + " updateThreads=" + updateThreads + + " cleanerThreads=" + cleanerThreads + + " totalRecords=" + records + + " minutes=" + minutes); + } + + private void start(final boolean smallRepTimeouts) + throws IOException { + + final EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + envConfig.setMaxDisk(MAX_DISK); + envConfig.setSharedCache(true); + envConfig.setCacheMode(CacheMode.EVICT_LN); + envConfig.setCachePercent(70); + + envConfig.setConfigParam( + EnvironmentConfig.FREE_DISK, + String.valueOf(FREE_DISK)); + + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_THREADS, + String.valueOf(cleanerThreads)); + + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_BYTES_INTERVAL, + String.valueOf(CLEANER_BYTES_INTERVAL)); + + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(FILE_SIZE)); + + /* The verifier slows down the test and causes timeouts. */ + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_VERIFIER, "false"); + + final ReplicationConfig repConfig = new ReplicationConfig(); + + /* + * Use small timeouts to promptly cause InsufficientAcksException + * and InsufficientReplicasException when replicas have a disk limit + * violation, and to promptly restore normal write operations when + * the violation is cleared. + */ + if (smallRepTimeouts) { + final String timeout = String.valueOf(HA_TIMEOUT_MS) + " ms"; + repConfig.setConfigParam( + ReplicationConfig.FEEDER_TIMEOUT, timeout); + repConfig.setConfigParam( + ReplicationConfig.REPLICA_TIMEOUT, timeout); + repConfig.setConfigParam( + ReplicationConfig.REPLICA_ACK_TIMEOUT, timeout); + repConfig.setConfigParam( + ReplicationConfig.INSUFFICIENT_REPLICAS_TIMEOUT, timeout); + } + + SharedTestUtils.cleanUpTestDir(new File(homeDir)); + + repEnvInfo = RepTestUtils.setupEnvInfos( + new File(homeDir), nodes, envConfig, repConfig); + + final Environment masterEnv = RepTestUtils.joinGroup(repEnvInfo); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + + masterDb = masterEnv.openDatabase(null, "foo", dbConfig); + + final int keysPerThread = (records / updateThreads) + 1; + int startKey = 0; + for (int i = 0; i < updateThreads; i += 1) { + threads[i] = new UpdateThread(keysPerThread, startKey); + startKey += keysPerThread; + } + + stopFlag = false; + + for (final Thread t : threads) { + t.start(); + } + } + + private void stop() + throws Throwable { + + if (unexpectedEx.get() != null) { + throw unexpectedEx.get(); + } + + stopFlag = true; + + for (final Thread t : threads) { + t.join(10 * 1000); + if (t.isAlive()) { + t.interrupt(); + t.join(10 * 1000); + if (t.isAlive()) { + throw new RuntimeException( + "Thread " + t.getName() + " still running"); + } + } + } + + masterDb.close(); + + /* Tolerate DiskLimitException to simplify test. Close master last. */ + for (int i = nodes - 1; i >= 0; i -= 1) { + try { + repEnvInfo[i].closeEnv(); + } catch (DiskLimitException expected) { + /* Do nothing. 
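+                 * (Presumably because the final checkpoint performed when
+                 * closing an environment can itself exceed a lowered disk
+                 * limit.)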
*/ + } + } + } + + private void runTest() throws Throwable { + + lastStatTime = System.currentTimeMillis(); + long durationMs = minutes * 60 * 1000; + + if (withViolations) { + if (nodes > 1) { + durationMs /= 3; + } + + runStopTime = System.currentTimeMillis() + durationMs; + start(false /*smallRepTimeouts*/); + runWithViolationsOnMaster(); + stop(); + + if (nodes > 1) { + runStopTime += durationMs; + start(false /*smallRepTimeouts*/); + runWithViolationsOnOneReplica(); + stop(); + + runStopTime += durationMs; + start(true /*smallRepTimeouts*/); + runWithViolationsOnAllReplicas(); + stop(); + } + } else { + runStopTime = System.currentTimeMillis() + durationMs; + start(false /*smallRepTimeouts*/); + runWithNoViolations(); + stop(); + } + } + + private void setDiskLimitViolation(final RepEnvInfo info) { + setMaxDisk(info, 1); + } + + private void clearDiskLimitViolation(final RepEnvInfo info) { + setMaxDisk(info, MAX_DISK); + } + + private void setMaxDisk(final RepEnvInfo info, final long size) { + + final Environment env = info.getEnv(); + + env.setMutableConfig(env.getMutableConfig().setMaxDisk(size)); + + info.getRepImpl().getCleaner().manageDiskUsage(); + } + + private void runWithNoViolations() + throws Throwable { + + while (!stopFlag) { + + runAndCheckExceptions( + System.currentTimeMillis() + 5000); + + refreshStats(); + } + } + + private void runWithViolationsOnMaster() + throws Throwable { + + /* Time to run before lowering MAX_DISK to cause a violation. */ + final long steadyStateMs = 5 * 1000; + + /* Time to wait for lowering MAX_DISK to take effect. */ + final long setViolationMs = 3 * 1000; + + /* Time to wait for restoring MAX_DISK to take effect. */ + final long clearViolationMs = 3 * 1000; + + allowDurabilityEx = false; + expectDurabilityEx = false; + + while (!stopFlag) { + + System.out.println("Steady state with no violations"); + + allowDiskLimitEx = false; + expectDiskLimitEx = false; + + runAndCheckExceptions( + System.currentTimeMillis() + steadyStateMs); + + cleanLog(); + + System.out.println("Start violations on MASTER"); + + setDiskLimitViolation(repEnvInfo[0]); + + allowDiskLimitEx = true; + expectDiskLimitEx = true; + + runAndCheckExceptions( + System.currentTimeMillis() + setViolationMs); + + System.out.println("Clear violations on MASTER"); + + clearDiskLimitViolation(repEnvInfo[0]); + + allowDiskLimitEx = true; + expectDiskLimitEx = false; + + runAndCheckExceptions( + System.currentTimeMillis() + clearViolationMs); + + for (final UpdateThread t : threads) { + t.clearExceptions(); + } + } + } + + private void runWithViolationsOnOneReplica() + throws Throwable { + + /* Time to run before lowering MAX_DISK to cause a violation. */ + final long steadyStateMs = 3 * HA_TIMEOUT_MS; + + /* + * Time to wait for lowering MAX_DISK to take effect. + * For replicas we should to wait long enough to cause + * InsufficientReplicasException as well as InsufficientAcksException. + */ + final long setViolationMs = 3 * HA_TIMEOUT_MS; + + /* Time to wait for restoring MAX_DISK to take effect. 
*/ + final long clearViolationMs = 5 * HA_TIMEOUT_MS; + + allowDiskLimitEx = false; + expectDiskLimitEx = false; + + allowDurabilityEx = false; + expectDurabilityEx = false; + + while (!stopFlag) { + + System.out.println("Steady state with no violations"); + + runAndCheckExceptions( + System.currentTimeMillis() + steadyStateMs); + + cleanLog(); + + System.out.println("Start violations on ONE_REPLICA"); + + setDiskLimitViolation(repEnvInfo[1]); + + runAndCheckExceptions( + System.currentTimeMillis() + setViolationMs); + + System.out.println("Clear violations on ONE_REPLICA"); + + clearDiskLimitViolation(repEnvInfo[1]); + + runAndCheckExceptions( + System.currentTimeMillis() + clearViolationMs); + + for (final UpdateThread t : threads) { + t.clearExceptions(); + } + } + } + + private void runWithViolationsOnAllReplicas() + throws Throwable { + + /* Time to run before lowering MAX_DISK to cause a violation. */ + final long steadyStateMs = 3 * HA_TIMEOUT_MS; + + /* + * Time to wait for lowering MAX_DISK to take effect. + * For replicas we should to wait long enough to cause + * InsufficientReplicasException as well as InsufficientAcksException. + */ + final long setViolationMs = 3 * HA_TIMEOUT_MS; + + /* + * Time to wait for restoring MAX_DISK to take effect. + * For replicas this takes longer because the connection has + * to be reestablished. + */ + final long clearViolationMs = 5 * HA_TIMEOUT_MS; + + allowDiskLimitEx = false; + expectDiskLimitEx = false; + + while (!stopFlag) { + + System.out.println("Steady state with no violations"); + + allowDurabilityEx = false; + expectDurabilityEx = false; + + runAndCheckExceptions( + System.currentTimeMillis() + steadyStateMs); + + cleanLog(); + + System.out.println("Start violations on ALL_REPLICAS"); + + for (int i = 1; i < repEnvInfo.length; i += 1) { + setDiskLimitViolation(repEnvInfo[i]); + } + + allowDurabilityEx = true; + expectDurabilityEx = true; + + runAndCheckExceptions( + System.currentTimeMillis() + setViolationMs); + + System.out.println("Clear violations on ALL_REPLICAS"); + + for (int i = 1; i < repEnvInfo.length; i += 1) { + clearDiskLimitViolation(repEnvInfo[i]); + } + + allowDurabilityEx = true; + expectDurabilityEx = false; + + runAndCheckExceptions( + System.currentTimeMillis() + clearViolationMs); + + for (final UpdateThread t : threads) { + t.clearExceptions(); + } + } + } + + private void runAndCheckExceptions(final long stopTime) + throws Throwable { + + if (stopTime > runStopTime) { + stopFlag = true; + return; + } + + while (!stopFlag && System.currentTimeMillis() < stopTime) { + + Thread.sleep(1000); + + for (final UpdateThread t : threads) { + + if (!allowDiskLimitEx && t.diskLimitEx != null) { + throw new IllegalStateException(t.diskLimitEx); + } + + if (!allowDurabilityEx && t.durabilityEx != null) { + throw new IllegalStateException(t.durabilityEx); + } + } + } + + if (stopFlag) { + return; + } + + refreshStats(); + + for (final UpdateThread t : threads) { + + if (expectDiskLimitEx && t.diskLimitEx == null) { + throw new IllegalStateException( + "Expected DiskLimitException"); + } + + if (expectDurabilityEx && t.durabilityEx == null) { + throw new IllegalStateException( + "Expected InsufficientAcksException or " + + "InsufficientReplicasException"); + } + } + + if (expectDiskLimitEx || expectDurabilityEx) { + assertTrue(lastExceptions > 0); + } + + if (!allowDiskLimitEx && !allowDurabilityEx) { + assertEquals(0, lastExceptions); + assertTrue(lastOperations > 0); + } + } + + /** + * During steady state, when we know 
there are no violations, give + * cleaner/checkpointer a chance to run to completion. + */ + private void cleanLog() { + for (final RepEnvInfo info : repEnvInfo) { + final Environment env = info.getEnv(); + env.cleanLog(); + env.checkpoint(new CheckpointConfig().setForce(true)); + } + } + + @SuppressWarnings("unused") + private void dumpThreads() { + final EnvironmentImpl envImpl = repEnvInfo[0].getRepImpl(); + + LoggerUtils.fullThreadDump( + envImpl.getLogger(), envImpl, Level.SEVERE); + } + + private void refreshStats() { + + final long curTime = System.currentTimeMillis(); + final long statMs = curTime - lastStatTime; + + if (statMs < 1000) { + return; + } + + long ops = 0; + long exceptions = 0; + + for (final UpdateThread t : threads) { + ops += t.getOps(); + exceptions += t.getExceptions(); + } + + lastStatTime = curTime; + lastOperations = ops; + lastExceptions = exceptions; + + final long statSec = statMs / 1000; + final long opsPerSec = ops / statSec; + final long exceptionsPerSec = exceptions / statSec; + + System.out.format( + "Ops: %,d Exc: %,d Ops/s: %,d Exc/s: %,d %n", + ops, exceptions, opsPerSec, exceptionsPerSec); + +// System.out.println(envImpl.getCleaner().getDiskLimitMessage()); +// final EnvironmentStats stats = masterEnv.getStats(null); + } + + class UpdateThread extends Thread { + + private final int keysPerThread; + private final int startKey; + private volatile int ops = 0; + private volatile int exceptions = 0; + volatile DiskLimitException diskLimitEx; + volatile OperationFailureException durabilityEx; + + UpdateThread(final int keysPerThread, final int startKey) { + this.keysPerThread = keysPerThread; + this.startKey = startKey; + } + + int getOps() { + final int ret = ops; + ops = 0; + return ret; + } + + int getExceptions() { + final int ret = exceptions; + exceptions = 0; + return ret; + } + + void clearExceptions() { + diskLimitEx = null; + durabilityEx = null; + } + + @Override + public void run() { + + try { + final DatabaseEntry key = new DatabaseEntry(); + + final DatabaseEntry data = + new DatabaseEntry(new byte[DATA_SIZE]); + + while (!stopFlag) { + for (int i = startKey; + i < (startKey + keysPerThread) && !stopFlag; + i += 1) { + + IntegerBinding.intToEntry(i, key); + + try { + final OperationResult result = masterDb.put( + null, key, data, Put.OVERWRITE, null); + if (result == null) { + throw new IllegalStateException("put failed"); + } + ops += 1; + } catch (DiskLimitException e) { + diskLimitEx = e; + exceptions += 1; + } catch (InsufficientAcksException| + InsufficientReplicasException e) { + durabilityEx = e; + exceptions += 1; + } + } + } + } catch (Throwable e) { + e.printStackTrace(System.out); + unexpectedEx.compareAndSet(null, e); + stopFlag = true; + } + } + } +} diff --git a/test/standalone/EnvSharedCache.java b/test/standalone/EnvSharedCache.java new file mode 100644 index 0000000..c5a3d5a --- /dev/null +++ b/test/standalone/EnvSharedCache.java @@ -0,0 +1,978 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.Properties; + +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.EnvironmentStats; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.utilint.JVMSystemUtils; + +/** + * Typical usage: + * # Initialize the DBs + * java EnvSharedCache -h HOME -initonly + * + * # Run updates with two classes of worker threads (different cache size) + * java EnvSharedCache -h HOME -shared -cachetest -txns 1000000 + */ +public class EnvSharedCache implements Runnable { + + private static final int INSERT = 1; + private static final int UPDATE = 2; + private static final int SELECT = 3; + private static boolean verbose = false; + private static boolean debug = false; + private static boolean openTest = false; + private static boolean cacheTest = false; + private static boolean sharedTest = false; + private static boolean evenTest = false; + private static boolean initOnly = false; + private static String delimiter = System.getProperty("file.separator"); + private static String homeDirPrefix = "db"; + private static StringBuilder inputArgs = new StringBuilder(); + private static int nEnvs = 4; + private static int nThreadsPerEnv = 4; + private static int nMaxKeys = 1000000; + private static int subDir = 0; + private static int nMaxTransactions = 100000; + private static float nCacheMissThreshold = 0.5f; + private static float nCacheSizeThreshold = 0.40f; + private static float nThruputThreshold = 0.5f; + private Environment[] envs; + private Database[] dbs; + private EnvironmentStats[] envStats; + private SecureRandom random = new SecureRandom(); + private boolean isSharedCacheRun = false; + private int keySize = 10; + private int dataSize = 100; + private int nRecordsPerThread = 0; + private int nDeletesPerThread = 0; + private int nInitEnvs = 0; + private int nInitThreadsPerEnv = 0; + private int nTransactions[][]; + private int nInserts[][]; + private int nUpdates[][]; + private int nDeletes[][]; + private int nSelects[][]; + private int nReadsPerWrite = 10; + private float nThroughput = 0.0f; + private long nElapsedTime[][]; + + public static void main(String args[]) { + try { + /* Parse command-line input arguments. 
*/
+            for (int i = 0; i < args.length; i++) {
+                String arg = args[i];
+                boolean moreArgs = i < args.length - 1;
+                if (arg.equals("-v")) {
+                    verbose = true;
+                } else if (arg.equals("-d")) {
+                    debug = true;
+                } else if (arg.equals("-initonly")) {
+                    initOnly = true;
+                } else if (arg.equals("-opentest")) {
+                    openTest = true;
+                } else if (arg.equals("-cachetest")) {
+                    cacheTest = true;
+                } else if (arg.equals("-eventest")) {
+                    evenTest = true;
+                } else if (arg.equals("-h") && moreArgs) {
+                    homeDirPrefix = args[++i] + delimiter + homeDirPrefix;
+                } else if (arg.equals("-shared")) {
+                    sharedTest = true;
+                } else if (arg.equals("-envs") && moreArgs) {
+                    nEnvs = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-keys") && moreArgs) {
+                    nMaxKeys = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-txns") && moreArgs) {
+                    nMaxTransactions = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-threads") && moreArgs) {
+                    nThreadsPerEnv = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-subDir") && moreArgs) {
+                    subDir = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-help")) {
+                    usage(null);
+                    System.exit(0);
+                } else {
+                    usage("Unknown arg: " + arg);
+                    System.exit(1);
+                }
+            }
+            /* Save command-line input arguments. */
+            for (String s : args) {
+                inputArgs.append(" " + s);
+            }
+            System.out.println("\nCommand-line input arguments:\n " +
+                inputArgs);
+            /*
+             * If the -shared flag is specified, compare EnvironmentStats
+             * between the sharedCache and nonSharedCache runs to judge
+             * whether the environment shared cache test passes or fails.
+             */
+            if (sharedTest) {
+                EnvSharedCache nonSharedCacheRun = new EnvSharedCache();
+                nonSharedCacheRun.setSharedCacheRun(false);
+
+                EnvSharedCache sharedCacheRun = new EnvSharedCache();
+                sharedCacheRun.setSharedCacheRun(true);
+
+                System.out.println("Starting non-sharedCache test...");
+                nonSharedCacheRun.startTest();
+                System.out.println("\nStarting sharedCache test...");
+                sharedCacheRun.startTest();
+                /* Compare stats to judge test passes/fails. */
+                if (!verifyResults(nonSharedCacheRun, sharedCacheRun)) {
+                    /* Failed to meet test criteria, exit with error. */
+                    System.exit(1);
+                }
+            } else {
+                new EnvSharedCache().startTest();
+            }
+        } catch (Throwable e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    /**
+     * Print the usage.
+     */
+    private static void usage(String msg) {
+        String usageStr;
+        if (msg != null) {
+            System.err.println(msg);
+        }
+        usageStr = "Usage: java EnvSharedCache\n" +
+                   "       [-v] [-d] [-h <homeDirPrefix>]\n" +
+                   "       [-envs <numEnvs>]\n" +
+                   "       [-threads <numThreadsPerEnv>]\n" +
+                   "       [-keys <maxKeysPerThread>] [-initonly]\n\n" +
+                   "Usage: java EnvSharedCache\n" +
+                   "       [-v] [-d] [-h <homeDirPrefix>]\n" +
+                   "       [-envs <numEnvs>]\n" +
+                   "       [-threads <numThreadsPerEnv>]\n" +
+                   "       [-txns <maxTxnsPerThread>]\n" +
+                   "       [-cachetest [-shared] [-opentest] [-eventest]]";
+        System.err.println(usageStr);
+    }
+
+    /**
+     * Compare results between non-shared and shared cache run.
+     */
+    public static boolean verifyResults(EnvSharedCache nonSharedCache,
+                                        EnvSharedCache sharedCache) {
+        EnvironmentStats nonSharedStatsArray[] = nonSharedCache.getEnvStats();
+        EnvironmentStats sharedStatsArray[] = sharedCache.getEnvStats();
+        boolean thruputCheck = false;
+        boolean cacheMissCheck = false;
+        boolean cacheSizeCheck = false;
+        boolean overallCheck = true;
+        System.out.println
+            ("\n\n                " +
+             "Multi-Env SharedCache Test Summary Report At: " +
+             new java.util.Date());
+        System.out.println
+            ("                     Non-Shared     Shared  Pass/Fail");
+        System.out.println
+            ("                     ---------- ---------- ----------");
+        /* Check to see if throughput meets the given threshold.
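+         * The check passes when |shared - nonShared| / nonShared is at
+         * most nThruputThreshold (0.5 by default), i.e. shared-cache
+         * throughput may deviate from the non-shared run by up to 50%.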
*/ + if (evenTest) { + thruputCheck = + (Math.abs(sharedCache.nThroughput - nonSharedCache.nThroughput) + / nonSharedCache.nThroughput) + <= nThruputThreshold; + overallCheck &= thruputCheck; + } + System.out.printf + ("Throughput(%.2f): %10.2f %10.2f %10S%n", + nThruputThreshold, + nonSharedCache.nThroughput, + sharedCache.nThroughput, + (evenTest ? (thruputCheck ? "PASS" : "FAIL") : "N/A")); + for (int i = 0; i < nEnvs; i++) { + EnvironmentStats nonSharedStats = nonSharedStatsArray[i]; + EnvironmentStats sharedStats = sharedStatsArray[i]; + System.out.printf("Env(%d)\n", i); + /* + * Check if the regular worker's NCacheMiss variation meet + * the given threshold. This check doesn't make sense + * to smallCache workers. + */ + if ((!openTest) && (!evenTest) && ((i % 2) != 1)) { + cacheMissCheck = sharedStats.getNCacheMiss() + <= (nonSharedStats.getNCacheMiss() * nCacheMissThreshold); + } else { + cacheMissCheck = true; + } + overallCheck &= cacheMissCheck; + System.out.printf + (" NCacheMiss(%.2f):%10d %10d %10S\n", + nCacheMissThreshold, + nonSharedStats.getNCacheMiss(), + sharedStats.getNCacheMiss(), + (!openTest) && (!evenTest) + ? (cacheMissCheck ? "PASS" : "FAIL") + : "N/A"); + /* For eventest, check CacheDataBytes to see if within 25%. */ + if (evenTest) { + cacheSizeCheck = + ((float) Math.abs(sharedStats.getDataBytes() + - nonSharedStats.getDataBytes()) + / nonSharedStats.getDataBytes()) + <= nCacheSizeThreshold; + overallCheck &= cacheSizeCheck; + } + System.out.printf + (" DataBytes(%.2f):%10d %10d %10S\n", + nCacheSizeThreshold, + nonSharedStats.getDataBytes(), + sharedStats.getDataBytes(), + (evenTest ? (cacheSizeCheck ? "PASS" : "FAIL") : "N/A")); + System.out.printf + (" NLogBuffers:%10d %10d\n", + nonSharedStats.getNLogBuffers(), + sharedStats.getNLogBuffers()); + System.out.printf + (" LogBuffersBytes:%10d %10d\n", + nonSharedStats.getBufferBytes(), + sharedStats.getBufferBytes()); + System.out.printf + (" CacheTotalBytes:%10d %10d\n", + nonSharedStats.getCacheTotalBytes(), + sharedStats.getCacheTotalBytes()); + System.out.printf + (" NNotResident:%10d %10d\n", + nonSharedStats.getNNotResident(), + sharedStats.getNNotResident()); + System.out.printf + (" NSharedCacheEnv:%10d %10d\n", + nonSharedStats.getNSharedCacheEnvironments(), + sharedStats.getNSharedCacheEnvironments()); + System.out.printf + (" SCacheTotalBytes:%10d %10d\n", + nonSharedStats.getSharedCacheTotalBytes(), + sharedStats.getSharedCacheTotalBytes()); + } + System.out.print("\nThe run is: " + (sharedTest ? "-shared " : "") + + (openTest ? "-opentest " : "") + + (evenTest ? "-eventest " : "") + + "\nThe run is considered as: " + + (overallCheck ? "PASS" : "FAIL") + "\n"); + return overallCheck; + } + + /** + * Set the isSharedCacheRun flag. + */ + private void setSharedCacheRun(boolean flag) { + isSharedCacheRun = flag; + } + + /** + * Get the envStats. + */ + private EnvironmentStats[] getEnvStats() { + return envStats; + } + + /** + * Precheck if database files exist before starting the run. + */ + private boolean validateHomeDir() { + for (int i = 0; i < nEnvs; i++) { + File f = new File(homeDirPrefix + i); + if (f.isDirectory()) { + continue; + } else if (initOnly) { + f.mkdirs(); + } else { + return false; + } + } + return true; + } + + private void startTest() throws Exception { + + if (!validateHomeDir()) { + System.err.println("ERROR: Invalid HomeDirPrefix!" 
+ + " Please specify a valid HomeDirPrefix parameter" + + " that points to your *.jdb files."); + System.exit(1); + } + /* Read properties from ${DB0}/run.properties file. */ + File file = new File(homeDirPrefix + "0" + + System.getProperty("file.separator") + "run.properties"); + Properties prop = new Properties(); + if (file.exists()) { + FileInputStream in = new FileInputStream(file); + prop.load(in); + nRecordsPerThread = + Integer.parseInt(prop.getProperty("RecordsPerThread")); + nDeletesPerThread = + Integer.parseInt(prop.getProperty("DeletesPerThread")); + nInitEnvs = + Integer.parseInt(prop.getProperty("InitEnvs")); + nInitThreadsPerEnv = + Integer.parseInt(prop.getProperty("InitThreadsPerEnv")); + in.close(); + } + if (initOnly) { + nInitEnvs = nEnvs; + nInitThreadsPerEnv = nThreadsPerEnv; + } else if (nInitEnvs > 0 && nEnvs > nInitEnvs) { + System.out.println("Warning: The number of environments" + + " specified here is beyond the value of environments" + + " when last initiating databases.\nAuto adjust to" + + " last initiating value: " + nInitEnvs); + } else if (nInitThreadsPerEnv > 0 + && nThreadsPerEnv > nInitThreadsPerEnv) { + System.out.println("Warning: The number of threads specified" + + " here is beyond the value of threads when last" + + " initiating databases.\nAuto adjust to last" + + " initiating value: " + nInitThreadsPerEnv); + nThreadsPerEnv = nInitThreadsPerEnv; + } + + envs = new Environment[nEnvs]; + dbs = new Database[nEnvs]; + envStats = new EnvironmentStats[nEnvs]; + nInserts = new int[nEnvs][nThreadsPerEnv]; + nUpdates = new int[nEnvs][nThreadsPerEnv]; + nDeletes = new int[nEnvs][nThreadsPerEnv]; + nSelects = new int[nEnvs][nThreadsPerEnv]; + nTransactions = new int[nEnvs][nThreadsPerEnv]; + nElapsedTime = new long[nEnvs][nThreadsPerEnv]; + + /* + * Initialize the Environments and open the Databases. For + * open/close test, we initialize with each transaction in the + * thread main loop. + */ + if (!openTest) { + for (int i = 0; i < nEnvs; i++) { + envs[i] = openEnv(i); + dbs[i] = openDB(envs[i], i); + } + } + + /* Create the workers and initialize operation counters. */ + Thread[][] threads = new Thread[nEnvs][nThreadsPerEnv]; + for (int i = 0; i < nEnvs; i++) { + for (int j = 0; j < nThreadsPerEnv; j++) { + nInserts[i][j] = 0; + nUpdates[i][j] = 0; + nDeletes[i][j] = 0; + nSelects[i][j] = 0; + nTransactions[i][j] = 0; + threads[i][j] = + new Thread(this, Integer.toString(i * nThreadsPerEnv + j)); + threads[i][j].start(); + Thread.sleep(100); + } + } + + /* Wait until threads finished. */ + for (int i = 0; i < nEnvs; i++) { + for (int j = 0; j < nThreadsPerEnv; j++) { + if (threads[i][j] != null) { + threads[i][j].join(); + } + } + } + + if (!openTest) { + for (int i = 0; i < nEnvs; i++) { + /* Put EnvironmentStats objects into arrays before closing. */ + envStats[i] = getStats(envs[i], i); + } + + for (int i = 0; i < nEnvs; i++) { + closeEnv(envs[i], dbs[i]); + } + } + + /* Calculate elapsed time, transactions and throughput. 
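+         * Overall throughput is the total transaction count divided by the
+         * average per-thread wall-clock time:
+         *
+         *   elapsedSecs = totalThreadMillis / (nEnvs * nThreadsPerEnv * 1000)
+         *   throughput  = transactions / elapsedSecs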
*/
+        int transactions = 0;
+        long timeMillis = 0;
+        float elapsedSecs = 0.0f;
+        float throughput = 0.0f;
+        for (int i = 0; i < nEnvs; i++) {
+            int inserts = 0, updates = 0, deletes = 0, selects = 0;
+            for (int j = 0; j < nThreadsPerEnv; j++) {
+                inserts += nInserts[i][j];
+                updates += nUpdates[i][j];
+                deletes += nDeletes[i][j];
+                selects += nSelects[i][j];
+                transactions += nTransactions[i][j];
+                timeMillis += nElapsedTime[i][j];
+                elapsedSecs = (float) nElapsedTime[i][j] / 1000;
+                throughput = (float) nTransactions[i][j] / elapsedSecs;
+                if (verbose) {
+                    System.out.printf("%nENV(%d) Thread %d " +
+                        " Running time: %.2f secs Transactions: %d" +
+                        " Throughput: %.2f txns/sec", i, j, elapsedSecs,
+                        nTransactions[i][j], throughput);
+                }
+            }
+            if (verbose) {
+                System.out.println("\nENV(" + i + "): " + inserts + " inserts " +
+                    updates + " updates " + deletes + " deletes " +
+                    selects + " selects ");
+            }
+        }
+        elapsedSecs = (float) timeMillis / (nEnvs * nThreadsPerEnv * 1000);
+        throughput = (float) transactions / elapsedSecs;
+        nThroughput = throughput;
+        System.out.printf("%nAverage elapsed time: %.2f secs" +
+            " Transactions: %d Throughput: %.2f txns/sec%n",
+            elapsedSecs, transactions, throughput);
+
+        /* Create/Update ${DB0}/run.properties file. */
+        FileOutputStream out = new FileOutputStream(file);
+        prop.setProperty("RecordsPerThread", Integer.toString(nRecordsPerThread +
+            nInserts[0][0] - nDeletes[0][0]));
+        prop.setProperty("DeletesPerThread", Integer.toString(nDeletesPerThread +
+            nDeletes[0][0]));
+        prop.setProperty("InitEnvs", Integer.toString(nInitEnvs));
+        prop.setProperty("InitThreadsPerEnv",
+            Integer.toString(nInitThreadsPerEnv));
+        prop.store(out, "EnvSharedCache test runtime properties." +
+            " Please don't update/remove this file.");
+        out.close();
+    }
+
+    /**
+     * Print and return the cache-related stats for the env.
+     */
+    private EnvironmentStats getStats(Environment env, int envId)
+        throws Exception {
+
+        assert (env != null) : "getStats: Null env pointer";
+
+        StatsConfig statsConfig = new StatsConfig();
+        statsConfig.setFast(true);
+        statsConfig.setClear(true);
+        EnvironmentStats stats = env.getStats(statsConfig);
+        return stats;
+    }
+
+    /**
+     * Open an Environment.
+     */
+    private Environment openEnv(int i) throws Exception {
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setTransactional(true);
+        envConfig.setAllowCreate(true);
+
+        /*
+         * Under the Zing JVM, objects may occupy up to 40% more memory
+         * than under the Oracle JDK. A 10MB cache is enough to hold most
+         * of the records under the Oracle JDK, but not under Zing, so we
+         * double the cache size for Zing.
+         */
+        int factor = 1;
+        if (JVMSystemUtils.ZING_JVM) {
+            factor = 2;
+        }
+        if (isSharedCacheRun) {
+            envConfig.setCacheSize(10000000 * nEnvs * factor);
+            envConfig.setSharedCache(true);
+        } else {
+            envConfig.setCacheSize(10000000 * factor);
+            envConfig.setSharedCache(false);
+        }
+
+        /*
+         * Because the evictor has multiple LRU lists per LRUSet, the accuracy
+         * of the LRU varies too much to be predictable in this test,
+         * especially due to outliers on some machines. Use a single LRU list
+         * per LRUSet.
+ */ + envConfig.setConfigParam( + EnvironmentConfig.EVICTOR_N_LRU_LISTS, "1"); + + if (subDir > 0) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, subDir + ""); + Utils.createSubDirs(new File(homeDirPrefix + i), subDir, true); + } + + Environment env = new Environment(new File(homeDirPrefix + i), + envConfig); + return env; + } + + /** + * Open a Database. + */ + private Database openDB(Environment env, int i) throws Exception { + + assert (env != null) : "openDB: Null env pointer"; + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + return env.openDatabase(null, "db" + i, dbConfig); + } + + /** + * Close the Database and Environment. + */ + private void closeEnv(Environment env, Database db) + throws DatabaseException { + + assert (db != null) : "closeEnv: Null db pointer"; + assert (env != null) : "closeEnv: Null env pointer"; + + db.close(); + env.close(); + } + + /** + * Generate the data. + */ + private void makeData(DatabaseEntry data) { + + assert (data != null) : "makeData: Null data pointer"; + + byte[] bytes = new byte[dataSize]; + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (byte) i; + } + data.setData(bytes); + } + + /** + * Generate the random data. + */ + private void makeRandomData(DatabaseEntry data) { + + assert (data != null) : "makeRandomData: Null data pointer"; + + byte[] bytes = new byte[dataSize]; + random.nextBytes(bytes); + data.setData(bytes); + } + + /** + * Return a copy of the byte array in data. + */ + private byte[] copyData(DatabaseEntry data) { + + assert (data != null) : "copyData: Null data pointer"; + + byte[] buf = new byte[data.getSize()]; + System.arraycopy(data.getData(), data.getOffset(), buf, 0, buf.length); + return buf; + } + + /** + * Return a copy of the byte array in data starting at the offset. + */ + private byte[] copyData(DatabaseEntry data, int offset) { + + assert (data != null) : "copyData: Null data pointer"; + + byte[] buf = new byte[data.getSize() - offset]; + System.arraycopy(data.getData(), data.getOffset() + offset, + buf, 0, buf.length); + return buf; + } + + /** + * Generate the insert key with a prefix string. + */ + private void makeInsertKey(Cursor c, + DatabaseEntry key, + String keyPrefix, + boolean smallCache) { + + assert (c != null) : "makeInsertKey: Null cursor pointer"; + assert (key != null) : "makeInsertKey: Null key pointer"; + assert (keyPrefix != null) : "makeInsertKey: Null keyPrefix pointer"; + + String buf = keyPrefix; + int num; + if (key.getData() != null) { + num = Integer.parseInt + (new String(copyData(key, keyPrefix.length()))); + num++; + } else { + /* + * For regular working set, we define: + * deletion always occurs at the first database record, + * and insertion always appends to the last record, + * search randomly between the first and last. + */ + if (smallCache) { + num = nRecordsPerThread; + } else { + num = nRecordsPerThread + nDeletesPerThread; + } + } + buf += Integer.toString(num); + key.setData(buf.getBytes()); + } + + /** + * Insert a record. 
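+     *
+     * Keys have the form keyPrefix + sequence number, where keyPrefix is
+     * the thread id plus "-" (see makeInsertKey), so each worker appends
+     * to its own key range; e.g. thread 2 inserts "2-1000", "2-1001", ...
+     * (illustrative values).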
+ */ + private void insert(Cursor c, + DatabaseEntry key, + DatabaseEntry data, + String keyPrefix, + boolean smallCache) throws DatabaseException { + + assert (c != null) : "insert: Null cursor pointer"; + assert (key != null) : "insert: Null key pointer"; + assert (data != null) : "insert: Null data pointer"; + + makeData(data); + boolean done = false; + while (!done) { + /* + * Generate a key that is prefixed with the thread name so each + * thread is working on its own data set to reduce deadlocks. + */ + makeInsertKey(c, key, keyPrefix, smallCache); + OperationStatus status = c.putNoOverwrite(key, data); + if (status == OperationStatus.KEYEXIST) { + System.out.println("Duplicate key."); + } else { + if (status != OperationStatus.SUCCESS) { + System.out.println("Unexpected insert error: " + status); + } + done = true; + } + } + } + + /** + * Generate the search key with a prefix string. + */ + private void makeSearchKey(Cursor c, + DatabaseEntry key, + String keyPrefix, + boolean smallCache, + int offset) { + + assert (c != null) : "makeSearchKey: Null cursor pointer"; + assert (key != null) : "makeSearchKey: Null key pointer"; + assert (keyPrefix != null) : "makeSearchKey: Null keyPrefix pointer"; + + String buf = keyPrefix; + int num; + if (smallCache) { + num = offset; + } else { + /* + * For regular working set, we create the random search key + * between the current "beginning" and "end" of database records. + */ + num = random.nextInt(nRecordsPerThread) + nDeletesPerThread + + offset; + } + buf += Integer.toString(num); + key.setData(buf.getBytes()); + } + + public void run() { + Environment env = null; + Database db = null; + DatabaseEntry key = new DatabaseEntry(); + DatabaseEntry data = new DatabaseEntry(); + DatabaseEntry searchKey = new DatabaseEntry(); + DatabaseEntry searchData = new DatabaseEntry(); + boolean done = false; + boolean smallCache = false; + byte[] lastInsertKey = null; + Transaction txn = null; + Cursor c = null; + int nKeys = 0; + OperationStatus status; + + String threadName = Thread.currentThread().getName(); + int envId = Integer.parseInt(threadName) / nThreadsPerEnv; + int threadId = Integer.parseInt(threadName) % nThreadsPerEnv; + String keyPrefix = threadId + "-"; + + if (verbose) { + System.out.println("Thread " + threadId + " started on ENV(" + + envId + ")"); + } + + /* Initialize with start time. */ + nElapsedTime[envId][threadId] = System.currentTimeMillis(); + + /* + * If it is not evenTest (even work load on each env), to test cache + * utilization efficiency, we create two classes of users. One set + * will simply insert, update, and delete the same record repeatedly + * and the other set will have a larger working set. + * The former will use very little cache and will result in waste + * in non-shared cache case. + */ + smallCache = (!evenTest) & ((envId % 2) == 1); + + if (!openTest) { + env = envs[envId]; + db = dbs[envId]; + } + + while (!done) { + try { + /* Test the env open/close */ + if (openTest) { + env = openEnv(envId); + db = openDB(env, envId); + } + + txn = env.beginTransaction(null, null); + c = db.openCursor(txn, null); + + if (initOnly && nKeys < nMaxKeys) { + insert(c, key, data, keyPrefix, smallCache); + checkCorrectness(INSERT, key, data, keyPrefix, smallCache, + nKeys); + nKeys++; + nInserts[envId][threadId]++; + } + + if (!initOnly) { + /* Insert */ + if (smallCache) { + /* + * Set key to null, so every time + * it will insert the same key. 
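+                         * The small-cache worker thus touches the same
+                         * record repeatedly, keeping its working set tiny
+                         * compared with the regular workers.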
+ */ + key.setData(null); + } + insert(c, key, data, keyPrefix, smallCache); + if (smallCache) { + checkCorrectness(INSERT, key, data, keyPrefix, + smallCache, nRecordsPerThread); + } else { + checkCorrectness(INSERT, key, data, keyPrefix, + smallCache, + (nRecordsPerThread + nDeletesPerThread + + nInserts[envId][threadId])); + } + lastInsertKey = copyData(key); + nInserts[envId][threadId]++; + /* Update */ + if (smallCache) { + searchKey.setData(lastInsertKey); + } else { + makeSearchKey(c, searchKey, keyPrefix, smallCache, + nDeletes[envId][threadId]); + } + status = c.getSearchKeyRange(searchKey, searchData, + LockMode.DEFAULT); + if (status == OperationStatus.SUCCESS) { + makeRandomData(data); + status = c.putCurrent(data); + if (status == OperationStatus.SUCCESS) { + c.getSearchKey(searchKey, searchData, + LockMode.DEFAULT); + if (smallCache) { + checkCorrectness(UPDATE, searchKey, searchData, + keyPrefix, smallCache, + nRecordsPerThread); + } else { + checkCorrectness(UPDATE, searchKey, searchData, + keyPrefix, smallCache, + nDeletes[envId][threadId]); + } + nUpdates[envId][threadId]++; + } + /* Delete */ + if (!smallCache) { + String buf = keyPrefix + + Integer.toString(nDeletesPerThread + + nDeletes[envId][threadId]); + searchKey.setData(buf.getBytes()); + status = c.getSearchKey(searchKey, searchData, + LockMode.DEFAULT); + } + if (status == OperationStatus.SUCCESS) { + status = c.delete(); + if (status == OperationStatus.SUCCESS) { + status = c.getSearchKey(searchKey, searchData, + LockMode.DEFAULT); + /* + * Delete correctness check: only checks if + * the record still exists. + */ + if (status != OperationStatus.NOTFOUND) { + System.err.println + ("DELETE Correctness Check Failed: " + + "key/data pair still exists after " + + "deletion."); + System.exit(1); + } + nDeletes[envId][threadId]++; + } + } + } + /* Read */ + if (nReadsPerWrite > 0) { + int i; + for (i = 0; i < nReadsPerWrite; i++) { + if (smallCache) { + makeSearchKey(c, searchKey, keyPrefix, + smallCache, i); + c.getSearchKey(searchKey, searchData, + LockMode.DEFAULT); + checkCorrectness(SELECT, searchKey, searchData, + keyPrefix, smallCache, i); + } else { + makeSearchKey(c, searchKey, keyPrefix, + smallCache, nDeletes[envId][threadId]); + c.getSearchKey(searchKey, searchData, + LockMode.DEFAULT); + checkCorrectness(SELECT, searchKey, searchData, + keyPrefix, smallCache, + nDeletes[envId][threadId]); + } + + /* + * Call Thread.yield() to try to eliminate the + * possible unfair-thread-scheduling issue which + * may cause the throughput cache failure. + */ + Thread.yield(); + } + nSelects[envId][threadId] += i; + } + } + c.close(); + txn.commit(); + nTransactions[envId][threadId]++; + if (initOnly) { + if (nKeys >= nMaxKeys) { + done = true; + } + } else if (nMaxTransactions != 0 + && nTransactions[envId][threadId] >= nMaxTransactions) { + done = true; + } + if (done && openTest && (threadId == (nThreadsPerEnv - 1))) { + envStats[envId] = getStats(env, envId); + } + if (openTest) { + closeEnv(env, db); + } + } catch (Throwable e) { + e.printStackTrace(); + System.exit(1); + } + } // End of while loop. + + /* Calculate elapsed time. */ + nElapsedTime[envId][threadId] = System.currentTimeMillis() + - nElapsedTime[envId][threadId]; + if (verbose) { + System.out.println("Thread " + threadId + " finished on ENV(" + + envId + ")"); + } + } + + /** + * Operation correctness check. 
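+     *
+     * For INSERT the numeric key suffix must equal checkNum exactly. For
+     * UPDATE and SELECT on the regular working set the suffix must fall
+     * within [checkNum, nRecordsPerThread + nDeletesPerThread + checkNum];
+     * for the small-cache working set it must equal checkNum.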
+ */ + private void checkCorrectness(int operationType, + DatabaseEntry key, + DatabaseEntry data, + String keyPrefix, + boolean smallCache, + int checkNum) { + + assert (key != null) : "checkCorrectness: Null key pointer"; + assert (keyPrefix != null) : "checkCorrectness: Null keyPrefix pointer"; + + String s = new String(key.getData()); + int num = Integer.parseInt(s.substring(s.indexOf("-") + 1)); + DatabaseEntry d = new DatabaseEntry(); + makeData(d); + if (operationType == INSERT) { + if (num != checkNum) { + System.err.println("INSERT Correctness Check Failed: " + + "key value: " + s + " doesn't match checkNum: " + + checkNum + "."); + System.exit(1); + } + } else if (operationType == UPDATE) { + if (smallCache && (num != checkNum)) { + System.err.println("UPDATE Correctness Check Failed: " + + "key value " + s + " doesn't match checkNum " + + checkNum + "."); + System.exit(1); + } else if (!smallCache) { + if (num < checkNum) { + System.err.println("UPDATE Correctness Check Failed: " + + "key value should be larger than " + + checkNum + "."); + System.exit(1); + } else if (num + > (nRecordsPerThread + nDeletesPerThread + checkNum)) { + System.err.println("UPDATE Correctness Check Failed: " + + "key value should be smaller than " + + (nRecordsPerThread + nDeletesPerThread + checkNum) + + "."); + System.exit(1); + } + } else if (Arrays.equals(data.getData(), d.getData())) { + System.err.println("UPDATE Correctness Check Failed: " + + "data value doesn't change."); + System.exit(1); + } + } else if (operationType == SELECT) { + if (smallCache && num != checkNum) { + System.err.println("SELECT Correctness Check Failed: " + + "key value: " + s + " doesn't match checkNum: " + + checkNum + "."); + System.exit(1); + } else if (!smallCache) { + if (num < checkNum) { + System.err.println("SELECT Correctness Check Failed: " + + "key value should be larger than " + + checkNum + "."); + System.exit(1); + } else if (num + > (nRecordsPerThread + nDeletesPerThread + checkNum)) { + System.err.println("SELECT Correctness Check Failed: " + + "key value should be smaller than " + + (nRecordsPerThread + nDeletesPerThread + checkNum) + + "."); + System.exit(1); + } + } + } + } +} diff --git a/test/standalone/FailoverTest.java b/test/standalone/FailoverTest.java new file mode 100644 index 0000000..cd016c2 --- /dev/null +++ b/test/standalone/FailoverTest.java @@ -0,0 +1,561 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +import java.io.File; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicatedEnvironment; +import com.sleepycat.je.rep.ReplicationConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; +import com.sleepycat.je.utilint.LoggerUtils; +import com.sleepycat.je.utilint.TracerFormatter; +import com.sleepycat.je.utilint.VLSN; +import com.sleepycat.persist.EntityStore; +import com.sleepycat.persist.PrimaryIndex; + +/** + * Exercises replica-only, master-only, and replica/master hybrid failover. + */ +public class FailoverTest { + private static final boolean verbose = Boolean.getBoolean("verbose"); + + private static final TracerFormatter dateFormat = new TracerFormatter(); + + /* Time to wait for each node to syncup to the master. */ + private static final long NODE_SYNC_MS = 60 * 1000; + + private RepEnvInfo[] repEnvInfo; + + /* -------------------Configurable params----------------*/ + /* Environment home root for whole replication group. */ + private File envRoot; + /* Replication group size. */ + private int nNodes = 5; + /* Database size. */ + private int dbSize = 2000; + /* Steady state will finish after doing this number of transactions. */ + private int steadyTxns = 60000; + + /* + * Specify the JE log file size and checkpoint bytes to provoke log + * cleaning. + */ + private long logFileSize = 409600; + private long checkpointBytes = 1000000; + + /* + * More than enough disk space to hold the data set and allow cleaning. + * Note that repeated syncups can prevent file deletion and more space + * may be needed for outlier cases. + */ + private long maxDisk = 500 * 1000000; + + /* Select a new master after doing this number of operations. */ + private int txnsPerRound = 100; + private static final int opsPerTxn = 20; + + /* Cycle through the array nodes when provoking failovers. */ + private int roundRobinIdx; + + /* A value to use to create new records. */ + private int nextVal = dbSize + 1; + + /* Used for generating random keys. */ + private final Random random = new Random(); + + /* Determines whether master, replica, or hybrid failover is tested. */ + private FailoverAgent failoverAgent; + + private int subDir = 0; + private boolean offHeap = false; + + private Logger logger = + LoggerUtils.getLoggerFixedPrefix(getClass(), "FailoverTest"); + + private FailoverTest() { + } + + public static void main(String args[]) { + try { + FailoverTest test = new FailoverTest(); + test.parseArgs(args); + test.doRampup(); + test.doSteadyState(); + if (verbose) { + System.err.println("Test done"); + } + } catch (Throwable t) { + t.printStackTrace(System.err); + System.exit(1); + } + } + + /** + * Grow the data store to the appropriate size for the steady state + * portion of the test. + */ + private void doRampup() + throws Exception { + + /* + * Clean up from previous runs. This test will not succeed if there is + * anything in the test environments. 
+ */ + RepTestUtils.removeRepEnvironments(envRoot); + + final long mainCacheSize; + final long offHeapCacheSize; + + if (offHeap) { + mainCacheSize = nNodes * 10 * 1024 * 1024; + offHeapCacheSize = 100 * 1024 * 1024; + } else { + mainCacheSize = 0; + offHeapCacheSize = 0; + } + + EnvironmentConfig envConfig = Utils.createEnvConfig( + logFileSize, maxDisk, checkpointBytes, subDir, + mainCacheSize, offHeapCacheSize); + + /* + * We have a lot of environments open in a single process, so reduce + * the cache size lest we run out of file descriptors. + */ + envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_CACHE_SIZE, "30"); + + repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, nNodes, envConfig); + + /* Increase timeout to avoid InsufficientAcksException. */ + for (RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setReplicaAckTimeout(50, TimeUnit.SECONDS); + } + + if (subDir > 0) { + RepTestUtils.createRepSubDirs(repEnvInfo, subDir); + } + + ReplicatedEnvironment master = Utils.getMaster(repEnvInfo); + EntityStore store = Utils.openStore(master, Utils.DB_NAME); + RepTestData.insertData(store, dbSize); + Utils.doSyncAndCheck(repEnvInfo); + } + + /** + * Execute a steady stream of work until the steady state phase is over. + */ + private void doSteadyState() + throws Exception { + + if (verbose) { + System.err.println("Steady state starting"); + } + + int round = 0; + failoverAgent.init(); + + while (true) { + round++; + if (verbose) { + System.err.println ("Round " + round); + } + + /* + * If doWork returns false, it means the steady state stage is + * over. In this loop, the master stays up. The replicas are + * killed in a round robin fashion. No need to sync up between + * rounds; avoiding the syncup and check of data makes for more + * variation. + */ + if (!oneRoundWorkAndFailover(round)) { + if (verbose) { + System.err.println("Steady state is over, ending test."); + } + break; + } + } + + /* + * Shutdown all nodes and sync them up using DbSync. Then check + * for node equality. + */ + ReplicatedEnvironment master = Utils.getMaster(repEnvInfo); + VLSN lastTxnEnd = RepInternal.getNonNullRepImpl(master).getVLSNIndex(). + getRange().getLastTxnEnd(); + for (RepEnvInfo info : repEnvInfo) { + info.closeEnv(); + } + + /* + * Run DbSync for each node. This is done concurrently in a thread per + * node, but the syncups are competing for resources, so the time to + * syncup and gain consistency for one node could in the worst case be + * as long as the time to do it serially for all nodes. + */ + long maxTimeoutMs = NODE_SYNC_MS * nNodes; + String maxTimeoutStr = String.valueOf(maxTimeoutMs) + " ms"; + for (RepEnvInfo info : repEnvInfo) { + info.getRepConfig().setConfigParam( + ReplicationConfig.ENV_CONSISTENCY_TIMEOUT, maxTimeoutStr); + info.getRepConfig().setConfigParam( + ReplicationConfig.ENV_SETUP_TIMEOUT, maxTimeoutStr); + } + RepTestUtils.syncupGroup(maxTimeoutMs, repEnvInfo); + + /* Re-open them to check node equality. */ + for (RepEnvInfo info : repEnvInfo) { + info.getEnvConfig().setConfigParam("je.env.runCleaner", "false"); + info.getEnvConfig().setConfigParam("je.env.runINCompressor", + "false"); + } + + RepTestUtils.restartGroup(repEnvInfo); + RepTestUtils.checkNodeEquality(lastTxnEnd, verbose, repEnvInfo); + RepTestUtils.checkUtilizationProfile(repEnvInfo); + + for (RepEnvInfo info : repEnvInfo) { + info.closeEnv(); + } + + /* Use DbSpace to check utilization. */ + } + + /** + * One round of work: the master executes updates. 
The replicas are killed + * off in the middle of a round, so that they die on a non-commit boundary, + * and need to do some rollback. + * + * @return false if the rampup stage has finished. + */ + private boolean oneRoundWorkAndFailover(int round) + throws Exception { + + boolean runAble = true; + + // Utils.getMaster re-opens the whole group. Maybe we should try + // letting the killed nodes stay down longer. + + ReplicatedEnvironment master = Utils.getMaster(repEnvInfo); + for (RepEnvInfo rep : repEnvInfo) { + logger.info( + RepInternal.getNonNullRepImpl(rep.getEnv()).dumpState()); + } + + EntityStore dbStore = Utils.openStore(master, Utils.DB_NAME); + PrimaryIndex primaryIndex = + dbStore.getPrimaryIndex(Integer.class, RepTestData.class); + + for (int whichTxn = 0; whichTxn < txnsPerRound; whichTxn++) { + Transaction txn = master.beginTransaction(null, null); + for (int i = 0; i < opsPerTxn; i++) { + + /* Do a random update here. */ + int key = random.nextInt(dbSize); + RepTestData data = new RepTestData(); + data.setKey(key); + data.setData(nextVal++); + data.setName(RepTestData.generateNameField(key)); + primaryIndex.put(txn, data); + } + + /* + * Kill some set of nodes once during this round, in the middle + * of a transaction. + */ + if (failoverAgent.killOnIteration(whichTxn)) { + roundRobinIdx = failoverAgent.shutdownNodes(round, + whichTxn, + roundRobinIdx); + } + + if (master.isValid()) { + /* + * This txn may be invalid if the master was killed by the + * failover agent. + */ + txn.commit(); + } + + /* Check whether the steady stage should break. */ + if (--steadyTxns == 0) { + runAble = false; + break; + } + } + + try { + dbStore.close(); + } catch (RuntimeException e) { + if (master.isValid()) { + throw e; + } + } + + return runAble; + } + + private void setFailoverAgent(String modeValue) + throws IllegalArgumentException { + + if (modeValue.equalsIgnoreCase("replica")) { + failoverAgent = new ReplicaFailover(); + } else if (modeValue.equalsIgnoreCase("master")) { + failoverAgent = new MasterFailover(); + } else if (modeValue.equalsIgnoreCase("hybrid")) { + failoverAgent = new HybridFailover(); + } else { + throw new IllegalArgumentException + (modeValue + + " is not a legal value for -mode. 
" + + " Only master | replica | hybrid is accepted."); + } + } + + private void parseArgs(String args[]) + throws Exception { + + for (int i = 0; i < args.length; i++) { + boolean moreArgs = i < args.length - 1; + if (args[i].equals("-h") && moreArgs) { + envRoot = new File(args[++i]); + } else if (args[i].equals("-repGroupSize") && moreArgs) { + nNodes = Integer.parseInt(args[++i]); + } else if (args[i].equals("-dbSize") && moreArgs) { + dbSize = Integer.parseInt(args[++i]); + } else if (args[i].equals("-logFileSize") && moreArgs) { + logFileSize = Long.parseLong(args[++i]); + } else if (args[i].equals("-maxDisk") && moreArgs) { + maxDisk = Long.parseLong(args[++i]); + } else if (args[i].equals("-steadyTxns") && moreArgs) { + steadyTxns = Integer.parseInt(args[++i]); + } else if (args[i].equals("-txnsPerRound") && moreArgs) { + txnsPerRound = Integer.parseInt(args[++i]); + } else if (args[i].equals("-checkpointBytes") && moreArgs) { + checkpointBytes = Long.parseLong(args[++i]); + } else if (args[i].equals("-mode") && moreArgs) { + setFailoverAgent(args[++i]); + } else if (args[i].equals("-subDir") && moreArgs) { + subDir = Integer.parseInt(args[++i]); + } else if (args[i].equals("-offheap") && moreArgs) { + offHeap = Boolean.parseBoolean(args[++i]); + } else { + usage("Unknown arg: " + args[i]); + } + } + + if (nNodes <= 2) { + throw new IllegalArgumentException + ("Replication group size should > 2!"); + } + + if (steadyTxns < txnsPerRound) { + throw new IllegalArgumentException + ("steadyTxns should be larger than txnsPerRound!"); + } + + if (failoverAgent == null) { + usage("-mode must be specified"); + } + } + + private void usage(String error) { + if (error != null) { + System.err.println(error); + } + + System.err.println + ("java " + getClass().getName() + "\n" + + " [-h ]\n" + + " [-mode \n" + + " [-repGroupSize ]\n" + + " [-dbSize ]\n" + + " [-logFileSize ]\n" + + " [-checkpointBytes ]\n" + + " [-steadyTxns ]\n" + + " [-txnsPerRound ]\n" + + " [-forceCheckpoint ]\n"); + System.exit(2); + } + + public static void main(String args[]) { + try { + ReplicationCleaning test = new ReplicationCleaning(); + test.parseArgs(args); + test.doRampup(); + test.doSteadyState(); + } catch (Throwable t) { + t.printStackTrace(System.err); + System.exit(1); + } + } +} diff --git a/test/standalone/TTLStress.java b/test/standalone/TTLStress.java new file mode 100644 index 0000000..2a2d867 --- /dev/null +++ b/test/standalone/TTLStress.java @@ -0,0 +1,829 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. 
+ */ + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.text.DateFormat; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import com.sleepycat.bind.tuple.LongBinding; +import com.sleepycat.bind.tuple.TupleBase; +import com.sleepycat.bind.tuple.TupleInput; +import com.sleepycat.bind.tuple.TupleOutput; +import com.sleepycat.je.CacheMode; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.Environment; +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.Get; +import com.sleepycat.je.OperationResult; +import com.sleepycat.je.Put; +import com.sleepycat.je.SecondaryConfig; +import com.sleepycat.je.SecondaryCursor; +import com.sleepycat.je.SecondaryDatabase; +import com.sleepycat.je.SecondaryKeyCreator; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.test.SpeedyTTLTime; +import com.sleepycat.je.utilint.TracerFormatter; + +/** + * Tests concurrent access when using TTL. + * + * Goals: + * - Read records that are expiring. + * - Delete records that are expiring. + * - Update TTL for records that are expiring, including changing TTL to zero. + * - Lock a record multiple times per txn. + * - Read key then data in separate ops. + * - Use secondaries that expire. + * - Read primary and secondary records individually in separate ops. + * - Verify expected data. + * - Tolerate only expected deviant behavior, only on expiration boundaries. + */ +public class TTLStress { + + private static final WriteOptions ONE_HOUR_TTL = + new WriteOptions().setTTL(1, TimeUnit.HOURS).setUpdateTTL(true); + + /* Must be at least long enough to empty the full queue. */ + private static final int TERMINATION_SEC = 10 * 60; + + /* Since a TTL of 1 hour is used, this is the TTL in millis. */ + private static final int FAKE_MILLIS_PER_HOUR = 100; + + /* + * Must be at least the time for a thread to wake up and do an insert. The + * insert and a read operation for that record are queued one after the + * other and may both be assigned to threads at about the same time. + */ + private static final int THREAD_SWITCH_TIME = 5000; + + /* + * The read operation expects to read the record, and then for it to + * expire, all before EXPIRATION_MAX_MILLIS. 
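+     * (For illustration: with FAKE_MILLIS_PER_HOUR = 100 and
+     * THREAD_SWITCH_TIME = 5000, this works out to (100 * 2) + 5000 =
+     * 5200 ms of real time per record.)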
+ */ + private static final int EXPIRATION_MAX_MILLIS = + (FAKE_MILLIS_PER_HOUR * 2) + THREAD_SWITCH_TIME; + + private static final DateFormat DATE_FORMAT = + TracerFormatter.makeDateFormat(); + + private static final int DEFAULT_TEST_THREADS = 10; + private static final int DEFAULT_CLEANER_THREADS = 2; + private static final int DEFAULT_DURATION_MINUTES = 30; + private static final int DEFAULT_MAIN_CACHE_MB = 200; + private static final int DEFAULT_OFFHEAP_CACHE_MB = 200; + private static final int DEFAULT_QUEUE_SIZE = 1000; + private static final String DEFAULT_HOME_DIR = "tmp"; + + public static void main(final String[] args) { + try { + printArgs(args); + final TTLStress test = new TTLStress(args); + test.runTest(); + test.close(); + System.exit(0); + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(-1); + } + } + + private Environment env; + private Database db; + private SecondaryDatabase secDb; + private int durationMinutes = DEFAULT_DURATION_MINUTES; + private ThreadPoolExecutor executor; + private final SpeedyTTLTime speedyTime = + new SpeedyTTLTime(FAKE_MILLIS_PER_HOUR); + private final AtomicInteger nInserts = new AtomicInteger(0); + private final AtomicInteger nUpdates = new AtomicInteger(0); + private final AtomicInteger nDeletions = new AtomicInteger(0); + private final AtomicInteger nDeleteExpired = new AtomicInteger(0); + private final AtomicInteger nPriReads = new AtomicInteger(0); + private final AtomicInteger nPriExpired = new AtomicInteger(0); + private final AtomicInteger nPriExpiredData = new AtomicInteger(0); + private final AtomicInteger nPriDeleted = new AtomicInteger(0); + private final AtomicInteger nPriNotFound = new AtomicInteger(0); + private final AtomicInteger nSecReads = new AtomicInteger(0); + private final AtomicInteger nSecExpired = new AtomicInteger(0); + private final AtomicInteger nSecExpiredData = new AtomicInteger(0); + private final AtomicInteger nSecDeleted = new AtomicInteger(0); + private final AtomicInteger nSecNotFound = new AtomicInteger(0); + + private TTLStress(String[] args) { + + int nTestThreads = DEFAULT_TEST_THREADS; + int queueSize = DEFAULT_QUEUE_SIZE; + int nCleanerThreads = DEFAULT_CLEANER_THREADS; + int mainCacheMb = DEFAULT_MAIN_CACHE_MB; + int offheapCacheMb = DEFAULT_OFFHEAP_CACHE_MB; + String homeDir = DEFAULT_HOME_DIR; + + /* Parse arguments. 
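+         * An illustrative invocation (every flag is optional, and the
+         * values shown are just the defaults above):
+         *
+         *   java TTLStress -h tmp -threads 10 -cleaners 2 -minutes 30
+         *       -cacheMB 200 -offheapMB 200 -queueSize 1000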
*/ + for (int i = 0; i < args.length; i += 1) { + final String arg = args[i]; + final boolean moreArgs = i < args.length - 1; + if (arg.equals("-h") && moreArgs) { + homeDir = args[++i]; + } else if (arg.equals("-threads") && moreArgs) { + nTestThreads = Integer.parseInt(args[++i]); + } else if (arg.equals("-cleaners") && moreArgs) { + nCleanerThreads = Integer.parseInt(args[++i]); + } else if (arg.equals("-minutes") && moreArgs) { + durationMinutes = Integer.parseInt(args[++i]); + } else if (arg.equals("-cacheMB") && moreArgs) { + mainCacheMb = Integer.parseInt(args[++i]); + } else if (arg.equals("-offheapMB") && moreArgs) { + offheapCacheMb = Integer.parseInt(args[++i]); + } else if (arg.equals("-queueSize") && moreArgs) { + queueSize = Integer.parseInt(args[++i]); + } else { + throw new IllegalArgumentException("Unknown arg: " + arg); + } + } + + executor = new ThreadPoolExecutor( + nTestThreads, nTestThreads, + 0L, TimeUnit.SECONDS, + new ArrayBlockingQueue(queueSize), + new ThreadPoolExecutor.AbortPolicy()); + + executor.prestartAllCoreThreads(); + + open(homeDir, nCleanerThreads, mainCacheMb, offheapCacheMb); + } + + private static void printArgs(String[] args) { + System.out.print("\nCommand line arguments:"); + for (String arg : args) { + System.out.print(' '); + System.out.print(arg); + } + System.out.println(); + } + + private void open(final String homeDir, + final int nCleanerThreads, + final int mainCacheMb, + final int offheapCacheMb) { + + final EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setAllowCreate(true); + envConfig.setTransactional(true); + envConfig.setDurability(Durability.COMMIT_NO_SYNC); + envConfig.setConfigParam( + EnvironmentConfig.LOG_FILE_MAX, + String.valueOf(1024 * 1024)); + envConfig.setConfigParam( + EnvironmentConfig.CLEANER_THREADS, + String.valueOf(nCleanerThreads)); + envConfig.setCacheSize(mainCacheMb * (1024 * 1024)); + envConfig.setOffHeapCacheSize(offheapCacheMb * (1024 * 1024)); + /* Account for very slow test machines. 
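+         * (JE's default lock timeout is 500 ms; 30 seconds keeps a slow or
+         * heavily loaded machine from failing the test with spurious lock
+         * conflicts.)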
*/ + envConfig.setLockTimeout(30, TimeUnit.SECONDS); + + env = new Environment(new File(homeDir), envConfig); + + final DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setAllowCreate(true); + dbConfig.setTransactional(true); + dbConfig.setCacheMode(CacheMode.EVICT_LN); + + db = env.openDatabase(null, "priDb", dbConfig); + + final SecondaryConfig secConfig = new SecondaryConfig(); + secConfig.setAllowCreate(true); + secConfig.setTransactional(true); + secConfig.setCacheMode(CacheMode.EVICT_LN); + secConfig.setSortedDuplicates(true); + secConfig.setKeyCreator(new SecondaryKeyCreator() { + @Override + public boolean createSecondaryKey(final SecondaryDatabase secDb, + final DatabaseEntry key, + final DatabaseEntry data, + final DatabaseEntry result) { + result.setData(key.getData()); + return true; + } + }); + + secDb = env.openSecondaryDatabase(null, "secDb", db, secConfig); + } + + private void close() throws InterruptedException { + + log("Starting shutdown"); + + executor.shutdown(); + + if (!executor.awaitTermination(TERMINATION_SEC, TimeUnit.SECONDS)) { + + System.out.println( + "Could not terminate gracefully after " + + TERMINATION_SEC + " seconds"); + + final List stillRunning = executor.shutdownNow(); + + if (!stillRunning.isEmpty()) { + + System.out.println( + "Did not empty queue during close after " + + TERMINATION_SEC + " seconds, " + stillRunning.size() + + " tasks still running."); + + System.exit(1); + } + } + + secDb.close(); + db.close(); + env.close(); + + log(String.format( + "Test succeeded %n" + + "nInserts: %,d %n" + + "nUpdates: %,d %n" + + "nDeletions: %,d %n" + + "nPriReads: %,d %n" + + "nPriExpired: %,d %n" + + "nPriExpiredData: %,d %n" + + "nPriDeleted: %,d %n" + + "nPriNotFound: %,d %n" + + "nSecReads: %,d %n" + + "nSecExpired: %,d %n" + + "nSecExpiredData: %,d %n" + + "nSecDeleted: %,d %n" + + "nSecNotFound: %,d %n" + + "nDeleteExpired: %,d ", + nInserts.get(), + nUpdates.get(), + nDeletions.get(), + nPriReads.get(), + nPriExpired.get(), + nPriExpiredData.get(), + nPriDeleted.get(), + nPriNotFound.get(), + nSecReads.get(), + nSecExpired.get(), + nSecExpiredData.get(), + nSecDeleted.get(), + nSecNotFound.get(), + nDeleteExpired.get())); + } + + private static void log(final String msg) { + synchronized (DATE_FORMAT) { + System.out.println( + DATE_FORMAT.format(System.currentTimeMillis()) + " " + msg); + } + } + + private void runTest() throws Throwable { + + final long endTime = + System.currentTimeMillis() + (durationMinutes * 60 * 1000); + + final Random rnd = new Random(123); + final BlockingQueue queue = executor.getQueue(); + + speedyTime.start(); + + while (System.currentTimeMillis() < endTime) { + + final boolean doDelete; + final boolean doUpdate; + int nOps = 3; // insert, read primary and secondary + + switch (rnd.nextInt(3)) { + case 0: + doDelete = false; + doUpdate = false; + break; + case 1: + doDelete = false; + doUpdate = true; + nOps += 1; + break; + case 2: + doDelete = true; + doUpdate = false; + nOps += 1; + break; + default: + throw new RuntimeException(); + } + + if (queue.remainingCapacity() < nOps) { + continue; + } + + final long key = rnd.nextLong(); + final AtomicBoolean abandonOp = new AtomicBoolean(false); + final AtomicLong expirationTime = new AtomicLong(0); + + executor.execute(new Write( + key, abandonOp, expirationTime, !doUpdate /*doInsert*/)); + + executor.execute(new PrimaryRead( + key, abandonOp, expirationTime, doDelete)); + + executor.execute(new SecondaryRead( + key, abandonOp, expirationTime, doDelete, 
doUpdate)); + + if (doUpdate) { + executor.execute(new Write( + key, abandonOp, expirationTime, false /*doInsert*/)); + } + + if (doDelete) { + executor.execute(new Delete( + key, abandonOp, expirationTime)); + } + } + } + + private class Write implements Runnable { + + private static final boolean DEBUG_WRITE = false; + + private final long key; + private final AtomicBoolean abandonOp; + private final AtomicLong expirationTime; + private final boolean doInsert; + + Write(final long key, + final AtomicBoolean abandonOp, + final AtomicLong expirationTime, + final boolean doInsert) { + this.key = key; + this.abandonOp = abandonOp; + this.doInsert = doInsert; + this.expirationTime = expirationTime; + } + + @Override + public void run() { + + Thread.currentThread().setName("Write"); + + try { + final DatabaseEntry keyEntry = new DatabaseEntry(); + LongBinding.longToEntry(key, keyEntry); + + final boolean useSeparateLN = (key & 1) != 0; + final byte[] dataBytes = new byte[useSeparateLN ? 20 : 8]; + final DatabaseEntry dataEntry = new DatabaseEntry(); + + TupleBase.outputToEntry( + new TupleOutput(dataBytes).writeLong(key), + dataEntry); + + final Transaction txn = env.beginTransaction(null, null); + + final long opBeforeTime = + speedyTime.realTimeToFakeTime(System.currentTimeMillis()); + + OperationResult result = db.put( + txn, keyEntry, dataEntry, + doInsert ? Put.NO_OVERWRITE : Put.OVERWRITE, + ONE_HOUR_TTL); + + final long opAfterTime = + speedyTime.realTimeToFakeTime(System.currentTimeMillis()); + + if (result == null && doInsert) { + log("Apparent duplicate random number as key: " + key); + txn.abort(); + abandonOp.set(true); + return; + } + + assertNotNull("Could not write: " + key, result); + + final long opBeforeExpTime = + writeTimeToExpirationTime(opBeforeTime); + + final long opAfterExpTime = + writeTimeToExpirationTime(opAfterTime); + + final long resultExpTime = result.getExpirationTime(); + boolean resultExpTimeMatches = false; + + for (long time = opBeforeExpTime; + time <= opAfterExpTime; + time += TTL.MILLIS_PER_HOUR) { + + if (resultExpTime == time) { + resultExpTimeMatches = true; + break; + } + } + + if (!resultExpTimeMatches) { + fail( + "key: " + key + + " opBeforeTime: " + + formatTime(opBeforeTime) + + " opAfterTime: " + + formatTime(opAfterTime) + + " opBeforeExpTime: " + + formatTime(opBeforeExpTime) + + " opAfterExpTime: " + + formatTime(opAfterExpTime) + + " resultExpTime: " + + formatTime(resultExpTime)); + } + + expirationTime.set(resultExpTime); + + if (DEBUG_WRITE) { + log( + (doInsert ? 
"Inserted " : "Updated ") + + "key: " + key + + " opBeforeTime: " + + formatTime(opBeforeTime) + + " opAfterTime: " + + formatTime(opAfterTime) + + " resultExpTime: " + + formatTime(resultExpTime)); + + result = db.get( + txn, keyEntry, dataEntry, Get.SEARCH, null); + + assertNotNull("Could not read: " + key, result); + + assertEquals( + resultExpTime, result.getExpirationTime()); + } + + txn.commit(); + + if (doInsert) { + nInserts.incrementAndGet(); + } else { + nUpdates.incrementAndGet(); + } + + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(1); + } + } + } + + private class Delete implements Runnable { + + private final long key; + private final AtomicBoolean abandonOp; + private final AtomicLong expirationTime; + + Delete(final long key, + final AtomicBoolean abandonOp, + final AtomicLong expirationTime) { + this.key = key; + this.abandonOp = abandonOp; + this.expirationTime = expirationTime; + } + + @Override + public void run() { + + Thread.currentThread().setName("Delete"); + + final long startTime = System.currentTimeMillis(); + final DatabaseEntry keyEntry = new DatabaseEntry(); + LongBinding.longToEntry(key, keyEntry); + + try { + while (true) { + + if (abandonOp.get()) { + return; + } + + final long sysTime = System.currentTimeMillis(); + final long opTime = speedyTime.realTimeToFakeTime(sysTime); + + if (sysTime - startTime > EXPIRATION_MAX_MILLIS) { + + if (TTL.isExpired(expirationTime.get())) { + nDeleteExpired.incrementAndGet(); + + } else { + fail("Did not expire: " + key + + " opTime: " + formatTime(opTime) + + " expirationTime: " + + formatTime(expirationTime.get())); + } + break; + } + + final OperationResult result = db.delete( + null, keyEntry, null); + + if (result == null) { + + if (TTL.isExpired(expirationTime.get())) { + nDeleteExpired.incrementAndGet(); + return; + } + + continue; + } + + assertEquals( + expirationTime.get(), result.getExpirationTime()); + + nDeletions.incrementAndGet(); + + return; + } + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(1); + } + } + } + + private class PrimaryRead implements Runnable { + + private final long key; + private final AtomicBoolean abandonOp; + private final AtomicLong expirationTime; + private final boolean doDelete; + + PrimaryRead(final long key, + final AtomicBoolean abandonOp, + final AtomicLong expirationTime, + final boolean doDelete) { + this.key = key; + this.abandonOp = abandonOp; + this.expirationTime = expirationTime; + this.doDelete = doDelete; + } + + @Override + public void run() { + + Thread.currentThread().setName("PrimaryRead"); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final long startTime = System.currentTimeMillis(); + OperationResult result; + + while (true) { + + if (abandonOp.get()) { + return; + } + + try (final Cursor cursor = db.openCursor(null, null)) { + + final long sysTime = System.currentTimeMillis(); + final long opTime = speedyTime.realTimeToFakeTime(sysTime); + + if (sysTime - startTime > EXPIRATION_MAX_MILLIS) { + + if (TTL.isExpired(expirationTime.get())) { + nPriExpired.incrementAndGet(); + + } else if (doDelete) { + nPriDeleted.incrementAndGet(); + + } else { + fail("Did not expire: " + key + + " opTime: " + formatTime(opTime) + + " expirationTime: " + + formatTime(expirationTime.get())); + } + break; + } + + nPriReads.incrementAndGet(); + + LongBinding.longToEntry(key, keyEntry); + final boolean readDataSeparately = (key % 3) == 0; + + result = cursor.get( + 
keyEntry, + readDataSeparately ? null : dataEntry, + Get.SEARCH, null); + + if (result == null) { + if (TTL.isExpired(expirationTime.get())) { + nPriExpired.incrementAndGet(); + break; + } + continue; + } + + assertEquals(key, LongBinding.entryToLong(keyEntry)); + assertEquals( + expirationTime.get(), result.getExpirationTime()); + + if (!readDataSeparately) { + final TupleInput input = + TupleBase.entryToInput(dataEntry); + assertEquals(key, input.readLong()); + } + + result = cursor.get( + keyEntry, dataEntry, Get.CURRENT, null); + + if (result == null) { + if (readDataSeparately && + TTL.isExpired(expirationTime.get())) { + nPriExpiredData.incrementAndGet(); + break; + } + fail("Could not read locked record: " + key); + } + + assertEquals(key, LongBinding.entryToLong(keyEntry)); + final TupleInput input = TupleBase.entryToInput(dataEntry); + assertEquals(key, input.readLong()); + assertEquals( + expirationTime.get(), result.getExpirationTime()); + + } catch (Throwable e) { + e.printStackTrace(System.out); + System.exit(1); + } + } + } + } + + private class SecondaryRead implements Runnable { + + private final long key; + private final AtomicBoolean abandonOp; + private final AtomicLong expirationTime; + private final boolean doDelete; + private final boolean doUpdate; + + SecondaryRead(final long key, + final AtomicBoolean abandonOp, + final AtomicLong expirationTime, + final boolean doDelete, + final boolean doUpdate) { + this.key = key; + this.abandonOp = abandonOp; + this.expirationTime = expirationTime; + this.doDelete = doDelete; + this.doUpdate = doUpdate; + } + + @Override + public void run() { + + Thread.currentThread().setName("SecondaryRead"); + + final DatabaseEntry keyEntry = new DatabaseEntry(); + final DatabaseEntry pKeyEntry = new DatabaseEntry(); + final DatabaseEntry dataEntry = new DatabaseEntry(); + final long startTime = System.currentTimeMillis(); + OperationResult result; + + while (true) { + + if (abandonOp.get()) { + return; + } + + try (final SecondaryCursor cursor = + secDb.openCursor(null, null)) { + + final long sysTime = System.currentTimeMillis(); + final long opTime = speedyTime.realTimeToFakeTime(sysTime); + + if (sysTime - startTime > EXPIRATION_MAX_MILLIS) { + + if (TTL.isExpired(expirationTime.get())) { + nSecExpired.incrementAndGet(); + + } else if (doDelete) { + nSecDeleted.incrementAndGet(); + + } else { + fail("Did not expire: " + key + + " currentTime: " + formatTime(opTime) + + " expirationTime: " + + formatTime(expirationTime.get())); + } + break; + } + + nSecReads.incrementAndGet(); + + LongBinding.longToEntry(key, keyEntry); + + /* + * If we read data separately when an update or deletion is + * being done, this would cause deadlocks. + */ + final boolean readDataSeparately = + !doUpdate && !doDelete && (key % 3) == 0; + + result = cursor.get( + keyEntry, pKeyEntry, + readDataSeparately ? 
null : dataEntry,
+                        Get.SEARCH, null);
+
+                    if (result == null) {
+                        if (TTL.isExpired(expirationTime.get())) {
+                            nSecExpired.incrementAndGet();
+                            break;
+                        }
+                        continue;
+                    }
+
+                    assertEquals(key, LongBinding.entryToLong(keyEntry));
+                    assertEquals(
+                        expirationTime.get(), result.getExpirationTime());
+
+                    assertTrue(Arrays.equals(
+                        keyEntry.getData(), pKeyEntry.getData()));
+
+                    if (!readDataSeparately) {
+                        final TupleInput input =
+                            TupleBase.entryToInput(dataEntry);
+                        assertEquals(key, input.readLong());
+                    }
+
+                    result = cursor.get(
+                        keyEntry, pKeyEntry, dataEntry, Get.CURRENT, null);
+
+                    if (result == null) {
+                        if (readDataSeparately &&
+                            TTL.isExpired(expirationTime.get())) {
+                            nSecExpiredData.incrementAndGet();
+                            break;
+                        }
+                        fail("Could not read locked record: " + key +
+                             " readDataSeparately: " + readDataSeparately);
+                    }
+
+                    assertEquals(key, LongBinding.entryToLong(keyEntry));
+
+                    assertTrue(Arrays.equals(
+                        keyEntry.getData(), pKeyEntry.getData()));
+
+                    final TupleInput input = TupleBase.entryToInput(dataEntry);
+                    assertEquals(key, input.readLong());
+                    assertEquals(
+                        expirationTime.get(), result.getExpirationTime());
+
+                } catch (Throwable e) {
+                    e.printStackTrace(System.out);
+                    System.exit(1);
+                }
+            }
+        }
+    }
+
+    private long writeTimeToExpirationTime(final long writeTime) {
+
+        final int expiration = TTL.systemTimeToExpiration(
+            writeTime + TTL.MILLIS_PER_HOUR,
+            true /*hours*/);
+
+        return TTL.expirationToSystemTime(expiration, true /*hours*/);
+    }
+
+    private String formatTime(final long time) {
+        synchronized (DATE_FORMAT) {
+            return DATE_FORMAT.format(time);
+        }
+    }
+
+}
diff --git a/test/standalone/TemporaryDbStress.java b/test/standalone/TemporaryDbStress.java
new file mode 100644
index 0000000..51321d1
--- /dev/null
+++ b/test/standalone/TemporaryDbStress.java
@@ -0,0 +1,275 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+import java.io.File;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
+import com.sleepycat.bind.tuple.IntegerBinding;
+import com.sleepycat.bind.tuple.StringBinding;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockConflictException;
+import com.sleepycat.je.OperationStatus;
+
+/*
+ * A temporary database may throw a log-file-not-found (LFNF) exception when
+ * it runs with a high level of Cleaner and Evictor concurrency.
+ *
+ * This test simulates such a case: a small cache, a relatively large
+ * database, and large data values, so that there is constant eviction during
+ * the test; multiple cleaner threads are also enabled, so the test runs with
+ * high concurrency.
+ *
+ * The UpdateThread instances update random records in this database, and the
+ * test starts 4 such threads, so there are many concurrent updates, which
+ * creates even more eviction.
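+ *
+ * (With the defaults below, 20,000 records of roughly 2,000 bytes against a
+ * 10MB cache, the working set is about 40MB, four times the cache size, so
+ * eviction runs more or less continuously.)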
+ *
+ * A commonly used command line for running this program is:
+ *
+ *     java TemporaryDbStress -h HOME
+ */
+public class TemporaryDbStress {
+
+    private String envHome;
+    private static final String DB_NAME = "testDb";
+    /* Database size. */
+    private int dbSize = 20000;
+    /* Number of updating threads. */
+    private int numThreads = 4;
+    /* Use a large number of cleaner threads. */
+    private String numCleanerThreads = "4";
+
+    /* Set a small cache size: 10MB. */
+    private int cacheSize = 10 * 1024 * 1024;
+    /* Use a large record value size, so that more eviction is needed. */
+    private int dataSize = 2000;
+    private int subDir = 0;
+    /* Total update operations. */
+    private volatile int totalOps = 50000000;
+    /* The data field for a value. */
+    private String dataValue = "";
+
+    private Environment env;
+    private Database db;
+
+    public static void main(String args[]) {
+        try {
+            TemporaryDbStress test = new TemporaryDbStress();
+            test.parseArgs(args);
+            test.doWork();
+        } catch (Throwable e) {
+            e.printStackTrace(System.out);
+            System.exit(-1);
+        }
+    }
+
+    /* Output command-line input arguments to the log. */
+    private void printArgs(String[] args) {
+        System.out.print("\nCommand line arguments:");
+        for (String arg : args) {
+            System.out.print(' ');
+            System.out.print(arg);
+        }
+        System.out.println();
+    }
+
+    private void usage(String error) {
+        if (error != null) {
+            System.err.println(error);
+        }
+        System.err.println
+            ("java " + getClass().getName() + '\n' +
+             "     [-h <env home dir>] [-cacheSize <bytes>]\n" +
+             "     [-dataSize <bytes>] [-dbSize <records>]\n" +
+             "     [-threads <updating threads>]\n" +
+             "     [-cleanerThreads <cleaner threads>]\n" +
+             "     [-subDir <log data directories>]\n" +
+             "     [-totalOps <update operations>]\n");
+        System.exit(1);
+    }
+
+    private void parseArgs(String args[]) {
+        try {
+            if (args.length == 0) {
+                throw new IllegalArgumentException();
+            }
+            /* Parse command-line input arguments. */
+            for (int i = 0; i < args.length; i += 1) {
+                String arg = args[i];
+                boolean moreArgs = i < args.length - 1;
+                if (arg.equals("-h") && moreArgs) {
+                    envHome = args[++i];
+                } else if (arg.equals("-cacheSize") && moreArgs) {
+                    cacheSize = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-dataSize") && moreArgs) {
+                    dataSize = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-dbSize") && moreArgs) {
+                    dbSize = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-threads") && moreArgs) {
+                    numThreads = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-cleanerThreads") && moreArgs) {
+                    numCleanerThreads = args[++i];
+                } else if (arg.equals("-totalOps") && moreArgs) {
+                    totalOps = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-subDir") && moreArgs) {
+                    subDir = Integer.parseInt(args[++i]);
+                } else {
+                    usage("Unknown arg: " + arg);
+                }
+            }
+            printArgs(args);
+        } catch (IllegalArgumentException e) {
+            usage("Illegal arguments!");
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    /* Do the work. */
+    public void doWork()
+        throws Exception {
+
+        openEnv();
+
+        System.out.println("Starting test.....");
+
+        /* Insert some records first. */
+        insertRecords();
+
+        CountDownLatch startSignal = new CountDownLatch(1);
+        CountDownLatch endSignal = new CountDownLatch(numThreads);
+
+        /* Start the threads. */
+        for (int i = 0; i < numThreads; i++) {
+            UpdateThread thread = new UpdateThread(startSignal, endSignal);
+            thread.start();
+        }
+        startSignal.countDown();
+
+        endSignal.await();
+
+        System.out.println("Test finished.");
+        closeEnv();
+    }
+
+    /* Open the Environment and insert some data into the database.
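+     * The database is opened with dbConfig.setTemporary(true): a temporary
+     * database is not persistent and is removed automatically when it is
+     * closed, which is the configuration the LFNF scenario above exercises.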
+ */
+    private void openEnv()
+        throws Exception {

+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setCacheSize(cacheSize);
+        envConfig.setConfigParam(EnvironmentConfig.CLEANER_THREADS,
+                                 numCleanerThreads);
+
+        if (subDir > 0) {
+            envConfig.setConfigParam
+                (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, subDir + "");
+            Utils.createSubDirs(new File(envHome), subDir);
+        }
+
+        env = new Environment(new File(envHome), envConfig);
+
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTemporary(true);
+        db = env.openDatabase(null, DB_NAME, dbConfig);
+    }
+
+    private void insertRecords()
+        throws Exception {
+
+        for (int i = 1; i <= dataSize; i++) {
+            dataValue += "a";
+        }
+
+        DatabaseEntry key = new DatabaseEntry();
+        DatabaseEntry data = new DatabaseEntry();
+        for (int i = 0; i < dbSize; i++) {
+            IntegerBinding.intToEntry(i, key);
+            StringBinding.stringToEntry(dataValue, data);
+            db.put(null, key, data);
+        }
+    }
+
+    /* Close the database and Environment. */
+    private void closeEnv()
+        throws Exception {
+
+        if (db != null) {
+            db.close();
+        }
+
+        if (env != null) {
+            env.close();
+        }
+    }
+
+    /* The updating thread on the temporary database. */
+    class UpdateThread extends Thread {
+        private final CountDownLatch start;
+        private final CountDownLatch end;
+
+        public UpdateThread(CountDownLatch start, CountDownLatch end) {
+            this.start = start;
+            this.end = end;
+        }
+
+        public void run() {
+            try {
+                start.await();
+
+                Random random = new Random();
+                DatabaseEntry key = new DatabaseEntry();
+                DatabaseEntry data = new DatabaseEntry();
+                /* Do updates on random records in the database. */
+                while (true) {
+                    int currentIndex;
+
+                    /*
+                     * totalOps is shared by all updating threads, so the
+                     * decrement must be guarded by a monitor the threads
+                     * share: the enclosing test instance, not this thread.
+                     */
+                    synchronized (TemporaryDbStress.this) {
+                        currentIndex = totalOps;
+                        if (--totalOps <= 0) {
+                            break;
+                        }
+                    }
+
+                    /*
+                     * Update a random record outside the lock, retrying a
+                     * bounded number of times on lock conflicts.
+                     */
+                    IntegerBinding.intToEntry(random.nextInt(dbSize), key);
+                    StringBinding.stringToEntry
+                        (dataValue + currentIndex, data);
+                    int retries = 10;
+                    while (retries > 0) {
+                        try {
+                            OperationStatus status =
+                                db.put(null, key, data);
+                            if (status != OperationStatus.SUCCESS) {
+                                System.err.println
+                                    ("Update failed for operation " +
+                                     currentIndex + ".");
+                            }
+                            break;
+                        } catch (LockConflictException e) {
+                            retries--;
+                        }
+                    }
+                }
+
+                end.countDown();
+            } catch (Throwable e) {
+                e.printStackTrace();
+                System.exit(1);
+            }
+        }
+    }
+}
diff --git a/test/standalone/TxnInMultiThreadsStress.java b/test/standalone/TxnInMultiThreadsStress.java
new file mode 100644
index 0000000..ad3dee0
--- /dev/null
+++ b/test/standalone/TxnInMultiThreadsStress.java
@@ -0,0 +1,583 @@
+/*-
+ * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle Berkeley
+ * DB Java Edition made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
+ * license and additional information.
+ */
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CountDownLatch;
+
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.EnvironmentFailureException;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.config.EnvironmentParams;
+
+/**
+ * Transaction objects are advertised to work when used from multiple threads.
+ * This stress test serves to confirm such concurrency.
+ *
+ * The test simulates the following scenario: there are three threads. One is
+ * the CRUD thread, which creates a new txn object and processes CRUD
+ * operations within that txn. The other two threads are the commit thread
+ * and the abort thread, which commit/abort the same txn created by the CRUD
+ * thread.
+ * [#19513]
+ */
+
+public class TxnInMultiThreadsStress {
+    private String homeDir = "tmp";
+    private Environment env;
+    private Database db;
+    private Transaction txn;
+    private BlockingQueue<CRUDOperation> CRUDOps;
+    private int preInsertNum = 5000;
+    private int NumOperations = 200000;
+    private int numThreads = 1;
+    private boolean commitThread = false;
+    private boolean abortThread = false;
+    private Random random = new Random();
+    private int keySize = 3000;
+    private List<OperationThread> threads = new ArrayList<OperationThread>();
+    private List<DatabaseEntry> keyEntrys = new ArrayList<DatabaseEntry>();
+    private List<Transaction> txnContainer = new ArrayList<Transaction>();
+    private List<RuntimeException> unExpectedExceptions =
+        new ArrayList<RuntimeException>();
+    private Map<String, Exception> exceptionCollection =
+        new HashMap<String, Exception>();
+
+    void openEnv() {
+        EnvironmentConfig envConfig = new EnvironmentConfig();
+        envConfig.setAllowCreate(true);
+        envConfig.setTransactional(true);
+        envConfig.setConfigParam
+            (EnvironmentParams.LOCK_TIMEOUT.getName(), "1000 ms");
+        try {
+            File envHome = new File(homeDir);
+            env = new Environment(envHome, envConfig);
+        } catch (Error e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+        DatabaseConfig dbConfig = new DatabaseConfig();
+        dbConfig.setAllowCreate(true);
+        dbConfig.setTransactional(true);
+        dbConfig.setSortedDuplicates(true);
+        db = env.openDatabase(null, "testDB", dbConfig);
+    }
+
+    void closeEnv() {
+        for (int i = 0; i < txnContainer.size(); i++) {
+            Transaction txn = txnContainer.get(i);
+            if (env.isValid()) {
+                txn.abort();
+            }
+        }
+        try {
+            if (env.isValid()) {
+                db.close();
+            }
+            env.close();
+        } catch (Throwable e) {
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    public static void main(String args[])
+        throws Exception {
+
+        try {
+            new TxnInMultiThreadsStress().runTest(args);
+            System.exit(0);
+        } catch (Throwable e) {
+            e.printStackTrace(System.out);
+            System.exit(1);
+        }
+    }
+
+    /* Output command-line input arguments to the log.
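+     * (For example, a run started with "-h tmp -c -a" logs
+     * "Command line arguments: -h tmp -c -a".)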
+ */
+    private void printArgs(String[] args) {
+        System.out.print("\nCommand line arguments:");
+        for (String arg : args) {
+            System.out.print(' ');
+            System.out.print(arg);
+        }
+        System.out.println();
+    }
+
+    private void usage(String error) {
+
+        if (error != null) {
+            System.err.println(error);
+        }
+        System.err.println
+            ("java " + getClass().getName() + '\n' +
+             "     [-h <env home dir>] [-c] [-a] [-ops <operations>]\n" +
+             "     [-keySize <bytes>]\n");
+        System.exit(1);
+    }
+
+    private void runTest(String args[])
+        throws Exception {
+
+        try {
+            if (args.length == 0) {
+                throw new IllegalArgumentException();
+            }
+            /* Parse command-line input arguments. */
+            for (int i = 0; i < args.length; i += 1) {
+                String arg = args[i];
+                boolean moreArgs = i < args.length - 1;
+                if (arg.equals("-c")) {
+                    commitThread = true;
+                    numThreads++;
+                } else if (arg.equals("-a")) {
+                    abortThread = true;
+                    numThreads++;
+                } else if (arg.equals("-h") && moreArgs) {
+                    homeDir = args[++i];
+                } else if (arg.equals("-ops") && moreArgs) {
+                    NumOperations = Integer.parseInt(args[++i]);
+                } else if (arg.equals("-keySize") && moreArgs) {
+                    keySize = Integer.parseInt(args[++i]);
+                } else {
+                    usage("Unknown arg: " + arg);
+                }
+            }
+            printArgs(args);
+        } catch (IllegalArgumentException e) {
+            usage("Illegal arguments!");
+            e.printStackTrace();
+            System.exit(1);
+        }
+
+        openEnv();
+        CRUDOps = createOperations(NumOperations);
+        txn = env.beginTransaction(null, null);
+        txnContainer.add(txn);
+        CountDownLatch endSignal = new CountDownLatch(numThreads);
+        //insertData(preInsertNum);
+        createAndStartThreads(endSignal);
+        closeEnv();
+        if (unExpectedExceptions.isEmpty()) {
+            System.out.println("Successful completion.");
+        } else {
+            for (int i = 0; i < unExpectedExceptions.size(); i++) {
+                unExpectedExceptions.get(i).printStackTrace();
+            }
+            System.out.println("Test failed.");
+            System.exit(1);
+        }
+    }
+
+    /* Initialize the threads that will be used to run the tests.
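+     * The CRUD thread always runs; the optional commit and abort threads
+     * operate on the same shared Transaction created in runTest, racing to
+     * end it out from under the CRUD thread, which is the behavior under
+     * test.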
*/ + private void createAndStartThreads(CountDownLatch endSignal) { + OperationThread abort = null; + OperationThread commit = null; + if (abortThread) { + abort= new AbortThread(endSignal); + abort.setName("Abort_thread"); + threads.add(abort); + } + if (commitThread) { + commit= new CommitThread(endSignal); + commit.setName("Commit_thread"); + threads.add(commit); + } + + CRUDThread crud= new CRUDThread(endSignal); + crud.setName("CRUD_thread"); + threads.add(crud); + crud.start(); + + if (abortThread) { + abort.start(); + } + + if (commitThread) { + commit.start(); + } + + try { + endSignal.await(); + } catch (InterruptedException e) { + e.printStackTrace(); + System.exit(1); + } + } + + private void insertData(int num) { + for (int i = 0; i < num; i++) { + DatabaseEntry key = genNewKey(); + DatabaseEntry dataEntry = new DatabaseEntry(new byte[1]); + Transaction txn = env.beginTransaction(null, null); + db.put(txn, key, dataEntry); + txn.commit(); + } + } + + private ArrayBlockingQueue createOperations(int NumOps) { + ArrayBlockingQueue ops = + new ArrayBlockingQueue(NumOperations); + CRUD.setDistribution(new int[] {40, 20, 20, 20}); + CRUD op; + for (int i = 0; i < NumOps; i++) { + op = CRUD.nextRandom(); + DatabaseEntry key; + switch (op) { + case CREATE: + key = genNewKey(); + ops.add(new Create(db, key)); + break; + case READ: + key = genExistingKey(); + ops.add(new Read(db, key)); + break; + case UPDATE: + key = genExistingKey(); + ops.add(new Update(db, key)); + break; + case DELETE: + key = genExistingKey(); + ops.add(new Delete(db, key)); + break; + default: + throw new IllegalStateException("Unknown op: " + op); + } + } + return ops; + } + + private DatabaseEntry genNewKey() { + byte[] key = new byte[keySize]; + random.nextBytes(key); + DatabaseEntry keyEntry = new DatabaseEntry(key); + keyEntrys.add(keyEntry); + return keyEntry; + } + + private DatabaseEntry genExistingKey() { + DatabaseEntry keyEntry; + if (keyEntrys.size() > 0) { + int index = random.nextInt(keyEntrys.size()); + keyEntry = keyEntrys.get(index); + } else { + keyEntry = new DatabaseEntry(new byte[keySize]); + } + return keyEntry; + } + + private DatabaseEntry genData(int length) { + return new DatabaseEntry(new byte[1]); + } + + enum CRUD { + /* The operations */ + CREATE, READ, UPDATE, DELETE; + + /* The distribution of CRUD operations -- should add up to 100% */ + private int percent; + + /* The threshold values used to guide randomization, range from 0-100 */ + private int threshold; + + final static int size = CRUD.values().length; + + private static Random opRandom = new Random(1); + + /* + * Sets the distribution that will be used for the random generation of + * CRUD operations. + */ + static void setDistribution(int distribution[]) { + if (distribution.length != size) { + throw new IllegalArgumentException + ("incorrect argument length: " + distribution.length); + } + + int threshold = 0; + int i = 0; + for (CRUD op : CRUD.values()) { + op.percent = distribution[i++]; + threshold = (op.threshold = op.percent + threshold); + } + + if (threshold != 100) { + throw new IllegalArgumentException + ("Percentage should add to 100 not " + threshold); + } + } + + /* + * Returns the next random CRUD operation based on the current distribution + * setup. 
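+     * For example, with the distribution {40, 20, 20, 20} set above, the
+     * cumulative thresholds are 40, 60, 80 and 100, so a random draw of 57
+     * falls under READ's threshold of 60 and selects READ.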
+ */ + static CRUD nextRandom() { + int rpercent = opRandom.nextInt(100); + for (CRUD op : CRUD.values()) { + if (rpercent < op.threshold) { + return op; + } + } + assert (false); + return null; + } + } + + abstract class CRUDOperation { + protected final Database db; + protected final DatabaseEntry key; + protected Random random = new Random(); + + public CRUDOperation(Database db, DatabaseEntry key) { + this.db=db; + this.key = key; + } + abstract OperationStatus execute(Transaction txn) + throws DatabaseException; + + + } + + /* Create */ + class Create extends CRUDOperation { + Create(Database db, DatabaseEntry key) { + super(db, key); + } + + @Override + OperationStatus execute(Transaction txn) throws DatabaseException { + DatabaseEntry dataEntry = genData(1); + if (txn.isValid()) { + return db.putNoOverwrite(txn, key, dataEntry); + } else { + return null; + } + } + } + + /* Read */ + class Read extends CRUDOperation { + Read(Database db, DatabaseEntry key) { + super(db, key); + } + + @Override + OperationStatus execute(Transaction txn) throws DatabaseException { + DatabaseEntry dataEntry = new DatabaseEntry(); + if (txn.isValid()) { + return db.get(txn, key, dataEntry, null); + } else { + return null; + } + } + } + + /* Update */ + class Update extends CRUDOperation { + Update(Database db, DatabaseEntry key) { + super(db, key); + } + + @Override + OperationStatus execute(Transaction txn) throws DatabaseException { + DatabaseEntry dataEntry = genData(2); + if (txn.isValid()) { + return db.put(txn, key, dataEntry); + } else { + return null; + } + } + } + + /* Delete */ + class Delete extends CRUDOperation { + Delete(Database db, DatabaseEntry key) { + super(db, key); + } + + @Override + OperationStatus execute(Transaction txn) throws DatabaseException { + if (txn.isValid()) { + return db.delete(txn, key); + } else { + return null; + } + } + } + + + abstract class OperationThread extends Thread { + Random random = new Random(); + CountDownLatch endSignal; + + public OperationThread(CountDownLatch endSignal) { + this.endSignal = endSignal; + } + + @Override + public abstract void run(); + + } + + class CRUDThread extends OperationThread { + public CRUDThread(CountDownLatch endSignal) { + super(endSignal); + } + + + @Override + public void run() { + boolean ifRun = true; + try { + while(!CRUDOps.isEmpty() && ifRun) { + try { + if (!env.isValid()) { + break; + } + if (!txn.isValid()) { + txn = env.beginTransaction(null, null); + txnContainer.add(txn); + } + CRUDOps.take().execute(txn); + } catch (IllegalStateException e) { + exceptionCollection.put + (e.getStackTrace()[0].toString() + + e.getStackTrace().length, e); + } catch (IllegalArgumentException e) { + exceptionCollection.put + (e.getStackTrace()[0].toString() + + e.getStackTrace().length, e); + } catch (EnvironmentFailureException e) { + exceptionCollection.put + (e.getStackTrace()[0].toString() + + e.getStackTrace().length, e); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (RuntimeException e) { + unExpectedExceptions.add(e); + throw e; + } + if (endSignal.getCount() < 3) { + ifRun = false; + } + } + }finally { + this.endSignal.countDown(); + } + } + } + + class AbortThread extends OperationThread{ + public AbortThread(CountDownLatch endSignal) { + super(endSignal); + } + + @Override + public void run() { + try { + boolean ifRun = true; + while (ifRun) { + try { + int waitTime = random.nextInt(25) + 150; + Thread.sleep(waitTime); + } catch (InterruptedException e) { + e.printStackTrace(); + } + if 
(!env.isValid()) { + break; + } + try { + txn.abort(); + } catch (IllegalStateException e) { + exceptionCollection.put + (e.getStackTrace()[0].toString() + + e.getStackTrace().length, e); + } catch (EnvironmentFailureException e) { + exceptionCollection.put + (e.getStackTrace()[0].toString() + + e.getStackTrace().length, e); + } catch (RuntimeException e) { + unExpectedExceptions.add(e); + throw e; + } + if (endSignal.getCount() < 3) { + ifRun = false; + } + } + } finally { + this.endSignal.countDown(); + } + } + } + + class CommitThread extends OperationThread{ + public CommitThread(CountDownLatch endSignal) { + super(endSignal); + } + + @Override + public void run() { + try { + boolean ifRun = true; + while (ifRun) { + try { + int waitTime = random.nextInt(5) + 5; + Thread.sleep(waitTime); + } catch (InterruptedException e) { + e.printStackTrace(); + } + if (!env.isValid()) { + break; + } + if (txn.isValid()) { + try { + txn.commit(); + } catch (IllegalStateException e) { + exceptionCollection.put + (e.getStackTrace()[0].toString() + + e.getStackTrace().length, e); + + } catch (EnvironmentFailureException e) { + exceptionCollection.put + (e.getStackTrace()[0].toString() + + e.getStackTrace().length, e); + + } catch (RuntimeException e) { + unExpectedExceptions.add(e); + throw e; + } + } + if (endSignal.getCount() < 3) { + ifRun = false; + } + } + } finally { + this.endSignal.countDown(); + } + } + } +} diff --git a/test/standalone/UtilizationChecker.java b/test/standalone/UtilizationChecker.java new file mode 100644 index 0000000..bcd7c98 --- /dev/null +++ b/test/standalone/UtilizationChecker.java @@ -0,0 +1,109 @@ +/*- + * Copyright (C) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle Berkeley + * DB Java Edition made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle Berkeley DB Java Edition for a copy of the + * license and additional information. + */ + +import java.io.File; + +import com.sleepycat.je.EnvironmentConfig; +import com.sleepycat.je.rep.utilint.RepTestUtils; +import com.sleepycat.je.rep.utilint.RepTestUtils.RepEnvInfo; + +/** + * Run RepTestUtils.checkUtilizationProfile on a rep group. + */ +public class UtilizationChecker { + + private RepEnvInfo[] repEnvInfo; + + /* Environment home root for whole replication group. */ + private File envRoot; + private int nNodes = 5; + private int subDir = 0; + + public static void main(String args[]) { + try { + UtilizationChecker utilizationChecker = new UtilizationChecker(); + utilizationChecker.parseArgs(args); + utilizationChecker.setup(); + utilizationChecker.check(); + } catch (Throwable t) { + t.printStackTrace(System.err); + System.exit(1); + } + } + + /** + * Grow the data store to the appropriate size for the steady state + * portion of the test. + */ + private void setup() + throws Exception { + + EnvironmentConfig envConfig = new EnvironmentConfig(); + envConfig.setTransactional(true); + envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); + if (subDir > 0) { + envConfig.setConfigParam + (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, subDir + ""); + } + + /* + * We have a lot of environments open in a single process, so reduce + * the cache size lest we run out of file descriptors. 
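+         * (je.log.fileCacheSize caps the number of log-file handles JE keeps
+         * open per environment, so the whole group needs at most about
+         * 30 * nNodes descriptors for log files.)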
+    private void check() {
+
+        System.out.println("Check starting");
+
+        RepTestUtils.restartGroup(repEnvInfo);
+        RepTestUtils.checkUtilizationProfile(System.out, repEnvInfo);
+        System.out.println("Check finishing");
+        for (RepEnvInfo info : repEnvInfo) {
+            info.closeEnv();
+        }
+    }
+
+    private void parseArgs(String[] args)
+        throws Exception {
+
+        for (int i = 0; i < args.length; i++) {
+            boolean moreArgs = i < args.length - 1;
+            if (args[i].equals("-h") && moreArgs) {
+                envRoot = new File(args[++i]);
+            } else if (args[i].equals("-repGroupSize") && moreArgs) {
+                nNodes = Integer.parseInt(args[++i]);
+            } else if (args[i].equals("-subDir") && moreArgs) {
+                subDir = Integer.parseInt(args[++i]);
+            } else {
+                usage("Unknown arg: " + args[i]);
+            }
+        }
+    }
+
+    private void usage(String error) {
+        if (error != null) {
+            System.err.println(error);
+        }
+
+        System.err.println
+            ("java " + getClass().getName() + "\n" +
+             "     [-h <envRoot>]\n" +
+             "     [-repGroupSize <nNodes>]\n" +
+             "     [-subDir <subDirNumber>]");
+    }
+}

[The diff header and opening lines of the next file are missing from the
source; its surviving content, a set of replication test utilities, resumes
mid-method below.]

+        if (subDir > 0) {
+            envConfig.setConfigParam
+                (EnvironmentConfig.LOG_N_DATA_DIRECTORIES, subDir + "");
+        }
+
+        envConfig.setCacheSize(mainCacheSize);
+        envConfig.setOffHeapCacheSize(offHeapCacheSize);
+
+        return envConfig;
+    }
+
+    /* Start up the group and return the generated RepEnvInfo array. */
+    static RepEnvInfo[] setupGroup(File envRoot,
+                                   int nNodes,
+                                   long fileSize,
+                                   long maxDisk,
+                                   long checkpointBytes,
+                                   int subDir,
+                                   long mainCacheSize,
+                                   long offHeapCacheSize)
+        throws Exception {
+
+        RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(
+            envRoot, nNodes,
+            createEnvConfig(
+                fileSize, maxDisk, checkpointBytes, subDir, mainCacheSize,
+                offHeapCacheSize));
+
+        if (subDir > 0) {
+            RepTestUtils.createRepSubDirs(repEnvInfo, subDir);
+        }
+
+        if (nNodes == 2) {
+            repEnvInfo[0].getRepConfig().setDesignatedPrimary(true);
+        }
+
+        return repEnvInfo;
+    }
+
+    /*
+     * Join the ReplicatedEnvironments of a group of RepEnvInfo and return
+     * the authoritative master (wait for the election to quiesce).
+     */
+    public static ReplicatedEnvironment getMaster(RepEnvInfo[] repEnvInfo) {
+        return RepTestUtils.getMaster(repEnvInfo, true /*openIfNeeded*/);
+    }
+
+    public static ReplicatedEnvironment assignMaster(RepEnvInfo[] repEnvInfo,
+                                                     int masterId,
+                                                     boolean restart)
+        throws Exception {
+
+        assert repEnvInfo.length == 2 :
+            "This method may only be called on a replication group of size 2.";
+
+        ReplicationMutableConfig newConfig = new ReplicationMutableConfig();
+        newConfig.setDesignatedPrimary(true);
+
+        if (restart) {
+            repEnvInfo[masterId - 1].getRepConfig().setDesignatedPrimary(false);
+            repEnvInfo[masterId - 1].openEnv();
+        } else {
+            repEnvInfo[2 - masterId].getEnv().setRepMutableConfig(newConfig);
+        }
+
+        assert repEnvInfo[2 - masterId].getEnv().getState().isMaster() :
+            "Failed to establish a master.";
+
+        return repEnvInfo[2 - masterId].getEnv();
+    }
+
+    /* Create or open a database for the test. */
+    public static EntityStore openStore(ReplicatedEnvironment repEnv,
+                                        String dbName)
+        throws DatabaseException {
+
+        StoreConfig config = new StoreConfig();
+        config.setAllowCreate(true);
+        config.setTransactional(true);
+
+        return new EntityStore(repEnv, dbName, config);
+    }
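+    /*
+     * How the helpers above compose (a hypothetical caller, for
+     * illustration only; the argument values are made up):
+     *
+     *   RepEnvInfo[] group = setupGroup(envRoot, 2, 10_000_000L, 0L,
+     *                                   100_000L, 0, cacheSize, 0L);
+     *   ReplicatedEnvironment master = getMaster(group);
+     *   EntityStore store = openStore(master, "testStore");
+     *
+     * For a two-node group, setupGroup marks node 1 as Designated Primary
+     * so the pair can retain a master even when the other node is down.
+     */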
+    /**
+     * Sync replicas to the master, and check that all nodes have the same
+     * contents.
+     */
+    public static void doSyncAndCheck(RepEnvInfo[] replicators)
+        throws Exception {
+
+        /* Do the sync and check node equality. */
+        VLSN commitVLSN =
+            RepTestUtils.syncGroupToLastCommit(replicators,
+                                               replicators.length);
+        RepTestUtils.checkNodeEquality(commitVLSN, VERBOSE, replicators);
+    }
+
+    /* Check the log cleaning and close the replicas. */
+    public static void closeEnvAndCheckLogCleaning(RepEnvInfo[] repEnvInfo,
+                                                   long[] fileDeletions,
+                                                   boolean checkCleaning)
+        throws Exception {
+
+        /* Initialize an array recording each node's largest log file number. */
+        long[] lastFileNumbers = new long[repEnvInfo.length];
+
+        if (checkCleaning) {
+            /* Get the cleaner deletion stat for all replicas. */
+            int index = 0;
+
+            /* A stats config for getting stats. */
+            StatsConfig stConfig = new StatsConfig();
+            stConfig.setFast(true);
+            stConfig.setClear(true);
+
+            for (RepEnvInfo repInfo : repEnvInfo) {
+                if (repInfo.getEnv() != null &&
+                    repInfo.getEnv().isValid()) {
+                    ReplicatedEnvironment repEnv = repInfo.getEnv();
+                    fileDeletions[index] +=
+                        repEnv.getStats(stConfig).getNCleanerDeletions();
+                    /* Get the largest log file number for each environment. */
+                    lastFileNumbers[index] =
+                        RepInternal.getNonNullRepImpl(repEnv).
+                        getFileManager().getLastFileNum();
+                }
+                index++;
+            }
+        }
+
+        /* Shut down the replicas. */
+        RepTestUtils.shutdownRepEnvs(repEnvInfo);
+
+        if (checkCleaning) {
+            /* Check whether any replica failed to do enough log cleaning. */
+            for (int i = 0; i < fileDeletions.length; i++) {
+                System.err.println("Deleted files on replica " + i + " = " +
+                                   fileDeletions[i]);
+                System.err.println("Total used log files on replica " + i +
+                                   " = " + (lastFileNumbers[i] + 1));
+                if ((fileDeletions[i] * 100) / (lastFileNumbers[i] + 1) < 40) {
+                    throw new IllegalStateException
+                        ("Expected log cleaning on replica " + i +
+                         " to cover at least 40% of used files, " +
+                         "but it did not.");
+                }
+            }
+        }
+    }
+
+    public static void createSubDirs(File envHome, int subDirNumber) {
+        createSubDirs(envHome, subDirNumber, false);
+    }
+
+    public static void createSubDirs(File envHome,
+                                     int subDirNumber,
+                                     boolean useExistEnvHome) {
+        if (!envHome.exists()) {
+            throw new IllegalStateException
+                ("The environment home has not been created yet.");
+        }
+
+        for (int i = 1; i <= subDirNumber; i++) {
+            String fileName = null;
+            if (i < 10) {
+                fileName = "data00" + i;
+            } else if (i < 100) {
+                fileName = "data0" + i;
+            } else if (i <= 256) {
+                fileName = "data" + i;
+            } else {
+                throw new IllegalArgumentException
+                    ("The number of sub-directories is invalid " +
+                     "(must be <= 256).");
+            }
+
+            File subDir = new File(envHome, fileName);
+
+            if (subDir.exists() && useExistEnvHome) {
+                continue;
+            }
+
+            if (subDir.exists()) {
+                throw new IllegalStateException
+                    ("The sub-directories should not exist yet.");
+            }
+
+            if (!subDir.mkdir()) {
+                throw new IllegalStateException
+                    ("The sub-directories could not be created.");
+            }
+        }
+    }
+}
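+/*
+ * Worked example of the 40% check in closeEnvAndCheckLogCleaning (an
+ * illustrative note, not part of the original source): suppose a replica's
+ * largest log file number is 99, so 100 files were used, and the cleaner
+ * deleted 35 of them.  Then (35 * 100) / (99 + 1) == 35, which is < 40, and
+ * the check throws; 40 or more deletions would pass.  The division is
+ * integer division, so the threshold is effectively "at least 40 percent,
+ * rounded down".  Note also that createSubDirs produces the zero-padded
+ * names data001 .. data256 that JE expects for LOG_N_DATA_DIRECTORIES
+ * sub-directories.
+ */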